diff --git a/Makefile b/Makefile index 7c1012adf..be82f2f54 100644 --- a/Makefile +++ b/Makefile @@ -77,9 +77,11 @@ export SUBPACKAGES := $(SUBPACKAGES) # Setup Kubernetes tools KIND_VERSION = v0.21.0 -UP_VERSION = v0.20.0 +UP_VERSION = v0.28.0 UP_CHANNEL = stable UPTEST_VERSION = v0.11.1 +UPTEST_LOCAL_VERSION = v0.12.0-9.gac371c9 +UPTEST_LOCAL_CHANNEL = main KUSTOMIZE_VERSION = v5.3.0 YQ_VERSION = v4.40.5 UXP_VERSION = 1.14.6-up.1 @@ -89,6 +91,16 @@ export UP_CHANNEL := $(UP_CHANNEL) -include build/makelib/k8s_tools.mk +# uptest download and install +UPTEST_LOCAL := $(TOOLS_HOST_DIR)/uptest-$(UPTEST_LOCAL_VERSION) + +$(UPTEST_LOCAL): + @$(INFO) installing uptest $(UPTEST_LOCAL) + @mkdir -p $(TOOLS_HOST_DIR) + @curl -fsSLo $(UPTEST_LOCAL) https://s3.us-west-2.amazonaws.com/crossplane.uptest.releases/$(UPTEST_LOCAL_CHANNEL)/$(UPTEST_LOCAL_VERSION)/bin/$(SAFEHOST_PLATFORM)/uptest || $(FAIL) + @chmod +x $(UPTEST_LOCAL) + @$(OK) installing uptest $(UPTEST_LOCAL) + # ==================================================================================== # Setup Images @@ -195,9 +207,9 @@ CROSSPLANE_NAMESPACE = upbound-system # - UPTEST_EXAMPLE_LIST, a comma-separated list of examples to test # - UPTEST_CLOUD_CREDENTIALS (optional), cloud credentials for the provider being tested, e.g. 
export UPTEST_CLOUD_CREDENTIALS=$(cat ~/azure.json) # - UPTEST_DATASOURCE_PATH (optional), see https://github.com/upbound/uptest#injecting-dynamic-values-and-datasource -uptest: $(UPTEST) $(KUBECTL) $(KUTTL) +uptest: $(UPTEST_LOCAL) $(KUBECTL) $(KUTTL) @$(INFO) running automated tests - @KUBECTL=$(KUBECTL) KUTTL=$(KUTTL) CROSSPLANE_NAMESPACE=$(CROSSPLANE_NAMESPACE) $(UPTEST) e2e "${UPTEST_EXAMPLE_LIST}" --data-source="${UPTEST_DATASOURCE_PATH}" --setup-script=cluster/test/setup.sh --default-conditions="Test" || $(FAIL) + @KUBECTL=$(KUBECTL) KUTTL=$(KUTTL) CROSSPLANE_NAMESPACE=$(CROSSPLANE_NAMESPACE) $(UPTEST_LOCAL) e2e "${UPTEST_EXAMPLE_LIST}" --data-source="${UPTEST_DATASOURCE_PATH}" --setup-script=cluster/test/setup.sh --default-conditions="Test" || $(FAIL) @$(OK) running automated tests uptest-local: diff --git a/apis/alertsmanagement/v1beta1/zz_generated.conversion_spokes.go b/apis/alertsmanagement/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..340e8b079 --- /dev/null +++ b/apis/alertsmanagement/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,114 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this MonitorActionRuleActionGroup to the hub type. 
+func (tr *MonitorActionRuleActionGroup) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the MonitorActionRuleActionGroup type. +func (tr *MonitorActionRuleActionGroup) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this MonitorActionRuleSuppression to the hub type. +func (tr *MonitorActionRuleSuppression) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the MonitorActionRuleSuppression type. 
+func (tr *MonitorActionRuleSuppression) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this MonitorAlertProcessingRuleActionGroup to the hub type. +func (tr *MonitorAlertProcessingRuleActionGroup) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the MonitorAlertProcessingRuleActionGroup type. +func (tr *MonitorAlertProcessingRuleActionGroup) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this MonitorAlertProcessingRuleSuppression to the hub type. 
+func (tr *MonitorAlertProcessingRuleSuppression) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the MonitorAlertProcessingRuleSuppression type. +func (tr *MonitorAlertProcessingRuleSuppression) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this MonitorSmartDetectorAlertRule to the hub type. +func (tr *MonitorSmartDetectorAlertRule) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the MonitorSmartDetectorAlertRule type. 
+func (tr *MonitorSmartDetectorAlertRule) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/alertsmanagement/v1beta1/zz_generated.conversion_hubs.go b/apis/alertsmanagement/v1beta2/zz_generated.conversion_hubs.go similarity index 97% rename from apis/alertsmanagement/v1beta1/zz_generated.conversion_hubs.go rename to apis/alertsmanagement/v1beta2/zz_generated.conversion_hubs.go index 43cb9d3a7..d88b4a8f2 100755 --- a/apis/alertsmanagement/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/alertsmanagement/v1beta2/zz_generated.conversion_hubs.go @@ -4,7 +4,7 @@ // Code generated by upjet. DO NOT EDIT. -package v1beta1 +package v1beta2 // Hub marks this type as a conversion hub. func (tr *MonitorActionRuleActionGroup) Hub() {} diff --git a/apis/alertsmanagement/v1beta2/zz_generated.deepcopy.go b/apis/alertsmanagement/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..1bb1438be --- /dev/null +++ b/apis/alertsmanagement/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,7337 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ActionGroupInitParameters) DeepCopyInto(out *ActionGroupInitParameters) { + *out = *in + if in.EmailSubject != nil { + in, out := &in.EmailSubject, &out.EmailSubject + *out = new(string) + **out = **in + } + if in.Ids != nil { + in, out := &in.Ids, &out.Ids + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IdsRefs != nil { + in, out := &in.IdsRefs, &out.IdsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IdsSelector != nil { + in, out := &in.IdsSelector, &out.IdsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.WebhookPayload != nil { + in, out := &in.WebhookPayload, &out.WebhookPayload + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionGroupInitParameters. +func (in *ActionGroupInitParameters) DeepCopy() *ActionGroupInitParameters { + if in == nil { + return nil + } + out := new(ActionGroupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionGroupObservation) DeepCopyInto(out *ActionGroupObservation) { + *out = *in + if in.EmailSubject != nil { + in, out := &in.EmailSubject, &out.EmailSubject + *out = new(string) + **out = **in + } + if in.Ids != nil { + in, out := &in.Ids, &out.Ids + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.WebhookPayload != nil { + in, out := &in.WebhookPayload, &out.WebhookPayload + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionGroupObservation. 
+func (in *ActionGroupObservation) DeepCopy() *ActionGroupObservation { + if in == nil { + return nil + } + out := new(ActionGroupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionGroupParameters) DeepCopyInto(out *ActionGroupParameters) { + *out = *in + if in.EmailSubject != nil { + in, out := &in.EmailSubject, &out.EmailSubject + *out = new(string) + **out = **in + } + if in.Ids != nil { + in, out := &in.Ids, &out.Ids + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IdsRefs != nil { + in, out := &in.IdsRefs, &out.IdsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IdsSelector != nil { + in, out := &in.IdsSelector, &out.IdsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.WebhookPayload != nil { + in, out := &in.WebhookPayload, &out.WebhookPayload + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionGroupParameters. +func (in *ActionGroupParameters) DeepCopy() *ActionGroupParameters { + if in == nil { + return nil + } + out := new(ActionGroupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AlertContextInitParameters) DeepCopyInto(out *AlertContextInitParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertContextInitParameters. +func (in *AlertContextInitParameters) DeepCopy() *AlertContextInitParameters { + if in == nil { + return nil + } + out := new(AlertContextInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlertContextObservation) DeepCopyInto(out *AlertContextObservation) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertContextObservation. +func (in *AlertContextObservation) DeepCopy() *AlertContextObservation { + if in == nil { + return nil + } + out := new(AlertContextObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AlertContextParameters) DeepCopyInto(out *AlertContextParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertContextParameters. +func (in *AlertContextParameters) DeepCopy() *AlertContextParameters { + if in == nil { + return nil + } + out := new(AlertContextParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlertRuleIDInitParameters) DeepCopyInto(out *AlertRuleIDInitParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertRuleIDInitParameters. +func (in *AlertRuleIDInitParameters) DeepCopy() *AlertRuleIDInitParameters { + if in == nil { + return nil + } + out := new(AlertRuleIDInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AlertRuleIDObservation) DeepCopyInto(out *AlertRuleIDObservation) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertRuleIDObservation. +func (in *AlertRuleIDObservation) DeepCopy() *AlertRuleIDObservation { + if in == nil { + return nil + } + out := new(AlertRuleIDObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlertRuleIDParameters) DeepCopyInto(out *AlertRuleIDParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertRuleIDParameters. +func (in *AlertRuleIDParameters) DeepCopy() *AlertRuleIDParameters { + if in == nil { + return nil + } + out := new(AlertRuleIDParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AlertRuleNameInitParameters) DeepCopyInto(out *AlertRuleNameInitParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertRuleNameInitParameters. +func (in *AlertRuleNameInitParameters) DeepCopy() *AlertRuleNameInitParameters { + if in == nil { + return nil + } + out := new(AlertRuleNameInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlertRuleNameObservation) DeepCopyInto(out *AlertRuleNameObservation) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertRuleNameObservation. +func (in *AlertRuleNameObservation) DeepCopy() *AlertRuleNameObservation { + if in == nil { + return nil + } + out := new(AlertRuleNameObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AlertRuleNameParameters) DeepCopyInto(out *AlertRuleNameParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertRuleNameParameters. +func (in *AlertRuleNameParameters) DeepCopy() *AlertRuleNameParameters { + if in == nil { + return nil + } + out := new(AlertRuleNameParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionAlertContextInitParameters) DeepCopyInto(out *ConditionAlertContextInitParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionAlertContextInitParameters. +func (in *ConditionAlertContextInitParameters) DeepCopy() *ConditionAlertContextInitParameters { + if in == nil { + return nil + } + out := new(ConditionAlertContextInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConditionAlertContextObservation) DeepCopyInto(out *ConditionAlertContextObservation) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionAlertContextObservation. +func (in *ConditionAlertContextObservation) DeepCopy() *ConditionAlertContextObservation { + if in == nil { + return nil + } + out := new(ConditionAlertContextObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionAlertContextParameters) DeepCopyInto(out *ConditionAlertContextParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionAlertContextParameters. +func (in *ConditionAlertContextParameters) DeepCopy() *ConditionAlertContextParameters { + if in == nil { + return nil + } + out := new(ConditionAlertContextParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConditionAlertRuleIDInitParameters) DeepCopyInto(out *ConditionAlertRuleIDInitParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionAlertRuleIDInitParameters. +func (in *ConditionAlertRuleIDInitParameters) DeepCopy() *ConditionAlertRuleIDInitParameters { + if in == nil { + return nil + } + out := new(ConditionAlertRuleIDInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionAlertRuleIDObservation) DeepCopyInto(out *ConditionAlertRuleIDObservation) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionAlertRuleIDObservation. +func (in *ConditionAlertRuleIDObservation) DeepCopy() *ConditionAlertRuleIDObservation { + if in == nil { + return nil + } + out := new(ConditionAlertRuleIDObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConditionAlertRuleIDParameters) DeepCopyInto(out *ConditionAlertRuleIDParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionAlertRuleIDParameters. +func (in *ConditionAlertRuleIDParameters) DeepCopy() *ConditionAlertRuleIDParameters { + if in == nil { + return nil + } + out := new(ConditionAlertRuleIDParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionAlertRuleNameInitParameters) DeepCopyInto(out *ConditionAlertRuleNameInitParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionAlertRuleNameInitParameters. +func (in *ConditionAlertRuleNameInitParameters) DeepCopy() *ConditionAlertRuleNameInitParameters { + if in == nil { + return nil + } + out := new(ConditionAlertRuleNameInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConditionAlertRuleNameObservation) DeepCopyInto(out *ConditionAlertRuleNameObservation) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionAlertRuleNameObservation. +func (in *ConditionAlertRuleNameObservation) DeepCopy() *ConditionAlertRuleNameObservation { + if in == nil { + return nil + } + out := new(ConditionAlertRuleNameObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionAlertRuleNameParameters) DeepCopyInto(out *ConditionAlertRuleNameParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionAlertRuleNameParameters. +func (in *ConditionAlertRuleNameParameters) DeepCopy() *ConditionAlertRuleNameParameters { + if in == nil { + return nil + } + out := new(ConditionAlertRuleNameParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConditionDescriptionInitParameters) DeepCopyInto(out *ConditionDescriptionInitParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionDescriptionInitParameters. +func (in *ConditionDescriptionInitParameters) DeepCopy() *ConditionDescriptionInitParameters { + if in == nil { + return nil + } + out := new(ConditionDescriptionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionDescriptionObservation) DeepCopyInto(out *ConditionDescriptionObservation) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionDescriptionObservation. +func (in *ConditionDescriptionObservation) DeepCopy() *ConditionDescriptionObservation { + if in == nil { + return nil + } + out := new(ConditionDescriptionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConditionDescriptionParameters) DeepCopyInto(out *ConditionDescriptionParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionDescriptionParameters. +func (in *ConditionDescriptionParameters) DeepCopy() *ConditionDescriptionParameters { + if in == nil { + return nil + } + out := new(ConditionDescriptionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionInitParameters) DeepCopyInto(out *ConditionInitParameters) { + *out = *in + if in.AlertContext != nil { + in, out := &in.AlertContext, &out.AlertContext + *out = new(AlertContextInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AlertRuleID != nil { + in, out := &in.AlertRuleID, &out.AlertRuleID + *out = new(AlertRuleIDInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(DescriptionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Monitor != nil { + in, out := &in.Monitor, &out.Monitor + *out = new(MonitorInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MonitorService != nil { + in, out := &in.MonitorService, &out.MonitorService + *out = new(MonitorServiceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Severity != nil { + in, out := &in.Severity, &out.Severity + *out = new(SeverityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TargetResourceType != nil { + in, out := &in.TargetResourceType, &out.TargetResourceType + *out = new(TargetResourceTypeInitParameters) + 
(*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionInitParameters. +func (in *ConditionInitParameters) DeepCopy() *ConditionInitParameters { + if in == nil { + return nil + } + out := new(ConditionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionMonitorConditionInitParameters) DeepCopyInto(out *ConditionMonitorConditionInitParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionMonitorConditionInitParameters. +func (in *ConditionMonitorConditionInitParameters) DeepCopy() *ConditionMonitorConditionInitParameters { + if in == nil { + return nil + } + out := new(ConditionMonitorConditionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionMonitorConditionObservation) DeepCopyInto(out *ConditionMonitorConditionObservation) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionMonitorConditionObservation. 
+func (in *ConditionMonitorConditionObservation) DeepCopy() *ConditionMonitorConditionObservation { + if in == nil { + return nil + } + out := new(ConditionMonitorConditionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionMonitorConditionParameters) DeepCopyInto(out *ConditionMonitorConditionParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionMonitorConditionParameters. +func (in *ConditionMonitorConditionParameters) DeepCopy() *ConditionMonitorConditionParameters { + if in == nil { + return nil + } + out := new(ConditionMonitorConditionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionMonitorInitParameters) DeepCopyInto(out *ConditionMonitorInitParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionMonitorInitParameters. 
+func (in *ConditionMonitorInitParameters) DeepCopy() *ConditionMonitorInitParameters { + if in == nil { + return nil + } + out := new(ConditionMonitorInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionMonitorObservation) DeepCopyInto(out *ConditionMonitorObservation) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionMonitorObservation. +func (in *ConditionMonitorObservation) DeepCopy() *ConditionMonitorObservation { + if in == nil { + return nil + } + out := new(ConditionMonitorObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionMonitorParameters) DeepCopyInto(out *ConditionMonitorParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionMonitorParameters. 
+func (in *ConditionMonitorParameters) DeepCopy() *ConditionMonitorParameters { + if in == nil { + return nil + } + out := new(ConditionMonitorParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionMonitorServiceInitParameters) DeepCopyInto(out *ConditionMonitorServiceInitParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionMonitorServiceInitParameters. +func (in *ConditionMonitorServiceInitParameters) DeepCopy() *ConditionMonitorServiceInitParameters { + if in == nil { + return nil + } + out := new(ConditionMonitorServiceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionMonitorServiceObservation) DeepCopyInto(out *ConditionMonitorServiceObservation) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionMonitorServiceObservation. 
+func (in *ConditionMonitorServiceObservation) DeepCopy() *ConditionMonitorServiceObservation { + if in == nil { + return nil + } + out := new(ConditionMonitorServiceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionMonitorServiceParameters) DeepCopyInto(out *ConditionMonitorServiceParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionMonitorServiceParameters. +func (in *ConditionMonitorServiceParameters) DeepCopy() *ConditionMonitorServiceParameters { + if in == nil { + return nil + } + out := new(ConditionMonitorServiceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConditionObservation) DeepCopyInto(out *ConditionObservation) { + *out = *in + if in.AlertContext != nil { + in, out := &in.AlertContext, &out.AlertContext + *out = new(AlertContextObservation) + (*in).DeepCopyInto(*out) + } + if in.AlertRuleID != nil { + in, out := &in.AlertRuleID, &out.AlertRuleID + *out = new(AlertRuleIDObservation) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(DescriptionObservation) + (*in).DeepCopyInto(*out) + } + if in.Monitor != nil { + in, out := &in.Monitor, &out.Monitor + *out = new(MonitorObservation) + (*in).DeepCopyInto(*out) + } + if in.MonitorService != nil { + in, out := &in.MonitorService, &out.MonitorService + *out = new(MonitorServiceObservation) + (*in).DeepCopyInto(*out) + } + if in.Severity != nil { + in, out := &in.Severity, &out.Severity + *out = new(SeverityObservation) + (*in).DeepCopyInto(*out) + } + if in.TargetResourceType != nil { + in, out := &in.TargetResourceType, &out.TargetResourceType + *out = new(TargetResourceTypeObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionObservation. +func (in *ConditionObservation) DeepCopy() *ConditionObservation { + if in == nil { + return nil + } + out := new(ConditionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConditionParameters) DeepCopyInto(out *ConditionParameters) { + *out = *in + if in.AlertContext != nil { + in, out := &in.AlertContext, &out.AlertContext + *out = new(AlertContextParameters) + (*in).DeepCopyInto(*out) + } + if in.AlertRuleID != nil { + in, out := &in.AlertRuleID, &out.AlertRuleID + *out = new(AlertRuleIDParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(DescriptionParameters) + (*in).DeepCopyInto(*out) + } + if in.Monitor != nil { + in, out := &in.Monitor, &out.Monitor + *out = new(MonitorParameters) + (*in).DeepCopyInto(*out) + } + if in.MonitorService != nil { + in, out := &in.MonitorService, &out.MonitorService + *out = new(MonitorServiceParameters) + (*in).DeepCopyInto(*out) + } + if in.Severity != nil { + in, out := &in.Severity, &out.Severity + *out = new(SeverityParameters) + (*in).DeepCopyInto(*out) + } + if in.TargetResourceType != nil { + in, out := &in.TargetResourceType, &out.TargetResourceType + *out = new(TargetResourceTypeParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionParameters. +func (in *ConditionParameters) DeepCopy() *ConditionParameters { + if in == nil { + return nil + } + out := new(ConditionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConditionSeverityInitParameters) DeepCopyInto(out *ConditionSeverityInitParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionSeverityInitParameters. +func (in *ConditionSeverityInitParameters) DeepCopy() *ConditionSeverityInitParameters { + if in == nil { + return nil + } + out := new(ConditionSeverityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionSeverityObservation) DeepCopyInto(out *ConditionSeverityObservation) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionSeverityObservation. +func (in *ConditionSeverityObservation) DeepCopy() *ConditionSeverityObservation { + if in == nil { + return nil + } + out := new(ConditionSeverityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConditionSeverityParameters) DeepCopyInto(out *ConditionSeverityParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionSeverityParameters. +func (in *ConditionSeverityParameters) DeepCopy() *ConditionSeverityParameters { + if in == nil { + return nil + } + out := new(ConditionSeverityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionSignalTypeInitParameters) DeepCopyInto(out *ConditionSignalTypeInitParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionSignalTypeInitParameters. +func (in *ConditionSignalTypeInitParameters) DeepCopy() *ConditionSignalTypeInitParameters { + if in == nil { + return nil + } + out := new(ConditionSignalTypeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConditionSignalTypeObservation) DeepCopyInto(out *ConditionSignalTypeObservation) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionSignalTypeObservation. +func (in *ConditionSignalTypeObservation) DeepCopy() *ConditionSignalTypeObservation { + if in == nil { + return nil + } + out := new(ConditionSignalTypeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionSignalTypeParameters) DeepCopyInto(out *ConditionSignalTypeParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionSignalTypeParameters. +func (in *ConditionSignalTypeParameters) DeepCopy() *ConditionSignalTypeParameters { + if in == nil { + return nil + } + out := new(ConditionSignalTypeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConditionTargetResourceGroupInitParameters) DeepCopyInto(out *ConditionTargetResourceGroupInitParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionTargetResourceGroupInitParameters. +func (in *ConditionTargetResourceGroupInitParameters) DeepCopy() *ConditionTargetResourceGroupInitParameters { + if in == nil { + return nil + } + out := new(ConditionTargetResourceGroupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionTargetResourceGroupObservation) DeepCopyInto(out *ConditionTargetResourceGroupObservation) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionTargetResourceGroupObservation. +func (in *ConditionTargetResourceGroupObservation) DeepCopy() *ConditionTargetResourceGroupObservation { + if in == nil { + return nil + } + out := new(ConditionTargetResourceGroupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConditionTargetResourceGroupParameters) DeepCopyInto(out *ConditionTargetResourceGroupParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionTargetResourceGroupParameters. +func (in *ConditionTargetResourceGroupParameters) DeepCopy() *ConditionTargetResourceGroupParameters { + if in == nil { + return nil + } + out := new(ConditionTargetResourceGroupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionTargetResourceInitParameters) DeepCopyInto(out *ConditionTargetResourceInitParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionTargetResourceInitParameters. +func (in *ConditionTargetResourceInitParameters) DeepCopy() *ConditionTargetResourceInitParameters { + if in == nil { + return nil + } + out := new(ConditionTargetResourceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConditionTargetResourceObservation) DeepCopyInto(out *ConditionTargetResourceObservation) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionTargetResourceObservation. +func (in *ConditionTargetResourceObservation) DeepCopy() *ConditionTargetResourceObservation { + if in == nil { + return nil + } + out := new(ConditionTargetResourceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionTargetResourceParameters) DeepCopyInto(out *ConditionTargetResourceParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionTargetResourceParameters. +func (in *ConditionTargetResourceParameters) DeepCopy() *ConditionTargetResourceParameters { + if in == nil { + return nil + } + out := new(ConditionTargetResourceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConditionTargetResourceTypeInitParameters) DeepCopyInto(out *ConditionTargetResourceTypeInitParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionTargetResourceTypeInitParameters. +func (in *ConditionTargetResourceTypeInitParameters) DeepCopy() *ConditionTargetResourceTypeInitParameters { + if in == nil { + return nil + } + out := new(ConditionTargetResourceTypeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionTargetResourceTypeObservation) DeepCopyInto(out *ConditionTargetResourceTypeObservation) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionTargetResourceTypeObservation. +func (in *ConditionTargetResourceTypeObservation) DeepCopy() *ConditionTargetResourceTypeObservation { + if in == nil { + return nil + } + out := new(ConditionTargetResourceTypeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConditionTargetResourceTypeParameters) DeepCopyInto(out *ConditionTargetResourceTypeParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionTargetResourceTypeParameters. +func (in *ConditionTargetResourceTypeParameters) DeepCopy() *ConditionTargetResourceTypeParameters { + if in == nil { + return nil + } + out := new(ConditionTargetResourceTypeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DailyInitParameters) DeepCopyInto(out *DailyInitParameters) { + *out = *in + if in.EndTime != nil { + in, out := &in.EndTime, &out.EndTime + *out = new(string) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DailyInitParameters. +func (in *DailyInitParameters) DeepCopy() *DailyInitParameters { + if in == nil { + return nil + } + out := new(DailyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DailyObservation) DeepCopyInto(out *DailyObservation) { + *out = *in + if in.EndTime != nil { + in, out := &in.EndTime, &out.EndTime + *out = new(string) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DailyObservation. +func (in *DailyObservation) DeepCopy() *DailyObservation { + if in == nil { + return nil + } + out := new(DailyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DailyParameters) DeepCopyInto(out *DailyParameters) { + *out = *in + if in.EndTime != nil { + in, out := &in.EndTime, &out.EndTime + *out = new(string) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DailyParameters. +func (in *DailyParameters) DeepCopy() *DailyParameters { + if in == nil { + return nil + } + out := new(DailyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DescriptionInitParameters) DeepCopyInto(out *DescriptionInitParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DescriptionInitParameters. 
+func (in *DescriptionInitParameters) DeepCopy() *DescriptionInitParameters { + if in == nil { + return nil + } + out := new(DescriptionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DescriptionObservation) DeepCopyInto(out *DescriptionObservation) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DescriptionObservation. +func (in *DescriptionObservation) DeepCopy() *DescriptionObservation { + if in == nil { + return nil + } + out := new(DescriptionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DescriptionParameters) DeepCopyInto(out *DescriptionParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DescriptionParameters. +func (in *DescriptionParameters) DeepCopy() *DescriptionParameters { + if in == nil { + return nil + } + out := new(DescriptionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorActionRuleActionGroup) DeepCopyInto(out *MonitorActionRuleActionGroup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorActionRuleActionGroup. +func (in *MonitorActionRuleActionGroup) DeepCopy() *MonitorActionRuleActionGroup { + if in == nil { + return nil + } + out := new(MonitorActionRuleActionGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MonitorActionRuleActionGroup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorActionRuleActionGroupInitParameters) DeepCopyInto(out *MonitorActionRuleActionGroupInitParameters) { + *out = *in + if in.ActionGroupID != nil { + in, out := &in.ActionGroupID, &out.ActionGroupID + *out = new(string) + **out = **in + } + if in.ActionGroupIDRef != nil { + in, out := &in.ActionGroupIDRef, &out.ActionGroupIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ActionGroupIDSelector != nil { + in, out := &in.ActionGroupIDSelector, &out.ActionGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = new(ConditionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = new(ScopeInitParameters) + (*in).DeepCopyInto(*out) + } + if 
in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorActionRuleActionGroupInitParameters. +func (in *MonitorActionRuleActionGroupInitParameters) DeepCopy() *MonitorActionRuleActionGroupInitParameters { + if in == nil { + return nil + } + out := new(MonitorActionRuleActionGroupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorActionRuleActionGroupList) DeepCopyInto(out *MonitorActionRuleActionGroupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MonitorActionRuleActionGroup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorActionRuleActionGroupList. +func (in *MonitorActionRuleActionGroupList) DeepCopy() *MonitorActionRuleActionGroupList { + if in == nil { + return nil + } + out := new(MonitorActionRuleActionGroupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MonitorActionRuleActionGroupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorActionRuleActionGroupObservation) DeepCopyInto(out *MonitorActionRuleActionGroupObservation) { + *out = *in + if in.ActionGroupID != nil { + in, out := &in.ActionGroupID, &out.ActionGroupID + *out = new(string) + **out = **in + } + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = new(ConditionObservation) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = new(ScopeObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorActionRuleActionGroupObservation. +func (in *MonitorActionRuleActionGroupObservation) DeepCopy() *MonitorActionRuleActionGroupObservation { + if in == nil { + return nil + } + out := new(MonitorActionRuleActionGroupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorActionRuleActionGroupParameters) DeepCopyInto(out *MonitorActionRuleActionGroupParameters) { + *out = *in + if in.ActionGroupID != nil { + in, out := &in.ActionGroupID, &out.ActionGroupID + *out = new(string) + **out = **in + } + if in.ActionGroupIDRef != nil { + in, out := &in.ActionGroupIDRef, &out.ActionGroupIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ActionGroupIDSelector != nil { + in, out := &in.ActionGroupIDSelector, &out.ActionGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = new(ConditionParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = new(ScopeParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorActionRuleActionGroupParameters. 
+func (in *MonitorActionRuleActionGroupParameters) DeepCopy() *MonitorActionRuleActionGroupParameters { + if in == nil { + return nil + } + out := new(MonitorActionRuleActionGroupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorActionRuleActionGroupSpec) DeepCopyInto(out *MonitorActionRuleActionGroupSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorActionRuleActionGroupSpec. +func (in *MonitorActionRuleActionGroupSpec) DeepCopy() *MonitorActionRuleActionGroupSpec { + if in == nil { + return nil + } + out := new(MonitorActionRuleActionGroupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorActionRuleActionGroupStatus) DeepCopyInto(out *MonitorActionRuleActionGroupStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorActionRuleActionGroupStatus. +func (in *MonitorActionRuleActionGroupStatus) DeepCopy() *MonitorActionRuleActionGroupStatus { + if in == nil { + return nil + } + out := new(MonitorActionRuleActionGroupStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorActionRuleSuppression) DeepCopyInto(out *MonitorActionRuleSuppression) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorActionRuleSuppression. +func (in *MonitorActionRuleSuppression) DeepCopy() *MonitorActionRuleSuppression { + if in == nil { + return nil + } + out := new(MonitorActionRuleSuppression) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MonitorActionRuleSuppression) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorActionRuleSuppressionConditionInitParameters) DeepCopyInto(out *MonitorActionRuleSuppressionConditionInitParameters) { + *out = *in + if in.AlertContext != nil { + in, out := &in.AlertContext, &out.AlertContext + *out = new(ConditionAlertContextInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AlertRuleID != nil { + in, out := &in.AlertRuleID, &out.AlertRuleID + *out = new(ConditionAlertRuleIDInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(ConditionDescriptionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Monitor != nil { + in, out := &in.Monitor, &out.Monitor + *out = new(ConditionMonitorInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MonitorService != nil { + in, out := &in.MonitorService, &out.MonitorService + *out = new(ConditionMonitorServiceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Severity != nil { + in, out := &in.Severity, &out.Severity + *out = new(ConditionSeverityInitParameters) + 
(*in).DeepCopyInto(*out) + } + if in.TargetResourceType != nil { + in, out := &in.TargetResourceType, &out.TargetResourceType + *out = new(ConditionTargetResourceTypeInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorActionRuleSuppressionConditionInitParameters. +func (in *MonitorActionRuleSuppressionConditionInitParameters) DeepCopy() *MonitorActionRuleSuppressionConditionInitParameters { + if in == nil { + return nil + } + out := new(MonitorActionRuleSuppressionConditionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorActionRuleSuppressionConditionObservation) DeepCopyInto(out *MonitorActionRuleSuppressionConditionObservation) { + *out = *in + if in.AlertContext != nil { + in, out := &in.AlertContext, &out.AlertContext + *out = new(ConditionAlertContextObservation) + (*in).DeepCopyInto(*out) + } + if in.AlertRuleID != nil { + in, out := &in.AlertRuleID, &out.AlertRuleID + *out = new(ConditionAlertRuleIDObservation) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(ConditionDescriptionObservation) + (*in).DeepCopyInto(*out) + } + if in.Monitor != nil { + in, out := &in.Monitor, &out.Monitor + *out = new(ConditionMonitorObservation) + (*in).DeepCopyInto(*out) + } + if in.MonitorService != nil { + in, out := &in.MonitorService, &out.MonitorService + *out = new(ConditionMonitorServiceObservation) + (*in).DeepCopyInto(*out) + } + if in.Severity != nil { + in, out := &in.Severity, &out.Severity + *out = new(ConditionSeverityObservation) + (*in).DeepCopyInto(*out) + } + if in.TargetResourceType != nil { + in, out := &in.TargetResourceType, &out.TargetResourceType + *out = new(ConditionTargetResourceTypeObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is 
an autogenerated deepcopy function, copying the receiver, creating a new MonitorActionRuleSuppressionConditionObservation. +func (in *MonitorActionRuleSuppressionConditionObservation) DeepCopy() *MonitorActionRuleSuppressionConditionObservation { + if in == nil { + return nil + } + out := new(MonitorActionRuleSuppressionConditionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorActionRuleSuppressionConditionParameters) DeepCopyInto(out *MonitorActionRuleSuppressionConditionParameters) { + *out = *in + if in.AlertContext != nil { + in, out := &in.AlertContext, &out.AlertContext + *out = new(ConditionAlertContextParameters) + (*in).DeepCopyInto(*out) + } + if in.AlertRuleID != nil { + in, out := &in.AlertRuleID, &out.AlertRuleID + *out = new(ConditionAlertRuleIDParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(ConditionDescriptionParameters) + (*in).DeepCopyInto(*out) + } + if in.Monitor != nil { + in, out := &in.Monitor, &out.Monitor + *out = new(ConditionMonitorParameters) + (*in).DeepCopyInto(*out) + } + if in.MonitorService != nil { + in, out := &in.MonitorService, &out.MonitorService + *out = new(ConditionMonitorServiceParameters) + (*in).DeepCopyInto(*out) + } + if in.Severity != nil { + in, out := &in.Severity, &out.Severity + *out = new(ConditionSeverityParameters) + (*in).DeepCopyInto(*out) + } + if in.TargetResourceType != nil { + in, out := &in.TargetResourceType, &out.TargetResourceType + *out = new(ConditionTargetResourceTypeParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorActionRuleSuppressionConditionParameters. 
+func (in *MonitorActionRuleSuppressionConditionParameters) DeepCopy() *MonitorActionRuleSuppressionConditionParameters { + if in == nil { + return nil + } + out := new(MonitorActionRuleSuppressionConditionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorActionRuleSuppressionInitParameters) DeepCopyInto(out *MonitorActionRuleSuppressionInitParameters) { + *out = *in + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = new(MonitorActionRuleSuppressionConditionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = new(MonitorActionRuleSuppressionScopeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Suppression != nil { + in, out := &in.Suppression, &out.Suppression + *out = new(SuppressionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorActionRuleSuppressionInitParameters. +func (in *MonitorActionRuleSuppressionInitParameters) DeepCopy() *MonitorActionRuleSuppressionInitParameters { + if in == nil { + return nil + } + out := new(MonitorActionRuleSuppressionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *MonitorActionRuleSuppressionList) DeepCopyInto(out *MonitorActionRuleSuppressionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MonitorActionRuleSuppression, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorActionRuleSuppressionList. +func (in *MonitorActionRuleSuppressionList) DeepCopy() *MonitorActionRuleSuppressionList { + if in == nil { + return nil + } + out := new(MonitorActionRuleSuppressionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MonitorActionRuleSuppressionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorActionRuleSuppressionObservation) DeepCopyInto(out *MonitorActionRuleSuppressionObservation) { + *out = *in + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = new(MonitorActionRuleSuppressionConditionObservation) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = new(MonitorActionRuleSuppressionScopeObservation) + (*in).DeepCopyInto(*out) + } + if in.Suppression != nil { + in, out := &in.Suppression, &out.Suppression + *out = new(SuppressionObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorActionRuleSuppressionObservation. +func (in *MonitorActionRuleSuppressionObservation) DeepCopy() *MonitorActionRuleSuppressionObservation { + if in == nil { + return nil + } + out := new(MonitorActionRuleSuppressionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorActionRuleSuppressionParameters) DeepCopyInto(out *MonitorActionRuleSuppressionParameters) { + *out = *in + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = new(MonitorActionRuleSuppressionConditionParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = new(MonitorActionRuleSuppressionScopeParameters) + (*in).DeepCopyInto(*out) + } + if in.Suppression != nil { + in, out := &in.Suppression, &out.Suppression + *out = new(SuppressionParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorActionRuleSuppressionParameters. 
+func (in *MonitorActionRuleSuppressionParameters) DeepCopy() *MonitorActionRuleSuppressionParameters { + if in == nil { + return nil + } + out := new(MonitorActionRuleSuppressionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorActionRuleSuppressionScopeInitParameters) DeepCopyInto(out *MonitorActionRuleSuppressionScopeInitParameters) { + *out = *in + if in.ResourceIds != nil { + in, out := &in.ResourceIds, &out.ResourceIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorActionRuleSuppressionScopeInitParameters. +func (in *MonitorActionRuleSuppressionScopeInitParameters) DeepCopy() *MonitorActionRuleSuppressionScopeInitParameters { + if in == nil { + return nil + } + out := new(MonitorActionRuleSuppressionScopeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorActionRuleSuppressionScopeObservation) DeepCopyInto(out *MonitorActionRuleSuppressionScopeObservation) { + *out = *in + if in.ResourceIds != nil { + in, out := &in.ResourceIds, &out.ResourceIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorActionRuleSuppressionScopeObservation. 
+func (in *MonitorActionRuleSuppressionScopeObservation) DeepCopy() *MonitorActionRuleSuppressionScopeObservation { + if in == nil { + return nil + } + out := new(MonitorActionRuleSuppressionScopeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorActionRuleSuppressionScopeParameters) DeepCopyInto(out *MonitorActionRuleSuppressionScopeParameters) { + *out = *in + if in.ResourceIds != nil { + in, out := &in.ResourceIds, &out.ResourceIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorActionRuleSuppressionScopeParameters. +func (in *MonitorActionRuleSuppressionScopeParameters) DeepCopy() *MonitorActionRuleSuppressionScopeParameters { + if in == nil { + return nil + } + out := new(MonitorActionRuleSuppressionScopeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorActionRuleSuppressionSpec) DeepCopyInto(out *MonitorActionRuleSuppressionSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorActionRuleSuppressionSpec. 
+func (in *MonitorActionRuleSuppressionSpec) DeepCopy() *MonitorActionRuleSuppressionSpec { + if in == nil { + return nil + } + out := new(MonitorActionRuleSuppressionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorActionRuleSuppressionStatus) DeepCopyInto(out *MonitorActionRuleSuppressionStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorActionRuleSuppressionStatus. +func (in *MonitorActionRuleSuppressionStatus) DeepCopy() *MonitorActionRuleSuppressionStatus { + if in == nil { + return nil + } + out := new(MonitorActionRuleSuppressionStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorAlertProcessingRuleActionGroup) DeepCopyInto(out *MonitorAlertProcessingRuleActionGroup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleActionGroup. +func (in *MonitorAlertProcessingRuleActionGroup) DeepCopy() *MonitorAlertProcessingRuleActionGroup { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleActionGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *MonitorAlertProcessingRuleActionGroup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorAlertProcessingRuleActionGroupConditionAlertContextInitParameters) DeepCopyInto(out *MonitorAlertProcessingRuleActionGroupConditionAlertContextInitParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleActionGroupConditionAlertContextInitParameters. +func (in *MonitorAlertProcessingRuleActionGroupConditionAlertContextInitParameters) DeepCopy() *MonitorAlertProcessingRuleActionGroupConditionAlertContextInitParameters { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleActionGroupConditionAlertContextInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorAlertProcessingRuleActionGroupConditionAlertContextObservation) DeepCopyInto(out *MonitorAlertProcessingRuleActionGroupConditionAlertContextObservation) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleActionGroupConditionAlertContextObservation. +func (in *MonitorAlertProcessingRuleActionGroupConditionAlertContextObservation) DeepCopy() *MonitorAlertProcessingRuleActionGroupConditionAlertContextObservation { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleActionGroupConditionAlertContextObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorAlertProcessingRuleActionGroupConditionAlertContextParameters) DeepCopyInto(out *MonitorAlertProcessingRuleActionGroupConditionAlertContextParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleActionGroupConditionAlertContextParameters. 
+func (in *MonitorAlertProcessingRuleActionGroupConditionAlertContextParameters) DeepCopy() *MonitorAlertProcessingRuleActionGroupConditionAlertContextParameters { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleActionGroupConditionAlertContextParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorAlertProcessingRuleActionGroupConditionAlertRuleIDInitParameters) DeepCopyInto(out *MonitorAlertProcessingRuleActionGroupConditionAlertRuleIDInitParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleActionGroupConditionAlertRuleIDInitParameters. +func (in *MonitorAlertProcessingRuleActionGroupConditionAlertRuleIDInitParameters) DeepCopy() *MonitorAlertProcessingRuleActionGroupConditionAlertRuleIDInitParameters { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleActionGroupConditionAlertRuleIDInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorAlertProcessingRuleActionGroupConditionAlertRuleIDObservation) DeepCopyInto(out *MonitorAlertProcessingRuleActionGroupConditionAlertRuleIDObservation) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleActionGroupConditionAlertRuleIDObservation. +func (in *MonitorAlertProcessingRuleActionGroupConditionAlertRuleIDObservation) DeepCopy() *MonitorAlertProcessingRuleActionGroupConditionAlertRuleIDObservation { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleActionGroupConditionAlertRuleIDObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorAlertProcessingRuleActionGroupConditionAlertRuleIDParameters) DeepCopyInto(out *MonitorAlertProcessingRuleActionGroupConditionAlertRuleIDParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleActionGroupConditionAlertRuleIDParameters. 
+func (in *MonitorAlertProcessingRuleActionGroupConditionAlertRuleIDParameters) DeepCopy() *MonitorAlertProcessingRuleActionGroupConditionAlertRuleIDParameters { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleActionGroupConditionAlertRuleIDParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorAlertProcessingRuleActionGroupConditionDescriptionInitParameters) DeepCopyInto(out *MonitorAlertProcessingRuleActionGroupConditionDescriptionInitParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleActionGroupConditionDescriptionInitParameters. +func (in *MonitorAlertProcessingRuleActionGroupConditionDescriptionInitParameters) DeepCopy() *MonitorAlertProcessingRuleActionGroupConditionDescriptionInitParameters { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleActionGroupConditionDescriptionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorAlertProcessingRuleActionGroupConditionDescriptionObservation) DeepCopyInto(out *MonitorAlertProcessingRuleActionGroupConditionDescriptionObservation) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleActionGroupConditionDescriptionObservation. +func (in *MonitorAlertProcessingRuleActionGroupConditionDescriptionObservation) DeepCopy() *MonitorAlertProcessingRuleActionGroupConditionDescriptionObservation { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleActionGroupConditionDescriptionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorAlertProcessingRuleActionGroupConditionDescriptionParameters) DeepCopyInto(out *MonitorAlertProcessingRuleActionGroupConditionDescriptionParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleActionGroupConditionDescriptionParameters. 
+func (in *MonitorAlertProcessingRuleActionGroupConditionDescriptionParameters) DeepCopy() *MonitorAlertProcessingRuleActionGroupConditionDescriptionParameters { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleActionGroupConditionDescriptionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorAlertProcessingRuleActionGroupConditionInitParameters) DeepCopyInto(out *MonitorAlertProcessingRuleActionGroupConditionInitParameters) { + *out = *in + if in.AlertContext != nil { + in, out := &in.AlertContext, &out.AlertContext + *out = new(MonitorAlertProcessingRuleActionGroupConditionAlertContextInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AlertRuleID != nil { + in, out := &in.AlertRuleID, &out.AlertRuleID + *out = new(MonitorAlertProcessingRuleActionGroupConditionAlertRuleIDInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AlertRuleName != nil { + in, out := &in.AlertRuleName, &out.AlertRuleName + *out = new(AlertRuleNameInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(MonitorAlertProcessingRuleActionGroupConditionDescriptionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MonitorCondition != nil { + in, out := &in.MonitorCondition, &out.MonitorCondition + *out = new(MonitorConditionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MonitorService != nil { + in, out := &in.MonitorService, &out.MonitorService + *out = new(MonitorAlertProcessingRuleActionGroupConditionMonitorServiceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Severity != nil { + in, out := &in.Severity, &out.Severity + *out = new(MonitorAlertProcessingRuleActionGroupConditionSeverityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SignalType != nil { + in, out := &in.SignalType, &out.SignalType + *out = 
new(SignalTypeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TargetResource != nil { + in, out := &in.TargetResource, &out.TargetResource + *out = new(TargetResourceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TargetResourceGroup != nil { + in, out := &in.TargetResourceGroup, &out.TargetResourceGroup + *out = new(TargetResourceGroupInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TargetResourceType != nil { + in, out := &in.TargetResourceType, &out.TargetResourceType + *out = new(MonitorAlertProcessingRuleActionGroupConditionTargetResourceTypeInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleActionGroupConditionInitParameters. +func (in *MonitorAlertProcessingRuleActionGroupConditionInitParameters) DeepCopy() *MonitorAlertProcessingRuleActionGroupConditionInitParameters { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleActionGroupConditionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorAlertProcessingRuleActionGroupConditionMonitorServiceInitParameters) DeepCopyInto(out *MonitorAlertProcessingRuleActionGroupConditionMonitorServiceInitParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleActionGroupConditionMonitorServiceInitParameters. 
+func (in *MonitorAlertProcessingRuleActionGroupConditionMonitorServiceInitParameters) DeepCopy() *MonitorAlertProcessingRuleActionGroupConditionMonitorServiceInitParameters { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleActionGroupConditionMonitorServiceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorAlertProcessingRuleActionGroupConditionMonitorServiceObservation) DeepCopyInto(out *MonitorAlertProcessingRuleActionGroupConditionMonitorServiceObservation) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleActionGroupConditionMonitorServiceObservation. +func (in *MonitorAlertProcessingRuleActionGroupConditionMonitorServiceObservation) DeepCopy() *MonitorAlertProcessingRuleActionGroupConditionMonitorServiceObservation { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleActionGroupConditionMonitorServiceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorAlertProcessingRuleActionGroupConditionMonitorServiceParameters) DeepCopyInto(out *MonitorAlertProcessingRuleActionGroupConditionMonitorServiceParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleActionGroupConditionMonitorServiceParameters. +func (in *MonitorAlertProcessingRuleActionGroupConditionMonitorServiceParameters) DeepCopy() *MonitorAlertProcessingRuleActionGroupConditionMonitorServiceParameters { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleActionGroupConditionMonitorServiceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorAlertProcessingRuleActionGroupConditionObservation) DeepCopyInto(out *MonitorAlertProcessingRuleActionGroupConditionObservation) { + *out = *in + if in.AlertContext != nil { + in, out := &in.AlertContext, &out.AlertContext + *out = new(MonitorAlertProcessingRuleActionGroupConditionAlertContextObservation) + (*in).DeepCopyInto(*out) + } + if in.AlertRuleID != nil { + in, out := &in.AlertRuleID, &out.AlertRuleID + *out = new(MonitorAlertProcessingRuleActionGroupConditionAlertRuleIDObservation) + (*in).DeepCopyInto(*out) + } + if in.AlertRuleName != nil { + in, out := &in.AlertRuleName, &out.AlertRuleName + *out = new(AlertRuleNameObservation) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(MonitorAlertProcessingRuleActionGroupConditionDescriptionObservation) + (*in).DeepCopyInto(*out) + } + if in.MonitorCondition != nil { + in, out := &in.MonitorCondition, &out.MonitorCondition + *out = new(MonitorConditionObservation) + (*in).DeepCopyInto(*out) + } + if in.MonitorService != nil { + in, out := &in.MonitorService, &out.MonitorService + *out = new(MonitorAlertProcessingRuleActionGroupConditionMonitorServiceObservation) + (*in).DeepCopyInto(*out) + } + if in.Severity != nil { + in, out := &in.Severity, &out.Severity + *out = new(MonitorAlertProcessingRuleActionGroupConditionSeverityObservation) + (*in).DeepCopyInto(*out) + } + if in.SignalType != nil { + in, out := &in.SignalType, &out.SignalType + *out = new(SignalTypeObservation) + (*in).DeepCopyInto(*out) + } + if in.TargetResource != nil { + in, out := &in.TargetResource, &out.TargetResource + *out = new(TargetResourceObservation) + (*in).DeepCopyInto(*out) + } + if in.TargetResourceGroup != nil { + in, out := &in.TargetResourceGroup, &out.TargetResourceGroup + *out = new(TargetResourceGroupObservation) + (*in).DeepCopyInto(*out) + } + if in.TargetResourceType != nil { + in, out := &in.TargetResourceType, 
&out.TargetResourceType + *out = new(MonitorAlertProcessingRuleActionGroupConditionTargetResourceTypeObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleActionGroupConditionObservation. +func (in *MonitorAlertProcessingRuleActionGroupConditionObservation) DeepCopy() *MonitorAlertProcessingRuleActionGroupConditionObservation { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleActionGroupConditionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorAlertProcessingRuleActionGroupConditionParameters) DeepCopyInto(out *MonitorAlertProcessingRuleActionGroupConditionParameters) { + *out = *in + if in.AlertContext != nil { + in, out := &in.AlertContext, &out.AlertContext + *out = new(MonitorAlertProcessingRuleActionGroupConditionAlertContextParameters) + (*in).DeepCopyInto(*out) + } + if in.AlertRuleID != nil { + in, out := &in.AlertRuleID, &out.AlertRuleID + *out = new(MonitorAlertProcessingRuleActionGroupConditionAlertRuleIDParameters) + (*in).DeepCopyInto(*out) + } + if in.AlertRuleName != nil { + in, out := &in.AlertRuleName, &out.AlertRuleName + *out = new(AlertRuleNameParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(MonitorAlertProcessingRuleActionGroupConditionDescriptionParameters) + (*in).DeepCopyInto(*out) + } + if in.MonitorCondition != nil { + in, out := &in.MonitorCondition, &out.MonitorCondition + *out = new(MonitorConditionParameters) + (*in).DeepCopyInto(*out) + } + if in.MonitorService != nil { + in, out := &in.MonitorService, &out.MonitorService + *out = new(MonitorAlertProcessingRuleActionGroupConditionMonitorServiceParameters) + (*in).DeepCopyInto(*out) + } + if in.Severity != nil { + in, out := 
&in.Severity, &out.Severity + *out = new(MonitorAlertProcessingRuleActionGroupConditionSeverityParameters) + (*in).DeepCopyInto(*out) + } + if in.SignalType != nil { + in, out := &in.SignalType, &out.SignalType + *out = new(SignalTypeParameters) + (*in).DeepCopyInto(*out) + } + if in.TargetResource != nil { + in, out := &in.TargetResource, &out.TargetResource + *out = new(TargetResourceParameters) + (*in).DeepCopyInto(*out) + } + if in.TargetResourceGroup != nil { + in, out := &in.TargetResourceGroup, &out.TargetResourceGroup + *out = new(TargetResourceGroupParameters) + (*in).DeepCopyInto(*out) + } + if in.TargetResourceType != nil { + in, out := &in.TargetResourceType, &out.TargetResourceType + *out = new(MonitorAlertProcessingRuleActionGroupConditionTargetResourceTypeParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleActionGroupConditionParameters. +func (in *MonitorAlertProcessingRuleActionGroupConditionParameters) DeepCopy() *MonitorAlertProcessingRuleActionGroupConditionParameters { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleActionGroupConditionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorAlertProcessingRuleActionGroupConditionSeverityInitParameters) DeepCopyInto(out *MonitorAlertProcessingRuleActionGroupConditionSeverityInitParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleActionGroupConditionSeverityInitParameters. +func (in *MonitorAlertProcessingRuleActionGroupConditionSeverityInitParameters) DeepCopy() *MonitorAlertProcessingRuleActionGroupConditionSeverityInitParameters { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleActionGroupConditionSeverityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorAlertProcessingRuleActionGroupConditionSeverityObservation) DeepCopyInto(out *MonitorAlertProcessingRuleActionGroupConditionSeverityObservation) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleActionGroupConditionSeverityObservation. 
+func (in *MonitorAlertProcessingRuleActionGroupConditionSeverityObservation) DeepCopy() *MonitorAlertProcessingRuleActionGroupConditionSeverityObservation { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleActionGroupConditionSeverityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorAlertProcessingRuleActionGroupConditionSeverityParameters) DeepCopyInto(out *MonitorAlertProcessingRuleActionGroupConditionSeverityParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleActionGroupConditionSeverityParameters. +func (in *MonitorAlertProcessingRuleActionGroupConditionSeverityParameters) DeepCopy() *MonitorAlertProcessingRuleActionGroupConditionSeverityParameters { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleActionGroupConditionSeverityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorAlertProcessingRuleActionGroupConditionTargetResourceTypeInitParameters) DeepCopyInto(out *MonitorAlertProcessingRuleActionGroupConditionTargetResourceTypeInitParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleActionGroupConditionTargetResourceTypeInitParameters. +func (in *MonitorAlertProcessingRuleActionGroupConditionTargetResourceTypeInitParameters) DeepCopy() *MonitorAlertProcessingRuleActionGroupConditionTargetResourceTypeInitParameters { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleActionGroupConditionTargetResourceTypeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorAlertProcessingRuleActionGroupConditionTargetResourceTypeObservation) DeepCopyInto(out *MonitorAlertProcessingRuleActionGroupConditionTargetResourceTypeObservation) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleActionGroupConditionTargetResourceTypeObservation. 
+func (in *MonitorAlertProcessingRuleActionGroupConditionTargetResourceTypeObservation) DeepCopy() *MonitorAlertProcessingRuleActionGroupConditionTargetResourceTypeObservation { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleActionGroupConditionTargetResourceTypeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorAlertProcessingRuleActionGroupConditionTargetResourceTypeParameters) DeepCopyInto(out *MonitorAlertProcessingRuleActionGroupConditionTargetResourceTypeParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleActionGroupConditionTargetResourceTypeParameters. +func (in *MonitorAlertProcessingRuleActionGroupConditionTargetResourceTypeParameters) DeepCopy() *MonitorAlertProcessingRuleActionGroupConditionTargetResourceTypeParameters { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleActionGroupConditionTargetResourceTypeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorAlertProcessingRuleActionGroupInitParameters) DeepCopyInto(out *MonitorAlertProcessingRuleActionGroupInitParameters) { + *out = *in + if in.AddActionGroupIds != nil { + in, out := &in.AddActionGroupIds, &out.AddActionGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AddActionGroupIdsRefs != nil { + in, out := &in.AddActionGroupIdsRefs, &out.AddActionGroupIdsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AddActionGroupIdsSelector != nil { + in, out := &in.AddActionGroupIdsSelector, &out.AddActionGroupIdsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = new(MonitorAlertProcessingRuleActionGroupConditionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(MonitorAlertProcessingRuleActionGroupScheduleInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScopesRefs != nil { + in, out := &in.ScopesRefs, &out.ScopesRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ScopesSelector != nil { + in, out := &in.ScopesSelector, &out.ScopesSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, 
len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleActionGroupInitParameters. +func (in *MonitorAlertProcessingRuleActionGroupInitParameters) DeepCopy() *MonitorAlertProcessingRuleActionGroupInitParameters { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleActionGroupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorAlertProcessingRuleActionGroupList) DeepCopyInto(out *MonitorAlertProcessingRuleActionGroupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MonitorAlertProcessingRuleActionGroup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleActionGroupList. +func (in *MonitorAlertProcessingRuleActionGroupList) DeepCopy() *MonitorAlertProcessingRuleActionGroupList { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleActionGroupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MonitorAlertProcessingRuleActionGroupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorAlertProcessingRuleActionGroupObservation) DeepCopyInto(out *MonitorAlertProcessingRuleActionGroupObservation) { + *out = *in + if in.AddActionGroupIds != nil { + in, out := &in.AddActionGroupIds, &out.AddActionGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = new(MonitorAlertProcessingRuleActionGroupConditionObservation) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(MonitorAlertProcessingRuleActionGroupScheduleObservation) + (*in).DeepCopyInto(*out) + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleActionGroupObservation. 
+func (in *MonitorAlertProcessingRuleActionGroupObservation) DeepCopy() *MonitorAlertProcessingRuleActionGroupObservation { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleActionGroupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorAlertProcessingRuleActionGroupParameters) DeepCopyInto(out *MonitorAlertProcessingRuleActionGroupParameters) { + *out = *in + if in.AddActionGroupIds != nil { + in, out := &in.AddActionGroupIds, &out.AddActionGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AddActionGroupIdsRefs != nil { + in, out := &in.AddActionGroupIdsRefs, &out.AddActionGroupIdsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AddActionGroupIdsSelector != nil { + in, out := &in.AddActionGroupIdsSelector, &out.AddActionGroupIdsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = new(MonitorAlertProcessingRuleActionGroupConditionParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + 
(*in).DeepCopyInto(*out) + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(MonitorAlertProcessingRuleActionGroupScheduleParameters) + (*in).DeepCopyInto(*out) + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScopesRefs != nil { + in, out := &in.ScopesRefs, &out.ScopesRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ScopesSelector != nil { + in, out := &in.ScopesSelector, &out.ScopesSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleActionGroupParameters. +func (in *MonitorAlertProcessingRuleActionGroupParameters) DeepCopy() *MonitorAlertProcessingRuleActionGroupParameters { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleActionGroupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorAlertProcessingRuleActionGroupScheduleInitParameters) DeepCopyInto(out *MonitorAlertProcessingRuleActionGroupScheduleInitParameters) { + *out = *in + if in.EffectiveFrom != nil { + in, out := &in.EffectiveFrom, &out.EffectiveFrom + *out = new(string) + **out = **in + } + if in.EffectiveUntil != nil { + in, out := &in.EffectiveUntil, &out.EffectiveUntil + *out = new(string) + **out = **in + } + if in.Recurrence != nil { + in, out := &in.Recurrence, &out.Recurrence + *out = new(RecurrenceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleActionGroupScheduleInitParameters. +func (in *MonitorAlertProcessingRuleActionGroupScheduleInitParameters) DeepCopy() *MonitorAlertProcessingRuleActionGroupScheduleInitParameters { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleActionGroupScheduleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorAlertProcessingRuleActionGroupScheduleObservation) DeepCopyInto(out *MonitorAlertProcessingRuleActionGroupScheduleObservation) { + *out = *in + if in.EffectiveFrom != nil { + in, out := &in.EffectiveFrom, &out.EffectiveFrom + *out = new(string) + **out = **in + } + if in.EffectiveUntil != nil { + in, out := &in.EffectiveUntil, &out.EffectiveUntil + *out = new(string) + **out = **in + } + if in.Recurrence != nil { + in, out := &in.Recurrence, &out.Recurrence + *out = new(RecurrenceObservation) + (*in).DeepCopyInto(*out) + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleActionGroupScheduleObservation. +func (in *MonitorAlertProcessingRuleActionGroupScheduleObservation) DeepCopy() *MonitorAlertProcessingRuleActionGroupScheduleObservation { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleActionGroupScheduleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorAlertProcessingRuleActionGroupScheduleParameters) DeepCopyInto(out *MonitorAlertProcessingRuleActionGroupScheduleParameters) { + *out = *in + if in.EffectiveFrom != nil { + in, out := &in.EffectiveFrom, &out.EffectiveFrom + *out = new(string) + **out = **in + } + if in.EffectiveUntil != nil { + in, out := &in.EffectiveUntil, &out.EffectiveUntil + *out = new(string) + **out = **in + } + if in.Recurrence != nil { + in, out := &in.Recurrence, &out.Recurrence + *out = new(RecurrenceParameters) + (*in).DeepCopyInto(*out) + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleActionGroupScheduleParameters. +func (in *MonitorAlertProcessingRuleActionGroupScheduleParameters) DeepCopy() *MonitorAlertProcessingRuleActionGroupScheduleParameters { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleActionGroupScheduleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorAlertProcessingRuleActionGroupSpec) DeepCopyInto(out *MonitorAlertProcessingRuleActionGroupSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleActionGroupSpec. +func (in *MonitorAlertProcessingRuleActionGroupSpec) DeepCopy() *MonitorAlertProcessingRuleActionGroupSpec { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleActionGroupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorAlertProcessingRuleActionGroupStatus) DeepCopyInto(out *MonitorAlertProcessingRuleActionGroupStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleActionGroupStatus. +func (in *MonitorAlertProcessingRuleActionGroupStatus) DeepCopy() *MonitorAlertProcessingRuleActionGroupStatus { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleActionGroupStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorAlertProcessingRuleSuppression) DeepCopyInto(out *MonitorAlertProcessingRuleSuppression) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleSuppression. +func (in *MonitorAlertProcessingRuleSuppression) DeepCopy() *MonitorAlertProcessingRuleSuppression { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleSuppression) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MonitorAlertProcessingRuleSuppression) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorAlertProcessingRuleSuppressionConditionAlertContextInitParameters) DeepCopyInto(out *MonitorAlertProcessingRuleSuppressionConditionAlertContextInitParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleSuppressionConditionAlertContextInitParameters. +func (in *MonitorAlertProcessingRuleSuppressionConditionAlertContextInitParameters) DeepCopy() *MonitorAlertProcessingRuleSuppressionConditionAlertContextInitParameters { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleSuppressionConditionAlertContextInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorAlertProcessingRuleSuppressionConditionAlertContextObservation) DeepCopyInto(out *MonitorAlertProcessingRuleSuppressionConditionAlertContextObservation) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleSuppressionConditionAlertContextObservation. 
+func (in *MonitorAlertProcessingRuleSuppressionConditionAlertContextObservation) DeepCopy() *MonitorAlertProcessingRuleSuppressionConditionAlertContextObservation { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleSuppressionConditionAlertContextObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorAlertProcessingRuleSuppressionConditionAlertContextParameters) DeepCopyInto(out *MonitorAlertProcessingRuleSuppressionConditionAlertContextParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleSuppressionConditionAlertContextParameters. +func (in *MonitorAlertProcessingRuleSuppressionConditionAlertContextParameters) DeepCopy() *MonitorAlertProcessingRuleSuppressionConditionAlertContextParameters { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleSuppressionConditionAlertContextParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorAlertProcessingRuleSuppressionConditionAlertRuleIDInitParameters) DeepCopyInto(out *MonitorAlertProcessingRuleSuppressionConditionAlertRuleIDInitParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleSuppressionConditionAlertRuleIDInitParameters. +func (in *MonitorAlertProcessingRuleSuppressionConditionAlertRuleIDInitParameters) DeepCopy() *MonitorAlertProcessingRuleSuppressionConditionAlertRuleIDInitParameters { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleSuppressionConditionAlertRuleIDInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorAlertProcessingRuleSuppressionConditionAlertRuleIDObservation) DeepCopyInto(out *MonitorAlertProcessingRuleSuppressionConditionAlertRuleIDObservation) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleSuppressionConditionAlertRuleIDObservation. 
+func (in *MonitorAlertProcessingRuleSuppressionConditionAlertRuleIDObservation) DeepCopy() *MonitorAlertProcessingRuleSuppressionConditionAlertRuleIDObservation { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleSuppressionConditionAlertRuleIDObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorAlertProcessingRuleSuppressionConditionAlertRuleIDParameters) DeepCopyInto(out *MonitorAlertProcessingRuleSuppressionConditionAlertRuleIDParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleSuppressionConditionAlertRuleIDParameters. +func (in *MonitorAlertProcessingRuleSuppressionConditionAlertRuleIDParameters) DeepCopy() *MonitorAlertProcessingRuleSuppressionConditionAlertRuleIDParameters { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleSuppressionConditionAlertRuleIDParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorAlertProcessingRuleSuppressionConditionDescriptionInitParameters) DeepCopyInto(out *MonitorAlertProcessingRuleSuppressionConditionDescriptionInitParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleSuppressionConditionDescriptionInitParameters. +func (in *MonitorAlertProcessingRuleSuppressionConditionDescriptionInitParameters) DeepCopy() *MonitorAlertProcessingRuleSuppressionConditionDescriptionInitParameters { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleSuppressionConditionDescriptionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorAlertProcessingRuleSuppressionConditionDescriptionObservation) DeepCopyInto(out *MonitorAlertProcessingRuleSuppressionConditionDescriptionObservation) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleSuppressionConditionDescriptionObservation. 
+func (in *MonitorAlertProcessingRuleSuppressionConditionDescriptionObservation) DeepCopy() *MonitorAlertProcessingRuleSuppressionConditionDescriptionObservation { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleSuppressionConditionDescriptionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorAlertProcessingRuleSuppressionConditionDescriptionParameters) DeepCopyInto(out *MonitorAlertProcessingRuleSuppressionConditionDescriptionParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleSuppressionConditionDescriptionParameters. +func (in *MonitorAlertProcessingRuleSuppressionConditionDescriptionParameters) DeepCopy() *MonitorAlertProcessingRuleSuppressionConditionDescriptionParameters { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleSuppressionConditionDescriptionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorAlertProcessingRuleSuppressionConditionInitParameters) DeepCopyInto(out *MonitorAlertProcessingRuleSuppressionConditionInitParameters) { + *out = *in + if in.AlertContext != nil { + in, out := &in.AlertContext, &out.AlertContext + *out = new(MonitorAlertProcessingRuleSuppressionConditionAlertContextInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AlertRuleID != nil { + in, out := &in.AlertRuleID, &out.AlertRuleID + *out = new(MonitorAlertProcessingRuleSuppressionConditionAlertRuleIDInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AlertRuleName != nil { + in, out := &in.AlertRuleName, &out.AlertRuleName + *out = new(ConditionAlertRuleNameInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(MonitorAlertProcessingRuleSuppressionConditionDescriptionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MonitorCondition != nil { + in, out := &in.MonitorCondition, &out.MonitorCondition + *out = new(ConditionMonitorConditionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MonitorService != nil { + in, out := &in.MonitorService, &out.MonitorService + *out = new(MonitorAlertProcessingRuleSuppressionConditionMonitorServiceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Severity != nil { + in, out := &in.Severity, &out.Severity + *out = new(MonitorAlertProcessingRuleSuppressionConditionSeverityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SignalType != nil { + in, out := &in.SignalType, &out.SignalType + *out = new(ConditionSignalTypeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TargetResource != nil { + in, out := &in.TargetResource, &out.TargetResource + *out = new(ConditionTargetResourceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TargetResourceGroup != nil { + in, out := &in.TargetResourceGroup, &out.TargetResourceGroup + *out = new(ConditionTargetResourceGroupInitParameters) + (*in).DeepCopyInto(*out) + } + if 
in.TargetResourceType != nil { + in, out := &in.TargetResourceType, &out.TargetResourceType + *out = new(MonitorAlertProcessingRuleSuppressionConditionTargetResourceTypeInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleSuppressionConditionInitParameters. +func (in *MonitorAlertProcessingRuleSuppressionConditionInitParameters) DeepCopy() *MonitorAlertProcessingRuleSuppressionConditionInitParameters { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleSuppressionConditionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorAlertProcessingRuleSuppressionConditionMonitorServiceInitParameters) DeepCopyInto(out *MonitorAlertProcessingRuleSuppressionConditionMonitorServiceInitParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleSuppressionConditionMonitorServiceInitParameters. +func (in *MonitorAlertProcessingRuleSuppressionConditionMonitorServiceInitParameters) DeepCopy() *MonitorAlertProcessingRuleSuppressionConditionMonitorServiceInitParameters { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleSuppressionConditionMonitorServiceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorAlertProcessingRuleSuppressionConditionMonitorServiceObservation) DeepCopyInto(out *MonitorAlertProcessingRuleSuppressionConditionMonitorServiceObservation) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleSuppressionConditionMonitorServiceObservation. +func (in *MonitorAlertProcessingRuleSuppressionConditionMonitorServiceObservation) DeepCopy() *MonitorAlertProcessingRuleSuppressionConditionMonitorServiceObservation { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleSuppressionConditionMonitorServiceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorAlertProcessingRuleSuppressionConditionMonitorServiceParameters) DeepCopyInto(out *MonitorAlertProcessingRuleSuppressionConditionMonitorServiceParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleSuppressionConditionMonitorServiceParameters. 
+func (in *MonitorAlertProcessingRuleSuppressionConditionMonitorServiceParameters) DeepCopy() *MonitorAlertProcessingRuleSuppressionConditionMonitorServiceParameters { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleSuppressionConditionMonitorServiceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorAlertProcessingRuleSuppressionConditionObservation) DeepCopyInto(out *MonitorAlertProcessingRuleSuppressionConditionObservation) { + *out = *in + if in.AlertContext != nil { + in, out := &in.AlertContext, &out.AlertContext + *out = new(MonitorAlertProcessingRuleSuppressionConditionAlertContextObservation) + (*in).DeepCopyInto(*out) + } + if in.AlertRuleID != nil { + in, out := &in.AlertRuleID, &out.AlertRuleID + *out = new(MonitorAlertProcessingRuleSuppressionConditionAlertRuleIDObservation) + (*in).DeepCopyInto(*out) + } + if in.AlertRuleName != nil { + in, out := &in.AlertRuleName, &out.AlertRuleName + *out = new(ConditionAlertRuleNameObservation) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(MonitorAlertProcessingRuleSuppressionConditionDescriptionObservation) + (*in).DeepCopyInto(*out) + } + if in.MonitorCondition != nil { + in, out := &in.MonitorCondition, &out.MonitorCondition + *out = new(ConditionMonitorConditionObservation) + (*in).DeepCopyInto(*out) + } + if in.MonitorService != nil { + in, out := &in.MonitorService, &out.MonitorService + *out = new(MonitorAlertProcessingRuleSuppressionConditionMonitorServiceObservation) + (*in).DeepCopyInto(*out) + } + if in.Severity != nil { + in, out := &in.Severity, &out.Severity + *out = new(MonitorAlertProcessingRuleSuppressionConditionSeverityObservation) + (*in).DeepCopyInto(*out) + } + if in.SignalType != nil { + in, out := &in.SignalType, &out.SignalType + *out = 
new(ConditionSignalTypeObservation) + (*in).DeepCopyInto(*out) + } + if in.TargetResource != nil { + in, out := &in.TargetResource, &out.TargetResource + *out = new(ConditionTargetResourceObservation) + (*in).DeepCopyInto(*out) + } + if in.TargetResourceGroup != nil { + in, out := &in.TargetResourceGroup, &out.TargetResourceGroup + *out = new(ConditionTargetResourceGroupObservation) + (*in).DeepCopyInto(*out) + } + if in.TargetResourceType != nil { + in, out := &in.TargetResourceType, &out.TargetResourceType + *out = new(MonitorAlertProcessingRuleSuppressionConditionTargetResourceTypeObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleSuppressionConditionObservation. +func (in *MonitorAlertProcessingRuleSuppressionConditionObservation) DeepCopy() *MonitorAlertProcessingRuleSuppressionConditionObservation { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleSuppressionConditionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorAlertProcessingRuleSuppressionConditionParameters) DeepCopyInto(out *MonitorAlertProcessingRuleSuppressionConditionParameters) { + *out = *in + if in.AlertContext != nil { + in, out := &in.AlertContext, &out.AlertContext + *out = new(MonitorAlertProcessingRuleSuppressionConditionAlertContextParameters) + (*in).DeepCopyInto(*out) + } + if in.AlertRuleID != nil { + in, out := &in.AlertRuleID, &out.AlertRuleID + *out = new(MonitorAlertProcessingRuleSuppressionConditionAlertRuleIDParameters) + (*in).DeepCopyInto(*out) + } + if in.AlertRuleName != nil { + in, out := &in.AlertRuleName, &out.AlertRuleName + *out = new(ConditionAlertRuleNameParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(MonitorAlertProcessingRuleSuppressionConditionDescriptionParameters) + (*in).DeepCopyInto(*out) + } + if in.MonitorCondition != nil { + in, out := &in.MonitorCondition, &out.MonitorCondition + *out = new(ConditionMonitorConditionParameters) + (*in).DeepCopyInto(*out) + } + if in.MonitorService != nil { + in, out := &in.MonitorService, &out.MonitorService + *out = new(MonitorAlertProcessingRuleSuppressionConditionMonitorServiceParameters) + (*in).DeepCopyInto(*out) + } + if in.Severity != nil { + in, out := &in.Severity, &out.Severity + *out = new(MonitorAlertProcessingRuleSuppressionConditionSeverityParameters) + (*in).DeepCopyInto(*out) + } + if in.SignalType != nil { + in, out := &in.SignalType, &out.SignalType + *out = new(ConditionSignalTypeParameters) + (*in).DeepCopyInto(*out) + } + if in.TargetResource != nil { + in, out := &in.TargetResource, &out.TargetResource + *out = new(ConditionTargetResourceParameters) + (*in).DeepCopyInto(*out) + } + if in.TargetResourceGroup != nil { + in, out := &in.TargetResourceGroup, &out.TargetResourceGroup + *out = new(ConditionTargetResourceGroupParameters) + (*in).DeepCopyInto(*out) + } + if in.TargetResourceType != nil { + in, out := 
&in.TargetResourceType, &out.TargetResourceType + *out = new(MonitorAlertProcessingRuleSuppressionConditionTargetResourceTypeParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleSuppressionConditionParameters. +func (in *MonitorAlertProcessingRuleSuppressionConditionParameters) DeepCopy() *MonitorAlertProcessingRuleSuppressionConditionParameters { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleSuppressionConditionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorAlertProcessingRuleSuppressionConditionSeverityInitParameters) DeepCopyInto(out *MonitorAlertProcessingRuleSuppressionConditionSeverityInitParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleSuppressionConditionSeverityInitParameters. +func (in *MonitorAlertProcessingRuleSuppressionConditionSeverityInitParameters) DeepCopy() *MonitorAlertProcessingRuleSuppressionConditionSeverityInitParameters { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleSuppressionConditionSeverityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorAlertProcessingRuleSuppressionConditionSeverityObservation) DeepCopyInto(out *MonitorAlertProcessingRuleSuppressionConditionSeverityObservation) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleSuppressionConditionSeverityObservation. +func (in *MonitorAlertProcessingRuleSuppressionConditionSeverityObservation) DeepCopy() *MonitorAlertProcessingRuleSuppressionConditionSeverityObservation { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleSuppressionConditionSeverityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorAlertProcessingRuleSuppressionConditionSeverityParameters) DeepCopyInto(out *MonitorAlertProcessingRuleSuppressionConditionSeverityParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleSuppressionConditionSeverityParameters. 
+func (in *MonitorAlertProcessingRuleSuppressionConditionSeverityParameters) DeepCopy() *MonitorAlertProcessingRuleSuppressionConditionSeverityParameters { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleSuppressionConditionSeverityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorAlertProcessingRuleSuppressionConditionTargetResourceTypeInitParameters) DeepCopyInto(out *MonitorAlertProcessingRuleSuppressionConditionTargetResourceTypeInitParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleSuppressionConditionTargetResourceTypeInitParameters. +func (in *MonitorAlertProcessingRuleSuppressionConditionTargetResourceTypeInitParameters) DeepCopy() *MonitorAlertProcessingRuleSuppressionConditionTargetResourceTypeInitParameters { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleSuppressionConditionTargetResourceTypeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorAlertProcessingRuleSuppressionConditionTargetResourceTypeObservation) DeepCopyInto(out *MonitorAlertProcessingRuleSuppressionConditionTargetResourceTypeObservation) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleSuppressionConditionTargetResourceTypeObservation. +func (in *MonitorAlertProcessingRuleSuppressionConditionTargetResourceTypeObservation) DeepCopy() *MonitorAlertProcessingRuleSuppressionConditionTargetResourceTypeObservation { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleSuppressionConditionTargetResourceTypeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorAlertProcessingRuleSuppressionConditionTargetResourceTypeParameters) DeepCopyInto(out *MonitorAlertProcessingRuleSuppressionConditionTargetResourceTypeParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleSuppressionConditionTargetResourceTypeParameters. 
+func (in *MonitorAlertProcessingRuleSuppressionConditionTargetResourceTypeParameters) DeepCopy() *MonitorAlertProcessingRuleSuppressionConditionTargetResourceTypeParameters { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleSuppressionConditionTargetResourceTypeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorAlertProcessingRuleSuppressionInitParameters) DeepCopyInto(out *MonitorAlertProcessingRuleSuppressionInitParameters) { + *out = *in + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = new(MonitorAlertProcessingRuleSuppressionConditionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(MonitorAlertProcessingRuleSuppressionScheduleInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScopesRefs != nil { + in, out := &in.ScopesRefs, &out.ScopesRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ScopesSelector != nil { + in, out := &in.ScopesSelector, &out.ScopesSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = 
**in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleSuppressionInitParameters. +func (in *MonitorAlertProcessingRuleSuppressionInitParameters) DeepCopy() *MonitorAlertProcessingRuleSuppressionInitParameters { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleSuppressionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorAlertProcessingRuleSuppressionList) DeepCopyInto(out *MonitorAlertProcessingRuleSuppressionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MonitorAlertProcessingRuleSuppression, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleSuppressionList. +func (in *MonitorAlertProcessingRuleSuppressionList) DeepCopy() *MonitorAlertProcessingRuleSuppressionList { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleSuppressionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MonitorAlertProcessingRuleSuppressionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorAlertProcessingRuleSuppressionObservation) DeepCopyInto(out *MonitorAlertProcessingRuleSuppressionObservation) { + *out = *in + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = new(MonitorAlertProcessingRuleSuppressionConditionObservation) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(MonitorAlertProcessingRuleSuppressionScheduleObservation) + (*in).DeepCopyInto(*out) + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleSuppressionObservation. +func (in *MonitorAlertProcessingRuleSuppressionObservation) DeepCopy() *MonitorAlertProcessingRuleSuppressionObservation { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleSuppressionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorAlertProcessingRuleSuppressionParameters) DeepCopyInto(out *MonitorAlertProcessingRuleSuppressionParameters) { + *out = *in + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = new(MonitorAlertProcessingRuleSuppressionConditionParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(MonitorAlertProcessingRuleSuppressionScheduleParameters) + (*in).DeepCopyInto(*out) + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScopesRefs != nil { + in, out := &in.ScopesRefs, &out.ScopesRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ScopesSelector != nil { + in, out := &in.ScopesSelector, &out.ScopesSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = 
**in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleSuppressionParameters. +func (in *MonitorAlertProcessingRuleSuppressionParameters) DeepCopy() *MonitorAlertProcessingRuleSuppressionParameters { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleSuppressionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorAlertProcessingRuleSuppressionScheduleInitParameters) DeepCopyInto(out *MonitorAlertProcessingRuleSuppressionScheduleInitParameters) { + *out = *in + if in.EffectiveFrom != nil { + in, out := &in.EffectiveFrom, &out.EffectiveFrom + *out = new(string) + **out = **in + } + if in.EffectiveUntil != nil { + in, out := &in.EffectiveUntil, &out.EffectiveUntil + *out = new(string) + **out = **in + } + if in.Recurrence != nil { + in, out := &in.Recurrence, &out.Recurrence + *out = new(ScheduleRecurrenceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleSuppressionScheduleInitParameters. +func (in *MonitorAlertProcessingRuleSuppressionScheduleInitParameters) DeepCopy() *MonitorAlertProcessingRuleSuppressionScheduleInitParameters { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleSuppressionScheduleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorAlertProcessingRuleSuppressionScheduleObservation) DeepCopyInto(out *MonitorAlertProcessingRuleSuppressionScheduleObservation) { + *out = *in + if in.EffectiveFrom != nil { + in, out := &in.EffectiveFrom, &out.EffectiveFrom + *out = new(string) + **out = **in + } + if in.EffectiveUntil != nil { + in, out := &in.EffectiveUntil, &out.EffectiveUntil + *out = new(string) + **out = **in + } + if in.Recurrence != nil { + in, out := &in.Recurrence, &out.Recurrence + *out = new(ScheduleRecurrenceObservation) + (*in).DeepCopyInto(*out) + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleSuppressionScheduleObservation. +func (in *MonitorAlertProcessingRuleSuppressionScheduleObservation) DeepCopy() *MonitorAlertProcessingRuleSuppressionScheduleObservation { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleSuppressionScheduleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorAlertProcessingRuleSuppressionScheduleParameters) DeepCopyInto(out *MonitorAlertProcessingRuleSuppressionScheduleParameters) { + *out = *in + if in.EffectiveFrom != nil { + in, out := &in.EffectiveFrom, &out.EffectiveFrom + *out = new(string) + **out = **in + } + if in.EffectiveUntil != nil { + in, out := &in.EffectiveUntil, &out.EffectiveUntil + *out = new(string) + **out = **in + } + if in.Recurrence != nil { + in, out := &in.Recurrence, &out.Recurrence + *out = new(ScheduleRecurrenceParameters) + (*in).DeepCopyInto(*out) + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleSuppressionScheduleParameters. +func (in *MonitorAlertProcessingRuleSuppressionScheduleParameters) DeepCopy() *MonitorAlertProcessingRuleSuppressionScheduleParameters { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleSuppressionScheduleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorAlertProcessingRuleSuppressionSpec) DeepCopyInto(out *MonitorAlertProcessingRuleSuppressionSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleSuppressionSpec. +func (in *MonitorAlertProcessingRuleSuppressionSpec) DeepCopy() *MonitorAlertProcessingRuleSuppressionSpec { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleSuppressionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorAlertProcessingRuleSuppressionStatus) DeepCopyInto(out *MonitorAlertProcessingRuleSuppressionStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAlertProcessingRuleSuppressionStatus. +func (in *MonitorAlertProcessingRuleSuppressionStatus) DeepCopy() *MonitorAlertProcessingRuleSuppressionStatus { + if in == nil { + return nil + } + out := new(MonitorAlertProcessingRuleSuppressionStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorConditionInitParameters) DeepCopyInto(out *MonitorConditionInitParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorConditionInitParameters. +func (in *MonitorConditionInitParameters) DeepCopy() *MonitorConditionInitParameters { + if in == nil { + return nil + } + out := new(MonitorConditionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorConditionObservation) DeepCopyInto(out *MonitorConditionObservation) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorConditionObservation. +func (in *MonitorConditionObservation) DeepCopy() *MonitorConditionObservation { + if in == nil { + return nil + } + out := new(MonitorConditionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorConditionParameters) DeepCopyInto(out *MonitorConditionParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorConditionParameters. +func (in *MonitorConditionParameters) DeepCopy() *MonitorConditionParameters { + if in == nil { + return nil + } + out := new(MonitorConditionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorInitParameters) DeepCopyInto(out *MonitorInitParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorInitParameters. +func (in *MonitorInitParameters) DeepCopy() *MonitorInitParameters { + if in == nil { + return nil + } + out := new(MonitorInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorObservation) DeepCopyInto(out *MonitorObservation) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorObservation. +func (in *MonitorObservation) DeepCopy() *MonitorObservation { + if in == nil { + return nil + } + out := new(MonitorObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorParameters) DeepCopyInto(out *MonitorParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorParameters. +func (in *MonitorParameters) DeepCopy() *MonitorParameters { + if in == nil { + return nil + } + out := new(MonitorParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorServiceInitParameters) DeepCopyInto(out *MonitorServiceInitParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorServiceInitParameters. +func (in *MonitorServiceInitParameters) DeepCopy() *MonitorServiceInitParameters { + if in == nil { + return nil + } + out := new(MonitorServiceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorServiceObservation) DeepCopyInto(out *MonitorServiceObservation) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorServiceObservation. +func (in *MonitorServiceObservation) DeepCopy() *MonitorServiceObservation { + if in == nil { + return nil + } + out := new(MonitorServiceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorServiceParameters) DeepCopyInto(out *MonitorServiceParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorServiceParameters. +func (in *MonitorServiceParameters) DeepCopy() *MonitorServiceParameters { + if in == nil { + return nil + } + out := new(MonitorServiceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorSmartDetectorAlertRule) DeepCopyInto(out *MonitorSmartDetectorAlertRule) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorSmartDetectorAlertRule. +func (in *MonitorSmartDetectorAlertRule) DeepCopy() *MonitorSmartDetectorAlertRule { + if in == nil { + return nil + } + out := new(MonitorSmartDetectorAlertRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MonitorSmartDetectorAlertRule) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorSmartDetectorAlertRuleInitParameters) DeepCopyInto(out *MonitorSmartDetectorAlertRuleInitParameters) { + *out = *in + if in.ActionGroup != nil { + in, out := &in.ActionGroup, &out.ActionGroup + *out = new(ActionGroupInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DetectorType != nil { + in, out := &in.DetectorType, &out.DetectorType + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Frequency != nil { + in, out := &in.Frequency, &out.Frequency + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, 
&out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ScopeResourceIds != nil { + in, out := &in.ScopeResourceIds, &out.ScopeResourceIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScopeResourceIdsRefs != nil { + in, out := &in.ScopeResourceIdsRefs, &out.ScopeResourceIdsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ScopeResourceIdsSelector != nil { + in, out := &in.ScopeResourceIdsSelector, &out.ScopeResourceIdsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Severity != nil { + in, out := &in.Severity, &out.Severity + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ThrottlingDuration != nil { + in, out := &in.ThrottlingDuration, &out.ThrottlingDuration + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorSmartDetectorAlertRuleInitParameters. +func (in *MonitorSmartDetectorAlertRuleInitParameters) DeepCopy() *MonitorSmartDetectorAlertRuleInitParameters { + if in == nil { + return nil + } + out := new(MonitorSmartDetectorAlertRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorSmartDetectorAlertRuleList) DeepCopyInto(out *MonitorSmartDetectorAlertRuleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MonitorSmartDetectorAlertRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorSmartDetectorAlertRuleList. +func (in *MonitorSmartDetectorAlertRuleList) DeepCopy() *MonitorSmartDetectorAlertRuleList { + if in == nil { + return nil + } + out := new(MonitorSmartDetectorAlertRuleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MonitorSmartDetectorAlertRuleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorSmartDetectorAlertRuleObservation) DeepCopyInto(out *MonitorSmartDetectorAlertRuleObservation) { + *out = *in + if in.ActionGroup != nil { + in, out := &in.ActionGroup, &out.ActionGroup + *out = new(ActionGroupObservation) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DetectorType != nil { + in, out := &in.DetectorType, &out.DetectorType + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Frequency != nil { + in, out := &in.Frequency, &out.Frequency + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ScopeResourceIds != nil { + in, out := &in.ScopeResourceIds, &out.ScopeResourceIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Severity != nil { + in, out := &in.Severity, &out.Severity + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ThrottlingDuration != nil { + in, out := &in.ThrottlingDuration, &out.ThrottlingDuration + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorSmartDetectorAlertRuleObservation. 
+func (in *MonitorSmartDetectorAlertRuleObservation) DeepCopy() *MonitorSmartDetectorAlertRuleObservation { + if in == nil { + return nil + } + out := new(MonitorSmartDetectorAlertRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorSmartDetectorAlertRuleParameters) DeepCopyInto(out *MonitorSmartDetectorAlertRuleParameters) { + *out = *in + if in.ActionGroup != nil { + in, out := &in.ActionGroup, &out.ActionGroup + *out = new(ActionGroupParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DetectorType != nil { + in, out := &in.DetectorType, &out.DetectorType + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Frequency != nil { + in, out := &in.Frequency, &out.Frequency + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ScopeResourceIds != nil { + in, out := &in.ScopeResourceIds, &out.ScopeResourceIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScopeResourceIdsRefs != nil { + in, out := &in.ScopeResourceIdsRefs, &out.ScopeResourceIdsRefs + *out 
= make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ScopeResourceIdsSelector != nil { + in, out := &in.ScopeResourceIdsSelector, &out.ScopeResourceIdsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Severity != nil { + in, out := &in.Severity, &out.Severity + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ThrottlingDuration != nil { + in, out := &in.ThrottlingDuration, &out.ThrottlingDuration + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorSmartDetectorAlertRuleParameters. +func (in *MonitorSmartDetectorAlertRuleParameters) DeepCopy() *MonitorSmartDetectorAlertRuleParameters { + if in == nil { + return nil + } + out := new(MonitorSmartDetectorAlertRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorSmartDetectorAlertRuleSpec) DeepCopyInto(out *MonitorSmartDetectorAlertRuleSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorSmartDetectorAlertRuleSpec. 
+func (in *MonitorSmartDetectorAlertRuleSpec) DeepCopy() *MonitorSmartDetectorAlertRuleSpec { + if in == nil { + return nil + } + out := new(MonitorSmartDetectorAlertRuleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorSmartDetectorAlertRuleStatus) DeepCopyInto(out *MonitorSmartDetectorAlertRuleStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorSmartDetectorAlertRuleStatus. +func (in *MonitorSmartDetectorAlertRuleStatus) DeepCopy() *MonitorSmartDetectorAlertRuleStatus { + if in == nil { + return nil + } + out := new(MonitorSmartDetectorAlertRuleStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonthlyInitParameters) DeepCopyInto(out *MonthlyInitParameters) { + *out = *in + if in.DaysOfMonth != nil { + in, out := &in.DaysOfMonth, &out.DaysOfMonth + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.EndTime != nil { + in, out := &in.EndTime, &out.EndTime + *out = new(string) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonthlyInitParameters. +func (in *MonthlyInitParameters) DeepCopy() *MonthlyInitParameters { + if in == nil { + return nil + } + out := new(MonthlyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonthlyObservation) DeepCopyInto(out *MonthlyObservation) { + *out = *in + if in.DaysOfMonth != nil { + in, out := &in.DaysOfMonth, &out.DaysOfMonth + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.EndTime != nil { + in, out := &in.EndTime, &out.EndTime + *out = new(string) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonthlyObservation. +func (in *MonthlyObservation) DeepCopy() *MonthlyObservation { + if in == nil { + return nil + } + out := new(MonthlyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonthlyParameters) DeepCopyInto(out *MonthlyParameters) { + *out = *in + if in.DaysOfMonth != nil { + in, out := &in.DaysOfMonth, &out.DaysOfMonth + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.EndTime != nil { + in, out := &in.EndTime, &out.EndTime + *out = new(string) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonthlyParameters. +func (in *MonthlyParameters) DeepCopy() *MonthlyParameters { + if in == nil { + return nil + } + out := new(MonthlyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RecurrenceDailyInitParameters) DeepCopyInto(out *RecurrenceDailyInitParameters) { + *out = *in + if in.EndTime != nil { + in, out := &in.EndTime, &out.EndTime + *out = new(string) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecurrenceDailyInitParameters. +func (in *RecurrenceDailyInitParameters) DeepCopy() *RecurrenceDailyInitParameters { + if in == nil { + return nil + } + out := new(RecurrenceDailyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecurrenceDailyObservation) DeepCopyInto(out *RecurrenceDailyObservation) { + *out = *in + if in.EndTime != nil { + in, out := &in.EndTime, &out.EndTime + *out = new(string) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecurrenceDailyObservation. +func (in *RecurrenceDailyObservation) DeepCopy() *RecurrenceDailyObservation { + if in == nil { + return nil + } + out := new(RecurrenceDailyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecurrenceDailyParameters) DeepCopyInto(out *RecurrenceDailyParameters) { + *out = *in + if in.EndTime != nil { + in, out := &in.EndTime, &out.EndTime + *out = new(string) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecurrenceDailyParameters. 
+func (in *RecurrenceDailyParameters) DeepCopy() *RecurrenceDailyParameters { + if in == nil { + return nil + } + out := new(RecurrenceDailyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecurrenceInitParameters) DeepCopyInto(out *RecurrenceInitParameters) { + *out = *in + if in.Daily != nil { + in, out := &in.Daily, &out.Daily + *out = make([]DailyInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Monthly != nil { + in, out := &in.Monthly, &out.Monthly + *out = make([]MonthlyInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Weekly != nil { + in, out := &in.Weekly, &out.Weekly + *out = make([]WeeklyInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecurrenceInitParameters. +func (in *RecurrenceInitParameters) DeepCopy() *RecurrenceInitParameters { + if in == nil { + return nil + } + out := new(RecurrenceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RecurrenceMonthlyInitParameters) DeepCopyInto(out *RecurrenceMonthlyInitParameters) { + *out = *in + if in.DaysOfMonth != nil { + in, out := &in.DaysOfMonth, &out.DaysOfMonth + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.EndTime != nil { + in, out := &in.EndTime, &out.EndTime + *out = new(string) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecurrenceMonthlyInitParameters. +func (in *RecurrenceMonthlyInitParameters) DeepCopy() *RecurrenceMonthlyInitParameters { + if in == nil { + return nil + } + out := new(RecurrenceMonthlyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecurrenceMonthlyObservation) DeepCopyInto(out *RecurrenceMonthlyObservation) { + *out = *in + if in.DaysOfMonth != nil { + in, out := &in.DaysOfMonth, &out.DaysOfMonth + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.EndTime != nil { + in, out := &in.EndTime, &out.EndTime + *out = new(string) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecurrenceMonthlyObservation. 
+func (in *RecurrenceMonthlyObservation) DeepCopy() *RecurrenceMonthlyObservation { + if in == nil { + return nil + } + out := new(RecurrenceMonthlyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecurrenceMonthlyParameters) DeepCopyInto(out *RecurrenceMonthlyParameters) { + *out = *in + if in.DaysOfMonth != nil { + in, out := &in.DaysOfMonth, &out.DaysOfMonth + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.EndTime != nil { + in, out := &in.EndTime, &out.EndTime + *out = new(string) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecurrenceMonthlyParameters. +func (in *RecurrenceMonthlyParameters) DeepCopy() *RecurrenceMonthlyParameters { + if in == nil { + return nil + } + out := new(RecurrenceMonthlyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RecurrenceObservation) DeepCopyInto(out *RecurrenceObservation) { + *out = *in + if in.Daily != nil { + in, out := &in.Daily, &out.Daily + *out = make([]DailyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Monthly != nil { + in, out := &in.Monthly, &out.Monthly + *out = make([]MonthlyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Weekly != nil { + in, out := &in.Weekly, &out.Weekly + *out = make([]WeeklyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecurrenceObservation. +func (in *RecurrenceObservation) DeepCopy() *RecurrenceObservation { + if in == nil { + return nil + } + out := new(RecurrenceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecurrenceParameters) DeepCopyInto(out *RecurrenceParameters) { + *out = *in + if in.Daily != nil { + in, out := &in.Daily, &out.Daily + *out = make([]DailyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Monthly != nil { + in, out := &in.Monthly, &out.Monthly + *out = make([]MonthlyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Weekly != nil { + in, out := &in.Weekly, &out.Weekly + *out = make([]WeeklyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecurrenceParameters. 
+func (in *RecurrenceParameters) DeepCopy() *RecurrenceParameters { + if in == nil { + return nil + } + out := new(RecurrenceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecurrenceWeeklyInitParameters) DeepCopyInto(out *RecurrenceWeeklyInitParameters) { + *out = *in + if in.DaysOfWeek != nil { + in, out := &in.DaysOfWeek, &out.DaysOfWeek + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.EndTime != nil { + in, out := &in.EndTime, &out.EndTime + *out = new(string) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecurrenceWeeklyInitParameters. +func (in *RecurrenceWeeklyInitParameters) DeepCopy() *RecurrenceWeeklyInitParameters { + if in == nil { + return nil + } + out := new(RecurrenceWeeklyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecurrenceWeeklyObservation) DeepCopyInto(out *RecurrenceWeeklyObservation) { + *out = *in + if in.DaysOfWeek != nil { + in, out := &in.DaysOfWeek, &out.DaysOfWeek + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.EndTime != nil { + in, out := &in.EndTime, &out.EndTime + *out = new(string) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecurrenceWeeklyObservation. 
+func (in *RecurrenceWeeklyObservation) DeepCopy() *RecurrenceWeeklyObservation { + if in == nil { + return nil + } + out := new(RecurrenceWeeklyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecurrenceWeeklyParameters) DeepCopyInto(out *RecurrenceWeeklyParameters) { + *out = *in + if in.DaysOfWeek != nil { + in, out := &in.DaysOfWeek, &out.DaysOfWeek + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.EndTime != nil { + in, out := &in.EndTime, &out.EndTime + *out = new(string) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecurrenceWeeklyParameters. +func (in *RecurrenceWeeklyParameters) DeepCopy() *RecurrenceWeeklyParameters { + if in == nil { + return nil + } + out := new(RecurrenceWeeklyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScheduleInitParameters) DeepCopyInto(out *ScheduleInitParameters) { + *out = *in + if in.EndDateUtc != nil { + in, out := &in.EndDateUtc, &out.EndDateUtc + *out = new(string) + **out = **in + } + if in.RecurrenceMonthly != nil { + in, out := &in.RecurrenceMonthly, &out.RecurrenceMonthly + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.RecurrenceWeekly != nil { + in, out := &in.RecurrenceWeekly, &out.RecurrenceWeekly + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.StartDateUtc != nil { + in, out := &in.StartDateUtc, &out.StartDateUtc + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleInitParameters. +func (in *ScheduleInitParameters) DeepCopy() *ScheduleInitParameters { + if in == nil { + return nil + } + out := new(ScheduleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScheduleObservation) DeepCopyInto(out *ScheduleObservation) { + *out = *in + if in.EndDateUtc != nil { + in, out := &in.EndDateUtc, &out.EndDateUtc + *out = new(string) + **out = **in + } + if in.RecurrenceMonthly != nil { + in, out := &in.RecurrenceMonthly, &out.RecurrenceMonthly + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.RecurrenceWeekly != nil { + in, out := &in.RecurrenceWeekly, &out.RecurrenceWeekly + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.StartDateUtc != nil { + in, out := &in.StartDateUtc, &out.StartDateUtc + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleObservation. +func (in *ScheduleObservation) DeepCopy() *ScheduleObservation { + if in == nil { + return nil + } + out := new(ScheduleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScheduleParameters) DeepCopyInto(out *ScheduleParameters) { + *out = *in + if in.EndDateUtc != nil { + in, out := &in.EndDateUtc, &out.EndDateUtc + *out = new(string) + **out = **in + } + if in.RecurrenceMonthly != nil { + in, out := &in.RecurrenceMonthly, &out.RecurrenceMonthly + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.RecurrenceWeekly != nil { + in, out := &in.RecurrenceWeekly, &out.RecurrenceWeekly + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.StartDateUtc != nil { + in, out := &in.StartDateUtc, &out.StartDateUtc + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleParameters. +func (in *ScheduleParameters) DeepCopy() *ScheduleParameters { + if in == nil { + return nil + } + out := new(ScheduleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScheduleRecurrenceInitParameters) DeepCopyInto(out *ScheduleRecurrenceInitParameters) { + *out = *in + if in.Daily != nil { + in, out := &in.Daily, &out.Daily + *out = make([]RecurrenceDailyInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Monthly != nil { + in, out := &in.Monthly, &out.Monthly + *out = make([]RecurrenceMonthlyInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Weekly != nil { + in, out := &in.Weekly, &out.Weekly + *out = make([]RecurrenceWeeklyInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleRecurrenceInitParameters. +func (in *ScheduleRecurrenceInitParameters) DeepCopy() *ScheduleRecurrenceInitParameters { + if in == nil { + return nil + } + out := new(ScheduleRecurrenceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScheduleRecurrenceObservation) DeepCopyInto(out *ScheduleRecurrenceObservation) { + *out = *in + if in.Daily != nil { + in, out := &in.Daily, &out.Daily + *out = make([]RecurrenceDailyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Monthly != nil { + in, out := &in.Monthly, &out.Monthly + *out = make([]RecurrenceMonthlyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Weekly != nil { + in, out := &in.Weekly, &out.Weekly + *out = make([]RecurrenceWeeklyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleRecurrenceObservation. 
+func (in *ScheduleRecurrenceObservation) DeepCopy() *ScheduleRecurrenceObservation { + if in == nil { + return nil + } + out := new(ScheduleRecurrenceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScheduleRecurrenceParameters) DeepCopyInto(out *ScheduleRecurrenceParameters) { + *out = *in + if in.Daily != nil { + in, out := &in.Daily, &out.Daily + *out = make([]RecurrenceDailyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Monthly != nil { + in, out := &in.Monthly, &out.Monthly + *out = make([]RecurrenceMonthlyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Weekly != nil { + in, out := &in.Weekly, &out.Weekly + *out = make([]RecurrenceWeeklyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleRecurrenceParameters. +func (in *ScheduleRecurrenceParameters) DeepCopy() *ScheduleRecurrenceParameters { + if in == nil { + return nil + } + out := new(ScheduleRecurrenceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScopeInitParameters) DeepCopyInto(out *ScopeInitParameters) { + *out = *in + if in.ResourceIds != nil { + in, out := &in.ResourceIds, &out.ResourceIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeInitParameters. 
+func (in *ScopeInitParameters) DeepCopy() *ScopeInitParameters { + if in == nil { + return nil + } + out := new(ScopeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScopeObservation) DeepCopyInto(out *ScopeObservation) { + *out = *in + if in.ResourceIds != nil { + in, out := &in.ResourceIds, &out.ResourceIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeObservation. +func (in *ScopeObservation) DeepCopy() *ScopeObservation { + if in == nil { + return nil + } + out := new(ScopeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScopeParameters) DeepCopyInto(out *ScopeParameters) { + *out = *in + if in.ResourceIds != nil { + in, out := &in.ResourceIds, &out.ResourceIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeParameters. +func (in *ScopeParameters) DeepCopy() *ScopeParameters { + if in == nil { + return nil + } + out := new(ScopeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SeverityInitParameters) DeepCopyInto(out *SeverityInitParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeverityInitParameters. +func (in *SeverityInitParameters) DeepCopy() *SeverityInitParameters { + if in == nil { + return nil + } + out := new(SeverityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeverityObservation) DeepCopyInto(out *SeverityObservation) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeverityObservation. +func (in *SeverityObservation) DeepCopy() *SeverityObservation { + if in == nil { + return nil + } + out := new(SeverityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SeverityParameters) DeepCopyInto(out *SeverityParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeverityParameters. +func (in *SeverityParameters) DeepCopy() *SeverityParameters { + if in == nil { + return nil + } + out := new(SeverityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SignalTypeInitParameters) DeepCopyInto(out *SignalTypeInitParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignalTypeInitParameters. +func (in *SignalTypeInitParameters) DeepCopy() *SignalTypeInitParameters { + if in == nil { + return nil + } + out := new(SignalTypeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SignalTypeObservation) DeepCopyInto(out *SignalTypeObservation) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignalTypeObservation. +func (in *SignalTypeObservation) DeepCopy() *SignalTypeObservation { + if in == nil { + return nil + } + out := new(SignalTypeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SignalTypeParameters) DeepCopyInto(out *SignalTypeParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignalTypeParameters. +func (in *SignalTypeParameters) DeepCopy() *SignalTypeParameters { + if in == nil { + return nil + } + out := new(SignalTypeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SuppressionInitParameters) DeepCopyInto(out *SuppressionInitParameters) { + *out = *in + if in.RecurrenceType != nil { + in, out := &in.RecurrenceType, &out.RecurrenceType + *out = new(string) + **out = **in + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(ScheduleInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SuppressionInitParameters. +func (in *SuppressionInitParameters) DeepCopy() *SuppressionInitParameters { + if in == nil { + return nil + } + out := new(SuppressionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SuppressionObservation) DeepCopyInto(out *SuppressionObservation) { + *out = *in + if in.RecurrenceType != nil { + in, out := &in.RecurrenceType, &out.RecurrenceType + *out = new(string) + **out = **in + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(ScheduleObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SuppressionObservation. +func (in *SuppressionObservation) DeepCopy() *SuppressionObservation { + if in == nil { + return nil + } + out := new(SuppressionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SuppressionParameters) DeepCopyInto(out *SuppressionParameters) { + *out = *in + if in.RecurrenceType != nil { + in, out := &in.RecurrenceType, &out.RecurrenceType + *out = new(string) + **out = **in + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(ScheduleParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SuppressionParameters. +func (in *SuppressionParameters) DeepCopy() *SuppressionParameters { + if in == nil { + return nil + } + out := new(SuppressionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetResourceGroupInitParameters) DeepCopyInto(out *TargetResourceGroupInitParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetResourceGroupInitParameters. +func (in *TargetResourceGroupInitParameters) DeepCopy() *TargetResourceGroupInitParameters { + if in == nil { + return nil + } + out := new(TargetResourceGroupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TargetResourceGroupObservation) DeepCopyInto(out *TargetResourceGroupObservation) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetResourceGroupObservation. +func (in *TargetResourceGroupObservation) DeepCopy() *TargetResourceGroupObservation { + if in == nil { + return nil + } + out := new(TargetResourceGroupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetResourceGroupParameters) DeepCopyInto(out *TargetResourceGroupParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetResourceGroupParameters. +func (in *TargetResourceGroupParameters) DeepCopy() *TargetResourceGroupParameters { + if in == nil { + return nil + } + out := new(TargetResourceGroupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TargetResourceInitParameters) DeepCopyInto(out *TargetResourceInitParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetResourceInitParameters. +func (in *TargetResourceInitParameters) DeepCopy() *TargetResourceInitParameters { + if in == nil { + return nil + } + out := new(TargetResourceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetResourceObservation) DeepCopyInto(out *TargetResourceObservation) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetResourceObservation. +func (in *TargetResourceObservation) DeepCopy() *TargetResourceObservation { + if in == nil { + return nil + } + out := new(TargetResourceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TargetResourceParameters) DeepCopyInto(out *TargetResourceParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetResourceParameters. +func (in *TargetResourceParameters) DeepCopy() *TargetResourceParameters { + if in == nil { + return nil + } + out := new(TargetResourceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetResourceTypeInitParameters) DeepCopyInto(out *TargetResourceTypeInitParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetResourceTypeInitParameters. +func (in *TargetResourceTypeInitParameters) DeepCopy() *TargetResourceTypeInitParameters { + if in == nil { + return nil + } + out := new(TargetResourceTypeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TargetResourceTypeObservation) DeepCopyInto(out *TargetResourceTypeObservation) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetResourceTypeObservation. +func (in *TargetResourceTypeObservation) DeepCopy() *TargetResourceTypeObservation { + if in == nil { + return nil + } + out := new(TargetResourceTypeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetResourceTypeParameters) DeepCopyInto(out *TargetResourceTypeParameters) { + *out = *in + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetResourceTypeParameters. +func (in *TargetResourceTypeParameters) DeepCopy() *TargetResourceTypeParameters { + if in == nil { + return nil + } + out := new(TargetResourceTypeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WeeklyInitParameters) DeepCopyInto(out *WeeklyInitParameters) { + *out = *in + if in.DaysOfWeek != nil { + in, out := &in.DaysOfWeek, &out.DaysOfWeek + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.EndTime != nil { + in, out := &in.EndTime, &out.EndTime + *out = new(string) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WeeklyInitParameters. +func (in *WeeklyInitParameters) DeepCopy() *WeeklyInitParameters { + if in == nil { + return nil + } + out := new(WeeklyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WeeklyObservation) DeepCopyInto(out *WeeklyObservation) { + *out = *in + if in.DaysOfWeek != nil { + in, out := &in.DaysOfWeek, &out.DaysOfWeek + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.EndTime != nil { + in, out := &in.EndTime, &out.EndTime + *out = new(string) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WeeklyObservation. +func (in *WeeklyObservation) DeepCopy() *WeeklyObservation { + if in == nil { + return nil + } + out := new(WeeklyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WeeklyParameters) DeepCopyInto(out *WeeklyParameters) { + *out = *in + if in.DaysOfWeek != nil { + in, out := &in.DaysOfWeek, &out.DaysOfWeek + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.EndTime != nil { + in, out := &in.EndTime, &out.EndTime + *out = new(string) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WeeklyParameters. +func (in *WeeklyParameters) DeepCopy() *WeeklyParameters { + if in == nil { + return nil + } + out := new(WeeklyParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/alertsmanagement/v1beta2/zz_generated.managed.go b/apis/alertsmanagement/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..72306b48d --- /dev/null +++ b/apis/alertsmanagement/v1beta2/zz_generated.managed.go @@ -0,0 +1,308 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this MonitorActionRuleActionGroup. +func (mg *MonitorActionRuleActionGroup) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this MonitorActionRuleActionGroup. +func (mg *MonitorActionRuleActionGroup) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this MonitorActionRuleActionGroup. +func (mg *MonitorActionRuleActionGroup) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this MonitorActionRuleActionGroup. 
+func (mg *MonitorActionRuleActionGroup) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this MonitorActionRuleActionGroup. +func (mg *MonitorActionRuleActionGroup) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this MonitorActionRuleActionGroup. +func (mg *MonitorActionRuleActionGroup) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this MonitorActionRuleActionGroup. +func (mg *MonitorActionRuleActionGroup) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this MonitorActionRuleActionGroup. +func (mg *MonitorActionRuleActionGroup) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this MonitorActionRuleActionGroup. +func (mg *MonitorActionRuleActionGroup) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this MonitorActionRuleActionGroup. +func (mg *MonitorActionRuleActionGroup) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this MonitorActionRuleActionGroup. +func (mg *MonitorActionRuleActionGroup) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this MonitorActionRuleActionGroup. +func (mg *MonitorActionRuleActionGroup) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this MonitorActionRuleSuppression. 
+func (mg *MonitorActionRuleSuppression) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this MonitorActionRuleSuppression. +func (mg *MonitorActionRuleSuppression) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this MonitorActionRuleSuppression. +func (mg *MonitorActionRuleSuppression) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this MonitorActionRuleSuppression. +func (mg *MonitorActionRuleSuppression) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this MonitorActionRuleSuppression. +func (mg *MonitorActionRuleSuppression) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this MonitorActionRuleSuppression. +func (mg *MonitorActionRuleSuppression) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this MonitorActionRuleSuppression. +func (mg *MonitorActionRuleSuppression) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this MonitorActionRuleSuppression. +func (mg *MonitorActionRuleSuppression) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this MonitorActionRuleSuppression. +func (mg *MonitorActionRuleSuppression) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this MonitorActionRuleSuppression. +func (mg *MonitorActionRuleSuppression) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this MonitorActionRuleSuppression. 
+func (mg *MonitorActionRuleSuppression) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this MonitorActionRuleSuppression. +func (mg *MonitorActionRuleSuppression) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this MonitorAlertProcessingRuleActionGroup. +func (mg *MonitorAlertProcessingRuleActionGroup) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this MonitorAlertProcessingRuleActionGroup. +func (mg *MonitorAlertProcessingRuleActionGroup) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this MonitorAlertProcessingRuleActionGroup. +func (mg *MonitorAlertProcessingRuleActionGroup) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this MonitorAlertProcessingRuleActionGroup. +func (mg *MonitorAlertProcessingRuleActionGroup) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this MonitorAlertProcessingRuleActionGroup. +func (mg *MonitorAlertProcessingRuleActionGroup) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this MonitorAlertProcessingRuleActionGroup. +func (mg *MonitorAlertProcessingRuleActionGroup) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this MonitorAlertProcessingRuleActionGroup. +func (mg *MonitorAlertProcessingRuleActionGroup) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this MonitorAlertProcessingRuleActionGroup. 
+func (mg *MonitorAlertProcessingRuleActionGroup) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this MonitorAlertProcessingRuleActionGroup. +func (mg *MonitorAlertProcessingRuleActionGroup) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this MonitorAlertProcessingRuleActionGroup. +func (mg *MonitorAlertProcessingRuleActionGroup) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this MonitorAlertProcessingRuleActionGroup. +func (mg *MonitorAlertProcessingRuleActionGroup) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this MonitorAlertProcessingRuleActionGroup. +func (mg *MonitorAlertProcessingRuleActionGroup) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this MonitorAlertProcessingRuleSuppression. +func (mg *MonitorAlertProcessingRuleSuppression) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this MonitorAlertProcessingRuleSuppression. +func (mg *MonitorAlertProcessingRuleSuppression) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this MonitorAlertProcessingRuleSuppression. +func (mg *MonitorAlertProcessingRuleSuppression) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this MonitorAlertProcessingRuleSuppression. +func (mg *MonitorAlertProcessingRuleSuppression) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this MonitorAlertProcessingRuleSuppression. 
+func (mg *MonitorAlertProcessingRuleSuppression) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this MonitorAlertProcessingRuleSuppression. +func (mg *MonitorAlertProcessingRuleSuppression) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this MonitorAlertProcessingRuleSuppression. +func (mg *MonitorAlertProcessingRuleSuppression) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this MonitorAlertProcessingRuleSuppression. +func (mg *MonitorAlertProcessingRuleSuppression) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this MonitorAlertProcessingRuleSuppression. +func (mg *MonitorAlertProcessingRuleSuppression) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this MonitorAlertProcessingRuleSuppression. +func (mg *MonitorAlertProcessingRuleSuppression) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this MonitorAlertProcessingRuleSuppression. +func (mg *MonitorAlertProcessingRuleSuppression) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this MonitorAlertProcessingRuleSuppression. +func (mg *MonitorAlertProcessingRuleSuppression) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this MonitorSmartDetectorAlertRule. +func (mg *MonitorSmartDetectorAlertRule) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this MonitorSmartDetectorAlertRule. 
+func (mg *MonitorSmartDetectorAlertRule) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this MonitorSmartDetectorAlertRule. +func (mg *MonitorSmartDetectorAlertRule) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this MonitorSmartDetectorAlertRule. +func (mg *MonitorSmartDetectorAlertRule) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this MonitorSmartDetectorAlertRule. +func (mg *MonitorSmartDetectorAlertRule) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this MonitorSmartDetectorAlertRule. +func (mg *MonitorSmartDetectorAlertRule) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this MonitorSmartDetectorAlertRule. +func (mg *MonitorSmartDetectorAlertRule) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this MonitorSmartDetectorAlertRule. +func (mg *MonitorSmartDetectorAlertRule) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this MonitorSmartDetectorAlertRule. +func (mg *MonitorSmartDetectorAlertRule) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this MonitorSmartDetectorAlertRule. +func (mg *MonitorSmartDetectorAlertRule) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this MonitorSmartDetectorAlertRule. 
+func (mg *MonitorSmartDetectorAlertRule) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this MonitorSmartDetectorAlertRule. +func (mg *MonitorSmartDetectorAlertRule) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/alertsmanagement/v1beta2/zz_generated.managedlist.go b/apis/alertsmanagement/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..743a1b2cd --- /dev/null +++ b/apis/alertsmanagement/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,53 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this MonitorActionRuleActionGroupList. +func (l *MonitorActionRuleActionGroupList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this MonitorActionRuleSuppressionList. +func (l *MonitorActionRuleSuppressionList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this MonitorAlertProcessingRuleActionGroupList. +func (l *MonitorAlertProcessingRuleActionGroupList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this MonitorAlertProcessingRuleSuppressionList. 
+func (l *MonitorAlertProcessingRuleSuppressionList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this MonitorSmartDetectorAlertRuleList. +func (l *MonitorSmartDetectorAlertRuleList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/alertsmanagement/v1beta2/zz_generated.resolvers.go b/apis/alertsmanagement/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..10a5cbd2b --- /dev/null +++ b/apis/alertsmanagement/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,428 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + rconfig "github.com/upbound/provider-azure/apis/rconfig" + apisresolver "github.com/upbound/provider-azure/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *MonitorActionRuleActionGroup) ResolveReferences( // ResolveReferences of this MonitorActionRuleActionGroup. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("insights.azure.upbound.io", "v1beta2", "MonitorActionGroup", "MonitorActionGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ActionGroupID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.ActionGroupIDRef, + Selector: mg.Spec.ForProvider.ActionGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ActionGroupID") + } + mg.Spec.ForProvider.ActionGroupID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ActionGroupIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("insights.azure.upbound.io", "v1beta2", "MonitorActionGroup", 
"MonitorActionGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ActionGroupID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.ActionGroupIDRef, + Selector: mg.Spec.InitProvider.ActionGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ActionGroupID") + } + mg.Spec.InitProvider.ActionGroupID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ActionGroupIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this MonitorActionRuleSuppression. +func (mg *MonitorActionRuleSuppression) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this MonitorAlertProcessingRuleActionGroup. 
+func (mg *MonitorAlertProcessingRuleActionGroup) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("insights.azure.upbound.io", "v1beta2", "MonitorActionGroup", "MonitorActionGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.AddActionGroupIds), + Extract: rconfig.ExtractResourceID(), + References: mg.Spec.ForProvider.AddActionGroupIdsRefs, + Selector: mg.Spec.ForProvider.AddActionGroupIdsSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.AddActionGroupIds") + } + mg.Spec.ForProvider.AddActionGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.AddActionGroupIdsRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + 
mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.Scopes), + Extract: rconfig.ExtractResourceID(), + References: mg.Spec.ForProvider.ScopesRefs, + Selector: mg.Spec.ForProvider.ScopesSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Scopes") + } + mg.Spec.ForProvider.Scopes = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.ScopesRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("insights.azure.upbound.io", "v1beta2", "MonitorActionGroup", "MonitorActionGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.AddActionGroupIds), + Extract: rconfig.ExtractResourceID(), + References: mg.Spec.InitProvider.AddActionGroupIdsRefs, + Selector: mg.Spec.InitProvider.AddActionGroupIdsSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.AddActionGroupIds") + } + mg.Spec.InitProvider.AddActionGroupIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.AddActionGroupIdsRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference 
resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.Scopes), + Extract: rconfig.ExtractResourceID(), + References: mg.Spec.InitProvider.ScopesRefs, + Selector: mg.Spec.InitProvider.ScopesSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Scopes") + } + mg.Spec.InitProvider.Scopes = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.ScopesRefs = mrsp.ResolvedReferences + + return nil +} + +// ResolveReferences of this MonitorAlertProcessingRuleSuppression. +func (mg *MonitorAlertProcessingRuleSuppression) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target 
managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.Scopes), + Extract: rconfig.ExtractResourceID(), + References: mg.Spec.ForProvider.ScopesRefs, + Selector: mg.Spec.ForProvider.ScopesSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Scopes") + } + mg.Spec.ForProvider.Scopes = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.ScopesRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.Scopes), + Extract: rconfig.ExtractResourceID(), + References: mg.Spec.InitProvider.ScopesRefs, + Selector: mg.Spec.InitProvider.ScopesSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Scopes") + } + mg.Spec.InitProvider.Scopes = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.ScopesRefs = mrsp.ResolvedReferences + + return nil +} + +// ResolveReferences of this MonitorSmartDetectorAlertRule. 
+func (mg *MonitorSmartDetectorAlertRule) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + + if mg.Spec.ForProvider.ActionGroup != nil { + { + m, l, err = apisresolver.GetManagedResource("insights.azure.upbound.io", "v1beta2", "MonitorActionGroup", "MonitorActionGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.ActionGroup.Ids), + Extract: rconfig.ExtractResourceID(), + References: mg.Spec.ForProvider.ActionGroup.IdsRefs, + Selector: mg.Spec.ForProvider.ActionGroup.IdsSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ActionGroup.Ids") + } + mg.Spec.ForProvider.ActionGroup.Ids = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.ActionGroup.IdsRefs = mrsp.ResolvedReferences + + } + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + 
mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("insights.azure.upbound.io", "v1beta1", "ApplicationInsights", "ApplicationInsightsList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.ScopeResourceIds), + Extract: rconfig.ExtractResourceID(), + References: mg.Spec.ForProvider.ScopeResourceIdsRefs, + Selector: mg.Spec.ForProvider.ScopeResourceIdsSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ScopeResourceIds") + } + mg.Spec.ForProvider.ScopeResourceIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.ScopeResourceIdsRefs = mrsp.ResolvedReferences + + if mg.Spec.InitProvider.ActionGroup != nil { + { + m, l, err = apisresolver.GetManagedResource("insights.azure.upbound.io", "v1beta2", "MonitorActionGroup", "MonitorActionGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.ActionGroup.Ids), + Extract: rconfig.ExtractResourceID(), + References: mg.Spec.InitProvider.ActionGroup.IdsRefs, + Selector: mg.Spec.InitProvider.ActionGroup.IdsSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ActionGroup.Ids") + } + mg.Spec.InitProvider.ActionGroup.Ids = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.ActionGroup.IdsRefs = mrsp.ResolvedReferences + + } + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if 
err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ResourceGroupNameRef, + Selector: mg.Spec.InitProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ResourceGroupName") + } + mg.Spec.InitProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("insights.azure.upbound.io", "v1beta1", "ApplicationInsights", "ApplicationInsightsList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.ScopeResourceIds), + Extract: rconfig.ExtractResourceID(), + References: mg.Spec.InitProvider.ScopeResourceIdsRefs, + Selector: mg.Spec.InitProvider.ScopeResourceIdsSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ScopeResourceIds") + } + mg.Spec.InitProvider.ScopeResourceIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.ScopeResourceIdsRefs = mrsp.ResolvedReferences + + return nil +} diff --git a/apis/alertsmanagement/v1beta2/zz_groupversion_info.go b/apis/alertsmanagement/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..8e6b0c877 --- /dev/null +++ b/apis/alertsmanagement/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code 
generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=alertsmanagement.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "alertsmanagement.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/alertsmanagement/v1beta2/zz_monitoractionruleactiongroup_terraformed.go b/apis/alertsmanagement/v1beta2/zz_monitoractionruleactiongroup_terraformed.go new file mode 100755 index 000000000..f94f3611c --- /dev/null +++ b/apis/alertsmanagement/v1beta2/zz_monitoractionruleactiongroup_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this MonitorActionRuleActionGroup +func (mg *MonitorActionRuleActionGroup) GetTerraformResourceType() string { + return "azurerm_monitor_action_rule_action_group" +} + +// GetConnectionDetailsMapping for this MonitorActionRuleActionGroup +func (tr *MonitorActionRuleActionGroup) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this MonitorActionRuleActionGroup +func (tr *MonitorActionRuleActionGroup) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this MonitorActionRuleActionGroup +func (tr *MonitorActionRuleActionGroup) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this MonitorActionRuleActionGroup +func (tr *MonitorActionRuleActionGroup) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this MonitorActionRuleActionGroup +func (tr *MonitorActionRuleActionGroup) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this MonitorActionRuleActionGroup +func (tr *MonitorActionRuleActionGroup) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, 
&tr.Spec.ForProvider) +} + +// GetInitParameters of this MonitorActionRuleActionGroup +func (tr *MonitorActionRuleActionGroup) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this MonitorActionRuleActionGroup +func (tr *MonitorActionRuleActionGroup) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this MonitorActionRuleActionGroup using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *MonitorActionRuleActionGroup) LateInitialize(attrs []byte) (bool, error) { + params := &MonitorActionRuleActionGroupParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *MonitorActionRuleActionGroup) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/alertsmanagement/v1beta2/zz_monitoractionruleactiongroup_types.go b/apis/alertsmanagement/v1beta2/zz_monitoractionruleactiongroup_types.go new file mode 100755 index 000000000..fb6406f3d --- /dev/null +++ b/apis/alertsmanagement/v1beta2/zz_monitoractionruleactiongroup_types.go @@ -0,0 +1,520 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AlertContextInitParameters struct { + + // The operator for a given condition. Possible values are Equals and NotEquals. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type AlertContextObservation struct { + + // The operator for a given condition. Possible values are Equals and NotEquals. 
+ Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type AlertContextParameters struct { + + // The operator for a given condition. Possible values are Equals and NotEquals. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. + // +kubebuilder:validation:Optional + // +listType=set + Values []*string `json:"values" tf:"values,omitempty"` +} + +type AlertRuleIDInitParameters struct { + + // The operator for a given condition. Possible values are Equals and NotEquals. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type AlertRuleIDObservation struct { + + // The operator for a given condition. Possible values are Equals and NotEquals. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type AlertRuleIDParameters struct { + + // The operator for a given condition. Possible values are Equals and NotEquals. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. + // +kubebuilder:validation:Optional + // +listType=set + Values []*string `json:"values" tf:"values,omitempty"` +} + +type ConditionInitParameters struct { + + // A alert_context block as defined below. 
+ AlertContext *AlertContextInitParameters `json:"alertContext,omitempty" tf:"alert_context,omitempty"` + + // A alert_rule_id block as defined below. + AlertRuleID *AlertRuleIDInitParameters `json:"alertRuleId,omitempty" tf:"alert_rule_id,omitempty"` + + // A description block as defined below. + Description *DescriptionInitParameters `json:"description,omitempty" tf:"description,omitempty"` + + // A monitor block as defined below. + Monitor *MonitorInitParameters `json:"monitor,omitempty" tf:"monitor,omitempty"` + + // A monitor_service block as defined below. + MonitorService *MonitorServiceInitParameters `json:"monitorService,omitempty" tf:"monitor_service,omitempty"` + + // A severity block as defined below. + Severity *SeverityInitParameters `json:"severity,omitempty" tf:"severity,omitempty"` + + // A target_resource_type block as defined below. + TargetResourceType *TargetResourceTypeInitParameters `json:"targetResourceType,omitempty" tf:"target_resource_type,omitempty"` +} + +type ConditionObservation struct { + + // A alert_context block as defined below. + AlertContext *AlertContextObservation `json:"alertContext,omitempty" tf:"alert_context,omitempty"` + + // A alert_rule_id block as defined below. + AlertRuleID *AlertRuleIDObservation `json:"alertRuleId,omitempty" tf:"alert_rule_id,omitempty"` + + // A description block as defined below. + Description *DescriptionObservation `json:"description,omitempty" tf:"description,omitempty"` + + // A monitor block as defined below. + Monitor *MonitorObservation `json:"monitor,omitempty" tf:"monitor,omitempty"` + + // A monitor_service block as defined below. + MonitorService *MonitorServiceObservation `json:"monitorService,omitempty" tf:"monitor_service,omitempty"` + + // A severity block as defined below. + Severity *SeverityObservation `json:"severity,omitempty" tf:"severity,omitempty"` + + // A target_resource_type block as defined below. 
+ TargetResourceType *TargetResourceTypeObservation `json:"targetResourceType,omitempty" tf:"target_resource_type,omitempty"` +} + +type ConditionParameters struct { + + // A alert_context block as defined below. + // +kubebuilder:validation:Optional + AlertContext *AlertContextParameters `json:"alertContext,omitempty" tf:"alert_context,omitempty"` + + // A alert_rule_id block as defined below. + // +kubebuilder:validation:Optional + AlertRuleID *AlertRuleIDParameters `json:"alertRuleId,omitempty" tf:"alert_rule_id,omitempty"` + + // A description block as defined below. + // +kubebuilder:validation:Optional + Description *DescriptionParameters `json:"description,omitempty" tf:"description,omitempty"` + + // A monitor block as defined below. + // +kubebuilder:validation:Optional + Monitor *MonitorParameters `json:"monitor,omitempty" tf:"monitor,omitempty"` + + // A monitor_service block as defined below. + // +kubebuilder:validation:Optional + MonitorService *MonitorServiceParameters `json:"monitorService,omitempty" tf:"monitor_service,omitempty"` + + // A severity block as defined below. + // +kubebuilder:validation:Optional + Severity *SeverityParameters `json:"severity,omitempty" tf:"severity,omitempty"` + + // A target_resource_type block as defined below. + // +kubebuilder:validation:Optional + TargetResourceType *TargetResourceTypeParameters `json:"targetResourceType,omitempty" tf:"target_resource_type,omitempty"` +} + +type DescriptionInitParameters struct { + + // The operator for a given condition. Possible values are Equals and NotEquals. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type DescriptionObservation struct { + + // The operator for a given condition. Possible values are Equals and NotEquals. 
+ Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type DescriptionParameters struct { + + // The operator for a given condition. Possible values are Equals and NotEquals. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. + // +kubebuilder:validation:Optional + // +listType=set + Values []*string `json:"values" tf:"values,omitempty"` +} + +type MonitorActionRuleActionGroupInitParameters struct { + + // Specifies the resource id of monitor action group. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/insights/v1beta2.MonitorActionGroup + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + ActionGroupID *string `json:"actionGroupId,omitempty" tf:"action_group_id,omitempty"` + + // Reference to a MonitorActionGroup in insights to populate actionGroupId. + // +kubebuilder:validation:Optional + ActionGroupIDRef *v1.Reference `json:"actionGroupIdRef,omitempty" tf:"-"` + + // Selector for a MonitorActionGroup in insights to populate actionGroupId. + // +kubebuilder:validation:Optional + ActionGroupIDSelector *v1.Selector `json:"actionGroupIdSelector,omitempty" tf:"-"` + + // A condition block as defined below. + Condition *ConditionInitParameters `json:"condition,omitempty" tf:"condition,omitempty"` + + // Specifies a description for the Action Rule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Is the Action Rule enabled? Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // A scope block as defined below. 
+ Scope *ScopeInitParameters `json:"scope,omitempty" tf:"scope,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type MonitorActionRuleActionGroupObservation struct { + + // Specifies the resource id of monitor action group. + ActionGroupID *string `json:"actionGroupId,omitempty" tf:"action_group_id,omitempty"` + + // A condition block as defined below. + Condition *ConditionObservation `json:"condition,omitempty" tf:"condition,omitempty"` + + // Specifies a description for the Action Rule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Is the Action Rule enabled? Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The ID of the Monitor Action Rule. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies the name of the resource group in which the Monitor Action Rule should exist. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A scope block as defined below. + Scope *ScopeObservation `json:"scope,omitempty" tf:"scope,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type MonitorActionRuleActionGroupParameters struct { + + // Specifies the resource id of monitor action group. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/insights/v1beta2.MonitorActionGroup + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + ActionGroupID *string `json:"actionGroupId,omitempty" tf:"action_group_id,omitempty"` + + // Reference to a MonitorActionGroup in insights to populate actionGroupId. 
+ // +kubebuilder:validation:Optional + ActionGroupIDRef *v1.Reference `json:"actionGroupIdRef,omitempty" tf:"-"` + + // Selector for a MonitorActionGroup in insights to populate actionGroupId. + // +kubebuilder:validation:Optional + ActionGroupIDSelector *v1.Selector `json:"actionGroupIdSelector,omitempty" tf:"-"` + + // A condition block as defined below. + // +kubebuilder:validation:Optional + Condition *ConditionParameters `json:"condition,omitempty" tf:"condition,omitempty"` + + // Specifies a description for the Action Rule. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Is the Action Rule enabled? Defaults to true. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Specifies the name of the resource group in which the Monitor Action Rule should exist. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A scope block as defined below. + // +kubebuilder:validation:Optional + Scope *ScopeParameters `json:"scope,omitempty" tf:"scope,omitempty"` + + // A mapping of tags to assign to the resource. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type MonitorInitParameters struct { + + // The operator for a given condition. Possible values are Equals and NotEquals. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type MonitorObservation struct { + + // The operator for a given condition. Possible values are Equals and NotEquals. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type MonitorParameters struct { + + // The operator for a given condition. Possible values are Equals and NotEquals. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. + // +kubebuilder:validation:Optional + // +listType=set + Values []*string `json:"values" tf:"values,omitempty"` +} + +type MonitorServiceInitParameters struct { + + // The operator for a given condition. Possible values are Equals and NotEquals. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type MonitorServiceObservation struct { + + // The operator for a given condition. Possible values are Equals and NotEquals. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. 
The values should be valid resource types. + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type MonitorServiceParameters struct { + + // The operator for a given condition. Possible values are Equals and NotEquals. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. + // +kubebuilder:validation:Optional + // +listType=set + Values []*string `json:"values" tf:"values,omitempty"` +} + +type ScopeInitParameters struct { + + // A list of resource IDs of the given scope type which will be the target of action rule. + // +listType=set + ResourceIds []*string `json:"resourceIds,omitempty" tf:"resource_ids,omitempty"` + + // Specifies the type of target scope. Possible values are ResourceGroup and Resource. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ScopeObservation struct { + + // A list of resource IDs of the given scope type which will be the target of action rule. + // +listType=set + ResourceIds []*string `json:"resourceIds,omitempty" tf:"resource_ids,omitempty"` + + // Specifies the type of target scope. Possible values are ResourceGroup and Resource. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ScopeParameters struct { + + // A list of resource IDs of the given scope type which will be the target of action rule. + // +kubebuilder:validation:Optional + // +listType=set + ResourceIds []*string `json:"resourceIds" tf:"resource_ids,omitempty"` + + // Specifies the type of target scope. Possible values are ResourceGroup and Resource. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type SeverityInitParameters struct { + + // The operator for a given condition. Possible values are Equals and NotEquals. 
+ Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type SeverityObservation struct { + + // The operator for a given condition. Possible values are Equals and NotEquals. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type SeverityParameters struct { + + // The operator for a given condition. Possible values are Equals and NotEquals. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. + // +kubebuilder:validation:Optional + // +listType=set + Values []*string `json:"values" tf:"values,omitempty"` +} + +type TargetResourceTypeInitParameters struct { + + // The operator for a given condition. Possible values are Equals and NotEquals. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type TargetResourceTypeObservation struct { + + // The operator for a given condition. Possible values are Equals and NotEquals. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type TargetResourceTypeParameters struct { + + // The operator for a given condition. Possible values are Equals and NotEquals. 
+ // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. + // +kubebuilder:validation:Optional + // +listType=set + Values []*string `json:"values" tf:"values,omitempty"` +} + +// MonitorActionRuleActionGroupSpec defines the desired state of MonitorActionRuleActionGroup +type MonitorActionRuleActionGroupSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider MonitorActionRuleActionGroupParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider MonitorActionRuleActionGroupInitParameters `json:"initProvider,omitempty"` +} + +// MonitorActionRuleActionGroupStatus defines the observed state of MonitorActionRuleActionGroup. +type MonitorActionRuleActionGroupStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider MonitorActionRuleActionGroupObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// MonitorActionRuleActionGroup is the Schema for the MonitorActionRuleActionGroups API. Manages an Monitor Action Rule which type is action group. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type MonitorActionRuleActionGroup struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec MonitorActionRuleActionGroupSpec `json:"spec"` + Status MonitorActionRuleActionGroupStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// MonitorActionRuleActionGroupList contains a list of MonitorActionRuleActionGroups +type MonitorActionRuleActionGroupList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []MonitorActionRuleActionGroup `json:"items"` +} + +// Repository type metadata. +var ( + MonitorActionRuleActionGroup_Kind = "MonitorActionRuleActionGroup" + MonitorActionRuleActionGroup_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: MonitorActionRuleActionGroup_Kind}.String() + MonitorActionRuleActionGroup_KindAPIVersion = MonitorActionRuleActionGroup_Kind + "." 
+ CRDGroupVersion.String() + MonitorActionRuleActionGroup_GroupVersionKind = CRDGroupVersion.WithKind(MonitorActionRuleActionGroup_Kind) +) + +func init() { + SchemeBuilder.Register(&MonitorActionRuleActionGroup{}, &MonitorActionRuleActionGroupList{}) +} diff --git a/apis/alertsmanagement/v1beta2/zz_monitoractionrulesuppression_terraformed.go b/apis/alertsmanagement/v1beta2/zz_monitoractionrulesuppression_terraformed.go new file mode 100755 index 000000000..48e8e2351 --- /dev/null +++ b/apis/alertsmanagement/v1beta2/zz_monitoractionrulesuppression_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this MonitorActionRuleSuppression +func (mg *MonitorActionRuleSuppression) GetTerraformResourceType() string { + return "azurerm_monitor_action_rule_suppression" +} + +// GetConnectionDetailsMapping for this MonitorActionRuleSuppression +func (tr *MonitorActionRuleSuppression) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this MonitorActionRuleSuppression +func (tr *MonitorActionRuleSuppression) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this MonitorActionRuleSuppression +func (tr *MonitorActionRuleSuppression) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this 
MonitorActionRuleSuppression +func (tr *MonitorActionRuleSuppression) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this MonitorActionRuleSuppression +func (tr *MonitorActionRuleSuppression) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this MonitorActionRuleSuppression +func (tr *MonitorActionRuleSuppression) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this MonitorActionRuleSuppression +func (tr *MonitorActionRuleSuppression) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this MonitorActionRuleSuppression +func (tr *MonitorActionRuleSuppression) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this MonitorActionRuleSuppression using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *MonitorActionRuleSuppression) LateInitialize(attrs []byte) (bool, error) { + params := &MonitorActionRuleSuppressionParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *MonitorActionRuleSuppression) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/alertsmanagement/v1beta2/zz_monitoractionrulesuppression_types.go b/apis/alertsmanagement/v1beta2/zz_monitoractionrulesuppression_types.go new file mode 100755 index 000000000..f91319bf2 --- /dev/null +++ b/apis/alertsmanagement/v1beta2/zz_monitoractionrulesuppression_types.go @@ -0,0 +1,585 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ConditionAlertContextInitParameters struct { + + // The operator for a given condition. Possible values are Equals and NotEquals. 
+ Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type ConditionAlertContextObservation struct { + + // The operator for a given condition. Possible values are Equals and NotEquals. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type ConditionAlertContextParameters struct { + + // The operator for a given condition. Possible values are Equals and NotEquals. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. + // +kubebuilder:validation:Optional + // +listType=set + Values []*string `json:"values" tf:"values,omitempty"` +} + +type ConditionAlertRuleIDInitParameters struct { + + // The operator for a given condition. Possible values are Equals and NotEquals. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type ConditionAlertRuleIDObservation struct { + + // The operator for a given condition. Possible values are Equals and NotEquals. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type ConditionAlertRuleIDParameters struct { + + // The operator for a given condition. 
Possible values are Equals and NotEquals. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. + // +kubebuilder:validation:Optional + // +listType=set + Values []*string `json:"values" tf:"values,omitempty"` +} + +type ConditionDescriptionInitParameters struct { + + // The operator for a given condition. Possible values are Equals and NotEquals. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type ConditionDescriptionObservation struct { + + // The operator for a given condition. Possible values are Equals and NotEquals. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type ConditionDescriptionParameters struct { + + // The operator for a given condition. Possible values are Equals and NotEquals. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. + // +kubebuilder:validation:Optional + // +listType=set + Values []*string `json:"values" tf:"values,omitempty"` +} + +type ConditionMonitorInitParameters struct { + + // The operator for a given condition. Possible values are Equals and NotEquals. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. 
+ // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type ConditionMonitorObservation struct { + + // The operator for a given condition. Possible values are Equals and NotEquals. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type ConditionMonitorParameters struct { + + // The operator for a given condition. Possible values are Equals and NotEquals. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. + // +kubebuilder:validation:Optional + // +listType=set + Values []*string `json:"values" tf:"values,omitempty"` +} + +type ConditionMonitorServiceInitParameters struct { + + // The operator for a given condition. Possible values are Equals and NotEquals. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type ConditionMonitorServiceObservation struct { + + // The operator for a given condition. Possible values are Equals and NotEquals. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type ConditionMonitorServiceParameters struct { + + // The operator for a given condition. Possible values are Equals and NotEquals. 
+ // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. + // +kubebuilder:validation:Optional + // +listType=set + Values []*string `json:"values" tf:"values,omitempty"` +} + +type ConditionSeverityInitParameters struct { + + // The operator for a given condition. Possible values are Equals and NotEquals. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type ConditionSeverityObservation struct { + + // The operator for a given condition. Possible values are Equals and NotEquals. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type ConditionSeverityParameters struct { + + // The operator for a given condition. Possible values are Equals and NotEquals. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. + // +kubebuilder:validation:Optional + // +listType=set + Values []*string `json:"values" tf:"values,omitempty"` +} + +type ConditionTargetResourceTypeInitParameters struct { + + // The operator for a given condition. Possible values are Equals and NotEquals. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. 
+ // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type ConditionTargetResourceTypeObservation struct { + + // The operator for a given condition. Possible values are Equals and NotEquals. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type ConditionTargetResourceTypeParameters struct { + + // The operator for a given condition. Possible values are Equals and NotEquals. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. + // +kubebuilder:validation:Optional + // +listType=set + Values []*string `json:"values" tf:"values,omitempty"` +} + +type MonitorActionRuleSuppressionConditionInitParameters struct { + + // A alert_context block as defined below. + AlertContext *ConditionAlertContextInitParameters `json:"alertContext,omitempty" tf:"alert_context,omitempty"` + + // A alert_rule_id block as defined below. + AlertRuleID *ConditionAlertRuleIDInitParameters `json:"alertRuleId,omitempty" tf:"alert_rule_id,omitempty"` + + // A description block as defined below. + Description *ConditionDescriptionInitParameters `json:"description,omitempty" tf:"description,omitempty"` + + // A monitor block as defined below. + Monitor *ConditionMonitorInitParameters `json:"monitor,omitempty" tf:"monitor,omitempty"` + + // A monitor_service block as defined below. + MonitorService *ConditionMonitorServiceInitParameters `json:"monitorService,omitempty" tf:"monitor_service,omitempty"` + + // A severity block as defined below. + Severity *ConditionSeverityInitParameters `json:"severity,omitempty" tf:"severity,omitempty"` + + // A target_resource_type block as defined below. 
+ TargetResourceType *ConditionTargetResourceTypeInitParameters `json:"targetResourceType,omitempty" tf:"target_resource_type,omitempty"` +} + +type MonitorActionRuleSuppressionConditionObservation struct { + + // A alert_context block as defined below. + AlertContext *ConditionAlertContextObservation `json:"alertContext,omitempty" tf:"alert_context,omitempty"` + + // A alert_rule_id block as defined below. + AlertRuleID *ConditionAlertRuleIDObservation `json:"alertRuleId,omitempty" tf:"alert_rule_id,omitempty"` + + // A description block as defined below. + Description *ConditionDescriptionObservation `json:"description,omitempty" tf:"description,omitempty"` + + // A monitor block as defined below. + Monitor *ConditionMonitorObservation `json:"monitor,omitempty" tf:"monitor,omitempty"` + + // A monitor_service block as defined below. + MonitorService *ConditionMonitorServiceObservation `json:"monitorService,omitempty" tf:"monitor_service,omitempty"` + + // A severity block as defined below. + Severity *ConditionSeverityObservation `json:"severity,omitempty" tf:"severity,omitempty"` + + // A target_resource_type block as defined below. + TargetResourceType *ConditionTargetResourceTypeObservation `json:"targetResourceType,omitempty" tf:"target_resource_type,omitempty"` +} + +type MonitorActionRuleSuppressionConditionParameters struct { + + // A alert_context block as defined below. + // +kubebuilder:validation:Optional + AlertContext *ConditionAlertContextParameters `json:"alertContext,omitempty" tf:"alert_context,omitempty"` + + // A alert_rule_id block as defined below. + // +kubebuilder:validation:Optional + AlertRuleID *ConditionAlertRuleIDParameters `json:"alertRuleId,omitempty" tf:"alert_rule_id,omitempty"` + + // A description block as defined below. + // +kubebuilder:validation:Optional + Description *ConditionDescriptionParameters `json:"description,omitempty" tf:"description,omitempty"` + + // A monitor block as defined below. 
+ // +kubebuilder:validation:Optional + Monitor *ConditionMonitorParameters `json:"monitor,omitempty" tf:"monitor,omitempty"` + + // A monitor_service block as defined below. + // +kubebuilder:validation:Optional + MonitorService *ConditionMonitorServiceParameters `json:"monitorService,omitempty" tf:"monitor_service,omitempty"` + + // A severity block as defined below. + // +kubebuilder:validation:Optional + Severity *ConditionSeverityParameters `json:"severity,omitempty" tf:"severity,omitempty"` + + // A target_resource_type block as defined below. + // +kubebuilder:validation:Optional + TargetResourceType *ConditionTargetResourceTypeParameters `json:"targetResourceType,omitempty" tf:"target_resource_type,omitempty"` +} + +type MonitorActionRuleSuppressionInitParameters struct { + + // A condition block as defined below. + Condition *MonitorActionRuleSuppressionConditionInitParameters `json:"condition,omitempty" tf:"condition,omitempty"` + + // Specifies a description for the Action Rule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Is the Action Rule enabled? Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // A scope block as defined below. + Scope *MonitorActionRuleSuppressionScopeInitParameters `json:"scope,omitempty" tf:"scope,omitempty"` + + // A suppression block as defined below. + Suppression *SuppressionInitParameters `json:"suppression,omitempty" tf:"suppression,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type MonitorActionRuleSuppressionObservation struct { + + // A condition block as defined below. + Condition *MonitorActionRuleSuppressionConditionObservation `json:"condition,omitempty" tf:"condition,omitempty"` + + // Specifies a description for the Action Rule. 
+ Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Is the Action Rule enabled? Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The ID of the Monitor Action Rule. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies the name of the resource group in which the Monitor Action Rule should exist. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A scope block as defined below. + Scope *MonitorActionRuleSuppressionScopeObservation `json:"scope,omitempty" tf:"scope,omitempty"` + + // A suppression block as defined below. + Suppression *SuppressionObservation `json:"suppression,omitempty" tf:"suppression,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type MonitorActionRuleSuppressionParameters struct { + + // A condition block as defined below. + // +kubebuilder:validation:Optional + Condition *MonitorActionRuleSuppressionConditionParameters `json:"condition,omitempty" tf:"condition,omitempty"` + + // Specifies a description for the Action Rule. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Is the Action Rule enabled? Defaults to true. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Specifies the name of the resource group in which the Monitor Action Rule should exist. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A scope block as defined below. + // +kubebuilder:validation:Optional + Scope *MonitorActionRuleSuppressionScopeParameters `json:"scope,omitempty" tf:"scope,omitempty"` + + // A suppression block as defined below. + // +kubebuilder:validation:Optional + Suppression *SuppressionParameters `json:"suppression,omitempty" tf:"suppression,omitempty"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type MonitorActionRuleSuppressionScopeInitParameters struct { + + // A list of resource IDs of the given scope type which will be the target of action rule. + // +listType=set + ResourceIds []*string `json:"resourceIds,omitempty" tf:"resource_ids,omitempty"` + + // Specifies the type of target scope. Possible values are ResourceGroup and Resource. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type MonitorActionRuleSuppressionScopeObservation struct { + + // A list of resource IDs of the given scope type which will be the target of action rule. + // +listType=set + ResourceIds []*string `json:"resourceIds,omitempty" tf:"resource_ids,omitempty"` + + // Specifies the type of target scope. Possible values are ResourceGroup and Resource. 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type MonitorActionRuleSuppressionScopeParameters struct { + + // A list of resource IDs of the given scope type which will be the target of action rule. + // +kubebuilder:validation:Optional + // +listType=set + ResourceIds []*string `json:"resourceIds" tf:"resource_ids,omitempty"` + + // Specifies the type of target scope. Possible values are ResourceGroup and Resource. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type ScheduleInitParameters struct { + + // specifies the recurrence UTC end datetime (Y-m-d'T'H:M:S'Z'). + EndDateUtc *string `json:"endDateUtc,omitempty" tf:"end_date_utc,omitempty"` + + // specifies the list of dayOfMonth to recurrence. Possible values are between 1 - 31. Required if recurrence_type is Monthly. + // +listType=set + RecurrenceMonthly []*float64 `json:"recurrenceMonthly,omitempty" tf:"recurrence_monthly,omitempty"` + + // specifies the list of dayOfWeek to recurrence. Possible values are Sunday, Monday, Tuesday, Wednesday, Thursday, Friday and Saturday. + // +listType=set + RecurrenceWeekly []*string `json:"recurrenceWeekly,omitempty" tf:"recurrence_weekly,omitempty"` + + // specifies the recurrence UTC start datetime (Y-m-d'T'H:M:S'Z'). + StartDateUtc *string `json:"startDateUtc,omitempty" tf:"start_date_utc,omitempty"` +} + +type ScheduleObservation struct { + + // specifies the recurrence UTC end datetime (Y-m-d'T'H:M:S'Z'). + EndDateUtc *string `json:"endDateUtc,omitempty" tf:"end_date_utc,omitempty"` + + // specifies the list of dayOfMonth to recurrence. Possible values are between 1 - 31. Required if recurrence_type is Monthly. + // +listType=set + RecurrenceMonthly []*float64 `json:"recurrenceMonthly,omitempty" tf:"recurrence_monthly,omitempty"` + + // specifies the list of dayOfWeek to recurrence. Possible values are Sunday, Monday, Tuesday, Wednesday, Thursday, Friday and Saturday. 
+ // +listType=set + RecurrenceWeekly []*string `json:"recurrenceWeekly,omitempty" tf:"recurrence_weekly,omitempty"` + + // specifies the recurrence UTC start datetime (Y-m-d'T'H:M:S'Z'). + StartDateUtc *string `json:"startDateUtc,omitempty" tf:"start_date_utc,omitempty"` +} + +type ScheduleParameters struct { + + // specifies the recurrence UTC end datetime (Y-m-d'T'H:M:S'Z'). + // +kubebuilder:validation:Optional + EndDateUtc *string `json:"endDateUtc" tf:"end_date_utc,omitempty"` + + // specifies the list of dayOfMonth to recurrence. Possible values are between 1 - 31. Required if recurrence_type is Monthly. + // +kubebuilder:validation:Optional + // +listType=set + RecurrenceMonthly []*float64 `json:"recurrenceMonthly,omitempty" tf:"recurrence_monthly,omitempty"` + + // specifies the list of dayOfWeek to recurrence. Possible values are Sunday, Monday, Tuesday, Wednesday, Thursday, Friday and Saturday. + // +kubebuilder:validation:Optional + // +listType=set + RecurrenceWeekly []*string `json:"recurrenceWeekly,omitempty" tf:"recurrence_weekly,omitempty"` + + // specifies the recurrence UTC start datetime (Y-m-d'T'H:M:S'Z'). + // +kubebuilder:validation:Optional + StartDateUtc *string `json:"startDateUtc" tf:"start_date_utc,omitempty"` +} + +type SuppressionInitParameters struct { + + // Specifies the type of suppression. Possible values are Always, Daily, Monthly, Once, and Weekly. + RecurrenceType *string `json:"recurrenceType,omitempty" tf:"recurrence_type,omitempty"` + + // A schedule block as defined below. Required if recurrence_type is Daily, Monthly, Once or Weekly. + Schedule *ScheduleInitParameters `json:"schedule,omitempty" tf:"schedule,omitempty"` +} + +type SuppressionObservation struct { + + // Specifies the type of suppression. Possible values are Always, Daily, Monthly, Once, and Weekly. + RecurrenceType *string `json:"recurrenceType,omitempty" tf:"recurrence_type,omitempty"` + + // A schedule block as defined below. 
Required if recurrence_type is Daily, Monthly, Once or Weekly. + Schedule *ScheduleObservation `json:"schedule,omitempty" tf:"schedule,omitempty"` +} + +type SuppressionParameters struct { + + // Specifies the type of suppression. Possible values are Always, Daily, Monthly, Once, and Weekly. + // +kubebuilder:validation:Optional + RecurrenceType *string `json:"recurrenceType" tf:"recurrence_type,omitempty"` + + // A schedule block as defined below. Required if recurrence_type is Daily, Monthly, Once or Weekly. + // +kubebuilder:validation:Optional + Schedule *ScheduleParameters `json:"schedule,omitempty" tf:"schedule,omitempty"` +} + +// MonitorActionRuleSuppressionSpec defines the desired state of MonitorActionRuleSuppression +type MonitorActionRuleSuppressionSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider MonitorActionRuleSuppressionParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider MonitorActionRuleSuppressionInitParameters `json:"initProvider,omitempty"` +} + +// MonitorActionRuleSuppressionStatus defines the observed state of MonitorActionRuleSuppression. 
+type MonitorActionRuleSuppressionStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider MonitorActionRuleSuppressionObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// MonitorActionRuleSuppression is the Schema for the MonitorActionRuleSuppressions API. Manages an Monitor Action Rule which type is suppression. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type MonitorActionRuleSuppression struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.suppression) || (has(self.initProvider) && has(self.initProvider.suppression))",message="spec.forProvider.suppression is a required parameter" + Spec MonitorActionRuleSuppressionSpec `json:"spec"` + Status MonitorActionRuleSuppressionStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// MonitorActionRuleSuppressionList contains a list of MonitorActionRuleSuppressions +type MonitorActionRuleSuppressionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []MonitorActionRuleSuppression `json:"items"` +} + +// Repository type metadata. 
+var ( + MonitorActionRuleSuppression_Kind = "MonitorActionRuleSuppression" + MonitorActionRuleSuppression_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: MonitorActionRuleSuppression_Kind}.String() + MonitorActionRuleSuppression_KindAPIVersion = MonitorActionRuleSuppression_Kind + "." + CRDGroupVersion.String() + MonitorActionRuleSuppression_GroupVersionKind = CRDGroupVersion.WithKind(MonitorActionRuleSuppression_Kind) +) + +func init() { + SchemeBuilder.Register(&MonitorActionRuleSuppression{}, &MonitorActionRuleSuppressionList{}) +} diff --git a/apis/alertsmanagement/v1beta2/zz_monitoralertprocessingruleactiongroup_terraformed.go b/apis/alertsmanagement/v1beta2/zz_monitoralertprocessingruleactiongroup_terraformed.go new file mode 100755 index 000000000..1d81e4e50 --- /dev/null +++ b/apis/alertsmanagement/v1beta2/zz_monitoralertprocessingruleactiongroup_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this MonitorAlertProcessingRuleActionGroup +func (mg *MonitorAlertProcessingRuleActionGroup) GetTerraformResourceType() string { + return "azurerm_monitor_alert_processing_rule_action_group" +} + +// GetConnectionDetailsMapping for this MonitorAlertProcessingRuleActionGroup +func (tr *MonitorAlertProcessingRuleActionGroup) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this MonitorAlertProcessingRuleActionGroup +func (tr *MonitorAlertProcessingRuleActionGroup) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this MonitorAlertProcessingRuleActionGroup +func (tr *MonitorAlertProcessingRuleActionGroup) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this MonitorAlertProcessingRuleActionGroup +func (tr *MonitorAlertProcessingRuleActionGroup) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this MonitorAlertProcessingRuleActionGroup +func (tr *MonitorAlertProcessingRuleActionGroup) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this MonitorAlertProcessingRuleActionGroup +func (tr *MonitorAlertProcessingRuleActionGroup) SetParameters(params 
map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this MonitorAlertProcessingRuleActionGroup
+func (tr *MonitorAlertProcessingRuleActionGroup) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this MonitorAlertProcessingRuleActionGroup
+func (tr *MonitorAlertProcessingRuleActionGroup) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this MonitorAlertProcessingRuleActionGroup using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *MonitorAlertProcessingRuleActionGroup) LateInitialize(attrs []byte) (bool, error) { + params := &MonitorAlertProcessingRuleActionGroupParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *MonitorAlertProcessingRuleActionGroup) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/alertsmanagement/v1beta2/zz_monitoralertprocessingruleactiongroup_types.go b/apis/alertsmanagement/v1beta2/zz_monitoralertprocessingruleactiongroup_types.go new file mode 100755 index 000000000..f95f632ce --- /dev/null +++ b/apis/alertsmanagement/v1beta2/zz_monitoralertprocessingruleactiongroup_types.go @@ -0,0 +1,848 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AlertRuleNameInitParameters struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type AlertRuleNameObservation struct { + + // The operator for a given condition. 
Possible values are Equals, NotEquals, Contains, and DoesNotContain. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type AlertRuleNameParameters struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + // +kubebuilder:validation:Optional + Values []*string `json:"values" tf:"values,omitempty"` +} + +type DailyInitParameters struct { + + // Specifies the recurrence end time (H:M:S). + EndTime *string `json:"endTime,omitempty" tf:"end_time,omitempty"` + + // Specifies the recurrence start time (H:M:S). + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` +} + +type DailyObservation struct { + + // Specifies the recurrence end time (H:M:S). + EndTime *string `json:"endTime,omitempty" tf:"end_time,omitempty"` + + // Specifies the recurrence start time (H:M:S). + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` +} + +type DailyParameters struct { + + // Specifies the recurrence end time (H:M:S). + // +kubebuilder:validation:Optional + EndTime *string `json:"endTime" tf:"end_time,omitempty"` + + // Specifies the recurrence start time (H:M:S). + // +kubebuilder:validation:Optional + StartTime *string `json:"startTime" tf:"start_time,omitempty"` +} + +type MonitorAlertProcessingRuleActionGroupConditionAlertContextInitParameters struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. 
+ Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type MonitorAlertProcessingRuleActionGroupConditionAlertContextObservation struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type MonitorAlertProcessingRuleActionGroupConditionAlertContextParameters struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + // +kubebuilder:validation:Optional + Values []*string `json:"values" tf:"values,omitempty"` +} + +type MonitorAlertProcessingRuleActionGroupConditionAlertRuleIDInitParameters struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type MonitorAlertProcessingRuleActionGroupConditionAlertRuleIDObservation struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. 
+ Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type MonitorAlertProcessingRuleActionGroupConditionAlertRuleIDParameters struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + // +kubebuilder:validation:Optional + Values []*string `json:"values" tf:"values,omitempty"` +} + +type MonitorAlertProcessingRuleActionGroupConditionDescriptionInitParameters struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type MonitorAlertProcessingRuleActionGroupConditionDescriptionObservation struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type MonitorAlertProcessingRuleActionGroupConditionDescriptionParameters struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. 
+ // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + // +kubebuilder:validation:Optional + Values []*string `json:"values" tf:"values,omitempty"` +} + +type MonitorAlertProcessingRuleActionGroupConditionInitParameters struct { + + // A alert_context block as defined above. + AlertContext *MonitorAlertProcessingRuleActionGroupConditionAlertContextInitParameters `json:"alertContext,omitempty" tf:"alert_context,omitempty"` + + // A alert_rule_id block as defined above. + AlertRuleID *MonitorAlertProcessingRuleActionGroupConditionAlertRuleIDInitParameters `json:"alertRuleId,omitempty" tf:"alert_rule_id,omitempty"` + + // A alert_rule_name block as defined above. + AlertRuleName *AlertRuleNameInitParameters `json:"alertRuleName,omitempty" tf:"alert_rule_name,omitempty"` + + // A description block as defined below. + Description *MonitorAlertProcessingRuleActionGroupConditionDescriptionInitParameters `json:"description,omitempty" tf:"description,omitempty"` + + // A monitor_condition block as defined below. + MonitorCondition *MonitorConditionInitParameters `json:"monitorCondition,omitempty" tf:"monitor_condition,omitempty"` + + // A monitor_service block as defined below. + MonitorService *MonitorAlertProcessingRuleActionGroupConditionMonitorServiceInitParameters `json:"monitorService,omitempty" tf:"monitor_service,omitempty"` + + // A severity block as defined below. + Severity *MonitorAlertProcessingRuleActionGroupConditionSeverityInitParameters `json:"severity,omitempty" tf:"severity,omitempty"` + + // A signal_type block as defined below. + SignalType *SignalTypeInitParameters `json:"signalType,omitempty" tf:"signal_type,omitempty"` + + // A target_resource block as defined below. 
+ TargetResource *TargetResourceInitParameters `json:"targetResource,omitempty" tf:"target_resource,omitempty"` + + // A target_resource_group block as defined below. + TargetResourceGroup *TargetResourceGroupInitParameters `json:"targetResourceGroup,omitempty" tf:"target_resource_group,omitempty"` + + // A target_resource_type block as defined below. + TargetResourceType *MonitorAlertProcessingRuleActionGroupConditionTargetResourceTypeInitParameters `json:"targetResourceType,omitempty" tf:"target_resource_type,omitempty"` +} + +type MonitorAlertProcessingRuleActionGroupConditionMonitorServiceInitParameters struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type MonitorAlertProcessingRuleActionGroupConditionMonitorServiceObservation struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type MonitorAlertProcessingRuleActionGroupConditionMonitorServiceParameters struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. 
Microsoft.Compute/VirtualMachines) + // +kubebuilder:validation:Optional + Values []*string `json:"values" tf:"values,omitempty"` +} + +type MonitorAlertProcessingRuleActionGroupConditionObservation struct { + + // A alert_context block as defined above. + AlertContext *MonitorAlertProcessingRuleActionGroupConditionAlertContextObservation `json:"alertContext,omitempty" tf:"alert_context,omitempty"` + + // A alert_rule_id block as defined above. + AlertRuleID *MonitorAlertProcessingRuleActionGroupConditionAlertRuleIDObservation `json:"alertRuleId,omitempty" tf:"alert_rule_id,omitempty"` + + // A alert_rule_name block as defined above. + AlertRuleName *AlertRuleNameObservation `json:"alertRuleName,omitempty" tf:"alert_rule_name,omitempty"` + + // A description block as defined below. + Description *MonitorAlertProcessingRuleActionGroupConditionDescriptionObservation `json:"description,omitempty" tf:"description,omitempty"` + + // A monitor_condition block as defined below. + MonitorCondition *MonitorConditionObservation `json:"monitorCondition,omitempty" tf:"monitor_condition,omitempty"` + + // A monitor_service block as defined below. + MonitorService *MonitorAlertProcessingRuleActionGroupConditionMonitorServiceObservation `json:"monitorService,omitempty" tf:"monitor_service,omitempty"` + + // A severity block as defined below. + Severity *MonitorAlertProcessingRuleActionGroupConditionSeverityObservation `json:"severity,omitempty" tf:"severity,omitempty"` + + // A signal_type block as defined below. + SignalType *SignalTypeObservation `json:"signalType,omitempty" tf:"signal_type,omitempty"` + + // A target_resource block as defined below. + TargetResource *TargetResourceObservation `json:"targetResource,omitempty" tf:"target_resource,omitempty"` + + // A target_resource_group block as defined below. 
+ TargetResourceGroup *TargetResourceGroupObservation `json:"targetResourceGroup,omitempty" tf:"target_resource_group,omitempty"` + + // A target_resource_type block as defined below. + TargetResourceType *MonitorAlertProcessingRuleActionGroupConditionTargetResourceTypeObservation `json:"targetResourceType,omitempty" tf:"target_resource_type,omitempty"` +} + +type MonitorAlertProcessingRuleActionGroupConditionParameters struct { + + // A alert_context block as defined above. + // +kubebuilder:validation:Optional + AlertContext *MonitorAlertProcessingRuleActionGroupConditionAlertContextParameters `json:"alertContext,omitempty" tf:"alert_context,omitempty"` + + // A alert_rule_id block as defined above. + // +kubebuilder:validation:Optional + AlertRuleID *MonitorAlertProcessingRuleActionGroupConditionAlertRuleIDParameters `json:"alertRuleId,omitempty" tf:"alert_rule_id,omitempty"` + + // A alert_rule_name block as defined above. + // +kubebuilder:validation:Optional + AlertRuleName *AlertRuleNameParameters `json:"alertRuleName,omitempty" tf:"alert_rule_name,omitempty"` + + // A description block as defined below. + // +kubebuilder:validation:Optional + Description *MonitorAlertProcessingRuleActionGroupConditionDescriptionParameters `json:"description,omitempty" tf:"description,omitempty"` + + // A monitor_condition block as defined below. + // +kubebuilder:validation:Optional + MonitorCondition *MonitorConditionParameters `json:"monitorCondition,omitempty" tf:"monitor_condition,omitempty"` + + // A monitor_service block as defined below. + // +kubebuilder:validation:Optional + MonitorService *MonitorAlertProcessingRuleActionGroupConditionMonitorServiceParameters `json:"monitorService,omitempty" tf:"monitor_service,omitempty"` + + // A severity block as defined below. 
+ // +kubebuilder:validation:Optional + Severity *MonitorAlertProcessingRuleActionGroupConditionSeverityParameters `json:"severity,omitempty" tf:"severity,omitempty"` + + // A signal_type block as defined below. + // +kubebuilder:validation:Optional + SignalType *SignalTypeParameters `json:"signalType,omitempty" tf:"signal_type,omitempty"` + + // A target_resource block as defined below. + // +kubebuilder:validation:Optional + TargetResource *TargetResourceParameters `json:"targetResource,omitempty" tf:"target_resource,omitempty"` + + // A target_resource_group block as defined below. + // +kubebuilder:validation:Optional + TargetResourceGroup *TargetResourceGroupParameters `json:"targetResourceGroup,omitempty" tf:"target_resource_group,omitempty"` + + // A target_resource_type block as defined below. + // +kubebuilder:validation:Optional + TargetResourceType *MonitorAlertProcessingRuleActionGroupConditionTargetResourceTypeParameters `json:"targetResourceType,omitempty" tf:"target_resource_type,omitempty"` +} + +type MonitorAlertProcessingRuleActionGroupConditionSeverityInitParameters struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type MonitorAlertProcessingRuleActionGroupConditionSeverityObservation struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. 
Microsoft.Compute/VirtualMachines) + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type MonitorAlertProcessingRuleActionGroupConditionSeverityParameters struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + // +kubebuilder:validation:Optional + Values []*string `json:"values" tf:"values,omitempty"` +} + +type MonitorAlertProcessingRuleActionGroupConditionTargetResourceTypeInitParameters struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type MonitorAlertProcessingRuleActionGroupConditionTargetResourceTypeObservation struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type MonitorAlertProcessingRuleActionGroupConditionTargetResourceTypeParameters struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // A list of values to match for a given condition. 
The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + // +kubebuilder:validation:Optional + Values []*string `json:"values" tf:"values,omitempty"` +} + +type MonitorAlertProcessingRuleActionGroupInitParameters struct { + + // Specifies a list of Action Group IDs. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/insights/v1beta2.MonitorActionGroup + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + AddActionGroupIds []*string `json:"addActionGroupIds,omitempty" tf:"add_action_group_ids,omitempty"` + + // References to MonitorActionGroup in insights to populate addActionGroupIds. + // +kubebuilder:validation:Optional + AddActionGroupIdsRefs []v1.Reference `json:"addActionGroupIdsRefs,omitempty" tf:"-"` + + // Selector for a list of MonitorActionGroup in insights to populate addActionGroupIds. + // +kubebuilder:validation:Optional + AddActionGroupIdsSelector *v1.Selector `json:"addActionGroupIdsSelector,omitempty" tf:"-"` + + // A condition block as defined below. + Condition *MonitorAlertProcessingRuleActionGroupConditionInitParameters `json:"condition,omitempty" tf:"condition,omitempty"` + + // Specifies a description for the Alert Processing Rule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Should the Alert Processing Rule be enabled? Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // A schedule block as defined below. + Schedule *MonitorAlertProcessingRuleActionGroupScheduleInitParameters `json:"schedule,omitempty" tf:"schedule,omitempty"` + + // A list of resource IDs which will be the target of alert processing rule. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + Scopes []*string `json:"scopes,omitempty" tf:"scopes,omitempty"` + + // References to ResourceGroup in azure to populate scopes. + // +kubebuilder:validation:Optional + ScopesRefs []v1.Reference `json:"scopesRefs,omitempty" tf:"-"` + + // Selector for a list of ResourceGroup in azure to populate scopes. + // +kubebuilder:validation:Optional + ScopesSelector *v1.Selector `json:"scopesSelector,omitempty" tf:"-"` + + // A mapping of tags which should be assigned to the Alert Processing Rule. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type MonitorAlertProcessingRuleActionGroupObservation struct { + + // Specifies a list of Action Group IDs. + AddActionGroupIds []*string `json:"addActionGroupIds,omitempty" tf:"add_action_group_ids,omitempty"` + + // A condition block as defined below. + Condition *MonitorAlertProcessingRuleActionGroupConditionObservation `json:"condition,omitempty" tf:"condition,omitempty"` + + // Specifies a description for the Alert Processing Rule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Should the Alert Processing Rule be enabled? Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The ID of the Alert Processing Rule. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name of the Resource Group where the Alert Processing Rule should exist. Changing this forces a new Alert Processing Rule to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A schedule block as defined below. 
+ Schedule *MonitorAlertProcessingRuleActionGroupScheduleObservation `json:"schedule,omitempty" tf:"schedule,omitempty"` + + // A list of resource IDs which will be the target of alert processing rule. + Scopes []*string `json:"scopes,omitempty" tf:"scopes,omitempty"` + + // A mapping of tags which should be assigned to the Alert Processing Rule. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type MonitorAlertProcessingRuleActionGroupParameters struct { + + // Specifies a list of Action Group IDs. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/insights/v1beta2.MonitorActionGroup + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + AddActionGroupIds []*string `json:"addActionGroupIds,omitempty" tf:"add_action_group_ids,omitempty"` + + // References to MonitorActionGroup in insights to populate addActionGroupIds. + // +kubebuilder:validation:Optional + AddActionGroupIdsRefs []v1.Reference `json:"addActionGroupIdsRefs,omitempty" tf:"-"` + + // Selector for a list of MonitorActionGroup in insights to populate addActionGroupIds. + // +kubebuilder:validation:Optional + AddActionGroupIdsSelector *v1.Selector `json:"addActionGroupIdsSelector,omitempty" tf:"-"` + + // A condition block as defined below. + // +kubebuilder:validation:Optional + Condition *MonitorAlertProcessingRuleActionGroupConditionParameters `json:"condition,omitempty" tf:"condition,omitempty"` + + // Specifies a description for the Alert Processing Rule. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Should the Alert Processing Rule be enabled? Defaults to true. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The name of the Resource Group where the Alert Processing Rule should exist. 
Changing this forces a new Alert Processing Rule to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A schedule block as defined below. + // +kubebuilder:validation:Optional + Schedule *MonitorAlertProcessingRuleActionGroupScheduleParameters `json:"schedule,omitempty" tf:"schedule,omitempty"` + + // A list of resource IDs which will be the target of alert processing rule. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + Scopes []*string `json:"scopes,omitempty" tf:"scopes,omitempty"` + + // References to ResourceGroup in azure to populate scopes. + // +kubebuilder:validation:Optional + ScopesRefs []v1.Reference `json:"scopesRefs,omitempty" tf:"-"` + + // Selector for a list of ResourceGroup in azure to populate scopes. + // +kubebuilder:validation:Optional + ScopesSelector *v1.Selector `json:"scopesSelector,omitempty" tf:"-"` + + // A mapping of tags which should be assigned to the Alert Processing Rule. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type MonitorAlertProcessingRuleActionGroupScheduleInitParameters struct { + + // Specifies the Alert Processing Rule effective start time (Y-m-d'T'H:M:S). + EffectiveFrom *string `json:"effectiveFrom,omitempty" tf:"effective_from,omitempty"` + + // Specifies the Alert Processing Rule effective end time (Y-m-d'T'H:M:S). + EffectiveUntil *string `json:"effectiveUntil,omitempty" tf:"effective_until,omitempty"` + + // A recurrence block as defined above. + Recurrence *RecurrenceInitParameters `json:"recurrence,omitempty" tf:"recurrence,omitempty"` + + // The time zone (e.g. Pacific Standard time, Eastern Standard Time). Defaults to UTC. possible values are defined here. + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +type MonitorAlertProcessingRuleActionGroupScheduleObservation struct { + + // Specifies the Alert Processing Rule effective start time (Y-m-d'T'H:M:S). + EffectiveFrom *string `json:"effectiveFrom,omitempty" tf:"effective_from,omitempty"` + + // Specifies the Alert Processing Rule effective end time (Y-m-d'T'H:M:S). + EffectiveUntil *string `json:"effectiveUntil,omitempty" tf:"effective_until,omitempty"` + + // A recurrence block as defined above. + Recurrence *RecurrenceObservation `json:"recurrence,omitempty" tf:"recurrence,omitempty"` + + // The time zone (e.g. Pacific Standard time, Eastern Standard Time). Defaults to UTC. possible values are defined here. + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +type MonitorAlertProcessingRuleActionGroupScheduleParameters struct { + + // Specifies the Alert Processing Rule effective start time (Y-m-d'T'H:M:S). + // +kubebuilder:validation:Optional + EffectiveFrom *string `json:"effectiveFrom,omitempty" tf:"effective_from,omitempty"` + + // Specifies the Alert Processing Rule effective end time (Y-m-d'T'H:M:S). 
+ // +kubebuilder:validation:Optional + EffectiveUntil *string `json:"effectiveUntil,omitempty" tf:"effective_until,omitempty"` + + // A recurrence block as defined above. + // +kubebuilder:validation:Optional + Recurrence *RecurrenceParameters `json:"recurrence,omitempty" tf:"recurrence,omitempty"` + + // The time zone (e.g. Pacific Standard time, Eastern Standard Time). Defaults to UTC. possible values are defined here. + // +kubebuilder:validation:Optional + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +type MonitorConditionInitParameters struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type MonitorConditionObservation struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type MonitorConditionParameters struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. 
Microsoft.Compute/VirtualMachines) + // +kubebuilder:validation:Optional + Values []*string `json:"values" tf:"values,omitempty"` +} + +type MonthlyInitParameters struct { + + // Specifies a list of dayOfMonth to recurrence. Possible values are integers between 1 - 31. + DaysOfMonth []*float64 `json:"daysOfMonth,omitempty" tf:"days_of_month,omitempty"` + + // Specifies the recurrence end time (H:M:S). + EndTime *string `json:"endTime,omitempty" tf:"end_time,omitempty"` + + // Specifies the recurrence start time (H:M:S). + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` +} + +type MonthlyObservation struct { + + // Specifies a list of dayOfMonth to recurrence. Possible values are integers between 1 - 31. + DaysOfMonth []*float64 `json:"daysOfMonth,omitempty" tf:"days_of_month,omitempty"` + + // Specifies the recurrence end time (H:M:S). + EndTime *string `json:"endTime,omitempty" tf:"end_time,omitempty"` + + // Specifies the recurrence start time (H:M:S). + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` +} + +type MonthlyParameters struct { + + // Specifies a list of dayOfMonth to recurrence. Possible values are integers between 1 - 31. + // +kubebuilder:validation:Optional + DaysOfMonth []*float64 `json:"daysOfMonth" tf:"days_of_month,omitempty"` + + // Specifies the recurrence end time (H:M:S). + // +kubebuilder:validation:Optional + EndTime *string `json:"endTime,omitempty" tf:"end_time,omitempty"` + + // Specifies the recurrence start time (H:M:S). + // +kubebuilder:validation:Optional + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` +} + +type RecurrenceInitParameters struct { + + // One or more daily blocks as defined above. + Daily []DailyInitParameters `json:"daily,omitempty" tf:"daily,omitempty"` + + // One or more monthly blocks as defined above. + Monthly []MonthlyInitParameters `json:"monthly,omitempty" tf:"monthly,omitempty"` + + // One or more weekly blocks as defined below. 
+ Weekly []WeeklyInitParameters `json:"weekly,omitempty" tf:"weekly,omitempty"` +} + +type RecurrenceObservation struct { + + // One or more daily blocks as defined above. + Daily []DailyObservation `json:"daily,omitempty" tf:"daily,omitempty"` + + // One or more monthly blocks as defined above. + Monthly []MonthlyObservation `json:"monthly,omitempty" tf:"monthly,omitempty"` + + // One or more weekly blocks as defined below. + Weekly []WeeklyObservation `json:"weekly,omitempty" tf:"weekly,omitempty"` +} + +type RecurrenceParameters struct { + + // One or more daily blocks as defined above. + // +kubebuilder:validation:Optional + Daily []DailyParameters `json:"daily,omitempty" tf:"daily,omitempty"` + + // One or more monthly blocks as defined above. + // +kubebuilder:validation:Optional + Monthly []MonthlyParameters `json:"monthly,omitempty" tf:"monthly,omitempty"` + + // One or more weekly blocks as defined below. + // +kubebuilder:validation:Optional + Weekly []WeeklyParameters `json:"weekly,omitempty" tf:"weekly,omitempty"` +} + +type SignalTypeInitParameters struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type SignalTypeObservation struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type SignalTypeParameters struct { + + // The operator for a given condition. 
Possible values are Equals, NotEquals, Contains, and DoesNotContain. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + // +kubebuilder:validation:Optional + Values []*string `json:"values" tf:"values,omitempty"` +} + +type TargetResourceGroupInitParameters struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type TargetResourceGroupObservation struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type TargetResourceGroupParameters struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + // +kubebuilder:validation:Optional + Values []*string `json:"values" tf:"values,omitempty"` +} + +type TargetResourceInitParameters struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. 
+ Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type TargetResourceObservation struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type TargetResourceParameters struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + // +kubebuilder:validation:Optional + Values []*string `json:"values" tf:"values,omitempty"` +} + +type WeeklyInitParameters struct { + + // Specifies a list of dayOfWeek to recurrence. Possible values are Sunday, Monday, Tuesday, Wednesday, Thursday, Friday, and Saturday. + DaysOfWeek []*string `json:"daysOfWeek,omitempty" tf:"days_of_week,omitempty"` + + // Specifies the recurrence end time (H:M:S). + EndTime *string `json:"endTime,omitempty" tf:"end_time,omitempty"` + + // Specifies the recurrence start time (H:M:S). + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` +} + +type WeeklyObservation struct { + + // Specifies a list of dayOfWeek to recurrence. Possible values are Sunday, Monday, Tuesday, Wednesday, Thursday, Friday, and Saturday. 
+ DaysOfWeek []*string `json:"daysOfWeek,omitempty" tf:"days_of_week,omitempty"` + + // Specifies the recurrence end time (H:M:S). + EndTime *string `json:"endTime,omitempty" tf:"end_time,omitempty"` + + // Specifies the recurrence start time (H:M:S). + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` +} + +type WeeklyParameters struct { + + // Specifies a list of dayOfWeek to recurrence. Possible values are Sunday, Monday, Tuesday, Wednesday, Thursday, Friday, and Saturday. + // +kubebuilder:validation:Optional + DaysOfWeek []*string `json:"daysOfWeek" tf:"days_of_week,omitempty"` + + // Specifies the recurrence end time (H:M:S). + // +kubebuilder:validation:Optional + EndTime *string `json:"endTime,omitempty" tf:"end_time,omitempty"` + + // Specifies the recurrence start time (H:M:S). + // +kubebuilder:validation:Optional + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` +} + +// MonitorAlertProcessingRuleActionGroupSpec defines the desired state of MonitorAlertProcessingRuleActionGroup +type MonitorAlertProcessingRuleActionGroupSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider MonitorAlertProcessingRuleActionGroupParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider MonitorAlertProcessingRuleActionGroupInitParameters `json:"initProvider,omitempty"` +} + +// MonitorAlertProcessingRuleActionGroupStatus defines the observed state of MonitorAlertProcessingRuleActionGroup. +type MonitorAlertProcessingRuleActionGroupStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider MonitorAlertProcessingRuleActionGroupObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// MonitorAlertProcessingRuleActionGroup is the Schema for the MonitorAlertProcessingRuleActionGroups API. Manages an Alert Processing Rule which apply action group. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type MonitorAlertProcessingRuleActionGroup struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec MonitorAlertProcessingRuleActionGroupSpec `json:"spec"` + Status MonitorAlertProcessingRuleActionGroupStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// MonitorAlertProcessingRuleActionGroupList contains a list of MonitorAlertProcessingRuleActionGroups +type MonitorAlertProcessingRuleActionGroupList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []MonitorAlertProcessingRuleActionGroup `json:"items"` +} + +// Repository type metadata. 
+var ( + MonitorAlertProcessingRuleActionGroup_Kind = "MonitorAlertProcessingRuleActionGroup" + MonitorAlertProcessingRuleActionGroup_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: MonitorAlertProcessingRuleActionGroup_Kind}.String() + MonitorAlertProcessingRuleActionGroup_KindAPIVersion = MonitorAlertProcessingRuleActionGroup_Kind + "." + CRDGroupVersion.String() + MonitorAlertProcessingRuleActionGroup_GroupVersionKind = CRDGroupVersion.WithKind(MonitorAlertProcessingRuleActionGroup_Kind) +) + +func init() { + SchemeBuilder.Register(&MonitorAlertProcessingRuleActionGroup{}, &MonitorAlertProcessingRuleActionGroupList{}) +} diff --git a/apis/alertsmanagement/v1beta2/zz_monitoralertprocessingrulesuppression_terraformed.go b/apis/alertsmanagement/v1beta2/zz_monitoralertprocessingrulesuppression_terraformed.go new file mode 100755 index 000000000..1167314dc --- /dev/null +++ b/apis/alertsmanagement/v1beta2/zz_monitoralertprocessingrulesuppression_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this MonitorAlertProcessingRuleSuppression +func (mg *MonitorAlertProcessingRuleSuppression) GetTerraformResourceType() string { + return "azurerm_monitor_alert_processing_rule_suppression" +} + +// GetConnectionDetailsMapping for this MonitorAlertProcessingRuleSuppression +func (tr *MonitorAlertProcessingRuleSuppression) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this MonitorAlertProcessingRuleSuppression +func (tr *MonitorAlertProcessingRuleSuppression) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this MonitorAlertProcessingRuleSuppression +func (tr *MonitorAlertProcessingRuleSuppression) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this MonitorAlertProcessingRuleSuppression +func (tr *MonitorAlertProcessingRuleSuppression) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this MonitorAlertProcessingRuleSuppression +func (tr *MonitorAlertProcessingRuleSuppression) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this MonitorAlertProcessingRuleSuppression +func (tr *MonitorAlertProcessingRuleSuppression) SetParameters(params 
map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this MonitorAlertProcessingRuleSuppression +func (tr *MonitorAlertProcessingRuleSuppression) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this MonitorAlertProcessingRuleSuppression +func (tr *MonitorAlertProcessingRuleSuppression) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this MonitorAlertProcessingRuleSuppression using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *MonitorAlertProcessingRuleSuppression) LateInitialize(attrs []byte) (bool, error) { + params := &MonitorAlertProcessingRuleSuppressionParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *MonitorAlertProcessingRuleSuppression) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/alertsmanagement/v1beta2/zz_monitoralertprocessingrulesuppression_types.go b/apis/alertsmanagement/v1beta2/zz_monitoralertprocessingrulesuppression_types.go new file mode 100755 index 000000000..d5e5ad586 --- /dev/null +++ b/apis/alertsmanagement/v1beta2/zz_monitoralertprocessingrulesuppression_types.go @@ -0,0 +1,818 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ConditionAlertRuleNameInitParameters struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type ConditionAlertRuleNameObservation struct { + + // The operator for a given condition. 
Possible values are Equals, NotEquals, Contains, and DoesNotContain. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type ConditionAlertRuleNameParameters struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + // +kubebuilder:validation:Optional + Values []*string `json:"values" tf:"values,omitempty"` +} + +type ConditionMonitorConditionInitParameters struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type ConditionMonitorConditionObservation struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type ConditionMonitorConditionParameters struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. 
+ // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + // +kubebuilder:validation:Optional + Values []*string `json:"values" tf:"values,omitempty"` +} + +type ConditionSignalTypeInitParameters struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type ConditionSignalTypeObservation struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type ConditionSignalTypeParameters struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + // +kubebuilder:validation:Optional + Values []*string `json:"values" tf:"values,omitempty"` +} + +type ConditionTargetResourceGroupInitParameters struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. 
+ Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type ConditionTargetResourceGroupObservation struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type ConditionTargetResourceGroupParameters struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + // +kubebuilder:validation:Optional + Values []*string `json:"values" tf:"values,omitempty"` +} + +type ConditionTargetResourceInitParameters struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type ConditionTargetResourceObservation struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. 
The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type ConditionTargetResourceParameters struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + // +kubebuilder:validation:Optional + Values []*string `json:"values" tf:"values,omitempty"` +} + +type MonitorAlertProcessingRuleSuppressionConditionAlertContextInitParameters struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type MonitorAlertProcessingRuleSuppressionConditionAlertContextObservation struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type MonitorAlertProcessingRuleSuppressionConditionAlertContextParameters struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // A list of values to match for a given condition. 
The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + // +kubebuilder:validation:Optional + Values []*string `json:"values" tf:"values,omitempty"` +} + +type MonitorAlertProcessingRuleSuppressionConditionAlertRuleIDInitParameters struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type MonitorAlertProcessingRuleSuppressionConditionAlertRuleIDObservation struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type MonitorAlertProcessingRuleSuppressionConditionAlertRuleIDParameters struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + // +kubebuilder:validation:Optional + Values []*string `json:"values" tf:"values,omitempty"` +} + +type MonitorAlertProcessingRuleSuppressionConditionDescriptionInitParameters struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. 
+ Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type MonitorAlertProcessingRuleSuppressionConditionDescriptionObservation struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type MonitorAlertProcessingRuleSuppressionConditionDescriptionParameters struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + // +kubebuilder:validation:Optional + Values []*string `json:"values" tf:"values,omitempty"` +} + +type MonitorAlertProcessingRuleSuppressionConditionInitParameters struct { + + // A alert_context block as defined above. + AlertContext *MonitorAlertProcessingRuleSuppressionConditionAlertContextInitParameters `json:"alertContext,omitempty" tf:"alert_context,omitempty"` + + // A alert_rule_id block as defined above. + AlertRuleID *MonitorAlertProcessingRuleSuppressionConditionAlertRuleIDInitParameters `json:"alertRuleId,omitempty" tf:"alert_rule_id,omitempty"` + + // A alert_rule_name block as defined above. + AlertRuleName *ConditionAlertRuleNameInitParameters `json:"alertRuleName,omitempty" tf:"alert_rule_name,omitempty"` + + // A description block as defined below. 
+ Description *MonitorAlertProcessingRuleSuppressionConditionDescriptionInitParameters `json:"description,omitempty" tf:"description,omitempty"` + + // A monitor_condition block as defined below. + MonitorCondition *ConditionMonitorConditionInitParameters `json:"monitorCondition,omitempty" tf:"monitor_condition,omitempty"` + + // A monitor_service block as defined below. + MonitorService *MonitorAlertProcessingRuleSuppressionConditionMonitorServiceInitParameters `json:"monitorService,omitempty" tf:"monitor_service,omitempty"` + + // A severity block as defined below. + Severity *MonitorAlertProcessingRuleSuppressionConditionSeverityInitParameters `json:"severity,omitempty" tf:"severity,omitempty"` + + // A signal_type block as defined below. + SignalType *ConditionSignalTypeInitParameters `json:"signalType,omitempty" tf:"signal_type,omitempty"` + + // A target_resource block as defined below. + TargetResource *ConditionTargetResourceInitParameters `json:"targetResource,omitempty" tf:"target_resource,omitempty"` + + // A target_resource_group block as defined below. + TargetResourceGroup *ConditionTargetResourceGroupInitParameters `json:"targetResourceGroup,omitempty" tf:"target_resource_group,omitempty"` + + // A target_resource_type block as defined below. + TargetResourceType *MonitorAlertProcessingRuleSuppressionConditionTargetResourceTypeInitParameters `json:"targetResourceType,omitempty" tf:"target_resource_type,omitempty"` +} + +type MonitorAlertProcessingRuleSuppressionConditionMonitorServiceInitParameters struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. 
Microsoft.Compute/VirtualMachines) + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type MonitorAlertProcessingRuleSuppressionConditionMonitorServiceObservation struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type MonitorAlertProcessingRuleSuppressionConditionMonitorServiceParameters struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + // +kubebuilder:validation:Optional + Values []*string `json:"values" tf:"values,omitempty"` +} + +type MonitorAlertProcessingRuleSuppressionConditionObservation struct { + + // A alert_context block as defined above. + AlertContext *MonitorAlertProcessingRuleSuppressionConditionAlertContextObservation `json:"alertContext,omitempty" tf:"alert_context,omitempty"` + + // A alert_rule_id block as defined above. + AlertRuleID *MonitorAlertProcessingRuleSuppressionConditionAlertRuleIDObservation `json:"alertRuleId,omitempty" tf:"alert_rule_id,omitempty"` + + // A alert_rule_name block as defined above. + AlertRuleName *ConditionAlertRuleNameObservation `json:"alertRuleName,omitempty" tf:"alert_rule_name,omitempty"` + + // A description block as defined below. + Description *MonitorAlertProcessingRuleSuppressionConditionDescriptionObservation `json:"description,omitempty" tf:"description,omitempty"` + + // A monitor_condition block as defined below. 
+ MonitorCondition *ConditionMonitorConditionObservation `json:"monitorCondition,omitempty" tf:"monitor_condition,omitempty"` + + // A monitor_service block as defined below. + MonitorService *MonitorAlertProcessingRuleSuppressionConditionMonitorServiceObservation `json:"monitorService,omitempty" tf:"monitor_service,omitempty"` + + // A severity block as defined below. + Severity *MonitorAlertProcessingRuleSuppressionConditionSeverityObservation `json:"severity,omitempty" tf:"severity,omitempty"` + + // A signal_type block as defined below. + SignalType *ConditionSignalTypeObservation `json:"signalType,omitempty" tf:"signal_type,omitempty"` + + // A target_resource block as defined below. + TargetResource *ConditionTargetResourceObservation `json:"targetResource,omitempty" tf:"target_resource,omitempty"` + + // A target_resource_group block as defined below. + TargetResourceGroup *ConditionTargetResourceGroupObservation `json:"targetResourceGroup,omitempty" tf:"target_resource_group,omitempty"` + + // A target_resource_type block as defined below. + TargetResourceType *MonitorAlertProcessingRuleSuppressionConditionTargetResourceTypeObservation `json:"targetResourceType,omitempty" tf:"target_resource_type,omitempty"` +} + +type MonitorAlertProcessingRuleSuppressionConditionParameters struct { + + // A alert_context block as defined above. + // +kubebuilder:validation:Optional + AlertContext *MonitorAlertProcessingRuleSuppressionConditionAlertContextParameters `json:"alertContext,omitempty" tf:"alert_context,omitempty"` + + // A alert_rule_id block as defined above. + // +kubebuilder:validation:Optional + AlertRuleID *MonitorAlertProcessingRuleSuppressionConditionAlertRuleIDParameters `json:"alertRuleId,omitempty" tf:"alert_rule_id,omitempty"` + + // A alert_rule_name block as defined above. 
+ // +kubebuilder:validation:Optional + AlertRuleName *ConditionAlertRuleNameParameters `json:"alertRuleName,omitempty" tf:"alert_rule_name,omitempty"` + + // A description block as defined below. + // +kubebuilder:validation:Optional + Description *MonitorAlertProcessingRuleSuppressionConditionDescriptionParameters `json:"description,omitempty" tf:"description,omitempty"` + + // A monitor_condition block as defined below. + // +kubebuilder:validation:Optional + MonitorCondition *ConditionMonitorConditionParameters `json:"monitorCondition,omitempty" tf:"monitor_condition,omitempty"` + + // A monitor_service block as defined below. + // +kubebuilder:validation:Optional + MonitorService *MonitorAlertProcessingRuleSuppressionConditionMonitorServiceParameters `json:"monitorService,omitempty" tf:"monitor_service,omitempty"` + + // A severity block as defined below. + // +kubebuilder:validation:Optional + Severity *MonitorAlertProcessingRuleSuppressionConditionSeverityParameters `json:"severity,omitempty" tf:"severity,omitempty"` + + // A signal_type block as defined below. + // +kubebuilder:validation:Optional + SignalType *ConditionSignalTypeParameters `json:"signalType,omitempty" tf:"signal_type,omitempty"` + + // A target_resource block as defined below. + // +kubebuilder:validation:Optional + TargetResource *ConditionTargetResourceParameters `json:"targetResource,omitempty" tf:"target_resource,omitempty"` + + // A target_resource_group block as defined below. + // +kubebuilder:validation:Optional + TargetResourceGroup *ConditionTargetResourceGroupParameters `json:"targetResourceGroup,omitempty" tf:"target_resource_group,omitempty"` + + // A target_resource_type block as defined below. 
+ // +kubebuilder:validation:Optional + TargetResourceType *MonitorAlertProcessingRuleSuppressionConditionTargetResourceTypeParameters `json:"targetResourceType,omitempty" tf:"target_resource_type,omitempty"` +} + +type MonitorAlertProcessingRuleSuppressionConditionSeverityInitParameters struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type MonitorAlertProcessingRuleSuppressionConditionSeverityObservation struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type MonitorAlertProcessingRuleSuppressionConditionSeverityParameters struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + // +kubebuilder:validation:Optional + Values []*string `json:"values" tf:"values,omitempty"` +} + +type MonitorAlertProcessingRuleSuppressionConditionTargetResourceTypeInitParameters struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. 
+ Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type MonitorAlertProcessingRuleSuppressionConditionTargetResourceTypeObservation struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type MonitorAlertProcessingRuleSuppressionConditionTargetResourceTypeParameters struct { + + // The operator for a given condition. Possible values are Equals, NotEquals, Contains, and DoesNotContain. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // A list of values to match for a given condition. The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + // +kubebuilder:validation:Optional + Values []*string `json:"values" tf:"values,omitempty"` +} + +type MonitorAlertProcessingRuleSuppressionInitParameters struct { + + // A condition block as defined below. + Condition *MonitorAlertProcessingRuleSuppressionConditionInitParameters `json:"condition,omitempty" tf:"condition,omitempty"` + + // Specifies a description for the Alert Processing Rule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Should the Alert Processing Rule be enabled? Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // A schedule block as defined below. 
+ Schedule *MonitorAlertProcessingRuleSuppressionScheduleInitParameters `json:"schedule,omitempty" tf:"schedule,omitempty"` + + // A list of resource IDs which will be the target of Alert Processing Rule. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + Scopes []*string `json:"scopes,omitempty" tf:"scopes,omitempty"` + + // References to ResourceGroup in azure to populate scopes. + // +kubebuilder:validation:Optional + ScopesRefs []v1.Reference `json:"scopesRefs,omitempty" tf:"-"` + + // Selector for a list of ResourceGroup in azure to populate scopes. + // +kubebuilder:validation:Optional + ScopesSelector *v1.Selector `json:"scopesSelector,omitempty" tf:"-"` + + // A mapping of tags which should be assigned to the Alert Processing Rule. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type MonitorAlertProcessingRuleSuppressionObservation struct { + + // A condition block as defined below. + Condition *MonitorAlertProcessingRuleSuppressionConditionObservation `json:"condition,omitempty" tf:"condition,omitempty"` + + // Specifies a description for the Alert Processing Rule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Should the Alert Processing Rule be enabled? Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The ID of the Alert Processing Rule. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name of the Resource Group where the Alert Processing Rule should exist. Changing this forces a new Alert Processing Rule to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A schedule block as defined below. 
+ Schedule *MonitorAlertProcessingRuleSuppressionScheduleObservation `json:"schedule,omitempty" tf:"schedule,omitempty"` + + // A list of resource IDs which will be the target of Alert Processing Rule. + Scopes []*string `json:"scopes,omitempty" tf:"scopes,omitempty"` + + // A mapping of tags which should be assigned to the Alert Processing Rule. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type MonitorAlertProcessingRuleSuppressionParameters struct { + + // A condition block as defined below. + // +kubebuilder:validation:Optional + Condition *MonitorAlertProcessingRuleSuppressionConditionParameters `json:"condition,omitempty" tf:"condition,omitempty"` + + // Specifies a description for the Alert Processing Rule. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Should the Alert Processing Rule be enabled? Defaults to true. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The name of the Resource Group where the Alert Processing Rule should exist. Changing this forces a new Alert Processing Rule to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A schedule block as defined below. 
+ // +kubebuilder:validation:Optional + Schedule *MonitorAlertProcessingRuleSuppressionScheduleParameters `json:"schedule,omitempty" tf:"schedule,omitempty"` + + // A list of resource IDs which will be the target of Alert Processing Rule. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + Scopes []*string `json:"scopes,omitempty" tf:"scopes,omitempty"` + + // References to ResourceGroup in azure to populate scopes. + // +kubebuilder:validation:Optional + ScopesRefs []v1.Reference `json:"scopesRefs,omitempty" tf:"-"` + + // Selector for a list of ResourceGroup in azure to populate scopes. + // +kubebuilder:validation:Optional + ScopesSelector *v1.Selector `json:"scopesSelector,omitempty" tf:"-"` + + // A mapping of tags which should be assigned to the Alert Processing Rule. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type MonitorAlertProcessingRuleSuppressionScheduleInitParameters struct { + + // Specifies the Alert Processing Rule effective start time (Y-m-d'T'H:M:S). + EffectiveFrom *string `json:"effectiveFrom,omitempty" tf:"effective_from,omitempty"` + + // Specifies the Alert Processing Rule effective end time (Y-m-d'T'H:M:S). + EffectiveUntil *string `json:"effectiveUntil,omitempty" tf:"effective_until,omitempty"` + + // A recurrence block as defined above. + Recurrence *ScheduleRecurrenceInitParameters `json:"recurrence,omitempty" tf:"recurrence,omitempty"` + + // The time zone (e.g. Pacific Standard time, Eastern Standard Time). Defaults to UTC. possible values are defined here. 
+ TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +type MonitorAlertProcessingRuleSuppressionScheduleObservation struct { + + // Specifies the Alert Processing Rule effective start time (Y-m-d'T'H:M:S). + EffectiveFrom *string `json:"effectiveFrom,omitempty" tf:"effective_from,omitempty"` + + // Specifies the Alert Processing Rule effective end time (Y-m-d'T'H:M:S). + EffectiveUntil *string `json:"effectiveUntil,omitempty" tf:"effective_until,omitempty"` + + // A recurrence block as defined above. + Recurrence *ScheduleRecurrenceObservation `json:"recurrence,omitempty" tf:"recurrence,omitempty"` + + // The time zone (e.g. Pacific Standard time, Eastern Standard Time). Defaults to UTC. possible values are defined here. + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +type MonitorAlertProcessingRuleSuppressionScheduleParameters struct { + + // Specifies the Alert Processing Rule effective start time (Y-m-d'T'H:M:S). + // +kubebuilder:validation:Optional + EffectiveFrom *string `json:"effectiveFrom,omitempty" tf:"effective_from,omitempty"` + + // Specifies the Alert Processing Rule effective end time (Y-m-d'T'H:M:S). + // +kubebuilder:validation:Optional + EffectiveUntil *string `json:"effectiveUntil,omitempty" tf:"effective_until,omitempty"` + + // A recurrence block as defined above. + // +kubebuilder:validation:Optional + Recurrence *ScheduleRecurrenceParameters `json:"recurrence,omitempty" tf:"recurrence,omitempty"` + + // The time zone (e.g. Pacific Standard time, Eastern Standard Time). Defaults to UTC. possible values are defined here. + // +kubebuilder:validation:Optional + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +type RecurrenceDailyInitParameters struct { + + // Specifies the recurrence end time (H:M:S). + EndTime *string `json:"endTime,omitempty" tf:"end_time,omitempty"` + + // Specifies the recurrence start time (H:M:S). 
+ StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` +} + +type RecurrenceDailyObservation struct { + + // Specifies the recurrence end time (H:M:S). + EndTime *string `json:"endTime,omitempty" tf:"end_time,omitempty"` + + // Specifies the recurrence start time (H:M:S). + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` +} + +type RecurrenceDailyParameters struct { + + // Specifies the recurrence end time (H:M:S). + // +kubebuilder:validation:Optional + EndTime *string `json:"endTime" tf:"end_time,omitempty"` + + // Specifies the recurrence start time (H:M:S). + // +kubebuilder:validation:Optional + StartTime *string `json:"startTime" tf:"start_time,omitempty"` +} + +type RecurrenceMonthlyInitParameters struct { + + // Specifies a list of dayOfMonth to recurrence. Possible values are integers between 1 - 31. + DaysOfMonth []*float64 `json:"daysOfMonth,omitempty" tf:"days_of_month,omitempty"` + + // Specifies the recurrence end time (H:M:S). + EndTime *string `json:"endTime,omitempty" tf:"end_time,omitempty"` + + // Specifies the recurrence start time (H:M:S). + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` +} + +type RecurrenceMonthlyObservation struct { + + // Specifies a list of dayOfMonth to recurrence. Possible values are integers between 1 - 31. + DaysOfMonth []*float64 `json:"daysOfMonth,omitempty" tf:"days_of_month,omitempty"` + + // Specifies the recurrence end time (H:M:S). + EndTime *string `json:"endTime,omitempty" tf:"end_time,omitempty"` + + // Specifies the recurrence start time (H:M:S). + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` +} + +type RecurrenceMonthlyParameters struct { + + // Specifies a list of dayOfMonth to recurrence. Possible values are integers between 1 - 31. + // +kubebuilder:validation:Optional + DaysOfMonth []*float64 `json:"daysOfMonth" tf:"days_of_month,omitempty"` + + // Specifies the recurrence end time (H:M:S). 
+ // +kubebuilder:validation:Optional + EndTime *string `json:"endTime,omitempty" tf:"end_time,omitempty"` + + // Specifies the recurrence start time (H:M:S). + // +kubebuilder:validation:Optional + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` +} + +type RecurrenceWeeklyInitParameters struct { + + // Specifies a list of dayOfWeek to recurrence. Possible values are Sunday, Monday, Tuesday, Wednesday, Thursday, Friday, and Saturday. + DaysOfWeek []*string `json:"daysOfWeek,omitempty" tf:"days_of_week,omitempty"` + + // Specifies the recurrence end time (H:M:S). + EndTime *string `json:"endTime,omitempty" tf:"end_time,omitempty"` + + // Specifies the recurrence start time (H:M:S). + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` +} + +type RecurrenceWeeklyObservation struct { + + // Specifies a list of dayOfWeek to recurrence. Possible values are Sunday, Monday, Tuesday, Wednesday, Thursday, Friday, and Saturday. + DaysOfWeek []*string `json:"daysOfWeek,omitempty" tf:"days_of_week,omitempty"` + + // Specifies the recurrence end time (H:M:S). + EndTime *string `json:"endTime,omitempty" tf:"end_time,omitempty"` + + // Specifies the recurrence start time (H:M:S). + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` +} + +type RecurrenceWeeklyParameters struct { + + // Specifies a list of dayOfWeek to recurrence. Possible values are Sunday, Monday, Tuesday, Wednesday, Thursday, Friday, and Saturday. + // +kubebuilder:validation:Optional + DaysOfWeek []*string `json:"daysOfWeek" tf:"days_of_week,omitempty"` + + // Specifies the recurrence end time (H:M:S). + // +kubebuilder:validation:Optional + EndTime *string `json:"endTime,omitempty" tf:"end_time,omitempty"` + + // Specifies the recurrence start time (H:M:S). 
+ // +kubebuilder:validation:Optional + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` +} + +type ScheduleRecurrenceInitParameters struct { + + // One or more daily blocks as defined above. + Daily []RecurrenceDailyInitParameters `json:"daily,omitempty" tf:"daily,omitempty"` + + // One or more monthly blocks as defined above. + Monthly []RecurrenceMonthlyInitParameters `json:"monthly,omitempty" tf:"monthly,omitempty"` + + // One or more weekly blocks as defined below. + Weekly []RecurrenceWeeklyInitParameters `json:"weekly,omitempty" tf:"weekly,omitempty"` +} + +type ScheduleRecurrenceObservation struct { + + // One or more daily blocks as defined above. + Daily []RecurrenceDailyObservation `json:"daily,omitempty" tf:"daily,omitempty"` + + // One or more monthly blocks as defined above. + Monthly []RecurrenceMonthlyObservation `json:"monthly,omitempty" tf:"monthly,omitempty"` + + // One or more weekly blocks as defined below. + Weekly []RecurrenceWeeklyObservation `json:"weekly,omitempty" tf:"weekly,omitempty"` +} + +type ScheduleRecurrenceParameters struct { + + // One or more daily blocks as defined above. + // +kubebuilder:validation:Optional + Daily []RecurrenceDailyParameters `json:"daily,omitempty" tf:"daily,omitempty"` + + // One or more monthly blocks as defined above. + // +kubebuilder:validation:Optional + Monthly []RecurrenceMonthlyParameters `json:"monthly,omitempty" tf:"monthly,omitempty"` + + // One or more weekly blocks as defined below. + // +kubebuilder:validation:Optional + Weekly []RecurrenceWeeklyParameters `json:"weekly,omitempty" tf:"weekly,omitempty"` +} + +// MonitorAlertProcessingRuleSuppressionSpec defines the desired state of MonitorAlertProcessingRuleSuppression +type MonitorAlertProcessingRuleSuppressionSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider MonitorAlertProcessingRuleSuppressionParameters `json:"forProvider"` + // THIS IS A BETA FIELD. 
It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider MonitorAlertProcessingRuleSuppressionInitParameters `json:"initProvider,omitempty"` +} + +// MonitorAlertProcessingRuleSuppressionStatus defines the observed state of MonitorAlertProcessingRuleSuppression. +type MonitorAlertProcessingRuleSuppressionStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider MonitorAlertProcessingRuleSuppressionObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// MonitorAlertProcessingRuleSuppression is the Schema for the MonitorAlertProcessingRuleSuppressions API. Manages an Alert Processing Rule which suppress notifications. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type MonitorAlertProcessingRuleSuppression struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec MonitorAlertProcessingRuleSuppressionSpec `json:"spec"` + Status MonitorAlertProcessingRuleSuppressionStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// MonitorAlertProcessingRuleSuppressionList contains a list of MonitorAlertProcessingRuleSuppressions +type MonitorAlertProcessingRuleSuppressionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []MonitorAlertProcessingRuleSuppression `json:"items"` +} + +// Repository type metadata. +var ( + MonitorAlertProcessingRuleSuppression_Kind = "MonitorAlertProcessingRuleSuppression" + MonitorAlertProcessingRuleSuppression_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: MonitorAlertProcessingRuleSuppression_Kind}.String() + MonitorAlertProcessingRuleSuppression_KindAPIVersion = MonitorAlertProcessingRuleSuppression_Kind + "." 
+ CRDGroupVersion.String() + MonitorAlertProcessingRuleSuppression_GroupVersionKind = CRDGroupVersion.WithKind(MonitorAlertProcessingRuleSuppression_Kind) +) + +func init() { + SchemeBuilder.Register(&MonitorAlertProcessingRuleSuppression{}, &MonitorAlertProcessingRuleSuppressionList{}) +} diff --git a/apis/alertsmanagement/v1beta2/zz_monitorsmartdetectoralertrule_terraformed.go b/apis/alertsmanagement/v1beta2/zz_monitorsmartdetectoralertrule_terraformed.go new file mode 100755 index 000000000..cb7a2721c --- /dev/null +++ b/apis/alertsmanagement/v1beta2/zz_monitorsmartdetectoralertrule_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this MonitorSmartDetectorAlertRule +func (mg *MonitorSmartDetectorAlertRule) GetTerraformResourceType() string { + return "azurerm_monitor_smart_detector_alert_rule" +} + +// GetConnectionDetailsMapping for this MonitorSmartDetectorAlertRule +func (tr *MonitorSmartDetectorAlertRule) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this MonitorSmartDetectorAlertRule +func (tr *MonitorSmartDetectorAlertRule) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this MonitorSmartDetectorAlertRule +func (tr *MonitorSmartDetectorAlertRule) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of 
underlying Terraform resource of this MonitorSmartDetectorAlertRule +func (tr *MonitorSmartDetectorAlertRule) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this MonitorSmartDetectorAlertRule +func (tr *MonitorSmartDetectorAlertRule) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this MonitorSmartDetectorAlertRule +func (tr *MonitorSmartDetectorAlertRule) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this MonitorSmartDetectorAlertRule +func (tr *MonitorSmartDetectorAlertRule) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this MonitorSmartDetectorAlertRule +func (tr *MonitorSmartDetectorAlertRule) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this MonitorSmartDetectorAlertRule using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *MonitorSmartDetectorAlertRule) LateInitialize(attrs []byte) (bool, error) { + params := &MonitorSmartDetectorAlertRuleParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *MonitorSmartDetectorAlertRule) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/alertsmanagement/v1beta2/zz_monitorsmartdetectoralertrule_types.go b/apis/alertsmanagement/v1beta2/zz_monitorsmartdetectoralertrule_types.go new file mode 100755 index 000000000..bdf8734b9 --- /dev/null +++ b/apis/alertsmanagement/v1beta2/zz_monitorsmartdetectoralertrule_types.go @@ -0,0 +1,307 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ActionGroupInitParameters struct { + + // Specifies a custom email subject if Email Receiver is specified in Monitor Action Group resource. 
+ EmailSubject *string `json:"emailSubject,omitempty" tf:"email_subject,omitempty"` + + // Specifies the action group ids. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/insights/v1beta2.MonitorActionGroup + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +listType=set + Ids []*string `json:"ids,omitempty" tf:"ids,omitempty"` + + // References to MonitorActionGroup in insights to populate ids. + // +kubebuilder:validation:Optional + IdsRefs []v1.Reference `json:"idsRefs,omitempty" tf:"-"` + + // Selector for a list of MonitorActionGroup in insights to populate ids. + // +kubebuilder:validation:Optional + IdsSelector *v1.Selector `json:"idsSelector,omitempty" tf:"-"` + + // A JSON String which Specifies the custom webhook payload if Webhook Receiver is specified in Monitor Action Group resource. + WebhookPayload *string `json:"webhookPayload,omitempty" tf:"webhook_payload,omitempty"` +} + +type ActionGroupObservation struct { + + // Specifies a custom email subject if Email Receiver is specified in Monitor Action Group resource. + EmailSubject *string `json:"emailSubject,omitempty" tf:"email_subject,omitempty"` + + // Specifies the action group ids. + // +listType=set + Ids []*string `json:"ids,omitempty" tf:"ids,omitempty"` + + // A JSON String which Specifies the custom webhook payload if Webhook Receiver is specified in Monitor Action Group resource. + WebhookPayload *string `json:"webhookPayload,omitempty" tf:"webhook_payload,omitempty"` +} + +type ActionGroupParameters struct { + + // Specifies a custom email subject if Email Receiver is specified in Monitor Action Group resource. + // +kubebuilder:validation:Optional + EmailSubject *string `json:"emailSubject,omitempty" tf:"email_subject,omitempty"` + + // Specifies the action group ids. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/insights/v1beta2.MonitorActionGroup + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + // +listType=set + Ids []*string `json:"ids,omitempty" tf:"ids,omitempty"` + + // References to MonitorActionGroup in insights to populate ids. + // +kubebuilder:validation:Optional + IdsRefs []v1.Reference `json:"idsRefs,omitempty" tf:"-"` + + // Selector for a list of MonitorActionGroup in insights to populate ids. + // +kubebuilder:validation:Optional + IdsSelector *v1.Selector `json:"idsSelector,omitempty" tf:"-"` + + // A JSON String which Specifies the custom webhook payload if Webhook Receiver is specified in Monitor Action Group resource. + // +kubebuilder:validation:Optional + WebhookPayload *string `json:"webhookPayload,omitempty" tf:"webhook_payload,omitempty"` +} + +type MonitorSmartDetectorAlertRuleInitParameters struct { + + // An action_group block as defined below. + ActionGroup *ActionGroupInitParameters `json:"actionGroup,omitempty" tf:"action_group,omitempty"` + + // Specifies a description for the Smart Detector Alert Rule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Specifies the Built-In Smart Detector type that this alert rule will use. Currently the only possible values are FailureAnomaliesDetector, RequestPerformanceDegradationDetector, DependencyPerformanceDegradationDetector, ExceptionVolumeChangedDetector, TraceSeverityDetector, MemoryLeakDetector. + DetectorType *string `json:"detectorType,omitempty" tf:"detector_type,omitempty"` + + // Is the Smart Detector Alert Rule enabled? Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Specifies the frequency of this Smart Detector Alert Rule in ISO8601 format. 
+ Frequency *string `json:"frequency,omitempty" tf:"frequency,omitempty"` + + // Specifies the name of the Monitor Smart Detector Alert Rule. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies the name of the resource group in which the Monitor Smart Detector Alert Rule should exist. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // Specifies the scopes of this Smart Detector Alert Rule. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/insights/v1beta1.ApplicationInsights + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +listType=set + ScopeResourceIds []*string `json:"scopeResourceIds,omitempty" tf:"scope_resource_ids,omitempty"` + + // References to ApplicationInsights in insights to populate scopeResourceIds. + // +kubebuilder:validation:Optional + ScopeResourceIdsRefs []v1.Reference `json:"scopeResourceIdsRefs,omitempty" tf:"-"` + + // Selector for a list of ApplicationInsights in insights to populate scopeResourceIds. + // +kubebuilder:validation:Optional + ScopeResourceIdsSelector *v1.Selector `json:"scopeResourceIdsSelector,omitempty" tf:"-"` + + // Specifies the severity of this Smart Detector Alert Rule. Possible values are Sev0, Sev1, Sev2, Sev3 or Sev4. 
+ Severity *string `json:"severity,omitempty" tf:"severity,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies the duration (in ISO8601 format) to wait before notifying on the alert rule again. + ThrottlingDuration *string `json:"throttlingDuration,omitempty" tf:"throttling_duration,omitempty"` +} + +type MonitorSmartDetectorAlertRuleObservation struct { + + // An action_group block as defined below. + ActionGroup *ActionGroupObservation `json:"actionGroup,omitempty" tf:"action_group,omitempty"` + + // Specifies a description for the Smart Detector Alert Rule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Specifies the Built-In Smart Detector type that this alert rule will use. Currently the only possible values are FailureAnomaliesDetector, RequestPerformanceDegradationDetector, DependencyPerformanceDegradationDetector, ExceptionVolumeChangedDetector, TraceSeverityDetector, MemoryLeakDetector. + DetectorType *string `json:"detectorType,omitempty" tf:"detector_type,omitempty"` + + // Is the Smart Detector Alert Rule enabled? Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Specifies the frequency of this Smart Detector Alert Rule in ISO8601 format. + Frequency *string `json:"frequency,omitempty" tf:"frequency,omitempty"` + + // The ID of the Monitor Smart Detector Alert Rule. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies the name of the Monitor Smart Detector Alert Rule. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies the name of the resource group in which the Monitor Smart Detector Alert Rule should exist. Changing this forces a new resource to be created. 
+ ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Specifies the scopes of this Smart Detector Alert Rule. + // +listType=set + ScopeResourceIds []*string `json:"scopeResourceIds,omitempty" tf:"scope_resource_ids,omitempty"` + + // Specifies the severity of this Smart Detector Alert Rule. Possible values are Sev0, Sev1, Sev2, Sev3 or Sev4. + Severity *string `json:"severity,omitempty" tf:"severity,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies the duration (in ISO8601 format) to wait before notifying on the alert rule again. + ThrottlingDuration *string `json:"throttlingDuration,omitempty" tf:"throttling_duration,omitempty"` +} + +type MonitorSmartDetectorAlertRuleParameters struct { + + // An action_group block as defined below. + // +kubebuilder:validation:Optional + ActionGroup *ActionGroupParameters `json:"actionGroup,omitempty" tf:"action_group,omitempty"` + + // Specifies a description for the Smart Detector Alert Rule. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Specifies the Built-In Smart Detector type that this alert rule will use. Currently the only possible values are FailureAnomaliesDetector, RequestPerformanceDegradationDetector, DependencyPerformanceDegradationDetector, ExceptionVolumeChangedDetector, TraceSeverityDetector, MemoryLeakDetector. + // +kubebuilder:validation:Optional + DetectorType *string `json:"detectorType,omitempty" tf:"detector_type,omitempty"` + + // Is the Smart Detector Alert Rule enabled? Defaults to true. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Specifies the frequency of this Smart Detector Alert Rule in ISO8601 format. 
+ // +kubebuilder:validation:Optional + Frequency *string `json:"frequency,omitempty" tf:"frequency,omitempty"` + + // Specifies the name of the Monitor Smart Detector Alert Rule. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies the name of the resource group in which the Monitor Smart Detector Alert Rule should exist. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // Specifies the scopes of this Smart Detector Alert Rule. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/insights/v1beta1.ApplicationInsights + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + // +listType=set + ScopeResourceIds []*string `json:"scopeResourceIds,omitempty" tf:"scope_resource_ids,omitempty"` + + // References to ApplicationInsights in insights to populate scopeResourceIds. + // +kubebuilder:validation:Optional + ScopeResourceIdsRefs []v1.Reference `json:"scopeResourceIdsRefs,omitempty" tf:"-"` + + // Selector for a list of ApplicationInsights in insights to populate scopeResourceIds. 
+ // +kubebuilder:validation:Optional + ScopeResourceIdsSelector *v1.Selector `json:"scopeResourceIdsSelector,omitempty" tf:"-"` + + // Specifies the severity of this Smart Detector Alert Rule. Possible values are Sev0, Sev1, Sev2, Sev3 or Sev4. + // +kubebuilder:validation:Optional + Severity *string `json:"severity,omitempty" tf:"severity,omitempty"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies the duration (in ISO8601 format) to wait before notifying on the alert rule again. + // +kubebuilder:validation:Optional + ThrottlingDuration *string `json:"throttlingDuration,omitempty" tf:"throttling_duration,omitempty"` +} + +// MonitorSmartDetectorAlertRuleSpec defines the desired state of MonitorSmartDetectorAlertRule +type MonitorSmartDetectorAlertRuleSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider MonitorSmartDetectorAlertRuleParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider MonitorSmartDetectorAlertRuleInitParameters `json:"initProvider,omitempty"` +} + +// MonitorSmartDetectorAlertRuleStatus defines the observed state of MonitorSmartDetectorAlertRule. 
+type MonitorSmartDetectorAlertRuleStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider MonitorSmartDetectorAlertRuleObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// MonitorSmartDetectorAlertRule is the Schema for the MonitorSmartDetectorAlertRules API. Manages an Monitor Smart Detector Alert Rule. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type MonitorSmartDetectorAlertRule struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.actionGroup) || (has(self.initProvider) && has(self.initProvider.actionGroup))",message="spec.forProvider.actionGroup is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.detectorType) || (has(self.initProvider) && has(self.initProvider.detectorType))",message="spec.forProvider.detectorType is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.frequency) || (has(self.initProvider) && has(self.initProvider.frequency))",message="spec.forProvider.frequency is a 
required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.severity) || (has(self.initProvider) && has(self.initProvider.severity))",message="spec.forProvider.severity is a required parameter" + Spec MonitorSmartDetectorAlertRuleSpec `json:"spec"` + Status MonitorSmartDetectorAlertRuleStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// MonitorSmartDetectorAlertRuleList contains a list of MonitorSmartDetectorAlertRules +type MonitorSmartDetectorAlertRuleList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []MonitorSmartDetectorAlertRule `json:"items"` +} + +// Repository type metadata. +var ( + MonitorSmartDetectorAlertRule_Kind = "MonitorSmartDetectorAlertRule" + MonitorSmartDetectorAlertRule_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: MonitorSmartDetectorAlertRule_Kind}.String() + MonitorSmartDetectorAlertRule_KindAPIVersion = MonitorSmartDetectorAlertRule_Kind + "." 
+ CRDGroupVersion.String() + MonitorSmartDetectorAlertRule_GroupVersionKind = CRDGroupVersion.WithKind(MonitorSmartDetectorAlertRule_Kind) +) + +func init() { + SchemeBuilder.Register(&MonitorSmartDetectorAlertRule{}, &MonitorSmartDetectorAlertRuleList{}) +} diff --git a/apis/apimanagement/v1beta1/zz_apioperationpolicy_types.go b/apis/apimanagement/v1beta1/zz_apioperationpolicy_types.go index 6f013f6b0..b529df36b 100755 --- a/apis/apimanagement/v1beta1/zz_apioperationpolicy_types.go +++ b/apis/apimanagement/v1beta1/zz_apioperationpolicy_types.go @@ -49,7 +49,7 @@ type APIOperationPolicyObservation struct { type APIOperationPolicyParameters struct { // The name of the API Management Service. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta1.APIOperation + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.APIOperation // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("api_management_name",false) // +kubebuilder:validation:Optional APIManagementName *string `json:"apiManagementName,omitempty" tf:"api_management_name,omitempty"` @@ -63,7 +63,7 @@ type APIOperationPolicyParameters struct { APIManagementNameSelector *v1.Selector `json:"apiManagementNameSelector,omitempty" tf:"-"` // The name of the API within the API Management Service where the Operation exists. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta1.APIOperation + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.APIOperation // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("api_name",false) // +kubebuilder:validation:Optional APIName *string `json:"apiName,omitempty" tf:"api_name,omitempty"` @@ -77,7 +77,7 @@ type APIOperationPolicyParameters struct { APINameSelector *v1.Selector `json:"apiNameSelector,omitempty" tf:"-"` // The operation identifier within an API. Must be unique in the current API Management service instance. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta1.APIOperation + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.APIOperation // +kubebuilder:validation:Optional OperationID *string `json:"operationId,omitempty" tf:"operation_id,omitempty"` diff --git a/apis/apimanagement/v1beta1/zz_apioperationtag_types.go b/apis/apimanagement/v1beta1/zz_apioperationtag_types.go index 16170cc69..a091728cb 100755 --- a/apis/apimanagement/v1beta1/zz_apioperationtag_types.go +++ b/apis/apimanagement/v1beta1/zz_apioperationtag_types.go @@ -34,7 +34,7 @@ type APIOperationTagObservation struct { type APIOperationTagParameters struct { // The ID of the API Management API Operation. Changing this forces a new API Management API Operation Tag to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta1.APIOperation + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.APIOperation // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional APIOperationID *string `json:"apiOperationId,omitempty" tf:"api_operation_id,omitempty"` diff --git a/apis/apimanagement/v1beta1/zz_apipolicy_types.go b/apis/apimanagement/v1beta1/zz_apipolicy_types.go index af21843d7..faf101daa 100755 --- a/apis/apimanagement/v1beta1/zz_apipolicy_types.go +++ b/apis/apimanagement/v1beta1/zz_apipolicy_types.go @@ -46,7 +46,7 @@ type APIPolicyObservation struct { type APIPolicyParameters struct { // The name of the API Management Service. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta1.Management + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.Management // +kubebuilder:validation:Optional APIManagementName *string `json:"apiManagementName,omitempty" tf:"api_management_name,omitempty"` @@ -59,7 +59,7 @@ type APIPolicyParameters struct { APIManagementNameSelector *v1.Selector `json:"apiManagementNameSelector,omitempty" tf:"-"` // The ID of the API Management API within the API Management Service. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta1.API + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.API // +kubebuilder:validation:Optional APIName *string `json:"apiName,omitempty" tf:"api_name,omitempty"` diff --git a/apis/apimanagement/v1beta1/zz_apirelease_types.go b/apis/apimanagement/v1beta1/zz_apirelease_types.go index 1516ac559..d063d01c3 100755 --- a/apis/apimanagement/v1beta1/zz_apirelease_types.go +++ b/apis/apimanagement/v1beta1/zz_apirelease_types.go @@ -34,7 +34,7 @@ type APIReleaseObservation struct { type APIReleaseParameters struct { // The ID of the API Management API. Changing this forces a new API Management API Release to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta1.API + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.API // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional APIID *string `json:"apiId,omitempty" tf:"api_id,omitempty"` diff --git a/apis/apimanagement/v1beta1/zz_apischema_types.go b/apis/apimanagement/v1beta1/zz_apischema_types.go index b9a8d324c..f5876b5dd 100755 --- a/apis/apimanagement/v1beta1/zz_apischema_types.go +++ b/apis/apimanagement/v1beta1/zz_apischema_types.go @@ -58,7 +58,7 @@ type APISchemaObservation struct { type APISchemaParameters struct { // The Name of the API Management Service where the API exists. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta1.Management + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.Management // +kubebuilder:validation:Optional APIManagementName *string `json:"apiManagementName,omitempty" tf:"api_management_name,omitempty"` @@ -71,7 +71,7 @@ type APISchemaParameters struct { APIManagementNameSelector *v1.Selector `json:"apiManagementNameSelector,omitempty" tf:"-"` // The name of the API within the API Management Service where this API Schema should be created. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta1.API + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.API // +kubebuilder:validation:Optional APIName *string `json:"apiName,omitempty" tf:"api_name,omitempty"` diff --git a/apis/apimanagement/v1beta1/zz_apitag_types.go b/apis/apimanagement/v1beta1/zz_apitag_types.go index c9b9e8bc2..79e7c36a0 100755 --- a/apis/apimanagement/v1beta1/zz_apitag_types.go +++ b/apis/apimanagement/v1beta1/zz_apitag_types.go @@ -28,7 +28,7 @@ type APITagObservation struct { type APITagParameters struct { // The ID of the API Management API. Changing this forces a new API Management API Tag to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta1.API + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.API // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional APIID *string `json:"apiId,omitempty" tf:"api_id,omitempty"` diff --git a/apis/apimanagement/v1beta1/zz_apiversionset_types.go b/apis/apimanagement/v1beta1/zz_apiversionset_types.go index 610770d7e..b0970959f 100755 --- a/apis/apimanagement/v1beta1/zz_apiversionset_types.go +++ b/apis/apimanagement/v1beta1/zz_apiversionset_types.go @@ -61,7 +61,7 @@ type APIVersionSetObservation struct { type APIVersionSetParameters struct { // The name of the API Management Service in which the API Version Set should exist. May only contain alphanumeric characters and dashes up to 50 characters in length. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta1.Management + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.Management // +kubebuilder:validation:Optional APIManagementName *string `json:"apiManagementName,omitempty" tf:"api_management_name,omitempty"` diff --git a/apis/apimanagement/v1beta1/zz_authorizationserver_types.go b/apis/apimanagement/v1beta1/zz_authorizationserver_types.go index dddd58141..02de11767 100755 --- a/apis/apimanagement/v1beta1/zz_authorizationserver_types.go +++ b/apis/apimanagement/v1beta1/zz_authorizationserver_types.go @@ -123,7 +123,7 @@ type AuthorizationServerObservation struct { type AuthorizationServerParameters struct { // The name of the API Management Service in which this Authorization Server should be created. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta1.Management + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.Management // +kubebuilder:validation:Optional APIManagementName *string `json:"apiManagementName,omitempty" tf:"api_management_name,omitempty"` diff --git a/apis/apimanagement/v1beta1/zz_certificate_types.go b/apis/apimanagement/v1beta1/zz_certificate_types.go index fffad6dfc..968810d5a 100755 --- a/apis/apimanagement/v1beta1/zz_certificate_types.go +++ b/apis/apimanagement/v1beta1/zz_certificate_types.go @@ -19,7 +19,7 @@ type CertificateInitParameters_2 struct { KeyVaultIdentityClientID *string `json:"keyVaultIdentityClientId,omitempty" tf:"key_vault_identity_client_id,omitempty"` // The ID of the Key Vault Secret containing the SSL Certificate, which must be of the type application/x-pkcs12. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta1.Certificate + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta2.Certificate // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("secret_id",true) KeyVaultSecretID *string `json:"keyVaultSecretId,omitempty" tf:"key_vault_secret_id,omitempty"` @@ -62,7 +62,7 @@ type CertificateObservation_2 struct { type CertificateParameters_2 struct { // The Name of the API Management Service where this Service should be created. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta1.Management + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.Management // +kubebuilder:validation:Optional APIManagementName *string `json:"apiManagementName,omitempty" tf:"api_management_name,omitempty"` @@ -83,7 +83,7 @@ type CertificateParameters_2 struct { KeyVaultIdentityClientID *string `json:"keyVaultIdentityClientId,omitempty" tf:"key_vault_identity_client_id,omitempty"` // The ID of the Key Vault Secret containing the SSL Certificate, which must be of the type application/x-pkcs12. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta1.Certificate + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta2.Certificate // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("secret_id",true) // +kubebuilder:validation:Optional KeyVaultSecretID *string `json:"keyVaultSecretId,omitempty" tf:"key_vault_secret_id,omitempty"` diff --git a/apis/apimanagement/v1beta1/zz_customdomain_types.go b/apis/apimanagement/v1beta1/zz_customdomain_types.go index 8c6b2afd7..7539c1e6c 100755 --- a/apis/apimanagement/v1beta1/zz_customdomain_types.go +++ b/apis/apimanagement/v1beta1/zz_customdomain_types.go @@ -19,7 +19,7 @@ type CustomDomainDeveloperPortalInitParameters struct { HostName *string `json:"hostName,omitempty" tf:"host_name,omitempty"` // The ID of the Key Vault Secret containing the SSL Certificate, which must be should be of the type application/x-pkcs12. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta1.Certificate + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta2.Certificate // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("versionless_secret_id",true) KeyVaultID *string `json:"keyVaultId,omitempty" tf:"key_vault_id,omitempty"` @@ -77,7 +77,7 @@ type CustomDomainDeveloperPortalParameters struct { HostName *string `json:"hostName" tf:"host_name,omitempty"` // The ID of the Key Vault Secret containing the SSL Certificate, which must be should be of the type application/x-pkcs12. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta1.Certificate + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta2.Certificate // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("versionless_secret_id",true) // +kubebuilder:validation:Optional KeyVaultID *string `json:"keyVaultId,omitempty" tf:"key_vault_id,omitempty"` @@ -210,7 +210,7 @@ type CustomDomainObservation struct { type CustomDomainParameters struct { // The ID of the API Management service for which to configure Custom Domains. Changing this forces a new API Management Custom Domain resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta1.Management + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.Management // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional APIManagementID *string `json:"apiManagementId,omitempty" tf:"api_management_id,omitempty"` @@ -385,7 +385,7 @@ type GatewayInitParameters struct { HostName *string `json:"hostName,omitempty" tf:"host_name,omitempty"` // The ID of the Key Vault Secret containing the SSL Certificate, which must be should be of the type application/x-pkcs12. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta1.Certificate + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta2.Certificate // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("versionless_secret_id",true) KeyVaultID *string `json:"keyVaultId,omitempty" tf:"key_vault_id,omitempty"` @@ -450,7 +450,7 @@ type GatewayParameters struct { HostName *string `json:"hostName" tf:"host_name,omitempty"` // The ID of the Key Vault Secret containing the SSL Certificate, which must be should be of the type application/x-pkcs12. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta1.Certificate + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta2.Certificate // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("versionless_secret_id",true) // +kubebuilder:validation:Optional KeyVaultID *string `json:"keyVaultId,omitempty" tf:"key_vault_id,omitempty"` diff --git a/apis/apimanagement/v1beta1/zz_emailtemplate_types.go b/apis/apimanagement/v1beta1/zz_emailtemplate_types.go index e9ed18b2f..ac9dd16b6 100755 --- a/apis/apimanagement/v1beta1/zz_emailtemplate_types.go +++ b/apis/apimanagement/v1beta1/zz_emailtemplate_types.go @@ -52,7 +52,7 @@ type EmailTemplateObservation struct { type EmailTemplateParameters struct { // The name of the API Management Service in which the Email Template should exist. Changing this forces a new API Management Email Template to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta1.Management + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.Management // +kubebuilder:validation:Optional APIManagementName *string `json:"apiManagementName,omitempty" tf:"api_management_name,omitempty"` diff --git a/apis/apimanagement/v1beta1/zz_gatewayapi_types.go b/apis/apimanagement/v1beta1/zz_gatewayapi_types.go index 76c445d90..f97fb5451 100755 --- a/apis/apimanagement/v1beta1/zz_gatewayapi_types.go +++ b/apis/apimanagement/v1beta1/zz_gatewayapi_types.go @@ -16,7 +16,7 @@ import ( type GatewayAPIInitParameters struct { // The Identifier of the API Management API within the API Management Service. Changing this forces a new API Management Gateway API to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta1.API + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.API // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() APIID *string `json:"apiId,omitempty" tf:"api_id,omitempty"` @@ -29,7 +29,7 @@ type GatewayAPIInitParameters struct { APIIDSelector *v1.Selector `json:"apiIdSelector,omitempty" tf:"-"` // The Identifier for the API Management Gateway. Changing this forces a new API Management Gateway API to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta1.Gateway + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.Gateway // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() GatewayID *string `json:"gatewayId,omitempty" tf:"gateway_id,omitempty"` @@ -57,7 +57,7 @@ type GatewayAPIObservation struct { type GatewayAPIParameters struct { // The Identifier of the API Management API within the API Management Service. Changing this forces a new API Management Gateway API to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta1.API + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.API // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() // +kubebuilder:validation:Optional APIID *string `json:"apiId,omitempty" tf:"api_id,omitempty"` @@ -71,7 +71,7 @@ type GatewayAPIParameters struct { APIIDSelector *v1.Selector `json:"apiIdSelector,omitempty" tf:"-"` // The Identifier for the API Management Gateway. Changing this forces a new API Management Gateway API to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta1.Gateway + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.Gateway // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() // +kubebuilder:validation:Optional GatewayID *string `json:"gatewayId,omitempty" tf:"gateway_id,omitempty"` diff --git a/apis/apimanagement/v1beta1/zz_generated.conversion_hubs.go b/apis/apimanagement/v1beta1/zz_generated.conversion_hubs.go index f893ce69b..b385a9ef0 100755 --- a/apis/apimanagement/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/apimanagement/v1beta1/zz_generated.conversion_hubs.go @@ -6,18 +6,6 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *Management) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *API) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *APIDiagnostic) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *APIOperation) Hub() {} - // Hub marks this type as a conversion hub. func (tr *APIOperationPolicy) Hub() {} @@ -42,24 +30,15 @@ func (tr *APIVersionSet) Hub() {} // Hub marks this type as a conversion hub. func (tr *AuthorizationServer) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *Backend) Hub() {} - // Hub marks this type as a conversion hub. func (tr *Certificate) Hub() {} // Hub marks this type as a conversion hub. func (tr *CustomDomain) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *Diagnostic) Hub() {} - // Hub marks this type as a conversion hub. func (tr *EmailTemplate) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *Gateway) Hub() {} - // Hub marks this type as a conversion hub. func (tr *GatewayAPI) Hub() {} @@ -81,12 +60,6 @@ func (tr *IdentityProviderMicrosoft) Hub() {} // Hub marks this type as a conversion hub. 
func (tr *IdentityProviderTwitter) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *Logger) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *NamedValue) Hub() {} - // Hub marks this type as a conversion hub. func (tr *NotificationRecipientEmail) Hub() {} diff --git a/apis/apimanagement/v1beta1/zz_generated.conversion_spokes.go b/apis/apimanagement/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..9beee7955 --- /dev/null +++ b/apis/apimanagement/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,194 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this API to the hub type. +func (tr *API) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the API type. +func (tr *API) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this APIDiagnostic to the hub type. 
+func (tr *APIDiagnostic) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the APIDiagnostic type. +func (tr *APIDiagnostic) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this APIOperation to the hub type. +func (tr *APIOperation) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the APIOperation type. +func (tr *APIOperation) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Backend to the hub type. 
+func (tr *Backend) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Backend type. +func (tr *Backend) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Diagnostic to the hub type. +func (tr *Diagnostic) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Diagnostic type. +func (tr *Diagnostic) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Gateway to the hub type. 
+func (tr *Gateway) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Gateway type. +func (tr *Gateway) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Logger to the hub type. +func (tr *Logger) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Logger type. +func (tr *Logger) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Management to the hub type. 
+func (tr *Management) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Management type. +func (tr *Management) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this NamedValue to the hub type. +func (tr *NamedValue) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the NamedValue type. 
+func (tr *NamedValue) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/apimanagement/v1beta1/zz_generated.resolvers.go b/apis/apimanagement/v1beta1/zz_generated.resolvers.go index 7a935817c..1f52875f4 100644 --- a/apis/apimanagement/v1beta1/zz_generated.resolvers.go +++ b/apis/apimanagement/v1beta1/zz_generated.resolvers.go @@ -254,7 +254,7 @@ func (mg *APIOperationPolicy) ResolveReferences(ctx context.Context, c client.Re var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta1", "APIOperation", "APIOperationList") + m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "APIOperation", "APIOperationList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -273,7 +273,7 @@ func (mg *APIOperationPolicy) ResolveReferences(ctx context.Context, c client.Re mg.Spec.ForProvider.APIManagementName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.APIManagementNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta1", "APIOperation", "APIOperationList") + m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "APIOperation", "APIOperationList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -292,7 +292,7 @@ func (mg *APIOperationPolicy) ResolveReferences(ctx context.Context, c client.Re 
mg.Spec.ForProvider.APIName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.APINameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta1", "APIOperation", "APIOperationList") + m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "APIOperation", "APIOperationList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -342,7 +342,7 @@ func (mg *APIOperationTag) ResolveReferences(ctx context.Context, c client.Reade var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta1", "APIOperation", "APIOperationList") + m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "APIOperation", "APIOperationList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -373,7 +373,7 @@ func (mg *APIPolicy) ResolveReferences(ctx context.Context, c client.Reader) err var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta1", "Management", "ManagementList") + m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "Management", "ManagementList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -392,7 +392,7 @@ func (mg *APIPolicy) ResolveReferences(ctx context.Context, c client.Reader) err mg.Spec.ForProvider.APIManagementName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.APIManagementNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta1", "API", "APIList") + m, l, err = 
apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "API", "APIList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -442,7 +442,7 @@ func (mg *APIRelease) ResolveReferences(ctx context.Context, c client.Reader) er var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta1", "API", "APIList") + m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "API", "APIList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -473,7 +473,7 @@ func (mg *APISchema) ResolveReferences(ctx context.Context, c client.Reader) err var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta1", "Management", "ManagementList") + m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "Management", "ManagementList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -492,7 +492,7 @@ func (mg *APISchema) ResolveReferences(ctx context.Context, c client.Reader) err mg.Spec.ForProvider.APIManagementName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.APIManagementNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta1", "API", "APIList") + m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "API", "APIList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -542,7 +542,7 @@ func (mg *APITag) ResolveReferences(ctx context.Context, c client.Reader) error var rsp 
reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta1", "API", "APIList") + m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "API", "APIList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -573,7 +573,7 @@ func (mg *APIVersionSet) ResolveReferences(ctx context.Context, c client.Reader) var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta1", "Management", "ManagementList") + m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "Management", "ManagementList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -623,7 +623,7 @@ func (mg *AuthorizationServer) ResolveReferences(ctx context.Context, c client.R var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta1", "Management", "ManagementList") + m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "Management", "ManagementList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -723,7 +723,7 @@ func (mg *Certificate) ResolveReferences(ctx context.Context, c client.Reader) e var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta1", "Management", "ManagementList") + m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "Management", "ManagementList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference 
resolution") } @@ -742,7 +742,7 @@ func (mg *Certificate) ResolveReferences(ctx context.Context, c client.Reader) e mg.Spec.ForProvider.APIManagementName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.APIManagementNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta1", "Certificate", "CertificateList") + m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta2", "Certificate", "CertificateList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -780,7 +780,7 @@ func (mg *Certificate) ResolveReferences(ctx context.Context, c client.Reader) e mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta1", "Certificate", "CertificateList") + m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta2", "Certificate", "CertificateList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -811,7 +811,7 @@ func (mg *CustomDomain) ResolveReferences(ctx context.Context, c client.Reader) var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta1", "Management", "ManagementList") + m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "Management", "ManagementList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -832,7 +832,7 @@ func (mg *CustomDomain) ResolveReferences(ctx context.Context, c client.Reader) for i3 := 0; i3 < len(mg.Spec.ForProvider.DeveloperPortal); i3++ { { - m, l, err = 
apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta1", "Certificate", "CertificateList") + m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta2", "Certificate", "CertificateList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -853,7 +853,7 @@ func (mg *CustomDomain) ResolveReferences(ctx context.Context, c client.Reader) } for i3 := 0; i3 < len(mg.Spec.ForProvider.Gateway); i3++ { { - m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta1", "Certificate", "CertificateList") + m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta2", "Certificate", "CertificateList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -874,7 +874,7 @@ func (mg *CustomDomain) ResolveReferences(ctx context.Context, c client.Reader) } for i3 := 0; i3 < len(mg.Spec.InitProvider.DeveloperPortal); i3++ { { - m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta1", "Certificate", "CertificateList") + m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta2", "Certificate", "CertificateList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -895,7 +895,7 @@ func (mg *CustomDomain) ResolveReferences(ctx context.Context, c client.Reader) } for i3 := 0; i3 < len(mg.Spec.InitProvider.Gateway); i3++ { { - m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta1", "Certificate", "CertificateList") + m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta2", "Certificate", "CertificateList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1015,7 
+1015,7 @@ func (mg *EmailTemplate) ResolveReferences(ctx context.Context, c client.Reader) var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta1", "Management", "ManagementList") + m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "Management", "ManagementList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1115,7 +1115,7 @@ func (mg *GatewayAPI) ResolveReferences(ctx context.Context, c client.Reader) er var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta1", "API", "APIList") + m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "API", "APIList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1134,7 +1134,7 @@ func (mg *GatewayAPI) ResolveReferences(ctx context.Context, c client.Reader) er mg.Spec.ForProvider.APIID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.APIIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta1", "Gateway", "GatewayList") + m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "Gateway", "GatewayList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1153,7 +1153,7 @@ func (mg *GatewayAPI) ResolveReferences(ctx context.Context, c client.Reader) er mg.Spec.ForProvider.GatewayID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.GatewayIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta1", "API", "APIList") + m, l, err = 
apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "API", "APIList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1172,7 +1172,7 @@ func (mg *GatewayAPI) ResolveReferences(ctx context.Context, c client.Reader) er mg.Spec.InitProvider.APIID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.APIIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta1", "Gateway", "GatewayList") + m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "Gateway", "GatewayList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1203,7 +1203,7 @@ func (mg *GlobalSchema) ResolveReferences(ctx context.Context, c client.Reader) var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta1", "Management", "ManagementList") + m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "Management", "ManagementList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1253,7 +1253,7 @@ func (mg *IdentityProviderAAD) ResolveReferences(ctx context.Context, c client.R var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta1", "Management", "ManagementList") + m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "Management", "ManagementList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1303,7 +1303,7 @@ func (mg *IdentityProviderFacebook) ResolveReferences(ctx context.Context, c 
cli var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta1", "Management", "ManagementList") + m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "Management", "ManagementList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1341,7 +1341,7 @@ func (mg *IdentityProviderFacebook) ResolveReferences(ctx context.Context, c cli mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta1", "Management", "ManagementList") + m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "Management", "ManagementList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1391,7 +1391,7 @@ func (mg *IdentityProviderGoogle) ResolveReferences(ctx context.Context, c clien var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta1", "Management", "ManagementList") + m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "Management", "ManagementList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1429,7 +1429,7 @@ func (mg *IdentityProviderGoogle) ResolveReferences(ctx context.Context, c clien mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta1", "Management", "ManagementList") + m, l, err = 
apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "Management", "ManagementList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1479,7 +1479,7 @@ func (mg *IdentityProviderMicrosoft) ResolveReferences(ctx context.Context, c cl var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta1", "Management", "ManagementList") + m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "Management", "ManagementList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1517,7 +1517,7 @@ func (mg *IdentityProviderMicrosoft) ResolveReferences(ctx context.Context, c cl mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta1", "Management", "ManagementList") + m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "Management", "ManagementList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1567,7 +1567,7 @@ func (mg *IdentityProviderTwitter) ResolveReferences(ctx context.Context, c clie var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta1", "Management", "ManagementList") + m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "Management", "ManagementList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1605,7 +1605,7 @@ func (mg 
*IdentityProviderTwitter) ResolveReferences(ctx context.Context, c clie mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta1", "Management", "ManagementList") + m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "Management", "ManagementList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1913,7 +1913,7 @@ func (mg *NotificationRecipientEmail) ResolveReferences(ctx context.Context, c c var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta1", "Management", "ManagementList") + m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "Management", "ManagementList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1944,7 +1944,7 @@ func (mg *NotificationRecipientUser) ResolveReferences(ctx context.Context, c cl var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta1", "Management", "ManagementList") + m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "Management", "ManagementList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1982,7 +1982,7 @@ func (mg *NotificationRecipientUser) ResolveReferences(ctx context.Context, c cl mg.Spec.ForProvider.UserID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.UserIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta1", 
"Management", "ManagementList") + m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "Management", "ManagementList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -2032,7 +2032,7 @@ func (mg *OpenIDConnectProvider) ResolveReferences(ctx context.Context, c client var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta1", "Management", "ManagementList") + m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "Management", "ManagementList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -2082,7 +2082,7 @@ func (mg *Policy) ResolveReferences(ctx context.Context, c client.Reader) error var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta1", "Management", "ManagementList") + m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "Management", "ManagementList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -2113,7 +2113,7 @@ func (mg *Product) ResolveReferences(ctx context.Context, c client.Reader) error var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta1", "Management", "ManagementList") + m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "Management", "ManagementList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -2163,7 +2163,7 @@ func (mg *ProductAPI) ResolveReferences(ctx context.Context, c client.Reader) 
er var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta1", "Management", "ManagementList") + m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "Management", "ManagementList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -2182,7 +2182,7 @@ func (mg *ProductAPI) ResolveReferences(ctx context.Context, c client.Reader) er mg.Spec.ForProvider.APIManagementName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.APIManagementNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta1", "API", "APIList") + m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "API", "APIList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -2251,7 +2251,7 @@ func (mg *ProductPolicy) ResolveReferences(ctx context.Context, c client.Reader) var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta1", "Management", "ManagementList") + m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "Management", "ManagementList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -2320,7 +2320,7 @@ func (mg *ProductTag) ResolveReferences(ctx context.Context, c client.Reader) er var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta1", "Management", "ManagementList") + m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "Management", "ManagementList") if err != nil { 
return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -2396,7 +2396,7 @@ func (mg *ProductTag) ResolveReferences(ctx context.Context, c client.Reader) er mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta1", "Management", "ManagementList") + m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "Management", "ManagementList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -2484,7 +2484,7 @@ func (mg *RedisCache) ResolveReferences(ctx context.Context, c client.Reader) er var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta1", "Management", "ManagementList") + m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "Management", "ManagementList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -2503,7 +2503,7 @@ func (mg *RedisCache) ResolveReferences(ctx context.Context, c client.Reader) er mg.Spec.ForProvider.APIManagementID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.APIManagementIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("cache.azure.upbound.io", "v1beta1", "RedisCache", "RedisCacheList") + m, l, err = apisresolver.GetManagedResource("cache.azure.upbound.io", "v1beta2", "RedisCache", "RedisCacheList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -2522,7 +2522,7 @@ func (mg *RedisCache) ResolveReferences(ctx context.Context, c client.Reader) 
er mg.Spec.ForProvider.RedisCacheID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.RedisCacheIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("cache.azure.upbound.io", "v1beta1", "RedisCache", "RedisCacheList") + m, l, err = apisresolver.GetManagedResource("cache.azure.upbound.io", "v1beta2", "RedisCache", "RedisCacheList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -2553,7 +2553,7 @@ func (mg *Subscription) ResolveReferences(ctx context.Context, c client.Reader) var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta1", "Management", "ManagementList") + m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "Management", "ManagementList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -2679,7 +2679,7 @@ func (mg *Tag) ResolveReferences(ctx context.Context, c client.Reader) error { var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta1", "Management", "ManagementList") + m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "Management", "ManagementList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -2710,7 +2710,7 @@ func (mg *User) ResolveReferences(ctx context.Context, c client.Reader) error { var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta1", "Management", "ManagementList") + m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "Management", "ManagementList") if err != nil { return 
errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/apimanagement/v1beta1/zz_globalschema_types.go b/apis/apimanagement/v1beta1/zz_globalschema_types.go index 68c41be63..ad2d63331 100755 --- a/apis/apimanagement/v1beta1/zz_globalschema_types.go +++ b/apis/apimanagement/v1beta1/zz_globalschema_types.go @@ -49,7 +49,7 @@ type GlobalSchemaObservation struct { type GlobalSchemaParameters struct { // The Name of the API Management Service where the API exists. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta1.Management + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.Management // +kubebuilder:validation:Optional APIManagementName *string `json:"apiManagementName,omitempty" tf:"api_management_name,omitempty"` diff --git a/apis/apimanagement/v1beta1/zz_identityprovideraad_types.go b/apis/apimanagement/v1beta1/zz_identityprovideraad_types.go index 1e4b6e88e..3718bfbbc 100755 --- a/apis/apimanagement/v1beta1/zz_identityprovideraad_types.go +++ b/apis/apimanagement/v1beta1/zz_identityprovideraad_types.go @@ -49,7 +49,7 @@ type IdentityProviderAADObservation struct { type IdentityProviderAADParameters struct { // The Name of the API Management Service where this AAD Identity Provider should be created. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta1.Management + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.Management // +kubebuilder:validation:Optional APIManagementName *string `json:"apiManagementName,omitempty" tf:"api_management_name,omitempty"` diff --git a/apis/apimanagement/v1beta1/zz_identityproviderfacebook_types.go b/apis/apimanagement/v1beta1/zz_identityproviderfacebook_types.go index c8df2634c..dc8d6b508 100755 --- a/apis/apimanagement/v1beta1/zz_identityproviderfacebook_types.go +++ b/apis/apimanagement/v1beta1/zz_identityproviderfacebook_types.go @@ -16,7 +16,7 @@ import ( type IdentityProviderFacebookInitParameters struct { // The Name of the API Management Service where this Facebook Identity Provider should be created. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta1.Management + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.Management APIManagementName *string `json:"apiManagementName,omitempty" tf:"api_management_name,omitempty"` // Reference to a Management in apimanagement to populate apiManagementName. @@ -61,7 +61,7 @@ type IdentityProviderFacebookObservation struct { type IdentityProviderFacebookParameters struct { // The Name of the API Management Service where this Facebook Identity Provider should be created. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta1.Management + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.Management // +kubebuilder:validation:Optional APIManagementName *string `json:"apiManagementName,omitempty" tf:"api_management_name,omitempty"` diff --git a/apis/apimanagement/v1beta1/zz_identityprovidergoogle_types.go b/apis/apimanagement/v1beta1/zz_identityprovidergoogle_types.go index 3a2dbd4eb..15067c6dd 100755 --- a/apis/apimanagement/v1beta1/zz_identityprovidergoogle_types.go +++ b/apis/apimanagement/v1beta1/zz_identityprovidergoogle_types.go @@ -16,7 +16,7 @@ import ( type IdentityProviderGoogleInitParameters struct { // The Name of the API Management Service where this Google Identity Provider should be created. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta1.Management + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.Management APIManagementName *string `json:"apiManagementName,omitempty" tf:"api_management_name,omitempty"` // Reference to a Management in apimanagement to populate apiManagementName. @@ -61,7 +61,7 @@ type IdentityProviderGoogleObservation struct { type IdentityProviderGoogleParameters struct { // The Name of the API Management Service where this Google Identity Provider should be created. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta1.Management + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.Management // +kubebuilder:validation:Optional APIManagementName *string `json:"apiManagementName,omitempty" tf:"api_management_name,omitempty"` diff --git a/apis/apimanagement/v1beta1/zz_identityprovidermicrosoft_types.go b/apis/apimanagement/v1beta1/zz_identityprovidermicrosoft_types.go index 934c6f5ea..1e806b9bf 100755 --- a/apis/apimanagement/v1beta1/zz_identityprovidermicrosoft_types.go +++ b/apis/apimanagement/v1beta1/zz_identityprovidermicrosoft_types.go @@ -16,7 +16,7 @@ import ( type IdentityProviderMicrosoftInitParameters struct { // The Name of the API Management Service where this Microsoft Identity Provider should be created. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta1.Management + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.Management APIManagementName *string `json:"apiManagementName,omitempty" tf:"api_management_name,omitempty"` // Reference to a Management in apimanagement to populate apiManagementName. @@ -61,7 +61,7 @@ type IdentityProviderMicrosoftObservation struct { type IdentityProviderMicrosoftParameters struct { // The Name of the API Management Service where this Microsoft Identity Provider should be created. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta1.Management + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.Management // +kubebuilder:validation:Optional APIManagementName *string `json:"apiManagementName,omitempty" tf:"api_management_name,omitempty"` diff --git a/apis/apimanagement/v1beta1/zz_identityprovidertwitter_types.go b/apis/apimanagement/v1beta1/zz_identityprovidertwitter_types.go index d1b848205..4d2d37993 100755 --- a/apis/apimanagement/v1beta1/zz_identityprovidertwitter_types.go +++ b/apis/apimanagement/v1beta1/zz_identityprovidertwitter_types.go @@ -16,7 +16,7 @@ import ( type IdentityProviderTwitterInitParameters struct { // The Name of the API Management Service where this Twitter Identity Provider should be created. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta1.Management + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.Management APIManagementName *string `json:"apiManagementName,omitempty" tf:"api_management_name,omitempty"` // Reference to a Management in apimanagement to populate apiManagementName. @@ -59,7 +59,7 @@ type IdentityProviderTwitterParameters struct { APIKeySecretRef v1.SecretKeySelector `json:"apiKeySecretRef" tf:"-"` // The Name of the API Management Service where this Twitter Identity Provider should be created. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta1.Management + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.Management // +kubebuilder:validation:Optional APIManagementName *string `json:"apiManagementName,omitempty" tf:"api_management_name,omitempty"` diff --git a/apis/apimanagement/v1beta1/zz_notificationrecipientemail_types.go b/apis/apimanagement/v1beta1/zz_notificationrecipientemail_types.go index f4cc0b0ba..ecca1b4d8 100755 --- a/apis/apimanagement/v1beta1/zz_notificationrecipientemail_types.go +++ b/apis/apimanagement/v1beta1/zz_notificationrecipientemail_types.go @@ -34,7 +34,7 @@ type NotificationRecipientEmailObservation struct { type NotificationRecipientEmailParameters struct { // The ID of the API Management Service from which to create this Notification Recipient Email. Changing this forces a new API Management Notification Recipient Email to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta1.Management + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.Management // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional APIManagementID *string `json:"apiManagementId,omitempty" tf:"api_management_id,omitempty"` diff --git a/apis/apimanagement/v1beta1/zz_notificationrecipientuser_types.go b/apis/apimanagement/v1beta1/zz_notificationrecipientuser_types.go index 8917745d4..a051345fb 100755 --- a/apis/apimanagement/v1beta1/zz_notificationrecipientuser_types.go +++ b/apis/apimanagement/v1beta1/zz_notificationrecipientuser_types.go @@ -16,7 +16,7 @@ import ( type NotificationRecipientUserInitParameters struct { // The ID of the API Management Service from which to create this Notification Recipient User. 
Changing this forces a new API Management Notification Recipient User to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta1.Management + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.Management // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() APIManagementID *string `json:"apiManagementId,omitempty" tf:"api_management_id,omitempty"` @@ -62,7 +62,7 @@ type NotificationRecipientUserObservation struct { type NotificationRecipientUserParameters struct { // The ID of the API Management Service from which to create this Notification Recipient User. Changing this forces a new API Management Notification Recipient User to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta1.Management + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.Management // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional APIManagementID *string `json:"apiManagementId,omitempty" tf:"api_management_id,omitempty"` diff --git a/apis/apimanagement/v1beta1/zz_openidconnectprovider_types.go b/apis/apimanagement/v1beta1/zz_openidconnectprovider_types.go index 3a209e18a..1f8afe7da 100755 --- a/apis/apimanagement/v1beta1/zz_openidconnectprovider_types.go +++ b/apis/apimanagement/v1beta1/zz_openidconnectprovider_types.go @@ -49,7 +49,7 @@ type OpenIDConnectProviderObservation struct { type OpenIDConnectProviderParameters struct { // The name of the API Management Service in which this OpenID Connect Provider should be created. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta1.Management + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.Management // +kubebuilder:validation:Optional APIManagementName *string `json:"apiManagementName,omitempty" tf:"api_management_name,omitempty"` diff --git a/apis/apimanagement/v1beta1/zz_policy_types.go b/apis/apimanagement/v1beta1/zz_policy_types.go index f3d62f634..02e002101 100755 --- a/apis/apimanagement/v1beta1/zz_policy_types.go +++ b/apis/apimanagement/v1beta1/zz_policy_types.go @@ -40,7 +40,7 @@ type PolicyObservation_2 struct { type PolicyParameters_2 struct { // The ID of the API Management service. Changing this forces a new API Management service Policy to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta1.Management + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.Management // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional APIManagementID *string `json:"apiManagementId,omitempty" tf:"api_management_id,omitempty"` diff --git a/apis/apimanagement/v1beta1/zz_product_types.go b/apis/apimanagement/v1beta1/zz_product_types.go index 1a487ea02..aafa98c87 100755 --- a/apis/apimanagement/v1beta1/zz_product_types.go +++ b/apis/apimanagement/v1beta1/zz_product_types.go @@ -73,7 +73,7 @@ type ProductObservation struct { type ProductParameters struct { // The name of the API Management Service. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta1.Management + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.Management // +kubebuilder:validation:Optional APIManagementName *string `json:"apiManagementName,omitempty" tf:"api_management_name,omitempty"` diff --git a/apis/apimanagement/v1beta1/zz_productapi_types.go b/apis/apimanagement/v1beta1/zz_productapi_types.go index 32ce6e075..7b2c803a0 100755 --- a/apis/apimanagement/v1beta1/zz_productapi_types.go +++ b/apis/apimanagement/v1beta1/zz_productapi_types.go @@ -37,7 +37,7 @@ type ProductAPIObservation struct { type ProductAPIParameters struct { // The name of the API Management Service. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta1.Management + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.Management // +kubebuilder:validation:Optional APIManagementName *string `json:"apiManagementName,omitempty" tf:"api_management_name,omitempty"` @@ -50,7 +50,7 @@ type ProductAPIParameters struct { APIManagementNameSelector *v1.Selector `json:"apiManagementNameSelector,omitempty" tf:"-"` // The Name of the API Management API within the API Management Service. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta1.API + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.API // +kubebuilder:validation:Optional APIName *string `json:"apiName,omitempty" tf:"api_name,omitempty"` diff --git a/apis/apimanagement/v1beta1/zz_productpolicy_types.go b/apis/apimanagement/v1beta1/zz_productpolicy_types.go index 72d361b3c..b065c157e 100755 --- a/apis/apimanagement/v1beta1/zz_productpolicy_types.go +++ b/apis/apimanagement/v1beta1/zz_productpolicy_types.go @@ -46,7 +46,7 @@ type ProductPolicyObservation struct { type ProductPolicyParameters struct { // The name of the API Management Service. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta1.Management + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.Management // +kubebuilder:validation:Optional APIManagementName *string `json:"apiManagementName,omitempty" tf:"api_management_name,omitempty"` diff --git a/apis/apimanagement/v1beta1/zz_producttag_types.go b/apis/apimanagement/v1beta1/zz_producttag_types.go index 8a4783c53..74f9cf0a9 100755 --- a/apis/apimanagement/v1beta1/zz_producttag_types.go +++ b/apis/apimanagement/v1beta1/zz_producttag_types.go @@ -16,7 +16,7 @@ import ( type ProductTagInitParameters struct { // The name of the API Management Service. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta1.Management + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.Management APIManagementName *string `json:"apiManagementName,omitempty" tf:"api_management_name,omitempty"` // Reference to a Management in apimanagement to populate apiManagementName. 
@@ -85,7 +85,7 @@ type ProductTagObservation struct { type ProductTagParameters struct { // The name of the API Management Service. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta1.Management + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.Management // +kubebuilder:validation:Optional APIManagementName *string `json:"apiManagementName,omitempty" tf:"api_management_name,omitempty"` diff --git a/apis/apimanagement/v1beta1/zz_rediscache_types.go b/apis/apimanagement/v1beta1/zz_rediscache_types.go index 40c0aed89..4361f1b79 100755 --- a/apis/apimanagement/v1beta1/zz_rediscache_types.go +++ b/apis/apimanagement/v1beta1/zz_rediscache_types.go @@ -22,7 +22,7 @@ type RedisCacheInitParameters struct { Description *string `json:"description,omitempty" tf:"description,omitempty"` // The resource ID of the Cache for Redis. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cache/v1beta1.RedisCache + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cache/v1beta2.RedisCache // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() RedisCacheID *string `json:"redisCacheId,omitempty" tf:"redis_cache_id,omitempty"` @@ -56,7 +56,7 @@ type RedisCacheObservation struct { type RedisCacheParameters struct { // The resource ID of the API Management Service from which to create this external cache. Changing this forces a new API Management Redis Cache to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta1.Management + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.Management // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional APIManagementID *string `json:"apiManagementId,omitempty" tf:"api_management_id,omitempty"` @@ -82,7 +82,7 @@ type RedisCacheParameters struct { Description *string `json:"description,omitempty" tf:"description,omitempty"` // The resource ID of the Cache for Redis. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cache/v1beta1.RedisCache + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cache/v1beta2.RedisCache // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional RedisCacheID *string `json:"redisCacheId,omitempty" tf:"redis_cache_id,omitempty"` diff --git a/apis/apimanagement/v1beta1/zz_subscription_types.go b/apis/apimanagement/v1beta1/zz_subscription_types.go index b3f69051d..5bcefdf8e 100755 --- a/apis/apimanagement/v1beta1/zz_subscription_types.go +++ b/apis/apimanagement/v1beta1/zz_subscription_types.go @@ -91,7 +91,7 @@ type SubscriptionParameters struct { APIID *string `json:"apiId,omitempty" tf:"api_id,omitempty"` // The name of the API Management Service where this Subscription should be created. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta1.Management + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.Management // +kubebuilder:validation:Optional APIManagementName *string `json:"apiManagementName,omitempty" tf:"api_management_name,omitempty"` diff --git a/apis/apimanagement/v1beta1/zz_tag_types.go b/apis/apimanagement/v1beta1/zz_tag_types.go index 47ac94b3d..7adbf367d 100755 --- a/apis/apimanagement/v1beta1/zz_tag_types.go +++ b/apis/apimanagement/v1beta1/zz_tag_types.go @@ -34,7 +34,7 @@ type TagObservation struct { type TagParameters struct { // The ID of the API Management. Changing this forces a new API Management Tag to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta1.Management + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.Management // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional APIManagementID *string `json:"apiManagementId,omitempty" tf:"api_management_id,omitempty"` diff --git a/apis/apimanagement/v1beta1/zz_user_types.go b/apis/apimanagement/v1beta1/zz_user_types.go index c8397e89d..6a1bdae30 100755 --- a/apis/apimanagement/v1beta1/zz_user_types.go +++ b/apis/apimanagement/v1beta1/zz_user_types.go @@ -67,7 +67,7 @@ type UserObservation struct { type UserParameters struct { // The name of the API Management Service in which the User should be created. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta1.Management + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.Management // +kubebuilder:validation:Optional APIManagementName *string `json:"apiManagementName,omitempty" tf:"api_management_name,omitempty"` diff --git a/apis/apimanagement/v1beta2/zz_api_terraformed.go b/apis/apimanagement/v1beta2/zz_api_terraformed.go new file mode 100755 index 000000000..93d8bb473 --- /dev/null +++ b/apis/apimanagement/v1beta2/zz_api_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this API +func (mg *API) GetTerraformResourceType() string { + return "azurerm_api_management_api" +} + +// GetConnectionDetailsMapping for this API +func (tr *API) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this API +func (tr *API) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this API +func (tr *API) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this API +func (tr *API) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this API +func (tr *API) GetParameters() (map[string]any, 
error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this API +func (tr *API) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this API +func (tr *API) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this API +func (tr *API) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this API using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *API) LateInitialize(attrs []byte) (bool, error) { + params := &APIParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *API) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/apimanagement/v1beta2/zz_api_types.go b/apis/apimanagement/v1beta2/zz_api_types.go new file mode 100755 index 000000000..0dacfaf88 --- /dev/null +++ b/apis/apimanagement/v1beta2/zz_api_types.go @@ -0,0 +1,560 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type APIInitParameters struct { + + // Type of API. Possible values are graphql, http, soap, and websocket. Defaults to http. + APIType *string `json:"apiType,omitempty" tf:"api_type,omitempty"` + + // A contact block as documented below. + Contact *ContactInitParameters `json:"contact,omitempty" tf:"contact,omitempty"` + + // A description of the API Management API, which may include HTML formatting tags. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The display name of the API. + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // A import block as documented below. + Import *ImportInitParameters `json:"import,omitempty" tf:"import,omitempty"` + + // A license block as documented below. 
+ License *LicenseInitParameters `json:"license,omitempty" tf:"license,omitempty"` + + // An oauth2_authorization block as documented below. + Oauth2Authorization *Oauth2AuthorizationInitParameters `json:"oauth2Authorization,omitempty" tf:"oauth2_authorization,omitempty"` + + // An openid_authentication block as documented below. + OpenIDAuthentication *OpenIDAuthenticationInitParameters `json:"openidAuthentication,omitempty" tf:"openid_authentication,omitempty"` + + // The Path for this API Management API, which is a relative URL which uniquely identifies this API and all of its resource paths within the API Management Service. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // A list of protocols the operations in this API can be invoked. Possible values are http, https, ws, and wss. + // +listType=set + Protocols []*string `json:"protocols,omitempty" tf:"protocols,omitempty"` + + // The description of the API Revision of the API Management API. + RevisionDescription *string `json:"revisionDescription,omitempty" tf:"revision_description,omitempty"` + + // Absolute URL of the backend service implementing this API. + ServiceURL *string `json:"serviceUrl,omitempty" tf:"service_url,omitempty"` + + // Should this API expose a SOAP frontend, rather than a HTTP frontend? Defaults to false. + SoapPassThrough *bool `json:"soapPassThrough,omitempty" tf:"soap_pass_through,omitempty"` + + // The API id of the source API, which could be in format azurerm_api_management_api.example.id or in format azurerm_api_management_api.example.id;rev=1 + SourceAPIID *string `json:"sourceApiId,omitempty" tf:"source_api_id,omitempty"` + + // A subscription_key_parameter_names block as documented below. + SubscriptionKeyParameterNames *SubscriptionKeyParameterNamesInitParameters `json:"subscriptionKeyParameterNames,omitempty" tf:"subscription_key_parameter_names,omitempty"` + + // Should this API require a subscription key? Defaults to true. 
+ SubscriptionRequired *bool `json:"subscriptionRequired,omitempty" tf:"subscription_required,omitempty"` + + // Absolute URL of the Terms of Service for the API. + TermsOfServiceURL *string `json:"termsOfServiceUrl,omitempty" tf:"terms_of_service_url,omitempty"` + + // The Version number of this API, if this API is versioned. + Version *string `json:"version,omitempty" tf:"version,omitempty"` + + // The description of the API Version of the API Management API. + VersionDescription *string `json:"versionDescription,omitempty" tf:"version_description,omitempty"` + + // The ID of the Version Set which this API is associated with. + VersionSetID *string `json:"versionSetId,omitempty" tf:"version_set_id,omitempty"` +} + +type APIObservation struct { + + // The Name of the API Management Service where this API should be created. Changing this forces a new resource to be created. + APIManagementName *string `json:"apiManagementName,omitempty" tf:"api_management_name,omitempty"` + + // Type of API. Possible values are graphql, http, soap, and websocket. Defaults to http. + APIType *string `json:"apiType,omitempty" tf:"api_type,omitempty"` + + // A contact block as documented below. + Contact *ContactObservation `json:"contact,omitempty" tf:"contact,omitempty"` + + // A description of the API Management API, which may include HTML formatting tags. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The display name of the API. + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // The ID of the API Management API. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A import block as documented below. + Import *ImportObservation `json:"import,omitempty" tf:"import,omitempty"` + + // Is this the current API Revision? + IsCurrent *bool `json:"isCurrent,omitempty" tf:"is_current,omitempty"` + + // Is this API Revision online/accessible via the Gateway? 
+ IsOnline *bool `json:"isOnline,omitempty" tf:"is_online,omitempty"` + + // A license block as documented below. + License *LicenseObservation `json:"license,omitempty" tf:"license,omitempty"` + + // An oauth2_authorization block as documented below. + Oauth2Authorization *Oauth2AuthorizationObservation `json:"oauth2Authorization,omitempty" tf:"oauth2_authorization,omitempty"` + + // An openid_authentication block as documented below. + OpenIDAuthentication *OpenIDAuthenticationObservation `json:"openidAuthentication,omitempty" tf:"openid_authentication,omitempty"` + + // The Path for this API Management API, which is a relative URL which uniquely identifies this API and all of its resource paths within the API Management Service. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // A list of protocols the operations in this API can be invoked. Possible values are http, https, ws, and wss. + // +listType=set + Protocols []*string `json:"protocols,omitempty" tf:"protocols,omitempty"` + + // The Name of the Resource Group where the API Management API exists. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // The Revision which used for this API. Changing this forces a new resource to be created. + Revision *string `json:"revision,omitempty" tf:"revision,omitempty"` + + // The description of the API Revision of the API Management API. + RevisionDescription *string `json:"revisionDescription,omitempty" tf:"revision_description,omitempty"` + + // Absolute URL of the backend service implementing this API. + ServiceURL *string `json:"serviceUrl,omitempty" tf:"service_url,omitempty"` + + // Should this API expose a SOAP frontend, rather than a HTTP frontend? Defaults to false. 
+ SoapPassThrough *bool `json:"soapPassThrough,omitempty" tf:"soap_pass_through,omitempty"` + + // The API id of the source API, which could be in format azurerm_api_management_api.example.id or in format azurerm_api_management_api.example.id;rev=1 + SourceAPIID *string `json:"sourceApiId,omitempty" tf:"source_api_id,omitempty"` + + // A subscription_key_parameter_names block as documented below. + SubscriptionKeyParameterNames *SubscriptionKeyParameterNamesObservation `json:"subscriptionKeyParameterNames,omitempty" tf:"subscription_key_parameter_names,omitempty"` + + // Should this API require a subscription key? Defaults to true. + SubscriptionRequired *bool `json:"subscriptionRequired,omitempty" tf:"subscription_required,omitempty"` + + // Absolute URL of the Terms of Service for the API. + TermsOfServiceURL *string `json:"termsOfServiceUrl,omitempty" tf:"terms_of_service_url,omitempty"` + + // The Version number of this API, if this API is versioned. + Version *string `json:"version,omitempty" tf:"version,omitempty"` + + // The description of the API Version of the API Management API. + VersionDescription *string `json:"versionDescription,omitempty" tf:"version_description,omitempty"` + + // The ID of the Version Set which this API is associated with. + VersionSetID *string `json:"versionSetId,omitempty" tf:"version_set_id,omitempty"` +} + +type APIParameters struct { + + // The Name of the API Management Service where this API should be created. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.Management + // +kubebuilder:validation:Optional + APIManagementName *string `json:"apiManagementName,omitempty" tf:"api_management_name,omitempty"` + + // Reference to a Management in apimanagement to populate apiManagementName. 
+ // +kubebuilder:validation:Optional + APIManagementNameRef *v1.Reference `json:"apiManagementNameRef,omitempty" tf:"-"` + + // Selector for a Management in apimanagement to populate apiManagementName. + // +kubebuilder:validation:Optional + APIManagementNameSelector *v1.Selector `json:"apiManagementNameSelector,omitempty" tf:"-"` + + // Type of API. Possible values are graphql, http, soap, and websocket. Defaults to http. + // +kubebuilder:validation:Optional + APIType *string `json:"apiType,omitempty" tf:"api_type,omitempty"` + + // A contact block as documented below. + // +kubebuilder:validation:Optional + Contact *ContactParameters `json:"contact,omitempty" tf:"contact,omitempty"` + + // A description of the API Management API, which may include HTML formatting tags. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The display name of the API. + // +kubebuilder:validation:Optional + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // A import block as documented below. + // +kubebuilder:validation:Optional + Import *ImportParameters `json:"import,omitempty" tf:"import,omitempty"` + + // A license block as documented below. + // +kubebuilder:validation:Optional + License *LicenseParameters `json:"license,omitempty" tf:"license,omitempty"` + + // An oauth2_authorization block as documented below. + // +kubebuilder:validation:Optional + Oauth2Authorization *Oauth2AuthorizationParameters `json:"oauth2Authorization,omitempty" tf:"oauth2_authorization,omitempty"` + + // An openid_authentication block as documented below. + // +kubebuilder:validation:Optional + OpenIDAuthentication *OpenIDAuthenticationParameters `json:"openidAuthentication,omitempty" tf:"openid_authentication,omitempty"` + + // The Path for this API Management API, which is a relative URL which uniquely identifies this API and all of its resource paths within the API Management Service. 
+ // +kubebuilder:validation:Optional + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // A list of protocols the operations in this API can be invoked. Possible values are http, https, ws, and wss. + // +kubebuilder:validation:Optional + // +listType=set + Protocols []*string `json:"protocols,omitempty" tf:"protocols,omitempty"` + + // The Name of the Resource Group where the API Management API exists. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // The Revision which used for this API. Changing this forces a new resource to be created. + // +kubebuilder:validation:Required + Revision *string `json:"revision" tf:"revision,omitempty"` + + // The description of the API Revision of the API Management API. + // +kubebuilder:validation:Optional + RevisionDescription *string `json:"revisionDescription,omitempty" tf:"revision_description,omitempty"` + + // Absolute URL of the backend service implementing this API. + // +kubebuilder:validation:Optional + ServiceURL *string `json:"serviceUrl,omitempty" tf:"service_url,omitempty"` + + // Should this API expose a SOAP frontend, rather than a HTTP frontend? Defaults to false. 
+ // +kubebuilder:validation:Optional + SoapPassThrough *bool `json:"soapPassThrough,omitempty" tf:"soap_pass_through,omitempty"` + + // The API id of the source API, which could be in format azurerm_api_management_api.example.id or in format azurerm_api_management_api.example.id;rev=1 + // +kubebuilder:validation:Optional + SourceAPIID *string `json:"sourceApiId,omitempty" tf:"source_api_id,omitempty"` + + // A subscription_key_parameter_names block as documented below. + // +kubebuilder:validation:Optional + SubscriptionKeyParameterNames *SubscriptionKeyParameterNamesParameters `json:"subscriptionKeyParameterNames,omitempty" tf:"subscription_key_parameter_names,omitempty"` + + // Should this API require a subscription key? Defaults to true. + // +kubebuilder:validation:Optional + SubscriptionRequired *bool `json:"subscriptionRequired,omitempty" tf:"subscription_required,omitempty"` + + // Absolute URL of the Terms of Service for the API. + // +kubebuilder:validation:Optional + TermsOfServiceURL *string `json:"termsOfServiceUrl,omitempty" tf:"terms_of_service_url,omitempty"` + + // The Version number of this API, if this API is versioned. + // +kubebuilder:validation:Optional + Version *string `json:"version,omitempty" tf:"version,omitempty"` + + // The description of the API Version of the API Management API. + // +kubebuilder:validation:Optional + VersionDescription *string `json:"versionDescription,omitempty" tf:"version_description,omitempty"` + + // The ID of the Version Set which this API is associated with. + // +kubebuilder:validation:Optional + VersionSetID *string `json:"versionSetId,omitempty" tf:"version_set_id,omitempty"` +} + +type ContactInitParameters struct { + + // The email address of the contact person/organization. + Email *string `json:"email,omitempty" tf:"email,omitempty"` + + // The name of the contact person/organization. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Absolute URL of the contact information. 
+ URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +type ContactObservation struct { + + // The email address of the contact person/organization. + Email *string `json:"email,omitempty" tf:"email,omitempty"` + + // The name of the contact person/organization. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Absolute URL of the contact information. + URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +type ContactParameters struct { + + // The email address of the contact person/organization. + // +kubebuilder:validation:Optional + Email *string `json:"email,omitempty" tf:"email,omitempty"` + + // The name of the contact person/organization. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Absolute URL of the contact information. + // +kubebuilder:validation:Optional + URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +type ImportInitParameters struct { + + // The format of the content from which the API Definition should be imported. Possible values are: openapi, openapi+json, openapi+json-link, openapi-link, swagger-json, swagger-link-json, wadl-link-json, wadl-xml, wsdl and wsdl-link. + ContentFormat *string `json:"contentFormat,omitempty" tf:"content_format,omitempty"` + + // The Content from which the API Definition should be imported. When a content_format of *-link-* is specified this must be a URL, otherwise this must be defined inline. + ContentValue *string `json:"contentValue,omitempty" tf:"content_value,omitempty"` + + // A wsdl_selector block as defined below, which allows you to limit the import of a WSDL to only a subset of the document. This can only be specified when content_format is wsdl or wsdl-link. + WsdlSelector *WsdlSelectorInitParameters `json:"wsdlSelector,omitempty" tf:"wsdl_selector,omitempty"` +} + +type ImportObservation struct { + + // The format of the content from which the API Definition should be imported. 
Possible values are: openapi, openapi+json, openapi+json-link, openapi-link, swagger-json, swagger-link-json, wadl-link-json, wadl-xml, wsdl and wsdl-link. + ContentFormat *string `json:"contentFormat,omitempty" tf:"content_format,omitempty"` + + // The Content from which the API Definition should be imported. When a content_format of *-link-* is specified this must be a URL, otherwise this must be defined inline. + ContentValue *string `json:"contentValue,omitempty" tf:"content_value,omitempty"` + + // A wsdl_selector block as defined below, which allows you to limit the import of a WSDL to only a subset of the document. This can only be specified when content_format is wsdl or wsdl-link. + WsdlSelector *WsdlSelectorObservation `json:"wsdlSelector,omitempty" tf:"wsdl_selector,omitempty"` +} + +type ImportParameters struct { + + // The format of the content from which the API Definition should be imported. Possible values are: openapi, openapi+json, openapi+json-link, openapi-link, swagger-json, swagger-link-json, wadl-link-json, wadl-xml, wsdl and wsdl-link. + // +kubebuilder:validation:Optional + ContentFormat *string `json:"contentFormat" tf:"content_format,omitempty"` + + // The Content from which the API Definition should be imported. When a content_format of *-link-* is specified this must be a URL, otherwise this must be defined inline. + // +kubebuilder:validation:Optional + ContentValue *string `json:"contentValue" tf:"content_value,omitempty"` + + // A wsdl_selector block as defined below, which allows you to limit the import of a WSDL to only a subset of the document. This can only be specified when content_format is wsdl or wsdl-link. + // +kubebuilder:validation:Optional + WsdlSelector *WsdlSelectorParameters `json:"wsdlSelector,omitempty" tf:"wsdl_selector,omitempty"` +} + +type LicenseInitParameters struct { + + // The name of the license . + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Absolute URL of the license. 
+ URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +type LicenseObservation struct { + + // The name of the license . + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Absolute URL of the license. + URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +type LicenseParameters struct { + + // The name of the license . + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Absolute URL of the license. + // +kubebuilder:validation:Optional + URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +type Oauth2AuthorizationInitParameters struct { + + // OAuth authorization server identifier. The name of an OAuth2 Authorization Server. + AuthorizationServerName *string `json:"authorizationServerName,omitempty" tf:"authorization_server_name,omitempty"` + + // Operations scope. + Scope *string `json:"scope,omitempty" tf:"scope,omitempty"` +} + +type Oauth2AuthorizationObservation struct { + + // OAuth authorization server identifier. The name of an OAuth2 Authorization Server. + AuthorizationServerName *string `json:"authorizationServerName,omitempty" tf:"authorization_server_name,omitempty"` + + // Operations scope. + Scope *string `json:"scope,omitempty" tf:"scope,omitempty"` +} + +type Oauth2AuthorizationParameters struct { + + // OAuth authorization server identifier. The name of an OAuth2 Authorization Server. + // +kubebuilder:validation:Optional + AuthorizationServerName *string `json:"authorizationServerName" tf:"authorization_server_name,omitempty"` + + // Operations scope. + // +kubebuilder:validation:Optional + Scope *string `json:"scope,omitempty" tf:"scope,omitempty"` +} + +type OpenIDAuthenticationInitParameters struct { + + // How to send token to the server. A list of zero or more methods. Valid values are authorizationHeader and query. 
+ // +listType=set + BearerTokenSendingMethods []*string `json:"bearerTokenSendingMethods,omitempty" tf:"bearer_token_sending_methods,omitempty"` + + // OpenID Connect provider identifier. The name of an OpenID Connect Provider. + OpenIDProviderName *string `json:"openidProviderName,omitempty" tf:"openid_provider_name,omitempty"` +} + +type OpenIDAuthenticationObservation struct { + + // How to send token to the server. A list of zero or more methods. Valid values are authorizationHeader and query. + // +listType=set + BearerTokenSendingMethods []*string `json:"bearerTokenSendingMethods,omitempty" tf:"bearer_token_sending_methods,omitempty"` + + // OpenID Connect provider identifier. The name of an OpenID Connect Provider. + OpenIDProviderName *string `json:"openidProviderName,omitempty" tf:"openid_provider_name,omitempty"` +} + +type OpenIDAuthenticationParameters struct { + + // How to send token to the server. A list of zero or more methods. Valid values are authorizationHeader and query. + // +kubebuilder:validation:Optional + // +listType=set + BearerTokenSendingMethods []*string `json:"bearerTokenSendingMethods,omitempty" tf:"bearer_token_sending_methods,omitempty"` + + // OpenID Connect provider identifier. The name of an OpenID Connect Provider. + // +kubebuilder:validation:Optional + OpenIDProviderName *string `json:"openidProviderName" tf:"openid_provider_name,omitempty"` +} + +type SubscriptionKeyParameterNamesInitParameters struct { + + // The name of the HTTP Header which should be used for the Subscription Key. + Header *string `json:"header,omitempty" tf:"header,omitempty"` + + // The name of the QueryString parameter which should be used for the Subscription Key. + Query *string `json:"query,omitempty" tf:"query,omitempty"` +} + +type SubscriptionKeyParameterNamesObservation struct { + + // The name of the HTTP Header which should be used for the Subscription Key. 
+ Header *string `json:"header,omitempty" tf:"header,omitempty"` + + // The name of the QueryString parameter which should be used for the Subscription Key. + Query *string `json:"query,omitempty" tf:"query,omitempty"` +} + +type SubscriptionKeyParameterNamesParameters struct { + + // The name of the HTTP Header which should be used for the Subscription Key. + // +kubebuilder:validation:Optional + Header *string `json:"header" tf:"header,omitempty"` + + // The name of the QueryString parameter which should be used for the Subscription Key. + // +kubebuilder:validation:Optional + Query *string `json:"query" tf:"query,omitempty"` +} + +type WsdlSelectorInitParameters struct { + + // The name of endpoint (port) to import from WSDL. + EndpointName *string `json:"endpointName,omitempty" tf:"endpoint_name,omitempty"` + + // The name of service to import from WSDL. + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` +} + +type WsdlSelectorObservation struct { + + // The name of endpoint (port) to import from WSDL. + EndpointName *string `json:"endpointName,omitempty" tf:"endpoint_name,omitempty"` + + // The name of service to import from WSDL. + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` +} + +type WsdlSelectorParameters struct { + + // The name of endpoint (port) to import from WSDL. + // +kubebuilder:validation:Optional + EndpointName *string `json:"endpointName" tf:"endpoint_name,omitempty"` + + // The name of service to import from WSDL. + // +kubebuilder:validation:Optional + ServiceName *string `json:"serviceName" tf:"service_name,omitempty"` +} + +// APISpec defines the desired state of API +type APISpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider APIParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. 
+ // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider APIInitParameters `json:"initProvider,omitempty"` +} + +// APIStatus defines the observed state of API. +type APIStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider APIObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// API is the Schema for the APIs API. Manages an API within an API Management Service. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type API struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec APISpec `json:"spec"` + Status APIStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// APIList contains a list of APIs +type APIList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []API `json:"items"` +} + +// Repository type metadata. 
+var ( + API_Kind = "API" + API_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: API_Kind}.String() + API_KindAPIVersion = API_Kind + "." + CRDGroupVersion.String() + API_GroupVersionKind = CRDGroupVersion.WithKind(API_Kind) +) + +func init() { + SchemeBuilder.Register(&API{}, &APIList{}) +} diff --git a/apis/apimanagement/v1beta2/zz_apidiagnostic_terraformed.go b/apis/apimanagement/v1beta2/zz_apidiagnostic_terraformed.go new file mode 100755 index 000000000..55520ba13 --- /dev/null +++ b/apis/apimanagement/v1beta2/zz_apidiagnostic_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this APIDiagnostic +func (mg *APIDiagnostic) GetTerraformResourceType() string { + return "azurerm_api_management_api_diagnostic" +} + +// GetConnectionDetailsMapping for this APIDiagnostic +func (tr *APIDiagnostic) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this APIDiagnostic +func (tr *APIDiagnostic) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this APIDiagnostic +func (tr *APIDiagnostic) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this APIDiagnostic +func (tr *APIDiagnostic) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// 
GetParameters of this APIDiagnostic +func (tr *APIDiagnostic) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this APIDiagnostic +func (tr *APIDiagnostic) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this APIDiagnostic +func (tr *APIDiagnostic) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this APIDiagnostic +func (tr *APIDiagnostic) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this APIDiagnostic using its observed tfState. 
+// returns True if there are any spec changes for the resource. +func (tr *APIDiagnostic) LateInitialize(attrs []byte) (bool, error) { + params := &APIDiagnosticParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *APIDiagnostic) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/apimanagement/v1beta2/zz_apidiagnostic_types.go b/apis/apimanagement/v1beta2/zz_apidiagnostic_types.go new file mode 100755 index 000000000..25ca8d6f6 --- /dev/null +++ b/apis/apimanagement/v1beta2/zz_apidiagnostic_types.go @@ -0,0 +1,780 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type APIDiagnosticInitParameters struct { + + // The ID (name) of the Diagnostics Logger. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.Logger + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + APIManagementLoggerID *string `json:"apiManagementLoggerId,omitempty" tf:"api_management_logger_id,omitempty"` + + // Reference to a Logger in apimanagement to populate apiManagementLoggerId. 
+ // +kubebuilder:validation:Optional + APIManagementLoggerIDRef *v1.Reference `json:"apiManagementLoggerIdRef,omitempty" tf:"-"` + + // Selector for a Logger in apimanagement to populate apiManagementLoggerId. + // +kubebuilder:validation:Optional + APIManagementLoggerIDSelector *v1.Selector `json:"apiManagementLoggerIdSelector,omitempty" tf:"-"` + + // Always log errors. Send telemetry if there is an erroneous condition, regardless of sampling settings. + AlwaysLogErrors *bool `json:"alwaysLogErrors,omitempty" tf:"always_log_errors,omitempty"` + + // A backend_request block as defined below. + BackendRequest *BackendRequestInitParameters `json:"backendRequest,omitempty" tf:"backend_request,omitempty"` + + // A backend_response block as defined below. + BackendResponse *BackendResponseInitParameters `json:"backendResponse,omitempty" tf:"backend_response,omitempty"` + + // A frontend_request block as defined below. + FrontendRequest *FrontendRequestInitParameters `json:"frontendRequest,omitempty" tf:"frontend_request,omitempty"` + + // A frontend_response block as defined below. + FrontendResponse *FrontendResponseInitParameters `json:"frontendResponse,omitempty" tf:"frontend_response,omitempty"` + + // The HTTP Correlation Protocol to use. Possible values are None, Legacy or W3C. + HTTPCorrelationProtocol *string `json:"httpCorrelationProtocol,omitempty" tf:"http_correlation_protocol,omitempty"` + + // Log client IP address. + LogClientIP *bool `json:"logClientIp,omitempty" tf:"log_client_ip,omitempty"` + + // The format of the Operation Name for Application Insights telemetries. Possible values are Name, and Url. Defaults to Name. + OperationNameFormat *string `json:"operationNameFormat,omitempty" tf:"operation_name_format,omitempty"` + + // Sampling (%). For high traffic APIs, please read this documentation to understand performance implications and log sampling. Valid values are between 0.0 and 100.0. 
+ SamplingPercentage *float64 `json:"samplingPercentage,omitempty" tf:"sampling_percentage,omitempty"` + + // Logging verbosity. Possible values are verbose, information or error. + Verbosity *string `json:"verbosity,omitempty" tf:"verbosity,omitempty"` +} + +type APIDiagnosticObservation struct { + + // The ID (name) of the Diagnostics Logger. + APIManagementLoggerID *string `json:"apiManagementLoggerId,omitempty" tf:"api_management_logger_id,omitempty"` + + // The name of the API Management Service instance. Changing this forces a new API Management Service API Diagnostics Logs to be created. + APIManagementName *string `json:"apiManagementName,omitempty" tf:"api_management_name,omitempty"` + + // The name of the API on which to configure the Diagnostics Logs. Changing this forces a new API Management Service API Diagnostics Logs to be created. + APIName *string `json:"apiName,omitempty" tf:"api_name,omitempty"` + + // Always log errors. Send telemetry if there is an erroneous condition, regardless of sampling settings. + AlwaysLogErrors *bool `json:"alwaysLogErrors,omitempty" tf:"always_log_errors,omitempty"` + + // A backend_request block as defined below. + BackendRequest *BackendRequestObservation `json:"backendRequest,omitempty" tf:"backend_request,omitempty"` + + // A backend_response block as defined below. + BackendResponse *BackendResponseObservation `json:"backendResponse,omitempty" tf:"backend_response,omitempty"` + + // A frontend_request block as defined below. + FrontendRequest *FrontendRequestObservation `json:"frontendRequest,omitempty" tf:"frontend_request,omitempty"` + + // A frontend_response block as defined below. + FrontendResponse *FrontendResponseObservation `json:"frontendResponse,omitempty" tf:"frontend_response,omitempty"` + + // The HTTP Correlation Protocol to use. Possible values are None, Legacy or W3C. 
+ HTTPCorrelationProtocol *string `json:"httpCorrelationProtocol,omitempty" tf:"http_correlation_protocol,omitempty"` + + // The ID of the API Management Service API Diagnostics Logs. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Log client IP address. + LogClientIP *bool `json:"logClientIp,omitempty" tf:"log_client_ip,omitempty"` + + // The format of the Operation Name for Application Insights telemetries. Possible values are Name, and Url. Defaults to Name. + OperationNameFormat *string `json:"operationNameFormat,omitempty" tf:"operation_name_format,omitempty"` + + // The name of the Resource Group where the API Management Service API Diagnostics Logs should exist. Changing this forces a new API Management Service API Diagnostics Logs to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Sampling (%). For high traffic APIs, please read this documentation to understand performance implications and log sampling. Valid values are between 0.0 and 100.0. + SamplingPercentage *float64 `json:"samplingPercentage,omitempty" tf:"sampling_percentage,omitempty"` + + // Logging verbosity. Possible values are verbose, information or error. + Verbosity *string `json:"verbosity,omitempty" tf:"verbosity,omitempty"` +} + +type APIDiagnosticParameters struct { + + // The ID (name) of the Diagnostics Logger. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.Logger + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + APIManagementLoggerID *string `json:"apiManagementLoggerId,omitempty" tf:"api_management_logger_id,omitempty"` + + // Reference to a Logger in apimanagement to populate apiManagementLoggerId. 
+ // +kubebuilder:validation:Optional + APIManagementLoggerIDRef *v1.Reference `json:"apiManagementLoggerIdRef,omitempty" tf:"-"` + + // Selector for a Logger in apimanagement to populate apiManagementLoggerId. + // +kubebuilder:validation:Optional + APIManagementLoggerIDSelector *v1.Selector `json:"apiManagementLoggerIdSelector,omitempty" tf:"-"` + + // The name of the API Management Service instance. Changing this forces a new API Management Service API Diagnostics Logs to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.Management + // +kubebuilder:validation:Optional + APIManagementName *string `json:"apiManagementName,omitempty" tf:"api_management_name,omitempty"` + + // Reference to a Management in apimanagement to populate apiManagementName. + // +kubebuilder:validation:Optional + APIManagementNameRef *v1.Reference `json:"apiManagementNameRef,omitempty" tf:"-"` + + // Selector for a Management in apimanagement to populate apiManagementName. + // +kubebuilder:validation:Optional + APIManagementNameSelector *v1.Selector `json:"apiManagementNameSelector,omitempty" tf:"-"` + + // The name of the API on which to configure the Diagnostics Logs. Changing this forces a new API Management Service API Diagnostics Logs to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.API + // +kubebuilder:validation:Optional + APIName *string `json:"apiName,omitempty" tf:"api_name,omitempty"` + + // Reference to a API in apimanagement to populate apiName. + // +kubebuilder:validation:Optional + APINameRef *v1.Reference `json:"apiNameRef,omitempty" tf:"-"` + + // Selector for a API in apimanagement to populate apiName. + // +kubebuilder:validation:Optional + APINameSelector *v1.Selector `json:"apiNameSelector,omitempty" tf:"-"` + + // Always log errors. Send telemetry if there is an erroneous condition, regardless of sampling settings. 
+ // +kubebuilder:validation:Optional + AlwaysLogErrors *bool `json:"alwaysLogErrors,omitempty" tf:"always_log_errors,omitempty"` + + // A backend_request block as defined below. + // +kubebuilder:validation:Optional + BackendRequest *BackendRequestParameters `json:"backendRequest,omitempty" tf:"backend_request,omitempty"` + + // A backend_response block as defined below. + // +kubebuilder:validation:Optional + BackendResponse *BackendResponseParameters `json:"backendResponse,omitempty" tf:"backend_response,omitempty"` + + // A frontend_request block as defined below. + // +kubebuilder:validation:Optional + FrontendRequest *FrontendRequestParameters `json:"frontendRequest,omitempty" tf:"frontend_request,omitempty"` + + // A frontend_response block as defined below. + // +kubebuilder:validation:Optional + FrontendResponse *FrontendResponseParameters `json:"frontendResponse,omitempty" tf:"frontend_response,omitempty"` + + // The HTTP Correlation Protocol to use. Possible values are None, Legacy or W3C. + // +kubebuilder:validation:Optional + HTTPCorrelationProtocol *string `json:"httpCorrelationProtocol,omitempty" tf:"http_correlation_protocol,omitempty"` + + // Log client IP address. + // +kubebuilder:validation:Optional + LogClientIP *bool `json:"logClientIp,omitempty" tf:"log_client_ip,omitempty"` + + // The format of the Operation Name for Application Insights telemetries. Possible values are Name, and Url. Defaults to Name. + // +kubebuilder:validation:Optional + OperationNameFormat *string `json:"operationNameFormat,omitempty" tf:"operation_name_format,omitempty"` + + // The name of the Resource Group where the API Management Service API Diagnostics Logs should exist. Changing this forces a new API Management Service API Diagnostics Logs to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // Sampling (%). For high traffic APIs, please read this documentation to understand performance implications and log sampling. Valid values are between 0.0 and 100.0. + // +kubebuilder:validation:Optional + SamplingPercentage *float64 `json:"samplingPercentage,omitempty" tf:"sampling_percentage,omitempty"` + + // Logging verbosity. Possible values are verbose, information or error. + // +kubebuilder:validation:Optional + Verbosity *string `json:"verbosity,omitempty" tf:"verbosity,omitempty"` +} + +type BackendRequestInitParameters struct { + + // Number of payload bytes to log (up to 8192). + BodyBytes *float64 `json:"bodyBytes,omitempty" tf:"body_bytes,omitempty"` + + // A data_masking block as defined below. + DataMasking *DataMaskingInitParameters `json:"dataMasking,omitempty" tf:"data_masking,omitempty"` + + // Specifies a list of headers to log. + // +listType=set + HeadersToLog []*string `json:"headersToLog,omitempty" tf:"headers_to_log,omitempty"` +} + +type BackendRequestObservation struct { + + // Number of payload bytes to log (up to 8192). + BodyBytes *float64 `json:"bodyBytes,omitempty" tf:"body_bytes,omitempty"` + + // A data_masking block as defined below. + DataMasking *DataMaskingObservation `json:"dataMasking,omitempty" tf:"data_masking,omitempty"` + + // Specifies a list of headers to log. 
+ // +listType=set + HeadersToLog []*string `json:"headersToLog,omitempty" tf:"headers_to_log,omitempty"` +} + +type BackendRequestParameters struct { + + // Number of payload bytes to log (up to 8192). + // +kubebuilder:validation:Optional + BodyBytes *float64 `json:"bodyBytes,omitempty" tf:"body_bytes,omitempty"` + + // A data_masking block as defined below. + // +kubebuilder:validation:Optional + DataMasking *DataMaskingParameters `json:"dataMasking,omitempty" tf:"data_masking,omitempty"` + + // Specifies a list of headers to log. + // +kubebuilder:validation:Optional + // +listType=set + HeadersToLog []*string `json:"headersToLog,omitempty" tf:"headers_to_log,omitempty"` +} + +type BackendResponseDataMaskingInitParameters struct { + + // A headers block as defined below. + Headers []DataMaskingHeadersInitParameters `json:"headers,omitempty" tf:"headers,omitempty"` + + // A query_params block as defined below. + QueryParams []DataMaskingQueryParamsInitParameters `json:"queryParams,omitempty" tf:"query_params,omitempty"` +} + +type BackendResponseDataMaskingObservation struct { + + // A headers block as defined below. + Headers []DataMaskingHeadersObservation `json:"headers,omitempty" tf:"headers,omitempty"` + + // A query_params block as defined below. + QueryParams []DataMaskingQueryParamsObservation `json:"queryParams,omitempty" tf:"query_params,omitempty"` +} + +type BackendResponseDataMaskingParameters struct { + + // A headers block as defined below. + // +kubebuilder:validation:Optional + Headers []DataMaskingHeadersParameters `json:"headers,omitempty" tf:"headers,omitempty"` + + // A query_params block as defined below. + // +kubebuilder:validation:Optional + QueryParams []DataMaskingQueryParamsParameters `json:"queryParams,omitempty" tf:"query_params,omitempty"` +} + +type BackendResponseInitParameters struct { + + // Number of payload bytes to log (up to 8192). 
+ BodyBytes *float64 `json:"bodyBytes,omitempty" tf:"body_bytes,omitempty"` + + // A data_masking block as defined below. + DataMasking *BackendResponseDataMaskingInitParameters `json:"dataMasking,omitempty" tf:"data_masking,omitempty"` + + // Specifies a list of headers to log. + // +listType=set + HeadersToLog []*string `json:"headersToLog,omitempty" tf:"headers_to_log,omitempty"` +} + +type BackendResponseObservation struct { + + // Number of payload bytes to log (up to 8192). + BodyBytes *float64 `json:"bodyBytes,omitempty" tf:"body_bytes,omitempty"` + + // A data_masking block as defined below. + DataMasking *BackendResponseDataMaskingObservation `json:"dataMasking,omitempty" tf:"data_masking,omitempty"` + + // Specifies a list of headers to log. + // +listType=set + HeadersToLog []*string `json:"headersToLog,omitempty" tf:"headers_to_log,omitempty"` +} + +type BackendResponseParameters struct { + + // Number of payload bytes to log (up to 8192). + // +kubebuilder:validation:Optional + BodyBytes *float64 `json:"bodyBytes,omitempty" tf:"body_bytes,omitempty"` + + // A data_masking block as defined below. + // +kubebuilder:validation:Optional + DataMasking *BackendResponseDataMaskingParameters `json:"dataMasking,omitempty" tf:"data_masking,omitempty"` + + // Specifies a list of headers to log. + // +kubebuilder:validation:Optional + // +listType=set + HeadersToLog []*string `json:"headersToLog,omitempty" tf:"headers_to_log,omitempty"` +} + +type DataMaskingHeadersInitParameters struct { + + // The data masking mode. Possible values are Mask and Hide for query_params. The only possible value is Mask for headers. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // The name of the header or the query parameter to mask. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type DataMaskingHeadersObservation struct { + + // The data masking mode. Possible values are Mask and Hide for query_params. 
The only possible value is Mask for headers. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // The name of the header or the query parameter to mask. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type DataMaskingHeadersParameters struct { + + // The data masking mode. Possible values are Mask and Hide for query_params. The only possible value is Mask for headers. + // +kubebuilder:validation:Optional + Mode *string `json:"mode" tf:"mode,omitempty"` + + // The name of the header or the query parameter to mask. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type DataMaskingInitParameters struct { + + // A headers block as defined below. + Headers []HeadersInitParameters `json:"headers,omitempty" tf:"headers,omitempty"` + + // A query_params block as defined below. + QueryParams []QueryParamsInitParameters `json:"queryParams,omitempty" tf:"query_params,omitempty"` +} + +type DataMaskingObservation struct { + + // A headers block as defined below. + Headers []HeadersObservation `json:"headers,omitempty" tf:"headers,omitempty"` + + // A query_params block as defined below. + QueryParams []QueryParamsObservation `json:"queryParams,omitempty" tf:"query_params,omitempty"` +} + +type DataMaskingParameters struct { + + // A headers block as defined below. + // +kubebuilder:validation:Optional + Headers []HeadersParameters `json:"headers,omitempty" tf:"headers,omitempty"` + + // A query_params block as defined below. + // +kubebuilder:validation:Optional + QueryParams []QueryParamsParameters `json:"queryParams,omitempty" tf:"query_params,omitempty"` +} + +type DataMaskingQueryParamsInitParameters struct { + + // The data masking mode. Possible values are Mask and Hide for query_params. The only possible value is Mask for headers. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // The name of the header or the query parameter to mask. 
+ Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type DataMaskingQueryParamsObservation struct { + + // The data masking mode. Possible values are Mask and Hide for query_params. The only possible value is Mask for headers. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // The name of the header or the query parameter to mask. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type DataMaskingQueryParamsParameters struct { + + // The data masking mode. Possible values are Mask and Hide for query_params. The only possible value is Mask for headers. + // +kubebuilder:validation:Optional + Mode *string `json:"mode" tf:"mode,omitempty"` + + // The name of the header or the query parameter to mask. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type FrontendRequestDataMaskingHeadersInitParameters struct { + + // The data masking mode. Possible values are Mask and Hide for query_params. The only possible value is Mask for headers. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // The name of the header or the query parameter to mask. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type FrontendRequestDataMaskingHeadersObservation struct { + + // The data masking mode. Possible values are Mask and Hide for query_params. The only possible value is Mask for headers. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // The name of the header or the query parameter to mask. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type FrontendRequestDataMaskingHeadersParameters struct { + + // The data masking mode. Possible values are Mask and Hide for query_params. The only possible value is Mask for headers. + // +kubebuilder:validation:Optional + Mode *string `json:"mode" tf:"mode,omitempty"` + + // The name of the header or the query parameter to mask. 
+ // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type FrontendRequestDataMaskingInitParameters struct { + + // A headers block as defined below. + Headers []FrontendRequestDataMaskingHeadersInitParameters `json:"headers,omitempty" tf:"headers,omitempty"` + + // A query_params block as defined below. + QueryParams []FrontendRequestDataMaskingQueryParamsInitParameters `json:"queryParams,omitempty" tf:"query_params,omitempty"` +} + +type FrontendRequestDataMaskingObservation struct { + + // A headers block as defined below. + Headers []FrontendRequestDataMaskingHeadersObservation `json:"headers,omitempty" tf:"headers,omitempty"` + + // A query_params block as defined below. + QueryParams []FrontendRequestDataMaskingQueryParamsObservation `json:"queryParams,omitempty" tf:"query_params,omitempty"` +} + +type FrontendRequestDataMaskingParameters struct { + + // A headers block as defined below. + // +kubebuilder:validation:Optional + Headers []FrontendRequestDataMaskingHeadersParameters `json:"headers,omitempty" tf:"headers,omitempty"` + + // A query_params block as defined below. + // +kubebuilder:validation:Optional + QueryParams []FrontendRequestDataMaskingQueryParamsParameters `json:"queryParams,omitempty" tf:"query_params,omitempty"` +} + +type FrontendRequestDataMaskingQueryParamsInitParameters struct { + + // The data masking mode. Possible values are Mask and Hide for query_params. The only possible value is Mask for headers. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // The name of the header or the query parameter to mask. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type FrontendRequestDataMaskingQueryParamsObservation struct { + + // The data masking mode. Possible values are Mask and Hide for query_params. The only possible value is Mask for headers. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // The name of the header or the query parameter to mask. 
+ Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type FrontendRequestDataMaskingQueryParamsParameters struct { + + // The data masking mode. Possible values are Mask and Hide for query_params. The only possible value is Mask for headers. + // +kubebuilder:validation:Optional + Mode *string `json:"mode" tf:"mode,omitempty"` + + // The name of the header or the query parameter to mask. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type FrontendRequestInitParameters struct { + + // Number of payload bytes to log (up to 8192). + BodyBytes *float64 `json:"bodyBytes,omitempty" tf:"body_bytes,omitempty"` + + // A data_masking block as defined below. + DataMasking *FrontendRequestDataMaskingInitParameters `json:"dataMasking,omitempty" tf:"data_masking,omitempty"` + + // Specifies a list of headers to log. + // +listType=set + HeadersToLog []*string `json:"headersToLog,omitempty" tf:"headers_to_log,omitempty"` +} + +type FrontendRequestObservation struct { + + // Number of payload bytes to log (up to 8192). + BodyBytes *float64 `json:"bodyBytes,omitempty" tf:"body_bytes,omitempty"` + + // A data_masking block as defined below. + DataMasking *FrontendRequestDataMaskingObservation `json:"dataMasking,omitempty" tf:"data_masking,omitempty"` + + // Specifies a list of headers to log. + // +listType=set + HeadersToLog []*string `json:"headersToLog,omitempty" tf:"headers_to_log,omitempty"` +} + +type FrontendRequestParameters struct { + + // Number of payload bytes to log (up to 8192). + // +kubebuilder:validation:Optional + BodyBytes *float64 `json:"bodyBytes,omitempty" tf:"body_bytes,omitempty"` + + // A data_masking block as defined below. + // +kubebuilder:validation:Optional + DataMasking *FrontendRequestDataMaskingParameters `json:"dataMasking,omitempty" tf:"data_masking,omitempty"` + + // Specifies a list of headers to log. 
+ // +kubebuilder:validation:Optional + // +listType=set + HeadersToLog []*string `json:"headersToLog,omitempty" tf:"headers_to_log,omitempty"` +} + +type FrontendResponseDataMaskingHeadersInitParameters struct { + + // The data masking mode. Possible values are Mask and Hide for query_params. The only possible value is Mask for headers. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // The name of the header or the query parameter to mask. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type FrontendResponseDataMaskingHeadersObservation struct { + + // The data masking mode. Possible values are Mask and Hide for query_params. The only possible value is Mask for headers. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // The name of the header or the query parameter to mask. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type FrontendResponseDataMaskingHeadersParameters struct { + + // The data masking mode. Possible values are Mask and Hide for query_params. The only possible value is Mask for headers. + // +kubebuilder:validation:Optional + Mode *string `json:"mode" tf:"mode,omitempty"` + + // The name of the header or the query parameter to mask. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type FrontendResponseDataMaskingInitParameters struct { + + // A headers block as defined below. + Headers []FrontendResponseDataMaskingHeadersInitParameters `json:"headers,omitempty" tf:"headers,omitempty"` + + // A query_params block as defined below. + QueryParams []FrontendResponseDataMaskingQueryParamsInitParameters `json:"queryParams,omitempty" tf:"query_params,omitempty"` +} + +type FrontendResponseDataMaskingObservation struct { + + // A headers block as defined below. + Headers []FrontendResponseDataMaskingHeadersObservation `json:"headers,omitempty" tf:"headers,omitempty"` + + // A query_params block as defined below. 
+ QueryParams []FrontendResponseDataMaskingQueryParamsObservation `json:"queryParams,omitempty" tf:"query_params,omitempty"` +} + +type FrontendResponseDataMaskingParameters struct { + + // A headers block as defined below. + // +kubebuilder:validation:Optional + Headers []FrontendResponseDataMaskingHeadersParameters `json:"headers,omitempty" tf:"headers,omitempty"` + + // A query_params block as defined below. + // +kubebuilder:validation:Optional + QueryParams []FrontendResponseDataMaskingQueryParamsParameters `json:"queryParams,omitempty" tf:"query_params,omitempty"` +} + +type FrontendResponseDataMaskingQueryParamsInitParameters struct { + + // The data masking mode. Possible values are Mask and Hide for query_params. The only possible value is Mask for headers. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // The name of the header or the query parameter to mask. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type FrontendResponseDataMaskingQueryParamsObservation struct { + + // The data masking mode. Possible values are Mask and Hide for query_params. The only possible value is Mask for headers. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // The name of the header or the query parameter to mask. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type FrontendResponseDataMaskingQueryParamsParameters struct { + + // The data masking mode. Possible values are Mask and Hide for query_params. The only possible value is Mask for headers. + // +kubebuilder:validation:Optional + Mode *string `json:"mode" tf:"mode,omitempty"` + + // The name of the header or the query parameter to mask. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type FrontendResponseInitParameters struct { + + // Number of payload bytes to log (up to 8192). + BodyBytes *float64 `json:"bodyBytes,omitempty" tf:"body_bytes,omitempty"` + + // A data_masking block as defined below. 
+ DataMasking *FrontendResponseDataMaskingInitParameters `json:"dataMasking,omitempty" tf:"data_masking,omitempty"` + + // Specifies a list of headers to log. + // +listType=set + HeadersToLog []*string `json:"headersToLog,omitempty" tf:"headers_to_log,omitempty"` +} + +type FrontendResponseObservation struct { + + // Number of payload bytes to log (up to 8192). + BodyBytes *float64 `json:"bodyBytes,omitempty" tf:"body_bytes,omitempty"` + + // A data_masking block as defined below. + DataMasking *FrontendResponseDataMaskingObservation `json:"dataMasking,omitempty" tf:"data_masking,omitempty"` + + // Specifies a list of headers to log. + // +listType=set + HeadersToLog []*string `json:"headersToLog,omitempty" tf:"headers_to_log,omitempty"` +} + +type FrontendResponseParameters struct { + + // Number of payload bytes to log (up to 8192). + // +kubebuilder:validation:Optional + BodyBytes *float64 `json:"bodyBytes,omitempty" tf:"body_bytes,omitempty"` + + // A data_masking block as defined below. + // +kubebuilder:validation:Optional + DataMasking *FrontendResponseDataMaskingParameters `json:"dataMasking,omitempty" tf:"data_masking,omitempty"` + + // Specifies a list of headers to log. + // +kubebuilder:validation:Optional + // +listType=set + HeadersToLog []*string `json:"headersToLog,omitempty" tf:"headers_to_log,omitempty"` +} + +type HeadersInitParameters struct { + + // The data masking mode. Possible values are Mask and Hide for query_params. The only possible value is Mask for headers. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // The name of the header or the query parameter to mask. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type HeadersObservation struct { + + // The data masking mode. Possible values are Mask and Hide for query_params. The only possible value is Mask for headers. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // The name of the header or the query parameter to mask. 
+ Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type HeadersParameters struct { + + // The data masking mode. Possible values are Mask and Hide for query_params. The only possible value is Mask for headers. + // +kubebuilder:validation:Optional + Mode *string `json:"mode" tf:"mode,omitempty"` + + // The name of the header or the query parameter to mask. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type QueryParamsInitParameters struct { + + // The data masking mode. Possible values are Mask and Hide for query_params. The only possible value is Mask for headers. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // The name of the header or the query parameter to mask. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type QueryParamsObservation struct { + + // The data masking mode. Possible values are Mask and Hide for query_params. The only possible value is Mask for headers. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // The name of the header or the query parameter to mask. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type QueryParamsParameters struct { + + // The data masking mode. Possible values are Mask and Hide for query_params. The only possible value is Mask for headers. + // +kubebuilder:validation:Optional + Mode *string `json:"mode" tf:"mode,omitempty"` + + // The name of the header or the query parameter to mask. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +// APIDiagnosticSpec defines the desired state of APIDiagnostic +type APIDiagnosticSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider APIDiagnosticParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. 
+	// InitProvider holds the same fields as ForProvider, with the exception
+	// of Identifier and other resource reference fields. The fields that are
+	// in InitProvider are merged into ForProvider when the resource is created.
+	// The same fields are also added to the terraform ignore_changes hook, to
+	// avoid updating them after creation. This is useful for fields that are
+	// required on creation, but we do not desire to update them after creation,
+	// for example because an external controller is managing them, like an
+	// autoscaler.
+	InitProvider APIDiagnosticInitParameters `json:"initProvider,omitempty"`
+}
+
+// APIDiagnosticStatus defines the observed state of APIDiagnostic.
+type APIDiagnosticStatus struct {
+	v1.ResourceStatus `json:",inline"`
+	AtProvider APIDiagnosticObservation `json:"atProvider,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+
+// APIDiagnostic is the Schema for the APIDiagnostics API. Manages an API Management Service API Diagnostics Log.
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type APIDiagnostic struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec APIDiagnosticSpec `json:"spec"` + Status APIDiagnosticStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// APIDiagnosticList contains a list of APIDiagnostics +type APIDiagnosticList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []APIDiagnostic `json:"items"` +} + +// Repository type metadata. +var ( + APIDiagnostic_Kind = "APIDiagnostic" + APIDiagnostic_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: APIDiagnostic_Kind}.String() + APIDiagnostic_KindAPIVersion = APIDiagnostic_Kind + "." + CRDGroupVersion.String() + APIDiagnostic_GroupVersionKind = CRDGroupVersion.WithKind(APIDiagnostic_Kind) +) + +func init() { + SchemeBuilder.Register(&APIDiagnostic{}, &APIDiagnosticList{}) +} diff --git a/apis/apimanagement/v1beta2/zz_apioperation_terraformed.go b/apis/apimanagement/v1beta2/zz_apioperation_terraformed.go new file mode 100755 index 000000000..411cb8a60 --- /dev/null +++ b/apis/apimanagement/v1beta2/zz_apioperation_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this APIOperation +func (mg *APIOperation) GetTerraformResourceType() string { + return "azurerm_api_management_api_operation" +} + +// GetConnectionDetailsMapping for this APIOperation +func (tr *APIOperation) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this APIOperation +func (tr *APIOperation) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this APIOperation +func (tr *APIOperation) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this APIOperation +func (tr *APIOperation) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this APIOperation +func (tr *APIOperation) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this APIOperation +func (tr *APIOperation) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this APIOperation +func (tr *APIOperation) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, 
err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this APIOperation
+func (tr *APIOperation) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this APIOperation using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *APIOperation) LateInitialize(attrs []byte) (bool, error) {
+	params := &APIOperationParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *APIOperation) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/apimanagement/v1beta2/zz_apioperation_types.go b/apis/apimanagement/v1beta2/zz_apioperation_types.go new file mode 100755 index 000000000..18e3a1b03 --- /dev/null +++ b/apis/apimanagement/v1beta2/zz_apioperation_types.go @@ -0,0 +1,1507 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type APIOperationInitParameters struct { + + // A description for this API Operation, which may include HTML formatting tags. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The Display Name for this API Management Operation. + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // The HTTP Method used for this API Management Operation, like GET, DELETE, PUT or POST - but not limited to these values. + Method *string `json:"method,omitempty" tf:"method,omitempty"` + + // A request block as defined below. + Request *RequestInitParameters `json:"request,omitempty" tf:"request,omitempty"` + + // One or more response blocks as defined below. + Response []ResponseInitParameters `json:"response,omitempty" tf:"response,omitempty"` + + // One or more template_parameter blocks as defined below. Required if url_template contains one or more parameters. + TemplateParameter []TemplateParameterInitParameters `json:"templateParameter,omitempty" tf:"template_parameter,omitempty"` + + // The relative URL Template identifying the target resource for this operation, which may include parameters. 
+ URLTemplate *string `json:"urlTemplate,omitempty" tf:"url_template,omitempty"` +} + +type APIOperationObservation struct { + + // The Name of the API Management Service where the API exists. Changing this forces a new resource to be created. + APIManagementName *string `json:"apiManagementName,omitempty" tf:"api_management_name,omitempty"` + + // The name of the API within the API Management Service where this API Operation should be created. Changing this forces a new resource to be created. + APIName *string `json:"apiName,omitempty" tf:"api_name,omitempty"` + + // A description for this API Operation, which may include HTML formatting tags. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The Display Name for this API Management Operation. + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // The ID of the API Management API Operation. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The HTTP Method used for this API Management Operation, like GET, DELETE, PUT or POST - but not limited to these values. + Method *string `json:"method,omitempty" tf:"method,omitempty"` + + // A request block as defined below. + Request *RequestObservation `json:"request,omitempty" tf:"request,omitempty"` + + // The Name of the Resource Group in which the API Management Service exists. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // One or more response blocks as defined below. + Response []ResponseObservation `json:"response,omitempty" tf:"response,omitempty"` + + // One or more template_parameter blocks as defined below. Required if url_template contains one or more parameters. 
+ TemplateParameter []TemplateParameterObservation `json:"templateParameter,omitempty" tf:"template_parameter,omitempty"` + + // The relative URL Template identifying the target resource for this operation, which may include parameters. + URLTemplate *string `json:"urlTemplate,omitempty" tf:"url_template,omitempty"` +} + +type APIOperationParameters struct { + + // The Name of the API Management Service where the API exists. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.Management + // +kubebuilder:validation:Optional + APIManagementName *string `json:"apiManagementName,omitempty" tf:"api_management_name,omitempty"` + + // Reference to a Management in apimanagement to populate apiManagementName. + // +kubebuilder:validation:Optional + APIManagementNameRef *v1.Reference `json:"apiManagementNameRef,omitempty" tf:"-"` + + // Selector for a Management in apimanagement to populate apiManagementName. + // +kubebuilder:validation:Optional + APIManagementNameSelector *v1.Selector `json:"apiManagementNameSelector,omitempty" tf:"-"` + + // The name of the API within the API Management Service where this API Operation should be created. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.API + // +kubebuilder:validation:Optional + APIName *string `json:"apiName,omitempty" tf:"api_name,omitempty"` + + // Reference to a API in apimanagement to populate apiName. + // +kubebuilder:validation:Optional + APINameRef *v1.Reference `json:"apiNameRef,omitempty" tf:"-"` + + // Selector for a API in apimanagement to populate apiName. + // +kubebuilder:validation:Optional + APINameSelector *v1.Selector `json:"apiNameSelector,omitempty" tf:"-"` + + // A description for this API Operation, which may include HTML formatting tags. 
+ // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The Display Name for this API Management Operation. + // +kubebuilder:validation:Optional + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // The HTTP Method used for this API Management Operation, like GET, DELETE, PUT or POST - but not limited to these values. + // +kubebuilder:validation:Optional + Method *string `json:"method,omitempty" tf:"method,omitempty"` + + // A request block as defined below. + // +kubebuilder:validation:Optional + Request *RequestParameters `json:"request,omitempty" tf:"request,omitempty"` + + // The Name of the Resource Group in which the API Management Service exists. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // One or more response blocks as defined below. + // +kubebuilder:validation:Optional + Response []ResponseParameters `json:"response,omitempty" tf:"response,omitempty"` + + // One or more template_parameter blocks as defined below. Required if url_template contains one or more parameters. 
+ // +kubebuilder:validation:Optional + TemplateParameter []TemplateParameterParameters `json:"templateParameter,omitempty" tf:"template_parameter,omitempty"` + + // The relative URL Template identifying the target resource for this operation, which may include parameters. + // +kubebuilder:validation:Optional + URLTemplate *string `json:"urlTemplate,omitempty" tf:"url_template,omitempty"` +} + +type ExampleInitParameters struct { + + // A description of this Template Parameter. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A URL that points to the literal example. + ExternalValue *string `json:"externalValue,omitempty" tf:"external_value,omitempty"` + + // The Name of this Template Parameter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A short description for this example. + Summary *string `json:"summary,omitempty" tf:"summary,omitempty"` + + // The example of the representation. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ExampleObservation struct { + + // A description of this Template Parameter. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A URL that points to the literal example. + ExternalValue *string `json:"externalValue,omitempty" tf:"external_value,omitempty"` + + // The Name of this Template Parameter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A short description for this example. + Summary *string `json:"summary,omitempty" tf:"summary,omitempty"` + + // The example of the representation. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ExampleParameters struct { + + // A description of this Template Parameter. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A URL that points to the literal example. 
+ // +kubebuilder:validation:Optional + ExternalValue *string `json:"externalValue,omitempty" tf:"external_value,omitempty"` + + // The Name of this Template Parameter. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // A short description for this example. + // +kubebuilder:validation:Optional + Summary *string `json:"summary,omitempty" tf:"summary,omitempty"` + + // The example of the representation. + // +kubebuilder:validation:Optional + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type FormParameterExampleInitParameters struct { + + // A description of this Template Parameter. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A URL that points to the literal example. + ExternalValue *string `json:"externalValue,omitempty" tf:"external_value,omitempty"` + + // The Name of this Template Parameter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A short description for this example. + Summary *string `json:"summary,omitempty" tf:"summary,omitempty"` + + // The example of the representation. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type FormParameterExampleObservation struct { + + // A description of this Template Parameter. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A URL that points to the literal example. + ExternalValue *string `json:"externalValue,omitempty" tf:"external_value,omitempty"` + + // The Name of this Template Parameter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A short description for this example. + Summary *string `json:"summary,omitempty" tf:"summary,omitempty"` + + // The example of the representation. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type FormParameterExampleParameters struct { + + // A description of this Template Parameter. 
+ // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A URL that points to the literal example. + // +kubebuilder:validation:Optional + ExternalValue *string `json:"externalValue,omitempty" tf:"external_value,omitempty"` + + // The Name of this Template Parameter. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // A short description for this example. + // +kubebuilder:validation:Optional + Summary *string `json:"summary,omitempty" tf:"summary,omitempty"` + + // The example of the representation. + // +kubebuilder:validation:Optional + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type FormParameterInitParameters struct { + + // The default value for this Template Parameter. + DefaultValue *string `json:"defaultValue,omitempty" tf:"default_value,omitempty"` + + // A description of this Template Parameter. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // One or more example blocks as defined above. + Example []FormParameterExampleInitParameters `json:"example,omitempty" tf:"example,omitempty"` + + // The Name of this Template Parameter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Is this Template Parameter Required? + Required *bool `json:"required,omitempty" tf:"required,omitempty"` + + // The name of the Schema. + SchemaID *string `json:"schemaId,omitempty" tf:"schema_id,omitempty"` + + // The Type of this Template Parameter, such as a string. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // The type name defined by the Schema. + TypeName *string `json:"typeName,omitempty" tf:"type_name,omitempty"` + + // One or more acceptable values for this Template Parameter. + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type FormParameterObservation struct { + + // The default value for this Template Parameter. 
+ DefaultValue *string `json:"defaultValue,omitempty" tf:"default_value,omitempty"` + + // A description of this Template Parameter. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // One or more example blocks as defined above. + Example []FormParameterExampleObservation `json:"example,omitempty" tf:"example,omitempty"` + + // The Name of this Template Parameter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Is this Template Parameter Required? + Required *bool `json:"required,omitempty" tf:"required,omitempty"` + + // The name of the Schema. + SchemaID *string `json:"schemaId,omitempty" tf:"schema_id,omitempty"` + + // The Type of this Template Parameter, such as a string. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // The type name defined by the Schema. + TypeName *string `json:"typeName,omitempty" tf:"type_name,omitempty"` + + // One or more acceptable values for this Template Parameter. + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type FormParameterParameters struct { + + // The default value for this Template Parameter. + // +kubebuilder:validation:Optional + DefaultValue *string `json:"defaultValue,omitempty" tf:"default_value,omitempty"` + + // A description of this Template Parameter. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // One or more example blocks as defined above. + // +kubebuilder:validation:Optional + Example []FormParameterExampleParameters `json:"example,omitempty" tf:"example,omitempty"` + + // The Name of this Template Parameter. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Is this Template Parameter Required? + // +kubebuilder:validation:Optional + Required *bool `json:"required" tf:"required,omitempty"` + + // The name of the Schema. 
+ // +kubebuilder:validation:Optional + SchemaID *string `json:"schemaId,omitempty" tf:"schema_id,omitempty"` + + // The Type of this Template Parameter, such as a string. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` + + // The type name defined by the Schema. + // +kubebuilder:validation:Optional + TypeName *string `json:"typeName,omitempty" tf:"type_name,omitempty"` + + // One or more acceptable values for this Template Parameter. + // +kubebuilder:validation:Optional + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type HeaderExampleInitParameters struct { + + // A description of this Template Parameter. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A URL that points to the literal example. + ExternalValue *string `json:"externalValue,omitempty" tf:"external_value,omitempty"` + + // The Name of this Template Parameter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A short description for this example. + Summary *string `json:"summary,omitempty" tf:"summary,omitempty"` + + // The example of the representation. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type HeaderExampleObservation struct { + + // A description of this Template Parameter. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A URL that points to the literal example. + ExternalValue *string `json:"externalValue,omitempty" tf:"external_value,omitempty"` + + // The Name of this Template Parameter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A short description for this example. + Summary *string `json:"summary,omitempty" tf:"summary,omitempty"` + + // The example of the representation. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type HeaderExampleParameters struct { + + // A description of this Template Parameter. 
+ // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A URL that points to the literal example. + // +kubebuilder:validation:Optional + ExternalValue *string `json:"externalValue,omitempty" tf:"external_value,omitempty"` + + // The Name of this Template Parameter. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // A short description for this example. + // +kubebuilder:validation:Optional + Summary *string `json:"summary,omitempty" tf:"summary,omitempty"` + + // The example of the representation. + // +kubebuilder:validation:Optional + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type HeaderInitParameters struct { + + // The default value for this Template Parameter. + DefaultValue *string `json:"defaultValue,omitempty" tf:"default_value,omitempty"` + + // A description of this Template Parameter. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // One or more example blocks as defined above. + Example []ExampleInitParameters `json:"example,omitempty" tf:"example,omitempty"` + + // The Name of this Template Parameter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Is this Template Parameter Required? + Required *bool `json:"required,omitempty" tf:"required,omitempty"` + + // The name of the Schema. + SchemaID *string `json:"schemaId,omitempty" tf:"schema_id,omitempty"` + + // The Type of this Template Parameter, such as a string. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // The type name defined by the Schema. + TypeName *string `json:"typeName,omitempty" tf:"type_name,omitempty"` + + // One or more acceptable values for this Template Parameter. + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type HeaderObservation struct { + + // The default value for this Template Parameter. 
+ DefaultValue *string `json:"defaultValue,omitempty" tf:"default_value,omitempty"` + + // A description of this Template Parameter. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // One or more example blocks as defined above. + Example []ExampleObservation `json:"example,omitempty" tf:"example,omitempty"` + + // The Name of this Template Parameter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Is this Template Parameter Required? + Required *bool `json:"required,omitempty" tf:"required,omitempty"` + + // The name of the Schema. + SchemaID *string `json:"schemaId,omitempty" tf:"schema_id,omitempty"` + + // The Type of this Template Parameter, such as a string. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // The type name defined by the Schema. + TypeName *string `json:"typeName,omitempty" tf:"type_name,omitempty"` + + // One or more acceptable values for this Template Parameter. + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type HeaderParameters struct { + + // The default value for this Template Parameter. + // +kubebuilder:validation:Optional + DefaultValue *string `json:"defaultValue,omitempty" tf:"default_value,omitempty"` + + // A description of this Template Parameter. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // One or more example blocks as defined above. + // +kubebuilder:validation:Optional + Example []ExampleParameters `json:"example,omitempty" tf:"example,omitempty"` + + // The Name of this Template Parameter. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Is this Template Parameter Required? + // +kubebuilder:validation:Optional + Required *bool `json:"required" tf:"required,omitempty"` + + // The name of the Schema. 
+ // +kubebuilder:validation:Optional + SchemaID *string `json:"schemaId,omitempty" tf:"schema_id,omitempty"` + + // The Type of this Template Parameter, such as a string. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` + + // The type name defined by the Schema. + // +kubebuilder:validation:Optional + TypeName *string `json:"typeName,omitempty" tf:"type_name,omitempty"` + + // One or more acceptable values for this Template Parameter. + // +kubebuilder:validation:Optional + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type QueryParameterExampleInitParameters struct { + + // A description of this Template Parameter. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A URL that points to the literal example. + ExternalValue *string `json:"externalValue,omitempty" tf:"external_value,omitempty"` + + // The Name of this Template Parameter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A short description for this example. + Summary *string `json:"summary,omitempty" tf:"summary,omitempty"` + + // The example of the representation. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type QueryParameterExampleObservation struct { + + // A description of this Template Parameter. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A URL that points to the literal example. + ExternalValue *string `json:"externalValue,omitempty" tf:"external_value,omitempty"` + + // The Name of this Template Parameter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A short description for this example. + Summary *string `json:"summary,omitempty" tf:"summary,omitempty"` + + // The example of the representation. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type QueryParameterExampleParameters struct { + + // A description of this Template Parameter. 
+ // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A URL that points to the literal example. + // +kubebuilder:validation:Optional + ExternalValue *string `json:"externalValue,omitempty" tf:"external_value,omitempty"` + + // The Name of this Template Parameter. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // A short description for this example. + // +kubebuilder:validation:Optional + Summary *string `json:"summary,omitempty" tf:"summary,omitempty"` + + // The example of the representation. + // +kubebuilder:validation:Optional + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type QueryParameterInitParameters struct { + + // The default value for this Template Parameter. + DefaultValue *string `json:"defaultValue,omitempty" tf:"default_value,omitempty"` + + // A description of this Template Parameter. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // One or more example blocks as defined above. + Example []QueryParameterExampleInitParameters `json:"example,omitempty" tf:"example,omitempty"` + + // The Name of this Template Parameter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Is this Template Parameter Required? + Required *bool `json:"required,omitempty" tf:"required,omitempty"` + + // The name of the Schema. + SchemaID *string `json:"schemaId,omitempty" tf:"schema_id,omitempty"` + + // The Type of this Template Parameter, such as a string. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // The type name defined by the Schema. + TypeName *string `json:"typeName,omitempty" tf:"type_name,omitempty"` + + // One or more acceptable values for this Template Parameter. + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type QueryParameterObservation struct { + + // The default value for this Template Parameter. 
+ DefaultValue *string `json:"defaultValue,omitempty" tf:"default_value,omitempty"` + + // A description of this Template Parameter. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // One or more example blocks as defined above. + Example []QueryParameterExampleObservation `json:"example,omitempty" tf:"example,omitempty"` + + // The Name of this Template Parameter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Is this Template Parameter Required? + Required *bool `json:"required,omitempty" tf:"required,omitempty"` + + // The name of the Schema. + SchemaID *string `json:"schemaId,omitempty" tf:"schema_id,omitempty"` + + // The Type of this Template Parameter, such as a string. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // The type name defined by the Schema. + TypeName *string `json:"typeName,omitempty" tf:"type_name,omitempty"` + + // One or more acceptable values for this Template Parameter. + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type QueryParameterParameters struct { + + // The default value for this Template Parameter. + // +kubebuilder:validation:Optional + DefaultValue *string `json:"defaultValue,omitempty" tf:"default_value,omitempty"` + + // A description of this Template Parameter. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // One or more example blocks as defined above. + // +kubebuilder:validation:Optional + Example []QueryParameterExampleParameters `json:"example,omitempty" tf:"example,omitempty"` + + // The Name of this Template Parameter. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Is this Template Parameter Required? + // +kubebuilder:validation:Optional + Required *bool `json:"required" tf:"required,omitempty"` + + // The name of the Schema. 
+ // +kubebuilder:validation:Optional + SchemaID *string `json:"schemaId,omitempty" tf:"schema_id,omitempty"` + + // The Type of this Template Parameter, such as a string. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` + + // The type name defined by the Schema. + // +kubebuilder:validation:Optional + TypeName *string `json:"typeName,omitempty" tf:"type_name,omitempty"` + + // One or more acceptable values for this Template Parameter. + // +kubebuilder:validation:Optional + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type RepresentationExampleInitParameters struct { + + // A description of this Template Parameter. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A URL that points to the literal example. + ExternalValue *string `json:"externalValue,omitempty" tf:"external_value,omitempty"` + + // The Name of this Template Parameter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A short description for this example. + Summary *string `json:"summary,omitempty" tf:"summary,omitempty"` + + // The example of the representation. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type RepresentationExampleObservation struct { + + // A description of this Template Parameter. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A URL that points to the literal example. + ExternalValue *string `json:"externalValue,omitempty" tf:"external_value,omitempty"` + + // The Name of this Template Parameter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A short description for this example. + Summary *string `json:"summary,omitempty" tf:"summary,omitempty"` + + // The example of the representation. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type RepresentationExampleParameters struct { + + // A description of this Template Parameter. 
+ // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A URL that points to the literal example. + // +kubebuilder:validation:Optional + ExternalValue *string `json:"externalValue,omitempty" tf:"external_value,omitempty"` + + // The Name of this Template Parameter. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // A short description for this example. + // +kubebuilder:validation:Optional + Summary *string `json:"summary,omitempty" tf:"summary,omitempty"` + + // The example of the representation. + // +kubebuilder:validation:Optional + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type RepresentationFormParameterExampleInitParameters struct { + + // A description of this Template Parameter. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A URL that points to the literal example. + ExternalValue *string `json:"externalValue,omitempty" tf:"external_value,omitempty"` + + // The Name of this Template Parameter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A short description for this example. + Summary *string `json:"summary,omitempty" tf:"summary,omitempty"` + + // The example of the representation. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type RepresentationFormParameterExampleObservation struct { + + // A description of this Template Parameter. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A URL that points to the literal example. + ExternalValue *string `json:"externalValue,omitempty" tf:"external_value,omitempty"` + + // The Name of this Template Parameter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A short description for this example. + Summary *string `json:"summary,omitempty" tf:"summary,omitempty"` + + // The example of the representation. 
+ Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type RepresentationFormParameterExampleParameters struct { + + // A description of this Template Parameter. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A URL that points to the literal example. + // +kubebuilder:validation:Optional + ExternalValue *string `json:"externalValue,omitempty" tf:"external_value,omitempty"` + + // The Name of this Template Parameter. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // A short description for this example. + // +kubebuilder:validation:Optional + Summary *string `json:"summary,omitempty" tf:"summary,omitempty"` + + // The example of the representation. + // +kubebuilder:validation:Optional + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type RepresentationFormParameterInitParameters struct { + + // The default value for this Template Parameter. + DefaultValue *string `json:"defaultValue,omitempty" tf:"default_value,omitempty"` + + // A description of this Template Parameter. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // One or more example blocks as defined above. + Example []RepresentationFormParameterExampleInitParameters `json:"example,omitempty" tf:"example,omitempty"` + + // The Name of this Template Parameter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Is this Template Parameter Required? + Required *bool `json:"required,omitempty" tf:"required,omitempty"` + + // The name of the Schema. + SchemaID *string `json:"schemaId,omitempty" tf:"schema_id,omitempty"` + + // The Type of this Template Parameter, such as a string. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // The type name defined by the Schema. 
+ TypeName *string `json:"typeName,omitempty" tf:"type_name,omitempty"` + + // One or more acceptable values for this Template Parameter. + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type RepresentationFormParameterObservation struct { + + // The default value for this Template Parameter. + DefaultValue *string `json:"defaultValue,omitempty" tf:"default_value,omitempty"` + + // A description of this Template Parameter. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // One or more example blocks as defined above. + Example []RepresentationFormParameterExampleObservation `json:"example,omitempty" tf:"example,omitempty"` + + // The Name of this Template Parameter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Is this Template Parameter Required? + Required *bool `json:"required,omitempty" tf:"required,omitempty"` + + // The name of the Schema. + SchemaID *string `json:"schemaId,omitempty" tf:"schema_id,omitempty"` + + // The Type of this Template Parameter, such as a string. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // The type name defined by the Schema. + TypeName *string `json:"typeName,omitempty" tf:"type_name,omitempty"` + + // One or more acceptable values for this Template Parameter. + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type RepresentationFormParameterParameters struct { + + // The default value for this Template Parameter. + // +kubebuilder:validation:Optional + DefaultValue *string `json:"defaultValue,omitempty" tf:"default_value,omitempty"` + + // A description of this Template Parameter. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // One or more example blocks as defined above. 
+ // +kubebuilder:validation:Optional + Example []RepresentationFormParameterExampleParameters `json:"example,omitempty" tf:"example,omitempty"` + + // The Name of this Template Parameter. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Is this Template Parameter Required? + // +kubebuilder:validation:Optional + Required *bool `json:"required" tf:"required,omitempty"` + + // The name of the Schema. + // +kubebuilder:validation:Optional + SchemaID *string `json:"schemaId,omitempty" tf:"schema_id,omitempty"` + + // The Type of this Template Parameter, such as a string. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` + + // The type name defined by the Schema. + // +kubebuilder:validation:Optional + TypeName *string `json:"typeName,omitempty" tf:"type_name,omitempty"` + + // One or more acceptable values for this Template Parameter. + // +kubebuilder:validation:Optional + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type RepresentationInitParameters struct { + + // The Content Type of this representation, such as application/json. + ContentType *string `json:"contentType,omitempty" tf:"content_type,omitempty"` + + // One or more example blocks as defined above. + Example []RepresentationExampleInitParameters `json:"example,omitempty" tf:"example,omitempty"` + + // One or more form_parameter block as defined above. + FormParameter []FormParameterInitParameters `json:"formParameter,omitempty" tf:"form_parameter,omitempty"` + + // The name of the Schema. + SchemaID *string `json:"schemaId,omitempty" tf:"schema_id,omitempty"` + + // The type name defined by the Schema. + TypeName *string `json:"typeName,omitempty" tf:"type_name,omitempty"` +} + +type RepresentationObservation struct { + + // The Content Type of this representation, such as application/json. 
+ ContentType *string `json:"contentType,omitempty" tf:"content_type,omitempty"` + + // One or more example blocks as defined above. + Example []RepresentationExampleObservation `json:"example,omitempty" tf:"example,omitempty"` + + // One or more form_parameter block as defined above. + FormParameter []FormParameterObservation `json:"formParameter,omitempty" tf:"form_parameter,omitempty"` + + // The name of the Schema. + SchemaID *string `json:"schemaId,omitempty" tf:"schema_id,omitempty"` + + // The type name defined by the Schema. + TypeName *string `json:"typeName,omitempty" tf:"type_name,omitempty"` +} + +type RepresentationParameters struct { + + // The Content Type of this representation, such as application/json. + // +kubebuilder:validation:Optional + ContentType *string `json:"contentType" tf:"content_type,omitempty"` + + // One or more example blocks as defined above. + // +kubebuilder:validation:Optional + Example []RepresentationExampleParameters `json:"example,omitempty" tf:"example,omitempty"` + + // One or more form_parameter block as defined above. + // +kubebuilder:validation:Optional + FormParameter []FormParameterParameters `json:"formParameter,omitempty" tf:"form_parameter,omitempty"` + + // The name of the Schema. + // +kubebuilder:validation:Optional + SchemaID *string `json:"schemaId,omitempty" tf:"schema_id,omitempty"` + + // The type name defined by the Schema. + // +kubebuilder:validation:Optional + TypeName *string `json:"typeName,omitempty" tf:"type_name,omitempty"` +} + +type RequestInitParameters struct { + + // A description of the HTTP Request, which may include HTML tags. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // One or more header blocks as defined above. + Header []HeaderInitParameters `json:"header,omitempty" tf:"header,omitempty"` + + // One or more query_parameter blocks as defined above. 
+ QueryParameter []QueryParameterInitParameters `json:"queryParameter,omitempty" tf:"query_parameter,omitempty"` + + // One or more representation blocks as defined below. + Representation []RepresentationInitParameters `json:"representation,omitempty" tf:"representation,omitempty"` +} + +type RequestObservation struct { + + // A description of the HTTP Request, which may include HTML tags. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // One or more header blocks as defined above. + Header []HeaderObservation `json:"header,omitempty" tf:"header,omitempty"` + + // One or more query_parameter blocks as defined above. + QueryParameter []QueryParameterObservation `json:"queryParameter,omitempty" tf:"query_parameter,omitempty"` + + // One or more representation blocks as defined below. + Representation []RepresentationObservation `json:"representation,omitempty" tf:"representation,omitempty"` +} + +type RequestParameters struct { + + // A description of the HTTP Request, which may include HTML tags. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // One or more header blocks as defined above. + // +kubebuilder:validation:Optional + Header []HeaderParameters `json:"header,omitempty" tf:"header,omitempty"` + + // One or more query_parameter blocks as defined above. + // +kubebuilder:validation:Optional + QueryParameter []QueryParameterParameters `json:"queryParameter,omitempty" tf:"query_parameter,omitempty"` + + // One or more representation blocks as defined below. + // +kubebuilder:validation:Optional + Representation []RepresentationParameters `json:"representation,omitempty" tf:"representation,omitempty"` +} + +type ResponseHeaderInitParameters struct { + + // The default value for this Template Parameter. + DefaultValue *string `json:"defaultValue,omitempty" tf:"default_value,omitempty"` + + // A description of this Template Parameter. 
+ Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // One or more example blocks as defined above. + Example []HeaderExampleInitParameters `json:"example,omitempty" tf:"example,omitempty"` + + // The Name of this Template Parameter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Is this Template Parameter Required? + Required *bool `json:"required,omitempty" tf:"required,omitempty"` + + // The name of the Schema. + SchemaID *string `json:"schemaId,omitempty" tf:"schema_id,omitempty"` + + // The Type of this Template Parameter, such as a string. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // The type name defined by the Schema. + TypeName *string `json:"typeName,omitempty" tf:"type_name,omitempty"` + + // One or more acceptable values for this Template Parameter. + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type ResponseHeaderObservation struct { + + // The default value for this Template Parameter. + DefaultValue *string `json:"defaultValue,omitempty" tf:"default_value,omitempty"` + + // A description of this Template Parameter. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // One or more example blocks as defined above. + Example []HeaderExampleObservation `json:"example,omitempty" tf:"example,omitempty"` + + // The Name of this Template Parameter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Is this Template Parameter Required? + Required *bool `json:"required,omitempty" tf:"required,omitempty"` + + // The name of the Schema. + SchemaID *string `json:"schemaId,omitempty" tf:"schema_id,omitempty"` + + // The Type of this Template Parameter, such as a string. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // The type name defined by the Schema. 
+ TypeName *string `json:"typeName,omitempty" tf:"type_name,omitempty"` + + // One or more acceptable values for this Template Parameter. + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type ResponseHeaderParameters struct { + + // The default value for this Template Parameter. + // +kubebuilder:validation:Optional + DefaultValue *string `json:"defaultValue,omitempty" tf:"default_value,omitempty"` + + // A description of this Template Parameter. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // One or more example blocks as defined above. + // +kubebuilder:validation:Optional + Example []HeaderExampleParameters `json:"example,omitempty" tf:"example,omitempty"` + + // The Name of this Template Parameter. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Is this Template Parameter Required? + // +kubebuilder:validation:Optional + Required *bool `json:"required" tf:"required,omitempty"` + + // The name of the Schema. + // +kubebuilder:validation:Optional + SchemaID *string `json:"schemaId,omitempty" tf:"schema_id,omitempty"` + + // The Type of this Template Parameter, such as a string. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` + + // The type name defined by the Schema. + // +kubebuilder:validation:Optional + TypeName *string `json:"typeName,omitempty" tf:"type_name,omitempty"` + + // One or more acceptable values for this Template Parameter. + // +kubebuilder:validation:Optional + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type ResponseInitParameters struct { + + // A description of the HTTP Response, which may include HTML tags. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // One or more header blocks as defined above. 
+ Header []ResponseHeaderInitParameters `json:"header,omitempty" tf:"header,omitempty"` + + // One or more representation blocks as defined below. + Representation []ResponseRepresentationInitParameters `json:"representation,omitempty" tf:"representation,omitempty"` + + // The HTTP Status Code. + StatusCode *float64 `json:"statusCode,omitempty" tf:"status_code,omitempty"` +} + +type ResponseObservation struct { + + // A description of the HTTP Response, which may include HTML tags. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // One or more header blocks as defined above. + Header []ResponseHeaderObservation `json:"header,omitempty" tf:"header,omitempty"` + + // One or more representation blocks as defined below. + Representation []ResponseRepresentationObservation `json:"representation,omitempty" tf:"representation,omitempty"` + + // The HTTP Status Code. + StatusCode *float64 `json:"statusCode,omitempty" tf:"status_code,omitempty"` +} + +type ResponseParameters struct { + + // A description of the HTTP Response, which may include HTML tags. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // One or more header blocks as defined above. + // +kubebuilder:validation:Optional + Header []ResponseHeaderParameters `json:"header,omitempty" tf:"header,omitempty"` + + // One or more representation blocks as defined below. + // +kubebuilder:validation:Optional + Representation []ResponseRepresentationParameters `json:"representation,omitempty" tf:"representation,omitempty"` + + // The HTTP Status Code. + // +kubebuilder:validation:Optional + StatusCode *float64 `json:"statusCode" tf:"status_code,omitempty"` +} + +type ResponseRepresentationExampleInitParameters struct { + + // A description of this Template Parameter. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A URL that points to the literal example. 
+ ExternalValue *string `json:"externalValue,omitempty" tf:"external_value,omitempty"` + + // The Name of this Template Parameter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A short description for this example. + Summary *string `json:"summary,omitempty" tf:"summary,omitempty"` + + // The example of the representation. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResponseRepresentationExampleObservation struct { + + // A description of this Template Parameter. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A URL that points to the literal example. + ExternalValue *string `json:"externalValue,omitempty" tf:"external_value,omitempty"` + + // The Name of this Template Parameter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A short description for this example. + Summary *string `json:"summary,omitempty" tf:"summary,omitempty"` + + // The example of the representation. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResponseRepresentationExampleParameters struct { + + // A description of this Template Parameter. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A URL that points to the literal example. + // +kubebuilder:validation:Optional + ExternalValue *string `json:"externalValue,omitempty" tf:"external_value,omitempty"` + + // The Name of this Template Parameter. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // A short description for this example. + // +kubebuilder:validation:Optional + Summary *string `json:"summary,omitempty" tf:"summary,omitempty"` + + // The example of the representation. 
+ // +kubebuilder:validation:Optional + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResponseRepresentationInitParameters struct { + + // The Content Type of this representation, such as application/json. + ContentType *string `json:"contentType,omitempty" tf:"content_type,omitempty"` + + // One or more example blocks as defined above. + Example []ResponseRepresentationExampleInitParameters `json:"example,omitempty" tf:"example,omitempty"` + + // One or more form_parameter block as defined above. + FormParameter []RepresentationFormParameterInitParameters `json:"formParameter,omitempty" tf:"form_parameter,omitempty"` + + // The name of the Schema. + SchemaID *string `json:"schemaId,omitempty" tf:"schema_id,omitempty"` + + // The type name defined by the Schema. + TypeName *string `json:"typeName,omitempty" tf:"type_name,omitempty"` +} + +type ResponseRepresentationObservation struct { + + // The Content Type of this representation, such as application/json. + ContentType *string `json:"contentType,omitempty" tf:"content_type,omitempty"` + + // One or more example blocks as defined above. + Example []ResponseRepresentationExampleObservation `json:"example,omitempty" tf:"example,omitempty"` + + // One or more form_parameter block as defined above. + FormParameter []RepresentationFormParameterObservation `json:"formParameter,omitempty" tf:"form_parameter,omitempty"` + + // The name of the Schema. + SchemaID *string `json:"schemaId,omitempty" tf:"schema_id,omitempty"` + + // The type name defined by the Schema. + TypeName *string `json:"typeName,omitempty" tf:"type_name,omitempty"` +} + +type ResponseRepresentationParameters struct { + + // The Content Type of this representation, such as application/json. + // +kubebuilder:validation:Optional + ContentType *string `json:"contentType" tf:"content_type,omitempty"` + + // One or more example blocks as defined above. 
+ // +kubebuilder:validation:Optional + Example []ResponseRepresentationExampleParameters `json:"example,omitempty" tf:"example,omitempty"` + + // One or more form_parameter block as defined above. + // +kubebuilder:validation:Optional + FormParameter []RepresentationFormParameterParameters `json:"formParameter,omitempty" tf:"form_parameter,omitempty"` + + // The name of the Schema. + // +kubebuilder:validation:Optional + SchemaID *string `json:"schemaId,omitempty" tf:"schema_id,omitempty"` + + // The type name defined by the Schema. + // +kubebuilder:validation:Optional + TypeName *string `json:"typeName,omitempty" tf:"type_name,omitempty"` +} + +type TemplateParameterExampleInitParameters struct { + + // A description of this Template Parameter. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A URL that points to the literal example. + ExternalValue *string `json:"externalValue,omitempty" tf:"external_value,omitempty"` + + // The Name of this Template Parameter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A short description for this example. + Summary *string `json:"summary,omitempty" tf:"summary,omitempty"` + + // The example of the representation. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type TemplateParameterExampleObservation struct { + + // A description of this Template Parameter. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A URL that points to the literal example. + ExternalValue *string `json:"externalValue,omitempty" tf:"external_value,omitempty"` + + // The Name of this Template Parameter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A short description for this example. + Summary *string `json:"summary,omitempty" tf:"summary,omitempty"` + + // The example of the representation. 
+ Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type TemplateParameterExampleParameters struct { + + // A description of this Template Parameter. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A URL that points to the literal example. + // +kubebuilder:validation:Optional + ExternalValue *string `json:"externalValue,omitempty" tf:"external_value,omitempty"` + + // The Name of this Template Parameter. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // A short description for this example. + // +kubebuilder:validation:Optional + Summary *string `json:"summary,omitempty" tf:"summary,omitempty"` + + // The example of the representation. + // +kubebuilder:validation:Optional + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type TemplateParameterInitParameters struct { + + // The default value for this Template Parameter. + DefaultValue *string `json:"defaultValue,omitempty" tf:"default_value,omitempty"` + + // A description of this Template Parameter. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // One or more example blocks as defined above. + Example []TemplateParameterExampleInitParameters `json:"example,omitempty" tf:"example,omitempty"` + + // The Name of this Template Parameter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Is this Template Parameter Required? + Required *bool `json:"required,omitempty" tf:"required,omitempty"` + + // The name of the Schema. + SchemaID *string `json:"schemaId,omitempty" tf:"schema_id,omitempty"` + + // The Type of this Template Parameter, such as a string. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // The type name defined by the Schema. + TypeName *string `json:"typeName,omitempty" tf:"type_name,omitempty"` + + // One or more acceptable values for this Template Parameter. 
+ // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type TemplateParameterObservation struct { + + // The default value for this Template Parameter. + DefaultValue *string `json:"defaultValue,omitempty" tf:"default_value,omitempty"` + + // A description of this Template Parameter. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // One or more example blocks as defined above. + Example []TemplateParameterExampleObservation `json:"example,omitempty" tf:"example,omitempty"` + + // The Name of this Template Parameter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Is this Template Parameter Required? + Required *bool `json:"required,omitempty" tf:"required,omitempty"` + + // The name of the Schema. + SchemaID *string `json:"schemaId,omitempty" tf:"schema_id,omitempty"` + + // The Type of this Template Parameter, such as a string. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // The type name defined by the Schema. + TypeName *string `json:"typeName,omitempty" tf:"type_name,omitempty"` + + // One or more acceptable values for this Template Parameter. + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type TemplateParameterParameters struct { + + // The default value for this Template Parameter. + // +kubebuilder:validation:Optional + DefaultValue *string `json:"defaultValue,omitempty" tf:"default_value,omitempty"` + + // A description of this Template Parameter. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // One or more example blocks as defined above. + // +kubebuilder:validation:Optional + Example []TemplateParameterExampleParameters `json:"example,omitempty" tf:"example,omitempty"` + + // The Name of this Template Parameter. 
+ // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Is this Template Parameter Required? + // +kubebuilder:validation:Optional + Required *bool `json:"required" tf:"required,omitempty"` + + // The name of the Schema. + // +kubebuilder:validation:Optional + SchemaID *string `json:"schemaId,omitempty" tf:"schema_id,omitempty"` + + // The Type of this Template Parameter, such as a string. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` + + // The type name defined by the Schema. + // +kubebuilder:validation:Optional + TypeName *string `json:"typeName,omitempty" tf:"type_name,omitempty"` + + // One or more acceptable values for this Template Parameter. + // +kubebuilder:validation:Optional + // +listType=set + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +// APIOperationSpec defines the desired state of APIOperation +type APIOperationSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider APIOperationParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider APIOperationInitParameters `json:"initProvider,omitempty"` +} + +// APIOperationStatus defines the observed state of APIOperation. 
+type APIOperationStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider APIOperationObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// APIOperation is the Schema for the APIOperations API. Manages an API Operation within an API Management Service. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type APIOperation struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.displayName) || (has(self.initProvider) && has(self.initProvider.displayName))",message="spec.forProvider.displayName is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.method) || (has(self.initProvider) && has(self.initProvider.method))",message="spec.forProvider.method is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.urlTemplate) || (has(self.initProvider) && has(self.initProvider.urlTemplate))",message="spec.forProvider.urlTemplate is a required parameter" + Spec APIOperationSpec `json:"spec"` + Status APIOperationStatus 
`json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// APIOperationList contains a list of APIOperations +type APIOperationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []APIOperation `json:"items"` +} + +// Repository type metadata. +var ( + APIOperation_Kind = "APIOperation" + APIOperation_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: APIOperation_Kind}.String() + APIOperation_KindAPIVersion = APIOperation_Kind + "." + CRDGroupVersion.String() + APIOperation_GroupVersionKind = CRDGroupVersion.WithKind(APIOperation_Kind) +) + +func init() { + SchemeBuilder.Register(&APIOperation{}, &APIOperationList{}) +} diff --git a/apis/apimanagement/v1beta2/zz_backend_terraformed.go b/apis/apimanagement/v1beta2/zz_backend_terraformed.go new file mode 100755 index 000000000..b817d9ff0 --- /dev/null +++ b/apis/apimanagement/v1beta2/zz_backend_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Backend +func (mg *Backend) GetTerraformResourceType() string { + return "azurerm_api_management_backend" +} + +// GetConnectionDetailsMapping for this Backend +func (tr *Backend) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"proxy[*].password": "spec.forProvider.proxy[*].passwordSecretRef"} +} + +// GetObservation of this Backend +func (tr *Backend) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Backend +func (tr *Backend) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Backend +func (tr *Backend) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Backend +func (tr *Backend) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Backend +func (tr *Backend) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Backend +func (tr *Backend) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err 
+ } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Backend +func (tr *Backend) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Backend using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Backend) LateInitialize(attrs []byte) (bool, error) { + params := &BackendParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Backend) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/apimanagement/v1beta2/zz_backend_types.go b/apis/apimanagement/v1beta2/zz_backend_types.go new file mode 100755 index 000000000..0ba30ed86 --- /dev/null +++ b/apis/apimanagement/v1beta2/zz_backend_types.go @@ -0,0 +1,460 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AuthorizationInitParameters struct { + + // The authentication Parameter value. + Parameter *string `json:"parameter,omitempty" tf:"parameter,omitempty"` + + // The authentication Scheme name. + Scheme *string `json:"scheme,omitempty" tf:"scheme,omitempty"` +} + +type AuthorizationObservation struct { + + // The authentication Parameter value. + Parameter *string `json:"parameter,omitempty" tf:"parameter,omitempty"` + + // The authentication Scheme name. + Scheme *string `json:"scheme,omitempty" tf:"scheme,omitempty"` +} + +type AuthorizationParameters struct { + + // The authentication Parameter value. + // +kubebuilder:validation:Optional + Parameter *string `json:"parameter,omitempty" tf:"parameter,omitempty"` + + // The authentication Scheme name. + // +kubebuilder:validation:Optional + Scheme *string `json:"scheme,omitempty" tf:"scheme,omitempty"` +} + +type BackendInitParameters struct { + + // A credentials block as documented below. + Credentials *CredentialsInitParameters `json:"credentials,omitempty" tf:"credentials,omitempty"` + + // The description of the backend. 
+ Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The protocol used by the backend host. Possible values are http or soap. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // A proxy block as documented below. + Proxy *BackendProxyInitParameters `json:"proxy,omitempty" tf:"proxy,omitempty"` + + // The management URI of the backend host in an external system. This URI can be the ARM Resource ID of Logic Apps, Function Apps or API Apps, or the management endpoint of a Service Fabric cluster. + ResourceID *string `json:"resourceId,omitempty" tf:"resource_id,omitempty"` + + // A service_fabric_cluster block as documented below. + ServiceFabricCluster *ServiceFabricClusterInitParameters `json:"serviceFabricCluster,omitempty" tf:"service_fabric_cluster,omitempty"` + + // A tls block as documented below. + TLS *TLSInitParameters `json:"tls,omitempty" tf:"tls,omitempty"` + + // The title of the backend. + Title *string `json:"title,omitempty" tf:"title,omitempty"` + + // The URL of the backend host. + URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +type BackendObservation struct { + + // The Name of the API Management Service where this backend should be created. Changing this forces a new resource to be created. + APIManagementName *string `json:"apiManagementName,omitempty" tf:"api_management_name,omitempty"` + + // A credentials block as documented below. + Credentials *CredentialsObservation `json:"credentials,omitempty" tf:"credentials,omitempty"` + + // The description of the backend. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The ID of the API Management API. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The protocol used by the backend host. Possible values are http or soap. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // A proxy block as documented below. 
+ Proxy *BackendProxyObservation `json:"proxy,omitempty" tf:"proxy,omitempty"` + + // The Name of the Resource Group where the API Management Service exists. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // The management URI of the backend host in an external system. This URI can be the ARM Resource ID of Logic Apps, Function Apps or API Apps, or the management endpoint of a Service Fabric cluster. + ResourceID *string `json:"resourceId,omitempty" tf:"resource_id,omitempty"` + + // A service_fabric_cluster block as documented below. + ServiceFabricCluster *ServiceFabricClusterObservation `json:"serviceFabricCluster,omitempty" tf:"service_fabric_cluster,omitempty"` + + // A tls block as documented below. + TLS *TLSObservation `json:"tls,omitempty" tf:"tls,omitempty"` + + // The title of the backend. + Title *string `json:"title,omitempty" tf:"title,omitempty"` + + // The URL of the backend host. + URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +type BackendParameters struct { + + // The Name of the API Management Service where this backend should be created. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.Management + // +kubebuilder:validation:Optional + APIManagementName *string `json:"apiManagementName,omitempty" tf:"api_management_name,omitempty"` + + // Reference to a Management in apimanagement to populate apiManagementName. + // +kubebuilder:validation:Optional + APIManagementNameRef *v1.Reference `json:"apiManagementNameRef,omitempty" tf:"-"` + + // Selector for a Management in apimanagement to populate apiManagementName. + // +kubebuilder:validation:Optional + APIManagementNameSelector *v1.Selector `json:"apiManagementNameSelector,omitempty" tf:"-"` + + // A credentials block as documented below. 
+ // +kubebuilder:validation:Optional + Credentials *CredentialsParameters `json:"credentials,omitempty" tf:"credentials,omitempty"` + + // The description of the backend. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The protocol used by the backend host. Possible values are http or soap. + // +kubebuilder:validation:Optional + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // A proxy block as documented below. + // +kubebuilder:validation:Optional + Proxy *BackendProxyParameters `json:"proxy,omitempty" tf:"proxy,omitempty"` + + // The Name of the Resource Group where the API Management Service exists. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // The management URI of the backend host in an external system. This URI can be the ARM Resource ID of Logic Apps, Function Apps or API Apps, or the management endpoint of a Service Fabric cluster. + // +kubebuilder:validation:Optional + ResourceID *string `json:"resourceId,omitempty" tf:"resource_id,omitempty"` + + // A service_fabric_cluster block as documented below. 
+ // +kubebuilder:validation:Optional + ServiceFabricCluster *ServiceFabricClusterParameters `json:"serviceFabricCluster,omitempty" tf:"service_fabric_cluster,omitempty"` + + // A tls block as documented below. + // +kubebuilder:validation:Optional + TLS *TLSParameters `json:"tls,omitempty" tf:"tls,omitempty"` + + // The title of the backend. + // +kubebuilder:validation:Optional + Title *string `json:"title,omitempty" tf:"title,omitempty"` + + // The URL of the backend host. + // +kubebuilder:validation:Optional + URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +type BackendProxyInitParameters struct { + + // The URL of the proxy server. + URL *string `json:"url,omitempty" tf:"url,omitempty"` + + // The username to connect to the proxy server. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type BackendProxyObservation struct { + + // The URL of the proxy server. + URL *string `json:"url,omitempty" tf:"url,omitempty"` + + // The username to connect to the proxy server. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type BackendProxyParameters struct { + + // The password to connect to the proxy server. + // +kubebuilder:validation:Optional + PasswordSecretRef *v1.SecretKeySelector `json:"passwordSecretRef,omitempty" tf:"-"` + + // The URL of the proxy server. + // +kubebuilder:validation:Optional + URL *string `json:"url" tf:"url,omitempty"` + + // The username to connect to the proxy server. + // +kubebuilder:validation:Optional + Username *string `json:"username" tf:"username,omitempty"` +} + +type CredentialsInitParameters struct { + + // An authorization block as defined below. + Authorization *AuthorizationInitParameters `json:"authorization,omitempty" tf:"authorization,omitempty"` + + // A list of client certificate thumbprints to present to the backend host. The certificates must exist within the API Management Service. 
+ Certificate []*string `json:"certificate,omitempty" tf:"certificate,omitempty"` + + // A mapping of header parameters to pass to the backend host. The keys are the header names and the values are a comma separated string of header values. This is converted to a list before being passed to the API. + // +mapType=granular + Header map[string]*string `json:"header,omitempty" tf:"header,omitempty"` + + // A mapping of query parameters to pass to the backend host. The keys are the query names and the values are a comma separated string of query values. This is converted to a list before being passed to the API. + // +mapType=granular + Query map[string]*string `json:"query,omitempty" tf:"query,omitempty"` +} + +type CredentialsObservation struct { + + // An authorization block as defined below. + Authorization *AuthorizationObservation `json:"authorization,omitempty" tf:"authorization,omitempty"` + + // A list of client certificate thumbprints to present to the backend host. The certificates must exist within the API Management Service. + Certificate []*string `json:"certificate,omitempty" tf:"certificate,omitempty"` + + // A mapping of header parameters to pass to the backend host. The keys are the header names and the values are a comma separated string of header values. This is converted to a list before being passed to the API. + // +mapType=granular + Header map[string]*string `json:"header,omitempty" tf:"header,omitempty"` + + // A mapping of query parameters to pass to the backend host. The keys are the query names and the values are a comma separated string of query values. This is converted to a list before being passed to the API. + // +mapType=granular + Query map[string]*string `json:"query,omitempty" tf:"query,omitempty"` +} + +type CredentialsParameters struct { + + // An authorization block as defined below. 
+ // +kubebuilder:validation:Optional + Authorization *AuthorizationParameters `json:"authorization,omitempty" tf:"authorization,omitempty"` + + // A list of client certificate thumbprints to present to the backend host. The certificates must exist within the API Management Service. + // +kubebuilder:validation:Optional + Certificate []*string `json:"certificate,omitempty" tf:"certificate,omitempty"` + + // A mapping of header parameters to pass to the backend host. The keys are the header names and the values are a comma separated string of header values. This is converted to a list before being passed to the API. + // +kubebuilder:validation:Optional + // +mapType=granular + Header map[string]*string `json:"header,omitempty" tf:"header,omitempty"` + + // A mapping of query parameters to pass to the backend host. The keys are the query names and the values are a comma separated string of query values. This is converted to a list before being passed to the API. + // +kubebuilder:validation:Optional + // +mapType=granular + Query map[string]*string `json:"query,omitempty" tf:"query,omitempty"` +} + +type ServerX509NameInitParameters struct { + + // The thumbprint for the issuer of the certificate. + IssuerCertificateThumbprint *string `json:"issuerCertificateThumbprint,omitempty" tf:"issuer_certificate_thumbprint,omitempty"` + + // The common name of the certificate. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type ServerX509NameObservation struct { + + // The thumbprint for the issuer of the certificate. + IssuerCertificateThumbprint *string `json:"issuerCertificateThumbprint,omitempty" tf:"issuer_certificate_thumbprint,omitempty"` + + // The common name of the certificate. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type ServerX509NameParameters struct { + + // The thumbprint for the issuer of the certificate. 
+ // +kubebuilder:validation:Optional + IssuerCertificateThumbprint *string `json:"issuerCertificateThumbprint" tf:"issuer_certificate_thumbprint,omitempty"` + + // The common name of the certificate. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type ServiceFabricClusterInitParameters struct { + + // The client certificate resource id for the management endpoint. + ClientCertificateID *string `json:"clientCertificateId,omitempty" tf:"client_certificate_id,omitempty"` + + // The client certificate thumbprint for the management endpoint. + ClientCertificateThumbprint *string `json:"clientCertificateThumbprint,omitempty" tf:"client_certificate_thumbprint,omitempty"` + + // A list of cluster management endpoints. + // +listType=set + ManagementEndpoints []*string `json:"managementEndpoints,omitempty" tf:"management_endpoints,omitempty"` + + // The maximum number of retries when attempting resolve the partition. + MaxPartitionResolutionRetries *float64 `json:"maxPartitionResolutionRetries,omitempty" tf:"max_partition_resolution_retries,omitempty"` + + // A list of thumbprints of the server certificates of the Service Fabric cluster. + // +listType=set + ServerCertificateThumbprints []*string `json:"serverCertificateThumbprints,omitempty" tf:"server_certificate_thumbprints,omitempty"` + + // One or more server_x509_name blocks as documented below. + ServerX509Name []ServerX509NameInitParameters `json:"serverX509Name,omitempty" tf:"server_x509_name,omitempty"` +} + +type ServiceFabricClusterObservation struct { + + // The client certificate resource id for the management endpoint. + ClientCertificateID *string `json:"clientCertificateId,omitempty" tf:"client_certificate_id,omitempty"` + + // The client certificate thumbprint for the management endpoint. 
+ ClientCertificateThumbprint *string `json:"clientCertificateThumbprint,omitempty" tf:"client_certificate_thumbprint,omitempty"` + + // A list of cluster management endpoints. + // +listType=set + ManagementEndpoints []*string `json:"managementEndpoints,omitempty" tf:"management_endpoints,omitempty"` + + // The maximum number of retries when attempting resolve the partition. + MaxPartitionResolutionRetries *float64 `json:"maxPartitionResolutionRetries,omitempty" tf:"max_partition_resolution_retries,omitempty"` + + // A list of thumbprints of the server certificates of the Service Fabric cluster. + // +listType=set + ServerCertificateThumbprints []*string `json:"serverCertificateThumbprints,omitempty" tf:"server_certificate_thumbprints,omitempty"` + + // One or more server_x509_name blocks as documented below. + ServerX509Name []ServerX509NameObservation `json:"serverX509Name,omitempty" tf:"server_x509_name,omitempty"` +} + +type ServiceFabricClusterParameters struct { + + // The client certificate resource id for the management endpoint. + // +kubebuilder:validation:Optional + ClientCertificateID *string `json:"clientCertificateId,omitempty" tf:"client_certificate_id,omitempty"` + + // The client certificate thumbprint for the management endpoint. + // +kubebuilder:validation:Optional + ClientCertificateThumbprint *string `json:"clientCertificateThumbprint,omitempty" tf:"client_certificate_thumbprint,omitempty"` + + // A list of cluster management endpoints. + // +kubebuilder:validation:Optional + // +listType=set + ManagementEndpoints []*string `json:"managementEndpoints" tf:"management_endpoints,omitempty"` + + // The maximum number of retries when attempting resolve the partition. + // +kubebuilder:validation:Optional + MaxPartitionResolutionRetries *float64 `json:"maxPartitionResolutionRetries" tf:"max_partition_resolution_retries,omitempty"` + + // A list of thumbprints of the server certificates of the Service Fabric cluster. 
+ // +kubebuilder:validation:Optional + // +listType=set + ServerCertificateThumbprints []*string `json:"serverCertificateThumbprints,omitempty" tf:"server_certificate_thumbprints,omitempty"` + + // One or more server_x509_name blocks as documented below. + // +kubebuilder:validation:Optional + ServerX509Name []ServerX509NameParameters `json:"serverX509Name,omitempty" tf:"server_x509_name,omitempty"` +} + +type TLSInitParameters struct { + + // Flag indicating whether SSL certificate chain validation should be done when using self-signed certificates for the backend host. + ValidateCertificateChain *bool `json:"validateCertificateChain,omitempty" tf:"validate_certificate_chain,omitempty"` + + // Flag indicating whether SSL certificate name validation should be done when using self-signed certificates for the backend host. + ValidateCertificateName *bool `json:"validateCertificateName,omitempty" tf:"validate_certificate_name,omitempty"` +} + +type TLSObservation struct { + + // Flag indicating whether SSL certificate chain validation should be done when using self-signed certificates for the backend host. + ValidateCertificateChain *bool `json:"validateCertificateChain,omitempty" tf:"validate_certificate_chain,omitempty"` + + // Flag indicating whether SSL certificate name validation should be done when using self-signed certificates for the backend host. + ValidateCertificateName *bool `json:"validateCertificateName,omitempty" tf:"validate_certificate_name,omitempty"` +} + +type TLSParameters struct { + + // Flag indicating whether SSL certificate chain validation should be done when using self-signed certificates for the backend host. + // +kubebuilder:validation:Optional + ValidateCertificateChain *bool `json:"validateCertificateChain,omitempty" tf:"validate_certificate_chain,omitempty"` + + // Flag indicating whether SSL certificate name validation should be done when using self-signed certificates for the backend host. 
+ // +kubebuilder:validation:Optional + ValidateCertificateName *bool `json:"validateCertificateName,omitempty" tf:"validate_certificate_name,omitempty"` +} + +// BackendSpec defines the desired state of Backend +type BackendSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider BackendParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider BackendInitParameters `json:"initProvider,omitempty"` +} + +// BackendStatus defines the observed state of Backend. +type BackendStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider BackendObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Backend is the Schema for the Backends API. Manages a backend within an API Management Service. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type Backend struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.protocol) || (has(self.initProvider) && has(self.initProvider.protocol))",message="spec.forProvider.protocol is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.url) || (has(self.initProvider) && has(self.initProvider.url))",message="spec.forProvider.url is a required parameter" + Spec BackendSpec `json:"spec"` + Status BackendStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// BackendList contains a list of Backends +type BackendList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Backend `json:"items"` +} + +// Repository type metadata. +var ( + Backend_Kind = "Backend" + Backend_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Backend_Kind}.String() + Backend_KindAPIVersion = Backend_Kind + "." 
+ CRDGroupVersion.String() + Backend_GroupVersionKind = CRDGroupVersion.WithKind(Backend_Kind) +) + +func init() { + SchemeBuilder.Register(&Backend{}, &BackendList{}) +} diff --git a/apis/apimanagement/v1beta2/zz_diagnostic_terraformed.go b/apis/apimanagement/v1beta2/zz_diagnostic_terraformed.go new file mode 100755 index 000000000..ed9fd1c84 --- /dev/null +++ b/apis/apimanagement/v1beta2/zz_diagnostic_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Diagnostic +func (mg *Diagnostic) GetTerraformResourceType() string { + return "azurerm_api_management_diagnostic" +} + +// GetConnectionDetailsMapping for this Diagnostic +func (tr *Diagnostic) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Diagnostic +func (tr *Diagnostic) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Diagnostic +func (tr *Diagnostic) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Diagnostic +func (tr *Diagnostic) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Diagnostic +func (tr *Diagnostic) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil 
{ + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Diagnostic +func (tr *Diagnostic) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Diagnostic +func (tr *Diagnostic) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Diagnostic +func (tr *Diagnostic) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Diagnostic using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *Diagnostic) LateInitialize(attrs []byte) (bool, error) { + params := &DiagnosticParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Diagnostic) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/apimanagement/v1beta2/zz_diagnostic_types.go b/apis/apimanagement/v1beta2/zz_diagnostic_types.go new file mode 100755 index 000000000..2fb0d0c6a --- /dev/null +++ b/apis/apimanagement/v1beta2/zz_diagnostic_types.go @@ -0,0 +1,764 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type BackendRequestDataMaskingHeadersInitParameters struct { + + // The data masking mode. Possible values are Mask and Hide for query_params. The only possible value is Mask for headers. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // The name of the header or the query parameter to mask. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type BackendRequestDataMaskingHeadersObservation struct { + + // The data masking mode. Possible values are Mask and Hide for query_params. The only possible value is Mask for headers. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // The name of the header or the query parameter to mask. 
+ Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type BackendRequestDataMaskingHeadersParameters struct { + + // The data masking mode. Possible values are Mask and Hide for query_params. The only possible value is Mask for headers. + // +kubebuilder:validation:Optional + Mode *string `json:"mode" tf:"mode,omitempty"` + + // The name of the header or the query parameter to mask. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type BackendRequestDataMaskingInitParameters struct { + + // A headers block as defined below. + Headers []BackendRequestDataMaskingHeadersInitParameters `json:"headers,omitempty" tf:"headers,omitempty"` + + // A query_params block as defined below. + QueryParams []BackendRequestDataMaskingQueryParamsInitParameters `json:"queryParams,omitempty" tf:"query_params,omitempty"` +} + +type BackendRequestDataMaskingObservation struct { + + // A headers block as defined below. + Headers []BackendRequestDataMaskingHeadersObservation `json:"headers,omitempty" tf:"headers,omitempty"` + + // A query_params block as defined below. + QueryParams []BackendRequestDataMaskingQueryParamsObservation `json:"queryParams,omitempty" tf:"query_params,omitempty"` +} + +type BackendRequestDataMaskingParameters struct { + + // A headers block as defined below. + // +kubebuilder:validation:Optional + Headers []BackendRequestDataMaskingHeadersParameters `json:"headers,omitempty" tf:"headers,omitempty"` + + // A query_params block as defined below. + // +kubebuilder:validation:Optional + QueryParams []BackendRequestDataMaskingQueryParamsParameters `json:"queryParams,omitempty" tf:"query_params,omitempty"` +} + +type BackendRequestDataMaskingQueryParamsInitParameters struct { + + // The data masking mode. Possible values are Mask and Hide for query_params. The only possible value is Mask for headers. 
+ Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // The name of the header or the query parameter to mask. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type BackendRequestDataMaskingQueryParamsObservation struct { + + // The data masking mode. Possible values are Mask and Hide for query_params. The only possible value is Mask for headers. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // The name of the header or the query parameter to mask. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type BackendRequestDataMaskingQueryParamsParameters struct { + + // The data masking mode. Possible values are Mask and Hide for query_params. The only possible value is Mask for headers. + // +kubebuilder:validation:Optional + Mode *string `json:"mode" tf:"mode,omitempty"` + + // The name of the header or the query parameter to mask. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type BackendResponseDataMaskingHeadersInitParameters struct { + + // The data masking mode. Possible values are Mask and Hide for query_params. The only possible value is Mask for headers. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // The name of the header or the query parameter to mask. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type BackendResponseDataMaskingHeadersObservation struct { + + // The data masking mode. Possible values are Mask and Hide for query_params. The only possible value is Mask for headers. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // The name of the header or the query parameter to mask. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type BackendResponseDataMaskingHeadersParameters struct { + + // The data masking mode. Possible values are Mask and Hide for query_params. The only possible value is Mask for headers. 
+ // +kubebuilder:validation:Optional + Mode *string `json:"mode" tf:"mode,omitempty"` + + // The name of the header or the query parameter to mask. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type BackendResponseDataMaskingQueryParamsInitParameters struct { + + // The data masking mode. Possible values are Mask and Hide for query_params. The only possible value is Mask for headers. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // The name of the header or the query parameter to mask. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type BackendResponseDataMaskingQueryParamsObservation struct { + + // The data masking mode. Possible values are Mask and Hide for query_params. The only possible value is Mask for headers. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // The name of the header or the query parameter to mask. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type BackendResponseDataMaskingQueryParamsParameters struct { + + // The data masking mode. Possible values are Mask and Hide for query_params. The only possible value is Mask for headers. + // +kubebuilder:validation:Optional + Mode *string `json:"mode" tf:"mode,omitempty"` + + // The name of the header or the query parameter to mask. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type DiagnosticBackendRequestInitParameters struct { + + // Number of payload bytes to log (up to 8192). + BodyBytes *float64 `json:"bodyBytes,omitempty" tf:"body_bytes,omitempty"` + + // A data_masking block as defined below. + DataMasking *BackendRequestDataMaskingInitParameters `json:"dataMasking,omitempty" tf:"data_masking,omitempty"` + + // Specifies a list of headers to log. 
+ // +listType=set + HeadersToLog []*string `json:"headersToLog,omitempty" tf:"headers_to_log,omitempty"` +} + +type DiagnosticBackendRequestObservation struct { + + // Number of payload bytes to log (up to 8192). + BodyBytes *float64 `json:"bodyBytes,omitempty" tf:"body_bytes,omitempty"` + + // A data_masking block as defined below. + DataMasking *BackendRequestDataMaskingObservation `json:"dataMasking,omitempty" tf:"data_masking,omitempty"` + + // Specifies a list of headers to log. + // +listType=set + HeadersToLog []*string `json:"headersToLog,omitempty" tf:"headers_to_log,omitempty"` +} + +type DiagnosticBackendRequestParameters struct { + + // Number of payload bytes to log (up to 8192). + // +kubebuilder:validation:Optional + BodyBytes *float64 `json:"bodyBytes,omitempty" tf:"body_bytes,omitempty"` + + // A data_masking block as defined below. + // +kubebuilder:validation:Optional + DataMasking *BackendRequestDataMaskingParameters `json:"dataMasking,omitempty" tf:"data_masking,omitempty"` + + // Specifies a list of headers to log. + // +kubebuilder:validation:Optional + // +listType=set + HeadersToLog []*string `json:"headersToLog,omitempty" tf:"headers_to_log,omitempty"` +} + +type DiagnosticBackendResponseDataMaskingInitParameters struct { + + // A headers block as defined below. + Headers []BackendResponseDataMaskingHeadersInitParameters `json:"headers,omitempty" tf:"headers,omitempty"` + + // A query_params block as defined below. + QueryParams []BackendResponseDataMaskingQueryParamsInitParameters `json:"queryParams,omitempty" tf:"query_params,omitempty"` +} + +type DiagnosticBackendResponseDataMaskingObservation struct { + + // A headers block as defined below. + Headers []BackendResponseDataMaskingHeadersObservation `json:"headers,omitempty" tf:"headers,omitempty"` + + // A query_params block as defined below. 
+ QueryParams []BackendResponseDataMaskingQueryParamsObservation `json:"queryParams,omitempty" tf:"query_params,omitempty"` +} + +type DiagnosticBackendResponseDataMaskingParameters struct { + + // A headers block as defined below. + // +kubebuilder:validation:Optional + Headers []BackendResponseDataMaskingHeadersParameters `json:"headers,omitempty" tf:"headers,omitempty"` + + // A query_params block as defined below. + // +kubebuilder:validation:Optional + QueryParams []BackendResponseDataMaskingQueryParamsParameters `json:"queryParams,omitempty" tf:"query_params,omitempty"` +} + +type DiagnosticBackendResponseInitParameters struct { + + // Number of payload bytes to log (up to 8192). + BodyBytes *float64 `json:"bodyBytes,omitempty" tf:"body_bytes,omitempty"` + + // A data_masking block as defined below. + DataMasking *DiagnosticBackendResponseDataMaskingInitParameters `json:"dataMasking,omitempty" tf:"data_masking,omitempty"` + + // Specifies a list of headers to log. + // +listType=set + HeadersToLog []*string `json:"headersToLog,omitempty" tf:"headers_to_log,omitempty"` +} + +type DiagnosticBackendResponseObservation struct { + + // Number of payload bytes to log (up to 8192). + BodyBytes *float64 `json:"bodyBytes,omitempty" tf:"body_bytes,omitempty"` + + // A data_masking block as defined below. + DataMasking *DiagnosticBackendResponseDataMaskingObservation `json:"dataMasking,omitempty" tf:"data_masking,omitempty"` + + // Specifies a list of headers to log. + // +listType=set + HeadersToLog []*string `json:"headersToLog,omitempty" tf:"headers_to_log,omitempty"` +} + +type DiagnosticBackendResponseParameters struct { + + // Number of payload bytes to log (up to 8192). + // +kubebuilder:validation:Optional + BodyBytes *float64 `json:"bodyBytes,omitempty" tf:"body_bytes,omitempty"` + + // A data_masking block as defined below. 
+ // +kubebuilder:validation:Optional + DataMasking *DiagnosticBackendResponseDataMaskingParameters `json:"dataMasking,omitempty" tf:"data_masking,omitempty"` + + // Specifies a list of headers to log. + // +kubebuilder:validation:Optional + // +listType=set + HeadersToLog []*string `json:"headersToLog,omitempty" tf:"headers_to_log,omitempty"` +} + +type DiagnosticFrontendRequestDataMaskingHeadersInitParameters struct { + + // The data masking mode. Possible values are Mask and Hide for query_params. The only possible value is Mask for headers. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // The name of the header or the query parameter to mask. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type DiagnosticFrontendRequestDataMaskingHeadersObservation struct { + + // The data masking mode. Possible values are Mask and Hide for query_params. The only possible value is Mask for headers. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // The name of the header or the query parameter to mask. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type DiagnosticFrontendRequestDataMaskingHeadersParameters struct { + + // The data masking mode. Possible values are Mask and Hide for query_params. The only possible value is Mask for headers. + // +kubebuilder:validation:Optional + Mode *string `json:"mode" tf:"mode,omitempty"` + + // The name of the header or the query parameter to mask. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type DiagnosticFrontendRequestDataMaskingInitParameters struct { + + // A headers block as defined below. + Headers []DiagnosticFrontendRequestDataMaskingHeadersInitParameters `json:"headers,omitempty" tf:"headers,omitempty"` + + // A query_params block as defined below. 
+ QueryParams []DiagnosticFrontendRequestDataMaskingQueryParamsInitParameters `json:"queryParams,omitempty" tf:"query_params,omitempty"` +} + +type DiagnosticFrontendRequestDataMaskingObservation struct { + + // A headers block as defined below. + Headers []DiagnosticFrontendRequestDataMaskingHeadersObservation `json:"headers,omitempty" tf:"headers,omitempty"` + + // A query_params block as defined below. + QueryParams []DiagnosticFrontendRequestDataMaskingQueryParamsObservation `json:"queryParams,omitempty" tf:"query_params,omitempty"` +} + +type DiagnosticFrontendRequestDataMaskingParameters struct { + + // A headers block as defined below. + // +kubebuilder:validation:Optional + Headers []DiagnosticFrontendRequestDataMaskingHeadersParameters `json:"headers,omitempty" tf:"headers,omitempty"` + + // A query_params block as defined below. + // +kubebuilder:validation:Optional + QueryParams []DiagnosticFrontendRequestDataMaskingQueryParamsParameters `json:"queryParams,omitempty" tf:"query_params,omitempty"` +} + +type DiagnosticFrontendRequestDataMaskingQueryParamsInitParameters struct { + + // The data masking mode. Possible values are Mask and Hide for query_params. The only possible value is Mask for headers. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // The name of the header or the query parameter to mask. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type DiagnosticFrontendRequestDataMaskingQueryParamsObservation struct { + + // The data masking mode. Possible values are Mask and Hide for query_params. The only possible value is Mask for headers. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // The name of the header or the query parameter to mask. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type DiagnosticFrontendRequestDataMaskingQueryParamsParameters struct { + + // The data masking mode. Possible values are Mask and Hide for query_params. 
The only possible value is Mask for headers. + // +kubebuilder:validation:Optional + Mode *string `json:"mode" tf:"mode,omitempty"` + + // The name of the header or the query parameter to mask. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type DiagnosticFrontendRequestInitParameters struct { + + // Number of payload bytes to log (up to 8192). + BodyBytes *float64 `json:"bodyBytes,omitempty" tf:"body_bytes,omitempty"` + + // A data_masking block as defined below. + DataMasking *DiagnosticFrontendRequestDataMaskingInitParameters `json:"dataMasking,omitempty" tf:"data_masking,omitempty"` + + // Specifies a list of headers to log. + // +listType=set + HeadersToLog []*string `json:"headersToLog,omitempty" tf:"headers_to_log,omitempty"` +} + +type DiagnosticFrontendRequestObservation struct { + + // Number of payload bytes to log (up to 8192). + BodyBytes *float64 `json:"bodyBytes,omitempty" tf:"body_bytes,omitempty"` + + // A data_masking block as defined below. + DataMasking *DiagnosticFrontendRequestDataMaskingObservation `json:"dataMasking,omitempty" tf:"data_masking,omitempty"` + + // Specifies a list of headers to log. + // +listType=set + HeadersToLog []*string `json:"headersToLog,omitempty" tf:"headers_to_log,omitempty"` +} + +type DiagnosticFrontendRequestParameters struct { + + // Number of payload bytes to log (up to 8192). + // +kubebuilder:validation:Optional + BodyBytes *float64 `json:"bodyBytes,omitempty" tf:"body_bytes,omitempty"` + + // A data_masking block as defined below. + // +kubebuilder:validation:Optional + DataMasking *DiagnosticFrontendRequestDataMaskingParameters `json:"dataMasking,omitempty" tf:"data_masking,omitempty"` + + // Specifies a list of headers to log. 
+ // +kubebuilder:validation:Optional + // +listType=set + HeadersToLog []*string `json:"headersToLog,omitempty" tf:"headers_to_log,omitempty"` +} + +type DiagnosticFrontendResponseDataMaskingHeadersInitParameters struct { + + // The data masking mode. Possible values are Mask and Hide for query_params. The only possible value is Mask for headers. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // The name of the header or the query parameter to mask. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type DiagnosticFrontendResponseDataMaskingHeadersObservation struct { + + // The data masking mode. Possible values are Mask and Hide for query_params. The only possible value is Mask for headers. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // The name of the header or the query parameter to mask. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type DiagnosticFrontendResponseDataMaskingHeadersParameters struct { + + // The data masking mode. Possible values are Mask and Hide for query_params. The only possible value is Mask for headers. + // +kubebuilder:validation:Optional + Mode *string `json:"mode" tf:"mode,omitempty"` + + // The name of the header or the query parameter to mask. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type DiagnosticFrontendResponseDataMaskingInitParameters struct { + + // A headers block as defined below. + Headers []DiagnosticFrontendResponseDataMaskingHeadersInitParameters `json:"headers,omitempty" tf:"headers,omitempty"` + + // A query_params block as defined below. + QueryParams []DiagnosticFrontendResponseDataMaskingQueryParamsInitParameters `json:"queryParams,omitempty" tf:"query_params,omitempty"` +} + +type DiagnosticFrontendResponseDataMaskingObservation struct { + + // A headers block as defined below. 
+ Headers []DiagnosticFrontendResponseDataMaskingHeadersObservation `json:"headers,omitempty" tf:"headers,omitempty"` + + // A query_params block as defined below. + QueryParams []DiagnosticFrontendResponseDataMaskingQueryParamsObservation `json:"queryParams,omitempty" tf:"query_params,omitempty"` +} + +type DiagnosticFrontendResponseDataMaskingParameters struct { + + // A headers block as defined below. + // +kubebuilder:validation:Optional + Headers []DiagnosticFrontendResponseDataMaskingHeadersParameters `json:"headers,omitempty" tf:"headers,omitempty"` + + // A query_params block as defined below. + // +kubebuilder:validation:Optional + QueryParams []DiagnosticFrontendResponseDataMaskingQueryParamsParameters `json:"queryParams,omitempty" tf:"query_params,omitempty"` +} + +type DiagnosticFrontendResponseDataMaskingQueryParamsInitParameters struct { + + // The data masking mode. Possible values are Mask and Hide for query_params. The only possible value is Mask for headers. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // The name of the header or the query parameter to mask. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type DiagnosticFrontendResponseDataMaskingQueryParamsObservation struct { + + // The data masking mode. Possible values are Mask and Hide for query_params. The only possible value is Mask for headers. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // The name of the header or the query parameter to mask. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type DiagnosticFrontendResponseDataMaskingQueryParamsParameters struct { + + // The data masking mode. Possible values are Mask and Hide for query_params. The only possible value is Mask for headers. + // +kubebuilder:validation:Optional + Mode *string `json:"mode" tf:"mode,omitempty"` + + // The name of the header or the query parameter to mask. 
+ // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type DiagnosticFrontendResponseInitParameters struct { + + // Number of payload bytes to log (up to 8192). + BodyBytes *float64 `json:"bodyBytes,omitempty" tf:"body_bytes,omitempty"` + + // A data_masking block as defined below. + DataMasking *DiagnosticFrontendResponseDataMaskingInitParameters `json:"dataMasking,omitempty" tf:"data_masking,omitempty"` + + // Specifies a list of headers to log. + // +listType=set + HeadersToLog []*string `json:"headersToLog,omitempty" tf:"headers_to_log,omitempty"` +} + +type DiagnosticFrontendResponseObservation struct { + + // Number of payload bytes to log (up to 8192). + BodyBytes *float64 `json:"bodyBytes,omitempty" tf:"body_bytes,omitempty"` + + // A data_masking block as defined below. + DataMasking *DiagnosticFrontendResponseDataMaskingObservation `json:"dataMasking,omitempty" tf:"data_masking,omitempty"` + + // Specifies a list of headers to log. + // +listType=set + HeadersToLog []*string `json:"headersToLog,omitempty" tf:"headers_to_log,omitempty"` +} + +type DiagnosticFrontendResponseParameters struct { + + // Number of payload bytes to log (up to 8192). + // +kubebuilder:validation:Optional + BodyBytes *float64 `json:"bodyBytes,omitempty" tf:"body_bytes,omitempty"` + + // A data_masking block as defined below. + // +kubebuilder:validation:Optional + DataMasking *DiagnosticFrontendResponseDataMaskingParameters `json:"dataMasking,omitempty" tf:"data_masking,omitempty"` + + // Specifies a list of headers to log. + // +kubebuilder:validation:Optional + // +listType=set + HeadersToLog []*string `json:"headersToLog,omitempty" tf:"headers_to_log,omitempty"` +} + +type DiagnosticInitParameters struct { + + // The id of the target API Management Logger where the API Management Diagnostic should be saved. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.Logger + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + APIManagementLoggerID *string `json:"apiManagementLoggerId,omitempty" tf:"api_management_logger_id,omitempty"` + + // Reference to a Logger in apimanagement to populate apiManagementLoggerId. + // +kubebuilder:validation:Optional + APIManagementLoggerIDRef *v1.Reference `json:"apiManagementLoggerIdRef,omitempty" tf:"-"` + + // Selector for a Logger in apimanagement to populate apiManagementLoggerId. + // +kubebuilder:validation:Optional + APIManagementLoggerIDSelector *v1.Selector `json:"apiManagementLoggerIdSelector,omitempty" tf:"-"` + + // Always log errors. Send telemetry if there is an erroneous condition, regardless of sampling settings. + AlwaysLogErrors *bool `json:"alwaysLogErrors,omitempty" tf:"always_log_errors,omitempty"` + + // A backend_request block as defined below. + BackendRequest *DiagnosticBackendRequestInitParameters `json:"backendRequest,omitempty" tf:"backend_request,omitempty"` + + // A backend_response block as defined below. + BackendResponse *DiagnosticBackendResponseInitParameters `json:"backendResponse,omitempty" tf:"backend_response,omitempty"` + + // A frontend_request block as defined below. + FrontendRequest *DiagnosticFrontendRequestInitParameters `json:"frontendRequest,omitempty" tf:"frontend_request,omitempty"` + + // A frontend_response block as defined below. + FrontendResponse *DiagnosticFrontendResponseInitParameters `json:"frontendResponse,omitempty" tf:"frontend_response,omitempty"` + + // The HTTP Correlation Protocol to use. Possible values are None, Legacy or W3C. + HTTPCorrelationProtocol *string `json:"httpCorrelationProtocol,omitempty" tf:"http_correlation_protocol,omitempty"` + + // Log client IP address. 
+ LogClientIP *bool `json:"logClientIp,omitempty" tf:"log_client_ip,omitempty"` + + // The format of the Operation Name for Application Insights telemetries. Possible values are Name, and Url. Defaults to Name. + OperationNameFormat *string `json:"operationNameFormat,omitempty" tf:"operation_name_format,omitempty"` + + // Sampling (%). For high traffic APIs, please read this documentation to understand performance implications and log sampling. Valid values are between 0.0 and 100.0. + SamplingPercentage *float64 `json:"samplingPercentage,omitempty" tf:"sampling_percentage,omitempty"` + + // Logging verbosity. Possible values are verbose, information or error. + Verbosity *string `json:"verbosity,omitempty" tf:"verbosity,omitempty"` +} + +type DiagnosticObservation struct { + + // The id of the target API Management Logger where the API Management Diagnostic should be saved. + APIManagementLoggerID *string `json:"apiManagementLoggerId,omitempty" tf:"api_management_logger_id,omitempty"` + + // The Name of the API Management Service where this Diagnostic should be created. Changing this forces a new resource to be created. + APIManagementName *string `json:"apiManagementName,omitempty" tf:"api_management_name,omitempty"` + + // Always log errors. Send telemetry if there is an erroneous condition, regardless of sampling settings. + AlwaysLogErrors *bool `json:"alwaysLogErrors,omitempty" tf:"always_log_errors,omitempty"` + + // A backend_request block as defined below. + BackendRequest *DiagnosticBackendRequestObservation `json:"backendRequest,omitempty" tf:"backend_request,omitempty"` + + // A backend_response block as defined below. + BackendResponse *DiagnosticBackendResponseObservation `json:"backendResponse,omitempty" tf:"backend_response,omitempty"` + + // A frontend_request block as defined below. 
+ FrontendRequest *DiagnosticFrontendRequestObservation `json:"frontendRequest,omitempty" tf:"frontend_request,omitempty"` + + // A frontend_response block as defined below. + FrontendResponse *DiagnosticFrontendResponseObservation `json:"frontendResponse,omitempty" tf:"frontend_response,omitempty"` + + // The HTTP Correlation Protocol to use. Possible values are None, Legacy or W3C. + HTTPCorrelationProtocol *string `json:"httpCorrelationProtocol,omitempty" tf:"http_correlation_protocol,omitempty"` + + // The ID of the API Management Diagnostic. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Log client IP address. + LogClientIP *bool `json:"logClientIp,omitempty" tf:"log_client_ip,omitempty"` + + // The format of the Operation Name for Application Insights telemetries. Possible values are Name, and Url. Defaults to Name. + OperationNameFormat *string `json:"operationNameFormat,omitempty" tf:"operation_name_format,omitempty"` + + // The Name of the Resource Group where the API Management Service exists. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Sampling (%). For high traffic APIs, please read this documentation to understand performance implications and log sampling. Valid values are between 0.0 and 100.0. + SamplingPercentage *float64 `json:"samplingPercentage,omitempty" tf:"sampling_percentage,omitempty"` + + // Logging verbosity. Possible values are verbose, information or error. + Verbosity *string `json:"verbosity,omitempty" tf:"verbosity,omitempty"` +} + +type DiagnosticParameters struct { + + // The id of the target API Management Logger where the API Management Diagnostic should be saved. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.Logger + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + APIManagementLoggerID *string `json:"apiManagementLoggerId,omitempty" tf:"api_management_logger_id,omitempty"` + + // Reference to a Logger in apimanagement to populate apiManagementLoggerId. + // +kubebuilder:validation:Optional + APIManagementLoggerIDRef *v1.Reference `json:"apiManagementLoggerIdRef,omitempty" tf:"-"` + + // Selector for a Logger in apimanagement to populate apiManagementLoggerId. + // +kubebuilder:validation:Optional + APIManagementLoggerIDSelector *v1.Selector `json:"apiManagementLoggerIdSelector,omitempty" tf:"-"` + + // The Name of the API Management Service where this Diagnostic should be created. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.Management + // +kubebuilder:validation:Optional + APIManagementName *string `json:"apiManagementName,omitempty" tf:"api_management_name,omitempty"` + + // Reference to a Management in apimanagement to populate apiManagementName. + // +kubebuilder:validation:Optional + APIManagementNameRef *v1.Reference `json:"apiManagementNameRef,omitempty" tf:"-"` + + // Selector for a Management in apimanagement to populate apiManagementName. + // +kubebuilder:validation:Optional + APIManagementNameSelector *v1.Selector `json:"apiManagementNameSelector,omitempty" tf:"-"` + + // Always log errors. Send telemetry if there is an erroneous condition, regardless of sampling settings. + // +kubebuilder:validation:Optional + AlwaysLogErrors *bool `json:"alwaysLogErrors,omitempty" tf:"always_log_errors,omitempty"` + + // A backend_request block as defined below. 
+ // +kubebuilder:validation:Optional + BackendRequest *DiagnosticBackendRequestParameters `json:"backendRequest,omitempty" tf:"backend_request,omitempty"` + + // A backend_response block as defined below. + // +kubebuilder:validation:Optional + BackendResponse *DiagnosticBackendResponseParameters `json:"backendResponse,omitempty" tf:"backend_response,omitempty"` + + // A frontend_request block as defined below. + // +kubebuilder:validation:Optional + FrontendRequest *DiagnosticFrontendRequestParameters `json:"frontendRequest,omitempty" tf:"frontend_request,omitempty"` + + // A frontend_response block as defined below. + // +kubebuilder:validation:Optional + FrontendResponse *DiagnosticFrontendResponseParameters `json:"frontendResponse,omitempty" tf:"frontend_response,omitempty"` + + // The HTTP Correlation Protocol to use. Possible values are None, Legacy or W3C. + // +kubebuilder:validation:Optional + HTTPCorrelationProtocol *string `json:"httpCorrelationProtocol,omitempty" tf:"http_correlation_protocol,omitempty"` + + // Log client IP address. + // +kubebuilder:validation:Optional + LogClientIP *bool `json:"logClientIp,omitempty" tf:"log_client_ip,omitempty"` + + // The format of the Operation Name for Application Insights telemetries. Possible values are Name, and Url. Defaults to Name. + // +kubebuilder:validation:Optional + OperationNameFormat *string `json:"operationNameFormat,omitempty" tf:"operation_name_format,omitempty"` + + // The Name of the Resource Group where the API Management Service exists. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. 
+ // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // Sampling (%). For high traffic APIs, please read this documentation to understand performance implications and log sampling. Valid values are between 0.0 and 100.0. + // +kubebuilder:validation:Optional + SamplingPercentage *float64 `json:"samplingPercentage,omitempty" tf:"sampling_percentage,omitempty"` + + // Logging verbosity. Possible values are verbose, information or error. + // +kubebuilder:validation:Optional + Verbosity *string `json:"verbosity,omitempty" tf:"verbosity,omitempty"` +} + +// DiagnosticSpec defines the desired state of Diagnostic +type DiagnosticSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider DiagnosticParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider DiagnosticInitParameters `json:"initProvider,omitempty"` +} + +// DiagnosticStatus defines the observed state of Diagnostic. 
+type DiagnosticStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider DiagnosticObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Diagnostic is the Schema for the Diagnostics API. Manages an API Management Service Diagnostic. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type Diagnostic struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec DiagnosticSpec `json:"spec"` + Status DiagnosticStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// DiagnosticList contains a list of Diagnostics +type DiagnosticList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Diagnostic `json:"items"` +} + +// Repository type metadata. +var ( + Diagnostic_Kind = "Diagnostic" + Diagnostic_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Diagnostic_Kind}.String() + Diagnostic_KindAPIVersion = Diagnostic_Kind + "." 
+ CRDGroupVersion.String() + Diagnostic_GroupVersionKind = CRDGroupVersion.WithKind(Diagnostic_Kind) +) + +func init() { + SchemeBuilder.Register(&Diagnostic{}, &DiagnosticList{}) +} diff --git a/apis/apimanagement/v1beta2/zz_gateway_terraformed.go b/apis/apimanagement/v1beta2/zz_gateway_terraformed.go new file mode 100755 index 000000000..d80ddf918 --- /dev/null +++ b/apis/apimanagement/v1beta2/zz_gateway_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Gateway +func (mg *Gateway) GetTerraformResourceType() string { + return "azurerm_api_management_gateway" +} + +// GetConnectionDetailsMapping for this Gateway +func (tr *Gateway) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Gateway +func (tr *Gateway) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Gateway +func (tr *Gateway) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Gateway +func (tr *Gateway) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Gateway +func (tr *Gateway) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := 
map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Gateway +func (tr *Gateway) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Gateway +func (tr *Gateway) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Gateway +func (tr *Gateway) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Gateway using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *Gateway) LateInitialize(attrs []byte) (bool, error) { + params := &GatewayParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Gateway) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/apimanagement/v1beta2/zz_gateway_types.go b/apis/apimanagement/v1beta2/zz_gateway_types.go new file mode 100755 index 000000000..64c879764 --- /dev/null +++ b/apis/apimanagement/v1beta2/zz_gateway_types.go @@ -0,0 +1,186 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type GatewayInitParameters struct { + + // The ID of the API Management Resource in which the gateway will be created. Changing this forces a new API Management Gateway resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.Management + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + APIManagementID *string `json:"apiManagementId,omitempty" tf:"api_management_id,omitempty"` + + // Reference to a Management in apimanagement to populate apiManagementId. 
+ // +kubebuilder:validation:Optional + APIManagementIDRef *v1.Reference `json:"apiManagementIdRef,omitempty" tf:"-"` + + // Selector for a Management in apimanagement to populate apiManagementId. + // +kubebuilder:validation:Optional + APIManagementIDSelector *v1.Selector `json:"apiManagementIdSelector,omitempty" tf:"-"` + + // The description of the API Management Gateway. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A location_data block as documented below. + LocationData *LocationDataInitParameters `json:"locationData,omitempty" tf:"location_data,omitempty"` +} + +type GatewayObservation struct { + + // The ID of the API Management Resource in which the gateway will be created. Changing this forces a new API Management Gateway resource to be created. + APIManagementID *string `json:"apiManagementId,omitempty" tf:"api_management_id,omitempty"` + + // The description of the API Management Gateway. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The ID of the API Management Gateway. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A location_data block as documented below. + LocationData *LocationDataObservation `json:"locationData,omitempty" tf:"location_data,omitempty"` +} + +type GatewayParameters struct { + + // The ID of the API Management Resource in which the gateway will be created. Changing this forces a new API Management Gateway resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.Management + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + APIManagementID *string `json:"apiManagementId,omitempty" tf:"api_management_id,omitempty"` + + // Reference to a Management in apimanagement to populate apiManagementId. 
+ // +kubebuilder:validation:Optional + APIManagementIDRef *v1.Reference `json:"apiManagementIdRef,omitempty" tf:"-"` + + // Selector for a Management in apimanagement to populate apiManagementId. + // +kubebuilder:validation:Optional + APIManagementIDSelector *v1.Selector `json:"apiManagementIdSelector,omitempty" tf:"-"` + + // The description of the API Management Gateway. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A location_data block as documented below. + // +kubebuilder:validation:Optional + LocationData *LocationDataParameters `json:"locationData,omitempty" tf:"location_data,omitempty"` +} + +type LocationDataInitParameters struct { + + // The city or locality where the resource is located. + City *string `json:"city,omitempty" tf:"city,omitempty"` + + // The district, state, or province where the resource is located. + District *string `json:"district,omitempty" tf:"district,omitempty"` + + // A canonical name for the geographic or physical location. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The country or region where the resource is located. + Region *string `json:"region,omitempty" tf:"region,omitempty"` +} + +type LocationDataObservation struct { + + // The city or locality where the resource is located. + City *string `json:"city,omitempty" tf:"city,omitempty"` + + // The district, state, or province where the resource is located. + District *string `json:"district,omitempty" tf:"district,omitempty"` + + // A canonical name for the geographic or physical location. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The country or region where the resource is located. + Region *string `json:"region,omitempty" tf:"region,omitempty"` +} + +type LocationDataParameters struct { + + // The city or locality where the resource is located. 
+ // +kubebuilder:validation:Optional + City *string `json:"city,omitempty" tf:"city,omitempty"` + + // The district, state, or province where the resource is located. + // +kubebuilder:validation:Optional + District *string `json:"district,omitempty" tf:"district,omitempty"` + + // A canonical name for the geographic or physical location. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The country or region where the resource is located. + // +kubebuilder:validation:Optional + Region *string `json:"region,omitempty" tf:"region,omitempty"` +} + +// GatewaySpec defines the desired state of Gateway +type GatewaySpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider GatewayParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider GatewayInitParameters `json:"initProvider,omitempty"` +} + +// GatewayStatus defines the observed state of Gateway. +type GatewayStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider GatewayObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Gateway is the Schema for the Gateways API. Manages an API Management Gateway. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type Gateway struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.locationData) || (has(self.initProvider) && has(self.initProvider.locationData))",message="spec.forProvider.locationData is a required parameter" + Spec GatewaySpec `json:"spec"` + Status GatewayStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// GatewayList contains a list of Gateways +type GatewayList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Gateway `json:"items"` +} + +// Repository type metadata. +var ( + Gateway_Kind = "Gateway" + Gateway_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Gateway_Kind}.String() + Gateway_KindAPIVersion = Gateway_Kind + "." 
+ CRDGroupVersion.String() + Gateway_GroupVersionKind = CRDGroupVersion.WithKind(Gateway_Kind) +) + +func init() { + SchemeBuilder.Register(&Gateway{}, &GatewayList{}) +} diff --git a/apis/apimanagement/v1beta2/zz_generated.conversion_hubs.go b/apis/apimanagement/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 000000000..6d654b3b3 --- /dev/null +++ b/apis/apimanagement/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *API) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *APIDiagnostic) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *APIOperation) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Backend) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Diagnostic) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Gateway) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Logger) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Management) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *NamedValue) Hub() {} diff --git a/apis/apimanagement/v1beta2/zz_generated.deepcopy.go b/apis/apimanagement/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..f9c42aeff --- /dev/null +++ b/apis/apimanagement/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,12231 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *API) DeepCopyInto(out *API) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new API. +func (in *API) DeepCopy() *API { + if in == nil { + return nil + } + out := new(API) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *API) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIDiagnostic) DeepCopyInto(out *APIDiagnostic) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIDiagnostic. +func (in *APIDiagnostic) DeepCopy() *APIDiagnostic { + if in == nil { + return nil + } + out := new(APIDiagnostic) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *APIDiagnostic) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *APIDiagnosticInitParameters) DeepCopyInto(out *APIDiagnosticInitParameters) { + *out = *in + if in.APIManagementLoggerID != nil { + in, out := &in.APIManagementLoggerID, &out.APIManagementLoggerID + *out = new(string) + **out = **in + } + if in.APIManagementLoggerIDRef != nil { + in, out := &in.APIManagementLoggerIDRef, &out.APIManagementLoggerIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.APIManagementLoggerIDSelector != nil { + in, out := &in.APIManagementLoggerIDSelector, &out.APIManagementLoggerIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.AlwaysLogErrors != nil { + in, out := &in.AlwaysLogErrors, &out.AlwaysLogErrors + *out = new(bool) + **out = **in + } + if in.BackendRequest != nil { + in, out := &in.BackendRequest, &out.BackendRequest + *out = new(BackendRequestInitParameters) + (*in).DeepCopyInto(*out) + } + if in.BackendResponse != nil { + in, out := &in.BackendResponse, &out.BackendResponse + *out = new(BackendResponseInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FrontendRequest != nil { + in, out := &in.FrontendRequest, &out.FrontendRequest + *out = new(FrontendRequestInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FrontendResponse != nil { + in, out := &in.FrontendResponse, &out.FrontendResponse + *out = new(FrontendResponseInitParameters) + (*in).DeepCopyInto(*out) + } + if in.HTTPCorrelationProtocol != nil { + in, out := &in.HTTPCorrelationProtocol, &out.HTTPCorrelationProtocol + *out = new(string) + **out = **in + } + if in.LogClientIP != nil { + in, out := &in.LogClientIP, &out.LogClientIP + *out = new(bool) + **out = **in + } + if in.OperationNameFormat != nil { + in, out := &in.OperationNameFormat, &out.OperationNameFormat + *out = new(string) + **out = **in + } + if in.SamplingPercentage != nil { + in, out := &in.SamplingPercentage, &out.SamplingPercentage + *out = new(float64) + **out = **in + } + if in.Verbosity != nil { + in, out := &in.Verbosity, &out.Verbosity 
+ *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIDiagnosticInitParameters. +func (in *APIDiagnosticInitParameters) DeepCopy() *APIDiagnosticInitParameters { + if in == nil { + return nil + } + out := new(APIDiagnosticInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIDiagnosticList) DeepCopyInto(out *APIDiagnosticList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]APIDiagnostic, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIDiagnosticList. +func (in *APIDiagnosticList) DeepCopy() *APIDiagnosticList { + if in == nil { + return nil + } + out := new(APIDiagnosticList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *APIDiagnosticList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *APIDiagnosticObservation) DeepCopyInto(out *APIDiagnosticObservation) { + *out = *in + if in.APIManagementLoggerID != nil { + in, out := &in.APIManagementLoggerID, &out.APIManagementLoggerID + *out = new(string) + **out = **in + } + if in.APIManagementName != nil { + in, out := &in.APIManagementName, &out.APIManagementName + *out = new(string) + **out = **in + } + if in.APIName != nil { + in, out := &in.APIName, &out.APIName + *out = new(string) + **out = **in + } + if in.AlwaysLogErrors != nil { + in, out := &in.AlwaysLogErrors, &out.AlwaysLogErrors + *out = new(bool) + **out = **in + } + if in.BackendRequest != nil { + in, out := &in.BackendRequest, &out.BackendRequest + *out = new(BackendRequestObservation) + (*in).DeepCopyInto(*out) + } + if in.BackendResponse != nil { + in, out := &in.BackendResponse, &out.BackendResponse + *out = new(BackendResponseObservation) + (*in).DeepCopyInto(*out) + } + if in.FrontendRequest != nil { + in, out := &in.FrontendRequest, &out.FrontendRequest + *out = new(FrontendRequestObservation) + (*in).DeepCopyInto(*out) + } + if in.FrontendResponse != nil { + in, out := &in.FrontendResponse, &out.FrontendResponse + *out = new(FrontendResponseObservation) + (*in).DeepCopyInto(*out) + } + if in.HTTPCorrelationProtocol != nil { + in, out := &in.HTTPCorrelationProtocol, &out.HTTPCorrelationProtocol + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LogClientIP != nil { + in, out := &in.LogClientIP, &out.LogClientIP + *out = new(bool) + **out = **in + } + if in.OperationNameFormat != nil { + in, out := &in.OperationNameFormat, &out.OperationNameFormat + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.SamplingPercentage != nil { + in, out := &in.SamplingPercentage, &out.SamplingPercentage + *out = new(float64) + 
**out = **in + } + if in.Verbosity != nil { + in, out := &in.Verbosity, &out.Verbosity + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIDiagnosticObservation. +func (in *APIDiagnosticObservation) DeepCopy() *APIDiagnosticObservation { + if in == nil { + return nil + } + out := new(APIDiagnosticObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIDiagnosticParameters) DeepCopyInto(out *APIDiagnosticParameters) { + *out = *in + if in.APIManagementLoggerID != nil { + in, out := &in.APIManagementLoggerID, &out.APIManagementLoggerID + *out = new(string) + **out = **in + } + if in.APIManagementLoggerIDRef != nil { + in, out := &in.APIManagementLoggerIDRef, &out.APIManagementLoggerIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.APIManagementLoggerIDSelector != nil { + in, out := &in.APIManagementLoggerIDSelector, &out.APIManagementLoggerIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.APIManagementName != nil { + in, out := &in.APIManagementName, &out.APIManagementName + *out = new(string) + **out = **in + } + if in.APIManagementNameRef != nil { + in, out := &in.APIManagementNameRef, &out.APIManagementNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.APIManagementNameSelector != nil { + in, out := &in.APIManagementNameSelector, &out.APIManagementNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.APIName != nil { + in, out := &in.APIName, &out.APIName + *out = new(string) + **out = **in + } + if in.APINameRef != nil { + in, out := &in.APINameRef, &out.APINameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.APINameSelector != nil { + in, out := &in.APINameSelector, &out.APINameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) 
+ } + if in.AlwaysLogErrors != nil { + in, out := &in.AlwaysLogErrors, &out.AlwaysLogErrors + *out = new(bool) + **out = **in + } + if in.BackendRequest != nil { + in, out := &in.BackendRequest, &out.BackendRequest + *out = new(BackendRequestParameters) + (*in).DeepCopyInto(*out) + } + if in.BackendResponse != nil { + in, out := &in.BackendResponse, &out.BackendResponse + *out = new(BackendResponseParameters) + (*in).DeepCopyInto(*out) + } + if in.FrontendRequest != nil { + in, out := &in.FrontendRequest, &out.FrontendRequest + *out = new(FrontendRequestParameters) + (*in).DeepCopyInto(*out) + } + if in.FrontendResponse != nil { + in, out := &in.FrontendResponse, &out.FrontendResponse + *out = new(FrontendResponseParameters) + (*in).DeepCopyInto(*out) + } + if in.HTTPCorrelationProtocol != nil { + in, out := &in.HTTPCorrelationProtocol, &out.HTTPCorrelationProtocol + *out = new(string) + **out = **in + } + if in.LogClientIP != nil { + in, out := &in.LogClientIP, &out.LogClientIP + *out = new(bool) + **out = **in + } + if in.OperationNameFormat != nil { + in, out := &in.OperationNameFormat, &out.OperationNameFormat + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SamplingPercentage != nil { + in, out := &in.SamplingPercentage, &out.SamplingPercentage + *out = new(float64) + **out = **in + } + if in.Verbosity != nil { + in, out := &in.Verbosity, &out.Verbosity + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
APIDiagnosticParameters. +func (in *APIDiagnosticParameters) DeepCopy() *APIDiagnosticParameters { + if in == nil { + return nil + } + out := new(APIDiagnosticParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIDiagnosticSpec) DeepCopyInto(out *APIDiagnosticSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIDiagnosticSpec. +func (in *APIDiagnosticSpec) DeepCopy() *APIDiagnosticSpec { + if in == nil { + return nil + } + out := new(APIDiagnosticSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIDiagnosticStatus) DeepCopyInto(out *APIDiagnosticStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIDiagnosticStatus. +func (in *APIDiagnosticStatus) DeepCopy() *APIDiagnosticStatus { + if in == nil { + return nil + } + out := new(APIDiagnosticStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *APIInitParameters) DeepCopyInto(out *APIInitParameters) { + *out = *in + if in.APIType != nil { + in, out := &in.APIType, &out.APIType + *out = new(string) + **out = **in + } + if in.Contact != nil { + in, out := &in.Contact, &out.Contact + *out = new(ContactInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.Import != nil { + in, out := &in.Import, &out.Import + *out = new(ImportInitParameters) + (*in).DeepCopyInto(*out) + } + if in.License != nil { + in, out := &in.License, &out.License + *out = new(LicenseInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Oauth2Authorization != nil { + in, out := &in.Oauth2Authorization, &out.Oauth2Authorization + *out = new(Oauth2AuthorizationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.OpenIDAuthentication != nil { + in, out := &in.OpenIDAuthentication, &out.OpenIDAuthentication + *out = new(OpenIDAuthenticationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Protocols != nil { + in, out := &in.Protocols, &out.Protocols + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RevisionDescription != nil { + in, out := &in.RevisionDescription, &out.RevisionDescription + *out = new(string) + **out = **in + } + if in.ServiceURL != nil { + in, out := &in.ServiceURL, &out.ServiceURL + *out = new(string) + **out = **in + } + if in.SoapPassThrough != nil { + in, out := &in.SoapPassThrough, &out.SoapPassThrough + *out = new(bool) + **out = **in + } + if in.SourceAPIID != nil { + in, out := &in.SourceAPIID, &out.SourceAPIID + *out = new(string) + **out = **in + } + if 
in.SubscriptionKeyParameterNames != nil { + in, out := &in.SubscriptionKeyParameterNames, &out.SubscriptionKeyParameterNames + *out = new(SubscriptionKeyParameterNamesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SubscriptionRequired != nil { + in, out := &in.SubscriptionRequired, &out.SubscriptionRequired + *out = new(bool) + **out = **in + } + if in.TermsOfServiceURL != nil { + in, out := &in.TermsOfServiceURL, &out.TermsOfServiceURL + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } + if in.VersionDescription != nil { + in, out := &in.VersionDescription, &out.VersionDescription + *out = new(string) + **out = **in + } + if in.VersionSetID != nil { + in, out := &in.VersionSetID, &out.VersionSetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIInitParameters. +func (in *APIInitParameters) DeepCopy() *APIInitParameters { + if in == nil { + return nil + } + out := new(APIInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIList) DeepCopyInto(out *APIList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]API, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIList. +func (in *APIList) DeepCopy() *APIList { + if in == nil { + return nil + } + out := new(APIList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *APIList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIObservation) DeepCopyInto(out *APIObservation) { + *out = *in + if in.APIManagementName != nil { + in, out := &in.APIManagementName, &out.APIManagementName + *out = new(string) + **out = **in + } + if in.APIType != nil { + in, out := &in.APIType, &out.APIType + *out = new(string) + **out = **in + } + if in.Contact != nil { + in, out := &in.Contact, &out.Contact + *out = new(ContactObservation) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Import != nil { + in, out := &in.Import, &out.Import + *out = new(ImportObservation) + (*in).DeepCopyInto(*out) + } + if in.IsCurrent != nil { + in, out := &in.IsCurrent, &out.IsCurrent + *out = new(bool) + **out = **in + } + if in.IsOnline != nil { + in, out := &in.IsOnline, &out.IsOnline + *out = new(bool) + **out = **in + } + if in.License != nil { + in, out := &in.License, &out.License + *out = new(LicenseObservation) + (*in).DeepCopyInto(*out) + } + if in.Oauth2Authorization != nil { + in, out := &in.Oauth2Authorization, &out.Oauth2Authorization + *out = new(Oauth2AuthorizationObservation) + (*in).DeepCopyInto(*out) + } + if in.OpenIDAuthentication != nil { + in, out := &in.OpenIDAuthentication, &out.OpenIDAuthentication + *out = new(OpenIDAuthenticationObservation) + (*in).DeepCopyInto(*out) + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Protocols != nil { + in, out := &in.Protocols, &out.Protocols + *out = 
make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Revision != nil { + in, out := &in.Revision, &out.Revision + *out = new(string) + **out = **in + } + if in.RevisionDescription != nil { + in, out := &in.RevisionDescription, &out.RevisionDescription + *out = new(string) + **out = **in + } + if in.ServiceURL != nil { + in, out := &in.ServiceURL, &out.ServiceURL + *out = new(string) + **out = **in + } + if in.SoapPassThrough != nil { + in, out := &in.SoapPassThrough, &out.SoapPassThrough + *out = new(bool) + **out = **in + } + if in.SourceAPIID != nil { + in, out := &in.SourceAPIID, &out.SourceAPIID + *out = new(string) + **out = **in + } + if in.SubscriptionKeyParameterNames != nil { + in, out := &in.SubscriptionKeyParameterNames, &out.SubscriptionKeyParameterNames + *out = new(SubscriptionKeyParameterNamesObservation) + (*in).DeepCopyInto(*out) + } + if in.SubscriptionRequired != nil { + in, out := &in.SubscriptionRequired, &out.SubscriptionRequired + *out = new(bool) + **out = **in + } + if in.TermsOfServiceURL != nil { + in, out := &in.TermsOfServiceURL, &out.TermsOfServiceURL + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } + if in.VersionDescription != nil { + in, out := &in.VersionDescription, &out.VersionDescription + *out = new(string) + **out = **in + } + if in.VersionSetID != nil { + in, out := &in.VersionSetID, &out.VersionSetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIObservation. 
+func (in *APIObservation) DeepCopy() *APIObservation { + if in == nil { + return nil + } + out := new(APIObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIOperation) DeepCopyInto(out *APIOperation) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIOperation. +func (in *APIOperation) DeepCopy() *APIOperation { + if in == nil { + return nil + } + out := new(APIOperation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *APIOperation) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *APIOperationInitParameters) DeepCopyInto(out *APIOperationInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.Method != nil { + in, out := &in.Method, &out.Method + *out = new(string) + **out = **in + } + if in.Request != nil { + in, out := &in.Request, &out.Request + *out = new(RequestInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Response != nil { + in, out := &in.Response, &out.Response + *out = make([]ResponseInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TemplateParameter != nil { + in, out := &in.TemplateParameter, &out.TemplateParameter + *out = make([]TemplateParameterInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.URLTemplate != nil { + in, out := &in.URLTemplate, &out.URLTemplate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIOperationInitParameters. +func (in *APIOperationInitParameters) DeepCopy() *APIOperationInitParameters { + if in == nil { + return nil + } + out := new(APIOperationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIOperationList) DeepCopyInto(out *APIOperationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]APIOperation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIOperationList. 
+func (in *APIOperationList) DeepCopy() *APIOperationList { + if in == nil { + return nil + } + out := new(APIOperationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *APIOperationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIOperationObservation) DeepCopyInto(out *APIOperationObservation) { + *out = *in + if in.APIManagementName != nil { + in, out := &in.APIManagementName, &out.APIManagementName + *out = new(string) + **out = **in + } + if in.APIName != nil { + in, out := &in.APIName, &out.APIName + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Method != nil { + in, out := &in.Method, &out.Method + *out = new(string) + **out = **in + } + if in.Request != nil { + in, out := &in.Request, &out.Request + *out = new(RequestObservation) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Response != nil { + in, out := &in.Response, &out.Response + *out = make([]ResponseObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TemplateParameter != nil { + in, out := &in.TemplateParameter, &out.TemplateParameter + *out = make([]TemplateParameterObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.URLTemplate != nil { + in, out := &in.URLTemplate, 
&out.URLTemplate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIOperationObservation. +func (in *APIOperationObservation) DeepCopy() *APIOperationObservation { + if in == nil { + return nil + } + out := new(APIOperationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIOperationParameters) DeepCopyInto(out *APIOperationParameters) { + *out = *in + if in.APIManagementName != nil { + in, out := &in.APIManagementName, &out.APIManagementName + *out = new(string) + **out = **in + } + if in.APIManagementNameRef != nil { + in, out := &in.APIManagementNameRef, &out.APIManagementNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.APIManagementNameSelector != nil { + in, out := &in.APIManagementNameSelector, &out.APIManagementNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.APIName != nil { + in, out := &in.APIName, &out.APIName + *out = new(string) + **out = **in + } + if in.APINameRef != nil { + in, out := &in.APINameRef, &out.APINameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.APINameSelector != nil { + in, out := &in.APINameSelector, &out.APINameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.Method != nil { + in, out := &in.Method, &out.Method + *out = new(string) + **out = **in + } + if in.Request != nil { + in, out := &in.Request, &out.Request + *out = new(RequestParameters) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + 
**out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Response != nil { + in, out := &in.Response, &out.Response + *out = make([]ResponseParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TemplateParameter != nil { + in, out := &in.TemplateParameter, &out.TemplateParameter + *out = make([]TemplateParameterParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.URLTemplate != nil { + in, out := &in.URLTemplate, &out.URLTemplate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIOperationParameters. +func (in *APIOperationParameters) DeepCopy() *APIOperationParameters { + if in == nil { + return nil + } + out := new(APIOperationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIOperationSpec) DeepCopyInto(out *APIOperationSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIOperationSpec. +func (in *APIOperationSpec) DeepCopy() *APIOperationSpec { + if in == nil { + return nil + } + out := new(APIOperationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *APIOperationStatus) DeepCopyInto(out *APIOperationStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIOperationStatus. +func (in *APIOperationStatus) DeepCopy() *APIOperationStatus { + if in == nil { + return nil + } + out := new(APIOperationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIParameters) DeepCopyInto(out *APIParameters) { + *out = *in + if in.APIManagementName != nil { + in, out := &in.APIManagementName, &out.APIManagementName + *out = new(string) + **out = **in + } + if in.APIManagementNameRef != nil { + in, out := &in.APIManagementNameRef, &out.APIManagementNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.APIManagementNameSelector != nil { + in, out := &in.APIManagementNameSelector, &out.APIManagementNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.APIType != nil { + in, out := &in.APIType, &out.APIType + *out = new(string) + **out = **in + } + if in.Contact != nil { + in, out := &in.Contact, &out.Contact + *out = new(ContactParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.Import != nil { + in, out := &in.Import, &out.Import + *out = new(ImportParameters) + (*in).DeepCopyInto(*out) + } + if in.License != nil { + in, out := &in.License, &out.License + *out = new(LicenseParameters) + (*in).DeepCopyInto(*out) + } + if in.Oauth2Authorization != nil { + in, out := &in.Oauth2Authorization, &out.Oauth2Authorization + *out = new(Oauth2AuthorizationParameters) + 
(*in).DeepCopyInto(*out) + } + if in.OpenIDAuthentication != nil { + in, out := &in.OpenIDAuthentication, &out.OpenIDAuthentication + *out = new(OpenIDAuthenticationParameters) + (*in).DeepCopyInto(*out) + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Protocols != nil { + in, out := &in.Protocols, &out.Protocols + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Revision != nil { + in, out := &in.Revision, &out.Revision + *out = new(string) + **out = **in + } + if in.RevisionDescription != nil { + in, out := &in.RevisionDescription, &out.RevisionDescription + *out = new(string) + **out = **in + } + if in.ServiceURL != nil { + in, out := &in.ServiceURL, &out.ServiceURL + *out = new(string) + **out = **in + } + if in.SoapPassThrough != nil { + in, out := &in.SoapPassThrough, &out.SoapPassThrough + *out = new(bool) + **out = **in + } + if in.SourceAPIID != nil { + in, out := &in.SourceAPIID, &out.SourceAPIID + *out = new(string) + **out = **in + } + if in.SubscriptionKeyParameterNames != nil { + in, out := &in.SubscriptionKeyParameterNames, &out.SubscriptionKeyParameterNames + *out = new(SubscriptionKeyParameterNamesParameters) + (*in).DeepCopyInto(*out) + } + if in.SubscriptionRequired != nil { + in, out := &in.SubscriptionRequired, &out.SubscriptionRequired + *out = new(bool) + **out = **in + } + if in.TermsOfServiceURL != nil { 
+ in, out := &in.TermsOfServiceURL, &out.TermsOfServiceURL + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } + if in.VersionDescription != nil { + in, out := &in.VersionDescription, &out.VersionDescription + *out = new(string) + **out = **in + } + if in.VersionSetID != nil { + in, out := &in.VersionSetID, &out.VersionSetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIParameters. +func (in *APIParameters) DeepCopy() *APIParameters { + if in == nil { + return nil + } + out := new(APIParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APISpec) DeepCopyInto(out *APISpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APISpec. +func (in *APISpec) DeepCopy() *APISpec { + if in == nil { + return nil + } + out := new(APISpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIStatus) DeepCopyInto(out *APIStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIStatus. +func (in *APIStatus) DeepCopy() *APIStatus { + if in == nil { + return nil + } + out := new(APIStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AdditionalLocationInitParameters) DeepCopyInto(out *AdditionalLocationInitParameters) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(float64) + **out = **in + } + if in.GatewayDisabled != nil { + in, out := &in.GatewayDisabled, &out.GatewayDisabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PublicIPAddressID != nil { + in, out := &in.PublicIPAddressID, &out.PublicIPAddressID + *out = new(string) + **out = **in + } + if in.VirtualNetworkConfiguration != nil { + in, out := &in.VirtualNetworkConfiguration, &out.VirtualNetworkConfiguration + *out = new(VirtualNetworkConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdditionalLocationInitParameters. +func (in *AdditionalLocationInitParameters) DeepCopy() *AdditionalLocationInitParameters { + if in == nil { + return nil + } + out := new(AdditionalLocationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AdditionalLocationObservation) DeepCopyInto(out *AdditionalLocationObservation) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(float64) + **out = **in + } + if in.GatewayDisabled != nil { + in, out := &in.GatewayDisabled, &out.GatewayDisabled + *out = new(bool) + **out = **in + } + if in.GatewayRegionalURL != nil { + in, out := &in.GatewayRegionalURL, &out.GatewayRegionalURL + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PrivateIPAddresses != nil { + in, out := &in.PrivateIPAddresses, &out.PrivateIPAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PublicIPAddressID != nil { + in, out := &in.PublicIPAddressID, &out.PublicIPAddressID + *out = new(string) + **out = **in + } + if in.PublicIPAddresses != nil { + in, out := &in.PublicIPAddresses, &out.PublicIPAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VirtualNetworkConfiguration != nil { + in, out := &in.VirtualNetworkConfiguration, &out.VirtualNetworkConfiguration + *out = new(VirtualNetworkConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdditionalLocationObservation. 
+func (in *AdditionalLocationObservation) DeepCopy() *AdditionalLocationObservation { + if in == nil { + return nil + } + out := new(AdditionalLocationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdditionalLocationParameters) DeepCopyInto(out *AdditionalLocationParameters) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(float64) + **out = **in + } + if in.GatewayDisabled != nil { + in, out := &in.GatewayDisabled, &out.GatewayDisabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PublicIPAddressID != nil { + in, out := &in.PublicIPAddressID, &out.PublicIPAddressID + *out = new(string) + **out = **in + } + if in.VirtualNetworkConfiguration != nil { + in, out := &in.VirtualNetworkConfiguration, &out.VirtualNetworkConfiguration + *out = new(VirtualNetworkConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdditionalLocationParameters. +func (in *AdditionalLocationParameters) DeepCopy() *AdditionalLocationParameters { + if in == nil { + return nil + } + out := new(AdditionalLocationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ApplicationInsightsInitParameters) DeepCopyInto(out *ApplicationInsightsInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationInsightsInitParameters. +func (in *ApplicationInsightsInitParameters) DeepCopy() *ApplicationInsightsInitParameters { + if in == nil { + return nil + } + out := new(ApplicationInsightsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationInsightsObservation) DeepCopyInto(out *ApplicationInsightsObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationInsightsObservation. +func (in *ApplicationInsightsObservation) DeepCopy() *ApplicationInsightsObservation { + if in == nil { + return nil + } + out := new(ApplicationInsightsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationInsightsParameters) DeepCopyInto(out *ApplicationInsightsParameters) { + *out = *in + out.InstrumentationKeySecretRef = in.InstrumentationKeySecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationInsightsParameters. +func (in *ApplicationInsightsParameters) DeepCopy() *ApplicationInsightsParameters { + if in == nil { + return nil + } + out := new(ApplicationInsightsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuthorizationInitParameters) DeepCopyInto(out *AuthorizationInitParameters) { + *out = *in + if in.Parameter != nil { + in, out := &in.Parameter, &out.Parameter + *out = new(string) + **out = **in + } + if in.Scheme != nil { + in, out := &in.Scheme, &out.Scheme + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthorizationInitParameters. +func (in *AuthorizationInitParameters) DeepCopy() *AuthorizationInitParameters { + if in == nil { + return nil + } + out := new(AuthorizationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthorizationObservation) DeepCopyInto(out *AuthorizationObservation) { + *out = *in + if in.Parameter != nil { + in, out := &in.Parameter, &out.Parameter + *out = new(string) + **out = **in + } + if in.Scheme != nil { + in, out := &in.Scheme, &out.Scheme + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthorizationObservation. +func (in *AuthorizationObservation) DeepCopy() *AuthorizationObservation { + if in == nil { + return nil + } + out := new(AuthorizationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthorizationParameters) DeepCopyInto(out *AuthorizationParameters) { + *out = *in + if in.Parameter != nil { + in, out := &in.Parameter, &out.Parameter + *out = new(string) + **out = **in + } + if in.Scheme != nil { + in, out := &in.Scheme, &out.Scheme + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthorizationParameters. 
+func (in *AuthorizationParameters) DeepCopy() *AuthorizationParameters { + if in == nil { + return nil + } + out := new(AuthorizationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Backend) DeepCopyInto(out *Backend) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Backend. +func (in *Backend) DeepCopy() *Backend { + if in == nil { + return nil + } + out := new(Backend) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Backend) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackendInitParameters) DeepCopyInto(out *BackendInitParameters) { + *out = *in + if in.Credentials != nil { + in, out := &in.Credentials, &out.Credentials + *out = new(CredentialsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.Proxy != nil { + in, out := &in.Proxy, &out.Proxy + *out = new(BackendProxyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ResourceID != nil { + in, out := &in.ResourceID, &out.ResourceID + *out = new(string) + **out = **in + } + if in.ServiceFabricCluster != nil { + in, out := &in.ServiceFabricCluster, &out.ServiceFabricCluster + *out = new(ServiceFabricClusterInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(TLSInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendInitParameters. +func (in *BackendInitParameters) DeepCopy() *BackendInitParameters { + if in == nil { + return nil + } + out := new(BackendInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackendList) DeepCopyInto(out *BackendList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Backend, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendList. +func (in *BackendList) DeepCopy() *BackendList { + if in == nil { + return nil + } + out := new(BackendList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BackendList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendObservation) DeepCopyInto(out *BackendObservation) { + *out = *in + if in.APIManagementName != nil { + in, out := &in.APIManagementName, &out.APIManagementName + *out = new(string) + **out = **in + } + if in.Credentials != nil { + in, out := &in.Credentials, &out.Credentials + *out = new(CredentialsObservation) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.Proxy != nil { + in, out := &in.Proxy, &out.Proxy + *out = new(BackendProxyObservation) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceID != nil { + in, out := &in.ResourceID, &out.ResourceID + *out = new(string) + **out = **in + } + if in.ServiceFabricCluster != nil { 
+ in, out := &in.ServiceFabricCluster, &out.ServiceFabricCluster + *out = new(ServiceFabricClusterObservation) + (*in).DeepCopyInto(*out) + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(TLSObservation) + (*in).DeepCopyInto(*out) + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendObservation. +func (in *BackendObservation) DeepCopy() *BackendObservation { + if in == nil { + return nil + } + out := new(BackendObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendParameters) DeepCopyInto(out *BackendParameters) { + *out = *in + if in.APIManagementName != nil { + in, out := &in.APIManagementName, &out.APIManagementName + *out = new(string) + **out = **in + } + if in.APIManagementNameRef != nil { + in, out := &in.APIManagementNameRef, &out.APIManagementNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.APIManagementNameSelector != nil { + in, out := &in.APIManagementNameSelector, &out.APIManagementNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Credentials != nil { + in, out := &in.Credentials, &out.Credentials + *out = new(CredentialsParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.Proxy != nil { + in, out := &in.Proxy, &out.Proxy + *out = new(BackendProxyParameters) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = 
new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ResourceID != nil { + in, out := &in.ResourceID, &out.ResourceID + *out = new(string) + **out = **in + } + if in.ServiceFabricCluster != nil { + in, out := &in.ServiceFabricCluster, &out.ServiceFabricCluster + *out = new(ServiceFabricClusterParameters) + (*in).DeepCopyInto(*out) + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(TLSParameters) + (*in).DeepCopyInto(*out) + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendParameters. +func (in *BackendParameters) DeepCopy() *BackendParameters { + if in == nil { + return nil + } + out := new(BackendParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendProxyInitParameters) DeepCopyInto(out *BackendProxyInitParameters) { + *out = *in + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendProxyInitParameters. 
+func (in *BackendProxyInitParameters) DeepCopy() *BackendProxyInitParameters { + if in == nil { + return nil + } + out := new(BackendProxyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendProxyObservation) DeepCopyInto(out *BackendProxyObservation) { + *out = *in + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendProxyObservation. +func (in *BackendProxyObservation) DeepCopy() *BackendProxyObservation { + if in == nil { + return nil + } + out := new(BackendProxyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendProxyParameters) DeepCopyInto(out *BackendProxyParameters) { + *out = *in + if in.PasswordSecretRef != nil { + in, out := &in.PasswordSecretRef, &out.PasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendProxyParameters. +func (in *BackendProxyParameters) DeepCopy() *BackendProxyParameters { + if in == nil { + return nil + } + out := new(BackendProxyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackendRequestDataMaskingHeadersInitParameters) DeepCopyInto(out *BackendRequestDataMaskingHeadersInitParameters) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendRequestDataMaskingHeadersInitParameters. +func (in *BackendRequestDataMaskingHeadersInitParameters) DeepCopy() *BackendRequestDataMaskingHeadersInitParameters { + if in == nil { + return nil + } + out := new(BackendRequestDataMaskingHeadersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendRequestDataMaskingHeadersObservation) DeepCopyInto(out *BackendRequestDataMaskingHeadersObservation) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendRequestDataMaskingHeadersObservation. +func (in *BackendRequestDataMaskingHeadersObservation) DeepCopy() *BackendRequestDataMaskingHeadersObservation { + if in == nil { + return nil + } + out := new(BackendRequestDataMaskingHeadersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackendRequestDataMaskingHeadersParameters) DeepCopyInto(out *BackendRequestDataMaskingHeadersParameters) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendRequestDataMaskingHeadersParameters. +func (in *BackendRequestDataMaskingHeadersParameters) DeepCopy() *BackendRequestDataMaskingHeadersParameters { + if in == nil { + return nil + } + out := new(BackendRequestDataMaskingHeadersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendRequestDataMaskingInitParameters) DeepCopyInto(out *BackendRequestDataMaskingInitParameters) { + *out = *in + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]BackendRequestDataMaskingHeadersInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.QueryParams != nil { + in, out := &in.QueryParams, &out.QueryParams + *out = make([]BackendRequestDataMaskingQueryParamsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendRequestDataMaskingInitParameters. +func (in *BackendRequestDataMaskingInitParameters) DeepCopy() *BackendRequestDataMaskingInitParameters { + if in == nil { + return nil + } + out := new(BackendRequestDataMaskingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackendRequestDataMaskingObservation) DeepCopyInto(out *BackendRequestDataMaskingObservation) { + *out = *in + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]BackendRequestDataMaskingHeadersObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.QueryParams != nil { + in, out := &in.QueryParams, &out.QueryParams + *out = make([]BackendRequestDataMaskingQueryParamsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendRequestDataMaskingObservation. +func (in *BackendRequestDataMaskingObservation) DeepCopy() *BackendRequestDataMaskingObservation { + if in == nil { + return nil + } + out := new(BackendRequestDataMaskingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendRequestDataMaskingParameters) DeepCopyInto(out *BackendRequestDataMaskingParameters) { + *out = *in + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]BackendRequestDataMaskingHeadersParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.QueryParams != nil { + in, out := &in.QueryParams, &out.QueryParams + *out = make([]BackendRequestDataMaskingQueryParamsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendRequestDataMaskingParameters. +func (in *BackendRequestDataMaskingParameters) DeepCopy() *BackendRequestDataMaskingParameters { + if in == nil { + return nil + } + out := new(BackendRequestDataMaskingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *BackendRequestDataMaskingQueryParamsInitParameters) DeepCopyInto(out *BackendRequestDataMaskingQueryParamsInitParameters) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendRequestDataMaskingQueryParamsInitParameters. +func (in *BackendRequestDataMaskingQueryParamsInitParameters) DeepCopy() *BackendRequestDataMaskingQueryParamsInitParameters { + if in == nil { + return nil + } + out := new(BackendRequestDataMaskingQueryParamsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendRequestDataMaskingQueryParamsObservation) DeepCopyInto(out *BackendRequestDataMaskingQueryParamsObservation) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendRequestDataMaskingQueryParamsObservation. +func (in *BackendRequestDataMaskingQueryParamsObservation) DeepCopy() *BackendRequestDataMaskingQueryParamsObservation { + if in == nil { + return nil + } + out := new(BackendRequestDataMaskingQueryParamsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackendRequestDataMaskingQueryParamsParameters) DeepCopyInto(out *BackendRequestDataMaskingQueryParamsParameters) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendRequestDataMaskingQueryParamsParameters. +func (in *BackendRequestDataMaskingQueryParamsParameters) DeepCopy() *BackendRequestDataMaskingQueryParamsParameters { + if in == nil { + return nil + } + out := new(BackendRequestDataMaskingQueryParamsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendRequestInitParameters) DeepCopyInto(out *BackendRequestInitParameters) { + *out = *in + if in.BodyBytes != nil { + in, out := &in.BodyBytes, &out.BodyBytes + *out = new(float64) + **out = **in + } + if in.DataMasking != nil { + in, out := &in.DataMasking, &out.DataMasking + *out = new(DataMaskingInitParameters) + (*in).DeepCopyInto(*out) + } + if in.HeadersToLog != nil { + in, out := &in.HeadersToLog, &out.HeadersToLog + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendRequestInitParameters. +func (in *BackendRequestInitParameters) DeepCopy() *BackendRequestInitParameters { + if in == nil { + return nil + } + out := new(BackendRequestInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackendRequestObservation) DeepCopyInto(out *BackendRequestObservation) { + *out = *in + if in.BodyBytes != nil { + in, out := &in.BodyBytes, &out.BodyBytes + *out = new(float64) + **out = **in + } + if in.DataMasking != nil { + in, out := &in.DataMasking, &out.DataMasking + *out = new(DataMaskingObservation) + (*in).DeepCopyInto(*out) + } + if in.HeadersToLog != nil { + in, out := &in.HeadersToLog, &out.HeadersToLog + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendRequestObservation. +func (in *BackendRequestObservation) DeepCopy() *BackendRequestObservation { + if in == nil { + return nil + } + out := new(BackendRequestObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendRequestParameters) DeepCopyInto(out *BackendRequestParameters) { + *out = *in + if in.BodyBytes != nil { + in, out := &in.BodyBytes, &out.BodyBytes + *out = new(float64) + **out = **in + } + if in.DataMasking != nil { + in, out := &in.DataMasking, &out.DataMasking + *out = new(DataMaskingParameters) + (*in).DeepCopyInto(*out) + } + if in.HeadersToLog != nil { + in, out := &in.HeadersToLog, &out.HeadersToLog + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendRequestParameters. 
+func (in *BackendRequestParameters) DeepCopy() *BackendRequestParameters { + if in == nil { + return nil + } + out := new(BackendRequestParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendResponseDataMaskingHeadersInitParameters) DeepCopyInto(out *BackendResponseDataMaskingHeadersInitParameters) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendResponseDataMaskingHeadersInitParameters. +func (in *BackendResponseDataMaskingHeadersInitParameters) DeepCopy() *BackendResponseDataMaskingHeadersInitParameters { + if in == nil { + return nil + } + out := new(BackendResponseDataMaskingHeadersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendResponseDataMaskingHeadersObservation) DeepCopyInto(out *BackendResponseDataMaskingHeadersObservation) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendResponseDataMaskingHeadersObservation. +func (in *BackendResponseDataMaskingHeadersObservation) DeepCopy() *BackendResponseDataMaskingHeadersObservation { + if in == nil { + return nil + } + out := new(BackendResponseDataMaskingHeadersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackendResponseDataMaskingHeadersParameters) DeepCopyInto(out *BackendResponseDataMaskingHeadersParameters) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendResponseDataMaskingHeadersParameters. +func (in *BackendResponseDataMaskingHeadersParameters) DeepCopy() *BackendResponseDataMaskingHeadersParameters { + if in == nil { + return nil + } + out := new(BackendResponseDataMaskingHeadersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendResponseDataMaskingInitParameters) DeepCopyInto(out *BackendResponseDataMaskingInitParameters) { + *out = *in + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]DataMaskingHeadersInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.QueryParams != nil { + in, out := &in.QueryParams, &out.QueryParams + *out = make([]DataMaskingQueryParamsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendResponseDataMaskingInitParameters. +func (in *BackendResponseDataMaskingInitParameters) DeepCopy() *BackendResponseDataMaskingInitParameters { + if in == nil { + return nil + } + out := new(BackendResponseDataMaskingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackendResponseDataMaskingObservation) DeepCopyInto(out *BackendResponseDataMaskingObservation) { + *out = *in + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]DataMaskingHeadersObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.QueryParams != nil { + in, out := &in.QueryParams, &out.QueryParams + *out = make([]DataMaskingQueryParamsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendResponseDataMaskingObservation. +func (in *BackendResponseDataMaskingObservation) DeepCopy() *BackendResponseDataMaskingObservation { + if in == nil { + return nil + } + out := new(BackendResponseDataMaskingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendResponseDataMaskingParameters) DeepCopyInto(out *BackendResponseDataMaskingParameters) { + *out = *in + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]DataMaskingHeadersParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.QueryParams != nil { + in, out := &in.QueryParams, &out.QueryParams + *out = make([]DataMaskingQueryParamsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendResponseDataMaskingParameters. +func (in *BackendResponseDataMaskingParameters) DeepCopy() *BackendResponseDataMaskingParameters { + if in == nil { + return nil + } + out := new(BackendResponseDataMaskingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackendResponseDataMaskingQueryParamsInitParameters) DeepCopyInto(out *BackendResponseDataMaskingQueryParamsInitParameters) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendResponseDataMaskingQueryParamsInitParameters. +func (in *BackendResponseDataMaskingQueryParamsInitParameters) DeepCopy() *BackendResponseDataMaskingQueryParamsInitParameters { + if in == nil { + return nil + } + out := new(BackendResponseDataMaskingQueryParamsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendResponseDataMaskingQueryParamsObservation) DeepCopyInto(out *BackendResponseDataMaskingQueryParamsObservation) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendResponseDataMaskingQueryParamsObservation. +func (in *BackendResponseDataMaskingQueryParamsObservation) DeepCopy() *BackendResponseDataMaskingQueryParamsObservation { + if in == nil { + return nil + } + out := new(BackendResponseDataMaskingQueryParamsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackendResponseDataMaskingQueryParamsParameters) DeepCopyInto(out *BackendResponseDataMaskingQueryParamsParameters) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendResponseDataMaskingQueryParamsParameters. +func (in *BackendResponseDataMaskingQueryParamsParameters) DeepCopy() *BackendResponseDataMaskingQueryParamsParameters { + if in == nil { + return nil + } + out := new(BackendResponseDataMaskingQueryParamsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendResponseInitParameters) DeepCopyInto(out *BackendResponseInitParameters) { + *out = *in + if in.BodyBytes != nil { + in, out := &in.BodyBytes, &out.BodyBytes + *out = new(float64) + **out = **in + } + if in.DataMasking != nil { + in, out := &in.DataMasking, &out.DataMasking + *out = new(BackendResponseDataMaskingInitParameters) + (*in).DeepCopyInto(*out) + } + if in.HeadersToLog != nil { + in, out := &in.HeadersToLog, &out.HeadersToLog + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendResponseInitParameters. +func (in *BackendResponseInitParameters) DeepCopy() *BackendResponseInitParameters { + if in == nil { + return nil + } + out := new(BackendResponseInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackendResponseObservation) DeepCopyInto(out *BackendResponseObservation) { + *out = *in + if in.BodyBytes != nil { + in, out := &in.BodyBytes, &out.BodyBytes + *out = new(float64) + **out = **in + } + if in.DataMasking != nil { + in, out := &in.DataMasking, &out.DataMasking + *out = new(BackendResponseDataMaskingObservation) + (*in).DeepCopyInto(*out) + } + if in.HeadersToLog != nil { + in, out := &in.HeadersToLog, &out.HeadersToLog + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendResponseObservation. +func (in *BackendResponseObservation) DeepCopy() *BackendResponseObservation { + if in == nil { + return nil + } + out := new(BackendResponseObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendResponseParameters) DeepCopyInto(out *BackendResponseParameters) { + *out = *in + if in.BodyBytes != nil { + in, out := &in.BodyBytes, &out.BodyBytes + *out = new(float64) + **out = **in + } + if in.DataMasking != nil { + in, out := &in.DataMasking, &out.DataMasking + *out = new(BackendResponseDataMaskingParameters) + (*in).DeepCopyInto(*out) + } + if in.HeadersToLog != nil { + in, out := &in.HeadersToLog, &out.HeadersToLog + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendResponseParameters. 
+func (in *BackendResponseParameters) DeepCopy() *BackendResponseParameters { + if in == nil { + return nil + } + out := new(BackendResponseParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendSpec) DeepCopyInto(out *BackendSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendSpec. +func (in *BackendSpec) DeepCopy() *BackendSpec { + if in == nil { + return nil + } + out := new(BackendSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendStatus) DeepCopyInto(out *BackendStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendStatus. +func (in *BackendStatus) DeepCopy() *BackendStatus { + if in == nil { + return nil + } + out := new(BackendStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateInitParameters) DeepCopyInto(out *CertificateInitParameters) { + *out = *in + if in.StoreName != nil { + in, out := &in.StoreName, &out.StoreName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateInitParameters. 
+func (in *CertificateInitParameters) DeepCopy() *CertificateInitParameters { + if in == nil { + return nil + } + out := new(CertificateInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateObservation) DeepCopyInto(out *CertificateObservation) { + *out = *in + if in.Expiry != nil { + in, out := &in.Expiry, &out.Expiry + *out = new(string) + **out = **in + } + if in.StoreName != nil { + in, out := &in.StoreName, &out.StoreName + *out = new(string) + **out = **in + } + if in.Subject != nil { + in, out := &in.Subject, &out.Subject + *out = new(string) + **out = **in + } + if in.Thumbprint != nil { + in, out := &in.Thumbprint, &out.Thumbprint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateObservation. +func (in *CertificateObservation) DeepCopy() *CertificateObservation { + if in == nil { + return nil + } + out := new(CertificateObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateParameters) DeepCopyInto(out *CertificateParameters) { + *out = *in + if in.CertificatePasswordSecretRef != nil { + in, out := &in.CertificatePasswordSecretRef, &out.CertificatePasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + out.EncodedCertificateSecretRef = in.EncodedCertificateSecretRef + if in.StoreName != nil { + in, out := &in.StoreName, &out.StoreName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateParameters. 
+func (in *CertificateParameters) DeepCopy() *CertificateParameters { + if in == nil { + return nil + } + out := new(CertificateParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContactInitParameters) DeepCopyInto(out *ContactInitParameters) { + *out = *in + if in.Email != nil { + in, out := &in.Email, &out.Email + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContactInitParameters. +func (in *ContactInitParameters) DeepCopy() *ContactInitParameters { + if in == nil { + return nil + } + out := new(ContactInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContactObservation) DeepCopyInto(out *ContactObservation) { + *out = *in + if in.Email != nil { + in, out := &in.Email, &out.Email + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContactObservation. +func (in *ContactObservation) DeepCopy() *ContactObservation { + if in == nil { + return nil + } + out := new(ContactObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContactParameters) DeepCopyInto(out *ContactParameters) { + *out = *in + if in.Email != nil { + in, out := &in.Email, &out.Email + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContactParameters. +func (in *ContactParameters) DeepCopy() *ContactParameters { + if in == nil { + return nil + } + out := new(ContactParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CredentialsInitParameters) DeepCopyInto(out *CredentialsInitParameters) { + *out = *in + if in.Authorization != nil { + in, out := &in.Authorization, &out.Authorization + *out = new(AuthorizationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Query != nil { + in, out := &in.Query, &out.Query + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a 
new CredentialsInitParameters. +func (in *CredentialsInitParameters) DeepCopy() *CredentialsInitParameters { + if in == nil { + return nil + } + out := new(CredentialsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CredentialsObservation) DeepCopyInto(out *CredentialsObservation) { + *out = *in + if in.Authorization != nil { + in, out := &in.Authorization, &out.Authorization + *out = new(AuthorizationObservation) + (*in).DeepCopyInto(*out) + } + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Query != nil { + in, out := &in.Query, &out.Query + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CredentialsObservation. +func (in *CredentialsObservation) DeepCopy() *CredentialsObservation { + if in == nil { + return nil + } + out := new(CredentialsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CredentialsParameters) DeepCopyInto(out *CredentialsParameters) { + *out = *in + if in.Authorization != nil { + in, out := &in.Authorization, &out.Authorization + *out = new(AuthorizationParameters) + (*in).DeepCopyInto(*out) + } + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Query != nil { + in, out := &in.Query, &out.Query + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CredentialsParameters. +func (in *CredentialsParameters) DeepCopy() *CredentialsParameters { + if in == nil { + return nil + } + out := new(CredentialsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataMaskingHeadersInitParameters) DeepCopyInto(out *DataMaskingHeadersInitParameters) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataMaskingHeadersInitParameters. 
+func (in *DataMaskingHeadersInitParameters) DeepCopy() *DataMaskingHeadersInitParameters { + if in == nil { + return nil + } + out := new(DataMaskingHeadersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataMaskingHeadersObservation) DeepCopyInto(out *DataMaskingHeadersObservation) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataMaskingHeadersObservation. +func (in *DataMaskingHeadersObservation) DeepCopy() *DataMaskingHeadersObservation { + if in == nil { + return nil + } + out := new(DataMaskingHeadersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataMaskingHeadersParameters) DeepCopyInto(out *DataMaskingHeadersParameters) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataMaskingHeadersParameters. +func (in *DataMaskingHeadersParameters) DeepCopy() *DataMaskingHeadersParameters { + if in == nil { + return nil + } + out := new(DataMaskingHeadersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataMaskingInitParameters) DeepCopyInto(out *DataMaskingInitParameters) { + *out = *in + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]HeadersInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.QueryParams != nil { + in, out := &in.QueryParams, &out.QueryParams + *out = make([]QueryParamsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataMaskingInitParameters. +func (in *DataMaskingInitParameters) DeepCopy() *DataMaskingInitParameters { + if in == nil { + return nil + } + out := new(DataMaskingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataMaskingObservation) DeepCopyInto(out *DataMaskingObservation) { + *out = *in + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]HeadersObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.QueryParams != nil { + in, out := &in.QueryParams, &out.QueryParams + *out = make([]QueryParamsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataMaskingObservation. +func (in *DataMaskingObservation) DeepCopy() *DataMaskingObservation { + if in == nil { + return nil + } + out := new(DataMaskingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataMaskingParameters) DeepCopyInto(out *DataMaskingParameters) { + *out = *in + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]HeadersParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.QueryParams != nil { + in, out := &in.QueryParams, &out.QueryParams + *out = make([]QueryParamsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataMaskingParameters. +func (in *DataMaskingParameters) DeepCopy() *DataMaskingParameters { + if in == nil { + return nil + } + out := new(DataMaskingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataMaskingQueryParamsInitParameters) DeepCopyInto(out *DataMaskingQueryParamsInitParameters) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataMaskingQueryParamsInitParameters. +func (in *DataMaskingQueryParamsInitParameters) DeepCopy() *DataMaskingQueryParamsInitParameters { + if in == nil { + return nil + } + out := new(DataMaskingQueryParamsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataMaskingQueryParamsObservation) DeepCopyInto(out *DataMaskingQueryParamsObservation) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataMaskingQueryParamsObservation. +func (in *DataMaskingQueryParamsObservation) DeepCopy() *DataMaskingQueryParamsObservation { + if in == nil { + return nil + } + out := new(DataMaskingQueryParamsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataMaskingQueryParamsParameters) DeepCopyInto(out *DataMaskingQueryParamsParameters) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataMaskingQueryParamsParameters. +func (in *DataMaskingQueryParamsParameters) DeepCopy() *DataMaskingQueryParamsParameters { + if in == nil { + return nil + } + out := new(DataMaskingQueryParamsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DelegationInitParameters) DeepCopyInto(out *DelegationInitParameters) { + *out = *in + if in.SubscriptionsEnabled != nil { + in, out := &in.SubscriptionsEnabled, &out.SubscriptionsEnabled + *out = new(bool) + **out = **in + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } + if in.UserRegistrationEnabled != nil { + in, out := &in.UserRegistrationEnabled, &out.UserRegistrationEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DelegationInitParameters. +func (in *DelegationInitParameters) DeepCopy() *DelegationInitParameters { + if in == nil { + return nil + } + out := new(DelegationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DelegationObservation) DeepCopyInto(out *DelegationObservation) { + *out = *in + if in.SubscriptionsEnabled != nil { + in, out := &in.SubscriptionsEnabled, &out.SubscriptionsEnabled + *out = new(bool) + **out = **in + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } + if in.UserRegistrationEnabled != nil { + in, out := &in.UserRegistrationEnabled, &out.UserRegistrationEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DelegationObservation. +func (in *DelegationObservation) DeepCopy() *DelegationObservation { + if in == nil { + return nil + } + out := new(DelegationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DelegationParameters) DeepCopyInto(out *DelegationParameters) { + *out = *in + if in.SubscriptionsEnabled != nil { + in, out := &in.SubscriptionsEnabled, &out.SubscriptionsEnabled + *out = new(bool) + **out = **in + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } + if in.UserRegistrationEnabled != nil { + in, out := &in.UserRegistrationEnabled, &out.UserRegistrationEnabled + *out = new(bool) + **out = **in + } + if in.ValidationKeySecretRef != nil { + in, out := &in.ValidationKeySecretRef, &out.ValidationKeySecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DelegationParameters. +func (in *DelegationParameters) DeepCopy() *DelegationParameters { + if in == nil { + return nil + } + out := new(DelegationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeveloperPortalInitParameters) DeepCopyInto(out *DeveloperPortalInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeveloperPortalInitParameters. +func (in *DeveloperPortalInitParameters) DeepCopy() *DeveloperPortalInitParameters { + if in == nil { + return nil + } + out := new(DeveloperPortalInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeveloperPortalObservation) DeepCopyInto(out *DeveloperPortalObservation) { + *out = *in + if in.CertificateSource != nil { + in, out := &in.CertificateSource, &out.CertificateSource + *out = new(string) + **out = **in + } + if in.CertificateStatus != nil { + in, out := &in.CertificateStatus, &out.CertificateStatus + *out = new(string) + **out = **in + } + if in.Expiry != nil { + in, out := &in.Expiry, &out.Expiry + *out = new(string) + **out = **in + } + if in.HostName != nil { + in, out := &in.HostName, &out.HostName + *out = new(string) + **out = **in + } + if in.KeyVaultID != nil { + in, out := &in.KeyVaultID, &out.KeyVaultID + *out = new(string) + **out = **in + } + if in.NegotiateClientCertificate != nil { + in, out := &in.NegotiateClientCertificate, &out.NegotiateClientCertificate + *out = new(bool) + **out = **in + } + if in.SSLKeyvaultIdentityClientID != nil { + in, out := &in.SSLKeyvaultIdentityClientID, &out.SSLKeyvaultIdentityClientID + *out = new(string) + **out = **in + } + if in.Subject != nil { + in, out := &in.Subject, &out.Subject + *out = new(string) + **out = **in + } + if in.Thumbprint != nil { + in, out := &in.Thumbprint, &out.Thumbprint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeveloperPortalObservation. +func (in *DeveloperPortalObservation) DeepCopy() *DeveloperPortalObservation { + if in == nil { + return nil + } + out := new(DeveloperPortalObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeveloperPortalParameters) DeepCopyInto(out *DeveloperPortalParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeveloperPortalParameters. 
+func (in *DeveloperPortalParameters) DeepCopy() *DeveloperPortalParameters { + if in == nil { + return nil + } + out := new(DeveloperPortalParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Diagnostic) DeepCopyInto(out *Diagnostic) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Diagnostic. +func (in *Diagnostic) DeepCopy() *Diagnostic { + if in == nil { + return nil + } + out := new(Diagnostic) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Diagnostic) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiagnosticBackendRequestInitParameters) DeepCopyInto(out *DiagnosticBackendRequestInitParameters) { + *out = *in + if in.BodyBytes != nil { + in, out := &in.BodyBytes, &out.BodyBytes + *out = new(float64) + **out = **in + } + if in.DataMasking != nil { + in, out := &in.DataMasking, &out.DataMasking + *out = new(BackendRequestDataMaskingInitParameters) + (*in).DeepCopyInto(*out) + } + if in.HeadersToLog != nil { + in, out := &in.HeadersToLog, &out.HeadersToLog + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiagnosticBackendRequestInitParameters. 
+func (in *DiagnosticBackendRequestInitParameters) DeepCopy() *DiagnosticBackendRequestInitParameters { + if in == nil { + return nil + } + out := new(DiagnosticBackendRequestInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiagnosticBackendRequestObservation) DeepCopyInto(out *DiagnosticBackendRequestObservation) { + *out = *in + if in.BodyBytes != nil { + in, out := &in.BodyBytes, &out.BodyBytes + *out = new(float64) + **out = **in + } + if in.DataMasking != nil { + in, out := &in.DataMasking, &out.DataMasking + *out = new(BackendRequestDataMaskingObservation) + (*in).DeepCopyInto(*out) + } + if in.HeadersToLog != nil { + in, out := &in.HeadersToLog, &out.HeadersToLog + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiagnosticBackendRequestObservation. +func (in *DiagnosticBackendRequestObservation) DeepCopy() *DiagnosticBackendRequestObservation { + if in == nil { + return nil + } + out := new(DiagnosticBackendRequestObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DiagnosticBackendRequestParameters) DeepCopyInto(out *DiagnosticBackendRequestParameters) { + *out = *in + if in.BodyBytes != nil { + in, out := &in.BodyBytes, &out.BodyBytes + *out = new(float64) + **out = **in + } + if in.DataMasking != nil { + in, out := &in.DataMasking, &out.DataMasking + *out = new(BackendRequestDataMaskingParameters) + (*in).DeepCopyInto(*out) + } + if in.HeadersToLog != nil { + in, out := &in.HeadersToLog, &out.HeadersToLog + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiagnosticBackendRequestParameters. +func (in *DiagnosticBackendRequestParameters) DeepCopy() *DiagnosticBackendRequestParameters { + if in == nil { + return nil + } + out := new(DiagnosticBackendRequestParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiagnosticBackendResponseDataMaskingInitParameters) DeepCopyInto(out *DiagnosticBackendResponseDataMaskingInitParameters) { + *out = *in + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]BackendResponseDataMaskingHeadersInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.QueryParams != nil { + in, out := &in.QueryParams, &out.QueryParams + *out = make([]BackendResponseDataMaskingQueryParamsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiagnosticBackendResponseDataMaskingInitParameters. 
+func (in *DiagnosticBackendResponseDataMaskingInitParameters) DeepCopy() *DiagnosticBackendResponseDataMaskingInitParameters { + if in == nil { + return nil + } + out := new(DiagnosticBackendResponseDataMaskingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiagnosticBackendResponseDataMaskingObservation) DeepCopyInto(out *DiagnosticBackendResponseDataMaskingObservation) { + *out = *in + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]BackendResponseDataMaskingHeadersObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.QueryParams != nil { + in, out := &in.QueryParams, &out.QueryParams + *out = make([]BackendResponseDataMaskingQueryParamsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiagnosticBackendResponseDataMaskingObservation. +func (in *DiagnosticBackendResponseDataMaskingObservation) DeepCopy() *DiagnosticBackendResponseDataMaskingObservation { + if in == nil { + return nil + } + out := new(DiagnosticBackendResponseDataMaskingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DiagnosticBackendResponseDataMaskingParameters) DeepCopyInto(out *DiagnosticBackendResponseDataMaskingParameters) { + *out = *in + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]BackendResponseDataMaskingHeadersParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.QueryParams != nil { + in, out := &in.QueryParams, &out.QueryParams + *out = make([]BackendResponseDataMaskingQueryParamsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiagnosticBackendResponseDataMaskingParameters. +func (in *DiagnosticBackendResponseDataMaskingParameters) DeepCopy() *DiagnosticBackendResponseDataMaskingParameters { + if in == nil { + return nil + } + out := new(DiagnosticBackendResponseDataMaskingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiagnosticBackendResponseInitParameters) DeepCopyInto(out *DiagnosticBackendResponseInitParameters) { + *out = *in + if in.BodyBytes != nil { + in, out := &in.BodyBytes, &out.BodyBytes + *out = new(float64) + **out = **in + } + if in.DataMasking != nil { + in, out := &in.DataMasking, &out.DataMasking + *out = new(DiagnosticBackendResponseDataMaskingInitParameters) + (*in).DeepCopyInto(*out) + } + if in.HeadersToLog != nil { + in, out := &in.HeadersToLog, &out.HeadersToLog + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiagnosticBackendResponseInitParameters. 
+func (in *DiagnosticBackendResponseInitParameters) DeepCopy() *DiagnosticBackendResponseInitParameters { + if in == nil { + return nil + } + out := new(DiagnosticBackendResponseInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiagnosticBackendResponseObservation) DeepCopyInto(out *DiagnosticBackendResponseObservation) { + *out = *in + if in.BodyBytes != nil { + in, out := &in.BodyBytes, &out.BodyBytes + *out = new(float64) + **out = **in + } + if in.DataMasking != nil { + in, out := &in.DataMasking, &out.DataMasking + *out = new(DiagnosticBackendResponseDataMaskingObservation) + (*in).DeepCopyInto(*out) + } + if in.HeadersToLog != nil { + in, out := &in.HeadersToLog, &out.HeadersToLog + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiagnosticBackendResponseObservation. +func (in *DiagnosticBackendResponseObservation) DeepCopy() *DiagnosticBackendResponseObservation { + if in == nil { + return nil + } + out := new(DiagnosticBackendResponseObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DiagnosticBackendResponseParameters) DeepCopyInto(out *DiagnosticBackendResponseParameters) { + *out = *in + if in.BodyBytes != nil { + in, out := &in.BodyBytes, &out.BodyBytes + *out = new(float64) + **out = **in + } + if in.DataMasking != nil { + in, out := &in.DataMasking, &out.DataMasking + *out = new(DiagnosticBackendResponseDataMaskingParameters) + (*in).DeepCopyInto(*out) + } + if in.HeadersToLog != nil { + in, out := &in.HeadersToLog, &out.HeadersToLog + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiagnosticBackendResponseParameters. +func (in *DiagnosticBackendResponseParameters) DeepCopy() *DiagnosticBackendResponseParameters { + if in == nil { + return nil + } + out := new(DiagnosticBackendResponseParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiagnosticFrontendRequestDataMaskingHeadersInitParameters) DeepCopyInto(out *DiagnosticFrontendRequestDataMaskingHeadersInitParameters) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiagnosticFrontendRequestDataMaskingHeadersInitParameters. 
+func (in *DiagnosticFrontendRequestDataMaskingHeadersInitParameters) DeepCopy() *DiagnosticFrontendRequestDataMaskingHeadersInitParameters { + if in == nil { + return nil + } + out := new(DiagnosticFrontendRequestDataMaskingHeadersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiagnosticFrontendRequestDataMaskingHeadersObservation) DeepCopyInto(out *DiagnosticFrontendRequestDataMaskingHeadersObservation) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiagnosticFrontendRequestDataMaskingHeadersObservation. +func (in *DiagnosticFrontendRequestDataMaskingHeadersObservation) DeepCopy() *DiagnosticFrontendRequestDataMaskingHeadersObservation { + if in == nil { + return nil + } + out := new(DiagnosticFrontendRequestDataMaskingHeadersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiagnosticFrontendRequestDataMaskingHeadersParameters) DeepCopyInto(out *DiagnosticFrontendRequestDataMaskingHeadersParameters) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiagnosticFrontendRequestDataMaskingHeadersParameters. 
+func (in *DiagnosticFrontendRequestDataMaskingHeadersParameters) DeepCopy() *DiagnosticFrontendRequestDataMaskingHeadersParameters { + if in == nil { + return nil + } + out := new(DiagnosticFrontendRequestDataMaskingHeadersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiagnosticFrontendRequestDataMaskingInitParameters) DeepCopyInto(out *DiagnosticFrontendRequestDataMaskingInitParameters) { + *out = *in + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]DiagnosticFrontendRequestDataMaskingHeadersInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.QueryParams != nil { + in, out := &in.QueryParams, &out.QueryParams + *out = make([]DiagnosticFrontendRequestDataMaskingQueryParamsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiagnosticFrontendRequestDataMaskingInitParameters. +func (in *DiagnosticFrontendRequestDataMaskingInitParameters) DeepCopy() *DiagnosticFrontendRequestDataMaskingInitParameters { + if in == nil { + return nil + } + out := new(DiagnosticFrontendRequestDataMaskingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DiagnosticFrontendRequestDataMaskingObservation) DeepCopyInto(out *DiagnosticFrontendRequestDataMaskingObservation) { + *out = *in + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]DiagnosticFrontendRequestDataMaskingHeadersObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.QueryParams != nil { + in, out := &in.QueryParams, &out.QueryParams + *out = make([]DiagnosticFrontendRequestDataMaskingQueryParamsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiagnosticFrontendRequestDataMaskingObservation. +func (in *DiagnosticFrontendRequestDataMaskingObservation) DeepCopy() *DiagnosticFrontendRequestDataMaskingObservation { + if in == nil { + return nil + } + out := new(DiagnosticFrontendRequestDataMaskingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiagnosticFrontendRequestDataMaskingParameters) DeepCopyInto(out *DiagnosticFrontendRequestDataMaskingParameters) { + *out = *in + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]DiagnosticFrontendRequestDataMaskingHeadersParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.QueryParams != nil { + in, out := &in.QueryParams, &out.QueryParams + *out = make([]DiagnosticFrontendRequestDataMaskingQueryParamsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiagnosticFrontendRequestDataMaskingParameters. 
+func (in *DiagnosticFrontendRequestDataMaskingParameters) DeepCopy() *DiagnosticFrontendRequestDataMaskingParameters { + if in == nil { + return nil + } + out := new(DiagnosticFrontendRequestDataMaskingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiagnosticFrontendRequestDataMaskingQueryParamsInitParameters) DeepCopyInto(out *DiagnosticFrontendRequestDataMaskingQueryParamsInitParameters) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiagnosticFrontendRequestDataMaskingQueryParamsInitParameters. +func (in *DiagnosticFrontendRequestDataMaskingQueryParamsInitParameters) DeepCopy() *DiagnosticFrontendRequestDataMaskingQueryParamsInitParameters { + if in == nil { + return nil + } + out := new(DiagnosticFrontendRequestDataMaskingQueryParamsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiagnosticFrontendRequestDataMaskingQueryParamsObservation) DeepCopyInto(out *DiagnosticFrontendRequestDataMaskingQueryParamsObservation) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiagnosticFrontendRequestDataMaskingQueryParamsObservation. 
+func (in *DiagnosticFrontendRequestDataMaskingQueryParamsObservation) DeepCopy() *DiagnosticFrontendRequestDataMaskingQueryParamsObservation { + if in == nil { + return nil + } + out := new(DiagnosticFrontendRequestDataMaskingQueryParamsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiagnosticFrontendRequestDataMaskingQueryParamsParameters) DeepCopyInto(out *DiagnosticFrontendRequestDataMaskingQueryParamsParameters) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiagnosticFrontendRequestDataMaskingQueryParamsParameters. +func (in *DiagnosticFrontendRequestDataMaskingQueryParamsParameters) DeepCopy() *DiagnosticFrontendRequestDataMaskingQueryParamsParameters { + if in == nil { + return nil + } + out := new(DiagnosticFrontendRequestDataMaskingQueryParamsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DiagnosticFrontendRequestInitParameters) DeepCopyInto(out *DiagnosticFrontendRequestInitParameters) { + *out = *in + if in.BodyBytes != nil { + in, out := &in.BodyBytes, &out.BodyBytes + *out = new(float64) + **out = **in + } + if in.DataMasking != nil { + in, out := &in.DataMasking, &out.DataMasking + *out = new(DiagnosticFrontendRequestDataMaskingInitParameters) + (*in).DeepCopyInto(*out) + } + if in.HeadersToLog != nil { + in, out := &in.HeadersToLog, &out.HeadersToLog + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiagnosticFrontendRequestInitParameters. +func (in *DiagnosticFrontendRequestInitParameters) DeepCopy() *DiagnosticFrontendRequestInitParameters { + if in == nil { + return nil + } + out := new(DiagnosticFrontendRequestInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiagnosticFrontendRequestObservation) DeepCopyInto(out *DiagnosticFrontendRequestObservation) { + *out = *in + if in.BodyBytes != nil { + in, out := &in.BodyBytes, &out.BodyBytes + *out = new(float64) + **out = **in + } + if in.DataMasking != nil { + in, out := &in.DataMasking, &out.DataMasking + *out = new(DiagnosticFrontendRequestDataMaskingObservation) + (*in).DeepCopyInto(*out) + } + if in.HeadersToLog != nil { + in, out := &in.HeadersToLog, &out.HeadersToLog + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiagnosticFrontendRequestObservation. 
+func (in *DiagnosticFrontendRequestObservation) DeepCopy() *DiagnosticFrontendRequestObservation { + if in == nil { + return nil + } + out := new(DiagnosticFrontendRequestObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiagnosticFrontendRequestParameters) DeepCopyInto(out *DiagnosticFrontendRequestParameters) { + *out = *in + if in.BodyBytes != nil { + in, out := &in.BodyBytes, &out.BodyBytes + *out = new(float64) + **out = **in + } + if in.DataMasking != nil { + in, out := &in.DataMasking, &out.DataMasking + *out = new(DiagnosticFrontendRequestDataMaskingParameters) + (*in).DeepCopyInto(*out) + } + if in.HeadersToLog != nil { + in, out := &in.HeadersToLog, &out.HeadersToLog + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiagnosticFrontendRequestParameters. +func (in *DiagnosticFrontendRequestParameters) DeepCopy() *DiagnosticFrontendRequestParameters { + if in == nil { + return nil + } + out := new(DiagnosticFrontendRequestParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiagnosticFrontendResponseDataMaskingHeadersInitParameters) DeepCopyInto(out *DiagnosticFrontendResponseDataMaskingHeadersInitParameters) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiagnosticFrontendResponseDataMaskingHeadersInitParameters. 
+func (in *DiagnosticFrontendResponseDataMaskingHeadersInitParameters) DeepCopy() *DiagnosticFrontendResponseDataMaskingHeadersInitParameters { + if in == nil { + return nil + } + out := new(DiagnosticFrontendResponseDataMaskingHeadersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiagnosticFrontendResponseDataMaskingHeadersObservation) DeepCopyInto(out *DiagnosticFrontendResponseDataMaskingHeadersObservation) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiagnosticFrontendResponseDataMaskingHeadersObservation. +func (in *DiagnosticFrontendResponseDataMaskingHeadersObservation) DeepCopy() *DiagnosticFrontendResponseDataMaskingHeadersObservation { + if in == nil { + return nil + } + out := new(DiagnosticFrontendResponseDataMaskingHeadersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiagnosticFrontendResponseDataMaskingHeadersParameters) DeepCopyInto(out *DiagnosticFrontendResponseDataMaskingHeadersParameters) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiagnosticFrontendResponseDataMaskingHeadersParameters. 
+func (in *DiagnosticFrontendResponseDataMaskingHeadersParameters) DeepCopy() *DiagnosticFrontendResponseDataMaskingHeadersParameters { + if in == nil { + return nil + } + out := new(DiagnosticFrontendResponseDataMaskingHeadersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiagnosticFrontendResponseDataMaskingInitParameters) DeepCopyInto(out *DiagnosticFrontendResponseDataMaskingInitParameters) { + *out = *in + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]DiagnosticFrontendResponseDataMaskingHeadersInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.QueryParams != nil { + in, out := &in.QueryParams, &out.QueryParams + *out = make([]DiagnosticFrontendResponseDataMaskingQueryParamsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiagnosticFrontendResponseDataMaskingInitParameters. +func (in *DiagnosticFrontendResponseDataMaskingInitParameters) DeepCopy() *DiagnosticFrontendResponseDataMaskingInitParameters { + if in == nil { + return nil + } + out := new(DiagnosticFrontendResponseDataMaskingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DiagnosticFrontendResponseDataMaskingObservation) DeepCopyInto(out *DiagnosticFrontendResponseDataMaskingObservation) { + *out = *in + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]DiagnosticFrontendResponseDataMaskingHeadersObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.QueryParams != nil { + in, out := &in.QueryParams, &out.QueryParams + *out = make([]DiagnosticFrontendResponseDataMaskingQueryParamsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiagnosticFrontendResponseDataMaskingObservation. +func (in *DiagnosticFrontendResponseDataMaskingObservation) DeepCopy() *DiagnosticFrontendResponseDataMaskingObservation { + if in == nil { + return nil + } + out := new(DiagnosticFrontendResponseDataMaskingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiagnosticFrontendResponseDataMaskingParameters) DeepCopyInto(out *DiagnosticFrontendResponseDataMaskingParameters) { + *out = *in + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]DiagnosticFrontendResponseDataMaskingHeadersParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.QueryParams != nil { + in, out := &in.QueryParams, &out.QueryParams + *out = make([]DiagnosticFrontendResponseDataMaskingQueryParamsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiagnosticFrontendResponseDataMaskingParameters. 
+func (in *DiagnosticFrontendResponseDataMaskingParameters) DeepCopy() *DiagnosticFrontendResponseDataMaskingParameters { + if in == nil { + return nil + } + out := new(DiagnosticFrontendResponseDataMaskingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiagnosticFrontendResponseDataMaskingQueryParamsInitParameters) DeepCopyInto(out *DiagnosticFrontendResponseDataMaskingQueryParamsInitParameters) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiagnosticFrontendResponseDataMaskingQueryParamsInitParameters. +func (in *DiagnosticFrontendResponseDataMaskingQueryParamsInitParameters) DeepCopy() *DiagnosticFrontendResponseDataMaskingQueryParamsInitParameters { + if in == nil { + return nil + } + out := new(DiagnosticFrontendResponseDataMaskingQueryParamsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiagnosticFrontendResponseDataMaskingQueryParamsObservation) DeepCopyInto(out *DiagnosticFrontendResponseDataMaskingQueryParamsObservation) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiagnosticFrontendResponseDataMaskingQueryParamsObservation. 
+func (in *DiagnosticFrontendResponseDataMaskingQueryParamsObservation) DeepCopy() *DiagnosticFrontendResponseDataMaskingQueryParamsObservation { + if in == nil { + return nil + } + out := new(DiagnosticFrontendResponseDataMaskingQueryParamsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiagnosticFrontendResponseDataMaskingQueryParamsParameters) DeepCopyInto(out *DiagnosticFrontendResponseDataMaskingQueryParamsParameters) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiagnosticFrontendResponseDataMaskingQueryParamsParameters. +func (in *DiagnosticFrontendResponseDataMaskingQueryParamsParameters) DeepCopy() *DiagnosticFrontendResponseDataMaskingQueryParamsParameters { + if in == nil { + return nil + } + out := new(DiagnosticFrontendResponseDataMaskingQueryParamsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DiagnosticFrontendResponseInitParameters) DeepCopyInto(out *DiagnosticFrontendResponseInitParameters) { + *out = *in + if in.BodyBytes != nil { + in, out := &in.BodyBytes, &out.BodyBytes + *out = new(float64) + **out = **in + } + if in.DataMasking != nil { + in, out := &in.DataMasking, &out.DataMasking + *out = new(DiagnosticFrontendResponseDataMaskingInitParameters) + (*in).DeepCopyInto(*out) + } + if in.HeadersToLog != nil { + in, out := &in.HeadersToLog, &out.HeadersToLog + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiagnosticFrontendResponseInitParameters. +func (in *DiagnosticFrontendResponseInitParameters) DeepCopy() *DiagnosticFrontendResponseInitParameters { + if in == nil { + return nil + } + out := new(DiagnosticFrontendResponseInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiagnosticFrontendResponseObservation) DeepCopyInto(out *DiagnosticFrontendResponseObservation) { + *out = *in + if in.BodyBytes != nil { + in, out := &in.BodyBytes, &out.BodyBytes + *out = new(float64) + **out = **in + } + if in.DataMasking != nil { + in, out := &in.DataMasking, &out.DataMasking + *out = new(DiagnosticFrontendResponseDataMaskingObservation) + (*in).DeepCopyInto(*out) + } + if in.HeadersToLog != nil { + in, out := &in.HeadersToLog, &out.HeadersToLog + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiagnosticFrontendResponseObservation. 
+func (in *DiagnosticFrontendResponseObservation) DeepCopy() *DiagnosticFrontendResponseObservation { + if in == nil { + return nil + } + out := new(DiagnosticFrontendResponseObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiagnosticFrontendResponseParameters) DeepCopyInto(out *DiagnosticFrontendResponseParameters) { + *out = *in + if in.BodyBytes != nil { + in, out := &in.BodyBytes, &out.BodyBytes + *out = new(float64) + **out = **in + } + if in.DataMasking != nil { + in, out := &in.DataMasking, &out.DataMasking + *out = new(DiagnosticFrontendResponseDataMaskingParameters) + (*in).DeepCopyInto(*out) + } + if in.HeadersToLog != nil { + in, out := &in.HeadersToLog, &out.HeadersToLog + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiagnosticFrontendResponseParameters. +func (in *DiagnosticFrontendResponseParameters) DeepCopy() *DiagnosticFrontendResponseParameters { + if in == nil { + return nil + } + out := new(DiagnosticFrontendResponseParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DiagnosticInitParameters) DeepCopyInto(out *DiagnosticInitParameters) { + *out = *in + if in.APIManagementLoggerID != nil { + in, out := &in.APIManagementLoggerID, &out.APIManagementLoggerID + *out = new(string) + **out = **in + } + if in.APIManagementLoggerIDRef != nil { + in, out := &in.APIManagementLoggerIDRef, &out.APIManagementLoggerIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.APIManagementLoggerIDSelector != nil { + in, out := &in.APIManagementLoggerIDSelector, &out.APIManagementLoggerIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.AlwaysLogErrors != nil { + in, out := &in.AlwaysLogErrors, &out.AlwaysLogErrors + *out = new(bool) + **out = **in + } + if in.BackendRequest != nil { + in, out := &in.BackendRequest, &out.BackendRequest + *out = new(DiagnosticBackendRequestInitParameters) + (*in).DeepCopyInto(*out) + } + if in.BackendResponse != nil { + in, out := &in.BackendResponse, &out.BackendResponse + *out = new(DiagnosticBackendResponseInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FrontendRequest != nil { + in, out := &in.FrontendRequest, &out.FrontendRequest + *out = new(DiagnosticFrontendRequestInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FrontendResponse != nil { + in, out := &in.FrontendResponse, &out.FrontendResponse + *out = new(DiagnosticFrontendResponseInitParameters) + (*in).DeepCopyInto(*out) + } + if in.HTTPCorrelationProtocol != nil { + in, out := &in.HTTPCorrelationProtocol, &out.HTTPCorrelationProtocol + *out = new(string) + **out = **in + } + if in.LogClientIP != nil { + in, out := &in.LogClientIP, &out.LogClientIP + *out = new(bool) + **out = **in + } + if in.OperationNameFormat != nil { + in, out := &in.OperationNameFormat, &out.OperationNameFormat + *out = new(string) + **out = **in + } + if in.SamplingPercentage != nil { + in, out := &in.SamplingPercentage, &out.SamplingPercentage + *out = new(float64) + **out = **in + } + if in.Verbosity != nil { + in, 
out := &in.Verbosity, &out.Verbosity + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiagnosticInitParameters. +func (in *DiagnosticInitParameters) DeepCopy() *DiagnosticInitParameters { + if in == nil { + return nil + } + out := new(DiagnosticInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiagnosticList) DeepCopyInto(out *DiagnosticList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Diagnostic, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiagnosticList. +func (in *DiagnosticList) DeepCopy() *DiagnosticList { + if in == nil { + return nil + } + out := new(DiagnosticList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DiagnosticList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DiagnosticObservation) DeepCopyInto(out *DiagnosticObservation) { + *out = *in + if in.APIManagementLoggerID != nil { + in, out := &in.APIManagementLoggerID, &out.APIManagementLoggerID + *out = new(string) + **out = **in + } + if in.APIManagementName != nil { + in, out := &in.APIManagementName, &out.APIManagementName + *out = new(string) + **out = **in + } + if in.AlwaysLogErrors != nil { + in, out := &in.AlwaysLogErrors, &out.AlwaysLogErrors + *out = new(bool) + **out = **in + } + if in.BackendRequest != nil { + in, out := &in.BackendRequest, &out.BackendRequest + *out = new(DiagnosticBackendRequestObservation) + (*in).DeepCopyInto(*out) + } + if in.BackendResponse != nil { + in, out := &in.BackendResponse, &out.BackendResponse + *out = new(DiagnosticBackendResponseObservation) + (*in).DeepCopyInto(*out) + } + if in.FrontendRequest != nil { + in, out := &in.FrontendRequest, &out.FrontendRequest + *out = new(DiagnosticFrontendRequestObservation) + (*in).DeepCopyInto(*out) + } + if in.FrontendResponse != nil { + in, out := &in.FrontendResponse, &out.FrontendResponse + *out = new(DiagnosticFrontendResponseObservation) + (*in).DeepCopyInto(*out) + } + if in.HTTPCorrelationProtocol != nil { + in, out := &in.HTTPCorrelationProtocol, &out.HTTPCorrelationProtocol + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LogClientIP != nil { + in, out := &in.LogClientIP, &out.LogClientIP + *out = new(bool) + **out = **in + } + if in.OperationNameFormat != nil { + in, out := &in.OperationNameFormat, &out.OperationNameFormat + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.SamplingPercentage != nil { + in, out := &in.SamplingPercentage, &out.SamplingPercentage + *out = new(float64) + **out = **in + } + if in.Verbosity != nil { + in, out := &in.Verbosity, 
&out.Verbosity + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiagnosticObservation. +func (in *DiagnosticObservation) DeepCopy() *DiagnosticObservation { + if in == nil { + return nil + } + out := new(DiagnosticObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiagnosticParameters) DeepCopyInto(out *DiagnosticParameters) { + *out = *in + if in.APIManagementLoggerID != nil { + in, out := &in.APIManagementLoggerID, &out.APIManagementLoggerID + *out = new(string) + **out = **in + } + if in.APIManagementLoggerIDRef != nil { + in, out := &in.APIManagementLoggerIDRef, &out.APIManagementLoggerIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.APIManagementLoggerIDSelector != nil { + in, out := &in.APIManagementLoggerIDSelector, &out.APIManagementLoggerIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.APIManagementName != nil { + in, out := &in.APIManagementName, &out.APIManagementName + *out = new(string) + **out = **in + } + if in.APIManagementNameRef != nil { + in, out := &in.APIManagementNameRef, &out.APIManagementNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.APIManagementNameSelector != nil { + in, out := &in.APIManagementNameSelector, &out.APIManagementNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.AlwaysLogErrors != nil { + in, out := &in.AlwaysLogErrors, &out.AlwaysLogErrors + *out = new(bool) + **out = **in + } + if in.BackendRequest != nil { + in, out := &in.BackendRequest, &out.BackendRequest + *out = new(DiagnosticBackendRequestParameters) + (*in).DeepCopyInto(*out) + } + if in.BackendResponse != nil { + in, out := &in.BackendResponse, &out.BackendResponse + *out = new(DiagnosticBackendResponseParameters) + (*in).DeepCopyInto(*out) + } + if 
in.FrontendRequest != nil { + in, out := &in.FrontendRequest, &out.FrontendRequest + *out = new(DiagnosticFrontendRequestParameters) + (*in).DeepCopyInto(*out) + } + if in.FrontendResponse != nil { + in, out := &in.FrontendResponse, &out.FrontendResponse + *out = new(DiagnosticFrontendResponseParameters) + (*in).DeepCopyInto(*out) + } + if in.HTTPCorrelationProtocol != nil { + in, out := &in.HTTPCorrelationProtocol, &out.HTTPCorrelationProtocol + *out = new(string) + **out = **in + } + if in.LogClientIP != nil { + in, out := &in.LogClientIP, &out.LogClientIP + *out = new(bool) + **out = **in + } + if in.OperationNameFormat != nil { + in, out := &in.OperationNameFormat, &out.OperationNameFormat + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SamplingPercentage != nil { + in, out := &in.SamplingPercentage, &out.SamplingPercentage + *out = new(float64) + **out = **in + } + if in.Verbosity != nil { + in, out := &in.Verbosity, &out.Verbosity + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiagnosticParameters. +func (in *DiagnosticParameters) DeepCopy() *DiagnosticParameters { + if in == nil { + return nil + } + out := new(DiagnosticParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DiagnosticSpec) DeepCopyInto(out *DiagnosticSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiagnosticSpec. +func (in *DiagnosticSpec) DeepCopy() *DiagnosticSpec { + if in == nil { + return nil + } + out := new(DiagnosticSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiagnosticStatus) DeepCopyInto(out *DiagnosticStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiagnosticStatus. +func (in *DiagnosticStatus) DeepCopy() *DiagnosticStatus { + if in == nil { + return nil + } + out := new(DiagnosticStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventHubInitParameters) DeepCopyInto(out *EventHubInitParameters) { + *out = *in + if in.EndpointURI != nil { + in, out := &in.EndpointURI, &out.EndpointURI + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.UserAssignedIdentityClientID != nil { + in, out := &in.UserAssignedIdentityClientID, &out.UserAssignedIdentityClientID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventHubInitParameters. 
+func (in *EventHubInitParameters) DeepCopy() *EventHubInitParameters { + if in == nil { + return nil + } + out := new(EventHubInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventHubObservation) DeepCopyInto(out *EventHubObservation) { + *out = *in + if in.EndpointURI != nil { + in, out := &in.EndpointURI, &out.EndpointURI + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.UserAssignedIdentityClientID != nil { + in, out := &in.UserAssignedIdentityClientID, &out.UserAssignedIdentityClientID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventHubObservation. +func (in *EventHubObservation) DeepCopy() *EventHubObservation { + if in == nil { + return nil + } + out := new(EventHubObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventHubParameters) DeepCopyInto(out *EventHubParameters) { + *out = *in + if in.ConnectionStringSecretRef != nil { + in, out := &in.ConnectionStringSecretRef, &out.ConnectionStringSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.EndpointURI != nil { + in, out := &in.EndpointURI, &out.EndpointURI + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.UserAssignedIdentityClientID != nil { + in, out := &in.UserAssignedIdentityClientID, &out.UserAssignedIdentityClientID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventHubParameters. 
+func (in *EventHubParameters) DeepCopy() *EventHubParameters { + if in == nil { + return nil + } + out := new(EventHubParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExampleInitParameters) DeepCopyInto(out *ExampleInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ExternalValue != nil { + in, out := &in.ExternalValue, &out.ExternalValue + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Summary != nil { + in, out := &in.Summary, &out.Summary + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExampleInitParameters. +func (in *ExampleInitParameters) DeepCopy() *ExampleInitParameters { + if in == nil { + return nil + } + out := new(ExampleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExampleObservation) DeepCopyInto(out *ExampleObservation) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ExternalValue != nil { + in, out := &in.ExternalValue, &out.ExternalValue + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Summary != nil { + in, out := &in.Summary, &out.Summary + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExampleObservation. +func (in *ExampleObservation) DeepCopy() *ExampleObservation { + if in == nil { + return nil + } + out := new(ExampleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExampleParameters) DeepCopyInto(out *ExampleParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ExternalValue != nil { + in, out := &in.ExternalValue, &out.ExternalValue + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Summary != nil { + in, out := &in.Summary, &out.Summary + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExampleParameters. 
+func (in *ExampleParameters) DeepCopy() *ExampleParameters { + if in == nil { + return nil + } + out := new(ExampleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FormParameterExampleInitParameters) DeepCopyInto(out *FormParameterExampleInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ExternalValue != nil { + in, out := &in.ExternalValue, &out.ExternalValue + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Summary != nil { + in, out := &in.Summary, &out.Summary + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FormParameterExampleInitParameters. +func (in *FormParameterExampleInitParameters) DeepCopy() *FormParameterExampleInitParameters { + if in == nil { + return nil + } + out := new(FormParameterExampleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FormParameterExampleObservation) DeepCopyInto(out *FormParameterExampleObservation) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ExternalValue != nil { + in, out := &in.ExternalValue, &out.ExternalValue + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Summary != nil { + in, out := &in.Summary, &out.Summary + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FormParameterExampleObservation. +func (in *FormParameterExampleObservation) DeepCopy() *FormParameterExampleObservation { + if in == nil { + return nil + } + out := new(FormParameterExampleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FormParameterExampleParameters) DeepCopyInto(out *FormParameterExampleParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ExternalValue != nil { + in, out := &in.ExternalValue, &out.ExternalValue + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Summary != nil { + in, out := &in.Summary, &out.Summary + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FormParameterExampleParameters. 
+func (in *FormParameterExampleParameters) DeepCopy() *FormParameterExampleParameters { + if in == nil { + return nil + } + out := new(FormParameterExampleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FormParameterInitParameters) DeepCopyInto(out *FormParameterInitParameters) { + *out = *in + if in.DefaultValue != nil { + in, out := &in.DefaultValue, &out.DefaultValue + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Example != nil { + in, out := &in.Example, &out.Example + *out = make([]FormParameterExampleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Required != nil { + in, out := &in.Required, &out.Required + *out = new(bool) + **out = **in + } + if in.SchemaID != nil { + in, out := &in.SchemaID, &out.SchemaID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.TypeName != nil { + in, out := &in.TypeName, &out.TypeName + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FormParameterInitParameters. 
+func (in *FormParameterInitParameters) DeepCopy() *FormParameterInitParameters { + if in == nil { + return nil + } + out := new(FormParameterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FormParameterObservation) DeepCopyInto(out *FormParameterObservation) { + *out = *in + if in.DefaultValue != nil { + in, out := &in.DefaultValue, &out.DefaultValue + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Example != nil { + in, out := &in.Example, &out.Example + *out = make([]FormParameterExampleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Required != nil { + in, out := &in.Required, &out.Required + *out = new(bool) + **out = **in + } + if in.SchemaID != nil { + in, out := &in.SchemaID, &out.SchemaID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.TypeName != nil { + in, out := &in.TypeName, &out.TypeName + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FormParameterObservation. +func (in *FormParameterObservation) DeepCopy() *FormParameterObservation { + if in == nil { + return nil + } + out := new(FormParameterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FormParameterParameters) DeepCopyInto(out *FormParameterParameters) { + *out = *in + if in.DefaultValue != nil { + in, out := &in.DefaultValue, &out.DefaultValue + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Example != nil { + in, out := &in.Example, &out.Example + *out = make([]FormParameterExampleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Required != nil { + in, out := &in.Required, &out.Required + *out = new(bool) + **out = **in + } + if in.SchemaID != nil { + in, out := &in.SchemaID, &out.SchemaID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.TypeName != nil { + in, out := &in.TypeName, &out.TypeName + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FormParameterParameters. +func (in *FormParameterParameters) DeepCopy() *FormParameterParameters { + if in == nil { + return nil + } + out := new(FormParameterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FrontendRequestDataMaskingHeadersInitParameters) DeepCopyInto(out *FrontendRequestDataMaskingHeadersInitParameters) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontendRequestDataMaskingHeadersInitParameters. +func (in *FrontendRequestDataMaskingHeadersInitParameters) DeepCopy() *FrontendRequestDataMaskingHeadersInitParameters { + if in == nil { + return nil + } + out := new(FrontendRequestDataMaskingHeadersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontendRequestDataMaskingHeadersObservation) DeepCopyInto(out *FrontendRequestDataMaskingHeadersObservation) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontendRequestDataMaskingHeadersObservation. +func (in *FrontendRequestDataMaskingHeadersObservation) DeepCopy() *FrontendRequestDataMaskingHeadersObservation { + if in == nil { + return nil + } + out := new(FrontendRequestDataMaskingHeadersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FrontendRequestDataMaskingHeadersParameters) DeepCopyInto(out *FrontendRequestDataMaskingHeadersParameters) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontendRequestDataMaskingHeadersParameters. +func (in *FrontendRequestDataMaskingHeadersParameters) DeepCopy() *FrontendRequestDataMaskingHeadersParameters { + if in == nil { + return nil + } + out := new(FrontendRequestDataMaskingHeadersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontendRequestDataMaskingInitParameters) DeepCopyInto(out *FrontendRequestDataMaskingInitParameters) { + *out = *in + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]FrontendRequestDataMaskingHeadersInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.QueryParams != nil { + in, out := &in.QueryParams, &out.QueryParams + *out = make([]FrontendRequestDataMaskingQueryParamsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontendRequestDataMaskingInitParameters. +func (in *FrontendRequestDataMaskingInitParameters) DeepCopy() *FrontendRequestDataMaskingInitParameters { + if in == nil { + return nil + } + out := new(FrontendRequestDataMaskingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FrontendRequestDataMaskingObservation) DeepCopyInto(out *FrontendRequestDataMaskingObservation) { + *out = *in + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]FrontendRequestDataMaskingHeadersObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.QueryParams != nil { + in, out := &in.QueryParams, &out.QueryParams + *out = make([]FrontendRequestDataMaskingQueryParamsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontendRequestDataMaskingObservation. +func (in *FrontendRequestDataMaskingObservation) DeepCopy() *FrontendRequestDataMaskingObservation { + if in == nil { + return nil + } + out := new(FrontendRequestDataMaskingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontendRequestDataMaskingParameters) DeepCopyInto(out *FrontendRequestDataMaskingParameters) { + *out = *in + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]FrontendRequestDataMaskingHeadersParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.QueryParams != nil { + in, out := &in.QueryParams, &out.QueryParams + *out = make([]FrontendRequestDataMaskingQueryParamsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontendRequestDataMaskingParameters. 
+func (in *FrontendRequestDataMaskingParameters) DeepCopy() *FrontendRequestDataMaskingParameters { + if in == nil { + return nil + } + out := new(FrontendRequestDataMaskingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontendRequestDataMaskingQueryParamsInitParameters) DeepCopyInto(out *FrontendRequestDataMaskingQueryParamsInitParameters) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontendRequestDataMaskingQueryParamsInitParameters. +func (in *FrontendRequestDataMaskingQueryParamsInitParameters) DeepCopy() *FrontendRequestDataMaskingQueryParamsInitParameters { + if in == nil { + return nil + } + out := new(FrontendRequestDataMaskingQueryParamsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontendRequestDataMaskingQueryParamsObservation) DeepCopyInto(out *FrontendRequestDataMaskingQueryParamsObservation) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontendRequestDataMaskingQueryParamsObservation. 
+func (in *FrontendRequestDataMaskingQueryParamsObservation) DeepCopy() *FrontendRequestDataMaskingQueryParamsObservation { + if in == nil { + return nil + } + out := new(FrontendRequestDataMaskingQueryParamsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontendRequestDataMaskingQueryParamsParameters) DeepCopyInto(out *FrontendRequestDataMaskingQueryParamsParameters) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontendRequestDataMaskingQueryParamsParameters. +func (in *FrontendRequestDataMaskingQueryParamsParameters) DeepCopy() *FrontendRequestDataMaskingQueryParamsParameters { + if in == nil { + return nil + } + out := new(FrontendRequestDataMaskingQueryParamsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontendRequestInitParameters) DeepCopyInto(out *FrontendRequestInitParameters) { + *out = *in + if in.BodyBytes != nil { + in, out := &in.BodyBytes, &out.BodyBytes + *out = new(float64) + **out = **in + } + if in.DataMasking != nil { + in, out := &in.DataMasking, &out.DataMasking + *out = new(FrontendRequestDataMaskingInitParameters) + (*in).DeepCopyInto(*out) + } + if in.HeadersToLog != nil { + in, out := &in.HeadersToLog, &out.HeadersToLog + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontendRequestInitParameters. 
+func (in *FrontendRequestInitParameters) DeepCopy() *FrontendRequestInitParameters { + if in == nil { + return nil + } + out := new(FrontendRequestInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontendRequestObservation) DeepCopyInto(out *FrontendRequestObservation) { + *out = *in + if in.BodyBytes != nil { + in, out := &in.BodyBytes, &out.BodyBytes + *out = new(float64) + **out = **in + } + if in.DataMasking != nil { + in, out := &in.DataMasking, &out.DataMasking + *out = new(FrontendRequestDataMaskingObservation) + (*in).DeepCopyInto(*out) + } + if in.HeadersToLog != nil { + in, out := &in.HeadersToLog, &out.HeadersToLog + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontendRequestObservation. +func (in *FrontendRequestObservation) DeepCopy() *FrontendRequestObservation { + if in == nil { + return nil + } + out := new(FrontendRequestObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FrontendRequestParameters) DeepCopyInto(out *FrontendRequestParameters) { + *out = *in + if in.BodyBytes != nil { + in, out := &in.BodyBytes, &out.BodyBytes + *out = new(float64) + **out = **in + } + if in.DataMasking != nil { + in, out := &in.DataMasking, &out.DataMasking + *out = new(FrontendRequestDataMaskingParameters) + (*in).DeepCopyInto(*out) + } + if in.HeadersToLog != nil { + in, out := &in.HeadersToLog, &out.HeadersToLog + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontendRequestParameters. +func (in *FrontendRequestParameters) DeepCopy() *FrontendRequestParameters { + if in == nil { + return nil + } + out := new(FrontendRequestParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontendResponseDataMaskingHeadersInitParameters) DeepCopyInto(out *FrontendResponseDataMaskingHeadersInitParameters) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontendResponseDataMaskingHeadersInitParameters. +func (in *FrontendResponseDataMaskingHeadersInitParameters) DeepCopy() *FrontendResponseDataMaskingHeadersInitParameters { + if in == nil { + return nil + } + out := new(FrontendResponseDataMaskingHeadersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FrontendResponseDataMaskingHeadersObservation) DeepCopyInto(out *FrontendResponseDataMaskingHeadersObservation) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontendResponseDataMaskingHeadersObservation. +func (in *FrontendResponseDataMaskingHeadersObservation) DeepCopy() *FrontendResponseDataMaskingHeadersObservation { + if in == nil { + return nil + } + out := new(FrontendResponseDataMaskingHeadersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontendResponseDataMaskingHeadersParameters) DeepCopyInto(out *FrontendResponseDataMaskingHeadersParameters) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontendResponseDataMaskingHeadersParameters. +func (in *FrontendResponseDataMaskingHeadersParameters) DeepCopy() *FrontendResponseDataMaskingHeadersParameters { + if in == nil { + return nil + } + out := new(FrontendResponseDataMaskingHeadersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FrontendResponseDataMaskingInitParameters) DeepCopyInto(out *FrontendResponseDataMaskingInitParameters) { + *out = *in + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]FrontendResponseDataMaskingHeadersInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.QueryParams != nil { + in, out := &in.QueryParams, &out.QueryParams + *out = make([]FrontendResponseDataMaskingQueryParamsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontendResponseDataMaskingInitParameters. +func (in *FrontendResponseDataMaskingInitParameters) DeepCopy() *FrontendResponseDataMaskingInitParameters { + if in == nil { + return nil + } + out := new(FrontendResponseDataMaskingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontendResponseDataMaskingObservation) DeepCopyInto(out *FrontendResponseDataMaskingObservation) { + *out = *in + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]FrontendResponseDataMaskingHeadersObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.QueryParams != nil { + in, out := &in.QueryParams, &out.QueryParams + *out = make([]FrontendResponseDataMaskingQueryParamsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontendResponseDataMaskingObservation. 
+func (in *FrontendResponseDataMaskingObservation) DeepCopy() *FrontendResponseDataMaskingObservation { + if in == nil { + return nil + } + out := new(FrontendResponseDataMaskingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontendResponseDataMaskingParameters) DeepCopyInto(out *FrontendResponseDataMaskingParameters) { + *out = *in + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]FrontendResponseDataMaskingHeadersParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.QueryParams != nil { + in, out := &in.QueryParams, &out.QueryParams + *out = make([]FrontendResponseDataMaskingQueryParamsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontendResponseDataMaskingParameters. +func (in *FrontendResponseDataMaskingParameters) DeepCopy() *FrontendResponseDataMaskingParameters { + if in == nil { + return nil + } + out := new(FrontendResponseDataMaskingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontendResponseDataMaskingQueryParamsInitParameters) DeepCopyInto(out *FrontendResponseDataMaskingQueryParamsInitParameters) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontendResponseDataMaskingQueryParamsInitParameters. 
+func (in *FrontendResponseDataMaskingQueryParamsInitParameters) DeepCopy() *FrontendResponseDataMaskingQueryParamsInitParameters { + if in == nil { + return nil + } + out := new(FrontendResponseDataMaskingQueryParamsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontendResponseDataMaskingQueryParamsObservation) DeepCopyInto(out *FrontendResponseDataMaskingQueryParamsObservation) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontendResponseDataMaskingQueryParamsObservation. +func (in *FrontendResponseDataMaskingQueryParamsObservation) DeepCopy() *FrontendResponseDataMaskingQueryParamsObservation { + if in == nil { + return nil + } + out := new(FrontendResponseDataMaskingQueryParamsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontendResponseDataMaskingQueryParamsParameters) DeepCopyInto(out *FrontendResponseDataMaskingQueryParamsParameters) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontendResponseDataMaskingQueryParamsParameters. 
+func (in *FrontendResponseDataMaskingQueryParamsParameters) DeepCopy() *FrontendResponseDataMaskingQueryParamsParameters { + if in == nil { + return nil + } + out := new(FrontendResponseDataMaskingQueryParamsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontendResponseInitParameters) DeepCopyInto(out *FrontendResponseInitParameters) { + *out = *in + if in.BodyBytes != nil { + in, out := &in.BodyBytes, &out.BodyBytes + *out = new(float64) + **out = **in + } + if in.DataMasking != nil { + in, out := &in.DataMasking, &out.DataMasking + *out = new(FrontendResponseDataMaskingInitParameters) + (*in).DeepCopyInto(*out) + } + if in.HeadersToLog != nil { + in, out := &in.HeadersToLog, &out.HeadersToLog + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontendResponseInitParameters. +func (in *FrontendResponseInitParameters) DeepCopy() *FrontendResponseInitParameters { + if in == nil { + return nil + } + out := new(FrontendResponseInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FrontendResponseObservation) DeepCopyInto(out *FrontendResponseObservation) { + *out = *in + if in.BodyBytes != nil { + in, out := &in.BodyBytes, &out.BodyBytes + *out = new(float64) + **out = **in + } + if in.DataMasking != nil { + in, out := &in.DataMasking, &out.DataMasking + *out = new(FrontendResponseDataMaskingObservation) + (*in).DeepCopyInto(*out) + } + if in.HeadersToLog != nil { + in, out := &in.HeadersToLog, &out.HeadersToLog + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontendResponseObservation. +func (in *FrontendResponseObservation) DeepCopy() *FrontendResponseObservation { + if in == nil { + return nil + } + out := new(FrontendResponseObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontendResponseParameters) DeepCopyInto(out *FrontendResponseParameters) { + *out = *in + if in.BodyBytes != nil { + in, out := &in.BodyBytes, &out.BodyBytes + *out = new(float64) + **out = **in + } + if in.DataMasking != nil { + in, out := &in.DataMasking, &out.DataMasking + *out = new(FrontendResponseDataMaskingParameters) + (*in).DeepCopyInto(*out) + } + if in.HeadersToLog != nil { + in, out := &in.HeadersToLog, &out.HeadersToLog + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontendResponseParameters. 
+func (in *FrontendResponseParameters) DeepCopy() *FrontendResponseParameters { + if in == nil { + return nil + } + out := new(FrontendResponseParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Gateway) DeepCopyInto(out *Gateway) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Gateway. +func (in *Gateway) DeepCopy() *Gateway { + if in == nil { + return nil + } + out := new(Gateway) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Gateway) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GatewayInitParameters) DeepCopyInto(out *GatewayInitParameters) { + *out = *in + if in.APIManagementID != nil { + in, out := &in.APIManagementID, &out.APIManagementID + *out = new(string) + **out = **in + } + if in.APIManagementIDRef != nil { + in, out := &in.APIManagementIDRef, &out.APIManagementIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.APIManagementIDSelector != nil { + in, out := &in.APIManagementIDSelector, &out.APIManagementIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.LocationData != nil { + in, out := &in.LocationData, &out.LocationData + *out = new(LocationDataInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayInitParameters. +func (in *GatewayInitParameters) DeepCopy() *GatewayInitParameters { + if in == nil { + return nil + } + out := new(GatewayInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatewayList) DeepCopyInto(out *GatewayList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Gateway, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayList. +func (in *GatewayList) DeepCopy() *GatewayList { + if in == nil { + return nil + } + out := new(GatewayList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *GatewayList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatewayObservation) DeepCopyInto(out *GatewayObservation) { + *out = *in + if in.APIManagementID != nil { + in, out := &in.APIManagementID, &out.APIManagementID + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LocationData != nil { + in, out := &in.LocationData, &out.LocationData + *out = new(LocationDataObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayObservation. +func (in *GatewayObservation) DeepCopy() *GatewayObservation { + if in == nil { + return nil + } + out := new(GatewayObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GatewayParameters) DeepCopyInto(out *GatewayParameters) { + *out = *in + if in.APIManagementID != nil { + in, out := &in.APIManagementID, &out.APIManagementID + *out = new(string) + **out = **in + } + if in.APIManagementIDRef != nil { + in, out := &in.APIManagementIDRef, &out.APIManagementIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.APIManagementIDSelector != nil { + in, out := &in.APIManagementIDSelector, &out.APIManagementIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.LocationData != nil { + in, out := &in.LocationData, &out.LocationData + *out = new(LocationDataParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayParameters. +func (in *GatewayParameters) DeepCopy() *GatewayParameters { + if in == nil { + return nil + } + out := new(GatewayParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatewaySpec) DeepCopyInto(out *GatewaySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewaySpec. +func (in *GatewaySpec) DeepCopy() *GatewaySpec { + if in == nil { + return nil + } + out := new(GatewaySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GatewayStatus) DeepCopyInto(out *GatewayStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayStatus. +func (in *GatewayStatus) DeepCopy() *GatewayStatus { + if in == nil { + return nil + } + out := new(GatewayStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HeaderExampleInitParameters) DeepCopyInto(out *HeaderExampleInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ExternalValue != nil { + in, out := &in.ExternalValue, &out.ExternalValue + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Summary != nil { + in, out := &in.Summary, &out.Summary + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeaderExampleInitParameters. +func (in *HeaderExampleInitParameters) DeepCopy() *HeaderExampleInitParameters { + if in == nil { + return nil + } + out := new(HeaderExampleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HeaderExampleObservation) DeepCopyInto(out *HeaderExampleObservation) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ExternalValue != nil { + in, out := &in.ExternalValue, &out.ExternalValue + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Summary != nil { + in, out := &in.Summary, &out.Summary + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeaderExampleObservation. +func (in *HeaderExampleObservation) DeepCopy() *HeaderExampleObservation { + if in == nil { + return nil + } + out := new(HeaderExampleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HeaderExampleParameters) DeepCopyInto(out *HeaderExampleParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ExternalValue != nil { + in, out := &in.ExternalValue, &out.ExternalValue + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Summary != nil { + in, out := &in.Summary, &out.Summary + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeaderExampleParameters. 
+func (in *HeaderExampleParameters) DeepCopy() *HeaderExampleParameters { + if in == nil { + return nil + } + out := new(HeaderExampleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HeaderInitParameters) DeepCopyInto(out *HeaderInitParameters) { + *out = *in + if in.DefaultValue != nil { + in, out := &in.DefaultValue, &out.DefaultValue + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Example != nil { + in, out := &in.Example, &out.Example + *out = make([]ExampleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Required != nil { + in, out := &in.Required, &out.Required + *out = new(bool) + **out = **in + } + if in.SchemaID != nil { + in, out := &in.SchemaID, &out.SchemaID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.TypeName != nil { + in, out := &in.TypeName, &out.TypeName + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeaderInitParameters. +func (in *HeaderInitParameters) DeepCopy() *HeaderInitParameters { + if in == nil { + return nil + } + out := new(HeaderInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HeaderObservation) DeepCopyInto(out *HeaderObservation) { + *out = *in + if in.DefaultValue != nil { + in, out := &in.DefaultValue, &out.DefaultValue + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Example != nil { + in, out := &in.Example, &out.Example + *out = make([]ExampleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Required != nil { + in, out := &in.Required, &out.Required + *out = new(bool) + **out = **in + } + if in.SchemaID != nil { + in, out := &in.SchemaID, &out.SchemaID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.TypeName != nil { + in, out := &in.TypeName, &out.TypeName + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeaderObservation. +func (in *HeaderObservation) DeepCopy() *HeaderObservation { + if in == nil { + return nil + } + out := new(HeaderObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HeaderParameters) DeepCopyInto(out *HeaderParameters) { + *out = *in + if in.DefaultValue != nil { + in, out := &in.DefaultValue, &out.DefaultValue + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Example != nil { + in, out := &in.Example, &out.Example + *out = make([]ExampleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Required != nil { + in, out := &in.Required, &out.Required + *out = new(bool) + **out = **in + } + if in.SchemaID != nil { + in, out := &in.SchemaID, &out.SchemaID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.TypeName != nil { + in, out := &in.TypeName, &out.TypeName + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeaderParameters. +func (in *HeaderParameters) DeepCopy() *HeaderParameters { + if in == nil { + return nil + } + out := new(HeaderParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HeadersInitParameters) DeepCopyInto(out *HeadersInitParameters) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeadersInitParameters. +func (in *HeadersInitParameters) DeepCopy() *HeadersInitParameters { + if in == nil { + return nil + } + out := new(HeadersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HeadersObservation) DeepCopyInto(out *HeadersObservation) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeadersObservation. +func (in *HeadersObservation) DeepCopy() *HeadersObservation { + if in == nil { + return nil + } + out := new(HeadersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HeadersParameters) DeepCopyInto(out *HeadersParameters) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeadersParameters. 
+func (in *HeadersParameters) DeepCopy() *HeadersParameters { + if in == nil { + return nil + } + out := new(HeadersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostNameConfigurationInitParameters) DeepCopyInto(out *HostNameConfigurationInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostNameConfigurationInitParameters. +func (in *HostNameConfigurationInitParameters) DeepCopy() *HostNameConfigurationInitParameters { + if in == nil { + return nil + } + out := new(HostNameConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostNameConfigurationManagementInitParameters) DeepCopyInto(out *HostNameConfigurationManagementInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostNameConfigurationManagementInitParameters. +func (in *HostNameConfigurationManagementInitParameters) DeepCopy() *HostNameConfigurationManagementInitParameters { + if in == nil { + return nil + } + out := new(HostNameConfigurationManagementInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HostNameConfigurationManagementObservation) DeepCopyInto(out *HostNameConfigurationManagementObservation) { + *out = *in + if in.CertificateSource != nil { + in, out := &in.CertificateSource, &out.CertificateSource + *out = new(string) + **out = **in + } + if in.CertificateStatus != nil { + in, out := &in.CertificateStatus, &out.CertificateStatus + *out = new(string) + **out = **in + } + if in.Expiry != nil { + in, out := &in.Expiry, &out.Expiry + *out = new(string) + **out = **in + } + if in.HostName != nil { + in, out := &in.HostName, &out.HostName + *out = new(string) + **out = **in + } + if in.KeyVaultID != nil { + in, out := &in.KeyVaultID, &out.KeyVaultID + *out = new(string) + **out = **in + } + if in.NegotiateClientCertificate != nil { + in, out := &in.NegotiateClientCertificate, &out.NegotiateClientCertificate + *out = new(bool) + **out = **in + } + if in.SSLKeyvaultIdentityClientID != nil { + in, out := &in.SSLKeyvaultIdentityClientID, &out.SSLKeyvaultIdentityClientID + *out = new(string) + **out = **in + } + if in.Subject != nil { + in, out := &in.Subject, &out.Subject + *out = new(string) + **out = **in + } + if in.Thumbprint != nil { + in, out := &in.Thumbprint, &out.Thumbprint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostNameConfigurationManagementObservation. +func (in *HostNameConfigurationManagementObservation) DeepCopy() *HostNameConfigurationManagementObservation { + if in == nil { + return nil + } + out := new(HostNameConfigurationManagementObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HostNameConfigurationManagementParameters) DeepCopyInto(out *HostNameConfigurationManagementParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostNameConfigurationManagementParameters. +func (in *HostNameConfigurationManagementParameters) DeepCopy() *HostNameConfigurationManagementParameters { + if in == nil { + return nil + } + out := new(HostNameConfigurationManagementParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostNameConfigurationObservation) DeepCopyInto(out *HostNameConfigurationObservation) { + *out = *in + if in.DeveloperPortal != nil { + in, out := &in.DeveloperPortal, &out.DeveloperPortal + *out = make([]DeveloperPortalObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Management != nil { + in, out := &in.Management, &out.Management + *out = make([]HostNameConfigurationManagementObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Portal != nil { + in, out := &in.Portal, &out.Portal + *out = make([]PortalObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Proxy != nil { + in, out := &in.Proxy, &out.Proxy + *out = make([]ProxyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Scm != nil { + in, out := &in.Scm, &out.Scm + *out = make([]ScmObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostNameConfigurationObservation. 
+func (in *HostNameConfigurationObservation) DeepCopy() *HostNameConfigurationObservation { + if in == nil { + return nil + } + out := new(HostNameConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostNameConfigurationParameters) DeepCopyInto(out *HostNameConfigurationParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostNameConfigurationParameters. +func (in *HostNameConfigurationParameters) DeepCopy() *HostNameConfigurationParameters { + if in == nil { + return nil + } + out := new(HostNameConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityInitParameters) DeepCopyInto(out *IdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityInitParameters. +func (in *IdentityInitParameters) DeepCopy() *IdentityInitParameters { + if in == nil { + return nil + } + out := new(IdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentityObservation) DeepCopyInto(out *IdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityObservation. +func (in *IdentityObservation) DeepCopy() *IdentityObservation { + if in == nil { + return nil + } + out := new(IdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityParameters) DeepCopyInto(out *IdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityParameters. +func (in *IdentityParameters) DeepCopy() *IdentityParameters { + if in == nil { + return nil + } + out := new(IdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImportInitParameters) DeepCopyInto(out *ImportInitParameters) { + *out = *in + if in.ContentFormat != nil { + in, out := &in.ContentFormat, &out.ContentFormat + *out = new(string) + **out = **in + } + if in.ContentValue != nil { + in, out := &in.ContentValue, &out.ContentValue + *out = new(string) + **out = **in + } + if in.WsdlSelector != nil { + in, out := &in.WsdlSelector, &out.WsdlSelector + *out = new(WsdlSelectorInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImportInitParameters. +func (in *ImportInitParameters) DeepCopy() *ImportInitParameters { + if in == nil { + return nil + } + out := new(ImportInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImportObservation) DeepCopyInto(out *ImportObservation) { + *out = *in + if in.ContentFormat != nil { + in, out := &in.ContentFormat, &out.ContentFormat + *out = new(string) + **out = **in + } + if in.ContentValue != nil { + in, out := &in.ContentValue, &out.ContentValue + *out = new(string) + **out = **in + } + if in.WsdlSelector != nil { + in, out := &in.WsdlSelector, &out.WsdlSelector + *out = new(WsdlSelectorObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImportObservation. +func (in *ImportObservation) DeepCopy() *ImportObservation { + if in == nil { + return nil + } + out := new(ImportObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImportParameters) DeepCopyInto(out *ImportParameters) { + *out = *in + if in.ContentFormat != nil { + in, out := &in.ContentFormat, &out.ContentFormat + *out = new(string) + **out = **in + } + if in.ContentValue != nil { + in, out := &in.ContentValue, &out.ContentValue + *out = new(string) + **out = **in + } + if in.WsdlSelector != nil { + in, out := &in.WsdlSelector, &out.WsdlSelector + *out = new(WsdlSelectorParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImportParameters. +func (in *ImportParameters) DeepCopy() *ImportParameters { + if in == nil { + return nil + } + out := new(ImportParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LicenseInitParameters) DeepCopyInto(out *LicenseInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LicenseInitParameters. +func (in *LicenseInitParameters) DeepCopy() *LicenseInitParameters { + if in == nil { + return nil + } + out := new(LicenseInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LicenseObservation) DeepCopyInto(out *LicenseObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LicenseObservation. 
+func (in *LicenseObservation) DeepCopy() *LicenseObservation { + if in == nil { + return nil + } + out := new(LicenseObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LicenseParameters) DeepCopyInto(out *LicenseParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LicenseParameters. +func (in *LicenseParameters) DeepCopy() *LicenseParameters { + if in == nil { + return nil + } + out := new(LicenseParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocationDataInitParameters) DeepCopyInto(out *LocationDataInitParameters) { + *out = *in + if in.City != nil { + in, out := &in.City, &out.City + *out = new(string) + **out = **in + } + if in.District != nil { + in, out := &in.District, &out.District + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocationDataInitParameters. +func (in *LocationDataInitParameters) DeepCopy() *LocationDataInitParameters { + if in == nil { + return nil + } + out := new(LocationDataInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LocationDataObservation) DeepCopyInto(out *LocationDataObservation) { + *out = *in + if in.City != nil { + in, out := &in.City, &out.City + *out = new(string) + **out = **in + } + if in.District != nil { + in, out := &in.District, &out.District + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocationDataObservation. +func (in *LocationDataObservation) DeepCopy() *LocationDataObservation { + if in == nil { + return nil + } + out := new(LocationDataObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocationDataParameters) DeepCopyInto(out *LocationDataParameters) { + *out = *in + if in.City != nil { + in, out := &in.City, &out.City + *out = new(string) + **out = **in + } + if in.District != nil { + in, out := &in.District, &out.District + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocationDataParameters. +func (in *LocationDataParameters) DeepCopy() *LocationDataParameters { + if in == nil { + return nil + } + out := new(LocationDataParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Logger) DeepCopyInto(out *Logger) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Logger. +func (in *Logger) DeepCopy() *Logger { + if in == nil { + return nil + } + out := new(Logger) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Logger) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggerInitParameters) DeepCopyInto(out *LoggerInitParameters) { + *out = *in + if in.ApplicationInsights != nil { + in, out := &in.ApplicationInsights, &out.ApplicationInsights + *out = new(ApplicationInsightsInitParameters) + **out = **in + } + if in.Buffered != nil { + in, out := &in.Buffered, &out.Buffered + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.EventHub != nil { + in, out := &in.EventHub, &out.EventHub + *out = new(EventHubInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ResourceID != nil { + in, out := &in.ResourceID, &out.ResourceID + *out = new(string) + **out = **in + } + if in.ResourceIDRef != nil { + in, out := &in.ResourceIDRef, &out.ResourceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceIDSelector != nil { + in, out := &in.ResourceIDSelector, &out.ResourceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggerInitParameters. 
+func (in *LoggerInitParameters) DeepCopy() *LoggerInitParameters { + if in == nil { + return nil + } + out := new(LoggerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggerList) DeepCopyInto(out *LoggerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Logger, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggerList. +func (in *LoggerList) DeepCopy() *LoggerList { + if in == nil { + return nil + } + out := new(LoggerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LoggerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LoggerObservation) DeepCopyInto(out *LoggerObservation) { + *out = *in + if in.APIManagementName != nil { + in, out := &in.APIManagementName, &out.APIManagementName + *out = new(string) + **out = **in + } + if in.ApplicationInsights != nil { + in, out := &in.ApplicationInsights, &out.ApplicationInsights + *out = new(ApplicationInsightsParameters) + **out = **in + } + if in.Buffered != nil { + in, out := &in.Buffered, &out.Buffered + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.EventHub != nil { + in, out := &in.EventHub, &out.EventHub + *out = new(EventHubObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceID != nil { + in, out := &in.ResourceID, &out.ResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggerObservation. +func (in *LoggerObservation) DeepCopy() *LoggerObservation { + if in == nil { + return nil + } + out := new(LoggerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LoggerParameters) DeepCopyInto(out *LoggerParameters) { + *out = *in + if in.APIManagementName != nil { + in, out := &in.APIManagementName, &out.APIManagementName + *out = new(string) + **out = **in + } + if in.APIManagementNameRef != nil { + in, out := &in.APIManagementNameRef, &out.APIManagementNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.APIManagementNameSelector != nil { + in, out := &in.APIManagementNameSelector, &out.APIManagementNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ApplicationInsights != nil { + in, out := &in.ApplicationInsights, &out.ApplicationInsights + *out = new(ApplicationInsightsParameters) + **out = **in + } + if in.Buffered != nil { + in, out := &in.Buffered, &out.Buffered + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.EventHub != nil { + in, out := &in.EventHub, &out.EventHub + *out = new(EventHubParameters) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ResourceID != nil { + in, out := &in.ResourceID, &out.ResourceID + *out = new(string) + **out = **in + } + if in.ResourceIDRef != nil { + in, out := &in.ResourceIDRef, &out.ResourceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceIDSelector != nil { + in, out := &in.ResourceIDSelector, &out.ResourceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, 
copying the receiver, creating a new LoggerParameters. +func (in *LoggerParameters) DeepCopy() *LoggerParameters { + if in == nil { + return nil + } + out := new(LoggerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggerSpec) DeepCopyInto(out *LoggerSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggerSpec. +func (in *LoggerSpec) DeepCopy() *LoggerSpec { + if in == nil { + return nil + } + out := new(LoggerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggerStatus) DeepCopyInto(out *LoggerStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggerStatus. +func (in *LoggerStatus) DeepCopy() *LoggerStatus { + if in == nil { + return nil + } + out := new(LoggerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Management) DeepCopyInto(out *Management) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Management. 
+func (in *Management) DeepCopy() *Management { + if in == nil { + return nil + } + out := new(Management) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Management) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagementInitParameters) DeepCopyInto(out *ManagementInitParameters) { + *out = *in + if in.AdditionalLocation != nil { + in, out := &in.AdditionalLocation, &out.AdditionalLocation + *out = make([]AdditionalLocationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = make([]CertificateInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ClientCertificateEnabled != nil { + in, out := &in.ClientCertificateEnabled, &out.ClientCertificateEnabled + *out = new(bool) + **out = **in + } + if in.Delegation != nil { + in, out := &in.Delegation, &out.Delegation + *out = new(DelegationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.GatewayDisabled != nil { + in, out := &in.GatewayDisabled, &out.GatewayDisabled + *out = new(bool) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MinAPIVersion != nil { + in, out := &in.MinAPIVersion, &out.MinAPIVersion + *out = new(string) + **out = **in + } + if in.NotificationSenderEmail != nil { + in, out := &in.NotificationSenderEmail, &out.NotificationSenderEmail + *out = new(string) + **out = **in + } + if in.Policy != nil { + in, out := 
&in.Policy, &out.Policy + *out = make([]PolicyInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Protocols != nil { + in, out := &in.Protocols, &out.Protocols + *out = new(ProtocolsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PublicIPAddressID != nil { + in, out := &in.PublicIPAddressID, &out.PublicIPAddressID + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.PublisherEmail != nil { + in, out := &in.PublisherEmail, &out.PublisherEmail + *out = new(string) + **out = **in + } + if in.PublisherName != nil { + in, out := &in.PublisherName, &out.PublisherName + *out = new(string) + **out = **in + } + if in.Security != nil { + in, out := &in.Security, &out.Security + *out = new(SecurityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SignIn != nil { + in, out := &in.SignIn, &out.SignIn + *out = new(SignInInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SignUp != nil { + in, out := &in.SignUp, &out.SignUp + *out = new(SignUpInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TenantAccess != nil { + in, out := &in.TenantAccess, &out.TenantAccess + *out = new(TenantAccessInitParameters) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkConfiguration != nil { + in, out := &in.VirtualNetworkConfiguration, &out.VirtualNetworkConfiguration + *out = new(ManagementVirtualNetworkConfigurationInitParameters) + (*in).DeepCopyInto(*out) 
+ } + if in.VirtualNetworkType != nil { + in, out := &in.VirtualNetworkType, &out.VirtualNetworkType + *out = new(string) + **out = **in + } + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagementInitParameters. +func (in *ManagementInitParameters) DeepCopy() *ManagementInitParameters { + if in == nil { + return nil + } + out := new(ManagementInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagementList) DeepCopyInto(out *ManagementList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Management, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagementList. +func (in *ManagementList) DeepCopy() *ManagementList { + if in == nil { + return nil + } + out := new(ManagementList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ManagementList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ManagementObservation) DeepCopyInto(out *ManagementObservation) { + *out = *in + if in.AdditionalLocation != nil { + in, out := &in.AdditionalLocation, &out.AdditionalLocation + *out = make([]AdditionalLocationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = make([]CertificateObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ClientCertificateEnabled != nil { + in, out := &in.ClientCertificateEnabled, &out.ClientCertificateEnabled + *out = new(bool) + **out = **in + } + if in.Delegation != nil { + in, out := &in.Delegation, &out.Delegation + *out = new(DelegationObservation) + (*in).DeepCopyInto(*out) + } + if in.DeveloperPortalURL != nil { + in, out := &in.DeveloperPortalURL, &out.DeveloperPortalURL + *out = new(string) + **out = **in + } + if in.GatewayDisabled != nil { + in, out := &in.GatewayDisabled, &out.GatewayDisabled + *out = new(bool) + **out = **in + } + if in.GatewayRegionalURL != nil { + in, out := &in.GatewayRegionalURL, &out.GatewayRegionalURL + *out = new(string) + **out = **in + } + if in.GatewayURL != nil { + in, out := &in.GatewayURL, &out.GatewayURL + *out = new(string) + **out = **in + } + if in.HostNameConfiguration != nil { + in, out := &in.HostNameConfiguration, &out.HostNameConfiguration + *out = new(HostNameConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ManagementAPIURL != nil { + in, out := &in.ManagementAPIURL, &out.ManagementAPIURL + *out = new(string) + **out = **in + } + if in.MinAPIVersion != nil { + in, out := 
&in.MinAPIVersion, &out.MinAPIVersion + *out = new(string) + **out = **in + } + if in.NotificationSenderEmail != nil { + in, out := &in.NotificationSenderEmail, &out.NotificationSenderEmail + *out = new(string) + **out = **in + } + if in.Policy != nil { + in, out := &in.Policy, &out.Policy + *out = make([]PolicyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PortalURL != nil { + in, out := &in.PortalURL, &out.PortalURL + *out = new(string) + **out = **in + } + if in.PrivateIPAddresses != nil { + in, out := &in.PrivateIPAddresses, &out.PrivateIPAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Protocols != nil { + in, out := &in.Protocols, &out.Protocols + *out = new(ProtocolsObservation) + (*in).DeepCopyInto(*out) + } + if in.PublicIPAddressID != nil { + in, out := &in.PublicIPAddressID, &out.PublicIPAddressID + *out = new(string) + **out = **in + } + if in.PublicIPAddresses != nil { + in, out := &in.PublicIPAddresses, &out.PublicIPAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.PublisherEmail != nil { + in, out := &in.PublisherEmail, &out.PublisherEmail + *out = new(string) + **out = **in + } + if in.PublisherName != nil { + in, out := &in.PublisherName, &out.PublisherName + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ScmURL != nil { + in, out := &in.ScmURL, &out.ScmURL + *out = new(string) + **out = **in + } + if in.Security != nil { + in, out := &in.Security, 
&out.Security + *out = new(SecurityObservation) + (*in).DeepCopyInto(*out) + } + if in.SignIn != nil { + in, out := &in.SignIn, &out.SignIn + *out = new(SignInObservation) + (*in).DeepCopyInto(*out) + } + if in.SignUp != nil { + in, out := &in.SignUp, &out.SignUp + *out = new(SignUpObservation) + (*in).DeepCopyInto(*out) + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TenantAccess != nil { + in, out := &in.TenantAccess, &out.TenantAccess + *out = new(TenantAccessObservation) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkConfiguration != nil { + in, out := &in.VirtualNetworkConfiguration, &out.VirtualNetworkConfiguration + *out = new(ManagementVirtualNetworkConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkType != nil { + in, out := &in.VirtualNetworkType, &out.VirtualNetworkType + *out = new(string) + **out = **in + } + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagementObservation. +func (in *ManagementObservation) DeepCopy() *ManagementObservation { + if in == nil { + return nil + } + out := new(ManagementObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ManagementParameters) DeepCopyInto(out *ManagementParameters) { + *out = *in + if in.AdditionalLocation != nil { + in, out := &in.AdditionalLocation, &out.AdditionalLocation + *out = make([]AdditionalLocationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = make([]CertificateParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ClientCertificateEnabled != nil { + in, out := &in.ClientCertificateEnabled, &out.ClientCertificateEnabled + *out = new(bool) + **out = **in + } + if in.Delegation != nil { + in, out := &in.Delegation, &out.Delegation + *out = new(DelegationParameters) + (*in).DeepCopyInto(*out) + } + if in.GatewayDisabled != nil { + in, out := &in.GatewayDisabled, &out.GatewayDisabled + *out = new(bool) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MinAPIVersion != nil { + in, out := &in.MinAPIVersion, &out.MinAPIVersion + *out = new(string) + **out = **in + } + if in.NotificationSenderEmail != nil { + in, out := &in.NotificationSenderEmail, &out.NotificationSenderEmail + *out = new(string) + **out = **in + } + if in.Policy != nil { + in, out := &in.Policy, &out.Policy + *out = make([]PolicyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Protocols != nil { + in, out := &in.Protocols, &out.Protocols + *out = new(ProtocolsParameters) + (*in).DeepCopyInto(*out) + } + if in.PublicIPAddressID != nil { + in, out := &in.PublicIPAddressID, &out.PublicIPAddressID + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out 
= new(bool) + **out = **in + } + if in.PublisherEmail != nil { + in, out := &in.PublisherEmail, &out.PublisherEmail + *out = new(string) + **out = **in + } + if in.PublisherName != nil { + in, out := &in.PublisherName, &out.PublisherName + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Security != nil { + in, out := &in.Security, &out.Security + *out = new(SecurityParameters) + (*in).DeepCopyInto(*out) + } + if in.SignIn != nil { + in, out := &in.SignIn, &out.SignIn + *out = new(SignInParameters) + (*in).DeepCopyInto(*out) + } + if in.SignUp != nil { + in, out := &in.SignUp, &out.SignUp + *out = new(SignUpParameters) + (*in).DeepCopyInto(*out) + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TenantAccess != nil { + in, out := &in.TenantAccess, &out.TenantAccess + *out = new(TenantAccessParameters) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkConfiguration != nil { + in, out := &in.VirtualNetworkConfiguration, &out.VirtualNetworkConfiguration + *out = new(ManagementVirtualNetworkConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkType != nil { + in, out := &in.VirtualNetworkType, 
&out.VirtualNetworkType + *out = new(string) + **out = **in + } + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagementParameters. +func (in *ManagementParameters) DeepCopy() *ManagementParameters { + if in == nil { + return nil + } + out := new(ManagementParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagementSpec) DeepCopyInto(out *ManagementSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagementSpec. +func (in *ManagementSpec) DeepCopy() *ManagementSpec { + if in == nil { + return nil + } + out := new(ManagementSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagementStatus) DeepCopyInto(out *ManagementStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagementStatus. +func (in *ManagementStatus) DeepCopy() *ManagementStatus { + if in == nil { + return nil + } + out := new(ManagementStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ManagementVirtualNetworkConfigurationInitParameters) DeepCopyInto(out *ManagementVirtualNetworkConfigurationInitParameters) { + *out = *in + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagementVirtualNetworkConfigurationInitParameters. +func (in *ManagementVirtualNetworkConfigurationInitParameters) DeepCopy() *ManagementVirtualNetworkConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ManagementVirtualNetworkConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagementVirtualNetworkConfigurationObservation) DeepCopyInto(out *ManagementVirtualNetworkConfigurationObservation) { + *out = *in + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagementVirtualNetworkConfigurationObservation. +func (in *ManagementVirtualNetworkConfigurationObservation) DeepCopy() *ManagementVirtualNetworkConfigurationObservation { + if in == nil { + return nil + } + out := new(ManagementVirtualNetworkConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ManagementVirtualNetworkConfigurationParameters) DeepCopyInto(out *ManagementVirtualNetworkConfigurationParameters) { + *out = *in + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagementVirtualNetworkConfigurationParameters. +func (in *ManagementVirtualNetworkConfigurationParameters) DeepCopy() *ManagementVirtualNetworkConfigurationParameters { + if in == nil { + return nil + } + out := new(ManagementVirtualNetworkConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamedValue) DeepCopyInto(out *NamedValue) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedValue. +func (in *NamedValue) DeepCopy() *NamedValue { + if in == nil { + return nil + } + out := new(NamedValue) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NamedValue) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NamedValueInitParameters) DeepCopyInto(out *NamedValueInitParameters) { + *out = *in + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ValueFromKeyVault != nil { + in, out := &in.ValueFromKeyVault, &out.ValueFromKeyVault + *out = new(ValueFromKeyVaultInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedValueInitParameters. +func (in *NamedValueInitParameters) DeepCopy() *NamedValueInitParameters { + if in == nil { + return nil + } + out := new(NamedValueInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamedValueList) DeepCopyInto(out *NamedValueList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NamedValue, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedValueList. +func (in *NamedValueList) DeepCopy() *NamedValueList { + if in == nil { + return nil + } + out := new(NamedValueList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *NamedValueList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamedValueObservation) DeepCopyInto(out *NamedValueObservation) { + *out = *in + if in.APIManagementName != nil { + in, out := &in.APIManagementName, &out.APIManagementName + *out = new(string) + **out = **in + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ValueFromKeyVault != nil { + in, out := &in.ValueFromKeyVault, &out.ValueFromKeyVault + *out = new(ValueFromKeyVaultObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedValueObservation. +func (in *NamedValueObservation) DeepCopy() *NamedValueObservation { + if in == nil { + return nil + } + out := new(NamedValueObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NamedValueParameters) DeepCopyInto(out *NamedValueParameters) { + *out = *in + if in.APIManagementName != nil { + in, out := &in.APIManagementName, &out.APIManagementName + *out = new(string) + **out = **in + } + if in.APIManagementNameRef != nil { + in, out := &in.APIManagementNameRef, &out.APIManagementNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.APIManagementNameSelector != nil { + in, out := &in.APIManagementNameSelector, &out.APIManagementNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ValueFromKeyVault != nil { + in, out := &in.ValueFromKeyVault, &out.ValueFromKeyVault + *out = new(ValueFromKeyVaultParameters) + (*in).DeepCopyInto(*out) + } + if in.ValueSecretRef != nil { + in, out := &in.ValueSecretRef, &out.ValueSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedValueParameters. 
+func (in *NamedValueParameters) DeepCopy() *NamedValueParameters { + if in == nil { + return nil + } + out := new(NamedValueParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamedValueSpec) DeepCopyInto(out *NamedValueSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedValueSpec. +func (in *NamedValueSpec) DeepCopy() *NamedValueSpec { + if in == nil { + return nil + } + out := new(NamedValueSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamedValueStatus) DeepCopyInto(out *NamedValueStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedValueStatus. +func (in *NamedValueStatus) DeepCopy() *NamedValueStatus { + if in == nil { + return nil + } + out := new(NamedValueStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Oauth2AuthorizationInitParameters) DeepCopyInto(out *Oauth2AuthorizationInitParameters) { + *out = *in + if in.AuthorizationServerName != nil { + in, out := &in.AuthorizationServerName, &out.AuthorizationServerName + *out = new(string) + **out = **in + } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Oauth2AuthorizationInitParameters. 
+func (in *Oauth2AuthorizationInitParameters) DeepCopy() *Oauth2AuthorizationInitParameters { + if in == nil { + return nil + } + out := new(Oauth2AuthorizationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Oauth2AuthorizationObservation) DeepCopyInto(out *Oauth2AuthorizationObservation) { + *out = *in + if in.AuthorizationServerName != nil { + in, out := &in.AuthorizationServerName, &out.AuthorizationServerName + *out = new(string) + **out = **in + } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Oauth2AuthorizationObservation. +func (in *Oauth2AuthorizationObservation) DeepCopy() *Oauth2AuthorizationObservation { + if in == nil { + return nil + } + out := new(Oauth2AuthorizationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Oauth2AuthorizationParameters) DeepCopyInto(out *Oauth2AuthorizationParameters) { + *out = *in + if in.AuthorizationServerName != nil { + in, out := &in.AuthorizationServerName, &out.AuthorizationServerName + *out = new(string) + **out = **in + } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Oauth2AuthorizationParameters. +func (in *Oauth2AuthorizationParameters) DeepCopy() *Oauth2AuthorizationParameters { + if in == nil { + return nil + } + out := new(Oauth2AuthorizationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OpenIDAuthenticationInitParameters) DeepCopyInto(out *OpenIDAuthenticationInitParameters) { + *out = *in + if in.BearerTokenSendingMethods != nil { + in, out := &in.BearerTokenSendingMethods, &out.BearerTokenSendingMethods + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OpenIDProviderName != nil { + in, out := &in.OpenIDProviderName, &out.OpenIDProviderName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDAuthenticationInitParameters. +func (in *OpenIDAuthenticationInitParameters) DeepCopy() *OpenIDAuthenticationInitParameters { + if in == nil { + return nil + } + out := new(OpenIDAuthenticationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenIDAuthenticationObservation) DeepCopyInto(out *OpenIDAuthenticationObservation) { + *out = *in + if in.BearerTokenSendingMethods != nil { + in, out := &in.BearerTokenSendingMethods, &out.BearerTokenSendingMethods + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OpenIDProviderName != nil { + in, out := &in.OpenIDProviderName, &out.OpenIDProviderName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDAuthenticationObservation. +func (in *OpenIDAuthenticationObservation) DeepCopy() *OpenIDAuthenticationObservation { + if in == nil { + return nil + } + out := new(OpenIDAuthenticationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OpenIDAuthenticationParameters) DeepCopyInto(out *OpenIDAuthenticationParameters) { + *out = *in + if in.BearerTokenSendingMethods != nil { + in, out := &in.BearerTokenSendingMethods, &out.BearerTokenSendingMethods + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OpenIDProviderName != nil { + in, out := &in.OpenIDProviderName, &out.OpenIDProviderName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDAuthenticationParameters. +func (in *OpenIDAuthenticationParameters) DeepCopy() *OpenIDAuthenticationParameters { + if in == nil { + return nil + } + out := new(OpenIDAuthenticationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyInitParameters) DeepCopyInto(out *PolicyInitParameters) { + *out = *in + if in.XMLContent != nil { + in, out := &in.XMLContent, &out.XMLContent + *out = new(string) + **out = **in + } + if in.XMLLink != nil { + in, out := &in.XMLLink, &out.XMLLink + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyInitParameters. +func (in *PolicyInitParameters) DeepCopy() *PolicyInitParameters { + if in == nil { + return nil + } + out := new(PolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PolicyObservation) DeepCopyInto(out *PolicyObservation) { + *out = *in + if in.XMLContent != nil { + in, out := &in.XMLContent, &out.XMLContent + *out = new(string) + **out = **in + } + if in.XMLLink != nil { + in, out := &in.XMLLink, &out.XMLLink + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyObservation. +func (in *PolicyObservation) DeepCopy() *PolicyObservation { + if in == nil { + return nil + } + out := new(PolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyParameters) DeepCopyInto(out *PolicyParameters) { + *out = *in + if in.XMLContent != nil { + in, out := &in.XMLContent, &out.XMLContent + *out = new(string) + **out = **in + } + if in.XMLLink != nil { + in, out := &in.XMLLink, &out.XMLLink + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyParameters. +func (in *PolicyParameters) DeepCopy() *PolicyParameters { + if in == nil { + return nil + } + out := new(PolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PortalInitParameters) DeepCopyInto(out *PortalInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortalInitParameters. +func (in *PortalInitParameters) DeepCopy() *PortalInitParameters { + if in == nil { + return nil + } + out := new(PortalInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PortalObservation) DeepCopyInto(out *PortalObservation) { + *out = *in + if in.CertificateSource != nil { + in, out := &in.CertificateSource, &out.CertificateSource + *out = new(string) + **out = **in + } + if in.CertificateStatus != nil { + in, out := &in.CertificateStatus, &out.CertificateStatus + *out = new(string) + **out = **in + } + if in.Expiry != nil { + in, out := &in.Expiry, &out.Expiry + *out = new(string) + **out = **in + } + if in.HostName != nil { + in, out := &in.HostName, &out.HostName + *out = new(string) + **out = **in + } + if in.KeyVaultID != nil { + in, out := &in.KeyVaultID, &out.KeyVaultID + *out = new(string) + **out = **in + } + if in.NegotiateClientCertificate != nil { + in, out := &in.NegotiateClientCertificate, &out.NegotiateClientCertificate + *out = new(bool) + **out = **in + } + if in.SSLKeyvaultIdentityClientID != nil { + in, out := &in.SSLKeyvaultIdentityClientID, &out.SSLKeyvaultIdentityClientID + *out = new(string) + **out = **in + } + if in.Subject != nil { + in, out := &in.Subject, &out.Subject + *out = new(string) + **out = **in + } + if in.Thumbprint != nil { + in, out := &in.Thumbprint, &out.Thumbprint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortalObservation. +func (in *PortalObservation) DeepCopy() *PortalObservation { + if in == nil { + return nil + } + out := new(PortalObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PortalParameters) DeepCopyInto(out *PortalParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortalParameters. 
+func (in *PortalParameters) DeepCopy() *PortalParameters { + if in == nil { + return nil + } + out := new(PortalParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProtocolsInitParameters) DeepCopyInto(out *ProtocolsInitParameters) { + *out = *in + if in.EnableHttp2 != nil { + in, out := &in.EnableHttp2, &out.EnableHttp2 + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProtocolsInitParameters. +func (in *ProtocolsInitParameters) DeepCopy() *ProtocolsInitParameters { + if in == nil { + return nil + } + out := new(ProtocolsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProtocolsObservation) DeepCopyInto(out *ProtocolsObservation) { + *out = *in + if in.EnableHttp2 != nil { + in, out := &in.EnableHttp2, &out.EnableHttp2 + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProtocolsObservation. +func (in *ProtocolsObservation) DeepCopy() *ProtocolsObservation { + if in == nil { + return nil + } + out := new(ProtocolsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProtocolsParameters) DeepCopyInto(out *ProtocolsParameters) { + *out = *in + if in.EnableHttp2 != nil { + in, out := &in.EnableHttp2, &out.EnableHttp2 + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProtocolsParameters. 
+func (in *ProtocolsParameters) DeepCopy() *ProtocolsParameters { + if in == nil { + return nil + } + out := new(ProtocolsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxyInitParameters) DeepCopyInto(out *ProxyInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyInitParameters. +func (in *ProxyInitParameters) DeepCopy() *ProxyInitParameters { + if in == nil { + return nil + } + out := new(ProxyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxyObservation) DeepCopyInto(out *ProxyObservation) { + *out = *in + if in.CertificateSource != nil { + in, out := &in.CertificateSource, &out.CertificateSource + *out = new(string) + **out = **in + } + if in.CertificateStatus != nil { + in, out := &in.CertificateStatus, &out.CertificateStatus + *out = new(string) + **out = **in + } + if in.DefaultSSLBinding != nil { + in, out := &in.DefaultSSLBinding, &out.DefaultSSLBinding + *out = new(bool) + **out = **in + } + if in.Expiry != nil { + in, out := &in.Expiry, &out.Expiry + *out = new(string) + **out = **in + } + if in.HostName != nil { + in, out := &in.HostName, &out.HostName + *out = new(string) + **out = **in + } + if in.KeyVaultID != nil { + in, out := &in.KeyVaultID, &out.KeyVaultID + *out = new(string) + **out = **in + } + if in.NegotiateClientCertificate != nil { + in, out := &in.NegotiateClientCertificate, &out.NegotiateClientCertificate + *out = new(bool) + **out = **in + } + if in.SSLKeyvaultIdentityClientID != nil { + in, out := &in.SSLKeyvaultIdentityClientID, &out.SSLKeyvaultIdentityClientID + *out = new(string) + **out = **in + } + if in.Subject != nil { + in, out := &in.Subject, &out.Subject + *out = new(string) + **out 
= **in + } + if in.Thumbprint != nil { + in, out := &in.Thumbprint, &out.Thumbprint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyObservation. +func (in *ProxyObservation) DeepCopy() *ProxyObservation { + if in == nil { + return nil + } + out := new(ProxyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxyParameters) DeepCopyInto(out *ProxyParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyParameters. +func (in *ProxyParameters) DeepCopy() *ProxyParameters { + if in == nil { + return nil + } + out := new(ProxyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueryParameterExampleInitParameters) DeepCopyInto(out *QueryParameterExampleInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ExternalValue != nil { + in, out := &in.ExternalValue, &out.ExternalValue + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Summary != nil { + in, out := &in.Summary, &out.Summary + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryParameterExampleInitParameters. 
+func (in *QueryParameterExampleInitParameters) DeepCopy() *QueryParameterExampleInitParameters { + if in == nil { + return nil + } + out := new(QueryParameterExampleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueryParameterExampleObservation) DeepCopyInto(out *QueryParameterExampleObservation) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ExternalValue != nil { + in, out := &in.ExternalValue, &out.ExternalValue + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Summary != nil { + in, out := &in.Summary, &out.Summary + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryParameterExampleObservation. +func (in *QueryParameterExampleObservation) DeepCopy() *QueryParameterExampleObservation { + if in == nil { + return nil + } + out := new(QueryParameterExampleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *QueryParameterExampleParameters) DeepCopyInto(out *QueryParameterExampleParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ExternalValue != nil { + in, out := &in.ExternalValue, &out.ExternalValue + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Summary != nil { + in, out := &in.Summary, &out.Summary + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryParameterExampleParameters. +func (in *QueryParameterExampleParameters) DeepCopy() *QueryParameterExampleParameters { + if in == nil { + return nil + } + out := new(QueryParameterExampleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *QueryParameterInitParameters) DeepCopyInto(out *QueryParameterInitParameters) { + *out = *in + if in.DefaultValue != nil { + in, out := &in.DefaultValue, &out.DefaultValue + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Example != nil { + in, out := &in.Example, &out.Example + *out = make([]QueryParameterExampleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Required != nil { + in, out := &in.Required, &out.Required + *out = new(bool) + **out = **in + } + if in.SchemaID != nil { + in, out := &in.SchemaID, &out.SchemaID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.TypeName != nil { + in, out := &in.TypeName, &out.TypeName + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryParameterInitParameters. +func (in *QueryParameterInitParameters) DeepCopy() *QueryParameterInitParameters { + if in == nil { + return nil + } + out := new(QueryParameterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *QueryParameterObservation) DeepCopyInto(out *QueryParameterObservation) { + *out = *in + if in.DefaultValue != nil { + in, out := &in.DefaultValue, &out.DefaultValue + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Example != nil { + in, out := &in.Example, &out.Example + *out = make([]QueryParameterExampleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Required != nil { + in, out := &in.Required, &out.Required + *out = new(bool) + **out = **in + } + if in.SchemaID != nil { + in, out := &in.SchemaID, &out.SchemaID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.TypeName != nil { + in, out := &in.TypeName, &out.TypeName + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryParameterObservation. +func (in *QueryParameterObservation) DeepCopy() *QueryParameterObservation { + if in == nil { + return nil + } + out := new(QueryParameterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *QueryParameterParameters) DeepCopyInto(out *QueryParameterParameters) { + *out = *in + if in.DefaultValue != nil { + in, out := &in.DefaultValue, &out.DefaultValue + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Example != nil { + in, out := &in.Example, &out.Example + *out = make([]QueryParameterExampleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Required != nil { + in, out := &in.Required, &out.Required + *out = new(bool) + **out = **in + } + if in.SchemaID != nil { + in, out := &in.SchemaID, &out.SchemaID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.TypeName != nil { + in, out := &in.TypeName, &out.TypeName + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryParameterParameters. +func (in *QueryParameterParameters) DeepCopy() *QueryParameterParameters { + if in == nil { + return nil + } + out := new(QueryParameterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *QueryParamsInitParameters) DeepCopyInto(out *QueryParamsInitParameters) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryParamsInitParameters. +func (in *QueryParamsInitParameters) DeepCopy() *QueryParamsInitParameters { + if in == nil { + return nil + } + out := new(QueryParamsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueryParamsObservation) DeepCopyInto(out *QueryParamsObservation) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryParamsObservation. +func (in *QueryParamsObservation) DeepCopy() *QueryParamsObservation { + if in == nil { + return nil + } + out := new(QueryParamsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueryParamsParameters) DeepCopyInto(out *QueryParamsParameters) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryParamsParameters. 
+func (in *QueryParamsParameters) DeepCopy() *QueryParamsParameters { + if in == nil { + return nil + } + out := new(QueryParamsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RepresentationExampleInitParameters) DeepCopyInto(out *RepresentationExampleInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ExternalValue != nil { + in, out := &in.ExternalValue, &out.ExternalValue + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Summary != nil { + in, out := &in.Summary, &out.Summary + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepresentationExampleInitParameters. +func (in *RepresentationExampleInitParameters) DeepCopy() *RepresentationExampleInitParameters { + if in == nil { + return nil + } + out := new(RepresentationExampleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RepresentationExampleObservation) DeepCopyInto(out *RepresentationExampleObservation) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ExternalValue != nil { + in, out := &in.ExternalValue, &out.ExternalValue + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Summary != nil { + in, out := &in.Summary, &out.Summary + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepresentationExampleObservation. +func (in *RepresentationExampleObservation) DeepCopy() *RepresentationExampleObservation { + if in == nil { + return nil + } + out := new(RepresentationExampleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RepresentationExampleParameters) DeepCopyInto(out *RepresentationExampleParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ExternalValue != nil { + in, out := &in.ExternalValue, &out.ExternalValue + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Summary != nil { + in, out := &in.Summary, &out.Summary + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepresentationExampleParameters. 
+func (in *RepresentationExampleParameters) DeepCopy() *RepresentationExampleParameters { + if in == nil { + return nil + } + out := new(RepresentationExampleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RepresentationFormParameterExampleInitParameters) DeepCopyInto(out *RepresentationFormParameterExampleInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ExternalValue != nil { + in, out := &in.ExternalValue, &out.ExternalValue + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Summary != nil { + in, out := &in.Summary, &out.Summary + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepresentationFormParameterExampleInitParameters. +func (in *RepresentationFormParameterExampleInitParameters) DeepCopy() *RepresentationFormParameterExampleInitParameters { + if in == nil { + return nil + } + out := new(RepresentationFormParameterExampleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RepresentationFormParameterExampleObservation) DeepCopyInto(out *RepresentationFormParameterExampleObservation) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ExternalValue != nil { + in, out := &in.ExternalValue, &out.ExternalValue + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Summary != nil { + in, out := &in.Summary, &out.Summary + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepresentationFormParameterExampleObservation. +func (in *RepresentationFormParameterExampleObservation) DeepCopy() *RepresentationFormParameterExampleObservation { + if in == nil { + return nil + } + out := new(RepresentationFormParameterExampleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RepresentationFormParameterExampleParameters) DeepCopyInto(out *RepresentationFormParameterExampleParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ExternalValue != nil { + in, out := &in.ExternalValue, &out.ExternalValue + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Summary != nil { + in, out := &in.Summary, &out.Summary + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepresentationFormParameterExampleParameters. 
+func (in *RepresentationFormParameterExampleParameters) DeepCopy() *RepresentationFormParameterExampleParameters { + if in == nil { + return nil + } + out := new(RepresentationFormParameterExampleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RepresentationFormParameterInitParameters) DeepCopyInto(out *RepresentationFormParameterInitParameters) { + *out = *in + if in.DefaultValue != nil { + in, out := &in.DefaultValue, &out.DefaultValue + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Example != nil { + in, out := &in.Example, &out.Example + *out = make([]RepresentationFormParameterExampleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Required != nil { + in, out := &in.Required, &out.Required + *out = new(bool) + **out = **in + } + if in.SchemaID != nil { + in, out := &in.SchemaID, &out.SchemaID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.TypeName != nil { + in, out := &in.TypeName, &out.TypeName + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepresentationFormParameterInitParameters. 
+func (in *RepresentationFormParameterInitParameters) DeepCopy() *RepresentationFormParameterInitParameters { + if in == nil { + return nil + } + out := new(RepresentationFormParameterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RepresentationFormParameterObservation) DeepCopyInto(out *RepresentationFormParameterObservation) { + *out = *in + if in.DefaultValue != nil { + in, out := &in.DefaultValue, &out.DefaultValue + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Example != nil { + in, out := &in.Example, &out.Example + *out = make([]RepresentationFormParameterExampleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Required != nil { + in, out := &in.Required, &out.Required + *out = new(bool) + **out = **in + } + if in.SchemaID != nil { + in, out := &in.SchemaID, &out.SchemaID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.TypeName != nil { + in, out := &in.TypeName, &out.TypeName + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepresentationFormParameterObservation. 
+func (in *RepresentationFormParameterObservation) DeepCopy() *RepresentationFormParameterObservation { + if in == nil { + return nil + } + out := new(RepresentationFormParameterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RepresentationFormParameterParameters) DeepCopyInto(out *RepresentationFormParameterParameters) { + *out = *in + if in.DefaultValue != nil { + in, out := &in.DefaultValue, &out.DefaultValue + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Example != nil { + in, out := &in.Example, &out.Example + *out = make([]RepresentationFormParameterExampleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Required != nil { + in, out := &in.Required, &out.Required + *out = new(bool) + **out = **in + } + if in.SchemaID != nil { + in, out := &in.SchemaID, &out.SchemaID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.TypeName != nil { + in, out := &in.TypeName, &out.TypeName + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepresentationFormParameterParameters. 
+func (in *RepresentationFormParameterParameters) DeepCopy() *RepresentationFormParameterParameters { + if in == nil { + return nil + } + out := new(RepresentationFormParameterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RepresentationInitParameters) DeepCopyInto(out *RepresentationInitParameters) { + *out = *in + if in.ContentType != nil { + in, out := &in.ContentType, &out.ContentType + *out = new(string) + **out = **in + } + if in.Example != nil { + in, out := &in.Example, &out.Example + *out = make([]RepresentationExampleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FormParameter != nil { + in, out := &in.FormParameter, &out.FormParameter + *out = make([]FormParameterInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SchemaID != nil { + in, out := &in.SchemaID, &out.SchemaID + *out = new(string) + **out = **in + } + if in.TypeName != nil { + in, out := &in.TypeName, &out.TypeName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepresentationInitParameters. +func (in *RepresentationInitParameters) DeepCopy() *RepresentationInitParameters { + if in == nil { + return nil + } + out := new(RepresentationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RepresentationObservation) DeepCopyInto(out *RepresentationObservation) { + *out = *in + if in.ContentType != nil { + in, out := &in.ContentType, &out.ContentType + *out = new(string) + **out = **in + } + if in.Example != nil { + in, out := &in.Example, &out.Example + *out = make([]RepresentationExampleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FormParameter != nil { + in, out := &in.FormParameter, &out.FormParameter + *out = make([]FormParameterObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SchemaID != nil { + in, out := &in.SchemaID, &out.SchemaID + *out = new(string) + **out = **in + } + if in.TypeName != nil { + in, out := &in.TypeName, &out.TypeName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepresentationObservation. +func (in *RepresentationObservation) DeepCopy() *RepresentationObservation { + if in == nil { + return nil + } + out := new(RepresentationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RepresentationParameters) DeepCopyInto(out *RepresentationParameters) { + *out = *in + if in.ContentType != nil { + in, out := &in.ContentType, &out.ContentType + *out = new(string) + **out = **in + } + if in.Example != nil { + in, out := &in.Example, &out.Example + *out = make([]RepresentationExampleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FormParameter != nil { + in, out := &in.FormParameter, &out.FormParameter + *out = make([]FormParameterParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SchemaID != nil { + in, out := &in.SchemaID, &out.SchemaID + *out = new(string) + **out = **in + } + if in.TypeName != nil { + in, out := &in.TypeName, &out.TypeName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepresentationParameters. +func (in *RepresentationParameters) DeepCopy() *RepresentationParameters { + if in == nil { + return nil + } + out := new(RepresentationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RequestInitParameters) DeepCopyInto(out *RequestInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = make([]HeaderInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.QueryParameter != nil { + in, out := &in.QueryParameter, &out.QueryParameter + *out = make([]QueryParameterInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Representation != nil { + in, out := &in.Representation, &out.Representation + *out = make([]RepresentationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestInitParameters. +func (in *RequestInitParameters) DeepCopy() *RequestInitParameters { + if in == nil { + return nil + } + out := new(RequestInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RequestObservation) DeepCopyInto(out *RequestObservation) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = make([]HeaderObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.QueryParameter != nil { + in, out := &in.QueryParameter, &out.QueryParameter + *out = make([]QueryParameterObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Representation != nil { + in, out := &in.Representation, &out.Representation + *out = make([]RepresentationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestObservation. +func (in *RequestObservation) DeepCopy() *RequestObservation { + if in == nil { + return nil + } + out := new(RequestObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RequestParameters) DeepCopyInto(out *RequestParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = make([]HeaderParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.QueryParameter != nil { + in, out := &in.QueryParameter, &out.QueryParameter + *out = make([]QueryParameterParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Representation != nil { + in, out := &in.Representation, &out.Representation + *out = make([]RepresentationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestParameters. +func (in *RequestParameters) DeepCopy() *RequestParameters { + if in == nil { + return nil + } + out := new(RequestParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResponseHeaderInitParameters) DeepCopyInto(out *ResponseHeaderInitParameters) { + *out = *in + if in.DefaultValue != nil { + in, out := &in.DefaultValue, &out.DefaultValue + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Example != nil { + in, out := &in.Example, &out.Example + *out = make([]HeaderExampleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Required != nil { + in, out := &in.Required, &out.Required + *out = new(bool) + **out = **in + } + if in.SchemaID != nil { + in, out := &in.SchemaID, &out.SchemaID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.TypeName != nil { + in, out := &in.TypeName, &out.TypeName + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResponseHeaderInitParameters. +func (in *ResponseHeaderInitParameters) DeepCopy() *ResponseHeaderInitParameters { + if in == nil { + return nil + } + out := new(ResponseHeaderInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResponseHeaderObservation) DeepCopyInto(out *ResponseHeaderObservation) { + *out = *in + if in.DefaultValue != nil { + in, out := &in.DefaultValue, &out.DefaultValue + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Example != nil { + in, out := &in.Example, &out.Example + *out = make([]HeaderExampleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Required != nil { + in, out := &in.Required, &out.Required + *out = new(bool) + **out = **in + } + if in.SchemaID != nil { + in, out := &in.SchemaID, &out.SchemaID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.TypeName != nil { + in, out := &in.TypeName, &out.TypeName + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResponseHeaderObservation. +func (in *ResponseHeaderObservation) DeepCopy() *ResponseHeaderObservation { + if in == nil { + return nil + } + out := new(ResponseHeaderObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResponseHeaderParameters) DeepCopyInto(out *ResponseHeaderParameters) { + *out = *in + if in.DefaultValue != nil { + in, out := &in.DefaultValue, &out.DefaultValue + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Example != nil { + in, out := &in.Example, &out.Example + *out = make([]HeaderExampleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Required != nil { + in, out := &in.Required, &out.Required + *out = new(bool) + **out = **in + } + if in.SchemaID != nil { + in, out := &in.SchemaID, &out.SchemaID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.TypeName != nil { + in, out := &in.TypeName, &out.TypeName + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResponseHeaderParameters. +func (in *ResponseHeaderParameters) DeepCopy() *ResponseHeaderParameters { + if in == nil { + return nil + } + out := new(ResponseHeaderParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResponseInitParameters) DeepCopyInto(out *ResponseInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = make([]ResponseHeaderInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Representation != nil { + in, out := &in.Representation, &out.Representation + *out = make([]ResponseRepresentationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StatusCode != nil { + in, out := &in.StatusCode, &out.StatusCode + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResponseInitParameters. +func (in *ResponseInitParameters) DeepCopy() *ResponseInitParameters { + if in == nil { + return nil + } + out := new(ResponseInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResponseObservation) DeepCopyInto(out *ResponseObservation) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = make([]ResponseHeaderObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Representation != nil { + in, out := &in.Representation, &out.Representation + *out = make([]ResponseRepresentationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StatusCode != nil { + in, out := &in.StatusCode, &out.StatusCode + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResponseObservation. 
+func (in *ResponseObservation) DeepCopy() *ResponseObservation { + if in == nil { + return nil + } + out := new(ResponseObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResponseParameters) DeepCopyInto(out *ResponseParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = make([]ResponseHeaderParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Representation != nil { + in, out := &in.Representation, &out.Representation + *out = make([]ResponseRepresentationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StatusCode != nil { + in, out := &in.StatusCode, &out.StatusCode + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResponseParameters. +func (in *ResponseParameters) DeepCopy() *ResponseParameters { + if in == nil { + return nil + } + out := new(ResponseParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResponseRepresentationExampleInitParameters) DeepCopyInto(out *ResponseRepresentationExampleInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ExternalValue != nil { + in, out := &in.ExternalValue, &out.ExternalValue + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Summary != nil { + in, out := &in.Summary, &out.Summary + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResponseRepresentationExampleInitParameters. +func (in *ResponseRepresentationExampleInitParameters) DeepCopy() *ResponseRepresentationExampleInitParameters { + if in == nil { + return nil + } + out := new(ResponseRepresentationExampleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResponseRepresentationExampleObservation) DeepCopyInto(out *ResponseRepresentationExampleObservation) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ExternalValue != nil { + in, out := &in.ExternalValue, &out.ExternalValue + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Summary != nil { + in, out := &in.Summary, &out.Summary + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResponseRepresentationExampleObservation. 
+func (in *ResponseRepresentationExampleObservation) DeepCopy() *ResponseRepresentationExampleObservation { + if in == nil { + return nil + } + out := new(ResponseRepresentationExampleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResponseRepresentationExampleParameters) DeepCopyInto(out *ResponseRepresentationExampleParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ExternalValue != nil { + in, out := &in.ExternalValue, &out.ExternalValue + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Summary != nil { + in, out := &in.Summary, &out.Summary + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResponseRepresentationExampleParameters. +func (in *ResponseRepresentationExampleParameters) DeepCopy() *ResponseRepresentationExampleParameters { + if in == nil { + return nil + } + out := new(ResponseRepresentationExampleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResponseRepresentationInitParameters) DeepCopyInto(out *ResponseRepresentationInitParameters) { + *out = *in + if in.ContentType != nil { + in, out := &in.ContentType, &out.ContentType + *out = new(string) + **out = **in + } + if in.Example != nil { + in, out := &in.Example, &out.Example + *out = make([]ResponseRepresentationExampleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FormParameter != nil { + in, out := &in.FormParameter, &out.FormParameter + *out = make([]RepresentationFormParameterInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SchemaID != nil { + in, out := &in.SchemaID, &out.SchemaID + *out = new(string) + **out = **in + } + if in.TypeName != nil { + in, out := &in.TypeName, &out.TypeName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResponseRepresentationInitParameters. +func (in *ResponseRepresentationInitParameters) DeepCopy() *ResponseRepresentationInitParameters { + if in == nil { + return nil + } + out := new(ResponseRepresentationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResponseRepresentationObservation) DeepCopyInto(out *ResponseRepresentationObservation) { + *out = *in + if in.ContentType != nil { + in, out := &in.ContentType, &out.ContentType + *out = new(string) + **out = **in + } + if in.Example != nil { + in, out := &in.Example, &out.Example + *out = make([]ResponseRepresentationExampleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FormParameter != nil { + in, out := &in.FormParameter, &out.FormParameter + *out = make([]RepresentationFormParameterObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SchemaID != nil { + in, out := &in.SchemaID, &out.SchemaID + *out = new(string) + **out = **in + } + if in.TypeName != nil { + in, out := &in.TypeName, &out.TypeName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResponseRepresentationObservation. +func (in *ResponseRepresentationObservation) DeepCopy() *ResponseRepresentationObservation { + if in == nil { + return nil + } + out := new(ResponseRepresentationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResponseRepresentationParameters) DeepCopyInto(out *ResponseRepresentationParameters) { + *out = *in + if in.ContentType != nil { + in, out := &in.ContentType, &out.ContentType + *out = new(string) + **out = **in + } + if in.Example != nil { + in, out := &in.Example, &out.Example + *out = make([]ResponseRepresentationExampleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FormParameter != nil { + in, out := &in.FormParameter, &out.FormParameter + *out = make([]RepresentationFormParameterParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SchemaID != nil { + in, out := &in.SchemaID, &out.SchemaID + *out = new(string) + **out = **in + } + if in.TypeName != nil { + in, out := &in.TypeName, &out.TypeName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResponseRepresentationParameters. +func (in *ResponseRepresentationParameters) DeepCopy() *ResponseRepresentationParameters { + if in == nil { + return nil + } + out := new(ResponseRepresentationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScmInitParameters) DeepCopyInto(out *ScmInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScmInitParameters. +func (in *ScmInitParameters) DeepCopy() *ScmInitParameters { + if in == nil { + return nil + } + out := new(ScmInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScmObservation) DeepCopyInto(out *ScmObservation) { + *out = *in + if in.CertificateSource != nil { + in, out := &in.CertificateSource, &out.CertificateSource + *out = new(string) + **out = **in + } + if in.CertificateStatus != nil { + in, out := &in.CertificateStatus, &out.CertificateStatus + *out = new(string) + **out = **in + } + if in.Expiry != nil { + in, out := &in.Expiry, &out.Expiry + *out = new(string) + **out = **in + } + if in.HostName != nil { + in, out := &in.HostName, &out.HostName + *out = new(string) + **out = **in + } + if in.KeyVaultID != nil { + in, out := &in.KeyVaultID, &out.KeyVaultID + *out = new(string) + **out = **in + } + if in.NegotiateClientCertificate != nil { + in, out := &in.NegotiateClientCertificate, &out.NegotiateClientCertificate + *out = new(bool) + **out = **in + } + if in.SSLKeyvaultIdentityClientID != nil { + in, out := &in.SSLKeyvaultIdentityClientID, &out.SSLKeyvaultIdentityClientID + *out = new(string) + **out = **in + } + if in.Subject != nil { + in, out := &in.Subject, &out.Subject + *out = new(string) + **out = **in + } + if in.Thumbprint != nil { + in, out := &in.Thumbprint, &out.Thumbprint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScmObservation. +func (in *ScmObservation) DeepCopy() *ScmObservation { + if in == nil { + return nil + } + out := new(ScmObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScmParameters) DeepCopyInto(out *ScmParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScmParameters. 
+func (in *ScmParameters) DeepCopy() *ScmParameters { + if in == nil { + return nil + } + out := new(ScmParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecurityInitParameters) DeepCopyInto(out *SecurityInitParameters) { + *out = *in + if in.EnableBackendSsl30 != nil { + in, out := &in.EnableBackendSsl30, &out.EnableBackendSsl30 + *out = new(bool) + **out = **in + } + if in.EnableBackendTls10 != nil { + in, out := &in.EnableBackendTls10, &out.EnableBackendTls10 + *out = new(bool) + **out = **in + } + if in.EnableBackendTls11 != nil { + in, out := &in.EnableBackendTls11, &out.EnableBackendTls11 + *out = new(bool) + **out = **in + } + if in.EnableFrontendSsl30 != nil { + in, out := &in.EnableFrontendSsl30, &out.EnableFrontendSsl30 + *out = new(bool) + **out = **in + } + if in.EnableFrontendTls10 != nil { + in, out := &in.EnableFrontendTls10, &out.EnableFrontendTls10 + *out = new(bool) + **out = **in + } + if in.EnableFrontendTls11 != nil { + in, out := &in.EnableFrontendTls11, &out.EnableFrontendTls11 + *out = new(bool) + **out = **in + } + if in.TLSEcdheEcdsaWithAes128CbcShaCiphersEnabled != nil { + in, out := &in.TLSEcdheEcdsaWithAes128CbcShaCiphersEnabled, &out.TLSEcdheEcdsaWithAes128CbcShaCiphersEnabled + *out = new(bool) + **out = **in + } + if in.TLSEcdheEcdsaWithAes256CbcShaCiphersEnabled != nil { + in, out := &in.TLSEcdheEcdsaWithAes256CbcShaCiphersEnabled, &out.TLSEcdheEcdsaWithAes256CbcShaCiphersEnabled + *out = new(bool) + **out = **in + } + if in.TLSEcdheRsaWithAes128CbcShaCiphersEnabled != nil { + in, out := &in.TLSEcdheRsaWithAes128CbcShaCiphersEnabled, &out.TLSEcdheRsaWithAes128CbcShaCiphersEnabled + *out = new(bool) + **out = **in + } + if in.TLSEcdheRsaWithAes256CbcShaCiphersEnabled != nil { + in, out := &in.TLSEcdheRsaWithAes256CbcShaCiphersEnabled, &out.TLSEcdheRsaWithAes256CbcShaCiphersEnabled + *out = new(bool) + 
**out = **in + } + if in.TLSRsaWithAes128CbcSha256CiphersEnabled != nil { + in, out := &in.TLSRsaWithAes128CbcSha256CiphersEnabled, &out.TLSRsaWithAes128CbcSha256CiphersEnabled + *out = new(bool) + **out = **in + } + if in.TLSRsaWithAes128CbcShaCiphersEnabled != nil { + in, out := &in.TLSRsaWithAes128CbcShaCiphersEnabled, &out.TLSRsaWithAes128CbcShaCiphersEnabled + *out = new(bool) + **out = **in + } + if in.TLSRsaWithAes128GCMSha256CiphersEnabled != nil { + in, out := &in.TLSRsaWithAes128GCMSha256CiphersEnabled, &out.TLSRsaWithAes128GCMSha256CiphersEnabled + *out = new(bool) + **out = **in + } + if in.TLSRsaWithAes256CbcSha256CiphersEnabled != nil { + in, out := &in.TLSRsaWithAes256CbcSha256CiphersEnabled, &out.TLSRsaWithAes256CbcSha256CiphersEnabled + *out = new(bool) + **out = **in + } + if in.TLSRsaWithAes256CbcShaCiphersEnabled != nil { + in, out := &in.TLSRsaWithAes256CbcShaCiphersEnabled, &out.TLSRsaWithAes256CbcShaCiphersEnabled + *out = new(bool) + **out = **in + } + if in.TLSRsaWithAes256GCMSha384CiphersEnabled != nil { + in, out := &in.TLSRsaWithAes256GCMSha384CiphersEnabled, &out.TLSRsaWithAes256GCMSha384CiphersEnabled + *out = new(bool) + **out = **in + } + if in.TripleDesCiphersEnabled != nil { + in, out := &in.TripleDesCiphersEnabled, &out.TripleDesCiphersEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityInitParameters. +func (in *SecurityInitParameters) DeepCopy() *SecurityInitParameters { + if in == nil { + return nil + } + out := new(SecurityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SecurityObservation) DeepCopyInto(out *SecurityObservation) { + *out = *in + if in.EnableBackendSsl30 != nil { + in, out := &in.EnableBackendSsl30, &out.EnableBackendSsl30 + *out = new(bool) + **out = **in + } + if in.EnableBackendTls10 != nil { + in, out := &in.EnableBackendTls10, &out.EnableBackendTls10 + *out = new(bool) + **out = **in + } + if in.EnableBackendTls11 != nil { + in, out := &in.EnableBackendTls11, &out.EnableBackendTls11 + *out = new(bool) + **out = **in + } + if in.EnableFrontendSsl30 != nil { + in, out := &in.EnableFrontendSsl30, &out.EnableFrontendSsl30 + *out = new(bool) + **out = **in + } + if in.EnableFrontendTls10 != nil { + in, out := &in.EnableFrontendTls10, &out.EnableFrontendTls10 + *out = new(bool) + **out = **in + } + if in.EnableFrontendTls11 != nil { + in, out := &in.EnableFrontendTls11, &out.EnableFrontendTls11 + *out = new(bool) + **out = **in + } + if in.TLSEcdheEcdsaWithAes128CbcShaCiphersEnabled != nil { + in, out := &in.TLSEcdheEcdsaWithAes128CbcShaCiphersEnabled, &out.TLSEcdheEcdsaWithAes128CbcShaCiphersEnabled + *out = new(bool) + **out = **in + } + if in.TLSEcdheEcdsaWithAes256CbcShaCiphersEnabled != nil { + in, out := &in.TLSEcdheEcdsaWithAes256CbcShaCiphersEnabled, &out.TLSEcdheEcdsaWithAes256CbcShaCiphersEnabled + *out = new(bool) + **out = **in + } + if in.TLSEcdheRsaWithAes128CbcShaCiphersEnabled != nil { + in, out := &in.TLSEcdheRsaWithAes128CbcShaCiphersEnabled, &out.TLSEcdheRsaWithAes128CbcShaCiphersEnabled + *out = new(bool) + **out = **in + } + if in.TLSEcdheRsaWithAes256CbcShaCiphersEnabled != nil { + in, out := &in.TLSEcdheRsaWithAes256CbcShaCiphersEnabled, &out.TLSEcdheRsaWithAes256CbcShaCiphersEnabled + *out = new(bool) + **out = **in + } + if in.TLSRsaWithAes128CbcSha256CiphersEnabled != nil { + in, out := &in.TLSRsaWithAes128CbcSha256CiphersEnabled, &out.TLSRsaWithAes128CbcSha256CiphersEnabled + *out = new(bool) + **out = **in + } + if in.TLSRsaWithAes128CbcShaCiphersEnabled != nil { + in, out := 
&in.TLSRsaWithAes128CbcShaCiphersEnabled, &out.TLSRsaWithAes128CbcShaCiphersEnabled + *out = new(bool) + **out = **in + } + if in.TLSRsaWithAes128GCMSha256CiphersEnabled != nil { + in, out := &in.TLSRsaWithAes128GCMSha256CiphersEnabled, &out.TLSRsaWithAes128GCMSha256CiphersEnabled + *out = new(bool) + **out = **in + } + if in.TLSRsaWithAes256CbcSha256CiphersEnabled != nil { + in, out := &in.TLSRsaWithAes256CbcSha256CiphersEnabled, &out.TLSRsaWithAes256CbcSha256CiphersEnabled + *out = new(bool) + **out = **in + } + if in.TLSRsaWithAes256CbcShaCiphersEnabled != nil { + in, out := &in.TLSRsaWithAes256CbcShaCiphersEnabled, &out.TLSRsaWithAes256CbcShaCiphersEnabled + *out = new(bool) + **out = **in + } + if in.TLSRsaWithAes256GCMSha384CiphersEnabled != nil { + in, out := &in.TLSRsaWithAes256GCMSha384CiphersEnabled, &out.TLSRsaWithAes256GCMSha384CiphersEnabled + *out = new(bool) + **out = **in + } + if in.TripleDesCiphersEnabled != nil { + in, out := &in.TripleDesCiphersEnabled, &out.TripleDesCiphersEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityObservation. +func (in *SecurityObservation) DeepCopy() *SecurityObservation { + if in == nil { + return nil + } + out := new(SecurityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SecurityParameters) DeepCopyInto(out *SecurityParameters) { + *out = *in + if in.EnableBackendSsl30 != nil { + in, out := &in.EnableBackendSsl30, &out.EnableBackendSsl30 + *out = new(bool) + **out = **in + } + if in.EnableBackendTls10 != nil { + in, out := &in.EnableBackendTls10, &out.EnableBackendTls10 + *out = new(bool) + **out = **in + } + if in.EnableBackendTls11 != nil { + in, out := &in.EnableBackendTls11, &out.EnableBackendTls11 + *out = new(bool) + **out = **in + } + if in.EnableFrontendSsl30 != nil { + in, out := &in.EnableFrontendSsl30, &out.EnableFrontendSsl30 + *out = new(bool) + **out = **in + } + if in.EnableFrontendTls10 != nil { + in, out := &in.EnableFrontendTls10, &out.EnableFrontendTls10 + *out = new(bool) + **out = **in + } + if in.EnableFrontendTls11 != nil { + in, out := &in.EnableFrontendTls11, &out.EnableFrontendTls11 + *out = new(bool) + **out = **in + } + if in.TLSEcdheEcdsaWithAes128CbcShaCiphersEnabled != nil { + in, out := &in.TLSEcdheEcdsaWithAes128CbcShaCiphersEnabled, &out.TLSEcdheEcdsaWithAes128CbcShaCiphersEnabled + *out = new(bool) + **out = **in + } + if in.TLSEcdheEcdsaWithAes256CbcShaCiphersEnabled != nil { + in, out := &in.TLSEcdheEcdsaWithAes256CbcShaCiphersEnabled, &out.TLSEcdheEcdsaWithAes256CbcShaCiphersEnabled + *out = new(bool) + **out = **in + } + if in.TLSEcdheRsaWithAes128CbcShaCiphersEnabled != nil { + in, out := &in.TLSEcdheRsaWithAes128CbcShaCiphersEnabled, &out.TLSEcdheRsaWithAes128CbcShaCiphersEnabled + *out = new(bool) + **out = **in + } + if in.TLSEcdheRsaWithAes256CbcShaCiphersEnabled != nil { + in, out := &in.TLSEcdheRsaWithAes256CbcShaCiphersEnabled, &out.TLSEcdheRsaWithAes256CbcShaCiphersEnabled + *out = new(bool) + **out = **in + } + if in.TLSRsaWithAes128CbcSha256CiphersEnabled != nil { + in, out := &in.TLSRsaWithAes128CbcSha256CiphersEnabled, &out.TLSRsaWithAes128CbcSha256CiphersEnabled + *out = new(bool) + **out = **in + } + if in.TLSRsaWithAes128CbcShaCiphersEnabled != nil { + in, out := 
&in.TLSRsaWithAes128CbcShaCiphersEnabled, &out.TLSRsaWithAes128CbcShaCiphersEnabled + *out = new(bool) + **out = **in + } + if in.TLSRsaWithAes128GCMSha256CiphersEnabled != nil { + in, out := &in.TLSRsaWithAes128GCMSha256CiphersEnabled, &out.TLSRsaWithAes128GCMSha256CiphersEnabled + *out = new(bool) + **out = **in + } + if in.TLSRsaWithAes256CbcSha256CiphersEnabled != nil { + in, out := &in.TLSRsaWithAes256CbcSha256CiphersEnabled, &out.TLSRsaWithAes256CbcSha256CiphersEnabled + *out = new(bool) + **out = **in + } + if in.TLSRsaWithAes256CbcShaCiphersEnabled != nil { + in, out := &in.TLSRsaWithAes256CbcShaCiphersEnabled, &out.TLSRsaWithAes256CbcShaCiphersEnabled + *out = new(bool) + **out = **in + } + if in.TLSRsaWithAes256GCMSha384CiphersEnabled != nil { + in, out := &in.TLSRsaWithAes256GCMSha384CiphersEnabled, &out.TLSRsaWithAes256GCMSha384CiphersEnabled + *out = new(bool) + **out = **in + } + if in.TripleDesCiphersEnabled != nil { + in, out := &in.TripleDesCiphersEnabled, &out.TripleDesCiphersEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityParameters. +func (in *SecurityParameters) DeepCopy() *SecurityParameters { + if in == nil { + return nil + } + out := new(SecurityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerX509NameInitParameters) DeepCopyInto(out *ServerX509NameInitParameters) { + *out = *in + if in.IssuerCertificateThumbprint != nil { + in, out := &in.IssuerCertificateThumbprint, &out.IssuerCertificateThumbprint + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerX509NameInitParameters. 
+func (in *ServerX509NameInitParameters) DeepCopy() *ServerX509NameInitParameters { + if in == nil { + return nil + } + out := new(ServerX509NameInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerX509NameObservation) DeepCopyInto(out *ServerX509NameObservation) { + *out = *in + if in.IssuerCertificateThumbprint != nil { + in, out := &in.IssuerCertificateThumbprint, &out.IssuerCertificateThumbprint + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerX509NameObservation. +func (in *ServerX509NameObservation) DeepCopy() *ServerX509NameObservation { + if in == nil { + return nil + } + out := new(ServerX509NameObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerX509NameParameters) DeepCopyInto(out *ServerX509NameParameters) { + *out = *in + if in.IssuerCertificateThumbprint != nil { + in, out := &in.IssuerCertificateThumbprint, &out.IssuerCertificateThumbprint + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerX509NameParameters. +func (in *ServerX509NameParameters) DeepCopy() *ServerX509NameParameters { + if in == nil { + return nil + } + out := new(ServerX509NameParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceFabricClusterInitParameters) DeepCopyInto(out *ServiceFabricClusterInitParameters) { + *out = *in + if in.ClientCertificateID != nil { + in, out := &in.ClientCertificateID, &out.ClientCertificateID + *out = new(string) + **out = **in + } + if in.ClientCertificateThumbprint != nil { + in, out := &in.ClientCertificateThumbprint, &out.ClientCertificateThumbprint + *out = new(string) + **out = **in + } + if in.ManagementEndpoints != nil { + in, out := &in.ManagementEndpoints, &out.ManagementEndpoints + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MaxPartitionResolutionRetries != nil { + in, out := &in.MaxPartitionResolutionRetries, &out.MaxPartitionResolutionRetries + *out = new(float64) + **out = **in + } + if in.ServerCertificateThumbprints != nil { + in, out := &in.ServerCertificateThumbprints, &out.ServerCertificateThumbprints + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ServerX509Name != nil { + in, out := &in.ServerX509Name, &out.ServerX509Name + *out = make([]ServerX509NameInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceFabricClusterInitParameters. +func (in *ServiceFabricClusterInitParameters) DeepCopy() *ServiceFabricClusterInitParameters { + if in == nil { + return nil + } + out := new(ServiceFabricClusterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceFabricClusterObservation) DeepCopyInto(out *ServiceFabricClusterObservation) { + *out = *in + if in.ClientCertificateID != nil { + in, out := &in.ClientCertificateID, &out.ClientCertificateID + *out = new(string) + **out = **in + } + if in.ClientCertificateThumbprint != nil { + in, out := &in.ClientCertificateThumbprint, &out.ClientCertificateThumbprint + *out = new(string) + **out = **in + } + if in.ManagementEndpoints != nil { + in, out := &in.ManagementEndpoints, &out.ManagementEndpoints + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MaxPartitionResolutionRetries != nil { + in, out := &in.MaxPartitionResolutionRetries, &out.MaxPartitionResolutionRetries + *out = new(float64) + **out = **in + } + if in.ServerCertificateThumbprints != nil { + in, out := &in.ServerCertificateThumbprints, &out.ServerCertificateThumbprints + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ServerX509Name != nil { + in, out := &in.ServerX509Name, &out.ServerX509Name + *out = make([]ServerX509NameObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceFabricClusterObservation. +func (in *ServiceFabricClusterObservation) DeepCopy() *ServiceFabricClusterObservation { + if in == nil { + return nil + } + out := new(ServiceFabricClusterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceFabricClusterParameters) DeepCopyInto(out *ServiceFabricClusterParameters) { + *out = *in + if in.ClientCertificateID != nil { + in, out := &in.ClientCertificateID, &out.ClientCertificateID + *out = new(string) + **out = **in + } + if in.ClientCertificateThumbprint != nil { + in, out := &in.ClientCertificateThumbprint, &out.ClientCertificateThumbprint + *out = new(string) + **out = **in + } + if in.ManagementEndpoints != nil { + in, out := &in.ManagementEndpoints, &out.ManagementEndpoints + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MaxPartitionResolutionRetries != nil { + in, out := &in.MaxPartitionResolutionRetries, &out.MaxPartitionResolutionRetries + *out = new(float64) + **out = **in + } + if in.ServerCertificateThumbprints != nil { + in, out := &in.ServerCertificateThumbprints, &out.ServerCertificateThumbprints + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ServerX509Name != nil { + in, out := &in.ServerX509Name, &out.ServerX509Name + *out = make([]ServerX509NameParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceFabricClusterParameters. +func (in *ServiceFabricClusterParameters) DeepCopy() *ServiceFabricClusterParameters { + if in == nil { + return nil + } + out := new(ServiceFabricClusterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SignInInitParameters) DeepCopyInto(out *SignInInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignInInitParameters. +func (in *SignInInitParameters) DeepCopy() *SignInInitParameters { + if in == nil { + return nil + } + out := new(SignInInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SignInObservation) DeepCopyInto(out *SignInObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignInObservation. +func (in *SignInObservation) DeepCopy() *SignInObservation { + if in == nil { + return nil + } + out := new(SignInObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SignInParameters) DeepCopyInto(out *SignInParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignInParameters. +func (in *SignInParameters) DeepCopy() *SignInParameters { + if in == nil { + return nil + } + out := new(SignInParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SignUpInitParameters) DeepCopyInto(out *SignUpInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.TermsOfService != nil { + in, out := &in.TermsOfService, &out.TermsOfService + *out = new(TermsOfServiceInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignUpInitParameters. +func (in *SignUpInitParameters) DeepCopy() *SignUpInitParameters { + if in == nil { + return nil + } + out := new(SignUpInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SignUpObservation) DeepCopyInto(out *SignUpObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.TermsOfService != nil { + in, out := &in.TermsOfService, &out.TermsOfService + *out = new(TermsOfServiceObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignUpObservation. +func (in *SignUpObservation) DeepCopy() *SignUpObservation { + if in == nil { + return nil + } + out := new(SignUpObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SignUpParameters) DeepCopyInto(out *SignUpParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.TermsOfService != nil { + in, out := &in.TermsOfService, &out.TermsOfService + *out = new(TermsOfServiceParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignUpParameters. 
+func (in *SignUpParameters) DeepCopy() *SignUpParameters { + if in == nil { + return nil + } + out := new(SignUpParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubscriptionKeyParameterNamesInitParameters) DeepCopyInto(out *SubscriptionKeyParameterNamesInitParameters) { + *out = *in + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = new(string) + **out = **in + } + if in.Query != nil { + in, out := &in.Query, &out.Query + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionKeyParameterNamesInitParameters. +func (in *SubscriptionKeyParameterNamesInitParameters) DeepCopy() *SubscriptionKeyParameterNamesInitParameters { + if in == nil { + return nil + } + out := new(SubscriptionKeyParameterNamesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubscriptionKeyParameterNamesObservation) DeepCopyInto(out *SubscriptionKeyParameterNamesObservation) { + *out = *in + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = new(string) + **out = **in + } + if in.Query != nil { + in, out := &in.Query, &out.Query + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionKeyParameterNamesObservation. +func (in *SubscriptionKeyParameterNamesObservation) DeepCopy() *SubscriptionKeyParameterNamesObservation { + if in == nil { + return nil + } + out := new(SubscriptionKeyParameterNamesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SubscriptionKeyParameterNamesParameters) DeepCopyInto(out *SubscriptionKeyParameterNamesParameters) { + *out = *in + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = new(string) + **out = **in + } + if in.Query != nil { + in, out := &in.Query, &out.Query + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionKeyParameterNamesParameters. +func (in *SubscriptionKeyParameterNamesParameters) DeepCopy() *SubscriptionKeyParameterNamesParameters { + if in == nil { + return nil + } + out := new(SubscriptionKeyParameterNamesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSInitParameters) DeepCopyInto(out *TLSInitParameters) { + *out = *in + if in.ValidateCertificateChain != nil { + in, out := &in.ValidateCertificateChain, &out.ValidateCertificateChain + *out = new(bool) + **out = **in + } + if in.ValidateCertificateName != nil { + in, out := &in.ValidateCertificateName, &out.ValidateCertificateName + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSInitParameters. +func (in *TLSInitParameters) DeepCopy() *TLSInitParameters { + if in == nil { + return nil + } + out := new(TLSInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TLSObservation) DeepCopyInto(out *TLSObservation) { + *out = *in + if in.ValidateCertificateChain != nil { + in, out := &in.ValidateCertificateChain, &out.ValidateCertificateChain + *out = new(bool) + **out = **in + } + if in.ValidateCertificateName != nil { + in, out := &in.ValidateCertificateName, &out.ValidateCertificateName + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSObservation. +func (in *TLSObservation) DeepCopy() *TLSObservation { + if in == nil { + return nil + } + out := new(TLSObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSParameters) DeepCopyInto(out *TLSParameters) { + *out = *in + if in.ValidateCertificateChain != nil { + in, out := &in.ValidateCertificateChain, &out.ValidateCertificateChain + *out = new(bool) + **out = **in + } + if in.ValidateCertificateName != nil { + in, out := &in.ValidateCertificateName, &out.ValidateCertificateName + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSParameters. +func (in *TLSParameters) DeepCopy() *TLSParameters { + if in == nil { + return nil + } + out := new(TLSParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TemplateParameterExampleInitParameters) DeepCopyInto(out *TemplateParameterExampleInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ExternalValue != nil { + in, out := &in.ExternalValue, &out.ExternalValue + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Summary != nil { + in, out := &in.Summary, &out.Summary + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateParameterExampleInitParameters. +func (in *TemplateParameterExampleInitParameters) DeepCopy() *TemplateParameterExampleInitParameters { + if in == nil { + return nil + } + out := new(TemplateParameterExampleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TemplateParameterExampleObservation) DeepCopyInto(out *TemplateParameterExampleObservation) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ExternalValue != nil { + in, out := &in.ExternalValue, &out.ExternalValue + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Summary != nil { + in, out := &in.Summary, &out.Summary + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateParameterExampleObservation. 
+func (in *TemplateParameterExampleObservation) DeepCopy() *TemplateParameterExampleObservation { + if in == nil { + return nil + } + out := new(TemplateParameterExampleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TemplateParameterExampleParameters) DeepCopyInto(out *TemplateParameterExampleParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ExternalValue != nil { + in, out := &in.ExternalValue, &out.ExternalValue + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Summary != nil { + in, out := &in.Summary, &out.Summary + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateParameterExampleParameters. +func (in *TemplateParameterExampleParameters) DeepCopy() *TemplateParameterExampleParameters { + if in == nil { + return nil + } + out := new(TemplateParameterExampleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TemplateParameterInitParameters) DeepCopyInto(out *TemplateParameterInitParameters) { + *out = *in + if in.DefaultValue != nil { + in, out := &in.DefaultValue, &out.DefaultValue + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Example != nil { + in, out := &in.Example, &out.Example + *out = make([]TemplateParameterExampleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Required != nil { + in, out := &in.Required, &out.Required + *out = new(bool) + **out = **in + } + if in.SchemaID != nil { + in, out := &in.SchemaID, &out.SchemaID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.TypeName != nil { + in, out := &in.TypeName, &out.TypeName + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateParameterInitParameters. +func (in *TemplateParameterInitParameters) DeepCopy() *TemplateParameterInitParameters { + if in == nil { + return nil + } + out := new(TemplateParameterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TemplateParameterObservation) DeepCopyInto(out *TemplateParameterObservation) { + *out = *in + if in.DefaultValue != nil { + in, out := &in.DefaultValue, &out.DefaultValue + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Example != nil { + in, out := &in.Example, &out.Example + *out = make([]TemplateParameterExampleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Required != nil { + in, out := &in.Required, &out.Required + *out = new(bool) + **out = **in + } + if in.SchemaID != nil { + in, out := &in.SchemaID, &out.SchemaID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.TypeName != nil { + in, out := &in.TypeName, &out.TypeName + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateParameterObservation. +func (in *TemplateParameterObservation) DeepCopy() *TemplateParameterObservation { + if in == nil { + return nil + } + out := new(TemplateParameterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TemplateParameterParameters) DeepCopyInto(out *TemplateParameterParameters) { + *out = *in + if in.DefaultValue != nil { + in, out := &in.DefaultValue, &out.DefaultValue + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Example != nil { + in, out := &in.Example, &out.Example + *out = make([]TemplateParameterExampleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Required != nil { + in, out := &in.Required, &out.Required + *out = new(bool) + **out = **in + } + if in.SchemaID != nil { + in, out := &in.SchemaID, &out.SchemaID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.TypeName != nil { + in, out := &in.TypeName, &out.TypeName + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateParameterParameters. +func (in *TemplateParameterParameters) DeepCopy() *TemplateParameterParameters { + if in == nil { + return nil + } + out := new(TemplateParameterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TenantAccessInitParameters) DeepCopyInto(out *TenantAccessInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TenantAccessInitParameters. +func (in *TenantAccessInitParameters) DeepCopy() *TenantAccessInitParameters { + if in == nil { + return nil + } + out := new(TenantAccessInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TenantAccessObservation) DeepCopyInto(out *TenantAccessObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TenantAccessObservation. +func (in *TenantAccessObservation) DeepCopy() *TenantAccessObservation { + if in == nil { + return nil + } + out := new(TenantAccessObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TenantAccessParameters) DeepCopyInto(out *TenantAccessParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TenantAccessParameters. +func (in *TenantAccessParameters) DeepCopy() *TenantAccessParameters { + if in == nil { + return nil + } + out := new(TenantAccessParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TermsOfServiceInitParameters) DeepCopyInto(out *TermsOfServiceInitParameters) { + *out = *in + if in.ConsentRequired != nil { + in, out := &in.ConsentRequired, &out.ConsentRequired + *out = new(bool) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Text != nil { + in, out := &in.Text, &out.Text + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TermsOfServiceInitParameters. +func (in *TermsOfServiceInitParameters) DeepCopy() *TermsOfServiceInitParameters { + if in == nil { + return nil + } + out := new(TermsOfServiceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TermsOfServiceObservation) DeepCopyInto(out *TermsOfServiceObservation) { + *out = *in + if in.ConsentRequired != nil { + in, out := &in.ConsentRequired, &out.ConsentRequired + *out = new(bool) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Text != nil { + in, out := &in.Text, &out.Text + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TermsOfServiceObservation. +func (in *TermsOfServiceObservation) DeepCopy() *TermsOfServiceObservation { + if in == nil { + return nil + } + out := new(TermsOfServiceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TermsOfServiceParameters) DeepCopyInto(out *TermsOfServiceParameters) { + *out = *in + if in.ConsentRequired != nil { + in, out := &in.ConsentRequired, &out.ConsentRequired + *out = new(bool) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Text != nil { + in, out := &in.Text, &out.Text + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TermsOfServiceParameters. +func (in *TermsOfServiceParameters) DeepCopy() *TermsOfServiceParameters { + if in == nil { + return nil + } + out := new(TermsOfServiceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValueFromKeyVaultInitParameters) DeepCopyInto(out *ValueFromKeyVaultInitParameters) { + *out = *in + if in.IdentityClientID != nil { + in, out := &in.IdentityClientID, &out.IdentityClientID + *out = new(string) + **out = **in + } + if in.SecretID != nil { + in, out := &in.SecretID, &out.SecretID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValueFromKeyVaultInitParameters. +func (in *ValueFromKeyVaultInitParameters) DeepCopy() *ValueFromKeyVaultInitParameters { + if in == nil { + return nil + } + out := new(ValueFromKeyVaultInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ValueFromKeyVaultObservation) DeepCopyInto(out *ValueFromKeyVaultObservation) { + *out = *in + if in.IdentityClientID != nil { + in, out := &in.IdentityClientID, &out.IdentityClientID + *out = new(string) + **out = **in + } + if in.SecretID != nil { + in, out := &in.SecretID, &out.SecretID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValueFromKeyVaultObservation. +func (in *ValueFromKeyVaultObservation) DeepCopy() *ValueFromKeyVaultObservation { + if in == nil { + return nil + } + out := new(ValueFromKeyVaultObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValueFromKeyVaultParameters) DeepCopyInto(out *ValueFromKeyVaultParameters) { + *out = *in + if in.IdentityClientID != nil { + in, out := &in.IdentityClientID, &out.IdentityClientID + *out = new(string) + **out = **in + } + if in.SecretID != nil { + in, out := &in.SecretID, &out.SecretID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValueFromKeyVaultParameters. +func (in *ValueFromKeyVaultParameters) DeepCopy() *ValueFromKeyVaultParameters { + if in == nil { + return nil + } + out := new(ValueFromKeyVaultParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualNetworkConfigurationInitParameters) DeepCopyInto(out *VirtualNetworkConfigurationInitParameters) { + *out = *in + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkConfigurationInitParameters. +func (in *VirtualNetworkConfigurationInitParameters) DeepCopy() *VirtualNetworkConfigurationInitParameters { + if in == nil { + return nil + } + out := new(VirtualNetworkConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualNetworkConfigurationObservation) DeepCopyInto(out *VirtualNetworkConfigurationObservation) { + *out = *in + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkConfigurationObservation. +func (in *VirtualNetworkConfigurationObservation) DeepCopy() *VirtualNetworkConfigurationObservation { + if in == nil { + return nil + } + out := new(VirtualNetworkConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualNetworkConfigurationParameters) DeepCopyInto(out *VirtualNetworkConfigurationParameters) { + *out = *in + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkConfigurationParameters. +func (in *VirtualNetworkConfigurationParameters) DeepCopy() *VirtualNetworkConfigurationParameters { + if in == nil { + return nil + } + out := new(VirtualNetworkConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WsdlSelectorInitParameters) DeepCopyInto(out *WsdlSelectorInitParameters) { + *out = *in + if in.EndpointName != nil { + in, out := &in.EndpointName, &out.EndpointName + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WsdlSelectorInitParameters. +func (in *WsdlSelectorInitParameters) DeepCopy() *WsdlSelectorInitParameters { + if in == nil { + return nil + } + out := new(WsdlSelectorInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WsdlSelectorObservation) DeepCopyInto(out *WsdlSelectorObservation) { + *out = *in + if in.EndpointName != nil { + in, out := &in.EndpointName, &out.EndpointName + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WsdlSelectorObservation. +func (in *WsdlSelectorObservation) DeepCopy() *WsdlSelectorObservation { + if in == nil { + return nil + } + out := new(WsdlSelectorObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WsdlSelectorParameters) DeepCopyInto(out *WsdlSelectorParameters) { + *out = *in + if in.EndpointName != nil { + in, out := &in.EndpointName, &out.EndpointName + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WsdlSelectorParameters. +func (in *WsdlSelectorParameters) DeepCopy() *WsdlSelectorParameters { + if in == nil { + return nil + } + out := new(WsdlSelectorParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/apimanagement/v1beta2/zz_generated.managed.go b/apis/apimanagement/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..3abb1012b --- /dev/null +++ b/apis/apimanagement/v1beta2/zz_generated.managed.go @@ -0,0 +1,548 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this API. 
+func (mg *API) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this API. +func (mg *API) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this API. +func (mg *API) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this API. +func (mg *API) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this API. +func (mg *API) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this API. +func (mg *API) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this API. +func (mg *API) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this API. +func (mg *API) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this API. +func (mg *API) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this API. +func (mg *API) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this API. +func (mg *API) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this API. +func (mg *API) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this APIDiagnostic. 
+func (mg *APIDiagnostic) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this APIDiagnostic. +func (mg *APIDiagnostic) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this APIDiagnostic. +func (mg *APIDiagnostic) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this APIDiagnostic. +func (mg *APIDiagnostic) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this APIDiagnostic. +func (mg *APIDiagnostic) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this APIDiagnostic. +func (mg *APIDiagnostic) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this APIDiagnostic. +func (mg *APIDiagnostic) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this APIDiagnostic. +func (mg *APIDiagnostic) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this APIDiagnostic. +func (mg *APIDiagnostic) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this APIDiagnostic. +func (mg *APIDiagnostic) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this APIDiagnostic. +func (mg *APIDiagnostic) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this APIDiagnostic. 
+func (mg *APIDiagnostic) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this APIOperation. +func (mg *APIOperation) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this APIOperation. +func (mg *APIOperation) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this APIOperation. +func (mg *APIOperation) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this APIOperation. +func (mg *APIOperation) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this APIOperation. +func (mg *APIOperation) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this APIOperation. +func (mg *APIOperation) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this APIOperation. +func (mg *APIOperation) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this APIOperation. +func (mg *APIOperation) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this APIOperation. +func (mg *APIOperation) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this APIOperation. +func (mg *APIOperation) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this APIOperation. 
+func (mg *APIOperation) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this APIOperation. +func (mg *APIOperation) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Backend. +func (mg *Backend) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Backend. +func (mg *Backend) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Backend. +func (mg *Backend) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Backend. +func (mg *Backend) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Backend. +func (mg *Backend) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Backend. +func (mg *Backend) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Backend. +func (mg *Backend) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Backend. +func (mg *Backend) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Backend. +func (mg *Backend) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Backend. +func (mg *Backend) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Backend. 
+func (mg *Backend) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Backend. +func (mg *Backend) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Diagnostic. +func (mg *Diagnostic) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Diagnostic. +func (mg *Diagnostic) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Diagnostic. +func (mg *Diagnostic) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Diagnostic. +func (mg *Diagnostic) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Diagnostic. +func (mg *Diagnostic) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Diagnostic. +func (mg *Diagnostic) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Diagnostic. +func (mg *Diagnostic) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Diagnostic. +func (mg *Diagnostic) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Diagnostic. +func (mg *Diagnostic) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Diagnostic. +func (mg *Diagnostic) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Diagnostic. 
+func (mg *Diagnostic) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Diagnostic. +func (mg *Diagnostic) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Gateway. +func (mg *Gateway) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Gateway. +func (mg *Gateway) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Gateway. +func (mg *Gateway) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Gateway. +func (mg *Gateway) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Gateway. +func (mg *Gateway) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Gateway. +func (mg *Gateway) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Gateway. +func (mg *Gateway) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Gateway. +func (mg *Gateway) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Gateway. +func (mg *Gateway) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Gateway. +func (mg *Gateway) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Gateway. 
+func (mg *Gateway) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Gateway. +func (mg *Gateway) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Logger. +func (mg *Logger) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Logger. +func (mg *Logger) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Logger. +func (mg *Logger) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Logger. +func (mg *Logger) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Logger. +func (mg *Logger) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Logger. +func (mg *Logger) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Logger. +func (mg *Logger) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Logger. +func (mg *Logger) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Logger. +func (mg *Logger) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Logger. +func (mg *Logger) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Logger. 
+func (mg *Logger) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Logger. +func (mg *Logger) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Management. +func (mg *Management) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Management. +func (mg *Management) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Management. +func (mg *Management) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Management. +func (mg *Management) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Management. +func (mg *Management) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Management. +func (mg *Management) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Management. +func (mg *Management) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Management. +func (mg *Management) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Management. +func (mg *Management) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Management. +func (mg *Management) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Management. 
+func (mg *Management) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Management. +func (mg *Management) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this NamedValue. +func (mg *NamedValue) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this NamedValue. +func (mg *NamedValue) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this NamedValue. +func (mg *NamedValue) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this NamedValue. +func (mg *NamedValue) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this NamedValue. +func (mg *NamedValue) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this NamedValue. +func (mg *NamedValue) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this NamedValue. +func (mg *NamedValue) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this NamedValue. +func (mg *NamedValue) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this NamedValue. +func (mg *NamedValue) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this NamedValue. 
+func (mg *NamedValue) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this NamedValue. +func (mg *NamedValue) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this NamedValue. +func (mg *NamedValue) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/apimanagement/v1beta2/zz_generated.managedlist.go b/apis/apimanagement/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..b647d19e1 --- /dev/null +++ b/apis/apimanagement/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,89 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this APIDiagnosticList. +func (l *APIDiagnosticList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this APIList. +func (l *APIList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this APIOperationList. +func (l *APIOperationList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this BackendList. +func (l *BackendList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this DiagnosticList. 
+func (l *DiagnosticList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this GatewayList. +func (l *GatewayList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this LoggerList. +func (l *LoggerList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ManagementList. +func (l *ManagementList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this NamedValueList. +func (l *NamedValueList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/apimanagement/v1beta2/zz_generated.resolvers.go b/apis/apimanagement/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..3f3b4bd03 --- /dev/null +++ b/apis/apimanagement/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,692 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + rconfig "github.com/upbound/provider-azure/apis/rconfig" + client "sigs.k8s.io/controller-runtime/pkg/client" + + // ResolveReferences of this API. 
+ apisresolver "github.com/upbound/provider-azure/internal/apis" +) + +func (mg *API) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "Management", "ManagementList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.APIManagementName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.APIManagementNameRef, + Selector: mg.Spec.ForProvider.APIManagementNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.APIManagementName") + } + mg.Spec.ForProvider.APIManagementName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.APIManagementNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return 
nil +} + +// ResolveReferences of this APIDiagnostic. +func (mg *APIDiagnostic) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "Logger", "LoggerList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.APIManagementLoggerID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.APIManagementLoggerIDRef, + Selector: mg.Spec.ForProvider.APIManagementLoggerIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.APIManagementLoggerID") + } + mg.Spec.ForProvider.APIManagementLoggerID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.APIManagementLoggerIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "Management", "ManagementList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.APIManagementName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.APIManagementNameRef, + Selector: mg.Spec.ForProvider.APIManagementNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.APIManagementName") + } + mg.Spec.ForProvider.APIManagementName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.APIManagementNameRef = 
rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "API", "APIList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.APIName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.APINameRef, + Selector: mg.Spec.ForProvider.APINameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.APIName") + } + mg.Spec.ForProvider.APIName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.APINameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "Logger", "LoggerList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.InitProvider.APIManagementLoggerID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.APIManagementLoggerIDRef, + Selector: mg.Spec.InitProvider.APIManagementLoggerIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.APIManagementLoggerID") + } + mg.Spec.InitProvider.APIManagementLoggerID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.APIManagementLoggerIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this APIOperation. +func (mg *APIOperation) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "Management", "ManagementList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.APIManagementName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.APIManagementNameRef, + Selector: mg.Spec.ForProvider.APIManagementNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.APIManagementName") + } + mg.Spec.ForProvider.APIManagementName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.APIManagementNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "API", "APIList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, 
reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.APIName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.APINameRef, + Selector: mg.Spec.ForProvider.APINameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.APIName") + } + mg.Spec.ForProvider.APIName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.APINameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Backend. 
+func (mg *Backend) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "Management", "ManagementList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.APIManagementName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.APIManagementNameRef, + Selector: mg.Spec.ForProvider.APIManagementNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.APIManagementName") + } + mg.Spec.ForProvider.APIManagementName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.APIManagementNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Diagnostic. 
+func (mg *Diagnostic) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "Logger", "LoggerList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.APIManagementLoggerID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.APIManagementLoggerIDRef, + Selector: mg.Spec.ForProvider.APIManagementLoggerIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.APIManagementLoggerID") + } + mg.Spec.ForProvider.APIManagementLoggerID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.APIManagementLoggerIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "Management", "ManagementList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.APIManagementName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.APIManagementNameRef, + Selector: mg.Spec.ForProvider.APIManagementNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.APIManagementName") + } + mg.Spec.ForProvider.APIManagementName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.APIManagementNameRef = rsp.ResolvedReference + { + m, l, err = 
apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "Logger", "LoggerList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.APIManagementLoggerID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.APIManagementLoggerIDRef, + Selector: mg.Spec.InitProvider.APIManagementLoggerIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.APIManagementLoggerID") + } + mg.Spec.InitProvider.APIManagementLoggerID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.APIManagementLoggerIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Gateway. 
+func (mg *Gateway) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "Management", "ManagementList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.APIManagementID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.APIManagementIDRef, + Selector: mg.Spec.ForProvider.APIManagementIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.APIManagementID") + } + mg.Spec.ForProvider.APIManagementID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.APIManagementIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "Management", "ManagementList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.APIManagementID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.APIManagementIDRef, + Selector: mg.Spec.InitProvider.APIManagementIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.APIManagementID") + } + mg.Spec.InitProvider.APIManagementID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.APIManagementIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Logger. 
+func (mg *Logger) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "Management", "ManagementList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.APIManagementName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.APIManagementNameRef, + Selector: mg.Spec.ForProvider.APIManagementNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.APIManagementName") + } + mg.Spec.ForProvider.APIManagementName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.APIManagementNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("insights.azure.upbound.io", 
"v1beta1", "ApplicationInsights", "ApplicationInsightsList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.ResourceIDRef, + Selector: mg.Spec.ForProvider.ResourceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceID") + } + mg.Spec.ForProvider.ResourceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("insights.azure.upbound.io", "v1beta1", "ApplicationInsights", "ApplicationInsightsList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ResourceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.ResourceIDRef, + Selector: mg.Spec.InitProvider.ResourceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ResourceID") + } + mg.Spec.InitProvider.ResourceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ResourceIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Management. 
+func (mg *Management) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + for i3 := 0; i3 < len(mg.Spec.ForProvider.AdditionalLocation); i3++ { + if mg.Spec.ForProvider.AdditionalLocation[i3].VirtualNetworkConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.AdditionalLocation[i3].VirtualNetworkConfiguration.SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.AdditionalLocation[i3].VirtualNetworkConfiguration.SubnetIDRef, + Selector: mg.Spec.ForProvider.AdditionalLocation[i3].VirtualNetworkConfiguration.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.AdditionalLocation[i3].VirtualNetworkConfiguration.SubnetID") + } + mg.Spec.ForProvider.AdditionalLocation[i3].VirtualNetworkConfiguration.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.AdditionalLocation[i3].VirtualNetworkConfiguration.SubnetIDRef = rsp.ResolvedReference + + } + } + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: 
mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.VirtualNetworkConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.VirtualNetworkConfiguration.SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.VirtualNetworkConfiguration.SubnetIDRef, + Selector: mg.Spec.ForProvider.VirtualNetworkConfiguration.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VirtualNetworkConfiguration.SubnetID") + } + mg.Spec.ForProvider.VirtualNetworkConfiguration.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.VirtualNetworkConfiguration.SubnetIDRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.AdditionalLocation); i3++ { + if mg.Spec.InitProvider.AdditionalLocation[i3].VirtualNetworkConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.AdditionalLocation[i3].VirtualNetworkConfiguration.SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: 
mg.Spec.InitProvider.AdditionalLocation[i3].VirtualNetworkConfiguration.SubnetIDRef, + Selector: mg.Spec.InitProvider.AdditionalLocation[i3].VirtualNetworkConfiguration.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.AdditionalLocation[i3].VirtualNetworkConfiguration.SubnetID") + } + mg.Spec.InitProvider.AdditionalLocation[i3].VirtualNetworkConfiguration.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.AdditionalLocation[i3].VirtualNetworkConfiguration.SubnetIDRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.VirtualNetworkConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.VirtualNetworkConfiguration.SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.VirtualNetworkConfiguration.SubnetIDRef, + Selector: mg.Spec.InitProvider.VirtualNetworkConfiguration.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.VirtualNetworkConfiguration.SubnetID") + } + mg.Spec.InitProvider.VirtualNetworkConfiguration.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.VirtualNetworkConfiguration.SubnetIDRef = rsp.ResolvedReference + + } + + return nil +} + +// ResolveReferences of this NamedValue. 
+func (mg *NamedValue) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("apimanagement.azure.upbound.io", "v1beta2", "Management", "ManagementList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.APIManagementName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.APIManagementNameRef, + Selector: mg.Spec.ForProvider.APIManagementNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.APIManagementName") + } + mg.Spec.ForProvider.APIManagementName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.APIManagementNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} diff --git 
a/apis/apimanagement/v1beta2/zz_groupversion_info.go b/apis/apimanagement/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..a410e03c2 --- /dev/null +++ b/apis/apimanagement/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=apimanagement.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "apimanagement.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/apimanagement/v1beta2/zz_logger_terraformed.go b/apis/apimanagement/v1beta2/zz_logger_terraformed.go new file mode 100755 index 000000000..f7e33858e --- /dev/null +++ b/apis/apimanagement/v1beta2/zz_logger_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Logger +func (mg *Logger) GetTerraformResourceType() string { + return "azurerm_api_management_logger" +} + +// GetConnectionDetailsMapping for this Logger +func (tr *Logger) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"application_insights[*].instrumentation_key": "spec.forProvider.applicationInsights[*].instrumentationKeySecretRef", "eventhub[*].connection_string": "spec.forProvider.eventhub[*].connectionStringSecretRef"} +} + +// GetObservation of this Logger +func (tr *Logger) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Logger +func (tr *Logger) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Logger +func (tr *Logger) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Logger +func (tr *Logger) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Logger +func (tr *Logger) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Logger +func (tr *Logger) 
GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Logger +func (tr *Logger) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Logger using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Logger) LateInitialize(attrs []byte) (bool, error) { + params := &LoggerParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Logger) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/apimanagement/v1beta2/zz_logger_types.go b/apis/apimanagement/v1beta2/zz_logger_types.go new file mode 100755 index 000000000..0e58d52d4 --- /dev/null +++ b/apis/apimanagement/v1beta2/zz_logger_types.go @@ -0,0 +1,244 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ApplicationInsightsInitParameters struct { +} + +type ApplicationInsightsObservation struct { +} + +type ApplicationInsightsParameters struct { + + // The instrumentation key used to push data to Application Insights. + // +kubebuilder:validation:Required + InstrumentationKeySecretRef v1.SecretKeySelector `json:"instrumentationKeySecretRef" tf:"-"` +} + +type EventHubInitParameters struct { + + // The endpoint address of an EventHub Namespace. Required when client_id is set. + EndpointURI *string `json:"endpointUri,omitempty" tf:"endpoint_uri,omitempty"` + + // The name of an EventHub. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Client Id of the User Assigned Identity with the "Azure Event Hubs Data Sender" role to the target EventHub Namespace. Required when endpoint_uri is set. If not specified the System Assigned Identity will be used. + UserAssignedIdentityClientID *string `json:"userAssignedIdentityClientId,omitempty" tf:"user_assigned_identity_client_id,omitempty"` +} + +type EventHubObservation struct { + + // The endpoint address of an EventHub Namespace. Required when client_id is set. 
+ EndpointURI *string `json:"endpointUri,omitempty" tf:"endpoint_uri,omitempty"` + + // The name of an EventHub. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Client Id of the User Assigned Identity with the "Azure Event Hubs Data Sender" role to the target EventHub Namespace. Required when endpoint_uri is set. If not specified the System Assigned Identity will be used. + UserAssignedIdentityClientID *string `json:"userAssignedIdentityClientId,omitempty" tf:"user_assigned_identity_client_id,omitempty"` +} + +type EventHubParameters struct { + + // The connection string of an EventHub Namespace. + // +kubebuilder:validation:Optional + ConnectionStringSecretRef *v1.SecretKeySelector `json:"connectionStringSecretRef,omitempty" tf:"-"` + + // The endpoint address of an EventHub Namespace. Required when client_id is set. + // +kubebuilder:validation:Optional + EndpointURI *string `json:"endpointUri,omitempty" tf:"endpoint_uri,omitempty"` + + // The name of an EventHub. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The Client Id of the User Assigned Identity with the "Azure Event Hubs Data Sender" role to the target EventHub Namespace. Required when endpoint_uri is set. If not specified the System Assigned Identity will be used. + // +kubebuilder:validation:Optional + UserAssignedIdentityClientID *string `json:"userAssignedIdentityClientId,omitempty" tf:"user_assigned_identity_client_id,omitempty"` +} + +type LoggerInitParameters struct { + + // An application_insights block as documented below. Changing this forces a new resource to be created. + ApplicationInsights *ApplicationInsightsInitParameters `json:"applicationInsights,omitempty" tf:"application_insights,omitempty"` + + // Specifies whether records should be buffered in the Logger prior to publishing. Defaults to true. + Buffered *bool `json:"buffered,omitempty" tf:"buffered,omitempty"` + + // A description of this Logger. 
+ Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // An eventhub block as documented below. Changing this forces a new resource to be created. + EventHub *EventHubInitParameters `json:"eventhub,omitempty" tf:"eventhub,omitempty"` + + // The target resource id which will be linked in the API-Management portal page. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/insights/v1beta1.ApplicationInsights + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + ResourceID *string `json:"resourceId,omitempty" tf:"resource_id,omitempty"` + + // Reference to a ApplicationInsights in insights to populate resourceId. + // +kubebuilder:validation:Optional + ResourceIDRef *v1.Reference `json:"resourceIdRef,omitempty" tf:"-"` + + // Selector for a ApplicationInsights in insights to populate resourceId. + // +kubebuilder:validation:Optional + ResourceIDSelector *v1.Selector `json:"resourceIdSelector,omitempty" tf:"-"` +} + +type LoggerObservation struct { + + // The name of the API Management Service. Changing this forces a new resource to be created. + APIManagementName *string `json:"apiManagementName,omitempty" tf:"api_management_name,omitempty"` + + // An application_insights block as documented below. Changing this forces a new resource to be created. + ApplicationInsights *ApplicationInsightsParameters `json:"applicationInsights,omitempty" tf:"application_insights,omitempty"` + + // Specifies whether records should be buffered in the Logger prior to publishing. Defaults to true. + Buffered *bool `json:"buffered,omitempty" tf:"buffered,omitempty"` + + // A description of this Logger. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // An eventhub block as documented below. Changing this forces a new resource to be created. 
+ EventHub *EventHubObservation `json:"eventhub,omitempty" tf:"eventhub,omitempty"` + + // The ID of the API Management Logger. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name of the Resource Group in which the API Management Service exists. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // The target resource id which will be linked in the API-Management portal page. Changing this forces a new resource to be created. + ResourceID *string `json:"resourceId,omitempty" tf:"resource_id,omitempty"` +} + +type LoggerParameters struct { + + // The name of the API Management Service. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.Management + // +kubebuilder:validation:Optional + APIManagementName *string `json:"apiManagementName,omitempty" tf:"api_management_name,omitempty"` + + // Reference to a Management in apimanagement to populate apiManagementName. + // +kubebuilder:validation:Optional + APIManagementNameRef *v1.Reference `json:"apiManagementNameRef,omitempty" tf:"-"` + + // Selector for a Management in apimanagement to populate apiManagementName. + // +kubebuilder:validation:Optional + APIManagementNameSelector *v1.Selector `json:"apiManagementNameSelector,omitempty" tf:"-"` + + // An application_insights block as documented below. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ApplicationInsights *ApplicationInsightsParameters `json:"applicationInsights,omitempty" tf:"application_insights,omitempty"` + + // Specifies whether records should be buffered in the Logger prior to publishing. Defaults to true. + // +kubebuilder:validation:Optional + Buffered *bool `json:"buffered,omitempty" tf:"buffered,omitempty"` + + // A description of this Logger. 
+ // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // An eventhub block as documented below. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + EventHub *EventHubParameters `json:"eventhub,omitempty" tf:"eventhub,omitempty"` + + // The name of the Resource Group in which the API Management Service exists. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // The target resource id which will be linked in the API-Management portal page. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/insights/v1beta1.ApplicationInsights + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + ResourceID *string `json:"resourceId,omitempty" tf:"resource_id,omitempty"` + + // Reference to a ApplicationInsights in insights to populate resourceId. + // +kubebuilder:validation:Optional + ResourceIDRef *v1.Reference `json:"resourceIdRef,omitempty" tf:"-"` + + // Selector for a ApplicationInsights in insights to populate resourceId. 
+ // +kubebuilder:validation:Optional + ResourceIDSelector *v1.Selector `json:"resourceIdSelector,omitempty" tf:"-"` +} + +// LoggerSpec defines the desired state of Logger +type LoggerSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider LoggerParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider LoggerInitParameters `json:"initProvider,omitempty"` +} + +// LoggerStatus defines the observed state of Logger. +type LoggerStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider LoggerObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Logger is the Schema for the Loggers API. Manages a Logger within an API Management Service. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type Logger struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec LoggerSpec `json:"spec"` + Status LoggerStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// LoggerList contains a list of Loggers +type LoggerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Logger `json:"items"` +} + +// Repository type metadata. +var ( + Logger_Kind = "Logger" + Logger_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Logger_Kind}.String() + Logger_KindAPIVersion = Logger_Kind + "." + CRDGroupVersion.String() + Logger_GroupVersionKind = CRDGroupVersion.WithKind(Logger_Kind) +) + +func init() { + SchemeBuilder.Register(&Logger{}, &LoggerList{}) +} diff --git a/apis/apimanagement/v1beta2/zz_management_terraformed.go b/apis/apimanagement/v1beta2/zz_management_terraformed.go new file mode 100755 index 000000000..56ff32ecd --- /dev/null +++ b/apis/apimanagement/v1beta2/zz_management_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Management +func (mg *Management) GetTerraformResourceType() string { + return "azurerm_api_management" +} + +// GetConnectionDetailsMapping for this Management +func (tr *Management) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"certificate[*].certificate_password": "spec.forProvider.certificate[*].certificatePasswordSecretRef", "certificate[*].encoded_certificate": "spec.forProvider.certificate[*].encodedCertificateSecretRef", "delegation[*].validation_key": "spec.forProvider.delegation[*].validationKeySecretRef", "hostname_configuration[*].developer_portal[*].certificate": "status.atProvider.hostnameConfiguration[*].developerPortal[*].certificate", "hostname_configuration[*].developer_portal[*].certificate_password": "status.atProvider.hostnameConfiguration[*].developerPortal[*].certificatePassword", "hostname_configuration[*].management[*].certificate": "status.atProvider.hostnameConfiguration[*].management[*].certificate", "hostname_configuration[*].management[*].certificate_password": "status.atProvider.hostnameConfiguration[*].management[*].certificatePassword", "hostname_configuration[*].portal[*].certificate": "status.atProvider.hostnameConfiguration[*].portal[*].certificate", "hostname_configuration[*].portal[*].certificate_password": "status.atProvider.hostnameConfiguration[*].portal[*].certificatePassword", "hostname_configuration[*].proxy[*].certificate": "status.atProvider.hostnameConfiguration[*].proxy[*].certificate", "hostname_configuration[*].proxy[*].certificate_password": "status.atProvider.hostnameConfiguration[*].proxy[*].certificatePassword", "hostname_configuration[*].scm[*].certificate": "status.atProvider.hostnameConfiguration[*].scm[*].certificate", 
"hostname_configuration[*].scm[*].certificate_password": "status.atProvider.hostnameConfiguration[*].scm[*].certificatePassword", "tenant_access[*].primary_key": "status.atProvider.tenantAccess[*].primaryKey", "tenant_access[*].secondary_key": "status.atProvider.tenantAccess[*].secondaryKey"} +} + +// GetObservation of this Management +func (tr *Management) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Management +func (tr *Management) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Management +func (tr *Management) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Management +func (tr *Management) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Management +func (tr *Management) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Management +func (tr *Management) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Management +func (tr *Management) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := 
tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Management using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Management) LateInitialize(attrs []byte) (bool, error) { + params := &ManagementParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Management) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/apimanagement/v1beta2/zz_management_types.go b/apis/apimanagement/v1beta2/zz_management_types.go new file mode 100755 index 000000000..8161ffda4 --- /dev/null +++ b/apis/apimanagement/v1beta2/zz_management_types.go @@ -0,0 +1,1198 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AdditionalLocationInitParameters struct { + + // The number of compute units in this region. Defaults to the capacity of the main region. + Capacity *float64 `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // Only valid for an Api Management service deployed in multiple locations. This can be used to disable the gateway in this additional location. + GatewayDisabled *bool `json:"gatewayDisabled,omitempty" tf:"gateway_disabled,omitempty"` + + // The name of the Azure Region in which the API Management Service should be expanded to. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // ID of a standard SKU IPv4 Public IP. + PublicIPAddressID *string `json:"publicIpAddressId,omitempty" tf:"public_ip_address_id,omitempty"` + + // A virtual_network_configuration block as defined below. Required when virtual_network_type is External or Internal. + VirtualNetworkConfiguration *VirtualNetworkConfigurationInitParameters `json:"virtualNetworkConfiguration,omitempty" tf:"virtual_network_configuration,omitempty"` + + // A list of availability zones. Changing this forces a new resource to be created. 
+ // +listType=set + Zones []*string `json:"zones,omitempty" tf:"zones,omitempty"` +} + +type AdditionalLocationObservation struct { + + // The number of compute units in this region. Defaults to the capacity of the main region. + Capacity *float64 `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // Only valid for an Api Management service deployed in multiple locations. This can be used to disable the gateway in this additional location. + GatewayDisabled *bool `json:"gatewayDisabled,omitempty" tf:"gateway_disabled,omitempty"` + + // The URL of the Regional Gateway for the API Management Service in the specified region. + GatewayRegionalURL *string `json:"gatewayRegionalUrl,omitempty" tf:"gateway_regional_url,omitempty"` + + // The name of the Azure Region in which the API Management Service should be expanded to. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The Private IP addresses of the API Management Service. Available only when the API Manager instance is using Virtual Network mode. + PrivateIPAddresses []*string `json:"privateIpAddresses,omitempty" tf:"private_ip_addresses,omitempty"` + + // ID of a standard SKU IPv4 Public IP. + PublicIPAddressID *string `json:"publicIpAddressId,omitempty" tf:"public_ip_address_id,omitempty"` + + // Public Static Load Balanced IP addresses of the API Management service in the additional location. Available only for Basic, Standard and Premium SKU. + PublicIPAddresses []*string `json:"publicIpAddresses,omitempty" tf:"public_ip_addresses,omitempty"` + + // A virtual_network_configuration block as defined below. Required when virtual_network_type is External or Internal. + VirtualNetworkConfiguration *VirtualNetworkConfigurationObservation `json:"virtualNetworkConfiguration,omitempty" tf:"virtual_network_configuration,omitempty"` + + // A list of availability zones. Changing this forces a new resource to be created. 
+ // +listType=set + Zones []*string `json:"zones,omitempty" tf:"zones,omitempty"` +} + +type AdditionalLocationParameters struct { + + // The number of compute units in this region. Defaults to the capacity of the main region. + // +kubebuilder:validation:Optional + Capacity *float64 `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // Only valid for an Api Management service deployed in multiple locations. This can be used to disable the gateway in this additional location. + // +kubebuilder:validation:Optional + GatewayDisabled *bool `json:"gatewayDisabled,omitempty" tf:"gateway_disabled,omitempty"` + + // The name of the Azure Region in which the API Management Service should be expanded to. + // +kubebuilder:validation:Optional + Location *string `json:"location" tf:"location,omitempty"` + + // ID of a standard SKU IPv4 Public IP. + // +kubebuilder:validation:Optional + PublicIPAddressID *string `json:"publicIpAddressId,omitempty" tf:"public_ip_address_id,omitempty"` + + // A virtual_network_configuration block as defined below. Required when virtual_network_type is External or Internal. + // +kubebuilder:validation:Optional + VirtualNetworkConfiguration *VirtualNetworkConfigurationParameters `json:"virtualNetworkConfiguration,omitempty" tf:"virtual_network_configuration,omitempty"` + + // A list of availability zones. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + // +listType=set + Zones []*string `json:"zones,omitempty" tf:"zones,omitempty"` +} + +type CertificateInitParameters struct { + + // The name of the Certificate Store where this certificate should be stored. Possible values are CertificateAuthority and Root. + StoreName *string `json:"storeName,omitempty" tf:"store_name,omitempty"` +} + +type CertificateObservation struct { + + // The expiration date of the certificate in RFC3339 format: 2000-01-02T03:04:05Z. 
+ Expiry *string `json:"expiry,omitempty" tf:"expiry,omitempty"` + + // The name of the Certificate Store where this certificate should be stored. Possible values are CertificateAuthority and Root. + StoreName *string `json:"storeName,omitempty" tf:"store_name,omitempty"` + + // The subject of the certificate. + Subject *string `json:"subject,omitempty" tf:"subject,omitempty"` + + // The thumbprint of the certificate. + Thumbprint *string `json:"thumbprint,omitempty" tf:"thumbprint,omitempty"` +} + +type CertificateParameters struct { + + // The password for the certificate. + // +kubebuilder:validation:Optional + CertificatePasswordSecretRef *v1.SecretKeySelector `json:"certificatePasswordSecretRef,omitempty" tf:"-"` + + // The Base64 Encoded PFX or Base64 Encoded X.509 Certificate. + // +kubebuilder:validation:Required + EncodedCertificateSecretRef v1.SecretKeySelector `json:"encodedCertificateSecretRef" tf:"-"` + + // The name of the Certificate Store where this certificate should be stored. Possible values are CertificateAuthority and Root. + // +kubebuilder:validation:Optional + StoreName *string `json:"storeName" tf:"store_name,omitempty"` +} + +type DelegationInitParameters struct { + + // Should subscription requests be delegated to an external url? Defaults to false. + SubscriptionsEnabled *bool `json:"subscriptionsEnabled,omitempty" tf:"subscriptions_enabled,omitempty"` + + // The delegation URL. + URL *string `json:"url,omitempty" tf:"url,omitempty"` + + // Should user registration requests be delegated to an external url? Defaults to false. + UserRegistrationEnabled *bool `json:"userRegistrationEnabled,omitempty" tf:"user_registration_enabled,omitempty"` +} + +type DelegationObservation struct { + + // Should subscription requests be delegated to an external url? Defaults to false. + SubscriptionsEnabled *bool `json:"subscriptionsEnabled,omitempty" tf:"subscriptions_enabled,omitempty"` + + // The delegation URL. 
+ URL *string `json:"url,omitempty" tf:"url,omitempty"` + + // Should user registration requests be delegated to an external url? Defaults to false. + UserRegistrationEnabled *bool `json:"userRegistrationEnabled,omitempty" tf:"user_registration_enabled,omitempty"` +} + +type DelegationParameters struct { + + // Should subscription requests be delegated to an external url? Defaults to false. + // +kubebuilder:validation:Optional + SubscriptionsEnabled *bool `json:"subscriptionsEnabled,omitempty" tf:"subscriptions_enabled,omitempty"` + + // The delegation URL. + // +kubebuilder:validation:Optional + URL *string `json:"url,omitempty" tf:"url,omitempty"` + + // Should user registration requests be delegated to an external url? Defaults to false. + // +kubebuilder:validation:Optional + UserRegistrationEnabled *bool `json:"userRegistrationEnabled,omitempty" tf:"user_registration_enabled,omitempty"` + + // A base64-encoded validation key to validate, that a request is coming from Azure API Management. + // +kubebuilder:validation:Optional + ValidationKeySecretRef *v1.SecretKeySelector `json:"validationKeySecretRef,omitempty" tf:"-"` +} + +type DeveloperPortalInitParameters struct { +} + +type DeveloperPortalObservation struct { + + // The source of the certificate. + CertificateSource *string `json:"certificateSource,omitempty" tf:"certificate_source,omitempty"` + + // The status of the certificate. + CertificateStatus *string `json:"certificateStatus,omitempty" tf:"certificate_status,omitempty"` + + // The expiration date of the certificate in RFC3339 format: 2000-01-02T03:04:05Z. + Expiry *string `json:"expiry,omitempty" tf:"expiry,omitempty"` + + // The Hostname to use for the Management API. + HostName *string `json:"hostName,omitempty" tf:"host_name,omitempty"` + + // The ID of the Key Vault Secret containing the SSL Certificate, which must be should be of the type application/x-pkcs12. 
+	KeyVaultID *string `json:"keyVaultId,omitempty" tf:"key_vault_id,omitempty"`
+
+	// Should Client Certificate Negotiation be enabled for this Hostname? Defaults to false.
+	NegotiateClientCertificate *bool `json:"negotiateClientCertificate,omitempty" tf:"negotiate_client_certificate,omitempty"`
+
+	// System or User Assigned Managed identity clientId as generated by Azure AD, which has GET access to the keyVault containing the SSL certificate.
+	SSLKeyvaultIdentityClientID *string `json:"sslKeyvaultIdentityClientId,omitempty" tf:"ssl_keyvault_identity_client_id,omitempty"`
+
+	// The subject of the certificate.
+	Subject *string `json:"subject,omitempty" tf:"subject,omitempty"`
+
+	// The thumbprint of the certificate.
+	Thumbprint *string `json:"thumbprint,omitempty" tf:"thumbprint,omitempty"`
+}
+
+type DeveloperPortalParameters struct {
+}
+
+type HostNameConfigurationInitParameters struct {
+}
+
+type HostNameConfigurationManagementInitParameters struct {
+}
+
+type HostNameConfigurationManagementObservation struct {
+
+	// The source of the certificate.
+	CertificateSource *string `json:"certificateSource,omitempty" tf:"certificate_source,omitempty"`
+
+	// The status of the certificate.
+	CertificateStatus *string `json:"certificateStatus,omitempty" tf:"certificate_status,omitempty"`
+
+	// The expiration date of the certificate in RFC3339 format: 2000-01-02T03:04:05Z.
+	Expiry *string `json:"expiry,omitempty" tf:"expiry,omitempty"`
+
+	// The Hostname to use for the Management API.
+	HostName *string `json:"hostName,omitempty" tf:"host_name,omitempty"`
+
+	// The ID of the Key Vault Secret containing the SSL Certificate, which must be of the type application/x-pkcs12.
+	KeyVaultID *string `json:"keyVaultId,omitempty" tf:"key_vault_id,omitempty"`
+
+	// Should Client Certificate Negotiation be enabled for this Hostname? Defaults to false.
+ NegotiateClientCertificate *bool `json:"negotiateClientCertificate,omitempty" tf:"negotiate_client_certificate,omitempty"` + + // System or User Assigned Managed identity clientId as generated by Azure AD, which has GET access to the keyVault containing the SSL certificate. + SSLKeyvaultIdentityClientID *string `json:"sslKeyvaultIdentityClientId,omitempty" tf:"ssl_keyvault_identity_client_id,omitempty"` + + // The subject of the certificate. + Subject *string `json:"subject,omitempty" tf:"subject,omitempty"` + + // The thumbprint of the certificate. + Thumbprint *string `json:"thumbprint,omitempty" tf:"thumbprint,omitempty"` +} + +type HostNameConfigurationManagementParameters struct { +} + +type HostNameConfigurationObservation struct { + + // One or more developer_portal blocks as documented below. + DeveloperPortal []DeveloperPortalObservation `json:"developerPortal,omitempty" tf:"developer_portal,omitempty"` + + // One or more management blocks as documented below. + Management []HostNameConfigurationManagementObservation `json:"management,omitempty" tf:"management,omitempty"` + + // One or more portal blocks as documented below. + Portal []PortalObservation `json:"portal,omitempty" tf:"portal,omitempty"` + + // One or more proxy blocks as documented below. + Proxy []ProxyObservation `json:"proxy,omitempty" tf:"proxy,omitempty"` + + // One or more scm blocks as documented below. + Scm []ScmObservation `json:"scm,omitempty" tf:"scm,omitempty"` +} + +type HostNameConfigurationParameters struct { +} + +type IdentityInitParameters struct { + + // A list of User Assigned Managed Identity IDs to be assigned to this API Management Service. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this API Management Service. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityObservation struct { + + // A list of User Assigned Managed Identity IDs to be assigned to this API Management Service. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Principal ID associated with this Managed Service Identity. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID associated with this Managed Service Identity. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this API Management Service. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityParameters struct { + + // A list of User Assigned Managed Identity IDs to be assigned to this API Management Service. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this API Management Service. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type ManagementInitParameters struct { + + // One or more additional_location blocks as defined below. + AdditionalLocation []AdditionalLocationInitParameters `json:"additionalLocation,omitempty" tf:"additional_location,omitempty"` + + // One or more certificate blocks (up to 10) as defined below. + Certificate []CertificateInitParameters `json:"certificate,omitempty" tf:"certificate,omitempty"` + + // Enforce a client certificate to be presented on each request to the gateway? This is only supported when SKU type is Consumption. 
+ ClientCertificateEnabled *bool `json:"clientCertificateEnabled,omitempty" tf:"client_certificate_enabled,omitempty"` + + // A delegation block as defined below. + Delegation *DelegationInitParameters `json:"delegation,omitempty" tf:"delegation,omitempty"` + + // Disable the gateway in main region? This is only supported when additional_location is set. + GatewayDisabled *bool `json:"gatewayDisabled,omitempty" tf:"gateway_disabled,omitempty"` + + // An identity block as defined below. + Identity *IdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The Azure location where the API Management Service exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The version which the control plane API calls to API Management service are limited with version equal to or newer than. + MinAPIVersion *string `json:"minApiVersion,omitempty" tf:"min_api_version,omitempty"` + + // Email address from which the notification will be sent. + NotificationSenderEmail *string `json:"notificationSenderEmail,omitempty" tf:"notification_sender_email,omitempty"` + + // A policy block as defined below. + Policy []PolicyInitParameters `json:"policy,omitempty" tf:"policy,omitempty"` + + // A protocols block as defined below. + Protocols *ProtocolsInitParameters `json:"protocols,omitempty" tf:"protocols,omitempty"` + + // ID of a standard SKU IPv4 Public IP. + PublicIPAddressID *string `json:"publicIpAddressId,omitempty" tf:"public_ip_address_id,omitempty"` + + // Is public access to the service allowed? Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The email of publisher/company. + PublisherEmail *string `json:"publisherEmail,omitempty" tf:"publisher_email,omitempty"` + + // The name of publisher/company. 
+ PublisherName *string `json:"publisherName,omitempty" tf:"publisher_name,omitempty"` + + // A security block as defined below. + Security *SecurityInitParameters `json:"security,omitempty" tf:"security,omitempty"` + + // A sign_in block as defined below. + SignIn *SignInInitParameters `json:"signIn,omitempty" tf:"sign_in,omitempty"` + + // A sign_up block as defined below. + SignUp *SignUpInitParameters `json:"signUp,omitempty" tf:"sign_up,omitempty"` + + // sku_name is a string consisting of two parts separated by an underscore(_). The first part is the name, valid values include: Consumption, Developer, Basic, Standard and Premium. The second part is the capacity (e.g. the number of deployed units of the sku), which must be a positive integer (e.g. Developer_1). + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // A mapping of tags assigned to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A tenant_access block as defined below. + TenantAccess *TenantAccessInitParameters `json:"tenantAccess,omitempty" tf:"tenant_access,omitempty"` + + // A virtual_network_configuration block as defined below. Required when virtual_network_type is External or Internal. + VirtualNetworkConfiguration *ManagementVirtualNetworkConfigurationInitParameters `json:"virtualNetworkConfiguration,omitempty" tf:"virtual_network_configuration,omitempty"` + + // The type of virtual network you want to use, valid values include: None, External, Internal. Defaults to None. + VirtualNetworkType *string `json:"virtualNetworkType,omitempty" tf:"virtual_network_type,omitempty"` + + // Specifies a list of Availability Zones in which this API Management service should be located. + // +listType=set + Zones []*string `json:"zones,omitempty" tf:"zones,omitempty"` +} + +type ManagementObservation struct { + + // One or more additional_location blocks as defined below. 
+ AdditionalLocation []AdditionalLocationObservation `json:"additionalLocation,omitempty" tf:"additional_location,omitempty"` + + // One or more certificate blocks (up to 10) as defined below. + Certificate []CertificateObservation `json:"certificate,omitempty" tf:"certificate,omitempty"` + + // Enforce a client certificate to be presented on each request to the gateway? This is only supported when SKU type is Consumption. + ClientCertificateEnabled *bool `json:"clientCertificateEnabled,omitempty" tf:"client_certificate_enabled,omitempty"` + + // A delegation block as defined below. + Delegation *DelegationObservation `json:"delegation,omitempty" tf:"delegation,omitempty"` + + // The URL for the Developer Portal associated with this API Management service. + DeveloperPortalURL *string `json:"developerPortalUrl,omitempty" tf:"developer_portal_url,omitempty"` + + // Disable the gateway in main region? This is only supported when additional_location is set. + GatewayDisabled *bool `json:"gatewayDisabled,omitempty" tf:"gateway_disabled,omitempty"` + + // The Region URL for the Gateway of the API Management Service. + GatewayRegionalURL *string `json:"gatewayRegionalUrl,omitempty" tf:"gateway_regional_url,omitempty"` + + // The URL of the Gateway for the API Management Service. + GatewayURL *string `json:"gatewayUrl,omitempty" tf:"gateway_url,omitempty"` + + // A hostname_configuration block as defined below. + HostNameConfiguration *HostNameConfigurationObservation `json:"hostnameConfiguration,omitempty" tf:"hostname_configuration,omitempty"` + + // The ID of the API Management Service. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. + Identity *IdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // The Azure location where the API Management Service exists. Changing this forces a new resource to be created. 
+ Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The URL for the Management API associated with this API Management service. + ManagementAPIURL *string `json:"managementApiUrl,omitempty" tf:"management_api_url,omitempty"` + + // The version which the control plane API calls to API Management service are limited with version equal to or newer than. + MinAPIVersion *string `json:"minApiVersion,omitempty" tf:"min_api_version,omitempty"` + + // Email address from which the notification will be sent. + NotificationSenderEmail *string `json:"notificationSenderEmail,omitempty" tf:"notification_sender_email,omitempty"` + + // A policy block as defined below. + Policy []PolicyObservation `json:"policy,omitempty" tf:"policy,omitempty"` + + // The URL for the Publisher Portal associated with this API Management service. + PortalURL *string `json:"portalUrl,omitempty" tf:"portal_url,omitempty"` + + // The Private IP addresses of the API Management Service. + PrivateIPAddresses []*string `json:"privateIpAddresses,omitempty" tf:"private_ip_addresses,omitempty"` + + // A protocols block as defined below. + Protocols *ProtocolsObservation `json:"protocols,omitempty" tf:"protocols,omitempty"` + + // ID of a standard SKU IPv4 Public IP. + PublicIPAddressID *string `json:"publicIpAddressId,omitempty" tf:"public_ip_address_id,omitempty"` + + // The Public IP addresses of the API Management Service. + PublicIPAddresses []*string `json:"publicIpAddresses,omitempty" tf:"public_ip_addresses,omitempty"` + + // Is public access to the service allowed? Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The email of publisher/company. + PublisherEmail *string `json:"publisherEmail,omitempty" tf:"publisher_email,omitempty"` + + // The name of publisher/company. 
+	PublisherName *string `json:"publisherName,omitempty" tf:"publisher_name,omitempty"`
+
+	// The name of the Resource Group in which the API Management Service should exist. Changing this forces a new resource to be created.
+	ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"`
+
+	// The URL for the SCM (Source Code Management) Endpoint associated with this API Management service.
+	ScmURL *string `json:"scmUrl,omitempty" tf:"scm_url,omitempty"`
+
+	// A security block as defined below.
+	Security *SecurityObservation `json:"security,omitempty" tf:"security,omitempty"`
+
+	// A sign_in block as defined below.
+	SignIn *SignInObservation `json:"signIn,omitempty" tf:"sign_in,omitempty"`
+
+	// A sign_up block as defined below.
+	SignUp *SignUpObservation `json:"signUp,omitempty" tf:"sign_up,omitempty"`
+
+	// sku_name is a string consisting of two parts separated by an underscore(_). The first part is the name, valid values include: Consumption, Developer, Basic, Standard and Premium. The second part is the capacity (e.g. the number of deployed units of the sku), which must be a positive integer (e.g. Developer_1).
+	SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"`
+
+	// A mapping of tags assigned to the resource.
+	// +mapType=granular
+	Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"`
+
+	// A tenant_access block as defined below.
+	TenantAccess *TenantAccessObservation `json:"tenantAccess,omitempty" tf:"tenant_access,omitempty"`
+
+	// A virtual_network_configuration block as defined below. Required when virtual_network_type is External or Internal.
+	VirtualNetworkConfiguration *ManagementVirtualNetworkConfigurationObservation `json:"virtualNetworkConfiguration,omitempty" tf:"virtual_network_configuration,omitempty"`
+
+	// The type of virtual network you want to use, valid values include: None, External, Internal. Defaults to None.
+ VirtualNetworkType *string `json:"virtualNetworkType,omitempty" tf:"virtual_network_type,omitempty"` + + // Specifies a list of Availability Zones in which this API Management service should be located. + // +listType=set + Zones []*string `json:"zones,omitempty" tf:"zones,omitempty"` +} + +type ManagementParameters struct { + + // One or more additional_location blocks as defined below. + // +kubebuilder:validation:Optional + AdditionalLocation []AdditionalLocationParameters `json:"additionalLocation,omitempty" tf:"additional_location,omitempty"` + + // One or more certificate blocks (up to 10) as defined below. + // +kubebuilder:validation:Optional + Certificate []CertificateParameters `json:"certificate,omitempty" tf:"certificate,omitempty"` + + // Enforce a client certificate to be presented on each request to the gateway? This is only supported when SKU type is Consumption. + // +kubebuilder:validation:Optional + ClientCertificateEnabled *bool `json:"clientCertificateEnabled,omitempty" tf:"client_certificate_enabled,omitempty"` + + // A delegation block as defined below. + // +kubebuilder:validation:Optional + Delegation *DelegationParameters `json:"delegation,omitempty" tf:"delegation,omitempty"` + + // Disable the gateway in main region? This is only supported when additional_location is set. + // +kubebuilder:validation:Optional + GatewayDisabled *bool `json:"gatewayDisabled,omitempty" tf:"gateway_disabled,omitempty"` + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *IdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The Azure location where the API Management Service exists. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The version which the control plane API calls to API Management service are limited with version equal to or newer than. 
+	// +kubebuilder:validation:Optional
+	MinAPIVersion *string `json:"minApiVersion,omitempty" tf:"min_api_version,omitempty"`
+
+	// Email address from which the notification will be sent.
+	// +kubebuilder:validation:Optional
+	NotificationSenderEmail *string `json:"notificationSenderEmail,omitempty" tf:"notification_sender_email,omitempty"`
+
+	// A policy block as defined below.
+	// +kubebuilder:validation:Optional
+	Policy []PolicyParameters `json:"policy,omitempty" tf:"policy,omitempty"`
+
+	// A protocols block as defined below.
+	// +kubebuilder:validation:Optional
+	Protocols *ProtocolsParameters `json:"protocols,omitempty" tf:"protocols,omitempty"`
+
+	// ID of a standard SKU IPv4 Public IP.
+	// +kubebuilder:validation:Optional
+	PublicIPAddressID *string `json:"publicIpAddressId,omitempty" tf:"public_ip_address_id,omitempty"`
+
+	// Is public access to the service allowed? Defaults to true.
+	// +kubebuilder:validation:Optional
+	PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"`
+
+	// The email of publisher/company.
+	// +kubebuilder:validation:Optional
+	PublisherEmail *string `json:"publisherEmail,omitempty" tf:"publisher_email,omitempty"`
+
+	// The name of publisher/company.
+	// +kubebuilder:validation:Optional
+	PublisherName *string `json:"publisherName,omitempty" tf:"publisher_name,omitempty"`
+
+	// The name of the Resource Group in which the API Management Service should exist. Changing this forces a new resource to be created.
+	// +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup
+	// +kubebuilder:validation:Optional
+	ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"`
+
+	// Reference to a ResourceGroup in azure to populate resourceGroupName.
+ // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A security block as defined below. + // +kubebuilder:validation:Optional + Security *SecurityParameters `json:"security,omitempty" tf:"security,omitempty"` + + // A sign_in block as defined below. + // +kubebuilder:validation:Optional + SignIn *SignInParameters `json:"signIn,omitempty" tf:"sign_in,omitempty"` + + // A sign_up block as defined below. + // +kubebuilder:validation:Optional + SignUp *SignUpParameters `json:"signUp,omitempty" tf:"sign_up,omitempty"` + + // sku_name is a string consisting of two parts separated by an underscore(_). The first part is the name, valid values include: Consumption, Developer, Basic, Standard and Premium. The second part is the capacity (e.g. the number of deployed units of the sku), which must be a positive integer (e.g. Developer_1). + // +kubebuilder:validation:Optional + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // A mapping of tags assigned to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A tenant_access block as defined below. + // +kubebuilder:validation:Optional + TenantAccess *TenantAccessParameters `json:"tenantAccess,omitempty" tf:"tenant_access,omitempty"` + + // A virtual_network_configuration block as defined below. Required when virtual_network_type is External or Internal. 
+ // +kubebuilder:validation:Optional + VirtualNetworkConfiguration *ManagementVirtualNetworkConfigurationParameters `json:"virtualNetworkConfiguration,omitempty" tf:"virtual_network_configuration,omitempty"` + + // The type of virtual network you want to use, valid values include: None, External, Internal. Defaults to None. + // +kubebuilder:validation:Optional + VirtualNetworkType *string `json:"virtualNetworkType,omitempty" tf:"virtual_network_type,omitempty"` + + // Specifies a list of Availability Zones in which this API Management service should be located. + // +kubebuilder:validation:Optional + // +listType=set + Zones []*string `json:"zones,omitempty" tf:"zones,omitempty"` +} + +type ManagementVirtualNetworkConfigurationInitParameters struct { + + // The id of the subnet that will be used for the API Management. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` +} + +type ManagementVirtualNetworkConfigurationObservation struct { + + // The id of the subnet that will be used for the API Management. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` +} + +type ManagementVirtualNetworkConfigurationParameters struct { + + // The id of the subnet that will be used for the API Management. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` +} + +type PolicyInitParameters struct { + + // The XML Content for this Policy. + XMLContent *string `json:"xmlContent,omitempty" tf:"xml_content"` + + // A link to an API Management Policy XML Document, which must be publicly available. + XMLLink *string `json:"xmlLink,omitempty" tf:"xml_link"` +} + +type PolicyObservation struct { + + // The XML Content for this Policy. + XMLContent *string `json:"xmlContent,omitempty" tf:"xml_content,omitempty"` + + // A link to an API Management Policy XML Document, which must be publicly available. + XMLLink *string `json:"xmlLink,omitempty" tf:"xml_link,omitempty"` +} + +type PolicyParameters struct { + + // The XML Content for this Policy. + // +kubebuilder:validation:Optional + XMLContent *string `json:"xmlContent,omitempty" tf:"xml_content"` + + // A link to an API Management Policy XML Document, which must be publicly available. + // +kubebuilder:validation:Optional + XMLLink *string `json:"xmlLink,omitempty" tf:"xml_link"` +} + +type PortalInitParameters struct { +} + +type PortalObservation struct { + + // The source of the certificate. + CertificateSource *string `json:"certificateSource,omitempty" tf:"certificate_source,omitempty"` + + // The status of the certificate. 
+	CertificateStatus *string `json:"certificateStatus,omitempty" tf:"certificate_status,omitempty"`
+
+	// The expiration date of the certificate in RFC3339 format: 2000-01-02T03:04:05Z.
+	Expiry *string `json:"expiry,omitempty" tf:"expiry,omitempty"`
+
+	// The Hostname to use for the Management API.
+	HostName *string `json:"hostName,omitempty" tf:"host_name,omitempty"`
+
+	// The ID of the Key Vault Secret containing the SSL Certificate, which must be of the type application/x-pkcs12.
+	KeyVaultID *string `json:"keyVaultId,omitempty" tf:"key_vault_id,omitempty"`
+
+	// Should Client Certificate Negotiation be enabled for this Hostname? Defaults to false.
+	NegotiateClientCertificate *bool `json:"negotiateClientCertificate,omitempty" tf:"negotiate_client_certificate,omitempty"`
+
+	// System or User Assigned Managed identity clientId as generated by Azure AD, which has GET access to the keyVault containing the SSL certificate.
+	SSLKeyvaultIdentityClientID *string `json:"sslKeyvaultIdentityClientId,omitempty" tf:"ssl_keyvault_identity_client_id,omitempty"`
+
+	// The subject of the certificate.
+	Subject *string `json:"subject,omitempty" tf:"subject,omitempty"`
+
+	// The thumbprint of the certificate.
+	Thumbprint *string `json:"thumbprint,omitempty" tf:"thumbprint,omitempty"`
+}
+
+type PortalParameters struct {
+}
+
+type ProtocolsInitParameters struct {
+
+	// Should HTTP/2 be supported by the API Management Service? Defaults to false.
+	EnableHttp2 *bool `json:"enableHttp2,omitempty" tf:"enable_http2,omitempty"`
+}
+
+type ProtocolsObservation struct {
+
+	// Should HTTP/2 be supported by the API Management Service? Defaults to false.
+	EnableHttp2 *bool `json:"enableHttp2,omitempty" tf:"enable_http2,omitempty"`
+}
+
+type ProtocolsParameters struct {
+
+	// Should HTTP/2 be supported by the API Management Service? Defaults to false.
+	// +kubebuilder:validation:Optional
+	EnableHttp2 *bool `json:"enableHttp2,omitempty" tf:"enable_http2,omitempty"`
+}
+
+type ProxyInitParameters struct {
+}
+
+type ProxyObservation struct {
+
+	// The source of the certificate.
+	CertificateSource *string `json:"certificateSource,omitempty" tf:"certificate_source,omitempty"`
+
+	// The status of the certificate.
+	CertificateStatus *string `json:"certificateStatus,omitempty" tf:"certificate_status,omitempty"`
+
+	// Is the certificate associated with this Hostname the Default SSL Certificate? This is used when an SNI header isn't specified by a client. Defaults to false.
+	DefaultSSLBinding *bool `json:"defaultSslBinding,omitempty" tf:"default_ssl_binding,omitempty"`
+
+	// The expiration date of the certificate in RFC3339 format: 2000-01-02T03:04:05Z.
+	Expiry *string `json:"expiry,omitempty" tf:"expiry,omitempty"`
+
+	// The Hostname to use for the Management API.
+	HostName *string `json:"hostName,omitempty" tf:"host_name,omitempty"`
+
+	// The ID of the Key Vault Secret containing the SSL Certificate, which must be of the type application/x-pkcs12.
+	KeyVaultID *string `json:"keyVaultId,omitempty" tf:"key_vault_id,omitempty"`
+
+	// Should Client Certificate Negotiation be enabled for this Hostname? Defaults to false.
+	NegotiateClientCertificate *bool `json:"negotiateClientCertificate,omitempty" tf:"negotiate_client_certificate,omitempty"`
+
+	// System or User Assigned Managed identity clientId as generated by Azure AD, which has GET access to the keyVault containing the SSL certificate.
+	SSLKeyvaultIdentityClientID *string `json:"sslKeyvaultIdentityClientId,omitempty" tf:"ssl_keyvault_identity_client_id,omitempty"`
+
+	// The subject of the certificate.
+	Subject *string `json:"subject,omitempty" tf:"subject,omitempty"`
+
+	// The thumbprint of the certificate.
+	Thumbprint *string `json:"thumbprint,omitempty" tf:"thumbprint,omitempty"`
+}
+
+type ProxyParameters struct {
+}
+
+type ScmInitParameters struct {
+}
+
+type ScmObservation struct {
+
+	// The source of the certificate.
+	CertificateSource *string `json:"certificateSource,omitempty" tf:"certificate_source,omitempty"`
+
+	// The status of the certificate.
+	CertificateStatus *string `json:"certificateStatus,omitempty" tf:"certificate_status,omitempty"`
+
+	// The expiration date of the certificate in RFC3339 format: 2000-01-02T03:04:05Z.
+	Expiry *string `json:"expiry,omitempty" tf:"expiry,omitempty"`
+
+	// The Hostname to use for the Management API.
+	HostName *string `json:"hostName,omitempty" tf:"host_name,omitempty"`
+
+	// The ID of the Key Vault Secret containing the SSL Certificate, which must be of the type application/x-pkcs12.
+	KeyVaultID *string `json:"keyVaultId,omitempty" tf:"key_vault_id,omitempty"`
+
+	// Should Client Certificate Negotiation be enabled for this Hostname? Defaults to false.
+	NegotiateClientCertificate *bool `json:"negotiateClientCertificate,omitempty" tf:"negotiate_client_certificate,omitempty"`
+
+	// System or User Assigned Managed identity clientId as generated by Azure AD, which has GET access to the keyVault containing the SSL certificate.
+	SSLKeyvaultIdentityClientID *string `json:"sslKeyvaultIdentityClientId,omitempty" tf:"ssl_keyvault_identity_client_id,omitempty"`
+
+	// The subject of the certificate.
+	Subject *string `json:"subject,omitempty" tf:"subject,omitempty"`
+
+	// The thumbprint of the certificate.
+	Thumbprint *string `json:"thumbprint,omitempty" tf:"thumbprint,omitempty"`
+}
+
+type ScmParameters struct {
+}
+
+type SecurityInitParameters struct {
+
+	// Should SSL 3.0 be enabled on the backend of the gateway? Defaults to false.
+	EnableBackendSsl30 *bool `json:"enableBackendSsl30,omitempty" tf:"enable_backend_ssl30,omitempty"`
+
+	// Should TLS 1.0 be enabled on the backend of the gateway? Defaults to false.
+	EnableBackendTls10 *bool `json:"enableBackendTls10,omitempty" tf:"enable_backend_tls10,omitempty"`
+
+	// Should TLS 1.1 be enabled on the backend of the gateway? Defaults to false.
+	EnableBackendTls11 *bool `json:"enableBackendTls11,omitempty" tf:"enable_backend_tls11,omitempty"`
+
+	// Should SSL 3.0 be enabled on the frontend of the gateway? Defaults to false.
+	EnableFrontendSsl30 *bool `json:"enableFrontendSsl30,omitempty" tf:"enable_frontend_ssl30,omitempty"`
+
+	// Should TLS 1.0 be enabled on the frontend of the gateway? Defaults to false.
+	EnableFrontendTls10 *bool `json:"enableFrontendTls10,omitempty" tf:"enable_frontend_tls10,omitempty"`
+
+	// Should TLS 1.1 be enabled on the frontend of the gateway? Defaults to false.
+	EnableFrontendTls11 *bool `json:"enableFrontendTls11,omitempty" tf:"enable_frontend_tls11,omitempty"`
+
+	// Should the TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA cipher be enabled? Defaults to false.
+	TLSEcdheEcdsaWithAes128CbcShaCiphersEnabled *bool `json:"tlsEcdheEcdsaWithAes128CbcShaCiphersEnabled,omitempty" tf:"tls_ecdhe_ecdsa_with_aes128_cbc_sha_ciphers_enabled,omitempty"`
+
+	// Should the TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA cipher be enabled? Defaults to false.
+	TLSEcdheEcdsaWithAes256CbcShaCiphersEnabled *bool `json:"tlsEcdheEcdsaWithAes256CbcShaCiphersEnabled,omitempty" tf:"tls_ecdhe_ecdsa_with_aes256_cbc_sha_ciphers_enabled,omitempty"`
+
+	// Should the TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA cipher be enabled? Defaults to false.
+	TLSEcdheRsaWithAes128CbcShaCiphersEnabled *bool `json:"tlsEcdheRsaWithAes128CbcShaCiphersEnabled,omitempty" tf:"tls_ecdhe_rsa_with_aes128_cbc_sha_ciphers_enabled,omitempty"`
+
+	// Should the TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA cipher be enabled? Defaults to false.
+ TLSEcdheRsaWithAes256CbcShaCiphersEnabled *bool `json:"tlsEcdheRsaWithAes256CbcShaCiphersEnabled,omitempty" tf:"tls_ecdhe_rsa_with_aes256_cbc_sha_ciphers_enabled,omitempty"` + + // Should the TLS_RSA_WITH_AES_128_CBC_SHA256 cipher be enabled? Defaults to false. + TLSRsaWithAes128CbcSha256CiphersEnabled *bool `json:"tlsRsaWithAes128CbcSha256CiphersEnabled,omitempty" tf:"tls_rsa_with_aes128_cbc_sha256_ciphers_enabled,omitempty"` + + // Should the TLS_RSA_WITH_AES_128_CBC_SHA cipher be enabled? Defaults to false. + TLSRsaWithAes128CbcShaCiphersEnabled *bool `json:"tlsRsaWithAes128CbcShaCiphersEnabled,omitempty" tf:"tls_rsa_with_aes128_cbc_sha_ciphers_enabled,omitempty"` + + // Should the TLS_RSA_WITH_AES_128_GCM_SHA256 cipher be enabled? Defaults to false. + TLSRsaWithAes128GCMSha256CiphersEnabled *bool `json:"tlsRsaWithAes128GcmSha256CiphersEnabled,omitempty" tf:"tls_rsa_with_aes128_gcm_sha256_ciphers_enabled,omitempty"` + + // Should the TLS_RSA_WITH_AES_256_CBC_SHA256 cipher be enabled? Defaults to false. + TLSRsaWithAes256CbcSha256CiphersEnabled *bool `json:"tlsRsaWithAes256CbcSha256CiphersEnabled,omitempty" tf:"tls_rsa_with_aes256_cbc_sha256_ciphers_enabled,omitempty"` + + // Should the TLS_RSA_WITH_AES_256_CBC_SHA cipher be enabled? Defaults to false. + TLSRsaWithAes256CbcShaCiphersEnabled *bool `json:"tlsRsaWithAes256CbcShaCiphersEnabled,omitempty" tf:"tls_rsa_with_aes256_cbc_sha_ciphers_enabled,omitempty"` + + // Should the TLS_RSA_WITH_AES_256_GCM_SHA384 cipher be enabled? Defaults to false. + TLSRsaWithAes256GCMSha384CiphersEnabled *bool `json:"tlsRsaWithAes256GcmSha384CiphersEnabled,omitempty" tf:"tls_rsa_with_aes256_gcm_sha384_ciphers_enabled,omitempty"` + + // Should the TLS_RSA_WITH_3DES_EDE_CBC_SHA cipher be enabled for alL TLS versions (1.0, 1.1 and 1.2)? 
+ TripleDesCiphersEnabled *bool `json:"tripleDesCiphersEnabled,omitempty" tf:"triple_des_ciphers_enabled,omitempty"` +} + +type SecurityObservation struct { + + // Should SSL 3.0 be enabled on the backend of the gateway? Defaults to false. + EnableBackendSsl30 *bool `json:"enableBackendSsl30,omitempty" tf:"enable_backend_ssl30,omitempty"` + + // Should TLS 1.0 be enabled on the backend of the gateway? Defaults to false. + EnableBackendTls10 *bool `json:"enableBackendTls10,omitempty" tf:"enable_backend_tls10,omitempty"` + + // Should TLS 1.1 be enabled on the backend of the gateway? Defaults to false. + EnableBackendTls11 *bool `json:"enableBackendTls11,omitempty" tf:"enable_backend_tls11,omitempty"` + + // Should SSL 3.0 be enabled on the frontend of the gateway? Defaults to false. + EnableFrontendSsl30 *bool `json:"enableFrontendSsl30,omitempty" tf:"enable_frontend_ssl30,omitempty"` + + // Should TLS 1.0 be enabled on the frontend of the gateway? Defaults to false. + EnableFrontendTls10 *bool `json:"enableFrontendTls10,omitempty" tf:"enable_frontend_tls10,omitempty"` + + // Should TLS 1.1 be enabled on the frontend of the gateway? Defaults to false. + EnableFrontendTls11 *bool `json:"enableFrontendTls11,omitempty" tf:"enable_frontend_tls11,omitempty"` + + // Should the TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA cipher be enabled? Defaults to false. + TLSEcdheEcdsaWithAes128CbcShaCiphersEnabled *bool `json:"tlsEcdheEcdsaWithAes128CbcShaCiphersEnabled,omitempty" tf:"tls_ecdhe_ecdsa_with_aes128_cbc_sha_ciphers_enabled,omitempty"` + + // Should the TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA cipher be enabled? Defaults to false. + TLSEcdheEcdsaWithAes256CbcShaCiphersEnabled *bool `json:"tlsEcdheEcdsaWithAes256CbcShaCiphersEnabled,omitempty" tf:"tls_ecdhe_ecdsa_with_aes256_cbc_sha_ciphers_enabled,omitempty"` + + // Should the TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA cipher be enabled? Defaults to false. 
+ TLSEcdheRsaWithAes128CbcShaCiphersEnabled *bool `json:"tlsEcdheRsaWithAes128CbcShaCiphersEnabled,omitempty" tf:"tls_ecdhe_rsa_with_aes128_cbc_sha_ciphers_enabled,omitempty"` + + // Should the TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA cipher be enabled? Defaults to false. + TLSEcdheRsaWithAes256CbcShaCiphersEnabled *bool `json:"tlsEcdheRsaWithAes256CbcShaCiphersEnabled,omitempty" tf:"tls_ecdhe_rsa_with_aes256_cbc_sha_ciphers_enabled,omitempty"` + + // Should the TLS_RSA_WITH_AES_128_CBC_SHA256 cipher be enabled? Defaults to false. + TLSRsaWithAes128CbcSha256CiphersEnabled *bool `json:"tlsRsaWithAes128CbcSha256CiphersEnabled,omitempty" tf:"tls_rsa_with_aes128_cbc_sha256_ciphers_enabled,omitempty"` + + // Should the TLS_RSA_WITH_AES_128_CBC_SHA cipher be enabled? Defaults to false. + TLSRsaWithAes128CbcShaCiphersEnabled *bool `json:"tlsRsaWithAes128CbcShaCiphersEnabled,omitempty" tf:"tls_rsa_with_aes128_cbc_sha_ciphers_enabled,omitempty"` + + // Should the TLS_RSA_WITH_AES_128_GCM_SHA256 cipher be enabled? Defaults to false. + TLSRsaWithAes128GCMSha256CiphersEnabled *bool `json:"tlsRsaWithAes128GcmSha256CiphersEnabled,omitempty" tf:"tls_rsa_with_aes128_gcm_sha256_ciphers_enabled,omitempty"` + + // Should the TLS_RSA_WITH_AES_256_CBC_SHA256 cipher be enabled? Defaults to false. + TLSRsaWithAes256CbcSha256CiphersEnabled *bool `json:"tlsRsaWithAes256CbcSha256CiphersEnabled,omitempty" tf:"tls_rsa_with_aes256_cbc_sha256_ciphers_enabled,omitempty"` + + // Should the TLS_RSA_WITH_AES_256_CBC_SHA cipher be enabled? Defaults to false. + TLSRsaWithAes256CbcShaCiphersEnabled *bool `json:"tlsRsaWithAes256CbcShaCiphersEnabled,omitempty" tf:"tls_rsa_with_aes256_cbc_sha_ciphers_enabled,omitempty"` + + // Should the TLS_RSA_WITH_AES_256_GCM_SHA384 cipher be enabled? Defaults to false. 
+ TLSRsaWithAes256GCMSha384CiphersEnabled *bool `json:"tlsRsaWithAes256GcmSha384CiphersEnabled,omitempty" tf:"tls_rsa_with_aes256_gcm_sha384_ciphers_enabled,omitempty"` + + // Should the TLS_RSA_WITH_3DES_EDE_CBC_SHA cipher be enabled for alL TLS versions (1.0, 1.1 and 1.2)? + TripleDesCiphersEnabled *bool `json:"tripleDesCiphersEnabled,omitempty" tf:"triple_des_ciphers_enabled,omitempty"` +} + +type SecurityParameters struct { + + // Should SSL 3.0 be enabled on the backend of the gateway? Defaults to false. + // +kubebuilder:validation:Optional + EnableBackendSsl30 *bool `json:"enableBackendSsl30,omitempty" tf:"enable_backend_ssl30,omitempty"` + + // Should TLS 1.0 be enabled on the backend of the gateway? Defaults to false. + // +kubebuilder:validation:Optional + EnableBackendTls10 *bool `json:"enableBackendTls10,omitempty" tf:"enable_backend_tls10,omitempty"` + + // Should TLS 1.1 be enabled on the backend of the gateway? Defaults to false. + // +kubebuilder:validation:Optional + EnableBackendTls11 *bool `json:"enableBackendTls11,omitempty" tf:"enable_backend_tls11,omitempty"` + + // Should SSL 3.0 be enabled on the frontend of the gateway? Defaults to false. + // +kubebuilder:validation:Optional + EnableFrontendSsl30 *bool `json:"enableFrontendSsl30,omitempty" tf:"enable_frontend_ssl30,omitempty"` + + // Should TLS 1.0 be enabled on the frontend of the gateway? Defaults to false. + // +kubebuilder:validation:Optional + EnableFrontendTls10 *bool `json:"enableFrontendTls10,omitempty" tf:"enable_frontend_tls10,omitempty"` + + // Should TLS 1.1 be enabled on the frontend of the gateway? Defaults to false. + // +kubebuilder:validation:Optional + EnableFrontendTls11 *bool `json:"enableFrontendTls11,omitempty" tf:"enable_frontend_tls11,omitempty"` + + // Should the TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA cipher be enabled? Defaults to false. 
+ // +kubebuilder:validation:Optional + TLSEcdheEcdsaWithAes128CbcShaCiphersEnabled *bool `json:"tlsEcdheEcdsaWithAes128CbcShaCiphersEnabled,omitempty" tf:"tls_ecdhe_ecdsa_with_aes128_cbc_sha_ciphers_enabled,omitempty"` + + // Should the TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA cipher be enabled? Defaults to false. + // +kubebuilder:validation:Optional + TLSEcdheEcdsaWithAes256CbcShaCiphersEnabled *bool `json:"tlsEcdheEcdsaWithAes256CbcShaCiphersEnabled,omitempty" tf:"tls_ecdhe_ecdsa_with_aes256_cbc_sha_ciphers_enabled,omitempty"` + + // Should the TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA cipher be enabled? Defaults to false. + // +kubebuilder:validation:Optional + TLSEcdheRsaWithAes128CbcShaCiphersEnabled *bool `json:"tlsEcdheRsaWithAes128CbcShaCiphersEnabled,omitempty" tf:"tls_ecdhe_rsa_with_aes128_cbc_sha_ciphers_enabled,omitempty"` + + // Should the TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA cipher be enabled? Defaults to false. + // +kubebuilder:validation:Optional + TLSEcdheRsaWithAes256CbcShaCiphersEnabled *bool `json:"tlsEcdheRsaWithAes256CbcShaCiphersEnabled,omitempty" tf:"tls_ecdhe_rsa_with_aes256_cbc_sha_ciphers_enabled,omitempty"` + + // Should the TLS_RSA_WITH_AES_128_CBC_SHA256 cipher be enabled? Defaults to false. + // +kubebuilder:validation:Optional + TLSRsaWithAes128CbcSha256CiphersEnabled *bool `json:"tlsRsaWithAes128CbcSha256CiphersEnabled,omitempty" tf:"tls_rsa_with_aes128_cbc_sha256_ciphers_enabled,omitempty"` + + // Should the TLS_RSA_WITH_AES_128_CBC_SHA cipher be enabled? Defaults to false. + // +kubebuilder:validation:Optional + TLSRsaWithAes128CbcShaCiphersEnabled *bool `json:"tlsRsaWithAes128CbcShaCiphersEnabled,omitempty" tf:"tls_rsa_with_aes128_cbc_sha_ciphers_enabled,omitempty"` + + // Should the TLS_RSA_WITH_AES_128_GCM_SHA256 cipher be enabled? Defaults to false. 
+ // +kubebuilder:validation:Optional + TLSRsaWithAes128GCMSha256CiphersEnabled *bool `json:"tlsRsaWithAes128GcmSha256CiphersEnabled,omitempty" tf:"tls_rsa_with_aes128_gcm_sha256_ciphers_enabled,omitempty"` + + // Should the TLS_RSA_WITH_AES_256_CBC_SHA256 cipher be enabled? Defaults to false. + // +kubebuilder:validation:Optional + TLSRsaWithAes256CbcSha256CiphersEnabled *bool `json:"tlsRsaWithAes256CbcSha256CiphersEnabled,omitempty" tf:"tls_rsa_with_aes256_cbc_sha256_ciphers_enabled,omitempty"` + + // Should the TLS_RSA_WITH_AES_256_CBC_SHA cipher be enabled? Defaults to false. + // +kubebuilder:validation:Optional + TLSRsaWithAes256CbcShaCiphersEnabled *bool `json:"tlsRsaWithAes256CbcShaCiphersEnabled,omitempty" tf:"tls_rsa_with_aes256_cbc_sha_ciphers_enabled,omitempty"` + + // Should the TLS_RSA_WITH_AES_256_GCM_SHA384 cipher be enabled? Defaults to false. + // +kubebuilder:validation:Optional + TLSRsaWithAes256GCMSha384CiphersEnabled *bool `json:"tlsRsaWithAes256GcmSha384CiphersEnabled,omitempty" tf:"tls_rsa_with_aes256_gcm_sha384_ciphers_enabled,omitempty"` + + // Should the TLS_RSA_WITH_3DES_EDE_CBC_SHA cipher be enabled for alL TLS versions (1.0, 1.1 and 1.2)? + // +kubebuilder:validation:Optional + TripleDesCiphersEnabled *bool `json:"tripleDesCiphersEnabled,omitempty" tf:"triple_des_ciphers_enabled,omitempty"` +} + +type SignInInitParameters struct { + + // Should anonymous users be redirected to the sign in page? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type SignInObservation struct { + + // Should anonymous users be redirected to the sign in page? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type SignInParameters struct { + + // Should anonymous users be redirected to the sign in page? + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` +} + +type SignUpInitParameters struct { + + // Can users sign up on the development portal? 
+ Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // A terms_of_service block as defined below. + TermsOfService *TermsOfServiceInitParameters `json:"termsOfService,omitempty" tf:"terms_of_service,omitempty"` +} + +type SignUpObservation struct { + + // Can users sign up on the development portal? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // A terms_of_service block as defined below. + TermsOfService *TermsOfServiceObservation `json:"termsOfService,omitempty" tf:"terms_of_service,omitempty"` +} + +type SignUpParameters struct { + + // Can users sign up on the development portal? + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` + + // A terms_of_service block as defined below. + // +kubebuilder:validation:Optional + TermsOfService *TermsOfServiceParameters `json:"termsOfService" tf:"terms_of_service,omitempty"` +} + +type TenantAccessInitParameters struct { + + // Should the access to the management API be enabled? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type TenantAccessObservation struct { + + // Should the access to the management API be enabled? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The identifier for the tenant access information contract. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` +} + +type TenantAccessParameters struct { + + // Should the access to the management API be enabled? + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` +} + +type TermsOfServiceInitParameters struct { + + // Should the user be asked for consent during sign up? + ConsentRequired *bool `json:"consentRequired,omitempty" tf:"consent_required,omitempty"` + + // Should Terms of Service be displayed during sign up?. 
+ Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The Terms of Service which users are required to agree to in order to sign up. + Text *string `json:"text,omitempty" tf:"text,omitempty"` +} + +type TermsOfServiceObservation struct { + + // Should the user be asked for consent during sign up? + ConsentRequired *bool `json:"consentRequired,omitempty" tf:"consent_required,omitempty"` + + // Should Terms of Service be displayed during sign up?. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The Terms of Service which users are required to agree to in order to sign up. + Text *string `json:"text,omitempty" tf:"text,omitempty"` +} + +type TermsOfServiceParameters struct { + + // Should the user be asked for consent during sign up? + // +kubebuilder:validation:Optional + ConsentRequired *bool `json:"consentRequired" tf:"consent_required,omitempty"` + + // Should Terms of Service be displayed during sign up?. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` + + // The Terms of Service which users are required to agree to in order to sign up. + // +kubebuilder:validation:Optional + Text *string `json:"text,omitempty" tf:"text,omitempty"` +} + +type VirtualNetworkConfigurationInitParameters struct { + + // The id of the subnet that will be used for the API Management. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. 
+ // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` +} + +type VirtualNetworkConfigurationObservation struct { + + // The id of the subnet that will be used for the API Management. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` +} + +type VirtualNetworkConfigurationParameters struct { + + // The id of the subnet that will be used for the API Management. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` +} + +// ManagementSpec defines the desired state of Management +type ManagementSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ManagementParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider ManagementInitParameters `json:"initProvider,omitempty"` +} + +// ManagementStatus defines the observed state of Management. +type ManagementStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ManagementObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Management is the Schema for the Managements API. Manages an API Management Service. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type Management struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.publisherEmail) || (has(self.initProvider) && has(self.initProvider.publisherEmail))",message="spec.forProvider.publisherEmail is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.publisherName) || (has(self.initProvider) && 
has(self.initProvider.publisherName))",message="spec.forProvider.publisherName is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.skuName) || (has(self.initProvider) && has(self.initProvider.skuName))",message="spec.forProvider.skuName is a required parameter" + Spec ManagementSpec `json:"spec"` + Status ManagementStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ManagementList contains a list of Managements +type ManagementList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Management `json:"items"` +} + +// Repository type metadata. +var ( + Management_Kind = "Management" + Management_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Management_Kind}.String() + Management_KindAPIVersion = Management_Kind + "." + CRDGroupVersion.String() + Management_GroupVersionKind = CRDGroupVersion.WithKind(Management_Kind) +) + +func init() { + SchemeBuilder.Register(&Management{}, &ManagementList{}) +} diff --git a/apis/apimanagement/v1beta2/zz_namedvalue_terraformed.go b/apis/apimanagement/v1beta2/zz_namedvalue_terraformed.go new file mode 100755 index 000000000..0d6444962 --- /dev/null +++ b/apis/apimanagement/v1beta2/zz_namedvalue_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this NamedValue +func (mg *NamedValue) GetTerraformResourceType() string { + return "azurerm_api_management_named_value" +} + +// GetConnectionDetailsMapping for this NamedValue +func (tr *NamedValue) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"value": "spec.forProvider.valueSecretRef"} +} + +// GetObservation of this NamedValue +func (tr *NamedValue) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this NamedValue +func (tr *NamedValue) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this NamedValue +func (tr *NamedValue) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this NamedValue +func (tr *NamedValue) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this NamedValue +func (tr *NamedValue) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this NamedValue +func (tr *NamedValue) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err 
!= nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this NamedValue +func (tr *NamedValue) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this NamedValue using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *NamedValue) LateInitialize(attrs []byte) (bool, error) { + params := &NamedValueParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *NamedValue) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/apimanagement/v1beta2/zz_namedvalue_types.go b/apis/apimanagement/v1beta2/zz_namedvalue_types.go new file mode 100755 index 000000000..7aa7859bc --- /dev/null +++ b/apis/apimanagement/v1beta2/zz_namedvalue_types.go @@ -0,0 +1,192 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type NamedValueInitParameters struct { + + // The display name of this API Management Named Value. + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // Specifies whether the API Management Named Value is secret. Valid values are true or false. The default value is false. + Secret *bool `json:"secret,omitempty" tf:"secret,omitempty"` + + // A list of tags to be applied to the API Management Named Value. + Tags []*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A value_from_key_vault block as defined below. + ValueFromKeyVault *ValueFromKeyVaultInitParameters `json:"valueFromKeyVault,omitempty" tf:"value_from_key_vault,omitempty"` +} + +type NamedValueObservation struct { + + // The name of the API Management Service in which the API Management Named Value should exist. Changing this forces a new resource to be created. + APIManagementName *string `json:"apiManagementName,omitempty" tf:"api_management_name,omitempty"` + + // The display name of this API Management Named Value. + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // The ID of the API Management Named Value. 
+ ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name of the Resource Group in which the API Management Named Value should exist. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Specifies whether the API Management Named Value is secret. Valid values are true or false. The default value is false. + Secret *bool `json:"secret,omitempty" tf:"secret,omitempty"` + + // A list of tags to be applied to the API Management Named Value. + Tags []*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A value_from_key_vault block as defined below. + ValueFromKeyVault *ValueFromKeyVaultObservation `json:"valueFromKeyVault,omitempty" tf:"value_from_key_vault,omitempty"` +} + +type NamedValueParameters struct { + + // The name of the API Management Service in which the API Management Named Value should exist. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/apimanagement/v1beta2.Management + // +kubebuilder:validation:Optional + APIManagementName *string `json:"apiManagementName,omitempty" tf:"api_management_name,omitempty"` + + // Reference to a Management in apimanagement to populate apiManagementName. + // +kubebuilder:validation:Optional + APIManagementNameRef *v1.Reference `json:"apiManagementNameRef,omitempty" tf:"-"` + + // Selector for a Management in apimanagement to populate apiManagementName. + // +kubebuilder:validation:Optional + APIManagementNameSelector *v1.Selector `json:"apiManagementNameSelector,omitempty" tf:"-"` + + // The display name of this API Management Named Value. + // +kubebuilder:validation:Optional + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // The name of the Resource Group in which the API Management Named Value should exist. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // Specifies whether the API Management Named Value is secret. Valid values are true or false. The default value is false. + // +kubebuilder:validation:Optional + Secret *bool `json:"secret,omitempty" tf:"secret,omitempty"` + + // A list of tags to be applied to the API Management Named Value. + // +kubebuilder:validation:Optional + Tags []*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A value_from_key_vault block as defined below. + // +kubebuilder:validation:Optional + ValueFromKeyVault *ValueFromKeyVaultParameters `json:"valueFromKeyVault,omitempty" tf:"value_from_key_vault,omitempty"` + + // The value of this API Management Named Value. + // +kubebuilder:validation:Optional + ValueSecretRef *v1.SecretKeySelector `json:"valueSecretRef,omitempty" tf:"-"` +} + +type ValueFromKeyVaultInitParameters struct { + + // The client ID of User Assigned Identity, for the API Management Service, which will be used to access the key vault secret. The System Assigned Identity will be used in absence. + IdentityClientID *string `json:"identityClientId,omitempty" tf:"identity_client_id,omitempty"` + + // The resource ID of the Key Vault Secret. 
+ SecretID *string `json:"secretId,omitempty" tf:"secret_id,omitempty"` +} + +type ValueFromKeyVaultObservation struct { + + // The client ID of User Assigned Identity, for the API Management Service, which will be used to access the key vault secret. The System Assigned Identity will be used in absence. + IdentityClientID *string `json:"identityClientId,omitempty" tf:"identity_client_id,omitempty"` + + // The resource ID of the Key Vault Secret. + SecretID *string `json:"secretId,omitempty" tf:"secret_id,omitempty"` +} + +type ValueFromKeyVaultParameters struct { + + // The client ID of User Assigned Identity, for the API Management Service, which will be used to access the key vault secret. The System Assigned Identity will be used in absence. + // +kubebuilder:validation:Optional + IdentityClientID *string `json:"identityClientId,omitempty" tf:"identity_client_id,omitempty"` + + // The resource ID of the Key Vault Secret. + // +kubebuilder:validation:Optional + SecretID *string `json:"secretId" tf:"secret_id,omitempty"` +} + +// NamedValueSpec defines the desired state of NamedValue +type NamedValueSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider NamedValueParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider NamedValueInitParameters `json:"initProvider,omitempty"` +} + +// NamedValueStatus defines the observed state of NamedValue. +type NamedValueStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider NamedValueObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// NamedValue is the Schema for the NamedValues API. Manages an API Management Named Value. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type NamedValue struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.displayName) || (has(self.initProvider) && has(self.initProvider.displayName))",message="spec.forProvider.displayName is a required parameter" + Spec NamedValueSpec `json:"spec"` + Status NamedValueStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// NamedValueList contains a list of NamedValues +type NamedValueList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []NamedValue `json:"items"` +} + +// Repository type metadata. +var ( + NamedValue_Kind = "NamedValue" + NamedValue_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: NamedValue_Kind}.String() + NamedValue_KindAPIVersion = NamedValue_Kind + "." 
+ CRDGroupVersion.String() + NamedValue_GroupVersionKind = CRDGroupVersion.WithKind(NamedValue_Kind) +) + +func init() { + SchemeBuilder.Register(&NamedValue{}, &NamedValueList{}) +} diff --git a/apis/appconfiguration/v1beta1/zz_generated.conversion_spokes.go b/apis/appconfiguration/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..744581c12 --- /dev/null +++ b/apis/appconfiguration/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Configuration to the hub type. +func (tr *Configuration) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Configuration type. 
+func (tr *Configuration) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/appconfiguration/v1beta2/zz_configuration_terraformed.go b/apis/appconfiguration/v1beta2/zz_configuration_terraformed.go new file mode 100755 index 000000000..03a026aba --- /dev/null +++ b/apis/appconfiguration/v1beta2/zz_configuration_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Configuration +func (mg *Configuration) GetTerraformResourceType() string { + return "azurerm_app_configuration" +} + +// GetConnectionDetailsMapping for this Configuration +func (tr *Configuration) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Configuration +func (tr *Configuration) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Configuration +func (tr *Configuration) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Configuration +func (tr 
*Configuration) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Configuration +func (tr *Configuration) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Configuration +func (tr *Configuration) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Configuration +func (tr *Configuration) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this Configuration +func (tr *Configuration) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Configuration using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Configuration) LateInitialize(attrs []byte) (bool, error) { + params := &ConfigurationParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Configuration) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/appconfiguration/v1beta2/zz_configuration_types.go b/apis/appconfiguration/v1beta2/zz_configuration_types.go new file mode 100755 index 000000000..deea91497 --- /dev/null +++ b/apis/appconfiguration/v1beta2/zz_configuration_types.go @@ -0,0 +1,435 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ConfigurationInitParameters struct { + + // An encryption block as defined below. + Encryption *EncryptionInitParameters `json:"encryption,omitempty" tf:"encryption,omitempty"` + + // An identity block as defined below. 
+ Identity *IdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Whether local authentication methods is enabled. Defaults to true. + LocalAuthEnabled *bool `json:"localAuthEnabled,omitempty" tf:"local_auth_enabled,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The Public Network Access setting of the App Configuration. Possible values are Enabled and Disabled. + PublicNetworkAccess *string `json:"publicNetworkAccess,omitempty" tf:"public_network_access,omitempty"` + + // Whether Purge Protection is enabled. This field only works for standard sku. Defaults to false. + PurgeProtectionEnabled *bool `json:"purgeProtectionEnabled,omitempty" tf:"purge_protection_enabled,omitempty"` + + // One or more replica blocks as defined below. + Replica []ReplicaInitParameters `json:"replica,omitempty" tf:"replica,omitempty"` + + // The SKU name of the App Configuration. Possible values are free and standard. Defaults to free. + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // The number of days that items should be retained for once soft-deleted. This field only works for standard sku. This value can be between 1 and 7 days. Defaults to 7. Changing this forces a new resource to be created. + SoftDeleteRetentionDays *float64 `json:"softDeleteRetentionDays,omitempty" tf:"soft_delete_retention_days,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ConfigurationObservation struct { + + // An encryption block as defined below. + Encryption *EncryptionObservation `json:"encryption,omitempty" tf:"encryption,omitempty"` + + // The URL of the App Configuration. + Endpoint *string `json:"endpoint,omitempty" tf:"endpoint,omitempty"` + + // The App Configuration ID. 
+ ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. + Identity *IdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // Whether local authentication methods is enabled. Defaults to true. + LocalAuthEnabled *bool `json:"localAuthEnabled,omitempty" tf:"local_auth_enabled,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A primary_read_key block as defined below containing the primary read access key. + PrimaryReadKey []PrimaryReadKeyObservation `json:"primaryReadKey,omitempty" tf:"primary_read_key,omitempty"` + + // A primary_write_key block as defined below containing the primary write access key. + PrimaryWriteKey []PrimaryWriteKeyObservation `json:"primaryWriteKey,omitempty" tf:"primary_write_key,omitempty"` + + // The Public Network Access setting of the App Configuration. Possible values are Enabled and Disabled. + PublicNetworkAccess *string `json:"publicNetworkAccess,omitempty" tf:"public_network_access,omitempty"` + + // Whether Purge Protection is enabled. This field only works for standard sku. Defaults to false. + PurgeProtectionEnabled *bool `json:"purgeProtectionEnabled,omitempty" tf:"purge_protection_enabled,omitempty"` + + // One or more replica blocks as defined below. + Replica []ReplicaObservation `json:"replica,omitempty" tf:"replica,omitempty"` + + // The name of the resource group in which to create the App Configuration. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A secondary_read_key block as defined below containing the secondary read access key. 
+ SecondaryReadKey []SecondaryReadKeyObservation `json:"secondaryReadKey,omitempty" tf:"secondary_read_key,omitempty"` + + // A secondary_write_key block as defined below containing the secondary write access key. + SecondaryWriteKey []SecondaryWriteKeyObservation `json:"secondaryWriteKey,omitempty" tf:"secondary_write_key,omitempty"` + + // The SKU name of the App Configuration. Possible values are free and standard. Defaults to free. + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // The number of days that items should be retained for once soft-deleted. This field only works for standard sku. This value can be between 1 and 7 days. Defaults to 7. Changing this forces a new resource to be created. + SoftDeleteRetentionDays *float64 `json:"softDeleteRetentionDays,omitempty" tf:"soft_delete_retention_days,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ConfigurationParameters struct { + + // An encryption block as defined below. + // +kubebuilder:validation:Optional + Encryption *EncryptionParameters `json:"encryption,omitempty" tf:"encryption,omitempty"` + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *IdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Whether local authentication methods is enabled. Defaults to true. + // +kubebuilder:validation:Optional + LocalAuthEnabled *bool `json:"localAuthEnabled,omitempty" tf:"local_auth_enabled,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The Public Network Access setting of the App Configuration. Possible values are Enabled and Disabled. 
+ // +kubebuilder:validation:Optional + PublicNetworkAccess *string `json:"publicNetworkAccess,omitempty" tf:"public_network_access,omitempty"` + + // Whether Purge Protection is enabled. This field only works for standard sku. Defaults to false. + // +kubebuilder:validation:Optional + PurgeProtectionEnabled *bool `json:"purgeProtectionEnabled,omitempty" tf:"purge_protection_enabled,omitempty"` + + // One or more replica blocks as defined below. + // +kubebuilder:validation:Optional + Replica []ReplicaParameters `json:"replica,omitempty" tf:"replica,omitempty"` + + // The name of the resource group in which to create the App Configuration. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // The SKU name of the App Configuration. Possible values are free and standard. Defaults to free. + // +kubebuilder:validation:Optional + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // The number of days that items should be retained for once soft-deleted. This field only works for standard sku. This value can be between 1 and 7 days. Defaults to 7. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + SoftDeleteRetentionDays *float64 `json:"softDeleteRetentionDays,omitempty" tf:"soft_delete_retention_days,omitempty"` + + // A mapping of tags to assign to the resource. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type EncryptionInitParameters struct { + + // Specifies the client id of the identity which will be used to access key vault. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/managedidentity/v1beta1.UserAssignedIdentity + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("client_id",true) + IdentityClientID *string `json:"identityClientId,omitempty" tf:"identity_client_id,omitempty"` + + // Reference to a UserAssignedIdentity in managedidentity to populate identityClientId. + // +kubebuilder:validation:Optional + IdentityClientIDRef *v1.Reference `json:"identityClientIdRef,omitempty" tf:"-"` + + // Selector for a UserAssignedIdentity in managedidentity to populate identityClientId. + // +kubebuilder:validation:Optional + IdentityClientIDSelector *v1.Selector `json:"identityClientIdSelector,omitempty" tf:"-"` + + // Specifies the URI of the key vault key used to encrypt data. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta2.Key + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + KeyVaultKeyIdentifier *string `json:"keyVaultKeyIdentifier,omitempty" tf:"key_vault_key_identifier,omitempty"` + + // Reference to a Key in keyvault to populate keyVaultKeyIdentifier. + // +kubebuilder:validation:Optional + KeyVaultKeyIdentifierRef *v1.Reference `json:"keyVaultKeyIdentifierRef,omitempty" tf:"-"` + + // Selector for a Key in keyvault to populate keyVaultKeyIdentifier. + // +kubebuilder:validation:Optional + KeyVaultKeyIdentifierSelector *v1.Selector `json:"keyVaultKeyIdentifierSelector,omitempty" tf:"-"` +} + +type EncryptionObservation struct { + + // Specifies the client id of the identity which will be used to access key vault. 
+ IdentityClientID *string `json:"identityClientId,omitempty" tf:"identity_client_id,omitempty"` + + // Specifies the URI of the key vault key used to encrypt data. + KeyVaultKeyIdentifier *string `json:"keyVaultKeyIdentifier,omitempty" tf:"key_vault_key_identifier,omitempty"` +} + +type EncryptionParameters struct { + + // Specifies the client id of the identity which will be used to access key vault. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/managedidentity/v1beta1.UserAssignedIdentity + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("client_id",true) + // +kubebuilder:validation:Optional + IdentityClientID *string `json:"identityClientId,omitempty" tf:"identity_client_id,omitempty"` + + // Reference to a UserAssignedIdentity in managedidentity to populate identityClientId. + // +kubebuilder:validation:Optional + IdentityClientIDRef *v1.Reference `json:"identityClientIdRef,omitempty" tf:"-"` + + // Selector for a UserAssignedIdentity in managedidentity to populate identityClientId. + // +kubebuilder:validation:Optional + IdentityClientIDSelector *v1.Selector `json:"identityClientIdSelector,omitempty" tf:"-"` + + // Specifies the URI of the key vault key used to encrypt data. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta2.Key + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + KeyVaultKeyIdentifier *string `json:"keyVaultKeyIdentifier,omitempty" tf:"key_vault_key_identifier,omitempty"` + + // Reference to a Key in keyvault to populate keyVaultKeyIdentifier. + // +kubebuilder:validation:Optional + KeyVaultKeyIdentifierRef *v1.Reference `json:"keyVaultKeyIdentifierRef,omitempty" tf:"-"` + + // Selector for a Key in keyvault to populate keyVaultKeyIdentifier. 
+ // +kubebuilder:validation:Optional + KeyVaultKeyIdentifierSelector *v1.Selector `json:"keyVaultKeyIdentifierSelector,omitempty" tf:"-"` +} + +type IdentityInitParameters struct { + + // A list of User Assigned Managed Identity IDs to be assigned to this App Configuration. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this App Configuration. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityObservation struct { + + // A list of User Assigned Managed Identity IDs to be assigned to this App Configuration. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Principal ID associated with this Managed Service Identity. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID associated with this Managed Service Identity. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this App Configuration. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityParameters struct { + + // A list of User Assigned Managed Identity IDs to be assigned to this App Configuration. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this App Configuration. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). 
+ // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type PrimaryReadKeyInitParameters struct { +} + +type PrimaryReadKeyObservation struct { + + // The Connection String for this Access Key - comprising of the Endpoint, ID and Secret. + ConnectionString *string `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // The ID of the Access Key. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The Secret of the Access Key. + Secret *string `json:"secret,omitempty" tf:"secret,omitempty"` +} + +type PrimaryReadKeyParameters struct { +} + +type PrimaryWriteKeyInitParameters struct { +} + +type PrimaryWriteKeyObservation struct { + + // The Connection String for this Access Key - comprising of the Endpoint, ID and Secret. + ConnectionString *string `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // The ID of the Access Key. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The Secret of the Access Key. + Secret *string `json:"secret,omitempty" tf:"secret,omitempty"` +} + +type PrimaryWriteKeyParameters struct { +} + +type ReplicaInitParameters struct { + + // Specifies the supported Azure location where the replica exists. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the name of the replica. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type ReplicaObservation struct { + + // The URL of the App Configuration Replica. + Endpoint *string `json:"endpoint,omitempty" tf:"endpoint,omitempty"` + + // The ID of the App Configuration Replica. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies the supported Azure location where the replica exists. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the name of the replica. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type ReplicaParameters struct { + + // Specifies the supported Azure location where the replica exists. + // +kubebuilder:validation:Optional + Location *string `json:"location" tf:"location,omitempty"` + + // Specifies the name of the replica. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type SecondaryReadKeyInitParameters struct { +} + +type SecondaryReadKeyObservation struct { + + // The Connection String for this Access Key - comprising of the Endpoint, ID and Secret. + ConnectionString *string `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // The ID of the Access Key. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The Secret of the Access Key. + Secret *string `json:"secret,omitempty" tf:"secret,omitempty"` +} + +type SecondaryReadKeyParameters struct { +} + +type SecondaryWriteKeyInitParameters struct { +} + +type SecondaryWriteKeyObservation struct { + + // The Connection String for this Access Key - comprising of the Endpoint, ID and Secret. + ConnectionString *string `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // The ID of the Access Key. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The Secret of the Access Key. + Secret *string `json:"secret,omitempty" tf:"secret,omitempty"` +} + +type SecondaryWriteKeyParameters struct { +} + +// ConfigurationSpec defines the desired state of Configuration +type ConfigurationSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ConfigurationParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. 
+ // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ConfigurationInitParameters `json:"initProvider,omitempty"` +} + +// ConfigurationStatus defines the observed state of Configuration. +type ConfigurationStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ConfigurationObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Configuration is the Schema for the Configurations API. Manages an Azure App Configuration. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type Configuration struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + Spec ConfigurationSpec `json:"spec"` + Status ConfigurationStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ConfigurationList contains a list of Configurations +type ConfigurationList struct { + metav1.TypeMeta `json:",inline"` 
+ metav1.ListMeta `json:"metadata,omitempty"` + Items []Configuration `json:"items"` +} + +// Repository type metadata. +var ( + Configuration_Kind = "Configuration" + Configuration_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Configuration_Kind}.String() + Configuration_KindAPIVersion = Configuration_Kind + "." + CRDGroupVersion.String() + Configuration_GroupVersionKind = CRDGroupVersion.WithKind(Configuration_Kind) +) + +func init() { + SchemeBuilder.Register(&Configuration{}, &ConfigurationList{}) +} diff --git a/apis/appconfiguration/v1beta1/zz_generated.conversion_hubs.go b/apis/appconfiguration/v1beta2/zz_generated.conversion_hubs.go similarity index 93% rename from apis/appconfiguration/v1beta1/zz_generated.conversion_hubs.go rename to apis/appconfiguration/v1beta2/zz_generated.conversion_hubs.go index f233d9ca0..0707ab4a9 100755 --- a/apis/appconfiguration/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/appconfiguration/v1beta2/zz_generated.conversion_hubs.go @@ -4,7 +4,7 @@ // Code generated by upjet. DO NOT EDIT. -package v1beta1 +package v1beta2 // Hub marks this type as a conversion hub. func (tr *Configuration) Hub() {} diff --git a/apis/appconfiguration/v1beta2/zz_generated.deepcopy.go b/apis/appconfiguration/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..7237d94ec --- /dev/null +++ b/apis/appconfiguration/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,943 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Configuration) DeepCopyInto(out *Configuration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Configuration. +func (in *Configuration) DeepCopy() *Configuration { + if in == nil { + return nil + } + out := new(Configuration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Configuration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigurationInitParameters) DeepCopyInto(out *ConfigurationInitParameters) { + *out = *in + if in.Encryption != nil { + in, out := &in.Encryption, &out.Encryption + *out = new(EncryptionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LocalAuthEnabled != nil { + in, out := &in.LocalAuthEnabled, &out.LocalAuthEnabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PublicNetworkAccess != nil { + in, out := &in.PublicNetworkAccess, &out.PublicNetworkAccess + *out = new(string) + **out = **in + } + if in.PurgeProtectionEnabled != nil { + in, out := &in.PurgeProtectionEnabled, &out.PurgeProtectionEnabled + *out = new(bool) + **out = **in + } + if in.Replica != nil { + in, out := &in.Replica, &out.Replica + *out = make([]ReplicaInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) 
+ **out = **in + } + if in.SoftDeleteRetentionDays != nil { + in, out := &in.SoftDeleteRetentionDays, &out.SoftDeleteRetentionDays + *out = new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationInitParameters. +func (in *ConfigurationInitParameters) DeepCopy() *ConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigurationList) DeepCopyInto(out *ConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Configuration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationList. +func (in *ConfigurationList) DeepCopy() *ConfigurationList { + if in == nil { + return nil + } + out := new(ConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigurationObservation) DeepCopyInto(out *ConfigurationObservation) { + *out = *in + if in.Encryption != nil { + in, out := &in.Encryption, &out.Encryption + *out = new(EncryptionObservation) + (*in).DeepCopyInto(*out) + } + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.LocalAuthEnabled != nil { + in, out := &in.LocalAuthEnabled, &out.LocalAuthEnabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PrimaryReadKey != nil { + in, out := &in.PrimaryReadKey, &out.PrimaryReadKey + *out = make([]PrimaryReadKeyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PrimaryWriteKey != nil { + in, out := &in.PrimaryWriteKey, &out.PrimaryWriteKey + *out = make([]PrimaryWriteKeyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PublicNetworkAccess != nil { + in, out := &in.PublicNetworkAccess, &out.PublicNetworkAccess + *out = new(string) + **out = **in + } + if in.PurgeProtectionEnabled != nil { + in, out := &in.PurgeProtectionEnabled, &out.PurgeProtectionEnabled + *out = new(bool) + **out = **in + } + if in.Replica != nil { + in, out := &in.Replica, &out.Replica + *out = make([]ReplicaObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.SecondaryReadKey != nil { + in, out := &in.SecondaryReadKey, &out.SecondaryReadKey + *out = make([]SecondaryReadKeyObservation, len(*in)) + for i := range *in { + 
(*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecondaryWriteKey != nil { + in, out := &in.SecondaryWriteKey, &out.SecondaryWriteKey + *out = make([]SecondaryWriteKeyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.SoftDeleteRetentionDays != nil { + in, out := &in.SoftDeleteRetentionDays, &out.SoftDeleteRetentionDays + *out = new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationObservation. +func (in *ConfigurationObservation) DeepCopy() *ConfigurationObservation { + if in == nil { + return nil + } + out := new(ConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigurationParameters) DeepCopyInto(out *ConfigurationParameters) { + *out = *in + if in.Encryption != nil { + in, out := &in.Encryption, &out.Encryption + *out = new(EncryptionParameters) + (*in).DeepCopyInto(*out) + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.LocalAuthEnabled != nil { + in, out := &in.LocalAuthEnabled, &out.LocalAuthEnabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PublicNetworkAccess != nil { + in, out := &in.PublicNetworkAccess, &out.PublicNetworkAccess + *out = new(string) + **out = **in + } + if in.PurgeProtectionEnabled != nil { + in, out := &in.PurgeProtectionEnabled, &out.PurgeProtectionEnabled + *out = new(bool) + **out = **in + } + if in.Replica != nil { + in, out := &in.Replica, &out.Replica + *out = make([]ReplicaParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.SoftDeleteRetentionDays != nil { + in, out := &in.SoftDeleteRetentionDays, &out.SoftDeleteRetentionDays + *out = new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] 
+ in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationParameters. +func (in *ConfigurationParameters) DeepCopy() *ConfigurationParameters { + if in == nil { + return nil + } + out := new(ConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigurationSpec) DeepCopyInto(out *ConfigurationSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationSpec. +func (in *ConfigurationSpec) DeepCopy() *ConfigurationSpec { + if in == nil { + return nil + } + out := new(ConfigurationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigurationStatus) DeepCopyInto(out *ConfigurationStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationStatus. +func (in *ConfigurationStatus) DeepCopy() *ConfigurationStatus { + if in == nil { + return nil + } + out := new(ConfigurationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EncryptionInitParameters) DeepCopyInto(out *EncryptionInitParameters) { + *out = *in + if in.IdentityClientID != nil { + in, out := &in.IdentityClientID, &out.IdentityClientID + *out = new(string) + **out = **in + } + if in.IdentityClientIDRef != nil { + in, out := &in.IdentityClientIDRef, &out.IdentityClientIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IdentityClientIDSelector != nil { + in, out := &in.IdentityClientIDSelector, &out.IdentityClientIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.KeyVaultKeyIdentifier != nil { + in, out := &in.KeyVaultKeyIdentifier, &out.KeyVaultKeyIdentifier + *out = new(string) + **out = **in + } + if in.KeyVaultKeyIdentifierRef != nil { + in, out := &in.KeyVaultKeyIdentifierRef, &out.KeyVaultKeyIdentifierRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KeyVaultKeyIdentifierSelector != nil { + in, out := &in.KeyVaultKeyIdentifierSelector, &out.KeyVaultKeyIdentifierSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionInitParameters. +func (in *EncryptionInitParameters) DeepCopy() *EncryptionInitParameters { + if in == nil { + return nil + } + out := new(EncryptionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionObservation) DeepCopyInto(out *EncryptionObservation) { + *out = *in + if in.IdentityClientID != nil { + in, out := &in.IdentityClientID, &out.IdentityClientID + *out = new(string) + **out = **in + } + if in.KeyVaultKeyIdentifier != nil { + in, out := &in.KeyVaultKeyIdentifier, &out.KeyVaultKeyIdentifier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionObservation. 
+func (in *EncryptionObservation) DeepCopy() *EncryptionObservation { + if in == nil { + return nil + } + out := new(EncryptionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionParameters) DeepCopyInto(out *EncryptionParameters) { + *out = *in + if in.IdentityClientID != nil { + in, out := &in.IdentityClientID, &out.IdentityClientID + *out = new(string) + **out = **in + } + if in.IdentityClientIDRef != nil { + in, out := &in.IdentityClientIDRef, &out.IdentityClientIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IdentityClientIDSelector != nil { + in, out := &in.IdentityClientIDSelector, &out.IdentityClientIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.KeyVaultKeyIdentifier != nil { + in, out := &in.KeyVaultKeyIdentifier, &out.KeyVaultKeyIdentifier + *out = new(string) + **out = **in + } + if in.KeyVaultKeyIdentifierRef != nil { + in, out := &in.KeyVaultKeyIdentifierRef, &out.KeyVaultKeyIdentifierRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KeyVaultKeyIdentifierSelector != nil { + in, out := &in.KeyVaultKeyIdentifierSelector, &out.KeyVaultKeyIdentifierSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionParameters. +func (in *EncryptionParameters) DeepCopy() *EncryptionParameters { + if in == nil { + return nil + } + out := new(EncryptionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentityInitParameters) DeepCopyInto(out *IdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityInitParameters. +func (in *IdentityInitParameters) DeepCopy() *IdentityInitParameters { + if in == nil { + return nil + } + out := new(IdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityObservation) DeepCopyInto(out *IdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityObservation. +func (in *IdentityObservation) DeepCopy() *IdentityObservation { + if in == nil { + return nil + } + out := new(IdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentityParameters) DeepCopyInto(out *IdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityParameters. +func (in *IdentityParameters) DeepCopy() *IdentityParameters { + if in == nil { + return nil + } + out := new(IdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrimaryReadKeyInitParameters) DeepCopyInto(out *PrimaryReadKeyInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrimaryReadKeyInitParameters. +func (in *PrimaryReadKeyInitParameters) DeepCopy() *PrimaryReadKeyInitParameters { + if in == nil { + return nil + } + out := new(PrimaryReadKeyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrimaryReadKeyObservation) DeepCopyInto(out *PrimaryReadKeyObservation) { + *out = *in + if in.ConnectionString != nil { + in, out := &in.ConnectionString, &out.ConnectionString + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrimaryReadKeyObservation. 
+func (in *PrimaryReadKeyObservation) DeepCopy() *PrimaryReadKeyObservation { + if in == nil { + return nil + } + out := new(PrimaryReadKeyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrimaryReadKeyParameters) DeepCopyInto(out *PrimaryReadKeyParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrimaryReadKeyParameters. +func (in *PrimaryReadKeyParameters) DeepCopy() *PrimaryReadKeyParameters { + if in == nil { + return nil + } + out := new(PrimaryReadKeyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrimaryWriteKeyInitParameters) DeepCopyInto(out *PrimaryWriteKeyInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrimaryWriteKeyInitParameters. +func (in *PrimaryWriteKeyInitParameters) DeepCopy() *PrimaryWriteKeyInitParameters { + if in == nil { + return nil + } + out := new(PrimaryWriteKeyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrimaryWriteKeyObservation) DeepCopyInto(out *PrimaryWriteKeyObservation) { + *out = *in + if in.ConnectionString != nil { + in, out := &in.ConnectionString, &out.ConnectionString + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrimaryWriteKeyObservation. 
+func (in *PrimaryWriteKeyObservation) DeepCopy() *PrimaryWriteKeyObservation { + if in == nil { + return nil + } + out := new(PrimaryWriteKeyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrimaryWriteKeyParameters) DeepCopyInto(out *PrimaryWriteKeyParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrimaryWriteKeyParameters. +func (in *PrimaryWriteKeyParameters) DeepCopy() *PrimaryWriteKeyParameters { + if in == nil { + return nil + } + out := new(PrimaryWriteKeyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaInitParameters) DeepCopyInto(out *ReplicaInitParameters) { + *out = *in + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaInitParameters. +func (in *ReplicaInitParameters) DeepCopy() *ReplicaInitParameters { + if in == nil { + return nil + } + out := new(ReplicaInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReplicaObservation) DeepCopyInto(out *ReplicaObservation) { + *out = *in + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaObservation. +func (in *ReplicaObservation) DeepCopy() *ReplicaObservation { + if in == nil { + return nil + } + out := new(ReplicaObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaParameters) DeepCopyInto(out *ReplicaParameters) { + *out = *in + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaParameters. +func (in *ReplicaParameters) DeepCopy() *ReplicaParameters { + if in == nil { + return nil + } + out := new(ReplicaParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecondaryReadKeyInitParameters) DeepCopyInto(out *SecondaryReadKeyInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecondaryReadKeyInitParameters. 
+func (in *SecondaryReadKeyInitParameters) DeepCopy() *SecondaryReadKeyInitParameters { + if in == nil { + return nil + } + out := new(SecondaryReadKeyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecondaryReadKeyObservation) DeepCopyInto(out *SecondaryReadKeyObservation) { + *out = *in + if in.ConnectionString != nil { + in, out := &in.ConnectionString, &out.ConnectionString + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecondaryReadKeyObservation. +func (in *SecondaryReadKeyObservation) DeepCopy() *SecondaryReadKeyObservation { + if in == nil { + return nil + } + out := new(SecondaryReadKeyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecondaryReadKeyParameters) DeepCopyInto(out *SecondaryReadKeyParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecondaryReadKeyParameters. +func (in *SecondaryReadKeyParameters) DeepCopy() *SecondaryReadKeyParameters { + if in == nil { + return nil + } + out := new(SecondaryReadKeyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecondaryWriteKeyInitParameters) DeepCopyInto(out *SecondaryWriteKeyInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecondaryWriteKeyInitParameters. 
+func (in *SecondaryWriteKeyInitParameters) DeepCopy() *SecondaryWriteKeyInitParameters { + if in == nil { + return nil + } + out := new(SecondaryWriteKeyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecondaryWriteKeyObservation) DeepCopyInto(out *SecondaryWriteKeyObservation) { + *out = *in + if in.ConnectionString != nil { + in, out := &in.ConnectionString, &out.ConnectionString + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecondaryWriteKeyObservation. +func (in *SecondaryWriteKeyObservation) DeepCopy() *SecondaryWriteKeyObservation { + if in == nil { + return nil + } + out := new(SecondaryWriteKeyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecondaryWriteKeyParameters) DeepCopyInto(out *SecondaryWriteKeyParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecondaryWriteKeyParameters. 
+func (in *SecondaryWriteKeyParameters) DeepCopy() *SecondaryWriteKeyParameters { + if in == nil { + return nil + } + out := new(SecondaryWriteKeyParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/appconfiguration/v1beta2/zz_generated.managed.go b/apis/appconfiguration/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..5479680a3 --- /dev/null +++ b/apis/appconfiguration/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Configuration. +func (mg *Configuration) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Configuration. +func (mg *Configuration) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Configuration. +func (mg *Configuration) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Configuration. +func (mg *Configuration) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Configuration. +func (mg *Configuration) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Configuration. +func (mg *Configuration) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Configuration. +func (mg *Configuration) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Configuration. 
+func (mg *Configuration) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Configuration. +func (mg *Configuration) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Configuration. +func (mg *Configuration) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Configuration. +func (mg *Configuration) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Configuration. +func (mg *Configuration) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/appconfiguration/v1beta2/zz_generated.managedlist.go b/apis/appconfiguration/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..b452a8091 --- /dev/null +++ b/apis/appconfiguration/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this ConfigurationList. +func (l *ConfigurationList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/appconfiguration/v1beta2/zz_generated.resolvers.go b/apis/appconfiguration/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..3b1e37932 --- /dev/null +++ b/apis/appconfiguration/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,134 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. 
+// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + apisresolver "github.com/upbound/provider-azure/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *Configuration) ResolveReferences( // ResolveReferences of this Configuration. + ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + if mg.Spec.ForProvider.Encryption != nil { + { + m, l, err = apisresolver.GetManagedResource("managedidentity.azure.upbound.io", "v1beta1", "UserAssignedIdentity", "UserAssignedIdentityList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Encryption.IdentityClientID), + Extract: resource.ExtractParamPath("client_id", true), + Reference: mg.Spec.ForProvider.Encryption.IdentityClientIDRef, + Selector: mg.Spec.ForProvider.Encryption.IdentityClientIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Encryption.IdentityClientID") + } + mg.Spec.ForProvider.Encryption.IdentityClientID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Encryption.IdentityClientIDRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.Encryption != nil { + { + m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta2", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for 
reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Encryption.KeyVaultKeyIdentifier), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.Encryption.KeyVaultKeyIdentifierRef, + Selector: mg.Spec.ForProvider.Encryption.KeyVaultKeyIdentifierSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Encryption.KeyVaultKeyIdentifier") + } + mg.Spec.ForProvider.Encryption.KeyVaultKeyIdentifier = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Encryption.KeyVaultKeyIdentifierRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.Encryption != nil { + { + m, l, err = apisresolver.GetManagedResource("managedidentity.azure.upbound.io", "v1beta1", "UserAssignedIdentity", "UserAssignedIdentityList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.InitProvider.Encryption.IdentityClientID), + Extract: resource.ExtractParamPath("client_id", true), + Reference: mg.Spec.InitProvider.Encryption.IdentityClientIDRef, + Selector: mg.Spec.InitProvider.Encryption.IdentityClientIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Encryption.IdentityClientID") + } + mg.Spec.InitProvider.Encryption.IdentityClientID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Encryption.IdentityClientIDRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.Encryption != nil { + { + m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta2", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Encryption.KeyVaultKeyIdentifier), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.Encryption.KeyVaultKeyIdentifierRef, + Selector: mg.Spec.InitProvider.Encryption.KeyVaultKeyIdentifierSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Encryption.KeyVaultKeyIdentifier") + } + mg.Spec.InitProvider.Encryption.KeyVaultKeyIdentifier = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Encryption.KeyVaultKeyIdentifierRef = rsp.ResolvedReference + + } + + return nil +} diff --git a/apis/appconfiguration/v1beta2/zz_groupversion_info.go b/apis/appconfiguration/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..48ce49b64 --- /dev/null +++ b/apis/appconfiguration/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +// +kubebuilder:object:generate=true +// +groupName=appconfiguration.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "appconfiguration.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/appplatform/v1beta1/zz_generated.conversion_hubs.go b/apis/appplatform/v1beta1/zz_generated.conversion_hubs.go index 1c7b269c3..9dcd3ecf4 100755 --- a/apis/appplatform/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/appplatform/v1beta1/zz_generated.conversion_hubs.go @@ -12,15 +12,9 @@ func (tr *SpringCloudAccelerator) Hub() {} // Hub marks this type as a conversion hub. func (tr *SpringCloudActiveDeployment) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *SpringCloudAPIPortal) Hub() {} - // Hub marks this type as a conversion hub. func (tr *SpringCloudAPIPortalCustomDomain) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *SpringCloudApp) Hub() {} - // Hub marks this type as a conversion hub. func (tr *SpringCloudAppCosmosDBAssociation) Hub() {} @@ -30,44 +24,17 @@ func (tr *SpringCloudAppMySQLAssociation) Hub() {} // Hub marks this type as a conversion hub. func (tr *SpringCloudAppRedisAssociation) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *SpringCloudBuildDeployment) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *SpringCloudBuildPackBinding) Hub() {} - -// Hub marks this type as a conversion hub. 
-func (tr *SpringCloudBuilder) Hub() {} - // Hub marks this type as a conversion hub. func (tr *SpringCloudCertificate) Hub() {} // Hub marks this type as a conversion hub. func (tr *SpringCloudConfigurationService) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *SpringCloudContainerDeployment) Hub() {} - // Hub marks this type as a conversion hub. func (tr *SpringCloudCustomDomain) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *SpringCloudCustomizedAccelerator) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *SpringCloudDevToolPortal) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *SpringCloudGateway) Hub() {} - // Hub marks this type as a conversion hub. func (tr *SpringCloudGatewayCustomDomain) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *SpringCloudJavaDeployment) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *SpringCloudService) Hub() {} - // Hub marks this type as a conversion hub. func (tr *SpringCloudStorage) Hub() {} diff --git a/apis/appplatform/v1beta1/zz_generated.conversion_spokes.go b/apis/appplatform/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..18d2abf56 --- /dev/null +++ b/apis/appplatform/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,234 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this SpringCloudAPIPortal to the hub type. 
+func (tr *SpringCloudAPIPortal) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the SpringCloudAPIPortal type. +func (tr *SpringCloudAPIPortal) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this SpringCloudApp to the hub type. +func (tr *SpringCloudApp) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the SpringCloudApp type. +func (tr *SpringCloudApp) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this SpringCloudBuildDeployment to the hub type. 
+func (tr *SpringCloudBuildDeployment) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the SpringCloudBuildDeployment type. +func (tr *SpringCloudBuildDeployment) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this SpringCloudBuilder to the hub type. +func (tr *SpringCloudBuilder) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the SpringCloudBuilder type. 
+func (tr *SpringCloudBuilder) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this SpringCloudBuildPackBinding to the hub type. +func (tr *SpringCloudBuildPackBinding) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the SpringCloudBuildPackBinding type. +func (tr *SpringCloudBuildPackBinding) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this SpringCloudContainerDeployment to the hub type. 
+func (tr *SpringCloudContainerDeployment) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the SpringCloudContainerDeployment type. +func (tr *SpringCloudContainerDeployment) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this SpringCloudCustomizedAccelerator to the hub type. +func (tr *SpringCloudCustomizedAccelerator) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the SpringCloudCustomizedAccelerator type. 
+func (tr *SpringCloudCustomizedAccelerator) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this SpringCloudDevToolPortal to the hub type. +func (tr *SpringCloudDevToolPortal) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the SpringCloudDevToolPortal type. +func (tr *SpringCloudDevToolPortal) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this SpringCloudGateway to the hub type. 
+func (tr *SpringCloudGateway) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the SpringCloudGateway type. +func (tr *SpringCloudGateway) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this SpringCloudJavaDeployment to the hub type. +func (tr *SpringCloudJavaDeployment) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the SpringCloudJavaDeployment type. 
+func (tr *SpringCloudJavaDeployment) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this SpringCloudService to the hub type. +func (tr *SpringCloudService) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the SpringCloudService type. 
+func (tr *SpringCloudService) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/appplatform/v1beta1/zz_generated.resolvers.go b/apis/appplatform/v1beta1/zz_generated.resolvers.go index d2b0798ce..2b0dd6ab2 100644 --- a/apis/appplatform/v1beta1/zz_generated.resolvers.go +++ b/apis/appplatform/v1beta1/zz_generated.resolvers.go @@ -98,7 +98,7 @@ func (mg *SpringCloudAPIPortalCustomDomain) ResolveReferences(ctx context.Contex var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("appplatform.azure.upbound.io", "v1beta1", "SpringCloudAPIPortal", "SpringCloudAPIPortalList") + m, l, err = apisresolver.GetManagedResource("appplatform.azure.upbound.io", "v1beta2", "SpringCloudAPIPortal", "SpringCloudAPIPortalList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -117,7 +117,7 @@ func (mg *SpringCloudAPIPortalCustomDomain) ResolveReferences(ctx context.Contex mg.Spec.ForProvider.SpringCloudAPIPortalID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.SpringCloudAPIPortalIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("appplatform.azure.upbound.io", "v1beta1", "SpringCloudAPIPortal", "SpringCloudAPIPortalList") + m, l, err = apisresolver.GetManagedResource("appplatform.azure.upbound.io", "v1beta2", "SpringCloudAPIPortal", "SpringCloudAPIPortalList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -148,7 +148,7 @@ func (mg *SpringCloudAccelerator) 
ResolveReferences(ctx context.Context, c clien var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("appplatform.azure.upbound.io", "v1beta1", "SpringCloudService", "SpringCloudServiceList") + m, l, err = apisresolver.GetManagedResource("appplatform.azure.upbound.io", "v1beta2", "SpringCloudService", "SpringCloudServiceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -167,7 +167,7 @@ func (mg *SpringCloudAccelerator) ResolveReferences(ctx context.Context, c clien mg.Spec.ForProvider.SpringCloudServiceID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.SpringCloudServiceIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("appplatform.azure.upbound.io", "v1beta1", "SpringCloudService", "SpringCloudServiceList") + m, l, err = apisresolver.GetManagedResource("appplatform.azure.upbound.io", "v1beta2", "SpringCloudService", "SpringCloudServiceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -198,7 +198,7 @@ func (mg *SpringCloudActiveDeployment) ResolveReferences(ctx context.Context, c var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("appplatform.azure.upbound.io", "v1beta1", "SpringCloudJavaDeployment", "SpringCloudJavaDeploymentList") + m, l, err = apisresolver.GetManagedResource("appplatform.azure.upbound.io", "v1beta2", "SpringCloudJavaDeployment", "SpringCloudJavaDeploymentList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -217,7 +217,7 @@ func (mg *SpringCloudActiveDeployment) ResolveReferences(ctx context.Context, c mg.Spec.ForProvider.DeploymentName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.DeploymentNameRef = 
rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("appplatform.azure.upbound.io", "v1beta1", "SpringCloudApp", "SpringCloudAppList") + m, l, err = apisresolver.GetManagedResource("appplatform.azure.upbound.io", "v1beta2", "SpringCloudApp", "SpringCloudAppList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -236,7 +236,7 @@ func (mg *SpringCloudActiveDeployment) ResolveReferences(ctx context.Context, c mg.Spec.ForProvider.SpringCloudAppID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.SpringCloudAppIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("appplatform.azure.upbound.io", "v1beta1", "SpringCloudJavaDeployment", "SpringCloudJavaDeploymentList") + m, l, err = apisresolver.GetManagedResource("appplatform.azure.upbound.io", "v1beta2", "SpringCloudJavaDeployment", "SpringCloudJavaDeploymentList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -255,7 +255,7 @@ func (mg *SpringCloudActiveDeployment) ResolveReferences(ctx context.Context, c mg.Spec.InitProvider.DeploymentName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.DeploymentNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("appplatform.azure.upbound.io", "v1beta1", "SpringCloudApp", "SpringCloudAppList") + m, l, err = apisresolver.GetManagedResource("appplatform.azure.upbound.io", "v1beta2", "SpringCloudApp", "SpringCloudAppList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -336,7 +336,7 @@ func (mg *SpringCloudAppCosmosDBAssociation) ResolveReferences(ctx context.Conte var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("cosmosdb.azure.upbound.io", "v1beta1", "Account", 
"AccountList") + m, l, err = apisresolver.GetManagedResource("cosmosdb.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -355,7 +355,7 @@ func (mg *SpringCloudAppCosmosDBAssociation) ResolveReferences(ctx context.Conte mg.Spec.ForProvider.CosmosDBAccessKey = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.CosmosDBAccessKeyRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("cosmosdb.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("cosmosdb.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -374,7 +374,7 @@ func (mg *SpringCloudAppCosmosDBAssociation) ResolveReferences(ctx context.Conte mg.Spec.ForProvider.CosmosDBAccountID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.CosmosDBAccountIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("appplatform.azure.upbound.io", "v1beta1", "SpringCloudApp", "SpringCloudAppList") + m, l, err = apisresolver.GetManagedResource("appplatform.azure.upbound.io", "v1beta2", "SpringCloudApp", "SpringCloudAppList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -393,7 +393,7 @@ func (mg *SpringCloudAppCosmosDBAssociation) ResolveReferences(ctx context.Conte mg.Spec.ForProvider.SpringCloudAppID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.SpringCloudAppIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("cosmosdb.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("cosmosdb.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { 
return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -412,7 +412,7 @@ func (mg *SpringCloudAppCosmosDBAssociation) ResolveReferences(ctx context.Conte mg.Spec.InitProvider.CosmosDBAccessKey = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.CosmosDBAccessKeyRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("cosmosdb.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("cosmosdb.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -462,7 +462,7 @@ func (mg *SpringCloudAppMySQLAssociation) ResolveReferences(ctx context.Context, mg.Spec.ForProvider.DatabaseName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.DatabaseNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("dbformysql.azure.upbound.io", "v1beta1", "Server", "ServerList") + m, l, err = apisresolver.GetManagedResource("dbformysql.azure.upbound.io", "v1beta2", "Server", "ServerList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -481,7 +481,7 @@ func (mg *SpringCloudAppMySQLAssociation) ResolveReferences(ctx context.Context, mg.Spec.ForProvider.MySQLServerID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.MySQLServerIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("appplatform.azure.upbound.io", "v1beta1", "SpringCloudApp", "SpringCloudAppList") + m, l, err = apisresolver.GetManagedResource("appplatform.azure.upbound.io", "v1beta2", "SpringCloudApp", "SpringCloudAppList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -500,7 +500,7 @@ func (mg 
*SpringCloudAppMySQLAssociation) ResolveReferences(ctx context.Context, mg.Spec.ForProvider.SpringCloudAppID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.SpringCloudAppIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("dbformysql.azure.upbound.io", "v1beta1", "Server", "ServerList") + m, l, err = apisresolver.GetManagedResource("dbformysql.azure.upbound.io", "v1beta2", "Server", "ServerList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -538,7 +538,7 @@ func (mg *SpringCloudAppMySQLAssociation) ResolveReferences(ctx context.Context, mg.Spec.InitProvider.DatabaseName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.DatabaseNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("dbformysql.azure.upbound.io", "v1beta1", "Server", "ServerList") + m, l, err = apisresolver.GetManagedResource("dbformysql.azure.upbound.io", "v1beta2", "Server", "ServerList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -557,7 +557,7 @@ func (mg *SpringCloudAppMySQLAssociation) ResolveReferences(ctx context.Context, mg.Spec.InitProvider.MySQLServerID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.MySQLServerIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("dbformysql.azure.upbound.io", "v1beta1", "Server", "ServerList") + m, l, err = apisresolver.GetManagedResource("dbformysql.azure.upbound.io", "v1beta2", "Server", "ServerList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -588,7 +588,7 @@ func (mg *SpringCloudAppRedisAssociation) ResolveReferences(ctx context.Context, var rsp reference.ResolutionResponse var err error { - m, l, err = 
apisresolver.GetManagedResource("cache.azure.upbound.io", "v1beta1", "RedisCache", "RedisCacheList") + m, l, err = apisresolver.GetManagedResource("cache.azure.upbound.io", "v1beta2", "RedisCache", "RedisCacheList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -607,7 +607,7 @@ func (mg *SpringCloudAppRedisAssociation) ResolveReferences(ctx context.Context, mg.Spec.ForProvider.RedisAccessKey = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.RedisAccessKeyRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("cache.azure.upbound.io", "v1beta1", "RedisCache", "RedisCacheList") + m, l, err = apisresolver.GetManagedResource("cache.azure.upbound.io", "v1beta2", "RedisCache", "RedisCacheList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -626,7 +626,7 @@ func (mg *SpringCloudAppRedisAssociation) ResolveReferences(ctx context.Context, mg.Spec.ForProvider.RedisCacheID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.RedisCacheIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("appplatform.azure.upbound.io", "v1beta1", "SpringCloudApp", "SpringCloudAppList") + m, l, err = apisresolver.GetManagedResource("appplatform.azure.upbound.io", "v1beta2", "SpringCloudApp", "SpringCloudAppList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -645,7 +645,7 @@ func (mg *SpringCloudAppRedisAssociation) ResolveReferences(ctx context.Context, mg.Spec.ForProvider.SpringCloudAppID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.SpringCloudAppIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("cache.azure.upbound.io", "v1beta1", "RedisCache", "RedisCacheList") + m, l, err = 
apisresolver.GetManagedResource("cache.azure.upbound.io", "v1beta2", "RedisCache", "RedisCacheList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -664,7 +664,7 @@ func (mg *SpringCloudAppRedisAssociation) ResolveReferences(ctx context.Context, mg.Spec.InitProvider.RedisAccessKey = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.RedisAccessKeyRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("cache.azure.upbound.io", "v1beta1", "RedisCache", "RedisCacheList") + m, l, err = apisresolver.GetManagedResource("cache.azure.upbound.io", "v1beta2", "RedisCache", "RedisCacheList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -807,7 +807,7 @@ func (mg *SpringCloudCertificate) ResolveReferences(ctx context.Context, c clien var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta1", "Certificate", "CertificateList") + m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta2", "Certificate", "CertificateList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -845,7 +845,7 @@ func (mg *SpringCloudCertificate) ResolveReferences(ctx context.Context, c clien mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("appplatform.azure.upbound.io", "v1beta1", "SpringCloudService", "SpringCloudServiceList") + m, l, err = apisresolver.GetManagedResource("appplatform.azure.upbound.io", "v1beta2", "SpringCloudService", "SpringCloudServiceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its 
list for reference resolution") } @@ -864,7 +864,7 @@ func (mg *SpringCloudCertificate) ResolveReferences(ctx context.Context, c clien mg.Spec.ForProvider.ServiceName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ServiceNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta1", "Certificate", "CertificateList") + m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta2", "Certificate", "CertificateList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -895,7 +895,7 @@ func (mg *SpringCloudConfigurationService) ResolveReferences(ctx context.Context var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("appplatform.azure.upbound.io", "v1beta1", "SpringCloudService", "SpringCloudServiceList") + m, l, err = apisresolver.GetManagedResource("appplatform.azure.upbound.io", "v1beta2", "SpringCloudService", "SpringCloudServiceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -957,7 +957,7 @@ func (mg *SpringCloudCustomDomain) ResolveReferences(ctx context.Context, c clie var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("appplatform.azure.upbound.io", "v1beta1", "SpringCloudApp", "SpringCloudAppList") + m, l, err = apisresolver.GetManagedResource("appplatform.azure.upbound.io", "v1beta2", "SpringCloudApp", "SpringCloudAppList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -976,7 +976,7 @@ func (mg *SpringCloudCustomDomain) ResolveReferences(ctx context.Context, c clie mg.Spec.ForProvider.SpringCloudAppID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.SpringCloudAppIDRef = rsp.ResolvedReference 
{ - m, l, err = apisresolver.GetManagedResource("appplatform.azure.upbound.io", "v1beta1", "SpringCloudApp", "SpringCloudAppList") + m, l, err = apisresolver.GetManagedResource("appplatform.azure.upbound.io", "v1beta2", "SpringCloudApp", "SpringCloudAppList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1119,7 +1119,7 @@ func (mg *SpringCloudGatewayCustomDomain) ResolveReferences(ctx context.Context, var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("appplatform.azure.upbound.io", "v1beta1", "SpringCloudGateway", "SpringCloudGatewayList") + m, l, err = apisresolver.GetManagedResource("appplatform.azure.upbound.io", "v1beta2", "SpringCloudGateway", "SpringCloudGatewayList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1138,7 +1138,7 @@ func (mg *SpringCloudGatewayCustomDomain) ResolveReferences(ctx context.Context, mg.Spec.ForProvider.SpringCloudGatewayID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.SpringCloudGatewayIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("appplatform.azure.upbound.io", "v1beta1", "SpringCloudGateway", "SpringCloudGatewayList") + m, l, err = apisresolver.GetManagedResource("appplatform.azure.upbound.io", "v1beta2", "SpringCloudGateway", "SpringCloudGatewayList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1358,7 +1358,7 @@ func (mg *SpringCloudStorage) ResolveReferences(ctx context.Context, c client.Re var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("appplatform.azure.upbound.io", "v1beta1", "SpringCloudService", "SpringCloudServiceList") + m, l, err = apisresolver.GetManagedResource("appplatform.azure.upbound.io", 
"v1beta2", "SpringCloudService", "SpringCloudServiceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1377,7 +1377,7 @@ func (mg *SpringCloudStorage) ResolveReferences(ctx context.Context, c client.Re mg.Spec.ForProvider.SpringCloudServiceID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.SpringCloudServiceIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1396,7 +1396,7 @@ func (mg *SpringCloudStorage) ResolveReferences(ctx context.Context, c client.Re mg.Spec.ForProvider.StorageAccountKey = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.StorageAccountKeyRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1415,7 +1415,7 @@ func (mg *SpringCloudStorage) ResolveReferences(ctx context.Context, c client.Re mg.Spec.ForProvider.StorageAccountName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.StorageAccountNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for 
reference resolution") } @@ -1434,7 +1434,7 @@ func (mg *SpringCloudStorage) ResolveReferences(ctx context.Context, c client.Re mg.Spec.InitProvider.StorageAccountKey = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.StorageAccountKeyRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/appplatform/v1beta1/zz_springcloudaccelerator_types.go b/apis/appplatform/v1beta1/zz_springcloudaccelerator_types.go index 5740c4039..424feaedb 100755 --- a/apis/appplatform/v1beta1/zz_springcloudaccelerator_types.go +++ b/apis/appplatform/v1beta1/zz_springcloudaccelerator_types.go @@ -19,7 +19,7 @@ type SpringCloudAcceleratorInitParameters struct { Name *string `json:"name,omitempty" tf:"name,omitempty"` // The ID of the Spring Cloud Service. Changing this forces a new Spring Cloud Accelerator to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/appplatform/v1beta1.SpringCloudService + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/appplatform/v1beta2.SpringCloudService // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() SpringCloudServiceID *string `json:"springCloudServiceId,omitempty" tf:"spring_cloud_service_id,omitempty"` @@ -51,7 +51,7 @@ type SpringCloudAcceleratorParameters struct { Name *string `json:"name,omitempty" tf:"name,omitempty"` // The ID of the Spring Cloud Service. Changing this forces a new Spring Cloud Accelerator to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/appplatform/v1beta1.SpringCloudService + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/appplatform/v1beta2.SpringCloudService // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional SpringCloudServiceID *string `json:"springCloudServiceId,omitempty" tf:"spring_cloud_service_id,omitempty"` diff --git a/apis/appplatform/v1beta1/zz_springcloudactivedeployment_types.go b/apis/appplatform/v1beta1/zz_springcloudactivedeployment_types.go index e6a011131..6dea79068 100755 --- a/apis/appplatform/v1beta1/zz_springcloudactivedeployment_types.go +++ b/apis/appplatform/v1beta1/zz_springcloudactivedeployment_types.go @@ -16,7 +16,7 @@ import ( type SpringCloudActiveDeploymentInitParameters struct { // Specifies the name of Spring Cloud Deployment which is going to be active. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/appplatform/v1beta1.SpringCloudJavaDeployment + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/appplatform/v1beta2.SpringCloudJavaDeployment DeploymentName *string `json:"deploymentName,omitempty" tf:"deployment_name,omitempty"` // Reference to a SpringCloudJavaDeployment in appplatform to populate deploymentName. @@ -28,7 +28,7 @@ type SpringCloudActiveDeploymentInitParameters struct { DeploymentNameSelector *v1.Selector `json:"deploymentNameSelector,omitempty" tf:"-"` // Specifies the id of the Spring Cloud Application. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/appplatform/v1beta1.SpringCloudApp + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/appplatform/v1beta2.SpringCloudApp // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() SpringCloudAppID *string `json:"springCloudAppId,omitempty" tf:"spring_cloud_app_id,omitempty"` @@ -56,7 +56,7 @@ type SpringCloudActiveDeploymentObservation struct { type SpringCloudActiveDeploymentParameters struct { // Specifies the name of Spring Cloud Deployment which is going to be active. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/appplatform/v1beta1.SpringCloudJavaDeployment + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/appplatform/v1beta2.SpringCloudJavaDeployment // +kubebuilder:validation:Optional DeploymentName *string `json:"deploymentName,omitempty" tf:"deployment_name,omitempty"` @@ -69,7 +69,7 @@ type SpringCloudActiveDeploymentParameters struct { DeploymentNameSelector *v1.Selector `json:"deploymentNameSelector,omitempty" tf:"-"` // Specifies the id of the Spring Cloud Application. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/appplatform/v1beta1.SpringCloudApp + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/appplatform/v1beta2.SpringCloudApp // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional SpringCloudAppID *string `json:"springCloudAppId,omitempty" tf:"spring_cloud_app_id,omitempty"` diff --git a/apis/appplatform/v1beta1/zz_springcloudapiportalcustomdomain_types.go b/apis/appplatform/v1beta1/zz_springcloudapiportalcustomdomain_types.go index d258ec9c9..c12c57011 100755 --- a/apis/appplatform/v1beta1/zz_springcloudapiportalcustomdomain_types.go +++ b/apis/appplatform/v1beta1/zz_springcloudapiportalcustomdomain_types.go @@ -19,7 +19,7 @@ type SpringCloudAPIPortalCustomDomainInitParameters struct { Name *string `json:"name,omitempty" tf:"name,omitempty"` // The ID of the Spring Cloud API Portal. Changing this forces a new Spring Cloud API Portal Domain to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/appplatform/v1beta1.SpringCloudAPIPortal + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/appplatform/v1beta2.SpringCloudAPIPortal // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() SpringCloudAPIPortalID *string `json:"springCloudApiPortalId,omitempty" tf:"spring_cloud_api_portal_id,omitempty"` @@ -57,7 +57,7 @@ type SpringCloudAPIPortalCustomDomainParameters struct { Name *string `json:"name,omitempty" tf:"name,omitempty"` // The ID of the Spring Cloud API Portal. Changing this forces a new Spring Cloud API Portal Domain to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/appplatform/v1beta1.SpringCloudAPIPortal + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/appplatform/v1beta2.SpringCloudAPIPortal // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional SpringCloudAPIPortalID *string `json:"springCloudApiPortalId,omitempty" tf:"spring_cloud_api_portal_id,omitempty"` diff --git a/apis/appplatform/v1beta1/zz_springcloudappcosmosdbassociation_types.go b/apis/appplatform/v1beta1/zz_springcloudappcosmosdbassociation_types.go index dbdc28e6e..dbba0faf9 100755 --- a/apis/appplatform/v1beta1/zz_springcloudappcosmosdbassociation_types.go +++ b/apis/appplatform/v1beta1/zz_springcloudappcosmosdbassociation_types.go @@ -19,7 +19,7 @@ type SpringCloudAppCosmosDBAssociationInitParameters struct { APIType *string `json:"apiType,omitempty" tf:"api_type,omitempty"` // Specifies the CosmosDB Account access key. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cosmosdb/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cosmosdb/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("primary_key",true) CosmosDBAccessKey *string `json:"cosmosdbAccessKey,omitempty" tf:"cosmosdb_access_key,omitempty"` @@ -32,7 +32,7 @@ type SpringCloudAppCosmosDBAssociationInitParameters struct { CosmosDBAccessKeySelector *v1.Selector `json:"cosmosdbAccessKeySelector,omitempty" tf:"-"` // Specifies the ID of the CosmosDB Account. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cosmosdb/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cosmosdb/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() CosmosDBAccountID *string `json:"cosmosdbAccountId,omitempty" tf:"cosmosdb_account_id,omitempty"` @@ -100,7 +100,7 @@ type SpringCloudAppCosmosDBAssociationParameters struct { APIType *string `json:"apiType,omitempty" tf:"api_type,omitempty"` // Specifies the CosmosDB Account access key. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cosmosdb/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cosmosdb/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("primary_key",true) // +kubebuilder:validation:Optional CosmosDBAccessKey *string `json:"cosmosdbAccessKey,omitempty" tf:"cosmosdb_access_key,omitempty"` @@ -114,7 +114,7 @@ type SpringCloudAppCosmosDBAssociationParameters struct { CosmosDBAccessKeySelector *v1.Selector `json:"cosmosdbAccessKeySelector,omitempty" tf:"-"` // Specifies the ID of the CosmosDB Account. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cosmosdb/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cosmosdb/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional CosmosDBAccountID *string `json:"cosmosdbAccountId,omitempty" tf:"cosmosdb_account_id,omitempty"` @@ -148,7 +148,7 @@ type SpringCloudAppCosmosDBAssociationParameters struct { CosmosDBSQLDatabaseName *string `json:"cosmosdbSqlDatabaseName,omitempty" tf:"cosmosdb_sql_database_name,omitempty"` // Specifies the ID of the Spring Cloud Application where this Association is created. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/appplatform/v1beta1.SpringCloudApp + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/appplatform/v1beta2.SpringCloudApp // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional SpringCloudAppID *string `json:"springCloudAppId,omitempty" tf:"spring_cloud_app_id,omitempty"` diff --git a/apis/appplatform/v1beta1/zz_springcloudappmysqlassociation_types.go b/apis/appplatform/v1beta1/zz_springcloudappmysqlassociation_types.go index 120d8eadb..730cecaf3 100755 --- a/apis/appplatform/v1beta1/zz_springcloudappmysqlassociation_types.go +++ b/apis/appplatform/v1beta1/zz_springcloudappmysqlassociation_types.go @@ -28,7 +28,7 @@ type SpringCloudAppMySQLAssociationInitParameters struct { DatabaseNameSelector *v1.Selector `json:"databaseNameSelector,omitempty" tf:"-"` // Specifies the ID of the MySQL Server. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dbformysql/v1beta1.Server + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dbformysql/v1beta2.Server // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() MySQLServerID *string `json:"mysqlServerId,omitempty" tf:"mysql_server_id,omitempty"` @@ -41,7 +41,7 @@ type SpringCloudAppMySQLAssociationInitParameters struct { MySQLServerIDSelector *v1.Selector `json:"mysqlServerIdSelector,omitempty" tf:"-"` // Specifies the username which should be used when connecting to the MySQL Database from the Spring Cloud App. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dbformysql/v1beta1.Server + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dbformysql/v1beta2.Server // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("administrator_login",false) Username *string `json:"username,omitempty" tf:"username,omitempty"` @@ -88,7 +88,7 @@ type SpringCloudAppMySQLAssociationParameters struct { DatabaseNameSelector *v1.Selector `json:"databaseNameSelector,omitempty" tf:"-"` // Specifies the ID of the MySQL Server. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dbformysql/v1beta1.Server + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dbformysql/v1beta2.Server // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional MySQLServerID *string `json:"mysqlServerId,omitempty" tf:"mysql_server_id,omitempty"` @@ -106,7 +106,7 @@ type SpringCloudAppMySQLAssociationParameters struct { PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` // Specifies the ID of the Spring Cloud Application where this Association is created. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/appplatform/v1beta1.SpringCloudApp + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/appplatform/v1beta2.SpringCloudApp // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional SpringCloudAppID *string `json:"springCloudAppId,omitempty" tf:"spring_cloud_app_id,omitempty"` @@ -120,7 +120,7 @@ type SpringCloudAppMySQLAssociationParameters struct { SpringCloudAppIDSelector *v1.Selector `json:"springCloudAppIdSelector,omitempty" tf:"-"` // Specifies the username which should be used when connecting to the MySQL Database from the Spring Cloud App. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dbformysql/v1beta1.Server + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dbformysql/v1beta2.Server // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("administrator_login",false) // +kubebuilder:validation:Optional Username *string `json:"username,omitempty" tf:"username,omitempty"` diff --git a/apis/appplatform/v1beta1/zz_springcloudappredisassociation_types.go b/apis/appplatform/v1beta1/zz_springcloudappredisassociation_types.go index b47afe55e..9fa8e72e1 100755 --- a/apis/appplatform/v1beta1/zz_springcloudappredisassociation_types.go +++ b/apis/appplatform/v1beta1/zz_springcloudappredisassociation_types.go @@ -16,7 +16,7 @@ import ( type SpringCloudAppRedisAssociationInitParameters struct { // Specifies the Redis Cache access key. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cache/v1beta1.RedisCache + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cache/v1beta2.RedisCache // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("primary_access_key",true) RedisAccessKey *string `json:"redisAccessKey,omitempty" tf:"redis_access_key,omitempty"` @@ -29,7 +29,7 @@ type SpringCloudAppRedisAssociationInitParameters struct { RedisAccessKeySelector *v1.Selector `json:"redisAccessKeySelector,omitempty" tf:"-"` // Specifies the Redis Cache resource ID. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cache/v1beta1.RedisCache + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cache/v1beta2.RedisCache // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() RedisCacheID *string `json:"redisCacheId,omitempty" tf:"redis_cache_id,omitempty"` @@ -66,7 +66,7 @@ type SpringCloudAppRedisAssociationObservation struct { type SpringCloudAppRedisAssociationParameters struct { // Specifies the Redis Cache access key. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cache/v1beta1.RedisCache + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cache/v1beta2.RedisCache // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("primary_access_key",true) // +kubebuilder:validation:Optional RedisAccessKey *string `json:"redisAccessKey,omitempty" tf:"redis_access_key,omitempty"` @@ -80,7 +80,7 @@ type SpringCloudAppRedisAssociationParameters struct { RedisAccessKeySelector *v1.Selector `json:"redisAccessKeySelector,omitempty" tf:"-"` // Specifies the Redis Cache resource ID. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cache/v1beta1.RedisCache + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cache/v1beta2.RedisCache // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional RedisCacheID *string `json:"redisCacheId,omitempty" tf:"redis_cache_id,omitempty"` @@ -98,7 +98,7 @@ type SpringCloudAppRedisAssociationParameters struct { SSLEnabled *bool `json:"sslEnabled,omitempty" tf:"ssl_enabled,omitempty"` // Specifies the Spring Cloud Application resource ID in which the Association is created. 
Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/appplatform/v1beta1.SpringCloudApp + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/appplatform/v1beta2.SpringCloudApp // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional SpringCloudAppID *string `json:"springCloudAppId,omitempty" tf:"spring_cloud_app_id,omitempty"` diff --git a/apis/appplatform/v1beta1/zz_springcloudcertificate_types.go b/apis/appplatform/v1beta1/zz_springcloudcertificate_types.go index f60a0188a..8c22eb16c 100755 --- a/apis/appplatform/v1beta1/zz_springcloudcertificate_types.go +++ b/apis/appplatform/v1beta1/zz_springcloudcertificate_types.go @@ -22,7 +22,7 @@ type SpringCloudCertificateInitParameters struct { ExcludePrivateKey *bool `json:"excludePrivateKey,omitempty" tf:"exclude_private_key,omitempty"` // Specifies the ID of the Key Vault Certificate resource. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta1.Certificate + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta2.Certificate // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() KeyVaultCertificateID *string `json:"keyVaultCertificateId,omitempty" tf:"key_vault_certificate_id,omitempty"` @@ -70,7 +70,7 @@ type SpringCloudCertificateParameters struct { ExcludePrivateKey *bool `json:"excludePrivateKey,omitempty" tf:"exclude_private_key,omitempty"` // Specifies the ID of the Key Vault Certificate resource. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta1.Certificate + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta2.Certificate // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional KeyVaultCertificateID *string `json:"keyVaultCertificateId,omitempty" tf:"key_vault_certificate_id,omitempty"` @@ -97,7 +97,7 @@ type SpringCloudCertificateParameters struct { ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` // Specifies the name of the Spring Cloud Service resource. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/appplatform/v1beta1.SpringCloudService + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/appplatform/v1beta2.SpringCloudService // +kubebuilder:validation:Optional ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` diff --git a/apis/appplatform/v1beta1/zz_springcloudconfigurationservice_types.go b/apis/appplatform/v1beta1/zz_springcloudconfigurationservice_types.go index 6a571ede1..ec3344d00 100755 --- a/apis/appplatform/v1beta1/zz_springcloudconfigurationservice_types.go +++ b/apis/appplatform/v1beta1/zz_springcloudconfigurationservice_types.go @@ -181,7 +181,7 @@ type SpringCloudConfigurationServiceParameters struct { Repository []RepositoryParameters `json:"repository,omitempty" tf:"repository,omitempty"` // The ID of the Spring Cloud Service. Changing this forces a new Spring Cloud Configuration Service to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/appplatform/v1beta1.SpringCloudService + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/appplatform/v1beta2.SpringCloudService // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional SpringCloudServiceID *string `json:"springCloudServiceId,omitempty" tf:"spring_cloud_service_id,omitempty"` diff --git a/apis/appplatform/v1beta1/zz_springcloudcustomdomain_types.go b/apis/appplatform/v1beta1/zz_springcloudcustomdomain_types.go index 938224122..6f1976cbe 100755 --- a/apis/appplatform/v1beta1/zz_springcloudcustomdomain_types.go +++ b/apis/appplatform/v1beta1/zz_springcloudcustomdomain_types.go @@ -22,7 +22,7 @@ type SpringCloudCustomDomainInitParameters struct { Name *string `json:"name,omitempty" tf:"name,omitempty"` // Specifies the resource ID of the Spring Cloud Application. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/appplatform/v1beta1.SpringCloudApp + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/appplatform/v1beta2.SpringCloudApp // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() SpringCloudAppID *string `json:"springCloudAppId,omitempty" tf:"spring_cloud_app_id,omitempty"` @@ -67,7 +67,7 @@ type SpringCloudCustomDomainParameters struct { Name *string `json:"name,omitempty" tf:"name,omitempty"` // Specifies the resource ID of the Spring Cloud Application. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/appplatform/v1beta1.SpringCloudApp + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/appplatform/v1beta2.SpringCloudApp // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional SpringCloudAppID *string `json:"springCloudAppId,omitempty" tf:"spring_cloud_app_id,omitempty"` diff --git a/apis/appplatform/v1beta1/zz_springcloudgatewaycustomdomain_types.go b/apis/appplatform/v1beta1/zz_springcloudgatewaycustomdomain_types.go index 25255bbfa..b59a6e371 100755 --- a/apis/appplatform/v1beta1/zz_springcloudgatewaycustomdomain_types.go +++ b/apis/appplatform/v1beta1/zz_springcloudgatewaycustomdomain_types.go @@ -19,7 +19,7 @@ type SpringCloudGatewayCustomDomainInitParameters struct { Name *string `json:"name,omitempty" tf:"name,omitempty"` // The ID of the Spring Cloud Gateway. Changing this forces a new Spring Cloud Gateway Custom Domain to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/appplatform/v1beta1.SpringCloudGateway + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/appplatform/v1beta2.SpringCloudGateway // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() SpringCloudGatewayID *string `json:"springCloudGatewayId,omitempty" tf:"spring_cloud_gateway_id,omitempty"` @@ -57,7 +57,7 @@ type SpringCloudGatewayCustomDomainParameters struct { Name *string `json:"name,omitempty" tf:"name,omitempty"` // The ID of the Spring Cloud Gateway. Changing this forces a new Spring Cloud Gateway Custom Domain to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/appplatform/v1beta1.SpringCloudGateway + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/appplatform/v1beta2.SpringCloudGateway // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional SpringCloudGatewayID *string `json:"springCloudGatewayId,omitempty" tf:"spring_cloud_gateway_id,omitempty"` diff --git a/apis/appplatform/v1beta1/zz_springcloudstorage_types.go b/apis/appplatform/v1beta1/zz_springcloudstorage_types.go index fdde3c75b..7cb24963a 100755 --- a/apis/appplatform/v1beta1/zz_springcloudstorage_types.go +++ b/apis/appplatform/v1beta1/zz_springcloudstorage_types.go @@ -16,7 +16,7 @@ import ( type SpringCloudStorageInitParameters struct { // The access key of the Azure Storage Account. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("primary_access_key",true) StorageAccountKey *string `json:"storageAccountKey,omitempty" tf:"storage_account_key,omitempty"` @@ -29,7 +29,7 @@ type SpringCloudStorageInitParameters struct { StorageAccountKeySelector *v1.Selector `json:"storageAccountKeySelector,omitempty" tf:"-"` // The account name of the Azure Storage Account. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account StorageAccountName *string `json:"storageAccountName,omitempty" tf:"storage_account_name,omitempty"` // Reference to a Account in storage to populate storageAccountName. 
@@ -59,7 +59,7 @@ type SpringCloudStorageObservation struct { type SpringCloudStorageParameters struct { // The ID of the Spring Cloud Service where the Spring Cloud Storage should exist. Changing this forces a new Spring Cloud Storage to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/appplatform/v1beta1.SpringCloudService + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/appplatform/v1beta2.SpringCloudService // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional SpringCloudServiceID *string `json:"springCloudServiceId,omitempty" tf:"spring_cloud_service_id,omitempty"` @@ -73,7 +73,7 @@ type SpringCloudStorageParameters struct { SpringCloudServiceIDSelector *v1.Selector `json:"springCloudServiceIdSelector,omitempty" tf:"-"` // The access key of the Azure Storage Account. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("primary_access_key",true) // +kubebuilder:validation:Optional StorageAccountKey *string `json:"storageAccountKey,omitempty" tf:"storage_account_key,omitempty"` @@ -87,7 +87,7 @@ type SpringCloudStorageParameters struct { StorageAccountKeySelector *v1.Selector `json:"storageAccountKeySelector,omitempty" tf:"-"` // The account name of the Azure Storage Account. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +kubebuilder:validation:Optional StorageAccountName *string `json:"storageAccountName,omitempty" tf:"storage_account_name,omitempty"` diff --git a/apis/appplatform/v1beta2/zz_generated.conversion_hubs.go b/apis/appplatform/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 000000000..573d5bdd2 --- /dev/null +++ b/apis/appplatform/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,40 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *SpringCloudAPIPortal) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *SpringCloudApp) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *SpringCloudBuildDeployment) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *SpringCloudBuilder) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *SpringCloudBuildPackBinding) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *SpringCloudContainerDeployment) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *SpringCloudCustomizedAccelerator) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *SpringCloudDevToolPortal) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *SpringCloudGateway) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *SpringCloudJavaDeployment) Hub() {} + +// Hub marks this type as a conversion hub. 
+func (tr *SpringCloudService) Hub() {} diff --git a/apis/appplatform/v1beta2/zz_generated.deepcopy.go b/apis/appplatform/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..12f584c16 --- /dev/null +++ b/apis/appplatform/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,7046 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIMetadataInitParameters) DeepCopyInto(out *APIMetadataInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DocumentationURL != nil { + in, out := &in.DocumentationURL, &out.DocumentationURL + *out = new(string) + **out = **in + } + if in.ServerURL != nil { + in, out := &in.ServerURL, &out.ServerURL + *out = new(string) + **out = **in + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIMetadataInitParameters. +func (in *APIMetadataInitParameters) DeepCopy() *APIMetadataInitParameters { + if in == nil { + return nil + } + out := new(APIMetadataInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *APIMetadataObservation) DeepCopyInto(out *APIMetadataObservation) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DocumentationURL != nil { + in, out := &in.DocumentationURL, &out.DocumentationURL + *out = new(string) + **out = **in + } + if in.ServerURL != nil { + in, out := &in.ServerURL, &out.ServerURL + *out = new(string) + **out = **in + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIMetadataObservation. +func (in *APIMetadataObservation) DeepCopy() *APIMetadataObservation { + if in == nil { + return nil + } + out := new(APIMetadataObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIMetadataParameters) DeepCopyInto(out *APIMetadataParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DocumentationURL != nil { + in, out := &in.DocumentationURL, &out.DocumentationURL + *out = new(string) + **out = **in + } + if in.ServerURL != nil { + in, out := &in.ServerURL, &out.ServerURL + *out = new(string) + **out = **in + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIMetadataParameters. 
+func (in *APIMetadataParameters) DeepCopy() *APIMetadataParameters { + if in == nil { + return nil + } + out := new(APIMetadataParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BasicAuthInitParameters) DeepCopyInto(out *BasicAuthInitParameters) { + *out = *in + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasicAuthInitParameters. +func (in *BasicAuthInitParameters) DeepCopy() *BasicAuthInitParameters { + if in == nil { + return nil + } + out := new(BasicAuthInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BasicAuthObservation) DeepCopyInto(out *BasicAuthObservation) { + *out = *in + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasicAuthObservation. +func (in *BasicAuthObservation) DeepCopy() *BasicAuthObservation { + if in == nil { + return nil + } + out := new(BasicAuthObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BasicAuthParameters) DeepCopyInto(out *BasicAuthParameters) { + *out = *in + out.PasswordSecretRef = in.PasswordSecretRef + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasicAuthParameters. 
+func (in *BasicAuthParameters) DeepCopy() *BasicAuthParameters { + if in == nil { + return nil + } + out := new(BasicAuthParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildPackGroupInitParameters) DeepCopyInto(out *BuildPackGroupInitParameters) { + *out = *in + if in.BuildPackIds != nil { + in, out := &in.BuildPackIds, &out.BuildPackIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildPackGroupInitParameters. +func (in *BuildPackGroupInitParameters) DeepCopy() *BuildPackGroupInitParameters { + if in == nil { + return nil + } + out := new(BuildPackGroupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildPackGroupObservation) DeepCopyInto(out *BuildPackGroupObservation) { + *out = *in + if in.BuildPackIds != nil { + in, out := &in.BuildPackIds, &out.BuildPackIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildPackGroupObservation. 
+func (in *BuildPackGroupObservation) DeepCopy() *BuildPackGroupObservation { + if in == nil { + return nil + } + out := new(BuildPackGroupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildPackGroupParameters) DeepCopyInto(out *BuildPackGroupParameters) { + *out = *in + if in.BuildPackIds != nil { + in, out := &in.BuildPackIds, &out.BuildPackIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildPackGroupParameters. +func (in *BuildPackGroupParameters) DeepCopy() *BuildPackGroupParameters { + if in == nil { + return nil + } + out := new(BuildPackGroupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientAuthorizationInitParameters) DeepCopyInto(out *ClientAuthorizationInitParameters) { + *out = *in + if in.CertificateIds != nil { + in, out := &in.CertificateIds, &out.CertificateIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VerificationEnabled != nil { + in, out := &in.VerificationEnabled, &out.VerificationEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientAuthorizationInitParameters. 
+func (in *ClientAuthorizationInitParameters) DeepCopy() *ClientAuthorizationInitParameters { + if in == nil { + return nil + } + out := new(ClientAuthorizationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientAuthorizationObservation) DeepCopyInto(out *ClientAuthorizationObservation) { + *out = *in + if in.CertificateIds != nil { + in, out := &in.CertificateIds, &out.CertificateIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VerificationEnabled != nil { + in, out := &in.VerificationEnabled, &out.VerificationEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientAuthorizationObservation. +func (in *ClientAuthorizationObservation) DeepCopy() *ClientAuthorizationObservation { + if in == nil { + return nil + } + out := new(ClientAuthorizationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientAuthorizationParameters) DeepCopyInto(out *ClientAuthorizationParameters) { + *out = *in + if in.CertificateIds != nil { + in, out := &in.CertificateIds, &out.CertificateIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VerificationEnabled != nil { + in, out := &in.VerificationEnabled, &out.VerificationEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientAuthorizationParameters. 
+func (in *ClientAuthorizationParameters) DeepCopy() *ClientAuthorizationParameters { + if in == nil { + return nil + } + out := new(ClientAuthorizationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigServerGitSettingInitParameters) DeepCopyInto(out *ConfigServerGitSettingInitParameters) { + *out = *in + if in.HTTPBasicAuth != nil { + in, out := &in.HTTPBasicAuth, &out.HTTPBasicAuth + *out = new(HTTPBasicAuthInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.Repository != nil { + in, out := &in.Repository, &out.Repository + *out = make([]RepositoryInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SSHAuth != nil { + in, out := &in.SSHAuth, &out.SSHAuth + *out = new(ConfigServerGitSettingSSHAuthInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SearchPaths != nil { + in, out := &in.SearchPaths, &out.SearchPaths + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigServerGitSettingInitParameters. +func (in *ConfigServerGitSettingInitParameters) DeepCopy() *ConfigServerGitSettingInitParameters { + if in == nil { + return nil + } + out := new(ConfigServerGitSettingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigServerGitSettingObservation) DeepCopyInto(out *ConfigServerGitSettingObservation) { + *out = *in + if in.HTTPBasicAuth != nil { + in, out := &in.HTTPBasicAuth, &out.HTTPBasicAuth + *out = new(HTTPBasicAuthObservation) + (*in).DeepCopyInto(*out) + } + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.Repository != nil { + in, out := &in.Repository, &out.Repository + *out = make([]RepositoryObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SSHAuth != nil { + in, out := &in.SSHAuth, &out.SSHAuth + *out = new(ConfigServerGitSettingSSHAuthObservation) + (*in).DeepCopyInto(*out) + } + if in.SearchPaths != nil { + in, out := &in.SearchPaths, &out.SearchPaths + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigServerGitSettingObservation. +func (in *ConfigServerGitSettingObservation) DeepCopy() *ConfigServerGitSettingObservation { + if in == nil { + return nil + } + out := new(ConfigServerGitSettingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigServerGitSettingParameters) DeepCopyInto(out *ConfigServerGitSettingParameters) { + *out = *in + if in.HTTPBasicAuth != nil { + in, out := &in.HTTPBasicAuth, &out.HTTPBasicAuth + *out = new(HTTPBasicAuthParameters) + (*in).DeepCopyInto(*out) + } + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.Repository != nil { + in, out := &in.Repository, &out.Repository + *out = make([]RepositoryParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SSHAuth != nil { + in, out := &in.SSHAuth, &out.SSHAuth + *out = new(ConfigServerGitSettingSSHAuthParameters) + (*in).DeepCopyInto(*out) + } + if in.SearchPaths != nil { + in, out := &in.SearchPaths, &out.SearchPaths + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigServerGitSettingParameters. +func (in *ConfigServerGitSettingParameters) DeepCopy() *ConfigServerGitSettingParameters { + if in == nil { + return nil + } + out := new(ConfigServerGitSettingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigServerGitSettingSSHAuthInitParameters) DeepCopyInto(out *ConfigServerGitSettingSSHAuthInitParameters) { + *out = *in + if in.HostKeyAlgorithm != nil { + in, out := &in.HostKeyAlgorithm, &out.HostKeyAlgorithm + *out = new(string) + **out = **in + } + if in.StrictHostKeyCheckingEnabled != nil { + in, out := &in.StrictHostKeyCheckingEnabled, &out.StrictHostKeyCheckingEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigServerGitSettingSSHAuthInitParameters. +func (in *ConfigServerGitSettingSSHAuthInitParameters) DeepCopy() *ConfigServerGitSettingSSHAuthInitParameters { + if in == nil { + return nil + } + out := new(ConfigServerGitSettingSSHAuthInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigServerGitSettingSSHAuthObservation) DeepCopyInto(out *ConfigServerGitSettingSSHAuthObservation) { + *out = *in + if in.HostKeyAlgorithm != nil { + in, out := &in.HostKeyAlgorithm, &out.HostKeyAlgorithm + *out = new(string) + **out = **in + } + if in.StrictHostKeyCheckingEnabled != nil { + in, out := &in.StrictHostKeyCheckingEnabled, &out.StrictHostKeyCheckingEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigServerGitSettingSSHAuthObservation. +func (in *ConfigServerGitSettingSSHAuthObservation) DeepCopy() *ConfigServerGitSettingSSHAuthObservation { + if in == nil { + return nil + } + out := new(ConfigServerGitSettingSSHAuthObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigServerGitSettingSSHAuthParameters) DeepCopyInto(out *ConfigServerGitSettingSSHAuthParameters) { + *out = *in + if in.HostKeyAlgorithm != nil { + in, out := &in.HostKeyAlgorithm, &out.HostKeyAlgorithm + *out = new(string) + **out = **in + } + if in.HostKeySecretRef != nil { + in, out := &in.HostKeySecretRef, &out.HostKeySecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + out.PrivateKeySecretRef = in.PrivateKeySecretRef + if in.StrictHostKeyCheckingEnabled != nil { + in, out := &in.StrictHostKeyCheckingEnabled, &out.StrictHostKeyCheckingEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigServerGitSettingSSHAuthParameters. +func (in *ConfigServerGitSettingSSHAuthParameters) DeepCopy() *ConfigServerGitSettingSSHAuthParameters { + if in == nil { + return nil + } + out := new(ConfigServerGitSettingSSHAuthParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerRegistryInitParameters) DeepCopyInto(out *ContainerRegistryInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerRegistryInitParameters. +func (in *ContainerRegistryInitParameters) DeepCopy() *ContainerRegistryInitParameters { + if in == nil { + return nil + } + out := new(ContainerRegistryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContainerRegistryObservation) DeepCopyInto(out *ContainerRegistryObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerRegistryObservation. +func (in *ContainerRegistryObservation) DeepCopy() *ContainerRegistryObservation { + if in == nil { + return nil + } + out := new(ContainerRegistryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerRegistryParameters) DeepCopyInto(out *ContainerRegistryParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + out.PasswordSecretRef = in.PasswordSecretRef + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerRegistryParameters. +func (in *ContainerRegistryParameters) DeepCopy() *ContainerRegistryParameters { + if in == nil { + return nil + } + out := new(ContainerRegistryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CorsInitParameters) DeepCopyInto(out *CorsInitParameters) { + *out = *in + if in.AllowedHeaders != nil { + in, out := &in.AllowedHeaders, &out.AllowedHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedMethods != nil { + in, out := &in.AllowedMethods, &out.AllowedMethods + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedOriginPatterns != nil { + in, out := &in.AllowedOriginPatterns, &out.AllowedOriginPatterns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedOrigins != nil { + in, out := &in.AllowedOrigins, &out.AllowedOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CredentialsAllowed != nil { + in, out := &in.CredentialsAllowed, &out.CredentialsAllowed + *out = new(bool) + **out = **in + } + if in.ExposedHeaders != nil { + in, out := &in.ExposedHeaders, &out.ExposedHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MaxAgeSeconds != nil { + in, out := &in.MaxAgeSeconds, &out.MaxAgeSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CorsInitParameters. +func (in *CorsInitParameters) DeepCopy() *CorsInitParameters { + if in == nil { + return nil + } + out := new(CorsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *CorsObservation) DeepCopyInto(out *CorsObservation) { + *out = *in + if in.AllowedHeaders != nil { + in, out := &in.AllowedHeaders, &out.AllowedHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedMethods != nil { + in, out := &in.AllowedMethods, &out.AllowedMethods + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedOriginPatterns != nil { + in, out := &in.AllowedOriginPatterns, &out.AllowedOriginPatterns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedOrigins != nil { + in, out := &in.AllowedOrigins, &out.AllowedOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CredentialsAllowed != nil { + in, out := &in.CredentialsAllowed, &out.CredentialsAllowed + *out = new(bool) + **out = **in + } + if in.ExposedHeaders != nil { + in, out := &in.ExposedHeaders, &out.ExposedHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MaxAgeSeconds != nil { + in, out := &in.MaxAgeSeconds, &out.MaxAgeSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CorsObservation. +func (in *CorsObservation) DeepCopy() *CorsObservation { + if in == nil { + return nil + } + out := new(CorsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *CorsParameters) DeepCopyInto(out *CorsParameters) { + *out = *in + if in.AllowedHeaders != nil { + in, out := &in.AllowedHeaders, &out.AllowedHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedMethods != nil { + in, out := &in.AllowedMethods, &out.AllowedMethods + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedOriginPatterns != nil { + in, out := &in.AllowedOriginPatterns, &out.AllowedOriginPatterns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedOrigins != nil { + in, out := &in.AllowedOrigins, &out.AllowedOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CredentialsAllowed != nil { + in, out := &in.CredentialsAllowed, &out.CredentialsAllowed + *out = new(bool) + **out = **in + } + if in.ExposedHeaders != nil { + in, out := &in.ExposedHeaders, &out.ExposedHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MaxAgeSeconds != nil { + in, out := &in.MaxAgeSeconds, &out.MaxAgeSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CorsParameters. +func (in *CorsParameters) DeepCopy() *CorsParameters { + if in == nil { + return nil + } + out := new(CorsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *CustomPersistentDiskInitParameters) DeepCopyInto(out *CustomPersistentDiskInitParameters) { + *out = *in + if in.MountOptions != nil { + in, out := &in.MountOptions, &out.MountOptions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MountPath != nil { + in, out := &in.MountPath, &out.MountPath + *out = new(string) + **out = **in + } + if in.ReadOnlyEnabled != nil { + in, out := &in.ReadOnlyEnabled, &out.ReadOnlyEnabled + *out = new(bool) + **out = **in + } + if in.ShareName != nil { + in, out := &in.ShareName, &out.ShareName + *out = new(string) + **out = **in + } + if in.StorageName != nil { + in, out := &in.StorageName, &out.StorageName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomPersistentDiskInitParameters. +func (in *CustomPersistentDiskInitParameters) DeepCopy() *CustomPersistentDiskInitParameters { + if in == nil { + return nil + } + out := new(CustomPersistentDiskInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomPersistentDiskObservation) DeepCopyInto(out *CustomPersistentDiskObservation) { + *out = *in + if in.MountOptions != nil { + in, out := &in.MountOptions, &out.MountOptions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MountPath != nil { + in, out := &in.MountPath, &out.MountPath + *out = new(string) + **out = **in + } + if in.ReadOnlyEnabled != nil { + in, out := &in.ReadOnlyEnabled, &out.ReadOnlyEnabled + *out = new(bool) + **out = **in + } + if in.ShareName != nil { + in, out := &in.ShareName, &out.ShareName + *out = new(string) + **out = **in + } + if in.StorageName != nil { + in, out := &in.StorageName, &out.StorageName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomPersistentDiskObservation. +func (in *CustomPersistentDiskObservation) DeepCopy() *CustomPersistentDiskObservation { + if in == nil { + return nil + } + out := new(CustomPersistentDiskObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomPersistentDiskParameters) DeepCopyInto(out *CustomPersistentDiskParameters) { + *out = *in + if in.MountOptions != nil { + in, out := &in.MountOptions, &out.MountOptions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MountPath != nil { + in, out := &in.MountPath, &out.MountPath + *out = new(string) + **out = **in + } + if in.ReadOnlyEnabled != nil { + in, out := &in.ReadOnlyEnabled, &out.ReadOnlyEnabled + *out = new(bool) + **out = **in + } + if in.ShareName != nil { + in, out := &in.ShareName, &out.ShareName + *out = new(string) + **out = **in + } + if in.StorageName != nil { + in, out := &in.StorageName, &out.StorageName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomPersistentDiskParameters. +func (in *CustomPersistentDiskParameters) DeepCopy() *CustomPersistentDiskParameters { + if in == nil { + return nil + } + out := new(CustomPersistentDiskParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DefaultBuildServiceInitParameters) DeepCopyInto(out *DefaultBuildServiceInitParameters) { + *out = *in + if in.ContainerRegistryName != nil { + in, out := &in.ContainerRegistryName, &out.ContainerRegistryName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultBuildServiceInitParameters. +func (in *DefaultBuildServiceInitParameters) DeepCopy() *DefaultBuildServiceInitParameters { + if in == nil { + return nil + } + out := new(DefaultBuildServiceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DefaultBuildServiceObservation) DeepCopyInto(out *DefaultBuildServiceObservation) { + *out = *in + if in.ContainerRegistryName != nil { + in, out := &in.ContainerRegistryName, &out.ContainerRegistryName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultBuildServiceObservation. +func (in *DefaultBuildServiceObservation) DeepCopy() *DefaultBuildServiceObservation { + if in == nil { + return nil + } + out := new(DefaultBuildServiceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DefaultBuildServiceParameters) DeepCopyInto(out *DefaultBuildServiceParameters) { + *out = *in + if in.ContainerRegistryName != nil { + in, out := &in.ContainerRegistryName, &out.ContainerRegistryName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultBuildServiceParameters. +func (in *DefaultBuildServiceParameters) DeepCopy() *DefaultBuildServiceParameters { + if in == nil { + return nil + } + out := new(DefaultBuildServiceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GitRepositoryInitParameters) DeepCopyInto(out *GitRepositoryInitParameters) { + *out = *in + if in.BasicAuth != nil { + in, out := &in.BasicAuth, &out.BasicAuth + *out = new(BasicAuthInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Branch != nil { + in, out := &in.Branch, &out.Branch + *out = new(string) + **out = **in + } + if in.CACertificateID != nil { + in, out := &in.CACertificateID, &out.CACertificateID + *out = new(string) + **out = **in + } + if in.Commit != nil { + in, out := &in.Commit, &out.Commit + *out = new(string) + **out = **in + } + if in.GitTag != nil { + in, out := &in.GitTag, &out.GitTag + *out = new(string) + **out = **in + } + if in.IntervalInSeconds != nil { + in, out := &in.IntervalInSeconds, &out.IntervalInSeconds + *out = new(float64) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.SSHAuth != nil { + in, out := &in.SSHAuth, &out.SSHAuth + *out = new(SSHAuthInitParameters) + (*in).DeepCopyInto(*out) + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryInitParameters. +func (in *GitRepositoryInitParameters) DeepCopy() *GitRepositoryInitParameters { + if in == nil { + return nil + } + out := new(GitRepositoryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GitRepositoryObservation) DeepCopyInto(out *GitRepositoryObservation) { + *out = *in + if in.BasicAuth != nil { + in, out := &in.BasicAuth, &out.BasicAuth + *out = new(BasicAuthObservation) + (*in).DeepCopyInto(*out) + } + if in.Branch != nil { + in, out := &in.Branch, &out.Branch + *out = new(string) + **out = **in + } + if in.CACertificateID != nil { + in, out := &in.CACertificateID, &out.CACertificateID + *out = new(string) + **out = **in + } + if in.Commit != nil { + in, out := &in.Commit, &out.Commit + *out = new(string) + **out = **in + } + if in.GitTag != nil { + in, out := &in.GitTag, &out.GitTag + *out = new(string) + **out = **in + } + if in.IntervalInSeconds != nil { + in, out := &in.IntervalInSeconds, &out.IntervalInSeconds + *out = new(float64) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.SSHAuth != nil { + in, out := &in.SSHAuth, &out.SSHAuth + *out = new(SSHAuthObservation) + (*in).DeepCopyInto(*out) + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryObservation. +func (in *GitRepositoryObservation) DeepCopy() *GitRepositoryObservation { + if in == nil { + return nil + } + out := new(GitRepositoryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GitRepositoryParameters) DeepCopyInto(out *GitRepositoryParameters) { + *out = *in + if in.BasicAuth != nil { + in, out := &in.BasicAuth, &out.BasicAuth + *out = new(BasicAuthParameters) + (*in).DeepCopyInto(*out) + } + if in.Branch != nil { + in, out := &in.Branch, &out.Branch + *out = new(string) + **out = **in + } + if in.CACertificateID != nil { + in, out := &in.CACertificateID, &out.CACertificateID + *out = new(string) + **out = **in + } + if in.Commit != nil { + in, out := &in.Commit, &out.Commit + *out = new(string) + **out = **in + } + if in.GitTag != nil { + in, out := &in.GitTag, &out.GitTag + *out = new(string) + **out = **in + } + if in.IntervalInSeconds != nil { + in, out := &in.IntervalInSeconds, &out.IntervalInSeconds + *out = new(float64) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.SSHAuth != nil { + in, out := &in.SSHAuth, &out.SSHAuth + *out = new(SSHAuthParameters) + (*in).DeepCopyInto(*out) + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryParameters. +func (in *GitRepositoryParameters) DeepCopy() *GitRepositoryParameters { + if in == nil { + return nil + } + out := new(GitRepositoryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPBasicAuthInitParameters) DeepCopyInto(out *HTTPBasicAuthInitParameters) { + *out = *in + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPBasicAuthInitParameters. 
+func (in *HTTPBasicAuthInitParameters) DeepCopy() *HTTPBasicAuthInitParameters { + if in == nil { + return nil + } + out := new(HTTPBasicAuthInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPBasicAuthObservation) DeepCopyInto(out *HTTPBasicAuthObservation) { + *out = *in + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPBasicAuthObservation. +func (in *HTTPBasicAuthObservation) DeepCopy() *HTTPBasicAuthObservation { + if in == nil { + return nil + } + out := new(HTTPBasicAuthObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPBasicAuthParameters) DeepCopyInto(out *HTTPBasicAuthParameters) { + *out = *in + out.PasswordSecretRef = in.PasswordSecretRef + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPBasicAuthParameters. +func (in *HTTPBasicAuthParameters) DeepCopy() *HTTPBasicAuthParameters { + if in == nil { + return nil + } + out := new(HTTPBasicAuthParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentityInitParameters) DeepCopyInto(out *IdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityInitParameters. +func (in *IdentityInitParameters) DeepCopy() *IdentityInitParameters { + if in == nil { + return nil + } + out := new(IdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityObservation) DeepCopyInto(out *IdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityObservation. +func (in *IdentityObservation) DeepCopy() *IdentityObservation { + if in == nil { + return nil + } + out := new(IdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentityParameters) DeepCopyInto(out *IdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityParameters. +func (in *IdentityParameters) DeepCopy() *IdentityParameters { + if in == nil { + return nil + } + out := new(IdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressSettingsInitParameters) DeepCopyInto(out *IngressSettingsInitParameters) { + *out = *in + if in.BackendProtocol != nil { + in, out := &in.BackendProtocol, &out.BackendProtocol + *out = new(string) + **out = **in + } + if in.ReadTimeoutInSeconds != nil { + in, out := &in.ReadTimeoutInSeconds, &out.ReadTimeoutInSeconds + *out = new(float64) + **out = **in + } + if in.SendTimeoutInSeconds != nil { + in, out := &in.SendTimeoutInSeconds, &out.SendTimeoutInSeconds + *out = new(float64) + **out = **in + } + if in.SessionAffinity != nil { + in, out := &in.SessionAffinity, &out.SessionAffinity + *out = new(string) + **out = **in + } + if in.SessionCookieMaxAge != nil { + in, out := &in.SessionCookieMaxAge, &out.SessionCookieMaxAge + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressSettingsInitParameters. 
+func (in *IngressSettingsInitParameters) DeepCopy() *IngressSettingsInitParameters { + if in == nil { + return nil + } + out := new(IngressSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressSettingsObservation) DeepCopyInto(out *IngressSettingsObservation) { + *out = *in + if in.BackendProtocol != nil { + in, out := &in.BackendProtocol, &out.BackendProtocol + *out = new(string) + **out = **in + } + if in.ReadTimeoutInSeconds != nil { + in, out := &in.ReadTimeoutInSeconds, &out.ReadTimeoutInSeconds + *out = new(float64) + **out = **in + } + if in.SendTimeoutInSeconds != nil { + in, out := &in.SendTimeoutInSeconds, &out.SendTimeoutInSeconds + *out = new(float64) + **out = **in + } + if in.SessionAffinity != nil { + in, out := &in.SessionAffinity, &out.SessionAffinity + *out = new(string) + **out = **in + } + if in.SessionCookieMaxAge != nil { + in, out := &in.SessionCookieMaxAge, &out.SessionCookieMaxAge + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressSettingsObservation. +func (in *IngressSettingsObservation) DeepCopy() *IngressSettingsObservation { + if in == nil { + return nil + } + out := new(IngressSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IngressSettingsParameters) DeepCopyInto(out *IngressSettingsParameters) { + *out = *in + if in.BackendProtocol != nil { + in, out := &in.BackendProtocol, &out.BackendProtocol + *out = new(string) + **out = **in + } + if in.ReadTimeoutInSeconds != nil { + in, out := &in.ReadTimeoutInSeconds, &out.ReadTimeoutInSeconds + *out = new(float64) + **out = **in + } + if in.SendTimeoutInSeconds != nil { + in, out := &in.SendTimeoutInSeconds, &out.SendTimeoutInSeconds + *out = new(float64) + **out = **in + } + if in.SessionAffinity != nil { + in, out := &in.SessionAffinity, &out.SessionAffinity + *out = new(string) + **out = **in + } + if in.SessionCookieMaxAge != nil { + in, out := &in.SessionCookieMaxAge, &out.SessionCookieMaxAge + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressSettingsParameters. +func (in *IngressSettingsParameters) DeepCopy() *IngressSettingsParameters { + if in == nil { + return nil + } + out := new(IngressSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LaunchInitParameters) DeepCopyInto(out *LaunchInitParameters) { + *out = *in + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchInitParameters. +func (in *LaunchInitParameters) DeepCopy() *LaunchInitParameters { + if in == nil { + return nil + } + out := new(LaunchInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LaunchObservation) DeepCopyInto(out *LaunchObservation) { + *out = *in + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchObservation. +func (in *LaunchObservation) DeepCopy() *LaunchObservation { + if in == nil { + return nil + } + out := new(LaunchObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LaunchParameters) DeepCopyInto(out *LaunchParameters) { + *out = *in + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchParameters. +func (in *LaunchParameters) DeepCopy() *LaunchParameters { + if in == nil { + return nil + } + out := new(LaunchParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalResponseCachePerInstanceInitParameters) DeepCopyInto(out *LocalResponseCachePerInstanceInitParameters) { + *out = *in + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(string) + **out = **in + } + if in.TimeToLive != nil { + in, out := &in.TimeToLive, &out.TimeToLive + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalResponseCachePerInstanceInitParameters. +func (in *LocalResponseCachePerInstanceInitParameters) DeepCopy() *LocalResponseCachePerInstanceInitParameters { + if in == nil { + return nil + } + out := new(LocalResponseCachePerInstanceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LocalResponseCachePerInstanceObservation) DeepCopyInto(out *LocalResponseCachePerInstanceObservation) { + *out = *in + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(string) + **out = **in + } + if in.TimeToLive != nil { + in, out := &in.TimeToLive, &out.TimeToLive + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalResponseCachePerInstanceObservation. +func (in *LocalResponseCachePerInstanceObservation) DeepCopy() *LocalResponseCachePerInstanceObservation { + if in == nil { + return nil + } + out := new(LocalResponseCachePerInstanceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalResponseCachePerInstanceParameters) DeepCopyInto(out *LocalResponseCachePerInstanceParameters) { + *out = *in + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(string) + **out = **in + } + if in.TimeToLive != nil { + in, out := &in.TimeToLive, &out.TimeToLive + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalResponseCachePerInstanceParameters. +func (in *LocalResponseCachePerInstanceParameters) DeepCopy() *LocalResponseCachePerInstanceParameters { + if in == nil { + return nil + } + out := new(LocalResponseCachePerInstanceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LocalResponseCachePerRouteInitParameters) DeepCopyInto(out *LocalResponseCachePerRouteInitParameters) { + *out = *in + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(string) + **out = **in + } + if in.TimeToLive != nil { + in, out := &in.TimeToLive, &out.TimeToLive + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalResponseCachePerRouteInitParameters. +func (in *LocalResponseCachePerRouteInitParameters) DeepCopy() *LocalResponseCachePerRouteInitParameters { + if in == nil { + return nil + } + out := new(LocalResponseCachePerRouteInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalResponseCachePerRouteObservation) DeepCopyInto(out *LocalResponseCachePerRouteObservation) { + *out = *in + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(string) + **out = **in + } + if in.TimeToLive != nil { + in, out := &in.TimeToLive, &out.TimeToLive + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalResponseCachePerRouteObservation. +func (in *LocalResponseCachePerRouteObservation) DeepCopy() *LocalResponseCachePerRouteObservation { + if in == nil { + return nil + } + out := new(LocalResponseCachePerRouteObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LocalResponseCachePerRouteParameters) DeepCopyInto(out *LocalResponseCachePerRouteParameters) { + *out = *in + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(string) + **out = **in + } + if in.TimeToLive != nil { + in, out := &in.TimeToLive, &out.TimeToLive + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalResponseCachePerRouteParameters. +func (in *LocalResponseCachePerRouteParameters) DeepCopy() *LocalResponseCachePerRouteParameters { + if in == nil { + return nil + } + out := new(LocalResponseCachePerRouteParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MarketplaceInitParameters) DeepCopyInto(out *MarketplaceInitParameters) { + *out = *in + if in.Plan != nil { + in, out := &in.Plan, &out.Plan + *out = new(string) + **out = **in + } + if in.Product != nil { + in, out := &in.Product, &out.Product + *out = new(string) + **out = **in + } + if in.Publisher != nil { + in, out := &in.Publisher, &out.Publisher + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MarketplaceInitParameters. +func (in *MarketplaceInitParameters) DeepCopy() *MarketplaceInitParameters { + if in == nil { + return nil + } + out := new(MarketplaceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MarketplaceObservation) DeepCopyInto(out *MarketplaceObservation) { + *out = *in + if in.Plan != nil { + in, out := &in.Plan, &out.Plan + *out = new(string) + **out = **in + } + if in.Product != nil { + in, out := &in.Product, &out.Product + *out = new(string) + **out = **in + } + if in.Publisher != nil { + in, out := &in.Publisher, &out.Publisher + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MarketplaceObservation. +func (in *MarketplaceObservation) DeepCopy() *MarketplaceObservation { + if in == nil { + return nil + } + out := new(MarketplaceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MarketplaceParameters) DeepCopyInto(out *MarketplaceParameters) { + *out = *in + if in.Plan != nil { + in, out := &in.Plan, &out.Plan + *out = new(string) + **out = **in + } + if in.Product != nil { + in, out := &in.Product, &out.Product + *out = new(string) + **out = **in + } + if in.Publisher != nil { + in, out := &in.Publisher, &out.Publisher + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MarketplaceParameters. +func (in *MarketplaceParameters) DeepCopy() *MarketplaceParameters { + if in == nil { + return nil + } + out := new(MarketplaceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkInitParameters) DeepCopyInto(out *NetworkInitParameters) { + *out = *in + if in.AppNetworkResourceGroup != nil { + in, out := &in.AppNetworkResourceGroup, &out.AppNetworkResourceGroup + *out = new(string) + **out = **in + } + if in.AppSubnetID != nil { + in, out := &in.AppSubnetID, &out.AppSubnetID + *out = new(string) + **out = **in + } + if in.AppSubnetIDRef != nil { + in, out := &in.AppSubnetIDRef, &out.AppSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AppSubnetIDSelector != nil { + in, out := &in.AppSubnetIDSelector, &out.AppSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.CidrRanges != nil { + in, out := &in.CidrRanges, &out.CidrRanges + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OutboundType != nil { + in, out := &in.OutboundType, &out.OutboundType + *out = new(string) + **out = **in + } + if in.ReadTimeoutSeconds != nil { + in, out := &in.ReadTimeoutSeconds, &out.ReadTimeoutSeconds + *out = new(float64) + **out = **in + } + if in.ServiceRuntimeNetworkResourceGroup != nil { + in, out := &in.ServiceRuntimeNetworkResourceGroup, &out.ServiceRuntimeNetworkResourceGroup + *out = new(string) + **out = **in + } + if in.ServiceRuntimeSubnetID != nil { + in, out := &in.ServiceRuntimeSubnetID, &out.ServiceRuntimeSubnetID + *out = new(string) + **out = **in + } + if in.ServiceRuntimeSubnetIDRef != nil { + in, out := &in.ServiceRuntimeSubnetIDRef, &out.ServiceRuntimeSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceRuntimeSubnetIDSelector != nil { + in, out := &in.ServiceRuntimeSubnetIDSelector, &out.ServiceRuntimeSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInitParameters. 
+func (in *NetworkInitParameters) DeepCopy() *NetworkInitParameters { + if in == nil { + return nil + } + out := new(NetworkInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkObservation) DeepCopyInto(out *NetworkObservation) { + *out = *in + if in.AppNetworkResourceGroup != nil { + in, out := &in.AppNetworkResourceGroup, &out.AppNetworkResourceGroup + *out = new(string) + **out = **in + } + if in.AppSubnetID != nil { + in, out := &in.AppSubnetID, &out.AppSubnetID + *out = new(string) + **out = **in + } + if in.CidrRanges != nil { + in, out := &in.CidrRanges, &out.CidrRanges + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OutboundType != nil { + in, out := &in.OutboundType, &out.OutboundType + *out = new(string) + **out = **in + } + if in.ReadTimeoutSeconds != nil { + in, out := &in.ReadTimeoutSeconds, &out.ReadTimeoutSeconds + *out = new(float64) + **out = **in + } + if in.ServiceRuntimeNetworkResourceGroup != nil { + in, out := &in.ServiceRuntimeNetworkResourceGroup, &out.ServiceRuntimeNetworkResourceGroup + *out = new(string) + **out = **in + } + if in.ServiceRuntimeSubnetID != nil { + in, out := &in.ServiceRuntimeSubnetID, &out.ServiceRuntimeSubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkObservation. +func (in *NetworkObservation) DeepCopy() *NetworkObservation { + if in == nil { + return nil + } + out := new(NetworkObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkParameters) DeepCopyInto(out *NetworkParameters) { + *out = *in + if in.AppNetworkResourceGroup != nil { + in, out := &in.AppNetworkResourceGroup, &out.AppNetworkResourceGroup + *out = new(string) + **out = **in + } + if in.AppSubnetID != nil { + in, out := &in.AppSubnetID, &out.AppSubnetID + *out = new(string) + **out = **in + } + if in.AppSubnetIDRef != nil { + in, out := &in.AppSubnetIDRef, &out.AppSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AppSubnetIDSelector != nil { + in, out := &in.AppSubnetIDSelector, &out.AppSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.CidrRanges != nil { + in, out := &in.CidrRanges, &out.CidrRanges + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OutboundType != nil { + in, out := &in.OutboundType, &out.OutboundType + *out = new(string) + **out = **in + } + if in.ReadTimeoutSeconds != nil { + in, out := &in.ReadTimeoutSeconds, &out.ReadTimeoutSeconds + *out = new(float64) + **out = **in + } + if in.ServiceRuntimeNetworkResourceGroup != nil { + in, out := &in.ServiceRuntimeNetworkResourceGroup, &out.ServiceRuntimeNetworkResourceGroup + *out = new(string) + **out = **in + } + if in.ServiceRuntimeSubnetID != nil { + in, out := &in.ServiceRuntimeSubnetID, &out.ServiceRuntimeSubnetID + *out = new(string) + **out = **in + } + if in.ServiceRuntimeSubnetIDRef != nil { + in, out := &in.ServiceRuntimeSubnetIDRef, &out.ServiceRuntimeSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceRuntimeSubnetIDSelector != nil { + in, out := &in.ServiceRuntimeSubnetIDSelector, &out.ServiceRuntimeSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkParameters. 
+func (in *NetworkParameters) DeepCopy() *NetworkParameters { + if in == nil { + return nil + } + out := new(NetworkParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PersistentDiskInitParameters) DeepCopyInto(out *PersistentDiskInitParameters) { + *out = *in + if in.MountPath != nil { + in, out := &in.MountPath, &out.MountPath + *out = new(string) + **out = **in + } + if in.SizeInGb != nil { + in, out := &in.SizeInGb, &out.SizeInGb + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentDiskInitParameters. +func (in *PersistentDiskInitParameters) DeepCopy() *PersistentDiskInitParameters { + if in == nil { + return nil + } + out := new(PersistentDiskInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PersistentDiskObservation) DeepCopyInto(out *PersistentDiskObservation) { + *out = *in + if in.MountPath != nil { + in, out := &in.MountPath, &out.MountPath + *out = new(string) + **out = **in + } + if in.SizeInGb != nil { + in, out := &in.SizeInGb, &out.SizeInGb + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentDiskObservation. +func (in *PersistentDiskObservation) DeepCopy() *PersistentDiskObservation { + if in == nil { + return nil + } + out := new(PersistentDiskObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PersistentDiskParameters) DeepCopyInto(out *PersistentDiskParameters) { + *out = *in + if in.MountPath != nil { + in, out := &in.MountPath, &out.MountPath + *out = new(string) + **out = **in + } + if in.SizeInGb != nil { + in, out := &in.SizeInGb, &out.SizeInGb + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentDiskParameters. +func (in *PersistentDiskParameters) DeepCopy() *PersistentDiskParameters { + if in == nil { + return nil + } + out := new(PersistentDiskParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QuotaInitParameters) DeepCopyInto(out *QuotaInitParameters) { + *out = *in + if in.CPU != nil { + in, out := &in.CPU, &out.CPU + *out = new(string) + **out = **in + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuotaInitParameters. +func (in *QuotaInitParameters) DeepCopy() *QuotaInitParameters { + if in == nil { + return nil + } + out := new(QuotaInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QuotaObservation) DeepCopyInto(out *QuotaObservation) { + *out = *in + if in.CPU != nil { + in, out := &in.CPU, &out.CPU + *out = new(string) + **out = **in + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuotaObservation. 
+func (in *QuotaObservation) DeepCopy() *QuotaObservation { + if in == nil { + return nil + } + out := new(QuotaObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QuotaParameters) DeepCopyInto(out *QuotaParameters) { + *out = *in + if in.CPU != nil { + in, out := &in.CPU, &out.CPU + *out = new(string) + **out = **in + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuotaParameters. +func (in *QuotaParameters) DeepCopy() *QuotaParameters { + if in == nil { + return nil + } + out := new(QuotaParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RepositoryHTTPBasicAuthInitParameters) DeepCopyInto(out *RepositoryHTTPBasicAuthInitParameters) { + *out = *in + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryHTTPBasicAuthInitParameters. +func (in *RepositoryHTTPBasicAuthInitParameters) DeepCopy() *RepositoryHTTPBasicAuthInitParameters { + if in == nil { + return nil + } + out := new(RepositoryHTTPBasicAuthInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RepositoryHTTPBasicAuthObservation) DeepCopyInto(out *RepositoryHTTPBasicAuthObservation) { + *out = *in + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryHTTPBasicAuthObservation. +func (in *RepositoryHTTPBasicAuthObservation) DeepCopy() *RepositoryHTTPBasicAuthObservation { + if in == nil { + return nil + } + out := new(RepositoryHTTPBasicAuthObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RepositoryHTTPBasicAuthParameters) DeepCopyInto(out *RepositoryHTTPBasicAuthParameters) { + *out = *in + out.PasswordSecretRef = in.PasswordSecretRef + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryHTTPBasicAuthParameters. +func (in *RepositoryHTTPBasicAuthParameters) DeepCopy() *RepositoryHTTPBasicAuthParameters { + if in == nil { + return nil + } + out := new(RepositoryHTTPBasicAuthParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RepositoryInitParameters) DeepCopyInto(out *RepositoryInitParameters) { + *out = *in + if in.HTTPBasicAuth != nil { + in, out := &in.HTTPBasicAuth, &out.HTTPBasicAuth + *out = new(RepositoryHTTPBasicAuthInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Pattern != nil { + in, out := &in.Pattern, &out.Pattern + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SSHAuth != nil { + in, out := &in.SSHAuth, &out.SSHAuth + *out = new(RepositorySSHAuthInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SearchPaths != nil { + in, out := &in.SearchPaths, &out.SearchPaths + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryInitParameters. +func (in *RepositoryInitParameters) DeepCopy() *RepositoryInitParameters { + if in == nil { + return nil + } + out := new(RepositoryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RepositoryObservation) DeepCopyInto(out *RepositoryObservation) { + *out = *in + if in.HTTPBasicAuth != nil { + in, out := &in.HTTPBasicAuth, &out.HTTPBasicAuth + *out = new(RepositoryHTTPBasicAuthObservation) + (*in).DeepCopyInto(*out) + } + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Pattern != nil { + in, out := &in.Pattern, &out.Pattern + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SSHAuth != nil { + in, out := &in.SSHAuth, &out.SSHAuth + *out = new(RepositorySSHAuthObservation) + (*in).DeepCopyInto(*out) + } + if in.SearchPaths != nil { + in, out := &in.SearchPaths, &out.SearchPaths + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryObservation. +func (in *RepositoryObservation) DeepCopy() *RepositoryObservation { + if in == nil { + return nil + } + out := new(RepositoryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RepositoryParameters) DeepCopyInto(out *RepositoryParameters) { + *out = *in + if in.HTTPBasicAuth != nil { + in, out := &in.HTTPBasicAuth, &out.HTTPBasicAuth + *out = new(RepositoryHTTPBasicAuthParameters) + (*in).DeepCopyInto(*out) + } + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Pattern != nil { + in, out := &in.Pattern, &out.Pattern + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SSHAuth != nil { + in, out := &in.SSHAuth, &out.SSHAuth + *out = new(RepositorySSHAuthParameters) + (*in).DeepCopyInto(*out) + } + if in.SearchPaths != nil { + in, out := &in.SearchPaths, &out.SearchPaths + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryParameters. +func (in *RepositoryParameters) DeepCopy() *RepositoryParameters { + if in == nil { + return nil + } + out := new(RepositoryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RepositorySSHAuthInitParameters) DeepCopyInto(out *RepositorySSHAuthInitParameters) { + *out = *in + if in.HostKeyAlgorithm != nil { + in, out := &in.HostKeyAlgorithm, &out.HostKeyAlgorithm + *out = new(string) + **out = **in + } + if in.StrictHostKeyCheckingEnabled != nil { + in, out := &in.StrictHostKeyCheckingEnabled, &out.StrictHostKeyCheckingEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositorySSHAuthInitParameters. +func (in *RepositorySSHAuthInitParameters) DeepCopy() *RepositorySSHAuthInitParameters { + if in == nil { + return nil + } + out := new(RepositorySSHAuthInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RepositorySSHAuthObservation) DeepCopyInto(out *RepositorySSHAuthObservation) { + *out = *in + if in.HostKeyAlgorithm != nil { + in, out := &in.HostKeyAlgorithm, &out.HostKeyAlgorithm + *out = new(string) + **out = **in + } + if in.StrictHostKeyCheckingEnabled != nil { + in, out := &in.StrictHostKeyCheckingEnabled, &out.StrictHostKeyCheckingEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositorySSHAuthObservation. +func (in *RepositorySSHAuthObservation) DeepCopy() *RepositorySSHAuthObservation { + if in == nil { + return nil + } + out := new(RepositorySSHAuthObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RepositorySSHAuthParameters) DeepCopyInto(out *RepositorySSHAuthParameters) { + *out = *in + if in.HostKeyAlgorithm != nil { + in, out := &in.HostKeyAlgorithm, &out.HostKeyAlgorithm + *out = new(string) + **out = **in + } + if in.HostKeySecretRef != nil { + in, out := &in.HostKeySecretRef, &out.HostKeySecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + out.PrivateKeySecretRef = in.PrivateKeySecretRef + if in.StrictHostKeyCheckingEnabled != nil { + in, out := &in.StrictHostKeyCheckingEnabled, &out.StrictHostKeyCheckingEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositorySSHAuthParameters. +func (in *RepositorySSHAuthParameters) DeepCopy() *RepositorySSHAuthParameters { + if in == nil { + return nil + } + out := new(RepositorySSHAuthParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RequiredNetworkTrafficRulesInitParameters) DeepCopyInto(out *RequiredNetworkTrafficRulesInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequiredNetworkTrafficRulesInitParameters. +func (in *RequiredNetworkTrafficRulesInitParameters) DeepCopy() *RequiredNetworkTrafficRulesInitParameters { + if in == nil { + return nil + } + out := new(RequiredNetworkTrafficRulesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RequiredNetworkTrafficRulesObservation) DeepCopyInto(out *RequiredNetworkTrafficRulesObservation) { + *out = *in + if in.Direction != nil { + in, out := &in.Direction, &out.Direction + *out = new(string) + **out = **in + } + if in.Fqdns != nil { + in, out := &in.Fqdns, &out.Fqdns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IPAddresses != nil { + in, out := &in.IPAddresses, &out.IPAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequiredNetworkTrafficRulesObservation. +func (in *RequiredNetworkTrafficRulesObservation) DeepCopy() *RequiredNetworkTrafficRulesObservation { + if in == nil { + return nil + } + out := new(RequiredNetworkTrafficRulesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RequiredNetworkTrafficRulesParameters) DeepCopyInto(out *RequiredNetworkTrafficRulesParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequiredNetworkTrafficRulesParameters. +func (in *RequiredNetworkTrafficRulesParameters) DeepCopy() *RequiredNetworkTrafficRulesParameters { + if in == nil { + return nil + } + out := new(RequiredNetworkTrafficRulesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SSHAuthInitParameters) DeepCopyInto(out *SSHAuthInitParameters) { + *out = *in + if in.HostKeyAlgorithm != nil { + in, out := &in.HostKeyAlgorithm, &out.HostKeyAlgorithm + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SSHAuthInitParameters. +func (in *SSHAuthInitParameters) DeepCopy() *SSHAuthInitParameters { + if in == nil { + return nil + } + out := new(SSHAuthInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SSHAuthObservation) DeepCopyInto(out *SSHAuthObservation) { + *out = *in + if in.HostKeyAlgorithm != nil { + in, out := &in.HostKeyAlgorithm, &out.HostKeyAlgorithm + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SSHAuthObservation. +func (in *SSHAuthObservation) DeepCopy() *SSHAuthObservation { + if in == nil { + return nil + } + out := new(SSHAuthObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SSHAuthParameters) DeepCopyInto(out *SSHAuthParameters) { + *out = *in + if in.HostKeyAlgorithm != nil { + in, out := &in.HostKeyAlgorithm, &out.HostKeyAlgorithm + *out = new(string) + **out = **in + } + if in.HostKeySecretRef != nil { + in, out := &in.HostKeySecretRef, &out.HostKeySecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + out.PrivateKeySecretRef = in.PrivateKeySecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SSHAuthParameters. 
+func (in *SSHAuthParameters) DeepCopy() *SSHAuthParameters { + if in == nil { + return nil + } + out := new(SSHAuthParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpringCloudAPIPortal) DeepCopyInto(out *SpringCloudAPIPortal) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudAPIPortal. +func (in *SpringCloudAPIPortal) DeepCopy() *SpringCloudAPIPortal { + if in == nil { + return nil + } + out := new(SpringCloudAPIPortal) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SpringCloudAPIPortal) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpringCloudAPIPortalInitParameters) DeepCopyInto(out *SpringCloudAPIPortalInitParameters) { + *out = *in + if in.APITryOutEnabled != nil { + in, out := &in.APITryOutEnabled, &out.APITryOutEnabled + *out = new(bool) + **out = **in + } + if in.GatewayIds != nil { + in, out := &in.GatewayIds, &out.GatewayIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.GatewayIdsRefs != nil { + in, out := &in.GatewayIdsRefs, &out.GatewayIdsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.GatewayIdsSelector != nil { + in, out := &in.GatewayIdsSelector, &out.GatewayIdsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.HTTPSOnlyEnabled != nil { + in, out := &in.HTTPSOnlyEnabled, &out.HTTPSOnlyEnabled + *out = new(bool) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.Sso != nil { + in, out := &in.Sso, &out.Sso + *out = new(SsoInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudAPIPortalInitParameters. +func (in *SpringCloudAPIPortalInitParameters) DeepCopy() *SpringCloudAPIPortalInitParameters { + if in == nil { + return nil + } + out := new(SpringCloudAPIPortalInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpringCloudAPIPortalList) DeepCopyInto(out *SpringCloudAPIPortalList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SpringCloudAPIPortal, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudAPIPortalList. +func (in *SpringCloudAPIPortalList) DeepCopy() *SpringCloudAPIPortalList { + if in == nil { + return nil + } + out := new(SpringCloudAPIPortalList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SpringCloudAPIPortalList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpringCloudAPIPortalObservation) DeepCopyInto(out *SpringCloudAPIPortalObservation) { + *out = *in + if in.APITryOutEnabled != nil { + in, out := &in.APITryOutEnabled, &out.APITryOutEnabled + *out = new(bool) + **out = **in + } + if in.GatewayIds != nil { + in, out := &in.GatewayIds, &out.GatewayIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.HTTPSOnlyEnabled != nil { + in, out := &in.HTTPSOnlyEnabled, &out.HTTPSOnlyEnabled + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.SpringCloudServiceID != nil { + in, out := &in.SpringCloudServiceID, &out.SpringCloudServiceID + *out = new(string) + **out = **in + } + if in.Sso != nil { + in, out := &in.Sso, &out.Sso + *out = new(SsoObservation) + (*in).DeepCopyInto(*out) + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudAPIPortalObservation. +func (in *SpringCloudAPIPortalObservation) DeepCopy() *SpringCloudAPIPortalObservation { + if in == nil { + return nil + } + out := new(SpringCloudAPIPortalObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpringCloudAPIPortalParameters) DeepCopyInto(out *SpringCloudAPIPortalParameters) { + *out = *in + if in.APITryOutEnabled != nil { + in, out := &in.APITryOutEnabled, &out.APITryOutEnabled + *out = new(bool) + **out = **in + } + if in.GatewayIds != nil { + in, out := &in.GatewayIds, &out.GatewayIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.GatewayIdsRefs != nil { + in, out := &in.GatewayIdsRefs, &out.GatewayIdsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.GatewayIdsSelector != nil { + in, out := &in.GatewayIdsSelector, &out.GatewayIdsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.HTTPSOnlyEnabled != nil { + in, out := &in.HTTPSOnlyEnabled, &out.HTTPSOnlyEnabled + *out = new(bool) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.SpringCloudServiceID != nil { + in, out := &in.SpringCloudServiceID, &out.SpringCloudServiceID + *out = new(string) + **out = **in + } + if in.SpringCloudServiceIDRef != nil { + in, out := &in.SpringCloudServiceIDRef, &out.SpringCloudServiceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SpringCloudServiceIDSelector != nil { + in, out := &in.SpringCloudServiceIDSelector, &out.SpringCloudServiceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Sso != nil { + in, out := &in.Sso, &out.Sso + *out = new(SsoParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudAPIPortalParameters. 
+func (in *SpringCloudAPIPortalParameters) DeepCopy() *SpringCloudAPIPortalParameters { + if in == nil { + return nil + } + out := new(SpringCloudAPIPortalParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpringCloudAPIPortalSpec) DeepCopyInto(out *SpringCloudAPIPortalSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudAPIPortalSpec. +func (in *SpringCloudAPIPortalSpec) DeepCopy() *SpringCloudAPIPortalSpec { + if in == nil { + return nil + } + out := new(SpringCloudAPIPortalSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpringCloudAPIPortalStatus) DeepCopyInto(out *SpringCloudAPIPortalStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudAPIPortalStatus. +func (in *SpringCloudAPIPortalStatus) DeepCopy() *SpringCloudAPIPortalStatus { + if in == nil { + return nil + } + out := new(SpringCloudAPIPortalStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpringCloudApp) DeepCopyInto(out *SpringCloudApp) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudApp. 
+func (in *SpringCloudApp) DeepCopy() *SpringCloudApp { + if in == nil { + return nil + } + out := new(SpringCloudApp) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SpringCloudApp) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpringCloudAppInitParameters) DeepCopyInto(out *SpringCloudAppInitParameters) { + *out = *in + if in.AddonJSON != nil { + in, out := &in.AddonJSON, &out.AddonJSON + *out = new(string) + **out = **in + } + if in.CustomPersistentDisk != nil { + in, out := &in.CustomPersistentDisk, &out.CustomPersistentDisk + *out = make([]CustomPersistentDiskInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HTTPSOnly != nil { + in, out := &in.HTTPSOnly, &out.HTTPSOnly + *out = new(bool) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.IngressSettings != nil { + in, out := &in.IngressSettings, &out.IngressSettings + *out = new(IngressSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.IsPublic != nil { + in, out := &in.IsPublic, &out.IsPublic + *out = new(bool) + **out = **in + } + if in.PersistentDisk != nil { + in, out := &in.PersistentDisk, &out.PersistentDisk + *out = new(PersistentDiskInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PublicEndpointEnabled != nil { + in, out := &in.PublicEndpointEnabled, &out.PublicEndpointEnabled + *out = new(bool) + **out = **in + } + if in.TLSEnabled != nil { + in, out := &in.TLSEnabled, &out.TLSEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
SpringCloudAppInitParameters. +func (in *SpringCloudAppInitParameters) DeepCopy() *SpringCloudAppInitParameters { + if in == nil { + return nil + } + out := new(SpringCloudAppInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpringCloudAppList) DeepCopyInto(out *SpringCloudAppList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SpringCloudApp, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudAppList. +func (in *SpringCloudAppList) DeepCopy() *SpringCloudAppList { + if in == nil { + return nil + } + out := new(SpringCloudAppList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SpringCloudAppList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpringCloudAppObservation) DeepCopyInto(out *SpringCloudAppObservation) { + *out = *in + if in.AddonJSON != nil { + in, out := &in.AddonJSON, &out.AddonJSON + *out = new(string) + **out = **in + } + if in.CustomPersistentDisk != nil { + in, out := &in.CustomPersistentDisk, &out.CustomPersistentDisk + *out = make([]CustomPersistentDiskObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Fqdn != nil { + in, out := &in.Fqdn, &out.Fqdn + *out = new(string) + **out = **in + } + if in.HTTPSOnly != nil { + in, out := &in.HTTPSOnly, &out.HTTPSOnly + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.IngressSettings != nil { + in, out := &in.IngressSettings, &out.IngressSettings + *out = new(IngressSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.IsPublic != nil { + in, out := &in.IsPublic, &out.IsPublic + *out = new(bool) + **out = **in + } + if in.PersistentDisk != nil { + in, out := &in.PersistentDisk, &out.PersistentDisk + *out = new(PersistentDiskObservation) + (*in).DeepCopyInto(*out) + } + if in.PublicEndpointEnabled != nil { + in, out := &in.PublicEndpointEnabled, &out.PublicEndpointEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.TLSEnabled != nil { + in, out := &in.TLSEnabled, &out.TLSEnabled + *out = new(bool) + **out = **in + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudAppObservation. 
+func (in *SpringCloudAppObservation) DeepCopy() *SpringCloudAppObservation { + if in == nil { + return nil + } + out := new(SpringCloudAppObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpringCloudAppParameters) DeepCopyInto(out *SpringCloudAppParameters) { + *out = *in + if in.AddonJSON != nil { + in, out := &in.AddonJSON, &out.AddonJSON + *out = new(string) + **out = **in + } + if in.CustomPersistentDisk != nil { + in, out := &in.CustomPersistentDisk, &out.CustomPersistentDisk + *out = make([]CustomPersistentDiskParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HTTPSOnly != nil { + in, out := &in.HTTPSOnly, &out.HTTPSOnly + *out = new(bool) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.IngressSettings != nil { + in, out := &in.IngressSettings, &out.IngressSettings + *out = new(IngressSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.IsPublic != nil { + in, out := &in.IsPublic, &out.IsPublic + *out = new(bool) + **out = **in + } + if in.PersistentDisk != nil { + in, out := &in.PersistentDisk, &out.PersistentDisk + *out = new(PersistentDiskParameters) + (*in).DeepCopyInto(*out) + } + if in.PublicEndpointEnabled != nil { + in, out := &in.PublicEndpointEnabled, &out.PublicEndpointEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + 
(*in).DeepCopyInto(*out) + } + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.ServiceNameRef != nil { + in, out := &in.ServiceNameRef, &out.ServiceNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceNameSelector != nil { + in, out := &in.ServiceNameSelector, &out.ServiceNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.TLSEnabled != nil { + in, out := &in.TLSEnabled, &out.TLSEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudAppParameters. +func (in *SpringCloudAppParameters) DeepCopy() *SpringCloudAppParameters { + if in == nil { + return nil + } + out := new(SpringCloudAppParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpringCloudAppSpec) DeepCopyInto(out *SpringCloudAppSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudAppSpec. +func (in *SpringCloudAppSpec) DeepCopy() *SpringCloudAppSpec { + if in == nil { + return nil + } + out := new(SpringCloudAppSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpringCloudAppStatus) DeepCopyInto(out *SpringCloudAppStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudAppStatus. 
+func (in *SpringCloudAppStatus) DeepCopy() *SpringCloudAppStatus { + if in == nil { + return nil + } + out := new(SpringCloudAppStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpringCloudBuildDeployment) DeepCopyInto(out *SpringCloudBuildDeployment) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudBuildDeployment. +func (in *SpringCloudBuildDeployment) DeepCopy() *SpringCloudBuildDeployment { + if in == nil { + return nil + } + out := new(SpringCloudBuildDeployment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SpringCloudBuildDeployment) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpringCloudBuildDeploymentInitParameters) DeepCopyInto(out *SpringCloudBuildDeploymentInitParameters) { + *out = *in + if in.AddonJSON != nil { + in, out := &in.AddonJSON, &out.AddonJSON + *out = new(string) + **out = **in + } + if in.ApplicationPerformanceMonitoringIds != nil { + in, out := &in.ApplicationPerformanceMonitoringIds, &out.ApplicationPerformanceMonitoringIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.BuildResultID != nil { + in, out := &in.BuildResultID, &out.BuildResultID + *out = new(string) + **out = **in + } + if in.EnvironmentVariables != nil { + in, out := &in.EnvironmentVariables, &out.EnvironmentVariables + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.Quota != nil { + in, out := &in.Quota, &out.Quota + *out = new(QuotaInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudBuildDeploymentInitParameters. +func (in *SpringCloudBuildDeploymentInitParameters) DeepCopy() *SpringCloudBuildDeploymentInitParameters { + if in == nil { + return nil + } + out := new(SpringCloudBuildDeploymentInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpringCloudBuildDeploymentList) DeepCopyInto(out *SpringCloudBuildDeploymentList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SpringCloudBuildDeployment, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudBuildDeploymentList. +func (in *SpringCloudBuildDeploymentList) DeepCopy() *SpringCloudBuildDeploymentList { + if in == nil { + return nil + } + out := new(SpringCloudBuildDeploymentList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SpringCloudBuildDeploymentList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpringCloudBuildDeploymentObservation) DeepCopyInto(out *SpringCloudBuildDeploymentObservation) { + *out = *in + if in.AddonJSON != nil { + in, out := &in.AddonJSON, &out.AddonJSON + *out = new(string) + **out = **in + } + if in.ApplicationPerformanceMonitoringIds != nil { + in, out := &in.ApplicationPerformanceMonitoringIds, &out.ApplicationPerformanceMonitoringIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.BuildResultID != nil { + in, out := &in.BuildResultID, &out.BuildResultID + *out = new(string) + **out = **in + } + if in.EnvironmentVariables != nil { + in, out := &in.EnvironmentVariables, &out.EnvironmentVariables + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.Quota != nil { + in, out := &in.Quota, &out.Quota + *out = new(QuotaObservation) + (*in).DeepCopyInto(*out) + } + if in.SpringCloudAppID != nil { + in, out := &in.SpringCloudAppID, &out.SpringCloudAppID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudBuildDeploymentObservation. +func (in *SpringCloudBuildDeploymentObservation) DeepCopy() *SpringCloudBuildDeploymentObservation { + if in == nil { + return nil + } + out := new(SpringCloudBuildDeploymentObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpringCloudBuildDeploymentParameters) DeepCopyInto(out *SpringCloudBuildDeploymentParameters) { + *out = *in + if in.AddonJSON != nil { + in, out := &in.AddonJSON, &out.AddonJSON + *out = new(string) + **out = **in + } + if in.ApplicationPerformanceMonitoringIds != nil { + in, out := &in.ApplicationPerformanceMonitoringIds, &out.ApplicationPerformanceMonitoringIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.BuildResultID != nil { + in, out := &in.BuildResultID, &out.BuildResultID + *out = new(string) + **out = **in + } + if in.EnvironmentVariables != nil { + in, out := &in.EnvironmentVariables, &out.EnvironmentVariables + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.Quota != nil { + in, out := &in.Quota, &out.Quota + *out = new(QuotaParameters) + (*in).DeepCopyInto(*out) + } + if in.SpringCloudAppID != nil { + in, out := &in.SpringCloudAppID, &out.SpringCloudAppID + *out = new(string) + **out = **in + } + if in.SpringCloudAppIDRef != nil { + in, out := &in.SpringCloudAppIDRef, &out.SpringCloudAppIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SpringCloudAppIDSelector != nil { + in, out := &in.SpringCloudAppIDSelector, &out.SpringCloudAppIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudBuildDeploymentParameters. 
+func (in *SpringCloudBuildDeploymentParameters) DeepCopy() *SpringCloudBuildDeploymentParameters { + if in == nil { + return nil + } + out := new(SpringCloudBuildDeploymentParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpringCloudBuildDeploymentSpec) DeepCopyInto(out *SpringCloudBuildDeploymentSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudBuildDeploymentSpec. +func (in *SpringCloudBuildDeploymentSpec) DeepCopy() *SpringCloudBuildDeploymentSpec { + if in == nil { + return nil + } + out := new(SpringCloudBuildDeploymentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpringCloudBuildDeploymentStatus) DeepCopyInto(out *SpringCloudBuildDeploymentStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudBuildDeploymentStatus. +func (in *SpringCloudBuildDeploymentStatus) DeepCopy() *SpringCloudBuildDeploymentStatus { + if in == nil { + return nil + } + out := new(SpringCloudBuildDeploymentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpringCloudBuildPackBinding) DeepCopyInto(out *SpringCloudBuildPackBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudBuildPackBinding. +func (in *SpringCloudBuildPackBinding) DeepCopy() *SpringCloudBuildPackBinding { + if in == nil { + return nil + } + out := new(SpringCloudBuildPackBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SpringCloudBuildPackBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpringCloudBuildPackBindingInitParameters) DeepCopyInto(out *SpringCloudBuildPackBindingInitParameters) { + *out = *in + if in.BindingType != nil { + in, out := &in.BindingType, &out.BindingType + *out = new(string) + **out = **in + } + if in.Launch != nil { + in, out := &in.Launch, &out.Launch + *out = new(LaunchInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudBuildPackBindingInitParameters. +func (in *SpringCloudBuildPackBindingInitParameters) DeepCopy() *SpringCloudBuildPackBindingInitParameters { + if in == nil { + return nil + } + out := new(SpringCloudBuildPackBindingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpringCloudBuildPackBindingList) DeepCopyInto(out *SpringCloudBuildPackBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SpringCloudBuildPackBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudBuildPackBindingList. +func (in *SpringCloudBuildPackBindingList) DeepCopy() *SpringCloudBuildPackBindingList { + if in == nil { + return nil + } + out := new(SpringCloudBuildPackBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SpringCloudBuildPackBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpringCloudBuildPackBindingObservation) DeepCopyInto(out *SpringCloudBuildPackBindingObservation) { + *out = *in + if in.BindingType != nil { + in, out := &in.BindingType, &out.BindingType + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Launch != nil { + in, out := &in.Launch, &out.Launch + *out = new(LaunchObservation) + (*in).DeepCopyInto(*out) + } + if in.SpringCloudBuilderID != nil { + in, out := &in.SpringCloudBuilderID, &out.SpringCloudBuilderID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudBuildPackBindingObservation. 
+func (in *SpringCloudBuildPackBindingObservation) DeepCopy() *SpringCloudBuildPackBindingObservation { + if in == nil { + return nil + } + out := new(SpringCloudBuildPackBindingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpringCloudBuildPackBindingParameters) DeepCopyInto(out *SpringCloudBuildPackBindingParameters) { + *out = *in + if in.BindingType != nil { + in, out := &in.BindingType, &out.BindingType + *out = new(string) + **out = **in + } + if in.Launch != nil { + in, out := &in.Launch, &out.Launch + *out = new(LaunchParameters) + (*in).DeepCopyInto(*out) + } + if in.SpringCloudBuilderID != nil { + in, out := &in.SpringCloudBuilderID, &out.SpringCloudBuilderID + *out = new(string) + **out = **in + } + if in.SpringCloudBuilderIDRef != nil { + in, out := &in.SpringCloudBuilderIDRef, &out.SpringCloudBuilderIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SpringCloudBuilderIDSelector != nil { + in, out := &in.SpringCloudBuilderIDSelector, &out.SpringCloudBuilderIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudBuildPackBindingParameters. +func (in *SpringCloudBuildPackBindingParameters) DeepCopy() *SpringCloudBuildPackBindingParameters { + if in == nil { + return nil + } + out := new(SpringCloudBuildPackBindingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpringCloudBuildPackBindingSpec) DeepCopyInto(out *SpringCloudBuildPackBindingSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudBuildPackBindingSpec. +func (in *SpringCloudBuildPackBindingSpec) DeepCopy() *SpringCloudBuildPackBindingSpec { + if in == nil { + return nil + } + out := new(SpringCloudBuildPackBindingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpringCloudBuildPackBindingStatus) DeepCopyInto(out *SpringCloudBuildPackBindingStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudBuildPackBindingStatus. +func (in *SpringCloudBuildPackBindingStatus) DeepCopy() *SpringCloudBuildPackBindingStatus { + if in == nil { + return nil + } + out := new(SpringCloudBuildPackBindingStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpringCloudBuilder) DeepCopyInto(out *SpringCloudBuilder) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudBuilder. 
+func (in *SpringCloudBuilder) DeepCopy() *SpringCloudBuilder { + if in == nil { + return nil + } + out := new(SpringCloudBuilder) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SpringCloudBuilder) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpringCloudBuilderInitParameters) DeepCopyInto(out *SpringCloudBuilderInitParameters) { + *out = *in + if in.BuildPackGroup != nil { + in, out := &in.BuildPackGroup, &out.BuildPackGroup + *out = make([]BuildPackGroupInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SpringCloudServiceID != nil { + in, out := &in.SpringCloudServiceID, &out.SpringCloudServiceID + *out = new(string) + **out = **in + } + if in.SpringCloudServiceIDRef != nil { + in, out := &in.SpringCloudServiceIDRef, &out.SpringCloudServiceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SpringCloudServiceIDSelector != nil { + in, out := &in.SpringCloudServiceIDSelector, &out.SpringCloudServiceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Stack != nil { + in, out := &in.Stack, &out.Stack + *out = new(StackInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudBuilderInitParameters. 
+func (in *SpringCloudBuilderInitParameters) DeepCopy() *SpringCloudBuilderInitParameters { + if in == nil { + return nil + } + out := new(SpringCloudBuilderInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpringCloudBuilderList) DeepCopyInto(out *SpringCloudBuilderList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SpringCloudBuilder, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudBuilderList. +func (in *SpringCloudBuilderList) DeepCopy() *SpringCloudBuilderList { + if in == nil { + return nil + } + out := new(SpringCloudBuilderList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SpringCloudBuilderList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpringCloudBuilderObservation) DeepCopyInto(out *SpringCloudBuilderObservation) { + *out = *in + if in.BuildPackGroup != nil { + in, out := &in.BuildPackGroup, &out.BuildPackGroup + *out = make([]BuildPackGroupObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SpringCloudServiceID != nil { + in, out := &in.SpringCloudServiceID, &out.SpringCloudServiceID + *out = new(string) + **out = **in + } + if in.Stack != nil { + in, out := &in.Stack, &out.Stack + *out = new(StackObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudBuilderObservation. +func (in *SpringCloudBuilderObservation) DeepCopy() *SpringCloudBuilderObservation { + if in == nil { + return nil + } + out := new(SpringCloudBuilderObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpringCloudBuilderParameters) DeepCopyInto(out *SpringCloudBuilderParameters) { + *out = *in + if in.BuildPackGroup != nil { + in, out := &in.BuildPackGroup, &out.BuildPackGroup + *out = make([]BuildPackGroupParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SpringCloudServiceID != nil { + in, out := &in.SpringCloudServiceID, &out.SpringCloudServiceID + *out = new(string) + **out = **in + } + if in.SpringCloudServiceIDRef != nil { + in, out := &in.SpringCloudServiceIDRef, &out.SpringCloudServiceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SpringCloudServiceIDSelector != nil { + in, out := &in.SpringCloudServiceIDSelector, &out.SpringCloudServiceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Stack != nil { + in, out := &in.Stack, &out.Stack + *out = new(StackParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudBuilderParameters. +func (in *SpringCloudBuilderParameters) DeepCopy() *SpringCloudBuilderParameters { + if in == nil { + return nil + } + out := new(SpringCloudBuilderParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpringCloudBuilderSpec) DeepCopyInto(out *SpringCloudBuilderSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudBuilderSpec. 
+func (in *SpringCloudBuilderSpec) DeepCopy() *SpringCloudBuilderSpec { + if in == nil { + return nil + } + out := new(SpringCloudBuilderSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpringCloudBuilderStatus) DeepCopyInto(out *SpringCloudBuilderStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudBuilderStatus. +func (in *SpringCloudBuilderStatus) DeepCopy() *SpringCloudBuilderStatus { + if in == nil { + return nil + } + out := new(SpringCloudBuilderStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpringCloudContainerDeployment) DeepCopyInto(out *SpringCloudContainerDeployment) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudContainerDeployment. +func (in *SpringCloudContainerDeployment) DeepCopy() *SpringCloudContainerDeployment { + if in == nil { + return nil + } + out := new(SpringCloudContainerDeployment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SpringCloudContainerDeployment) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpringCloudContainerDeploymentInitParameters) DeepCopyInto(out *SpringCloudContainerDeploymentInitParameters) { + *out = *in + if in.AddonJSON != nil { + in, out := &in.AddonJSON, &out.AddonJSON + *out = new(string) + **out = **in + } + if in.ApplicationPerformanceMonitoringIds != nil { + in, out := &in.ApplicationPerformanceMonitoringIds, &out.ApplicationPerformanceMonitoringIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Arguments != nil { + in, out := &in.Arguments, &out.Arguments + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Commands != nil { + in, out := &in.Commands, &out.Commands + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.EnvironmentVariables != nil { + in, out := &in.EnvironmentVariables, &out.EnvironmentVariables + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(string) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LanguageFramework != nil { + in, out := &in.LanguageFramework, &out.LanguageFramework + *out = new(string) + **out = **in + } + if in.Quota != nil { + in, out := &in.Quota, &out.Quota + *out = new(SpringCloudContainerDeploymentQuotaInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + 
} +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudContainerDeploymentInitParameters. +func (in *SpringCloudContainerDeploymentInitParameters) DeepCopy() *SpringCloudContainerDeploymentInitParameters { + if in == nil { + return nil + } + out := new(SpringCloudContainerDeploymentInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpringCloudContainerDeploymentList) DeepCopyInto(out *SpringCloudContainerDeploymentList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SpringCloudContainerDeployment, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudContainerDeploymentList. +func (in *SpringCloudContainerDeploymentList) DeepCopy() *SpringCloudContainerDeploymentList { + if in == nil { + return nil + } + out := new(SpringCloudContainerDeploymentList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SpringCloudContainerDeploymentList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpringCloudContainerDeploymentObservation) DeepCopyInto(out *SpringCloudContainerDeploymentObservation) { + *out = *in + if in.AddonJSON != nil { + in, out := &in.AddonJSON, &out.AddonJSON + *out = new(string) + **out = **in + } + if in.ApplicationPerformanceMonitoringIds != nil { + in, out := &in.ApplicationPerformanceMonitoringIds, &out.ApplicationPerformanceMonitoringIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Arguments != nil { + in, out := &in.Arguments, &out.Arguments + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Commands != nil { + in, out := &in.Commands, &out.Commands + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.EnvironmentVariables != nil { + in, out := &in.EnvironmentVariables, &out.EnvironmentVariables + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(string) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LanguageFramework != nil { + in, out := &in.LanguageFramework, &out.LanguageFramework + *out = new(string) + **out = **in + } + if in.Quota != nil { + in, out := &in.Quota, &out.Quota + *out = new(SpringCloudContainerDeploymentQuotaObservation) + (*in).DeepCopyInto(*out) + } + if in.Server != 
nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } + if in.SpringCloudAppID != nil { + in, out := &in.SpringCloudAppID, &out.SpringCloudAppID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudContainerDeploymentObservation. +func (in *SpringCloudContainerDeploymentObservation) DeepCopy() *SpringCloudContainerDeploymentObservation { + if in == nil { + return nil + } + out := new(SpringCloudContainerDeploymentObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpringCloudContainerDeploymentParameters) DeepCopyInto(out *SpringCloudContainerDeploymentParameters) { + *out = *in + if in.AddonJSON != nil { + in, out := &in.AddonJSON, &out.AddonJSON + *out = new(string) + **out = **in + } + if in.ApplicationPerformanceMonitoringIds != nil { + in, out := &in.ApplicationPerformanceMonitoringIds, &out.ApplicationPerformanceMonitoringIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Arguments != nil { + in, out := &in.Arguments, &out.Arguments + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Commands != nil { + in, out := &in.Commands, &out.Commands + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.EnvironmentVariables != nil { + in, out := &in.EnvironmentVariables, &out.EnvironmentVariables + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, 
out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(string) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LanguageFramework != nil { + in, out := &in.LanguageFramework, &out.LanguageFramework + *out = new(string) + **out = **in + } + if in.Quota != nil { + in, out := &in.Quota, &out.Quota + *out = new(SpringCloudContainerDeploymentQuotaParameters) + (*in).DeepCopyInto(*out) + } + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } + if in.SpringCloudAppID != nil { + in, out := &in.SpringCloudAppID, &out.SpringCloudAppID + *out = new(string) + **out = **in + } + if in.SpringCloudAppIDRef != nil { + in, out := &in.SpringCloudAppIDRef, &out.SpringCloudAppIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SpringCloudAppIDSelector != nil { + in, out := &in.SpringCloudAppIDSelector, &out.SpringCloudAppIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudContainerDeploymentParameters. +func (in *SpringCloudContainerDeploymentParameters) DeepCopy() *SpringCloudContainerDeploymentParameters { + if in == nil { + return nil + } + out := new(SpringCloudContainerDeploymentParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpringCloudContainerDeploymentQuotaInitParameters) DeepCopyInto(out *SpringCloudContainerDeploymentQuotaInitParameters) { + *out = *in + if in.CPU != nil { + in, out := &in.CPU, &out.CPU + *out = new(string) + **out = **in + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudContainerDeploymentQuotaInitParameters. +func (in *SpringCloudContainerDeploymentQuotaInitParameters) DeepCopy() *SpringCloudContainerDeploymentQuotaInitParameters { + if in == nil { + return nil + } + out := new(SpringCloudContainerDeploymentQuotaInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpringCloudContainerDeploymentQuotaObservation) DeepCopyInto(out *SpringCloudContainerDeploymentQuotaObservation) { + *out = *in + if in.CPU != nil { + in, out := &in.CPU, &out.CPU + *out = new(string) + **out = **in + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudContainerDeploymentQuotaObservation. +func (in *SpringCloudContainerDeploymentQuotaObservation) DeepCopy() *SpringCloudContainerDeploymentQuotaObservation { + if in == nil { + return nil + } + out := new(SpringCloudContainerDeploymentQuotaObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpringCloudContainerDeploymentQuotaParameters) DeepCopyInto(out *SpringCloudContainerDeploymentQuotaParameters) { + *out = *in + if in.CPU != nil { + in, out := &in.CPU, &out.CPU + *out = new(string) + **out = **in + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudContainerDeploymentQuotaParameters. +func (in *SpringCloudContainerDeploymentQuotaParameters) DeepCopy() *SpringCloudContainerDeploymentQuotaParameters { + if in == nil { + return nil + } + out := new(SpringCloudContainerDeploymentQuotaParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpringCloudContainerDeploymentSpec) DeepCopyInto(out *SpringCloudContainerDeploymentSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudContainerDeploymentSpec. +func (in *SpringCloudContainerDeploymentSpec) DeepCopy() *SpringCloudContainerDeploymentSpec { + if in == nil { + return nil + } + out := new(SpringCloudContainerDeploymentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpringCloudContainerDeploymentStatus) DeepCopyInto(out *SpringCloudContainerDeploymentStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudContainerDeploymentStatus. 
+func (in *SpringCloudContainerDeploymentStatus) DeepCopy() *SpringCloudContainerDeploymentStatus { + if in == nil { + return nil + } + out := new(SpringCloudContainerDeploymentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpringCloudCustomizedAccelerator) DeepCopyInto(out *SpringCloudCustomizedAccelerator) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudCustomizedAccelerator. +func (in *SpringCloudCustomizedAccelerator) DeepCopy() *SpringCloudCustomizedAccelerator { + if in == nil { + return nil + } + out := new(SpringCloudCustomizedAccelerator) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SpringCloudCustomizedAccelerator) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpringCloudCustomizedAcceleratorInitParameters) DeepCopyInto(out *SpringCloudCustomizedAcceleratorInitParameters) { + *out = *in + if in.AcceleratorTags != nil { + in, out := &in.AcceleratorTags, &out.AcceleratorTags + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AcceleratorType != nil { + in, out := &in.AcceleratorType, &out.AcceleratorType + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.GitRepository != nil { + in, out := &in.GitRepository, &out.GitRepository + *out = new(GitRepositoryInitParameters) + (*in).DeepCopyInto(*out) + } + if in.IconURL != nil { + in, out := &in.IconURL, &out.IconURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudCustomizedAcceleratorInitParameters. +func (in *SpringCloudCustomizedAcceleratorInitParameters) DeepCopy() *SpringCloudCustomizedAcceleratorInitParameters { + if in == nil { + return nil + } + out := new(SpringCloudCustomizedAcceleratorInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpringCloudCustomizedAcceleratorList) DeepCopyInto(out *SpringCloudCustomizedAcceleratorList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SpringCloudCustomizedAccelerator, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudCustomizedAcceleratorList. +func (in *SpringCloudCustomizedAcceleratorList) DeepCopy() *SpringCloudCustomizedAcceleratorList { + if in == nil { + return nil + } + out := new(SpringCloudCustomizedAcceleratorList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SpringCloudCustomizedAcceleratorList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpringCloudCustomizedAcceleratorObservation) DeepCopyInto(out *SpringCloudCustomizedAcceleratorObservation) { + *out = *in + if in.AcceleratorTags != nil { + in, out := &in.AcceleratorTags, &out.AcceleratorTags + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AcceleratorType != nil { + in, out := &in.AcceleratorType, &out.AcceleratorType + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.GitRepository != nil { + in, out := &in.GitRepository, &out.GitRepository + *out = new(GitRepositoryObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IconURL != nil { + in, out := &in.IconURL, &out.IconURL + *out = new(string) + **out = **in + } + if in.SpringCloudAcceleratorID != nil { + in, out := &in.SpringCloudAcceleratorID, &out.SpringCloudAcceleratorID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudCustomizedAcceleratorObservation. +func (in *SpringCloudCustomizedAcceleratorObservation) DeepCopy() *SpringCloudCustomizedAcceleratorObservation { + if in == nil { + return nil + } + out := new(SpringCloudCustomizedAcceleratorObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpringCloudCustomizedAcceleratorParameters) DeepCopyInto(out *SpringCloudCustomizedAcceleratorParameters) { + *out = *in + if in.AcceleratorTags != nil { + in, out := &in.AcceleratorTags, &out.AcceleratorTags + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AcceleratorType != nil { + in, out := &in.AcceleratorType, &out.AcceleratorType + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.GitRepository != nil { + in, out := &in.GitRepository, &out.GitRepository + *out = new(GitRepositoryParameters) + (*in).DeepCopyInto(*out) + } + if in.IconURL != nil { + in, out := &in.IconURL, &out.IconURL + *out = new(string) + **out = **in + } + if in.SpringCloudAcceleratorID != nil { + in, out := &in.SpringCloudAcceleratorID, &out.SpringCloudAcceleratorID + *out = new(string) + **out = **in + } + if in.SpringCloudAcceleratorIDRef != nil { + in, out := &in.SpringCloudAcceleratorIDRef, &out.SpringCloudAcceleratorIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SpringCloudAcceleratorIDSelector != nil { + in, out := &in.SpringCloudAcceleratorIDSelector, &out.SpringCloudAcceleratorIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudCustomizedAcceleratorParameters. 
+func (in *SpringCloudCustomizedAcceleratorParameters) DeepCopy() *SpringCloudCustomizedAcceleratorParameters { + if in == nil { + return nil + } + out := new(SpringCloudCustomizedAcceleratorParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpringCloudCustomizedAcceleratorSpec) DeepCopyInto(out *SpringCloudCustomizedAcceleratorSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudCustomizedAcceleratorSpec. +func (in *SpringCloudCustomizedAcceleratorSpec) DeepCopy() *SpringCloudCustomizedAcceleratorSpec { + if in == nil { + return nil + } + out := new(SpringCloudCustomizedAcceleratorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpringCloudCustomizedAcceleratorStatus) DeepCopyInto(out *SpringCloudCustomizedAcceleratorStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudCustomizedAcceleratorStatus. +func (in *SpringCloudCustomizedAcceleratorStatus) DeepCopy() *SpringCloudCustomizedAcceleratorStatus { + if in == nil { + return nil + } + out := new(SpringCloudCustomizedAcceleratorStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpringCloudDevToolPortal) DeepCopyInto(out *SpringCloudDevToolPortal) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudDevToolPortal. +func (in *SpringCloudDevToolPortal) DeepCopy() *SpringCloudDevToolPortal { + if in == nil { + return nil + } + out := new(SpringCloudDevToolPortal) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SpringCloudDevToolPortal) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpringCloudDevToolPortalInitParameters) DeepCopyInto(out *SpringCloudDevToolPortalInitParameters) { + *out = *in + if in.ApplicationAcceleratorEnabled != nil { + in, out := &in.ApplicationAcceleratorEnabled, &out.ApplicationAcceleratorEnabled + *out = new(bool) + **out = **in + } + if in.ApplicationLiveViewEnabled != nil { + in, out := &in.ApplicationLiveViewEnabled, &out.ApplicationLiveViewEnabled + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.SpringCloudServiceID != nil { + in, out := &in.SpringCloudServiceID, &out.SpringCloudServiceID + *out = new(string) + **out = **in + } + if in.SpringCloudServiceIDRef != nil { + in, out := &in.SpringCloudServiceIDRef, &out.SpringCloudServiceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SpringCloudServiceIDSelector != nil { + in, out := 
&in.SpringCloudServiceIDSelector, &out.SpringCloudServiceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Sso != nil { + in, out := &in.Sso, &out.Sso + *out = new(SpringCloudDevToolPortalSsoInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudDevToolPortalInitParameters. +func (in *SpringCloudDevToolPortalInitParameters) DeepCopy() *SpringCloudDevToolPortalInitParameters { + if in == nil { + return nil + } + out := new(SpringCloudDevToolPortalInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpringCloudDevToolPortalList) DeepCopyInto(out *SpringCloudDevToolPortalList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SpringCloudDevToolPortal, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudDevToolPortalList. +func (in *SpringCloudDevToolPortalList) DeepCopy() *SpringCloudDevToolPortalList { + if in == nil { + return nil + } + out := new(SpringCloudDevToolPortalList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SpringCloudDevToolPortalList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpringCloudDevToolPortalObservation) DeepCopyInto(out *SpringCloudDevToolPortalObservation) { + *out = *in + if in.ApplicationAcceleratorEnabled != nil { + in, out := &in.ApplicationAcceleratorEnabled, &out.ApplicationAcceleratorEnabled + *out = new(bool) + **out = **in + } + if in.ApplicationLiveViewEnabled != nil { + in, out := &in.ApplicationLiveViewEnabled, &out.ApplicationLiveViewEnabled + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.SpringCloudServiceID != nil { + in, out := &in.SpringCloudServiceID, &out.SpringCloudServiceID + *out = new(string) + **out = **in + } + if in.Sso != nil { + in, out := &in.Sso, &out.Sso + *out = new(SpringCloudDevToolPortalSsoObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudDevToolPortalObservation. +func (in *SpringCloudDevToolPortalObservation) DeepCopy() *SpringCloudDevToolPortalObservation { + if in == nil { + return nil + } + out := new(SpringCloudDevToolPortalObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpringCloudDevToolPortalParameters) DeepCopyInto(out *SpringCloudDevToolPortalParameters) { + *out = *in + if in.ApplicationAcceleratorEnabled != nil { + in, out := &in.ApplicationAcceleratorEnabled, &out.ApplicationAcceleratorEnabled + *out = new(bool) + **out = **in + } + if in.ApplicationLiveViewEnabled != nil { + in, out := &in.ApplicationLiveViewEnabled, &out.ApplicationLiveViewEnabled + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.SpringCloudServiceID != nil { + in, out := &in.SpringCloudServiceID, &out.SpringCloudServiceID + *out = new(string) + **out = **in + } + if in.SpringCloudServiceIDRef != nil { + in, out := &in.SpringCloudServiceIDRef, &out.SpringCloudServiceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SpringCloudServiceIDSelector != nil { + in, out := &in.SpringCloudServiceIDSelector, &out.SpringCloudServiceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Sso != nil { + in, out := &in.Sso, &out.Sso + *out = new(SpringCloudDevToolPortalSsoParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudDevToolPortalParameters. +func (in *SpringCloudDevToolPortalParameters) DeepCopy() *SpringCloudDevToolPortalParameters { + if in == nil { + return nil + } + out := new(SpringCloudDevToolPortalParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpringCloudDevToolPortalSpec) DeepCopyInto(out *SpringCloudDevToolPortalSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudDevToolPortalSpec. +func (in *SpringCloudDevToolPortalSpec) DeepCopy() *SpringCloudDevToolPortalSpec { + if in == nil { + return nil + } + out := new(SpringCloudDevToolPortalSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpringCloudDevToolPortalSsoInitParameters) DeepCopyInto(out *SpringCloudDevToolPortalSsoInitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecret != nil { + in, out := &in.ClientSecret, &out.ClientSecret + *out = new(string) + **out = **in + } + if in.MetadataURL != nil { + in, out := &in.MetadataURL, &out.MetadataURL + *out = new(string) + **out = **in + } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudDevToolPortalSsoInitParameters. +func (in *SpringCloudDevToolPortalSsoInitParameters) DeepCopy() *SpringCloudDevToolPortalSsoInitParameters { + if in == nil { + return nil + } + out := new(SpringCloudDevToolPortalSsoInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpringCloudDevToolPortalSsoObservation) DeepCopyInto(out *SpringCloudDevToolPortalSsoObservation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecret != nil { + in, out := &in.ClientSecret, &out.ClientSecret + *out = new(string) + **out = **in + } + if in.MetadataURL != nil { + in, out := &in.MetadataURL, &out.MetadataURL + *out = new(string) + **out = **in + } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudDevToolPortalSsoObservation. +func (in *SpringCloudDevToolPortalSsoObservation) DeepCopy() *SpringCloudDevToolPortalSsoObservation { + if in == nil { + return nil + } + out := new(SpringCloudDevToolPortalSsoObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpringCloudDevToolPortalSsoParameters) DeepCopyInto(out *SpringCloudDevToolPortalSsoParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecret != nil { + in, out := &in.ClientSecret, &out.ClientSecret + *out = new(string) + **out = **in + } + if in.MetadataURL != nil { + in, out := &in.MetadataURL, &out.MetadataURL + *out = new(string) + **out = **in + } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudDevToolPortalSsoParameters. 
+func (in *SpringCloudDevToolPortalSsoParameters) DeepCopy() *SpringCloudDevToolPortalSsoParameters { + if in == nil { + return nil + } + out := new(SpringCloudDevToolPortalSsoParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpringCloudDevToolPortalStatus) DeepCopyInto(out *SpringCloudDevToolPortalStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudDevToolPortalStatus. +func (in *SpringCloudDevToolPortalStatus) DeepCopy() *SpringCloudDevToolPortalStatus { + if in == nil { + return nil + } + out := new(SpringCloudDevToolPortalStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpringCloudGateway) DeepCopyInto(out *SpringCloudGateway) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudGateway. +func (in *SpringCloudGateway) DeepCopy() *SpringCloudGateway { + if in == nil { + return nil + } + out := new(SpringCloudGateway) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SpringCloudGateway) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpringCloudGatewayInitParameters) DeepCopyInto(out *SpringCloudGatewayInitParameters) { + *out = *in + if in.APIMetadata != nil { + in, out := &in.APIMetadata, &out.APIMetadata + *out = new(APIMetadataInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ApplicationPerformanceMonitoringIds != nil { + in, out := &in.ApplicationPerformanceMonitoringIds, &out.ApplicationPerformanceMonitoringIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ApplicationPerformanceMonitoringTypes != nil { + in, out := &in.ApplicationPerformanceMonitoringTypes, &out.ApplicationPerformanceMonitoringTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientAuthorization != nil { + in, out := &in.ClientAuthorization, &out.ClientAuthorization + *out = new(ClientAuthorizationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Cors != nil { + in, out := &in.Cors, &out.Cors + *out = new(CorsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.EnvironmentVariables != nil { + in, out := &in.EnvironmentVariables, &out.EnvironmentVariables + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.HTTPSOnly != nil { + in, out := &in.HTTPSOnly, &out.HTTPSOnly + *out = new(bool) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LocalResponseCachePerInstance != nil { + in, out := &in.LocalResponseCachePerInstance, &out.LocalResponseCachePerInstance + *out = new(LocalResponseCachePerInstanceInitParameters) + 
(*in).DeepCopyInto(*out) + } + if in.LocalResponseCachePerRoute != nil { + in, out := &in.LocalResponseCachePerRoute, &out.LocalResponseCachePerRoute + *out = new(LocalResponseCachePerRouteInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.Quota != nil { + in, out := &in.Quota, &out.Quota + *out = new(SpringCloudGatewayQuotaInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Sso != nil { + in, out := &in.Sso, &out.Sso + *out = new(SpringCloudGatewaySsoInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudGatewayInitParameters. +func (in *SpringCloudGatewayInitParameters) DeepCopy() *SpringCloudGatewayInitParameters { + if in == nil { + return nil + } + out := new(SpringCloudGatewayInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpringCloudGatewayList) DeepCopyInto(out *SpringCloudGatewayList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SpringCloudGateway, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudGatewayList. +func (in *SpringCloudGatewayList) DeepCopy() *SpringCloudGatewayList { + if in == nil { + return nil + } + out := new(SpringCloudGatewayList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *SpringCloudGatewayList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpringCloudGatewayObservation) DeepCopyInto(out *SpringCloudGatewayObservation) { + *out = *in + if in.APIMetadata != nil { + in, out := &in.APIMetadata, &out.APIMetadata + *out = new(APIMetadataObservation) + (*in).DeepCopyInto(*out) + } + if in.ApplicationPerformanceMonitoringIds != nil { + in, out := &in.ApplicationPerformanceMonitoringIds, &out.ApplicationPerformanceMonitoringIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ApplicationPerformanceMonitoringTypes != nil { + in, out := &in.ApplicationPerformanceMonitoringTypes, &out.ApplicationPerformanceMonitoringTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientAuthorization != nil { + in, out := &in.ClientAuthorization, &out.ClientAuthorization + *out = new(ClientAuthorizationObservation) + (*in).DeepCopyInto(*out) + } + if in.Cors != nil { + in, out := &in.Cors, &out.Cors + *out = new(CorsObservation) + (*in).DeepCopyInto(*out) + } + if in.EnvironmentVariables != nil { + in, out := &in.EnvironmentVariables, &out.EnvironmentVariables + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.HTTPSOnly != nil { + in, out := &in.HTTPSOnly, &out.HTTPSOnly + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if 
in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LocalResponseCachePerInstance != nil { + in, out := &in.LocalResponseCachePerInstance, &out.LocalResponseCachePerInstance + *out = new(LocalResponseCachePerInstanceObservation) + (*in).DeepCopyInto(*out) + } + if in.LocalResponseCachePerRoute != nil { + in, out := &in.LocalResponseCachePerRoute, &out.LocalResponseCachePerRoute + *out = new(LocalResponseCachePerRouteObservation) + (*in).DeepCopyInto(*out) + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.Quota != nil { + in, out := &in.Quota, &out.Quota + *out = new(SpringCloudGatewayQuotaObservation) + (*in).DeepCopyInto(*out) + } + if in.SpringCloudServiceID != nil { + in, out := &in.SpringCloudServiceID, &out.SpringCloudServiceID + *out = new(string) + **out = **in + } + if in.Sso != nil { + in, out := &in.Sso, &out.Sso + *out = new(SpringCloudGatewaySsoObservation) + (*in).DeepCopyInto(*out) + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudGatewayObservation. +func (in *SpringCloudGatewayObservation) DeepCopy() *SpringCloudGatewayObservation { + if in == nil { + return nil + } + out := new(SpringCloudGatewayObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpringCloudGatewayParameters) DeepCopyInto(out *SpringCloudGatewayParameters) { + *out = *in + if in.APIMetadata != nil { + in, out := &in.APIMetadata, &out.APIMetadata + *out = new(APIMetadataParameters) + (*in).DeepCopyInto(*out) + } + if in.ApplicationPerformanceMonitoringIds != nil { + in, out := &in.ApplicationPerformanceMonitoringIds, &out.ApplicationPerformanceMonitoringIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ApplicationPerformanceMonitoringTypes != nil { + in, out := &in.ApplicationPerformanceMonitoringTypes, &out.ApplicationPerformanceMonitoringTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientAuthorization != nil { + in, out := &in.ClientAuthorization, &out.ClientAuthorization + *out = new(ClientAuthorizationParameters) + (*in).DeepCopyInto(*out) + } + if in.Cors != nil { + in, out := &in.Cors, &out.Cors + *out = new(CorsParameters) + (*in).DeepCopyInto(*out) + } + if in.EnvironmentVariables != nil { + in, out := &in.EnvironmentVariables, &out.EnvironmentVariables + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.HTTPSOnly != nil { + in, out := &in.HTTPSOnly, &out.HTTPSOnly + *out = new(bool) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.LocalResponseCachePerInstance != nil { + in, out := &in.LocalResponseCachePerInstance, &out.LocalResponseCachePerInstance + *out = new(LocalResponseCachePerInstanceParameters) + (*in).DeepCopyInto(*out) + } + if 
in.LocalResponseCachePerRoute != nil { + in, out := &in.LocalResponseCachePerRoute, &out.LocalResponseCachePerRoute + *out = new(LocalResponseCachePerRouteParameters) + (*in).DeepCopyInto(*out) + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.Quota != nil { + in, out := &in.Quota, &out.Quota + *out = new(SpringCloudGatewayQuotaParameters) + (*in).DeepCopyInto(*out) + } + if in.SensitiveEnvironmentVariablesSecretRef != nil { + in, out := &in.SensitiveEnvironmentVariablesSecretRef, &out.SensitiveEnvironmentVariablesSecretRef + *out = new(v1.SecretReference) + **out = **in + } + if in.SpringCloudServiceID != nil { + in, out := &in.SpringCloudServiceID, &out.SpringCloudServiceID + *out = new(string) + **out = **in + } + if in.SpringCloudServiceIDRef != nil { + in, out := &in.SpringCloudServiceIDRef, &out.SpringCloudServiceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SpringCloudServiceIDSelector != nil { + in, out := &in.SpringCloudServiceIDSelector, &out.SpringCloudServiceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Sso != nil { + in, out := &in.Sso, &out.Sso + *out = new(SpringCloudGatewaySsoParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudGatewayParameters. +func (in *SpringCloudGatewayParameters) DeepCopy() *SpringCloudGatewayParameters { + if in == nil { + return nil + } + out := new(SpringCloudGatewayParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpringCloudGatewayQuotaInitParameters) DeepCopyInto(out *SpringCloudGatewayQuotaInitParameters) { + *out = *in + if in.CPU != nil { + in, out := &in.CPU, &out.CPU + *out = new(string) + **out = **in + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudGatewayQuotaInitParameters. +func (in *SpringCloudGatewayQuotaInitParameters) DeepCopy() *SpringCloudGatewayQuotaInitParameters { + if in == nil { + return nil + } + out := new(SpringCloudGatewayQuotaInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpringCloudGatewayQuotaObservation) DeepCopyInto(out *SpringCloudGatewayQuotaObservation) { + *out = *in + if in.CPU != nil { + in, out := &in.CPU, &out.CPU + *out = new(string) + **out = **in + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudGatewayQuotaObservation. +func (in *SpringCloudGatewayQuotaObservation) DeepCopy() *SpringCloudGatewayQuotaObservation { + if in == nil { + return nil + } + out := new(SpringCloudGatewayQuotaObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpringCloudGatewayQuotaParameters) DeepCopyInto(out *SpringCloudGatewayQuotaParameters) { + *out = *in + if in.CPU != nil { + in, out := &in.CPU, &out.CPU + *out = new(string) + **out = **in + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudGatewayQuotaParameters. +func (in *SpringCloudGatewayQuotaParameters) DeepCopy() *SpringCloudGatewayQuotaParameters { + if in == nil { + return nil + } + out := new(SpringCloudGatewayQuotaParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpringCloudGatewaySpec) DeepCopyInto(out *SpringCloudGatewaySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudGatewaySpec. +func (in *SpringCloudGatewaySpec) DeepCopy() *SpringCloudGatewaySpec { + if in == nil { + return nil + } + out := new(SpringCloudGatewaySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpringCloudGatewaySsoInitParameters) DeepCopyInto(out *SpringCloudGatewaySsoInitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecret != nil { + in, out := &in.ClientSecret, &out.ClientSecret + *out = new(string) + **out = **in + } + if in.IssuerURI != nil { + in, out := &in.IssuerURI, &out.IssuerURI + *out = new(string) + **out = **in + } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudGatewaySsoInitParameters. +func (in *SpringCloudGatewaySsoInitParameters) DeepCopy() *SpringCloudGatewaySsoInitParameters { + if in == nil { + return nil + } + out := new(SpringCloudGatewaySsoInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpringCloudGatewaySsoObservation) DeepCopyInto(out *SpringCloudGatewaySsoObservation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecret != nil { + in, out := &in.ClientSecret, &out.ClientSecret + *out = new(string) + **out = **in + } + if in.IssuerURI != nil { + in, out := &in.IssuerURI, &out.IssuerURI + *out = new(string) + **out = **in + } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudGatewaySsoObservation. 
+func (in *SpringCloudGatewaySsoObservation) DeepCopy() *SpringCloudGatewaySsoObservation { + if in == nil { + return nil + } + out := new(SpringCloudGatewaySsoObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpringCloudGatewaySsoParameters) DeepCopyInto(out *SpringCloudGatewaySsoParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecret != nil { + in, out := &in.ClientSecret, &out.ClientSecret + *out = new(string) + **out = **in + } + if in.IssuerURI != nil { + in, out := &in.IssuerURI, &out.IssuerURI + *out = new(string) + **out = **in + } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudGatewaySsoParameters. +func (in *SpringCloudGatewaySsoParameters) DeepCopy() *SpringCloudGatewaySsoParameters { + if in == nil { + return nil + } + out := new(SpringCloudGatewaySsoParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpringCloudGatewayStatus) DeepCopyInto(out *SpringCloudGatewayStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudGatewayStatus. 
+func (in *SpringCloudGatewayStatus) DeepCopy() *SpringCloudGatewayStatus { + if in == nil { + return nil + } + out := new(SpringCloudGatewayStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpringCloudJavaDeployment) DeepCopyInto(out *SpringCloudJavaDeployment) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudJavaDeployment. +func (in *SpringCloudJavaDeployment) DeepCopy() *SpringCloudJavaDeployment { + if in == nil { + return nil + } + out := new(SpringCloudJavaDeployment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SpringCloudJavaDeployment) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpringCloudJavaDeploymentInitParameters) DeepCopyInto(out *SpringCloudJavaDeploymentInitParameters) { + *out = *in + if in.EnvironmentVariables != nil { + in, out := &in.EnvironmentVariables, &out.EnvironmentVariables + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.JvmOptions != nil { + in, out := &in.JvmOptions, &out.JvmOptions + *out = new(string) + **out = **in + } + if in.Quota != nil { + in, out := &in.Quota, &out.Quota + *out = new(SpringCloudJavaDeploymentQuotaInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RuntimeVersion != nil { + in, out := &in.RuntimeVersion, &out.RuntimeVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudJavaDeploymentInitParameters. +func (in *SpringCloudJavaDeploymentInitParameters) DeepCopy() *SpringCloudJavaDeploymentInitParameters { + if in == nil { + return nil + } + out := new(SpringCloudJavaDeploymentInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpringCloudJavaDeploymentList) DeepCopyInto(out *SpringCloudJavaDeploymentList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SpringCloudJavaDeployment, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudJavaDeploymentList. 
+func (in *SpringCloudJavaDeploymentList) DeepCopy() *SpringCloudJavaDeploymentList { + if in == nil { + return nil + } + out := new(SpringCloudJavaDeploymentList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SpringCloudJavaDeploymentList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpringCloudJavaDeploymentObservation) DeepCopyInto(out *SpringCloudJavaDeploymentObservation) { + *out = *in + if in.EnvironmentVariables != nil { + in, out := &in.EnvironmentVariables, &out.EnvironmentVariables + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.JvmOptions != nil { + in, out := &in.JvmOptions, &out.JvmOptions + *out = new(string) + **out = **in + } + if in.Quota != nil { + in, out := &in.Quota, &out.Quota + *out = new(SpringCloudJavaDeploymentQuotaObservation) + (*in).DeepCopyInto(*out) + } + if in.RuntimeVersion != nil { + in, out := &in.RuntimeVersion, &out.RuntimeVersion + *out = new(string) + **out = **in + } + if in.SpringCloudAppID != nil { + in, out := &in.SpringCloudAppID, &out.SpringCloudAppID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudJavaDeploymentObservation. 
+func (in *SpringCloudJavaDeploymentObservation) DeepCopy() *SpringCloudJavaDeploymentObservation { + if in == nil { + return nil + } + out := new(SpringCloudJavaDeploymentObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpringCloudJavaDeploymentParameters) DeepCopyInto(out *SpringCloudJavaDeploymentParameters) { + *out = *in + if in.EnvironmentVariables != nil { + in, out := &in.EnvironmentVariables, &out.EnvironmentVariables + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.JvmOptions != nil { + in, out := &in.JvmOptions, &out.JvmOptions + *out = new(string) + **out = **in + } + if in.Quota != nil { + in, out := &in.Quota, &out.Quota + *out = new(SpringCloudJavaDeploymentQuotaParameters) + (*in).DeepCopyInto(*out) + } + if in.RuntimeVersion != nil { + in, out := &in.RuntimeVersion, &out.RuntimeVersion + *out = new(string) + **out = **in + } + if in.SpringCloudAppID != nil { + in, out := &in.SpringCloudAppID, &out.SpringCloudAppID + *out = new(string) + **out = **in + } + if in.SpringCloudAppIDRef != nil { + in, out := &in.SpringCloudAppIDRef, &out.SpringCloudAppIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SpringCloudAppIDSelector != nil { + in, out := &in.SpringCloudAppIDSelector, &out.SpringCloudAppIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudJavaDeploymentParameters. 
+func (in *SpringCloudJavaDeploymentParameters) DeepCopy() *SpringCloudJavaDeploymentParameters { + if in == nil { + return nil + } + out := new(SpringCloudJavaDeploymentParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpringCloudJavaDeploymentQuotaInitParameters) DeepCopyInto(out *SpringCloudJavaDeploymentQuotaInitParameters) { + *out = *in + if in.CPU != nil { + in, out := &in.CPU, &out.CPU + *out = new(string) + **out = **in + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudJavaDeploymentQuotaInitParameters. +func (in *SpringCloudJavaDeploymentQuotaInitParameters) DeepCopy() *SpringCloudJavaDeploymentQuotaInitParameters { + if in == nil { + return nil + } + out := new(SpringCloudJavaDeploymentQuotaInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpringCloudJavaDeploymentQuotaObservation) DeepCopyInto(out *SpringCloudJavaDeploymentQuotaObservation) { + *out = *in + if in.CPU != nil { + in, out := &in.CPU, &out.CPU + *out = new(string) + **out = **in + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudJavaDeploymentQuotaObservation. +func (in *SpringCloudJavaDeploymentQuotaObservation) DeepCopy() *SpringCloudJavaDeploymentQuotaObservation { + if in == nil { + return nil + } + out := new(SpringCloudJavaDeploymentQuotaObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpringCloudJavaDeploymentQuotaParameters) DeepCopyInto(out *SpringCloudJavaDeploymentQuotaParameters) { + *out = *in + if in.CPU != nil { + in, out := &in.CPU, &out.CPU + *out = new(string) + **out = **in + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudJavaDeploymentQuotaParameters. +func (in *SpringCloudJavaDeploymentQuotaParameters) DeepCopy() *SpringCloudJavaDeploymentQuotaParameters { + if in == nil { + return nil + } + out := new(SpringCloudJavaDeploymentQuotaParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpringCloudJavaDeploymentSpec) DeepCopyInto(out *SpringCloudJavaDeploymentSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudJavaDeploymentSpec. +func (in *SpringCloudJavaDeploymentSpec) DeepCopy() *SpringCloudJavaDeploymentSpec { + if in == nil { + return nil + } + out := new(SpringCloudJavaDeploymentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpringCloudJavaDeploymentStatus) DeepCopyInto(out *SpringCloudJavaDeploymentStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudJavaDeploymentStatus. 
+func (in *SpringCloudJavaDeploymentStatus) DeepCopy() *SpringCloudJavaDeploymentStatus { + if in == nil { + return nil + } + out := new(SpringCloudJavaDeploymentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpringCloudService) DeepCopyInto(out *SpringCloudService) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudService. +func (in *SpringCloudService) DeepCopy() *SpringCloudService { + if in == nil { + return nil + } + out := new(SpringCloudService) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SpringCloudService) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
// NOTE(review): generated deepcopy code — do not hand-edit; regenerate instead.
func (in *SpringCloudServiceInitParameters) DeepCopyInto(out *SpringCloudServiceInitParameters) {
	*out = *in
	if in.BuildAgentPoolSize != nil {
		in, out := &in.BuildAgentPoolSize, &out.BuildAgentPoolSize
		*out = new(string)
		**out = **in
	}
	if in.ConfigServerGitSetting != nil {
		in, out := &in.ConfigServerGitSetting, &out.ConfigServerGitSetting
		*out = new(ConfigServerGitSettingInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.ContainerRegistry != nil {
		in, out := &in.ContainerRegistry, &out.ContainerRegistry
		*out = make([]ContainerRegistryInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.DefaultBuildService != nil {
		in, out := &in.DefaultBuildService, &out.DefaultBuildService
		*out = new(DefaultBuildServiceInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Location != nil {
		in, out := &in.Location, &out.Location
		*out = new(string)
		**out = **in
	}
	if in.LogStreamPublicEndpointEnabled != nil {
		in, out := &in.LogStreamPublicEndpointEnabled, &out.LogStreamPublicEndpointEnabled
		*out = new(bool)
		**out = **in
	}
	if in.ManagedEnvironmentID != nil {
		in, out := &in.ManagedEnvironmentID, &out.ManagedEnvironmentID
		*out = new(string)
		**out = **in
	}
	if in.Marketplace != nil {
		in, out := &in.Marketplace, &out.Marketplace
		*out = new(MarketplaceInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Network != nil {
		in, out := &in.Network, &out.Network
		*out = new(NetworkInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.ServiceRegistryEnabled != nil {
		in, out := &in.ServiceRegistryEnabled, &out.ServiceRegistryEnabled
		*out = new(bool)
		**out = **in
	}
	if in.SkuName != nil {
		in, out := &in.SkuName, &out.SkuName
		*out = new(string)
		**out = **in
	}
	if in.SkuTier != nil {
		in, out := &in.SkuTier, &out.SkuTier
		*out = new(string)
		**out = **in
	}
	if in.Tags != nil {
		in, out := &in.Tags, &out.Tags
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
	if in.Trace != nil {
		in, out := &in.Trace, &out.Trace
		*out = new(TraceInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.ZoneRedundant != nil {
		in, out := &in.ZoneRedundant, &out.ZoneRedundant
		*out = new(bool)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudServiceInitParameters.
func (in *SpringCloudServiceInitParameters) DeepCopy() *SpringCloudServiceInitParameters {
	if in == nil {
		return nil
	}
	out := new(SpringCloudServiceInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SpringCloudServiceList) DeepCopyInto(out *SpringCloudServiceList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]SpringCloudService, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudServiceList.
func (in *SpringCloudServiceList) DeepCopy() *SpringCloudServiceList {
	if in == nil {
		return nil
	}
	out := new(SpringCloudServiceList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *SpringCloudServiceList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): generated deepcopy code — do not hand-edit; regenerate instead.
func (in *SpringCloudServiceObservation) DeepCopyInto(out *SpringCloudServiceObservation) {
	*out = *in
	if in.BuildAgentPoolSize != nil {
		in, out := &in.BuildAgentPoolSize, &out.BuildAgentPoolSize
		*out = new(string)
		**out = **in
	}
	if in.ConfigServerGitSetting != nil {
		in, out := &in.ConfigServerGitSetting, &out.ConfigServerGitSetting
		*out = new(ConfigServerGitSettingObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.ContainerRegistry != nil {
		in, out := &in.ContainerRegistry, &out.ContainerRegistry
		*out = make([]ContainerRegistryObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.DefaultBuildService != nil {
		in, out := &in.DefaultBuildService, &out.DefaultBuildService
		*out = new(DefaultBuildServiceObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.ID != nil {
		in, out := &in.ID, &out.ID
		*out = new(string)
		**out = **in
	}
	if in.Location != nil {
		in, out := &in.Location, &out.Location
		*out = new(string)
		**out = **in
	}
	if in.LogStreamPublicEndpointEnabled != nil {
		in, out := &in.LogStreamPublicEndpointEnabled, &out.LogStreamPublicEndpointEnabled
		*out = new(bool)
		**out = **in
	}
	if in.ManagedEnvironmentID != nil {
		in, out := &in.ManagedEnvironmentID, &out.ManagedEnvironmentID
		*out = new(string)
		**out = **in
	}
	if in.Marketplace != nil {
		in, out := &in.Marketplace, &out.Marketplace
		*out = new(MarketplaceObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.Network != nil {
		in, out := &in.Network, &out.Network
		*out = new(NetworkObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.OutboundPublicIPAddresses != nil {
		in, out := &in.OutboundPublicIPAddresses, &out.OutboundPublicIPAddresses
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.RequiredNetworkTrafficRules != nil {
		in, out := &in.RequiredNetworkTrafficRules, &out.RequiredNetworkTrafficRules
		*out = make([]RequiredNetworkTrafficRulesObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ResourceGroupName != nil {
		in, out := &in.ResourceGroupName, &out.ResourceGroupName
		*out = new(string)
		**out = **in
	}
	if in.ServiceRegistryEnabled != nil {
		in, out := &in.ServiceRegistryEnabled, &out.ServiceRegistryEnabled
		*out = new(bool)
		**out = **in
	}
	if in.ServiceRegistryID != nil {
		in, out := &in.ServiceRegistryID, &out.ServiceRegistryID
		*out = new(string)
		**out = **in
	}
	if in.SkuName != nil {
		in, out := &in.SkuName, &out.SkuName
		*out = new(string)
		**out = **in
	}
	if in.SkuTier != nil {
		in, out := &in.SkuTier, &out.SkuTier
		*out = new(string)
		**out = **in
	}
	if in.Tags != nil {
		in, out := &in.Tags, &out.Tags
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
	if in.Trace != nil {
		in, out := &in.Trace, &out.Trace
		*out = new(TraceObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.ZoneRedundant != nil {
		in, out := &in.ZoneRedundant, &out.ZoneRedundant
		*out = new(bool)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudServiceObservation.
func (in *SpringCloudServiceObservation) DeepCopy() *SpringCloudServiceObservation {
	if in == nil {
		return nil
	}
	out := new(SpringCloudServiceObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): generated deepcopy code — do not hand-edit; regenerate instead.
func (in *SpringCloudServiceParameters) DeepCopyInto(out *SpringCloudServiceParameters) {
	*out = *in
	if in.BuildAgentPoolSize != nil {
		in, out := &in.BuildAgentPoolSize, &out.BuildAgentPoolSize
		*out = new(string)
		**out = **in
	}
	if in.ConfigServerGitSetting != nil {
		in, out := &in.ConfigServerGitSetting, &out.ConfigServerGitSetting
		*out = new(ConfigServerGitSettingParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.ContainerRegistry != nil {
		in, out := &in.ContainerRegistry, &out.ContainerRegistry
		*out = make([]ContainerRegistryParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.DefaultBuildService != nil {
		in, out := &in.DefaultBuildService, &out.DefaultBuildService
		*out = new(DefaultBuildServiceParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Location != nil {
		in, out := &in.Location, &out.Location
		*out = new(string)
		**out = **in
	}
	if in.LogStreamPublicEndpointEnabled != nil {
		in, out := &in.LogStreamPublicEndpointEnabled, &out.LogStreamPublicEndpointEnabled
		*out = new(bool)
		**out = **in
	}
	if in.ManagedEnvironmentID != nil {
		in, out := &in.ManagedEnvironmentID, &out.ManagedEnvironmentID
		*out = new(string)
		**out = **in
	}
	if in.Marketplace != nil {
		in, out := &in.Marketplace, &out.Marketplace
		*out = new(MarketplaceParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Network != nil {
		in, out := &in.Network, &out.Network
		*out = new(NetworkParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.ResourceGroupName != nil {
		in, out := &in.ResourceGroupName, &out.ResourceGroupName
		*out = new(string)
		**out = **in
	}
	if in.ResourceGroupNameRef != nil {
		in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef
		*out = new(v1.Reference)
		(*in).DeepCopyInto(*out)
	}
	if in.ResourceGroupNameSelector != nil {
		in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector
		*out = new(v1.Selector)
		(*in).DeepCopyInto(*out)
	}
	if in.ServiceRegistryEnabled != nil {
		in, out := &in.ServiceRegistryEnabled, &out.ServiceRegistryEnabled
		*out = new(bool)
		**out = **in
	}
	if in.SkuName != nil {
		in, out := &in.SkuName, &out.SkuName
		*out = new(string)
		**out = **in
	}
	if in.SkuTier != nil {
		in, out := &in.SkuTier, &out.SkuTier
		*out = new(string)
		**out = **in
	}
	if in.Tags != nil {
		in, out := &in.Tags, &out.Tags
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
	if in.Trace != nil {
		in, out := &in.Trace, &out.Trace
		*out = new(TraceParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.ZoneRedundant != nil {
		in, out := &in.ZoneRedundant, &out.ZoneRedundant
		*out = new(bool)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudServiceParameters.
func (in *SpringCloudServiceParameters) DeepCopy() *SpringCloudServiceParameters {
	if in == nil {
		return nil
	}
	out := new(SpringCloudServiceParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SpringCloudServiceSpec) DeepCopyInto(out *SpringCloudServiceSpec) {
	*out = *in
	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
	in.ForProvider.DeepCopyInto(&out.ForProvider)
	in.InitProvider.DeepCopyInto(&out.InitProvider)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudServiceSpec.
func (in *SpringCloudServiceSpec) DeepCopy() *SpringCloudServiceSpec {
	if in == nil {
		return nil
	}
	out := new(SpringCloudServiceSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SpringCloudServiceStatus) DeepCopyInto(out *SpringCloudServiceStatus) {
	*out = *in
	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
	in.AtProvider.DeepCopyInto(&out.AtProvider)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudServiceStatus.
func (in *SpringCloudServiceStatus) DeepCopy() *SpringCloudServiceStatus {
	if in == nil {
		return nil
	}
	out := new(SpringCloudServiceStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SsoInitParameters) DeepCopyInto(out *SsoInitParameters) {
	*out = *in
	if in.ClientID != nil {
		in, out := &in.ClientID, &out.ClientID
		*out = new(string)
		**out = **in
	}
	if in.ClientSecret != nil {
		in, out := &in.ClientSecret, &out.ClientSecret
		*out = new(string)
		**out = **in
	}
	if in.IssuerURI != nil {
		in, out := &in.IssuerURI, &out.IssuerURI
		*out = new(string)
		**out = **in
	}
	if in.Scope != nil {
		in, out := &in.Scope, &out.Scope
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SsoInitParameters.
func (in *SsoInitParameters) DeepCopy() *SsoInitParameters {
	if in == nil {
		return nil
	}
	out := new(SsoInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): generated deepcopy code — do not hand-edit; regenerate instead.
func (in *SsoObservation) DeepCopyInto(out *SsoObservation) {
	*out = *in
	if in.ClientID != nil {
		in, out := &in.ClientID, &out.ClientID
		*out = new(string)
		**out = **in
	}
	if in.ClientSecret != nil {
		in, out := &in.ClientSecret, &out.ClientSecret
		*out = new(string)
		**out = **in
	}
	if in.IssuerURI != nil {
		in, out := &in.IssuerURI, &out.IssuerURI
		*out = new(string)
		**out = **in
	}
	if in.Scope != nil {
		in, out := &in.Scope, &out.Scope
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SsoObservation.
func (in *SsoObservation) DeepCopy() *SsoObservation {
	if in == nil {
		return nil
	}
	out := new(SsoObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SsoParameters) DeepCopyInto(out *SsoParameters) {
	*out = *in
	if in.ClientID != nil {
		in, out := &in.ClientID, &out.ClientID
		*out = new(string)
		**out = **in
	}
	if in.ClientSecret != nil {
		in, out := &in.ClientSecret, &out.ClientSecret
		*out = new(string)
		**out = **in
	}
	if in.IssuerURI != nil {
		in, out := &in.IssuerURI, &out.IssuerURI
		*out = new(string)
		**out = **in
	}
	if in.Scope != nil {
		in, out := &in.Scope, &out.Scope
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SsoParameters.
// NOTE(review): generated deepcopy code — do not hand-edit; regenerate instead.
func (in *SsoParameters) DeepCopy() *SsoParameters {
	if in == nil {
		return nil
	}
	out := new(SsoParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StackInitParameters) DeepCopyInto(out *StackInitParameters) {
	*out = *in
	if in.ID != nil {
		in, out := &in.ID, &out.ID
		*out = new(string)
		**out = **in
	}
	if in.Version != nil {
		in, out := &in.Version, &out.Version
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StackInitParameters.
func (in *StackInitParameters) DeepCopy() *StackInitParameters {
	if in == nil {
		return nil
	}
	out := new(StackInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StackObservation) DeepCopyInto(out *StackObservation) {
	*out = *in
	if in.ID != nil {
		in, out := &in.ID, &out.ID
		*out = new(string)
		**out = **in
	}
	if in.Version != nil {
		in, out := &in.Version, &out.Version
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StackObservation.
func (in *StackObservation) DeepCopy() *StackObservation {
	if in == nil {
		return nil
	}
	out := new(StackObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): generated deepcopy code — do not hand-edit; regenerate instead.
func (in *StackParameters) DeepCopyInto(out *StackParameters) {
	*out = *in
	if in.ID != nil {
		in, out := &in.ID, &out.ID
		*out = new(string)
		**out = **in
	}
	if in.Version != nil {
		in, out := &in.Version, &out.Version
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StackParameters.
func (in *StackParameters) DeepCopy() *StackParameters {
	if in == nil {
		return nil
	}
	out := new(StackParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TraceInitParameters) DeepCopyInto(out *TraceInitParameters) {
	*out = *in
	if in.ConnectionString != nil {
		in, out := &in.ConnectionString, &out.ConnectionString
		*out = new(string)
		**out = **in
	}
	if in.ConnectionStringRef != nil {
		in, out := &in.ConnectionStringRef, &out.ConnectionStringRef
		*out = new(v1.Reference)
		(*in).DeepCopyInto(*out)
	}
	if in.ConnectionStringSelector != nil {
		in, out := &in.ConnectionStringSelector, &out.ConnectionStringSelector
		*out = new(v1.Selector)
		(*in).DeepCopyInto(*out)
	}
	if in.SampleRate != nil {
		in, out := &in.SampleRate, &out.SampleRate
		*out = new(float64)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TraceInitParameters.
func (in *TraceInitParameters) DeepCopy() *TraceInitParameters {
	if in == nil {
		return nil
	}
	out := new(TraceInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): generated deepcopy code — do not hand-edit; regenerate instead.
func (in *TraceObservation) DeepCopyInto(out *TraceObservation) {
	*out = *in
	if in.ConnectionString != nil {
		in, out := &in.ConnectionString, &out.ConnectionString
		*out = new(string)
		**out = **in
	}
	if in.SampleRate != nil {
		in, out := &in.SampleRate, &out.SampleRate
		*out = new(float64)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TraceObservation.
func (in *TraceObservation) DeepCopy() *TraceObservation {
	if in == nil {
		return nil
	}
	out := new(TraceObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TraceParameters) DeepCopyInto(out *TraceParameters) {
	*out = *in
	if in.ConnectionString != nil {
		in, out := &in.ConnectionString, &out.ConnectionString
		*out = new(string)
		**out = **in
	}
	if in.ConnectionStringRef != nil {
		in, out := &in.ConnectionStringRef, &out.ConnectionStringRef
		*out = new(v1.Reference)
		(*in).DeepCopyInto(*out)
	}
	if in.ConnectionStringSelector != nil {
		in, out := &in.ConnectionStringSelector, &out.ConnectionStringSelector
		*out = new(v1.Selector)
		(*in).DeepCopyInto(*out)
	}
	if in.SampleRate != nil {
		in, out := &in.SampleRate, &out.SampleRate
		*out = new(float64)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TraceParameters.
// NOTE(review): generated deepcopy code — do not hand-edit; regenerate instead.
func (in *TraceParameters) DeepCopy() *TraceParameters {
	if in == nil {
		return nil
	}
	out := new(TraceParameters)
	in.DeepCopyInto(out)
	return out
}
diff --git a/apis/appplatform/v1beta2/zz_generated.managed.go b/apis/appplatform/v1beta2/zz_generated.managed.go
new file mode 100644
index 000000000..fadacc946
--- /dev/null
+++ b/apis/appplatform/v1beta2/zz_generated.managed.go
@@ -0,0 +1,668 @@
// SPDX-FileCopyrightText: 2024 The Crossplane Authors
//
// SPDX-License-Identifier: Apache-2.0
// Code generated by angryjet. DO NOT EDIT.

package v1beta2

import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1"

// GetCondition of this SpringCloudAPIPortal.
func (mg *SpringCloudAPIPortal) GetCondition(ct xpv1.ConditionType) xpv1.Condition {
	return mg.Status.GetCondition(ct)
}

// GetDeletionPolicy of this SpringCloudAPIPortal.
func (mg *SpringCloudAPIPortal) GetDeletionPolicy() xpv1.DeletionPolicy {
	return mg.Spec.DeletionPolicy
}

// GetManagementPolicies of this SpringCloudAPIPortal.
func (mg *SpringCloudAPIPortal) GetManagementPolicies() xpv1.ManagementPolicies {
	return mg.Spec.ManagementPolicies
}

// GetProviderConfigReference of this SpringCloudAPIPortal.
func (mg *SpringCloudAPIPortal) GetProviderConfigReference() *xpv1.Reference {
	return mg.Spec.ProviderConfigReference
}

// GetPublishConnectionDetailsTo of this SpringCloudAPIPortal.
func (mg *SpringCloudAPIPortal) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo {
	return mg.Spec.PublishConnectionDetailsTo
}

// GetWriteConnectionSecretToReference of this SpringCloudAPIPortal.
func (mg *SpringCloudAPIPortal) GetWriteConnectionSecretToReference() *xpv1.SecretReference {
	return mg.Spec.WriteConnectionSecretToReference
}

// SetConditions of this SpringCloudAPIPortal.
func (mg *SpringCloudAPIPortal) SetConditions(c ...xpv1.Condition) {
	mg.Status.SetConditions(c...)
}

// SetDeletionPolicy of this SpringCloudAPIPortal.
// NOTE(review): angryjet-generated managed-resource accessors (file header says
// "DO NOT EDIT") — do not hand-edit; regenerate instead.
func (mg *SpringCloudAPIPortal) SetDeletionPolicy(r xpv1.DeletionPolicy) {
	mg.Spec.DeletionPolicy = r
}

// SetManagementPolicies of this SpringCloudAPIPortal.
func (mg *SpringCloudAPIPortal) SetManagementPolicies(r xpv1.ManagementPolicies) {
	mg.Spec.ManagementPolicies = r
}

// SetProviderConfigReference of this SpringCloudAPIPortal.
func (mg *SpringCloudAPIPortal) SetProviderConfigReference(r *xpv1.Reference) {
	mg.Spec.ProviderConfigReference = r
}

// SetPublishConnectionDetailsTo of this SpringCloudAPIPortal.
func (mg *SpringCloudAPIPortal) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) {
	mg.Spec.PublishConnectionDetailsTo = r
}

// SetWriteConnectionSecretToReference of this SpringCloudAPIPortal.
func (mg *SpringCloudAPIPortal) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) {
	mg.Spec.WriteConnectionSecretToReference = r
}

// GetCondition of this SpringCloudApp.
func (mg *SpringCloudApp) GetCondition(ct xpv1.ConditionType) xpv1.Condition {
	return mg.Status.GetCondition(ct)
}

// GetDeletionPolicy of this SpringCloudApp.
func (mg *SpringCloudApp) GetDeletionPolicy() xpv1.DeletionPolicy {
	return mg.Spec.DeletionPolicy
}

// GetManagementPolicies of this SpringCloudApp.
func (mg *SpringCloudApp) GetManagementPolicies() xpv1.ManagementPolicies {
	return mg.Spec.ManagementPolicies
}

// GetProviderConfigReference of this SpringCloudApp.
func (mg *SpringCloudApp) GetProviderConfigReference() *xpv1.Reference {
	return mg.Spec.ProviderConfigReference
}

// GetPublishConnectionDetailsTo of this SpringCloudApp.
func (mg *SpringCloudApp) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo {
	return mg.Spec.PublishConnectionDetailsTo
}

// GetWriteConnectionSecretToReference of this SpringCloudApp.
// NOTE(review): angryjet-generated managed-resource accessors — do not hand-edit.
func (mg *SpringCloudApp) GetWriteConnectionSecretToReference() *xpv1.SecretReference {
	return mg.Spec.WriteConnectionSecretToReference
}

// SetConditions of this SpringCloudApp.
func (mg *SpringCloudApp) SetConditions(c ...xpv1.Condition) {
	mg.Status.SetConditions(c...)
}

// SetDeletionPolicy of this SpringCloudApp.
func (mg *SpringCloudApp) SetDeletionPolicy(r xpv1.DeletionPolicy) {
	mg.Spec.DeletionPolicy = r
}

// SetManagementPolicies of this SpringCloudApp.
func (mg *SpringCloudApp) SetManagementPolicies(r xpv1.ManagementPolicies) {
	mg.Spec.ManagementPolicies = r
}

// SetProviderConfigReference of this SpringCloudApp.
func (mg *SpringCloudApp) SetProviderConfigReference(r *xpv1.Reference) {
	mg.Spec.ProviderConfigReference = r
}

// SetPublishConnectionDetailsTo of this SpringCloudApp.
func (mg *SpringCloudApp) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) {
	mg.Spec.PublishConnectionDetailsTo = r
}

// SetWriteConnectionSecretToReference of this SpringCloudApp.
func (mg *SpringCloudApp) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) {
	mg.Spec.WriteConnectionSecretToReference = r
}

// GetCondition of this SpringCloudBuildDeployment.
func (mg *SpringCloudBuildDeployment) GetCondition(ct xpv1.ConditionType) xpv1.Condition {
	return mg.Status.GetCondition(ct)
}

// GetDeletionPolicy of this SpringCloudBuildDeployment.
func (mg *SpringCloudBuildDeployment) GetDeletionPolicy() xpv1.DeletionPolicy {
	return mg.Spec.DeletionPolicy
}

// GetManagementPolicies of this SpringCloudBuildDeployment.
func (mg *SpringCloudBuildDeployment) GetManagementPolicies() xpv1.ManagementPolicies {
	return mg.Spec.ManagementPolicies
}

// GetProviderConfigReference of this SpringCloudBuildDeployment.
// NOTE(review): angryjet-generated managed-resource accessors — do not hand-edit.
func (mg *SpringCloudBuildDeployment) GetProviderConfigReference() *xpv1.Reference {
	return mg.Spec.ProviderConfigReference
}

// GetPublishConnectionDetailsTo of this SpringCloudBuildDeployment.
func (mg *SpringCloudBuildDeployment) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo {
	return mg.Spec.PublishConnectionDetailsTo
}

// GetWriteConnectionSecretToReference of this SpringCloudBuildDeployment.
func (mg *SpringCloudBuildDeployment) GetWriteConnectionSecretToReference() *xpv1.SecretReference {
	return mg.Spec.WriteConnectionSecretToReference
}

// SetConditions of this SpringCloudBuildDeployment.
func (mg *SpringCloudBuildDeployment) SetConditions(c ...xpv1.Condition) {
	mg.Status.SetConditions(c...)
}

// SetDeletionPolicy of this SpringCloudBuildDeployment.
func (mg *SpringCloudBuildDeployment) SetDeletionPolicy(r xpv1.DeletionPolicy) {
	mg.Spec.DeletionPolicy = r
}

// SetManagementPolicies of this SpringCloudBuildDeployment.
func (mg *SpringCloudBuildDeployment) SetManagementPolicies(r xpv1.ManagementPolicies) {
	mg.Spec.ManagementPolicies = r
}

// SetProviderConfigReference of this SpringCloudBuildDeployment.
func (mg *SpringCloudBuildDeployment) SetProviderConfigReference(r *xpv1.Reference) {
	mg.Spec.ProviderConfigReference = r
}

// SetPublishConnectionDetailsTo of this SpringCloudBuildDeployment.
func (mg *SpringCloudBuildDeployment) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) {
	mg.Spec.PublishConnectionDetailsTo = r
}

// SetWriteConnectionSecretToReference of this SpringCloudBuildDeployment.
func (mg *SpringCloudBuildDeployment) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) {
	mg.Spec.WriteConnectionSecretToReference = r
}

// GetCondition of this SpringCloudBuildPackBinding.
// NOTE(review): angryjet-generated managed-resource accessors — do not hand-edit.
func (mg *SpringCloudBuildPackBinding) GetCondition(ct xpv1.ConditionType) xpv1.Condition {
	return mg.Status.GetCondition(ct)
}

// GetDeletionPolicy of this SpringCloudBuildPackBinding.
func (mg *SpringCloudBuildPackBinding) GetDeletionPolicy() xpv1.DeletionPolicy {
	return mg.Spec.DeletionPolicy
}

// GetManagementPolicies of this SpringCloudBuildPackBinding.
func (mg *SpringCloudBuildPackBinding) GetManagementPolicies() xpv1.ManagementPolicies {
	return mg.Spec.ManagementPolicies
}

// GetProviderConfigReference of this SpringCloudBuildPackBinding.
func (mg *SpringCloudBuildPackBinding) GetProviderConfigReference() *xpv1.Reference {
	return mg.Spec.ProviderConfigReference
}

// GetPublishConnectionDetailsTo of this SpringCloudBuildPackBinding.
func (mg *SpringCloudBuildPackBinding) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo {
	return mg.Spec.PublishConnectionDetailsTo
}

// GetWriteConnectionSecretToReference of this SpringCloudBuildPackBinding.
func (mg *SpringCloudBuildPackBinding) GetWriteConnectionSecretToReference() *xpv1.SecretReference {
	return mg.Spec.WriteConnectionSecretToReference
}

// SetConditions of this SpringCloudBuildPackBinding.
func (mg *SpringCloudBuildPackBinding) SetConditions(c ...xpv1.Condition) {
	mg.Status.SetConditions(c...)
}

// SetDeletionPolicy of this SpringCloudBuildPackBinding.
func (mg *SpringCloudBuildPackBinding) SetDeletionPolicy(r xpv1.DeletionPolicy) {
	mg.Spec.DeletionPolicy = r
}

// SetManagementPolicies of this SpringCloudBuildPackBinding.
func (mg *SpringCloudBuildPackBinding) SetManagementPolicies(r xpv1.ManagementPolicies) {
	mg.Spec.ManagementPolicies = r
}

// SetProviderConfigReference of this SpringCloudBuildPackBinding.
func (mg *SpringCloudBuildPackBinding) SetProviderConfigReference(r *xpv1.Reference) {
	mg.Spec.ProviderConfigReference = r
}

// SetPublishConnectionDetailsTo of this SpringCloudBuildPackBinding.
// NOTE(review): angryjet-generated managed-resource accessors — do not hand-edit.
func (mg *SpringCloudBuildPackBinding) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) {
	mg.Spec.PublishConnectionDetailsTo = r
}

// SetWriteConnectionSecretToReference of this SpringCloudBuildPackBinding.
func (mg *SpringCloudBuildPackBinding) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) {
	mg.Spec.WriteConnectionSecretToReference = r
}

// GetCondition of this SpringCloudBuilder.
func (mg *SpringCloudBuilder) GetCondition(ct xpv1.ConditionType) xpv1.Condition {
	return mg.Status.GetCondition(ct)
}

// GetDeletionPolicy of this SpringCloudBuilder.
func (mg *SpringCloudBuilder) GetDeletionPolicy() xpv1.DeletionPolicy {
	return mg.Spec.DeletionPolicy
}

// GetManagementPolicies of this SpringCloudBuilder.
func (mg *SpringCloudBuilder) GetManagementPolicies() xpv1.ManagementPolicies {
	return mg.Spec.ManagementPolicies
}

// GetProviderConfigReference of this SpringCloudBuilder.
func (mg *SpringCloudBuilder) GetProviderConfigReference() *xpv1.Reference {
	return mg.Spec.ProviderConfigReference
}

// GetPublishConnectionDetailsTo of this SpringCloudBuilder.
func (mg *SpringCloudBuilder) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo {
	return mg.Spec.PublishConnectionDetailsTo
}

// GetWriteConnectionSecretToReference of this SpringCloudBuilder.
func (mg *SpringCloudBuilder) GetWriteConnectionSecretToReference() *xpv1.SecretReference {
	return mg.Spec.WriteConnectionSecretToReference
}

// SetConditions of this SpringCloudBuilder.
func (mg *SpringCloudBuilder) SetConditions(c ...xpv1.Condition) {
	mg.Status.SetConditions(c...)
}

// SetDeletionPolicy of this SpringCloudBuilder.
func (mg *SpringCloudBuilder) SetDeletionPolicy(r xpv1.DeletionPolicy) {
	mg.Spec.DeletionPolicy = r
}

// SetManagementPolicies of this SpringCloudBuilder.
// NOTE(review): angryjet-generated managed-resource accessors — do not hand-edit.
func (mg *SpringCloudBuilder) SetManagementPolicies(r xpv1.ManagementPolicies) {
	mg.Spec.ManagementPolicies = r
}

// SetProviderConfigReference of this SpringCloudBuilder.
func (mg *SpringCloudBuilder) SetProviderConfigReference(r *xpv1.Reference) {
	mg.Spec.ProviderConfigReference = r
}

// SetPublishConnectionDetailsTo of this SpringCloudBuilder.
func (mg *SpringCloudBuilder) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) {
	mg.Spec.PublishConnectionDetailsTo = r
}

// SetWriteConnectionSecretToReference of this SpringCloudBuilder.
func (mg *SpringCloudBuilder) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) {
	mg.Spec.WriteConnectionSecretToReference = r
}

// GetCondition of this SpringCloudContainerDeployment.
func (mg *SpringCloudContainerDeployment) GetCondition(ct xpv1.ConditionType) xpv1.Condition {
	return mg.Status.GetCondition(ct)
}

// GetDeletionPolicy of this SpringCloudContainerDeployment.
func (mg *SpringCloudContainerDeployment) GetDeletionPolicy() xpv1.DeletionPolicy {
	return mg.Spec.DeletionPolicy
}

// GetManagementPolicies of this SpringCloudContainerDeployment.
func (mg *SpringCloudContainerDeployment) GetManagementPolicies() xpv1.ManagementPolicies {
	return mg.Spec.ManagementPolicies
}

// GetProviderConfigReference of this SpringCloudContainerDeployment.
func (mg *SpringCloudContainerDeployment) GetProviderConfigReference() *xpv1.Reference {
	return mg.Spec.ProviderConfigReference
}

// GetPublishConnectionDetailsTo of this SpringCloudContainerDeployment.
func (mg *SpringCloudContainerDeployment) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo {
	return mg.Spec.PublishConnectionDetailsTo
}

// GetWriteConnectionSecretToReference of this SpringCloudContainerDeployment.
// NOTE(review): angryjet-generated managed-resource accessors — do not hand-edit.
func (mg *SpringCloudContainerDeployment) GetWriteConnectionSecretToReference() *xpv1.SecretReference {
	return mg.Spec.WriteConnectionSecretToReference
}

// SetConditions of this SpringCloudContainerDeployment.
func (mg *SpringCloudContainerDeployment) SetConditions(c ...xpv1.Condition) {
	mg.Status.SetConditions(c...)
}

// SetDeletionPolicy of this SpringCloudContainerDeployment.
func (mg *SpringCloudContainerDeployment) SetDeletionPolicy(r xpv1.DeletionPolicy) {
	mg.Spec.DeletionPolicy = r
}

// SetManagementPolicies of this SpringCloudContainerDeployment.
func (mg *SpringCloudContainerDeployment) SetManagementPolicies(r xpv1.ManagementPolicies) {
	mg.Spec.ManagementPolicies = r
}

// SetProviderConfigReference of this SpringCloudContainerDeployment.
func (mg *SpringCloudContainerDeployment) SetProviderConfigReference(r *xpv1.Reference) {
	mg.Spec.ProviderConfigReference = r
}

// SetPublishConnectionDetailsTo of this SpringCloudContainerDeployment.
func (mg *SpringCloudContainerDeployment) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) {
	mg.Spec.PublishConnectionDetailsTo = r
}

// SetWriteConnectionSecretToReference of this SpringCloudContainerDeployment.
func (mg *SpringCloudContainerDeployment) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) {
	mg.Spec.WriteConnectionSecretToReference = r
}

// GetCondition of this SpringCloudCustomizedAccelerator.
func (mg *SpringCloudCustomizedAccelerator) GetCondition(ct xpv1.ConditionType) xpv1.Condition {
	return mg.Status.GetCondition(ct)
}

// GetDeletionPolicy of this SpringCloudCustomizedAccelerator.
func (mg *SpringCloudCustomizedAccelerator) GetDeletionPolicy() xpv1.DeletionPolicy {
	return mg.Spec.DeletionPolicy
}

// GetManagementPolicies of this SpringCloudCustomizedAccelerator.
+func (mg *SpringCloudCustomizedAccelerator) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this SpringCloudCustomizedAccelerator. +func (mg *SpringCloudCustomizedAccelerator) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this SpringCloudCustomizedAccelerator. +func (mg *SpringCloudCustomizedAccelerator) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this SpringCloudCustomizedAccelerator. +func (mg *SpringCloudCustomizedAccelerator) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this SpringCloudCustomizedAccelerator. +func (mg *SpringCloudCustomizedAccelerator) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this SpringCloudCustomizedAccelerator. +func (mg *SpringCloudCustomizedAccelerator) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this SpringCloudCustomizedAccelerator. +func (mg *SpringCloudCustomizedAccelerator) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this SpringCloudCustomizedAccelerator. +func (mg *SpringCloudCustomizedAccelerator) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this SpringCloudCustomizedAccelerator. +func (mg *SpringCloudCustomizedAccelerator) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this SpringCloudCustomizedAccelerator. 
+func (mg *SpringCloudCustomizedAccelerator) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this SpringCloudDevToolPortal. +func (mg *SpringCloudDevToolPortal) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this SpringCloudDevToolPortal. +func (mg *SpringCloudDevToolPortal) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this SpringCloudDevToolPortal. +func (mg *SpringCloudDevToolPortal) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this SpringCloudDevToolPortal. +func (mg *SpringCloudDevToolPortal) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this SpringCloudDevToolPortal. +func (mg *SpringCloudDevToolPortal) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this SpringCloudDevToolPortal. +func (mg *SpringCloudDevToolPortal) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this SpringCloudDevToolPortal. +func (mg *SpringCloudDevToolPortal) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this SpringCloudDevToolPortal. +func (mg *SpringCloudDevToolPortal) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this SpringCloudDevToolPortal. +func (mg *SpringCloudDevToolPortal) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this SpringCloudDevToolPortal. 
+func (mg *SpringCloudDevToolPortal) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this SpringCloudDevToolPortal. +func (mg *SpringCloudDevToolPortal) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this SpringCloudDevToolPortal. +func (mg *SpringCloudDevToolPortal) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this SpringCloudGateway. +func (mg *SpringCloudGateway) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this SpringCloudGateway. +func (mg *SpringCloudGateway) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this SpringCloudGateway. +func (mg *SpringCloudGateway) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this SpringCloudGateway. +func (mg *SpringCloudGateway) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this SpringCloudGateway. +func (mg *SpringCloudGateway) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this SpringCloudGateway. +func (mg *SpringCloudGateway) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this SpringCloudGateway. +func (mg *SpringCloudGateway) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this SpringCloudGateway. 
+func (mg *SpringCloudGateway) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this SpringCloudGateway. +func (mg *SpringCloudGateway) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this SpringCloudGateway. +func (mg *SpringCloudGateway) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this SpringCloudGateway. +func (mg *SpringCloudGateway) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this SpringCloudGateway. +func (mg *SpringCloudGateway) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this SpringCloudJavaDeployment. +func (mg *SpringCloudJavaDeployment) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this SpringCloudJavaDeployment. +func (mg *SpringCloudJavaDeployment) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this SpringCloudJavaDeployment. +func (mg *SpringCloudJavaDeployment) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this SpringCloudJavaDeployment. +func (mg *SpringCloudJavaDeployment) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this SpringCloudJavaDeployment. +func (mg *SpringCloudJavaDeployment) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this SpringCloudJavaDeployment. 
+func (mg *SpringCloudJavaDeployment) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this SpringCloudJavaDeployment. +func (mg *SpringCloudJavaDeployment) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this SpringCloudJavaDeployment. +func (mg *SpringCloudJavaDeployment) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this SpringCloudJavaDeployment. +func (mg *SpringCloudJavaDeployment) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this SpringCloudJavaDeployment. +func (mg *SpringCloudJavaDeployment) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this SpringCloudJavaDeployment. +func (mg *SpringCloudJavaDeployment) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this SpringCloudJavaDeployment. +func (mg *SpringCloudJavaDeployment) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this SpringCloudService. +func (mg *SpringCloudService) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this SpringCloudService. +func (mg *SpringCloudService) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this SpringCloudService. +func (mg *SpringCloudService) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this SpringCloudService. 
+func (mg *SpringCloudService) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this SpringCloudService. +func (mg *SpringCloudService) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this SpringCloudService. +func (mg *SpringCloudService) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this SpringCloudService. +func (mg *SpringCloudService) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this SpringCloudService. +func (mg *SpringCloudService) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this SpringCloudService. +func (mg *SpringCloudService) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this SpringCloudService. +func (mg *SpringCloudService) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this SpringCloudService. +func (mg *SpringCloudService) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this SpringCloudService. 
+func (mg *SpringCloudService) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/appplatform/v1beta2/zz_generated.managedlist.go b/apis/appplatform/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..41f19aad5 --- /dev/null +++ b/apis/appplatform/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,107 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this SpringCloudAPIPortalList. +func (l *SpringCloudAPIPortalList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this SpringCloudAppList. +func (l *SpringCloudAppList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this SpringCloudBuildDeploymentList. +func (l *SpringCloudBuildDeploymentList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this SpringCloudBuildPackBindingList. +func (l *SpringCloudBuildPackBindingList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this SpringCloudBuilderList. +func (l *SpringCloudBuilderList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this SpringCloudContainerDeploymentList. 
+func (l *SpringCloudContainerDeploymentList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this SpringCloudCustomizedAcceleratorList. +func (l *SpringCloudCustomizedAcceleratorList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this SpringCloudDevToolPortalList. +func (l *SpringCloudDevToolPortalList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this SpringCloudGatewayList. +func (l *SpringCloudGatewayList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this SpringCloudJavaDeploymentList. +func (l *SpringCloudJavaDeploymentList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this SpringCloudServiceList. +func (l *SpringCloudServiceList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/appplatform/v1beta2/zz_generated.resolvers.go b/apis/appplatform/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..c0d349908 --- /dev/null +++ b/apis/appplatform/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,583 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + rconfig "github.com/upbound/provider-azure/apis/rconfig" + apisresolver "github.com/upbound/provider-azure/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *SpringCloudAPIPortal) ResolveReferences( // ResolveReferences of this SpringCloudAPIPortal. + ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("appplatform.azure.upbound.io", "v1beta2", "SpringCloudGateway", "SpringCloudGatewayList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.GatewayIds), + Extract: resource.ExtractResourceID(), + References: mg.Spec.ForProvider.GatewayIdsRefs, + Selector: mg.Spec.ForProvider.GatewayIdsSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.GatewayIds") + } + mg.Spec.ForProvider.GatewayIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.GatewayIdsRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("appplatform.azure.upbound.io", "v1beta2", "SpringCloudService", "SpringCloudServiceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + 
CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SpringCloudServiceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.SpringCloudServiceIDRef, + Selector: mg.Spec.ForProvider.SpringCloudServiceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SpringCloudServiceID") + } + mg.Spec.ForProvider.SpringCloudServiceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SpringCloudServiceIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("appplatform.azure.upbound.io", "v1beta2", "SpringCloudGateway", "SpringCloudGatewayList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.GatewayIds), + Extract: resource.ExtractResourceID(), + References: mg.Spec.InitProvider.GatewayIdsRefs, + Selector: mg.Spec.InitProvider.GatewayIdsSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.GatewayIds") + } + mg.Spec.InitProvider.GatewayIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.GatewayIdsRefs = mrsp.ResolvedReferences + + return nil +} + +// ResolveReferences of this SpringCloudApp. 
+func (mg *SpringCloudApp) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("appplatform.azure.upbound.io", "v1beta2", "SpringCloudService", "SpringCloudServiceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ServiceName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ServiceNameRef, + Selector: mg.Spec.ForProvider.ServiceNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ServiceName") + } + mg.Spec.ForProvider.ServiceName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ServiceNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this SpringCloudBuildDeployment. 
+func (mg *SpringCloudBuildDeployment) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("appplatform.azure.upbound.io", "v1beta2", "SpringCloudApp", "SpringCloudAppList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SpringCloudAppID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.SpringCloudAppIDRef, + Selector: mg.Spec.ForProvider.SpringCloudAppIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SpringCloudAppID") + } + mg.Spec.ForProvider.SpringCloudAppID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SpringCloudAppIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this SpringCloudBuildPackBinding. 
+func (mg *SpringCloudBuildPackBinding) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("appplatform.azure.upbound.io", "v1beta2", "SpringCloudBuilder", "SpringCloudBuilderList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SpringCloudBuilderID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.SpringCloudBuilderIDRef, + Selector: mg.Spec.ForProvider.SpringCloudBuilderIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SpringCloudBuilderID") + } + mg.Spec.ForProvider.SpringCloudBuilderID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SpringCloudBuilderIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this SpringCloudBuilder. 
+func (mg *SpringCloudBuilder) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("appplatform.azure.upbound.io", "v1beta2", "SpringCloudService", "SpringCloudServiceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SpringCloudServiceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.SpringCloudServiceIDRef, + Selector: mg.Spec.ForProvider.SpringCloudServiceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SpringCloudServiceID") + } + mg.Spec.ForProvider.SpringCloudServiceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SpringCloudServiceIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("appplatform.azure.upbound.io", "v1beta2", "SpringCloudService", "SpringCloudServiceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SpringCloudServiceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.SpringCloudServiceIDRef, + Selector: mg.Spec.InitProvider.SpringCloudServiceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SpringCloudServiceID") + } + mg.Spec.InitProvider.SpringCloudServiceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SpringCloudServiceIDRef = 
rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this SpringCloudContainerDeployment. +func (mg *SpringCloudContainerDeployment) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("appplatform.azure.upbound.io", "v1beta2", "SpringCloudApp", "SpringCloudAppList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SpringCloudAppID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.SpringCloudAppIDRef, + Selector: mg.Spec.ForProvider.SpringCloudAppIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SpringCloudAppID") + } + mg.Spec.ForProvider.SpringCloudAppID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SpringCloudAppIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this SpringCloudCustomizedAccelerator. 
+func (mg *SpringCloudCustomizedAccelerator) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("appplatform.azure.upbound.io", "v1beta1", "SpringCloudAccelerator", "SpringCloudAcceleratorList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SpringCloudAcceleratorID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.SpringCloudAcceleratorIDRef, + Selector: mg.Spec.ForProvider.SpringCloudAcceleratorIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SpringCloudAcceleratorID") + } + mg.Spec.ForProvider.SpringCloudAcceleratorID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SpringCloudAcceleratorIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this SpringCloudDevToolPortal. 
+func (mg *SpringCloudDevToolPortal) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("appplatform.azure.upbound.io", "v1beta2", "SpringCloudService", "SpringCloudServiceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SpringCloudServiceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.SpringCloudServiceIDRef, + Selector: mg.Spec.ForProvider.SpringCloudServiceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SpringCloudServiceID") + } + mg.Spec.ForProvider.SpringCloudServiceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SpringCloudServiceIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("appplatform.azure.upbound.io", "v1beta2", "SpringCloudService", "SpringCloudServiceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SpringCloudServiceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.SpringCloudServiceIDRef, + Selector: mg.Spec.InitProvider.SpringCloudServiceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SpringCloudServiceID") + } + mg.Spec.InitProvider.SpringCloudServiceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SpringCloudServiceIDRef 
= rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this SpringCloudGateway. +func (mg *SpringCloudGateway) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("appplatform.azure.upbound.io", "v1beta2", "SpringCloudService", "SpringCloudServiceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SpringCloudServiceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.SpringCloudServiceIDRef, + Selector: mg.Spec.ForProvider.SpringCloudServiceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SpringCloudServiceID") + } + mg.Spec.ForProvider.SpringCloudServiceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SpringCloudServiceIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this SpringCloudJavaDeployment. 
+func (mg *SpringCloudJavaDeployment) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("appplatform.azure.upbound.io", "v1beta2", "SpringCloudApp", "SpringCloudAppList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SpringCloudAppID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.SpringCloudAppIDRef, + Selector: mg.Spec.ForProvider.SpringCloudAppIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SpringCloudAppID") + } + mg.Spec.ForProvider.SpringCloudAppID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SpringCloudAppIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this SpringCloudService. 
+func (mg *SpringCloudService) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + if mg.Spec.ForProvider.Network != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Network.AppSubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.Network.AppSubnetIDRef, + Selector: mg.Spec.ForProvider.Network.AppSubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Network.AppSubnetID") + } + mg.Spec.ForProvider.Network.AppSubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Network.AppSubnetIDRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.Network != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Network.ServiceRuntimeSubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.Network.ServiceRuntimeSubnetIDRef, + Selector: mg.Spec.ForProvider.Network.ServiceRuntimeSubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Network.ServiceRuntimeSubnetID") + } + mg.Spec.ForProvider.Network.ServiceRuntimeSubnetID = 
reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Network.ServiceRuntimeSubnetIDRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.Trace != nil { + { + m, l, err = apisresolver.GetManagedResource("insights.azure.upbound.io", "v1beta1", "ApplicationInsights", "ApplicationInsightsList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Trace.ConnectionString), + Extract: resource.ExtractParamPath("connection_string", true), + Reference: mg.Spec.ForProvider.Trace.ConnectionStringRef, + Selector: mg.Spec.ForProvider.Trace.ConnectionStringSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Trace.ConnectionString") + } + mg.Spec.ForProvider.Trace.ConnectionString = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Trace.ConnectionStringRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.Network != nil { + { + m, l, err = 
apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Network.AppSubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.Network.AppSubnetIDRef, + Selector: mg.Spec.InitProvider.Network.AppSubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Network.AppSubnetID") + } + mg.Spec.InitProvider.Network.AppSubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Network.AppSubnetIDRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.Network != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Network.ServiceRuntimeSubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.Network.ServiceRuntimeSubnetIDRef, + Selector: mg.Spec.InitProvider.Network.ServiceRuntimeSubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Network.ServiceRuntimeSubnetID") + } + mg.Spec.InitProvider.Network.ServiceRuntimeSubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Network.ServiceRuntimeSubnetIDRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.Trace != nil { + { + m, l, err = apisresolver.GetManagedResource("insights.azure.upbound.io", "v1beta1", "ApplicationInsights", "ApplicationInsightsList") + if err != nil 
{ + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Trace.ConnectionString), + Extract: resource.ExtractParamPath("connection_string", true), + Reference: mg.Spec.InitProvider.Trace.ConnectionStringRef, + Selector: mg.Spec.InitProvider.Trace.ConnectionStringSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Trace.ConnectionString") + } + mg.Spec.InitProvider.Trace.ConnectionString = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Trace.ConnectionStringRef = rsp.ResolvedReference + + } + + return nil +} diff --git a/apis/appplatform/v1beta2/zz_groupversion_info.go b/apis/appplatform/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..a300b6050 --- /dev/null +++ b/apis/appplatform/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=appplatform.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "appplatform.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/appplatform/v1beta2/zz_springcloudapiportal_terraformed.go b/apis/appplatform/v1beta2/zz_springcloudapiportal_terraformed.go new file mode 100755 index 000000000..274c0b332 --- /dev/null +++ b/apis/appplatform/v1beta2/zz_springcloudapiportal_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this SpringCloudAPIPortal +func (mg *SpringCloudAPIPortal) GetTerraformResourceType() string { + return "azurerm_spring_cloud_api_portal" +} + +// GetConnectionDetailsMapping for this SpringCloudAPIPortal +func (tr *SpringCloudAPIPortal) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this SpringCloudAPIPortal +func (tr *SpringCloudAPIPortal) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this SpringCloudAPIPortal +func (tr *SpringCloudAPIPortal) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this SpringCloudAPIPortal +func (tr *SpringCloudAPIPortal) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this SpringCloudAPIPortal +func (tr *SpringCloudAPIPortal) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) 
+ if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this SpringCloudAPIPortal +func (tr *SpringCloudAPIPortal) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this SpringCloudAPIPortal +func (tr *SpringCloudAPIPortal) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this SpringCloudAPIPortal +func (tr *SpringCloudAPIPortal) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this SpringCloudAPIPortal using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *SpringCloudAPIPortal) LateInitialize(attrs []byte) (bool, error) { + params := &SpringCloudAPIPortalParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *SpringCloudAPIPortal) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/appplatform/v1beta2/zz_springcloudapiportal_types.go b/apis/appplatform/v1beta2/zz_springcloudapiportal_types.go new file mode 100755 index 000000000..918eeee64 --- /dev/null +++ b/apis/appplatform/v1beta2/zz_springcloudapiportal_types.go @@ -0,0 +1,241 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type SpringCloudAPIPortalInitParameters struct { + + // Specifies whether the API try-out feature is enabled. When enabled, users can try out the API by sending requests and viewing responses in API portal. + APITryOutEnabled *bool `json:"apiTryOutEnabled,omitempty" tf:"api_try_out_enabled,omitempty"` + + // Specifies a list of Spring Cloud Gateway. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/appplatform/v1beta2.SpringCloudGateway + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +listType=set + GatewayIds []*string `json:"gatewayIds,omitempty" tf:"gateway_ids,omitempty"` + + // References to SpringCloudGateway in appplatform to populate gatewayIds. + // +kubebuilder:validation:Optional + GatewayIdsRefs []v1.Reference `json:"gatewayIdsRefs,omitempty" tf:"-"` + + // Selector for a list of SpringCloudGateway in appplatform to populate gatewayIds. + // +kubebuilder:validation:Optional + GatewayIdsSelector *v1.Selector `json:"gatewayIdsSelector,omitempty" tf:"-"` + + // is only https is allowed? + HTTPSOnlyEnabled *bool `json:"httpsOnlyEnabled,omitempty" tf:"https_only_enabled,omitempty"` + + // Specifies the required instance count of the Spring Cloud API Portal. Possible Values are between 1 and 500. Defaults to 1 if not specified. + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + // Is the public network access enabled? + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // A sso block as defined below. + Sso *SsoInitParameters `json:"sso,omitempty" tf:"sso,omitempty"` +} + +type SpringCloudAPIPortalObservation struct { + + // Specifies whether the API try-out feature is enabled. When enabled, users can try out the API by sending requests and viewing responses in API portal. + APITryOutEnabled *bool `json:"apiTryOutEnabled,omitempty" tf:"api_try_out_enabled,omitempty"` + + // Specifies a list of Spring Cloud Gateway. + // +listType=set + GatewayIds []*string `json:"gatewayIds,omitempty" tf:"gateway_ids,omitempty"` + + // is only https is allowed? + HTTPSOnlyEnabled *bool `json:"httpsOnlyEnabled,omitempty" tf:"https_only_enabled,omitempty"` + + // The ID of the Spring Cloud API Portal. 
+ ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies the required instance count of the Spring Cloud API Portal. Possible Values are between 1 and 500. Defaults to 1 if not specified. + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + // Is the public network access enabled? + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The ID of the Spring Cloud Service. Changing this forces a new Spring Cloud API Portal to be created. + SpringCloudServiceID *string `json:"springCloudServiceId,omitempty" tf:"spring_cloud_service_id,omitempty"` + + // A sso block as defined below. + Sso *SsoObservation `json:"sso,omitempty" tf:"sso,omitempty"` + + // TODO. + URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +type SpringCloudAPIPortalParameters struct { + + // Specifies whether the API try-out feature is enabled. When enabled, users can try out the API by sending requests and viewing responses in API portal. + // +kubebuilder:validation:Optional + APITryOutEnabled *bool `json:"apiTryOutEnabled,omitempty" tf:"api_try_out_enabled,omitempty"` + + // Specifies a list of Spring Cloud Gateway. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/appplatform/v1beta2.SpringCloudGateway + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + // +listType=set + GatewayIds []*string `json:"gatewayIds,omitempty" tf:"gateway_ids,omitempty"` + + // References to SpringCloudGateway in appplatform to populate gatewayIds. + // +kubebuilder:validation:Optional + GatewayIdsRefs []v1.Reference `json:"gatewayIdsRefs,omitempty" tf:"-"` + + // Selector for a list of SpringCloudGateway in appplatform to populate gatewayIds. 
+ // +kubebuilder:validation:Optional + GatewayIdsSelector *v1.Selector `json:"gatewayIdsSelector,omitempty" tf:"-"` + + // is only https is allowed? + // +kubebuilder:validation:Optional + HTTPSOnlyEnabled *bool `json:"httpsOnlyEnabled,omitempty" tf:"https_only_enabled,omitempty"` + + // Specifies the required instance count of the Spring Cloud API Portal. Possible Values are between 1 and 500. Defaults to 1 if not specified. + // +kubebuilder:validation:Optional + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + // Is the public network access enabled? + // +kubebuilder:validation:Optional + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The ID of the Spring Cloud Service. Changing this forces a new Spring Cloud API Portal to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/appplatform/v1beta2.SpringCloudService + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + SpringCloudServiceID *string `json:"springCloudServiceId,omitempty" tf:"spring_cloud_service_id,omitempty"` + + // Reference to a SpringCloudService in appplatform to populate springCloudServiceId. + // +kubebuilder:validation:Optional + SpringCloudServiceIDRef *v1.Reference `json:"springCloudServiceIdRef,omitempty" tf:"-"` + + // Selector for a SpringCloudService in appplatform to populate springCloudServiceId. + // +kubebuilder:validation:Optional + SpringCloudServiceIDSelector *v1.Selector `json:"springCloudServiceIdSelector,omitempty" tf:"-"` + + // A sso block as defined below. + // +kubebuilder:validation:Optional + Sso *SsoParameters `json:"sso,omitempty" tf:"sso,omitempty"` +} + +type SsoInitParameters struct { + + // The public identifier for the application. 
+ ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The secret known only to the application and the authorization server. + ClientSecret *string `json:"clientSecret,omitempty" tf:"client_secret,omitempty"` + + // The URI of Issuer Identifier. + IssuerURI *string `json:"issuerUri,omitempty" tf:"issuer_uri,omitempty"` + + // It defines the specific actions applications can be allowed to do on a user's behalf. + // +listType=set + Scope []*string `json:"scope,omitempty" tf:"scope,omitempty"` +} + +type SsoObservation struct { + + // The public identifier for the application. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The secret known only to the application and the authorization server. + ClientSecret *string `json:"clientSecret,omitempty" tf:"client_secret,omitempty"` + + // The URI of Issuer Identifier. + IssuerURI *string `json:"issuerUri,omitempty" tf:"issuer_uri,omitempty"` + + // It defines the specific actions applications can be allowed to do on a user's behalf. + // +listType=set + Scope []*string `json:"scope,omitempty" tf:"scope,omitempty"` +} + +type SsoParameters struct { + + // The public identifier for the application. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The secret known only to the application and the authorization server. + // +kubebuilder:validation:Optional + ClientSecret *string `json:"clientSecret,omitempty" tf:"client_secret,omitempty"` + + // The URI of Issuer Identifier. + // +kubebuilder:validation:Optional + IssuerURI *string `json:"issuerUri,omitempty" tf:"issuer_uri,omitempty"` + + // It defines the specific actions applications can be allowed to do on a user's behalf. 
+ // +kubebuilder:validation:Optional + // +listType=set + Scope []*string `json:"scope,omitempty" tf:"scope,omitempty"` +} + +// SpringCloudAPIPortalSpec defines the desired state of SpringCloudAPIPortal +type SpringCloudAPIPortalSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider SpringCloudAPIPortalParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider SpringCloudAPIPortalInitParameters `json:"initProvider,omitempty"` +} + +// SpringCloudAPIPortalStatus defines the observed state of SpringCloudAPIPortal. +type SpringCloudAPIPortalStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SpringCloudAPIPortalObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// SpringCloudAPIPortal is the Schema for the SpringCloudAPIPortals API. Manages a Spring Cloud API Portal. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type SpringCloudAPIPortal struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec SpringCloudAPIPortalSpec `json:"spec"` + Status SpringCloudAPIPortalStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SpringCloudAPIPortalList contains a list of SpringCloudAPIPortals +type SpringCloudAPIPortalList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SpringCloudAPIPortal `json:"items"` +} + +// Repository type metadata. +var ( + SpringCloudAPIPortal_Kind = "SpringCloudAPIPortal" + SpringCloudAPIPortal_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: SpringCloudAPIPortal_Kind}.String() + SpringCloudAPIPortal_KindAPIVersion = SpringCloudAPIPortal_Kind + "." + CRDGroupVersion.String() + SpringCloudAPIPortal_GroupVersionKind = CRDGroupVersion.WithKind(SpringCloudAPIPortal_Kind) +) + +func init() { + SchemeBuilder.Register(&SpringCloudAPIPortal{}, &SpringCloudAPIPortalList{}) +} diff --git a/apis/appplatform/v1beta2/zz_springcloudapp_terraformed.go b/apis/appplatform/v1beta2/zz_springcloudapp_terraformed.go new file mode 100755 index 000000000..106ef86a1 --- /dev/null +++ b/apis/appplatform/v1beta2/zz_springcloudapp_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this SpringCloudApp +func (mg *SpringCloudApp) GetTerraformResourceType() string { + return "azurerm_spring_cloud_app" +} + +// GetConnectionDetailsMapping for this SpringCloudApp +func (tr *SpringCloudApp) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this SpringCloudApp +func (tr *SpringCloudApp) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this SpringCloudApp +func (tr *SpringCloudApp) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this SpringCloudApp +func (tr *SpringCloudApp) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this SpringCloudApp +func (tr *SpringCloudApp) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this SpringCloudApp +func (tr *SpringCloudApp) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this SpringCloudApp +func (tr *SpringCloudApp) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != 
nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this SpringCloudApp +func (tr *SpringCloudApp) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this SpringCloudApp using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *SpringCloudApp) LateInitialize(attrs []byte) (bool, error) { + params := &SpringCloudAppParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *SpringCloudApp) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/appplatform/v1beta2/zz_springcloudapp_types.go b/apis/appplatform/v1beta2/zz_springcloudapp_types.go new file mode 100755 index 000000000..c7707ca78 --- /dev/null +++ b/apis/appplatform/v1beta2/zz_springcloudapp_types.go @@ -0,0 +1,402 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CustomPersistentDiskInitParameters struct { + + // These are the mount options for a persistent disk. + // +listType=set + MountOptions []*string `json:"mountOptions,omitempty" tf:"mount_options,omitempty"` + + // The mount path of the persistent disk. + MountPath *string `json:"mountPath,omitempty" tf:"mount_path,omitempty"` + + // Indicates whether the persistent disk is a readOnly one. + ReadOnlyEnabled *bool `json:"readOnlyEnabled,omitempty" tf:"read_only_enabled,omitempty"` + + // The share name of the Azure File share. + ShareName *string `json:"shareName,omitempty" tf:"share_name,omitempty"` + + // The name of the Spring Cloud Storage. + StorageName *string `json:"storageName,omitempty" tf:"storage_name,omitempty"` +} + +type CustomPersistentDiskObservation struct { + + // These are the mount options for a persistent disk. + // +listType=set + MountOptions []*string `json:"mountOptions,omitempty" tf:"mount_options,omitempty"` + + // The mount path of the persistent disk. + MountPath *string `json:"mountPath,omitempty" tf:"mount_path,omitempty"` + + // Indicates whether the persistent disk is a readOnly one. 
+ ReadOnlyEnabled *bool `json:"readOnlyEnabled,omitempty" tf:"read_only_enabled,omitempty"` + + // The share name of the Azure File share. + ShareName *string `json:"shareName,omitempty" tf:"share_name,omitempty"` + + // The name of the Spring Cloud Storage. + StorageName *string `json:"storageName,omitempty" tf:"storage_name,omitempty"` +} + +type CustomPersistentDiskParameters struct { + + // These are the mount options for a persistent disk. + // +kubebuilder:validation:Optional + // +listType=set + MountOptions []*string `json:"mountOptions,omitempty" tf:"mount_options,omitempty"` + + // The mount path of the persistent disk. + // +kubebuilder:validation:Optional + MountPath *string `json:"mountPath" tf:"mount_path,omitempty"` + + // Indicates whether the persistent disk is a readOnly one. + // +kubebuilder:validation:Optional + ReadOnlyEnabled *bool `json:"readOnlyEnabled,omitempty" tf:"read_only_enabled,omitempty"` + + // The share name of the Azure File share. + // +kubebuilder:validation:Optional + ShareName *string `json:"shareName" tf:"share_name,omitempty"` + + // The name of the Spring Cloud Storage. + // +kubebuilder:validation:Optional + StorageName *string `json:"storageName" tf:"storage_name,omitempty"` +} + +type IdentityInitParameters struct { + + // A list of User Assigned Managed Identity IDs to be assigned to this Spring Cloud Application. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Spring Cloud Application. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityObservation struct { + + // A list of User Assigned Managed Identity IDs to be assigned to this Spring Cloud Application. 
+ // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Principal ID for the Service Principal associated with the Managed Service Identity of this Spring Cloud Application. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID for the Service Principal associated with the Managed Service Identity of this Spring Cloud Application. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Spring Cloud Application. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityParameters struct { + + // A list of User Assigned Managed Identity IDs to be assigned to this Spring Cloud Application. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Spring Cloud Application. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type IngressSettingsInitParameters struct { + + // Specifies how ingress should communicate with this app backend service. Allowed values are GRPC and Default. Defaults to Default. + BackendProtocol *string `json:"backendProtocol,omitempty" tf:"backend_protocol,omitempty"` + + // Specifies the ingress read time out in seconds. Defaults to 300. + ReadTimeoutInSeconds *float64 `json:"readTimeoutInSeconds,omitempty" tf:"read_timeout_in_seconds,omitempty"` + + // Specifies the ingress send time out in seconds. Defaults to 60. 
+ SendTimeoutInSeconds *float64 `json:"sendTimeoutInSeconds,omitempty" tf:"send_timeout_in_seconds,omitempty"` + + // Specifies the type of the affinity, set this to Cookie to enable session affinity. Allowed values are Cookie and None. Defaults to None. + SessionAffinity *string `json:"sessionAffinity,omitempty" tf:"session_affinity,omitempty"` + + // Specifies the time in seconds until the cookie expires. + SessionCookieMaxAge *float64 `json:"sessionCookieMaxAge,omitempty" tf:"session_cookie_max_age,omitempty"` +} + +type IngressSettingsObservation struct { + + // Specifies how ingress should communicate with this app backend service. Allowed values are GRPC and Default. Defaults to Default. + BackendProtocol *string `json:"backendProtocol,omitempty" tf:"backend_protocol,omitempty"` + + // Specifies the ingress read time out in seconds. Defaults to 300. + ReadTimeoutInSeconds *float64 `json:"readTimeoutInSeconds,omitempty" tf:"read_timeout_in_seconds,omitempty"` + + // Specifies the ingress send time out in seconds. Defaults to 60. + SendTimeoutInSeconds *float64 `json:"sendTimeoutInSeconds,omitempty" tf:"send_timeout_in_seconds,omitempty"` + + // Specifies the type of the affinity, set this to Cookie to enable session affinity. Allowed values are Cookie and None. Defaults to None. + SessionAffinity *string `json:"sessionAffinity,omitempty" tf:"session_affinity,omitempty"` + + // Specifies the time in seconds until the cookie expires. + SessionCookieMaxAge *float64 `json:"sessionCookieMaxAge,omitempty" tf:"session_cookie_max_age,omitempty"` +} + +type IngressSettingsParameters struct { + + // Specifies how ingress should communicate with this app backend service. Allowed values are GRPC and Default. Defaults to Default. + // +kubebuilder:validation:Optional + BackendProtocol *string `json:"backendProtocol,omitempty" tf:"backend_protocol,omitempty"` + + // Specifies the ingress read time out in seconds. Defaults to 300. 
+ // +kubebuilder:validation:Optional + ReadTimeoutInSeconds *float64 `json:"readTimeoutInSeconds,omitempty" tf:"read_timeout_in_seconds,omitempty"` + + // Specifies the ingress send time out in seconds. Defaults to 60. + // +kubebuilder:validation:Optional + SendTimeoutInSeconds *float64 `json:"sendTimeoutInSeconds,omitempty" tf:"send_timeout_in_seconds,omitempty"` + + // Specifies the type of the affinity, set this to Cookie to enable session affinity. Allowed values are Cookie and None. Defaults to None. + // +kubebuilder:validation:Optional + SessionAffinity *string `json:"sessionAffinity,omitempty" tf:"session_affinity,omitempty"` + + // Specifies the time in seconds until the cookie expires. + // +kubebuilder:validation:Optional + SessionCookieMaxAge *float64 `json:"sessionCookieMaxAge,omitempty" tf:"session_cookie_max_age,omitempty"` +} + +type PersistentDiskInitParameters struct { + + // Specifies the mount path of the persistent disk. Defaults to /persistent. + MountPath *string `json:"mountPath,omitempty" tf:"mount_path,omitempty"` + + // Specifies the size of the persistent disk in GB. Possible values are between 0 and 50. + SizeInGb *float64 `json:"sizeInGb,omitempty" tf:"size_in_gb,omitempty"` +} + +type PersistentDiskObservation struct { + + // Specifies the mount path of the persistent disk. Defaults to /persistent. + MountPath *string `json:"mountPath,omitempty" tf:"mount_path,omitempty"` + + // Specifies the size of the persistent disk in GB. Possible values are between 0 and 50. + SizeInGb *float64 `json:"sizeInGb,omitempty" tf:"size_in_gb,omitempty"` +} + +type PersistentDiskParameters struct { + + // Specifies the mount path of the persistent disk. Defaults to /persistent. + // +kubebuilder:validation:Optional + MountPath *string `json:"mountPath,omitempty" tf:"mount_path,omitempty"` + + // Specifies the size of the persistent disk in GB. Possible values are between 0 and 50. 
+ // +kubebuilder:validation:Optional + SizeInGb *float64 `json:"sizeInGb" tf:"size_in_gb,omitempty"` +} + +type SpringCloudAppInitParameters struct { + + // A JSON object that contains the addon configurations of the Spring Cloud Service. + AddonJSON *string `json:"addonJson,omitempty" tf:"addon_json,omitempty"` + + // A custom_persistent_disk block as defined below. + CustomPersistentDisk []CustomPersistentDiskInitParameters `json:"customPersistentDisk,omitempty" tf:"custom_persistent_disk,omitempty"` + + // Is only HTTPS allowed? Defaults to false. + HTTPSOnly *bool `json:"httpsOnly,omitempty" tf:"https_only,omitempty"` + + // An identity block as defined below. + Identity *IdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // An ingress_settings block as defined below. + IngressSettings *IngressSettingsInitParameters `json:"ingressSettings,omitempty" tf:"ingress_settings,omitempty"` + + // Does the Spring Cloud Application have public endpoint? Defaults to false. + IsPublic *bool `json:"isPublic,omitempty" tf:"is_public,omitempty"` + + // An persistent_disk block as defined below. + PersistentDisk *PersistentDiskInitParameters `json:"persistentDisk,omitempty" tf:"persistent_disk,omitempty"` + + // Should the App in vnet injection instance exposes endpoint which could be accessed from Internet? + PublicEndpointEnabled *bool `json:"publicEndpointEnabled,omitempty" tf:"public_endpoint_enabled,omitempty"` + + // Is End to End TLS Enabled? Defaults to false. + TLSEnabled *bool `json:"tlsEnabled,omitempty" tf:"tls_enabled,omitempty"` +} + +type SpringCloudAppObservation struct { + + // A JSON object that contains the addon configurations of the Spring Cloud Service. + AddonJSON *string `json:"addonJson,omitempty" tf:"addon_json,omitempty"` + + // A custom_persistent_disk block as defined below. 
+ CustomPersistentDisk []CustomPersistentDiskObservation `json:"customPersistentDisk,omitempty" tf:"custom_persistent_disk,omitempty"` + + // The Fully Qualified DNS Name of the Spring Application in the service. + Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"` + + // Is only HTTPS allowed? Defaults to false. + HTTPSOnly *bool `json:"httpsOnly,omitempty" tf:"https_only,omitempty"` + + // The ID of the Spring Cloud Application. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. + Identity *IdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // An ingress_settings block as defined below. + IngressSettings *IngressSettingsObservation `json:"ingressSettings,omitempty" tf:"ingress_settings,omitempty"` + + // Does the Spring Cloud Application have public endpoint? Defaults to false. + IsPublic *bool `json:"isPublic,omitempty" tf:"is_public,omitempty"` + + // An persistent_disk block as defined below. + PersistentDisk *PersistentDiskObservation `json:"persistentDisk,omitempty" tf:"persistent_disk,omitempty"` + + // Should the App in vnet injection instance exposes endpoint which could be accessed from Internet? + PublicEndpointEnabled *bool `json:"publicEndpointEnabled,omitempty" tf:"public_endpoint_enabled,omitempty"` + + // Specifies the name of the resource group in which to create the Spring Cloud Application. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Specifies the name of the Spring Cloud Service resource. Changing this forces a new resource to be created. + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // Is End to End TLS Enabled? Defaults to false. + TLSEnabled *bool `json:"tlsEnabled,omitempty" tf:"tls_enabled,omitempty"` + + // The public endpoint of the Spring Cloud Application. 
+ URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +type SpringCloudAppParameters struct { + + // A JSON object that contains the addon configurations of the Spring Cloud Service. + // +kubebuilder:validation:Optional + AddonJSON *string `json:"addonJson,omitempty" tf:"addon_json,omitempty"` + + // A custom_persistent_disk block as defined below. + // +kubebuilder:validation:Optional + CustomPersistentDisk []CustomPersistentDiskParameters `json:"customPersistentDisk,omitempty" tf:"custom_persistent_disk,omitempty"` + + // Is only HTTPS allowed? Defaults to false. + // +kubebuilder:validation:Optional + HTTPSOnly *bool `json:"httpsOnly,omitempty" tf:"https_only,omitempty"` + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *IdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // An ingress_settings block as defined below. + // +kubebuilder:validation:Optional + IngressSettings *IngressSettingsParameters `json:"ingressSettings,omitempty" tf:"ingress_settings,omitempty"` + + // Does the Spring Cloud Application have public endpoint? Defaults to false. + // +kubebuilder:validation:Optional + IsPublic *bool `json:"isPublic,omitempty" tf:"is_public,omitempty"` + + // An persistent_disk block as defined below. + // +kubebuilder:validation:Optional + PersistentDisk *PersistentDiskParameters `json:"persistentDisk,omitempty" tf:"persistent_disk,omitempty"` + + // Should the App in vnet injection instance exposes endpoint which could be accessed from Internet? + // +kubebuilder:validation:Optional + PublicEndpointEnabled *bool `json:"publicEndpointEnabled,omitempty" tf:"public_endpoint_enabled,omitempty"` + + // Specifies the name of the resource group in which to create the Spring Cloud Application. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // Specifies the name of the Spring Cloud Service resource. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/appplatform/v1beta2.SpringCloudService + // +kubebuilder:validation:Optional + ServiceName *string `json:"serviceName,omitempty" tf:"service_name,omitempty"` + + // Reference to a SpringCloudService in appplatform to populate serviceName. + // +kubebuilder:validation:Optional + ServiceNameRef *v1.Reference `json:"serviceNameRef,omitempty" tf:"-"` + + // Selector for a SpringCloudService in appplatform to populate serviceName. + // +kubebuilder:validation:Optional + ServiceNameSelector *v1.Selector `json:"serviceNameSelector,omitempty" tf:"-"` + + // Is End to End TLS Enabled? Defaults to false. + // +kubebuilder:validation:Optional + TLSEnabled *bool `json:"tlsEnabled,omitempty" tf:"tls_enabled,omitempty"` +} + +// SpringCloudAppSpec defines the desired state of SpringCloudApp +type SpringCloudAppSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider SpringCloudAppParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. 
+ // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider SpringCloudAppInitParameters `json:"initProvider,omitempty"` +} + +// SpringCloudAppStatus defines the observed state of SpringCloudApp. +type SpringCloudAppStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SpringCloudAppObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// SpringCloudApp is the Schema for the SpringCloudApps API. Manage an Azure Spring Cloud Application. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type SpringCloudApp struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec SpringCloudAppSpec `json:"spec"` + Status SpringCloudAppStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SpringCloudAppList contains a list of SpringCloudApps +type SpringCloudAppList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SpringCloudApp `json:"items"` +} + +// Repository type metadata. +var ( + SpringCloudApp_Kind = "SpringCloudApp" + SpringCloudApp_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: SpringCloudApp_Kind}.String() + SpringCloudApp_KindAPIVersion = SpringCloudApp_Kind + "." + CRDGroupVersion.String() + SpringCloudApp_GroupVersionKind = CRDGroupVersion.WithKind(SpringCloudApp_Kind) +) + +func init() { + SchemeBuilder.Register(&SpringCloudApp{}, &SpringCloudAppList{}) +} diff --git a/apis/appplatform/v1beta2/zz_springcloudbuilddeployment_terraformed.go b/apis/appplatform/v1beta2/zz_springcloudbuilddeployment_terraformed.go new file mode 100755 index 000000000..98e7d0bac --- /dev/null +++ b/apis/appplatform/v1beta2/zz_springcloudbuilddeployment_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this SpringCloudBuildDeployment +func (mg *SpringCloudBuildDeployment) GetTerraformResourceType() string { + return "azurerm_spring_cloud_build_deployment" +} + +// GetConnectionDetailsMapping for this SpringCloudBuildDeployment +func (tr *SpringCloudBuildDeployment) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this SpringCloudBuildDeployment +func (tr *SpringCloudBuildDeployment) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this SpringCloudBuildDeployment +func (tr *SpringCloudBuildDeployment) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this SpringCloudBuildDeployment +func (tr *SpringCloudBuildDeployment) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this SpringCloudBuildDeployment +func (tr *SpringCloudBuildDeployment) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this SpringCloudBuildDeployment +func (tr *SpringCloudBuildDeployment) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// 
GetInitParameters of this SpringCloudBuildDeployment +func (tr *SpringCloudBuildDeployment) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this SpringCloudBuildDeployment +func (tr *SpringCloudBuildDeployment) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this SpringCloudBuildDeployment using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *SpringCloudBuildDeployment) LateInitialize(attrs []byte) (bool, error) { + params := &SpringCloudBuildDeploymentParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *SpringCloudBuildDeployment) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/appplatform/v1beta2/zz_springcloudbuilddeployment_types.go b/apis/appplatform/v1beta2/zz_springcloudbuilddeployment_types.go new file mode 100755 index 000000000..ace4a35d3 --- /dev/null +++ b/apis/appplatform/v1beta2/zz_springcloudbuilddeployment_types.go @@ -0,0 +1,196 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type QuotaInitParameters struct { + + // Specifies the required cpu of the Spring Cloud Deployment. Possible Values are 500m, 1, 2, 3 and 4. Defaults to 1 if not specified. + CPU *string `json:"cpu,omitempty" tf:"cpu,omitempty"` + + // Specifies the required memory size of the Spring Cloud Deployment. Possible Values are 512Mi, 1Gi, 2Gi, 3Gi, 4Gi, 5Gi, 6Gi, 7Gi, and 8Gi. Defaults to 1Gi if not specified. + Memory *string `json:"memory,omitempty" tf:"memory,omitempty"` +} + +type QuotaObservation struct { + + // Specifies the required cpu of the Spring Cloud Deployment. Possible Values are 500m, 1, 2, 3 and 4. Defaults to 1 if not specified. 
+ CPU *string `json:"cpu,omitempty" tf:"cpu,omitempty"` + + // Specifies the required memory size of the Spring Cloud Deployment. Possible Values are 512Mi, 1Gi, 2Gi, 3Gi, 4Gi, 5Gi, 6Gi, 7Gi, and 8Gi. Defaults to 1Gi if not specified. + Memory *string `json:"memory,omitempty" tf:"memory,omitempty"` +} + +type QuotaParameters struct { + + // Specifies the required cpu of the Spring Cloud Deployment. Possible Values are 500m, 1, 2, 3 and 4. Defaults to 1 if not specified. + // +kubebuilder:validation:Optional + CPU *string `json:"cpu,omitempty" tf:"cpu,omitempty"` + + // Specifies the required memory size of the Spring Cloud Deployment. Possible Values are 512Mi, 1Gi, 2Gi, 3Gi, 4Gi, 5Gi, 6Gi, 7Gi, and 8Gi. Defaults to 1Gi if not specified. + // +kubebuilder:validation:Optional + Memory *string `json:"memory,omitempty" tf:"memory,omitempty"` +} + +type SpringCloudBuildDeploymentInitParameters struct { + + // A JSON object that contains the addon configurations of the Spring Cloud Build Deployment. + AddonJSON *string `json:"addonJson,omitempty" tf:"addon_json,omitempty"` + + // Specifies a list of Spring Cloud Application Performance Monitoring IDs. + ApplicationPerformanceMonitoringIds []*string `json:"applicationPerformanceMonitoringIds,omitempty" tf:"application_performance_monitoring_ids,omitempty"` + + // The ID of the Spring Cloud Build Result. + BuildResultID *string `json:"buildResultId,omitempty" tf:"build_result_id,omitempty"` + + // Specifies the environment variables of the Spring Cloud Deployment as a map of key-value pairs. + // +mapType=granular + EnvironmentVariables map[string]*string `json:"environmentVariables,omitempty" tf:"environment_variables,omitempty"` + + // Specifies the required instance count of the Spring Cloud Deployment. Possible Values are between 1 and 500. Defaults to 1 if not specified. + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + // A quota block as defined below. 
+ Quota *QuotaInitParameters `json:"quota,omitempty" tf:"quota,omitempty"` +} + +type SpringCloudBuildDeploymentObservation struct { + + // A JSON object that contains the addon configurations of the Spring Cloud Build Deployment. + AddonJSON *string `json:"addonJson,omitempty" tf:"addon_json,omitempty"` + + // Specifies a list of Spring Cloud Application Performance Monitoring IDs. + ApplicationPerformanceMonitoringIds []*string `json:"applicationPerformanceMonitoringIds,omitempty" tf:"application_performance_monitoring_ids,omitempty"` + + // The ID of the Spring Cloud Build Result. + BuildResultID *string `json:"buildResultId,omitempty" tf:"build_result_id,omitempty"` + + // Specifies the environment variables of the Spring Cloud Deployment as a map of key-value pairs. + // +mapType=granular + EnvironmentVariables map[string]*string `json:"environmentVariables,omitempty" tf:"environment_variables,omitempty"` + + // The ID of the Spring Cloud Build Deployment. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies the required instance count of the Spring Cloud Deployment. Possible Values are between 1 and 500. Defaults to 1 if not specified. + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + // A quota block as defined below. + Quota *QuotaObservation `json:"quota,omitempty" tf:"quota,omitempty"` + + // The ID of the Spring Cloud Service. Changing this forces a new Spring Cloud Build Deployment to be created. + SpringCloudAppID *string `json:"springCloudAppId,omitempty" tf:"spring_cloud_app_id,omitempty"` +} + +type SpringCloudBuildDeploymentParameters struct { + + // A JSON object that contains the addon configurations of the Spring Cloud Build Deployment. + // +kubebuilder:validation:Optional + AddonJSON *string `json:"addonJson,omitempty" tf:"addon_json,omitempty"` + + // Specifies a list of Spring Cloud Application Performance Monitoring IDs. 
+ // +kubebuilder:validation:Optional + ApplicationPerformanceMonitoringIds []*string `json:"applicationPerformanceMonitoringIds,omitempty" tf:"application_performance_monitoring_ids,omitempty"` + + // The ID of the Spring Cloud Build Result. + // +kubebuilder:validation:Optional + BuildResultID *string `json:"buildResultId,omitempty" tf:"build_result_id,omitempty"` + + // Specifies the environment variables of the Spring Cloud Deployment as a map of key-value pairs. + // +kubebuilder:validation:Optional + // +mapType=granular + EnvironmentVariables map[string]*string `json:"environmentVariables,omitempty" tf:"environment_variables,omitempty"` + + // Specifies the required instance count of the Spring Cloud Deployment. Possible Values are between 1 and 500. Defaults to 1 if not specified. + // +kubebuilder:validation:Optional + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + // A quota block as defined below. + // +kubebuilder:validation:Optional + Quota *QuotaParameters `json:"quota,omitempty" tf:"quota,omitempty"` + + // The ID of the Spring Cloud Service. Changing this forces a new Spring Cloud Build Deployment to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/appplatform/v1beta2.SpringCloudApp + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + SpringCloudAppID *string `json:"springCloudAppId,omitempty" tf:"spring_cloud_app_id,omitempty"` + + // Reference to a SpringCloudApp in appplatform to populate springCloudAppId. + // +kubebuilder:validation:Optional + SpringCloudAppIDRef *v1.Reference `json:"springCloudAppIdRef,omitempty" tf:"-"` + + // Selector for a SpringCloudApp in appplatform to populate springCloudAppId. 
+ // +kubebuilder:validation:Optional + SpringCloudAppIDSelector *v1.Selector `json:"springCloudAppIdSelector,omitempty" tf:"-"` +} + +// SpringCloudBuildDeploymentSpec defines the desired state of SpringCloudBuildDeployment +type SpringCloudBuildDeploymentSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider SpringCloudBuildDeploymentParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider SpringCloudBuildDeploymentInitParameters `json:"initProvider,omitempty"` +} + +// SpringCloudBuildDeploymentStatus defines the observed state of SpringCloudBuildDeployment. +type SpringCloudBuildDeploymentStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SpringCloudBuildDeploymentObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// SpringCloudBuildDeployment is the Schema for the SpringCloudBuildDeployments API. Manages a Spring Cloud Build Deployment. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type SpringCloudBuildDeployment struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.buildResultId) || (has(self.initProvider) && has(self.initProvider.buildResultId))",message="spec.forProvider.buildResultId is a required parameter" + Spec SpringCloudBuildDeploymentSpec `json:"spec"` + Status SpringCloudBuildDeploymentStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SpringCloudBuildDeploymentList contains a list of SpringCloudBuildDeployments +type SpringCloudBuildDeploymentList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SpringCloudBuildDeployment `json:"items"` +} + +// Repository type metadata. +var ( + SpringCloudBuildDeployment_Kind = "SpringCloudBuildDeployment" + SpringCloudBuildDeployment_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: SpringCloudBuildDeployment_Kind}.String() + SpringCloudBuildDeployment_KindAPIVersion = SpringCloudBuildDeployment_Kind + "." 
+ CRDGroupVersion.String() + SpringCloudBuildDeployment_GroupVersionKind = CRDGroupVersion.WithKind(SpringCloudBuildDeployment_Kind) +) + +func init() { + SchemeBuilder.Register(&SpringCloudBuildDeployment{}, &SpringCloudBuildDeploymentList{}) +} diff --git a/apis/appplatform/v1beta2/zz_springcloudbuilder_terraformed.go b/apis/appplatform/v1beta2/zz_springcloudbuilder_terraformed.go new file mode 100755 index 000000000..efd0022cd --- /dev/null +++ b/apis/appplatform/v1beta2/zz_springcloudbuilder_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this SpringCloudBuilder +func (mg *SpringCloudBuilder) GetTerraformResourceType() string { + return "azurerm_spring_cloud_builder" +} + +// GetConnectionDetailsMapping for this SpringCloudBuilder +func (tr *SpringCloudBuilder) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this SpringCloudBuilder +func (tr *SpringCloudBuilder) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this SpringCloudBuilder +func (tr *SpringCloudBuilder) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this SpringCloudBuilder +func (tr *SpringCloudBuilder) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// 
GetParameters of this SpringCloudBuilder +func (tr *SpringCloudBuilder) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this SpringCloudBuilder +func (tr *SpringCloudBuilder) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this SpringCloudBuilder +func (tr *SpringCloudBuilder) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this SpringCloudBuilder +func (tr *SpringCloudBuilder) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this SpringCloudBuilder using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *SpringCloudBuilder) LateInitialize(attrs []byte) (bool, error) { + params := &SpringCloudBuilderParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *SpringCloudBuilder) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/appplatform/v1beta2/zz_springcloudbuilder_types.go b/apis/appplatform/v1beta2/zz_springcloudbuilder_types.go new file mode 100755 index 000000000..50e0145ea --- /dev/null +++ b/apis/appplatform/v1beta2/zz_springcloudbuilder_types.go @@ -0,0 +1,207 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type BuildPackGroupInitParameters struct { + + // Specifies a list of the build pack's ID. + BuildPackIds []*string `json:"buildPackIds,omitempty" tf:"build_pack_ids,omitempty"` + + // The name which should be used for this build pack group. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type BuildPackGroupObservation struct { + + // Specifies a list of the build pack's ID. + BuildPackIds []*string `json:"buildPackIds,omitempty" tf:"build_pack_ids,omitempty"` + + // The name which should be used for this build pack group. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type BuildPackGroupParameters struct { + + // Specifies a list of the build pack's ID. + // +kubebuilder:validation:Optional + BuildPackIds []*string `json:"buildPackIds,omitempty" tf:"build_pack_ids,omitempty"` + + // The name which should be used for this build pack group. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type SpringCloudBuilderInitParameters struct { + + // One or more build_pack_group blocks as defined below. + BuildPackGroup []BuildPackGroupInitParameters `json:"buildPackGroup,omitempty" tf:"build_pack_group,omitempty"` + + // The name which should be used for this Spring Cloud Builder. Changing this forces a new Spring Cloud Builder to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The ID of the Spring Cloud Service. Changing this forces a new Spring Cloud Builder to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/appplatform/v1beta2.SpringCloudService + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + SpringCloudServiceID *string `json:"springCloudServiceId,omitempty" tf:"spring_cloud_service_id,omitempty"` + + // Reference to a SpringCloudService in appplatform to populate springCloudServiceId. + // +kubebuilder:validation:Optional + SpringCloudServiceIDRef *v1.Reference `json:"springCloudServiceIdRef,omitempty" tf:"-"` + + // Selector for a SpringCloudService in appplatform to populate springCloudServiceId. 
+ // +kubebuilder:validation:Optional + SpringCloudServiceIDSelector *v1.Selector `json:"springCloudServiceIdSelector,omitempty" tf:"-"` + + // A stack block as defined below. + Stack *StackInitParameters `json:"stack,omitempty" tf:"stack,omitempty"` +} + +type SpringCloudBuilderObservation struct { + + // One or more build_pack_group blocks as defined below. + BuildPackGroup []BuildPackGroupObservation `json:"buildPackGroup,omitempty" tf:"build_pack_group,omitempty"` + + // The ID of the Spring Cloud Builder. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name which should be used for this Spring Cloud Builder. Changing this forces a new Spring Cloud Builder to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The ID of the Spring Cloud Service. Changing this forces a new Spring Cloud Builder to be created. + SpringCloudServiceID *string `json:"springCloudServiceId,omitempty" tf:"spring_cloud_service_id,omitempty"` + + // A stack block as defined below. + Stack *StackObservation `json:"stack,omitempty" tf:"stack,omitempty"` +} + +type SpringCloudBuilderParameters struct { + + // One or more build_pack_group blocks as defined below. + // +kubebuilder:validation:Optional + BuildPackGroup []BuildPackGroupParameters `json:"buildPackGroup,omitempty" tf:"build_pack_group,omitempty"` + + // The name which should be used for this Spring Cloud Builder. Changing this forces a new Spring Cloud Builder to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The ID of the Spring Cloud Service. Changing this forces a new Spring Cloud Builder to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/appplatform/v1beta2.SpringCloudService + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + SpringCloudServiceID *string `json:"springCloudServiceId,omitempty" tf:"spring_cloud_service_id,omitempty"` + + // Reference to a SpringCloudService in appplatform to populate springCloudServiceId. + // +kubebuilder:validation:Optional + SpringCloudServiceIDRef *v1.Reference `json:"springCloudServiceIdRef,omitempty" tf:"-"` + + // Selector for a SpringCloudService in appplatform to populate springCloudServiceId. + // +kubebuilder:validation:Optional + SpringCloudServiceIDSelector *v1.Selector `json:"springCloudServiceIdSelector,omitempty" tf:"-"` + + // A stack block as defined below. + // +kubebuilder:validation:Optional + Stack *StackParameters `json:"stack,omitempty" tf:"stack,omitempty"` +} + +type StackInitParameters struct { + + // Specifies the ID of the ClusterStack. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies the version of the ClusterStack + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type StackObservation struct { + + // Specifies the ID of the ClusterStack. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies the version of the ClusterStack + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type StackParameters struct { + + // Specifies the ID of the ClusterStack. 
+ // +kubebuilder:validation:Optional + ID *string `json:"id" tf:"id,omitempty"` + + // Specifies the version of the ClusterStack + // +kubebuilder:validation:Optional + Version *string `json:"version" tf:"version,omitempty"` +} + +// SpringCloudBuilderSpec defines the desired state of SpringCloudBuilder +type SpringCloudBuilderSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider SpringCloudBuilderParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider SpringCloudBuilderInitParameters `json:"initProvider,omitempty"` +} + +// SpringCloudBuilderStatus defines the observed state of SpringCloudBuilder. +type SpringCloudBuilderStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SpringCloudBuilderObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// SpringCloudBuilder is the Schema for the SpringCloudBuilders API. Manages a Spring Cloud Builder. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type SpringCloudBuilder struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.buildPackGroup) || (has(self.initProvider) && has(self.initProvider.buildPackGroup))",message="spec.forProvider.buildPackGroup is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.stack) || (has(self.initProvider) && has(self.initProvider.stack))",message="spec.forProvider.stack is a required parameter" + Spec SpringCloudBuilderSpec `json:"spec"` + Status SpringCloudBuilderStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SpringCloudBuilderList contains a list of SpringCloudBuilders +type SpringCloudBuilderList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SpringCloudBuilder `json:"items"` +} + +// Repository type metadata. 
+var ( + SpringCloudBuilder_Kind = "SpringCloudBuilder" + SpringCloudBuilder_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: SpringCloudBuilder_Kind}.String() + SpringCloudBuilder_KindAPIVersion = SpringCloudBuilder_Kind + "." + CRDGroupVersion.String() + SpringCloudBuilder_GroupVersionKind = CRDGroupVersion.WithKind(SpringCloudBuilder_Kind) +) + +func init() { + SchemeBuilder.Register(&SpringCloudBuilder{}, &SpringCloudBuilderList{}) +} diff --git a/apis/appplatform/v1beta2/zz_springcloudbuildpackbinding_terraformed.go b/apis/appplatform/v1beta2/zz_springcloudbuildpackbinding_terraformed.go new file mode 100755 index 000000000..23a5ed40b --- /dev/null +++ b/apis/appplatform/v1beta2/zz_springcloudbuildpackbinding_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this SpringCloudBuildPackBinding +func (mg *SpringCloudBuildPackBinding) GetTerraformResourceType() string { + return "azurerm_spring_cloud_build_pack_binding" +} + +// GetConnectionDetailsMapping for this SpringCloudBuildPackBinding +func (tr *SpringCloudBuildPackBinding) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this SpringCloudBuildPackBinding +func (tr *SpringCloudBuildPackBinding) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this SpringCloudBuildPackBinding +func (tr *SpringCloudBuildPackBinding) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil 
{ + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this SpringCloudBuildPackBinding +func (tr *SpringCloudBuildPackBinding) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this SpringCloudBuildPackBinding +func (tr *SpringCloudBuildPackBinding) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this SpringCloudBuildPackBinding +func (tr *SpringCloudBuildPackBinding) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this SpringCloudBuildPackBinding +func (tr *SpringCloudBuildPackBinding) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this SpringCloudBuildPackBinding +func (tr *SpringCloudBuildPackBinding) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. 
As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this SpringCloudBuildPackBinding using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *SpringCloudBuildPackBinding) LateInitialize(attrs []byte) (bool, error) { + params := &SpringCloudBuildPackBindingParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *SpringCloudBuildPackBinding) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/appplatform/v1beta2/zz_springcloudbuildpackbinding_types.go b/apis/appplatform/v1beta2/zz_springcloudbuildpackbinding_types.go new file mode 100755 index 000000000..04712e573 --- /dev/null +++ b/apis/appplatform/v1beta2/zz_springcloudbuildpackbinding_types.go @@ -0,0 +1,158 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type LaunchInitParameters struct { + + // Specifies a map of non-sensitive properties for launchProperties. + // +mapType=granular + Properties map[string]*string `json:"properties,omitempty" tf:"properties,omitempty"` + + // Specifies a map of sensitive properties for launchProperties. + // +mapType=granular + Secrets map[string]*string `json:"secrets,omitempty" tf:"secrets,omitempty"` +} + +type LaunchObservation struct { + + // Specifies a map of non-sensitive properties for launchProperties. + // +mapType=granular + Properties map[string]*string `json:"properties,omitempty" tf:"properties,omitempty"` + + // Specifies a map of sensitive properties for launchProperties. + // +mapType=granular + Secrets map[string]*string `json:"secrets,omitempty" tf:"secrets,omitempty"` +} + +type LaunchParameters struct { + + // Specifies a map of non-sensitive properties for launchProperties. + // +kubebuilder:validation:Optional + // +mapType=granular + Properties map[string]*string `json:"properties,omitempty" tf:"properties,omitempty"` + + // Specifies a map of sensitive properties for launchProperties. + // +kubebuilder:validation:Optional + // +mapType=granular + Secrets map[string]*string `json:"secrets,omitempty" tf:"secrets,omitempty"` +} + +type SpringCloudBuildPackBindingInitParameters struct { + + // Specifies the Build Pack Binding Type. Allowed values are ApacheSkyWalking, AppDynamics, ApplicationInsights, Dynatrace, ElasticAPM and NewRelic. + BindingType *string `json:"bindingType,omitempty" tf:"binding_type,omitempty"` + + // A launch block as defined below. + Launch *LaunchInitParameters `json:"launch,omitempty" tf:"launch,omitempty"` +} + +type SpringCloudBuildPackBindingObservation struct { + + // Specifies the Build Pack Binding Type. 
Allowed values are ApacheSkyWalking, AppDynamics, ApplicationInsights, Dynatrace, ElasticAPM and NewRelic. + BindingType *string `json:"bindingType,omitempty" tf:"binding_type,omitempty"` + + // The ID of the Spring Cloud Build Pack Binding. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A launch block as defined below. + Launch *LaunchObservation `json:"launch,omitempty" tf:"launch,omitempty"` + + // The ID of the Spring Cloud Builder. Changing this forces a new Spring Cloud Build Pack Binding to be created. + SpringCloudBuilderID *string `json:"springCloudBuilderId,omitempty" tf:"spring_cloud_builder_id,omitempty"` +} + +type SpringCloudBuildPackBindingParameters struct { + + // Specifies the Build Pack Binding Type. Allowed values are ApacheSkyWalking, AppDynamics, ApplicationInsights, Dynatrace, ElasticAPM and NewRelic. + // +kubebuilder:validation:Optional + BindingType *string `json:"bindingType,omitempty" tf:"binding_type,omitempty"` + + // A launch block as defined below. + // +kubebuilder:validation:Optional + Launch *LaunchParameters `json:"launch,omitempty" tf:"launch,omitempty"` + + // The ID of the Spring Cloud Builder. Changing this forces a new Spring Cloud Build Pack Binding to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/appplatform/v1beta2.SpringCloudBuilder + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + SpringCloudBuilderID *string `json:"springCloudBuilderId,omitempty" tf:"spring_cloud_builder_id,omitempty"` + + // Reference to a SpringCloudBuilder in appplatform to populate springCloudBuilderId. + // +kubebuilder:validation:Optional + SpringCloudBuilderIDRef *v1.Reference `json:"springCloudBuilderIdRef,omitempty" tf:"-"` + + // Selector for a SpringCloudBuilder in appplatform to populate springCloudBuilderId. 
+ // +kubebuilder:validation:Optional + SpringCloudBuilderIDSelector *v1.Selector `json:"springCloudBuilderIdSelector,omitempty" tf:"-"` +} + +// SpringCloudBuildPackBindingSpec defines the desired state of SpringCloudBuildPackBinding +type SpringCloudBuildPackBindingSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider SpringCloudBuildPackBindingParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider SpringCloudBuildPackBindingInitParameters `json:"initProvider,omitempty"` +} + +// SpringCloudBuildPackBindingStatus defines the observed state of SpringCloudBuildPackBinding. +type SpringCloudBuildPackBindingStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SpringCloudBuildPackBindingObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// SpringCloudBuildPackBinding is the Schema for the SpringCloudBuildPackBindings API. Manages a Spring Cloud Build Pack Binding. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type SpringCloudBuildPackBinding struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec SpringCloudBuildPackBindingSpec `json:"spec"` + Status SpringCloudBuildPackBindingStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SpringCloudBuildPackBindingList contains a list of SpringCloudBuildPackBindings +type SpringCloudBuildPackBindingList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SpringCloudBuildPackBinding `json:"items"` +} + +// Repository type metadata. +var ( + SpringCloudBuildPackBinding_Kind = "SpringCloudBuildPackBinding" + SpringCloudBuildPackBinding_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: SpringCloudBuildPackBinding_Kind}.String() + SpringCloudBuildPackBinding_KindAPIVersion = SpringCloudBuildPackBinding_Kind + "." 
+ CRDGroupVersion.String() + SpringCloudBuildPackBinding_GroupVersionKind = CRDGroupVersion.WithKind(SpringCloudBuildPackBinding_Kind) +) + +func init() { + SchemeBuilder.Register(&SpringCloudBuildPackBinding{}, &SpringCloudBuildPackBindingList{}) +} diff --git a/apis/appplatform/v1beta2/zz_springcloudcontainerdeployment_terraformed.go b/apis/appplatform/v1beta2/zz_springcloudcontainerdeployment_terraformed.go new file mode 100755 index 000000000..ff7864623 --- /dev/null +++ b/apis/appplatform/v1beta2/zz_springcloudcontainerdeployment_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this SpringCloudContainerDeployment +func (mg *SpringCloudContainerDeployment) GetTerraformResourceType() string { + return "azurerm_spring_cloud_container_deployment" +} + +// GetConnectionDetailsMapping for this SpringCloudContainerDeployment +func (tr *SpringCloudContainerDeployment) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this SpringCloudContainerDeployment +func (tr *SpringCloudContainerDeployment) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this SpringCloudContainerDeployment +func (tr *SpringCloudContainerDeployment) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this 
SpringCloudContainerDeployment +func (tr *SpringCloudContainerDeployment) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this SpringCloudContainerDeployment +func (tr *SpringCloudContainerDeployment) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this SpringCloudContainerDeployment +func (tr *SpringCloudContainerDeployment) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this SpringCloudContainerDeployment +func (tr *SpringCloudContainerDeployment) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this SpringCloudContainerDeployment +func (tr *SpringCloudContainerDeployment) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this SpringCloudContainerDeployment using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *SpringCloudContainerDeployment) LateInitialize(attrs []byte) (bool, error) { + params := &SpringCloudContainerDeploymentParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *SpringCloudContainerDeployment) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/appplatform/v1beta2/zz_springcloudcontainerdeployment_types.go b/apis/appplatform/v1beta2/zz_springcloudcontainerdeployment_types.go new file mode 100755 index 000000000..47f9209d9 --- /dev/null +++ b/apis/appplatform/v1beta2/zz_springcloudcontainerdeployment_types.go @@ -0,0 +1,237 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type SpringCloudContainerDeploymentInitParameters struct { + + // A JSON object that contains the addon configurations of the Spring Cloud Container Deployment. 
+ AddonJSON *string `json:"addonJson,omitempty" tf:"addon_json,omitempty"` + + // Specifies a list of Spring Cloud Application Performance Monitoring IDs. + ApplicationPerformanceMonitoringIds []*string `json:"applicationPerformanceMonitoringIds,omitempty" tf:"application_performance_monitoring_ids,omitempty"` + + // Specifies the arguments to the entrypoint. The docker image's CMD is used if not specified. + Arguments []*string `json:"arguments,omitempty" tf:"arguments,omitempty"` + + // Specifies the entrypoint array. It will not be executed within a shell. The docker image's ENTRYPOINT is used if not specified. + Commands []*string `json:"commands,omitempty" tf:"commands,omitempty"` + + // Specifies the environment variables of the Spring Cloud Deployment as a map of key-value pairs. + // +mapType=granular + EnvironmentVariables map[string]*string `json:"environmentVariables,omitempty" tf:"environment_variables,omitempty"` + + // Container image of the custom container. This should be in the form of : without the server name of the registry. + Image *string `json:"image,omitempty" tf:"image,omitempty"` + + // Specifies the required instance count of the Spring Cloud Deployment. Possible Values are between 1 and 500. Defaults to 1 if not specified. + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + // Specifies the language framework of the container image. The only possible value is springboot. + LanguageFramework *string `json:"languageFramework,omitempty" tf:"language_framework,omitempty"` + + // A quota block as defined below. + Quota *SpringCloudContainerDeploymentQuotaInitParameters `json:"quota,omitempty" tf:"quota,omitempty"` + + // The name of the registry that contains the container image. 
+ Server *string `json:"server,omitempty" tf:"server,omitempty"` +} + +type SpringCloudContainerDeploymentObservation struct { + + // A JSON object that contains the addon configurations of the Spring Cloud Container Deployment. + AddonJSON *string `json:"addonJson,omitempty" tf:"addon_json,omitempty"` + + // Specifies a list of Spring Cloud Application Performance Monitoring IDs. + ApplicationPerformanceMonitoringIds []*string `json:"applicationPerformanceMonitoringIds,omitempty" tf:"application_performance_monitoring_ids,omitempty"` + + // Specifies the arguments to the entrypoint. The docker image's CMD is used if not specified. + Arguments []*string `json:"arguments,omitempty" tf:"arguments,omitempty"` + + // Specifies the entrypoint array. It will not be executed within a shell. The docker image's ENTRYPOINT is used if not specified. + Commands []*string `json:"commands,omitempty" tf:"commands,omitempty"` + + // Specifies the environment variables of the Spring Cloud Deployment as a map of key-value pairs. + // +mapType=granular + EnvironmentVariables map[string]*string `json:"environmentVariables,omitempty" tf:"environment_variables,omitempty"` + + // The ID of the Spring Cloud Container Deployment. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Container image of the custom container. This should be in the form of : without the server name of the registry. + Image *string `json:"image,omitempty" tf:"image,omitempty"` + + // Specifies the required instance count of the Spring Cloud Deployment. Possible Values are between 1 and 500. Defaults to 1 if not specified. + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + // Specifies the language framework of the container image. The only possible value is springboot. + LanguageFramework *string `json:"languageFramework,omitempty" tf:"language_framework,omitempty"` + + // A quota block as defined below. 
+ Quota *SpringCloudContainerDeploymentQuotaObservation `json:"quota,omitempty" tf:"quota,omitempty"` + + // The name of the registry that contains the container image. + Server *string `json:"server,omitempty" tf:"server,omitempty"` + + // The ID of the Spring Cloud Service. Changing this forces a new Spring Cloud Container Deployment to be created. + SpringCloudAppID *string `json:"springCloudAppId,omitempty" tf:"spring_cloud_app_id,omitempty"` +} + +type SpringCloudContainerDeploymentParameters struct { + + // A JSON object that contains the addon configurations of the Spring Cloud Container Deployment. + // +kubebuilder:validation:Optional + AddonJSON *string `json:"addonJson,omitempty" tf:"addon_json,omitempty"` + + // Specifies a list of Spring Cloud Application Performance Monitoring IDs. + // +kubebuilder:validation:Optional + ApplicationPerformanceMonitoringIds []*string `json:"applicationPerformanceMonitoringIds,omitempty" tf:"application_performance_monitoring_ids,omitempty"` + + // Specifies the arguments to the entrypoint. The docker image's CMD is used if not specified. + // +kubebuilder:validation:Optional + Arguments []*string `json:"arguments,omitempty" tf:"arguments,omitempty"` + + // Specifies the entrypoint array. It will not be executed within a shell. The docker image's ENTRYPOINT is used if not specified. + // +kubebuilder:validation:Optional + Commands []*string `json:"commands,omitempty" tf:"commands,omitempty"` + + // Specifies the environment variables of the Spring Cloud Deployment as a map of key-value pairs. + // +kubebuilder:validation:Optional + // +mapType=granular + EnvironmentVariables map[string]*string `json:"environmentVariables,omitempty" tf:"environment_variables,omitempty"` + + // Container image of the custom container. This should be in the form of : without the server name of the registry. 
+ // +kubebuilder:validation:Optional + Image *string `json:"image,omitempty" tf:"image,omitempty"` + + // Specifies the required instance count of the Spring Cloud Deployment. Possible Values are between 1 and 500. Defaults to 1 if not specified. + // +kubebuilder:validation:Optional + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + // Specifies the language framework of the container image. The only possible value is springboot. + // +kubebuilder:validation:Optional + LanguageFramework *string `json:"languageFramework,omitempty" tf:"language_framework,omitempty"` + + // A quota block as defined below. + // +kubebuilder:validation:Optional + Quota *SpringCloudContainerDeploymentQuotaParameters `json:"quota,omitempty" tf:"quota,omitempty"` + + // The name of the registry that contains the container image. + // +kubebuilder:validation:Optional + Server *string `json:"server,omitempty" tf:"server,omitempty"` + + // The ID of the Spring Cloud Service. Changing this forces a new Spring Cloud Container Deployment to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/appplatform/v1beta2.SpringCloudApp + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + SpringCloudAppID *string `json:"springCloudAppId,omitempty" tf:"spring_cloud_app_id,omitempty"` + + // Reference to a SpringCloudApp in appplatform to populate springCloudAppId. + // +kubebuilder:validation:Optional + SpringCloudAppIDRef *v1.Reference `json:"springCloudAppIdRef,omitempty" tf:"-"` + + // Selector for a SpringCloudApp in appplatform to populate springCloudAppId. + // +kubebuilder:validation:Optional + SpringCloudAppIDSelector *v1.Selector `json:"springCloudAppIdSelector,omitempty" tf:"-"` +} + +type SpringCloudContainerDeploymentQuotaInitParameters struct { + + // Specifies the required cpu of the Spring Cloud Deployment. 
Possible Values are 500m, 1, 2, 3 and 4. Defaults to 1 if not specified. + CPU *string `json:"cpu,omitempty" tf:"cpu,omitempty"` + + // Specifies the required memory size of the Spring Cloud Deployment. Possible Values are 512Mi, 1Gi, 2Gi, 3Gi, 4Gi, 5Gi, 6Gi, 7Gi, and 8Gi. Defaults to 1Gi if not specified. + Memory *string `json:"memory,omitempty" tf:"memory,omitempty"` +} + +type SpringCloudContainerDeploymentQuotaObservation struct { + + // Specifies the required cpu of the Spring Cloud Deployment. Possible Values are 500m, 1, 2, 3 and 4. Defaults to 1 if not specified. + CPU *string `json:"cpu,omitempty" tf:"cpu,omitempty"` + + // Specifies the required memory size of the Spring Cloud Deployment. Possible Values are 512Mi, 1Gi, 2Gi, 3Gi, 4Gi, 5Gi, 6Gi, 7Gi, and 8Gi. Defaults to 1Gi if not specified. + Memory *string `json:"memory,omitempty" tf:"memory,omitempty"` +} + +type SpringCloudContainerDeploymentQuotaParameters struct { + + // Specifies the required cpu of the Spring Cloud Deployment. Possible Values are 500m, 1, 2, 3 and 4. Defaults to 1 if not specified. + // +kubebuilder:validation:Optional + CPU *string `json:"cpu,omitempty" tf:"cpu,omitempty"` + + // Specifies the required memory size of the Spring Cloud Deployment. Possible Values are 512Mi, 1Gi, 2Gi, 3Gi, 4Gi, 5Gi, 6Gi, 7Gi, and 8Gi. Defaults to 1Gi if not specified. + // +kubebuilder:validation:Optional + Memory *string `json:"memory,omitempty" tf:"memory,omitempty"` +} + +// SpringCloudContainerDeploymentSpec defines the desired state of SpringCloudContainerDeployment +type SpringCloudContainerDeploymentSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider SpringCloudContainerDeploymentParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. 
The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider SpringCloudContainerDeploymentInitParameters `json:"initProvider,omitempty"` +} + +// SpringCloudContainerDeploymentStatus defines the observed state of SpringCloudContainerDeployment. +type SpringCloudContainerDeploymentStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SpringCloudContainerDeploymentObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// SpringCloudContainerDeployment is the Schema for the SpringCloudContainerDeployments API. Manages a Spring Cloud Container Deployment. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type SpringCloudContainerDeployment struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.image) || (has(self.initProvider) && has(self.initProvider.image))",message="spec.forProvider.image is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.server) || (has(self.initProvider) && has(self.initProvider.server))",message="spec.forProvider.server is a required parameter" + Spec SpringCloudContainerDeploymentSpec `json:"spec"` + Status SpringCloudContainerDeploymentStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SpringCloudContainerDeploymentList contains a list of SpringCloudContainerDeployments +type SpringCloudContainerDeploymentList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SpringCloudContainerDeployment `json:"items"` +} + +// Repository type metadata. 
+var ( + SpringCloudContainerDeployment_Kind = "SpringCloudContainerDeployment" + SpringCloudContainerDeployment_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: SpringCloudContainerDeployment_Kind}.String() + SpringCloudContainerDeployment_KindAPIVersion = SpringCloudContainerDeployment_Kind + "." + CRDGroupVersion.String() + SpringCloudContainerDeployment_GroupVersionKind = CRDGroupVersion.WithKind(SpringCloudContainerDeployment_Kind) +) + +func init() { + SchemeBuilder.Register(&SpringCloudContainerDeployment{}, &SpringCloudContainerDeploymentList{}) +} diff --git a/apis/appplatform/v1beta2/zz_springcloudcustomizedaccelerator_terraformed.go b/apis/appplatform/v1beta2/zz_springcloudcustomizedaccelerator_terraformed.go new file mode 100755 index 000000000..a29d46a15 --- /dev/null +++ b/apis/appplatform/v1beta2/zz_springcloudcustomizedaccelerator_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this SpringCloudCustomizedAccelerator +func (mg *SpringCloudCustomizedAccelerator) GetTerraformResourceType() string { + return "azurerm_spring_cloud_customized_accelerator" +} + +// GetConnectionDetailsMapping for this SpringCloudCustomizedAccelerator +func (tr *SpringCloudCustomizedAccelerator) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"git_repository[*].basic_auth[*].password": "spec.forProvider.gitRepository[*].basicAuth[*].passwordSecretRef", "git_repository[*].ssh_auth[*].host_key": "spec.forProvider.gitRepository[*].sshAuth[*].hostKeySecretRef", "git_repository[*].ssh_auth[*].private_key": "spec.forProvider.gitRepository[*].sshAuth[*].privateKeySecretRef"} +} + +// GetObservation of this SpringCloudCustomizedAccelerator +func (tr *SpringCloudCustomizedAccelerator) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this SpringCloudCustomizedAccelerator +func (tr *SpringCloudCustomizedAccelerator) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this SpringCloudCustomizedAccelerator +func (tr *SpringCloudCustomizedAccelerator) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this SpringCloudCustomizedAccelerator +func (tr *SpringCloudCustomizedAccelerator) GetParameters() (map[string]any, error) { + p, err := 
json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this SpringCloudCustomizedAccelerator +func (tr *SpringCloudCustomizedAccelerator) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this SpringCloudCustomizedAccelerator +func (tr *SpringCloudCustomizedAccelerator) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this SpringCloudCustomizedAccelerator +func (tr *SpringCloudCustomizedAccelerator) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this SpringCloudCustomizedAccelerator using its observed tfState. 
+// returns True if there are any spec changes for the resource. +func (tr *SpringCloudCustomizedAccelerator) LateInitialize(attrs []byte) (bool, error) { + params := &SpringCloudCustomizedAcceleratorParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *SpringCloudCustomizedAccelerator) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/appplatform/v1beta2/zz_springcloudcustomizedaccelerator_types.go b/apis/appplatform/v1beta2/zz_springcloudcustomizedaccelerator_types.go new file mode 100755 index 000000000..f8ff86e00 --- /dev/null +++ b/apis/appplatform/v1beta2/zz_springcloudcustomizedaccelerator_types.go @@ -0,0 +1,313 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type BasicAuthInitParameters struct { + + // Specifies the username of git repository basic auth. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type BasicAuthObservation struct { + + // Specifies the username of git repository basic auth. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type BasicAuthParameters struct { + + // Specifies the password of git repository basic auth. 
+ // +kubebuilder:validation:Required + PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` + + // Specifies the username of git repository basic auth. + // +kubebuilder:validation:Optional + Username *string `json:"username" tf:"username,omitempty"` +} + +type GitRepositoryInitParameters struct { + + // A basic_auth block as defined below. Conflicts with git_repository[0].ssh_auth. Changing this forces a new Spring Cloud Customized Accelerator to be created. + BasicAuth *BasicAuthInitParameters `json:"basicAuth,omitempty" tf:"basic_auth,omitempty"` + + // Specifies the Git repository branch to be used. + Branch *string `json:"branch,omitempty" tf:"branch,omitempty"` + + // Specifies the ID of the CA Spring Cloud Certificate for https URL of Git repository. + CACertificateID *string `json:"caCertificateId,omitempty" tf:"ca_certificate_id,omitempty"` + + // Specifies the Git repository commit to be used. + Commit *string `json:"commit,omitempty" tf:"commit,omitempty"` + + // Specifies the Git repository tag to be used. + GitTag *string `json:"gitTag,omitempty" tf:"git_tag,omitempty"` + + // Specifies the interval for checking for updates to Git or image repository. It should be greater than 10. + IntervalInSeconds *float64 `json:"intervalInSeconds,omitempty" tf:"interval_in_seconds,omitempty"` + + // Specifies the path under the git repository to be treated as the root directory of the accelerator or the fragment (depending on accelerator_type). + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // A ssh_auth block as defined below. Conflicts with git_repository[0].basic_auth. Changing this forces a new Spring Cloud Customized Accelerator to be created. + SSHAuth *SSHAuthInitParameters `json:"sshAuth,omitempty" tf:"ssh_auth,omitempty"` + + // Specifies Git repository URL for the accelerator. + URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +type GitRepositoryObservation struct { + + // A basic_auth block as defined below. 
Conflicts with git_repository[0].ssh_auth. Changing this forces a new Spring Cloud Customized Accelerator to be created. + BasicAuth *BasicAuthObservation `json:"basicAuth,omitempty" tf:"basic_auth,omitempty"` + + // Specifies the Git repository branch to be used. + Branch *string `json:"branch,omitempty" tf:"branch,omitempty"` + + // Specifies the ID of the CA Spring Cloud Certificate for https URL of Git repository. + CACertificateID *string `json:"caCertificateId,omitempty" tf:"ca_certificate_id,omitempty"` + + // Specifies the Git repository commit to be used. + Commit *string `json:"commit,omitempty" tf:"commit,omitempty"` + + // Specifies the Git repository tag to be used. + GitTag *string `json:"gitTag,omitempty" tf:"git_tag,omitempty"` + + // Specifies the interval for checking for updates to Git or image repository. It should be greater than 10. + IntervalInSeconds *float64 `json:"intervalInSeconds,omitempty" tf:"interval_in_seconds,omitempty"` + + // Specifies the path under the git repository to be treated as the root directory of the accelerator or the fragment (depending on accelerator_type). + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // A ssh_auth block as defined below. Conflicts with git_repository[0].basic_auth. Changing this forces a new Spring Cloud Customized Accelerator to be created. + SSHAuth *SSHAuthObservation `json:"sshAuth,omitempty" tf:"ssh_auth,omitempty"` + + // Specifies Git repository URL for the accelerator. + URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +type GitRepositoryParameters struct { + + // A basic_auth block as defined below. Conflicts with git_repository[0].ssh_auth. Changing this forces a new Spring Cloud Customized Accelerator to be created. + // +kubebuilder:validation:Optional + BasicAuth *BasicAuthParameters `json:"basicAuth,omitempty" tf:"basic_auth,omitempty"` + + // Specifies the Git repository branch to be used. 
+ // +kubebuilder:validation:Optional + Branch *string `json:"branch,omitempty" tf:"branch,omitempty"` + + // Specifies the ID of the CA Spring Cloud Certificate for https URL of Git repository. + // +kubebuilder:validation:Optional + CACertificateID *string `json:"caCertificateId,omitempty" tf:"ca_certificate_id,omitempty"` + + // Specifies the Git repository commit to be used. + // +kubebuilder:validation:Optional + Commit *string `json:"commit,omitempty" tf:"commit,omitempty"` + + // Specifies the Git repository tag to be used. + // +kubebuilder:validation:Optional + GitTag *string `json:"gitTag,omitempty" tf:"git_tag,omitempty"` + + // Specifies the interval for checking for updates to Git or image repository. It should be greater than 10. + // +kubebuilder:validation:Optional + IntervalInSeconds *float64 `json:"intervalInSeconds,omitempty" tf:"interval_in_seconds,omitempty"` + + // Specifies the path under the git repository to be treated as the root directory of the accelerator or the fragment (depending on accelerator_type). + // +kubebuilder:validation:Optional + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // A ssh_auth block as defined below. Conflicts with git_repository[0].basic_auth. Changing this forces a new Spring Cloud Customized Accelerator to be created. + // +kubebuilder:validation:Optional + SSHAuth *SSHAuthParameters `json:"sshAuth,omitempty" tf:"ssh_auth,omitempty"` + + // Specifies Git repository URL for the accelerator. + // +kubebuilder:validation:Optional + URL *string `json:"url" tf:"url,omitempty"` +} + +type SSHAuthInitParameters struct { + + // Specifies the SSH Key algorithm of git repository basic auth. + HostKeyAlgorithm *string `json:"hostKeyAlgorithm,omitempty" tf:"host_key_algorithm,omitempty"` +} + +type SSHAuthObservation struct { + + // Specifies the SSH Key algorithm of git repository basic auth. 
+ HostKeyAlgorithm *string `json:"hostKeyAlgorithm,omitempty" tf:"host_key_algorithm,omitempty"` +} + +type SSHAuthParameters struct { + + // Specifies the SSH Key algorithm of git repository basic auth. + // +kubebuilder:validation:Optional + HostKeyAlgorithm *string `json:"hostKeyAlgorithm,omitempty" tf:"host_key_algorithm,omitempty"` + + // Specifies the Public SSH Key of git repository basic auth. + // +kubebuilder:validation:Optional + HostKeySecretRef *v1.SecretKeySelector `json:"hostKeySecretRef,omitempty" tf:"-"` + + // Specifies the Private SSH Key of git repository basic auth. + // +kubebuilder:validation:Required + PrivateKeySecretRef v1.SecretKeySelector `json:"privateKeySecretRef" tf:"-"` +} + +type SpringCloudCustomizedAcceleratorInitParameters struct { + + // Specifies a list of accelerator tags. + AcceleratorTags []*string `json:"acceleratorTags,omitempty" tf:"accelerator_tags,omitempty"` + + // Specifies the type of the Spring Cloud Customized Accelerator. Possible values are Accelerator and Fragment. Defaults to Accelerator. + AcceleratorType *string `json:"acceleratorType,omitempty" tf:"accelerator_type,omitempty"` + + // Specifies the description of the Spring Cloud Customized Accelerator. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Specifies the display name of the Spring Cloud Customized Accelerator.. + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // A git_repository block as defined below. + GitRepository *GitRepositoryInitParameters `json:"gitRepository,omitempty" tf:"git_repository,omitempty"` + + // Specifies the icon URL of the Spring Cloud Customized Accelerator.. + IconURL *string `json:"iconUrl,omitempty" tf:"icon_url,omitempty"` +} + +type SpringCloudCustomizedAcceleratorObservation struct { + + // Specifies a list of accelerator tags. 
+ AcceleratorTags []*string `json:"acceleratorTags,omitempty" tf:"accelerator_tags,omitempty"` + + // Specifies the type of the Spring Cloud Customized Accelerator. Possible values are Accelerator and Fragment. Defaults to Accelerator. + AcceleratorType *string `json:"acceleratorType,omitempty" tf:"accelerator_type,omitempty"` + + // Specifies the description of the Spring Cloud Customized Accelerator. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Specifies the display name of the Spring Cloud Customized Accelerator.. + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // A git_repository block as defined below. + GitRepository *GitRepositoryObservation `json:"gitRepository,omitempty" tf:"git_repository,omitempty"` + + // The ID of the Spring Cloud Customized Accelerator. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies the icon URL of the Spring Cloud Customized Accelerator.. + IconURL *string `json:"iconUrl,omitempty" tf:"icon_url,omitempty"` + + // The ID of the Spring Cloud Accelerator. Changing this forces a new Spring Cloud Customized Accelerator to be created. + SpringCloudAcceleratorID *string `json:"springCloudAcceleratorId,omitempty" tf:"spring_cloud_accelerator_id,omitempty"` +} + +type SpringCloudCustomizedAcceleratorParameters struct { + + // Specifies a list of accelerator tags. + // +kubebuilder:validation:Optional + AcceleratorTags []*string `json:"acceleratorTags,omitempty" tf:"accelerator_tags,omitempty"` + + // Specifies the type of the Spring Cloud Customized Accelerator. Possible values are Accelerator and Fragment. Defaults to Accelerator. + // +kubebuilder:validation:Optional + AcceleratorType *string `json:"acceleratorType,omitempty" tf:"accelerator_type,omitempty"` + + // Specifies the description of the Spring Cloud Customized Accelerator. 
+ // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Specifies the display name of the Spring Cloud Customized Accelerator.. + // +kubebuilder:validation:Optional + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // A git_repository block as defined below. + // +kubebuilder:validation:Optional + GitRepository *GitRepositoryParameters `json:"gitRepository,omitempty" tf:"git_repository,omitempty"` + + // Specifies the icon URL of the Spring Cloud Customized Accelerator.. + // +kubebuilder:validation:Optional + IconURL *string `json:"iconUrl,omitempty" tf:"icon_url,omitempty"` + + // The ID of the Spring Cloud Accelerator. Changing this forces a new Spring Cloud Customized Accelerator to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/appplatform/v1beta1.SpringCloudAccelerator + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + SpringCloudAcceleratorID *string `json:"springCloudAcceleratorId,omitempty" tf:"spring_cloud_accelerator_id,omitempty"` + + // Reference to a SpringCloudAccelerator in appplatform to populate springCloudAcceleratorId. + // +kubebuilder:validation:Optional + SpringCloudAcceleratorIDRef *v1.Reference `json:"springCloudAcceleratorIdRef,omitempty" tf:"-"` + + // Selector for a SpringCloudAccelerator in appplatform to populate springCloudAcceleratorId. + // +kubebuilder:validation:Optional + SpringCloudAcceleratorIDSelector *v1.Selector `json:"springCloudAcceleratorIdSelector,omitempty" tf:"-"` +} + +// SpringCloudCustomizedAcceleratorSpec defines the desired state of SpringCloudCustomizedAccelerator +type SpringCloudCustomizedAcceleratorSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider SpringCloudCustomizedAcceleratorParameters `json:"forProvider"` + // THIS IS A BETA FIELD. 
It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider SpringCloudCustomizedAcceleratorInitParameters `json:"initProvider,omitempty"` +} + +// SpringCloudCustomizedAcceleratorStatus defines the observed state of SpringCloudCustomizedAccelerator. +type SpringCloudCustomizedAcceleratorStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SpringCloudCustomizedAcceleratorObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// SpringCloudCustomizedAccelerator is the Schema for the SpringCloudCustomizedAccelerators API. Manages a Spring Cloud Customized Accelerator. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type SpringCloudCustomizedAccelerator struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.gitRepository) || (has(self.initProvider) && has(self.initProvider.gitRepository))",message="spec.forProvider.gitRepository is a required parameter" + Spec SpringCloudCustomizedAcceleratorSpec `json:"spec"` + Status SpringCloudCustomizedAcceleratorStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SpringCloudCustomizedAcceleratorList contains a list of SpringCloudCustomizedAccelerators +type SpringCloudCustomizedAcceleratorList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SpringCloudCustomizedAccelerator `json:"items"` +} + +// Repository type metadata. +var ( + SpringCloudCustomizedAccelerator_Kind = "SpringCloudCustomizedAccelerator" + SpringCloudCustomizedAccelerator_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: SpringCloudCustomizedAccelerator_Kind}.String() + SpringCloudCustomizedAccelerator_KindAPIVersion = SpringCloudCustomizedAccelerator_Kind + "." 
+ CRDGroupVersion.String() + SpringCloudCustomizedAccelerator_GroupVersionKind = CRDGroupVersion.WithKind(SpringCloudCustomizedAccelerator_Kind) +) + +func init() { + SchemeBuilder.Register(&SpringCloudCustomizedAccelerator{}, &SpringCloudCustomizedAcceleratorList{}) +} diff --git a/apis/appplatform/v1beta2/zz_springclouddevtoolportal_terraformed.go b/apis/appplatform/v1beta2/zz_springclouddevtoolportal_terraformed.go new file mode 100755 index 000000000..7df35c2aa --- /dev/null +++ b/apis/appplatform/v1beta2/zz_springclouddevtoolportal_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this SpringCloudDevToolPortal +func (mg *SpringCloudDevToolPortal) GetTerraformResourceType() string { + return "azurerm_spring_cloud_dev_tool_portal" +} + +// GetConnectionDetailsMapping for this SpringCloudDevToolPortal +func (tr *SpringCloudDevToolPortal) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this SpringCloudDevToolPortal +func (tr *SpringCloudDevToolPortal) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this SpringCloudDevToolPortal +func (tr *SpringCloudDevToolPortal) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this SpringCloudDevToolPortal +func (tr *SpringCloudDevToolPortal) 
GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this SpringCloudDevToolPortal +func (tr *SpringCloudDevToolPortal) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this SpringCloudDevToolPortal +func (tr *SpringCloudDevToolPortal) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this SpringCloudDevToolPortal +func (tr *SpringCloudDevToolPortal) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this SpringCloudDevToolPortal +func (tr *SpringCloudDevToolPortal) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this SpringCloudDevToolPortal using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *SpringCloudDevToolPortal) LateInitialize(attrs []byte) (bool, error) { + params := &SpringCloudDevToolPortalParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *SpringCloudDevToolPortal) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/appplatform/v1beta2/zz_springclouddevtoolportal_types.go b/apis/appplatform/v1beta2/zz_springclouddevtoolportal_types.go new file mode 100755 index 000000000..27b5f17ab --- /dev/null +++ b/apis/appplatform/v1beta2/zz_springclouddevtoolportal_types.go @@ -0,0 +1,219 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type SpringCloudDevToolPortalInitParameters struct { + + // Should the Accelerator plugin be enabled? 
+ ApplicationAcceleratorEnabled *bool `json:"applicationAcceleratorEnabled,omitempty" tf:"application_accelerator_enabled,omitempty"` + + // Should the Application Live View be enabled? + ApplicationLiveViewEnabled *bool `json:"applicationLiveViewEnabled,omitempty" tf:"application_live_view_enabled,omitempty"` + + // The name which should be used for this Spring Cloud Dev Tool Portal. The only possible value is default. Changing this forces a new Spring Cloud Dev Tool Portal to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Is public network access enabled? + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The ID of the Spring Cloud Service. Changing this forces a new Spring Cloud Dev Tool Portal to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/appplatform/v1beta2.SpringCloudService + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + SpringCloudServiceID *string `json:"springCloudServiceId,omitempty" tf:"spring_cloud_service_id,omitempty"` + + // Reference to a SpringCloudService in appplatform to populate springCloudServiceId. + // +kubebuilder:validation:Optional + SpringCloudServiceIDRef *v1.Reference `json:"springCloudServiceIdRef,omitempty" tf:"-"` + + // Selector for a SpringCloudService in appplatform to populate springCloudServiceId. + // +kubebuilder:validation:Optional + SpringCloudServiceIDSelector *v1.Selector `json:"springCloudServiceIdSelector,omitempty" tf:"-"` + + // A sso block as defined below. + Sso *SpringCloudDevToolPortalSsoInitParameters `json:"sso,omitempty" tf:"sso,omitempty"` +} + +type SpringCloudDevToolPortalObservation struct { + + // Should the Accelerator plugin be enabled? 
+ ApplicationAcceleratorEnabled *bool `json:"applicationAcceleratorEnabled,omitempty" tf:"application_accelerator_enabled,omitempty"` + + // Should the Application Live View be enabled? + ApplicationLiveViewEnabled *bool `json:"applicationLiveViewEnabled,omitempty" tf:"application_live_view_enabled,omitempty"` + + // The ID of the Spring Cloud Dev Tool Portal. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name which should be used for this Spring Cloud Dev Tool Portal. The only possible value is default. Changing this forces a new Spring Cloud Dev Tool Portal to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Is public network access enabled? + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The ID of the Spring Cloud Service. Changing this forces a new Spring Cloud Dev Tool Portal to be created. + SpringCloudServiceID *string `json:"springCloudServiceId,omitempty" tf:"spring_cloud_service_id,omitempty"` + + // A sso block as defined below. + Sso *SpringCloudDevToolPortalSsoObservation `json:"sso,omitempty" tf:"sso,omitempty"` +} + +type SpringCloudDevToolPortalParameters struct { + + // Should the Accelerator plugin be enabled? + // +kubebuilder:validation:Optional + ApplicationAcceleratorEnabled *bool `json:"applicationAcceleratorEnabled,omitempty" tf:"application_accelerator_enabled,omitempty"` + + // Should the Application Live View be enabled? + // +kubebuilder:validation:Optional + ApplicationLiveViewEnabled *bool `json:"applicationLiveViewEnabled,omitempty" tf:"application_live_view_enabled,omitempty"` + + // The name which should be used for this Spring Cloud Dev Tool Portal. The only possible value is default. Changing this forces a new Spring Cloud Dev Tool Portal to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Is public network access enabled? 
+ // +kubebuilder:validation:Optional + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The ID of the Spring Cloud Service. Changing this forces a new Spring Cloud Dev Tool Portal to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/appplatform/v1beta2.SpringCloudService + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + SpringCloudServiceID *string `json:"springCloudServiceId,omitempty" tf:"spring_cloud_service_id,omitempty"` + + // Reference to a SpringCloudService in appplatform to populate springCloudServiceId. + // +kubebuilder:validation:Optional + SpringCloudServiceIDRef *v1.Reference `json:"springCloudServiceIdRef,omitempty" tf:"-"` + + // Selector for a SpringCloudService in appplatform to populate springCloudServiceId. + // +kubebuilder:validation:Optional + SpringCloudServiceIDSelector *v1.Selector `json:"springCloudServiceIdSelector,omitempty" tf:"-"` + + // A sso block as defined below. + // +kubebuilder:validation:Optional + Sso *SpringCloudDevToolPortalSsoParameters `json:"sso,omitempty" tf:"sso,omitempty"` +} + +type SpringCloudDevToolPortalSsoInitParameters struct { + + // Specifies the public identifier for the application. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // Specifies the secret known only to the application and the authorization server. + ClientSecret *string `json:"clientSecret,omitempty" tf:"client_secret,omitempty"` + + // Specifies the URI of a JSON file with generic OIDC provider configuration. + MetadataURL *string `json:"metadataUrl,omitempty" tf:"metadata_url,omitempty"` + + // Specifies a list of specific actions applications can be allowed to do on a user's behalf. 
+ // +listType=set + Scope []*string `json:"scope,omitempty" tf:"scope,omitempty"` +} + +type SpringCloudDevToolPortalSsoObservation struct { + + // Specifies the public identifier for the application. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // Specifies the secret known only to the application and the authorization server. + ClientSecret *string `json:"clientSecret,omitempty" tf:"client_secret,omitempty"` + + // Specifies the URI of a JSON file with generic OIDC provider configuration. + MetadataURL *string `json:"metadataUrl,omitempty" tf:"metadata_url,omitempty"` + + // Specifies a list of specific actions applications can be allowed to do on a user's behalf. + // +listType=set + Scope []*string `json:"scope,omitempty" tf:"scope,omitempty"` +} + +type SpringCloudDevToolPortalSsoParameters struct { + + // Specifies the public identifier for the application. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // Specifies the secret known only to the application and the authorization server. + // +kubebuilder:validation:Optional + ClientSecret *string `json:"clientSecret,omitempty" tf:"client_secret,omitempty"` + + // Specifies the URI of a JSON file with generic OIDC provider configuration. + // +kubebuilder:validation:Optional + MetadataURL *string `json:"metadataUrl,omitempty" tf:"metadata_url,omitempty"` + + // Specifies a list of specific actions applications can be allowed to do on a user's behalf. + // +kubebuilder:validation:Optional + // +listType=set + Scope []*string `json:"scope,omitempty" tf:"scope,omitempty"` +} + +// SpringCloudDevToolPortalSpec defines the desired state of SpringCloudDevToolPortal +type SpringCloudDevToolPortalSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider SpringCloudDevToolPortalParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. 
+ // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider SpringCloudDevToolPortalInitParameters `json:"initProvider,omitempty"` +} + +// SpringCloudDevToolPortalStatus defines the observed state of SpringCloudDevToolPortal. +type SpringCloudDevToolPortalStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SpringCloudDevToolPortalObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// SpringCloudDevToolPortal is the Schema for the SpringCloudDevToolPortals API. Manages a Spring Cloud Dev Tool Portal. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type SpringCloudDevToolPortal struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec SpringCloudDevToolPortalSpec `json:"spec"` + Status SpringCloudDevToolPortalStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SpringCloudDevToolPortalList contains a list of SpringCloudDevToolPortals +type SpringCloudDevToolPortalList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SpringCloudDevToolPortal `json:"items"` +} + +// Repository type metadata. +var ( + SpringCloudDevToolPortal_Kind = "SpringCloudDevToolPortal" + SpringCloudDevToolPortal_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: SpringCloudDevToolPortal_Kind}.String() + SpringCloudDevToolPortal_KindAPIVersion = SpringCloudDevToolPortal_Kind + "." 
+ CRDGroupVersion.String() + SpringCloudDevToolPortal_GroupVersionKind = CRDGroupVersion.WithKind(SpringCloudDevToolPortal_Kind) +) + +func init() { + SchemeBuilder.Register(&SpringCloudDevToolPortal{}, &SpringCloudDevToolPortalList{}) +} diff --git a/apis/appplatform/v1beta2/zz_springcloudgateway_terraformed.go b/apis/appplatform/v1beta2/zz_springcloudgateway_terraformed.go new file mode 100755 index 000000000..e0e4f2194 --- /dev/null +++ b/apis/appplatform/v1beta2/zz_springcloudgateway_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this SpringCloudGateway +func (mg *SpringCloudGateway) GetTerraformResourceType() string { + return "azurerm_spring_cloud_gateway" +} + +// GetConnectionDetailsMapping for this SpringCloudGateway +func (tr *SpringCloudGateway) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"sensitive_environment_variables": "spec.forProvider.sensitiveEnvironmentVariablesSecretRef"} +} + +// GetObservation of this SpringCloudGateway +func (tr *SpringCloudGateway) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this SpringCloudGateway +func (tr *SpringCloudGateway) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this SpringCloudGateway +func (tr *SpringCloudGateway) GetID() string 
{ + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this SpringCloudGateway +func (tr *SpringCloudGateway) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this SpringCloudGateway +func (tr *SpringCloudGateway) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this SpringCloudGateway +func (tr *SpringCloudGateway) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this SpringCloudGateway +func (tr *SpringCloudGateway) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this SpringCloudGateway using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *SpringCloudGateway) LateInitialize(attrs []byte) (bool, error) { + params := &SpringCloudGatewayParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *SpringCloudGateway) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/appplatform/v1beta2/zz_springcloudgateway_types.go b/apis/appplatform/v1beta2/zz_springcloudgateway_types.go new file mode 100755 index 000000000..4144e737b --- /dev/null +++ b/apis/appplatform/v1beta2/zz_springcloudgateway_types.go @@ -0,0 +1,564 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type APIMetadataInitParameters struct { + + // Detailed description of the APIs available on the Gateway instance. 
+ Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Location of additional documentation for the APIs available on the Gateway instance. + DocumentationURL *string `json:"documentationUrl,omitempty" tf:"documentation_url,omitempty"` + + // Base URL that API consumers will use to access APIs on the Gateway instance. + ServerURL *string `json:"serverUrl,omitempty" tf:"server_url,omitempty"` + + // Specifies the title describing the context of the APIs available on the Gateway instance. + Title *string `json:"title,omitempty" tf:"title,omitempty"` + + // Specifies the version of APIs available on this Gateway instance. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type APIMetadataObservation struct { + + // Detailed description of the APIs available on the Gateway instance. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Location of additional documentation for the APIs available on the Gateway instance. + DocumentationURL *string `json:"documentationUrl,omitempty" tf:"documentation_url,omitempty"` + + // Base URL that API consumers will use to access APIs on the Gateway instance. + ServerURL *string `json:"serverUrl,omitempty" tf:"server_url,omitempty"` + + // Specifies the title describing the context of the APIs available on the Gateway instance. + Title *string `json:"title,omitempty" tf:"title,omitempty"` + + // Specifies the version of APIs available on this Gateway instance. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type APIMetadataParameters struct { + + // Detailed description of the APIs available on the Gateway instance. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Location of additional documentation for the APIs available on the Gateway instance. 
+ // +kubebuilder:validation:Optional + DocumentationURL *string `json:"documentationUrl,omitempty" tf:"documentation_url,omitempty"` + + // Base URL that API consumers will use to access APIs on the Gateway instance. + // +kubebuilder:validation:Optional + ServerURL *string `json:"serverUrl,omitempty" tf:"server_url,omitempty"` + + // Specifies the title describing the context of the APIs available on the Gateway instance. + // +kubebuilder:validation:Optional + Title *string `json:"title,omitempty" tf:"title,omitempty"` + + // Specifies the version of APIs available on this Gateway instance. + // +kubebuilder:validation:Optional + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type ClientAuthorizationInitParameters struct { + + // Specifies the Spring Cloud Certificate IDs of the Spring Cloud Gateway. + CertificateIds []*string `json:"certificateIds,omitempty" tf:"certificate_ids,omitempty"` + + // Specifies whether the client certificate verification is enabled. + VerificationEnabled *bool `json:"verificationEnabled,omitempty" tf:"verification_enabled,omitempty"` +} + +type ClientAuthorizationObservation struct { + + // Specifies the Spring Cloud Certificate IDs of the Spring Cloud Gateway. + CertificateIds []*string `json:"certificateIds,omitempty" tf:"certificate_ids,omitempty"` + + // Specifies whether the client certificate verification is enabled. + VerificationEnabled *bool `json:"verificationEnabled,omitempty" tf:"verification_enabled,omitempty"` +} + +type ClientAuthorizationParameters struct { + + // Specifies the Spring Cloud Certificate IDs of the Spring Cloud Gateway. + // +kubebuilder:validation:Optional + CertificateIds []*string `json:"certificateIds,omitempty" tf:"certificate_ids,omitempty"` + + // Specifies whether the client certificate verification is enabled. 
+ // +kubebuilder:validation:Optional + VerificationEnabled *bool `json:"verificationEnabled,omitempty" tf:"verification_enabled,omitempty"` +} + +type CorsInitParameters struct { + + // Allowed headers in cross-site requests. The special value * allows actual requests to send any header. + // +listType=set + AllowedHeaders []*string `json:"allowedHeaders,omitempty" tf:"allowed_headers,omitempty"` + + // Allowed HTTP methods on cross-site requests. The special value * allows all methods. If not set, GET and HEAD are allowed by default. Possible values are DELETE, GET, HEAD, MERGE, POST, OPTIONS and PUT. + // +listType=set + AllowedMethods []*string `json:"allowedMethods,omitempty" tf:"allowed_methods,omitempty"` + + // Allowed origin patterns to make cross-site requests. + // +listType=set + AllowedOriginPatterns []*string `json:"allowedOriginPatterns,omitempty" tf:"allowed_origin_patterns,omitempty"` + + // Allowed origins to make cross-site requests. The special value * allows all domains. + // +listType=set + AllowedOrigins []*string `json:"allowedOrigins,omitempty" tf:"allowed_origins,omitempty"` + + // is user credentials are supported on cross-site requests? + CredentialsAllowed *bool `json:"credentialsAllowed,omitempty" tf:"credentials_allowed,omitempty"` + + // HTTP response headers to expose for cross-site requests. + // +listType=set + ExposedHeaders []*string `json:"exposedHeaders,omitempty" tf:"exposed_headers,omitempty"` + + // How long, in seconds, the response from a pre-flight request can be cached by clients. + MaxAgeSeconds *float64 `json:"maxAgeSeconds,omitempty" tf:"max_age_seconds,omitempty"` +} + +type CorsObservation struct { + + // Allowed headers in cross-site requests. The special value * allows actual requests to send any header. + // +listType=set + AllowedHeaders []*string `json:"allowedHeaders,omitempty" tf:"allowed_headers,omitempty"` + + // Allowed HTTP methods on cross-site requests. The special value * allows all methods. 
If not set, GET and HEAD are allowed by default. Possible values are DELETE, GET, HEAD, MERGE, POST, OPTIONS and PUT. + // +listType=set + AllowedMethods []*string `json:"allowedMethods,omitempty" tf:"allowed_methods,omitempty"` + + // Allowed origin patterns to make cross-site requests. + // +listType=set + AllowedOriginPatterns []*string `json:"allowedOriginPatterns,omitempty" tf:"allowed_origin_patterns,omitempty"` + + // Allowed origins to make cross-site requests. The special value * allows all domains. + // +listType=set + AllowedOrigins []*string `json:"allowedOrigins,omitempty" tf:"allowed_origins,omitempty"` + + // is user credentials are supported on cross-site requests? + CredentialsAllowed *bool `json:"credentialsAllowed,omitempty" tf:"credentials_allowed,omitempty"` + + // HTTP response headers to expose for cross-site requests. + // +listType=set + ExposedHeaders []*string `json:"exposedHeaders,omitempty" tf:"exposed_headers,omitempty"` + + // How long, in seconds, the response from a pre-flight request can be cached by clients. + MaxAgeSeconds *float64 `json:"maxAgeSeconds,omitempty" tf:"max_age_seconds,omitempty"` +} + +type CorsParameters struct { + + // Allowed headers in cross-site requests. The special value * allows actual requests to send any header. + // +kubebuilder:validation:Optional + // +listType=set + AllowedHeaders []*string `json:"allowedHeaders,omitempty" tf:"allowed_headers,omitempty"` + + // Allowed HTTP methods on cross-site requests. The special value * allows all methods. If not set, GET and HEAD are allowed by default. Possible values are DELETE, GET, HEAD, MERGE, POST, OPTIONS and PUT. + // +kubebuilder:validation:Optional + // +listType=set + AllowedMethods []*string `json:"allowedMethods,omitempty" tf:"allowed_methods,omitempty"` + + // Allowed origin patterns to make cross-site requests. 
+ // +kubebuilder:validation:Optional + // +listType=set + AllowedOriginPatterns []*string `json:"allowedOriginPatterns,omitempty" tf:"allowed_origin_patterns,omitempty"` + + // Allowed origins to make cross-site requests. The special value * allows all domains. + // +kubebuilder:validation:Optional + // +listType=set + AllowedOrigins []*string `json:"allowedOrigins,omitempty" tf:"allowed_origins,omitempty"` + + // is user credentials are supported on cross-site requests? + // +kubebuilder:validation:Optional + CredentialsAllowed *bool `json:"credentialsAllowed,omitempty" tf:"credentials_allowed,omitempty"` + + // HTTP response headers to expose for cross-site requests. + // +kubebuilder:validation:Optional + // +listType=set + ExposedHeaders []*string `json:"exposedHeaders,omitempty" tf:"exposed_headers,omitempty"` + + // How long, in seconds, the response from a pre-flight request can be cached by clients. + // +kubebuilder:validation:Optional + MaxAgeSeconds *float64 `json:"maxAgeSeconds,omitempty" tf:"max_age_seconds,omitempty"` +} + +type LocalResponseCachePerInstanceInitParameters struct { + + // Specifies the maximum size of cache (10MB, 900KB, 1GB...) to determine if the cache needs to evict some entries. + Size *string `json:"size,omitempty" tf:"size,omitempty"` + + // Specifies the time before a cached entry is expired (300s, 5m, 1h...). + TimeToLive *string `json:"timeToLive,omitempty" tf:"time_to_live,omitempty"` +} + +type LocalResponseCachePerInstanceObservation struct { + + // Specifies the maximum size of cache (10MB, 900KB, 1GB...) to determine if the cache needs to evict some entries. + Size *string `json:"size,omitempty" tf:"size,omitempty"` + + // Specifies the time before a cached entry is expired (300s, 5m, 1h...). + TimeToLive *string `json:"timeToLive,omitempty" tf:"time_to_live,omitempty"` +} + +type LocalResponseCachePerInstanceParameters struct { + + // Specifies the maximum size of cache (10MB, 900KB, 1GB...) 
to determine if the cache needs to evict some entries. + // +kubebuilder:validation:Optional + Size *string `json:"size,omitempty" tf:"size,omitempty"` + + // Specifies the time before a cached entry is expired (300s, 5m, 1h...). + // +kubebuilder:validation:Optional + TimeToLive *string `json:"timeToLive,omitempty" tf:"time_to_live,omitempty"` +} + +type LocalResponseCachePerRouteInitParameters struct { + + // Specifies the maximum size of cache (10MB, 900KB, 1GB...) to determine if the cache needs to evict some entries. + Size *string `json:"size,omitempty" tf:"size,omitempty"` + + // Specifies the time before a cached entry is expired (300s, 5m, 1h...). + TimeToLive *string `json:"timeToLive,omitempty" tf:"time_to_live,omitempty"` +} + +type LocalResponseCachePerRouteObservation struct { + + // Specifies the maximum size of cache (10MB, 900KB, 1GB...) to determine if the cache needs to evict some entries. + Size *string `json:"size,omitempty" tf:"size,omitempty"` + + // Specifies the time before a cached entry is expired (300s, 5m, 1h...). + TimeToLive *string `json:"timeToLive,omitempty" tf:"time_to_live,omitempty"` +} + +type LocalResponseCachePerRouteParameters struct { + + // Specifies the maximum size of cache (10MB, 900KB, 1GB...) to determine if the cache needs to evict some entries. + // +kubebuilder:validation:Optional + Size *string `json:"size,omitempty" tf:"size,omitempty"` + + // Specifies the time before a cached entry is expired (300s, 5m, 1h...). + // +kubebuilder:validation:Optional + TimeToLive *string `json:"timeToLive,omitempty" tf:"time_to_live,omitempty"` +} + +type SpringCloudGatewayInitParameters struct { + + // A api_metadata block as defined below. + APIMetadata *APIMetadataInitParameters `json:"apiMetadata,omitempty" tf:"api_metadata,omitempty"` + + // Specifies a list of Spring Cloud Application Performance Monitoring IDs. 
+ ApplicationPerformanceMonitoringIds []*string `json:"applicationPerformanceMonitoringIds,omitempty" tf:"application_performance_monitoring_ids,omitempty"` + + // Specifies a list of application performance monitoring types used in the Spring Cloud Gateway. The allowed values are AppDynamics, ApplicationInsights, Dynatrace, ElasticAPM and NewRelic. + ApplicationPerformanceMonitoringTypes []*string `json:"applicationPerformanceMonitoringTypes,omitempty" tf:"application_performance_monitoring_types,omitempty"` + + // A client_authorization block as defined below. + ClientAuthorization *ClientAuthorizationInitParameters `json:"clientAuthorization,omitempty" tf:"client_authorization,omitempty"` + + // A cors block as defined below. + Cors *CorsInitParameters `json:"cors,omitempty" tf:"cors,omitempty"` + + // Specifies the environment variables of the Spring Cloud Gateway as a map of key-value pairs. Changing this forces a new resource to be created. + // +mapType=granular + EnvironmentVariables map[string]*string `json:"environmentVariables,omitempty" tf:"environment_variables,omitempty"` + + // is only https is allowed? + HTTPSOnly *bool `json:"httpsOnly,omitempty" tf:"https_only,omitempty"` + + // Specifies the required instance count of the Spring Cloud Gateway. Possible Values are between 1 and 500. Defaults to 1 if not specified. + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + // A local_response_cache_per_instance block as defined below. Only one of local_response_cache_per_instance or local_response_cache_per_route can be specified. + LocalResponseCachePerInstance *LocalResponseCachePerInstanceInitParameters `json:"localResponseCachePerInstance,omitempty" tf:"local_response_cache_per_instance,omitempty"` + + // A local_response_cache_per_route block as defined below. Only one of local_response_cache_per_instance or local_response_cache_per_route can be specified. 
+ LocalResponseCachePerRoute *LocalResponseCachePerRouteInitParameters `json:"localResponseCachePerRoute,omitempty" tf:"local_response_cache_per_route,omitempty"` + + // Indicates whether the Spring Cloud Gateway exposes endpoint. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // A quota block as defined below. + Quota *SpringCloudGatewayQuotaInitParameters `json:"quota,omitempty" tf:"quota,omitempty"` + + // A sso block as defined below. + Sso *SpringCloudGatewaySsoInitParameters `json:"sso,omitempty" tf:"sso,omitempty"` +} + +type SpringCloudGatewayObservation struct { + + // A api_metadata block as defined below. + APIMetadata *APIMetadataObservation `json:"apiMetadata,omitempty" tf:"api_metadata,omitempty"` + + // Specifies a list of Spring Cloud Application Performance Monitoring IDs. + ApplicationPerformanceMonitoringIds []*string `json:"applicationPerformanceMonitoringIds,omitempty" tf:"application_performance_monitoring_ids,omitempty"` + + // Specifies a list of application performance monitoring types used in the Spring Cloud Gateway. The allowed values are AppDynamics, ApplicationInsights, Dynatrace, ElasticAPM and NewRelic. + ApplicationPerformanceMonitoringTypes []*string `json:"applicationPerformanceMonitoringTypes,omitempty" tf:"application_performance_monitoring_types,omitempty"` + + // A client_authorization block as defined below. + ClientAuthorization *ClientAuthorizationObservation `json:"clientAuthorization,omitempty" tf:"client_authorization,omitempty"` + + // A cors block as defined below. + Cors *CorsObservation `json:"cors,omitempty" tf:"cors,omitempty"` + + // Specifies the environment variables of the Spring Cloud Gateway as a map of key-value pairs. Changing this forces a new resource to be created. 
+ // +mapType=granular + EnvironmentVariables map[string]*string `json:"environmentVariables,omitempty" tf:"environment_variables,omitempty"` + + // is only https is allowed? + HTTPSOnly *bool `json:"httpsOnly,omitempty" tf:"https_only,omitempty"` + + // The ID of the Spring Cloud Gateway. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies the required instance count of the Spring Cloud Gateway. Possible Values are between 1 and 500. Defaults to 1 if not specified. + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + // A local_response_cache_per_instance block as defined below. Only one of local_response_cache_per_instance or local_response_cache_per_route can be specified. + LocalResponseCachePerInstance *LocalResponseCachePerInstanceObservation `json:"localResponseCachePerInstance,omitempty" tf:"local_response_cache_per_instance,omitempty"` + + // A local_response_cache_per_route block as defined below. Only one of local_response_cache_per_instance or local_response_cache_per_route can be specified. + LocalResponseCachePerRoute *LocalResponseCachePerRouteObservation `json:"localResponseCachePerRoute,omitempty" tf:"local_response_cache_per_route,omitempty"` + + // Indicates whether the Spring Cloud Gateway exposes endpoint. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // A quota block as defined below. + Quota *SpringCloudGatewayQuotaObservation `json:"quota,omitempty" tf:"quota,omitempty"` + + // The ID of the Spring Cloud Service. Changing this forces a new Spring Cloud Gateway to be created. + SpringCloudServiceID *string `json:"springCloudServiceId,omitempty" tf:"spring_cloud_service_id,omitempty"` + + // A sso block as defined below. + Sso *SpringCloudGatewaySsoObservation `json:"sso,omitempty" tf:"sso,omitempty"` + + // URL of the Spring Cloud Gateway, exposed when 'public_network_access_enabled' is true. 
+ URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +type SpringCloudGatewayParameters struct { + + // A api_metadata block as defined below. + // +kubebuilder:validation:Optional + APIMetadata *APIMetadataParameters `json:"apiMetadata,omitempty" tf:"api_metadata,omitempty"` + + // Specifies a list of Spring Cloud Application Performance Monitoring IDs. + // +kubebuilder:validation:Optional + ApplicationPerformanceMonitoringIds []*string `json:"applicationPerformanceMonitoringIds,omitempty" tf:"application_performance_monitoring_ids,omitempty"` + + // Specifies a list of application performance monitoring types used in the Spring Cloud Gateway. The allowed values are AppDynamics, ApplicationInsights, Dynatrace, ElasticAPM and NewRelic. + // +kubebuilder:validation:Optional + ApplicationPerformanceMonitoringTypes []*string `json:"applicationPerformanceMonitoringTypes,omitempty" tf:"application_performance_monitoring_types,omitempty"` + + // A client_authorization block as defined below. + // +kubebuilder:validation:Optional + ClientAuthorization *ClientAuthorizationParameters `json:"clientAuthorization,omitempty" tf:"client_authorization,omitempty"` + + // A cors block as defined below. + // +kubebuilder:validation:Optional + Cors *CorsParameters `json:"cors,omitempty" tf:"cors,omitempty"` + + // Specifies the environment variables of the Spring Cloud Gateway as a map of key-value pairs. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + // +mapType=granular + EnvironmentVariables map[string]*string `json:"environmentVariables,omitempty" tf:"environment_variables,omitempty"` + + // is only https is allowed? + // +kubebuilder:validation:Optional + HTTPSOnly *bool `json:"httpsOnly,omitempty" tf:"https_only,omitempty"` + + // Specifies the required instance count of the Spring Cloud Gateway. Possible Values are between 1 and 500. Defaults to 1 if not specified. 
+ // +kubebuilder:validation:Optional + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + // A local_response_cache_per_instance block as defined below. Only one of local_response_cache_per_instance or local_response_cache_per_route can be specified. + // +kubebuilder:validation:Optional + LocalResponseCachePerInstance *LocalResponseCachePerInstanceParameters `json:"localResponseCachePerInstance,omitempty" tf:"local_response_cache_per_instance,omitempty"` + + // A local_response_cache_per_route block as defined below. Only one of local_response_cache_per_instance or local_response_cache_per_route can be specified. + // +kubebuilder:validation:Optional + LocalResponseCachePerRoute *LocalResponseCachePerRouteParameters `json:"localResponseCachePerRoute,omitempty" tf:"local_response_cache_per_route,omitempty"` + + // Indicates whether the Spring Cloud Gateway exposes endpoint. + // +kubebuilder:validation:Optional + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // A quota block as defined below. + // +kubebuilder:validation:Optional + Quota *SpringCloudGatewayQuotaParameters `json:"quota,omitempty" tf:"quota,omitempty"` + + // Specifies the sensitive environment variables of the Spring Cloud Gateway as a map of key-value pairs. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + SensitiveEnvironmentVariablesSecretRef *v1.SecretReference `json:"sensitiveEnvironmentVariablesSecretRef,omitempty" tf:"-"` + + // The ID of the Spring Cloud Service. Changing this forces a new Spring Cloud Gateway to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/appplatform/v1beta2.SpringCloudService + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + SpringCloudServiceID *string `json:"springCloudServiceId,omitempty" tf:"spring_cloud_service_id,omitempty"` + + // Reference to a SpringCloudService in appplatform to populate springCloudServiceId. + // +kubebuilder:validation:Optional + SpringCloudServiceIDRef *v1.Reference `json:"springCloudServiceIdRef,omitempty" tf:"-"` + + // Selector for a SpringCloudService in appplatform to populate springCloudServiceId. + // +kubebuilder:validation:Optional + SpringCloudServiceIDSelector *v1.Selector `json:"springCloudServiceIdSelector,omitempty" tf:"-"` + + // A sso block as defined below. + // +kubebuilder:validation:Optional + Sso *SpringCloudGatewaySsoParameters `json:"sso,omitempty" tf:"sso,omitempty"` +} + +type SpringCloudGatewayQuotaInitParameters struct { + + // Specifies the required cpu of the Spring Cloud Deployment. Possible Values are 500m, 1, 2, 3 and 4. Defaults to 1 if not specified. + CPU *string `json:"cpu,omitempty" tf:"cpu,omitempty"` + + // Specifies the required memory size of the Spring Cloud Deployment. Possible Values are 512Mi, 1Gi, 2Gi, 3Gi, 4Gi, 5Gi, 6Gi, 7Gi, and 8Gi. Defaults to 2Gi if not specified. + Memory *string `json:"memory,omitempty" tf:"memory,omitempty"` +} + +type SpringCloudGatewayQuotaObservation struct { + + // Specifies the required cpu of the Spring Cloud Deployment. Possible Values are 500m, 1, 2, 3 and 4. Defaults to 1 if not specified. + CPU *string `json:"cpu,omitempty" tf:"cpu,omitempty"` + + // Specifies the required memory size of the Spring Cloud Deployment. Possible Values are 512Mi, 1Gi, 2Gi, 3Gi, 4Gi, 5Gi, 6Gi, 7Gi, and 8Gi. Defaults to 2Gi if not specified. 
+ Memory *string `json:"memory,omitempty" tf:"memory,omitempty"` +} + +type SpringCloudGatewayQuotaParameters struct { + + // Specifies the required cpu of the Spring Cloud Deployment. Possible Values are 500m, 1, 2, 3 and 4. Defaults to 1 if not specified. + // +kubebuilder:validation:Optional + CPU *string `json:"cpu,omitempty" tf:"cpu,omitempty"` + + // Specifies the required memory size of the Spring Cloud Deployment. Possible Values are 512Mi, 1Gi, 2Gi, 3Gi, 4Gi, 5Gi, 6Gi, 7Gi, and 8Gi. Defaults to 2Gi if not specified. + // +kubebuilder:validation:Optional + Memory *string `json:"memory,omitempty" tf:"memory,omitempty"` +} + +type SpringCloudGatewaySsoInitParameters struct { + + // The public identifier for the application. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The secret known only to the application and the authorization server. + ClientSecret *string `json:"clientSecret,omitempty" tf:"client_secret,omitempty"` + + // The URI of Issuer Identifier. + IssuerURI *string `json:"issuerUri,omitempty" tf:"issuer_uri,omitempty"` + + // It defines the specific actions applications can be allowed to do on a user's behalf. + // +listType=set + Scope []*string `json:"scope,omitempty" tf:"scope,omitempty"` +} + +type SpringCloudGatewaySsoObservation struct { + + // The public identifier for the application. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The secret known only to the application and the authorization server. + ClientSecret *string `json:"clientSecret,omitempty" tf:"client_secret,omitempty"` + + // The URI of Issuer Identifier. + IssuerURI *string `json:"issuerUri,omitempty" tf:"issuer_uri,omitempty"` + + // It defines the specific actions applications can be allowed to do on a user's behalf. + // +listType=set + Scope []*string `json:"scope,omitempty" tf:"scope,omitempty"` +} + +type SpringCloudGatewaySsoParameters struct { + + // The public identifier for the application. 
+ // +kubebuilder:validation:Optional + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The secret known only to the application and the authorization server. + // +kubebuilder:validation:Optional + ClientSecret *string `json:"clientSecret,omitempty" tf:"client_secret,omitempty"` + + // The URI of Issuer Identifier. + // +kubebuilder:validation:Optional + IssuerURI *string `json:"issuerUri,omitempty" tf:"issuer_uri,omitempty"` + + // It defines the specific actions applications can be allowed to do on a user's behalf. + // +kubebuilder:validation:Optional + // +listType=set + Scope []*string `json:"scope,omitempty" tf:"scope,omitempty"` +} + +// SpringCloudGatewaySpec defines the desired state of SpringCloudGateway +type SpringCloudGatewaySpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider SpringCloudGatewayParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider SpringCloudGatewayInitParameters `json:"initProvider,omitempty"` +} + +// SpringCloudGatewayStatus defines the observed state of SpringCloudGateway. 
+type SpringCloudGatewayStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SpringCloudGatewayObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// SpringCloudGateway is the Schema for the SpringCloudGateways API. Manages a Spring Cloud Gateway. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type SpringCloudGateway struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec SpringCloudGatewaySpec `json:"spec"` + Status SpringCloudGatewayStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SpringCloudGatewayList contains a list of SpringCloudGateways +type SpringCloudGatewayList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SpringCloudGateway `json:"items"` +} + +// Repository type metadata. +var ( + SpringCloudGateway_Kind = "SpringCloudGateway" + SpringCloudGateway_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: SpringCloudGateway_Kind}.String() + SpringCloudGateway_KindAPIVersion = SpringCloudGateway_Kind + "." 
+ CRDGroupVersion.String() + SpringCloudGateway_GroupVersionKind = CRDGroupVersion.WithKind(SpringCloudGateway_Kind) +) + +func init() { + SchemeBuilder.Register(&SpringCloudGateway{}, &SpringCloudGatewayList{}) +} diff --git a/apis/appplatform/v1beta2/zz_springcloudjavadeployment_terraformed.go b/apis/appplatform/v1beta2/zz_springcloudjavadeployment_terraformed.go new file mode 100755 index 000000000..89f92a1a0 --- /dev/null +++ b/apis/appplatform/v1beta2/zz_springcloudjavadeployment_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this SpringCloudJavaDeployment +func (mg *SpringCloudJavaDeployment) GetTerraformResourceType() string { + return "azurerm_spring_cloud_java_deployment" +} + +// GetConnectionDetailsMapping for this SpringCloudJavaDeployment +func (tr *SpringCloudJavaDeployment) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this SpringCloudJavaDeployment +func (tr *SpringCloudJavaDeployment) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this SpringCloudJavaDeployment +func (tr *SpringCloudJavaDeployment) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this SpringCloudJavaDeployment +func (tr *SpringCloudJavaDeployment) GetID() string { + if tr.Status.AtProvider.ID 
== nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this SpringCloudJavaDeployment +func (tr *SpringCloudJavaDeployment) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this SpringCloudJavaDeployment +func (tr *SpringCloudJavaDeployment) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this SpringCloudJavaDeployment +func (tr *SpringCloudJavaDeployment) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this SpringCloudJavaDeployment +func (tr *SpringCloudJavaDeployment) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this SpringCloudJavaDeployment using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *SpringCloudJavaDeployment) LateInitialize(attrs []byte) (bool, error) { + params := &SpringCloudJavaDeploymentParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *SpringCloudJavaDeployment) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/appplatform/v1beta2/zz_springcloudjavadeployment_types.go b/apis/appplatform/v1beta2/zz_springcloudjavadeployment_types.go new file mode 100755 index 000000000..c9fc0bb5b --- /dev/null +++ b/apis/appplatform/v1beta2/zz_springcloudjavadeployment_types.go @@ -0,0 +1,185 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type SpringCloudJavaDeploymentInitParameters struct { + + // Specifies the environment variables of the Spring Cloud Deployment as a map of key-value pairs. 
+ // +mapType=granular + EnvironmentVariables map[string]*string `json:"environmentVariables,omitempty" tf:"environment_variables,omitempty"` + + // Specifies the required instance count of the Spring Cloud Deployment. Possible Values are between 1 and 500. Defaults to 1 if not specified. + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + // Specifies the jvm option of the Spring Cloud Deployment. + JvmOptions *string `json:"jvmOptions,omitempty" tf:"jvm_options,omitempty"` + + // A quota block as defined below. + Quota *SpringCloudJavaDeploymentQuotaInitParameters `json:"quota,omitempty" tf:"quota,omitempty"` + + // Specifies the runtime version of the Spring Cloud Deployment. Possible Values are Java_8, Java_11 and Java_17. Defaults to Java_8. + RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` +} + +type SpringCloudJavaDeploymentObservation struct { + + // Specifies the environment variables of the Spring Cloud Deployment as a map of key-value pairs. + // +mapType=granular + EnvironmentVariables map[string]*string `json:"environmentVariables,omitempty" tf:"environment_variables,omitempty"` + + // The ID of the Spring Cloud Deployment. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies the required instance count of the Spring Cloud Deployment. Possible Values are between 1 and 500. Defaults to 1 if not specified. + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + // Specifies the jvm option of the Spring Cloud Deployment. + JvmOptions *string `json:"jvmOptions,omitempty" tf:"jvm_options,omitempty"` + + // A quota block as defined below. + Quota *SpringCloudJavaDeploymentQuotaObservation `json:"quota,omitempty" tf:"quota,omitempty"` + + // Specifies the runtime version of the Spring Cloud Deployment. Possible Values are Java_8, Java_11 and Java_17. Defaults to Java_8. 
+ RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` + + // Specifies the id of the Spring Cloud Application in which to create the Deployment. Changing this forces a new resource to be created. + SpringCloudAppID *string `json:"springCloudAppId,omitempty" tf:"spring_cloud_app_id,omitempty"` +} + +type SpringCloudJavaDeploymentParameters struct { + + // Specifies the environment variables of the Spring Cloud Deployment as a map of key-value pairs. + // +kubebuilder:validation:Optional + // +mapType=granular + EnvironmentVariables map[string]*string `json:"environmentVariables,omitempty" tf:"environment_variables,omitempty"` + + // Specifies the required instance count of the Spring Cloud Deployment. Possible Values are between 1 and 500. Defaults to 1 if not specified. + // +kubebuilder:validation:Optional + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + // Specifies the jvm option of the Spring Cloud Deployment. + // +kubebuilder:validation:Optional + JvmOptions *string `json:"jvmOptions,omitempty" tf:"jvm_options,omitempty"` + + // A quota block as defined below. + // +kubebuilder:validation:Optional + Quota *SpringCloudJavaDeploymentQuotaParameters `json:"quota,omitempty" tf:"quota,omitempty"` + + // Specifies the runtime version of the Spring Cloud Deployment. Possible Values are Java_8, Java_11 and Java_17. Defaults to Java_8. + // +kubebuilder:validation:Optional + RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` + + // Specifies the id of the Spring Cloud Application in which to create the Deployment. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/appplatform/v1beta2.SpringCloudApp + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + SpringCloudAppID *string `json:"springCloudAppId,omitempty" tf:"spring_cloud_app_id,omitempty"` + + // Reference to a SpringCloudApp in appplatform to populate springCloudAppId. + // +kubebuilder:validation:Optional + SpringCloudAppIDRef *v1.Reference `json:"springCloudAppIdRef,omitempty" tf:"-"` + + // Selector for a SpringCloudApp in appplatform to populate springCloudAppId. + // +kubebuilder:validation:Optional + SpringCloudAppIDSelector *v1.Selector `json:"springCloudAppIdSelector,omitempty" tf:"-"` +} + +type SpringCloudJavaDeploymentQuotaInitParameters struct { + + // Specifies the required cpu of the Spring Cloud Deployment. Possible Values are 500m, 1, 2, 3 and 4. Defaults to 1 if not specified. + CPU *string `json:"cpu,omitempty" tf:"cpu,omitempty"` + + // Specifies the required memory size of the Spring Cloud Deployment. Possible Values are 512Mi, 1Gi, 2Gi, 3Gi, 4Gi, 5Gi, 6Gi, 7Gi, and 8Gi. Defaults to 1Gi if not specified. + Memory *string `json:"memory,omitempty" tf:"memory,omitempty"` +} + +type SpringCloudJavaDeploymentQuotaObservation struct { + + // Specifies the required cpu of the Spring Cloud Deployment. Possible Values are 500m, 1, 2, 3 and 4. Defaults to 1 if not specified. + CPU *string `json:"cpu,omitempty" tf:"cpu,omitempty"` + + // Specifies the required memory size of the Spring Cloud Deployment. Possible Values are 512Mi, 1Gi, 2Gi, 3Gi, 4Gi, 5Gi, 6Gi, 7Gi, and 8Gi. Defaults to 1Gi if not specified. + Memory *string `json:"memory,omitempty" tf:"memory,omitempty"` +} + +type SpringCloudJavaDeploymentQuotaParameters struct { + + // Specifies the required cpu of the Spring Cloud Deployment. Possible Values are 500m, 1, 2, 3 and 4. Defaults to 1 if not specified. 
+ // +kubebuilder:validation:Optional + CPU *string `json:"cpu,omitempty" tf:"cpu,omitempty"` + + // Specifies the required memory size of the Spring Cloud Deployment. Possible Values are 512Mi, 1Gi, 2Gi, 3Gi, 4Gi, 5Gi, 6Gi, 7Gi, and 8Gi. Defaults to 1Gi if not specified. + // +kubebuilder:validation:Optional + Memory *string `json:"memory,omitempty" tf:"memory,omitempty"` +} + +// SpringCloudJavaDeploymentSpec defines the desired state of SpringCloudJavaDeployment +type SpringCloudJavaDeploymentSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider SpringCloudJavaDeploymentParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider SpringCloudJavaDeploymentInitParameters `json:"initProvider,omitempty"` +} + +// SpringCloudJavaDeploymentStatus defines the observed state of SpringCloudJavaDeployment. +type SpringCloudJavaDeploymentStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SpringCloudJavaDeploymentObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// SpringCloudJavaDeployment is the Schema for the SpringCloudJavaDeployments API. Manages an Azure Spring Cloud Deployment with a Java runtime. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type SpringCloudJavaDeployment struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec SpringCloudJavaDeploymentSpec `json:"spec"` + Status SpringCloudJavaDeploymentStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SpringCloudJavaDeploymentList contains a list of SpringCloudJavaDeployments +type SpringCloudJavaDeploymentList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SpringCloudJavaDeployment `json:"items"` +} + +// Repository type metadata. +var ( + SpringCloudJavaDeployment_Kind = "SpringCloudJavaDeployment" + SpringCloudJavaDeployment_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: SpringCloudJavaDeployment_Kind}.String() + SpringCloudJavaDeployment_KindAPIVersion = SpringCloudJavaDeployment_Kind + "." 
+ CRDGroupVersion.String() + SpringCloudJavaDeployment_GroupVersionKind = CRDGroupVersion.WithKind(SpringCloudJavaDeployment_Kind) +) + +func init() { + SchemeBuilder.Register(&SpringCloudJavaDeployment{}, &SpringCloudJavaDeploymentList{}) +} diff --git a/apis/appplatform/v1beta2/zz_springcloudservice_terraformed.go b/apis/appplatform/v1beta2/zz_springcloudservice_terraformed.go new file mode 100755 index 000000000..02e362555 --- /dev/null +++ b/apis/appplatform/v1beta2/zz_springcloudservice_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this SpringCloudService +func (mg *SpringCloudService) GetTerraformResourceType() string { + return "azurerm_spring_cloud_service" +} + +// GetConnectionDetailsMapping for this SpringCloudService +func (tr *SpringCloudService) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"config_server_git_setting[*].http_basic_auth[*].password": "spec.forProvider.configServerGitSetting[*].httpBasicAuth[*].passwordSecretRef", "config_server_git_setting[*].repository[*].http_basic_auth[*].password": "spec.forProvider.configServerGitSetting[*].repository[*].httpBasicAuth[*].passwordSecretRef", "config_server_git_setting[*].repository[*].ssh_auth[*].host_key": "spec.forProvider.configServerGitSetting[*].repository[*].sshAuth[*].hostKeySecretRef", "config_server_git_setting[*].repository[*].ssh_auth[*].private_key": "spec.forProvider.configServerGitSetting[*].repository[*].sshAuth[*].privateKeySecretRef", "config_server_git_setting[*].ssh_auth[*].host_key": "spec.forProvider.configServerGitSetting[*].sshAuth[*].hostKeySecretRef", 
"config_server_git_setting[*].ssh_auth[*].private_key": "spec.forProvider.configServerGitSetting[*].sshAuth[*].privateKeySecretRef", "container_registry[*].password": "spec.forProvider.containerRegistry[*].passwordSecretRef"} +} + +// GetObservation of this SpringCloudService +func (tr *SpringCloudService) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this SpringCloudService +func (tr *SpringCloudService) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this SpringCloudService +func (tr *SpringCloudService) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this SpringCloudService +func (tr *SpringCloudService) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this SpringCloudService +func (tr *SpringCloudService) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this SpringCloudService +func (tr *SpringCloudService) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this SpringCloudService +func (tr *SpringCloudService) GetMergedParameters(shouldMergeInitProvider bool) 
(map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this SpringCloudService using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *SpringCloudService) LateInitialize(attrs []byte) (bool, error) { + params := &SpringCloudServiceParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *SpringCloudService) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/appplatform/v1beta2/zz_springcloudservice_types.go b/apis/appplatform/v1beta2/zz_springcloudservice_types.go new file mode 100755 index 000000000..4cce973c4 --- /dev/null +++ b/apis/appplatform/v1beta2/zz_springcloudservice_types.go @@ -0,0 +1,826 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ConfigServerGitSettingInitParameters struct { + + // A http_basic_auth block as defined below. + HTTPBasicAuth *HTTPBasicAuthInitParameters `json:"httpBasicAuth,omitempty" tf:"http_basic_auth,omitempty"` + + // The default label of the Git repository, should be the branch name, tag name, or commit-id of the repository. + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // One or more repository blocks as defined below. + Repository []RepositoryInitParameters `json:"repository,omitempty" tf:"repository,omitempty"` + + // A ssh_auth block as defined below. + SSHAuth *ConfigServerGitSettingSSHAuthInitParameters `json:"sshAuth,omitempty" tf:"ssh_auth,omitempty"` + + // An array of strings used to search subdirectories of the Git repository. + SearchPaths []*string `json:"searchPaths,omitempty" tf:"search_paths,omitempty"` + + // The URI of the default Git repository used as the Config Server back end, should be started with http://, https://, git@, or ssh://. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type ConfigServerGitSettingObservation struct { + + // A http_basic_auth block as defined below. 
+ HTTPBasicAuth *HTTPBasicAuthObservation `json:"httpBasicAuth,omitempty" tf:"http_basic_auth,omitempty"` + + // The default label of the Git repository, should be the branch name, tag name, or commit-id of the repository. + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // One or more repository blocks as defined below. + Repository []RepositoryObservation `json:"repository,omitempty" tf:"repository,omitempty"` + + // A ssh_auth block as defined below. + SSHAuth *ConfigServerGitSettingSSHAuthObservation `json:"sshAuth,omitempty" tf:"ssh_auth,omitempty"` + + // An array of strings used to search subdirectories of the Git repository. + SearchPaths []*string `json:"searchPaths,omitempty" tf:"search_paths,omitempty"` + + // The URI of the default Git repository used as the Config Server back end, should be started with http://, https://, git@, or ssh://. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type ConfigServerGitSettingParameters struct { + + // A http_basic_auth block as defined below. + // +kubebuilder:validation:Optional + HTTPBasicAuth *HTTPBasicAuthParameters `json:"httpBasicAuth,omitempty" tf:"http_basic_auth,omitempty"` + + // The default label of the Git repository, should be the branch name, tag name, or commit-id of the repository. + // +kubebuilder:validation:Optional + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // One or more repository blocks as defined below. + // +kubebuilder:validation:Optional + Repository []RepositoryParameters `json:"repository,omitempty" tf:"repository,omitempty"` + + // A ssh_auth block as defined below. + // +kubebuilder:validation:Optional + SSHAuth *ConfigServerGitSettingSSHAuthParameters `json:"sshAuth,omitempty" tf:"ssh_auth,omitempty"` + + // An array of strings used to search subdirectories of the Git repository. 
+ // +kubebuilder:validation:Optional + SearchPaths []*string `json:"searchPaths,omitempty" tf:"search_paths,omitempty"` + + // The URI of the default Git repository used as the Config Server back end, should be started with http://, https://, git@, or ssh://. + // +kubebuilder:validation:Optional + URI *string `json:"uri" tf:"uri,omitempty"` +} + +type ConfigServerGitSettingSSHAuthInitParameters struct { + + // The host key algorithm, should be ssh-dss, ssh-rsa, ecdsa-sha2-nistp256, ecdsa-sha2-nistp384, or ecdsa-sha2-nistp521. Required only if host-key exists. + HostKeyAlgorithm *string `json:"hostKeyAlgorithm,omitempty" tf:"host_key_algorithm,omitempty"` + + // Indicates whether the Config Server instance will fail to start if the host_key does not match. Defaults to true. + StrictHostKeyCheckingEnabled *bool `json:"strictHostKeyCheckingEnabled,omitempty" tf:"strict_host_key_checking_enabled,omitempty"` +} + +type ConfigServerGitSettingSSHAuthObservation struct { + + // The host key algorithm, should be ssh-dss, ssh-rsa, ecdsa-sha2-nistp256, ecdsa-sha2-nistp384, or ecdsa-sha2-nistp521. Required only if host-key exists. + HostKeyAlgorithm *string `json:"hostKeyAlgorithm,omitempty" tf:"host_key_algorithm,omitempty"` + + // Indicates whether the Config Server instance will fail to start if the host_key does not match. Defaults to true. + StrictHostKeyCheckingEnabled *bool `json:"strictHostKeyCheckingEnabled,omitempty" tf:"strict_host_key_checking_enabled,omitempty"` +} + +type ConfigServerGitSettingSSHAuthParameters struct { + + // The host key algorithm, should be ssh-dss, ssh-rsa, ecdsa-sha2-nistp256, ecdsa-sha2-nistp384, or ecdsa-sha2-nistp521. Required only if host-key exists. + // +kubebuilder:validation:Optional + HostKeyAlgorithm *string `json:"hostKeyAlgorithm,omitempty" tf:"host_key_algorithm,omitempty"` + + // The host key of the Git repository server, should not include the algorithm prefix as covered by host-key-algorithm. 
+ // +kubebuilder:validation:Optional + HostKeySecretRef *v1.SecretKeySelector `json:"hostKeySecretRef,omitempty" tf:"-"` + + // The SSH private key to access the Git repository, required when the URI starts with git@ or ssh://. + // +kubebuilder:validation:Required + PrivateKeySecretRef v1.SecretKeySelector `json:"privateKeySecretRef" tf:"-"` + + // Indicates whether the Config Server instance will fail to start if the host_key does not match. Defaults to true. + // +kubebuilder:validation:Optional + StrictHostKeyCheckingEnabled *bool `json:"strictHostKeyCheckingEnabled,omitempty" tf:"strict_host_key_checking_enabled,omitempty"` +} + +type ContainerRegistryInitParameters struct { + + // Specifies the name of the container registry. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies the login server of the container registry. + Server *string `json:"server,omitempty" tf:"server,omitempty"` + + // Specifies the username of the container registry. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type ContainerRegistryObservation struct { + + // Specifies the name of the container registry. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies the login server of the container registry. + Server *string `json:"server,omitempty" tf:"server,omitempty"` + + // Specifies the username of the container registry. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type ContainerRegistryParameters struct { + + // Specifies the name of the container registry. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Specifies the password of the container registry. + // +kubebuilder:validation:Required + PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` + + // Specifies the login server of the container registry. 
+ // +kubebuilder:validation:Optional + Server *string `json:"server" tf:"server,omitempty"` + + // Specifies the username of the container registry. + // +kubebuilder:validation:Optional + Username *string `json:"username" tf:"username,omitempty"` +} + +type DefaultBuildServiceInitParameters struct { + + // Specifies the name of the container registry used in the default build service. + ContainerRegistryName *string `json:"containerRegistryName,omitempty" tf:"container_registry_name,omitempty"` +} + +type DefaultBuildServiceObservation struct { + + // Specifies the name of the container registry used in the default build service. + ContainerRegistryName *string `json:"containerRegistryName,omitempty" tf:"container_registry_name,omitempty"` +} + +type DefaultBuildServiceParameters struct { + + // Specifies the name of the container registry used in the default build service. + // +kubebuilder:validation:Optional + ContainerRegistryName *string `json:"containerRegistryName,omitempty" tf:"container_registry_name,omitempty"` +} + +type HTTPBasicAuthInitParameters struct { + + // The username that's used to access the Git repository server, required when the Git repository server supports HTTP Basic Authentication. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type HTTPBasicAuthObservation struct { + + // The username that's used to access the Git repository server, required when the Git repository server supports HTTP Basic Authentication. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type HTTPBasicAuthParameters struct { + + // The password used to access the Git repository server, required when the Git repository server supports HTTP Basic Authentication. 
+ // +kubebuilder:validation:Required + PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` + + // The username that's used to access the Git repository server, required when the Git repository server supports HTTP Basic Authentication. + // +kubebuilder:validation:Optional + Username *string `json:"username" tf:"username,omitempty"` +} + +type MarketplaceInitParameters struct { + + // Specifies the plan ID of the 3rd Party Artifact that is being procured. + Plan *string `json:"plan,omitempty" tf:"plan,omitempty"` + + // Specifies the 3rd Party artifact that is being procured. + Product *string `json:"product,omitempty" tf:"product,omitempty"` + + // Specifies the publisher ID of the 3rd Party Artifact that is being procured. + Publisher *string `json:"publisher,omitempty" tf:"publisher,omitempty"` +} + +type MarketplaceObservation struct { + + // Specifies the plan ID of the 3rd Party Artifact that is being procured. + Plan *string `json:"plan,omitempty" tf:"plan,omitempty"` + + // Specifies the 3rd Party artifact that is being procured. + Product *string `json:"product,omitempty" tf:"product,omitempty"` + + // Specifies the publisher ID of the 3rd Party Artifact that is being procured. + Publisher *string `json:"publisher,omitempty" tf:"publisher,omitempty"` +} + +type MarketplaceParameters struct { + + // Specifies the plan ID of the 3rd Party Artifact that is being procured. + // +kubebuilder:validation:Optional + Plan *string `json:"plan" tf:"plan,omitempty"` + + // Specifies the 3rd Party artifact that is being procured. + // +kubebuilder:validation:Optional + Product *string `json:"product" tf:"product,omitempty"` + + // Specifies the publisher ID of the 3rd Party Artifact that is being procured. 
+ // +kubebuilder:validation:Optional + Publisher *string `json:"publisher" tf:"publisher,omitempty"` +} + +type NetworkInitParameters struct { + + // Specifies the Name of the resource group containing network resources of Azure Spring Cloud Apps. Changing this forces a new resource to be created. + AppNetworkResourceGroup *string `json:"appNetworkResourceGroup,omitempty" tf:"app_network_resource_group,omitempty"` + + // Specifies the ID of the Subnet which should host the Spring Boot Applications deployed in this Spring Cloud Service. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + AppSubnetID *string `json:"appSubnetId,omitempty" tf:"app_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate appSubnetId. + // +kubebuilder:validation:Optional + AppSubnetIDRef *v1.Reference `json:"appSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate appSubnetId. + // +kubebuilder:validation:Optional + AppSubnetIDSelector *v1.Selector `json:"appSubnetIdSelector,omitempty" tf:"-"` + + // A list of (at least 3) CIDR ranges (at least /16) which are used to host the Spring Cloud infrastructure, which must not overlap with any existing CIDR ranges in the Subnet. Changing this forces a new resource to be created. + CidrRanges []*string `json:"cidrRanges,omitempty" tf:"cidr_ranges,omitempty"` + + // Specifies the egress traffic type of the Spring Cloud Service. Possible values are loadBalancer and userDefinedRouting. Defaults to loadBalancer. Changing this forces a new resource to be created. + OutboundType *string `json:"outboundType,omitempty" tf:"outbound_type,omitempty"` + + // Ingress read time out in seconds. 
+ ReadTimeoutSeconds *float64 `json:"readTimeoutSeconds,omitempty" tf:"read_timeout_seconds,omitempty"` + + // Specifies the Name of the resource group containing network resources of Azure Spring Cloud Service Runtime. Changing this forces a new resource to be created. + ServiceRuntimeNetworkResourceGroup *string `json:"serviceRuntimeNetworkResourceGroup,omitempty" tf:"service_runtime_network_resource_group,omitempty"` + + // Specifies the ID of the Subnet where the Service Runtime components of the Spring Cloud Service will exist. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + ServiceRuntimeSubnetID *string `json:"serviceRuntimeSubnetId,omitempty" tf:"service_runtime_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate serviceRuntimeSubnetId. + // +kubebuilder:validation:Optional + ServiceRuntimeSubnetIDRef *v1.Reference `json:"serviceRuntimeSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate serviceRuntimeSubnetId. + // +kubebuilder:validation:Optional + ServiceRuntimeSubnetIDSelector *v1.Selector `json:"serviceRuntimeSubnetIdSelector,omitempty" tf:"-"` +} + +type NetworkObservation struct { + + // Specifies the Name of the resource group containing network resources of Azure Spring Cloud Apps. Changing this forces a new resource to be created. + AppNetworkResourceGroup *string `json:"appNetworkResourceGroup,omitempty" tf:"app_network_resource_group,omitempty"` + + // Specifies the ID of the Subnet which should host the Spring Boot Applications deployed in this Spring Cloud Service. Changing this forces a new resource to be created. 
+ AppSubnetID *string `json:"appSubnetId,omitempty" tf:"app_subnet_id,omitempty"` + + // A list of (at least 3) CIDR ranges (at least /16) which are used to host the Spring Cloud infrastructure, which must not overlap with any existing CIDR ranges in the Subnet. Changing this forces a new resource to be created. + CidrRanges []*string `json:"cidrRanges,omitempty" tf:"cidr_ranges,omitempty"` + + // Specifies the egress traffic type of the Spring Cloud Service. Possible values are loadBalancer and userDefinedRouting. Defaults to loadBalancer. Changing this forces a new resource to be created. + OutboundType *string `json:"outboundType,omitempty" tf:"outbound_type,omitempty"` + + // Ingress read time out in seconds. + ReadTimeoutSeconds *float64 `json:"readTimeoutSeconds,omitempty" tf:"read_timeout_seconds,omitempty"` + + // Specifies the Name of the resource group containing network resources of Azure Spring Cloud Service Runtime. Changing this forces a new resource to be created. + ServiceRuntimeNetworkResourceGroup *string `json:"serviceRuntimeNetworkResourceGroup,omitempty" tf:"service_runtime_network_resource_group,omitempty"` + + // Specifies the ID of the Subnet where the Service Runtime components of the Spring Cloud Service will exist. Changing this forces a new resource to be created. + ServiceRuntimeSubnetID *string `json:"serviceRuntimeSubnetId,omitempty" tf:"service_runtime_subnet_id,omitempty"` +} + +type NetworkParameters struct { + + // Specifies the Name of the resource group containing network resources of Azure Spring Cloud Apps. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + AppNetworkResourceGroup *string `json:"appNetworkResourceGroup,omitempty" tf:"app_network_resource_group,omitempty"` + + // Specifies the ID of the Subnet which should host the Spring Boot Applications deployed in this Spring Cloud Service. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + AppSubnetID *string `json:"appSubnetId,omitempty" tf:"app_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate appSubnetId. + // +kubebuilder:validation:Optional + AppSubnetIDRef *v1.Reference `json:"appSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate appSubnetId. + // +kubebuilder:validation:Optional + AppSubnetIDSelector *v1.Selector `json:"appSubnetIdSelector,omitempty" tf:"-"` + + // A list of (at least 3) CIDR ranges (at least /16) which are used to host the Spring Cloud infrastructure, which must not overlap with any existing CIDR ranges in the Subnet. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + CidrRanges []*string `json:"cidrRanges" tf:"cidr_ranges,omitempty"` + + // Specifies the egress traffic type of the Spring Cloud Service. Possible values are loadBalancer and userDefinedRouting. Defaults to loadBalancer. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + OutboundType *string `json:"outboundType,omitempty" tf:"outbound_type,omitempty"` + + // Ingress read time out in seconds. + // +kubebuilder:validation:Optional + ReadTimeoutSeconds *float64 `json:"readTimeoutSeconds,omitempty" tf:"read_timeout_seconds,omitempty"` + + // Specifies the Name of the resource group containing network resources of Azure Spring Cloud Service Runtime. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + ServiceRuntimeNetworkResourceGroup *string `json:"serviceRuntimeNetworkResourceGroup,omitempty" tf:"service_runtime_network_resource_group,omitempty"` + + // Specifies the ID of the Subnet where the Service Runtime components of the Spring Cloud Service will exist. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + ServiceRuntimeSubnetID *string `json:"serviceRuntimeSubnetId,omitempty" tf:"service_runtime_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate serviceRuntimeSubnetId. + // +kubebuilder:validation:Optional + ServiceRuntimeSubnetIDRef *v1.Reference `json:"serviceRuntimeSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate serviceRuntimeSubnetId. + // +kubebuilder:validation:Optional + ServiceRuntimeSubnetIDSelector *v1.Selector `json:"serviceRuntimeSubnetIdSelector,omitempty" tf:"-"` +} + +type RepositoryHTTPBasicAuthInitParameters struct { + + // The username that's used to access the Git repository server, required when the Git repository server supports HTTP Basic Authentication. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type RepositoryHTTPBasicAuthObservation struct { + + // The username that's used to access the Git repository server, required when the Git repository server supports HTTP Basic Authentication. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type RepositoryHTTPBasicAuthParameters struct { + + // The password used to access the Git repository server, required when the Git repository server supports HTTP Basic Authentication. 
+ // +kubebuilder:validation:Required + PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` + + // The username that's used to access the Git repository server, required when the Git repository server supports HTTP Basic Authentication. + // +kubebuilder:validation:Optional + Username *string `json:"username" tf:"username,omitempty"` +} + +type RepositoryInitParameters struct { + + // A http_basic_auth block as defined below. + HTTPBasicAuth *RepositoryHTTPBasicAuthInitParameters `json:"httpBasicAuth,omitempty" tf:"http_basic_auth,omitempty"` + + // The default label of the Git repository, should be the branch name, tag name, or commit-id of the repository. + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // A name to identify on the Git repository, required only if repos exists. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // An array of strings used to match an application name. For each pattern, use the {application}/{profile} format with wildcards. + Pattern []*string `json:"pattern,omitempty" tf:"pattern,omitempty"` + + // A ssh_auth block as defined below. + SSHAuth *RepositorySSHAuthInitParameters `json:"sshAuth,omitempty" tf:"ssh_auth,omitempty"` + + // An array of strings used to search subdirectories of the Git repository. + SearchPaths []*string `json:"searchPaths,omitempty" tf:"search_paths,omitempty"` + + // The URI of the Git repository that's used as the Config Server back end should be started with http://, https://, git@, or ssh://. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type RepositoryObservation struct { + + // A http_basic_auth block as defined below. + HTTPBasicAuth *RepositoryHTTPBasicAuthObservation `json:"httpBasicAuth,omitempty" tf:"http_basic_auth,omitempty"` + + // The default label of the Git repository, should be the branch name, tag name, or commit-id of the repository. 
+ Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // A name to identify on the Git repository, required only if repos exists. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // An array of strings used to match an application name. For each pattern, use the {application}/{profile} format with wildcards. + Pattern []*string `json:"pattern,omitempty" tf:"pattern,omitempty"` + + // A ssh_auth block as defined below. + SSHAuth *RepositorySSHAuthObservation `json:"sshAuth,omitempty" tf:"ssh_auth,omitempty"` + + // An array of strings used to search subdirectories of the Git repository. + SearchPaths []*string `json:"searchPaths,omitempty" tf:"search_paths,omitempty"` + + // The URI of the Git repository that's used as the Config Server back end should be started with http://, https://, git@, or ssh://. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type RepositoryParameters struct { + + // A http_basic_auth block as defined below. + // +kubebuilder:validation:Optional + HTTPBasicAuth *RepositoryHTTPBasicAuthParameters `json:"httpBasicAuth,omitempty" tf:"http_basic_auth,omitempty"` + + // The default label of the Git repository, should be the branch name, tag name, or commit-id of the repository. + // +kubebuilder:validation:Optional + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // A name to identify on the Git repository, required only if repos exists. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // An array of strings used to match an application name. For each pattern, use the {application}/{profile} format with wildcards. + // +kubebuilder:validation:Optional + Pattern []*string `json:"pattern,omitempty" tf:"pattern,omitempty"` + + // A ssh_auth block as defined below. 
+ // +kubebuilder:validation:Optional + SSHAuth *RepositorySSHAuthParameters `json:"sshAuth,omitempty" tf:"ssh_auth,omitempty"` + + // An array of strings used to search subdirectories of the Git repository. + // +kubebuilder:validation:Optional + SearchPaths []*string `json:"searchPaths,omitempty" tf:"search_paths,omitempty"` + + // The URI of the Git repository that's used as the Config Server back end should be started with http://, https://, git@, or ssh://. + // +kubebuilder:validation:Optional + URI *string `json:"uri" tf:"uri,omitempty"` +} + +type RepositorySSHAuthInitParameters struct { + + // The host key algorithm, should be ssh-dss, ssh-rsa, ecdsa-sha2-nistp256, ecdsa-sha2-nistp384, or ecdsa-sha2-nistp521. Required only if host-key exists. + HostKeyAlgorithm *string `json:"hostKeyAlgorithm,omitempty" tf:"host_key_algorithm,omitempty"` + + // Indicates whether the Config Server instance will fail to start if the host_key does not match. Defaults to true. + StrictHostKeyCheckingEnabled *bool `json:"strictHostKeyCheckingEnabled,omitempty" tf:"strict_host_key_checking_enabled,omitempty"` +} + +type RepositorySSHAuthObservation struct { + + // The host key algorithm, should be ssh-dss, ssh-rsa, ecdsa-sha2-nistp256, ecdsa-sha2-nistp384, or ecdsa-sha2-nistp521. Required only if host-key exists. + HostKeyAlgorithm *string `json:"hostKeyAlgorithm,omitempty" tf:"host_key_algorithm,omitempty"` + + // Indicates whether the Config Server instance will fail to start if the host_key does not match. Defaults to true. + StrictHostKeyCheckingEnabled *bool `json:"strictHostKeyCheckingEnabled,omitempty" tf:"strict_host_key_checking_enabled,omitempty"` +} + +type RepositorySSHAuthParameters struct { + + // The host key algorithm, should be ssh-dss, ssh-rsa, ecdsa-sha2-nistp256, ecdsa-sha2-nistp384, or ecdsa-sha2-nistp521. Required only if host-key exists. 
+ // +kubebuilder:validation:Optional + HostKeyAlgorithm *string `json:"hostKeyAlgorithm,omitempty" tf:"host_key_algorithm,omitempty"` + + // The host key of the Git repository server, should not include the algorithm prefix as covered by host-key-algorithm. + // +kubebuilder:validation:Optional + HostKeySecretRef *v1.SecretKeySelector `json:"hostKeySecretRef,omitempty" tf:"-"` + + // The SSH private key to access the Git repository, required when the URI starts with git@ or ssh://. + // +kubebuilder:validation:Required + PrivateKeySecretRef v1.SecretKeySelector `json:"privateKeySecretRef" tf:"-"` + + // Indicates whether the Config Server instance will fail to start if the host_key does not match. Defaults to true. + // +kubebuilder:validation:Optional + StrictHostKeyCheckingEnabled *bool `json:"strictHostKeyCheckingEnabled,omitempty" tf:"strict_host_key_checking_enabled,omitempty"` +} + +type RequiredNetworkTrafficRulesInitParameters struct { +} + +type RequiredNetworkTrafficRulesObservation struct { + + // The direction of required traffic. Possible values are Inbound, Outbound. + Direction *string `json:"direction,omitempty" tf:"direction,omitempty"` + + // The FQDN list of required traffic. + Fqdns []*string `json:"fqdns,omitempty" tf:"fqdns,omitempty"` + + // The IP list of required traffic. + IPAddresses []*string `json:"ipAddresses,omitempty" tf:"ip_addresses,omitempty"` + + // The port of required traffic. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // The protocol of required traffic. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` +} + +type RequiredNetworkTrafficRulesParameters struct { +} + +type SpringCloudServiceInitParameters struct { + + // Specifies the size for this Spring Cloud Service's default build agent pool. Possible values are S1, S2, S3, S4 and S5. This field is applicable only for Spring Cloud Service with enterprise tier. 
+ BuildAgentPoolSize *string `json:"buildAgentPoolSize,omitempty" tf:"build_agent_pool_size,omitempty"` + + // A config_server_git_setting block as defined below. This field is applicable only for Spring Cloud Service with basic and standard tier. + ConfigServerGitSetting *ConfigServerGitSettingInitParameters `json:"configServerGitSetting,omitempty" tf:"config_server_git_setting,omitempty"` + + // One or more container_registry block as defined below. This field is applicable only for Spring Cloud Service with enterprise tier. + ContainerRegistry []ContainerRegistryInitParameters `json:"containerRegistry,omitempty" tf:"container_registry,omitempty"` + + // A default_build_service block as defined below. This field is applicable only for Spring Cloud Service with enterprise tier. + DefaultBuildService *DefaultBuildServiceInitParameters `json:"defaultBuildService,omitempty" tf:"default_build_service,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Should the log stream in vnet injection instance could be accessed from Internet? + LogStreamPublicEndpointEnabled *bool `json:"logStreamPublicEndpointEnabled,omitempty" tf:"log_stream_public_endpoint_enabled,omitempty"` + + // The resource Id of the Managed Environment that the Spring Apps instance builds on. Can only be specified when sku_tier is set to StandardGen2. + ManagedEnvironmentID *string `json:"managedEnvironmentId,omitempty" tf:"managed_environment_id,omitempty"` + + // A marketplace block as defined below. Can only be specified when sku is set to E0. + Marketplace *MarketplaceInitParameters `json:"marketplace,omitempty" tf:"marketplace,omitempty"` + + // A network block as defined below. Changing this forces a new resource to be created. 
+ Network *NetworkInitParameters `json:"network,omitempty" tf:"network,omitempty"` + + // Whether enable the default Service Registry. This field is applicable only for Spring Cloud Service with enterprise tier. + ServiceRegistryEnabled *bool `json:"serviceRegistryEnabled,omitempty" tf:"service_registry_enabled,omitempty"` + + // Specifies the SKU Name for this Spring Cloud Service. Possible values are B0, S0 and E0. Defaults to S0. Changing this forces a new resource to be created. + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // Specifies the SKU Tier for this Spring Cloud Service. Possible values are Basic, Enterprise, Standard and StandardGen2. The attribute is automatically computed from API response except when managed_environment_id is defined. Changing this forces a new resource to be created. + SkuTier *string `json:"skuTier,omitempty" tf:"sku_tier,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A trace block as defined below. + Trace *TraceInitParameters `json:"trace,omitempty" tf:"trace,omitempty"` + + // Whether zone redundancy is enabled for this Spring Cloud Service. Defaults to false. + ZoneRedundant *bool `json:"zoneRedundant,omitempty" tf:"zone_redundant,omitempty"` +} + +type SpringCloudServiceObservation struct { + + // Specifies the size for this Spring Cloud Service's default build agent pool. Possible values are S1, S2, S3, S4 and S5. This field is applicable only for Spring Cloud Service with enterprise tier. + BuildAgentPoolSize *string `json:"buildAgentPoolSize,omitempty" tf:"build_agent_pool_size,omitempty"` + + // A config_server_git_setting block as defined below. This field is applicable only for Spring Cloud Service with basic and standard tier. 
+ ConfigServerGitSetting *ConfigServerGitSettingObservation `json:"configServerGitSetting,omitempty" tf:"config_server_git_setting,omitempty"` + + // One or more container_registry block as defined below. This field is applicable only for Spring Cloud Service with enterprise tier. + ContainerRegistry []ContainerRegistryObservation `json:"containerRegistry,omitempty" tf:"container_registry,omitempty"` + + // A default_build_service block as defined below. This field is applicable only for Spring Cloud Service with enterprise tier. + DefaultBuildService *DefaultBuildServiceObservation `json:"defaultBuildService,omitempty" tf:"default_build_service,omitempty"` + + // The ID of the Spring Cloud Service. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Should the log stream in vnet injection instance could be accessed from Internet? + LogStreamPublicEndpointEnabled *bool `json:"logStreamPublicEndpointEnabled,omitempty" tf:"log_stream_public_endpoint_enabled,omitempty"` + + // The resource Id of the Managed Environment that the Spring Apps instance builds on. Can only be specified when sku_tier is set to StandardGen2. + ManagedEnvironmentID *string `json:"managedEnvironmentId,omitempty" tf:"managed_environment_id,omitempty"` + + // A marketplace block as defined below. Can only be specified when sku is set to E0. + Marketplace *MarketplaceObservation `json:"marketplace,omitempty" tf:"marketplace,omitempty"` + + // A network block as defined below. Changing this forces a new resource to be created. + Network *NetworkObservation `json:"network,omitempty" tf:"network,omitempty"` + + // A list of the outbound Public IP Addresses used by this Spring Cloud Service. 
+ OutboundPublicIPAddresses []*string `json:"outboundPublicIpAddresses,omitempty" tf:"outbound_public_ip_addresses,omitempty"` + + // A list of required_network_traffic_rules blocks as defined below. + RequiredNetworkTrafficRules []RequiredNetworkTrafficRulesObservation `json:"requiredNetworkTrafficRules,omitempty" tf:"required_network_traffic_rules,omitempty"` + + // Specifies The name of the resource group in which to create the Spring Cloud Service. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Whether enable the default Service Registry. This field is applicable only for Spring Cloud Service with enterprise tier. + ServiceRegistryEnabled *bool `json:"serviceRegistryEnabled,omitempty" tf:"service_registry_enabled,omitempty"` + + // The ID of the Spring Cloud Service Registry. + ServiceRegistryID *string `json:"serviceRegistryId,omitempty" tf:"service_registry_id,omitempty"` + + // Specifies the SKU Name for this Spring Cloud Service. Possible values are B0, S0 and E0. Defaults to S0. Changing this forces a new resource to be created. + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // Specifies the SKU Tier for this Spring Cloud Service. Possible values are Basic, Enterprise, Standard and StandardGen2. The attribute is automatically computed from API response except when managed_environment_id is defined. Changing this forces a new resource to be created. + SkuTier *string `json:"skuTier,omitempty" tf:"sku_tier,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A trace block as defined below. + Trace *TraceObservation `json:"trace,omitempty" tf:"trace,omitempty"` + + // Whether zone redundancy is enabled for this Spring Cloud Service. Defaults to false. 
+ ZoneRedundant *bool `json:"zoneRedundant,omitempty" tf:"zone_redundant,omitempty"` +} + +type SpringCloudServiceParameters struct { + + // Specifies the size for this Spring Cloud Service's default build agent pool. Possible values are S1, S2, S3, S4 and S5. This field is applicable only for Spring Cloud Service with enterprise tier. + // +kubebuilder:validation:Optional + BuildAgentPoolSize *string `json:"buildAgentPoolSize,omitempty" tf:"build_agent_pool_size,omitempty"` + + // A config_server_git_setting block as defined below. This field is applicable only for Spring Cloud Service with basic and standard tier. + // +kubebuilder:validation:Optional + ConfigServerGitSetting *ConfigServerGitSettingParameters `json:"configServerGitSetting,omitempty" tf:"config_server_git_setting,omitempty"` + + // One or more container_registry block as defined below. This field is applicable only for Spring Cloud Service with enterprise tier. + // +kubebuilder:validation:Optional + ContainerRegistry []ContainerRegistryParameters `json:"containerRegistry,omitempty" tf:"container_registry,omitempty"` + + // A default_build_service block as defined below. This field is applicable only for Spring Cloud Service with enterprise tier. + // +kubebuilder:validation:Optional + DefaultBuildService *DefaultBuildServiceParameters `json:"defaultBuildService,omitempty" tf:"default_build_service,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Should the log stream in vnet injection instance could be accessed from Internet? + // +kubebuilder:validation:Optional + LogStreamPublicEndpointEnabled *bool `json:"logStreamPublicEndpointEnabled,omitempty" tf:"log_stream_public_endpoint_enabled,omitempty"` + + // The resource Id of the Managed Environment that the Spring Apps instance builds on. 
Can only be specified when sku_tier is set to StandardGen2. + // +kubebuilder:validation:Optional + ManagedEnvironmentID *string `json:"managedEnvironmentId,omitempty" tf:"managed_environment_id,omitempty"` + + // A marketplace block as defined below. Can only be specified when sku is set to E0. + // +kubebuilder:validation:Optional + Marketplace *MarketplaceParameters `json:"marketplace,omitempty" tf:"marketplace,omitempty"` + + // A network block as defined below. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Network *NetworkParameters `json:"network,omitempty" tf:"network,omitempty"` + + // Specifies The name of the resource group in which to create the Spring Cloud Service. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // Whether enable the default Service Registry. This field is applicable only for Spring Cloud Service with enterprise tier. + // +kubebuilder:validation:Optional + ServiceRegistryEnabled *bool `json:"serviceRegistryEnabled,omitempty" tf:"service_registry_enabled,omitempty"` + + // Specifies the SKU Name for this Spring Cloud Service. Possible values are B0, S0 and E0. Defaults to S0. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // Specifies the SKU Tier for this Spring Cloud Service. Possible values are Basic, Enterprise, Standard and StandardGen2. The attribute is automatically computed from API response except when managed_environment_id is defined. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + SkuTier *string `json:"skuTier,omitempty" tf:"sku_tier,omitempty"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A trace block as defined below. + // +kubebuilder:validation:Optional + Trace *TraceParameters `json:"trace,omitempty" tf:"trace,omitempty"` + + // Whether zone redundancy is enabled for this Spring Cloud Service. Defaults to false. + // +kubebuilder:validation:Optional + ZoneRedundant *bool `json:"zoneRedundant,omitempty" tf:"zone_redundant,omitempty"` +} + +type TraceInitParameters struct { + + // The connection string used for Application Insights. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/insights/v1beta1.ApplicationInsights + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("connection_string",true) + ConnectionString *string `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // Reference to a ApplicationInsights in insights to populate connectionString. + // +kubebuilder:validation:Optional + ConnectionStringRef *v1.Reference `json:"connectionStringRef,omitempty" tf:"-"` + + // Selector for a ApplicationInsights in insights to populate connectionString. + // +kubebuilder:validation:Optional + ConnectionStringSelector *v1.Selector `json:"connectionStringSelector,omitempty" tf:"-"` + + // The sampling rate of Application Insights Agent. Must be between 0.0 and 100.0. 
Defaults to 10.0. + SampleRate *float64 `json:"sampleRate,omitempty" tf:"sample_rate,omitempty"` +} + +type TraceObservation struct { + + // The connection string used for Application Insights. + ConnectionString *string `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // The sampling rate of Application Insights Agent. Must be between 0.0 and 100.0. Defaults to 10.0. + SampleRate *float64 `json:"sampleRate,omitempty" tf:"sample_rate,omitempty"` +} + +type TraceParameters struct { + + // The connection string used for Application Insights. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/insights/v1beta1.ApplicationInsights + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("connection_string",true) + // +kubebuilder:validation:Optional + ConnectionString *string `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // Reference to a ApplicationInsights in insights to populate connectionString. + // +kubebuilder:validation:Optional + ConnectionStringRef *v1.Reference `json:"connectionStringRef,omitempty" tf:"-"` + + // Selector for a ApplicationInsights in insights to populate connectionString. + // +kubebuilder:validation:Optional + ConnectionStringSelector *v1.Selector `json:"connectionStringSelector,omitempty" tf:"-"` + + // The sampling rate of Application Insights Agent. Must be between 0.0 and 100.0. Defaults to 10.0. + // +kubebuilder:validation:Optional + SampleRate *float64 `json:"sampleRate,omitempty" tf:"sample_rate,omitempty"` +} + +// SpringCloudServiceSpec defines the desired state of SpringCloudService +type SpringCloudServiceSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider SpringCloudServiceParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. 
+ // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider SpringCloudServiceInitParameters `json:"initProvider,omitempty"` +} + +// SpringCloudServiceStatus defines the observed state of SpringCloudService. +type SpringCloudServiceStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SpringCloudServiceObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// SpringCloudService is the Schema for the SpringCloudServices API. Manages an Azure Spring Cloud Service. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type SpringCloudService struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + Spec SpringCloudServiceSpec `json:"spec"` + Status SpringCloudServiceStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SpringCloudServiceList contains a list of SpringCloudServices +type SpringCloudServiceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SpringCloudService `json:"items"` +} + +// Repository type metadata. +var ( + SpringCloudService_Kind = "SpringCloudService" + SpringCloudService_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: SpringCloudService_Kind}.String() + SpringCloudService_KindAPIVersion = SpringCloudService_Kind + "." 
+ CRDGroupVersion.String() + SpringCloudService_GroupVersionKind = CRDGroupVersion.WithKind(SpringCloudService_Kind) +) + +func init() { + SchemeBuilder.Register(&SpringCloudService{}, &SpringCloudServiceList{}) +} diff --git a/apis/authorization/v1beta1/zz_generated.conversion_hubs.go b/apis/authorization/v1beta1/zz_generated.conversion_hubs.go index 37d2e57c3..6eba8265d 100755 --- a/apis/authorization/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/authorization/v1beta1/zz_generated.conversion_hubs.go @@ -12,12 +12,6 @@ func (tr *ManagementLock) Hub() {} // Hub marks this type as a conversion hub. func (tr *PolicyDefinition) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *ResourceGroupPolicyAssignment) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *ResourcePolicyAssignment) Hub() {} - // Hub marks this type as a conversion hub. func (tr *ResourcePolicyExemption) Hub() {} @@ -27,8 +21,5 @@ func (tr *RoleAssignment) Hub() {} // Hub marks this type as a conversion hub. func (tr *RoleDefinition) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *SubscriptionPolicyAssignment) Hub() {} - // Hub marks this type as a conversion hub. func (tr *SubscriptionPolicyExemption) Hub() {} diff --git a/apis/authorization/v1beta1/zz_generated.conversion_spokes.go b/apis/authorization/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..7463752ff --- /dev/null +++ b/apis/authorization/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,74 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this ResourceGroupPolicyAssignment to the hub type. 
+func (tr *ResourceGroupPolicyAssignment) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ResourceGroupPolicyAssignment type. +func (tr *ResourceGroupPolicyAssignment) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this ResourcePolicyAssignment to the hub type. +func (tr *ResourcePolicyAssignment) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ResourcePolicyAssignment type. 
+func (tr *ResourcePolicyAssignment) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this SubscriptionPolicyAssignment to the hub type. +func (tr *SubscriptionPolicyAssignment) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the SubscriptionPolicyAssignment type. 
+func (tr *SubscriptionPolicyAssignment) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/authorization/v1beta1/zz_generated.resolvers.go b/apis/authorization/v1beta1/zz_generated.resolvers.go index 970908c65..995ed69ea 100644 --- a/apis/authorization/v1beta1/zz_generated.resolvers.go +++ b/apis/authorization/v1beta1/zz_generated.resolvers.go @@ -214,7 +214,7 @@ func (mg *ResourcePolicyExemption) ResolveReferences(ctx context.Context, c clie var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("authorization.azure.upbound.io", "v1beta1", "ResourcePolicyAssignment", "ResourcePolicyAssignmentList") + m, l, err = apisresolver.GetManagedResource("authorization.azure.upbound.io", "v1beta2", "ResourcePolicyAssignment", "ResourcePolicyAssignmentList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -233,7 +233,7 @@ func (mg *ResourcePolicyExemption) ResolveReferences(ctx context.Context, c clie mg.Spec.ForProvider.PolicyAssignmentID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.PolicyAssignmentIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("authorization.azure.upbound.io", "v1beta1", "ResourcePolicyAssignment", "ResourcePolicyAssignmentList") + m, l, err = apisresolver.GetManagedResource("authorization.azure.upbound.io", "v1beta2", "ResourcePolicyAssignment", "ResourcePolicyAssignmentList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -252,7 
+252,7 @@ func (mg *ResourcePolicyExemption) ResolveReferences(ctx context.Context, c clie mg.Spec.ForProvider.ResourceID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("authorization.azure.upbound.io", "v1beta1", "ResourcePolicyAssignment", "ResourcePolicyAssignmentList") + m, l, err = apisresolver.GetManagedResource("authorization.azure.upbound.io", "v1beta2", "ResourcePolicyAssignment", "ResourcePolicyAssignmentList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -271,7 +271,7 @@ func (mg *ResourcePolicyExemption) ResolveReferences(ctx context.Context, c clie mg.Spec.InitProvider.PolicyAssignmentID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.PolicyAssignmentIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("authorization.azure.upbound.io", "v1beta1", "ResourcePolicyAssignment", "ResourcePolicyAssignmentList") + m, l, err = apisresolver.GetManagedResource("authorization.azure.upbound.io", "v1beta2", "ResourcePolicyAssignment", "ResourcePolicyAssignmentList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -402,7 +402,7 @@ func (mg *SubscriptionPolicyExemption) ResolveReferences(ctx context.Context, c var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("authorization.azure.upbound.io", "v1beta1", "SubscriptionPolicyAssignment", "SubscriptionPolicyAssignmentList") + m, l, err = apisresolver.GetManagedResource("authorization.azure.upbound.io", "v1beta2", "SubscriptionPolicyAssignment", "SubscriptionPolicyAssignmentList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -421,7 +421,7 @@ func (mg 
*SubscriptionPolicyExemption) ResolveReferences(ctx context.Context, c mg.Spec.ForProvider.PolicyAssignmentID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.PolicyAssignmentIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("authorization.azure.upbound.io", "v1beta1", "SubscriptionPolicyAssignment", "SubscriptionPolicyAssignmentList") + m, l, err = apisresolver.GetManagedResource("authorization.azure.upbound.io", "v1beta2", "SubscriptionPolicyAssignment", "SubscriptionPolicyAssignmentList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/authorization/v1beta1/zz_resourcepolicyexemption_types.go b/apis/authorization/v1beta1/zz_resourcepolicyexemption_types.go index ea02abd08..e9fd95dca 100755 --- a/apis/authorization/v1beta1/zz_resourcepolicyexemption_types.go +++ b/apis/authorization/v1beta1/zz_resourcepolicyexemption_types.go @@ -34,7 +34,7 @@ type ResourcePolicyExemptionInitParameters struct { Name *string `json:"name,omitempty" tf:"name,omitempty"` // The ID of the Policy Assignment to be exempted at the specified Scope. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/authorization/v1beta1.ResourcePolicyAssignment + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/authorization/v1beta2.ResourcePolicyAssignment // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() PolicyAssignmentID *string `json:"policyAssignmentId,omitempty" tf:"policy_assignment_id,omitempty"` @@ -50,7 +50,7 @@ type ResourcePolicyExemptionInitParameters struct { PolicyDefinitionReferenceIds []*string `json:"policyDefinitionReferenceIds,omitempty" tf:"policy_definition_reference_ids,omitempty"` // The Resource ID where the Policy Exemption should be applied. 
Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/authorization/v1beta1.ResourcePolicyAssignment + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/authorization/v1beta2.ResourcePolicyAssignment // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("resource_id",false) ResourceID *string `json:"resourceId,omitempty" tf:"resource_id,omitempty"` @@ -123,7 +123,7 @@ type ResourcePolicyExemptionParameters struct { Name *string `json:"name,omitempty" tf:"name,omitempty"` // The ID of the Policy Assignment to be exempted at the specified Scope. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/authorization/v1beta1.ResourcePolicyAssignment + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/authorization/v1beta2.ResourcePolicyAssignment // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional PolicyAssignmentID *string `json:"policyAssignmentId,omitempty" tf:"policy_assignment_id,omitempty"` @@ -141,7 +141,7 @@ type ResourcePolicyExemptionParameters struct { PolicyDefinitionReferenceIds []*string `json:"policyDefinitionReferenceIds,omitempty" tf:"policy_definition_reference_ids,omitempty"` // The Resource ID where the Policy Exemption should be applied. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/authorization/v1beta1.ResourcePolicyAssignment + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/authorization/v1beta2.ResourcePolicyAssignment // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("resource_id",false) // +kubebuilder:validation:Optional ResourceID *string `json:"resourceId,omitempty" tf:"resource_id,omitempty"` diff --git a/apis/authorization/v1beta1/zz_subscriptionpolicyexemption_types.go b/apis/authorization/v1beta1/zz_subscriptionpolicyexemption_types.go index 3cd90786a..81d27b1c8 100755 --- a/apis/authorization/v1beta1/zz_subscriptionpolicyexemption_types.go +++ b/apis/authorization/v1beta1/zz_subscriptionpolicyexemption_types.go @@ -31,7 +31,7 @@ type SubscriptionPolicyExemptionInitParameters struct { Metadata *string `json:"metadata,omitempty" tf:"metadata,omitempty"` // The ID of the Policy Assignment to be exempted at the specified Scope. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/authorization/v1beta1.SubscriptionPolicyAssignment + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/authorization/v1beta2.SubscriptionPolicyAssignment // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() PolicyAssignmentID *string `json:"policyAssignmentId,omitempty" tf:"policy_assignment_id,omitempty"` @@ -103,7 +103,7 @@ type SubscriptionPolicyExemptionParameters struct { Metadata *string `json:"metadata,omitempty" tf:"metadata,omitempty"` // The ID of the Policy Assignment to be exempted at the specified Scope. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/authorization/v1beta1.SubscriptionPolicyAssignment + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/authorization/v1beta2.SubscriptionPolicyAssignment // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional PolicyAssignmentID *string `json:"policyAssignmentId,omitempty" tf:"policy_assignment_id,omitempty"` diff --git a/apis/authorization/v1beta2/zz_generated.conversion_hubs.go b/apis/authorization/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 000000000..b7032b134 --- /dev/null +++ b/apis/authorization/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,16 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *ResourceGroupPolicyAssignment) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *ResourcePolicyAssignment) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *SubscriptionPolicyAssignment) Hub() {} diff --git a/apis/authorization/v1beta2/zz_generated.deepcopy.go b/apis/authorization/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..1054bfaf5 --- /dev/null +++ b/apis/authorization/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,2980 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentityInitParameters) DeepCopyInto(out *IdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityInitParameters. +func (in *IdentityInitParameters) DeepCopy() *IdentityInitParameters { + if in == nil { + return nil + } + out := new(IdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityObservation) DeepCopyInto(out *IdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityObservation. +func (in *IdentityObservation) DeepCopy() *IdentityObservation { + if in == nil { + return nil + } + out := new(IdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentityParameters) DeepCopyInto(out *IdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityParameters. +func (in *IdentityParameters) DeepCopy() *IdentityParameters { + if in == nil { + return nil + } + out := new(IdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NonComplianceMessageInitParameters) DeepCopyInto(out *NonComplianceMessageInitParameters) { + *out = *in + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = new(string) + **out = **in + } + if in.PolicyDefinitionReferenceID != nil { + in, out := &in.PolicyDefinitionReferenceID, &out.PolicyDefinitionReferenceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NonComplianceMessageInitParameters. +func (in *NonComplianceMessageInitParameters) DeepCopy() *NonComplianceMessageInitParameters { + if in == nil { + return nil + } + out := new(NonComplianceMessageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NonComplianceMessageObservation) DeepCopyInto(out *NonComplianceMessageObservation) { + *out = *in + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = new(string) + **out = **in + } + if in.PolicyDefinitionReferenceID != nil { + in, out := &in.PolicyDefinitionReferenceID, &out.PolicyDefinitionReferenceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NonComplianceMessageObservation. +func (in *NonComplianceMessageObservation) DeepCopy() *NonComplianceMessageObservation { + if in == nil { + return nil + } + out := new(NonComplianceMessageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NonComplianceMessageParameters) DeepCopyInto(out *NonComplianceMessageParameters) { + *out = *in + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = new(string) + **out = **in + } + if in.PolicyDefinitionReferenceID != nil { + in, out := &in.PolicyDefinitionReferenceID, &out.PolicyDefinitionReferenceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NonComplianceMessageParameters. +func (in *NonComplianceMessageParameters) DeepCopy() *NonComplianceMessageParameters { + if in == nil { + return nil + } + out := new(NonComplianceMessageParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OverridesInitParameters) DeepCopyInto(out *OverridesInitParameters) { + *out = *in + if in.Selectors != nil { + in, out := &in.Selectors, &out.Selectors + *out = make([]SelectorsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverridesInitParameters. +func (in *OverridesInitParameters) DeepCopy() *OverridesInitParameters { + if in == nil { + return nil + } + out := new(OverridesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OverridesObservation) DeepCopyInto(out *OverridesObservation) { + *out = *in + if in.Selectors != nil { + in, out := &in.Selectors, &out.Selectors + *out = make([]SelectorsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverridesObservation. +func (in *OverridesObservation) DeepCopy() *OverridesObservation { + if in == nil { + return nil + } + out := new(OverridesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OverridesParameters) DeepCopyInto(out *OverridesParameters) { + *out = *in + if in.Selectors != nil { + in, out := &in.Selectors, &out.Selectors + *out = make([]SelectorsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverridesParameters. +func (in *OverridesParameters) DeepCopy() *OverridesParameters { + if in == nil { + return nil + } + out := new(OverridesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OverridesSelectorsInitParameters) DeepCopyInto(out *OverridesSelectorsInitParameters) { + *out = *in + if in.In != nil { + in, out := &in.In, &out.In + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NotIn != nil { + in, out := &in.NotIn, &out.NotIn + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverridesSelectorsInitParameters. +func (in *OverridesSelectorsInitParameters) DeepCopy() *OverridesSelectorsInitParameters { + if in == nil { + return nil + } + out := new(OverridesSelectorsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OverridesSelectorsObservation) DeepCopyInto(out *OverridesSelectorsObservation) { + *out = *in + if in.In != nil { + in, out := &in.In, &out.In + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(string) + **out = **in + } + if in.NotIn != nil { + in, out := &in.NotIn, &out.NotIn + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverridesSelectorsObservation. +func (in *OverridesSelectorsObservation) DeepCopy() *OverridesSelectorsObservation { + if in == nil { + return nil + } + out := new(OverridesSelectorsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OverridesSelectorsParameters) DeepCopyInto(out *OverridesSelectorsParameters) { + *out = *in + if in.In != nil { + in, out := &in.In, &out.In + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NotIn != nil { + in, out := &in.NotIn, &out.NotIn + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverridesSelectorsParameters. 
+func (in *OverridesSelectorsParameters) DeepCopy() *OverridesSelectorsParameters { + if in == nil { + return nil + } + out := new(OverridesSelectorsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceGroupPolicyAssignment) DeepCopyInto(out *ResourceGroupPolicyAssignment) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceGroupPolicyAssignment. +func (in *ResourceGroupPolicyAssignment) DeepCopy() *ResourceGroupPolicyAssignment { + if in == nil { + return nil + } + out := new(ResourceGroupPolicyAssignment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceGroupPolicyAssignment) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceGroupPolicyAssignmentInitParameters) DeepCopyInto(out *ResourceGroupPolicyAssignmentInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.Enforce != nil { + in, out := &in.Enforce, &out.Enforce + *out = new(bool) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = new(string) + **out = **in + } + if in.NonComplianceMessage != nil { + in, out := &in.NonComplianceMessage, &out.NonComplianceMessage + *out = make([]NonComplianceMessageInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NotScopes != nil { + in, out := &in.NotScopes, &out.NotScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Overrides != nil { + in, out := &in.Overrides, &out.Overrides + *out = make([]OverridesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.PolicyDefinitionID != nil { + in, out := &in.PolicyDefinitionID, &out.PolicyDefinitionID + *out = new(string) + **out = **in + } + if in.PolicyDefinitionIDRef != nil { + in, out := &in.PolicyDefinitionIDRef, &out.PolicyDefinitionIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PolicyDefinitionIDSelector != nil { + in, out := &in.PolicyDefinitionIDSelector, 
&out.PolicyDefinitionIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupID != nil { + in, out := &in.ResourceGroupID, &out.ResourceGroupID + *out = new(string) + **out = **in + } + if in.ResourceGroupIDRef != nil { + in, out := &in.ResourceGroupIDRef, &out.ResourceGroupIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupIDSelector != nil { + in, out := &in.ResourceGroupIDSelector, &out.ResourceGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ResourceSelectors != nil { + in, out := &in.ResourceSelectors, &out.ResourceSelectors + *out = make([]ResourceSelectorsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceGroupPolicyAssignmentInitParameters. +func (in *ResourceGroupPolicyAssignmentInitParameters) DeepCopy() *ResourceGroupPolicyAssignmentInitParameters { + if in == nil { + return nil + } + out := new(ResourceGroupPolicyAssignmentInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceGroupPolicyAssignmentList) DeepCopyInto(out *ResourceGroupPolicyAssignmentList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ResourceGroupPolicyAssignment, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceGroupPolicyAssignmentList. 
+func (in *ResourceGroupPolicyAssignmentList) DeepCopy() *ResourceGroupPolicyAssignmentList { + if in == nil { + return nil + } + out := new(ResourceGroupPolicyAssignmentList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceGroupPolicyAssignmentList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceGroupPolicyAssignmentObservation) DeepCopyInto(out *ResourceGroupPolicyAssignmentObservation) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.Enforce != nil { + in, out := &in.Enforce, &out.Enforce + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = new(string) + **out = **in + } + if in.NonComplianceMessage != nil { + in, out := &in.NonComplianceMessage, &out.NonComplianceMessage + *out = make([]NonComplianceMessageObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NotScopes != nil { + in, out := &in.NotScopes, &out.NotScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Overrides != nil { + in, out := 
&in.Overrides, &out.Overrides + *out = make([]OverridesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.PolicyDefinitionID != nil { + in, out := &in.PolicyDefinitionID, &out.PolicyDefinitionID + *out = new(string) + **out = **in + } + if in.ResourceGroupID != nil { + in, out := &in.ResourceGroupID, &out.ResourceGroupID + *out = new(string) + **out = **in + } + if in.ResourceSelectors != nil { + in, out := &in.ResourceSelectors, &out.ResourceSelectors + *out = make([]ResourceSelectorsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceGroupPolicyAssignmentObservation. +func (in *ResourceGroupPolicyAssignmentObservation) DeepCopy() *ResourceGroupPolicyAssignmentObservation { + if in == nil { + return nil + } + out := new(ResourceGroupPolicyAssignmentObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceGroupPolicyAssignmentParameters) DeepCopyInto(out *ResourceGroupPolicyAssignmentParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.Enforce != nil { + in, out := &in.Enforce, &out.Enforce + *out = new(bool) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = new(string) + **out = **in + } + if in.NonComplianceMessage != nil { + in, out := &in.NonComplianceMessage, &out.NonComplianceMessage + *out = make([]NonComplianceMessageParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NotScopes != nil { + in, out := &in.NotScopes, &out.NotScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Overrides != nil { + in, out := &in.Overrides, &out.Overrides + *out = make([]OverridesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.PolicyDefinitionID != nil { + in, out := &in.PolicyDefinitionID, &out.PolicyDefinitionID + *out = new(string) + **out = **in + } + if in.PolicyDefinitionIDRef != nil { + in, out := &in.PolicyDefinitionIDRef, &out.PolicyDefinitionIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PolicyDefinitionIDSelector != nil { + in, out := &in.PolicyDefinitionIDSelector, &out.PolicyDefinitionIDSelector + *out = 
new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupID != nil { + in, out := &in.ResourceGroupID, &out.ResourceGroupID + *out = new(string) + **out = **in + } + if in.ResourceGroupIDRef != nil { + in, out := &in.ResourceGroupIDRef, &out.ResourceGroupIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupIDSelector != nil { + in, out := &in.ResourceGroupIDSelector, &out.ResourceGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ResourceSelectors != nil { + in, out := &in.ResourceSelectors, &out.ResourceSelectors + *out = make([]ResourceSelectorsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceGroupPolicyAssignmentParameters. +func (in *ResourceGroupPolicyAssignmentParameters) DeepCopy() *ResourceGroupPolicyAssignmentParameters { + if in == nil { + return nil + } + out := new(ResourceGroupPolicyAssignmentParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceGroupPolicyAssignmentSpec) DeepCopyInto(out *ResourceGroupPolicyAssignmentSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceGroupPolicyAssignmentSpec. +func (in *ResourceGroupPolicyAssignmentSpec) DeepCopy() *ResourceGroupPolicyAssignmentSpec { + if in == nil { + return nil + } + out := new(ResourceGroupPolicyAssignmentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceGroupPolicyAssignmentStatus) DeepCopyInto(out *ResourceGroupPolicyAssignmentStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceGroupPolicyAssignmentStatus. +func (in *ResourceGroupPolicyAssignmentStatus) DeepCopy() *ResourceGroupPolicyAssignmentStatus { + if in == nil { + return nil + } + out := new(ResourceGroupPolicyAssignmentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcePolicyAssignment) DeepCopyInto(out *ResourcePolicyAssignment) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePolicyAssignment. +func (in *ResourcePolicyAssignment) DeepCopy() *ResourcePolicyAssignment { + if in == nil { + return nil + } + out := new(ResourcePolicyAssignment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourcePolicyAssignment) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourcePolicyAssignmentIdentityInitParameters) DeepCopyInto(out *ResourcePolicyAssignmentIdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePolicyAssignmentIdentityInitParameters. +func (in *ResourcePolicyAssignmentIdentityInitParameters) DeepCopy() *ResourcePolicyAssignmentIdentityInitParameters { + if in == nil { + return nil + } + out := new(ResourcePolicyAssignmentIdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcePolicyAssignmentIdentityObservation) DeepCopyInto(out *ResourcePolicyAssignmentIdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePolicyAssignmentIdentityObservation. 
+func (in *ResourcePolicyAssignmentIdentityObservation) DeepCopy() *ResourcePolicyAssignmentIdentityObservation { + if in == nil { + return nil + } + out := new(ResourcePolicyAssignmentIdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcePolicyAssignmentIdentityParameters) DeepCopyInto(out *ResourcePolicyAssignmentIdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePolicyAssignmentIdentityParameters. +func (in *ResourcePolicyAssignmentIdentityParameters) DeepCopy() *ResourcePolicyAssignmentIdentityParameters { + if in == nil { + return nil + } + out := new(ResourcePolicyAssignmentIdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourcePolicyAssignmentInitParameters) DeepCopyInto(out *ResourcePolicyAssignmentInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.Enforce != nil { + in, out := &in.Enforce, &out.Enforce + *out = new(bool) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(ResourcePolicyAssignmentIdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NonComplianceMessage != nil { + in, out := &in.NonComplianceMessage, &out.NonComplianceMessage + *out = make([]ResourcePolicyAssignmentNonComplianceMessageInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NotScopes != nil { + in, out := &in.NotScopes, &out.NotScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Overrides != nil { + in, out := &in.Overrides, &out.Overrides + *out = make([]ResourcePolicyAssignmentOverridesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.PolicyDefinitionID != nil { + in, out := &in.PolicyDefinitionID, &out.PolicyDefinitionID + *out = new(string) + **out = **in + } + if in.PolicyDefinitionIDRef != nil { + in, out := &in.PolicyDefinitionIDRef, &out.PolicyDefinitionIDRef + *out = 
new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PolicyDefinitionIDSelector != nil { + in, out := &in.PolicyDefinitionIDSelector, &out.PolicyDefinitionIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ResourceID != nil { + in, out := &in.ResourceID, &out.ResourceID + *out = new(string) + **out = **in + } + if in.ResourceSelectors != nil { + in, out := &in.ResourceSelectors, &out.ResourceSelectors + *out = make([]ResourcePolicyAssignmentResourceSelectorsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePolicyAssignmentInitParameters. +func (in *ResourcePolicyAssignmentInitParameters) DeepCopy() *ResourcePolicyAssignmentInitParameters { + if in == nil { + return nil + } + out := new(ResourcePolicyAssignmentInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcePolicyAssignmentList) DeepCopyInto(out *ResourcePolicyAssignmentList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ResourcePolicyAssignment, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePolicyAssignmentList. +func (in *ResourcePolicyAssignmentList) DeepCopy() *ResourcePolicyAssignmentList { + if in == nil { + return nil + } + out := new(ResourcePolicyAssignmentList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ResourcePolicyAssignmentList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcePolicyAssignmentNonComplianceMessageInitParameters) DeepCopyInto(out *ResourcePolicyAssignmentNonComplianceMessageInitParameters) { + *out = *in + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = new(string) + **out = **in + } + if in.PolicyDefinitionReferenceID != nil { + in, out := &in.PolicyDefinitionReferenceID, &out.PolicyDefinitionReferenceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePolicyAssignmentNonComplianceMessageInitParameters. +func (in *ResourcePolicyAssignmentNonComplianceMessageInitParameters) DeepCopy() *ResourcePolicyAssignmentNonComplianceMessageInitParameters { + if in == nil { + return nil + } + out := new(ResourcePolicyAssignmentNonComplianceMessageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcePolicyAssignmentNonComplianceMessageObservation) DeepCopyInto(out *ResourcePolicyAssignmentNonComplianceMessageObservation) { + *out = *in + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = new(string) + **out = **in + } + if in.PolicyDefinitionReferenceID != nil { + in, out := &in.PolicyDefinitionReferenceID, &out.PolicyDefinitionReferenceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePolicyAssignmentNonComplianceMessageObservation. 
+func (in *ResourcePolicyAssignmentNonComplianceMessageObservation) DeepCopy() *ResourcePolicyAssignmentNonComplianceMessageObservation { + if in == nil { + return nil + } + out := new(ResourcePolicyAssignmentNonComplianceMessageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcePolicyAssignmentNonComplianceMessageParameters) DeepCopyInto(out *ResourcePolicyAssignmentNonComplianceMessageParameters) { + *out = *in + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = new(string) + **out = **in + } + if in.PolicyDefinitionReferenceID != nil { + in, out := &in.PolicyDefinitionReferenceID, &out.PolicyDefinitionReferenceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePolicyAssignmentNonComplianceMessageParameters. +func (in *ResourcePolicyAssignmentNonComplianceMessageParameters) DeepCopy() *ResourcePolicyAssignmentNonComplianceMessageParameters { + if in == nil { + return nil + } + out := new(ResourcePolicyAssignmentNonComplianceMessageParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourcePolicyAssignmentObservation) DeepCopyInto(out *ResourcePolicyAssignmentObservation) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.Enforce != nil { + in, out := &in.Enforce, &out.Enforce + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(ResourcePolicyAssignmentIdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NonComplianceMessage != nil { + in, out := &in.NonComplianceMessage, &out.NonComplianceMessage + *out = make([]ResourcePolicyAssignmentNonComplianceMessageObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NotScopes != nil { + in, out := &in.NotScopes, &out.NotScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Overrides != nil { + in, out := &in.Overrides, &out.Overrides + *out = make([]ResourcePolicyAssignmentOverridesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.PolicyDefinitionID != nil { + in, out := &in.PolicyDefinitionID, &out.PolicyDefinitionID + *out = new(string) + **out = **in + } + if in.ResourceID != nil { + in, out := 
&in.ResourceID, &out.ResourceID + *out = new(string) + **out = **in + } + if in.ResourceSelectors != nil { + in, out := &in.ResourceSelectors, &out.ResourceSelectors + *out = make([]ResourcePolicyAssignmentResourceSelectorsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePolicyAssignmentObservation. +func (in *ResourcePolicyAssignmentObservation) DeepCopy() *ResourcePolicyAssignmentObservation { + if in == nil { + return nil + } + out := new(ResourcePolicyAssignmentObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcePolicyAssignmentOverridesInitParameters) DeepCopyInto(out *ResourcePolicyAssignmentOverridesInitParameters) { + *out = *in + if in.Selectors != nil { + in, out := &in.Selectors, &out.Selectors + *out = make([]OverridesSelectorsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePolicyAssignmentOverridesInitParameters. +func (in *ResourcePolicyAssignmentOverridesInitParameters) DeepCopy() *ResourcePolicyAssignmentOverridesInitParameters { + if in == nil { + return nil + } + out := new(ResourcePolicyAssignmentOverridesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourcePolicyAssignmentOverridesObservation) DeepCopyInto(out *ResourcePolicyAssignmentOverridesObservation) { + *out = *in + if in.Selectors != nil { + in, out := &in.Selectors, &out.Selectors + *out = make([]OverridesSelectorsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePolicyAssignmentOverridesObservation. +func (in *ResourcePolicyAssignmentOverridesObservation) DeepCopy() *ResourcePolicyAssignmentOverridesObservation { + if in == nil { + return nil + } + out := new(ResourcePolicyAssignmentOverridesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcePolicyAssignmentOverridesParameters) DeepCopyInto(out *ResourcePolicyAssignmentOverridesParameters) { + *out = *in + if in.Selectors != nil { + in, out := &in.Selectors, &out.Selectors + *out = make([]OverridesSelectorsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePolicyAssignmentOverridesParameters. +func (in *ResourcePolicyAssignmentOverridesParameters) DeepCopy() *ResourcePolicyAssignmentOverridesParameters { + if in == nil { + return nil + } + out := new(ResourcePolicyAssignmentOverridesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourcePolicyAssignmentParameters) DeepCopyInto(out *ResourcePolicyAssignmentParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.Enforce != nil { + in, out := &in.Enforce, &out.Enforce + *out = new(bool) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(ResourcePolicyAssignmentIdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NonComplianceMessage != nil { + in, out := &in.NonComplianceMessage, &out.NonComplianceMessage + *out = make([]ResourcePolicyAssignmentNonComplianceMessageParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NotScopes != nil { + in, out := &in.NotScopes, &out.NotScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Overrides != nil { + in, out := &in.Overrides, &out.Overrides + *out = make([]ResourcePolicyAssignmentOverridesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.PolicyDefinitionID != nil { + in, out := &in.PolicyDefinitionID, &out.PolicyDefinitionID + *out = new(string) + **out = **in + } + if in.PolicyDefinitionIDRef != nil { + in, out := &in.PolicyDefinitionIDRef, &out.PolicyDefinitionIDRef + *out = new(v1.Reference) + 
(*in).DeepCopyInto(*out) + } + if in.PolicyDefinitionIDSelector != nil { + in, out := &in.PolicyDefinitionIDSelector, &out.PolicyDefinitionIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ResourceID != nil { + in, out := &in.ResourceID, &out.ResourceID + *out = new(string) + **out = **in + } + if in.ResourceSelectors != nil { + in, out := &in.ResourceSelectors, &out.ResourceSelectors + *out = make([]ResourcePolicyAssignmentResourceSelectorsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePolicyAssignmentParameters. +func (in *ResourcePolicyAssignmentParameters) DeepCopy() *ResourcePolicyAssignmentParameters { + if in == nil { + return nil + } + out := new(ResourcePolicyAssignmentParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcePolicyAssignmentResourceSelectorsInitParameters) DeepCopyInto(out *ResourcePolicyAssignmentResourceSelectorsInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Selectors != nil { + in, out := &in.Selectors, &out.Selectors + *out = make([]ResourcePolicyAssignmentResourceSelectorsSelectorsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePolicyAssignmentResourceSelectorsInitParameters. 
+func (in *ResourcePolicyAssignmentResourceSelectorsInitParameters) DeepCopy() *ResourcePolicyAssignmentResourceSelectorsInitParameters { + if in == nil { + return nil + } + out := new(ResourcePolicyAssignmentResourceSelectorsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcePolicyAssignmentResourceSelectorsObservation) DeepCopyInto(out *ResourcePolicyAssignmentResourceSelectorsObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Selectors != nil { + in, out := &in.Selectors, &out.Selectors + *out = make([]ResourcePolicyAssignmentResourceSelectorsSelectorsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePolicyAssignmentResourceSelectorsObservation. +func (in *ResourcePolicyAssignmentResourceSelectorsObservation) DeepCopy() *ResourcePolicyAssignmentResourceSelectorsObservation { + if in == nil { + return nil + } + out := new(ResourcePolicyAssignmentResourceSelectorsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourcePolicyAssignmentResourceSelectorsParameters) DeepCopyInto(out *ResourcePolicyAssignmentResourceSelectorsParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Selectors != nil { + in, out := &in.Selectors, &out.Selectors + *out = make([]ResourcePolicyAssignmentResourceSelectorsSelectorsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePolicyAssignmentResourceSelectorsParameters. +func (in *ResourcePolicyAssignmentResourceSelectorsParameters) DeepCopy() *ResourcePolicyAssignmentResourceSelectorsParameters { + if in == nil { + return nil + } + out := new(ResourcePolicyAssignmentResourceSelectorsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcePolicyAssignmentResourceSelectorsSelectorsInitParameters) DeepCopyInto(out *ResourcePolicyAssignmentResourceSelectorsSelectorsInitParameters) { + *out = *in + if in.In != nil { + in, out := &in.In, &out.In + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(string) + **out = **in + } + if in.NotIn != nil { + in, out := &in.NotIn, &out.NotIn + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePolicyAssignmentResourceSelectorsSelectorsInitParameters. 
+func (in *ResourcePolicyAssignmentResourceSelectorsSelectorsInitParameters) DeepCopy() *ResourcePolicyAssignmentResourceSelectorsSelectorsInitParameters { + if in == nil { + return nil + } + out := new(ResourcePolicyAssignmentResourceSelectorsSelectorsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcePolicyAssignmentResourceSelectorsSelectorsObservation) DeepCopyInto(out *ResourcePolicyAssignmentResourceSelectorsSelectorsObservation) { + *out = *in + if in.In != nil { + in, out := &in.In, &out.In + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(string) + **out = **in + } + if in.NotIn != nil { + in, out := &in.NotIn, &out.NotIn + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePolicyAssignmentResourceSelectorsSelectorsObservation. +func (in *ResourcePolicyAssignmentResourceSelectorsSelectorsObservation) DeepCopy() *ResourcePolicyAssignmentResourceSelectorsSelectorsObservation { + if in == nil { + return nil + } + out := new(ResourcePolicyAssignmentResourceSelectorsSelectorsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourcePolicyAssignmentResourceSelectorsSelectorsParameters) DeepCopyInto(out *ResourcePolicyAssignmentResourceSelectorsSelectorsParameters) { + *out = *in + if in.In != nil { + in, out := &in.In, &out.In + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(string) + **out = **in + } + if in.NotIn != nil { + in, out := &in.NotIn, &out.NotIn + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePolicyAssignmentResourceSelectorsSelectorsParameters. +func (in *ResourcePolicyAssignmentResourceSelectorsSelectorsParameters) DeepCopy() *ResourcePolicyAssignmentResourceSelectorsSelectorsParameters { + if in == nil { + return nil + } + out := new(ResourcePolicyAssignmentResourceSelectorsSelectorsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcePolicyAssignmentSpec) DeepCopyInto(out *ResourcePolicyAssignmentSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePolicyAssignmentSpec. +func (in *ResourcePolicyAssignmentSpec) DeepCopy() *ResourcePolicyAssignmentSpec { + if in == nil { + return nil + } + out := new(ResourcePolicyAssignmentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourcePolicyAssignmentStatus) DeepCopyInto(out *ResourcePolicyAssignmentStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePolicyAssignmentStatus. +func (in *ResourcePolicyAssignmentStatus) DeepCopy() *ResourcePolicyAssignmentStatus { + if in == nil { + return nil + } + out := new(ResourcePolicyAssignmentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceSelectorsInitParameters) DeepCopyInto(out *ResourceSelectorsInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Selectors != nil { + in, out := &in.Selectors, &out.Selectors + *out = make([]ResourceSelectorsSelectorsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSelectorsInitParameters. +func (in *ResourceSelectorsInitParameters) DeepCopy() *ResourceSelectorsInitParameters { + if in == nil { + return nil + } + out := new(ResourceSelectorsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceSelectorsObservation) DeepCopyInto(out *ResourceSelectorsObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Selectors != nil { + in, out := &in.Selectors, &out.Selectors + *out = make([]ResourceSelectorsSelectorsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSelectorsObservation. +func (in *ResourceSelectorsObservation) DeepCopy() *ResourceSelectorsObservation { + if in == nil { + return nil + } + out := new(ResourceSelectorsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceSelectorsParameters) DeepCopyInto(out *ResourceSelectorsParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Selectors != nil { + in, out := &in.Selectors, &out.Selectors + *out = make([]ResourceSelectorsSelectorsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSelectorsParameters. +func (in *ResourceSelectorsParameters) DeepCopy() *ResourceSelectorsParameters { + if in == nil { + return nil + } + out := new(ResourceSelectorsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceSelectorsSelectorsInitParameters) DeepCopyInto(out *ResourceSelectorsSelectorsInitParameters) { + *out = *in + if in.In != nil { + in, out := &in.In, &out.In + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(string) + **out = **in + } + if in.NotIn != nil { + in, out := &in.NotIn, &out.NotIn + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSelectorsSelectorsInitParameters. +func (in *ResourceSelectorsSelectorsInitParameters) DeepCopy() *ResourceSelectorsSelectorsInitParameters { + if in == nil { + return nil + } + out := new(ResourceSelectorsSelectorsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceSelectorsSelectorsObservation) DeepCopyInto(out *ResourceSelectorsSelectorsObservation) { + *out = *in + if in.In != nil { + in, out := &in.In, &out.In + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(string) + **out = **in + } + if in.NotIn != nil { + in, out := &in.NotIn, &out.NotIn + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSelectorsSelectorsObservation. 
+func (in *ResourceSelectorsSelectorsObservation) DeepCopy() *ResourceSelectorsSelectorsObservation { + if in == nil { + return nil + } + out := new(ResourceSelectorsSelectorsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceSelectorsSelectorsParameters) DeepCopyInto(out *ResourceSelectorsSelectorsParameters) { + *out = *in + if in.In != nil { + in, out := &in.In, &out.In + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(string) + **out = **in + } + if in.NotIn != nil { + in, out := &in.NotIn, &out.NotIn + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSelectorsSelectorsParameters. +func (in *ResourceSelectorsSelectorsParameters) DeepCopy() *ResourceSelectorsSelectorsParameters { + if in == nil { + return nil + } + out := new(ResourceSelectorsSelectorsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SelectorsInitParameters) DeepCopyInto(out *SelectorsInitParameters) { + *out = *in + if in.In != nil { + in, out := &in.In, &out.In + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NotIn != nil { + in, out := &in.NotIn, &out.NotIn + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelectorsInitParameters. +func (in *SelectorsInitParameters) DeepCopy() *SelectorsInitParameters { + if in == nil { + return nil + } + out := new(SelectorsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelectorsObservation) DeepCopyInto(out *SelectorsObservation) { + *out = *in + if in.In != nil { + in, out := &in.In, &out.In + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(string) + **out = **in + } + if in.NotIn != nil { + in, out := &in.NotIn, &out.NotIn + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelectorsObservation. +func (in *SelectorsObservation) DeepCopy() *SelectorsObservation { + if in == nil { + return nil + } + out := new(SelectorsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SelectorsParameters) DeepCopyInto(out *SelectorsParameters) { + *out = *in + if in.In != nil { + in, out := &in.In, &out.In + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NotIn != nil { + in, out := &in.NotIn, &out.NotIn + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelectorsParameters. +func (in *SelectorsParameters) DeepCopy() *SelectorsParameters { + if in == nil { + return nil + } + out := new(SelectorsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubscriptionPolicyAssignment) DeepCopyInto(out *SubscriptionPolicyAssignment) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionPolicyAssignment. +func (in *SubscriptionPolicyAssignment) DeepCopy() *SubscriptionPolicyAssignment { + if in == nil { + return nil + } + out := new(SubscriptionPolicyAssignment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SubscriptionPolicyAssignment) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SubscriptionPolicyAssignmentIdentityInitParameters) DeepCopyInto(out *SubscriptionPolicyAssignmentIdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionPolicyAssignmentIdentityInitParameters. +func (in *SubscriptionPolicyAssignmentIdentityInitParameters) DeepCopy() *SubscriptionPolicyAssignmentIdentityInitParameters { + if in == nil { + return nil + } + out := new(SubscriptionPolicyAssignmentIdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubscriptionPolicyAssignmentIdentityObservation) DeepCopyInto(out *SubscriptionPolicyAssignmentIdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionPolicyAssignmentIdentityObservation. 
+func (in *SubscriptionPolicyAssignmentIdentityObservation) DeepCopy() *SubscriptionPolicyAssignmentIdentityObservation { + if in == nil { + return nil + } + out := new(SubscriptionPolicyAssignmentIdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubscriptionPolicyAssignmentIdentityParameters) DeepCopyInto(out *SubscriptionPolicyAssignmentIdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionPolicyAssignmentIdentityParameters. +func (in *SubscriptionPolicyAssignmentIdentityParameters) DeepCopy() *SubscriptionPolicyAssignmentIdentityParameters { + if in == nil { + return nil + } + out := new(SubscriptionPolicyAssignmentIdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SubscriptionPolicyAssignmentInitParameters) DeepCopyInto(out *SubscriptionPolicyAssignmentInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.Enforce != nil { + in, out := &in.Enforce, &out.Enforce + *out = new(bool) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(SubscriptionPolicyAssignmentIdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = new(string) + **out = **in + } + if in.NonComplianceMessage != nil { + in, out := &in.NonComplianceMessage, &out.NonComplianceMessage + *out = make([]SubscriptionPolicyAssignmentNonComplianceMessageInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NotScopes != nil { + in, out := &in.NotScopes, &out.NotScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Overrides != nil { + in, out := &in.Overrides, &out.Overrides + *out = make([]SubscriptionPolicyAssignmentOverridesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.PolicyDefinitionID != nil { + in, out := &in.PolicyDefinitionID, &out.PolicyDefinitionID + *out = new(string) + **out = **in + } + if in.PolicyDefinitionIDRef != nil { + in, out := &in.PolicyDefinitionIDRef, &out.PolicyDefinitionIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if 
in.PolicyDefinitionIDSelector != nil { + in, out := &in.PolicyDefinitionIDSelector, &out.PolicyDefinitionIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ResourceSelectors != nil { + in, out := &in.ResourceSelectors, &out.ResourceSelectors + *out = make([]SubscriptionPolicyAssignmentResourceSelectorsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubscriptionID != nil { + in, out := &in.SubscriptionID, &out.SubscriptionID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionPolicyAssignmentInitParameters. +func (in *SubscriptionPolicyAssignmentInitParameters) DeepCopy() *SubscriptionPolicyAssignmentInitParameters { + if in == nil { + return nil + } + out := new(SubscriptionPolicyAssignmentInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubscriptionPolicyAssignmentList) DeepCopyInto(out *SubscriptionPolicyAssignmentList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SubscriptionPolicyAssignment, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionPolicyAssignmentList. +func (in *SubscriptionPolicyAssignmentList) DeepCopy() *SubscriptionPolicyAssignmentList { + if in == nil { + return nil + } + out := new(SubscriptionPolicyAssignmentList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *SubscriptionPolicyAssignmentList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubscriptionPolicyAssignmentNonComplianceMessageInitParameters) DeepCopyInto(out *SubscriptionPolicyAssignmentNonComplianceMessageInitParameters) { + *out = *in + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = new(string) + **out = **in + } + if in.PolicyDefinitionReferenceID != nil { + in, out := &in.PolicyDefinitionReferenceID, &out.PolicyDefinitionReferenceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionPolicyAssignmentNonComplianceMessageInitParameters. +func (in *SubscriptionPolicyAssignmentNonComplianceMessageInitParameters) DeepCopy() *SubscriptionPolicyAssignmentNonComplianceMessageInitParameters { + if in == nil { + return nil + } + out := new(SubscriptionPolicyAssignmentNonComplianceMessageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubscriptionPolicyAssignmentNonComplianceMessageObservation) DeepCopyInto(out *SubscriptionPolicyAssignmentNonComplianceMessageObservation) { + *out = *in + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = new(string) + **out = **in + } + if in.PolicyDefinitionReferenceID != nil { + in, out := &in.PolicyDefinitionReferenceID, &out.PolicyDefinitionReferenceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionPolicyAssignmentNonComplianceMessageObservation. 
+func (in *SubscriptionPolicyAssignmentNonComplianceMessageObservation) DeepCopy() *SubscriptionPolicyAssignmentNonComplianceMessageObservation { + if in == nil { + return nil + } + out := new(SubscriptionPolicyAssignmentNonComplianceMessageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubscriptionPolicyAssignmentNonComplianceMessageParameters) DeepCopyInto(out *SubscriptionPolicyAssignmentNonComplianceMessageParameters) { + *out = *in + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = new(string) + **out = **in + } + if in.PolicyDefinitionReferenceID != nil { + in, out := &in.PolicyDefinitionReferenceID, &out.PolicyDefinitionReferenceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionPolicyAssignmentNonComplianceMessageParameters. +func (in *SubscriptionPolicyAssignmentNonComplianceMessageParameters) DeepCopy() *SubscriptionPolicyAssignmentNonComplianceMessageParameters { + if in == nil { + return nil + } + out := new(SubscriptionPolicyAssignmentNonComplianceMessageParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SubscriptionPolicyAssignmentObservation) DeepCopyInto(out *SubscriptionPolicyAssignmentObservation) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.Enforce != nil { + in, out := &in.Enforce, &out.Enforce + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(SubscriptionPolicyAssignmentIdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = new(string) + **out = **in + } + if in.NonComplianceMessage != nil { + in, out := &in.NonComplianceMessage, &out.NonComplianceMessage + *out = make([]SubscriptionPolicyAssignmentNonComplianceMessageObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NotScopes != nil { + in, out := &in.NotScopes, &out.NotScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Overrides != nil { + in, out := &in.Overrides, &out.Overrides + *out = make([]SubscriptionPolicyAssignmentOverridesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.PolicyDefinitionID != nil { + in, out := &in.PolicyDefinitionID, &out.PolicyDefinitionID + *out = new(string) + **out = **in + } + if in.ResourceSelectors != nil { + in, out := &in.ResourceSelectors, &out.ResourceSelectors + *out = 
make([]SubscriptionPolicyAssignmentResourceSelectorsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubscriptionID != nil { + in, out := &in.SubscriptionID, &out.SubscriptionID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionPolicyAssignmentObservation. +func (in *SubscriptionPolicyAssignmentObservation) DeepCopy() *SubscriptionPolicyAssignmentObservation { + if in == nil { + return nil + } + out := new(SubscriptionPolicyAssignmentObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubscriptionPolicyAssignmentOverridesInitParameters) DeepCopyInto(out *SubscriptionPolicyAssignmentOverridesInitParameters) { + *out = *in + if in.Selectors != nil { + in, out := &in.Selectors, &out.Selectors + *out = make([]SubscriptionPolicyAssignmentOverridesSelectorsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionPolicyAssignmentOverridesInitParameters. +func (in *SubscriptionPolicyAssignmentOverridesInitParameters) DeepCopy() *SubscriptionPolicyAssignmentOverridesInitParameters { + if in == nil { + return nil + } + out := new(SubscriptionPolicyAssignmentOverridesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SubscriptionPolicyAssignmentOverridesObservation) DeepCopyInto(out *SubscriptionPolicyAssignmentOverridesObservation) { + *out = *in + if in.Selectors != nil { + in, out := &in.Selectors, &out.Selectors + *out = make([]SubscriptionPolicyAssignmentOverridesSelectorsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionPolicyAssignmentOverridesObservation. +func (in *SubscriptionPolicyAssignmentOverridesObservation) DeepCopy() *SubscriptionPolicyAssignmentOverridesObservation { + if in == nil { + return nil + } + out := new(SubscriptionPolicyAssignmentOverridesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubscriptionPolicyAssignmentOverridesParameters) DeepCopyInto(out *SubscriptionPolicyAssignmentOverridesParameters) { + *out = *in + if in.Selectors != nil { + in, out := &in.Selectors, &out.Selectors + *out = make([]SubscriptionPolicyAssignmentOverridesSelectorsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionPolicyAssignmentOverridesParameters. +func (in *SubscriptionPolicyAssignmentOverridesParameters) DeepCopy() *SubscriptionPolicyAssignmentOverridesParameters { + if in == nil { + return nil + } + out := new(SubscriptionPolicyAssignmentOverridesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SubscriptionPolicyAssignmentOverridesSelectorsInitParameters) DeepCopyInto(out *SubscriptionPolicyAssignmentOverridesSelectorsInitParameters) { + *out = *in + if in.In != nil { + in, out := &in.In, &out.In + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NotIn != nil { + in, out := &in.NotIn, &out.NotIn + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionPolicyAssignmentOverridesSelectorsInitParameters. +func (in *SubscriptionPolicyAssignmentOverridesSelectorsInitParameters) DeepCopy() *SubscriptionPolicyAssignmentOverridesSelectorsInitParameters { + if in == nil { + return nil + } + out := new(SubscriptionPolicyAssignmentOverridesSelectorsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SubscriptionPolicyAssignmentOverridesSelectorsObservation) DeepCopyInto(out *SubscriptionPolicyAssignmentOverridesSelectorsObservation) { + *out = *in + if in.In != nil { + in, out := &in.In, &out.In + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(string) + **out = **in + } + if in.NotIn != nil { + in, out := &in.NotIn, &out.NotIn + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionPolicyAssignmentOverridesSelectorsObservation. +func (in *SubscriptionPolicyAssignmentOverridesSelectorsObservation) DeepCopy() *SubscriptionPolicyAssignmentOverridesSelectorsObservation { + if in == nil { + return nil + } + out := new(SubscriptionPolicyAssignmentOverridesSelectorsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubscriptionPolicyAssignmentOverridesSelectorsParameters) DeepCopyInto(out *SubscriptionPolicyAssignmentOverridesSelectorsParameters) { + *out = *in + if in.In != nil { + in, out := &in.In, &out.In + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NotIn != nil { + in, out := &in.NotIn, &out.NotIn + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionPolicyAssignmentOverridesSelectorsParameters. 
+func (in *SubscriptionPolicyAssignmentOverridesSelectorsParameters) DeepCopy() *SubscriptionPolicyAssignmentOverridesSelectorsParameters { + if in == nil { + return nil + } + out := new(SubscriptionPolicyAssignmentOverridesSelectorsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubscriptionPolicyAssignmentParameters) DeepCopyInto(out *SubscriptionPolicyAssignmentParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.Enforce != nil { + in, out := &in.Enforce, &out.Enforce + *out = new(bool) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(SubscriptionPolicyAssignmentIdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = new(string) + **out = **in + } + if in.NonComplianceMessage != nil { + in, out := &in.NonComplianceMessage, &out.NonComplianceMessage + *out = make([]SubscriptionPolicyAssignmentNonComplianceMessageParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NotScopes != nil { + in, out := &in.NotScopes, &out.NotScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Overrides != nil { + in, out := &in.Overrides, &out.Overrides + *out = make([]SubscriptionPolicyAssignmentOverridesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Parameters != nil { + in, out := &in.Parameters, 
&out.Parameters + *out = new(string) + **out = **in + } + if in.PolicyDefinitionID != nil { + in, out := &in.PolicyDefinitionID, &out.PolicyDefinitionID + *out = new(string) + **out = **in + } + if in.PolicyDefinitionIDRef != nil { + in, out := &in.PolicyDefinitionIDRef, &out.PolicyDefinitionIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PolicyDefinitionIDSelector != nil { + in, out := &in.PolicyDefinitionIDSelector, &out.PolicyDefinitionIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ResourceSelectors != nil { + in, out := &in.ResourceSelectors, &out.ResourceSelectors + *out = make([]SubscriptionPolicyAssignmentResourceSelectorsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubscriptionID != nil { + in, out := &in.SubscriptionID, &out.SubscriptionID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionPolicyAssignmentParameters. +func (in *SubscriptionPolicyAssignmentParameters) DeepCopy() *SubscriptionPolicyAssignmentParameters { + if in == nil { + return nil + } + out := new(SubscriptionPolicyAssignmentParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SubscriptionPolicyAssignmentResourceSelectorsInitParameters) DeepCopyInto(out *SubscriptionPolicyAssignmentResourceSelectorsInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Selectors != nil { + in, out := &in.Selectors, &out.Selectors + *out = make([]SubscriptionPolicyAssignmentResourceSelectorsSelectorsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionPolicyAssignmentResourceSelectorsInitParameters. +func (in *SubscriptionPolicyAssignmentResourceSelectorsInitParameters) DeepCopy() *SubscriptionPolicyAssignmentResourceSelectorsInitParameters { + if in == nil { + return nil + } + out := new(SubscriptionPolicyAssignmentResourceSelectorsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubscriptionPolicyAssignmentResourceSelectorsObservation) DeepCopyInto(out *SubscriptionPolicyAssignmentResourceSelectorsObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Selectors != nil { + in, out := &in.Selectors, &out.Selectors + *out = make([]SubscriptionPolicyAssignmentResourceSelectorsSelectorsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionPolicyAssignmentResourceSelectorsObservation. 
+func (in *SubscriptionPolicyAssignmentResourceSelectorsObservation) DeepCopy() *SubscriptionPolicyAssignmentResourceSelectorsObservation { + if in == nil { + return nil + } + out := new(SubscriptionPolicyAssignmentResourceSelectorsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubscriptionPolicyAssignmentResourceSelectorsParameters) DeepCopyInto(out *SubscriptionPolicyAssignmentResourceSelectorsParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Selectors != nil { + in, out := &in.Selectors, &out.Selectors + *out = make([]SubscriptionPolicyAssignmentResourceSelectorsSelectorsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionPolicyAssignmentResourceSelectorsParameters. +func (in *SubscriptionPolicyAssignmentResourceSelectorsParameters) DeepCopy() *SubscriptionPolicyAssignmentResourceSelectorsParameters { + if in == nil { + return nil + } + out := new(SubscriptionPolicyAssignmentResourceSelectorsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SubscriptionPolicyAssignmentResourceSelectorsSelectorsInitParameters) DeepCopyInto(out *SubscriptionPolicyAssignmentResourceSelectorsSelectorsInitParameters) { + *out = *in + if in.In != nil { + in, out := &in.In, &out.In + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(string) + **out = **in + } + if in.NotIn != nil { + in, out := &in.NotIn, &out.NotIn + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionPolicyAssignmentResourceSelectorsSelectorsInitParameters. +func (in *SubscriptionPolicyAssignmentResourceSelectorsSelectorsInitParameters) DeepCopy() *SubscriptionPolicyAssignmentResourceSelectorsSelectorsInitParameters { + if in == nil { + return nil + } + out := new(SubscriptionPolicyAssignmentResourceSelectorsSelectorsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SubscriptionPolicyAssignmentResourceSelectorsSelectorsObservation) DeepCopyInto(out *SubscriptionPolicyAssignmentResourceSelectorsSelectorsObservation) { + *out = *in + if in.In != nil { + in, out := &in.In, &out.In + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(string) + **out = **in + } + if in.NotIn != nil { + in, out := &in.NotIn, &out.NotIn + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionPolicyAssignmentResourceSelectorsSelectorsObservation. +func (in *SubscriptionPolicyAssignmentResourceSelectorsSelectorsObservation) DeepCopy() *SubscriptionPolicyAssignmentResourceSelectorsSelectorsObservation { + if in == nil { + return nil + } + out := new(SubscriptionPolicyAssignmentResourceSelectorsSelectorsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SubscriptionPolicyAssignmentResourceSelectorsSelectorsParameters) DeepCopyInto(out *SubscriptionPolicyAssignmentResourceSelectorsSelectorsParameters) { + *out = *in + if in.In != nil { + in, out := &in.In, &out.In + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(string) + **out = **in + } + if in.NotIn != nil { + in, out := &in.NotIn, &out.NotIn + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionPolicyAssignmentResourceSelectorsSelectorsParameters. +func (in *SubscriptionPolicyAssignmentResourceSelectorsSelectorsParameters) DeepCopy() *SubscriptionPolicyAssignmentResourceSelectorsSelectorsParameters { + if in == nil { + return nil + } + out := new(SubscriptionPolicyAssignmentResourceSelectorsSelectorsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubscriptionPolicyAssignmentSpec) DeepCopyInto(out *SubscriptionPolicyAssignmentSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionPolicyAssignmentSpec. +func (in *SubscriptionPolicyAssignmentSpec) DeepCopy() *SubscriptionPolicyAssignmentSpec { + if in == nil { + return nil + } + out := new(SubscriptionPolicyAssignmentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *SubscriptionPolicyAssignmentStatus) DeepCopyInto(out *SubscriptionPolicyAssignmentStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionPolicyAssignmentStatus. +func (in *SubscriptionPolicyAssignmentStatus) DeepCopy() *SubscriptionPolicyAssignmentStatus { + if in == nil { + return nil + } + out := new(SubscriptionPolicyAssignmentStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/authorization/v1beta2/zz_generated.managed.go b/apis/authorization/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..0f8add7e9 --- /dev/null +++ b/apis/authorization/v1beta2/zz_generated.managed.go @@ -0,0 +1,188 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this ResourceGroupPolicyAssignment. +func (mg *ResourceGroupPolicyAssignment) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ResourceGroupPolicyAssignment. +func (mg *ResourceGroupPolicyAssignment) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ResourceGroupPolicyAssignment. +func (mg *ResourceGroupPolicyAssignment) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ResourceGroupPolicyAssignment. +func (mg *ResourceGroupPolicyAssignment) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ResourceGroupPolicyAssignment. 
+func (mg *ResourceGroupPolicyAssignment) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ResourceGroupPolicyAssignment. +func (mg *ResourceGroupPolicyAssignment) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ResourceGroupPolicyAssignment. +func (mg *ResourceGroupPolicyAssignment) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ResourceGroupPolicyAssignment. +func (mg *ResourceGroupPolicyAssignment) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ResourceGroupPolicyAssignment. +func (mg *ResourceGroupPolicyAssignment) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ResourceGroupPolicyAssignment. +func (mg *ResourceGroupPolicyAssignment) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ResourceGroupPolicyAssignment. +func (mg *ResourceGroupPolicyAssignment) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ResourceGroupPolicyAssignment. +func (mg *ResourceGroupPolicyAssignment) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this ResourcePolicyAssignment. +func (mg *ResourcePolicyAssignment) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ResourcePolicyAssignment. 
+func (mg *ResourcePolicyAssignment) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ResourcePolicyAssignment. +func (mg *ResourcePolicyAssignment) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ResourcePolicyAssignment. +func (mg *ResourcePolicyAssignment) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ResourcePolicyAssignment. +func (mg *ResourcePolicyAssignment) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ResourcePolicyAssignment. +func (mg *ResourcePolicyAssignment) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ResourcePolicyAssignment. +func (mg *ResourcePolicyAssignment) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ResourcePolicyAssignment. +func (mg *ResourcePolicyAssignment) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ResourcePolicyAssignment. +func (mg *ResourcePolicyAssignment) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ResourcePolicyAssignment. +func (mg *ResourcePolicyAssignment) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ResourcePolicyAssignment. +func (mg *ResourcePolicyAssignment) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ResourcePolicyAssignment. 
+func (mg *ResourcePolicyAssignment) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this SubscriptionPolicyAssignment. +func (mg *SubscriptionPolicyAssignment) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this SubscriptionPolicyAssignment. +func (mg *SubscriptionPolicyAssignment) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this SubscriptionPolicyAssignment. +func (mg *SubscriptionPolicyAssignment) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this SubscriptionPolicyAssignment. +func (mg *SubscriptionPolicyAssignment) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this SubscriptionPolicyAssignment. +func (mg *SubscriptionPolicyAssignment) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this SubscriptionPolicyAssignment. +func (mg *SubscriptionPolicyAssignment) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this SubscriptionPolicyAssignment. +func (mg *SubscriptionPolicyAssignment) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this SubscriptionPolicyAssignment. +func (mg *SubscriptionPolicyAssignment) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this SubscriptionPolicyAssignment. +func (mg *SubscriptionPolicyAssignment) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this SubscriptionPolicyAssignment. 
+func (mg *SubscriptionPolicyAssignment) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this SubscriptionPolicyAssignment. +func (mg *SubscriptionPolicyAssignment) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this SubscriptionPolicyAssignment. +func (mg *SubscriptionPolicyAssignment) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/authorization/v1beta2/zz_generated.managedlist.go b/apis/authorization/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..4b4b68e96 --- /dev/null +++ b/apis/authorization/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,35 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this ResourceGroupPolicyAssignmentList. +func (l *ResourceGroupPolicyAssignmentList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ResourcePolicyAssignmentList. +func (l *ResourcePolicyAssignmentList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this SubscriptionPolicyAssignmentList. 
+func (l *SubscriptionPolicyAssignmentList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/authorization/v1beta2/zz_generated.resolvers.go b/apis/authorization/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..e5b7616c8 --- /dev/null +++ b/apis/authorization/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,206 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + apisresolver "github.com/upbound/provider-azure/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *ResourceGroupPolicyAssignment) ResolveReferences( // ResolveReferences of this ResourceGroupPolicyAssignment. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("authorization.azure.upbound.io", "v1beta1", "PolicyDefinition", "PolicyDefinitionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.PolicyDefinitionID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.PolicyDefinitionIDRef, + Selector: mg.Spec.ForProvider.PolicyDefinitionIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.PolicyDefinitionID") + } + mg.Spec.ForProvider.PolicyDefinitionID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.PolicyDefinitionIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.ResourceGroupIDRef, + Selector: mg.Spec.ForProvider.ResourceGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupID") + } + mg.Spec.ForProvider.ResourceGroupID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("authorization.azure.upbound.io", "v1beta1", 
"PolicyDefinition", "PolicyDefinitionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.PolicyDefinitionID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.PolicyDefinitionIDRef, + Selector: mg.Spec.InitProvider.PolicyDefinitionIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.PolicyDefinitionID") + } + mg.Spec.InitProvider.PolicyDefinitionID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.PolicyDefinitionIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ResourceGroupID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.ResourceGroupIDRef, + Selector: mg.Spec.InitProvider.ResourceGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ResourceGroupID") + } + mg.Spec.InitProvider.ResourceGroupID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ResourceGroupIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this ResourcePolicyAssignment. 
+func (mg *ResourcePolicyAssignment) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("authorization.azure.upbound.io", "v1beta1", "PolicyDefinition", "PolicyDefinitionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.PolicyDefinitionID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.PolicyDefinitionIDRef, + Selector: mg.Spec.ForProvider.PolicyDefinitionIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.PolicyDefinitionID") + } + mg.Spec.ForProvider.PolicyDefinitionID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.PolicyDefinitionIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("authorization.azure.upbound.io", "v1beta1", "PolicyDefinition", "PolicyDefinitionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.PolicyDefinitionID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.PolicyDefinitionIDRef, + Selector: mg.Spec.InitProvider.PolicyDefinitionIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.PolicyDefinitionID") + } + mg.Spec.InitProvider.PolicyDefinitionID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.PolicyDefinitionIDRef = rsp.ResolvedReference + + 
return nil +} + +// ResolveReferences of this SubscriptionPolicyAssignment. +func (mg *SubscriptionPolicyAssignment) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("authorization.azure.upbound.io", "v1beta1", "PolicyDefinition", "PolicyDefinitionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.PolicyDefinitionID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.PolicyDefinitionIDRef, + Selector: mg.Spec.ForProvider.PolicyDefinitionIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.PolicyDefinitionID") + } + mg.Spec.ForProvider.PolicyDefinitionID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.PolicyDefinitionIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("authorization.azure.upbound.io", "v1beta1", "PolicyDefinition", "PolicyDefinitionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.PolicyDefinitionID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.PolicyDefinitionIDRef, + Selector: mg.Spec.InitProvider.PolicyDefinitionIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.PolicyDefinitionID") + } + mg.Spec.InitProvider.PolicyDefinitionID = 
reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.PolicyDefinitionIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/authorization/v1beta2/zz_groupversion_info.go b/apis/authorization/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..0343bf814 --- /dev/null +++ b/apis/authorization/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=authorization.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "authorization.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/authorization/v1beta2/zz_resourcegrouppolicyassignment_terraformed.go b/apis/authorization/v1beta2/zz_resourcegrouppolicyassignment_terraformed.go new file mode 100755 index 000000000..4dd0b5ef9 --- /dev/null +++ b/apis/authorization/v1beta2/zz_resourcegrouppolicyassignment_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ResourceGroupPolicyAssignment +func (mg *ResourceGroupPolicyAssignment) GetTerraformResourceType() string { + return "azurerm_resource_group_policy_assignment" +} + +// GetConnectionDetailsMapping for this ResourceGroupPolicyAssignment +func (tr *ResourceGroupPolicyAssignment) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ResourceGroupPolicyAssignment +func (tr *ResourceGroupPolicyAssignment) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ResourceGroupPolicyAssignment +func (tr *ResourceGroupPolicyAssignment) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ResourceGroupPolicyAssignment +func (tr *ResourceGroupPolicyAssignment) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ResourceGroupPolicyAssignment +func (tr *ResourceGroupPolicyAssignment) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ResourceGroupPolicyAssignment +func (tr *ResourceGroupPolicyAssignment) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return 
json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ResourceGroupPolicyAssignment +func (tr *ResourceGroupPolicyAssignment) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this ResourceGroupPolicyAssignment +func (tr *ResourceGroupPolicyAssignment) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ResourceGroupPolicyAssignment using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *ResourceGroupPolicyAssignment) LateInitialize(attrs []byte) (bool, error) { + params := &ResourceGroupPolicyAssignmentParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ResourceGroupPolicyAssignment) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/authorization/v1beta2/zz_resourcegrouppolicyassignment_types.go b/apis/authorization/v1beta2/zz_resourcegrouppolicyassignment_types.go new file mode 100755 index 000000000..0bdea6a64 --- /dev/null +++ b/apis/authorization/v1beta2/zz_resourcegrouppolicyassignment_types.go @@ -0,0 +1,452 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type IdentityInitParameters struct { + + // A list of User Managed Identity IDs which should be assigned to the Policy Definition. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Type of Managed Identity which should be added to this Policy Definition. Possible values are SystemAssigned and UserAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityObservation struct { + + // A list of User Managed Identity IDs which should be assigned to the Policy Definition. 
+ // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Principal ID of the Policy Assignment for this Resource Group. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID of the Policy Assignment for this Resource Group. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // The Type of Managed Identity which should be added to this Policy Definition. Possible values are SystemAssigned and UserAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityParameters struct { + + // A list of User Managed Identity IDs which should be assigned to the Policy Definition. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Type of Managed Identity which should be added to this Policy Definition. Possible values are SystemAssigned and UserAssigned. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type NonComplianceMessageInitParameters struct { + + // The non-compliance message text. When assigning policy sets (initiatives), unless policy_definition_reference_id is specified then this message will be the default for all policies. + Content *string `json:"content,omitempty" tf:"content,omitempty"` + + // When assigning policy sets (initiatives), this is the ID of the policy definition that the non-compliance message applies to. + PolicyDefinitionReferenceID *string `json:"policyDefinitionReferenceId,omitempty" tf:"policy_definition_reference_id,omitempty"` +} + +type NonComplianceMessageObservation struct { + + // The non-compliance message text. When assigning policy sets (initiatives), unless policy_definition_reference_id is specified then this message will be the default for all policies. 
+ Content *string `json:"content,omitempty" tf:"content,omitempty"` + + // When assigning policy sets (initiatives), this is the ID of the policy definition that the non-compliance message applies to. + PolicyDefinitionReferenceID *string `json:"policyDefinitionReferenceId,omitempty" tf:"policy_definition_reference_id,omitempty"` +} + +type NonComplianceMessageParameters struct { + + // The non-compliance message text. When assigning policy sets (initiatives), unless policy_definition_reference_id is specified then this message will be the default for all policies. + // +kubebuilder:validation:Optional + Content *string `json:"content" tf:"content,omitempty"` + + // When assigning policy sets (initiatives), this is the ID of the policy definition that the non-compliance message applies to. + // +kubebuilder:validation:Optional + PolicyDefinitionReferenceID *string `json:"policyDefinitionReferenceId,omitempty" tf:"policy_definition_reference_id,omitempty"` +} + +type OverridesInitParameters struct { + + // One or more override_selector block as defined below. + Selectors []SelectorsInitParameters `json:"selectors,omitempty" tf:"selectors,omitempty"` + + // Specifies the value to override the policy property. Possible values for policyEffect override listed policy effects. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type OverridesObservation struct { + + // One or more override_selector block as defined below. + Selectors []SelectorsObservation `json:"selectors,omitempty" tf:"selectors,omitempty"` + + // Specifies the value to override the policy property. Possible values for policyEffect override listed policy effects. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type OverridesParameters struct { + + // One or more override_selector block as defined below. 
+ // +kubebuilder:validation:Optional + Selectors []SelectorsParameters `json:"selectors,omitempty" tf:"selectors,omitempty"` + + // Specifies the value to override the policy property. Possible values for policyEffect override listed policy effects. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type ResourceGroupPolicyAssignmentInitParameters struct { + + // A description which should be used for this Policy Assignment. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The Display Name for this Policy Assignment. + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // Specifies if this Policy should be enforced or not? Defaults to true. + Enforce *bool `json:"enforce,omitempty" tf:"enforce,omitempty"` + + // An identity block as defined below. + Identity *IdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The Azure Region where the Policy Assignment should exist. Changing this forces a new Policy Assignment to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A JSON mapping of any Metadata for this Policy. + Metadata *string `json:"metadata,omitempty" tf:"metadata,omitempty"` + + // One or more non_compliance_message blocks as defined below. + NonComplianceMessage []NonComplianceMessageInitParameters `json:"nonComplianceMessage,omitempty" tf:"non_compliance_message,omitempty"` + + // Specifies a list of Resource Scopes (for example a Subscription, or a Resource Group) within this Management Group which are excluded from this Policy. + NotScopes []*string `json:"notScopes,omitempty" tf:"not_scopes,omitempty"` + + // One or more overrides blocks as defined below. 
More detail about overrides and resource_selectors see policy assignment structure + Overrides []OverridesInitParameters `json:"overrides,omitempty" tf:"overrides,omitempty"` + + // A JSON mapping of any Parameters for this Policy. + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The ID of the Policy Definition or Policy Definition Set. Changing this forces a new Policy Assignment to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/authorization/v1beta1.PolicyDefinition + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + PolicyDefinitionID *string `json:"policyDefinitionId,omitempty" tf:"policy_definition_id,omitempty"` + + // Reference to a PolicyDefinition in authorization to populate policyDefinitionId. + // +kubebuilder:validation:Optional + PolicyDefinitionIDRef *v1.Reference `json:"policyDefinitionIdRef,omitempty" tf:"-"` + + // Selector for a PolicyDefinition in authorization to populate policyDefinitionId. + // +kubebuilder:validation:Optional + PolicyDefinitionIDSelector *v1.Selector `json:"policyDefinitionIdSelector,omitempty" tf:"-"` + + // The ID of the Resource Group where this Policy Assignment should be created. Changing this forces a new Policy Assignment to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + ResourceGroupID *string `json:"resourceGroupId,omitempty" tf:"resource_group_id,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupId. + // +kubebuilder:validation:Optional + ResourceGroupIDRef *v1.Reference `json:"resourceGroupIdRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupId. 
+ // +kubebuilder:validation:Optional + ResourceGroupIDSelector *v1.Selector `json:"resourceGroupIdSelector,omitempty" tf:"-"` + + // One or more resource_selectors blocks as defined below to filter polices by resource properties. + ResourceSelectors []ResourceSelectorsInitParameters `json:"resourceSelectors,omitempty" tf:"resource_selectors,omitempty"` +} + +type ResourceGroupPolicyAssignmentObservation struct { + + // A description which should be used for this Policy Assignment. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The Display Name for this Policy Assignment. + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // Specifies if this Policy should be enforced or not? Defaults to true. + Enforce *bool `json:"enforce,omitempty" tf:"enforce,omitempty"` + + // The ID of the Resource Group Policy Assignment. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. + Identity *IdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // The Azure Region where the Policy Assignment should exist. Changing this forces a new Policy Assignment to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A JSON mapping of any Metadata for this Policy. + Metadata *string `json:"metadata,omitempty" tf:"metadata,omitempty"` + + // One or more non_compliance_message blocks as defined below. + NonComplianceMessage []NonComplianceMessageObservation `json:"nonComplianceMessage,omitempty" tf:"non_compliance_message,omitempty"` + + // Specifies a list of Resource Scopes (for example a Subscription, or a Resource Group) within this Management Group which are excluded from this Policy. + NotScopes []*string `json:"notScopes,omitempty" tf:"not_scopes,omitempty"` + + // One or more overrides blocks as defined below. 
More detail about overrides and resource_selectors see policy assignment structure + Overrides []OverridesObservation `json:"overrides,omitempty" tf:"overrides,omitempty"` + + // A JSON mapping of any Parameters for this Policy. + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The ID of the Policy Definition or Policy Definition Set. Changing this forces a new Policy Assignment to be created. + PolicyDefinitionID *string `json:"policyDefinitionId,omitempty" tf:"policy_definition_id,omitempty"` + + // The ID of the Resource Group where this Policy Assignment should be created. Changing this forces a new Policy Assignment to be created. + ResourceGroupID *string `json:"resourceGroupId,omitempty" tf:"resource_group_id,omitempty"` + + // One or more resource_selectors blocks as defined below to filter polices by resource properties. + ResourceSelectors []ResourceSelectorsObservation `json:"resourceSelectors,omitempty" tf:"resource_selectors,omitempty"` +} + +type ResourceGroupPolicyAssignmentParameters struct { + + // A description which should be used for this Policy Assignment. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The Display Name for this Policy Assignment. + // +kubebuilder:validation:Optional + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // Specifies if this Policy should be enforced or not? Defaults to true. + // +kubebuilder:validation:Optional + Enforce *bool `json:"enforce,omitempty" tf:"enforce,omitempty"` + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *IdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The Azure Region where the Policy Assignment should exist. Changing this forces a new Policy Assignment to be created. 
+ // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A JSON mapping of any Metadata for this Policy. + // +kubebuilder:validation:Optional + Metadata *string `json:"metadata,omitempty" tf:"metadata,omitempty"` + + // One or more non_compliance_message blocks as defined below. + // +kubebuilder:validation:Optional + NonComplianceMessage []NonComplianceMessageParameters `json:"nonComplianceMessage,omitempty" tf:"non_compliance_message,omitempty"` + + // Specifies a list of Resource Scopes (for example a Subscription, or a Resource Group) within this Management Group which are excluded from this Policy. + // +kubebuilder:validation:Optional + NotScopes []*string `json:"notScopes,omitempty" tf:"not_scopes,omitempty"` + + // One or more overrides blocks as defined below. More detail about overrides and resource_selectors see policy assignment structure + // +kubebuilder:validation:Optional + Overrides []OverridesParameters `json:"overrides,omitempty" tf:"overrides,omitempty"` + + // A JSON mapping of any Parameters for this Policy. + // +kubebuilder:validation:Optional + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The ID of the Policy Definition or Policy Definition Set. Changing this forces a new Policy Assignment to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/authorization/v1beta1.PolicyDefinition + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + PolicyDefinitionID *string `json:"policyDefinitionId,omitempty" tf:"policy_definition_id,omitempty"` + + // Reference to a PolicyDefinition in authorization to populate policyDefinitionId. 
+ // +kubebuilder:validation:Optional + PolicyDefinitionIDRef *v1.Reference `json:"policyDefinitionIdRef,omitempty" tf:"-"` + + // Selector for a PolicyDefinition in authorization to populate policyDefinitionId. + // +kubebuilder:validation:Optional + PolicyDefinitionIDSelector *v1.Selector `json:"policyDefinitionIdSelector,omitempty" tf:"-"` + + // The ID of the Resource Group where this Policy Assignment should be created. Changing this forces a new Policy Assignment to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + ResourceGroupID *string `json:"resourceGroupId,omitempty" tf:"resource_group_id,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupId. + // +kubebuilder:validation:Optional + ResourceGroupIDRef *v1.Reference `json:"resourceGroupIdRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupId. + // +kubebuilder:validation:Optional + ResourceGroupIDSelector *v1.Selector `json:"resourceGroupIdSelector,omitempty" tf:"-"` + + // One or more resource_selectors blocks as defined below to filter polices by resource properties. + // +kubebuilder:validation:Optional + ResourceSelectors []ResourceSelectorsParameters `json:"resourceSelectors,omitempty" tf:"resource_selectors,omitempty"` +} + +type ResourceSelectorsInitParameters struct { + + // Specifies a name for the resource selector. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // One or more resource_selector block as defined below. + Selectors []ResourceSelectorsSelectorsInitParameters `json:"selectors,omitempty" tf:"selectors,omitempty"` +} + +type ResourceSelectorsObservation struct { + + // Specifies a name for the resource selector. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // One or more resource_selector block as defined below. + Selectors []ResourceSelectorsSelectorsObservation `json:"selectors,omitempty" tf:"selectors,omitempty"` +} + +type ResourceSelectorsParameters struct { + + // Specifies a name for the resource selector. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // One or more resource_selector block as defined below. + // +kubebuilder:validation:Optional + Selectors []ResourceSelectorsSelectorsParameters `json:"selectors" tf:"selectors,omitempty"` +} + +type ResourceSelectorsSelectorsInitParameters struct { + + // The list of allowed values for the specified kind. Cannot be used with not_in. Can contain up to 50 values. + In []*string `json:"in,omitempty" tf:"in,omitempty"` + + // Specifies which characteristic will narrow down the set of evaluated resources. Possible values are resourceLocation, resourceType and resourceWithoutLocation. + Kind *string `json:"kind,omitempty" tf:"kind,omitempty"` + + // The list of not-allowed values for the specified kind. Cannot be used with in. Can contain up to 50 values. + NotIn []*string `json:"notIn,omitempty" tf:"not_in,omitempty"` +} + +type ResourceSelectorsSelectorsObservation struct { + + // The list of allowed values for the specified kind. Cannot be used with not_in. Can contain up to 50 values. + In []*string `json:"in,omitempty" tf:"in,omitempty"` + + // Specifies which characteristic will narrow down the set of evaluated resources. Possible values are resourceLocation, resourceType and resourceWithoutLocation. + Kind *string `json:"kind,omitempty" tf:"kind,omitempty"` + + // The list of not-allowed values for the specified kind. Cannot be used with in. Can contain up to 50 values. 
+ NotIn []*string `json:"notIn,omitempty" tf:"not_in,omitempty"` +} + +type ResourceSelectorsSelectorsParameters struct { + + // The list of allowed values for the specified kind. Cannot be used with not_in. Can contain up to 50 values. + // +kubebuilder:validation:Optional + In []*string `json:"in,omitempty" tf:"in,omitempty"` + + // Specifies which characteristic will narrow down the set of evaluated resources. Possible values are resourceLocation, resourceType and resourceWithoutLocation. + // +kubebuilder:validation:Optional + Kind *string `json:"kind" tf:"kind,omitempty"` + + // The list of not-allowed values for the specified kind. Cannot be used with in. Can contain up to 50 values. + // +kubebuilder:validation:Optional + NotIn []*string `json:"notIn,omitempty" tf:"not_in,omitempty"` +} + +type SelectorsInitParameters struct { + + // The list of allowed values for the specified kind. Cannot be used with not_in. Can contain up to 50 values. + In []*string `json:"in,omitempty" tf:"in,omitempty"` + + // The list of not-allowed values for the specified kind. Cannot be used with in. Can contain up to 50 values. + NotIn []*string `json:"notIn,omitempty" tf:"not_in,omitempty"` +} + +type SelectorsObservation struct { + + // The list of allowed values for the specified kind. Cannot be used with not_in. Can contain up to 50 values. + In []*string `json:"in,omitempty" tf:"in,omitempty"` + + // Specifies which characteristic will narrow down the set of evaluated resources. Possible values are resourceLocation, resourceType and resourceWithoutLocation. + Kind *string `json:"kind,omitempty" tf:"kind,omitempty"` + + // The list of not-allowed values for the specified kind. Cannot be used with in. Can contain up to 50 values. + NotIn []*string `json:"notIn,omitempty" tf:"not_in,omitempty"` +} + +type SelectorsParameters struct { + + // The list of allowed values for the specified kind. Cannot be used with not_in. Can contain up to 50 values. 
+ // +kubebuilder:validation:Optional + In []*string `json:"in,omitempty" tf:"in,omitempty"` + + // The list of not-allowed values for the specified kind. Cannot be used with in. Can contain up to 50 values. + // +kubebuilder:validation:Optional + NotIn []*string `json:"notIn,omitempty" tf:"not_in,omitempty"` +} + +// ResourceGroupPolicyAssignmentSpec defines the desired state of ResourceGroupPolicyAssignment +type ResourceGroupPolicyAssignmentSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ResourceGroupPolicyAssignmentParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ResourceGroupPolicyAssignmentInitParameters `json:"initProvider,omitempty"` +} + +// ResourceGroupPolicyAssignmentStatus defines the observed state of ResourceGroupPolicyAssignment. +type ResourceGroupPolicyAssignmentStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ResourceGroupPolicyAssignmentObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ResourceGroupPolicyAssignment is the Schema for the ResourceGroupPolicyAssignments API. Manages a Resource Group Policy Assignment. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type ResourceGroupPolicyAssignment struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec ResourceGroupPolicyAssignmentSpec `json:"spec"` + Status ResourceGroupPolicyAssignmentStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ResourceGroupPolicyAssignmentList contains a list of ResourceGroupPolicyAssignments +type ResourceGroupPolicyAssignmentList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ResourceGroupPolicyAssignment `json:"items"` +} + +// Repository type metadata. +var ( + ResourceGroupPolicyAssignment_Kind = "ResourceGroupPolicyAssignment" + ResourceGroupPolicyAssignment_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ResourceGroupPolicyAssignment_Kind}.String() + ResourceGroupPolicyAssignment_KindAPIVersion = ResourceGroupPolicyAssignment_Kind + "." 
+ CRDGroupVersion.String() + ResourceGroupPolicyAssignment_GroupVersionKind = CRDGroupVersion.WithKind(ResourceGroupPolicyAssignment_Kind) +) + +func init() { + SchemeBuilder.Register(&ResourceGroupPolicyAssignment{}, &ResourceGroupPolicyAssignmentList{}) +} diff --git a/apis/authorization/v1beta2/zz_resourcepolicyassignment_terraformed.go b/apis/authorization/v1beta2/zz_resourcepolicyassignment_terraformed.go new file mode 100755 index 000000000..f260d3fc4 --- /dev/null +++ b/apis/authorization/v1beta2/zz_resourcepolicyassignment_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ResourcePolicyAssignment +func (mg *ResourcePolicyAssignment) GetTerraformResourceType() string { + return "azurerm_resource_policy_assignment" +} + +// GetConnectionDetailsMapping for this ResourcePolicyAssignment +func (tr *ResourcePolicyAssignment) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ResourcePolicyAssignment +func (tr *ResourcePolicyAssignment) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ResourcePolicyAssignment +func (tr *ResourcePolicyAssignment) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ResourcePolicyAssignment +func (tr *ResourcePolicyAssignment) GetID() 
string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this ResourcePolicyAssignment
+func (tr *ResourcePolicyAssignment) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this ResourcePolicyAssignment
+func (tr *ResourcePolicyAssignment) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this ResourcePolicyAssignment
+func (tr *ResourcePolicyAssignment) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this ResourcePolicyAssignment
+func (tr *ResourcePolicyAssignment) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ResourcePolicyAssignment using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ResourcePolicyAssignment) LateInitialize(attrs []byte) (bool, error) { + params := &ResourcePolicyAssignmentParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ResourcePolicyAssignment) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/authorization/v1beta2/zz_resourcepolicyassignment_types.go b/apis/authorization/v1beta2/zz_resourcepolicyassignment_types.go new file mode 100755 index 000000000..c4c49e146 --- /dev/null +++ b/apis/authorization/v1beta2/zz_resourcepolicyassignment_types.go @@ -0,0 +1,444 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type OverridesSelectorsInitParameters struct { + + // The list of allowed values for the specified kind. Cannot be used with not_in. Can contain up to 50 values. 
+ In []*string `json:"in,omitempty" tf:"in,omitempty"` + + // The list of not-allowed values for the specified kind. Cannot be used with in. Can contain up to 50 values. + NotIn []*string `json:"notIn,omitempty" tf:"not_in,omitempty"` +} + +type OverridesSelectorsObservation struct { + + // The list of allowed values for the specified kind. Cannot be used with not_in. Can contain up to 50 values. + In []*string `json:"in,omitempty" tf:"in,omitempty"` + + // Specifies which characteristic will narrow down the set of evaluated resources. Possible values are resourceLocation, resourceType and resourceWithoutLocation. + Kind *string `json:"kind,omitempty" tf:"kind,omitempty"` + + // The list of not-allowed values for the specified kind. Cannot be used with in. Can contain up to 50 values. + NotIn []*string `json:"notIn,omitempty" tf:"not_in,omitempty"` +} + +type OverridesSelectorsParameters struct { + + // The list of allowed values for the specified kind. Cannot be used with not_in. Can contain up to 50 values. + // +kubebuilder:validation:Optional + In []*string `json:"in,omitempty" tf:"in,omitempty"` + + // The list of not-allowed values for the specified kind. Cannot be used with in. Can contain up to 50 values. + // +kubebuilder:validation:Optional + NotIn []*string `json:"notIn,omitempty" tf:"not_in,omitempty"` +} + +type ResourcePolicyAssignmentIdentityInitParameters struct { + + // A list of User Managed Identity IDs which should be assigned to the Policy Definition. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Type of Managed Identity which should be added to this Policy Definition. Possible values are SystemAssigned and UserAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ResourcePolicyAssignmentIdentityObservation struct { + + // A list of User Managed Identity IDs which should be assigned to the Policy Definition. 
+ // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Principal ID of the Policy Assignment for this Resource. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID of the Policy Assignment for this Resource. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // The Type of Managed Identity which should be added to this Policy Definition. Possible values are SystemAssigned and UserAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ResourcePolicyAssignmentIdentityParameters struct { + + // A list of User Managed Identity IDs which should be assigned to the Policy Definition. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Type of Managed Identity which should be added to this Policy Definition. Possible values are SystemAssigned and UserAssigned. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type ResourcePolicyAssignmentInitParameters struct { + + // A description which should be used for this Policy Assignment. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The Display Name for this Policy Assignment. + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // Specifies if this Policy should be enforced or not? Defaults to true. + Enforce *bool `json:"enforce,omitempty" tf:"enforce,omitempty"` + + // An identity block as defined below. + Identity *ResourcePolicyAssignmentIdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The Azure Region where the Policy Assignment should exist. Changing this forces a new Policy Assignment to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A JSON mapping of any Metadata for this Policy. 
+ Metadata *string `json:"metadata,omitempty" tf:"metadata,omitempty"` + + // The name which should be used for this Policy Assignment. Changing this forces a new Resource Policy Assignment to be created. Cannot exceed 64 characters in length. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // One or more non_compliance_message blocks as defined below. + NonComplianceMessage []ResourcePolicyAssignmentNonComplianceMessageInitParameters `json:"nonComplianceMessage,omitempty" tf:"non_compliance_message,omitempty"` + + // Specifies a list of Resource Scopes (for example a Subscription, or a Resource Group) within this Management Group which are excluded from this Policy. + NotScopes []*string `json:"notScopes,omitempty" tf:"not_scopes,omitempty"` + + // One or more overrides blocks as defined below. More detail about overrides and resource_selectors see policy assignment structure + Overrides []ResourcePolicyAssignmentOverridesInitParameters `json:"overrides,omitempty" tf:"overrides,omitempty"` + + // A JSON mapping of any Parameters for this Policy. + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The ID of the Policy Definition or Policy Definition Set. Changing this forces a new Policy Assignment to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/authorization/v1beta1.PolicyDefinition + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + PolicyDefinitionID *string `json:"policyDefinitionId,omitempty" tf:"policy_definition_id,omitempty"` + + // Reference to a PolicyDefinition in authorization to populate policyDefinitionId. + // +kubebuilder:validation:Optional + PolicyDefinitionIDRef *v1.Reference `json:"policyDefinitionIdRef,omitempty" tf:"-"` + + // Selector for a PolicyDefinition in authorization to populate policyDefinitionId. 
+ // +kubebuilder:validation:Optional + PolicyDefinitionIDSelector *v1.Selector `json:"policyDefinitionIdSelector,omitempty" tf:"-"` + + // The ID of the Resource (or Resource Scope) where this should be applied. Changing this forces a new Resource Policy Assignment to be created. + ResourceID *string `json:"resourceId,omitempty" tf:"resource_id,omitempty"` + + // One or more resource_selectors blocks as defined below to filter polices by resource properties. + ResourceSelectors []ResourcePolicyAssignmentResourceSelectorsInitParameters `json:"resourceSelectors,omitempty" tf:"resource_selectors,omitempty"` +} + +type ResourcePolicyAssignmentNonComplianceMessageInitParameters struct { + + // The non-compliance message text. When assigning policy sets (initiatives), unless policy_definition_reference_id is specified then this message will be the default for all policies. + Content *string `json:"content,omitempty" tf:"content,omitempty"` + + // When assigning policy sets (initiatives), this is the ID of the policy definition that the non-compliance message applies to. + PolicyDefinitionReferenceID *string `json:"policyDefinitionReferenceId,omitempty" tf:"policy_definition_reference_id,omitempty"` +} + +type ResourcePolicyAssignmentNonComplianceMessageObservation struct { + + // The non-compliance message text. When assigning policy sets (initiatives), unless policy_definition_reference_id is specified then this message will be the default for all policies. + Content *string `json:"content,omitempty" tf:"content,omitempty"` + + // When assigning policy sets (initiatives), this is the ID of the policy definition that the non-compliance message applies to. + PolicyDefinitionReferenceID *string `json:"policyDefinitionReferenceId,omitempty" tf:"policy_definition_reference_id,omitempty"` +} + +type ResourcePolicyAssignmentNonComplianceMessageParameters struct { + + // The non-compliance message text. 
When assigning policy sets (initiatives), unless policy_definition_reference_id is specified then this message will be the default for all policies. + // +kubebuilder:validation:Optional + Content *string `json:"content" tf:"content,omitempty"` + + // When assigning policy sets (initiatives), this is the ID of the policy definition that the non-compliance message applies to. + // +kubebuilder:validation:Optional + PolicyDefinitionReferenceID *string `json:"policyDefinitionReferenceId,omitempty" tf:"policy_definition_reference_id,omitempty"` +} + +type ResourcePolicyAssignmentObservation struct { + + // A description which should be used for this Policy Assignment. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The Display Name for this Policy Assignment. + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // Specifies if this Policy should be enforced or not? Defaults to true. + Enforce *bool `json:"enforce,omitempty" tf:"enforce,omitempty"` + + // The ID of the Resource Policy Assignment. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. + Identity *ResourcePolicyAssignmentIdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // The Azure Region where the Policy Assignment should exist. Changing this forces a new Policy Assignment to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A JSON mapping of any Metadata for this Policy. + Metadata *string `json:"metadata,omitempty" tf:"metadata,omitempty"` + + // The name which should be used for this Policy Assignment. Changing this forces a new Resource Policy Assignment to be created. Cannot exceed 64 characters in length. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // One or more non_compliance_message blocks as defined below. 
+ NonComplianceMessage []ResourcePolicyAssignmentNonComplianceMessageObservation `json:"nonComplianceMessage,omitempty" tf:"non_compliance_message,omitempty"` + + // Specifies a list of Resource Scopes (for example a Subscription, or a Resource Group) within this Management Group which are excluded from this Policy. + NotScopes []*string `json:"notScopes,omitempty" tf:"not_scopes,omitempty"` + + // One or more overrides blocks as defined below. More detail about overrides and resource_selectors see policy assignment structure + Overrides []ResourcePolicyAssignmentOverridesObservation `json:"overrides,omitempty" tf:"overrides,omitempty"` + + // A JSON mapping of any Parameters for this Policy. + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The ID of the Policy Definition or Policy Definition Set. Changing this forces a new Policy Assignment to be created. + PolicyDefinitionID *string `json:"policyDefinitionId,omitempty" tf:"policy_definition_id,omitempty"` + + // The ID of the Resource (or Resource Scope) where this should be applied. Changing this forces a new Resource Policy Assignment to be created. + ResourceID *string `json:"resourceId,omitempty" tf:"resource_id,omitempty"` + + // One or more resource_selectors blocks as defined below to filter polices by resource properties. + ResourceSelectors []ResourcePolicyAssignmentResourceSelectorsObservation `json:"resourceSelectors,omitempty" tf:"resource_selectors,omitempty"` +} + +type ResourcePolicyAssignmentOverridesInitParameters struct { + + // One or more override_selector block as defined below. + Selectors []OverridesSelectorsInitParameters `json:"selectors,omitempty" tf:"selectors,omitempty"` + + // Specifies the value to override the policy property. Possible values for policyEffect override listed policy effects. 
+ Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResourcePolicyAssignmentOverridesObservation struct { + + // One or more override_selector block as defined below. + Selectors []OverridesSelectorsObservation `json:"selectors,omitempty" tf:"selectors,omitempty"` + + // Specifies the value to override the policy property. Possible values for policyEffect override listed policy effects. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResourcePolicyAssignmentOverridesParameters struct { + + // One or more override_selector block as defined below. + // +kubebuilder:validation:Optional + Selectors []OverridesSelectorsParameters `json:"selectors,omitempty" tf:"selectors,omitempty"` + + // Specifies the value to override the policy property. Possible values for policyEffect override listed policy effects. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type ResourcePolicyAssignmentParameters struct { + + // A description which should be used for this Policy Assignment. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The Display Name for this Policy Assignment. + // +kubebuilder:validation:Optional + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // Specifies if this Policy should be enforced or not? Defaults to true. + // +kubebuilder:validation:Optional + Enforce *bool `json:"enforce,omitempty" tf:"enforce,omitempty"` + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *ResourcePolicyAssignmentIdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The Azure Region where the Policy Assignment should exist. Changing this forces a new Policy Assignment to be created. 
+ // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A JSON mapping of any Metadata for this Policy. + // +kubebuilder:validation:Optional + Metadata *string `json:"metadata,omitempty" tf:"metadata,omitempty"` + + // The name which should be used for this Policy Assignment. Changing this forces a new Resource Policy Assignment to be created. Cannot exceed 64 characters in length. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // One or more non_compliance_message blocks as defined below. + // +kubebuilder:validation:Optional + NonComplianceMessage []ResourcePolicyAssignmentNonComplianceMessageParameters `json:"nonComplianceMessage,omitempty" tf:"non_compliance_message,omitempty"` + + // Specifies a list of Resource Scopes (for example a Subscription, or a Resource Group) within this Management Group which are excluded from this Policy. + // +kubebuilder:validation:Optional + NotScopes []*string `json:"notScopes,omitempty" tf:"not_scopes,omitempty"` + + // One or more overrides blocks as defined below. More detail about overrides and resource_selectors see policy assignment structure + // +kubebuilder:validation:Optional + Overrides []ResourcePolicyAssignmentOverridesParameters `json:"overrides,omitempty" tf:"overrides,omitempty"` + + // A JSON mapping of any Parameters for this Policy. + // +kubebuilder:validation:Optional + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The ID of the Policy Definition or Policy Definition Set. Changing this forces a new Policy Assignment to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/authorization/v1beta1.PolicyDefinition + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + PolicyDefinitionID *string `json:"policyDefinitionId,omitempty" tf:"policy_definition_id,omitempty"` + + // Reference to a PolicyDefinition in authorization to populate policyDefinitionId. + // +kubebuilder:validation:Optional + PolicyDefinitionIDRef *v1.Reference `json:"policyDefinitionIdRef,omitempty" tf:"-"` + + // Selector for a PolicyDefinition in authorization to populate policyDefinitionId. + // +kubebuilder:validation:Optional + PolicyDefinitionIDSelector *v1.Selector `json:"policyDefinitionIdSelector,omitempty" tf:"-"` + + // The ID of the Resource (or Resource Scope) where this should be applied. Changing this forces a new Resource Policy Assignment to be created. + // +kubebuilder:validation:Optional + ResourceID *string `json:"resourceId,omitempty" tf:"resource_id,omitempty"` + + // One or more resource_selectors blocks as defined below to filter polices by resource properties. + // +kubebuilder:validation:Optional + ResourceSelectors []ResourcePolicyAssignmentResourceSelectorsParameters `json:"resourceSelectors,omitempty" tf:"resource_selectors,omitempty"` +} + +type ResourcePolicyAssignmentResourceSelectorsInitParameters struct { + + // Specifies a name for the resource selector. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // One or more resource_selector block as defined below. + Selectors []ResourcePolicyAssignmentResourceSelectorsSelectorsInitParameters `json:"selectors,omitempty" tf:"selectors,omitempty"` +} + +type ResourcePolicyAssignmentResourceSelectorsObservation struct { + + // Specifies a name for the resource selector. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // One or more resource_selector block as defined below. 
+ Selectors []ResourcePolicyAssignmentResourceSelectorsSelectorsObservation `json:"selectors,omitempty" tf:"selectors,omitempty"` +} + +type ResourcePolicyAssignmentResourceSelectorsParameters struct { + + // Specifies a name for the resource selector. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // One or more resource_selector block as defined below. + // +kubebuilder:validation:Optional + Selectors []ResourcePolicyAssignmentResourceSelectorsSelectorsParameters `json:"selectors" tf:"selectors,omitempty"` +} + +type ResourcePolicyAssignmentResourceSelectorsSelectorsInitParameters struct { + + // The list of allowed values for the specified kind. Cannot be used with not_in. Can contain up to 50 values. + In []*string `json:"in,omitempty" tf:"in,omitempty"` + + // Specifies which characteristic will narrow down the set of evaluated resources. Possible values are resourceLocation, resourceType and resourceWithoutLocation. + Kind *string `json:"kind,omitempty" tf:"kind,omitempty"` + + // The list of not-allowed values for the specified kind. Cannot be used with in. Can contain up to 50 values. + NotIn []*string `json:"notIn,omitempty" tf:"not_in,omitempty"` +} + +type ResourcePolicyAssignmentResourceSelectorsSelectorsObservation struct { + + // The list of allowed values for the specified kind. Cannot be used with not_in. Can contain up to 50 values. + In []*string `json:"in,omitempty" tf:"in,omitempty"` + + // Specifies which characteristic will narrow down the set of evaluated resources. Possible values are resourceLocation, resourceType and resourceWithoutLocation. + Kind *string `json:"kind,omitempty" tf:"kind,omitempty"` + + // The list of not-allowed values for the specified kind. Cannot be used with in. Can contain up to 50 values. 
+ NotIn []*string `json:"notIn,omitempty" tf:"not_in,omitempty"` +} + +type ResourcePolicyAssignmentResourceSelectorsSelectorsParameters struct { + + // The list of allowed values for the specified kind. Cannot be used with not_in. Can contain up to 50 values. + // +kubebuilder:validation:Optional + In []*string `json:"in,omitempty" tf:"in,omitempty"` + + // Specifies which characteristic will narrow down the set of evaluated resources. Possible values are resourceLocation, resourceType and resourceWithoutLocation. + // +kubebuilder:validation:Optional + Kind *string `json:"kind" tf:"kind,omitempty"` + + // The list of not-allowed values for the specified kind. Cannot be used with in. Can contain up to 50 values. + // +kubebuilder:validation:Optional + NotIn []*string `json:"notIn,omitempty" tf:"not_in,omitempty"` +} + +// ResourcePolicyAssignmentSpec defines the desired state of ResourcePolicyAssignment +type ResourcePolicyAssignmentSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ResourcePolicyAssignmentParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ResourcePolicyAssignmentInitParameters `json:"initProvider,omitempty"` +} + +// ResourcePolicyAssignmentStatus defines the observed state of ResourcePolicyAssignment. 
+type ResourcePolicyAssignmentStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ResourcePolicyAssignmentObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ResourcePolicyAssignment is the Schema for the ResourcePolicyAssignments API. Manages a Policy Assignment to a Resource. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type ResourcePolicyAssignment struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.resourceId) || (has(self.initProvider) && has(self.initProvider.resourceId))",message="spec.forProvider.resourceId is a required parameter" + Spec ResourcePolicyAssignmentSpec `json:"spec"` + Status ResourcePolicyAssignmentStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ResourcePolicyAssignmentList contains a list of ResourcePolicyAssignments +type ResourcePolicyAssignmentList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` 
+ Items []ResourcePolicyAssignment `json:"items"` +} + +// Repository type metadata. +var ( + ResourcePolicyAssignment_Kind = "ResourcePolicyAssignment" + ResourcePolicyAssignment_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ResourcePolicyAssignment_Kind}.String() + ResourcePolicyAssignment_KindAPIVersion = ResourcePolicyAssignment_Kind + "." + CRDGroupVersion.String() + ResourcePolicyAssignment_GroupVersionKind = CRDGroupVersion.WithKind(ResourcePolicyAssignment_Kind) +) + +func init() { + SchemeBuilder.Register(&ResourcePolicyAssignment{}, &ResourcePolicyAssignmentList{}) +} diff --git a/apis/authorization/v1beta2/zz_subscriptionpolicyassignment_terraformed.go b/apis/authorization/v1beta2/zz_subscriptionpolicyassignment_terraformed.go new file mode 100755 index 000000000..40f84ef5e --- /dev/null +++ b/apis/authorization/v1beta2/zz_subscriptionpolicyassignment_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this SubscriptionPolicyAssignment +func (mg *SubscriptionPolicyAssignment) GetTerraformResourceType() string { + return "azurerm_subscription_policy_assignment" +} + +// GetConnectionDetailsMapping for this SubscriptionPolicyAssignment +func (tr *SubscriptionPolicyAssignment) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this SubscriptionPolicyAssignment +func (tr *SubscriptionPolicyAssignment) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this SubscriptionPolicyAssignment +func (tr *SubscriptionPolicyAssignment) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this SubscriptionPolicyAssignment +func (tr *SubscriptionPolicyAssignment) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this SubscriptionPolicyAssignment +func (tr *SubscriptionPolicyAssignment) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this SubscriptionPolicyAssignment +func (tr *SubscriptionPolicyAssignment) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, 
&tr.Spec.ForProvider) +} + +// GetInitParameters of this SubscriptionPolicyAssignment +func (tr *SubscriptionPolicyAssignment) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this SubscriptionPolicyAssignment +func (tr *SubscriptionPolicyAssignment) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this SubscriptionPolicyAssignment using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *SubscriptionPolicyAssignment) LateInitialize(attrs []byte) (bool, error) { + params := &SubscriptionPolicyAssignmentParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *SubscriptionPolicyAssignment) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/authorization/v1beta2/zz_subscriptionpolicyassignment_types.go b/apis/authorization/v1beta2/zz_subscriptionpolicyassignment_types.go new file mode 100755 index 000000000..e2b55033b --- /dev/null +++ b/apis/authorization/v1beta2/zz_subscriptionpolicyassignment_types.go @@ -0,0 +1,433 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type SubscriptionPolicyAssignmentIdentityInitParameters struct { + + // A list of User Managed Identity IDs which should be assigned to the Policy Definition. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Type of Managed Identity which should be added to this Policy Definition. Possible values are SystemAssigned or UserAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type SubscriptionPolicyAssignmentIdentityObservation struct { + + // A list of User Managed Identity IDs which should be assigned to the Policy Definition. 
+ // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Principal ID of the Policy Assignment for this Subscription. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID of the Policy Assignment for this Subscription. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // The Type of Managed Identity which should be added to this Policy Definition. Possible values are SystemAssigned or UserAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type SubscriptionPolicyAssignmentIdentityParameters struct { + + // A list of User Managed Identity IDs which should be assigned to the Policy Definition. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Type of Managed Identity which should be added to this Policy Definition. Possible values are SystemAssigned or UserAssigned. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type SubscriptionPolicyAssignmentInitParameters struct { + + // A description which should be used for this Policy Assignment. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The Display Name for this Policy Assignment. + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // Specifies if this Policy should be enforced or not? Defaults to true. + Enforce *bool `json:"enforce,omitempty" tf:"enforce,omitempty"` + + // An identity block as defined below. + Identity *SubscriptionPolicyAssignmentIdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The Azure Region where the Policy Assignment should exist. Changing this forces a new Policy Assignment to be created. 
+ Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A JSON mapping of any Metadata for this Policy. + Metadata *string `json:"metadata,omitempty" tf:"metadata,omitempty"` + + // One or more non_compliance_message blocks as defined below. + NonComplianceMessage []SubscriptionPolicyAssignmentNonComplianceMessageInitParameters `json:"nonComplianceMessage,omitempty" tf:"non_compliance_message,omitempty"` + + // Specifies a list of Resource Scopes (for example a Subscription, or a Resource Group) within this Management Group which are excluded from this Policy. + NotScopes []*string `json:"notScopes,omitempty" tf:"not_scopes,omitempty"` + + // One or more overrides blocks as defined below. More detail about overrides and resource_selectors see policy assignment structure + Overrides []SubscriptionPolicyAssignmentOverridesInitParameters `json:"overrides,omitempty" tf:"overrides,omitempty"` + + // A JSON mapping of any Parameters for this Policy. + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The ID of the Policy Definition or Policy Definition Set. Changing this forces a new Policy Assignment to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/authorization/v1beta1.PolicyDefinition + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + PolicyDefinitionID *string `json:"policyDefinitionId,omitempty" tf:"policy_definition_id,omitempty"` + + // Reference to a PolicyDefinition in authorization to populate policyDefinitionId. + // +kubebuilder:validation:Optional + PolicyDefinitionIDRef *v1.Reference `json:"policyDefinitionIdRef,omitempty" tf:"-"` + + // Selector for a PolicyDefinition in authorization to populate policyDefinitionId. 
+ // +kubebuilder:validation:Optional + PolicyDefinitionIDSelector *v1.Selector `json:"policyDefinitionIdSelector,omitempty" tf:"-"` + + // One or more resource_selectors blocks as defined below to filter polices by resource properties. + ResourceSelectors []SubscriptionPolicyAssignmentResourceSelectorsInitParameters `json:"resourceSelectors,omitempty" tf:"resource_selectors,omitempty"` + + // The ID of the Subscription where this Policy Assignment should be created. Changing this forces a new Policy Assignment to be created. + SubscriptionID *string `json:"subscriptionId,omitempty" tf:"subscription_id,omitempty"` +} + +type SubscriptionPolicyAssignmentNonComplianceMessageInitParameters struct { + + // The non-compliance message text. When assigning policy sets (initiatives), unless policy_definition_reference_id is specified then this message will be the default for all policies. + Content *string `json:"content,omitempty" tf:"content,omitempty"` + + // When assigning policy sets (initiatives), this is the ID of the policy definition that the non-compliance message applies to. + PolicyDefinitionReferenceID *string `json:"policyDefinitionReferenceId,omitempty" tf:"policy_definition_reference_id,omitempty"` +} + +type SubscriptionPolicyAssignmentNonComplianceMessageObservation struct { + + // The non-compliance message text. When assigning policy sets (initiatives), unless policy_definition_reference_id is specified then this message will be the default for all policies. + Content *string `json:"content,omitempty" tf:"content,omitempty"` + + // When assigning policy sets (initiatives), this is the ID of the policy definition that the non-compliance message applies to. + PolicyDefinitionReferenceID *string `json:"policyDefinitionReferenceId,omitempty" tf:"policy_definition_reference_id,omitempty"` +} + +type SubscriptionPolicyAssignmentNonComplianceMessageParameters struct { + + // The non-compliance message text. 
When assigning policy sets (initiatives), unless policy_definition_reference_id is specified then this message will be the default for all policies. + // +kubebuilder:validation:Optional + Content *string `json:"content" tf:"content,omitempty"` + + // When assigning policy sets (initiatives), this is the ID of the policy definition that the non-compliance message applies to. + // +kubebuilder:validation:Optional + PolicyDefinitionReferenceID *string `json:"policyDefinitionReferenceId,omitempty" tf:"policy_definition_reference_id,omitempty"` +} + +type SubscriptionPolicyAssignmentObservation struct { + + // A description which should be used for this Policy Assignment. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The Display Name for this Policy Assignment. + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // Specifies if this Policy should be enforced or not? Defaults to true. + Enforce *bool `json:"enforce,omitempty" tf:"enforce,omitempty"` + + // The ID of the Subscription Policy Assignment. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. + Identity *SubscriptionPolicyAssignmentIdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // The Azure Region where the Policy Assignment should exist. Changing this forces a new Policy Assignment to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A JSON mapping of any Metadata for this Policy. + Metadata *string `json:"metadata,omitempty" tf:"metadata,omitempty"` + + // One or more non_compliance_message blocks as defined below. 
+ NonComplianceMessage []SubscriptionPolicyAssignmentNonComplianceMessageObservation `json:"nonComplianceMessage,omitempty" tf:"non_compliance_message,omitempty"` + + // Specifies a list of Resource Scopes (for example a Subscription, or a Resource Group) within this Management Group which are excluded from this Policy. + NotScopes []*string `json:"notScopes,omitempty" tf:"not_scopes,omitempty"` + + // One or more overrides blocks as defined below. More detail about overrides and resource_selectors see policy assignment structure + Overrides []SubscriptionPolicyAssignmentOverridesObservation `json:"overrides,omitempty" tf:"overrides,omitempty"` + + // A JSON mapping of any Parameters for this Policy. + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The ID of the Policy Definition or Policy Definition Set. Changing this forces a new Policy Assignment to be created. + PolicyDefinitionID *string `json:"policyDefinitionId,omitempty" tf:"policy_definition_id,omitempty"` + + // One or more resource_selectors blocks as defined below to filter polices by resource properties. + ResourceSelectors []SubscriptionPolicyAssignmentResourceSelectorsObservation `json:"resourceSelectors,omitempty" tf:"resource_selectors,omitempty"` + + // The ID of the Subscription where this Policy Assignment should be created. Changing this forces a new Policy Assignment to be created. + SubscriptionID *string `json:"subscriptionId,omitempty" tf:"subscription_id,omitempty"` +} + +type SubscriptionPolicyAssignmentOverridesInitParameters struct { + + // One or more override_selector block as defined below. + Selectors []SubscriptionPolicyAssignmentOverridesSelectorsInitParameters `json:"selectors,omitempty" tf:"selectors,omitempty"` + + // Specifies the value to override the policy property. Possible values for policyEffect override listed policy effects. 
+ Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type SubscriptionPolicyAssignmentOverridesObservation struct { + + // One or more override_selector block as defined below. + Selectors []SubscriptionPolicyAssignmentOverridesSelectorsObservation `json:"selectors,omitempty" tf:"selectors,omitempty"` + + // Specifies the value to override the policy property. Possible values for policyEffect override listed policy effects. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type SubscriptionPolicyAssignmentOverridesParameters struct { + + // One or more override_selector block as defined below. + // +kubebuilder:validation:Optional + Selectors []SubscriptionPolicyAssignmentOverridesSelectorsParameters `json:"selectors,omitempty" tf:"selectors,omitempty"` + + // Specifies the value to override the policy property. Possible values for policyEffect override listed policy effects. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type SubscriptionPolicyAssignmentOverridesSelectorsInitParameters struct { + + // The list of allowed values for the specified kind. Cannot be used with not_in. Can contain up to 50 values. + In []*string `json:"in,omitempty" tf:"in,omitempty"` + + // The list of not-allowed values for the specified kind. Cannot be used with in. Can contain up to 50 values. + NotIn []*string `json:"notIn,omitempty" tf:"not_in,omitempty"` +} + +type SubscriptionPolicyAssignmentOverridesSelectorsObservation struct { + + // The list of allowed values for the specified kind. Cannot be used with not_in. Can contain up to 50 values. + In []*string `json:"in,omitempty" tf:"in,omitempty"` + + // Specifies which characteristic will narrow down the set of evaluated resources. Possible values are resourceLocation, resourceType and resourceWithoutLocation. + Kind *string `json:"kind,omitempty" tf:"kind,omitempty"` + + // The list of not-allowed values for the specified kind. 
Cannot be used with in. Can contain up to 50 values. + NotIn []*string `json:"notIn,omitempty" tf:"not_in,omitempty"` +} + +type SubscriptionPolicyAssignmentOverridesSelectorsParameters struct { + + // The list of allowed values for the specified kind. Cannot be used with not_in. Can contain up to 50 values. + // +kubebuilder:validation:Optional + In []*string `json:"in,omitempty" tf:"in,omitempty"` + + // The list of not-allowed values for the specified kind. Cannot be used with in. Can contain up to 50 values. + // +kubebuilder:validation:Optional + NotIn []*string `json:"notIn,omitempty" tf:"not_in,omitempty"` +} + +type SubscriptionPolicyAssignmentParameters struct { + + // A description which should be used for this Policy Assignment. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The Display Name for this Policy Assignment. + // +kubebuilder:validation:Optional + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // Specifies if this Policy should be enforced or not? Defaults to true. + // +kubebuilder:validation:Optional + Enforce *bool `json:"enforce,omitempty" tf:"enforce,omitempty"` + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *SubscriptionPolicyAssignmentIdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The Azure Region where the Policy Assignment should exist. Changing this forces a new Policy Assignment to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A JSON mapping of any Metadata for this Policy. + // +kubebuilder:validation:Optional + Metadata *string `json:"metadata,omitempty" tf:"metadata,omitempty"` + + // One or more non_compliance_message blocks as defined below. 
+ // +kubebuilder:validation:Optional + NonComplianceMessage []SubscriptionPolicyAssignmentNonComplianceMessageParameters `json:"nonComplianceMessage,omitempty" tf:"non_compliance_message,omitempty"` + + // Specifies a list of Resource Scopes (for example a Subscription, or a Resource Group) within this Management Group which are excluded from this Policy. + // +kubebuilder:validation:Optional + NotScopes []*string `json:"notScopes,omitempty" tf:"not_scopes,omitempty"` + + // One or more overrides blocks as defined below. More detail about overrides and resource_selectors see policy assignment structure + // +kubebuilder:validation:Optional + Overrides []SubscriptionPolicyAssignmentOverridesParameters `json:"overrides,omitempty" tf:"overrides,omitempty"` + + // A JSON mapping of any Parameters for this Policy. + // +kubebuilder:validation:Optional + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The ID of the Policy Definition or Policy Definition Set. Changing this forces a new Policy Assignment to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/authorization/v1beta1.PolicyDefinition + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + PolicyDefinitionID *string `json:"policyDefinitionId,omitempty" tf:"policy_definition_id,omitempty"` + + // Reference to a PolicyDefinition in authorization to populate policyDefinitionId. + // +kubebuilder:validation:Optional + PolicyDefinitionIDRef *v1.Reference `json:"policyDefinitionIdRef,omitempty" tf:"-"` + + // Selector for a PolicyDefinition in authorization to populate policyDefinitionId. + // +kubebuilder:validation:Optional + PolicyDefinitionIDSelector *v1.Selector `json:"policyDefinitionIdSelector,omitempty" tf:"-"` + + // One or more resource_selectors blocks as defined below to filter polices by resource properties. 
+ // +kubebuilder:validation:Optional + ResourceSelectors []SubscriptionPolicyAssignmentResourceSelectorsParameters `json:"resourceSelectors,omitempty" tf:"resource_selectors,omitempty"` + + // The ID of the Subscription where this Policy Assignment should be created. Changing this forces a new Policy Assignment to be created. + // +kubebuilder:validation:Optional + SubscriptionID *string `json:"subscriptionId,omitempty" tf:"subscription_id,omitempty"` +} + +type SubscriptionPolicyAssignmentResourceSelectorsInitParameters struct { + + // Specifies a name for the resource selector. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // One or more resource_selector block as defined below. + Selectors []SubscriptionPolicyAssignmentResourceSelectorsSelectorsInitParameters `json:"selectors,omitempty" tf:"selectors,omitempty"` +} + +type SubscriptionPolicyAssignmentResourceSelectorsObservation struct { + + // Specifies a name for the resource selector. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // One or more resource_selector block as defined below. + Selectors []SubscriptionPolicyAssignmentResourceSelectorsSelectorsObservation `json:"selectors,omitempty" tf:"selectors,omitempty"` +} + +type SubscriptionPolicyAssignmentResourceSelectorsParameters struct { + + // Specifies a name for the resource selector. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // One or more resource_selector block as defined below. + // +kubebuilder:validation:Optional + Selectors []SubscriptionPolicyAssignmentResourceSelectorsSelectorsParameters `json:"selectors" tf:"selectors,omitempty"` +} + +type SubscriptionPolicyAssignmentResourceSelectorsSelectorsInitParameters struct { + + // The list of allowed values for the specified kind. Cannot be used with not_in. Can contain up to 50 values. 
+ In []*string `json:"in,omitempty" tf:"in,omitempty"` + + // Specifies which characteristic will narrow down the set of evaluated resources. Possible values are resourceLocation, resourceType and resourceWithoutLocation. + Kind *string `json:"kind,omitempty" tf:"kind,omitempty"` + + // The list of not-allowed values for the specified kind. Cannot be used with in. Can contain up to 50 values. + NotIn []*string `json:"notIn,omitempty" tf:"not_in,omitempty"` +} + +type SubscriptionPolicyAssignmentResourceSelectorsSelectorsObservation struct { + + // The list of allowed values for the specified kind. Cannot be used with not_in. Can contain up to 50 values. + In []*string `json:"in,omitempty" tf:"in,omitempty"` + + // Specifies which characteristic will narrow down the set of evaluated resources. Possible values are resourceLocation, resourceType and resourceWithoutLocation. + Kind *string `json:"kind,omitempty" tf:"kind,omitempty"` + + // The list of not-allowed values for the specified kind. Cannot be used with in. Can contain up to 50 values. + NotIn []*string `json:"notIn,omitempty" tf:"not_in,omitempty"` +} + +type SubscriptionPolicyAssignmentResourceSelectorsSelectorsParameters struct { + + // The list of allowed values for the specified kind. Cannot be used with not_in. Can contain up to 50 values. + // +kubebuilder:validation:Optional + In []*string `json:"in,omitempty" tf:"in,omitempty"` + + // Specifies which characteristic will narrow down the set of evaluated resources. Possible values are resourceLocation, resourceType and resourceWithoutLocation. + // +kubebuilder:validation:Optional + Kind *string `json:"kind" tf:"kind,omitempty"` + + // The list of not-allowed values for the specified kind. Cannot be used with in. Can contain up to 50 values. 
+ // +kubebuilder:validation:Optional + NotIn []*string `json:"notIn,omitempty" tf:"not_in,omitempty"` +} + +// SubscriptionPolicyAssignmentSpec defines the desired state of SubscriptionPolicyAssignment +type SubscriptionPolicyAssignmentSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider SubscriptionPolicyAssignmentParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider SubscriptionPolicyAssignmentInitParameters `json:"initProvider,omitempty"` +} + +// SubscriptionPolicyAssignmentStatus defines the observed state of SubscriptionPolicyAssignment. +type SubscriptionPolicyAssignmentStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SubscriptionPolicyAssignmentObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// SubscriptionPolicyAssignment is the Schema for the SubscriptionPolicyAssignments API. Manages a Subscription Policy Assignment. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type SubscriptionPolicyAssignment struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.subscriptionId) || (has(self.initProvider) && has(self.initProvider.subscriptionId))",message="spec.forProvider.subscriptionId is a required parameter" + Spec SubscriptionPolicyAssignmentSpec `json:"spec"` + Status SubscriptionPolicyAssignmentStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SubscriptionPolicyAssignmentList contains a list of SubscriptionPolicyAssignments +type SubscriptionPolicyAssignmentList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SubscriptionPolicyAssignment `json:"items"` +} + +// Repository type metadata. +var ( + SubscriptionPolicyAssignment_Kind = "SubscriptionPolicyAssignment" + SubscriptionPolicyAssignment_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: SubscriptionPolicyAssignment_Kind}.String() + SubscriptionPolicyAssignment_KindAPIVersion = SubscriptionPolicyAssignment_Kind + "." 
+ CRDGroupVersion.String() + SubscriptionPolicyAssignment_GroupVersionKind = CRDGroupVersion.WithKind(SubscriptionPolicyAssignment_Kind) +) + +func init() { + SchemeBuilder.Register(&SubscriptionPolicyAssignment{}, &SubscriptionPolicyAssignmentList{}) +} diff --git a/apis/automation/v1beta1/zz_connection_types.go b/apis/automation/v1beta1/zz_connection_types.go index 0fda382d4..df424983c 100755 --- a/apis/automation/v1beta1/zz_connection_types.go +++ b/apis/automation/v1beta1/zz_connection_types.go @@ -51,7 +51,7 @@ type ConnectionObservation struct { type ConnectionParameters struct { // The name of the automation account in which the Connection is created. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/automation/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/automation/v1beta2.Account // +kubebuilder:validation:Optional AutomationAccountName *string `json:"automationAccountName,omitempty" tf:"automation_account_name,omitempty"` diff --git a/apis/automation/v1beta1/zz_connectionclassiccertificate_types.go b/apis/automation/v1beta1/zz_connectionclassiccertificate_types.go index 9ce0f0bb2..0d3e27c9a 100755 --- a/apis/automation/v1beta1/zz_connectionclassiccertificate_types.go +++ b/apis/automation/v1beta1/zz_connectionclassiccertificate_types.go @@ -55,7 +55,7 @@ type ConnectionClassicCertificateObservation struct { type ConnectionClassicCertificateParameters struct { // The name of the automation account in which the Connection is created. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/automation/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/automation/v1beta2.Account // +kubebuilder:validation:Optional AutomationAccountName *string `json:"automationAccountName,omitempty" tf:"automation_account_name,omitempty"` diff --git a/apis/automation/v1beta1/zz_connectiontype_types.go b/apis/automation/v1beta1/zz_connectiontype_types.go index 125affbaa..6bed6318b 100755 --- a/apis/automation/v1beta1/zz_connectiontype_types.go +++ b/apis/automation/v1beta1/zz_connectiontype_types.go @@ -16,7 +16,7 @@ import ( type ConnectionTypeInitParameters struct { // The name of the automation account in which the Connection is created. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/automation/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/automation/v1beta2.Account AutomationAccountName *string `json:"automationAccountName,omitempty" tf:"automation_account_name,omitempty"` // Reference to a Account in automation to populate automationAccountName. @@ -73,7 +73,7 @@ type ConnectionTypeObservation struct { type ConnectionTypeParameters struct { // The name of the automation account in which the Connection is created. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/automation/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/automation/v1beta2.Account // +kubebuilder:validation:Optional AutomationAccountName *string `json:"automationAccountName,omitempty" tf:"automation_account_name,omitempty"` diff --git a/apis/automation/v1beta1/zz_credential_types.go b/apis/automation/v1beta1/zz_credential_types.go index 461bc78f1..facf31116 100755 --- a/apis/automation/v1beta1/zz_credential_types.go +++ b/apis/automation/v1beta1/zz_credential_types.go @@ -43,7 +43,7 @@ type CredentialObservation struct { type CredentialParameters struct { // The name of the automation account in which the Credential is created. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/automation/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/automation/v1beta2.Account // +kubebuilder:validation:Optional AutomationAccountName *string `json:"automationAccountName,omitempty" tf:"automation_account_name,omitempty"` diff --git a/apis/automation/v1beta1/zz_generated.conversion_hubs.go b/apis/automation/v1beta1/zz_generated.conversion_hubs.go index deff497bd..ef78abcbc 100755 --- a/apis/automation/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/automation/v1beta1/zz_generated.conversion_hubs.go @@ -6,9 +6,6 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *Account) Hub() {} - // Hub marks this type as a conversion hub. func (tr *Connection) Hub() {} @@ -24,15 +21,6 @@ func (tr *Credential) Hub() {} // Hub marks this type as a conversion hub. func (tr *HybridRunBookWorkerGroup) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *Module) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *RunBook) Hub() {} - -// Hub marks this type as a conversion hub. 
-func (tr *Schedule) Hub() {} - // Hub marks this type as a conversion hub. func (tr *VariableBool) Hub() {} diff --git a/apis/automation/v1beta1/zz_generated.conversion_spokes.go b/apis/automation/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..68b6cd661 --- /dev/null +++ b/apis/automation/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,94 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Account to the hub type. +func (tr *Account) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Account type. +func (tr *Account) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Module to the hub type. 
+func (tr *Module) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Module type. +func (tr *Module) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this RunBook to the hub type. +func (tr *RunBook) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the RunBook type. +func (tr *RunBook) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Schedule to the hub type. 
+func (tr *Schedule) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Schedule type. +func (tr *Schedule) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/automation/v1beta1/zz_generated.resolvers.go b/apis/automation/v1beta1/zz_generated.resolvers.go index b45893819..a8867aebd 100644 --- a/apis/automation/v1beta1/zz_generated.resolvers.go +++ b/apis/automation/v1beta1/zz_generated.resolvers.go @@ -58,7 +58,7 @@ func (mg *Connection) ResolveReferences(ctx context.Context, c client.Reader) er var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("automation.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("automation.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -108,7 +108,7 @@ func (mg *ConnectionClassicCertificate) ResolveReferences(ctx context.Context, c var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("automation.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = 
apisresolver.GetManagedResource("automation.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -158,7 +158,7 @@ func (mg *ConnectionType) ResolveReferences(ctx context.Context, c client.Reader var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("automation.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("automation.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -196,7 +196,7 @@ func (mg *ConnectionType) ResolveReferences(ctx context.Context, c client.Reader mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("automation.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("automation.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -246,7 +246,7 @@ func (mg *Credential) ResolveReferences(ctx context.Context, c client.Reader) er var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("automation.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("automation.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -296,7 +296,7 @@ func (mg *HybridRunBookWorkerGroup) ResolveReferences(ctx context.Context, c cli var rsp 
reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("automation.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("automation.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -334,7 +334,7 @@ func (mg *HybridRunBookWorkerGroup) ResolveReferences(ctx context.Context, c cli mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("automation.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("automation.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -572,7 +572,7 @@ func (mg *VariableBool) ResolveReferences(ctx context.Context, c client.Reader) var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("automation.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("automation.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -622,7 +622,7 @@ func (mg *VariableDateTime) ResolveReferences(ctx context.Context, c client.Read var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("automation.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("automation.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target 
managed resource and its list for reference resolution") } @@ -672,7 +672,7 @@ func (mg *VariableInt) ResolveReferences(ctx context.Context, c client.Reader) e var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("automation.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("automation.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -722,7 +722,7 @@ func (mg *VariableString) ResolveReferences(ctx context.Context, c client.Reader var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("automation.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("automation.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -772,7 +772,7 @@ func (mg *Webhook) ResolveReferences(ctx context.Context, c client.Reader) error var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("automation.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("automation.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -810,7 +810,7 @@ func (mg *Webhook) ResolveReferences(ctx context.Context, c client.Reader) error mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("automation.azure.upbound.io", "v1beta1", "RunBook", "RunBookList") + m, l, err = 
apisresolver.GetManagedResource("automation.azure.upbound.io", "v1beta2", "RunBook", "RunBookList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -829,7 +829,7 @@ func (mg *Webhook) ResolveReferences(ctx context.Context, c client.Reader) error mg.Spec.ForProvider.RunBookName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.RunBookNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("automation.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("automation.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -867,7 +867,7 @@ func (mg *Webhook) ResolveReferences(ctx context.Context, c client.Reader) error mg.Spec.InitProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("automation.azure.upbound.io", "v1beta1", "RunBook", "RunBookList") + m, l, err = apisresolver.GetManagedResource("automation.azure.upbound.io", "v1beta2", "RunBook", "RunBookList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/automation/v1beta1/zz_hybridrunbookworkergroup_types.go b/apis/automation/v1beta1/zz_hybridrunbookworkergroup_types.go index 6df374261..b96cc7f43 100755 --- a/apis/automation/v1beta1/zz_hybridrunbookworkergroup_types.go +++ b/apis/automation/v1beta1/zz_hybridrunbookworkergroup_types.go @@ -16,7 +16,7 @@ import ( type HybridRunBookWorkerGroupInitParameters struct { // The name of the Automation Account in which the Runbook Worker Group is created. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/automation/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/automation/v1beta2.Account AutomationAccountName *string `json:"automationAccountName,omitempty" tf:"automation_account_name,omitempty"` // Reference to a Account in automation to populate automationAccountName. @@ -67,7 +67,7 @@ type HybridRunBookWorkerGroupObservation struct { type HybridRunBookWorkerGroupParameters struct { // The name of the Automation Account in which the Runbook Worker Group is created. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/automation/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/automation/v1beta2.Account // +kubebuilder:validation:Optional AutomationAccountName *string `json:"automationAccountName,omitempty" tf:"automation_account_name,omitempty"` diff --git a/apis/automation/v1beta1/zz_variablebool_types.go b/apis/automation/v1beta1/zz_variablebool_types.go index cb33b1263..820ef8df9 100755 --- a/apis/automation/v1beta1/zz_variablebool_types.go +++ b/apis/automation/v1beta1/zz_variablebool_types.go @@ -49,7 +49,7 @@ type VariableBoolObservation struct { type VariableBoolParameters struct { // The name of the automation account in which the Variable is created. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/automation/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/automation/v1beta2.Account // +kubebuilder:validation:Optional AutomationAccountName *string `json:"automationAccountName,omitempty" tf:"automation_account_name,omitempty"` diff --git a/apis/automation/v1beta1/zz_variabledatetime_types.go b/apis/automation/v1beta1/zz_variabledatetime_types.go index d73f489da..deed7e242 100755 --- a/apis/automation/v1beta1/zz_variabledatetime_types.go +++ b/apis/automation/v1beta1/zz_variabledatetime_types.go @@ -49,7 +49,7 @@ type VariableDateTimeObservation struct { type VariableDateTimeParameters struct { // The name of the automation account in which the Variable is created. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/automation/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/automation/v1beta2.Account // +kubebuilder:validation:Optional AutomationAccountName *string `json:"automationAccountName,omitempty" tf:"automation_account_name,omitempty"` diff --git a/apis/automation/v1beta1/zz_variableint_types.go b/apis/automation/v1beta1/zz_variableint_types.go index e761459ea..33848a5dd 100755 --- a/apis/automation/v1beta1/zz_variableint_types.go +++ b/apis/automation/v1beta1/zz_variableint_types.go @@ -49,7 +49,7 @@ type VariableIntObservation struct { type VariableIntParameters struct { // The name of the automation account in which the Variable is created. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/automation/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/automation/v1beta2.Account // +kubebuilder:validation:Optional AutomationAccountName *string `json:"automationAccountName,omitempty" tf:"automation_account_name,omitempty"` diff --git a/apis/automation/v1beta1/zz_variablestring_types.go b/apis/automation/v1beta1/zz_variablestring_types.go index 5c0da1530..ef5a50a87 100755 --- a/apis/automation/v1beta1/zz_variablestring_types.go +++ b/apis/automation/v1beta1/zz_variablestring_types.go @@ -49,7 +49,7 @@ type VariableStringObservation struct { type VariableStringParameters struct { // The name of the automation account in which the Variable is created. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/automation/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/automation/v1beta2.Account // +kubebuilder:validation:Optional AutomationAccountName *string `json:"automationAccountName,omitempty" tf:"automation_account_name,omitempty"` diff --git a/apis/automation/v1beta1/zz_webhook_types.go b/apis/automation/v1beta1/zz_webhook_types.go index ff104b583..fd192e09e 100755 --- a/apis/automation/v1beta1/zz_webhook_types.go +++ b/apis/automation/v1beta1/zz_webhook_types.go @@ -16,7 +16,7 @@ import ( type WebhookInitParameters struct { // The name of the automation account in which the Webhook is created. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/automation/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/automation/v1beta2.Account AutomationAccountName *string `json:"automationAccountName,omitempty" tf:"automation_account_name,omitempty"` // Reference to a Account in automation to populate automationAccountName. @@ -53,7 +53,7 @@ type WebhookInitParameters struct { ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` // Name of the Automation Runbook to execute by Webhook. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/automation/v1beta1.RunBook + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/automation/v1beta2.RunBook // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("name",false) RunBookName *string `json:"runbookName,omitempty" tf:"runbook_name,omitempty"` @@ -103,7 +103,7 @@ type WebhookObservation struct { type WebhookParameters struct { // The name of the automation account in which the Webhook is created. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/automation/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/automation/v1beta2.Account // +kubebuilder:validation:Optional AutomationAccountName *string `json:"automationAccountName,omitempty" tf:"automation_account_name,omitempty"` @@ -146,7 +146,7 @@ type WebhookParameters struct { ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` // Name of the Automation Runbook to execute by Webhook. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/automation/v1beta1.RunBook + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/automation/v1beta2.RunBook // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("name",false) // +kubebuilder:validation:Optional RunBookName *string `json:"runbookName,omitempty" tf:"runbook_name,omitempty"` diff --git a/apis/automation/v1beta2/zz_account_terraformed.go b/apis/automation/v1beta2/zz_account_terraformed.go new file mode 100755 index 000000000..30f50d6f6 --- /dev/null +++ b/apis/automation/v1beta2/zz_account_terraformed.go @@ -0,0 +1,130 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Account +func (mg *Account) GetTerraformResourceType() string { + return "azurerm_automation_account" +} + +// GetConnectionDetailsMapping for this Account +func (tr *Account) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"dsc_primary_access_key": "status.atProvider.dscPrimaryAccessKey", "dsc_secondary_access_key": "status.atProvider.dscSecondaryAccessKey"} +} + +// GetObservation of this Account +func (tr *Account) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Account +func (tr *Account) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + 
+// GetID returns ID of underlying Terraform resource of this Account +func (tr *Account) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Account +func (tr *Account) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Account +func (tr *Account) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Account +func (tr *Account) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this Account +func (tr *Account) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Account using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Account) LateInitialize(attrs []byte) (bool, error) { + params := &AccountParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + opts = append(opts, resource.WithNameFilter("Encryption")) + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Account) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/automation/v1beta2/zz_account_types.go b/apis/automation/v1beta2/zz_account_types.go new file mode 100755 index 000000000..2c228c812 --- /dev/null +++ b/apis/automation/v1beta2/zz_account_types.go @@ -0,0 +1,272 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AccountInitParameters struct { + + // An encryption block as defined below. + Encryption []EncryptionInitParameters `json:"encryption,omitempty" tf:"encryption,omitempty"` + + // An identity block as defined below. 
+ Identity *IdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Whether requests using non-AAD authentication are blocked. Defaults to true. + LocalAuthenticationEnabled *bool `json:"localAuthenticationEnabled,omitempty" tf:"local_authentication_enabled,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Whether public network access is allowed for the automation account. Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The SKU of the account. Possible values are Basic and Free. + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type AccountObservation struct { + + // The DSC Server Endpoint associated with this Automation Account. + DSCServerEndpoint *string `json:"dscServerEndpoint,omitempty" tf:"dsc_server_endpoint,omitempty"` + + // An encryption block as defined below. + Encryption []EncryptionObservation `json:"encryption,omitempty" tf:"encryption,omitempty"` + + // The URL of automation hybrid service which is used for hybrid worker on-boarding With this Automation Account. + HybridServiceURL *string `json:"hybridServiceUrl,omitempty" tf:"hybrid_service_url,omitempty"` + + // The ID of the Automation Account. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. + Identity *IdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // Whether requests using non-AAD authentication are blocked. Defaults to true. 
+ LocalAuthenticationEnabled *bool `json:"localAuthenticationEnabled,omitempty" tf:"local_authentication_enabled,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + PrivateEndpointConnection []PrivateEndpointConnectionObservation `json:"privateEndpointConnection,omitempty" tf:"private_endpoint_connection,omitempty"` + + // Whether public network access is allowed for the automation account. Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The name of the resource group in which the Automation Account is created. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // The SKU of the account. Possible values are Basic and Free. + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type AccountParameters struct { + + // An encryption block as defined below. + // +kubebuilder:validation:Optional + Encryption []EncryptionParameters `json:"encryption,omitempty" tf:"encryption,omitempty"` + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *IdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Whether requests using non-AAD authentication are blocked. Defaults to true. + // +kubebuilder:validation:Optional + LocalAuthenticationEnabled *bool `json:"localAuthenticationEnabled,omitempty" tf:"local_authentication_enabled,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Whether public network access is allowed for the automation account. Defaults to true. + // +kubebuilder:validation:Optional + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The name of the resource group in which the Automation Account is created. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // The SKU of the account. Possible values are Basic and Free. + // +kubebuilder:validation:Optional + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type EncryptionInitParameters struct { + KeySource *string `json:"keySource,omitempty" tf:"key_source,omitempty"` + + // The ID of the Key Vault Key which should be used to Encrypt the data in this Automation Account. + KeyVaultKeyID *string `json:"keyVaultKeyId,omitempty" tf:"key_vault_key_id,omitempty"` + + // The User Assigned Managed Identity ID to be used for accessing the Customer Managed Key for encryption. 
+ UserAssignedIdentityID *string `json:"userAssignedIdentityId,omitempty" tf:"user_assigned_identity_id,omitempty"` +} + +type EncryptionObservation struct { + KeySource *string `json:"keySource,omitempty" tf:"key_source,omitempty"` + + // The ID of the Key Vault Key which should be used to Encrypt the data in this Automation Account. + KeyVaultKeyID *string `json:"keyVaultKeyId,omitempty" tf:"key_vault_key_id,omitempty"` + + // The User Assigned Managed Identity ID to be used for accessing the Customer Managed Key for encryption. + UserAssignedIdentityID *string `json:"userAssignedIdentityId,omitempty" tf:"user_assigned_identity_id,omitempty"` +} + +type EncryptionParameters struct { + + // +kubebuilder:validation:Optional + KeySource *string `json:"keySource,omitempty" tf:"key_source,omitempty"` + + // The ID of the Key Vault Key which should be used to Encrypt the data in this Automation Account. + // +kubebuilder:validation:Optional + KeyVaultKeyID *string `json:"keyVaultKeyId" tf:"key_vault_key_id,omitempty"` + + // The User Assigned Managed Identity ID to be used for accessing the Customer Managed Key for encryption. + // +kubebuilder:validation:Optional + UserAssignedIdentityID *string `json:"userAssignedIdentityId,omitempty" tf:"user_assigned_identity_id,omitempty"` +} + +type IdentityInitParameters struct { + + // The ID of the User Assigned Identity which should be assigned to this Automation Account. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The type of identity used for this Automation Account. Possible values are SystemAssigned, UserAssigned and SystemAssigned, UserAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityObservation struct { + + // The ID of the User Assigned Identity which should be assigned to this Automation Account. 
+ // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Principal ID associated with this Managed Service Identity. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID associated with this Managed Service Identity. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // The type of identity used for this Automation Account. Possible values are SystemAssigned, UserAssigned and SystemAssigned, UserAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityParameters struct { + + // The ID of the User Assigned Identity which should be assigned to this Automation Account. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The type of identity used for this Automation Account. Possible values are SystemAssigned, UserAssigned and SystemAssigned, UserAssigned. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type PrivateEndpointConnectionInitParameters struct { +} + +type PrivateEndpointConnectionObservation struct { + + // The ID of the Automation Account. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies the name of the Automation Account. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type PrivateEndpointConnectionParameters struct { +} + +// AccountSpec defines the desired state of Account +type AccountSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider AccountParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. 
The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider AccountInitParameters `json:"initProvider,omitempty"` +} + +// AccountStatus defines the observed state of Account. +type AccountStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider AccountObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Account is the Schema for the Accounts API. Manages a Automation Account. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type Account struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.skuName) || 
(has(self.initProvider) && has(self.initProvider.skuName))",message="spec.forProvider.skuName is a required parameter" + Spec AccountSpec `json:"spec"` + Status AccountStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// AccountList contains a list of Accounts +type AccountList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Account `json:"items"` +} + +// Repository type metadata. +var ( + Account_Kind = "Account" + Account_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Account_Kind}.String() + Account_KindAPIVersion = Account_Kind + "." + CRDGroupVersion.String() + Account_GroupVersionKind = CRDGroupVersion.WithKind(Account_Kind) +) + +func init() { + SchemeBuilder.Register(&Account{}, &AccountList{}) +} diff --git a/apis/automation/v1beta2/zz_generated.conversion_hubs.go b/apis/automation/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 000000000..1288f6a01 --- /dev/null +++ b/apis/automation/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,19 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Account) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Module) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *RunBook) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Schedule) Hub() {} diff --git a/apis/automation/v1beta2/zz_generated.deepcopy.go b/apis/automation/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..b364de658 --- /dev/null +++ b/apis/automation/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,2526 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Account) DeepCopyInto(out *Account) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Account. +func (in *Account) DeepCopy() *Account { + if in == nil { + return nil + } + out := new(Account) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Account) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AccountInitParameters) DeepCopyInto(out *AccountInitParameters) { + *out = *in + if in.Encryption != nil { + in, out := &in.Encryption, &out.Encryption + *out = make([]EncryptionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LocalAuthenticationEnabled != nil { + in, out := &in.LocalAuthenticationEnabled, &out.LocalAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountInitParameters. +func (in *AccountInitParameters) DeepCopy() *AccountInitParameters { + if in == nil { + return nil + } + out := new(AccountInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AccountList) DeepCopyInto(out *AccountList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Account, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountList. +func (in *AccountList) DeepCopy() *AccountList { + if in == nil { + return nil + } + out := new(AccountList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AccountList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccountObservation) DeepCopyInto(out *AccountObservation) { + *out = *in + if in.DSCServerEndpoint != nil { + in, out := &in.DSCServerEndpoint, &out.DSCServerEndpoint + *out = new(string) + **out = **in + } + if in.Encryption != nil { + in, out := &in.Encryption, &out.Encryption + *out = make([]EncryptionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HybridServiceURL != nil { + in, out := &in.HybridServiceURL, &out.HybridServiceURL + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.LocalAuthenticationEnabled != nil { + in, out := &in.LocalAuthenticationEnabled, &out.LocalAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PrivateEndpointConnection != nil { + in, out := 
&in.PrivateEndpointConnection, &out.PrivateEndpointConnection + *out = make([]PrivateEndpointConnectionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountObservation. +func (in *AccountObservation) DeepCopy() *AccountObservation { + if in == nil { + return nil + } + out := new(AccountObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AccountParameters) DeepCopyInto(out *AccountParameters) { + *out = *in + if in.Encryption != nil { + in, out := &in.Encryption, &out.Encryption + *out = make([]EncryptionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.LocalAuthenticationEnabled != nil { + in, out := &in.LocalAuthenticationEnabled, &out.LocalAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountParameters. 
+func (in *AccountParameters) DeepCopy() *AccountParameters { + if in == nil { + return nil + } + out := new(AccountParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccountSpec) DeepCopyInto(out *AccountSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountSpec. +func (in *AccountSpec) DeepCopy() *AccountSpec { + if in == nil { + return nil + } + out := new(AccountSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccountStatus) DeepCopyInto(out *AccountStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountStatus. +func (in *AccountStatus) DeepCopy() *AccountStatus { + if in == nil { + return nil + } + out := new(AccountStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContentLinkHashInitParameters) DeepCopyInto(out *ContentLinkHashInitParameters) { + *out = *in + if in.Algorithm != nil { + in, out := &in.Algorithm, &out.Algorithm + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContentLinkHashInitParameters. 
+func (in *ContentLinkHashInitParameters) DeepCopy() *ContentLinkHashInitParameters { + if in == nil { + return nil + } + out := new(ContentLinkHashInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContentLinkHashObservation) DeepCopyInto(out *ContentLinkHashObservation) { + *out = *in + if in.Algorithm != nil { + in, out := &in.Algorithm, &out.Algorithm + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContentLinkHashObservation. +func (in *ContentLinkHashObservation) DeepCopy() *ContentLinkHashObservation { + if in == nil { + return nil + } + out := new(ContentLinkHashObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContentLinkHashParameters) DeepCopyInto(out *ContentLinkHashParameters) { + *out = *in + if in.Algorithm != nil { + in, out := &in.Algorithm, &out.Algorithm + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContentLinkHashParameters. +func (in *ContentLinkHashParameters) DeepCopy() *ContentLinkHashParameters { + if in == nil { + return nil + } + out := new(ContentLinkHashParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContentLinkInitParameters) DeepCopyInto(out *ContentLinkInitParameters) { + *out = *in + if in.Hash != nil { + in, out := &in.Hash, &out.Hash + *out = new(ContentLinkHashInitParameters) + (*in).DeepCopyInto(*out) + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContentLinkInitParameters. +func (in *ContentLinkInitParameters) DeepCopy() *ContentLinkInitParameters { + if in == nil { + return nil + } + out := new(ContentLinkInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContentLinkObservation) DeepCopyInto(out *ContentLinkObservation) { + *out = *in + if in.Hash != nil { + in, out := &in.Hash, &out.Hash + *out = new(ContentLinkHashObservation) + (*in).DeepCopyInto(*out) + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContentLinkObservation. +func (in *ContentLinkObservation) DeepCopy() *ContentLinkObservation { + if in == nil { + return nil + } + out := new(ContentLinkObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContentLinkParameters) DeepCopyInto(out *ContentLinkParameters) { + *out = *in + if in.Hash != nil { + in, out := &in.Hash, &out.Hash + *out = new(ContentLinkHashParameters) + (*in).DeepCopyInto(*out) + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContentLinkParameters. +func (in *ContentLinkParameters) DeepCopy() *ContentLinkParameters { + if in == nil { + return nil + } + out := new(ContentLinkParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DraftInitParameters) DeepCopyInto(out *DraftInitParameters) { + *out = *in + if in.ContentLink != nil { + in, out := &in.ContentLink, &out.ContentLink + *out = new(ContentLinkInitParameters) + (*in).DeepCopyInto(*out) + } + if in.EditModeEnabled != nil { + in, out := &in.EditModeEnabled, &out.EditModeEnabled + *out = new(bool) + **out = **in + } + if in.OutputTypes != nil { + in, out := &in.OutputTypes, &out.OutputTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make([]ParametersInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DraftInitParameters. 
+func (in *DraftInitParameters) DeepCopy() *DraftInitParameters { + if in == nil { + return nil + } + out := new(DraftInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DraftObservation) DeepCopyInto(out *DraftObservation) { + *out = *in + if in.ContentLink != nil { + in, out := &in.ContentLink, &out.ContentLink + *out = new(ContentLinkObservation) + (*in).DeepCopyInto(*out) + } + if in.CreationTime != nil { + in, out := &in.CreationTime, &out.CreationTime + *out = new(string) + **out = **in + } + if in.EditModeEnabled != nil { + in, out := &in.EditModeEnabled, &out.EditModeEnabled + *out = new(bool) + **out = **in + } + if in.LastModifiedTime != nil { + in, out := &in.LastModifiedTime, &out.LastModifiedTime + *out = new(string) + **out = **in + } + if in.OutputTypes != nil { + in, out := &in.OutputTypes, &out.OutputTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make([]ParametersObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DraftObservation. +func (in *DraftObservation) DeepCopy() *DraftObservation { + if in == nil { + return nil + } + out := new(DraftObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DraftParameters) DeepCopyInto(out *DraftParameters) { + *out = *in + if in.ContentLink != nil { + in, out := &in.ContentLink, &out.ContentLink + *out = new(ContentLinkParameters) + (*in).DeepCopyInto(*out) + } + if in.EditModeEnabled != nil { + in, out := &in.EditModeEnabled, &out.EditModeEnabled + *out = new(bool) + **out = **in + } + if in.OutputTypes != nil { + in, out := &in.OutputTypes, &out.OutputTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make([]ParametersParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DraftParameters. +func (in *DraftParameters) DeepCopy() *DraftParameters { + if in == nil { + return nil + } + out := new(DraftParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionInitParameters) DeepCopyInto(out *EncryptionInitParameters) { + *out = *in + if in.KeySource != nil { + in, out := &in.KeySource, &out.KeySource + *out = new(string) + **out = **in + } + if in.KeyVaultKeyID != nil { + in, out := &in.KeyVaultKeyID, &out.KeyVaultKeyID + *out = new(string) + **out = **in + } + if in.UserAssignedIdentityID != nil { + in, out := &in.UserAssignedIdentityID, &out.UserAssignedIdentityID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionInitParameters. 
+func (in *EncryptionInitParameters) DeepCopy() *EncryptionInitParameters { + if in == nil { + return nil + } + out := new(EncryptionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionObservation) DeepCopyInto(out *EncryptionObservation) { + *out = *in + if in.KeySource != nil { + in, out := &in.KeySource, &out.KeySource + *out = new(string) + **out = **in + } + if in.KeyVaultKeyID != nil { + in, out := &in.KeyVaultKeyID, &out.KeyVaultKeyID + *out = new(string) + **out = **in + } + if in.UserAssignedIdentityID != nil { + in, out := &in.UserAssignedIdentityID, &out.UserAssignedIdentityID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionObservation. +func (in *EncryptionObservation) DeepCopy() *EncryptionObservation { + if in == nil { + return nil + } + out := new(EncryptionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionParameters) DeepCopyInto(out *EncryptionParameters) { + *out = *in + if in.KeySource != nil { + in, out := &in.KeySource, &out.KeySource + *out = new(string) + **out = **in + } + if in.KeyVaultKeyID != nil { + in, out := &in.KeyVaultKeyID, &out.KeyVaultKeyID + *out = new(string) + **out = **in + } + if in.UserAssignedIdentityID != nil { + in, out := &in.UserAssignedIdentityID, &out.UserAssignedIdentityID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionParameters. 
+func (in *EncryptionParameters) DeepCopy() *EncryptionParameters { + if in == nil { + return nil + } + out := new(EncryptionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HashInitParameters) DeepCopyInto(out *HashInitParameters) { + *out = *in + if in.Algorithm != nil { + in, out := &in.Algorithm, &out.Algorithm + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HashInitParameters. +func (in *HashInitParameters) DeepCopy() *HashInitParameters { + if in == nil { + return nil + } + out := new(HashInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HashObservation) DeepCopyInto(out *HashObservation) { + *out = *in + if in.Algorithm != nil { + in, out := &in.Algorithm, &out.Algorithm + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HashObservation. +func (in *HashObservation) DeepCopy() *HashObservation { + if in == nil { + return nil + } + out := new(HashObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HashParameters) DeepCopyInto(out *HashParameters) { + *out = *in + if in.Algorithm != nil { + in, out := &in.Algorithm, &out.Algorithm + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HashParameters. +func (in *HashParameters) DeepCopy() *HashParameters { + if in == nil { + return nil + } + out := new(HashParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityInitParameters) DeepCopyInto(out *IdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityInitParameters. +func (in *IdentityInitParameters) DeepCopy() *IdentityInitParameters { + if in == nil { + return nil + } + out := new(IdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentityObservation) DeepCopyInto(out *IdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityObservation. +func (in *IdentityObservation) DeepCopy() *IdentityObservation { + if in == nil { + return nil + } + out := new(IdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityParameters) DeepCopyInto(out *IdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityParameters. +func (in *IdentityParameters) DeepCopy() *IdentityParameters { + if in == nil { + return nil + } + out := new(IdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JobScheduleInitParameters) DeepCopyInto(out *JobScheduleInitParameters) { + *out = *in + if in.JobScheduleID != nil { + in, out := &in.JobScheduleID, &out.JobScheduleID + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.RunOn != nil { + in, out := &in.RunOn, &out.RunOn + *out = new(string) + **out = **in + } + if in.ScheduleName != nil { + in, out := &in.ScheduleName, &out.ScheduleName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobScheduleInitParameters. +func (in *JobScheduleInitParameters) DeepCopy() *JobScheduleInitParameters { + if in == nil { + return nil + } + out := new(JobScheduleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JobScheduleObservation) DeepCopyInto(out *JobScheduleObservation) { + *out = *in + if in.JobScheduleID != nil { + in, out := &in.JobScheduleID, &out.JobScheduleID + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.RunOn != nil { + in, out := &in.RunOn, &out.RunOn + *out = new(string) + **out = **in + } + if in.ScheduleName != nil { + in, out := &in.ScheduleName, &out.ScheduleName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobScheduleObservation. +func (in *JobScheduleObservation) DeepCopy() *JobScheduleObservation { + if in == nil { + return nil + } + out := new(JobScheduleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JobScheduleParameters) DeepCopyInto(out *JobScheduleParameters) { + *out = *in + if in.JobScheduleID != nil { + in, out := &in.JobScheduleID, &out.JobScheduleID + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.RunOn != nil { + in, out := &in.RunOn, &out.RunOn + *out = new(string) + **out = **in + } + if in.ScheduleName != nil { + in, out := &in.ScheduleName, &out.ScheduleName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobScheduleParameters. +func (in *JobScheduleParameters) DeepCopy() *JobScheduleParameters { + if in == nil { + return nil + } + out := new(JobScheduleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Module) DeepCopyInto(out *Module) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Module. +func (in *Module) DeepCopy() *Module { + if in == nil { + return nil + } + out := new(Module) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Module) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ModuleInitParameters) DeepCopyInto(out *ModuleInitParameters) { + *out = *in + if in.ModuleLink != nil { + in, out := &in.ModuleLink, &out.ModuleLink + *out = new(ModuleLinkInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModuleInitParameters. +func (in *ModuleInitParameters) DeepCopy() *ModuleInitParameters { + if in == nil { + return nil + } + out := new(ModuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ModuleLinkInitParameters) DeepCopyInto(out *ModuleLinkInitParameters) { + *out = *in + if in.Hash != nil { + in, out := &in.Hash, &out.Hash + *out = new(HashInitParameters) + (*in).DeepCopyInto(*out) + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModuleLinkInitParameters. +func (in *ModuleLinkInitParameters) DeepCopy() *ModuleLinkInitParameters { + if in == nil { + return nil + } + out := new(ModuleLinkInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ModuleLinkObservation) DeepCopyInto(out *ModuleLinkObservation) { + *out = *in + if in.Hash != nil { + in, out := &in.Hash, &out.Hash + *out = new(HashObservation) + (*in).DeepCopyInto(*out) + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModuleLinkObservation. 
+func (in *ModuleLinkObservation) DeepCopy() *ModuleLinkObservation { + if in == nil { + return nil + } + out := new(ModuleLinkObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ModuleLinkParameters) DeepCopyInto(out *ModuleLinkParameters) { + *out = *in + if in.Hash != nil { + in, out := &in.Hash, &out.Hash + *out = new(HashParameters) + (*in).DeepCopyInto(*out) + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModuleLinkParameters. +func (in *ModuleLinkParameters) DeepCopy() *ModuleLinkParameters { + if in == nil { + return nil + } + out := new(ModuleLinkParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ModuleList) DeepCopyInto(out *ModuleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Module, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModuleList. +func (in *ModuleList) DeepCopy() *ModuleList { + if in == nil { + return nil + } + out := new(ModuleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ModuleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ModuleObservation) DeepCopyInto(out *ModuleObservation) { + *out = *in + if in.AutomationAccountName != nil { + in, out := &in.AutomationAccountName, &out.AutomationAccountName + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.ModuleLink != nil { + in, out := &in.ModuleLink, &out.ModuleLink + *out = new(ModuleLinkObservation) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModuleObservation. +func (in *ModuleObservation) DeepCopy() *ModuleObservation { + if in == nil { + return nil + } + out := new(ModuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ModuleParameters) DeepCopyInto(out *ModuleParameters) { + *out = *in + if in.AutomationAccountName != nil { + in, out := &in.AutomationAccountName, &out.AutomationAccountName + *out = new(string) + **out = **in + } + if in.AutomationAccountNameRef != nil { + in, out := &in.AutomationAccountNameRef, &out.AutomationAccountNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AutomationAccountNameSelector != nil { + in, out := &in.AutomationAccountNameSelector, &out.AutomationAccountNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ModuleLink != nil { + in, out := &in.ModuleLink, &out.ModuleLink + *out = new(ModuleLinkParameters) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + 
(*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModuleParameters. +func (in *ModuleParameters) DeepCopy() *ModuleParameters { + if in == nil { + return nil + } + out := new(ModuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ModuleSpec) DeepCopyInto(out *ModuleSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModuleSpec. +func (in *ModuleSpec) DeepCopy() *ModuleSpec { + if in == nil { + return nil + } + out := new(ModuleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ModuleStatus) DeepCopyInto(out *ModuleStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModuleStatus. +func (in *ModuleStatus) DeepCopy() *ModuleStatus { + if in == nil { + return nil + } + out := new(ModuleStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonthlyOccurrenceInitParameters) DeepCopyInto(out *MonthlyOccurrenceInitParameters) { + *out = *in + if in.Day != nil { + in, out := &in.Day, &out.Day + *out = new(string) + **out = **in + } + if in.Occurrence != nil { + in, out := &in.Occurrence, &out.Occurrence + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonthlyOccurrenceInitParameters. +func (in *MonthlyOccurrenceInitParameters) DeepCopy() *MonthlyOccurrenceInitParameters { + if in == nil { + return nil + } + out := new(MonthlyOccurrenceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonthlyOccurrenceObservation) DeepCopyInto(out *MonthlyOccurrenceObservation) { + *out = *in + if in.Day != nil { + in, out := &in.Day, &out.Day + *out = new(string) + **out = **in + } + if in.Occurrence != nil { + in, out := &in.Occurrence, &out.Occurrence + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonthlyOccurrenceObservation. +func (in *MonthlyOccurrenceObservation) DeepCopy() *MonthlyOccurrenceObservation { + if in == nil { + return nil + } + out := new(MonthlyOccurrenceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonthlyOccurrenceParameters) DeepCopyInto(out *MonthlyOccurrenceParameters) { + *out = *in + if in.Day != nil { + in, out := &in.Day, &out.Day + *out = new(string) + **out = **in + } + if in.Occurrence != nil { + in, out := &in.Occurrence, &out.Occurrence + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonthlyOccurrenceParameters. 
+func (in *MonthlyOccurrenceParameters) DeepCopy() *MonthlyOccurrenceParameters { + if in == nil { + return nil + } + out := new(MonthlyOccurrenceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParametersInitParameters) DeepCopyInto(out *ParametersInitParameters) { + *out = *in + if in.DefaultValue != nil { + in, out := &in.DefaultValue, &out.DefaultValue + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Mandatory != nil { + in, out := &in.Mandatory, &out.Mandatory + *out = new(bool) + **out = **in + } + if in.Position != nil { + in, out := &in.Position, &out.Position + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParametersInitParameters. +func (in *ParametersInitParameters) DeepCopy() *ParametersInitParameters { + if in == nil { + return nil + } + out := new(ParametersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ParametersObservation) DeepCopyInto(out *ParametersObservation) { + *out = *in + if in.DefaultValue != nil { + in, out := &in.DefaultValue, &out.DefaultValue + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Mandatory != nil { + in, out := &in.Mandatory, &out.Mandatory + *out = new(bool) + **out = **in + } + if in.Position != nil { + in, out := &in.Position, &out.Position + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParametersObservation. +func (in *ParametersObservation) DeepCopy() *ParametersObservation { + if in == nil { + return nil + } + out := new(ParametersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParametersParameters) DeepCopyInto(out *ParametersParameters) { + *out = *in + if in.DefaultValue != nil { + in, out := &in.DefaultValue, &out.DefaultValue + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Mandatory != nil { + in, out := &in.Mandatory, &out.Mandatory + *out = new(bool) + **out = **in + } + if in.Position != nil { + in, out := &in.Position, &out.Position + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParametersParameters. 
+func (in *ParametersParameters) DeepCopy() *ParametersParameters { + if in == nil { + return nil + } + out := new(ParametersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrivateEndpointConnectionInitParameters) DeepCopyInto(out *PrivateEndpointConnectionInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateEndpointConnectionInitParameters. +func (in *PrivateEndpointConnectionInitParameters) DeepCopy() *PrivateEndpointConnectionInitParameters { + if in == nil { + return nil + } + out := new(PrivateEndpointConnectionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrivateEndpointConnectionObservation) DeepCopyInto(out *PrivateEndpointConnectionObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateEndpointConnectionObservation. +func (in *PrivateEndpointConnectionObservation) DeepCopy() *PrivateEndpointConnectionObservation { + if in == nil { + return nil + } + out := new(PrivateEndpointConnectionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrivateEndpointConnectionParameters) DeepCopyInto(out *PrivateEndpointConnectionParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateEndpointConnectionParameters. 
+func (in *PrivateEndpointConnectionParameters) DeepCopy() *PrivateEndpointConnectionParameters { + if in == nil { + return nil + } + out := new(PrivateEndpointConnectionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PublishContentLinkHashInitParameters) DeepCopyInto(out *PublishContentLinkHashInitParameters) { + *out = *in + if in.Algorithm != nil { + in, out := &in.Algorithm, &out.Algorithm + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublishContentLinkHashInitParameters. +func (in *PublishContentLinkHashInitParameters) DeepCopy() *PublishContentLinkHashInitParameters { + if in == nil { + return nil + } + out := new(PublishContentLinkHashInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PublishContentLinkHashObservation) DeepCopyInto(out *PublishContentLinkHashObservation) { + *out = *in + if in.Algorithm != nil { + in, out := &in.Algorithm, &out.Algorithm + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublishContentLinkHashObservation. +func (in *PublishContentLinkHashObservation) DeepCopy() *PublishContentLinkHashObservation { + if in == nil { + return nil + } + out := new(PublishContentLinkHashObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PublishContentLinkHashParameters) DeepCopyInto(out *PublishContentLinkHashParameters) { + *out = *in + if in.Algorithm != nil { + in, out := &in.Algorithm, &out.Algorithm + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublishContentLinkHashParameters. +func (in *PublishContentLinkHashParameters) DeepCopy() *PublishContentLinkHashParameters { + if in == nil { + return nil + } + out := new(PublishContentLinkHashParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PublishContentLinkInitParameters) DeepCopyInto(out *PublishContentLinkInitParameters) { + *out = *in + if in.Hash != nil { + in, out := &in.Hash, &out.Hash + *out = new(PublishContentLinkHashInitParameters) + (*in).DeepCopyInto(*out) + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublishContentLinkInitParameters. +func (in *PublishContentLinkInitParameters) DeepCopy() *PublishContentLinkInitParameters { + if in == nil { + return nil + } + out := new(PublishContentLinkInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PublishContentLinkObservation) DeepCopyInto(out *PublishContentLinkObservation) { + *out = *in + if in.Hash != nil { + in, out := &in.Hash, &out.Hash + *out = new(PublishContentLinkHashObservation) + (*in).DeepCopyInto(*out) + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublishContentLinkObservation. +func (in *PublishContentLinkObservation) DeepCopy() *PublishContentLinkObservation { + if in == nil { + return nil + } + out := new(PublishContentLinkObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PublishContentLinkParameters) DeepCopyInto(out *PublishContentLinkParameters) { + *out = *in + if in.Hash != nil { + in, out := &in.Hash, &out.Hash + *out = new(PublishContentLinkHashParameters) + (*in).DeepCopyInto(*out) + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublishContentLinkParameters. +func (in *PublishContentLinkParameters) DeepCopy() *PublishContentLinkParameters { + if in == nil { + return nil + } + out := new(PublishContentLinkParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RunBook) DeepCopyInto(out *RunBook) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunBook. +func (in *RunBook) DeepCopy() *RunBook { + if in == nil { + return nil + } + out := new(RunBook) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RunBook) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RunBookInitParameters) DeepCopyInto(out *RunBookInitParameters) { + *out = *in + if in.AutomationAccountName != nil { + in, out := &in.AutomationAccountName, &out.AutomationAccountName + *out = new(string) + **out = **in + } + if in.AutomationAccountNameRef != nil { + in, out := &in.AutomationAccountNameRef, &out.AutomationAccountNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AutomationAccountNameSelector != nil { + in, out := &in.AutomationAccountNameSelector, &out.AutomationAccountNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Draft != nil { + in, out := &in.Draft, &out.Draft + *out = new(DraftInitParameters) + (*in).DeepCopyInto(*out) + } + if in.JobSchedule != nil { + in, out := &in.JobSchedule, &out.JobSchedule + *out = make([]JobScheduleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Location != nil { + in, out := &in.Location, 
&out.Location + *out = new(string) + **out = **in + } + if in.LogActivityTraceLevel != nil { + in, out := &in.LogActivityTraceLevel, &out.LogActivityTraceLevel + *out = new(float64) + **out = **in + } + if in.LogProgress != nil { + in, out := &in.LogProgress, &out.LogProgress + *out = new(bool) + **out = **in + } + if in.LogVerbose != nil { + in, out := &in.LogVerbose, &out.LogVerbose + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PublishContentLink != nil { + in, out := &in.PublishContentLink, &out.PublishContentLink + *out = new(PublishContentLinkInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RunBookType != nil { + in, out := &in.RunBookType, &out.RunBookType + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunBookInitParameters. +func (in *RunBookInitParameters) DeepCopy() *RunBookInitParameters { + if in == nil { + return nil + } + out := new(RunBookInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *RunBookList) DeepCopyInto(out *RunBookList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RunBook, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunBookList. +func (in *RunBookList) DeepCopy() *RunBookList { + if in == nil { + return nil + } + out := new(RunBookList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RunBookList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RunBookObservation) DeepCopyInto(out *RunBookObservation) { + *out = *in + if in.AutomationAccountName != nil { + in, out := &in.AutomationAccountName, &out.AutomationAccountName + *out = new(string) + **out = **in + } + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Draft != nil { + in, out := &in.Draft, &out.Draft + *out = new(DraftObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.JobSchedule != nil { + in, out := &in.JobSchedule, &out.JobSchedule + *out = make([]JobScheduleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.LogActivityTraceLevel != nil { + in, out := &in.LogActivityTraceLevel, &out.LogActivityTraceLevel + 
*out = new(float64) + **out = **in + } + if in.LogProgress != nil { + in, out := &in.LogProgress, &out.LogProgress + *out = new(bool) + **out = **in + } + if in.LogVerbose != nil { + in, out := &in.LogVerbose, &out.LogVerbose + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PublishContentLink != nil { + in, out := &in.PublishContentLink, &out.PublishContentLink + *out = new(PublishContentLinkObservation) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.RunBookType != nil { + in, out := &in.RunBookType, &out.RunBookType + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunBookObservation. +func (in *RunBookObservation) DeepCopy() *RunBookObservation { + if in == nil { + return nil + } + out := new(RunBookObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RunBookParameters) DeepCopyInto(out *RunBookParameters) { + *out = *in + if in.AutomationAccountName != nil { + in, out := &in.AutomationAccountName, &out.AutomationAccountName + *out = new(string) + **out = **in + } + if in.AutomationAccountNameRef != nil { + in, out := &in.AutomationAccountNameRef, &out.AutomationAccountNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AutomationAccountNameSelector != nil { + in, out := &in.AutomationAccountNameSelector, &out.AutomationAccountNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Draft != nil { + in, out := &in.Draft, &out.Draft + *out = new(DraftParameters) + (*in).DeepCopyInto(*out) + } + if in.JobSchedule != nil { + in, out := &in.JobSchedule, &out.JobSchedule + *out = make([]JobScheduleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.LogActivityTraceLevel != nil { + in, out := &in.LogActivityTraceLevel, &out.LogActivityTraceLevel + *out = new(float64) + **out = **in + } + if in.LogProgress != nil { + in, out := &in.LogProgress, &out.LogProgress + *out = new(bool) + **out = **in + } + if in.LogVerbose != nil { + in, out := &in.LogVerbose, &out.LogVerbose + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PublishContentLink != nil { + in, out := &in.PublishContentLink, &out.PublishContentLink + *out = new(PublishContentLinkParameters) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + 
if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RunBookType != nil { + in, out := &in.RunBookType, &out.RunBookType + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunBookParameters. +func (in *RunBookParameters) DeepCopy() *RunBookParameters { + if in == nil { + return nil + } + out := new(RunBookParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RunBookSpec) DeepCopyInto(out *RunBookSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunBookSpec. +func (in *RunBookSpec) DeepCopy() *RunBookSpec { + if in == nil { + return nil + } + out := new(RunBookSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RunBookStatus) DeepCopyInto(out *RunBookStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunBookStatus. +func (in *RunBookStatus) DeepCopy() *RunBookStatus { + if in == nil { + return nil + } + out := new(RunBookStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Schedule) DeepCopyInto(out *Schedule) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Schedule. +func (in *Schedule) DeepCopy() *Schedule { + if in == nil { + return nil + } + out := new(Schedule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Schedule) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScheduleInitParameters) DeepCopyInto(out *ScheduleInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ExpiryTime != nil { + in, out := &in.ExpiryTime, &out.ExpiryTime + *out = new(string) + **out = **in + } + if in.Frequency != nil { + in, out := &in.Frequency, &out.Frequency + *out = new(string) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.MonthDays != nil { + in, out := &in.MonthDays, &out.MonthDays + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.MonthlyOccurrence != nil { + in, out := &in.MonthlyOccurrence, &out.MonthlyOccurrence + *out = new(MonthlyOccurrenceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } + if in.Timezone != nil { + in, out := &in.Timezone, &out.Timezone + *out = new(string) + **out = **in + } + if in.WeekDays != nil { + in, out := &in.WeekDays, &out.WeekDays + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleInitParameters. +func (in *ScheduleInitParameters) DeepCopy() *ScheduleInitParameters { + if in == nil { + return nil + } + out := new(ScheduleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScheduleList) DeepCopyInto(out *ScheduleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Schedule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleList. +func (in *ScheduleList) DeepCopy() *ScheduleList { + if in == nil { + return nil + } + out := new(ScheduleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ScheduleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScheduleObservation) DeepCopyInto(out *ScheduleObservation) { + *out = *in + if in.AutomationAccountName != nil { + in, out := &in.AutomationAccountName, &out.AutomationAccountName + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ExpiryTime != nil { + in, out := &in.ExpiryTime, &out.ExpiryTime + *out = new(string) + **out = **in + } + if in.Frequency != nil { + in, out := &in.Frequency, &out.Frequency + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.MonthDays != nil { + in, out := &in.MonthDays, &out.MonthDays + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.MonthlyOccurrence != nil { + in, out := &in.MonthlyOccurrence, 
&out.MonthlyOccurrence + *out = new(MonthlyOccurrenceObservation) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } + if in.Timezone != nil { + in, out := &in.Timezone, &out.Timezone + *out = new(string) + **out = **in + } + if in.WeekDays != nil { + in, out := &in.WeekDays, &out.WeekDays + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleObservation. +func (in *ScheduleObservation) DeepCopy() *ScheduleObservation { + if in == nil { + return nil + } + out := new(ScheduleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScheduleParameters) DeepCopyInto(out *ScheduleParameters) { + *out = *in + if in.AutomationAccountName != nil { + in, out := &in.AutomationAccountName, &out.AutomationAccountName + *out = new(string) + **out = **in + } + if in.AutomationAccountNameRef != nil { + in, out := &in.AutomationAccountNameRef, &out.AutomationAccountNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AutomationAccountNameSelector != nil { + in, out := &in.AutomationAccountNameSelector, &out.AutomationAccountNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ExpiryTime != nil { + in, out := &in.ExpiryTime, &out.ExpiryTime + *out = new(string) + **out = **in + } + if in.Frequency != nil { + in, out := &in.Frequency, &out.Frequency + *out = new(string) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.MonthDays != nil { + in, out := &in.MonthDays, &out.MonthDays + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.MonthlyOccurrence != nil { + in, out := &in.MonthlyOccurrence, &out.MonthlyOccurrence + *out = new(MonthlyOccurrenceParameters) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = 
new(string) + **out = **in + } + if in.Timezone != nil { + in, out := &in.Timezone, &out.Timezone + *out = new(string) + **out = **in + } + if in.WeekDays != nil { + in, out := &in.WeekDays, &out.WeekDays + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleParameters. +func (in *ScheduleParameters) DeepCopy() *ScheduleParameters { + if in == nil { + return nil + } + out := new(ScheduleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScheduleSpec) DeepCopyInto(out *ScheduleSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleSpec. +func (in *ScheduleSpec) DeepCopy() *ScheduleSpec { + if in == nil { + return nil + } + out := new(ScheduleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScheduleStatus) DeepCopyInto(out *ScheduleStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleStatus. 
+func (in *ScheduleStatus) DeepCopy() *ScheduleStatus { + if in == nil { + return nil + } + out := new(ScheduleStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/automation/v1beta2/zz_generated.managed.go b/apis/automation/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..a0150aedd --- /dev/null +++ b/apis/automation/v1beta2/zz_generated.managed.go @@ -0,0 +1,248 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Account. +func (mg *Account) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Account. +func (mg *Account) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Account. +func (mg *Account) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Account. +func (mg *Account) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Account. +func (mg *Account) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Account. +func (mg *Account) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Account. +func (mg *Account) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Account. +func (mg *Account) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Account. 
+func (mg *Account) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Account. +func (mg *Account) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Account. +func (mg *Account) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Account. +func (mg *Account) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Module. +func (mg *Module) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Module. +func (mg *Module) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Module. +func (mg *Module) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Module. +func (mg *Module) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Module. +func (mg *Module) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Module. +func (mg *Module) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Module. +func (mg *Module) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Module. +func (mg *Module) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Module. 
+func (mg *Module) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Module. +func (mg *Module) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Module. +func (mg *Module) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Module. +func (mg *Module) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this RunBook. +func (mg *RunBook) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this RunBook. +func (mg *RunBook) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this RunBook. +func (mg *RunBook) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this RunBook. +func (mg *RunBook) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this RunBook. +func (mg *RunBook) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this RunBook. +func (mg *RunBook) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this RunBook. +func (mg *RunBook) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this RunBook. +func (mg *RunBook) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this RunBook. 
+func (mg *RunBook) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this RunBook. +func (mg *RunBook) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this RunBook. +func (mg *RunBook) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this RunBook. +func (mg *RunBook) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Schedule. +func (mg *Schedule) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Schedule. +func (mg *Schedule) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Schedule. +func (mg *Schedule) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Schedule. +func (mg *Schedule) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Schedule. +func (mg *Schedule) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Schedule. +func (mg *Schedule) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Schedule. +func (mg *Schedule) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Schedule. +func (mg *Schedule) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Schedule. 
+func (mg *Schedule) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Schedule. +func (mg *Schedule) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Schedule. +func (mg *Schedule) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Schedule. +func (mg *Schedule) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/automation/v1beta2/zz_generated.managedlist.go b/apis/automation/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..890ed260a --- /dev/null +++ b/apis/automation/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,44 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this AccountList. +func (l *AccountList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ModuleList. +func (l *ModuleList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this RunBookList. +func (l *RunBookList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ScheduleList. 
+func (l *ScheduleList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/automation/v1beta2/zz_generated.resolvers.go b/apis/automation/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..6d17b5b92 --- /dev/null +++ b/apis/automation/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,237 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + client "sigs.k8s.io/controller-runtime/pkg/client" + + // ResolveReferences of this Account. + apisresolver "github.com/upbound/provider-azure/internal/apis" +) + +func (mg *Account) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = 
reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Module. +func (mg *Module) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("automation.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.AutomationAccountName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.AutomationAccountNameRef, + Selector: mg.Spec.ForProvider.AutomationAccountNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.AutomationAccountName") + } + mg.Spec.ForProvider.AutomationAccountName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.AutomationAccountNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = 
reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this RunBook. +func (mg *RunBook) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("automation.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.AutomationAccountName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.AutomationAccountNameRef, + Selector: mg.Spec.ForProvider.AutomationAccountNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.AutomationAccountName") + } + mg.Spec.ForProvider.AutomationAccountName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.AutomationAccountNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = 
reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("automation.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.AutomationAccountName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.AutomationAccountNameRef, + Selector: mg.Spec.InitProvider.AutomationAccountNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.AutomationAccountName") + } + mg.Spec.InitProvider.AutomationAccountName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.AutomationAccountNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ResourceGroupNameRef, + Selector: mg.Spec.InitProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ResourceGroupName") + } + mg.Spec.InitProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Schedule. 
+func (mg *Schedule) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("automation.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.AutomationAccountName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.AutomationAccountNameRef, + Selector: mg.Spec.ForProvider.AutomationAccountNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.AutomationAccountName") + } + mg.Spec.ForProvider.AutomationAccountName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.AutomationAccountNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} diff --git 
a/apis/automation/v1beta2/zz_groupversion_info.go b/apis/automation/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..7adcd9217 --- /dev/null +++ b/apis/automation/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=automation.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "automation.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/automation/v1beta2/zz_module_terraformed.go b/apis/automation/v1beta2/zz_module_terraformed.go new file mode 100755 index 000000000..2993d9add --- /dev/null +++ b/apis/automation/v1beta2/zz_module_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Module +func (mg *Module) GetTerraformResourceType() string { + return "azurerm_automation_module" +} + +// GetConnectionDetailsMapping for this Module +func (tr *Module) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Module +func (tr *Module) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Module +func (tr *Module) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Module +func (tr *Module) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Module +func (tr *Module) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Module +func (tr *Module) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Module +func (tr *Module) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// 
GetInitParameters of this Module +func (tr *Module) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Module using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Module) LateInitialize(attrs []byte) (bool, error) { + params := &ModuleParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Module) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/automation/v1beta2/zz_module_types.go b/apis/automation/v1beta2/zz_module_types.go new file mode 100755 index 000000000..add778cd9 --- /dev/null +++ b/apis/automation/v1beta2/zz_module_types.go @@ -0,0 +1,187 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type HashInitParameters struct { + + // Specifies the algorithm used for the hash content. + Algorithm *string `json:"algorithm,omitempty" tf:"algorithm,omitempty"` + + // The hash value of the content. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type HashObservation struct { + + // Specifies the algorithm used for the hash content. + Algorithm *string `json:"algorithm,omitempty" tf:"algorithm,omitempty"` + + // The hash value of the content. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type HashParameters struct { + + // Specifies the algorithm used for the hash content. + // +kubebuilder:validation:Optional + Algorithm *string `json:"algorithm" tf:"algorithm,omitempty"` + + // The hash value of the content. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type ModuleInitParameters struct { + + // A module_link block as defined below. + ModuleLink *ModuleLinkInitParameters `json:"moduleLink,omitempty" tf:"module_link,omitempty"` +} + +type ModuleLinkInitParameters struct { + + // A hash block as defined below. 
+ Hash *HashInitParameters `json:"hash,omitempty" tf:"hash,omitempty"` + + // The URI of the module content (zip or nupkg). + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type ModuleLinkObservation struct { + + // A hash block as defined below. + Hash *HashObservation `json:"hash,omitempty" tf:"hash,omitempty"` + + // The URI of the module content (zip or nupkg). + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type ModuleLinkParameters struct { + + // A hash block as defined below. + // +kubebuilder:validation:Optional + Hash *HashParameters `json:"hash,omitempty" tf:"hash,omitempty"` + + // The URI of the module content (zip or nupkg). + // +kubebuilder:validation:Optional + URI *string `json:"uri" tf:"uri,omitempty"` +} + +type ModuleObservation struct { + + // The name of the automation account in which the Module is created. Changing this forces a new resource to be created. + AutomationAccountName *string `json:"automationAccountName,omitempty" tf:"automation_account_name,omitempty"` + + // The Automation Module ID. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A module_link block as defined below. + ModuleLink *ModuleLinkObservation `json:"moduleLink,omitempty" tf:"module_link,omitempty"` + + // The name of the resource group in which the Module is created. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` +} + +type ModuleParameters struct { + + // The name of the automation account in which the Module is created. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/automation/v1beta2.Account + // +kubebuilder:validation:Optional + AutomationAccountName *string `json:"automationAccountName,omitempty" tf:"automation_account_name,omitempty"` + + // Reference to a Account in automation to populate automationAccountName. 
+ // +kubebuilder:validation:Optional + AutomationAccountNameRef *v1.Reference `json:"automationAccountNameRef,omitempty" tf:"-"` + + // Selector for a Account in automation to populate automationAccountName. + // +kubebuilder:validation:Optional + AutomationAccountNameSelector *v1.Selector `json:"automationAccountNameSelector,omitempty" tf:"-"` + + // A module_link block as defined below. + // +kubebuilder:validation:Optional + ModuleLink *ModuleLinkParameters `json:"moduleLink,omitempty" tf:"module_link,omitempty"` + + // The name of the resource group in which the Module is created. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` +} + +// ModuleSpec defines the desired state of Module +type ModuleSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ModuleParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ModuleInitParameters `json:"initProvider,omitempty"` +} + +// ModuleStatus defines the observed state of Module. +type ModuleStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ModuleObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Module is the Schema for the Modules API. Manages a Automation Module. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type Module struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.moduleLink) || (has(self.initProvider) && has(self.initProvider.moduleLink))",message="spec.forProvider.moduleLink is a required parameter" + Spec ModuleSpec `json:"spec"` + Status ModuleStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ModuleList contains a list of Modules +type ModuleList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Module `json:"items"` +} + +// Repository type metadata. 
+var ( + Module_Kind = "Module" + Module_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Module_Kind}.String() + Module_KindAPIVersion = Module_Kind + "." + CRDGroupVersion.String() + Module_GroupVersionKind = CRDGroupVersion.WithKind(Module_Kind) +) + +func init() { + SchemeBuilder.Register(&Module{}, &ModuleList{}) +} diff --git a/apis/automation/v1beta2/zz_runbook_terraformed.go b/apis/automation/v1beta2/zz_runbook_terraformed.go new file mode 100755 index 000000000..346781ce9 --- /dev/null +++ b/apis/automation/v1beta2/zz_runbook_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this RunBook +func (mg *RunBook) GetTerraformResourceType() string { + return "azurerm_automation_runbook" +} + +// GetConnectionDetailsMapping for this RunBook +func (tr *RunBook) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this RunBook +func (tr *RunBook) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this RunBook +func (tr *RunBook) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this RunBook +func (tr *RunBook) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this RunBook +func (tr *RunBook) GetParameters() 
(map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this RunBook +func (tr *RunBook) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this RunBook +func (tr *RunBook) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this RunBook +func (tr *RunBook) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this RunBook using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *RunBook) LateInitialize(attrs []byte) (bool, error) { + params := &RunBookParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *RunBook) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/automation/v1beta2/zz_runbook_types.go b/apis/automation/v1beta2/zz_runbook_types.go new file mode 100755 index 000000000..7aa1b49db --- /dev/null +++ b/apis/automation/v1beta2/zz_runbook_types.go @@ -0,0 +1,564 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ContentLinkHashInitParameters struct { + + // Specifies the hash algorithm used to hash the content. + Algorithm *string `json:"algorithm,omitempty" tf:"algorithm,omitempty"` + + // Specifies the expected hash value of the content. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ContentLinkHashObservation struct { + + // Specifies the hash algorithm used to hash the content. + Algorithm *string `json:"algorithm,omitempty" tf:"algorithm,omitempty"` + + // Specifies the expected hash value of the content. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ContentLinkHashParameters struct { + + // Specifies the hash algorithm used to hash the content. 
+ // +kubebuilder:validation:Optional + Algorithm *string `json:"algorithm" tf:"algorithm,omitempty"` + + // Specifies the expected hash value of the content. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type ContentLinkInitParameters struct { + + // A hash block as defined below. + Hash *ContentLinkHashInitParameters `json:"hash,omitempty" tf:"hash,omitempty"` + + // The URI of the runbook content. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` + + // Specifies the version of the content + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type ContentLinkObservation struct { + + // A hash block as defined below. + Hash *ContentLinkHashObservation `json:"hash,omitempty" tf:"hash,omitempty"` + + // The URI of the runbook content. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` + + // Specifies the version of the content + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type ContentLinkParameters struct { + + // A hash block as defined below. + // +kubebuilder:validation:Optional + Hash *ContentLinkHashParameters `json:"hash,omitempty" tf:"hash,omitempty"` + + // The URI of the runbook content. + // +kubebuilder:validation:Optional + URI *string `json:"uri" tf:"uri,omitempty"` + + // Specifies the version of the content + // +kubebuilder:validation:Optional + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type DraftInitParameters struct { + + // A publish_content_link block as defined above. + ContentLink *ContentLinkInitParameters `json:"contentLink,omitempty" tf:"content_link,omitempty"` + + // Whether the draft in edit mode. + EditModeEnabled *bool `json:"editModeEnabled,omitempty" tf:"edit_mode_enabled,omitempty"` + + // Specifies the output types of the runbook. + OutputTypes []*string `json:"outputTypes,omitempty" tf:"output_types,omitempty"` + + // A list of parameters block as defined below. 
+ Parameters []ParametersInitParameters `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type DraftObservation struct { + + // A publish_content_link block as defined above. + ContentLink *ContentLinkObservation `json:"contentLink,omitempty" tf:"content_link,omitempty"` + + CreationTime *string `json:"creationTime,omitempty" tf:"creation_time,omitempty"` + + // Whether the draft in edit mode. + EditModeEnabled *bool `json:"editModeEnabled,omitempty" tf:"edit_mode_enabled,omitempty"` + + LastModifiedTime *string `json:"lastModifiedTime,omitempty" tf:"last_modified_time,omitempty"` + + // Specifies the output types of the runbook. + OutputTypes []*string `json:"outputTypes,omitempty" tf:"output_types,omitempty"` + + // A list of parameters block as defined below. + Parameters []ParametersObservation `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type DraftParameters struct { + + // A publish_content_link block as defined above. + // +kubebuilder:validation:Optional + ContentLink *ContentLinkParameters `json:"contentLink,omitempty" tf:"content_link,omitempty"` + + // Whether the draft in edit mode. + // +kubebuilder:validation:Optional + EditModeEnabled *bool `json:"editModeEnabled,omitempty" tf:"edit_mode_enabled,omitempty"` + + // Specifies the output types of the runbook. + // +kubebuilder:validation:Optional + OutputTypes []*string `json:"outputTypes,omitempty" tf:"output_types,omitempty"` + + // A list of parameters block as defined below. + // +kubebuilder:validation:Optional + Parameters []ParametersParameters `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type JobScheduleInitParameters struct { + + // The Automation Runbook ID. + JobScheduleID *string `json:"jobScheduleId,omitempty" tf:"job_schedule_id"` + + // A list of parameters block as defined below. 
+ // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters"` + + RunOn *string `json:"runOn,omitempty" tf:"run_on"` + + // Specifies the name of the Runbook. Changing this forces a new resource to be created. + ScheduleName *string `json:"scheduleName,omitempty" tf:"schedule_name"` +} + +type JobScheduleObservation struct { + + // The Automation Runbook ID. + JobScheduleID *string `json:"jobScheduleId,omitempty" tf:"job_schedule_id,omitempty"` + + // A list of parameters block as defined below. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + RunOn *string `json:"runOn,omitempty" tf:"run_on,omitempty"` + + // Specifies the name of the Runbook. Changing this forces a new resource to be created. + ScheduleName *string `json:"scheduleName,omitempty" tf:"schedule_name,omitempty"` +} + +type JobScheduleParameters struct { + + // The Automation Runbook ID. + // +kubebuilder:validation:Optional + JobScheduleID *string `json:"jobScheduleId,omitempty" tf:"job_schedule_id"` + + // A list of parameters block as defined below. + // +kubebuilder:validation:Optional + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters"` + + // +kubebuilder:validation:Optional + RunOn *string `json:"runOn,omitempty" tf:"run_on"` + + // Specifies the name of the Runbook. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ScheduleName *string `json:"scheduleName,omitempty" tf:"schedule_name"` +} + +type ParametersInitParameters struct { + + // Specifies the default value of the parameter. + DefaultValue *string `json:"defaultValue,omitempty" tf:"default_value,omitempty"` + + // The name of the parameter. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Whether this parameter is mandatory. 
+ Mandatory *bool `json:"mandatory,omitempty" tf:"mandatory,omitempty"` + + // Specifies the position of the parameter. + Position *float64 `json:"position,omitempty" tf:"position,omitempty"` + + // Specifies the type of this parameter. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ParametersObservation struct { + + // Specifies the default value of the parameter. + DefaultValue *string `json:"defaultValue,omitempty" tf:"default_value,omitempty"` + + // The name of the parameter. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Whether this parameter is mandatory. + Mandatory *bool `json:"mandatory,omitempty" tf:"mandatory,omitempty"` + + // Specifies the position of the parameter. + Position *float64 `json:"position,omitempty" tf:"position,omitempty"` + + // Specifies the type of this parameter. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ParametersParameters struct { + + // Specifies the default value of the parameter. + // +kubebuilder:validation:Optional + DefaultValue *string `json:"defaultValue,omitempty" tf:"default_value,omitempty"` + + // The name of the parameter. + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` + + // Whether this parameter is mandatory. + // +kubebuilder:validation:Optional + Mandatory *bool `json:"mandatory,omitempty" tf:"mandatory,omitempty"` + + // Specifies the position of the parameter. + // +kubebuilder:validation:Optional + Position *float64 `json:"position,omitempty" tf:"position,omitempty"` + + // Specifies the type of this parameter. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type PublishContentLinkHashInitParameters struct { + + // Specifies the hash algorithm used to hash the content. + Algorithm *string `json:"algorithm,omitempty" tf:"algorithm,omitempty"` + + // Specifies the expected hash value of the content. 
+ Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type PublishContentLinkHashObservation struct { + + // Specifies the hash algorithm used to hash the content. + Algorithm *string `json:"algorithm,omitempty" tf:"algorithm,omitempty"` + + // Specifies the expected hash value of the content. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type PublishContentLinkHashParameters struct { + + // Specifies the hash algorithm used to hash the content. + // +kubebuilder:validation:Optional + Algorithm *string `json:"algorithm" tf:"algorithm,omitempty"` + + // Specifies the expected hash value of the content. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type PublishContentLinkInitParameters struct { + + // A hash block as defined below. + Hash *PublishContentLinkHashInitParameters `json:"hash,omitempty" tf:"hash,omitempty"` + + // The URI of the runbook content. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` + + // Specifies the version of the content + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type PublishContentLinkObservation struct { + + // A hash block as defined below. + Hash *PublishContentLinkHashObservation `json:"hash,omitempty" tf:"hash,omitempty"` + + // The URI of the runbook content. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` + + // Specifies the version of the content + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type PublishContentLinkParameters struct { + + // A hash block as defined below. + // +kubebuilder:validation:Optional + Hash *PublishContentLinkHashParameters `json:"hash,omitempty" tf:"hash,omitempty"` + + // The URI of the runbook content. 
+ // +kubebuilder:validation:Optional + URI *string `json:"uri" tf:"uri,omitempty"` + + // Specifies the version of the content + // +kubebuilder:validation:Optional + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type RunBookInitParameters struct { + + // The name of the automation account in which the Runbook is created. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/automation/v1beta2.Account + AutomationAccountName *string `json:"automationAccountName,omitempty" tf:"automation_account_name,omitempty"` + + // Reference to a Account in automation to populate automationAccountName. + // +kubebuilder:validation:Optional + AutomationAccountNameRef *v1.Reference `json:"automationAccountNameRef,omitempty" tf:"-"` + + // Selector for a Account in automation to populate automationAccountName. + // +kubebuilder:validation:Optional + AutomationAccountNameSelector *v1.Selector `json:"automationAccountNameSelector,omitempty" tf:"-"` + + // The desired content of the runbook. + Content *string `json:"content,omitempty" tf:"content,omitempty"` + + // A description for this credential. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A draft block as defined below . + Draft *DraftInitParameters `json:"draft,omitempty" tf:"draft,omitempty"` + + JobSchedule []JobScheduleInitParameters `json:"jobSchedule,omitempty" tf:"job_schedule,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the activity-level tracing options of the runbook, available only for Graphical runbooks. Possible values are 0 for None, 9 for Basic, and 15 for Detailed. Must turn on Verbose logging in order to see the tracing. 
+ LogActivityTraceLevel *float64 `json:"logActivityTraceLevel,omitempty" tf:"log_activity_trace_level,omitempty"` + + // Progress log option. + LogProgress *bool `json:"logProgress,omitempty" tf:"log_progress,omitempty"` + + // Verbose log option. + LogVerbose *bool `json:"logVerbose,omitempty" tf:"log_verbose,omitempty"` + + // Specifies the name of the Runbook. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // One publish_content_link block as defined below. + PublishContentLink *PublishContentLinkInitParameters `json:"publishContentLink,omitempty" tf:"publish_content_link,omitempty"` + + // The name of the resource group in which the Runbook is created. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // The type of the runbook - can be either Graph, GraphPowerShell, GraphPowerShellWorkflow, PowerShellWorkflow, PowerShell, PowerShell72, Python3, Python2 or Script. Changing this forces a new resource to be created. + RunBookType *string `json:"runbookType,omitempty" tf:"runbook_type,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type RunBookObservation struct { + + // The name of the automation account in which the Runbook is created. 
Changing this forces a new resource to be created. + AutomationAccountName *string `json:"automationAccountName,omitempty" tf:"automation_account_name,omitempty"` + + // The desired content of the runbook. + Content *string `json:"content,omitempty" tf:"content,omitempty"` + + // A description for this credential. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A draft block as defined below . + Draft *DraftObservation `json:"draft,omitempty" tf:"draft,omitempty"` + + // The Automation Runbook ID. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + JobSchedule []JobScheduleObservation `json:"jobSchedule,omitempty" tf:"job_schedule,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the activity-level tracing options of the runbook, available only for Graphical runbooks. Possible values are 0 for None, 9 for Basic, and 15 for Detailed. Must turn on Verbose logging in order to see the tracing. + LogActivityTraceLevel *float64 `json:"logActivityTraceLevel,omitempty" tf:"log_activity_trace_level,omitempty"` + + // Progress log option. + LogProgress *bool `json:"logProgress,omitempty" tf:"log_progress,omitempty"` + + // Verbose log option. + LogVerbose *bool `json:"logVerbose,omitempty" tf:"log_verbose,omitempty"` + + // Specifies the name of the Runbook. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // One publish_content_link block as defined below. + PublishContentLink *PublishContentLinkObservation `json:"publishContentLink,omitempty" tf:"publish_content_link,omitempty"` + + // The name of the resource group in which the Runbook is created. Changing this forces a new resource to be created. 
+ ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // The type of the runbook - can be either Graph, GraphPowerShell, GraphPowerShellWorkflow, PowerShellWorkflow, PowerShell, PowerShell72, Python3, Python2 or Script. Changing this forces a new resource to be created. + RunBookType *string `json:"runbookType,omitempty" tf:"runbook_type,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type RunBookParameters struct { + + // The name of the automation account in which the Runbook is created. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/automation/v1beta2.Account + // +kubebuilder:validation:Optional + AutomationAccountName *string `json:"automationAccountName,omitempty" tf:"automation_account_name,omitempty"` + + // Reference to a Account in automation to populate automationAccountName. + // +kubebuilder:validation:Optional + AutomationAccountNameRef *v1.Reference `json:"automationAccountNameRef,omitempty" tf:"-"` + + // Selector for a Account in automation to populate automationAccountName. + // +kubebuilder:validation:Optional + AutomationAccountNameSelector *v1.Selector `json:"automationAccountNameSelector,omitempty" tf:"-"` + + // The desired content of the runbook. + // +kubebuilder:validation:Optional + Content *string `json:"content,omitempty" tf:"content,omitempty"` + + // A description for this credential. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A draft block as defined below . 
+ // +kubebuilder:validation:Optional + Draft *DraftParameters `json:"draft,omitempty" tf:"draft,omitempty"` + + // +kubebuilder:validation:Optional + JobSchedule []JobScheduleParameters `json:"jobSchedule,omitempty" tf:"job_schedule,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the activity-level tracing options of the runbook, available only for Graphical runbooks. Possible values are 0 for None, 9 for Basic, and 15 for Detailed. Must turn on Verbose logging in order to see the tracing. + // +kubebuilder:validation:Optional + LogActivityTraceLevel *float64 `json:"logActivityTraceLevel,omitempty" tf:"log_activity_trace_level,omitempty"` + + // Progress log option. + // +kubebuilder:validation:Optional + LogProgress *bool `json:"logProgress,omitempty" tf:"log_progress,omitempty"` + + // Verbose log option. + // +kubebuilder:validation:Optional + LogVerbose *bool `json:"logVerbose,omitempty" tf:"log_verbose,omitempty"` + + // Specifies the name of the Runbook. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // One publish_content_link block as defined below. + // +kubebuilder:validation:Optional + PublishContentLink *PublishContentLinkParameters `json:"publishContentLink,omitempty" tf:"publish_content_link,omitempty"` + + // The name of the resource group in which the Runbook is created. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // The type of the runbook - can be either Graph, GraphPowerShell, GraphPowerShellWorkflow, PowerShellWorkflow, PowerShell, PowerShell72, Python3, Python2 or Script. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + RunBookType *string `json:"runbookType,omitempty" tf:"runbook_type,omitempty"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +// RunBookSpec defines the desired state of RunBook +type RunBookSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider RunBookParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider RunBookInitParameters `json:"initProvider,omitempty"` +} + +// RunBookStatus defines the observed state of RunBook. +type RunBookStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider RunBookObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// RunBook is the Schema for the RunBooks API. Manages a Automation Runbook. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type RunBook struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.logProgress) || (has(self.initProvider) && has(self.initProvider.logProgress))",message="spec.forProvider.logProgress is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in 
self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.logVerbose) || (has(self.initProvider) && has(self.initProvider.logVerbose))",message="spec.forProvider.logVerbose is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.runbookType) || (has(self.initProvider) && has(self.initProvider.runbookType))",message="spec.forProvider.runbookType is a required parameter" + Spec RunBookSpec `json:"spec"` + Status RunBookStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// RunBookList contains a list of RunBooks +type RunBookList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []RunBook `json:"items"` +} + +// Repository type metadata. +var ( + RunBook_Kind = "RunBook" + RunBook_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: RunBook_Kind}.String() + RunBook_KindAPIVersion = RunBook_Kind + "." + CRDGroupVersion.String() + RunBook_GroupVersionKind = CRDGroupVersion.WithKind(RunBook_Kind) +) + +func init() { + SchemeBuilder.Register(&RunBook{}, &RunBookList{}) +} diff --git a/apis/automation/v1beta2/zz_schedule_terraformed.go b/apis/automation/v1beta2/zz_schedule_terraformed.go new file mode 100755 index 000000000..f3d1aab66 --- /dev/null +++ b/apis/automation/v1beta2/zz_schedule_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Schedule +func (mg *Schedule) GetTerraformResourceType() string { + return "azurerm_automation_schedule" +} + +// GetConnectionDetailsMapping for this Schedule +func (tr *Schedule) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Schedule +func (tr *Schedule) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Schedule +func (tr *Schedule) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Schedule +func (tr *Schedule) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Schedule +func (tr *Schedule) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Schedule +func (tr *Schedule) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Schedule +func (tr *Schedule) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, 
json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this Schedule +func (tr *Schedule) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Schedule using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Schedule) LateInitialize(attrs []byte) (bool, error) { + params := &ScheduleParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Schedule) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/automation/v1beta2/zz_schedule_types.go b/apis/automation/v1beta2/zz_schedule_types.go new file mode 100755 index 000000000..bd5bfe8ac --- /dev/null +++ b/apis/automation/v1beta2/zz_schedule_types.go @@ -0,0 +1,244 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type MonthlyOccurrenceInitParameters struct { + + // Day of the occurrence. Must be one of Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, Sunday. + Day *string `json:"day,omitempty" tf:"day,omitempty"` + + // Occurrence of the week within the month. Must be between 1 and 5. -1 for last week within the month. + Occurrence *float64 `json:"occurrence,omitempty" tf:"occurrence,omitempty"` +} + +type MonthlyOccurrenceObservation struct { + + // Day of the occurrence. Must be one of Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, Sunday. + Day *string `json:"day,omitempty" tf:"day,omitempty"` + + // Occurrence of the week within the month. Must be between 1 and 5. -1 for last week within the month. + Occurrence *float64 `json:"occurrence,omitempty" tf:"occurrence,omitempty"` +} + +type MonthlyOccurrenceParameters struct { + + // Day of the occurrence. Must be one of Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, Sunday. + // +kubebuilder:validation:Optional + Day *string `json:"day" tf:"day,omitempty"` + + // Occurrence of the week within the month. Must be between 1 and 5. -1 for last week within the month. 
+ // +kubebuilder:validation:Optional + Occurrence *float64 `json:"occurrence" tf:"occurrence,omitempty"` +} + +type ScheduleInitParameters struct { + + // A description for this Schedule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The end time of the schedule. + ExpiryTime *string `json:"expiryTime,omitempty" tf:"expiry_time,omitempty"` + + // The frequency of the schedule. - can be either OneTime, Day, Hour, Week, or Month. + Frequency *string `json:"frequency,omitempty" tf:"frequency,omitempty"` + + // The number of frequencys between runs. Only valid when frequency is Day, Hour, Week, or Month and defaults to 1. + Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` + + // List of days of the month that the job should execute on. Must be between 1 and 31. -1 for last day of the month. Only valid when frequency is Month. + // +listType=set + MonthDays []*float64 `json:"monthDays,omitempty" tf:"month_days,omitempty"` + + // One monthly_occurrence blocks as defined below to specifies occurrences of days within a month. Only valid when frequency is Month. The monthly_occurrence block supports fields documented below. + MonthlyOccurrence *MonthlyOccurrenceInitParameters `json:"monthlyOccurrence,omitempty" tf:"monthly_occurrence,omitempty"` + + // Start time of the schedule. Must be at least five minutes in the future. Defaults to seven minutes in the future from the time the resource is created. + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` + + // The timezone of the start time. Defaults to Etc/UTC. For possible values see: https://docs.microsoft.com/en-us/rest/api/maps/timezone/gettimezoneenumwindows + Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` + + // List of days of the week that the job should execute on. Only valid when frequency is Week. Possible values are Monday, Tuesday, Wednesday, Thursday, Friday, Saturday and Sunday. 
+ // +listType=set + WeekDays []*string `json:"weekDays,omitempty" tf:"week_days,omitempty"` +} + +type ScheduleObservation struct { + + // The name of the automation account in which the Schedule is created. Changing this forces a new resource to be created. + AutomationAccountName *string `json:"automationAccountName,omitempty" tf:"automation_account_name,omitempty"` + + // A description for this Schedule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The end time of the schedule. + ExpiryTime *string `json:"expiryTime,omitempty" tf:"expiry_time,omitempty"` + + // The frequency of the schedule. - can be either OneTime, Day, Hour, Week, or Month. + Frequency *string `json:"frequency,omitempty" tf:"frequency,omitempty"` + + // The Automation Schedule ID. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The number of frequencys between runs. Only valid when frequency is Day, Hour, Week, or Month and defaults to 1. + Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` + + // List of days of the month that the job should execute on. Must be between 1 and 31. -1 for last day of the month. Only valid when frequency is Month. + // +listType=set + MonthDays []*float64 `json:"monthDays,omitempty" tf:"month_days,omitempty"` + + // One monthly_occurrence blocks as defined below to specifies occurrences of days within a month. Only valid when frequency is Month. The monthly_occurrence block supports fields documented below. + MonthlyOccurrence *MonthlyOccurrenceObservation `json:"monthlyOccurrence,omitempty" tf:"monthly_occurrence,omitempty"` + + // The name of the resource group in which the Schedule is created. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Start time of the schedule. Must be at least five minutes in the future. 
Defaults to seven minutes in the future from the time the resource is created. + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` + + // The timezone of the start time. Defaults to Etc/UTC. For possible values see: https://docs.microsoft.com/en-us/rest/api/maps/timezone/gettimezoneenumwindows + Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` + + // List of days of the week that the job should execute on. Only valid when frequency is Week. Possible values are Monday, Tuesday, Wednesday, Thursday, Friday, Saturday and Sunday. + // +listType=set + WeekDays []*string `json:"weekDays,omitempty" tf:"week_days,omitempty"` +} + +type ScheduleParameters struct { + + // The name of the automation account in which the Schedule is created. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/automation/v1beta2.Account + // +kubebuilder:validation:Optional + AutomationAccountName *string `json:"automationAccountName,omitempty" tf:"automation_account_name,omitempty"` + + // Reference to a Account in automation to populate automationAccountName. + // +kubebuilder:validation:Optional + AutomationAccountNameRef *v1.Reference `json:"automationAccountNameRef,omitempty" tf:"-"` + + // Selector for a Account in automation to populate automationAccountName. + // +kubebuilder:validation:Optional + AutomationAccountNameSelector *v1.Selector `json:"automationAccountNameSelector,omitempty" tf:"-"` + + // A description for this Schedule. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The end time of the schedule. + // +kubebuilder:validation:Optional + ExpiryTime *string `json:"expiryTime,omitempty" tf:"expiry_time,omitempty"` + + // The frequency of the schedule. - can be either OneTime, Day, Hour, Week, or Month. 
+ // +kubebuilder:validation:Optional + Frequency *string `json:"frequency,omitempty" tf:"frequency,omitempty"` + + // The number of frequencys between runs. Only valid when frequency is Day, Hour, Week, or Month and defaults to 1. + // +kubebuilder:validation:Optional + Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` + + // List of days of the month that the job should execute on. Must be between 1 and 31. -1 for last day of the month. Only valid when frequency is Month. + // +kubebuilder:validation:Optional + // +listType=set + MonthDays []*float64 `json:"monthDays,omitempty" tf:"month_days,omitempty"` + + // One monthly_occurrence blocks as defined below to specifies occurrences of days within a month. Only valid when frequency is Month. The monthly_occurrence block supports fields documented below. + // +kubebuilder:validation:Optional + MonthlyOccurrence *MonthlyOccurrenceParameters `json:"monthlyOccurrence,omitempty" tf:"monthly_occurrence,omitempty"` + + // The name of the resource group in which the Schedule is created. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // Start time of the schedule. Must be at least five minutes in the future. Defaults to seven minutes in the future from the time the resource is created. 
+ // +kubebuilder:validation:Optional + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` + + // The timezone of the start time. Defaults to Etc/UTC. For possible values see: https://docs.microsoft.com/en-us/rest/api/maps/timezone/gettimezoneenumwindows + // +kubebuilder:validation:Optional + Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` + + // List of days of the week that the job should execute on. Only valid when frequency is Week. Possible values are Monday, Tuesday, Wednesday, Thursday, Friday, Saturday and Sunday. + // +kubebuilder:validation:Optional + // +listType=set + WeekDays []*string `json:"weekDays,omitempty" tf:"week_days,omitempty"` +} + +// ScheduleSpec defines the desired state of Schedule +type ScheduleSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ScheduleParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ScheduleInitParameters `json:"initProvider,omitempty"` +} + +// ScheduleStatus defines the observed state of Schedule. +type ScheduleStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ScheduleObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Schedule is the Schema for the Schedules API. Manages a Automation Schedule. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type Schedule struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.frequency) || (has(self.initProvider) && has(self.initProvider.frequency))",message="spec.forProvider.frequency is a required parameter" + Spec ScheduleSpec `json:"spec"` + Status ScheduleStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ScheduleList contains a list of Schedules +type ScheduleList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Schedule `json:"items"` +} + +// Repository type metadata. +var ( + Schedule_Kind = "Schedule" + Schedule_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Schedule_Kind}.String() + Schedule_KindAPIVersion = Schedule_Kind + "." 
+ CRDGroupVersion.String() + Schedule_GroupVersionKind = CRDGroupVersion.WithKind(Schedule_Kind) +) + +func init() { + SchemeBuilder.Register(&Schedule{}, &ScheduleList{}) +} diff --git a/apis/botservice/v1beta1/zz_generated.conversion_hubs.go b/apis/botservice/v1beta1/zz_generated.conversion_hubs.go index 616125a09..df4099f99 100755 --- a/apis/botservice/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/botservice/v1beta1/zz_generated.conversion_hubs.go @@ -25,10 +25,10 @@ func (tr *BotChannelSlack) Hub() {} func (tr *BotChannelSMS) Hub() {} // Hub marks this type as a conversion hub. -func (tr *BotChannelWebChat) Hub() {} +func (tr *BotChannelsRegistration) Hub() {} // Hub marks this type as a conversion hub. -func (tr *BotChannelsRegistration) Hub() {} +func (tr *BotChannelWebChat) Hub() {} // Hub marks this type as a conversion hub. func (tr *BotConnection) Hub() {} diff --git a/apis/cache/v1beta1/zz_generated.conversion_hubs.go b/apis/cache/v1beta1/zz_generated.conversion_hubs.go index 682533f70..48383624a 100755 --- a/apis/cache/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/cache/v1beta1/zz_generated.conversion_hubs.go @@ -6,9 +6,6 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *RedisCache) Hub() {} - // Hub marks this type as a conversion hub. func (tr *RedisEnterpriseCluster) Hub() {} diff --git a/apis/cache/v1beta1/zz_generated.conversion_spokes.go b/apis/cache/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..7416b2e2f --- /dev/null +++ b/apis/cache/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this RedisCache to the hub type. +func (tr *RedisCache) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the RedisCache type. +func (tr *RedisCache) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/cache/v1beta1/zz_generated.resolvers.go b/apis/cache/v1beta1/zz_generated.resolvers.go index 3f4bdff0f..d0ffe8d63 100644 --- a/apis/cache/v1beta1/zz_generated.resolvers.go +++ b/apis/cache/v1beta1/zz_generated.resolvers.go @@ -196,7 +196,7 @@ func (mg *RedisFirewallRule) ResolveReferences(ctx context.Context, c client.Rea var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("cache.azure.upbound.io", "v1beta1", "RedisCache", "RedisCacheList") + m, l, err = apisresolver.GetManagedResource("cache.azure.upbound.io", "v1beta2", "RedisCache", "RedisCacheList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -246,7 +246,7 @@ func (mg *RedisLinkedServer) 
ResolveReferences(ctx context.Context, c client.Rea var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("cache.azure.upbound.io", "v1beta1", "RedisCache", "RedisCacheList") + m, l, err = apisresolver.GetManagedResource("cache.azure.upbound.io", "v1beta2", "RedisCache", "RedisCacheList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -284,7 +284,7 @@ func (mg *RedisLinkedServer) ResolveReferences(ctx context.Context, c client.Rea mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("cache.azure.upbound.io", "v1beta1", "RedisCache", "RedisCacheList") + m, l, err = apisresolver.GetManagedResource("cache.azure.upbound.io", "v1beta2", "RedisCache", "RedisCacheList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -303,7 +303,7 @@ func (mg *RedisLinkedServer) ResolveReferences(ctx context.Context, c client.Rea mg.Spec.ForProvider.TargetRedisCacheName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.TargetRedisCacheNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("cache.azure.upbound.io", "v1beta1", "RedisCache", "RedisCacheList") + m, l, err = apisresolver.GetManagedResource("cache.azure.upbound.io", "v1beta2", "RedisCache", "RedisCacheList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/cache/v1beta1/zz_redisfirewallrule_types.go b/apis/cache/v1beta1/zz_redisfirewallrule_types.go index cda42b0bb..e996c17f0 100755 --- a/apis/cache/v1beta1/zz_redisfirewallrule_types.go +++ b/apis/cache/v1beta1/zz_redisfirewallrule_types.go @@ -47,7 +47,7 @@ type 
RedisFirewallRuleParameters struct { EndIP *string `json:"endIp,omitempty" tf:"end_ip,omitempty"` // The name of the Redis Cache. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cache/v1beta1.RedisCache + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cache/v1beta2.RedisCache // +kubebuilder:validation:Optional RedisCacheName *string `json:"redisCacheName,omitempty" tf:"redis_cache_name,omitempty"` diff --git a/apis/cache/v1beta1/zz_redislinkedserver_types.go b/apis/cache/v1beta1/zz_redislinkedserver_types.go index b2a5d147f..5d90fce7b 100755 --- a/apis/cache/v1beta1/zz_redislinkedserver_types.go +++ b/apis/cache/v1beta1/zz_redislinkedserver_types.go @@ -16,7 +16,7 @@ import ( type RedisLinkedServerInitParameters struct { // The ID of the linked Redis cache. Changing this forces a new Redis to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cache/v1beta1.RedisCache + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cache/v1beta2.RedisCache // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() LinkedRedisCacheID *string `json:"linkedRedisCacheId,omitempty" tf:"linked_redis_cache_id,omitempty"` @@ -65,7 +65,7 @@ type RedisLinkedServerObservation struct { type RedisLinkedServerParameters struct { // The ID of the linked Redis cache. Changing this forces a new Redis to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cache/v1beta1.RedisCache + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cache/v1beta2.RedisCache // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() // +kubebuilder:validation:Optional LinkedRedisCacheID *string `json:"linkedRedisCacheId,omitempty" tf:"linked_redis_cache_id,omitempty"` @@ -100,7 +100,7 @@ type RedisLinkedServerParameters struct { ServerRole *string `json:"serverRole,omitempty" tf:"server_role,omitempty"` // The name of Redis cache to link with. Changing this forces a new Redis to be created. (eg The primary role) - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cache/v1beta1.RedisCache + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cache/v1beta2.RedisCache // +kubebuilder:validation:Optional TargetRedisCacheName *string `json:"targetRedisCacheName,omitempty" tf:"target_redis_cache_name,omitempty"` diff --git a/apis/cache/v1beta2/zz_generated.conversion_hubs.go b/apis/cache/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 000000000..738c1a1ae --- /dev/null +++ b/apis/cache/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *RedisCache) Hub() {} diff --git a/apis/cache/v1beta2/zz_generated.deepcopy.go b/apis/cache/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..4f2314592 --- /dev/null +++ b/apis/cache/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,1041 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityInitParameters) DeepCopyInto(out *IdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityInitParameters. +func (in *IdentityInitParameters) DeepCopy() *IdentityInitParameters { + if in == nil { + return nil + } + out := new(IdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityObservation) DeepCopyInto(out *IdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityObservation. 
+func (in *IdentityObservation) DeepCopy() *IdentityObservation { + if in == nil { + return nil + } + out := new(IdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityParameters) DeepCopyInto(out *IdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityParameters. +func (in *IdentityParameters) DeepCopy() *IdentityParameters { + if in == nil { + return nil + } + out := new(IdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PatchScheduleInitParameters) DeepCopyInto(out *PatchScheduleInitParameters) { + *out = *in + if in.DayOfWeek != nil { + in, out := &in.DayOfWeek, &out.DayOfWeek + *out = new(string) + **out = **in + } + if in.MaintenanceWindow != nil { + in, out := &in.MaintenanceWindow, &out.MaintenanceWindow + *out = new(string) + **out = **in + } + if in.StartHourUtc != nil { + in, out := &in.StartHourUtc, &out.StartHourUtc + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PatchScheduleInitParameters. +func (in *PatchScheduleInitParameters) DeepCopy() *PatchScheduleInitParameters { + if in == nil { + return nil + } + out := new(PatchScheduleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *PatchScheduleObservation) DeepCopyInto(out *PatchScheduleObservation) { + *out = *in + if in.DayOfWeek != nil { + in, out := &in.DayOfWeek, &out.DayOfWeek + *out = new(string) + **out = **in + } + if in.MaintenanceWindow != nil { + in, out := &in.MaintenanceWindow, &out.MaintenanceWindow + *out = new(string) + **out = **in + } + if in.StartHourUtc != nil { + in, out := &in.StartHourUtc, &out.StartHourUtc + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PatchScheduleObservation. +func (in *PatchScheduleObservation) DeepCopy() *PatchScheduleObservation { + if in == nil { + return nil + } + out := new(PatchScheduleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PatchScheduleParameters) DeepCopyInto(out *PatchScheduleParameters) { + *out = *in + if in.DayOfWeek != nil { + in, out := &in.DayOfWeek, &out.DayOfWeek + *out = new(string) + **out = **in + } + if in.MaintenanceWindow != nil { + in, out := &in.MaintenanceWindow, &out.MaintenanceWindow + *out = new(string) + **out = **in + } + if in.StartHourUtc != nil { + in, out := &in.StartHourUtc, &out.StartHourUtc + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PatchScheduleParameters. +func (in *PatchScheduleParameters) DeepCopy() *PatchScheduleParameters { + if in == nil { + return nil + } + out := new(PatchScheduleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RedisCache) DeepCopyInto(out *RedisCache) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisCache. +func (in *RedisCache) DeepCopy() *RedisCache { + if in == nil { + return nil + } + out := new(RedisCache) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RedisCache) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedisCacheInitParameters) DeepCopyInto(out *RedisCacheInitParameters) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(float64) + **out = **in + } + if in.EnableNonSSLPort != nil { + in, out := &in.EnableNonSSLPort, &out.EnableNonSSLPort + *out = new(bool) + **out = **in + } + if in.Family != nil { + in, out := &in.Family, &out.Family + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MinimumTLSVersion != nil { + in, out := &in.MinimumTLSVersion, &out.MinimumTLSVersion + *out = new(string) + **out = **in + } + if in.PatchSchedule != nil { + in, out := &in.PatchSchedule, &out.PatchSchedule + *out = make([]PatchScheduleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PrivateStaticIPAddress != nil { + in, out := &in.PrivateStaticIPAddress, &out.PrivateStaticIPAddress + *out = new(string) + **out = **in + } + if 
in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.RedisConfiguration != nil { + in, out := &in.RedisConfiguration, &out.RedisConfiguration + *out = new(RedisConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RedisVersion != nil { + in, out := &in.RedisVersion, &out.RedisVersion + *out = new(string) + **out = **in + } + if in.ReplicasPerMaster != nil { + in, out := &in.ReplicasPerMaster, &out.ReplicasPerMaster + *out = new(float64) + **out = **in + } + if in.ReplicasPerPrimary != nil { + in, out := &in.ReplicasPerPrimary, &out.ReplicasPerPrimary + *out = new(float64) + **out = **in + } + if in.ShardCount != nil { + in, out := &in.ShardCount, &out.ShardCount + *out = new(float64) + **out = **in + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TenantSettings != nil { + in, out := &in.TenantSettings, &out.TenantSettings + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } 
+ if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisCacheInitParameters. +func (in *RedisCacheInitParameters) DeepCopy() *RedisCacheInitParameters { + if in == nil { + return nil + } + out := new(RedisCacheInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedisCacheList) DeepCopyInto(out *RedisCacheList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RedisCache, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisCacheList. +func (in *RedisCacheList) DeepCopy() *RedisCacheList { + if in == nil { + return nil + } + out := new(RedisCacheList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RedisCacheList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RedisCacheObservation) DeepCopyInto(out *RedisCacheObservation) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(float64) + **out = **in + } + if in.EnableNonSSLPort != nil { + in, out := &in.EnableNonSSLPort, &out.EnableNonSSLPort + *out = new(bool) + **out = **in + } + if in.Family != nil { + in, out := &in.Family, &out.Family + *out = new(string) + **out = **in + } + if in.HostName != nil { + in, out := &in.HostName, &out.HostName + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MinimumTLSVersion != nil { + in, out := &in.MinimumTLSVersion, &out.MinimumTLSVersion + *out = new(string) + **out = **in + } + if in.PatchSchedule != nil { + in, out := &in.PatchSchedule, &out.PatchSchedule + *out = make([]PatchScheduleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.PrivateStaticIPAddress != nil { + in, out := &in.PrivateStaticIPAddress, &out.PrivateStaticIPAddress + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.RedisConfiguration != nil { + in, out := &in.RedisConfiguration, &out.RedisConfiguration + *out = new(RedisConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.RedisVersion != nil { + in, out := &in.RedisVersion, &out.RedisVersion + *out = new(string) + **out = **in + } + if in.ReplicasPerMaster != nil { + in, out := &in.ReplicasPerMaster, &out.ReplicasPerMaster + *out = 
new(float64) + **out = **in + } + if in.ReplicasPerPrimary != nil { + in, out := &in.ReplicasPerPrimary, &out.ReplicasPerPrimary + *out = new(float64) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.SSLPort != nil { + in, out := &in.SSLPort, &out.SSLPort + *out = new(float64) + **out = **in + } + if in.ShardCount != nil { + in, out := &in.ShardCount, &out.ShardCount + *out = new(float64) + **out = **in + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TenantSettings != nil { + in, out := &in.TenantSettings, &out.TenantSettings + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisCacheObservation. 
+func (in *RedisCacheObservation) DeepCopy() *RedisCacheObservation { + if in == nil { + return nil + } + out := new(RedisCacheObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedisCacheParameters) DeepCopyInto(out *RedisCacheParameters) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(float64) + **out = **in + } + if in.EnableNonSSLPort != nil { + in, out := &in.EnableNonSSLPort, &out.EnableNonSSLPort + *out = new(bool) + **out = **in + } + if in.Family != nil { + in, out := &in.Family, &out.Family + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MinimumTLSVersion != nil { + in, out := &in.MinimumTLSVersion, &out.MinimumTLSVersion + *out = new(string) + **out = **in + } + if in.PatchSchedule != nil { + in, out := &in.PatchSchedule, &out.PatchSchedule + *out = make([]PatchScheduleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PrivateStaticIPAddress != nil { + in, out := &in.PrivateStaticIPAddress, &out.PrivateStaticIPAddress + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.RedisConfiguration != nil { + in, out := &in.RedisConfiguration, &out.RedisConfiguration + *out = new(RedisConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.RedisVersion != nil { + in, out := &in.RedisVersion, &out.RedisVersion + *out = new(string) + **out = **in + } + if in.ReplicasPerMaster != nil { + in, out := &in.ReplicasPerMaster, &out.ReplicasPerMaster + *out = 
new(float64) + **out = **in + } + if in.ReplicasPerPrimary != nil { + in, out := &in.ReplicasPerPrimary, &out.ReplicasPerPrimary + *out = new(float64) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ShardCount != nil { + in, out := &in.ShardCount, &out.ShardCount + *out = new(float64) + **out = **in + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TenantSettings != nil { + in, out := &in.TenantSettings, &out.TenantSettings + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = 
make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisCacheParameters. +func (in *RedisCacheParameters) DeepCopy() *RedisCacheParameters { + if in == nil { + return nil + } + out := new(RedisCacheParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedisCacheSpec) DeepCopyInto(out *RedisCacheSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisCacheSpec. +func (in *RedisCacheSpec) DeepCopy() *RedisCacheSpec { + if in == nil { + return nil + } + out := new(RedisCacheSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedisCacheStatus) DeepCopyInto(out *RedisCacheStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisCacheStatus. +func (in *RedisCacheStatus) DeepCopy() *RedisCacheStatus { + if in == nil { + return nil + } + out := new(RedisCacheStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RedisConfigurationInitParameters) DeepCopyInto(out *RedisConfigurationInitParameters) { + *out = *in + if in.ActiveDirectoryAuthenticationEnabled != nil { + in, out := &in.ActiveDirectoryAuthenticationEnabled, &out.ActiveDirectoryAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.AofBackupEnabled != nil { + in, out := &in.AofBackupEnabled, &out.AofBackupEnabled + *out = new(bool) + **out = **in + } + if in.DataPersistenceAuthenticationMethod != nil { + in, out := &in.DataPersistenceAuthenticationMethod, &out.DataPersistenceAuthenticationMethod + *out = new(string) + **out = **in + } + if in.EnableAuthentication != nil { + in, out := &in.EnableAuthentication, &out.EnableAuthentication + *out = new(bool) + **out = **in + } + if in.MaxfragmentationmemoryReserved != nil { + in, out := &in.MaxfragmentationmemoryReserved, &out.MaxfragmentationmemoryReserved + *out = new(float64) + **out = **in + } + if in.MaxmemoryDelta != nil { + in, out := &in.MaxmemoryDelta, &out.MaxmemoryDelta + *out = new(float64) + **out = **in + } + if in.MaxmemoryPolicy != nil { + in, out := &in.MaxmemoryPolicy, &out.MaxmemoryPolicy + *out = new(string) + **out = **in + } + if in.MaxmemoryReserved != nil { + in, out := &in.MaxmemoryReserved, &out.MaxmemoryReserved + *out = new(float64) + **out = **in + } + if in.NotifyKeySpaceEvents != nil { + in, out := &in.NotifyKeySpaceEvents, &out.NotifyKeySpaceEvents + *out = new(string) + **out = **in + } + if in.RdbBackupEnabled != nil { + in, out := &in.RdbBackupEnabled, &out.RdbBackupEnabled + *out = new(bool) + **out = **in + } + if in.RdbBackupFrequency != nil { + in, out := &in.RdbBackupFrequency, &out.RdbBackupFrequency + *out = new(float64) + **out = **in + } + if in.RdbBackupMaxSnapshotCount != nil { + in, out := &in.RdbBackupMaxSnapshotCount, &out.RdbBackupMaxSnapshotCount + *out = new(float64) + **out = **in + } + if in.StorageAccountSubscriptionID != nil { + in, out := &in.StorageAccountSubscriptionID, 
&out.StorageAccountSubscriptionID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisConfigurationInitParameters. +func (in *RedisConfigurationInitParameters) DeepCopy() *RedisConfigurationInitParameters { + if in == nil { + return nil + } + out := new(RedisConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedisConfigurationObservation) DeepCopyInto(out *RedisConfigurationObservation) { + *out = *in + if in.ActiveDirectoryAuthenticationEnabled != nil { + in, out := &in.ActiveDirectoryAuthenticationEnabled, &out.ActiveDirectoryAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.AofBackupEnabled != nil { + in, out := &in.AofBackupEnabled, &out.AofBackupEnabled + *out = new(bool) + **out = **in + } + if in.DataPersistenceAuthenticationMethod != nil { + in, out := &in.DataPersistenceAuthenticationMethod, &out.DataPersistenceAuthenticationMethod + *out = new(string) + **out = **in + } + if in.EnableAuthentication != nil { + in, out := &in.EnableAuthentication, &out.EnableAuthentication + *out = new(bool) + **out = **in + } + if in.Maxclients != nil { + in, out := &in.Maxclients, &out.Maxclients + *out = new(float64) + **out = **in + } + if in.MaxfragmentationmemoryReserved != nil { + in, out := &in.MaxfragmentationmemoryReserved, &out.MaxfragmentationmemoryReserved + *out = new(float64) + **out = **in + } + if in.MaxmemoryDelta != nil { + in, out := &in.MaxmemoryDelta, &out.MaxmemoryDelta + *out = new(float64) + **out = **in + } + if in.MaxmemoryPolicy != nil { + in, out := &in.MaxmemoryPolicy, &out.MaxmemoryPolicy + *out = new(string) + **out = **in + } + if in.MaxmemoryReserved != nil { + in, out := &in.MaxmemoryReserved, &out.MaxmemoryReserved + *out = new(float64) + **out = **in + } + if in.NotifyKeySpaceEvents != nil { 
+ in, out := &in.NotifyKeySpaceEvents, &out.NotifyKeySpaceEvents + *out = new(string) + **out = **in + } + if in.RdbBackupEnabled != nil { + in, out := &in.RdbBackupEnabled, &out.RdbBackupEnabled + *out = new(bool) + **out = **in + } + if in.RdbBackupFrequency != nil { + in, out := &in.RdbBackupFrequency, &out.RdbBackupFrequency + *out = new(float64) + **out = **in + } + if in.RdbBackupMaxSnapshotCount != nil { + in, out := &in.RdbBackupMaxSnapshotCount, &out.RdbBackupMaxSnapshotCount + *out = new(float64) + **out = **in + } + if in.StorageAccountSubscriptionID != nil { + in, out := &in.StorageAccountSubscriptionID, &out.StorageAccountSubscriptionID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisConfigurationObservation. +func (in *RedisConfigurationObservation) DeepCopy() *RedisConfigurationObservation { + if in == nil { + return nil + } + out := new(RedisConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RedisConfigurationParameters) DeepCopyInto(out *RedisConfigurationParameters) { + *out = *in + if in.ActiveDirectoryAuthenticationEnabled != nil { + in, out := &in.ActiveDirectoryAuthenticationEnabled, &out.ActiveDirectoryAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.AofBackupEnabled != nil { + in, out := &in.AofBackupEnabled, &out.AofBackupEnabled + *out = new(bool) + **out = **in + } + if in.AofStorageConnectionString0SecretRef != nil { + in, out := &in.AofStorageConnectionString0SecretRef, &out.AofStorageConnectionString0SecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.AofStorageConnectionString1SecretRef != nil { + in, out := &in.AofStorageConnectionString1SecretRef, &out.AofStorageConnectionString1SecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.DataPersistenceAuthenticationMethod != nil { + in, out := &in.DataPersistenceAuthenticationMethod, &out.DataPersistenceAuthenticationMethod + *out = new(string) + **out = **in + } + if in.EnableAuthentication != nil { + in, out := &in.EnableAuthentication, &out.EnableAuthentication + *out = new(bool) + **out = **in + } + if in.MaxfragmentationmemoryReserved != nil { + in, out := &in.MaxfragmentationmemoryReserved, &out.MaxfragmentationmemoryReserved + *out = new(float64) + **out = **in + } + if in.MaxmemoryDelta != nil { + in, out := &in.MaxmemoryDelta, &out.MaxmemoryDelta + *out = new(float64) + **out = **in + } + if in.MaxmemoryPolicy != nil { + in, out := &in.MaxmemoryPolicy, &out.MaxmemoryPolicy + *out = new(string) + **out = **in + } + if in.MaxmemoryReserved != nil { + in, out := &in.MaxmemoryReserved, &out.MaxmemoryReserved + *out = new(float64) + **out = **in + } + if in.NotifyKeySpaceEvents != nil { + in, out := &in.NotifyKeySpaceEvents, &out.NotifyKeySpaceEvents + *out = new(string) + **out = **in + } + if in.RdbBackupEnabled != nil { + in, out := &in.RdbBackupEnabled, &out.RdbBackupEnabled + *out = new(bool) + **out = **in + } + 
if in.RdbBackupFrequency != nil { + in, out := &in.RdbBackupFrequency, &out.RdbBackupFrequency + *out = new(float64) + **out = **in + } + if in.RdbBackupMaxSnapshotCount != nil { + in, out := &in.RdbBackupMaxSnapshotCount, &out.RdbBackupMaxSnapshotCount + *out = new(float64) + **out = **in + } + if in.RdbStorageConnectionStringSecretRef != nil { + in, out := &in.RdbStorageConnectionStringSecretRef, &out.RdbStorageConnectionStringSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.StorageAccountSubscriptionID != nil { + in, out := &in.StorageAccountSubscriptionID, &out.StorageAccountSubscriptionID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisConfigurationParameters. +func (in *RedisConfigurationParameters) DeepCopy() *RedisConfigurationParameters { + if in == nil { + return nil + } + out := new(RedisConfigurationParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/cache/v1beta2/zz_generated.managed.go b/apis/cache/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..a344699de --- /dev/null +++ b/apis/cache/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this RedisCache. +func (mg *RedisCache) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this RedisCache. +func (mg *RedisCache) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this RedisCache. +func (mg *RedisCache) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this RedisCache. 
+func (mg *RedisCache) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this RedisCache. +func (mg *RedisCache) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this RedisCache. +func (mg *RedisCache) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this RedisCache. +func (mg *RedisCache) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this RedisCache. +func (mg *RedisCache) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this RedisCache. +func (mg *RedisCache) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this RedisCache. +func (mg *RedisCache) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this RedisCache. +func (mg *RedisCache) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this RedisCache. +func (mg *RedisCache) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/cache/v1beta2/zz_generated.managedlist.go b/apis/cache/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..52e359144 --- /dev/null +++ b/apis/cache/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. 
+ +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this RedisCacheList. +func (l *RedisCacheList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/cache/v1beta2/zz_generated.resolvers.go b/apis/cache/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..bbb059e9a --- /dev/null +++ b/apis/cache/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,87 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + rconfig "github.com/upbound/provider-azure/apis/rconfig" + apisresolver "github.com/upbound/provider-azure/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *RedisCache) ResolveReferences( // ResolveReferences of this RedisCache. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.SubnetIDRef, + Selector: mg.Spec.ForProvider.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SubnetID") + } + mg.Spec.ForProvider.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SubnetIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the 
reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.SubnetIDRef, + Selector: mg.Spec.InitProvider.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SubnetID") + } + mg.Spec.InitProvider.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SubnetIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/cache/v1beta2/zz_groupversion_info.go b/apis/cache/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..bf627c7b7 --- /dev/null +++ b/apis/cache/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=cache.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "cache.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/cache/v1beta2/zz_rediscache_terraformed.go b/apis/cache/v1beta2/zz_rediscache_terraformed.go new file mode 100755 index 000000000..dfcca1ec1 --- /dev/null +++ b/apis/cache/v1beta2/zz_rediscache_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this RedisCache +func (mg *RedisCache) GetTerraformResourceType() string { + return "azurerm_redis_cache" +} + +// GetConnectionDetailsMapping for this RedisCache +func (tr *RedisCache) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"primary_access_key": "status.atProvider.primaryAccessKey", "primary_connection_string": "status.atProvider.primaryConnectionString", "redis_configuration[*].aof_storage_connection_string_0": "spec.forProvider.redisConfiguration[*].aofStorageConnectionString0SecretRef", "redis_configuration[*].aof_storage_connection_string_1": "spec.forProvider.redisConfiguration[*].aofStorageConnectionString1SecretRef", "redis_configuration[*].rdb_storage_connection_string": "spec.forProvider.redisConfiguration[*].rdbStorageConnectionStringSecretRef", "secondary_access_key": "status.atProvider.secondaryAccessKey", "secondary_connection_string": "status.atProvider.secondaryConnectionString"} +} + +// GetObservation of this RedisCache +func (tr *RedisCache) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this RedisCache +func (tr *RedisCache) SetObservation(obs 
map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this RedisCache +func (tr *RedisCache) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this RedisCache +func (tr *RedisCache) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this RedisCache +func (tr *RedisCache) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this RedisCache +func (tr *RedisCache) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this RedisCache +func (tr *RedisCache) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this RedisCache using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *RedisCache) LateInitialize(attrs []byte) (bool, error) { + params := &RedisCacheParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *RedisCache) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/cache/v1beta2/zz_rediscache_types.go b/apis/cache/v1beta2/zz_rediscache_types.go new file mode 100755 index 000000000..175f8901a --- /dev/null +++ b/apis/cache/v1beta2/zz_rediscache_types.go @@ -0,0 +1,566 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type IdentityInitParameters struct { + + // A list of User Assigned Managed Identity IDs to be assigned to this Redis Cluster. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Redis Cluster. 
Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityObservation struct { + + // A list of User Assigned Managed Identity IDs to be assigned to this Redis Cluster. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Route ID. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Route ID. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Redis Cluster. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityParameters struct { + + // A list of User Assigned Managed Identity IDs to be assigned to this Redis Cluster. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Redis Cluster. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type PatchScheduleInitParameters struct { + + // the Weekday name - possible values include Monday, Tuesday, Wednesday etc. + DayOfWeek *string `json:"dayOfWeek,omitempty" tf:"day_of_week,omitempty"` + + // The ISO 8601 timespan which specifies the amount of time the Redis Cache can be updated. Defaults to PT5H. + MaintenanceWindow *string `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"` + + // the Start Hour for maintenance in UTC - possible values range from 0 - 23. 
+ StartHourUtc *float64 `json:"startHourUtc,omitempty" tf:"start_hour_utc,omitempty"` +} + +type PatchScheduleObservation struct { + + // the Weekday name - possible values include Monday, Tuesday, Wednesday etc. + DayOfWeek *string `json:"dayOfWeek,omitempty" tf:"day_of_week,omitempty"` + + // The ISO 8601 timespan which specifies the amount of time the Redis Cache can be updated. Defaults to PT5H. + MaintenanceWindow *string `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"` + + // the Start Hour for maintenance in UTC - possible values range from 0 - 23. + StartHourUtc *float64 `json:"startHourUtc,omitempty" tf:"start_hour_utc,omitempty"` +} + +type PatchScheduleParameters struct { + + // the Weekday name - possible values include Monday, Tuesday, Wednesday etc. + // +kubebuilder:validation:Optional + DayOfWeek *string `json:"dayOfWeek" tf:"day_of_week,omitempty"` + + // The ISO 8601 timespan which specifies the amount of time the Redis Cache can be updated. Defaults to PT5H. + // +kubebuilder:validation:Optional + MaintenanceWindow *string `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"` + + // the Start Hour for maintenance in UTC - possible values range from 0 - 23. + // +kubebuilder:validation:Optional + StartHourUtc *float64 `json:"startHourUtc,omitempty" tf:"start_hour_utc,omitempty"` +} + +type RedisCacheInitParameters struct { + + // The size of the Redis cache to deploy. Valid values for a SKU family of C (Basic/Standard) are 0, 1, 2, 3, 4, 5, 6, and for P (Premium) family are 1, 2, 3, 4, 5. + Capacity *float64 `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // Enable the non-SSL port (6379) - disabled by default. + EnableNonSSLPort *bool `json:"enableNonSslPort,omitempty" tf:"enable_non_ssl_port,omitempty"` + + // The SKU family/pricing group to use. 
Valid values are C (for Basic/Standard SKU family) and P (for Premium) + Family *string `json:"family,omitempty" tf:"family,omitempty"` + + // An identity block as defined below. + Identity *IdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The location of the resource group. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The minimum TLS version. Possible values are 1.0, 1.1 and 1.2. Defaults to 1.0. + MinimumTLSVersion *string `json:"minimumTlsVersion,omitempty" tf:"minimum_tls_version,omitempty"` + + // A list of patch_schedule blocks as defined below. + PatchSchedule []PatchScheduleInitParameters `json:"patchSchedule,omitempty" tf:"patch_schedule,omitempty"` + + // The Static IP Address to assign to the Redis Cache when hosted inside the Virtual Network. This argument implies the use of subnet_id. Changing this forces a new resource to be created. + PrivateStaticIPAddress *string `json:"privateStaticIpAddress,omitempty" tf:"private_static_ip_address,omitempty"` + + // Whether or not public network access is allowed for this Redis Cache. true means this resource could be accessed by both public and private endpoint. false means only private endpoint access is allowed. Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // A redis_configuration block as defined below - with some limitations by SKU - defaults/details are shown below. + RedisConfiguration *RedisConfigurationInitParameters `json:"redisConfiguration,omitempty" tf:"redis_configuration,omitempty"` + + // Redis version. Only major version needed. Valid values: 4, 6. + RedisVersion *string `json:"redisVersion,omitempty" tf:"redis_version,omitempty"` + + // Amount of replicas to create per master for this Redis Cache. 
+ ReplicasPerMaster *float64 `json:"replicasPerMaster,omitempty" tf:"replicas_per_master,omitempty"` + + // Amount of replicas to create per primary for this Redis Cache. If both replicas_per_primary and replicas_per_master are set, they need to be equal. + ReplicasPerPrimary *float64 `json:"replicasPerPrimary,omitempty" tf:"replicas_per_primary,omitempty"` + + // Only available when using the Premium SKU The number of Shards to create on the Redis Cluster. + ShardCount *float64 `json:"shardCount,omitempty" tf:"shard_count,omitempty"` + + // The SKU of Redis to use. Possible values are Basic, Standard and Premium. + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // Only available when using the Premium SKU The ID of the Subnet within which the Redis Cache should be deployed. This Subnet must only contain Azure Cache for Redis instances without any other type of resources. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A mapping of tenant settings to assign to the resource. 
+ // +mapType=granular + TenantSettings map[string]*string `json:"tenantSettings,omitempty" tf:"tenant_settings,omitempty"` + + // Specifies a list of Availability Zones in which this Redis Cache should be located. Changing this forces a new Redis Cache to be created. + // +listType=set + Zones []*string `json:"zones,omitempty" tf:"zones,omitempty"` +} + +type RedisCacheObservation struct { + + // The size of the Redis cache to deploy. Valid values for a SKU family of C (Basic/Standard) are 0, 1, 2, 3, 4, 5, 6, and for P (Premium) family are 1, 2, 3, 4, 5. + Capacity *float64 `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // Enable the non-SSL port (6379) - disabled by default. + EnableNonSSLPort *bool `json:"enableNonSslPort,omitempty" tf:"enable_non_ssl_port,omitempty"` + + // The SKU family/pricing group to use. Valid values are C (for Basic/Standard SKU family) and P (for Premium) + Family *string `json:"family,omitempty" tf:"family,omitempty"` + + // The Hostname of the Redis Instance + HostName *string `json:"hostname,omitempty" tf:"hostname,omitempty"` + + // The Route ID. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. + Identity *IdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // The location of the resource group. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The minimum TLS version. Possible values are 1.0, 1.1 and 1.2. Defaults to 1.0. + MinimumTLSVersion *string `json:"minimumTlsVersion,omitempty" tf:"minimum_tls_version,omitempty"` + + // A list of patch_schedule blocks as defined below. 
+ PatchSchedule []PatchScheduleObservation `json:"patchSchedule,omitempty" tf:"patch_schedule,omitempty"` + + // The non-SSL Port of the Redis Instance + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // The Static IP Address to assign to the Redis Cache when hosted inside the Virtual Network. This argument implies the use of subnet_id. Changing this forces a new resource to be created. + PrivateStaticIPAddress *string `json:"privateStaticIpAddress,omitempty" tf:"private_static_ip_address,omitempty"` + + // Whether or not public network access is allowed for this Redis Cache. true means this resource could be accessed by both public and private endpoint. false means only private endpoint access is allowed. Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // A redis_configuration block as defined below - with some limitations by SKU - defaults/details are shown below. + RedisConfiguration *RedisConfigurationObservation `json:"redisConfiguration,omitempty" tf:"redis_configuration,omitempty"` + + // Redis version. Only major version needed. Valid values: 4, 6. + RedisVersion *string `json:"redisVersion,omitempty" tf:"redis_version,omitempty"` + + // Amount of replicas to create per master for this Redis Cache. + ReplicasPerMaster *float64 `json:"replicasPerMaster,omitempty" tf:"replicas_per_master,omitempty"` + + // Amount of replicas to create per primary for this Redis Cache. If both replicas_per_primary and replicas_per_master are set, they need to be equal. + ReplicasPerPrimary *float64 `json:"replicasPerPrimary,omitempty" tf:"replicas_per_primary,omitempty"` + + // The name of the resource group in which to create the Redis instance. Changing this forces a new resource to be created. 
+ ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // The SSL Port of the Redis Instance + SSLPort *float64 `json:"sslPort,omitempty" tf:"ssl_port,omitempty"` + + // Only available when using the Premium SKU The number of Shards to create on the Redis Cluster. + ShardCount *float64 `json:"shardCount,omitempty" tf:"shard_count,omitempty"` + + // The SKU of Redis to use. Possible values are Basic, Standard and Premium. + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // Only available when using the Premium SKU The ID of the Subnet within which the Redis Cache should be deployed. This Subnet must only contain Azure Cache for Redis instances without any other type of resources. Changing this forces a new resource to be created. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A mapping of tenant settings to assign to the resource. + // +mapType=granular + TenantSettings map[string]*string `json:"tenantSettings,omitempty" tf:"tenant_settings,omitempty"` + + // Specifies a list of Availability Zones in which this Redis Cache should be located. Changing this forces a new Redis Cache to be created. + // +listType=set + Zones []*string `json:"zones,omitempty" tf:"zones,omitempty"` +} + +type RedisCacheParameters struct { + + // The size of the Redis cache to deploy. Valid values for a SKU family of C (Basic/Standard) are 0, 1, 2, 3, 4, 5, 6, and for P (Premium) family are 1, 2, 3, 4, 5. + // +kubebuilder:validation:Optional + Capacity *float64 `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // Enable the non-SSL port (6379) - disabled by default. 
+ // +kubebuilder:validation:Optional + EnableNonSSLPort *bool `json:"enableNonSslPort,omitempty" tf:"enable_non_ssl_port,omitempty"` + + // The SKU family/pricing group to use. Valid values are C (for Basic/Standard SKU family) and P (for Premium) + // +kubebuilder:validation:Optional + Family *string `json:"family,omitempty" tf:"family,omitempty"` + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *IdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The location of the resource group. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The minimum TLS version. Possible values are 1.0, 1.1 and 1.2. Defaults to 1.0. + // +kubebuilder:validation:Optional + MinimumTLSVersion *string `json:"minimumTlsVersion,omitempty" tf:"minimum_tls_version,omitempty"` + + // A list of patch_schedule blocks as defined below. + // +kubebuilder:validation:Optional + PatchSchedule []PatchScheduleParameters `json:"patchSchedule,omitempty" tf:"patch_schedule,omitempty"` + + // The Static IP Address to assign to the Redis Cache when hosted inside the Virtual Network. This argument implies the use of subnet_id. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + PrivateStaticIPAddress *string `json:"privateStaticIpAddress,omitempty" tf:"private_static_ip_address,omitempty"` + + // Whether or not public network access is allowed for this Redis Cache. true means this resource could be accessed by both public and private endpoint. false means only private endpoint access is allowed. Defaults to true. 
+ // +kubebuilder:validation:Optional + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // A redis_configuration block as defined below - with some limitations by SKU - defaults/details are shown below. + // +kubebuilder:validation:Optional + RedisConfiguration *RedisConfigurationParameters `json:"redisConfiguration,omitempty" tf:"redis_configuration,omitempty"` + + // Redis version. Only major version needed. Valid values: 4, 6. + // +kubebuilder:validation:Optional + RedisVersion *string `json:"redisVersion,omitempty" tf:"redis_version,omitempty"` + + // Amount of replicas to create per master for this Redis Cache. + // +kubebuilder:validation:Optional + ReplicasPerMaster *float64 `json:"replicasPerMaster,omitempty" tf:"replicas_per_master,omitempty"` + + // Amount of replicas to create per primary for this Redis Cache. If both replicas_per_primary and replicas_per_master are set, they need to be equal. + // +kubebuilder:validation:Optional + ReplicasPerPrimary *float64 `json:"replicasPerPrimary,omitempty" tf:"replicas_per_primary,omitempty"` + + // The name of the resource group in which to create the Redis instance. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. 
+ // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // Only available when using the Premium SKU The number of Shards to create on the Redis Cluster. + // +kubebuilder:validation:Optional + ShardCount *float64 `json:"shardCount,omitempty" tf:"shard_count,omitempty"` + + // The SKU of Redis to use. Possible values are Basic, Standard and Premium. + // +kubebuilder:validation:Optional + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // Only available when using the Premium SKU The ID of the Subnet within which the Redis Cache should be deployed. This Subnet must only contain Azure Cache for Redis instances without any other type of resources. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A mapping of tenant settings to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + TenantSettings map[string]*string `json:"tenantSettings,omitempty" tf:"tenant_settings,omitempty"` + + // Specifies a list of Availability Zones in which this Redis Cache should be located. 
Changing this forces a new Redis Cache to be created. + // +kubebuilder:validation:Optional + // +listType=set + Zones []*string `json:"zones,omitempty" tf:"zones,omitempty"` +} + +type RedisConfigurationInitParameters struct { + + // Enable Microsoft Entra (AAD) authentication. Defaults to false. + ActiveDirectoryAuthenticationEnabled *bool `json:"activeDirectoryAuthenticationEnabled,omitempty" tf:"active_directory_authentication_enabled,omitempty"` + + // Enable or disable AOF persistence for this Redis Cache. Defaults to false. + AofBackupEnabled *bool `json:"aofBackupEnabled,omitempty" tf:"aof_backup_enabled,omitempty"` + + // Preferred auth method to communicate to storage account used for data persistence. Possible values are SAS and ManagedIdentity. Defaults to SAS. + DataPersistenceAuthenticationMethod *string `json:"dataPersistenceAuthenticationMethod,omitempty" tf:"data_persistence_authentication_method,omitempty"` + + // If set to false, the Redis instance will be accessible without authentication. Defaults to true. + EnableAuthentication *bool `json:"enableAuthentication,omitempty" tf:"enable_authentication,omitempty"` + + // Value in megabytes reserved to accommodate for memory fragmentation. Defaults are shown below. + MaxfragmentationmemoryReserved *float64 `json:"maxfragmentationmemoryReserved,omitempty" tf:"maxfragmentationmemory_reserved,omitempty"` + + // The max-memory delta for this Redis instance. Defaults are shown below. + MaxmemoryDelta *float64 `json:"maxmemoryDelta,omitempty" tf:"maxmemory_delta,omitempty"` + + // How Redis will select what to remove when maxmemory is reached. Defaults to volatile-lru. + MaxmemoryPolicy *string `json:"maxmemoryPolicy,omitempty" tf:"maxmemory_policy,omitempty"` + + // Value in megabytes reserved for non-cache usage e.g. failover. Defaults are shown below. 
+ MaxmemoryReserved *float64 `json:"maxmemoryReserved,omitempty" tf:"maxmemory_reserved,omitempty"` + + // Keyspace notifications allows clients to subscribe to Pub/Sub channels in order to receive events affecting the Redis data set in some way. Reference + NotifyKeySpaceEvents *string `json:"notifyKeyspaceEvents,omitempty" tf:"notify_keyspace_events,omitempty"` + + // Is Backup Enabled? Only supported on Premium SKUs. Defaults to false. + RdbBackupEnabled *bool `json:"rdbBackupEnabled,omitempty" tf:"rdb_backup_enabled,omitempty"` + + // The Backup Frequency in Minutes. Only supported on Premium SKUs. Possible values are: 15, 30, 60, 360, 720 and 1440. + RdbBackupFrequency *float64 `json:"rdbBackupFrequency,omitempty" tf:"rdb_backup_frequency,omitempty"` + + // The maximum number of snapshots to create as a backup. Only supported for Premium SKUs. + RdbBackupMaxSnapshotCount *float64 `json:"rdbBackupMaxSnapshotCount,omitempty" tf:"rdb_backup_max_snapshot_count,omitempty"` + + // The ID of the Subscription containing the Storage Account. + StorageAccountSubscriptionID *string `json:"storageAccountSubscriptionId,omitempty" tf:"storage_account_subscription_id,omitempty"` +} + +type RedisConfigurationObservation struct { + + // Enable Microsoft Entra (AAD) authentication. Defaults to false. + ActiveDirectoryAuthenticationEnabled *bool `json:"activeDirectoryAuthenticationEnabled,omitempty" tf:"active_directory_authentication_enabled,omitempty"` + + // Enable or disable AOF persistence for this Redis Cache. Defaults to false. + AofBackupEnabled *bool `json:"aofBackupEnabled,omitempty" tf:"aof_backup_enabled,omitempty"` + + // Preferred auth method to communicate to storage account used for data persistence. Possible values are SAS and ManagedIdentity. Defaults to SAS. 
+ DataPersistenceAuthenticationMethod *string `json:"dataPersistenceAuthenticationMethod,omitempty" tf:"data_persistence_authentication_method,omitempty"` + + // If set to false, the Redis instance will be accessible without authentication. Defaults to true. + EnableAuthentication *bool `json:"enableAuthentication,omitempty" tf:"enable_authentication,omitempty"` + + // Returns the max number of connected clients at the same time. + Maxclients *float64 `json:"maxclients,omitempty" tf:"maxclients,omitempty"` + + // Value in megabytes reserved to accommodate for memory fragmentation. Defaults are shown below. + MaxfragmentationmemoryReserved *float64 `json:"maxfragmentationmemoryReserved,omitempty" tf:"maxfragmentationmemory_reserved,omitempty"` + + // The max-memory delta for this Redis instance. Defaults are shown below. + MaxmemoryDelta *float64 `json:"maxmemoryDelta,omitempty" tf:"maxmemory_delta,omitempty"` + + // How Redis will select what to remove when maxmemory is reached. Defaults to volatile-lru. + MaxmemoryPolicy *string `json:"maxmemoryPolicy,omitempty" tf:"maxmemory_policy,omitempty"` + + // Value in megabytes reserved for non-cache usage e.g. failover. Defaults are shown below. + MaxmemoryReserved *float64 `json:"maxmemoryReserved,omitempty" tf:"maxmemory_reserved,omitempty"` + + // Keyspace notifications allows clients to subscribe to Pub/Sub channels in order to receive events affecting the Redis data set in some way. Reference + NotifyKeySpaceEvents *string `json:"notifyKeyspaceEvents,omitempty" tf:"notify_keyspace_events,omitempty"` + + // Is Backup Enabled? Only supported on Premium SKUs. Defaults to false. + RdbBackupEnabled *bool `json:"rdbBackupEnabled,omitempty" tf:"rdb_backup_enabled,omitempty"` + + // The Backup Frequency in Minutes. Only supported on Premium SKUs. Possible values are: 15, 30, 60, 360, 720 and 1440. 
+ RdbBackupFrequency *float64 `json:"rdbBackupFrequency,omitempty" tf:"rdb_backup_frequency,omitempty"` + + // The maximum number of snapshots to create as a backup. Only supported for Premium SKUs. + RdbBackupMaxSnapshotCount *float64 `json:"rdbBackupMaxSnapshotCount,omitempty" tf:"rdb_backup_max_snapshot_count,omitempty"` + + // The ID of the Subscription containing the Storage Account. + StorageAccountSubscriptionID *string `json:"storageAccountSubscriptionId,omitempty" tf:"storage_account_subscription_id,omitempty"` +} + +type RedisConfigurationParameters struct { + + // Enable Microsoft Entra (AAD) authentication. Defaults to false. + // +kubebuilder:validation:Optional + ActiveDirectoryAuthenticationEnabled *bool `json:"activeDirectoryAuthenticationEnabled,omitempty" tf:"active_directory_authentication_enabled,omitempty"` + + // Enable or disable AOF persistence for this Redis Cache. Defaults to false. + // +kubebuilder:validation:Optional + AofBackupEnabled *bool `json:"aofBackupEnabled,omitempty" tf:"aof_backup_enabled,omitempty"` + + // First Storage Account connection string for AOF persistence. + // +kubebuilder:validation:Optional + AofStorageConnectionString0SecretRef *v1.SecretKeySelector `json:"aofStorageConnectionString0SecretRef,omitempty" tf:"-"` + + // Second Storage Account connection string for AOF persistence. + // +kubebuilder:validation:Optional + AofStorageConnectionString1SecretRef *v1.SecretKeySelector `json:"aofStorageConnectionString1SecretRef,omitempty" tf:"-"` + + // Preferred auth method to communicate to storage account used for data persistence. Possible values are SAS and ManagedIdentity. Defaults to SAS. + // +kubebuilder:validation:Optional + DataPersistenceAuthenticationMethod *string `json:"dataPersistenceAuthenticationMethod,omitempty" tf:"data_persistence_authentication_method,omitempty"` + + // If set to false, the Redis instance will be accessible without authentication. Defaults to true. 
+ // +kubebuilder:validation:Optional + EnableAuthentication *bool `json:"enableAuthentication,omitempty" tf:"enable_authentication,omitempty"` + + // Value in megabytes reserved to accommodate for memory fragmentation. Defaults are shown below. + // +kubebuilder:validation:Optional + MaxfragmentationmemoryReserved *float64 `json:"maxfragmentationmemoryReserved,omitempty" tf:"maxfragmentationmemory_reserved,omitempty"` + + // The max-memory delta for this Redis instance. Defaults are shown below. + // +kubebuilder:validation:Optional + MaxmemoryDelta *float64 `json:"maxmemoryDelta,omitempty" tf:"maxmemory_delta,omitempty"` + + // How Redis will select what to remove when maxmemory is reached. Defaults to volatile-lru. + // +kubebuilder:validation:Optional + MaxmemoryPolicy *string `json:"maxmemoryPolicy,omitempty" tf:"maxmemory_policy,omitempty"` + + // Value in megabytes reserved for non-cache usage e.g. failover. Defaults are shown below. + // +kubebuilder:validation:Optional + MaxmemoryReserved *float64 `json:"maxmemoryReserved,omitempty" tf:"maxmemory_reserved,omitempty"` + + // Keyspace notifications allows clients to subscribe to Pub/Sub channels in order to receive events affecting the Redis data set in some way. Reference + // +kubebuilder:validation:Optional + NotifyKeySpaceEvents *string `json:"notifyKeyspaceEvents,omitempty" tf:"notify_keyspace_events,omitempty"` + + // Is Backup Enabled? Only supported on Premium SKUs. Defaults to false. + // +kubebuilder:validation:Optional + RdbBackupEnabled *bool `json:"rdbBackupEnabled,omitempty" tf:"rdb_backup_enabled,omitempty"` + + // The Backup Frequency in Minutes. Only supported on Premium SKUs. Possible values are: 15, 30, 60, 360, 720 and 1440. + // +kubebuilder:validation:Optional + RdbBackupFrequency *float64 `json:"rdbBackupFrequency,omitempty" tf:"rdb_backup_frequency,omitempty"` + + // The maximum number of snapshots to create as a backup. Only supported for Premium SKUs. 
+ // +kubebuilder:validation:Optional + RdbBackupMaxSnapshotCount *float64 `json:"rdbBackupMaxSnapshotCount,omitempty" tf:"rdb_backup_max_snapshot_count,omitempty"` + + // The Connection String to the Storage Account. Only supported for Premium SKUs. In the format: DefaultEndpointsProtocol=https;BlobEndpoint=${azurerm_storage_account.example.primary_blob_endpoint};AccountName=${azurerm_storage_account.example.name};AccountKey=${azurerm_storage_account.example.primary_access_key}. + // +kubebuilder:validation:Optional + RdbStorageConnectionStringSecretRef *v1.SecretKeySelector `json:"rdbStorageConnectionStringSecretRef,omitempty" tf:"-"` + + // The ID of the Subscription containing the Storage Account. + // +kubebuilder:validation:Optional + StorageAccountSubscriptionID *string `json:"storageAccountSubscriptionId,omitempty" tf:"storage_account_subscription_id,omitempty"` +} + +// RedisCacheSpec defines the desired state of RedisCache +type RedisCacheSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider RedisCacheParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider RedisCacheInitParameters `json:"initProvider,omitempty"` +} + +// RedisCacheStatus defines the observed state of RedisCache. 
+type RedisCacheStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider RedisCacheObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// RedisCache is the Schema for the RedisCaches API. Manages a Redis Cache +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type RedisCache struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.capacity) || (has(self.initProvider) && has(self.initProvider.capacity))",message="spec.forProvider.capacity is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.family) || (has(self.initProvider) && has(self.initProvider.family))",message="spec.forProvider.family is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in 
self.managementPolicies) || has(self.forProvider.redisVersion) || (has(self.initProvider) && has(self.initProvider.redisVersion))",message="spec.forProvider.redisVersion is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.skuName) || (has(self.initProvider) && has(self.initProvider.skuName))",message="spec.forProvider.skuName is a required parameter" + Spec RedisCacheSpec `json:"spec"` + Status RedisCacheStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// RedisCacheList contains a list of RedisCaches +type RedisCacheList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []RedisCache `json:"items"` +} + +// Repository type metadata. +var ( + RedisCache_Kind = "RedisCache" + RedisCache_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: RedisCache_Kind}.String() + RedisCache_KindAPIVersion = RedisCache_Kind + "." + CRDGroupVersion.String() + RedisCache_GroupVersionKind = CRDGroupVersion.WithKind(RedisCache_Kind) +) + +func init() { + SchemeBuilder.Register(&RedisCache{}, &RedisCacheList{}) +} diff --git a/apis/cdn/v1beta1/zz_frontdoorcustomdomainassociation_types.go b/apis/cdn/v1beta1/zz_frontdoorcustomdomainassociation_types.go index 5df6268ac..2d394d9d0 100755 --- a/apis/cdn/v1beta1/zz_frontdoorcustomdomainassociation_types.go +++ b/apis/cdn/v1beta1/zz_frontdoorcustomdomainassociation_types.go @@ -16,7 +16,7 @@ import ( type FrontdoorCustomDomainAssociationInitParameters struct { // The ID of the Front Door Custom Domain that should be managed by the association resource. Changing this forces a new association resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cdn/v1beta1.FrontdoorCustomDomain + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cdn/v1beta2.FrontdoorCustomDomain // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() CdnFrontdoorCustomDomainID *string `json:"cdnFrontdoorCustomDomainId,omitempty" tf:"cdn_frontdoor_custom_domain_id,omitempty"` @@ -29,7 +29,7 @@ type FrontdoorCustomDomainAssociationInitParameters struct { CdnFrontdoorCustomDomainIDSelector *v1.Selector `json:"cdnFrontdoorCustomDomainIdSelector,omitempty" tf:"-"` // One or more IDs of the Front Door Route to which the Front Door Custom Domain is associated with. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cdn/v1beta1.FrontdoorRoute + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cdn/v1beta2.FrontdoorRoute // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() CdnFrontdoorRouteIds []*string `json:"cdnFrontdoorRouteIds,omitempty" tf:"cdn_frontdoor_route_ids,omitempty"` @@ -57,7 +57,7 @@ type FrontdoorCustomDomainAssociationObservation struct { type FrontdoorCustomDomainAssociationParameters struct { // The ID of the Front Door Custom Domain that should be managed by the association resource. Changing this forces a new association resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cdn/v1beta1.FrontdoorCustomDomain + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cdn/v1beta2.FrontdoorCustomDomain // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional CdnFrontdoorCustomDomainID *string `json:"cdnFrontdoorCustomDomainId,omitempty" tf:"cdn_frontdoor_custom_domain_id,omitempty"` @@ -71,7 +71,7 @@ type FrontdoorCustomDomainAssociationParameters struct { CdnFrontdoorCustomDomainIDSelector *v1.Selector `json:"cdnFrontdoorCustomDomainIdSelector,omitempty" tf:"-"` // One or more IDs of the Front Door Route to which the Front Door Custom Domain is associated with. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cdn/v1beta1.FrontdoorRoute + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cdn/v1beta2.FrontdoorRoute // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional CdnFrontdoorRouteIds []*string `json:"cdnFrontdoorRouteIds,omitempty" tf:"cdn_frontdoor_route_ids,omitempty"` diff --git a/apis/cdn/v1beta1/zz_generated.conversion_hubs.go b/apis/cdn/v1beta1/zz_generated.conversion_hubs.go index 70aff8bb1..90b61fe17 100755 --- a/apis/cdn/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/cdn/v1beta1/zz_generated.conversion_hubs.go @@ -6,12 +6,6 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *Endpoint) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *FrontdoorCustomDomain) Hub() {} - // Hub marks this type as a conversion hub. func (tr *FrontdoorCustomDomainAssociation) Hub() {} @@ -21,26 +15,11 @@ func (tr *FrontdoorEndpoint) Hub() {} // Hub marks this type as a conversion hub. func (tr *FrontdoorFirewallPolicy) Hub() {} -// Hub marks this type as a conversion hub. 
-func (tr *FrontdoorOrigin) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *FrontdoorOriginGroup) Hub() {} - // Hub marks this type as a conversion hub. func (tr *FrontdoorProfile) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *FrontdoorRoute) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *FrontdoorRule) Hub() {} - // Hub marks this type as a conversion hub. func (tr *FrontdoorRuleSet) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *FrontdoorSecurityPolicy) Hub() {} - // Hub marks this type as a conversion hub. func (tr *Profile) Hub() {} diff --git a/apis/cdn/v1beta1/zz_generated.conversion_spokes.go b/apis/cdn/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..f536eaddc --- /dev/null +++ b/apis/cdn/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,154 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Endpoint to the hub type. +func (tr *Endpoint) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Endpoint type. 
+func (tr *Endpoint) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this FrontdoorCustomDomain to the hub type. +func (tr *FrontdoorCustomDomain) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the FrontdoorCustomDomain type. +func (tr *FrontdoorCustomDomain) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this FrontdoorOrigin to the hub type. +func (tr *FrontdoorOrigin) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the FrontdoorOrigin type. 
+func (tr *FrontdoorOrigin) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this FrontdoorOriginGroup to the hub type. +func (tr *FrontdoorOriginGroup) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the FrontdoorOriginGroup type. +func (tr *FrontdoorOriginGroup) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this FrontdoorRoute to the hub type. +func (tr *FrontdoorRoute) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the FrontdoorRoute type. 
+func (tr *FrontdoorRoute) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this FrontdoorRule to the hub type. +func (tr *FrontdoorRule) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the FrontdoorRule type. +func (tr *FrontdoorRule) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this FrontdoorSecurityPolicy to the hub type. +func (tr *FrontdoorSecurityPolicy) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the FrontdoorSecurityPolicy type. 
+func (tr *FrontdoorSecurityPolicy) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/cdn/v1beta1/zz_generated.resolvers.go b/apis/cdn/v1beta1/zz_generated.resolvers.go index 3f04b586a..aba893130 100644 --- a/apis/cdn/v1beta1/zz_generated.resolvers.go +++ b/apis/cdn/v1beta1/zz_generated.resolvers.go @@ -147,7 +147,7 @@ func (mg *FrontdoorCustomDomainAssociation) ResolveReferences(ctx context.Contex var mrsp reference.MultiResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("cdn.azure.upbound.io", "v1beta1", "FrontdoorCustomDomain", "FrontdoorCustomDomainList") + m, l, err = apisresolver.GetManagedResource("cdn.azure.upbound.io", "v1beta2", "FrontdoorCustomDomain", "FrontdoorCustomDomainList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -166,7 +166,7 @@ func (mg *FrontdoorCustomDomainAssociation) ResolveReferences(ctx context.Contex mg.Spec.ForProvider.CdnFrontdoorCustomDomainID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.CdnFrontdoorCustomDomainIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("cdn.azure.upbound.io", "v1beta1", "FrontdoorRoute", "FrontdoorRouteList") + m, l, err = apisresolver.GetManagedResource("cdn.azure.upbound.io", "v1beta2", "FrontdoorRoute", "FrontdoorRouteList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -185,7 +185,7 @@ func (mg *FrontdoorCustomDomainAssociation) ResolveReferences(ctx context.Contex 
mg.Spec.ForProvider.CdnFrontdoorRouteIds = reference.ToPtrValues(mrsp.ResolvedValues) mg.Spec.ForProvider.CdnFrontdoorRouteIdsRefs = mrsp.ResolvedReferences { - m, l, err = apisresolver.GetManagedResource("cdn.azure.upbound.io", "v1beta1", "FrontdoorCustomDomain", "FrontdoorCustomDomainList") + m, l, err = apisresolver.GetManagedResource("cdn.azure.upbound.io", "v1beta2", "FrontdoorCustomDomain", "FrontdoorCustomDomainList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -204,7 +204,7 @@ func (mg *FrontdoorCustomDomainAssociation) ResolveReferences(ctx context.Contex mg.Spec.InitProvider.CdnFrontdoorCustomDomainID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.CdnFrontdoorCustomDomainIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("cdn.azure.upbound.io", "v1beta1", "FrontdoorRoute", "FrontdoorRouteList") + m, l, err = apisresolver.GetManagedResource("cdn.azure.upbound.io", "v1beta2", "FrontdoorRoute", "FrontdoorRouteList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/cdn/v1beta2/zz_endpoint_terraformed.go b/apis/cdn/v1beta2/zz_endpoint_terraformed.go new file mode 100755 index 000000000..1a11c1f41 --- /dev/null +++ b/apis/cdn/v1beta2/zz_endpoint_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Endpoint +func (mg *Endpoint) GetTerraformResourceType() string { + return "azurerm_cdn_endpoint" +} + +// GetConnectionDetailsMapping for this Endpoint +func (tr *Endpoint) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Endpoint +func (tr *Endpoint) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Endpoint +func (tr *Endpoint) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Endpoint +func (tr *Endpoint) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Endpoint +func (tr *Endpoint) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Endpoint +func (tr *Endpoint) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Endpoint +func (tr *Endpoint) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, 
&base) +} + +// GetInitParameters of this Endpoint +func (tr *Endpoint) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Endpoint using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Endpoint) LateInitialize(attrs []byte) (bool, error) { + params := &EndpointParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Endpoint) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/cdn/v1beta2/zz_endpoint_types.go b/apis/cdn/v1beta2/zz_endpoint_types.go new file mode 100755 index 000000000..98ebbee07 --- /dev/null +++ b/apis/cdn/v1beta2/zz_endpoint_types.go @@ -0,0 +1,1861 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CacheExpirationActionInitParameters struct { + + // The behavior of the cache key for query strings. Valid values are Exclude, ExcludeAll, Include and IncludeAll. + Behavior *string `json:"behavior,omitempty" tf:"behavior,omitempty"` + + // Duration of the cache. Only allowed when behavior is set to Override or SetIfMissing. Format: [d.]hh:mm:ss + Duration *string `json:"duration,omitempty" tf:"duration,omitempty"` +} + +type CacheExpirationActionObservation struct { + + // The behavior of the cache key for query strings. Valid values are Exclude, ExcludeAll, Include and IncludeAll. + Behavior *string `json:"behavior,omitempty" tf:"behavior,omitempty"` + + // Duration of the cache. Only allowed when behavior is set to Override or SetIfMissing. Format: [d.]hh:mm:ss + Duration *string `json:"duration,omitempty" tf:"duration,omitempty"` +} + +type CacheExpirationActionParameters struct { + + // The behavior of the cache key for query strings. Valid values are Exclude, ExcludeAll, Include and IncludeAll. + // +kubebuilder:validation:Optional + Behavior *string `json:"behavior" tf:"behavior,omitempty"` + + // Duration of the cache. Only allowed when behavior is set to Override or SetIfMissing. 
Format: [d.]hh:mm:ss + // +kubebuilder:validation:Optional + Duration *string `json:"duration,omitempty" tf:"duration,omitempty"` +} + +type CacheKeyQueryStringActionInitParameters struct { + + // The behavior of the cache key for query strings. Valid values are Exclude, ExcludeAll, Include and IncludeAll. + Behavior *string `json:"behavior,omitempty" tf:"behavior,omitempty"` + + // Comma separated list of parameter values. + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type CacheKeyQueryStringActionObservation struct { + + // The behavior of the cache key for query strings. Valid values are Exclude, ExcludeAll, Include and IncludeAll. + Behavior *string `json:"behavior,omitempty" tf:"behavior,omitempty"` + + // Comma separated list of parameter values. + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type CacheKeyQueryStringActionParameters struct { + + // The behavior of the cache key for query strings. Valid values are Exclude, ExcludeAll, Include and IncludeAll. + // +kubebuilder:validation:Optional + Behavior *string `json:"behavior" tf:"behavior,omitempty"` + + // Comma separated list of parameter values. + // +kubebuilder:validation:Optional + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type CookiesConditionInitParameters struct { + + // List of string values. This is required if operator is not Any. + // +listType=set + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // Defaults to false. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // Valid values are Any, BeginsWith, Contains, EndsWith, Equal, GreaterThan, GreaterThanOrEqual, LessThan, LessThanOrEqual, RegEx and Wildcard. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Header name. + Selector *string `json:"selector,omitempty" tf:"selector,omitempty"` + + // A list of transforms. 
Valid values are Lowercase and Uppercase. + Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type CookiesConditionObservation struct { + + // List of string values. This is required if operator is not Any. + // +listType=set + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // Defaults to false. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // Valid values are Any, BeginsWith, Contains, EndsWith, Equal, GreaterThan, GreaterThanOrEqual, LessThan, LessThanOrEqual, RegEx and Wildcard. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Header name. + Selector *string `json:"selector,omitempty" tf:"selector,omitempty"` + + // A list of transforms. Valid values are Lowercase and Uppercase. + Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type CookiesConditionParameters struct { + + // List of string values. This is required if operator is not Any. + // +kubebuilder:validation:Optional + // +listType=set + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // Defaults to false. + // +kubebuilder:validation:Optional + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // Valid values are Any, BeginsWith, Contains, EndsWith, Equal, GreaterThan, GreaterThanOrEqual, LessThan, LessThanOrEqual, RegEx and Wildcard. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // Header name. + // +kubebuilder:validation:Optional + Selector *string `json:"selector" tf:"selector,omitempty"` + + // A list of transforms. Valid values are Lowercase and Uppercase. + // +kubebuilder:validation:Optional + Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type DeliveryRuleInitParameters struct { + + // A cache_expiration_action block as defined above. 
+ CacheExpirationAction *CacheExpirationActionInitParameters `json:"cacheExpirationAction,omitempty" tf:"cache_expiration_action,omitempty"` + + // A cache_key_query_string_action block as defined above. + CacheKeyQueryStringAction *CacheKeyQueryStringActionInitParameters `json:"cacheKeyQueryStringAction,omitempty" tf:"cache_key_query_string_action,omitempty"` + + // A cookies_condition block as defined above. + CookiesCondition []CookiesConditionInitParameters `json:"cookiesCondition,omitempty" tf:"cookies_condition,omitempty"` + + // A device_condition block as defined below. + DeviceCondition *DeviceConditionInitParameters `json:"deviceCondition,omitempty" tf:"device_condition,omitempty"` + + // A http_version_condition block as defined below. + HTTPVersionCondition []HTTPVersionConditionInitParameters `json:"httpVersionCondition,omitempty" tf:"http_version_condition,omitempty"` + + // A modify_request_header_action block as defined below. + ModifyRequestHeaderAction []ModifyRequestHeaderActionInitParameters `json:"modifyRequestHeaderAction,omitempty" tf:"modify_request_header_action,omitempty"` + + // A modify_response_header_action block as defined below. + ModifyResponseHeaderAction []ModifyResponseHeaderActionInitParameters `json:"modifyResponseHeaderAction,omitempty" tf:"modify_response_header_action,omitempty"` + + // The Name which should be used for this Delivery Rule. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The order used for this rule. The order values should be sequential and begin at 1. + Order *float64 `json:"order,omitempty" tf:"order,omitempty"` + + // A post_arg_condition block as defined below. + PostArgCondition []PostArgConditionInitParameters `json:"postArgCondition,omitempty" tf:"post_arg_condition,omitempty"` + + // A query_string_condition block as defined below. 
+ QueryStringCondition []QueryStringConditionInitParameters `json:"queryStringCondition,omitempty" tf:"query_string_condition,omitempty"` + + // A remote_address_condition block as defined below. + RemoteAddressCondition []RemoteAddressConditionInitParameters `json:"remoteAddressCondition,omitempty" tf:"remote_address_condition,omitempty"` + + // A request_body_condition block as defined below. + RequestBodyCondition []RequestBodyConditionInitParameters `json:"requestBodyCondition,omitempty" tf:"request_body_condition,omitempty"` + + // A request_header_condition block as defined below. + RequestHeaderCondition []RequestHeaderConditionInitParameters `json:"requestHeaderCondition,omitempty" tf:"request_header_condition,omitempty"` + + // A request_method_condition block as defined below. + RequestMethodCondition *RequestMethodConditionInitParameters `json:"requestMethodCondition,omitempty" tf:"request_method_condition,omitempty"` + + // A request_scheme_condition block as defined below. + RequestSchemeCondition *RequestSchemeConditionInitParameters `json:"requestSchemeCondition,omitempty" tf:"request_scheme_condition,omitempty"` + + // A request_uri_condition block as defined below. + RequestURICondition []RequestURIConditionInitParameters `json:"requestUriCondition,omitempty" tf:"request_uri_condition,omitempty"` + + // A url_file_extension_condition block as defined below. + URLFileExtensionCondition []URLFileExtensionConditionInitParameters `json:"urlFileExtensionCondition,omitempty" tf:"url_file_extension_condition,omitempty"` + + // A url_file_name_condition block as defined below. + URLFileNameCondition []URLFileNameConditionInitParameters `json:"urlFileNameCondition,omitempty" tf:"url_file_name_condition,omitempty"` + + // A url_path_condition block as defined below. + URLPathCondition []URLPathConditionInitParameters `json:"urlPathCondition,omitempty" tf:"url_path_condition,omitempty"` + + // A url_redirect_action block as defined below. 
+ URLRedirectAction *URLRedirectActionInitParameters `json:"urlRedirectAction,omitempty" tf:"url_redirect_action,omitempty"` + + // A url_rewrite_action block as defined below. + URLRewriteAction *URLRewriteActionInitParameters `json:"urlRewriteAction,omitempty" tf:"url_rewrite_action,omitempty"` +} + +type DeliveryRuleObservation struct { + + // A cache_expiration_action block as defined above. + CacheExpirationAction *CacheExpirationActionObservation `json:"cacheExpirationAction,omitempty" tf:"cache_expiration_action,omitempty"` + + // A cache_key_query_string_action block as defined above. + CacheKeyQueryStringAction *CacheKeyQueryStringActionObservation `json:"cacheKeyQueryStringAction,omitempty" tf:"cache_key_query_string_action,omitempty"` + + // A cookies_condition block as defined above. + CookiesCondition []CookiesConditionObservation `json:"cookiesCondition,omitempty" tf:"cookies_condition,omitempty"` + + // A device_condition block as defined below. + DeviceCondition *DeviceConditionObservation `json:"deviceCondition,omitempty" tf:"device_condition,omitempty"` + + // A http_version_condition block as defined below. + HTTPVersionCondition []HTTPVersionConditionObservation `json:"httpVersionCondition,omitempty" tf:"http_version_condition,omitempty"` + + // A modify_request_header_action block as defined below. + ModifyRequestHeaderAction []ModifyRequestHeaderActionObservation `json:"modifyRequestHeaderAction,omitempty" tf:"modify_request_header_action,omitempty"` + + // A modify_response_header_action block as defined below. + ModifyResponseHeaderAction []ModifyResponseHeaderActionObservation `json:"modifyResponseHeaderAction,omitempty" tf:"modify_response_header_action,omitempty"` + + // The Name which should be used for this Delivery Rule. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The order used for this rule. The order values should be sequential and begin at 1. 
+ Order *float64 `json:"order,omitempty" tf:"order,omitempty"` + + // A post_arg_condition block as defined below. + PostArgCondition []PostArgConditionObservation `json:"postArgCondition,omitempty" tf:"post_arg_condition,omitempty"` + + // A query_string_condition block as defined below. + QueryStringCondition []QueryStringConditionObservation `json:"queryStringCondition,omitempty" tf:"query_string_condition,omitempty"` + + // A remote_address_condition block as defined below. + RemoteAddressCondition []RemoteAddressConditionObservation `json:"remoteAddressCondition,omitempty" tf:"remote_address_condition,omitempty"` + + // A request_body_condition block as defined below. + RequestBodyCondition []RequestBodyConditionObservation `json:"requestBodyCondition,omitempty" tf:"request_body_condition,omitempty"` + + // A request_header_condition block as defined below. + RequestHeaderCondition []RequestHeaderConditionObservation `json:"requestHeaderCondition,omitempty" tf:"request_header_condition,omitempty"` + + // A request_method_condition block as defined below. + RequestMethodCondition *RequestMethodConditionObservation `json:"requestMethodCondition,omitempty" tf:"request_method_condition,omitempty"` + + // A request_scheme_condition block as defined below. + RequestSchemeCondition *RequestSchemeConditionObservation `json:"requestSchemeCondition,omitempty" tf:"request_scheme_condition,omitempty"` + + // A request_uri_condition block as defined below. + RequestURICondition []RequestURIConditionObservation `json:"requestUriCondition,omitempty" tf:"request_uri_condition,omitempty"` + + // A url_file_extension_condition block as defined below. + URLFileExtensionCondition []URLFileExtensionConditionObservation `json:"urlFileExtensionCondition,omitempty" tf:"url_file_extension_condition,omitempty"` + + // A url_file_name_condition block as defined below. 
+ URLFileNameCondition []URLFileNameConditionObservation `json:"urlFileNameCondition,omitempty" tf:"url_file_name_condition,omitempty"` + + // A url_path_condition block as defined below. + URLPathCondition []URLPathConditionObservation `json:"urlPathCondition,omitempty" tf:"url_path_condition,omitempty"` + + // A url_redirect_action block as defined below. + URLRedirectAction *URLRedirectActionObservation `json:"urlRedirectAction,omitempty" tf:"url_redirect_action,omitempty"` + + // A url_rewrite_action block as defined below. + URLRewriteAction *URLRewriteActionObservation `json:"urlRewriteAction,omitempty" tf:"url_rewrite_action,omitempty"` +} + +type DeliveryRuleParameters struct { + + // A cache_expiration_action block as defined above. + // +kubebuilder:validation:Optional + CacheExpirationAction *CacheExpirationActionParameters `json:"cacheExpirationAction,omitempty" tf:"cache_expiration_action,omitempty"` + + // A cache_key_query_string_action block as defined above. + // +kubebuilder:validation:Optional + CacheKeyQueryStringAction *CacheKeyQueryStringActionParameters `json:"cacheKeyQueryStringAction,omitempty" tf:"cache_key_query_string_action,omitempty"` + + // A cookies_condition block as defined above. + // +kubebuilder:validation:Optional + CookiesCondition []CookiesConditionParameters `json:"cookiesCondition,omitempty" tf:"cookies_condition,omitempty"` + + // A device_condition block as defined below. + // +kubebuilder:validation:Optional + DeviceCondition *DeviceConditionParameters `json:"deviceCondition,omitempty" tf:"device_condition,omitempty"` + + // A http_version_condition block as defined below. + // +kubebuilder:validation:Optional + HTTPVersionCondition []HTTPVersionConditionParameters `json:"httpVersionCondition,omitempty" tf:"http_version_condition,omitempty"` + + // A modify_request_header_action block as defined below. 
+ // +kubebuilder:validation:Optional + ModifyRequestHeaderAction []ModifyRequestHeaderActionParameters `json:"modifyRequestHeaderAction,omitempty" tf:"modify_request_header_action,omitempty"` + + // A modify_response_header_action block as defined below. + // +kubebuilder:validation:Optional + ModifyResponseHeaderAction []ModifyResponseHeaderActionParameters `json:"modifyResponseHeaderAction,omitempty" tf:"modify_response_header_action,omitempty"` + + // The Name which should be used for this Delivery Rule. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The order used for this rule. The order values should be sequential and begin at 1. + // +kubebuilder:validation:Optional + Order *float64 `json:"order" tf:"order,omitempty"` + + // A post_arg_condition block as defined below. + // +kubebuilder:validation:Optional + PostArgCondition []PostArgConditionParameters `json:"postArgCondition,omitempty" tf:"post_arg_condition,omitempty"` + + // A query_string_condition block as defined below. + // +kubebuilder:validation:Optional + QueryStringCondition []QueryStringConditionParameters `json:"queryStringCondition,omitempty" tf:"query_string_condition,omitempty"` + + // A remote_address_condition block as defined below. + // +kubebuilder:validation:Optional + RemoteAddressCondition []RemoteAddressConditionParameters `json:"remoteAddressCondition,omitempty" tf:"remote_address_condition,omitempty"` + + // A request_body_condition block as defined below. + // +kubebuilder:validation:Optional + RequestBodyCondition []RequestBodyConditionParameters `json:"requestBodyCondition,omitempty" tf:"request_body_condition,omitempty"` + + // A request_header_condition block as defined below. + // +kubebuilder:validation:Optional + RequestHeaderCondition []RequestHeaderConditionParameters `json:"requestHeaderCondition,omitempty" tf:"request_header_condition,omitempty"` + + // A request_method_condition block as defined below. 
+ // +kubebuilder:validation:Optional + RequestMethodCondition *RequestMethodConditionParameters `json:"requestMethodCondition,omitempty" tf:"request_method_condition,omitempty"` + + // A request_scheme_condition block as defined below. + // +kubebuilder:validation:Optional + RequestSchemeCondition *RequestSchemeConditionParameters `json:"requestSchemeCondition,omitempty" tf:"request_scheme_condition,omitempty"` + + // A request_uri_condition block as defined below. + // +kubebuilder:validation:Optional + RequestURICondition []RequestURIConditionParameters `json:"requestUriCondition,omitempty" tf:"request_uri_condition,omitempty"` + + // A url_file_extension_condition block as defined below. + // +kubebuilder:validation:Optional + URLFileExtensionCondition []URLFileExtensionConditionParameters `json:"urlFileExtensionCondition,omitempty" tf:"url_file_extension_condition,omitempty"` + + // A url_file_name_condition block as defined below. + // +kubebuilder:validation:Optional + URLFileNameCondition []URLFileNameConditionParameters `json:"urlFileNameCondition,omitempty" tf:"url_file_name_condition,omitempty"` + + // A url_path_condition block as defined below. + // +kubebuilder:validation:Optional + URLPathCondition []URLPathConditionParameters `json:"urlPathCondition,omitempty" tf:"url_path_condition,omitempty"` + + // A url_redirect_action block as defined below. + // +kubebuilder:validation:Optional + URLRedirectAction *URLRedirectActionParameters `json:"urlRedirectAction,omitempty" tf:"url_redirect_action,omitempty"` + + // A url_rewrite_action block as defined below. + // +kubebuilder:validation:Optional + URLRewriteAction *URLRewriteActionParameters `json:"urlRewriteAction,omitempty" tf:"url_rewrite_action,omitempty"` +} + +type DeviceConditionInitParameters struct { + + // List of string values. This is required if operator is not Any. + // +listType=set + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // Defaults to false. 
+ NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // Valid values are Any, BeginsWith, Contains, EndsWith, Equal, GreaterThan, GreaterThanOrEqual, LessThan, LessThanOrEqual, RegEx and Wildcard. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` +} + +type DeviceConditionObservation struct { + + // List of string values. This is required if operator is not Any. + // +listType=set + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // Defaults to false. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // Valid values are Any, BeginsWith, Contains, EndsWith, Equal, GreaterThan, GreaterThanOrEqual, LessThan, LessThanOrEqual, RegEx and Wildcard. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` +} + +type DeviceConditionParameters struct { + + // List of string values. This is required if operator is not Any. + // +kubebuilder:validation:Optional + // +listType=set + MatchValues []*string `json:"matchValues" tf:"match_values,omitempty"` + + // Defaults to false. + // +kubebuilder:validation:Optional + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // Valid values are Any, BeginsWith, Contains, EndsWith, Equal, GreaterThan, GreaterThanOrEqual, LessThan, LessThanOrEqual, RegEx and Wildcard. + // +kubebuilder:validation:Optional + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` +} + +type EndpointInitParameters struct { + + // An array of strings that indicates a content types on which compression will be applied. The value for the elements should be MIME types. + // +listType=set + ContentTypesToCompress []*string `json:"contentTypesToCompress,omitempty" tf:"content_types_to_compress,omitempty"` + + // Rules for the rules engine. An endpoint can contain up until 4 of those rules that consist of conditions and actions. 
A delivery_rule blocks as defined below. + DeliveryRule []DeliveryRuleInitParameters `json:"deliveryRule,omitempty" tf:"delivery_rule,omitempty"` + + // A set of Geo Filters for this CDN Endpoint. Each geo_filter block supports fields documented below. + GeoFilter []GeoFilterInitParameters `json:"geoFilter,omitempty" tf:"geo_filter,omitempty"` + + // Actions that are valid for all resources regardless of any conditions. A global_delivery_rule block as defined below. + GlobalDeliveryRule *GlobalDeliveryRuleInitParameters `json:"globalDeliveryRule,omitempty" tf:"global_delivery_rule,omitempty"` + + // Indicates whether compression is to be enabled. + IsCompressionEnabled *bool `json:"isCompressionEnabled,omitempty" tf:"is_compression_enabled,omitempty"` + + // Specifies if http allowed. Defaults to true. + IsHTTPAllowed *bool `json:"isHttpAllowed,omitempty" tf:"is_http_allowed,omitempty"` + + // Specifies if https allowed. Defaults to true. + IsHTTPSAllowed *bool `json:"isHttpsAllowed,omitempty" tf:"is_https_allowed,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // What types of optimization should this CDN Endpoint optimize for? Possible values include DynamicSiteAcceleration, GeneralMediaStreaming, GeneralWebDelivery, LargeFileDownload and VideoOnDemandMediaStreaming. + OptimizationType *string `json:"optimizationType,omitempty" tf:"optimization_type,omitempty"` + + // The set of origins of the CDN endpoint. When multiple origins exist, the first origin will be used as primary and rest will be used as failover options. Each origin block supports fields documented below. Changing this forces a new resource to be created. + Origin []OriginInitParameters `json:"origin,omitempty" tf:"origin,omitempty"` + + // The host header CDN provider will send along with content requests to origins. 
+ OriginHostHeader *string `json:"originHostHeader,omitempty" tf:"origin_host_header,omitempty"` + + // The path used at for origin requests. + OriginPath *string `json:"originPath,omitempty" tf:"origin_path,omitempty"` + + // the path to a file hosted on the origin which helps accelerate delivery of the dynamic content and calculate the most optimal routes for the CDN. This is relative to the origin_path. + ProbePath *string `json:"probePath,omitempty" tf:"probe_path,omitempty"` + + // Sets query string caching behavior. Allowed values are IgnoreQueryString, BypassCaching and UseQueryString. NotSet value can be used for Premium Verizon CDN profile. Defaults to IgnoreQueryString. + QuerystringCachingBehaviour *string `json:"querystringCachingBehaviour,omitempty" tf:"querystring_caching_behaviour,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type EndpointObservation struct { + + // An array of strings that indicates a content types on which compression will be applied. The value for the elements should be MIME types. + // +listType=set + ContentTypesToCompress []*string `json:"contentTypesToCompress,omitempty" tf:"content_types_to_compress,omitempty"` + + // Rules for the rules engine. An endpoint can contain up until 4 of those rules that consist of conditions and actions. A delivery_rule blocks as defined below. + DeliveryRule []DeliveryRuleObservation `json:"deliveryRule,omitempty" tf:"delivery_rule,omitempty"` + + // The Fully Qualified Domain Name of the CDN Endpoint. + Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"` + + // A set of Geo Filters for this CDN Endpoint. Each geo_filter block supports fields documented below. + GeoFilter []GeoFilterObservation `json:"geoFilter,omitempty" tf:"geo_filter,omitempty"` + + // Actions that are valid for all resources regardless of any conditions. A global_delivery_rule block as defined below. 
+ GlobalDeliveryRule *GlobalDeliveryRuleObservation `json:"globalDeliveryRule,omitempty" tf:"global_delivery_rule,omitempty"` + + // The ID of the CDN Endpoint. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Indicates whether compression is to be enabled. + IsCompressionEnabled *bool `json:"isCompressionEnabled,omitempty" tf:"is_compression_enabled,omitempty"` + + // Specifies if http allowed. Defaults to true. + IsHTTPAllowed *bool `json:"isHttpAllowed,omitempty" tf:"is_http_allowed,omitempty"` + + // Specifies if https allowed. Defaults to true. + IsHTTPSAllowed *bool `json:"isHttpsAllowed,omitempty" tf:"is_https_allowed,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // What types of optimization should this CDN Endpoint optimize for? Possible values include DynamicSiteAcceleration, GeneralMediaStreaming, GeneralWebDelivery, LargeFileDownload and VideoOnDemandMediaStreaming. + OptimizationType *string `json:"optimizationType,omitempty" tf:"optimization_type,omitempty"` + + // The set of origins of the CDN endpoint. When multiple origins exist, the first origin will be used as primary and rest will be used as failover options. Each origin block supports fields documented below. Changing this forces a new resource to be created. + Origin []OriginObservation `json:"origin,omitempty" tf:"origin,omitempty"` + + // The host header CDN provider will send along with content requests to origins. + OriginHostHeader *string `json:"originHostHeader,omitempty" tf:"origin_host_header,omitempty"` + + // The path used at for origin requests. + OriginPath *string `json:"originPath,omitempty" tf:"origin_path,omitempty"` + + // the path to a file hosted on the origin which helps accelerate delivery of the dynamic content and calculate the most optimal routes for the CDN. This is relative to the origin_path. 
+ ProbePath *string `json:"probePath,omitempty" tf:"probe_path,omitempty"` + + // The CDN Profile to which to attach the CDN Endpoint. Changing this forces a new resource to be created. + ProfileName *string `json:"profileName,omitempty" tf:"profile_name,omitempty"` + + // Sets query string caching behavior. Allowed values are IgnoreQueryString, BypassCaching and UseQueryString. NotSet value can be used for Premium Verizon CDN profile. Defaults to IgnoreQueryString. + QuerystringCachingBehaviour *string `json:"querystringCachingBehaviour,omitempty" tf:"querystring_caching_behaviour,omitempty"` + + // The name of the resource group in which to create the CDN Endpoint. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type EndpointParameters struct { + + // An array of strings that indicates a content types on which compression will be applied. The value for the elements should be MIME types. + // +kubebuilder:validation:Optional + // +listType=set + ContentTypesToCompress []*string `json:"contentTypesToCompress,omitempty" tf:"content_types_to_compress,omitempty"` + + // Rules for the rules engine. An endpoint can contain up until 4 of those rules that consist of conditions and actions. A delivery_rule blocks as defined below. + // +kubebuilder:validation:Optional + DeliveryRule []DeliveryRuleParameters `json:"deliveryRule,omitempty" tf:"delivery_rule,omitempty"` + + // A set of Geo Filters for this CDN Endpoint. Each geo_filter block supports fields documented below. + // +kubebuilder:validation:Optional + GeoFilter []GeoFilterParameters `json:"geoFilter,omitempty" tf:"geo_filter,omitempty"` + + // Actions that are valid for all resources regardless of any conditions. A global_delivery_rule block as defined below. 
+ // +kubebuilder:validation:Optional + GlobalDeliveryRule *GlobalDeliveryRuleParameters `json:"globalDeliveryRule,omitempty" tf:"global_delivery_rule,omitempty"` + + // Indicates whether compression is to be enabled. + // +kubebuilder:validation:Optional + IsCompressionEnabled *bool `json:"isCompressionEnabled,omitempty" tf:"is_compression_enabled,omitempty"` + + // Specifies if http allowed. Defaults to true. + // +kubebuilder:validation:Optional + IsHTTPAllowed *bool `json:"isHttpAllowed,omitempty" tf:"is_http_allowed,omitempty"` + + // Specifies if https allowed. Defaults to true. + // +kubebuilder:validation:Optional + IsHTTPSAllowed *bool `json:"isHttpsAllowed,omitempty" tf:"is_https_allowed,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // What types of optimization should this CDN Endpoint optimize for? Possible values include DynamicSiteAcceleration, GeneralMediaStreaming, GeneralWebDelivery, LargeFileDownload and VideoOnDemandMediaStreaming. + // +kubebuilder:validation:Optional + OptimizationType *string `json:"optimizationType,omitempty" tf:"optimization_type,omitempty"` + + // The set of origins of the CDN endpoint. When multiple origins exist, the first origin will be used as primary and rest will be used as failover options. Each origin block supports fields documented below. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Origin []OriginParameters `json:"origin,omitempty" tf:"origin,omitempty"` + + // The host header CDN provider will send along with content requests to origins. + // +kubebuilder:validation:Optional + OriginHostHeader *string `json:"originHostHeader,omitempty" tf:"origin_host_header,omitempty"` + + // The path used at for origin requests. 
+ // +kubebuilder:validation:Optional + OriginPath *string `json:"originPath,omitempty" tf:"origin_path,omitempty"` + + // the path to a file hosted on the origin which helps accelerate delivery of the dynamic content and calculate the most optimal routes for the CDN. This is relative to the origin_path. + // +kubebuilder:validation:Optional + ProbePath *string `json:"probePath,omitempty" tf:"probe_path,omitempty"` + + // The CDN Profile to which to attach the CDN Endpoint. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cdn/v1beta1.Profile + // +kubebuilder:validation:Optional + ProfileName *string `json:"profileName,omitempty" tf:"profile_name,omitempty"` + + // Reference to a Profile in cdn to populate profileName. + // +kubebuilder:validation:Optional + ProfileNameRef *v1.Reference `json:"profileNameRef,omitempty" tf:"-"` + + // Selector for a Profile in cdn to populate profileName. + // +kubebuilder:validation:Optional + ProfileNameSelector *v1.Selector `json:"profileNameSelector,omitempty" tf:"-"` + + // Sets query string caching behavior. Allowed values are IgnoreQueryString, BypassCaching and UseQueryString. NotSet value can be used for Premium Verizon CDN profile. Defaults to IgnoreQueryString. + // +kubebuilder:validation:Optional + QuerystringCachingBehaviour *string `json:"querystringCachingBehaviour,omitempty" tf:"querystring_caching_behaviour,omitempty"` + + // The name of the resource group in which to create the CDN Endpoint. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. 
+ // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type GeoFilterInitParameters struct { + + // The Action of the Geo Filter. Possible values include Allow and Block. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // A List of two letter country codes (e.g. US, GB) to be associated with this Geo Filter. + CountryCodes []*string `json:"countryCodes,omitempty" tf:"country_codes,omitempty"` + + // The relative path applicable to geo filter. + RelativePath *string `json:"relativePath,omitempty" tf:"relative_path,omitempty"` +} + +type GeoFilterObservation struct { + + // The Action of the Geo Filter. Possible values include Allow and Block. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // A List of two letter country codes (e.g. US, GB) to be associated with this Geo Filter. + CountryCodes []*string `json:"countryCodes,omitempty" tf:"country_codes,omitempty"` + + // The relative path applicable to geo filter. + RelativePath *string `json:"relativePath,omitempty" tf:"relative_path,omitempty"` +} + +type GeoFilterParameters struct { + + // The Action of the Geo Filter. Possible values include Allow and Block. + // +kubebuilder:validation:Optional + Action *string `json:"action" tf:"action,omitempty"` + + // A List of two letter country codes (e.g. US, GB) to be associated with this Geo Filter. + // +kubebuilder:validation:Optional + CountryCodes []*string `json:"countryCodes" tf:"country_codes,omitempty"` + + // The relative path applicable to geo filter. 
+ // +kubebuilder:validation:Optional + RelativePath *string `json:"relativePath" tf:"relative_path,omitempty"` +} + +type GlobalDeliveryRuleCacheExpirationActionInitParameters struct { + + // The behavior of the cache key for query strings. Valid values are Exclude, ExcludeAll, Include and IncludeAll. + Behavior *string `json:"behavior,omitempty" tf:"behavior,omitempty"` + + // Duration of the cache. Only allowed when behavior is set to Override or SetIfMissing. Format: [d.]hh:mm:ss + Duration *string `json:"duration,omitempty" tf:"duration,omitempty"` +} + +type GlobalDeliveryRuleCacheExpirationActionObservation struct { + + // The behavior of the cache key for query strings. Valid values are Exclude, ExcludeAll, Include and IncludeAll. + Behavior *string `json:"behavior,omitempty" tf:"behavior,omitempty"` + + // Duration of the cache. Only allowed when behavior is set to Override or SetIfMissing. Format: [d.]hh:mm:ss + Duration *string `json:"duration,omitempty" tf:"duration,omitempty"` +} + +type GlobalDeliveryRuleCacheExpirationActionParameters struct { + + // The behavior of the cache key for query strings. Valid values are Exclude, ExcludeAll, Include and IncludeAll. + // +kubebuilder:validation:Optional + Behavior *string `json:"behavior" tf:"behavior,omitempty"` + + // Duration of the cache. Only allowed when behavior is set to Override or SetIfMissing. Format: [d.]hh:mm:ss + // +kubebuilder:validation:Optional + Duration *string `json:"duration,omitempty" tf:"duration,omitempty"` +} + +type GlobalDeliveryRuleCacheKeyQueryStringActionInitParameters struct { + + // The behavior of the cache key for query strings. Valid values are Exclude, ExcludeAll, Include and IncludeAll. + Behavior *string `json:"behavior,omitempty" tf:"behavior,omitempty"` + + // Comma separated list of parameter values. 
+ Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type GlobalDeliveryRuleCacheKeyQueryStringActionObservation struct { + + // The behavior of the cache key for query strings. Valid values are Exclude, ExcludeAll, Include and IncludeAll. + Behavior *string `json:"behavior,omitempty" tf:"behavior,omitempty"` + + // Comma separated list of parameter values. + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type GlobalDeliveryRuleCacheKeyQueryStringActionParameters struct { + + // The behavior of the cache key for query strings. Valid values are Exclude, ExcludeAll, Include and IncludeAll. + // +kubebuilder:validation:Optional + Behavior *string `json:"behavior" tf:"behavior,omitempty"` + + // Comma separated list of parameter values. + // +kubebuilder:validation:Optional + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type GlobalDeliveryRuleInitParameters struct { + + // A cache_expiration_action block as defined above. + CacheExpirationAction *GlobalDeliveryRuleCacheExpirationActionInitParameters `json:"cacheExpirationAction,omitempty" tf:"cache_expiration_action,omitempty"` + + // A cache_key_query_string_action block as defined above. + CacheKeyQueryStringAction *GlobalDeliveryRuleCacheKeyQueryStringActionInitParameters `json:"cacheKeyQueryStringAction,omitempty" tf:"cache_key_query_string_action,omitempty"` + + // A modify_request_header_action block as defined below. + ModifyRequestHeaderAction []GlobalDeliveryRuleModifyRequestHeaderActionInitParameters `json:"modifyRequestHeaderAction,omitempty" tf:"modify_request_header_action,omitempty"` + + // A modify_response_header_action block as defined below. + ModifyResponseHeaderAction []GlobalDeliveryRuleModifyResponseHeaderActionInitParameters `json:"modifyResponseHeaderAction,omitempty" tf:"modify_response_header_action,omitempty"` + + // A url_redirect_action block as defined below. 
+ URLRedirectAction *GlobalDeliveryRuleURLRedirectActionInitParameters `json:"urlRedirectAction,omitempty" tf:"url_redirect_action,omitempty"` + + // A url_rewrite_action block as defined below. + URLRewriteAction *GlobalDeliveryRuleURLRewriteActionInitParameters `json:"urlRewriteAction,omitempty" tf:"url_rewrite_action,omitempty"` +} + +type GlobalDeliveryRuleModifyRequestHeaderActionInitParameters struct { + + // Action to be executed on a header value. Valid values are Append, Delete and Overwrite. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The name of the origin. This is an arbitrary value. However, this value needs to be unique under the endpoint. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The value of the header. Only needed when action is set to Append or overwrite. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type GlobalDeliveryRuleModifyRequestHeaderActionObservation struct { + + // Action to be executed on a header value. Valid values are Append, Delete and Overwrite. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The name of the origin. This is an arbitrary value. However, this value needs to be unique under the endpoint. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The value of the header. Only needed when action is set to Append or overwrite. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type GlobalDeliveryRuleModifyRequestHeaderActionParameters struct { + + // Action to be executed on a header value. Valid values are Append, Delete and Overwrite. + // +kubebuilder:validation:Optional + Action *string `json:"action" tf:"action,omitempty"` + + // The name of the origin. This is an arbitrary value. However, this value needs to be unique under the endpoint. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The value of the header. Only needed when action is set to Append or overwrite. + // +kubebuilder:validation:Optional + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type GlobalDeliveryRuleModifyResponseHeaderActionInitParameters struct { + + // Action to be executed on a header value. Valid values are Append, Delete and Overwrite. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The name of the origin. This is an arbitrary value. However, this value needs to be unique under the endpoint. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The value of the header. Only needed when action is set to Append or overwrite. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type GlobalDeliveryRuleModifyResponseHeaderActionObservation struct { + + // Action to be executed on a header value. Valid values are Append, Delete and Overwrite. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The name of the origin. This is an arbitrary value. However, this value needs to be unique under the endpoint. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The value of the header. Only needed when action is set to Append or overwrite. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type GlobalDeliveryRuleModifyResponseHeaderActionParameters struct { + + // Action to be executed on a header value. Valid values are Append, Delete and Overwrite. + // +kubebuilder:validation:Optional + Action *string `json:"action" tf:"action,omitempty"` + + // The name of the origin. This is an arbitrary value. However, this value needs to be unique under the endpoint. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The value of the header. Only needed when action is set to Append or overwrite. + // +kubebuilder:validation:Optional + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type GlobalDeliveryRuleObservation struct { + + // A cache_expiration_action block as defined above. + CacheExpirationAction *GlobalDeliveryRuleCacheExpirationActionObservation `json:"cacheExpirationAction,omitempty" tf:"cache_expiration_action,omitempty"` + + // A cache_key_query_string_action block as defined above. + CacheKeyQueryStringAction *GlobalDeliveryRuleCacheKeyQueryStringActionObservation `json:"cacheKeyQueryStringAction,omitempty" tf:"cache_key_query_string_action,omitempty"` + + // A modify_request_header_action block as defined below. + ModifyRequestHeaderAction []GlobalDeliveryRuleModifyRequestHeaderActionObservation `json:"modifyRequestHeaderAction,omitempty" tf:"modify_request_header_action,omitempty"` + + // A modify_response_header_action block as defined below. + ModifyResponseHeaderAction []GlobalDeliveryRuleModifyResponseHeaderActionObservation `json:"modifyResponseHeaderAction,omitempty" tf:"modify_response_header_action,omitempty"` + + // A url_redirect_action block as defined below. + URLRedirectAction *GlobalDeliveryRuleURLRedirectActionObservation `json:"urlRedirectAction,omitempty" tf:"url_redirect_action,omitempty"` + + // A url_rewrite_action block as defined below. + URLRewriteAction *GlobalDeliveryRuleURLRewriteActionObservation `json:"urlRewriteAction,omitempty" tf:"url_rewrite_action,omitempty"` +} + +type GlobalDeliveryRuleParameters struct { + + // A cache_expiration_action block as defined above. + // +kubebuilder:validation:Optional + CacheExpirationAction *GlobalDeliveryRuleCacheExpirationActionParameters `json:"cacheExpirationAction,omitempty" tf:"cache_expiration_action,omitempty"` + + // A cache_key_query_string_action block as defined above. 
+ // +kubebuilder:validation:Optional + CacheKeyQueryStringAction *GlobalDeliveryRuleCacheKeyQueryStringActionParameters `json:"cacheKeyQueryStringAction,omitempty" tf:"cache_key_query_string_action,omitempty"` + + // A modify_request_header_action block as defined below. + // +kubebuilder:validation:Optional + ModifyRequestHeaderAction []GlobalDeliveryRuleModifyRequestHeaderActionParameters `json:"modifyRequestHeaderAction,omitempty" tf:"modify_request_header_action,omitempty"` + + // A modify_response_header_action block as defined below. + // +kubebuilder:validation:Optional + ModifyResponseHeaderAction []GlobalDeliveryRuleModifyResponseHeaderActionParameters `json:"modifyResponseHeaderAction,omitempty" tf:"modify_response_header_action,omitempty"` + + // A url_redirect_action block as defined below. + // +kubebuilder:validation:Optional + URLRedirectAction *GlobalDeliveryRuleURLRedirectActionParameters `json:"urlRedirectAction,omitempty" tf:"url_redirect_action,omitempty"` + + // A url_rewrite_action block as defined below. + // +kubebuilder:validation:Optional + URLRewriteAction *GlobalDeliveryRuleURLRewriteActionParameters `json:"urlRewriteAction,omitempty" tf:"url_rewrite_action,omitempty"` +} + +type GlobalDeliveryRuleURLRedirectActionInitParameters struct { + + // Specifies the fragment part of the URL. This value must not start with a #. + Fragment *string `json:"fragment,omitempty" tf:"fragment,omitempty"` + + // Specifies the hostname part of the URL. + HostName *string `json:"hostname,omitempty" tf:"hostname,omitempty"` + + // Specifies the path part of the URL. This value must begin with a /. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // Specifies the protocol part of the URL. Valid values are MatchRequest, Http and Https. Defaults to MatchRequest. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // Specifies the query string part of the URL. This value must not start with a ? 
or & and must be in = format separated by &. + QueryString *string `json:"queryString,omitempty" tf:"query_string,omitempty"` + + // Type of the redirect. Valid values are Found, Moved, PermanentRedirect and TemporaryRedirect. + RedirectType *string `json:"redirectType,omitempty" tf:"redirect_type,omitempty"` +} + +type GlobalDeliveryRuleURLRedirectActionObservation struct { + + // Specifies the fragment part of the URL. This value must not start with a #. + Fragment *string `json:"fragment,omitempty" tf:"fragment,omitempty"` + + // Specifies the hostname part of the URL. + HostName *string `json:"hostname,omitempty" tf:"hostname,omitempty"` + + // Specifies the path part of the URL. This value must begin with a /. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // Specifies the protocol part of the URL. Valid values are MatchRequest, Http and Https. Defaults to MatchRequest. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // Specifies the query string part of the URL. This value must not start with a ? or & and must be in = format separated by &. + QueryString *string `json:"queryString,omitempty" tf:"query_string,omitempty"` + + // Type of the redirect. Valid values are Found, Moved, PermanentRedirect and TemporaryRedirect. + RedirectType *string `json:"redirectType,omitempty" tf:"redirect_type,omitempty"` +} + +type GlobalDeliveryRuleURLRedirectActionParameters struct { + + // Specifies the fragment part of the URL. This value must not start with a #. + // +kubebuilder:validation:Optional + Fragment *string `json:"fragment,omitempty" tf:"fragment,omitempty"` + + // Specifies the hostname part of the URL. + // +kubebuilder:validation:Optional + HostName *string `json:"hostname,omitempty" tf:"hostname,omitempty"` + + // Specifies the path part of the URL. This value must begin with a /. 
+ // +kubebuilder:validation:Optional + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // Specifies the protocol part of the URL. Valid values are MatchRequest, Http and Https. Defaults to MatchRequest. + // +kubebuilder:validation:Optional + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // Specifies the query string part of the URL. This value must not start with a ? or & and must be in = format separated by &. + // +kubebuilder:validation:Optional + QueryString *string `json:"queryString,omitempty" tf:"query_string,omitempty"` + + // Type of the redirect. Valid values are Found, Moved, PermanentRedirect and TemporaryRedirect. + // +kubebuilder:validation:Optional + RedirectType *string `json:"redirectType" tf:"redirect_type,omitempty"` +} + +type GlobalDeliveryRuleURLRewriteActionInitParameters struct { + + // This value must start with a / and can't be longer than 260 characters. + Destination *string `json:"destination,omitempty" tf:"destination,omitempty"` + + // Whether preserve an unmatched path. Defaults to true. + PreserveUnmatchedPath *bool `json:"preserveUnmatchedPath,omitempty" tf:"preserve_unmatched_path,omitempty"` + + // This value must start with a / and can't be longer than 260 characters. + SourcePattern *string `json:"sourcePattern,omitempty" tf:"source_pattern,omitempty"` +} + +type GlobalDeliveryRuleURLRewriteActionObservation struct { + + // This value must start with a / and can't be longer than 260 characters. + Destination *string `json:"destination,omitempty" tf:"destination,omitempty"` + + // Whether preserve an unmatched path. Defaults to true. + PreserveUnmatchedPath *bool `json:"preserveUnmatchedPath,omitempty" tf:"preserve_unmatched_path,omitempty"` + + // This value must start with a / and can't be longer than 260 characters. 
+ SourcePattern *string `json:"sourcePattern,omitempty" tf:"source_pattern,omitempty"` +} + +type GlobalDeliveryRuleURLRewriteActionParameters struct { + + // This value must start with a / and can't be longer than 260 characters. + // +kubebuilder:validation:Optional + Destination *string `json:"destination" tf:"destination,omitempty"` + + // Whether preserve an unmatched path. Defaults to true. + // +kubebuilder:validation:Optional + PreserveUnmatchedPath *bool `json:"preserveUnmatchedPath,omitempty" tf:"preserve_unmatched_path,omitempty"` + + // This value must start with a / and can't be longer than 260 characters. + // +kubebuilder:validation:Optional + SourcePattern *string `json:"sourcePattern" tf:"source_pattern,omitempty"` +} + +type HTTPVersionConditionInitParameters struct { + + // List of string values. This is required if operator is not Any. + // +listType=set + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // Defaults to false. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // Valid values are Any, BeginsWith, Contains, EndsWith, Equal, GreaterThan, GreaterThanOrEqual, LessThan, LessThanOrEqual, RegEx and Wildcard. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` +} + +type HTTPVersionConditionObservation struct { + + // List of string values. This is required if operator is not Any. + // +listType=set + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // Defaults to false. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // Valid values are Any, BeginsWith, Contains, EndsWith, Equal, GreaterThan, GreaterThanOrEqual, LessThan, LessThanOrEqual, RegEx and Wildcard. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` +} + +type HTTPVersionConditionParameters struct { + + // List of string values. This is required if operator is not Any. 
+ // +kubebuilder:validation:Optional + // +listType=set + MatchValues []*string `json:"matchValues" tf:"match_values,omitempty"` + + // Defaults to false. + // +kubebuilder:validation:Optional + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // Valid values are Any, BeginsWith, Contains, EndsWith, Equal, GreaterThan, GreaterThanOrEqual, LessThan, LessThanOrEqual, RegEx and Wildcard. + // +kubebuilder:validation:Optional + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` +} + +type ModifyRequestHeaderActionInitParameters struct { + + // Action to be executed on a header value. Valid values are Append, Delete and Overwrite. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The name of the origin. This is an arbitrary value. However, this value needs to be unique under the endpoint. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The value of the header. Only needed when action is set to Append or overwrite. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ModifyRequestHeaderActionObservation struct { + + // Action to be executed on a header value. Valid values are Append, Delete and Overwrite. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The name of the origin. This is an arbitrary value. However, this value needs to be unique under the endpoint. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The value of the header. Only needed when action is set to Append or overwrite. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ModifyRequestHeaderActionParameters struct { + + // Action to be executed on a header value. Valid values are Append, Delete and Overwrite. 
+ // +kubebuilder:validation:Optional + Action *string `json:"action" tf:"action,omitempty"` + + // The name of the origin. This is an arbitrary value. However, this value needs to be unique under the endpoint. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The value of the header. Only needed when action is set to Append or overwrite. + // +kubebuilder:validation:Optional + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ModifyResponseHeaderActionInitParameters struct { + + // Action to be executed on a header value. Valid values are Append, Delete and Overwrite. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The name of the origin. This is an arbitrary value. However, this value needs to be unique under the endpoint. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The value of the header. Only needed when action is set to Append or overwrite. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ModifyResponseHeaderActionObservation struct { + + // Action to be executed on a header value. Valid values are Append, Delete and Overwrite. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The name of the origin. This is an arbitrary value. However, this value needs to be unique under the endpoint. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The value of the header. Only needed when action is set to Append or overwrite. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ModifyResponseHeaderActionParameters struct { + + // Action to be executed on a header value. Valid values are Append, Delete and Overwrite. 
+ // +kubebuilder:validation:Optional + Action *string `json:"action" tf:"action,omitempty"` + + // The name of the origin. This is an arbitrary value. However, this value needs to be unique under the endpoint. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The value of the header. Only needed when action is set to Append or overwrite. + // +kubebuilder:validation:Optional + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type OriginInitParameters struct { + + // The HTTP port of the origin. Defaults to 80. Changing this forces a new resource to be created. + HTTPPort *float64 `json:"httpPort,omitempty" tf:"http_port,omitempty"` + + // The HTTPS port of the origin. Defaults to 443. Changing this forces a new resource to be created. + HTTPSPort *float64 `json:"httpsPort,omitempty" tf:"https_port,omitempty"` + + // A string that determines the hostname/IP address of the origin server. This string can be a domain name, Storage Account endpoint, Web App endpoint, IPv4 address or IPv6 address. Changing this forces a new resource to be created. + HostName *string `json:"hostName,omitempty" tf:"host_name,omitempty"` + + // The name of the origin. This is an arbitrary value. However, this value needs to be unique under the endpoint. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type OriginObservation struct { + + // The HTTP port of the origin. Defaults to 80. Changing this forces a new resource to be created. + HTTPPort *float64 `json:"httpPort,omitempty" tf:"http_port,omitempty"` + + // The HTTPS port of the origin. Defaults to 443. Changing this forces a new resource to be created. + HTTPSPort *float64 `json:"httpsPort,omitempty" tf:"https_port,omitempty"` + + // A string that determines the hostname/IP address of the origin server. 
This string can be a domain name, Storage Account endpoint, Web App endpoint, IPv4 address or IPv6 address. Changing this forces a new resource to be created. + HostName *string `json:"hostName,omitempty" tf:"host_name,omitempty"` + + // The name of the origin. This is an arbitrary value. However, this value needs to be unique under the endpoint. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type OriginParameters struct { + + // The HTTP port of the origin. Defaults to 80. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + HTTPPort *float64 `json:"httpPort,omitempty" tf:"http_port,omitempty"` + + // The HTTPS port of the origin. Defaults to 443. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + HTTPSPort *float64 `json:"httpsPort,omitempty" tf:"https_port,omitempty"` + + // A string that determines the hostname/IP address of the origin server. This string can be a domain name, Storage Account endpoint, Web App endpoint, IPv4 address or IPv6 address. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + HostName *string `json:"hostName" tf:"host_name,omitempty"` + + // The name of the origin. This is an arbitrary value. However, this value needs to be unique under the endpoint. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type PostArgConditionInitParameters struct { + + // List of string values. This is required if operator is not Any. + // +listType=set + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // Defaults to false. 
+ NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // Valid values are Any, BeginsWith, Contains, EndsWith, Equal, GreaterThan, GreaterThanOrEqual, LessThan, LessThanOrEqual, RegEx and Wildcard. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Header name. + Selector *string `json:"selector,omitempty" tf:"selector,omitempty"` + + // A list of transforms. Valid values are Lowercase and Uppercase. + Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type PostArgConditionObservation struct { + + // List of string values. This is required if operator is not Any. + // +listType=set + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // Defaults to false. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // Valid values are Any, BeginsWith, Contains, EndsWith, Equal, GreaterThan, GreaterThanOrEqual, LessThan, LessThanOrEqual, RegEx and Wildcard. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Header name. + Selector *string `json:"selector,omitempty" tf:"selector,omitempty"` + + // A list of transforms. Valid values are Lowercase and Uppercase. + Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type PostArgConditionParameters struct { + + // List of string values. This is required if operator is not Any. + // +kubebuilder:validation:Optional + // +listType=set + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // Defaults to false. + // +kubebuilder:validation:Optional + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // Valid values are Any, BeginsWith, Contains, EndsWith, Equal, GreaterThan, GreaterThanOrEqual, LessThan, LessThanOrEqual, RegEx and Wildcard. 
+ // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // Header name. + // +kubebuilder:validation:Optional + Selector *string `json:"selector" tf:"selector,omitempty"` + + // A list of transforms. Valid values are Lowercase and Uppercase. + // +kubebuilder:validation:Optional + Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type QueryStringConditionInitParameters struct { + + // List of string values. This is required if operator is not Any. + // +listType=set + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // Defaults to false. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // Valid values are Any, BeginsWith, Contains, EndsWith, Equal, GreaterThan, GreaterThanOrEqual, LessThan, LessThanOrEqual, RegEx and Wildcard. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of transforms. Valid values are Lowercase and Uppercase. + Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type QueryStringConditionObservation struct { + + // List of string values. This is required if operator is not Any. + // +listType=set + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // Defaults to false. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // Valid values are Any, BeginsWith, Contains, EndsWith, Equal, GreaterThan, GreaterThanOrEqual, LessThan, LessThanOrEqual, RegEx and Wildcard. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of transforms. Valid values are Lowercase and Uppercase. + Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type QueryStringConditionParameters struct { + + // List of string values. This is required if operator is not Any. 
+ // +kubebuilder:validation:Optional + // +listType=set + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // Defaults to false. + // +kubebuilder:validation:Optional + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // Valid values are Any, BeginsWith, Contains, EndsWith, Equal, GreaterThan, GreaterThanOrEqual, LessThan, LessThanOrEqual, RegEx and Wildcard. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // A list of transforms. Valid values are Lowercase and Uppercase. + // +kubebuilder:validation:Optional + Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type RemoteAddressConditionInitParameters struct { + + // List of string values. This is required if operator is not Any. + // +listType=set + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // Defaults to false. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // Valid values are Any, BeginsWith, Contains, EndsWith, Equal, GreaterThan, GreaterThanOrEqual, LessThan, LessThanOrEqual, RegEx and Wildcard. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` +} + +type RemoteAddressConditionObservation struct { + + // List of string values. This is required if operator is not Any. + // +listType=set + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // Defaults to false. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // Valid values are Any, BeginsWith, Contains, EndsWith, Equal, GreaterThan, GreaterThanOrEqual, LessThan, LessThanOrEqual, RegEx and Wildcard. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` +} + +type RemoteAddressConditionParameters struct { + + // List of string values. This is required if operator is not Any. 
+ // +kubebuilder:validation:Optional + // +listType=set + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // Defaults to false. + // +kubebuilder:validation:Optional + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // Valid values are Any, BeginsWith, Contains, EndsWith, Equal, GreaterThan, GreaterThanOrEqual, LessThan, LessThanOrEqual, RegEx and Wildcard. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` +} + +type RequestBodyConditionInitParameters struct { + + // List of string values. This is required if operator is not Any. + // +listType=set + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // Defaults to false. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // Valid values are Any, BeginsWith, Contains, EndsWith, Equal, GreaterThan, GreaterThanOrEqual, LessThan, LessThanOrEqual, RegEx and Wildcard. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of transforms. Valid values are Lowercase and Uppercase. + Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type RequestBodyConditionObservation struct { + + // List of string values. This is required if operator is not Any. + // +listType=set + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // Defaults to false. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // Valid values are Any, BeginsWith, Contains, EndsWith, Equal, GreaterThan, GreaterThanOrEqual, LessThan, LessThanOrEqual, RegEx and Wildcard. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of transforms. Valid values are Lowercase and Uppercase. 
+ Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type RequestBodyConditionParameters struct { + + // List of string values. This is required if operator is not Any. + // +kubebuilder:validation:Optional + // +listType=set + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // Defaults to false. + // +kubebuilder:validation:Optional + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // Valid values are Any, BeginsWith, Contains, EndsWith, Equal, GreaterThan, GreaterThanOrEqual, LessThan, LessThanOrEqual, RegEx and Wildcard. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // A list of transforms. Valid values are Lowercase and Uppercase. + // +kubebuilder:validation:Optional + Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type RequestHeaderConditionInitParameters struct { + + // List of string values. This is required if operator is not Any. + // +listType=set + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // Defaults to false. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // Valid values are Any, BeginsWith, Contains, EndsWith, Equal, GreaterThan, GreaterThanOrEqual, LessThan, LessThanOrEqual, RegEx and Wildcard. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Header name. + Selector *string `json:"selector,omitempty" tf:"selector,omitempty"` + + // A list of transforms. Valid values are Lowercase and Uppercase. + Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type RequestHeaderConditionObservation struct { + + // List of string values. This is required if operator is not Any. + // +listType=set + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // Defaults to false. 
+ NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // Valid values are Any, BeginsWith, Contains, EndsWith, Equal, GreaterThan, GreaterThanOrEqual, LessThan, LessThanOrEqual, RegEx and Wildcard. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Header name. + Selector *string `json:"selector,omitempty" tf:"selector,omitempty"` + + // A list of transforms. Valid values are Lowercase and Uppercase. + Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type RequestHeaderConditionParameters struct { + + // List of string values. This is required if operator is not Any. + // +kubebuilder:validation:Optional + // +listType=set + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // Defaults to false. + // +kubebuilder:validation:Optional + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // Valid values are Any, BeginsWith, Contains, EndsWith, Equal, GreaterThan, GreaterThanOrEqual, LessThan, LessThanOrEqual, RegEx and Wildcard. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // Header name. + // +kubebuilder:validation:Optional + Selector *string `json:"selector" tf:"selector,omitempty"` + + // A list of transforms. Valid values are Lowercase and Uppercase. + // +kubebuilder:validation:Optional + Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type RequestMethodConditionInitParameters struct { + + // List of string values. This is required if operator is not Any. + // +listType=set + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // Defaults to false. 
+ NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // Valid values are Any, BeginsWith, Contains, EndsWith, Equal, GreaterThan, GreaterThanOrEqual, LessThan, LessThanOrEqual, RegEx and Wildcard. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` +} + +type RequestMethodConditionObservation struct { + + // List of string values. This is required if operator is not Any. + // +listType=set + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // Defaults to false. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // Valid values are Any, BeginsWith, Contains, EndsWith, Equal, GreaterThan, GreaterThanOrEqual, LessThan, LessThanOrEqual, RegEx and Wildcard. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` +} + +type RequestMethodConditionParameters struct { + + // List of string values. This is required if operator is not Any. + // +kubebuilder:validation:Optional + // +listType=set + MatchValues []*string `json:"matchValues" tf:"match_values,omitempty"` + + // Defaults to false. + // +kubebuilder:validation:Optional + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // Valid values are Any, BeginsWith, Contains, EndsWith, Equal, GreaterThan, GreaterThanOrEqual, LessThan, LessThanOrEqual, RegEx and Wildcard. + // +kubebuilder:validation:Optional + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` +} + +type RequestSchemeConditionInitParameters struct { + + // List of string values. This is required if operator is not Any. + // +listType=set + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // Defaults to false. 
+ NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // Valid values are Any, BeginsWith, Contains, EndsWith, Equal, GreaterThan, GreaterThanOrEqual, LessThan, LessThanOrEqual, RegEx and Wildcard. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` +} + +type RequestSchemeConditionObservation struct { + + // List of string values. This is required if operator is not Any. + // +listType=set + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // Defaults to false. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // Valid values are Any, BeginsWith, Contains, EndsWith, Equal, GreaterThan, GreaterThanOrEqual, LessThan, LessThanOrEqual, RegEx and Wildcard. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` +} + +type RequestSchemeConditionParameters struct { + + // List of string values. This is required if operator is not Any. + // +kubebuilder:validation:Optional + // +listType=set + MatchValues []*string `json:"matchValues" tf:"match_values,omitempty"` + + // Defaults to false. + // +kubebuilder:validation:Optional + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // Valid values are Any, BeginsWith, Contains, EndsWith, Equal, GreaterThan, GreaterThanOrEqual, LessThan, LessThanOrEqual, RegEx and Wildcard. + // +kubebuilder:validation:Optional + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` +} + +type RequestURIConditionInitParameters struct { + + // List of string values. This is required if operator is not Any. + // +listType=set + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // Defaults to false. 
+ NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // Valid values are Any, BeginsWith, Contains, EndsWith, Equal, GreaterThan, GreaterThanOrEqual, LessThan, LessThanOrEqual, RegEx and Wildcard. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of transforms. Valid values are Lowercase and Uppercase. + Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type RequestURIConditionObservation struct { + + // List of string values. This is required if operator is not Any. + // +listType=set + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // Defaults to false. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // Valid values are Any, BeginsWith, Contains, EndsWith, Equal, GreaterThan, GreaterThanOrEqual, LessThan, LessThanOrEqual, RegEx and Wildcard. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of transforms. Valid values are Lowercase and Uppercase. + Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type RequestURIConditionParameters struct { + + // List of string values. This is required if operator is not Any. + // +kubebuilder:validation:Optional + // +listType=set + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // Defaults to false. + // +kubebuilder:validation:Optional + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // Valid values are Any, BeginsWith, Contains, EndsWith, Equal, GreaterThan, GreaterThanOrEqual, LessThan, LessThanOrEqual, RegEx and Wildcard. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // A list of transforms. Valid values are Lowercase and Uppercase. 
+ // +kubebuilder:validation:Optional + Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type URLFileExtensionConditionInitParameters struct { + + // List of string values. This is required if operator is not Any. + // +listType=set + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // Defaults to false. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // Valid values are Any, BeginsWith, Contains, EndsWith, Equal, GreaterThan, GreaterThanOrEqual, LessThan, LessThanOrEqual, RegEx and Wildcard. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of transforms. Valid values are Lowercase and Uppercase. + Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type URLFileExtensionConditionObservation struct { + + // List of string values. This is required if operator is not Any. + // +listType=set + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // Defaults to false. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // Valid values are Any, BeginsWith, Contains, EndsWith, Equal, GreaterThan, GreaterThanOrEqual, LessThan, LessThanOrEqual, RegEx and Wildcard. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of transforms. Valid values are Lowercase and Uppercase. + Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type URLFileExtensionConditionParameters struct { + + // List of string values. This is required if operator is not Any. + // +kubebuilder:validation:Optional + // +listType=set + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // Defaults to false. 
+ // +kubebuilder:validation:Optional + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // Valid values are Any, BeginsWith, Contains, EndsWith, Equal, GreaterThan, GreaterThanOrEqual, LessThan, LessThanOrEqual, RegEx and Wildcard. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // A list of transforms. Valid values are Lowercase and Uppercase. + // +kubebuilder:validation:Optional + Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type URLFileNameConditionInitParameters struct { + + // List of string values. This is required if operator is not Any. + // +listType=set + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // Defaults to false. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // Valid values are Any, BeginsWith, Contains, EndsWith, Equal, GreaterThan, GreaterThanOrEqual, LessThan, LessThanOrEqual, RegEx and Wildcard. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of transforms. Valid values are Lowercase and Uppercase. + Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type URLFileNameConditionObservation struct { + + // List of string values. This is required if operator is not Any. + // +listType=set + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // Defaults to false. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // Valid values are Any, BeginsWith, Contains, EndsWith, Equal, GreaterThan, GreaterThanOrEqual, LessThan, LessThanOrEqual, RegEx and Wildcard. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of transforms. Valid values are Lowercase and Uppercase. 
+ Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type URLFileNameConditionParameters struct { + + // List of string values. This is required if operator is not Any. + // +kubebuilder:validation:Optional + // +listType=set + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // Defaults to false. + // +kubebuilder:validation:Optional + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // Valid values are Any, BeginsWith, Contains, EndsWith, Equal, GreaterThan, GreaterThanOrEqual, LessThan, LessThanOrEqual, RegEx and Wildcard. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // A list of transforms. Valid values are Lowercase and Uppercase. + // +kubebuilder:validation:Optional + Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type URLPathConditionInitParameters struct { + + // List of string values. This is required if operator is not Any. + // +listType=set + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // Defaults to false. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // Valid values are Any, BeginsWith, Contains, EndsWith, Equal, GreaterThan, GreaterThanOrEqual, LessThan, LessThanOrEqual, RegEx and Wildcard. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of transforms. Valid values are Lowercase and Uppercase. + Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type URLPathConditionObservation struct { + + // List of string values. This is required if operator is not Any. + // +listType=set + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // Defaults to false. 
+ NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // Valid values are Any, BeginsWith, Contains, EndsWith, Equal, GreaterThan, GreaterThanOrEqual, LessThan, LessThanOrEqual, RegEx and Wildcard. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of transforms. Valid values are Lowercase and Uppercase. + Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type URLPathConditionParameters struct { + + // List of string values. This is required if operator is not Any. + // +kubebuilder:validation:Optional + // +listType=set + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // Defaults to false. + // +kubebuilder:validation:Optional + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // Valid values are Any, BeginsWith, Contains, EndsWith, Equal, GreaterThan, GreaterThanOrEqual, LessThan, LessThanOrEqual, RegEx and Wildcard. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // A list of transforms. Valid values are Lowercase and Uppercase. + // +kubebuilder:validation:Optional + Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type URLRedirectActionInitParameters struct { + + // Specifies the fragment part of the URL. This value must not start with a #. + Fragment *string `json:"fragment,omitempty" tf:"fragment,omitempty"` + + // Specifies the hostname part of the URL. + HostName *string `json:"hostname,omitempty" tf:"hostname,omitempty"` + + // Specifies the path part of the URL. This value must begin with a /. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // Specifies the protocol part of the URL. Valid values are MatchRequest, Http and Https. Defaults to MatchRequest. 
+ Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // Specifies the query string part of the URL. This value must not start with a ? or & and must be in = format separated by &. + QueryString *string `json:"queryString,omitempty" tf:"query_string,omitempty"` + + // Type of the redirect. Valid values are Found, Moved, PermanentRedirect and TemporaryRedirect. + RedirectType *string `json:"redirectType,omitempty" tf:"redirect_type,omitempty"` +} + +type URLRedirectActionObservation struct { + + // Specifies the fragment part of the URL. This value must not start with a #. + Fragment *string `json:"fragment,omitempty" tf:"fragment,omitempty"` + + // Specifies the hostname part of the URL. + HostName *string `json:"hostname,omitempty" tf:"hostname,omitempty"` + + // Specifies the path part of the URL. This value must begin with a /. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // Specifies the protocol part of the URL. Valid values are MatchRequest, Http and Https. Defaults to MatchRequest. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // Specifies the query string part of the URL. This value must not start with a ? or & and must be in = format separated by &. + QueryString *string `json:"queryString,omitempty" tf:"query_string,omitempty"` + + // Type of the redirect. Valid values are Found, Moved, PermanentRedirect and TemporaryRedirect. + RedirectType *string `json:"redirectType,omitempty" tf:"redirect_type,omitempty"` +} + +type URLRedirectActionParameters struct { + + // Specifies the fragment part of the URL. This value must not start with a #. + // +kubebuilder:validation:Optional + Fragment *string `json:"fragment,omitempty" tf:"fragment,omitempty"` + + // Specifies the hostname part of the URL. + // +kubebuilder:validation:Optional + HostName *string `json:"hostname,omitempty" tf:"hostname,omitempty"` + + // Specifies the path part of the URL. This value must begin with a /. 
+ // +kubebuilder:validation:Optional + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // Specifies the protocol part of the URL. Valid values are MatchRequest, Http and Https. Defaults to MatchRequest. + // +kubebuilder:validation:Optional + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // Specifies the query string part of the URL. This value must not start with a ? or & and must be in = format separated by &. + // +kubebuilder:validation:Optional + QueryString *string `json:"queryString,omitempty" tf:"query_string,omitempty"` + + // Type of the redirect. Valid values are Found, Moved, PermanentRedirect and TemporaryRedirect. + // +kubebuilder:validation:Optional + RedirectType *string `json:"redirectType" tf:"redirect_type,omitempty"` +} + +type URLRewriteActionInitParameters struct { + + // This value must start with a / and can't be longer than 260 characters. + Destination *string `json:"destination,omitempty" tf:"destination,omitempty"` + + // Whether preserve an unmatched path. Defaults to true. + PreserveUnmatchedPath *bool `json:"preserveUnmatchedPath,omitempty" tf:"preserve_unmatched_path,omitempty"` + + // This value must start with a / and can't be longer than 260 characters. + SourcePattern *string `json:"sourcePattern,omitempty" tf:"source_pattern,omitempty"` +} + +type URLRewriteActionObservation struct { + + // This value must start with a / and can't be longer than 260 characters. + Destination *string `json:"destination,omitempty" tf:"destination,omitempty"` + + // Whether preserve an unmatched path. Defaults to true. + PreserveUnmatchedPath *bool `json:"preserveUnmatchedPath,omitempty" tf:"preserve_unmatched_path,omitempty"` + + // This value must start with a / and can't be longer than 260 characters. 
+ SourcePattern *string `json:"sourcePattern,omitempty" tf:"source_pattern,omitempty"` +} + +type URLRewriteActionParameters struct { + + // This value must start with a / and can't be longer than 260 characters. + // +kubebuilder:validation:Optional + Destination *string `json:"destination" tf:"destination,omitempty"` + + // Whether preserve an unmatched path. Defaults to true. + // +kubebuilder:validation:Optional + PreserveUnmatchedPath *bool `json:"preserveUnmatchedPath,omitempty" tf:"preserve_unmatched_path,omitempty"` + + // This value must start with a / and can't be longer than 260 characters. + // +kubebuilder:validation:Optional + SourcePattern *string `json:"sourcePattern" tf:"source_pattern,omitempty"` +} + +// EndpointSpec defines the desired state of Endpoint +type EndpointSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider EndpointParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider EndpointInitParameters `json:"initProvider,omitempty"` +} + +// EndpointStatus defines the observed state of Endpoint. +type EndpointStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider EndpointObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Endpoint is the Schema for the Endpoints API. Manages a CDN Endpoint. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type Endpoint struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.origin) || (has(self.initProvider) && has(self.initProvider.origin))",message="spec.forProvider.origin is a required parameter" + Spec EndpointSpec `json:"spec"` + Status EndpointStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// EndpointList contains a list of Endpoints +type EndpointList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Endpoint `json:"items"` +} + +// Repository type metadata. +var ( + Endpoint_Kind = "Endpoint" + Endpoint_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Endpoint_Kind}.String() + Endpoint_KindAPIVersion = Endpoint_Kind + "." 
+ CRDGroupVersion.String() + Endpoint_GroupVersionKind = CRDGroupVersion.WithKind(Endpoint_Kind) +) + +func init() { + SchemeBuilder.Register(&Endpoint{}, &EndpointList{}) +} diff --git a/apis/cdn/v1beta2/zz_frontdoorcustomdomain_terraformed.go b/apis/cdn/v1beta2/zz_frontdoorcustomdomain_terraformed.go new file mode 100755 index 000000000..48ee2aa77 --- /dev/null +++ b/apis/cdn/v1beta2/zz_frontdoorcustomdomain_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this FrontdoorCustomDomain +func (mg *FrontdoorCustomDomain) GetTerraformResourceType() string { + return "azurerm_cdn_frontdoor_custom_domain" +} + +// GetConnectionDetailsMapping for this FrontdoorCustomDomain +func (tr *FrontdoorCustomDomain) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this FrontdoorCustomDomain +func (tr *FrontdoorCustomDomain) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this FrontdoorCustomDomain +func (tr *FrontdoorCustomDomain) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this FrontdoorCustomDomain +func (tr *FrontdoorCustomDomain) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this FrontdoorCustomDomain +func 
(tr *FrontdoorCustomDomain) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this FrontdoorCustomDomain +func (tr *FrontdoorCustomDomain) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this FrontdoorCustomDomain +func (tr *FrontdoorCustomDomain) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this FrontdoorCustomDomain +func (tr *FrontdoorCustomDomain) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this FrontdoorCustomDomain using its observed tfState. 
+// returns True if there are any spec changes for the resource. +func (tr *FrontdoorCustomDomain) LateInitialize(attrs []byte) (bool, error) { + params := &FrontdoorCustomDomainParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *FrontdoorCustomDomain) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/cdn/v1beta2/zz_frontdoorcustomdomain_types.go b/apis/cdn/v1beta2/zz_frontdoorcustomdomain_types.go new file mode 100755 index 000000000..9b9d4ad2c --- /dev/null +++ b/apis/cdn/v1beta2/zz_frontdoorcustomdomain_types.go @@ -0,0 +1,200 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type FrontdoorCustomDomainInitParameters struct { + + // The ID of the Azure DNS Zone which should be used for this Front Door Custom Domain. If you are using Azure to host your DNS domains, you must delegate the domain provider's domain name system (DNS) to an Azure DNS Zone. For more information, see Delegate a domain to Azure DNS. Otherwise, if you're using your own domain provider to handle your DNS, you must validate the Front Door Custom Domain by creating the DNS TXT records manually. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.DNSZone + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + DNSZoneID *string `json:"dnsZoneId,omitempty" tf:"dns_zone_id,omitempty"` + + // Reference to a DNSZone in network to populate dnsZoneId. + // +kubebuilder:validation:Optional + DNSZoneIDRef *v1.Reference `json:"dnsZoneIdRef,omitempty" tf:"-"` + + // Selector for a DNSZone in network to populate dnsZoneId. + // +kubebuilder:validation:Optional + DNSZoneIDSelector *v1.Selector `json:"dnsZoneIdSelector,omitempty" tf:"-"` + + // The host name of the domain. The host_name field must be the FQDN of your domain(e.g. contoso.fabrikam.com). Changing this forces a new Front Door Custom Domain to be created. + HostName *string `json:"hostName,omitempty" tf:"host_name,omitempty"` + + // A tls block as defined below. + TLS *TLSInitParameters `json:"tls,omitempty" tf:"tls,omitempty"` +} + +type FrontdoorCustomDomainObservation struct { + + // The ID of the Front Door Profile. Changing this forces a new Front Door Custom Domain to be created. + CdnFrontdoorProfileID *string `json:"cdnFrontdoorProfileId,omitempty" tf:"cdn_frontdoor_profile_id,omitempty"` + + // The ID of the Azure DNS Zone which should be used for this Front Door Custom Domain. If you are using Azure to host your DNS domains, you must delegate the domain provider's domain name system (DNS) to an Azure DNS Zone. For more information, see Delegate a domain to Azure DNS. Otherwise, if you're using your own domain provider to handle your DNS, you must validate the Front Door Custom Domain by creating the DNS TXT records manually. + DNSZoneID *string `json:"dnsZoneId,omitempty" tf:"dns_zone_id,omitempty"` + + // The date time that the token expires. + ExpirationDate *string `json:"expirationDate,omitempty" tf:"expiration_date,omitempty"` + + // The host name of the domain. 
The host_name field must be the FQDN of your domain(e.g. contoso.fabrikam.com). Changing this forces a new Front Door Custom Domain to be created. + HostName *string `json:"hostName,omitempty" tf:"host_name,omitempty"` + + // The ID of the Front Door Custom Domain. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A tls block as defined below. + TLS *TLSObservation `json:"tls,omitempty" tf:"tls,omitempty"` + + // Challenge used for DNS TXT record or file based validation. + ValidationToken *string `json:"validationToken,omitempty" tf:"validation_token,omitempty"` +} + +type FrontdoorCustomDomainParameters struct { + + // The ID of the Front Door Profile. Changing this forces a new Front Door Custom Domain to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cdn/v1beta1.FrontdoorProfile + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + CdnFrontdoorProfileID *string `json:"cdnFrontdoorProfileId,omitempty" tf:"cdn_frontdoor_profile_id,omitempty"` + + // Reference to a FrontdoorProfile in cdn to populate cdnFrontdoorProfileId. + // +kubebuilder:validation:Optional + CdnFrontdoorProfileIDRef *v1.Reference `json:"cdnFrontdoorProfileIdRef,omitempty" tf:"-"` + + // Selector for a FrontdoorProfile in cdn to populate cdnFrontdoorProfileId. + // +kubebuilder:validation:Optional + CdnFrontdoorProfileIDSelector *v1.Selector `json:"cdnFrontdoorProfileIdSelector,omitempty" tf:"-"` + + // The ID of the Azure DNS Zone which should be used for this Front Door Custom Domain. If you are using Azure to host your DNS domains, you must delegate the domain provider's domain name system (DNS) to an Azure DNS Zone. For more information, see Delegate a domain to Azure DNS. Otherwise, if you're using your own domain provider to handle your DNS, you must validate the Front Door Custom Domain by creating the DNS TXT records manually. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.DNSZone + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + DNSZoneID *string `json:"dnsZoneId,omitempty" tf:"dns_zone_id,omitempty"` + + // Reference to a DNSZone in network to populate dnsZoneId. + // +kubebuilder:validation:Optional + DNSZoneIDRef *v1.Reference `json:"dnsZoneIdRef,omitempty" tf:"-"` + + // Selector for a DNSZone in network to populate dnsZoneId. + // +kubebuilder:validation:Optional + DNSZoneIDSelector *v1.Selector `json:"dnsZoneIdSelector,omitempty" tf:"-"` + + // The host name of the domain. The host_name field must be the FQDN of your domain(e.g. contoso.fabrikam.com). Changing this forces a new Front Door Custom Domain to be created. + // +kubebuilder:validation:Optional + HostName *string `json:"hostName,omitempty" tf:"host_name,omitempty"` + + // A tls block as defined below. + // +kubebuilder:validation:Optional + TLS *TLSParameters `json:"tls,omitempty" tf:"tls,omitempty"` +} + +type TLSInitParameters struct { + + // Resource ID of the Front Door Secret. + CdnFrontdoorSecretID *string `json:"cdnFrontdoorSecretId,omitempty" tf:"cdn_frontdoor_secret_id,omitempty"` + + // Defines the source of the SSL certificate. Possible values include CustomerCertificate and ManagedCertificate. Defaults to ManagedCertificate. + CertificateType *string `json:"certificateType,omitempty" tf:"certificate_type,omitempty"` + + // TLS protocol version that will be used for Https. Possible values include TLS10 and TLS12. Defaults to TLS12. + MinimumTLSVersion *string `json:"minimumTlsVersion,omitempty" tf:"minimum_tls_version,omitempty"` +} + +type TLSObservation struct { + + // Resource ID of the Front Door Secret. + CdnFrontdoorSecretID *string `json:"cdnFrontdoorSecretId,omitempty" tf:"cdn_frontdoor_secret_id,omitempty"` + + // Defines the source of the SSL certificate. 
Possible values include CustomerCertificate and ManagedCertificate. Defaults to ManagedCertificate. + CertificateType *string `json:"certificateType,omitempty" tf:"certificate_type,omitempty"` + + // TLS protocol version that will be used for Https. Possible values include TLS10 and TLS12. Defaults to TLS12. + MinimumTLSVersion *string `json:"minimumTlsVersion,omitempty" tf:"minimum_tls_version,omitempty"` +} + +type TLSParameters struct { + + // Resource ID of the Front Door Secret. + // +kubebuilder:validation:Optional + CdnFrontdoorSecretID *string `json:"cdnFrontdoorSecretId,omitempty" tf:"cdn_frontdoor_secret_id,omitempty"` + + // Defines the source of the SSL certificate. Possible values include CustomerCertificate and ManagedCertificate. Defaults to ManagedCertificate. + // +kubebuilder:validation:Optional + CertificateType *string `json:"certificateType,omitempty" tf:"certificate_type,omitempty"` + + // TLS protocol version that will be used for Https. Possible values include TLS10 and TLS12. Defaults to TLS12. + // +kubebuilder:validation:Optional + MinimumTLSVersion *string `json:"minimumTlsVersion,omitempty" tf:"minimum_tls_version,omitempty"` +} + +// FrontdoorCustomDomainSpec defines the desired state of FrontdoorCustomDomain +type FrontdoorCustomDomainSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider FrontdoorCustomDomainParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider FrontdoorCustomDomainInitParameters `json:"initProvider,omitempty"` +} + +// FrontdoorCustomDomainStatus defines the observed state of FrontdoorCustomDomain. +type FrontdoorCustomDomainStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider FrontdoorCustomDomainObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// FrontdoorCustomDomain is the Schema for the FrontdoorCustomDomains API. Manages a Front Door (standard/premium) Custom Domain. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type FrontdoorCustomDomain struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.hostName) || (has(self.initProvider) && has(self.initProvider.hostName))",message="spec.forProvider.hostName is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.tls) || (has(self.initProvider) && has(self.initProvider.tls))",message="spec.forProvider.tls is a required 
parameter" + Spec FrontdoorCustomDomainSpec `json:"spec"` + Status FrontdoorCustomDomainStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// FrontdoorCustomDomainList contains a list of FrontdoorCustomDomains +type FrontdoorCustomDomainList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []FrontdoorCustomDomain `json:"items"` +} + +// Repository type metadata. +var ( + FrontdoorCustomDomain_Kind = "FrontdoorCustomDomain" + FrontdoorCustomDomain_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: FrontdoorCustomDomain_Kind}.String() + FrontdoorCustomDomain_KindAPIVersion = FrontdoorCustomDomain_Kind + "." + CRDGroupVersion.String() + FrontdoorCustomDomain_GroupVersionKind = CRDGroupVersion.WithKind(FrontdoorCustomDomain_Kind) +) + +func init() { + SchemeBuilder.Register(&FrontdoorCustomDomain{}, &FrontdoorCustomDomainList{}) +} diff --git a/apis/cdn/v1beta2/zz_frontdoororigin_terraformed.go b/apis/cdn/v1beta2/zz_frontdoororigin_terraformed.go new file mode 100755 index 000000000..c4c8ec9ce --- /dev/null +++ b/apis/cdn/v1beta2/zz_frontdoororigin_terraformed.go @@ -0,0 +1,130 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this FrontdoorOrigin +func (mg *FrontdoorOrigin) GetTerraformResourceType() string { + return "azurerm_cdn_frontdoor_origin" +} + +// GetConnectionDetailsMapping for this FrontdoorOrigin +func (tr *FrontdoorOrigin) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this FrontdoorOrigin +func (tr *FrontdoorOrigin) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this FrontdoorOrigin +func (tr *FrontdoorOrigin) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this FrontdoorOrigin +func (tr *FrontdoorOrigin) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this FrontdoorOrigin +func (tr *FrontdoorOrigin) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this FrontdoorOrigin +func (tr *FrontdoorOrigin) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this FrontdoorOrigin +func (tr *FrontdoorOrigin) GetInitParameters() (map[string]any, error) { + p, err := 
json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this FrontdoorOrigin +func (tr *FrontdoorOrigin) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this FrontdoorOrigin using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *FrontdoorOrigin) LateInitialize(attrs []byte) (bool, error) { + params := &FrontdoorOriginParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + opts = append(opts, resource.WithNameFilter("HealthProbesEnabled")) + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *FrontdoorOrigin) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/cdn/v1beta2/zz_frontdoororigin_types.go b/apis/cdn/v1beta2/zz_frontdoororigin_types.go new file mode 100755 index 000000000..45fe09ecf --- /dev/null +++ b/apis/cdn/v1beta2/zz_frontdoororigin_types.go @@ -0,0 +1,333 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type FrontdoorOriginInitParameters struct { + + // Specifies whether certificate name checks are enabled for this origin. + CertificateNameCheckEnabled *bool `json:"certificateNameCheckEnabled,omitempty" tf:"certificate_name_check_enabled,omitempty"` + + // Should the origin be enabled? Possible values are true or false. Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The value of the HTTP port. Must be between 1 and 65535. Defaults to 80. + HTTPPort *float64 `json:"httpPort,omitempty" tf:"http_port,omitempty"` + + // The value of the HTTPS port. Must be between 1 and 65535. Defaults to 443. + HTTPSPort *float64 `json:"httpsPort,omitempty" tf:"https_port,omitempty"` + + // Should the origin be enabled? Possible values are true or false. Defaults to true. + HealthProbesEnabled *bool `json:"healthProbesEnabled,omitempty" tf:"health_probes_enabled,omitempty"` + + // The IPv4 address, IPv6 address or Domain name of the Origin. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("primary_blob_host",true) + HostName *string `json:"hostName,omitempty" tf:"host_name,omitempty"` + + // Reference to a Account in storage to populate hostName. + // +kubebuilder:validation:Optional + HostNameRef *v1.Reference `json:"hostNameRef,omitempty" tf:"-"` + + // Selector for a Account in storage to populate hostName. + // +kubebuilder:validation:Optional + HostNameSelector *v1.Selector `json:"hostNameSelector,omitempty" tf:"-"` + + // The host header value (an IPv4 address, IPv6 address or Domain name) which is sent to the origin with each request. If unspecified the hostname from the request will be used. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("primary_blob_host",true) + OriginHostHeader *string `json:"originHostHeader,omitempty" tf:"origin_host_header,omitempty"` + + // Reference to a Account in storage to populate originHostHeader. + // +kubebuilder:validation:Optional + OriginHostHeaderRef *v1.Reference `json:"originHostHeaderRef,omitempty" tf:"-"` + + // Selector for a Account in storage to populate originHostHeader. + // +kubebuilder:validation:Optional + OriginHostHeaderSelector *v1.Selector `json:"originHostHeaderSelector,omitempty" tf:"-"` + + // Priority of origin in given origin group for load balancing. Higher priorities will not be used for load balancing if any lower priority origin is healthy. Must be between 1 and 5 (inclusive). Defaults to 1. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // A private_link block as defined below. 
+ PrivateLink *PrivateLinkInitParameters `json:"privateLink,omitempty" tf:"private_link,omitempty"` + + // The weight of the origin in a given origin group for load balancing. Must be between 1 and 1000. Defaults to 500. + Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` +} + +type FrontdoorOriginObservation struct { + + // The ID of the Front Door Origin Group within which this Front Door Origin should exist. Changing this forces a new Front Door Origin to be created. + CdnFrontdoorOriginGroupID *string `json:"cdnFrontdoorOriginGroupId,omitempty" tf:"cdn_frontdoor_origin_group_id,omitempty"` + + // Specifies whether certificate name checks are enabled for this origin. + CertificateNameCheckEnabled *bool `json:"certificateNameCheckEnabled,omitempty" tf:"certificate_name_check_enabled,omitempty"` + + // Should the origin be enabled? Possible values are true or false. Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The value of the HTTP port. Must be between 1 and 65535. Defaults to 80. + HTTPPort *float64 `json:"httpPort,omitempty" tf:"http_port,omitempty"` + + // The value of the HTTPS port. Must be between 1 and 65535. Defaults to 443. + HTTPSPort *float64 `json:"httpsPort,omitempty" tf:"https_port,omitempty"` + + // Should the origin be enabled? Possible values are true or false. Defaults to true. + HealthProbesEnabled *bool `json:"healthProbesEnabled,omitempty" tf:"health_probes_enabled,omitempty"` + + // The IPv4 address, IPv6 address or Domain name of the Origin. + HostName *string `json:"hostName,omitempty" tf:"host_name,omitempty"` + + // The ID of the Front Door Origin. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The host header value (an IPv4 address, IPv6 address or Domain name) which is sent to the origin with each request. If unspecified the hostname from the request will be used. 
+ OriginHostHeader *string `json:"originHostHeader,omitempty" tf:"origin_host_header,omitempty"` + + // Priority of origin in given origin group for load balancing. Higher priorities will not be used for load balancing if any lower priority origin is healthy. Must be between 1 and 5 (inclusive). Defaults to 1. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // A private_link block as defined below. + PrivateLink *PrivateLinkObservation `json:"privateLink,omitempty" tf:"private_link,omitempty"` + + // The weight of the origin in a given origin group for load balancing. Must be between 1 and 1000. Defaults to 500. + Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` +} + +type FrontdoorOriginParameters struct { + + // The ID of the Front Door Origin Group within which this Front Door Origin should exist. Changing this forces a new Front Door Origin to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cdn/v1beta2.FrontdoorOriginGroup + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + CdnFrontdoorOriginGroupID *string `json:"cdnFrontdoorOriginGroupId,omitempty" tf:"cdn_frontdoor_origin_group_id,omitempty"` + + // Reference to a FrontdoorOriginGroup in cdn to populate cdnFrontdoorOriginGroupId. + // +kubebuilder:validation:Optional + CdnFrontdoorOriginGroupIDRef *v1.Reference `json:"cdnFrontdoorOriginGroupIdRef,omitempty" tf:"-"` + + // Selector for a FrontdoorOriginGroup in cdn to populate cdnFrontdoorOriginGroupId. + // +kubebuilder:validation:Optional + CdnFrontdoorOriginGroupIDSelector *v1.Selector `json:"cdnFrontdoorOriginGroupIdSelector,omitempty" tf:"-"` + + // Specifies whether certificate name checks are enabled for this origin. 
+ // +kubebuilder:validation:Optional + CertificateNameCheckEnabled *bool `json:"certificateNameCheckEnabled,omitempty" tf:"certificate_name_check_enabled,omitempty"` + + // Should the origin be enabled? Possible values are true or false. Defaults to true. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The value of the HTTP port. Must be between 1 and 65535. Defaults to 80. + // +kubebuilder:validation:Optional + HTTPPort *float64 `json:"httpPort,omitempty" tf:"http_port,omitempty"` + + // The value of the HTTPS port. Must be between 1 and 65535. Defaults to 443. + // +kubebuilder:validation:Optional + HTTPSPort *float64 `json:"httpsPort,omitempty" tf:"https_port,omitempty"` + + // Should the origin be enabled? Possible values are true or false. Defaults to true. + // +kubebuilder:validation:Optional + HealthProbesEnabled *bool `json:"healthProbesEnabled,omitempty" tf:"health_probes_enabled,omitempty"` + + // The IPv4 address, IPv6 address or Domain name of the Origin. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("primary_blob_host",true) + // +kubebuilder:validation:Optional + HostName *string `json:"hostName,omitempty" tf:"host_name,omitempty"` + + // Reference to a Account in storage to populate hostName. + // +kubebuilder:validation:Optional + HostNameRef *v1.Reference `json:"hostNameRef,omitempty" tf:"-"` + + // Selector for a Account in storage to populate hostName. + // +kubebuilder:validation:Optional + HostNameSelector *v1.Selector `json:"hostNameSelector,omitempty" tf:"-"` + + // The host header value (an IPv4 address, IPv6 address or Domain name) which is sent to the origin with each request. If unspecified the hostname from the request will be used. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("primary_blob_host",true) + // +kubebuilder:validation:Optional + OriginHostHeader *string `json:"originHostHeader,omitempty" tf:"origin_host_header,omitempty"` + + // Reference to a Account in storage to populate originHostHeader. + // +kubebuilder:validation:Optional + OriginHostHeaderRef *v1.Reference `json:"originHostHeaderRef,omitempty" tf:"-"` + + // Selector for a Account in storage to populate originHostHeader. + // +kubebuilder:validation:Optional + OriginHostHeaderSelector *v1.Selector `json:"originHostHeaderSelector,omitempty" tf:"-"` + + // Priority of origin in given origin group for load balancing. Higher priorities will not be used for load balancing if any lower priority origin is healthy. Must be between 1 and 5 (inclusive). Defaults to 1. + // +kubebuilder:validation:Optional + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // A private_link block as defined below. + // +kubebuilder:validation:Optional + PrivateLink *PrivateLinkParameters `json:"privateLink,omitempty" tf:"private_link,omitempty"` + + // The weight of the origin in a given origin group for load balancing. Must be between 1 and 1000. Defaults to 500. + // +kubebuilder:validation:Optional + Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` +} + +type PrivateLinkInitParameters struct { + + // Specifies the location where the Private Link resource should exist. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("location",false) + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Reference to a Account in storage to populate location. + // +kubebuilder:validation:Optional + LocationRef *v1.Reference `json:"locationRef,omitempty" tf:"-"` + + // Selector for a Account in storage to populate location. + // +kubebuilder:validation:Optional + LocationSelector *v1.Selector `json:"locationSelector,omitempty" tf:"-"` + + // The ID of the Azure Resource to connect to via the Private Link. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + PrivateLinkTargetID *string `json:"privateLinkTargetId,omitempty" tf:"private_link_target_id,omitempty"` + + // Reference to a Account in storage to populate privateLinkTargetId. + // +kubebuilder:validation:Optional + PrivateLinkTargetIDRef *v1.Reference `json:"privateLinkTargetIdRef,omitempty" tf:"-"` + + // Selector for a Account in storage to populate privateLinkTargetId. + // +kubebuilder:validation:Optional + PrivateLinkTargetIDSelector *v1.Selector `json:"privateLinkTargetIdSelector,omitempty" tf:"-"` + + // Specifies the request message that will be submitted to the private_link_target_id when requesting the private link endpoint connection. Values must be between 1 and 140 characters in length. Defaults to Access request for CDN FrontDoor Private Link Origin. + RequestMessage *string `json:"requestMessage,omitempty" tf:"request_message,omitempty"` + + // Specifies the type of target for this Private Link Endpoint. Possible values are blob, blob_secondary, web and sites. 
+ TargetType *string `json:"targetType,omitempty" tf:"target_type,omitempty"` +} + +type PrivateLinkObservation struct { + + // Specifies the location where the Private Link resource should exist. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The ID of the Azure Resource to connect to via the Private Link. + PrivateLinkTargetID *string `json:"privateLinkTargetId,omitempty" tf:"private_link_target_id,omitempty"` + + // Specifies the request message that will be submitted to the private_link_target_id when requesting the private link endpoint connection. Values must be between 1 and 140 characters in length. Defaults to Access request for CDN FrontDoor Private Link Origin. + RequestMessage *string `json:"requestMessage,omitempty" tf:"request_message,omitempty"` + + // Specifies the type of target for this Private Link Endpoint. Possible values are blob, blob_secondary, web and sites. + TargetType *string `json:"targetType,omitempty" tf:"target_type,omitempty"` +} + +type PrivateLinkParameters struct { + + // Specifies the location where the Private Link resource should exist. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("location",false) + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Reference to a Account in storage to populate location. + // +kubebuilder:validation:Optional + LocationRef *v1.Reference `json:"locationRef,omitempty" tf:"-"` + + // Selector for a Account in storage to populate location. + // +kubebuilder:validation:Optional + LocationSelector *v1.Selector `json:"locationSelector,omitempty" tf:"-"` + + // The ID of the Azure Resource to connect to via the Private Link. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + PrivateLinkTargetID *string `json:"privateLinkTargetId,omitempty" tf:"private_link_target_id,omitempty"` + + // Reference to a Account in storage to populate privateLinkTargetId. + // +kubebuilder:validation:Optional + PrivateLinkTargetIDRef *v1.Reference `json:"privateLinkTargetIdRef,omitempty" tf:"-"` + + // Selector for a Account in storage to populate privateLinkTargetId. + // +kubebuilder:validation:Optional + PrivateLinkTargetIDSelector *v1.Selector `json:"privateLinkTargetIdSelector,omitempty" tf:"-"` + + // Specifies the request message that will be submitted to the private_link_target_id when requesting the private link endpoint connection. Values must be between 1 and 140 characters in length. Defaults to Access request for CDN FrontDoor Private Link Origin. + // +kubebuilder:validation:Optional + RequestMessage *string `json:"requestMessage,omitempty" tf:"request_message,omitempty"` + + // Specifies the type of target for this Private Link Endpoint. Possible values are blob, blob_secondary, web and sites. + // +kubebuilder:validation:Optional + TargetType *string `json:"targetType,omitempty" tf:"target_type,omitempty"` +} + +// FrontdoorOriginSpec defines the desired state of FrontdoorOrigin +type FrontdoorOriginSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider FrontdoorOriginParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. 
+ // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider FrontdoorOriginInitParameters `json:"initProvider,omitempty"` +} + +// FrontdoorOriginStatus defines the observed state of FrontdoorOrigin. +type FrontdoorOriginStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider FrontdoorOriginObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// FrontdoorOrigin is the Schema for the FrontdoorOrigins API. Manages a Front Door (standard/premium) Origin. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type FrontdoorOrigin struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.certificateNameCheckEnabled) || (has(self.initProvider) && has(self.initProvider.certificateNameCheckEnabled))",message="spec.forProvider.certificateNameCheckEnabled is a required parameter" + Spec FrontdoorOriginSpec `json:"spec"` + Status FrontdoorOriginStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// FrontdoorOriginList contains a 
list of FrontdoorOrigins +type FrontdoorOriginList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []FrontdoorOrigin `json:"items"` +} + +// Repository type metadata. +var ( + FrontdoorOrigin_Kind = "FrontdoorOrigin" + FrontdoorOrigin_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: FrontdoorOrigin_Kind}.String() + FrontdoorOrigin_KindAPIVersion = FrontdoorOrigin_Kind + "." + CRDGroupVersion.String() + FrontdoorOrigin_GroupVersionKind = CRDGroupVersion.WithKind(FrontdoorOrigin_Kind) +) + +func init() { + SchemeBuilder.Register(&FrontdoorOrigin{}, &FrontdoorOriginList{}) +} diff --git a/apis/cdn/v1beta2/zz_frontdoororigingroup_terraformed.go b/apis/cdn/v1beta2/zz_frontdoororigingroup_terraformed.go new file mode 100755 index 000000000..30570aeec --- /dev/null +++ b/apis/cdn/v1beta2/zz_frontdoororigingroup_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this FrontdoorOriginGroup +func (mg *FrontdoorOriginGroup) GetTerraformResourceType() string { + return "azurerm_cdn_frontdoor_origin_group" +} + +// GetConnectionDetailsMapping for this FrontdoorOriginGroup +func (tr *FrontdoorOriginGroup) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this FrontdoorOriginGroup +func (tr *FrontdoorOriginGroup) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this FrontdoorOriginGroup +func (tr *FrontdoorOriginGroup) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this FrontdoorOriginGroup +func (tr *FrontdoorOriginGroup) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this FrontdoorOriginGroup +func (tr *FrontdoorOriginGroup) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this FrontdoorOriginGroup +func (tr *FrontdoorOriginGroup) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this FrontdoorOriginGroup +func (tr *FrontdoorOriginGroup) 
GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this FrontdoorOriginGroup +func (tr *FrontdoorOriginGroup) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this FrontdoorOriginGroup using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *FrontdoorOriginGroup) LateInitialize(attrs []byte) (bool, error) { + params := &FrontdoorOriginGroupParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *FrontdoorOriginGroup) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/cdn/v1beta2/zz_frontdoororigingroup_types.go b/apis/cdn/v1beta2/zz_frontdoororigingroup_types.go new file mode 100755 index 000000000..86c2dbfa4 --- /dev/null +++ b/apis/cdn/v1beta2/zz_frontdoororigingroup_types.go @@ -0,0 +1,232 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type FrontdoorOriginGroupInitParameters struct { + + // A health_probe block as defined below. + HealthProbe *HealthProbeInitParameters `json:"healthProbe,omitempty" tf:"health_probe,omitempty"` + + // A load_balancing block as defined below. + LoadBalancing *LoadBalancingInitParameters `json:"loadBalancing,omitempty" tf:"load_balancing,omitempty"` + + // Specifies the amount of time which should elapse before shifting traffic to another endpoint when a healthy endpoint becomes unhealthy or a new endpoint is added. Possible values are between 0 and 50 minutes (inclusive). Default is 10 minutes. + RestoreTrafficTimeToHealedOrNewEndpointInMinutes *float64 `json:"restoreTrafficTimeToHealedOrNewEndpointInMinutes,omitempty" tf:"restore_traffic_time_to_healed_or_new_endpoint_in_minutes,omitempty"` + + // Specifies whether session affinity should be enabled on this host. Defaults to true. + SessionAffinityEnabled *bool `json:"sessionAffinityEnabled,omitempty" tf:"session_affinity_enabled,omitempty"` +} + +type FrontdoorOriginGroupObservation struct { + + // The ID of the Front Door Profile within which this Front Door Origin Group should exist. 
Changing this forces a new Front Door Origin Group to be created. + CdnFrontdoorProfileID *string `json:"cdnFrontdoorProfileId,omitempty" tf:"cdn_frontdoor_profile_id,omitempty"` + + // A health_probe block as defined below. + HealthProbe *HealthProbeObservation `json:"healthProbe,omitempty" tf:"health_probe,omitempty"` + + // The ID of the Front Door Origin Group. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A load_balancing block as defined below. + LoadBalancing *LoadBalancingObservation `json:"loadBalancing,omitempty" tf:"load_balancing,omitempty"` + + // Specifies the amount of time which should elapse before shifting traffic to another endpoint when a healthy endpoint becomes unhealthy or a new endpoint is added. Possible values are between 0 and 50 minutes (inclusive). Default is 10 minutes. + RestoreTrafficTimeToHealedOrNewEndpointInMinutes *float64 `json:"restoreTrafficTimeToHealedOrNewEndpointInMinutes,omitempty" tf:"restore_traffic_time_to_healed_or_new_endpoint_in_minutes,omitempty"` + + // Specifies whether session affinity should be enabled on this host. Defaults to true. + SessionAffinityEnabled *bool `json:"sessionAffinityEnabled,omitempty" tf:"session_affinity_enabled,omitempty"` +} + +type FrontdoorOriginGroupParameters struct { + + // The ID of the Front Door Profile within which this Front Door Origin Group should exist. Changing this forces a new Front Door Origin Group to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cdn/v1beta1.FrontdoorProfile + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + CdnFrontdoorProfileID *string `json:"cdnFrontdoorProfileId,omitempty" tf:"cdn_frontdoor_profile_id,omitempty"` + + // Reference to a FrontdoorProfile in cdn to populate cdnFrontdoorProfileId. 
+ // +kubebuilder:validation:Optional + CdnFrontdoorProfileIDRef *v1.Reference `json:"cdnFrontdoorProfileIdRef,omitempty" tf:"-"` + + // Selector for a FrontdoorProfile in cdn to populate cdnFrontdoorProfileId. + // +kubebuilder:validation:Optional + CdnFrontdoorProfileIDSelector *v1.Selector `json:"cdnFrontdoorProfileIdSelector,omitempty" tf:"-"` + + // A health_probe block as defined below. + // +kubebuilder:validation:Optional + HealthProbe *HealthProbeParameters `json:"healthProbe,omitempty" tf:"health_probe,omitempty"` + + // A load_balancing block as defined below. + // +kubebuilder:validation:Optional + LoadBalancing *LoadBalancingParameters `json:"loadBalancing,omitempty" tf:"load_balancing,omitempty"` + + // Specifies the amount of time which should elapse before shifting traffic to another endpoint when a healthy endpoint becomes unhealthy or a new endpoint is added. Possible values are between 0 and 50 minutes (inclusive). Default is 10 minutes. + // +kubebuilder:validation:Optional + RestoreTrafficTimeToHealedOrNewEndpointInMinutes *float64 `json:"restoreTrafficTimeToHealedOrNewEndpointInMinutes,omitempty" tf:"restore_traffic_time_to_healed_or_new_endpoint_in_minutes,omitempty"` + + // Specifies whether session affinity should be enabled on this host. Defaults to true. + // +kubebuilder:validation:Optional + SessionAffinityEnabled *bool `json:"sessionAffinityEnabled,omitempty" tf:"session_affinity_enabled,omitempty"` +} + +type HealthProbeInitParameters struct { + + // Specifies the number of seconds between health probes. Possible values are between 5 and 31536000 seconds (inclusive). + IntervalInSeconds *float64 `json:"intervalInSeconds,omitempty" tf:"interval_in_seconds,omitempty"` + + // Specifies the path relative to the origin that is used to determine the health of the origin. Defaults to /. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // Specifies the protocol to use for health probe. Possible values are Http and Https. 
+ Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // Specifies the type of health probe request that is made. Possible values are GET and HEAD. Defaults to HEAD. + RequestType *string `json:"requestType,omitempty" tf:"request_type,omitempty"` +} + +type HealthProbeObservation struct { + + // Specifies the number of seconds between health probes. Possible values are between 5 and 31536000 seconds (inclusive). + IntervalInSeconds *float64 `json:"intervalInSeconds,omitempty" tf:"interval_in_seconds,omitempty"` + + // Specifies the path relative to the origin that is used to determine the health of the origin. Defaults to /. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // Specifies the protocol to use for health probe. Possible values are Http and Https. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // Specifies the type of health probe request that is made. Possible values are GET and HEAD. Defaults to HEAD. + RequestType *string `json:"requestType,omitempty" tf:"request_type,omitempty"` +} + +type HealthProbeParameters struct { + + // Specifies the number of seconds between health probes. Possible values are between 5 and 31536000 seconds (inclusive). + // +kubebuilder:validation:Optional + IntervalInSeconds *float64 `json:"intervalInSeconds" tf:"interval_in_seconds,omitempty"` + + // Specifies the path relative to the origin that is used to determine the health of the origin. Defaults to /. + // +kubebuilder:validation:Optional + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // Specifies the protocol to use for health probe. Possible values are Http and Https. + // +kubebuilder:validation:Optional + Protocol *string `json:"protocol" tf:"protocol,omitempty"` + + // Specifies the type of health probe request that is made. Possible values are GET and HEAD. Defaults to HEAD. 
+ // +kubebuilder:validation:Optional + RequestType *string `json:"requestType,omitempty" tf:"request_type,omitempty"` +} + +type LoadBalancingInitParameters struct { + + // Specifies the additional latency in milliseconds for probes to fall into the lowest latency bucket. Possible values are between 0 and 1000 milliseconds (inclusive). Defaults to 50. + AdditionalLatencyInMilliseconds *float64 `json:"additionalLatencyInMilliseconds,omitempty" tf:"additional_latency_in_milliseconds,omitempty"` + + // Specifies the number of samples to consider for load balancing decisions. Possible values are between 0 and 255 (inclusive). Defaults to 4. + SampleSize *float64 `json:"sampleSize,omitempty" tf:"sample_size,omitempty"` + + // Specifies the number of samples within the sample period that must succeed. Possible values are between 0 and 255 (inclusive). Defaults to 3. + SuccessfulSamplesRequired *float64 `json:"successfulSamplesRequired,omitempty" tf:"successful_samples_required,omitempty"` +} + +type LoadBalancingObservation struct { + + // Specifies the additional latency in milliseconds for probes to fall into the lowest latency bucket. Possible values are between 0 and 1000 milliseconds (inclusive). Defaults to 50. + AdditionalLatencyInMilliseconds *float64 `json:"additionalLatencyInMilliseconds,omitempty" tf:"additional_latency_in_milliseconds,omitempty"` + + // Specifies the number of samples to consider for load balancing decisions. Possible values are between 0 and 255 (inclusive). Defaults to 4. + SampleSize *float64 `json:"sampleSize,omitempty" tf:"sample_size,omitempty"` + + // Specifies the number of samples within the sample period that must succeed. Possible values are between 0 and 255 (inclusive). Defaults to 3. 
+ SuccessfulSamplesRequired *float64 `json:"successfulSamplesRequired,omitempty" tf:"successful_samples_required,omitempty"` +} + +type LoadBalancingParameters struct { + + // Specifies the additional latency in milliseconds for probes to fall into the lowest latency bucket. Possible values are between 0 and 1000 milliseconds (inclusive). Defaults to 50. + // +kubebuilder:validation:Optional + AdditionalLatencyInMilliseconds *float64 `json:"additionalLatencyInMilliseconds,omitempty" tf:"additional_latency_in_milliseconds,omitempty"` + + // Specifies the number of samples to consider for load balancing decisions. Possible values are between 0 and 255 (inclusive). Defaults to 4. + // +kubebuilder:validation:Optional + SampleSize *float64 `json:"sampleSize,omitempty" tf:"sample_size,omitempty"` + + // Specifies the number of samples within the sample period that must succeed. Possible values are between 0 and 255 (inclusive). Defaults to 3. + // +kubebuilder:validation:Optional + SuccessfulSamplesRequired *float64 `json:"successfulSamplesRequired,omitempty" tf:"successful_samples_required,omitempty"` +} + +// FrontdoorOriginGroupSpec defines the desired state of FrontdoorOriginGroup +type FrontdoorOriginGroupSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider FrontdoorOriginGroupParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider FrontdoorOriginGroupInitParameters `json:"initProvider,omitempty"` +} + +// FrontdoorOriginGroupStatus defines the observed state of FrontdoorOriginGroup. +type FrontdoorOriginGroupStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider FrontdoorOriginGroupObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// FrontdoorOriginGroup is the Schema for the FrontdoorOriginGroups API. Manages a Front Door (standard/premium) Origin Group. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type FrontdoorOriginGroup struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.loadBalancing) || (has(self.initProvider) && has(self.initProvider.loadBalancing))",message="spec.forProvider.loadBalancing is a required parameter" + Spec FrontdoorOriginGroupSpec `json:"spec"` + Status FrontdoorOriginGroupStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// FrontdoorOriginGroupList contains a list of FrontdoorOriginGroups +type FrontdoorOriginGroupList struct { + metav1.TypeMeta 
`json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []FrontdoorOriginGroup `json:"items"` +} + +// Repository type metadata. +var ( + FrontdoorOriginGroup_Kind = "FrontdoorOriginGroup" + FrontdoorOriginGroup_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: FrontdoorOriginGroup_Kind}.String() + FrontdoorOriginGroup_KindAPIVersion = FrontdoorOriginGroup_Kind + "." + CRDGroupVersion.String() + FrontdoorOriginGroup_GroupVersionKind = CRDGroupVersion.WithKind(FrontdoorOriginGroup_Kind) +) + +func init() { + SchemeBuilder.Register(&FrontdoorOriginGroup{}, &FrontdoorOriginGroupList{}) +} diff --git a/apis/cdn/v1beta2/zz_frontdoorroute_terraformed.go b/apis/cdn/v1beta2/zz_frontdoorroute_terraformed.go new file mode 100755 index 000000000..afaa32f7f --- /dev/null +++ b/apis/cdn/v1beta2/zz_frontdoorroute_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this FrontdoorRoute +func (mg *FrontdoorRoute) GetTerraformResourceType() string { + return "azurerm_cdn_frontdoor_route" +} + +// GetConnectionDetailsMapping for this FrontdoorRoute +func (tr *FrontdoorRoute) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this FrontdoorRoute +func (tr *FrontdoorRoute) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this FrontdoorRoute +func (tr *FrontdoorRoute) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this FrontdoorRoute +func (tr *FrontdoorRoute) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this FrontdoorRoute +func (tr *FrontdoorRoute) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this FrontdoorRoute +func (tr *FrontdoorRoute) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this FrontdoorRoute +func (tr *FrontdoorRoute) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err 
!= nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this FrontdoorRoute
+func (tr *FrontdoorRoute) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this FrontdoorRoute using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *FrontdoorRoute) LateInitialize(attrs []byte) (bool, error) {
+	params := &FrontdoorRouteParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *FrontdoorRoute) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/cdn/v1beta2/zz_frontdoorroute_types.go b/apis/cdn/v1beta2/zz_frontdoorroute_types.go new file mode 100755 index 000000000..a3e3a1c8e --- /dev/null +++ b/apis/cdn/v1beta2/zz_frontdoorroute_types.go @@ -0,0 +1,363 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CacheInitParameters struct { + + // Is content compression enabled? Possible values are true or false. Defaults to false. + CompressionEnabled *bool `json:"compressionEnabled,omitempty" tf:"compression_enabled,omitempty"` + + // A list of one or more Content types (formerly known as MIME types) to compress. Possible values include application/eot, application/font, application/font-sfnt, application/javascript, application/json, application/opentype, application/otf, application/pkcs7-mime, application/truetype, application/ttf, application/vnd.ms-fontobject, application/xhtml+xml, application/xml, application/xml+rss, application/x-font-opentype, application/x-font-truetype, application/x-font-ttf, application/x-httpd-cgi, application/x-mpegurl, application/x-opentype, application/x-otf, application/x-perl, application/x-ttf, application/x-javascript, font/eot, font/ttf, font/otf, font/opentype, image/svg+xml, text/css, text/csv, text/html, text/javascript, text/js, text/plain, text/richtext, text/tab-separated-values, text/xml, text/x-script, text/x-component or text/x-java-source. 
+ ContentTypesToCompress []*string `json:"contentTypesToCompress,omitempty" tf:"content_types_to_compress,omitempty"` + + // Defines how the Front Door Route will cache requests that include query strings. Possible values include IgnoreQueryString, IgnoreSpecifiedQueryStrings, IncludeSpecifiedQueryStrings or UseQueryString. Defaults to IgnoreQueryString. + QueryStringCachingBehavior *string `json:"queryStringCachingBehavior,omitempty" tf:"query_string_caching_behavior,omitempty"` + + // Query strings to include or ignore. + QueryStrings []*string `json:"queryStrings,omitempty" tf:"query_strings,omitempty"` +} + +type CacheObservation struct { + + // Is content compression enabled? Possible values are true or false. Defaults to false. + CompressionEnabled *bool `json:"compressionEnabled,omitempty" tf:"compression_enabled,omitempty"` + + // A list of one or more Content types (formerly known as MIME types) to compress. Possible values include application/eot, application/font, application/font-sfnt, application/javascript, application/json, application/opentype, application/otf, application/pkcs7-mime, application/truetype, application/ttf, application/vnd.ms-fontobject, application/xhtml+xml, application/xml, application/xml+rss, application/x-font-opentype, application/x-font-truetype, application/x-font-ttf, application/x-httpd-cgi, application/x-mpegurl, application/x-opentype, application/x-otf, application/x-perl, application/x-ttf, application/x-javascript, font/eot, font/ttf, font/otf, font/opentype, image/svg+xml, text/css, text/csv, text/html, text/javascript, text/js, text/plain, text/richtext, text/tab-separated-values, text/xml, text/x-script, text/x-component or text/x-java-source. + ContentTypesToCompress []*string `json:"contentTypesToCompress,omitempty" tf:"content_types_to_compress,omitempty"` + + // Defines how the Front Door Route will cache requests that include query strings. 
Possible values include IgnoreQueryString, IgnoreSpecifiedQueryStrings, IncludeSpecifiedQueryStrings or UseQueryString. Defaults to IgnoreQueryString. + QueryStringCachingBehavior *string `json:"queryStringCachingBehavior,omitempty" tf:"query_string_caching_behavior,omitempty"` + + // Query strings to include or ignore. + QueryStrings []*string `json:"queryStrings,omitempty" tf:"query_strings,omitempty"` +} + +type CacheParameters struct { + + // Is content compression enabled? Possible values are true or false. Defaults to false. + // +kubebuilder:validation:Optional + CompressionEnabled *bool `json:"compressionEnabled,omitempty" tf:"compression_enabled,omitempty"` + + // A list of one or more Content types (formerly known as MIME types) to compress. Possible values include application/eot, application/font, application/font-sfnt, application/javascript, application/json, application/opentype, application/otf, application/pkcs7-mime, application/truetype, application/ttf, application/vnd.ms-fontobject, application/xhtml+xml, application/xml, application/xml+rss, application/x-font-opentype, application/x-font-truetype, application/x-font-ttf, application/x-httpd-cgi, application/x-mpegurl, application/x-opentype, application/x-otf, application/x-perl, application/x-ttf, application/x-javascript, font/eot, font/ttf, font/otf, font/opentype, image/svg+xml, text/css, text/csv, text/html, text/javascript, text/js, text/plain, text/richtext, text/tab-separated-values, text/xml, text/x-script, text/x-component or text/x-java-source. + // +kubebuilder:validation:Optional + ContentTypesToCompress []*string `json:"contentTypesToCompress,omitempty" tf:"content_types_to_compress,omitempty"` + + // Defines how the Front Door Route will cache requests that include query strings. Possible values include IgnoreQueryString, IgnoreSpecifiedQueryStrings, IncludeSpecifiedQueryStrings or UseQueryString. Defaults to IgnoreQueryString. 
+ // +kubebuilder:validation:Optional + QueryStringCachingBehavior *string `json:"queryStringCachingBehavior,omitempty" tf:"query_string_caching_behavior,omitempty"` + + // Query strings to include or ignore. + // +kubebuilder:validation:Optional + QueryStrings []*string `json:"queryStrings,omitempty" tf:"query_strings,omitempty"` +} + +type FrontdoorRouteInitParameters struct { + + // A cache block as defined below. + Cache *CacheInitParameters `json:"cache,omitempty" tf:"cache,omitempty"` + + // The IDs of the Front Door Custom Domains which are associated with this Front Door Route. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cdn/v1beta2.FrontdoorCustomDomain + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +listType=set + CdnFrontdoorCustomDomainIds []*string `json:"cdnFrontdoorCustomDomainIds,omitempty" tf:"cdn_frontdoor_custom_domain_ids,omitempty"` + + // References to FrontdoorCustomDomain in cdn to populate cdnFrontdoorCustomDomainIds. + // +kubebuilder:validation:Optional + CdnFrontdoorCustomDomainIdsRefs []v1.Reference `json:"cdnFrontdoorCustomDomainIdsRefs,omitempty" tf:"-"` + + // Selector for a list of FrontdoorCustomDomain in cdn to populate cdnFrontdoorCustomDomainIds. + // +kubebuilder:validation:Optional + CdnFrontdoorCustomDomainIdsSelector *v1.Selector `json:"cdnFrontdoorCustomDomainIdsSelector,omitempty" tf:"-"` + + // The resource ID of the Front Door Origin Group where this Front Door Route should be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cdn/v1beta2.FrontdoorOriginGroup + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + CdnFrontdoorOriginGroupID *string `json:"cdnFrontdoorOriginGroupId,omitempty" tf:"cdn_frontdoor_origin_group_id,omitempty"` + + // Reference to a FrontdoorOriginGroup in cdn to populate cdnFrontdoorOriginGroupId. 
+ // +kubebuilder:validation:Optional + CdnFrontdoorOriginGroupIDRef *v1.Reference `json:"cdnFrontdoorOriginGroupIdRef,omitempty" tf:"-"` + + // Selector for a FrontdoorOriginGroup in cdn to populate cdnFrontdoorOriginGroupId. + // +kubebuilder:validation:Optional + CdnFrontdoorOriginGroupIDSelector *v1.Selector `json:"cdnFrontdoorOriginGroupIdSelector,omitempty" tf:"-"` + + // One or more Front Door Origin resource IDs that this Front Door Route will link to. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cdn/v1beta2.FrontdoorOrigin + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + CdnFrontdoorOriginIds []*string `json:"cdnFrontdoorOriginIds,omitempty" tf:"cdn_frontdoor_origin_ids,omitempty"` + + // References to FrontdoorOrigin in cdn to populate cdnFrontdoorOriginIds. + // +kubebuilder:validation:Optional + CdnFrontdoorOriginIdsRefs []v1.Reference `json:"cdnFrontdoorOriginIdsRefs,omitempty" tf:"-"` + + // Selector for a list of FrontdoorOrigin in cdn to populate cdnFrontdoorOriginIds. + // +kubebuilder:validation:Optional + CdnFrontdoorOriginIdsSelector *v1.Selector `json:"cdnFrontdoorOriginIdsSelector,omitempty" tf:"-"` + + // A directory path on the Front Door Origin that can be used to retrieve content (e.g. contoso.cloudapp.net/originpath). + CdnFrontdoorOriginPath *string `json:"cdnFrontdoorOriginPath,omitempty" tf:"cdn_frontdoor_origin_path,omitempty"` + + // A list of the Front Door Rule Set IDs which should be assigned to this Front Door Route. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cdn/v1beta1.FrontdoorRuleSet + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +listType=set + CdnFrontdoorRuleSetIds []*string `json:"cdnFrontdoorRuleSetIds,omitempty" tf:"cdn_frontdoor_rule_set_ids,omitempty"` + + // References to FrontdoorRuleSet in cdn to populate cdnFrontdoorRuleSetIds. + // +kubebuilder:validation:Optional + CdnFrontdoorRuleSetIdsRefs []v1.Reference `json:"cdnFrontdoorRuleSetIdsRefs,omitempty" tf:"-"` + + // Selector for a list of FrontdoorRuleSet in cdn to populate cdnFrontdoorRuleSetIds. + // +kubebuilder:validation:Optional + CdnFrontdoorRuleSetIdsSelector *v1.Selector `json:"cdnFrontdoorRuleSetIdsSelector,omitempty" tf:"-"` + + // Is this Front Door Route enabled? Possible values are true or false. Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The Protocol that will be use when forwarding traffic to backends. Possible values are HttpOnly, HttpsOnly or MatchRequest. Defaults to MatchRequest. + ForwardingProtocol *string `json:"forwardingProtocol,omitempty" tf:"forwarding_protocol,omitempty"` + + // Automatically redirect HTTP traffic to HTTPS traffic? Possible values are true or false. Defaults to true. + HTTPSRedirectEnabled *bool `json:"httpsRedirectEnabled,omitempty" tf:"https_redirect_enabled,omitempty"` + + // Should this Front Door Route be linked to the default endpoint? Possible values include true or false. Defaults to true. + LinkToDefaultDomain *bool `json:"linkToDefaultDomain,omitempty" tf:"link_to_default_domain,omitempty"` + + // The route patterns of the rule. + PatternsToMatch []*string `json:"patternsToMatch,omitempty" tf:"patterns_to_match,omitempty"` + + // One or more Protocols supported by this Front Door Route. Possible values are Http or Https. 
+ // +listType=set + SupportedProtocols []*string `json:"supportedProtocols,omitempty" tf:"supported_protocols,omitempty"` +} + +type FrontdoorRouteObservation struct { + + // A cache block as defined below. + Cache *CacheObservation `json:"cache,omitempty" tf:"cache,omitempty"` + + // The IDs of the Front Door Custom Domains which are associated with this Front Door Route. + // +listType=set + CdnFrontdoorCustomDomainIds []*string `json:"cdnFrontdoorCustomDomainIds,omitempty" tf:"cdn_frontdoor_custom_domain_ids,omitempty"` + + // The resource ID of the Front Door Endpoint where this Front Door Route should exist. Changing this forces a new Front Door Route to be created. + CdnFrontdoorEndpointID *string `json:"cdnFrontdoorEndpointId,omitempty" tf:"cdn_frontdoor_endpoint_id,omitempty"` + + // The resource ID of the Front Door Origin Group where this Front Door Route should be created. + CdnFrontdoorOriginGroupID *string `json:"cdnFrontdoorOriginGroupId,omitempty" tf:"cdn_frontdoor_origin_group_id,omitempty"` + + // One or more Front Door Origin resource IDs that this Front Door Route will link to. + CdnFrontdoorOriginIds []*string `json:"cdnFrontdoorOriginIds,omitempty" tf:"cdn_frontdoor_origin_ids,omitempty"` + + // A directory path on the Front Door Origin that can be used to retrieve content (e.g. contoso.cloudapp.net/originpath). + CdnFrontdoorOriginPath *string `json:"cdnFrontdoorOriginPath,omitempty" tf:"cdn_frontdoor_origin_path,omitempty"` + + // A list of the Front Door Rule Set IDs which should be assigned to this Front Door Route. + // +listType=set + CdnFrontdoorRuleSetIds []*string `json:"cdnFrontdoorRuleSetIds,omitempty" tf:"cdn_frontdoor_rule_set_ids,omitempty"` + + // Is this Front Door Route enabled? Possible values are true or false. Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The Protocol that will be use when forwarding traffic to backends. Possible values are HttpOnly, HttpsOnly or MatchRequest. 
Defaults to MatchRequest. + ForwardingProtocol *string `json:"forwardingProtocol,omitempty" tf:"forwarding_protocol,omitempty"` + + // Automatically redirect HTTP traffic to HTTPS traffic? Possible values are true or false. Defaults to true. + HTTPSRedirectEnabled *bool `json:"httpsRedirectEnabled,omitempty" tf:"https_redirect_enabled,omitempty"` + + // The ID of the Front Door Route. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Should this Front Door Route be linked to the default endpoint? Possible values include true or false. Defaults to true. + LinkToDefaultDomain *bool `json:"linkToDefaultDomain,omitempty" tf:"link_to_default_domain,omitempty"` + + // The route patterns of the rule. + PatternsToMatch []*string `json:"patternsToMatch,omitempty" tf:"patterns_to_match,omitempty"` + + // One or more Protocols supported by this Front Door Route. Possible values are Http or Https. + // +listType=set + SupportedProtocols []*string `json:"supportedProtocols,omitempty" tf:"supported_protocols,omitempty"` +} + +type FrontdoorRouteParameters struct { + + // A cache block as defined below. + // +kubebuilder:validation:Optional + Cache *CacheParameters `json:"cache,omitempty" tf:"cache,omitempty"` + + // The IDs of the Front Door Custom Domains which are associated with this Front Door Route. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cdn/v1beta2.FrontdoorCustomDomain + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + // +listType=set + CdnFrontdoorCustomDomainIds []*string `json:"cdnFrontdoorCustomDomainIds,omitempty" tf:"cdn_frontdoor_custom_domain_ids,omitempty"` + + // References to FrontdoorCustomDomain in cdn to populate cdnFrontdoorCustomDomainIds. 
+ // +kubebuilder:validation:Optional + CdnFrontdoorCustomDomainIdsRefs []v1.Reference `json:"cdnFrontdoorCustomDomainIdsRefs,omitempty" tf:"-"` + + // Selector for a list of FrontdoorCustomDomain in cdn to populate cdnFrontdoorCustomDomainIds. + // +kubebuilder:validation:Optional + CdnFrontdoorCustomDomainIdsSelector *v1.Selector `json:"cdnFrontdoorCustomDomainIdsSelector,omitempty" tf:"-"` + + // The resource ID of the Front Door Endpoint where this Front Door Route should exist. Changing this forces a new Front Door Route to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cdn/v1beta1.FrontdoorEndpoint + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + CdnFrontdoorEndpointID *string `json:"cdnFrontdoorEndpointId,omitempty" tf:"cdn_frontdoor_endpoint_id,omitempty"` + + // Reference to a FrontdoorEndpoint in cdn to populate cdnFrontdoorEndpointId. + // +kubebuilder:validation:Optional + CdnFrontdoorEndpointIDRef *v1.Reference `json:"cdnFrontdoorEndpointIdRef,omitempty" tf:"-"` + + // Selector for a FrontdoorEndpoint in cdn to populate cdnFrontdoorEndpointId. + // +kubebuilder:validation:Optional + CdnFrontdoorEndpointIDSelector *v1.Selector `json:"cdnFrontdoorEndpointIdSelector,omitempty" tf:"-"` + + // The resource ID of the Front Door Origin Group where this Front Door Route should be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cdn/v1beta2.FrontdoorOriginGroup + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + CdnFrontdoorOriginGroupID *string `json:"cdnFrontdoorOriginGroupId,omitempty" tf:"cdn_frontdoor_origin_group_id,omitempty"` + + // Reference to a FrontdoorOriginGroup in cdn to populate cdnFrontdoorOriginGroupId. 
+ // +kubebuilder:validation:Optional + CdnFrontdoorOriginGroupIDRef *v1.Reference `json:"cdnFrontdoorOriginGroupIdRef,omitempty" tf:"-"` + + // Selector for a FrontdoorOriginGroup in cdn to populate cdnFrontdoorOriginGroupId. + // +kubebuilder:validation:Optional + CdnFrontdoorOriginGroupIDSelector *v1.Selector `json:"cdnFrontdoorOriginGroupIdSelector,omitempty" tf:"-"` + + // One or more Front Door Origin resource IDs that this Front Door Route will link to. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cdn/v1beta2.FrontdoorOrigin + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + CdnFrontdoorOriginIds []*string `json:"cdnFrontdoorOriginIds,omitempty" tf:"cdn_frontdoor_origin_ids,omitempty"` + + // References to FrontdoorOrigin in cdn to populate cdnFrontdoorOriginIds. + // +kubebuilder:validation:Optional + CdnFrontdoorOriginIdsRefs []v1.Reference `json:"cdnFrontdoorOriginIdsRefs,omitempty" tf:"-"` + + // Selector for a list of FrontdoorOrigin in cdn to populate cdnFrontdoorOriginIds. + // +kubebuilder:validation:Optional + CdnFrontdoorOriginIdsSelector *v1.Selector `json:"cdnFrontdoorOriginIdsSelector,omitempty" tf:"-"` + + // A directory path on the Front Door Origin that can be used to retrieve content (e.g. contoso.cloudapp.net/originpath). + // +kubebuilder:validation:Optional + CdnFrontdoorOriginPath *string `json:"cdnFrontdoorOriginPath,omitempty" tf:"cdn_frontdoor_origin_path,omitempty"` + + // A list of the Front Door Rule Set IDs which should be assigned to this Front Door Route. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cdn/v1beta1.FrontdoorRuleSet + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + // +listType=set + CdnFrontdoorRuleSetIds []*string `json:"cdnFrontdoorRuleSetIds,omitempty" tf:"cdn_frontdoor_rule_set_ids,omitempty"` + + // References to FrontdoorRuleSet in cdn to populate cdnFrontdoorRuleSetIds. + // +kubebuilder:validation:Optional + CdnFrontdoorRuleSetIdsRefs []v1.Reference `json:"cdnFrontdoorRuleSetIdsRefs,omitempty" tf:"-"` + + // Selector for a list of FrontdoorRuleSet in cdn to populate cdnFrontdoorRuleSetIds. + // +kubebuilder:validation:Optional + CdnFrontdoorRuleSetIdsSelector *v1.Selector `json:"cdnFrontdoorRuleSetIdsSelector,omitempty" tf:"-"` + + // Is this Front Door Route enabled? Possible values are true or false. Defaults to true. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The Protocol that will be use when forwarding traffic to backends. Possible values are HttpOnly, HttpsOnly or MatchRequest. Defaults to MatchRequest. + // +kubebuilder:validation:Optional + ForwardingProtocol *string `json:"forwardingProtocol,omitempty" tf:"forwarding_protocol,omitempty"` + + // Automatically redirect HTTP traffic to HTTPS traffic? Possible values are true or false. Defaults to true. + // +kubebuilder:validation:Optional + HTTPSRedirectEnabled *bool `json:"httpsRedirectEnabled,omitempty" tf:"https_redirect_enabled,omitempty"` + + // Should this Front Door Route be linked to the default endpoint? Possible values include true or false. Defaults to true. + // +kubebuilder:validation:Optional + LinkToDefaultDomain *bool `json:"linkToDefaultDomain,omitempty" tf:"link_to_default_domain,omitempty"` + + // The route patterns of the rule. 
+ // +kubebuilder:validation:Optional + PatternsToMatch []*string `json:"patternsToMatch,omitempty" tf:"patterns_to_match,omitempty"` + + // One or more Protocols supported by this Front Door Route. Possible values are Http or Https. + // +kubebuilder:validation:Optional + // +listType=set + SupportedProtocols []*string `json:"supportedProtocols,omitempty" tf:"supported_protocols,omitempty"` +} + +// FrontdoorRouteSpec defines the desired state of FrontdoorRoute +type FrontdoorRouteSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider FrontdoorRouteParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider FrontdoorRouteInitParameters `json:"initProvider,omitempty"` +} + +// FrontdoorRouteStatus defines the observed state of FrontdoorRoute. +type FrontdoorRouteStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider FrontdoorRouteObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// FrontdoorRoute is the Schema for the FrontdoorRoutes API. Manages a Front Door (standard/premium) Route. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type FrontdoorRoute struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.patternsToMatch) || (has(self.initProvider) && has(self.initProvider.patternsToMatch))",message="spec.forProvider.patternsToMatch is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.supportedProtocols) || (has(self.initProvider) && has(self.initProvider.supportedProtocols))",message="spec.forProvider.supportedProtocols is a required parameter" + Spec FrontdoorRouteSpec `json:"spec"` + Status FrontdoorRouteStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// FrontdoorRouteList contains a list of FrontdoorRoutes +type FrontdoorRouteList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []FrontdoorRoute `json:"items"` +} + +// Repository type metadata. +var ( + FrontdoorRoute_Kind = "FrontdoorRoute" + FrontdoorRoute_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: FrontdoorRoute_Kind}.String() + FrontdoorRoute_KindAPIVersion = FrontdoorRoute_Kind + "." 
+ CRDGroupVersion.String() + FrontdoorRoute_GroupVersionKind = CRDGroupVersion.WithKind(FrontdoorRoute_Kind) +) + +func init() { + SchemeBuilder.Register(&FrontdoorRoute{}, &FrontdoorRouteList{}) +} diff --git a/apis/cdn/v1beta2/zz_frontdoorrule_terraformed.go b/apis/cdn/v1beta2/zz_frontdoorrule_terraformed.go new file mode 100755 index 000000000..ef7c79b41 --- /dev/null +++ b/apis/cdn/v1beta2/zz_frontdoorrule_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this FrontdoorRule +func (mg *FrontdoorRule) GetTerraformResourceType() string { + return "azurerm_cdn_frontdoor_rule" +} + +// GetConnectionDetailsMapping for this FrontdoorRule +func (tr *FrontdoorRule) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this FrontdoorRule +func (tr *FrontdoorRule) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this FrontdoorRule +func (tr *FrontdoorRule) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this FrontdoorRule +func (tr *FrontdoorRule) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this FrontdoorRule +func (tr *FrontdoorRule) GetParameters() (map[string]any, error) { + p, err := 
json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this FrontdoorRule +func (tr *FrontdoorRule) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this FrontdoorRule +func (tr *FrontdoorRule) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this FrontdoorRule +func (tr *FrontdoorRule) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this FrontdoorRule using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *FrontdoorRule) LateInitialize(attrs []byte) (bool, error) { + params := &FrontdoorRuleParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *FrontdoorRule) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/cdn/v1beta2/zz_frontdoorrule_types.go b/apis/cdn/v1beta2/zz_frontdoorrule_types.go new file mode 100755 index 000000000..548ac21c4 --- /dev/null +++ b/apis/cdn/v1beta2/zz_frontdoorrule_types.go @@ -0,0 +1,1604 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ActionsInitParameters struct { + + // A request_header_action block as defined below. + RequestHeaderAction []RequestHeaderActionInitParameters `json:"requestHeaderAction,omitempty" tf:"request_header_action,omitempty"` + + // A response_header_action block as defined below. + ResponseHeaderAction []ResponseHeaderActionInitParameters `json:"responseHeaderAction,omitempty" tf:"response_header_action,omitempty"` + + // A route_configuration_override_action block as defined below. + RouteConfigurationOverrideAction *RouteConfigurationOverrideActionInitParameters `json:"routeConfigurationOverrideAction,omitempty" tf:"route_configuration_override_action,omitempty"` + + // A url_redirect_action block as defined below. 
You may not have a url_redirect_action and a url_rewrite_action defined in the same actions block. + URLRedirectAction *ActionsURLRedirectActionInitParameters `json:"urlRedirectAction,omitempty" tf:"url_redirect_action,omitempty"` + + // A url_rewrite_action block as defined below. You may not have a url_rewrite_action and a url_redirect_action defined in the same actions block. + URLRewriteAction *ActionsURLRewriteActionInitParameters `json:"urlRewriteAction,omitempty" tf:"url_rewrite_action,omitempty"` +} + +type ActionsObservation struct { + + // A request_header_action block as defined below. + RequestHeaderAction []RequestHeaderActionObservation `json:"requestHeaderAction,omitempty" tf:"request_header_action,omitempty"` + + // A response_header_action block as defined below. + ResponseHeaderAction []ResponseHeaderActionObservation `json:"responseHeaderAction,omitempty" tf:"response_header_action,omitempty"` + + // A route_configuration_override_action block as defined below. + RouteConfigurationOverrideAction *RouteConfigurationOverrideActionObservation `json:"routeConfigurationOverrideAction,omitempty" tf:"route_configuration_override_action,omitempty"` + + // A url_redirect_action block as defined below. You may not have a url_redirect_action and a url_rewrite_action defined in the same actions block. + URLRedirectAction *ActionsURLRedirectActionObservation `json:"urlRedirectAction,omitempty" tf:"url_redirect_action,omitempty"` + + // A url_rewrite_action block as defined below. You may not have a url_rewrite_action and a url_redirect_action defined in the same actions block. + URLRewriteAction *ActionsURLRewriteActionObservation `json:"urlRewriteAction,omitempty" tf:"url_rewrite_action,omitempty"` +} + +type ActionsParameters struct { + + // A request_header_action block as defined below. 
+ // +kubebuilder:validation:Optional + RequestHeaderAction []RequestHeaderActionParameters `json:"requestHeaderAction,omitempty" tf:"request_header_action,omitempty"` + + // A response_header_action block as defined below. + // +kubebuilder:validation:Optional + ResponseHeaderAction []ResponseHeaderActionParameters `json:"responseHeaderAction,omitempty" tf:"response_header_action,omitempty"` + + // A route_configuration_override_action block as defined below. + // +kubebuilder:validation:Optional + RouteConfigurationOverrideAction *RouteConfigurationOverrideActionParameters `json:"routeConfigurationOverrideAction,omitempty" tf:"route_configuration_override_action,omitempty"` + + // A url_redirect_action block as defined below. You may not have a url_redirect_action and a url_rewrite_action defined in the same actions block. + // +kubebuilder:validation:Optional + URLRedirectAction *ActionsURLRedirectActionParameters `json:"urlRedirectAction,omitempty" tf:"url_redirect_action,omitempty"` + + // A url_rewrite_action block as defined below. You may not have a url_rewrite_action and a url_redirect_action defined in the same actions block. + // +kubebuilder:validation:Optional + URLRewriteAction *ActionsURLRewriteActionParameters `json:"urlRewriteAction,omitempty" tf:"url_rewrite_action,omitempty"` +} + +type ActionsURLRedirectActionInitParameters struct { + + // The fragment to use in the redirect. The value must be a string between 0 and 1024 characters in length, leave blank to preserve the incoming fragment. Defaults to "". + DestinationFragment *string `json:"destinationFragment,omitempty" tf:"destination_fragment,omitempty"` + + // The host name you want the request to be redirected to. The value must be a string between 0 and 2048 characters in length, leave blank to preserve the incoming host. + DestinationHostName *string `json:"destinationHostname,omitempty" tf:"destination_hostname,omitempty"` + + // The path to use in the redirect. 
The value must be a string and include the leading /, leave blank to preserve the incoming path. Defaults to "". + DestinationPath *string `json:"destinationPath,omitempty" tf:"destination_path,omitempty"` + + // The query string used in the redirect URL. The value must be in the = or ={action_server_variable} format and must not include the leading ?, leave blank to preserve the incoming query string. Maximum allowed length for this field is 2048 characters. Defaults to "". + QueryString *string `json:"queryString,omitempty" tf:"query_string,omitempty"` + + // The protocol the request will be redirected as. Possible values include MatchRequest, Http or Https. Defaults to MatchRequest. + RedirectProtocol *string `json:"redirectProtocol,omitempty" tf:"redirect_protocol,omitempty"` + + // The response type to return to the requestor. Possible values include Moved, Found , TemporaryRedirect or PermanentRedirect. + RedirectType *string `json:"redirectType,omitempty" tf:"redirect_type,omitempty"` +} + +type ActionsURLRedirectActionObservation struct { + + // The fragment to use in the redirect. The value must be a string between 0 and 1024 characters in length, leave blank to preserve the incoming fragment. Defaults to "". + DestinationFragment *string `json:"destinationFragment,omitempty" tf:"destination_fragment,omitempty"` + + // The host name you want the request to be redirected to. The value must be a string between 0 and 2048 characters in length, leave blank to preserve the incoming host. + DestinationHostName *string `json:"destinationHostname,omitempty" tf:"destination_hostname,omitempty"` + + // The path to use in the redirect. The value must be a string and include the leading /, leave blank to preserve the incoming path. Defaults to "". + DestinationPath *string `json:"destinationPath,omitempty" tf:"destination_path,omitempty"` + + // The query string used in the redirect URL. 
The value must be in the = or ={action_server_variable} format and must not include the leading ?, leave blank to preserve the incoming query string. Maximum allowed length for this field is 2048 characters. Defaults to "". + QueryString *string `json:"queryString,omitempty" tf:"query_string,omitempty"` + + // The protocol the request will be redirected as. Possible values include MatchRequest, Http or Https. Defaults to MatchRequest. + RedirectProtocol *string `json:"redirectProtocol,omitempty" tf:"redirect_protocol,omitempty"` + + // The response type to return to the requestor. Possible values include Moved, Found , TemporaryRedirect or PermanentRedirect. + RedirectType *string `json:"redirectType,omitempty" tf:"redirect_type,omitempty"` +} + +type ActionsURLRedirectActionParameters struct { + + // The fragment to use in the redirect. The value must be a string between 0 and 1024 characters in length, leave blank to preserve the incoming fragment. Defaults to "". + // +kubebuilder:validation:Optional + DestinationFragment *string `json:"destinationFragment,omitempty" tf:"destination_fragment,omitempty"` + + // The host name you want the request to be redirected to. The value must be a string between 0 and 2048 characters in length, leave blank to preserve the incoming host. + // +kubebuilder:validation:Optional + DestinationHostName *string `json:"destinationHostname" tf:"destination_hostname,omitempty"` + + // The path to use in the redirect. The value must be a string and include the leading /, leave blank to preserve the incoming path. Defaults to "". + // +kubebuilder:validation:Optional + DestinationPath *string `json:"destinationPath,omitempty" tf:"destination_path,omitempty"` + + // The query string used in the redirect URL. The value must be in the = or ={action_server_variable} format and must not include the leading ?, leave blank to preserve the incoming query string. Maximum allowed length for this field is 2048 characters. Defaults to "". 
+ // +kubebuilder:validation:Optional + QueryString *string `json:"queryString,omitempty" tf:"query_string,omitempty"` + + // The protocol the request will be redirected as. Possible values include MatchRequest, Http or Https. Defaults to MatchRequest. + // +kubebuilder:validation:Optional + RedirectProtocol *string `json:"redirectProtocol,omitempty" tf:"redirect_protocol,omitempty"` + + // The response type to return to the requestor. Possible values include Moved, Found , TemporaryRedirect or PermanentRedirect. + // +kubebuilder:validation:Optional + RedirectType *string `json:"redirectType" tf:"redirect_type,omitempty"` +} + +type ActionsURLRewriteActionInitParameters struct { + + // The destination path to use in the rewrite. The destination path overwrites the source pattern. + Destination *string `json:"destination,omitempty" tf:"destination,omitempty"` + + // Append the remaining path after the source pattern to the new destination path? Possible values true or false. Defaults to false. + PreserveUnmatchedPath *bool `json:"preserveUnmatchedPath,omitempty" tf:"preserve_unmatched_path,omitempty"` + + // The source pattern in the URL path to replace. This uses prefix-based matching. For example, to match all URL paths use a forward slash "/" as the source pattern value. + SourcePattern *string `json:"sourcePattern,omitempty" tf:"source_pattern,omitempty"` +} + +type ActionsURLRewriteActionObservation struct { + + // The destination path to use in the rewrite. The destination path overwrites the source pattern. + Destination *string `json:"destination,omitempty" tf:"destination,omitempty"` + + // Append the remaining path after the source pattern to the new destination path? Possible values true or false. Defaults to false. + PreserveUnmatchedPath *bool `json:"preserveUnmatchedPath,omitempty" tf:"preserve_unmatched_path,omitempty"` + + // The source pattern in the URL path to replace. This uses prefix-based matching. 
For example, to match all URL paths use a forward slash "/" as the source pattern value. + SourcePattern *string `json:"sourcePattern,omitempty" tf:"source_pattern,omitempty"` +} + +type ActionsURLRewriteActionParameters struct { + + // The destination path to use in the rewrite. The destination path overwrites the source pattern. + // +kubebuilder:validation:Optional + Destination *string `json:"destination" tf:"destination,omitempty"` + + // Append the remaining path after the source pattern to the new destination path? Possible values true or false. Defaults to false. + // +kubebuilder:validation:Optional + PreserveUnmatchedPath *bool `json:"preserveUnmatchedPath,omitempty" tf:"preserve_unmatched_path,omitempty"` + + // The source pattern in the URL path to replace. This uses prefix-based matching. For example, to match all URL paths use a forward slash "/" as the source pattern value. + // +kubebuilder:validation:Optional + SourcePattern *string `json:"sourcePattern" tf:"source_pattern,omitempty"` +} + +type ClientPortConditionInitParameters struct { + + // One or more string or integer values(e.g. "1") representing the value of the request path to match. Don't include the leading slash (/). If multiple values are specified, they're evaluated using OR logic. + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // If true operator becomes the opposite of its value. Possible values true or false. Defaults to false. Details can be found in the Condition Operator List below. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // A Conditional operator. Possible values include Any, Equal, Contains, BeginsWith, EndsWith, LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual or RegEx. Details can be found in the Condition Operator List below. 
+ Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` +} + +type ClientPortConditionObservation struct { + + // One or more string or integer values(e.g. "1") representing the value of the request path to match. Don't include the leading slash (/). If multiple values are specified, they're evaluated using OR logic. + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // If true operator becomes the opposite of its value. Possible values true or false. Defaults to false. Details can be found in the Condition Operator List below. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // A Conditional operator. Possible values include Any, Equal, Contains, BeginsWith, EndsWith, LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual or RegEx. Details can be found in the Condition Operator List below. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` +} + +type ClientPortConditionParameters struct { + + // One or more string or integer values(e.g. "1") representing the value of the request path to match. Don't include the leading slash (/). If multiple values are specified, they're evaluated using OR logic. + // +kubebuilder:validation:Optional + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // If true operator becomes the opposite of its value. Possible values true or false. Defaults to false. Details can be found in the Condition Operator List below. + // +kubebuilder:validation:Optional + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // A Conditional operator. Possible values include Any, Equal, Contains, BeginsWith, EndsWith, LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual or RegEx. Details can be found in the Condition Operator List below. 
+ // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` +} + +type ConditionsCookiesConditionInitParameters struct { + + // A string value representing the name of the cookie. + CookieName *string `json:"cookieName,omitempty" tf:"cookie_name,omitempty"` + + // One or more string or integer values(e.g. "1") representing the value of the request path to match. Don't include the leading slash (/). If multiple values are specified, they're evaluated using OR logic. + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // If true operator becomes the opposite of its value. Possible values true or false. Defaults to false. Details can be found in the Condition Operator List below. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // A Conditional operator. Possible values include Any, Equal, Contains, BeginsWith, EndsWith, LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual or RegEx. Details can be found in the Condition Operator List below. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A Conditional operator. Possible values include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode or UrlEncode. Details can be found in the Condition Transform List below. + // +listType=set + Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type ConditionsCookiesConditionObservation struct { + + // A string value representing the name of the cookie. + CookieName *string `json:"cookieName,omitempty" tf:"cookie_name,omitempty"` + + // One or more string or integer values(e.g. "1") representing the value of the request path to match. Don't include the leading slash (/). If multiple values are specified, they're evaluated using OR logic. + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // If true operator becomes the opposite of its value. 
Possible values true or false. Defaults to false. Details can be found in the Condition Operator List below. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // A Conditional operator. Possible values include Any, Equal, Contains, BeginsWith, EndsWith, LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual or RegEx. Details can be found in the Condition Operator List below. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A Conditional operator. Possible values include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode or UrlEncode. Details can be found in the Condition Transform List below. + // +listType=set + Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type ConditionsCookiesConditionParameters struct { + + // A string value representing the name of the cookie. + // +kubebuilder:validation:Optional + CookieName *string `json:"cookieName" tf:"cookie_name,omitempty"` + + // One or more string or integer values(e.g. "1") representing the value of the request path to match. Don't include the leading slash (/). If multiple values are specified, they're evaluated using OR logic. + // +kubebuilder:validation:Optional + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // If true operator becomes the opposite of its value. Possible values true or false. Defaults to false. Details can be found in the Condition Operator List below. + // +kubebuilder:validation:Optional + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // A Conditional operator. Possible values include Any, Equal, Contains, BeginsWith, EndsWith, LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual or RegEx. Details can be found in the Condition Operator List below. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // A Conditional operator. 
Possible values include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode or UrlEncode. Details can be found in the Condition Transform List below. + // +kubebuilder:validation:Optional + // +listType=set + Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type ConditionsHTTPVersionConditionInitParameters struct { + + // One or more string or integer values(e.g. "1") representing the value of the request path to match. Don't include the leading slash (/). If multiple values are specified, they're evaluated using OR logic. + // +listType=set + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // If true operator becomes the opposite of its value. Possible values true or false. Defaults to false. Details can be found in the Condition Operator List below. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // A Conditional operator. Possible values include Any, Equal, Contains, BeginsWith, EndsWith, LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual or RegEx. Details can be found in the Condition Operator List below. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` +} + +type ConditionsHTTPVersionConditionObservation struct { + + // One or more string or integer values(e.g. "1") representing the value of the request path to match. Don't include the leading slash (/). If multiple values are specified, they're evaluated using OR logic. + // +listType=set + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // If true operator becomes the opposite of its value. Possible values true or false. Defaults to false. Details can be found in the Condition Operator List below. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // A Conditional operator. 
Possible values include Any, Equal, Contains, BeginsWith, EndsWith, LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual or RegEx. Details can be found in the Condition Operator List below. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` +} + +type ConditionsHTTPVersionConditionParameters struct { + + // One or more string or integer values(e.g. "1") representing the value of the request path to match. Don't include the leading slash (/). If multiple values are specified, they're evaluated using OR logic. + // +kubebuilder:validation:Optional + // +listType=set + MatchValues []*string `json:"matchValues" tf:"match_values,omitempty"` + + // If true operator becomes the opposite of its value. Possible values true or false. Defaults to false. Details can be found in the Condition Operator List below. + // +kubebuilder:validation:Optional + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // A Conditional operator. Possible values include Any, Equal, Contains, BeginsWith, EndsWith, LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual or RegEx. Details can be found in the Condition Operator List below. + // +kubebuilder:validation:Optional + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` +} + +type ConditionsInitParameters struct { + + // A client_port_condition block as defined below. + ClientPortCondition []ClientPortConditionInitParameters `json:"clientPortCondition,omitempty" tf:"client_port_condition,omitempty"` + + // A cookies_condition block as defined below. + CookiesCondition []ConditionsCookiesConditionInitParameters `json:"cookiesCondition,omitempty" tf:"cookies_condition,omitempty"` + + // A http_version_condition block as defined below. + HTTPVersionCondition []ConditionsHTTPVersionConditionInitParameters `json:"httpVersionCondition,omitempty" tf:"http_version_condition,omitempty"` + + // A host_name_condition block as defined below. 
+ HostNameCondition []HostNameConditionInitParameters `json:"hostNameCondition,omitempty" tf:"host_name_condition,omitempty"` + + // A is_device_condition block as defined below. + IsDeviceCondition []IsDeviceConditionInitParameters `json:"isDeviceCondition,omitempty" tf:"is_device_condition,omitempty"` + + // A post_args_condition block as defined below. + PostArgsCondition []PostArgsConditionInitParameters `json:"postArgsCondition,omitempty" tf:"post_args_condition,omitempty"` + + // A query_string_condition block as defined below. + QueryStringCondition []ConditionsQueryStringConditionInitParameters `json:"queryStringCondition,omitempty" tf:"query_string_condition,omitempty"` + + // A remote_address_condition block as defined below. + RemoteAddressCondition []ConditionsRemoteAddressConditionInitParameters `json:"remoteAddressCondition,omitempty" tf:"remote_address_condition,omitempty"` + + // A request_body_condition block as defined below. + RequestBodyCondition []ConditionsRequestBodyConditionInitParameters `json:"requestBodyCondition,omitempty" tf:"request_body_condition,omitempty"` + + // A request_header_condition block as defined below. + RequestHeaderCondition []ConditionsRequestHeaderConditionInitParameters `json:"requestHeaderCondition,omitempty" tf:"request_header_condition,omitempty"` + + // A request_method_condition block as defined below. + RequestMethodCondition []ConditionsRequestMethodConditionInitParameters `json:"requestMethodCondition,omitempty" tf:"request_method_condition,omitempty"` + + // A request_scheme_condition block as defined below. + RequestSchemeCondition []ConditionsRequestSchemeConditionInitParameters `json:"requestSchemeCondition,omitempty" tf:"request_scheme_condition,omitempty"` + + // A request_uri_condition block as defined below. 
+ RequestURICondition []ConditionsRequestURIConditionInitParameters `json:"requestUriCondition,omitempty" tf:"request_uri_condition,omitempty"` + + // A ssl_protocol_condition block as defined below. + SSLProtocolCondition []SSLProtocolConditionInitParameters `json:"sslProtocolCondition,omitempty" tf:"ssl_protocol_condition,omitempty"` + + // A server_port_condition block as defined below. + ServerPortCondition []ServerPortConditionInitParameters `json:"serverPortCondition,omitempty" tf:"server_port_condition,omitempty"` + + // A socket_address_condition block as defined below. + SocketAddressCondition []SocketAddressConditionInitParameters `json:"socketAddressCondition,omitempty" tf:"socket_address_condition,omitempty"` + + // A url_file_extension_condition block as defined below. + URLFileExtensionCondition []ConditionsURLFileExtensionConditionInitParameters `json:"urlFileExtensionCondition,omitempty" tf:"url_file_extension_condition,omitempty"` + + // A url_filename_condition block as defined below. + URLFilenameCondition []URLFilenameConditionInitParameters `json:"urlFilenameCondition,omitempty" tf:"url_filename_condition,omitempty"` + + // A url_path_condition block as defined below. + URLPathCondition []ConditionsURLPathConditionInitParameters `json:"urlPathCondition,omitempty" tf:"url_path_condition,omitempty"` +} + +type ConditionsObservation struct { + + // A client_port_condition block as defined below. + ClientPortCondition []ClientPortConditionObservation `json:"clientPortCondition,omitempty" tf:"client_port_condition,omitempty"` + + // A cookies_condition block as defined below. + CookiesCondition []ConditionsCookiesConditionObservation `json:"cookiesCondition,omitempty" tf:"cookies_condition,omitempty"` + + // A http_version_condition block as defined below. 
+ HTTPVersionCondition []ConditionsHTTPVersionConditionObservation `json:"httpVersionCondition,omitempty" tf:"http_version_condition,omitempty"` + + // A host_name_condition block as defined below. + HostNameCondition []HostNameConditionObservation `json:"hostNameCondition,omitempty" tf:"host_name_condition,omitempty"` + + // A is_device_condition block as defined below. + IsDeviceCondition []IsDeviceConditionObservation `json:"isDeviceCondition,omitempty" tf:"is_device_condition,omitempty"` + + // A post_args_condition block as defined below. + PostArgsCondition []PostArgsConditionObservation `json:"postArgsCondition,omitempty" tf:"post_args_condition,omitempty"` + + // A query_string_condition block as defined below. + QueryStringCondition []ConditionsQueryStringConditionObservation `json:"queryStringCondition,omitempty" tf:"query_string_condition,omitempty"` + + // A remote_address_condition block as defined below. + RemoteAddressCondition []ConditionsRemoteAddressConditionObservation `json:"remoteAddressCondition,omitempty" tf:"remote_address_condition,omitempty"` + + // A request_body_condition block as defined below. + RequestBodyCondition []ConditionsRequestBodyConditionObservation `json:"requestBodyCondition,omitempty" tf:"request_body_condition,omitempty"` + + // A request_header_condition block as defined below. + RequestHeaderCondition []ConditionsRequestHeaderConditionObservation `json:"requestHeaderCondition,omitempty" tf:"request_header_condition,omitempty"` + + // A request_method_condition block as defined below. + RequestMethodCondition []ConditionsRequestMethodConditionObservation `json:"requestMethodCondition,omitempty" tf:"request_method_condition,omitempty"` + + // A request_scheme_condition block as defined below. + RequestSchemeCondition []ConditionsRequestSchemeConditionObservation `json:"requestSchemeCondition,omitempty" tf:"request_scheme_condition,omitempty"` + + // A request_uri_condition block as defined below. 
+ RequestURICondition []ConditionsRequestURIConditionObservation `json:"requestUriCondition,omitempty" tf:"request_uri_condition,omitempty"` + + // A ssl_protocol_condition block as defined below. + SSLProtocolCondition []SSLProtocolConditionObservation `json:"sslProtocolCondition,omitempty" tf:"ssl_protocol_condition,omitempty"` + + // A server_port_condition block as defined below. + ServerPortCondition []ServerPortConditionObservation `json:"serverPortCondition,omitempty" tf:"server_port_condition,omitempty"` + + // A socket_address_condition block as defined below. + SocketAddressCondition []SocketAddressConditionObservation `json:"socketAddressCondition,omitempty" tf:"socket_address_condition,omitempty"` + + // A url_file_extension_condition block as defined below. + URLFileExtensionCondition []ConditionsURLFileExtensionConditionObservation `json:"urlFileExtensionCondition,omitempty" tf:"url_file_extension_condition,omitempty"` + + // A url_filename_condition block as defined below. + URLFilenameCondition []URLFilenameConditionObservation `json:"urlFilenameCondition,omitempty" tf:"url_filename_condition,omitempty"` + + // A url_path_condition block as defined below. + URLPathCondition []ConditionsURLPathConditionObservation `json:"urlPathCondition,omitempty" tf:"url_path_condition,omitempty"` +} + +type ConditionsParameters struct { + + // A client_port_condition block as defined below. + // +kubebuilder:validation:Optional + ClientPortCondition []ClientPortConditionParameters `json:"clientPortCondition,omitempty" tf:"client_port_condition,omitempty"` + + // A cookies_condition block as defined below. + // +kubebuilder:validation:Optional + CookiesCondition []ConditionsCookiesConditionParameters `json:"cookiesCondition,omitempty" tf:"cookies_condition,omitempty"` + + // A http_version_condition block as defined below. 
+ // +kubebuilder:validation:Optional + HTTPVersionCondition []ConditionsHTTPVersionConditionParameters `json:"httpVersionCondition,omitempty" tf:"http_version_condition,omitempty"` + + // A host_name_condition block as defined below. + // +kubebuilder:validation:Optional + HostNameCondition []HostNameConditionParameters `json:"hostNameCondition,omitempty" tf:"host_name_condition,omitempty"` + + // A is_device_condition block as defined below. + // +kubebuilder:validation:Optional + IsDeviceCondition []IsDeviceConditionParameters `json:"isDeviceCondition,omitempty" tf:"is_device_condition,omitempty"` + + // A post_args_condition block as defined below. + // +kubebuilder:validation:Optional + PostArgsCondition []PostArgsConditionParameters `json:"postArgsCondition,omitempty" tf:"post_args_condition,omitempty"` + + // A query_string_condition block as defined below. + // +kubebuilder:validation:Optional + QueryStringCondition []ConditionsQueryStringConditionParameters `json:"queryStringCondition,omitempty" tf:"query_string_condition,omitempty"` + + // A remote_address_condition block as defined below. + // +kubebuilder:validation:Optional + RemoteAddressCondition []ConditionsRemoteAddressConditionParameters `json:"remoteAddressCondition,omitempty" tf:"remote_address_condition,omitempty"` + + // A request_body_condition block as defined below. + // +kubebuilder:validation:Optional + RequestBodyCondition []ConditionsRequestBodyConditionParameters `json:"requestBodyCondition,omitempty" tf:"request_body_condition,omitempty"` + + // A request_header_condition block as defined below. + // +kubebuilder:validation:Optional + RequestHeaderCondition []ConditionsRequestHeaderConditionParameters `json:"requestHeaderCondition,omitempty" tf:"request_header_condition,omitempty"` + + // A request_method_condition block as defined below. 
+ // +kubebuilder:validation:Optional + RequestMethodCondition []ConditionsRequestMethodConditionParameters `json:"requestMethodCondition,omitempty" tf:"request_method_condition,omitempty"` + + // A request_scheme_condition block as defined below. + // +kubebuilder:validation:Optional + RequestSchemeCondition []ConditionsRequestSchemeConditionParameters `json:"requestSchemeCondition,omitempty" tf:"request_scheme_condition,omitempty"` + + // A request_uri_condition block as defined below. + // +kubebuilder:validation:Optional + RequestURICondition []ConditionsRequestURIConditionParameters `json:"requestUriCondition,omitempty" tf:"request_uri_condition,omitempty"` + + // A ssl_protocol_condition block as defined below. + // +kubebuilder:validation:Optional + SSLProtocolCondition []SSLProtocolConditionParameters `json:"sslProtocolCondition,omitempty" tf:"ssl_protocol_condition,omitempty"` + + // A server_port_condition block as defined below. + // +kubebuilder:validation:Optional + ServerPortCondition []ServerPortConditionParameters `json:"serverPortCondition,omitempty" tf:"server_port_condition,omitempty"` + + // A socket_address_condition block as defined below. + // +kubebuilder:validation:Optional + SocketAddressCondition []SocketAddressConditionParameters `json:"socketAddressCondition,omitempty" tf:"socket_address_condition,omitempty"` + + // A url_file_extension_condition block as defined below. + // +kubebuilder:validation:Optional + URLFileExtensionCondition []ConditionsURLFileExtensionConditionParameters `json:"urlFileExtensionCondition,omitempty" tf:"url_file_extension_condition,omitempty"` + + // A url_filename_condition block as defined below. + // +kubebuilder:validation:Optional + URLFilenameCondition []URLFilenameConditionParameters `json:"urlFilenameCondition,omitempty" tf:"url_filename_condition,omitempty"` + + // A url_path_condition block as defined below. 
+ // +kubebuilder:validation:Optional + URLPathCondition []ConditionsURLPathConditionParameters `json:"urlPathCondition,omitempty" tf:"url_path_condition,omitempty"` +} + +type ConditionsQueryStringConditionInitParameters struct { + + // One or more string or integer values(e.g. "1") representing the value of the request path to match. Don't include the leading slash (/). If multiple values are specified, they're evaluated using OR logic. + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // If true operator becomes the opposite of its value. Possible values true or false. Defaults to false. Details can be found in the Condition Operator List below. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // A Conditional operator. Possible values include Any, Equal, Contains, BeginsWith, EndsWith, LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual or RegEx. Details can be found in the Condition Operator List below. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A Conditional operator. Possible values include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode or UrlEncode. Details can be found in the Condition Transform List below. + // +listType=set + Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type ConditionsQueryStringConditionObservation struct { + + // One or more string or integer values(e.g. "1") representing the value of the request path to match. Don't include the leading slash (/). If multiple values are specified, they're evaluated using OR logic. + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // If true operator becomes the opposite of its value. Possible values true or false. Defaults to false. Details can be found in the Condition Operator List below. 
+ NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // A Conditional operator. Possible values include Any, Equal, Contains, BeginsWith, EndsWith, LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual or RegEx. Details can be found in the Condition Operator List below. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A Conditional operator. Possible values include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode or UrlEncode. Details can be found in the Condition Transform List below. + // +listType=set + Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type ConditionsQueryStringConditionParameters struct { + + // One or more string or integer values(e.g. "1") representing the value of the request path to match. Don't include the leading slash (/). If multiple values are specified, they're evaluated using OR logic. + // +kubebuilder:validation:Optional + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // If true operator becomes the opposite of its value. Possible values true or false. Defaults to false. Details can be found in the Condition Operator List below. + // +kubebuilder:validation:Optional + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // A Conditional operator. Possible values include Any, Equal, Contains, BeginsWith, EndsWith, LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual or RegEx. Details can be found in the Condition Operator List below. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // A Conditional operator. Possible values include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode or UrlEncode. Details can be found in the Condition Transform List below. 
+ // +kubebuilder:validation:Optional + // +listType=set + Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type ConditionsRemoteAddressConditionInitParameters struct { + + // One or more string or integer values(e.g. "1") representing the value of the request path to match. Don't include the leading slash (/). If multiple values are specified, they're evaluated using OR logic. + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // If true operator becomes the opposite of its value. Possible values true or false. Defaults to false. Details can be found in the Condition Operator List below. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // A Conditional operator. Possible values include Any, Equal, Contains, BeginsWith, EndsWith, LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual or RegEx. Details can be found in the Condition Operator List below. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` +} + +type ConditionsRemoteAddressConditionObservation struct { + + // One or more string or integer values(e.g. "1") representing the value of the request path to match. Don't include the leading slash (/). If multiple values are specified, they're evaluated using OR logic. + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // If true operator becomes the opposite of its value. Possible values true or false. Defaults to false. Details can be found in the Condition Operator List below. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // A Conditional operator. Possible values include Any, Equal, Contains, BeginsWith, EndsWith, LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual or RegEx. Details can be found in the Condition Operator List below. 
+ Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` +} + +type ConditionsRemoteAddressConditionParameters struct { + + // One or more string or integer values(e.g. "1") representing the value of the request path to match. Don't include the leading slash (/). If multiple values are specified, they're evaluated using OR logic. + // +kubebuilder:validation:Optional + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // If true operator becomes the opposite of its value. Possible values true or false. Defaults to false. Details can be found in the Condition Operator List below. + // +kubebuilder:validation:Optional + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // A Conditional operator. Possible values include Any, Equal, Contains, BeginsWith, EndsWith, LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual or RegEx. Details can be found in the Condition Operator List below. + // +kubebuilder:validation:Optional + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` +} + +type ConditionsRequestBodyConditionInitParameters struct { + + // One or more string or integer values(e.g. "1") representing the value of the request path to match. Don't include the leading slash (/). If multiple values are specified, they're evaluated using OR logic. + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // If true operator becomes the opposite of its value. Possible values true or false. Defaults to false. Details can be found in the Condition Operator List below. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // A Conditional operator. Possible values include Any, Equal, Contains, BeginsWith, EndsWith, LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual or RegEx. Details can be found in the Condition Operator List below. 
+ Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A Conditional operator. Possible values include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode or UrlEncode. Details can be found in the Condition Transform List below. + // +listType=set + Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type ConditionsRequestBodyConditionObservation struct { + + // One or more string or integer values(e.g. "1") representing the value of the request path to match. Don't include the leading slash (/). If multiple values are specified, they're evaluated using OR logic. + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // If true operator becomes the opposite of its value. Possible values true or false. Defaults to false. Details can be found in the Condition Operator List below. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // A Conditional operator. Possible values include Any, Equal, Contains, BeginsWith, EndsWith, LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual or RegEx. Details can be found in the Condition Operator List below. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A Conditional operator. Possible values include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode or UrlEncode. Details can be found in the Condition Transform List below. + // +listType=set + Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type ConditionsRequestBodyConditionParameters struct { + + // One or more string or integer values(e.g. "1") representing the value of the request path to match. Don't include the leading slash (/). If multiple values are specified, they're evaluated using OR logic. + // +kubebuilder:validation:Optional + MatchValues []*string `json:"matchValues" tf:"match_values,omitempty"` + + // If true operator becomes the opposite of its value. 
Possible values true or false. Defaults to false. Details can be found in the Condition Operator List below. + // +kubebuilder:validation:Optional + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // A Conditional operator. Possible values include Any, Equal, Contains, BeginsWith, EndsWith, LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual or RegEx. Details can be found in the Condition Operator List below. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // A Conditional operator. Possible values include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode or UrlEncode. Details can be found in the Condition Transform List below. + // +kubebuilder:validation:Optional + // +listType=set + Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type ConditionsRequestHeaderConditionInitParameters struct { + + // The name of the header to modify. + HeaderName *string `json:"headerName,omitempty" tf:"header_name,omitempty"` + + // One or more string or integer values(e.g. "1") representing the value of the request path to match. Don't include the leading slash (/). If multiple values are specified, they're evaluated using OR logic. + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // If true operator becomes the opposite of its value. Possible values true or false. Defaults to false. Details can be found in the Condition Operator List below. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // A Conditional operator. Possible values include Any, Equal, Contains, BeginsWith, EndsWith, LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual or RegEx. Details can be found in the Condition Operator List below. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A Conditional operator. 
Possible values include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode or UrlEncode. Details can be found in the Condition Transform List below. + // +listType=set + Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type ConditionsRequestHeaderConditionObservation struct { + + // The name of the header to modify. + HeaderName *string `json:"headerName,omitempty" tf:"header_name,omitempty"` + + // One or more string or integer values(e.g. "1") representing the value of the request path to match. Don't include the leading slash (/). If multiple values are specified, they're evaluated using OR logic. + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // If true operator becomes the opposite of its value. Possible values true or false. Defaults to false. Details can be found in the Condition Operator List below. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // A Conditional operator. Possible values include Any, Equal, Contains, BeginsWith, EndsWith, LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual or RegEx. Details can be found in the Condition Operator List below. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A Conditional operator. Possible values include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode or UrlEncode. Details can be found in the Condition Transform List below. + // +listType=set + Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type ConditionsRequestHeaderConditionParameters struct { + + // The name of the header to modify. + // +kubebuilder:validation:Optional + HeaderName *string `json:"headerName" tf:"header_name,omitempty"` + + // One or more string or integer values(e.g. "1") representing the value of the request path to match. Don't include the leading slash (/). If multiple values are specified, they're evaluated using OR logic. 
+ // +kubebuilder:validation:Optional + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // If true operator becomes the opposite of its value. Possible values true or false. Defaults to false. Details can be found in the Condition Operator List below. + // +kubebuilder:validation:Optional + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // A Conditional operator. Possible values include Any, Equal, Contains, BeginsWith, EndsWith, LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual or RegEx. Details can be found in the Condition Operator List below. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // A Conditional operator. Possible values include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode or UrlEncode. Details can be found in the Condition Transform List below. + // +kubebuilder:validation:Optional + // +listType=set + Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type ConditionsRequestMethodConditionInitParameters struct { + + // One or more string or integer values(e.g. "1") representing the value of the request path to match. Don't include the leading slash (/). If multiple values are specified, they're evaluated using OR logic. + // +listType=set + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // If true operator becomes the opposite of its value. Possible values true or false. Defaults to false. Details can be found in the Condition Operator List below. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // A Conditional operator. Possible values include Any, Equal, Contains, BeginsWith, EndsWith, LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual or RegEx. Details can be found in the Condition Operator List below. 
+ Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` +} + +type ConditionsRequestMethodConditionObservation struct { + + // One or more string or integer values(e.g. "1") representing the value of the request path to match. Don't include the leading slash (/). If multiple values are specified, they're evaluated using OR logic. + // +listType=set + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // If true operator becomes the opposite of its value. Possible values true or false. Defaults to false. Details can be found in the Condition Operator List below. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // A Conditional operator. Possible values include Any, Equal, Contains, BeginsWith, EndsWith, LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual or RegEx. Details can be found in the Condition Operator List below. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` +} + +type ConditionsRequestMethodConditionParameters struct { + + // One or more string or integer values(e.g. "1") representing the value of the request path to match. Don't include the leading slash (/). If multiple values are specified, they're evaluated using OR logic. + // +kubebuilder:validation:Optional + // +listType=set + MatchValues []*string `json:"matchValues" tf:"match_values,omitempty"` + + // If true operator becomes the opposite of its value. Possible values true or false. Defaults to false. Details can be found in the Condition Operator List below. + // +kubebuilder:validation:Optional + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // A Conditional operator. Possible values include Any, Equal, Contains, BeginsWith, EndsWith, LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual or RegEx. Details can be found in the Condition Operator List below. 
+ // +kubebuilder:validation:Optional + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` +} + +type ConditionsRequestSchemeConditionInitParameters struct { + + // One or more string or integer values(e.g. "1") representing the value of the request path to match. Don't include the leading slash (/). If multiple values are specified, they're evaluated using OR logic. + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // If true operator becomes the opposite of its value. Possible values true or false. Defaults to false. Details can be found in the Condition Operator List below. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // A Conditional operator. Possible values include Any, Equal, Contains, BeginsWith, EndsWith, LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual or RegEx. Details can be found in the Condition Operator List below. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` +} + +type ConditionsRequestSchemeConditionObservation struct { + + // One or more string or integer values(e.g. "1") representing the value of the request path to match. Don't include the leading slash (/). If multiple values are specified, they're evaluated using OR logic. + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // If true operator becomes the opposite of its value. Possible values true or false. Defaults to false. Details can be found in the Condition Operator List below. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // A Conditional operator. Possible values include Any, Equal, Contains, BeginsWith, EndsWith, LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual or RegEx. Details can be found in the Condition Operator List below. 
+ Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` +} + +type ConditionsRequestSchemeConditionParameters struct { + + // One or more string or integer values(e.g. "1") representing the value of the request path to match. Don't include the leading slash (/). If multiple values are specified, they're evaluated using OR logic. + // +kubebuilder:validation:Optional + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // If true operator becomes the opposite of its value. Possible values true or false. Defaults to false. Details can be found in the Condition Operator List below. + // +kubebuilder:validation:Optional + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // A Conditional operator. Possible values include Any, Equal, Contains, BeginsWith, EndsWith, LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual or RegEx. Details can be found in the Condition Operator List below. + // +kubebuilder:validation:Optional + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` +} + +type ConditionsRequestURIConditionInitParameters struct { + + // One or more string or integer values(e.g. "1") representing the value of the request path to match. Don't include the leading slash (/). If multiple values are specified, they're evaluated using OR logic. + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // If true operator becomes the opposite of its value. Possible values true or false. Defaults to false. Details can be found in the Condition Operator List below. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // A Conditional operator. Possible values include Any, Equal, Contains, BeginsWith, EndsWith, LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual or RegEx. Details can be found in the Condition Operator List below. 
+ Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A Conditional operator. Possible values include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode or UrlEncode. Details can be found in the Condition Transform List below. + // +listType=set + Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type ConditionsRequestURIConditionObservation struct { + + // One or more string or integer values(e.g. "1") representing the value of the request path to match. Don't include the leading slash (/). If multiple values are specified, they're evaluated using OR logic. + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // If true operator becomes the opposite of its value. Possible values true or false. Defaults to false. Details can be found in the Condition Operator List below. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // A Conditional operator. Possible values include Any, Equal, Contains, BeginsWith, EndsWith, LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual or RegEx. Details can be found in the Condition Operator List below. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A Conditional operator. Possible values include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode or UrlEncode. Details can be found in the Condition Transform List below. + // +listType=set + Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type ConditionsRequestURIConditionParameters struct { + + // One or more string or integer values(e.g. "1") representing the value of the request path to match. Don't include the leading slash (/). If multiple values are specified, they're evaluated using OR logic. + // +kubebuilder:validation:Optional + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // If true operator becomes the opposite of its value. 
Possible values true or false. Defaults to false. Details can be found in the Condition Operator List below. + // +kubebuilder:validation:Optional + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // A Conditional operator. Possible values include Any, Equal, Contains, BeginsWith, EndsWith, LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual or RegEx. Details can be found in the Condition Operator List below. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // A Conditional operator. Possible values include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode or UrlEncode. Details can be found in the Condition Transform List below. + // +kubebuilder:validation:Optional + // +listType=set + Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type ConditionsURLFileExtensionConditionInitParameters struct { + + // One or more string or integer values(e.g. "1") representing the value of the request path to match. Don't include the leading slash (/). If multiple values are specified, they're evaluated using OR logic. + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // If true operator becomes the opposite of its value. Possible values true or false. Defaults to false. Details can be found in the Condition Operator List below. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // A Conditional operator. Possible values include Any, Equal, Contains, BeginsWith, EndsWith, LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual or RegEx. Details can be found in the Condition Operator List below. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A Conditional operator. Possible values include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode or UrlEncode. Details can be found in the Condition Transform List below. 
+ // +listType=set + Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type ConditionsURLFileExtensionConditionObservation struct { + + // One or more string or integer values(e.g. "1") representing the value of the request path to match. Don't include the leading slash (/). If multiple values are specified, they're evaluated using OR logic. + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // If true operator becomes the opposite of its value. Possible values true or false. Defaults to false. Details can be found in the Condition Operator List below. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // A Conditional operator. Possible values include Any, Equal, Contains, BeginsWith, EndsWith, LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual or RegEx. Details can be found in the Condition Operator List below. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A Conditional operator. Possible values include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode or UrlEncode. Details can be found in the Condition Transform List below. + // +listType=set + Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type ConditionsURLFileExtensionConditionParameters struct { + + // One or more string or integer values(e.g. "1") representing the value of the request path to match. Don't include the leading slash (/). If multiple values are specified, they're evaluated using OR logic. + // +kubebuilder:validation:Optional + MatchValues []*string `json:"matchValues" tf:"match_values,omitempty"` + + // If true operator becomes the opposite of its value. Possible values true or false. Defaults to false. Details can be found in the Condition Operator List below. 
+ // +kubebuilder:validation:Optional + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // A Conditional operator. Possible values include Any, Equal, Contains, BeginsWith, EndsWith, LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual or RegEx. Details can be found in the Condition Operator List below. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // A Conditional operator. Possible values include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode or UrlEncode. Details can be found in the Condition Transform List below. + // +kubebuilder:validation:Optional + // +listType=set + Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type ConditionsURLPathConditionInitParameters struct { + + // One or more string or integer values(e.g. "1") representing the value of the request path to match. Don't include the leading slash (/). If multiple values are specified, they're evaluated using OR logic. + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // If true operator becomes the opposite of its value. Possible values true or false. Defaults to false. Details can be found in the Condition Operator List below. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // A Conditional operator. Possible values include Any, Equal, Contains, BeginsWith, EndsWith, LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual or RegEx. Details can be found in the Condition Operator List below. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A Conditional operator. Possible values include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode or UrlEncode. Details can be found in the Condition Transform List below. 
+ // +listType=set + Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type ConditionsURLPathConditionObservation struct { + + // One or more string or integer values(e.g. "1") representing the value of the request path to match. Don't include the leading slash (/). If multiple values are specified, they're evaluated using OR logic. + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // If true operator becomes the opposite of its value. Possible values true or false. Defaults to false. Details can be found in the Condition Operator List below. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // A Conditional operator. Possible values include Any, Equal, Contains, BeginsWith, EndsWith, LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual or RegEx. Details can be found in the Condition Operator List below. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A Conditional operator. Possible values include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode or UrlEncode. Details can be found in the Condition Transform List below. + // +listType=set + Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type ConditionsURLPathConditionParameters struct { + + // One or more string or integer values(e.g. "1") representing the value of the request path to match. Don't include the leading slash (/). If multiple values are specified, they're evaluated using OR logic. + // +kubebuilder:validation:Optional + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // If true operator becomes the opposite of its value. Possible values true or false. Defaults to false. Details can be found in the Condition Operator List below. 
+ // +kubebuilder:validation:Optional + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // A Conditional operator. Possible values include Any, Equal, Contains, BeginsWith, EndsWith, LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual or RegEx. Details can be found in the Condition Operator List below. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // A Conditional operator. Possible values include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode or UrlEncode. Details can be found in the Condition Transform List below. + // +kubebuilder:validation:Optional + // +listType=set + Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type FrontdoorRuleInitParameters struct { + + // An actions block as defined below. + Actions *ActionsInitParameters `json:"actions,omitempty" tf:"actions,omitempty"` + + // If this rule is a match should the rules engine continue processing the remaining rules or stop? Possible values are Continue and Stop. Defaults to Continue. + BehaviorOnMatch *string `json:"behaviorOnMatch,omitempty" tf:"behavior_on_match,omitempty"` + + // A conditions block as defined below. + Conditions *ConditionsInitParameters `json:"conditions,omitempty" tf:"conditions,omitempty"` + + // The order in which the rules will be applied for the Front Door Endpoint. The order value should be sequential and begin at 1(e.g. 1, 2, 3...). A Front Door Rule with a lesser order value will be applied before a rule with a greater order value. + Order *float64 `json:"order,omitempty" tf:"order,omitempty"` +} + +type FrontdoorRuleObservation struct { + + // An actions block as defined below. + Actions *ActionsObservation `json:"actions,omitempty" tf:"actions,omitempty"` + + // If this rule is a match should the rules engine continue processing the remaining rules or stop? Possible values are Continue and Stop. Defaults to Continue. 
+ BehaviorOnMatch *string `json:"behaviorOnMatch,omitempty" tf:"behavior_on_match,omitempty"` + + // The resource ID of the Front Door Rule Set for this Front Door Rule. Changing this forces a new Front Door Rule to be created. + CdnFrontdoorRuleSetID *string `json:"cdnFrontdoorRuleSetId,omitempty" tf:"cdn_frontdoor_rule_set_id,omitempty"` + + // The name of the Front Door Rule Set containing this Front Door Rule. + CdnFrontdoorRuleSetName *string `json:"cdnFrontdoorRuleSetName,omitempty" tf:"cdn_frontdoor_rule_set_name,omitempty"` + + // A conditions block as defined below. + Conditions *ConditionsObservation `json:"conditions,omitempty" tf:"conditions,omitempty"` + + // The ID of the Front Door Rule. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The order in which the rules will be applied for the Front Door Endpoint. The order value should be sequential and begin at 1(e.g. 1, 2, 3...). A Front Door Rule with a lesser order value will be applied before a rule with a greater order value. + Order *float64 `json:"order,omitempty" tf:"order,omitempty"` +} + +type FrontdoorRuleParameters struct { + + // An actions block as defined below. + // +kubebuilder:validation:Optional + Actions *ActionsParameters `json:"actions,omitempty" tf:"actions,omitempty"` + + // If this rule is a match should the rules engine continue processing the remaining rules or stop? Possible values are Continue and Stop. Defaults to Continue. + // +kubebuilder:validation:Optional + BehaviorOnMatch *string `json:"behaviorOnMatch,omitempty" tf:"behavior_on_match,omitempty"` + + // The resource ID of the Front Door Rule Set for this Front Door Rule. Changing this forces a new Front Door Rule to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cdn/v1beta1.FrontdoorRuleSet + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + CdnFrontdoorRuleSetID *string `json:"cdnFrontdoorRuleSetId,omitempty" tf:"cdn_frontdoor_rule_set_id,omitempty"` + + // Reference to a FrontdoorRuleSet in cdn to populate cdnFrontdoorRuleSetId. + // +kubebuilder:validation:Optional + CdnFrontdoorRuleSetIDRef *v1.Reference `json:"cdnFrontdoorRuleSetIdRef,omitempty" tf:"-"` + + // Selector for a FrontdoorRuleSet in cdn to populate cdnFrontdoorRuleSetId. + // +kubebuilder:validation:Optional + CdnFrontdoorRuleSetIDSelector *v1.Selector `json:"cdnFrontdoorRuleSetIdSelector,omitempty" tf:"-"` + + // A conditions block as defined below. + // +kubebuilder:validation:Optional + Conditions *ConditionsParameters `json:"conditions,omitempty" tf:"conditions,omitempty"` + + // The order in which the rules will be applied for the Front Door Endpoint. The order value should be sequential and begin at 1(e.g. 1, 2, 3...). A Front Door Rule with a lesser order value will be applied before a rule with a greater order value. + // +kubebuilder:validation:Optional + Order *float64 `json:"order,omitempty" tf:"order,omitempty"` +} + +type HostNameConditionInitParameters struct { + + // One or more string or integer values(e.g. "1") representing the value of the request path to match. Don't include the leading slash (/). If multiple values are specified, they're evaluated using OR logic. + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // If true operator becomes the opposite of its value. Possible values true or false. Defaults to false. Details can be found in the Condition Operator List below. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // A Conditional operator. 
Possible values include Any, Equal, Contains, BeginsWith, EndsWith, LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual or RegEx. Details can be found in the Condition Operator List below. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A Conditional operator. Possible values include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode or UrlEncode. Details can be found in the Condition Transform List below. + // +listType=set + Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type HostNameConditionObservation struct { + + // One or more string or integer values(e.g. "1") representing the value of the request path to match. Don't include the leading slash (/). If multiple values are specified, they're evaluated using OR logic. + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // If true operator becomes the opposite of its value. Possible values true or false. Defaults to false. Details can be found in the Condition Operator List below. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // A Conditional operator. Possible values include Any, Equal, Contains, BeginsWith, EndsWith, LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual or RegEx. Details can be found in the Condition Operator List below. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A Conditional operator. Possible values include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode or UrlEncode. Details can be found in the Condition Transform List below. + // +listType=set + Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type HostNameConditionParameters struct { + + // One or more string or integer values(e.g. "1") representing the value of the request path to match. Don't include the leading slash (/). If multiple values are specified, they're evaluated using OR logic. 
+ // +kubebuilder:validation:Optional + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // If true operator becomes the opposite of its value. Possible values true or false. Defaults to false. Details can be found in the Condition Operator List below. + // +kubebuilder:validation:Optional + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // A Conditional operator. Possible values include Any, Equal, Contains, BeginsWith, EndsWith, LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual or RegEx. Details can be found in the Condition Operator List below. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // A Conditional operator. Possible values include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode or UrlEncode. Details can be found in the Condition Transform List below. + // +kubebuilder:validation:Optional + // +listType=set + Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type IsDeviceConditionInitParameters struct { + + // One or more string or integer values(e.g. "1") representing the value of the request path to match. Don't include the leading slash (/). If multiple values are specified, they're evaluated using OR logic. + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // If true operator becomes the opposite of its value. Possible values true or false. Defaults to false. Details can be found in the Condition Operator List below. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // A Conditional operator. Possible values include Any, Equal, Contains, BeginsWith, EndsWith, LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual or RegEx. Details can be found in the Condition Operator List below. 
+ Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` +} + +type IsDeviceConditionObservation struct { + + // One or more string or integer values(e.g. "1") representing the value of the request path to match. Don't include the leading slash (/). If multiple values are specified, they're evaluated using OR logic. + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // If true operator becomes the opposite of its value. Possible values true or false. Defaults to false. Details can be found in the Condition Operator List below. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // A Conditional operator. Possible values include Any, Equal, Contains, BeginsWith, EndsWith, LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual or RegEx. Details can be found in the Condition Operator List below. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` +} + +type IsDeviceConditionParameters struct { + + // One or more string or integer values(e.g. "1") representing the value of the request path to match. Don't include the leading slash (/). If multiple values are specified, they're evaluated using OR logic. + // +kubebuilder:validation:Optional + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // If true operator becomes the opposite of its value. Possible values true or false. Defaults to false. Details can be found in the Condition Operator List below. + // +kubebuilder:validation:Optional + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // A Conditional operator. Possible values include Any, Equal, Contains, BeginsWith, EndsWith, LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual or RegEx. Details can be found in the Condition Operator List below. 
+ // +kubebuilder:validation:Optional + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` +} + +type PostArgsConditionInitParameters struct { + + // One or more string or integer values(e.g. "1") representing the value of the request path to match. Don't include the leading slash (/). If multiple values are specified, they're evaluated using OR logic. + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // If true operator becomes the opposite of its value. Possible values true or false. Defaults to false. Details can be found in the Condition Operator List below. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // A Conditional operator. Possible values include Any, Equal, Contains, BeginsWith, EndsWith, LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual or RegEx. Details can be found in the Condition Operator List below. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A string value representing the name of the POST argument. + PostArgsName *string `json:"postArgsName,omitempty" tf:"post_args_name,omitempty"` + + // A Conditional operator. Possible values include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode or UrlEncode. Details can be found in the Condition Transform List below. + // +listType=set + Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type PostArgsConditionObservation struct { + + // One or more string or integer values(e.g. "1") representing the value of the request path to match. Don't include the leading slash (/). If multiple values are specified, they're evaluated using OR logic. + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // If true operator becomes the opposite of its value. Possible values true or false. Defaults to false. Details can be found in the Condition Operator List below. 
+ NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // A Conditional operator. Possible values include Any, Equal, Contains, BeginsWith, EndsWith, LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual or RegEx. Details can be found in the Condition Operator List below. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A string value representing the name of the POST argument. + PostArgsName *string `json:"postArgsName,omitempty" tf:"post_args_name,omitempty"` + + // A Conditional operator. Possible values include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode or UrlEncode. Details can be found in the Condition Transform List below. + // +listType=set + Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type PostArgsConditionParameters struct { + + // One or more string or integer values(e.g. "1") representing the value of the request path to match. Don't include the leading slash (/). If multiple values are specified, they're evaluated using OR logic. + // +kubebuilder:validation:Optional + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // If true operator becomes the opposite of its value. Possible values true or false. Defaults to false. Details can be found in the Condition Operator List below. + // +kubebuilder:validation:Optional + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // A Conditional operator. Possible values include Any, Equal, Contains, BeginsWith, EndsWith, LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual or RegEx. Details can be found in the Condition Operator List below. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // A string value representing the name of the POST argument. 
+ // +kubebuilder:validation:Optional + PostArgsName *string `json:"postArgsName" tf:"post_args_name,omitempty"` + + // A Conditional operator. Possible values include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode or UrlEncode. Details can be found in the Condition Transform List below. + // +kubebuilder:validation:Optional + // +listType=set + Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type RequestHeaderActionInitParameters struct { + + // The action to be taken on the specified header_name. Possible values include Append, Overwrite or Delete. + HeaderAction *string `json:"headerAction,omitempty" tf:"header_action,omitempty"` + + // The name of the header to modify. + HeaderName *string `json:"headerName,omitempty" tf:"header_name,omitempty"` + + // The value to append or overwrite. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type RequestHeaderActionObservation struct { + + // The action to be taken on the specified header_name. Possible values include Append, Overwrite or Delete. + HeaderAction *string `json:"headerAction,omitempty" tf:"header_action,omitempty"` + + // The name of the header to modify. + HeaderName *string `json:"headerName,omitempty" tf:"header_name,omitempty"` + + // The value to append or overwrite. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type RequestHeaderActionParameters struct { + + // The action to be taken on the specified header_name. Possible values include Append, Overwrite or Delete. + // +kubebuilder:validation:Optional + HeaderAction *string `json:"headerAction" tf:"header_action,omitempty"` + + // The name of the header to modify. + // +kubebuilder:validation:Optional + HeaderName *string `json:"headerName" tf:"header_name,omitempty"` + + // The value to append or overwrite. 
+ // +kubebuilder:validation:Optional + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResponseHeaderActionInitParameters struct { + + // The action to be taken on the specified header_name. Possible values include Append, Overwrite or Delete. + HeaderAction *string `json:"headerAction,omitempty" tf:"header_action,omitempty"` + + // The name of the header to modify. + HeaderName *string `json:"headerName,omitempty" tf:"header_name,omitempty"` + + // The value to append or overwrite. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResponseHeaderActionObservation struct { + + // The action to be taken on the specified header_name. Possible values include Append, Overwrite or Delete. + HeaderAction *string `json:"headerAction,omitempty" tf:"header_action,omitempty"` + + // The name of the header to modify. + HeaderName *string `json:"headerName,omitempty" tf:"header_name,omitempty"` + + // The value to append or overwrite. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResponseHeaderActionParameters struct { + + // The action to be taken on the specified header_name. Possible values include Append, Overwrite or Delete. + // +kubebuilder:validation:Optional + HeaderAction *string `json:"headerAction" tf:"header_action,omitempty"` + + // The name of the header to modify. + // +kubebuilder:validation:Optional + HeaderName *string `json:"headerName" tf:"header_name,omitempty"` + + // The value to append or overwrite. + // +kubebuilder:validation:Optional + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type RouteConfigurationOverrideActionInitParameters struct { + + // HonorOrigin the Front Door will always honor origin response header directive. If the origin directive is missing, Front Door will cache contents anywhere from 1 to 3 days. OverrideAlways the TTL value returned from your Front Door Origin is overwritten with the value specified in the action. 
This behavior will only be applied if the response is cacheable. OverrideIfOriginMissing if no TTL value gets returned from your Front Door Origin, the rule sets the TTL to the value specified in the action. This behavior will only be applied if the response is cacheable. Disabled the Front Door will not cache the response contents, irrespective of Front Door Origin response directives. Possible values include HonorOrigin, OverrideAlways, OverrideIfOriginMissing or Disabled. + CacheBehavior *string `json:"cacheBehavior,omitempty" tf:"cache_behavior,omitempty"` + + // When Cache behavior is set to Override or SetIfMissing, this field specifies the cache duration to use. The maximum duration is 366 days specified in the d.HH:MM:SS format(e.g. 365.23:59:59). If the desired maximum cache duration is less than 1 day then the maximum cache duration should be specified in the HH:MM:SS format(e.g. 23:59:59). + CacheDuration *string `json:"cacheDuration,omitempty" tf:"cache_duration,omitempty"` + + // The Front Door Origin Group resource ID that the request should be routed to. This overrides the configuration specified in the Front Door Endpoint route. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cdn/v1beta2.FrontdoorOriginGroup + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + CdnFrontdoorOriginGroupID *string `json:"cdnFrontdoorOriginGroupId,omitempty" tf:"cdn_frontdoor_origin_group_id,omitempty"` + + // Reference to a FrontdoorOriginGroup in cdn to populate cdnFrontdoorOriginGroupId. + // +kubebuilder:validation:Optional + CdnFrontdoorOriginGroupIDRef *v1.Reference `json:"cdnFrontdoorOriginGroupIdRef,omitempty" tf:"-"` + + // Selector for a FrontdoorOriginGroup in cdn to populate cdnFrontdoorOriginGroupId. 
+ // +kubebuilder:validation:Optional + CdnFrontdoorOriginGroupIDSelector *v1.Selector `json:"cdnFrontdoorOriginGroupIdSelector,omitempty" tf:"-"` + + // Should the Front Door dynamically compress the content? Possible values include true or false. + CompressionEnabled *bool `json:"compressionEnabled,omitempty" tf:"compression_enabled,omitempty"` + + // The forwarding protocol the request will be redirected as. This overrides the configuration specified in the route to be associated with. Possible values include MatchRequest, HttpOnly or HttpsOnly. + ForwardingProtocol *string `json:"forwardingProtocol,omitempty" tf:"forwarding_protocol,omitempty"` + + // IncludeSpecifiedQueryStrings query strings specified in the query_string_parameters field get included when the cache key gets generated. UseQueryString cache every unique URL, each unique URL will have its own cache key. IgnoreSpecifiedQueryStrings query strings specified in the query_string_parameters field get excluded when the cache key gets generated. IgnoreQueryString query strings aren't considered when the cache key gets generated. Possible values include IgnoreQueryString, UseQueryString, IgnoreSpecifiedQueryStrings or IncludeSpecifiedQueryStrings. + QueryStringCachingBehavior *string `json:"queryStringCachingBehavior,omitempty" tf:"query_string_caching_behavior,omitempty"` + + // A list of query string parameter names. + QueryStringParameters []*string `json:"queryStringParameters,omitempty" tf:"query_string_parameters,omitempty"` +} + +type RouteConfigurationOverrideActionObservation struct { + + // HonorOrigin the Front Door will always honor origin response header directive. If the origin directive is missing, Front Door will cache contents anywhere from 1 to 3 days. OverrideAlways the TTL value returned from your Front Door Origin is overwritten with the value specified in the action. This behavior will only be applied if the response is cacheable. 
OverrideIfOriginMissing if no TTL value gets returned from your Front Door Origin, the rule sets the TTL to the value specified in the action. This behavior will only be applied if the response is cacheable. Disabled the Front Door will not cache the response contents, irrespective of Front Door Origin response directives. Possible values include HonorOrigin, OverrideAlways, OverrideIfOriginMissing or Disabled. + CacheBehavior *string `json:"cacheBehavior,omitempty" tf:"cache_behavior,omitempty"` + + // When Cache behavior is set to Override or SetIfMissing, this field specifies the cache duration to use. The maximum duration is 366 days specified in the d.HH:MM:SS format(e.g. 365.23:59:59). If the desired maximum cache duration is less than 1 day then the maximum cache duration should be specified in the HH:MM:SS format(e.g. 23:59:59). + CacheDuration *string `json:"cacheDuration,omitempty" tf:"cache_duration,omitempty"` + + // The Front Door Origin Group resource ID that the request should be routed to. This overrides the configuration specified in the Front Door Endpoint route. + CdnFrontdoorOriginGroupID *string `json:"cdnFrontdoorOriginGroupId,omitempty" tf:"cdn_frontdoor_origin_group_id,omitempty"` + + // Should the Front Door dynamically compress the content? Possible values include true or false. + CompressionEnabled *bool `json:"compressionEnabled,omitempty" tf:"compression_enabled,omitempty"` + + // The forwarding protocol the request will be redirected as. This overrides the configuration specified in the route to be associated with. Possible values include MatchRequest, HttpOnly or HttpsOnly. + ForwardingProtocol *string `json:"forwardingProtocol,omitempty" tf:"forwarding_protocol,omitempty"` + + // IncludeSpecifiedQueryStrings query strings specified in the query_string_parameters field get included when the cache key gets generated. UseQueryString cache every unique URL, each unique URL will have its own cache key. 
IgnoreSpecifiedQueryStrings query strings specified in the query_string_parameters field get excluded when the cache key gets generated. IgnoreQueryString query strings aren't considered when the cache key gets generated. Possible values include IgnoreQueryString, UseQueryString, IgnoreSpecifiedQueryStrings or IncludeSpecifiedQueryStrings. + QueryStringCachingBehavior *string `json:"queryStringCachingBehavior,omitempty" tf:"query_string_caching_behavior,omitempty"` + + // A list of query string parameter names. + QueryStringParameters []*string `json:"queryStringParameters,omitempty" tf:"query_string_parameters,omitempty"` +} + +type RouteConfigurationOverrideActionParameters struct { + + // HonorOrigin the Front Door will always honor origin response header directive. If the origin directive is missing, Front Door will cache contents anywhere from 1 to 3 days. OverrideAlways the TTL value returned from your Front Door Origin is overwritten with the value specified in the action. This behavior will only be applied if the response is cacheable. OverrideIfOriginMissing if no TTL value gets returned from your Front Door Origin, the rule sets the TTL to the value specified in the action. This behavior will only be applied if the response is cacheable. Disabled the Front Door will not cache the response contents, irrespective of Front Door Origin response directives. Possible values include HonorOrigin, OverrideAlways, OverrideIfOriginMissing or Disabled. + // +kubebuilder:validation:Optional + CacheBehavior *string `json:"cacheBehavior,omitempty" tf:"cache_behavior,omitempty"` + + // When Cache behavior is set to Override or SetIfMissing, this field specifies the cache duration to use. The maximum duration is 366 days specified in the d.HH:MM:SS format(e.g. 365.23:59:59). If the desired maximum cache duration is less than 1 day then the maximum cache duration should be specified in the HH:MM:SS format(e.g. 23:59:59). 
+ // +kubebuilder:validation:Optional + CacheDuration *string `json:"cacheDuration,omitempty" tf:"cache_duration,omitempty"` + + // The Front Door Origin Group resource ID that the request should be routed to. This overrides the configuration specified in the Front Door Endpoint route. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cdn/v1beta2.FrontdoorOriginGroup + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + CdnFrontdoorOriginGroupID *string `json:"cdnFrontdoorOriginGroupId,omitempty" tf:"cdn_frontdoor_origin_group_id,omitempty"` + + // Reference to a FrontdoorOriginGroup in cdn to populate cdnFrontdoorOriginGroupId. + // +kubebuilder:validation:Optional + CdnFrontdoorOriginGroupIDRef *v1.Reference `json:"cdnFrontdoorOriginGroupIdRef,omitempty" tf:"-"` + + // Selector for a FrontdoorOriginGroup in cdn to populate cdnFrontdoorOriginGroupId. + // +kubebuilder:validation:Optional + CdnFrontdoorOriginGroupIDSelector *v1.Selector `json:"cdnFrontdoorOriginGroupIdSelector,omitempty" tf:"-"` + + // Should the Front Door dynamically compress the content? Possible values include true or false. + // +kubebuilder:validation:Optional + CompressionEnabled *bool `json:"compressionEnabled,omitempty" tf:"compression_enabled,omitempty"` + + // The forwarding protocol the request will be redirected as. This overrides the configuration specified in the route to be associated with. Possible values include MatchRequest, HttpOnly or HttpsOnly. + // +kubebuilder:validation:Optional + ForwardingProtocol *string `json:"forwardingProtocol,omitempty" tf:"forwarding_protocol,omitempty"` + + // IncludeSpecifiedQueryStrings query strings specified in the query_string_parameters field get included when the cache key gets generated. UseQueryString cache every unique URL, each unique URL will have its own cache key. 
IgnoreSpecifiedQueryStrings query strings specified in the query_string_parameters field get excluded when the cache key gets generated. IgnoreQueryString query strings aren't considered when the cache key gets generated. Possible values include IgnoreQueryString, UseQueryString, IgnoreSpecifiedQueryStrings or IncludeSpecifiedQueryStrings. + // +kubebuilder:validation:Optional + QueryStringCachingBehavior *string `json:"queryStringCachingBehavior,omitempty" tf:"query_string_caching_behavior,omitempty"` + + // A list of query string parameter names. + // +kubebuilder:validation:Optional + QueryStringParameters []*string `json:"queryStringParameters,omitempty" tf:"query_string_parameters,omitempty"` +} + +type SSLProtocolConditionInitParameters struct { + + // One or more string or integer values(e.g. "1") representing the value of the request path to match. Don't include the leading slash (/). If multiple values are specified, they're evaluated using OR logic. + // +listType=set + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // If true operator becomes the opposite of its value. Possible values true or false. Defaults to false. Details can be found in the Condition Operator List below. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // A Conditional operator. Possible values include Any, Equal, Contains, BeginsWith, EndsWith, LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual or RegEx. Details can be found in the Condition Operator List below. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` +} + +type SSLProtocolConditionObservation struct { + + // One or more string or integer values(e.g. "1") representing the value of the request path to match. Don't include the leading slash (/). If multiple values are specified, they're evaluated using OR logic. 
+ // +listType=set + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // If true operator becomes the opposite of its value. Possible values true or false. Defaults to false. Details can be found in the Condition Operator List below. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // A Conditional operator. Possible values include Any, Equal, Contains, BeginsWith, EndsWith, LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual or RegEx. Details can be found in the Condition Operator List below. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` +} + +type SSLProtocolConditionParameters struct { + + // One or more string or integer values(e.g. "1") representing the value of the request path to match. Don't include the leading slash (/). If multiple values are specified, they're evaluated using OR logic. + // +kubebuilder:validation:Optional + // +listType=set + MatchValues []*string `json:"matchValues" tf:"match_values,omitempty"` + + // If true operator becomes the opposite of its value. Possible values true or false. Defaults to false. Details can be found in the Condition Operator List below. + // +kubebuilder:validation:Optional + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // A Conditional operator. Possible values include Any, Equal, Contains, BeginsWith, EndsWith, LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual or RegEx. Details can be found in the Condition Operator List below. + // +kubebuilder:validation:Optional + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` +} + +type ServerPortConditionInitParameters struct { + + // One or more string or integer values(e.g. "1") representing the value of the request path to match. Don't include the leading slash (/). If multiple values are specified, they're evaluated using OR logic. 
+ // +listType=set + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // If true operator becomes the opposite of its value. Possible values true or false. Defaults to false. Details can be found in the Condition Operator List below. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // A Conditional operator. Possible values include Any, Equal, Contains, BeginsWith, EndsWith, LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual or RegEx. Details can be found in the Condition Operator List below. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` +} + +type ServerPortConditionObservation struct { + + // One or more string or integer values(e.g. "1") representing the value of the request path to match. Don't include the leading slash (/). If multiple values are specified, they're evaluated using OR logic. + // +listType=set + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // If true operator becomes the opposite of its value. Possible values true or false. Defaults to false. Details can be found in the Condition Operator List below. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // A Conditional operator. Possible values include Any, Equal, Contains, BeginsWith, EndsWith, LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual or RegEx. Details can be found in the Condition Operator List below. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` +} + +type ServerPortConditionParameters struct { + + // One or more string or integer values(e.g. "1") representing the value of the request path to match. Don't include the leading slash (/). If multiple values are specified, they're evaluated using OR logic. 
+ // +kubebuilder:validation:Optional + // +listType=set + MatchValues []*string `json:"matchValues" tf:"match_values,omitempty"` + + // If true operator becomes the opposite of its value. Possible values true or false. Defaults to false. Details can be found in the Condition Operator List below. + // +kubebuilder:validation:Optional + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // A Conditional operator. Possible values include Any, Equal, Contains, BeginsWith, EndsWith, LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual or RegEx. Details can be found in the Condition Operator List below. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` +} + +type SocketAddressConditionInitParameters struct { + + // One or more string or integer values(e.g. "1") representing the value of the request path to match. Don't include the leading slash (/). If multiple values are specified, they're evaluated using OR logic. + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // If true operator becomes the opposite of its value. Possible values true or false. Defaults to false. Details can be found in the Condition Operator List below. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // A Conditional operator. Possible values include Any, Equal, Contains, BeginsWith, EndsWith, LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual or RegEx. Details can be found in the Condition Operator List below. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` +} + +type SocketAddressConditionObservation struct { + + // One or more string or integer values(e.g. "1") representing the value of the request path to match. Don't include the leading slash (/). If multiple values are specified, they're evaluated using OR logic. 
+ MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // If true operator becomes the opposite of its value. Possible values true or false. Defaults to false. Details can be found in the Condition Operator List below. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // A Conditional operator. Possible values include Any, Equal, Contains, BeginsWith, EndsWith, LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual or RegEx. Details can be found in the Condition Operator List below. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` +} + +type SocketAddressConditionParameters struct { + + // One or more string or integer values(e.g. "1") representing the value of the request path to match. Don't include the leading slash (/). If multiple values are specified, they're evaluated using OR logic. + // +kubebuilder:validation:Optional + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // If true operator becomes the opposite of its value. Possible values true or false. Defaults to false. Details can be found in the Condition Operator List below. + // +kubebuilder:validation:Optional + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // A Conditional operator. Possible values include Any, Equal, Contains, BeginsWith, EndsWith, LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual or RegEx. Details can be found in the Condition Operator List below. + // +kubebuilder:validation:Optional + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` +} + +type URLFilenameConditionInitParameters struct { + + // One or more string or integer values(e.g. "1") representing the value of the request path to match. Don't include the leading slash (/). If multiple values are specified, they're evaluated using OR logic. 
+ MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // If true operator becomes the opposite of its value. Possible values true or false. Defaults to false. Details can be found in the Condition Operator List below. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // A Conditional operator. Possible values include Any, Equal, Contains, BeginsWith, EndsWith, LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual or RegEx. Details can be found in the Condition Operator List below. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A Conditional operator. Possible values include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode or UrlEncode. Details can be found in the Condition Transform List below. + // +listType=set + Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type URLFilenameConditionObservation struct { + + // One or more string or integer values(e.g. "1") representing the value of the request path to match. Don't include the leading slash (/). If multiple values are specified, they're evaluated using OR logic. + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // If true operator becomes the opposite of its value. Possible values true or false. Defaults to false. Details can be found in the Condition Operator List below. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // A Conditional operator. Possible values include Any, Equal, Contains, BeginsWith, EndsWith, LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual or RegEx. Details can be found in the Condition Operator List below. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A Conditional operator. Possible values include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode or UrlEncode. 
Details can be found in the Condition Transform List below. + // +listType=set + Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type URLFilenameConditionParameters struct { + + // One or more string or integer values(e.g. "1") representing the value of the request path to match. Don't include the leading slash (/). If multiple values are specified, they're evaluated using OR logic. + // +kubebuilder:validation:Optional + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // If true operator becomes the opposite of its value. Possible values true or false. Defaults to false. Details can be found in the Condition Operator List below. + // +kubebuilder:validation:Optional + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // A Conditional operator. Possible values include Any, Equal, Contains, BeginsWith, EndsWith, LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual or RegEx. Details can be found in the Condition Operator List below. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // A Conditional operator. Possible values include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode or UrlEncode. Details can be found in the Condition Transform List below. + // +kubebuilder:validation:Optional + // +listType=set + Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +// FrontdoorRuleSpec defines the desired state of FrontdoorRule +type FrontdoorRuleSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider FrontdoorRuleParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. 
The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider FrontdoorRuleInitParameters `json:"initProvider,omitempty"` +} + +// FrontdoorRuleStatus defines the observed state of FrontdoorRule. +type FrontdoorRuleStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider FrontdoorRuleObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// FrontdoorRule is the Schema for the FrontdoorRules API. Manages a Front Door (standard/premium) Rule. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type FrontdoorRule struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.actions) || (has(self.initProvider) && has(self.initProvider.actions))",message="spec.forProvider.actions is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in 
self.managementPolicies) || has(self.forProvider.order) || (has(self.initProvider) && has(self.initProvider.order))",message="spec.forProvider.order is a required parameter" + Spec FrontdoorRuleSpec `json:"spec"` + Status FrontdoorRuleStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// FrontdoorRuleList contains a list of FrontdoorRules +type FrontdoorRuleList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []FrontdoorRule `json:"items"` +} + +// Repository type metadata. +var ( + FrontdoorRule_Kind = "FrontdoorRule" + FrontdoorRule_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: FrontdoorRule_Kind}.String() + FrontdoorRule_KindAPIVersion = FrontdoorRule_Kind + "." + CRDGroupVersion.String() + FrontdoorRule_GroupVersionKind = CRDGroupVersion.WithKind(FrontdoorRule_Kind) +) + +func init() { + SchemeBuilder.Register(&FrontdoorRule{}, &FrontdoorRuleList{}) +} diff --git a/apis/cdn/v1beta2/zz_frontdoorsecuritypolicy_terraformed.go b/apis/cdn/v1beta2/zz_frontdoorsecuritypolicy_terraformed.go new file mode 100755 index 000000000..6940e8a28 --- /dev/null +++ b/apis/cdn/v1beta2/zz_frontdoorsecuritypolicy_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this FrontdoorSecurityPolicy +func (mg *FrontdoorSecurityPolicy) GetTerraformResourceType() string { + return "azurerm_cdn_frontdoor_security_policy" +} + +// GetConnectionDetailsMapping for this FrontdoorSecurityPolicy +func (tr *FrontdoorSecurityPolicy) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this FrontdoorSecurityPolicy +func (tr *FrontdoorSecurityPolicy) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this FrontdoorSecurityPolicy +func (tr *FrontdoorSecurityPolicy) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this FrontdoorSecurityPolicy +func (tr *FrontdoorSecurityPolicy) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this FrontdoorSecurityPolicy +func (tr *FrontdoorSecurityPolicy) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this FrontdoorSecurityPolicy +func (tr *FrontdoorSecurityPolicy) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this 
FrontdoorSecurityPolicy +func (tr *FrontdoorSecurityPolicy) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this FrontdoorSecurityPolicy +func (tr *FrontdoorSecurityPolicy) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this FrontdoorSecurityPolicy using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *FrontdoorSecurityPolicy) LateInitialize(attrs []byte) (bool, error) { + params := &FrontdoorSecurityPolicyParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *FrontdoorSecurityPolicy) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/cdn/v1beta2/zz_frontdoorsecuritypolicy_types.go b/apis/cdn/v1beta2/zz_frontdoorsecuritypolicy_types.go new file mode 100755 index 000000000..a3cd05427 --- /dev/null +++ b/apis/cdn/v1beta2/zz_frontdoorsecuritypolicy_types.go @@ -0,0 +1,253 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AssociationInitParameters struct { + + // One or more domain blocks as defined below. Changing this forces a new Front Door Security Policy to be created. + Domain []DomainInitParameters `json:"domain,omitempty" tf:"domain,omitempty"` + + // The list of paths to match for this firewall policy. Possible value includes /*. Changing this forces a new Front Door Security Policy to be created. + PatternsToMatch []*string `json:"patternsToMatch,omitempty" tf:"patterns_to_match,omitempty"` +} + +type AssociationObservation struct { + + // One or more domain blocks as defined below. Changing this forces a new Front Door Security Policy to be created. 
+ Domain []DomainObservation `json:"domain,omitempty" tf:"domain,omitempty"` + + // The list of paths to match for this firewall policy. Possible value includes /*. Changing this forces a new Front Door Security Policy to be created. + PatternsToMatch []*string `json:"patternsToMatch,omitempty" tf:"patterns_to_match,omitempty"` +} + +type AssociationParameters struct { + + // One or more domain blocks as defined below. Changing this forces a new Front Door Security Policy to be created. + // +kubebuilder:validation:Optional + Domain []DomainParameters `json:"domain" tf:"domain,omitempty"` + + // The list of paths to match for this firewall policy. Possible value includes /*. Changing this forces a new Front Door Security Policy to be created. + // +kubebuilder:validation:Optional + PatternsToMatch []*string `json:"patternsToMatch" tf:"patterns_to_match,omitempty"` +} + +type DomainInitParameters struct { + + // The Resource Id of the Front Door Custom Domain or Front Door Endpoint that should be bound to this Front Door Security Policy. Changing this forces a new Front Door Security Policy to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cdn/v1beta2.FrontdoorCustomDomain + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + CdnFrontdoorDomainID *string `json:"cdnFrontdoorDomainId,omitempty" tf:"cdn_frontdoor_domain_id,omitempty"` + + // Reference to a FrontdoorCustomDomain in cdn to populate cdnFrontdoorDomainId. + // +kubebuilder:validation:Optional + CdnFrontdoorDomainIDRef *v1.Reference `json:"cdnFrontdoorDomainIdRef,omitempty" tf:"-"` + + // Selector for a FrontdoorCustomDomain in cdn to populate cdnFrontdoorDomainId. + // +kubebuilder:validation:Optional + CdnFrontdoorDomainIDSelector *v1.Selector `json:"cdnFrontdoorDomainIdSelector,omitempty" tf:"-"` +} + +type DomainObservation struct { + + // (Computed) Is the Front Door Custom Domain/Endpoint activated? 
+ Active *bool `json:"active,omitempty" tf:"active,omitempty"` + + // The Resource Id of the Front Door Custom Domain or Front Door Endpoint that should be bound to this Front Door Security Policy. Changing this forces a new Front Door Security Policy to be created. + CdnFrontdoorDomainID *string `json:"cdnFrontdoorDomainId,omitempty" tf:"cdn_frontdoor_domain_id,omitempty"` +} + +type DomainParameters struct { + + // The Resource Id of the Front Door Custom Domain or Front Door Endpoint that should be bound to this Front Door Security Policy. Changing this forces a new Front Door Security Policy to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cdn/v1beta2.FrontdoorCustomDomain + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + CdnFrontdoorDomainID *string `json:"cdnFrontdoorDomainId,omitempty" tf:"cdn_frontdoor_domain_id,omitempty"` + + // Reference to a FrontdoorCustomDomain in cdn to populate cdnFrontdoorDomainId. + // +kubebuilder:validation:Optional + CdnFrontdoorDomainIDRef *v1.Reference `json:"cdnFrontdoorDomainIdRef,omitempty" tf:"-"` + + // Selector for a FrontdoorCustomDomain in cdn to populate cdnFrontdoorDomainId. + // +kubebuilder:validation:Optional + CdnFrontdoorDomainIDSelector *v1.Selector `json:"cdnFrontdoorDomainIdSelector,omitempty" tf:"-"` +} + +type FirewallInitParameters struct { + + // An association block as defined below. Changing this forces a new Front Door Security Policy to be created. + Association *AssociationInitParameters `json:"association,omitempty" tf:"association,omitempty"` + + // The Resource Id of the Front Door Firewall Policy that should be linked to this Front Door Security Policy. Changing this forces a new Front Door Security Policy to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cdn/v1beta1.FrontdoorFirewallPolicy + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + CdnFrontdoorFirewallPolicyID *string `json:"cdnFrontdoorFirewallPolicyId,omitempty" tf:"cdn_frontdoor_firewall_policy_id,omitempty"` + + // Reference to a FrontdoorFirewallPolicy in cdn to populate cdnFrontdoorFirewallPolicyId. + // +kubebuilder:validation:Optional + CdnFrontdoorFirewallPolicyIDRef *v1.Reference `json:"cdnFrontdoorFirewallPolicyIdRef,omitempty" tf:"-"` + + // Selector for a FrontdoorFirewallPolicy in cdn to populate cdnFrontdoorFirewallPolicyId. + // +kubebuilder:validation:Optional + CdnFrontdoorFirewallPolicyIDSelector *v1.Selector `json:"cdnFrontdoorFirewallPolicyIdSelector,omitempty" tf:"-"` +} + +type FirewallObservation struct { + + // An association block as defined below. Changing this forces a new Front Door Security Policy to be created. + Association *AssociationObservation `json:"association,omitempty" tf:"association,omitempty"` + + // The Resource Id of the Front Door Firewall Policy that should be linked to this Front Door Security Policy. Changing this forces a new Front Door Security Policy to be created. + CdnFrontdoorFirewallPolicyID *string `json:"cdnFrontdoorFirewallPolicyId,omitempty" tf:"cdn_frontdoor_firewall_policy_id,omitempty"` +} + +type FirewallParameters struct { + + // An association block as defined below. Changing this forces a new Front Door Security Policy to be created. + // +kubebuilder:validation:Optional + Association *AssociationParameters `json:"association" tf:"association,omitempty"` + + // The Resource Id of the Front Door Firewall Policy that should be linked to this Front Door Security Policy. Changing this forces a new Front Door Security Policy to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cdn/v1beta1.FrontdoorFirewallPolicy + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + CdnFrontdoorFirewallPolicyID *string `json:"cdnFrontdoorFirewallPolicyId,omitempty" tf:"cdn_frontdoor_firewall_policy_id,omitempty"` + + // Reference to a FrontdoorFirewallPolicy in cdn to populate cdnFrontdoorFirewallPolicyId. + // +kubebuilder:validation:Optional + CdnFrontdoorFirewallPolicyIDRef *v1.Reference `json:"cdnFrontdoorFirewallPolicyIdRef,omitempty" tf:"-"` + + // Selector for a FrontdoorFirewallPolicy in cdn to populate cdnFrontdoorFirewallPolicyId. + // +kubebuilder:validation:Optional + CdnFrontdoorFirewallPolicyIDSelector *v1.Selector `json:"cdnFrontdoorFirewallPolicyIdSelector,omitempty" tf:"-"` +} + +type FrontdoorSecurityPolicyInitParameters struct { + + // An security_policies block as defined below. Changing this forces a new Front Door Security Policy to be created. + SecurityPolicies *SecurityPoliciesInitParameters `json:"securityPolicies,omitempty" tf:"security_policies,omitempty"` +} + +type FrontdoorSecurityPolicyObservation struct { + + // The Front Door Profile Resource Id that is linked to this Front Door Security Policy. Changing this forces a new Front Door Security Policy to be created. + CdnFrontdoorProfileID *string `json:"cdnFrontdoorProfileId,omitempty" tf:"cdn_frontdoor_profile_id,omitempty"` + + // The ID of the Front Door Security Policy. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An security_policies block as defined below. Changing this forces a new Front Door Security Policy to be created. + SecurityPolicies *SecurityPoliciesObservation `json:"securityPolicies,omitempty" tf:"security_policies,omitempty"` +} + +type FrontdoorSecurityPolicyParameters struct { + + // The Front Door Profile Resource Id that is linked to this Front Door Security Policy. 
Changing this forces a new Front Door Security Policy to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cdn/v1beta1.FrontdoorProfile + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + CdnFrontdoorProfileID *string `json:"cdnFrontdoorProfileId,omitempty" tf:"cdn_frontdoor_profile_id,omitempty"` + + // Reference to a FrontdoorProfile in cdn to populate cdnFrontdoorProfileId. + // +kubebuilder:validation:Optional + CdnFrontdoorProfileIDRef *v1.Reference `json:"cdnFrontdoorProfileIdRef,omitempty" tf:"-"` + + // Selector for a FrontdoorProfile in cdn to populate cdnFrontdoorProfileId. + // +kubebuilder:validation:Optional + CdnFrontdoorProfileIDSelector *v1.Selector `json:"cdnFrontdoorProfileIdSelector,omitempty" tf:"-"` + + // An security_policies block as defined below. Changing this forces a new Front Door Security Policy to be created. + // +kubebuilder:validation:Optional + SecurityPolicies *SecurityPoliciesParameters `json:"securityPolicies,omitempty" tf:"security_policies,omitempty"` +} + +type SecurityPoliciesInitParameters struct { + + // An firewall block as defined below. Changing this forces a new Front Door Security Policy to be created. + Firewall *FirewallInitParameters `json:"firewall,omitempty" tf:"firewall,omitempty"` +} + +type SecurityPoliciesObservation struct { + + // An firewall block as defined below. Changing this forces a new Front Door Security Policy to be created. + Firewall *FirewallObservation `json:"firewall,omitempty" tf:"firewall,omitempty"` +} + +type SecurityPoliciesParameters struct { + + // An firewall block as defined below. Changing this forces a new Front Door Security Policy to be created. 
+ // +kubebuilder:validation:Optional + Firewall *FirewallParameters `json:"firewall" tf:"firewall,omitempty"` +} + +// FrontdoorSecurityPolicySpec defines the desired state of FrontdoorSecurityPolicy +type FrontdoorSecurityPolicySpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider FrontdoorSecurityPolicyParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider FrontdoorSecurityPolicyInitParameters `json:"initProvider,omitempty"` +} + +// FrontdoorSecurityPolicyStatus defines the observed state of FrontdoorSecurityPolicy. +type FrontdoorSecurityPolicyStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider FrontdoorSecurityPolicyObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// FrontdoorSecurityPolicy is the Schema for the FrontdoorSecurityPolicys API. Manages a Front Door (standard/premium) Security Policy. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type FrontdoorSecurityPolicy struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.securityPolicies) || (has(self.initProvider) && has(self.initProvider.securityPolicies))",message="spec.forProvider.securityPolicies is a required parameter" + Spec FrontdoorSecurityPolicySpec `json:"spec"` + Status FrontdoorSecurityPolicyStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// FrontdoorSecurityPolicyList contains a list of FrontdoorSecurityPolicys +type FrontdoorSecurityPolicyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []FrontdoorSecurityPolicy `json:"items"` +} + +// Repository type metadata. +var ( + FrontdoorSecurityPolicy_Kind = "FrontdoorSecurityPolicy" + FrontdoorSecurityPolicy_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: FrontdoorSecurityPolicy_Kind}.String() + FrontdoorSecurityPolicy_KindAPIVersion = FrontdoorSecurityPolicy_Kind + "." 
+ CRDGroupVersion.String() + FrontdoorSecurityPolicy_GroupVersionKind = CRDGroupVersion.WithKind(FrontdoorSecurityPolicy_Kind) +) + +func init() { + SchemeBuilder.Register(&FrontdoorSecurityPolicy{}, &FrontdoorSecurityPolicyList{}) +} diff --git a/apis/cdn/v1beta2/zz_generated.conversion_hubs.go b/apis/cdn/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 000000000..fbc639483 --- /dev/null +++ b/apis/cdn/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,28 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Endpoint) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *FrontdoorCustomDomain) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *FrontdoorOrigin) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *FrontdoorOriginGroup) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *FrontdoorRoute) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *FrontdoorRule) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *FrontdoorSecurityPolicy) Hub() {} diff --git a/apis/cdn/v1beta2/zz_generated.deepcopy.go b/apis/cdn/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..3c1986da1 --- /dev/null +++ b/apis/cdn/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,10531 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ActionsInitParameters) DeepCopyInto(out *ActionsInitParameters) { + *out = *in + if in.RequestHeaderAction != nil { + in, out := &in.RequestHeaderAction, &out.RequestHeaderAction + *out = make([]RequestHeaderActionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResponseHeaderAction != nil { + in, out := &in.ResponseHeaderAction, &out.ResponseHeaderAction + *out = make([]ResponseHeaderActionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RouteConfigurationOverrideAction != nil { + in, out := &in.RouteConfigurationOverrideAction, &out.RouteConfigurationOverrideAction + *out = new(RouteConfigurationOverrideActionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.URLRedirectAction != nil { + in, out := &in.URLRedirectAction, &out.URLRedirectAction + *out = new(ActionsURLRedirectActionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.URLRewriteAction != nil { + in, out := &in.URLRewriteAction, &out.URLRewriteAction + *out = new(ActionsURLRewriteActionInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionsInitParameters. +func (in *ActionsInitParameters) DeepCopy() *ActionsInitParameters { + if in == nil { + return nil + } + out := new(ActionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ActionsObservation) DeepCopyInto(out *ActionsObservation) { + *out = *in + if in.RequestHeaderAction != nil { + in, out := &in.RequestHeaderAction, &out.RequestHeaderAction + *out = make([]RequestHeaderActionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResponseHeaderAction != nil { + in, out := &in.ResponseHeaderAction, &out.ResponseHeaderAction + *out = make([]ResponseHeaderActionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RouteConfigurationOverrideAction != nil { + in, out := &in.RouteConfigurationOverrideAction, &out.RouteConfigurationOverrideAction + *out = new(RouteConfigurationOverrideActionObservation) + (*in).DeepCopyInto(*out) + } + if in.URLRedirectAction != nil { + in, out := &in.URLRedirectAction, &out.URLRedirectAction + *out = new(ActionsURLRedirectActionObservation) + (*in).DeepCopyInto(*out) + } + if in.URLRewriteAction != nil { + in, out := &in.URLRewriteAction, &out.URLRewriteAction + *out = new(ActionsURLRewriteActionObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionsObservation. +func (in *ActionsObservation) DeepCopy() *ActionsObservation { + if in == nil { + return nil + } + out := new(ActionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ActionsParameters) DeepCopyInto(out *ActionsParameters) { + *out = *in + if in.RequestHeaderAction != nil { + in, out := &in.RequestHeaderAction, &out.RequestHeaderAction + *out = make([]RequestHeaderActionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResponseHeaderAction != nil { + in, out := &in.ResponseHeaderAction, &out.ResponseHeaderAction + *out = make([]ResponseHeaderActionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RouteConfigurationOverrideAction != nil { + in, out := &in.RouteConfigurationOverrideAction, &out.RouteConfigurationOverrideAction + *out = new(RouteConfigurationOverrideActionParameters) + (*in).DeepCopyInto(*out) + } + if in.URLRedirectAction != nil { + in, out := &in.URLRedirectAction, &out.URLRedirectAction + *out = new(ActionsURLRedirectActionParameters) + (*in).DeepCopyInto(*out) + } + if in.URLRewriteAction != nil { + in, out := &in.URLRewriteAction, &out.URLRewriteAction + *out = new(ActionsURLRewriteActionParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionsParameters. +func (in *ActionsParameters) DeepCopy() *ActionsParameters { + if in == nil { + return nil + } + out := new(ActionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ActionsURLRedirectActionInitParameters) DeepCopyInto(out *ActionsURLRedirectActionInitParameters) { + *out = *in + if in.DestinationFragment != nil { + in, out := &in.DestinationFragment, &out.DestinationFragment + *out = new(string) + **out = **in + } + if in.DestinationHostName != nil { + in, out := &in.DestinationHostName, &out.DestinationHostName + *out = new(string) + **out = **in + } + if in.DestinationPath != nil { + in, out := &in.DestinationPath, &out.DestinationPath + *out = new(string) + **out = **in + } + if in.QueryString != nil { + in, out := &in.QueryString, &out.QueryString + *out = new(string) + **out = **in + } + if in.RedirectProtocol != nil { + in, out := &in.RedirectProtocol, &out.RedirectProtocol + *out = new(string) + **out = **in + } + if in.RedirectType != nil { + in, out := &in.RedirectType, &out.RedirectType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionsURLRedirectActionInitParameters. +func (in *ActionsURLRedirectActionInitParameters) DeepCopy() *ActionsURLRedirectActionInitParameters { + if in == nil { + return nil + } + out := new(ActionsURLRedirectActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ActionsURLRedirectActionObservation) DeepCopyInto(out *ActionsURLRedirectActionObservation) { + *out = *in + if in.DestinationFragment != nil { + in, out := &in.DestinationFragment, &out.DestinationFragment + *out = new(string) + **out = **in + } + if in.DestinationHostName != nil { + in, out := &in.DestinationHostName, &out.DestinationHostName + *out = new(string) + **out = **in + } + if in.DestinationPath != nil { + in, out := &in.DestinationPath, &out.DestinationPath + *out = new(string) + **out = **in + } + if in.QueryString != nil { + in, out := &in.QueryString, &out.QueryString + *out = new(string) + **out = **in + } + if in.RedirectProtocol != nil { + in, out := &in.RedirectProtocol, &out.RedirectProtocol + *out = new(string) + **out = **in + } + if in.RedirectType != nil { + in, out := &in.RedirectType, &out.RedirectType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionsURLRedirectActionObservation. +func (in *ActionsURLRedirectActionObservation) DeepCopy() *ActionsURLRedirectActionObservation { + if in == nil { + return nil + } + out := new(ActionsURLRedirectActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ActionsURLRedirectActionParameters) DeepCopyInto(out *ActionsURLRedirectActionParameters) { + *out = *in + if in.DestinationFragment != nil { + in, out := &in.DestinationFragment, &out.DestinationFragment + *out = new(string) + **out = **in + } + if in.DestinationHostName != nil { + in, out := &in.DestinationHostName, &out.DestinationHostName + *out = new(string) + **out = **in + } + if in.DestinationPath != nil { + in, out := &in.DestinationPath, &out.DestinationPath + *out = new(string) + **out = **in + } + if in.QueryString != nil { + in, out := &in.QueryString, &out.QueryString + *out = new(string) + **out = **in + } + if in.RedirectProtocol != nil { + in, out := &in.RedirectProtocol, &out.RedirectProtocol + *out = new(string) + **out = **in + } + if in.RedirectType != nil { + in, out := &in.RedirectType, &out.RedirectType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionsURLRedirectActionParameters. +func (in *ActionsURLRedirectActionParameters) DeepCopy() *ActionsURLRedirectActionParameters { + if in == nil { + return nil + } + out := new(ActionsURLRedirectActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionsURLRewriteActionInitParameters) DeepCopyInto(out *ActionsURLRewriteActionInitParameters) { + *out = *in + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(string) + **out = **in + } + if in.PreserveUnmatchedPath != nil { + in, out := &in.PreserveUnmatchedPath, &out.PreserveUnmatchedPath + *out = new(bool) + **out = **in + } + if in.SourcePattern != nil { + in, out := &in.SourcePattern, &out.SourcePattern + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionsURLRewriteActionInitParameters. 
+func (in *ActionsURLRewriteActionInitParameters) DeepCopy() *ActionsURLRewriteActionInitParameters { + if in == nil { + return nil + } + out := new(ActionsURLRewriteActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionsURLRewriteActionObservation) DeepCopyInto(out *ActionsURLRewriteActionObservation) { + *out = *in + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(string) + **out = **in + } + if in.PreserveUnmatchedPath != nil { + in, out := &in.PreserveUnmatchedPath, &out.PreserveUnmatchedPath + *out = new(bool) + **out = **in + } + if in.SourcePattern != nil { + in, out := &in.SourcePattern, &out.SourcePattern + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionsURLRewriteActionObservation. +func (in *ActionsURLRewriteActionObservation) DeepCopy() *ActionsURLRewriteActionObservation { + if in == nil { + return nil + } + out := new(ActionsURLRewriteActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionsURLRewriteActionParameters) DeepCopyInto(out *ActionsURLRewriteActionParameters) { + *out = *in + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(string) + **out = **in + } + if in.PreserveUnmatchedPath != nil { + in, out := &in.PreserveUnmatchedPath, &out.PreserveUnmatchedPath + *out = new(bool) + **out = **in + } + if in.SourcePattern != nil { + in, out := &in.SourcePattern, &out.SourcePattern + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionsURLRewriteActionParameters. 
+func (in *ActionsURLRewriteActionParameters) DeepCopy() *ActionsURLRewriteActionParameters { + if in == nil { + return nil + } + out := new(ActionsURLRewriteActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AssociationInitParameters) DeepCopyInto(out *AssociationInitParameters) { + *out = *in + if in.Domain != nil { + in, out := &in.Domain, &out.Domain + *out = make([]DomainInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PatternsToMatch != nil { + in, out := &in.PatternsToMatch, &out.PatternsToMatch + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AssociationInitParameters. +func (in *AssociationInitParameters) DeepCopy() *AssociationInitParameters { + if in == nil { + return nil + } + out := new(AssociationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AssociationObservation) DeepCopyInto(out *AssociationObservation) { + *out = *in + if in.Domain != nil { + in, out := &in.Domain, &out.Domain + *out = make([]DomainObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PatternsToMatch != nil { + in, out := &in.PatternsToMatch, &out.PatternsToMatch + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AssociationObservation. 
+func (in *AssociationObservation) DeepCopy() *AssociationObservation { + if in == nil { + return nil + } + out := new(AssociationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AssociationParameters) DeepCopyInto(out *AssociationParameters) { + *out = *in + if in.Domain != nil { + in, out := &in.Domain, &out.Domain + *out = make([]DomainParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PatternsToMatch != nil { + in, out := &in.PatternsToMatch, &out.PatternsToMatch + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AssociationParameters. +func (in *AssociationParameters) DeepCopy() *AssociationParameters { + if in == nil { + return nil + } + out := new(AssociationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CacheExpirationActionInitParameters) DeepCopyInto(out *CacheExpirationActionInitParameters) { + *out = *in + if in.Behavior != nil { + in, out := &in.Behavior, &out.Behavior + *out = new(string) + **out = **in + } + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CacheExpirationActionInitParameters. 
+func (in *CacheExpirationActionInitParameters) DeepCopy() *CacheExpirationActionInitParameters { + if in == nil { + return nil + } + out := new(CacheExpirationActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CacheExpirationActionObservation) DeepCopyInto(out *CacheExpirationActionObservation) { + *out = *in + if in.Behavior != nil { + in, out := &in.Behavior, &out.Behavior + *out = new(string) + **out = **in + } + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CacheExpirationActionObservation. +func (in *CacheExpirationActionObservation) DeepCopy() *CacheExpirationActionObservation { + if in == nil { + return nil + } + out := new(CacheExpirationActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CacheExpirationActionParameters) DeepCopyInto(out *CacheExpirationActionParameters) { + *out = *in + if in.Behavior != nil { + in, out := &in.Behavior, &out.Behavior + *out = new(string) + **out = **in + } + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CacheExpirationActionParameters. +func (in *CacheExpirationActionParameters) DeepCopy() *CacheExpirationActionParameters { + if in == nil { + return nil + } + out := new(CacheExpirationActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CacheInitParameters) DeepCopyInto(out *CacheInitParameters) { + *out = *in + if in.CompressionEnabled != nil { + in, out := &in.CompressionEnabled, &out.CompressionEnabled + *out = new(bool) + **out = **in + } + if in.ContentTypesToCompress != nil { + in, out := &in.ContentTypesToCompress, &out.ContentTypesToCompress + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.QueryStringCachingBehavior != nil { + in, out := &in.QueryStringCachingBehavior, &out.QueryStringCachingBehavior + *out = new(string) + **out = **in + } + if in.QueryStrings != nil { + in, out := &in.QueryStrings, &out.QueryStrings + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CacheInitParameters. +func (in *CacheInitParameters) DeepCopy() *CacheInitParameters { + if in == nil { + return nil + } + out := new(CacheInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CacheKeyQueryStringActionInitParameters) DeepCopyInto(out *CacheKeyQueryStringActionInitParameters) { + *out = *in + if in.Behavior != nil { + in, out := &in.Behavior, &out.Behavior + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CacheKeyQueryStringActionInitParameters. 
+func (in *CacheKeyQueryStringActionInitParameters) DeepCopy() *CacheKeyQueryStringActionInitParameters { + if in == nil { + return nil + } + out := new(CacheKeyQueryStringActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CacheKeyQueryStringActionObservation) DeepCopyInto(out *CacheKeyQueryStringActionObservation) { + *out = *in + if in.Behavior != nil { + in, out := &in.Behavior, &out.Behavior + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CacheKeyQueryStringActionObservation. +func (in *CacheKeyQueryStringActionObservation) DeepCopy() *CacheKeyQueryStringActionObservation { + if in == nil { + return nil + } + out := new(CacheKeyQueryStringActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CacheKeyQueryStringActionParameters) DeepCopyInto(out *CacheKeyQueryStringActionParameters) { + *out = *in + if in.Behavior != nil { + in, out := &in.Behavior, &out.Behavior + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CacheKeyQueryStringActionParameters. +func (in *CacheKeyQueryStringActionParameters) DeepCopy() *CacheKeyQueryStringActionParameters { + if in == nil { + return nil + } + out := new(CacheKeyQueryStringActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
// NOTE(review): upjet-generated deepcopy code — pointer and slice fields are
// re-allocated so the copy shares no memory with the receiver.
func (in *CacheObservation) DeepCopyInto(out *CacheObservation) {
	*out = *in
	if in.CompressionEnabled != nil {
		in, out := &in.CompressionEnabled, &out.CompressionEnabled
		*out = new(bool)
		**out = **in
	}
	if in.ContentTypesToCompress != nil {
		in, out := &in.ContentTypesToCompress, &out.ContentTypesToCompress
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.QueryStringCachingBehavior != nil {
		in, out := &in.QueryStringCachingBehavior, &out.QueryStringCachingBehavior
		*out = new(string)
		**out = **in
	}
	if in.QueryStrings != nil {
		in, out := &in.QueryStrings, &out.QueryStrings
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CacheObservation.
func (in *CacheObservation) DeepCopy() *CacheObservation {
	if in == nil {
		return nil
	}
	out := new(CacheObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): upjet-generated deepcopy code — do not hand-edit; regenerate instead.
func (in *CacheParameters) DeepCopyInto(out *CacheParameters) {
	*out = *in
	if in.CompressionEnabled != nil {
		in, out := &in.CompressionEnabled, &out.CompressionEnabled
		*out = new(bool)
		**out = **in
	}
	if in.ContentTypesToCompress != nil {
		in, out := &in.ContentTypesToCompress, &out.ContentTypesToCompress
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.QueryStringCachingBehavior != nil {
		in, out := &in.QueryStringCachingBehavior, &out.QueryStringCachingBehavior
		*out = new(string)
		**out = **in
	}
	if in.QueryStrings != nil {
		in, out := &in.QueryStrings, &out.QueryStrings
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CacheParameters.
func (in *CacheParameters) DeepCopy() *CacheParameters {
	if in == nil {
		return nil
	}
	out := new(CacheParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClientPortConditionInitParameters) DeepCopyInto(out *ClientPortConditionInitParameters) {
	*out = *in
	if in.MatchValues != nil {
		in, out := &in.MatchValues, &out.MatchValues
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.NegateCondition != nil {
		in, out := &in.NegateCondition, &out.NegateCondition
		*out = new(bool)
		**out = **in
	}
	if in.Operator != nil {
		in, out := &in.Operator, &out.Operator
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientPortConditionInitParameters.
// NOTE(review): upjet-generated deepcopy code — do not hand-edit; regenerate instead.
func (in *ClientPortConditionInitParameters) DeepCopy() *ClientPortConditionInitParameters {
	if in == nil {
		return nil
	}
	out := new(ClientPortConditionInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClientPortConditionObservation) DeepCopyInto(out *ClientPortConditionObservation) {
	*out = *in
	if in.MatchValues != nil {
		in, out := &in.MatchValues, &out.MatchValues
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.NegateCondition != nil {
		in, out := &in.NegateCondition, &out.NegateCondition
		*out = new(bool)
		**out = **in
	}
	if in.Operator != nil {
		in, out := &in.Operator, &out.Operator
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientPortConditionObservation.
func (in *ClientPortConditionObservation) DeepCopy() *ClientPortConditionObservation {
	if in == nil {
		return nil
	}
	out := new(ClientPortConditionObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): upjet-generated deepcopy code — do not hand-edit; regenerate instead.
func (in *ClientPortConditionParameters) DeepCopyInto(out *ClientPortConditionParameters) {
	*out = *in
	if in.MatchValues != nil {
		in, out := &in.MatchValues, &out.MatchValues
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.NegateCondition != nil {
		in, out := &in.NegateCondition, &out.NegateCondition
		*out = new(bool)
		**out = **in
	}
	if in.Operator != nil {
		in, out := &in.Operator, &out.Operator
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientPortConditionParameters.
func (in *ClientPortConditionParameters) DeepCopy() *ClientPortConditionParameters {
	if in == nil {
		return nil
	}
	out := new(ClientPortConditionParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): upjet-generated deepcopy code — do not hand-edit; regenerate instead.
func (in *ConditionsCookiesConditionInitParameters) DeepCopyInto(out *ConditionsCookiesConditionInitParameters) {
	*out = *in
	if in.CookieName != nil {
		in, out := &in.CookieName, &out.CookieName
		*out = new(string)
		**out = **in
	}
	if in.MatchValues != nil {
		in, out := &in.MatchValues, &out.MatchValues
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.NegateCondition != nil {
		in, out := &in.NegateCondition, &out.NegateCondition
		*out = new(bool)
		**out = **in
	}
	if in.Operator != nil {
		in, out := &in.Operator, &out.Operator
		*out = new(string)
		**out = **in
	}
	if in.Transforms != nil {
		in, out := &in.Transforms, &out.Transforms
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionsCookiesConditionInitParameters.
func (in *ConditionsCookiesConditionInitParameters) DeepCopy() *ConditionsCookiesConditionInitParameters {
	if in == nil {
		return nil
	}
	out := new(ConditionsCookiesConditionInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): upjet-generated deepcopy code — do not hand-edit; regenerate instead.
func (in *ConditionsCookiesConditionObservation) DeepCopyInto(out *ConditionsCookiesConditionObservation) {
	*out = *in
	if in.CookieName != nil {
		in, out := &in.CookieName, &out.CookieName
		*out = new(string)
		**out = **in
	}
	if in.MatchValues != nil {
		in, out := &in.MatchValues, &out.MatchValues
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.NegateCondition != nil {
		in, out := &in.NegateCondition, &out.NegateCondition
		*out = new(bool)
		**out = **in
	}
	if in.Operator != nil {
		in, out := &in.Operator, &out.Operator
		*out = new(string)
		**out = **in
	}
	if in.Transforms != nil {
		in, out := &in.Transforms, &out.Transforms
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionsCookiesConditionObservation.
func (in *ConditionsCookiesConditionObservation) DeepCopy() *ConditionsCookiesConditionObservation {
	if in == nil {
		return nil
	}
	out := new(ConditionsCookiesConditionObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): upjet-generated deepcopy code — do not hand-edit; regenerate instead.
func (in *ConditionsCookiesConditionParameters) DeepCopyInto(out *ConditionsCookiesConditionParameters) {
	*out = *in
	if in.CookieName != nil {
		in, out := &in.CookieName, &out.CookieName
		*out = new(string)
		**out = **in
	}
	if in.MatchValues != nil {
		in, out := &in.MatchValues, &out.MatchValues
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.NegateCondition != nil {
		in, out := &in.NegateCondition, &out.NegateCondition
		*out = new(bool)
		**out = **in
	}
	if in.Operator != nil {
		in, out := &in.Operator, &out.Operator
		*out = new(string)
		**out = **in
	}
	if in.Transforms != nil {
		in, out := &in.Transforms, &out.Transforms
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionsCookiesConditionParameters.
func (in *ConditionsCookiesConditionParameters) DeepCopy() *ConditionsCookiesConditionParameters {
	if in == nil {
		return nil
	}
	out := new(ConditionsCookiesConditionParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): upjet-generated deepcopy code — do not hand-edit; regenerate instead.
func (in *ConditionsHTTPVersionConditionInitParameters) DeepCopyInto(out *ConditionsHTTPVersionConditionInitParameters) {
	*out = *in
	if in.MatchValues != nil {
		in, out := &in.MatchValues, &out.MatchValues
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.NegateCondition != nil {
		in, out := &in.NegateCondition, &out.NegateCondition
		*out = new(bool)
		**out = **in
	}
	if in.Operator != nil {
		in, out := &in.Operator, &out.Operator
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionsHTTPVersionConditionInitParameters.
func (in *ConditionsHTTPVersionConditionInitParameters) DeepCopy() *ConditionsHTTPVersionConditionInitParameters {
	if in == nil {
		return nil
	}
	out := new(ConditionsHTTPVersionConditionInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ConditionsHTTPVersionConditionObservation) DeepCopyInto(out *ConditionsHTTPVersionConditionObservation) {
	*out = *in
	if in.MatchValues != nil {
		in, out := &in.MatchValues, &out.MatchValues
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.NegateCondition != nil {
		in, out := &in.NegateCondition, &out.NegateCondition
		*out = new(bool)
		**out = **in
	}
	if in.Operator != nil {
		in, out := &in.Operator, &out.Operator
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionsHTTPVersionConditionObservation.
// NOTE(review): upjet-generated deepcopy code — do not hand-edit; regenerate instead.
func (in *ConditionsHTTPVersionConditionObservation) DeepCopy() *ConditionsHTTPVersionConditionObservation {
	if in == nil {
		return nil
	}
	out := new(ConditionsHTTPVersionConditionObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ConditionsHTTPVersionConditionParameters) DeepCopyInto(out *ConditionsHTTPVersionConditionParameters) {
	*out = *in
	if in.MatchValues != nil {
		in, out := &in.MatchValues, &out.MatchValues
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.NegateCondition != nil {
		in, out := &in.NegateCondition, &out.NegateCondition
		*out = new(bool)
		**out = **in
	}
	if in.Operator != nil {
		in, out := &in.Operator, &out.Operator
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionsHTTPVersionConditionParameters.
func (in *ConditionsHTTPVersionConditionParameters) DeepCopy() *ConditionsHTTPVersionConditionParameters {
	if in == nil {
		return nil
	}
	out := new(ConditionsHTTPVersionConditionParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): upjet-generated deepcopy code — each condition slice below is
// element-wise deep-copied via that element type's own DeepCopyInto.
func (in *ConditionsInitParameters) DeepCopyInto(out *ConditionsInitParameters) {
	*out = *in
	if in.ClientPortCondition != nil {
		in, out := &in.ClientPortCondition, &out.ClientPortCondition
		*out = make([]ClientPortConditionInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.CookiesCondition != nil {
		in, out := &in.CookiesCondition, &out.CookiesCondition
		*out = make([]ConditionsCookiesConditionInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.HTTPVersionCondition != nil {
		in, out := &in.HTTPVersionCondition, &out.HTTPVersionCondition
		*out = make([]ConditionsHTTPVersionConditionInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.HostNameCondition != nil {
		in, out := &in.HostNameCondition, &out.HostNameCondition
		*out = make([]HostNameConditionInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.IsDeviceCondition != nil {
		in, out := &in.IsDeviceCondition, &out.IsDeviceCondition
		*out = make([]IsDeviceConditionInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.PostArgsCondition != nil {
		in, out := &in.PostArgsCondition, &out.PostArgsCondition
		*out = make([]PostArgsConditionInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.QueryStringCondition != nil {
		in, out := &in.QueryStringCondition, &out.QueryStringCondition
		*out = make([]ConditionsQueryStringConditionInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.RemoteAddressCondition != nil {
		in, out := &in.RemoteAddressCondition, &out.RemoteAddressCondition
		*out = make([]ConditionsRemoteAddressConditionInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.RequestBodyCondition != nil {
		in, out := &in.RequestBodyCondition, &out.RequestBodyCondition
		*out = make([]ConditionsRequestBodyConditionInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.RequestHeaderCondition != nil {
		in, out := &in.RequestHeaderCondition, &out.RequestHeaderCondition
		*out = make([]ConditionsRequestHeaderConditionInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.RequestMethodCondition != nil {
		in, out := &in.RequestMethodCondition, &out.RequestMethodCondition
		*out = make([]ConditionsRequestMethodConditionInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.RequestSchemeCondition != nil {
		in, out := &in.RequestSchemeCondition, &out.RequestSchemeCondition
		*out = make([]ConditionsRequestSchemeConditionInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.RequestURICondition != nil {
		in, out := &in.RequestURICondition, &out.RequestURICondition
		*out = make([]ConditionsRequestURIConditionInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.SSLProtocolCondition != nil {
		in, out := &in.SSLProtocolCondition, &out.SSLProtocolCondition
		*out = make([]SSLProtocolConditionInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ServerPortCondition != nil {
		in, out := &in.ServerPortCondition, &out.ServerPortCondition
		*out = make([]ServerPortConditionInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.SocketAddressCondition != nil {
		in, out := &in.SocketAddressCondition, &out.SocketAddressCondition
		*out = make([]SocketAddressConditionInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.URLFileExtensionCondition != nil {
		in, out := &in.URLFileExtensionCondition, &out.URLFileExtensionCondition
		*out = make([]ConditionsURLFileExtensionConditionInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.URLFilenameCondition != nil {
		in, out := &in.URLFilenameCondition, &out.URLFilenameCondition
		*out = make([]URLFilenameConditionInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.URLPathCondition != nil {
		in, out := &in.URLPathCondition, &out.URLPathCondition
		*out = make([]ConditionsURLPathConditionInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionsInitParameters.
func (in *ConditionsInitParameters) DeepCopy() *ConditionsInitParameters {
	if in == nil {
		return nil
	}
	out := new(ConditionsInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): upjet-generated deepcopy code — each condition slice below is
// element-wise deep-copied via that element type's own DeepCopyInto.
func (in *ConditionsObservation) DeepCopyInto(out *ConditionsObservation) {
	*out = *in
	if in.ClientPortCondition != nil {
		in, out := &in.ClientPortCondition, &out.ClientPortCondition
		*out = make([]ClientPortConditionObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.CookiesCondition != nil {
		in, out := &in.CookiesCondition, &out.CookiesCondition
		*out = make([]ConditionsCookiesConditionObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.HTTPVersionCondition != nil {
		in, out := &in.HTTPVersionCondition, &out.HTTPVersionCondition
		*out = make([]ConditionsHTTPVersionConditionObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.HostNameCondition != nil {
		in, out := &in.HostNameCondition, &out.HostNameCondition
		*out = make([]HostNameConditionObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.IsDeviceCondition != nil {
		in, out := &in.IsDeviceCondition, &out.IsDeviceCondition
		*out = make([]IsDeviceConditionObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.PostArgsCondition != nil {
		in, out := &in.PostArgsCondition, &out.PostArgsCondition
		*out = make([]PostArgsConditionObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.QueryStringCondition != nil {
		in, out := &in.QueryStringCondition, &out.QueryStringCondition
		*out = make([]ConditionsQueryStringConditionObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.RemoteAddressCondition != nil {
		in, out := &in.RemoteAddressCondition, &out.RemoteAddressCondition
		*out = make([]ConditionsRemoteAddressConditionObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.RequestBodyCondition != nil {
		in, out := &in.RequestBodyCondition, &out.RequestBodyCondition
		*out = make([]ConditionsRequestBodyConditionObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.RequestHeaderCondition != nil {
		in, out := &in.RequestHeaderCondition, &out.RequestHeaderCondition
		*out = make([]ConditionsRequestHeaderConditionObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.RequestMethodCondition != nil {
		in, out := &in.RequestMethodCondition, &out.RequestMethodCondition
		*out = make([]ConditionsRequestMethodConditionObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.RequestSchemeCondition != nil {
		in, out := &in.RequestSchemeCondition, &out.RequestSchemeCondition
		*out = make([]ConditionsRequestSchemeConditionObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.RequestURICondition != nil {
		in, out := &in.RequestURICondition, &out.RequestURICondition
		*out = make([]ConditionsRequestURIConditionObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.SSLProtocolCondition != nil {
		in, out := &in.SSLProtocolCondition, &out.SSLProtocolCondition
		*out = make([]SSLProtocolConditionObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ServerPortCondition != nil {
		in, out := &in.ServerPortCondition, &out.ServerPortCondition
		*out = make([]ServerPortConditionObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.SocketAddressCondition != nil {
		in, out := &in.SocketAddressCondition, &out.SocketAddressCondition
		*out = make([]SocketAddressConditionObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.URLFileExtensionCondition != nil {
		in, out := &in.URLFileExtensionCondition, &out.URLFileExtensionCondition
		*out = make([]ConditionsURLFileExtensionConditionObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.URLFilenameCondition != nil {
		in, out := &in.URLFilenameCondition, &out.URLFilenameCondition
		*out = make([]URLFilenameConditionObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.URLPathCondition != nil {
		in, out := &in.URLPathCondition, &out.URLPathCondition
		*out = make([]ConditionsURLPathConditionObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionsObservation.
func (in *ConditionsObservation) DeepCopy() *ConditionsObservation {
	if in == nil {
		return nil
	}
	out := new(ConditionsObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ConditionsParameters) DeepCopyInto(out *ConditionsParameters) {
	*out = *in
	if in.ClientPortCondition != nil {
		in, out := &in.ClientPortCondition, &out.ClientPortCondition
		*out = make([]ClientPortConditionParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.CookiesCondition != nil {
		in, out := &in.CookiesCondition, &out.CookiesCondition
		*out = make([]ConditionsCookiesConditionParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.HTTPVersionCondition != nil {
		in, out := &in.HTTPVersionCondition, &out.HTTPVersionCondition
		*out = make([]ConditionsHTTPVersionConditionParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.HostNameCondition != nil {
		in, out := &in.HostNameCondition, &out.HostNameCondition
		*out = make([]HostNameConditionParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.IsDeviceCondition != nil {
		in, out := &in.IsDeviceCondition, &out.IsDeviceCondition
		*out = make([]IsDeviceConditionParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.PostArgsCondition != nil {
		in, out := &in.PostArgsCondition, &out.PostArgsCondition
		*out = make([]PostArgsConditionParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.QueryStringCondition != nil {
		in, out := &in.QueryStringCondition, &out.QueryStringCondition
		*out = make([]ConditionsQueryStringConditionParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.RemoteAddressCondition != nil {
		in, out := &in.RemoteAddressCondition, &out.RemoteAddressCondition
		*out = make([]ConditionsRemoteAddressConditionParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.RequestBodyCondition != nil {
		in, out := &in.RequestBodyCondition, &out.RequestBodyCondition
		*out = make([]ConditionsRequestBodyConditionParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.RequestHeaderCondition != nil {
		in, out := &in.RequestHeaderCondition, &out.RequestHeaderCondition
		*out = make([]ConditionsRequestHeaderConditionParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.RequestMethodCondition != nil {
		in, out := &in.RequestMethodCondition, &out.RequestMethodCondition
		*out = make([]ConditionsRequestMethodConditionParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.RequestSchemeCondition != nil {
		in, out := &in.RequestSchemeCondition, &out.RequestSchemeCondition
		*out = make([]ConditionsRequestSchemeConditionParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.RequestURICondition != nil {
		in, out := &in.RequestURICondition, &out.RequestURICondition
		*out = make([]ConditionsRequestURIConditionParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.SSLProtocolCondition != nil {
		in, out := &in.SSLProtocolCondition, &out.SSLProtocolCondition
		*out = make([]SSLProtocolConditionParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ServerPortCondition != nil {
		in, out := &in.ServerPortCondition, &out.ServerPortCondition
		*out = make([]ServerPortConditionParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.SocketAddressCondition != nil {
		in, out := &in.SocketAddressCondition, &out.SocketAddressCondition
		*out = make([]SocketAddressConditionParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.URLFileExtensionCondition != nil {
		in, out := &in.URLFileExtensionCondition, &out.URLFileExtensionCondition
		*out = make([]ConditionsURLFileExtensionConditionParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.URLFilenameCondition != nil {
		in, out := &in.URLFilenameCondition, &out.URLFilenameCondition
		*out = make([]URLFilenameConditionParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.URLPathCondition != nil {
		in, out := &in.URLPathCondition, &out.URLPathCondition
		*out = make([]ConditionsURLPathConditionParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionsParameters.
func (in *ConditionsParameters) DeepCopy() *ConditionsParameters {
	if in == nil {
		return nil
	}
	out := new(ConditionsParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): upjet-generated deepcopy code — do not hand-edit; regenerate instead.
func (in *ConditionsQueryStringConditionInitParameters) DeepCopyInto(out *ConditionsQueryStringConditionInitParameters) {
	*out = *in
	if in.MatchValues != nil {
		in, out := &in.MatchValues, &out.MatchValues
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.NegateCondition != nil {
		in, out := &in.NegateCondition, &out.NegateCondition
		*out = new(bool)
		**out = **in
	}
	if in.Operator != nil {
		in, out := &in.Operator, &out.Operator
		*out = new(string)
		**out = **in
	}
	if in.Transforms != nil {
		in, out := &in.Transforms, &out.Transforms
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionsQueryStringConditionInitParameters.
func (in *ConditionsQueryStringConditionInitParameters) DeepCopy() *ConditionsQueryStringConditionInitParameters {
	if in == nil {
		return nil
	}
	out := new(ConditionsQueryStringConditionInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): upjet-generated deepcopy code — do not hand-edit; regenerate instead.
func (in *ConditionsQueryStringConditionObservation) DeepCopyInto(out *ConditionsQueryStringConditionObservation) {
	*out = *in
	if in.MatchValues != nil {
		in, out := &in.MatchValues, &out.MatchValues
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.NegateCondition != nil {
		in, out := &in.NegateCondition, &out.NegateCondition
		*out = new(bool)
		**out = **in
	}
	if in.Operator != nil {
		in, out := &in.Operator, &out.Operator
		*out = new(string)
		**out = **in
	}
	if in.Transforms != nil {
		in, out := &in.Transforms, &out.Transforms
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionsQueryStringConditionObservation.
func (in *ConditionsQueryStringConditionObservation) DeepCopy() *ConditionsQueryStringConditionObservation {
	if in == nil {
		return nil
	}
	out := new(ConditionsQueryStringConditionObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): upjet-generated deepcopy code — do not hand-edit; regenerate instead.
func (in *ConditionsQueryStringConditionParameters) DeepCopyInto(out *ConditionsQueryStringConditionParameters) {
	*out = *in
	if in.MatchValues != nil {
		in, out := &in.MatchValues, &out.MatchValues
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.NegateCondition != nil {
		in, out := &in.NegateCondition, &out.NegateCondition
		*out = new(bool)
		**out = **in
	}
	if in.Operator != nil {
		in, out := &in.Operator, &out.Operator
		*out = new(string)
		**out = **in
	}
	if in.Transforms != nil {
		in, out := &in.Transforms, &out.Transforms
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionsQueryStringConditionParameters.
func (in *ConditionsQueryStringConditionParameters) DeepCopy() *ConditionsQueryStringConditionParameters {
	if in == nil {
		return nil
	}
	out := new(ConditionsQueryStringConditionParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConditionsRemoteAddressConditionInitParameters) DeepCopyInto(out *ConditionsRemoteAddressConditionInitParameters) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionsRemoteAddressConditionInitParameters. +func (in *ConditionsRemoteAddressConditionInitParameters) DeepCopy() *ConditionsRemoteAddressConditionInitParameters { + if in == nil { + return nil + } + out := new(ConditionsRemoteAddressConditionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionsRemoteAddressConditionObservation) DeepCopyInto(out *ConditionsRemoteAddressConditionObservation) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionsRemoteAddressConditionObservation. 
+func (in *ConditionsRemoteAddressConditionObservation) DeepCopy() *ConditionsRemoteAddressConditionObservation { + if in == nil { + return nil + } + out := new(ConditionsRemoteAddressConditionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionsRemoteAddressConditionParameters) DeepCopyInto(out *ConditionsRemoteAddressConditionParameters) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionsRemoteAddressConditionParameters. +func (in *ConditionsRemoteAddressConditionParameters) DeepCopy() *ConditionsRemoteAddressConditionParameters { + if in == nil { + return nil + } + out := new(ConditionsRemoteAddressConditionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConditionsRequestBodyConditionInitParameters) DeepCopyInto(out *ConditionsRequestBodyConditionInitParameters) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Transforms != nil { + in, out := &in.Transforms, &out.Transforms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionsRequestBodyConditionInitParameters. +func (in *ConditionsRequestBodyConditionInitParameters) DeepCopy() *ConditionsRequestBodyConditionInitParameters { + if in == nil { + return nil + } + out := new(ConditionsRequestBodyConditionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConditionsRequestBodyConditionObservation) DeepCopyInto(out *ConditionsRequestBodyConditionObservation) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Transforms != nil { + in, out := &in.Transforms, &out.Transforms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionsRequestBodyConditionObservation. +func (in *ConditionsRequestBodyConditionObservation) DeepCopy() *ConditionsRequestBodyConditionObservation { + if in == nil { + return nil + } + out := new(ConditionsRequestBodyConditionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConditionsRequestBodyConditionParameters) DeepCopyInto(out *ConditionsRequestBodyConditionParameters) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Transforms != nil { + in, out := &in.Transforms, &out.Transforms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionsRequestBodyConditionParameters. +func (in *ConditionsRequestBodyConditionParameters) DeepCopy() *ConditionsRequestBodyConditionParameters { + if in == nil { + return nil + } + out := new(ConditionsRequestBodyConditionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConditionsRequestHeaderConditionInitParameters) DeepCopyInto(out *ConditionsRequestHeaderConditionInitParameters) { + *out = *in + if in.HeaderName != nil { + in, out := &in.HeaderName, &out.HeaderName + *out = new(string) + **out = **in + } + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Transforms != nil { + in, out := &in.Transforms, &out.Transforms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionsRequestHeaderConditionInitParameters. +func (in *ConditionsRequestHeaderConditionInitParameters) DeepCopy() *ConditionsRequestHeaderConditionInitParameters { + if in == nil { + return nil + } + out := new(ConditionsRequestHeaderConditionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConditionsRequestHeaderConditionObservation) DeepCopyInto(out *ConditionsRequestHeaderConditionObservation) { + *out = *in + if in.HeaderName != nil { + in, out := &in.HeaderName, &out.HeaderName + *out = new(string) + **out = **in + } + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Transforms != nil { + in, out := &in.Transforms, &out.Transforms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionsRequestHeaderConditionObservation. +func (in *ConditionsRequestHeaderConditionObservation) DeepCopy() *ConditionsRequestHeaderConditionObservation { + if in == nil { + return nil + } + out := new(ConditionsRequestHeaderConditionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConditionsRequestHeaderConditionParameters) DeepCopyInto(out *ConditionsRequestHeaderConditionParameters) { + *out = *in + if in.HeaderName != nil { + in, out := &in.HeaderName, &out.HeaderName + *out = new(string) + **out = **in + } + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Transforms != nil { + in, out := &in.Transforms, &out.Transforms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionsRequestHeaderConditionParameters. +func (in *ConditionsRequestHeaderConditionParameters) DeepCopy() *ConditionsRequestHeaderConditionParameters { + if in == nil { + return nil + } + out := new(ConditionsRequestHeaderConditionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConditionsRequestMethodConditionInitParameters) DeepCopyInto(out *ConditionsRequestMethodConditionInitParameters) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionsRequestMethodConditionInitParameters. +func (in *ConditionsRequestMethodConditionInitParameters) DeepCopy() *ConditionsRequestMethodConditionInitParameters { + if in == nil { + return nil + } + out := new(ConditionsRequestMethodConditionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionsRequestMethodConditionObservation) DeepCopyInto(out *ConditionsRequestMethodConditionObservation) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionsRequestMethodConditionObservation. 
+func (in *ConditionsRequestMethodConditionObservation) DeepCopy() *ConditionsRequestMethodConditionObservation { + if in == nil { + return nil + } + out := new(ConditionsRequestMethodConditionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionsRequestMethodConditionParameters) DeepCopyInto(out *ConditionsRequestMethodConditionParameters) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionsRequestMethodConditionParameters. +func (in *ConditionsRequestMethodConditionParameters) DeepCopy() *ConditionsRequestMethodConditionParameters { + if in == nil { + return nil + } + out := new(ConditionsRequestMethodConditionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConditionsRequestSchemeConditionInitParameters) DeepCopyInto(out *ConditionsRequestSchemeConditionInitParameters) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionsRequestSchemeConditionInitParameters. +func (in *ConditionsRequestSchemeConditionInitParameters) DeepCopy() *ConditionsRequestSchemeConditionInitParameters { + if in == nil { + return nil + } + out := new(ConditionsRequestSchemeConditionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionsRequestSchemeConditionObservation) DeepCopyInto(out *ConditionsRequestSchemeConditionObservation) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionsRequestSchemeConditionObservation. 
+func (in *ConditionsRequestSchemeConditionObservation) DeepCopy() *ConditionsRequestSchemeConditionObservation { + if in == nil { + return nil + } + out := new(ConditionsRequestSchemeConditionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionsRequestSchemeConditionParameters) DeepCopyInto(out *ConditionsRequestSchemeConditionParameters) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionsRequestSchemeConditionParameters. +func (in *ConditionsRequestSchemeConditionParameters) DeepCopy() *ConditionsRequestSchemeConditionParameters { + if in == nil { + return nil + } + out := new(ConditionsRequestSchemeConditionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConditionsRequestURIConditionInitParameters) DeepCopyInto(out *ConditionsRequestURIConditionInitParameters) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Transforms != nil { + in, out := &in.Transforms, &out.Transforms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionsRequestURIConditionInitParameters. +func (in *ConditionsRequestURIConditionInitParameters) DeepCopy() *ConditionsRequestURIConditionInitParameters { + if in == nil { + return nil + } + out := new(ConditionsRequestURIConditionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConditionsRequestURIConditionObservation) DeepCopyInto(out *ConditionsRequestURIConditionObservation) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Transforms != nil { + in, out := &in.Transforms, &out.Transforms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionsRequestURIConditionObservation. +func (in *ConditionsRequestURIConditionObservation) DeepCopy() *ConditionsRequestURIConditionObservation { + if in == nil { + return nil + } + out := new(ConditionsRequestURIConditionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConditionsRequestURIConditionParameters) DeepCopyInto(out *ConditionsRequestURIConditionParameters) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Transforms != nil { + in, out := &in.Transforms, &out.Transforms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionsRequestURIConditionParameters. +func (in *ConditionsRequestURIConditionParameters) DeepCopy() *ConditionsRequestURIConditionParameters { + if in == nil { + return nil + } + out := new(ConditionsRequestURIConditionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConditionsURLFileExtensionConditionInitParameters) DeepCopyInto(out *ConditionsURLFileExtensionConditionInitParameters) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Transforms != nil { + in, out := &in.Transforms, &out.Transforms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionsURLFileExtensionConditionInitParameters. +func (in *ConditionsURLFileExtensionConditionInitParameters) DeepCopy() *ConditionsURLFileExtensionConditionInitParameters { + if in == nil { + return nil + } + out := new(ConditionsURLFileExtensionConditionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConditionsURLFileExtensionConditionObservation) DeepCopyInto(out *ConditionsURLFileExtensionConditionObservation) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Transforms != nil { + in, out := &in.Transforms, &out.Transforms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionsURLFileExtensionConditionObservation. +func (in *ConditionsURLFileExtensionConditionObservation) DeepCopy() *ConditionsURLFileExtensionConditionObservation { + if in == nil { + return nil + } + out := new(ConditionsURLFileExtensionConditionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConditionsURLFileExtensionConditionParameters) DeepCopyInto(out *ConditionsURLFileExtensionConditionParameters) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Transforms != nil { + in, out := &in.Transforms, &out.Transforms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionsURLFileExtensionConditionParameters. +func (in *ConditionsURLFileExtensionConditionParameters) DeepCopy() *ConditionsURLFileExtensionConditionParameters { + if in == nil { + return nil + } + out := new(ConditionsURLFileExtensionConditionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConditionsURLPathConditionInitParameters) DeepCopyInto(out *ConditionsURLPathConditionInitParameters) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Transforms != nil { + in, out := &in.Transforms, &out.Transforms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionsURLPathConditionInitParameters. +func (in *ConditionsURLPathConditionInitParameters) DeepCopy() *ConditionsURLPathConditionInitParameters { + if in == nil { + return nil + } + out := new(ConditionsURLPathConditionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConditionsURLPathConditionObservation) DeepCopyInto(out *ConditionsURLPathConditionObservation) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Transforms != nil { + in, out := &in.Transforms, &out.Transforms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionsURLPathConditionObservation. +func (in *ConditionsURLPathConditionObservation) DeepCopy() *ConditionsURLPathConditionObservation { + if in == nil { + return nil + } + out := new(ConditionsURLPathConditionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConditionsURLPathConditionParameters) DeepCopyInto(out *ConditionsURLPathConditionParameters) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Transforms != nil { + in, out := &in.Transforms, &out.Transforms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionsURLPathConditionParameters. +func (in *ConditionsURLPathConditionParameters) DeepCopy() *ConditionsURLPathConditionParameters { + if in == nil { + return nil + } + out := new(ConditionsURLPathConditionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CookiesConditionInitParameters) DeepCopyInto(out *CookiesConditionInitParameters) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(string) + **out = **in + } + if in.Transforms != nil { + in, out := &in.Transforms, &out.Transforms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CookiesConditionInitParameters. +func (in *CookiesConditionInitParameters) DeepCopy() *CookiesConditionInitParameters { + if in == nil { + return nil + } + out := new(CookiesConditionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CookiesConditionObservation) DeepCopyInto(out *CookiesConditionObservation) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(string) + **out = **in + } + if in.Transforms != nil { + in, out := &in.Transforms, &out.Transforms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CookiesConditionObservation. +func (in *CookiesConditionObservation) DeepCopy() *CookiesConditionObservation { + if in == nil { + return nil + } + out := new(CookiesConditionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CookiesConditionParameters) DeepCopyInto(out *CookiesConditionParameters) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(string) + **out = **in + } + if in.Transforms != nil { + in, out := &in.Transforms, &out.Transforms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CookiesConditionParameters. +func (in *CookiesConditionParameters) DeepCopy() *CookiesConditionParameters { + if in == nil { + return nil + } + out := new(CookiesConditionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeliveryRuleInitParameters) DeepCopyInto(out *DeliveryRuleInitParameters) { + *out = *in + if in.CacheExpirationAction != nil { + in, out := &in.CacheExpirationAction, &out.CacheExpirationAction + *out = new(CacheExpirationActionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CacheKeyQueryStringAction != nil { + in, out := &in.CacheKeyQueryStringAction, &out.CacheKeyQueryStringAction + *out = new(CacheKeyQueryStringActionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CookiesCondition != nil { + in, out := &in.CookiesCondition, &out.CookiesCondition + *out = make([]CookiesConditionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DeviceCondition != nil { + in, out := &in.DeviceCondition, &out.DeviceCondition + *out = new(DeviceConditionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.HTTPVersionCondition != nil { + in, out := &in.HTTPVersionCondition, &out.HTTPVersionCondition + *out = make([]HTTPVersionConditionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ModifyRequestHeaderAction != nil { + in, out := &in.ModifyRequestHeaderAction, &out.ModifyRequestHeaderAction + *out = make([]ModifyRequestHeaderActionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ModifyResponseHeaderAction != nil { + in, out := &in.ModifyResponseHeaderAction, &out.ModifyResponseHeaderAction + *out = make([]ModifyResponseHeaderActionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Order != nil { + in, out := &in.Order, &out.Order + *out = new(float64) + **out = **in + } + if in.PostArgCondition != nil { + in, out := &in.PostArgCondition, &out.PostArgCondition + *out = make([]PostArgConditionInitParameters, len(*in)) + for i := range *in { + 
(*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.QueryStringCondition != nil { + in, out := &in.QueryStringCondition, &out.QueryStringCondition + *out = make([]QueryStringConditionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RemoteAddressCondition != nil { + in, out := &in.RemoteAddressCondition, &out.RemoteAddressCondition + *out = make([]RemoteAddressConditionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RequestBodyCondition != nil { + in, out := &in.RequestBodyCondition, &out.RequestBodyCondition + *out = make([]RequestBodyConditionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RequestHeaderCondition != nil { + in, out := &in.RequestHeaderCondition, &out.RequestHeaderCondition + *out = make([]RequestHeaderConditionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RequestMethodCondition != nil { + in, out := &in.RequestMethodCondition, &out.RequestMethodCondition + *out = new(RequestMethodConditionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RequestSchemeCondition != nil { + in, out := &in.RequestSchemeCondition, &out.RequestSchemeCondition + *out = new(RequestSchemeConditionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RequestURICondition != nil { + in, out := &in.RequestURICondition, &out.RequestURICondition + *out = make([]RequestURIConditionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.URLFileExtensionCondition != nil { + in, out := &in.URLFileExtensionCondition, &out.URLFileExtensionCondition + *out = make([]URLFileExtensionConditionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.URLFileNameCondition != nil { + in, out := &in.URLFileNameCondition, &out.URLFileNameCondition + *out = 
make([]URLFileNameConditionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.URLPathCondition != nil { + in, out := &in.URLPathCondition, &out.URLPathCondition + *out = make([]URLPathConditionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.URLRedirectAction != nil { + in, out := &in.URLRedirectAction, &out.URLRedirectAction + *out = new(URLRedirectActionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.URLRewriteAction != nil { + in, out := &in.URLRewriteAction, &out.URLRewriteAction + *out = new(URLRewriteActionInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeliveryRuleInitParameters. +func (in *DeliveryRuleInitParameters) DeepCopy() *DeliveryRuleInitParameters { + if in == nil { + return nil + } + out := new(DeliveryRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeliveryRuleObservation) DeepCopyInto(out *DeliveryRuleObservation) { + *out = *in + if in.CacheExpirationAction != nil { + in, out := &in.CacheExpirationAction, &out.CacheExpirationAction + *out = new(CacheExpirationActionObservation) + (*in).DeepCopyInto(*out) + } + if in.CacheKeyQueryStringAction != nil { + in, out := &in.CacheKeyQueryStringAction, &out.CacheKeyQueryStringAction + *out = new(CacheKeyQueryStringActionObservation) + (*in).DeepCopyInto(*out) + } + if in.CookiesCondition != nil { + in, out := &in.CookiesCondition, &out.CookiesCondition + *out = make([]CookiesConditionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DeviceCondition != nil { + in, out := &in.DeviceCondition, &out.DeviceCondition + *out = new(DeviceConditionObservation) + (*in).DeepCopyInto(*out) + } + if in.HTTPVersionCondition != nil { + in, out := &in.HTTPVersionCondition, &out.HTTPVersionCondition + *out = make([]HTTPVersionConditionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ModifyRequestHeaderAction != nil { + in, out := &in.ModifyRequestHeaderAction, &out.ModifyRequestHeaderAction + *out = make([]ModifyRequestHeaderActionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ModifyResponseHeaderAction != nil { + in, out := &in.ModifyResponseHeaderAction, &out.ModifyResponseHeaderAction + *out = make([]ModifyResponseHeaderActionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Order != nil { + in, out := &in.Order, &out.Order + *out = new(float64) + **out = **in + } + if in.PostArgCondition != nil { + in, out := &in.PostArgCondition, &out.PostArgCondition + *out = make([]PostArgConditionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if 
in.QueryStringCondition != nil { + in, out := &in.QueryStringCondition, &out.QueryStringCondition + *out = make([]QueryStringConditionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RemoteAddressCondition != nil { + in, out := &in.RemoteAddressCondition, &out.RemoteAddressCondition + *out = make([]RemoteAddressConditionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RequestBodyCondition != nil { + in, out := &in.RequestBodyCondition, &out.RequestBodyCondition + *out = make([]RequestBodyConditionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RequestHeaderCondition != nil { + in, out := &in.RequestHeaderCondition, &out.RequestHeaderCondition + *out = make([]RequestHeaderConditionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RequestMethodCondition != nil { + in, out := &in.RequestMethodCondition, &out.RequestMethodCondition + *out = new(RequestMethodConditionObservation) + (*in).DeepCopyInto(*out) + } + if in.RequestSchemeCondition != nil { + in, out := &in.RequestSchemeCondition, &out.RequestSchemeCondition + *out = new(RequestSchemeConditionObservation) + (*in).DeepCopyInto(*out) + } + if in.RequestURICondition != nil { + in, out := &in.RequestURICondition, &out.RequestURICondition + *out = make([]RequestURIConditionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.URLFileExtensionCondition != nil { + in, out := &in.URLFileExtensionCondition, &out.URLFileExtensionCondition + *out = make([]URLFileExtensionConditionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.URLFileNameCondition != nil { + in, out := &in.URLFileNameCondition, &out.URLFileNameCondition + *out = make([]URLFileNameConditionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + 
} + } + if in.URLPathCondition != nil { + in, out := &in.URLPathCondition, &out.URLPathCondition + *out = make([]URLPathConditionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.URLRedirectAction != nil { + in, out := &in.URLRedirectAction, &out.URLRedirectAction + *out = new(URLRedirectActionObservation) + (*in).DeepCopyInto(*out) + } + if in.URLRewriteAction != nil { + in, out := &in.URLRewriteAction, &out.URLRewriteAction + *out = new(URLRewriteActionObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeliveryRuleObservation. +func (in *DeliveryRuleObservation) DeepCopy() *DeliveryRuleObservation { + if in == nil { + return nil + } + out := new(DeliveryRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeliveryRuleParameters) DeepCopyInto(out *DeliveryRuleParameters) { + *out = *in + if in.CacheExpirationAction != nil { + in, out := &in.CacheExpirationAction, &out.CacheExpirationAction + *out = new(CacheExpirationActionParameters) + (*in).DeepCopyInto(*out) + } + if in.CacheKeyQueryStringAction != nil { + in, out := &in.CacheKeyQueryStringAction, &out.CacheKeyQueryStringAction + *out = new(CacheKeyQueryStringActionParameters) + (*in).DeepCopyInto(*out) + } + if in.CookiesCondition != nil { + in, out := &in.CookiesCondition, &out.CookiesCondition + *out = make([]CookiesConditionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DeviceCondition != nil { + in, out := &in.DeviceCondition, &out.DeviceCondition + *out = new(DeviceConditionParameters) + (*in).DeepCopyInto(*out) + } + if in.HTTPVersionCondition != nil { + in, out := &in.HTTPVersionCondition, &out.HTTPVersionCondition + *out = make([]HTTPVersionConditionParameters, len(*in)) + for i := range *in 
{ + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ModifyRequestHeaderAction != nil { + in, out := &in.ModifyRequestHeaderAction, &out.ModifyRequestHeaderAction + *out = make([]ModifyRequestHeaderActionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ModifyResponseHeaderAction != nil { + in, out := &in.ModifyResponseHeaderAction, &out.ModifyResponseHeaderAction + *out = make([]ModifyResponseHeaderActionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Order != nil { + in, out := &in.Order, &out.Order + *out = new(float64) + **out = **in + } + if in.PostArgCondition != nil { + in, out := &in.PostArgCondition, &out.PostArgCondition + *out = make([]PostArgConditionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.QueryStringCondition != nil { + in, out := &in.QueryStringCondition, &out.QueryStringCondition + *out = make([]QueryStringConditionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RemoteAddressCondition != nil { + in, out := &in.RemoteAddressCondition, &out.RemoteAddressCondition + *out = make([]RemoteAddressConditionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RequestBodyCondition != nil { + in, out := &in.RequestBodyCondition, &out.RequestBodyCondition + *out = make([]RequestBodyConditionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RequestHeaderCondition != nil { + in, out := &in.RequestHeaderCondition, &out.RequestHeaderCondition + *out = make([]RequestHeaderConditionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RequestMethodCondition != nil { + in, out := &in.RequestMethodCondition, &out.RequestMethodCondition + *out = 
new(RequestMethodConditionParameters) + (*in).DeepCopyInto(*out) + } + if in.RequestSchemeCondition != nil { + in, out := &in.RequestSchemeCondition, &out.RequestSchemeCondition + *out = new(RequestSchemeConditionParameters) + (*in).DeepCopyInto(*out) + } + if in.RequestURICondition != nil { + in, out := &in.RequestURICondition, &out.RequestURICondition + *out = make([]RequestURIConditionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.URLFileExtensionCondition != nil { + in, out := &in.URLFileExtensionCondition, &out.URLFileExtensionCondition + *out = make([]URLFileExtensionConditionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.URLFileNameCondition != nil { + in, out := &in.URLFileNameCondition, &out.URLFileNameCondition + *out = make([]URLFileNameConditionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.URLPathCondition != nil { + in, out := &in.URLPathCondition, &out.URLPathCondition + *out = make([]URLPathConditionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.URLRedirectAction != nil { + in, out := &in.URLRedirectAction, &out.URLRedirectAction + *out = new(URLRedirectActionParameters) + (*in).DeepCopyInto(*out) + } + if in.URLRewriteAction != nil { + in, out := &in.URLRewriteAction, &out.URLRewriteAction + *out = new(URLRewriteActionParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeliveryRuleParameters. +func (in *DeliveryRuleParameters) DeepCopy() *DeliveryRuleParameters { + if in == nil { + return nil + } + out := new(DeliveryRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeviceConditionInitParameters) DeepCopyInto(out *DeviceConditionInitParameters) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceConditionInitParameters. +func (in *DeviceConditionInitParameters) DeepCopy() *DeviceConditionInitParameters { + if in == nil { + return nil + } + out := new(DeviceConditionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceConditionObservation) DeepCopyInto(out *DeviceConditionObservation) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceConditionObservation. 
+func (in *DeviceConditionObservation) DeepCopy() *DeviceConditionObservation { + if in == nil { + return nil + } + out := new(DeviceConditionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceConditionParameters) DeepCopyInto(out *DeviceConditionParameters) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceConditionParameters. +func (in *DeviceConditionParameters) DeepCopy() *DeviceConditionParameters { + if in == nil { + return nil + } + out := new(DeviceConditionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DomainInitParameters) DeepCopyInto(out *DomainInitParameters) { + *out = *in + if in.CdnFrontdoorDomainID != nil { + in, out := &in.CdnFrontdoorDomainID, &out.CdnFrontdoorDomainID + *out = new(string) + **out = **in + } + if in.CdnFrontdoorDomainIDRef != nil { + in, out := &in.CdnFrontdoorDomainIDRef, &out.CdnFrontdoorDomainIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CdnFrontdoorDomainIDSelector != nil { + in, out := &in.CdnFrontdoorDomainIDSelector, &out.CdnFrontdoorDomainIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainInitParameters. +func (in *DomainInitParameters) DeepCopy() *DomainInitParameters { + if in == nil { + return nil + } + out := new(DomainInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DomainObservation) DeepCopyInto(out *DomainObservation) { + *out = *in + if in.Active != nil { + in, out := &in.Active, &out.Active + *out = new(bool) + **out = **in + } + if in.CdnFrontdoorDomainID != nil { + in, out := &in.CdnFrontdoorDomainID, &out.CdnFrontdoorDomainID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainObservation. +func (in *DomainObservation) DeepCopy() *DomainObservation { + if in == nil { + return nil + } + out := new(DomainObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DomainParameters) DeepCopyInto(out *DomainParameters) { + *out = *in + if in.CdnFrontdoorDomainID != nil { + in, out := &in.CdnFrontdoorDomainID, &out.CdnFrontdoorDomainID + *out = new(string) + **out = **in + } + if in.CdnFrontdoorDomainIDRef != nil { + in, out := &in.CdnFrontdoorDomainIDRef, &out.CdnFrontdoorDomainIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CdnFrontdoorDomainIDSelector != nil { + in, out := &in.CdnFrontdoorDomainIDSelector, &out.CdnFrontdoorDomainIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainParameters. +func (in *DomainParameters) DeepCopy() *DomainParameters { + if in == nil { + return nil + } + out := new(DomainParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Endpoint) DeepCopyInto(out *Endpoint) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoint. +func (in *Endpoint) DeepCopy() *Endpoint { + if in == nil { + return nil + } + out := new(Endpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Endpoint) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EndpointInitParameters) DeepCopyInto(out *EndpointInitParameters) { + *out = *in + if in.ContentTypesToCompress != nil { + in, out := &in.ContentTypesToCompress, &out.ContentTypesToCompress + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DeliveryRule != nil { + in, out := &in.DeliveryRule, &out.DeliveryRule + *out = make([]DeliveryRuleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.GeoFilter != nil { + in, out := &in.GeoFilter, &out.GeoFilter + *out = make([]GeoFilterInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.GlobalDeliveryRule != nil { + in, out := &in.GlobalDeliveryRule, &out.GlobalDeliveryRule + *out = new(GlobalDeliveryRuleInitParameters) + (*in).DeepCopyInto(*out) + } + if in.IsCompressionEnabled != nil { + in, out := &in.IsCompressionEnabled, &out.IsCompressionEnabled + *out = new(bool) + **out = **in + } + if in.IsHTTPAllowed != nil { + in, out := &in.IsHTTPAllowed, &out.IsHTTPAllowed + *out = new(bool) + **out = **in + } + if in.IsHTTPSAllowed != nil { + in, out := &in.IsHTTPSAllowed, &out.IsHTTPSAllowed + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.OptimizationType != nil { + in, out := &in.OptimizationType, &out.OptimizationType + *out = new(string) + **out = **in + } + if in.Origin != nil { + in, out := &in.Origin, &out.Origin + *out = make([]OriginInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OriginHostHeader != nil { + in, out := &in.OriginHostHeader, &out.OriginHostHeader + *out = new(string) + **out = **in + } + if in.OriginPath != nil { + in, out := &in.OriginPath, &out.OriginPath + *out = new(string) + **out = **in + } + if in.ProbePath != nil { + 
in, out := &in.ProbePath, &out.ProbePath + *out = new(string) + **out = **in + } + if in.QuerystringCachingBehaviour != nil { + in, out := &in.QuerystringCachingBehaviour, &out.QuerystringCachingBehaviour + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointInitParameters. +func (in *EndpointInitParameters) DeepCopy() *EndpointInitParameters { + if in == nil { + return nil + } + out := new(EndpointInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointList) DeepCopyInto(out *EndpointList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Endpoint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointList. +func (in *EndpointList) DeepCopy() *EndpointList { + if in == nil { + return nil + } + out := new(EndpointList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EndpointList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EndpointObservation) DeepCopyInto(out *EndpointObservation) { + *out = *in + if in.ContentTypesToCompress != nil { + in, out := &in.ContentTypesToCompress, &out.ContentTypesToCompress + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DeliveryRule != nil { + in, out := &in.DeliveryRule, &out.DeliveryRule + *out = make([]DeliveryRuleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Fqdn != nil { + in, out := &in.Fqdn, &out.Fqdn + *out = new(string) + **out = **in + } + if in.GeoFilter != nil { + in, out := &in.GeoFilter, &out.GeoFilter + *out = make([]GeoFilterObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.GlobalDeliveryRule != nil { + in, out := &in.GlobalDeliveryRule, &out.GlobalDeliveryRule + *out = new(GlobalDeliveryRuleObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IsCompressionEnabled != nil { + in, out := &in.IsCompressionEnabled, &out.IsCompressionEnabled + *out = new(bool) + **out = **in + } + if in.IsHTTPAllowed != nil { + in, out := &in.IsHTTPAllowed, &out.IsHTTPAllowed + *out = new(bool) + **out = **in + } + if in.IsHTTPSAllowed != nil { + in, out := &in.IsHTTPSAllowed, &out.IsHTTPSAllowed + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.OptimizationType != nil { + in, out := &in.OptimizationType, &out.OptimizationType + *out = new(string) + **out = **in + } + if in.Origin != nil { + in, out := &in.Origin, &out.Origin + *out = make([]OriginObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OriginHostHeader != nil { + in, out := &in.OriginHostHeader, &out.OriginHostHeader + *out = 
new(string) + **out = **in + } + if in.OriginPath != nil { + in, out := &in.OriginPath, &out.OriginPath + *out = new(string) + **out = **in + } + if in.ProbePath != nil { + in, out := &in.ProbePath, &out.ProbePath + *out = new(string) + **out = **in + } + if in.ProfileName != nil { + in, out := &in.ProfileName, &out.ProfileName + *out = new(string) + **out = **in + } + if in.QuerystringCachingBehaviour != nil { + in, out := &in.QuerystringCachingBehaviour, &out.QuerystringCachingBehaviour + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointObservation. +func (in *EndpointObservation) DeepCopy() *EndpointObservation { + if in == nil { + return nil + } + out := new(EndpointObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EndpointParameters) DeepCopyInto(out *EndpointParameters) { + *out = *in + if in.ContentTypesToCompress != nil { + in, out := &in.ContentTypesToCompress, &out.ContentTypesToCompress + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DeliveryRule != nil { + in, out := &in.DeliveryRule, &out.DeliveryRule + *out = make([]DeliveryRuleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.GeoFilter != nil { + in, out := &in.GeoFilter, &out.GeoFilter + *out = make([]GeoFilterParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.GlobalDeliveryRule != nil { + in, out := &in.GlobalDeliveryRule, &out.GlobalDeliveryRule + *out = new(GlobalDeliveryRuleParameters) + (*in).DeepCopyInto(*out) + } + if in.IsCompressionEnabled != nil { + in, out := &in.IsCompressionEnabled, &out.IsCompressionEnabled + *out = new(bool) + **out = **in + } + if in.IsHTTPAllowed != nil { + in, out := &in.IsHTTPAllowed, &out.IsHTTPAllowed + *out = new(bool) + **out = **in + } + if in.IsHTTPSAllowed != nil { + in, out := &in.IsHTTPSAllowed, &out.IsHTTPSAllowed + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.OptimizationType != nil { + in, out := &in.OptimizationType, &out.OptimizationType + *out = new(string) + **out = **in + } + if in.Origin != nil { + in, out := &in.Origin, &out.Origin + *out = make([]OriginParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OriginHostHeader != nil { + in, out := &in.OriginHostHeader, &out.OriginHostHeader + *out = new(string) + **out = **in + } + if in.OriginPath != nil { + in, out := &in.OriginPath, &out.OriginPath + *out = new(string) + **out = **in + } + if in.ProbePath != nil { + in, out := 
&in.ProbePath, &out.ProbePath + *out = new(string) + **out = **in + } + if in.ProfileName != nil { + in, out := &in.ProfileName, &out.ProfileName + *out = new(string) + **out = **in + } + if in.ProfileNameRef != nil { + in, out := &in.ProfileNameRef, &out.ProfileNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ProfileNameSelector != nil { + in, out := &in.ProfileNameSelector, &out.ProfileNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.QuerystringCachingBehaviour != nil { + in, out := &in.QuerystringCachingBehaviour, &out.QuerystringCachingBehaviour + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointParameters. +func (in *EndpointParameters) DeepCopy() *EndpointParameters { + if in == nil { + return nil + } + out := new(EndpointParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EndpointSpec) DeepCopyInto(out *EndpointSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointSpec. +func (in *EndpointSpec) DeepCopy() *EndpointSpec { + if in == nil { + return nil + } + out := new(EndpointSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointStatus) DeepCopyInto(out *EndpointStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointStatus. +func (in *EndpointStatus) DeepCopy() *EndpointStatus { + if in == nil { + return nil + } + out := new(EndpointStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FirewallInitParameters) DeepCopyInto(out *FirewallInitParameters) { + *out = *in + if in.Association != nil { + in, out := &in.Association, &out.Association + *out = new(AssociationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CdnFrontdoorFirewallPolicyID != nil { + in, out := &in.CdnFrontdoorFirewallPolicyID, &out.CdnFrontdoorFirewallPolicyID + *out = new(string) + **out = **in + } + if in.CdnFrontdoorFirewallPolicyIDRef != nil { + in, out := &in.CdnFrontdoorFirewallPolicyIDRef, &out.CdnFrontdoorFirewallPolicyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CdnFrontdoorFirewallPolicyIDSelector != nil { + in, out := &in.CdnFrontdoorFirewallPolicyIDSelector, &out.CdnFrontdoorFirewallPolicyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirewallInitParameters. +func (in *FirewallInitParameters) DeepCopy() *FirewallInitParameters { + if in == nil { + return nil + } + out := new(FirewallInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FirewallObservation) DeepCopyInto(out *FirewallObservation) { + *out = *in + if in.Association != nil { + in, out := &in.Association, &out.Association + *out = new(AssociationObservation) + (*in).DeepCopyInto(*out) + } + if in.CdnFrontdoorFirewallPolicyID != nil { + in, out := &in.CdnFrontdoorFirewallPolicyID, &out.CdnFrontdoorFirewallPolicyID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirewallObservation. 
+func (in *FirewallObservation) DeepCopy() *FirewallObservation { + if in == nil { + return nil + } + out := new(FirewallObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FirewallParameters) DeepCopyInto(out *FirewallParameters) { + *out = *in + if in.Association != nil { + in, out := &in.Association, &out.Association + *out = new(AssociationParameters) + (*in).DeepCopyInto(*out) + } + if in.CdnFrontdoorFirewallPolicyID != nil { + in, out := &in.CdnFrontdoorFirewallPolicyID, &out.CdnFrontdoorFirewallPolicyID + *out = new(string) + **out = **in + } + if in.CdnFrontdoorFirewallPolicyIDRef != nil { + in, out := &in.CdnFrontdoorFirewallPolicyIDRef, &out.CdnFrontdoorFirewallPolicyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CdnFrontdoorFirewallPolicyIDSelector != nil { + in, out := &in.CdnFrontdoorFirewallPolicyIDSelector, &out.CdnFrontdoorFirewallPolicyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirewallParameters. +func (in *FirewallParameters) DeepCopy() *FirewallParameters { + if in == nil { + return nil + } + out := new(FirewallParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontdoorCustomDomain) DeepCopyInto(out *FrontdoorCustomDomain) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontdoorCustomDomain. 
+func (in *FrontdoorCustomDomain) DeepCopy() *FrontdoorCustomDomain { + if in == nil { + return nil + } + out := new(FrontdoorCustomDomain) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FrontdoorCustomDomain) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontdoorCustomDomainInitParameters) DeepCopyInto(out *FrontdoorCustomDomainInitParameters) { + *out = *in + if in.DNSZoneID != nil { + in, out := &in.DNSZoneID, &out.DNSZoneID + *out = new(string) + **out = **in + } + if in.DNSZoneIDRef != nil { + in, out := &in.DNSZoneIDRef, &out.DNSZoneIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DNSZoneIDSelector != nil { + in, out := &in.DNSZoneIDSelector, &out.DNSZoneIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.HostName != nil { + in, out := &in.HostName, &out.HostName + *out = new(string) + **out = **in + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(TLSInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontdoorCustomDomainInitParameters. +func (in *FrontdoorCustomDomainInitParameters) DeepCopy() *FrontdoorCustomDomainInitParameters { + if in == nil { + return nil + } + out := new(FrontdoorCustomDomainInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FrontdoorCustomDomainList) DeepCopyInto(out *FrontdoorCustomDomainList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]FrontdoorCustomDomain, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontdoorCustomDomainList. +func (in *FrontdoorCustomDomainList) DeepCopy() *FrontdoorCustomDomainList { + if in == nil { + return nil + } + out := new(FrontdoorCustomDomainList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FrontdoorCustomDomainList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FrontdoorCustomDomainObservation) DeepCopyInto(out *FrontdoorCustomDomainObservation) { + *out = *in + if in.CdnFrontdoorProfileID != nil { + in, out := &in.CdnFrontdoorProfileID, &out.CdnFrontdoorProfileID + *out = new(string) + **out = **in + } + if in.DNSZoneID != nil { + in, out := &in.DNSZoneID, &out.DNSZoneID + *out = new(string) + **out = **in + } + if in.ExpirationDate != nil { + in, out := &in.ExpirationDate, &out.ExpirationDate + *out = new(string) + **out = **in + } + if in.HostName != nil { + in, out := &in.HostName, &out.HostName + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(TLSObservation) + (*in).DeepCopyInto(*out) + } + if in.ValidationToken != nil { + in, out := &in.ValidationToken, &out.ValidationToken + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontdoorCustomDomainObservation. +func (in *FrontdoorCustomDomainObservation) DeepCopy() *FrontdoorCustomDomainObservation { + if in == nil { + return nil + } + out := new(FrontdoorCustomDomainObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FrontdoorCustomDomainParameters) DeepCopyInto(out *FrontdoorCustomDomainParameters) { + *out = *in + if in.CdnFrontdoorProfileID != nil { + in, out := &in.CdnFrontdoorProfileID, &out.CdnFrontdoorProfileID + *out = new(string) + **out = **in + } + if in.CdnFrontdoorProfileIDRef != nil { + in, out := &in.CdnFrontdoorProfileIDRef, &out.CdnFrontdoorProfileIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CdnFrontdoorProfileIDSelector != nil { + in, out := &in.CdnFrontdoorProfileIDSelector, &out.CdnFrontdoorProfileIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.DNSZoneID != nil { + in, out := &in.DNSZoneID, &out.DNSZoneID + *out = new(string) + **out = **in + } + if in.DNSZoneIDRef != nil { + in, out := &in.DNSZoneIDRef, &out.DNSZoneIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DNSZoneIDSelector != nil { + in, out := &in.DNSZoneIDSelector, &out.DNSZoneIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.HostName != nil { + in, out := &in.HostName, &out.HostName + *out = new(string) + **out = **in + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(TLSParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontdoorCustomDomainParameters. +func (in *FrontdoorCustomDomainParameters) DeepCopy() *FrontdoorCustomDomainParameters { + if in == nil { + return nil + } + out := new(FrontdoorCustomDomainParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FrontdoorCustomDomainSpec) DeepCopyInto(out *FrontdoorCustomDomainSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontdoorCustomDomainSpec. +func (in *FrontdoorCustomDomainSpec) DeepCopy() *FrontdoorCustomDomainSpec { + if in == nil { + return nil + } + out := new(FrontdoorCustomDomainSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontdoorCustomDomainStatus) DeepCopyInto(out *FrontdoorCustomDomainStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontdoorCustomDomainStatus. +func (in *FrontdoorCustomDomainStatus) DeepCopy() *FrontdoorCustomDomainStatus { + if in == nil { + return nil + } + out := new(FrontdoorCustomDomainStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontdoorOrigin) DeepCopyInto(out *FrontdoorOrigin) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontdoorOrigin. +func (in *FrontdoorOrigin) DeepCopy() *FrontdoorOrigin { + if in == nil { + return nil + } + out := new(FrontdoorOrigin) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *FrontdoorOrigin) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontdoorOriginGroup) DeepCopyInto(out *FrontdoorOriginGroup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontdoorOriginGroup. +func (in *FrontdoorOriginGroup) DeepCopy() *FrontdoorOriginGroup { + if in == nil { + return nil + } + out := new(FrontdoorOriginGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FrontdoorOriginGroup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FrontdoorOriginGroupInitParameters) DeepCopyInto(out *FrontdoorOriginGroupInitParameters) { + *out = *in + if in.HealthProbe != nil { + in, out := &in.HealthProbe, &out.HealthProbe + *out = new(HealthProbeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LoadBalancing != nil { + in, out := &in.LoadBalancing, &out.LoadBalancing + *out = new(LoadBalancingInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RestoreTrafficTimeToHealedOrNewEndpointInMinutes != nil { + in, out := &in.RestoreTrafficTimeToHealedOrNewEndpointInMinutes, &out.RestoreTrafficTimeToHealedOrNewEndpointInMinutes + *out = new(float64) + **out = **in + } + if in.SessionAffinityEnabled != nil { + in, out := &in.SessionAffinityEnabled, &out.SessionAffinityEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontdoorOriginGroupInitParameters. +func (in *FrontdoorOriginGroupInitParameters) DeepCopy() *FrontdoorOriginGroupInitParameters { + if in == nil { + return nil + } + out := new(FrontdoorOriginGroupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontdoorOriginGroupList) DeepCopyInto(out *FrontdoorOriginGroupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]FrontdoorOriginGroup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontdoorOriginGroupList. 
+func (in *FrontdoorOriginGroupList) DeepCopy() *FrontdoorOriginGroupList { + if in == nil { + return nil + } + out := new(FrontdoorOriginGroupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FrontdoorOriginGroupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontdoorOriginGroupObservation) DeepCopyInto(out *FrontdoorOriginGroupObservation) { + *out = *in + if in.CdnFrontdoorProfileID != nil { + in, out := &in.CdnFrontdoorProfileID, &out.CdnFrontdoorProfileID + *out = new(string) + **out = **in + } + if in.HealthProbe != nil { + in, out := &in.HealthProbe, &out.HealthProbe + *out = new(HealthProbeObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LoadBalancing != nil { + in, out := &in.LoadBalancing, &out.LoadBalancing + *out = new(LoadBalancingObservation) + (*in).DeepCopyInto(*out) + } + if in.RestoreTrafficTimeToHealedOrNewEndpointInMinutes != nil { + in, out := &in.RestoreTrafficTimeToHealedOrNewEndpointInMinutes, &out.RestoreTrafficTimeToHealedOrNewEndpointInMinutes + *out = new(float64) + **out = **in + } + if in.SessionAffinityEnabled != nil { + in, out := &in.SessionAffinityEnabled, &out.SessionAffinityEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontdoorOriginGroupObservation. 
+func (in *FrontdoorOriginGroupObservation) DeepCopy() *FrontdoorOriginGroupObservation { + if in == nil { + return nil + } + out := new(FrontdoorOriginGroupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontdoorOriginGroupParameters) DeepCopyInto(out *FrontdoorOriginGroupParameters) { + *out = *in + if in.CdnFrontdoorProfileID != nil { + in, out := &in.CdnFrontdoorProfileID, &out.CdnFrontdoorProfileID + *out = new(string) + **out = **in + } + if in.CdnFrontdoorProfileIDRef != nil { + in, out := &in.CdnFrontdoorProfileIDRef, &out.CdnFrontdoorProfileIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CdnFrontdoorProfileIDSelector != nil { + in, out := &in.CdnFrontdoorProfileIDSelector, &out.CdnFrontdoorProfileIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.HealthProbe != nil { + in, out := &in.HealthProbe, &out.HealthProbe + *out = new(HealthProbeParameters) + (*in).DeepCopyInto(*out) + } + if in.LoadBalancing != nil { + in, out := &in.LoadBalancing, &out.LoadBalancing + *out = new(LoadBalancingParameters) + (*in).DeepCopyInto(*out) + } + if in.RestoreTrafficTimeToHealedOrNewEndpointInMinutes != nil { + in, out := &in.RestoreTrafficTimeToHealedOrNewEndpointInMinutes, &out.RestoreTrafficTimeToHealedOrNewEndpointInMinutes + *out = new(float64) + **out = **in + } + if in.SessionAffinityEnabled != nil { + in, out := &in.SessionAffinityEnabled, &out.SessionAffinityEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontdoorOriginGroupParameters. 
+func (in *FrontdoorOriginGroupParameters) DeepCopy() *FrontdoorOriginGroupParameters { + if in == nil { + return nil + } + out := new(FrontdoorOriginGroupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontdoorOriginGroupSpec) DeepCopyInto(out *FrontdoorOriginGroupSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontdoorOriginGroupSpec. +func (in *FrontdoorOriginGroupSpec) DeepCopy() *FrontdoorOriginGroupSpec { + if in == nil { + return nil + } + out := new(FrontdoorOriginGroupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontdoorOriginGroupStatus) DeepCopyInto(out *FrontdoorOriginGroupStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontdoorOriginGroupStatus. +func (in *FrontdoorOriginGroupStatus) DeepCopy() *FrontdoorOriginGroupStatus { + if in == nil { + return nil + } + out := new(FrontdoorOriginGroupStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FrontdoorOriginInitParameters) DeepCopyInto(out *FrontdoorOriginInitParameters) { + *out = *in + if in.CertificateNameCheckEnabled != nil { + in, out := &in.CertificateNameCheckEnabled, &out.CertificateNameCheckEnabled + *out = new(bool) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.HTTPPort != nil { + in, out := &in.HTTPPort, &out.HTTPPort + *out = new(float64) + **out = **in + } + if in.HTTPSPort != nil { + in, out := &in.HTTPSPort, &out.HTTPSPort + *out = new(float64) + **out = **in + } + if in.HealthProbesEnabled != nil { + in, out := &in.HealthProbesEnabled, &out.HealthProbesEnabled + *out = new(bool) + **out = **in + } + if in.HostName != nil { + in, out := &in.HostName, &out.HostName + *out = new(string) + **out = **in + } + if in.HostNameRef != nil { + in, out := &in.HostNameRef, &out.HostNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.HostNameSelector != nil { + in, out := &in.HostNameSelector, &out.HostNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.OriginHostHeader != nil { + in, out := &in.OriginHostHeader, &out.OriginHostHeader + *out = new(string) + **out = **in + } + if in.OriginHostHeaderRef != nil { + in, out := &in.OriginHostHeaderRef, &out.OriginHostHeaderRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.OriginHostHeaderSelector != nil { + in, out := &in.OriginHostHeaderSelector, &out.OriginHostHeaderSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.PrivateLink != nil { + in, out := &in.PrivateLink, &out.PrivateLink + *out = new(PrivateLinkInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying 
the receiver, creating a new FrontdoorOriginInitParameters. +func (in *FrontdoorOriginInitParameters) DeepCopy() *FrontdoorOriginInitParameters { + if in == nil { + return nil + } + out := new(FrontdoorOriginInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontdoorOriginList) DeepCopyInto(out *FrontdoorOriginList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]FrontdoorOrigin, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontdoorOriginList. +func (in *FrontdoorOriginList) DeepCopy() *FrontdoorOriginList { + if in == nil { + return nil + } + out := new(FrontdoorOriginList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FrontdoorOriginList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FrontdoorOriginObservation) DeepCopyInto(out *FrontdoorOriginObservation) { + *out = *in + if in.CdnFrontdoorOriginGroupID != nil { + in, out := &in.CdnFrontdoorOriginGroupID, &out.CdnFrontdoorOriginGroupID + *out = new(string) + **out = **in + } + if in.CertificateNameCheckEnabled != nil { + in, out := &in.CertificateNameCheckEnabled, &out.CertificateNameCheckEnabled + *out = new(bool) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.HTTPPort != nil { + in, out := &in.HTTPPort, &out.HTTPPort + *out = new(float64) + **out = **in + } + if in.HTTPSPort != nil { + in, out := &in.HTTPSPort, &out.HTTPSPort + *out = new(float64) + **out = **in + } + if in.HealthProbesEnabled != nil { + in, out := &in.HealthProbesEnabled, &out.HealthProbesEnabled + *out = new(bool) + **out = **in + } + if in.HostName != nil { + in, out := &in.HostName, &out.HostName + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.OriginHostHeader != nil { + in, out := &in.OriginHostHeader, &out.OriginHostHeader + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.PrivateLink != nil { + in, out := &in.PrivateLink, &out.PrivateLink + *out = new(PrivateLinkObservation) + (*in).DeepCopyInto(*out) + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontdoorOriginObservation. +func (in *FrontdoorOriginObservation) DeepCopy() *FrontdoorOriginObservation { + if in == nil { + return nil + } + out := new(FrontdoorOriginObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FrontdoorOriginParameters) DeepCopyInto(out *FrontdoorOriginParameters) { + *out = *in + if in.CdnFrontdoorOriginGroupID != nil { + in, out := &in.CdnFrontdoorOriginGroupID, &out.CdnFrontdoorOriginGroupID + *out = new(string) + **out = **in + } + if in.CdnFrontdoorOriginGroupIDRef != nil { + in, out := &in.CdnFrontdoorOriginGroupIDRef, &out.CdnFrontdoorOriginGroupIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CdnFrontdoorOriginGroupIDSelector != nil { + in, out := &in.CdnFrontdoorOriginGroupIDSelector, &out.CdnFrontdoorOriginGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.CertificateNameCheckEnabled != nil { + in, out := &in.CertificateNameCheckEnabled, &out.CertificateNameCheckEnabled + *out = new(bool) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.HTTPPort != nil { + in, out := &in.HTTPPort, &out.HTTPPort + *out = new(float64) + **out = **in + } + if in.HTTPSPort != nil { + in, out := &in.HTTPSPort, &out.HTTPSPort + *out = new(float64) + **out = **in + } + if in.HealthProbesEnabled != nil { + in, out := &in.HealthProbesEnabled, &out.HealthProbesEnabled + *out = new(bool) + **out = **in + } + if in.HostName != nil { + in, out := &in.HostName, &out.HostName + *out = new(string) + **out = **in + } + if in.HostNameRef != nil { + in, out := &in.HostNameRef, &out.HostNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.HostNameSelector != nil { + in, out := &in.HostNameSelector, &out.HostNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.OriginHostHeader != nil { + in, out := &in.OriginHostHeader, &out.OriginHostHeader + *out = new(string) + **out = **in + } + if in.OriginHostHeaderRef != nil { + in, out := &in.OriginHostHeaderRef, &out.OriginHostHeaderRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.OriginHostHeaderSelector != nil { + in, out := 
&in.OriginHostHeaderSelector, &out.OriginHostHeaderSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.PrivateLink != nil { + in, out := &in.PrivateLink, &out.PrivateLink + *out = new(PrivateLinkParameters) + (*in).DeepCopyInto(*out) + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontdoorOriginParameters. +func (in *FrontdoorOriginParameters) DeepCopy() *FrontdoorOriginParameters { + if in == nil { + return nil + } + out := new(FrontdoorOriginParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontdoorOriginSpec) DeepCopyInto(out *FrontdoorOriginSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontdoorOriginSpec. +func (in *FrontdoorOriginSpec) DeepCopy() *FrontdoorOriginSpec { + if in == nil { + return nil + } + out := new(FrontdoorOriginSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontdoorOriginStatus) DeepCopyInto(out *FrontdoorOriginStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontdoorOriginStatus. 
+func (in *FrontdoorOriginStatus) DeepCopy() *FrontdoorOriginStatus { + if in == nil { + return nil + } + out := new(FrontdoorOriginStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontdoorRoute) DeepCopyInto(out *FrontdoorRoute) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontdoorRoute. +func (in *FrontdoorRoute) DeepCopy() *FrontdoorRoute { + if in == nil { + return nil + } + out := new(FrontdoorRoute) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FrontdoorRoute) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FrontdoorRouteInitParameters) DeepCopyInto(out *FrontdoorRouteInitParameters) { + *out = *in + if in.Cache != nil { + in, out := &in.Cache, &out.Cache + *out = new(CacheInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CdnFrontdoorCustomDomainIds != nil { + in, out := &in.CdnFrontdoorCustomDomainIds, &out.CdnFrontdoorCustomDomainIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CdnFrontdoorCustomDomainIdsRefs != nil { + in, out := &in.CdnFrontdoorCustomDomainIdsRefs, &out.CdnFrontdoorCustomDomainIdsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CdnFrontdoorCustomDomainIdsSelector != nil { + in, out := &in.CdnFrontdoorCustomDomainIdsSelector, &out.CdnFrontdoorCustomDomainIdsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.CdnFrontdoorOriginGroupID != nil { + in, out := &in.CdnFrontdoorOriginGroupID, &out.CdnFrontdoorOriginGroupID + *out = new(string) + **out = **in + } + if in.CdnFrontdoorOriginGroupIDRef != nil { + in, out := &in.CdnFrontdoorOriginGroupIDRef, &out.CdnFrontdoorOriginGroupIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CdnFrontdoorOriginGroupIDSelector != nil { + in, out := &in.CdnFrontdoorOriginGroupIDSelector, &out.CdnFrontdoorOriginGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.CdnFrontdoorOriginIds != nil { + in, out := &in.CdnFrontdoorOriginIds, &out.CdnFrontdoorOriginIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CdnFrontdoorOriginIdsRefs != nil { + in, out := &in.CdnFrontdoorOriginIdsRefs, &out.CdnFrontdoorOriginIdsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } 
+ if in.CdnFrontdoorOriginIdsSelector != nil { + in, out := &in.CdnFrontdoorOriginIdsSelector, &out.CdnFrontdoorOriginIdsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.CdnFrontdoorOriginPath != nil { + in, out := &in.CdnFrontdoorOriginPath, &out.CdnFrontdoorOriginPath + *out = new(string) + **out = **in + } + if in.CdnFrontdoorRuleSetIds != nil { + in, out := &in.CdnFrontdoorRuleSetIds, &out.CdnFrontdoorRuleSetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CdnFrontdoorRuleSetIdsRefs != nil { + in, out := &in.CdnFrontdoorRuleSetIdsRefs, &out.CdnFrontdoorRuleSetIdsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CdnFrontdoorRuleSetIdsSelector != nil { + in, out := &in.CdnFrontdoorRuleSetIdsSelector, &out.CdnFrontdoorRuleSetIdsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.ForwardingProtocol != nil { + in, out := &in.ForwardingProtocol, &out.ForwardingProtocol + *out = new(string) + **out = **in + } + if in.HTTPSRedirectEnabled != nil { + in, out := &in.HTTPSRedirectEnabled, &out.HTTPSRedirectEnabled + *out = new(bool) + **out = **in + } + if in.LinkToDefaultDomain != nil { + in, out := &in.LinkToDefaultDomain, &out.LinkToDefaultDomain + *out = new(bool) + **out = **in + } + if in.PatternsToMatch != nil { + in, out := &in.PatternsToMatch, &out.PatternsToMatch + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SupportedProtocols != nil { + in, out := &in.SupportedProtocols, &out.SupportedProtocols + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], 
&(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontdoorRouteInitParameters. +func (in *FrontdoorRouteInitParameters) DeepCopy() *FrontdoorRouteInitParameters { + if in == nil { + return nil + } + out := new(FrontdoorRouteInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontdoorRouteList) DeepCopyInto(out *FrontdoorRouteList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]FrontdoorRoute, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontdoorRouteList. +func (in *FrontdoorRouteList) DeepCopy() *FrontdoorRouteList { + if in == nil { + return nil + } + out := new(FrontdoorRouteList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FrontdoorRouteList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FrontdoorRouteObservation) DeepCopyInto(out *FrontdoorRouteObservation) { + *out = *in + if in.Cache != nil { + in, out := &in.Cache, &out.Cache + *out = new(CacheObservation) + (*in).DeepCopyInto(*out) + } + if in.CdnFrontdoorCustomDomainIds != nil { + in, out := &in.CdnFrontdoorCustomDomainIds, &out.CdnFrontdoorCustomDomainIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CdnFrontdoorEndpointID != nil { + in, out := &in.CdnFrontdoorEndpointID, &out.CdnFrontdoorEndpointID + *out = new(string) + **out = **in + } + if in.CdnFrontdoorOriginGroupID != nil { + in, out := &in.CdnFrontdoorOriginGroupID, &out.CdnFrontdoorOriginGroupID + *out = new(string) + **out = **in + } + if in.CdnFrontdoorOriginIds != nil { + in, out := &in.CdnFrontdoorOriginIds, &out.CdnFrontdoorOriginIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CdnFrontdoorOriginPath != nil { + in, out := &in.CdnFrontdoorOriginPath, &out.CdnFrontdoorOriginPath + *out = new(string) + **out = **in + } + if in.CdnFrontdoorRuleSetIds != nil { + in, out := &in.CdnFrontdoorRuleSetIds, &out.CdnFrontdoorRuleSetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.ForwardingProtocol != nil { + in, out := &in.ForwardingProtocol, &out.ForwardingProtocol + *out = new(string) + **out = **in + } + if in.HTTPSRedirectEnabled != nil { + in, out := &in.HTTPSRedirectEnabled, &out.HTTPSRedirectEnabled + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if 
in.LinkToDefaultDomain != nil { + in, out := &in.LinkToDefaultDomain, &out.LinkToDefaultDomain + *out = new(bool) + **out = **in + } + if in.PatternsToMatch != nil { + in, out := &in.PatternsToMatch, &out.PatternsToMatch + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SupportedProtocols != nil { + in, out := &in.SupportedProtocols, &out.SupportedProtocols + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontdoorRouteObservation. +func (in *FrontdoorRouteObservation) DeepCopy() *FrontdoorRouteObservation { + if in == nil { + return nil + } + out := new(FrontdoorRouteObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FrontdoorRouteParameters) DeepCopyInto(out *FrontdoorRouteParameters) { + *out = *in + if in.Cache != nil { + in, out := &in.Cache, &out.Cache + *out = new(CacheParameters) + (*in).DeepCopyInto(*out) + } + if in.CdnFrontdoorCustomDomainIds != nil { + in, out := &in.CdnFrontdoorCustomDomainIds, &out.CdnFrontdoorCustomDomainIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CdnFrontdoorCustomDomainIdsRefs != nil { + in, out := &in.CdnFrontdoorCustomDomainIdsRefs, &out.CdnFrontdoorCustomDomainIdsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CdnFrontdoorCustomDomainIdsSelector != nil { + in, out := &in.CdnFrontdoorCustomDomainIdsSelector, &out.CdnFrontdoorCustomDomainIdsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.CdnFrontdoorEndpointID != nil { + in, out := &in.CdnFrontdoorEndpointID, &out.CdnFrontdoorEndpointID + *out = new(string) + **out = **in + } + if in.CdnFrontdoorEndpointIDRef != nil { + in, out := &in.CdnFrontdoorEndpointIDRef, &out.CdnFrontdoorEndpointIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CdnFrontdoorEndpointIDSelector != nil { + in, out := &in.CdnFrontdoorEndpointIDSelector, &out.CdnFrontdoorEndpointIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.CdnFrontdoorOriginGroupID != nil { + in, out := &in.CdnFrontdoorOriginGroupID, &out.CdnFrontdoorOriginGroupID + *out = new(string) + **out = **in + } + if in.CdnFrontdoorOriginGroupIDRef != nil { + in, out := &in.CdnFrontdoorOriginGroupIDRef, &out.CdnFrontdoorOriginGroupIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CdnFrontdoorOriginGroupIDSelector != nil { + in, out := &in.CdnFrontdoorOriginGroupIDSelector, &out.CdnFrontdoorOriginGroupIDSelector + *out = new(v1.Selector) + 
(*in).DeepCopyInto(*out) + } + if in.CdnFrontdoorOriginIds != nil { + in, out := &in.CdnFrontdoorOriginIds, &out.CdnFrontdoorOriginIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CdnFrontdoorOriginIdsRefs != nil { + in, out := &in.CdnFrontdoorOriginIdsRefs, &out.CdnFrontdoorOriginIdsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CdnFrontdoorOriginIdsSelector != nil { + in, out := &in.CdnFrontdoorOriginIdsSelector, &out.CdnFrontdoorOriginIdsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.CdnFrontdoorOriginPath != nil { + in, out := &in.CdnFrontdoorOriginPath, &out.CdnFrontdoorOriginPath + *out = new(string) + **out = **in + } + if in.CdnFrontdoorRuleSetIds != nil { + in, out := &in.CdnFrontdoorRuleSetIds, &out.CdnFrontdoorRuleSetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CdnFrontdoorRuleSetIdsRefs != nil { + in, out := &in.CdnFrontdoorRuleSetIdsRefs, &out.CdnFrontdoorRuleSetIdsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CdnFrontdoorRuleSetIdsSelector != nil { + in, out := &in.CdnFrontdoorRuleSetIdsSelector, &out.CdnFrontdoorRuleSetIdsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.ForwardingProtocol != nil { + in, out := &in.ForwardingProtocol, &out.ForwardingProtocol + *out = new(string) + **out = **in + } + if in.HTTPSRedirectEnabled != nil { + in, out := &in.HTTPSRedirectEnabled, &out.HTTPSRedirectEnabled + *out = new(bool) + **out = **in + } + if in.LinkToDefaultDomain != nil { + in, out := 
&in.LinkToDefaultDomain, &out.LinkToDefaultDomain + *out = new(bool) + **out = **in + } + if in.PatternsToMatch != nil { + in, out := &in.PatternsToMatch, &out.PatternsToMatch + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SupportedProtocols != nil { + in, out := &in.SupportedProtocols, &out.SupportedProtocols + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontdoorRouteParameters. +func (in *FrontdoorRouteParameters) DeepCopy() *FrontdoorRouteParameters { + if in == nil { + return nil + } + out := new(FrontdoorRouteParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontdoorRouteSpec) DeepCopyInto(out *FrontdoorRouteSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontdoorRouteSpec. +func (in *FrontdoorRouteSpec) DeepCopy() *FrontdoorRouteSpec { + if in == nil { + return nil + } + out := new(FrontdoorRouteSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontdoorRouteStatus) DeepCopyInto(out *FrontdoorRouteStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontdoorRouteStatus. 
+func (in *FrontdoorRouteStatus) DeepCopy() *FrontdoorRouteStatus { + if in == nil { + return nil + } + out := new(FrontdoorRouteStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontdoorRule) DeepCopyInto(out *FrontdoorRule) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontdoorRule. +func (in *FrontdoorRule) DeepCopy() *FrontdoorRule { + if in == nil { + return nil + } + out := new(FrontdoorRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FrontdoorRule) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontdoorRuleInitParameters) DeepCopyInto(out *FrontdoorRuleInitParameters) { + *out = *in + if in.Actions != nil { + in, out := &in.Actions, &out.Actions + *out = new(ActionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.BehaviorOnMatch != nil { + in, out := &in.BehaviorOnMatch, &out.BehaviorOnMatch + *out = new(string) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = new(ConditionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Order != nil { + in, out := &in.Order, &out.Order + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontdoorRuleInitParameters. 
+func (in *FrontdoorRuleInitParameters) DeepCopy() *FrontdoorRuleInitParameters { + if in == nil { + return nil + } + out := new(FrontdoorRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontdoorRuleList) DeepCopyInto(out *FrontdoorRuleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]FrontdoorRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontdoorRuleList. +func (in *FrontdoorRuleList) DeepCopy() *FrontdoorRuleList { + if in == nil { + return nil + } + out := new(FrontdoorRuleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FrontdoorRuleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FrontdoorRuleObservation) DeepCopyInto(out *FrontdoorRuleObservation) { + *out = *in + if in.Actions != nil { + in, out := &in.Actions, &out.Actions + *out = new(ActionsObservation) + (*in).DeepCopyInto(*out) + } + if in.BehaviorOnMatch != nil { + in, out := &in.BehaviorOnMatch, &out.BehaviorOnMatch + *out = new(string) + **out = **in + } + if in.CdnFrontdoorRuleSetID != nil { + in, out := &in.CdnFrontdoorRuleSetID, &out.CdnFrontdoorRuleSetID + *out = new(string) + **out = **in + } + if in.CdnFrontdoorRuleSetName != nil { + in, out := &in.CdnFrontdoorRuleSetName, &out.CdnFrontdoorRuleSetName + *out = new(string) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = new(ConditionsObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Order != nil { + in, out := &in.Order, &out.Order + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontdoorRuleObservation. +func (in *FrontdoorRuleObservation) DeepCopy() *FrontdoorRuleObservation { + if in == nil { + return nil + } + out := new(FrontdoorRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FrontdoorRuleParameters) DeepCopyInto(out *FrontdoorRuleParameters) { + *out = *in + if in.Actions != nil { + in, out := &in.Actions, &out.Actions + *out = new(ActionsParameters) + (*in).DeepCopyInto(*out) + } + if in.BehaviorOnMatch != nil { + in, out := &in.BehaviorOnMatch, &out.BehaviorOnMatch + *out = new(string) + **out = **in + } + if in.CdnFrontdoorRuleSetID != nil { + in, out := &in.CdnFrontdoorRuleSetID, &out.CdnFrontdoorRuleSetID + *out = new(string) + **out = **in + } + if in.CdnFrontdoorRuleSetIDRef != nil { + in, out := &in.CdnFrontdoorRuleSetIDRef, &out.CdnFrontdoorRuleSetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CdnFrontdoorRuleSetIDSelector != nil { + in, out := &in.CdnFrontdoorRuleSetIDSelector, &out.CdnFrontdoorRuleSetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = new(ConditionsParameters) + (*in).DeepCopyInto(*out) + } + if in.Order != nil { + in, out := &in.Order, &out.Order + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontdoorRuleParameters. +func (in *FrontdoorRuleParameters) DeepCopy() *FrontdoorRuleParameters { + if in == nil { + return nil + } + out := new(FrontdoorRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontdoorRuleSpec) DeepCopyInto(out *FrontdoorRuleSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontdoorRuleSpec. 
+func (in *FrontdoorRuleSpec) DeepCopy() *FrontdoorRuleSpec { + if in == nil { + return nil + } + out := new(FrontdoorRuleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontdoorRuleStatus) DeepCopyInto(out *FrontdoorRuleStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontdoorRuleStatus. +func (in *FrontdoorRuleStatus) DeepCopy() *FrontdoorRuleStatus { + if in == nil { + return nil + } + out := new(FrontdoorRuleStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontdoorSecurityPolicy) DeepCopyInto(out *FrontdoorSecurityPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontdoorSecurityPolicy. +func (in *FrontdoorSecurityPolicy) DeepCopy() *FrontdoorSecurityPolicy { + if in == nil { + return nil + } + out := new(FrontdoorSecurityPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FrontdoorSecurityPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FrontdoorSecurityPolicyInitParameters) DeepCopyInto(out *FrontdoorSecurityPolicyInitParameters) { + *out = *in + if in.SecurityPolicies != nil { + in, out := &in.SecurityPolicies, &out.SecurityPolicies + *out = new(SecurityPoliciesInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontdoorSecurityPolicyInitParameters. +func (in *FrontdoorSecurityPolicyInitParameters) DeepCopy() *FrontdoorSecurityPolicyInitParameters { + if in == nil { + return nil + } + out := new(FrontdoorSecurityPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontdoorSecurityPolicyList) DeepCopyInto(out *FrontdoorSecurityPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]FrontdoorSecurityPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontdoorSecurityPolicyList. +func (in *FrontdoorSecurityPolicyList) DeepCopy() *FrontdoorSecurityPolicyList { + if in == nil { + return nil + } + out := new(FrontdoorSecurityPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FrontdoorSecurityPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FrontdoorSecurityPolicyObservation) DeepCopyInto(out *FrontdoorSecurityPolicyObservation) { + *out = *in + if in.CdnFrontdoorProfileID != nil { + in, out := &in.CdnFrontdoorProfileID, &out.CdnFrontdoorProfileID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.SecurityPolicies != nil { + in, out := &in.SecurityPolicies, &out.SecurityPolicies + *out = new(SecurityPoliciesObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontdoorSecurityPolicyObservation. +func (in *FrontdoorSecurityPolicyObservation) DeepCopy() *FrontdoorSecurityPolicyObservation { + if in == nil { + return nil + } + out := new(FrontdoorSecurityPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontdoorSecurityPolicyParameters) DeepCopyInto(out *FrontdoorSecurityPolicyParameters) { + *out = *in + if in.CdnFrontdoorProfileID != nil { + in, out := &in.CdnFrontdoorProfileID, &out.CdnFrontdoorProfileID + *out = new(string) + **out = **in + } + if in.CdnFrontdoorProfileIDRef != nil { + in, out := &in.CdnFrontdoorProfileIDRef, &out.CdnFrontdoorProfileIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CdnFrontdoorProfileIDSelector != nil { + in, out := &in.CdnFrontdoorProfileIDSelector, &out.CdnFrontdoorProfileIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SecurityPolicies != nil { + in, out := &in.SecurityPolicies, &out.SecurityPolicies + *out = new(SecurityPoliciesParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontdoorSecurityPolicyParameters. 
+func (in *FrontdoorSecurityPolicyParameters) DeepCopy() *FrontdoorSecurityPolicyParameters { + if in == nil { + return nil + } + out := new(FrontdoorSecurityPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontdoorSecurityPolicySpec) DeepCopyInto(out *FrontdoorSecurityPolicySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontdoorSecurityPolicySpec. +func (in *FrontdoorSecurityPolicySpec) DeepCopy() *FrontdoorSecurityPolicySpec { + if in == nil { + return nil + } + out := new(FrontdoorSecurityPolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontdoorSecurityPolicyStatus) DeepCopyInto(out *FrontdoorSecurityPolicyStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontdoorSecurityPolicyStatus. +func (in *FrontdoorSecurityPolicyStatus) DeepCopy() *FrontdoorSecurityPolicyStatus { + if in == nil { + return nil + } + out := new(FrontdoorSecurityPolicyStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GeoFilterInitParameters) DeepCopyInto(out *GeoFilterInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.CountryCodes != nil { + in, out := &in.CountryCodes, &out.CountryCodes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RelativePath != nil { + in, out := &in.RelativePath, &out.RelativePath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GeoFilterInitParameters. +func (in *GeoFilterInitParameters) DeepCopy() *GeoFilterInitParameters { + if in == nil { + return nil + } + out := new(GeoFilterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GeoFilterObservation) DeepCopyInto(out *GeoFilterObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.CountryCodes != nil { + in, out := &in.CountryCodes, &out.CountryCodes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RelativePath != nil { + in, out := &in.RelativePath, &out.RelativePath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GeoFilterObservation. +func (in *GeoFilterObservation) DeepCopy() *GeoFilterObservation { + if in == nil { + return nil + } + out := new(GeoFilterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GeoFilterParameters) DeepCopyInto(out *GeoFilterParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.CountryCodes != nil { + in, out := &in.CountryCodes, &out.CountryCodes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RelativePath != nil { + in, out := &in.RelativePath, &out.RelativePath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GeoFilterParameters. +func (in *GeoFilterParameters) DeepCopy() *GeoFilterParameters { + if in == nil { + return nil + } + out := new(GeoFilterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GlobalDeliveryRuleCacheExpirationActionInitParameters) DeepCopyInto(out *GlobalDeliveryRuleCacheExpirationActionInitParameters) { + *out = *in + if in.Behavior != nil { + in, out := &in.Behavior, &out.Behavior + *out = new(string) + **out = **in + } + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalDeliveryRuleCacheExpirationActionInitParameters. +func (in *GlobalDeliveryRuleCacheExpirationActionInitParameters) DeepCopy() *GlobalDeliveryRuleCacheExpirationActionInitParameters { + if in == nil { + return nil + } + out := new(GlobalDeliveryRuleCacheExpirationActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GlobalDeliveryRuleCacheExpirationActionObservation) DeepCopyInto(out *GlobalDeliveryRuleCacheExpirationActionObservation) { + *out = *in + if in.Behavior != nil { + in, out := &in.Behavior, &out.Behavior + *out = new(string) + **out = **in + } + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalDeliveryRuleCacheExpirationActionObservation. +func (in *GlobalDeliveryRuleCacheExpirationActionObservation) DeepCopy() *GlobalDeliveryRuleCacheExpirationActionObservation { + if in == nil { + return nil + } + out := new(GlobalDeliveryRuleCacheExpirationActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GlobalDeliveryRuleCacheExpirationActionParameters) DeepCopyInto(out *GlobalDeliveryRuleCacheExpirationActionParameters) { + *out = *in + if in.Behavior != nil { + in, out := &in.Behavior, &out.Behavior + *out = new(string) + **out = **in + } + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalDeliveryRuleCacheExpirationActionParameters. +func (in *GlobalDeliveryRuleCacheExpirationActionParameters) DeepCopy() *GlobalDeliveryRuleCacheExpirationActionParameters { + if in == nil { + return nil + } + out := new(GlobalDeliveryRuleCacheExpirationActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GlobalDeliveryRuleCacheKeyQueryStringActionInitParameters) DeepCopyInto(out *GlobalDeliveryRuleCacheKeyQueryStringActionInitParameters) { + *out = *in + if in.Behavior != nil { + in, out := &in.Behavior, &out.Behavior + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalDeliveryRuleCacheKeyQueryStringActionInitParameters. +func (in *GlobalDeliveryRuleCacheKeyQueryStringActionInitParameters) DeepCopy() *GlobalDeliveryRuleCacheKeyQueryStringActionInitParameters { + if in == nil { + return nil + } + out := new(GlobalDeliveryRuleCacheKeyQueryStringActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GlobalDeliveryRuleCacheKeyQueryStringActionObservation) DeepCopyInto(out *GlobalDeliveryRuleCacheKeyQueryStringActionObservation) { + *out = *in + if in.Behavior != nil { + in, out := &in.Behavior, &out.Behavior + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalDeliveryRuleCacheKeyQueryStringActionObservation. +func (in *GlobalDeliveryRuleCacheKeyQueryStringActionObservation) DeepCopy() *GlobalDeliveryRuleCacheKeyQueryStringActionObservation { + if in == nil { + return nil + } + out := new(GlobalDeliveryRuleCacheKeyQueryStringActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GlobalDeliveryRuleCacheKeyQueryStringActionParameters) DeepCopyInto(out *GlobalDeliveryRuleCacheKeyQueryStringActionParameters) { + *out = *in + if in.Behavior != nil { + in, out := &in.Behavior, &out.Behavior + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalDeliveryRuleCacheKeyQueryStringActionParameters. +func (in *GlobalDeliveryRuleCacheKeyQueryStringActionParameters) DeepCopy() *GlobalDeliveryRuleCacheKeyQueryStringActionParameters { + if in == nil { + return nil + } + out := new(GlobalDeliveryRuleCacheKeyQueryStringActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GlobalDeliveryRuleInitParameters) DeepCopyInto(out *GlobalDeliveryRuleInitParameters) { + *out = *in + if in.CacheExpirationAction != nil { + in, out := &in.CacheExpirationAction, &out.CacheExpirationAction + *out = new(GlobalDeliveryRuleCacheExpirationActionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CacheKeyQueryStringAction != nil { + in, out := &in.CacheKeyQueryStringAction, &out.CacheKeyQueryStringAction + *out = new(GlobalDeliveryRuleCacheKeyQueryStringActionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ModifyRequestHeaderAction != nil { + in, out := &in.ModifyRequestHeaderAction, &out.ModifyRequestHeaderAction + *out = make([]GlobalDeliveryRuleModifyRequestHeaderActionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ModifyResponseHeaderAction != nil { + in, out := &in.ModifyResponseHeaderAction, &out.ModifyResponseHeaderAction + *out = make([]GlobalDeliveryRuleModifyResponseHeaderActionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + 
if in.URLRedirectAction != nil { + in, out := &in.URLRedirectAction, &out.URLRedirectAction + *out = new(GlobalDeliveryRuleURLRedirectActionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.URLRewriteAction != nil { + in, out := &in.URLRewriteAction, &out.URLRewriteAction + *out = new(GlobalDeliveryRuleURLRewriteActionInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalDeliveryRuleInitParameters. +func (in *GlobalDeliveryRuleInitParameters) DeepCopy() *GlobalDeliveryRuleInitParameters { + if in == nil { + return nil + } + out := new(GlobalDeliveryRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GlobalDeliveryRuleModifyRequestHeaderActionInitParameters) DeepCopyInto(out *GlobalDeliveryRuleModifyRequestHeaderActionInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalDeliveryRuleModifyRequestHeaderActionInitParameters. +func (in *GlobalDeliveryRuleModifyRequestHeaderActionInitParameters) DeepCopy() *GlobalDeliveryRuleModifyRequestHeaderActionInitParameters { + if in == nil { + return nil + } + out := new(GlobalDeliveryRuleModifyRequestHeaderActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GlobalDeliveryRuleModifyRequestHeaderActionObservation) DeepCopyInto(out *GlobalDeliveryRuleModifyRequestHeaderActionObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalDeliveryRuleModifyRequestHeaderActionObservation. +func (in *GlobalDeliveryRuleModifyRequestHeaderActionObservation) DeepCopy() *GlobalDeliveryRuleModifyRequestHeaderActionObservation { + if in == nil { + return nil + } + out := new(GlobalDeliveryRuleModifyRequestHeaderActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GlobalDeliveryRuleModifyRequestHeaderActionParameters) DeepCopyInto(out *GlobalDeliveryRuleModifyRequestHeaderActionParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalDeliveryRuleModifyRequestHeaderActionParameters. +func (in *GlobalDeliveryRuleModifyRequestHeaderActionParameters) DeepCopy() *GlobalDeliveryRuleModifyRequestHeaderActionParameters { + if in == nil { + return nil + } + out := new(GlobalDeliveryRuleModifyRequestHeaderActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GlobalDeliveryRuleModifyResponseHeaderActionInitParameters) DeepCopyInto(out *GlobalDeliveryRuleModifyResponseHeaderActionInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalDeliveryRuleModifyResponseHeaderActionInitParameters. +func (in *GlobalDeliveryRuleModifyResponseHeaderActionInitParameters) DeepCopy() *GlobalDeliveryRuleModifyResponseHeaderActionInitParameters { + if in == nil { + return nil + } + out := new(GlobalDeliveryRuleModifyResponseHeaderActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GlobalDeliveryRuleModifyResponseHeaderActionObservation) DeepCopyInto(out *GlobalDeliveryRuleModifyResponseHeaderActionObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalDeliveryRuleModifyResponseHeaderActionObservation. 
+func (in *GlobalDeliveryRuleModifyResponseHeaderActionObservation) DeepCopy() *GlobalDeliveryRuleModifyResponseHeaderActionObservation { + if in == nil { + return nil + } + out := new(GlobalDeliveryRuleModifyResponseHeaderActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GlobalDeliveryRuleModifyResponseHeaderActionParameters) DeepCopyInto(out *GlobalDeliveryRuleModifyResponseHeaderActionParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalDeliveryRuleModifyResponseHeaderActionParameters. +func (in *GlobalDeliveryRuleModifyResponseHeaderActionParameters) DeepCopy() *GlobalDeliveryRuleModifyResponseHeaderActionParameters { + if in == nil { + return nil + } + out := new(GlobalDeliveryRuleModifyResponseHeaderActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GlobalDeliveryRuleObservation) DeepCopyInto(out *GlobalDeliveryRuleObservation) { + *out = *in + if in.CacheExpirationAction != nil { + in, out := &in.CacheExpirationAction, &out.CacheExpirationAction + *out = new(GlobalDeliveryRuleCacheExpirationActionObservation) + (*in).DeepCopyInto(*out) + } + if in.CacheKeyQueryStringAction != nil { + in, out := &in.CacheKeyQueryStringAction, &out.CacheKeyQueryStringAction + *out = new(GlobalDeliveryRuleCacheKeyQueryStringActionObservation) + (*in).DeepCopyInto(*out) + } + if in.ModifyRequestHeaderAction != nil { + in, out := &in.ModifyRequestHeaderAction, &out.ModifyRequestHeaderAction + *out = make([]GlobalDeliveryRuleModifyRequestHeaderActionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ModifyResponseHeaderAction != nil { + in, out := &in.ModifyResponseHeaderAction, &out.ModifyResponseHeaderAction + *out = make([]GlobalDeliveryRuleModifyResponseHeaderActionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.URLRedirectAction != nil { + in, out := &in.URLRedirectAction, &out.URLRedirectAction + *out = new(GlobalDeliveryRuleURLRedirectActionObservation) + (*in).DeepCopyInto(*out) + } + if in.URLRewriteAction != nil { + in, out := &in.URLRewriteAction, &out.URLRewriteAction + *out = new(GlobalDeliveryRuleURLRewriteActionObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalDeliveryRuleObservation. +func (in *GlobalDeliveryRuleObservation) DeepCopy() *GlobalDeliveryRuleObservation { + if in == nil { + return nil + } + out := new(GlobalDeliveryRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GlobalDeliveryRuleParameters) DeepCopyInto(out *GlobalDeliveryRuleParameters) { + *out = *in + if in.CacheExpirationAction != nil { + in, out := &in.CacheExpirationAction, &out.CacheExpirationAction + *out = new(GlobalDeliveryRuleCacheExpirationActionParameters) + (*in).DeepCopyInto(*out) + } + if in.CacheKeyQueryStringAction != nil { + in, out := &in.CacheKeyQueryStringAction, &out.CacheKeyQueryStringAction + *out = new(GlobalDeliveryRuleCacheKeyQueryStringActionParameters) + (*in).DeepCopyInto(*out) + } + if in.ModifyRequestHeaderAction != nil { + in, out := &in.ModifyRequestHeaderAction, &out.ModifyRequestHeaderAction + *out = make([]GlobalDeliveryRuleModifyRequestHeaderActionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ModifyResponseHeaderAction != nil { + in, out := &in.ModifyResponseHeaderAction, &out.ModifyResponseHeaderAction + *out = make([]GlobalDeliveryRuleModifyResponseHeaderActionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.URLRedirectAction != nil { + in, out := &in.URLRedirectAction, &out.URLRedirectAction + *out = new(GlobalDeliveryRuleURLRedirectActionParameters) + (*in).DeepCopyInto(*out) + } + if in.URLRewriteAction != nil { + in, out := &in.URLRewriteAction, &out.URLRewriteAction + *out = new(GlobalDeliveryRuleURLRewriteActionParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalDeliveryRuleParameters. +func (in *GlobalDeliveryRuleParameters) DeepCopy() *GlobalDeliveryRuleParameters { + if in == nil { + return nil + } + out := new(GlobalDeliveryRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GlobalDeliveryRuleURLRedirectActionInitParameters) DeepCopyInto(out *GlobalDeliveryRuleURLRedirectActionInitParameters) { + *out = *in + if in.Fragment != nil { + in, out := &in.Fragment, &out.Fragment + *out = new(string) + **out = **in + } + if in.HostName != nil { + in, out := &in.HostName, &out.HostName + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.QueryString != nil { + in, out := &in.QueryString, &out.QueryString + *out = new(string) + **out = **in + } + if in.RedirectType != nil { + in, out := &in.RedirectType, &out.RedirectType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalDeliveryRuleURLRedirectActionInitParameters. +func (in *GlobalDeliveryRuleURLRedirectActionInitParameters) DeepCopy() *GlobalDeliveryRuleURLRedirectActionInitParameters { + if in == nil { + return nil + } + out := new(GlobalDeliveryRuleURLRedirectActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GlobalDeliveryRuleURLRedirectActionObservation) DeepCopyInto(out *GlobalDeliveryRuleURLRedirectActionObservation) { + *out = *in + if in.Fragment != nil { + in, out := &in.Fragment, &out.Fragment + *out = new(string) + **out = **in + } + if in.HostName != nil { + in, out := &in.HostName, &out.HostName + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.QueryString != nil { + in, out := &in.QueryString, &out.QueryString + *out = new(string) + **out = **in + } + if in.RedirectType != nil { + in, out := &in.RedirectType, &out.RedirectType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalDeliveryRuleURLRedirectActionObservation. +func (in *GlobalDeliveryRuleURLRedirectActionObservation) DeepCopy() *GlobalDeliveryRuleURLRedirectActionObservation { + if in == nil { + return nil + } + out := new(GlobalDeliveryRuleURLRedirectActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GlobalDeliveryRuleURLRedirectActionParameters) DeepCopyInto(out *GlobalDeliveryRuleURLRedirectActionParameters) { + *out = *in + if in.Fragment != nil { + in, out := &in.Fragment, &out.Fragment + *out = new(string) + **out = **in + } + if in.HostName != nil { + in, out := &in.HostName, &out.HostName + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.QueryString != nil { + in, out := &in.QueryString, &out.QueryString + *out = new(string) + **out = **in + } + if in.RedirectType != nil { + in, out := &in.RedirectType, &out.RedirectType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalDeliveryRuleURLRedirectActionParameters. +func (in *GlobalDeliveryRuleURLRedirectActionParameters) DeepCopy() *GlobalDeliveryRuleURLRedirectActionParameters { + if in == nil { + return nil + } + out := new(GlobalDeliveryRuleURLRedirectActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GlobalDeliveryRuleURLRewriteActionInitParameters) DeepCopyInto(out *GlobalDeliveryRuleURLRewriteActionInitParameters) { + *out = *in + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(string) + **out = **in + } + if in.PreserveUnmatchedPath != nil { + in, out := &in.PreserveUnmatchedPath, &out.PreserveUnmatchedPath + *out = new(bool) + **out = **in + } + if in.SourcePattern != nil { + in, out := &in.SourcePattern, &out.SourcePattern + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalDeliveryRuleURLRewriteActionInitParameters. 
+func (in *GlobalDeliveryRuleURLRewriteActionInitParameters) DeepCopy() *GlobalDeliveryRuleURLRewriteActionInitParameters { + if in == nil { + return nil + } + out := new(GlobalDeliveryRuleURLRewriteActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GlobalDeliveryRuleURLRewriteActionObservation) DeepCopyInto(out *GlobalDeliveryRuleURLRewriteActionObservation) { + *out = *in + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(string) + **out = **in + } + if in.PreserveUnmatchedPath != nil { + in, out := &in.PreserveUnmatchedPath, &out.PreserveUnmatchedPath + *out = new(bool) + **out = **in + } + if in.SourcePattern != nil { + in, out := &in.SourcePattern, &out.SourcePattern + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalDeliveryRuleURLRewriteActionObservation. +func (in *GlobalDeliveryRuleURLRewriteActionObservation) DeepCopy() *GlobalDeliveryRuleURLRewriteActionObservation { + if in == nil { + return nil + } + out := new(GlobalDeliveryRuleURLRewriteActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GlobalDeliveryRuleURLRewriteActionParameters) DeepCopyInto(out *GlobalDeliveryRuleURLRewriteActionParameters) { + *out = *in + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(string) + **out = **in + } + if in.PreserveUnmatchedPath != nil { + in, out := &in.PreserveUnmatchedPath, &out.PreserveUnmatchedPath + *out = new(bool) + **out = **in + } + if in.SourcePattern != nil { + in, out := &in.SourcePattern, &out.SourcePattern + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalDeliveryRuleURLRewriteActionParameters. +func (in *GlobalDeliveryRuleURLRewriteActionParameters) DeepCopy() *GlobalDeliveryRuleURLRewriteActionParameters { + if in == nil { + return nil + } + out := new(GlobalDeliveryRuleURLRewriteActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPVersionConditionInitParameters) DeepCopyInto(out *HTTPVersionConditionInitParameters) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPVersionConditionInitParameters. 
+func (in *HTTPVersionConditionInitParameters) DeepCopy() *HTTPVersionConditionInitParameters { + if in == nil { + return nil + } + out := new(HTTPVersionConditionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPVersionConditionObservation) DeepCopyInto(out *HTTPVersionConditionObservation) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPVersionConditionObservation. +func (in *HTTPVersionConditionObservation) DeepCopy() *HTTPVersionConditionObservation { + if in == nil { + return nil + } + out := new(HTTPVersionConditionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPVersionConditionParameters) DeepCopyInto(out *HTTPVersionConditionParameters) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPVersionConditionParameters. +func (in *HTTPVersionConditionParameters) DeepCopy() *HTTPVersionConditionParameters { + if in == nil { + return nil + } + out := new(HTTPVersionConditionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HealthProbeInitParameters) DeepCopyInto(out *HealthProbeInitParameters) { + *out = *in + if in.IntervalInSeconds != nil { + in, out := &in.IntervalInSeconds, &out.IntervalInSeconds + *out = new(float64) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.RequestType != nil { + in, out := &in.RequestType, &out.RequestType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthProbeInitParameters. +func (in *HealthProbeInitParameters) DeepCopy() *HealthProbeInitParameters { + if in == nil { + return nil + } + out := new(HealthProbeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *HealthProbeObservation) DeepCopyInto(out *HealthProbeObservation) { + *out = *in + if in.IntervalInSeconds != nil { + in, out := &in.IntervalInSeconds, &out.IntervalInSeconds + *out = new(float64) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.RequestType != nil { + in, out := &in.RequestType, &out.RequestType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthProbeObservation. +func (in *HealthProbeObservation) DeepCopy() *HealthProbeObservation { + if in == nil { + return nil + } + out := new(HealthProbeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HealthProbeParameters) DeepCopyInto(out *HealthProbeParameters) { + *out = *in + if in.IntervalInSeconds != nil { + in, out := &in.IntervalInSeconds, &out.IntervalInSeconds + *out = new(float64) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.RequestType != nil { + in, out := &in.RequestType, &out.RequestType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthProbeParameters. +func (in *HealthProbeParameters) DeepCopy() *HealthProbeParameters { + if in == nil { + return nil + } + out := new(HealthProbeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HostNameConditionInitParameters) DeepCopyInto(out *HostNameConditionInitParameters) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Transforms != nil { + in, out := &in.Transforms, &out.Transforms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostNameConditionInitParameters. +func (in *HostNameConditionInitParameters) DeepCopy() *HostNameConditionInitParameters { + if in == nil { + return nil + } + out := new(HostNameConditionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HostNameConditionObservation) DeepCopyInto(out *HostNameConditionObservation) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Transforms != nil { + in, out := &in.Transforms, &out.Transforms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostNameConditionObservation. +func (in *HostNameConditionObservation) DeepCopy() *HostNameConditionObservation { + if in == nil { + return nil + } + out := new(HostNameConditionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HostNameConditionParameters) DeepCopyInto(out *HostNameConditionParameters) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Transforms != nil { + in, out := &in.Transforms, &out.Transforms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostNameConditionParameters. +func (in *HostNameConditionParameters) DeepCopy() *HostNameConditionParameters { + if in == nil { + return nil + } + out := new(HostNameConditionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IsDeviceConditionInitParameters) DeepCopyInto(out *IsDeviceConditionInitParameters) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IsDeviceConditionInitParameters. 
+func (in *IsDeviceConditionInitParameters) DeepCopy() *IsDeviceConditionInitParameters { + if in == nil { + return nil + } + out := new(IsDeviceConditionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IsDeviceConditionObservation) DeepCopyInto(out *IsDeviceConditionObservation) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IsDeviceConditionObservation. +func (in *IsDeviceConditionObservation) DeepCopy() *IsDeviceConditionObservation { + if in == nil { + return nil + } + out := new(IsDeviceConditionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IsDeviceConditionParameters) DeepCopyInto(out *IsDeviceConditionParameters) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IsDeviceConditionParameters. +func (in *IsDeviceConditionParameters) DeepCopy() *IsDeviceConditionParameters { + if in == nil { + return nil + } + out := new(IsDeviceConditionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoadBalancingInitParameters) DeepCopyInto(out *LoadBalancingInitParameters) { + *out = *in + if in.AdditionalLatencyInMilliseconds != nil { + in, out := &in.AdditionalLatencyInMilliseconds, &out.AdditionalLatencyInMilliseconds + *out = new(float64) + **out = **in + } + if in.SampleSize != nil { + in, out := &in.SampleSize, &out.SampleSize + *out = new(float64) + **out = **in + } + if in.SuccessfulSamplesRequired != nil { + in, out := &in.SuccessfulSamplesRequired, &out.SuccessfulSamplesRequired + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancingInitParameters. +func (in *LoadBalancingInitParameters) DeepCopy() *LoadBalancingInitParameters { + if in == nil { + return nil + } + out := new(LoadBalancingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *LoadBalancingObservation) DeepCopyInto(out *LoadBalancingObservation) { + *out = *in + if in.AdditionalLatencyInMilliseconds != nil { + in, out := &in.AdditionalLatencyInMilliseconds, &out.AdditionalLatencyInMilliseconds + *out = new(float64) + **out = **in + } + if in.SampleSize != nil { + in, out := &in.SampleSize, &out.SampleSize + *out = new(float64) + **out = **in + } + if in.SuccessfulSamplesRequired != nil { + in, out := &in.SuccessfulSamplesRequired, &out.SuccessfulSamplesRequired + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancingObservation. +func (in *LoadBalancingObservation) DeepCopy() *LoadBalancingObservation { + if in == nil { + return nil + } + out := new(LoadBalancingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoadBalancingParameters) DeepCopyInto(out *LoadBalancingParameters) { + *out = *in + if in.AdditionalLatencyInMilliseconds != nil { + in, out := &in.AdditionalLatencyInMilliseconds, &out.AdditionalLatencyInMilliseconds + *out = new(float64) + **out = **in + } + if in.SampleSize != nil { + in, out := &in.SampleSize, &out.SampleSize + *out = new(float64) + **out = **in + } + if in.SuccessfulSamplesRequired != nil { + in, out := &in.SuccessfulSamplesRequired, &out.SuccessfulSamplesRequired + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancingParameters. +func (in *LoadBalancingParameters) DeepCopy() *LoadBalancingParameters { + if in == nil { + return nil + } + out := new(LoadBalancingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ModifyRequestHeaderActionInitParameters) DeepCopyInto(out *ModifyRequestHeaderActionInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModifyRequestHeaderActionInitParameters. +func (in *ModifyRequestHeaderActionInitParameters) DeepCopy() *ModifyRequestHeaderActionInitParameters { + if in == nil { + return nil + } + out := new(ModifyRequestHeaderActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ModifyRequestHeaderActionObservation) DeepCopyInto(out *ModifyRequestHeaderActionObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModifyRequestHeaderActionObservation. +func (in *ModifyRequestHeaderActionObservation) DeepCopy() *ModifyRequestHeaderActionObservation { + if in == nil { + return nil + } + out := new(ModifyRequestHeaderActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ModifyRequestHeaderActionParameters) DeepCopyInto(out *ModifyRequestHeaderActionParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModifyRequestHeaderActionParameters. +func (in *ModifyRequestHeaderActionParameters) DeepCopy() *ModifyRequestHeaderActionParameters { + if in == nil { + return nil + } + out := new(ModifyRequestHeaderActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ModifyResponseHeaderActionInitParameters) DeepCopyInto(out *ModifyResponseHeaderActionInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModifyResponseHeaderActionInitParameters. +func (in *ModifyResponseHeaderActionInitParameters) DeepCopy() *ModifyResponseHeaderActionInitParameters { + if in == nil { + return nil + } + out := new(ModifyResponseHeaderActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ModifyResponseHeaderActionObservation) DeepCopyInto(out *ModifyResponseHeaderActionObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModifyResponseHeaderActionObservation. +func (in *ModifyResponseHeaderActionObservation) DeepCopy() *ModifyResponseHeaderActionObservation { + if in == nil { + return nil + } + out := new(ModifyResponseHeaderActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ModifyResponseHeaderActionParameters) DeepCopyInto(out *ModifyResponseHeaderActionParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModifyResponseHeaderActionParameters. +func (in *ModifyResponseHeaderActionParameters) DeepCopy() *ModifyResponseHeaderActionParameters { + if in == nil { + return nil + } + out := new(ModifyResponseHeaderActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OriginInitParameters) DeepCopyInto(out *OriginInitParameters) { + *out = *in + if in.HTTPPort != nil { + in, out := &in.HTTPPort, &out.HTTPPort + *out = new(float64) + **out = **in + } + if in.HTTPSPort != nil { + in, out := &in.HTTPSPort, &out.HTTPSPort + *out = new(float64) + **out = **in + } + if in.HostName != nil { + in, out := &in.HostName, &out.HostName + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OriginInitParameters. +func (in *OriginInitParameters) DeepCopy() *OriginInitParameters { + if in == nil { + return nil + } + out := new(OriginInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OriginObservation) DeepCopyInto(out *OriginObservation) { + *out = *in + if in.HTTPPort != nil { + in, out := &in.HTTPPort, &out.HTTPPort + *out = new(float64) + **out = **in + } + if in.HTTPSPort != nil { + in, out := &in.HTTPSPort, &out.HTTPSPort + *out = new(float64) + **out = **in + } + if in.HostName != nil { + in, out := &in.HostName, &out.HostName + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OriginObservation. +func (in *OriginObservation) DeepCopy() *OriginObservation { + if in == nil { + return nil + } + out := new(OriginObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OriginParameters) DeepCopyInto(out *OriginParameters) { + *out = *in + if in.HTTPPort != nil { + in, out := &in.HTTPPort, &out.HTTPPort + *out = new(float64) + **out = **in + } + if in.HTTPSPort != nil { + in, out := &in.HTTPSPort, &out.HTTPSPort + *out = new(float64) + **out = **in + } + if in.HostName != nil { + in, out := &in.HostName, &out.HostName + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OriginParameters. +func (in *OriginParameters) DeepCopy() *OriginParameters { + if in == nil { + return nil + } + out := new(OriginParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostArgConditionInitParameters) DeepCopyInto(out *PostArgConditionInitParameters) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(string) + **out = **in + } + if in.Transforms != nil { + in, out := &in.Transforms, &out.Transforms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostArgConditionInitParameters. 
+func (in *PostArgConditionInitParameters) DeepCopy() *PostArgConditionInitParameters { + if in == nil { + return nil + } + out := new(PostArgConditionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostArgConditionObservation) DeepCopyInto(out *PostArgConditionObservation) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(string) + **out = **in + } + if in.Transforms != nil { + in, out := &in.Transforms, &out.Transforms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostArgConditionObservation. +func (in *PostArgConditionObservation) DeepCopy() *PostArgConditionObservation { + if in == nil { + return nil + } + out := new(PostArgConditionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PostArgConditionParameters) DeepCopyInto(out *PostArgConditionParameters) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(string) + **out = **in + } + if in.Transforms != nil { + in, out := &in.Transforms, &out.Transforms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostArgConditionParameters. +func (in *PostArgConditionParameters) DeepCopy() *PostArgConditionParameters { + if in == nil { + return nil + } + out := new(PostArgConditionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PostArgsConditionInitParameters) DeepCopyInto(out *PostArgsConditionInitParameters) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.PostArgsName != nil { + in, out := &in.PostArgsName, &out.PostArgsName + *out = new(string) + **out = **in + } + if in.Transforms != nil { + in, out := &in.Transforms, &out.Transforms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostArgsConditionInitParameters. +func (in *PostArgsConditionInitParameters) DeepCopy() *PostArgsConditionInitParameters { + if in == nil { + return nil + } + out := new(PostArgsConditionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PostArgsConditionObservation) DeepCopyInto(out *PostArgsConditionObservation) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.PostArgsName != nil { + in, out := &in.PostArgsName, &out.PostArgsName + *out = new(string) + **out = **in + } + if in.Transforms != nil { + in, out := &in.Transforms, &out.Transforms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostArgsConditionObservation. +func (in *PostArgsConditionObservation) DeepCopy() *PostArgsConditionObservation { + if in == nil { + return nil + } + out := new(PostArgsConditionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PostArgsConditionParameters) DeepCopyInto(out *PostArgsConditionParameters) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.PostArgsName != nil { + in, out := &in.PostArgsName, &out.PostArgsName + *out = new(string) + **out = **in + } + if in.Transforms != nil { + in, out := &in.Transforms, &out.Transforms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostArgsConditionParameters. +func (in *PostArgsConditionParameters) DeepCopy() *PostArgsConditionParameters { + if in == nil { + return nil + } + out := new(PostArgsConditionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PrivateLinkInitParameters) DeepCopyInto(out *PrivateLinkInitParameters) { + *out = *in + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.LocationRef != nil { + in, out := &in.LocationRef, &out.LocationRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LocationSelector != nil { + in, out := &in.LocationSelector, &out.LocationSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PrivateLinkTargetID != nil { + in, out := &in.PrivateLinkTargetID, &out.PrivateLinkTargetID + *out = new(string) + **out = **in + } + if in.PrivateLinkTargetIDRef != nil { + in, out := &in.PrivateLinkTargetIDRef, &out.PrivateLinkTargetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PrivateLinkTargetIDSelector != nil { + in, out := &in.PrivateLinkTargetIDSelector, &out.PrivateLinkTargetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RequestMessage != nil { + in, out := &in.RequestMessage, &out.RequestMessage + *out = new(string) + **out = **in + } + if in.TargetType != nil { + in, out := &in.TargetType, &out.TargetType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateLinkInitParameters. +func (in *PrivateLinkInitParameters) DeepCopy() *PrivateLinkInitParameters { + if in == nil { + return nil + } + out := new(PrivateLinkInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PrivateLinkObservation) DeepCopyInto(out *PrivateLinkObservation) { + *out = *in + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PrivateLinkTargetID != nil { + in, out := &in.PrivateLinkTargetID, &out.PrivateLinkTargetID + *out = new(string) + **out = **in + } + if in.RequestMessage != nil { + in, out := &in.RequestMessage, &out.RequestMessage + *out = new(string) + **out = **in + } + if in.TargetType != nil { + in, out := &in.TargetType, &out.TargetType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateLinkObservation. +func (in *PrivateLinkObservation) DeepCopy() *PrivateLinkObservation { + if in == nil { + return nil + } + out := new(PrivateLinkObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrivateLinkParameters) DeepCopyInto(out *PrivateLinkParameters) { + *out = *in + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.LocationRef != nil { + in, out := &in.LocationRef, &out.LocationRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LocationSelector != nil { + in, out := &in.LocationSelector, &out.LocationSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PrivateLinkTargetID != nil { + in, out := &in.PrivateLinkTargetID, &out.PrivateLinkTargetID + *out = new(string) + **out = **in + } + if in.PrivateLinkTargetIDRef != nil { + in, out := &in.PrivateLinkTargetIDRef, &out.PrivateLinkTargetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PrivateLinkTargetIDSelector != nil { + in, out := &in.PrivateLinkTargetIDSelector, &out.PrivateLinkTargetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RequestMessage != nil { + in, 
out := &in.RequestMessage, &out.RequestMessage + *out = new(string) + **out = **in + } + if in.TargetType != nil { + in, out := &in.TargetType, &out.TargetType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateLinkParameters. +func (in *PrivateLinkParameters) DeepCopy() *PrivateLinkParameters { + if in == nil { + return nil + } + out := new(PrivateLinkParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueryStringConditionInitParameters) DeepCopyInto(out *QueryStringConditionInitParameters) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Transforms != nil { + in, out := &in.Transforms, &out.Transforms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryStringConditionInitParameters. +func (in *QueryStringConditionInitParameters) DeepCopy() *QueryStringConditionInitParameters { + if in == nil { + return nil + } + out := new(QueryStringConditionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *QueryStringConditionObservation) DeepCopyInto(out *QueryStringConditionObservation) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Transforms != nil { + in, out := &in.Transforms, &out.Transforms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryStringConditionObservation. +func (in *QueryStringConditionObservation) DeepCopy() *QueryStringConditionObservation { + if in == nil { + return nil + } + out := new(QueryStringConditionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *QueryStringConditionParameters) DeepCopyInto(out *QueryStringConditionParameters) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Transforms != nil { + in, out := &in.Transforms, &out.Transforms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryStringConditionParameters. +func (in *QueryStringConditionParameters) DeepCopy() *QueryStringConditionParameters { + if in == nil { + return nil + } + out := new(QueryStringConditionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RemoteAddressConditionInitParameters) DeepCopyInto(out *RemoteAddressConditionInitParameters) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoteAddressConditionInitParameters. 
+func (in *RemoteAddressConditionInitParameters) DeepCopy() *RemoteAddressConditionInitParameters { + if in == nil { + return nil + } + out := new(RemoteAddressConditionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RemoteAddressConditionObservation) DeepCopyInto(out *RemoteAddressConditionObservation) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoteAddressConditionObservation. +func (in *RemoteAddressConditionObservation) DeepCopy() *RemoteAddressConditionObservation { + if in == nil { + return nil + } + out := new(RemoteAddressConditionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RemoteAddressConditionParameters) DeepCopyInto(out *RemoteAddressConditionParameters) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoteAddressConditionParameters. +func (in *RemoteAddressConditionParameters) DeepCopy() *RemoteAddressConditionParameters { + if in == nil { + return nil + } + out := new(RemoteAddressConditionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RequestBodyConditionInitParameters) DeepCopyInto(out *RequestBodyConditionInitParameters) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Transforms != nil { + in, out := &in.Transforms, &out.Transforms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestBodyConditionInitParameters. 
+func (in *RequestBodyConditionInitParameters) DeepCopy() *RequestBodyConditionInitParameters { + if in == nil { + return nil + } + out := new(RequestBodyConditionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RequestBodyConditionObservation) DeepCopyInto(out *RequestBodyConditionObservation) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Transforms != nil { + in, out := &in.Transforms, &out.Transforms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestBodyConditionObservation. +func (in *RequestBodyConditionObservation) DeepCopy() *RequestBodyConditionObservation { + if in == nil { + return nil + } + out := new(RequestBodyConditionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RequestBodyConditionParameters) DeepCopyInto(out *RequestBodyConditionParameters) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Transforms != nil { + in, out := &in.Transforms, &out.Transforms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestBodyConditionParameters. +func (in *RequestBodyConditionParameters) DeepCopy() *RequestBodyConditionParameters { + if in == nil { + return nil + } + out := new(RequestBodyConditionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RequestHeaderActionInitParameters) DeepCopyInto(out *RequestHeaderActionInitParameters) { + *out = *in + if in.HeaderAction != nil { + in, out := &in.HeaderAction, &out.HeaderAction + *out = new(string) + **out = **in + } + if in.HeaderName != nil { + in, out := &in.HeaderName, &out.HeaderName + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestHeaderActionInitParameters. 
+func (in *RequestHeaderActionInitParameters) DeepCopy() *RequestHeaderActionInitParameters { + if in == nil { + return nil + } + out := new(RequestHeaderActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RequestHeaderActionObservation) DeepCopyInto(out *RequestHeaderActionObservation) { + *out = *in + if in.HeaderAction != nil { + in, out := &in.HeaderAction, &out.HeaderAction + *out = new(string) + **out = **in + } + if in.HeaderName != nil { + in, out := &in.HeaderName, &out.HeaderName + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestHeaderActionObservation. +func (in *RequestHeaderActionObservation) DeepCopy() *RequestHeaderActionObservation { + if in == nil { + return nil + } + out := new(RequestHeaderActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RequestHeaderActionParameters) DeepCopyInto(out *RequestHeaderActionParameters) { + *out = *in + if in.HeaderAction != nil { + in, out := &in.HeaderAction, &out.HeaderAction + *out = new(string) + **out = **in + } + if in.HeaderName != nil { + in, out := &in.HeaderName, &out.HeaderName + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestHeaderActionParameters. 
+func (in *RequestHeaderActionParameters) DeepCopy() *RequestHeaderActionParameters { + if in == nil { + return nil + } + out := new(RequestHeaderActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RequestHeaderConditionInitParameters) DeepCopyInto(out *RequestHeaderConditionInitParameters) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(string) + **out = **in + } + if in.Transforms != nil { + in, out := &in.Transforms, &out.Transforms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestHeaderConditionInitParameters. +func (in *RequestHeaderConditionInitParameters) DeepCopy() *RequestHeaderConditionInitParameters { + if in == nil { + return nil + } + out := new(RequestHeaderConditionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RequestHeaderConditionObservation) DeepCopyInto(out *RequestHeaderConditionObservation) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(string) + **out = **in + } + if in.Transforms != nil { + in, out := &in.Transforms, &out.Transforms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestHeaderConditionObservation. +func (in *RequestHeaderConditionObservation) DeepCopy() *RequestHeaderConditionObservation { + if in == nil { + return nil + } + out := new(RequestHeaderConditionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RequestHeaderConditionParameters) DeepCopyInto(out *RequestHeaderConditionParameters) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(string) + **out = **in + } + if in.Transforms != nil { + in, out := &in.Transforms, &out.Transforms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestHeaderConditionParameters. +func (in *RequestHeaderConditionParameters) DeepCopy() *RequestHeaderConditionParameters { + if in == nil { + return nil + } + out := new(RequestHeaderConditionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RequestMethodConditionInitParameters) DeepCopyInto(out *RequestMethodConditionInitParameters) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestMethodConditionInitParameters. +func (in *RequestMethodConditionInitParameters) DeepCopy() *RequestMethodConditionInitParameters { + if in == nil { + return nil + } + out := new(RequestMethodConditionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RequestMethodConditionObservation) DeepCopyInto(out *RequestMethodConditionObservation) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestMethodConditionObservation. 
+func (in *RequestMethodConditionObservation) DeepCopy() *RequestMethodConditionObservation { + if in == nil { + return nil + } + out := new(RequestMethodConditionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RequestMethodConditionParameters) DeepCopyInto(out *RequestMethodConditionParameters) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestMethodConditionParameters. +func (in *RequestMethodConditionParameters) DeepCopy() *RequestMethodConditionParameters { + if in == nil { + return nil + } + out := new(RequestMethodConditionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RequestSchemeConditionInitParameters) DeepCopyInto(out *RequestSchemeConditionInitParameters) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestSchemeConditionInitParameters. +func (in *RequestSchemeConditionInitParameters) DeepCopy() *RequestSchemeConditionInitParameters { + if in == nil { + return nil + } + out := new(RequestSchemeConditionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RequestSchemeConditionObservation) DeepCopyInto(out *RequestSchemeConditionObservation) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestSchemeConditionObservation. 
+func (in *RequestSchemeConditionObservation) DeepCopy() *RequestSchemeConditionObservation { + if in == nil { + return nil + } + out := new(RequestSchemeConditionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RequestSchemeConditionParameters) DeepCopyInto(out *RequestSchemeConditionParameters) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestSchemeConditionParameters. +func (in *RequestSchemeConditionParameters) DeepCopy() *RequestSchemeConditionParameters { + if in == nil { + return nil + } + out := new(RequestSchemeConditionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RequestURIConditionInitParameters) DeepCopyInto(out *RequestURIConditionInitParameters) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Transforms != nil { + in, out := &in.Transforms, &out.Transforms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestURIConditionInitParameters. +func (in *RequestURIConditionInitParameters) DeepCopy() *RequestURIConditionInitParameters { + if in == nil { + return nil + } + out := new(RequestURIConditionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RequestURIConditionObservation) DeepCopyInto(out *RequestURIConditionObservation) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Transforms != nil { + in, out := &in.Transforms, &out.Transforms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestURIConditionObservation. +func (in *RequestURIConditionObservation) DeepCopy() *RequestURIConditionObservation { + if in == nil { + return nil + } + out := new(RequestURIConditionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RequestURIConditionParameters) DeepCopyInto(out *RequestURIConditionParameters) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Transforms != nil { + in, out := &in.Transforms, &out.Transforms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestURIConditionParameters. +func (in *RequestURIConditionParameters) DeepCopy() *RequestURIConditionParameters { + if in == nil { + return nil + } + out := new(RequestURIConditionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResponseHeaderActionInitParameters) DeepCopyInto(out *ResponseHeaderActionInitParameters) { + *out = *in + if in.HeaderAction != nil { + in, out := &in.HeaderAction, &out.HeaderAction + *out = new(string) + **out = **in + } + if in.HeaderName != nil { + in, out := &in.HeaderName, &out.HeaderName + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResponseHeaderActionInitParameters. 
+func (in *ResponseHeaderActionInitParameters) DeepCopy() *ResponseHeaderActionInitParameters { + if in == nil { + return nil + } + out := new(ResponseHeaderActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResponseHeaderActionObservation) DeepCopyInto(out *ResponseHeaderActionObservation) { + *out = *in + if in.HeaderAction != nil { + in, out := &in.HeaderAction, &out.HeaderAction + *out = new(string) + **out = **in + } + if in.HeaderName != nil { + in, out := &in.HeaderName, &out.HeaderName + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResponseHeaderActionObservation. +func (in *ResponseHeaderActionObservation) DeepCopy() *ResponseHeaderActionObservation { + if in == nil { + return nil + } + out := new(ResponseHeaderActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResponseHeaderActionParameters) DeepCopyInto(out *ResponseHeaderActionParameters) { + *out = *in + if in.HeaderAction != nil { + in, out := &in.HeaderAction, &out.HeaderAction + *out = new(string) + **out = **in + } + if in.HeaderName != nil { + in, out := &in.HeaderName, &out.HeaderName + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResponseHeaderActionParameters. 
+func (in *ResponseHeaderActionParameters) DeepCopy() *ResponseHeaderActionParameters { + if in == nil { + return nil + } + out := new(ResponseHeaderActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteConfigurationOverrideActionInitParameters) DeepCopyInto(out *RouteConfigurationOverrideActionInitParameters) { + *out = *in + if in.CacheBehavior != nil { + in, out := &in.CacheBehavior, &out.CacheBehavior + *out = new(string) + **out = **in + } + if in.CacheDuration != nil { + in, out := &in.CacheDuration, &out.CacheDuration + *out = new(string) + **out = **in + } + if in.CdnFrontdoorOriginGroupID != nil { + in, out := &in.CdnFrontdoorOriginGroupID, &out.CdnFrontdoorOriginGroupID + *out = new(string) + **out = **in + } + if in.CdnFrontdoorOriginGroupIDRef != nil { + in, out := &in.CdnFrontdoorOriginGroupIDRef, &out.CdnFrontdoorOriginGroupIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CdnFrontdoorOriginGroupIDSelector != nil { + in, out := &in.CdnFrontdoorOriginGroupIDSelector, &out.CdnFrontdoorOriginGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.CompressionEnabled != nil { + in, out := &in.CompressionEnabled, &out.CompressionEnabled + *out = new(bool) + **out = **in + } + if in.ForwardingProtocol != nil { + in, out := &in.ForwardingProtocol, &out.ForwardingProtocol + *out = new(string) + **out = **in + } + if in.QueryStringCachingBehavior != nil { + in, out := &in.QueryStringCachingBehavior, &out.QueryStringCachingBehavior + *out = new(string) + **out = **in + } + if in.QueryStringParameters != nil { + in, out := &in.QueryStringParameters, &out.QueryStringParameters + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy 
function, copying the receiver, creating a new RouteConfigurationOverrideActionInitParameters. +func (in *RouteConfigurationOverrideActionInitParameters) DeepCopy() *RouteConfigurationOverrideActionInitParameters { + if in == nil { + return nil + } + out := new(RouteConfigurationOverrideActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteConfigurationOverrideActionObservation) DeepCopyInto(out *RouteConfigurationOverrideActionObservation) { + *out = *in + if in.CacheBehavior != nil { + in, out := &in.CacheBehavior, &out.CacheBehavior + *out = new(string) + **out = **in + } + if in.CacheDuration != nil { + in, out := &in.CacheDuration, &out.CacheDuration + *out = new(string) + **out = **in + } + if in.CdnFrontdoorOriginGroupID != nil { + in, out := &in.CdnFrontdoorOriginGroupID, &out.CdnFrontdoorOriginGroupID + *out = new(string) + **out = **in + } + if in.CompressionEnabled != nil { + in, out := &in.CompressionEnabled, &out.CompressionEnabled + *out = new(bool) + **out = **in + } + if in.ForwardingProtocol != nil { + in, out := &in.ForwardingProtocol, &out.ForwardingProtocol + *out = new(string) + **out = **in + } + if in.QueryStringCachingBehavior != nil { + in, out := &in.QueryStringCachingBehavior, &out.QueryStringCachingBehavior + *out = new(string) + **out = **in + } + if in.QueryStringParameters != nil { + in, out := &in.QueryStringParameters, &out.QueryStringParameters + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteConfigurationOverrideActionObservation. 
+func (in *RouteConfigurationOverrideActionObservation) DeepCopy() *RouteConfigurationOverrideActionObservation { + if in == nil { + return nil + } + out := new(RouteConfigurationOverrideActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteConfigurationOverrideActionParameters) DeepCopyInto(out *RouteConfigurationOverrideActionParameters) { + *out = *in + if in.CacheBehavior != nil { + in, out := &in.CacheBehavior, &out.CacheBehavior + *out = new(string) + **out = **in + } + if in.CacheDuration != nil { + in, out := &in.CacheDuration, &out.CacheDuration + *out = new(string) + **out = **in + } + if in.CdnFrontdoorOriginGroupID != nil { + in, out := &in.CdnFrontdoorOriginGroupID, &out.CdnFrontdoorOriginGroupID + *out = new(string) + **out = **in + } + if in.CdnFrontdoorOriginGroupIDRef != nil { + in, out := &in.CdnFrontdoorOriginGroupIDRef, &out.CdnFrontdoorOriginGroupIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CdnFrontdoorOriginGroupIDSelector != nil { + in, out := &in.CdnFrontdoorOriginGroupIDSelector, &out.CdnFrontdoorOriginGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.CompressionEnabled != nil { + in, out := &in.CompressionEnabled, &out.CompressionEnabled + *out = new(bool) + **out = **in + } + if in.ForwardingProtocol != nil { + in, out := &in.ForwardingProtocol, &out.ForwardingProtocol + *out = new(string) + **out = **in + } + if in.QueryStringCachingBehavior != nil { + in, out := &in.QueryStringCachingBehavior, &out.QueryStringCachingBehavior + *out = new(string) + **out = **in + } + if in.QueryStringParameters != nil { + in, out := &in.QueryStringParameters, &out.QueryStringParameters + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// 
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteConfigurationOverrideActionParameters. +func (in *RouteConfigurationOverrideActionParameters) DeepCopy() *RouteConfigurationOverrideActionParameters { + if in == nil { + return nil + } + out := new(RouteConfigurationOverrideActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SSLProtocolConditionInitParameters) DeepCopyInto(out *SSLProtocolConditionInitParameters) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SSLProtocolConditionInitParameters. +func (in *SSLProtocolConditionInitParameters) DeepCopy() *SSLProtocolConditionInitParameters { + if in == nil { + return nil + } + out := new(SSLProtocolConditionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SSLProtocolConditionObservation) DeepCopyInto(out *SSLProtocolConditionObservation) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SSLProtocolConditionObservation. +func (in *SSLProtocolConditionObservation) DeepCopy() *SSLProtocolConditionObservation { + if in == nil { + return nil + } + out := new(SSLProtocolConditionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SSLProtocolConditionParameters) DeepCopyInto(out *SSLProtocolConditionParameters) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SSLProtocolConditionParameters. 
+func (in *SSLProtocolConditionParameters) DeepCopy() *SSLProtocolConditionParameters { + if in == nil { + return nil + } + out := new(SSLProtocolConditionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecurityPoliciesInitParameters) DeepCopyInto(out *SecurityPoliciesInitParameters) { + *out = *in + if in.Firewall != nil { + in, out := &in.Firewall, &out.Firewall + *out = new(FirewallInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityPoliciesInitParameters. +func (in *SecurityPoliciesInitParameters) DeepCopy() *SecurityPoliciesInitParameters { + if in == nil { + return nil + } + out := new(SecurityPoliciesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecurityPoliciesObservation) DeepCopyInto(out *SecurityPoliciesObservation) { + *out = *in + if in.Firewall != nil { + in, out := &in.Firewall, &out.Firewall + *out = new(FirewallObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityPoliciesObservation. +func (in *SecurityPoliciesObservation) DeepCopy() *SecurityPoliciesObservation { + if in == nil { + return nil + } + out := new(SecurityPoliciesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SecurityPoliciesParameters) DeepCopyInto(out *SecurityPoliciesParameters) { + *out = *in + if in.Firewall != nil { + in, out := &in.Firewall, &out.Firewall + *out = new(FirewallParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityPoliciesParameters. +func (in *SecurityPoliciesParameters) DeepCopy() *SecurityPoliciesParameters { + if in == nil { + return nil + } + out := new(SecurityPoliciesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerPortConditionInitParameters) DeepCopyInto(out *ServerPortConditionInitParameters) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerPortConditionInitParameters. +func (in *ServerPortConditionInitParameters) DeepCopy() *ServerPortConditionInitParameters { + if in == nil { + return nil + } + out := new(ServerPortConditionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServerPortConditionObservation) DeepCopyInto(out *ServerPortConditionObservation) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerPortConditionObservation. +func (in *ServerPortConditionObservation) DeepCopy() *ServerPortConditionObservation { + if in == nil { + return nil + } + out := new(ServerPortConditionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerPortConditionParameters) DeepCopyInto(out *ServerPortConditionParameters) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerPortConditionParameters. 
+func (in *ServerPortConditionParameters) DeepCopy() *ServerPortConditionParameters { + if in == nil { + return nil + } + out := new(ServerPortConditionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SocketAddressConditionInitParameters) DeepCopyInto(out *SocketAddressConditionInitParameters) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SocketAddressConditionInitParameters. +func (in *SocketAddressConditionInitParameters) DeepCopy() *SocketAddressConditionInitParameters { + if in == nil { + return nil + } + out := new(SocketAddressConditionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SocketAddressConditionObservation) DeepCopyInto(out *SocketAddressConditionObservation) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SocketAddressConditionObservation. +func (in *SocketAddressConditionObservation) DeepCopy() *SocketAddressConditionObservation { + if in == nil { + return nil + } + out := new(SocketAddressConditionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SocketAddressConditionParameters) DeepCopyInto(out *SocketAddressConditionParameters) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SocketAddressConditionParameters. 
+func (in *SocketAddressConditionParameters) DeepCopy() *SocketAddressConditionParameters { + if in == nil { + return nil + } + out := new(SocketAddressConditionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSInitParameters) DeepCopyInto(out *TLSInitParameters) { + *out = *in + if in.CdnFrontdoorSecretID != nil { + in, out := &in.CdnFrontdoorSecretID, &out.CdnFrontdoorSecretID + *out = new(string) + **out = **in + } + if in.CertificateType != nil { + in, out := &in.CertificateType, &out.CertificateType + *out = new(string) + **out = **in + } + if in.MinimumTLSVersion != nil { + in, out := &in.MinimumTLSVersion, &out.MinimumTLSVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSInitParameters. +func (in *TLSInitParameters) DeepCopy() *TLSInitParameters { + if in == nil { + return nil + } + out := new(TLSInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSObservation) DeepCopyInto(out *TLSObservation) { + *out = *in + if in.CdnFrontdoorSecretID != nil { + in, out := &in.CdnFrontdoorSecretID, &out.CdnFrontdoorSecretID + *out = new(string) + **out = **in + } + if in.CertificateType != nil { + in, out := &in.CertificateType, &out.CertificateType + *out = new(string) + **out = **in + } + if in.MinimumTLSVersion != nil { + in, out := &in.MinimumTLSVersion, &out.MinimumTLSVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSObservation. 
+func (in *TLSObservation) DeepCopy() *TLSObservation { + if in == nil { + return nil + } + out := new(TLSObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSParameters) DeepCopyInto(out *TLSParameters) { + *out = *in + if in.CdnFrontdoorSecretID != nil { + in, out := &in.CdnFrontdoorSecretID, &out.CdnFrontdoorSecretID + *out = new(string) + **out = **in + } + if in.CertificateType != nil { + in, out := &in.CertificateType, &out.CertificateType + *out = new(string) + **out = **in + } + if in.MinimumTLSVersion != nil { + in, out := &in.MinimumTLSVersion, &out.MinimumTLSVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSParameters. +func (in *TLSParameters) DeepCopy() *TLSParameters { + if in == nil { + return nil + } + out := new(TLSParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *URLFileExtensionConditionInitParameters) DeepCopyInto(out *URLFileExtensionConditionInitParameters) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Transforms != nil { + in, out := &in.Transforms, &out.Transforms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new URLFileExtensionConditionInitParameters. +func (in *URLFileExtensionConditionInitParameters) DeepCopy() *URLFileExtensionConditionInitParameters { + if in == nil { + return nil + } + out := new(URLFileExtensionConditionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *URLFileExtensionConditionObservation) DeepCopyInto(out *URLFileExtensionConditionObservation) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Transforms != nil { + in, out := &in.Transforms, &out.Transforms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new URLFileExtensionConditionObservation. +func (in *URLFileExtensionConditionObservation) DeepCopy() *URLFileExtensionConditionObservation { + if in == nil { + return nil + } + out := new(URLFileExtensionConditionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *URLFileExtensionConditionParameters) DeepCopyInto(out *URLFileExtensionConditionParameters) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Transforms != nil { + in, out := &in.Transforms, &out.Transforms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new URLFileExtensionConditionParameters. +func (in *URLFileExtensionConditionParameters) DeepCopy() *URLFileExtensionConditionParameters { + if in == nil { + return nil + } + out := new(URLFileExtensionConditionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *URLFileNameConditionInitParameters) DeepCopyInto(out *URLFileNameConditionInitParameters) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Transforms != nil { + in, out := &in.Transforms, &out.Transforms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new URLFileNameConditionInitParameters. +func (in *URLFileNameConditionInitParameters) DeepCopy() *URLFileNameConditionInitParameters { + if in == nil { + return nil + } + out := new(URLFileNameConditionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *URLFileNameConditionObservation) DeepCopyInto(out *URLFileNameConditionObservation) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Transforms != nil { + in, out := &in.Transforms, &out.Transforms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new URLFileNameConditionObservation. +func (in *URLFileNameConditionObservation) DeepCopy() *URLFileNameConditionObservation { + if in == nil { + return nil + } + out := new(URLFileNameConditionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *URLFileNameConditionParameters) DeepCopyInto(out *URLFileNameConditionParameters) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Transforms != nil { + in, out := &in.Transforms, &out.Transforms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new URLFileNameConditionParameters. +func (in *URLFileNameConditionParameters) DeepCopy() *URLFileNameConditionParameters { + if in == nil { + return nil + } + out := new(URLFileNameConditionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *URLFilenameConditionInitParameters) DeepCopyInto(out *URLFilenameConditionInitParameters) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Transforms != nil { + in, out := &in.Transforms, &out.Transforms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new URLFilenameConditionInitParameters. +func (in *URLFilenameConditionInitParameters) DeepCopy() *URLFilenameConditionInitParameters { + if in == nil { + return nil + } + out := new(URLFilenameConditionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *URLFilenameConditionObservation) DeepCopyInto(out *URLFilenameConditionObservation) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Transforms != nil { + in, out := &in.Transforms, &out.Transforms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new URLFilenameConditionObservation. +func (in *URLFilenameConditionObservation) DeepCopy() *URLFilenameConditionObservation { + if in == nil { + return nil + } + out := new(URLFilenameConditionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *URLFilenameConditionParameters) DeepCopyInto(out *URLFilenameConditionParameters) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Transforms != nil { + in, out := &in.Transforms, &out.Transforms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new URLFilenameConditionParameters. +func (in *URLFilenameConditionParameters) DeepCopy() *URLFilenameConditionParameters { + if in == nil { + return nil + } + out := new(URLFilenameConditionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *URLPathConditionInitParameters) DeepCopyInto(out *URLPathConditionInitParameters) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Transforms != nil { + in, out := &in.Transforms, &out.Transforms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new URLPathConditionInitParameters. +func (in *URLPathConditionInitParameters) DeepCopy() *URLPathConditionInitParameters { + if in == nil { + return nil + } + out := new(URLPathConditionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *URLPathConditionObservation) DeepCopyInto(out *URLPathConditionObservation) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Transforms != nil { + in, out := &in.Transforms, &out.Transforms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new URLPathConditionObservation. +func (in *URLPathConditionObservation) DeepCopy() *URLPathConditionObservation { + if in == nil { + return nil + } + out := new(URLPathConditionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *URLPathConditionParameters) DeepCopyInto(out *URLPathConditionParameters) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Transforms != nil { + in, out := &in.Transforms, &out.Transforms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new URLPathConditionParameters. +func (in *URLPathConditionParameters) DeepCopy() *URLPathConditionParameters { + if in == nil { + return nil + } + out := new(URLPathConditionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *URLRedirectActionInitParameters) DeepCopyInto(out *URLRedirectActionInitParameters) { + *out = *in + if in.Fragment != nil { + in, out := &in.Fragment, &out.Fragment + *out = new(string) + **out = **in + } + if in.HostName != nil { + in, out := &in.HostName, &out.HostName + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.QueryString != nil { + in, out := &in.QueryString, &out.QueryString + *out = new(string) + **out = **in + } + if in.RedirectType != nil { + in, out := &in.RedirectType, &out.RedirectType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new URLRedirectActionInitParameters. +func (in *URLRedirectActionInitParameters) DeepCopy() *URLRedirectActionInitParameters { + if in == nil { + return nil + } + out := new(URLRedirectActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *URLRedirectActionObservation) DeepCopyInto(out *URLRedirectActionObservation) { + *out = *in + if in.Fragment != nil { + in, out := &in.Fragment, &out.Fragment + *out = new(string) + **out = **in + } + if in.HostName != nil { + in, out := &in.HostName, &out.HostName + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.QueryString != nil { + in, out := &in.QueryString, &out.QueryString + *out = new(string) + **out = **in + } + if in.RedirectType != nil { + in, out := &in.RedirectType, &out.RedirectType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new URLRedirectActionObservation. +func (in *URLRedirectActionObservation) DeepCopy() *URLRedirectActionObservation { + if in == nil { + return nil + } + out := new(URLRedirectActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *URLRedirectActionParameters) DeepCopyInto(out *URLRedirectActionParameters) { + *out = *in + if in.Fragment != nil { + in, out := &in.Fragment, &out.Fragment + *out = new(string) + **out = **in + } + if in.HostName != nil { + in, out := &in.HostName, &out.HostName + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.QueryString != nil { + in, out := &in.QueryString, &out.QueryString + *out = new(string) + **out = **in + } + if in.RedirectType != nil { + in, out := &in.RedirectType, &out.RedirectType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new URLRedirectActionParameters. +func (in *URLRedirectActionParameters) DeepCopy() *URLRedirectActionParameters { + if in == nil { + return nil + } + out := new(URLRedirectActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *URLRewriteActionInitParameters) DeepCopyInto(out *URLRewriteActionInitParameters) { + *out = *in + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(string) + **out = **in + } + if in.PreserveUnmatchedPath != nil { + in, out := &in.PreserveUnmatchedPath, &out.PreserveUnmatchedPath + *out = new(bool) + **out = **in + } + if in.SourcePattern != nil { + in, out := &in.SourcePattern, &out.SourcePattern + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new URLRewriteActionInitParameters. 
+func (in *URLRewriteActionInitParameters) DeepCopy() *URLRewriteActionInitParameters { + if in == nil { + return nil + } + out := new(URLRewriteActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *URLRewriteActionObservation) DeepCopyInto(out *URLRewriteActionObservation) { + *out = *in + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(string) + **out = **in + } + if in.PreserveUnmatchedPath != nil { + in, out := &in.PreserveUnmatchedPath, &out.PreserveUnmatchedPath + *out = new(bool) + **out = **in + } + if in.SourcePattern != nil { + in, out := &in.SourcePattern, &out.SourcePattern + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new URLRewriteActionObservation. +func (in *URLRewriteActionObservation) DeepCopy() *URLRewriteActionObservation { + if in == nil { + return nil + } + out := new(URLRewriteActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *URLRewriteActionParameters) DeepCopyInto(out *URLRewriteActionParameters) { + *out = *in + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(string) + **out = **in + } + if in.PreserveUnmatchedPath != nil { + in, out := &in.PreserveUnmatchedPath, &out.PreserveUnmatchedPath + *out = new(bool) + **out = **in + } + if in.SourcePattern != nil { + in, out := &in.SourcePattern, &out.SourcePattern + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new URLRewriteActionParameters. 
+func (in *URLRewriteActionParameters) DeepCopy() *URLRewriteActionParameters { + if in == nil { + return nil + } + out := new(URLRewriteActionParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/cdn/v1beta2/zz_generated.managed.go b/apis/cdn/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..184d54d6f --- /dev/null +++ b/apis/cdn/v1beta2/zz_generated.managed.go @@ -0,0 +1,428 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Endpoint. +func (mg *Endpoint) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Endpoint. +func (mg *Endpoint) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Endpoint. +func (mg *Endpoint) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Endpoint. +func (mg *Endpoint) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Endpoint. +func (mg *Endpoint) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Endpoint. +func (mg *Endpoint) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Endpoint. +func (mg *Endpoint) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Endpoint. +func (mg *Endpoint) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Endpoint. 
+func (mg *Endpoint) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Endpoint. +func (mg *Endpoint) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Endpoint. +func (mg *Endpoint) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Endpoint. +func (mg *Endpoint) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this FrontdoorCustomDomain. +func (mg *FrontdoorCustomDomain) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this FrontdoorCustomDomain. +func (mg *FrontdoorCustomDomain) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this FrontdoorCustomDomain. +func (mg *FrontdoorCustomDomain) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this FrontdoorCustomDomain. +func (mg *FrontdoorCustomDomain) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this FrontdoorCustomDomain. +func (mg *FrontdoorCustomDomain) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this FrontdoorCustomDomain. +func (mg *FrontdoorCustomDomain) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this FrontdoorCustomDomain. +func (mg *FrontdoorCustomDomain) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) 
+} + +// SetDeletionPolicy of this FrontdoorCustomDomain. +func (mg *FrontdoorCustomDomain) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this FrontdoorCustomDomain. +func (mg *FrontdoorCustomDomain) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this FrontdoorCustomDomain. +func (mg *FrontdoorCustomDomain) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this FrontdoorCustomDomain. +func (mg *FrontdoorCustomDomain) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this FrontdoorCustomDomain. +func (mg *FrontdoorCustomDomain) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this FrontdoorOrigin. +func (mg *FrontdoorOrigin) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this FrontdoorOrigin. +func (mg *FrontdoorOrigin) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this FrontdoorOrigin. +func (mg *FrontdoorOrigin) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this FrontdoorOrigin. +func (mg *FrontdoorOrigin) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this FrontdoorOrigin. +func (mg *FrontdoorOrigin) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this FrontdoorOrigin. 
+func (mg *FrontdoorOrigin) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this FrontdoorOrigin. +func (mg *FrontdoorOrigin) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this FrontdoorOrigin. +func (mg *FrontdoorOrigin) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this FrontdoorOrigin. +func (mg *FrontdoorOrigin) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this FrontdoorOrigin. +func (mg *FrontdoorOrigin) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this FrontdoorOrigin. +func (mg *FrontdoorOrigin) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this FrontdoorOrigin. +func (mg *FrontdoorOrigin) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this FrontdoorOriginGroup. +func (mg *FrontdoorOriginGroup) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this FrontdoorOriginGroup. +func (mg *FrontdoorOriginGroup) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this FrontdoorOriginGroup. +func (mg *FrontdoorOriginGroup) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this FrontdoorOriginGroup. +func (mg *FrontdoorOriginGroup) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this FrontdoorOriginGroup. 
+func (mg *FrontdoorOriginGroup) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this FrontdoorOriginGroup. +func (mg *FrontdoorOriginGroup) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this FrontdoorOriginGroup. +func (mg *FrontdoorOriginGroup) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this FrontdoorOriginGroup. +func (mg *FrontdoorOriginGroup) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this FrontdoorOriginGroup. +func (mg *FrontdoorOriginGroup) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this FrontdoorOriginGroup. +func (mg *FrontdoorOriginGroup) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this FrontdoorOriginGroup. +func (mg *FrontdoorOriginGroup) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this FrontdoorOriginGroup. +func (mg *FrontdoorOriginGroup) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this FrontdoorRoute. +func (mg *FrontdoorRoute) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this FrontdoorRoute. +func (mg *FrontdoorRoute) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this FrontdoorRoute. 
+func (mg *FrontdoorRoute) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this FrontdoorRoute. +func (mg *FrontdoorRoute) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this FrontdoorRoute. +func (mg *FrontdoorRoute) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this FrontdoorRoute. +func (mg *FrontdoorRoute) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this FrontdoorRoute. +func (mg *FrontdoorRoute) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this FrontdoorRoute. +func (mg *FrontdoorRoute) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this FrontdoorRoute. +func (mg *FrontdoorRoute) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this FrontdoorRoute. +func (mg *FrontdoorRoute) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this FrontdoorRoute. +func (mg *FrontdoorRoute) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this FrontdoorRoute. +func (mg *FrontdoorRoute) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this FrontdoorRule. +func (mg *FrontdoorRule) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this FrontdoorRule. 
+func (mg *FrontdoorRule) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this FrontdoorRule. +func (mg *FrontdoorRule) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this FrontdoorRule. +func (mg *FrontdoorRule) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this FrontdoorRule. +func (mg *FrontdoorRule) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this FrontdoorRule. +func (mg *FrontdoorRule) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this FrontdoorRule. +func (mg *FrontdoorRule) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this FrontdoorRule. +func (mg *FrontdoorRule) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this FrontdoorRule. +func (mg *FrontdoorRule) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this FrontdoorRule. +func (mg *FrontdoorRule) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this FrontdoorRule. +func (mg *FrontdoorRule) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this FrontdoorRule. +func (mg *FrontdoorRule) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this FrontdoorSecurityPolicy. 
+func (mg *FrontdoorSecurityPolicy) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this FrontdoorSecurityPolicy. +func (mg *FrontdoorSecurityPolicy) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this FrontdoorSecurityPolicy. +func (mg *FrontdoorSecurityPolicy) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this FrontdoorSecurityPolicy. +func (mg *FrontdoorSecurityPolicy) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this FrontdoorSecurityPolicy. +func (mg *FrontdoorSecurityPolicy) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this FrontdoorSecurityPolicy. +func (mg *FrontdoorSecurityPolicy) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this FrontdoorSecurityPolicy. +func (mg *FrontdoorSecurityPolicy) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this FrontdoorSecurityPolicy. +func (mg *FrontdoorSecurityPolicy) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this FrontdoorSecurityPolicy. +func (mg *FrontdoorSecurityPolicy) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this FrontdoorSecurityPolicy. +func (mg *FrontdoorSecurityPolicy) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this FrontdoorSecurityPolicy. 
+func (mg *FrontdoorSecurityPolicy) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this FrontdoorSecurityPolicy. +func (mg *FrontdoorSecurityPolicy) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/cdn/v1beta2/zz_generated.managedlist.go b/apis/cdn/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..42819eba3 --- /dev/null +++ b/apis/cdn/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,71 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this EndpointList. +func (l *EndpointList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this FrontdoorCustomDomainList. +func (l *FrontdoorCustomDomainList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this FrontdoorOriginGroupList. +func (l *FrontdoorOriginGroupList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this FrontdoorOriginList. +func (l *FrontdoorOriginList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this FrontdoorRouteList. 
+func (l *FrontdoorRouteList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this FrontdoorRuleList. +func (l *FrontdoorRuleList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this FrontdoorSecurityPolicyList. +func (l *FrontdoorSecurityPolicyList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/cdn/v1beta2/zz_generated.resolvers.go b/apis/cdn/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..6e566390b --- /dev/null +++ b/apis/cdn/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,755 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + client "sigs.k8s.io/controller-runtime/pkg/client" + + // ResolveReferences of this Endpoint. 
+ apisresolver "github.com/upbound/provider-azure/internal/apis" +) + +func (mg *Endpoint) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("cdn.azure.upbound.io", "v1beta1", "Profile", "ProfileList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ProfileName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ProfileNameRef, + Selector: mg.Spec.ForProvider.ProfileNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ProfileName") + } + mg.Spec.ForProvider.ProfileName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ProfileNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this 
FrontdoorCustomDomain. +func (mg *FrontdoorCustomDomain) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("cdn.azure.upbound.io", "v1beta1", "FrontdoorProfile", "FrontdoorProfileList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.CdnFrontdoorProfileID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.CdnFrontdoorProfileIDRef, + Selector: mg.Spec.ForProvider.CdnFrontdoorProfileIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.CdnFrontdoorProfileID") + } + mg.Spec.ForProvider.CdnFrontdoorProfileID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.CdnFrontdoorProfileIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "DNSZone", "DNSZoneList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DNSZoneID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.DNSZoneIDRef, + Selector: mg.Spec.ForProvider.DNSZoneIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DNSZoneID") + } + mg.Spec.ForProvider.DNSZoneID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DNSZoneIDRef = rsp.ResolvedReference + { + m, l, err = 
apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "DNSZone", "DNSZoneList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DNSZoneID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.DNSZoneIDRef, + Selector: mg.Spec.InitProvider.DNSZoneIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.DNSZoneID") + } + mg.Spec.InitProvider.DNSZoneID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DNSZoneIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this FrontdoorOrigin. +func (mg *FrontdoorOrigin) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("cdn.azure.upbound.io", "v1beta2", "FrontdoorOriginGroup", "FrontdoorOriginGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.CdnFrontdoorOriginGroupID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.CdnFrontdoorOriginGroupIDRef, + Selector: mg.Spec.ForProvider.CdnFrontdoorOriginGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.CdnFrontdoorOriginGroupID") + } + mg.Spec.ForProvider.CdnFrontdoorOriginGroupID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.CdnFrontdoorOriginGroupIDRef = rsp.ResolvedReference + 
{ + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.HostName), + Extract: resource.ExtractParamPath("primary_blob_host", true), + Reference: mg.Spec.ForProvider.HostNameRef, + Selector: mg.Spec.ForProvider.HostNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.HostName") + } + mg.Spec.ForProvider.HostName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.HostNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.OriginHostHeader), + Extract: resource.ExtractParamPath("primary_blob_host", true), + Reference: mg.Spec.ForProvider.OriginHostHeaderRef, + Selector: mg.Spec.ForProvider.OriginHostHeaderSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.OriginHostHeader") + } + mg.Spec.ForProvider.OriginHostHeader = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.OriginHostHeaderRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.PrivateLink != nil { + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, 
reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.PrivateLink.Location), + Extract: resource.ExtractParamPath("location", false), + Reference: mg.Spec.ForProvider.PrivateLink.LocationRef, + Selector: mg.Spec.ForProvider.PrivateLink.LocationSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.PrivateLink.Location") + } + mg.Spec.ForProvider.PrivateLink.Location = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.PrivateLink.LocationRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.PrivateLink != nil { + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.PrivateLink.PrivateLinkTargetID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.PrivateLink.PrivateLinkTargetIDRef, + Selector: mg.Spec.ForProvider.PrivateLink.PrivateLinkTargetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.PrivateLink.PrivateLinkTargetID") + } + mg.Spec.ForProvider.PrivateLink.PrivateLinkTargetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.PrivateLink.PrivateLinkTargetIDRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.HostName), + Extract: 
resource.ExtractParamPath("primary_blob_host", true), + Reference: mg.Spec.InitProvider.HostNameRef, + Selector: mg.Spec.InitProvider.HostNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.HostName") + } + mg.Spec.InitProvider.HostName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.HostNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.OriginHostHeader), + Extract: resource.ExtractParamPath("primary_blob_host", true), + Reference: mg.Spec.InitProvider.OriginHostHeaderRef, + Selector: mg.Spec.InitProvider.OriginHostHeaderSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.OriginHostHeader") + } + mg.Spec.InitProvider.OriginHostHeader = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.OriginHostHeaderRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.PrivateLink != nil { + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.PrivateLink.Location), + Extract: resource.ExtractParamPath("location", false), + Reference: mg.Spec.InitProvider.PrivateLink.LocationRef, + Selector: mg.Spec.InitProvider.PrivateLink.LocationSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, 
"mg.Spec.InitProvider.PrivateLink.Location") + } + mg.Spec.InitProvider.PrivateLink.Location = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.PrivateLink.LocationRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.PrivateLink != nil { + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.PrivateLink.PrivateLinkTargetID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.PrivateLink.PrivateLinkTargetIDRef, + Selector: mg.Spec.InitProvider.PrivateLink.PrivateLinkTargetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.PrivateLink.PrivateLinkTargetID") + } + mg.Spec.InitProvider.PrivateLink.PrivateLinkTargetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.PrivateLink.PrivateLinkTargetIDRef = rsp.ResolvedReference + + } + + return nil +} + +// ResolveReferences of this FrontdoorOriginGroup. 
+func (mg *FrontdoorOriginGroup) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("cdn.azure.upbound.io", "v1beta1", "FrontdoorProfile", "FrontdoorProfileList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.CdnFrontdoorProfileID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.CdnFrontdoorProfileIDRef, + Selector: mg.Spec.ForProvider.CdnFrontdoorProfileIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.CdnFrontdoorProfileID") + } + mg.Spec.ForProvider.CdnFrontdoorProfileID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.CdnFrontdoorProfileIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this FrontdoorRoute. 
+func (mg *FrontdoorRoute) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("cdn.azure.upbound.io", "v1beta2", "FrontdoorCustomDomain", "FrontdoorCustomDomainList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.CdnFrontdoorCustomDomainIds), + Extract: resource.ExtractResourceID(), + References: mg.Spec.ForProvider.CdnFrontdoorCustomDomainIdsRefs, + Selector: mg.Spec.ForProvider.CdnFrontdoorCustomDomainIdsSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.CdnFrontdoorCustomDomainIds") + } + mg.Spec.ForProvider.CdnFrontdoorCustomDomainIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.CdnFrontdoorCustomDomainIdsRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("cdn.azure.upbound.io", "v1beta1", "FrontdoorEndpoint", "FrontdoorEndpointList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.CdnFrontdoorEndpointID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.CdnFrontdoorEndpointIDRef, + Selector: mg.Spec.ForProvider.CdnFrontdoorEndpointIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.CdnFrontdoorEndpointID") + } + 
mg.Spec.ForProvider.CdnFrontdoorEndpointID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.CdnFrontdoorEndpointIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("cdn.azure.upbound.io", "v1beta2", "FrontdoorOriginGroup", "FrontdoorOriginGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.CdnFrontdoorOriginGroupID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.CdnFrontdoorOriginGroupIDRef, + Selector: mg.Spec.ForProvider.CdnFrontdoorOriginGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.CdnFrontdoorOriginGroupID") + } + mg.Spec.ForProvider.CdnFrontdoorOriginGroupID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.CdnFrontdoorOriginGroupIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("cdn.azure.upbound.io", "v1beta2", "FrontdoorOrigin", "FrontdoorOriginList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.CdnFrontdoorOriginIds), + Extract: resource.ExtractResourceID(), + References: mg.Spec.ForProvider.CdnFrontdoorOriginIdsRefs, + Selector: mg.Spec.ForProvider.CdnFrontdoorOriginIdsSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.CdnFrontdoorOriginIds") + } + mg.Spec.ForProvider.CdnFrontdoorOriginIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.CdnFrontdoorOriginIdsRefs = mrsp.ResolvedReferences + { + m, l, err = 
apisresolver.GetManagedResource("cdn.azure.upbound.io", "v1beta1", "FrontdoorRuleSet", "FrontdoorRuleSetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.CdnFrontdoorRuleSetIds), + Extract: resource.ExtractResourceID(), + References: mg.Spec.ForProvider.CdnFrontdoorRuleSetIdsRefs, + Selector: mg.Spec.ForProvider.CdnFrontdoorRuleSetIdsSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.CdnFrontdoorRuleSetIds") + } + mg.Spec.ForProvider.CdnFrontdoorRuleSetIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.CdnFrontdoorRuleSetIdsRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("cdn.azure.upbound.io", "v1beta2", "FrontdoorCustomDomain", "FrontdoorCustomDomainList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.CdnFrontdoorCustomDomainIds), + Extract: resource.ExtractResourceID(), + References: mg.Spec.InitProvider.CdnFrontdoorCustomDomainIdsRefs, + Selector: mg.Spec.InitProvider.CdnFrontdoorCustomDomainIdsSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.CdnFrontdoorCustomDomainIds") + } + mg.Spec.InitProvider.CdnFrontdoorCustomDomainIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.CdnFrontdoorCustomDomainIdsRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("cdn.azure.upbound.io", "v1beta2", "FrontdoorOriginGroup", "FrontdoorOriginGroupList") + if err != nil { + 
return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.CdnFrontdoorOriginGroupID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.CdnFrontdoorOriginGroupIDRef, + Selector: mg.Spec.InitProvider.CdnFrontdoorOriginGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.CdnFrontdoorOriginGroupID") + } + mg.Spec.InitProvider.CdnFrontdoorOriginGroupID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.CdnFrontdoorOriginGroupIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("cdn.azure.upbound.io", "v1beta2", "FrontdoorOrigin", "FrontdoorOriginList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.CdnFrontdoorOriginIds), + Extract: resource.ExtractResourceID(), + References: mg.Spec.InitProvider.CdnFrontdoorOriginIdsRefs, + Selector: mg.Spec.InitProvider.CdnFrontdoorOriginIdsSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.CdnFrontdoorOriginIds") + } + mg.Spec.InitProvider.CdnFrontdoorOriginIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.CdnFrontdoorOriginIdsRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("cdn.azure.upbound.io", "v1beta1", "FrontdoorRuleSet", "FrontdoorRuleSetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, 
reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.CdnFrontdoorRuleSetIds), + Extract: resource.ExtractResourceID(), + References: mg.Spec.InitProvider.CdnFrontdoorRuleSetIdsRefs, + Selector: mg.Spec.InitProvider.CdnFrontdoorRuleSetIdsSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.CdnFrontdoorRuleSetIds") + } + mg.Spec.InitProvider.CdnFrontdoorRuleSetIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.CdnFrontdoorRuleSetIdsRefs = mrsp.ResolvedReferences + + return nil +} + +// ResolveReferences of this FrontdoorRule. +func (mg *FrontdoorRule) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + if mg.Spec.ForProvider.Actions != nil { + if mg.Spec.ForProvider.Actions.RouteConfigurationOverrideAction != nil { + { + m, l, err = apisresolver.GetManagedResource("cdn.azure.upbound.io", "v1beta2", "FrontdoorOriginGroup", "FrontdoorOriginGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Actions.RouteConfigurationOverrideAction.CdnFrontdoorOriginGroupID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.Actions.RouteConfigurationOverrideAction.CdnFrontdoorOriginGroupIDRef, + Selector: mg.Spec.ForProvider.Actions.RouteConfigurationOverrideAction.CdnFrontdoorOriginGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Actions.RouteConfigurationOverrideAction.CdnFrontdoorOriginGroupID") + } + 
mg.Spec.ForProvider.Actions.RouteConfigurationOverrideAction.CdnFrontdoorOriginGroupID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Actions.RouteConfigurationOverrideAction.CdnFrontdoorOriginGroupIDRef = rsp.ResolvedReference + + } + } + { + m, l, err = apisresolver.GetManagedResource("cdn.azure.upbound.io", "v1beta1", "FrontdoorRuleSet", "FrontdoorRuleSetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.CdnFrontdoorRuleSetID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.CdnFrontdoorRuleSetIDRef, + Selector: mg.Spec.ForProvider.CdnFrontdoorRuleSetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.CdnFrontdoorRuleSetID") + } + mg.Spec.ForProvider.CdnFrontdoorRuleSetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.CdnFrontdoorRuleSetIDRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.Actions != nil { + if mg.Spec.InitProvider.Actions.RouteConfigurationOverrideAction != nil { + { + m, l, err = apisresolver.GetManagedResource("cdn.azure.upbound.io", "v1beta2", "FrontdoorOriginGroup", "FrontdoorOriginGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Actions.RouteConfigurationOverrideAction.CdnFrontdoorOriginGroupID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.Actions.RouteConfigurationOverrideAction.CdnFrontdoorOriginGroupIDRef, + Selector: mg.Spec.InitProvider.Actions.RouteConfigurationOverrideAction.CdnFrontdoorOriginGroupIDSelector, + To: reference.To{List: l, 
Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Actions.RouteConfigurationOverrideAction.CdnFrontdoorOriginGroupID") + } + mg.Spec.InitProvider.Actions.RouteConfigurationOverrideAction.CdnFrontdoorOriginGroupID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Actions.RouteConfigurationOverrideAction.CdnFrontdoorOriginGroupIDRef = rsp.ResolvedReference + + } + } + + return nil +} + +// ResolveReferences of this FrontdoorSecurityPolicy. +func (mg *FrontdoorSecurityPolicy) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("cdn.azure.upbound.io", "v1beta1", "FrontdoorProfile", "FrontdoorProfileList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.CdnFrontdoorProfileID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.CdnFrontdoorProfileIDRef, + Selector: mg.Spec.ForProvider.CdnFrontdoorProfileIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.CdnFrontdoorProfileID") + } + mg.Spec.ForProvider.CdnFrontdoorProfileID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.CdnFrontdoorProfileIDRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.SecurityPolicies != nil { + if mg.Spec.ForProvider.SecurityPolicies.Firewall != nil { + if mg.Spec.ForProvider.SecurityPolicies.Firewall.Association != nil { + for i6 := 0; i6 < len(mg.Spec.ForProvider.SecurityPolicies.Firewall.Association.Domain); i6++ { + { + m, l, err = apisresolver.GetManagedResource("cdn.azure.upbound.io", "v1beta2", 
"FrontdoorCustomDomain", "FrontdoorCustomDomainList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SecurityPolicies.Firewall.Association.Domain[i6].CdnFrontdoorDomainID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.SecurityPolicies.Firewall.Association.Domain[i6].CdnFrontdoorDomainIDRef, + Selector: mg.Spec.ForProvider.SecurityPolicies.Firewall.Association.Domain[i6].CdnFrontdoorDomainIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SecurityPolicies.Firewall.Association.Domain[i6].CdnFrontdoorDomainID") + } + mg.Spec.ForProvider.SecurityPolicies.Firewall.Association.Domain[i6].CdnFrontdoorDomainID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SecurityPolicies.Firewall.Association.Domain[i6].CdnFrontdoorDomainIDRef = rsp.ResolvedReference + + } + } + } + } + if mg.Spec.ForProvider.SecurityPolicies != nil { + if mg.Spec.ForProvider.SecurityPolicies.Firewall != nil { + { + m, l, err = apisresolver.GetManagedResource("cdn.azure.upbound.io", "v1beta1", "FrontdoorFirewallPolicy", "FrontdoorFirewallPolicyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SecurityPolicies.Firewall.CdnFrontdoorFirewallPolicyID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.SecurityPolicies.Firewall.CdnFrontdoorFirewallPolicyIDRef, + Selector: mg.Spec.ForProvider.SecurityPolicies.Firewall.CdnFrontdoorFirewallPolicyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, 
"mg.Spec.ForProvider.SecurityPolicies.Firewall.CdnFrontdoorFirewallPolicyID") + } + mg.Spec.ForProvider.SecurityPolicies.Firewall.CdnFrontdoorFirewallPolicyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SecurityPolicies.Firewall.CdnFrontdoorFirewallPolicyIDRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.SecurityPolicies != nil { + if mg.Spec.InitProvider.SecurityPolicies.Firewall != nil { + if mg.Spec.InitProvider.SecurityPolicies.Firewall.Association != nil { + for i6 := 0; i6 < len(mg.Spec.InitProvider.SecurityPolicies.Firewall.Association.Domain); i6++ { + { + m, l, err = apisresolver.GetManagedResource("cdn.azure.upbound.io", "v1beta2", "FrontdoorCustomDomain", "FrontdoorCustomDomainList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SecurityPolicies.Firewall.Association.Domain[i6].CdnFrontdoorDomainID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.SecurityPolicies.Firewall.Association.Domain[i6].CdnFrontdoorDomainIDRef, + Selector: mg.Spec.InitProvider.SecurityPolicies.Firewall.Association.Domain[i6].CdnFrontdoorDomainIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SecurityPolicies.Firewall.Association.Domain[i6].CdnFrontdoorDomainID") + } + mg.Spec.InitProvider.SecurityPolicies.Firewall.Association.Domain[i6].CdnFrontdoorDomainID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SecurityPolicies.Firewall.Association.Domain[i6].CdnFrontdoorDomainIDRef = rsp.ResolvedReference + + } + } + } + } + if mg.Spec.InitProvider.SecurityPolicies != nil { + if mg.Spec.InitProvider.SecurityPolicies.Firewall != nil { + { + m, l, err = apisresolver.GetManagedResource("cdn.azure.upbound.io", "v1beta1", 
"FrontdoorFirewallPolicy", "FrontdoorFirewallPolicyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SecurityPolicies.Firewall.CdnFrontdoorFirewallPolicyID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.SecurityPolicies.Firewall.CdnFrontdoorFirewallPolicyIDRef, + Selector: mg.Spec.InitProvider.SecurityPolicies.Firewall.CdnFrontdoorFirewallPolicyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SecurityPolicies.Firewall.CdnFrontdoorFirewallPolicyID") + } + mg.Spec.InitProvider.SecurityPolicies.Firewall.CdnFrontdoorFirewallPolicyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SecurityPolicies.Firewall.CdnFrontdoorFirewallPolicyIDRef = rsp.ResolvedReference + + } + } + + return nil +} diff --git a/apis/cdn/v1beta2/zz_groupversion_info.go b/apis/cdn/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..85d14f156 --- /dev/null +++ b/apis/cdn/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=cdn.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
+const ( + CRDGroup = "cdn.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/cognitiveservices/v1beta1/zz_generated.conversion_spokes.go b/apis/cognitiveservices/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..45ff90404 --- /dev/null +++ b/apis/cognitiveservices/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,54 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Account to the hub type. +func (tr *Account) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Account type. 
+func (tr *Account) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Deployment to the hub type. +func (tr *Deployment) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Deployment type. +func (tr *Deployment) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/cognitiveservices/v1beta2/zz_account_terraformed.go b/apis/cognitiveservices/v1beta2/zz_account_terraformed.go new file mode 100755 index 000000000..e46c61cde --- /dev/null +++ b/apis/cognitiveservices/v1beta2/zz_account_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Account +func (mg *Account) GetTerraformResourceType() string { + return "azurerm_cognitive_account" +} + +// GetConnectionDetailsMapping for this Account +func (tr *Account) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"custom_question_answering_search_service_key": "spec.forProvider.customQuestionAnsweringSearchServiceKeySecretRef", "primary_access_key": "status.atProvider.primaryAccessKey", "secondary_access_key": "status.atProvider.secondaryAccessKey"} +} + +// GetObservation of this Account +func (tr *Account) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Account +func (tr *Account) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Account +func (tr *Account) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Account +func (tr *Account) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Account +func (tr *Account) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of 
this Account +func (tr *Account) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Account +func (tr *Account) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Account using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Account) LateInitialize(attrs []byte) (bool, error) { + params := &AccountParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Account) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/cognitiveservices/v1beta2/zz_account_types.go b/apis/cognitiveservices/v1beta2/zz_account_types.go new file mode 100755 index 000000000..36beaa33e --- /dev/null +++ b/apis/cognitiveservices/v1beta2/zz_account_types.go @@ -0,0 +1,502 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AccountInitParameters struct { + + // If kind is TextAnalytics this specifies the ID of the Search service. + CustomQuestionAnsweringSearchServiceID *string `json:"customQuestionAnsweringSearchServiceId,omitempty" tf:"custom_question_answering_search_service_id,omitempty"` + + // The subdomain name used for token-based authentication. This property is required when network_acls is specified. Changing this forces a new resource to be created. + CustomSubdomainName *string `json:"customSubdomainName,omitempty" tf:"custom_subdomain_name,omitempty"` + + // A customer_managed_key block as documented below. + CustomerManagedKey *CustomerManagedKeyInitParameters `json:"customerManagedKey,omitempty" tf:"customer_managed_key,omitempty"` + + // Whether to enable the dynamic throttling for this Cognitive Service Account. + DynamicThrottlingEnabled *bool `json:"dynamicThrottlingEnabled,omitempty" tf:"dynamic_throttling_enabled,omitempty"` + + // List of FQDNs allowed for the Cognitive Account. + Fqdns []*string `json:"fqdns,omitempty" tf:"fqdns,omitempty"` + + // An identity block as defined below. 
+ Identity *IdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Specifies the type of Cognitive Service Account that should be created. Possible values are Academic, AnomalyDetector, Bing.Autosuggest, Bing.Autosuggest.v7, Bing.CustomSearch, Bing.Search, Bing.Search.v7, Bing.Speech, Bing.SpellCheck, Bing.SpellCheck.v7, CognitiveServices, ComputerVision, ContentModerator, ContentSafety, CustomSpeech, CustomVision.Prediction, CustomVision.Training, Emotion, Face, FormRecognizer, ImmersiveReader, LUIS, LUIS.Authoring, MetricsAdvisor, OpenAI, Personalizer, QnAMaker, Recommendations, SpeakerRecognition, Speech, SpeechServices, SpeechTranslation, TextAnalytics, TextTranslation and WebLM. Changing this forces a new resource to be created. + Kind *string `json:"kind,omitempty" tf:"kind,omitempty"` + + // Whether local authentication methods is enabled for the Cognitive Account. Defaults to true. + LocalAuthEnabled *bool `json:"localAuthEnabled,omitempty" tf:"local_auth_enabled,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The Azure AD Client ID (Application ID). This attribute is only set when kind is MetricsAdvisor. Changing this forces a new resource to be created. + MetricsAdvisorAADClientID *string `json:"metricsAdvisorAadClientId,omitempty" tf:"metrics_advisor_aad_client_id,omitempty"` + + // The Azure AD Tenant ID. This attribute is only set when kind is MetricsAdvisor. Changing this forces a new resource to be created. + MetricsAdvisorAADTenantID *string `json:"metricsAdvisorAadTenantId,omitempty" tf:"metrics_advisor_aad_tenant_id,omitempty"` + + // The super user of Metrics Advisor. This attribute is only set when kind is MetricsAdvisor. Changing this forces a new resource to be created. 
+ MetricsAdvisorSuperUserName *string `json:"metricsAdvisorSuperUserName,omitempty" tf:"metrics_advisor_super_user_name,omitempty"` + + // The website name of Metrics Advisor. This attribute is only set when kind is MetricsAdvisor. Changing this forces a new resource to be created. + MetricsAdvisorWebsiteName *string `json:"metricsAdvisorWebsiteName,omitempty" tf:"metrics_advisor_website_name,omitempty"` + + // A network_acls block as defined below. When this property is specified, custom_subdomain_name is also required to be set. + NetworkAcls *NetworkAclsInitParameters `json:"networkAcls,omitempty" tf:"network_acls,omitempty"` + + // Whether outbound network access is restricted for the Cognitive Account. Defaults to false. + OutboundNetworkAccessRestricted *bool `json:"outboundNetworkAccessRestricted,omitempty" tf:"outbound_network_access_restricted,omitempty"` + + // Whether public network access is allowed for the Cognitive Account. Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // A URL to link a QnAMaker cognitive account to a QnA runtime. + QnaRuntimeEndpoint *string `json:"qnaRuntimeEndpoint,omitempty" tf:"qna_runtime_endpoint,omitempty"` + + // Specifies the SKU Name for this Cognitive Service Account. Possible values are F0, F1, S0, S, S1, S2, S3, S4, S5, S6, P0, P1, P2, E0 and DC0. + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // A storage block as defined below. + Storage []StorageInitParameters `json:"storage,omitempty" tf:"storage,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type AccountObservation struct { + + // If kind is TextAnalytics this specifies the ID of the Search service. 
+ CustomQuestionAnsweringSearchServiceID *string `json:"customQuestionAnsweringSearchServiceId,omitempty" tf:"custom_question_answering_search_service_id,omitempty"` + + // The subdomain name used for token-based authentication. This property is required when network_acls is specified. Changing this forces a new resource to be created. + CustomSubdomainName *string `json:"customSubdomainName,omitempty" tf:"custom_subdomain_name,omitempty"` + + // A customer_managed_key block as documented below. + CustomerManagedKey *CustomerManagedKeyObservation `json:"customerManagedKey,omitempty" tf:"customer_managed_key,omitempty"` + + // Whether to enable the dynamic throttling for this Cognitive Service Account. + DynamicThrottlingEnabled *bool `json:"dynamicThrottlingEnabled,omitempty" tf:"dynamic_throttling_enabled,omitempty"` + + // The endpoint used to connect to the Cognitive Service Account. + Endpoint *string `json:"endpoint,omitempty" tf:"endpoint,omitempty"` + + // List of FQDNs allowed for the Cognitive Account. + Fqdns []*string `json:"fqdns,omitempty" tf:"fqdns,omitempty"` + + // The ID of the Cognitive Service Account. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. + Identity *IdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // Specifies the type of Cognitive Service Account that should be created. Possible values are Academic, AnomalyDetector, Bing.Autosuggest, Bing.Autosuggest.v7, Bing.CustomSearch, Bing.Search, Bing.Search.v7, Bing.Speech, Bing.SpellCheck, Bing.SpellCheck.v7, CognitiveServices, ComputerVision, ContentModerator, ContentSafety, CustomSpeech, CustomVision.Prediction, CustomVision.Training, Emotion, Face, FormRecognizer, ImmersiveReader, LUIS, LUIS.Authoring, MetricsAdvisor, OpenAI, Personalizer, QnAMaker, Recommendations, SpeakerRecognition, Speech, SpeechServices, SpeechTranslation, TextAnalytics, TextTranslation and WebLM. 
Changing this forces a new resource to be created. + Kind *string `json:"kind,omitempty" tf:"kind,omitempty"` + + // Whether local authentication methods is enabled for the Cognitive Account. Defaults to true. + LocalAuthEnabled *bool `json:"localAuthEnabled,omitempty" tf:"local_auth_enabled,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The Azure AD Client ID (Application ID). This attribute is only set when kind is MetricsAdvisor. Changing this forces a new resource to be created. + MetricsAdvisorAADClientID *string `json:"metricsAdvisorAadClientId,omitempty" tf:"metrics_advisor_aad_client_id,omitempty"` + + // The Azure AD Tenant ID. This attribute is only set when kind is MetricsAdvisor. Changing this forces a new resource to be created. + MetricsAdvisorAADTenantID *string `json:"metricsAdvisorAadTenantId,omitempty" tf:"metrics_advisor_aad_tenant_id,omitempty"` + + // The super user of Metrics Advisor. This attribute is only set when kind is MetricsAdvisor. Changing this forces a new resource to be created. + MetricsAdvisorSuperUserName *string `json:"metricsAdvisorSuperUserName,omitempty" tf:"metrics_advisor_super_user_name,omitempty"` + + // The website name of Metrics Advisor. This attribute is only set when kind is MetricsAdvisor. Changing this forces a new resource to be created. + MetricsAdvisorWebsiteName *string `json:"metricsAdvisorWebsiteName,omitempty" tf:"metrics_advisor_website_name,omitempty"` + + // A network_acls block as defined below. When this property is specified, custom_subdomain_name is also required to be set. + NetworkAcls *NetworkAclsObservation `json:"networkAcls,omitempty" tf:"network_acls,omitempty"` + + // Whether outbound network access is restricted for the Cognitive Account. Defaults to false. 
+ OutboundNetworkAccessRestricted *bool `json:"outboundNetworkAccessRestricted,omitempty" tf:"outbound_network_access_restricted,omitempty"` + + // Whether public network access is allowed for the Cognitive Account. Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // A URL to link a QnAMaker cognitive account to a QnA runtime. + QnaRuntimeEndpoint *string `json:"qnaRuntimeEndpoint,omitempty" tf:"qna_runtime_endpoint,omitempty"` + + // The name of the resource group in which the Cognitive Service Account is created. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Specifies the SKU Name for this Cognitive Service Account. Possible values are F0, F1, S0, S, S1, S2, S3, S4, S5, S6, P0, P1, P2, E0 and DC0. + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // A storage block as defined below. + Storage []StorageObservation `json:"storage,omitempty" tf:"storage,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type AccountParameters struct { + + // If kind is TextAnalytics this specifies the ID of the Search service. + // +kubebuilder:validation:Optional + CustomQuestionAnsweringSearchServiceID *string `json:"customQuestionAnsweringSearchServiceId,omitempty" tf:"custom_question_answering_search_service_id,omitempty"` + + // If kind is TextAnalytics this specifies the key of the Search service. + // +kubebuilder:validation:Optional + CustomQuestionAnsweringSearchServiceKeySecretRef *v1.SecretKeySelector `json:"customQuestionAnsweringSearchServiceKeySecretRef,omitempty" tf:"-"` + + // The subdomain name used for token-based authentication. This property is required when network_acls is specified. 
Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + CustomSubdomainName *string `json:"customSubdomainName,omitempty" tf:"custom_subdomain_name,omitempty"` + + // A customer_managed_key block as documented below. + // +kubebuilder:validation:Optional + CustomerManagedKey *CustomerManagedKeyParameters `json:"customerManagedKey,omitempty" tf:"customer_managed_key,omitempty"` + + // Whether to enable the dynamic throttling for this Cognitive Service Account. + // +kubebuilder:validation:Optional + DynamicThrottlingEnabled *bool `json:"dynamicThrottlingEnabled,omitempty" tf:"dynamic_throttling_enabled,omitempty"` + + // List of FQDNs allowed for the Cognitive Account. + // +kubebuilder:validation:Optional + Fqdns []*string `json:"fqdns,omitempty" tf:"fqdns,omitempty"` + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *IdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Specifies the type of Cognitive Service Account that should be created. Possible values are Academic, AnomalyDetector, Bing.Autosuggest, Bing.Autosuggest.v7, Bing.CustomSearch, Bing.Search, Bing.Search.v7, Bing.Speech, Bing.SpellCheck, Bing.SpellCheck.v7, CognitiveServices, ComputerVision, ContentModerator, ContentSafety, CustomSpeech, CustomVision.Prediction, CustomVision.Training, Emotion, Face, FormRecognizer, ImmersiveReader, LUIS, LUIS.Authoring, MetricsAdvisor, OpenAI, Personalizer, QnAMaker, Recommendations, SpeakerRecognition, Speech, SpeechServices, SpeechTranslation, TextAnalytics, TextTranslation and WebLM. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Kind *string `json:"kind,omitempty" tf:"kind,omitempty"` + + // Whether local authentication methods is enabled for the Cognitive Account. Defaults to true. 
+ // +kubebuilder:validation:Optional + LocalAuthEnabled *bool `json:"localAuthEnabled,omitempty" tf:"local_auth_enabled,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The Azure AD Client ID (Application ID). This attribute is only set when kind is MetricsAdvisor. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + MetricsAdvisorAADClientID *string `json:"metricsAdvisorAadClientId,omitempty" tf:"metrics_advisor_aad_client_id,omitempty"` + + // The Azure AD Tenant ID. This attribute is only set when kind is MetricsAdvisor. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + MetricsAdvisorAADTenantID *string `json:"metricsAdvisorAadTenantId,omitempty" tf:"metrics_advisor_aad_tenant_id,omitempty"` + + // The super user of Metrics Advisor. This attribute is only set when kind is MetricsAdvisor. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + MetricsAdvisorSuperUserName *string `json:"metricsAdvisorSuperUserName,omitempty" tf:"metrics_advisor_super_user_name,omitempty"` + + // The website name of Metrics Advisor. This attribute is only set when kind is MetricsAdvisor. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + MetricsAdvisorWebsiteName *string `json:"metricsAdvisorWebsiteName,omitempty" tf:"metrics_advisor_website_name,omitempty"` + + // A network_acls block as defined below. When this property is specified, custom_subdomain_name is also required to be set. + // +kubebuilder:validation:Optional + NetworkAcls *NetworkAclsParameters `json:"networkAcls,omitempty" tf:"network_acls,omitempty"` + + // Whether outbound network access is restricted for the Cognitive Account. Defaults to false. 
+ // +kubebuilder:validation:Optional + OutboundNetworkAccessRestricted *bool `json:"outboundNetworkAccessRestricted,omitempty" tf:"outbound_network_access_restricted,omitempty"` + + // Whether public network access is allowed for the Cognitive Account. Defaults to true. + // +kubebuilder:validation:Optional + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // A URL to link a QnAMaker cognitive account to a QnA runtime. + // +kubebuilder:validation:Optional + QnaRuntimeEndpoint *string `json:"qnaRuntimeEndpoint,omitempty" tf:"qna_runtime_endpoint,omitempty"` + + // The name of the resource group in which the Cognitive Service Account is created. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // Specifies the SKU Name for this Cognitive Service Account. Possible values are F0, F1, S0, S, S1, S2, S3, S4, S5, S6, P0, P1, P2, E0 and DC0. + // +kubebuilder:validation:Optional + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // A storage block as defined below. + // +kubebuilder:validation:Optional + Storage []StorageParameters `json:"storage,omitempty" tf:"storage,omitempty"` + + // A mapping of tags to assign to the resource. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type CustomerManagedKeyInitParameters struct { + + // The Client ID of the User Assigned Identity that has access to the key. This property only needs to be specified when there're multiple identities attached to the Cognitive Account. + IdentityClientID *string `json:"identityClientId,omitempty" tf:"identity_client_id,omitempty"` + + // The ID of the Key Vault Key which should be used to Encrypt the data in this Cognitive Account. + KeyVaultKeyID *string `json:"keyVaultKeyId,omitempty" tf:"key_vault_key_id,omitempty"` +} + +type CustomerManagedKeyObservation struct { + + // The Client ID of the User Assigned Identity that has access to the key. This property only needs to be specified when there're multiple identities attached to the Cognitive Account. + IdentityClientID *string `json:"identityClientId,omitempty" tf:"identity_client_id,omitempty"` + + // The ID of the Key Vault Key which should be used to Encrypt the data in this Cognitive Account. + KeyVaultKeyID *string `json:"keyVaultKeyId,omitempty" tf:"key_vault_key_id,omitempty"` +} + +type CustomerManagedKeyParameters struct { + + // The Client ID of the User Assigned Identity that has access to the key. This property only needs to be specified when there're multiple identities attached to the Cognitive Account. + // +kubebuilder:validation:Optional + IdentityClientID *string `json:"identityClientId,omitempty" tf:"identity_client_id,omitempty"` + + // The ID of the Key Vault Key which should be used to Encrypt the data in this Cognitive Account. + // +kubebuilder:validation:Optional + KeyVaultKeyID *string `json:"keyVaultKeyId" tf:"key_vault_key_id,omitempty"` +} + +type IdentityInitParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Cognitive Account. 
+ // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Cognitive Account. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityObservation struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Cognitive Account. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Principal ID associated with this Managed Service Identity. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID associated with this Managed Service Identity. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Cognitive Account. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Cognitive Account. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Cognitive Account. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type NetworkAclsInitParameters struct { + + // The Default Action to use when no rules match from ip_rules / virtual_network_rules. Possible values are Allow and Deny. 
+ DefaultAction *string `json:"defaultAction,omitempty" tf:"default_action,omitempty"` + + // One or more IP Addresses, or CIDR Blocks which should be able to access the Cognitive Account. + // +listType=set + IPRules []*string `json:"ipRules,omitempty" tf:"ip_rules,omitempty"` + + // A virtual_network_rules block as defined below. + VirtualNetworkRules []VirtualNetworkRulesInitParameters `json:"virtualNetworkRules,omitempty" tf:"virtual_network_rules,omitempty"` +} + +type NetworkAclsObservation struct { + + // The Default Action to use when no rules match from ip_rules / virtual_network_rules. Possible values are Allow and Deny. + DefaultAction *string `json:"defaultAction,omitempty" tf:"default_action,omitempty"` + + // One or more IP Addresses, or CIDR Blocks which should be able to access the Cognitive Account. + // +listType=set + IPRules []*string `json:"ipRules,omitempty" tf:"ip_rules,omitempty"` + + // A virtual_network_rules block as defined below. + VirtualNetworkRules []VirtualNetworkRulesObservation `json:"virtualNetworkRules,omitempty" tf:"virtual_network_rules,omitempty"` +} + +type NetworkAclsParameters struct { + + // The Default Action to use when no rules match from ip_rules / virtual_network_rules. Possible values are Allow and Deny. + // +kubebuilder:validation:Optional + DefaultAction *string `json:"defaultAction" tf:"default_action,omitempty"` + + // One or more IP Addresses, or CIDR Blocks which should be able to access the Cognitive Account. + // +kubebuilder:validation:Optional + // +listType=set + IPRules []*string `json:"ipRules,omitempty" tf:"ip_rules,omitempty"` + + // A virtual_network_rules block as defined below. + // +kubebuilder:validation:Optional + VirtualNetworkRules []VirtualNetworkRulesParameters `json:"virtualNetworkRules,omitempty" tf:"virtual_network_rules,omitempty"` +} + +type StorageInitParameters struct { + + // The client ID of the managed identity associated with the storage resource. 
+ IdentityClientID *string `json:"identityClientId,omitempty" tf:"identity_client_id,omitempty"` + + // Full resource id of a Microsoft.Storage resource. + StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` +} + +type StorageObservation struct { + + // The client ID of the managed identity associated with the storage resource. + IdentityClientID *string `json:"identityClientId,omitempty" tf:"identity_client_id,omitempty"` + + // Full resource id of a Microsoft.Storage resource. + StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` +} + +type StorageParameters struct { + + // The client ID of the managed identity associated with the storage resource. + // +kubebuilder:validation:Optional + IdentityClientID *string `json:"identityClientId,omitempty" tf:"identity_client_id,omitempty"` + + // Full resource id of a Microsoft.Storage resource. + // +kubebuilder:validation:Optional + StorageAccountID *string `json:"storageAccountId" tf:"storage_account_id,omitempty"` +} + +type VirtualNetworkRulesInitParameters struct { + + // Whether ignore missing vnet service endpoint or not. Default to false. + IgnoreMissingVnetServiceEndpoint *bool `json:"ignoreMissingVnetServiceEndpoint,omitempty" tf:"ignore_missing_vnet_service_endpoint,omitempty"` + + // The ID of the subnet which should be able to access this Cognitive Account. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. 
+ // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` +} + +type VirtualNetworkRulesObservation struct { + + // Whether ignore missing vnet service endpoint or not. Default to false. + IgnoreMissingVnetServiceEndpoint *bool `json:"ignoreMissingVnetServiceEndpoint,omitempty" tf:"ignore_missing_vnet_service_endpoint,omitempty"` + + // The ID of the subnet which should be able to access this Cognitive Account. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` +} + +type VirtualNetworkRulesParameters struct { + + // Whether ignore missing vnet service endpoint or not. Default to false. + // +kubebuilder:validation:Optional + IgnoreMissingVnetServiceEndpoint *bool `json:"ignoreMissingVnetServiceEndpoint,omitempty" tf:"ignore_missing_vnet_service_endpoint,omitempty"` + + // The ID of the subnet which should be able to access this Cognitive Account. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` +} + +// AccountSpec defines the desired state of Account +type AccountSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider AccountParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. 
+ // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider AccountInitParameters `json:"initProvider,omitempty"` +} + +// AccountStatus defines the observed state of Account. +type AccountStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider AccountObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Account is the Schema for the Accounts API. Manages a Cognitive Services Account. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type Account struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.kind) || (has(self.initProvider) && has(self.initProvider.kind))",message="spec.forProvider.kind is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 
'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.skuName) || (has(self.initProvider) && has(self.initProvider.skuName))",message="spec.forProvider.skuName is a required parameter" + Spec AccountSpec `json:"spec"` + Status AccountStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// AccountList contains a list of Accounts +type AccountList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Account `json:"items"` +} + +// Repository type metadata. +var ( + Account_Kind = "Account" + Account_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Account_Kind}.String() + Account_KindAPIVersion = Account_Kind + "." + CRDGroupVersion.String() + Account_GroupVersionKind = CRDGroupVersion.WithKind(Account_Kind) +) + +func init() { + SchemeBuilder.Register(&Account{}, &AccountList{}) +} diff --git a/apis/cognitiveservices/v1beta2/zz_deployment_terraformed.go b/apis/cognitiveservices/v1beta2/zz_deployment_terraformed.go new file mode 100755 index 000000000..5b8f81f89 --- /dev/null +++ b/apis/cognitiveservices/v1beta2/zz_deployment_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Deployment +func (mg *Deployment) GetTerraformResourceType() string { + return "azurerm_cognitive_deployment" +} + +// GetConnectionDetailsMapping for this Deployment +func (tr *Deployment) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Deployment +func (tr *Deployment) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Deployment +func (tr *Deployment) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Deployment +func (tr *Deployment) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Deployment +func (tr *Deployment) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Deployment +func (tr *Deployment) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Deployment +func (tr *Deployment) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + 
return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Deployment +func (tr *Deployment) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Deployment using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Deployment) LateInitialize(attrs []byte) (bool, error) { + params := &DeploymentParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Deployment) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/cognitiveservices/v1beta2/zz_deployment_types.go b/apis/cognitiveservices/v1beta2/zz_deployment_types.go new file mode 100755 index 000000000..334d08961 --- /dev/null +++ b/apis/cognitiveservices/v1beta2/zz_deployment_types.go @@ -0,0 +1,243 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DeploymentInitParameters struct { + + // A model block as defined below. Changing this forces a new resource to be created. + Model *ModelInitParameters `json:"model,omitempty" tf:"model,omitempty"` + + // The name of RAI policy. + RaiPolicyName *string `json:"raiPolicyName,omitempty" tf:"rai_policy_name,omitempty"` + + // A scale block as defined below. + Scale *ScaleInitParameters `json:"scale,omitempty" tf:"scale,omitempty"` + + // Deployment model version upgrade option. Possible values are OnceNewDefaultVersionAvailable, OnceCurrentVersionExpired, and NoAutoUpgrade. Defaults to OnceNewDefaultVersionAvailable. + VersionUpgradeOption *string `json:"versionUpgradeOption,omitempty" tf:"version_upgrade_option,omitempty"` +} + +type DeploymentObservation struct { + + // The ID of the Cognitive Services Account. Changing this forces a new resource to be created. + CognitiveAccountID *string `json:"cognitiveAccountId,omitempty" tf:"cognitive_account_id,omitempty"` + + // The ID of the Deployment for Azure Cognitive Services Account. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A model block as defined below. 
Changing this forces a new resource to be created. + Model *ModelObservation `json:"model,omitempty" tf:"model,omitempty"` + + // The name of RAI policy. + RaiPolicyName *string `json:"raiPolicyName,omitempty" tf:"rai_policy_name,omitempty"` + + // A scale block as defined below. + Scale *ScaleObservation `json:"scale,omitempty" tf:"scale,omitempty"` + + // Deployment model version upgrade option. Possible values are OnceNewDefaultVersionAvailable, OnceCurrentVersionExpired, and NoAutoUpgrade. Defaults to OnceNewDefaultVersionAvailable. + VersionUpgradeOption *string `json:"versionUpgradeOption,omitempty" tf:"version_upgrade_option,omitempty"` +} + +type DeploymentParameters struct { + + // The ID of the Cognitive Services Account. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cognitiveservices/v1beta2.Account + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + CognitiveAccountID *string `json:"cognitiveAccountId,omitempty" tf:"cognitive_account_id,omitempty"` + + // Reference to a Account in cognitiveservices to populate cognitiveAccountId. + // +kubebuilder:validation:Optional + CognitiveAccountIDRef *v1.Reference `json:"cognitiveAccountIdRef,omitempty" tf:"-"` + + // Selector for a Account in cognitiveservices to populate cognitiveAccountId. + // +kubebuilder:validation:Optional + CognitiveAccountIDSelector *v1.Selector `json:"cognitiveAccountIdSelector,omitempty" tf:"-"` + + // A model block as defined below. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Model *ModelParameters `json:"model,omitempty" tf:"model,omitempty"` + + // The name of RAI policy. + // +kubebuilder:validation:Optional + RaiPolicyName *string `json:"raiPolicyName,omitempty" tf:"rai_policy_name,omitempty"` + + // A scale block as defined below. 
+ // +kubebuilder:validation:Optional + Scale *ScaleParameters `json:"scale,omitempty" tf:"scale,omitempty"` + + // Deployment model version upgrade option. Possible values are OnceNewDefaultVersionAvailable, OnceCurrentVersionExpired, and NoAutoUpgrade. Defaults to OnceNewDefaultVersionAvailable. + // +kubebuilder:validation:Optional + VersionUpgradeOption *string `json:"versionUpgradeOption,omitempty" tf:"version_upgrade_option,omitempty"` +} + +type ModelInitParameters struct { + + // The format of the Cognitive Services Account Deployment model. Changing this forces a new resource to be created. Possible value is OpenAI. + Format *string `json:"format,omitempty" tf:"format,omitempty"` + + // The name of the Cognitive Services Account Deployment model. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The version of Cognitive Services Account Deployment model. If version is not specified, the default version of the model at the time will be assigned. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type ModelObservation struct { + + // The format of the Cognitive Services Account Deployment model. Changing this forces a new resource to be created. Possible value is OpenAI. + Format *string `json:"format,omitempty" tf:"format,omitempty"` + + // The name of the Cognitive Services Account Deployment model. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The version of Cognitive Services Account Deployment model. If version is not specified, the default version of the model at the time will be assigned. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type ModelParameters struct { + + // The format of the Cognitive Services Account Deployment model. Changing this forces a new resource to be created. Possible value is OpenAI. 
+ // +kubebuilder:validation:Optional + Format *string `json:"format" tf:"format,omitempty"` + + // The name of the Cognitive Services Account Deployment model. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The version of Cognitive Services Account Deployment model. If version is not specified, the default version of the model at the time will be assigned. + // +kubebuilder:validation:Optional + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type ScaleInitParameters struct { + + // Tokens-per-Minute (TPM). The unit of measure for this field is in the thousands of Tokens-per-Minute. Defaults to 1 which means that the limitation is 1000 tokens per minute. If the resources SKU supports scale in/out then the capacity field should be included in the resources' configuration. If the scale in/out is not supported by the resources SKU then this field can be safely omitted. For more information about TPM please see the product documentation. + Capacity *float64 `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // If the service has different generations of hardware, for the same SKU, then that can be captured here. Changing this forces a new resource to be created. + Family *string `json:"family,omitempty" tf:"family,omitempty"` + + // The SKU size. When the name field is the combination of tier and some other value, this would be the standalone code. Changing this forces a new resource to be created. + Size *string `json:"size,omitempty" tf:"size,omitempty"` + + // Possible values are Free, Basic, Standard, Premium, Enterprise. Changing this forces a new resource to be created. + Tier *string `json:"tier,omitempty" tf:"tier,omitempty"` + + // The name of the SKU. Ex - Standard or P3. It is typically a letter+number code. Changing this forces a new resource to be created. 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ScaleObservation struct { + + // Tokens-per-Minute (TPM). The unit of measure for this field is in the thousands of Tokens-per-Minute. Defaults to 1 which means that the limitation is 1000 tokens per minute. If the resources SKU supports scale in/out then the capacity field should be included in the resources' configuration. If the scale in/out is not supported by the resources SKU then this field can be safely omitted. For more information about TPM please see the product documentation. + Capacity *float64 `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // If the service has different generations of hardware, for the same SKU, then that can be captured here. Changing this forces a new resource to be created. + Family *string `json:"family,omitempty" tf:"family,omitempty"` + + // The SKU size. When the name field is the combination of tier and some other value, this would be the standalone code. Changing this forces a new resource to be created. + Size *string `json:"size,omitempty" tf:"size,omitempty"` + + // Possible values are Free, Basic, Standard, Premium, Enterprise. Changing this forces a new resource to be created. + Tier *string `json:"tier,omitempty" tf:"tier,omitempty"` + + // The name of the SKU. Ex - Standard or P3. It is typically a letter+number code. Changing this forces a new resource to be created. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ScaleParameters struct { + + // Tokens-per-Minute (TPM). The unit of measure for this field is in the thousands of Tokens-per-Minute. Defaults to 1 which means that the limitation is 1000 tokens per minute. If the resources SKU supports scale in/out then the capacity field should be included in the resources' configuration. If the scale in/out is not supported by the resources SKU then this field can be safely omitted. For more information about TPM please see the product documentation. 
+ // +kubebuilder:validation:Optional + Capacity *float64 `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // If the service has different generations of hardware, for the same SKU, then that can be captured here. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Family *string `json:"family,omitempty" tf:"family,omitempty"` + + // The SKU size. When the name field is the combination of tier and some other value, this would be the standalone code. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Size *string `json:"size,omitempty" tf:"size,omitempty"` + + // Possible values are Free, Basic, Standard, Premium, Enterprise. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Tier *string `json:"tier,omitempty" tf:"tier,omitempty"` + + // The name of the SKU. Ex - Standard or P3. It is typically a letter+number code. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +// DeploymentSpec defines the desired state of Deployment +type DeploymentSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider DeploymentParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider DeploymentInitParameters `json:"initProvider,omitempty"` +} + +// DeploymentStatus defines the observed state of Deployment. +type DeploymentStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider DeploymentObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Deployment is the Schema for the Deployments API. Manages a Cognitive Services Account Deployment. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type Deployment struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.model) || (has(self.initProvider) && has(self.initProvider.model))",message="spec.forProvider.model is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.scale) || (has(self.initProvider) && has(self.initProvider.scale))",message="spec.forProvider.scale is a required parameter" + Spec DeploymentSpec `json:"spec"` + Status DeploymentStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// DeploymentList contains a list of Deployments +type DeploymentList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + 
Items []Deployment `json:"items"` +} + +// Repository type metadata. +var ( + Deployment_Kind = "Deployment" + Deployment_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Deployment_Kind}.String() + Deployment_KindAPIVersion = Deployment_Kind + "." + CRDGroupVersion.String() + Deployment_GroupVersionKind = CRDGroupVersion.WithKind(Deployment_Kind) +) + +func init() { + SchemeBuilder.Register(&Deployment{}, &DeploymentList{}) +} diff --git a/apis/cognitiveservices/v1beta1/zz_generated.conversion_hubs.go b/apis/cognitiveservices/v1beta2/zz_generated.conversion_hubs.go similarity index 95% rename from apis/cognitiveservices/v1beta1/zz_generated.conversion_hubs.go rename to apis/cognitiveservices/v1beta2/zz_generated.conversion_hubs.go index 19484dc58..f87c58df5 100755 --- a/apis/cognitiveservices/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/cognitiveservices/v1beta2/zz_generated.conversion_hubs.go @@ -4,7 +4,7 @@ // Code generated by upjet. DO NOT EDIT. -package v1beta1 +package v1beta2 // Hub marks this type as a conversion hub. func (tr *Account) Hub() {} diff --git a/apis/cognitiveservices/v1beta2/zz_generated.deepcopy.go b/apis/cognitiveservices/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..d55a20d5d --- /dev/null +++ b/apis/cognitiveservices/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,1441 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Account) DeepCopyInto(out *Account) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Account. +func (in *Account) DeepCopy() *Account { + if in == nil { + return nil + } + out := new(Account) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Account) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccountInitParameters) DeepCopyInto(out *AccountInitParameters) { + *out = *in + if in.CustomQuestionAnsweringSearchServiceID != nil { + in, out := &in.CustomQuestionAnsweringSearchServiceID, &out.CustomQuestionAnsweringSearchServiceID + *out = new(string) + **out = **in + } + if in.CustomSubdomainName != nil { + in, out := &in.CustomSubdomainName, &out.CustomSubdomainName + *out = new(string) + **out = **in + } + if in.CustomerManagedKey != nil { + in, out := &in.CustomerManagedKey, &out.CustomerManagedKey + *out = new(CustomerManagedKeyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DynamicThrottlingEnabled != nil { + in, out := &in.DynamicThrottlingEnabled, &out.DynamicThrottlingEnabled + *out = new(bool) + **out = **in + } + if in.Fqdns != nil { + in, out := &in.Fqdns, &out.Fqdns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(string) + **out = 
**in + } + if in.LocalAuthEnabled != nil { + in, out := &in.LocalAuthEnabled, &out.LocalAuthEnabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MetricsAdvisorAADClientID != nil { + in, out := &in.MetricsAdvisorAADClientID, &out.MetricsAdvisorAADClientID + *out = new(string) + **out = **in + } + if in.MetricsAdvisorAADTenantID != nil { + in, out := &in.MetricsAdvisorAADTenantID, &out.MetricsAdvisorAADTenantID + *out = new(string) + **out = **in + } + if in.MetricsAdvisorSuperUserName != nil { + in, out := &in.MetricsAdvisorSuperUserName, &out.MetricsAdvisorSuperUserName + *out = new(string) + **out = **in + } + if in.MetricsAdvisorWebsiteName != nil { + in, out := &in.MetricsAdvisorWebsiteName, &out.MetricsAdvisorWebsiteName + *out = new(string) + **out = **in + } + if in.NetworkAcls != nil { + in, out := &in.NetworkAcls, &out.NetworkAcls + *out = new(NetworkAclsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.OutboundNetworkAccessRestricted != nil { + in, out := &in.OutboundNetworkAccessRestricted, &out.OutboundNetworkAccessRestricted + *out = new(bool) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.QnaRuntimeEndpoint != nil { + in, out := &in.QnaRuntimeEndpoint, &out.QnaRuntimeEndpoint + *out = new(string) + **out = **in + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.Storage != nil { + in, out := &in.Storage, &out.Storage + *out = make([]StorageInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := 
(*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountInitParameters. +func (in *AccountInitParameters) DeepCopy() *AccountInitParameters { + if in == nil { + return nil + } + out := new(AccountInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccountList) DeepCopyInto(out *AccountList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Account, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountList. +func (in *AccountList) DeepCopy() *AccountList { + if in == nil { + return nil + } + out := new(AccountList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AccountList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AccountObservation) DeepCopyInto(out *AccountObservation) { + *out = *in + if in.CustomQuestionAnsweringSearchServiceID != nil { + in, out := &in.CustomQuestionAnsweringSearchServiceID, &out.CustomQuestionAnsweringSearchServiceID + *out = new(string) + **out = **in + } + if in.CustomSubdomainName != nil { + in, out := &in.CustomSubdomainName, &out.CustomSubdomainName + *out = new(string) + **out = **in + } + if in.CustomerManagedKey != nil { + in, out := &in.CustomerManagedKey, &out.CustomerManagedKey + *out = new(CustomerManagedKeyObservation) + (*in).DeepCopyInto(*out) + } + if in.DynamicThrottlingEnabled != nil { + in, out := &in.DynamicThrottlingEnabled, &out.DynamicThrottlingEnabled + *out = new(bool) + **out = **in + } + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(string) + **out = **in + } + if in.Fqdns != nil { + in, out := &in.Fqdns, &out.Fqdns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(string) + **out = **in + } + if in.LocalAuthEnabled != nil { + in, out := &in.LocalAuthEnabled, &out.LocalAuthEnabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MetricsAdvisorAADClientID != nil { + in, out := &in.MetricsAdvisorAADClientID, &out.MetricsAdvisorAADClientID + *out = new(string) + **out = **in + } + if in.MetricsAdvisorAADTenantID != nil { + in, out := &in.MetricsAdvisorAADTenantID, &out.MetricsAdvisorAADTenantID + *out = new(string) + **out = **in + } + if in.MetricsAdvisorSuperUserName != nil { + in, out 
:= &in.MetricsAdvisorSuperUserName, &out.MetricsAdvisorSuperUserName + *out = new(string) + **out = **in + } + if in.MetricsAdvisorWebsiteName != nil { + in, out := &in.MetricsAdvisorWebsiteName, &out.MetricsAdvisorWebsiteName + *out = new(string) + **out = **in + } + if in.NetworkAcls != nil { + in, out := &in.NetworkAcls, &out.NetworkAcls + *out = new(NetworkAclsObservation) + (*in).DeepCopyInto(*out) + } + if in.OutboundNetworkAccessRestricted != nil { + in, out := &in.OutboundNetworkAccessRestricted, &out.OutboundNetworkAccessRestricted + *out = new(bool) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.QnaRuntimeEndpoint != nil { + in, out := &in.QnaRuntimeEndpoint, &out.QnaRuntimeEndpoint + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.Storage != nil { + in, out := &in.Storage, &out.Storage + *out = make([]StorageObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountObservation. +func (in *AccountObservation) DeepCopy() *AccountObservation { + if in == nil { + return nil + } + out := new(AccountObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *AccountParameters) DeepCopyInto(out *AccountParameters) { + *out = *in + if in.CustomQuestionAnsweringSearchServiceID != nil { + in, out := &in.CustomQuestionAnsweringSearchServiceID, &out.CustomQuestionAnsweringSearchServiceID + *out = new(string) + **out = **in + } + if in.CustomQuestionAnsweringSearchServiceKeySecretRef != nil { + in, out := &in.CustomQuestionAnsweringSearchServiceKeySecretRef, &out.CustomQuestionAnsweringSearchServiceKeySecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.CustomSubdomainName != nil { + in, out := &in.CustomSubdomainName, &out.CustomSubdomainName + *out = new(string) + **out = **in + } + if in.CustomerManagedKey != nil { + in, out := &in.CustomerManagedKey, &out.CustomerManagedKey + *out = new(CustomerManagedKeyParameters) + (*in).DeepCopyInto(*out) + } + if in.DynamicThrottlingEnabled != nil { + in, out := &in.DynamicThrottlingEnabled, &out.DynamicThrottlingEnabled + *out = new(bool) + **out = **in + } + if in.Fqdns != nil { + in, out := &in.Fqdns, &out.Fqdns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(string) + **out = **in + } + if in.LocalAuthEnabled != nil { + in, out := &in.LocalAuthEnabled, &out.LocalAuthEnabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MetricsAdvisorAADClientID != nil { + in, out := &in.MetricsAdvisorAADClientID, &out.MetricsAdvisorAADClientID + *out = new(string) + **out = **in + } + if in.MetricsAdvisorAADTenantID != nil { + in, out := &in.MetricsAdvisorAADTenantID, &out.MetricsAdvisorAADTenantID + *out = new(string) + **out = **in 
+ } + if in.MetricsAdvisorSuperUserName != nil { + in, out := &in.MetricsAdvisorSuperUserName, &out.MetricsAdvisorSuperUserName + *out = new(string) + **out = **in + } + if in.MetricsAdvisorWebsiteName != nil { + in, out := &in.MetricsAdvisorWebsiteName, &out.MetricsAdvisorWebsiteName + *out = new(string) + **out = **in + } + if in.NetworkAcls != nil { + in, out := &in.NetworkAcls, &out.NetworkAcls + *out = new(NetworkAclsParameters) + (*in).DeepCopyInto(*out) + } + if in.OutboundNetworkAccessRestricted != nil { + in, out := &in.OutboundNetworkAccessRestricted, &out.OutboundNetworkAccessRestricted + *out = new(bool) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.QnaRuntimeEndpoint != nil { + in, out := &in.QnaRuntimeEndpoint, &out.QnaRuntimeEndpoint + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.Storage != nil { + in, out := &in.Storage, &out.Storage + *out = make([]StorageParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = 
outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountParameters. +func (in *AccountParameters) DeepCopy() *AccountParameters { + if in == nil { + return nil + } + out := new(AccountParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccountSpec) DeepCopyInto(out *AccountSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountSpec. +func (in *AccountSpec) DeepCopy() *AccountSpec { + if in == nil { + return nil + } + out := new(AccountSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccountStatus) DeepCopyInto(out *AccountStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountStatus. +func (in *AccountStatus) DeepCopy() *AccountStatus { + if in == nil { + return nil + } + out := new(AccountStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomerManagedKeyInitParameters) DeepCopyInto(out *CustomerManagedKeyInitParameters) { + *out = *in + if in.IdentityClientID != nil { + in, out := &in.IdentityClientID, &out.IdentityClientID + *out = new(string) + **out = **in + } + if in.KeyVaultKeyID != nil { + in, out := &in.KeyVaultKeyID, &out.KeyVaultKeyID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomerManagedKeyInitParameters. +func (in *CustomerManagedKeyInitParameters) DeepCopy() *CustomerManagedKeyInitParameters { + if in == nil { + return nil + } + out := new(CustomerManagedKeyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomerManagedKeyObservation) DeepCopyInto(out *CustomerManagedKeyObservation) { + *out = *in + if in.IdentityClientID != nil { + in, out := &in.IdentityClientID, &out.IdentityClientID + *out = new(string) + **out = **in + } + if in.KeyVaultKeyID != nil { + in, out := &in.KeyVaultKeyID, &out.KeyVaultKeyID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomerManagedKeyObservation. +func (in *CustomerManagedKeyObservation) DeepCopy() *CustomerManagedKeyObservation { + if in == nil { + return nil + } + out := new(CustomerManagedKeyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomerManagedKeyParameters) DeepCopyInto(out *CustomerManagedKeyParameters) { + *out = *in + if in.IdentityClientID != nil { + in, out := &in.IdentityClientID, &out.IdentityClientID + *out = new(string) + **out = **in + } + if in.KeyVaultKeyID != nil { + in, out := &in.KeyVaultKeyID, &out.KeyVaultKeyID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomerManagedKeyParameters. +func (in *CustomerManagedKeyParameters) DeepCopy() *CustomerManagedKeyParameters { + if in == nil { + return nil + } + out := new(CustomerManagedKeyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Deployment) DeepCopyInto(out *Deployment) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Deployment. +func (in *Deployment) DeepCopy() *Deployment { + if in == nil { + return nil + } + out := new(Deployment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Deployment) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeploymentInitParameters) DeepCopyInto(out *DeploymentInitParameters) { + *out = *in + if in.Model != nil { + in, out := &in.Model, &out.Model + *out = new(ModelInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RaiPolicyName != nil { + in, out := &in.RaiPolicyName, &out.RaiPolicyName + *out = new(string) + **out = **in + } + if in.Scale != nil { + in, out := &in.Scale, &out.Scale + *out = new(ScaleInitParameters) + (*in).DeepCopyInto(*out) + } + if in.VersionUpgradeOption != nil { + in, out := &in.VersionUpgradeOption, &out.VersionUpgradeOption + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentInitParameters. +func (in *DeploymentInitParameters) DeepCopy() *DeploymentInitParameters { + if in == nil { + return nil + } + out := new(DeploymentInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentList) DeepCopyInto(out *DeploymentList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Deployment, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentList. +func (in *DeploymentList) DeepCopy() *DeploymentList { + if in == nil { + return nil + } + out := new(DeploymentList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeploymentList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeploymentObservation) DeepCopyInto(out *DeploymentObservation) { + *out = *in + if in.CognitiveAccountID != nil { + in, out := &in.CognitiveAccountID, &out.CognitiveAccountID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Model != nil { + in, out := &in.Model, &out.Model + *out = new(ModelObservation) + (*in).DeepCopyInto(*out) + } + if in.RaiPolicyName != nil { + in, out := &in.RaiPolicyName, &out.RaiPolicyName + *out = new(string) + **out = **in + } + if in.Scale != nil { + in, out := &in.Scale, &out.Scale + *out = new(ScaleObservation) + (*in).DeepCopyInto(*out) + } + if in.VersionUpgradeOption != nil { + in, out := &in.VersionUpgradeOption, &out.VersionUpgradeOption + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentObservation. +func (in *DeploymentObservation) DeepCopy() *DeploymentObservation { + if in == nil { + return nil + } + out := new(DeploymentObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeploymentParameters) DeepCopyInto(out *DeploymentParameters) { + *out = *in + if in.CognitiveAccountID != nil { + in, out := &in.CognitiveAccountID, &out.CognitiveAccountID + *out = new(string) + **out = **in + } + if in.CognitiveAccountIDRef != nil { + in, out := &in.CognitiveAccountIDRef, &out.CognitiveAccountIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CognitiveAccountIDSelector != nil { + in, out := &in.CognitiveAccountIDSelector, &out.CognitiveAccountIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Model != nil { + in, out := &in.Model, &out.Model + *out = new(ModelParameters) + (*in).DeepCopyInto(*out) + } + if in.RaiPolicyName != nil { + in, out := &in.RaiPolicyName, &out.RaiPolicyName + *out = new(string) + **out = **in + } + if in.Scale != nil { + in, out := &in.Scale, &out.Scale + *out = new(ScaleParameters) + (*in).DeepCopyInto(*out) + } + if in.VersionUpgradeOption != nil { + in, out := &in.VersionUpgradeOption, &out.VersionUpgradeOption + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentParameters. +func (in *DeploymentParameters) DeepCopy() *DeploymentParameters { + if in == nil { + return nil + } + out := new(DeploymentParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentSpec) DeepCopyInto(out *DeploymentSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentSpec. 
+func (in *DeploymentSpec) DeepCopy() *DeploymentSpec { + if in == nil { + return nil + } + out := new(DeploymentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentStatus) DeepCopyInto(out *DeploymentStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStatus. +func (in *DeploymentStatus) DeepCopy() *DeploymentStatus { + if in == nil { + return nil + } + out := new(DeploymentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityInitParameters) DeepCopyInto(out *IdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityInitParameters. +func (in *IdentityInitParameters) DeepCopy() *IdentityInitParameters { + if in == nil { + return nil + } + out := new(IdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentityObservation) DeepCopyInto(out *IdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityObservation. +func (in *IdentityObservation) DeepCopy() *IdentityObservation { + if in == nil { + return nil + } + out := new(IdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityParameters) DeepCopyInto(out *IdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityParameters. +func (in *IdentityParameters) DeepCopy() *IdentityParameters { + if in == nil { + return nil + } + out := new(IdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ModelInitParameters) DeepCopyInto(out *ModelInitParameters) { + *out = *in + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModelInitParameters. +func (in *ModelInitParameters) DeepCopy() *ModelInitParameters { + if in == nil { + return nil + } + out := new(ModelInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ModelObservation) DeepCopyInto(out *ModelObservation) { + *out = *in + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModelObservation. +func (in *ModelObservation) DeepCopy() *ModelObservation { + if in == nil { + return nil + } + out := new(ModelObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ModelParameters) DeepCopyInto(out *ModelParameters) { + *out = *in + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModelParameters. +func (in *ModelParameters) DeepCopy() *ModelParameters { + if in == nil { + return nil + } + out := new(ModelParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkAclsInitParameters) DeepCopyInto(out *NetworkAclsInitParameters) { + *out = *in + if in.DefaultAction != nil { + in, out := &in.DefaultAction, &out.DefaultAction + *out = new(string) + **out = **in + } + if in.IPRules != nil { + in, out := &in.IPRules, &out.IPRules + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VirtualNetworkRules != nil { + in, out := &in.VirtualNetworkRules, &out.VirtualNetworkRules + *out = make([]VirtualNetworkRulesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkAclsInitParameters. +func (in *NetworkAclsInitParameters) DeepCopy() *NetworkAclsInitParameters { + if in == nil { + return nil + } + out := new(NetworkAclsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkAclsObservation) DeepCopyInto(out *NetworkAclsObservation) { + *out = *in + if in.DefaultAction != nil { + in, out := &in.DefaultAction, &out.DefaultAction + *out = new(string) + **out = **in + } + if in.IPRules != nil { + in, out := &in.IPRules, &out.IPRules + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VirtualNetworkRules != nil { + in, out := &in.VirtualNetworkRules, &out.VirtualNetworkRules + *out = make([]VirtualNetworkRulesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkAclsObservation. +func (in *NetworkAclsObservation) DeepCopy() *NetworkAclsObservation { + if in == nil { + return nil + } + out := new(NetworkAclsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkAclsParameters) DeepCopyInto(out *NetworkAclsParameters) { + *out = *in + if in.DefaultAction != nil { + in, out := &in.DefaultAction, &out.DefaultAction + *out = new(string) + **out = **in + } + if in.IPRules != nil { + in, out := &in.IPRules, &out.IPRules + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VirtualNetworkRules != nil { + in, out := &in.VirtualNetworkRules, &out.VirtualNetworkRules + *out = make([]VirtualNetworkRulesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkAclsParameters. 
+func (in *NetworkAclsParameters) DeepCopy() *NetworkAclsParameters { + if in == nil { + return nil + } + out := new(NetworkAclsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaleInitParameters) DeepCopyInto(out *ScaleInitParameters) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(float64) + **out = **in + } + if in.Family != nil { + in, out := &in.Family, &out.Family + *out = new(string) + **out = **in + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(string) + **out = **in + } + if in.Tier != nil { + in, out := &in.Tier, &out.Tier + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleInitParameters. +func (in *ScaleInitParameters) DeepCopy() *ScaleInitParameters { + if in == nil { + return nil + } + out := new(ScaleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaleObservation) DeepCopyInto(out *ScaleObservation) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(float64) + **out = **in + } + if in.Family != nil { + in, out := &in.Family, &out.Family + *out = new(string) + **out = **in + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(string) + **out = **in + } + if in.Tier != nil { + in, out := &in.Tier, &out.Tier + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleObservation. 
+func (in *ScaleObservation) DeepCopy() *ScaleObservation { + if in == nil { + return nil + } + out := new(ScaleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaleParameters) DeepCopyInto(out *ScaleParameters) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(float64) + **out = **in + } + if in.Family != nil { + in, out := &in.Family, &out.Family + *out = new(string) + **out = **in + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(string) + **out = **in + } + if in.Tier != nil { + in, out := &in.Tier, &out.Tier + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleParameters. +func (in *ScaleParameters) DeepCopy() *ScaleParameters { + if in == nil { + return nil + } + out := new(ScaleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageInitParameters) DeepCopyInto(out *StorageInitParameters) { + *out = *in + if in.IdentityClientID != nil { + in, out := &in.IdentityClientID, &out.IdentityClientID + *out = new(string) + **out = **in + } + if in.StorageAccountID != nil { + in, out := &in.StorageAccountID, &out.StorageAccountID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageInitParameters. +func (in *StorageInitParameters) DeepCopy() *StorageInitParameters { + if in == nil { + return nil + } + out := new(StorageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *StorageObservation) DeepCopyInto(out *StorageObservation) { + *out = *in + if in.IdentityClientID != nil { + in, out := &in.IdentityClientID, &out.IdentityClientID + *out = new(string) + **out = **in + } + if in.StorageAccountID != nil { + in, out := &in.StorageAccountID, &out.StorageAccountID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageObservation. +func (in *StorageObservation) DeepCopy() *StorageObservation { + if in == nil { + return nil + } + out := new(StorageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageParameters) DeepCopyInto(out *StorageParameters) { + *out = *in + if in.IdentityClientID != nil { + in, out := &in.IdentityClientID, &out.IdentityClientID + *out = new(string) + **out = **in + } + if in.StorageAccountID != nil { + in, out := &in.StorageAccountID, &out.StorageAccountID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageParameters. +func (in *StorageParameters) DeepCopy() *StorageParameters { + if in == nil { + return nil + } + out := new(StorageParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualNetworkRulesInitParameters) DeepCopyInto(out *VirtualNetworkRulesInitParameters) { + *out = *in + if in.IgnoreMissingVnetServiceEndpoint != nil { + in, out := &in.IgnoreMissingVnetServiceEndpoint, &out.IgnoreMissingVnetServiceEndpoint + *out = new(bool) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkRulesInitParameters. +func (in *VirtualNetworkRulesInitParameters) DeepCopy() *VirtualNetworkRulesInitParameters { + if in == nil { + return nil + } + out := new(VirtualNetworkRulesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualNetworkRulesObservation) DeepCopyInto(out *VirtualNetworkRulesObservation) { + *out = *in + if in.IgnoreMissingVnetServiceEndpoint != nil { + in, out := &in.IgnoreMissingVnetServiceEndpoint, &out.IgnoreMissingVnetServiceEndpoint + *out = new(bool) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkRulesObservation. +func (in *VirtualNetworkRulesObservation) DeepCopy() *VirtualNetworkRulesObservation { + if in == nil { + return nil + } + out := new(VirtualNetworkRulesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualNetworkRulesParameters) DeepCopyInto(out *VirtualNetworkRulesParameters) { + *out = *in + if in.IgnoreMissingVnetServiceEndpoint != nil { + in, out := &in.IgnoreMissingVnetServiceEndpoint, &out.IgnoreMissingVnetServiceEndpoint + *out = new(bool) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkRulesParameters. +func (in *VirtualNetworkRulesParameters) DeepCopy() *VirtualNetworkRulesParameters { + if in == nil { + return nil + } + out := new(VirtualNetworkRulesParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/cognitiveservices/v1beta2/zz_generated.managed.go b/apis/cognitiveservices/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..a60525249 --- /dev/null +++ b/apis/cognitiveservices/v1beta2/zz_generated.managed.go @@ -0,0 +1,128 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Account. +func (mg *Account) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Account. +func (mg *Account) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Account. +func (mg *Account) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Account. 
+func (mg *Account) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Account. +func (mg *Account) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Account. +func (mg *Account) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Account. +func (mg *Account) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Account. +func (mg *Account) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Account. +func (mg *Account) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Account. +func (mg *Account) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Account. +func (mg *Account) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Account. +func (mg *Account) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Deployment. +func (mg *Deployment) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Deployment. +func (mg *Deployment) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Deployment. +func (mg *Deployment) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Deployment. 
+func (mg *Deployment) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Deployment. +func (mg *Deployment) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Deployment. +func (mg *Deployment) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Deployment. +func (mg *Deployment) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Deployment. +func (mg *Deployment) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Deployment. +func (mg *Deployment) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Deployment. +func (mg *Deployment) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Deployment. +func (mg *Deployment) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Deployment. +func (mg *Deployment) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/cognitiveservices/v1beta2/zz_generated.managedlist.go b/apis/cognitiveservices/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..a2307b0a6 --- /dev/null +++ b/apis/cognitiveservices/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,26 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. 
+ +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this AccountList. +func (l *AccountList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this DeploymentList. +func (l *DeploymentList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/cognitiveservices/v1beta2/zz_generated.resolvers.go b/apis/cognitiveservices/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..4284a678f --- /dev/null +++ b/apis/cognitiveservices/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + rconfig "github.com/upbound/provider-azure/apis/rconfig" + client "sigs.k8s.io/controller-runtime/pkg/client" + + // ResolveReferences of this Account. 
+ apisresolver "github.com/upbound/provider-azure/internal/apis" +) + +func (mg *Account) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + if mg.Spec.ForProvider.NetworkAcls != nil { + for i4 := 0; i4 < len(mg.Spec.ForProvider.NetworkAcls.VirtualNetworkRules); i4++ { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.NetworkAcls.VirtualNetworkRules[i4].SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.NetworkAcls.VirtualNetworkRules[i4].SubnetIDRef, + Selector: mg.Spec.ForProvider.NetworkAcls.VirtualNetworkRules[i4].SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.NetworkAcls.VirtualNetworkRules[i4].SubnetID") + } + mg.Spec.ForProvider.NetworkAcls.VirtualNetworkRules[i4].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.NetworkAcls.VirtualNetworkRules[i4].SubnetIDRef = rsp.ResolvedReference + + } + } + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: 
reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.NetworkAcls != nil { + for i4 := 0; i4 < len(mg.Spec.InitProvider.NetworkAcls.VirtualNetworkRules); i4++ { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.NetworkAcls.VirtualNetworkRules[i4].SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.NetworkAcls.VirtualNetworkRules[i4].SubnetIDRef, + Selector: mg.Spec.InitProvider.NetworkAcls.VirtualNetworkRules[i4].SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.NetworkAcls.VirtualNetworkRules[i4].SubnetID") + } + mg.Spec.InitProvider.NetworkAcls.VirtualNetworkRules[i4].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.NetworkAcls.VirtualNetworkRules[i4].SubnetIDRef = rsp.ResolvedReference + + } + } + + return nil +} + +// ResolveReferences of this Deployment. 
+func (mg *Deployment) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("cognitiveservices.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.CognitiveAccountID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.CognitiveAccountIDRef, + Selector: mg.Spec.ForProvider.CognitiveAccountIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.CognitiveAccountID") + } + mg.Spec.ForProvider.CognitiveAccountID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.CognitiveAccountIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/cognitiveservices/v1beta2/zz_groupversion_info.go b/apis/cognitiveservices/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..d5460038c --- /dev/null +++ b/apis/cognitiveservices/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=cognitiveservices.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
+const ( + CRDGroup = "cognitiveservices.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/compute/v1beta1/zz_galleryapplication_types.go b/apis/compute/v1beta1/zz_galleryapplication_types.go index 205a5ccf4..13a7e8f0c 100755 --- a/apis/compute/v1beta1/zz_galleryapplication_types.go +++ b/apis/compute/v1beta1/zz_galleryapplication_types.go @@ -90,7 +90,7 @@ type GalleryApplicationParameters struct { Eula *string `json:"eula,omitempty" tf:"eula,omitempty"` // The ID of the Shared Image Gallery. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/compute/v1beta1.SharedImageGallery + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/compute/v1beta2.SharedImageGallery // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional GalleryID *string `json:"galleryId,omitempty" tf:"gallery_id,omitempty"` diff --git a/apis/compute/v1beta1/zz_generated.conversion_hubs.go b/apis/compute/v1beta1/zz_generated.conversion_hubs.go index 2f8284631..627155c01 100755 --- a/apis/compute/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/compute/v1beta1/zz_generated.conversion_hubs.go @@ -9,9 +9,6 @@ package v1beta1 // Hub marks this type as a conversion hub. func (tr *AvailabilitySet) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *CapacityReservation) Hub() {} - // Hub marks this type as a conversion hub. 
func (tr *CapacityReservationGroup) Hub() {} @@ -21,59 +18,17 @@ func (tr *DedicatedHost) Hub() {} // Hub marks this type as a conversion hub. func (tr *DiskAccess) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *DiskEncryptionSet) Hub() {} - // Hub marks this type as a conversion hub. func (tr *GalleryApplication) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *GalleryApplicationVersion) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Image) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *LinuxVirtualMachine) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *LinuxVirtualMachineScaleSet) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *ManagedDisk) Hub() {} - // Hub marks this type as a conversion hub. func (tr *ManagedDiskSASToken) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *OrchestratedVirtualMachineScaleSet) Hub() {} - // Hub marks this type as a conversion hub. func (tr *ProximityPlacementGroup) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *SharedImage) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *SharedImageGallery) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Snapshot) Hub() {} - // Hub marks this type as a conversion hub. func (tr *SSHPublicKey) Hub() {} // Hub marks this type as a conversion hub. func (tr *VirtualMachineDataDiskAttachment) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *VirtualMachineExtension) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *VirtualMachineRunCommand) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *WindowsVirtualMachine) Hub() {} - -// Hub marks this type as a conversion hub. 
-func (tr *WindowsVirtualMachineScaleSet) Hub() {} diff --git a/apis/compute/v1beta1/zz_generated.conversion_spokes.go b/apis/compute/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..551765d31 --- /dev/null +++ b/apis/compute/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,314 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this CapacityReservation to the hub type. +func (tr *CapacityReservation) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the CapacityReservation type. +func (tr *CapacityReservation) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this DiskEncryptionSet to the hub type. 
+func (tr *DiskEncryptionSet) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the DiskEncryptionSet type. +func (tr *DiskEncryptionSet) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this GalleryApplicationVersion to the hub type. +func (tr *GalleryApplicationVersion) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the GalleryApplicationVersion type. 
+func (tr *GalleryApplicationVersion) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Image to the hub type. +func (tr *Image) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Image type. +func (tr *Image) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this LinuxVirtualMachine to the hub type. +func (tr *LinuxVirtualMachine) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the LinuxVirtualMachine type. 
+func (tr *LinuxVirtualMachine) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this LinuxVirtualMachineScaleSet to the hub type. +func (tr *LinuxVirtualMachineScaleSet) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the LinuxVirtualMachineScaleSet type. +func (tr *LinuxVirtualMachineScaleSet) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this ManagedDisk to the hub type. 
+func (tr *ManagedDisk) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ManagedDisk type. +func (tr *ManagedDisk) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this OrchestratedVirtualMachineScaleSet to the hub type. +func (tr *OrchestratedVirtualMachineScaleSet) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the OrchestratedVirtualMachineScaleSet type. 
+func (tr *OrchestratedVirtualMachineScaleSet) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this SharedImage to the hub type. +func (tr *SharedImage) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the SharedImage type. +func (tr *SharedImage) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this SharedImageGallery to the hub type. +func (tr *SharedImageGallery) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the SharedImageGallery type. 
+func (tr *SharedImageGallery) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Snapshot to the hub type. +func (tr *Snapshot) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Snapshot type. +func (tr *Snapshot) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this VirtualMachineExtension to the hub type. +func (tr *VirtualMachineExtension) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the VirtualMachineExtension type. 
+func (tr *VirtualMachineExtension) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this VirtualMachineRunCommand to the hub type. +func (tr *VirtualMachineRunCommand) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the VirtualMachineRunCommand type. +func (tr *VirtualMachineRunCommand) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this WindowsVirtualMachine to the hub type. 
+func (tr *WindowsVirtualMachine) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the WindowsVirtualMachine type. +func (tr *WindowsVirtualMachine) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this WindowsVirtualMachineScaleSet to the hub type. +func (tr *WindowsVirtualMachineScaleSet) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the WindowsVirtualMachineScaleSet type. 
+func (tr *WindowsVirtualMachineScaleSet) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/compute/v1beta1/zz_generated.resolvers.go b/apis/compute/v1beta1/zz_generated.resolvers.go index cc644f04a..acd755827 100644 --- a/apis/compute/v1beta1/zz_generated.resolvers.go +++ b/apis/compute/v1beta1/zz_generated.resolvers.go @@ -221,7 +221,7 @@ func (mg *GalleryApplication) ResolveReferences(ctx context.Context, c client.Re var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("compute.azure.upbound.io", "v1beta1", "SharedImageGallery", "SharedImageGalleryList") + m, l, err = apisresolver.GetManagedResource("compute.azure.upbound.io", "v1beta2", "SharedImageGallery", "SharedImageGalleryList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -635,7 +635,7 @@ func (mg *ManagedDiskSASToken) ResolveReferences(ctx context.Context, c client.R var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("compute.azure.upbound.io", "v1beta1", "ManagedDisk", "ManagedDiskList") + m, l, err = apisresolver.GetManagedResource("compute.azure.upbound.io", "v1beta2", "ManagedDisk", "ManagedDiskList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -654,7 +654,7 @@ func (mg *ManagedDiskSASToken) ResolveReferences(ctx context.Context, c client.R mg.Spec.ForProvider.ManagedDiskID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ManagedDiskIDRef = 
rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("compute.azure.upbound.io", "v1beta1", "ManagedDisk", "ManagedDiskList") + m, l, err = apisresolver.GetManagedResource("compute.azure.upbound.io", "v1beta2", "ManagedDisk", "ManagedDiskList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -975,7 +975,7 @@ func (mg *VirtualMachineDataDiskAttachment) ResolveReferences(ctx context.Contex var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("compute.azure.upbound.io", "v1beta1", "ManagedDisk", "ManagedDiskList") + m, l, err = apisresolver.GetManagedResource("compute.azure.upbound.io", "v1beta2", "ManagedDisk", "ManagedDiskList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -994,7 +994,7 @@ func (mg *VirtualMachineDataDiskAttachment) ResolveReferences(ctx context.Contex mg.Spec.ForProvider.ManagedDiskID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ManagedDiskIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("compute.azure.upbound.io", "v1beta1", "LinuxVirtualMachine", "LinuxVirtualMachineList") + m, l, err = apisresolver.GetManagedResource("compute.azure.upbound.io", "v1beta2", "LinuxVirtualMachine", "LinuxVirtualMachineList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1013,7 +1013,7 @@ func (mg *VirtualMachineDataDiskAttachment) ResolveReferences(ctx context.Contex mg.Spec.ForProvider.VirtualMachineID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.VirtualMachineIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("compute.azure.upbound.io", "v1beta1", "ManagedDisk", "ManagedDiskList") + m, l, err = 
apisresolver.GetManagedResource("compute.azure.upbound.io", "v1beta2", "ManagedDisk", "ManagedDiskList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1032,7 +1032,7 @@ func (mg *VirtualMachineDataDiskAttachment) ResolveReferences(ctx context.Contex mg.Spec.InitProvider.ManagedDiskID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.ManagedDiskIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("compute.azure.upbound.io", "v1beta1", "LinuxVirtualMachine", "LinuxVirtualMachineList") + m, l, err = apisresolver.GetManagedResource("compute.azure.upbound.io", "v1beta2", "LinuxVirtualMachine", "LinuxVirtualMachineList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/compute/v1beta1/zz_manageddisksastoken_types.go b/apis/compute/v1beta1/zz_manageddisksastoken_types.go index 8906c1860..9ae8362ff 100755 --- a/apis/compute/v1beta1/zz_manageddisksastoken_types.go +++ b/apis/compute/v1beta1/zz_manageddisksastoken_types.go @@ -22,7 +22,7 @@ type ManagedDiskSASTokenInitParameters struct { DurationInSeconds *float64 `json:"durationInSeconds,omitempty" tf:"duration_in_seconds,omitempty"` // The ID of an existing Managed Disk which should be exported. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/compute/v1beta1.ManagedDisk + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/compute/v1beta2.ManagedDisk // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() ManagedDiskID *string `json:"managedDiskId,omitempty" tf:"managed_disk_id,omitempty"` @@ -61,7 +61,7 @@ type ManagedDiskSASTokenParameters struct { DurationInSeconds *float64 `json:"durationInSeconds,omitempty" tf:"duration_in_seconds,omitempty"` // The ID of an existing Managed Disk which should be exported. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/compute/v1beta1.ManagedDisk + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/compute/v1beta2.ManagedDisk // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional ManagedDiskID *string `json:"managedDiskId,omitempty" tf:"managed_disk_id,omitempty"` diff --git a/apis/compute/v1beta1/zz_virtualmachinedatadiskattachment_types.go b/apis/compute/v1beta1/zz_virtualmachinedatadiskattachment_types.go index a5f70441b..3acaa958c 100755 --- a/apis/compute/v1beta1/zz_virtualmachinedatadiskattachment_types.go +++ b/apis/compute/v1beta1/zz_virtualmachinedatadiskattachment_types.go @@ -25,7 +25,7 @@ type VirtualMachineDataDiskAttachmentInitParameters struct { Lun *float64 `json:"lun,omitempty" tf:"lun,omitempty"` // The ID of an existing Managed Disk which should be attached. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/compute/v1beta1.ManagedDisk + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/compute/v1beta2.ManagedDisk // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() ManagedDiskID *string `json:"managedDiskId,omitempty" tf:"managed_disk_id,omitempty"` @@ -38,7 +38,7 @@ type VirtualMachineDataDiskAttachmentInitParameters struct { ManagedDiskIDSelector *v1.Selector `json:"managedDiskIdSelector,omitempty" tf:"-"` // The ID of the Virtual Machine to which the Data Disk should be attached. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/compute/v1beta1.LinuxVirtualMachine + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/compute/v1beta2.LinuxVirtualMachine // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() VirtualMachineID *string `json:"virtualMachineId,omitempty" tf:"virtual_machine_id,omitempty"` @@ -93,7 +93,7 @@ type VirtualMachineDataDiskAttachmentParameters struct { Lun *float64 `json:"lun,omitempty" tf:"lun,omitempty"` // The ID of an existing Managed Disk which should be attached. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/compute/v1beta1.ManagedDisk + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/compute/v1beta2.ManagedDisk // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional ManagedDiskID *string `json:"managedDiskId,omitempty" tf:"managed_disk_id,omitempty"` @@ -107,7 +107,7 @@ type VirtualMachineDataDiskAttachmentParameters struct { ManagedDiskIDSelector *v1.Selector `json:"managedDiskIdSelector,omitempty" tf:"-"` // The ID of the Virtual Machine to which the Data Disk should be attached. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/compute/v1beta1.LinuxVirtualMachine + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/compute/v1beta2.LinuxVirtualMachine // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() // +kubebuilder:validation:Optional VirtualMachineID *string `json:"virtualMachineId,omitempty" tf:"virtual_machine_id,omitempty"` diff --git a/apis/compute/v1beta2/zz_capacityreservation_terraformed.go b/apis/compute/v1beta2/zz_capacityreservation_terraformed.go new file mode 100755 index 000000000..982a5a897 --- /dev/null +++ b/apis/compute/v1beta2/zz_capacityreservation_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this CapacityReservation +func (mg *CapacityReservation) GetTerraformResourceType() string { + return "azurerm_capacity_reservation" +} + +// GetConnectionDetailsMapping for this CapacityReservation +func (tr *CapacityReservation) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this CapacityReservation +func (tr *CapacityReservation) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this CapacityReservation +func (tr *CapacityReservation) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this CapacityReservation +func (tr *CapacityReservation) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this CapacityReservation +func (tr *CapacityReservation) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this CapacityReservation +func (tr *CapacityReservation) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this CapacityReservation +func (tr *CapacityReservation) GetInitParameters() 
(map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this CapacityReservation +func (tr *CapacityReservation) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this CapacityReservation using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *CapacityReservation) LateInitialize(attrs []byte) (bool, error) { + params := &CapacityReservationParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *CapacityReservation) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/compute/v1beta2/zz_capacityreservation_types.go b/apis/compute/v1beta2/zz_capacityreservation_types.go new file mode 100755 index 000000000..ff80fa1e6 --- /dev/null +++ b/apis/compute/v1beta2/zz_capacityreservation_types.go @@ -0,0 +1,166 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CapacityReservationInitParameters struct { + + // A sku block as defined below. + Sku *SkuInitParameters `json:"sku,omitempty" tf:"sku,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies the Availability Zone for this Capacity Reservation. Changing this forces a new resource to be created. + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` +} + +type CapacityReservationObservation struct { + + // The ID of the Capacity Reservation Group where the Capacity Reservation exists. Changing this forces a new resource to be created. + CapacityReservationGroupID *string `json:"capacityReservationGroupId,omitempty" tf:"capacity_reservation_group_id,omitempty"` + + // The ID of the Capacity Reservation. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A sku block as defined below. + Sku *SkuObservation `json:"sku,omitempty" tf:"sku,omitempty"` + + // A mapping of tags to assign to the resource. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies the Availability Zone for this Capacity Reservation. Changing this forces a new resource to be created. + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` +} + +type CapacityReservationParameters struct { + + // The ID of the Capacity Reservation Group where the Capacity Reservation exists. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/compute/v1beta1.CapacityReservationGroup + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + CapacityReservationGroupID *string `json:"capacityReservationGroupId,omitempty" tf:"capacity_reservation_group_id,omitempty"` + + // Reference to a CapacityReservationGroup in compute to populate capacityReservationGroupId. + // +kubebuilder:validation:Optional + CapacityReservationGroupIDRef *v1.Reference `json:"capacityReservationGroupIdRef,omitempty" tf:"-"` + + // Selector for a CapacityReservationGroup in compute to populate capacityReservationGroupId. + // +kubebuilder:validation:Optional + CapacityReservationGroupIDSelector *v1.Selector `json:"capacityReservationGroupIdSelector,omitempty" tf:"-"` + + // A sku block as defined below. + // +kubebuilder:validation:Optional + Sku *SkuParameters `json:"sku,omitempty" tf:"sku,omitempty"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies the Availability Zone for this Capacity Reservation. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` +} + +type SkuInitParameters struct { + + // Specifies the number of instances to be reserved. 
It must be a positive integer and not exceed the quota in the subscription. + Capacity *float64 `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // Name of the sku, such as Standard_F2. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type SkuObservation struct { + + // Specifies the number of instances to be reserved. It must be a positive integer and not exceed the quota in the subscription. + Capacity *float64 `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // Name of the sku, such as Standard_F2. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type SkuParameters struct { + + // Specifies the number of instances to be reserved. It must be a positive integer and not exceed the quota in the subscription. + // +kubebuilder:validation:Optional + Capacity *float64 `json:"capacity" tf:"capacity,omitempty"` + + // Name of the sku, such as Standard_F2. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +// CapacityReservationSpec defines the desired state of CapacityReservation +type CapacityReservationSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider CapacityReservationParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider CapacityReservationInitParameters `json:"initProvider,omitempty"` +} + +// CapacityReservationStatus defines the observed state of CapacityReservation. +type CapacityReservationStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider CapacityReservationObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// CapacityReservation is the Schema for the CapacityReservations API. Manages a Capacity Reservation within a Capacity Reservation Group. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type CapacityReservation struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.sku) || (has(self.initProvider) && has(self.initProvider.sku))",message="spec.forProvider.sku is a required parameter" + Spec CapacityReservationSpec `json:"spec"` + Status CapacityReservationStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// CapacityReservationList contains a list of CapacityReservations +type CapacityReservationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta 
`json:"metadata,omitempty"` + Items []CapacityReservation `json:"items"` +} + +// Repository type metadata. +var ( + CapacityReservation_Kind = "CapacityReservation" + CapacityReservation_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: CapacityReservation_Kind}.String() + CapacityReservation_KindAPIVersion = CapacityReservation_Kind + "." + CRDGroupVersion.String() + CapacityReservation_GroupVersionKind = CRDGroupVersion.WithKind(CapacityReservation_Kind) +) + +func init() { + SchemeBuilder.Register(&CapacityReservation{}, &CapacityReservationList{}) +} diff --git a/apis/compute/v1beta2/zz_diskencryptionset_terraformed.go b/apis/compute/v1beta2/zz_diskencryptionset_terraformed.go new file mode 100755 index 000000000..76acad58b --- /dev/null +++ b/apis/compute/v1beta2/zz_diskencryptionset_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this DiskEncryptionSet +func (mg *DiskEncryptionSet) GetTerraformResourceType() string { + return "azurerm_disk_encryption_set" +} + +// GetConnectionDetailsMapping for this DiskEncryptionSet +func (tr *DiskEncryptionSet) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this DiskEncryptionSet +func (tr *DiskEncryptionSet) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this DiskEncryptionSet +func (tr *DiskEncryptionSet) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + 
} + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this DiskEncryptionSet +func (tr *DiskEncryptionSet) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this DiskEncryptionSet +func (tr *DiskEncryptionSet) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this DiskEncryptionSet +func (tr *DiskEncryptionSet) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this DiskEncryptionSet +func (tr *DiskEncryptionSet) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this DiskEncryptionSet +func (tr *DiskEncryptionSet) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this DiskEncryptionSet using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *DiskEncryptionSet) LateInitialize(attrs []byte) (bool, error) { + params := &DiskEncryptionSetParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *DiskEncryptionSet) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/compute/v1beta2/zz_diskencryptionset_types.go b/apis/compute/v1beta2/zz_diskencryptionset_types.go new file mode 100755 index 000000000..00de0e0e5 --- /dev/null +++ b/apis/compute/v1beta2/zz_diskencryptionset_types.go @@ -0,0 +1,238 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DiskEncryptionSetInitParameters struct { + + // Boolean flag to specify whether Azure Disk Encryption Set automatically rotates the encryption Key to latest version or not. Possible values are true or false. Defaults to false. 
+ AutoKeyRotationEnabled *bool `json:"autoKeyRotationEnabled,omitempty" tf:"auto_key_rotation_enabled,omitempty"` + + // The type of key used to encrypt the data of the disk. Possible values are EncryptionAtRestWithCustomerKey, EncryptionAtRestWithPlatformAndCustomerKeys and ConfidentialVmEncryptedWithCustomerKey. Defaults to EncryptionAtRestWithCustomerKey. Changing this forces a new resource to be created. + EncryptionType *string `json:"encryptionType,omitempty" tf:"encryption_type,omitempty"` + + // Multi-tenant application client id to access key vault in a different tenant. + FederatedClientID *string `json:"federatedClientId,omitempty" tf:"federated_client_id,omitempty"` + + // An identity block as defined below. + Identity *IdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Specifies the URL to a Key Vault Key (either from a Key Vault Key, or the Key URL for the Key Vault Secret). + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta2.Key + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + KeyVaultKeyID *string `json:"keyVaultKeyId,omitempty" tf:"key_vault_key_id,omitempty"` + + // Reference to a Key in keyvault to populate keyVaultKeyId. + // +kubebuilder:validation:Optional + KeyVaultKeyIDRef *v1.Reference `json:"keyVaultKeyIdRef,omitempty" tf:"-"` + + // Selector for a Key in keyvault to populate keyVaultKeyId. + // +kubebuilder:validation:Optional + KeyVaultKeyIDSelector *v1.Selector `json:"keyVaultKeyIdSelector,omitempty" tf:"-"` + + // Specifies the Azure Region where the Disk Encryption Set exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A mapping of tags to assign to the Disk Encryption Set. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type DiskEncryptionSetObservation struct { + + // Boolean flag to specify whether Azure Disk Encryption Set automatically rotates the encryption Key to latest version or not. Possible values are true or false. Defaults to false. + AutoKeyRotationEnabled *bool `json:"autoKeyRotationEnabled,omitempty" tf:"auto_key_rotation_enabled,omitempty"` + + // The type of key used to encrypt the data of the disk. Possible values are EncryptionAtRestWithCustomerKey, EncryptionAtRestWithPlatformAndCustomerKeys and ConfidentialVmEncryptedWithCustomerKey. Defaults to EncryptionAtRestWithCustomerKey. Changing this forces a new resource to be created. + EncryptionType *string `json:"encryptionType,omitempty" tf:"encryption_type,omitempty"` + + // Multi-tenant application client id to access key vault in a different tenant. + FederatedClientID *string `json:"federatedClientId,omitempty" tf:"federated_client_id,omitempty"` + + // The ID of the Disk Encryption Set. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. + Identity *IdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // Specifies the URL to a Key Vault Key (either from a Key Vault Key, or the Key URL for the Key Vault Secret). + KeyVaultKeyID *string `json:"keyVaultKeyId,omitempty" tf:"key_vault_key_id,omitempty"` + + // The URL for the Key Vault Key or Key Vault Secret that is currently being used by the service. + KeyVaultKeyURL *string `json:"keyVaultKeyUrl,omitempty" tf:"key_vault_key_url,omitempty"` + + // Specifies the Azure Region where the Disk Encryption Set exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the name of the Resource Group where the Disk Encryption Set should exist. Changing this forces a new resource to be created. 
+ ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A mapping of tags to assign to the Disk Encryption Set. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type DiskEncryptionSetParameters struct { + + // Boolean flag to specify whether Azure Disk Encryption Set automatically rotates the encryption Key to latest version or not. Possible values are true or false. Defaults to false. + // +kubebuilder:validation:Optional + AutoKeyRotationEnabled *bool `json:"autoKeyRotationEnabled,omitempty" tf:"auto_key_rotation_enabled,omitempty"` + + // The type of key used to encrypt the data of the disk. Possible values are EncryptionAtRestWithCustomerKey, EncryptionAtRestWithPlatformAndCustomerKeys and ConfidentialVmEncryptedWithCustomerKey. Defaults to EncryptionAtRestWithCustomerKey. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + EncryptionType *string `json:"encryptionType,omitempty" tf:"encryption_type,omitempty"` + + // Multi-tenant application client id to access key vault in a different tenant. + // +kubebuilder:validation:Optional + FederatedClientID *string `json:"federatedClientId,omitempty" tf:"federated_client_id,omitempty"` + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *IdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Specifies the URL to a Key Vault Key (either from a Key Vault Key, or the Key URL for the Key Vault Secret). + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta2.Key + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + KeyVaultKeyID *string `json:"keyVaultKeyId,omitempty" tf:"key_vault_key_id,omitempty"` + + // Reference to a Key in keyvault to populate keyVaultKeyId. 
+ // +kubebuilder:validation:Optional + KeyVaultKeyIDRef *v1.Reference `json:"keyVaultKeyIdRef,omitempty" tf:"-"` + + // Selector for a Key in keyvault to populate keyVaultKeyId. + // +kubebuilder:validation:Optional + KeyVaultKeyIDSelector *v1.Selector `json:"keyVaultKeyIdSelector,omitempty" tf:"-"` + + // Specifies the Azure Region where the Disk Encryption Set exists. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the name of the Resource Group where the Disk Encryption Set should exist. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A mapping of tags to assign to the Disk Encryption Set. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type IdentityInitParameters struct { + + // A list of User Assigned Managed Identity IDs to be assigned to this Disk Encryption Set. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The type of Managed Service Identity that is configured on this Disk Encryption Set. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityObservation struct { + + // A list of User Assigned Managed Identity IDs to be assigned to this Disk Encryption Set. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The (Client) ID of the Service Principal. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The ID of the Tenant the Service Principal is assigned in. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // The type of Managed Service Identity that is configured on this Disk Encryption Set. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityParameters struct { + + // A list of User Assigned Managed Identity IDs to be assigned to this Disk Encryption Set. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The type of Managed Service Identity that is configured on this Disk Encryption Set. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +// DiskEncryptionSetSpec defines the desired state of DiskEncryptionSet +type DiskEncryptionSetSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider DiskEncryptionSetParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. 
+ // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider DiskEncryptionSetInitParameters `json:"initProvider,omitempty"` +} + +// DiskEncryptionSetStatus defines the observed state of DiskEncryptionSet. +type DiskEncryptionSetStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider DiskEncryptionSetObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// DiskEncryptionSet is the Schema for the DiskEncryptionSets API. Manages a Disk Encryption Set. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type DiskEncryptionSet struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.identity) || (has(self.initProvider) && has(self.initProvider.identity))",message="spec.forProvider.identity is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && 
has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + Spec DiskEncryptionSetSpec `json:"spec"` + Status DiskEncryptionSetStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// DiskEncryptionSetList contains a list of DiskEncryptionSets +type DiskEncryptionSetList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DiskEncryptionSet `json:"items"` +} + +// Repository type metadata. +var ( + DiskEncryptionSet_Kind = "DiskEncryptionSet" + DiskEncryptionSet_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: DiskEncryptionSet_Kind}.String() + DiskEncryptionSet_KindAPIVersion = DiskEncryptionSet_Kind + "." + CRDGroupVersion.String() + DiskEncryptionSet_GroupVersionKind = CRDGroupVersion.WithKind(DiskEncryptionSet_Kind) +) + +func init() { + SchemeBuilder.Register(&DiskEncryptionSet{}, &DiskEncryptionSetList{}) +} diff --git a/apis/compute/v1beta2/zz_galleryapplicationversion_terraformed.go b/apis/compute/v1beta2/zz_galleryapplicationversion_terraformed.go new file mode 100755 index 000000000..7b8458ab9 --- /dev/null +++ b/apis/compute/v1beta2/zz_galleryapplicationversion_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this GalleryApplicationVersion +func (mg *GalleryApplicationVersion) GetTerraformResourceType() string { + return "azurerm_gallery_application_version" +} + +// GetConnectionDetailsMapping for this GalleryApplicationVersion +func (tr *GalleryApplicationVersion) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this GalleryApplicationVersion +func (tr *GalleryApplicationVersion) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this GalleryApplicationVersion +func (tr *GalleryApplicationVersion) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this GalleryApplicationVersion +func (tr *GalleryApplicationVersion) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this GalleryApplicationVersion +func (tr *GalleryApplicationVersion) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this GalleryApplicationVersion +func (tr *GalleryApplicationVersion) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters 
of this GalleryApplicationVersion +func (tr *GalleryApplicationVersion) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this GalleryApplicationVersion +func (tr *GalleryApplicationVersion) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this GalleryApplicationVersion using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *GalleryApplicationVersion) LateInitialize(attrs []byte) (bool, error) { + params := &GalleryApplicationVersionParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *GalleryApplicationVersion) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/compute/v1beta2/zz_galleryapplicationversion_types.go b/apis/compute/v1beta2/zz_galleryapplicationversion_types.go new file mode 100755 index 000000000..1f7ffa689 --- /dev/null +++ b/apis/compute/v1beta2/zz_galleryapplicationversion_types.go @@ -0,0 +1,391 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type GalleryApplicationVersionInitParameters struct { + + // Specifies the name of the config file on the VM. Changing this forces a new resource to be created. + ConfigFile *string `json:"configFile,omitempty" tf:"config_file,omitempty"` + + // Should the Gallery Application reports health. Defaults to false. + EnableHealthCheck *bool `json:"enableHealthCheck,omitempty" tf:"enable_health_check,omitempty"` + + // The end of life date in RFC3339 format of the Gallery Application Version. + EndOfLifeDate *string `json:"endOfLifeDate,omitempty" tf:"end_of_life_date,omitempty"` + + // Should the Gallery Application Version be excluded from the latest filter? 
If set to true this Gallery Application Version won't be returned for the latest version. Defaults to false. + ExcludeFromLatest *bool `json:"excludeFromLatest,omitempty" tf:"exclude_from_latest,omitempty"` + + // The ID of the Gallery Application. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/compute/v1beta1.GalleryApplication + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + GalleryApplicationID *string `json:"galleryApplicationId,omitempty" tf:"gallery_application_id,omitempty"` + + // Reference to a GalleryApplication in compute to populate galleryApplicationId. + // +kubebuilder:validation:Optional + GalleryApplicationIDRef *v1.Reference `json:"galleryApplicationIdRef,omitempty" tf:"-"` + + // Selector for a GalleryApplication in compute to populate galleryApplicationId. + // +kubebuilder:validation:Optional + GalleryApplicationIDSelector *v1.Selector `json:"galleryApplicationIdSelector,omitempty" tf:"-"` + + // The Azure Region where the Gallery Application Version exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A manage_action block as defined below. + ManageAction *ManageActionInitParameters `json:"manageAction,omitempty" tf:"manage_action,omitempty"` + + // The version name of the Gallery Application Version, such as 1.0.0. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies the name of the package file on the VM. Changing this forces a new resource to be created. + PackageFile *string `json:"packageFile,omitempty" tf:"package_file,omitempty"` + + // A source block as defined below. + Source *SourceInitParameters `json:"source,omitempty" tf:"source,omitempty"` + + // A mapping of tags to assign to the Gallery Application Version. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // One or more target_region blocks as defined below. + TargetRegion []TargetRegionInitParameters `json:"targetRegion,omitempty" tf:"target_region,omitempty"` +} + +type GalleryApplicationVersionObservation struct { + + // Specifies the name of the config file on the VM. Changing this forces a new resource to be created. + ConfigFile *string `json:"configFile,omitempty" tf:"config_file,omitempty"` + + // Should the Gallery Application reports health. Defaults to false. + EnableHealthCheck *bool `json:"enableHealthCheck,omitempty" tf:"enable_health_check,omitempty"` + + // The end of life date in RFC3339 format of the Gallery Application Version. + EndOfLifeDate *string `json:"endOfLifeDate,omitempty" tf:"end_of_life_date,omitempty"` + + // Should the Gallery Application Version be excluded from the latest filter? If set to true this Gallery Application Version won't be returned for the latest version. Defaults to false. + ExcludeFromLatest *bool `json:"excludeFromLatest,omitempty" tf:"exclude_from_latest,omitempty"` + + // The ID of the Gallery Application. Changing this forces a new resource to be created. + GalleryApplicationID *string `json:"galleryApplicationId,omitempty" tf:"gallery_application_id,omitempty"` + + // The ID of the Gallery Application Version. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The Azure Region where the Gallery Application Version exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A manage_action block as defined below. + ManageAction *ManageActionObservation `json:"manageAction,omitempty" tf:"manage_action,omitempty"` + + // The version name of the Gallery Application Version, such as 1.0.0. Changing this forces a new resource to be created. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies the name of the package file on the VM. Changing this forces a new resource to be created. + PackageFile *string `json:"packageFile,omitempty" tf:"package_file,omitempty"` + + // A source block as defined below. + Source *SourceObservation `json:"source,omitempty" tf:"source,omitempty"` + + // A mapping of tags to assign to the Gallery Application Version. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // One or more target_region blocks as defined below. + TargetRegion []TargetRegionObservation `json:"targetRegion,omitempty" tf:"target_region,omitempty"` +} + +type GalleryApplicationVersionParameters struct { + + // Specifies the name of the config file on the VM. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ConfigFile *string `json:"configFile,omitempty" tf:"config_file,omitempty"` + + // Should the Gallery Application reports health. Defaults to false. + // +kubebuilder:validation:Optional + EnableHealthCheck *bool `json:"enableHealthCheck,omitempty" tf:"enable_health_check,omitempty"` + + // The end of life date in RFC3339 format of the Gallery Application Version. + // +kubebuilder:validation:Optional + EndOfLifeDate *string `json:"endOfLifeDate,omitempty" tf:"end_of_life_date,omitempty"` + + // Should the Gallery Application Version be excluded from the latest filter? If set to true this Gallery Application Version won't be returned for the latest version. Defaults to false. + // +kubebuilder:validation:Optional + ExcludeFromLatest *bool `json:"excludeFromLatest,omitempty" tf:"exclude_from_latest,omitempty"` + + // The ID of the Gallery Application. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/compute/v1beta1.GalleryApplication + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + GalleryApplicationID *string `json:"galleryApplicationId,omitempty" tf:"gallery_application_id,omitempty"` + + // Reference to a GalleryApplication in compute to populate galleryApplicationId. + // +kubebuilder:validation:Optional + GalleryApplicationIDRef *v1.Reference `json:"galleryApplicationIdRef,omitempty" tf:"-"` + + // Selector for a GalleryApplication in compute to populate galleryApplicationId. + // +kubebuilder:validation:Optional + GalleryApplicationIDSelector *v1.Selector `json:"galleryApplicationIdSelector,omitempty" tf:"-"` + + // The Azure Region where the Gallery Application Version exists. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A manage_action block as defined below. + // +kubebuilder:validation:Optional + ManageAction *ManageActionParameters `json:"manageAction,omitempty" tf:"manage_action,omitempty"` + + // The version name of the Gallery Application Version, such as 1.0.0. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies the name of the package file on the VM. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + PackageFile *string `json:"packageFile,omitempty" tf:"package_file,omitempty"` + + // A source block as defined below. + // +kubebuilder:validation:Optional + Source *SourceParameters `json:"source,omitempty" tf:"source,omitempty"` + + // A mapping of tags to assign to the Gallery Application Version. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // One or more target_region blocks as defined below. + // +kubebuilder:validation:Optional + TargetRegion []TargetRegionParameters `json:"targetRegion,omitempty" tf:"target_region,omitempty"` +} + +type ManageActionInitParameters struct { + + // The command to install the Gallery Application. Changing this forces a new resource to be created. + Install *string `json:"install,omitempty" tf:"install,omitempty"` + + // The command to remove the Gallery Application. Changing this forces a new resource to be created. + Remove *string `json:"remove,omitempty" tf:"remove,omitempty"` + + // The command to update the Gallery Application. Changing this forces a new resource to be created. + Update *string `json:"update,omitempty" tf:"update,omitempty"` +} + +type ManageActionObservation struct { + + // The command to install the Gallery Application. Changing this forces a new resource to be created. + Install *string `json:"install,omitempty" tf:"install,omitempty"` + + // The command to remove the Gallery Application. Changing this forces a new resource to be created. + Remove *string `json:"remove,omitempty" tf:"remove,omitempty"` + + // The command to update the Gallery Application. Changing this forces a new resource to be created. + Update *string `json:"update,omitempty" tf:"update,omitempty"` +} + +type ManageActionParameters struct { + + // The command to install the Gallery Application. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Install *string `json:"install" tf:"install,omitempty"` + + // The command to remove the Gallery Application. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Remove *string `json:"remove" tf:"remove,omitempty"` + + // The command to update the Gallery Application. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + Update *string `json:"update,omitempty" tf:"update,omitempty"` +} + +type SourceInitParameters struct { + + // The Storage Blob URI of the default configuration. Changing this forces a new resource to be created. + DefaultConfigurationLink *string `json:"defaultConfigurationLink,omitempty" tf:"default_configuration_link,omitempty"` + + // The Storage Blob URI of the source application package. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Blob + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + MediaLink *string `json:"mediaLink,omitempty" tf:"media_link,omitempty"` + + // Reference to a Blob in storage to populate mediaLink. + // +kubebuilder:validation:Optional + MediaLinkRef *v1.Reference `json:"mediaLinkRef,omitempty" tf:"-"` + + // Selector for a Blob in storage to populate mediaLink. + // +kubebuilder:validation:Optional + MediaLinkSelector *v1.Selector `json:"mediaLinkSelector,omitempty" tf:"-"` +} + +type SourceObservation struct { + + // The Storage Blob URI of the default configuration. Changing this forces a new resource to be created. + DefaultConfigurationLink *string `json:"defaultConfigurationLink,omitempty" tf:"default_configuration_link,omitempty"` + + // The Storage Blob URI of the source application package. Changing this forces a new resource to be created. + MediaLink *string `json:"mediaLink,omitempty" tf:"media_link,omitempty"` +} + +type SourceParameters struct { + + // The Storage Blob URI of the default configuration. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + DefaultConfigurationLink *string `json:"defaultConfigurationLink,omitempty" tf:"default_configuration_link,omitempty"` + + // The Storage Blob URI of the source application package. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Blob + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + MediaLink *string `json:"mediaLink,omitempty" tf:"media_link,omitempty"` + + // Reference to a Blob in storage to populate mediaLink. + // +kubebuilder:validation:Optional + MediaLinkRef *v1.Reference `json:"mediaLinkRef,omitempty" tf:"-"` + + // Selector for a Blob in storage to populate mediaLink. + // +kubebuilder:validation:Optional + MediaLinkSelector *v1.Selector `json:"mediaLinkSelector,omitempty" tf:"-"` +} + +type TargetRegionInitParameters struct { + + // Specifies whether this Gallery Application Version should be excluded from the latest filter. If set to true, this Gallery Application Version won't be returned for the latest version. Defaults to false. + ExcludeFromLatest *bool `json:"excludeFromLatest,omitempty" tf:"exclude_from_latest,omitempty"` + + // The Azure Region in which the Gallery Application Version exists. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/compute/v1beta1.GalleryApplication + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("location",false) + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Reference to a GalleryApplication in compute to populate name. + // +kubebuilder:validation:Optional + NameRef *v1.Reference `json:"nameRef,omitempty" tf:"-"` + + // Selector for a GalleryApplication in compute to populate name. + // +kubebuilder:validation:Optional + NameSelector *v1.Selector `json:"nameSelector,omitempty" tf:"-"` + + // The number of replicas of the Gallery Application Version to be created per region. Possible values are between 1 and 10. 
+ RegionalReplicaCount *float64 `json:"regionalReplicaCount,omitempty" tf:"regional_replica_count,omitempty"` + + // The storage account type for the Gallery Application Version. Possible values are Standard_LRS, Premium_LRS and Standard_ZRS. Defaults to Standard_LRS. + StorageAccountType *string `json:"storageAccountType,omitempty" tf:"storage_account_type,omitempty"` +} + +type TargetRegionObservation struct { + + // Specifies whether this Gallery Application Version should be excluded from the latest filter. If set to true, this Gallery Application Version won't be returned for the latest version. Defaults to false. + ExcludeFromLatest *bool `json:"excludeFromLatest,omitempty" tf:"exclude_from_latest,omitempty"` + + // The Azure Region in which the Gallery Application Version exists. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The number of replicas of the Gallery Application Version to be created per region. Possible values are between 1 and 10. + RegionalReplicaCount *float64 `json:"regionalReplicaCount,omitempty" tf:"regional_replica_count,omitempty"` + + // The storage account type for the Gallery Application Version. Possible values are Standard_LRS, Premium_LRS and Standard_ZRS. Defaults to Standard_LRS. + StorageAccountType *string `json:"storageAccountType,omitempty" tf:"storage_account_type,omitempty"` +} + +type TargetRegionParameters struct { + + // Specifies whether this Gallery Application Version should be excluded from the latest filter. If set to true, this Gallery Application Version won't be returned for the latest version. Defaults to false. + // +kubebuilder:validation:Optional + ExcludeFromLatest *bool `json:"excludeFromLatest,omitempty" tf:"exclude_from_latest,omitempty"` + + // The Azure Region in which the Gallery Application Version exists. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/compute/v1beta1.GalleryApplication + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("location",false) + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Reference to a GalleryApplication in compute to populate name. + // +kubebuilder:validation:Optional + NameRef *v1.Reference `json:"nameRef,omitempty" tf:"-"` + + // Selector for a GalleryApplication in compute to populate name. + // +kubebuilder:validation:Optional + NameSelector *v1.Selector `json:"nameSelector,omitempty" tf:"-"` + + // The number of replicas of the Gallery Application Version to be created per region. Possible values are between 1 and 10. + // +kubebuilder:validation:Optional + RegionalReplicaCount *float64 `json:"regionalReplicaCount" tf:"regional_replica_count,omitempty"` + + // The storage account type for the Gallery Application Version. Possible values are Standard_LRS, Premium_LRS and Standard_ZRS. Defaults to Standard_LRS. + // +kubebuilder:validation:Optional + StorageAccountType *string `json:"storageAccountType,omitempty" tf:"storage_account_type,omitempty"` +} + +// GalleryApplicationVersionSpec defines the desired state of GalleryApplicationVersion +type GalleryApplicationVersionSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider GalleryApplicationVersionParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider GalleryApplicationVersionInitParameters `json:"initProvider,omitempty"` +} + +// GalleryApplicationVersionStatus defines the observed state of GalleryApplicationVersion. +type GalleryApplicationVersionStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider GalleryApplicationVersionObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// GalleryApplicationVersion is the Schema for the GalleryApplicationVersions API. Manages a Gallery Application Version. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type GalleryApplicationVersion struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.manageAction) || (has(self.initProvider) && 
has(self.initProvider.manageAction))",message="spec.forProvider.manageAction is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.source) || (has(self.initProvider) && has(self.initProvider.source))",message="spec.forProvider.source is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.targetRegion) || (has(self.initProvider) && has(self.initProvider.targetRegion))",message="spec.forProvider.targetRegion is a required parameter" + Spec GalleryApplicationVersionSpec `json:"spec"` + Status GalleryApplicationVersionStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// GalleryApplicationVersionList contains a list of GalleryApplicationVersions +type GalleryApplicationVersionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []GalleryApplicationVersion `json:"items"` +} + +// Repository type metadata. +var ( + GalleryApplicationVersion_Kind = "GalleryApplicationVersion" + GalleryApplicationVersion_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: GalleryApplicationVersion_Kind}.String() + GalleryApplicationVersion_KindAPIVersion = GalleryApplicationVersion_Kind + "." 
+ CRDGroupVersion.String() + GalleryApplicationVersion_GroupVersionKind = CRDGroupVersion.WithKind(GalleryApplicationVersion_Kind) +) + +func init() { + SchemeBuilder.Register(&GalleryApplicationVersion{}, &GalleryApplicationVersionList{}) +} diff --git a/apis/compute/v1beta2/zz_generated.conversion_hubs.go b/apis/compute/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 000000000..528393174 --- /dev/null +++ b/apis/compute/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,52 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *CapacityReservation) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *DiskEncryptionSet) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *GalleryApplicationVersion) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Image) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *LinuxVirtualMachine) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *LinuxVirtualMachineScaleSet) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *ManagedDisk) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *OrchestratedVirtualMachineScaleSet) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *SharedImage) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *SharedImageGallery) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Snapshot) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *VirtualMachineExtension) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *VirtualMachineRunCommand) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *WindowsVirtualMachine) Hub() {} + +// Hub marks this type as a conversion hub. 
+func (tr *WindowsVirtualMachineScaleSet) Hub() {} diff --git a/apis/compute/v1beta2/zz_generated.deepcopy.go b/apis/compute/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..f527e7cfe --- /dev/null +++ b/apis/compute/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,22039 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdditionalCapabilitiesInitParameters) DeepCopyInto(out *AdditionalCapabilitiesInitParameters) { + *out = *in + if in.UltraSsdEnabled != nil { + in, out := &in.UltraSsdEnabled, &out.UltraSsdEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdditionalCapabilitiesInitParameters. +func (in *AdditionalCapabilitiesInitParameters) DeepCopy() *AdditionalCapabilitiesInitParameters { + if in == nil { + return nil + } + out := new(AdditionalCapabilitiesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdditionalCapabilitiesObservation) DeepCopyInto(out *AdditionalCapabilitiesObservation) { + *out = *in + if in.UltraSsdEnabled != nil { + in, out := &in.UltraSsdEnabled, &out.UltraSsdEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdditionalCapabilitiesObservation. 
+func (in *AdditionalCapabilitiesObservation) DeepCopy() *AdditionalCapabilitiesObservation { + if in == nil { + return nil + } + out := new(AdditionalCapabilitiesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdditionalCapabilitiesParameters) DeepCopyInto(out *AdditionalCapabilitiesParameters) { + *out = *in + if in.UltraSsdEnabled != nil { + in, out := &in.UltraSsdEnabled, &out.UltraSsdEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdditionalCapabilitiesParameters. +func (in *AdditionalCapabilitiesParameters) DeepCopy() *AdditionalCapabilitiesParameters { + if in == nil { + return nil + } + out := new(AdditionalCapabilitiesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdditionalUnattendContentInitParameters) DeepCopyInto(out *AdditionalUnattendContentInitParameters) { + *out = *in + if in.Setting != nil { + in, out := &in.Setting, &out.Setting + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdditionalUnattendContentInitParameters. +func (in *AdditionalUnattendContentInitParameters) DeepCopy() *AdditionalUnattendContentInitParameters { + if in == nil { + return nil + } + out := new(AdditionalUnattendContentInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AdditionalUnattendContentObservation) DeepCopyInto(out *AdditionalUnattendContentObservation) { + *out = *in + if in.Setting != nil { + in, out := &in.Setting, &out.Setting + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdditionalUnattendContentObservation. +func (in *AdditionalUnattendContentObservation) DeepCopy() *AdditionalUnattendContentObservation { + if in == nil { + return nil + } + out := new(AdditionalUnattendContentObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdditionalUnattendContentParameters) DeepCopyInto(out *AdditionalUnattendContentParameters) { + *out = *in + out.ContentSecretRef = in.ContentSecretRef + if in.Setting != nil { + in, out := &in.Setting, &out.Setting + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdditionalUnattendContentParameters. +func (in *AdditionalUnattendContentParameters) DeepCopy() *AdditionalUnattendContentParameters { + if in == nil { + return nil + } + out := new(AdditionalUnattendContentParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdminSSHKeyInitParameters) DeepCopyInto(out *AdminSSHKeyInitParameters) { + *out = *in + if in.PublicKey != nil { + in, out := &in.PublicKey, &out.PublicKey + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdminSSHKeyInitParameters. 
+func (in *AdminSSHKeyInitParameters) DeepCopy() *AdminSSHKeyInitParameters { + if in == nil { + return nil + } + out := new(AdminSSHKeyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdminSSHKeyObservation) DeepCopyInto(out *AdminSSHKeyObservation) { + *out = *in + if in.PublicKey != nil { + in, out := &in.PublicKey, &out.PublicKey + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdminSSHKeyObservation. +func (in *AdminSSHKeyObservation) DeepCopy() *AdminSSHKeyObservation { + if in == nil { + return nil + } + out := new(AdminSSHKeyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdminSSHKeyParameters) DeepCopyInto(out *AdminSSHKeyParameters) { + *out = *in + if in.PublicKey != nil { + in, out := &in.PublicKey, &out.PublicKey + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdminSSHKeyParameters. +func (in *AdminSSHKeyParameters) DeepCopy() *AdminSSHKeyParameters { + if in == nil { + return nil + } + out := new(AdminSSHKeyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutomaticInstanceRepairInitParameters) DeepCopyInto(out *AutomaticInstanceRepairInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.GracePeriod != nil { + in, out := &in.GracePeriod, &out.GracePeriod + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutomaticInstanceRepairInitParameters. +func (in *AutomaticInstanceRepairInitParameters) DeepCopy() *AutomaticInstanceRepairInitParameters { + if in == nil { + return nil + } + out := new(AutomaticInstanceRepairInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutomaticInstanceRepairObservation) DeepCopyInto(out *AutomaticInstanceRepairObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.GracePeriod != nil { + in, out := &in.GracePeriod, &out.GracePeriod + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutomaticInstanceRepairObservation. +func (in *AutomaticInstanceRepairObservation) DeepCopy() *AutomaticInstanceRepairObservation { + if in == nil { + return nil + } + out := new(AutomaticInstanceRepairObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutomaticInstanceRepairParameters) DeepCopyInto(out *AutomaticInstanceRepairParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.GracePeriod != nil { + in, out := &in.GracePeriod, &out.GracePeriod + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutomaticInstanceRepairParameters. +func (in *AutomaticInstanceRepairParameters) DeepCopy() *AutomaticInstanceRepairParameters { + if in == nil { + return nil + } + out := new(AutomaticInstanceRepairParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutomaticOsUpgradePolicyInitParameters) DeepCopyInto(out *AutomaticOsUpgradePolicyInitParameters) { + *out = *in + if in.DisableAutomaticRollback != nil { + in, out := &in.DisableAutomaticRollback, &out.DisableAutomaticRollback + *out = new(bool) + **out = **in + } + if in.EnableAutomaticOsUpgrade != nil { + in, out := &in.EnableAutomaticOsUpgrade, &out.EnableAutomaticOsUpgrade + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutomaticOsUpgradePolicyInitParameters. +func (in *AutomaticOsUpgradePolicyInitParameters) DeepCopy() *AutomaticOsUpgradePolicyInitParameters { + if in == nil { + return nil + } + out := new(AutomaticOsUpgradePolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutomaticOsUpgradePolicyObservation) DeepCopyInto(out *AutomaticOsUpgradePolicyObservation) { + *out = *in + if in.DisableAutomaticRollback != nil { + in, out := &in.DisableAutomaticRollback, &out.DisableAutomaticRollback + *out = new(bool) + **out = **in + } + if in.EnableAutomaticOsUpgrade != nil { + in, out := &in.EnableAutomaticOsUpgrade, &out.EnableAutomaticOsUpgrade + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutomaticOsUpgradePolicyObservation. +func (in *AutomaticOsUpgradePolicyObservation) DeepCopy() *AutomaticOsUpgradePolicyObservation { + if in == nil { + return nil + } + out := new(AutomaticOsUpgradePolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutomaticOsUpgradePolicyParameters) DeepCopyInto(out *AutomaticOsUpgradePolicyParameters) { + *out = *in + if in.DisableAutomaticRollback != nil { + in, out := &in.DisableAutomaticRollback, &out.DisableAutomaticRollback + *out = new(bool) + **out = **in + } + if in.EnableAutomaticOsUpgrade != nil { + in, out := &in.EnableAutomaticOsUpgrade, &out.EnableAutomaticOsUpgrade + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutomaticOsUpgradePolicyParameters. +func (in *AutomaticOsUpgradePolicyParameters) DeepCopy() *AutomaticOsUpgradePolicyParameters { + if in == nil { + return nil + } + out := new(AutomaticOsUpgradePolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BootDiagnosticsInitParameters) DeepCopyInto(out *BootDiagnosticsInitParameters) { + *out = *in + if in.StorageAccountURI != nil { + in, out := &in.StorageAccountURI, &out.StorageAccountURI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BootDiagnosticsInitParameters. +func (in *BootDiagnosticsInitParameters) DeepCopy() *BootDiagnosticsInitParameters { + if in == nil { + return nil + } + out := new(BootDiagnosticsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BootDiagnosticsObservation) DeepCopyInto(out *BootDiagnosticsObservation) { + *out = *in + if in.StorageAccountURI != nil { + in, out := &in.StorageAccountURI, &out.StorageAccountURI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BootDiagnosticsObservation. +func (in *BootDiagnosticsObservation) DeepCopy() *BootDiagnosticsObservation { + if in == nil { + return nil + } + out := new(BootDiagnosticsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BootDiagnosticsParameters) DeepCopyInto(out *BootDiagnosticsParameters) { + *out = *in + if in.StorageAccountURI != nil { + in, out := &in.StorageAccountURI, &out.StorageAccountURI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BootDiagnosticsParameters. +func (in *BootDiagnosticsParameters) DeepCopy() *BootDiagnosticsParameters { + if in == nil { + return nil + } + out := new(BootDiagnosticsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *CapacityReservation) DeepCopyInto(out *CapacityReservation) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacityReservation. +func (in *CapacityReservation) DeepCopy() *CapacityReservation { + if in == nil { + return nil + } + out := new(CapacityReservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CapacityReservation) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CapacityReservationInitParameters) DeepCopyInto(out *CapacityReservationInitParameters) { + *out = *in + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(SkuInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacityReservationInitParameters. +func (in *CapacityReservationInitParameters) DeepCopy() *CapacityReservationInitParameters { + if in == nil { + return nil + } + out := new(CapacityReservationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *CapacityReservationList) DeepCopyInto(out *CapacityReservationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CapacityReservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacityReservationList. +func (in *CapacityReservationList) DeepCopy() *CapacityReservationList { + if in == nil { + return nil + } + out := new(CapacityReservationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CapacityReservationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CapacityReservationObservation) DeepCopyInto(out *CapacityReservationObservation) { + *out = *in + if in.CapacityReservationGroupID != nil { + in, out := &in.CapacityReservationGroupID, &out.CapacityReservationGroupID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(SkuObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacityReservationObservation. +func (in *CapacityReservationObservation) DeepCopy() *CapacityReservationObservation { + if in == nil { + return nil + } + out := new(CapacityReservationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CapacityReservationParameters) DeepCopyInto(out *CapacityReservationParameters) { + *out = *in + if in.CapacityReservationGroupID != nil { + in, out := &in.CapacityReservationGroupID, &out.CapacityReservationGroupID + *out = new(string) + **out = **in + } + if in.CapacityReservationGroupIDRef != nil { + in, out := &in.CapacityReservationGroupIDRef, &out.CapacityReservationGroupIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CapacityReservationGroupIDSelector != nil { + in, out := &in.CapacityReservationGroupIDSelector, &out.CapacityReservationGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(SkuParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacityReservationParameters. +func (in *CapacityReservationParameters) DeepCopy() *CapacityReservationParameters { + if in == nil { + return nil + } + out := new(CapacityReservationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CapacityReservationSpec) DeepCopyInto(out *CapacityReservationSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacityReservationSpec. 
+func (in *CapacityReservationSpec) DeepCopy() *CapacityReservationSpec { + if in == nil { + return nil + } + out := new(CapacityReservationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CapacityReservationStatus) DeepCopyInto(out *CapacityReservationStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacityReservationStatus. +func (in *CapacityReservationStatus) DeepCopy() *CapacityReservationStatus { + if in == nil { + return nil + } + out := new(CapacityReservationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateInitParameters) DeepCopyInto(out *CertificateInitParameters) { + *out = *in + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateInitParameters. +func (in *CertificateInitParameters) DeepCopy() *CertificateInitParameters { + if in == nil { + return nil + } + out := new(CertificateInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateObservation) DeepCopyInto(out *CertificateObservation) { + *out = *in + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateObservation. 
+func (in *CertificateObservation) DeepCopy() *CertificateObservation { + if in == nil { + return nil + } + out := new(CertificateObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateParameters) DeepCopyInto(out *CertificateParameters) { + *out = *in + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateParameters. +func (in *CertificateParameters) DeepCopy() *CertificateParameters { + if in == nil { + return nil + } + out := new(CertificateParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CommunityGalleryInitParameters) DeepCopyInto(out *CommunityGalleryInitParameters) { + *out = *in + if in.Eula != nil { + in, out := &in.Eula, &out.Eula + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.PublisherEmail != nil { + in, out := &in.PublisherEmail, &out.PublisherEmail + *out = new(string) + **out = **in + } + if in.PublisherURI != nil { + in, out := &in.PublisherURI, &out.PublisherURI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommunityGalleryInitParameters. +func (in *CommunityGalleryInitParameters) DeepCopy() *CommunityGalleryInitParameters { + if in == nil { + return nil + } + out := new(CommunityGalleryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CommunityGalleryObservation) DeepCopyInto(out *CommunityGalleryObservation) { + *out = *in + if in.Eula != nil { + in, out := &in.Eula, &out.Eula + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.PublisherEmail != nil { + in, out := &in.PublisherEmail, &out.PublisherEmail + *out = new(string) + **out = **in + } + if in.PublisherURI != nil { + in, out := &in.PublisherURI, &out.PublisherURI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommunityGalleryObservation. +func (in *CommunityGalleryObservation) DeepCopy() *CommunityGalleryObservation { + if in == nil { + return nil + } + out := new(CommunityGalleryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CommunityGalleryParameters) DeepCopyInto(out *CommunityGalleryParameters) { + *out = *in + if in.Eula != nil { + in, out := &in.Eula, &out.Eula + *out = new(string) + **out = **in + } + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.PublisherEmail != nil { + in, out := &in.PublisherEmail, &out.PublisherEmail + *out = new(string) + **out = **in + } + if in.PublisherURI != nil { + in, out := &in.PublisherURI, &out.PublisherURI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommunityGalleryParameters. 
+func (in *CommunityGalleryParameters) DeepCopy() *CommunityGalleryParameters { + if in == nil { + return nil + } + out := new(CommunityGalleryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataDiskInitParameters) DeepCopyInto(out *DataDiskInitParameters) { + *out = *in + if in.BlobURI != nil { + in, out := &in.BlobURI, &out.BlobURI + *out = new(string) + **out = **in + } + if in.Caching != nil { + in, out := &in.Caching, &out.Caching + *out = new(string) + **out = **in + } + if in.Lun != nil { + in, out := &in.Lun, &out.Lun + *out = new(float64) + **out = **in + } + if in.ManagedDiskID != nil { + in, out := &in.ManagedDiskID, &out.ManagedDiskID + *out = new(string) + **out = **in + } + if in.SizeGb != nil { + in, out := &in.SizeGb, &out.SizeGb + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataDiskInitParameters. +func (in *DataDiskInitParameters) DeepCopy() *DataDiskInitParameters { + if in == nil { + return nil + } + out := new(DataDiskInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataDiskObservation) DeepCopyInto(out *DataDiskObservation) { + *out = *in + if in.BlobURI != nil { + in, out := &in.BlobURI, &out.BlobURI + *out = new(string) + **out = **in + } + if in.Caching != nil { + in, out := &in.Caching, &out.Caching + *out = new(string) + **out = **in + } + if in.Lun != nil { + in, out := &in.Lun, &out.Lun + *out = new(float64) + **out = **in + } + if in.ManagedDiskID != nil { + in, out := &in.ManagedDiskID, &out.ManagedDiskID + *out = new(string) + **out = **in + } + if in.SizeGb != nil { + in, out := &in.SizeGb, &out.SizeGb + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataDiskObservation. +func (in *DataDiskObservation) DeepCopy() *DataDiskObservation { + if in == nil { + return nil + } + out := new(DataDiskObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataDiskParameters) DeepCopyInto(out *DataDiskParameters) { + *out = *in + if in.BlobURI != nil { + in, out := &in.BlobURI, &out.BlobURI + *out = new(string) + **out = **in + } + if in.Caching != nil { + in, out := &in.Caching, &out.Caching + *out = new(string) + **out = **in + } + if in.Lun != nil { + in, out := &in.Lun, &out.Lun + *out = new(float64) + **out = **in + } + if in.ManagedDiskID != nil { + in, out := &in.ManagedDiskID, &out.ManagedDiskID + *out = new(string) + **out = **in + } + if in.SizeGb != nil { + in, out := &in.SizeGb, &out.SizeGb + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataDiskParameters. 
+func (in *DataDiskParameters) DeepCopy() *DataDiskParameters { + if in == nil { + return nil + } + out := new(DataDiskParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiffDiskSettingsInitParameters) DeepCopyInto(out *DiffDiskSettingsInitParameters) { + *out = *in + if in.Option != nil { + in, out := &in.Option, &out.Option + *out = new(string) + **out = **in + } + if in.Placement != nil { + in, out := &in.Placement, &out.Placement + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiffDiskSettingsInitParameters. +func (in *DiffDiskSettingsInitParameters) DeepCopy() *DiffDiskSettingsInitParameters { + if in == nil { + return nil + } + out := new(DiffDiskSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiffDiskSettingsObservation) DeepCopyInto(out *DiffDiskSettingsObservation) { + *out = *in + if in.Option != nil { + in, out := &in.Option, &out.Option + *out = new(string) + **out = **in + } + if in.Placement != nil { + in, out := &in.Placement, &out.Placement + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiffDiskSettingsObservation. +func (in *DiffDiskSettingsObservation) DeepCopy() *DiffDiskSettingsObservation { + if in == nil { + return nil + } + out := new(DiffDiskSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DiffDiskSettingsParameters) DeepCopyInto(out *DiffDiskSettingsParameters) { + *out = *in + if in.Option != nil { + in, out := &in.Option, &out.Option + *out = new(string) + **out = **in + } + if in.Placement != nil { + in, out := &in.Placement, &out.Placement + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiffDiskSettingsParameters. +func (in *DiffDiskSettingsParameters) DeepCopy() *DiffDiskSettingsParameters { + if in == nil { + return nil + } + out := new(DiffDiskSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiskEncryptionKeyInitParameters) DeepCopyInto(out *DiskEncryptionKeyInitParameters) { + *out = *in + if in.SecretURL != nil { + in, out := &in.SecretURL, &out.SecretURL + *out = new(string) + **out = **in + } + if in.SourceVaultID != nil { + in, out := &in.SourceVaultID, &out.SourceVaultID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskEncryptionKeyInitParameters. +func (in *DiskEncryptionKeyInitParameters) DeepCopy() *DiskEncryptionKeyInitParameters { + if in == nil { + return nil + } + out := new(DiskEncryptionKeyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DiskEncryptionKeyObservation) DeepCopyInto(out *DiskEncryptionKeyObservation) { + *out = *in + if in.SecretURL != nil { + in, out := &in.SecretURL, &out.SecretURL + *out = new(string) + **out = **in + } + if in.SourceVaultID != nil { + in, out := &in.SourceVaultID, &out.SourceVaultID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskEncryptionKeyObservation. +func (in *DiskEncryptionKeyObservation) DeepCopy() *DiskEncryptionKeyObservation { + if in == nil { + return nil + } + out := new(DiskEncryptionKeyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiskEncryptionKeyParameters) DeepCopyInto(out *DiskEncryptionKeyParameters) { + *out = *in + if in.SecretURL != nil { + in, out := &in.SecretURL, &out.SecretURL + *out = new(string) + **out = **in + } + if in.SourceVaultID != nil { + in, out := &in.SourceVaultID, &out.SourceVaultID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskEncryptionKeyParameters. +func (in *DiskEncryptionKeyParameters) DeepCopy() *DiskEncryptionKeyParameters { + if in == nil { + return nil + } + out := new(DiskEncryptionKeyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiskEncryptionSet) DeepCopyInto(out *DiskEncryptionSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskEncryptionSet. 
+func (in *DiskEncryptionSet) DeepCopy() *DiskEncryptionSet { + if in == nil { + return nil + } + out := new(DiskEncryptionSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DiskEncryptionSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiskEncryptionSetInitParameters) DeepCopyInto(out *DiskEncryptionSetInitParameters) { + *out = *in + if in.AutoKeyRotationEnabled != nil { + in, out := &in.AutoKeyRotationEnabled, &out.AutoKeyRotationEnabled + *out = new(bool) + **out = **in + } + if in.EncryptionType != nil { + in, out := &in.EncryptionType, &out.EncryptionType + *out = new(string) + **out = **in + } + if in.FederatedClientID != nil { + in, out := &in.FederatedClientID, &out.FederatedClientID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.KeyVaultKeyID != nil { + in, out := &in.KeyVaultKeyID, &out.KeyVaultKeyID + *out = new(string) + **out = **in + } + if in.KeyVaultKeyIDRef != nil { + in, out := &in.KeyVaultKeyIDRef, &out.KeyVaultKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KeyVaultKeyIDSelector != nil { + in, out := &in.KeyVaultKeyIDSelector, &out.KeyVaultKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out 
= **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskEncryptionSetInitParameters. +func (in *DiskEncryptionSetInitParameters) DeepCopy() *DiskEncryptionSetInitParameters { + if in == nil { + return nil + } + out := new(DiskEncryptionSetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiskEncryptionSetList) DeepCopyInto(out *DiskEncryptionSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DiskEncryptionSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskEncryptionSetList. +func (in *DiskEncryptionSetList) DeepCopy() *DiskEncryptionSetList { + if in == nil { + return nil + } + out := new(DiskEncryptionSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DiskEncryptionSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DiskEncryptionSetObservation) DeepCopyInto(out *DiskEncryptionSetObservation) { + *out = *in + if in.AutoKeyRotationEnabled != nil { + in, out := &in.AutoKeyRotationEnabled, &out.AutoKeyRotationEnabled + *out = new(bool) + **out = **in + } + if in.EncryptionType != nil { + in, out := &in.EncryptionType, &out.EncryptionType + *out = new(string) + **out = **in + } + if in.FederatedClientID != nil { + in, out := &in.FederatedClientID, &out.FederatedClientID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.KeyVaultKeyID != nil { + in, out := &in.KeyVaultKeyID, &out.KeyVaultKeyID + *out = new(string) + **out = **in + } + if in.KeyVaultKeyURL != nil { + in, out := &in.KeyVaultKeyURL, &out.KeyVaultKeyURL + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskEncryptionSetObservation. +func (in *DiskEncryptionSetObservation) DeepCopy() *DiskEncryptionSetObservation { + if in == nil { + return nil + } + out := new(DiskEncryptionSetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DiskEncryptionSetParameters) DeepCopyInto(out *DiskEncryptionSetParameters) { + *out = *in + if in.AutoKeyRotationEnabled != nil { + in, out := &in.AutoKeyRotationEnabled, &out.AutoKeyRotationEnabled + *out = new(bool) + **out = **in + } + if in.EncryptionType != nil { + in, out := &in.EncryptionType, &out.EncryptionType + *out = new(string) + **out = **in + } + if in.FederatedClientID != nil { + in, out := &in.FederatedClientID, &out.FederatedClientID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.KeyVaultKeyID != nil { + in, out := &in.KeyVaultKeyID, &out.KeyVaultKeyID + *out = new(string) + **out = **in + } + if in.KeyVaultKeyIDRef != nil { + in, out := &in.KeyVaultKeyIDRef, &out.KeyVaultKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KeyVaultKeyIDSelector != nil { + in, out := &in.KeyVaultKeyIDSelector, &out.KeyVaultKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an 
autogenerated deepcopy function, copying the receiver, creating a new DiskEncryptionSetParameters. +func (in *DiskEncryptionSetParameters) DeepCopy() *DiskEncryptionSetParameters { + if in == nil { + return nil + } + out := new(DiskEncryptionSetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiskEncryptionSetSpec) DeepCopyInto(out *DiskEncryptionSetSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskEncryptionSetSpec. +func (in *DiskEncryptionSetSpec) DeepCopy() *DiskEncryptionSetSpec { + if in == nil { + return nil + } + out := new(DiskEncryptionSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiskEncryptionSetStatus) DeepCopyInto(out *DiskEncryptionSetStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskEncryptionSetStatus. +func (in *DiskEncryptionSetStatus) DeepCopy() *DiskEncryptionSetStatus { + if in == nil { + return nil + } + out := new(DiskEncryptionSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EncryptionSettingsDiskEncryptionKeyInitParameters) DeepCopyInto(out *EncryptionSettingsDiskEncryptionKeyInitParameters) { + *out = *in + if in.SecretURL != nil { + in, out := &in.SecretURL, &out.SecretURL + *out = new(string) + **out = **in + } + if in.SourceVaultID != nil { + in, out := &in.SourceVaultID, &out.SourceVaultID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionSettingsDiskEncryptionKeyInitParameters. +func (in *EncryptionSettingsDiskEncryptionKeyInitParameters) DeepCopy() *EncryptionSettingsDiskEncryptionKeyInitParameters { + if in == nil { + return nil + } + out := new(EncryptionSettingsDiskEncryptionKeyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionSettingsDiskEncryptionKeyObservation) DeepCopyInto(out *EncryptionSettingsDiskEncryptionKeyObservation) { + *out = *in + if in.SecretURL != nil { + in, out := &in.SecretURL, &out.SecretURL + *out = new(string) + **out = **in + } + if in.SourceVaultID != nil { + in, out := &in.SourceVaultID, &out.SourceVaultID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionSettingsDiskEncryptionKeyObservation. +func (in *EncryptionSettingsDiskEncryptionKeyObservation) DeepCopy() *EncryptionSettingsDiskEncryptionKeyObservation { + if in == nil { + return nil + } + out := new(EncryptionSettingsDiskEncryptionKeyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EncryptionSettingsDiskEncryptionKeyParameters) DeepCopyInto(out *EncryptionSettingsDiskEncryptionKeyParameters) { + *out = *in + if in.SecretURL != nil { + in, out := &in.SecretURL, &out.SecretURL + *out = new(string) + **out = **in + } + if in.SourceVaultID != nil { + in, out := &in.SourceVaultID, &out.SourceVaultID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionSettingsDiskEncryptionKeyParameters. +func (in *EncryptionSettingsDiskEncryptionKeyParameters) DeepCopy() *EncryptionSettingsDiskEncryptionKeyParameters { + if in == nil { + return nil + } + out := new(EncryptionSettingsDiskEncryptionKeyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionSettingsInitParameters) DeepCopyInto(out *EncryptionSettingsInitParameters) { + *out = *in + if in.DiskEncryptionKey != nil { + in, out := &in.DiskEncryptionKey, &out.DiskEncryptionKey + *out = new(DiskEncryptionKeyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.KeyEncryptionKey != nil { + in, out := &in.KeyEncryptionKey, &out.KeyEncryptionKey + *out = new(KeyEncryptionKeyInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionSettingsInitParameters. +func (in *EncryptionSettingsInitParameters) DeepCopy() *EncryptionSettingsInitParameters { + if in == nil { + return nil + } + out := new(EncryptionSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EncryptionSettingsKeyEncryptionKeyInitParameters) DeepCopyInto(out *EncryptionSettingsKeyEncryptionKeyInitParameters) { + *out = *in + if in.KeyURL != nil { + in, out := &in.KeyURL, &out.KeyURL + *out = new(string) + **out = **in + } + if in.SourceVaultID != nil { + in, out := &in.SourceVaultID, &out.SourceVaultID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionSettingsKeyEncryptionKeyInitParameters. +func (in *EncryptionSettingsKeyEncryptionKeyInitParameters) DeepCopy() *EncryptionSettingsKeyEncryptionKeyInitParameters { + if in == nil { + return nil + } + out := new(EncryptionSettingsKeyEncryptionKeyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionSettingsKeyEncryptionKeyObservation) DeepCopyInto(out *EncryptionSettingsKeyEncryptionKeyObservation) { + *out = *in + if in.KeyURL != nil { + in, out := &in.KeyURL, &out.KeyURL + *out = new(string) + **out = **in + } + if in.SourceVaultID != nil { + in, out := &in.SourceVaultID, &out.SourceVaultID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionSettingsKeyEncryptionKeyObservation. +func (in *EncryptionSettingsKeyEncryptionKeyObservation) DeepCopy() *EncryptionSettingsKeyEncryptionKeyObservation { + if in == nil { + return nil + } + out := new(EncryptionSettingsKeyEncryptionKeyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EncryptionSettingsKeyEncryptionKeyParameters) DeepCopyInto(out *EncryptionSettingsKeyEncryptionKeyParameters) { + *out = *in + if in.KeyURL != nil { + in, out := &in.KeyURL, &out.KeyURL + *out = new(string) + **out = **in + } + if in.SourceVaultID != nil { + in, out := &in.SourceVaultID, &out.SourceVaultID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionSettingsKeyEncryptionKeyParameters. +func (in *EncryptionSettingsKeyEncryptionKeyParameters) DeepCopy() *EncryptionSettingsKeyEncryptionKeyParameters { + if in == nil { + return nil + } + out := new(EncryptionSettingsKeyEncryptionKeyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionSettingsObservation) DeepCopyInto(out *EncryptionSettingsObservation) { + *out = *in + if in.DiskEncryptionKey != nil { + in, out := &in.DiskEncryptionKey, &out.DiskEncryptionKey + *out = new(DiskEncryptionKeyObservation) + (*in).DeepCopyInto(*out) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.KeyEncryptionKey != nil { + in, out := &in.KeyEncryptionKey, &out.KeyEncryptionKey + *out = new(KeyEncryptionKeyObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionSettingsObservation. +func (in *EncryptionSettingsObservation) DeepCopy() *EncryptionSettingsObservation { + if in == nil { + return nil + } + out := new(EncryptionSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EncryptionSettingsParameters) DeepCopyInto(out *EncryptionSettingsParameters) { + *out = *in + if in.DiskEncryptionKey != nil { + in, out := &in.DiskEncryptionKey, &out.DiskEncryptionKey + *out = new(DiskEncryptionKeyParameters) + (*in).DeepCopyInto(*out) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.KeyEncryptionKey != nil { + in, out := &in.KeyEncryptionKey, &out.KeyEncryptionKey + *out = new(KeyEncryptionKeyParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionSettingsParameters. +func (in *EncryptionSettingsParameters) DeepCopy() *EncryptionSettingsParameters { + if in == nil { + return nil + } + out := new(EncryptionSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ErrorBlobManagedIdentityInitParameters) DeepCopyInto(out *ErrorBlobManagedIdentityInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ErrorBlobManagedIdentityInitParameters. +func (in *ErrorBlobManagedIdentityInitParameters) DeepCopy() *ErrorBlobManagedIdentityInitParameters { + if in == nil { + return nil + } + out := new(ErrorBlobManagedIdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ErrorBlobManagedIdentityObservation) DeepCopyInto(out *ErrorBlobManagedIdentityObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ErrorBlobManagedIdentityObservation. 
+func (in *ErrorBlobManagedIdentityObservation) DeepCopy() *ErrorBlobManagedIdentityObservation { + if in == nil { + return nil + } + out := new(ErrorBlobManagedIdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ErrorBlobManagedIdentityParameters) DeepCopyInto(out *ErrorBlobManagedIdentityParameters) { + *out = *in + if in.ClientIDSecretRef != nil { + in, out := &in.ClientIDSecretRef, &out.ClientIDSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ObjectIDSecretRef != nil { + in, out := &in.ObjectIDSecretRef, &out.ObjectIDSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ErrorBlobManagedIdentityParameters. +func (in *ErrorBlobManagedIdentityParameters) DeepCopy() *ErrorBlobManagedIdentityParameters { + if in == nil { + return nil + } + out := new(ErrorBlobManagedIdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExtensionInitParameters) DeepCopyInto(out *ExtensionInitParameters) { + *out = *in + if in.AutoUpgradeMinorVersion != nil { + in, out := &in.AutoUpgradeMinorVersion, &out.AutoUpgradeMinorVersion + *out = new(bool) + **out = **in + } + if in.AutomaticUpgradeEnabled != nil { + in, out := &in.AutomaticUpgradeEnabled, &out.AutomaticUpgradeEnabled + *out = new(bool) + **out = **in + } + if in.ForceUpdateTag != nil { + in, out := &in.ForceUpdateTag, &out.ForceUpdateTag + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ProtectedSettingsFromKeyVault != nil { + in, out := &in.ProtectedSettingsFromKeyVault, &out.ProtectedSettingsFromKeyVault + *out = new(ProtectedSettingsFromKeyVaultInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ProvisionAfterExtensions != nil { + in, out := &in.ProvisionAfterExtensions, &out.ProvisionAfterExtensions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Publisher != nil { + in, out := &in.Publisher, &out.Publisher + *out = new(string) + **out = **in + } + if in.Settings != nil { + in, out := &in.Settings, &out.Settings + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.TypeHandlerVersion != nil { + in, out := &in.TypeHandlerVersion, &out.TypeHandlerVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtensionInitParameters. +func (in *ExtensionInitParameters) DeepCopy() *ExtensionInitParameters { + if in == nil { + return nil + } + out := new(ExtensionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExtensionObservation) DeepCopyInto(out *ExtensionObservation) { + *out = *in + if in.AutoUpgradeMinorVersion != nil { + in, out := &in.AutoUpgradeMinorVersion, &out.AutoUpgradeMinorVersion + *out = new(bool) + **out = **in + } + if in.AutomaticUpgradeEnabled != nil { + in, out := &in.AutomaticUpgradeEnabled, &out.AutomaticUpgradeEnabled + *out = new(bool) + **out = **in + } + if in.ForceUpdateTag != nil { + in, out := &in.ForceUpdateTag, &out.ForceUpdateTag + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ProtectedSettingsFromKeyVault != nil { + in, out := &in.ProtectedSettingsFromKeyVault, &out.ProtectedSettingsFromKeyVault + *out = new(ProtectedSettingsFromKeyVaultObservation) + (*in).DeepCopyInto(*out) + } + if in.ProvisionAfterExtensions != nil { + in, out := &in.ProvisionAfterExtensions, &out.ProvisionAfterExtensions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Publisher != nil { + in, out := &in.Publisher, &out.Publisher + *out = new(string) + **out = **in + } + if in.Settings != nil { + in, out := &in.Settings, &out.Settings + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.TypeHandlerVersion != nil { + in, out := &in.TypeHandlerVersion, &out.TypeHandlerVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtensionObservation. +func (in *ExtensionObservation) DeepCopy() *ExtensionObservation { + if in == nil { + return nil + } + out := new(ExtensionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExtensionParameters) DeepCopyInto(out *ExtensionParameters) { + *out = *in + if in.AutoUpgradeMinorVersion != nil { + in, out := &in.AutoUpgradeMinorVersion, &out.AutoUpgradeMinorVersion + *out = new(bool) + **out = **in + } + if in.AutomaticUpgradeEnabled != nil { + in, out := &in.AutomaticUpgradeEnabled, &out.AutomaticUpgradeEnabled + *out = new(bool) + **out = **in + } + if in.ForceUpdateTag != nil { + in, out := &in.ForceUpdateTag, &out.ForceUpdateTag + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ProtectedSettingsFromKeyVault != nil { + in, out := &in.ProtectedSettingsFromKeyVault, &out.ProtectedSettingsFromKeyVault + *out = new(ProtectedSettingsFromKeyVaultParameters) + (*in).DeepCopyInto(*out) + } + if in.ProtectedSettingsSecretRef != nil { + in, out := &in.ProtectedSettingsSecretRef, &out.ProtectedSettingsSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ProvisionAfterExtensions != nil { + in, out := &in.ProvisionAfterExtensions, &out.ProvisionAfterExtensions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Publisher != nil { + in, out := &in.Publisher, &out.Publisher + *out = new(string) + **out = **in + } + if in.Settings != nil { + in, out := &in.Settings, &out.Settings + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.TypeHandlerVersion != nil { + in, out := &in.TypeHandlerVersion, &out.TypeHandlerVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtensionParameters. 
+func (in *ExtensionParameters) DeepCopy() *ExtensionParameters { + if in == nil { + return nil + } + out := new(ExtensionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExtensionProtectedSettingsFromKeyVaultInitParameters) DeepCopyInto(out *ExtensionProtectedSettingsFromKeyVaultInitParameters) { + *out = *in + if in.SecretURL != nil { + in, out := &in.SecretURL, &out.SecretURL + *out = new(string) + **out = **in + } + if in.SourceVaultID != nil { + in, out := &in.SourceVaultID, &out.SourceVaultID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtensionProtectedSettingsFromKeyVaultInitParameters. +func (in *ExtensionProtectedSettingsFromKeyVaultInitParameters) DeepCopy() *ExtensionProtectedSettingsFromKeyVaultInitParameters { + if in == nil { + return nil + } + out := new(ExtensionProtectedSettingsFromKeyVaultInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExtensionProtectedSettingsFromKeyVaultObservation) DeepCopyInto(out *ExtensionProtectedSettingsFromKeyVaultObservation) { + *out = *in + if in.SecretURL != nil { + in, out := &in.SecretURL, &out.SecretURL + *out = new(string) + **out = **in + } + if in.SourceVaultID != nil { + in, out := &in.SourceVaultID, &out.SourceVaultID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtensionProtectedSettingsFromKeyVaultObservation. 
+func (in *ExtensionProtectedSettingsFromKeyVaultObservation) DeepCopy() *ExtensionProtectedSettingsFromKeyVaultObservation { + if in == nil { + return nil + } + out := new(ExtensionProtectedSettingsFromKeyVaultObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExtensionProtectedSettingsFromKeyVaultParameters) DeepCopyInto(out *ExtensionProtectedSettingsFromKeyVaultParameters) { + *out = *in + if in.SecretURL != nil { + in, out := &in.SecretURL, &out.SecretURL + *out = new(string) + **out = **in + } + if in.SourceVaultID != nil { + in, out := &in.SourceVaultID, &out.SourceVaultID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtensionProtectedSettingsFromKeyVaultParameters. +func (in *ExtensionProtectedSettingsFromKeyVaultParameters) DeepCopy() *ExtensionProtectedSettingsFromKeyVaultParameters { + if in == nil { + return nil + } + out := new(ExtensionProtectedSettingsFromKeyVaultParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GalleryApplicationInitParameters) DeepCopyInto(out *GalleryApplicationInitParameters) { + *out = *in + if in.AutomaticUpgradeEnabled != nil { + in, out := &in.AutomaticUpgradeEnabled, &out.AutomaticUpgradeEnabled + *out = new(bool) + **out = **in + } + if in.ConfigurationBlobURI != nil { + in, out := &in.ConfigurationBlobURI, &out.ConfigurationBlobURI + *out = new(string) + **out = **in + } + if in.Order != nil { + in, out := &in.Order, &out.Order + *out = new(float64) + **out = **in + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(string) + **out = **in + } + if in.TreatFailureAsDeploymentFailureEnabled != nil { + in, out := &in.TreatFailureAsDeploymentFailureEnabled, &out.TreatFailureAsDeploymentFailureEnabled + *out = new(bool) + **out = **in + } + if in.VersionID != nil { + in, out := &in.VersionID, &out.VersionID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GalleryApplicationInitParameters. +func (in *GalleryApplicationInitParameters) DeepCopy() *GalleryApplicationInitParameters { + if in == nil { + return nil + } + out := new(GalleryApplicationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GalleryApplicationObservation) DeepCopyInto(out *GalleryApplicationObservation) { + *out = *in + if in.AutomaticUpgradeEnabled != nil { + in, out := &in.AutomaticUpgradeEnabled, &out.AutomaticUpgradeEnabled + *out = new(bool) + **out = **in + } + if in.ConfigurationBlobURI != nil { + in, out := &in.ConfigurationBlobURI, &out.ConfigurationBlobURI + *out = new(string) + **out = **in + } + if in.Order != nil { + in, out := &in.Order, &out.Order + *out = new(float64) + **out = **in + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(string) + **out = **in + } + if in.TreatFailureAsDeploymentFailureEnabled != nil { + in, out := &in.TreatFailureAsDeploymentFailureEnabled, &out.TreatFailureAsDeploymentFailureEnabled + *out = new(bool) + **out = **in + } + if in.VersionID != nil { + in, out := &in.VersionID, &out.VersionID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GalleryApplicationObservation. +func (in *GalleryApplicationObservation) DeepCopy() *GalleryApplicationObservation { + if in == nil { + return nil + } + out := new(GalleryApplicationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GalleryApplicationParameters) DeepCopyInto(out *GalleryApplicationParameters) { + *out = *in + if in.AutomaticUpgradeEnabled != nil { + in, out := &in.AutomaticUpgradeEnabled, &out.AutomaticUpgradeEnabled + *out = new(bool) + **out = **in + } + if in.ConfigurationBlobURI != nil { + in, out := &in.ConfigurationBlobURI, &out.ConfigurationBlobURI + *out = new(string) + **out = **in + } + if in.Order != nil { + in, out := &in.Order, &out.Order + *out = new(float64) + **out = **in + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(string) + **out = **in + } + if in.TreatFailureAsDeploymentFailureEnabled != nil { + in, out := &in.TreatFailureAsDeploymentFailureEnabled, &out.TreatFailureAsDeploymentFailureEnabled + *out = new(bool) + **out = **in + } + if in.VersionID != nil { + in, out := &in.VersionID, &out.VersionID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GalleryApplicationParameters. +func (in *GalleryApplicationParameters) DeepCopy() *GalleryApplicationParameters { + if in == nil { + return nil + } + out := new(GalleryApplicationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GalleryApplicationVersion) DeepCopyInto(out *GalleryApplicationVersion) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GalleryApplicationVersion. 
+func (in *GalleryApplicationVersion) DeepCopy() *GalleryApplicationVersion { + if in == nil { + return nil + } + out := new(GalleryApplicationVersion) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GalleryApplicationVersion) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GalleryApplicationVersionInitParameters) DeepCopyInto(out *GalleryApplicationVersionInitParameters) { + *out = *in + if in.ConfigFile != nil { + in, out := &in.ConfigFile, &out.ConfigFile + *out = new(string) + **out = **in + } + if in.EnableHealthCheck != nil { + in, out := &in.EnableHealthCheck, &out.EnableHealthCheck + *out = new(bool) + **out = **in + } + if in.EndOfLifeDate != nil { + in, out := &in.EndOfLifeDate, &out.EndOfLifeDate + *out = new(string) + **out = **in + } + if in.ExcludeFromLatest != nil { + in, out := &in.ExcludeFromLatest, &out.ExcludeFromLatest + *out = new(bool) + **out = **in + } + if in.GalleryApplicationID != nil { + in, out := &in.GalleryApplicationID, &out.GalleryApplicationID + *out = new(string) + **out = **in + } + if in.GalleryApplicationIDRef != nil { + in, out := &in.GalleryApplicationIDRef, &out.GalleryApplicationIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.GalleryApplicationIDSelector != nil { + in, out := &in.GalleryApplicationIDSelector, &out.GalleryApplicationIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ManageAction != nil { + in, out := &in.ManageAction, &out.ManageAction + *out = new(ManageActionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + 
*out = new(string) + **out = **in + } + if in.PackageFile != nil { + in, out := &in.PackageFile, &out.PackageFile + *out = new(string) + **out = **in + } + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = new(SourceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TargetRegion != nil { + in, out := &in.TargetRegion, &out.TargetRegion + *out = make([]TargetRegionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GalleryApplicationVersionInitParameters. +func (in *GalleryApplicationVersionInitParameters) DeepCopy() *GalleryApplicationVersionInitParameters { + if in == nil { + return nil + } + out := new(GalleryApplicationVersionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GalleryApplicationVersionList) DeepCopyInto(out *GalleryApplicationVersionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]GalleryApplicationVersion, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GalleryApplicationVersionList. 
+func (in *GalleryApplicationVersionList) DeepCopy() *GalleryApplicationVersionList { + if in == nil { + return nil + } + out := new(GalleryApplicationVersionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GalleryApplicationVersionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GalleryApplicationVersionObservation) DeepCopyInto(out *GalleryApplicationVersionObservation) { + *out = *in + if in.ConfigFile != nil { + in, out := &in.ConfigFile, &out.ConfigFile + *out = new(string) + **out = **in + } + if in.EnableHealthCheck != nil { + in, out := &in.EnableHealthCheck, &out.EnableHealthCheck + *out = new(bool) + **out = **in + } + if in.EndOfLifeDate != nil { + in, out := &in.EndOfLifeDate, &out.EndOfLifeDate + *out = new(string) + **out = **in + } + if in.ExcludeFromLatest != nil { + in, out := &in.ExcludeFromLatest, &out.ExcludeFromLatest + *out = new(bool) + **out = **in + } + if in.GalleryApplicationID != nil { + in, out := &in.GalleryApplicationID, &out.GalleryApplicationID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ManageAction != nil { + in, out := &in.ManageAction, &out.ManageAction + *out = new(ManageActionObservation) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PackageFile != nil { + in, out := &in.PackageFile, &out.PackageFile + *out = new(string) + **out = **in + } + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = new(SourceObservation) + 
(*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TargetRegion != nil { + in, out := &in.TargetRegion, &out.TargetRegion + *out = make([]TargetRegionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GalleryApplicationVersionObservation. +func (in *GalleryApplicationVersionObservation) DeepCopy() *GalleryApplicationVersionObservation { + if in == nil { + return nil + } + out := new(GalleryApplicationVersionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GalleryApplicationVersionParameters) DeepCopyInto(out *GalleryApplicationVersionParameters) { + *out = *in + if in.ConfigFile != nil { + in, out := &in.ConfigFile, &out.ConfigFile + *out = new(string) + **out = **in + } + if in.EnableHealthCheck != nil { + in, out := &in.EnableHealthCheck, &out.EnableHealthCheck + *out = new(bool) + **out = **in + } + if in.EndOfLifeDate != nil { + in, out := &in.EndOfLifeDate, &out.EndOfLifeDate + *out = new(string) + **out = **in + } + if in.ExcludeFromLatest != nil { + in, out := &in.ExcludeFromLatest, &out.ExcludeFromLatest + *out = new(bool) + **out = **in + } + if in.GalleryApplicationID != nil { + in, out := &in.GalleryApplicationID, &out.GalleryApplicationID + *out = new(string) + **out = **in + } + if in.GalleryApplicationIDRef != nil { + in, out := &in.GalleryApplicationIDRef, &out.GalleryApplicationIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.GalleryApplicationIDSelector != nil { + in, out := &in.GalleryApplicationIDSelector, &out.GalleryApplicationIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ManageAction != nil { + in, out := &in.ManageAction, &out.ManageAction + *out = new(ManageActionParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PackageFile != nil { + in, out := &in.PackageFile, &out.PackageFile + *out = new(string) + **out = **in + } + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = new(SourceParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + 
(*out)[key] = outVal + } + } + if in.TargetRegion != nil { + in, out := &in.TargetRegion, &out.TargetRegion + *out = make([]TargetRegionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GalleryApplicationVersionParameters. +func (in *GalleryApplicationVersionParameters) DeepCopy() *GalleryApplicationVersionParameters { + if in == nil { + return nil + } + out := new(GalleryApplicationVersionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GalleryApplicationVersionSpec) DeepCopyInto(out *GalleryApplicationVersionSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GalleryApplicationVersionSpec. +func (in *GalleryApplicationVersionSpec) DeepCopy() *GalleryApplicationVersionSpec { + if in == nil { + return nil + } + out := new(GalleryApplicationVersionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GalleryApplicationVersionStatus) DeepCopyInto(out *GalleryApplicationVersionStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GalleryApplicationVersionStatus. 
+func (in *GalleryApplicationVersionStatus) DeepCopy() *GalleryApplicationVersionStatus { + if in == nil { + return nil + } + out := new(GalleryApplicationVersionStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GalleryApplicationsInitParameters) DeepCopyInto(out *GalleryApplicationsInitParameters) { + *out = *in + if in.ConfigurationReferenceBlobURI != nil { + in, out := &in.ConfigurationReferenceBlobURI, &out.ConfigurationReferenceBlobURI + *out = new(string) + **out = **in + } + if in.Order != nil { + in, out := &in.Order, &out.Order + *out = new(float64) + **out = **in + } + if in.PackageReferenceID != nil { + in, out := &in.PackageReferenceID, &out.PackageReferenceID + *out = new(string) + **out = **in + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GalleryApplicationsInitParameters. +func (in *GalleryApplicationsInitParameters) DeepCopy() *GalleryApplicationsInitParameters { + if in == nil { + return nil + } + out := new(GalleryApplicationsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GalleryApplicationsObservation) DeepCopyInto(out *GalleryApplicationsObservation) { + *out = *in + if in.ConfigurationReferenceBlobURI != nil { + in, out := &in.ConfigurationReferenceBlobURI, &out.ConfigurationReferenceBlobURI + *out = new(string) + **out = **in + } + if in.Order != nil { + in, out := &in.Order, &out.Order + *out = new(float64) + **out = **in + } + if in.PackageReferenceID != nil { + in, out := &in.PackageReferenceID, &out.PackageReferenceID + *out = new(string) + **out = **in + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GalleryApplicationsObservation. +func (in *GalleryApplicationsObservation) DeepCopy() *GalleryApplicationsObservation { + if in == nil { + return nil + } + out := new(GalleryApplicationsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GalleryApplicationsParameters) DeepCopyInto(out *GalleryApplicationsParameters) { + *out = *in + if in.ConfigurationReferenceBlobURI != nil { + in, out := &in.ConfigurationReferenceBlobURI, &out.ConfigurationReferenceBlobURI + *out = new(string) + **out = **in + } + if in.Order != nil { + in, out := &in.Order, &out.Order + *out = new(float64) + **out = **in + } + if in.PackageReferenceID != nil { + in, out := &in.PackageReferenceID, &out.PackageReferenceID + *out = new(string) + **out = **in + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GalleryApplicationsParameters. 
+func (in *GalleryApplicationsParameters) DeepCopy() *GalleryApplicationsParameters { + if in == nil { + return nil + } + out := new(GalleryApplicationsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPConfigurationInitParameters) DeepCopyInto(out *IPConfigurationInitParameters) { + *out = *in + if in.ApplicationGatewayBackendAddressPoolIds != nil { + in, out := &in.ApplicationGatewayBackendAddressPoolIds, &out.ApplicationGatewayBackendAddressPoolIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ApplicationSecurityGroupIds != nil { + in, out := &in.ApplicationSecurityGroupIds, &out.ApplicationSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LoadBalancerBackendAddressPoolIds != nil { + in, out := &in.LoadBalancerBackendAddressPoolIds, &out.LoadBalancerBackendAddressPoolIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LoadBalancerInboundNATRulesIds != nil { + in, out := &in.LoadBalancerInboundNATRulesIds, &out.LoadBalancerInboundNATRulesIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Primary != nil { + in, out := &in.Primary, &out.Primary + *out = new(bool) + **out = **in + } + if in.PublicIPAddress != nil { + in, out := &in.PublicIPAddress, &out.PublicIPAddress + *out = make([]PublicIPAddressInitParameters, len(*in)) + for i := 
range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPConfigurationInitParameters. +func (in *IPConfigurationInitParameters) DeepCopy() *IPConfigurationInitParameters { + if in == nil { + return nil + } + out := new(IPConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPConfigurationObservation) DeepCopyInto(out *IPConfigurationObservation) { + *out = *in + if in.ApplicationGatewayBackendAddressPoolIds != nil { + in, out := &in.ApplicationGatewayBackendAddressPoolIds, &out.ApplicationGatewayBackendAddressPoolIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ApplicationSecurityGroupIds != nil { + in, out := &in.ApplicationSecurityGroupIds, &out.ApplicationSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LoadBalancerBackendAddressPoolIds != nil { + in, out := &in.LoadBalancerBackendAddressPoolIds, &out.LoadBalancerBackendAddressPoolIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = 
**in + } + } + } + if in.LoadBalancerInboundNATRulesIds != nil { + in, out := &in.LoadBalancerInboundNATRulesIds, &out.LoadBalancerInboundNATRulesIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Primary != nil { + in, out := &in.Primary, &out.Primary + *out = new(bool) + **out = **in + } + if in.PublicIPAddress != nil { + in, out := &in.PublicIPAddress, &out.PublicIPAddress + *out = make([]PublicIPAddressObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPConfigurationObservation. +func (in *IPConfigurationObservation) DeepCopy() *IPConfigurationObservation { + if in == nil { + return nil + } + out := new(IPConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IPConfigurationParameters) DeepCopyInto(out *IPConfigurationParameters) { + *out = *in + if in.ApplicationGatewayBackendAddressPoolIds != nil { + in, out := &in.ApplicationGatewayBackendAddressPoolIds, &out.ApplicationGatewayBackendAddressPoolIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ApplicationSecurityGroupIds != nil { + in, out := &in.ApplicationSecurityGroupIds, &out.ApplicationSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LoadBalancerBackendAddressPoolIds != nil { + in, out := &in.LoadBalancerBackendAddressPoolIds, &out.LoadBalancerBackendAddressPoolIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LoadBalancerInboundNATRulesIds != nil { + in, out := &in.LoadBalancerInboundNATRulesIds, &out.LoadBalancerInboundNATRulesIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Primary != nil { + in, out := &in.Primary, &out.Primary + *out = new(bool) + **out = **in + } + if in.PublicIPAddress != nil { + in, out := &in.PublicIPAddress, &out.PublicIPAddress + *out = make([]PublicIPAddressParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, 
out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPConfigurationParameters. +func (in *IPConfigurationParameters) DeepCopy() *IPConfigurationParameters { + if in == nil { + return nil + } + out := new(IPConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPConfigurationPublicIPAddressIPTagInitParameters) DeepCopyInto(out *IPConfigurationPublicIPAddressIPTagInitParameters) { + *out = *in + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPConfigurationPublicIPAddressIPTagInitParameters. +func (in *IPConfigurationPublicIPAddressIPTagInitParameters) DeepCopy() *IPConfigurationPublicIPAddressIPTagInitParameters { + if in == nil { + return nil + } + out := new(IPConfigurationPublicIPAddressIPTagInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPConfigurationPublicIPAddressIPTagObservation) DeepCopyInto(out *IPConfigurationPublicIPAddressIPTagObservation) { + *out = *in + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPConfigurationPublicIPAddressIPTagObservation. 
+func (in *IPConfigurationPublicIPAddressIPTagObservation) DeepCopy() *IPConfigurationPublicIPAddressIPTagObservation { + if in == nil { + return nil + } + out := new(IPConfigurationPublicIPAddressIPTagObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPConfigurationPublicIPAddressIPTagParameters) DeepCopyInto(out *IPConfigurationPublicIPAddressIPTagParameters) { + *out = *in + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPConfigurationPublicIPAddressIPTagParameters. +func (in *IPConfigurationPublicIPAddressIPTagParameters) DeepCopy() *IPConfigurationPublicIPAddressIPTagParameters { + if in == nil { + return nil + } + out := new(IPConfigurationPublicIPAddressIPTagParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IPConfigurationPublicIPAddressInitParameters) DeepCopyInto(out *IPConfigurationPublicIPAddressInitParameters) { + *out = *in + if in.DomainNameLabel != nil { + in, out := &in.DomainNameLabel, &out.DomainNameLabel + *out = new(string) + **out = **in + } + if in.IPTag != nil { + in, out := &in.IPTag, &out.IPTag + *out = make([]PublicIPAddressIPTagInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IdleTimeoutInMinutes != nil { + in, out := &in.IdleTimeoutInMinutes, &out.IdleTimeoutInMinutes + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PublicIPPrefixID != nil { + in, out := &in.PublicIPPrefixID, &out.PublicIPPrefixID + *out = new(string) + **out = **in + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPConfigurationPublicIPAddressInitParameters. +func (in *IPConfigurationPublicIPAddressInitParameters) DeepCopy() *IPConfigurationPublicIPAddressInitParameters { + if in == nil { + return nil + } + out := new(IPConfigurationPublicIPAddressInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IPConfigurationPublicIPAddressObservation) DeepCopyInto(out *IPConfigurationPublicIPAddressObservation) { + *out = *in + if in.DomainNameLabel != nil { + in, out := &in.DomainNameLabel, &out.DomainNameLabel + *out = new(string) + **out = **in + } + if in.IPTag != nil { + in, out := &in.IPTag, &out.IPTag + *out = make([]PublicIPAddressIPTagObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IdleTimeoutInMinutes != nil { + in, out := &in.IdleTimeoutInMinutes, &out.IdleTimeoutInMinutes + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PublicIPPrefixID != nil { + in, out := &in.PublicIPPrefixID, &out.PublicIPPrefixID + *out = new(string) + **out = **in + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPConfigurationPublicIPAddressObservation. +func (in *IPConfigurationPublicIPAddressObservation) DeepCopy() *IPConfigurationPublicIPAddressObservation { + if in == nil { + return nil + } + out := new(IPConfigurationPublicIPAddressObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IPConfigurationPublicIPAddressParameters) DeepCopyInto(out *IPConfigurationPublicIPAddressParameters) { + *out = *in + if in.DomainNameLabel != nil { + in, out := &in.DomainNameLabel, &out.DomainNameLabel + *out = new(string) + **out = **in + } + if in.IPTag != nil { + in, out := &in.IPTag, &out.IPTag + *out = make([]PublicIPAddressIPTagParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IdleTimeoutInMinutes != nil { + in, out := &in.IdleTimeoutInMinutes, &out.IdleTimeoutInMinutes + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PublicIPPrefixID != nil { + in, out := &in.PublicIPPrefixID, &out.PublicIPPrefixID + *out = new(string) + **out = **in + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPConfigurationPublicIPAddressParameters. +func (in *IPConfigurationPublicIPAddressParameters) DeepCopy() *IPConfigurationPublicIPAddressParameters { + if in == nil { + return nil + } + out := new(IPConfigurationPublicIPAddressParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPTagInitParameters) DeepCopyInto(out *IPTagInitParameters) { + *out = *in + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPTagInitParameters. 
+func (in *IPTagInitParameters) DeepCopy() *IPTagInitParameters { + if in == nil { + return nil + } + out := new(IPTagInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPTagObservation) DeepCopyInto(out *IPTagObservation) { + *out = *in + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPTagObservation. +func (in *IPTagObservation) DeepCopy() *IPTagObservation { + if in == nil { + return nil + } + out := new(IPTagObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPTagParameters) DeepCopyInto(out *IPTagParameters) { + *out = *in + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPTagParameters. +func (in *IPTagParameters) DeepCopy() *IPTagParameters { + if in == nil { + return nil + } + out := new(IPTagParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentifierInitParameters) DeepCopyInto(out *IdentifierInitParameters) { + *out = *in + if in.Offer != nil { + in, out := &in.Offer, &out.Offer + *out = new(string) + **out = **in + } + if in.Publisher != nil { + in, out := &in.Publisher, &out.Publisher + *out = new(string) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentifierInitParameters. +func (in *IdentifierInitParameters) DeepCopy() *IdentifierInitParameters { + if in == nil { + return nil + } + out := new(IdentifierInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentifierObservation) DeepCopyInto(out *IdentifierObservation) { + *out = *in + if in.Offer != nil { + in, out := &in.Offer, &out.Offer + *out = new(string) + **out = **in + } + if in.Publisher != nil { + in, out := &in.Publisher, &out.Publisher + *out = new(string) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentifierObservation. +func (in *IdentifierObservation) DeepCopy() *IdentifierObservation { + if in == nil { + return nil + } + out := new(IdentifierObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentifierParameters) DeepCopyInto(out *IdentifierParameters) { + *out = *in + if in.Offer != nil { + in, out := &in.Offer, &out.Offer + *out = new(string) + **out = **in + } + if in.Publisher != nil { + in, out := &in.Publisher, &out.Publisher + *out = new(string) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentifierParameters. +func (in *IdentifierParameters) DeepCopy() *IdentifierParameters { + if in == nil { + return nil + } + out := new(IdentifierParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityInitParameters) DeepCopyInto(out *IdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityInitParameters. +func (in *IdentityInitParameters) DeepCopy() *IdentityInitParameters { + if in == nil { + return nil + } + out := new(IdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentityObservation) DeepCopyInto(out *IdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityObservation. +func (in *IdentityObservation) DeepCopy() *IdentityObservation { + if in == nil { + return nil + } + out := new(IdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityParameters) DeepCopyInto(out *IdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityParameters. +func (in *IdentityParameters) DeepCopy() *IdentityParameters { + if in == nil { + return nil + } + out := new(IdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Image) DeepCopyInto(out *Image) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Image. +func (in *Image) DeepCopy() *Image { + if in == nil { + return nil + } + out := new(Image) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Image) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageInitParameters) DeepCopyInto(out *ImageInitParameters) { + *out = *in + if in.DataDisk != nil { + in, out := &in.DataDisk, &out.DataDisk + *out = make([]DataDiskInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HyperVGeneration != nil { + in, out := &in.HyperVGeneration, &out.HyperVGeneration + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.OsDisk != nil { + in, out := &in.OsDisk, &out.OsDisk + *out = new(OsDiskInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SourceVirtualMachineID != nil { + in, out := &in.SourceVirtualMachineID, &out.SourceVirtualMachineID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ZoneResilient != nil { + in, out := &in.ZoneResilient, &out.ZoneResilient + *out = new(bool) + 
**out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageInitParameters. +func (in *ImageInitParameters) DeepCopy() *ImageInitParameters { + if in == nil { + return nil + } + out := new(ImageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageList) DeepCopyInto(out *ImageList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Image, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageList. +func (in *ImageList) DeepCopy() *ImageList { + if in == nil { + return nil + } + out := new(ImageList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImageObservation) DeepCopyInto(out *ImageObservation) { + *out = *in + if in.DataDisk != nil { + in, out := &in.DataDisk, &out.DataDisk + *out = make([]DataDiskObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HyperVGeneration != nil { + in, out := &in.HyperVGeneration, &out.HyperVGeneration + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.OsDisk != nil { + in, out := &in.OsDisk, &out.OsDisk + *out = new(OsDiskObservation) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.SourceVirtualMachineID != nil { + in, out := &in.SourceVirtualMachineID, &out.SourceVirtualMachineID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ZoneResilient != nil { + in, out := &in.ZoneResilient, &out.ZoneResilient + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageObservation. +func (in *ImageObservation) DeepCopy() *ImageObservation { + if in == nil { + return nil + } + out := new(ImageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImageParameters) DeepCopyInto(out *ImageParameters) { + *out = *in + if in.DataDisk != nil { + in, out := &in.DataDisk, &out.DataDisk + *out = make([]DataDiskParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HyperVGeneration != nil { + in, out := &in.HyperVGeneration, &out.HyperVGeneration + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.OsDisk != nil { + in, out := &in.OsDisk, &out.OsDisk + *out = new(OsDiskParameters) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SourceVirtualMachineID != nil { + in, out := &in.SourceVirtualMachineID, &out.SourceVirtualMachineID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ZoneResilient != nil { + in, out := &in.ZoneResilient, &out.ZoneResilient + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageParameters. 
+func (in *ImageParameters) DeepCopy() *ImageParameters { + if in == nil { + return nil + } + out := new(ImageParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageSpec) DeepCopyInto(out *ImageSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageSpec. +func (in *ImageSpec) DeepCopy() *ImageSpec { + if in == nil { + return nil + } + out := new(ImageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageStatus) DeepCopyInto(out *ImageStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStatus. +func (in *ImageStatus) DeepCopy() *ImageStatus { + if in == nil { + return nil + } + out := new(ImageStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceViewInitParameters) DeepCopyInto(out *InstanceViewInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceViewInitParameters. +func (in *InstanceViewInitParameters) DeepCopy() *InstanceViewInitParameters { + if in == nil { + return nil + } + out := new(InstanceViewInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InstanceViewObservation) DeepCopyInto(out *InstanceViewObservation) { + *out = *in + if in.EndTime != nil { + in, out := &in.EndTime, &out.EndTime + *out = new(string) + **out = **in + } + if in.ErrorMessage != nil { + in, out := &in.ErrorMessage, &out.ErrorMessage + *out = new(string) + **out = **in + } + if in.ExecutionMessage != nil { + in, out := &in.ExecutionMessage, &out.ExecutionMessage + *out = new(string) + **out = **in + } + if in.ExecutionState != nil { + in, out := &in.ExecutionState, &out.ExecutionState + *out = new(string) + **out = **in + } + if in.ExitCode != nil { + in, out := &in.ExitCode, &out.ExitCode + *out = new(float64) + **out = **in + } + if in.Output != nil { + in, out := &in.Output, &out.Output + *out = new(string) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceViewObservation. +func (in *InstanceViewObservation) DeepCopy() *InstanceViewObservation { + if in == nil { + return nil + } + out := new(InstanceViewObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceViewParameters) DeepCopyInto(out *InstanceViewParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceViewParameters. +func (in *InstanceViewParameters) DeepCopy() *InstanceViewParameters { + if in == nil { + return nil + } + out := new(InstanceViewParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KeyEncryptionKeyInitParameters) DeepCopyInto(out *KeyEncryptionKeyInitParameters) { + *out = *in + if in.KeyURL != nil { + in, out := &in.KeyURL, &out.KeyURL + *out = new(string) + **out = **in + } + if in.SourceVaultID != nil { + in, out := &in.SourceVaultID, &out.SourceVaultID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyEncryptionKeyInitParameters. +func (in *KeyEncryptionKeyInitParameters) DeepCopy() *KeyEncryptionKeyInitParameters { + if in == nil { + return nil + } + out := new(KeyEncryptionKeyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KeyEncryptionKeyObservation) DeepCopyInto(out *KeyEncryptionKeyObservation) { + *out = *in + if in.KeyURL != nil { + in, out := &in.KeyURL, &out.KeyURL + *out = new(string) + **out = **in + } + if in.SourceVaultID != nil { + in, out := &in.SourceVaultID, &out.SourceVaultID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyEncryptionKeyObservation. +func (in *KeyEncryptionKeyObservation) DeepCopy() *KeyEncryptionKeyObservation { + if in == nil { + return nil + } + out := new(KeyEncryptionKeyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KeyEncryptionKeyParameters) DeepCopyInto(out *KeyEncryptionKeyParameters) { + *out = *in + if in.KeyURL != nil { + in, out := &in.KeyURL, &out.KeyURL + *out = new(string) + **out = **in + } + if in.SourceVaultID != nil { + in, out := &in.SourceVaultID, &out.SourceVaultID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyEncryptionKeyParameters. 
+func (in *KeyEncryptionKeyParameters) DeepCopy() *KeyEncryptionKeyParameters { + if in == nil { + return nil + } + out := new(KeyEncryptionKeyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxConfigurationAdminSSHKeyInitParameters) DeepCopyInto(out *LinuxConfigurationAdminSSHKeyInitParameters) { + *out = *in + if in.PublicKey != nil { + in, out := &in.PublicKey, &out.PublicKey + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxConfigurationAdminSSHKeyInitParameters. +func (in *LinuxConfigurationAdminSSHKeyInitParameters) DeepCopy() *LinuxConfigurationAdminSSHKeyInitParameters { + if in == nil { + return nil + } + out := new(LinuxConfigurationAdminSSHKeyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxConfigurationAdminSSHKeyObservation) DeepCopyInto(out *LinuxConfigurationAdminSSHKeyObservation) { + *out = *in + if in.PublicKey != nil { + in, out := &in.PublicKey, &out.PublicKey + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxConfigurationAdminSSHKeyObservation. +func (in *LinuxConfigurationAdminSSHKeyObservation) DeepCopy() *LinuxConfigurationAdminSSHKeyObservation { + if in == nil { + return nil + } + out := new(LinuxConfigurationAdminSSHKeyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxConfigurationAdminSSHKeyParameters) DeepCopyInto(out *LinuxConfigurationAdminSSHKeyParameters) { + *out = *in + if in.PublicKey != nil { + in, out := &in.PublicKey, &out.PublicKey + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxConfigurationAdminSSHKeyParameters. +func (in *LinuxConfigurationAdminSSHKeyParameters) DeepCopy() *LinuxConfigurationAdminSSHKeyParameters { + if in == nil { + return nil + } + out := new(LinuxConfigurationAdminSSHKeyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxConfigurationInitParameters) DeepCopyInto(out *LinuxConfigurationInitParameters) { + *out = *in + if in.AdminSSHKey != nil { + in, out := &in.AdminSSHKey, &out.AdminSSHKey + *out = make([]LinuxConfigurationAdminSSHKeyInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AdminUsername != nil { + in, out := &in.AdminUsername, &out.AdminUsername + *out = new(string) + **out = **in + } + if in.ComputerNamePrefix != nil { + in, out := &in.ComputerNamePrefix, &out.ComputerNamePrefix + *out = new(string) + **out = **in + } + if in.DisablePasswordAuthentication != nil { + in, out := &in.DisablePasswordAuthentication, &out.DisablePasswordAuthentication + *out = new(bool) + **out = **in + } + if in.PatchAssessmentMode != nil { + in, out := &in.PatchAssessmentMode, &out.PatchAssessmentMode + *out = new(string) + **out = **in + } + if in.PatchMode != nil { + in, out := &in.PatchMode, &out.PatchMode + *out = new(string) + **out = **in + } + if in.ProvisionVMAgent != nil { + in, out := &in.ProvisionVMAgent, &out.ProvisionVMAgent + *out = new(bool) + **out = **in + } + if in.Secret != nil { + in, out := 
&in.Secret, &out.Secret + *out = make([]LinuxConfigurationSecretInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxConfigurationInitParameters. +func (in *LinuxConfigurationInitParameters) DeepCopy() *LinuxConfigurationInitParameters { + if in == nil { + return nil + } + out := new(LinuxConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxConfigurationObservation) DeepCopyInto(out *LinuxConfigurationObservation) { + *out = *in + if in.AdminSSHKey != nil { + in, out := &in.AdminSSHKey, &out.AdminSSHKey + *out = make([]LinuxConfigurationAdminSSHKeyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AdminUsername != nil { + in, out := &in.AdminUsername, &out.AdminUsername + *out = new(string) + **out = **in + } + if in.ComputerNamePrefix != nil { + in, out := &in.ComputerNamePrefix, &out.ComputerNamePrefix + *out = new(string) + **out = **in + } + if in.DisablePasswordAuthentication != nil { + in, out := &in.DisablePasswordAuthentication, &out.DisablePasswordAuthentication + *out = new(bool) + **out = **in + } + if in.PatchAssessmentMode != nil { + in, out := &in.PatchAssessmentMode, &out.PatchAssessmentMode + *out = new(string) + **out = **in + } + if in.PatchMode != nil { + in, out := &in.PatchMode, &out.PatchMode + *out = new(string) + **out = **in + } + if in.ProvisionVMAgent != nil { + in, out := &in.ProvisionVMAgent, &out.ProvisionVMAgent + *out = new(bool) + **out = **in + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = make([]LinuxConfigurationSecretObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the 
receiver, creating a new LinuxConfigurationObservation. +func (in *LinuxConfigurationObservation) DeepCopy() *LinuxConfigurationObservation { + if in == nil { + return nil + } + out := new(LinuxConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxConfigurationParameters) DeepCopyInto(out *LinuxConfigurationParameters) { + *out = *in + if in.AdminPasswordSecretRef != nil { + in, out := &in.AdminPasswordSecretRef, &out.AdminPasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.AdminSSHKey != nil { + in, out := &in.AdminSSHKey, &out.AdminSSHKey + *out = make([]LinuxConfigurationAdminSSHKeyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AdminUsername != nil { + in, out := &in.AdminUsername, &out.AdminUsername + *out = new(string) + **out = **in + } + if in.ComputerNamePrefix != nil { + in, out := &in.ComputerNamePrefix, &out.ComputerNamePrefix + *out = new(string) + **out = **in + } + if in.DisablePasswordAuthentication != nil { + in, out := &in.DisablePasswordAuthentication, &out.DisablePasswordAuthentication + *out = new(bool) + **out = **in + } + if in.PatchAssessmentMode != nil { + in, out := &in.PatchAssessmentMode, &out.PatchAssessmentMode + *out = new(string) + **out = **in + } + if in.PatchMode != nil { + in, out := &in.PatchMode, &out.PatchMode + *out = new(string) + **out = **in + } + if in.ProvisionVMAgent != nil { + in, out := &in.ProvisionVMAgent, &out.ProvisionVMAgent + *out = new(bool) + **out = **in + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = make([]LinuxConfigurationSecretParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxConfigurationParameters. 
+func (in *LinuxConfigurationParameters) DeepCopy() *LinuxConfigurationParameters { + if in == nil { + return nil + } + out := new(LinuxConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxConfigurationSecretCertificateInitParameters) DeepCopyInto(out *LinuxConfigurationSecretCertificateInitParameters) { + *out = *in + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxConfigurationSecretCertificateInitParameters. +func (in *LinuxConfigurationSecretCertificateInitParameters) DeepCopy() *LinuxConfigurationSecretCertificateInitParameters { + if in == nil { + return nil + } + out := new(LinuxConfigurationSecretCertificateInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxConfigurationSecretCertificateObservation) DeepCopyInto(out *LinuxConfigurationSecretCertificateObservation) { + *out = *in + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxConfigurationSecretCertificateObservation. +func (in *LinuxConfigurationSecretCertificateObservation) DeepCopy() *LinuxConfigurationSecretCertificateObservation { + if in == nil { + return nil + } + out := new(LinuxConfigurationSecretCertificateObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxConfigurationSecretCertificateParameters) DeepCopyInto(out *LinuxConfigurationSecretCertificateParameters) { + *out = *in + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxConfigurationSecretCertificateParameters. +func (in *LinuxConfigurationSecretCertificateParameters) DeepCopy() *LinuxConfigurationSecretCertificateParameters { + if in == nil { + return nil + } + out := new(LinuxConfigurationSecretCertificateParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxConfigurationSecretInitParameters) DeepCopyInto(out *LinuxConfigurationSecretInitParameters) { + *out = *in + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = make([]LinuxConfigurationSecretCertificateInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.KeyVaultID != nil { + in, out := &in.KeyVaultID, &out.KeyVaultID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxConfigurationSecretInitParameters. +func (in *LinuxConfigurationSecretInitParameters) DeepCopy() *LinuxConfigurationSecretInitParameters { + if in == nil { + return nil + } + out := new(LinuxConfigurationSecretInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxConfigurationSecretObservation) DeepCopyInto(out *LinuxConfigurationSecretObservation) { + *out = *in + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = make([]LinuxConfigurationSecretCertificateObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.KeyVaultID != nil { + in, out := &in.KeyVaultID, &out.KeyVaultID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxConfigurationSecretObservation. +func (in *LinuxConfigurationSecretObservation) DeepCopy() *LinuxConfigurationSecretObservation { + if in == nil { + return nil + } + out := new(LinuxConfigurationSecretObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxConfigurationSecretParameters) DeepCopyInto(out *LinuxConfigurationSecretParameters) { + *out = *in + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = make([]LinuxConfigurationSecretCertificateParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.KeyVaultID != nil { + in, out := &in.KeyVaultID, &out.KeyVaultID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxConfigurationSecretParameters. +func (in *LinuxConfigurationSecretParameters) DeepCopy() *LinuxConfigurationSecretParameters { + if in == nil { + return nil + } + out := new(LinuxConfigurationSecretParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxVirtualMachine) DeepCopyInto(out *LinuxVirtualMachine) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachine. +func (in *LinuxVirtualMachine) DeepCopy() *LinuxVirtualMachine { + if in == nil { + return nil + } + out := new(LinuxVirtualMachine) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LinuxVirtualMachine) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxVirtualMachineIdentityInitParameters) DeepCopyInto(out *LinuxVirtualMachineIdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachineIdentityInitParameters. +func (in *LinuxVirtualMachineIdentityInitParameters) DeepCopy() *LinuxVirtualMachineIdentityInitParameters { + if in == nil { + return nil + } + out := new(LinuxVirtualMachineIdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxVirtualMachineIdentityObservation) DeepCopyInto(out *LinuxVirtualMachineIdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachineIdentityObservation. +func (in *LinuxVirtualMachineIdentityObservation) DeepCopy() *LinuxVirtualMachineIdentityObservation { + if in == nil { + return nil + } + out := new(LinuxVirtualMachineIdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxVirtualMachineIdentityParameters) DeepCopyInto(out *LinuxVirtualMachineIdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachineIdentityParameters. 
+func (in *LinuxVirtualMachineIdentityParameters) DeepCopy() *LinuxVirtualMachineIdentityParameters { + if in == nil { + return nil + } + out := new(LinuxVirtualMachineIdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxVirtualMachineInitParameters) DeepCopyInto(out *LinuxVirtualMachineInitParameters) { + *out = *in + if in.AdditionalCapabilities != nil { + in, out := &in.AdditionalCapabilities, &out.AdditionalCapabilities + *out = new(AdditionalCapabilitiesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AdminSSHKey != nil { + in, out := &in.AdminSSHKey, &out.AdminSSHKey + *out = make([]AdminSSHKeyInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AdminUsername != nil { + in, out := &in.AdminUsername, &out.AdminUsername + *out = new(string) + **out = **in + } + if in.AllowExtensionOperations != nil { + in, out := &in.AllowExtensionOperations, &out.AllowExtensionOperations + *out = new(bool) + **out = **in + } + if in.AvailabilitySetID != nil { + in, out := &in.AvailabilitySetID, &out.AvailabilitySetID + *out = new(string) + **out = **in + } + if in.BootDiagnostics != nil { + in, out := &in.BootDiagnostics, &out.BootDiagnostics + *out = new(BootDiagnosticsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.BypassPlatformSafetyChecksOnUserScheduleEnabled != nil { + in, out := &in.BypassPlatformSafetyChecksOnUserScheduleEnabled, &out.BypassPlatformSafetyChecksOnUserScheduleEnabled + *out = new(bool) + **out = **in + } + if in.CapacityReservationGroupID != nil { + in, out := &in.CapacityReservationGroupID, &out.CapacityReservationGroupID + *out = new(string) + **out = **in + } + if in.ComputerName != nil { + in, out := &in.ComputerName, &out.ComputerName + *out = new(string) + **out = **in + } + if in.DedicatedHostGroupID != nil { + in, out := &in.DedicatedHostGroupID, 
&out.DedicatedHostGroupID + *out = new(string) + **out = **in + } + if in.DedicatedHostID != nil { + in, out := &in.DedicatedHostID, &out.DedicatedHostID + *out = new(string) + **out = **in + } + if in.DisablePasswordAuthentication != nil { + in, out := &in.DisablePasswordAuthentication, &out.DisablePasswordAuthentication + *out = new(bool) + **out = **in + } + if in.DiskControllerType != nil { + in, out := &in.DiskControllerType, &out.DiskControllerType + *out = new(string) + **out = **in + } + if in.EdgeZone != nil { + in, out := &in.EdgeZone, &out.EdgeZone + *out = new(string) + **out = **in + } + if in.EncryptionAtHostEnabled != nil { + in, out := &in.EncryptionAtHostEnabled, &out.EncryptionAtHostEnabled + *out = new(bool) + **out = **in + } + if in.EvictionPolicy != nil { + in, out := &in.EvictionPolicy, &out.EvictionPolicy + *out = new(string) + **out = **in + } + if in.ExtensionsTimeBudget != nil { + in, out := &in.ExtensionsTimeBudget, &out.ExtensionsTimeBudget + *out = new(string) + **out = **in + } + if in.GalleryApplication != nil { + in, out := &in.GalleryApplication, &out.GalleryApplication + *out = make([]GalleryApplicationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(LinuxVirtualMachineIdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LicenseType != nil { + in, out := &in.LicenseType, &out.LicenseType + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MaxBidPrice != nil { + in, out := &in.MaxBidPrice, &out.MaxBidPrice + *out = new(float64) + **out = **in + } + if in.NetworkInterfaceIds != nil { + in, out := &in.NetworkInterfaceIds, &out.NetworkInterfaceIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + 
} + } + if in.NetworkInterfaceIdsRefs != nil { + in, out := &in.NetworkInterfaceIdsRefs, &out.NetworkInterfaceIdsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NetworkInterfaceIdsSelector != nil { + in, out := &in.NetworkInterfaceIdsSelector, &out.NetworkInterfaceIdsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.OsDisk != nil { + in, out := &in.OsDisk, &out.OsDisk + *out = new(LinuxVirtualMachineOsDiskInitParameters) + (*in).DeepCopyInto(*out) + } + if in.OsImageNotification != nil { + in, out := &in.OsImageNotification, &out.OsImageNotification + *out = new(OsImageNotificationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PatchAssessmentMode != nil { + in, out := &in.PatchAssessmentMode, &out.PatchAssessmentMode + *out = new(string) + **out = **in + } + if in.PatchMode != nil { + in, out := &in.PatchMode, &out.PatchMode + *out = new(string) + **out = **in + } + if in.Plan != nil { + in, out := &in.Plan, &out.Plan + *out = new(PlanInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PlatformFaultDomain != nil { + in, out := &in.PlatformFaultDomain, &out.PlatformFaultDomain + *out = new(float64) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(string) + **out = **in + } + if in.ProvisionVMAgent != nil { + in, out := &in.ProvisionVMAgent, &out.ProvisionVMAgent + *out = new(bool) + **out = **in + } + if in.ProximityPlacementGroupID != nil { + in, out := &in.ProximityPlacementGroupID, &out.ProximityPlacementGroupID + *out = new(string) + **out = **in + } + if in.RebootSetting != nil { + in, out := &in.RebootSetting, &out.RebootSetting + *out = new(string) + **out = **in + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = make([]SecretInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecureBootEnabled != nil { + in, out := 
&in.SecureBootEnabled, &out.SecureBootEnabled + *out = new(bool) + **out = **in + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(string) + **out = **in + } + if in.SourceImageID != nil { + in, out := &in.SourceImageID, &out.SourceImageID + *out = new(string) + **out = **in + } + if in.SourceImageReference != nil { + in, out := &in.SourceImageReference, &out.SourceImageReference + *out = new(SourceImageReferenceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TerminationNotification != nil { + in, out := &in.TerminationNotification, &out.TerminationNotification + *out = new(TerminationNotificationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.UserData != nil { + in, out := &in.UserData, &out.UserData + *out = new(string) + **out = **in + } + if in.VMAgentPlatformUpdatesEnabled != nil { + in, out := &in.VMAgentPlatformUpdatesEnabled, &out.VMAgentPlatformUpdatesEnabled + *out = new(bool) + **out = **in + } + if in.VirtualMachineScaleSetID != nil { + in, out := &in.VirtualMachineScaleSetID, &out.VirtualMachineScaleSetID + *out = new(string) + **out = **in + } + if in.VtpmEnabled != nil { + in, out := &in.VtpmEnabled, &out.VtpmEnabled + *out = new(bool) + **out = **in + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachineInitParameters. 
+func (in *LinuxVirtualMachineInitParameters) DeepCopy() *LinuxVirtualMachineInitParameters { + if in == nil { + return nil + } + out := new(LinuxVirtualMachineInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxVirtualMachineList) DeepCopyInto(out *LinuxVirtualMachineList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LinuxVirtualMachine, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachineList. +func (in *LinuxVirtualMachineList) DeepCopy() *LinuxVirtualMachineList { + if in == nil { + return nil + } + out := new(LinuxVirtualMachineList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LinuxVirtualMachineList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxVirtualMachineObservation) DeepCopyInto(out *LinuxVirtualMachineObservation) { + *out = *in + if in.AdditionalCapabilities != nil { + in, out := &in.AdditionalCapabilities, &out.AdditionalCapabilities + *out = new(AdditionalCapabilitiesObservation) + (*in).DeepCopyInto(*out) + } + if in.AdminSSHKey != nil { + in, out := &in.AdminSSHKey, &out.AdminSSHKey + *out = make([]AdminSSHKeyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AdminUsername != nil { + in, out := &in.AdminUsername, &out.AdminUsername + *out = new(string) + **out = **in + } + if in.AllowExtensionOperations != nil { + in, out := &in.AllowExtensionOperations, &out.AllowExtensionOperations + *out = new(bool) + **out = **in + } + if in.AvailabilitySetID != nil { + in, out := &in.AvailabilitySetID, &out.AvailabilitySetID + *out = new(string) + **out = **in + } + if in.BootDiagnostics != nil { + in, out := &in.BootDiagnostics, &out.BootDiagnostics + *out = new(BootDiagnosticsObservation) + (*in).DeepCopyInto(*out) + } + if in.BypassPlatformSafetyChecksOnUserScheduleEnabled != nil { + in, out := &in.BypassPlatformSafetyChecksOnUserScheduleEnabled, &out.BypassPlatformSafetyChecksOnUserScheduleEnabled + *out = new(bool) + **out = **in + } + if in.CapacityReservationGroupID != nil { + in, out := &in.CapacityReservationGroupID, &out.CapacityReservationGroupID + *out = new(string) + **out = **in + } + if in.ComputerName != nil { + in, out := &in.ComputerName, &out.ComputerName + *out = new(string) + **out = **in + } + if in.DedicatedHostGroupID != nil { + in, out := &in.DedicatedHostGroupID, &out.DedicatedHostGroupID + *out = new(string) + **out = **in + } + if in.DedicatedHostID != nil { + in, out := &in.DedicatedHostID, &out.DedicatedHostID + *out = new(string) + **out = **in + } + if in.DisablePasswordAuthentication != nil { + in, out := &in.DisablePasswordAuthentication, &out.DisablePasswordAuthentication + *out = new(bool) + **out = **in + } 
+ if in.DiskControllerType != nil { + in, out := &in.DiskControllerType, &out.DiskControllerType + *out = new(string) + **out = **in + } + if in.EdgeZone != nil { + in, out := &in.EdgeZone, &out.EdgeZone + *out = new(string) + **out = **in + } + if in.EncryptionAtHostEnabled != nil { + in, out := &in.EncryptionAtHostEnabled, &out.EncryptionAtHostEnabled + *out = new(bool) + **out = **in + } + if in.EvictionPolicy != nil { + in, out := &in.EvictionPolicy, &out.EvictionPolicy + *out = new(string) + **out = **in + } + if in.ExtensionsTimeBudget != nil { + in, out := &in.ExtensionsTimeBudget, &out.ExtensionsTimeBudget + *out = new(string) + **out = **in + } + if in.GalleryApplication != nil { + in, out := &in.GalleryApplication, &out.GalleryApplication + *out = make([]GalleryApplicationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(LinuxVirtualMachineIdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.LicenseType != nil { + in, out := &in.LicenseType, &out.LicenseType + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MaxBidPrice != nil { + in, out := &in.MaxBidPrice, &out.MaxBidPrice + *out = new(float64) + **out = **in + } + if in.NetworkInterfaceIds != nil { + in, out := &in.NetworkInterfaceIds, &out.NetworkInterfaceIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OsDisk != nil { + in, out := &in.OsDisk, &out.OsDisk + *out = new(LinuxVirtualMachineOsDiskObservation) + (*in).DeepCopyInto(*out) + } + if in.OsImageNotification != nil { + in, out := &in.OsImageNotification, &out.OsImageNotification + *out = 
new(OsImageNotificationObservation) + (*in).DeepCopyInto(*out) + } + if in.PatchAssessmentMode != nil { + in, out := &in.PatchAssessmentMode, &out.PatchAssessmentMode + *out = new(string) + **out = **in + } + if in.PatchMode != nil { + in, out := &in.PatchMode, &out.PatchMode + *out = new(string) + **out = **in + } + if in.Plan != nil { + in, out := &in.Plan, &out.Plan + *out = new(PlanObservation) + (*in).DeepCopyInto(*out) + } + if in.PlatformFaultDomain != nil { + in, out := &in.PlatformFaultDomain, &out.PlatformFaultDomain + *out = new(float64) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(string) + **out = **in + } + if in.PrivateIPAddress != nil { + in, out := &in.PrivateIPAddress, &out.PrivateIPAddress + *out = new(string) + **out = **in + } + if in.PrivateIPAddresses != nil { + in, out := &in.PrivateIPAddresses, &out.PrivateIPAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ProvisionVMAgent != nil { + in, out := &in.ProvisionVMAgent, &out.ProvisionVMAgent + *out = new(bool) + **out = **in + } + if in.ProximityPlacementGroupID != nil { + in, out := &in.ProximityPlacementGroupID, &out.ProximityPlacementGroupID + *out = new(string) + **out = **in + } + if in.PublicIPAddress != nil { + in, out := &in.PublicIPAddress, &out.PublicIPAddress + *out = new(string) + **out = **in + } + if in.PublicIPAddresses != nil { + in, out := &in.PublicIPAddresses, &out.PublicIPAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RebootSetting != nil { + in, out := &in.RebootSetting, &out.RebootSetting + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + 
} + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = make([]SecretObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecureBootEnabled != nil { + in, out := &in.SecureBootEnabled, &out.SecureBootEnabled + *out = new(bool) + **out = **in + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(string) + **out = **in + } + if in.SourceImageID != nil { + in, out := &in.SourceImageID, &out.SourceImageID + *out = new(string) + **out = **in + } + if in.SourceImageReference != nil { + in, out := &in.SourceImageReference, &out.SourceImageReference + *out = new(SourceImageReferenceObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TerminationNotification != nil { + in, out := &in.TerminationNotification, &out.TerminationNotification + *out = new(TerminationNotificationObservation) + (*in).DeepCopyInto(*out) + } + if in.UserData != nil { + in, out := &in.UserData, &out.UserData + *out = new(string) + **out = **in + } + if in.VMAgentPlatformUpdatesEnabled != nil { + in, out := &in.VMAgentPlatformUpdatesEnabled, &out.VMAgentPlatformUpdatesEnabled + *out = new(bool) + **out = **in + } + if in.VirtualMachineID != nil { + in, out := &in.VirtualMachineID, &out.VirtualMachineID + *out = new(string) + **out = **in + } + if in.VirtualMachineScaleSetID != nil { + in, out := &in.VirtualMachineScaleSetID, &out.VirtualMachineScaleSetID + *out = new(string) + **out = **in + } + if in.VtpmEnabled != nil { + in, out := &in.VtpmEnabled, &out.VtpmEnabled + *out = new(bool) + **out = **in + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// 
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachineObservation. +func (in *LinuxVirtualMachineObservation) DeepCopy() *LinuxVirtualMachineObservation { + if in == nil { + return nil + } + out := new(LinuxVirtualMachineObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxVirtualMachineOsDiskInitParameters) DeepCopyInto(out *LinuxVirtualMachineOsDiskInitParameters) { + *out = *in + if in.Caching != nil { + in, out := &in.Caching, &out.Caching + *out = new(string) + **out = **in + } + if in.DiffDiskSettings != nil { + in, out := &in.DiffDiskSettings, &out.DiffDiskSettings + *out = new(DiffDiskSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DiskEncryptionSetID != nil { + in, out := &in.DiskEncryptionSetID, &out.DiskEncryptionSetID + *out = new(string) + **out = **in + } + if in.DiskSizeGb != nil { + in, out := &in.DiskSizeGb, &out.DiskSizeGb + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SecureVMDiskEncryptionSetID != nil { + in, out := &in.SecureVMDiskEncryptionSetID, &out.SecureVMDiskEncryptionSetID + *out = new(string) + **out = **in + } + if in.SecurityEncryptionType != nil { + in, out := &in.SecurityEncryptionType, &out.SecurityEncryptionType + *out = new(string) + **out = **in + } + if in.StorageAccountType != nil { + in, out := &in.StorageAccountType, &out.StorageAccountType + *out = new(string) + **out = **in + } + if in.WriteAcceleratorEnabled != nil { + in, out := &in.WriteAcceleratorEnabled, &out.WriteAcceleratorEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachineOsDiskInitParameters. 
+func (in *LinuxVirtualMachineOsDiskInitParameters) DeepCopy() *LinuxVirtualMachineOsDiskInitParameters { + if in == nil { + return nil + } + out := new(LinuxVirtualMachineOsDiskInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxVirtualMachineOsDiskObservation) DeepCopyInto(out *LinuxVirtualMachineOsDiskObservation) { + *out = *in + if in.Caching != nil { + in, out := &in.Caching, &out.Caching + *out = new(string) + **out = **in + } + if in.DiffDiskSettings != nil { + in, out := &in.DiffDiskSettings, &out.DiffDiskSettings + *out = new(DiffDiskSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.DiskEncryptionSetID != nil { + in, out := &in.DiskEncryptionSetID, &out.DiskEncryptionSetID + *out = new(string) + **out = **in + } + if in.DiskSizeGb != nil { + in, out := &in.DiskSizeGb, &out.DiskSizeGb + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SecureVMDiskEncryptionSetID != nil { + in, out := &in.SecureVMDiskEncryptionSetID, &out.SecureVMDiskEncryptionSetID + *out = new(string) + **out = **in + } + if in.SecurityEncryptionType != nil { + in, out := &in.SecurityEncryptionType, &out.SecurityEncryptionType + *out = new(string) + **out = **in + } + if in.StorageAccountType != nil { + in, out := &in.StorageAccountType, &out.StorageAccountType + *out = new(string) + **out = **in + } + if in.WriteAcceleratorEnabled != nil { + in, out := &in.WriteAcceleratorEnabled, &out.WriteAcceleratorEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachineOsDiskObservation. 
+func (in *LinuxVirtualMachineOsDiskObservation) DeepCopy() *LinuxVirtualMachineOsDiskObservation { + if in == nil { + return nil + } + out := new(LinuxVirtualMachineOsDiskObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxVirtualMachineOsDiskParameters) DeepCopyInto(out *LinuxVirtualMachineOsDiskParameters) { + *out = *in + if in.Caching != nil { + in, out := &in.Caching, &out.Caching + *out = new(string) + **out = **in + } + if in.DiffDiskSettings != nil { + in, out := &in.DiffDiskSettings, &out.DiffDiskSettings + *out = new(DiffDiskSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.DiskEncryptionSetID != nil { + in, out := &in.DiskEncryptionSetID, &out.DiskEncryptionSetID + *out = new(string) + **out = **in + } + if in.DiskSizeGb != nil { + in, out := &in.DiskSizeGb, &out.DiskSizeGb + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SecureVMDiskEncryptionSetID != nil { + in, out := &in.SecureVMDiskEncryptionSetID, &out.SecureVMDiskEncryptionSetID + *out = new(string) + **out = **in + } + if in.SecurityEncryptionType != nil { + in, out := &in.SecurityEncryptionType, &out.SecurityEncryptionType + *out = new(string) + **out = **in + } + if in.StorageAccountType != nil { + in, out := &in.StorageAccountType, &out.StorageAccountType + *out = new(string) + **out = **in + } + if in.WriteAcceleratorEnabled != nil { + in, out := &in.WriteAcceleratorEnabled, &out.WriteAcceleratorEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachineOsDiskParameters. 
+func (in *LinuxVirtualMachineOsDiskParameters) DeepCopy() *LinuxVirtualMachineOsDiskParameters { + if in == nil { + return nil + } + out := new(LinuxVirtualMachineOsDiskParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxVirtualMachineParameters) DeepCopyInto(out *LinuxVirtualMachineParameters) { + *out = *in + if in.AdditionalCapabilities != nil { + in, out := &in.AdditionalCapabilities, &out.AdditionalCapabilities + *out = new(AdditionalCapabilitiesParameters) + (*in).DeepCopyInto(*out) + } + if in.AdminPasswordSecretRef != nil { + in, out := &in.AdminPasswordSecretRef, &out.AdminPasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.AdminSSHKey != nil { + in, out := &in.AdminSSHKey, &out.AdminSSHKey + *out = make([]AdminSSHKeyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AdminUsername != nil { + in, out := &in.AdminUsername, &out.AdminUsername + *out = new(string) + **out = **in + } + if in.AllowExtensionOperations != nil { + in, out := &in.AllowExtensionOperations, &out.AllowExtensionOperations + *out = new(bool) + **out = **in + } + if in.AvailabilitySetID != nil { + in, out := &in.AvailabilitySetID, &out.AvailabilitySetID + *out = new(string) + **out = **in + } + if in.BootDiagnostics != nil { + in, out := &in.BootDiagnostics, &out.BootDiagnostics + *out = new(BootDiagnosticsParameters) + (*in).DeepCopyInto(*out) + } + if in.BypassPlatformSafetyChecksOnUserScheduleEnabled != nil { + in, out := &in.BypassPlatformSafetyChecksOnUserScheduleEnabled, &out.BypassPlatformSafetyChecksOnUserScheduleEnabled + *out = new(bool) + **out = **in + } + if in.CapacityReservationGroupID != nil { + in, out := &in.CapacityReservationGroupID, &out.CapacityReservationGroupID + *out = new(string) + **out = **in + } + if in.ComputerName != nil { + in, out := &in.ComputerName, 
&out.ComputerName + *out = new(string) + **out = **in + } + if in.CustomDataSecretRef != nil { + in, out := &in.CustomDataSecretRef, &out.CustomDataSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.DedicatedHostGroupID != nil { + in, out := &in.DedicatedHostGroupID, &out.DedicatedHostGroupID + *out = new(string) + **out = **in + } + if in.DedicatedHostID != nil { + in, out := &in.DedicatedHostID, &out.DedicatedHostID + *out = new(string) + **out = **in + } + if in.DisablePasswordAuthentication != nil { + in, out := &in.DisablePasswordAuthentication, &out.DisablePasswordAuthentication + *out = new(bool) + **out = **in + } + if in.DiskControllerType != nil { + in, out := &in.DiskControllerType, &out.DiskControllerType + *out = new(string) + **out = **in + } + if in.EdgeZone != nil { + in, out := &in.EdgeZone, &out.EdgeZone + *out = new(string) + **out = **in + } + if in.EncryptionAtHostEnabled != nil { + in, out := &in.EncryptionAtHostEnabled, &out.EncryptionAtHostEnabled + *out = new(bool) + **out = **in + } + if in.EvictionPolicy != nil { + in, out := &in.EvictionPolicy, &out.EvictionPolicy + *out = new(string) + **out = **in + } + if in.ExtensionsTimeBudget != nil { + in, out := &in.ExtensionsTimeBudget, &out.ExtensionsTimeBudget + *out = new(string) + **out = **in + } + if in.GalleryApplication != nil { + in, out := &in.GalleryApplication, &out.GalleryApplication + *out = make([]GalleryApplicationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(LinuxVirtualMachineIdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.LicenseType != nil { + in, out := &in.LicenseType, &out.LicenseType + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MaxBidPrice != nil { + in, out := &in.MaxBidPrice, &out.MaxBidPrice + *out = 
new(float64) + **out = **in + } + if in.NetworkInterfaceIds != nil { + in, out := &in.NetworkInterfaceIds, &out.NetworkInterfaceIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NetworkInterfaceIdsRefs != nil { + in, out := &in.NetworkInterfaceIdsRefs, &out.NetworkInterfaceIdsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NetworkInterfaceIdsSelector != nil { + in, out := &in.NetworkInterfaceIdsSelector, &out.NetworkInterfaceIdsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.OsDisk != nil { + in, out := &in.OsDisk, &out.OsDisk + *out = new(LinuxVirtualMachineOsDiskParameters) + (*in).DeepCopyInto(*out) + } + if in.OsImageNotification != nil { + in, out := &in.OsImageNotification, &out.OsImageNotification + *out = new(OsImageNotificationParameters) + (*in).DeepCopyInto(*out) + } + if in.PatchAssessmentMode != nil { + in, out := &in.PatchAssessmentMode, &out.PatchAssessmentMode + *out = new(string) + **out = **in + } + if in.PatchMode != nil { + in, out := &in.PatchMode, &out.PatchMode + *out = new(string) + **out = **in + } + if in.Plan != nil { + in, out := &in.Plan, &out.Plan + *out = new(PlanParameters) + (*in).DeepCopyInto(*out) + } + if in.PlatformFaultDomain != nil { + in, out := &in.PlatformFaultDomain, &out.PlatformFaultDomain + *out = new(float64) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(string) + **out = **in + } + if in.ProvisionVMAgent != nil { + in, out := &in.ProvisionVMAgent, &out.ProvisionVMAgent + *out = new(bool) + **out = **in + } + if in.ProximityPlacementGroupID != nil { + in, out := &in.ProximityPlacementGroupID, &out.ProximityPlacementGroupID + *out = new(string) + **out = **in + } + if in.RebootSetting != nil { + in, out := &in.RebootSetting, &out.RebootSetting + 
*out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = make([]SecretParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecureBootEnabled != nil { + in, out := &in.SecureBootEnabled, &out.SecureBootEnabled + *out = new(bool) + **out = **in + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(string) + **out = **in + } + if in.SourceImageID != nil { + in, out := &in.SourceImageID, &out.SourceImageID + *out = new(string) + **out = **in + } + if in.SourceImageReference != nil { + in, out := &in.SourceImageReference, &out.SourceImageReference + *out = new(SourceImageReferenceParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TerminationNotification != nil { + in, out := &in.TerminationNotification, &out.TerminationNotification + *out = new(TerminationNotificationParameters) + (*in).DeepCopyInto(*out) + } + if in.UserData != nil { + in, out := &in.UserData, &out.UserData + *out = new(string) + **out = **in + } + if in.VMAgentPlatformUpdatesEnabled != nil { + in, out := &in.VMAgentPlatformUpdatesEnabled, &out.VMAgentPlatformUpdatesEnabled + *out = new(bool) + **out = **in + } + if 
in.VirtualMachineScaleSetID != nil { + in, out := &in.VirtualMachineScaleSetID, &out.VirtualMachineScaleSetID + *out = new(string) + **out = **in + } + if in.VtpmEnabled != nil { + in, out := &in.VtpmEnabled, &out.VtpmEnabled + *out = new(bool) + **out = **in + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachineParameters. +func (in *LinuxVirtualMachineParameters) DeepCopy() *LinuxVirtualMachineParameters { + if in == nil { + return nil + } + out := new(LinuxVirtualMachineParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxVirtualMachineScaleSet) DeepCopyInto(out *LinuxVirtualMachineScaleSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachineScaleSet. +func (in *LinuxVirtualMachineScaleSet) DeepCopy() *LinuxVirtualMachineScaleSet { + if in == nil { + return nil + } + out := new(LinuxVirtualMachineScaleSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LinuxVirtualMachineScaleSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxVirtualMachineScaleSetAdditionalCapabilitiesInitParameters) DeepCopyInto(out *LinuxVirtualMachineScaleSetAdditionalCapabilitiesInitParameters) { + *out = *in + if in.UltraSsdEnabled != nil { + in, out := &in.UltraSsdEnabled, &out.UltraSsdEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachineScaleSetAdditionalCapabilitiesInitParameters. +func (in *LinuxVirtualMachineScaleSetAdditionalCapabilitiesInitParameters) DeepCopy() *LinuxVirtualMachineScaleSetAdditionalCapabilitiesInitParameters { + if in == nil { + return nil + } + out := new(LinuxVirtualMachineScaleSetAdditionalCapabilitiesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxVirtualMachineScaleSetAdditionalCapabilitiesObservation) DeepCopyInto(out *LinuxVirtualMachineScaleSetAdditionalCapabilitiesObservation) { + *out = *in + if in.UltraSsdEnabled != nil { + in, out := &in.UltraSsdEnabled, &out.UltraSsdEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachineScaleSetAdditionalCapabilitiesObservation. +func (in *LinuxVirtualMachineScaleSetAdditionalCapabilitiesObservation) DeepCopy() *LinuxVirtualMachineScaleSetAdditionalCapabilitiesObservation { + if in == nil { + return nil + } + out := new(LinuxVirtualMachineScaleSetAdditionalCapabilitiesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxVirtualMachineScaleSetAdditionalCapabilitiesParameters) DeepCopyInto(out *LinuxVirtualMachineScaleSetAdditionalCapabilitiesParameters) { + *out = *in + if in.UltraSsdEnabled != nil { + in, out := &in.UltraSsdEnabled, &out.UltraSsdEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachineScaleSetAdditionalCapabilitiesParameters. +func (in *LinuxVirtualMachineScaleSetAdditionalCapabilitiesParameters) DeepCopy() *LinuxVirtualMachineScaleSetAdditionalCapabilitiesParameters { + if in == nil { + return nil + } + out := new(LinuxVirtualMachineScaleSetAdditionalCapabilitiesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxVirtualMachineScaleSetAdminSSHKeyInitParameters) DeepCopyInto(out *LinuxVirtualMachineScaleSetAdminSSHKeyInitParameters) { + *out = *in + if in.PublicKey != nil { + in, out := &in.PublicKey, &out.PublicKey + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachineScaleSetAdminSSHKeyInitParameters. +func (in *LinuxVirtualMachineScaleSetAdminSSHKeyInitParameters) DeepCopy() *LinuxVirtualMachineScaleSetAdminSSHKeyInitParameters { + if in == nil { + return nil + } + out := new(LinuxVirtualMachineScaleSetAdminSSHKeyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxVirtualMachineScaleSetAdminSSHKeyObservation) DeepCopyInto(out *LinuxVirtualMachineScaleSetAdminSSHKeyObservation) { + *out = *in + if in.PublicKey != nil { + in, out := &in.PublicKey, &out.PublicKey + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachineScaleSetAdminSSHKeyObservation. +func (in *LinuxVirtualMachineScaleSetAdminSSHKeyObservation) DeepCopy() *LinuxVirtualMachineScaleSetAdminSSHKeyObservation { + if in == nil { + return nil + } + out := new(LinuxVirtualMachineScaleSetAdminSSHKeyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxVirtualMachineScaleSetAdminSSHKeyParameters) DeepCopyInto(out *LinuxVirtualMachineScaleSetAdminSSHKeyParameters) { + *out = *in + if in.PublicKey != nil { + in, out := &in.PublicKey, &out.PublicKey + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachineScaleSetAdminSSHKeyParameters. +func (in *LinuxVirtualMachineScaleSetAdminSSHKeyParameters) DeepCopy() *LinuxVirtualMachineScaleSetAdminSSHKeyParameters { + if in == nil { + return nil + } + out := new(LinuxVirtualMachineScaleSetAdminSSHKeyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxVirtualMachineScaleSetBootDiagnosticsInitParameters) DeepCopyInto(out *LinuxVirtualMachineScaleSetBootDiagnosticsInitParameters) { + *out = *in + if in.StorageAccountURI != nil { + in, out := &in.StorageAccountURI, &out.StorageAccountURI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachineScaleSetBootDiagnosticsInitParameters. +func (in *LinuxVirtualMachineScaleSetBootDiagnosticsInitParameters) DeepCopy() *LinuxVirtualMachineScaleSetBootDiagnosticsInitParameters { + if in == nil { + return nil + } + out := new(LinuxVirtualMachineScaleSetBootDiagnosticsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxVirtualMachineScaleSetBootDiagnosticsObservation) DeepCopyInto(out *LinuxVirtualMachineScaleSetBootDiagnosticsObservation) { + *out = *in + if in.StorageAccountURI != nil { + in, out := &in.StorageAccountURI, &out.StorageAccountURI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachineScaleSetBootDiagnosticsObservation. +func (in *LinuxVirtualMachineScaleSetBootDiagnosticsObservation) DeepCopy() *LinuxVirtualMachineScaleSetBootDiagnosticsObservation { + if in == nil { + return nil + } + out := new(LinuxVirtualMachineScaleSetBootDiagnosticsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxVirtualMachineScaleSetBootDiagnosticsParameters) DeepCopyInto(out *LinuxVirtualMachineScaleSetBootDiagnosticsParameters) { + *out = *in + if in.StorageAccountURI != nil { + in, out := &in.StorageAccountURI, &out.StorageAccountURI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachineScaleSetBootDiagnosticsParameters. +func (in *LinuxVirtualMachineScaleSetBootDiagnosticsParameters) DeepCopy() *LinuxVirtualMachineScaleSetBootDiagnosticsParameters { + if in == nil { + return nil + } + out := new(LinuxVirtualMachineScaleSetBootDiagnosticsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxVirtualMachineScaleSetDataDiskInitParameters) DeepCopyInto(out *LinuxVirtualMachineScaleSetDataDiskInitParameters) { + *out = *in + if in.Caching != nil { + in, out := &in.Caching, &out.Caching + *out = new(string) + **out = **in + } + if in.CreateOption != nil { + in, out := &in.CreateOption, &out.CreateOption + *out = new(string) + **out = **in + } + if in.DiskEncryptionSetID != nil { + in, out := &in.DiskEncryptionSetID, &out.DiskEncryptionSetID + *out = new(string) + **out = **in + } + if in.DiskSizeGb != nil { + in, out := &in.DiskSizeGb, &out.DiskSizeGb + *out = new(float64) + **out = **in + } + if in.Lun != nil { + in, out := &in.Lun, &out.Lun + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.StorageAccountType != nil { + in, out := &in.StorageAccountType, &out.StorageAccountType + *out = new(string) + **out = **in + } + if in.UltraSsdDiskIopsReadWrite != nil { + in, out := &in.UltraSsdDiskIopsReadWrite, &out.UltraSsdDiskIopsReadWrite + *out = new(float64) + **out = **in + } + if in.UltraSsdDiskMbpsReadWrite != nil { + in, out := 
&in.UltraSsdDiskMbpsReadWrite, &out.UltraSsdDiskMbpsReadWrite + *out = new(float64) + **out = **in + } + if in.WriteAcceleratorEnabled != nil { + in, out := &in.WriteAcceleratorEnabled, &out.WriteAcceleratorEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachineScaleSetDataDiskInitParameters. +func (in *LinuxVirtualMachineScaleSetDataDiskInitParameters) DeepCopy() *LinuxVirtualMachineScaleSetDataDiskInitParameters { + if in == nil { + return nil + } + out := new(LinuxVirtualMachineScaleSetDataDiskInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxVirtualMachineScaleSetDataDiskObservation) DeepCopyInto(out *LinuxVirtualMachineScaleSetDataDiskObservation) { + *out = *in + if in.Caching != nil { + in, out := &in.Caching, &out.Caching + *out = new(string) + **out = **in + } + if in.CreateOption != nil { + in, out := &in.CreateOption, &out.CreateOption + *out = new(string) + **out = **in + } + if in.DiskEncryptionSetID != nil { + in, out := &in.DiskEncryptionSetID, &out.DiskEncryptionSetID + *out = new(string) + **out = **in + } + if in.DiskSizeGb != nil { + in, out := &in.DiskSizeGb, &out.DiskSizeGb + *out = new(float64) + **out = **in + } + if in.Lun != nil { + in, out := &in.Lun, &out.Lun + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.StorageAccountType != nil { + in, out := &in.StorageAccountType, &out.StorageAccountType + *out = new(string) + **out = **in + } + if in.UltraSsdDiskIopsReadWrite != nil { + in, out := &in.UltraSsdDiskIopsReadWrite, &out.UltraSsdDiskIopsReadWrite + *out = new(float64) + **out = **in + } + if in.UltraSsdDiskMbpsReadWrite != nil { + in, out := &in.UltraSsdDiskMbpsReadWrite, &out.UltraSsdDiskMbpsReadWrite + 
*out = new(float64) + **out = **in + } + if in.WriteAcceleratorEnabled != nil { + in, out := &in.WriteAcceleratorEnabled, &out.WriteAcceleratorEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachineScaleSetDataDiskObservation. +func (in *LinuxVirtualMachineScaleSetDataDiskObservation) DeepCopy() *LinuxVirtualMachineScaleSetDataDiskObservation { + if in == nil { + return nil + } + out := new(LinuxVirtualMachineScaleSetDataDiskObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxVirtualMachineScaleSetDataDiskParameters) DeepCopyInto(out *LinuxVirtualMachineScaleSetDataDiskParameters) { + *out = *in + if in.Caching != nil { + in, out := &in.Caching, &out.Caching + *out = new(string) + **out = **in + } + if in.CreateOption != nil { + in, out := &in.CreateOption, &out.CreateOption + *out = new(string) + **out = **in + } + if in.DiskEncryptionSetID != nil { + in, out := &in.DiskEncryptionSetID, &out.DiskEncryptionSetID + *out = new(string) + **out = **in + } + if in.DiskSizeGb != nil { + in, out := &in.DiskSizeGb, &out.DiskSizeGb + *out = new(float64) + **out = **in + } + if in.Lun != nil { + in, out := &in.Lun, &out.Lun + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.StorageAccountType != nil { + in, out := &in.StorageAccountType, &out.StorageAccountType + *out = new(string) + **out = **in + } + if in.UltraSsdDiskIopsReadWrite != nil { + in, out := &in.UltraSsdDiskIopsReadWrite, &out.UltraSsdDiskIopsReadWrite + *out = new(float64) + **out = **in + } + if in.UltraSsdDiskMbpsReadWrite != nil { + in, out := &in.UltraSsdDiskMbpsReadWrite, &out.UltraSsdDiskMbpsReadWrite + *out = new(float64) + **out = **in + } + if in.WriteAcceleratorEnabled != nil 
{ + in, out := &in.WriteAcceleratorEnabled, &out.WriteAcceleratorEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachineScaleSetDataDiskParameters. +func (in *LinuxVirtualMachineScaleSetDataDiskParameters) DeepCopy() *LinuxVirtualMachineScaleSetDataDiskParameters { + if in == nil { + return nil + } + out := new(LinuxVirtualMachineScaleSetDataDiskParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxVirtualMachineScaleSetGalleryApplicationInitParameters) DeepCopyInto(out *LinuxVirtualMachineScaleSetGalleryApplicationInitParameters) { + *out = *in + if in.ConfigurationBlobURI != nil { + in, out := &in.ConfigurationBlobURI, &out.ConfigurationBlobURI + *out = new(string) + **out = **in + } + if in.Order != nil { + in, out := &in.Order, &out.Order + *out = new(float64) + **out = **in + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(string) + **out = **in + } + if in.VersionID != nil { + in, out := &in.VersionID, &out.VersionID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachineScaleSetGalleryApplicationInitParameters. +func (in *LinuxVirtualMachineScaleSetGalleryApplicationInitParameters) DeepCopy() *LinuxVirtualMachineScaleSetGalleryApplicationInitParameters { + if in == nil { + return nil + } + out := new(LinuxVirtualMachineScaleSetGalleryApplicationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxVirtualMachineScaleSetGalleryApplicationObservation) DeepCopyInto(out *LinuxVirtualMachineScaleSetGalleryApplicationObservation) { + *out = *in + if in.ConfigurationBlobURI != nil { + in, out := &in.ConfigurationBlobURI, &out.ConfigurationBlobURI + *out = new(string) + **out = **in + } + if in.Order != nil { + in, out := &in.Order, &out.Order + *out = new(float64) + **out = **in + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(string) + **out = **in + } + if in.VersionID != nil { + in, out := &in.VersionID, &out.VersionID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachineScaleSetGalleryApplicationObservation. +func (in *LinuxVirtualMachineScaleSetGalleryApplicationObservation) DeepCopy() *LinuxVirtualMachineScaleSetGalleryApplicationObservation { + if in == nil { + return nil + } + out := new(LinuxVirtualMachineScaleSetGalleryApplicationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxVirtualMachineScaleSetGalleryApplicationParameters) DeepCopyInto(out *LinuxVirtualMachineScaleSetGalleryApplicationParameters) { + *out = *in + if in.ConfigurationBlobURI != nil { + in, out := &in.ConfigurationBlobURI, &out.ConfigurationBlobURI + *out = new(string) + **out = **in + } + if in.Order != nil { + in, out := &in.Order, &out.Order + *out = new(float64) + **out = **in + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(string) + **out = **in + } + if in.VersionID != nil { + in, out := &in.VersionID, &out.VersionID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachineScaleSetGalleryApplicationParameters. 
+func (in *LinuxVirtualMachineScaleSetGalleryApplicationParameters) DeepCopy() *LinuxVirtualMachineScaleSetGalleryApplicationParameters { + if in == nil { + return nil + } + out := new(LinuxVirtualMachineScaleSetGalleryApplicationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxVirtualMachineScaleSetIdentityInitParameters) DeepCopyInto(out *LinuxVirtualMachineScaleSetIdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachineScaleSetIdentityInitParameters. +func (in *LinuxVirtualMachineScaleSetIdentityInitParameters) DeepCopy() *LinuxVirtualMachineScaleSetIdentityInitParameters { + if in == nil { + return nil + } + out := new(LinuxVirtualMachineScaleSetIdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxVirtualMachineScaleSetIdentityObservation) DeepCopyInto(out *LinuxVirtualMachineScaleSetIdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachineScaleSetIdentityObservation. +func (in *LinuxVirtualMachineScaleSetIdentityObservation) DeepCopy() *LinuxVirtualMachineScaleSetIdentityObservation { + if in == nil { + return nil + } + out := new(LinuxVirtualMachineScaleSetIdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxVirtualMachineScaleSetIdentityParameters) DeepCopyInto(out *LinuxVirtualMachineScaleSetIdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachineScaleSetIdentityParameters. 
+func (in *LinuxVirtualMachineScaleSetIdentityParameters) DeepCopy() *LinuxVirtualMachineScaleSetIdentityParameters { + if in == nil { + return nil + } + out := new(LinuxVirtualMachineScaleSetIdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxVirtualMachineScaleSetInitParameters) DeepCopyInto(out *LinuxVirtualMachineScaleSetInitParameters) { + *out = *in + if in.AdditionalCapabilities != nil { + in, out := &in.AdditionalCapabilities, &out.AdditionalCapabilities + *out = new(LinuxVirtualMachineScaleSetAdditionalCapabilitiesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AdminSSHKey != nil { + in, out := &in.AdminSSHKey, &out.AdminSSHKey + *out = make([]LinuxVirtualMachineScaleSetAdminSSHKeyInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AdminUsername != nil { + in, out := &in.AdminUsername, &out.AdminUsername + *out = new(string) + **out = **in + } + if in.AutomaticInstanceRepair != nil { + in, out := &in.AutomaticInstanceRepair, &out.AutomaticInstanceRepair + *out = new(AutomaticInstanceRepairInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AutomaticOsUpgradePolicy != nil { + in, out := &in.AutomaticOsUpgradePolicy, &out.AutomaticOsUpgradePolicy + *out = new(AutomaticOsUpgradePolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.BootDiagnostics != nil { + in, out := &in.BootDiagnostics, &out.BootDiagnostics + *out = new(LinuxVirtualMachineScaleSetBootDiagnosticsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CapacityReservationGroupID != nil { + in, out := &in.CapacityReservationGroupID, &out.CapacityReservationGroupID + *out = new(string) + **out = **in + } + if in.ComputerNamePrefix != nil { + in, out := &in.ComputerNamePrefix, &out.ComputerNamePrefix + *out = new(string) + **out = **in + } + if in.DataDisk != nil { + in, out := &in.DataDisk, 
&out.DataDisk + *out = make([]LinuxVirtualMachineScaleSetDataDiskInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DisablePasswordAuthentication != nil { + in, out := &in.DisablePasswordAuthentication, &out.DisablePasswordAuthentication + *out = new(bool) + **out = **in + } + if in.DoNotRunExtensionsOnOverprovisionedMachines != nil { + in, out := &in.DoNotRunExtensionsOnOverprovisionedMachines, &out.DoNotRunExtensionsOnOverprovisionedMachines + *out = new(bool) + **out = **in + } + if in.EdgeZone != nil { + in, out := &in.EdgeZone, &out.EdgeZone + *out = new(string) + **out = **in + } + if in.EncryptionAtHostEnabled != nil { + in, out := &in.EncryptionAtHostEnabled, &out.EncryptionAtHostEnabled + *out = new(bool) + **out = **in + } + if in.EvictionPolicy != nil { + in, out := &in.EvictionPolicy, &out.EvictionPolicy + *out = new(string) + **out = **in + } + if in.Extension != nil { + in, out := &in.Extension, &out.Extension + *out = make([]ExtensionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ExtensionOperationsEnabled != nil { + in, out := &in.ExtensionOperationsEnabled, &out.ExtensionOperationsEnabled + *out = new(bool) + **out = **in + } + if in.ExtensionsTimeBudget != nil { + in, out := &in.ExtensionsTimeBudget, &out.ExtensionsTimeBudget + *out = new(string) + **out = **in + } + if in.GalleryApplication != nil { + in, out := &in.GalleryApplication, &out.GalleryApplication + *out = make([]LinuxVirtualMachineScaleSetGalleryApplicationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.GalleryApplications != nil { + in, out := &in.GalleryApplications, &out.GalleryApplications + *out = make([]GalleryApplicationsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HealthProbeID != nil { + in, out := &in.HealthProbeID, &out.HealthProbeID + *out = new(string) + **out = 
**in + } + if in.HostGroupID != nil { + in, out := &in.HostGroupID, &out.HostGroupID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(LinuxVirtualMachineScaleSetIdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Instances != nil { + in, out := &in.Instances, &out.Instances + *out = new(float64) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MaxBidPrice != nil { + in, out := &in.MaxBidPrice, &out.MaxBidPrice + *out = new(float64) + **out = **in + } + if in.NetworkInterface != nil { + in, out := &in.NetworkInterface, &out.NetworkInterface + *out = make([]NetworkInterfaceInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OsDisk != nil { + in, out := &in.OsDisk, &out.OsDisk + *out = new(LinuxVirtualMachineScaleSetOsDiskInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Overprovision != nil { + in, out := &in.Overprovision, &out.Overprovision + *out = new(bool) + **out = **in + } + if in.Plan != nil { + in, out := &in.Plan, &out.Plan + *out = new(LinuxVirtualMachineScaleSetPlanInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PlatformFaultDomainCount != nil { + in, out := &in.PlatformFaultDomainCount, &out.PlatformFaultDomainCount + *out = new(float64) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(string) + **out = **in + } + if in.ProvisionVMAgent != nil { + in, out := &in.ProvisionVMAgent, &out.ProvisionVMAgent + *out = new(bool) + **out = **in + } + if in.ProximityPlacementGroupID != nil { + in, out := &in.ProximityPlacementGroupID, &out.ProximityPlacementGroupID + *out = new(string) + **out = **in + } + if in.RollingUpgradePolicy != nil { + in, out := &in.RollingUpgradePolicy, &out.RollingUpgradePolicy + *out = new(RollingUpgradePolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if 
in.ScaleIn != nil { + in, out := &in.ScaleIn, &out.ScaleIn + *out = new(ScaleInInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = make([]LinuxVirtualMachineScaleSetSecretInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecureBootEnabled != nil { + in, out := &in.SecureBootEnabled, &out.SecureBootEnabled + *out = new(bool) + **out = **in + } + if in.SinglePlacementGroup != nil { + in, out := &in.SinglePlacementGroup, &out.SinglePlacementGroup + *out = new(bool) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.SourceImageID != nil { + in, out := &in.SourceImageID, &out.SourceImageID + *out = new(string) + **out = **in + } + if in.SourceImageReference != nil { + in, out := &in.SourceImageReference, &out.SourceImageReference + *out = new(LinuxVirtualMachineScaleSetSourceImageReferenceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SpotRestore != nil { + in, out := &in.SpotRestore, &out.SpotRestore + *out = new(SpotRestoreInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TerminateNotification != nil { + in, out := &in.TerminateNotification, &out.TerminateNotification + *out = new(TerminateNotificationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TerminationNotification != nil { + in, out := &in.TerminationNotification, &out.TerminationNotification + *out = new(LinuxVirtualMachineScaleSetTerminationNotificationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.UpgradeMode != nil { + in, out := &in.UpgradeMode, &out.UpgradeMode + *out = new(string) 
+ **out = **in + } + if in.UserData != nil { + in, out := &in.UserData, &out.UserData + *out = new(string) + **out = **in + } + if in.VtpmEnabled != nil { + in, out := &in.VtpmEnabled, &out.VtpmEnabled + *out = new(bool) + **out = **in + } + if in.ZoneBalance != nil { + in, out := &in.ZoneBalance, &out.ZoneBalance + *out = new(bool) + **out = **in + } + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachineScaleSetInitParameters. +func (in *LinuxVirtualMachineScaleSetInitParameters) DeepCopy() *LinuxVirtualMachineScaleSetInitParameters { + if in == nil { + return nil + } + out := new(LinuxVirtualMachineScaleSetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxVirtualMachineScaleSetList) DeepCopyInto(out *LinuxVirtualMachineScaleSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LinuxVirtualMachineScaleSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachineScaleSetList. +func (in *LinuxVirtualMachineScaleSetList) DeepCopy() *LinuxVirtualMachineScaleSetList { + if in == nil { + return nil + } + out := new(LinuxVirtualMachineScaleSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *LinuxVirtualMachineScaleSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxVirtualMachineScaleSetObservation) DeepCopyInto(out *LinuxVirtualMachineScaleSetObservation) { + *out = *in + if in.AdditionalCapabilities != nil { + in, out := &in.AdditionalCapabilities, &out.AdditionalCapabilities + *out = new(LinuxVirtualMachineScaleSetAdditionalCapabilitiesObservation) + (*in).DeepCopyInto(*out) + } + if in.AdminSSHKey != nil { + in, out := &in.AdminSSHKey, &out.AdminSSHKey + *out = make([]LinuxVirtualMachineScaleSetAdminSSHKeyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AdminUsername != nil { + in, out := &in.AdminUsername, &out.AdminUsername + *out = new(string) + **out = **in + } + if in.AutomaticInstanceRepair != nil { + in, out := &in.AutomaticInstanceRepair, &out.AutomaticInstanceRepair + *out = new(AutomaticInstanceRepairObservation) + (*in).DeepCopyInto(*out) + } + if in.AutomaticOsUpgradePolicy != nil { + in, out := &in.AutomaticOsUpgradePolicy, &out.AutomaticOsUpgradePolicy + *out = new(AutomaticOsUpgradePolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.BootDiagnostics != nil { + in, out := &in.BootDiagnostics, &out.BootDiagnostics + *out = new(LinuxVirtualMachineScaleSetBootDiagnosticsObservation) + (*in).DeepCopyInto(*out) + } + if in.CapacityReservationGroupID != nil { + in, out := &in.CapacityReservationGroupID, &out.CapacityReservationGroupID + *out = new(string) + **out = **in + } + if in.ComputerNamePrefix != nil { + in, out := &in.ComputerNamePrefix, &out.ComputerNamePrefix + *out = new(string) + **out = **in + } + if in.DataDisk != nil { + in, out := &in.DataDisk, &out.DataDisk + *out = make([]LinuxVirtualMachineScaleSetDataDiskObservation, len(*in)) + for i := range *in { + 
(*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DisablePasswordAuthentication != nil { + in, out := &in.DisablePasswordAuthentication, &out.DisablePasswordAuthentication + *out = new(bool) + **out = **in + } + if in.DoNotRunExtensionsOnOverprovisionedMachines != nil { + in, out := &in.DoNotRunExtensionsOnOverprovisionedMachines, &out.DoNotRunExtensionsOnOverprovisionedMachines + *out = new(bool) + **out = **in + } + if in.EdgeZone != nil { + in, out := &in.EdgeZone, &out.EdgeZone + *out = new(string) + **out = **in + } + if in.EncryptionAtHostEnabled != nil { + in, out := &in.EncryptionAtHostEnabled, &out.EncryptionAtHostEnabled + *out = new(bool) + **out = **in + } + if in.EvictionPolicy != nil { + in, out := &in.EvictionPolicy, &out.EvictionPolicy + *out = new(string) + **out = **in + } + if in.Extension != nil { + in, out := &in.Extension, &out.Extension + *out = make([]ExtensionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ExtensionOperationsEnabled != nil { + in, out := &in.ExtensionOperationsEnabled, &out.ExtensionOperationsEnabled + *out = new(bool) + **out = **in + } + if in.ExtensionsTimeBudget != nil { + in, out := &in.ExtensionsTimeBudget, &out.ExtensionsTimeBudget + *out = new(string) + **out = **in + } + if in.GalleryApplication != nil { + in, out := &in.GalleryApplication, &out.GalleryApplication + *out = make([]LinuxVirtualMachineScaleSetGalleryApplicationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.GalleryApplications != nil { + in, out := &in.GalleryApplications, &out.GalleryApplications + *out = make([]GalleryApplicationsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HealthProbeID != nil { + in, out := &in.HealthProbeID, &out.HealthProbeID + *out = new(string) + **out = **in + } + if in.HostGroupID != nil { + in, out := &in.HostGroupID, &out.HostGroupID + *out = new(string) + **out = **in + } 
+ if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(LinuxVirtualMachineScaleSetIdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.Instances != nil { + in, out := &in.Instances, &out.Instances + *out = new(float64) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MaxBidPrice != nil { + in, out := &in.MaxBidPrice, &out.MaxBidPrice + *out = new(float64) + **out = **in + } + if in.NetworkInterface != nil { + in, out := &in.NetworkInterface, &out.NetworkInterface + *out = make([]NetworkInterfaceObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OsDisk != nil { + in, out := &in.OsDisk, &out.OsDisk + *out = new(LinuxVirtualMachineScaleSetOsDiskObservation) + (*in).DeepCopyInto(*out) + } + if in.Overprovision != nil { + in, out := &in.Overprovision, &out.Overprovision + *out = new(bool) + **out = **in + } + if in.Plan != nil { + in, out := &in.Plan, &out.Plan + *out = new(LinuxVirtualMachineScaleSetPlanObservation) + (*in).DeepCopyInto(*out) + } + if in.PlatformFaultDomainCount != nil { + in, out := &in.PlatformFaultDomainCount, &out.PlatformFaultDomainCount + *out = new(float64) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(string) + **out = **in + } + if in.ProvisionVMAgent != nil { + in, out := &in.ProvisionVMAgent, &out.ProvisionVMAgent + *out = new(bool) + **out = **in + } + if in.ProximityPlacementGroupID != nil { + in, out := &in.ProximityPlacementGroupID, &out.ProximityPlacementGroupID + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.RollingUpgradePolicy != nil { + in, out := &in.RollingUpgradePolicy, &out.RollingUpgradePolicy + 
*out = new(RollingUpgradePolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.ScaleIn != nil { + in, out := &in.ScaleIn, &out.ScaleIn + *out = new(ScaleInObservation) + (*in).DeepCopyInto(*out) + } + if in.ScaleInPolicy != nil { + in, out := &in.ScaleInPolicy, &out.ScaleInPolicy + *out = new(string) + **out = **in + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = make([]LinuxVirtualMachineScaleSetSecretObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecureBootEnabled != nil { + in, out := &in.SecureBootEnabled, &out.SecureBootEnabled + *out = new(bool) + **out = **in + } + if in.SinglePlacementGroup != nil { + in, out := &in.SinglePlacementGroup, &out.SinglePlacementGroup + *out = new(bool) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.SourceImageID != nil { + in, out := &in.SourceImageID, &out.SourceImageID + *out = new(string) + **out = **in + } + if in.SourceImageReference != nil { + in, out := &in.SourceImageReference, &out.SourceImageReference + *out = new(LinuxVirtualMachineScaleSetSourceImageReferenceObservation) + (*in).DeepCopyInto(*out) + } + if in.SpotRestore != nil { + in, out := &in.SpotRestore, &out.SpotRestore + *out = new(SpotRestoreObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TerminateNotification != nil { + in, out := &in.TerminateNotification, &out.TerminateNotification + *out = new(TerminateNotificationObservation) + (*in).DeepCopyInto(*out) + } + if in.TerminationNotification != nil { + in, out := &in.TerminationNotification, &out.TerminationNotification + *out = 
new(LinuxVirtualMachineScaleSetTerminationNotificationObservation) + (*in).DeepCopyInto(*out) + } + if in.UniqueID != nil { + in, out := &in.UniqueID, &out.UniqueID + *out = new(string) + **out = **in + } + if in.UpgradeMode != nil { + in, out := &in.UpgradeMode, &out.UpgradeMode + *out = new(string) + **out = **in + } + if in.UserData != nil { + in, out := &in.UserData, &out.UserData + *out = new(string) + **out = **in + } + if in.VtpmEnabled != nil { + in, out := &in.VtpmEnabled, &out.VtpmEnabled + *out = new(bool) + **out = **in + } + if in.ZoneBalance != nil { + in, out := &in.ZoneBalance, &out.ZoneBalance + *out = new(bool) + **out = **in + } + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachineScaleSetObservation. +func (in *LinuxVirtualMachineScaleSetObservation) DeepCopy() *LinuxVirtualMachineScaleSetObservation { + if in == nil { + return nil + } + out := new(LinuxVirtualMachineScaleSetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxVirtualMachineScaleSetOsDiskInitParameters) DeepCopyInto(out *LinuxVirtualMachineScaleSetOsDiskInitParameters) { + *out = *in + if in.Caching != nil { + in, out := &in.Caching, &out.Caching + *out = new(string) + **out = **in + } + if in.DiffDiskSettings != nil { + in, out := &in.DiffDiskSettings, &out.DiffDiskSettings + *out = new(OsDiskDiffDiskSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DiskEncryptionSetID != nil { + in, out := &in.DiskEncryptionSetID, &out.DiskEncryptionSetID + *out = new(string) + **out = **in + } + if in.DiskSizeGb != nil { + in, out := &in.DiskSizeGb, &out.DiskSizeGb + *out = new(float64) + **out = **in + } + if in.SecureVMDiskEncryptionSetID != nil { + in, out := &in.SecureVMDiskEncryptionSetID, &out.SecureVMDiskEncryptionSetID + *out = new(string) + **out = **in + } + if in.SecurityEncryptionType != nil { + in, out := &in.SecurityEncryptionType, &out.SecurityEncryptionType + *out = new(string) + **out = **in + } + if in.StorageAccountType != nil { + in, out := &in.StorageAccountType, &out.StorageAccountType + *out = new(string) + **out = **in + } + if in.WriteAcceleratorEnabled != nil { + in, out := &in.WriteAcceleratorEnabled, &out.WriteAcceleratorEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachineScaleSetOsDiskInitParameters. +func (in *LinuxVirtualMachineScaleSetOsDiskInitParameters) DeepCopy() *LinuxVirtualMachineScaleSetOsDiskInitParameters { + if in == nil { + return nil + } + out := new(LinuxVirtualMachineScaleSetOsDiskInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxVirtualMachineScaleSetOsDiskObservation) DeepCopyInto(out *LinuxVirtualMachineScaleSetOsDiskObservation) { + *out = *in + if in.Caching != nil { + in, out := &in.Caching, &out.Caching + *out = new(string) + **out = **in + } + if in.DiffDiskSettings != nil { + in, out := &in.DiffDiskSettings, &out.DiffDiskSettings + *out = new(OsDiskDiffDiskSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.DiskEncryptionSetID != nil { + in, out := &in.DiskEncryptionSetID, &out.DiskEncryptionSetID + *out = new(string) + **out = **in + } + if in.DiskSizeGb != nil { + in, out := &in.DiskSizeGb, &out.DiskSizeGb + *out = new(float64) + **out = **in + } + if in.SecureVMDiskEncryptionSetID != nil { + in, out := &in.SecureVMDiskEncryptionSetID, &out.SecureVMDiskEncryptionSetID + *out = new(string) + **out = **in + } + if in.SecurityEncryptionType != nil { + in, out := &in.SecurityEncryptionType, &out.SecurityEncryptionType + *out = new(string) + **out = **in + } + if in.StorageAccountType != nil { + in, out := &in.StorageAccountType, &out.StorageAccountType + *out = new(string) + **out = **in + } + if in.WriteAcceleratorEnabled != nil { + in, out := &in.WriteAcceleratorEnabled, &out.WriteAcceleratorEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachineScaleSetOsDiskObservation. +func (in *LinuxVirtualMachineScaleSetOsDiskObservation) DeepCopy() *LinuxVirtualMachineScaleSetOsDiskObservation { + if in == nil { + return nil + } + out := new(LinuxVirtualMachineScaleSetOsDiskObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxVirtualMachineScaleSetOsDiskParameters) DeepCopyInto(out *LinuxVirtualMachineScaleSetOsDiskParameters) { + *out = *in + if in.Caching != nil { + in, out := &in.Caching, &out.Caching + *out = new(string) + **out = **in + } + if in.DiffDiskSettings != nil { + in, out := &in.DiffDiskSettings, &out.DiffDiskSettings + *out = new(OsDiskDiffDiskSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.DiskEncryptionSetID != nil { + in, out := &in.DiskEncryptionSetID, &out.DiskEncryptionSetID + *out = new(string) + **out = **in + } + if in.DiskSizeGb != nil { + in, out := &in.DiskSizeGb, &out.DiskSizeGb + *out = new(float64) + **out = **in + } + if in.SecureVMDiskEncryptionSetID != nil { + in, out := &in.SecureVMDiskEncryptionSetID, &out.SecureVMDiskEncryptionSetID + *out = new(string) + **out = **in + } + if in.SecurityEncryptionType != nil { + in, out := &in.SecurityEncryptionType, &out.SecurityEncryptionType + *out = new(string) + **out = **in + } + if in.StorageAccountType != nil { + in, out := &in.StorageAccountType, &out.StorageAccountType + *out = new(string) + **out = **in + } + if in.WriteAcceleratorEnabled != nil { + in, out := &in.WriteAcceleratorEnabled, &out.WriteAcceleratorEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachineScaleSetOsDiskParameters. +func (in *LinuxVirtualMachineScaleSetOsDiskParameters) DeepCopy() *LinuxVirtualMachineScaleSetOsDiskParameters { + if in == nil { + return nil + } + out := new(LinuxVirtualMachineScaleSetOsDiskParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxVirtualMachineScaleSetParameters) DeepCopyInto(out *LinuxVirtualMachineScaleSetParameters) { + *out = *in + if in.AdditionalCapabilities != nil { + in, out := &in.AdditionalCapabilities, &out.AdditionalCapabilities + *out = new(LinuxVirtualMachineScaleSetAdditionalCapabilitiesParameters) + (*in).DeepCopyInto(*out) + } + if in.AdminPasswordSecretRef != nil { + in, out := &in.AdminPasswordSecretRef, &out.AdminPasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.AdminSSHKey != nil { + in, out := &in.AdminSSHKey, &out.AdminSSHKey + *out = make([]LinuxVirtualMachineScaleSetAdminSSHKeyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AdminUsername != nil { + in, out := &in.AdminUsername, &out.AdminUsername + *out = new(string) + **out = **in + } + if in.AutomaticInstanceRepair != nil { + in, out := &in.AutomaticInstanceRepair, &out.AutomaticInstanceRepair + *out = new(AutomaticInstanceRepairParameters) + (*in).DeepCopyInto(*out) + } + if in.AutomaticOsUpgradePolicy != nil { + in, out := &in.AutomaticOsUpgradePolicy, &out.AutomaticOsUpgradePolicy + *out = new(AutomaticOsUpgradePolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.BootDiagnostics != nil { + in, out := &in.BootDiagnostics, &out.BootDiagnostics + *out = new(LinuxVirtualMachineScaleSetBootDiagnosticsParameters) + (*in).DeepCopyInto(*out) + } + if in.CapacityReservationGroupID != nil { + in, out := &in.CapacityReservationGroupID, &out.CapacityReservationGroupID + *out = new(string) + **out = **in + } + if in.ComputerNamePrefix != nil { + in, out := &in.ComputerNamePrefix, &out.ComputerNamePrefix + *out = new(string) + **out = **in + } + if in.CustomDataSecretRef != nil { + in, out := &in.CustomDataSecretRef, &out.CustomDataSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.DataDisk != nil { + in, out := &in.DataDisk, &out.DataDisk + *out = make([]LinuxVirtualMachineScaleSetDataDiskParameters, 
len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DisablePasswordAuthentication != nil { + in, out := &in.DisablePasswordAuthentication, &out.DisablePasswordAuthentication + *out = new(bool) + **out = **in + } + if in.DoNotRunExtensionsOnOverprovisionedMachines != nil { + in, out := &in.DoNotRunExtensionsOnOverprovisionedMachines, &out.DoNotRunExtensionsOnOverprovisionedMachines + *out = new(bool) + **out = **in + } + if in.EdgeZone != nil { + in, out := &in.EdgeZone, &out.EdgeZone + *out = new(string) + **out = **in + } + if in.EncryptionAtHostEnabled != nil { + in, out := &in.EncryptionAtHostEnabled, &out.EncryptionAtHostEnabled + *out = new(bool) + **out = **in + } + if in.EvictionPolicy != nil { + in, out := &in.EvictionPolicy, &out.EvictionPolicy + *out = new(string) + **out = **in + } + if in.Extension != nil { + in, out := &in.Extension, &out.Extension + *out = make([]ExtensionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ExtensionOperationsEnabled != nil { + in, out := &in.ExtensionOperationsEnabled, &out.ExtensionOperationsEnabled + *out = new(bool) + **out = **in + } + if in.ExtensionsTimeBudget != nil { + in, out := &in.ExtensionsTimeBudget, &out.ExtensionsTimeBudget + *out = new(string) + **out = **in + } + if in.GalleryApplication != nil { + in, out := &in.GalleryApplication, &out.GalleryApplication + *out = make([]LinuxVirtualMachineScaleSetGalleryApplicationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.GalleryApplications != nil { + in, out := &in.GalleryApplications, &out.GalleryApplications + *out = make([]GalleryApplicationsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HealthProbeID != nil { + in, out := &in.HealthProbeID, &out.HealthProbeID + *out = new(string) + **out = **in + } + if in.HostGroupID != nil { + in, out := &in.HostGroupID, &out.HostGroupID + *out 
= new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(LinuxVirtualMachineScaleSetIdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.Instances != nil { + in, out := &in.Instances, &out.Instances + *out = new(float64) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MaxBidPrice != nil { + in, out := &in.MaxBidPrice, &out.MaxBidPrice + *out = new(float64) + **out = **in + } + if in.NetworkInterface != nil { + in, out := &in.NetworkInterface, &out.NetworkInterface + *out = make([]NetworkInterfaceParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OsDisk != nil { + in, out := &in.OsDisk, &out.OsDisk + *out = new(LinuxVirtualMachineScaleSetOsDiskParameters) + (*in).DeepCopyInto(*out) + } + if in.Overprovision != nil { + in, out := &in.Overprovision, &out.Overprovision + *out = new(bool) + **out = **in + } + if in.Plan != nil { + in, out := &in.Plan, &out.Plan + *out = new(LinuxVirtualMachineScaleSetPlanParameters) + (*in).DeepCopyInto(*out) + } + if in.PlatformFaultDomainCount != nil { + in, out := &in.PlatformFaultDomainCount, &out.PlatformFaultDomainCount + *out = new(float64) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(string) + **out = **in + } + if in.ProvisionVMAgent != nil { + in, out := &in.ProvisionVMAgent, &out.ProvisionVMAgent + *out = new(bool) + **out = **in + } + if in.ProximityPlacementGroupID != nil { + in, out := &in.ProximityPlacementGroupID, &out.ProximityPlacementGroupID + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if 
in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RollingUpgradePolicy != nil { + in, out := &in.RollingUpgradePolicy, &out.RollingUpgradePolicy + *out = new(RollingUpgradePolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.ScaleIn != nil { + in, out := &in.ScaleIn, &out.ScaleIn + *out = new(ScaleInParameters) + (*in).DeepCopyInto(*out) + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = make([]LinuxVirtualMachineScaleSetSecretParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecureBootEnabled != nil { + in, out := &in.SecureBootEnabled, &out.SecureBootEnabled + *out = new(bool) + **out = **in + } + if in.SinglePlacementGroup != nil { + in, out := &in.SinglePlacementGroup, &out.SinglePlacementGroup + *out = new(bool) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.SourceImageID != nil { + in, out := &in.SourceImageID, &out.SourceImageID + *out = new(string) + **out = **in + } + if in.SourceImageReference != nil { + in, out := &in.SourceImageReference, &out.SourceImageReference + *out = new(LinuxVirtualMachineScaleSetSourceImageReferenceParameters) + (*in).DeepCopyInto(*out) + } + if in.SpotRestore != nil { + in, out := &in.SpotRestore, &out.SpotRestore + *out = new(SpotRestoreParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TerminateNotification != nil { + in, out := &in.TerminateNotification, &out.TerminateNotification + *out = new(TerminateNotificationParameters) + 
(*in).DeepCopyInto(*out) + } + if in.TerminationNotification != nil { + in, out := &in.TerminationNotification, &out.TerminationNotification + *out = new(LinuxVirtualMachineScaleSetTerminationNotificationParameters) + (*in).DeepCopyInto(*out) + } + if in.UpgradeMode != nil { + in, out := &in.UpgradeMode, &out.UpgradeMode + *out = new(string) + **out = **in + } + if in.UserData != nil { + in, out := &in.UserData, &out.UserData + *out = new(string) + **out = **in + } + if in.VtpmEnabled != nil { + in, out := &in.VtpmEnabled, &out.VtpmEnabled + *out = new(bool) + **out = **in + } + if in.ZoneBalance != nil { + in, out := &in.ZoneBalance, &out.ZoneBalance + *out = new(bool) + **out = **in + } + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachineScaleSetParameters. +func (in *LinuxVirtualMachineScaleSetParameters) DeepCopy() *LinuxVirtualMachineScaleSetParameters { + if in == nil { + return nil + } + out := new(LinuxVirtualMachineScaleSetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxVirtualMachineScaleSetPlanInitParameters) DeepCopyInto(out *LinuxVirtualMachineScaleSetPlanInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Product != nil { + in, out := &in.Product, &out.Product + *out = new(string) + **out = **in + } + if in.Publisher != nil { + in, out := &in.Publisher, &out.Publisher + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachineScaleSetPlanInitParameters. 
+func (in *LinuxVirtualMachineScaleSetPlanInitParameters) DeepCopy() *LinuxVirtualMachineScaleSetPlanInitParameters { + if in == nil { + return nil + } + out := new(LinuxVirtualMachineScaleSetPlanInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxVirtualMachineScaleSetPlanObservation) DeepCopyInto(out *LinuxVirtualMachineScaleSetPlanObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Product != nil { + in, out := &in.Product, &out.Product + *out = new(string) + **out = **in + } + if in.Publisher != nil { + in, out := &in.Publisher, &out.Publisher + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachineScaleSetPlanObservation. +func (in *LinuxVirtualMachineScaleSetPlanObservation) DeepCopy() *LinuxVirtualMachineScaleSetPlanObservation { + if in == nil { + return nil + } + out := new(LinuxVirtualMachineScaleSetPlanObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxVirtualMachineScaleSetPlanParameters) DeepCopyInto(out *LinuxVirtualMachineScaleSetPlanParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Product != nil { + in, out := &in.Product, &out.Product + *out = new(string) + **out = **in + } + if in.Publisher != nil { + in, out := &in.Publisher, &out.Publisher + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachineScaleSetPlanParameters. 
+func (in *LinuxVirtualMachineScaleSetPlanParameters) DeepCopy() *LinuxVirtualMachineScaleSetPlanParameters { + if in == nil { + return nil + } + out := new(LinuxVirtualMachineScaleSetPlanParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxVirtualMachineScaleSetSecretInitParameters) DeepCopyInto(out *LinuxVirtualMachineScaleSetSecretInitParameters) { + *out = *in + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = make([]SecretCertificateInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.KeyVaultID != nil { + in, out := &in.KeyVaultID, &out.KeyVaultID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachineScaleSetSecretInitParameters. +func (in *LinuxVirtualMachineScaleSetSecretInitParameters) DeepCopy() *LinuxVirtualMachineScaleSetSecretInitParameters { + if in == nil { + return nil + } + out := new(LinuxVirtualMachineScaleSetSecretInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxVirtualMachineScaleSetSecretObservation) DeepCopyInto(out *LinuxVirtualMachineScaleSetSecretObservation) { + *out = *in + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = make([]SecretCertificateObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.KeyVaultID != nil { + in, out := &in.KeyVaultID, &out.KeyVaultID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachineScaleSetSecretObservation. 
+func (in *LinuxVirtualMachineScaleSetSecretObservation) DeepCopy() *LinuxVirtualMachineScaleSetSecretObservation { + if in == nil { + return nil + } + out := new(LinuxVirtualMachineScaleSetSecretObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxVirtualMachineScaleSetSecretParameters) DeepCopyInto(out *LinuxVirtualMachineScaleSetSecretParameters) { + *out = *in + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = make([]SecretCertificateParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.KeyVaultID != nil { + in, out := &in.KeyVaultID, &out.KeyVaultID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachineScaleSetSecretParameters. +func (in *LinuxVirtualMachineScaleSetSecretParameters) DeepCopy() *LinuxVirtualMachineScaleSetSecretParameters { + if in == nil { + return nil + } + out := new(LinuxVirtualMachineScaleSetSecretParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxVirtualMachineScaleSetSourceImageReferenceInitParameters) DeepCopyInto(out *LinuxVirtualMachineScaleSetSourceImageReferenceInitParameters) { + *out = *in + if in.Offer != nil { + in, out := &in.Offer, &out.Offer + *out = new(string) + **out = **in + } + if in.Publisher != nil { + in, out := &in.Publisher, &out.Publisher + *out = new(string) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachineScaleSetSourceImageReferenceInitParameters. +func (in *LinuxVirtualMachineScaleSetSourceImageReferenceInitParameters) DeepCopy() *LinuxVirtualMachineScaleSetSourceImageReferenceInitParameters { + if in == nil { + return nil + } + out := new(LinuxVirtualMachineScaleSetSourceImageReferenceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxVirtualMachineScaleSetSourceImageReferenceObservation) DeepCopyInto(out *LinuxVirtualMachineScaleSetSourceImageReferenceObservation) { + *out = *in + if in.Offer != nil { + in, out := &in.Offer, &out.Offer + *out = new(string) + **out = **in + } + if in.Publisher != nil { + in, out := &in.Publisher, &out.Publisher + *out = new(string) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachineScaleSetSourceImageReferenceObservation. 
+func (in *LinuxVirtualMachineScaleSetSourceImageReferenceObservation) DeepCopy() *LinuxVirtualMachineScaleSetSourceImageReferenceObservation { + if in == nil { + return nil + } + out := new(LinuxVirtualMachineScaleSetSourceImageReferenceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxVirtualMachineScaleSetSourceImageReferenceParameters) DeepCopyInto(out *LinuxVirtualMachineScaleSetSourceImageReferenceParameters) { + *out = *in + if in.Offer != nil { + in, out := &in.Offer, &out.Offer + *out = new(string) + **out = **in + } + if in.Publisher != nil { + in, out := &in.Publisher, &out.Publisher + *out = new(string) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachineScaleSetSourceImageReferenceParameters. +func (in *LinuxVirtualMachineScaleSetSourceImageReferenceParameters) DeepCopy() *LinuxVirtualMachineScaleSetSourceImageReferenceParameters { + if in == nil { + return nil + } + out := new(LinuxVirtualMachineScaleSetSourceImageReferenceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxVirtualMachineScaleSetSpec) DeepCopyInto(out *LinuxVirtualMachineScaleSetSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachineScaleSetSpec. 
+func (in *LinuxVirtualMachineScaleSetSpec) DeepCopy() *LinuxVirtualMachineScaleSetSpec { + if in == nil { + return nil + } + out := new(LinuxVirtualMachineScaleSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxVirtualMachineScaleSetStatus) DeepCopyInto(out *LinuxVirtualMachineScaleSetStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachineScaleSetStatus. +func (in *LinuxVirtualMachineScaleSetStatus) DeepCopy() *LinuxVirtualMachineScaleSetStatus { + if in == nil { + return nil + } + out := new(LinuxVirtualMachineScaleSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxVirtualMachineScaleSetTerminationNotificationInitParameters) DeepCopyInto(out *LinuxVirtualMachineScaleSetTerminationNotificationInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachineScaleSetTerminationNotificationInitParameters. +func (in *LinuxVirtualMachineScaleSetTerminationNotificationInitParameters) DeepCopy() *LinuxVirtualMachineScaleSetTerminationNotificationInitParameters { + if in == nil { + return nil + } + out := new(LinuxVirtualMachineScaleSetTerminationNotificationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxVirtualMachineScaleSetTerminationNotificationObservation) DeepCopyInto(out *LinuxVirtualMachineScaleSetTerminationNotificationObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachineScaleSetTerminationNotificationObservation. +func (in *LinuxVirtualMachineScaleSetTerminationNotificationObservation) DeepCopy() *LinuxVirtualMachineScaleSetTerminationNotificationObservation { + if in == nil { + return nil + } + out := new(LinuxVirtualMachineScaleSetTerminationNotificationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxVirtualMachineScaleSetTerminationNotificationParameters) DeepCopyInto(out *LinuxVirtualMachineScaleSetTerminationNotificationParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachineScaleSetTerminationNotificationParameters. +func (in *LinuxVirtualMachineScaleSetTerminationNotificationParameters) DeepCopy() *LinuxVirtualMachineScaleSetTerminationNotificationParameters { + if in == nil { + return nil + } + out := new(LinuxVirtualMachineScaleSetTerminationNotificationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxVirtualMachineSpec) DeepCopyInto(out *LinuxVirtualMachineSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachineSpec. +func (in *LinuxVirtualMachineSpec) DeepCopy() *LinuxVirtualMachineSpec { + if in == nil { + return nil + } + out := new(LinuxVirtualMachineSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxVirtualMachineStatus) DeepCopyInto(out *LinuxVirtualMachineStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachineStatus. +func (in *LinuxVirtualMachineStatus) DeepCopy() *LinuxVirtualMachineStatus { + if in == nil { + return nil + } + out := new(LinuxVirtualMachineStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManageActionInitParameters) DeepCopyInto(out *ManageActionInitParameters) { + *out = *in + if in.Install != nil { + in, out := &in.Install, &out.Install + *out = new(string) + **out = **in + } + if in.Remove != nil { + in, out := &in.Remove, &out.Remove + *out = new(string) + **out = **in + } + if in.Update != nil { + in, out := &in.Update, &out.Update + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManageActionInitParameters. 
+func (in *ManageActionInitParameters) DeepCopy() *ManageActionInitParameters { + if in == nil { + return nil + } + out := new(ManageActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManageActionObservation) DeepCopyInto(out *ManageActionObservation) { + *out = *in + if in.Install != nil { + in, out := &in.Install, &out.Install + *out = new(string) + **out = **in + } + if in.Remove != nil { + in, out := &in.Remove, &out.Remove + *out = new(string) + **out = **in + } + if in.Update != nil { + in, out := &in.Update, &out.Update + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManageActionObservation. +func (in *ManageActionObservation) DeepCopy() *ManageActionObservation { + if in == nil { + return nil + } + out := new(ManageActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManageActionParameters) DeepCopyInto(out *ManageActionParameters) { + *out = *in + if in.Install != nil { + in, out := &in.Install, &out.Install + *out = new(string) + **out = **in + } + if in.Remove != nil { + in, out := &in.Remove, &out.Remove + *out = new(string) + **out = **in + } + if in.Update != nil { + in, out := &in.Update, &out.Update + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManageActionParameters. +func (in *ManageActionParameters) DeepCopy() *ManageActionParameters { + if in == nil { + return nil + } + out := new(ManageActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ManagedDisk) DeepCopyInto(out *ManagedDisk) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedDisk. +func (in *ManagedDisk) DeepCopy() *ManagedDisk { + if in == nil { + return nil + } + out := new(ManagedDisk) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ManagedDisk) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedDiskInitParameters) DeepCopyInto(out *ManagedDiskInitParameters) { + *out = *in + if in.CreateOption != nil { + in, out := &in.CreateOption, &out.CreateOption + *out = new(string) + **out = **in + } + if in.DiskAccessID != nil { + in, out := &in.DiskAccessID, &out.DiskAccessID + *out = new(string) + **out = **in + } + if in.DiskEncryptionSetID != nil { + in, out := &in.DiskEncryptionSetID, &out.DiskEncryptionSetID + *out = new(string) + **out = **in + } + if in.DiskIopsReadOnly != nil { + in, out := &in.DiskIopsReadOnly, &out.DiskIopsReadOnly + *out = new(float64) + **out = **in + } + if in.DiskIopsReadWrite != nil { + in, out := &in.DiskIopsReadWrite, &out.DiskIopsReadWrite + *out = new(float64) + **out = **in + } + if in.DiskMbpsReadOnly != nil { + in, out := &in.DiskMbpsReadOnly, &out.DiskMbpsReadOnly + *out = new(float64) + **out = **in + } + if in.DiskMbpsReadWrite != nil { + in, out := &in.DiskMbpsReadWrite, &out.DiskMbpsReadWrite + *out = new(float64) + **out = **in + } + if in.DiskSizeGb != nil { + in, out := &in.DiskSizeGb, &out.DiskSizeGb + *out = new(float64) + **out = **in + } + if in.EdgeZone != nil { + in, out := 
&in.EdgeZone, &out.EdgeZone + *out = new(string) + **out = **in + } + if in.EncryptionSettings != nil { + in, out := &in.EncryptionSettings, &out.EncryptionSettings + *out = new(EncryptionSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.GalleryImageReferenceID != nil { + in, out := &in.GalleryImageReferenceID, &out.GalleryImageReferenceID + *out = new(string) + **out = **in + } + if in.HyperVGeneration != nil { + in, out := &in.HyperVGeneration, &out.HyperVGeneration + *out = new(string) + **out = **in + } + if in.ImageReferenceID != nil { + in, out := &in.ImageReferenceID, &out.ImageReferenceID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.LogicalSectorSize != nil { + in, out := &in.LogicalSectorSize, &out.LogicalSectorSize + *out = new(float64) + **out = **in + } + if in.MaxShares != nil { + in, out := &in.MaxShares, &out.MaxShares + *out = new(float64) + **out = **in + } + if in.NetworkAccessPolicy != nil { + in, out := &in.NetworkAccessPolicy, &out.NetworkAccessPolicy + *out = new(string) + **out = **in + } + if in.OnDemandBurstingEnabled != nil { + in, out := &in.OnDemandBurstingEnabled, &out.OnDemandBurstingEnabled + *out = new(bool) + **out = **in + } + if in.OptimizedFrequentAttachEnabled != nil { + in, out := &in.OptimizedFrequentAttachEnabled, &out.OptimizedFrequentAttachEnabled + *out = new(bool) + **out = **in + } + if in.OsType != nil { + in, out := &in.OsType, &out.OsType + *out = new(string) + **out = **in + } + if in.PerformancePlusEnabled != nil { + in, out := &in.PerformancePlusEnabled, &out.PerformancePlusEnabled + *out = new(bool) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.SecureVMDiskEncryptionSetID != nil { + in, out := &in.SecureVMDiskEncryptionSetID, 
&out.SecureVMDiskEncryptionSetID + *out = new(string) + **out = **in + } + if in.SecurityType != nil { + in, out := &in.SecurityType, &out.SecurityType + *out = new(string) + **out = **in + } + if in.SourceResourceID != nil { + in, out := &in.SourceResourceID, &out.SourceResourceID + *out = new(string) + **out = **in + } + if in.SourceResourceIDRef != nil { + in, out := &in.SourceResourceIDRef, &out.SourceResourceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SourceResourceIDSelector != nil { + in, out := &in.SourceResourceIDSelector, &out.SourceResourceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SourceURI != nil { + in, out := &in.SourceURI, &out.SourceURI + *out = new(string) + **out = **in + } + if in.StorageAccountID != nil { + in, out := &in.StorageAccountID, &out.StorageAccountID + *out = new(string) + **out = **in + } + if in.StorageAccountType != nil { + in, out := &in.StorageAccountType, &out.StorageAccountType + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Tier != nil { + in, out := &in.Tier, &out.Tier + *out = new(string) + **out = **in + } + if in.TrustedLaunchEnabled != nil { + in, out := &in.TrustedLaunchEnabled, &out.TrustedLaunchEnabled + *out = new(bool) + **out = **in + } + if in.UploadSizeBytes != nil { + in, out := &in.UploadSizeBytes, &out.UploadSizeBytes + *out = new(float64) + **out = **in + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedDiskInitParameters. 
+func (in *ManagedDiskInitParameters) DeepCopy() *ManagedDiskInitParameters { + if in == nil { + return nil + } + out := new(ManagedDiskInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedDiskList) DeepCopyInto(out *ManagedDiskList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ManagedDisk, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedDiskList. +func (in *ManagedDiskList) DeepCopy() *ManagedDiskList { + if in == nil { + return nil + } + out := new(ManagedDiskList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ManagedDiskList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ManagedDiskObservation) DeepCopyInto(out *ManagedDiskObservation) { + *out = *in + if in.CreateOption != nil { + in, out := &in.CreateOption, &out.CreateOption + *out = new(string) + **out = **in + } + if in.DiskAccessID != nil { + in, out := &in.DiskAccessID, &out.DiskAccessID + *out = new(string) + **out = **in + } + if in.DiskEncryptionSetID != nil { + in, out := &in.DiskEncryptionSetID, &out.DiskEncryptionSetID + *out = new(string) + **out = **in + } + if in.DiskIopsReadOnly != nil { + in, out := &in.DiskIopsReadOnly, &out.DiskIopsReadOnly + *out = new(float64) + **out = **in + } + if in.DiskIopsReadWrite != nil { + in, out := &in.DiskIopsReadWrite, &out.DiskIopsReadWrite + *out = new(float64) + **out = **in + } + if in.DiskMbpsReadOnly != nil { + in, out := &in.DiskMbpsReadOnly, &out.DiskMbpsReadOnly + *out = new(float64) + **out = **in + } + if in.DiskMbpsReadWrite != nil { + in, out := &in.DiskMbpsReadWrite, &out.DiskMbpsReadWrite + *out = new(float64) + **out = **in + } + if in.DiskSizeGb != nil { + in, out := &in.DiskSizeGb, &out.DiskSizeGb + *out = new(float64) + **out = **in + } + if in.EdgeZone != nil { + in, out := &in.EdgeZone, &out.EdgeZone + *out = new(string) + **out = **in + } + if in.EncryptionSettings != nil { + in, out := &in.EncryptionSettings, &out.EncryptionSettings + *out = new(EncryptionSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.GalleryImageReferenceID != nil { + in, out := &in.GalleryImageReferenceID, &out.GalleryImageReferenceID + *out = new(string) + **out = **in + } + if in.HyperVGeneration != nil { + in, out := &in.HyperVGeneration, &out.HyperVGeneration + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.ImageReferenceID != nil { + in, out := &in.ImageReferenceID, &out.ImageReferenceID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = 
**in + } + if in.LogicalSectorSize != nil { + in, out := &in.LogicalSectorSize, &out.LogicalSectorSize + *out = new(float64) + **out = **in + } + if in.MaxShares != nil { + in, out := &in.MaxShares, &out.MaxShares + *out = new(float64) + **out = **in + } + if in.NetworkAccessPolicy != nil { + in, out := &in.NetworkAccessPolicy, &out.NetworkAccessPolicy + *out = new(string) + **out = **in + } + if in.OnDemandBurstingEnabled != nil { + in, out := &in.OnDemandBurstingEnabled, &out.OnDemandBurstingEnabled + *out = new(bool) + **out = **in + } + if in.OptimizedFrequentAttachEnabled != nil { + in, out := &in.OptimizedFrequentAttachEnabled, &out.OptimizedFrequentAttachEnabled + *out = new(bool) + **out = **in + } + if in.OsType != nil { + in, out := &in.OsType, &out.OsType + *out = new(string) + **out = **in + } + if in.PerformancePlusEnabled != nil { + in, out := &in.PerformancePlusEnabled, &out.PerformancePlusEnabled + *out = new(bool) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.SecureVMDiskEncryptionSetID != nil { + in, out := &in.SecureVMDiskEncryptionSetID, &out.SecureVMDiskEncryptionSetID + *out = new(string) + **out = **in + } + if in.SecurityType != nil { + in, out := &in.SecurityType, &out.SecurityType + *out = new(string) + **out = **in + } + if in.SourceResourceID != nil { + in, out := &in.SourceResourceID, &out.SourceResourceID + *out = new(string) + **out = **in + } + if in.SourceURI != nil { + in, out := &in.SourceURI, &out.SourceURI + *out = new(string) + **out = **in + } + if in.StorageAccountID != nil { + in, out := &in.StorageAccountID, &out.StorageAccountID + *out = new(string) + **out = **in + } + if in.StorageAccountType != nil { + in, out := &in.StorageAccountType, 
&out.StorageAccountType + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Tier != nil { + in, out := &in.Tier, &out.Tier + *out = new(string) + **out = **in + } + if in.TrustedLaunchEnabled != nil { + in, out := &in.TrustedLaunchEnabled, &out.TrustedLaunchEnabled + *out = new(bool) + **out = **in + } + if in.UploadSizeBytes != nil { + in, out := &in.UploadSizeBytes, &out.UploadSizeBytes + *out = new(float64) + **out = **in + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedDiskObservation. +func (in *ManagedDiskObservation) DeepCopy() *ManagedDiskObservation { + if in == nil { + return nil + } + out := new(ManagedDiskObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ManagedDiskParameters) DeepCopyInto(out *ManagedDiskParameters) { + *out = *in + if in.CreateOption != nil { + in, out := &in.CreateOption, &out.CreateOption + *out = new(string) + **out = **in + } + if in.DiskAccessID != nil { + in, out := &in.DiskAccessID, &out.DiskAccessID + *out = new(string) + **out = **in + } + if in.DiskEncryptionSetID != nil { + in, out := &in.DiskEncryptionSetID, &out.DiskEncryptionSetID + *out = new(string) + **out = **in + } + if in.DiskIopsReadOnly != nil { + in, out := &in.DiskIopsReadOnly, &out.DiskIopsReadOnly + *out = new(float64) + **out = **in + } + if in.DiskIopsReadWrite != nil { + in, out := &in.DiskIopsReadWrite, &out.DiskIopsReadWrite + *out = new(float64) + **out = **in + } + if in.DiskMbpsReadOnly != nil { + in, out := &in.DiskMbpsReadOnly, &out.DiskMbpsReadOnly + *out = new(float64) + **out = **in + } + if in.DiskMbpsReadWrite != nil { + in, out := &in.DiskMbpsReadWrite, &out.DiskMbpsReadWrite + *out = new(float64) + **out = **in + } + if in.DiskSizeGb != nil { + in, out := &in.DiskSizeGb, &out.DiskSizeGb + *out = new(float64) + **out = **in + } + if in.EdgeZone != nil { + in, out := &in.EdgeZone, &out.EdgeZone + *out = new(string) + **out = **in + } + if in.EncryptionSettings != nil { + in, out := &in.EncryptionSettings, &out.EncryptionSettings + *out = new(EncryptionSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.GalleryImageReferenceID != nil { + in, out := &in.GalleryImageReferenceID, &out.GalleryImageReferenceID + *out = new(string) + **out = **in + } + if in.HyperVGeneration != nil { + in, out := &in.HyperVGeneration, &out.HyperVGeneration + *out = new(string) + **out = **in + } + if in.ImageReferenceID != nil { + in, out := &in.ImageReferenceID, &out.ImageReferenceID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.LogicalSectorSize != nil { + in, out := &in.LogicalSectorSize, 
&out.LogicalSectorSize + *out = new(float64) + **out = **in + } + if in.MaxShares != nil { + in, out := &in.MaxShares, &out.MaxShares + *out = new(float64) + **out = **in + } + if in.NetworkAccessPolicy != nil { + in, out := &in.NetworkAccessPolicy, &out.NetworkAccessPolicy + *out = new(string) + **out = **in + } + if in.OnDemandBurstingEnabled != nil { + in, out := &in.OnDemandBurstingEnabled, &out.OnDemandBurstingEnabled + *out = new(bool) + **out = **in + } + if in.OptimizedFrequentAttachEnabled != nil { + in, out := &in.OptimizedFrequentAttachEnabled, &out.OptimizedFrequentAttachEnabled + *out = new(bool) + **out = **in + } + if in.OsType != nil { + in, out := &in.OsType, &out.OsType + *out = new(string) + **out = **in + } + if in.PerformancePlusEnabled != nil { + in, out := &in.PerformancePlusEnabled, &out.PerformancePlusEnabled + *out = new(bool) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SecureVMDiskEncryptionSetID != nil { + in, out := &in.SecureVMDiskEncryptionSetID, &out.SecureVMDiskEncryptionSetID + *out = new(string) + **out = **in + } + if in.SecurityType != nil { + in, out := &in.SecurityType, &out.SecurityType + *out = new(string) + **out = **in + } + if in.SourceResourceID != nil { + in, out := &in.SourceResourceID, &out.SourceResourceID + *out = new(string) + **out = **in + } + if in.SourceResourceIDRef != nil { + in, out := 
&in.SourceResourceIDRef, &out.SourceResourceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SourceResourceIDSelector != nil { + in, out := &in.SourceResourceIDSelector, &out.SourceResourceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SourceURI != nil { + in, out := &in.SourceURI, &out.SourceURI + *out = new(string) + **out = **in + } + if in.StorageAccountID != nil { + in, out := &in.StorageAccountID, &out.StorageAccountID + *out = new(string) + **out = **in + } + if in.StorageAccountType != nil { + in, out := &in.StorageAccountType, &out.StorageAccountType + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Tier != nil { + in, out := &in.Tier, &out.Tier + *out = new(string) + **out = **in + } + if in.TrustedLaunchEnabled != nil { + in, out := &in.TrustedLaunchEnabled, &out.TrustedLaunchEnabled + *out = new(bool) + **out = **in + } + if in.UploadSizeBytes != nil { + in, out := &in.UploadSizeBytes, &out.UploadSizeBytes + *out = new(float64) + **out = **in + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedDiskParameters. +func (in *ManagedDiskParameters) DeepCopy() *ManagedDiskParameters { + if in == nil { + return nil + } + out := new(ManagedDiskParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ManagedDiskSpec) DeepCopyInto(out *ManagedDiskSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedDiskSpec. +func (in *ManagedDiskSpec) DeepCopy() *ManagedDiskSpec { + if in == nil { + return nil + } + out := new(ManagedDiskSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedDiskStatus) DeepCopyInto(out *ManagedDiskStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedDiskStatus. +func (in *ManagedDiskStatus) DeepCopy() *ManagedDiskStatus { + if in == nil { + return nil + } + out := new(ManagedDiskStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkInterfaceIPConfigurationInitParameters) DeepCopyInto(out *NetworkInterfaceIPConfigurationInitParameters) { + *out = *in + if in.ApplicationGatewayBackendAddressPoolIds != nil { + in, out := &in.ApplicationGatewayBackendAddressPoolIds, &out.ApplicationGatewayBackendAddressPoolIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ApplicationSecurityGroupIds != nil { + in, out := &in.ApplicationSecurityGroupIds, &out.ApplicationSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LoadBalancerBackendAddressPoolIds != nil { + in, out := &in.LoadBalancerBackendAddressPoolIds, &out.LoadBalancerBackendAddressPoolIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Primary != nil { + in, out := &in.Primary, &out.Primary + *out = new(bool) + **out = **in + } + if in.PublicIPAddress != nil { + in, out := &in.PublicIPAddress, &out.PublicIPAddress + *out = make([]IPConfigurationPublicIPAddressInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an 
autogenerated deepcopy function, copying the receiver, creating a new NetworkInterfaceIPConfigurationInitParameters. +func (in *NetworkInterfaceIPConfigurationInitParameters) DeepCopy() *NetworkInterfaceIPConfigurationInitParameters { + if in == nil { + return nil + } + out := new(NetworkInterfaceIPConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkInterfaceIPConfigurationObservation) DeepCopyInto(out *NetworkInterfaceIPConfigurationObservation) { + *out = *in + if in.ApplicationGatewayBackendAddressPoolIds != nil { + in, out := &in.ApplicationGatewayBackendAddressPoolIds, &out.ApplicationGatewayBackendAddressPoolIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ApplicationSecurityGroupIds != nil { + in, out := &in.ApplicationSecurityGroupIds, &out.ApplicationSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LoadBalancerBackendAddressPoolIds != nil { + in, out := &in.LoadBalancerBackendAddressPoolIds, &out.LoadBalancerBackendAddressPoolIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Primary != nil { + in, out := &in.Primary, &out.Primary + *out = new(bool) + **out = **in + } + if in.PublicIPAddress != nil { + in, out := &in.PublicIPAddress, &out.PublicIPAddress + *out = make([]IPConfigurationPublicIPAddressObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := 
&in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInterfaceIPConfigurationObservation. +func (in *NetworkInterfaceIPConfigurationObservation) DeepCopy() *NetworkInterfaceIPConfigurationObservation { + if in == nil { + return nil + } + out := new(NetworkInterfaceIPConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkInterfaceIPConfigurationParameters) DeepCopyInto(out *NetworkInterfaceIPConfigurationParameters) { + *out = *in + if in.ApplicationGatewayBackendAddressPoolIds != nil { + in, out := &in.ApplicationGatewayBackendAddressPoolIds, &out.ApplicationGatewayBackendAddressPoolIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ApplicationSecurityGroupIds != nil { + in, out := &in.ApplicationSecurityGroupIds, &out.ApplicationSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LoadBalancerBackendAddressPoolIds != nil { + in, out := &in.LoadBalancerBackendAddressPoolIds, &out.LoadBalancerBackendAddressPoolIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Primary != nil { + in, out := &in.Primary, &out.Primary + *out = new(bool) + **out = **in + } + if in.PublicIPAddress != nil { + in, out := &in.PublicIPAddress, 
&out.PublicIPAddress + *out = make([]IPConfigurationPublicIPAddressParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInterfaceIPConfigurationParameters. +func (in *NetworkInterfaceIPConfigurationParameters) DeepCopy() *NetworkInterfaceIPConfigurationParameters { + if in == nil { + return nil + } + out := new(NetworkInterfaceIPConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkInterfaceIPConfigurationPublicIPAddressInitParameters) DeepCopyInto(out *NetworkInterfaceIPConfigurationPublicIPAddressInitParameters) { + *out = *in + if in.DomainNameLabel != nil { + in, out := &in.DomainNameLabel, &out.DomainNameLabel + *out = new(string) + **out = **in + } + if in.IPTag != nil { + in, out := &in.IPTag, &out.IPTag + *out = make([]IPConfigurationPublicIPAddressIPTagInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IdleTimeoutInMinutes != nil { + in, out := &in.IdleTimeoutInMinutes, &out.IdleTimeoutInMinutes + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PublicIPPrefixID != nil { + in, out := &in.PublicIPPrefixID, &out.PublicIPPrefixID + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInterfaceIPConfigurationPublicIPAddressInitParameters. +func (in *NetworkInterfaceIPConfigurationPublicIPAddressInitParameters) DeepCopy() *NetworkInterfaceIPConfigurationPublicIPAddressInitParameters { + if in == nil { + return nil + } + out := new(NetworkInterfaceIPConfigurationPublicIPAddressInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkInterfaceIPConfigurationPublicIPAddressObservation) DeepCopyInto(out *NetworkInterfaceIPConfigurationPublicIPAddressObservation) { + *out = *in + if in.DomainNameLabel != nil { + in, out := &in.DomainNameLabel, &out.DomainNameLabel + *out = new(string) + **out = **in + } + if in.IPTag != nil { + in, out := &in.IPTag, &out.IPTag + *out = make([]IPConfigurationPublicIPAddressIPTagObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IdleTimeoutInMinutes != nil { + in, out := &in.IdleTimeoutInMinutes, &out.IdleTimeoutInMinutes + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PublicIPPrefixID != nil { + in, out := &in.PublicIPPrefixID, &out.PublicIPPrefixID + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInterfaceIPConfigurationPublicIPAddressObservation. +func (in *NetworkInterfaceIPConfigurationPublicIPAddressObservation) DeepCopy() *NetworkInterfaceIPConfigurationPublicIPAddressObservation { + if in == nil { + return nil + } + out := new(NetworkInterfaceIPConfigurationPublicIPAddressObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkInterfaceIPConfigurationPublicIPAddressParameters) DeepCopyInto(out *NetworkInterfaceIPConfigurationPublicIPAddressParameters) { + *out = *in + if in.DomainNameLabel != nil { + in, out := &in.DomainNameLabel, &out.DomainNameLabel + *out = new(string) + **out = **in + } + if in.IPTag != nil { + in, out := &in.IPTag, &out.IPTag + *out = make([]IPConfigurationPublicIPAddressIPTagParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IdleTimeoutInMinutes != nil { + in, out := &in.IdleTimeoutInMinutes, &out.IdleTimeoutInMinutes + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PublicIPPrefixID != nil { + in, out := &in.PublicIPPrefixID, &out.PublicIPPrefixID + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInterfaceIPConfigurationPublicIPAddressParameters. +func (in *NetworkInterfaceIPConfigurationPublicIPAddressParameters) DeepCopy() *NetworkInterfaceIPConfigurationPublicIPAddressParameters { + if in == nil { + return nil + } + out := new(NetworkInterfaceIPConfigurationPublicIPAddressParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkInterfaceInitParameters) DeepCopyInto(out *NetworkInterfaceInitParameters) { + *out = *in + if in.DNSServers != nil { + in, out := &in.DNSServers, &out.DNSServers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.EnableAcceleratedNetworking != nil { + in, out := &in.EnableAcceleratedNetworking, &out.EnableAcceleratedNetworking + *out = new(bool) + **out = **in + } + if in.EnableIPForwarding != nil { + in, out := &in.EnableIPForwarding, &out.EnableIPForwarding + *out = new(bool) + **out = **in + } + if in.IPConfiguration != nil { + in, out := &in.IPConfiguration, &out.IPConfiguration + *out = make([]IPConfigurationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NetworkSecurityGroupID != nil { + in, out := &in.NetworkSecurityGroupID, &out.NetworkSecurityGroupID + *out = new(string) + **out = **in + } + if in.Primary != nil { + in, out := &in.Primary, &out.Primary + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInterfaceInitParameters. +func (in *NetworkInterfaceInitParameters) DeepCopy() *NetworkInterfaceInitParameters { + if in == nil { + return nil + } + out := new(NetworkInterfaceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkInterfaceObservation) DeepCopyInto(out *NetworkInterfaceObservation) { + *out = *in + if in.DNSServers != nil { + in, out := &in.DNSServers, &out.DNSServers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.EnableAcceleratedNetworking != nil { + in, out := &in.EnableAcceleratedNetworking, &out.EnableAcceleratedNetworking + *out = new(bool) + **out = **in + } + if in.EnableIPForwarding != nil { + in, out := &in.EnableIPForwarding, &out.EnableIPForwarding + *out = new(bool) + **out = **in + } + if in.IPConfiguration != nil { + in, out := &in.IPConfiguration, &out.IPConfiguration + *out = make([]IPConfigurationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NetworkSecurityGroupID != nil { + in, out := &in.NetworkSecurityGroupID, &out.NetworkSecurityGroupID + *out = new(string) + **out = **in + } + if in.Primary != nil { + in, out := &in.Primary, &out.Primary + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInterfaceObservation. +func (in *NetworkInterfaceObservation) DeepCopy() *NetworkInterfaceObservation { + if in == nil { + return nil + } + out := new(NetworkInterfaceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkInterfaceParameters) DeepCopyInto(out *NetworkInterfaceParameters) { + *out = *in + if in.DNSServers != nil { + in, out := &in.DNSServers, &out.DNSServers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.EnableAcceleratedNetworking != nil { + in, out := &in.EnableAcceleratedNetworking, &out.EnableAcceleratedNetworking + *out = new(bool) + **out = **in + } + if in.EnableIPForwarding != nil { + in, out := &in.EnableIPForwarding, &out.EnableIPForwarding + *out = new(bool) + **out = **in + } + if in.IPConfiguration != nil { + in, out := &in.IPConfiguration, &out.IPConfiguration + *out = make([]IPConfigurationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NetworkSecurityGroupID != nil { + in, out := &in.NetworkSecurityGroupID, &out.NetworkSecurityGroupID + *out = new(string) + **out = **in + } + if in.Primary != nil { + in, out := &in.Primary, &out.Primary + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInterfaceParameters. +func (in *NetworkInterfaceParameters) DeepCopy() *NetworkInterfaceParameters { + if in == nil { + return nil + } + out := new(NetworkInterfaceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OrchestratedVirtualMachineScaleSet) DeepCopyInto(out *OrchestratedVirtualMachineScaleSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrchestratedVirtualMachineScaleSet. +func (in *OrchestratedVirtualMachineScaleSet) DeepCopy() *OrchestratedVirtualMachineScaleSet { + if in == nil { + return nil + } + out := new(OrchestratedVirtualMachineScaleSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OrchestratedVirtualMachineScaleSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrchestratedVirtualMachineScaleSetAdditionalCapabilitiesInitParameters) DeepCopyInto(out *OrchestratedVirtualMachineScaleSetAdditionalCapabilitiesInitParameters) { + *out = *in + if in.UltraSsdEnabled != nil { + in, out := &in.UltraSsdEnabled, &out.UltraSsdEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrchestratedVirtualMachineScaleSetAdditionalCapabilitiesInitParameters. +func (in *OrchestratedVirtualMachineScaleSetAdditionalCapabilitiesInitParameters) DeepCopy() *OrchestratedVirtualMachineScaleSetAdditionalCapabilitiesInitParameters { + if in == nil { + return nil + } + out := new(OrchestratedVirtualMachineScaleSetAdditionalCapabilitiesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OrchestratedVirtualMachineScaleSetAdditionalCapabilitiesObservation) DeepCopyInto(out *OrchestratedVirtualMachineScaleSetAdditionalCapabilitiesObservation) { + *out = *in + if in.UltraSsdEnabled != nil { + in, out := &in.UltraSsdEnabled, &out.UltraSsdEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrchestratedVirtualMachineScaleSetAdditionalCapabilitiesObservation. +func (in *OrchestratedVirtualMachineScaleSetAdditionalCapabilitiesObservation) DeepCopy() *OrchestratedVirtualMachineScaleSetAdditionalCapabilitiesObservation { + if in == nil { + return nil + } + out := new(OrchestratedVirtualMachineScaleSetAdditionalCapabilitiesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrchestratedVirtualMachineScaleSetAdditionalCapabilitiesParameters) DeepCopyInto(out *OrchestratedVirtualMachineScaleSetAdditionalCapabilitiesParameters) { + *out = *in + if in.UltraSsdEnabled != nil { + in, out := &in.UltraSsdEnabled, &out.UltraSsdEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrchestratedVirtualMachineScaleSetAdditionalCapabilitiesParameters. +func (in *OrchestratedVirtualMachineScaleSetAdditionalCapabilitiesParameters) DeepCopy() *OrchestratedVirtualMachineScaleSetAdditionalCapabilitiesParameters { + if in == nil { + return nil + } + out := new(OrchestratedVirtualMachineScaleSetAdditionalCapabilitiesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OrchestratedVirtualMachineScaleSetAutomaticInstanceRepairInitParameters) DeepCopyInto(out *OrchestratedVirtualMachineScaleSetAutomaticInstanceRepairInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.GracePeriod != nil { + in, out := &in.GracePeriod, &out.GracePeriod + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrchestratedVirtualMachineScaleSetAutomaticInstanceRepairInitParameters. +func (in *OrchestratedVirtualMachineScaleSetAutomaticInstanceRepairInitParameters) DeepCopy() *OrchestratedVirtualMachineScaleSetAutomaticInstanceRepairInitParameters { + if in == nil { + return nil + } + out := new(OrchestratedVirtualMachineScaleSetAutomaticInstanceRepairInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrchestratedVirtualMachineScaleSetAutomaticInstanceRepairObservation) DeepCopyInto(out *OrchestratedVirtualMachineScaleSetAutomaticInstanceRepairObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.GracePeriod != nil { + in, out := &in.GracePeriod, &out.GracePeriod + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrchestratedVirtualMachineScaleSetAutomaticInstanceRepairObservation. 
+func (in *OrchestratedVirtualMachineScaleSetAutomaticInstanceRepairObservation) DeepCopy() *OrchestratedVirtualMachineScaleSetAutomaticInstanceRepairObservation { + if in == nil { + return nil + } + out := new(OrchestratedVirtualMachineScaleSetAutomaticInstanceRepairObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrchestratedVirtualMachineScaleSetAutomaticInstanceRepairParameters) DeepCopyInto(out *OrchestratedVirtualMachineScaleSetAutomaticInstanceRepairParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.GracePeriod != nil { + in, out := &in.GracePeriod, &out.GracePeriod + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrchestratedVirtualMachineScaleSetAutomaticInstanceRepairParameters. +func (in *OrchestratedVirtualMachineScaleSetAutomaticInstanceRepairParameters) DeepCopy() *OrchestratedVirtualMachineScaleSetAutomaticInstanceRepairParameters { + if in == nil { + return nil + } + out := new(OrchestratedVirtualMachineScaleSetAutomaticInstanceRepairParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrchestratedVirtualMachineScaleSetBootDiagnosticsInitParameters) DeepCopyInto(out *OrchestratedVirtualMachineScaleSetBootDiagnosticsInitParameters) { + *out = *in + if in.StorageAccountURI != nil { + in, out := &in.StorageAccountURI, &out.StorageAccountURI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrchestratedVirtualMachineScaleSetBootDiagnosticsInitParameters. 
+func (in *OrchestratedVirtualMachineScaleSetBootDiagnosticsInitParameters) DeepCopy() *OrchestratedVirtualMachineScaleSetBootDiagnosticsInitParameters { + if in == nil { + return nil + } + out := new(OrchestratedVirtualMachineScaleSetBootDiagnosticsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrchestratedVirtualMachineScaleSetBootDiagnosticsObservation) DeepCopyInto(out *OrchestratedVirtualMachineScaleSetBootDiagnosticsObservation) { + *out = *in + if in.StorageAccountURI != nil { + in, out := &in.StorageAccountURI, &out.StorageAccountURI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrchestratedVirtualMachineScaleSetBootDiagnosticsObservation. +func (in *OrchestratedVirtualMachineScaleSetBootDiagnosticsObservation) DeepCopy() *OrchestratedVirtualMachineScaleSetBootDiagnosticsObservation { + if in == nil { + return nil + } + out := new(OrchestratedVirtualMachineScaleSetBootDiagnosticsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrchestratedVirtualMachineScaleSetBootDiagnosticsParameters) DeepCopyInto(out *OrchestratedVirtualMachineScaleSetBootDiagnosticsParameters) { + *out = *in + if in.StorageAccountURI != nil { + in, out := &in.StorageAccountURI, &out.StorageAccountURI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrchestratedVirtualMachineScaleSetBootDiagnosticsParameters. 
+func (in *OrchestratedVirtualMachineScaleSetBootDiagnosticsParameters) DeepCopy() *OrchestratedVirtualMachineScaleSetBootDiagnosticsParameters { + if in == nil { + return nil + } + out := new(OrchestratedVirtualMachineScaleSetBootDiagnosticsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrchestratedVirtualMachineScaleSetDataDiskInitParameters) DeepCopyInto(out *OrchestratedVirtualMachineScaleSetDataDiskInitParameters) { + *out = *in + if in.Caching != nil { + in, out := &in.Caching, &out.Caching + *out = new(string) + **out = **in + } + if in.CreateOption != nil { + in, out := &in.CreateOption, &out.CreateOption + *out = new(string) + **out = **in + } + if in.DiskEncryptionSetID != nil { + in, out := &in.DiskEncryptionSetID, &out.DiskEncryptionSetID + *out = new(string) + **out = **in + } + if in.DiskSizeGb != nil { + in, out := &in.DiskSizeGb, &out.DiskSizeGb + *out = new(float64) + **out = **in + } + if in.Lun != nil { + in, out := &in.Lun, &out.Lun + *out = new(float64) + **out = **in + } + if in.StorageAccountType != nil { + in, out := &in.StorageAccountType, &out.StorageAccountType + *out = new(string) + **out = **in + } + if in.UltraSsdDiskIopsReadWrite != nil { + in, out := &in.UltraSsdDiskIopsReadWrite, &out.UltraSsdDiskIopsReadWrite + *out = new(float64) + **out = **in + } + if in.UltraSsdDiskMbpsReadWrite != nil { + in, out := &in.UltraSsdDiskMbpsReadWrite, &out.UltraSsdDiskMbpsReadWrite + *out = new(float64) + **out = **in + } + if in.WriteAcceleratorEnabled != nil { + in, out := &in.WriteAcceleratorEnabled, &out.WriteAcceleratorEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrchestratedVirtualMachineScaleSetDataDiskInitParameters. 
+func (in *OrchestratedVirtualMachineScaleSetDataDiskInitParameters) DeepCopy() *OrchestratedVirtualMachineScaleSetDataDiskInitParameters { + if in == nil { + return nil + } + out := new(OrchestratedVirtualMachineScaleSetDataDiskInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrchestratedVirtualMachineScaleSetDataDiskObservation) DeepCopyInto(out *OrchestratedVirtualMachineScaleSetDataDiskObservation) { + *out = *in + if in.Caching != nil { + in, out := &in.Caching, &out.Caching + *out = new(string) + **out = **in + } + if in.CreateOption != nil { + in, out := &in.CreateOption, &out.CreateOption + *out = new(string) + **out = **in + } + if in.DiskEncryptionSetID != nil { + in, out := &in.DiskEncryptionSetID, &out.DiskEncryptionSetID + *out = new(string) + **out = **in + } + if in.DiskSizeGb != nil { + in, out := &in.DiskSizeGb, &out.DiskSizeGb + *out = new(float64) + **out = **in + } + if in.Lun != nil { + in, out := &in.Lun, &out.Lun + *out = new(float64) + **out = **in + } + if in.StorageAccountType != nil { + in, out := &in.StorageAccountType, &out.StorageAccountType + *out = new(string) + **out = **in + } + if in.UltraSsdDiskIopsReadWrite != nil { + in, out := &in.UltraSsdDiskIopsReadWrite, &out.UltraSsdDiskIopsReadWrite + *out = new(float64) + **out = **in + } + if in.UltraSsdDiskMbpsReadWrite != nil { + in, out := &in.UltraSsdDiskMbpsReadWrite, &out.UltraSsdDiskMbpsReadWrite + *out = new(float64) + **out = **in + } + if in.WriteAcceleratorEnabled != nil { + in, out := &in.WriteAcceleratorEnabled, &out.WriteAcceleratorEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrchestratedVirtualMachineScaleSetDataDiskObservation. 
+func (in *OrchestratedVirtualMachineScaleSetDataDiskObservation) DeepCopy() *OrchestratedVirtualMachineScaleSetDataDiskObservation { + if in == nil { + return nil + } + out := new(OrchestratedVirtualMachineScaleSetDataDiskObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrchestratedVirtualMachineScaleSetDataDiskParameters) DeepCopyInto(out *OrchestratedVirtualMachineScaleSetDataDiskParameters) { + *out = *in + if in.Caching != nil { + in, out := &in.Caching, &out.Caching + *out = new(string) + **out = **in + } + if in.CreateOption != nil { + in, out := &in.CreateOption, &out.CreateOption + *out = new(string) + **out = **in + } + if in.DiskEncryptionSetID != nil { + in, out := &in.DiskEncryptionSetID, &out.DiskEncryptionSetID + *out = new(string) + **out = **in + } + if in.DiskSizeGb != nil { + in, out := &in.DiskSizeGb, &out.DiskSizeGb + *out = new(float64) + **out = **in + } + if in.Lun != nil { + in, out := &in.Lun, &out.Lun + *out = new(float64) + **out = **in + } + if in.StorageAccountType != nil { + in, out := &in.StorageAccountType, &out.StorageAccountType + *out = new(string) + **out = **in + } + if in.UltraSsdDiskIopsReadWrite != nil { + in, out := &in.UltraSsdDiskIopsReadWrite, &out.UltraSsdDiskIopsReadWrite + *out = new(float64) + **out = **in + } + if in.UltraSsdDiskMbpsReadWrite != nil { + in, out := &in.UltraSsdDiskMbpsReadWrite, &out.UltraSsdDiskMbpsReadWrite + *out = new(float64) + **out = **in + } + if in.WriteAcceleratorEnabled != nil { + in, out := &in.WriteAcceleratorEnabled, &out.WriteAcceleratorEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrchestratedVirtualMachineScaleSetDataDiskParameters. 
+func (in *OrchestratedVirtualMachineScaleSetDataDiskParameters) DeepCopy() *OrchestratedVirtualMachineScaleSetDataDiskParameters { + if in == nil { + return nil + } + out := new(OrchestratedVirtualMachineScaleSetDataDiskParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrchestratedVirtualMachineScaleSetExtensionInitParameters) DeepCopyInto(out *OrchestratedVirtualMachineScaleSetExtensionInitParameters) { + *out = *in + if in.AutoUpgradeMinorVersionEnabled != nil { + in, out := &in.AutoUpgradeMinorVersionEnabled, &out.AutoUpgradeMinorVersionEnabled + *out = new(bool) + **out = **in + } + if in.ExtensionsToProvisionAfterVMCreation != nil { + in, out := &in.ExtensionsToProvisionAfterVMCreation, &out.ExtensionsToProvisionAfterVMCreation + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FailureSuppressionEnabled != nil { + in, out := &in.FailureSuppressionEnabled, &out.FailureSuppressionEnabled + *out = new(bool) + **out = **in + } + if in.ForceExtensionExecutionOnChange != nil { + in, out := &in.ForceExtensionExecutionOnChange, &out.ForceExtensionExecutionOnChange + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ProtectedSettingsFromKeyVault != nil { + in, out := &in.ProtectedSettingsFromKeyVault, &out.ProtectedSettingsFromKeyVault + *out = new(ExtensionProtectedSettingsFromKeyVaultInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Publisher != nil { + in, out := &in.Publisher, &out.Publisher + *out = new(string) + **out = **in + } + if in.Settings != nil { + in, out := &in.Settings, &out.Settings + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = 
**in + } + if in.TypeHandlerVersion != nil { + in, out := &in.TypeHandlerVersion, &out.TypeHandlerVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrchestratedVirtualMachineScaleSetExtensionInitParameters. +func (in *OrchestratedVirtualMachineScaleSetExtensionInitParameters) DeepCopy() *OrchestratedVirtualMachineScaleSetExtensionInitParameters { + if in == nil { + return nil + } + out := new(OrchestratedVirtualMachineScaleSetExtensionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrchestratedVirtualMachineScaleSetExtensionObservation) DeepCopyInto(out *OrchestratedVirtualMachineScaleSetExtensionObservation) { + *out = *in + if in.AutoUpgradeMinorVersionEnabled != nil { + in, out := &in.AutoUpgradeMinorVersionEnabled, &out.AutoUpgradeMinorVersionEnabled + *out = new(bool) + **out = **in + } + if in.ExtensionsToProvisionAfterVMCreation != nil { + in, out := &in.ExtensionsToProvisionAfterVMCreation, &out.ExtensionsToProvisionAfterVMCreation + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FailureSuppressionEnabled != nil { + in, out := &in.FailureSuppressionEnabled, &out.FailureSuppressionEnabled + *out = new(bool) + **out = **in + } + if in.ForceExtensionExecutionOnChange != nil { + in, out := &in.ForceExtensionExecutionOnChange, &out.ForceExtensionExecutionOnChange + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ProtectedSettingsFromKeyVault != nil { + in, out := &in.ProtectedSettingsFromKeyVault, &out.ProtectedSettingsFromKeyVault + *out = new(ExtensionProtectedSettingsFromKeyVaultObservation) + (*in).DeepCopyInto(*out) + } + 
if in.Publisher != nil { + in, out := &in.Publisher, &out.Publisher + *out = new(string) + **out = **in + } + if in.Settings != nil { + in, out := &in.Settings, &out.Settings + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.TypeHandlerVersion != nil { + in, out := &in.TypeHandlerVersion, &out.TypeHandlerVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrchestratedVirtualMachineScaleSetExtensionObservation. +func (in *OrchestratedVirtualMachineScaleSetExtensionObservation) DeepCopy() *OrchestratedVirtualMachineScaleSetExtensionObservation { + if in == nil { + return nil + } + out := new(OrchestratedVirtualMachineScaleSetExtensionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrchestratedVirtualMachineScaleSetExtensionParameters) DeepCopyInto(out *OrchestratedVirtualMachineScaleSetExtensionParameters) { + *out = *in + if in.AutoUpgradeMinorVersionEnabled != nil { + in, out := &in.AutoUpgradeMinorVersionEnabled, &out.AutoUpgradeMinorVersionEnabled + *out = new(bool) + **out = **in + } + if in.ExtensionsToProvisionAfterVMCreation != nil { + in, out := &in.ExtensionsToProvisionAfterVMCreation, &out.ExtensionsToProvisionAfterVMCreation + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FailureSuppressionEnabled != nil { + in, out := &in.FailureSuppressionEnabled, &out.FailureSuppressionEnabled + *out = new(bool) + **out = **in + } + if in.ForceExtensionExecutionOnChange != nil { + in, out := &in.ForceExtensionExecutionOnChange, &out.ForceExtensionExecutionOnChange + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := 
&in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ProtectedSettingsFromKeyVault != nil { + in, out := &in.ProtectedSettingsFromKeyVault, &out.ProtectedSettingsFromKeyVault + *out = new(ExtensionProtectedSettingsFromKeyVaultParameters) + (*in).DeepCopyInto(*out) + } + if in.ProtectedSettingsSecretRef != nil { + in, out := &in.ProtectedSettingsSecretRef, &out.ProtectedSettingsSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.Publisher != nil { + in, out := &in.Publisher, &out.Publisher + *out = new(string) + **out = **in + } + if in.Settings != nil { + in, out := &in.Settings, &out.Settings + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.TypeHandlerVersion != nil { + in, out := &in.TypeHandlerVersion, &out.TypeHandlerVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrchestratedVirtualMachineScaleSetExtensionParameters. +func (in *OrchestratedVirtualMachineScaleSetExtensionParameters) DeepCopy() *OrchestratedVirtualMachineScaleSetExtensionParameters { + if in == nil { + return nil + } + out := new(OrchestratedVirtualMachineScaleSetExtensionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OrchestratedVirtualMachineScaleSetIdentityInitParameters) DeepCopyInto(out *OrchestratedVirtualMachineScaleSetIdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrchestratedVirtualMachineScaleSetIdentityInitParameters. +func (in *OrchestratedVirtualMachineScaleSetIdentityInitParameters) DeepCopy() *OrchestratedVirtualMachineScaleSetIdentityInitParameters { + if in == nil { + return nil + } + out := new(OrchestratedVirtualMachineScaleSetIdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrchestratedVirtualMachineScaleSetIdentityObservation) DeepCopyInto(out *OrchestratedVirtualMachineScaleSetIdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrchestratedVirtualMachineScaleSetIdentityObservation. 
+func (in *OrchestratedVirtualMachineScaleSetIdentityObservation) DeepCopy() *OrchestratedVirtualMachineScaleSetIdentityObservation { + if in == nil { + return nil + } + out := new(OrchestratedVirtualMachineScaleSetIdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrchestratedVirtualMachineScaleSetIdentityParameters) DeepCopyInto(out *OrchestratedVirtualMachineScaleSetIdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrchestratedVirtualMachineScaleSetIdentityParameters. +func (in *OrchestratedVirtualMachineScaleSetIdentityParameters) DeepCopy() *OrchestratedVirtualMachineScaleSetIdentityParameters { + if in == nil { + return nil + } + out := new(OrchestratedVirtualMachineScaleSetIdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OrchestratedVirtualMachineScaleSetInitParameters) DeepCopyInto(out *OrchestratedVirtualMachineScaleSetInitParameters) { + *out = *in + if in.AdditionalCapabilities != nil { + in, out := &in.AdditionalCapabilities, &out.AdditionalCapabilities + *out = new(OrchestratedVirtualMachineScaleSetAdditionalCapabilitiesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AutomaticInstanceRepair != nil { + in, out := &in.AutomaticInstanceRepair, &out.AutomaticInstanceRepair + *out = new(OrchestratedVirtualMachineScaleSetAutomaticInstanceRepairInitParameters) + (*in).DeepCopyInto(*out) + } + if in.BootDiagnostics != nil { + in, out := &in.BootDiagnostics, &out.BootDiagnostics + *out = new(OrchestratedVirtualMachineScaleSetBootDiagnosticsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CapacityReservationGroupID != nil { + in, out := &in.CapacityReservationGroupID, &out.CapacityReservationGroupID + *out = new(string) + **out = **in + } + if in.DataDisk != nil { + in, out := &in.DataDisk, &out.DataDisk + *out = make([]OrchestratedVirtualMachineScaleSetDataDiskInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EncryptionAtHostEnabled != nil { + in, out := &in.EncryptionAtHostEnabled, &out.EncryptionAtHostEnabled + *out = new(bool) + **out = **in + } + if in.EvictionPolicy != nil { + in, out := &in.EvictionPolicy, &out.EvictionPolicy + *out = new(string) + **out = **in + } + if in.Extension != nil { + in, out := &in.Extension, &out.Extension + *out = make([]OrchestratedVirtualMachineScaleSetExtensionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ExtensionOperationsEnabled != nil { + in, out := &in.ExtensionOperationsEnabled, &out.ExtensionOperationsEnabled + *out = new(bool) + **out = **in + } + if in.ExtensionsTimeBudget != nil { + in, out := &in.ExtensionsTimeBudget, &out.ExtensionsTimeBudget + *out = new(string) + **out = **in + } + if in.Identity != nil { 
+ in, out := &in.Identity, &out.Identity + *out = new(OrchestratedVirtualMachineScaleSetIdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Instances != nil { + in, out := &in.Instances, &out.Instances + *out = new(float64) + **out = **in + } + if in.LicenseType != nil { + in, out := &in.LicenseType, &out.LicenseType + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MaxBidPrice != nil { + in, out := &in.MaxBidPrice, &out.MaxBidPrice + *out = new(float64) + **out = **in + } + if in.NetworkInterface != nil { + in, out := &in.NetworkInterface, &out.NetworkInterface + *out = make([]OrchestratedVirtualMachineScaleSetNetworkInterfaceInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OsDisk != nil { + in, out := &in.OsDisk, &out.OsDisk + *out = new(OrchestratedVirtualMachineScaleSetOsDiskInitParameters) + (*in).DeepCopyInto(*out) + } + if in.OsProfile != nil { + in, out := &in.OsProfile, &out.OsProfile + *out = new(OsProfileInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Plan != nil { + in, out := &in.Plan, &out.Plan + *out = new(OrchestratedVirtualMachineScaleSetPlanInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PlatformFaultDomainCount != nil { + in, out := &in.PlatformFaultDomainCount, &out.PlatformFaultDomainCount + *out = new(float64) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(string) + **out = **in + } + if in.PriorityMix != nil { + in, out := &in.PriorityMix, &out.PriorityMix + *out = new(PriorityMixInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ProximityPlacementGroupID != nil { + in, out := &in.ProximityPlacementGroupID, &out.ProximityPlacementGroupID + *out = new(string) + **out = **in + } + if in.SinglePlacementGroup != nil { + in, out := &in.SinglePlacementGroup, &out.SinglePlacementGroup + *out = new(bool) + **out = 
**in + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.SourceImageID != nil { + in, out := &in.SourceImageID, &out.SourceImageID + *out = new(string) + **out = **in + } + if in.SourceImageReference != nil { + in, out := &in.SourceImageReference, &out.SourceImageReference + *out = new(OrchestratedVirtualMachineScaleSetSourceImageReferenceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TerminationNotification != nil { + in, out := &in.TerminationNotification, &out.TerminationNotification + *out = new(OrchestratedVirtualMachineScaleSetTerminationNotificationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ZoneBalance != nil { + in, out := &in.ZoneBalance, &out.ZoneBalance + *out = new(bool) + **out = **in + } + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrchestratedVirtualMachineScaleSetInitParameters. +func (in *OrchestratedVirtualMachineScaleSetInitParameters) DeepCopy() *OrchestratedVirtualMachineScaleSetInitParameters { + if in == nil { + return nil + } + out := new(OrchestratedVirtualMachineScaleSetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OrchestratedVirtualMachineScaleSetList) DeepCopyInto(out *OrchestratedVirtualMachineScaleSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OrchestratedVirtualMachineScaleSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrchestratedVirtualMachineScaleSetList. +func (in *OrchestratedVirtualMachineScaleSetList) DeepCopy() *OrchestratedVirtualMachineScaleSetList { + if in == nil { + return nil + } + out := new(OrchestratedVirtualMachineScaleSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OrchestratedVirtualMachineScaleSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OrchestratedVirtualMachineScaleSetNetworkInterfaceInitParameters) DeepCopyInto(out *OrchestratedVirtualMachineScaleSetNetworkInterfaceInitParameters) { + *out = *in + if in.DNSServers != nil { + in, out := &in.DNSServers, &out.DNSServers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.EnableAcceleratedNetworking != nil { + in, out := &in.EnableAcceleratedNetworking, &out.EnableAcceleratedNetworking + *out = new(bool) + **out = **in + } + if in.EnableIPForwarding != nil { + in, out := &in.EnableIPForwarding, &out.EnableIPForwarding + *out = new(bool) + **out = **in + } + if in.IPConfiguration != nil { + in, out := &in.IPConfiguration, &out.IPConfiguration + *out = make([]NetworkInterfaceIPConfigurationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NetworkSecurityGroupID != nil { + in, out := &in.NetworkSecurityGroupID, &out.NetworkSecurityGroupID + *out = new(string) + **out = **in + } + if in.Primary != nil { + in, out := &in.Primary, &out.Primary + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrchestratedVirtualMachineScaleSetNetworkInterfaceInitParameters. +func (in *OrchestratedVirtualMachineScaleSetNetworkInterfaceInitParameters) DeepCopy() *OrchestratedVirtualMachineScaleSetNetworkInterfaceInitParameters { + if in == nil { + return nil + } + out := new(OrchestratedVirtualMachineScaleSetNetworkInterfaceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OrchestratedVirtualMachineScaleSetNetworkInterfaceObservation) DeepCopyInto(out *OrchestratedVirtualMachineScaleSetNetworkInterfaceObservation) { + *out = *in + if in.DNSServers != nil { + in, out := &in.DNSServers, &out.DNSServers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.EnableAcceleratedNetworking != nil { + in, out := &in.EnableAcceleratedNetworking, &out.EnableAcceleratedNetworking + *out = new(bool) + **out = **in + } + if in.EnableIPForwarding != nil { + in, out := &in.EnableIPForwarding, &out.EnableIPForwarding + *out = new(bool) + **out = **in + } + if in.IPConfiguration != nil { + in, out := &in.IPConfiguration, &out.IPConfiguration + *out = make([]NetworkInterfaceIPConfigurationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NetworkSecurityGroupID != nil { + in, out := &in.NetworkSecurityGroupID, &out.NetworkSecurityGroupID + *out = new(string) + **out = **in + } + if in.Primary != nil { + in, out := &in.Primary, &out.Primary + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrchestratedVirtualMachineScaleSetNetworkInterfaceObservation. +func (in *OrchestratedVirtualMachineScaleSetNetworkInterfaceObservation) DeepCopy() *OrchestratedVirtualMachineScaleSetNetworkInterfaceObservation { + if in == nil { + return nil + } + out := new(OrchestratedVirtualMachineScaleSetNetworkInterfaceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OrchestratedVirtualMachineScaleSetNetworkInterfaceParameters) DeepCopyInto(out *OrchestratedVirtualMachineScaleSetNetworkInterfaceParameters) { + *out = *in + if in.DNSServers != nil { + in, out := &in.DNSServers, &out.DNSServers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.EnableAcceleratedNetworking != nil { + in, out := &in.EnableAcceleratedNetworking, &out.EnableAcceleratedNetworking + *out = new(bool) + **out = **in + } + if in.EnableIPForwarding != nil { + in, out := &in.EnableIPForwarding, &out.EnableIPForwarding + *out = new(bool) + **out = **in + } + if in.IPConfiguration != nil { + in, out := &in.IPConfiguration, &out.IPConfiguration + *out = make([]NetworkInterfaceIPConfigurationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NetworkSecurityGroupID != nil { + in, out := &in.NetworkSecurityGroupID, &out.NetworkSecurityGroupID + *out = new(string) + **out = **in + } + if in.Primary != nil { + in, out := &in.Primary, &out.Primary + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrchestratedVirtualMachineScaleSetNetworkInterfaceParameters. +func (in *OrchestratedVirtualMachineScaleSetNetworkInterfaceParameters) DeepCopy() *OrchestratedVirtualMachineScaleSetNetworkInterfaceParameters { + if in == nil { + return nil + } + out := new(OrchestratedVirtualMachineScaleSetNetworkInterfaceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OrchestratedVirtualMachineScaleSetObservation) DeepCopyInto(out *OrchestratedVirtualMachineScaleSetObservation) { + *out = *in + if in.AdditionalCapabilities != nil { + in, out := &in.AdditionalCapabilities, &out.AdditionalCapabilities + *out = new(OrchestratedVirtualMachineScaleSetAdditionalCapabilitiesObservation) + (*in).DeepCopyInto(*out) + } + if in.AutomaticInstanceRepair != nil { + in, out := &in.AutomaticInstanceRepair, &out.AutomaticInstanceRepair + *out = new(OrchestratedVirtualMachineScaleSetAutomaticInstanceRepairObservation) + (*in).DeepCopyInto(*out) + } + if in.BootDiagnostics != nil { + in, out := &in.BootDiagnostics, &out.BootDiagnostics + *out = new(OrchestratedVirtualMachineScaleSetBootDiagnosticsObservation) + (*in).DeepCopyInto(*out) + } + if in.CapacityReservationGroupID != nil { + in, out := &in.CapacityReservationGroupID, &out.CapacityReservationGroupID + *out = new(string) + **out = **in + } + if in.DataDisk != nil { + in, out := &in.DataDisk, &out.DataDisk + *out = make([]OrchestratedVirtualMachineScaleSetDataDiskObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EncryptionAtHostEnabled != nil { + in, out := &in.EncryptionAtHostEnabled, &out.EncryptionAtHostEnabled + *out = new(bool) + **out = **in + } + if in.EvictionPolicy != nil { + in, out := &in.EvictionPolicy, &out.EvictionPolicy + *out = new(string) + **out = **in + } + if in.Extension != nil { + in, out := &in.Extension, &out.Extension + *out = make([]OrchestratedVirtualMachineScaleSetExtensionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ExtensionOperationsEnabled != nil { + in, out := &in.ExtensionOperationsEnabled, &out.ExtensionOperationsEnabled + *out = new(bool) + **out = **in + } + if in.ExtensionsTimeBudget != nil { + in, out := &in.ExtensionsTimeBudget, &out.ExtensionsTimeBudget + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, 
&out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(OrchestratedVirtualMachineScaleSetIdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.Instances != nil { + in, out := &in.Instances, &out.Instances + *out = new(float64) + **out = **in + } + if in.LicenseType != nil { + in, out := &in.LicenseType, &out.LicenseType + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MaxBidPrice != nil { + in, out := &in.MaxBidPrice, &out.MaxBidPrice + *out = new(float64) + **out = **in + } + if in.NetworkInterface != nil { + in, out := &in.NetworkInterface, &out.NetworkInterface + *out = make([]OrchestratedVirtualMachineScaleSetNetworkInterfaceObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OsDisk != nil { + in, out := &in.OsDisk, &out.OsDisk + *out = new(OrchestratedVirtualMachineScaleSetOsDiskObservation) + (*in).DeepCopyInto(*out) + } + if in.OsProfile != nil { + in, out := &in.OsProfile, &out.OsProfile + *out = new(OsProfileObservation) + (*in).DeepCopyInto(*out) + } + if in.Plan != nil { + in, out := &in.Plan, &out.Plan + *out = new(OrchestratedVirtualMachineScaleSetPlanObservation) + (*in).DeepCopyInto(*out) + } + if in.PlatformFaultDomainCount != nil { + in, out := &in.PlatformFaultDomainCount, &out.PlatformFaultDomainCount + *out = new(float64) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(string) + **out = **in + } + if in.PriorityMix != nil { + in, out := &in.PriorityMix, &out.PriorityMix + *out = new(PriorityMixObservation) + (*in).DeepCopyInto(*out) + } + if in.ProximityPlacementGroupID != nil { + in, out := &in.ProximityPlacementGroupID, &out.ProximityPlacementGroupID + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, 
&out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.SinglePlacementGroup != nil { + in, out := &in.SinglePlacementGroup, &out.SinglePlacementGroup + *out = new(bool) + **out = **in + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.SourceImageID != nil { + in, out := &in.SourceImageID, &out.SourceImageID + *out = new(string) + **out = **in + } + if in.SourceImageReference != nil { + in, out := &in.SourceImageReference, &out.SourceImageReference + *out = new(OrchestratedVirtualMachineScaleSetSourceImageReferenceObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TerminationNotification != nil { + in, out := &in.TerminationNotification, &out.TerminationNotification + *out = new(OrchestratedVirtualMachineScaleSetTerminationNotificationObservation) + (*in).DeepCopyInto(*out) + } + if in.UniqueID != nil { + in, out := &in.UniqueID, &out.UniqueID + *out = new(string) + **out = **in + } + if in.ZoneBalance != nil { + in, out := &in.ZoneBalance, &out.ZoneBalance + *out = new(bool) + **out = **in + } + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrchestratedVirtualMachineScaleSetObservation. 
+func (in *OrchestratedVirtualMachineScaleSetObservation) DeepCopy() *OrchestratedVirtualMachineScaleSetObservation { + if in == nil { + return nil + } + out := new(OrchestratedVirtualMachineScaleSetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrchestratedVirtualMachineScaleSetOsDiskDiffDiskSettingsInitParameters) DeepCopyInto(out *OrchestratedVirtualMachineScaleSetOsDiskDiffDiskSettingsInitParameters) { + *out = *in + if in.Option != nil { + in, out := &in.Option, &out.Option + *out = new(string) + **out = **in + } + if in.Placement != nil { + in, out := &in.Placement, &out.Placement + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrchestratedVirtualMachineScaleSetOsDiskDiffDiskSettingsInitParameters. +func (in *OrchestratedVirtualMachineScaleSetOsDiskDiffDiskSettingsInitParameters) DeepCopy() *OrchestratedVirtualMachineScaleSetOsDiskDiffDiskSettingsInitParameters { + if in == nil { + return nil + } + out := new(OrchestratedVirtualMachineScaleSetOsDiskDiffDiskSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrchestratedVirtualMachineScaleSetOsDiskDiffDiskSettingsObservation) DeepCopyInto(out *OrchestratedVirtualMachineScaleSetOsDiskDiffDiskSettingsObservation) { + *out = *in + if in.Option != nil { + in, out := &in.Option, &out.Option + *out = new(string) + **out = **in + } + if in.Placement != nil { + in, out := &in.Placement, &out.Placement + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrchestratedVirtualMachineScaleSetOsDiskDiffDiskSettingsObservation. 
+func (in *OrchestratedVirtualMachineScaleSetOsDiskDiffDiskSettingsObservation) DeepCopy() *OrchestratedVirtualMachineScaleSetOsDiskDiffDiskSettingsObservation { + if in == nil { + return nil + } + out := new(OrchestratedVirtualMachineScaleSetOsDiskDiffDiskSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrchestratedVirtualMachineScaleSetOsDiskDiffDiskSettingsParameters) DeepCopyInto(out *OrchestratedVirtualMachineScaleSetOsDiskDiffDiskSettingsParameters) { + *out = *in + if in.Option != nil { + in, out := &in.Option, &out.Option + *out = new(string) + **out = **in + } + if in.Placement != nil { + in, out := &in.Placement, &out.Placement + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrchestratedVirtualMachineScaleSetOsDiskDiffDiskSettingsParameters. +func (in *OrchestratedVirtualMachineScaleSetOsDiskDiffDiskSettingsParameters) DeepCopy() *OrchestratedVirtualMachineScaleSetOsDiskDiffDiskSettingsParameters { + if in == nil { + return nil + } + out := new(OrchestratedVirtualMachineScaleSetOsDiskDiffDiskSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OrchestratedVirtualMachineScaleSetOsDiskInitParameters) DeepCopyInto(out *OrchestratedVirtualMachineScaleSetOsDiskInitParameters) { + *out = *in + if in.Caching != nil { + in, out := &in.Caching, &out.Caching + *out = new(string) + **out = **in + } + if in.DiffDiskSettings != nil { + in, out := &in.DiffDiskSettings, &out.DiffDiskSettings + *out = new(OrchestratedVirtualMachineScaleSetOsDiskDiffDiskSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DiskEncryptionSetID != nil { + in, out := &in.DiskEncryptionSetID, &out.DiskEncryptionSetID + *out = new(string) + **out = **in + } + if in.DiskSizeGb != nil { + in, out := &in.DiskSizeGb, &out.DiskSizeGb + *out = new(float64) + **out = **in + } + if in.StorageAccountType != nil { + in, out := &in.StorageAccountType, &out.StorageAccountType + *out = new(string) + **out = **in + } + if in.WriteAcceleratorEnabled != nil { + in, out := &in.WriteAcceleratorEnabled, &out.WriteAcceleratorEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrchestratedVirtualMachineScaleSetOsDiskInitParameters. +func (in *OrchestratedVirtualMachineScaleSetOsDiskInitParameters) DeepCopy() *OrchestratedVirtualMachineScaleSetOsDiskInitParameters { + if in == nil { + return nil + } + out := new(OrchestratedVirtualMachineScaleSetOsDiskInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OrchestratedVirtualMachineScaleSetOsDiskObservation) DeepCopyInto(out *OrchestratedVirtualMachineScaleSetOsDiskObservation) { + *out = *in + if in.Caching != nil { + in, out := &in.Caching, &out.Caching + *out = new(string) + **out = **in + } + if in.DiffDiskSettings != nil { + in, out := &in.DiffDiskSettings, &out.DiffDiskSettings + *out = new(OrchestratedVirtualMachineScaleSetOsDiskDiffDiskSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.DiskEncryptionSetID != nil { + in, out := &in.DiskEncryptionSetID, &out.DiskEncryptionSetID + *out = new(string) + **out = **in + } + if in.DiskSizeGb != nil { + in, out := &in.DiskSizeGb, &out.DiskSizeGb + *out = new(float64) + **out = **in + } + if in.StorageAccountType != nil { + in, out := &in.StorageAccountType, &out.StorageAccountType + *out = new(string) + **out = **in + } + if in.WriteAcceleratorEnabled != nil { + in, out := &in.WriteAcceleratorEnabled, &out.WriteAcceleratorEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrchestratedVirtualMachineScaleSetOsDiskObservation. +func (in *OrchestratedVirtualMachineScaleSetOsDiskObservation) DeepCopy() *OrchestratedVirtualMachineScaleSetOsDiskObservation { + if in == nil { + return nil + } + out := new(OrchestratedVirtualMachineScaleSetOsDiskObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OrchestratedVirtualMachineScaleSetOsDiskParameters) DeepCopyInto(out *OrchestratedVirtualMachineScaleSetOsDiskParameters) { + *out = *in + if in.Caching != nil { + in, out := &in.Caching, &out.Caching + *out = new(string) + **out = **in + } + if in.DiffDiskSettings != nil { + in, out := &in.DiffDiskSettings, &out.DiffDiskSettings + *out = new(OrchestratedVirtualMachineScaleSetOsDiskDiffDiskSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.DiskEncryptionSetID != nil { + in, out := &in.DiskEncryptionSetID, &out.DiskEncryptionSetID + *out = new(string) + **out = **in + } + if in.DiskSizeGb != nil { + in, out := &in.DiskSizeGb, &out.DiskSizeGb + *out = new(float64) + **out = **in + } + if in.StorageAccountType != nil { + in, out := &in.StorageAccountType, &out.StorageAccountType + *out = new(string) + **out = **in + } + if in.WriteAcceleratorEnabled != nil { + in, out := &in.WriteAcceleratorEnabled, &out.WriteAcceleratorEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrchestratedVirtualMachineScaleSetOsDiskParameters. +func (in *OrchestratedVirtualMachineScaleSetOsDiskParameters) DeepCopy() *OrchestratedVirtualMachineScaleSetOsDiskParameters { + if in == nil { + return nil + } + out := new(OrchestratedVirtualMachineScaleSetOsDiskParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OrchestratedVirtualMachineScaleSetParameters) DeepCopyInto(out *OrchestratedVirtualMachineScaleSetParameters) { + *out = *in + if in.AdditionalCapabilities != nil { + in, out := &in.AdditionalCapabilities, &out.AdditionalCapabilities + *out = new(OrchestratedVirtualMachineScaleSetAdditionalCapabilitiesParameters) + (*in).DeepCopyInto(*out) + } + if in.AutomaticInstanceRepair != nil { + in, out := &in.AutomaticInstanceRepair, &out.AutomaticInstanceRepair + *out = new(OrchestratedVirtualMachineScaleSetAutomaticInstanceRepairParameters) + (*in).DeepCopyInto(*out) + } + if in.BootDiagnostics != nil { + in, out := &in.BootDiagnostics, &out.BootDiagnostics + *out = new(OrchestratedVirtualMachineScaleSetBootDiagnosticsParameters) + (*in).DeepCopyInto(*out) + } + if in.CapacityReservationGroupID != nil { + in, out := &in.CapacityReservationGroupID, &out.CapacityReservationGroupID + *out = new(string) + **out = **in + } + if in.DataDisk != nil { + in, out := &in.DataDisk, &out.DataDisk + *out = make([]OrchestratedVirtualMachineScaleSetDataDiskParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EncryptionAtHostEnabled != nil { + in, out := &in.EncryptionAtHostEnabled, &out.EncryptionAtHostEnabled + *out = new(bool) + **out = **in + } + if in.EvictionPolicy != nil { + in, out := &in.EvictionPolicy, &out.EvictionPolicy + *out = new(string) + **out = **in + } + if in.Extension != nil { + in, out := &in.Extension, &out.Extension + *out = make([]OrchestratedVirtualMachineScaleSetExtensionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ExtensionOperationsEnabled != nil { + in, out := &in.ExtensionOperationsEnabled, &out.ExtensionOperationsEnabled + *out = new(bool) + **out = **in + } + if in.ExtensionsTimeBudget != nil { + in, out := &in.ExtensionsTimeBudget, &out.ExtensionsTimeBudget + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, 
&out.Identity + *out = new(OrchestratedVirtualMachineScaleSetIdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.Instances != nil { + in, out := &in.Instances, &out.Instances + *out = new(float64) + **out = **in + } + if in.LicenseType != nil { + in, out := &in.LicenseType, &out.LicenseType + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MaxBidPrice != nil { + in, out := &in.MaxBidPrice, &out.MaxBidPrice + *out = new(float64) + **out = **in + } + if in.NetworkInterface != nil { + in, out := &in.NetworkInterface, &out.NetworkInterface + *out = make([]OrchestratedVirtualMachineScaleSetNetworkInterfaceParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OsDisk != nil { + in, out := &in.OsDisk, &out.OsDisk + *out = new(OrchestratedVirtualMachineScaleSetOsDiskParameters) + (*in).DeepCopyInto(*out) + } + if in.OsProfile != nil { + in, out := &in.OsProfile, &out.OsProfile + *out = new(OsProfileParameters) + (*in).DeepCopyInto(*out) + } + if in.Plan != nil { + in, out := &in.Plan, &out.Plan + *out = new(OrchestratedVirtualMachineScaleSetPlanParameters) + (*in).DeepCopyInto(*out) + } + if in.PlatformFaultDomainCount != nil { + in, out := &in.PlatformFaultDomainCount, &out.PlatformFaultDomainCount + *out = new(float64) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(string) + **out = **in + } + if in.PriorityMix != nil { + in, out := &in.PriorityMix, &out.PriorityMix + *out = new(PriorityMixParameters) + (*in).DeepCopyInto(*out) + } + if in.ProximityPlacementGroupID != nil { + in, out := &in.ProximityPlacementGroupID, &out.ProximityPlacementGroupID + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := 
&in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SinglePlacementGroup != nil { + in, out := &in.SinglePlacementGroup, &out.SinglePlacementGroup + *out = new(bool) + **out = **in + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.SourceImageID != nil { + in, out := &in.SourceImageID, &out.SourceImageID + *out = new(string) + **out = **in + } + if in.SourceImageReference != nil { + in, out := &in.SourceImageReference, &out.SourceImageReference + *out = new(OrchestratedVirtualMachineScaleSetSourceImageReferenceParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TerminationNotification != nil { + in, out := &in.TerminationNotification, &out.TerminationNotification + *out = new(OrchestratedVirtualMachineScaleSetTerminationNotificationParameters) + (*in).DeepCopyInto(*out) + } + if in.UserDataBase64SecretRef != nil { + in, out := &in.UserDataBase64SecretRef, &out.UserDataBase64SecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ZoneBalance != nil { + in, out := &in.ZoneBalance, &out.ZoneBalance + *out = new(bool) + **out = **in + } + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, 
creating a new OrchestratedVirtualMachineScaleSetParameters. +func (in *OrchestratedVirtualMachineScaleSetParameters) DeepCopy() *OrchestratedVirtualMachineScaleSetParameters { + if in == nil { + return nil + } + out := new(OrchestratedVirtualMachineScaleSetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrchestratedVirtualMachineScaleSetPlanInitParameters) DeepCopyInto(out *OrchestratedVirtualMachineScaleSetPlanInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Product != nil { + in, out := &in.Product, &out.Product + *out = new(string) + **out = **in + } + if in.Publisher != nil { + in, out := &in.Publisher, &out.Publisher + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrchestratedVirtualMachineScaleSetPlanInitParameters. +func (in *OrchestratedVirtualMachineScaleSetPlanInitParameters) DeepCopy() *OrchestratedVirtualMachineScaleSetPlanInitParameters { + if in == nil { + return nil + } + out := new(OrchestratedVirtualMachineScaleSetPlanInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OrchestratedVirtualMachineScaleSetPlanObservation) DeepCopyInto(out *OrchestratedVirtualMachineScaleSetPlanObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Product != nil { + in, out := &in.Product, &out.Product + *out = new(string) + **out = **in + } + if in.Publisher != nil { + in, out := &in.Publisher, &out.Publisher + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrchestratedVirtualMachineScaleSetPlanObservation. +func (in *OrchestratedVirtualMachineScaleSetPlanObservation) DeepCopy() *OrchestratedVirtualMachineScaleSetPlanObservation { + if in == nil { + return nil + } + out := new(OrchestratedVirtualMachineScaleSetPlanObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrchestratedVirtualMachineScaleSetPlanParameters) DeepCopyInto(out *OrchestratedVirtualMachineScaleSetPlanParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Product != nil { + in, out := &in.Product, &out.Product + *out = new(string) + **out = **in + } + if in.Publisher != nil { + in, out := &in.Publisher, &out.Publisher + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrchestratedVirtualMachineScaleSetPlanParameters. +func (in *OrchestratedVirtualMachineScaleSetPlanParameters) DeepCopy() *OrchestratedVirtualMachineScaleSetPlanParameters { + if in == nil { + return nil + } + out := new(OrchestratedVirtualMachineScaleSetPlanParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OrchestratedVirtualMachineScaleSetSourceImageReferenceInitParameters) DeepCopyInto(out *OrchestratedVirtualMachineScaleSetSourceImageReferenceInitParameters) { + *out = *in + if in.Offer != nil { + in, out := &in.Offer, &out.Offer + *out = new(string) + **out = **in + } + if in.Publisher != nil { + in, out := &in.Publisher, &out.Publisher + *out = new(string) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrchestratedVirtualMachineScaleSetSourceImageReferenceInitParameters. +func (in *OrchestratedVirtualMachineScaleSetSourceImageReferenceInitParameters) DeepCopy() *OrchestratedVirtualMachineScaleSetSourceImageReferenceInitParameters { + if in == nil { + return nil + } + out := new(OrchestratedVirtualMachineScaleSetSourceImageReferenceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrchestratedVirtualMachineScaleSetSourceImageReferenceObservation) DeepCopyInto(out *OrchestratedVirtualMachineScaleSetSourceImageReferenceObservation) { + *out = *in + if in.Offer != nil { + in, out := &in.Offer, &out.Offer + *out = new(string) + **out = **in + } + if in.Publisher != nil { + in, out := &in.Publisher, &out.Publisher + *out = new(string) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrchestratedVirtualMachineScaleSetSourceImageReferenceObservation. 
+func (in *OrchestratedVirtualMachineScaleSetSourceImageReferenceObservation) DeepCopy() *OrchestratedVirtualMachineScaleSetSourceImageReferenceObservation { + if in == nil { + return nil + } + out := new(OrchestratedVirtualMachineScaleSetSourceImageReferenceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrchestratedVirtualMachineScaleSetSourceImageReferenceParameters) DeepCopyInto(out *OrchestratedVirtualMachineScaleSetSourceImageReferenceParameters) { + *out = *in + if in.Offer != nil { + in, out := &in.Offer, &out.Offer + *out = new(string) + **out = **in + } + if in.Publisher != nil { + in, out := &in.Publisher, &out.Publisher + *out = new(string) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrchestratedVirtualMachineScaleSetSourceImageReferenceParameters. +func (in *OrchestratedVirtualMachineScaleSetSourceImageReferenceParameters) DeepCopy() *OrchestratedVirtualMachineScaleSetSourceImageReferenceParameters { + if in == nil { + return nil + } + out := new(OrchestratedVirtualMachineScaleSetSourceImageReferenceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OrchestratedVirtualMachineScaleSetSpec) DeepCopyInto(out *OrchestratedVirtualMachineScaleSetSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrchestratedVirtualMachineScaleSetSpec. +func (in *OrchestratedVirtualMachineScaleSetSpec) DeepCopy() *OrchestratedVirtualMachineScaleSetSpec { + if in == nil { + return nil + } + out := new(OrchestratedVirtualMachineScaleSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrchestratedVirtualMachineScaleSetStatus) DeepCopyInto(out *OrchestratedVirtualMachineScaleSetStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrchestratedVirtualMachineScaleSetStatus. +func (in *OrchestratedVirtualMachineScaleSetStatus) DeepCopy() *OrchestratedVirtualMachineScaleSetStatus { + if in == nil { + return nil + } + out := new(OrchestratedVirtualMachineScaleSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OrchestratedVirtualMachineScaleSetTerminationNotificationInitParameters) DeepCopyInto(out *OrchestratedVirtualMachineScaleSetTerminationNotificationInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrchestratedVirtualMachineScaleSetTerminationNotificationInitParameters. +func (in *OrchestratedVirtualMachineScaleSetTerminationNotificationInitParameters) DeepCopy() *OrchestratedVirtualMachineScaleSetTerminationNotificationInitParameters { + if in == nil { + return nil + } + out := new(OrchestratedVirtualMachineScaleSetTerminationNotificationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrchestratedVirtualMachineScaleSetTerminationNotificationObservation) DeepCopyInto(out *OrchestratedVirtualMachineScaleSetTerminationNotificationObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrchestratedVirtualMachineScaleSetTerminationNotificationObservation. +func (in *OrchestratedVirtualMachineScaleSetTerminationNotificationObservation) DeepCopy() *OrchestratedVirtualMachineScaleSetTerminationNotificationObservation { + if in == nil { + return nil + } + out := new(OrchestratedVirtualMachineScaleSetTerminationNotificationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *OrchestratedVirtualMachineScaleSetTerminationNotificationParameters) DeepCopyInto(out *OrchestratedVirtualMachineScaleSetTerminationNotificationParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrchestratedVirtualMachineScaleSetTerminationNotificationParameters. +func (in *OrchestratedVirtualMachineScaleSetTerminationNotificationParameters) DeepCopy() *OrchestratedVirtualMachineScaleSetTerminationNotificationParameters { + if in == nil { + return nil + } + out := new(OrchestratedVirtualMachineScaleSetTerminationNotificationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OsDiskDiffDiskSettingsInitParameters) DeepCopyInto(out *OsDiskDiffDiskSettingsInitParameters) { + *out = *in + if in.Option != nil { + in, out := &in.Option, &out.Option + *out = new(string) + **out = **in + } + if in.Placement != nil { + in, out := &in.Placement, &out.Placement + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OsDiskDiffDiskSettingsInitParameters. +func (in *OsDiskDiffDiskSettingsInitParameters) DeepCopy() *OsDiskDiffDiskSettingsInitParameters { + if in == nil { + return nil + } + out := new(OsDiskDiffDiskSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OsDiskDiffDiskSettingsObservation) DeepCopyInto(out *OsDiskDiffDiskSettingsObservation) { + *out = *in + if in.Option != nil { + in, out := &in.Option, &out.Option + *out = new(string) + **out = **in + } + if in.Placement != nil { + in, out := &in.Placement, &out.Placement + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OsDiskDiffDiskSettingsObservation. +func (in *OsDiskDiffDiskSettingsObservation) DeepCopy() *OsDiskDiffDiskSettingsObservation { + if in == nil { + return nil + } + out := new(OsDiskDiffDiskSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OsDiskDiffDiskSettingsParameters) DeepCopyInto(out *OsDiskDiffDiskSettingsParameters) { + *out = *in + if in.Option != nil { + in, out := &in.Option, &out.Option + *out = new(string) + **out = **in + } + if in.Placement != nil { + in, out := &in.Placement, &out.Placement + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OsDiskDiffDiskSettingsParameters. +func (in *OsDiskDiffDiskSettingsParameters) DeepCopy() *OsDiskDiffDiskSettingsParameters { + if in == nil { + return nil + } + out := new(OsDiskDiffDiskSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OsDiskInitParameters) DeepCopyInto(out *OsDiskInitParameters) { + *out = *in + if in.BlobURI != nil { + in, out := &in.BlobURI, &out.BlobURI + *out = new(string) + **out = **in + } + if in.Caching != nil { + in, out := &in.Caching, &out.Caching + *out = new(string) + **out = **in + } + if in.DiskEncryptionSetID != nil { + in, out := &in.DiskEncryptionSetID, &out.DiskEncryptionSetID + *out = new(string) + **out = **in + } + if in.ManagedDiskID != nil { + in, out := &in.ManagedDiskID, &out.ManagedDiskID + *out = new(string) + **out = **in + } + if in.OsState != nil { + in, out := &in.OsState, &out.OsState + *out = new(string) + **out = **in + } + if in.OsType != nil { + in, out := &in.OsType, &out.OsType + *out = new(string) + **out = **in + } + if in.SizeGb != nil { + in, out := &in.SizeGb, &out.SizeGb + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OsDiskInitParameters. +func (in *OsDiskInitParameters) DeepCopy() *OsDiskInitParameters { + if in == nil { + return nil + } + out := new(OsDiskInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OsDiskObservation) DeepCopyInto(out *OsDiskObservation) { + *out = *in + if in.BlobURI != nil { + in, out := &in.BlobURI, &out.BlobURI + *out = new(string) + **out = **in + } + if in.Caching != nil { + in, out := &in.Caching, &out.Caching + *out = new(string) + **out = **in + } + if in.DiskEncryptionSetID != nil { + in, out := &in.DiskEncryptionSetID, &out.DiskEncryptionSetID + *out = new(string) + **out = **in + } + if in.ManagedDiskID != nil { + in, out := &in.ManagedDiskID, &out.ManagedDiskID + *out = new(string) + **out = **in + } + if in.OsState != nil { + in, out := &in.OsState, &out.OsState + *out = new(string) + **out = **in + } + if in.OsType != nil { + in, out := &in.OsType, &out.OsType + *out = new(string) + **out = **in + } + if in.SizeGb != nil { + in, out := &in.SizeGb, &out.SizeGb + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OsDiskObservation. +func (in *OsDiskObservation) DeepCopy() *OsDiskObservation { + if in == nil { + return nil + } + out := new(OsDiskObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OsDiskParameters) DeepCopyInto(out *OsDiskParameters) { + *out = *in + if in.BlobURI != nil { + in, out := &in.BlobURI, &out.BlobURI + *out = new(string) + **out = **in + } + if in.Caching != nil { + in, out := &in.Caching, &out.Caching + *out = new(string) + **out = **in + } + if in.DiskEncryptionSetID != nil { + in, out := &in.DiskEncryptionSetID, &out.DiskEncryptionSetID + *out = new(string) + **out = **in + } + if in.ManagedDiskID != nil { + in, out := &in.ManagedDiskID, &out.ManagedDiskID + *out = new(string) + **out = **in + } + if in.OsState != nil { + in, out := &in.OsState, &out.OsState + *out = new(string) + **out = **in + } + if in.OsType != nil { + in, out := &in.OsType, &out.OsType + *out = new(string) + **out = **in + } + if in.SizeGb != nil { + in, out := &in.SizeGb, &out.SizeGb + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OsDiskParameters. +func (in *OsDiskParameters) DeepCopy() *OsDiskParameters { + if in == nil { + return nil + } + out := new(OsDiskParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OsImageNotificationInitParameters) DeepCopyInto(out *OsImageNotificationInitParameters) { + *out = *in + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OsImageNotificationInitParameters. +func (in *OsImageNotificationInitParameters) DeepCopy() *OsImageNotificationInitParameters { + if in == nil { + return nil + } + out := new(OsImageNotificationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OsImageNotificationObservation) DeepCopyInto(out *OsImageNotificationObservation) { + *out = *in + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OsImageNotificationObservation. +func (in *OsImageNotificationObservation) DeepCopy() *OsImageNotificationObservation { + if in == nil { + return nil + } + out := new(OsImageNotificationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OsImageNotificationParameters) DeepCopyInto(out *OsImageNotificationParameters) { + *out = *in + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OsImageNotificationParameters. +func (in *OsImageNotificationParameters) DeepCopy() *OsImageNotificationParameters { + if in == nil { + return nil + } + out := new(OsImageNotificationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OsProfileInitParameters) DeepCopyInto(out *OsProfileInitParameters) { + *out = *in + if in.LinuxConfiguration != nil { + in, out := &in.LinuxConfiguration, &out.LinuxConfiguration + *out = new(LinuxConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.WindowsConfiguration != nil { + in, out := &in.WindowsConfiguration, &out.WindowsConfiguration + *out = new(WindowsConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OsProfileInitParameters. 
+func (in *OsProfileInitParameters) DeepCopy() *OsProfileInitParameters { + if in == nil { + return nil + } + out := new(OsProfileInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OsProfileObservation) DeepCopyInto(out *OsProfileObservation) { + *out = *in + if in.LinuxConfiguration != nil { + in, out := &in.LinuxConfiguration, &out.LinuxConfiguration + *out = new(LinuxConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.WindowsConfiguration != nil { + in, out := &in.WindowsConfiguration, &out.WindowsConfiguration + *out = new(WindowsConfigurationObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OsProfileObservation. +func (in *OsProfileObservation) DeepCopy() *OsProfileObservation { + if in == nil { + return nil + } + out := new(OsProfileObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OsProfileParameters) DeepCopyInto(out *OsProfileParameters) { + *out = *in + if in.CustomDataSecretRef != nil { + in, out := &in.CustomDataSecretRef, &out.CustomDataSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.LinuxConfiguration != nil { + in, out := &in.LinuxConfiguration, &out.LinuxConfiguration + *out = new(LinuxConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.WindowsConfiguration != nil { + in, out := &in.WindowsConfiguration, &out.WindowsConfiguration + *out = new(WindowsConfigurationParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OsProfileParameters. 
+func (in *OsProfileParameters) DeepCopy() *OsProfileParameters { + if in == nil { + return nil + } + out := new(OsProfileParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputBlobManagedIdentityInitParameters) DeepCopyInto(out *OutputBlobManagedIdentityInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputBlobManagedIdentityInitParameters. +func (in *OutputBlobManagedIdentityInitParameters) DeepCopy() *OutputBlobManagedIdentityInitParameters { + if in == nil { + return nil + } + out := new(OutputBlobManagedIdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputBlobManagedIdentityObservation) DeepCopyInto(out *OutputBlobManagedIdentityObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputBlobManagedIdentityObservation. +func (in *OutputBlobManagedIdentityObservation) DeepCopy() *OutputBlobManagedIdentityObservation { + if in == nil { + return nil + } + out := new(OutputBlobManagedIdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OutputBlobManagedIdentityParameters) DeepCopyInto(out *OutputBlobManagedIdentityParameters) { + *out = *in + if in.ClientIDSecretRef != nil { + in, out := &in.ClientIDSecretRef, &out.ClientIDSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ObjectIDSecretRef != nil { + in, out := &in.ObjectIDSecretRef, &out.ObjectIDSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputBlobManagedIdentityParameters. +func (in *OutputBlobManagedIdentityParameters) DeepCopy() *OutputBlobManagedIdentityParameters { + if in == nil { + return nil + } + out := new(OutputBlobManagedIdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParameterInitParameters) DeepCopyInto(out *ParameterInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParameterInitParameters. +func (in *ParameterInitParameters) DeepCopy() *ParameterInitParameters { + if in == nil { + return nil + } + out := new(ParameterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParameterObservation) DeepCopyInto(out *ParameterObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParameterObservation. 
+func (in *ParameterObservation) DeepCopy() *ParameterObservation { + if in == nil { + return nil + } + out := new(ParameterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParameterParameters) DeepCopyInto(out *ParameterParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParameterParameters. +func (in *ParameterParameters) DeepCopy() *ParameterParameters { + if in == nil { + return nil + } + out := new(ParameterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlanInitParameters) DeepCopyInto(out *PlanInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Product != nil { + in, out := &in.Product, &out.Product + *out = new(string) + **out = **in + } + if in.Publisher != nil { + in, out := &in.Publisher, &out.Publisher + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlanInitParameters. +func (in *PlanInitParameters) DeepCopy() *PlanInitParameters { + if in == nil { + return nil + } + out := new(PlanInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PlanObservation) DeepCopyInto(out *PlanObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Product != nil { + in, out := &in.Product, &out.Product + *out = new(string) + **out = **in + } + if in.Publisher != nil { + in, out := &in.Publisher, &out.Publisher + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlanObservation. +func (in *PlanObservation) DeepCopy() *PlanObservation { + if in == nil { + return nil + } + out := new(PlanObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlanParameters) DeepCopyInto(out *PlanParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Product != nil { + in, out := &in.Product, &out.Product + *out = new(string) + **out = **in + } + if in.Publisher != nil { + in, out := &in.Publisher, &out.Publisher + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlanParameters. +func (in *PlanParameters) DeepCopy() *PlanParameters { + if in == nil { + return nil + } + out := new(PlanParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PriorityMixInitParameters) DeepCopyInto(out *PriorityMixInitParameters) { + *out = *in + if in.BaseRegularCount != nil { + in, out := &in.BaseRegularCount, &out.BaseRegularCount + *out = new(float64) + **out = **in + } + if in.RegularPercentageAboveBase != nil { + in, out := &in.RegularPercentageAboveBase, &out.RegularPercentageAboveBase + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityMixInitParameters. +func (in *PriorityMixInitParameters) DeepCopy() *PriorityMixInitParameters { + if in == nil { + return nil + } + out := new(PriorityMixInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityMixObservation) DeepCopyInto(out *PriorityMixObservation) { + *out = *in + if in.BaseRegularCount != nil { + in, out := &in.BaseRegularCount, &out.BaseRegularCount + *out = new(float64) + **out = **in + } + if in.RegularPercentageAboveBase != nil { + in, out := &in.RegularPercentageAboveBase, &out.RegularPercentageAboveBase + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityMixObservation. +func (in *PriorityMixObservation) DeepCopy() *PriorityMixObservation { + if in == nil { + return nil + } + out := new(PriorityMixObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PriorityMixParameters) DeepCopyInto(out *PriorityMixParameters) { + *out = *in + if in.BaseRegularCount != nil { + in, out := &in.BaseRegularCount, &out.BaseRegularCount + *out = new(float64) + **out = **in + } + if in.RegularPercentageAboveBase != nil { + in, out := &in.RegularPercentageAboveBase, &out.RegularPercentageAboveBase + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityMixParameters. +func (in *PriorityMixParameters) DeepCopy() *PriorityMixParameters { + if in == nil { + return nil + } + out := new(PriorityMixParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProtectedParameterInitParameters) DeepCopyInto(out *ProtectedParameterInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProtectedParameterInitParameters. +func (in *ProtectedParameterInitParameters) DeepCopy() *ProtectedParameterInitParameters { + if in == nil { + return nil + } + out := new(ProtectedParameterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProtectedParameterObservation) DeepCopyInto(out *ProtectedParameterObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProtectedParameterObservation. +func (in *ProtectedParameterObservation) DeepCopy() *ProtectedParameterObservation { + if in == nil { + return nil + } + out := new(ProtectedParameterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProtectedParameterParameters) DeepCopyInto(out *ProtectedParameterParameters) { + *out = *in + out.NameSecretRef = in.NameSecretRef + out.ValueSecretRef = in.ValueSecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProtectedParameterParameters. +func (in *ProtectedParameterParameters) DeepCopy() *ProtectedParameterParameters { + if in == nil { + return nil + } + out := new(ProtectedParameterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProtectedSettingsFromKeyVaultInitParameters) DeepCopyInto(out *ProtectedSettingsFromKeyVaultInitParameters) { + *out = *in + if in.SecretURL != nil { + in, out := &in.SecretURL, &out.SecretURL + *out = new(string) + **out = **in + } + if in.SourceVaultID != nil { + in, out := &in.SourceVaultID, &out.SourceVaultID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProtectedSettingsFromKeyVaultInitParameters. +func (in *ProtectedSettingsFromKeyVaultInitParameters) DeepCopy() *ProtectedSettingsFromKeyVaultInitParameters { + if in == nil { + return nil + } + out := new(ProtectedSettingsFromKeyVaultInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProtectedSettingsFromKeyVaultObservation) DeepCopyInto(out *ProtectedSettingsFromKeyVaultObservation) { + *out = *in + if in.SecretURL != nil { + in, out := &in.SecretURL, &out.SecretURL + *out = new(string) + **out = **in + } + if in.SourceVaultID != nil { + in, out := &in.SourceVaultID, &out.SourceVaultID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProtectedSettingsFromKeyVaultObservation. 
+func (in *ProtectedSettingsFromKeyVaultObservation) DeepCopy() *ProtectedSettingsFromKeyVaultObservation { + if in == nil { + return nil + } + out := new(ProtectedSettingsFromKeyVaultObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProtectedSettingsFromKeyVaultParameters) DeepCopyInto(out *ProtectedSettingsFromKeyVaultParameters) { + *out = *in + if in.SecretURL != nil { + in, out := &in.SecretURL, &out.SecretURL + *out = new(string) + **out = **in + } + if in.SourceVaultID != nil { + in, out := &in.SourceVaultID, &out.SourceVaultID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProtectedSettingsFromKeyVaultParameters. +func (in *ProtectedSettingsFromKeyVaultParameters) DeepCopy() *ProtectedSettingsFromKeyVaultParameters { + if in == nil { + return nil + } + out := new(ProtectedSettingsFromKeyVaultParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PublicIPAddressIPTagInitParameters) DeepCopyInto(out *PublicIPAddressIPTagInitParameters) { + *out = *in + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublicIPAddressIPTagInitParameters. +func (in *PublicIPAddressIPTagInitParameters) DeepCopy() *PublicIPAddressIPTagInitParameters { + if in == nil { + return nil + } + out := new(PublicIPAddressIPTagInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PublicIPAddressIPTagObservation) DeepCopyInto(out *PublicIPAddressIPTagObservation) { + *out = *in + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublicIPAddressIPTagObservation. +func (in *PublicIPAddressIPTagObservation) DeepCopy() *PublicIPAddressIPTagObservation { + if in == nil { + return nil + } + out := new(PublicIPAddressIPTagObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PublicIPAddressIPTagParameters) DeepCopyInto(out *PublicIPAddressIPTagParameters) { + *out = *in + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublicIPAddressIPTagParameters. +func (in *PublicIPAddressIPTagParameters) DeepCopy() *PublicIPAddressIPTagParameters { + if in == nil { + return nil + } + out := new(PublicIPAddressIPTagParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PublicIPAddressInitParameters) DeepCopyInto(out *PublicIPAddressInitParameters) { + *out = *in + if in.DomainNameLabel != nil { + in, out := &in.DomainNameLabel, &out.DomainNameLabel + *out = new(string) + **out = **in + } + if in.IPTag != nil { + in, out := &in.IPTag, &out.IPTag + *out = make([]IPTagInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IdleTimeoutInMinutes != nil { + in, out := &in.IdleTimeoutInMinutes, &out.IdleTimeoutInMinutes + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PublicIPPrefixID != nil { + in, out := &in.PublicIPPrefixID, &out.PublicIPPrefixID + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublicIPAddressInitParameters. +func (in *PublicIPAddressInitParameters) DeepCopy() *PublicIPAddressInitParameters { + if in == nil { + return nil + } + out := new(PublicIPAddressInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PublicIPAddressObservation) DeepCopyInto(out *PublicIPAddressObservation) { + *out = *in + if in.DomainNameLabel != nil { + in, out := &in.DomainNameLabel, &out.DomainNameLabel + *out = new(string) + **out = **in + } + if in.IPTag != nil { + in, out := &in.IPTag, &out.IPTag + *out = make([]IPTagObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IdleTimeoutInMinutes != nil { + in, out := &in.IdleTimeoutInMinutes, &out.IdleTimeoutInMinutes + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PublicIPPrefixID != nil { + in, out := &in.PublicIPPrefixID, &out.PublicIPPrefixID + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublicIPAddressObservation. +func (in *PublicIPAddressObservation) DeepCopy() *PublicIPAddressObservation { + if in == nil { + return nil + } + out := new(PublicIPAddressObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PublicIPAddressParameters) DeepCopyInto(out *PublicIPAddressParameters) { + *out = *in + if in.DomainNameLabel != nil { + in, out := &in.DomainNameLabel, &out.DomainNameLabel + *out = new(string) + **out = **in + } + if in.IPTag != nil { + in, out := &in.IPTag, &out.IPTag + *out = make([]IPTagParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IdleTimeoutInMinutes != nil { + in, out := &in.IdleTimeoutInMinutes, &out.IdleTimeoutInMinutes + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PublicIPPrefixID != nil { + in, out := &in.PublicIPPrefixID, &out.PublicIPPrefixID + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublicIPAddressParameters. +func (in *PublicIPAddressParameters) DeepCopy() *PublicIPAddressParameters { + if in == nil { + return nil + } + out := new(PublicIPAddressParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PurchasePlanInitParameters) DeepCopyInto(out *PurchasePlanInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Product != nil { + in, out := &in.Product, &out.Product + *out = new(string) + **out = **in + } + if in.Publisher != nil { + in, out := &in.Publisher, &out.Publisher + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PurchasePlanInitParameters. 
+func (in *PurchasePlanInitParameters) DeepCopy() *PurchasePlanInitParameters { + if in == nil { + return nil + } + out := new(PurchasePlanInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PurchasePlanObservation) DeepCopyInto(out *PurchasePlanObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Product != nil { + in, out := &in.Product, &out.Product + *out = new(string) + **out = **in + } + if in.Publisher != nil { + in, out := &in.Publisher, &out.Publisher + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PurchasePlanObservation. +func (in *PurchasePlanObservation) DeepCopy() *PurchasePlanObservation { + if in == nil { + return nil + } + out := new(PurchasePlanObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PurchasePlanParameters) DeepCopyInto(out *PurchasePlanParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Product != nil { + in, out := &in.Product, &out.Product + *out = new(string) + **out = **in + } + if in.Publisher != nil { + in, out := &in.Publisher, &out.Publisher + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PurchasePlanParameters. +func (in *PurchasePlanParameters) DeepCopy() *PurchasePlanParameters { + if in == nil { + return nil + } + out := new(PurchasePlanParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RollingUpgradePolicyInitParameters) DeepCopyInto(out *RollingUpgradePolicyInitParameters) { + *out = *in + if in.CrossZoneUpgradesEnabled != nil { + in, out := &in.CrossZoneUpgradesEnabled, &out.CrossZoneUpgradesEnabled + *out = new(bool) + **out = **in + } + if in.MaxBatchInstancePercent != nil { + in, out := &in.MaxBatchInstancePercent, &out.MaxBatchInstancePercent + *out = new(float64) + **out = **in + } + if in.MaxUnhealthyInstancePercent != nil { + in, out := &in.MaxUnhealthyInstancePercent, &out.MaxUnhealthyInstancePercent + *out = new(float64) + **out = **in + } + if in.MaxUnhealthyUpgradedInstancePercent != nil { + in, out := &in.MaxUnhealthyUpgradedInstancePercent, &out.MaxUnhealthyUpgradedInstancePercent + *out = new(float64) + **out = **in + } + if in.PauseTimeBetweenBatches != nil { + in, out := &in.PauseTimeBetweenBatches, &out.PauseTimeBetweenBatches + *out = new(string) + **out = **in + } + if in.PrioritizeUnhealthyInstancesEnabled != nil { + in, out := &in.PrioritizeUnhealthyInstancesEnabled, &out.PrioritizeUnhealthyInstancesEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpgradePolicyInitParameters. +func (in *RollingUpgradePolicyInitParameters) DeepCopy() *RollingUpgradePolicyInitParameters { + if in == nil { + return nil + } + out := new(RollingUpgradePolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RollingUpgradePolicyObservation) DeepCopyInto(out *RollingUpgradePolicyObservation) { + *out = *in + if in.CrossZoneUpgradesEnabled != nil { + in, out := &in.CrossZoneUpgradesEnabled, &out.CrossZoneUpgradesEnabled + *out = new(bool) + **out = **in + } + if in.MaxBatchInstancePercent != nil { + in, out := &in.MaxBatchInstancePercent, &out.MaxBatchInstancePercent + *out = new(float64) + **out = **in + } + if in.MaxUnhealthyInstancePercent != nil { + in, out := &in.MaxUnhealthyInstancePercent, &out.MaxUnhealthyInstancePercent + *out = new(float64) + **out = **in + } + if in.MaxUnhealthyUpgradedInstancePercent != nil { + in, out := &in.MaxUnhealthyUpgradedInstancePercent, &out.MaxUnhealthyUpgradedInstancePercent + *out = new(float64) + **out = **in + } + if in.PauseTimeBetweenBatches != nil { + in, out := &in.PauseTimeBetweenBatches, &out.PauseTimeBetweenBatches + *out = new(string) + **out = **in + } + if in.PrioritizeUnhealthyInstancesEnabled != nil { + in, out := &in.PrioritizeUnhealthyInstancesEnabled, &out.PrioritizeUnhealthyInstancesEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpgradePolicyObservation. +func (in *RollingUpgradePolicyObservation) DeepCopy() *RollingUpgradePolicyObservation { + if in == nil { + return nil + } + out := new(RollingUpgradePolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RollingUpgradePolicyParameters) DeepCopyInto(out *RollingUpgradePolicyParameters) { + *out = *in + if in.CrossZoneUpgradesEnabled != nil { + in, out := &in.CrossZoneUpgradesEnabled, &out.CrossZoneUpgradesEnabled + *out = new(bool) + **out = **in + } + if in.MaxBatchInstancePercent != nil { + in, out := &in.MaxBatchInstancePercent, &out.MaxBatchInstancePercent + *out = new(float64) + **out = **in + } + if in.MaxUnhealthyInstancePercent != nil { + in, out := &in.MaxUnhealthyInstancePercent, &out.MaxUnhealthyInstancePercent + *out = new(float64) + **out = **in + } + if in.MaxUnhealthyUpgradedInstancePercent != nil { + in, out := &in.MaxUnhealthyUpgradedInstancePercent, &out.MaxUnhealthyUpgradedInstancePercent + *out = new(float64) + **out = **in + } + if in.PauseTimeBetweenBatches != nil { + in, out := &in.PauseTimeBetweenBatches, &out.PauseTimeBetweenBatches + *out = new(string) + **out = **in + } + if in.PrioritizeUnhealthyInstancesEnabled != nil { + in, out := &in.PrioritizeUnhealthyInstancesEnabled, &out.PrioritizeUnhealthyInstancesEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpgradePolicyParameters. +func (in *RollingUpgradePolicyParameters) DeepCopy() *RollingUpgradePolicyParameters { + if in == nil { + return nil + } + out := new(RollingUpgradePolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaleInInitParameters) DeepCopyInto(out *ScaleInInitParameters) { + *out = *in + if in.ForceDeletionEnabled != nil { + in, out := &in.ForceDeletionEnabled, &out.ForceDeletionEnabled + *out = new(bool) + **out = **in + } + if in.Rule != nil { + in, out := &in.Rule, &out.Rule + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleInInitParameters. 
+func (in *ScaleInInitParameters) DeepCopy() *ScaleInInitParameters { + if in == nil { + return nil + } + out := new(ScaleInInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaleInObservation) DeepCopyInto(out *ScaleInObservation) { + *out = *in + if in.ForceDeletionEnabled != nil { + in, out := &in.ForceDeletionEnabled, &out.ForceDeletionEnabled + *out = new(bool) + **out = **in + } + if in.Rule != nil { + in, out := &in.Rule, &out.Rule + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleInObservation. +func (in *ScaleInObservation) DeepCopy() *ScaleInObservation { + if in == nil { + return nil + } + out := new(ScaleInObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaleInParameters) DeepCopyInto(out *ScaleInParameters) { + *out = *in + if in.ForceDeletionEnabled != nil { + in, out := &in.ForceDeletionEnabled, &out.ForceDeletionEnabled + *out = new(bool) + **out = **in + } + if in.Rule != nil { + in, out := &in.Rule, &out.Rule + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleInParameters. +func (in *ScaleInParameters) DeepCopy() *ScaleInParameters { + if in == nil { + return nil + } + out := new(ScaleInParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScriptURIManagedIdentityInitParameters) DeepCopyInto(out *ScriptURIManagedIdentityInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScriptURIManagedIdentityInitParameters. 
+func (in *ScriptURIManagedIdentityInitParameters) DeepCopy() *ScriptURIManagedIdentityInitParameters { + if in == nil { + return nil + } + out := new(ScriptURIManagedIdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScriptURIManagedIdentityObservation) DeepCopyInto(out *ScriptURIManagedIdentityObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScriptURIManagedIdentityObservation. +func (in *ScriptURIManagedIdentityObservation) DeepCopy() *ScriptURIManagedIdentityObservation { + if in == nil { + return nil + } + out := new(ScriptURIManagedIdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScriptURIManagedIdentityParameters) DeepCopyInto(out *ScriptURIManagedIdentityParameters) { + *out = *in + if in.ClientIDSecretRef != nil { + in, out := &in.ClientIDSecretRef, &out.ClientIDSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ObjectIDSecretRef != nil { + in, out := &in.ObjectIDSecretRef, &out.ObjectIDSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScriptURIManagedIdentityParameters. +func (in *ScriptURIManagedIdentityParameters) DeepCopy() *ScriptURIManagedIdentityParameters { + if in == nil { + return nil + } + out := new(ScriptURIManagedIdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SecretCertificateInitParameters) DeepCopyInto(out *SecretCertificateInitParameters) { + *out = *in + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretCertificateInitParameters. +func (in *SecretCertificateInitParameters) DeepCopy() *SecretCertificateInitParameters { + if in == nil { + return nil + } + out := new(SecretCertificateInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretCertificateObservation) DeepCopyInto(out *SecretCertificateObservation) { + *out = *in + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretCertificateObservation. +func (in *SecretCertificateObservation) DeepCopy() *SecretCertificateObservation { + if in == nil { + return nil + } + out := new(SecretCertificateObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretCertificateParameters) DeepCopyInto(out *SecretCertificateParameters) { + *out = *in + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretCertificateParameters. +func (in *SecretCertificateParameters) DeepCopy() *SecretCertificateParameters { + if in == nil { + return nil + } + out := new(SecretCertificateParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SecretInitParameters) DeepCopyInto(out *SecretInitParameters) { + *out = *in + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = make([]CertificateInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.KeyVaultID != nil { + in, out := &in.KeyVaultID, &out.KeyVaultID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretInitParameters. +func (in *SecretInitParameters) DeepCopy() *SecretInitParameters { + if in == nil { + return nil + } + out := new(SecretInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretObservation) DeepCopyInto(out *SecretObservation) { + *out = *in + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = make([]CertificateObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.KeyVaultID != nil { + in, out := &in.KeyVaultID, &out.KeyVaultID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretObservation. +func (in *SecretObservation) DeepCopy() *SecretObservation { + if in == nil { + return nil + } + out := new(SecretObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SecretParameters) DeepCopyInto(out *SecretParameters) { + *out = *in + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = make([]CertificateParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.KeyVaultID != nil { + in, out := &in.KeyVaultID, &out.KeyVaultID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretParameters. +func (in *SecretParameters) DeepCopy() *SecretParameters { + if in == nil { + return nil + } + out := new(SecretParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SharedImage) DeepCopyInto(out *SharedImage) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharedImage. +func (in *SharedImage) DeepCopy() *SharedImage { + if in == nil { + return nil + } + out := new(SharedImage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SharedImage) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SharedImageGallery) DeepCopyInto(out *SharedImageGallery) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharedImageGallery. 
+func (in *SharedImageGallery) DeepCopy() *SharedImageGallery { + if in == nil { + return nil + } + out := new(SharedImageGallery) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SharedImageGallery) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SharedImageGalleryInitParameters) DeepCopyInto(out *SharedImageGalleryInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Sharing != nil { + in, out := &in.Sharing, &out.Sharing + *out = new(SharingInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharedImageGalleryInitParameters. +func (in *SharedImageGalleryInitParameters) DeepCopy() *SharedImageGalleryInitParameters { + if in == nil { + return nil + } + out := new(SharedImageGalleryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SharedImageGalleryList) DeepCopyInto(out *SharedImageGalleryList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SharedImageGallery, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharedImageGalleryList. +func (in *SharedImageGalleryList) DeepCopy() *SharedImageGalleryList { + if in == nil { + return nil + } + out := new(SharedImageGalleryList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SharedImageGalleryList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SharedImageGalleryObservation) DeepCopyInto(out *SharedImageGalleryObservation) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Sharing != nil { + in, out := &in.Sharing, &out.Sharing + *out = new(SharingObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + 
} + if in.UniqueName != nil { + in, out := &in.UniqueName, &out.UniqueName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharedImageGalleryObservation. +func (in *SharedImageGalleryObservation) DeepCopy() *SharedImageGalleryObservation { + if in == nil { + return nil + } + out := new(SharedImageGalleryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SharedImageGalleryParameters) DeepCopyInto(out *SharedImageGalleryParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Sharing != nil { + in, out := &in.Sharing, &out.Sharing + *out = new(SharingParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharedImageGalleryParameters. 
+func (in *SharedImageGalleryParameters) DeepCopy() *SharedImageGalleryParameters { + if in == nil { + return nil + } + out := new(SharedImageGalleryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SharedImageGallerySpec) DeepCopyInto(out *SharedImageGallerySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharedImageGallerySpec. +func (in *SharedImageGallerySpec) DeepCopy() *SharedImageGallerySpec { + if in == nil { + return nil + } + out := new(SharedImageGallerySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SharedImageGalleryStatus) DeepCopyInto(out *SharedImageGalleryStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharedImageGalleryStatus. +func (in *SharedImageGalleryStatus) DeepCopy() *SharedImageGalleryStatus { + if in == nil { + return nil + } + out := new(SharedImageGalleryStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SharedImageInitParameters) DeepCopyInto(out *SharedImageInitParameters) { + *out = *in + if in.AcceleratedNetworkSupportEnabled != nil { + in, out := &in.AcceleratedNetworkSupportEnabled, &out.AcceleratedNetworkSupportEnabled + *out = new(bool) + **out = **in + } + if in.Architecture != nil { + in, out := &in.Architecture, &out.Architecture + *out = new(string) + **out = **in + } + if in.ConfidentialVMEnabled != nil { + in, out := &in.ConfidentialVMEnabled, &out.ConfidentialVMEnabled + *out = new(bool) + **out = **in + } + if in.ConfidentialVMSupported != nil { + in, out := &in.ConfidentialVMSupported, &out.ConfidentialVMSupported + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DiskTypesNotAllowed != nil { + in, out := &in.DiskTypesNotAllowed, &out.DiskTypesNotAllowed + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.EndOfLifeDate != nil { + in, out := &in.EndOfLifeDate, &out.EndOfLifeDate + *out = new(string) + **out = **in + } + if in.Eula != nil { + in, out := &in.Eula, &out.Eula + *out = new(string) + **out = **in + } + if in.HyperVGeneration != nil { + in, out := &in.HyperVGeneration, &out.HyperVGeneration + *out = new(string) + **out = **in + } + if in.Identifier != nil { + in, out := &in.Identifier, &out.Identifier + *out = new(IdentifierInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MaxRecommendedMemoryInGb != nil { + in, out := &in.MaxRecommendedMemoryInGb, &out.MaxRecommendedMemoryInGb + *out = new(float64) + **out = **in + } + if in.MaxRecommendedVcpuCount != nil { + in, out := &in.MaxRecommendedVcpuCount, &out.MaxRecommendedVcpuCount + *out = new(float64) + **out = **in + } + if 
in.MinRecommendedMemoryInGb != nil { + in, out := &in.MinRecommendedMemoryInGb, &out.MinRecommendedMemoryInGb + *out = new(float64) + **out = **in + } + if in.MinRecommendedVcpuCount != nil { + in, out := &in.MinRecommendedVcpuCount, &out.MinRecommendedVcpuCount + *out = new(float64) + **out = **in + } + if in.OsType != nil { + in, out := &in.OsType, &out.OsType + *out = new(string) + **out = **in + } + if in.PrivacyStatementURI != nil { + in, out := &in.PrivacyStatementURI, &out.PrivacyStatementURI + *out = new(string) + **out = **in + } + if in.PurchasePlan != nil { + in, out := &in.PurchasePlan, &out.PurchasePlan + *out = new(PurchasePlanInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ReleaseNoteURI != nil { + in, out := &in.ReleaseNoteURI, &out.ReleaseNoteURI + *out = new(string) + **out = **in + } + if in.Specialized != nil { + in, out := &in.Specialized, &out.Specialized + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TrustedLaunchEnabled != nil { + in, out := &in.TrustedLaunchEnabled, &out.TrustedLaunchEnabled + *out = new(bool) + **out = **in + } + if in.TrustedLaunchSupported != nil { + in, out := &in.TrustedLaunchSupported, &out.TrustedLaunchSupported + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharedImageInitParameters. +func (in *SharedImageInitParameters) DeepCopy() *SharedImageInitParameters { + if in == nil { + return nil + } + out := new(SharedImageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SharedImageList) DeepCopyInto(out *SharedImageList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SharedImage, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharedImageList. +func (in *SharedImageList) DeepCopy() *SharedImageList { + if in == nil { + return nil + } + out := new(SharedImageList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SharedImageList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SharedImageObservation) DeepCopyInto(out *SharedImageObservation) { + *out = *in + if in.AcceleratedNetworkSupportEnabled != nil { + in, out := &in.AcceleratedNetworkSupportEnabled, &out.AcceleratedNetworkSupportEnabled + *out = new(bool) + **out = **in + } + if in.Architecture != nil { + in, out := &in.Architecture, &out.Architecture + *out = new(string) + **out = **in + } + if in.ConfidentialVMEnabled != nil { + in, out := &in.ConfidentialVMEnabled, &out.ConfidentialVMEnabled + *out = new(bool) + **out = **in + } + if in.ConfidentialVMSupported != nil { + in, out := &in.ConfidentialVMSupported, &out.ConfidentialVMSupported + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DiskTypesNotAllowed != nil { + in, out := &in.DiskTypesNotAllowed, &out.DiskTypesNotAllowed + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } 
+ } + } + if in.EndOfLifeDate != nil { + in, out := &in.EndOfLifeDate, &out.EndOfLifeDate + *out = new(string) + **out = **in + } + if in.Eula != nil { + in, out := &in.Eula, &out.Eula + *out = new(string) + **out = **in + } + if in.GalleryName != nil { + in, out := &in.GalleryName, &out.GalleryName + *out = new(string) + **out = **in + } + if in.HyperVGeneration != nil { + in, out := &in.HyperVGeneration, &out.HyperVGeneration + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identifier != nil { + in, out := &in.Identifier, &out.Identifier + *out = new(IdentifierObservation) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MaxRecommendedMemoryInGb != nil { + in, out := &in.MaxRecommendedMemoryInGb, &out.MaxRecommendedMemoryInGb + *out = new(float64) + **out = **in + } + if in.MaxRecommendedVcpuCount != nil { + in, out := &in.MaxRecommendedVcpuCount, &out.MaxRecommendedVcpuCount + *out = new(float64) + **out = **in + } + if in.MinRecommendedMemoryInGb != nil { + in, out := &in.MinRecommendedMemoryInGb, &out.MinRecommendedMemoryInGb + *out = new(float64) + **out = **in + } + if in.MinRecommendedVcpuCount != nil { + in, out := &in.MinRecommendedVcpuCount, &out.MinRecommendedVcpuCount + *out = new(float64) + **out = **in + } + if in.OsType != nil { + in, out := &in.OsType, &out.OsType + *out = new(string) + **out = **in + } + if in.PrivacyStatementURI != nil { + in, out := &in.PrivacyStatementURI, &out.PrivacyStatementURI + *out = new(string) + **out = **in + } + if in.PurchasePlan != nil { + in, out := &in.PurchasePlan, &out.PurchasePlan + *out = new(PurchasePlanObservation) + (*in).DeepCopyInto(*out) + } + if in.ReleaseNoteURI != nil { + in, out := &in.ReleaseNoteURI, &out.ReleaseNoteURI + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := 
&in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Specialized != nil { + in, out := &in.Specialized, &out.Specialized + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TrustedLaunchEnabled != nil { + in, out := &in.TrustedLaunchEnabled, &out.TrustedLaunchEnabled + *out = new(bool) + **out = **in + } + if in.TrustedLaunchSupported != nil { + in, out := &in.TrustedLaunchSupported, &out.TrustedLaunchSupported + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharedImageObservation. +func (in *SharedImageObservation) DeepCopy() *SharedImageObservation { + if in == nil { + return nil + } + out := new(SharedImageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SharedImageParameters) DeepCopyInto(out *SharedImageParameters) { + *out = *in + if in.AcceleratedNetworkSupportEnabled != nil { + in, out := &in.AcceleratedNetworkSupportEnabled, &out.AcceleratedNetworkSupportEnabled + *out = new(bool) + **out = **in + } + if in.Architecture != nil { + in, out := &in.Architecture, &out.Architecture + *out = new(string) + **out = **in + } + if in.ConfidentialVMEnabled != nil { + in, out := &in.ConfidentialVMEnabled, &out.ConfidentialVMEnabled + *out = new(bool) + **out = **in + } + if in.ConfidentialVMSupported != nil { + in, out := &in.ConfidentialVMSupported, &out.ConfidentialVMSupported + *out = new(bool) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DiskTypesNotAllowed != nil { + in, out := &in.DiskTypesNotAllowed, &out.DiskTypesNotAllowed + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.EndOfLifeDate != nil { + in, out := &in.EndOfLifeDate, &out.EndOfLifeDate + *out = new(string) + **out = **in + } + if in.Eula != nil { + in, out := &in.Eula, &out.Eula + *out = new(string) + **out = **in + } + if in.GalleryName != nil { + in, out := &in.GalleryName, &out.GalleryName + *out = new(string) + **out = **in + } + if in.GalleryNameRef != nil { + in, out := &in.GalleryNameRef, &out.GalleryNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.GalleryNameSelector != nil { + in, out := &in.GalleryNameSelector, &out.GalleryNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.HyperVGeneration != nil { + in, out := &in.HyperVGeneration, &out.HyperVGeneration + *out = new(string) + **out = **in + } + if in.Identifier != nil { + in, out := &in.Identifier, &out.Identifier + *out = new(IdentifierParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out 
:= &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MaxRecommendedMemoryInGb != nil { + in, out := &in.MaxRecommendedMemoryInGb, &out.MaxRecommendedMemoryInGb + *out = new(float64) + **out = **in + } + if in.MaxRecommendedVcpuCount != nil { + in, out := &in.MaxRecommendedVcpuCount, &out.MaxRecommendedVcpuCount + *out = new(float64) + **out = **in + } + if in.MinRecommendedMemoryInGb != nil { + in, out := &in.MinRecommendedMemoryInGb, &out.MinRecommendedMemoryInGb + *out = new(float64) + **out = **in + } + if in.MinRecommendedVcpuCount != nil { + in, out := &in.MinRecommendedVcpuCount, &out.MinRecommendedVcpuCount + *out = new(float64) + **out = **in + } + if in.OsType != nil { + in, out := &in.OsType, &out.OsType + *out = new(string) + **out = **in + } + if in.PrivacyStatementURI != nil { + in, out := &in.PrivacyStatementURI, &out.PrivacyStatementURI + *out = new(string) + **out = **in + } + if in.PurchasePlan != nil { + in, out := &in.PurchasePlan, &out.PurchasePlan + *out = new(PurchasePlanParameters) + (*in).DeepCopyInto(*out) + } + if in.ReleaseNoteURI != nil { + in, out := &in.ReleaseNoteURI, &out.ReleaseNoteURI + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Specialized != nil { + in, out := &in.Specialized, &out.Specialized + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := 
(*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TrustedLaunchEnabled != nil { + in, out := &in.TrustedLaunchEnabled, &out.TrustedLaunchEnabled + *out = new(bool) + **out = **in + } + if in.TrustedLaunchSupported != nil { + in, out := &in.TrustedLaunchSupported, &out.TrustedLaunchSupported + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharedImageParameters. +func (in *SharedImageParameters) DeepCopy() *SharedImageParameters { + if in == nil { + return nil + } + out := new(SharedImageParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SharedImageSpec) DeepCopyInto(out *SharedImageSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharedImageSpec. +func (in *SharedImageSpec) DeepCopy() *SharedImageSpec { + if in == nil { + return nil + } + out := new(SharedImageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SharedImageStatus) DeepCopyInto(out *SharedImageStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharedImageStatus. +func (in *SharedImageStatus) DeepCopy() *SharedImageStatus { + if in == nil { + return nil + } + out := new(SharedImageStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *SharingInitParameters) DeepCopyInto(out *SharingInitParameters) { + *out = *in + if in.CommunityGallery != nil { + in, out := &in.CommunityGallery, &out.CommunityGallery + *out = new(CommunityGalleryInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Permission != nil { + in, out := &in.Permission, &out.Permission + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharingInitParameters. +func (in *SharingInitParameters) DeepCopy() *SharingInitParameters { + if in == nil { + return nil + } + out := new(SharingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SharingObservation) DeepCopyInto(out *SharingObservation) { + *out = *in + if in.CommunityGallery != nil { + in, out := &in.CommunityGallery, &out.CommunityGallery + *out = new(CommunityGalleryObservation) + (*in).DeepCopyInto(*out) + } + if in.Permission != nil { + in, out := &in.Permission, &out.Permission + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharingObservation. +func (in *SharingObservation) DeepCopy() *SharingObservation { + if in == nil { + return nil + } + out := new(SharingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SharingParameters) DeepCopyInto(out *SharingParameters) { + *out = *in + if in.CommunityGallery != nil { + in, out := &in.CommunityGallery, &out.CommunityGallery + *out = new(CommunityGalleryParameters) + (*in).DeepCopyInto(*out) + } + if in.Permission != nil { + in, out := &in.Permission, &out.Permission + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharingParameters. +func (in *SharingParameters) DeepCopy() *SharingParameters { + if in == nil { + return nil + } + out := new(SharingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SkuInitParameters) DeepCopyInto(out *SkuInitParameters) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SkuInitParameters. +func (in *SkuInitParameters) DeepCopy() *SkuInitParameters { + if in == nil { + return nil + } + out := new(SkuInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SkuObservation) DeepCopyInto(out *SkuObservation) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SkuObservation. 
+func (in *SkuObservation) DeepCopy() *SkuObservation { + if in == nil { + return nil + } + out := new(SkuObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SkuParameters) DeepCopyInto(out *SkuParameters) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SkuParameters. +func (in *SkuParameters) DeepCopy() *SkuParameters { + if in == nil { + return nil + } + out := new(SkuParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Snapshot) DeepCopyInto(out *Snapshot) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Snapshot. +func (in *Snapshot) DeepCopy() *Snapshot { + if in == nil { + return nil + } + out := new(Snapshot) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Snapshot) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SnapshotEncryptionSettingsInitParameters) DeepCopyInto(out *SnapshotEncryptionSettingsInitParameters) { + *out = *in + if in.DiskEncryptionKey != nil { + in, out := &in.DiskEncryptionKey, &out.DiskEncryptionKey + *out = new(EncryptionSettingsDiskEncryptionKeyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.KeyEncryptionKey != nil { + in, out := &in.KeyEncryptionKey, &out.KeyEncryptionKey + *out = new(EncryptionSettingsKeyEncryptionKeyInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotEncryptionSettingsInitParameters. +func (in *SnapshotEncryptionSettingsInitParameters) DeepCopy() *SnapshotEncryptionSettingsInitParameters { + if in == nil { + return nil + } + out := new(SnapshotEncryptionSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotEncryptionSettingsObservation) DeepCopyInto(out *SnapshotEncryptionSettingsObservation) { + *out = *in + if in.DiskEncryptionKey != nil { + in, out := &in.DiskEncryptionKey, &out.DiskEncryptionKey + *out = new(EncryptionSettingsDiskEncryptionKeyObservation) + (*in).DeepCopyInto(*out) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.KeyEncryptionKey != nil { + in, out := &in.KeyEncryptionKey, &out.KeyEncryptionKey + *out = new(EncryptionSettingsKeyEncryptionKeyObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotEncryptionSettingsObservation. 
+func (in *SnapshotEncryptionSettingsObservation) DeepCopy() *SnapshotEncryptionSettingsObservation { + if in == nil { + return nil + } + out := new(SnapshotEncryptionSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotEncryptionSettingsParameters) DeepCopyInto(out *SnapshotEncryptionSettingsParameters) { + *out = *in + if in.DiskEncryptionKey != nil { + in, out := &in.DiskEncryptionKey, &out.DiskEncryptionKey + *out = new(EncryptionSettingsDiskEncryptionKeyParameters) + (*in).DeepCopyInto(*out) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.KeyEncryptionKey != nil { + in, out := &in.KeyEncryptionKey, &out.KeyEncryptionKey + *out = new(EncryptionSettingsKeyEncryptionKeyParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotEncryptionSettingsParameters. +func (in *SnapshotEncryptionSettingsParameters) DeepCopy() *SnapshotEncryptionSettingsParameters { + if in == nil { + return nil + } + out := new(SnapshotEncryptionSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SnapshotInitParameters) DeepCopyInto(out *SnapshotInitParameters) { + *out = *in + if in.CreateOption != nil { + in, out := &in.CreateOption, &out.CreateOption + *out = new(string) + **out = **in + } + if in.DiskSizeGb != nil { + in, out := &in.DiskSizeGb, &out.DiskSizeGb + *out = new(float64) + **out = **in + } + if in.EncryptionSettings != nil { + in, out := &in.EncryptionSettings, &out.EncryptionSettings + *out = new(SnapshotEncryptionSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.IncrementalEnabled != nil { + in, out := &in.IncrementalEnabled, &out.IncrementalEnabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.SourceResourceID != nil { + in, out := &in.SourceResourceID, &out.SourceResourceID + *out = new(string) + **out = **in + } + if in.SourceURI != nil { + in, out := &in.SourceURI, &out.SourceURI + *out = new(string) + **out = **in + } + if in.SourceURIRef != nil { + in, out := &in.SourceURIRef, &out.SourceURIRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SourceURISelector != nil { + in, out := &in.SourceURISelector, &out.SourceURISelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StorageAccountID != nil { + in, out := &in.StorageAccountID, &out.StorageAccountID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotInitParameters. 
+func (in *SnapshotInitParameters) DeepCopy() *SnapshotInitParameters { + if in == nil { + return nil + } + out := new(SnapshotInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotList) DeepCopyInto(out *SnapshotList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Snapshot, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotList. +func (in *SnapshotList) DeepCopy() *SnapshotList { + if in == nil { + return nil + } + out := new(SnapshotList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SnapshotList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SnapshotObservation) DeepCopyInto(out *SnapshotObservation) { + *out = *in + if in.CreateOption != nil { + in, out := &in.CreateOption, &out.CreateOption + *out = new(string) + **out = **in + } + if in.DiskSizeGb != nil { + in, out := &in.DiskSizeGb, &out.DiskSizeGb + *out = new(float64) + **out = **in + } + if in.EncryptionSettings != nil { + in, out := &in.EncryptionSettings, &out.EncryptionSettings + *out = new(SnapshotEncryptionSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IncrementalEnabled != nil { + in, out := &in.IncrementalEnabled, &out.IncrementalEnabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.SourceResourceID != nil { + in, out := &in.SourceResourceID, &out.SourceResourceID + *out = new(string) + **out = **in + } + if in.SourceURI != nil { + in, out := &in.SourceURI, &out.SourceURI + *out = new(string) + **out = **in + } + if in.StorageAccountID != nil { + in, out := &in.StorageAccountID, &out.StorageAccountID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TrustedLaunchEnabled != nil { + in, out := &in.TrustedLaunchEnabled, &out.TrustedLaunchEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotObservation. 
+func (in *SnapshotObservation) DeepCopy() *SnapshotObservation { + if in == nil { + return nil + } + out := new(SnapshotObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotParameters) DeepCopyInto(out *SnapshotParameters) { + *out = *in + if in.CreateOption != nil { + in, out := &in.CreateOption, &out.CreateOption + *out = new(string) + **out = **in + } + if in.DiskSizeGb != nil { + in, out := &in.DiskSizeGb, &out.DiskSizeGb + *out = new(float64) + **out = **in + } + if in.EncryptionSettings != nil { + in, out := &in.EncryptionSettings, &out.EncryptionSettings + *out = new(SnapshotEncryptionSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.IncrementalEnabled != nil { + in, out := &in.IncrementalEnabled, &out.IncrementalEnabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SourceResourceID != nil { + in, out := &in.SourceResourceID, &out.SourceResourceID + *out = new(string) + **out = **in + } + if in.SourceURI != nil { + in, out := &in.SourceURI, &out.SourceURI + *out = new(string) + **out = **in + } + if in.SourceURIRef != nil { + in, out := &in.SourceURIRef, &out.SourceURIRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SourceURISelector != nil { + in, out := &in.SourceURISelector, &out.SourceURISelector + *out = new(v1.Selector) + 
(*in).DeepCopyInto(*out) + } + if in.StorageAccountID != nil { + in, out := &in.StorageAccountID, &out.StorageAccountID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotParameters. +func (in *SnapshotParameters) DeepCopy() *SnapshotParameters { + if in == nil { + return nil + } + out := new(SnapshotParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotSpec) DeepCopyInto(out *SnapshotSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotSpec. +func (in *SnapshotSpec) DeepCopy() *SnapshotSpec { + if in == nil { + return nil + } + out := new(SnapshotSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotStatus) DeepCopyInto(out *SnapshotStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotStatus. 
+func (in *SnapshotStatus) DeepCopy() *SnapshotStatus { + if in == nil { + return nil + } + out := new(SnapshotStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceImageReferenceInitParameters) DeepCopyInto(out *SourceImageReferenceInitParameters) { + *out = *in + if in.Offer != nil { + in, out := &in.Offer, &out.Offer + *out = new(string) + **out = **in + } + if in.Publisher != nil { + in, out := &in.Publisher, &out.Publisher + *out = new(string) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceImageReferenceInitParameters. +func (in *SourceImageReferenceInitParameters) DeepCopy() *SourceImageReferenceInitParameters { + if in == nil { + return nil + } + out := new(SourceImageReferenceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceImageReferenceObservation) DeepCopyInto(out *SourceImageReferenceObservation) { + *out = *in + if in.Offer != nil { + in, out := &in.Offer, &out.Offer + *out = new(string) + **out = **in + } + if in.Publisher != nil { + in, out := &in.Publisher, &out.Publisher + *out = new(string) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceImageReferenceObservation. 
+func (in *SourceImageReferenceObservation) DeepCopy() *SourceImageReferenceObservation { + if in == nil { + return nil + } + out := new(SourceImageReferenceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceImageReferenceParameters) DeepCopyInto(out *SourceImageReferenceParameters) { + *out = *in + if in.Offer != nil { + in, out := &in.Offer, &out.Offer + *out = new(string) + **out = **in + } + if in.Publisher != nil { + in, out := &in.Publisher, &out.Publisher + *out = new(string) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceImageReferenceParameters. +func (in *SourceImageReferenceParameters) DeepCopy() *SourceImageReferenceParameters { + if in == nil { + return nil + } + out := new(SourceImageReferenceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SourceInitParameters) DeepCopyInto(out *SourceInitParameters) { + *out = *in + if in.DefaultConfigurationLink != nil { + in, out := &in.DefaultConfigurationLink, &out.DefaultConfigurationLink + *out = new(string) + **out = **in + } + if in.MediaLink != nil { + in, out := &in.MediaLink, &out.MediaLink + *out = new(string) + **out = **in + } + if in.MediaLinkRef != nil { + in, out := &in.MediaLinkRef, &out.MediaLinkRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.MediaLinkSelector != nil { + in, out := &in.MediaLinkSelector, &out.MediaLinkSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceInitParameters. +func (in *SourceInitParameters) DeepCopy() *SourceInitParameters { + if in == nil { + return nil + } + out := new(SourceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceObservation) DeepCopyInto(out *SourceObservation) { + *out = *in + if in.DefaultConfigurationLink != nil { + in, out := &in.DefaultConfigurationLink, &out.DefaultConfigurationLink + *out = new(string) + **out = **in + } + if in.MediaLink != nil { + in, out := &in.MediaLink, &out.MediaLink + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceObservation. +func (in *SourceObservation) DeepCopy() *SourceObservation { + if in == nil { + return nil + } + out := new(SourceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SourceParameters) DeepCopyInto(out *SourceParameters) { + *out = *in + if in.DefaultConfigurationLink != nil { + in, out := &in.DefaultConfigurationLink, &out.DefaultConfigurationLink + *out = new(string) + **out = **in + } + if in.MediaLink != nil { + in, out := &in.MediaLink, &out.MediaLink + *out = new(string) + **out = **in + } + if in.MediaLinkRef != nil { + in, out := &in.MediaLinkRef, &out.MediaLinkRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.MediaLinkSelector != nil { + in, out := &in.MediaLinkSelector, &out.MediaLinkSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceParameters. +func (in *SourceParameters) DeepCopy() *SourceParameters { + if in == nil { + return nil + } + out := new(SourceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpotRestoreInitParameters) DeepCopyInto(out *SpotRestoreInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotRestoreInitParameters. +func (in *SpotRestoreInitParameters) DeepCopy() *SpotRestoreInitParameters { + if in == nil { + return nil + } + out := new(SpotRestoreInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpotRestoreObservation) DeepCopyInto(out *SpotRestoreObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotRestoreObservation. +func (in *SpotRestoreObservation) DeepCopy() *SpotRestoreObservation { + if in == nil { + return nil + } + out := new(SpotRestoreObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpotRestoreParameters) DeepCopyInto(out *SpotRestoreParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotRestoreParameters. +func (in *SpotRestoreParameters) DeepCopy() *SpotRestoreParameters { + if in == nil { + return nil + } + out := new(SpotRestoreParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TargetRegionInitParameters) DeepCopyInto(out *TargetRegionInitParameters) { + *out = *in + if in.ExcludeFromLatest != nil { + in, out := &in.ExcludeFromLatest, &out.ExcludeFromLatest + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NameRef != nil { + in, out := &in.NameRef, &out.NameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NameSelector != nil { + in, out := &in.NameSelector, &out.NameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RegionalReplicaCount != nil { + in, out := &in.RegionalReplicaCount, &out.RegionalReplicaCount + *out = new(float64) + **out = **in + } + if in.StorageAccountType != nil { + in, out := &in.StorageAccountType, &out.StorageAccountType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetRegionInitParameters. +func (in *TargetRegionInitParameters) DeepCopy() *TargetRegionInitParameters { + if in == nil { + return nil + } + out := new(TargetRegionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TargetRegionObservation) DeepCopyInto(out *TargetRegionObservation) { + *out = *in + if in.ExcludeFromLatest != nil { + in, out := &in.ExcludeFromLatest, &out.ExcludeFromLatest + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RegionalReplicaCount != nil { + in, out := &in.RegionalReplicaCount, &out.RegionalReplicaCount + *out = new(float64) + **out = **in + } + if in.StorageAccountType != nil { + in, out := &in.StorageAccountType, &out.StorageAccountType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetRegionObservation. +func (in *TargetRegionObservation) DeepCopy() *TargetRegionObservation { + if in == nil { + return nil + } + out := new(TargetRegionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetRegionParameters) DeepCopyInto(out *TargetRegionParameters) { + *out = *in + if in.ExcludeFromLatest != nil { + in, out := &in.ExcludeFromLatest, &out.ExcludeFromLatest + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NameRef != nil { + in, out := &in.NameRef, &out.NameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NameSelector != nil { + in, out := &in.NameSelector, &out.NameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RegionalReplicaCount != nil { + in, out := &in.RegionalReplicaCount, &out.RegionalReplicaCount + *out = new(float64) + **out = **in + } + if in.StorageAccountType != nil { + in, out := &in.StorageAccountType, &out.StorageAccountType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetRegionParameters. 
+func (in *TargetRegionParameters) DeepCopy() *TargetRegionParameters { + if in == nil { + return nil + } + out := new(TargetRegionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TerminateNotificationInitParameters) DeepCopyInto(out *TerminateNotificationInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TerminateNotificationInitParameters. +func (in *TerminateNotificationInitParameters) DeepCopy() *TerminateNotificationInitParameters { + if in == nil { + return nil + } + out := new(TerminateNotificationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TerminateNotificationObservation) DeepCopyInto(out *TerminateNotificationObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TerminateNotificationObservation. +func (in *TerminateNotificationObservation) DeepCopy() *TerminateNotificationObservation { + if in == nil { + return nil + } + out := new(TerminateNotificationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TerminateNotificationParameters) DeepCopyInto(out *TerminateNotificationParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TerminateNotificationParameters. +func (in *TerminateNotificationParameters) DeepCopy() *TerminateNotificationParameters { + if in == nil { + return nil + } + out := new(TerminateNotificationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TerminationNotificationInitParameters) DeepCopyInto(out *TerminationNotificationInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TerminationNotificationInitParameters. +func (in *TerminationNotificationInitParameters) DeepCopy() *TerminationNotificationInitParameters { + if in == nil { + return nil + } + out := new(TerminationNotificationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TerminationNotificationObservation) DeepCopyInto(out *TerminationNotificationObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TerminationNotificationObservation. +func (in *TerminationNotificationObservation) DeepCopy() *TerminationNotificationObservation { + if in == nil { + return nil + } + out := new(TerminationNotificationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TerminationNotificationParameters) DeepCopyInto(out *TerminationNotificationParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TerminationNotificationParameters. +func (in *TerminationNotificationParameters) DeepCopy() *TerminationNotificationParameters { + if in == nil { + return nil + } + out := new(TerminationNotificationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualMachineExtension) DeepCopyInto(out *VirtualMachineExtension) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineExtension. 
+func (in *VirtualMachineExtension) DeepCopy() *VirtualMachineExtension { + if in == nil { + return nil + } + out := new(VirtualMachineExtension) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VirtualMachineExtension) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualMachineExtensionInitParameters) DeepCopyInto(out *VirtualMachineExtensionInitParameters) { + *out = *in + if in.AutoUpgradeMinorVersion != nil { + in, out := &in.AutoUpgradeMinorVersion, &out.AutoUpgradeMinorVersion + *out = new(bool) + **out = **in + } + if in.AutomaticUpgradeEnabled != nil { + in, out := &in.AutomaticUpgradeEnabled, &out.AutomaticUpgradeEnabled + *out = new(bool) + **out = **in + } + if in.FailureSuppressionEnabled != nil { + in, out := &in.FailureSuppressionEnabled, &out.FailureSuppressionEnabled + *out = new(bool) + **out = **in + } + if in.ProtectedSettingsFromKeyVault != nil { + in, out := &in.ProtectedSettingsFromKeyVault, &out.ProtectedSettingsFromKeyVault + *out = new(VirtualMachineExtensionProtectedSettingsFromKeyVaultInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ProvisionAfterExtensions != nil { + in, out := &in.ProvisionAfterExtensions, &out.ProvisionAfterExtensions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Publisher != nil { + in, out := &in.Publisher, &out.Publisher + *out = new(string) + **out = **in + } + if in.Settings != nil { + in, out := &in.Settings, &out.Settings + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + 
var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.TypeHandlerVersion != nil { + in, out := &in.TypeHandlerVersion, &out.TypeHandlerVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineExtensionInitParameters. +func (in *VirtualMachineExtensionInitParameters) DeepCopy() *VirtualMachineExtensionInitParameters { + if in == nil { + return nil + } + out := new(VirtualMachineExtensionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualMachineExtensionList) DeepCopyInto(out *VirtualMachineExtensionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VirtualMachineExtension, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineExtensionList. +func (in *VirtualMachineExtensionList) DeepCopy() *VirtualMachineExtensionList { + if in == nil { + return nil + } + out := new(VirtualMachineExtensionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VirtualMachineExtensionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualMachineExtensionObservation) DeepCopyInto(out *VirtualMachineExtensionObservation) { + *out = *in + if in.AutoUpgradeMinorVersion != nil { + in, out := &in.AutoUpgradeMinorVersion, &out.AutoUpgradeMinorVersion + *out = new(bool) + **out = **in + } + if in.AutomaticUpgradeEnabled != nil { + in, out := &in.AutomaticUpgradeEnabled, &out.AutomaticUpgradeEnabled + *out = new(bool) + **out = **in + } + if in.FailureSuppressionEnabled != nil { + in, out := &in.FailureSuppressionEnabled, &out.FailureSuppressionEnabled + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.ProtectedSettingsFromKeyVault != nil { + in, out := &in.ProtectedSettingsFromKeyVault, &out.ProtectedSettingsFromKeyVault + *out = new(VirtualMachineExtensionProtectedSettingsFromKeyVaultObservation) + (*in).DeepCopyInto(*out) + } + if in.ProvisionAfterExtensions != nil { + in, out := &in.ProvisionAfterExtensions, &out.ProvisionAfterExtensions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Publisher != nil { + in, out := &in.Publisher, &out.Publisher + *out = new(string) + **out = **in + } + if in.Settings != nil { + in, out := &in.Settings, &out.Settings + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.TypeHandlerVersion != nil { + in, out := &in.TypeHandlerVersion, &out.TypeHandlerVersion + *out = new(string) + **out = **in + } + if in.VirtualMachineID != nil { + in, out := 
&in.VirtualMachineID, &out.VirtualMachineID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineExtensionObservation. +func (in *VirtualMachineExtensionObservation) DeepCopy() *VirtualMachineExtensionObservation { + if in == nil { + return nil + } + out := new(VirtualMachineExtensionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualMachineExtensionParameters) DeepCopyInto(out *VirtualMachineExtensionParameters) { + *out = *in + if in.AutoUpgradeMinorVersion != nil { + in, out := &in.AutoUpgradeMinorVersion, &out.AutoUpgradeMinorVersion + *out = new(bool) + **out = **in + } + if in.AutomaticUpgradeEnabled != nil { + in, out := &in.AutomaticUpgradeEnabled, &out.AutomaticUpgradeEnabled + *out = new(bool) + **out = **in + } + if in.FailureSuppressionEnabled != nil { + in, out := &in.FailureSuppressionEnabled, &out.FailureSuppressionEnabled + *out = new(bool) + **out = **in + } + if in.ProtectedSettingsFromKeyVault != nil { + in, out := &in.ProtectedSettingsFromKeyVault, &out.ProtectedSettingsFromKeyVault + *out = new(VirtualMachineExtensionProtectedSettingsFromKeyVaultParameters) + (*in).DeepCopyInto(*out) + } + if in.ProtectedSettingsSecretRef != nil { + in, out := &in.ProtectedSettingsSecretRef, &out.ProtectedSettingsSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ProvisionAfterExtensions != nil { + in, out := &in.ProvisionAfterExtensions, &out.ProvisionAfterExtensions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Publisher != nil { + in, out := &in.Publisher, &out.Publisher + *out = new(string) + **out = **in + } + if in.Settings != nil { + in, out := &in.Settings, &out.Settings + *out = 
new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.TypeHandlerVersion != nil { + in, out := &in.TypeHandlerVersion, &out.TypeHandlerVersion + *out = new(string) + **out = **in + } + if in.VirtualMachineID != nil { + in, out := &in.VirtualMachineID, &out.VirtualMachineID + *out = new(string) + **out = **in + } + if in.VirtualMachineIDRef != nil { + in, out := &in.VirtualMachineIDRef, &out.VirtualMachineIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualMachineIDSelector != nil { + in, out := &in.VirtualMachineIDSelector, &out.VirtualMachineIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineExtensionParameters. +func (in *VirtualMachineExtensionParameters) DeepCopy() *VirtualMachineExtensionParameters { + if in == nil { + return nil + } + out := new(VirtualMachineExtensionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualMachineExtensionProtectedSettingsFromKeyVaultInitParameters) DeepCopyInto(out *VirtualMachineExtensionProtectedSettingsFromKeyVaultInitParameters) { + *out = *in + if in.SecretURL != nil { + in, out := &in.SecretURL, &out.SecretURL + *out = new(string) + **out = **in + } + if in.SourceVaultID != nil { + in, out := &in.SourceVaultID, &out.SourceVaultID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineExtensionProtectedSettingsFromKeyVaultInitParameters. +func (in *VirtualMachineExtensionProtectedSettingsFromKeyVaultInitParameters) DeepCopy() *VirtualMachineExtensionProtectedSettingsFromKeyVaultInitParameters { + if in == nil { + return nil + } + out := new(VirtualMachineExtensionProtectedSettingsFromKeyVaultInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualMachineExtensionProtectedSettingsFromKeyVaultObservation) DeepCopyInto(out *VirtualMachineExtensionProtectedSettingsFromKeyVaultObservation) { + *out = *in + if in.SecretURL != nil { + in, out := &in.SecretURL, &out.SecretURL + *out = new(string) + **out = **in + } + if in.SourceVaultID != nil { + in, out := &in.SourceVaultID, &out.SourceVaultID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineExtensionProtectedSettingsFromKeyVaultObservation. +func (in *VirtualMachineExtensionProtectedSettingsFromKeyVaultObservation) DeepCopy() *VirtualMachineExtensionProtectedSettingsFromKeyVaultObservation { + if in == nil { + return nil + } + out := new(VirtualMachineExtensionProtectedSettingsFromKeyVaultObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualMachineExtensionProtectedSettingsFromKeyVaultParameters) DeepCopyInto(out *VirtualMachineExtensionProtectedSettingsFromKeyVaultParameters) { + *out = *in + if in.SecretURL != nil { + in, out := &in.SecretURL, &out.SecretURL + *out = new(string) + **out = **in + } + if in.SourceVaultID != nil { + in, out := &in.SourceVaultID, &out.SourceVaultID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineExtensionProtectedSettingsFromKeyVaultParameters. +func (in *VirtualMachineExtensionProtectedSettingsFromKeyVaultParameters) DeepCopy() *VirtualMachineExtensionProtectedSettingsFromKeyVaultParameters { + if in == nil { + return nil + } + out := new(VirtualMachineExtensionProtectedSettingsFromKeyVaultParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualMachineExtensionSpec) DeepCopyInto(out *VirtualMachineExtensionSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineExtensionSpec. +func (in *VirtualMachineExtensionSpec) DeepCopy() *VirtualMachineExtensionSpec { + if in == nil { + return nil + } + out := new(VirtualMachineExtensionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualMachineExtensionStatus) DeepCopyInto(out *VirtualMachineExtensionStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineExtensionStatus. +func (in *VirtualMachineExtensionStatus) DeepCopy() *VirtualMachineExtensionStatus { + if in == nil { + return nil + } + out := new(VirtualMachineExtensionStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualMachineRunCommand) DeepCopyInto(out *VirtualMachineRunCommand) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineRunCommand. +func (in *VirtualMachineRunCommand) DeepCopy() *VirtualMachineRunCommand { + if in == nil { + return nil + } + out := new(VirtualMachineRunCommand) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VirtualMachineRunCommand) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualMachineRunCommandInitParameters) DeepCopyInto(out *VirtualMachineRunCommandInitParameters) { + *out = *in + if in.ErrorBlobManagedIdentity != nil { + in, out := &in.ErrorBlobManagedIdentity, &out.ErrorBlobManagedIdentity + *out = new(ErrorBlobManagedIdentityInitParameters) + **out = **in + } + if in.ErrorBlobURI != nil { + in, out := &in.ErrorBlobURI, &out.ErrorBlobURI + *out = new(string) + **out = **in + } + if in.ErrorBlobURIRef != nil { + in, out := &in.ErrorBlobURIRef, &out.ErrorBlobURIRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ErrorBlobURISelector != nil { + in, out := &in.ErrorBlobURISelector, &out.ErrorBlobURISelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.OutputBlobManagedIdentity != nil { + in, out := &in.OutputBlobManagedIdentity, &out.OutputBlobManagedIdentity + *out = new(OutputBlobManagedIdentityInitParameters) + **out = **in + } + if in.OutputBlobURI != nil { + in, out := &in.OutputBlobURI, &out.OutputBlobURI + *out = new(string) + **out = **in + } + if in.OutputBlobURIRef != nil { + in, out := &in.OutputBlobURIRef, &out.OutputBlobURIRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.OutputBlobURISelector != nil { + in, out := &in.OutputBlobURISelector, &out.OutputBlobURISelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Parameter != nil { + in, out := &in.Parameter, &out.Parameter + *out = make([]ParameterInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ProtectedParameter != nil { + in, out := &in.ProtectedParameter, &out.ProtectedParameter + *out = make([]ProtectedParameterInitParameters, len(*in)) + copy(*out, *in) + } + if in.RunAsUser != nil { + in, out := &in.RunAsUser, &out.RunAsUser + *out = new(string) + **out = **in + } + if in.Source != nil { + in, out := &in.Source, 
&out.Source + *out = new(VirtualMachineRunCommandSourceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineRunCommandInitParameters. +func (in *VirtualMachineRunCommandInitParameters) DeepCopy() *VirtualMachineRunCommandInitParameters { + if in == nil { + return nil + } + out := new(VirtualMachineRunCommandInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualMachineRunCommandList) DeepCopyInto(out *VirtualMachineRunCommandList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VirtualMachineRunCommand, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineRunCommandList. +func (in *VirtualMachineRunCommandList) DeepCopy() *VirtualMachineRunCommandList { + if in == nil { + return nil + } + out := new(VirtualMachineRunCommandList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VirtualMachineRunCommandList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualMachineRunCommandObservation) DeepCopyInto(out *VirtualMachineRunCommandObservation) { + *out = *in + if in.ErrorBlobManagedIdentity != nil { + in, out := &in.ErrorBlobManagedIdentity, &out.ErrorBlobManagedIdentity + *out = new(ErrorBlobManagedIdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.ErrorBlobURI != nil { + in, out := &in.ErrorBlobURI, &out.ErrorBlobURI + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InstanceView != nil { + in, out := &in.InstanceView, &out.InstanceView + *out = make([]InstanceViewObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.OutputBlobManagedIdentity != nil { + in, out := &in.OutputBlobManagedIdentity, &out.OutputBlobManagedIdentity + *out = new(OutputBlobManagedIdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.OutputBlobURI != nil { + in, out := &in.OutputBlobURI, &out.OutputBlobURI + *out = new(string) + **out = **in + } + if in.Parameter != nil { + in, out := &in.Parameter, &out.Parameter + *out = make([]ParameterObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ProtectedParameter != nil { + in, out := &in.ProtectedParameter, &out.ProtectedParameter + *out = make([]ProtectedParameterParameters, len(*in)) + copy(*out, *in) + } + if in.RunAsUser != nil { + in, out := &in.RunAsUser, &out.RunAsUser + *out = new(string) + **out = **in + } + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = new(VirtualMachineRunCommandSourceObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := 
&inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VirtualMachineID != nil { + in, out := &in.VirtualMachineID, &out.VirtualMachineID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineRunCommandObservation. +func (in *VirtualMachineRunCommandObservation) DeepCopy() *VirtualMachineRunCommandObservation { + if in == nil { + return nil + } + out := new(VirtualMachineRunCommandObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualMachineRunCommandParameters) DeepCopyInto(out *VirtualMachineRunCommandParameters) { + *out = *in + if in.ErrorBlobManagedIdentity != nil { + in, out := &in.ErrorBlobManagedIdentity, &out.ErrorBlobManagedIdentity + *out = new(ErrorBlobManagedIdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.ErrorBlobURI != nil { + in, out := &in.ErrorBlobURI, &out.ErrorBlobURI + *out = new(string) + **out = **in + } + if in.ErrorBlobURIRef != nil { + in, out := &in.ErrorBlobURIRef, &out.ErrorBlobURIRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ErrorBlobURISelector != nil { + in, out := &in.ErrorBlobURISelector, &out.ErrorBlobURISelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.OutputBlobManagedIdentity != nil { + in, out := &in.OutputBlobManagedIdentity, &out.OutputBlobManagedIdentity + *out = new(OutputBlobManagedIdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.OutputBlobURI != nil { + in, out := &in.OutputBlobURI, &out.OutputBlobURI + *out = new(string) + **out = **in + } + if in.OutputBlobURIRef != nil { + in, out := &in.OutputBlobURIRef, &out.OutputBlobURIRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + 
} + if in.OutputBlobURISelector != nil { + in, out := &in.OutputBlobURISelector, &out.OutputBlobURISelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Parameter != nil { + in, out := &in.Parameter, &out.Parameter + *out = make([]ParameterParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ProtectedParameter != nil { + in, out := &in.ProtectedParameter, &out.ProtectedParameter + *out = make([]ProtectedParameterParameters, len(*in)) + copy(*out, *in) + } + if in.RunAsPasswordSecretRef != nil { + in, out := &in.RunAsPasswordSecretRef, &out.RunAsPasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.RunAsUser != nil { + in, out := &in.RunAsUser, &out.RunAsUser + *out = new(string) + **out = **in + } + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = new(VirtualMachineRunCommandSourceParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VirtualMachineID != nil { + in, out := &in.VirtualMachineID, &out.VirtualMachineID + *out = new(string) + **out = **in + } + if in.VirtualMachineIDRef != nil { + in, out := &in.VirtualMachineIDRef, &out.VirtualMachineIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualMachineIDSelector != nil { + in, out := &in.VirtualMachineIDSelector, &out.VirtualMachineIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineRunCommandParameters. 
+func (in *VirtualMachineRunCommandParameters) DeepCopy() *VirtualMachineRunCommandParameters { + if in == nil { + return nil + } + out := new(VirtualMachineRunCommandParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualMachineRunCommandSourceInitParameters) DeepCopyInto(out *VirtualMachineRunCommandSourceInitParameters) { + *out = *in + if in.CommandID != nil { + in, out := &in.CommandID, &out.CommandID + *out = new(string) + **out = **in + } + if in.Script != nil { + in, out := &in.Script, &out.Script + *out = new(string) + **out = **in + } + if in.ScriptURI != nil { + in, out := &in.ScriptURI, &out.ScriptURI + *out = new(string) + **out = **in + } + if in.ScriptURIManagedIdentity != nil { + in, out := &in.ScriptURIManagedIdentity, &out.ScriptURIManagedIdentity + *out = new(ScriptURIManagedIdentityInitParameters) + **out = **in + } + if in.ScriptURIRef != nil { + in, out := &in.ScriptURIRef, &out.ScriptURIRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ScriptURISelector != nil { + in, out := &in.ScriptURISelector, &out.ScriptURISelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineRunCommandSourceInitParameters. +func (in *VirtualMachineRunCommandSourceInitParameters) DeepCopy() *VirtualMachineRunCommandSourceInitParameters { + if in == nil { + return nil + } + out := new(VirtualMachineRunCommandSourceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualMachineRunCommandSourceObservation) DeepCopyInto(out *VirtualMachineRunCommandSourceObservation) { + *out = *in + if in.CommandID != nil { + in, out := &in.CommandID, &out.CommandID + *out = new(string) + **out = **in + } + if in.Script != nil { + in, out := &in.Script, &out.Script + *out = new(string) + **out = **in + } + if in.ScriptURI != nil { + in, out := &in.ScriptURI, &out.ScriptURI + *out = new(string) + **out = **in + } + if in.ScriptURIManagedIdentity != nil { + in, out := &in.ScriptURIManagedIdentity, &out.ScriptURIManagedIdentity + *out = new(ScriptURIManagedIdentityParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineRunCommandSourceObservation. +func (in *VirtualMachineRunCommandSourceObservation) DeepCopy() *VirtualMachineRunCommandSourceObservation { + if in == nil { + return nil + } + out := new(VirtualMachineRunCommandSourceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualMachineRunCommandSourceParameters) DeepCopyInto(out *VirtualMachineRunCommandSourceParameters) { + *out = *in + if in.CommandID != nil { + in, out := &in.CommandID, &out.CommandID + *out = new(string) + **out = **in + } + if in.Script != nil { + in, out := &in.Script, &out.Script + *out = new(string) + **out = **in + } + if in.ScriptURI != nil { + in, out := &in.ScriptURI, &out.ScriptURI + *out = new(string) + **out = **in + } + if in.ScriptURIManagedIdentity != nil { + in, out := &in.ScriptURIManagedIdentity, &out.ScriptURIManagedIdentity + *out = new(ScriptURIManagedIdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.ScriptURIRef != nil { + in, out := &in.ScriptURIRef, &out.ScriptURIRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ScriptURISelector != nil { + in, out := &in.ScriptURISelector, &out.ScriptURISelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineRunCommandSourceParameters. +func (in *VirtualMachineRunCommandSourceParameters) DeepCopy() *VirtualMachineRunCommandSourceParameters { + if in == nil { + return nil + } + out := new(VirtualMachineRunCommandSourceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualMachineRunCommandSpec) DeepCopyInto(out *VirtualMachineRunCommandSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineRunCommandSpec. 
+func (in *VirtualMachineRunCommandSpec) DeepCopy() *VirtualMachineRunCommandSpec { + if in == nil { + return nil + } + out := new(VirtualMachineRunCommandSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualMachineRunCommandStatus) DeepCopyInto(out *VirtualMachineRunCommandStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineRunCommandStatus. +func (in *VirtualMachineRunCommandStatus) DeepCopy() *VirtualMachineRunCommandStatus { + if in == nil { + return nil + } + out := new(VirtualMachineRunCommandStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsConfigurationInitParameters) DeepCopyInto(out *WindowsConfigurationInitParameters) { + *out = *in + if in.AdditionalUnattendContent != nil { + in, out := &in.AdditionalUnattendContent, &out.AdditionalUnattendContent + *out = make([]AdditionalUnattendContentInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AdminUsername != nil { + in, out := &in.AdminUsername, &out.AdminUsername + *out = new(string) + **out = **in + } + if in.ComputerNamePrefix != nil { + in, out := &in.ComputerNamePrefix, &out.ComputerNamePrefix + *out = new(string) + **out = **in + } + if in.EnableAutomaticUpdates != nil { + in, out := &in.EnableAutomaticUpdates, &out.EnableAutomaticUpdates + *out = new(bool) + **out = **in + } + if in.HotpatchingEnabled != nil { + in, out := &in.HotpatchingEnabled, &out.HotpatchingEnabled + *out = new(bool) + **out = **in + } + if in.PatchAssessmentMode != nil { + in, out := &in.PatchAssessmentMode, &out.PatchAssessmentMode + *out = 
new(string) + **out = **in + } + if in.PatchMode != nil { + in, out := &in.PatchMode, &out.PatchMode + *out = new(string) + **out = **in + } + if in.ProvisionVMAgent != nil { + in, out := &in.ProvisionVMAgent, &out.ProvisionVMAgent + *out = new(bool) + **out = **in + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = make([]WindowsConfigurationSecretInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Timezone != nil { + in, out := &in.Timezone, &out.Timezone + *out = new(string) + **out = **in + } + if in.WinrmListener != nil { + in, out := &in.WinrmListener, &out.WinrmListener + *out = make([]WinrmListenerInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsConfigurationInitParameters. +func (in *WindowsConfigurationInitParameters) DeepCopy() *WindowsConfigurationInitParameters { + if in == nil { + return nil + } + out := new(WindowsConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsConfigurationObservation) DeepCopyInto(out *WindowsConfigurationObservation) { + *out = *in + if in.AdditionalUnattendContent != nil { + in, out := &in.AdditionalUnattendContent, &out.AdditionalUnattendContent + *out = make([]AdditionalUnattendContentObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AdminUsername != nil { + in, out := &in.AdminUsername, &out.AdminUsername + *out = new(string) + **out = **in + } + if in.ComputerNamePrefix != nil { + in, out := &in.ComputerNamePrefix, &out.ComputerNamePrefix + *out = new(string) + **out = **in + } + if in.EnableAutomaticUpdates != nil { + in, out := &in.EnableAutomaticUpdates, &out.EnableAutomaticUpdates + *out = new(bool) + **out = **in + } + if in.HotpatchingEnabled != nil { + in, out := &in.HotpatchingEnabled, &out.HotpatchingEnabled + *out = new(bool) + **out = **in + } + if in.PatchAssessmentMode != nil { + in, out := &in.PatchAssessmentMode, &out.PatchAssessmentMode + *out = new(string) + **out = **in + } + if in.PatchMode != nil { + in, out := &in.PatchMode, &out.PatchMode + *out = new(string) + **out = **in + } + if in.ProvisionVMAgent != nil { + in, out := &in.ProvisionVMAgent, &out.ProvisionVMAgent + *out = new(bool) + **out = **in + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = make([]WindowsConfigurationSecretObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Timezone != nil { + in, out := &in.Timezone, &out.Timezone + *out = new(string) + **out = **in + } + if in.WinrmListener != nil { + in, out := &in.WinrmListener, &out.WinrmListener + *out = make([]WinrmListenerObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsConfigurationObservation. 
+func (in *WindowsConfigurationObservation) DeepCopy() *WindowsConfigurationObservation { + if in == nil { + return nil + } + out := new(WindowsConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsConfigurationParameters) DeepCopyInto(out *WindowsConfigurationParameters) { + *out = *in + if in.AdditionalUnattendContent != nil { + in, out := &in.AdditionalUnattendContent, &out.AdditionalUnattendContent + *out = make([]AdditionalUnattendContentParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + out.AdminPasswordSecretRef = in.AdminPasswordSecretRef + if in.AdminUsername != nil { + in, out := &in.AdminUsername, &out.AdminUsername + *out = new(string) + **out = **in + } + if in.ComputerNamePrefix != nil { + in, out := &in.ComputerNamePrefix, &out.ComputerNamePrefix + *out = new(string) + **out = **in + } + if in.EnableAutomaticUpdates != nil { + in, out := &in.EnableAutomaticUpdates, &out.EnableAutomaticUpdates + *out = new(bool) + **out = **in + } + if in.HotpatchingEnabled != nil { + in, out := &in.HotpatchingEnabled, &out.HotpatchingEnabled + *out = new(bool) + **out = **in + } + if in.PatchAssessmentMode != nil { + in, out := &in.PatchAssessmentMode, &out.PatchAssessmentMode + *out = new(string) + **out = **in + } + if in.PatchMode != nil { + in, out := &in.PatchMode, &out.PatchMode + *out = new(string) + **out = **in + } + if in.ProvisionVMAgent != nil { + in, out := &in.ProvisionVMAgent, &out.ProvisionVMAgent + *out = new(bool) + **out = **in + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = make([]WindowsConfigurationSecretParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Timezone != nil { + in, out := &in.Timezone, &out.Timezone + *out = new(string) + **out = **in + } + if in.WinrmListener != nil { + in, out 
:= &in.WinrmListener, &out.WinrmListener + *out = make([]WinrmListenerParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsConfigurationParameters. +func (in *WindowsConfigurationParameters) DeepCopy() *WindowsConfigurationParameters { + if in == nil { + return nil + } + out := new(WindowsConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsConfigurationSecretCertificateInitParameters) DeepCopyInto(out *WindowsConfigurationSecretCertificateInitParameters) { + *out = *in + if in.Store != nil { + in, out := &in.Store, &out.Store + *out = new(string) + **out = **in + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsConfigurationSecretCertificateInitParameters. +func (in *WindowsConfigurationSecretCertificateInitParameters) DeepCopy() *WindowsConfigurationSecretCertificateInitParameters { + if in == nil { + return nil + } + out := new(WindowsConfigurationSecretCertificateInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsConfigurationSecretCertificateObservation) DeepCopyInto(out *WindowsConfigurationSecretCertificateObservation) { + *out = *in + if in.Store != nil { + in, out := &in.Store, &out.Store + *out = new(string) + **out = **in + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsConfigurationSecretCertificateObservation. 
+func (in *WindowsConfigurationSecretCertificateObservation) DeepCopy() *WindowsConfigurationSecretCertificateObservation { + if in == nil { + return nil + } + out := new(WindowsConfigurationSecretCertificateObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsConfigurationSecretCertificateParameters) DeepCopyInto(out *WindowsConfigurationSecretCertificateParameters) { + *out = *in + if in.Store != nil { + in, out := &in.Store, &out.Store + *out = new(string) + **out = **in + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsConfigurationSecretCertificateParameters. +func (in *WindowsConfigurationSecretCertificateParameters) DeepCopy() *WindowsConfigurationSecretCertificateParameters { + if in == nil { + return nil + } + out := new(WindowsConfigurationSecretCertificateParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsConfigurationSecretInitParameters) DeepCopyInto(out *WindowsConfigurationSecretInitParameters) { + *out = *in + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = make([]WindowsConfigurationSecretCertificateInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.KeyVaultID != nil { + in, out := &in.KeyVaultID, &out.KeyVaultID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsConfigurationSecretInitParameters. 
+func (in *WindowsConfigurationSecretInitParameters) DeepCopy() *WindowsConfigurationSecretInitParameters { + if in == nil { + return nil + } + out := new(WindowsConfigurationSecretInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsConfigurationSecretObservation) DeepCopyInto(out *WindowsConfigurationSecretObservation) { + *out = *in + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = make([]WindowsConfigurationSecretCertificateObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.KeyVaultID != nil { + in, out := &in.KeyVaultID, &out.KeyVaultID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsConfigurationSecretObservation. +func (in *WindowsConfigurationSecretObservation) DeepCopy() *WindowsConfigurationSecretObservation { + if in == nil { + return nil + } + out := new(WindowsConfigurationSecretObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsConfigurationSecretParameters) DeepCopyInto(out *WindowsConfigurationSecretParameters) { + *out = *in + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = make([]WindowsConfigurationSecretCertificateParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.KeyVaultID != nil { + in, out := &in.KeyVaultID, &out.KeyVaultID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsConfigurationSecretParameters. 
+func (in *WindowsConfigurationSecretParameters) DeepCopy() *WindowsConfigurationSecretParameters { + if in == nil { + return nil + } + out := new(WindowsConfigurationSecretParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachine) DeepCopyInto(out *WindowsVirtualMachine) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachine. +func (in *WindowsVirtualMachine) DeepCopy() *WindowsVirtualMachine { + if in == nil { + return nil + } + out := new(WindowsVirtualMachine) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WindowsVirtualMachine) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineAdditionalCapabilitiesInitParameters) DeepCopyInto(out *WindowsVirtualMachineAdditionalCapabilitiesInitParameters) { + *out = *in + if in.UltraSsdEnabled != nil { + in, out := &in.UltraSsdEnabled, &out.UltraSsdEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineAdditionalCapabilitiesInitParameters. 
+func (in *WindowsVirtualMachineAdditionalCapabilitiesInitParameters) DeepCopy() *WindowsVirtualMachineAdditionalCapabilitiesInitParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineAdditionalCapabilitiesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineAdditionalCapabilitiesObservation) DeepCopyInto(out *WindowsVirtualMachineAdditionalCapabilitiesObservation) { + *out = *in + if in.UltraSsdEnabled != nil { + in, out := &in.UltraSsdEnabled, &out.UltraSsdEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineAdditionalCapabilitiesObservation. +func (in *WindowsVirtualMachineAdditionalCapabilitiesObservation) DeepCopy() *WindowsVirtualMachineAdditionalCapabilitiesObservation { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineAdditionalCapabilitiesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineAdditionalCapabilitiesParameters) DeepCopyInto(out *WindowsVirtualMachineAdditionalCapabilitiesParameters) { + *out = *in + if in.UltraSsdEnabled != nil { + in, out := &in.UltraSsdEnabled, &out.UltraSsdEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineAdditionalCapabilitiesParameters. 
+func (in *WindowsVirtualMachineAdditionalCapabilitiesParameters) DeepCopy() *WindowsVirtualMachineAdditionalCapabilitiesParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineAdditionalCapabilitiesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineAdditionalUnattendContentInitParameters) DeepCopyInto(out *WindowsVirtualMachineAdditionalUnattendContentInitParameters) { + *out = *in + if in.Setting != nil { + in, out := &in.Setting, &out.Setting + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineAdditionalUnattendContentInitParameters. +func (in *WindowsVirtualMachineAdditionalUnattendContentInitParameters) DeepCopy() *WindowsVirtualMachineAdditionalUnattendContentInitParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineAdditionalUnattendContentInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineAdditionalUnattendContentObservation) DeepCopyInto(out *WindowsVirtualMachineAdditionalUnattendContentObservation) { + *out = *in + if in.Setting != nil { + in, out := &in.Setting, &out.Setting + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineAdditionalUnattendContentObservation. 
+func (in *WindowsVirtualMachineAdditionalUnattendContentObservation) DeepCopy() *WindowsVirtualMachineAdditionalUnattendContentObservation { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineAdditionalUnattendContentObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineAdditionalUnattendContentParameters) DeepCopyInto(out *WindowsVirtualMachineAdditionalUnattendContentParameters) { + *out = *in + out.ContentSecretRef = in.ContentSecretRef + if in.Setting != nil { + in, out := &in.Setting, &out.Setting + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineAdditionalUnattendContentParameters. +func (in *WindowsVirtualMachineAdditionalUnattendContentParameters) DeepCopy() *WindowsVirtualMachineAdditionalUnattendContentParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineAdditionalUnattendContentParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineBootDiagnosticsInitParameters) DeepCopyInto(out *WindowsVirtualMachineBootDiagnosticsInitParameters) { + *out = *in + if in.StorageAccountURI != nil { + in, out := &in.StorageAccountURI, &out.StorageAccountURI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineBootDiagnosticsInitParameters. 
+func (in *WindowsVirtualMachineBootDiagnosticsInitParameters) DeepCopy() *WindowsVirtualMachineBootDiagnosticsInitParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineBootDiagnosticsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineBootDiagnosticsObservation) DeepCopyInto(out *WindowsVirtualMachineBootDiagnosticsObservation) { + *out = *in + if in.StorageAccountURI != nil { + in, out := &in.StorageAccountURI, &out.StorageAccountURI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineBootDiagnosticsObservation. +func (in *WindowsVirtualMachineBootDiagnosticsObservation) DeepCopy() *WindowsVirtualMachineBootDiagnosticsObservation { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineBootDiagnosticsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineBootDiagnosticsParameters) DeepCopyInto(out *WindowsVirtualMachineBootDiagnosticsParameters) { + *out = *in + if in.StorageAccountURI != nil { + in, out := &in.StorageAccountURI, &out.StorageAccountURI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineBootDiagnosticsParameters. +func (in *WindowsVirtualMachineBootDiagnosticsParameters) DeepCopy() *WindowsVirtualMachineBootDiagnosticsParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineBootDiagnosticsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsVirtualMachineGalleryApplicationInitParameters) DeepCopyInto(out *WindowsVirtualMachineGalleryApplicationInitParameters) { + *out = *in + if in.AutomaticUpgradeEnabled != nil { + in, out := &in.AutomaticUpgradeEnabled, &out.AutomaticUpgradeEnabled + *out = new(bool) + **out = **in + } + if in.ConfigurationBlobURI != nil { + in, out := &in.ConfigurationBlobURI, &out.ConfigurationBlobURI + *out = new(string) + **out = **in + } + if in.Order != nil { + in, out := &in.Order, &out.Order + *out = new(float64) + **out = **in + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(string) + **out = **in + } + if in.TreatFailureAsDeploymentFailureEnabled != nil { + in, out := &in.TreatFailureAsDeploymentFailureEnabled, &out.TreatFailureAsDeploymentFailureEnabled + *out = new(bool) + **out = **in + } + if in.VersionID != nil { + in, out := &in.VersionID, &out.VersionID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineGalleryApplicationInitParameters. +func (in *WindowsVirtualMachineGalleryApplicationInitParameters) DeepCopy() *WindowsVirtualMachineGalleryApplicationInitParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineGalleryApplicationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsVirtualMachineGalleryApplicationObservation) DeepCopyInto(out *WindowsVirtualMachineGalleryApplicationObservation) { + *out = *in + if in.AutomaticUpgradeEnabled != nil { + in, out := &in.AutomaticUpgradeEnabled, &out.AutomaticUpgradeEnabled + *out = new(bool) + **out = **in + } + if in.ConfigurationBlobURI != nil { + in, out := &in.ConfigurationBlobURI, &out.ConfigurationBlobURI + *out = new(string) + **out = **in + } + if in.Order != nil { + in, out := &in.Order, &out.Order + *out = new(float64) + **out = **in + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(string) + **out = **in + } + if in.TreatFailureAsDeploymentFailureEnabled != nil { + in, out := &in.TreatFailureAsDeploymentFailureEnabled, &out.TreatFailureAsDeploymentFailureEnabled + *out = new(bool) + **out = **in + } + if in.VersionID != nil { + in, out := &in.VersionID, &out.VersionID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineGalleryApplicationObservation. +func (in *WindowsVirtualMachineGalleryApplicationObservation) DeepCopy() *WindowsVirtualMachineGalleryApplicationObservation { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineGalleryApplicationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsVirtualMachineGalleryApplicationParameters) DeepCopyInto(out *WindowsVirtualMachineGalleryApplicationParameters) { + *out = *in + if in.AutomaticUpgradeEnabled != nil { + in, out := &in.AutomaticUpgradeEnabled, &out.AutomaticUpgradeEnabled + *out = new(bool) + **out = **in + } + if in.ConfigurationBlobURI != nil { + in, out := &in.ConfigurationBlobURI, &out.ConfigurationBlobURI + *out = new(string) + **out = **in + } + if in.Order != nil { + in, out := &in.Order, &out.Order + *out = new(float64) + **out = **in + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(string) + **out = **in + } + if in.TreatFailureAsDeploymentFailureEnabled != nil { + in, out := &in.TreatFailureAsDeploymentFailureEnabled, &out.TreatFailureAsDeploymentFailureEnabled + *out = new(bool) + **out = **in + } + if in.VersionID != nil { + in, out := &in.VersionID, &out.VersionID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineGalleryApplicationParameters. +func (in *WindowsVirtualMachineGalleryApplicationParameters) DeepCopy() *WindowsVirtualMachineGalleryApplicationParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineGalleryApplicationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsVirtualMachineIdentityInitParameters) DeepCopyInto(out *WindowsVirtualMachineIdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineIdentityInitParameters. +func (in *WindowsVirtualMachineIdentityInitParameters) DeepCopy() *WindowsVirtualMachineIdentityInitParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineIdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineIdentityObservation) DeepCopyInto(out *WindowsVirtualMachineIdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineIdentityObservation. 
+func (in *WindowsVirtualMachineIdentityObservation) DeepCopy() *WindowsVirtualMachineIdentityObservation { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineIdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineIdentityParameters) DeepCopyInto(out *WindowsVirtualMachineIdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineIdentityParameters. +func (in *WindowsVirtualMachineIdentityParameters) DeepCopy() *WindowsVirtualMachineIdentityParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineIdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsVirtualMachineInitParameters) DeepCopyInto(out *WindowsVirtualMachineInitParameters) { + *out = *in + if in.AdditionalCapabilities != nil { + in, out := &in.AdditionalCapabilities, &out.AdditionalCapabilities + *out = new(WindowsVirtualMachineAdditionalCapabilitiesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AdditionalUnattendContent != nil { + in, out := &in.AdditionalUnattendContent, &out.AdditionalUnattendContent + *out = make([]WindowsVirtualMachineAdditionalUnattendContentInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AdminUsername != nil { + in, out := &in.AdminUsername, &out.AdminUsername + *out = new(string) + **out = **in + } + if in.AllowExtensionOperations != nil { + in, out := &in.AllowExtensionOperations, &out.AllowExtensionOperations + *out = new(bool) + **out = **in + } + if in.AvailabilitySetID != nil { + in, out := &in.AvailabilitySetID, &out.AvailabilitySetID + *out = new(string) + **out = **in + } + if in.BootDiagnostics != nil { + in, out := &in.BootDiagnostics, &out.BootDiagnostics + *out = new(WindowsVirtualMachineBootDiagnosticsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.BypassPlatformSafetyChecksOnUserScheduleEnabled != nil { + in, out := &in.BypassPlatformSafetyChecksOnUserScheduleEnabled, &out.BypassPlatformSafetyChecksOnUserScheduleEnabled + *out = new(bool) + **out = **in + } + if in.CapacityReservationGroupID != nil { + in, out := &in.CapacityReservationGroupID, &out.CapacityReservationGroupID + *out = new(string) + **out = **in + } + if in.ComputerName != nil { + in, out := &in.ComputerName, &out.ComputerName + *out = new(string) + **out = **in + } + if in.DedicatedHostGroupID != nil { + in, out := &in.DedicatedHostGroupID, &out.DedicatedHostGroupID + *out = new(string) + **out = **in + } + if in.DedicatedHostID != nil { + in, out := &in.DedicatedHostID, &out.DedicatedHostID + *out = new(string) + **out = **in + } + if in.DiskControllerType != 
nil { + in, out := &in.DiskControllerType, &out.DiskControllerType + *out = new(string) + **out = **in + } + if in.EdgeZone != nil { + in, out := &in.EdgeZone, &out.EdgeZone + *out = new(string) + **out = **in + } + if in.EnableAutomaticUpdates != nil { + in, out := &in.EnableAutomaticUpdates, &out.EnableAutomaticUpdates + *out = new(bool) + **out = **in + } + if in.EncryptionAtHostEnabled != nil { + in, out := &in.EncryptionAtHostEnabled, &out.EncryptionAtHostEnabled + *out = new(bool) + **out = **in + } + if in.EvictionPolicy != nil { + in, out := &in.EvictionPolicy, &out.EvictionPolicy + *out = new(string) + **out = **in + } + if in.ExtensionsTimeBudget != nil { + in, out := &in.ExtensionsTimeBudget, &out.ExtensionsTimeBudget + *out = new(string) + **out = **in + } + if in.GalleryApplication != nil { + in, out := &in.GalleryApplication, &out.GalleryApplication + *out = make([]WindowsVirtualMachineGalleryApplicationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HotpatchingEnabled != nil { + in, out := &in.HotpatchingEnabled, &out.HotpatchingEnabled + *out = new(bool) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(WindowsVirtualMachineIdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LicenseType != nil { + in, out := &in.LicenseType, &out.LicenseType + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MaxBidPrice != nil { + in, out := &in.MaxBidPrice, &out.MaxBidPrice + *out = new(float64) + **out = **in + } + if in.NetworkInterfaceIds != nil { + in, out := &in.NetworkInterfaceIds, &out.NetworkInterfaceIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NetworkInterfaceIdsRefs != nil { + in, out := 
&in.NetworkInterfaceIdsRefs, &out.NetworkInterfaceIdsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NetworkInterfaceIdsSelector != nil { + in, out := &in.NetworkInterfaceIdsSelector, &out.NetworkInterfaceIdsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.OsDisk != nil { + in, out := &in.OsDisk, &out.OsDisk + *out = new(WindowsVirtualMachineOsDiskInitParameters) + (*in).DeepCopyInto(*out) + } + if in.OsImageNotification != nil { + in, out := &in.OsImageNotification, &out.OsImageNotification + *out = new(WindowsVirtualMachineOsImageNotificationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PatchAssessmentMode != nil { + in, out := &in.PatchAssessmentMode, &out.PatchAssessmentMode + *out = new(string) + **out = **in + } + if in.PatchMode != nil { + in, out := &in.PatchMode, &out.PatchMode + *out = new(string) + **out = **in + } + if in.Plan != nil { + in, out := &in.Plan, &out.Plan + *out = new(WindowsVirtualMachinePlanInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PlatformFaultDomain != nil { + in, out := &in.PlatformFaultDomain, &out.PlatformFaultDomain + *out = new(float64) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(string) + **out = **in + } + if in.ProvisionVMAgent != nil { + in, out := &in.ProvisionVMAgent, &out.ProvisionVMAgent + *out = new(bool) + **out = **in + } + if in.ProximityPlacementGroupID != nil { + in, out := &in.ProximityPlacementGroupID, &out.ProximityPlacementGroupID + *out = new(string) + **out = **in + } + if in.RebootSetting != nil { + in, out := &in.RebootSetting, &out.RebootSetting + *out = new(string) + **out = **in + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = make([]WindowsVirtualMachineSecretInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecureBootEnabled != nil { + in, out := 
&in.SecureBootEnabled, &out.SecureBootEnabled + *out = new(bool) + **out = **in + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(string) + **out = **in + } + if in.SourceImageID != nil { + in, out := &in.SourceImageID, &out.SourceImageID + *out = new(string) + **out = **in + } + if in.SourceImageReference != nil { + in, out := &in.SourceImageReference, &out.SourceImageReference + *out = new(WindowsVirtualMachineSourceImageReferenceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TerminationNotification != nil { + in, out := &in.TerminationNotification, &out.TerminationNotification + *out = new(WindowsVirtualMachineTerminationNotificationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Timezone != nil { + in, out := &in.Timezone, &out.Timezone + *out = new(string) + **out = **in + } + if in.UserData != nil { + in, out := &in.UserData, &out.UserData + *out = new(string) + **out = **in + } + if in.VMAgentPlatformUpdatesEnabled != nil { + in, out := &in.VMAgentPlatformUpdatesEnabled, &out.VMAgentPlatformUpdatesEnabled + *out = new(bool) + **out = **in + } + if in.VirtualMachineScaleSetID != nil { + in, out := &in.VirtualMachineScaleSetID, &out.VirtualMachineScaleSetID + *out = new(string) + **out = **in + } + if in.VtpmEnabled != nil { + in, out := &in.VtpmEnabled, &out.VtpmEnabled + *out = new(bool) + **out = **in + } + if in.WinrmListener != nil { + in, out := &in.WinrmListener, &out.WinrmListener + *out = make([]WindowsVirtualMachineWinrmListenerInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + 
**out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineInitParameters. +func (in *WindowsVirtualMachineInitParameters) DeepCopy() *WindowsVirtualMachineInitParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineList) DeepCopyInto(out *WindowsVirtualMachineList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]WindowsVirtualMachine, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineList. +func (in *WindowsVirtualMachineList) DeepCopy() *WindowsVirtualMachineList { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WindowsVirtualMachineList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsVirtualMachineObservation) DeepCopyInto(out *WindowsVirtualMachineObservation) { + *out = *in + if in.AdditionalCapabilities != nil { + in, out := &in.AdditionalCapabilities, &out.AdditionalCapabilities + *out = new(WindowsVirtualMachineAdditionalCapabilitiesObservation) + (*in).DeepCopyInto(*out) + } + if in.AdditionalUnattendContent != nil { + in, out := &in.AdditionalUnattendContent, &out.AdditionalUnattendContent + *out = make([]WindowsVirtualMachineAdditionalUnattendContentObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AdminUsername != nil { + in, out := &in.AdminUsername, &out.AdminUsername + *out = new(string) + **out = **in + } + if in.AllowExtensionOperations != nil { + in, out := &in.AllowExtensionOperations, &out.AllowExtensionOperations + *out = new(bool) + **out = **in + } + if in.AvailabilitySetID != nil { + in, out := &in.AvailabilitySetID, &out.AvailabilitySetID + *out = new(string) + **out = **in + } + if in.BootDiagnostics != nil { + in, out := &in.BootDiagnostics, &out.BootDiagnostics + *out = new(WindowsVirtualMachineBootDiagnosticsObservation) + (*in).DeepCopyInto(*out) + } + if in.BypassPlatformSafetyChecksOnUserScheduleEnabled != nil { + in, out := &in.BypassPlatformSafetyChecksOnUserScheduleEnabled, &out.BypassPlatformSafetyChecksOnUserScheduleEnabled + *out = new(bool) + **out = **in + } + if in.CapacityReservationGroupID != nil { + in, out := &in.CapacityReservationGroupID, &out.CapacityReservationGroupID + *out = new(string) + **out = **in + } + if in.ComputerName != nil { + in, out := &in.ComputerName, &out.ComputerName + *out = new(string) + **out = **in + } + if in.DedicatedHostGroupID != nil { + in, out := &in.DedicatedHostGroupID, &out.DedicatedHostGroupID + *out = new(string) + **out = **in + } + if in.DedicatedHostID != nil { + in, out := &in.DedicatedHostID, &out.DedicatedHostID + *out = new(string) + **out = **in + } + if in.DiskControllerType != nil { + in, out 
:= &in.DiskControllerType, &out.DiskControllerType + *out = new(string) + **out = **in + } + if in.EdgeZone != nil { + in, out := &in.EdgeZone, &out.EdgeZone + *out = new(string) + **out = **in + } + if in.EnableAutomaticUpdates != nil { + in, out := &in.EnableAutomaticUpdates, &out.EnableAutomaticUpdates + *out = new(bool) + **out = **in + } + if in.EncryptionAtHostEnabled != nil { + in, out := &in.EncryptionAtHostEnabled, &out.EncryptionAtHostEnabled + *out = new(bool) + **out = **in + } + if in.EvictionPolicy != nil { + in, out := &in.EvictionPolicy, &out.EvictionPolicy + *out = new(string) + **out = **in + } + if in.ExtensionsTimeBudget != nil { + in, out := &in.ExtensionsTimeBudget, &out.ExtensionsTimeBudget + *out = new(string) + **out = **in + } + if in.GalleryApplication != nil { + in, out := &in.GalleryApplication, &out.GalleryApplication + *out = make([]WindowsVirtualMachineGalleryApplicationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HotpatchingEnabled != nil { + in, out := &in.HotpatchingEnabled, &out.HotpatchingEnabled + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(WindowsVirtualMachineIdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.LicenseType != nil { + in, out := &in.LicenseType, &out.LicenseType + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MaxBidPrice != nil { + in, out := &in.MaxBidPrice, &out.MaxBidPrice + *out = new(float64) + **out = **in + } + if in.NetworkInterfaceIds != nil { + in, out := &in.NetworkInterfaceIds, &out.NetworkInterfaceIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if 
in.OsDisk != nil { + in, out := &in.OsDisk, &out.OsDisk + *out = new(WindowsVirtualMachineOsDiskObservation) + (*in).DeepCopyInto(*out) + } + if in.OsImageNotification != nil { + in, out := &in.OsImageNotification, &out.OsImageNotification + *out = new(WindowsVirtualMachineOsImageNotificationObservation) + (*in).DeepCopyInto(*out) + } + if in.PatchAssessmentMode != nil { + in, out := &in.PatchAssessmentMode, &out.PatchAssessmentMode + *out = new(string) + **out = **in + } + if in.PatchMode != nil { + in, out := &in.PatchMode, &out.PatchMode + *out = new(string) + **out = **in + } + if in.Plan != nil { + in, out := &in.Plan, &out.Plan + *out = new(WindowsVirtualMachinePlanObservation) + (*in).DeepCopyInto(*out) + } + if in.PlatformFaultDomain != nil { + in, out := &in.PlatformFaultDomain, &out.PlatformFaultDomain + *out = new(float64) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(string) + **out = **in + } + if in.PrivateIPAddress != nil { + in, out := &in.PrivateIPAddress, &out.PrivateIPAddress + *out = new(string) + **out = **in + } + if in.PrivateIPAddresses != nil { + in, out := &in.PrivateIPAddresses, &out.PrivateIPAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ProvisionVMAgent != nil { + in, out := &in.ProvisionVMAgent, &out.ProvisionVMAgent + *out = new(bool) + **out = **in + } + if in.ProximityPlacementGroupID != nil { + in, out := &in.ProximityPlacementGroupID, &out.ProximityPlacementGroupID + *out = new(string) + **out = **in + } + if in.PublicIPAddress != nil { + in, out := &in.PublicIPAddress, &out.PublicIPAddress + *out = new(string) + **out = **in + } + if in.PublicIPAddresses != nil { + in, out := &in.PublicIPAddresses, &out.PublicIPAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = 
new(string) + **out = **in + } + } + } + if in.RebootSetting != nil { + in, out := &in.RebootSetting, &out.RebootSetting + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = make([]WindowsVirtualMachineSecretObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecureBootEnabled != nil { + in, out := &in.SecureBootEnabled, &out.SecureBootEnabled + *out = new(bool) + **out = **in + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(string) + **out = **in + } + if in.SourceImageID != nil { + in, out := &in.SourceImageID, &out.SourceImageID + *out = new(string) + **out = **in + } + if in.SourceImageReference != nil { + in, out := &in.SourceImageReference, &out.SourceImageReference + *out = new(WindowsVirtualMachineSourceImageReferenceObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TerminationNotification != nil { + in, out := &in.TerminationNotification, &out.TerminationNotification + *out = new(WindowsVirtualMachineTerminationNotificationObservation) + (*in).DeepCopyInto(*out) + } + if in.Timezone != nil { + in, out := &in.Timezone, &out.Timezone + *out = new(string) + **out = **in + } + if in.UserData != nil { + in, out := &in.UserData, &out.UserData + *out = new(string) + **out = **in + } + if in.VMAgentPlatformUpdatesEnabled != nil { + in, out := &in.VMAgentPlatformUpdatesEnabled, &out.VMAgentPlatformUpdatesEnabled + *out = new(bool) + **out = **in + } + if in.VirtualMachineID != nil { + in, out 
:= &in.VirtualMachineID, &out.VirtualMachineID + *out = new(string) + **out = **in + } + if in.VirtualMachineScaleSetID != nil { + in, out := &in.VirtualMachineScaleSetID, &out.VirtualMachineScaleSetID + *out = new(string) + **out = **in + } + if in.VtpmEnabled != nil { + in, out := &in.VtpmEnabled, &out.VtpmEnabled + *out = new(bool) + **out = **in + } + if in.WinrmListener != nil { + in, out := &in.WinrmListener, &out.WinrmListener + *out = make([]WindowsVirtualMachineWinrmListenerObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineObservation. +func (in *WindowsVirtualMachineObservation) DeepCopy() *WindowsVirtualMachineObservation { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineOsDiskDiffDiskSettingsInitParameters) DeepCopyInto(out *WindowsVirtualMachineOsDiskDiffDiskSettingsInitParameters) { + *out = *in + if in.Option != nil { + in, out := &in.Option, &out.Option + *out = new(string) + **out = **in + } + if in.Placement != nil { + in, out := &in.Placement, &out.Placement + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineOsDiskDiffDiskSettingsInitParameters. 
+func (in *WindowsVirtualMachineOsDiskDiffDiskSettingsInitParameters) DeepCopy() *WindowsVirtualMachineOsDiskDiffDiskSettingsInitParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineOsDiskDiffDiskSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineOsDiskDiffDiskSettingsObservation) DeepCopyInto(out *WindowsVirtualMachineOsDiskDiffDiskSettingsObservation) { + *out = *in + if in.Option != nil { + in, out := &in.Option, &out.Option + *out = new(string) + **out = **in + } + if in.Placement != nil { + in, out := &in.Placement, &out.Placement + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineOsDiskDiffDiskSettingsObservation. +func (in *WindowsVirtualMachineOsDiskDiffDiskSettingsObservation) DeepCopy() *WindowsVirtualMachineOsDiskDiffDiskSettingsObservation { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineOsDiskDiffDiskSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineOsDiskDiffDiskSettingsParameters) DeepCopyInto(out *WindowsVirtualMachineOsDiskDiffDiskSettingsParameters) { + *out = *in + if in.Option != nil { + in, out := &in.Option, &out.Option + *out = new(string) + **out = **in + } + if in.Placement != nil { + in, out := &in.Placement, &out.Placement + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineOsDiskDiffDiskSettingsParameters. 
+func (in *WindowsVirtualMachineOsDiskDiffDiskSettingsParameters) DeepCopy() *WindowsVirtualMachineOsDiskDiffDiskSettingsParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineOsDiskDiffDiskSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineOsDiskInitParameters) DeepCopyInto(out *WindowsVirtualMachineOsDiskInitParameters) { + *out = *in + if in.Caching != nil { + in, out := &in.Caching, &out.Caching + *out = new(string) + **out = **in + } + if in.DiffDiskSettings != nil { + in, out := &in.DiffDiskSettings, &out.DiffDiskSettings + *out = new(WindowsVirtualMachineOsDiskDiffDiskSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DiskEncryptionSetID != nil { + in, out := &in.DiskEncryptionSetID, &out.DiskEncryptionSetID + *out = new(string) + **out = **in + } + if in.DiskSizeGb != nil { + in, out := &in.DiskSizeGb, &out.DiskSizeGb + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SecureVMDiskEncryptionSetID != nil { + in, out := &in.SecureVMDiskEncryptionSetID, &out.SecureVMDiskEncryptionSetID + *out = new(string) + **out = **in + } + if in.SecurityEncryptionType != nil { + in, out := &in.SecurityEncryptionType, &out.SecurityEncryptionType + *out = new(string) + **out = **in + } + if in.StorageAccountType != nil { + in, out := &in.StorageAccountType, &out.StorageAccountType + *out = new(string) + **out = **in + } + if in.WriteAcceleratorEnabled != nil { + in, out := &in.WriteAcceleratorEnabled, &out.WriteAcceleratorEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineOsDiskInitParameters. 
+func (in *WindowsVirtualMachineOsDiskInitParameters) DeepCopy() *WindowsVirtualMachineOsDiskInitParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineOsDiskInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineOsDiskObservation) DeepCopyInto(out *WindowsVirtualMachineOsDiskObservation) { + *out = *in + if in.Caching != nil { + in, out := &in.Caching, &out.Caching + *out = new(string) + **out = **in + } + if in.DiffDiskSettings != nil { + in, out := &in.DiffDiskSettings, &out.DiffDiskSettings + *out = new(WindowsVirtualMachineOsDiskDiffDiskSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.DiskEncryptionSetID != nil { + in, out := &in.DiskEncryptionSetID, &out.DiskEncryptionSetID + *out = new(string) + **out = **in + } + if in.DiskSizeGb != nil { + in, out := &in.DiskSizeGb, &out.DiskSizeGb + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SecureVMDiskEncryptionSetID != nil { + in, out := &in.SecureVMDiskEncryptionSetID, &out.SecureVMDiskEncryptionSetID + *out = new(string) + **out = **in + } + if in.SecurityEncryptionType != nil { + in, out := &in.SecurityEncryptionType, &out.SecurityEncryptionType + *out = new(string) + **out = **in + } + if in.StorageAccountType != nil { + in, out := &in.StorageAccountType, &out.StorageAccountType + *out = new(string) + **out = **in + } + if in.WriteAcceleratorEnabled != nil { + in, out := &in.WriteAcceleratorEnabled, &out.WriteAcceleratorEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineOsDiskObservation. 
+func (in *WindowsVirtualMachineOsDiskObservation) DeepCopy() *WindowsVirtualMachineOsDiskObservation { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineOsDiskObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineOsDiskParameters) DeepCopyInto(out *WindowsVirtualMachineOsDiskParameters) { + *out = *in + if in.Caching != nil { + in, out := &in.Caching, &out.Caching + *out = new(string) + **out = **in + } + if in.DiffDiskSettings != nil { + in, out := &in.DiffDiskSettings, &out.DiffDiskSettings + *out = new(WindowsVirtualMachineOsDiskDiffDiskSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.DiskEncryptionSetID != nil { + in, out := &in.DiskEncryptionSetID, &out.DiskEncryptionSetID + *out = new(string) + **out = **in + } + if in.DiskSizeGb != nil { + in, out := &in.DiskSizeGb, &out.DiskSizeGb + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SecureVMDiskEncryptionSetID != nil { + in, out := &in.SecureVMDiskEncryptionSetID, &out.SecureVMDiskEncryptionSetID + *out = new(string) + **out = **in + } + if in.SecurityEncryptionType != nil { + in, out := &in.SecurityEncryptionType, &out.SecurityEncryptionType + *out = new(string) + **out = **in + } + if in.StorageAccountType != nil { + in, out := &in.StorageAccountType, &out.StorageAccountType + *out = new(string) + **out = **in + } + if in.WriteAcceleratorEnabled != nil { + in, out := &in.WriteAcceleratorEnabled, &out.WriteAcceleratorEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineOsDiskParameters. 
+func (in *WindowsVirtualMachineOsDiskParameters) DeepCopy() *WindowsVirtualMachineOsDiskParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineOsDiskParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineOsImageNotificationInitParameters) DeepCopyInto(out *WindowsVirtualMachineOsImageNotificationInitParameters) { + *out = *in + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineOsImageNotificationInitParameters. +func (in *WindowsVirtualMachineOsImageNotificationInitParameters) DeepCopy() *WindowsVirtualMachineOsImageNotificationInitParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineOsImageNotificationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineOsImageNotificationObservation) DeepCopyInto(out *WindowsVirtualMachineOsImageNotificationObservation) { + *out = *in + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineOsImageNotificationObservation. +func (in *WindowsVirtualMachineOsImageNotificationObservation) DeepCopy() *WindowsVirtualMachineOsImageNotificationObservation { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineOsImageNotificationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsVirtualMachineOsImageNotificationParameters) DeepCopyInto(out *WindowsVirtualMachineOsImageNotificationParameters) { + *out = *in + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineOsImageNotificationParameters. +func (in *WindowsVirtualMachineOsImageNotificationParameters) DeepCopy() *WindowsVirtualMachineOsImageNotificationParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineOsImageNotificationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineParameters) DeepCopyInto(out *WindowsVirtualMachineParameters) { + *out = *in + if in.AdditionalCapabilities != nil { + in, out := &in.AdditionalCapabilities, &out.AdditionalCapabilities + *out = new(WindowsVirtualMachineAdditionalCapabilitiesParameters) + (*in).DeepCopyInto(*out) + } + if in.AdditionalUnattendContent != nil { + in, out := &in.AdditionalUnattendContent, &out.AdditionalUnattendContent + *out = make([]WindowsVirtualMachineAdditionalUnattendContentParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + out.AdminPasswordSecretRef = in.AdminPasswordSecretRef + if in.AdminUsername != nil { + in, out := &in.AdminUsername, &out.AdminUsername + *out = new(string) + **out = **in + } + if in.AllowExtensionOperations != nil { + in, out := &in.AllowExtensionOperations, &out.AllowExtensionOperations + *out = new(bool) + **out = **in + } + if in.AvailabilitySetID != nil { + in, out := &in.AvailabilitySetID, &out.AvailabilitySetID + *out = new(string) + **out = **in + } + if in.BootDiagnostics != nil { + in, out := &in.BootDiagnostics, &out.BootDiagnostics + *out = new(WindowsVirtualMachineBootDiagnosticsParameters) + 
(*in).DeepCopyInto(*out) + } + if in.BypassPlatformSafetyChecksOnUserScheduleEnabled != nil { + in, out := &in.BypassPlatformSafetyChecksOnUserScheduleEnabled, &out.BypassPlatformSafetyChecksOnUserScheduleEnabled + *out = new(bool) + **out = **in + } + if in.CapacityReservationGroupID != nil { + in, out := &in.CapacityReservationGroupID, &out.CapacityReservationGroupID + *out = new(string) + **out = **in + } + if in.ComputerName != nil { + in, out := &in.ComputerName, &out.ComputerName + *out = new(string) + **out = **in + } + if in.CustomDataSecretRef != nil { + in, out := &in.CustomDataSecretRef, &out.CustomDataSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.DedicatedHostGroupID != nil { + in, out := &in.DedicatedHostGroupID, &out.DedicatedHostGroupID + *out = new(string) + **out = **in + } + if in.DedicatedHostID != nil { + in, out := &in.DedicatedHostID, &out.DedicatedHostID + *out = new(string) + **out = **in + } + if in.DiskControllerType != nil { + in, out := &in.DiskControllerType, &out.DiskControllerType + *out = new(string) + **out = **in + } + if in.EdgeZone != nil { + in, out := &in.EdgeZone, &out.EdgeZone + *out = new(string) + **out = **in + } + if in.EnableAutomaticUpdates != nil { + in, out := &in.EnableAutomaticUpdates, &out.EnableAutomaticUpdates + *out = new(bool) + **out = **in + } + if in.EncryptionAtHostEnabled != nil { + in, out := &in.EncryptionAtHostEnabled, &out.EncryptionAtHostEnabled + *out = new(bool) + **out = **in + } + if in.EvictionPolicy != nil { + in, out := &in.EvictionPolicy, &out.EvictionPolicy + *out = new(string) + **out = **in + } + if in.ExtensionsTimeBudget != nil { + in, out := &in.ExtensionsTimeBudget, &out.ExtensionsTimeBudget + *out = new(string) + **out = **in + } + if in.GalleryApplication != nil { + in, out := &in.GalleryApplication, &out.GalleryApplication + *out = make([]WindowsVirtualMachineGalleryApplicationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + 
} + } + if in.HotpatchingEnabled != nil { + in, out := &in.HotpatchingEnabled, &out.HotpatchingEnabled + *out = new(bool) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(WindowsVirtualMachineIdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.LicenseType != nil { + in, out := &in.LicenseType, &out.LicenseType + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MaxBidPrice != nil { + in, out := &in.MaxBidPrice, &out.MaxBidPrice + *out = new(float64) + **out = **in + } + if in.NetworkInterfaceIds != nil { + in, out := &in.NetworkInterfaceIds, &out.NetworkInterfaceIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NetworkInterfaceIdsRefs != nil { + in, out := &in.NetworkInterfaceIdsRefs, &out.NetworkInterfaceIdsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NetworkInterfaceIdsSelector != nil { + in, out := &in.NetworkInterfaceIdsSelector, &out.NetworkInterfaceIdsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.OsDisk != nil { + in, out := &in.OsDisk, &out.OsDisk + *out = new(WindowsVirtualMachineOsDiskParameters) + (*in).DeepCopyInto(*out) + } + if in.OsImageNotification != nil { + in, out := &in.OsImageNotification, &out.OsImageNotification + *out = new(WindowsVirtualMachineOsImageNotificationParameters) + (*in).DeepCopyInto(*out) + } + if in.PatchAssessmentMode != nil { + in, out := &in.PatchAssessmentMode, &out.PatchAssessmentMode + *out = new(string) + **out = **in + } + if in.PatchMode != nil { + in, out := &in.PatchMode, &out.PatchMode + *out = new(string) + **out = **in + } + if in.Plan != nil { + in, out := &in.Plan, &out.Plan + *out = new(WindowsVirtualMachinePlanParameters) + 
(*in).DeepCopyInto(*out) + } + if in.PlatformFaultDomain != nil { + in, out := &in.PlatformFaultDomain, &out.PlatformFaultDomain + *out = new(float64) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(string) + **out = **in + } + if in.ProvisionVMAgent != nil { + in, out := &in.ProvisionVMAgent, &out.ProvisionVMAgent + *out = new(bool) + **out = **in + } + if in.ProximityPlacementGroupID != nil { + in, out := &in.ProximityPlacementGroupID, &out.ProximityPlacementGroupID + *out = new(string) + **out = **in + } + if in.RebootSetting != nil { + in, out := &in.RebootSetting, &out.RebootSetting + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = make([]WindowsVirtualMachineSecretParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecureBootEnabled != nil { + in, out := &in.SecureBootEnabled, &out.SecureBootEnabled + *out = new(bool) + **out = **in + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(string) + **out = **in + } + if in.SourceImageID != nil { + in, out := &in.SourceImageID, &out.SourceImageID + *out = new(string) + **out = **in + } + if in.SourceImageReference != nil { + in, out := &in.SourceImageReference, &out.SourceImageReference + *out = new(WindowsVirtualMachineSourceImageReferenceParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + 
for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TerminationNotification != nil { + in, out := &in.TerminationNotification, &out.TerminationNotification + *out = new(WindowsVirtualMachineTerminationNotificationParameters) + (*in).DeepCopyInto(*out) + } + if in.Timezone != nil { + in, out := &in.Timezone, &out.Timezone + *out = new(string) + **out = **in + } + if in.UserData != nil { + in, out := &in.UserData, &out.UserData + *out = new(string) + **out = **in + } + if in.VMAgentPlatformUpdatesEnabled != nil { + in, out := &in.VMAgentPlatformUpdatesEnabled, &out.VMAgentPlatformUpdatesEnabled + *out = new(bool) + **out = **in + } + if in.VirtualMachineScaleSetID != nil { + in, out := &in.VirtualMachineScaleSetID, &out.VirtualMachineScaleSetID + *out = new(string) + **out = **in + } + if in.VtpmEnabled != nil { + in, out := &in.VtpmEnabled, &out.VtpmEnabled + *out = new(bool) + **out = **in + } + if in.WinrmListener != nil { + in, out := &in.WinrmListener, &out.WinrmListener + *out = make([]WindowsVirtualMachineWinrmListenerParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineParameters. +func (in *WindowsVirtualMachineParameters) DeepCopy() *WindowsVirtualMachineParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsVirtualMachinePlanInitParameters) DeepCopyInto(out *WindowsVirtualMachinePlanInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Product != nil { + in, out := &in.Product, &out.Product + *out = new(string) + **out = **in + } + if in.Publisher != nil { + in, out := &in.Publisher, &out.Publisher + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachinePlanInitParameters. +func (in *WindowsVirtualMachinePlanInitParameters) DeepCopy() *WindowsVirtualMachinePlanInitParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachinePlanInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachinePlanObservation) DeepCopyInto(out *WindowsVirtualMachinePlanObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Product != nil { + in, out := &in.Product, &out.Product + *out = new(string) + **out = **in + } + if in.Publisher != nil { + in, out := &in.Publisher, &out.Publisher + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachinePlanObservation. +func (in *WindowsVirtualMachinePlanObservation) DeepCopy() *WindowsVirtualMachinePlanObservation { + if in == nil { + return nil + } + out := new(WindowsVirtualMachinePlanObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsVirtualMachinePlanParameters) DeepCopyInto(out *WindowsVirtualMachinePlanParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Product != nil { + in, out := &in.Product, &out.Product + *out = new(string) + **out = **in + } + if in.Publisher != nil { + in, out := &in.Publisher, &out.Publisher + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachinePlanParameters. +func (in *WindowsVirtualMachinePlanParameters) DeepCopy() *WindowsVirtualMachinePlanParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachinePlanParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineScaleSet) DeepCopyInto(out *WindowsVirtualMachineScaleSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSet. +func (in *WindowsVirtualMachineScaleSet) DeepCopy() *WindowsVirtualMachineScaleSet { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineScaleSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WindowsVirtualMachineScaleSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsVirtualMachineScaleSetAdditionalCapabilitiesInitParameters) DeepCopyInto(out *WindowsVirtualMachineScaleSetAdditionalCapabilitiesInitParameters) { + *out = *in + if in.UltraSsdEnabled != nil { + in, out := &in.UltraSsdEnabled, &out.UltraSsdEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetAdditionalCapabilitiesInitParameters. +func (in *WindowsVirtualMachineScaleSetAdditionalCapabilitiesInitParameters) DeepCopy() *WindowsVirtualMachineScaleSetAdditionalCapabilitiesInitParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineScaleSetAdditionalCapabilitiesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineScaleSetAdditionalCapabilitiesObservation) DeepCopyInto(out *WindowsVirtualMachineScaleSetAdditionalCapabilitiesObservation) { + *out = *in + if in.UltraSsdEnabled != nil { + in, out := &in.UltraSsdEnabled, &out.UltraSsdEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetAdditionalCapabilitiesObservation. +func (in *WindowsVirtualMachineScaleSetAdditionalCapabilitiesObservation) DeepCopy() *WindowsVirtualMachineScaleSetAdditionalCapabilitiesObservation { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineScaleSetAdditionalCapabilitiesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsVirtualMachineScaleSetAdditionalCapabilitiesParameters) DeepCopyInto(out *WindowsVirtualMachineScaleSetAdditionalCapabilitiesParameters) { + *out = *in + if in.UltraSsdEnabled != nil { + in, out := &in.UltraSsdEnabled, &out.UltraSsdEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetAdditionalCapabilitiesParameters. +func (in *WindowsVirtualMachineScaleSetAdditionalCapabilitiesParameters) DeepCopy() *WindowsVirtualMachineScaleSetAdditionalCapabilitiesParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineScaleSetAdditionalCapabilitiesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineScaleSetAdditionalUnattendContentInitParameters) DeepCopyInto(out *WindowsVirtualMachineScaleSetAdditionalUnattendContentInitParameters) { + *out = *in + if in.Setting != nil { + in, out := &in.Setting, &out.Setting + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetAdditionalUnattendContentInitParameters. +func (in *WindowsVirtualMachineScaleSetAdditionalUnattendContentInitParameters) DeepCopy() *WindowsVirtualMachineScaleSetAdditionalUnattendContentInitParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineScaleSetAdditionalUnattendContentInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsVirtualMachineScaleSetAdditionalUnattendContentObservation) DeepCopyInto(out *WindowsVirtualMachineScaleSetAdditionalUnattendContentObservation) { + *out = *in + if in.Setting != nil { + in, out := &in.Setting, &out.Setting + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetAdditionalUnattendContentObservation. +func (in *WindowsVirtualMachineScaleSetAdditionalUnattendContentObservation) DeepCopy() *WindowsVirtualMachineScaleSetAdditionalUnattendContentObservation { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineScaleSetAdditionalUnattendContentObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineScaleSetAdditionalUnattendContentParameters) DeepCopyInto(out *WindowsVirtualMachineScaleSetAdditionalUnattendContentParameters) { + *out = *in + out.ContentSecretRef = in.ContentSecretRef + if in.Setting != nil { + in, out := &in.Setting, &out.Setting + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetAdditionalUnattendContentParameters. +func (in *WindowsVirtualMachineScaleSetAdditionalUnattendContentParameters) DeepCopy() *WindowsVirtualMachineScaleSetAdditionalUnattendContentParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineScaleSetAdditionalUnattendContentParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsVirtualMachineScaleSetAutomaticInstanceRepairInitParameters) DeepCopyInto(out *WindowsVirtualMachineScaleSetAutomaticInstanceRepairInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.GracePeriod != nil { + in, out := &in.GracePeriod, &out.GracePeriod + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetAutomaticInstanceRepairInitParameters. +func (in *WindowsVirtualMachineScaleSetAutomaticInstanceRepairInitParameters) DeepCopy() *WindowsVirtualMachineScaleSetAutomaticInstanceRepairInitParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineScaleSetAutomaticInstanceRepairInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineScaleSetAutomaticInstanceRepairObservation) DeepCopyInto(out *WindowsVirtualMachineScaleSetAutomaticInstanceRepairObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.GracePeriod != nil { + in, out := &in.GracePeriod, &out.GracePeriod + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetAutomaticInstanceRepairObservation. +func (in *WindowsVirtualMachineScaleSetAutomaticInstanceRepairObservation) DeepCopy() *WindowsVirtualMachineScaleSetAutomaticInstanceRepairObservation { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineScaleSetAutomaticInstanceRepairObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsVirtualMachineScaleSetAutomaticInstanceRepairParameters) DeepCopyInto(out *WindowsVirtualMachineScaleSetAutomaticInstanceRepairParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.GracePeriod != nil { + in, out := &in.GracePeriod, &out.GracePeriod + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetAutomaticInstanceRepairParameters. +func (in *WindowsVirtualMachineScaleSetAutomaticInstanceRepairParameters) DeepCopy() *WindowsVirtualMachineScaleSetAutomaticInstanceRepairParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineScaleSetAutomaticInstanceRepairParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineScaleSetAutomaticOsUpgradePolicyInitParameters) DeepCopyInto(out *WindowsVirtualMachineScaleSetAutomaticOsUpgradePolicyInitParameters) { + *out = *in + if in.DisableAutomaticRollback != nil { + in, out := &in.DisableAutomaticRollback, &out.DisableAutomaticRollback + *out = new(bool) + **out = **in + } + if in.EnableAutomaticOsUpgrade != nil { + in, out := &in.EnableAutomaticOsUpgrade, &out.EnableAutomaticOsUpgrade + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetAutomaticOsUpgradePolicyInitParameters. 
+func (in *WindowsVirtualMachineScaleSetAutomaticOsUpgradePolicyInitParameters) DeepCopy() *WindowsVirtualMachineScaleSetAutomaticOsUpgradePolicyInitParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineScaleSetAutomaticOsUpgradePolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineScaleSetAutomaticOsUpgradePolicyObservation) DeepCopyInto(out *WindowsVirtualMachineScaleSetAutomaticOsUpgradePolicyObservation) { + *out = *in + if in.DisableAutomaticRollback != nil { + in, out := &in.DisableAutomaticRollback, &out.DisableAutomaticRollback + *out = new(bool) + **out = **in + } + if in.EnableAutomaticOsUpgrade != nil { + in, out := &in.EnableAutomaticOsUpgrade, &out.EnableAutomaticOsUpgrade + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetAutomaticOsUpgradePolicyObservation. +func (in *WindowsVirtualMachineScaleSetAutomaticOsUpgradePolicyObservation) DeepCopy() *WindowsVirtualMachineScaleSetAutomaticOsUpgradePolicyObservation { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineScaleSetAutomaticOsUpgradePolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsVirtualMachineScaleSetAutomaticOsUpgradePolicyParameters) DeepCopyInto(out *WindowsVirtualMachineScaleSetAutomaticOsUpgradePolicyParameters) { + *out = *in + if in.DisableAutomaticRollback != nil { + in, out := &in.DisableAutomaticRollback, &out.DisableAutomaticRollback + *out = new(bool) + **out = **in + } + if in.EnableAutomaticOsUpgrade != nil { + in, out := &in.EnableAutomaticOsUpgrade, &out.EnableAutomaticOsUpgrade + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetAutomaticOsUpgradePolicyParameters. +func (in *WindowsVirtualMachineScaleSetAutomaticOsUpgradePolicyParameters) DeepCopy() *WindowsVirtualMachineScaleSetAutomaticOsUpgradePolicyParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineScaleSetAutomaticOsUpgradePolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineScaleSetBootDiagnosticsInitParameters) DeepCopyInto(out *WindowsVirtualMachineScaleSetBootDiagnosticsInitParameters) { + *out = *in + if in.StorageAccountURI != nil { + in, out := &in.StorageAccountURI, &out.StorageAccountURI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetBootDiagnosticsInitParameters. +func (in *WindowsVirtualMachineScaleSetBootDiagnosticsInitParameters) DeepCopy() *WindowsVirtualMachineScaleSetBootDiagnosticsInitParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineScaleSetBootDiagnosticsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsVirtualMachineScaleSetBootDiagnosticsObservation) DeepCopyInto(out *WindowsVirtualMachineScaleSetBootDiagnosticsObservation) { + *out = *in + if in.StorageAccountURI != nil { + in, out := &in.StorageAccountURI, &out.StorageAccountURI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetBootDiagnosticsObservation. +func (in *WindowsVirtualMachineScaleSetBootDiagnosticsObservation) DeepCopy() *WindowsVirtualMachineScaleSetBootDiagnosticsObservation { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineScaleSetBootDiagnosticsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineScaleSetBootDiagnosticsParameters) DeepCopyInto(out *WindowsVirtualMachineScaleSetBootDiagnosticsParameters) { + *out = *in + if in.StorageAccountURI != nil { + in, out := &in.StorageAccountURI, &out.StorageAccountURI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetBootDiagnosticsParameters. +func (in *WindowsVirtualMachineScaleSetBootDiagnosticsParameters) DeepCopy() *WindowsVirtualMachineScaleSetBootDiagnosticsParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineScaleSetBootDiagnosticsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsVirtualMachineScaleSetDataDiskInitParameters) DeepCopyInto(out *WindowsVirtualMachineScaleSetDataDiskInitParameters) { + *out = *in + if in.Caching != nil { + in, out := &in.Caching, &out.Caching + *out = new(string) + **out = **in + } + if in.CreateOption != nil { + in, out := &in.CreateOption, &out.CreateOption + *out = new(string) + **out = **in + } + if in.DiskEncryptionSetID != nil { + in, out := &in.DiskEncryptionSetID, &out.DiskEncryptionSetID + *out = new(string) + **out = **in + } + if in.DiskSizeGb != nil { + in, out := &in.DiskSizeGb, &out.DiskSizeGb + *out = new(float64) + **out = **in + } + if in.Lun != nil { + in, out := &in.Lun, &out.Lun + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.StorageAccountType != nil { + in, out := &in.StorageAccountType, &out.StorageAccountType + *out = new(string) + **out = **in + } + if in.UltraSsdDiskIopsReadWrite != nil { + in, out := &in.UltraSsdDiskIopsReadWrite, &out.UltraSsdDiskIopsReadWrite + *out = new(float64) + **out = **in + } + if in.UltraSsdDiskMbpsReadWrite != nil { + in, out := &in.UltraSsdDiskMbpsReadWrite, &out.UltraSsdDiskMbpsReadWrite + *out = new(float64) + **out = **in + } + if in.WriteAcceleratorEnabled != nil { + in, out := &in.WriteAcceleratorEnabled, &out.WriteAcceleratorEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetDataDiskInitParameters. +func (in *WindowsVirtualMachineScaleSetDataDiskInitParameters) DeepCopy() *WindowsVirtualMachineScaleSetDataDiskInitParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineScaleSetDataDiskInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsVirtualMachineScaleSetDataDiskObservation) DeepCopyInto(out *WindowsVirtualMachineScaleSetDataDiskObservation) { + *out = *in + if in.Caching != nil { + in, out := &in.Caching, &out.Caching + *out = new(string) + **out = **in + } + if in.CreateOption != nil { + in, out := &in.CreateOption, &out.CreateOption + *out = new(string) + **out = **in + } + if in.DiskEncryptionSetID != nil { + in, out := &in.DiskEncryptionSetID, &out.DiskEncryptionSetID + *out = new(string) + **out = **in + } + if in.DiskSizeGb != nil { + in, out := &in.DiskSizeGb, &out.DiskSizeGb + *out = new(float64) + **out = **in + } + if in.Lun != nil { + in, out := &in.Lun, &out.Lun + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.StorageAccountType != nil { + in, out := &in.StorageAccountType, &out.StorageAccountType + *out = new(string) + **out = **in + } + if in.UltraSsdDiskIopsReadWrite != nil { + in, out := &in.UltraSsdDiskIopsReadWrite, &out.UltraSsdDiskIopsReadWrite + *out = new(float64) + **out = **in + } + if in.UltraSsdDiskMbpsReadWrite != nil { + in, out := &in.UltraSsdDiskMbpsReadWrite, &out.UltraSsdDiskMbpsReadWrite + *out = new(float64) + **out = **in + } + if in.WriteAcceleratorEnabled != nil { + in, out := &in.WriteAcceleratorEnabled, &out.WriteAcceleratorEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetDataDiskObservation. +func (in *WindowsVirtualMachineScaleSetDataDiskObservation) DeepCopy() *WindowsVirtualMachineScaleSetDataDiskObservation { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineScaleSetDataDiskObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsVirtualMachineScaleSetDataDiskParameters) DeepCopyInto(out *WindowsVirtualMachineScaleSetDataDiskParameters) { + *out = *in + if in.Caching != nil { + in, out := &in.Caching, &out.Caching + *out = new(string) + **out = **in + } + if in.CreateOption != nil { + in, out := &in.CreateOption, &out.CreateOption + *out = new(string) + **out = **in + } + if in.DiskEncryptionSetID != nil { + in, out := &in.DiskEncryptionSetID, &out.DiskEncryptionSetID + *out = new(string) + **out = **in + } + if in.DiskSizeGb != nil { + in, out := &in.DiskSizeGb, &out.DiskSizeGb + *out = new(float64) + **out = **in + } + if in.Lun != nil { + in, out := &in.Lun, &out.Lun + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.StorageAccountType != nil { + in, out := &in.StorageAccountType, &out.StorageAccountType + *out = new(string) + **out = **in + } + if in.UltraSsdDiskIopsReadWrite != nil { + in, out := &in.UltraSsdDiskIopsReadWrite, &out.UltraSsdDiskIopsReadWrite + *out = new(float64) + **out = **in + } + if in.UltraSsdDiskMbpsReadWrite != nil { + in, out := &in.UltraSsdDiskMbpsReadWrite, &out.UltraSsdDiskMbpsReadWrite + *out = new(float64) + **out = **in + } + if in.WriteAcceleratorEnabled != nil { + in, out := &in.WriteAcceleratorEnabled, &out.WriteAcceleratorEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetDataDiskParameters. +func (in *WindowsVirtualMachineScaleSetDataDiskParameters) DeepCopy() *WindowsVirtualMachineScaleSetDataDiskParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineScaleSetDataDiskParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsVirtualMachineScaleSetExtensionInitParameters) DeepCopyInto(out *WindowsVirtualMachineScaleSetExtensionInitParameters) { + *out = *in + if in.AutoUpgradeMinorVersion != nil { + in, out := &in.AutoUpgradeMinorVersion, &out.AutoUpgradeMinorVersion + *out = new(bool) + **out = **in + } + if in.AutomaticUpgradeEnabled != nil { + in, out := &in.AutomaticUpgradeEnabled, &out.AutomaticUpgradeEnabled + *out = new(bool) + **out = **in + } + if in.ForceUpdateTag != nil { + in, out := &in.ForceUpdateTag, &out.ForceUpdateTag + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ProtectedSettingsFromKeyVault != nil { + in, out := &in.ProtectedSettingsFromKeyVault, &out.ProtectedSettingsFromKeyVault + *out = new(WindowsVirtualMachineScaleSetExtensionProtectedSettingsFromKeyVaultInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ProvisionAfterExtensions != nil { + in, out := &in.ProvisionAfterExtensions, &out.ProvisionAfterExtensions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Publisher != nil { + in, out := &in.Publisher, &out.Publisher + *out = new(string) + **out = **in + } + if in.Settings != nil { + in, out := &in.Settings, &out.Settings + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.TypeHandlerVersion != nil { + in, out := &in.TypeHandlerVersion, &out.TypeHandlerVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetExtensionInitParameters. 
+func (in *WindowsVirtualMachineScaleSetExtensionInitParameters) DeepCopy() *WindowsVirtualMachineScaleSetExtensionInitParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineScaleSetExtensionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineScaleSetExtensionObservation) DeepCopyInto(out *WindowsVirtualMachineScaleSetExtensionObservation) { + *out = *in + if in.AutoUpgradeMinorVersion != nil { + in, out := &in.AutoUpgradeMinorVersion, &out.AutoUpgradeMinorVersion + *out = new(bool) + **out = **in + } + if in.AutomaticUpgradeEnabled != nil { + in, out := &in.AutomaticUpgradeEnabled, &out.AutomaticUpgradeEnabled + *out = new(bool) + **out = **in + } + if in.ForceUpdateTag != nil { + in, out := &in.ForceUpdateTag, &out.ForceUpdateTag + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ProtectedSettingsFromKeyVault != nil { + in, out := &in.ProtectedSettingsFromKeyVault, &out.ProtectedSettingsFromKeyVault + *out = new(WindowsVirtualMachineScaleSetExtensionProtectedSettingsFromKeyVaultObservation) + (*in).DeepCopyInto(*out) + } + if in.ProvisionAfterExtensions != nil { + in, out := &in.ProvisionAfterExtensions, &out.ProvisionAfterExtensions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Publisher != nil { + in, out := &in.Publisher, &out.Publisher + *out = new(string) + **out = **in + } + if in.Settings != nil { + in, out := &in.Settings, &out.Settings + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.TypeHandlerVersion != nil { + in, out := &in.TypeHandlerVersion, &out.TypeHandlerVersion + 
*out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetExtensionObservation. +func (in *WindowsVirtualMachineScaleSetExtensionObservation) DeepCopy() *WindowsVirtualMachineScaleSetExtensionObservation { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineScaleSetExtensionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineScaleSetExtensionParameters) DeepCopyInto(out *WindowsVirtualMachineScaleSetExtensionParameters) { + *out = *in + if in.AutoUpgradeMinorVersion != nil { + in, out := &in.AutoUpgradeMinorVersion, &out.AutoUpgradeMinorVersion + *out = new(bool) + **out = **in + } + if in.AutomaticUpgradeEnabled != nil { + in, out := &in.AutomaticUpgradeEnabled, &out.AutomaticUpgradeEnabled + *out = new(bool) + **out = **in + } + if in.ForceUpdateTag != nil { + in, out := &in.ForceUpdateTag, &out.ForceUpdateTag + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ProtectedSettingsFromKeyVault != nil { + in, out := &in.ProtectedSettingsFromKeyVault, &out.ProtectedSettingsFromKeyVault + *out = new(WindowsVirtualMachineScaleSetExtensionProtectedSettingsFromKeyVaultParameters) + (*in).DeepCopyInto(*out) + } + if in.ProtectedSettingsSecretRef != nil { + in, out := &in.ProtectedSettingsSecretRef, &out.ProtectedSettingsSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ProvisionAfterExtensions != nil { + in, out := &in.ProvisionAfterExtensions, &out.ProvisionAfterExtensions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Publisher != nil { + in, out := &in.Publisher, 
&out.Publisher + *out = new(string) + **out = **in + } + if in.Settings != nil { + in, out := &in.Settings, &out.Settings + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.TypeHandlerVersion != nil { + in, out := &in.TypeHandlerVersion, &out.TypeHandlerVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetExtensionParameters. +func (in *WindowsVirtualMachineScaleSetExtensionParameters) DeepCopy() *WindowsVirtualMachineScaleSetExtensionParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineScaleSetExtensionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineScaleSetExtensionProtectedSettingsFromKeyVaultInitParameters) DeepCopyInto(out *WindowsVirtualMachineScaleSetExtensionProtectedSettingsFromKeyVaultInitParameters) { + *out = *in + if in.SecretURL != nil { + in, out := &in.SecretURL, &out.SecretURL + *out = new(string) + **out = **in + } + if in.SourceVaultID != nil { + in, out := &in.SourceVaultID, &out.SourceVaultID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetExtensionProtectedSettingsFromKeyVaultInitParameters. +func (in *WindowsVirtualMachineScaleSetExtensionProtectedSettingsFromKeyVaultInitParameters) DeepCopy() *WindowsVirtualMachineScaleSetExtensionProtectedSettingsFromKeyVaultInitParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineScaleSetExtensionProtectedSettingsFromKeyVaultInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *WindowsVirtualMachineScaleSetExtensionProtectedSettingsFromKeyVaultObservation) DeepCopyInto(out *WindowsVirtualMachineScaleSetExtensionProtectedSettingsFromKeyVaultObservation) { + *out = *in + if in.SecretURL != nil { + in, out := &in.SecretURL, &out.SecretURL + *out = new(string) + **out = **in + } + if in.SourceVaultID != nil { + in, out := &in.SourceVaultID, &out.SourceVaultID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetExtensionProtectedSettingsFromKeyVaultObservation. +func (in *WindowsVirtualMachineScaleSetExtensionProtectedSettingsFromKeyVaultObservation) DeepCopy() *WindowsVirtualMachineScaleSetExtensionProtectedSettingsFromKeyVaultObservation { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineScaleSetExtensionProtectedSettingsFromKeyVaultObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineScaleSetExtensionProtectedSettingsFromKeyVaultParameters) DeepCopyInto(out *WindowsVirtualMachineScaleSetExtensionProtectedSettingsFromKeyVaultParameters) { + *out = *in + if in.SecretURL != nil { + in, out := &in.SecretURL, &out.SecretURL + *out = new(string) + **out = **in + } + if in.SourceVaultID != nil { + in, out := &in.SourceVaultID, &out.SourceVaultID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetExtensionProtectedSettingsFromKeyVaultParameters. 
+func (in *WindowsVirtualMachineScaleSetExtensionProtectedSettingsFromKeyVaultParameters) DeepCopy() *WindowsVirtualMachineScaleSetExtensionProtectedSettingsFromKeyVaultParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineScaleSetExtensionProtectedSettingsFromKeyVaultParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineScaleSetGalleryApplicationInitParameters) DeepCopyInto(out *WindowsVirtualMachineScaleSetGalleryApplicationInitParameters) { + *out = *in + if in.ConfigurationBlobURI != nil { + in, out := &in.ConfigurationBlobURI, &out.ConfigurationBlobURI + *out = new(string) + **out = **in + } + if in.Order != nil { + in, out := &in.Order, &out.Order + *out = new(float64) + **out = **in + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(string) + **out = **in + } + if in.VersionID != nil { + in, out := &in.VersionID, &out.VersionID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetGalleryApplicationInitParameters. +func (in *WindowsVirtualMachineScaleSetGalleryApplicationInitParameters) DeepCopy() *WindowsVirtualMachineScaleSetGalleryApplicationInitParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineScaleSetGalleryApplicationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsVirtualMachineScaleSetGalleryApplicationObservation) DeepCopyInto(out *WindowsVirtualMachineScaleSetGalleryApplicationObservation) { + *out = *in + if in.ConfigurationBlobURI != nil { + in, out := &in.ConfigurationBlobURI, &out.ConfigurationBlobURI + *out = new(string) + **out = **in + } + if in.Order != nil { + in, out := &in.Order, &out.Order + *out = new(float64) + **out = **in + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(string) + **out = **in + } + if in.VersionID != nil { + in, out := &in.VersionID, &out.VersionID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetGalleryApplicationObservation. +func (in *WindowsVirtualMachineScaleSetGalleryApplicationObservation) DeepCopy() *WindowsVirtualMachineScaleSetGalleryApplicationObservation { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineScaleSetGalleryApplicationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineScaleSetGalleryApplicationParameters) DeepCopyInto(out *WindowsVirtualMachineScaleSetGalleryApplicationParameters) { + *out = *in + if in.ConfigurationBlobURI != nil { + in, out := &in.ConfigurationBlobURI, &out.ConfigurationBlobURI + *out = new(string) + **out = **in + } + if in.Order != nil { + in, out := &in.Order, &out.Order + *out = new(float64) + **out = **in + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(string) + **out = **in + } + if in.VersionID != nil { + in, out := &in.VersionID, &out.VersionID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetGalleryApplicationParameters. 
+func (in *WindowsVirtualMachineScaleSetGalleryApplicationParameters) DeepCopy() *WindowsVirtualMachineScaleSetGalleryApplicationParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineScaleSetGalleryApplicationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineScaleSetGalleryApplicationsInitParameters) DeepCopyInto(out *WindowsVirtualMachineScaleSetGalleryApplicationsInitParameters) { + *out = *in + if in.ConfigurationReferenceBlobURI != nil { + in, out := &in.ConfigurationReferenceBlobURI, &out.ConfigurationReferenceBlobURI + *out = new(string) + **out = **in + } + if in.Order != nil { + in, out := &in.Order, &out.Order + *out = new(float64) + **out = **in + } + if in.PackageReferenceID != nil { + in, out := &in.PackageReferenceID, &out.PackageReferenceID + *out = new(string) + **out = **in + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetGalleryApplicationsInitParameters. +func (in *WindowsVirtualMachineScaleSetGalleryApplicationsInitParameters) DeepCopy() *WindowsVirtualMachineScaleSetGalleryApplicationsInitParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineScaleSetGalleryApplicationsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsVirtualMachineScaleSetGalleryApplicationsObservation) DeepCopyInto(out *WindowsVirtualMachineScaleSetGalleryApplicationsObservation) { + *out = *in + if in.ConfigurationReferenceBlobURI != nil { + in, out := &in.ConfigurationReferenceBlobURI, &out.ConfigurationReferenceBlobURI + *out = new(string) + **out = **in + } + if in.Order != nil { + in, out := &in.Order, &out.Order + *out = new(float64) + **out = **in + } + if in.PackageReferenceID != nil { + in, out := &in.PackageReferenceID, &out.PackageReferenceID + *out = new(string) + **out = **in + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetGalleryApplicationsObservation. +func (in *WindowsVirtualMachineScaleSetGalleryApplicationsObservation) DeepCopy() *WindowsVirtualMachineScaleSetGalleryApplicationsObservation { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineScaleSetGalleryApplicationsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsVirtualMachineScaleSetGalleryApplicationsParameters) DeepCopyInto(out *WindowsVirtualMachineScaleSetGalleryApplicationsParameters) { + *out = *in + if in.ConfigurationReferenceBlobURI != nil { + in, out := &in.ConfigurationReferenceBlobURI, &out.ConfigurationReferenceBlobURI + *out = new(string) + **out = **in + } + if in.Order != nil { + in, out := &in.Order, &out.Order + *out = new(float64) + **out = **in + } + if in.PackageReferenceID != nil { + in, out := &in.PackageReferenceID, &out.PackageReferenceID + *out = new(string) + **out = **in + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetGalleryApplicationsParameters. +func (in *WindowsVirtualMachineScaleSetGalleryApplicationsParameters) DeepCopy() *WindowsVirtualMachineScaleSetGalleryApplicationsParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineScaleSetGalleryApplicationsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineScaleSetIdentityInitParameters) DeepCopyInto(out *WindowsVirtualMachineScaleSetIdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetIdentityInitParameters. 
+func (in *WindowsVirtualMachineScaleSetIdentityInitParameters) DeepCopy() *WindowsVirtualMachineScaleSetIdentityInitParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineScaleSetIdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineScaleSetIdentityObservation) DeepCopyInto(out *WindowsVirtualMachineScaleSetIdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetIdentityObservation. +func (in *WindowsVirtualMachineScaleSetIdentityObservation) DeepCopy() *WindowsVirtualMachineScaleSetIdentityObservation { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineScaleSetIdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsVirtualMachineScaleSetIdentityParameters) DeepCopyInto(out *WindowsVirtualMachineScaleSetIdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetIdentityParameters. +func (in *WindowsVirtualMachineScaleSetIdentityParameters) DeepCopy() *WindowsVirtualMachineScaleSetIdentityParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineScaleSetIdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineScaleSetInitParameters) DeepCopyInto(out *WindowsVirtualMachineScaleSetInitParameters) { + *out = *in + if in.AdditionalCapabilities != nil { + in, out := &in.AdditionalCapabilities, &out.AdditionalCapabilities + *out = new(WindowsVirtualMachineScaleSetAdditionalCapabilitiesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AdditionalUnattendContent != nil { + in, out := &in.AdditionalUnattendContent, &out.AdditionalUnattendContent + *out = make([]WindowsVirtualMachineScaleSetAdditionalUnattendContentInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AdminUsername != nil { + in, out := &in.AdminUsername, &out.AdminUsername + *out = new(string) + **out = **in + } + if in.AutomaticInstanceRepair != nil { + in, out := &in.AutomaticInstanceRepair, &out.AutomaticInstanceRepair + *out = new(WindowsVirtualMachineScaleSetAutomaticInstanceRepairInitParameters) + (*in).DeepCopyInto(*out) + } + if 
in.AutomaticOsUpgradePolicy != nil { + in, out := &in.AutomaticOsUpgradePolicy, &out.AutomaticOsUpgradePolicy + *out = new(WindowsVirtualMachineScaleSetAutomaticOsUpgradePolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.BootDiagnostics != nil { + in, out := &in.BootDiagnostics, &out.BootDiagnostics + *out = new(WindowsVirtualMachineScaleSetBootDiagnosticsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CapacityReservationGroupID != nil { + in, out := &in.CapacityReservationGroupID, &out.CapacityReservationGroupID + *out = new(string) + **out = **in + } + if in.ComputerNamePrefix != nil { + in, out := &in.ComputerNamePrefix, &out.ComputerNamePrefix + *out = new(string) + **out = **in + } + if in.DataDisk != nil { + in, out := &in.DataDisk, &out.DataDisk + *out = make([]WindowsVirtualMachineScaleSetDataDiskInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DoNotRunExtensionsOnOverprovisionedMachines != nil { + in, out := &in.DoNotRunExtensionsOnOverprovisionedMachines, &out.DoNotRunExtensionsOnOverprovisionedMachines + *out = new(bool) + **out = **in + } + if in.EdgeZone != nil { + in, out := &in.EdgeZone, &out.EdgeZone + *out = new(string) + **out = **in + } + if in.EnableAutomaticUpdates != nil { + in, out := &in.EnableAutomaticUpdates, &out.EnableAutomaticUpdates + *out = new(bool) + **out = **in + } + if in.EncryptionAtHostEnabled != nil { + in, out := &in.EncryptionAtHostEnabled, &out.EncryptionAtHostEnabled + *out = new(bool) + **out = **in + } + if in.EvictionPolicy != nil { + in, out := &in.EvictionPolicy, &out.EvictionPolicy + *out = new(string) + **out = **in + } + if in.Extension != nil { + in, out := &in.Extension, &out.Extension + *out = make([]WindowsVirtualMachineScaleSetExtensionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ExtensionOperationsEnabled != nil { + in, out := &in.ExtensionOperationsEnabled, 
&out.ExtensionOperationsEnabled + *out = new(bool) + **out = **in + } + if in.ExtensionsTimeBudget != nil { + in, out := &in.ExtensionsTimeBudget, &out.ExtensionsTimeBudget + *out = new(string) + **out = **in + } + if in.GalleryApplication != nil { + in, out := &in.GalleryApplication, &out.GalleryApplication + *out = make([]WindowsVirtualMachineScaleSetGalleryApplicationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.GalleryApplications != nil { + in, out := &in.GalleryApplications, &out.GalleryApplications + *out = make([]WindowsVirtualMachineScaleSetGalleryApplicationsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HealthProbeID != nil { + in, out := &in.HealthProbeID, &out.HealthProbeID + *out = new(string) + **out = **in + } + if in.HostGroupID != nil { + in, out := &in.HostGroupID, &out.HostGroupID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(WindowsVirtualMachineScaleSetIdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Instances != nil { + in, out := &in.Instances, &out.Instances + *out = new(float64) + **out = **in + } + if in.LicenseType != nil { + in, out := &in.LicenseType, &out.LicenseType + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MaxBidPrice != nil { + in, out := &in.MaxBidPrice, &out.MaxBidPrice + *out = new(float64) + **out = **in + } + if in.NetworkInterface != nil { + in, out := &in.NetworkInterface, &out.NetworkInterface + *out = make([]WindowsVirtualMachineScaleSetNetworkInterfaceInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OsDisk != nil { + in, out := &in.OsDisk, &out.OsDisk + *out = new(WindowsVirtualMachineScaleSetOsDiskInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Overprovision 
!= nil { + in, out := &in.Overprovision, &out.Overprovision + *out = new(bool) + **out = **in + } + if in.Plan != nil { + in, out := &in.Plan, &out.Plan + *out = new(WindowsVirtualMachineScaleSetPlanInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PlatformFaultDomainCount != nil { + in, out := &in.PlatformFaultDomainCount, &out.PlatformFaultDomainCount + *out = new(float64) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(string) + **out = **in + } + if in.ProvisionVMAgent != nil { + in, out := &in.ProvisionVMAgent, &out.ProvisionVMAgent + *out = new(bool) + **out = **in + } + if in.ProximityPlacementGroupID != nil { + in, out := &in.ProximityPlacementGroupID, &out.ProximityPlacementGroupID + *out = new(string) + **out = **in + } + if in.RollingUpgradePolicy != nil { + in, out := &in.RollingUpgradePolicy, &out.RollingUpgradePolicy + *out = new(WindowsVirtualMachineScaleSetRollingUpgradePolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ScaleIn != nil { + in, out := &in.ScaleIn, &out.ScaleIn + *out = new(WindowsVirtualMachineScaleSetScaleInInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ScaleInPolicy != nil { + in, out := &in.ScaleInPolicy, &out.ScaleInPolicy + *out = new(string) + **out = **in + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = make([]WindowsVirtualMachineScaleSetSecretInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecureBootEnabled != nil { + in, out := &in.SecureBootEnabled, &out.SecureBootEnabled + *out = new(bool) + **out = **in + } + if in.SinglePlacementGroup != nil { + in, out := &in.SinglePlacementGroup, &out.SinglePlacementGroup + *out = new(bool) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.SourceImageID != nil { + in, out := &in.SourceImageID, &out.SourceImageID + *out = new(string) + **out = **in + } + if 
in.SourceImageReference != nil { + in, out := &in.SourceImageReference, &out.SourceImageReference + *out = new(WindowsVirtualMachineScaleSetSourceImageReferenceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SpotRestore != nil { + in, out := &in.SpotRestore, &out.SpotRestore + *out = new(WindowsVirtualMachineScaleSetSpotRestoreInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TerminateNotification != nil { + in, out := &in.TerminateNotification, &out.TerminateNotification + *out = new(WindowsVirtualMachineScaleSetTerminateNotificationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TerminationNotification != nil { + in, out := &in.TerminationNotification, &out.TerminationNotification + *out = new(WindowsVirtualMachineScaleSetTerminationNotificationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Timezone != nil { + in, out := &in.Timezone, &out.Timezone + *out = new(string) + **out = **in + } + if in.UpgradeMode != nil { + in, out := &in.UpgradeMode, &out.UpgradeMode + *out = new(string) + **out = **in + } + if in.UserData != nil { + in, out := &in.UserData, &out.UserData + *out = new(string) + **out = **in + } + if in.VtpmEnabled != nil { + in, out := &in.VtpmEnabled, &out.VtpmEnabled + *out = new(bool) + **out = **in + } + if in.WinrmListener != nil { + in, out := &in.WinrmListener, &out.WinrmListener + *out = make([]WindowsVirtualMachineScaleSetWinrmListenerInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ZoneBalance != nil { + in, out := &in.ZoneBalance, &out.ZoneBalance + *out = new(bool) + **out = **in + } + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out 
= make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetInitParameters. +func (in *WindowsVirtualMachineScaleSetInitParameters) DeepCopy() *WindowsVirtualMachineScaleSetInitParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineScaleSetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineScaleSetList) DeepCopyInto(out *WindowsVirtualMachineScaleSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]WindowsVirtualMachineScaleSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetList. +func (in *WindowsVirtualMachineScaleSetList) DeepCopy() *WindowsVirtualMachineScaleSetList { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineScaleSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WindowsVirtualMachineScaleSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsVirtualMachineScaleSetNetworkInterfaceIPConfigurationInitParameters) DeepCopyInto(out *WindowsVirtualMachineScaleSetNetworkInterfaceIPConfigurationInitParameters) { + *out = *in + if in.ApplicationGatewayBackendAddressPoolIds != nil { + in, out := &in.ApplicationGatewayBackendAddressPoolIds, &out.ApplicationGatewayBackendAddressPoolIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ApplicationSecurityGroupIds != nil { + in, out := &in.ApplicationSecurityGroupIds, &out.ApplicationSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LoadBalancerBackendAddressPoolIds != nil { + in, out := &in.LoadBalancerBackendAddressPoolIds, &out.LoadBalancerBackendAddressPoolIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LoadBalancerInboundNATRulesIds != nil { + in, out := &in.LoadBalancerInboundNATRulesIds, &out.LoadBalancerInboundNATRulesIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Primary != nil { + in, out := &in.Primary, &out.Primary + *out = new(bool) + **out = **in + } + if in.PublicIPAddress != nil { + in, out := &in.PublicIPAddress, &out.PublicIPAddress + *out = make([]NetworkInterfaceIPConfigurationPublicIPAddressInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := 
&in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetNetworkInterfaceIPConfigurationInitParameters. +func (in *WindowsVirtualMachineScaleSetNetworkInterfaceIPConfigurationInitParameters) DeepCopy() *WindowsVirtualMachineScaleSetNetworkInterfaceIPConfigurationInitParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineScaleSetNetworkInterfaceIPConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineScaleSetNetworkInterfaceIPConfigurationObservation) DeepCopyInto(out *WindowsVirtualMachineScaleSetNetworkInterfaceIPConfigurationObservation) { + *out = *in + if in.ApplicationGatewayBackendAddressPoolIds != nil { + in, out := &in.ApplicationGatewayBackendAddressPoolIds, &out.ApplicationGatewayBackendAddressPoolIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ApplicationSecurityGroupIds != nil { + in, out := &in.ApplicationSecurityGroupIds, &out.ApplicationSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LoadBalancerBackendAddressPoolIds != nil { + in, out := &in.LoadBalancerBackendAddressPoolIds, &out.LoadBalancerBackendAddressPoolIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + 
in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LoadBalancerInboundNATRulesIds != nil { + in, out := &in.LoadBalancerInboundNATRulesIds, &out.LoadBalancerInboundNATRulesIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Primary != nil { + in, out := &in.Primary, &out.Primary + *out = new(bool) + **out = **in + } + if in.PublicIPAddress != nil { + in, out := &in.PublicIPAddress, &out.PublicIPAddress + *out = make([]NetworkInterfaceIPConfigurationPublicIPAddressObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetNetworkInterfaceIPConfigurationObservation. +func (in *WindowsVirtualMachineScaleSetNetworkInterfaceIPConfigurationObservation) DeepCopy() *WindowsVirtualMachineScaleSetNetworkInterfaceIPConfigurationObservation { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineScaleSetNetworkInterfaceIPConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsVirtualMachineScaleSetNetworkInterfaceIPConfigurationParameters) DeepCopyInto(out *WindowsVirtualMachineScaleSetNetworkInterfaceIPConfigurationParameters) { + *out = *in + if in.ApplicationGatewayBackendAddressPoolIds != nil { + in, out := &in.ApplicationGatewayBackendAddressPoolIds, &out.ApplicationGatewayBackendAddressPoolIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ApplicationSecurityGroupIds != nil { + in, out := &in.ApplicationSecurityGroupIds, &out.ApplicationSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LoadBalancerBackendAddressPoolIds != nil { + in, out := &in.LoadBalancerBackendAddressPoolIds, &out.LoadBalancerBackendAddressPoolIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LoadBalancerInboundNATRulesIds != nil { + in, out := &in.LoadBalancerInboundNATRulesIds, &out.LoadBalancerInboundNATRulesIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Primary != nil { + in, out := &in.Primary, &out.Primary + *out = new(bool) + **out = **in + } + if in.PublicIPAddress != nil { + in, out := &in.PublicIPAddress, &out.PublicIPAddress + *out = make([]NetworkInterfaceIPConfigurationPublicIPAddressParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := 
&in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetNetworkInterfaceIPConfigurationParameters. +func (in *WindowsVirtualMachineScaleSetNetworkInterfaceIPConfigurationParameters) DeepCopy() *WindowsVirtualMachineScaleSetNetworkInterfaceIPConfigurationParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineScaleSetNetworkInterfaceIPConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineScaleSetNetworkInterfaceInitParameters) DeepCopyInto(out *WindowsVirtualMachineScaleSetNetworkInterfaceInitParameters) { + *out = *in + if in.DNSServers != nil { + in, out := &in.DNSServers, &out.DNSServers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.EnableAcceleratedNetworking != nil { + in, out := &in.EnableAcceleratedNetworking, &out.EnableAcceleratedNetworking + *out = new(bool) + **out = **in + } + if in.EnableIPForwarding != nil { + in, out := &in.EnableIPForwarding, &out.EnableIPForwarding + *out = new(bool) + **out = **in + } + if in.IPConfiguration != nil { + in, out := &in.IPConfiguration, &out.IPConfiguration + *out = make([]WindowsVirtualMachineScaleSetNetworkInterfaceIPConfigurationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = 
**in + } + if in.NetworkSecurityGroupID != nil { + in, out := &in.NetworkSecurityGroupID, &out.NetworkSecurityGroupID + *out = new(string) + **out = **in + } + if in.Primary != nil { + in, out := &in.Primary, &out.Primary + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetNetworkInterfaceInitParameters. +func (in *WindowsVirtualMachineScaleSetNetworkInterfaceInitParameters) DeepCopy() *WindowsVirtualMachineScaleSetNetworkInterfaceInitParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineScaleSetNetworkInterfaceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineScaleSetNetworkInterfaceObservation) DeepCopyInto(out *WindowsVirtualMachineScaleSetNetworkInterfaceObservation) { + *out = *in + if in.DNSServers != nil { + in, out := &in.DNSServers, &out.DNSServers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.EnableAcceleratedNetworking != nil { + in, out := &in.EnableAcceleratedNetworking, &out.EnableAcceleratedNetworking + *out = new(bool) + **out = **in + } + if in.EnableIPForwarding != nil { + in, out := &in.EnableIPForwarding, &out.EnableIPForwarding + *out = new(bool) + **out = **in + } + if in.IPConfiguration != nil { + in, out := &in.IPConfiguration, &out.IPConfiguration + *out = make([]WindowsVirtualMachineScaleSetNetworkInterfaceIPConfigurationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NetworkSecurityGroupID != nil { + in, out := &in.NetworkSecurityGroupID, &out.NetworkSecurityGroupID + *out = new(string) 
+ **out = **in + } + if in.Primary != nil { + in, out := &in.Primary, &out.Primary + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetNetworkInterfaceObservation. +func (in *WindowsVirtualMachineScaleSetNetworkInterfaceObservation) DeepCopy() *WindowsVirtualMachineScaleSetNetworkInterfaceObservation { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineScaleSetNetworkInterfaceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineScaleSetNetworkInterfaceParameters) DeepCopyInto(out *WindowsVirtualMachineScaleSetNetworkInterfaceParameters) { + *out = *in + if in.DNSServers != nil { + in, out := &in.DNSServers, &out.DNSServers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.EnableAcceleratedNetworking != nil { + in, out := &in.EnableAcceleratedNetworking, &out.EnableAcceleratedNetworking + *out = new(bool) + **out = **in + } + if in.EnableIPForwarding != nil { + in, out := &in.EnableIPForwarding, &out.EnableIPForwarding + *out = new(bool) + **out = **in + } + if in.IPConfiguration != nil { + in, out := &in.IPConfiguration, &out.IPConfiguration + *out = make([]WindowsVirtualMachineScaleSetNetworkInterfaceIPConfigurationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NetworkSecurityGroupID != nil { + in, out := &in.NetworkSecurityGroupID, &out.NetworkSecurityGroupID + *out = new(string) + **out = **in + } + if in.Primary != nil { + in, out := &in.Primary, &out.Primary + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an 
autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetNetworkInterfaceParameters. +func (in *WindowsVirtualMachineScaleSetNetworkInterfaceParameters) DeepCopy() *WindowsVirtualMachineScaleSetNetworkInterfaceParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineScaleSetNetworkInterfaceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineScaleSetObservation) DeepCopyInto(out *WindowsVirtualMachineScaleSetObservation) { + *out = *in + if in.AdditionalCapabilities != nil { + in, out := &in.AdditionalCapabilities, &out.AdditionalCapabilities + *out = new(WindowsVirtualMachineScaleSetAdditionalCapabilitiesObservation) + (*in).DeepCopyInto(*out) + } + if in.AdditionalUnattendContent != nil { + in, out := &in.AdditionalUnattendContent, &out.AdditionalUnattendContent + *out = make([]WindowsVirtualMachineScaleSetAdditionalUnattendContentObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AdminUsername != nil { + in, out := &in.AdminUsername, &out.AdminUsername + *out = new(string) + **out = **in + } + if in.AutomaticInstanceRepair != nil { + in, out := &in.AutomaticInstanceRepair, &out.AutomaticInstanceRepair + *out = new(WindowsVirtualMachineScaleSetAutomaticInstanceRepairObservation) + (*in).DeepCopyInto(*out) + } + if in.AutomaticOsUpgradePolicy != nil { + in, out := &in.AutomaticOsUpgradePolicy, &out.AutomaticOsUpgradePolicy + *out = new(WindowsVirtualMachineScaleSetAutomaticOsUpgradePolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.BootDiagnostics != nil { + in, out := &in.BootDiagnostics, &out.BootDiagnostics + *out = new(WindowsVirtualMachineScaleSetBootDiagnosticsObservation) + (*in).DeepCopyInto(*out) + } + if in.CapacityReservationGroupID != nil { + in, out := &in.CapacityReservationGroupID, 
&out.CapacityReservationGroupID + *out = new(string) + **out = **in + } + if in.ComputerNamePrefix != nil { + in, out := &in.ComputerNamePrefix, &out.ComputerNamePrefix + *out = new(string) + **out = **in + } + if in.DataDisk != nil { + in, out := &in.DataDisk, &out.DataDisk + *out = make([]WindowsVirtualMachineScaleSetDataDiskObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DoNotRunExtensionsOnOverprovisionedMachines != nil { + in, out := &in.DoNotRunExtensionsOnOverprovisionedMachines, &out.DoNotRunExtensionsOnOverprovisionedMachines + *out = new(bool) + **out = **in + } + if in.EdgeZone != nil { + in, out := &in.EdgeZone, &out.EdgeZone + *out = new(string) + **out = **in + } + if in.EnableAutomaticUpdates != nil { + in, out := &in.EnableAutomaticUpdates, &out.EnableAutomaticUpdates + *out = new(bool) + **out = **in + } + if in.EncryptionAtHostEnabled != nil { + in, out := &in.EncryptionAtHostEnabled, &out.EncryptionAtHostEnabled + *out = new(bool) + **out = **in + } + if in.EvictionPolicy != nil { + in, out := &in.EvictionPolicy, &out.EvictionPolicy + *out = new(string) + **out = **in + } + if in.Extension != nil { + in, out := &in.Extension, &out.Extension + *out = make([]WindowsVirtualMachineScaleSetExtensionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ExtensionOperationsEnabled != nil { + in, out := &in.ExtensionOperationsEnabled, &out.ExtensionOperationsEnabled + *out = new(bool) + **out = **in + } + if in.ExtensionsTimeBudget != nil { + in, out := &in.ExtensionsTimeBudget, &out.ExtensionsTimeBudget + *out = new(string) + **out = **in + } + if in.GalleryApplication != nil { + in, out := &in.GalleryApplication, &out.GalleryApplication + *out = make([]WindowsVirtualMachineScaleSetGalleryApplicationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.GalleryApplications != nil { + in, out := &in.GalleryApplications, 
&out.GalleryApplications + *out = make([]WindowsVirtualMachineScaleSetGalleryApplicationsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HealthProbeID != nil { + in, out := &in.HealthProbeID, &out.HealthProbeID + *out = new(string) + **out = **in + } + if in.HostGroupID != nil { + in, out := &in.HostGroupID, &out.HostGroupID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(WindowsVirtualMachineScaleSetIdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.Instances != nil { + in, out := &in.Instances, &out.Instances + *out = new(float64) + **out = **in + } + if in.LicenseType != nil { + in, out := &in.LicenseType, &out.LicenseType + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MaxBidPrice != nil { + in, out := &in.MaxBidPrice, &out.MaxBidPrice + *out = new(float64) + **out = **in + } + if in.NetworkInterface != nil { + in, out := &in.NetworkInterface, &out.NetworkInterface + *out = make([]WindowsVirtualMachineScaleSetNetworkInterfaceObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OsDisk != nil { + in, out := &in.OsDisk, &out.OsDisk + *out = new(WindowsVirtualMachineScaleSetOsDiskObservation) + (*in).DeepCopyInto(*out) + } + if in.Overprovision != nil { + in, out := &in.Overprovision, &out.Overprovision + *out = new(bool) + **out = **in + } + if in.Plan != nil { + in, out := &in.Plan, &out.Plan + *out = new(WindowsVirtualMachineScaleSetPlanObservation) + (*in).DeepCopyInto(*out) + } + if in.PlatformFaultDomainCount != nil { + in, out := &in.PlatformFaultDomainCount, &out.PlatformFaultDomainCount + *out = new(float64) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, 
&out.Priority + *out = new(string) + **out = **in + } + if in.ProvisionVMAgent != nil { + in, out := &in.ProvisionVMAgent, &out.ProvisionVMAgent + *out = new(bool) + **out = **in + } + if in.ProximityPlacementGroupID != nil { + in, out := &in.ProximityPlacementGroupID, &out.ProximityPlacementGroupID + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.RollingUpgradePolicy != nil { + in, out := &in.RollingUpgradePolicy, &out.RollingUpgradePolicy + *out = new(WindowsVirtualMachineScaleSetRollingUpgradePolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.ScaleIn != nil { + in, out := &in.ScaleIn, &out.ScaleIn + *out = new(WindowsVirtualMachineScaleSetScaleInObservation) + (*in).DeepCopyInto(*out) + } + if in.ScaleInPolicy != nil { + in, out := &in.ScaleInPolicy, &out.ScaleInPolicy + *out = new(string) + **out = **in + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = make([]WindowsVirtualMachineScaleSetSecretObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecureBootEnabled != nil { + in, out := &in.SecureBootEnabled, &out.SecureBootEnabled + *out = new(bool) + **out = **in + } + if in.SinglePlacementGroup != nil { + in, out := &in.SinglePlacementGroup, &out.SinglePlacementGroup + *out = new(bool) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.SourceImageID != nil { + in, out := &in.SourceImageID, &out.SourceImageID + *out = new(string) + **out = **in + } + if in.SourceImageReference != nil { + in, out := &in.SourceImageReference, &out.SourceImageReference + *out = new(WindowsVirtualMachineScaleSetSourceImageReferenceObservation) + (*in).DeepCopyInto(*out) + } + if in.SpotRestore != nil { + in, out := &in.SpotRestore, &out.SpotRestore + *out = 
new(WindowsVirtualMachineScaleSetSpotRestoreObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TerminateNotification != nil { + in, out := &in.TerminateNotification, &out.TerminateNotification + *out = new(WindowsVirtualMachineScaleSetTerminateNotificationObservation) + (*in).DeepCopyInto(*out) + } + if in.TerminationNotification != nil { + in, out := &in.TerminationNotification, &out.TerminationNotification + *out = new(WindowsVirtualMachineScaleSetTerminationNotificationObservation) + (*in).DeepCopyInto(*out) + } + if in.Timezone != nil { + in, out := &in.Timezone, &out.Timezone + *out = new(string) + **out = **in + } + if in.UniqueID != nil { + in, out := &in.UniqueID, &out.UniqueID + *out = new(string) + **out = **in + } + if in.UpgradeMode != nil { + in, out := &in.UpgradeMode, &out.UpgradeMode + *out = new(string) + **out = **in + } + if in.UserData != nil { + in, out := &in.UserData, &out.UserData + *out = new(string) + **out = **in + } + if in.VtpmEnabled != nil { + in, out := &in.VtpmEnabled, &out.VtpmEnabled + *out = new(bool) + **out = **in + } + if in.WinrmListener != nil { + in, out := &in.WinrmListener, &out.WinrmListener + *out = make([]WindowsVirtualMachineScaleSetWinrmListenerObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ZoneBalance != nil { + in, out := &in.ZoneBalance, &out.ZoneBalance + *out = new(bool) + **out = **in + } + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated 
deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetObservation. +func (in *WindowsVirtualMachineScaleSetObservation) DeepCopy() *WindowsVirtualMachineScaleSetObservation { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineScaleSetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineScaleSetOsDiskDiffDiskSettingsInitParameters) DeepCopyInto(out *WindowsVirtualMachineScaleSetOsDiskDiffDiskSettingsInitParameters) { + *out = *in + if in.Option != nil { + in, out := &in.Option, &out.Option + *out = new(string) + **out = **in + } + if in.Placement != nil { + in, out := &in.Placement, &out.Placement + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetOsDiskDiffDiskSettingsInitParameters. +func (in *WindowsVirtualMachineScaleSetOsDiskDiffDiskSettingsInitParameters) DeepCopy() *WindowsVirtualMachineScaleSetOsDiskDiffDiskSettingsInitParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineScaleSetOsDiskDiffDiskSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineScaleSetOsDiskDiffDiskSettingsObservation) DeepCopyInto(out *WindowsVirtualMachineScaleSetOsDiskDiffDiskSettingsObservation) { + *out = *in + if in.Option != nil { + in, out := &in.Option, &out.Option + *out = new(string) + **out = **in + } + if in.Placement != nil { + in, out := &in.Placement, &out.Placement + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetOsDiskDiffDiskSettingsObservation. 
+func (in *WindowsVirtualMachineScaleSetOsDiskDiffDiskSettingsObservation) DeepCopy() *WindowsVirtualMachineScaleSetOsDiskDiffDiskSettingsObservation { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineScaleSetOsDiskDiffDiskSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineScaleSetOsDiskDiffDiskSettingsParameters) DeepCopyInto(out *WindowsVirtualMachineScaleSetOsDiskDiffDiskSettingsParameters) { + *out = *in + if in.Option != nil { + in, out := &in.Option, &out.Option + *out = new(string) + **out = **in + } + if in.Placement != nil { + in, out := &in.Placement, &out.Placement + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetOsDiskDiffDiskSettingsParameters. +func (in *WindowsVirtualMachineScaleSetOsDiskDiffDiskSettingsParameters) DeepCopy() *WindowsVirtualMachineScaleSetOsDiskDiffDiskSettingsParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineScaleSetOsDiskDiffDiskSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsVirtualMachineScaleSetOsDiskInitParameters) DeepCopyInto(out *WindowsVirtualMachineScaleSetOsDiskInitParameters) { + *out = *in + if in.Caching != nil { + in, out := &in.Caching, &out.Caching + *out = new(string) + **out = **in + } + if in.DiffDiskSettings != nil { + in, out := &in.DiffDiskSettings, &out.DiffDiskSettings + *out = new(WindowsVirtualMachineScaleSetOsDiskDiffDiskSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DiskEncryptionSetID != nil { + in, out := &in.DiskEncryptionSetID, &out.DiskEncryptionSetID + *out = new(string) + **out = **in + } + if in.DiskSizeGb != nil { + in, out := &in.DiskSizeGb, &out.DiskSizeGb + *out = new(float64) + **out = **in + } + if in.SecureVMDiskEncryptionSetID != nil { + in, out := &in.SecureVMDiskEncryptionSetID, &out.SecureVMDiskEncryptionSetID + *out = new(string) + **out = **in + } + if in.SecurityEncryptionType != nil { + in, out := &in.SecurityEncryptionType, &out.SecurityEncryptionType + *out = new(string) + **out = **in + } + if in.StorageAccountType != nil { + in, out := &in.StorageAccountType, &out.StorageAccountType + *out = new(string) + **out = **in + } + if in.WriteAcceleratorEnabled != nil { + in, out := &in.WriteAcceleratorEnabled, &out.WriteAcceleratorEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetOsDiskInitParameters. +func (in *WindowsVirtualMachineScaleSetOsDiskInitParameters) DeepCopy() *WindowsVirtualMachineScaleSetOsDiskInitParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineScaleSetOsDiskInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsVirtualMachineScaleSetOsDiskObservation) DeepCopyInto(out *WindowsVirtualMachineScaleSetOsDiskObservation) { + *out = *in + if in.Caching != nil { + in, out := &in.Caching, &out.Caching + *out = new(string) + **out = **in + } + if in.DiffDiskSettings != nil { + in, out := &in.DiffDiskSettings, &out.DiffDiskSettings + *out = new(WindowsVirtualMachineScaleSetOsDiskDiffDiskSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.DiskEncryptionSetID != nil { + in, out := &in.DiskEncryptionSetID, &out.DiskEncryptionSetID + *out = new(string) + **out = **in + } + if in.DiskSizeGb != nil { + in, out := &in.DiskSizeGb, &out.DiskSizeGb + *out = new(float64) + **out = **in + } + if in.SecureVMDiskEncryptionSetID != nil { + in, out := &in.SecureVMDiskEncryptionSetID, &out.SecureVMDiskEncryptionSetID + *out = new(string) + **out = **in + } + if in.SecurityEncryptionType != nil { + in, out := &in.SecurityEncryptionType, &out.SecurityEncryptionType + *out = new(string) + **out = **in + } + if in.StorageAccountType != nil { + in, out := &in.StorageAccountType, &out.StorageAccountType + *out = new(string) + **out = **in + } + if in.WriteAcceleratorEnabled != nil { + in, out := &in.WriteAcceleratorEnabled, &out.WriteAcceleratorEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetOsDiskObservation. +func (in *WindowsVirtualMachineScaleSetOsDiskObservation) DeepCopy() *WindowsVirtualMachineScaleSetOsDiskObservation { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineScaleSetOsDiskObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsVirtualMachineScaleSetOsDiskParameters) DeepCopyInto(out *WindowsVirtualMachineScaleSetOsDiskParameters) { + *out = *in + if in.Caching != nil { + in, out := &in.Caching, &out.Caching + *out = new(string) + **out = **in + } + if in.DiffDiskSettings != nil { + in, out := &in.DiffDiskSettings, &out.DiffDiskSettings + *out = new(WindowsVirtualMachineScaleSetOsDiskDiffDiskSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.DiskEncryptionSetID != nil { + in, out := &in.DiskEncryptionSetID, &out.DiskEncryptionSetID + *out = new(string) + **out = **in + } + if in.DiskSizeGb != nil { + in, out := &in.DiskSizeGb, &out.DiskSizeGb + *out = new(float64) + **out = **in + } + if in.SecureVMDiskEncryptionSetID != nil { + in, out := &in.SecureVMDiskEncryptionSetID, &out.SecureVMDiskEncryptionSetID + *out = new(string) + **out = **in + } + if in.SecurityEncryptionType != nil { + in, out := &in.SecurityEncryptionType, &out.SecurityEncryptionType + *out = new(string) + **out = **in + } + if in.StorageAccountType != nil { + in, out := &in.StorageAccountType, &out.StorageAccountType + *out = new(string) + **out = **in + } + if in.WriteAcceleratorEnabled != nil { + in, out := &in.WriteAcceleratorEnabled, &out.WriteAcceleratorEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetOsDiskParameters. +func (in *WindowsVirtualMachineScaleSetOsDiskParameters) DeepCopy() *WindowsVirtualMachineScaleSetOsDiskParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineScaleSetOsDiskParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsVirtualMachineScaleSetParameters) DeepCopyInto(out *WindowsVirtualMachineScaleSetParameters) { + *out = *in + if in.AdditionalCapabilities != nil { + in, out := &in.AdditionalCapabilities, &out.AdditionalCapabilities + *out = new(WindowsVirtualMachineScaleSetAdditionalCapabilitiesParameters) + (*in).DeepCopyInto(*out) + } + if in.AdditionalUnattendContent != nil { + in, out := &in.AdditionalUnattendContent, &out.AdditionalUnattendContent + *out = make([]WindowsVirtualMachineScaleSetAdditionalUnattendContentParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + out.AdminPasswordSecretRef = in.AdminPasswordSecretRef + if in.AdminUsername != nil { + in, out := &in.AdminUsername, &out.AdminUsername + *out = new(string) + **out = **in + } + if in.AutomaticInstanceRepair != nil { + in, out := &in.AutomaticInstanceRepair, &out.AutomaticInstanceRepair + *out = new(WindowsVirtualMachineScaleSetAutomaticInstanceRepairParameters) + (*in).DeepCopyInto(*out) + } + if in.AutomaticOsUpgradePolicy != nil { + in, out := &in.AutomaticOsUpgradePolicy, &out.AutomaticOsUpgradePolicy + *out = new(WindowsVirtualMachineScaleSetAutomaticOsUpgradePolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.BootDiagnostics != nil { + in, out := &in.BootDiagnostics, &out.BootDiagnostics + *out = new(WindowsVirtualMachineScaleSetBootDiagnosticsParameters) + (*in).DeepCopyInto(*out) + } + if in.CapacityReservationGroupID != nil { + in, out := &in.CapacityReservationGroupID, &out.CapacityReservationGroupID + *out = new(string) + **out = **in + } + if in.ComputerNamePrefix != nil { + in, out := &in.ComputerNamePrefix, &out.ComputerNamePrefix + *out = new(string) + **out = **in + } + if in.CustomDataSecretRef != nil { + in, out := &in.CustomDataSecretRef, &out.CustomDataSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.DataDisk != nil { + in, out := &in.DataDisk, &out.DataDisk + *out = 
make([]WindowsVirtualMachineScaleSetDataDiskParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DoNotRunExtensionsOnOverprovisionedMachines != nil { + in, out := &in.DoNotRunExtensionsOnOverprovisionedMachines, &out.DoNotRunExtensionsOnOverprovisionedMachines + *out = new(bool) + **out = **in + } + if in.EdgeZone != nil { + in, out := &in.EdgeZone, &out.EdgeZone + *out = new(string) + **out = **in + } + if in.EnableAutomaticUpdates != nil { + in, out := &in.EnableAutomaticUpdates, &out.EnableAutomaticUpdates + *out = new(bool) + **out = **in + } + if in.EncryptionAtHostEnabled != nil { + in, out := &in.EncryptionAtHostEnabled, &out.EncryptionAtHostEnabled + *out = new(bool) + **out = **in + } + if in.EvictionPolicy != nil { + in, out := &in.EvictionPolicy, &out.EvictionPolicy + *out = new(string) + **out = **in + } + if in.Extension != nil { + in, out := &in.Extension, &out.Extension + *out = make([]WindowsVirtualMachineScaleSetExtensionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ExtensionOperationsEnabled != nil { + in, out := &in.ExtensionOperationsEnabled, &out.ExtensionOperationsEnabled + *out = new(bool) + **out = **in + } + if in.ExtensionsTimeBudget != nil { + in, out := &in.ExtensionsTimeBudget, &out.ExtensionsTimeBudget + *out = new(string) + **out = **in + } + if in.GalleryApplication != nil { + in, out := &in.GalleryApplication, &out.GalleryApplication + *out = make([]WindowsVirtualMachineScaleSetGalleryApplicationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.GalleryApplications != nil { + in, out := &in.GalleryApplications, &out.GalleryApplications + *out = make([]WindowsVirtualMachineScaleSetGalleryApplicationsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HealthProbeID != nil { + in, out := &in.HealthProbeID, &out.HealthProbeID + *out = new(string) + **out 
= **in + } + if in.HostGroupID != nil { + in, out := &in.HostGroupID, &out.HostGroupID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(WindowsVirtualMachineScaleSetIdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.Instances != nil { + in, out := &in.Instances, &out.Instances + *out = new(float64) + **out = **in + } + if in.LicenseType != nil { + in, out := &in.LicenseType, &out.LicenseType + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MaxBidPrice != nil { + in, out := &in.MaxBidPrice, &out.MaxBidPrice + *out = new(float64) + **out = **in + } + if in.NetworkInterface != nil { + in, out := &in.NetworkInterface, &out.NetworkInterface + *out = make([]WindowsVirtualMachineScaleSetNetworkInterfaceParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OsDisk != nil { + in, out := &in.OsDisk, &out.OsDisk + *out = new(WindowsVirtualMachineScaleSetOsDiskParameters) + (*in).DeepCopyInto(*out) + } + if in.Overprovision != nil { + in, out := &in.Overprovision, &out.Overprovision + *out = new(bool) + **out = **in + } + if in.Plan != nil { + in, out := &in.Plan, &out.Plan + *out = new(WindowsVirtualMachineScaleSetPlanParameters) + (*in).DeepCopyInto(*out) + } + if in.PlatformFaultDomainCount != nil { + in, out := &in.PlatformFaultDomainCount, &out.PlatformFaultDomainCount + *out = new(float64) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(string) + **out = **in + } + if in.ProvisionVMAgent != nil { + in, out := &in.ProvisionVMAgent, &out.ProvisionVMAgent + *out = new(bool) + **out = **in + } + if in.ProximityPlacementGroupID != nil { + in, out := &in.ProximityPlacementGroupID, &out.ProximityPlacementGroupID + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := 
&in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RollingUpgradePolicy != nil { + in, out := &in.RollingUpgradePolicy, &out.RollingUpgradePolicy + *out = new(WindowsVirtualMachineScaleSetRollingUpgradePolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.ScaleIn != nil { + in, out := &in.ScaleIn, &out.ScaleIn + *out = new(WindowsVirtualMachineScaleSetScaleInParameters) + (*in).DeepCopyInto(*out) + } + if in.ScaleInPolicy != nil { + in, out := &in.ScaleInPolicy, &out.ScaleInPolicy + *out = new(string) + **out = **in + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = make([]WindowsVirtualMachineScaleSetSecretParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecureBootEnabled != nil { + in, out := &in.SecureBootEnabled, &out.SecureBootEnabled + *out = new(bool) + **out = **in + } + if in.SinglePlacementGroup != nil { + in, out := &in.SinglePlacementGroup, &out.SinglePlacementGroup + *out = new(bool) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.SourceImageID != nil { + in, out := &in.SourceImageID, &out.SourceImageID + *out = new(string) + **out = **in + } + if in.SourceImageReference != nil { + in, out := &in.SourceImageReference, &out.SourceImageReference + *out = new(WindowsVirtualMachineScaleSetSourceImageReferenceParameters) + (*in).DeepCopyInto(*out) + } + if in.SpotRestore != nil { + in, out := &in.SpotRestore, &out.SpotRestore + *out = new(WindowsVirtualMachineScaleSetSpotRestoreParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + 
in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TerminateNotification != nil { + in, out := &in.TerminateNotification, &out.TerminateNotification + *out = new(WindowsVirtualMachineScaleSetTerminateNotificationParameters) + (*in).DeepCopyInto(*out) + } + if in.TerminationNotification != nil { + in, out := &in.TerminationNotification, &out.TerminationNotification + *out = new(WindowsVirtualMachineScaleSetTerminationNotificationParameters) + (*in).DeepCopyInto(*out) + } + if in.Timezone != nil { + in, out := &in.Timezone, &out.Timezone + *out = new(string) + **out = **in + } + if in.UpgradeMode != nil { + in, out := &in.UpgradeMode, &out.UpgradeMode + *out = new(string) + **out = **in + } + if in.UserData != nil { + in, out := &in.UserData, &out.UserData + *out = new(string) + **out = **in + } + if in.VtpmEnabled != nil { + in, out := &in.VtpmEnabled, &out.VtpmEnabled + *out = new(bool) + **out = **in + } + if in.WinrmListener != nil { + in, out := &in.WinrmListener, &out.WinrmListener + *out = make([]WindowsVirtualMachineScaleSetWinrmListenerParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ZoneBalance != nil { + in, out := &in.ZoneBalance, &out.ZoneBalance + *out = new(bool) + **out = **in + } + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetParameters. 
func (in *WindowsVirtualMachineScaleSetParameters) DeepCopy() *WindowsVirtualMachineScaleSetParameters {
	// nil receiver yields nil, so callers can chain without a nil check.
	if in == nil {
		return nil
	}
	out := new(WindowsVirtualMachineScaleSetParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WindowsVirtualMachineScaleSetPlanInitParameters) DeepCopyInto(out *WindowsVirtualMachineScaleSetPlanInitParameters) {
	*out = *in
	// For each set optional field, allocate fresh storage and copy the value,
	// so out shares no memory with in. The `in, out := &in.F, &out.F` lines
	// deliberately shadow the outer in/out with addresses of the field pair.
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
	if in.Product != nil {
		in, out := &in.Product, &out.Product
		*out = new(string)
		**out = **in
	}
	if in.Publisher != nil {
		in, out := &in.Publisher, &out.Publisher
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetPlanInitParameters.
func (in *WindowsVirtualMachineScaleSetPlanInitParameters) DeepCopy() *WindowsVirtualMachineScaleSetPlanInitParameters {
	if in == nil {
		return nil
	}
	out := new(WindowsVirtualMachineScaleSetPlanInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WindowsVirtualMachineScaleSetPlanObservation) DeepCopyInto(out *WindowsVirtualMachineScaleSetPlanObservation) {
	*out = *in
	// allocate fresh storage for each set optional field (no aliasing with in)
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
	if in.Product != nil {
		in, out := &in.Product, &out.Product
		*out = new(string)
		**out = **in
	}
	if in.Publisher != nil {
		in, out := &in.Publisher, &out.Publisher
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetPlanObservation.
func (in *WindowsVirtualMachineScaleSetPlanObservation) DeepCopy() *WindowsVirtualMachineScaleSetPlanObservation {
	// nil receiver yields nil, so callers can chain without a nil check.
	if in == nil {
		return nil
	}
	out := new(WindowsVirtualMachineScaleSetPlanObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WindowsVirtualMachineScaleSetPlanParameters) DeepCopyInto(out *WindowsVirtualMachineScaleSetPlanParameters) {
	*out = *in
	// allocate fresh storage for each set optional field (no aliasing with in)
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
	if in.Product != nil {
		in, out := &in.Product, &out.Product
		*out = new(string)
		**out = **in
	}
	if in.Publisher != nil {
		in, out := &in.Publisher, &out.Publisher
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetPlanParameters.
func (in *WindowsVirtualMachineScaleSetPlanParameters) DeepCopy() *WindowsVirtualMachineScaleSetPlanParameters {
	if in == nil {
		return nil
	}
	out := new(WindowsVirtualMachineScaleSetPlanParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WindowsVirtualMachineScaleSetRollingUpgradePolicyInitParameters) DeepCopyInto(out *WindowsVirtualMachineScaleSetRollingUpgradePolicyInitParameters) {
	*out = *in
	// allocate fresh storage for each set optional field (no aliasing with in)
	if in.CrossZoneUpgradesEnabled != nil {
		in, out := &in.CrossZoneUpgradesEnabled, &out.CrossZoneUpgradesEnabled
		*out = new(bool)
		**out = **in
	}
	if in.MaxBatchInstancePercent != nil {
		in, out := &in.MaxBatchInstancePercent, &out.MaxBatchInstancePercent
		*out = new(float64)
		**out = **in
	}
	if in.MaxUnhealthyInstancePercent != nil {
		in, out := &in.MaxUnhealthyInstancePercent, &out.MaxUnhealthyInstancePercent
		*out = new(float64)
		**out = **in
	}
	if in.MaxUnhealthyUpgradedInstancePercent != nil {
		in, out := &in.MaxUnhealthyUpgradedInstancePercent, &out.MaxUnhealthyUpgradedInstancePercent
		*out = new(float64)
		**out = **in
	}
	if in.PauseTimeBetweenBatches != nil {
		in, out := &in.PauseTimeBetweenBatches, &out.PauseTimeBetweenBatches
		*out = new(string)
		**out = **in
	}
	if in.PrioritizeUnhealthyInstancesEnabled != nil {
		in, out := &in.PrioritizeUnhealthyInstancesEnabled, &out.PrioritizeUnhealthyInstancesEnabled
		*out = new(bool)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetRollingUpgradePolicyInitParameters.
func (in *WindowsVirtualMachineScaleSetRollingUpgradePolicyInitParameters) DeepCopy() *WindowsVirtualMachineScaleSetRollingUpgradePolicyInitParameters {
	// nil receiver yields nil, so callers can chain without a nil check.
	if in == nil {
		return nil
	}
	out := new(WindowsVirtualMachineScaleSetRollingUpgradePolicyInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WindowsVirtualMachineScaleSetRollingUpgradePolicyObservation) DeepCopyInto(out *WindowsVirtualMachineScaleSetRollingUpgradePolicyObservation) {
	*out = *in
	// allocate fresh storage for each set optional field (no aliasing with in)
	if in.CrossZoneUpgradesEnabled != nil {
		in, out := &in.CrossZoneUpgradesEnabled, &out.CrossZoneUpgradesEnabled
		*out = new(bool)
		**out = **in
	}
	if in.MaxBatchInstancePercent != nil {
		in, out := &in.MaxBatchInstancePercent, &out.MaxBatchInstancePercent
		*out = new(float64)
		**out = **in
	}
	if in.MaxUnhealthyInstancePercent != nil {
		in, out := &in.MaxUnhealthyInstancePercent, &out.MaxUnhealthyInstancePercent
		*out = new(float64)
		**out = **in
	}
	if in.MaxUnhealthyUpgradedInstancePercent != nil {
		in, out := &in.MaxUnhealthyUpgradedInstancePercent, &out.MaxUnhealthyUpgradedInstancePercent
		*out = new(float64)
		**out = **in
	}
	if in.PauseTimeBetweenBatches != nil {
		in, out := &in.PauseTimeBetweenBatches, &out.PauseTimeBetweenBatches
		*out = new(string)
		**out = **in
	}
	if in.PrioritizeUnhealthyInstancesEnabled != nil {
		in, out := &in.PrioritizeUnhealthyInstancesEnabled, &out.PrioritizeUnhealthyInstancesEnabled
		*out = new(bool)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetRollingUpgradePolicyObservation.
func (in *WindowsVirtualMachineScaleSetRollingUpgradePolicyObservation) DeepCopy() *WindowsVirtualMachineScaleSetRollingUpgradePolicyObservation {
	// nil receiver yields nil, so callers can chain without a nil check.
	if in == nil {
		return nil
	}
	out := new(WindowsVirtualMachineScaleSetRollingUpgradePolicyObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WindowsVirtualMachineScaleSetRollingUpgradePolicyParameters) DeepCopyInto(out *WindowsVirtualMachineScaleSetRollingUpgradePolicyParameters) {
	*out = *in
	// allocate fresh storage for each set optional field (no aliasing with in)
	if in.CrossZoneUpgradesEnabled != nil {
		in, out := &in.CrossZoneUpgradesEnabled, &out.CrossZoneUpgradesEnabled
		*out = new(bool)
		**out = **in
	}
	if in.MaxBatchInstancePercent != nil {
		in, out := &in.MaxBatchInstancePercent, &out.MaxBatchInstancePercent
		*out = new(float64)
		**out = **in
	}
	if in.MaxUnhealthyInstancePercent != nil {
		in, out := &in.MaxUnhealthyInstancePercent, &out.MaxUnhealthyInstancePercent
		*out = new(float64)
		**out = **in
	}
	if in.MaxUnhealthyUpgradedInstancePercent != nil {
		in, out := &in.MaxUnhealthyUpgradedInstancePercent, &out.MaxUnhealthyUpgradedInstancePercent
		*out = new(float64)
		**out = **in
	}
	if in.PauseTimeBetweenBatches != nil {
		in, out := &in.PauseTimeBetweenBatches, &out.PauseTimeBetweenBatches
		*out = new(string)
		**out = **in
	}
	if in.PrioritizeUnhealthyInstancesEnabled != nil {
		in, out := &in.PrioritizeUnhealthyInstancesEnabled, &out.PrioritizeUnhealthyInstancesEnabled
		*out = new(bool)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetRollingUpgradePolicyParameters.
func (in *WindowsVirtualMachineScaleSetRollingUpgradePolicyParameters) DeepCopy() *WindowsVirtualMachineScaleSetRollingUpgradePolicyParameters {
	// nil receiver yields nil, so callers can chain without a nil check.
	if in == nil {
		return nil
	}
	out := new(WindowsVirtualMachineScaleSetRollingUpgradePolicyParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WindowsVirtualMachineScaleSetScaleInInitParameters) DeepCopyInto(out *WindowsVirtualMachineScaleSetScaleInInitParameters) {
	*out = *in
	// allocate fresh storage for each set optional field (no aliasing with in)
	if in.ForceDeletionEnabled != nil {
		in, out := &in.ForceDeletionEnabled, &out.ForceDeletionEnabled
		*out = new(bool)
		**out = **in
	}
	if in.Rule != nil {
		in, out := &in.Rule, &out.Rule
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetScaleInInitParameters.
func (in *WindowsVirtualMachineScaleSetScaleInInitParameters) DeepCopy() *WindowsVirtualMachineScaleSetScaleInInitParameters {
	// nil receiver yields nil, so callers can chain without a nil check.
	if in == nil {
		return nil
	}
	out := new(WindowsVirtualMachineScaleSetScaleInInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WindowsVirtualMachineScaleSetScaleInObservation) DeepCopyInto(out *WindowsVirtualMachineScaleSetScaleInObservation) {
	*out = *in
	// allocate fresh storage for each set optional field (no aliasing with in)
	if in.ForceDeletionEnabled != nil {
		in, out := &in.ForceDeletionEnabled, &out.ForceDeletionEnabled
		*out = new(bool)
		**out = **in
	}
	if in.Rule != nil {
		in, out := &in.Rule, &out.Rule
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetScaleInObservation.
func (in *WindowsVirtualMachineScaleSetScaleInObservation) DeepCopy() *WindowsVirtualMachineScaleSetScaleInObservation {
	if in == nil {
		return nil
	}
	out := new(WindowsVirtualMachineScaleSetScaleInObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WindowsVirtualMachineScaleSetScaleInParameters) DeepCopyInto(out *WindowsVirtualMachineScaleSetScaleInParameters) {
	*out = *in
	// allocate fresh storage for each set optional field (no aliasing with in)
	if in.ForceDeletionEnabled != nil {
		in, out := &in.ForceDeletionEnabled, &out.ForceDeletionEnabled
		*out = new(bool)
		**out = **in
	}
	if in.Rule != nil {
		in, out := &in.Rule, &out.Rule
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetScaleInParameters.
func (in *WindowsVirtualMachineScaleSetScaleInParameters) DeepCopy() *WindowsVirtualMachineScaleSetScaleInParameters {
	// nil receiver yields nil, so callers can chain without a nil check.
	if in == nil {
		return nil
	}
	out := new(WindowsVirtualMachineScaleSetScaleInParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WindowsVirtualMachineScaleSetSecretCertificateInitParameters) DeepCopyInto(out *WindowsVirtualMachineScaleSetSecretCertificateInitParameters) {
	*out = *in
	// allocate fresh storage for each set optional field (no aliasing with in)
	if in.Store != nil {
		in, out := &in.Store, &out.Store
		*out = new(string)
		**out = **in
	}
	if in.URL != nil {
		in, out := &in.URL, &out.URL
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetSecretCertificateInitParameters.
func (in *WindowsVirtualMachineScaleSetSecretCertificateInitParameters) DeepCopy() *WindowsVirtualMachineScaleSetSecretCertificateInitParameters {
	if in == nil {
		return nil
	}
	out := new(WindowsVirtualMachineScaleSetSecretCertificateInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WindowsVirtualMachineScaleSetSecretCertificateObservation) DeepCopyInto(out *WindowsVirtualMachineScaleSetSecretCertificateObservation) {
	*out = *in
	// allocate fresh storage for each set optional field (no aliasing with in)
	if in.Store != nil {
		in, out := &in.Store, &out.Store
		*out = new(string)
		**out = **in
	}
	if in.URL != nil {
		in, out := &in.URL, &out.URL
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetSecretCertificateObservation.
func (in *WindowsVirtualMachineScaleSetSecretCertificateObservation) DeepCopy() *WindowsVirtualMachineScaleSetSecretCertificateObservation {
	// nil receiver yields nil, so callers can chain without a nil check.
	if in == nil {
		return nil
	}
	out := new(WindowsVirtualMachineScaleSetSecretCertificateObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WindowsVirtualMachineScaleSetSecretCertificateParameters) DeepCopyInto(out *WindowsVirtualMachineScaleSetSecretCertificateParameters) {
	*out = *in
	// allocate fresh storage for each set optional field (no aliasing with in)
	if in.Store != nil {
		in, out := &in.Store, &out.Store
		*out = new(string)
		**out = **in
	}
	if in.URL != nil {
		in, out := &in.URL, &out.URL
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetSecretCertificateParameters.
func (in *WindowsVirtualMachineScaleSetSecretCertificateParameters) DeepCopy() *WindowsVirtualMachineScaleSetSecretCertificateParameters {
	if in == nil {
		return nil
	}
	out := new(WindowsVirtualMachineScaleSetSecretCertificateParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WindowsVirtualMachineScaleSetSecretInitParameters) DeepCopyInto(out *WindowsVirtualMachineScaleSetSecretInitParameters) {
	*out = *in
	// Certificate is a slice of structs with pointer fields: allocate a new
	// backing array and deep-copy each element so no element aliases in.
	if in.Certificate != nil {
		in, out := &in.Certificate, &out.Certificate
		*out = make([]WindowsVirtualMachineScaleSetSecretCertificateInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.KeyVaultID != nil {
		in, out := &in.KeyVaultID, &out.KeyVaultID
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetSecretInitParameters.
func (in *WindowsVirtualMachineScaleSetSecretInitParameters) DeepCopy() *WindowsVirtualMachineScaleSetSecretInitParameters {
	// nil receiver yields nil, so callers can chain without a nil check.
	if in == nil {
		return nil
	}
	out := new(WindowsVirtualMachineScaleSetSecretInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WindowsVirtualMachineScaleSetSecretObservation) DeepCopyInto(out *WindowsVirtualMachineScaleSetSecretObservation) {
	*out = *in
	// deep-copy the Certificate slice element-by-element (no aliasing with in)
	if in.Certificate != nil {
		in, out := &in.Certificate, &out.Certificate
		*out = make([]WindowsVirtualMachineScaleSetSecretCertificateObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.KeyVaultID != nil {
		in, out := &in.KeyVaultID, &out.KeyVaultID
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetSecretObservation.
func (in *WindowsVirtualMachineScaleSetSecretObservation) DeepCopy() *WindowsVirtualMachineScaleSetSecretObservation {
	if in == nil {
		return nil
	}
	out := new(WindowsVirtualMachineScaleSetSecretObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WindowsVirtualMachineScaleSetSecretParameters) DeepCopyInto(out *WindowsVirtualMachineScaleSetSecretParameters) {
	*out = *in
	// deep-copy the Certificate slice element-by-element (no aliasing with in)
	if in.Certificate != nil {
		in, out := &in.Certificate, &out.Certificate
		*out = make([]WindowsVirtualMachineScaleSetSecretCertificateParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.KeyVaultID != nil {
		in, out := &in.KeyVaultID, &out.KeyVaultID
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetSecretParameters.
func (in *WindowsVirtualMachineScaleSetSecretParameters) DeepCopy() *WindowsVirtualMachineScaleSetSecretParameters {
	if in == nil {
		return nil
	}
	out := new(WindowsVirtualMachineScaleSetSecretParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WindowsVirtualMachineScaleSetSourceImageReferenceInitParameters) DeepCopyInto(out *WindowsVirtualMachineScaleSetSourceImageReferenceInitParameters) {
	*out = *in
	// allocate fresh storage for each set optional field (no aliasing with in)
	if in.Offer != nil {
		in, out := &in.Offer, &out.Offer
		*out = new(string)
		**out = **in
	}
	if in.Publisher != nil {
		in, out := &in.Publisher, &out.Publisher
		*out = new(string)
		**out = **in
	}
	if in.Sku != nil {
		in, out := &in.Sku, &out.Sku
		*out = new(string)
		**out = **in
	}
	if in.Version != nil {
		in, out := &in.Version, &out.Version
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetSourceImageReferenceInitParameters.
func (in *WindowsVirtualMachineScaleSetSourceImageReferenceInitParameters) DeepCopy() *WindowsVirtualMachineScaleSetSourceImageReferenceInitParameters {
	// nil receiver yields nil, so callers can chain without a nil check.
	if in == nil {
		return nil
	}
	out := new(WindowsVirtualMachineScaleSetSourceImageReferenceInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WindowsVirtualMachineScaleSetSourceImageReferenceObservation) DeepCopyInto(out *WindowsVirtualMachineScaleSetSourceImageReferenceObservation) {
	*out = *in
	// allocate fresh storage for each set optional field (no aliasing with in)
	if in.Offer != nil {
		in, out := &in.Offer, &out.Offer
		*out = new(string)
		**out = **in
	}
	if in.Publisher != nil {
		in, out := &in.Publisher, &out.Publisher
		*out = new(string)
		**out = **in
	}
	if in.Sku != nil {
		in, out := &in.Sku, &out.Sku
		*out = new(string)
		**out = **in
	}
	if in.Version != nil {
		in, out := &in.Version, &out.Version
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetSourceImageReferenceObservation.
func (in *WindowsVirtualMachineScaleSetSourceImageReferenceObservation) DeepCopy() *WindowsVirtualMachineScaleSetSourceImageReferenceObservation {
	if in == nil {
		return nil
	}
	out := new(WindowsVirtualMachineScaleSetSourceImageReferenceObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WindowsVirtualMachineScaleSetSourceImageReferenceParameters) DeepCopyInto(out *WindowsVirtualMachineScaleSetSourceImageReferenceParameters) {
	*out = *in
	// allocate fresh storage for each set optional field (no aliasing with in)
	if in.Offer != nil {
		in, out := &in.Offer, &out.Offer
		*out = new(string)
		**out = **in
	}
	if in.Publisher != nil {
		in, out := &in.Publisher, &out.Publisher
		*out = new(string)
		**out = **in
	}
	if in.Sku != nil {
		in, out := &in.Sku, &out.Sku
		*out = new(string)
		**out = **in
	}
	if in.Version != nil {
		in, out := &in.Version, &out.Version
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetSourceImageReferenceParameters.
func (in *WindowsVirtualMachineScaleSetSourceImageReferenceParameters) DeepCopy() *WindowsVirtualMachineScaleSetSourceImageReferenceParameters {
	// nil receiver yields nil, so callers can chain without a nil check.
	if in == nil {
		return nil
	}
	out := new(WindowsVirtualMachineScaleSetSourceImageReferenceParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WindowsVirtualMachineScaleSetSpec) DeepCopyInto(out *WindowsVirtualMachineScaleSetSpec) {
	*out = *in
	// delegate to the embedded/nested structs' own deepcopy implementations
	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
	in.ForProvider.DeepCopyInto(&out.ForProvider)
	in.InitProvider.DeepCopyInto(&out.InitProvider)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetSpec.
func (in *WindowsVirtualMachineScaleSetSpec) DeepCopy() *WindowsVirtualMachineScaleSetSpec {
	if in == nil {
		return nil
	}
	out := new(WindowsVirtualMachineScaleSetSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WindowsVirtualMachineScaleSetSpotRestoreInitParameters) DeepCopyInto(out *WindowsVirtualMachineScaleSetSpotRestoreInitParameters) {
	*out = *in
	// allocate fresh storage for each set optional field (no aliasing with in)
	if in.Enabled != nil {
		in, out := &in.Enabled, &out.Enabled
		*out = new(bool)
		**out = **in
	}
	if in.Timeout != nil {
		in, out := &in.Timeout, &out.Timeout
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetSpotRestoreInitParameters.
func (in *WindowsVirtualMachineScaleSetSpotRestoreInitParameters) DeepCopy() *WindowsVirtualMachineScaleSetSpotRestoreInitParameters {
	// nil receiver yields nil, so callers can chain without a nil check.
	if in == nil {
		return nil
	}
	out := new(WindowsVirtualMachineScaleSetSpotRestoreInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WindowsVirtualMachineScaleSetSpotRestoreObservation) DeepCopyInto(out *WindowsVirtualMachineScaleSetSpotRestoreObservation) {
	*out = *in
	// allocate fresh storage for each set optional field (no aliasing with in)
	if in.Enabled != nil {
		in, out := &in.Enabled, &out.Enabled
		*out = new(bool)
		**out = **in
	}
	if in.Timeout != nil {
		in, out := &in.Timeout, &out.Timeout
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetSpotRestoreObservation.
func (in *WindowsVirtualMachineScaleSetSpotRestoreObservation) DeepCopy() *WindowsVirtualMachineScaleSetSpotRestoreObservation {
	if in == nil {
		return nil
	}
	out := new(WindowsVirtualMachineScaleSetSpotRestoreObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WindowsVirtualMachineScaleSetSpotRestoreParameters) DeepCopyInto(out *WindowsVirtualMachineScaleSetSpotRestoreParameters) {
	*out = *in
	// allocate fresh storage for each set optional field (no aliasing with in)
	if in.Enabled != nil {
		in, out := &in.Enabled, &out.Enabled
		*out = new(bool)
		**out = **in
	}
	if in.Timeout != nil {
		in, out := &in.Timeout, &out.Timeout
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetSpotRestoreParameters.
func (in *WindowsVirtualMachineScaleSetSpotRestoreParameters) DeepCopy() *WindowsVirtualMachineScaleSetSpotRestoreParameters {
	// nil receiver yields nil, so callers can chain without a nil check.
	if in == nil {
		return nil
	}
	out := new(WindowsVirtualMachineScaleSetSpotRestoreParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WindowsVirtualMachineScaleSetStatus) DeepCopyInto(out *WindowsVirtualMachineScaleSetStatus) {
	*out = *in
	// delegate to the embedded/nested structs' own deepcopy implementations
	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
	in.AtProvider.DeepCopyInto(&out.AtProvider)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetStatus.
func (in *WindowsVirtualMachineScaleSetStatus) DeepCopy() *WindowsVirtualMachineScaleSetStatus {
	if in == nil {
		return nil
	}
	out := new(WindowsVirtualMachineScaleSetStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WindowsVirtualMachineScaleSetTerminateNotificationInitParameters) DeepCopyInto(out *WindowsVirtualMachineScaleSetTerminateNotificationInitParameters) {
	*out = *in
	// allocate fresh storage for each set optional field (no aliasing with in)
	if in.Enabled != nil {
		in, out := &in.Enabled, &out.Enabled
		*out = new(bool)
		**out = **in
	}
	if in.Timeout != nil {
		in, out := &in.Timeout, &out.Timeout
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetTerminateNotificationInitParameters.
func (in *WindowsVirtualMachineScaleSetTerminateNotificationInitParameters) DeepCopy() *WindowsVirtualMachineScaleSetTerminateNotificationInitParameters {
	// nil receiver yields nil, so callers can chain without a nil check.
	if in == nil {
		return nil
	}
	out := new(WindowsVirtualMachineScaleSetTerminateNotificationInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WindowsVirtualMachineScaleSetTerminateNotificationObservation) DeepCopyInto(out *WindowsVirtualMachineScaleSetTerminateNotificationObservation) {
	*out = *in
	// allocate fresh storage for each set optional field (no aliasing with in)
	if in.Enabled != nil {
		in, out := &in.Enabled, &out.Enabled
		*out = new(bool)
		**out = **in
	}
	if in.Timeout != nil {
		in, out := &in.Timeout, &out.Timeout
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetTerminateNotificationObservation.
func (in *WindowsVirtualMachineScaleSetTerminateNotificationObservation) DeepCopy() *WindowsVirtualMachineScaleSetTerminateNotificationObservation {
	if in == nil {
		return nil
	}
	out := new(WindowsVirtualMachineScaleSetTerminateNotificationObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WindowsVirtualMachineScaleSetTerminateNotificationParameters) DeepCopyInto(out *WindowsVirtualMachineScaleSetTerminateNotificationParameters) {
	*out = *in
	// allocate fresh storage for each set optional field (no aliasing with in)
	if in.Enabled != nil {
		in, out := &in.Enabled, &out.Enabled
		*out = new(bool)
		**out = **in
	}
	if in.Timeout != nil {
		in, out := &in.Timeout, &out.Timeout
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetTerminateNotificationParameters.
func (in *WindowsVirtualMachineScaleSetTerminateNotificationParameters) DeepCopy() *WindowsVirtualMachineScaleSetTerminateNotificationParameters {
	// nil receiver yields nil, so callers can chain without a nil check.
	if in == nil {
		return nil
	}
	out := new(WindowsVirtualMachineScaleSetTerminateNotificationParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WindowsVirtualMachineScaleSetTerminationNotificationInitParameters) DeepCopyInto(out *WindowsVirtualMachineScaleSetTerminationNotificationInitParameters) {
	*out = *in
	// allocate fresh storage for each set optional field (no aliasing with in)
	if in.Enabled != nil {
		in, out := &in.Enabled, &out.Enabled
		*out = new(bool)
		**out = **in
	}
	if in.Timeout != nil {
		in, out := &in.Timeout, &out.Timeout
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetTerminationNotificationInitParameters.
func (in *WindowsVirtualMachineScaleSetTerminationNotificationInitParameters) DeepCopy() *WindowsVirtualMachineScaleSetTerminationNotificationInitParameters {
	if in == nil {
		return nil
	}
	out := new(WindowsVirtualMachineScaleSetTerminationNotificationInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WindowsVirtualMachineScaleSetTerminationNotificationObservation) DeepCopyInto(out *WindowsVirtualMachineScaleSetTerminationNotificationObservation) {
	*out = *in
	// allocate fresh storage for each set optional field (no aliasing with in)
	if in.Enabled != nil {
		in, out := &in.Enabled, &out.Enabled
		*out = new(bool)
		**out = **in
	}
	if in.Timeout != nil {
		in, out := &in.Timeout, &out.Timeout
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetTerminationNotificationObservation.
func (in *WindowsVirtualMachineScaleSetTerminationNotificationObservation) DeepCopy() *WindowsVirtualMachineScaleSetTerminationNotificationObservation {
	// nil receiver yields nil, so callers can chain without a nil check.
	if in == nil {
		return nil
	}
	out := new(WindowsVirtualMachineScaleSetTerminationNotificationObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WindowsVirtualMachineScaleSetTerminationNotificationParameters) DeepCopyInto(out *WindowsVirtualMachineScaleSetTerminationNotificationParameters) {
	*out = *in
	// allocate fresh storage for each set optional field (no aliasing with in)
	if in.Enabled != nil {
		in, out := &in.Enabled, &out.Enabled
		*out = new(bool)
		**out = **in
	}
	if in.Timeout != nil {
		in, out := &in.Timeout, &out.Timeout
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetTerminationNotificationParameters.
func (in *WindowsVirtualMachineScaleSetTerminationNotificationParameters) DeepCopy() *WindowsVirtualMachineScaleSetTerminationNotificationParameters {
	if in == nil {
		return nil
	}
	out := new(WindowsVirtualMachineScaleSetTerminationNotificationParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WindowsVirtualMachineScaleSetWinrmListenerInitParameters) DeepCopyInto(out *WindowsVirtualMachineScaleSetWinrmListenerInitParameters) { + *out = *in + if in.CertificateURL != nil { + in, out := &in.CertificateURL, &out.CertificateURL + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetWinrmListenerInitParameters. +func (in *WindowsVirtualMachineScaleSetWinrmListenerInitParameters) DeepCopy() *WindowsVirtualMachineScaleSetWinrmListenerInitParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineScaleSetWinrmListenerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineScaleSetWinrmListenerObservation) DeepCopyInto(out *WindowsVirtualMachineScaleSetWinrmListenerObservation) { + *out = *in + if in.CertificateURL != nil { + in, out := &in.CertificateURL, &out.CertificateURL + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetWinrmListenerObservation. +func (in *WindowsVirtualMachineScaleSetWinrmListenerObservation) DeepCopy() *WindowsVirtualMachineScaleSetWinrmListenerObservation { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineScaleSetWinrmListenerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsVirtualMachineScaleSetWinrmListenerParameters) DeepCopyInto(out *WindowsVirtualMachineScaleSetWinrmListenerParameters) { + *out = *in + if in.CertificateURL != nil { + in, out := &in.CertificateURL, &out.CertificateURL + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineScaleSetWinrmListenerParameters. +func (in *WindowsVirtualMachineScaleSetWinrmListenerParameters) DeepCopy() *WindowsVirtualMachineScaleSetWinrmListenerParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineScaleSetWinrmListenerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineSecretCertificateInitParameters) DeepCopyInto(out *WindowsVirtualMachineSecretCertificateInitParameters) { + *out = *in + if in.Store != nil { + in, out := &in.Store, &out.Store + *out = new(string) + **out = **in + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineSecretCertificateInitParameters. +func (in *WindowsVirtualMachineSecretCertificateInitParameters) DeepCopy() *WindowsVirtualMachineSecretCertificateInitParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineSecretCertificateInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsVirtualMachineSecretCertificateObservation) DeepCopyInto(out *WindowsVirtualMachineSecretCertificateObservation) { + *out = *in + if in.Store != nil { + in, out := &in.Store, &out.Store + *out = new(string) + **out = **in + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineSecretCertificateObservation. +func (in *WindowsVirtualMachineSecretCertificateObservation) DeepCopy() *WindowsVirtualMachineSecretCertificateObservation { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineSecretCertificateObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineSecretCertificateParameters) DeepCopyInto(out *WindowsVirtualMachineSecretCertificateParameters) { + *out = *in + if in.Store != nil { + in, out := &in.Store, &out.Store + *out = new(string) + **out = **in + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineSecretCertificateParameters. +func (in *WindowsVirtualMachineSecretCertificateParameters) DeepCopy() *WindowsVirtualMachineSecretCertificateParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineSecretCertificateParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsVirtualMachineSecretInitParameters) DeepCopyInto(out *WindowsVirtualMachineSecretInitParameters) { + *out = *in + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = make([]WindowsVirtualMachineSecretCertificateInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.KeyVaultID != nil { + in, out := &in.KeyVaultID, &out.KeyVaultID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineSecretInitParameters. +func (in *WindowsVirtualMachineSecretInitParameters) DeepCopy() *WindowsVirtualMachineSecretInitParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineSecretInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineSecretObservation) DeepCopyInto(out *WindowsVirtualMachineSecretObservation) { + *out = *in + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = make([]WindowsVirtualMachineSecretCertificateObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.KeyVaultID != nil { + in, out := &in.KeyVaultID, &out.KeyVaultID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineSecretObservation. +func (in *WindowsVirtualMachineSecretObservation) DeepCopy() *WindowsVirtualMachineSecretObservation { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineSecretObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsVirtualMachineSecretParameters) DeepCopyInto(out *WindowsVirtualMachineSecretParameters) { + *out = *in + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = make([]WindowsVirtualMachineSecretCertificateParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.KeyVaultID != nil { + in, out := &in.KeyVaultID, &out.KeyVaultID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineSecretParameters. +func (in *WindowsVirtualMachineSecretParameters) DeepCopy() *WindowsVirtualMachineSecretParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineSecretParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineSourceImageReferenceInitParameters) DeepCopyInto(out *WindowsVirtualMachineSourceImageReferenceInitParameters) { + *out = *in + if in.Offer != nil { + in, out := &in.Offer, &out.Offer + *out = new(string) + **out = **in + } + if in.Publisher != nil { + in, out := &in.Publisher, &out.Publisher + *out = new(string) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineSourceImageReferenceInitParameters. 
+func (in *WindowsVirtualMachineSourceImageReferenceInitParameters) DeepCopy() *WindowsVirtualMachineSourceImageReferenceInitParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineSourceImageReferenceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineSourceImageReferenceObservation) DeepCopyInto(out *WindowsVirtualMachineSourceImageReferenceObservation) { + *out = *in + if in.Offer != nil { + in, out := &in.Offer, &out.Offer + *out = new(string) + **out = **in + } + if in.Publisher != nil { + in, out := &in.Publisher, &out.Publisher + *out = new(string) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineSourceImageReferenceObservation. +func (in *WindowsVirtualMachineSourceImageReferenceObservation) DeepCopy() *WindowsVirtualMachineSourceImageReferenceObservation { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineSourceImageReferenceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsVirtualMachineSourceImageReferenceParameters) DeepCopyInto(out *WindowsVirtualMachineSourceImageReferenceParameters) { + *out = *in + if in.Offer != nil { + in, out := &in.Offer, &out.Offer + *out = new(string) + **out = **in + } + if in.Publisher != nil { + in, out := &in.Publisher, &out.Publisher + *out = new(string) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineSourceImageReferenceParameters. +func (in *WindowsVirtualMachineSourceImageReferenceParameters) DeepCopy() *WindowsVirtualMachineSourceImageReferenceParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineSourceImageReferenceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineSpec) DeepCopyInto(out *WindowsVirtualMachineSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineSpec. +func (in *WindowsVirtualMachineSpec) DeepCopy() *WindowsVirtualMachineSpec { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsVirtualMachineStatus) DeepCopyInto(out *WindowsVirtualMachineStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineStatus. +func (in *WindowsVirtualMachineStatus) DeepCopy() *WindowsVirtualMachineStatus { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineTerminationNotificationInitParameters) DeepCopyInto(out *WindowsVirtualMachineTerminationNotificationInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineTerminationNotificationInitParameters. +func (in *WindowsVirtualMachineTerminationNotificationInitParameters) DeepCopy() *WindowsVirtualMachineTerminationNotificationInitParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineTerminationNotificationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsVirtualMachineTerminationNotificationObservation) DeepCopyInto(out *WindowsVirtualMachineTerminationNotificationObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineTerminationNotificationObservation. +func (in *WindowsVirtualMachineTerminationNotificationObservation) DeepCopy() *WindowsVirtualMachineTerminationNotificationObservation { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineTerminationNotificationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineTerminationNotificationParameters) DeepCopyInto(out *WindowsVirtualMachineTerminationNotificationParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineTerminationNotificationParameters. +func (in *WindowsVirtualMachineTerminationNotificationParameters) DeepCopy() *WindowsVirtualMachineTerminationNotificationParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineTerminationNotificationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsVirtualMachineWinrmListenerInitParameters) DeepCopyInto(out *WindowsVirtualMachineWinrmListenerInitParameters) { + *out = *in + if in.CertificateURL != nil { + in, out := &in.CertificateURL, &out.CertificateURL + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineWinrmListenerInitParameters. +func (in *WindowsVirtualMachineWinrmListenerInitParameters) DeepCopy() *WindowsVirtualMachineWinrmListenerInitParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineWinrmListenerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineWinrmListenerObservation) DeepCopyInto(out *WindowsVirtualMachineWinrmListenerObservation) { + *out = *in + if in.CertificateURL != nil { + in, out := &in.CertificateURL, &out.CertificateURL + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineWinrmListenerObservation. +func (in *WindowsVirtualMachineWinrmListenerObservation) DeepCopy() *WindowsVirtualMachineWinrmListenerObservation { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineWinrmListenerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsVirtualMachineWinrmListenerParameters) DeepCopyInto(out *WindowsVirtualMachineWinrmListenerParameters) { + *out = *in + if in.CertificateURL != nil { + in, out := &in.CertificateURL, &out.CertificateURL + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineWinrmListenerParameters. +func (in *WindowsVirtualMachineWinrmListenerParameters) DeepCopy() *WindowsVirtualMachineWinrmListenerParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineWinrmListenerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WinrmListenerInitParameters) DeepCopyInto(out *WinrmListenerInitParameters) { + *out = *in + if in.CertificateURL != nil { + in, out := &in.CertificateURL, &out.CertificateURL + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WinrmListenerInitParameters. +func (in *WinrmListenerInitParameters) DeepCopy() *WinrmListenerInitParameters { + if in == nil { + return nil + } + out := new(WinrmListenerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WinrmListenerObservation) DeepCopyInto(out *WinrmListenerObservation) { + *out = *in + if in.CertificateURL != nil { + in, out := &in.CertificateURL, &out.CertificateURL + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WinrmListenerObservation. +func (in *WinrmListenerObservation) DeepCopy() *WinrmListenerObservation { + if in == nil { + return nil + } + out := new(WinrmListenerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WinrmListenerParameters) DeepCopyInto(out *WinrmListenerParameters) { + *out = *in + if in.CertificateURL != nil { + in, out := &in.CertificateURL, &out.CertificateURL + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WinrmListenerParameters. +func (in *WinrmListenerParameters) DeepCopy() *WinrmListenerParameters { + if in == nil { + return nil + } + out := new(WinrmListenerParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/compute/v1beta2/zz_generated.managed.go b/apis/compute/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..08e41fabd --- /dev/null +++ b/apis/compute/v1beta2/zz_generated.managed.go @@ -0,0 +1,908 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this CapacityReservation. 
+func (mg *CapacityReservation) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this CapacityReservation. +func (mg *CapacityReservation) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this CapacityReservation. +func (mg *CapacityReservation) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this CapacityReservation. +func (mg *CapacityReservation) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this CapacityReservation. +func (mg *CapacityReservation) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this CapacityReservation. +func (mg *CapacityReservation) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this CapacityReservation. +func (mg *CapacityReservation) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this CapacityReservation. +func (mg *CapacityReservation) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this CapacityReservation. +func (mg *CapacityReservation) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this CapacityReservation. +func (mg *CapacityReservation) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this CapacityReservation. 
+func (mg *CapacityReservation) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this CapacityReservation. +func (mg *CapacityReservation) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this DiskEncryptionSet. +func (mg *DiskEncryptionSet) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this DiskEncryptionSet. +func (mg *DiskEncryptionSet) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this DiskEncryptionSet. +func (mg *DiskEncryptionSet) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this DiskEncryptionSet. +func (mg *DiskEncryptionSet) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this DiskEncryptionSet. +func (mg *DiskEncryptionSet) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this DiskEncryptionSet. +func (mg *DiskEncryptionSet) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this DiskEncryptionSet. +func (mg *DiskEncryptionSet) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this DiskEncryptionSet. +func (mg *DiskEncryptionSet) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this DiskEncryptionSet. +func (mg *DiskEncryptionSet) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this DiskEncryptionSet. 
+func (mg *DiskEncryptionSet) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this DiskEncryptionSet. +func (mg *DiskEncryptionSet) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this DiskEncryptionSet. +func (mg *DiskEncryptionSet) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this GalleryApplicationVersion. +func (mg *GalleryApplicationVersion) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this GalleryApplicationVersion. +func (mg *GalleryApplicationVersion) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this GalleryApplicationVersion. +func (mg *GalleryApplicationVersion) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this GalleryApplicationVersion. +func (mg *GalleryApplicationVersion) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this GalleryApplicationVersion. +func (mg *GalleryApplicationVersion) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this GalleryApplicationVersion. +func (mg *GalleryApplicationVersion) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this GalleryApplicationVersion. +func (mg *GalleryApplicationVersion) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this GalleryApplicationVersion. 
+func (mg *GalleryApplicationVersion) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this GalleryApplicationVersion. +func (mg *GalleryApplicationVersion) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this GalleryApplicationVersion. +func (mg *GalleryApplicationVersion) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this GalleryApplicationVersion. +func (mg *GalleryApplicationVersion) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this GalleryApplicationVersion. +func (mg *GalleryApplicationVersion) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Image. +func (mg *Image) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Image. +func (mg *Image) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Image. +func (mg *Image) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Image. +func (mg *Image) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Image. +func (mg *Image) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Image. +func (mg *Image) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Image. 
+func (mg *Image) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Image. +func (mg *Image) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Image. +func (mg *Image) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Image. +func (mg *Image) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Image. +func (mg *Image) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Image. +func (mg *Image) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this LinuxVirtualMachine. +func (mg *LinuxVirtualMachine) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this LinuxVirtualMachine. +func (mg *LinuxVirtualMachine) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this LinuxVirtualMachine. +func (mg *LinuxVirtualMachine) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this LinuxVirtualMachine. +func (mg *LinuxVirtualMachine) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this LinuxVirtualMachine. +func (mg *LinuxVirtualMachine) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this LinuxVirtualMachine. 
+func (mg *LinuxVirtualMachine) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this LinuxVirtualMachine. +func (mg *LinuxVirtualMachine) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this LinuxVirtualMachine. +func (mg *LinuxVirtualMachine) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this LinuxVirtualMachine. +func (mg *LinuxVirtualMachine) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this LinuxVirtualMachine. +func (mg *LinuxVirtualMachine) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this LinuxVirtualMachine. +func (mg *LinuxVirtualMachine) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this LinuxVirtualMachine. +func (mg *LinuxVirtualMachine) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this LinuxVirtualMachineScaleSet. +func (mg *LinuxVirtualMachineScaleSet) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this LinuxVirtualMachineScaleSet. +func (mg *LinuxVirtualMachineScaleSet) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this LinuxVirtualMachineScaleSet. +func (mg *LinuxVirtualMachineScaleSet) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this LinuxVirtualMachineScaleSet. 
+func (mg *LinuxVirtualMachineScaleSet) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this LinuxVirtualMachineScaleSet. +func (mg *LinuxVirtualMachineScaleSet) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this LinuxVirtualMachineScaleSet. +func (mg *LinuxVirtualMachineScaleSet) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this LinuxVirtualMachineScaleSet. +func (mg *LinuxVirtualMachineScaleSet) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this LinuxVirtualMachineScaleSet. +func (mg *LinuxVirtualMachineScaleSet) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this LinuxVirtualMachineScaleSet. +func (mg *LinuxVirtualMachineScaleSet) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this LinuxVirtualMachineScaleSet. +func (mg *LinuxVirtualMachineScaleSet) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this LinuxVirtualMachineScaleSet. +func (mg *LinuxVirtualMachineScaleSet) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this LinuxVirtualMachineScaleSet. +func (mg *LinuxVirtualMachineScaleSet) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this ManagedDisk. +func (mg *ManagedDisk) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ManagedDisk. 
+func (mg *ManagedDisk) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ManagedDisk. +func (mg *ManagedDisk) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ManagedDisk. +func (mg *ManagedDisk) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ManagedDisk. +func (mg *ManagedDisk) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ManagedDisk. +func (mg *ManagedDisk) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ManagedDisk. +func (mg *ManagedDisk) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ManagedDisk. +func (mg *ManagedDisk) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ManagedDisk. +func (mg *ManagedDisk) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ManagedDisk. +func (mg *ManagedDisk) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ManagedDisk. +func (mg *ManagedDisk) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ManagedDisk. +func (mg *ManagedDisk) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this OrchestratedVirtualMachineScaleSet. 
+func (mg *OrchestratedVirtualMachineScaleSet) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this OrchestratedVirtualMachineScaleSet. +func (mg *OrchestratedVirtualMachineScaleSet) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this OrchestratedVirtualMachineScaleSet. +func (mg *OrchestratedVirtualMachineScaleSet) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this OrchestratedVirtualMachineScaleSet. +func (mg *OrchestratedVirtualMachineScaleSet) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this OrchestratedVirtualMachineScaleSet. +func (mg *OrchestratedVirtualMachineScaleSet) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this OrchestratedVirtualMachineScaleSet. +func (mg *OrchestratedVirtualMachineScaleSet) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this OrchestratedVirtualMachineScaleSet. +func (mg *OrchestratedVirtualMachineScaleSet) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this OrchestratedVirtualMachineScaleSet. +func (mg *OrchestratedVirtualMachineScaleSet) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this OrchestratedVirtualMachineScaleSet. +func (mg *OrchestratedVirtualMachineScaleSet) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this OrchestratedVirtualMachineScaleSet. 
+func (mg *OrchestratedVirtualMachineScaleSet) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this OrchestratedVirtualMachineScaleSet. +func (mg *OrchestratedVirtualMachineScaleSet) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this OrchestratedVirtualMachineScaleSet. +func (mg *OrchestratedVirtualMachineScaleSet) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this SharedImage. +func (mg *SharedImage) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this SharedImage. +func (mg *SharedImage) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this SharedImage. +func (mg *SharedImage) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this SharedImage. +func (mg *SharedImage) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this SharedImage. +func (mg *SharedImage) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this SharedImage. +func (mg *SharedImage) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this SharedImage. +func (mg *SharedImage) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this SharedImage. +func (mg *SharedImage) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this SharedImage. 
+func (mg *SharedImage) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this SharedImage. +func (mg *SharedImage) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this SharedImage. +func (mg *SharedImage) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this SharedImage. +func (mg *SharedImage) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this SharedImageGallery. +func (mg *SharedImageGallery) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this SharedImageGallery. +func (mg *SharedImageGallery) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this SharedImageGallery. +func (mg *SharedImageGallery) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this SharedImageGallery. +func (mg *SharedImageGallery) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this SharedImageGallery. +func (mg *SharedImageGallery) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this SharedImageGallery. +func (mg *SharedImageGallery) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this SharedImageGallery. +func (mg *SharedImageGallery) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this SharedImageGallery. 
+func (mg *SharedImageGallery) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this SharedImageGallery. +func (mg *SharedImageGallery) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this SharedImageGallery. +func (mg *SharedImageGallery) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this SharedImageGallery. +func (mg *SharedImageGallery) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this SharedImageGallery. +func (mg *SharedImageGallery) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Snapshot. +func (mg *Snapshot) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Snapshot. +func (mg *Snapshot) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Snapshot. +func (mg *Snapshot) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Snapshot. +func (mg *Snapshot) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Snapshot. +func (mg *Snapshot) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Snapshot. +func (mg *Snapshot) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Snapshot. 
+func (mg *Snapshot) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Snapshot. +func (mg *Snapshot) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Snapshot. +func (mg *Snapshot) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Snapshot. +func (mg *Snapshot) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Snapshot. +func (mg *Snapshot) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Snapshot. +func (mg *Snapshot) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this VirtualMachineExtension. +func (mg *VirtualMachineExtension) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this VirtualMachineExtension. +func (mg *VirtualMachineExtension) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this VirtualMachineExtension. +func (mg *VirtualMachineExtension) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this VirtualMachineExtension. +func (mg *VirtualMachineExtension) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this VirtualMachineExtension. +func (mg *VirtualMachineExtension) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this VirtualMachineExtension. 
+func (mg *VirtualMachineExtension) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this VirtualMachineExtension. +func (mg *VirtualMachineExtension) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this VirtualMachineExtension. +func (mg *VirtualMachineExtension) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this VirtualMachineExtension. +func (mg *VirtualMachineExtension) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this VirtualMachineExtension. +func (mg *VirtualMachineExtension) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this VirtualMachineExtension. +func (mg *VirtualMachineExtension) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this VirtualMachineExtension. +func (mg *VirtualMachineExtension) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this VirtualMachineRunCommand. +func (mg *VirtualMachineRunCommand) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this VirtualMachineRunCommand. +func (mg *VirtualMachineRunCommand) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this VirtualMachineRunCommand. +func (mg *VirtualMachineRunCommand) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this VirtualMachineRunCommand. 
+func (mg *VirtualMachineRunCommand) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this VirtualMachineRunCommand. +func (mg *VirtualMachineRunCommand) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this VirtualMachineRunCommand. +func (mg *VirtualMachineRunCommand) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this VirtualMachineRunCommand. +func (mg *VirtualMachineRunCommand) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this VirtualMachineRunCommand. +func (mg *VirtualMachineRunCommand) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this VirtualMachineRunCommand. +func (mg *VirtualMachineRunCommand) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this VirtualMachineRunCommand. +func (mg *VirtualMachineRunCommand) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this VirtualMachineRunCommand. +func (mg *VirtualMachineRunCommand) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this VirtualMachineRunCommand. +func (mg *VirtualMachineRunCommand) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this WindowsVirtualMachine. +func (mg *WindowsVirtualMachine) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this WindowsVirtualMachine. 
+func (mg *WindowsVirtualMachine) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this WindowsVirtualMachine. +func (mg *WindowsVirtualMachine) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this WindowsVirtualMachine. +func (mg *WindowsVirtualMachine) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this WindowsVirtualMachine. +func (mg *WindowsVirtualMachine) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this WindowsVirtualMachine. +func (mg *WindowsVirtualMachine) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this WindowsVirtualMachine. +func (mg *WindowsVirtualMachine) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this WindowsVirtualMachine. +func (mg *WindowsVirtualMachine) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this WindowsVirtualMachine. +func (mg *WindowsVirtualMachine) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this WindowsVirtualMachine. +func (mg *WindowsVirtualMachine) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this WindowsVirtualMachine. +func (mg *WindowsVirtualMachine) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this WindowsVirtualMachine. 
+func (mg *WindowsVirtualMachine) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this WindowsVirtualMachineScaleSet. +func (mg *WindowsVirtualMachineScaleSet) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this WindowsVirtualMachineScaleSet. +func (mg *WindowsVirtualMachineScaleSet) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this WindowsVirtualMachineScaleSet. +func (mg *WindowsVirtualMachineScaleSet) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this WindowsVirtualMachineScaleSet. +func (mg *WindowsVirtualMachineScaleSet) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this WindowsVirtualMachineScaleSet. +func (mg *WindowsVirtualMachineScaleSet) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this WindowsVirtualMachineScaleSet. +func (mg *WindowsVirtualMachineScaleSet) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this WindowsVirtualMachineScaleSet. +func (mg *WindowsVirtualMachineScaleSet) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this WindowsVirtualMachineScaleSet. +func (mg *WindowsVirtualMachineScaleSet) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this WindowsVirtualMachineScaleSet. 
+func (mg *WindowsVirtualMachineScaleSet) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this WindowsVirtualMachineScaleSet. +func (mg *WindowsVirtualMachineScaleSet) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this WindowsVirtualMachineScaleSet. +func (mg *WindowsVirtualMachineScaleSet) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this WindowsVirtualMachineScaleSet. +func (mg *WindowsVirtualMachineScaleSet) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/compute/v1beta2/zz_generated.managedlist.go b/apis/compute/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..9e3b51260 --- /dev/null +++ b/apis/compute/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,143 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this CapacityReservationList. +func (l *CapacityReservationList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this DiskEncryptionSetList. +func (l *DiskEncryptionSetList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this GalleryApplicationVersionList. 
+func (l *GalleryApplicationVersionList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ImageList. +func (l *ImageList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this LinuxVirtualMachineList. +func (l *LinuxVirtualMachineList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this LinuxVirtualMachineScaleSetList. +func (l *LinuxVirtualMachineScaleSetList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ManagedDiskList. +func (l *ManagedDiskList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this OrchestratedVirtualMachineScaleSetList. +func (l *OrchestratedVirtualMachineScaleSetList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this SharedImageGalleryList. +func (l *SharedImageGalleryList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this SharedImageList. +func (l *SharedImageList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this SnapshotList. 
+func (l *SnapshotList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this VirtualMachineExtensionList. +func (l *VirtualMachineExtensionList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this VirtualMachineRunCommandList. +func (l *VirtualMachineRunCommandList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this WindowsVirtualMachineList. +func (l *WindowsVirtualMachineList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this WindowsVirtualMachineScaleSetList. +func (l *WindowsVirtualMachineScaleSetList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/compute/v1beta2/zz_generated.resolvers.go b/apis/compute/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..7e013349c --- /dev/null +++ b/apis/compute/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,1060 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + rconfig "github.com/upbound/provider-azure/apis/rconfig" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + apisresolver "github.com/upbound/provider-azure/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *CapacityReservation) ResolveReferences( // ResolveReferences of this CapacityReservation. + ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("compute.azure.upbound.io", "v1beta1", "CapacityReservationGroup", "CapacityReservationGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.CapacityReservationGroupID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.CapacityReservationGroupIDRef, + Selector: mg.Spec.ForProvider.CapacityReservationGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.CapacityReservationGroupID") + } + mg.Spec.ForProvider.CapacityReservationGroupID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.CapacityReservationGroupIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this DiskEncryptionSet. 
+func (mg *DiskEncryptionSet) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta2", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.KeyVaultKeyID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.KeyVaultKeyIDRef, + Selector: mg.Spec.ForProvider.KeyVaultKeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.KeyVaultKeyID") + } + mg.Spec.ForProvider.KeyVaultKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.KeyVaultKeyIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta2", "Key", "KeyList") 
+ if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.KeyVaultKeyID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.KeyVaultKeyIDRef, + Selector: mg.Spec.InitProvider.KeyVaultKeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.KeyVaultKeyID") + } + mg.Spec.InitProvider.KeyVaultKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.KeyVaultKeyIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this GalleryApplicationVersion. +func (mg *GalleryApplicationVersion) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("compute.azure.upbound.io", "v1beta1", "GalleryApplication", "GalleryApplicationList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.GalleryApplicationID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.GalleryApplicationIDRef, + Selector: mg.Spec.ForProvider.GalleryApplicationIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.GalleryApplicationID") + } + mg.Spec.ForProvider.GalleryApplicationID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.GalleryApplicationIDRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.Source != nil { + { + m, l, err = 
apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Blob", "BlobList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Source.MediaLink), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.Source.MediaLinkRef, + Selector: mg.Spec.ForProvider.Source.MediaLinkSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Source.MediaLink") + } + mg.Spec.ForProvider.Source.MediaLink = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Source.MediaLinkRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.TargetRegion); i3++ { + { + m, l, err = apisresolver.GetManagedResource("compute.azure.upbound.io", "v1beta1", "GalleryApplication", "GalleryApplicationList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.TargetRegion[i3].Name), + Extract: resource.ExtractParamPath("location", false), + Reference: mg.Spec.ForProvider.TargetRegion[i3].NameRef, + Selector: mg.Spec.ForProvider.TargetRegion[i3].NameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.TargetRegion[i3].Name") + } + mg.Spec.ForProvider.TargetRegion[i3].Name = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.TargetRegion[i3].NameRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("compute.azure.upbound.io", "v1beta1", "GalleryApplication", "GalleryApplicationList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target 
managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.GalleryApplicationID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.GalleryApplicationIDRef, + Selector: mg.Spec.InitProvider.GalleryApplicationIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.GalleryApplicationID") + } + mg.Spec.InitProvider.GalleryApplicationID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.GalleryApplicationIDRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.Source != nil { + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Blob", "BlobList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Source.MediaLink), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.Source.MediaLinkRef, + Selector: mg.Spec.InitProvider.Source.MediaLinkSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Source.MediaLink") + } + mg.Spec.InitProvider.Source.MediaLink = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Source.MediaLinkRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.TargetRegion); i3++ { + { + m, l, err = apisresolver.GetManagedResource("compute.azure.upbound.io", "v1beta1", "GalleryApplication", "GalleryApplicationList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.InitProvider.TargetRegion[i3].Name), + Extract: resource.ExtractParamPath("location", false), + Reference: mg.Spec.InitProvider.TargetRegion[i3].NameRef, + Selector: mg.Spec.InitProvider.TargetRegion[i3].NameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.TargetRegion[i3].Name") + } + mg.Spec.InitProvider.TargetRegion[i3].Name = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.TargetRegion[i3].NameRef = rsp.ResolvedReference + + } + + return nil +} + +// ResolveReferences of this Image. +func (mg *Image) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this LinuxVirtualMachine. 
+func (mg *LinuxVirtualMachine) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "NetworkInterface", "NetworkInterfaceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.NetworkInterfaceIds), + Extract: rconfig.ExtractResourceID(), + References: mg.Spec.ForProvider.NetworkInterfaceIdsRefs, + Selector: mg.Spec.ForProvider.NetworkInterfaceIdsSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.NetworkInterfaceIds") + } + mg.Spec.ForProvider.NetworkInterfaceIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.NetworkInterfaceIdsRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = 
rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "NetworkInterface", "NetworkInterfaceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.NetworkInterfaceIds), + Extract: rconfig.ExtractResourceID(), + References: mg.Spec.InitProvider.NetworkInterfaceIdsRefs, + Selector: mg.Spec.InitProvider.NetworkInterfaceIdsSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.NetworkInterfaceIds") + } + mg.Spec.InitProvider.NetworkInterfaceIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.NetworkInterfaceIdsRefs = mrsp.ResolvedReferences + + return nil +} + +// ResolveReferences of this LinuxVirtualMachineScaleSet. +func (mg *LinuxVirtualMachineScaleSet) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + for i3 := 0; i3 < len(mg.Spec.ForProvider.NetworkInterface); i3++ { + for i4 := 0; i4 < len(mg.Spec.ForProvider.NetworkInterface[i3].IPConfiguration); i4++ { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.NetworkInterface[i3].IPConfiguration[i4].SubnetID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.NetworkInterface[i3].IPConfiguration[i4].SubnetIDRef, + Selector: 
mg.Spec.ForProvider.NetworkInterface[i3].IPConfiguration[i4].SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.NetworkInterface[i3].IPConfiguration[i4].SubnetID") + } + mg.Spec.ForProvider.NetworkInterface[i3].IPConfiguration[i4].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.NetworkInterface[i3].IPConfiguration[i4].SubnetIDRef = rsp.ResolvedReference + + } + } + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.InitProvider.NetworkInterface); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.NetworkInterface[i3].IPConfiguration); i4++ { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.NetworkInterface[i3].IPConfiguration[i4].SubnetID), + Extract: resource.ExtractResourceID(), + Reference: 
mg.Spec.InitProvider.NetworkInterface[i3].IPConfiguration[i4].SubnetIDRef, + Selector: mg.Spec.InitProvider.NetworkInterface[i3].IPConfiguration[i4].SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.NetworkInterface[i3].IPConfiguration[i4].SubnetID") + } + mg.Spec.InitProvider.NetworkInterface[i3].IPConfiguration[i4].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.NetworkInterface[i3].IPConfiguration[i4].SubnetIDRef = rsp.ResolvedReference + + } + } + + return nil +} + +// ResolveReferences of this ManagedDisk. +func (mg *ManagedDisk) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("compute.azure.upbound.io", "v1beta2", "ManagedDisk", "ManagedDiskList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, 
reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SourceResourceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.SourceResourceIDRef, + Selector: mg.Spec.ForProvider.SourceResourceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SourceResourceID") + } + mg.Spec.ForProvider.SourceResourceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SourceResourceIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("compute.azure.upbound.io", "v1beta2", "ManagedDisk", "ManagedDiskList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SourceResourceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.SourceResourceIDRef, + Selector: mg.Spec.InitProvider.SourceResourceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SourceResourceID") + } + mg.Spec.InitProvider.SourceResourceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SourceResourceIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this OrchestratedVirtualMachineScaleSet. 
+func (mg *OrchestratedVirtualMachineScaleSet) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + for i3 := 0; i3 < len(mg.Spec.ForProvider.NetworkInterface); i3++ { + for i4 := 0; i4 < len(mg.Spec.ForProvider.NetworkInterface[i3].IPConfiguration); i4++ { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.NetworkInterface[i3].IPConfiguration[i4].SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.NetworkInterface[i3].IPConfiguration[i4].SubnetIDRef, + Selector: mg.Spec.ForProvider.NetworkInterface[i3].IPConfiguration[i4].SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.NetworkInterface[i3].IPConfiguration[i4].SubnetID") + } + mg.Spec.ForProvider.NetworkInterface[i3].IPConfiguration[i4].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.NetworkInterface[i3].IPConfiguration[i4].SubnetIDRef = rsp.ResolvedReference + + } + } + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: 
mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.InitProvider.NetworkInterface); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.NetworkInterface[i3].IPConfiguration); i4++ { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.NetworkInterface[i3].IPConfiguration[i4].SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.NetworkInterface[i3].IPConfiguration[i4].SubnetIDRef, + Selector: mg.Spec.InitProvider.NetworkInterface[i3].IPConfiguration[i4].SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.NetworkInterface[i3].IPConfiguration[i4].SubnetID") + } + mg.Spec.InitProvider.NetworkInterface[i3].IPConfiguration[i4].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.NetworkInterface[i3].IPConfiguration[i4].SubnetIDRef = rsp.ResolvedReference + + } + } + + return nil +} + +// ResolveReferences of this SharedImage. 
+func (mg *SharedImage) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("compute.azure.upbound.io", "v1beta2", "SharedImageGallery", "SharedImageGalleryList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.GalleryName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.GalleryNameRef, + Selector: mg.Spec.ForProvider.GalleryNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.GalleryName") + } + mg.Spec.ForProvider.GalleryName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.GalleryNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this SharedImageGallery. 
+func (mg *SharedImageGallery) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Snapshot. 
+func (mg *Snapshot) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("compute.azure.upbound.io", "v1beta2", "ManagedDisk", "ManagedDiskList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SourceURI), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.SourceURIRef, + Selector: mg.Spec.ForProvider.SourceURISelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SourceURI") + } + mg.Spec.ForProvider.SourceURI = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SourceURIRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("compute.azure.upbound.io", "v1beta2", "ManagedDisk", "ManagedDiskList") + 
if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SourceURI), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.SourceURIRef, + Selector: mg.Spec.InitProvider.SourceURISelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SourceURI") + } + mg.Spec.InitProvider.SourceURI = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SourceURIRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this VirtualMachineExtension. +func (mg *VirtualMachineExtension) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("compute.azure.upbound.io", "v1beta2", "LinuxVirtualMachine", "LinuxVirtualMachineList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.VirtualMachineID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.VirtualMachineIDRef, + Selector: mg.Spec.ForProvider.VirtualMachineIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VirtualMachineID") + } + mg.Spec.ForProvider.VirtualMachineID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.VirtualMachineIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this VirtualMachineRunCommand. 
+func (mg *VirtualMachineRunCommand) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Blob", "BlobList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ErrorBlobURI), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.ErrorBlobURIRef, + Selector: mg.Spec.ForProvider.ErrorBlobURISelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ErrorBlobURI") + } + mg.Spec.ForProvider.ErrorBlobURI = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ErrorBlobURIRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Blob", "BlobList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.OutputBlobURI), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.OutputBlobURIRef, + Selector: mg.Spec.ForProvider.OutputBlobURISelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.OutputBlobURI") + } + mg.Spec.ForProvider.OutputBlobURI = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.OutputBlobURIRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.Source != nil { + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", 
"Blob", "BlobList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Source.ScriptURI), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.Source.ScriptURIRef, + Selector: mg.Spec.ForProvider.Source.ScriptURISelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Source.ScriptURI") + } + mg.Spec.ForProvider.Source.ScriptURI = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Source.ScriptURIRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("compute.azure.upbound.io", "v1beta2", "LinuxVirtualMachine", "LinuxVirtualMachineList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.VirtualMachineID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.VirtualMachineIDRef, + Selector: mg.Spec.ForProvider.VirtualMachineIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VirtualMachineID") + } + mg.Spec.ForProvider.VirtualMachineID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.VirtualMachineIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Blob", "BlobList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ErrorBlobURI), + Extract: 
resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.ErrorBlobURIRef, + Selector: mg.Spec.InitProvider.ErrorBlobURISelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ErrorBlobURI") + } + mg.Spec.InitProvider.ErrorBlobURI = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ErrorBlobURIRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Blob", "BlobList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.OutputBlobURI), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.OutputBlobURIRef, + Selector: mg.Spec.InitProvider.OutputBlobURISelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.OutputBlobURI") + } + mg.Spec.InitProvider.OutputBlobURI = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.OutputBlobURIRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.Source != nil { + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Blob", "BlobList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Source.ScriptURI), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.Source.ScriptURIRef, + Selector: mg.Spec.InitProvider.Source.ScriptURISelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Source.ScriptURI") + } + mg.Spec.InitProvider.Source.ScriptURI = 
reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Source.ScriptURIRef = rsp.ResolvedReference + + } + + return nil +} + +// ResolveReferences of this WindowsVirtualMachine. +func (mg *WindowsVirtualMachine) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "NetworkInterface", "NetworkInterfaceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.NetworkInterfaceIds), + Extract: rconfig.ExtractResourceID(), + References: mg.Spec.ForProvider.NetworkInterfaceIdsRefs, + Selector: mg.Spec.ForProvider.NetworkInterfaceIdsSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.NetworkInterfaceIds") + } + mg.Spec.ForProvider.NetworkInterfaceIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.NetworkInterfaceIdsRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return 
errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "NetworkInterface", "NetworkInterfaceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.NetworkInterfaceIds), + Extract: rconfig.ExtractResourceID(), + References: mg.Spec.InitProvider.NetworkInterfaceIdsRefs, + Selector: mg.Spec.InitProvider.NetworkInterfaceIdsSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.NetworkInterfaceIds") + } + mg.Spec.InitProvider.NetworkInterfaceIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.NetworkInterfaceIdsRefs = mrsp.ResolvedReferences + + return nil +} + +// ResolveReferences of this WindowsVirtualMachineScaleSet. 
+func (mg *WindowsVirtualMachineScaleSet) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + for i3 := 0; i3 < len(mg.Spec.ForProvider.NetworkInterface); i3++ { + for i4 := 0; i4 < len(mg.Spec.ForProvider.NetworkInterface[i3].IPConfiguration); i4++ { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.NetworkInterface[i3].IPConfiguration[i4].SubnetID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.NetworkInterface[i3].IPConfiguration[i4].SubnetIDRef, + Selector: mg.Spec.ForProvider.NetworkInterface[i3].IPConfiguration[i4].SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.NetworkInterface[i3].IPConfiguration[i4].SubnetID") + } + mg.Spec.ForProvider.NetworkInterface[i3].IPConfiguration[i4].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.NetworkInterface[i3].IPConfiguration[i4].SubnetIDRef = rsp.ResolvedReference + + } + } + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: 
mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.InitProvider.NetworkInterface); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.NetworkInterface[i3].IPConfiguration); i4++ { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.NetworkInterface[i3].IPConfiguration[i4].SubnetID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.NetworkInterface[i3].IPConfiguration[i4].SubnetIDRef, + Selector: mg.Spec.InitProvider.NetworkInterface[i3].IPConfiguration[i4].SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.NetworkInterface[i3].IPConfiguration[i4].SubnetID") + } + mg.Spec.InitProvider.NetworkInterface[i3].IPConfiguration[i4].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.NetworkInterface[i3].IPConfiguration[i4].SubnetIDRef = rsp.ResolvedReference + + } + } + + return nil +} diff --git a/apis/compute/v1beta2/zz_groupversion_info.go b/apis/compute/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..996557060 --- /dev/null +++ b/apis/compute/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +// +kubebuilder:object:generate=true +// +groupName=compute.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "compute.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/compute/v1beta2/zz_image_terraformed.go b/apis/compute/v1beta2/zz_image_terraformed.go new file mode 100755 index 000000000..86b082157 --- /dev/null +++ b/apis/compute/v1beta2/zz_image_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Image +func (mg *Image) GetTerraformResourceType() string { + return "azurerm_image" +} + +// GetConnectionDetailsMapping for this Image +func (tr *Image) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Image +func (tr *Image) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Image +func (tr *Image) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Image +func (tr *Image) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Image +func (tr *Image) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Image +func (tr *Image) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Image +func (tr *Image) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Image +func (tr 
*Image) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Image using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Image) LateInitialize(attrs []byte) (bool, error) { + params := &ImageParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Image) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/compute/v1beta2/zz_image_types.go b/apis/compute/v1beta2/zz_image_types.go new file mode 100755 index 000000000..9b2abfd4e --- /dev/null +++ b/apis/compute/v1beta2/zz_image_types.go @@ -0,0 +1,314 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DataDiskInitParameters struct { + + // Specifies the URI in Azure storage of the blob that you want to use to create the image. + BlobURI *string `json:"blobUri,omitempty" tf:"blob_uri,omitempty"` + + // Specifies the caching mode as ReadWrite, ReadOnly, or None. Defaults to None. + Caching *string `json:"caching,omitempty" tf:"caching,omitempty"` + + // Specifies the logical unit number of the data disk. + Lun *float64 `json:"lun,omitempty" tf:"lun,omitempty"` + + // Specifies the ID of the managed disk resource that you want to use to create the image. Changing this forces a new resource to be created. + ManagedDiskID *string `json:"managedDiskId,omitempty" tf:"managed_disk_id,omitempty"` + + // Specifies the size of the image to be created. The target size can't be smaller than the source size. + SizeGb *float64 `json:"sizeGb,omitempty" tf:"size_gb,omitempty"` +} + +type DataDiskObservation struct { + + // Specifies the URI in Azure storage of the blob that you want to use to create the image. + BlobURI *string `json:"blobUri,omitempty" tf:"blob_uri,omitempty"` + + // Specifies the caching mode as ReadWrite, ReadOnly, or None. Defaults to None. 
+ Caching *string `json:"caching,omitempty" tf:"caching,omitempty"` + + // Specifies the logical unit number of the data disk. + Lun *float64 `json:"lun,omitempty" tf:"lun,omitempty"` + + // Specifies the ID of the managed disk resource that you want to use to create the image. Changing this forces a new resource to be created. + ManagedDiskID *string `json:"managedDiskId,omitempty" tf:"managed_disk_id,omitempty"` + + // Specifies the size of the image to be created. The target size can't be smaller than the source size. + SizeGb *float64 `json:"sizeGb,omitempty" tf:"size_gb,omitempty"` +} + +type DataDiskParameters struct { + + // Specifies the URI in Azure storage of the blob that you want to use to create the image. + // +kubebuilder:validation:Optional + BlobURI *string `json:"blobUri,omitempty" tf:"blob_uri,omitempty"` + + // Specifies the caching mode as ReadWrite, ReadOnly, or None. Defaults to None. + // +kubebuilder:validation:Optional + Caching *string `json:"caching,omitempty" tf:"caching,omitempty"` + + // Specifies the logical unit number of the data disk. + // +kubebuilder:validation:Optional + Lun *float64 `json:"lun,omitempty" tf:"lun,omitempty"` + + // Specifies the ID of the managed disk resource that you want to use to create the image. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ManagedDiskID *string `json:"managedDiskId,omitempty" tf:"managed_disk_id,omitempty"` + + // Specifies the size of the image to be created. The target size can't be smaller than the source size. + // +kubebuilder:validation:Optional + SizeGb *float64 `json:"sizeGb,omitempty" tf:"size_gb,omitempty"` +} + +type ImageInitParameters struct { + + // One or more data_disk blocks as defined below. + DataDisk []DataDiskInitParameters `json:"dataDisk,omitempty" tf:"data_disk,omitempty"` + + // The HyperVGenerationType of the VirtualMachine created from the image as V1, V2. Defaults to V1. 
Changing this forces a new resource to be created. + HyperVGeneration *string `json:"hyperVGeneration,omitempty" tf:"hyper_v_generation,omitempty"` + + // Specified the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // One or more os_disk blocks as defined below. Changing this forces a new resource to be created. + OsDisk *OsDiskInitParameters `json:"osDisk,omitempty" tf:"os_disk,omitempty"` + + // The Virtual Machine ID from which to create the image. + SourceVirtualMachineID *string `json:"sourceVirtualMachineId,omitempty" tf:"source_virtual_machine_id,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Is zone resiliency enabled? Defaults to false. Changing this forces a new resource to be created. + ZoneResilient *bool `json:"zoneResilient,omitempty" tf:"zone_resilient,omitempty"` +} + +type ImageObservation struct { + + // One or more data_disk blocks as defined below. + DataDisk []DataDiskObservation `json:"dataDisk,omitempty" tf:"data_disk,omitempty"` + + // The HyperVGenerationType of the VirtualMachine created from the image as V1, V2. Defaults to V1. Changing this forces a new resource to be created. + HyperVGeneration *string `json:"hyperVGeneration,omitempty" tf:"hyper_v_generation,omitempty"` + + // The ID of the Image. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specified the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // One or more os_disk blocks as defined below. Changing this forces a new resource to be created. + OsDisk *OsDiskObservation `json:"osDisk,omitempty" tf:"os_disk,omitempty"` + + // The name of the resource group in which to create the image. 
Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // The Virtual Machine ID from which to create the image. + SourceVirtualMachineID *string `json:"sourceVirtualMachineId,omitempty" tf:"source_virtual_machine_id,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Is zone resiliency enabled? Defaults to false. Changing this forces a new resource to be created. + ZoneResilient *bool `json:"zoneResilient,omitempty" tf:"zone_resilient,omitempty"` +} + +type ImageParameters struct { + + // One or more data_disk blocks as defined below. + // +kubebuilder:validation:Optional + DataDisk []DataDiskParameters `json:"dataDisk,omitempty" tf:"data_disk,omitempty"` + + // The HyperVGenerationType of the VirtualMachine created from the image as V1, V2. Defaults to V1. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + HyperVGeneration *string `json:"hyperVGeneration,omitempty" tf:"hyper_v_generation,omitempty"` + + // Specified the supported Azure location where the resource exists. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // One or more os_disk blocks as defined below. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + OsDisk *OsDiskParameters `json:"osDisk,omitempty" tf:"os_disk,omitempty"` + + // The name of the resource group in which to create the image. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // The Virtual Machine ID from which to create the image. + // +kubebuilder:validation:Optional + SourceVirtualMachineID *string `json:"sourceVirtualMachineId,omitempty" tf:"source_virtual_machine_id,omitempty"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Is zone resiliency enabled? Defaults to false. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ZoneResilient *bool `json:"zoneResilient,omitempty" tf:"zone_resilient,omitempty"` +} + +type OsDiskInitParameters struct { + + // Specifies the URI in Azure storage of the blob that you want to use to create the image. Changing this forces a new resource to be created. + BlobURI *string `json:"blobUri,omitempty" tf:"blob_uri,omitempty"` + + // Specifies the caching mode as ReadWrite, ReadOnly, or None. The default is None. + Caching *string `json:"caching,omitempty" tf:"caching,omitempty"` + + // The ID of the Disk Encryption Set which should be used to encrypt this image. Changing this forces a new resource to be created. 
+ DiskEncryptionSetID *string `json:"diskEncryptionSetId,omitempty" tf:"disk_encryption_set_id,omitempty"` + + // Specifies the ID of the managed disk resource that you want to use to create the image. + ManagedDiskID *string `json:"managedDiskId,omitempty" tf:"managed_disk_id,omitempty"` + + // Specifies the state of the operating system contained in the blob. Currently, the only value is Generalized. Possible values are Generalized and Specialized. + OsState *string `json:"osState,omitempty" tf:"os_state,omitempty"` + + // Specifies the type of operating system contained in the virtual machine image. Possible values are: Windows or Linux. + OsType *string `json:"osType,omitempty" tf:"os_type,omitempty"` + + // Specifies the size of the image to be created. Changing this forces a new resource to be created. + SizeGb *float64 `json:"sizeGb,omitempty" tf:"size_gb,omitempty"` +} + +type OsDiskObservation struct { + + // Specifies the URI in Azure storage of the blob that you want to use to create the image. Changing this forces a new resource to be created. + BlobURI *string `json:"blobUri,omitempty" tf:"blob_uri,omitempty"` + + // Specifies the caching mode as ReadWrite, ReadOnly, or None. The default is None. + Caching *string `json:"caching,omitempty" tf:"caching,omitempty"` + + // The ID of the Disk Encryption Set which should be used to encrypt this image. Changing this forces a new resource to be created. + DiskEncryptionSetID *string `json:"diskEncryptionSetId,omitempty" tf:"disk_encryption_set_id,omitempty"` + + // Specifies the ID of the managed disk resource that you want to use to create the image. + ManagedDiskID *string `json:"managedDiskId,omitempty" tf:"managed_disk_id,omitempty"` + + // Specifies the state of the operating system contained in the blob. Currently, the only value is Generalized. Possible values are Generalized and Specialized. 
+ OsState *string `json:"osState,omitempty" tf:"os_state,omitempty"` + + // Specifies the type of operating system contained in the virtual machine image. Possible values are: Windows or Linux. + OsType *string `json:"osType,omitempty" tf:"os_type,omitempty"` + + // Specifies the size of the image to be created. Changing this forces a new resource to be created. + SizeGb *float64 `json:"sizeGb,omitempty" tf:"size_gb,omitempty"` +} + +type OsDiskParameters struct { + + // Specifies the URI in Azure storage of the blob that you want to use to create the image. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + BlobURI *string `json:"blobUri,omitempty" tf:"blob_uri,omitempty"` + + // Specifies the caching mode as ReadWrite, ReadOnly, or None. The default is None. + // +kubebuilder:validation:Optional + Caching *string `json:"caching,omitempty" tf:"caching,omitempty"` + + // The ID of the Disk Encryption Set which should be used to encrypt this image. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + DiskEncryptionSetID *string `json:"diskEncryptionSetId,omitempty" tf:"disk_encryption_set_id,omitempty"` + + // Specifies the ID of the managed disk resource that you want to use to create the image. + // +kubebuilder:validation:Optional + ManagedDiskID *string `json:"managedDiskId,omitempty" tf:"managed_disk_id,omitempty"` + + // Specifies the state of the operating system contained in the blob. Currently, the only value is Generalized. Possible values are Generalized and Specialized. + // +kubebuilder:validation:Optional + OsState *string `json:"osState,omitempty" tf:"os_state,omitempty"` + + // Specifies the type of operating system contained in the virtual machine image. Possible values are: Windows or Linux. + // +kubebuilder:validation:Optional + OsType *string `json:"osType,omitempty" tf:"os_type,omitempty"` + + // Specifies the size of the image to be created. 
Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + SizeGb *float64 `json:"sizeGb,omitempty" tf:"size_gb,omitempty"` +} + +// ImageSpec defines the desired state of Image +type ImageSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ImageParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ImageInitParameters `json:"initProvider,omitempty"` +} + +// ImageStatus defines the observed state of Image. +type ImageStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ImageObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Image is the Schema for the Images API. Manages a custom virtual machine image that can be used to create virtual machines. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type Image struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + Spec ImageSpec `json:"spec"` + Status ImageStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ImageList contains a list of Images +type ImageList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Image `json:"items"` +} + +// Repository type metadata. +var ( + Image_Kind = "Image" + Image_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Image_Kind}.String() + Image_KindAPIVersion = Image_Kind + "." 
+ CRDGroupVersion.String() + Image_GroupVersionKind = CRDGroupVersion.WithKind(Image_Kind) +) + +func init() { + SchemeBuilder.Register(&Image{}, &ImageList{}) +} diff --git a/apis/compute/v1beta2/zz_linuxvirtualmachine_terraformed.go b/apis/compute/v1beta2/zz_linuxvirtualmachine_terraformed.go new file mode 100755 index 000000000..93cbc8668 --- /dev/null +++ b/apis/compute/v1beta2/zz_linuxvirtualmachine_terraformed.go @@ -0,0 +1,131 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this LinuxVirtualMachine +func (mg *LinuxVirtualMachine) GetTerraformResourceType() string { + return "azurerm_linux_virtual_machine" +} + +// GetConnectionDetailsMapping for this LinuxVirtualMachine +func (tr *LinuxVirtualMachine) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"admin_password": "spec.forProvider.adminPasswordSecretRef", "custom_data": "spec.forProvider.customDataSecretRef"} +} + +// GetObservation of this LinuxVirtualMachine +func (tr *LinuxVirtualMachine) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this LinuxVirtualMachine +func (tr *LinuxVirtualMachine) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this LinuxVirtualMachine +func (tr *LinuxVirtualMachine) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" 
+ } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this LinuxVirtualMachine +func (tr *LinuxVirtualMachine) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this LinuxVirtualMachine +func (tr *LinuxVirtualMachine) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this LinuxVirtualMachine +func (tr *LinuxVirtualMachine) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this LinuxVirtualMachine +func (tr *LinuxVirtualMachine) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this LinuxVirtualMachine using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *LinuxVirtualMachine) LateInitialize(attrs []byte) (bool, error) { + params := &LinuxVirtualMachineParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + opts = append(opts, resource.WithNameFilter("PlatformFaultDomain")) + opts = append(opts, resource.WithNameFilter("VirtualMachineScaleSetID")) + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *LinuxVirtualMachine) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/compute/v1beta2/zz_linuxvirtualmachine_types.go b/apis/compute/v1beta2/zz_linuxvirtualmachine_types.go new file mode 100755 index 000000000..98f9ef493 --- /dev/null +++ b/apis/compute/v1beta2/zz_linuxvirtualmachine_types.go @@ -0,0 +1,1088 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AdditionalCapabilitiesInitParameters struct { + + // Should the capacity to enable Data Disks of the UltraSSD_LRS storage account type be supported on this Virtual Machine? Defaults to false. + UltraSsdEnabled *bool `json:"ultraSsdEnabled,omitempty" tf:"ultra_ssd_enabled,omitempty"` +} + +type AdditionalCapabilitiesObservation struct { + + // Should the capacity to enable Data Disks of the UltraSSD_LRS storage account type be supported on this Virtual Machine? Defaults to false. + UltraSsdEnabled *bool `json:"ultraSsdEnabled,omitempty" tf:"ultra_ssd_enabled,omitempty"` +} + +type AdditionalCapabilitiesParameters struct { + + // Should the capacity to enable Data Disks of the UltraSSD_LRS storage account type be supported on this Virtual Machine? Defaults to false. + // +kubebuilder:validation:Optional + UltraSsdEnabled *bool `json:"ultraSsdEnabled,omitempty" tf:"ultra_ssd_enabled,omitempty"` +} + +type AdminSSHKeyInitParameters struct { + + // The Public Key which should be used for authentication, which needs to be at least 2048-bit and in ssh-rsa format. Changing this forces a new resource to be created. + PublicKey *string `json:"publicKey,omitempty" tf:"public_key,omitempty"` + + // The Username for which this Public SSH Key should be configured. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type AdminSSHKeyObservation struct { + + // The Public Key which should be used for authentication, which needs to be at least 2048-bit and in ssh-rsa format. Changing this forces a new resource to be created. + PublicKey *string `json:"publicKey,omitempty" tf:"public_key,omitempty"` + + // The Username for which this Public SSH Key should be configured. 
Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type AdminSSHKeyParameters struct { + + // The Public Key which should be used for authentication, which needs to be at least 2048-bit and in ssh-rsa format. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + PublicKey *string `json:"publicKey" tf:"public_key,omitempty"` + + // The Username for which this Public SSH Key should be configured. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Username *string `json:"username" tf:"username,omitempty"` +} + +type BootDiagnosticsInitParameters struct { + + // The Primary/Secondary Endpoint for the Azure Storage Account which should be used to store Boot Diagnostics, including Console Output and Screenshots from the Hypervisor. + StorageAccountURI *string `json:"storageAccountUri,omitempty" tf:"storage_account_uri,omitempty"` +} + +type BootDiagnosticsObservation struct { + + // The Primary/Secondary Endpoint for the Azure Storage Account which should be used to store Boot Diagnostics, including Console Output and Screenshots from the Hypervisor. + StorageAccountURI *string `json:"storageAccountUri,omitempty" tf:"storage_account_uri,omitempty"` +} + +type BootDiagnosticsParameters struct { + + // The Primary/Secondary Endpoint for the Azure Storage Account which should be used to store Boot Diagnostics, including Console Output and Screenshots from the Hypervisor. + // +kubebuilder:validation:Optional + StorageAccountURI *string `json:"storageAccountUri,omitempty" tf:"storage_account_uri,omitempty"` +} + +type CertificateInitParameters struct { + + // The Secret URL of a Key Vault Certificate. + URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +type CertificateObservation struct { + + // The Secret URL of a Key Vault Certificate. 
+ URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +type CertificateParameters struct { + + // The Secret URL of a Key Vault Certificate. + // +kubebuilder:validation:Optional + URL *string `json:"url" tf:"url,omitempty"` +} + +type DiffDiskSettingsInitParameters struct { + + // Specifies the Ephemeral Disk Settings for the OS Disk. At this time the only possible value is Local. Changing this forces a new resource to be created. + Option *string `json:"option,omitempty" tf:"option,omitempty"` + + // Specifies where to store the Ephemeral Disk. Possible values are CacheDisk and ResourceDisk. Defaults to CacheDisk. Changing this forces a new resource to be created. + Placement *string `json:"placement,omitempty" tf:"placement,omitempty"` +} + +type DiffDiskSettingsObservation struct { + + // Specifies the Ephemeral Disk Settings for the OS Disk. At this time the only possible value is Local. Changing this forces a new resource to be created. + Option *string `json:"option,omitempty" tf:"option,omitempty"` + + // Specifies where to store the Ephemeral Disk. Possible values are CacheDisk and ResourceDisk. Defaults to CacheDisk. Changing this forces a new resource to be created. + Placement *string `json:"placement,omitempty" tf:"placement,omitempty"` +} + +type DiffDiskSettingsParameters struct { + + // Specifies the Ephemeral Disk Settings for the OS Disk. At this time the only possible value is Local. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Option *string `json:"option" tf:"option,omitempty"` + + // Specifies where to store the Ephemeral Disk. Possible values are CacheDisk and ResourceDisk. Defaults to CacheDisk. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + Placement *string `json:"placement,omitempty" tf:"placement,omitempty"` +} + +type GalleryApplicationInitParameters struct { + + // Specifies whether the version will be automatically updated for the VM when a new Gallery Application version is available in PIR/SIG. Defaults to false. + AutomaticUpgradeEnabled *bool `json:"automaticUpgradeEnabled,omitempty" tf:"automatic_upgrade_enabled,omitempty"` + + // Specifies the URI to an Azure Blob that will replace the default configuration for the package if provided. + ConfigurationBlobURI *string `json:"configurationBlobUri,omitempty" tf:"configuration_blob_uri,omitempty"` + + // Specifies the order in which the packages have to be installed. Possible values are between 0 and 2,147,483,647. + Order *float64 `json:"order,omitempty" tf:"order,omitempty"` + + // Specifies a passthrough value for more generic context. This field can be any valid string value. + Tag *string `json:"tag,omitempty" tf:"tag,omitempty"` + + // Specifies whether any failure for any operation in the VmApplication will fail the deployment of the VM. Defaults to false. + TreatFailureAsDeploymentFailureEnabled *bool `json:"treatFailureAsDeploymentFailureEnabled,omitempty" tf:"treat_failure_as_deployment_failure_enabled,omitempty"` + + // Specifies the Gallery Application Version resource ID. + VersionID *string `json:"versionId,omitempty" tf:"version_id,omitempty"` +} + +type GalleryApplicationObservation struct { + + // Specifies whether the version will be automatically updated for the VM when a new Gallery Application version is available in PIR/SIG. Defaults to false. + AutomaticUpgradeEnabled *bool `json:"automaticUpgradeEnabled,omitempty" tf:"automatic_upgrade_enabled,omitempty"` + + // Specifies the URI to an Azure Blob that will replace the default configuration for the package if provided. 
+ ConfigurationBlobURI *string `json:"configurationBlobUri,omitempty" tf:"configuration_blob_uri,omitempty"` + + // Specifies the order in which the packages have to be installed. Possible values are between 0 and 2,147,483,647. + Order *float64 `json:"order,omitempty" tf:"order,omitempty"` + + // Specifies a passthrough value for more generic context. This field can be any valid string value. + Tag *string `json:"tag,omitempty" tf:"tag,omitempty"` + + // Specifies whether any failure for any operation in the VmApplication will fail the deployment of the VM. Defaults to false. + TreatFailureAsDeploymentFailureEnabled *bool `json:"treatFailureAsDeploymentFailureEnabled,omitempty" tf:"treat_failure_as_deployment_failure_enabled,omitempty"` + + // Specifies the Gallery Application Version resource ID. + VersionID *string `json:"versionId,omitempty" tf:"version_id,omitempty"` +} + +type GalleryApplicationParameters struct { + + // Specifies whether the version will be automatically updated for the VM when a new Gallery Application version is available in PIR/SIG. Defaults to false. + // +kubebuilder:validation:Optional + AutomaticUpgradeEnabled *bool `json:"automaticUpgradeEnabled,omitempty" tf:"automatic_upgrade_enabled,omitempty"` + + // Specifies the URI to an Azure Blob that will replace the default configuration for the package if provided. + // +kubebuilder:validation:Optional + ConfigurationBlobURI *string `json:"configurationBlobUri,omitempty" tf:"configuration_blob_uri,omitempty"` + + // Specifies the order in which the packages have to be installed. Possible values are between 0 and 2,147,483,647. + // +kubebuilder:validation:Optional + Order *float64 `json:"order,omitempty" tf:"order,omitempty"` + + // Specifies a passthrough value for more generic context. This field can be any valid string value. 
+ // +kubebuilder:validation:Optional + Tag *string `json:"tag,omitempty" tf:"tag,omitempty"` + + // Specifies whether any failure for any operation in the VmApplication will fail the deployment of the VM. Defaults to false. + // +kubebuilder:validation:Optional + TreatFailureAsDeploymentFailureEnabled *bool `json:"treatFailureAsDeploymentFailureEnabled,omitempty" tf:"treat_failure_as_deployment_failure_enabled,omitempty"` + + // Specifies the Gallery Application Version resource ID. + // +kubebuilder:validation:Optional + VersionID *string `json:"versionId" tf:"version_id,omitempty"` +} + +type LinuxVirtualMachineIdentityInitParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Linux Virtual Machine. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Linux Virtual Machine. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type LinuxVirtualMachineIdentityObservation struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Linux Virtual Machine. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Principal ID associated with this Managed Service Identity. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID associated with this Managed Service Identity. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Linux Virtual Machine. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type LinuxVirtualMachineIdentityParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Linux Virtual Machine. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Linux Virtual Machine. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type LinuxVirtualMachineInitParameters struct { + + // A additional_capabilities block as defined below. + AdditionalCapabilities *AdditionalCapabilitiesInitParameters `json:"additionalCapabilities,omitempty" tf:"additional_capabilities,omitempty"` + + // One or more admin_ssh_key blocks as defined below. Changing this forces a new resource to be created. + AdminSSHKey []AdminSSHKeyInitParameters `json:"adminSshKey,omitempty" tf:"admin_ssh_key,omitempty"` + + // The username of the local administrator used for the Virtual Machine. Changing this forces a new resource to be created. + AdminUsername *string `json:"adminUsername,omitempty" tf:"admin_username,omitempty"` + + // Should Extension Operations be allowed on this Virtual Machine? Defaults to true. + AllowExtensionOperations *bool `json:"allowExtensionOperations,omitempty" tf:"allow_extension_operations,omitempty"` + + // Specifies the ID of the Availability Set in which the Virtual Machine should exist. Changing this forces a new resource to be created. + AvailabilitySetID *string `json:"availabilitySetId,omitempty" tf:"availability_set_id,omitempty"` + + // A boot_diagnostics block as defined below. 
+ BootDiagnostics *BootDiagnosticsInitParameters `json:"bootDiagnostics,omitempty" tf:"boot_diagnostics,omitempty"` + + // Specifies whether to skip platform scheduled patching when a user schedule is associated with the VM. Defaults to false. + BypassPlatformSafetyChecksOnUserScheduleEnabled *bool `json:"bypassPlatformSafetyChecksOnUserScheduleEnabled,omitempty" tf:"bypass_platform_safety_checks_on_user_schedule_enabled,omitempty"` + + // Specifies the ID of the Capacity Reservation Group which the Virtual Machine should be allocated to. + CapacityReservationGroupID *string `json:"capacityReservationGroupId,omitempty" tf:"capacity_reservation_group_id,omitempty"` + + // Specifies the Hostname which should be used for this Virtual Machine. If unspecified this defaults to the value for the name field. If the value of the name field is not a valid computer_name, then you must specify computer_name. Changing this forces a new resource to be created. + ComputerName *string `json:"computerName,omitempty" tf:"computer_name,omitempty"` + + // The ID of a Dedicated Host Group that this Linux Virtual Machine should be run within. Conflicts with dedicated_host_id. + DedicatedHostGroupID *string `json:"dedicatedHostGroupId,omitempty" tf:"dedicated_host_group_id,omitempty"` + + // The ID of a Dedicated Host where this machine should be run on. Conflicts with dedicated_host_group_id. + DedicatedHostID *string `json:"dedicatedHostId,omitempty" tf:"dedicated_host_id,omitempty"` + + // Should Password Authentication be disabled on this Virtual Machine? Defaults to true. Changing this forces a new resource to be created. + DisablePasswordAuthentication *bool `json:"disablePasswordAuthentication,omitempty" tf:"disable_password_authentication,omitempty"` + + // Specifies the Disk Controller Type used for this Virtual Machine. Possible values are SCSI and NVMe. 
+ DiskControllerType *string `json:"diskControllerType,omitempty" tf:"disk_controller_type,omitempty"` + + // Specifies the Edge Zone within the Azure Region where this Linux Virtual Machine should exist. Changing this forces a new Linux Virtual Machine to be created. + EdgeZone *string `json:"edgeZone,omitempty" tf:"edge_zone,omitempty"` + + // Should all of the disks (including the temp disk) attached to this Virtual Machine be encrypted by enabling Encryption at Host? + EncryptionAtHostEnabled *bool `json:"encryptionAtHostEnabled,omitempty" tf:"encryption_at_host_enabled,omitempty"` + + // Specifies what should happen when the Virtual Machine is evicted for price reasons when using a Spot instance. Possible values are Deallocate and Delete. Changing this forces a new resource to be created. + EvictionPolicy *string `json:"evictionPolicy,omitempty" tf:"eviction_policy,omitempty"` + + // Specifies the duration allocated for all extensions to start. The time duration should be between 15 minutes and 120 minutes (inclusive) and should be specified in ISO 8601 format. Defaults to PT1H30M. + ExtensionsTimeBudget *string `json:"extensionsTimeBudget,omitempty" tf:"extensions_time_budget,omitempty"` + + // One or more gallery_application blocks as defined below. + GalleryApplication []GalleryApplicationInitParameters `json:"galleryApplication,omitempty" tf:"gallery_application,omitempty"` + + // An identity block as defined below. + Identity *LinuxVirtualMachineIdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Specifies the BYOL Type for this Virtual Machine. Possible values are RHEL_BYOS and SLES_BYOS. + LicenseType *string `json:"licenseType,omitempty" tf:"license_type,omitempty"` + + // The Azure location where the Linux Virtual Machine should exist. Changing this forces a new resource to be created. 
+ Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The maximum price you're willing to pay for this Virtual Machine, in US Dollars; which must be greater than the current spot price. If this bid price falls below the current spot price the Virtual Machine will be evicted using the eviction_policy. Defaults to -1, which means that the Virtual Machine should not be evicted for price reasons. + MaxBidPrice *float64 `json:"maxBidPrice,omitempty" tf:"max_bid_price,omitempty"` + + // . A list of Network Interface IDs which should be attached to this Virtual Machine. The first Network Interface ID in this list will be the Primary Network Interface on the Virtual Machine. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.NetworkInterface + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + NetworkInterfaceIds []*string `json:"networkInterfaceIds,omitempty" tf:"network_interface_ids,omitempty"` + + // References to NetworkInterface in network to populate networkInterfaceIds. + // +kubebuilder:validation:Optional + NetworkInterfaceIdsRefs []v1.Reference `json:"networkInterfaceIdsRefs,omitempty" tf:"-"` + + // Selector for a list of NetworkInterface in network to populate networkInterfaceIds. + // +kubebuilder:validation:Optional + NetworkInterfaceIdsSelector *v1.Selector `json:"networkInterfaceIdsSelector,omitempty" tf:"-"` + + // A os_disk block as defined below. + OsDisk *LinuxVirtualMachineOsDiskInitParameters `json:"osDisk,omitempty" tf:"os_disk,omitempty"` + + // A os_image_notification block as defined below. + OsImageNotification *OsImageNotificationInitParameters `json:"osImageNotification,omitempty" tf:"os_image_notification,omitempty"` + + // Specifies the mode of VM Guest Patching for the Virtual Machine. Possible values are AutomaticByPlatform or ImageDefault. Defaults to ImageDefault. 
+ PatchAssessmentMode *string `json:"patchAssessmentMode,omitempty" tf:"patch_assessment_mode,omitempty"` + + // Specifies the mode of in-guest patching to this Linux Virtual Machine. Possible values are AutomaticByPlatform and ImageDefault. Defaults to ImageDefault. For more information on patch modes please see the product documentation. + PatchMode *string `json:"patchMode,omitempty" tf:"patch_mode,omitempty"` + + // A plan block as defined below. Changing this forces a new resource to be created. + Plan *PlanInitParameters `json:"plan,omitempty" tf:"plan,omitempty"` + + // Specifies the Platform Fault Domain in which this Linux Virtual Machine should be created. Defaults to -1, which means this will be automatically assigned to a fault domain that best maintains balance across the available fault domains. Changing this forces a new Linux Virtual Machine to be created. + PlatformFaultDomain *float64 `json:"platformFaultDomain,omitempty" tf:"platform_fault_domain,omitempty"` + + // Specifies the priority of this Virtual Machine. Possible values are Regular and Spot. Defaults to Regular. Changing this forces a new resource to be created. + Priority *string `json:"priority,omitempty" tf:"priority,omitempty"` + + // Should the Azure VM Agent be provisioned on this Virtual Machine? Defaults to true. Changing this forces a new resource to be created. + ProvisionVMAgent *bool `json:"provisionVmAgent,omitempty" tf:"provision_vm_agent,omitempty"` + + // The ID of the Proximity Placement Group which the Virtual Machine should be assigned to. + ProximityPlacementGroupID *string `json:"proximityPlacementGroupId,omitempty" tf:"proximity_placement_group_id,omitempty"` + + // Specifies the reboot setting for platform scheduled patching. Possible values are Always, IfRequired and Never. + RebootSetting *string `json:"rebootSetting,omitempty" tf:"reboot_setting,omitempty"` + + // One or more secret blocks as defined below. 
+ Secret []SecretInitParameters `json:"secret,omitempty" tf:"secret,omitempty"` + + // Specifies whether secure boot should be enabled on the virtual machine. Changing this forces a new resource to be created. + SecureBootEnabled *bool `json:"secureBootEnabled,omitempty" tf:"secure_boot_enabled,omitempty"` + + // The SKU which should be used for this Virtual Machine, such as Standard_F2. + Size *string `json:"size,omitempty" tf:"size,omitempty"` + + // The ID of the Image which this Virtual Machine should be created from. Changing this forces a new resource to be created. Possible Image ID types include Image IDs, Shared Image IDs, Shared Image Version IDs, Community Gallery Image IDs, Community Gallery Image Version IDs, Shared Gallery Image IDs and Shared Gallery Image Version IDs. + SourceImageID *string `json:"sourceImageId,omitempty" tf:"source_image_id,omitempty"` + + // A source_image_reference block as defined below. Changing this forces a new resource to be created. + SourceImageReference *SourceImageReferenceInitParameters `json:"sourceImageReference,omitempty" tf:"source_image_reference,omitempty"` + + // A mapping of tags which should be assigned to this Virtual Machine. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A termination_notification block as defined below. + TerminationNotification *TerminationNotificationInitParameters `json:"terminationNotification,omitempty" tf:"termination_notification,omitempty"` + + // The Base64-Encoded User Data which should be used for this Virtual Machine. + UserData *string `json:"userData,omitempty" tf:"user_data,omitempty"` + + // Specifies whether VMAgent Platform Updates is enabled. Defaults to false. + VMAgentPlatformUpdatesEnabled *bool `json:"vmAgentPlatformUpdatesEnabled,omitempty" tf:"vm_agent_platform_updates_enabled,omitempty"` + + // Specifies the Orchestrated Virtual Machine Scale Set that this Virtual Machine should be created within. 
+ VirtualMachineScaleSetID *string `json:"virtualMachineScaleSetId,omitempty" tf:"virtual_machine_scale_set_id,omitempty"` + + // Specifies whether vTPM should be enabled on the virtual machine. Changing this forces a new resource to be created. + VtpmEnabled *bool `json:"vtpmEnabled,omitempty" tf:"vtpm_enabled,omitempty"` + + // Specifies the Availability Zones in which this Linux Virtual Machine should be located. Changing this forces a new Linux Virtual Machine to be created. + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` +} + +type LinuxVirtualMachineObservation struct { + + // A additional_capabilities block as defined below. + AdditionalCapabilities *AdditionalCapabilitiesObservation `json:"additionalCapabilities,omitempty" tf:"additional_capabilities,omitempty"` + + // One or more admin_ssh_key blocks as defined below. Changing this forces a new resource to be created. + AdminSSHKey []AdminSSHKeyObservation `json:"adminSshKey,omitempty" tf:"admin_ssh_key,omitempty"` + + // The username of the local administrator used for the Virtual Machine. Changing this forces a new resource to be created. + AdminUsername *string `json:"adminUsername,omitempty" tf:"admin_username,omitempty"` + + // Should Extension Operations be allowed on this Virtual Machine? Defaults to true. + AllowExtensionOperations *bool `json:"allowExtensionOperations,omitempty" tf:"allow_extension_operations,omitempty"` + + // Specifies the ID of the Availability Set in which the Virtual Machine should exist. Changing this forces a new resource to be created. + AvailabilitySetID *string `json:"availabilitySetId,omitempty" tf:"availability_set_id,omitempty"` + + // A boot_diagnostics block as defined below. + BootDiagnostics *BootDiagnosticsObservation `json:"bootDiagnostics,omitempty" tf:"boot_diagnostics,omitempty"` + + // Specifies whether to skip platform scheduled patching when a user schedule is associated with the VM. Defaults to false. 
+ BypassPlatformSafetyChecksOnUserScheduleEnabled *bool `json:"bypassPlatformSafetyChecksOnUserScheduleEnabled,omitempty" tf:"bypass_platform_safety_checks_on_user_schedule_enabled,omitempty"` + + // Specifies the ID of the Capacity Reservation Group which the Virtual Machine should be allocated to. + CapacityReservationGroupID *string `json:"capacityReservationGroupId,omitempty" tf:"capacity_reservation_group_id,omitempty"` + + // Specifies the Hostname which should be used for this Virtual Machine. If unspecified this defaults to the value for the name field. If the value of the name field is not a valid computer_name, then you must specify computer_name. Changing this forces a new resource to be created. + ComputerName *string `json:"computerName,omitempty" tf:"computer_name,omitempty"` + + // The ID of a Dedicated Host Group that this Linux Virtual Machine should be run within. Conflicts with dedicated_host_id. + DedicatedHostGroupID *string `json:"dedicatedHostGroupId,omitempty" tf:"dedicated_host_group_id,omitempty"` + + // The ID of a Dedicated Host where this machine should be run on. Conflicts with dedicated_host_group_id. + DedicatedHostID *string `json:"dedicatedHostId,omitempty" tf:"dedicated_host_id,omitempty"` + + // Should Password Authentication be disabled on this Virtual Machine? Defaults to true. Changing this forces a new resource to be created. + DisablePasswordAuthentication *bool `json:"disablePasswordAuthentication,omitempty" tf:"disable_password_authentication,omitempty"` + + // Specifies the Disk Controller Type used for this Virtual Machine. Possible values are SCSI and NVMe. + DiskControllerType *string `json:"diskControllerType,omitempty" tf:"disk_controller_type,omitempty"` + + // Specifies the Edge Zone within the Azure Region where this Linux Virtual Machine should exist. Changing this forces a new Linux Virtual Machine to be created. 
+ EdgeZone *string `json:"edgeZone,omitempty" tf:"edge_zone,omitempty"` + + // Should all of the disks (including the temp disk) attached to this Virtual Machine be encrypted by enabling Encryption at Host? + EncryptionAtHostEnabled *bool `json:"encryptionAtHostEnabled,omitempty" tf:"encryption_at_host_enabled,omitempty"` + + // Specifies what should happen when the Virtual Machine is evicted for price reasons when using a Spot instance. Possible values are Deallocate and Delete. Changing this forces a new resource to be created. + EvictionPolicy *string `json:"evictionPolicy,omitempty" tf:"eviction_policy,omitempty"` + + // Specifies the duration allocated for all extensions to start. The time duration should be between 15 minutes and 120 minutes (inclusive) and should be specified in ISO 8601 format. Defaults to PT1H30M. + ExtensionsTimeBudget *string `json:"extensionsTimeBudget,omitempty" tf:"extensions_time_budget,omitempty"` + + // One or more gallery_application blocks as defined below. + GalleryApplication []GalleryApplicationObservation `json:"galleryApplication,omitempty" tf:"gallery_application,omitempty"` + + // The ID of the Linux Virtual Machine. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. + Identity *LinuxVirtualMachineIdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // Specifies the BYOL Type for this Virtual Machine. Possible values are RHEL_BYOS and SLES_BYOS. + LicenseType *string `json:"licenseType,omitempty" tf:"license_type,omitempty"` + + // The Azure location where the Linux Virtual Machine should exist. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The maximum price you're willing to pay for this Virtual Machine, in US Dollars; which must be greater than the current spot price. 
If this bid price falls below the current spot price the Virtual Machine will be evicted using the eviction_policy. Defaults to -1, which means that the Virtual Machine should not be evicted for price reasons. + MaxBidPrice *float64 `json:"maxBidPrice,omitempty" tf:"max_bid_price,omitempty"` + + // . A list of Network Interface IDs which should be attached to this Virtual Machine. The first Network Interface ID in this list will be the Primary Network Interface on the Virtual Machine. + NetworkInterfaceIds []*string `json:"networkInterfaceIds,omitempty" tf:"network_interface_ids,omitempty"` + + // A os_disk block as defined below. + OsDisk *LinuxVirtualMachineOsDiskObservation `json:"osDisk,omitempty" tf:"os_disk,omitempty"` + + // A os_image_notification block as defined below. + OsImageNotification *OsImageNotificationObservation `json:"osImageNotification,omitempty" tf:"os_image_notification,omitempty"` + + // Specifies the mode of VM Guest Patching for the Virtual Machine. Possible values are AutomaticByPlatform or ImageDefault. Defaults to ImageDefault. + PatchAssessmentMode *string `json:"patchAssessmentMode,omitempty" tf:"patch_assessment_mode,omitempty"` + + // Specifies the mode of in-guest patching to this Linux Virtual Machine. Possible values are AutomaticByPlatform and ImageDefault. Defaults to ImageDefault. For more information on patch modes please see the product documentation. + PatchMode *string `json:"patchMode,omitempty" tf:"patch_mode,omitempty"` + + // A plan block as defined below. Changing this forces a new resource to be created. + Plan *PlanObservation `json:"plan,omitempty" tf:"plan,omitempty"` + + // Specifies the Platform Fault Domain in which this Linux Virtual Machine should be created. Defaults to -1, which means this will be automatically assigned to a fault domain that best maintains balance across the available fault domains. Changing this forces a new Linux Virtual Machine to be created. 
+ PlatformFaultDomain *float64 `json:"platformFaultDomain,omitempty" tf:"platform_fault_domain,omitempty"` + + // Specifies the priority of this Virtual Machine. Possible values are Regular and Spot. Defaults to Regular. Changing this forces a new resource to be created. + Priority *string `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Primary Private IP Address assigned to this Virtual Machine. + PrivateIPAddress *string `json:"privateIpAddress,omitempty" tf:"private_ip_address,omitempty"` + + // A list of Private IP Addresses assigned to this Virtual Machine. + PrivateIPAddresses []*string `json:"privateIpAddresses,omitempty" tf:"private_ip_addresses,omitempty"` + + // Should the Azure VM Agent be provisioned on this Virtual Machine? Defaults to true. Changing this forces a new resource to be created. + ProvisionVMAgent *bool `json:"provisionVmAgent,omitempty" tf:"provision_vm_agent,omitempty"` + + // The ID of the Proximity Placement Group which the Virtual Machine should be assigned to. + ProximityPlacementGroupID *string `json:"proximityPlacementGroupId,omitempty" tf:"proximity_placement_group_id,omitempty"` + + // The Primary Public IP Address assigned to this Virtual Machine. + PublicIPAddress *string `json:"publicIpAddress,omitempty" tf:"public_ip_address,omitempty"` + + // A list of the Public IP Addresses assigned to this Virtual Machine. + PublicIPAddresses []*string `json:"publicIpAddresses,omitempty" tf:"public_ip_addresses,omitempty"` + + // Specifies the reboot setting for platform scheduled patching. Possible values are Always, IfRequired and Never. + RebootSetting *string `json:"rebootSetting,omitempty" tf:"reboot_setting,omitempty"` + + // The name of the Resource Group in which the Linux Virtual Machine should be exist. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // One or more secret blocks as defined below. 
+ Secret []SecretObservation `json:"secret,omitempty" tf:"secret,omitempty"` + + // Specifies whether secure boot should be enabled on the virtual machine. Changing this forces a new resource to be created. + SecureBootEnabled *bool `json:"secureBootEnabled,omitempty" tf:"secure_boot_enabled,omitempty"` + + // The SKU which should be used for this Virtual Machine, such as Standard_F2. + Size *string `json:"size,omitempty" tf:"size,omitempty"` + + // The ID of the Image which this Virtual Machine should be created from. Changing this forces a new resource to be created. Possible Image ID types include Image IDs, Shared Image IDs, Shared Image Version IDs, Community Gallery Image IDs, Community Gallery Image Version IDs, Shared Gallery Image IDs and Shared Gallery Image Version IDs. + SourceImageID *string `json:"sourceImageId,omitempty" tf:"source_image_id,omitempty"` + + // A source_image_reference block as defined below. Changing this forces a new resource to be created. + SourceImageReference *SourceImageReferenceObservation `json:"sourceImageReference,omitempty" tf:"source_image_reference,omitempty"` + + // A mapping of tags which should be assigned to this Virtual Machine. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A termination_notification block as defined below. + TerminationNotification *TerminationNotificationObservation `json:"terminationNotification,omitempty" tf:"termination_notification,omitempty"` + + // The Base64-Encoded User Data which should be used for this Virtual Machine. + UserData *string `json:"userData,omitempty" tf:"user_data,omitempty"` + + // Specifies whether VMAgent Platform Updates is enabled. Defaults to false. + VMAgentPlatformUpdatesEnabled *bool `json:"vmAgentPlatformUpdatesEnabled,omitempty" tf:"vm_agent_platform_updates_enabled,omitempty"` + + // A 128-bit identifier which uniquely identifies this Virtual Machine. 
+ VirtualMachineID *string `json:"virtualMachineId,omitempty" tf:"virtual_machine_id,omitempty"` + + // Specifies the Orchestrated Virtual Machine Scale Set that this Virtual Machine should be created within. + VirtualMachineScaleSetID *string `json:"virtualMachineScaleSetId,omitempty" tf:"virtual_machine_scale_set_id,omitempty"` + + // Specifies whether vTPM should be enabled on the virtual machine. Changing this forces a new resource to be created. + VtpmEnabled *bool `json:"vtpmEnabled,omitempty" tf:"vtpm_enabled,omitempty"` + + // Specifies the Availability Zones in which this Linux Virtual Machine should be located. Changing this forces a new Linux Virtual Machine to be created. + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` +} + +type LinuxVirtualMachineOsDiskInitParameters struct { + + // The Type of Caching which should be used for the Internal OS Disk. Possible values are None, ReadOnly and ReadWrite. + Caching *string `json:"caching,omitempty" tf:"caching,omitempty"` + + // A diff_disk_settings block as defined above. Changing this forces a new resource to be created. + DiffDiskSettings *DiffDiskSettingsInitParameters `json:"diffDiskSettings,omitempty" tf:"diff_disk_settings,omitempty"` + + // The ID of the Disk Encryption Set which should be used to Encrypt this OS Disk. Conflicts with secure_vm_disk_encryption_set_id. + DiskEncryptionSetID *string `json:"diskEncryptionSetId,omitempty" tf:"disk_encryption_set_id,omitempty"` + + // The Size of the Internal OS Disk in GB, if you wish to vary from the size used in the image this Virtual Machine is sourced from. + DiskSizeGb *float64 `json:"diskSizeGb,omitempty" tf:"disk_size_gb,omitempty"` + + // The name which should be used for the Internal OS Disk. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The ID of the Disk Encryption Set which should be used to Encrypt this OS Disk when the Virtual Machine is a Confidential VM. 
Conflicts with disk_encryption_set_id. Changing this forces a new resource to be created. + SecureVMDiskEncryptionSetID *string `json:"secureVmDiskEncryptionSetId,omitempty" tf:"secure_vm_disk_encryption_set_id,omitempty"` + + // Encryption Type when the Virtual Machine is a Confidential VM. Possible values are VMGuestStateOnly and DiskWithVMGuestState. Changing this forces a new resource to be created. + SecurityEncryptionType *string `json:"securityEncryptionType,omitempty" tf:"security_encryption_type,omitempty"` + + // The Type of Storage Account which should back this the Internal OS Disk. Possible values are Standard_LRS, StandardSSD_LRS, Premium_LRS, StandardSSD_ZRS and Premium_ZRS. Changing this forces a new resource to be created. + StorageAccountType *string `json:"storageAccountType,omitempty" tf:"storage_account_type,omitempty"` + + // Should Write Accelerator be Enabled for this OS Disk? Defaults to false. + WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty" tf:"write_accelerator_enabled,omitempty"` +} + +type LinuxVirtualMachineOsDiskObservation struct { + + // The Type of Caching which should be used for the Internal OS Disk. Possible values are None, ReadOnly and ReadWrite. + Caching *string `json:"caching,omitempty" tf:"caching,omitempty"` + + // A diff_disk_settings block as defined above. Changing this forces a new resource to be created. + DiffDiskSettings *DiffDiskSettingsObservation `json:"diffDiskSettings,omitempty" tf:"diff_disk_settings,omitempty"` + + // The ID of the Disk Encryption Set which should be used to Encrypt this OS Disk. Conflicts with secure_vm_disk_encryption_set_id. + DiskEncryptionSetID *string `json:"diskEncryptionSetId,omitempty" tf:"disk_encryption_set_id,omitempty"` + + // The Size of the Internal OS Disk in GB, if you wish to vary from the size used in the image this Virtual Machine is sourced from. 
+ DiskSizeGb *float64 `json:"diskSizeGb,omitempty" tf:"disk_size_gb,omitempty"` + + // The name which should be used for the Internal OS Disk. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The ID of the Disk Encryption Set which should be used to Encrypt this OS Disk when the Virtual Machine is a Confidential VM. Conflicts with disk_encryption_set_id. Changing this forces a new resource to be created. + SecureVMDiskEncryptionSetID *string `json:"secureVmDiskEncryptionSetId,omitempty" tf:"secure_vm_disk_encryption_set_id,omitempty"` + + // Encryption Type when the Virtual Machine is a Confidential VM. Possible values are VMGuestStateOnly and DiskWithVMGuestState. Changing this forces a new resource to be created. + SecurityEncryptionType *string `json:"securityEncryptionType,omitempty" tf:"security_encryption_type,omitempty"` + + // The Type of Storage Account which should back this the Internal OS Disk. Possible values are Standard_LRS, StandardSSD_LRS, Premium_LRS, StandardSSD_ZRS and Premium_ZRS. Changing this forces a new resource to be created. + StorageAccountType *string `json:"storageAccountType,omitempty" tf:"storage_account_type,omitempty"` + + // Should Write Accelerator be Enabled for this OS Disk? Defaults to false. + WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty" tf:"write_accelerator_enabled,omitempty"` +} + +type LinuxVirtualMachineOsDiskParameters struct { + + // The Type of Caching which should be used for the Internal OS Disk. Possible values are None, ReadOnly and ReadWrite. + // +kubebuilder:validation:Optional + Caching *string `json:"caching" tf:"caching,omitempty"` + + // A diff_disk_settings block as defined above. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + DiffDiskSettings *DiffDiskSettingsParameters `json:"diffDiskSettings,omitempty" tf:"diff_disk_settings,omitempty"` + + // The ID of the Disk Encryption Set which should be used to Encrypt this OS Disk. Conflicts with secure_vm_disk_encryption_set_id. + // +kubebuilder:validation:Optional + DiskEncryptionSetID *string `json:"diskEncryptionSetId,omitempty" tf:"disk_encryption_set_id,omitempty"` + + // The Size of the Internal OS Disk in GB, if you wish to vary from the size used in the image this Virtual Machine is sourced from. + // +kubebuilder:validation:Optional + DiskSizeGb *float64 `json:"diskSizeGb,omitempty" tf:"disk_size_gb,omitempty"` + + // The name which should be used for the Internal OS Disk. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The ID of the Disk Encryption Set which should be used to Encrypt this OS Disk when the Virtual Machine is a Confidential VM. Conflicts with disk_encryption_set_id. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + SecureVMDiskEncryptionSetID *string `json:"secureVmDiskEncryptionSetId,omitempty" tf:"secure_vm_disk_encryption_set_id,omitempty"` + + // Encryption Type when the Virtual Machine is a Confidential VM. Possible values are VMGuestStateOnly and DiskWithVMGuestState. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + SecurityEncryptionType *string `json:"securityEncryptionType,omitempty" tf:"security_encryption_type,omitempty"` + + // The Type of Storage Account which should back this the Internal OS Disk. Possible values are Standard_LRS, StandardSSD_LRS, Premium_LRS, StandardSSD_ZRS and Premium_ZRS. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + StorageAccountType *string `json:"storageAccountType" tf:"storage_account_type,omitempty"` + + // Should Write Accelerator be Enabled for this OS Disk? Defaults to false. + // +kubebuilder:validation:Optional + WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty" tf:"write_accelerator_enabled,omitempty"` +} + +type LinuxVirtualMachineParameters struct { + + // A additional_capabilities block as defined below. + // +kubebuilder:validation:Optional + AdditionalCapabilities *AdditionalCapabilitiesParameters `json:"additionalCapabilities,omitempty" tf:"additional_capabilities,omitempty"` + + // The Password which should be used for the local-administrator on this Virtual Machine. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + AdminPasswordSecretRef *v1.SecretKeySelector `json:"adminPasswordSecretRef,omitempty" tf:"-"` + + // One or more admin_ssh_key blocks as defined below. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + AdminSSHKey []AdminSSHKeyParameters `json:"adminSshKey,omitempty" tf:"admin_ssh_key,omitempty"` + + // The username of the local administrator used for the Virtual Machine. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + AdminUsername *string `json:"adminUsername,omitempty" tf:"admin_username,omitempty"` + + // Should Extension Operations be allowed on this Virtual Machine? Defaults to true. + // +kubebuilder:validation:Optional + AllowExtensionOperations *bool `json:"allowExtensionOperations,omitempty" tf:"allow_extension_operations,omitempty"` + + // Specifies the ID of the Availability Set in which the Virtual Machine should exist. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + AvailabilitySetID *string `json:"availabilitySetId,omitempty" tf:"availability_set_id,omitempty"` + + // A boot_diagnostics block as defined below. + // +kubebuilder:validation:Optional + BootDiagnostics *BootDiagnosticsParameters `json:"bootDiagnostics,omitempty" tf:"boot_diagnostics,omitempty"` + + // Specifies whether to skip platform scheduled patching when a user schedule is associated with the VM. Defaults to false. + // +kubebuilder:validation:Optional + BypassPlatformSafetyChecksOnUserScheduleEnabled *bool `json:"bypassPlatformSafetyChecksOnUserScheduleEnabled,omitempty" tf:"bypass_platform_safety_checks_on_user_schedule_enabled,omitempty"` + + // Specifies the ID of the Capacity Reservation Group which the Virtual Machine should be allocated to. + // +kubebuilder:validation:Optional + CapacityReservationGroupID *string `json:"capacityReservationGroupId,omitempty" tf:"capacity_reservation_group_id,omitempty"` + + // Specifies the Hostname which should be used for this Virtual Machine. If unspecified this defaults to the value for the name field. If the value of the name field is not a valid computer_name, then you must specify computer_name. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ComputerName *string `json:"computerName,omitempty" tf:"computer_name,omitempty"` + + // The Base64-Encoded Custom Data which should be used for this Virtual Machine. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + CustomDataSecretRef *v1.SecretKeySelector `json:"customDataSecretRef,omitempty" tf:"-"` + + // The ID of a Dedicated Host Group that this Linux Virtual Machine should be run within. Conflicts with dedicated_host_id. + // +kubebuilder:validation:Optional + DedicatedHostGroupID *string `json:"dedicatedHostGroupId,omitempty" tf:"dedicated_host_group_id,omitempty"` + + // The ID of a Dedicated Host where this machine should be run on. 
Conflicts with dedicated_host_group_id. + // +kubebuilder:validation:Optional + DedicatedHostID *string `json:"dedicatedHostId,omitempty" tf:"dedicated_host_id,omitempty"` + + // Should Password Authentication be disabled on this Virtual Machine? Defaults to true. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + DisablePasswordAuthentication *bool `json:"disablePasswordAuthentication,omitempty" tf:"disable_password_authentication,omitempty"` + + // Specifies the Disk Controller Type used for this Virtual Machine. Possible values are SCSI and NVMe. + // +kubebuilder:validation:Optional + DiskControllerType *string `json:"diskControllerType,omitempty" tf:"disk_controller_type,omitempty"` + + // Specifies the Edge Zone within the Azure Region where this Linux Virtual Machine should exist. Changing this forces a new Linux Virtual Machine to be created. + // +kubebuilder:validation:Optional + EdgeZone *string `json:"edgeZone,omitempty" tf:"edge_zone,omitempty"` + + // Should all of the disks (including the temp disk) attached to this Virtual Machine be encrypted by enabling Encryption at Host? + // +kubebuilder:validation:Optional + EncryptionAtHostEnabled *bool `json:"encryptionAtHostEnabled,omitempty" tf:"encryption_at_host_enabled,omitempty"` + + // Specifies what should happen when the Virtual Machine is evicted for price reasons when using a Spot instance. Possible values are Deallocate and Delete. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + EvictionPolicy *string `json:"evictionPolicy,omitempty" tf:"eviction_policy,omitempty"` + + // Specifies the duration allocated for all extensions to start. The time duration should be between 15 minutes and 120 minutes (inclusive) and should be specified in ISO 8601 format. Defaults to PT1H30M. 
+ // +kubebuilder:validation:Optional + ExtensionsTimeBudget *string `json:"extensionsTimeBudget,omitempty" tf:"extensions_time_budget,omitempty"` + + // One or more gallery_application blocks as defined below. + // +kubebuilder:validation:Optional + GalleryApplication []GalleryApplicationParameters `json:"galleryApplication,omitempty" tf:"gallery_application,omitempty"` + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *LinuxVirtualMachineIdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Specifies the BYOL Type for this Virtual Machine. Possible values are RHEL_BYOS and SLES_BYOS. + // +kubebuilder:validation:Optional + LicenseType *string `json:"licenseType,omitempty" tf:"license_type,omitempty"` + + // The Azure location where the Linux Virtual Machine should exist. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The maximum price you're willing to pay for this Virtual Machine, in US Dollars; which must be greater than the current spot price. If this bid price falls below the current spot price the Virtual Machine will be evicted using the eviction_policy. Defaults to -1, which means that the Virtual Machine should not be evicted for price reasons. + // +kubebuilder:validation:Optional + MaxBidPrice *float64 `json:"maxBidPrice,omitempty" tf:"max_bid_price,omitempty"` + + // . A list of Network Interface IDs which should be attached to this Virtual Machine. The first Network Interface ID in this list will be the Primary Network Interface on the Virtual Machine. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.NetworkInterface + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + NetworkInterfaceIds []*string `json:"networkInterfaceIds,omitempty" tf:"network_interface_ids,omitempty"` + + // References to NetworkInterface in network to populate networkInterfaceIds. + // +kubebuilder:validation:Optional + NetworkInterfaceIdsRefs []v1.Reference `json:"networkInterfaceIdsRefs,omitempty" tf:"-"` + + // Selector for a list of NetworkInterface in network to populate networkInterfaceIds. + // +kubebuilder:validation:Optional + NetworkInterfaceIdsSelector *v1.Selector `json:"networkInterfaceIdsSelector,omitempty" tf:"-"` + + // A os_disk block as defined below. + // +kubebuilder:validation:Optional + OsDisk *LinuxVirtualMachineOsDiskParameters `json:"osDisk,omitempty" tf:"os_disk,omitempty"` + + // A os_image_notification block as defined below. + // +kubebuilder:validation:Optional + OsImageNotification *OsImageNotificationParameters `json:"osImageNotification,omitempty" tf:"os_image_notification,omitempty"` + + // Specifies the mode of VM Guest Patching for the Virtual Machine. Possible values are AutomaticByPlatform or ImageDefault. Defaults to ImageDefault. + // +kubebuilder:validation:Optional + PatchAssessmentMode *string `json:"patchAssessmentMode,omitempty" tf:"patch_assessment_mode,omitempty"` + + // Specifies the mode of in-guest patching to this Linux Virtual Machine. Possible values are AutomaticByPlatform and ImageDefault. Defaults to ImageDefault. For more information on patch modes please see the product documentation. + // +kubebuilder:validation:Optional + PatchMode *string `json:"patchMode,omitempty" tf:"patch_mode,omitempty"` + + // A plan block as defined below. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + Plan *PlanParameters `json:"plan,omitempty" tf:"plan,omitempty"` + + // Specifies the Platform Fault Domain in which this Linux Virtual Machine should be created. Defaults to -1, which means this will be automatically assigned to a fault domain that best maintains balance across the available fault domains. Changing this forces a new Linux Virtual Machine to be created. + // +kubebuilder:validation:Optional + PlatformFaultDomain *float64 `json:"platformFaultDomain,omitempty" tf:"platform_fault_domain,omitempty"` + + // Specifies the priority of this Virtual Machine. Possible values are Regular and Spot. Defaults to Regular. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Priority *string `json:"priority,omitempty" tf:"priority,omitempty"` + + // Should the Azure VM Agent be provisioned on this Virtual Machine? Defaults to true. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ProvisionVMAgent *bool `json:"provisionVmAgent,omitempty" tf:"provision_vm_agent,omitempty"` + + // The ID of the Proximity Placement Group which the Virtual Machine should be assigned to. + // +kubebuilder:validation:Optional + ProximityPlacementGroupID *string `json:"proximityPlacementGroupId,omitempty" tf:"proximity_placement_group_id,omitempty"` + + // Specifies the reboot setting for platform scheduled patching. Possible values are Always, IfRequired and Never. + // +kubebuilder:validation:Optional + RebootSetting *string `json:"rebootSetting,omitempty" tf:"reboot_setting,omitempty"` + + // The name of the Resource Group in which the Linux Virtual Machine should be exist. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // One or more secret blocks as defined below. + // +kubebuilder:validation:Optional + Secret []SecretParameters `json:"secret,omitempty" tf:"secret,omitempty"` + + // Specifies whether secure boot should be enabled on the virtual machine. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + SecureBootEnabled *bool `json:"secureBootEnabled,omitempty" tf:"secure_boot_enabled,omitempty"` + + // The SKU which should be used for this Virtual Machine, such as Standard_F2. + // +kubebuilder:validation:Optional + Size *string `json:"size,omitempty" tf:"size,omitempty"` + + // The ID of the Image which this Virtual Machine should be created from. Changing this forces a new resource to be created. Possible Image ID types include Image IDs, Shared Image IDs, Shared Image Version IDs, Community Gallery Image IDs, Community Gallery Image Version IDs, Shared Gallery Image IDs and Shared Gallery Image Version IDs. + // +kubebuilder:validation:Optional + SourceImageID *string `json:"sourceImageId,omitempty" tf:"source_image_id,omitempty"` + + // A source_image_reference block as defined below. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + SourceImageReference *SourceImageReferenceParameters `json:"sourceImageReference,omitempty" tf:"source_image_reference,omitempty"` + + // A mapping of tags which should be assigned to this Virtual Machine. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A termination_notification block as defined below. + // +kubebuilder:validation:Optional + TerminationNotification *TerminationNotificationParameters `json:"terminationNotification,omitempty" tf:"termination_notification,omitempty"` + + // The Base64-Encoded User Data which should be used for this Virtual Machine. + // +kubebuilder:validation:Optional + UserData *string `json:"userData,omitempty" tf:"user_data,omitempty"` + + // Specifies whether VMAgent Platform Updates is enabled. Defaults to false. + // +kubebuilder:validation:Optional + VMAgentPlatformUpdatesEnabled *bool `json:"vmAgentPlatformUpdatesEnabled,omitempty" tf:"vm_agent_platform_updates_enabled,omitempty"` + + // Specifies the Orchestrated Virtual Machine Scale Set that this Virtual Machine should be created within. + // +kubebuilder:validation:Optional + VirtualMachineScaleSetID *string `json:"virtualMachineScaleSetId,omitempty" tf:"virtual_machine_scale_set_id,omitempty"` + + // Specifies whether vTPM should be enabled on the virtual machine. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + VtpmEnabled *bool `json:"vtpmEnabled,omitempty" tf:"vtpm_enabled,omitempty"` + + // Specifies the Availability Zones in which this Linux Virtual Machine should be located. Changing this forces a new Linux Virtual Machine to be created. 
+ // +kubebuilder:validation:Optional + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` +} + +type OsImageNotificationInitParameters struct { + + // Length of time a notification to be sent to the VM on the instance metadata server till the VM gets OS upgraded. The only possible value is PT15M. Defaults to PT15M. + Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` +} + +type OsImageNotificationObservation struct { + + // Length of time a notification to be sent to the VM on the instance metadata server till the VM gets OS upgraded. The only possible value is PT15M. Defaults to PT15M. + Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` +} + +type OsImageNotificationParameters struct { + + // Length of time a notification to be sent to the VM on the instance metadata server till the VM gets OS upgraded. The only possible value is PT15M. Defaults to PT15M. + // +kubebuilder:validation:Optional + Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` +} + +type PlanInitParameters struct { + + // Specifies the Name of the Marketplace Image this Virtual Machine should be created from. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies the Product of the Marketplace Image this Virtual Machine should be created from. Changing this forces a new resource to be created. + Product *string `json:"product,omitempty" tf:"product,omitempty"` + + // Specifies the Publisher of the Marketplace Image this Virtual Machine should be created from. Changing this forces a new resource to be created. + Publisher *string `json:"publisher,omitempty" tf:"publisher,omitempty"` +} + +type PlanObservation struct { + + // Specifies the Name of the Marketplace Image this Virtual Machine should be created from. Changing this forces a new resource to be created. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies the Product of the Marketplace Image this Virtual Machine should be created from. Changing this forces a new resource to be created. + Product *string `json:"product,omitempty" tf:"product,omitempty"` + + // Specifies the Publisher of the Marketplace Image this Virtual Machine should be created from. Changing this forces a new resource to be created. + Publisher *string `json:"publisher,omitempty" tf:"publisher,omitempty"` +} + +type PlanParameters struct { + + // Specifies the Name of the Marketplace Image this Virtual Machine should be created from. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Specifies the Product of the Marketplace Image this Virtual Machine should be created from. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Product *string `json:"product" tf:"product,omitempty"` + + // Specifies the Publisher of the Marketplace Image this Virtual Machine should be created from. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Publisher *string `json:"publisher" tf:"publisher,omitempty"` +} + +type SecretInitParameters struct { + + // One or more certificate blocks as defined above. + Certificate []CertificateInitParameters `json:"certificate,omitempty" tf:"certificate,omitempty"` + + // The ID of the Key Vault from which all Secrets should be sourced. + KeyVaultID *string `json:"keyVaultId,omitempty" tf:"key_vault_id,omitempty"` +} + +type SecretObservation struct { + + // One or more certificate blocks as defined above. + Certificate []CertificateObservation `json:"certificate,omitempty" tf:"certificate,omitempty"` + + // The ID of the Key Vault from which all Secrets should be sourced. 
+ KeyVaultID *string `json:"keyVaultId,omitempty" tf:"key_vault_id,omitempty"` +} + +type SecretParameters struct { + + // One or more certificate blocks as defined above. + // +kubebuilder:validation:Optional + Certificate []CertificateParameters `json:"certificate" tf:"certificate,omitempty"` + + // The ID of the Key Vault from which all Secrets should be sourced. + // +kubebuilder:validation:Optional + KeyVaultID *string `json:"keyVaultId" tf:"key_vault_id,omitempty"` +} + +type SourceImageReferenceInitParameters struct { + + // Specifies the offer of the image used to create the virtual machines. Changing this forces a new resource to be created. + Offer *string `json:"offer,omitempty" tf:"offer,omitempty"` + + // Specifies the publisher of the image used to create the virtual machines. Changing this forces a new resource to be created. + Publisher *string `json:"publisher,omitempty" tf:"publisher,omitempty"` + + // Specifies the SKU of the image used to create the virtual machines. Changing this forces a new resource to be created. + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // Specifies the version of the image used to create the virtual machines. Changing this forces a new resource to be created. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type SourceImageReferenceObservation struct { + + // Specifies the offer of the image used to create the virtual machines. Changing this forces a new resource to be created. + Offer *string `json:"offer,omitempty" tf:"offer,omitempty"` + + // Specifies the publisher of the image used to create the virtual machines. Changing this forces a new resource to be created. + Publisher *string `json:"publisher,omitempty" tf:"publisher,omitempty"` + + // Specifies the SKU of the image used to create the virtual machines. Changing this forces a new resource to be created. 
+ Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // Specifies the version of the image used to create the virtual machines. Changing this forces a new resource to be created. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type SourceImageReferenceParameters struct { + + // Specifies the offer of the image used to create the virtual machines. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Offer *string `json:"offer" tf:"offer,omitempty"` + + // Specifies the publisher of the image used to create the virtual machines. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Publisher *string `json:"publisher" tf:"publisher,omitempty"` + + // Specifies the SKU of the image used to create the virtual machines. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Sku *string `json:"sku" tf:"sku,omitempty"` + + // Specifies the version of the image used to create the virtual machines. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Version *string `json:"version" tf:"version,omitempty"` +} + +type TerminationNotificationInitParameters struct { + + // Should the termination notification be enabled on this Virtual Machine? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Length of time (in minutes, between 5 and 15) a notification to be sent to the VM on the instance metadata server till the VM gets deleted. The time duration should be specified in ISO 8601 format. Defaults to PT5M. + Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` +} + +type TerminationNotificationObservation struct { + + // Should the termination notification be enabled on this Virtual Machine? 
+ Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Length of time (in minutes, between 5 and 15) a notification to be sent to the VM on the instance metadata server till the VM gets deleted. The time duration should be specified in ISO 8601 format. Defaults to PT5M. + Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` +} + +type TerminationNotificationParameters struct { + + // Should the termination notification be enabled on this Virtual Machine? + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` + + // Length of time (in minutes, between 5 and 15) a notification to be sent to the VM on the instance metadata server till the VM gets deleted. The time duration should be specified in ISO 8601 format. Defaults to PT5M. + // +kubebuilder:validation:Optional + Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` +} + +// LinuxVirtualMachineSpec defines the desired state of LinuxVirtualMachine +type LinuxVirtualMachineSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider LinuxVirtualMachineParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider LinuxVirtualMachineInitParameters `json:"initProvider,omitempty"` +} + +// LinuxVirtualMachineStatus defines the observed state of LinuxVirtualMachine. 
+type LinuxVirtualMachineStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider LinuxVirtualMachineObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// LinuxVirtualMachine is the Schema for the LinuxVirtualMachines API. Manages a Linux Virtual Machine. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type LinuxVirtualMachine struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.adminUsername) || (has(self.initProvider) && has(self.initProvider.adminUsername))",message="spec.forProvider.adminUsername is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.osDisk) || (has(self.initProvider) && has(self.initProvider.osDisk))",message="spec.forProvider.osDisk is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in 
self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.size) || (has(self.initProvider) && has(self.initProvider.size))",message="spec.forProvider.size is a required parameter" + Spec LinuxVirtualMachineSpec `json:"spec"` + Status LinuxVirtualMachineStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// LinuxVirtualMachineList contains a list of LinuxVirtualMachines +type LinuxVirtualMachineList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []LinuxVirtualMachine `json:"items"` +} + +// Repository type metadata. +var ( + LinuxVirtualMachine_Kind = "LinuxVirtualMachine" + LinuxVirtualMachine_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: LinuxVirtualMachine_Kind}.String() + LinuxVirtualMachine_KindAPIVersion = LinuxVirtualMachine_Kind + "." + CRDGroupVersion.String() + LinuxVirtualMachine_GroupVersionKind = CRDGroupVersion.WithKind(LinuxVirtualMachine_Kind) +) + +func init() { + SchemeBuilder.Register(&LinuxVirtualMachine{}, &LinuxVirtualMachineList{}) +} diff --git a/apis/compute/v1beta2/zz_linuxvirtualmachinescaleset_terraformed.go b/apis/compute/v1beta2/zz_linuxvirtualmachinescaleset_terraformed.go new file mode 100755 index 000000000..d9c9c7304 --- /dev/null +++ b/apis/compute/v1beta2/zz_linuxvirtualmachinescaleset_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this LinuxVirtualMachineScaleSet +func (mg *LinuxVirtualMachineScaleSet) GetTerraformResourceType() string { + return "azurerm_linux_virtual_machine_scale_set" +} + +// GetConnectionDetailsMapping for this LinuxVirtualMachineScaleSet +func (tr *LinuxVirtualMachineScaleSet) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"admin_password": "spec.forProvider.adminPasswordSecretRef", "custom_data": "spec.forProvider.customDataSecretRef", "extension[*].protected_settings": "spec.forProvider.extension[*].protectedSettingsSecretRef"} +} + +// GetObservation of this LinuxVirtualMachineScaleSet +func (tr *LinuxVirtualMachineScaleSet) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this LinuxVirtualMachineScaleSet +func (tr *LinuxVirtualMachineScaleSet) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this LinuxVirtualMachineScaleSet +func (tr *LinuxVirtualMachineScaleSet) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this LinuxVirtualMachineScaleSet +func (tr *LinuxVirtualMachineScaleSet) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this 
LinuxVirtualMachineScaleSet +func (tr *LinuxVirtualMachineScaleSet) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this LinuxVirtualMachineScaleSet +func (tr *LinuxVirtualMachineScaleSet) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this LinuxVirtualMachineScaleSet +func (tr *LinuxVirtualMachineScaleSet) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this LinuxVirtualMachineScaleSet using its observed tfState. +// returns True if there are any spec changes for the resource.
+func (tr *LinuxVirtualMachineScaleSet) LateInitialize(attrs []byte) (bool, error) { + params := &LinuxVirtualMachineScaleSetParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *LinuxVirtualMachineScaleSet) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/compute/v1beta2/zz_linuxvirtualmachinescaleset_types.go b/apis/compute/v1beta2/zz_linuxvirtualmachinescaleset_types.go new file mode 100755 index 000000000..68b6e3685 --- /dev/null +++ b/apis/compute/v1beta2/zz_linuxvirtualmachinescaleset_types.go @@ -0,0 +1,1877 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AutomaticInstanceRepairInitParameters struct { + + // Should the automatic instance repair be enabled on this Virtual Machine Scale Set? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Amount of time (in minutes, between 30 and 90) for which automatic repairs will be delayed. The grace period starts right after the VM is found unhealthy. The time duration should be specified in ISO 8601 format. Defaults to PT30M. 
+ GracePeriod *string `json:"gracePeriod,omitempty" tf:"grace_period,omitempty"` +} + +type AutomaticInstanceRepairObservation struct { + + // Should the automatic instance repair be enabled on this Virtual Machine Scale Set? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Amount of time (in minutes, between 30 and 90) for which automatic repairs will be delayed. The grace period starts right after the VM is found unhealthy. The time duration should be specified in ISO 8601 format. Defaults to PT30M. + GracePeriod *string `json:"gracePeriod,omitempty" tf:"grace_period,omitempty"` +} + +type AutomaticInstanceRepairParameters struct { + + // Should the automatic instance repair be enabled on this Virtual Machine Scale Set? + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` + + // Amount of time (in minutes, between 30 and 90) for which automatic repairs will be delayed. The grace period starts right after the VM is found unhealthy. The time duration should be specified in ISO 8601 format. Defaults to PT30M. + // +kubebuilder:validation:Optional + GracePeriod *string `json:"gracePeriod,omitempty" tf:"grace_period,omitempty"` +} + +type AutomaticOsUpgradePolicyInitParameters struct { + + // Should automatic rollbacks be disabled? + DisableAutomaticRollback *bool `json:"disableAutomaticRollback,omitempty" tf:"disable_automatic_rollback,omitempty"` + + // Should OS Upgrades automatically be applied to Scale Set instances in a rolling fashion when a newer version of the OS Image becomes available? + EnableAutomaticOsUpgrade *bool `json:"enableAutomaticOsUpgrade,omitempty" tf:"enable_automatic_os_upgrade,omitempty"` +} + +type AutomaticOsUpgradePolicyObservation struct { + + // Should automatic rollbacks be disabled? 
+ DisableAutomaticRollback *bool `json:"disableAutomaticRollback,omitempty" tf:"disable_automatic_rollback,omitempty"` + + // Should OS Upgrades automatically be applied to Scale Set instances in a rolling fashion when a newer version of the OS Image becomes available? + EnableAutomaticOsUpgrade *bool `json:"enableAutomaticOsUpgrade,omitempty" tf:"enable_automatic_os_upgrade,omitempty"` +} + +type AutomaticOsUpgradePolicyParameters struct { + + // Should automatic rollbacks be disabled? + // +kubebuilder:validation:Optional + DisableAutomaticRollback *bool `json:"disableAutomaticRollback" tf:"disable_automatic_rollback,omitempty"` + + // Should OS Upgrades automatically be applied to Scale Set instances in a rolling fashion when a newer version of the OS Image becomes available? + // +kubebuilder:validation:Optional + EnableAutomaticOsUpgrade *bool `json:"enableAutomaticOsUpgrade" tf:"enable_automatic_os_upgrade,omitempty"` +} + +type ExtensionInitParameters struct { + + // Should the latest version of the Extension be used at Deployment Time, if one is available? This won't auto-update the extension on existing installation. Defaults to true. + AutoUpgradeMinorVersion *bool `json:"autoUpgradeMinorVersion,omitempty" tf:"auto_upgrade_minor_version,omitempty"` + + // Should the Extension be automatically updated whenever the Publisher releases a new version of this VM Extension? + AutomaticUpgradeEnabled *bool `json:"automaticUpgradeEnabled,omitempty" tf:"automatic_upgrade_enabled,omitempty"` + + // A value which, when different to the previous value can be used to force-run the Extension even if the Extension Configuration hasn't changed. + ForceUpdateTag *string `json:"forceUpdateTag,omitempty" tf:"force_update_tag,omitempty"` + + // The name for the Virtual Machine Scale Set Extension. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A protected_settings_from_key_vault block as defined below. 
+ ProtectedSettingsFromKeyVault *ProtectedSettingsFromKeyVaultInitParameters `json:"protectedSettingsFromKeyVault,omitempty" tf:"protected_settings_from_key_vault,omitempty"` + + // An ordered list of Extension names which this should be provisioned after. + ProvisionAfterExtensions []*string `json:"provisionAfterExtensions,omitempty" tf:"provision_after_extensions,omitempty"` + + // Specifies the Publisher of the Extension. + Publisher *string `json:"publisher,omitempty" tf:"publisher,omitempty"` + + // A JSON String which specifies Settings for the Extension. + Settings *string `json:"settings,omitempty" tf:"settings,omitempty"` + + // Specifies the Type of the Extension. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // Specifies the version of the extension to use, available versions can be found using the Azure CLI. + TypeHandlerVersion *string `json:"typeHandlerVersion,omitempty" tf:"type_handler_version,omitempty"` +} + +type ExtensionObservation struct { + + // Should the latest version of the Extension be used at Deployment Time, if one is available? This won't auto-update the extension on existing installation. Defaults to true. + AutoUpgradeMinorVersion *bool `json:"autoUpgradeMinorVersion,omitempty" tf:"auto_upgrade_minor_version,omitempty"` + + // Should the Extension be automatically updated whenever the Publisher releases a new version of this VM Extension? + AutomaticUpgradeEnabled *bool `json:"automaticUpgradeEnabled,omitempty" tf:"automatic_upgrade_enabled,omitempty"` + + // A value which, when different to the previous value can be used to force-run the Extension even if the Extension Configuration hasn't changed. + ForceUpdateTag *string `json:"forceUpdateTag,omitempty" tf:"force_update_tag,omitempty"` + + // The name for the Virtual Machine Scale Set Extension. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A protected_settings_from_key_vault block as defined below. 
+ ProtectedSettingsFromKeyVault *ProtectedSettingsFromKeyVaultObservation `json:"protectedSettingsFromKeyVault,omitempty" tf:"protected_settings_from_key_vault,omitempty"` + + // An ordered list of Extension names which this should be provisioned after. + ProvisionAfterExtensions []*string `json:"provisionAfterExtensions,omitempty" tf:"provision_after_extensions,omitempty"` + + // Specifies the Publisher of the Extension. + Publisher *string `json:"publisher,omitempty" tf:"publisher,omitempty"` + + // A JSON String which specifies Settings for the Extension. + Settings *string `json:"settings,omitempty" tf:"settings,omitempty"` + + // Specifies the Type of the Extension. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // Specifies the version of the extension to use, available versions can be found using the Azure CLI. + TypeHandlerVersion *string `json:"typeHandlerVersion,omitempty" tf:"type_handler_version,omitempty"` +} + +type ExtensionParameters struct { + + // Should the latest version of the Extension be used at Deployment Time, if one is available? This won't auto-update the extension on existing installation. Defaults to true. + // +kubebuilder:validation:Optional + AutoUpgradeMinorVersion *bool `json:"autoUpgradeMinorVersion,omitempty" tf:"auto_upgrade_minor_version,omitempty"` + + // Should the Extension be automatically updated whenever the Publisher releases a new version of this VM Extension? + // +kubebuilder:validation:Optional + AutomaticUpgradeEnabled *bool `json:"automaticUpgradeEnabled,omitempty" tf:"automatic_upgrade_enabled,omitempty"` + + // A value which, when different to the previous value can be used to force-run the Extension even if the Extension Configuration hasn't changed. + // +kubebuilder:validation:Optional + ForceUpdateTag *string `json:"forceUpdateTag,omitempty" tf:"force_update_tag,omitempty"` + + // The name for the Virtual Machine Scale Set Extension. 
+ // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // A protected_settings_from_key_vault block as defined below. + // +kubebuilder:validation:Optional + ProtectedSettingsFromKeyVault *ProtectedSettingsFromKeyVaultParameters `json:"protectedSettingsFromKeyVault,omitempty" tf:"protected_settings_from_key_vault,omitempty"` + + // A JSON String which specifies Sensitive Settings (such as Passwords) for the Extension. + // +kubebuilder:validation:Optional + ProtectedSettingsSecretRef *v1.SecretKeySelector `json:"protectedSettingsSecretRef,omitempty" tf:"-"` + + // An ordered list of Extension names which this should be provisioned after. + // +kubebuilder:validation:Optional + ProvisionAfterExtensions []*string `json:"provisionAfterExtensions,omitempty" tf:"provision_after_extensions,omitempty"` + + // Specifies the Publisher of the Extension. + // +kubebuilder:validation:Optional + Publisher *string `json:"publisher" tf:"publisher,omitempty"` + + // A JSON String which specifies Settings for the Extension. + // +kubebuilder:validation:Optional + Settings *string `json:"settings,omitempty" tf:"settings,omitempty"` + + // Specifies the Type of the Extension. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` + + // Specifies the version of the extension to use, available versions can be found using the Azure CLI. + // +kubebuilder:validation:Optional + TypeHandlerVersion *string `json:"typeHandlerVersion" tf:"type_handler_version,omitempty"` +} + +type GalleryApplicationsInitParameters struct { + ConfigurationReferenceBlobURI *string `json:"configurationReferenceBlobUri,omitempty" tf:"configuration_reference_blob_uri,omitempty"` + + // Specifies the order in which the packages have to be installed. Possible values are between 0 and 2,147,483,647. Changing this forces a new resource to be created. 
+ Order *float64 `json:"order,omitempty" tf:"order,omitempty"` + + // The ID of the Linux Virtual Machine Scale Set. + PackageReferenceID *string `json:"packageReferenceId,omitempty" tf:"package_reference_id,omitempty"` + + // The IP Tag associated with the Public IP, such as SQL or Storage. Changing this forces a new resource to be created. + Tag *string `json:"tag,omitempty" tf:"tag,omitempty"` +} + +type GalleryApplicationsObservation struct { + ConfigurationReferenceBlobURI *string `json:"configurationReferenceBlobUri,omitempty" tf:"configuration_reference_blob_uri,omitempty"` + + // Specifies the order in which the packages have to be installed. Possible values are between 0 and 2,147,483,647. Changing this forces a new resource to be created. + Order *float64 `json:"order,omitempty" tf:"order,omitempty"` + + // The ID of the Linux Virtual Machine Scale Set. + PackageReferenceID *string `json:"packageReferenceId,omitempty" tf:"package_reference_id,omitempty"` + + // The IP Tag associated with the Public IP, such as SQL or Storage. Changing this forces a new resource to be created. + Tag *string `json:"tag,omitempty" tf:"tag,omitempty"` +} + +type GalleryApplicationsParameters struct { + + // +kubebuilder:validation:Optional + ConfigurationReferenceBlobURI *string `json:"configurationReferenceBlobUri,omitempty" tf:"configuration_reference_blob_uri,omitempty"` + + // Specifies the order in which the packages have to be installed. Possible values are between 0 and 2,147,483,647. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Order *float64 `json:"order,omitempty" tf:"order,omitempty"` + + // The ID of the Linux Virtual Machine Scale Set. + // +kubebuilder:validation:Optional + PackageReferenceID *string `json:"packageReferenceId" tf:"package_reference_id,omitempty"` + + // The IP Tag associated with the Public IP, such as SQL or Storage. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + Tag *string `json:"tag,omitempty" tf:"tag,omitempty"` +} + +type IPConfigurationInitParameters struct { + + // A list of Backend Address Pools ID's from a Application Gateway which this Virtual Machine Scale Set should be connected to. + // +listType=set + ApplicationGatewayBackendAddressPoolIds []*string `json:"applicationGatewayBackendAddressPoolIds,omitempty" tf:"application_gateway_backend_address_pool_ids,omitempty"` + + // A list of Application Security Group ID's which this Virtual Machine Scale Set should be connected to. + // +listType=set + ApplicationSecurityGroupIds []*string `json:"applicationSecurityGroupIds,omitempty" tf:"application_security_group_ids,omitempty"` + + // A list of Backend Address Pools ID's from a Load Balancer which this Virtual Machine Scale Set should be connected to. + // +listType=set + LoadBalancerBackendAddressPoolIds []*string `json:"loadBalancerBackendAddressPoolIds,omitempty" tf:"load_balancer_backend_address_pool_ids,omitempty"` + + // A list of NAT Rule ID's from a Load Balancer which this Virtual Machine Scale Set should be connected to. + // +listType=set + LoadBalancerInboundNATRulesIds []*string `json:"loadBalancerInboundNatRulesIds,omitempty" tf:"load_balancer_inbound_nat_rules_ids,omitempty"` + + // The Name of the Public IP Address Configuration. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Is this the Primary IP Configuration? + Primary *bool `json:"primary,omitempty" tf:"primary,omitempty"` + + // A public_ip_address block as defined below. + PublicIPAddress []PublicIPAddressInitParameters `json:"publicIpAddress,omitempty" tf:"public_ip_address,omitempty"` + + // The ID of the Subnet which this IP Configuration should be connected to. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // Specifies the version of the image used to create the virtual machines. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type IPConfigurationObservation struct { + + // A list of Backend Address Pools ID's from a Application Gateway which this Virtual Machine Scale Set should be connected to. + // +listType=set + ApplicationGatewayBackendAddressPoolIds []*string `json:"applicationGatewayBackendAddressPoolIds,omitempty" tf:"application_gateway_backend_address_pool_ids,omitempty"` + + // A list of Application Security Group ID's which this Virtual Machine Scale Set should be connected to. + // +listType=set + ApplicationSecurityGroupIds []*string `json:"applicationSecurityGroupIds,omitempty" tf:"application_security_group_ids,omitempty"` + + // A list of Backend Address Pools ID's from a Load Balancer which this Virtual Machine Scale Set should be connected to. + // +listType=set + LoadBalancerBackendAddressPoolIds []*string `json:"loadBalancerBackendAddressPoolIds,omitempty" tf:"load_balancer_backend_address_pool_ids,omitempty"` + + // A list of NAT Rule ID's from a Load Balancer which this Virtual Machine Scale Set should be connected to. 
+ // +listType=set + LoadBalancerInboundNATRulesIds []*string `json:"loadBalancerInboundNatRulesIds,omitempty" tf:"load_balancer_inbound_nat_rules_ids,omitempty"` + + // The Name of the Public IP Address Configuration. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Is this the Primary IP Configuration? + Primary *bool `json:"primary,omitempty" tf:"primary,omitempty"` + + // A public_ip_address block as defined below. + PublicIPAddress []PublicIPAddressObservation `json:"publicIpAddress,omitempty" tf:"public_ip_address,omitempty"` + + // The ID of the Subnet which this IP Configuration should be connected to. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Specifies the version of the image used to create the virtual machines. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type IPConfigurationParameters struct { + + // A list of Backend Address Pools ID's from a Application Gateway which this Virtual Machine Scale Set should be connected to. + // +kubebuilder:validation:Optional + // +listType=set + ApplicationGatewayBackendAddressPoolIds []*string `json:"applicationGatewayBackendAddressPoolIds,omitempty" tf:"application_gateway_backend_address_pool_ids,omitempty"` + + // A list of Application Security Group ID's which this Virtual Machine Scale Set should be connected to. + // +kubebuilder:validation:Optional + // +listType=set + ApplicationSecurityGroupIds []*string `json:"applicationSecurityGroupIds,omitempty" tf:"application_security_group_ids,omitempty"` + + // A list of Backend Address Pools ID's from a Load Balancer which this Virtual Machine Scale Set should be connected to. 
+ // +kubebuilder:validation:Optional + // +listType=set + LoadBalancerBackendAddressPoolIds []*string `json:"loadBalancerBackendAddressPoolIds,omitempty" tf:"load_balancer_backend_address_pool_ids,omitempty"` + + // A list of NAT Rule ID's from a Load Balancer which this Virtual Machine Scale Set should be connected to. + // +kubebuilder:validation:Optional + // +listType=set + LoadBalancerInboundNATRulesIds []*string `json:"loadBalancerInboundNatRulesIds,omitempty" tf:"load_balancer_inbound_nat_rules_ids,omitempty"` + + // The Name of the Public IP Address Configuration. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Is this the Primary IP Configuration? + // +kubebuilder:validation:Optional + Primary *bool `json:"primary,omitempty" tf:"primary,omitempty"` + + // A public_ip_address block as defined below. + // +kubebuilder:validation:Optional + PublicIPAddress []PublicIPAddressParameters `json:"publicIpAddress,omitempty" tf:"public_ip_address,omitempty"` + + // The ID of the Subnet which this IP Configuration should be connected to. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // Specifies the version of the image used to create the virtual machines. 
+ // +kubebuilder:validation:Optional + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type IPTagInitParameters struct { + + // The IP Tag associated with the Public IP, such as SQL or Storage. Changing this forces a new resource to be created. + Tag *string `json:"tag,omitempty" tf:"tag,omitempty"` + + // The Type of IP Tag, such as FirstPartyUsage. Changing this forces a new resource to be created. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IPTagObservation struct { + + // The IP Tag associated with the Public IP, such as SQL or Storage. Changing this forces a new resource to be created. + Tag *string `json:"tag,omitempty" tf:"tag,omitempty"` + + // The Type of IP Tag, such as FirstPartyUsage. Changing this forces a new resource to be created. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IPTagParameters struct { + + // The IP Tag associated with the Public IP, such as SQL or Storage. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Tag *string `json:"tag" tf:"tag,omitempty"` + + // The Type of IP Tag, such as FirstPartyUsage. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type LinuxVirtualMachineScaleSetAdditionalCapabilitiesInitParameters struct { + + // Should the capacity to enable Data Disks of the UltraSSD_LRS storage account type be supported on this Virtual Machine Scale Set? Possible values are true or false. Defaults to false. Changing this forces a new resource to be created. + UltraSsdEnabled *bool `json:"ultraSsdEnabled,omitempty" tf:"ultra_ssd_enabled,omitempty"` +} + +type LinuxVirtualMachineScaleSetAdditionalCapabilitiesObservation struct { + + // Should the capacity to enable Data Disks of the UltraSSD_LRS storage account type be supported on this Virtual Machine Scale Set? Possible values are true or false. Defaults to false. 
Changing this forces a new resource to be created. + UltraSsdEnabled *bool `json:"ultraSsdEnabled,omitempty" tf:"ultra_ssd_enabled,omitempty"` +} + +type LinuxVirtualMachineScaleSetAdditionalCapabilitiesParameters struct { + + // Should the capacity to enable Data Disks of the UltraSSD_LRS storage account type be supported on this Virtual Machine Scale Set? Possible values are true or false. Defaults to false. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + UltraSsdEnabled *bool `json:"ultraSsdEnabled,omitempty" tf:"ultra_ssd_enabled,omitempty"` +} + +type LinuxVirtualMachineScaleSetAdminSSHKeyInitParameters struct { + + // The Public Key which should be used for authentication, which needs to be at least 2048-bit and in ssh-rsa format. + PublicKey *string `json:"publicKey,omitempty" tf:"public_key,omitempty"` + + // The Username for which this Public SSH Key should be configured. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type LinuxVirtualMachineScaleSetAdminSSHKeyObservation struct { + + // The Public Key which should be used for authentication, which needs to be at least 2048-bit and in ssh-rsa format. + PublicKey *string `json:"publicKey,omitempty" tf:"public_key,omitempty"` + + // The Username for which this Public SSH Key should be configured. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type LinuxVirtualMachineScaleSetAdminSSHKeyParameters struct { + + // The Public Key which should be used for authentication, which needs to be at least 2048-bit and in ssh-rsa format. + // +kubebuilder:validation:Optional + PublicKey *string `json:"publicKey" tf:"public_key,omitempty"` + + // The Username for which this Public SSH Key should be configured. 
+ // +kubebuilder:validation:Optional + Username *string `json:"username" tf:"username,omitempty"` +} + +type LinuxVirtualMachineScaleSetBootDiagnosticsInitParameters struct { + + // The Primary/Secondary Endpoint for the Azure Storage Account which should be used to store Boot Diagnostics, including Console Output and Screenshots from the Hypervisor. + StorageAccountURI *string `json:"storageAccountUri,omitempty" tf:"storage_account_uri,omitempty"` +} + +type LinuxVirtualMachineScaleSetBootDiagnosticsObservation struct { + + // The Primary/Secondary Endpoint for the Azure Storage Account which should be used to store Boot Diagnostics, including Console Output and Screenshots from the Hypervisor. + StorageAccountURI *string `json:"storageAccountUri,omitempty" tf:"storage_account_uri,omitempty"` +} + +type LinuxVirtualMachineScaleSetBootDiagnosticsParameters struct { + + // The Primary/Secondary Endpoint for the Azure Storage Account which should be used to store Boot Diagnostics, including Console Output and Screenshots from the Hypervisor. + // +kubebuilder:validation:Optional + StorageAccountURI *string `json:"storageAccountUri,omitempty" tf:"storage_account_uri,omitempty"` +} + +type LinuxVirtualMachineScaleSetDataDiskInitParameters struct { + + // The type of Caching which should be used for this Data Disk. Possible values are None, ReadOnly and ReadWrite. + Caching *string `json:"caching,omitempty" tf:"caching,omitempty"` + + // The create option which should be used for this Data Disk. Possible values are Empty and FromImage. Defaults to Empty. (FromImage should only be used if the source image includes data disks). + CreateOption *string `json:"createOption,omitempty" tf:"create_option,omitempty"` + + // The ID of the Disk Encryption Set which should be used to encrypt this OS Disk. Conflicts with secure_vm_disk_encryption_set_id. Changing this forces a new resource to be created. 
+ DiskEncryptionSetID *string `json:"diskEncryptionSetId,omitempty" tf:"disk_encryption_set_id,omitempty"` + + // The size of the Data Disk which should be created. + DiskSizeGb *float64 `json:"diskSizeGb,omitempty" tf:"disk_size_gb,omitempty"` + + // The Logical Unit Number of the Data Disk, which must be unique within the Virtual Machine. + Lun *float64 `json:"lun,omitempty" tf:"lun,omitempty"` + + // The name of the Data Disk. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Type of Storage Account which should back this Data Disk. Possible values include Standard_LRS, StandardSSD_LRS, StandardSSD_ZRS, Premium_LRS, PremiumV2_LRS, Premium_ZRS and UltraSSD_LRS. + StorageAccountType *string `json:"storageAccountType,omitempty" tf:"storage_account_type,omitempty"` + + // Specifies the Read-Write IOPS for this Data Disk. Only settable when storage_account_type is PremiumV2_LRS or UltraSSD_LRS. + UltraSsdDiskIopsReadWrite *float64 `json:"ultraSsdDiskIopsReadWrite,omitempty" tf:"ultra_ssd_disk_iops_read_write,omitempty"` + + // Specifies the bandwidth in MB per second for this Data Disk. Only settable when storage_account_type is PremiumV2_LRS or UltraSSD_LRS. + UltraSsdDiskMbpsReadWrite *float64 `json:"ultraSsdDiskMbpsReadWrite,omitempty" tf:"ultra_ssd_disk_mbps_read_write,omitempty"` + + // Should Write Accelerator be Enabled for this OS Disk? Defaults to false. + WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty" tf:"write_accelerator_enabled,omitempty"` +} + +type LinuxVirtualMachineScaleSetDataDiskObservation struct { + + // The type of Caching which should be used for this Data Disk. Possible values are None, ReadOnly and ReadWrite. + Caching *string `json:"caching,omitempty" tf:"caching,omitempty"` + + // The create option which should be used for this Data Disk. Possible values are Empty and FromImage. Defaults to Empty. (FromImage should only be used if the source image includes data disks). 
+ CreateOption *string `json:"createOption,omitempty" tf:"create_option,omitempty"` + + // The ID of the Disk Encryption Set which should be used to encrypt this OS Disk. Conflicts with secure_vm_disk_encryption_set_id. Changing this forces a new resource to be created. + DiskEncryptionSetID *string `json:"diskEncryptionSetId,omitempty" tf:"disk_encryption_set_id,omitempty"` + + // The size of the Data Disk which should be created. + DiskSizeGb *float64 `json:"diskSizeGb,omitempty" tf:"disk_size_gb,omitempty"` + + // The Logical Unit Number of the Data Disk, which must be unique within the Virtual Machine. + Lun *float64 `json:"lun,omitempty" tf:"lun,omitempty"` + + // The name of the Data Disk. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Type of Storage Account which should back this Data Disk. Possible values include Standard_LRS, StandardSSD_LRS, StandardSSD_ZRS, Premium_LRS, PremiumV2_LRS, Premium_ZRS and UltraSSD_LRS. + StorageAccountType *string `json:"storageAccountType,omitempty" tf:"storage_account_type,omitempty"` + + // Specifies the Read-Write IOPS for this Data Disk. Only settable when storage_account_type is PremiumV2_LRS or UltraSSD_LRS. + UltraSsdDiskIopsReadWrite *float64 `json:"ultraSsdDiskIopsReadWrite,omitempty" tf:"ultra_ssd_disk_iops_read_write,omitempty"` + + // Specifies the bandwidth in MB per second for this Data Disk. Only settable when storage_account_type is PremiumV2_LRS or UltraSSD_LRS. + UltraSsdDiskMbpsReadWrite *float64 `json:"ultraSsdDiskMbpsReadWrite,omitempty" tf:"ultra_ssd_disk_mbps_read_write,omitempty"` + + // Should Write Accelerator be Enabled for this OS Disk? Defaults to false. + WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty" tf:"write_accelerator_enabled,omitempty"` +} + +type LinuxVirtualMachineScaleSetDataDiskParameters struct { + + // The type of Caching which should be used for this Data Disk. Possible values are None, ReadOnly and ReadWrite. 
+ // +kubebuilder:validation:Optional + Caching *string `json:"caching" tf:"caching,omitempty"` + + // The create option which should be used for this Data Disk. Possible values are Empty and FromImage. Defaults to Empty. (FromImage should only be used if the source image includes data disks). + // +kubebuilder:validation:Optional + CreateOption *string `json:"createOption,omitempty" tf:"create_option,omitempty"` + + // The ID of the Disk Encryption Set which should be used to encrypt this OS Disk. Conflicts with secure_vm_disk_encryption_set_id. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + DiskEncryptionSetID *string `json:"diskEncryptionSetId,omitempty" tf:"disk_encryption_set_id,omitempty"` + + // The size of the Data Disk which should be created. + // +kubebuilder:validation:Optional + DiskSizeGb *float64 `json:"diskSizeGb" tf:"disk_size_gb,omitempty"` + + // The Logical Unit Number of the Data Disk, which must be unique within the Virtual Machine. + // +kubebuilder:validation:Optional + Lun *float64 `json:"lun" tf:"lun,omitempty"` + + // The name of the Data Disk. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Type of Storage Account which should back this Data Disk. Possible values include Standard_LRS, StandardSSD_LRS, StandardSSD_ZRS, Premium_LRS, PremiumV2_LRS, Premium_ZRS and UltraSSD_LRS. + // +kubebuilder:validation:Optional + StorageAccountType *string `json:"storageAccountType" tf:"storage_account_type,omitempty"` + + // Specifies the Read-Write IOPS for this Data Disk. Only settable when storage_account_type is PremiumV2_LRS or UltraSSD_LRS. + // +kubebuilder:validation:Optional + UltraSsdDiskIopsReadWrite *float64 `json:"ultraSsdDiskIopsReadWrite,omitempty" tf:"ultra_ssd_disk_iops_read_write,omitempty"` + + // Specifies the bandwidth in MB per second for this Data Disk. Only settable when storage_account_type is PremiumV2_LRS or UltraSSD_LRS. 
+ // +kubebuilder:validation:Optional + UltraSsdDiskMbpsReadWrite *float64 `json:"ultraSsdDiskMbpsReadWrite,omitempty" tf:"ultra_ssd_disk_mbps_read_write,omitempty"` + + // Should Write Accelerator be Enabled for this OS Disk? Defaults to false. + // +kubebuilder:validation:Optional + WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty" tf:"write_accelerator_enabled,omitempty"` +} + +type LinuxVirtualMachineScaleSetGalleryApplicationInitParameters struct { + + // Specifies the URI to an Azure Blob that will replace the default configuration for the package if provided. Changing this forces a new resource to be created. + ConfigurationBlobURI *string `json:"configurationBlobUri,omitempty" tf:"configuration_blob_uri,omitempty"` + + // Specifies the order in which the packages have to be installed. Possible values are between 0 and 2,147,483,647. Changing this forces a new resource to be created. + Order *float64 `json:"order,omitempty" tf:"order,omitempty"` + + // Specifies a passthrough value for more generic context. This field can be any valid string value. Changing this forces a new resource to be created. + Tag *string `json:"tag,omitempty" tf:"tag,omitempty"` + + // Specifies the Gallery Application Version resource ID. Changing this forces a new resource to be created. + VersionID *string `json:"versionId,omitempty" tf:"version_id,omitempty"` +} + +type LinuxVirtualMachineScaleSetGalleryApplicationObservation struct { + + // Specifies the URI to an Azure Blob that will replace the default configuration for the package if provided. Changing this forces a new resource to be created. + ConfigurationBlobURI *string `json:"configurationBlobUri,omitempty" tf:"configuration_blob_uri,omitempty"` + + // Specifies the order in which the packages have to be installed. Possible values are between 0 and 2,147,483,647. Changing this forces a new resource to be created. 
+ Order *float64 `json:"order,omitempty" tf:"order,omitempty"` + + // Specifies a passthrough value for more generic context. This field can be any valid string value. Changing this forces a new resource to be created. + Tag *string `json:"tag,omitempty" tf:"tag,omitempty"` + + // Specifies the Gallery Application Version resource ID. Changing this forces a new resource to be created. + VersionID *string `json:"versionId,omitempty" tf:"version_id,omitempty"` +} + +type LinuxVirtualMachineScaleSetGalleryApplicationParameters struct { + + // Specifies the URI to an Azure Blob that will replace the default configuration for the package if provided. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ConfigurationBlobURI *string `json:"configurationBlobUri,omitempty" tf:"configuration_blob_uri,omitempty"` + + // Specifies the order in which the packages have to be installed. Possible values are between 0 and 2,147,483,647. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Order *float64 `json:"order,omitempty" tf:"order,omitempty"` + + // Specifies a passthrough value for more generic context. This field can be any valid string value. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Tag *string `json:"tag,omitempty" tf:"tag,omitempty"` + + // Specifies the Gallery Application Version resource ID. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + VersionID *string `json:"versionId" tf:"version_id,omitempty"` +} + +type LinuxVirtualMachineScaleSetIdentityInitParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Linux Virtual Machine Scale Set. 
+ // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Linux Virtual Machine Scale Set. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type LinuxVirtualMachineScaleSetIdentityObservation struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Linux Virtual Machine Scale Set. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Principal ID associated with this Managed Service Identity. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID associated with this Managed Service Identity. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Linux Virtual Machine Scale Set. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type LinuxVirtualMachineScaleSetIdentityParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Linux Virtual Machine Scale Set. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Linux Virtual Machine Scale Set. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type LinuxVirtualMachineScaleSetInitParameters struct { + + // An additional_capabilities block as defined below. 
+ AdditionalCapabilities *LinuxVirtualMachineScaleSetAdditionalCapabilitiesInitParameters `json:"additionalCapabilities,omitempty" tf:"additional_capabilities,omitempty"` + + // One or more admin_ssh_key blocks as defined below. + AdminSSHKey []LinuxVirtualMachineScaleSetAdminSSHKeyInitParameters `json:"adminSshKey,omitempty" tf:"admin_ssh_key,omitempty"` + + // The username of the local administrator on each Virtual Machine Scale Set instance. Changing this forces a new resource to be created. + AdminUsername *string `json:"adminUsername,omitempty" tf:"admin_username,omitempty"` + + // An automatic_instance_repair block as defined below. To enable the automatic instance repair, this Virtual Machine Scale Set must have a valid health_probe_id or an Application Health Extension. + AutomaticInstanceRepair *AutomaticInstanceRepairInitParameters `json:"automaticInstanceRepair,omitempty" tf:"automatic_instance_repair,omitempty"` + + // An automatic_os_upgrade_policy block as defined below. This can only be specified when upgrade_mode is set to either Automatic or Rolling. + AutomaticOsUpgradePolicy *AutomaticOsUpgradePolicyInitParameters `json:"automaticOsUpgradePolicy,omitempty" tf:"automatic_os_upgrade_policy,omitempty"` + + // A boot_diagnostics block as defined below. + BootDiagnostics *LinuxVirtualMachineScaleSetBootDiagnosticsInitParameters `json:"bootDiagnostics,omitempty" tf:"boot_diagnostics,omitempty"` + + // Specifies the ID of the Capacity Reservation Group which the Virtual Machine Scale Set should be allocated to. Changing this forces a new resource to be created. + CapacityReservationGroupID *string `json:"capacityReservationGroupId,omitempty" tf:"capacity_reservation_group_id,omitempty"` + + // The prefix which should be used for the name of the Virtual Machines in this Scale Set. If unspecified this defaults to the value for the name field. If the value of the name field is not a valid computer_name_prefix, then you must specify computer_name_prefix. 
Changing this forces a new resource to be created. + ComputerNamePrefix *string `json:"computerNamePrefix,omitempty" tf:"computer_name_prefix,omitempty"` + + // One or more data_disk blocks as defined below. + DataDisk []LinuxVirtualMachineScaleSetDataDiskInitParameters `json:"dataDisk,omitempty" tf:"data_disk,omitempty"` + + // Should Password Authentication be disabled on this Virtual Machine Scale Set? Defaults to true. + DisablePasswordAuthentication *bool `json:"disablePasswordAuthentication,omitempty" tf:"disable_password_authentication,omitempty"` + + // Should Virtual Machine Extensions be run on Overprovisioned Virtual Machines in the Scale Set? Defaults to false. + DoNotRunExtensionsOnOverprovisionedMachines *bool `json:"doNotRunExtensionsOnOverprovisionedMachines,omitempty" tf:"do_not_run_extensions_on_overprovisioned_machines,omitempty"` + + // Specifies the Edge Zone within the Azure Region where this Linux Virtual Machine Scale Set should exist. Changing this forces a new Linux Virtual Machine Scale Set to be created. + EdgeZone *string `json:"edgeZone,omitempty" tf:"edge_zone,omitempty"` + + // Should all of the disks (including the temp disk) attached to this Virtual Machine be encrypted by enabling Encryption at Host? + EncryptionAtHostEnabled *bool `json:"encryptionAtHostEnabled,omitempty" tf:"encryption_at_host_enabled,omitempty"` + + // Specifies the eviction policy for Virtual Machines in this Scale Set. Possible values are Deallocate and Delete. Changing this forces a new resource to be created. + EvictionPolicy *string `json:"evictionPolicy,omitempty" tf:"eviction_policy,omitempty"` + + // One or more extension blocks as defined below + Extension []ExtensionInitParameters `json:"extension,omitempty" tf:"extension,omitempty"` + + // Should extension operations be allowed on the Virtual Machine Scale Set? Possible values are true or false. Defaults to true. Changing this forces a new Linux Virtual Machine Scale Set to be created. 
+ ExtensionOperationsEnabled *bool `json:"extensionOperationsEnabled,omitempty" tf:"extension_operations_enabled,omitempty"` + + // Specifies the duration allocated for all extensions to start. The time duration should be between 15 minutes and 120 minutes (inclusive) and should be specified in ISO 8601 format. Defaults to PT1H30M. + ExtensionsTimeBudget *string `json:"extensionsTimeBudget,omitempty" tf:"extensions_time_budget,omitempty"` + + // One or more gallery_application blocks as defined below. + GalleryApplication []LinuxVirtualMachineScaleSetGalleryApplicationInitParameters `json:"galleryApplication,omitempty" tf:"gallery_application,omitempty"` + + GalleryApplications []GalleryApplicationsInitParameters `json:"galleryApplications,omitempty" tf:"gallery_applications,omitempty"` + + // The ID of a Load Balancer Probe which should be used to determine the health of an instance. This is Required and can only be specified when upgrade_mode is set to Automatic or Rolling. + HealthProbeID *string `json:"healthProbeId,omitempty" tf:"health_probe_id,omitempty"` + + // Specifies the ID of the dedicated host group that the virtual machine scale set resides in. Changing this forces a new resource to be created. + HostGroupID *string `json:"hostGroupId,omitempty" tf:"host_group_id,omitempty"` + + // An identity block as defined below. + Identity *LinuxVirtualMachineScaleSetIdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The number of Virtual Machines in the Scale Set. Defaults to 0. + Instances *float64 `json:"instances,omitempty" tf:"instances,omitempty"` + + // The Azure location where the Linux Virtual Machine Scale Set should exist. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The maximum price you're willing to pay for each Virtual Machine in this Scale Set, in US Dollars; which must be greater than the current spot price. 
If this bid price falls below the current spot price the Virtual Machines in the Scale Set will be evicted using the eviction_policy. Defaults to -1, which means that each Virtual Machine in this Scale Set should not be evicted for price reasons. + MaxBidPrice *float64 `json:"maxBidPrice,omitempty" tf:"max_bid_price,omitempty"` + + // One or more network_interface blocks as defined below. + NetworkInterface []NetworkInterfaceInitParameters `json:"networkInterface,omitempty" tf:"network_interface,omitempty"` + + // An os_disk block as defined below. + OsDisk *LinuxVirtualMachineScaleSetOsDiskInitParameters `json:"osDisk,omitempty" tf:"os_disk,omitempty"` + + // Should Azure over-provision Virtual Machines in this Scale Set? This means that multiple Virtual Machines will be provisioned and Azure will keep the instances which become available first - which improves provisioning success rates and improves deployment time. You're not billed for these over-provisioned VM's and they don't count towards the Subscription Quota. Defaults to true. + Overprovision *bool `json:"overprovision,omitempty" tf:"overprovision,omitempty"` + + // A plan block as defined below. Changing this forces a new resource to be created. + Plan *LinuxVirtualMachineScaleSetPlanInitParameters `json:"plan,omitempty" tf:"plan,omitempty"` + + // Specifies the number of fault domains that are used by this Linux Virtual Machine Scale Set. Changing this forces a new resource to be created. + PlatformFaultDomainCount *float64 `json:"platformFaultDomainCount,omitempty" tf:"platform_fault_domain_count,omitempty"` + + // The Priority of this Virtual Machine Scale Set. Possible values are Regular and Spot. Defaults to Regular. Changing this value forces a new resource. + Priority *string `json:"priority,omitempty" tf:"priority,omitempty"` + + // Should the Azure VM Agent be provisioned on each Virtual Machine in the Scale Set? Defaults to true. Changing this value forces a new resource to be created. 
+ ProvisionVMAgent *bool `json:"provisionVmAgent,omitempty" tf:"provision_vm_agent,omitempty"` + + // The ID of the Proximity Placement Group in which the Virtual Machine Scale Set should be assigned to. Changing this forces a new resource to be created. + ProximityPlacementGroupID *string `json:"proximityPlacementGroupId,omitempty" tf:"proximity_placement_group_id,omitempty"` + + // A rolling_upgrade_policy block as defined below. This is Required and can only be specified when upgrade_mode is set to Automatic or Rolling. Changing this forces a new resource to be created. + RollingUpgradePolicy *RollingUpgradePolicyInitParameters `json:"rollingUpgradePolicy,omitempty" tf:"rolling_upgrade_policy,omitempty"` + + // A scale_in block as defined below. + ScaleIn *ScaleInInitParameters `json:"scaleIn,omitempty" tf:"scale_in,omitempty"` + + // One or more secret blocks as defined below. + Secret []LinuxVirtualMachineScaleSetSecretInitParameters `json:"secret,omitempty" tf:"secret,omitempty"` + + // Specifies whether secure boot should be enabled on the virtual machine. Changing this forces a new resource to be created. + SecureBootEnabled *bool `json:"secureBootEnabled,omitempty" tf:"secure_boot_enabled,omitempty"` + + // Should this Virtual Machine Scale Set be limited to a Single Placement Group, which means the number of instances will be capped at 100 Virtual Machines. Defaults to true. + SinglePlacementGroup *bool `json:"singlePlacementGroup,omitempty" tf:"single_placement_group,omitempty"` + + // The Virtual Machine SKU for the Scale Set, such as Standard_F2. + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // The ID of an Image which each Virtual Machine in this Scale Set should be based on. Possible Image ID types include Image ID, Shared Image ID, Shared Image Version ID, Community Gallery Image ID, Community Gallery Image Version ID, Shared Gallery Image ID and Shared Gallery Image Version ID. 
+ SourceImageID *string `json:"sourceImageId,omitempty" tf:"source_image_id,omitempty"` + + // A source_image_reference block as defined below. + SourceImageReference *LinuxVirtualMachineScaleSetSourceImageReferenceInitParameters `json:"sourceImageReference,omitempty" tf:"source_image_reference,omitempty"` + + // A spot_restore block as defined below. + SpotRestore *SpotRestoreInitParameters `json:"spotRestore,omitempty" tf:"spot_restore,omitempty"` + + // A mapping of tags which should be assigned to this Virtual Machine Scale Set. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A terminate_notification block as defined below. + TerminateNotification *TerminateNotificationInitParameters `json:"terminateNotification,omitempty" tf:"terminate_notification,omitempty"` + + // A termination_notification block as defined below. + TerminationNotification *LinuxVirtualMachineScaleSetTerminationNotificationInitParameters `json:"terminationNotification,omitempty" tf:"termination_notification,omitempty"` + + // Specifies how Upgrades (e.g. changing the Image/SKU) should be performed to Virtual Machine Instances. Possible values are Automatic, Manual and Rolling. Defaults to Manual. Changing this forces a new resource to be created. + UpgradeMode *string `json:"upgradeMode,omitempty" tf:"upgrade_mode,omitempty"` + + // The Base64-Encoded User Data which should be used for this Virtual Machine Scale Set. + UserData *string `json:"userData,omitempty" tf:"user_data,omitempty"` + + // Specifies whether vTPM should be enabled on the virtual machine. Changing this forces a new resource to be created. + VtpmEnabled *bool `json:"vtpmEnabled,omitempty" tf:"vtpm_enabled,omitempty"` + + // Should the Virtual Machines in this Scale Set be strictly evenly distributed across Availability Zones? Defaults to false. Changing this forces a new resource to be created. 
+ ZoneBalance *bool `json:"zoneBalance,omitempty" tf:"zone_balance,omitempty"` + + // Specifies a list of Availability Zones in which this Linux Virtual Machine Scale Set should be located. Changing this forces a new Linux Virtual Machine Scale Set to be created. + // +listType=set + Zones []*string `json:"zones,omitempty" tf:"zones,omitempty"` +} + +type LinuxVirtualMachineScaleSetObservation struct { + + // An additional_capabilities block as defined below. + AdditionalCapabilities *LinuxVirtualMachineScaleSetAdditionalCapabilitiesObservation `json:"additionalCapabilities,omitempty" tf:"additional_capabilities,omitempty"` + + // One or more admin_ssh_key blocks as defined below. + AdminSSHKey []LinuxVirtualMachineScaleSetAdminSSHKeyObservation `json:"adminSshKey,omitempty" tf:"admin_ssh_key,omitempty"` + + // The username of the local administrator on each Virtual Machine Scale Set instance. Changing this forces a new resource to be created. + AdminUsername *string `json:"adminUsername,omitempty" tf:"admin_username,omitempty"` + + // An automatic_instance_repair block as defined below. To enable the automatic instance repair, this Virtual Machine Scale Set must have a valid health_probe_id or an Application Health Extension. + AutomaticInstanceRepair *AutomaticInstanceRepairObservation `json:"automaticInstanceRepair,omitempty" tf:"automatic_instance_repair,omitempty"` + + // An automatic_os_upgrade_policy block as defined below. This can only be specified when upgrade_mode is set to either Automatic or Rolling. + AutomaticOsUpgradePolicy *AutomaticOsUpgradePolicyObservation `json:"automaticOsUpgradePolicy,omitempty" tf:"automatic_os_upgrade_policy,omitempty"` + + // A boot_diagnostics block as defined below. + BootDiagnostics *LinuxVirtualMachineScaleSetBootDiagnosticsObservation `json:"bootDiagnostics,omitempty" tf:"boot_diagnostics,omitempty"` + + // Specifies the ID of the Capacity Reservation Group which the Virtual Machine Scale Set should be allocated to. 
Changing this forces a new resource to be created. + CapacityReservationGroupID *string `json:"capacityReservationGroupId,omitempty" tf:"capacity_reservation_group_id,omitempty"` + + // The prefix which should be used for the name of the Virtual Machines in this Scale Set. If unspecified this defaults to the value for the name field. If the value of the name field is not a valid computer_name_prefix, then you must specify computer_name_prefix. Changing this forces a new resource to be created. + ComputerNamePrefix *string `json:"computerNamePrefix,omitempty" tf:"computer_name_prefix,omitempty"` + + // One or more data_disk blocks as defined below. + DataDisk []LinuxVirtualMachineScaleSetDataDiskObservation `json:"dataDisk,omitempty" tf:"data_disk,omitempty"` + + // Should Password Authentication be disabled on this Virtual Machine Scale Set? Defaults to true. + DisablePasswordAuthentication *bool `json:"disablePasswordAuthentication,omitempty" tf:"disable_password_authentication,omitempty"` + + // Should Virtual Machine Extensions be run on Overprovisioned Virtual Machines in the Scale Set? Defaults to false. + DoNotRunExtensionsOnOverprovisionedMachines *bool `json:"doNotRunExtensionsOnOverprovisionedMachines,omitempty" tf:"do_not_run_extensions_on_overprovisioned_machines,omitempty"` + + // Specifies the Edge Zone within the Azure Region where this Linux Virtual Machine Scale Set should exist. Changing this forces a new Linux Virtual Machine Scale Set to be created. + EdgeZone *string `json:"edgeZone,omitempty" tf:"edge_zone,omitempty"` + + // Should all of the disks (including the temp disk) attached to this Virtual Machine be encrypted by enabling Encryption at Host? + EncryptionAtHostEnabled *bool `json:"encryptionAtHostEnabled,omitempty" tf:"encryption_at_host_enabled,omitempty"` + + // Specifies the eviction policy for Virtual Machines in this Scale Set. Possible values are Deallocate and Delete. Changing this forces a new resource to be created. 
+ EvictionPolicy *string `json:"evictionPolicy,omitempty" tf:"eviction_policy,omitempty"` + + // One or more extension blocks as defined below + Extension []ExtensionObservation `json:"extension,omitempty" tf:"extension,omitempty"` + + // Should extension operations be allowed on the Virtual Machine Scale Set? Possible values are true or false. Defaults to true. Changing this forces a new Linux Virtual Machine Scale Set to be created. + ExtensionOperationsEnabled *bool `json:"extensionOperationsEnabled,omitempty" tf:"extension_operations_enabled,omitempty"` + + // Specifies the duration allocated for all extensions to start. The time duration should be between 15 minutes and 120 minutes (inclusive) and should be specified in ISO 8601 format. Defaults to PT1H30M. + ExtensionsTimeBudget *string `json:"extensionsTimeBudget,omitempty" tf:"extensions_time_budget,omitempty"` + + // One or more gallery_application blocks as defined below. + GalleryApplication []LinuxVirtualMachineScaleSetGalleryApplicationObservation `json:"galleryApplication,omitempty" tf:"gallery_application,omitempty"` + + GalleryApplications []GalleryApplicationsObservation `json:"galleryApplications,omitempty" tf:"gallery_applications,omitempty"` + + // The ID of a Load Balancer Probe which should be used to determine the health of an instance. This is Required and can only be specified when upgrade_mode is set to Automatic or Rolling. + HealthProbeID *string `json:"healthProbeId,omitempty" tf:"health_probe_id,omitempty"` + + // Specifies the ID of the dedicated host group that the virtual machine scale set resides in. Changing this forces a new resource to be created. + HostGroupID *string `json:"hostGroupId,omitempty" tf:"host_group_id,omitempty"` + + // The ID of the Linux Virtual Machine Scale Set. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. 
+ Identity *LinuxVirtualMachineScaleSetIdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // The number of Virtual Machines in the Scale Set. Defaults to 0. + Instances *float64 `json:"instances,omitempty" tf:"instances,omitempty"` + + // The Azure location where the Linux Virtual Machine Scale Set should exist. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The maximum price you're willing to pay for each Virtual Machine in this Scale Set, in US Dollars; which must be greater than the current spot price. If this bid price falls below the current spot price the Virtual Machines in the Scale Set will be evicted using the eviction_policy. Defaults to -1, which means that each Virtual Machine in this Scale Set should not be evicted for price reasons. + MaxBidPrice *float64 `json:"maxBidPrice,omitempty" tf:"max_bid_price,omitempty"` + + // One or more network_interface blocks as defined below. + NetworkInterface []NetworkInterfaceObservation `json:"networkInterface,omitempty" tf:"network_interface,omitempty"` + + // An os_disk block as defined below. + OsDisk *LinuxVirtualMachineScaleSetOsDiskObservation `json:"osDisk,omitempty" tf:"os_disk,omitempty"` + + // Should Azure over-provision Virtual Machines in this Scale Set? This means that multiple Virtual Machines will be provisioned and Azure will keep the instances which become available first - which improves provisioning success rates and improves deployment time. You're not billed for these over-provisioned VM's and they don't count towards the Subscription Quota. Defaults to true. + Overprovision *bool `json:"overprovision,omitempty" tf:"overprovision,omitempty"` + + // A plan block as defined below. Changing this forces a new resource to be created. 
+ Plan *LinuxVirtualMachineScaleSetPlanObservation `json:"plan,omitempty" tf:"plan,omitempty"` + + // Specifies the number of fault domains that are used by this Linux Virtual Machine Scale Set. Changing this forces a new resource to be created. + PlatformFaultDomainCount *float64 `json:"platformFaultDomainCount,omitempty" tf:"platform_fault_domain_count,omitempty"` + + // The Priority of this Virtual Machine Scale Set. Possible values are Regular and Spot. Defaults to Regular. Changing this value forces a new resource. + Priority *string `json:"priority,omitempty" tf:"priority,omitempty"` + + // Should the Azure VM Agent be provisioned on each Virtual Machine in the Scale Set? Defaults to true. Changing this value forces a new resource to be created. + ProvisionVMAgent *bool `json:"provisionVmAgent,omitempty" tf:"provision_vm_agent,omitempty"` + + // The ID of the Proximity Placement Group in which the Virtual Machine Scale Set should be assigned to. Changing this forces a new resource to be created. + ProximityPlacementGroupID *string `json:"proximityPlacementGroupId,omitempty" tf:"proximity_placement_group_id,omitempty"` + + // The name of the Resource Group in which the Linux Virtual Machine Scale Set should be exist. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A rolling_upgrade_policy block as defined below. This is Required and can only be specified when upgrade_mode is set to Automatic or Rolling. Changing this forces a new resource to be created. + RollingUpgradePolicy *RollingUpgradePolicyObservation `json:"rollingUpgradePolicy,omitempty" tf:"rolling_upgrade_policy,omitempty"` + + // A scale_in block as defined below. + ScaleIn *ScaleInObservation `json:"scaleIn,omitempty" tf:"scale_in,omitempty"` + + ScaleInPolicy *string `json:"scaleInPolicy,omitempty" tf:"scale_in_policy,omitempty"` + + // One or more secret blocks as defined below. 
+ Secret []LinuxVirtualMachineScaleSetSecretObservation `json:"secret,omitempty" tf:"secret,omitempty"` + + // Specifies whether secure boot should be enabled on the virtual machine. Changing this forces a new resource to be created. + SecureBootEnabled *bool `json:"secureBootEnabled,omitempty" tf:"secure_boot_enabled,omitempty"` + + // Should this Virtual Machine Scale Set be limited to a Single Placement Group, which means the number of instances will be capped at 100 Virtual Machines. Defaults to true. + SinglePlacementGroup *bool `json:"singlePlacementGroup,omitempty" tf:"single_placement_group,omitempty"` + + // The Virtual Machine SKU for the Scale Set, such as Standard_F2. + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // The ID of an Image which each Virtual Machine in this Scale Set should be based on. Possible Image ID types include Image ID, Shared Image ID, Shared Image Version ID, Community Gallery Image ID, Community Gallery Image Version ID, Shared Gallery Image ID and Shared Gallery Image Version ID. + SourceImageID *string `json:"sourceImageId,omitempty" tf:"source_image_id,omitempty"` + + // A source_image_reference block as defined below. + SourceImageReference *LinuxVirtualMachineScaleSetSourceImageReferenceObservation `json:"sourceImageReference,omitempty" tf:"source_image_reference,omitempty"` + + // A spot_restore block as defined below. + SpotRestore *SpotRestoreObservation `json:"spotRestore,omitempty" tf:"spot_restore,omitempty"` + + // A mapping of tags which should be assigned to this Virtual Machine Scale Set. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A terminate_notification block as defined below. + TerminateNotification *TerminateNotificationObservation `json:"terminateNotification,omitempty" tf:"terminate_notification,omitempty"` + + // A termination_notification block as defined below. 
+ TerminationNotification *LinuxVirtualMachineScaleSetTerminationNotificationObservation `json:"terminationNotification,omitempty" tf:"termination_notification,omitempty"` + + // The Unique ID for this Linux Virtual Machine Scale Set. + UniqueID *string `json:"uniqueId,omitempty" tf:"unique_id,omitempty"` + + // Specifies how Upgrades (e.g. changing the Image/SKU) should be performed to Virtual Machine Instances. Possible values are Automatic, Manual and Rolling. Defaults to Manual. Changing this forces a new resource to be created. + UpgradeMode *string `json:"upgradeMode,omitempty" tf:"upgrade_mode,omitempty"` + + // The Base64-Encoded User Data which should be used for this Virtual Machine Scale Set. + UserData *string `json:"userData,omitempty" tf:"user_data,omitempty"` + + // Specifies whether vTPM should be enabled on the virtual machine. Changing this forces a new resource to be created. + VtpmEnabled *bool `json:"vtpmEnabled,omitempty" tf:"vtpm_enabled,omitempty"` + + // Should the Virtual Machines in this Scale Set be strictly evenly distributed across Availability Zones? Defaults to false. Changing this forces a new resource to be created. + ZoneBalance *bool `json:"zoneBalance,omitempty" tf:"zone_balance,omitempty"` + + // Specifies a list of Availability Zones in which this Linux Virtual Machine Scale Set should be located. Changing this forces a new Linux Virtual Machine Scale Set to be created. + // +listType=set + Zones []*string `json:"zones,omitempty" tf:"zones,omitempty"` +} + +type LinuxVirtualMachineScaleSetOsDiskInitParameters struct { + + // The Type of Caching which should be used for the Internal OS Disk. Possible values are None, ReadOnly and ReadWrite. + Caching *string `json:"caching,omitempty" tf:"caching,omitempty"` + + // A diff_disk_settings block as defined above. Changing this forces a new resource to be created. 
+ DiffDiskSettings *OsDiskDiffDiskSettingsInitParameters `json:"diffDiskSettings,omitempty" tf:"diff_disk_settings,omitempty"` + + // The ID of the Disk Encryption Set which should be used to encrypt this OS Disk. Conflicts with secure_vm_disk_encryption_set_id. Changing this forces a new resource to be created. + DiskEncryptionSetID *string `json:"diskEncryptionSetId,omitempty" tf:"disk_encryption_set_id,omitempty"` + + // The Size of the Internal OS Disk in GB, if you wish to vary from the size used in the image this Virtual Machine Scale Set is sourced from. + DiskSizeGb *float64 `json:"diskSizeGb,omitempty" tf:"disk_size_gb,omitempty"` + + // The ID of the Disk Encryption Set which should be used to Encrypt the OS Disk when the Virtual Machine Scale Set is Confidential VMSS. Conflicts with disk_encryption_set_id. Changing this forces a new resource to be created. + SecureVMDiskEncryptionSetID *string `json:"secureVmDiskEncryptionSetId,omitempty" tf:"secure_vm_disk_encryption_set_id,omitempty"` + + // Encryption Type when the Virtual Machine Scale Set is Confidential VMSS. Possible values are VMGuestStateOnly and DiskWithVMGuestState. Changing this forces a new resource to be created. + SecurityEncryptionType *string `json:"securityEncryptionType,omitempty" tf:"security_encryption_type,omitempty"` + + // The Type of Storage Account which should back this the Internal OS Disk. Possible values include Standard_LRS, StandardSSD_LRS, StandardSSD_ZRS, Premium_LRS and Premium_ZRS. Changing this forces a new resource to be created. + StorageAccountType *string `json:"storageAccountType,omitempty" tf:"storage_account_type,omitempty"` + + // Should Write Accelerator be Enabled for this OS Disk? Defaults to false. + WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty" tf:"write_accelerator_enabled,omitempty"` +} + +type LinuxVirtualMachineScaleSetOsDiskObservation struct { + + // The Type of Caching which should be used for the Internal OS Disk. 
Possible values are None, ReadOnly and ReadWrite. + Caching *string `json:"caching,omitempty" tf:"caching,omitempty"` + + // A diff_disk_settings block as defined above. Changing this forces a new resource to be created. + DiffDiskSettings *OsDiskDiffDiskSettingsObservation `json:"diffDiskSettings,omitempty" tf:"diff_disk_settings,omitempty"` + + // The ID of the Disk Encryption Set which should be used to encrypt this OS Disk. Conflicts with secure_vm_disk_encryption_set_id. Changing this forces a new resource to be created. + DiskEncryptionSetID *string `json:"diskEncryptionSetId,omitempty" tf:"disk_encryption_set_id,omitempty"` + + // The Size of the Internal OS Disk in GB, if you wish to vary from the size used in the image this Virtual Machine Scale Set is sourced from. + DiskSizeGb *float64 `json:"diskSizeGb,omitempty" tf:"disk_size_gb,omitempty"` + + // The ID of the Disk Encryption Set which should be used to Encrypt the OS Disk when the Virtual Machine Scale Set is Confidential VMSS. Conflicts with disk_encryption_set_id. Changing this forces a new resource to be created. + SecureVMDiskEncryptionSetID *string `json:"secureVmDiskEncryptionSetId,omitempty" tf:"secure_vm_disk_encryption_set_id,omitempty"` + + // Encryption Type when the Virtual Machine Scale Set is Confidential VMSS. Possible values are VMGuestStateOnly and DiskWithVMGuestState. Changing this forces a new resource to be created. + SecurityEncryptionType *string `json:"securityEncryptionType,omitempty" tf:"security_encryption_type,omitempty"` + + // The Type of Storage Account which should back this the Internal OS Disk. Possible values include Standard_LRS, StandardSSD_LRS, StandardSSD_ZRS, Premium_LRS and Premium_ZRS. Changing this forces a new resource to be created. + StorageAccountType *string `json:"storageAccountType,omitempty" tf:"storage_account_type,omitempty"` + + // Should Write Accelerator be Enabled for this OS Disk? Defaults to false. 
+ WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty" tf:"write_accelerator_enabled,omitempty"` +} + +type LinuxVirtualMachineScaleSetOsDiskParameters struct { + + // The Type of Caching which should be used for the Internal OS Disk. Possible values are None, ReadOnly and ReadWrite. + // +kubebuilder:validation:Optional + Caching *string `json:"caching" tf:"caching,omitempty"` + + // A diff_disk_settings block as defined above. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + DiffDiskSettings *OsDiskDiffDiskSettingsParameters `json:"diffDiskSettings,omitempty" tf:"diff_disk_settings,omitempty"` + + // The ID of the Disk Encryption Set which should be used to encrypt this OS Disk. Conflicts with secure_vm_disk_encryption_set_id. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + DiskEncryptionSetID *string `json:"diskEncryptionSetId,omitempty" tf:"disk_encryption_set_id,omitempty"` + + // The Size of the Internal OS Disk in GB, if you wish to vary from the size used in the image this Virtual Machine Scale Set is sourced from. + // +kubebuilder:validation:Optional + DiskSizeGb *float64 `json:"diskSizeGb,omitempty" tf:"disk_size_gb,omitempty"` + + // The ID of the Disk Encryption Set which should be used to Encrypt the OS Disk when the Virtual Machine Scale Set is Confidential VMSS. Conflicts with disk_encryption_set_id. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + SecureVMDiskEncryptionSetID *string `json:"secureVmDiskEncryptionSetId,omitempty" tf:"secure_vm_disk_encryption_set_id,omitempty"` + + // Encryption Type when the Virtual Machine Scale Set is Confidential VMSS. Possible values are VMGuestStateOnly and DiskWithVMGuestState. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + SecurityEncryptionType *string `json:"securityEncryptionType,omitempty" tf:"security_encryption_type,omitempty"` + + // The Type of Storage Account which should back this the Internal OS Disk. Possible values include Standard_LRS, StandardSSD_LRS, StandardSSD_ZRS, Premium_LRS and Premium_ZRS. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + StorageAccountType *string `json:"storageAccountType" tf:"storage_account_type,omitempty"` + + // Should Write Accelerator be Enabled for this OS Disk? Defaults to false. + // +kubebuilder:validation:Optional + WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty" tf:"write_accelerator_enabled,omitempty"` +} + +type LinuxVirtualMachineScaleSetParameters struct { + + // An additional_capabilities block as defined below. + // +kubebuilder:validation:Optional + AdditionalCapabilities *LinuxVirtualMachineScaleSetAdditionalCapabilitiesParameters `json:"additionalCapabilities,omitempty" tf:"additional_capabilities,omitempty"` + + // The Password which should be used for the local-administrator on this Virtual Machine. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + AdminPasswordSecretRef *v1.SecretKeySelector `json:"adminPasswordSecretRef,omitempty" tf:"-"` + + // One or more admin_ssh_key blocks as defined below. + // +kubebuilder:validation:Optional + AdminSSHKey []LinuxVirtualMachineScaleSetAdminSSHKeyParameters `json:"adminSshKey,omitempty" tf:"admin_ssh_key,omitempty"` + + // The username of the local administrator on each Virtual Machine Scale Set instance. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + AdminUsername *string `json:"adminUsername,omitempty" tf:"admin_username,omitempty"` + + // An automatic_instance_repair block as defined below. 
To enable the automatic instance repair, this Virtual Machine Scale Set must have a valid health_probe_id or an Application Health Extension. + // +kubebuilder:validation:Optional + AutomaticInstanceRepair *AutomaticInstanceRepairParameters `json:"automaticInstanceRepair,omitempty" tf:"automatic_instance_repair,omitempty"` + + // An automatic_os_upgrade_policy block as defined below. This can only be specified when upgrade_mode is set to either Automatic or Rolling. + // +kubebuilder:validation:Optional + AutomaticOsUpgradePolicy *AutomaticOsUpgradePolicyParameters `json:"automaticOsUpgradePolicy,omitempty" tf:"automatic_os_upgrade_policy,omitempty"` + + // A boot_diagnostics block as defined below. + // +kubebuilder:validation:Optional + BootDiagnostics *LinuxVirtualMachineScaleSetBootDiagnosticsParameters `json:"bootDiagnostics,omitempty" tf:"boot_diagnostics,omitempty"` + + // Specifies the ID of the Capacity Reservation Group which the Virtual Machine Scale Set should be allocated to. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + CapacityReservationGroupID *string `json:"capacityReservationGroupId,omitempty" tf:"capacity_reservation_group_id,omitempty"` + + // The prefix which should be used for the name of the Virtual Machines in this Scale Set. If unspecified this defaults to the value for the name field. If the value of the name field is not a valid computer_name_prefix, then you must specify computer_name_prefix. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ComputerNamePrefix *string `json:"computerNamePrefix,omitempty" tf:"computer_name_prefix,omitempty"` + + // The Base64-Encoded Custom Data which should be used for this Virtual Machine Scale Set. + // +kubebuilder:validation:Optional + CustomDataSecretRef *v1.SecretKeySelector `json:"customDataSecretRef,omitempty" tf:"-"` + + // One or more data_disk blocks as defined below. 
+ // +kubebuilder:validation:Optional + DataDisk []LinuxVirtualMachineScaleSetDataDiskParameters `json:"dataDisk,omitempty" tf:"data_disk,omitempty"` + + // Should Password Authentication be disabled on this Virtual Machine Scale Set? Defaults to true. + // +kubebuilder:validation:Optional + DisablePasswordAuthentication *bool `json:"disablePasswordAuthentication,omitempty" tf:"disable_password_authentication,omitempty"` + + // Should Virtual Machine Extensions be run on Overprovisioned Virtual Machines in the Scale Set? Defaults to false. + // +kubebuilder:validation:Optional + DoNotRunExtensionsOnOverprovisionedMachines *bool `json:"doNotRunExtensionsOnOverprovisionedMachines,omitempty" tf:"do_not_run_extensions_on_overprovisioned_machines,omitempty"` + + // Specifies the Edge Zone within the Azure Region where this Linux Virtual Machine Scale Set should exist. Changing this forces a new Linux Virtual Machine Scale Set to be created. + // +kubebuilder:validation:Optional + EdgeZone *string `json:"edgeZone,omitempty" tf:"edge_zone,omitempty"` + + // Should all of the disks (including the temp disk) attached to this Virtual Machine be encrypted by enabling Encryption at Host? + // +kubebuilder:validation:Optional + EncryptionAtHostEnabled *bool `json:"encryptionAtHostEnabled,omitempty" tf:"encryption_at_host_enabled,omitempty"` + + // Specifies the eviction policy for Virtual Machines in this Scale Set. Possible values are Deallocate and Delete. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + EvictionPolicy *string `json:"evictionPolicy,omitempty" tf:"eviction_policy,omitempty"` + + // One or more extension blocks as defined below + // +kubebuilder:validation:Optional + Extension []ExtensionParameters `json:"extension,omitempty" tf:"extension,omitempty"` + + // Should extension operations be allowed on the Virtual Machine Scale Set? Possible values are true or false. Defaults to true. 
Changing this forces a new Linux Virtual Machine Scale Set to be created. + // +kubebuilder:validation:Optional + ExtensionOperationsEnabled *bool `json:"extensionOperationsEnabled,omitempty" tf:"extension_operations_enabled,omitempty"` + + // Specifies the duration allocated for all extensions to start. The time duration should be between 15 minutes and 120 minutes (inclusive) and should be specified in ISO 8601 format. Defaults to PT1H30M. + // +kubebuilder:validation:Optional + ExtensionsTimeBudget *string `json:"extensionsTimeBudget,omitempty" tf:"extensions_time_budget,omitempty"` + + // One or more gallery_application blocks as defined below. + // +kubebuilder:validation:Optional + GalleryApplication []LinuxVirtualMachineScaleSetGalleryApplicationParameters `json:"galleryApplication,omitempty" tf:"gallery_application,omitempty"` + + // +kubebuilder:validation:Optional + GalleryApplications []GalleryApplicationsParameters `json:"galleryApplications,omitempty" tf:"gallery_applications,omitempty"` + + // The ID of a Load Balancer Probe which should be used to determine the health of an instance. This is Required and can only be specified when upgrade_mode is set to Automatic or Rolling. + // +kubebuilder:validation:Optional + HealthProbeID *string `json:"healthProbeId,omitempty" tf:"health_probe_id,omitempty"` + + // Specifies the ID of the dedicated host group that the virtual machine scale set resides in. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + HostGroupID *string `json:"hostGroupId,omitempty" tf:"host_group_id,omitempty"` + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *LinuxVirtualMachineScaleSetIdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The number of Virtual Machines in the Scale Set. Defaults to 0. 
+ // +kubebuilder:validation:Optional + Instances *float64 `json:"instances,omitempty" tf:"instances,omitempty"` + + // The Azure location where the Linux Virtual Machine Scale Set should exist. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The maximum price you're willing to pay for each Virtual Machine in this Scale Set, in US Dollars; which must be greater than the current spot price. If this bid price falls below the current spot price the Virtual Machines in the Scale Set will be evicted using the eviction_policy. Defaults to -1, which means that each Virtual Machine in this Scale Set should not be evicted for price reasons. + // +kubebuilder:validation:Optional + MaxBidPrice *float64 `json:"maxBidPrice,omitempty" tf:"max_bid_price,omitempty"` + + // One or more network_interface blocks as defined below. + // +kubebuilder:validation:Optional + NetworkInterface []NetworkInterfaceParameters `json:"networkInterface,omitempty" tf:"network_interface,omitempty"` + + // An os_disk block as defined below. + // +kubebuilder:validation:Optional + OsDisk *LinuxVirtualMachineScaleSetOsDiskParameters `json:"osDisk,omitempty" tf:"os_disk,omitempty"` + + // Should Azure over-provision Virtual Machines in this Scale Set? This means that multiple Virtual Machines will be provisioned and Azure will keep the instances which become available first - which improves provisioning success rates and improves deployment time. You're not billed for these over-provisioned VM's and they don't count towards the Subscription Quota. Defaults to true. + // +kubebuilder:validation:Optional + Overprovision *bool `json:"overprovision,omitempty" tf:"overprovision,omitempty"` + + // A plan block as defined below. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + Plan *LinuxVirtualMachineScaleSetPlanParameters `json:"plan,omitempty" tf:"plan,omitempty"` + + // Specifies the number of fault domains that are used by this Linux Virtual Machine Scale Set. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + PlatformFaultDomainCount *float64 `json:"platformFaultDomainCount,omitempty" tf:"platform_fault_domain_count,omitempty"` + + // The Priority of this Virtual Machine Scale Set. Possible values are Regular and Spot. Defaults to Regular. Changing this value forces a new resource. + // +kubebuilder:validation:Optional + Priority *string `json:"priority,omitempty" tf:"priority,omitempty"` + + // Should the Azure VM Agent be provisioned on each Virtual Machine in the Scale Set? Defaults to true. Changing this value forces a new resource to be created. + // +kubebuilder:validation:Optional + ProvisionVMAgent *bool `json:"provisionVmAgent,omitempty" tf:"provision_vm_agent,omitempty"` + + // The ID of the Proximity Placement Group in which the Virtual Machine Scale Set should be assigned to. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ProximityPlacementGroupID *string `json:"proximityPlacementGroupId,omitempty" tf:"proximity_placement_group_id,omitempty"` + + // The name of the Resource Group in which the Linux Virtual Machine Scale Set should be exist. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. 
+ // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A rolling_upgrade_policy block as defined below. This is Required and can only be specified when upgrade_mode is set to Automatic or Rolling. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + RollingUpgradePolicy *RollingUpgradePolicyParameters `json:"rollingUpgradePolicy,omitempty" tf:"rolling_upgrade_policy,omitempty"` + + // A scale_in block as defined below. + // +kubebuilder:validation:Optional + ScaleIn *ScaleInParameters `json:"scaleIn,omitempty" tf:"scale_in,omitempty"` + + // One or more secret blocks as defined below. + // +kubebuilder:validation:Optional + Secret []LinuxVirtualMachineScaleSetSecretParameters `json:"secret,omitempty" tf:"secret,omitempty"` + + // Specifies whether secure boot should be enabled on the virtual machine. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + SecureBootEnabled *bool `json:"secureBootEnabled,omitempty" tf:"secure_boot_enabled,omitempty"` + + // Should this Virtual Machine Scale Set be limited to a Single Placement Group, which means the number of instances will be capped at 100 Virtual Machines. Defaults to true. + // +kubebuilder:validation:Optional + SinglePlacementGroup *bool `json:"singlePlacementGroup,omitempty" tf:"single_placement_group,omitempty"` + + // The Virtual Machine SKU for the Scale Set, such as Standard_F2. + // +kubebuilder:validation:Optional + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // The ID of an Image which each Virtual Machine in this Scale Set should be based on. 
Possible Image ID types include Image ID, Shared Image ID, Shared Image Version ID, Community Gallery Image ID, Community Gallery Image Version ID, Shared Gallery Image ID and Shared Gallery Image Version ID. + // +kubebuilder:validation:Optional + SourceImageID *string `json:"sourceImageId,omitempty" tf:"source_image_id,omitempty"` + + // A source_image_reference block as defined below. + // +kubebuilder:validation:Optional + SourceImageReference *LinuxVirtualMachineScaleSetSourceImageReferenceParameters `json:"sourceImageReference,omitempty" tf:"source_image_reference,omitempty"` + + // A spot_restore block as defined below. + // +kubebuilder:validation:Optional + SpotRestore *SpotRestoreParameters `json:"spotRestore,omitempty" tf:"spot_restore,omitempty"` + + // A mapping of tags which should be assigned to this Virtual Machine Scale Set. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A terminate_notification block as defined below. + // +kubebuilder:validation:Optional + TerminateNotification *TerminateNotificationParameters `json:"terminateNotification,omitempty" tf:"terminate_notification,omitempty"` + + // A termination_notification block as defined below. + // +kubebuilder:validation:Optional + TerminationNotification *LinuxVirtualMachineScaleSetTerminationNotificationParameters `json:"terminationNotification,omitempty" tf:"termination_notification,omitempty"` + + // Specifies how Upgrades (e.g. changing the Image/SKU) should be performed to Virtual Machine Instances. Possible values are Automatic, Manual and Rolling. Defaults to Manual. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + UpgradeMode *string `json:"upgradeMode,omitempty" tf:"upgrade_mode,omitempty"` + + // The Base64-Encoded User Data which should be used for this Virtual Machine Scale Set. 
+ // +kubebuilder:validation:Optional + UserData *string `json:"userData,omitempty" tf:"user_data,omitempty"` + + // Specifies whether vTPM should be enabled on the virtual machine. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + VtpmEnabled *bool `json:"vtpmEnabled,omitempty" tf:"vtpm_enabled,omitempty"` + + // Should the Virtual Machines in this Scale Set be strictly evenly distributed across Availability Zones? Defaults to false. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ZoneBalance *bool `json:"zoneBalance,omitempty" tf:"zone_balance,omitempty"` + + // Specifies a list of Availability Zones in which this Linux Virtual Machine Scale Set should be located. Changing this forces a new Linux Virtual Machine Scale Set to be created. + // +kubebuilder:validation:Optional + // +listType=set + Zones []*string `json:"zones,omitempty" tf:"zones,omitempty"` +} + +type LinuxVirtualMachineScaleSetPlanInitParameters struct { + + // Specifies the name of the image from the marketplace. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies the product of the image from the marketplace. Changing this forces a new resource to be created. + Product *string `json:"product,omitempty" tf:"product,omitempty"` + + // Specifies the publisher of the image. Changing this forces a new resource to be created. + Publisher *string `json:"publisher,omitempty" tf:"publisher,omitempty"` +} + +type LinuxVirtualMachineScaleSetPlanObservation struct { + + // Specifies the name of the image from the marketplace. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies the product of the image from the marketplace. Changing this forces a new resource to be created. 
+ Product *string `json:"product,omitempty" tf:"product,omitempty"` + + // Specifies the publisher of the image. Changing this forces a new resource to be created. + Publisher *string `json:"publisher,omitempty" tf:"publisher,omitempty"` +} + +type LinuxVirtualMachineScaleSetPlanParameters struct { + + // Specifies the name of the image from the marketplace. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Specifies the product of the image from the marketplace. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Product *string `json:"product" tf:"product,omitempty"` + + // Specifies the publisher of the image. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Publisher *string `json:"publisher" tf:"publisher,omitempty"` +} + +type LinuxVirtualMachineScaleSetSecretInitParameters struct { + + // One or more certificate blocks as defined above. + Certificate []SecretCertificateInitParameters `json:"certificate,omitempty" tf:"certificate,omitempty"` + + // The ID of the Key Vault from which all Secrets should be sourced. + KeyVaultID *string `json:"keyVaultId,omitempty" tf:"key_vault_id,omitempty"` +} + +type LinuxVirtualMachineScaleSetSecretObservation struct { + + // One or more certificate blocks as defined above. + Certificate []SecretCertificateObservation `json:"certificate,omitempty" tf:"certificate,omitempty"` + + // The ID of the Key Vault from which all Secrets should be sourced. + KeyVaultID *string `json:"keyVaultId,omitempty" tf:"key_vault_id,omitempty"` +} + +type LinuxVirtualMachineScaleSetSecretParameters struct { + + // One or more certificate blocks as defined above. + // +kubebuilder:validation:Optional + Certificate []SecretCertificateParameters `json:"certificate" tf:"certificate,omitempty"` + + // The ID of the Key Vault from which all Secrets should be sourced. 
+ // +kubebuilder:validation:Optional + KeyVaultID *string `json:"keyVaultId" tf:"key_vault_id,omitempty"` +} + +type LinuxVirtualMachineScaleSetSourceImageReferenceInitParameters struct { + + // Specifies the offer of the image used to create the virtual machines. Changing this forces a new resource to be created. + Offer *string `json:"offer,omitempty" tf:"offer,omitempty"` + + // Specifies the publisher of the image used to create the virtual machines. Changing this forces a new resource to be created. + Publisher *string `json:"publisher,omitempty" tf:"publisher,omitempty"` + + // Specifies the SKU of the image used to create the virtual machines. + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // Specifies the version of the image used to create the virtual machines. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type LinuxVirtualMachineScaleSetSourceImageReferenceObservation struct { + + // Specifies the offer of the image used to create the virtual machines. Changing this forces a new resource to be created. + Offer *string `json:"offer,omitempty" tf:"offer,omitempty"` + + // Specifies the publisher of the image used to create the virtual machines. Changing this forces a new resource to be created. + Publisher *string `json:"publisher,omitempty" tf:"publisher,omitempty"` + + // Specifies the SKU of the image used to create the virtual machines. + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // Specifies the version of the image used to create the virtual machines. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type LinuxVirtualMachineScaleSetSourceImageReferenceParameters struct { + + // Specifies the offer of the image used to create the virtual machines. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Offer *string `json:"offer" tf:"offer,omitempty"` + + // Specifies the publisher of the image used to create the virtual machines. 
Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Publisher *string `json:"publisher" tf:"publisher,omitempty"` + + // Specifies the SKU of the image used to create the virtual machines. + // +kubebuilder:validation:Optional + Sku *string `json:"sku" tf:"sku,omitempty"` + + // Specifies the version of the image used to create the virtual machines. + // +kubebuilder:validation:Optional + Version *string `json:"version" tf:"version,omitempty"` +} + +type LinuxVirtualMachineScaleSetTerminationNotificationInitParameters struct { + + // Should the termination notification be enabled on this Virtual Machine Scale Set? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Length of time (in minutes, between 5 and 15) a notification to be sent to the VM on the instance metadata server till the VM gets deleted. The time duration should be specified in ISO 8601 format. Defaults to PT5M. + Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` +} + +type LinuxVirtualMachineScaleSetTerminationNotificationObservation struct { + + // Should the termination notification be enabled on this Virtual Machine Scale Set? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Length of time (in minutes, between 5 and 15) a notification to be sent to the VM on the instance metadata server till the VM gets deleted. The time duration should be specified in ISO 8601 format. Defaults to PT5M. + Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` +} + +type LinuxVirtualMachineScaleSetTerminationNotificationParameters struct { + + // Should the termination notification be enabled on this Virtual Machine Scale Set? + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` + + // Length of time (in minutes, between 5 and 15) a notification to be sent to the VM on the instance metadata server till the VM gets deleted. 
The time duration should be specified in ISO 8601 format. Defaults to PT5M. + // +kubebuilder:validation:Optional + Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` +} + +type NetworkInterfaceInitParameters struct { + + // A list of IP Addresses of DNS Servers which should be assigned to the Network Interface. + DNSServers []*string `json:"dnsServers,omitempty" tf:"dns_servers,omitempty"` + + // Does this Network Interface support Accelerated Networking? Defaults to false. + EnableAcceleratedNetworking *bool `json:"enableAcceleratedNetworking,omitempty" tf:"enable_accelerated_networking,omitempty"` + + // Does this Network Interface support IP Forwarding? Defaults to false. + EnableIPForwarding *bool `json:"enableIpForwarding,omitempty" tf:"enable_ip_forwarding,omitempty"` + + // One or more ip_configuration blocks as defined above. + IPConfiguration []IPConfigurationInitParameters `json:"ipConfiguration,omitempty" tf:"ip_configuration,omitempty"` + + // The Name which should be used for this Network Interface. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The ID of a Network Security Group which should be assigned to this Network Interface. + NetworkSecurityGroupID *string `json:"networkSecurityGroupId,omitempty" tf:"network_security_group_id,omitempty"` + + // Is this the Primary IP Configuration? + Primary *bool `json:"primary,omitempty" tf:"primary,omitempty"` +} + +type NetworkInterfaceObservation struct { + + // A list of IP Addresses of DNS Servers which should be assigned to the Network Interface. + DNSServers []*string `json:"dnsServers,omitempty" tf:"dns_servers,omitempty"` + + // Does this Network Interface support Accelerated Networking? Defaults to false. + EnableAcceleratedNetworking *bool `json:"enableAcceleratedNetworking,omitempty" tf:"enable_accelerated_networking,omitempty"` + + // Does this Network Interface support IP Forwarding? Defaults to false. 
+ EnableIPForwarding *bool `json:"enableIpForwarding,omitempty" tf:"enable_ip_forwarding,omitempty"` + + // One or more ip_configuration blocks as defined above. + IPConfiguration []IPConfigurationObservation `json:"ipConfiguration,omitempty" tf:"ip_configuration,omitempty"` + + // The Name which should be used for this Network Interface. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The ID of a Network Security Group which should be assigned to this Network Interface. + NetworkSecurityGroupID *string `json:"networkSecurityGroupId,omitempty" tf:"network_security_group_id,omitempty"` + + // Is this the Primary IP Configuration? + Primary *bool `json:"primary,omitempty" tf:"primary,omitempty"` +} + +type NetworkInterfaceParameters struct { + + // A list of IP Addresses of DNS Servers which should be assigned to the Network Interface. + // +kubebuilder:validation:Optional + DNSServers []*string `json:"dnsServers,omitempty" tf:"dns_servers,omitempty"` + + // Does this Network Interface support Accelerated Networking? Defaults to false. + // +kubebuilder:validation:Optional + EnableAcceleratedNetworking *bool `json:"enableAcceleratedNetworking,omitempty" tf:"enable_accelerated_networking,omitempty"` + + // Does this Network Interface support IP Forwarding? Defaults to false. + // +kubebuilder:validation:Optional + EnableIPForwarding *bool `json:"enableIpForwarding,omitempty" tf:"enable_ip_forwarding,omitempty"` + + // One or more ip_configuration blocks as defined above. + // +kubebuilder:validation:Optional + IPConfiguration []IPConfigurationParameters `json:"ipConfiguration" tf:"ip_configuration,omitempty"` + + // The Name which should be used for this Network Interface. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The ID of a Network Security Group which should be assigned to this Network Interface. 
+ // +kubebuilder:validation:Optional + NetworkSecurityGroupID *string `json:"networkSecurityGroupId,omitempty" tf:"network_security_group_id,omitempty"` + + // Is this the Primary IP Configuration? + // +kubebuilder:validation:Optional + Primary *bool `json:"primary,omitempty" tf:"primary,omitempty"` +} + +type OsDiskDiffDiskSettingsInitParameters struct { + + // Specifies the Ephemeral Disk Settings for the OS Disk. At this time the only possible value is Local. Changing this forces a new resource to be created. + Option *string `json:"option,omitempty" tf:"option,omitempty"` + + // Specifies where to store the Ephemeral Disk. Possible values are CacheDisk and ResourceDisk. Defaults to CacheDisk. Changing this forces a new resource to be created. + Placement *string `json:"placement,omitempty" tf:"placement,omitempty"` +} + +type OsDiskDiffDiskSettingsObservation struct { + + // Specifies the Ephemeral Disk Settings for the OS Disk. At this time the only possible value is Local. Changing this forces a new resource to be created. + Option *string `json:"option,omitempty" tf:"option,omitempty"` + + // Specifies where to store the Ephemeral Disk. Possible values are CacheDisk and ResourceDisk. Defaults to CacheDisk. Changing this forces a new resource to be created. + Placement *string `json:"placement,omitempty" tf:"placement,omitempty"` +} + +type OsDiskDiffDiskSettingsParameters struct { + + // Specifies the Ephemeral Disk Settings for the OS Disk. At this time the only possible value is Local. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Option *string `json:"option" tf:"option,omitempty"` + + // Specifies where to store the Ephemeral Disk. Possible values are CacheDisk and ResourceDisk. Defaults to CacheDisk. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + Placement *string `json:"placement,omitempty" tf:"placement,omitempty"` +} + +type ProtectedSettingsFromKeyVaultInitParameters struct { + + // The URL to the Key Vault Secret which stores the protected settings. + SecretURL *string `json:"secretUrl,omitempty" tf:"secret_url,omitempty"` + + // The ID of the source Key Vault. + SourceVaultID *string `json:"sourceVaultId,omitempty" tf:"source_vault_id,omitempty"` +} + +type ProtectedSettingsFromKeyVaultObservation struct { + + // The URL to the Key Vault Secret which stores the protected settings. + SecretURL *string `json:"secretUrl,omitempty" tf:"secret_url,omitempty"` + + // The ID of the source Key Vault. + SourceVaultID *string `json:"sourceVaultId,omitempty" tf:"source_vault_id,omitempty"` +} + +type ProtectedSettingsFromKeyVaultParameters struct { + + // The URL to the Key Vault Secret which stores the protected settings. + // +kubebuilder:validation:Optional + SecretURL *string `json:"secretUrl" tf:"secret_url,omitempty"` + + // The ID of the source Key Vault. + // +kubebuilder:validation:Optional + SourceVaultID *string `json:"sourceVaultId" tf:"source_vault_id,omitempty"` +} + +type PublicIPAddressInitParameters struct { + + // The Prefix which should be used for the Domain Name Label for each Virtual Machine Instance. Azure concatenates the Domain Name Label and Virtual Machine Index to create a unique Domain Name Label for each Virtual Machine. + DomainNameLabel *string `json:"domainNameLabel,omitempty" tf:"domain_name_label,omitempty"` + + // One or more ip_tag blocks as defined above. Changing this forces a new resource to be created. + IPTag []IPTagInitParameters `json:"ipTag,omitempty" tf:"ip_tag,omitempty"` + + // The Idle Timeout in Minutes for the Public IP Address. Possible values are in the range 4 to 32. 
+ IdleTimeoutInMinutes *float64 `json:"idleTimeoutInMinutes,omitempty" tf:"idle_timeout_in_minutes,omitempty"` + + // The Name of the Public IP Address Configuration. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The ID of the Public IP Address Prefix from where Public IP Addresses should be allocated. Changing this forces a new resource to be created. + PublicIPPrefixID *string `json:"publicIpPrefixId,omitempty" tf:"public_ip_prefix_id,omitempty"` + + // Specifies the version of the image used to create the virtual machines. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type PublicIPAddressObservation struct { + + // The Prefix which should be used for the Domain Name Label for each Virtual Machine Instance. Azure concatenates the Domain Name Label and Virtual Machine Index to create a unique Domain Name Label for each Virtual Machine. + DomainNameLabel *string `json:"domainNameLabel,omitempty" tf:"domain_name_label,omitempty"` + + // One or more ip_tag blocks as defined above. Changing this forces a new resource to be created. + IPTag []IPTagObservation `json:"ipTag,omitempty" tf:"ip_tag,omitempty"` + + // The Idle Timeout in Minutes for the Public IP Address. Possible values are in the range 4 to 32. + IdleTimeoutInMinutes *float64 `json:"idleTimeoutInMinutes,omitempty" tf:"idle_timeout_in_minutes,omitempty"` + + // The Name of the Public IP Address Configuration. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The ID of the Public IP Address Prefix from where Public IP Addresses should be allocated. Changing this forces a new resource to be created. + PublicIPPrefixID *string `json:"publicIpPrefixId,omitempty" tf:"public_ip_prefix_id,omitempty"` + + // Specifies the version of the image used to create the virtual machines. 
+ Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type PublicIPAddressParameters struct { + + // The Prefix which should be used for the Domain Name Label for each Virtual Machine Instance. Azure concatenates the Domain Name Label and Virtual Machine Index to create a unique Domain Name Label for each Virtual Machine. + // +kubebuilder:validation:Optional + DomainNameLabel *string `json:"domainNameLabel,omitempty" tf:"domain_name_label,omitempty"` + + // One or more ip_tag blocks as defined above. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + IPTag []IPTagParameters `json:"ipTag,omitempty" tf:"ip_tag,omitempty"` + + // The Idle Timeout in Minutes for the Public IP Address. Possible values are in the range 4 to 32. + // +kubebuilder:validation:Optional + IdleTimeoutInMinutes *float64 `json:"idleTimeoutInMinutes,omitempty" tf:"idle_timeout_in_minutes,omitempty"` + + // The Name of the Public IP Address Configuration. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The ID of the Public IP Address Prefix from where Public IP Addresses should be allocated. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + PublicIPPrefixID *string `json:"publicIpPrefixId,omitempty" tf:"public_ip_prefix_id,omitempty"` + + // Specifies the version of the image used to create the virtual machines. + // +kubebuilder:validation:Optional + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type RollingUpgradePolicyInitParameters struct { + + // Should the Virtual Machine Scale Set ignore the Azure Zone boundaries when constructing upgrade batches? Possible values are true or false. 
+ CrossZoneUpgradesEnabled *bool `json:"crossZoneUpgradesEnabled,omitempty" tf:"cross_zone_upgrades_enabled,omitempty"` + + // The maximum percent of total virtual machine instances that will be upgraded simultaneously by the rolling upgrade in one batch. As this is a maximum, unhealthy instances in previous or future batches can cause the percentage of instances in a batch to decrease to ensure higher reliability. + MaxBatchInstancePercent *float64 `json:"maxBatchInstancePercent,omitempty" tf:"max_batch_instance_percent,omitempty"` + + // The maximum percentage of the total virtual machine instances in the scale set that can be simultaneously unhealthy, either as a result of being upgraded, or by being found in an unhealthy state by the virtual machine health checks before the rolling upgrade aborts. This constraint will be checked prior to starting any batch. + MaxUnhealthyInstancePercent *float64 `json:"maxUnhealthyInstancePercent,omitempty" tf:"max_unhealthy_instance_percent,omitempty"` + + // The maximum percentage of upgraded virtual machine instances that can be found to be in an unhealthy state. This check will happen after each batch is upgraded. If this percentage is ever exceeded, the rolling update aborts. + MaxUnhealthyUpgradedInstancePercent *float64 `json:"maxUnhealthyUpgradedInstancePercent,omitempty" tf:"max_unhealthy_upgraded_instance_percent,omitempty"` + + // The wait time between completing the update for all virtual machines in one batch and starting the next batch. The time duration should be specified in ISO 8601 format. + PauseTimeBetweenBatches *string `json:"pauseTimeBetweenBatches,omitempty" tf:"pause_time_between_batches,omitempty"` + + // Upgrade all unhealthy instances in a scale set before any healthy instances. Possible values are true or false. 
+ PrioritizeUnhealthyInstancesEnabled *bool `json:"prioritizeUnhealthyInstancesEnabled,omitempty" tf:"prioritize_unhealthy_instances_enabled,omitempty"` +} + +type RollingUpgradePolicyObservation struct { + + // Should the Virtual Machine Scale Set ignore the Azure Zone boundaries when constructing upgrade batches? Possible values are true or false. + CrossZoneUpgradesEnabled *bool `json:"crossZoneUpgradesEnabled,omitempty" tf:"cross_zone_upgrades_enabled,omitempty"` + + // The maximum percent of total virtual machine instances that will be upgraded simultaneously by the rolling upgrade in one batch. As this is a maximum, unhealthy instances in previous or future batches can cause the percentage of instances in a batch to decrease to ensure higher reliability. + MaxBatchInstancePercent *float64 `json:"maxBatchInstancePercent,omitempty" tf:"max_batch_instance_percent,omitempty"` + + // The maximum percentage of the total virtual machine instances in the scale set that can be simultaneously unhealthy, either as a result of being upgraded, or by being found in an unhealthy state by the virtual machine health checks before the rolling upgrade aborts. This constraint will be checked prior to starting any batch. + MaxUnhealthyInstancePercent *float64 `json:"maxUnhealthyInstancePercent,omitempty" tf:"max_unhealthy_instance_percent,omitempty"` + + // The maximum percentage of upgraded virtual machine instances that can be found to be in an unhealthy state. This check will happen after each batch is upgraded. If this percentage is ever exceeded, the rolling update aborts. + MaxUnhealthyUpgradedInstancePercent *float64 `json:"maxUnhealthyUpgradedInstancePercent,omitempty" tf:"max_unhealthy_upgraded_instance_percent,omitempty"` + + // The wait time between completing the update for all virtual machines in one batch and starting the next batch. The time duration should be specified in ISO 8601 format. 
+ PauseTimeBetweenBatches *string `json:"pauseTimeBetweenBatches,omitempty" tf:"pause_time_between_batches,omitempty"` + + // Upgrade all unhealthy instances in a scale set before any healthy instances. Possible values are true or false. + PrioritizeUnhealthyInstancesEnabled *bool `json:"prioritizeUnhealthyInstancesEnabled,omitempty" tf:"prioritize_unhealthy_instances_enabled,omitempty"` +} + +type RollingUpgradePolicyParameters struct { + + // Should the Virtual Machine Scale Set ignore the Azure Zone boundaries when constructing upgrade batches? Possible values are true or false. + // +kubebuilder:validation:Optional + CrossZoneUpgradesEnabled *bool `json:"crossZoneUpgradesEnabled,omitempty" tf:"cross_zone_upgrades_enabled,omitempty"` + + // The maximum percent of total virtual machine instances that will be upgraded simultaneously by the rolling upgrade in one batch. As this is a maximum, unhealthy instances in previous or future batches can cause the percentage of instances in a batch to decrease to ensure higher reliability. + // +kubebuilder:validation:Optional + MaxBatchInstancePercent *float64 `json:"maxBatchInstancePercent" tf:"max_batch_instance_percent,omitempty"` + + // The maximum percentage of the total virtual machine instances in the scale set that can be simultaneously unhealthy, either as a result of being upgraded, or by being found in an unhealthy state by the virtual machine health checks before the rolling upgrade aborts. This constraint will be checked prior to starting any batch. + // +kubebuilder:validation:Optional + MaxUnhealthyInstancePercent *float64 `json:"maxUnhealthyInstancePercent" tf:"max_unhealthy_instance_percent,omitempty"` + + // The maximum percentage of upgraded virtual machine instances that can be found to be in an unhealthy state. This check will happen after each batch is upgraded. If this percentage is ever exceeded, the rolling update aborts. 
+ // +kubebuilder:validation:Optional + MaxUnhealthyUpgradedInstancePercent *float64 `json:"maxUnhealthyUpgradedInstancePercent" tf:"max_unhealthy_upgraded_instance_percent,omitempty"` + + // The wait time between completing the update for all virtual machines in one batch and starting the next batch. The time duration should be specified in ISO 8601 format. + // +kubebuilder:validation:Optional + PauseTimeBetweenBatches *string `json:"pauseTimeBetweenBatches" tf:"pause_time_between_batches,omitempty"` + + // Upgrade all unhealthy instances in a scale set before any healthy instances. Possible values are true or false. + // +kubebuilder:validation:Optional + PrioritizeUnhealthyInstancesEnabled *bool `json:"prioritizeUnhealthyInstancesEnabled,omitempty" tf:"prioritize_unhealthy_instances_enabled,omitempty"` +} + +type ScaleInInitParameters struct { + + // Should the virtual machines chosen for removal be force deleted when the virtual machine scale set is being scaled-in? Possible values are true or false. Defaults to false. + ForceDeletionEnabled *bool `json:"forceDeletionEnabled,omitempty" tf:"force_deletion_enabled,omitempty"` + + // The scale-in policy rule that decides which virtual machines are chosen for removal when a Virtual Machine Scale Set is scaled in. Possible values for the scale-in policy rules are Default, NewestVM and OldestVM, defaults to Default. For more information about scale in policy, please refer to this doc. + Rule *string `json:"rule,omitempty" tf:"rule,omitempty"` +} + +type ScaleInObservation struct { + + // Should the virtual machines chosen for removal be force deleted when the virtual machine scale set is being scaled-in? Possible values are true or false. Defaults to false. + ForceDeletionEnabled *bool `json:"forceDeletionEnabled,omitempty" tf:"force_deletion_enabled,omitempty"` + + // The scale-in policy rule that decides which virtual machines are chosen for removal when a Virtual Machine Scale Set is scaled in. 
Possible values for the scale-in policy rules are Default, NewestVM and OldestVM, defaults to Default. For more information about scale in policy, please refer to this doc. + Rule *string `json:"rule,omitempty" tf:"rule,omitempty"` +} + +type ScaleInParameters struct { + + // Should the virtual machines chosen for removal be force deleted when the virtual machine scale set is being scaled-in? Possible values are true or false. Defaults to false. + // +kubebuilder:validation:Optional + ForceDeletionEnabled *bool `json:"forceDeletionEnabled,omitempty" tf:"force_deletion_enabled,omitempty"` + + // The scale-in policy rule that decides which virtual machines are chosen for removal when a Virtual Machine Scale Set is scaled in. Possible values for the scale-in policy rules are Default, NewestVM and OldestVM, defaults to Default. For more information about scale in policy, please refer to this doc. + // +kubebuilder:validation:Optional + Rule *string `json:"rule,omitempty" tf:"rule,omitempty"` +} + +type SecretCertificateInitParameters struct { + + // The Secret URL of a Key Vault Certificate. + URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +type SecretCertificateObservation struct { + + // The Secret URL of a Key Vault Certificate. + URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +type SecretCertificateParameters struct { + + // The Secret URL of a Key Vault Certificate. + // +kubebuilder:validation:Optional + URL *string `json:"url" tf:"url,omitempty"` +} + +type SpotRestoreInitParameters struct { + + // Should the Spot-Try-Restore feature be enabled? The Spot-Try-Restore feature will attempt to automatically restore the evicted Spot Virtual Machine Scale Set VM instances opportunistically based on capacity availability and pricing constraints. Possible values are true or false. Defaults to false. Changing this forces a new resource to be created. 
+ Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The length of time that the Virtual Machine Scale Set should attempt to restore the Spot VM instances which have been evicted. The time duration should be between 15 minutes and 120 minutes (inclusive). The time duration should be specified in the ISO 8601 format. Defaults to PT1H. Changing this forces a new resource to be created. + Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` +} + +type SpotRestoreObservation struct { + + // Should the Spot-Try-Restore feature be enabled? The Spot-Try-Restore feature will attempt to automatically restore the evicted Spot Virtual Machine Scale Set VM instances opportunistically based on capacity availability and pricing constraints. Possible values are true or false. Defaults to false. Changing this forces a new resource to be created. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The length of time that the Virtual Machine Scale Set should attempt to restore the Spot VM instances which have been evicted. The time duration should be between 15 minutes and 120 minutes (inclusive). The time duration should be specified in the ISO 8601 format. Defaults to PT1H. Changing this forces a new resource to be created. + Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` +} + +type SpotRestoreParameters struct { + + // Should the Spot-Try-Restore feature be enabled? The Spot-Try-Restore feature will attempt to automatically restore the evicted Spot Virtual Machine Scale Set VM instances opportunistically based on capacity availability and pricing constraints. Possible values are true or false. Defaults to false. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The length of time that the Virtual Machine Scale Set should attempt to restore the Spot VM instances which have been evicted. 
The time duration should be between 15 minutes and 120 minutes (inclusive). The time duration should be specified in the ISO 8601 format. Defaults to PT1H. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` +} + +type TerminateNotificationInitParameters struct { + + // Should the terminate notification be enabled on this Virtual Machine Scale Set? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Length of time (in minutes, between 5 and 15) a notification to be sent to the VM on the instance metadata server till the VM gets deleted. The time duration should be specified in ISO 8601 format. Defaults to PT5M. + Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` +} + +type TerminateNotificationObservation struct { + + // Should the terminate notification be enabled on this Virtual Machine Scale Set? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Length of time (in minutes, between 5 and 15) a notification to be sent to the VM on the instance metadata server till the VM gets deleted. The time duration should be specified in ISO 8601 format. Defaults to PT5M. + Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` +} + +type TerminateNotificationParameters struct { + + // Should the terminate notification be enabled on this Virtual Machine Scale Set? + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` + + // Length of time (in minutes, between 5 and 15) a notification to be sent to the VM on the instance metadata server till the VM gets deleted. The time duration should be specified in ISO 8601 format. Defaults to PT5M. 
+ // +kubebuilder:validation:Optional + Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` +} + +// LinuxVirtualMachineScaleSetSpec defines the desired state of LinuxVirtualMachineScaleSet +type LinuxVirtualMachineScaleSetSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider LinuxVirtualMachineScaleSetParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider LinuxVirtualMachineScaleSetInitParameters `json:"initProvider,omitempty"` +} + +// LinuxVirtualMachineScaleSetStatus defines the observed state of LinuxVirtualMachineScaleSet. +type LinuxVirtualMachineScaleSetStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider LinuxVirtualMachineScaleSetObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// LinuxVirtualMachineScaleSet is the Schema for the LinuxVirtualMachineScaleSets API. Manages a Linux Virtual Machine Scale Set. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type LinuxVirtualMachineScaleSet struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.adminUsername) || (has(self.initProvider) && has(self.initProvider.adminUsername))",message="spec.forProvider.adminUsername is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.networkInterface) || (has(self.initProvider) && has(self.initProvider.networkInterface))",message="spec.forProvider.networkInterface is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.osDisk) || (has(self.initProvider) && has(self.initProvider.osDisk))",message="spec.forProvider.osDisk is a required parameter" + // 
+kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.sku) || (has(self.initProvider) && has(self.initProvider.sku))",message="spec.forProvider.sku is a required parameter" + Spec LinuxVirtualMachineScaleSetSpec `json:"spec"` + Status LinuxVirtualMachineScaleSetStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// LinuxVirtualMachineScaleSetList contains a list of LinuxVirtualMachineScaleSets +type LinuxVirtualMachineScaleSetList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []LinuxVirtualMachineScaleSet `json:"items"` +} + +// Repository type metadata. +var ( + LinuxVirtualMachineScaleSet_Kind = "LinuxVirtualMachineScaleSet" + LinuxVirtualMachineScaleSet_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: LinuxVirtualMachineScaleSet_Kind}.String() + LinuxVirtualMachineScaleSet_KindAPIVersion = LinuxVirtualMachineScaleSet_Kind + "." + CRDGroupVersion.String() + LinuxVirtualMachineScaleSet_GroupVersionKind = CRDGroupVersion.WithKind(LinuxVirtualMachineScaleSet_Kind) +) + +func init() { + SchemeBuilder.Register(&LinuxVirtualMachineScaleSet{}, &LinuxVirtualMachineScaleSetList{}) +} diff --git a/apis/compute/v1beta2/zz_manageddisk_terraformed.go b/apis/compute/v1beta2/zz_manageddisk_terraformed.go new file mode 100755 index 000000000..3146245e5 --- /dev/null +++ b/apis/compute/v1beta2/zz_manageddisk_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ManagedDisk +func (mg *ManagedDisk) GetTerraformResourceType() string { + return "azurerm_managed_disk" +} + +// GetConnectionDetailsMapping for this ManagedDisk +func (tr *ManagedDisk) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ManagedDisk +func (tr *ManagedDisk) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ManagedDisk +func (tr *ManagedDisk) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ManagedDisk +func (tr *ManagedDisk) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ManagedDisk +func (tr *ManagedDisk) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ManagedDisk +func (tr *ManagedDisk) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ManagedDisk +func (tr *ManagedDisk) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := 
map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this ManagedDisk +func (tr *ManagedDisk) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ManagedDisk using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ManagedDisk) LateInitialize(attrs []byte) (bool, error) { + params := &ManagedDiskParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ManagedDisk) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/compute/v1beta2/zz_manageddisk_types.go b/apis/compute/v1beta2/zz_manageddisk_types.go new file mode 100755 index 000000000..5894d3349 --- /dev/null +++ b/apis/compute/v1beta2/zz_manageddisk_types.go @@ -0,0 +1,552 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DiskEncryptionKeyInitParameters struct { + + // The URL to the Key Vault Secret used as the Disk Encryption Key. This can be found as id on the azurerm_key_vault_secret resource. + SecretURL *string `json:"secretUrl,omitempty" tf:"secret_url,omitempty"` + + // The ID of the source Key Vault. This can be found as id on the azurerm_key_vault resource. + SourceVaultID *string `json:"sourceVaultId,omitempty" tf:"source_vault_id,omitempty"` +} + +type DiskEncryptionKeyObservation struct { + + // The URL to the Key Vault Secret used as the Disk Encryption Key. This can be found as id on the azurerm_key_vault_secret resource. + SecretURL *string `json:"secretUrl,omitempty" tf:"secret_url,omitempty"` + + // The ID of the source Key Vault. This can be found as id on the azurerm_key_vault resource. + SourceVaultID *string `json:"sourceVaultId,omitempty" tf:"source_vault_id,omitempty"` +} + +type DiskEncryptionKeyParameters struct { + + // The URL to the Key Vault Secret used as the Disk Encryption Key. This can be found as id on the azurerm_key_vault_secret resource. 
+ // +kubebuilder:validation:Optional + SecretURL *string `json:"secretUrl" tf:"secret_url,omitempty"` + + // The ID of the source Key Vault. This can be found as id on the azurerm_key_vault resource. + // +kubebuilder:validation:Optional + SourceVaultID *string `json:"sourceVaultId" tf:"source_vault_id,omitempty"` +} + +type EncryptionSettingsInitParameters struct { + + // A disk_encryption_key block as defined above. + DiskEncryptionKey *DiskEncryptionKeyInitParameters `json:"diskEncryptionKey,omitempty" tf:"disk_encryption_key,omitempty"` + + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // A key_encryption_key block as defined below. + KeyEncryptionKey *KeyEncryptionKeyInitParameters `json:"keyEncryptionKey,omitempty" tf:"key_encryption_key,omitempty"` +} + +type EncryptionSettingsObservation struct { + + // A disk_encryption_key block as defined above. + DiskEncryptionKey *DiskEncryptionKeyObservation `json:"diskEncryptionKey,omitempty" tf:"disk_encryption_key,omitempty"` + + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // A key_encryption_key block as defined below. + KeyEncryptionKey *KeyEncryptionKeyObservation `json:"keyEncryptionKey,omitempty" tf:"key_encryption_key,omitempty"` +} + +type EncryptionSettingsParameters struct { + + // A disk_encryption_key block as defined above. + // +kubebuilder:validation:Optional + DiskEncryptionKey *DiskEncryptionKeyParameters `json:"diskEncryptionKey,omitempty" tf:"disk_encryption_key,omitempty"` + + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // A key_encryption_key block as defined below. + // +kubebuilder:validation:Optional + KeyEncryptionKey *KeyEncryptionKeyParameters `json:"keyEncryptionKey,omitempty" tf:"key_encryption_key,omitempty"` +} + +type KeyEncryptionKeyInitParameters struct { + + // The URL to the Key Vault Key used as the Key Encryption Key. 
This can be found as id on the azurerm_key_vault_key resource. + KeyURL *string `json:"keyUrl,omitempty" tf:"key_url,omitempty"` + + // The ID of the source Key Vault. This can be found as id on the azurerm_key_vault resource. + SourceVaultID *string `json:"sourceVaultId,omitempty" tf:"source_vault_id,omitempty"` +} + +type KeyEncryptionKeyObservation struct { + + // The URL to the Key Vault Key used as the Key Encryption Key. This can be found as id on the azurerm_key_vault_key resource. + KeyURL *string `json:"keyUrl,omitempty" tf:"key_url,omitempty"` + + // The ID of the source Key Vault. This can be found as id on the azurerm_key_vault resource. + SourceVaultID *string `json:"sourceVaultId,omitempty" tf:"source_vault_id,omitempty"` +} + +type KeyEncryptionKeyParameters struct { + + // The URL to the Key Vault Key used as the Key Encryption Key. This can be found as id on the azurerm_key_vault_key resource. + // +kubebuilder:validation:Optional + KeyURL *string `json:"keyUrl" tf:"key_url,omitempty"` + + // The ID of the source Key Vault. This can be found as id on the azurerm_key_vault resource. + // +kubebuilder:validation:Optional + SourceVaultID *string `json:"sourceVaultId" tf:"source_vault_id,omitempty"` +} + +type ManagedDiskInitParameters struct { + + // The method to use when creating the managed disk. Changing this forces a new resource to be created. Possible values include: * Import - Import a VHD file in to the managed disk (VHD specified with source_uri). * ImportSecure - Securely import a VHD file in to the managed disk (VHD specified with source_uri). * Empty - Create an empty managed disk. * Copy - Copy an existing managed disk or snapshot (specified with source_resource_id). * FromImage - Copy a Platform Image (specified with image_reference_id) * Restore - Set by Azure Backup or Site Recovery on a restored disk (specified with source_resource_id). * Upload - Upload a VHD disk with the help of SAS URL (to be used with upload_size_bytes). 
+ CreateOption *string `json:"createOption,omitempty" tf:"create_option,omitempty"` + + // The ID of the disk access resource for using private endpoints on disks. + DiskAccessID *string `json:"diskAccessId,omitempty" tf:"disk_access_id,omitempty"` + + // The ID of a Disk Encryption Set which should be used to encrypt this Managed Disk. Conflicts with secure_vm_disk_encryption_set_id. + DiskEncryptionSetID *string `json:"diskEncryptionSetId,omitempty" tf:"disk_encryption_set_id,omitempty"` + + // The number of IOPS allowed across all VMs mounting the shared disk as read-only; only settable for UltraSSD disks and PremiumV2 disks with shared disk enabled. One operation can transfer between 4k and 256k bytes. + DiskIopsReadOnly *float64 `json:"diskIopsReadOnly,omitempty" tf:"disk_iops_read_only,omitempty"` + + // The number of IOPS allowed for this disk; only settable for UltraSSD disks and PremiumV2 disks. One operation can transfer between 4k and 256k bytes. + DiskIopsReadWrite *float64 `json:"diskIopsReadWrite,omitempty" tf:"disk_iops_read_write,omitempty"` + + // The bandwidth allowed across all VMs mounting the shared disk as read-only; only settable for UltraSSD disks and PremiumV2 disks with shared disk enabled. MBps means millions of bytes per second. + DiskMbpsReadOnly *float64 `json:"diskMbpsReadOnly,omitempty" tf:"disk_mbps_read_only,omitempty"` + + // The bandwidth allowed for this disk; only settable for UltraSSD disks and PremiumV2 disks. MBps means millions of bytes per second. + DiskMbpsReadWrite *float64 `json:"diskMbpsReadWrite,omitempty" tf:"disk_mbps_read_write,omitempty"` + + // Specifies the size of the managed disk to create in gigabytes. If create_option is Copy or FromImage, then the value must be equal to or greater than the source's size. The size can only be increased. + DiskSizeGb *float64 `json:"diskSizeGb,omitempty" tf:"disk_size_gb,omitempty"` + + // Specifies the Edge Zone within the Azure Region where this Managed Disk should exist. 
Changing this forces a new Managed Disk to be created. + EdgeZone *string `json:"edgeZone,omitempty" tf:"edge_zone,omitempty"` + + // A encryption_settings block as defined below. + EncryptionSettings *EncryptionSettingsInitParameters `json:"encryptionSettings,omitempty" tf:"encryption_settings,omitempty"` + + // ID of a Gallery Image Version to copy when create_option is FromImage. This field cannot be specified if image_reference_id is specified. Changing this forces a new resource to be created. + GalleryImageReferenceID *string `json:"galleryImageReferenceId,omitempty" tf:"gallery_image_reference_id,omitempty"` + + // The HyperV Generation of the Disk when the source of an Import or Copy operation targets a source that contains an operating system. Possible values are V1 and V2. For ImportSecure it must be set to V2. Changing this forces a new resource to be created. + HyperVGeneration *string `json:"hyperVGeneration,omitempty" tf:"hyper_v_generation,omitempty"` + + // ID of an existing platform/marketplace disk image to copy when create_option is FromImage. This field cannot be specified if gallery_image_reference_id is specified. Changing this forces a new resource to be created. + ImageReferenceID *string `json:"imageReferenceId,omitempty" tf:"image_reference_id,omitempty"` + + // Specified the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Logical Sector Size. Possible values are: 512 and 4096. Defaults to 4096. Changing this forces a new resource to be created. + LogicalSectorSize *float64 `json:"logicalSectorSize,omitempty" tf:"logical_sector_size,omitempty"` + + // The maximum number of VMs that can attach to the disk at the same time. Value greater than one indicates a disk that can be mounted on multiple VMs at the same time. 
+ MaxShares *float64 `json:"maxShares,omitempty" tf:"max_shares,omitempty"` + + // Policy for accessing the disk via network. Allowed values are AllowAll, AllowPrivate, and DenyAll. + NetworkAccessPolicy *string `json:"networkAccessPolicy,omitempty" tf:"network_access_policy,omitempty"` + + // Specifies if On-Demand Bursting is enabled for the Managed Disk. + OnDemandBurstingEnabled *bool `json:"onDemandBurstingEnabled,omitempty" tf:"on_demand_bursting_enabled,omitempty"` + + // Specifies whether this Managed Disk should be optimized for frequent disk attachments (where a disk is attached/detached more than 5 times in a day). Defaults to false. + OptimizedFrequentAttachEnabled *bool `json:"optimizedFrequentAttachEnabled,omitempty" tf:"optimized_frequent_attach_enabled,omitempty"` + + // Specify a value when the source of an Import, ImportSecure or Copy operation targets a source that contains an operating system. Valid values are Linux or Windows. + OsType *string `json:"osType,omitempty" tf:"os_type,omitempty"` + + // Specifies whether Performance Plus is enabled for this Managed Disk. Defaults to false. Changing this forces a new resource to be created. + PerformancePlusEnabled *bool `json:"performancePlusEnabled,omitempty" tf:"performance_plus_enabled,omitempty"` + + // Whether it is allowed to access the disk via public network. Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The ID of the Disk Encryption Set which should be used to Encrypt this OS Disk when the Virtual Machine is a Confidential VM. Conflicts with disk_encryption_set_id. Changing this forces a new resource to be created. + SecureVMDiskEncryptionSetID *string `json:"secureVmDiskEncryptionSetId,omitempty" tf:"secure_vm_disk_encryption_set_id,omitempty"` + + // Security Type of the Managed Disk when it is used for a Confidential VM. 
Possible values are ConfidentialVM_VMGuestStateOnlyEncryptedWithPlatformKey, ConfidentialVM_DiskEncryptedWithPlatformKey and ConfidentialVM_DiskEncryptedWithCustomerKey. Changing this forces a new resource to be created. + SecurityType *string `json:"securityType,omitempty" tf:"security_type,omitempty"` + + // The ID of an existing Managed Disk or Snapshot to copy when create_option is Copy or the recovery point to restore when create_option is Restore. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/compute/v1beta2.ManagedDisk + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + SourceResourceID *string `json:"sourceResourceId,omitempty" tf:"source_resource_id,omitempty"` + + // Reference to a ManagedDisk in compute to populate sourceResourceId. + // +kubebuilder:validation:Optional + SourceResourceIDRef *v1.Reference `json:"sourceResourceIdRef,omitempty" tf:"-"` + + // Selector for a ManagedDisk in compute to populate sourceResourceId. + // +kubebuilder:validation:Optional + SourceResourceIDSelector *v1.Selector `json:"sourceResourceIdSelector,omitempty" tf:"-"` + + // URI to a valid VHD file to be used when create_option is Import or ImportSecure. Changing this forces a new resource to be created. + SourceURI *string `json:"sourceUri,omitempty" tf:"source_uri,omitempty"` + + // The ID of the Storage Account where the source_uri is located. Required when create_option is set to Import or ImportSecure. Changing this forces a new resource to be created. + StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` + + // The type of storage to use for the managed disk. Possible values are Standard_LRS, StandardSSD_ZRS, Premium_LRS, PremiumV2_LRS, Premium_ZRS, StandardSSD_LRS or UltraSSD_LRS. 
+ StorageAccountType *string `json:"storageAccountType,omitempty" tf:"storage_account_type,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The disk performance tier to use. Possible values are documented here. This feature is currently supported only for premium SSDs. + Tier *string `json:"tier,omitempty" tf:"tier,omitempty"` + + // Specifies if Trusted Launch is enabled for the Managed Disk. Changing this forces a new resource to be created. + TrustedLaunchEnabled *bool `json:"trustedLaunchEnabled,omitempty" tf:"trusted_launch_enabled,omitempty"` + + // Specifies the size of the managed disk to create in bytes. Required when create_option is Upload. The value must be equal to the source disk to be copied in bytes. Source disk size could be calculated with ls -l or wc -c. More information can be found at Copy a managed disk. Changing this forces a new resource to be created. + UploadSizeBytes *float64 `json:"uploadSizeBytes,omitempty" tf:"upload_size_bytes,omitempty"` + + // Specifies the Availability Zone in which this Managed Disk should be located. Changing this property forces a new resource to be created. + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` +} + +type ManagedDiskObservation struct { + + // The method to use when creating the managed disk. Changing this forces a new resource to be created. Possible values include: * Import - Import a VHD file in to the managed disk (VHD specified with source_uri). * ImportSecure - Securely import a VHD file in to the managed disk (VHD specified with source_uri). * Empty - Create an empty managed disk. * Copy - Copy an existing managed disk or snapshot (specified with source_resource_id). * FromImage - Copy a Platform Image (specified with image_reference_id) * Restore - Set by Azure Backup or Site Recovery on a restored disk (specified with source_resource_id). 
* Upload - Upload a VHD disk with the help of SAS URL (to be used with upload_size_bytes). + CreateOption *string `json:"createOption,omitempty" tf:"create_option,omitempty"` + + // The ID of the disk access resource for using private endpoints on disks. + DiskAccessID *string `json:"diskAccessId,omitempty" tf:"disk_access_id,omitempty"` + + // The ID of a Disk Encryption Set which should be used to encrypt this Managed Disk. Conflicts with secure_vm_disk_encryption_set_id. + DiskEncryptionSetID *string `json:"diskEncryptionSetId,omitempty" tf:"disk_encryption_set_id,omitempty"` + + // The number of IOPS allowed across all VMs mounting the shared disk as read-only; only settable for UltraSSD disks and PremiumV2 disks with shared disk enabled. One operation can transfer between 4k and 256k bytes. + DiskIopsReadOnly *float64 `json:"diskIopsReadOnly,omitempty" tf:"disk_iops_read_only,omitempty"` + + // The number of IOPS allowed for this disk; only settable for UltraSSD disks and PremiumV2 disks. One operation can transfer between 4k and 256k bytes. + DiskIopsReadWrite *float64 `json:"diskIopsReadWrite,omitempty" tf:"disk_iops_read_write,omitempty"` + + // The bandwidth allowed across all VMs mounting the shared disk as read-only; only settable for UltraSSD disks and PremiumV2 disks with shared disk enabled. MBps means millions of bytes per second. + DiskMbpsReadOnly *float64 `json:"diskMbpsReadOnly,omitempty" tf:"disk_mbps_read_only,omitempty"` + + // The bandwidth allowed for this disk; only settable for UltraSSD disks and PremiumV2 disks. MBps means millions of bytes per second. + DiskMbpsReadWrite *float64 `json:"diskMbpsReadWrite,omitempty" tf:"disk_mbps_read_write,omitempty"` + + // Specifies the size of the managed disk to create in gigabytes. If create_option is Copy or FromImage, then the value must be equal to or greater than the source's size. The size can only be increased. 
+ DiskSizeGb *float64 `json:"diskSizeGb,omitempty" tf:"disk_size_gb,omitempty"` + + // Specifies the Edge Zone within the Azure Region where this Managed Disk should exist. Changing this forces a new Managed Disk to be created. + EdgeZone *string `json:"edgeZone,omitempty" tf:"edge_zone,omitempty"` + + // A encryption_settings block as defined below. + EncryptionSettings *EncryptionSettingsObservation `json:"encryptionSettings,omitempty" tf:"encryption_settings,omitempty"` + + // ID of a Gallery Image Version to copy when create_option is FromImage. This field cannot be specified if image_reference_id is specified. Changing this forces a new resource to be created. + GalleryImageReferenceID *string `json:"galleryImageReferenceId,omitempty" tf:"gallery_image_reference_id,omitempty"` + + // The HyperV Generation of the Disk when the source of an Import or Copy operation targets a source that contains an operating system. Possible values are V1 and V2. For ImportSecure it must be set to V2. Changing this forces a new resource to be created. + HyperVGeneration *string `json:"hyperVGeneration,omitempty" tf:"hyper_v_generation,omitempty"` + + // The ID of the Managed Disk. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // ID of an existing platform/marketplace disk image to copy when create_option is FromImage. This field cannot be specified if gallery_image_reference_id is specified. Changing this forces a new resource to be created. + ImageReferenceID *string `json:"imageReferenceId,omitempty" tf:"image_reference_id,omitempty"` + + // Specified the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Logical Sector Size. Possible values are: 512 and 4096. Defaults to 4096. Changing this forces a new resource to be created. 
+ LogicalSectorSize *float64 `json:"logicalSectorSize,omitempty" tf:"logical_sector_size,omitempty"` + + // The maximum number of VMs that can attach to the disk at the same time. Value greater than one indicates a disk that can be mounted on multiple VMs at the same time. + MaxShares *float64 `json:"maxShares,omitempty" tf:"max_shares,omitempty"` + + // Policy for accessing the disk via network. Allowed values are AllowAll, AllowPrivate, and DenyAll. + NetworkAccessPolicy *string `json:"networkAccessPolicy,omitempty" tf:"network_access_policy,omitempty"` + + // Specifies if On-Demand Bursting is enabled for the Managed Disk. + OnDemandBurstingEnabled *bool `json:"onDemandBurstingEnabled,omitempty" tf:"on_demand_bursting_enabled,omitempty"` + + // Specifies whether this Managed Disk should be optimized for frequent disk attachments (where a disk is attached/detached more than 5 times in a day). Defaults to false. + OptimizedFrequentAttachEnabled *bool `json:"optimizedFrequentAttachEnabled,omitempty" tf:"optimized_frequent_attach_enabled,omitempty"` + + // Specify a value when the source of an Import, ImportSecure or Copy operation targets a source that contains an operating system. Valid values are Linux or Windows. + OsType *string `json:"osType,omitempty" tf:"os_type,omitempty"` + + // Specifies whether Performance Plus is enabled for this Managed Disk. Defaults to false. Changing this forces a new resource to be created. + PerformancePlusEnabled *bool `json:"performancePlusEnabled,omitempty" tf:"performance_plus_enabled,omitempty"` + + // Whether it is allowed to access the disk via public network. Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The name of the Resource Group where the Managed Disk should exist. Changing this forces a new resource to be created. 
+ ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // The ID of the Disk Encryption Set which should be used to Encrypt this OS Disk when the Virtual Machine is a Confidential VM. Conflicts with disk_encryption_set_id. Changing this forces a new resource to be created. + SecureVMDiskEncryptionSetID *string `json:"secureVmDiskEncryptionSetId,omitempty" tf:"secure_vm_disk_encryption_set_id,omitempty"` + + // Security Type of the Managed Disk when it is used for a Confidential VM. Possible values are ConfidentialVM_VMGuestStateOnlyEncryptedWithPlatformKey, ConfidentialVM_DiskEncryptedWithPlatformKey and ConfidentialVM_DiskEncryptedWithCustomerKey. Changing this forces a new resource to be created. + SecurityType *string `json:"securityType,omitempty" tf:"security_type,omitempty"` + + // The ID of an existing Managed Disk or Snapshot to copy when create_option is Copy or the recovery point to restore when create_option is Restore. Changing this forces a new resource to be created. + SourceResourceID *string `json:"sourceResourceId,omitempty" tf:"source_resource_id,omitempty"` + + // URI to a valid VHD file to be used when create_option is Import or ImportSecure. Changing this forces a new resource to be created. + SourceURI *string `json:"sourceUri,omitempty" tf:"source_uri,omitempty"` + + // The ID of the Storage Account where the source_uri is located. Required when create_option is set to Import or ImportSecure. Changing this forces a new resource to be created. + StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` + + // The type of storage to use for the managed disk. Possible values are Standard_LRS, StandardSSD_ZRS, Premium_LRS, PremiumV2_LRS, Premium_ZRS, StandardSSD_LRS or UltraSSD_LRS. + StorageAccountType *string `json:"storageAccountType,omitempty" tf:"storage_account_type,omitempty"` + + // A mapping of tags to assign to the resource. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The disk performance tier to use. Possible values are documented here. This feature is currently supported only for premium SSDs. + Tier *string `json:"tier,omitempty" tf:"tier,omitempty"` + + // Specifies if Trusted Launch is enabled for the Managed Disk. Changing this forces a new resource to be created. + TrustedLaunchEnabled *bool `json:"trustedLaunchEnabled,omitempty" tf:"trusted_launch_enabled,omitempty"` + + // Specifies the size of the managed disk to create in bytes. Required when create_option is Upload. The value must be equal to the source disk to be copied in bytes. Source disk size could be calculated with ls -l or wc -c. More information can be found at Copy a managed disk. Changing this forces a new resource to be created. + UploadSizeBytes *float64 `json:"uploadSizeBytes,omitempty" tf:"upload_size_bytes,omitempty"` + + // Specifies the Availability Zone in which this Managed Disk should be located. Changing this property forces a new resource to be created. + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` +} + +type ManagedDiskParameters struct { + + // The method to use when creating the managed disk. Changing this forces a new resource to be created. Possible values include: * Import - Import a VHD file in to the managed disk (VHD specified with source_uri). * ImportSecure - Securely import a VHD file in to the managed disk (VHD specified with source_uri). * Empty - Create an empty managed disk. * Copy - Copy an existing managed disk or snapshot (specified with source_resource_id). * FromImage - Copy a Platform Image (specified with image_reference_id) * Restore - Set by Azure Backup or Site Recovery on a restored disk (specified with source_resource_id). * Upload - Upload a VHD disk with the help of SAS URL (to be used with upload_size_bytes). 
+ // +kubebuilder:validation:Optional + CreateOption *string `json:"createOption,omitempty" tf:"create_option,omitempty"` + + // The ID of the disk access resource for using private endpoints on disks. + // +kubebuilder:validation:Optional + DiskAccessID *string `json:"diskAccessId,omitempty" tf:"disk_access_id,omitempty"` + + // The ID of a Disk Encryption Set which should be used to encrypt this Managed Disk. Conflicts with secure_vm_disk_encryption_set_id. + // +kubebuilder:validation:Optional + DiskEncryptionSetID *string `json:"diskEncryptionSetId,omitempty" tf:"disk_encryption_set_id,omitempty"` + + // The number of IOPS allowed across all VMs mounting the shared disk as read-only; only settable for UltraSSD disks and PremiumV2 disks with shared disk enabled. One operation can transfer between 4k and 256k bytes. + // +kubebuilder:validation:Optional + DiskIopsReadOnly *float64 `json:"diskIopsReadOnly,omitempty" tf:"disk_iops_read_only,omitempty"` + + // The number of IOPS allowed for this disk; only settable for UltraSSD disks and PremiumV2 disks. One operation can transfer between 4k and 256k bytes. + // +kubebuilder:validation:Optional + DiskIopsReadWrite *float64 `json:"diskIopsReadWrite,omitempty" tf:"disk_iops_read_write,omitempty"` + + // The bandwidth allowed across all VMs mounting the shared disk as read-only; only settable for UltraSSD disks and PremiumV2 disks with shared disk enabled. MBps means millions of bytes per second. + // +kubebuilder:validation:Optional + DiskMbpsReadOnly *float64 `json:"diskMbpsReadOnly,omitempty" tf:"disk_mbps_read_only,omitempty"` + + // The bandwidth allowed for this disk; only settable for UltraSSD disks and PremiumV2 disks. MBps means millions of bytes per second. + // +kubebuilder:validation:Optional + DiskMbpsReadWrite *float64 `json:"diskMbpsReadWrite,omitempty" tf:"disk_mbps_read_write,omitempty"` + + // Specifies the size of the managed disk to create in gigabytes. 
If create_option is Copy or FromImage, then the value must be equal to or greater than the source's size. The size can only be increased. + // +kubebuilder:validation:Optional + DiskSizeGb *float64 `json:"diskSizeGb,omitempty" tf:"disk_size_gb,omitempty"` + + // Specifies the Edge Zone within the Azure Region where this Managed Disk should exist. Changing this forces a new Managed Disk to be created. + // +kubebuilder:validation:Optional + EdgeZone *string `json:"edgeZone,omitempty" tf:"edge_zone,omitempty"` + + // A encryption_settings block as defined below. + // +kubebuilder:validation:Optional + EncryptionSettings *EncryptionSettingsParameters `json:"encryptionSettings,omitempty" tf:"encryption_settings,omitempty"` + + // ID of a Gallery Image Version to copy when create_option is FromImage. This field cannot be specified if image_reference_id is specified. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + GalleryImageReferenceID *string `json:"galleryImageReferenceId,omitempty" tf:"gallery_image_reference_id,omitempty"` + + // The HyperV Generation of the Disk when the source of an Import or Copy operation targets a source that contains an operating system. Possible values are V1 and V2. For ImportSecure it must be set to V2. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + HyperVGeneration *string `json:"hyperVGeneration,omitempty" tf:"hyper_v_generation,omitempty"` + + // ID of an existing platform/marketplace disk image to copy when create_option is FromImage. This field cannot be specified if gallery_image_reference_id is specified. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ImageReferenceID *string `json:"imageReferenceId,omitempty" tf:"image_reference_id,omitempty"` + + // Specified the supported Azure location where the resource exists. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Logical Sector Size. Possible values are: 512 and 4096. Defaults to 4096. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + LogicalSectorSize *float64 `json:"logicalSectorSize,omitempty" tf:"logical_sector_size,omitempty"` + + // The maximum number of VMs that can attach to the disk at the same time. Value greater than one indicates a disk that can be mounted on multiple VMs at the same time. + // +kubebuilder:validation:Optional + MaxShares *float64 `json:"maxShares,omitempty" tf:"max_shares,omitempty"` + + // Policy for accessing the disk via network. Allowed values are AllowAll, AllowPrivate, and DenyAll. + // +kubebuilder:validation:Optional + NetworkAccessPolicy *string `json:"networkAccessPolicy,omitempty" tf:"network_access_policy,omitempty"` + + // Specifies if On-Demand Bursting is enabled for the Managed Disk. + // +kubebuilder:validation:Optional + OnDemandBurstingEnabled *bool `json:"onDemandBurstingEnabled,omitempty" tf:"on_demand_bursting_enabled,omitempty"` + + // Specifies whether this Managed Disk should be optimized for frequent disk attachments (where a disk is attached/detached more than 5 times in a day). Defaults to false. + // +kubebuilder:validation:Optional + OptimizedFrequentAttachEnabled *bool `json:"optimizedFrequentAttachEnabled,omitempty" tf:"optimized_frequent_attach_enabled,omitempty"` + + // Specify a value when the source of an Import, ImportSecure or Copy operation targets a source that contains an operating system. Valid values are Linux or Windows. + // +kubebuilder:validation:Optional + OsType *string `json:"osType,omitempty" tf:"os_type,omitempty"` + + // Specifies whether Performance Plus is enabled for this Managed Disk. Defaults to false. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + PerformancePlusEnabled *bool `json:"performancePlusEnabled,omitempty" tf:"performance_plus_enabled,omitempty"` + + // Whether it is allowed to access the disk via public network. Defaults to true. + // +kubebuilder:validation:Optional + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The name of the Resource Group where the Managed Disk should exist. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // The ID of the Disk Encryption Set which should be used to Encrypt this OS Disk when the Virtual Machine is a Confidential VM. Conflicts with disk_encryption_set_id. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + SecureVMDiskEncryptionSetID *string `json:"secureVmDiskEncryptionSetId,omitempty" tf:"secure_vm_disk_encryption_set_id,omitempty"` + + // Security Type of the Managed Disk when it is used for a Confidential VM. Possible values are ConfidentialVM_VMGuestStateOnlyEncryptedWithPlatformKey, ConfidentialVM_DiskEncryptedWithPlatformKey and ConfidentialVM_DiskEncryptedWithCustomerKey. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + SecurityType *string `json:"securityType,omitempty" tf:"security_type,omitempty"` + + // The ID of an existing Managed Disk or Snapshot to copy when create_option is Copy or the recovery point to restore when create_option is Restore. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/compute/v1beta2.ManagedDisk + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + SourceResourceID *string `json:"sourceResourceId,omitempty" tf:"source_resource_id,omitempty"` + + // Reference to a ManagedDisk in compute to populate sourceResourceId. + // +kubebuilder:validation:Optional + SourceResourceIDRef *v1.Reference `json:"sourceResourceIdRef,omitempty" tf:"-"` + + // Selector for a ManagedDisk in compute to populate sourceResourceId. + // +kubebuilder:validation:Optional + SourceResourceIDSelector *v1.Selector `json:"sourceResourceIdSelector,omitempty" tf:"-"` + + // URI to a valid VHD file to be used when create_option is Import or ImportSecure. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + SourceURI *string `json:"sourceUri,omitempty" tf:"source_uri,omitempty"` + + // The ID of the Storage Account where the source_uri is located. Required when create_option is set to Import or ImportSecure. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` + + // The type of storage to use for the managed disk. Possible values are Standard_LRS, StandardSSD_ZRS, Premium_LRS, PremiumV2_LRS, Premium_ZRS, StandardSSD_LRS or UltraSSD_LRS. 
+ // +kubebuilder:validation:Optional + StorageAccountType *string `json:"storageAccountType,omitempty" tf:"storage_account_type,omitempty"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The disk performance tier to use. Possible values are documented here. This feature is currently supported only for premium SSDs. + // +kubebuilder:validation:Optional + Tier *string `json:"tier,omitempty" tf:"tier,omitempty"` + + // Specifies if Trusted Launch is enabled for the Managed Disk. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + TrustedLaunchEnabled *bool `json:"trustedLaunchEnabled,omitempty" tf:"trusted_launch_enabled,omitempty"` + + // Specifies the size of the managed disk to create in bytes. Required when create_option is Upload. The value must be equal to the source disk to be copied in bytes. Source disk size could be calculated with ls -l or wc -c. More information can be found at Copy a managed disk. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + UploadSizeBytes *float64 `json:"uploadSizeBytes,omitempty" tf:"upload_size_bytes,omitempty"` + + // Specifies the Availability Zone in which this Managed Disk should be located. Changing this property forces a new resource to be created. + // +kubebuilder:validation:Optional + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` +} + +// ManagedDiskSpec defines the desired state of ManagedDisk +type ManagedDiskSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ManagedDiskParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. 
The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ManagedDiskInitParameters `json:"initProvider,omitempty"` +} + +// ManagedDiskStatus defines the observed state of ManagedDisk. +type ManagedDiskStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ManagedDiskObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ManagedDisk is the Schema for the ManagedDisks API. Manages a Managed Disk. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type ManagedDisk struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.createOption) || (has(self.initProvider) && has(self.initProvider.createOption))",message="spec.forProvider.createOption is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || 
has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.storageAccountType) || (has(self.initProvider) && has(self.initProvider.storageAccountType))",message="spec.forProvider.storageAccountType is a required parameter" + Spec ManagedDiskSpec `json:"spec"` + Status ManagedDiskStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ManagedDiskList contains a list of ManagedDisks +type ManagedDiskList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ManagedDisk `json:"items"` +} + +// Repository type metadata. +var ( + ManagedDisk_Kind = "ManagedDisk" + ManagedDisk_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ManagedDisk_Kind}.String() + ManagedDisk_KindAPIVersion = ManagedDisk_Kind + "." + CRDGroupVersion.String() + ManagedDisk_GroupVersionKind = CRDGroupVersion.WithKind(ManagedDisk_Kind) +) + +func init() { + SchemeBuilder.Register(&ManagedDisk{}, &ManagedDiskList{}) +} diff --git a/apis/compute/v1beta2/zz_orchestratedvirtualmachinescaleset_terraformed.go b/apis/compute/v1beta2/zz_orchestratedvirtualmachinescaleset_terraformed.go new file mode 100755 index 000000000..ff4f561c5 --- /dev/null +++ b/apis/compute/v1beta2/zz_orchestratedvirtualmachinescaleset_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this OrchestratedVirtualMachineScaleSet +func (mg *OrchestratedVirtualMachineScaleSet) GetTerraformResourceType() string { + return "azurerm_orchestrated_virtual_machine_scale_set" +} + +// GetConnectionDetailsMapping for this OrchestratedVirtualMachineScaleSet +func (tr *OrchestratedVirtualMachineScaleSet) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"extension[*].protected_settings": "spec.forProvider.extension[*].protectedSettingsSecretRef", "os_profile[*].custom_data": "spec.forProvider.osProfile[*].customDataSecretRef", "os_profile[*].linux_configuration[*].admin_password": "spec.forProvider.osProfile[*].linuxConfiguration[*].adminPasswordSecretRef", "os_profile[*].windows_configuration[*].additional_unattend_content[*].content": "spec.forProvider.osProfile[*].windowsConfiguration[*].additionalUnattendContent[*].contentSecretRef", "os_profile[*].windows_configuration[*].admin_password": "spec.forProvider.osProfile[*].windowsConfiguration[*].adminPasswordSecretRef", "user_data_base64": "spec.forProvider.userDataBase64SecretRef"} +} + +// GetObservation of this OrchestratedVirtualMachineScaleSet +func (tr *OrchestratedVirtualMachineScaleSet) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this OrchestratedVirtualMachineScaleSet +func (tr *OrchestratedVirtualMachineScaleSet) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform 
resource of this OrchestratedVirtualMachineScaleSet +func (tr *OrchestratedVirtualMachineScaleSet) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this OrchestratedVirtualMachineScaleSet +func (tr *OrchestratedVirtualMachineScaleSet) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this OrchestratedVirtualMachineScaleSet +func (tr *OrchestratedVirtualMachineScaleSet) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this OrchestratedVirtualMachineScaleSet +func (tr *OrchestratedVirtualMachineScaleSet) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this OrchestratedVirtualMachineScaleSet +func (tr *OrchestratedVirtualMachineScaleSet) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this OrchestratedVirtualMachineScaleSet using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *OrchestratedVirtualMachineScaleSet) LateInitialize(attrs []byte) (bool, error) { + params := &OrchestratedVirtualMachineScaleSetParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *OrchestratedVirtualMachineScaleSet) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/compute/v1beta2/zz_orchestratedvirtualmachinescaleset_types.go b/apis/compute/v1beta2/zz_orchestratedvirtualmachinescaleset_types.go new file mode 100755 index 000000000..55177f64e --- /dev/null +++ b/apis/compute/v1beta2/zz_orchestratedvirtualmachinescaleset_types.go @@ -0,0 +1,1752 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AdditionalUnattendContentInitParameters struct { + + // The name of the setting to which the content applies. Possible values are AutoLogon and FirstLogonCommands. 
Changing this forces a new resource to be created. + Setting *string `json:"setting,omitempty" tf:"setting,omitempty"` +} + +type AdditionalUnattendContentObservation struct { + + // The name of the setting to which the content applies. Possible values are AutoLogon and FirstLogonCommands. Changing this forces a new resource to be created. + Setting *string `json:"setting,omitempty" tf:"setting,omitempty"` +} + +type AdditionalUnattendContentParameters struct { + + // The XML formatted content that is added to the unattend.xml file for the specified path and component. Changing this forces a new resource to be created. + // +kubebuilder:validation:Required + ContentSecretRef v1.SecretKeySelector `json:"contentSecretRef" tf:"-"` + + // The name of the setting to which the content applies. Possible values are AutoLogon and FirstLogonCommands. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Setting *string `json:"setting" tf:"setting,omitempty"` +} + +type ExtensionProtectedSettingsFromKeyVaultInitParameters struct { + + // The URL to the Key Vault Secret which stores the protected settings. + SecretURL *string `json:"secretUrl,omitempty" tf:"secret_url,omitempty"` + + // The ID of the source Key Vault. + SourceVaultID *string `json:"sourceVaultId,omitempty" tf:"source_vault_id,omitempty"` +} + +type ExtensionProtectedSettingsFromKeyVaultObservation struct { + + // The URL to the Key Vault Secret which stores the protected settings. + SecretURL *string `json:"secretUrl,omitempty" tf:"secret_url,omitempty"` + + // The ID of the source Key Vault. + SourceVaultID *string `json:"sourceVaultId,omitempty" tf:"source_vault_id,omitempty"` +} + +type ExtensionProtectedSettingsFromKeyVaultParameters struct { + + // The URL to the Key Vault Secret which stores the protected settings. + // +kubebuilder:validation:Optional + SecretURL *string `json:"secretUrl" tf:"secret_url,omitempty"` + + // The ID of the source Key Vault. 
+ // +kubebuilder:validation:Optional + SourceVaultID *string `json:"sourceVaultId" tf:"source_vault_id,omitempty"` +} + +type IPConfigurationPublicIPAddressInitParameters struct { + + // The Prefix which should be used for the Domain Name Label for each Virtual Machine Instance. Azure concatenates the Domain Name Label and Virtual Machine Index to create a unique Domain Name Label for each Virtual Machine. Valid values must be between 1 and 26 characters long, start with a lower case letter, end with a lower case letter or number and contains only a-z, 0-9 and hyphens. + DomainNameLabel *string `json:"domainNameLabel,omitempty" tf:"domain_name_label,omitempty"` + + // One or more ip_tag blocks as defined above. Changing this forces a new resource to be created. + IPTag []PublicIPAddressIPTagInitParameters `json:"ipTag,omitempty" tf:"ip_tag,omitempty"` + + // The Idle Timeout in Minutes for the Public IP Address. Possible values are in the range 4 to 32. + IdleTimeoutInMinutes *float64 `json:"idleTimeoutInMinutes,omitempty" tf:"idle_timeout_in_minutes,omitempty"` + + // The Name of the Public IP Address Configuration. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The ID of the Public IP Address Prefix from where Public IP Addresses should be allocated. Changing this forces a new resource to be created. + PublicIPPrefixID *string `json:"publicIpPrefixId,omitempty" tf:"public_ip_prefix_id,omitempty"` + + // The name of the SKU to be used by this Virtual Machine Scale Set. Valid values include: any of the General purpose, Compute optimized, Memory optimized, Storage optimized, GPU optimized, FPGA optimized, High performance, or Previous generation virtual machine SKUs. + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // Specifies the version of the image used to create the virtual machines. 
+ Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type IPConfigurationPublicIPAddressObservation struct { + + // The Prefix which should be used for the Domain Name Label for each Virtual Machine Instance. Azure concatenates the Domain Name Label and Virtual Machine Index to create a unique Domain Name Label for each Virtual Machine. Valid values must be between 1 and 26 characters long, start with a lower case letter, end with a lower case letter or number and contains only a-z, 0-9 and hyphens. + DomainNameLabel *string `json:"domainNameLabel,omitempty" tf:"domain_name_label,omitempty"` + + // One or more ip_tag blocks as defined above. Changing this forces a new resource to be created. + IPTag []PublicIPAddressIPTagObservation `json:"ipTag,omitempty" tf:"ip_tag,omitempty"` + + // The Idle Timeout in Minutes for the Public IP Address. Possible values are in the range 4 to 32. + IdleTimeoutInMinutes *float64 `json:"idleTimeoutInMinutes,omitempty" tf:"idle_timeout_in_minutes,omitempty"` + + // The Name of the Public IP Address Configuration. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The ID of the Public IP Address Prefix from where Public IP Addresses should be allocated. Changing this forces a new resource to be created. + PublicIPPrefixID *string `json:"publicIpPrefixId,omitempty" tf:"public_ip_prefix_id,omitempty"` + + // The name of the SKU to be used by this Virtual Machine Scale Set. Valid values include: any of the General purpose, Compute optimized, Memory optimized, Storage optimized, GPU optimized, FPGA optimized, High performance, or Previous generation virtual machine SKUs. + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // Specifies the version of the image used to create the virtual machines. 
+ Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type IPConfigurationPublicIPAddressParameters struct { + + // The Prefix which should be used for the Domain Name Label for each Virtual Machine Instance. Azure concatenates the Domain Name Label and Virtual Machine Index to create a unique Domain Name Label for each Virtual Machine. Valid values must be between 1 and 26 characters long, start with a lower case letter, end with a lower case letter or number and contains only a-z, 0-9 and hyphens. + // +kubebuilder:validation:Optional + DomainNameLabel *string `json:"domainNameLabel,omitempty" tf:"domain_name_label,omitempty"` + + // One or more ip_tag blocks as defined above. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + IPTag []PublicIPAddressIPTagParameters `json:"ipTag,omitempty" tf:"ip_tag,omitempty"` + + // The Idle Timeout in Minutes for the Public IP Address. Possible values are in the range 4 to 32. + // +kubebuilder:validation:Optional + IdleTimeoutInMinutes *float64 `json:"idleTimeoutInMinutes,omitempty" tf:"idle_timeout_in_minutes,omitempty"` + + // The Name of the Public IP Address Configuration. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The ID of the Public IP Address Prefix from where Public IP Addresses should be allocated. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + PublicIPPrefixID *string `json:"publicIpPrefixId,omitempty" tf:"public_ip_prefix_id,omitempty"` + + // The name of the SKU to be used by this Virtual Machine Scale Set. Valid values include: any of the General purpose, Compute optimized, Memory optimized, Storage optimized, GPU optimized, FPGA optimized, High performance, or Previous generation virtual machine SKUs. 
+ // +kubebuilder:validation:Optional + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // Specifies the version of the image used to create the virtual machines. + // +kubebuilder:validation:Optional + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type LinuxConfigurationAdminSSHKeyInitParameters struct { + + // The Public Key which should be used for authentication, which needs to be at least 2048-bit and in ssh-rsa format. + PublicKey *string `json:"publicKey,omitempty" tf:"public_key,omitempty"` + + // The Username for which this Public SSH Key should be configured. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type LinuxConfigurationAdminSSHKeyObservation struct { + + // The Public Key which should be used for authentication, which needs to be at least 2048-bit and in ssh-rsa format. + PublicKey *string `json:"publicKey,omitempty" tf:"public_key,omitempty"` + + // The Username for which this Public SSH Key should be configured. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type LinuxConfigurationAdminSSHKeyParameters struct { + + // The Public Key which should be used for authentication, which needs to be at least 2048-bit and in ssh-rsa format. + // +kubebuilder:validation:Optional + PublicKey *string `json:"publicKey" tf:"public_key,omitempty"` + + // The Username for which this Public SSH Key should be configured. + // +kubebuilder:validation:Optional + Username *string `json:"username" tf:"username,omitempty"` +} + +type LinuxConfigurationInitParameters struct { + + // A admin_ssh_key block as documented below. + AdminSSHKey []LinuxConfigurationAdminSSHKeyInitParameters `json:"adminSshKey,omitempty" tf:"admin_ssh_key,omitempty"` + + // The username of the local administrator on each Virtual Machine Scale Set instance. Changing this forces a new resource to be created. 
+ AdminUsername *string `json:"adminUsername,omitempty" tf:"admin_username,omitempty"` + + // The prefix which should be used for the name of the Virtual Machines in this Scale Set. If unspecified this defaults to the value for the name field. If the value of the name field is not a valid computer_name_prefix, then you must specify computer_name_prefix. Changing this forces a new resource to be created. + ComputerNamePrefix *string `json:"computerNamePrefix,omitempty" tf:"computer_name_prefix,omitempty"` + + // When an admin_password is specified disable_password_authentication must be set to false. Defaults to true. + DisablePasswordAuthentication *bool `json:"disablePasswordAuthentication,omitempty" tf:"disable_password_authentication,omitempty"` + + // Specifies the mode of VM Guest Patching for the virtual machines that are associated to the Virtual Machine Scale Set. Possible values are AutomaticByPlatform or ImageDefault. Defaults to ImageDefault. + PatchAssessmentMode *string `json:"patchAssessmentMode,omitempty" tf:"patch_assessment_mode,omitempty"` + + // Specifies the mode of in-guest patching of this Windows Virtual Machine. Possible values are Manual, AutomaticByOS and AutomaticByPlatform. Defaults to AutomaticByOS. For more information on patch modes please see the product documentation. + PatchMode *string `json:"patchMode,omitempty" tf:"patch_mode,omitempty"` + + // Should the Azure VM Agent be provisioned on each Virtual Machine in the Scale Set? Defaults to true. Changing this value forces a new resource to be created. + ProvisionVMAgent *bool `json:"provisionVmAgent,omitempty" tf:"provision_vm_agent,omitempty"` + + // One or more secret blocks as defined below. + Secret []LinuxConfigurationSecretInitParameters `json:"secret,omitempty" tf:"secret,omitempty"` +} + +type LinuxConfigurationObservation struct { + + // A admin_ssh_key block as documented below. 
+ AdminSSHKey []LinuxConfigurationAdminSSHKeyObservation `json:"adminSshKey,omitempty" tf:"admin_ssh_key,omitempty"` + + // The username of the local administrator on each Virtual Machine Scale Set instance. Changing this forces a new resource to be created. + AdminUsername *string `json:"adminUsername,omitempty" tf:"admin_username,omitempty"` + + // The prefix which should be used for the name of the Virtual Machines in this Scale Set. If unspecified this defaults to the value for the name field. If the value of the name field is not a valid computer_name_prefix, then you must specify computer_name_prefix. Changing this forces a new resource to be created. + ComputerNamePrefix *string `json:"computerNamePrefix,omitempty" tf:"computer_name_prefix,omitempty"` + + // When an admin_password is specified disable_password_authentication must be set to false. Defaults to true. + DisablePasswordAuthentication *bool `json:"disablePasswordAuthentication,omitempty" tf:"disable_password_authentication,omitempty"` + + // Specifies the mode of VM Guest Patching for the virtual machines that are associated to the Virtual Machine Scale Set. Possible values are AutomaticByPlatform or ImageDefault. Defaults to ImageDefault. + PatchAssessmentMode *string `json:"patchAssessmentMode,omitempty" tf:"patch_assessment_mode,omitempty"` + + // Specifies the mode of in-guest patching of this Windows Virtual Machine. Possible values are Manual, AutomaticByOS and AutomaticByPlatform. Defaults to AutomaticByOS. For more information on patch modes please see the product documentation. + PatchMode *string `json:"patchMode,omitempty" tf:"patch_mode,omitempty"` + + // Should the Azure VM Agent be provisioned on each Virtual Machine in the Scale Set? Defaults to true. Changing this value forces a new resource to be created. + ProvisionVMAgent *bool `json:"provisionVmAgent,omitempty" tf:"provision_vm_agent,omitempty"` + + // One or more secret blocks as defined below. 
+ Secret []LinuxConfigurationSecretObservation `json:"secret,omitempty" tf:"secret,omitempty"` +} + +type LinuxConfigurationParameters struct { + + // The Password which should be used for the local-administrator on this Virtual Machine. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + AdminPasswordSecretRef *v1.SecretKeySelector `json:"adminPasswordSecretRef,omitempty" tf:"-"` + + // A admin_ssh_key block as documented below. + // +kubebuilder:validation:Optional + AdminSSHKey []LinuxConfigurationAdminSSHKeyParameters `json:"adminSshKey,omitempty" tf:"admin_ssh_key,omitempty"` + + // The username of the local administrator on each Virtual Machine Scale Set instance. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + AdminUsername *string `json:"adminUsername" tf:"admin_username,omitempty"` + + // The prefix which should be used for the name of the Virtual Machines in this Scale Set. If unspecified this defaults to the value for the name field. If the value of the name field is not a valid computer_name_prefix, then you must specify computer_name_prefix. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ComputerNamePrefix *string `json:"computerNamePrefix,omitempty" tf:"computer_name_prefix,omitempty"` + + // When an admin_password is specified disable_password_authentication must be set to false. Defaults to true. + // +kubebuilder:validation:Optional + DisablePasswordAuthentication *bool `json:"disablePasswordAuthentication,omitempty" tf:"disable_password_authentication,omitempty"` + + // Specifies the mode of VM Guest Patching for the virtual machines that are associated to the Virtual Machine Scale Set. Possible values are AutomaticByPlatform or ImageDefault. Defaults to ImageDefault. 
+ // +kubebuilder:validation:Optional + PatchAssessmentMode *string `json:"patchAssessmentMode,omitempty" tf:"patch_assessment_mode,omitempty"` + + // Specifies the mode of in-guest patching of this Windows Virtual Machine. Possible values are Manual, AutomaticByOS and AutomaticByPlatform. Defaults to AutomaticByOS. For more information on patch modes please see the product documentation. + // +kubebuilder:validation:Optional + PatchMode *string `json:"patchMode,omitempty" tf:"patch_mode,omitempty"` + + // Should the Azure VM Agent be provisioned on each Virtual Machine in the Scale Set? Defaults to true. Changing this value forces a new resource to be created. + // +kubebuilder:validation:Optional + ProvisionVMAgent *bool `json:"provisionVmAgent,omitempty" tf:"provision_vm_agent,omitempty"` + + // One or more secret blocks as defined below. + // +kubebuilder:validation:Optional + Secret []LinuxConfigurationSecretParameters `json:"secret,omitempty" tf:"secret,omitempty"` +} + +type LinuxConfigurationSecretCertificateInitParameters struct { + + // The Secret URL of a Key Vault Certificate. + URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +type LinuxConfigurationSecretCertificateObservation struct { + + // The Secret URL of a Key Vault Certificate. + URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +type LinuxConfigurationSecretCertificateParameters struct { + + // The Secret URL of a Key Vault Certificate. + // +kubebuilder:validation:Optional + URL *string `json:"url" tf:"url,omitempty"` +} + +type LinuxConfigurationSecretInitParameters struct { + + // One or more certificate blocks as defined below. + Certificate []LinuxConfigurationSecretCertificateInitParameters `json:"certificate,omitempty" tf:"certificate,omitempty"` + + // The ID of the Key Vault from which all Secrets should be sourced. 
+ KeyVaultID *string `json:"keyVaultId,omitempty" tf:"key_vault_id,omitempty"` +} + +type LinuxConfigurationSecretObservation struct { + + // One or more certificate blocks as defined below. + Certificate []LinuxConfigurationSecretCertificateObservation `json:"certificate,omitempty" tf:"certificate,omitempty"` + + // The ID of the Key Vault from which all Secrets should be sourced. + KeyVaultID *string `json:"keyVaultId,omitempty" tf:"key_vault_id,omitempty"` +} + +type LinuxConfigurationSecretParameters struct { + + // One or more certificate blocks as defined below. + // +kubebuilder:validation:Optional + Certificate []LinuxConfigurationSecretCertificateParameters `json:"certificate" tf:"certificate,omitempty"` + + // The ID of the Key Vault from which all Secrets should be sourced. + // +kubebuilder:validation:Optional + KeyVaultID *string `json:"keyVaultId" tf:"key_vault_id,omitempty"` +} + +type NetworkInterfaceIPConfigurationInitParameters struct { + + // A list of Backend Address Pools IDs from a Application Gateway which this Virtual Machine Scale Set should be connected to. + // +listType=set + ApplicationGatewayBackendAddressPoolIds []*string `json:"applicationGatewayBackendAddressPoolIds,omitempty" tf:"application_gateway_backend_address_pool_ids,omitempty"` + + // A list of Application Security Group IDs which this Virtual Machine Scale Set should be connected to. + // +listType=set + ApplicationSecurityGroupIds []*string `json:"applicationSecurityGroupIds,omitempty" tf:"application_security_group_ids,omitempty"` + + // A list of Backend Address Pools IDs from a Load Balancer which this Virtual Machine Scale Set should be connected to. + // +listType=set + LoadBalancerBackendAddressPoolIds []*string `json:"loadBalancerBackendAddressPoolIds,omitempty" tf:"load_balancer_backend_address_pool_ids,omitempty"` + + // The Name of the Public IP Address Configuration. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Is this the Primary IP Configuration? Possible values are true and false. Defaults to false. + Primary *bool `json:"primary,omitempty" tf:"primary,omitempty"` + + // A public_ip_address block as defined below. + PublicIPAddress []IPConfigurationPublicIPAddressInitParameters `json:"publicIpAddress,omitempty" tf:"public_ip_address,omitempty"` + + // The ID of the Subnet which this IP Configuration should be connected to. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // Specifies the version of the image used to create the virtual machines. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type NetworkInterfaceIPConfigurationObservation struct { + + // A list of Backend Address Pools IDs from a Application Gateway which this Virtual Machine Scale Set should be connected to. + // +listType=set + ApplicationGatewayBackendAddressPoolIds []*string `json:"applicationGatewayBackendAddressPoolIds,omitempty" tf:"application_gateway_backend_address_pool_ids,omitempty"` + + // A list of Application Security Group IDs which this Virtual Machine Scale Set should be connected to. 
+ // +listType=set + ApplicationSecurityGroupIds []*string `json:"applicationSecurityGroupIds,omitempty" tf:"application_security_group_ids,omitempty"` + + // A list of Backend Address Pools IDs from a Load Balancer which this Virtual Machine Scale Set should be connected to. + // +listType=set + LoadBalancerBackendAddressPoolIds []*string `json:"loadBalancerBackendAddressPoolIds,omitempty" tf:"load_balancer_backend_address_pool_ids,omitempty"` + + // The Name of the Public IP Address Configuration. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Is this the Primary IP Configuration? Possible values are true and false. Defaults to false. + Primary *bool `json:"primary,omitempty" tf:"primary,omitempty"` + + // A public_ip_address block as defined below. + PublicIPAddress []IPConfigurationPublicIPAddressObservation `json:"publicIpAddress,omitempty" tf:"public_ip_address,omitempty"` + + // The ID of the Subnet which this IP Configuration should be connected to. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Specifies the version of the image used to create the virtual machines. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type NetworkInterfaceIPConfigurationParameters struct { + + // A list of Backend Address Pools IDs from a Application Gateway which this Virtual Machine Scale Set should be connected to. + // +kubebuilder:validation:Optional + // +listType=set + ApplicationGatewayBackendAddressPoolIds []*string `json:"applicationGatewayBackendAddressPoolIds,omitempty" tf:"application_gateway_backend_address_pool_ids,omitempty"` + + // A list of Application Security Group IDs which this Virtual Machine Scale Set should be connected to. 
+ // +kubebuilder:validation:Optional + // +listType=set + ApplicationSecurityGroupIds []*string `json:"applicationSecurityGroupIds,omitempty" tf:"application_security_group_ids,omitempty"` + + // A list of Backend Address Pools IDs from a Load Balancer which this Virtual Machine Scale Set should be connected to. + // +kubebuilder:validation:Optional + // +listType=set + LoadBalancerBackendAddressPoolIds []*string `json:"loadBalancerBackendAddressPoolIds,omitempty" tf:"load_balancer_backend_address_pool_ids,omitempty"` + + // The Name of the Public IP Address Configuration. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Is this the Primary IP Configuration? Possible values are true and false. Defaults to false. + // +kubebuilder:validation:Optional + Primary *bool `json:"primary,omitempty" tf:"primary,omitempty"` + + // A public_ip_address block as defined below. + // +kubebuilder:validation:Optional + PublicIPAddress []IPConfigurationPublicIPAddressParameters `json:"publicIpAddress,omitempty" tf:"public_ip_address,omitempty"` + + // The ID of the Subnet which this IP Configuration should be connected to. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // Specifies the version of the image used to create the virtual machines. 
+ // +kubebuilder:validation:Optional + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type OrchestratedVirtualMachineScaleSetAdditionalCapabilitiesInitParameters struct { + + // Should the capacity to enable Data Disks of the UltraSSD_LRS storage account type be supported on this Virtual Machine Scale Set? Defaults to false. Changing this forces a new resource to be created. + UltraSsdEnabled *bool `json:"ultraSsdEnabled,omitempty" tf:"ultra_ssd_enabled,omitempty"` +} + +type OrchestratedVirtualMachineScaleSetAdditionalCapabilitiesObservation struct { + + // Should the capacity to enable Data Disks of the UltraSSD_LRS storage account type be supported on this Virtual Machine Scale Set? Defaults to false. Changing this forces a new resource to be created. + UltraSsdEnabled *bool `json:"ultraSsdEnabled,omitempty" tf:"ultra_ssd_enabled,omitempty"` +} + +type OrchestratedVirtualMachineScaleSetAdditionalCapabilitiesParameters struct { + + // Should the capacity to enable Data Disks of the UltraSSD_LRS storage account type be supported on this Virtual Machine Scale Set? Defaults to false. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + UltraSsdEnabled *bool `json:"ultraSsdEnabled,omitempty" tf:"ultra_ssd_enabled,omitempty"` +} + +type OrchestratedVirtualMachineScaleSetAutomaticInstanceRepairInitParameters struct { + + // Should the automatic instance repair be enabled on this Virtual Machine Scale Set? Possible values are true and false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Amount of time for which automatic repairs will be delayed. The grace period starts right after the VM is found unhealthy. Possible values are between 30 and 90 minutes. The time duration should be specified in ISO 8601 format (e.g. PT30M to PT90M). Defaults to PT30M. 
+ GracePeriod *string `json:"gracePeriod,omitempty" tf:"grace_period,omitempty"` +} + +type OrchestratedVirtualMachineScaleSetAutomaticInstanceRepairObservation struct { + + // Should the automatic instance repair be enabled on this Virtual Machine Scale Set? Possible values are true and false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Amount of time for which automatic repairs will be delayed. The grace period starts right after the VM is found unhealthy. Possible values are between 30 and 90 minutes. The time duration should be specified in ISO 8601 format (e.g. PT30M to PT90M). Defaults to PT30M. + GracePeriod *string `json:"gracePeriod,omitempty" tf:"grace_period,omitempty"` +} + +type OrchestratedVirtualMachineScaleSetAutomaticInstanceRepairParameters struct { + + // Should the automatic instance repair be enabled on this Virtual Machine Scale Set? Possible values are true and false. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` + + // Amount of time for which automatic repairs will be delayed. The grace period starts right after the VM is found unhealthy. Possible values are between 30 and 90 minutes. The time duration should be specified in ISO 8601 format (e.g. PT30M to PT90M). Defaults to PT30M. + // +kubebuilder:validation:Optional + GracePeriod *string `json:"gracePeriod,omitempty" tf:"grace_period,omitempty"` +} + +type OrchestratedVirtualMachineScaleSetBootDiagnosticsInitParameters struct { + + // The Primary/Secondary Endpoint for the Azure Storage Account which should be used to store Boot Diagnostics, including Console Output and Screenshots from the Hypervisor. By including a boot_diagnostics block without passing the storage_account_uri field will cause the API to utilize a Managed Storage Account to store the Boot Diagnostics output. 
+ StorageAccountURI *string `json:"storageAccountUri,omitempty" tf:"storage_account_uri,omitempty"` +} + +type OrchestratedVirtualMachineScaleSetBootDiagnosticsObservation struct { + + // The Primary/Secondary Endpoint for the Azure Storage Account which should be used to store Boot Diagnostics, including Console Output and Screenshots from the Hypervisor. By including a boot_diagnostics block without passing the storage_account_uri field will cause the API to utilize a Managed Storage Account to store the Boot Diagnostics output. + StorageAccountURI *string `json:"storageAccountUri,omitempty" tf:"storage_account_uri,omitempty"` +} + +type OrchestratedVirtualMachineScaleSetBootDiagnosticsParameters struct { + + // The Primary/Secondary Endpoint for the Azure Storage Account which should be used to store Boot Diagnostics, including Console Output and Screenshots from the Hypervisor. By including a boot_diagnostics block without passing the storage_account_uri field will cause the API to utilize a Managed Storage Account to store the Boot Diagnostics output. + // +kubebuilder:validation:Optional + StorageAccountURI *string `json:"storageAccountUri,omitempty" tf:"storage_account_uri,omitempty"` +} + +type OrchestratedVirtualMachineScaleSetDataDiskInitParameters struct { + + // The type of Caching which should be used for this Data Disk. Possible values are None, ReadOnly and ReadWrite. + Caching *string `json:"caching,omitempty" tf:"caching,omitempty"` + + // The create option which should be used for this Data Disk. Possible values are Empty and FromImage. Defaults to Empty. (FromImage should only be used if the source image includes data disks). + CreateOption *string `json:"createOption,omitempty" tf:"create_option,omitempty"` + + // The ID of the Disk Encryption Set which should be used to encrypt the Data Disk. Changing this forces a new resource to be created. 
+ DiskEncryptionSetID *string `json:"diskEncryptionSetId,omitempty" tf:"disk_encryption_set_id,omitempty"` + + // The size of the Data Disk which should be created. Required if create_option is specified as Empty. + DiskSizeGb *float64 `json:"diskSizeGb,omitempty" tf:"disk_size_gb,omitempty"` + + // The Logical Unit Number of the Data Disk, which must be unique within the Virtual Machine. Required if create_option is specified as Empty. + Lun *float64 `json:"lun,omitempty" tf:"lun,omitempty"` + + // The Type of Storage Account which should back this Data Disk. Possible values include Standard_LRS, StandardSSD_LRS, StandardSSD_ZRS, Premium_LRS, PremiumV2_LRS, Premium_ZRS and UltraSSD_LRS. + StorageAccountType *string `json:"storageAccountType,omitempty" tf:"storage_account_type,omitempty"` + + // Specifies the Read-Write IOPS for this Data Disk. Only settable when storage_account_type is PremiumV2_LRS or UltraSSD_LRS. + UltraSsdDiskIopsReadWrite *float64 `json:"ultraSsdDiskIopsReadWrite,omitempty" tf:"ultra_ssd_disk_iops_read_write,omitempty"` + + // Specifies the bandwidth in MB per second for this Data Disk. Only settable when storage_account_type is PremiumV2_LRS or UltraSSD_LRS. + UltraSsdDiskMbpsReadWrite *float64 `json:"ultraSsdDiskMbpsReadWrite,omitempty" tf:"ultra_ssd_disk_mbps_read_write,omitempty"` + + // Specifies if Write Accelerator is enabled on the Data Disk. Defaults to false. + WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty" tf:"write_accelerator_enabled,omitempty"` +} + +type OrchestratedVirtualMachineScaleSetDataDiskObservation struct { + + // The type of Caching which should be used for this Data Disk. Possible values are None, ReadOnly and ReadWrite. + Caching *string `json:"caching,omitempty" tf:"caching,omitempty"` + + // The create option which should be used for this Data Disk. Possible values are Empty and FromImage. Defaults to Empty. (FromImage should only be used if the source image includes data disks). 
+ CreateOption *string `json:"createOption,omitempty" tf:"create_option,omitempty"` + + // The ID of the Disk Encryption Set which should be used to encrypt the Data Disk. Changing this forces a new resource to be created. + DiskEncryptionSetID *string `json:"diskEncryptionSetId,omitempty" tf:"disk_encryption_set_id,omitempty"` + + // The size of the Data Disk which should be created. Required if create_option is specified as Empty. + DiskSizeGb *float64 `json:"diskSizeGb,omitempty" tf:"disk_size_gb,omitempty"` + + // The Logical Unit Number of the Data Disk, which must be unique within the Virtual Machine. Required if create_option is specified as Empty. + Lun *float64 `json:"lun,omitempty" tf:"lun,omitempty"` + + // The Type of Storage Account which should back this Data Disk. Possible values include Standard_LRS, StandardSSD_LRS, StandardSSD_ZRS, Premium_LRS, PremiumV2_LRS, Premium_ZRS and UltraSSD_LRS. + StorageAccountType *string `json:"storageAccountType,omitempty" tf:"storage_account_type,omitempty"` + + // Specifies the Read-Write IOPS for this Data Disk. Only settable when storage_account_type is PremiumV2_LRS or UltraSSD_LRS. + UltraSsdDiskIopsReadWrite *float64 `json:"ultraSsdDiskIopsReadWrite,omitempty" tf:"ultra_ssd_disk_iops_read_write,omitempty"` + + // Specifies the bandwidth in MB per second for this Data Disk. Only settable when storage_account_type is PremiumV2_LRS or UltraSSD_LRS. + UltraSsdDiskMbpsReadWrite *float64 `json:"ultraSsdDiskMbpsReadWrite,omitempty" tf:"ultra_ssd_disk_mbps_read_write,omitempty"` + + // Specifies if Write Accelerator is enabled on the Data Disk. Defaults to false. + WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty" tf:"write_accelerator_enabled,omitempty"` +} + +type OrchestratedVirtualMachineScaleSetDataDiskParameters struct { + + // The type of Caching which should be used for this Data Disk. Possible values are None, ReadOnly and ReadWrite. 
+ // +kubebuilder:validation:Optional + Caching *string `json:"caching" tf:"caching,omitempty"` + + // The create option which should be used for this Data Disk. Possible values are Empty and FromImage. Defaults to Empty. (FromImage should only be used if the source image includes data disks). + // +kubebuilder:validation:Optional + CreateOption *string `json:"createOption,omitempty" tf:"create_option,omitempty"` + + // The ID of the Disk Encryption Set which should be used to encrypt the Data Disk. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + DiskEncryptionSetID *string `json:"diskEncryptionSetId,omitempty" tf:"disk_encryption_set_id,omitempty"` + + // The size of the Data Disk which should be created. Required if create_option is specified as Empty. + // +kubebuilder:validation:Optional + DiskSizeGb *float64 `json:"diskSizeGb,omitempty" tf:"disk_size_gb,omitempty"` + + // The Logical Unit Number of the Data Disk, which must be unique within the Virtual Machine. Required if create_option is specified as Empty. + // +kubebuilder:validation:Optional + Lun *float64 `json:"lun,omitempty" tf:"lun,omitempty"` + + // The Type of Storage Account which should back this Data Disk. Possible values include Standard_LRS, StandardSSD_LRS, StandardSSD_ZRS, Premium_LRS, PremiumV2_LRS, Premium_ZRS and UltraSSD_LRS. + // +kubebuilder:validation:Optional + StorageAccountType *string `json:"storageAccountType" tf:"storage_account_type,omitempty"` + + // Specifies the Read-Write IOPS for this Data Disk. Only settable when storage_account_type is PremiumV2_LRS or UltraSSD_LRS. + // +kubebuilder:validation:Optional + UltraSsdDiskIopsReadWrite *float64 `json:"ultraSsdDiskIopsReadWrite,omitempty" tf:"ultra_ssd_disk_iops_read_write,omitempty"` + + // Specifies the bandwidth in MB per second for this Data Disk. Only settable when storage_account_type is PremiumV2_LRS or UltraSSD_LRS. 
+ // +kubebuilder:validation:Optional + UltraSsdDiskMbpsReadWrite *float64 `json:"ultraSsdDiskMbpsReadWrite,omitempty" tf:"ultra_ssd_disk_mbps_read_write,omitempty"` + + // Specifies if Write Accelerator is enabled on the Data Disk. Defaults to false. + // +kubebuilder:validation:Optional + WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty" tf:"write_accelerator_enabled,omitempty"` +} + +type OrchestratedVirtualMachineScaleSetExtensionInitParameters struct { + + // Should the latest version of the Extension be used at Deployment Time, if one is available? This won't auto-update the extension on existing installation. Defaults to true. + AutoUpgradeMinorVersionEnabled *bool `json:"autoUpgradeMinorVersionEnabled,omitempty" tf:"auto_upgrade_minor_version_enabled,omitempty"` + + // An ordered list of Extension names which Virtual Machine Scale Set should provision after VM creation. + ExtensionsToProvisionAfterVMCreation []*string `json:"extensionsToProvisionAfterVmCreation,omitempty" tf:"extensions_to_provision_after_vm_creation,omitempty"` + + // Should failures from the extension be suppressed? Possible values are true or false. + FailureSuppressionEnabled *bool `json:"failureSuppressionEnabled,omitempty" tf:"failure_suppression_enabled,omitempty"` + + // A value which, when different to the previous value can be used to force-run the Extension even if the Extension Configuration hasn't changed. + ForceExtensionExecutionOnChange *string `json:"forceExtensionExecutionOnChange,omitempty" tf:"force_extension_execution_on_change,omitempty"` + + // The name for the Virtual Machine Scale Set Extension. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A protected_settings_from_key_vault block as defined below. 
+ ProtectedSettingsFromKeyVault *ExtensionProtectedSettingsFromKeyVaultInitParameters `json:"protectedSettingsFromKeyVault,omitempty" tf:"protected_settings_from_key_vault,omitempty"` + + // Specifies the Publisher of the Extension. + Publisher *string `json:"publisher,omitempty" tf:"publisher,omitempty"` + + // A JSON String which specifies Settings for the Extension. + Settings *string `json:"settings,omitempty" tf:"settings,omitempty"` + + // Specifies the Type of the Extension. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // Specifies the version of the extension to use, available versions can be found using the Azure CLI. + TypeHandlerVersion *string `json:"typeHandlerVersion,omitempty" tf:"type_handler_version,omitempty"` +} + +type OrchestratedVirtualMachineScaleSetExtensionObservation struct { + + // Should the latest version of the Extension be used at Deployment Time, if one is available? This won't auto-update the extension on existing installation. Defaults to true. + AutoUpgradeMinorVersionEnabled *bool `json:"autoUpgradeMinorVersionEnabled,omitempty" tf:"auto_upgrade_minor_version_enabled,omitempty"` + + // An ordered list of Extension names which Virtual Machine Scale Set should provision after VM creation. + ExtensionsToProvisionAfterVMCreation []*string `json:"extensionsToProvisionAfterVmCreation,omitempty" tf:"extensions_to_provision_after_vm_creation,omitempty"` + + // Should failures from the extension be suppressed? Possible values are true or false. + FailureSuppressionEnabled *bool `json:"failureSuppressionEnabled,omitempty" tf:"failure_suppression_enabled,omitempty"` + + // A value which, when different to the previous value can be used to force-run the Extension even if the Extension Configuration hasn't changed. + ForceExtensionExecutionOnChange *string `json:"forceExtensionExecutionOnChange,omitempty" tf:"force_extension_execution_on_change,omitempty"` + + // The name for the Virtual Machine Scale Set Extension. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A protected_settings_from_key_vault block as defined below. + ProtectedSettingsFromKeyVault *ExtensionProtectedSettingsFromKeyVaultObservation `json:"protectedSettingsFromKeyVault,omitempty" tf:"protected_settings_from_key_vault,omitempty"` + + // Specifies the Publisher of the Extension. + Publisher *string `json:"publisher,omitempty" tf:"publisher,omitempty"` + + // A JSON String which specifies Settings for the Extension. + Settings *string `json:"settings,omitempty" tf:"settings,omitempty"` + + // Specifies the Type of the Extension. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // Specifies the version of the extension to use, available versions can be found using the Azure CLI. + TypeHandlerVersion *string `json:"typeHandlerVersion,omitempty" tf:"type_handler_version,omitempty"` +} + +type OrchestratedVirtualMachineScaleSetExtensionParameters struct { + + // Should the latest version of the Extension be used at Deployment Time, if one is available? This won't auto-update the extension on existing installation. Defaults to true. + // +kubebuilder:validation:Optional + AutoUpgradeMinorVersionEnabled *bool `json:"autoUpgradeMinorVersionEnabled,omitempty" tf:"auto_upgrade_minor_version_enabled,omitempty"` + + // An ordered list of Extension names which Virtual Machine Scale Set should provision after VM creation. + // +kubebuilder:validation:Optional + ExtensionsToProvisionAfterVMCreation []*string `json:"extensionsToProvisionAfterVmCreation,omitempty" tf:"extensions_to_provision_after_vm_creation,omitempty"` + + // Should failures from the extension be suppressed? Possible values are true or false. 
+ // +kubebuilder:validation:Optional + FailureSuppressionEnabled *bool `json:"failureSuppressionEnabled,omitempty" tf:"failure_suppression_enabled,omitempty"` + + // A value which, when different to the previous value can be used to force-run the Extension even if the Extension Configuration hasn't changed. + // +kubebuilder:validation:Optional + ForceExtensionExecutionOnChange *string `json:"forceExtensionExecutionOnChange,omitempty" tf:"force_extension_execution_on_change,omitempty"` + + // The name for the Virtual Machine Scale Set Extension. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // A protected_settings_from_key_vault block as defined below. + // +kubebuilder:validation:Optional + ProtectedSettingsFromKeyVault *ExtensionProtectedSettingsFromKeyVaultParameters `json:"protectedSettingsFromKeyVault,omitempty" tf:"protected_settings_from_key_vault,omitempty"` + + // A JSON String which specifies Sensitive Settings (such as Passwords) for the Extension. + // +kubebuilder:validation:Optional + ProtectedSettingsSecretRef *v1.SecretKeySelector `json:"protectedSettingsSecretRef,omitempty" tf:"-"` + + // Specifies the Publisher of the Extension. + // +kubebuilder:validation:Optional + Publisher *string `json:"publisher" tf:"publisher,omitempty"` + + // A JSON String which specifies Settings for the Extension. + // +kubebuilder:validation:Optional + Settings *string `json:"settings,omitempty" tf:"settings,omitempty"` + + // Specifies the Type of the Extension. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` + + // Specifies the version of the extension to use, available versions can be found using the Azure CLI. 
+ // +kubebuilder:validation:Optional + TypeHandlerVersion *string `json:"typeHandlerVersion" tf:"type_handler_version,omitempty"` +} + +type OrchestratedVirtualMachineScaleSetIdentityInitParameters struct { + + // Specifies a list of User Managed Identity IDs to be assigned to this Windows Virtual Machine Scale Set. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The type of Managed Identity that should be configured on this Windows Virtual Machine Scale Set. Only possible value is UserAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type OrchestratedVirtualMachineScaleSetIdentityObservation struct { + + // Specifies a list of User Managed Identity IDs to be assigned to this Windows Virtual Machine Scale Set. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The type of Managed Identity that should be configured on this Windows Virtual Machine Scale Set. Only possible value is UserAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type OrchestratedVirtualMachineScaleSetIdentityParameters struct { + + // Specifies a list of User Managed Identity IDs to be assigned to this Windows Virtual Machine Scale Set. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds" tf:"identity_ids,omitempty"` + + // The type of Managed Identity that should be configured on this Windows Virtual Machine Scale Set. Only possible value is UserAssigned. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type OrchestratedVirtualMachineScaleSetInitParameters struct { + + // An additional_capabilities block as defined below. 
+ AdditionalCapabilities *OrchestratedVirtualMachineScaleSetAdditionalCapabilitiesInitParameters `json:"additionalCapabilities,omitempty" tf:"additional_capabilities,omitempty"` + + // An automatic_instance_repair block as defined below. + AutomaticInstanceRepair *OrchestratedVirtualMachineScaleSetAutomaticInstanceRepairInitParameters `json:"automaticInstanceRepair,omitempty" tf:"automatic_instance_repair,omitempty"` + + // A boot_diagnostics block as defined below. + BootDiagnostics *OrchestratedVirtualMachineScaleSetBootDiagnosticsInitParameters `json:"bootDiagnostics,omitempty" tf:"boot_diagnostics,omitempty"` + + // Specifies the ID of the Capacity Reservation Group which the Virtual Machine Scale Set should be allocated to. Changing this forces a new resource to be created. + CapacityReservationGroupID *string `json:"capacityReservationGroupId,omitempty" tf:"capacity_reservation_group_id,omitempty"` + + // One or more data_disk blocks as defined below. + DataDisk []OrchestratedVirtualMachineScaleSetDataDiskInitParameters `json:"dataDisk,omitempty" tf:"data_disk,omitempty"` + + // Should disks attached to this Virtual Machine Scale Set be encrypted by enabling Encryption at Host? + EncryptionAtHostEnabled *bool `json:"encryptionAtHostEnabled,omitempty" tf:"encryption_at_host_enabled,omitempty"` + + // The Policy which should be used by Spot Virtual Machines that are Evicted from the Scale Set. Possible values are Deallocate and Delete. Changing this forces a new resource to be created. + EvictionPolicy *string `json:"evictionPolicy,omitempty" tf:"eviction_policy,omitempty"` + + // One or more extension blocks as defined below + Extension []OrchestratedVirtualMachineScaleSetExtensionInitParameters `json:"extension,omitempty" tf:"extension,omitempty"` + + // Should extension operations be allowed on the Virtual Machine Scale Set? Possible values are true or false. Defaults to true. Changing this forces a new Virtual Machine Scale Set to be created. 
+ ExtensionOperationsEnabled *bool `json:"extensionOperationsEnabled,omitempty" tf:"extension_operations_enabled,omitempty"` + + // Specifies the time alloted for all extensions to start. The time duration should be between 15 minutes and 120 minutes (inclusive) and should be specified in ISO 8601 format. Defaults to PT1H30M. + ExtensionsTimeBudget *string `json:"extensionsTimeBudget,omitempty" tf:"extensions_time_budget,omitempty"` + + // An identity block as defined below. + Identity *OrchestratedVirtualMachineScaleSetIdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The number of Virtual Machines in the Virtual Machine Scale Set. + Instances *float64 `json:"instances,omitempty" tf:"instances,omitempty"` + + // Specifies the type of on-premise license (also known as Azure Hybrid Use Benefit) which should be used for this Virtual Machine Scale Set. Possible values are None, Windows_Client and Windows_Server. + LicenseType *string `json:"licenseType,omitempty" tf:"license_type,omitempty"` + + // The Azure location where the Virtual Machine Scale Set should exist. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The maximum price you're willing to pay for each Virtual Machine in this Scale Set, in US Dollars; which must be greater than the current spot price. If this bid price falls below the current spot price the Virtual Machines in the Scale Set will be evicted using the eviction_policy. Defaults to -1, which means that each Virtual Machine in the Scale Set should not be evicted for price reasons. + MaxBidPrice *float64 `json:"maxBidPrice,omitempty" tf:"max_bid_price,omitempty"` + + // One or more network_interface blocks as defined below. + NetworkInterface []OrchestratedVirtualMachineScaleSetNetworkInterfaceInitParameters `json:"networkInterface,omitempty" tf:"network_interface,omitempty"` + + // An os_disk block as defined below. 
+ OsDisk *OrchestratedVirtualMachineScaleSetOsDiskInitParameters `json:"osDisk,omitempty" tf:"os_disk,omitempty"` + + // An os_profile block as defined below. + OsProfile *OsProfileInitParameters `json:"osProfile,omitempty" tf:"os_profile,omitempty"` + + // A plan block as documented below. Changing this forces a new resource to be created. + Plan *OrchestratedVirtualMachineScaleSetPlanInitParameters `json:"plan,omitempty" tf:"plan,omitempty"` + + // Specifies the number of fault domains that are used by this Virtual Machine Scale Set. Changing this forces a new resource to be created. + PlatformFaultDomainCount *float64 `json:"platformFaultDomainCount,omitempty" tf:"platform_fault_domain_count,omitempty"` + + // The Priority of this Virtual Machine Scale Set. Possible values are Regular and Spot. Defaults to Regular. Changing this value forces a new resource. + Priority *string `json:"priority,omitempty" tf:"priority,omitempty"` + + // a priority_mix block as defined below + PriorityMix *PriorityMixInitParameters `json:"priorityMix,omitempty" tf:"priority_mix,omitempty"` + + // The ID of the Proximity Placement Group which the Virtual Machine should be assigned to. Changing this forces a new resource to be created. + ProximityPlacementGroupID *string `json:"proximityPlacementGroupId,omitempty" tf:"proximity_placement_group_id,omitempty"` + + // Should this Virtual Machine Scale Set be limited to a Single Placement Group, which means the number of instances will be capped at 100 Virtual Machines. Possible values are true or false. + SinglePlacementGroup *bool `json:"singlePlacementGroup,omitempty" tf:"single_placement_group,omitempty"` + + // The name of the SKU to be used by this Virtual Machine Scale Set. Valid values include: any of the General purpose, Compute optimized, Memory optimized, Storage optimized, GPU optimized, FPGA optimized, High performance, or Previous generation virtual machine SKUs. 
+ SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // The ID of an Image which each Virtual Machine in this Scale Set should be based on. Possible Image ID types include Image IDs, Shared Image IDs, Shared Image Version IDs, Community Gallery Image IDs, Community Gallery Image Version IDs, Shared Gallery Image IDs and Shared Gallery Image Version IDs. + SourceImageID *string `json:"sourceImageId,omitempty" tf:"source_image_id,omitempty"` + + // A source_image_reference block as defined below. + SourceImageReference *OrchestratedVirtualMachineScaleSetSourceImageReferenceInitParameters `json:"sourceImageReference,omitempty" tf:"source_image_reference,omitempty"` + + // A mapping of tags which should be assigned to this Virtual Machine Scale Set. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A termination_notification block as defined below. + TerminationNotification *OrchestratedVirtualMachineScaleSetTerminationNotificationInitParameters `json:"terminationNotification,omitempty" tf:"termination_notification,omitempty"` + + // Should the Virtual Machines in this Scale Set be strictly evenly distributed across Availability Zones? Defaults to false. Changing this forces a new resource to be created. + ZoneBalance *bool `json:"zoneBalance,omitempty" tf:"zone_balance,omitempty"` + + // Specifies a list of Availability Zones across which the Virtual Machine Scale Set will create instances. Changing this forces a new Virtual Machine Scale Set to be created. + // +listType=set + Zones []*string `json:"zones,omitempty" tf:"zones,omitempty"` +} + +type OrchestratedVirtualMachineScaleSetNetworkInterfaceInitParameters struct { + + // A list of IP Addresses of DNS Servers which should be assigned to the Network Interface. + DNSServers []*string `json:"dnsServers,omitempty" tf:"dns_servers,omitempty"` + + // Does this Network Interface support Accelerated Networking? Possible values are true and false. 
Defaults to false. + EnableAcceleratedNetworking *bool `json:"enableAcceleratedNetworking,omitempty" tf:"enable_accelerated_networking,omitempty"` + + // Does this Network Interface support IP Forwarding? Possible values are true and false. Defaults to false. + EnableIPForwarding *bool `json:"enableIpForwarding,omitempty" tf:"enable_ip_forwarding,omitempty"` + + // One or more ip_configuration blocks as defined above. + IPConfiguration []NetworkInterfaceIPConfigurationInitParameters `json:"ipConfiguration,omitempty" tf:"ip_configuration,omitempty"` + + // The Name which should be used for this Network Interface. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The ID of a Network Security Group which should be assigned to this Network Interface. + NetworkSecurityGroupID *string `json:"networkSecurityGroupId,omitempty" tf:"network_security_group_id,omitempty"` + + // Is this the Primary IP Configuration? Possible values are true and false. Defaults to false. + Primary *bool `json:"primary,omitempty" tf:"primary,omitempty"` +} + +type OrchestratedVirtualMachineScaleSetNetworkInterfaceObservation struct { + + // A list of IP Addresses of DNS Servers which should be assigned to the Network Interface. + DNSServers []*string `json:"dnsServers,omitempty" tf:"dns_servers,omitempty"` + + // Does this Network Interface support Accelerated Networking? Possible values are true and false. Defaults to false. + EnableAcceleratedNetworking *bool `json:"enableAcceleratedNetworking,omitempty" tf:"enable_accelerated_networking,omitempty"` + + // Does this Network Interface support IP Forwarding? Possible values are true and false. Defaults to false. + EnableIPForwarding *bool `json:"enableIpForwarding,omitempty" tf:"enable_ip_forwarding,omitempty"` + + // One or more ip_configuration blocks as defined above. 
+ IPConfiguration []NetworkInterfaceIPConfigurationObservation `json:"ipConfiguration,omitempty" tf:"ip_configuration,omitempty"` + + // The Name which should be used for this Network Interface. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The ID of a Network Security Group which should be assigned to this Network Interface. + NetworkSecurityGroupID *string `json:"networkSecurityGroupId,omitempty" tf:"network_security_group_id,omitempty"` + + // Is this the Primary IP Configuration? Possible values are true and false. Defaults to false. + Primary *bool `json:"primary,omitempty" tf:"primary,omitempty"` +} + +type OrchestratedVirtualMachineScaleSetNetworkInterfaceParameters struct { + + // A list of IP Addresses of DNS Servers which should be assigned to the Network Interface. + // +kubebuilder:validation:Optional + DNSServers []*string `json:"dnsServers,omitempty" tf:"dns_servers,omitempty"` + + // Does this Network Interface support Accelerated Networking? Possible values are true and false. Defaults to false. + // +kubebuilder:validation:Optional + EnableAcceleratedNetworking *bool `json:"enableAcceleratedNetworking,omitempty" tf:"enable_accelerated_networking,omitempty"` + + // Does this Network Interface support IP Forwarding? Possible values are true and false. Defaults to false. + // +kubebuilder:validation:Optional + EnableIPForwarding *bool `json:"enableIpForwarding,omitempty" tf:"enable_ip_forwarding,omitempty"` + + // One or more ip_configuration blocks as defined above. + // +kubebuilder:validation:Optional + IPConfiguration []NetworkInterfaceIPConfigurationParameters `json:"ipConfiguration" tf:"ip_configuration,omitempty"` + + // The Name which should be used for this Network Interface. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The ID of a Network Security Group which should be assigned to this Network Interface. + // +kubebuilder:validation:Optional + NetworkSecurityGroupID *string `json:"networkSecurityGroupId,omitempty" tf:"network_security_group_id,omitempty"` + + // Is this the Primary IP Configuration? Possible values are true and false. Defaults to false. + // +kubebuilder:validation:Optional + Primary *bool `json:"primary,omitempty" tf:"primary,omitempty"` +} + +type OrchestratedVirtualMachineScaleSetObservation struct { + + // An additional_capabilities block as defined below. + AdditionalCapabilities *OrchestratedVirtualMachineScaleSetAdditionalCapabilitiesObservation `json:"additionalCapabilities,omitempty" tf:"additional_capabilities,omitempty"` + + // An automatic_instance_repair block as defined below. + AutomaticInstanceRepair *OrchestratedVirtualMachineScaleSetAutomaticInstanceRepairObservation `json:"automaticInstanceRepair,omitempty" tf:"automatic_instance_repair,omitempty"` + + // A boot_diagnostics block as defined below. + BootDiagnostics *OrchestratedVirtualMachineScaleSetBootDiagnosticsObservation `json:"bootDiagnostics,omitempty" tf:"boot_diagnostics,omitempty"` + + // Specifies the ID of the Capacity Reservation Group which the Virtual Machine Scale Set should be allocated to. Changing this forces a new resource to be created. + CapacityReservationGroupID *string `json:"capacityReservationGroupId,omitempty" tf:"capacity_reservation_group_id,omitempty"` + + // One or more data_disk blocks as defined below. + DataDisk []OrchestratedVirtualMachineScaleSetDataDiskObservation `json:"dataDisk,omitempty" tf:"data_disk,omitempty"` + + // Should disks attached to this Virtual Machine Scale Set be encrypted by enabling Encryption at Host? 
+ EncryptionAtHostEnabled *bool `json:"encryptionAtHostEnabled,omitempty" tf:"encryption_at_host_enabled,omitempty"` + + // The Policy which should be used by Spot Virtual Machines that are Evicted from the Scale Set. Possible values are Deallocate and Delete. Changing this forces a new resource to be created. + EvictionPolicy *string `json:"evictionPolicy,omitempty" tf:"eviction_policy,omitempty"` + + // One or more extension blocks as defined below + Extension []OrchestratedVirtualMachineScaleSetExtensionObservation `json:"extension,omitempty" tf:"extension,omitempty"` + + // Should extension operations be allowed on the Virtual Machine Scale Set? Possible values are true or false. Defaults to true. Changing this forces a new Virtual Machine Scale Set to be created. + ExtensionOperationsEnabled *bool `json:"extensionOperationsEnabled,omitempty" tf:"extension_operations_enabled,omitempty"` + + // Specifies the time alloted for all extensions to start. The time duration should be between 15 minutes and 120 minutes (inclusive) and should be specified in ISO 8601 format. Defaults to PT1H30M. + ExtensionsTimeBudget *string `json:"extensionsTimeBudget,omitempty" tf:"extensions_time_budget,omitempty"` + + // The ID of the Virtual Machine Scale Set. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. + Identity *OrchestratedVirtualMachineScaleSetIdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // The number of Virtual Machines in the Virtual Machine Scale Set. + Instances *float64 `json:"instances,omitempty" tf:"instances,omitempty"` + + // Specifies the type of on-premise license (also known as Azure Hybrid Use Benefit) which should be used for this Virtual Machine Scale Set. Possible values are None, Windows_Client and Windows_Server. + LicenseType *string `json:"licenseType,omitempty" tf:"license_type,omitempty"` + + // The Azure location where the Virtual Machine Scale Set should exist. 
Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The maximum price you're willing to pay for each Virtual Machine in this Scale Set, in US Dollars; which must be greater than the current spot price. If this bid price falls below the current spot price the Virtual Machines in the Scale Set will be evicted using the eviction_policy. Defaults to -1, which means that each Virtual Machine in the Scale Set should not be evicted for price reasons. + MaxBidPrice *float64 `json:"maxBidPrice,omitempty" tf:"max_bid_price,omitempty"` + + // One or more network_interface blocks as defined below. + NetworkInterface []OrchestratedVirtualMachineScaleSetNetworkInterfaceObservation `json:"networkInterface,omitempty" tf:"network_interface,omitempty"` + + // An os_disk block as defined below. + OsDisk *OrchestratedVirtualMachineScaleSetOsDiskObservation `json:"osDisk,omitempty" tf:"os_disk,omitempty"` + + // An os_profile block as defined below. + OsProfile *OsProfileObservation `json:"osProfile,omitempty" tf:"os_profile,omitempty"` + + // A plan block as documented below. Changing this forces a new resource to be created. + Plan *OrchestratedVirtualMachineScaleSetPlanObservation `json:"plan,omitempty" tf:"plan,omitempty"` + + // Specifies the number of fault domains that are used by this Virtual Machine Scale Set. Changing this forces a new resource to be created. + PlatformFaultDomainCount *float64 `json:"platformFaultDomainCount,omitempty" tf:"platform_fault_domain_count,omitempty"` + + // The Priority of this Virtual Machine Scale Set. Possible values are Regular and Spot. Defaults to Regular. Changing this value forces a new resource. 
+ Priority *string `json:"priority,omitempty" tf:"priority,omitempty"` + + // a priority_mix block as defined below + PriorityMix *PriorityMixObservation `json:"priorityMix,omitempty" tf:"priority_mix,omitempty"` + + // The ID of the Proximity Placement Group which the Virtual Machine should be assigned to. Changing this forces a new resource to be created. + ProximityPlacementGroupID *string `json:"proximityPlacementGroupId,omitempty" tf:"proximity_placement_group_id,omitempty"` + + // The name of the Resource Group in which the Virtual Machine Scale Set should exist. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Should this Virtual Machine Scale Set be limited to a Single Placement Group, which means the number of instances will be capped at 100 Virtual Machines. Possible values are true or false. + SinglePlacementGroup *bool `json:"singlePlacementGroup,omitempty" tf:"single_placement_group,omitempty"` + + // The name of the SKU to be used by this Virtual Machine Scale Set. Valid values include: any of the General purpose, Compute optimized, Memory optimized, Storage optimized, GPU optimized, FPGA optimized, High performance, or Previous generation virtual machine SKUs. + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // The ID of an Image which each Virtual Machine in this Scale Set should be based on. Possible Image ID types include Image IDs, Shared Image IDs, Shared Image Version IDs, Community Gallery Image IDs, Community Gallery Image Version IDs, Shared Gallery Image IDs and Shared Gallery Image Version IDs. + SourceImageID *string `json:"sourceImageId,omitempty" tf:"source_image_id,omitempty"` + + // A source_image_reference block as defined below. 
+ SourceImageReference *OrchestratedVirtualMachineScaleSetSourceImageReferenceObservation `json:"sourceImageReference,omitempty" tf:"source_image_reference,omitempty"` + + // A mapping of tags which should be assigned to this Virtual Machine Scale Set. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A termination_notification block as defined below. + TerminationNotification *OrchestratedVirtualMachineScaleSetTerminationNotificationObservation `json:"terminationNotification,omitempty" tf:"termination_notification,omitempty"` + + // The Unique ID for the Virtual Machine Scale Set. + UniqueID *string `json:"uniqueId,omitempty" tf:"unique_id,omitempty"` + + // Should the Virtual Machines in this Scale Set be strictly evenly distributed across Availability Zones? Defaults to false. Changing this forces a new resource to be created. + ZoneBalance *bool `json:"zoneBalance,omitempty" tf:"zone_balance,omitempty"` + + // Specifies a list of Availability Zones across which the Virtual Machine Scale Set will create instances. Changing this forces a new Virtual Machine Scale Set to be created. + // +listType=set + Zones []*string `json:"zones,omitempty" tf:"zones,omitempty"` +} + +type OrchestratedVirtualMachineScaleSetOsDiskDiffDiskSettingsInitParameters struct { + + // Specifies the Ephemeral Disk Settings for the OS Disk. At this time the only possible value is Local. Changing this forces a new resource to be created. + Option *string `json:"option,omitempty" tf:"option,omitempty"` + + // Specifies where to store the Ephemeral Disk. Possible values are CacheDisk and ResourceDisk. Defaults to CacheDisk. Changing this forces a new resource to be created. + Placement *string `json:"placement,omitempty" tf:"placement,omitempty"` +} + +type OrchestratedVirtualMachineScaleSetOsDiskDiffDiskSettingsObservation struct { + + // Specifies the Ephemeral Disk Settings for the OS Disk. At this time the only possible value is Local. 
Changing this forces a new resource to be created. + Option *string `json:"option,omitempty" tf:"option,omitempty"` + + // Specifies where to store the Ephemeral Disk. Possible values are CacheDisk and ResourceDisk. Defaults to CacheDisk. Changing this forces a new resource to be created. + Placement *string `json:"placement,omitempty" tf:"placement,omitempty"` +} + +type OrchestratedVirtualMachineScaleSetOsDiskDiffDiskSettingsParameters struct { + + // Specifies the Ephemeral Disk Settings for the OS Disk. At this time the only possible value is Local. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Option *string `json:"option" tf:"option,omitempty"` + + // Specifies where to store the Ephemeral Disk. Possible values are CacheDisk and ResourceDisk. Defaults to CacheDisk. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Placement *string `json:"placement,omitempty" tf:"placement,omitempty"` +} + +type OrchestratedVirtualMachineScaleSetOsDiskInitParameters struct { + + // The Type of Caching which should be used for the Internal OS Disk. Possible values are None, ReadOnly and ReadWrite. + Caching *string `json:"caching,omitempty" tf:"caching,omitempty"` + + // A diff_disk_settings block as defined above. Changing this forces a new resource to be created. + DiffDiskSettings *OrchestratedVirtualMachineScaleSetOsDiskDiffDiskSettingsInitParameters `json:"diffDiskSettings,omitempty" tf:"diff_disk_settings,omitempty"` + + // The ID of the Disk Encryption Set which should be used to encrypt this OS Disk. Changing this forces a new resource to be created. + DiskEncryptionSetID *string `json:"diskEncryptionSetId,omitempty" tf:"disk_encryption_set_id,omitempty"` + + // The Size of the Internal OS Disk in GB, if you wish to vary from the size used in the image this Virtual Machine Scale Set is sourced from. 
+ DiskSizeGb *float64 `json:"diskSizeGb,omitempty" tf:"disk_size_gb,omitempty"` + + // The Type of Storage Account which should back this the Internal OS Disk. Possible values include Standard_LRS, StandardSSD_LRS, StandardSSD_ZRS, Premium_LRS and Premium_ZRS. Changing this forces a new resource to be created. + StorageAccountType *string `json:"storageAccountType,omitempty" tf:"storage_account_type,omitempty"` + + // Specifies if Write Accelerator is enabled on the OS Disk. Defaults to false. + WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty" tf:"write_accelerator_enabled,omitempty"` +} + +type OrchestratedVirtualMachineScaleSetOsDiskObservation struct { + + // The Type of Caching which should be used for the Internal OS Disk. Possible values are None, ReadOnly and ReadWrite. + Caching *string `json:"caching,omitempty" tf:"caching,omitempty"` + + // A diff_disk_settings block as defined above. Changing this forces a new resource to be created. + DiffDiskSettings *OrchestratedVirtualMachineScaleSetOsDiskDiffDiskSettingsObservation `json:"diffDiskSettings,omitempty" tf:"diff_disk_settings,omitempty"` + + // The ID of the Disk Encryption Set which should be used to encrypt this OS Disk. Changing this forces a new resource to be created. + DiskEncryptionSetID *string `json:"diskEncryptionSetId,omitempty" tf:"disk_encryption_set_id,omitempty"` + + // The Size of the Internal OS Disk in GB, if you wish to vary from the size used in the image this Virtual Machine Scale Set is sourced from. + DiskSizeGb *float64 `json:"diskSizeGb,omitempty" tf:"disk_size_gb,omitempty"` + + // The Type of Storage Account which should back this the Internal OS Disk. Possible values include Standard_LRS, StandardSSD_LRS, StandardSSD_ZRS, Premium_LRS and Premium_ZRS. Changing this forces a new resource to be created. 
+ StorageAccountType *string `json:"storageAccountType,omitempty" tf:"storage_account_type,omitempty"` + + // Specifies if Write Accelerator is enabled on the OS Disk. Defaults to false. + WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty" tf:"write_accelerator_enabled,omitempty"` +} + +type OrchestratedVirtualMachineScaleSetOsDiskParameters struct { + + // The Type of Caching which should be used for the Internal OS Disk. Possible values are None, ReadOnly and ReadWrite. + // +kubebuilder:validation:Optional + Caching *string `json:"caching" tf:"caching,omitempty"` + + // A diff_disk_settings block as defined above. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + DiffDiskSettings *OrchestratedVirtualMachineScaleSetOsDiskDiffDiskSettingsParameters `json:"diffDiskSettings,omitempty" tf:"diff_disk_settings,omitempty"` + + // The ID of the Disk Encryption Set which should be used to encrypt this OS Disk. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + DiskEncryptionSetID *string `json:"diskEncryptionSetId,omitempty" tf:"disk_encryption_set_id,omitempty"` + + // The Size of the Internal OS Disk in GB, if you wish to vary from the size used in the image this Virtual Machine Scale Set is sourced from. + // +kubebuilder:validation:Optional + DiskSizeGb *float64 `json:"diskSizeGb,omitempty" tf:"disk_size_gb,omitempty"` + + // The Type of Storage Account which should back this the Internal OS Disk. Possible values include Standard_LRS, StandardSSD_LRS, StandardSSD_ZRS, Premium_LRS and Premium_ZRS. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + StorageAccountType *string `json:"storageAccountType" tf:"storage_account_type,omitempty"` + + // Specifies if Write Accelerator is enabled on the OS Disk. Defaults to false. 
+ // +kubebuilder:validation:Optional + WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty" tf:"write_accelerator_enabled,omitempty"` +} + +type OrchestratedVirtualMachineScaleSetParameters struct { + + // An additional_capabilities block as defined below. + // +kubebuilder:validation:Optional + AdditionalCapabilities *OrchestratedVirtualMachineScaleSetAdditionalCapabilitiesParameters `json:"additionalCapabilities,omitempty" tf:"additional_capabilities,omitempty"` + + // An automatic_instance_repair block as defined below. + // +kubebuilder:validation:Optional + AutomaticInstanceRepair *OrchestratedVirtualMachineScaleSetAutomaticInstanceRepairParameters `json:"automaticInstanceRepair,omitempty" tf:"automatic_instance_repair,omitempty"` + + // A boot_diagnostics block as defined below. + // +kubebuilder:validation:Optional + BootDiagnostics *OrchestratedVirtualMachineScaleSetBootDiagnosticsParameters `json:"bootDiagnostics,omitempty" tf:"boot_diagnostics,omitempty"` + + // Specifies the ID of the Capacity Reservation Group which the Virtual Machine Scale Set should be allocated to. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + CapacityReservationGroupID *string `json:"capacityReservationGroupId,omitempty" tf:"capacity_reservation_group_id,omitempty"` + + // One or more data_disk blocks as defined below. + // +kubebuilder:validation:Optional + DataDisk []OrchestratedVirtualMachineScaleSetDataDiskParameters `json:"dataDisk,omitempty" tf:"data_disk,omitempty"` + + // Should disks attached to this Virtual Machine Scale Set be encrypted by enabling Encryption at Host? + // +kubebuilder:validation:Optional + EncryptionAtHostEnabled *bool `json:"encryptionAtHostEnabled,omitempty" tf:"encryption_at_host_enabled,omitempty"` + + // The Policy which should be used by Spot Virtual Machines that are Evicted from the Scale Set. Possible values are Deallocate and Delete. 
Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + EvictionPolicy *string `json:"evictionPolicy,omitempty" tf:"eviction_policy,omitempty"` + + // One or more extension blocks as defined below + // +kubebuilder:validation:Optional + Extension []OrchestratedVirtualMachineScaleSetExtensionParameters `json:"extension,omitempty" tf:"extension,omitempty"` + + // Should extension operations be allowed on the Virtual Machine Scale Set? Possible values are true or false. Defaults to true. Changing this forces a new Virtual Machine Scale Set to be created. + // +kubebuilder:validation:Optional + ExtensionOperationsEnabled *bool `json:"extensionOperationsEnabled,omitempty" tf:"extension_operations_enabled,omitempty"` + + // Specifies the time alloted for all extensions to start. The time duration should be between 15 minutes and 120 minutes (inclusive) and should be specified in ISO 8601 format. Defaults to PT1H30M. + // +kubebuilder:validation:Optional + ExtensionsTimeBudget *string `json:"extensionsTimeBudget,omitempty" tf:"extensions_time_budget,omitempty"` + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *OrchestratedVirtualMachineScaleSetIdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The number of Virtual Machines in the Virtual Machine Scale Set. + // +kubebuilder:validation:Optional + Instances *float64 `json:"instances,omitempty" tf:"instances,omitempty"` + + // Specifies the type of on-premise license (also known as Azure Hybrid Use Benefit) which should be used for this Virtual Machine Scale Set. Possible values are None, Windows_Client and Windows_Server. + // +kubebuilder:validation:Optional + LicenseType *string `json:"licenseType,omitempty" tf:"license_type,omitempty"` + + // The Azure location where the Virtual Machine Scale Set should exist. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The maximum price you're willing to pay for each Virtual Machine in this Scale Set, in US Dollars; which must be greater than the current spot price. If this bid price falls below the current spot price the Virtual Machines in the Scale Set will be evicted using the eviction_policy. Defaults to -1, which means that each Virtual Machine in the Scale Set should not be evicted for price reasons. + // +kubebuilder:validation:Optional + MaxBidPrice *float64 `json:"maxBidPrice,omitempty" tf:"max_bid_price,omitempty"` + + // One or more network_interface blocks as defined below. + // +kubebuilder:validation:Optional + NetworkInterface []OrchestratedVirtualMachineScaleSetNetworkInterfaceParameters `json:"networkInterface,omitempty" tf:"network_interface,omitempty"` + + // An os_disk block as defined below. + // +kubebuilder:validation:Optional + OsDisk *OrchestratedVirtualMachineScaleSetOsDiskParameters `json:"osDisk,omitempty" tf:"os_disk,omitempty"` + + // An os_profile block as defined below. + // +kubebuilder:validation:Optional + OsProfile *OsProfileParameters `json:"osProfile,omitempty" tf:"os_profile,omitempty"` + + // A plan block as documented below. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Plan *OrchestratedVirtualMachineScaleSetPlanParameters `json:"plan,omitempty" tf:"plan,omitempty"` + + // Specifies the number of fault domains that are used by this Virtual Machine Scale Set. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + PlatformFaultDomainCount *float64 `json:"platformFaultDomainCount,omitempty" tf:"platform_fault_domain_count,omitempty"` + + // The Priority of this Virtual Machine Scale Set. Possible values are Regular and Spot. Defaults to Regular. Changing this value forces a new resource. 
+ // +kubebuilder:validation:Optional + Priority *string `json:"priority,omitempty" tf:"priority,omitempty"` + + // a priority_mix block as defined below + // +kubebuilder:validation:Optional + PriorityMix *PriorityMixParameters `json:"priorityMix,omitempty" tf:"priority_mix,omitempty"` + + // The ID of the Proximity Placement Group which the Virtual Machine should be assigned to. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ProximityPlacementGroupID *string `json:"proximityPlacementGroupId,omitempty" tf:"proximity_placement_group_id,omitempty"` + + // The name of the Resource Group in which the Virtual Machine Scale Set should exist. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // Should this Virtual Machine Scale Set be limited to a Single Placement Group, which means the number of instances will be capped at 100 Virtual Machines. Possible values are true or false. + // +kubebuilder:validation:Optional + SinglePlacementGroup *bool `json:"singlePlacementGroup,omitempty" tf:"single_placement_group,omitempty"` + + // The name of the SKU to be used by this Virtual Machine Scale Set. 
Valid values include: any of the General purpose, Compute optimized, Memory optimized, Storage optimized, GPU optimized, FPGA optimized, High performance, or Previous generation virtual machine SKUs. + // +kubebuilder:validation:Optional + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // The ID of an Image which each Virtual Machine in this Scale Set should be based on. Possible Image ID types include Image IDs, Shared Image IDs, Shared Image Version IDs, Community Gallery Image IDs, Community Gallery Image Version IDs, Shared Gallery Image IDs and Shared Gallery Image Version IDs. + // +kubebuilder:validation:Optional + SourceImageID *string `json:"sourceImageId,omitempty" tf:"source_image_id,omitempty"` + + // A source_image_reference block as defined below. + // +kubebuilder:validation:Optional + SourceImageReference *OrchestratedVirtualMachineScaleSetSourceImageReferenceParameters `json:"sourceImageReference,omitempty" tf:"source_image_reference,omitempty"` + + // A mapping of tags which should be assigned to this Virtual Machine Scale Set. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A termination_notification block as defined below. + // +kubebuilder:validation:Optional + TerminationNotification *OrchestratedVirtualMachineScaleSetTerminationNotificationParameters `json:"terminationNotification,omitempty" tf:"termination_notification,omitempty"` + + // The Base64-Encoded User Data which should be used for this Virtual Machine Scale Set. + // +kubebuilder:validation:Optional + UserDataBase64SecretRef *v1.SecretKeySelector `json:"userDataBase64SecretRef,omitempty" tf:"-"` + + // Should the Virtual Machines in this Scale Set be strictly evenly distributed across Availability Zones? Defaults to false. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + ZoneBalance *bool `json:"zoneBalance,omitempty" tf:"zone_balance,omitempty"` + + // Specifies a list of Availability Zones across which the Virtual Machine Scale Set will create instances. Changing this forces a new Virtual Machine Scale Set to be created. + // +kubebuilder:validation:Optional + // +listType=set + Zones []*string `json:"zones,omitempty" tf:"zones,omitempty"` +} + +type OrchestratedVirtualMachineScaleSetPlanInitParameters struct { + + // Specifies the name of the image from the marketplace. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies the product of the image from the marketplace. Changing this forces a new resource to be created. + Product *string `json:"product,omitempty" tf:"product,omitempty"` + + // Specifies the publisher of the image. Changing this forces a new resource to be created. + Publisher *string `json:"publisher,omitempty" tf:"publisher,omitempty"` +} + +type OrchestratedVirtualMachineScaleSetPlanObservation struct { + + // Specifies the name of the image from the marketplace. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies the product of the image from the marketplace. Changing this forces a new resource to be created. + Product *string `json:"product,omitempty" tf:"product,omitempty"` + + // Specifies the publisher of the image. Changing this forces a new resource to be created. + Publisher *string `json:"publisher,omitempty" tf:"publisher,omitempty"` +} + +type OrchestratedVirtualMachineScaleSetPlanParameters struct { + + // Specifies the name of the image from the marketplace. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Specifies the product of the image from the marketplace. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + Product *string `json:"product" tf:"product,omitempty"` + + // Specifies the publisher of the image. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Publisher *string `json:"publisher" tf:"publisher,omitempty"` +} + +type OrchestratedVirtualMachineScaleSetSourceImageReferenceInitParameters struct { + + // Specifies the offer of the image used to create the virtual machines. Changing this forces a new resource to be created. + Offer *string `json:"offer,omitempty" tf:"offer,omitempty"` + + // Specifies the publisher of the image used to create the virtual machines. Changing this forces a new resource to be created. + Publisher *string `json:"publisher,omitempty" tf:"publisher,omitempty"` + + // Specifies the SKU of the image used to create the virtual machines. + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // Specifies the version of the image used to create the virtual machines. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type OrchestratedVirtualMachineScaleSetSourceImageReferenceObservation struct { + + // Specifies the offer of the image used to create the virtual machines. Changing this forces a new resource to be created. + Offer *string `json:"offer,omitempty" tf:"offer,omitempty"` + + // Specifies the publisher of the image used to create the virtual machines. Changing this forces a new resource to be created. + Publisher *string `json:"publisher,omitempty" tf:"publisher,omitempty"` + + // Specifies the SKU of the image used to create the virtual machines. + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // Specifies the version of the image used to create the virtual machines. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type OrchestratedVirtualMachineScaleSetSourceImageReferenceParameters struct { + + // Specifies the offer of the image used to create the virtual machines. 
Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Offer *string `json:"offer" tf:"offer,omitempty"` + + // Specifies the publisher of the image used to create the virtual machines. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Publisher *string `json:"publisher" tf:"publisher,omitempty"` + + // Specifies the SKU of the image used to create the virtual machines. + // +kubebuilder:validation:Optional + Sku *string `json:"sku" tf:"sku,omitempty"` + + // Specifies the version of the image used to create the virtual machines. + // +kubebuilder:validation:Optional + Version *string `json:"version" tf:"version,omitempty"` +} + +type OrchestratedVirtualMachineScaleSetTerminationNotificationInitParameters struct { + + // Should the termination notification be enabled on this Virtual Machine Scale Set? Possible values true or false + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Length of time (in minutes, between 5 and 15) a notification to be sent to the VM on the instance metadata server till the VM gets deleted. The time duration should be specified in ISO 8601 format. Defaults to PT5M. + Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` +} + +type OrchestratedVirtualMachineScaleSetTerminationNotificationObservation struct { + + // Should the termination notification be enabled on this Virtual Machine Scale Set? Possible values true or false + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Length of time (in minutes, between 5 and 15) a notification to be sent to the VM on the instance metadata server till the VM gets deleted. The time duration should be specified in ISO 8601 format. Defaults to PT5M. 
+ Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` +} + +type OrchestratedVirtualMachineScaleSetTerminationNotificationParameters struct { + + // Should the termination notification be enabled on this Virtual Machine Scale Set? Possible values true or false + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` + + // Length of time (in minutes, between 5 and 15) a notification to be sent to the VM on the instance metadata server till the VM gets deleted. The time duration should be specified in ISO 8601 format. Defaults to PT5M. + // +kubebuilder:validation:Optional + Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` +} + +type OsProfileInitParameters struct { + + // A linux_configuration block as documented below. + LinuxConfiguration *LinuxConfigurationInitParameters `json:"linuxConfiguration,omitempty" tf:"linux_configuration,omitempty"` + + // A windows_configuration block as documented below. + WindowsConfiguration *WindowsConfigurationInitParameters `json:"windowsConfiguration,omitempty" tf:"windows_configuration,omitempty"` +} + +type OsProfileObservation struct { + + // A linux_configuration block as documented below. + LinuxConfiguration *LinuxConfigurationObservation `json:"linuxConfiguration,omitempty" tf:"linux_configuration,omitempty"` + + // A windows_configuration block as documented below. + WindowsConfiguration *WindowsConfigurationObservation `json:"windowsConfiguration,omitempty" tf:"windows_configuration,omitempty"` +} + +type OsProfileParameters struct { + + // The Base64-Encoded Custom Data which should be used for this Virtual Machine Scale Set. + // +kubebuilder:validation:Optional + CustomDataSecretRef *v1.SecretKeySelector `json:"customDataSecretRef,omitempty" tf:"-"` + + // A linux_configuration block as documented below. 
+ // +kubebuilder:validation:Optional + LinuxConfiguration *LinuxConfigurationParameters `json:"linuxConfiguration,omitempty" tf:"linux_configuration,omitempty"` + + // A windows_configuration block as documented below. + // +kubebuilder:validation:Optional + WindowsConfiguration *WindowsConfigurationParameters `json:"windowsConfiguration,omitempty" tf:"windows_configuration,omitempty"` +} + +type PriorityMixInitParameters struct { + + // Specifies the base number of VMs of Regular priority that will be created before any VMs of priority Spot are created. Possible values are integers between 0 and 1000. Defaults to 0. + BaseRegularCount *float64 `json:"baseRegularCount,omitempty" tf:"base_regular_count,omitempty"` + + // Specifies the desired percentage of VM instances that are of Regular priority after the base count has been reached. Possible values are integers between 0 and 100. Defaults to 0. + RegularPercentageAboveBase *float64 `json:"regularPercentageAboveBase,omitempty" tf:"regular_percentage_above_base,omitempty"` +} + +type PriorityMixObservation struct { + + // Specifies the base number of VMs of Regular priority that will be created before any VMs of priority Spot are created. Possible values are integers between 0 and 1000. Defaults to 0. + BaseRegularCount *float64 `json:"baseRegularCount,omitempty" tf:"base_regular_count,omitempty"` + + // Specifies the desired percentage of VM instances that are of Regular priority after the base count has been reached. Possible values are integers between 0 and 100. Defaults to 0. + RegularPercentageAboveBase *float64 `json:"regularPercentageAboveBase,omitempty" tf:"regular_percentage_above_base,omitempty"` +} + +type PriorityMixParameters struct { + + // Specifies the base number of VMs of Regular priority that will be created before any VMs of priority Spot are created. Possible values are integers between 0 and 1000. Defaults to 0. 
+ // +kubebuilder:validation:Optional + BaseRegularCount *float64 `json:"baseRegularCount,omitempty" tf:"base_regular_count,omitempty"` + + // Specifies the desired percentage of VM instances that are of Regular priority after the base count has been reached. Possible values are integers between 0 and 100. Defaults to 0. + // +kubebuilder:validation:Optional + RegularPercentageAboveBase *float64 `json:"regularPercentageAboveBase,omitempty" tf:"regular_percentage_above_base,omitempty"` +} + +type PublicIPAddressIPTagInitParameters struct { + + // The IP Tag associated with the Public IP, such as SQL or Storage. Changing this forces a new resource to be created. + Tag *string `json:"tag,omitempty" tf:"tag,omitempty"` + + // The Type of IP Tag, such as FirstPartyUsage. Changing this forces a new resource to be created. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type PublicIPAddressIPTagObservation struct { + + // The IP Tag associated with the Public IP, such as SQL or Storage. Changing this forces a new resource to be created. + Tag *string `json:"tag,omitempty" tf:"tag,omitempty"` + + // The Type of IP Tag, such as FirstPartyUsage. Changing this forces a new resource to be created. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type PublicIPAddressIPTagParameters struct { + + // The IP Tag associated with the Public IP, such as SQL or Storage. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Tag *string `json:"tag" tf:"tag,omitempty"` + + // The Type of IP Tag, such as FirstPartyUsage. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type WindowsConfigurationInitParameters struct { + + // One or more additional_unattend_content blocks as defined below. Changing this forces a new resource to be created. 
+ AdditionalUnattendContent []AdditionalUnattendContentInitParameters `json:"additionalUnattendContent,omitempty" tf:"additional_unattend_content,omitempty"` + + // The username of the local administrator on each Virtual Machine Scale Set instance. Changing this forces a new resource to be created. + AdminUsername *string `json:"adminUsername,omitempty" tf:"admin_username,omitempty"` + + // The prefix which should be used for the name of the Virtual Machines in this Scale Set. If unspecified this defaults to the value for the name field. If the value of the name field is not a valid computer_name_prefix, then you must specify computer_name_prefix. Changing this forces a new resource to be created. + ComputerNamePrefix *string `json:"computerNamePrefix,omitempty" tf:"computer_name_prefix,omitempty"` + + // Are automatic updates enabled for this Virtual Machine? Defaults to true. + EnableAutomaticUpdates *bool `json:"enableAutomaticUpdates,omitempty" tf:"enable_automatic_updates,omitempty"` + + // Should the VM be patched without requiring a reboot? Possible values are true or false. Defaults to false. For more information about hot patching please see the product documentation. + HotpatchingEnabled *bool `json:"hotpatchingEnabled,omitempty" tf:"hotpatching_enabled,omitempty"` + + // Specifies the mode of VM Guest Patching for the virtual machines that are associated to the Virtual Machine Scale Set. Possible values are AutomaticByPlatform or ImageDefault. Defaults to ImageDefault. + PatchAssessmentMode *string `json:"patchAssessmentMode,omitempty" tf:"patch_assessment_mode,omitempty"` + + // Specifies the mode of in-guest patching of this Windows Virtual Machine. Possible values are Manual, AutomaticByOS and AutomaticByPlatform. Defaults to AutomaticByOS. For more information on patch modes please see the product documentation. 
+ PatchMode *string `json:"patchMode,omitempty" tf:"patch_mode,omitempty"` + + // Should the Azure VM Agent be provisioned on each Virtual Machine in the Scale Set? Defaults to true. Changing this value forces a new resource to be created. + ProvisionVMAgent *bool `json:"provisionVmAgent,omitempty" tf:"provision_vm_agent,omitempty"` + + // One or more secret blocks as defined below. + Secret []WindowsConfigurationSecretInitParameters `json:"secret,omitempty" tf:"secret,omitempty"` + + // Specifies the time zone of the virtual machine, the possible values are defined here. + Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` + + // One or more winrm_listener blocks as defined below. Changing this forces a new resource to be created. + WinrmListener []WinrmListenerInitParameters `json:"winrmListener,omitempty" tf:"winrm_listener,omitempty"` +} + +type WindowsConfigurationObservation struct { + + // One or more additional_unattend_content blocks as defined below. Changing this forces a new resource to be created. + AdditionalUnattendContent []AdditionalUnattendContentObservation `json:"additionalUnattendContent,omitempty" tf:"additional_unattend_content,omitempty"` + + // The username of the local administrator on each Virtual Machine Scale Set instance. Changing this forces a new resource to be created. + AdminUsername *string `json:"adminUsername,omitempty" tf:"admin_username,omitempty"` + + // The prefix which should be used for the name of the Virtual Machines in this Scale Set. If unspecified this defaults to the value for the name field. If the value of the name field is not a valid computer_name_prefix, then you must specify computer_name_prefix. Changing this forces a new resource to be created. + ComputerNamePrefix *string `json:"computerNamePrefix,omitempty" tf:"computer_name_prefix,omitempty"` + + // Are automatic updates enabled for this Virtual Machine? Defaults to true. 
+ EnableAutomaticUpdates *bool `json:"enableAutomaticUpdates,omitempty" tf:"enable_automatic_updates,omitempty"` + + // Should the VM be patched without requiring a reboot? Possible values are true or false. Defaults to false. For more information about hot patching please see the product documentation. + HotpatchingEnabled *bool `json:"hotpatchingEnabled,omitempty" tf:"hotpatching_enabled,omitempty"` + + // Specifies the mode of VM Guest Patching for the virtual machines that are associated to the Virtual Machine Scale Set. Possible values are AutomaticByPlatform or ImageDefault. Defaults to ImageDefault. + PatchAssessmentMode *string `json:"patchAssessmentMode,omitempty" tf:"patch_assessment_mode,omitempty"` + + // Specifies the mode of in-guest patching of this Windows Virtual Machine. Possible values are Manual, AutomaticByOS and AutomaticByPlatform. Defaults to AutomaticByOS. For more information on patch modes please see the product documentation. + PatchMode *string `json:"patchMode,omitempty" tf:"patch_mode,omitempty"` + + // Should the Azure VM Agent be provisioned on each Virtual Machine in the Scale Set? Defaults to true. Changing this value forces a new resource to be created. + ProvisionVMAgent *bool `json:"provisionVmAgent,omitempty" tf:"provision_vm_agent,omitempty"` + + // One or more secret blocks as defined below. + Secret []WindowsConfigurationSecretObservation `json:"secret,omitempty" tf:"secret,omitempty"` + + // Specifies the time zone of the virtual machine, the possible values are defined here. + Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` + + // One or more winrm_listener blocks as defined below. Changing this forces a new resource to be created. + WinrmListener []WinrmListenerObservation `json:"winrmListener,omitempty" tf:"winrm_listener,omitempty"` +} + +type WindowsConfigurationParameters struct { + + // One or more additional_unattend_content blocks as defined below. 
Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + AdditionalUnattendContent []AdditionalUnattendContentParameters `json:"additionalUnattendContent,omitempty" tf:"additional_unattend_content,omitempty"` + + // The Password which should be used for the local-administrator on this Virtual Machine. Changing this forces a new resource to be created. + // +kubebuilder:validation:Required + AdminPasswordSecretRef v1.SecretKeySelector `json:"adminPasswordSecretRef" tf:"-"` + + // The username of the local administrator on each Virtual Machine Scale Set instance. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + AdminUsername *string `json:"adminUsername" tf:"admin_username,omitempty"` + + // The prefix which should be used for the name of the Virtual Machines in this Scale Set. If unspecified this defaults to the value for the name field. If the value of the name field is not a valid computer_name_prefix, then you must specify computer_name_prefix. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ComputerNamePrefix *string `json:"computerNamePrefix,omitempty" tf:"computer_name_prefix,omitempty"` + + // Are automatic updates enabled for this Virtual Machine? Defaults to true. + // +kubebuilder:validation:Optional + EnableAutomaticUpdates *bool `json:"enableAutomaticUpdates,omitempty" tf:"enable_automatic_updates,omitempty"` + + // Should the VM be patched without requiring a reboot? Possible values are true or false. Defaults to false. For more information about hot patching please see the product documentation. + // +kubebuilder:validation:Optional + HotpatchingEnabled *bool `json:"hotpatchingEnabled,omitempty" tf:"hotpatching_enabled,omitempty"` + + // Specifies the mode of VM Guest Patching for the virtual machines that are associated to the Virtual Machine Scale Set. Possible values are AutomaticByPlatform or ImageDefault. Defaults to ImageDefault. 
+ // +kubebuilder:validation:Optional + PatchAssessmentMode *string `json:"patchAssessmentMode,omitempty" tf:"patch_assessment_mode,omitempty"` + + // Specifies the mode of in-guest patching of this Windows Virtual Machine. Possible values are Manual, AutomaticByOS and AutomaticByPlatform. Defaults to AutomaticByOS. For more information on patch modes please see the product documentation. + // +kubebuilder:validation:Optional + PatchMode *string `json:"patchMode,omitempty" tf:"patch_mode,omitempty"` + + // Should the Azure VM Agent be provisioned on each Virtual Machine in the Scale Set? Defaults to true. Changing this value forces a new resource to be created. + // +kubebuilder:validation:Optional + ProvisionVMAgent *bool `json:"provisionVmAgent,omitempty" tf:"provision_vm_agent,omitempty"` + + // One or more secret blocks as defined below. + // +kubebuilder:validation:Optional + Secret []WindowsConfigurationSecretParameters `json:"secret,omitempty" tf:"secret,omitempty"` + + // Specifies the time zone of the virtual machine, the possible values are defined here. + // +kubebuilder:validation:Optional + Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` + + // One or more winrm_listener blocks as defined below. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + WinrmListener []WinrmListenerParameters `json:"winrmListener,omitempty" tf:"winrm_listener,omitempty"` +} + +type WindowsConfigurationSecretCertificateInitParameters struct { + + // The certificate store on the Virtual Machine where the certificate should be added. + Store *string `json:"store,omitempty" tf:"store,omitempty"` + + // The Secret URL of a Key Vault Certificate. + URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +type WindowsConfigurationSecretCertificateObservation struct { + + // The certificate store on the Virtual Machine where the certificate should be added. 
+ Store *string `json:"store,omitempty" tf:"store,omitempty"` + + // The Secret URL of a Key Vault Certificate. + URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +type WindowsConfigurationSecretCertificateParameters struct { + + // The certificate store on the Virtual Machine where the certificate should be added. + // +kubebuilder:validation:Optional + Store *string `json:"store" tf:"store,omitempty"` + + // The Secret URL of a Key Vault Certificate. + // +kubebuilder:validation:Optional + URL *string `json:"url" tf:"url,omitempty"` +} + +type WindowsConfigurationSecretInitParameters struct { + + // One or more certificate blocks as defined below. + Certificate []WindowsConfigurationSecretCertificateInitParameters `json:"certificate,omitempty" tf:"certificate,omitempty"` + + // The ID of the Key Vault from which all Secrets should be sourced. + KeyVaultID *string `json:"keyVaultId,omitempty" tf:"key_vault_id,omitempty"` +} + +type WindowsConfigurationSecretObservation struct { + + // One or more certificate blocks as defined below. + Certificate []WindowsConfigurationSecretCertificateObservation `json:"certificate,omitempty" tf:"certificate,omitempty"` + + // The ID of the Key Vault from which all Secrets should be sourced. + KeyVaultID *string `json:"keyVaultId,omitempty" tf:"key_vault_id,omitempty"` +} + +type WindowsConfigurationSecretParameters struct { + + // One or more certificate blocks as defined below. + // +kubebuilder:validation:Optional + Certificate []WindowsConfigurationSecretCertificateParameters `json:"certificate" tf:"certificate,omitempty"` + + // The ID of the Key Vault from which all Secrets should be sourced. + // +kubebuilder:validation:Optional + KeyVaultID *string `json:"keyVaultId" tf:"key_vault_id,omitempty"` +} + +type WinrmListenerInitParameters struct { + + // The Secret URL of a Key Vault Certificate, which must be specified when protocol is set to Https. Changing this forces a new resource to be created. 
+ CertificateURL *string `json:"certificateUrl,omitempty" tf:"certificate_url,omitempty"` + + // Specifies the protocol of listener. Possible values are Http or Https. Changing this forces a new resource to be created. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` +} + +type WinrmListenerObservation struct { + + // The Secret URL of a Key Vault Certificate, which must be specified when protocol is set to Https. Changing this forces a new resource to be created. + CertificateURL *string `json:"certificateUrl,omitempty" tf:"certificate_url,omitempty"` + + // Specifies the protocol of listener. Possible values are Http or Https. Changing this forces a new resource to be created. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` +} + +type WinrmListenerParameters struct { + + // The Secret URL of a Key Vault Certificate, which must be specified when protocol is set to Https. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + CertificateURL *string `json:"certificateUrl,omitempty" tf:"certificate_url,omitempty"` + + // Specifies the protocol of listener. Possible values are Http or Https. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Protocol *string `json:"protocol" tf:"protocol,omitempty"` +} + +// OrchestratedVirtualMachineScaleSetSpec defines the desired state of OrchestratedVirtualMachineScaleSet +type OrchestratedVirtualMachineScaleSetSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider OrchestratedVirtualMachineScaleSetParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. 
+ // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider OrchestratedVirtualMachineScaleSetInitParameters `json:"initProvider,omitempty"` +} + +// OrchestratedVirtualMachineScaleSetStatus defines the observed state of OrchestratedVirtualMachineScaleSet. +type OrchestratedVirtualMachineScaleSetStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider OrchestratedVirtualMachineScaleSetObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// OrchestratedVirtualMachineScaleSet is the Schema for the OrchestratedVirtualMachineScaleSets API. Manages an Virtual Machine Scale Set in Flexible Orchestration Mode. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type OrchestratedVirtualMachineScaleSet struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in 
self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.platformFaultDomainCount) || (has(self.initProvider) && has(self.initProvider.platformFaultDomainCount))",message="spec.forProvider.platformFaultDomainCount is a required parameter" + Spec OrchestratedVirtualMachineScaleSetSpec `json:"spec"` + Status OrchestratedVirtualMachineScaleSetStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// OrchestratedVirtualMachineScaleSetList contains a list of OrchestratedVirtualMachineScaleSets +type OrchestratedVirtualMachineScaleSetList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []OrchestratedVirtualMachineScaleSet `json:"items"` +} + +// Repository type metadata. +var ( + OrchestratedVirtualMachineScaleSet_Kind = "OrchestratedVirtualMachineScaleSet" + OrchestratedVirtualMachineScaleSet_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: OrchestratedVirtualMachineScaleSet_Kind}.String() + OrchestratedVirtualMachineScaleSet_KindAPIVersion = OrchestratedVirtualMachineScaleSet_Kind + "." + CRDGroupVersion.String() + OrchestratedVirtualMachineScaleSet_GroupVersionKind = CRDGroupVersion.WithKind(OrchestratedVirtualMachineScaleSet_Kind) +) + +func init() { + SchemeBuilder.Register(&OrchestratedVirtualMachineScaleSet{}, &OrchestratedVirtualMachineScaleSetList{}) +} diff --git a/apis/compute/v1beta2/zz_sharedimage_terraformed.go b/apis/compute/v1beta2/zz_sharedimage_terraformed.go new file mode 100755 index 000000000..cb264acd9 --- /dev/null +++ b/apis/compute/v1beta2/zz_sharedimage_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this SharedImage +func (mg *SharedImage) GetTerraformResourceType() string { + return "azurerm_shared_image" +} + +// GetConnectionDetailsMapping for this SharedImage +func (tr *SharedImage) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this SharedImage +func (tr *SharedImage) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this SharedImage +func (tr *SharedImage) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this SharedImage +func (tr *SharedImage) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this SharedImage +func (tr *SharedImage) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this SharedImage +func (tr *SharedImage) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this SharedImage +func (tr *SharedImage) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := 
map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this SharedImage +func (tr *SharedImage) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this SharedImage using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *SharedImage) LateInitialize(attrs []byte) (bool, error) { + params := &SharedImageParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *SharedImage) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/compute/v1beta2/zz_sharedimage_types.go b/apis/compute/v1beta2/zz_sharedimage_types.go new file mode 100755 index 000000000..e700f9e6c --- /dev/null +++ b/apis/compute/v1beta2/zz_sharedimage_types.go @@ -0,0 +1,435 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type IdentifierInitParameters struct { + + // The Offer Name for this Shared Image. Changing this forces a new resource to be created. + Offer *string `json:"offer,omitempty" tf:"offer,omitempty"` + + // The Publisher Name for this Gallery Image. Changing this forces a new resource to be created. + Publisher *string `json:"publisher,omitempty" tf:"publisher,omitempty"` + + // The Name of the SKU for this Gallery Image. Changing this forces a new resource to be created. + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` +} + +type IdentifierObservation struct { + + // The Offer Name for this Shared Image. Changing this forces a new resource to be created. + Offer *string `json:"offer,omitempty" tf:"offer,omitempty"` + + // The Publisher Name for this Gallery Image. Changing this forces a new resource to be created. + Publisher *string `json:"publisher,omitempty" tf:"publisher,omitempty"` + + // The Name of the SKU for this Gallery Image. Changing this forces a new resource to be created. + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` +} + +type IdentifierParameters struct { + + // The Offer Name for this Shared Image. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + Offer *string `json:"offer" tf:"offer,omitempty"` + + // The Publisher Name for this Gallery Image. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Publisher *string `json:"publisher" tf:"publisher,omitempty"` + + // The Name of the SKU for this Gallery Image. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Sku *string `json:"sku" tf:"sku,omitempty"` +} + +type PurchasePlanInitParameters struct { + + // The Purchase Plan Name for this Shared Image. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Purchase Plan Product for this Gallery Image. Changing this forces a new resource to be created. + Product *string `json:"product,omitempty" tf:"product,omitempty"` + + // The Purchase Plan Publisher for this Gallery Image. Changing this forces a new resource to be created. + Publisher *string `json:"publisher,omitempty" tf:"publisher,omitempty"` +} + +type PurchasePlanObservation struct { + + // The Purchase Plan Name for this Shared Image. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Purchase Plan Product for this Gallery Image. Changing this forces a new resource to be created. + Product *string `json:"product,omitempty" tf:"product,omitempty"` + + // The Purchase Plan Publisher for this Gallery Image. Changing this forces a new resource to be created. + Publisher *string `json:"publisher,omitempty" tf:"publisher,omitempty"` +} + +type PurchasePlanParameters struct { + + // The Purchase Plan Name for this Shared Image. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The Purchase Plan Product for this Gallery Image. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + Product *string `json:"product,omitempty" tf:"product,omitempty"` + + // The Purchase Plan Publisher for this Gallery Image. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Publisher *string `json:"publisher,omitempty" tf:"publisher,omitempty"` +} + +type SharedImageInitParameters struct { + + // Specifies if the Shared Image supports Accelerated Network. Changing this forces a new resource to be created. + AcceleratedNetworkSupportEnabled *bool `json:"acceleratedNetworkSupportEnabled,omitempty" tf:"accelerated_network_support_enabled,omitempty"` + + // CPU architecture supported by an OS. Possible values are x64 and Arm64. Defaults to x64. Changing this forces a new resource to be created. + Architecture *string `json:"architecture,omitempty" tf:"architecture,omitempty"` + + // Specifies if Confidential Virtual Machines enabled. It will enable all the features of trusted, with higher confidentiality features for isolate machines or encrypted data. Available for Gen2 machines. Changing this forces a new resource to be created. + ConfidentialVMEnabled *bool `json:"confidentialVmEnabled,omitempty" tf:"confidential_vm_enabled,omitempty"` + + // Specifies if supports creation of both Confidential virtual machines and Gen2 virtual machines with standard security from a compatible Gen2 OS disk VHD or Gen2 Managed image. Changing this forces a new resource to be created. + ConfidentialVMSupported *bool `json:"confidentialVmSupported,omitempty" tf:"confidential_vm_supported,omitempty"` + + // A description of this Shared Image. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // One or more Disk Types not allowed for the Image. Possible values include Standard_LRS and Premium_LRS. 
+ // +listType=set + DiskTypesNotAllowed []*string `json:"diskTypesNotAllowed,omitempty" tf:"disk_types_not_allowed,omitempty"` + + // The end of life date in RFC3339 format of the Image. + EndOfLifeDate *string `json:"endOfLifeDate,omitempty" tf:"end_of_life_date,omitempty"` + + // The End User Licence Agreement for the Shared Image. Changing this forces a new resource to be created. + Eula *string `json:"eula,omitempty" tf:"eula,omitempty"` + + // The generation of HyperV that the Virtual Machine used to create the Shared Image is based on. Possible values are V1 and V2. Defaults to V1. Changing this forces a new resource to be created. + HyperVGeneration *string `json:"hyperVGeneration,omitempty" tf:"hyper_v_generation,omitempty"` + + // An identifier block as defined below. + Identifier *IdentifierInitParameters `json:"identifier,omitempty" tf:"identifier,omitempty"` + + // Specifies the supported Azure location where the Shared Image Gallery exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Maximum memory in GB recommended for the Image. + MaxRecommendedMemoryInGb *float64 `json:"maxRecommendedMemoryInGb,omitempty" tf:"max_recommended_memory_in_gb,omitempty"` + + // Maximum count of vCPUs recommended for the Image. + MaxRecommendedVcpuCount *float64 `json:"maxRecommendedVcpuCount,omitempty" tf:"max_recommended_vcpu_count,omitempty"` + + // Minimum memory in GB recommended for the Image. + MinRecommendedMemoryInGb *float64 `json:"minRecommendedMemoryInGb,omitempty" tf:"min_recommended_memory_in_gb,omitempty"` + + // Minimum count of vCPUs recommended for the Image. + MinRecommendedVcpuCount *float64 `json:"minRecommendedVcpuCount,omitempty" tf:"min_recommended_vcpu_count,omitempty"` + + // The type of Operating System present in this Shared Image. Possible values are Linux and Windows. Changing this forces a new resource to be created. 
+ OsType *string `json:"osType,omitempty" tf:"os_type,omitempty"` + + // The URI containing the Privacy Statement associated with this Shared Image. Changing this forces a new resource to be created. + PrivacyStatementURI *string `json:"privacyStatementUri,omitempty" tf:"privacy_statement_uri,omitempty"` + + // A purchase_plan block as defined below. + PurchasePlan *PurchasePlanInitParameters `json:"purchasePlan,omitempty" tf:"purchase_plan,omitempty"` + + // The URI containing the Release Notes associated with this Shared Image. + ReleaseNoteURI *string `json:"releaseNoteUri,omitempty" tf:"release_note_uri,omitempty"` + + // Specifies that the Operating System used inside this Image has not been Generalized (for example, sysprep on Windows has not been run). Changing this forces a new resource to be created. + Specialized *bool `json:"specialized,omitempty" tf:"specialized,omitempty"` + + // A mapping of tags to assign to the Shared Image. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies if Trusted Launch has to be enabled for the Virtual Machine created from the Shared Image. Changing this forces a new resource to be created. + TrustedLaunchEnabled *bool `json:"trustedLaunchEnabled,omitempty" tf:"trusted_launch_enabled,omitempty"` + + // Specifies if supports creation of both Trusted Launch virtual machines and Gen2 virtual machines with standard security created from the Shared Image. Changing this forces a new resource to be created. + TrustedLaunchSupported *bool `json:"trustedLaunchSupported,omitempty" tf:"trusted_launch_supported,omitempty"` +} + +type SharedImageObservation struct { + + // Specifies if the Shared Image supports Accelerated Network. Changing this forces a new resource to be created. + AcceleratedNetworkSupportEnabled *bool `json:"acceleratedNetworkSupportEnabled,omitempty" tf:"accelerated_network_support_enabled,omitempty"` + + // CPU architecture supported by an OS. 
Possible values are x64 and Arm64. Defaults to x64. Changing this forces a new resource to be created. + Architecture *string `json:"architecture,omitempty" tf:"architecture,omitempty"` + + // Specifies if Confidential Virtual Machines enabled. It will enable all the features of trusted, with higher confidentiality features for isolate machines or encrypted data. Available for Gen2 machines. Changing this forces a new resource to be created. + ConfidentialVMEnabled *bool `json:"confidentialVmEnabled,omitempty" tf:"confidential_vm_enabled,omitempty"` + + // Specifies if supports creation of both Confidential virtual machines and Gen2 virtual machines with standard security from a compatible Gen2 OS disk VHD or Gen2 Managed image. Changing this forces a new resource to be created. + ConfidentialVMSupported *bool `json:"confidentialVmSupported,omitempty" tf:"confidential_vm_supported,omitempty"` + + // A description of this Shared Image. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // One or more Disk Types not allowed for the Image. Possible values include Standard_LRS and Premium_LRS. + // +listType=set + DiskTypesNotAllowed []*string `json:"diskTypesNotAllowed,omitempty" tf:"disk_types_not_allowed,omitempty"` + + // The end of life date in RFC3339 format of the Image. + EndOfLifeDate *string `json:"endOfLifeDate,omitempty" tf:"end_of_life_date,omitempty"` + + // The End User Licence Agreement for the Shared Image. Changing this forces a new resource to be created. + Eula *string `json:"eula,omitempty" tf:"eula,omitempty"` + + // Specifies the name of the Shared Image Gallery in which this Shared Image should exist. Changing this forces a new resource to be created. + GalleryName *string `json:"galleryName,omitempty" tf:"gallery_name,omitempty"` + + // The generation of HyperV that the Virtual Machine used to create the Shared Image is based on. Possible values are V1 and V2. Defaults to V1. 
Changing this forces a new resource to be created. + HyperVGeneration *string `json:"hyperVGeneration,omitempty" tf:"hyper_v_generation,omitempty"` + + // The ID of the Shared Image. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identifier block as defined below. + Identifier *IdentifierObservation `json:"identifier,omitempty" tf:"identifier,omitempty"` + + // Specifies the supported Azure location where the Shared Image Gallery exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Maximum memory in GB recommended for the Image. + MaxRecommendedMemoryInGb *float64 `json:"maxRecommendedMemoryInGb,omitempty" tf:"max_recommended_memory_in_gb,omitempty"` + + // Maximum count of vCPUs recommended for the Image. + MaxRecommendedVcpuCount *float64 `json:"maxRecommendedVcpuCount,omitempty" tf:"max_recommended_vcpu_count,omitempty"` + + // Minimum memory in GB recommended for the Image. + MinRecommendedMemoryInGb *float64 `json:"minRecommendedMemoryInGb,omitempty" tf:"min_recommended_memory_in_gb,omitempty"` + + // Minimum count of vCPUs recommended for the Image. + MinRecommendedVcpuCount *float64 `json:"minRecommendedVcpuCount,omitempty" tf:"min_recommended_vcpu_count,omitempty"` + + // The type of Operating System present in this Shared Image. Possible values are Linux and Windows. Changing this forces a new resource to be created. + OsType *string `json:"osType,omitempty" tf:"os_type,omitempty"` + + // The URI containing the Privacy Statement associated with this Shared Image. Changing this forces a new resource to be created. + PrivacyStatementURI *string `json:"privacyStatementUri,omitempty" tf:"privacy_statement_uri,omitempty"` + + // A purchase_plan block as defined below. + PurchasePlan *PurchasePlanObservation `json:"purchasePlan,omitempty" tf:"purchase_plan,omitempty"` + + // The URI containing the Release Notes associated with this Shared Image. 
+ ReleaseNoteURI *string `json:"releaseNoteUri,omitempty" tf:"release_note_uri,omitempty"` + + // The name of the resource group in which the Shared Image Gallery exists. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Specifies that the Operating System used inside this Image has not been Generalized (for example, sysprep on Windows has not been run). Changing this forces a new resource to be created. + Specialized *bool `json:"specialized,omitempty" tf:"specialized,omitempty"` + + // A mapping of tags to assign to the Shared Image. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies if Trusted Launch has to be enabled for the Virtual Machine created from the Shared Image. Changing this forces a new resource to be created. + TrustedLaunchEnabled *bool `json:"trustedLaunchEnabled,omitempty" tf:"trusted_launch_enabled,omitempty"` + + // Specifies if supports creation of both Trusted Launch virtual machines and Gen2 virtual machines with standard security created from the Shared Image. Changing this forces a new resource to be created. + TrustedLaunchSupported *bool `json:"trustedLaunchSupported,omitempty" tf:"trusted_launch_supported,omitempty"` +} + +type SharedImageParameters struct { + + // Specifies if the Shared Image supports Accelerated Network. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + AcceleratedNetworkSupportEnabled *bool `json:"acceleratedNetworkSupportEnabled,omitempty" tf:"accelerated_network_support_enabled,omitempty"` + + // CPU architecture supported by an OS. Possible values are x64 and Arm64. Defaults to x64. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Architecture *string `json:"architecture,omitempty" tf:"architecture,omitempty"` + + // Specifies if Confidential Virtual Machines enabled. 
It will enable all the features of trusted, with higher confidentiality features for isolate machines or encrypted data. Available for Gen2 machines. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ConfidentialVMEnabled *bool `json:"confidentialVmEnabled,omitempty" tf:"confidential_vm_enabled,omitempty"` + + // Specifies if supports creation of both Confidential virtual machines and Gen2 virtual machines with standard security from a compatible Gen2 OS disk VHD or Gen2 Managed image. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ConfidentialVMSupported *bool `json:"confidentialVmSupported,omitempty" tf:"confidential_vm_supported,omitempty"` + + // A description of this Shared Image. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // One or more Disk Types not allowed for the Image. Possible values include Standard_LRS and Premium_LRS. + // +kubebuilder:validation:Optional + // +listType=set + DiskTypesNotAllowed []*string `json:"diskTypesNotAllowed,omitempty" tf:"disk_types_not_allowed,omitempty"` + + // The end of life date in RFC3339 format of the Image. + // +kubebuilder:validation:Optional + EndOfLifeDate *string `json:"endOfLifeDate,omitempty" tf:"end_of_life_date,omitempty"` + + // The End User Licence Agreement for the Shared Image. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Eula *string `json:"eula,omitempty" tf:"eula,omitempty"` + + // Specifies the name of the Shared Image Gallery in which this Shared Image should exist. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/compute/v1beta2.SharedImageGallery + // +kubebuilder:validation:Optional + GalleryName *string `json:"galleryName,omitempty" tf:"gallery_name,omitempty"` + + // Reference to a SharedImageGallery in compute to populate galleryName. + // +kubebuilder:validation:Optional + GalleryNameRef *v1.Reference `json:"galleryNameRef,omitempty" tf:"-"` + + // Selector for a SharedImageGallery in compute to populate galleryName. + // +kubebuilder:validation:Optional + GalleryNameSelector *v1.Selector `json:"galleryNameSelector,omitempty" tf:"-"` + + // The generation of HyperV that the Virtual Machine used to create the Shared Image is based on. Possible values are V1 and V2. Defaults to V1. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + HyperVGeneration *string `json:"hyperVGeneration,omitempty" tf:"hyper_v_generation,omitempty"` + + // An identifier block as defined below. + // +kubebuilder:validation:Optional + Identifier *IdentifierParameters `json:"identifier,omitempty" tf:"identifier,omitempty"` + + // Specifies the supported Azure location where the Shared Image Gallery exists. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Maximum memory in GB recommended for the Image. + // +kubebuilder:validation:Optional + MaxRecommendedMemoryInGb *float64 `json:"maxRecommendedMemoryInGb,omitempty" tf:"max_recommended_memory_in_gb,omitempty"` + + // Maximum count of vCPUs recommended for the Image. + // +kubebuilder:validation:Optional + MaxRecommendedVcpuCount *float64 `json:"maxRecommendedVcpuCount,omitempty" tf:"max_recommended_vcpu_count,omitempty"` + + // Minimum memory in GB recommended for the Image. 
+ // +kubebuilder:validation:Optional + MinRecommendedMemoryInGb *float64 `json:"minRecommendedMemoryInGb,omitempty" tf:"min_recommended_memory_in_gb,omitempty"` + + // Minimum count of vCPUs recommended for the Image. + // +kubebuilder:validation:Optional + MinRecommendedVcpuCount *float64 `json:"minRecommendedVcpuCount,omitempty" tf:"min_recommended_vcpu_count,omitempty"` + + // The type of Operating System present in this Shared Image. Possible values are Linux and Windows. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + OsType *string `json:"osType,omitempty" tf:"os_type,omitempty"` + + // The URI containing the Privacy Statement associated with this Shared Image. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + PrivacyStatementURI *string `json:"privacyStatementUri,omitempty" tf:"privacy_statement_uri,omitempty"` + + // A purchase_plan block as defined below. + // +kubebuilder:validation:Optional + PurchasePlan *PurchasePlanParameters `json:"purchasePlan,omitempty" tf:"purchase_plan,omitempty"` + + // The URI containing the Release Notes associated with this Shared Image. + // +kubebuilder:validation:Optional + ReleaseNoteURI *string `json:"releaseNoteUri,omitempty" tf:"release_note_uri,omitempty"` + + // The name of the resource group in which the Shared Image Gallery exists. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. 
+ // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // Specifies that the Operating System used inside this Image has not been Generalized (for example, sysprep on Windows has not been run). Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Specialized *bool `json:"specialized,omitempty" tf:"specialized,omitempty"` + + // A mapping of tags to assign to the Shared Image. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies if Trusted Launch has to be enabled for the Virtual Machine created from the Shared Image. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + TrustedLaunchEnabled *bool `json:"trustedLaunchEnabled,omitempty" tf:"trusted_launch_enabled,omitempty"` + + // Specifies if supports creation of both Trusted Launch virtual machines and Gen2 virtual machines with standard security created from the Shared Image. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + TrustedLaunchSupported *bool `json:"trustedLaunchSupported,omitempty" tf:"trusted_launch_supported,omitempty"` +} + +// SharedImageSpec defines the desired state of SharedImage +type SharedImageSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider SharedImageParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider SharedImageInitParameters `json:"initProvider,omitempty"` +} + +// SharedImageStatus defines the observed state of SharedImage. +type SharedImageStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SharedImageObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// SharedImage is the Schema for the SharedImages API. Manages a Shared Image within a Shared Image Gallery. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type SharedImage struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.identifier) || (has(self.initProvider) && has(self.initProvider.identifier))",message="spec.forProvider.identifier is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // 
+kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.osType) || (has(self.initProvider) && has(self.initProvider.osType))",message="spec.forProvider.osType is a required parameter" + Spec SharedImageSpec `json:"spec"` + Status SharedImageStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SharedImageList contains a list of SharedImages +type SharedImageList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SharedImage `json:"items"` +} + +// Repository type metadata. +var ( + SharedImage_Kind = "SharedImage" + SharedImage_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: SharedImage_Kind}.String() + SharedImage_KindAPIVersion = SharedImage_Kind + "." + CRDGroupVersion.String() + SharedImage_GroupVersionKind = CRDGroupVersion.WithKind(SharedImage_Kind) +) + +func init() { + SchemeBuilder.Register(&SharedImage{}, &SharedImageList{}) +} diff --git a/apis/compute/v1beta2/zz_sharedimagegallery_terraformed.go b/apis/compute/v1beta2/zz_sharedimagegallery_terraformed.go new file mode 100755 index 000000000..dc8c45e03 --- /dev/null +++ b/apis/compute/v1beta2/zz_sharedimagegallery_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this SharedImageGallery +func (mg *SharedImageGallery) GetTerraformResourceType() string { + return "azurerm_shared_image_gallery" +} + +// GetConnectionDetailsMapping for this SharedImageGallery +func (tr *SharedImageGallery) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this SharedImageGallery +func (tr *SharedImageGallery) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this SharedImageGallery +func (tr *SharedImageGallery) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this SharedImageGallery +func (tr *SharedImageGallery) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this SharedImageGallery +func (tr *SharedImageGallery) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this SharedImageGallery +func (tr *SharedImageGallery) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this SharedImageGallery +func (tr *SharedImageGallery) GetInitParameters() (map[string]any, error) { 
+ p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this SharedImageGallery +func (tr *SharedImageGallery) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this SharedImageGallery using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *SharedImageGallery) LateInitialize(attrs []byte) (bool, error) { + params := &SharedImageGalleryParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *SharedImageGallery) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/compute/v1beta2/zz_sharedimagegallery_types.go b/apis/compute/v1beta2/zz_sharedimagegallery_types.go new file mode 100755 index 000000000..488ab41af --- /dev/null +++ b/apis/compute/v1beta2/zz_sharedimagegallery_types.go @@ -0,0 +1,230 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CommunityGalleryInitParameters struct { + + // The End User Licence Agreement for the Shared Image Gallery. Changing this forces a new resource to be created. + Eula *string `json:"eula,omitempty" tf:"eula,omitempty"` + + // Prefix of the community public name for the Shared Image Gallery. Changing this forces a new resource to be created. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Email of the publisher for the Shared Image Gallery. Changing this forces a new resource to be created. + PublisherEmail *string `json:"publisherEmail,omitempty" tf:"publisher_email,omitempty"` + + // URI of the publisher for the Shared Image Gallery. Changing this forces a new resource to be created. + PublisherURI *string `json:"publisherUri,omitempty" tf:"publisher_uri,omitempty"` +} + +type CommunityGalleryObservation struct { + + // The End User Licence Agreement for the Shared Image Gallery. Changing this forces a new resource to be created. + Eula *string `json:"eula,omitempty" tf:"eula,omitempty"` + + // Specifies the name of the Shared Image Gallery. Changing this forces a new resource to be created. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Prefix of the community public name for the Shared Image Gallery. Changing this forces a new resource to be created. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // Email of the publisher for the Shared Image Gallery. Changing this forces a new resource to be created. + PublisherEmail *string `json:"publisherEmail,omitempty" tf:"publisher_email,omitempty"` + + // URI of the publisher for the Shared Image Gallery. Changing this forces a new resource to be created. + PublisherURI *string `json:"publisherUri,omitempty" tf:"publisher_uri,omitempty"` +} + +type CommunityGalleryParameters struct { + + // The End User Licence Agreement for the Shared Image Gallery. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Eula *string `json:"eula" tf:"eula,omitempty"` + + // Prefix of the community public name for the Shared Image Gallery. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Prefix *string `json:"prefix" tf:"prefix,omitempty"` + + // Email of the publisher for the Shared Image Gallery. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + PublisherEmail *string `json:"publisherEmail" tf:"publisher_email,omitempty"` + + // URI of the publisher for the Shared Image Gallery. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + PublisherURI *string `json:"publisherUri" tf:"publisher_uri,omitempty"` +} + +type SharedImageGalleryInitParameters struct { + + // A description for this Shared Image Gallery. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A sharing block as defined below. 
Changing this forces a new resource to be created. + Sharing *SharingInitParameters `json:"sharing,omitempty" tf:"sharing,omitempty"` + + // A mapping of tags to assign to the Shared Image Gallery. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type SharedImageGalleryObservation struct { + + // A description for this Shared Image Gallery. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The ID of the Shared Image Gallery. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the resource group in which to create the Shared Image Gallery. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A sharing block as defined below. Changing this forces a new resource to be created. + Sharing *SharingObservation `json:"sharing,omitempty" tf:"sharing,omitempty"` + + // A mapping of tags to assign to the Shared Image Gallery. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The Unique Name for this Shared Image Gallery. + UniqueName *string `json:"uniqueName,omitempty" tf:"unique_name,omitempty"` +} + +type SharedImageGalleryParameters struct { + + // A description for this Shared Image Gallery. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the resource group in which to create the Shared Image Gallery. 
Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A sharing block as defined below. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Sharing *SharingParameters `json:"sharing,omitempty" tf:"sharing,omitempty"` + + // A mapping of tags to assign to the Shared Image Gallery. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type SharingInitParameters struct { + + // A community_gallery block as defined below. Changing this forces a new resource to be created. + CommunityGallery *CommunityGalleryInitParameters `json:"communityGallery,omitempty" tf:"community_gallery,omitempty"` + + // The permission of the Shared Image Gallery when sharing. Possible values are Community, Groups and Private. Changing this forces a new resource to be created. + Permission *string `json:"permission,omitempty" tf:"permission,omitempty"` +} + +type SharingObservation struct { + + // A community_gallery block as defined below. Changing this forces a new resource to be created. + CommunityGallery *CommunityGalleryObservation `json:"communityGallery,omitempty" tf:"community_gallery,omitempty"` + + // The permission of the Shared Image Gallery when sharing. Possible values are Community, Groups and Private. 
Changing this forces a new resource to be created. + Permission *string `json:"permission,omitempty" tf:"permission,omitempty"` +} + +type SharingParameters struct { + + // A community_gallery block as defined below. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + CommunityGallery *CommunityGalleryParameters `json:"communityGallery,omitempty" tf:"community_gallery,omitempty"` + + // The permission of the Shared Image Gallery when sharing. Possible values are Community, Groups and Private. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Permission *string `json:"permission" tf:"permission,omitempty"` +} + +// SharedImageGallerySpec defines the desired state of SharedImageGallery +type SharedImageGallerySpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider SharedImageGalleryParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider SharedImageGalleryInitParameters `json:"initProvider,omitempty"` +} + +// SharedImageGalleryStatus defines the observed state of SharedImageGallery. 
+type SharedImageGalleryStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SharedImageGalleryObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// SharedImageGallery is the Schema for the SharedImageGallerys API. Manages a Shared Image Gallery. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type SharedImageGallery struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + Spec SharedImageGallerySpec `json:"spec"` + Status SharedImageGalleryStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SharedImageGalleryList contains a list of SharedImageGallerys +type SharedImageGalleryList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SharedImageGallery `json:"items"` +} + +// Repository type metadata. +var ( + SharedImageGallery_Kind = "SharedImageGallery" + SharedImageGallery_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: SharedImageGallery_Kind}.String() + SharedImageGallery_KindAPIVersion = SharedImageGallery_Kind + "." 
+ CRDGroupVersion.String() + SharedImageGallery_GroupVersionKind = CRDGroupVersion.WithKind(SharedImageGallery_Kind) +) + +func init() { + SchemeBuilder.Register(&SharedImageGallery{}, &SharedImageGalleryList{}) +} diff --git a/apis/compute/v1beta2/zz_snapshot_terraformed.go b/apis/compute/v1beta2/zz_snapshot_terraformed.go new file mode 100755 index 000000000..9da1a568c --- /dev/null +++ b/apis/compute/v1beta2/zz_snapshot_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Snapshot +func (mg *Snapshot) GetTerraformResourceType() string { + return "azurerm_snapshot" +} + +// GetConnectionDetailsMapping for this Snapshot +func (tr *Snapshot) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Snapshot +func (tr *Snapshot) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Snapshot +func (tr *Snapshot) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Snapshot +func (tr *Snapshot) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Snapshot +func (tr *Snapshot) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + 
} + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Snapshot +func (tr *Snapshot) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Snapshot +func (tr *Snapshot) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Snapshot +func (tr *Snapshot) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Snapshot using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *Snapshot) LateInitialize(attrs []byte) (bool, error) { + params := &SnapshotParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Snapshot) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/compute/v1beta2/zz_snapshot_types.go b/apis/compute/v1beta2/zz_snapshot_types.go new file mode 100755 index 000000000..f27415499 --- /dev/null +++ b/apis/compute/v1beta2/zz_snapshot_types.go @@ -0,0 +1,314 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type EncryptionSettingsDiskEncryptionKeyInitParameters struct { + + // The URL to the Key Vault Secret used as the Disk Encryption Key. This can be found as id on the azurerm_key_vault_secret resource. + SecretURL *string `json:"secretUrl,omitempty" tf:"secret_url,omitempty"` + + // The ID of the source Key Vault. This can be found as id on the azurerm_key_vault resource. + SourceVaultID *string `json:"sourceVaultId,omitempty" tf:"source_vault_id,omitempty"` +} + +type EncryptionSettingsDiskEncryptionKeyObservation struct { + + // The URL to the Key Vault Secret used as the Disk Encryption Key. This can be found as id on the azurerm_key_vault_secret resource. 
+ SecretURL *string `json:"secretUrl,omitempty" tf:"secret_url,omitempty"` + + // The ID of the source Key Vault. This can be found as id on the azurerm_key_vault resource. + SourceVaultID *string `json:"sourceVaultId,omitempty" tf:"source_vault_id,omitempty"` +} + +type EncryptionSettingsDiskEncryptionKeyParameters struct { + + // The URL to the Key Vault Secret used as the Disk Encryption Key. This can be found as id on the azurerm_key_vault_secret resource. + // +kubebuilder:validation:Optional + SecretURL *string `json:"secretUrl" tf:"secret_url,omitempty"` + + // The ID of the source Key Vault. This can be found as id on the azurerm_key_vault resource. + // +kubebuilder:validation:Optional + SourceVaultID *string `json:"sourceVaultId" tf:"source_vault_id,omitempty"` +} + +type EncryptionSettingsKeyEncryptionKeyInitParameters struct { + + // The URL to the Key Vault Key used as the Key Encryption Key. This can be found as id on the azurerm_key_vault_key resource. + KeyURL *string `json:"keyUrl,omitempty" tf:"key_url,omitempty"` + + // The ID of the source Key Vault. This can be found as id on the azurerm_key_vault resource. + SourceVaultID *string `json:"sourceVaultId,omitempty" tf:"source_vault_id,omitempty"` +} + +type EncryptionSettingsKeyEncryptionKeyObservation struct { + + // The URL to the Key Vault Key used as the Key Encryption Key. This can be found as id on the azurerm_key_vault_key resource. + KeyURL *string `json:"keyUrl,omitempty" tf:"key_url,omitempty"` + + // The ID of the source Key Vault. This can be found as id on the azurerm_key_vault resource. + SourceVaultID *string `json:"sourceVaultId,omitempty" tf:"source_vault_id,omitempty"` +} + +type EncryptionSettingsKeyEncryptionKeyParameters struct { + + // The URL to the Key Vault Key used as the Key Encryption Key. This can be found as id on the azurerm_key_vault_key resource. 
+ // +kubebuilder:validation:Optional + KeyURL *string `json:"keyUrl" tf:"key_url,omitempty"` + + // The ID of the source Key Vault. This can be found as id on the azurerm_key_vault resource. + // +kubebuilder:validation:Optional + SourceVaultID *string `json:"sourceVaultId" tf:"source_vault_id,omitempty"` +} + +type SnapshotEncryptionSettingsInitParameters struct { + + // A disk_encryption_key block as defined below. + DiskEncryptionKey *EncryptionSettingsDiskEncryptionKeyInitParameters `json:"diskEncryptionKey,omitempty" tf:"disk_encryption_key,omitempty"` + + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // A key_encryption_key block as defined below. + KeyEncryptionKey *EncryptionSettingsKeyEncryptionKeyInitParameters `json:"keyEncryptionKey,omitempty" tf:"key_encryption_key,omitempty"` +} + +type SnapshotEncryptionSettingsObservation struct { + + // A disk_encryption_key block as defined below. + DiskEncryptionKey *EncryptionSettingsDiskEncryptionKeyObservation `json:"diskEncryptionKey,omitempty" tf:"disk_encryption_key,omitempty"` + + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // A key_encryption_key block as defined below. + KeyEncryptionKey *EncryptionSettingsKeyEncryptionKeyObservation `json:"keyEncryptionKey,omitempty" tf:"key_encryption_key,omitempty"` +} + +type SnapshotEncryptionSettingsParameters struct { + + // A disk_encryption_key block as defined below. + // +kubebuilder:validation:Optional + DiskEncryptionKey *EncryptionSettingsDiskEncryptionKeyParameters `json:"diskEncryptionKey,omitempty" tf:"disk_encryption_key,omitempty"` + + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // A key_encryption_key block as defined below. 
+ // +kubebuilder:validation:Optional + KeyEncryptionKey *EncryptionSettingsKeyEncryptionKeyParameters `json:"keyEncryptionKey,omitempty" tf:"key_encryption_key,omitempty"` +} + +type SnapshotInitParameters struct { + + // Indicates how the snapshot is to be created. Possible values are Copy or Import. + CreateOption *string `json:"createOption,omitempty" tf:"create_option,omitempty"` + + // The size of the Snapshotted Disk in GB. + DiskSizeGb *float64 `json:"diskSizeGb,omitempty" tf:"disk_size_gb,omitempty"` + + // A encryption_settings block as defined below. + EncryptionSettings *SnapshotEncryptionSettingsInitParameters `json:"encryptionSettings,omitempty" tf:"encryption_settings,omitempty"` + + // Specifies if the Snapshot is incremental. Changing this forces a new resource to be created. + IncrementalEnabled *bool `json:"incrementalEnabled,omitempty" tf:"incremental_enabled,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies a reference to an existing snapshot, when create_option is Copy. Changing this forces a new resource to be created. + SourceResourceID *string `json:"sourceResourceId,omitempty" tf:"source_resource_id,omitempty"` + + // Specifies the URI to a Managed or Unmanaged Disk. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/compute/v1beta2.ManagedDisk + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + SourceURI *string `json:"sourceUri,omitempty" tf:"source_uri,omitempty"` + + // Reference to a ManagedDisk in compute to populate sourceUri. + // +kubebuilder:validation:Optional + SourceURIRef *v1.Reference `json:"sourceUriRef,omitempty" tf:"-"` + + // Selector for a ManagedDisk in compute to populate sourceUri. 
+ // +kubebuilder:validation:Optional + SourceURISelector *v1.Selector `json:"sourceUriSelector,omitempty" tf:"-"` + + // Specifies the ID of an storage account. Used with source_uri to allow authorization during import of unmanaged blobs from a different subscription. Changing this forces a new resource to be created. + StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type SnapshotObservation struct { + + // Indicates how the snapshot is to be created. Possible values are Copy or Import. + CreateOption *string `json:"createOption,omitempty" tf:"create_option,omitempty"` + + // The size of the Snapshotted Disk in GB. + DiskSizeGb *float64 `json:"diskSizeGb,omitempty" tf:"disk_size_gb,omitempty"` + + // A encryption_settings block as defined below. + EncryptionSettings *SnapshotEncryptionSettingsObservation `json:"encryptionSettings,omitempty" tf:"encryption_settings,omitempty"` + + // The Snapshot ID. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies if the Snapshot is incremental. Changing this forces a new resource to be created. + IncrementalEnabled *bool `json:"incrementalEnabled,omitempty" tf:"incremental_enabled,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the resource group in which to create the Snapshot. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Specifies a reference to an existing snapshot, when create_option is Copy. Changing this forces a new resource to be created. 
+ SourceResourceID *string `json:"sourceResourceId,omitempty" tf:"source_resource_id,omitempty"` + + // Specifies the URI to a Managed or Unmanaged Disk. Changing this forces a new resource to be created. + SourceURI *string `json:"sourceUri,omitempty" tf:"source_uri,omitempty"` + + // Specifies the ID of an storage account. Used with source_uri to allow authorization during import of unmanaged blobs from a different subscription. Changing this forces a new resource to be created. + StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Whether Trusted Launch is enabled for the Snapshot. + TrustedLaunchEnabled *bool `json:"trustedLaunchEnabled,omitempty" tf:"trusted_launch_enabled,omitempty"` +} + +type SnapshotParameters struct { + + // Indicates how the snapshot is to be created. Possible values are Copy or Import. + // +kubebuilder:validation:Optional + CreateOption *string `json:"createOption,omitempty" tf:"create_option,omitempty"` + + // The size of the Snapshotted Disk in GB. + // +kubebuilder:validation:Optional + DiskSizeGb *float64 `json:"diskSizeGb,omitempty" tf:"disk_size_gb,omitempty"` + + // A encryption_settings block as defined below. + // +kubebuilder:validation:Optional + EncryptionSettings *SnapshotEncryptionSettingsParameters `json:"encryptionSettings,omitempty" tf:"encryption_settings,omitempty"` + + // Specifies if the Snapshot is incremental. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + IncrementalEnabled *bool `json:"incrementalEnabled,omitempty" tf:"incremental_enabled,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the resource group in which to create the Snapshot. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // Specifies a reference to an existing snapshot, when create_option is Copy. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + SourceResourceID *string `json:"sourceResourceId,omitempty" tf:"source_resource_id,omitempty"` + + // Specifies the URI to a Managed or Unmanaged Disk. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/compute/v1beta2.ManagedDisk + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + SourceURI *string `json:"sourceUri,omitempty" tf:"source_uri,omitempty"` + + // Reference to a ManagedDisk in compute to populate sourceUri. + // +kubebuilder:validation:Optional + SourceURIRef *v1.Reference `json:"sourceUriRef,omitempty" tf:"-"` + + // Selector for a ManagedDisk in compute to populate sourceUri. + // +kubebuilder:validation:Optional + SourceURISelector *v1.Selector `json:"sourceUriSelector,omitempty" tf:"-"` + + // Specifies the ID of an storage account. 
Used with source_uri to allow authorization during import of unmanaged blobs from a different subscription. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +// SnapshotSpec defines the desired state of Snapshot +type SnapshotSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider SnapshotParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider SnapshotInitParameters `json:"initProvider,omitempty"` +} + +// SnapshotStatus defines the observed state of Snapshot. +type SnapshotStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SnapshotObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Snapshot is the Schema for the Snapshots API. Manages a Disk Snapshot. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type Snapshot struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.createOption) || (has(self.initProvider) && has(self.initProvider.createOption))",message="spec.forProvider.createOption is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + Spec SnapshotSpec `json:"spec"` + Status SnapshotStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SnapshotList contains a list of Snapshots +type SnapshotList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Snapshot `json:"items"` +} + +// Repository type metadata. +var ( + Snapshot_Kind = "Snapshot" + Snapshot_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Snapshot_Kind}.String() + Snapshot_KindAPIVersion = Snapshot_Kind + "." 
+ CRDGroupVersion.String() + Snapshot_GroupVersionKind = CRDGroupVersion.WithKind(Snapshot_Kind) +) + +func init() { + SchemeBuilder.Register(&Snapshot{}, &SnapshotList{}) +} diff --git a/apis/compute/v1beta2/zz_virtualmachineextension_terraformed.go b/apis/compute/v1beta2/zz_virtualmachineextension_terraformed.go new file mode 100755 index 000000000..6b5592e6e --- /dev/null +++ b/apis/compute/v1beta2/zz_virtualmachineextension_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this VirtualMachineExtension +func (mg *VirtualMachineExtension) GetTerraformResourceType() string { + return "azurerm_virtual_machine_extension" +} + +// GetConnectionDetailsMapping for this VirtualMachineExtension +func (tr *VirtualMachineExtension) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"protected_settings": "spec.forProvider.protectedSettingsSecretRef"} +} + +// GetObservation of this VirtualMachineExtension +func (tr *VirtualMachineExtension) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this VirtualMachineExtension +func (tr *VirtualMachineExtension) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this VirtualMachineExtension +func (tr *VirtualMachineExtension) GetID() string { + if tr.Status.AtProvider.ID 
== nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this VirtualMachineExtension +func (tr *VirtualMachineExtension) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this VirtualMachineExtension +func (tr *VirtualMachineExtension) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this VirtualMachineExtension +func (tr *VirtualMachineExtension) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this VirtualMachineExtension +func (tr *VirtualMachineExtension) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this VirtualMachineExtension using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *VirtualMachineExtension) LateInitialize(attrs []byte) (bool, error) { + params := &VirtualMachineExtensionParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *VirtualMachineExtension) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/compute/v1beta2/zz_virtualmachineextension_types.go b/apis/compute/v1beta2/zz_virtualmachineextension_types.go new file mode 100755 index 000000000..6bd6afeb9 --- /dev/null +++ b/apis/compute/v1beta2/zz_virtualmachineextension_types.go @@ -0,0 +1,242 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type VirtualMachineExtensionInitParameters struct { + + // Specifies if the platform deploys the latest minor version update to the type_handler_version specified. 
+ AutoUpgradeMinorVersion *bool `json:"autoUpgradeMinorVersion,omitempty" tf:"auto_upgrade_minor_version,omitempty"` + + // Should the Extension be automatically updated whenever the Publisher releases a new version of this VM Extension? + AutomaticUpgradeEnabled *bool `json:"automaticUpgradeEnabled,omitempty" tf:"automatic_upgrade_enabled,omitempty"` + + // Should failures from the extension be suppressed? Possible values are true or false. Defaults to false. + FailureSuppressionEnabled *bool `json:"failureSuppressionEnabled,omitempty" tf:"failure_suppression_enabled,omitempty"` + + // A protected_settings_from_key_vault block as defined below. + ProtectedSettingsFromKeyVault *VirtualMachineExtensionProtectedSettingsFromKeyVaultInitParameters `json:"protectedSettingsFromKeyVault,omitempty" tf:"protected_settings_from_key_vault,omitempty"` + + // Specifies the collection of extension names after which this extension needs to be provisioned. + ProvisionAfterExtensions []*string `json:"provisionAfterExtensions,omitempty" tf:"provision_after_extensions,omitempty"` + + // The publisher of the extension, available publishers can be found by using the Azure CLI. Changing this forces a new resource to be created. + Publisher *string `json:"publisher,omitempty" tf:"publisher,omitempty"` + + // The settings passed to the extension, these are specified as a JSON object in a string. + Settings *string `json:"settings,omitempty" tf:"settings,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The type of extension, available types for a publisher can be found using the Azure CLI. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // Specifies the version of the extension to use, available versions can be found using the Azure CLI. 
+ TypeHandlerVersion *string `json:"typeHandlerVersion,omitempty" tf:"type_handler_version,omitempty"` +} + +type VirtualMachineExtensionObservation struct { + + // Specifies if the platform deploys the latest minor version update to the type_handler_version specified. + AutoUpgradeMinorVersion *bool `json:"autoUpgradeMinorVersion,omitempty" tf:"auto_upgrade_minor_version,omitempty"` + + // Should the Extension be automatically updated whenever the Publisher releases a new version of this VM Extension? + AutomaticUpgradeEnabled *bool `json:"automaticUpgradeEnabled,omitempty" tf:"automatic_upgrade_enabled,omitempty"` + + // Should failures from the extension be suppressed? Possible values are true or false. Defaults to false. + FailureSuppressionEnabled *bool `json:"failureSuppressionEnabled,omitempty" tf:"failure_suppression_enabled,omitempty"` + + // The ID of the Virtual Machine Extension. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A protected_settings_from_key_vault block as defined below. + ProtectedSettingsFromKeyVault *VirtualMachineExtensionProtectedSettingsFromKeyVaultObservation `json:"protectedSettingsFromKeyVault,omitempty" tf:"protected_settings_from_key_vault,omitempty"` + + // Specifies the collection of extension names after which this extension needs to be provisioned. + ProvisionAfterExtensions []*string `json:"provisionAfterExtensions,omitempty" tf:"provision_after_extensions,omitempty"` + + // The publisher of the extension, available publishers can be found by using the Azure CLI. Changing this forces a new resource to be created. + Publisher *string `json:"publisher,omitempty" tf:"publisher,omitempty"` + + // The settings passed to the extension, these are specified as a JSON object in a string. + Settings *string `json:"settings,omitempty" tf:"settings,omitempty"` + + // A mapping of tags to assign to the resource. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The type of extension, available types for a publisher can be found using the Azure CLI. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // Specifies the version of the extension to use, available versions can be found using the Azure CLI. + TypeHandlerVersion *string `json:"typeHandlerVersion,omitempty" tf:"type_handler_version,omitempty"` + + // The ID of the Virtual Machine. Changing this forces a new resource to be created + VirtualMachineID *string `json:"virtualMachineId,omitempty" tf:"virtual_machine_id,omitempty"` +} + +type VirtualMachineExtensionParameters struct { + + // Specifies if the platform deploys the latest minor version update to the type_handler_version specified. + // +kubebuilder:validation:Optional + AutoUpgradeMinorVersion *bool `json:"autoUpgradeMinorVersion,omitempty" tf:"auto_upgrade_minor_version,omitempty"` + + // Should the Extension be automatically updated whenever the Publisher releases a new version of this VM Extension? + // +kubebuilder:validation:Optional + AutomaticUpgradeEnabled *bool `json:"automaticUpgradeEnabled,omitempty" tf:"automatic_upgrade_enabled,omitempty"` + + // Should failures from the extension be suppressed? Possible values are true or false. Defaults to false. + // +kubebuilder:validation:Optional + FailureSuppressionEnabled *bool `json:"failureSuppressionEnabled,omitempty" tf:"failure_suppression_enabled,omitempty"` + + // A protected_settings_from_key_vault block as defined below. + // +kubebuilder:validation:Optional + ProtectedSettingsFromKeyVault *VirtualMachineExtensionProtectedSettingsFromKeyVaultParameters `json:"protectedSettingsFromKeyVault,omitempty" tf:"protected_settings_from_key_vault,omitempty"` + + // The protected_settings passed to the extension, like settings, these are specified as a JSON object in a string. 
+ // +kubebuilder:validation:Optional + ProtectedSettingsSecretRef *v1.SecretKeySelector `json:"protectedSettingsSecretRef,omitempty" tf:"-"` + + // Specifies the collection of extension names after which this extension needs to be provisioned. + // +kubebuilder:validation:Optional + ProvisionAfterExtensions []*string `json:"provisionAfterExtensions,omitempty" tf:"provision_after_extensions,omitempty"` + + // The publisher of the extension, available publishers can be found by using the Azure CLI. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Publisher *string `json:"publisher,omitempty" tf:"publisher,omitempty"` + + // The settings passed to the extension, these are specified as a JSON object in a string. + // +kubebuilder:validation:Optional + Settings *string `json:"settings,omitempty" tf:"settings,omitempty"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The type of extension, available types for a publisher can be found using the Azure CLI. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // Specifies the version of the extension to use, available versions can be found using the Azure CLI. + // +kubebuilder:validation:Optional + TypeHandlerVersion *string `json:"typeHandlerVersion,omitempty" tf:"type_handler_version,omitempty"` + + // The ID of the Virtual Machine. 
Changing this forces a new resource to be created + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/compute/v1beta2.LinuxVirtualMachine + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + VirtualMachineID *string `json:"virtualMachineId,omitempty" tf:"virtual_machine_id,omitempty"` + + // Reference to a LinuxVirtualMachine in compute to populate virtualMachineId. + // +kubebuilder:validation:Optional + VirtualMachineIDRef *v1.Reference `json:"virtualMachineIdRef,omitempty" tf:"-"` + + // Selector for a LinuxVirtualMachine in compute to populate virtualMachineId. + // +kubebuilder:validation:Optional + VirtualMachineIDSelector *v1.Selector `json:"virtualMachineIdSelector,omitempty" tf:"-"` +} + +type VirtualMachineExtensionProtectedSettingsFromKeyVaultInitParameters struct { + + // The URL to the Key Vault Secret which stores the protected settings. + SecretURL *string `json:"secretUrl,omitempty" tf:"secret_url,omitempty"` + + // The ID of the source Key Vault. + SourceVaultID *string `json:"sourceVaultId,omitempty" tf:"source_vault_id,omitempty"` +} + +type VirtualMachineExtensionProtectedSettingsFromKeyVaultObservation struct { + + // The URL to the Key Vault Secret which stores the protected settings. + SecretURL *string `json:"secretUrl,omitempty" tf:"secret_url,omitempty"` + + // The ID of the source Key Vault. + SourceVaultID *string `json:"sourceVaultId,omitempty" tf:"source_vault_id,omitempty"` +} + +type VirtualMachineExtensionProtectedSettingsFromKeyVaultParameters struct { + + // The URL to the Key Vault Secret which stores the protected settings. + // +kubebuilder:validation:Optional + SecretURL *string `json:"secretUrl" tf:"secret_url,omitempty"` + + // The ID of the source Key Vault. 
+ // +kubebuilder:validation:Optional + SourceVaultID *string `json:"sourceVaultId" tf:"source_vault_id,omitempty"` +} + +// VirtualMachineExtensionSpec defines the desired state of VirtualMachineExtension +type VirtualMachineExtensionSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider VirtualMachineExtensionParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because an external controller is managing them, like an + // autoscaler. + InitProvider VirtualMachineExtensionInitParameters `json:"initProvider,omitempty"` +} + +// VirtualMachineExtensionStatus defines the observed state of VirtualMachineExtension. +type VirtualMachineExtensionStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider VirtualMachineExtensionObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// VirtualMachineExtension is the Schema for the VirtualMachineExtensions API. Manages a Virtual Machine Extension to provide post deployment configuration and run automated tasks. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type VirtualMachineExtension struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.publisher) || (has(self.initProvider) && has(self.initProvider.publisher))",message="spec.forProvider.publisher is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.type) || (has(self.initProvider) && has(self.initProvider.type))",message="spec.forProvider.type is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.typeHandlerVersion) || (has(self.initProvider) && has(self.initProvider.typeHandlerVersion))",message="spec.forProvider.typeHandlerVersion is a required parameter" + Spec VirtualMachineExtensionSpec `json:"spec"` + Status VirtualMachineExtensionStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// VirtualMachineExtensionList contains a list of VirtualMachineExtensions +type VirtualMachineExtensionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items 
[]VirtualMachineExtension `json:"items"` +} + +// Repository type metadata. +var ( + VirtualMachineExtension_Kind = "VirtualMachineExtension" + VirtualMachineExtension_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: VirtualMachineExtension_Kind}.String() + VirtualMachineExtension_KindAPIVersion = VirtualMachineExtension_Kind + "." + CRDGroupVersion.String() + VirtualMachineExtension_GroupVersionKind = CRDGroupVersion.WithKind(VirtualMachineExtension_Kind) +) + +func init() { + SchemeBuilder.Register(&VirtualMachineExtension{}, &VirtualMachineExtensionList{}) +} diff --git a/apis/compute/v1beta2/zz_virtualmachineruncommand_terraformed.go b/apis/compute/v1beta2/zz_virtualmachineruncommand_terraformed.go new file mode 100755 index 000000000..460f6ff77 --- /dev/null +++ b/apis/compute/v1beta2/zz_virtualmachineruncommand_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this VirtualMachineRunCommand +func (mg *VirtualMachineRunCommand) GetTerraformResourceType() string { + return "azurerm_virtual_machine_run_command" +} + +// GetConnectionDetailsMapping for this VirtualMachineRunCommand +func (tr *VirtualMachineRunCommand) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"error_blob_managed_identity[*].client_id": "spec.forProvider.errorBlobManagedIdentity[*].clientIdSecretRef", "error_blob_managed_identity[*].object_id": "spec.forProvider.errorBlobManagedIdentity[*].objectIdSecretRef", "output_blob_managed_identity[*].client_id": "spec.forProvider.outputBlobManagedIdentity[*].clientIdSecretRef", "output_blob_managed_identity[*].object_id": "spec.forProvider.outputBlobManagedIdentity[*].objectIdSecretRef", "protected_parameter[*].name": "spec.forProvider.protectedParameter[*].nameSecretRef", "protected_parameter[*].value": "spec.forProvider.protectedParameter[*].valueSecretRef", "run_as_password": "spec.forProvider.runAsPasswordSecretRef", "source[*].script_uri_managed_identity[*].client_id": "spec.forProvider.source[*].scriptUriManagedIdentity[*].clientIdSecretRef", "source[*].script_uri_managed_identity[*].object_id": "spec.forProvider.source[*].scriptUriManagedIdentity[*].objectIdSecretRef"} +} + +// GetObservation of this VirtualMachineRunCommand +func (tr *VirtualMachineRunCommand) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this VirtualMachineRunCommand +func (tr *VirtualMachineRunCommand) SetObservation(obs map[string]any) error { + p, err := 
json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this VirtualMachineRunCommand +func (tr *VirtualMachineRunCommand) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this VirtualMachineRunCommand +func (tr *VirtualMachineRunCommand) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this VirtualMachineRunCommand +func (tr *VirtualMachineRunCommand) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this VirtualMachineRunCommand +func (tr *VirtualMachineRunCommand) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this VirtualMachineRunCommand +func (tr *VirtualMachineRunCommand) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. 
As it also sets + overwrite to true, we need to set it back to false, we don't + want to overwrite the forProvider fields with the initProvider + fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this VirtualMachineRunCommand using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *VirtualMachineRunCommand) LateInitialize(attrs []byte) (bool, error) { + params := &VirtualMachineRunCommandParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *VirtualMachineRunCommand) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/compute/v1beta2/zz_virtualmachineruncommand_types.go b/apis/compute/v1beta2/zz_virtualmachineruncommand_types.go new file mode 100755 index 000000000..f670d6865 --- /dev/null +++ b/apis/compute/v1beta2/zz_virtualmachineruncommand_types.go @@ -0,0 +1,431 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ErrorBlobManagedIdentityInitParameters struct { +} + +type ErrorBlobManagedIdentityObservation struct { +} + +type ErrorBlobManagedIdentityParameters struct { + + // The client ID of the managed identity. + // +kubebuilder:validation:Optional + ClientIDSecretRef *v1.SecretKeySelector `json:"clientIdSecretRef,omitempty" tf:"-"` + + // The object ID of the managed identity. + // +kubebuilder:validation:Optional + ObjectIDSecretRef *v1.SecretKeySelector `json:"objectIdSecretRef,omitempty" tf:"-"` +} + +type InstanceViewInitParameters struct { +} + +type InstanceViewObservation struct { + EndTime *string `json:"endTime,omitempty" tf:"end_time,omitempty"` + + ErrorMessage *string `json:"errorMessage,omitempty" tf:"error_message,omitempty"` + + ExecutionMessage *string `json:"executionMessage,omitempty" tf:"execution_message,omitempty"` + + ExecutionState *string `json:"executionState,omitempty" tf:"execution_state,omitempty"` + + ExitCode *float64 `json:"exitCode,omitempty" tf:"exit_code,omitempty"` + + Output *string `json:"output,omitempty" tf:"output,omitempty"` + + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` +} + +type InstanceViewParameters struct { +} + +type OutputBlobManagedIdentityInitParameters struct { +} + +type OutputBlobManagedIdentityObservation struct { +} + +type OutputBlobManagedIdentityParameters struct { + + // The client ID of the managed identity. + // +kubebuilder:validation:Optional + ClientIDSecretRef *v1.SecretKeySelector `json:"clientIdSecretRef,omitempty" tf:"-"` + + // The object ID of the managed identity. + // +kubebuilder:validation:Optional + ObjectIDSecretRef *v1.SecretKeySelector `json:"objectIdSecretRef,omitempty" tf:"-"` +} + +type ParameterInitParameters struct { + + // The run parameter name. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The run parameter value. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ParameterObservation struct { + + // The run parameter name. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The run parameter value. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ParameterParameters struct { + + // The run parameter name. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The run parameter value. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type ProtectedParameterInitParameters struct { +} + +type ProtectedParameterObservation struct { +} + +type ProtectedParameterParameters struct { + + // The run parameter name. + // +kubebuilder:validation:Required + NameSecretRef v1.SecretKeySelector `json:"nameSecretRef" tf:"-"` + + // The run parameter value. + // +kubebuilder:validation:Required + ValueSecretRef v1.SecretKeySelector `json:"valueSecretRef" tf:"-"` +} + +type ScriptURIManagedIdentityInitParameters struct { +} + +type ScriptURIManagedIdentityObservation struct { +} + +type ScriptURIManagedIdentityParameters struct { + + // The client ID of the managed identity. + // +kubebuilder:validation:Optional + ClientIDSecretRef *v1.SecretKeySelector `json:"clientIdSecretRef,omitempty" tf:"-"` + + // The object ID of the managed identity. + // +kubebuilder:validation:Optional + ObjectIDSecretRef *v1.SecretKeySelector `json:"objectIdSecretRef,omitempty" tf:"-"` +} + +type VirtualMachineRunCommandInitParameters struct { + + // An error_blob_managed_identity block as defined below. User-assigned managed Identity that has access to errorBlobUri storage blob. 
+ ErrorBlobManagedIdentity *ErrorBlobManagedIdentityInitParameters `json:"errorBlobManagedIdentity,omitempty" tf:"error_blob_managed_identity,omitempty"` + + // Specifies the Azure storage blob where script error stream will be uploaded. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Blob + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + ErrorBlobURI *string `json:"errorBlobUri,omitempty" tf:"error_blob_uri,omitempty"` + + // Reference to a Blob in storage to populate errorBlobUri. + // +kubebuilder:validation:Optional + ErrorBlobURIRef *v1.Reference `json:"errorBlobUriRef,omitempty" tf:"-"` + + // Selector for a Blob in storage to populate errorBlobUri. + // +kubebuilder:validation:Optional + ErrorBlobURISelector *v1.Selector `json:"errorBlobUriSelector,omitempty" tf:"-"` + + // The Azure Region where the Virtual Machine Run Command should exist. Changing this forces a new Virtual Machine Run Command to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // An output_blob_managed_identity block as defined below. User-assigned managed Identity that has access to outputBlobUri storage blob. + OutputBlobManagedIdentity *OutputBlobManagedIdentityInitParameters `json:"outputBlobManagedIdentity,omitempty" tf:"output_blob_managed_identity,omitempty"` + + // Specifies the Azure storage blob where script output stream will be uploaded. It can be basic blob URI with SAS token. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Blob + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + OutputBlobURI *string `json:"outputBlobUri,omitempty" tf:"output_blob_uri,omitempty"` + + // Reference to a Blob in storage to populate outputBlobUri. 
+ // +kubebuilder:validation:Optional + OutputBlobURIRef *v1.Reference `json:"outputBlobUriRef,omitempty" tf:"-"` + + // Selector for a Blob in storage to populate outputBlobUri. + // +kubebuilder:validation:Optional + OutputBlobURISelector *v1.Selector `json:"outputBlobUriSelector,omitempty" tf:"-"` + + // A list of parameter blocks as defined below. The parameters used by the script. + Parameter []ParameterInitParameters `json:"parameter,omitempty" tf:"parameter,omitempty"` + + // A list of protected_parameter blocks as defined below. The protected parameters used by the script. + ProtectedParameter []ProtectedParameterInitParameters `json:"protectedParameter,omitempty" tf:"protected_parameter,omitempty"` + + // Specifies the user account on the VM when executing the Virtual Machine Run Command. + RunAsUser *string `json:"runAsUser,omitempty" tf:"run_as_user,omitempty"` + + // A source block as defined below. The source of the run command script. + Source *VirtualMachineRunCommandSourceInitParameters `json:"source,omitempty" tf:"source,omitempty"` + + // A mapping of tags which should be assigned to the Virtual Machine Run Command. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type VirtualMachineRunCommandObservation struct { + + // An error_blob_managed_identity block as defined below. User-assigned managed Identity that has access to errorBlobUri storage blob. + ErrorBlobManagedIdentity *ErrorBlobManagedIdentityParameters `json:"errorBlobManagedIdentity,omitempty" tf:"error_blob_managed_identity,omitempty"` + + // Specifies the Azure storage blob where script error stream will be uploaded. + ErrorBlobURI *string `json:"errorBlobUri,omitempty" tf:"error_blob_uri,omitempty"` + + // The ID of the Virtual Machine Run Command. 
+ ID *string `json:"id,omitempty" tf:"id,omitempty"` + + InstanceView []InstanceViewObservation `json:"instanceView,omitempty" tf:"instance_view,omitempty"` + + // The Azure Region where the Virtual Machine Run Command should exist. Changing this forces a new Virtual Machine Run Command to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // An output_blob_managed_identity block as defined below. User-assigned managed Identity that has access to outputBlobUri storage blob. + OutputBlobManagedIdentity *OutputBlobManagedIdentityParameters `json:"outputBlobManagedIdentity,omitempty" tf:"output_blob_managed_identity,omitempty"` + + // Specifies the Azure storage blob where script output stream will be uploaded. It can be basic blob URI with SAS token. + OutputBlobURI *string `json:"outputBlobUri,omitempty" tf:"output_blob_uri,omitempty"` + + // A list of parameter blocks as defined below. The parameters used by the script. + Parameter []ParameterObservation `json:"parameter,omitempty" tf:"parameter,omitempty"` + + // A list of protected_parameter blocks as defined below. The protected parameters used by the script. + ProtectedParameter []ProtectedParameterParameters `json:"protectedParameter,omitempty" tf:"protected_parameter,omitempty"` + + // Specifies the user account on the VM when executing the Virtual Machine Run Command. + RunAsUser *string `json:"runAsUser,omitempty" tf:"run_as_user,omitempty"` + + // A source block as defined below. The source of the run command script. + Source *VirtualMachineRunCommandSourceObservation `json:"source,omitempty" tf:"source,omitempty"` + + // A mapping of tags which should be assigned to the Virtual Machine Run Command. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies the Virtual Machine ID within which this Virtual Machine Run Command should exist. Changing this forces a new Virtual Machine Run Command to be created. 
+ VirtualMachineID *string `json:"virtualMachineId,omitempty" tf:"virtual_machine_id,omitempty"` +} + +type VirtualMachineRunCommandParameters struct { + + // An error_blob_managed_identity block as defined below. User-assigned managed Identity that has access to errorBlobUri storage blob. + // +kubebuilder:validation:Optional + ErrorBlobManagedIdentity *ErrorBlobManagedIdentityParameters `json:"errorBlobManagedIdentity,omitempty" tf:"error_blob_managed_identity,omitempty"` + + // Specifies the Azure storage blob where script error stream will be uploaded. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Blob + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + ErrorBlobURI *string `json:"errorBlobUri,omitempty" tf:"error_blob_uri,omitempty"` + + // Reference to a Blob in storage to populate errorBlobUri. + // +kubebuilder:validation:Optional + ErrorBlobURIRef *v1.Reference `json:"errorBlobUriRef,omitempty" tf:"-"` + + // Selector for a Blob in storage to populate errorBlobUri. + // +kubebuilder:validation:Optional + ErrorBlobURISelector *v1.Selector `json:"errorBlobUriSelector,omitempty" tf:"-"` + + // The Azure Region where the Virtual Machine Run Command should exist. Changing this forces a new Virtual Machine Run Command to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // An output_blob_managed_identity block as defined below. User-assigned managed Identity that has access to outputBlobUri storage blob. + // +kubebuilder:validation:Optional + OutputBlobManagedIdentity *OutputBlobManagedIdentityParameters `json:"outputBlobManagedIdentity,omitempty" tf:"output_blob_managed_identity,omitempty"` + + // Specifies the Azure storage blob where script output stream will be uploaded. It can be basic blob URI with SAS token. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Blob + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + OutputBlobURI *string `json:"outputBlobUri,omitempty" tf:"output_blob_uri,omitempty"` + + // Reference to a Blob in storage to populate outputBlobUri. + // +kubebuilder:validation:Optional + OutputBlobURIRef *v1.Reference `json:"outputBlobUriRef,omitempty" tf:"-"` + + // Selector for a Blob in storage to populate outputBlobUri. + // +kubebuilder:validation:Optional + OutputBlobURISelector *v1.Selector `json:"outputBlobUriSelector,omitempty" tf:"-"` + + // A list of parameter blocks as defined below. The parameters used by the script. + // +kubebuilder:validation:Optional + Parameter []ParameterParameters `json:"parameter,omitempty" tf:"parameter,omitempty"` + + // A list of protected_parameter blocks as defined below. The protected parameters used by the script. + // +kubebuilder:validation:Optional + ProtectedParameter []ProtectedParameterParameters `json:"protectedParameter,omitempty" tf:"protected_parameter,omitempty"` + + // Specifies the user account password on the VM when executing the Virtual Machine Run Command. + // +kubebuilder:validation:Optional + RunAsPasswordSecretRef *v1.SecretKeySelector `json:"runAsPasswordSecretRef,omitempty" tf:"-"` + + // Specifies the user account on the VM when executing the Virtual Machine Run Command. + // +kubebuilder:validation:Optional + RunAsUser *string `json:"runAsUser,omitempty" tf:"run_as_user,omitempty"` + + // A source block as defined below. The source of the run command script. + // +kubebuilder:validation:Optional + Source *VirtualMachineRunCommandSourceParameters `json:"source,omitempty" tf:"source,omitempty"` + + // A mapping of tags which should be assigned to the Virtual Machine Run Command. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies the Virtual Machine ID within which this Virtual Machine Run Command should exist. Changing this forces a new Virtual Machine Run Command to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/compute/v1beta2.LinuxVirtualMachine + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + VirtualMachineID *string `json:"virtualMachineId,omitempty" tf:"virtual_machine_id,omitempty"` + + // Reference to a LinuxVirtualMachine in compute to populate virtualMachineId. + // +kubebuilder:validation:Optional + VirtualMachineIDRef *v1.Reference `json:"virtualMachineIdRef,omitempty" tf:"-"` + + // Selector for a LinuxVirtualMachine in compute to populate virtualMachineId. + // +kubebuilder:validation:Optional + VirtualMachineIDSelector *v1.Selector `json:"virtualMachineIdSelector,omitempty" tf:"-"` +} + +type VirtualMachineRunCommandSourceInitParameters struct { + CommandID *string `json:"commandId,omitempty" tf:"command_id,omitempty"` + + Script *string `json:"script,omitempty" tf:"script,omitempty"` + + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Blob + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + ScriptURI *string `json:"scriptUri,omitempty" tf:"script_uri,omitempty"` + + // A script_uri_managed_identity block as defined above. + ScriptURIManagedIdentity *ScriptURIManagedIdentityInitParameters `json:"scriptUriManagedIdentity,omitempty" tf:"script_uri_managed_identity,omitempty"` + + // Reference to a Blob in storage to populate scriptUri. 
+ // +kubebuilder:validation:Optional + ScriptURIRef *v1.Reference `json:"scriptUriRef,omitempty" tf:"-"` + + // Selector for a Blob in storage to populate scriptUri. + // +kubebuilder:validation:Optional + ScriptURISelector *v1.Selector `json:"scriptUriSelector,omitempty" tf:"-"` +} + +type VirtualMachineRunCommandSourceObservation struct { + CommandID *string `json:"commandId,omitempty" tf:"command_id,omitempty"` + + Script *string `json:"script,omitempty" tf:"script,omitempty"` + + ScriptURI *string `json:"scriptUri,omitempty" tf:"script_uri,omitempty"` + + // A script_uri_managed_identity block as defined above. + ScriptURIManagedIdentity *ScriptURIManagedIdentityParameters `json:"scriptUriManagedIdentity,omitempty" tf:"script_uri_managed_identity,omitempty"` +} + +type VirtualMachineRunCommandSourceParameters struct { + + // +kubebuilder:validation:Optional + CommandID *string `json:"commandId,omitempty" tf:"command_id,omitempty"` + + // +kubebuilder:validation:Optional + Script *string `json:"script,omitempty" tf:"script,omitempty"` + + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Blob + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + ScriptURI *string `json:"scriptUri,omitempty" tf:"script_uri,omitempty"` + + // A script_uri_managed_identity block as defined above. + // +kubebuilder:validation:Optional + ScriptURIManagedIdentity *ScriptURIManagedIdentityParameters `json:"scriptUriManagedIdentity,omitempty" tf:"script_uri_managed_identity,omitempty"` + + // Reference to a Blob in storage to populate scriptUri. + // +kubebuilder:validation:Optional + ScriptURIRef *v1.Reference `json:"scriptUriRef,omitempty" tf:"-"` + + // Selector for a Blob in storage to populate scriptUri. 
+ // +kubebuilder:validation:Optional + ScriptURISelector *v1.Selector `json:"scriptUriSelector,omitempty" tf:"-"` +} + +// VirtualMachineRunCommandSpec defines the desired state of VirtualMachineRunCommand +type VirtualMachineRunCommandSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider VirtualMachineRunCommandParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider VirtualMachineRunCommandInitParameters `json:"initProvider,omitempty"` +} + +// VirtualMachineRunCommandStatus defines the observed state of VirtualMachineRunCommand. +type VirtualMachineRunCommandStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider VirtualMachineRunCommandObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// VirtualMachineRunCommand is the Schema for the VirtualMachineRunCommands API. Manages a Virtual Machine Run Command. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type VirtualMachineRunCommand struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.source) || (has(self.initProvider) && has(self.initProvider.source))",message="spec.forProvider.source is a required parameter" + Spec VirtualMachineRunCommandSpec `json:"spec"` + Status VirtualMachineRunCommandStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// VirtualMachineRunCommandList contains a list of VirtualMachineRunCommands +type VirtualMachineRunCommandList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []VirtualMachineRunCommand `json:"items"` +} + +// Repository type metadata. +var ( + VirtualMachineRunCommand_Kind = "VirtualMachineRunCommand" + VirtualMachineRunCommand_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: VirtualMachineRunCommand_Kind}.String() + VirtualMachineRunCommand_KindAPIVersion = VirtualMachineRunCommand_Kind + "." 
+ CRDGroupVersion.String() + VirtualMachineRunCommand_GroupVersionKind = CRDGroupVersion.WithKind(VirtualMachineRunCommand_Kind) +) + +func init() { + SchemeBuilder.Register(&VirtualMachineRunCommand{}, &VirtualMachineRunCommandList{}) +} diff --git a/apis/compute/v1beta2/zz_windowsvirtualmachine_terraformed.go b/apis/compute/v1beta2/zz_windowsvirtualmachine_terraformed.go new file mode 100755 index 000000000..e4521d81d --- /dev/null +++ b/apis/compute/v1beta2/zz_windowsvirtualmachine_terraformed.go @@ -0,0 +1,131 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this WindowsVirtualMachine +func (mg *WindowsVirtualMachine) GetTerraformResourceType() string { + return "azurerm_windows_virtual_machine" +} + +// GetConnectionDetailsMapping for this WindowsVirtualMachine +func (tr *WindowsVirtualMachine) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"additional_unattend_content[*].content": "spec.forProvider.additionalUnattendContent[*].contentSecretRef", "admin_password": "spec.forProvider.adminPasswordSecretRef", "custom_data": "spec.forProvider.customDataSecretRef"} +} + +// GetObservation of this WindowsVirtualMachine +func (tr *WindowsVirtualMachine) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this WindowsVirtualMachine +func (tr *WindowsVirtualMachine) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, 
&tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this WindowsVirtualMachine +func (tr *WindowsVirtualMachine) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this WindowsVirtualMachine +func (tr *WindowsVirtualMachine) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this WindowsVirtualMachine +func (tr *WindowsVirtualMachine) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this WindowsVirtualMachine +func (tr *WindowsVirtualMachine) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this WindowsVirtualMachine +func (tr *WindowsVirtualMachine) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this WindowsVirtualMachine using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *WindowsVirtualMachine) LateInitialize(attrs []byte) (bool, error) { + params := &WindowsVirtualMachineParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + opts = append(opts, resource.WithNameFilter("PlatformFaultDomain")) + opts = append(opts, resource.WithNameFilter("VirtualMachineScaleSetID")) + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *WindowsVirtualMachine) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/compute/v1beta2/zz_windowsvirtualmachine_types.go b/apis/compute/v1beta2/zz_windowsvirtualmachine_types.go new file mode 100755 index 000000000..0e2ac8535 --- /dev/null +++ b/apis/compute/v1beta2/zz_windowsvirtualmachine_types.go @@ -0,0 +1,1152 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type WindowsVirtualMachineAdditionalCapabilitiesInitParameters struct { + + // Should the capacity to enable Data Disks of the UltraSSD_LRS storage account type be supported on this Virtual Machine? Defaults to false. + UltraSsdEnabled *bool `json:"ultraSsdEnabled,omitempty" tf:"ultra_ssd_enabled,omitempty"` +} + +type WindowsVirtualMachineAdditionalCapabilitiesObservation struct { + + // Should the capacity to enable Data Disks of the UltraSSD_LRS storage account type be supported on this Virtual Machine? Defaults to false. + UltraSsdEnabled *bool `json:"ultraSsdEnabled,omitempty" tf:"ultra_ssd_enabled,omitempty"` +} + +type WindowsVirtualMachineAdditionalCapabilitiesParameters struct { + + // Should the capacity to enable Data Disks of the UltraSSD_LRS storage account type be supported on this Virtual Machine? Defaults to false. + // +kubebuilder:validation:Optional + UltraSsdEnabled *bool `json:"ultraSsdEnabled,omitempty" tf:"ultra_ssd_enabled,omitempty"` +} + +type WindowsVirtualMachineAdditionalUnattendContentInitParameters struct { + + // The name of the setting to which the content applies. Possible values are AutoLogon and FirstLogonCommands. Changing this forces a new resource to be created. + Setting *string `json:"setting,omitempty" tf:"setting,omitempty"` +} + +type WindowsVirtualMachineAdditionalUnattendContentObservation struct { + + // The name of the setting to which the content applies. Possible values are AutoLogon and FirstLogonCommands. Changing this forces a new resource to be created. + Setting *string `json:"setting,omitempty" tf:"setting,omitempty"` +} + +type WindowsVirtualMachineAdditionalUnattendContentParameters struct { + + // The XML formatted content that is added to the unattend.xml file for the specified path and component. 
Changing this forces a new resource to be created. + // +kubebuilder:validation:Required + ContentSecretRef v1.SecretKeySelector `json:"contentSecretRef" tf:"-"` + + // The name of the setting to which the content applies. Possible values are AutoLogon and FirstLogonCommands. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Setting *string `json:"setting" tf:"setting,omitempty"` +} + +type WindowsVirtualMachineBootDiagnosticsInitParameters struct { + + // The Primary/Secondary Endpoint for the Azure Storage Account which should be used to store Boot Diagnostics, including Console Output and Screenshots from the Hypervisor. + StorageAccountURI *string `json:"storageAccountUri,omitempty" tf:"storage_account_uri,omitempty"` +} + +type WindowsVirtualMachineBootDiagnosticsObservation struct { + + // The Primary/Secondary Endpoint for the Azure Storage Account which should be used to store Boot Diagnostics, including Console Output and Screenshots from the Hypervisor. + StorageAccountURI *string `json:"storageAccountUri,omitempty" tf:"storage_account_uri,omitempty"` +} + +type WindowsVirtualMachineBootDiagnosticsParameters struct { + + // The Primary/Secondary Endpoint for the Azure Storage Account which should be used to store Boot Diagnostics, including Console Output and Screenshots from the Hypervisor. + // +kubebuilder:validation:Optional + StorageAccountURI *string `json:"storageAccountUri,omitempty" tf:"storage_account_uri,omitempty"` +} + +type WindowsVirtualMachineGalleryApplicationInitParameters struct { + + // Specifies whether the version will be automatically updated for the VM when a new Gallery Application version is available in PIR/SIG. Defaults to false. + AutomaticUpgradeEnabled *bool `json:"automaticUpgradeEnabled,omitempty" tf:"automatic_upgrade_enabled,omitempty"` + + // Specifies the URI to an Azure Blob that will replace the default configuration for the package if provided. 
+ ConfigurationBlobURI *string `json:"configurationBlobUri,omitempty" tf:"configuration_blob_uri,omitempty"` + + // Specifies the order in which the packages have to be installed. Possible values are between 0 and 2,147,483,647. + Order *float64 `json:"order,omitempty" tf:"order,omitempty"` + + // Specifies a passthrough value for more generic context. This field can be any valid string value. + Tag *string `json:"tag,omitempty" tf:"tag,omitempty"` + + // Specifies whether any failure for any operation in the VmApplication will fail the deployment of the VM. Defaults to false. + TreatFailureAsDeploymentFailureEnabled *bool `json:"treatFailureAsDeploymentFailureEnabled,omitempty" tf:"treat_failure_as_deployment_failure_enabled,omitempty"` + + // Specifies the Gallery Application Version resource ID. + VersionID *string `json:"versionId,omitempty" tf:"version_id,omitempty"` +} + +type WindowsVirtualMachineGalleryApplicationObservation struct { + + // Specifies whether the version will be automatically updated for the VM when a new Gallery Application version is available in PIR/SIG. Defaults to false. + AutomaticUpgradeEnabled *bool `json:"automaticUpgradeEnabled,omitempty" tf:"automatic_upgrade_enabled,omitempty"` + + // Specifies the URI to an Azure Blob that will replace the default configuration for the package if provided. + ConfigurationBlobURI *string `json:"configurationBlobUri,omitempty" tf:"configuration_blob_uri,omitempty"` + + // Specifies the order in which the packages have to be installed. Possible values are between 0 and 2,147,483,647. + Order *float64 `json:"order,omitempty" tf:"order,omitempty"` + + // Specifies a passthrough value for more generic context. This field can be any valid string value. + Tag *string `json:"tag,omitempty" tf:"tag,omitempty"` + + // Specifies whether any failure for any operation in the VmApplication will fail the deployment of the VM. Defaults to false. 
+ TreatFailureAsDeploymentFailureEnabled *bool `json:"treatFailureAsDeploymentFailureEnabled,omitempty" tf:"treat_failure_as_deployment_failure_enabled,omitempty"` + + // Specifies the Gallery Application Version resource ID. + VersionID *string `json:"versionId,omitempty" tf:"version_id,omitempty"` +} + +type WindowsVirtualMachineGalleryApplicationParameters struct { + + // Specifies whether the version will be automatically updated for the VM when a new Gallery Application version is available in PIR/SIG. Defaults to false. + // +kubebuilder:validation:Optional + AutomaticUpgradeEnabled *bool `json:"automaticUpgradeEnabled,omitempty" tf:"automatic_upgrade_enabled,omitempty"` + + // Specifies the URI to an Azure Blob that will replace the default configuration for the package if provided. + // +kubebuilder:validation:Optional + ConfigurationBlobURI *string `json:"configurationBlobUri,omitempty" tf:"configuration_blob_uri,omitempty"` + + // Specifies the order in which the packages have to be installed. Possible values are between 0 and 2,147,483,647. + // +kubebuilder:validation:Optional + Order *float64 `json:"order,omitempty" tf:"order,omitempty"` + + // Specifies a passthrough value for more generic context. This field can be any valid string value. + // +kubebuilder:validation:Optional + Tag *string `json:"tag,omitempty" tf:"tag,omitempty"` + + // Specifies whether any failure for any operation in the VmApplication will fail the deployment of the VM. Defaults to false. + // +kubebuilder:validation:Optional + TreatFailureAsDeploymentFailureEnabled *bool `json:"treatFailureAsDeploymentFailureEnabled,omitempty" tf:"treat_failure_as_deployment_failure_enabled,omitempty"` + + // Specifies the Gallery Application Version resource ID. 
+ // +kubebuilder:validation:Optional + VersionID *string `json:"versionId" tf:"version_id,omitempty"` +} + +type WindowsVirtualMachineIdentityInitParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Windows Virtual Machine. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Windows Virtual Machine. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type WindowsVirtualMachineIdentityObservation struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Windows Virtual Machine. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Principal ID associated with this Managed Service Identity. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID associated with this Managed Service Identity. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Windows Virtual Machine. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type WindowsVirtualMachineIdentityParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Windows Virtual Machine. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Windows Virtual Machine. 
Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type WindowsVirtualMachineInitParameters struct { + + // A additional_capabilities block as defined below. + AdditionalCapabilities *WindowsVirtualMachineAdditionalCapabilitiesInitParameters `json:"additionalCapabilities,omitempty" tf:"additional_capabilities,omitempty"` + + // One or more additional_unattend_content blocks as defined below. Changing this forces a new resource to be created. + AdditionalUnattendContent []WindowsVirtualMachineAdditionalUnattendContentInitParameters `json:"additionalUnattendContent,omitempty" tf:"additional_unattend_content,omitempty"` + + // The username of the local administrator used for the Virtual Machine. Changing this forces a new resource to be created. + AdminUsername *string `json:"adminUsername,omitempty" tf:"admin_username,omitempty"` + + // Should Extension Operations be allowed on this Virtual Machine? Defaults to true. + AllowExtensionOperations *bool `json:"allowExtensionOperations,omitempty" tf:"allow_extension_operations,omitempty"` + + // Specifies the ID of the Availability Set in which the Virtual Machine should exist. Changing this forces a new resource to be created. + AvailabilitySetID *string `json:"availabilitySetId,omitempty" tf:"availability_set_id,omitempty"` + + // A boot_diagnostics block as defined below. + BootDiagnostics *WindowsVirtualMachineBootDiagnosticsInitParameters `json:"bootDiagnostics,omitempty" tf:"boot_diagnostics,omitempty"` + + // Specifies whether to skip platform scheduled patching when a user schedule is associated with the VM. Defaults to false. 
+ BypassPlatformSafetyChecksOnUserScheduleEnabled *bool `json:"bypassPlatformSafetyChecksOnUserScheduleEnabled,omitempty" tf:"bypass_platform_safety_checks_on_user_schedule_enabled,omitempty"` + + // Specifies the ID of the Capacity Reservation Group which the Virtual Machine should be allocated to. + CapacityReservationGroupID *string `json:"capacityReservationGroupId,omitempty" tf:"capacity_reservation_group_id,omitempty"` + + // Specifies the Hostname which should be used for this Virtual Machine. If unspecified this defaults to the value for the name field. If the value of the name field is not a valid computer_name, then you must specify computer_name. Changing this forces a new resource to be created. + ComputerName *string `json:"computerName,omitempty" tf:"computer_name,omitempty"` + + // The ID of a Dedicated Host Group that this Windows Virtual Machine should be run within. Conflicts with dedicated_host_id. + DedicatedHostGroupID *string `json:"dedicatedHostGroupId,omitempty" tf:"dedicated_host_group_id,omitempty"` + + // The ID of a Dedicated Host where this machine should be run on. Conflicts with dedicated_host_group_id. + DedicatedHostID *string `json:"dedicatedHostId,omitempty" tf:"dedicated_host_id,omitempty"` + + // Specifies the Disk Controller Type used for this Virtual Machine. Possible values are SCSI and NVMe. + DiskControllerType *string `json:"diskControllerType,omitempty" tf:"disk_controller_type,omitempty"` + + // Specifies the Edge Zone within the Azure Region where this Windows Virtual Machine should exist. Changing this forces a new Windows Virtual Machine to be created. + EdgeZone *string `json:"edgeZone,omitempty" tf:"edge_zone,omitempty"` + + // Specifies if Automatic Updates are Enabled for the Windows Virtual Machine. Changing this forces a new resource to be created. Defaults to true. 
+ EnableAutomaticUpdates *bool `json:"enableAutomaticUpdates,omitempty" tf:"enable_automatic_updates,omitempty"` + + // Should all of the disks (including the temp disk) attached to this Virtual Machine be encrypted by enabling Encryption at Host? + EncryptionAtHostEnabled *bool `json:"encryptionAtHostEnabled,omitempty" tf:"encryption_at_host_enabled,omitempty"` + + // Specifies what should happen when the Virtual Machine is evicted for price reasons when using a Spot instance. Possible values are Deallocate and Delete. Changing this forces a new resource to be created. + EvictionPolicy *string `json:"evictionPolicy,omitempty" tf:"eviction_policy,omitempty"` + + // Specifies the duration allocated for all extensions to start. The time duration should be between 15 minutes and 120 minutes (inclusive) and should be specified in ISO 8601 format. Defaults to PT1H30M. + ExtensionsTimeBudget *string `json:"extensionsTimeBudget,omitempty" tf:"extensions_time_budget,omitempty"` + + // One or more gallery_application blocks as defined below. + GalleryApplication []WindowsVirtualMachineGalleryApplicationInitParameters `json:"galleryApplication,omitempty" tf:"gallery_application,omitempty"` + + // Should the VM be patched without requiring a reboot? Possible values are true or false. Defaults to false. For more information about hot patching please see the product documentation. + HotpatchingEnabled *bool `json:"hotpatchingEnabled,omitempty" tf:"hotpatching_enabled,omitempty"` + + // An identity block as defined below. + Identity *WindowsVirtualMachineIdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Specifies the type of on-premise license (also known as Azure Hybrid Use Benefit) which should be used for this Virtual Machine. Possible values are None, Windows_Client and Windows_Server. + LicenseType *string `json:"licenseType,omitempty" tf:"license_type,omitempty"` + + // The Azure location where the Windows Virtual Machine should exist. 
Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The maximum price you're willing to pay for this Virtual Machine, in US Dollars; which must be greater than the current spot price. If this bid price falls below the current spot price the Virtual Machine will be evicted using the eviction_policy. Defaults to -1, which means that the Virtual Machine should not be evicted for price reasons. + MaxBidPrice *float64 `json:"maxBidPrice,omitempty" tf:"max_bid_price,omitempty"` + + // . A list of Network Interface IDs which should be attached to this Virtual Machine. The first Network Interface ID in this list will be the Primary Network Interface on the Virtual Machine. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.NetworkInterface + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + NetworkInterfaceIds []*string `json:"networkInterfaceIds,omitempty" tf:"network_interface_ids,omitempty"` + + // References to NetworkInterface in network to populate networkInterfaceIds. + // +kubebuilder:validation:Optional + NetworkInterfaceIdsRefs []v1.Reference `json:"networkInterfaceIdsRefs,omitempty" tf:"-"` + + // Selector for a list of NetworkInterface in network to populate networkInterfaceIds. + // +kubebuilder:validation:Optional + NetworkInterfaceIdsSelector *v1.Selector `json:"networkInterfaceIdsSelector,omitempty" tf:"-"` + + // A os_disk block as defined below. + OsDisk *WindowsVirtualMachineOsDiskInitParameters `json:"osDisk,omitempty" tf:"os_disk,omitempty"` + + // A os_image_notification block as defined below. + OsImageNotification *WindowsVirtualMachineOsImageNotificationInitParameters `json:"osImageNotification,omitempty" tf:"os_image_notification,omitempty"` + + // Specifies the mode of VM Guest Patching for the Virtual Machine. 
Possible values are AutomaticByPlatform or ImageDefault. Defaults to ImageDefault. + PatchAssessmentMode *string `json:"patchAssessmentMode,omitempty" tf:"patch_assessment_mode,omitempty"` + + // Specifies the mode of in-guest patching to this Windows Virtual Machine. Possible values are Manual, AutomaticByOS and AutomaticByPlatform. Defaults to AutomaticByOS. For more information on patch modes please see the product documentation. + PatchMode *string `json:"patchMode,omitempty" tf:"patch_mode,omitempty"` + + // A plan block as defined below. Changing this forces a new resource to be created. + Plan *WindowsVirtualMachinePlanInitParameters `json:"plan,omitempty" tf:"plan,omitempty"` + + // Specifies the Platform Fault Domain in which this Windows Virtual Machine should be created. Defaults to -1, which means this will be automatically assigned to a fault domain that best maintains balance across the available fault domains. Changing this forces a new Windows Virtual Machine to be created. + PlatformFaultDomain *float64 `json:"platformFaultDomain,omitempty" tf:"platform_fault_domain,omitempty"` + + // Specifies the priority of this Virtual Machine. Possible values are Regular and Spot. Defaults to Regular. Changing this forces a new resource to be created. + Priority *string `json:"priority,omitempty" tf:"priority,omitempty"` + + // Should the Azure VM Agent be provisioned on this Virtual Machine? Defaults to true. Changing this forces a new resource to be created. + ProvisionVMAgent *bool `json:"provisionVmAgent,omitempty" tf:"provision_vm_agent,omitempty"` + + // The ID of the Proximity Placement Group which the Virtual Machine should be assigned to. + ProximityPlacementGroupID *string `json:"proximityPlacementGroupId,omitempty" tf:"proximity_placement_group_id,omitempty"` + + // Specifies the reboot setting for platform scheduled patching. Possible values are Always, IfRequired and Never. 
+ RebootSetting *string `json:"rebootSetting,omitempty" tf:"reboot_setting,omitempty"` + + // One or more secret blocks as defined below. + Secret []WindowsVirtualMachineSecretInitParameters `json:"secret,omitempty" tf:"secret,omitempty"` + + // Specifies if Secure Boot and Trusted Launch is enabled for the Virtual Machine. Changing this forces a new resource to be created. + SecureBootEnabled *bool `json:"secureBootEnabled,omitempty" tf:"secure_boot_enabled,omitempty"` + + // The SKU which should be used for this Virtual Machine, such as Standard_F2. + Size *string `json:"size,omitempty" tf:"size,omitempty"` + + // The ID of the Image which this Virtual Machine should be created from. Changing this forces a new resource to be created. Possible Image ID types include Image IDs, Shared Image IDs, Shared Image Version IDs, Community Gallery Image IDs, Community Gallery Image Version IDs, Shared Gallery Image IDs and Shared Gallery Image Version IDs. + SourceImageID *string `json:"sourceImageId,omitempty" tf:"source_image_id,omitempty"` + + // A source_image_reference block as defined below. Changing this forces a new resource to be created. + SourceImageReference *WindowsVirtualMachineSourceImageReferenceInitParameters `json:"sourceImageReference,omitempty" tf:"source_image_reference,omitempty"` + + // A mapping of tags which should be assigned to this Virtual Machine. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A termination_notification block as defined below. + TerminationNotification *WindowsVirtualMachineTerminationNotificationInitParameters `json:"terminationNotification,omitempty" tf:"termination_notification,omitempty"` + + // Specifies the Time Zone which should be used by the Virtual Machine, the possible values are defined here. Changing this forces a new resource to be created. 
+ Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` + + // The Base64-Encoded User Data which should be used for this Virtual Machine. + UserData *string `json:"userData,omitempty" tf:"user_data,omitempty"` + + // Specifies whether VMAgent Platform Updates is enabled. Defaults to false. + VMAgentPlatformUpdatesEnabled *bool `json:"vmAgentPlatformUpdatesEnabled,omitempty" tf:"vm_agent_platform_updates_enabled,omitempty"` + + // Specifies the Orchestrated Virtual Machine Scale Set that this Virtual Machine should be created within. + VirtualMachineScaleSetID *string `json:"virtualMachineScaleSetId,omitempty" tf:"virtual_machine_scale_set_id,omitempty"` + + // Specifies if vTPM (virtual Trusted Platform Module) and Trusted Launch is enabled for the Virtual Machine. Changing this forces a new resource to be created. + VtpmEnabled *bool `json:"vtpmEnabled,omitempty" tf:"vtpm_enabled,omitempty"` + + // One or more winrm_listener blocks as defined below. Changing this forces a new resource to be created. + WinrmListener []WindowsVirtualMachineWinrmListenerInitParameters `json:"winrmListener,omitempty" tf:"winrm_listener,omitempty"` + + // * zones - Specifies the Availability Zone in which this Windows Virtual Machine should be located. Changing this forces a new Windows Virtual Machine to be created. + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` +} + +type WindowsVirtualMachineObservation struct { + + // A additional_capabilities block as defined below. + AdditionalCapabilities *WindowsVirtualMachineAdditionalCapabilitiesObservation `json:"additionalCapabilities,omitempty" tf:"additional_capabilities,omitempty"` + + // One or more additional_unattend_content blocks as defined below. Changing this forces a new resource to be created. 
+ AdditionalUnattendContent []WindowsVirtualMachineAdditionalUnattendContentObservation `json:"additionalUnattendContent,omitempty" tf:"additional_unattend_content,omitempty"` + + // The username of the local administrator used for the Virtual Machine. Changing this forces a new resource to be created. + AdminUsername *string `json:"adminUsername,omitempty" tf:"admin_username,omitempty"` + + // Should Extension Operations be allowed on this Virtual Machine? Defaults to true. + AllowExtensionOperations *bool `json:"allowExtensionOperations,omitempty" tf:"allow_extension_operations,omitempty"` + + // Specifies the ID of the Availability Set in which the Virtual Machine should exist. Changing this forces a new resource to be created. + AvailabilitySetID *string `json:"availabilitySetId,omitempty" tf:"availability_set_id,omitempty"` + + // A boot_diagnostics block as defined below. + BootDiagnostics *WindowsVirtualMachineBootDiagnosticsObservation `json:"bootDiagnostics,omitempty" tf:"boot_diagnostics,omitempty"` + + // Specifies whether to skip platform scheduled patching when a user schedule is associated with the VM. Defaults to false. + BypassPlatformSafetyChecksOnUserScheduleEnabled *bool `json:"bypassPlatformSafetyChecksOnUserScheduleEnabled,omitempty" tf:"bypass_platform_safety_checks_on_user_schedule_enabled,omitempty"` + + // Specifies the ID of the Capacity Reservation Group which the Virtual Machine should be allocated to. + CapacityReservationGroupID *string `json:"capacityReservationGroupId,omitempty" tf:"capacity_reservation_group_id,omitempty"` + + // Specifies the Hostname which should be used for this Virtual Machine. If unspecified this defaults to the value for the name field. If the value of the name field is not a valid computer_name, then you must specify computer_name. Changing this forces a new resource to be created. 
+ ComputerName *string `json:"computerName,omitempty" tf:"computer_name,omitempty"` + + // The ID of a Dedicated Host Group that this Windows Virtual Machine should be run within. Conflicts with dedicated_host_id. + DedicatedHostGroupID *string `json:"dedicatedHostGroupId,omitempty" tf:"dedicated_host_group_id,omitempty"` + + // The ID of a Dedicated Host where this machine should be run on. Conflicts with dedicated_host_group_id. + DedicatedHostID *string `json:"dedicatedHostId,omitempty" tf:"dedicated_host_id,omitempty"` + + // Specifies the Disk Controller Type used for this Virtual Machine. Possible values are SCSI and NVMe. + DiskControllerType *string `json:"diskControllerType,omitempty" tf:"disk_controller_type,omitempty"` + + // Specifies the Edge Zone within the Azure Region where this Windows Virtual Machine should exist. Changing this forces a new Windows Virtual Machine to be created. + EdgeZone *string `json:"edgeZone,omitempty" tf:"edge_zone,omitempty"` + + // Specifies if Automatic Updates are Enabled for the Windows Virtual Machine. Changing this forces a new resource to be created. Defaults to true. + EnableAutomaticUpdates *bool `json:"enableAutomaticUpdates,omitempty" tf:"enable_automatic_updates,omitempty"` + + // Should all of the disks (including the temp disk) attached to this Virtual Machine be encrypted by enabling Encryption at Host? + EncryptionAtHostEnabled *bool `json:"encryptionAtHostEnabled,omitempty" tf:"encryption_at_host_enabled,omitempty"` + + // Specifies what should happen when the Virtual Machine is evicted for price reasons when using a Spot instance. Possible values are Deallocate and Delete. Changing this forces a new resource to be created. + EvictionPolicy *string `json:"evictionPolicy,omitempty" tf:"eviction_policy,omitempty"` + + // Specifies the duration allocated for all extensions to start. The time duration should be between 15 minutes and 120 minutes (inclusive) and should be specified in ISO 8601 format. 
Defaults to PT1H30M. + ExtensionsTimeBudget *string `json:"extensionsTimeBudget,omitempty" tf:"extensions_time_budget,omitempty"` + + // One or more gallery_application blocks as defined below. + GalleryApplication []WindowsVirtualMachineGalleryApplicationObservation `json:"galleryApplication,omitempty" tf:"gallery_application,omitempty"` + + // Should the VM be patched without requiring a reboot? Possible values are true or false. Defaults to false. For more information about hot patching please see the product documentation. + HotpatchingEnabled *bool `json:"hotpatchingEnabled,omitempty" tf:"hotpatching_enabled,omitempty"` + + // The ID of the Windows Virtual Machine. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. + Identity *WindowsVirtualMachineIdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // Specifies the type of on-premise license (also known as Azure Hybrid Use Benefit) which should be used for this Virtual Machine. Possible values are None, Windows_Client and Windows_Server. + LicenseType *string `json:"licenseType,omitempty" tf:"license_type,omitempty"` + + // The Azure location where the Windows Virtual Machine should exist. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The maximum price you're willing to pay for this Virtual Machine, in US Dollars; which must be greater than the current spot price. If this bid price falls below the current spot price the Virtual Machine will be evicted using the eviction_policy. Defaults to -1, which means that the Virtual Machine should not be evicted for price reasons. + MaxBidPrice *float64 `json:"maxBidPrice,omitempty" tf:"max_bid_price,omitempty"` + + // . A list of Network Interface IDs which should be attached to this Virtual Machine. The first Network Interface ID in this list will be the Primary Network Interface on the Virtual Machine. 
+ NetworkInterfaceIds []*string `json:"networkInterfaceIds,omitempty" tf:"network_interface_ids,omitempty"` + + // A os_disk block as defined below. + OsDisk *WindowsVirtualMachineOsDiskObservation `json:"osDisk,omitempty" tf:"os_disk,omitempty"` + + // A os_image_notification block as defined below. + OsImageNotification *WindowsVirtualMachineOsImageNotificationObservation `json:"osImageNotification,omitempty" tf:"os_image_notification,omitempty"` + + // Specifies the mode of VM Guest Patching for the Virtual Machine. Possible values are AutomaticByPlatform or ImageDefault. Defaults to ImageDefault. + PatchAssessmentMode *string `json:"patchAssessmentMode,omitempty" tf:"patch_assessment_mode,omitempty"` + + // Specifies the mode of in-guest patching to this Windows Virtual Machine. Possible values are Manual, AutomaticByOS and AutomaticByPlatform. Defaults to AutomaticByOS. For more information on patch modes please see the product documentation. + PatchMode *string `json:"patchMode,omitempty" tf:"patch_mode,omitempty"` + + // A plan block as defined below. Changing this forces a new resource to be created. + Plan *WindowsVirtualMachinePlanObservation `json:"plan,omitempty" tf:"plan,omitempty"` + + // Specifies the Platform Fault Domain in which this Windows Virtual Machine should be created. Defaults to -1, which means this will be automatically assigned to a fault domain that best maintains balance across the available fault domains. Changing this forces a new Windows Virtual Machine to be created. + PlatformFaultDomain *float64 `json:"platformFaultDomain,omitempty" tf:"platform_fault_domain,omitempty"` + + // Specifies the priority of this Virtual Machine. Possible values are Regular and Spot. Defaults to Regular. Changing this forces a new resource to be created. + Priority *string `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Primary Private IP Address assigned to this Virtual Machine. 
+ PrivateIPAddress *string `json:"privateIpAddress,omitempty" tf:"private_ip_address,omitempty"` + + // A list of Private IP Addresses assigned to this Virtual Machine. + PrivateIPAddresses []*string `json:"privateIpAddresses,omitempty" tf:"private_ip_addresses,omitempty"` + + // Should the Azure VM Agent be provisioned on this Virtual Machine? Defaults to true. Changing this forces a new resource to be created. + ProvisionVMAgent *bool `json:"provisionVmAgent,omitempty" tf:"provision_vm_agent,omitempty"` + + // The ID of the Proximity Placement Group which the Virtual Machine should be assigned to. + ProximityPlacementGroupID *string `json:"proximityPlacementGroupId,omitempty" tf:"proximity_placement_group_id,omitempty"` + + // The Primary Public IP Address assigned to this Virtual Machine. + PublicIPAddress *string `json:"publicIpAddress,omitempty" tf:"public_ip_address,omitempty"` + + // A list of the Public IP Addresses assigned to this Virtual Machine. + PublicIPAddresses []*string `json:"publicIpAddresses,omitempty" tf:"public_ip_addresses,omitempty"` + + // Specifies the reboot setting for platform scheduled patching. Possible values are Always, IfRequired and Never. + RebootSetting *string `json:"rebootSetting,omitempty" tf:"reboot_setting,omitempty"` + + // The name of the Resource Group in which the Windows Virtual Machine should be exist. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // One or more secret blocks as defined below. + Secret []WindowsVirtualMachineSecretObservation `json:"secret,omitempty" tf:"secret,omitempty"` + + // Specifies if Secure Boot and Trusted Launch is enabled for the Virtual Machine. Changing this forces a new resource to be created. + SecureBootEnabled *bool `json:"secureBootEnabled,omitempty" tf:"secure_boot_enabled,omitempty"` + + // The SKU which should be used for this Virtual Machine, such as Standard_F2. 
+ Size *string `json:"size,omitempty" tf:"size,omitempty"` + + // The ID of the Image which this Virtual Machine should be created from. Changing this forces a new resource to be created. Possible Image ID types include Image IDs, Shared Image IDs, Shared Image Version IDs, Community Gallery Image IDs, Community Gallery Image Version IDs, Shared Gallery Image IDs and Shared Gallery Image Version IDs. + SourceImageID *string `json:"sourceImageId,omitempty" tf:"source_image_id,omitempty"` + + // A source_image_reference block as defined below. Changing this forces a new resource to be created. + SourceImageReference *WindowsVirtualMachineSourceImageReferenceObservation `json:"sourceImageReference,omitempty" tf:"source_image_reference,omitempty"` + + // A mapping of tags which should be assigned to this Virtual Machine. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A termination_notification block as defined below. + TerminationNotification *WindowsVirtualMachineTerminationNotificationObservation `json:"terminationNotification,omitempty" tf:"termination_notification,omitempty"` + + // Specifies the Time Zone which should be used by the Virtual Machine, the possible values are defined here. Changing this forces a new resource to be created. + Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` + + // The Base64-Encoded User Data which should be used for this Virtual Machine. + UserData *string `json:"userData,omitempty" tf:"user_data,omitempty"` + + // Specifies whether VMAgent Platform Updates is enabled. Defaults to false. + VMAgentPlatformUpdatesEnabled *bool `json:"vmAgentPlatformUpdatesEnabled,omitempty" tf:"vm_agent_platform_updates_enabled,omitempty"` + + // A 128-bit identifier which uniquely identifies this Virtual Machine. 
+ VirtualMachineID *string `json:"virtualMachineId,omitempty" tf:"virtual_machine_id,omitempty"` + + // Specifies the Orchestrated Virtual Machine Scale Set that this Virtual Machine should be created within. + VirtualMachineScaleSetID *string `json:"virtualMachineScaleSetId,omitempty" tf:"virtual_machine_scale_set_id,omitempty"` + + // Specifies if vTPM (virtual Trusted Platform Module) and Trusted Launch is enabled for the Virtual Machine. Changing this forces a new resource to be created. + VtpmEnabled *bool `json:"vtpmEnabled,omitempty" tf:"vtpm_enabled,omitempty"` + + // One or more winrm_listener blocks as defined below. Changing this forces a new resource to be created. + WinrmListener []WindowsVirtualMachineWinrmListenerObservation `json:"winrmListener,omitempty" tf:"winrm_listener,omitempty"` + + // * zones - Specifies the Availability Zone in which this Windows Virtual Machine should be located. Changing this forces a new Windows Virtual Machine to be created. + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` +} + +type WindowsVirtualMachineOsDiskDiffDiskSettingsInitParameters struct { + + // Specifies the Ephemeral Disk Settings for the OS Disk. At this time the only possible value is Local. Changing this forces a new resource to be created. + Option *string `json:"option,omitempty" tf:"option,omitempty"` + + // Specifies where to store the Ephemeral Disk. Possible values are CacheDisk and ResourceDisk. Defaults to CacheDisk. Changing this forces a new resource to be created. + Placement *string `json:"placement,omitempty" tf:"placement,omitempty"` +} + +type WindowsVirtualMachineOsDiskDiffDiskSettingsObservation struct { + + // Specifies the Ephemeral Disk Settings for the OS Disk. At this time the only possible value is Local. Changing this forces a new resource to be created. + Option *string `json:"option,omitempty" tf:"option,omitempty"` + + // Specifies where to store the Ephemeral Disk. Possible values are CacheDisk and ResourceDisk. 
Defaults to CacheDisk. Changing this forces a new resource to be created. + Placement *string `json:"placement,omitempty" tf:"placement,omitempty"` +} + +type WindowsVirtualMachineOsDiskDiffDiskSettingsParameters struct { + + // Specifies the Ephemeral Disk Settings for the OS Disk. At this time the only possible value is Local. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Option *string `json:"option" tf:"option,omitempty"` + + // Specifies where to store the Ephemeral Disk. Possible values are CacheDisk and ResourceDisk. Defaults to CacheDisk. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Placement *string `json:"placement,omitempty" tf:"placement,omitempty"` +} + +type WindowsVirtualMachineOsDiskInitParameters struct { + + // The Type of Caching which should be used for the Internal OS Disk. Possible values are None, ReadOnly and ReadWrite. + Caching *string `json:"caching,omitempty" tf:"caching,omitempty"` + + // A diff_disk_settings block as defined above. Changing this forces a new resource to be created. + DiffDiskSettings *WindowsVirtualMachineOsDiskDiffDiskSettingsInitParameters `json:"diffDiskSettings,omitempty" tf:"diff_disk_settings,omitempty"` + + // The ID of the Disk Encryption Set which should be used to Encrypt this OS Disk. Conflicts with secure_vm_disk_encryption_set_id. + DiskEncryptionSetID *string `json:"diskEncryptionSetId,omitempty" tf:"disk_encryption_set_id,omitempty"` + + // The Size of the Internal OS Disk in GB, if you wish to vary from the size used in the image this Virtual Machine is sourced from. + DiskSizeGb *float64 `json:"diskSizeGb,omitempty" tf:"disk_size_gb,omitempty"` + + // The name which should be used for the Internal OS Disk. Changing this forces a new resource to be created. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The ID of the Disk Encryption Set which should be used to Encrypt this OS Disk when the Virtual Machine is a Confidential VM. Conflicts with disk_encryption_set_id. Changing this forces a new resource to be created. + SecureVMDiskEncryptionSetID *string `json:"secureVmDiskEncryptionSetId,omitempty" tf:"secure_vm_disk_encryption_set_id,omitempty"` + + // Encryption Type when the Virtual Machine is a Confidential VM. Possible values are VMGuestStateOnly and DiskWithVMGuestState. Changing this forces a new resource to be created. + SecurityEncryptionType *string `json:"securityEncryptionType,omitempty" tf:"security_encryption_type,omitempty"` + + // The Type of Storage Account which should back this the Internal OS Disk. Possible values are Standard_LRS, StandardSSD_LRS, Premium_LRS, StandardSSD_ZRS and Premium_ZRS. Changing this forces a new resource to be created. + StorageAccountType *string `json:"storageAccountType,omitempty" tf:"storage_account_type,omitempty"` + + // Should Write Accelerator be Enabled for this OS Disk? Defaults to false. + WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty" tf:"write_accelerator_enabled,omitempty"` +} + +type WindowsVirtualMachineOsDiskObservation struct { + + // The Type of Caching which should be used for the Internal OS Disk. Possible values are None, ReadOnly and ReadWrite. + Caching *string `json:"caching,omitempty" tf:"caching,omitempty"` + + // A diff_disk_settings block as defined above. Changing this forces a new resource to be created. + DiffDiskSettings *WindowsVirtualMachineOsDiskDiffDiskSettingsObservation `json:"diffDiskSettings,omitempty" tf:"diff_disk_settings,omitempty"` + + // The ID of the Disk Encryption Set which should be used to Encrypt this OS Disk. Conflicts with secure_vm_disk_encryption_set_id. 
+ DiskEncryptionSetID *string `json:"diskEncryptionSetId,omitempty" tf:"disk_encryption_set_id,omitempty"` + + // The Size of the Internal OS Disk in GB, if you wish to vary from the size used in the image this Virtual Machine is sourced from. + DiskSizeGb *float64 `json:"diskSizeGb,omitempty" tf:"disk_size_gb,omitempty"` + + // The name which should be used for the Internal OS Disk. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The ID of the Disk Encryption Set which should be used to Encrypt this OS Disk when the Virtual Machine is a Confidential VM. Conflicts with disk_encryption_set_id. Changing this forces a new resource to be created. + SecureVMDiskEncryptionSetID *string `json:"secureVmDiskEncryptionSetId,omitempty" tf:"secure_vm_disk_encryption_set_id,omitempty"` + + // Encryption Type when the Virtual Machine is a Confidential VM. Possible values are VMGuestStateOnly and DiskWithVMGuestState. Changing this forces a new resource to be created. + SecurityEncryptionType *string `json:"securityEncryptionType,omitempty" tf:"security_encryption_type,omitempty"` + + // The Type of Storage Account which should back this the Internal OS Disk. Possible values are Standard_LRS, StandardSSD_LRS, Premium_LRS, StandardSSD_ZRS and Premium_ZRS. Changing this forces a new resource to be created. + StorageAccountType *string `json:"storageAccountType,omitempty" tf:"storage_account_type,omitempty"` + + // Should Write Accelerator be Enabled for this OS Disk? Defaults to false. + WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty" tf:"write_accelerator_enabled,omitempty"` +} + +type WindowsVirtualMachineOsDiskParameters struct { + + // The Type of Caching which should be used for the Internal OS Disk. Possible values are None, ReadOnly and ReadWrite. 
+ // +kubebuilder:validation:Optional + Caching *string `json:"caching" tf:"caching,omitempty"` + + // A diff_disk_settings block as defined above. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + DiffDiskSettings *WindowsVirtualMachineOsDiskDiffDiskSettingsParameters `json:"diffDiskSettings,omitempty" tf:"diff_disk_settings,omitempty"` + + // The ID of the Disk Encryption Set which should be used to Encrypt this OS Disk. Conflicts with secure_vm_disk_encryption_set_id. + // +kubebuilder:validation:Optional + DiskEncryptionSetID *string `json:"diskEncryptionSetId,omitempty" tf:"disk_encryption_set_id,omitempty"` + + // The Size of the Internal OS Disk in GB, if you wish to vary from the size used in the image this Virtual Machine is sourced from. + // +kubebuilder:validation:Optional + DiskSizeGb *float64 `json:"diskSizeGb,omitempty" tf:"disk_size_gb,omitempty"` + + // The name which should be used for the Internal OS Disk. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The ID of the Disk Encryption Set which should be used to Encrypt this OS Disk when the Virtual Machine is a Confidential VM. Conflicts with disk_encryption_set_id. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + SecureVMDiskEncryptionSetID *string `json:"secureVmDiskEncryptionSetId,omitempty" tf:"secure_vm_disk_encryption_set_id,omitempty"` + + // Encryption Type when the Virtual Machine is a Confidential VM. Possible values are VMGuestStateOnly and DiskWithVMGuestState. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + SecurityEncryptionType *string `json:"securityEncryptionType,omitempty" tf:"security_encryption_type,omitempty"` + + // The Type of Storage Account which should back this the Internal OS Disk. 
Possible values are Standard_LRS, StandardSSD_LRS, Premium_LRS, StandardSSD_ZRS and Premium_ZRS. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + StorageAccountType *string `json:"storageAccountType" tf:"storage_account_type,omitempty"` + + // Should Write Accelerator be Enabled for this OS Disk? Defaults to false. + // +kubebuilder:validation:Optional + WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty" tf:"write_accelerator_enabled,omitempty"` +} + +type WindowsVirtualMachineOsImageNotificationInitParameters struct { + + // Length of time a notification to be sent to the VM on the instance metadata server till the VM gets OS upgraded. The only possible value is PT15M. Defaults to PT15M. + Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` +} + +type WindowsVirtualMachineOsImageNotificationObservation struct { + + // Length of time a notification to be sent to the VM on the instance metadata server till the VM gets OS upgraded. The only possible value is PT15M. Defaults to PT15M. + Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` +} + +type WindowsVirtualMachineOsImageNotificationParameters struct { + + // Length of time a notification to be sent to the VM on the instance metadata server till the VM gets OS upgraded. The only possible value is PT15M. Defaults to PT15M. + // +kubebuilder:validation:Optional + Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` +} + +type WindowsVirtualMachineParameters struct { + + // A additional_capabilities block as defined below. + // +kubebuilder:validation:Optional + AdditionalCapabilities *WindowsVirtualMachineAdditionalCapabilitiesParameters `json:"additionalCapabilities,omitempty" tf:"additional_capabilities,omitempty"` + + // One or more additional_unattend_content blocks as defined below. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + AdditionalUnattendContent []WindowsVirtualMachineAdditionalUnattendContentParameters `json:"additionalUnattendContent,omitempty" tf:"additional_unattend_content,omitempty"` + + // The Password which should be used for the local-administrator on this Virtual Machine. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + AdminPasswordSecretRef v1.SecretKeySelector `json:"adminPasswordSecretRef" tf:"-"` + + // The username of the local administrator used for the Virtual Machine. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + AdminUsername *string `json:"adminUsername,omitempty" tf:"admin_username,omitempty"` + + // Should Extension Operations be allowed on this Virtual Machine? Defaults to true. + // +kubebuilder:validation:Optional + AllowExtensionOperations *bool `json:"allowExtensionOperations,omitempty" tf:"allow_extension_operations,omitempty"` + + // Specifies the ID of the Availability Set in which the Virtual Machine should exist. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + AvailabilitySetID *string `json:"availabilitySetId,omitempty" tf:"availability_set_id,omitempty"` + + // A boot_diagnostics block as defined below. + // +kubebuilder:validation:Optional + BootDiagnostics *WindowsVirtualMachineBootDiagnosticsParameters `json:"bootDiagnostics,omitempty" tf:"boot_diagnostics,omitempty"` + + // Specifies whether to skip platform scheduled patching when a user schedule is associated with the VM. Defaults to false. + // +kubebuilder:validation:Optional + BypassPlatformSafetyChecksOnUserScheduleEnabled *bool `json:"bypassPlatformSafetyChecksOnUserScheduleEnabled,omitempty" tf:"bypass_platform_safety_checks_on_user_schedule_enabled,omitempty"` + + // Specifies the ID of the Capacity Reservation Group which the Virtual Machine should be allocated to. 
+ // +kubebuilder:validation:Optional + CapacityReservationGroupID *string `json:"capacityReservationGroupId,omitempty" tf:"capacity_reservation_group_id,omitempty"` + + // Specifies the Hostname which should be used for this Virtual Machine. If unspecified this defaults to the value for the name field. If the value of the name field is not a valid computer_name, then you must specify computer_name. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ComputerName *string `json:"computerName,omitempty" tf:"computer_name,omitempty"` + + // The Base64-Encoded Custom Data which should be used for this Virtual Machine. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + CustomDataSecretRef *v1.SecretKeySelector `json:"customDataSecretRef,omitempty" tf:"-"` + + // The ID of a Dedicated Host Group that this Windows Virtual Machine should be run within. Conflicts with dedicated_host_id. + // +kubebuilder:validation:Optional + DedicatedHostGroupID *string `json:"dedicatedHostGroupId,omitempty" tf:"dedicated_host_group_id,omitempty"` + + // The ID of a Dedicated Host where this machine should be run on. Conflicts with dedicated_host_group_id. + // +kubebuilder:validation:Optional + DedicatedHostID *string `json:"dedicatedHostId,omitempty" tf:"dedicated_host_id,omitempty"` + + // Specifies the Disk Controller Type used for this Virtual Machine. Possible values are SCSI and NVMe. + // +kubebuilder:validation:Optional + DiskControllerType *string `json:"diskControllerType,omitempty" tf:"disk_controller_type,omitempty"` + + // Specifies the Edge Zone within the Azure Region where this Windows Virtual Machine should exist. Changing this forces a new Windows Virtual Machine to be created. + // +kubebuilder:validation:Optional + EdgeZone *string `json:"edgeZone,omitempty" tf:"edge_zone,omitempty"` + + // Specifies if Automatic Updates are Enabled for the Windows Virtual Machine. 
Changing this forces a new resource to be created. Defaults to true. + // +kubebuilder:validation:Optional + EnableAutomaticUpdates *bool `json:"enableAutomaticUpdates,omitempty" tf:"enable_automatic_updates,omitempty"` + + // Should all of the disks (including the temp disk) attached to this Virtual Machine be encrypted by enabling Encryption at Host? + // +kubebuilder:validation:Optional + EncryptionAtHostEnabled *bool `json:"encryptionAtHostEnabled,omitempty" tf:"encryption_at_host_enabled,omitempty"` + + // Specifies what should happen when the Virtual Machine is evicted for price reasons when using a Spot instance. Possible values are Deallocate and Delete. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + EvictionPolicy *string `json:"evictionPolicy,omitempty" tf:"eviction_policy,omitempty"` + + // Specifies the duration allocated for all extensions to start. The time duration should be between 15 minutes and 120 minutes (inclusive) and should be specified in ISO 8601 format. Defaults to PT1H30M. + // +kubebuilder:validation:Optional + ExtensionsTimeBudget *string `json:"extensionsTimeBudget,omitempty" tf:"extensions_time_budget,omitempty"` + + // One or more gallery_application blocks as defined below. + // +kubebuilder:validation:Optional + GalleryApplication []WindowsVirtualMachineGalleryApplicationParameters `json:"galleryApplication,omitempty" tf:"gallery_application,omitempty"` + + // Should the VM be patched without requiring a reboot? Possible values are true or false. Defaults to false. For more information about hot patching please see the product documentation. + // +kubebuilder:validation:Optional + HotpatchingEnabled *bool `json:"hotpatchingEnabled,omitempty" tf:"hotpatching_enabled,omitempty"` + + // An identity block as defined below. 
+ // +kubebuilder:validation:Optional + Identity *WindowsVirtualMachineIdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Specifies the type of on-premise license (also known as Azure Hybrid Use Benefit) which should be used for this Virtual Machine. Possible values are None, Windows_Client and Windows_Server. + // +kubebuilder:validation:Optional + LicenseType *string `json:"licenseType,omitempty" tf:"license_type,omitempty"` + + // The Azure location where the Windows Virtual Machine should exist. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The maximum price you're willing to pay for this Virtual Machine, in US Dollars; which must be greater than the current spot price. If this bid price falls below the current spot price the Virtual Machine will be evicted using the eviction_policy. Defaults to -1, which means that the Virtual Machine should not be evicted for price reasons. + // +kubebuilder:validation:Optional + MaxBidPrice *float64 `json:"maxBidPrice,omitempty" tf:"max_bid_price,omitempty"` + + // . A list of Network Interface IDs which should be attached to this Virtual Machine. The first Network Interface ID in this list will be the Primary Network Interface on the Virtual Machine. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.NetworkInterface + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + NetworkInterfaceIds []*string `json:"networkInterfaceIds,omitempty" tf:"network_interface_ids,omitempty"` + + // References to NetworkInterface in network to populate networkInterfaceIds. 
+ // +kubebuilder:validation:Optional + NetworkInterfaceIdsRefs []v1.Reference `json:"networkInterfaceIdsRefs,omitempty" tf:"-"` + + // Selector for a list of NetworkInterface in network to populate networkInterfaceIds. + // +kubebuilder:validation:Optional + NetworkInterfaceIdsSelector *v1.Selector `json:"networkInterfaceIdsSelector,omitempty" tf:"-"` + + // A os_disk block as defined below. + // +kubebuilder:validation:Optional + OsDisk *WindowsVirtualMachineOsDiskParameters `json:"osDisk,omitempty" tf:"os_disk,omitempty"` + + // A os_image_notification block as defined below. + // +kubebuilder:validation:Optional + OsImageNotification *WindowsVirtualMachineOsImageNotificationParameters `json:"osImageNotification,omitempty" tf:"os_image_notification,omitempty"` + + // Specifies the mode of VM Guest Patching for the Virtual Machine. Possible values are AutomaticByPlatform or ImageDefault. Defaults to ImageDefault. + // +kubebuilder:validation:Optional + PatchAssessmentMode *string `json:"patchAssessmentMode,omitempty" tf:"patch_assessment_mode,omitempty"` + + // Specifies the mode of in-guest patching to this Windows Virtual Machine. Possible values are Manual, AutomaticByOS and AutomaticByPlatform. Defaults to AutomaticByOS. For more information on patch modes please see the product documentation. + // +kubebuilder:validation:Optional + PatchMode *string `json:"patchMode,omitempty" tf:"patch_mode,omitempty"` + + // A plan block as defined below. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Plan *WindowsVirtualMachinePlanParameters `json:"plan,omitempty" tf:"plan,omitempty"` + + // Specifies the Platform Fault Domain in which this Windows Virtual Machine should be created. Defaults to -1, which means this will be automatically assigned to a fault domain that best maintains balance across the available fault domains. Changing this forces a new Windows Virtual Machine to be created. 
+ // +kubebuilder:validation:Optional + PlatformFaultDomain *float64 `json:"platformFaultDomain,omitempty" tf:"platform_fault_domain,omitempty"` + + // Specifies the priority of this Virtual Machine. Possible values are Regular and Spot. Defaults to Regular. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Priority *string `json:"priority,omitempty" tf:"priority,omitempty"` + + // Should the Azure VM Agent be provisioned on this Virtual Machine? Defaults to true. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ProvisionVMAgent *bool `json:"provisionVmAgent,omitempty" tf:"provision_vm_agent,omitempty"` + + // The ID of the Proximity Placement Group which the Virtual Machine should be assigned to. + // +kubebuilder:validation:Optional + ProximityPlacementGroupID *string `json:"proximityPlacementGroupId,omitempty" tf:"proximity_placement_group_id,omitempty"` + + // Specifies the reboot setting for platform scheduled patching. Possible values are Always, IfRequired and Never. + // +kubebuilder:validation:Optional + RebootSetting *string `json:"rebootSetting,omitempty" tf:"reboot_setting,omitempty"` + + // The name of the Resource Group in which the Windows Virtual Machine should be exist. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. 
+ // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // One or more secret blocks as defined below. + // +kubebuilder:validation:Optional + Secret []WindowsVirtualMachineSecretParameters `json:"secret,omitempty" tf:"secret,omitempty"` + + // Specifies if Secure Boot and Trusted Launch is enabled for the Virtual Machine. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + SecureBootEnabled *bool `json:"secureBootEnabled,omitempty" tf:"secure_boot_enabled,omitempty"` + + // The SKU which should be used for this Virtual Machine, such as Standard_F2. + // +kubebuilder:validation:Optional + Size *string `json:"size,omitempty" tf:"size,omitempty"` + + // The ID of the Image which this Virtual Machine should be created from. Changing this forces a new resource to be created. Possible Image ID types include Image IDs, Shared Image IDs, Shared Image Version IDs, Community Gallery Image IDs, Community Gallery Image Version IDs, Shared Gallery Image IDs and Shared Gallery Image Version IDs. + // +kubebuilder:validation:Optional + SourceImageID *string `json:"sourceImageId,omitempty" tf:"source_image_id,omitempty"` + + // A source_image_reference block as defined below. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + SourceImageReference *WindowsVirtualMachineSourceImageReferenceParameters `json:"sourceImageReference,omitempty" tf:"source_image_reference,omitempty"` + + // A mapping of tags which should be assigned to this Virtual Machine. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A termination_notification block as defined below. 
+ // +kubebuilder:validation:Optional + TerminationNotification *WindowsVirtualMachineTerminationNotificationParameters `json:"terminationNotification,omitempty" tf:"termination_notification,omitempty"` + + // Specifies the Time Zone which should be used by the Virtual Machine, the possible values are defined here. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` + + // The Base64-Encoded User Data which should be used for this Virtual Machine. + // +kubebuilder:validation:Optional + UserData *string `json:"userData,omitempty" tf:"user_data,omitempty"` + + // Specifies whether VMAgent Platform Updates is enabled. Defaults to false. + // +kubebuilder:validation:Optional + VMAgentPlatformUpdatesEnabled *bool `json:"vmAgentPlatformUpdatesEnabled,omitempty" tf:"vm_agent_platform_updates_enabled,omitempty"` + + // Specifies the Orchestrated Virtual Machine Scale Set that this Virtual Machine should be created within. + // +kubebuilder:validation:Optional + VirtualMachineScaleSetID *string `json:"virtualMachineScaleSetId,omitempty" tf:"virtual_machine_scale_set_id,omitempty"` + + // Specifies if vTPM (virtual Trusted Platform Module) and Trusted Launch is enabled for the Virtual Machine. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + VtpmEnabled *bool `json:"vtpmEnabled,omitempty" tf:"vtpm_enabled,omitempty"` + + // One or more winrm_listener blocks as defined below. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + WinrmListener []WindowsVirtualMachineWinrmListenerParameters `json:"winrmListener,omitempty" tf:"winrm_listener,omitempty"` + + // * zones - Specifies the Availability Zone in which this Windows Virtual Machine should be located. Changing this forces a new Windows Virtual Machine to be created. 
+ // +kubebuilder:validation:Optional + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` +} + +type WindowsVirtualMachinePlanInitParameters struct { + + // Specifies the Name of the Marketplace Image this Virtual Machine should be created from. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies the Product of the Marketplace Image this Virtual Machine should be created from. Changing this forces a new resource to be created. + Product *string `json:"product,omitempty" tf:"product,omitempty"` + + // Specifies the Publisher of the Marketplace Image this Virtual Machine should be created from. Changing this forces a new resource to be created. + Publisher *string `json:"publisher,omitempty" tf:"publisher,omitempty"` +} + +type WindowsVirtualMachinePlanObservation struct { + + // Specifies the Name of the Marketplace Image this Virtual Machine should be created from. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies the Product of the Marketplace Image this Virtual Machine should be created from. Changing this forces a new resource to be created. + Product *string `json:"product,omitempty" tf:"product,omitempty"` + + // Specifies the Publisher of the Marketplace Image this Virtual Machine should be created from. Changing this forces a new resource to be created. + Publisher *string `json:"publisher,omitempty" tf:"publisher,omitempty"` +} + +type WindowsVirtualMachinePlanParameters struct { + + // Specifies the Name of the Marketplace Image this Virtual Machine should be created from. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Specifies the Product of the Marketplace Image this Virtual Machine should be created from. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + Product *string `json:"product" tf:"product,omitempty"` + + // Specifies the Publisher of the Marketplace Image this Virtual Machine should be created from. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Publisher *string `json:"publisher" tf:"publisher,omitempty"` +} + +type WindowsVirtualMachineSecretCertificateInitParameters struct { + + // The certificate store on the Virtual Machine where the certificate should be added. + Store *string `json:"store,omitempty" tf:"store,omitempty"` + + // The Secret URL of a Key Vault Certificate. + URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +type WindowsVirtualMachineSecretCertificateObservation struct { + + // The certificate store on the Virtual Machine where the certificate should be added. + Store *string `json:"store,omitempty" tf:"store,omitempty"` + + // The Secret URL of a Key Vault Certificate. + URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +type WindowsVirtualMachineSecretCertificateParameters struct { + + // The certificate store on the Virtual Machine where the certificate should be added. + // +kubebuilder:validation:Optional + Store *string `json:"store" tf:"store,omitempty"` + + // The Secret URL of a Key Vault Certificate. + // +kubebuilder:validation:Optional + URL *string `json:"url" tf:"url,omitempty"` +} + +type WindowsVirtualMachineSecretInitParameters struct { + + // One or more certificate blocks as defined above. + Certificate []WindowsVirtualMachineSecretCertificateInitParameters `json:"certificate,omitempty" tf:"certificate,omitempty"` + + // The ID of the Key Vault from which all Secrets should be sourced. + KeyVaultID *string `json:"keyVaultId,omitempty" tf:"key_vault_id,omitempty"` +} + +type WindowsVirtualMachineSecretObservation struct { + + // One or more certificate blocks as defined above. 
+ Certificate []WindowsVirtualMachineSecretCertificateObservation `json:"certificate,omitempty" tf:"certificate,omitempty"` + + // The ID of the Key Vault from which all Secrets should be sourced. + KeyVaultID *string `json:"keyVaultId,omitempty" tf:"key_vault_id,omitempty"` +} + +type WindowsVirtualMachineSecretParameters struct { + + // One or more certificate blocks as defined above. + // +kubebuilder:validation:Optional + Certificate []WindowsVirtualMachineSecretCertificateParameters `json:"certificate" tf:"certificate,omitempty"` + + // The ID of the Key Vault from which all Secrets should be sourced. + // +kubebuilder:validation:Optional + KeyVaultID *string `json:"keyVaultId" tf:"key_vault_id,omitempty"` +} + +type WindowsVirtualMachineSourceImageReferenceInitParameters struct { + + // Specifies the offer of the image used to create the virtual machines. Changing this forces a new resource to be created. + Offer *string `json:"offer,omitempty" tf:"offer,omitempty"` + + // Specifies the publisher of the image used to create the virtual machines. Changing this forces a new resource to be created. + Publisher *string `json:"publisher,omitempty" tf:"publisher,omitempty"` + + // Specifies the SKU of the image used to create the virtual machines. Changing this forces a new resource to be created. + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // Specifies the version of the image used to create the virtual machines. Changing this forces a new resource to be created. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type WindowsVirtualMachineSourceImageReferenceObservation struct { + + // Specifies the offer of the image used to create the virtual machines. Changing this forces a new resource to be created. + Offer *string `json:"offer,omitempty" tf:"offer,omitempty"` + + // Specifies the publisher of the image used to create the virtual machines. Changing this forces a new resource to be created. 
+ Publisher *string `json:"publisher,omitempty" tf:"publisher,omitempty"` + + // Specifies the SKU of the image used to create the virtual machines. Changing this forces a new resource to be created. + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // Specifies the version of the image used to create the virtual machines. Changing this forces a new resource to be created. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type WindowsVirtualMachineSourceImageReferenceParameters struct { + + // Specifies the offer of the image used to create the virtual machines. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Offer *string `json:"offer" tf:"offer,omitempty"` + + // Specifies the publisher of the image used to create the virtual machines. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Publisher *string `json:"publisher" tf:"publisher,omitempty"` + + // Specifies the SKU of the image used to create the virtual machines. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Sku *string `json:"sku" tf:"sku,omitempty"` + + // Specifies the version of the image used to create the virtual machines. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Version *string `json:"version" tf:"version,omitempty"` +} + +type WindowsVirtualMachineTerminationNotificationInitParameters struct { + + // Should the termination notification be enabled on this Virtual Machine? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Length of time (in minutes, between 5 and 15) a notification to be sent to the VM on the instance metadata server till the VM gets deleted. The time duration should be specified in ISO 8601 format. Defaults to PT5M. 
+ Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` +} + +type WindowsVirtualMachineTerminationNotificationObservation struct { + + // Should the termination notification be enabled on this Virtual Machine? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Length of time (in minutes, between 5 and 15) a notification to be sent to the VM on the instance metadata server till the VM gets deleted. The time duration should be specified in ISO 8601 format. Defaults to PT5M. + Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` +} + +type WindowsVirtualMachineTerminationNotificationParameters struct { + + // Should the termination notification be enabled on this Virtual Machine? + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` + + // Length of time (in minutes, between 5 and 15) a notification to be sent to the VM on the instance metadata server till the VM gets deleted. The time duration should be specified in ISO 8601 format. Defaults to PT5M. + // +kubebuilder:validation:Optional + Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` +} + +type WindowsVirtualMachineWinrmListenerInitParameters struct { + + // The Secret URL of a Key Vault Certificate, which must be specified when protocol is set to Https. Changing this forces a new resource to be created. + CertificateURL *string `json:"certificateUrl,omitempty" tf:"certificate_url,omitempty"` + + // Specifies the protocol of listener. Possible values are Http or Https. Changing this forces a new resource to be created. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` +} + +type WindowsVirtualMachineWinrmListenerObservation struct { + + // The Secret URL of a Key Vault Certificate, which must be specified when protocol is set to Https. Changing this forces a new resource to be created. 
+ CertificateURL *string `json:"certificateUrl,omitempty" tf:"certificate_url,omitempty"` + + // Specifies the protocol of listener. Possible values are Http or Https. Changing this forces a new resource to be created. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` +} + +type WindowsVirtualMachineWinrmListenerParameters struct { + + // The Secret URL of a Key Vault Certificate, which must be specified when protocol is set to Https. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + CertificateURL *string `json:"certificateUrl,omitempty" tf:"certificate_url,omitempty"` + + // Specifies the protocol of listener. Possible values are Http or Https. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Protocol *string `json:"protocol" tf:"protocol,omitempty"` +} + +// WindowsVirtualMachineSpec defines the desired state of WindowsVirtualMachine +type WindowsVirtualMachineSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider WindowsVirtualMachineParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider WindowsVirtualMachineInitParameters `json:"initProvider,omitempty"` +} + +// WindowsVirtualMachineStatus defines the observed state of WindowsVirtualMachine. 
+type WindowsVirtualMachineStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider WindowsVirtualMachineObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// WindowsVirtualMachine is the Schema for the WindowsVirtualMachines API. Manages a Windows Virtual Machine. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type WindowsVirtualMachine struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.adminPasswordSecretRef)",message="spec.forProvider.adminPasswordSecretRef is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.adminUsername) || (has(self.initProvider) && has(self.initProvider.adminUsername))",message="spec.forProvider.adminUsername is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies 
|| 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.osDisk) || (has(self.initProvider) && has(self.initProvider.osDisk))",message="spec.forProvider.osDisk is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.size) || (has(self.initProvider) && has(self.initProvider.size))",message="spec.forProvider.size is a required parameter" + Spec WindowsVirtualMachineSpec `json:"spec"` + Status WindowsVirtualMachineStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// WindowsVirtualMachineList contains a list of WindowsVirtualMachines +type WindowsVirtualMachineList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []WindowsVirtualMachine `json:"items"` +} + +// Repository type metadata. +var ( + WindowsVirtualMachine_Kind = "WindowsVirtualMachine" + WindowsVirtualMachine_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: WindowsVirtualMachine_Kind}.String() + WindowsVirtualMachine_KindAPIVersion = WindowsVirtualMachine_Kind + "." + CRDGroupVersion.String() + WindowsVirtualMachine_GroupVersionKind = CRDGroupVersion.WithKind(WindowsVirtualMachine_Kind) +) + +func init() { + SchemeBuilder.Register(&WindowsVirtualMachine{}, &WindowsVirtualMachineList{}) +} diff --git a/apis/compute/v1beta2/zz_windowsvirtualmachinescaleset_terraformed.go b/apis/compute/v1beta2/zz_windowsvirtualmachinescaleset_terraformed.go new file mode 100755 index 000000000..ec5709922 --- /dev/null +++ b/apis/compute/v1beta2/zz_windowsvirtualmachinescaleset_terraformed.go @@ -0,0 +1,130 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this WindowsVirtualMachineScaleSet +func (mg *WindowsVirtualMachineScaleSet) GetTerraformResourceType() string { + return "azurerm_windows_virtual_machine_scale_set" +} + +// GetConnectionDetailsMapping for this WindowsVirtualMachineScaleSet +func (tr *WindowsVirtualMachineScaleSet) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"additional_unattend_content[*].content": "spec.forProvider.additionalUnattendContent[*].contentSecretRef", "admin_password": "spec.forProvider.adminPasswordSecretRef", "custom_data": "spec.forProvider.customDataSecretRef", "extension[*].protected_settings": "spec.forProvider.extension[*].protectedSettingsSecretRef"} +} + +// GetObservation of this WindowsVirtualMachineScaleSet +func (tr *WindowsVirtualMachineScaleSet) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this WindowsVirtualMachineScaleSet +func (tr *WindowsVirtualMachineScaleSet) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this WindowsVirtualMachineScaleSet +func (tr *WindowsVirtualMachineScaleSet) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this WindowsVirtualMachineScaleSet +func (tr *WindowsVirtualMachineScaleSet) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, 
err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this WindowsVirtualMachineScaleSet +func (tr *WindowsVirtualMachineScaleSet) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this WindowsVirtualMachineScaleSet +func (tr *WindowsVirtualMachineScaleSet) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this WindowsVirtualMachineScaleSet +func (tr *WindowsVirtualMachineScaleSet) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this WindowsVirtualMachineScaleSet using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *WindowsVirtualMachineScaleSet) LateInitialize(attrs []byte) (bool, error) { + params := &WindowsVirtualMachineScaleSetParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + opts = append(opts, resource.WithNameFilter("ScaleInPolicy")) + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *WindowsVirtualMachineScaleSet) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/compute/v1beta2/zz_windowsvirtualmachinescaleset_types.go b/apis/compute/v1beta2/zz_windowsvirtualmachinescaleset_types.go new file mode 100755 index 000000000..0b452a6a7 --- /dev/null +++ b/apis/compute/v1beta2/zz_windowsvirtualmachinescaleset_types.go @@ -0,0 +1,1950 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type IPConfigurationPublicIPAddressIPTagInitParameters struct { + + // The IP Tag associated with the Public IP, such as SQL or Storage. Changing this forces a new resource to be created. + Tag *string `json:"tag,omitempty" tf:"tag,omitempty"` + + // The Type of IP Tag, such as FirstPartyUsage. Changing this forces a new resource to be created. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IPConfigurationPublicIPAddressIPTagObservation struct { + + // The IP Tag associated with the Public IP, such as SQL or Storage. 
Changing this forces a new resource to be created. + Tag *string `json:"tag,omitempty" tf:"tag,omitempty"` + + // The Type of IP Tag, such as FirstPartyUsage. Changing this forces a new resource to be created. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IPConfigurationPublicIPAddressIPTagParameters struct { + + // The IP Tag associated with the Public IP, such as SQL or Storage. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Tag *string `json:"tag" tf:"tag,omitempty"` + + // The Type of IP Tag, such as FirstPartyUsage. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type NetworkInterfaceIPConfigurationPublicIPAddressInitParameters struct { + + // The Prefix which should be used for the Domain Name Label for each Virtual Machine Instance. Azure concatenates the Domain Name Label and Virtual Machine Index to create a unique Domain Name Label for each Virtual Machine. + DomainNameLabel *string `json:"domainNameLabel,omitempty" tf:"domain_name_label,omitempty"` + + // One or more ip_tag blocks as defined above. Changing this forces a new resource to be created. + IPTag []IPConfigurationPublicIPAddressIPTagInitParameters `json:"ipTag,omitempty" tf:"ip_tag,omitempty"` + + // The Idle Timeout in Minutes for the Public IP Address. Possible values are in the range 4 to 32. + IdleTimeoutInMinutes *float64 `json:"idleTimeoutInMinutes,omitempty" tf:"idle_timeout_in_minutes,omitempty"` + + // The Name of the Public IP Address Configuration. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The ID of the Public IP Address Prefix from where Public IP Addresses should be allocated. Changing this forces a new resource to be created. 
+ PublicIPPrefixID *string `json:"publicIpPrefixId,omitempty" tf:"public_ip_prefix_id,omitempty"` + + // Specifies the version of the image used to create the virtual machines. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type NetworkInterfaceIPConfigurationPublicIPAddressObservation struct { + + // The Prefix which should be used for the Domain Name Label for each Virtual Machine Instance. Azure concatenates the Domain Name Label and Virtual Machine Index to create a unique Domain Name Label for each Virtual Machine. + DomainNameLabel *string `json:"domainNameLabel,omitempty" tf:"domain_name_label,omitempty"` + + // One or more ip_tag blocks as defined above. Changing this forces a new resource to be created. + IPTag []IPConfigurationPublicIPAddressIPTagObservation `json:"ipTag,omitempty" tf:"ip_tag,omitempty"` + + // The Idle Timeout in Minutes for the Public IP Address. Possible values are in the range 4 to 32. + IdleTimeoutInMinutes *float64 `json:"idleTimeoutInMinutes,omitempty" tf:"idle_timeout_in_minutes,omitempty"` + + // The Name of the Public IP Address Configuration. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The ID of the Public IP Address Prefix from where Public IP Addresses should be allocated. Changing this forces a new resource to be created. + PublicIPPrefixID *string `json:"publicIpPrefixId,omitempty" tf:"public_ip_prefix_id,omitempty"` + + // Specifies the version of the image used to create the virtual machines. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type NetworkInterfaceIPConfigurationPublicIPAddressParameters struct { + + // The Prefix which should be used for the Domain Name Label for each Virtual Machine Instance. Azure concatenates the Domain Name Label and Virtual Machine Index to create a unique Domain Name Label for each Virtual Machine. 
+ // +kubebuilder:validation:Optional + DomainNameLabel *string `json:"domainNameLabel,omitempty" tf:"domain_name_label,omitempty"` + + // One or more ip_tag blocks as defined above. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + IPTag []IPConfigurationPublicIPAddressIPTagParameters `json:"ipTag,omitempty" tf:"ip_tag,omitempty"` + + // The Idle Timeout in Minutes for the Public IP Address. Possible values are in the range 4 to 32. + // +kubebuilder:validation:Optional + IdleTimeoutInMinutes *float64 `json:"idleTimeoutInMinutes,omitempty" tf:"idle_timeout_in_minutes,omitempty"` + + // The Name of the Public IP Address Configuration. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The ID of the Public IP Address Prefix from where Public IP Addresses should be allocated. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + PublicIPPrefixID *string `json:"publicIpPrefixId,omitempty" tf:"public_ip_prefix_id,omitempty"` + + // Specifies the version of the image used to create the virtual machines. + // +kubebuilder:validation:Optional + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type WindowsVirtualMachineScaleSetAdditionalCapabilitiesInitParameters struct { + + // Should the capacity to enable Data Disks of the UltraSSD_LRS storage account type be supported on this Virtual Machine Scale Set? Possible values are true or false. Defaults to false. Changing this forces a new resource to be created. + UltraSsdEnabled *bool `json:"ultraSsdEnabled,omitempty" tf:"ultra_ssd_enabled,omitempty"` +} + +type WindowsVirtualMachineScaleSetAdditionalCapabilitiesObservation struct { + + // Should the capacity to enable Data Disks of the UltraSSD_LRS storage account type be supported on this Virtual Machine Scale Set? Possible values are true or false. Defaults to false. Changing this forces a new resource to be created. 
+ UltraSsdEnabled *bool `json:"ultraSsdEnabled,omitempty" tf:"ultra_ssd_enabled,omitempty"` +} + +type WindowsVirtualMachineScaleSetAdditionalCapabilitiesParameters struct { + + // Should the capacity to enable Data Disks of the UltraSSD_LRS storage account type be supported on this Virtual Machine Scale Set? Possible values are true or false. Defaults to false. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + UltraSsdEnabled *bool `json:"ultraSsdEnabled,omitempty" tf:"ultra_ssd_enabled,omitempty"` +} + +type WindowsVirtualMachineScaleSetAdditionalUnattendContentInitParameters struct { + + // The name of the setting to which the content applies. Possible values are AutoLogon and FirstLogonCommands. Changing this forces a new resource to be created. + Setting *string `json:"setting,omitempty" tf:"setting,omitempty"` +} + +type WindowsVirtualMachineScaleSetAdditionalUnattendContentObservation struct { + + // The name of the setting to which the content applies. Possible values are AutoLogon and FirstLogonCommands. Changing this forces a new resource to be created. + Setting *string `json:"setting,omitempty" tf:"setting,omitempty"` +} + +type WindowsVirtualMachineScaleSetAdditionalUnattendContentParameters struct { + + // The XML formatted content that is added to the unattend.xml file for the specified path and component. Changing this forces a new resource to be created. + // +kubebuilder:validation:Required + ContentSecretRef v1.SecretKeySelector `json:"contentSecretRef" tf:"-"` + + // The name of the setting to which the content applies. Possible values are AutoLogon and FirstLogonCommands. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Setting *string `json:"setting" tf:"setting,omitempty"` +} + +type WindowsVirtualMachineScaleSetAutomaticInstanceRepairInitParameters struct { + + // Should the automatic instance repair be enabled on this Virtual Machine Scale Set? 
+ Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Amount of time (in minutes, between 30 and 90) for which automatic repairs will be delayed. The grace period starts right after the VM is found unhealthy. The time duration should be specified in ISO 8601 format. Defaults to PT30M. + GracePeriod *string `json:"gracePeriod,omitempty" tf:"grace_period,omitempty"` +} + +type WindowsVirtualMachineScaleSetAutomaticInstanceRepairObservation struct { + + // Should the automatic instance repair be enabled on this Virtual Machine Scale Set? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Amount of time (in minutes, between 30 and 90) for which automatic repairs will be delayed. The grace period starts right after the VM is found unhealthy. The time duration should be specified in ISO 8601 format. Defaults to PT30M. + GracePeriod *string `json:"gracePeriod,omitempty" tf:"grace_period,omitempty"` +} + +type WindowsVirtualMachineScaleSetAutomaticInstanceRepairParameters struct { + + // Should the automatic instance repair be enabled on this Virtual Machine Scale Set? + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` + + // Amount of time (in minutes, between 30 and 90) for which automatic repairs will be delayed. The grace period starts right after the VM is found unhealthy. The time duration should be specified in ISO 8601 format. Defaults to PT30M. + // +kubebuilder:validation:Optional + GracePeriod *string `json:"gracePeriod,omitempty" tf:"grace_period,omitempty"` +} + +type WindowsVirtualMachineScaleSetAutomaticOsUpgradePolicyInitParameters struct { + + // Should automatic rollbacks be disabled? + DisableAutomaticRollback *bool `json:"disableAutomaticRollback,omitempty" tf:"disable_automatic_rollback,omitempty"` + + // Should OS Upgrades automatically be applied to Scale Set instances in a rolling fashion when a newer version of the OS Image becomes available? 
+ EnableAutomaticOsUpgrade *bool `json:"enableAutomaticOsUpgrade,omitempty" tf:"enable_automatic_os_upgrade,omitempty"` +} + +type WindowsVirtualMachineScaleSetAutomaticOsUpgradePolicyObservation struct { + + // Should automatic rollbacks be disabled? + DisableAutomaticRollback *bool `json:"disableAutomaticRollback,omitempty" tf:"disable_automatic_rollback,omitempty"` + + // Should OS Upgrades automatically be applied to Scale Set instances in a rolling fashion when a newer version of the OS Image becomes available? + EnableAutomaticOsUpgrade *bool `json:"enableAutomaticOsUpgrade,omitempty" tf:"enable_automatic_os_upgrade,omitempty"` +} + +type WindowsVirtualMachineScaleSetAutomaticOsUpgradePolicyParameters struct { + + // Should automatic rollbacks be disabled? + // +kubebuilder:validation:Optional + DisableAutomaticRollback *bool `json:"disableAutomaticRollback" tf:"disable_automatic_rollback,omitempty"` + + // Should OS Upgrades automatically be applied to Scale Set instances in a rolling fashion when a newer version of the OS Image becomes available? + // +kubebuilder:validation:Optional + EnableAutomaticOsUpgrade *bool `json:"enableAutomaticOsUpgrade" tf:"enable_automatic_os_upgrade,omitempty"` +} + +type WindowsVirtualMachineScaleSetBootDiagnosticsInitParameters struct { + + // The Primary/Secondary Endpoint for the Azure Storage Account which should be used to store Boot Diagnostics, including Console Output and Screenshots from the Hypervisor. + StorageAccountURI *string `json:"storageAccountUri,omitempty" tf:"storage_account_uri,omitempty"` +} + +type WindowsVirtualMachineScaleSetBootDiagnosticsObservation struct { + + // The Primary/Secondary Endpoint for the Azure Storage Account which should be used to store Boot Diagnostics, including Console Output and Screenshots from the Hypervisor. 
+ StorageAccountURI *string `json:"storageAccountUri,omitempty" tf:"storage_account_uri,omitempty"` +} + +type WindowsVirtualMachineScaleSetBootDiagnosticsParameters struct { + + // The Primary/Secondary Endpoint for the Azure Storage Account which should be used to store Boot Diagnostics, including Console Output and Screenshots from the Hypervisor. + // +kubebuilder:validation:Optional + StorageAccountURI *string `json:"storageAccountUri,omitempty" tf:"storage_account_uri,omitempty"` +} + +type WindowsVirtualMachineScaleSetDataDiskInitParameters struct { + + // The type of Caching which should be used for this Data Disk. Possible values are None, ReadOnly and ReadWrite. + Caching *string `json:"caching,omitempty" tf:"caching,omitempty"` + + // The create option which should be used for this Data Disk. Possible values are Empty and FromImage. Defaults to Empty. (FromImage should only be used if the source image includes data disks). + CreateOption *string `json:"createOption,omitempty" tf:"create_option,omitempty"` + + // The ID of the Disk Encryption Set which should be used to encrypt this OS Disk. Conflicts with secure_vm_disk_encryption_set_id. Changing this forces a new resource to be created. + DiskEncryptionSetID *string `json:"diskEncryptionSetId,omitempty" tf:"disk_encryption_set_id,omitempty"` + + // The size of the Data Disk which should be created. + DiskSizeGb *float64 `json:"diskSizeGb,omitempty" tf:"disk_size_gb,omitempty"` + + // The Logical Unit Number of the Data Disk, which must be unique within the Virtual Machine. + Lun *float64 `json:"lun,omitempty" tf:"lun,omitempty"` + + // The name of the Data Disk. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Type of Storage Account which should back this Data Disk. Possible values include Standard_LRS, StandardSSD_LRS, StandardSSD_ZRS, Premium_LRS, PremiumV2_LRS, Premium_ZRS and UltraSSD_LRS. 
+ StorageAccountType *string `json:"storageAccountType,omitempty" tf:"storage_account_type,omitempty"` + + // Specifies the Read-Write IOPS for this Data Disk. Only settable when storage_account_type is PremiumV2_LRS or UltraSSD_LRS. + UltraSsdDiskIopsReadWrite *float64 `json:"ultraSsdDiskIopsReadWrite,omitempty" tf:"ultra_ssd_disk_iops_read_write,omitempty"` + + // Specifies the bandwidth in MB per second for this Data Disk. Only settable when storage_account_type is PremiumV2_LRS or UltraSSD_LRS. + UltraSsdDiskMbpsReadWrite *float64 `json:"ultraSsdDiskMbpsReadWrite,omitempty" tf:"ultra_ssd_disk_mbps_read_write,omitempty"` + + // Should Write Accelerator be Enabled for this OS Disk? Defaults to false. + WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty" tf:"write_accelerator_enabled,omitempty"` +} + +type WindowsVirtualMachineScaleSetDataDiskObservation struct { + + // The type of Caching which should be used for this Data Disk. Possible values are None, ReadOnly and ReadWrite. + Caching *string `json:"caching,omitempty" tf:"caching,omitempty"` + + // The create option which should be used for this Data Disk. Possible values are Empty and FromImage. Defaults to Empty. (FromImage should only be used if the source image includes data disks). + CreateOption *string `json:"createOption,omitempty" tf:"create_option,omitempty"` + + // The ID of the Disk Encryption Set which should be used to encrypt this OS Disk. Conflicts with secure_vm_disk_encryption_set_id. Changing this forces a new resource to be created. + DiskEncryptionSetID *string `json:"diskEncryptionSetId,omitempty" tf:"disk_encryption_set_id,omitempty"` + + // The size of the Data Disk which should be created. + DiskSizeGb *float64 `json:"diskSizeGb,omitempty" tf:"disk_size_gb,omitempty"` + + // The Logical Unit Number of the Data Disk, which must be unique within the Virtual Machine. + Lun *float64 `json:"lun,omitempty" tf:"lun,omitempty"` + + // The name of the Data Disk. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Type of Storage Account which should back this Data Disk. Possible values include Standard_LRS, StandardSSD_LRS, StandardSSD_ZRS, Premium_LRS, PremiumV2_LRS, Premium_ZRS and UltraSSD_LRS. + StorageAccountType *string `json:"storageAccountType,omitempty" tf:"storage_account_type,omitempty"` + + // Specifies the Read-Write IOPS for this Data Disk. Only settable when storage_account_type is PremiumV2_LRS or UltraSSD_LRS. + UltraSsdDiskIopsReadWrite *float64 `json:"ultraSsdDiskIopsReadWrite,omitempty" tf:"ultra_ssd_disk_iops_read_write,omitempty"` + + // Specifies the bandwidth in MB per second for this Data Disk. Only settable when storage_account_type is PremiumV2_LRS or UltraSSD_LRS. + UltraSsdDiskMbpsReadWrite *float64 `json:"ultraSsdDiskMbpsReadWrite,omitempty" tf:"ultra_ssd_disk_mbps_read_write,omitempty"` + + // Should Write Accelerator be Enabled for this OS Disk? Defaults to false. + WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty" tf:"write_accelerator_enabled,omitempty"` +} + +type WindowsVirtualMachineScaleSetDataDiskParameters struct { + + // The type of Caching which should be used for this Data Disk. Possible values are None, ReadOnly and ReadWrite. + // +kubebuilder:validation:Optional + Caching *string `json:"caching" tf:"caching,omitempty"` + + // The create option which should be used for this Data Disk. Possible values are Empty and FromImage. Defaults to Empty. (FromImage should only be used if the source image includes data disks). + // +kubebuilder:validation:Optional + CreateOption *string `json:"createOption,omitempty" tf:"create_option,omitempty"` + + // The ID of the Disk Encryption Set which should be used to encrypt this OS Disk. Conflicts with secure_vm_disk_encryption_set_id. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + DiskEncryptionSetID *string `json:"diskEncryptionSetId,omitempty" tf:"disk_encryption_set_id,omitempty"` + + // The size of the Data Disk which should be created. + // +kubebuilder:validation:Optional + DiskSizeGb *float64 `json:"diskSizeGb" tf:"disk_size_gb,omitempty"` + + // The Logical Unit Number of the Data Disk, which must be unique within the Virtual Machine. + // +kubebuilder:validation:Optional + Lun *float64 `json:"lun" tf:"lun,omitempty"` + + // The name of the Data Disk. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Type of Storage Account which should back this Data Disk. Possible values include Standard_LRS, StandardSSD_LRS, StandardSSD_ZRS, Premium_LRS, PremiumV2_LRS, Premium_ZRS and UltraSSD_LRS. + // +kubebuilder:validation:Optional + StorageAccountType *string `json:"storageAccountType" tf:"storage_account_type,omitempty"` + + // Specifies the Read-Write IOPS for this Data Disk. Only settable when storage_account_type is PremiumV2_LRS or UltraSSD_LRS. + // +kubebuilder:validation:Optional + UltraSsdDiskIopsReadWrite *float64 `json:"ultraSsdDiskIopsReadWrite,omitempty" tf:"ultra_ssd_disk_iops_read_write,omitempty"` + + // Specifies the bandwidth in MB per second for this Data Disk. Only settable when storage_account_type is PremiumV2_LRS or UltraSSD_LRS. + // +kubebuilder:validation:Optional + UltraSsdDiskMbpsReadWrite *float64 `json:"ultraSsdDiskMbpsReadWrite,omitempty" tf:"ultra_ssd_disk_mbps_read_write,omitempty"` + + // Should Write Accelerator be Enabled for this OS Disk? Defaults to false. + // +kubebuilder:validation:Optional + WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty" tf:"write_accelerator_enabled,omitempty"` +} + +type WindowsVirtualMachineScaleSetExtensionInitParameters struct { + + // Should the latest version of the Extension be used at Deployment Time, if one is available? 
This won't auto-update the extension on existing installation. Defaults to true. + AutoUpgradeMinorVersion *bool `json:"autoUpgradeMinorVersion,omitempty" tf:"auto_upgrade_minor_version,omitempty"` + + // Should the Extension be automatically updated whenever the Publisher releases a new version of this VM Extension? + AutomaticUpgradeEnabled *bool `json:"automaticUpgradeEnabled,omitempty" tf:"automatic_upgrade_enabled,omitempty"` + + // A value which, when different to the previous value can be used to force-run the Extension even if the Extension Configuration hasn't changed. + ForceUpdateTag *string `json:"forceUpdateTag,omitempty" tf:"force_update_tag,omitempty"` + + // The name for the Virtual Machine Scale Set Extension. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A protected_settings_from_key_vault block as defined below. + ProtectedSettingsFromKeyVault *WindowsVirtualMachineScaleSetExtensionProtectedSettingsFromKeyVaultInitParameters `json:"protectedSettingsFromKeyVault,omitempty" tf:"protected_settings_from_key_vault,omitempty"` + + // An ordered list of Extension names which this should be provisioned after. + ProvisionAfterExtensions []*string `json:"provisionAfterExtensions,omitempty" tf:"provision_after_extensions,omitempty"` + + // Specifies the Publisher of the Extension. + Publisher *string `json:"publisher,omitempty" tf:"publisher,omitempty"` + + // A JSON String which specifies Settings for the Extension. + Settings *string `json:"settings,omitempty" tf:"settings,omitempty"` + + // Specifies the Type of the Extension. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // Specifies the version of the extension to use, available versions can be found using the Azure CLI. 
+ TypeHandlerVersion *string `json:"typeHandlerVersion,omitempty" tf:"type_handler_version,omitempty"` +} + +type WindowsVirtualMachineScaleSetExtensionObservation struct { + + // Should the latest version of the Extension be used at Deployment Time, if one is available? This won't auto-update the extension on existing installation. Defaults to true. + AutoUpgradeMinorVersion *bool `json:"autoUpgradeMinorVersion,omitempty" tf:"auto_upgrade_minor_version,omitempty"` + + // Should the Extension be automatically updated whenever the Publisher releases a new version of this VM Extension? + AutomaticUpgradeEnabled *bool `json:"automaticUpgradeEnabled,omitempty" tf:"automatic_upgrade_enabled,omitempty"` + + // A value which, when different to the previous value can be used to force-run the Extension even if the Extension Configuration hasn't changed. + ForceUpdateTag *string `json:"forceUpdateTag,omitempty" tf:"force_update_tag,omitempty"` + + // The name for the Virtual Machine Scale Set Extension. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A protected_settings_from_key_vault block as defined below. + ProtectedSettingsFromKeyVault *WindowsVirtualMachineScaleSetExtensionProtectedSettingsFromKeyVaultObservation `json:"protectedSettingsFromKeyVault,omitempty" tf:"protected_settings_from_key_vault,omitempty"` + + // An ordered list of Extension names which this should be provisioned after. + ProvisionAfterExtensions []*string `json:"provisionAfterExtensions,omitempty" tf:"provision_after_extensions,omitempty"` + + // Specifies the Publisher of the Extension. + Publisher *string `json:"publisher,omitempty" tf:"publisher,omitempty"` + + // A JSON String which specifies Settings for the Extension. + Settings *string `json:"settings,omitempty" tf:"settings,omitempty"` + + // Specifies the Type of the Extension. 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // Specifies the version of the extension to use, available versions can be found using the Azure CLI. + TypeHandlerVersion *string `json:"typeHandlerVersion,omitempty" tf:"type_handler_version,omitempty"` +} + +type WindowsVirtualMachineScaleSetExtensionParameters struct { + + // Should the latest version of the Extension be used at Deployment Time, if one is available? This won't auto-update the extension on existing installation. Defaults to true. + // +kubebuilder:validation:Optional + AutoUpgradeMinorVersion *bool `json:"autoUpgradeMinorVersion,omitempty" tf:"auto_upgrade_minor_version,omitempty"` + + // Should the Extension be automatically updated whenever the Publisher releases a new version of this VM Extension? + // +kubebuilder:validation:Optional + AutomaticUpgradeEnabled *bool `json:"automaticUpgradeEnabled,omitempty" tf:"automatic_upgrade_enabled,omitempty"` + + // A value which, when different to the previous value can be used to force-run the Extension even if the Extension Configuration hasn't changed. + // +kubebuilder:validation:Optional + ForceUpdateTag *string `json:"forceUpdateTag,omitempty" tf:"force_update_tag,omitempty"` + + // The name for the Virtual Machine Scale Set Extension. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // A protected_settings_from_key_vault block as defined below. + // +kubebuilder:validation:Optional + ProtectedSettingsFromKeyVault *WindowsVirtualMachineScaleSetExtensionProtectedSettingsFromKeyVaultParameters `json:"protectedSettingsFromKeyVault,omitempty" tf:"protected_settings_from_key_vault,omitempty"` + + // A JSON String which specifies Sensitive Settings (such as Passwords) for the Extension. 
+ // +kubebuilder:validation:Optional + ProtectedSettingsSecretRef *v1.SecretKeySelector `json:"protectedSettingsSecretRef,omitempty" tf:"-"` + + // An ordered list of Extension names which this should be provisioned after. + // +kubebuilder:validation:Optional + ProvisionAfterExtensions []*string `json:"provisionAfterExtensions,omitempty" tf:"provision_after_extensions,omitempty"` + + // Specifies the Publisher of the Extension. + // +kubebuilder:validation:Optional + Publisher *string `json:"publisher" tf:"publisher,omitempty"` + + // A JSON String which specifies Settings for the Extension. + // +kubebuilder:validation:Optional + Settings *string `json:"settings,omitempty" tf:"settings,omitempty"` + + // Specifies the Type of the Extension. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` + + // Specifies the version of the extension to use, available versions can be found using the Azure CLI. + // +kubebuilder:validation:Optional + TypeHandlerVersion *string `json:"typeHandlerVersion" tf:"type_handler_version,omitempty"` +} + +type WindowsVirtualMachineScaleSetExtensionProtectedSettingsFromKeyVaultInitParameters struct { + + // The URL to the Key Vault Secret which stores the protected settings. + SecretURL *string `json:"secretUrl,omitempty" tf:"secret_url,omitempty"` + + // The ID of the source Key Vault. + SourceVaultID *string `json:"sourceVaultId,omitempty" tf:"source_vault_id,omitempty"` +} + +type WindowsVirtualMachineScaleSetExtensionProtectedSettingsFromKeyVaultObservation struct { + + // The URL to the Key Vault Secret which stores the protected settings. + SecretURL *string `json:"secretUrl,omitempty" tf:"secret_url,omitempty"` + + // The ID of the source Key Vault. 
+ SourceVaultID *string `json:"sourceVaultId,omitempty" tf:"source_vault_id,omitempty"` +} + +type WindowsVirtualMachineScaleSetExtensionProtectedSettingsFromKeyVaultParameters struct { + + // The URL to the Key Vault Secret which stores the protected settings. + // +kubebuilder:validation:Optional + SecretURL *string `json:"secretUrl" tf:"secret_url,omitempty"` + + // The ID of the source Key Vault. + // +kubebuilder:validation:Optional + SourceVaultID *string `json:"sourceVaultId" tf:"source_vault_id,omitempty"` +} + +type WindowsVirtualMachineScaleSetGalleryApplicationInitParameters struct { + + // Specifies the URI to an Azure Blob that will replace the default configuration for the package if provided. Changing this forces a new resource to be created. + ConfigurationBlobURI *string `json:"configurationBlobUri,omitempty" tf:"configuration_blob_uri,omitempty"` + + // Specifies the order in which the packages have to be installed. Possible values are between 0 and 2,147,483,647. Changing this forces a new resource to be created. + Order *float64 `json:"order,omitempty" tf:"order,omitempty"` + + // Specifies a passthrough value for more generic context. This field can be any valid string value. Changing this forces a new resource to be created. + Tag *string `json:"tag,omitempty" tf:"tag,omitempty"` + + // Specifies the Gallery Application Version resource ID. Changing this forces a new resource to be created. + VersionID *string `json:"versionId,omitempty" tf:"version_id,omitempty"` +} + +type WindowsVirtualMachineScaleSetGalleryApplicationObservation struct { + + // Specifies the URI to an Azure Blob that will replace the default configuration for the package if provided. Changing this forces a new resource to be created. + ConfigurationBlobURI *string `json:"configurationBlobUri,omitempty" tf:"configuration_blob_uri,omitempty"` + + // Specifies the order in which the packages have to be installed. Possible values are between 0 and 2,147,483,647. 
Changing this forces a new resource to be created. + Order *float64 `json:"order,omitempty" tf:"order,omitempty"` + + // Specifies a passthrough value for more generic context. This field can be any valid string value. Changing this forces a new resource to be created. + Tag *string `json:"tag,omitempty" tf:"tag,omitempty"` + + // Specifies the Gallery Application Version resource ID. Changing this forces a new resource to be created. + VersionID *string `json:"versionId,omitempty" tf:"version_id,omitempty"` +} + +type WindowsVirtualMachineScaleSetGalleryApplicationParameters struct { + + // Specifies the URI to an Azure Blob that will replace the default configuration for the package if provided. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ConfigurationBlobURI *string `json:"configurationBlobUri,omitempty" tf:"configuration_blob_uri,omitempty"` + + // Specifies the order in which the packages have to be installed. Possible values are between 0 and 2,147,483,647. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Order *float64 `json:"order,omitempty" tf:"order,omitempty"` + + // Specifies a passthrough value for more generic context. This field can be any valid string value. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Tag *string `json:"tag,omitempty" tf:"tag,omitempty"` + + // Specifies the Gallery Application Version resource ID. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + VersionID *string `json:"versionId" tf:"version_id,omitempty"` +} + +type WindowsVirtualMachineScaleSetGalleryApplicationsInitParameters struct { + ConfigurationReferenceBlobURI *string `json:"configurationReferenceBlobUri,omitempty" tf:"configuration_reference_blob_uri,omitempty"` + + // Specifies the order in which the packages have to be installed. Possible values are between 0 and 2,147,483,647. 
Changing this forces a new resource to be created. + Order *float64 `json:"order,omitempty" tf:"order,omitempty"` + + // The ID of the Windows Virtual Machine Scale Set. + PackageReferenceID *string `json:"packageReferenceId,omitempty" tf:"package_reference_id,omitempty"` + + // The IP Tag associated with the Public IP, such as SQL or Storage. Changing this forces a new resource to be created. + Tag *string `json:"tag,omitempty" tf:"tag,omitempty"` +} + +type WindowsVirtualMachineScaleSetGalleryApplicationsObservation struct { + ConfigurationReferenceBlobURI *string `json:"configurationReferenceBlobUri,omitempty" tf:"configuration_reference_blob_uri,omitempty"` + + // Specifies the order in which the packages have to be installed. Possible values are between 0 and 2,147,483,647. Changing this forces a new resource to be created. + Order *float64 `json:"order,omitempty" tf:"order,omitempty"` + + // The ID of the Windows Virtual Machine Scale Set. + PackageReferenceID *string `json:"packageReferenceId,omitempty" tf:"package_reference_id,omitempty"` + + // The IP Tag associated with the Public IP, such as SQL or Storage. Changing this forces a new resource to be created. + Tag *string `json:"tag,omitempty" tf:"tag,omitempty"` +} + +type WindowsVirtualMachineScaleSetGalleryApplicationsParameters struct { + + // +kubebuilder:validation:Optional + ConfigurationReferenceBlobURI *string `json:"configurationReferenceBlobUri,omitempty" tf:"configuration_reference_blob_uri,omitempty"` + + // Specifies the order in which the packages have to be installed. Possible values are between 0 and 2,147,483,647. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Order *float64 `json:"order,omitempty" tf:"order,omitempty"` + + // The ID of the Windows Virtual Machine Scale Set. 
+ // +kubebuilder:validation:Optional + PackageReferenceID *string `json:"packageReferenceId" tf:"package_reference_id,omitempty"` + + // The IP Tag associated with the Public IP, such as SQL or Storage. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Tag *string `json:"tag,omitempty" tf:"tag,omitempty"` +} + +type WindowsVirtualMachineScaleSetIdentityInitParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Windows Virtual Machine Scale Set. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Windows Virtual Machine Scale Set. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type WindowsVirtualMachineScaleSetIdentityObservation struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Windows Virtual Machine Scale Set. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Principal ID associated with this Managed Service Identity. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID associated with this Managed Service Identity. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Windows Virtual Machine Scale Set. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type WindowsVirtualMachineScaleSetIdentityParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Windows Virtual Machine Scale Set. 
+ // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Windows Virtual Machine Scale Set. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type WindowsVirtualMachineScaleSetInitParameters struct { + + // An additional_capabilities block as defined below. + AdditionalCapabilities *WindowsVirtualMachineScaleSetAdditionalCapabilitiesInitParameters `json:"additionalCapabilities,omitempty" tf:"additional_capabilities,omitempty"` + + // One or more additional_unattend_content blocks as defined below. Changing this forces a new resource to be created. + AdditionalUnattendContent []WindowsVirtualMachineScaleSetAdditionalUnattendContentInitParameters `json:"additionalUnattendContent,omitempty" tf:"additional_unattend_content,omitempty"` + + // The username of the local administrator on each Virtual Machine Scale Set instance. Changing this forces a new resource to be created. + AdminUsername *string `json:"adminUsername,omitempty" tf:"admin_username,omitempty"` + + // An automatic_instance_repair block as defined below. To enable the automatic instance repair, this Virtual Machine Scale Set must have a valid health_probe_id or an Application Health Extension. + AutomaticInstanceRepair *WindowsVirtualMachineScaleSetAutomaticInstanceRepairInitParameters `json:"automaticInstanceRepair,omitempty" tf:"automatic_instance_repair,omitempty"` + + // An automatic_os_upgrade_policy block as defined below. This can only be specified when upgrade_mode is set to either Automatic or Rolling. 
+ AutomaticOsUpgradePolicy *WindowsVirtualMachineScaleSetAutomaticOsUpgradePolicyInitParameters `json:"automaticOsUpgradePolicy,omitempty" tf:"automatic_os_upgrade_policy,omitempty"` + + // A boot_diagnostics block as defined below. + BootDiagnostics *WindowsVirtualMachineScaleSetBootDiagnosticsInitParameters `json:"bootDiagnostics,omitempty" tf:"boot_diagnostics,omitempty"` + + // Specifies the ID of the Capacity Reservation Group which the Virtual Machine Scale Set should be allocated to. Changing this forces a new resource to be created. + CapacityReservationGroupID *string `json:"capacityReservationGroupId,omitempty" tf:"capacity_reservation_group_id,omitempty"` + + // The prefix which should be used for the name of the Virtual Machines in this Scale Set. If unspecified this defaults to the value for the name field. If the value of the name field is not a valid computer_name_prefix, then you must specify computer_name_prefix. Changing this forces a new resource to be created. + ComputerNamePrefix *string `json:"computerNamePrefix,omitempty" tf:"computer_name_prefix,omitempty"` + + // One or more data_disk blocks as defined below. + DataDisk []WindowsVirtualMachineScaleSetDataDiskInitParameters `json:"dataDisk,omitempty" tf:"data_disk,omitempty"` + + // Should Virtual Machine Extensions be run on Overprovisioned Virtual Machines in the Scale Set? Defaults to false. + DoNotRunExtensionsOnOverprovisionedMachines *bool `json:"doNotRunExtensionsOnOverprovisionedMachines,omitempty" tf:"do_not_run_extensions_on_overprovisioned_machines,omitempty"` + + // Specifies the Edge Zone within the Azure Region where this Windows Virtual Machine Scale Set should exist. Changing this forces a new Windows Virtual Machine Scale Set to be created. + EdgeZone *string `json:"edgeZone,omitempty" tf:"edge_zone,omitempty"` + + // Are automatic updates enabled for this Virtual Machine? Defaults to true. 
+ EnableAutomaticUpdates *bool `json:"enableAutomaticUpdates,omitempty" tf:"enable_automatic_updates,omitempty"` + + // Should all of the disks (including the temp disk) attached to this Virtual Machine be encrypted by enabling Encryption at Host? + EncryptionAtHostEnabled *bool `json:"encryptionAtHostEnabled,omitempty" tf:"encryption_at_host_enabled,omitempty"` + + // Specifies the eviction policy for Virtual Machines in this Scale Set. Possible values are Deallocate and Delete. Changing this forces a new resource to be created. + EvictionPolicy *string `json:"evictionPolicy,omitempty" tf:"eviction_policy,omitempty"` + + // One or more extension blocks as defined below + Extension []WindowsVirtualMachineScaleSetExtensionInitParameters `json:"extension,omitempty" tf:"extension,omitempty"` + + // Should extension operations be allowed on the Virtual Machine Scale Set? Possible values are true or false. Defaults to true. Changing this forces a new Windows Virtual Machine Scale Set to be created. + ExtensionOperationsEnabled *bool `json:"extensionOperationsEnabled,omitempty" tf:"extension_operations_enabled,omitempty"` + + // Specifies the duration allocated for all extensions to start. The time duration should be between 15 minutes and 120 minutes (inclusive) and should be specified in ISO 8601 format. Defaults to PT1H30M. + ExtensionsTimeBudget *string `json:"extensionsTimeBudget,omitempty" tf:"extensions_time_budget,omitempty"` + + // One or more gallery_application blocks as defined below. + GalleryApplication []WindowsVirtualMachineScaleSetGalleryApplicationInitParameters `json:"galleryApplication,omitempty" tf:"gallery_application,omitempty"` + + GalleryApplications []WindowsVirtualMachineScaleSetGalleryApplicationsInitParameters `json:"galleryApplications,omitempty" tf:"gallery_applications,omitempty"` + + // The ID of a Load Balancer Probe which should be used to determine the health of an instance. 
This is Required and can only be specified when upgrade_mode is set to Automatic or Rolling. + HealthProbeID *string `json:"healthProbeId,omitempty" tf:"health_probe_id,omitempty"` + + // Specifies the ID of the dedicated host group that the virtual machine scale set resides in. Changing this forces a new resource to be created. + HostGroupID *string `json:"hostGroupId,omitempty" tf:"host_group_id,omitempty"` + + // An identity block as defined below. + Identity *WindowsVirtualMachineScaleSetIdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The number of Virtual Machines in the Scale Set. + Instances *float64 `json:"instances,omitempty" tf:"instances,omitempty"` + + // Specifies the type of on-premise license (also known as Azure Hybrid Use Benefit) which should be used for this Virtual Machine Scale Set. Possible values are None, Windows_Client and Windows_Server. + LicenseType *string `json:"licenseType,omitempty" tf:"license_type,omitempty"` + + // The Azure location where the Windows Virtual Machine Scale Set should exist. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The maximum price you're willing to pay for each Virtual Machine in this Scale Set, in US Dollars; which must be greater than the current spot price. If this bid price falls below the current spot price the Virtual Machines in the Scale Set will be evicted using the eviction_policy. Defaults to -1, which means that each Virtual Machine in the Scale Set should not be evicted for price reasons. + MaxBidPrice *float64 `json:"maxBidPrice,omitempty" tf:"max_bid_price,omitempty"` + + // One or more network_interface blocks as defined below. + NetworkInterface []WindowsVirtualMachineScaleSetNetworkInterfaceInitParameters `json:"networkInterface,omitempty" tf:"network_interface,omitempty"` + + // An os_disk block as defined below. 
+	OsDisk *WindowsVirtualMachineScaleSetOsDiskInitParameters `json:"osDisk,omitempty" tf:"os_disk,omitempty"`
+
+	// Should Azure over-provision Virtual Machines in this Scale Set? This means that multiple Virtual Machines will be provisioned and Azure will keep the instances which become available first - which improves provisioning success rates and improves deployment time. You're not billed for these over-provisioned VM's and they don't count towards the Subscription Quota. Defaults to true.
+	Overprovision *bool `json:"overprovision,omitempty" tf:"overprovision,omitempty"`
+
+	// A plan block as defined below. Changing this forces a new resource to be created.
+	Plan *WindowsVirtualMachineScaleSetPlanInitParameters `json:"plan,omitempty" tf:"plan,omitempty"`
+
+	// Specifies the number of fault domains that are used by this Windows Virtual Machine Scale Set. Changing this forces a new resource to be created.
+	PlatformFaultDomainCount *float64 `json:"platformFaultDomainCount,omitempty" tf:"platform_fault_domain_count,omitempty"`
+
+	// The Priority of this Virtual Machine Scale Set. Possible values are Regular and Spot. Defaults to Regular. Changing this value forces a new resource.
+	Priority *string `json:"priority,omitempty" tf:"priority,omitempty"`
+
+	// Should the Azure VM Agent be provisioned on each Virtual Machine in the Scale Set? Defaults to true. Changing this value forces a new resource to be created.
+	ProvisionVMAgent *bool `json:"provisionVmAgent,omitempty" tf:"provision_vm_agent,omitempty"`
+
+	// The ID of the Proximity Placement Group in which the Virtual Machine Scale Set should be assigned to. Changing this forces a new resource to be created.
+	ProximityPlacementGroupID *string `json:"proximityPlacementGroupId,omitempty" tf:"proximity_placement_group_id,omitempty"`
+
+	// A rolling_upgrade_policy block as defined below. This is Required and can only be specified when upgrade_mode is set to Automatic or Rolling. Changing this forces a new resource to be created.
+	RollingUpgradePolicy *WindowsVirtualMachineScaleSetRollingUpgradePolicyInitParameters `json:"rollingUpgradePolicy,omitempty" tf:"rolling_upgrade_policy,omitempty"`
+
+	// A scale_in block as defined below.
+	ScaleIn *WindowsVirtualMachineScaleSetScaleInInitParameters `json:"scaleIn,omitempty" tf:"scale_in,omitempty"`
+
+	// Deprecated: scaleInPolicy will be removed in favour of the scaleIn code block.
+	ScaleInPolicy *string `json:"scaleInPolicy,omitempty" tf:"scale_in_policy,omitempty"`
+
+	// One or more secret blocks as defined below.
+	Secret []WindowsVirtualMachineScaleSetSecretInitParameters `json:"secret,omitempty" tf:"secret,omitempty"`
+
+	// Specifies if Secure Boot and Trusted Launch is enabled for the Virtual Machine. Changing this forces a new resource to be created.
+	SecureBootEnabled *bool `json:"secureBootEnabled,omitempty" tf:"secure_boot_enabled,omitempty"`
+
+	// Should this Virtual Machine Scale Set be limited to a Single Placement Group, which means the number of instances will be capped at 100 Virtual Machines. Defaults to true.
+	SinglePlacementGroup *bool `json:"singlePlacementGroup,omitempty" tf:"single_placement_group,omitempty"`
+
+	// The Virtual Machine SKU for the Scale Set, such as Standard_F2.
+	Sku *string `json:"sku,omitempty" tf:"sku,omitempty"`
+
+	// The ID of an Image which each Virtual Machine in this Scale Set should be based on. Possible Image ID types include Image ID, Shared Image ID, Shared Image Version ID, Community Gallery Image ID, Community Gallery Image Version ID, Shared Gallery Image ID and Shared Gallery Image Version ID.
+	SourceImageID *string `json:"sourceImageId,omitempty" tf:"source_image_id,omitempty"`
+
+	// A source_image_reference block as defined below.
+ SourceImageReference *WindowsVirtualMachineScaleSetSourceImageReferenceInitParameters `json:"sourceImageReference,omitempty" tf:"source_image_reference,omitempty"` + + // A spot_restore block as defined below. + SpotRestore *WindowsVirtualMachineScaleSetSpotRestoreInitParameters `json:"spotRestore,omitempty" tf:"spot_restore,omitempty"` + + // A mapping of tags which should be assigned to this Virtual Machine Scale Set. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A terminate_notification block as defined below. + TerminateNotification *WindowsVirtualMachineScaleSetTerminateNotificationInitParameters `json:"terminateNotification,omitempty" tf:"terminate_notification,omitempty"` + + // A termination_notification block as defined below. + TerminationNotification *WindowsVirtualMachineScaleSetTerminationNotificationInitParameters `json:"terminationNotification,omitempty" tf:"termination_notification,omitempty"` + + // Specifies the time zone of the virtual machine, the possible values are defined here. + Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` + + // Specifies how Upgrades (e.g. changing the Image/SKU) should be performed to Virtual Machine Instances. Possible values are Automatic, Manual and Rolling. Defaults to Manual. Changing this forces a new resource to be created. + UpgradeMode *string `json:"upgradeMode,omitempty" tf:"upgrade_mode,omitempty"` + + // The Base64-Encoded User Data which should be used for this Virtual Machine Scale Set. + UserData *string `json:"userData,omitempty" tf:"user_data,omitempty"` + + // Specifies if vTPM (Virtual Trusted Platform Module) and Trusted Launch is enabled for the Virtual Machine. Changing this forces a new resource to be created. + VtpmEnabled *bool `json:"vtpmEnabled,omitempty" tf:"vtpm_enabled,omitempty"` + + // One or more winrm_listener blocks as defined below. Changing this forces a new resource to be created. 
+ WinrmListener []WindowsVirtualMachineScaleSetWinrmListenerInitParameters `json:"winrmListener,omitempty" tf:"winrm_listener,omitempty"` + + // Should the Virtual Machines in this Scale Set be strictly evenly distributed across Availability Zones? Defaults to false. Changing this forces a new resource to be created. + ZoneBalance *bool `json:"zoneBalance,omitempty" tf:"zone_balance,omitempty"` + + // Specifies a list of Availability Zones in which this Windows Virtual Machine Scale Set should be located. Changing this forces a new Windows Virtual Machine Scale Set to be created. + // +listType=set + Zones []*string `json:"zones,omitempty" tf:"zones,omitempty"` +} + +type WindowsVirtualMachineScaleSetNetworkInterfaceIPConfigurationInitParameters struct { + + // A list of Backend Address Pools ID's from a Application Gateway which this Virtual Machine Scale Set should be connected to. + // +listType=set + ApplicationGatewayBackendAddressPoolIds []*string `json:"applicationGatewayBackendAddressPoolIds,omitempty" tf:"application_gateway_backend_address_pool_ids,omitempty"` + + // A list of Application Security Group ID's which this Virtual Machine Scale Set should be connected to. + // +listType=set + ApplicationSecurityGroupIds []*string `json:"applicationSecurityGroupIds,omitempty" tf:"application_security_group_ids,omitempty"` + + // A list of Backend Address Pools ID's from a Load Balancer which this Virtual Machine Scale Set should be connected to. + // +listType=set + LoadBalancerBackendAddressPoolIds []*string `json:"loadBalancerBackendAddressPoolIds,omitempty" tf:"load_balancer_backend_address_pool_ids,omitempty"` + + // A list of NAT Rule ID's from a Load Balancer which this Virtual Machine Scale Set should be connected to. + // +listType=set + LoadBalancerInboundNATRulesIds []*string `json:"loadBalancerInboundNatRulesIds,omitempty" tf:"load_balancer_inbound_nat_rules_ids,omitempty"` + + // The Name of the Public IP Address Configuration. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Is this the Primary IP Configuration? + Primary *bool `json:"primary,omitempty" tf:"primary,omitempty"` + + // A public_ip_address block as defined below. + PublicIPAddress []NetworkInterfaceIPConfigurationPublicIPAddressInitParameters `json:"publicIpAddress,omitempty" tf:"public_ip_address,omitempty"` + + // The ID of the Subnet which this IP Configuration should be connected to. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // Specifies the version of the image used to create the virtual machines. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type WindowsVirtualMachineScaleSetNetworkInterfaceIPConfigurationObservation struct { + + // A list of Backend Address Pools ID's from a Application Gateway which this Virtual Machine Scale Set should be connected to. + // +listType=set + ApplicationGatewayBackendAddressPoolIds []*string `json:"applicationGatewayBackendAddressPoolIds,omitempty" tf:"application_gateway_backend_address_pool_ids,omitempty"` + + // A list of Application Security Group ID's which this Virtual Machine Scale Set should be connected to. 
+ // +listType=set + ApplicationSecurityGroupIds []*string `json:"applicationSecurityGroupIds,omitempty" tf:"application_security_group_ids,omitempty"` + + // A list of Backend Address Pools ID's from a Load Balancer which this Virtual Machine Scale Set should be connected to. + // +listType=set + LoadBalancerBackendAddressPoolIds []*string `json:"loadBalancerBackendAddressPoolIds,omitempty" tf:"load_balancer_backend_address_pool_ids,omitempty"` + + // A list of NAT Rule ID's from a Load Balancer which this Virtual Machine Scale Set should be connected to. + // +listType=set + LoadBalancerInboundNATRulesIds []*string `json:"loadBalancerInboundNatRulesIds,omitempty" tf:"load_balancer_inbound_nat_rules_ids,omitempty"` + + // The Name of the Public IP Address Configuration. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Is this the Primary IP Configuration? + Primary *bool `json:"primary,omitempty" tf:"primary,omitempty"` + + // A public_ip_address block as defined below. + PublicIPAddress []NetworkInterfaceIPConfigurationPublicIPAddressObservation `json:"publicIpAddress,omitempty" tf:"public_ip_address,omitempty"` + + // The ID of the Subnet which this IP Configuration should be connected to. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Specifies the version of the image used to create the virtual machines. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type WindowsVirtualMachineScaleSetNetworkInterfaceIPConfigurationParameters struct { + + // A list of Backend Address Pools ID's from a Application Gateway which this Virtual Machine Scale Set should be connected to. 
+ // +kubebuilder:validation:Optional + // +listType=set + ApplicationGatewayBackendAddressPoolIds []*string `json:"applicationGatewayBackendAddressPoolIds,omitempty" tf:"application_gateway_backend_address_pool_ids,omitempty"` + + // A list of Application Security Group ID's which this Virtual Machine Scale Set should be connected to. + // +kubebuilder:validation:Optional + // +listType=set + ApplicationSecurityGroupIds []*string `json:"applicationSecurityGroupIds,omitempty" tf:"application_security_group_ids,omitempty"` + + // A list of Backend Address Pools ID's from a Load Balancer which this Virtual Machine Scale Set should be connected to. + // +kubebuilder:validation:Optional + // +listType=set + LoadBalancerBackendAddressPoolIds []*string `json:"loadBalancerBackendAddressPoolIds,omitempty" tf:"load_balancer_backend_address_pool_ids,omitempty"` + + // A list of NAT Rule ID's from a Load Balancer which this Virtual Machine Scale Set should be connected to. + // +kubebuilder:validation:Optional + // +listType=set + LoadBalancerInboundNATRulesIds []*string `json:"loadBalancerInboundNatRulesIds,omitempty" tf:"load_balancer_inbound_nat_rules_ids,omitempty"` + + // The Name of the Public IP Address Configuration. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Is this the Primary IP Configuration? + // +kubebuilder:validation:Optional + Primary *bool `json:"primary,omitempty" tf:"primary,omitempty"` + + // A public_ip_address block as defined below. + // +kubebuilder:validation:Optional + PublicIPAddress []NetworkInterfaceIPConfigurationPublicIPAddressParameters `json:"publicIpAddress,omitempty" tf:"public_ip_address,omitempty"` + + // The ID of the Subnet which this IP Configuration should be connected to. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // Specifies the version of the image used to create the virtual machines. + // +kubebuilder:validation:Optional + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type WindowsVirtualMachineScaleSetNetworkInterfaceInitParameters struct { + + // A list of IP Addresses of DNS Servers which should be assigned to the Network Interface. + DNSServers []*string `json:"dnsServers,omitempty" tf:"dns_servers,omitempty"` + + // Does this Network Interface support Accelerated Networking? Defaults to false. + EnableAcceleratedNetworking *bool `json:"enableAcceleratedNetworking,omitempty" tf:"enable_accelerated_networking,omitempty"` + + // Does this Network Interface support IP Forwarding? Defaults to false. + EnableIPForwarding *bool `json:"enableIpForwarding,omitempty" tf:"enable_ip_forwarding,omitempty"` + + // One or more ip_configuration blocks as defined above. + IPConfiguration []WindowsVirtualMachineScaleSetNetworkInterfaceIPConfigurationInitParameters `json:"ipConfiguration,omitempty" tf:"ip_configuration,omitempty"` + + // The Name which should be used for this Network Interface. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The ID of a Network Security Group which should be assigned to this Network Interface. 
+ NetworkSecurityGroupID *string `json:"networkSecurityGroupId,omitempty" tf:"network_security_group_id,omitempty"` + + // Is this the Primary IP Configuration? + Primary *bool `json:"primary,omitempty" tf:"primary,omitempty"` +} + +type WindowsVirtualMachineScaleSetNetworkInterfaceObservation struct { + + // A list of IP Addresses of DNS Servers which should be assigned to the Network Interface. + DNSServers []*string `json:"dnsServers,omitempty" tf:"dns_servers,omitempty"` + + // Does this Network Interface support Accelerated Networking? Defaults to false. + EnableAcceleratedNetworking *bool `json:"enableAcceleratedNetworking,omitempty" tf:"enable_accelerated_networking,omitempty"` + + // Does this Network Interface support IP Forwarding? Defaults to false. + EnableIPForwarding *bool `json:"enableIpForwarding,omitempty" tf:"enable_ip_forwarding,omitempty"` + + // One or more ip_configuration blocks as defined above. + IPConfiguration []WindowsVirtualMachineScaleSetNetworkInterfaceIPConfigurationObservation `json:"ipConfiguration,omitempty" tf:"ip_configuration,omitempty"` + + // The Name which should be used for this Network Interface. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The ID of a Network Security Group which should be assigned to this Network Interface. + NetworkSecurityGroupID *string `json:"networkSecurityGroupId,omitempty" tf:"network_security_group_id,omitempty"` + + // Is this the Primary IP Configuration? + Primary *bool `json:"primary,omitempty" tf:"primary,omitempty"` +} + +type WindowsVirtualMachineScaleSetNetworkInterfaceParameters struct { + + // A list of IP Addresses of DNS Servers which should be assigned to the Network Interface. + // +kubebuilder:validation:Optional + DNSServers []*string `json:"dnsServers,omitempty" tf:"dns_servers,omitempty"` + + // Does this Network Interface support Accelerated Networking? Defaults to false. 
+ // +kubebuilder:validation:Optional + EnableAcceleratedNetworking *bool `json:"enableAcceleratedNetworking,omitempty" tf:"enable_accelerated_networking,omitempty"` + + // Does this Network Interface support IP Forwarding? Defaults to false. + // +kubebuilder:validation:Optional + EnableIPForwarding *bool `json:"enableIpForwarding,omitempty" tf:"enable_ip_forwarding,omitempty"` + + // One or more ip_configuration blocks as defined above. + // +kubebuilder:validation:Optional + IPConfiguration []WindowsVirtualMachineScaleSetNetworkInterfaceIPConfigurationParameters `json:"ipConfiguration" tf:"ip_configuration,omitempty"` + + // The Name which should be used for this Network Interface. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The ID of a Network Security Group which should be assigned to this Network Interface. + // +kubebuilder:validation:Optional + NetworkSecurityGroupID *string `json:"networkSecurityGroupId,omitempty" tf:"network_security_group_id,omitempty"` + + // Is this the Primary IP Configuration? + // +kubebuilder:validation:Optional + Primary *bool `json:"primary,omitempty" tf:"primary,omitempty"` +} + +type WindowsVirtualMachineScaleSetObservation struct { + + // An additional_capabilities block as defined below. + AdditionalCapabilities *WindowsVirtualMachineScaleSetAdditionalCapabilitiesObservation `json:"additionalCapabilities,omitempty" tf:"additional_capabilities,omitempty"` + + // One or more additional_unattend_content blocks as defined below. Changing this forces a new resource to be created. + AdditionalUnattendContent []WindowsVirtualMachineScaleSetAdditionalUnattendContentObservation `json:"additionalUnattendContent,omitempty" tf:"additional_unattend_content,omitempty"` + + // The username of the local administrator on each Virtual Machine Scale Set instance. Changing this forces a new resource to be created. 
+ AdminUsername *string `json:"adminUsername,omitempty" tf:"admin_username,omitempty"` + + // An automatic_instance_repair block as defined below. To enable the automatic instance repair, this Virtual Machine Scale Set must have a valid health_probe_id or an Application Health Extension. + AutomaticInstanceRepair *WindowsVirtualMachineScaleSetAutomaticInstanceRepairObservation `json:"automaticInstanceRepair,omitempty" tf:"automatic_instance_repair,omitempty"` + + // An automatic_os_upgrade_policy block as defined below. This can only be specified when upgrade_mode is set to either Automatic or Rolling. + AutomaticOsUpgradePolicy *WindowsVirtualMachineScaleSetAutomaticOsUpgradePolicyObservation `json:"automaticOsUpgradePolicy,omitempty" tf:"automatic_os_upgrade_policy,omitempty"` + + // A boot_diagnostics block as defined below. + BootDiagnostics *WindowsVirtualMachineScaleSetBootDiagnosticsObservation `json:"bootDiagnostics,omitempty" tf:"boot_diagnostics,omitempty"` + + // Specifies the ID of the Capacity Reservation Group which the Virtual Machine Scale Set should be allocated to. Changing this forces a new resource to be created. + CapacityReservationGroupID *string `json:"capacityReservationGroupId,omitempty" tf:"capacity_reservation_group_id,omitempty"` + + // The prefix which should be used for the name of the Virtual Machines in this Scale Set. If unspecified this defaults to the value for the name field. If the value of the name field is not a valid computer_name_prefix, then you must specify computer_name_prefix. Changing this forces a new resource to be created. + ComputerNamePrefix *string `json:"computerNamePrefix,omitempty" tf:"computer_name_prefix,omitempty"` + + // One or more data_disk blocks as defined below. + DataDisk []WindowsVirtualMachineScaleSetDataDiskObservation `json:"dataDisk,omitempty" tf:"data_disk,omitempty"` + + // Should Virtual Machine Extensions be run on Overprovisioned Virtual Machines in the Scale Set? Defaults to false. 
+ DoNotRunExtensionsOnOverprovisionedMachines *bool `json:"doNotRunExtensionsOnOverprovisionedMachines,omitempty" tf:"do_not_run_extensions_on_overprovisioned_machines,omitempty"` + + // Specifies the Edge Zone within the Azure Region where this Windows Virtual Machine Scale Set should exist. Changing this forces a new Windows Virtual Machine Scale Set to be created. + EdgeZone *string `json:"edgeZone,omitempty" tf:"edge_zone,omitempty"` + + // Are automatic updates enabled for this Virtual Machine? Defaults to true. + EnableAutomaticUpdates *bool `json:"enableAutomaticUpdates,omitempty" tf:"enable_automatic_updates,omitempty"` + + // Should all of the disks (including the temp disk) attached to this Virtual Machine be encrypted by enabling Encryption at Host? + EncryptionAtHostEnabled *bool `json:"encryptionAtHostEnabled,omitempty" tf:"encryption_at_host_enabled,omitempty"` + + // Specifies the eviction policy for Virtual Machines in this Scale Set. Possible values are Deallocate and Delete. Changing this forces a new resource to be created. + EvictionPolicy *string `json:"evictionPolicy,omitempty" tf:"eviction_policy,omitempty"` + + // One or more extension blocks as defined below + Extension []WindowsVirtualMachineScaleSetExtensionObservation `json:"extension,omitempty" tf:"extension,omitempty"` + + // Should extension operations be allowed on the Virtual Machine Scale Set? Possible values are true or false. Defaults to true. Changing this forces a new Windows Virtual Machine Scale Set to be created. + ExtensionOperationsEnabled *bool `json:"extensionOperationsEnabled,omitempty" tf:"extension_operations_enabled,omitempty"` + + // Specifies the duration allocated for all extensions to start. The time duration should be between 15 minutes and 120 minutes (inclusive) and should be specified in ISO 8601 format. Defaults to PT1H30M. 
+ ExtensionsTimeBudget *string `json:"extensionsTimeBudget,omitempty" tf:"extensions_time_budget,omitempty"` + + // One or more gallery_application blocks as defined below. + GalleryApplication []WindowsVirtualMachineScaleSetGalleryApplicationObservation `json:"galleryApplication,omitempty" tf:"gallery_application,omitempty"` + + GalleryApplications []WindowsVirtualMachineScaleSetGalleryApplicationsObservation `json:"galleryApplications,omitempty" tf:"gallery_applications,omitempty"` + + // The ID of a Load Balancer Probe which should be used to determine the health of an instance. This is Required and can only be specified when upgrade_mode is set to Automatic or Rolling. + HealthProbeID *string `json:"healthProbeId,omitempty" tf:"health_probe_id,omitempty"` + + // Specifies the ID of the dedicated host group that the virtual machine scale set resides in. Changing this forces a new resource to be created. + HostGroupID *string `json:"hostGroupId,omitempty" tf:"host_group_id,omitempty"` + + // The ID of the Windows Virtual Machine Scale Set. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. + Identity *WindowsVirtualMachineScaleSetIdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // The number of Virtual Machines in the Scale Set. + Instances *float64 `json:"instances,omitempty" tf:"instances,omitempty"` + + // Specifies the type of on-premise license (also known as Azure Hybrid Use Benefit) which should be used for this Virtual Machine Scale Set. Possible values are None, Windows_Client and Windows_Server. + LicenseType *string `json:"licenseType,omitempty" tf:"license_type,omitempty"` + + // The Azure location where the Windows Virtual Machine Scale Set should exist. Changing this forces a new resource to be created. 
+	Location *string `json:"location,omitempty" tf:"location,omitempty"`
+
+	// The maximum price you're willing to pay for each Virtual Machine in this Scale Set, in US Dollars; which must be greater than the current spot price. If this bid price falls below the current spot price the Virtual Machines in the Scale Set will be evicted using the eviction_policy. Defaults to -1, which means that each Virtual Machine in the Scale Set should not be evicted for price reasons.
+	MaxBidPrice *float64 `json:"maxBidPrice,omitempty" tf:"max_bid_price,omitempty"`
+
+	// One or more network_interface blocks as defined below.
+	NetworkInterface []WindowsVirtualMachineScaleSetNetworkInterfaceObservation `json:"networkInterface,omitempty" tf:"network_interface,omitempty"`
+
+	// An os_disk block as defined below.
+	OsDisk *WindowsVirtualMachineScaleSetOsDiskObservation `json:"osDisk,omitempty" tf:"os_disk,omitempty"`
+
+	// Should Azure over-provision Virtual Machines in this Scale Set? This means that multiple Virtual Machines will be provisioned and Azure will keep the instances which become available first - which improves provisioning success rates and improves deployment time. You're not billed for these over-provisioned VM's and they don't count towards the Subscription Quota. Defaults to true.
+	Overprovision *bool `json:"overprovision,omitempty" tf:"overprovision,omitempty"`
+
+	// A plan block as defined below. Changing this forces a new resource to be created.
+	Plan *WindowsVirtualMachineScaleSetPlanObservation `json:"plan,omitempty" tf:"plan,omitempty"`
+
+	// Specifies the number of fault domains that are used by this Windows Virtual Machine Scale Set. Changing this forces a new resource to be created.
+	PlatformFaultDomainCount *float64 `json:"platformFaultDomainCount,omitempty" tf:"platform_fault_domain_count,omitempty"`
+
+	// The Priority of this Virtual Machine Scale Set. Possible values are Regular and Spot. Defaults to Regular. Changing this value forces a new resource.
+	Priority *string `json:"priority,omitempty" tf:"priority,omitempty"`
+
+	// Should the Azure VM Agent be provisioned on each Virtual Machine in the Scale Set? Defaults to true. Changing this value forces a new resource to be created.
+	ProvisionVMAgent *bool `json:"provisionVmAgent,omitempty" tf:"provision_vm_agent,omitempty"`
+
+	// The ID of the Proximity Placement Group in which the Virtual Machine Scale Set should be assigned to. Changing this forces a new resource to be created.
+	ProximityPlacementGroupID *string `json:"proximityPlacementGroupId,omitempty" tf:"proximity_placement_group_id,omitempty"`
+
+	// The name of the Resource Group in which the Windows Virtual Machine Scale Set should exist. Changing this forces a new resource to be created.
+	ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"`
+
+	// A rolling_upgrade_policy block as defined below. This is Required and can only be specified when upgrade_mode is set to Automatic or Rolling. Changing this forces a new resource to be created.
+	RollingUpgradePolicy *WindowsVirtualMachineScaleSetRollingUpgradePolicyObservation `json:"rollingUpgradePolicy,omitempty" tf:"rolling_upgrade_policy,omitempty"`
+
+	// A scale_in block as defined below.
+	ScaleIn *WindowsVirtualMachineScaleSetScaleInObservation `json:"scaleIn,omitempty" tf:"scale_in,omitempty"`
+
+	// Deprecated: scaleInPolicy will be removed in favour of the scaleIn code block.
+	ScaleInPolicy *string `json:"scaleInPolicy,omitempty" tf:"scale_in_policy,omitempty"`
+
+	// One or more secret blocks as defined below.
+	Secret []WindowsVirtualMachineScaleSetSecretObservation `json:"secret,omitempty" tf:"secret,omitempty"`
+
+	// Specifies if Secure Boot and Trusted Launch is enabled for the Virtual Machine. Changing this forces a new resource to be created.
+ SecureBootEnabled *bool `json:"secureBootEnabled,omitempty" tf:"secure_boot_enabled,omitempty"` + + // Should this Virtual Machine Scale Set be limited to a Single Placement Group, which means the number of instances will be capped at 100 Virtual Machines. Defaults to true. + SinglePlacementGroup *bool `json:"singlePlacementGroup,omitempty" tf:"single_placement_group,omitempty"` + + // The Virtual Machine SKU for the Scale Set, such as Standard_F2. + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // The ID of an Image which each Virtual Machine in this Scale Set should be based on. Possible Image ID types include Image ID, Shared Image ID, Shared Image Version ID, Community Gallery Image ID, Community Gallery Image Version ID, Shared Gallery Image ID and Shared Gallery Image Version ID. + SourceImageID *string `json:"sourceImageId,omitempty" tf:"source_image_id,omitempty"` + + // A source_image_reference block as defined below. + SourceImageReference *WindowsVirtualMachineScaleSetSourceImageReferenceObservation `json:"sourceImageReference,omitempty" tf:"source_image_reference,omitempty"` + + // A spot_restore block as defined below. + SpotRestore *WindowsVirtualMachineScaleSetSpotRestoreObservation `json:"spotRestore,omitempty" tf:"spot_restore,omitempty"` + + // A mapping of tags which should be assigned to this Virtual Machine Scale Set. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A terminate_notification block as defined below. + TerminateNotification *WindowsVirtualMachineScaleSetTerminateNotificationObservation `json:"terminateNotification,omitempty" tf:"terminate_notification,omitempty"` + + // A termination_notification block as defined below. 
+ TerminationNotification *WindowsVirtualMachineScaleSetTerminationNotificationObservation `json:"terminationNotification,omitempty" tf:"termination_notification,omitempty"` + + // Specifies the time zone of the virtual machine, the possible values are defined here. + Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` + + // The Unique ID for this Windows Virtual Machine Scale Set. + UniqueID *string `json:"uniqueId,omitempty" tf:"unique_id,omitempty"` + + // Specifies how Upgrades (e.g. changing the Image/SKU) should be performed to Virtual Machine Instances. Possible values are Automatic, Manual and Rolling. Defaults to Manual. Changing this forces a new resource to be created. + UpgradeMode *string `json:"upgradeMode,omitempty" tf:"upgrade_mode,omitempty"` + + // The Base64-Encoded User Data which should be used for this Virtual Machine Scale Set. + UserData *string `json:"userData,omitempty" tf:"user_data,omitempty"` + + // Specifies if vTPM (Virtual Trusted Platform Module) and Trusted Launch is enabled for the Virtual Machine. Changing this forces a new resource to be created. + VtpmEnabled *bool `json:"vtpmEnabled,omitempty" tf:"vtpm_enabled,omitempty"` + + // One or more winrm_listener blocks as defined below. Changing this forces a new resource to be created. + WinrmListener []WindowsVirtualMachineScaleSetWinrmListenerObservation `json:"winrmListener,omitempty" tf:"winrm_listener,omitempty"` + + // Should the Virtual Machines in this Scale Set be strictly evenly distributed across Availability Zones? Defaults to false. Changing this forces a new resource to be created. + ZoneBalance *bool `json:"zoneBalance,omitempty" tf:"zone_balance,omitempty"` + + // Specifies a list of Availability Zones in which this Windows Virtual Machine Scale Set should be located. Changing this forces a new Windows Virtual Machine Scale Set to be created. 
+ // +listType=set + Zones []*string `json:"zones,omitempty" tf:"zones,omitempty"` +} + +type WindowsVirtualMachineScaleSetOsDiskDiffDiskSettingsInitParameters struct { + + // Specifies the Ephemeral Disk Settings for the OS Disk. At this time the only possible value is Local. Changing this forces a new resource to be created. + Option *string `json:"option,omitempty" tf:"option,omitempty"` + + // Specifies where to store the Ephemeral Disk. Possible values are CacheDisk and ResourceDisk. Defaults to CacheDisk. Changing this forces a new resource to be created. + Placement *string `json:"placement,omitempty" tf:"placement,omitempty"` +} + +type WindowsVirtualMachineScaleSetOsDiskDiffDiskSettingsObservation struct { + + // Specifies the Ephemeral Disk Settings for the OS Disk. At this time the only possible value is Local. Changing this forces a new resource to be created. + Option *string `json:"option,omitempty" tf:"option,omitempty"` + + // Specifies where to store the Ephemeral Disk. Possible values are CacheDisk and ResourceDisk. Defaults to CacheDisk. Changing this forces a new resource to be created. + Placement *string `json:"placement,omitempty" tf:"placement,omitempty"` +} + +type WindowsVirtualMachineScaleSetOsDiskDiffDiskSettingsParameters struct { + + // Specifies the Ephemeral Disk Settings for the OS Disk. At this time the only possible value is Local. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Option *string `json:"option" tf:"option,omitempty"` + + // Specifies where to store the Ephemeral Disk. Possible values are CacheDisk and ResourceDisk. Defaults to CacheDisk. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Placement *string `json:"placement,omitempty" tf:"placement,omitempty"` +} + +type WindowsVirtualMachineScaleSetOsDiskInitParameters struct { + + // The Type of Caching which should be used for the Internal OS Disk. 
Possible values are None, ReadOnly and ReadWrite. + Caching *string `json:"caching,omitempty" tf:"caching,omitempty"` + + // A diff_disk_settings block as defined above. Changing this forces a new resource to be created. + DiffDiskSettings *WindowsVirtualMachineScaleSetOsDiskDiffDiskSettingsInitParameters `json:"diffDiskSettings,omitempty" tf:"diff_disk_settings,omitempty"` + + // The ID of the Disk Encryption Set which should be used to encrypt this OS Disk. Conflicts with secure_vm_disk_encryption_set_id. Changing this forces a new resource to be created. + DiskEncryptionSetID *string `json:"diskEncryptionSetId,omitempty" tf:"disk_encryption_set_id,omitempty"` + + // The Size of the Internal OS Disk in GB, if you wish to vary from the size used in the image this Virtual Machine Scale Set is sourced from. + DiskSizeGb *float64 `json:"diskSizeGb,omitempty" tf:"disk_size_gb,omitempty"` + + // The ID of the Disk Encryption Set which should be used to Encrypt the OS Disk when the Virtual Machine Scale Set is Confidential VMSS. Conflicts with disk_encryption_set_id. Changing this forces a new resource to be created. + SecureVMDiskEncryptionSetID *string `json:"secureVmDiskEncryptionSetId,omitempty" tf:"secure_vm_disk_encryption_set_id,omitempty"` + + // Encryption Type when the Virtual Machine Scale Set is Confidential VMSS. Possible values are VMGuestStateOnly and DiskWithVMGuestState. Changing this forces a new resource to be created. + SecurityEncryptionType *string `json:"securityEncryptionType,omitempty" tf:"security_encryption_type,omitempty"` + + // The Type of Storage Account which should back this the Internal OS Disk. Possible values include Standard_LRS, StandardSSD_LRS, StandardSSD_ZRS, Premium_LRS and Premium_ZRS. Changing this forces a new resource to be created. + StorageAccountType *string `json:"storageAccountType,omitempty" tf:"storage_account_type,omitempty"` + + // Should Write Accelerator be Enabled for this OS Disk? Defaults to false. 
+ WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty" tf:"write_accelerator_enabled,omitempty"` +} + +type WindowsVirtualMachineScaleSetOsDiskObservation struct { + + // The Type of Caching which should be used for the Internal OS Disk. Possible values are None, ReadOnly and ReadWrite. + Caching *string `json:"caching,omitempty" tf:"caching,omitempty"` + + // A diff_disk_settings block as defined above. Changing this forces a new resource to be created. + DiffDiskSettings *WindowsVirtualMachineScaleSetOsDiskDiffDiskSettingsObservation `json:"diffDiskSettings,omitempty" tf:"diff_disk_settings,omitempty"` + + // The ID of the Disk Encryption Set which should be used to encrypt this OS Disk. Conflicts with secure_vm_disk_encryption_set_id. Changing this forces a new resource to be created. + DiskEncryptionSetID *string `json:"diskEncryptionSetId,omitempty" tf:"disk_encryption_set_id,omitempty"` + + // The Size of the Internal OS Disk in GB, if you wish to vary from the size used in the image this Virtual Machine Scale Set is sourced from. + DiskSizeGb *float64 `json:"diskSizeGb,omitempty" tf:"disk_size_gb,omitempty"` + + // The ID of the Disk Encryption Set which should be used to Encrypt the OS Disk when the Virtual Machine Scale Set is Confidential VMSS. Conflicts with disk_encryption_set_id. Changing this forces a new resource to be created. + SecureVMDiskEncryptionSetID *string `json:"secureVmDiskEncryptionSetId,omitempty" tf:"secure_vm_disk_encryption_set_id,omitempty"` + + // Encryption Type when the Virtual Machine Scale Set is Confidential VMSS. Possible values are VMGuestStateOnly and DiskWithVMGuestState. Changing this forces a new resource to be created. + SecurityEncryptionType *string `json:"securityEncryptionType,omitempty" tf:"security_encryption_type,omitempty"` + + // The Type of Storage Account which should back this the Internal OS Disk. 
Possible values include Standard_LRS, StandardSSD_LRS, StandardSSD_ZRS, Premium_LRS and Premium_ZRS. Changing this forces a new resource to be created. + StorageAccountType *string `json:"storageAccountType,omitempty" tf:"storage_account_type,omitempty"` + + // Should Write Accelerator be Enabled for this OS Disk? Defaults to false. + WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty" tf:"write_accelerator_enabled,omitempty"` +} + +type WindowsVirtualMachineScaleSetOsDiskParameters struct { + + // The Type of Caching which should be used for the Internal OS Disk. Possible values are None, ReadOnly and ReadWrite. + // +kubebuilder:validation:Optional + Caching *string `json:"caching" tf:"caching,omitempty"` + + // A diff_disk_settings block as defined above. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + DiffDiskSettings *WindowsVirtualMachineScaleSetOsDiskDiffDiskSettingsParameters `json:"diffDiskSettings,omitempty" tf:"diff_disk_settings,omitempty"` + + // The ID of the Disk Encryption Set which should be used to encrypt this OS Disk. Conflicts with secure_vm_disk_encryption_set_id. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + DiskEncryptionSetID *string `json:"diskEncryptionSetId,omitempty" tf:"disk_encryption_set_id,omitempty"` + + // The Size of the Internal OS Disk in GB, if you wish to vary from the size used in the image this Virtual Machine Scale Set is sourced from. + // +kubebuilder:validation:Optional + DiskSizeGb *float64 `json:"diskSizeGb,omitempty" tf:"disk_size_gb,omitempty"` + + // The ID of the Disk Encryption Set which should be used to Encrypt the OS Disk when the Virtual Machine Scale Set is Confidential VMSS. Conflicts with disk_encryption_set_id. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + SecureVMDiskEncryptionSetID *string `json:"secureVmDiskEncryptionSetId,omitempty" tf:"secure_vm_disk_encryption_set_id,omitempty"` + + // Encryption Type when the Virtual Machine Scale Set is Confidential VMSS. Possible values are VMGuestStateOnly and DiskWithVMGuestState. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + SecurityEncryptionType *string `json:"securityEncryptionType,omitempty" tf:"security_encryption_type,omitempty"` + + // The Type of Storage Account which should back this the Internal OS Disk. Possible values include Standard_LRS, StandardSSD_LRS, StandardSSD_ZRS, Premium_LRS and Premium_ZRS. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + StorageAccountType *string `json:"storageAccountType" tf:"storage_account_type,omitempty"` + + // Should Write Accelerator be Enabled for this OS Disk? Defaults to false. + // +kubebuilder:validation:Optional + WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty" tf:"write_accelerator_enabled,omitempty"` +} + +type WindowsVirtualMachineScaleSetParameters struct { + + // An additional_capabilities block as defined below. + // +kubebuilder:validation:Optional + AdditionalCapabilities *WindowsVirtualMachineScaleSetAdditionalCapabilitiesParameters `json:"additionalCapabilities,omitempty" tf:"additional_capabilities,omitempty"` + + // One or more additional_unattend_content blocks as defined below. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + AdditionalUnattendContent []WindowsVirtualMachineScaleSetAdditionalUnattendContentParameters `json:"additionalUnattendContent,omitempty" tf:"additional_unattend_content,omitempty"` + + // The Password which should be used for the local-administrator on this Virtual Machine. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + AdminPasswordSecretRef v1.SecretKeySelector `json:"adminPasswordSecretRef" tf:"-"` + + // The username of the local administrator on each Virtual Machine Scale Set instance. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + AdminUsername *string `json:"adminUsername,omitempty" tf:"admin_username,omitempty"` + + // An automatic_instance_repair block as defined below. To enable the automatic instance repair, this Virtual Machine Scale Set must have a valid health_probe_id or an Application Health Extension. + // +kubebuilder:validation:Optional + AutomaticInstanceRepair *WindowsVirtualMachineScaleSetAutomaticInstanceRepairParameters `json:"automaticInstanceRepair,omitempty" tf:"automatic_instance_repair,omitempty"` + + // An automatic_os_upgrade_policy block as defined below. This can only be specified when upgrade_mode is set to either Automatic or Rolling. + // +kubebuilder:validation:Optional + AutomaticOsUpgradePolicy *WindowsVirtualMachineScaleSetAutomaticOsUpgradePolicyParameters `json:"automaticOsUpgradePolicy,omitempty" tf:"automatic_os_upgrade_policy,omitempty"` + + // A boot_diagnostics block as defined below. + // +kubebuilder:validation:Optional + BootDiagnostics *WindowsVirtualMachineScaleSetBootDiagnosticsParameters `json:"bootDiagnostics,omitempty" tf:"boot_diagnostics,omitempty"` + + // Specifies the ID of the Capacity Reservation Group which the Virtual Machine Scale Set should be allocated to. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + CapacityReservationGroupID *string `json:"capacityReservationGroupId,omitempty" tf:"capacity_reservation_group_id,omitempty"` + + // The prefix which should be used for the name of the Virtual Machines in this Scale Set. If unspecified this defaults to the value for the name field. If the value of the name field is not a valid computer_name_prefix, then you must specify computer_name_prefix. 
Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ComputerNamePrefix *string `json:"computerNamePrefix,omitempty" tf:"computer_name_prefix,omitempty"` + + // The Base64-Encoded Custom Data which should be used for this Virtual Machine Scale Set. + // +kubebuilder:validation:Optional + CustomDataSecretRef *v1.SecretKeySelector `json:"customDataSecretRef,omitempty" tf:"-"` + + // One or more data_disk blocks as defined below. + // +kubebuilder:validation:Optional + DataDisk []WindowsVirtualMachineScaleSetDataDiskParameters `json:"dataDisk,omitempty" tf:"data_disk,omitempty"` + + // Should Virtual Machine Extensions be run on Overprovisioned Virtual Machines in the Scale Set? Defaults to false. + // +kubebuilder:validation:Optional + DoNotRunExtensionsOnOverprovisionedMachines *bool `json:"doNotRunExtensionsOnOverprovisionedMachines,omitempty" tf:"do_not_run_extensions_on_overprovisioned_machines,omitempty"` + + // Specifies the Edge Zone within the Azure Region where this Windows Virtual Machine Scale Set should exist. Changing this forces a new Windows Virtual Machine Scale Set to be created. + // +kubebuilder:validation:Optional + EdgeZone *string `json:"edgeZone,omitempty" tf:"edge_zone,omitempty"` + + // Are automatic updates enabled for this Virtual Machine? Defaults to true. + // +kubebuilder:validation:Optional + EnableAutomaticUpdates *bool `json:"enableAutomaticUpdates,omitempty" tf:"enable_automatic_updates,omitempty"` + + // Should all of the disks (including the temp disk) attached to this Virtual Machine be encrypted by enabling Encryption at Host? + // +kubebuilder:validation:Optional + EncryptionAtHostEnabled *bool `json:"encryptionAtHostEnabled,omitempty" tf:"encryption_at_host_enabled,omitempty"` + + // Specifies the eviction policy for Virtual Machines in this Scale Set. Possible values are Deallocate and Delete. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + EvictionPolicy *string `json:"evictionPolicy,omitempty" tf:"eviction_policy,omitempty"` + + // One or more extension blocks as defined below + // +kubebuilder:validation:Optional + Extension []WindowsVirtualMachineScaleSetExtensionParameters `json:"extension,omitempty" tf:"extension,omitempty"` + + // Should extension operations be allowed on the Virtual Machine Scale Set? Possible values are true or false. Defaults to true. Changing this forces a new Windows Virtual Machine Scale Set to be created. + // +kubebuilder:validation:Optional + ExtensionOperationsEnabled *bool `json:"extensionOperationsEnabled,omitempty" tf:"extension_operations_enabled,omitempty"` + + // Specifies the duration allocated for all extensions to start. The time duration should be between 15 minutes and 120 minutes (inclusive) and should be specified in ISO 8601 format. Defaults to PT1H30M. + // +kubebuilder:validation:Optional + ExtensionsTimeBudget *string `json:"extensionsTimeBudget,omitempty" tf:"extensions_time_budget,omitempty"` + + // One or more gallery_application blocks as defined below. + // +kubebuilder:validation:Optional + GalleryApplication []WindowsVirtualMachineScaleSetGalleryApplicationParameters `json:"galleryApplication,omitempty" tf:"gallery_application,omitempty"` + + // +kubebuilder:validation:Optional + GalleryApplications []WindowsVirtualMachineScaleSetGalleryApplicationsParameters `json:"galleryApplications,omitempty" tf:"gallery_applications,omitempty"` + + // The ID of a Load Balancer Probe which should be used to determine the health of an instance. This is Required and can only be specified when upgrade_mode is set to Automatic or Rolling. + // +kubebuilder:validation:Optional + HealthProbeID *string `json:"healthProbeId,omitempty" tf:"health_probe_id,omitempty"` + + // Specifies the ID of the dedicated host group that the virtual machine scale set resides in. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + HostGroupID *string `json:"hostGroupId,omitempty" tf:"host_group_id,omitempty"` + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *WindowsVirtualMachineScaleSetIdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The number of Virtual Machines in the Scale Set. + // +kubebuilder:validation:Optional + Instances *float64 `json:"instances,omitempty" tf:"instances,omitempty"` + + // Specifies the type of on-premise license (also known as Azure Hybrid Use Benefit) which should be used for this Virtual Machine Scale Set. Possible values are None, Windows_Client and Windows_Server. + // +kubebuilder:validation:Optional + LicenseType *string `json:"licenseType,omitempty" tf:"license_type,omitempty"` + + // The Azure location where the Windows Virtual Machine Scale Set should exist. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The maximum price you're willing to pay for each Virtual Machine in this Scale Set, in US Dollars; which must be greater than the current spot price. If this bid price falls below the current spot price the Virtual Machines in the Scale Set will be evicted using the eviction_policy. Defaults to -1, which means that each Virtual Machine in the Scale Set should not be evicted for price reasons. + // +kubebuilder:validation:Optional + MaxBidPrice *float64 `json:"maxBidPrice,omitempty" tf:"max_bid_price,omitempty"` + + // One or more network_interface blocks as defined below. + // +kubebuilder:validation:Optional + NetworkInterface []WindowsVirtualMachineScaleSetNetworkInterfaceParameters `json:"networkInterface,omitempty" tf:"network_interface,omitempty"` + + // An os_disk block as defined below. 
+ // +kubebuilder:validation:Optional + OsDisk *WindowsVirtualMachineScaleSetOsDiskParameters `json:"osDisk,omitempty" tf:"os_disk,omitempty"` + + // Should Azure over-provision Virtual Machines in this Scale Set? This means that multiple Virtual Machines will be provisioned and Azure will keep the instances which become available first - which improves provisioning success rates and improves deployment time. You're not billed for these over-provisioned VM's and they don't count towards the Subscription Quota. Defaults to true. + // +kubebuilder:validation:Optional + Overprovision *bool `json:"overprovision,omitempty" tf:"overprovision,omitempty"` + + // A plan block as defined below. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Plan *WindowsVirtualMachineScaleSetPlanParameters `json:"plan,omitempty" tf:"plan,omitempty"` + + // Specifies the number of fault domains that are used by this Linux Virtual Machine Scale Set. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + PlatformFaultDomainCount *float64 `json:"platformFaultDomainCount,omitempty" tf:"platform_fault_domain_count,omitempty"` + + // The Priority of this Virtual Machine Scale Set. Possible values are Regular and Spot. Defaults to Regular. Changing this value forces a new resource. + // +kubebuilder:validation:Optional + Priority *string `json:"priority,omitempty" tf:"priority,omitempty"` + + // Should the Azure VM Agent be provisioned on each Virtual Machine in the Scale Set? Defaults to true. Changing this value forces a new resource to be created. + // +kubebuilder:validation:Optional + ProvisionVMAgent *bool `json:"provisionVmAgent,omitempty" tf:"provision_vm_agent,omitempty"` + + // The ID of the Proximity Placement Group in which the Virtual Machine Scale Set should be assigned to. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + ProximityPlacementGroupID *string `json:"proximityPlacementGroupId,omitempty" tf:"proximity_placement_group_id,omitempty"` + + // The name of the Resource Group in which the Windows Virtual Machine Scale Set should be exist. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A rolling_upgrade_policy block as defined below. This is Required and can only be specified when upgrade_mode is set to Automatic or Rolling. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + RollingUpgradePolicy *WindowsVirtualMachineScaleSetRollingUpgradePolicyParameters `json:"rollingUpgradePolicy,omitempty" tf:"rolling_upgrade_policy,omitempty"` + + // A scale_in block as defined below. + // +kubebuilder:validation:Optional + ScaleIn *WindowsVirtualMachineScaleSetScaleInParameters `json:"scaleIn,omitempty" tf:"scale_in,omitempty"` + + // Deprecated: scaleInPolicy will be removed in favour of the scaleIn code block. + // +kubebuilder:validation:Optional + ScaleInPolicy *string `json:"scaleInPolicy,omitempty" tf:"scale_in_policy,omitempty"` + + // One or more secret blocks as defined below. 
+ // +kubebuilder:validation:Optional + Secret []WindowsVirtualMachineScaleSetSecretParameters `json:"secret,omitempty" tf:"secret,omitempty"` + + // Specifies if Secure Boot and Trusted Launch is enabled for the Virtual Machine. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + SecureBootEnabled *bool `json:"secureBootEnabled,omitempty" tf:"secure_boot_enabled,omitempty"` + + // Should this Virtual Machine Scale Set be limited to a Single Placement Group, which means the number of instances will be capped at 100 Virtual Machines. Defaults to true. + // +kubebuilder:validation:Optional + SinglePlacementGroup *bool `json:"singlePlacementGroup,omitempty" tf:"single_placement_group,omitempty"` + + // The Virtual Machine SKU for the Scale Set, such as Standard_F2. + // +kubebuilder:validation:Optional + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // The ID of an Image which each Virtual Machine in this Scale Set should be based on. Possible Image ID types include Image ID, Shared Image ID, Shared Image Version ID, Community Gallery Image ID, Community Gallery Image Version ID, Shared Gallery Image ID and Shared Gallery Image Version ID. + // +kubebuilder:validation:Optional + SourceImageID *string `json:"sourceImageId,omitempty" tf:"source_image_id,omitempty"` + + // A source_image_reference block as defined below. + // +kubebuilder:validation:Optional + SourceImageReference *WindowsVirtualMachineScaleSetSourceImageReferenceParameters `json:"sourceImageReference,omitempty" tf:"source_image_reference,omitempty"` + + // A spot_restore block as defined below. + // +kubebuilder:validation:Optional + SpotRestore *WindowsVirtualMachineScaleSetSpotRestoreParameters `json:"spotRestore,omitempty" tf:"spot_restore,omitempty"` + + // A mapping of tags which should be assigned to this Virtual Machine Scale Set. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A terminate_notification block as defined below. + // +kubebuilder:validation:Optional + TerminateNotification *WindowsVirtualMachineScaleSetTerminateNotificationParameters `json:"terminateNotification,omitempty" tf:"terminate_notification,omitempty"` + + // A termination_notification block as defined below. + // +kubebuilder:validation:Optional + TerminationNotification *WindowsVirtualMachineScaleSetTerminationNotificationParameters `json:"terminationNotification,omitempty" tf:"termination_notification,omitempty"` + + // Specifies the time zone of the virtual machine, the possible values are defined here. + // +kubebuilder:validation:Optional + Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` + + // Specifies how Upgrades (e.g. changing the Image/SKU) should be performed to Virtual Machine Instances. Possible values are Automatic, Manual and Rolling. Defaults to Manual. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + UpgradeMode *string `json:"upgradeMode,omitempty" tf:"upgrade_mode,omitempty"` + + // The Base64-Encoded User Data which should be used for this Virtual Machine Scale Set. + // +kubebuilder:validation:Optional + UserData *string `json:"userData,omitempty" tf:"user_data,omitempty"` + + // Specifies if vTPM (Virtual Trusted Platform Module) and Trusted Launch is enabled for the Virtual Machine. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + VtpmEnabled *bool `json:"vtpmEnabled,omitempty" tf:"vtpm_enabled,omitempty"` + + // One or more winrm_listener blocks as defined below. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + WinrmListener []WindowsVirtualMachineScaleSetWinrmListenerParameters `json:"winrmListener,omitempty" tf:"winrm_listener,omitempty"` + + // Should the Virtual Machines in this Scale Set be strictly evenly distributed across Availability Zones? Defaults to false. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ZoneBalance *bool `json:"zoneBalance,omitempty" tf:"zone_balance,omitempty"` + + // Specifies a list of Availability Zones in which this Windows Virtual Machine Scale Set should be located. Changing this forces a new Windows Virtual Machine Scale Set to be created. + // +kubebuilder:validation:Optional + // +listType=set + Zones []*string `json:"zones,omitempty" tf:"zones,omitempty"` +} + +type WindowsVirtualMachineScaleSetPlanInitParameters struct { + + // Specifies the name of the image from the marketplace. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies the product of the image from the marketplace. Changing this forces a new resource to be created. + Product *string `json:"product,omitempty" tf:"product,omitempty"` + + // Specifies the publisher of the image. Changing this forces a new resource to be created. + Publisher *string `json:"publisher,omitempty" tf:"publisher,omitempty"` +} + +type WindowsVirtualMachineScaleSetPlanObservation struct { + + // Specifies the name of the image from the marketplace. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies the product of the image from the marketplace. Changing this forces a new resource to be created. + Product *string `json:"product,omitempty" tf:"product,omitempty"` + + // Specifies the publisher of the image. Changing this forces a new resource to be created. 
+ Publisher *string `json:"publisher,omitempty" tf:"publisher,omitempty"` +} + +type WindowsVirtualMachineScaleSetPlanParameters struct { + + // Specifies the name of the image from the marketplace. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Specifies the product of the image from the marketplace. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Product *string `json:"product" tf:"product,omitempty"` + + // Specifies the publisher of the image. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Publisher *string `json:"publisher" tf:"publisher,omitempty"` +} + +type WindowsVirtualMachineScaleSetRollingUpgradePolicyInitParameters struct { + + // Should the Virtual Machine Scale Set ignore the Azure Zone boundaries when constructing upgrade batches? Possible values are true or false. + CrossZoneUpgradesEnabled *bool `json:"crossZoneUpgradesEnabled,omitempty" tf:"cross_zone_upgrades_enabled,omitempty"` + + // The maximum percent of total virtual machine instances that will be upgraded simultaneously by the rolling upgrade in one batch. As this is a maximum, unhealthy instances in previous or future batches can cause the percentage of instances in a batch to decrease to ensure higher reliability. + MaxBatchInstancePercent *float64 `json:"maxBatchInstancePercent,omitempty" tf:"max_batch_instance_percent,omitempty"` + + // The maximum percentage of the total virtual machine instances in the scale set that can be simultaneously unhealthy, either as a result of being upgraded, or by being found in an unhealthy state by the virtual machine health checks before the rolling upgrade aborts. This constraint will be checked prior to starting any batch. 
+ MaxUnhealthyInstancePercent *float64 `json:"maxUnhealthyInstancePercent,omitempty" tf:"max_unhealthy_instance_percent,omitempty"` + + // The maximum percentage of upgraded virtual machine instances that can be found to be in an unhealthy state. This check will happen after each batch is upgraded. If this percentage is ever exceeded, the rolling update aborts. + MaxUnhealthyUpgradedInstancePercent *float64 `json:"maxUnhealthyUpgradedInstancePercent,omitempty" tf:"max_unhealthy_upgraded_instance_percent,omitempty"` + + // The wait time between completing the update for all virtual machines in one batch and starting the next batch. The time duration should be specified in ISO 8601 format. + PauseTimeBetweenBatches *string `json:"pauseTimeBetweenBatches,omitempty" tf:"pause_time_between_batches,omitempty"` + + // Upgrade all unhealthy instances in a scale set before any healthy instances. Possible values are true or false. + PrioritizeUnhealthyInstancesEnabled *bool `json:"prioritizeUnhealthyInstancesEnabled,omitempty" tf:"prioritize_unhealthy_instances_enabled,omitempty"` +} + +type WindowsVirtualMachineScaleSetRollingUpgradePolicyObservation struct { + + // Should the Virtual Machine Scale Set ignore the Azure Zone boundaries when constructing upgrade batches? Possible values are true or false. + CrossZoneUpgradesEnabled *bool `json:"crossZoneUpgradesEnabled,omitempty" tf:"cross_zone_upgrades_enabled,omitempty"` + + // The maximum percent of total virtual machine instances that will be upgraded simultaneously by the rolling upgrade in one batch. As this is a maximum, unhealthy instances in previous or future batches can cause the percentage of instances in a batch to decrease to ensure higher reliability. 
+ MaxBatchInstancePercent *float64 `json:"maxBatchInstancePercent,omitempty" tf:"max_batch_instance_percent,omitempty"` + + // The maximum percentage of the total virtual machine instances in the scale set that can be simultaneously unhealthy, either as a result of being upgraded, or by being found in an unhealthy state by the virtual machine health checks before the rolling upgrade aborts. This constraint will be checked prior to starting any batch. + MaxUnhealthyInstancePercent *float64 `json:"maxUnhealthyInstancePercent,omitempty" tf:"max_unhealthy_instance_percent,omitempty"` + + // The maximum percentage of upgraded virtual machine instances that can be found to be in an unhealthy state. This check will happen after each batch is upgraded. If this percentage is ever exceeded, the rolling update aborts. + MaxUnhealthyUpgradedInstancePercent *float64 `json:"maxUnhealthyUpgradedInstancePercent,omitempty" tf:"max_unhealthy_upgraded_instance_percent,omitempty"` + + // The wait time between completing the update for all virtual machines in one batch and starting the next batch. The time duration should be specified in ISO 8601 format. + PauseTimeBetweenBatches *string `json:"pauseTimeBetweenBatches,omitempty" tf:"pause_time_between_batches,omitempty"` + + // Upgrade all unhealthy instances in a scale set before any healthy instances. Possible values are true or false. + PrioritizeUnhealthyInstancesEnabled *bool `json:"prioritizeUnhealthyInstancesEnabled,omitempty" tf:"prioritize_unhealthy_instances_enabled,omitempty"` +} + +type WindowsVirtualMachineScaleSetRollingUpgradePolicyParameters struct { + + // Should the Virtual Machine Scale Set ignore the Azure Zone boundaries when constructing upgrade batches? Possible values are true or false. 
+ // +kubebuilder:validation:Optional + CrossZoneUpgradesEnabled *bool `json:"crossZoneUpgradesEnabled,omitempty" tf:"cross_zone_upgrades_enabled,omitempty"` + + // The maximum percent of total virtual machine instances that will be upgraded simultaneously by the rolling upgrade in one batch. As this is a maximum, unhealthy instances in previous or future batches can cause the percentage of instances in a batch to decrease to ensure higher reliability. + // +kubebuilder:validation:Optional + MaxBatchInstancePercent *float64 `json:"maxBatchInstancePercent" tf:"max_batch_instance_percent,omitempty"` + + // The maximum percentage of the total virtual machine instances in the scale set that can be simultaneously unhealthy, either as a result of being upgraded, or by being found in an unhealthy state by the virtual machine health checks before the rolling upgrade aborts. This constraint will be checked prior to starting any batch. + // +kubebuilder:validation:Optional + MaxUnhealthyInstancePercent *float64 `json:"maxUnhealthyInstancePercent" tf:"max_unhealthy_instance_percent,omitempty"` + + // The maximum percentage of upgraded virtual machine instances that can be found to be in an unhealthy state. This check will happen after each batch is upgraded. If this percentage is ever exceeded, the rolling update aborts. + // +kubebuilder:validation:Optional + MaxUnhealthyUpgradedInstancePercent *float64 `json:"maxUnhealthyUpgradedInstancePercent" tf:"max_unhealthy_upgraded_instance_percent,omitempty"` + + // The wait time between completing the update for all virtual machines in one batch and starting the next batch. The time duration should be specified in ISO 8601 format. + // +kubebuilder:validation:Optional + PauseTimeBetweenBatches *string `json:"pauseTimeBetweenBatches" tf:"pause_time_between_batches,omitempty"` + + // Upgrade all unhealthy instances in a scale set before any healthy instances. Possible values are true or false. 
+ // +kubebuilder:validation:Optional + PrioritizeUnhealthyInstancesEnabled *bool `json:"prioritizeUnhealthyInstancesEnabled,omitempty" tf:"prioritize_unhealthy_instances_enabled,omitempty"` +} + +type WindowsVirtualMachineScaleSetScaleInInitParameters struct { + + // Should the virtual machines chosen for removal be force deleted when the virtual machine scale set is being scaled-in? Possible values are true or false. Defaults to false. + ForceDeletionEnabled *bool `json:"forceDeletionEnabled,omitempty" tf:"force_deletion_enabled,omitempty"` + + // The scale-in policy rule that decides which virtual machines are chosen for removal when a Virtual Machine Scale Set is scaled in. Possible values for the scale-in policy rules are Default, NewestVM and OldestVM, defaults to Default. For more information about scale in policy, please refer to this doc. + Rule *string `json:"rule,omitempty" tf:"rule,omitempty"` +} + +type WindowsVirtualMachineScaleSetScaleInObservation struct { + + // Should the virtual machines chosen for removal be force deleted when the virtual machine scale set is being scaled-in? Possible values are true or false. Defaults to false. + ForceDeletionEnabled *bool `json:"forceDeletionEnabled,omitempty" tf:"force_deletion_enabled,omitempty"` + + // The scale-in policy rule that decides which virtual machines are chosen for removal when a Virtual Machine Scale Set is scaled in. Possible values for the scale-in policy rules are Default, NewestVM and OldestVM, defaults to Default. For more information about scale in policy, please refer to this doc. + Rule *string `json:"rule,omitempty" tf:"rule,omitempty"` +} + +type WindowsVirtualMachineScaleSetScaleInParameters struct { + + // Should the virtual machines chosen for removal be force deleted when the virtual machine scale set is being scaled-in? Possible values are true or false. Defaults to false. 
+ // +kubebuilder:validation:Optional + ForceDeletionEnabled *bool `json:"forceDeletionEnabled,omitempty" tf:"force_deletion_enabled,omitempty"` + + // The scale-in policy rule that decides which virtual machines are chosen for removal when a Virtual Machine Scale Set is scaled in. Possible values for the scale-in policy rules are Default, NewestVM and OldestVM, defaults to Default. For more information about scale in policy, please refer to this doc. + // +kubebuilder:validation:Optional + Rule *string `json:"rule,omitempty" tf:"rule,omitempty"` +} + +type WindowsVirtualMachineScaleSetSecretCertificateInitParameters struct { + + // The certificate store on the Virtual Machine where the certificate should be added. + Store *string `json:"store,omitempty" tf:"store,omitempty"` + + // The Secret URL of a Key Vault Certificate. + URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +type WindowsVirtualMachineScaleSetSecretCertificateObservation struct { + + // The certificate store on the Virtual Machine where the certificate should be added. + Store *string `json:"store,omitempty" tf:"store,omitempty"` + + // The Secret URL of a Key Vault Certificate. + URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +type WindowsVirtualMachineScaleSetSecretCertificateParameters struct { + + // The certificate store on the Virtual Machine where the certificate should be added. + // +kubebuilder:validation:Optional + Store *string `json:"store" tf:"store,omitempty"` + + // The Secret URL of a Key Vault Certificate. + // +kubebuilder:validation:Optional + URL *string `json:"url" tf:"url,omitempty"` +} + +type WindowsVirtualMachineScaleSetSecretInitParameters struct { + + // One or more certificate blocks as defined above. + Certificate []WindowsVirtualMachineScaleSetSecretCertificateInitParameters `json:"certificate,omitempty" tf:"certificate,omitempty"` + + // The ID of the Key Vault from which all Secrets should be sourced. 
+ KeyVaultID *string `json:"keyVaultId,omitempty" tf:"key_vault_id,omitempty"` +} + +type WindowsVirtualMachineScaleSetSecretObservation struct { + + // One or more certificate blocks as defined above. + Certificate []WindowsVirtualMachineScaleSetSecretCertificateObservation `json:"certificate,omitempty" tf:"certificate,omitempty"` + + // The ID of the Key Vault from which all Secrets should be sourced. + KeyVaultID *string `json:"keyVaultId,omitempty" tf:"key_vault_id,omitempty"` +} + +type WindowsVirtualMachineScaleSetSecretParameters struct { + + // One or more certificate blocks as defined above. + // +kubebuilder:validation:Optional + Certificate []WindowsVirtualMachineScaleSetSecretCertificateParameters `json:"certificate" tf:"certificate,omitempty"` + + // The ID of the Key Vault from which all Secrets should be sourced. + // +kubebuilder:validation:Optional + KeyVaultID *string `json:"keyVaultId" tf:"key_vault_id,omitempty"` +} + +type WindowsVirtualMachineScaleSetSourceImageReferenceInitParameters struct { + + // Specifies the offer of the image used to create the virtual machines. Changing this forces a new resource to be created. + Offer *string `json:"offer,omitempty" tf:"offer,omitempty"` + + // Specifies the publisher of the image used to create the virtual machines. Changing this forces a new resource to be created. + Publisher *string `json:"publisher,omitempty" tf:"publisher,omitempty"` + + // Specifies the SKU of the image used to create the virtual machines. + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // Specifies the version of the image used to create the virtual machines. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type WindowsVirtualMachineScaleSetSourceImageReferenceObservation struct { + + // Specifies the offer of the image used to create the virtual machines. Changing this forces a new resource to be created. 
+ Offer *string `json:"offer,omitempty" tf:"offer,omitempty"` + + // Specifies the publisher of the image used to create the virtual machines. Changing this forces a new resource to be created. + Publisher *string `json:"publisher,omitempty" tf:"publisher,omitempty"` + + // Specifies the SKU of the image used to create the virtual machines. + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // Specifies the version of the image used to create the virtual machines. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type WindowsVirtualMachineScaleSetSourceImageReferenceParameters struct { + + // Specifies the offer of the image used to create the virtual machines. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Offer *string `json:"offer" tf:"offer,omitempty"` + + // Specifies the publisher of the image used to create the virtual machines. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Publisher *string `json:"publisher" tf:"publisher,omitempty"` + + // Specifies the SKU of the image used to create the virtual machines. + // +kubebuilder:validation:Optional + Sku *string `json:"sku" tf:"sku,omitempty"` + + // Specifies the version of the image used to create the virtual machines. + // +kubebuilder:validation:Optional + Version *string `json:"version" tf:"version,omitempty"` +} + +type WindowsVirtualMachineScaleSetSpotRestoreInitParameters struct { + + // Should the Spot-Try-Restore feature be enabled? The Spot-Try-Restore feature will attempt to automatically restore the evicted Spot Virtual Machine Scale Set VM instances opportunistically based on capacity availability and pricing constraints. Possible values are true or false. Defaults to false. Changing this forces a new resource to be created. 
+ Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The length of time that the Virtual Machine Scale Set should attempt to restore the Spot VM instances which have been evicted. The time duration should be between 15 minutes and 120 minutes (inclusive). The time duration should be specified in the ISO 8601 format. Defaults to PT1H. Changing this forces a new resource to be created. + Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` +} + +type WindowsVirtualMachineScaleSetSpotRestoreObservation struct { + + // Should the Spot-Try-Restore feature be enabled? The Spot-Try-Restore feature will attempt to automatically restore the evicted Spot Virtual Machine Scale Set VM instances opportunistically based on capacity availability and pricing constraints. Possible values are true or false. Defaults to false. Changing this forces a new resource to be created. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The length of time that the Virtual Machine Scale Set should attempt to restore the Spot VM instances which have been evicted. The time duration should be between 15 minutes and 120 minutes (inclusive). The time duration should be specified in the ISO 8601 format. Defaults to PT1H. Changing this forces a new resource to be created. + Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` +} + +type WindowsVirtualMachineScaleSetSpotRestoreParameters struct { + + // Should the Spot-Try-Restore feature be enabled? The Spot-Try-Restore feature will attempt to automatically restore the evicted Spot Virtual Machine Scale Set VM instances opportunistically based on capacity availability and pricing constraints. Possible values are true or false. Defaults to false. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The length of time that the Virtual Machine Scale Set should attempt to restore the Spot VM instances which have been evicted. The time duration should be between 15 minutes and 120 minutes (inclusive). The time duration should be specified in the ISO 8601 format. Defaults to PT1H. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` +} + +type WindowsVirtualMachineScaleSetTerminateNotificationInitParameters struct { + + // Should the terminate notification be enabled on this Virtual Machine Scale Set? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Length of time (in minutes, between 5 and 15) a notification to be sent to the VM on the instance metadata server till the VM gets deleted. The time duration should be specified in ISO 8601 format. Defaults to PT5M. + Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` +} + +type WindowsVirtualMachineScaleSetTerminateNotificationObservation struct { + + // Should the terminate notification be enabled on this Virtual Machine Scale Set? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Length of time (in minutes, between 5 and 15) a notification to be sent to the VM on the instance metadata server till the VM gets deleted. The time duration should be specified in ISO 8601 format. Defaults to PT5M. + Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` +} + +type WindowsVirtualMachineScaleSetTerminateNotificationParameters struct { + + // Should the terminate notification be enabled on this Virtual Machine Scale Set? 
+ // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` + + // Length of time (in minutes, between 5 and 15) a notification to be sent to the VM on the instance metadata server till the VM gets deleted. The time duration should be specified in ISO 8601 format. Defaults to PT5M. + // +kubebuilder:validation:Optional + Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` +} + +type WindowsVirtualMachineScaleSetTerminationNotificationInitParameters struct { + + // Should the termination notification be enabled on this Virtual Machine Scale Set? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Length of time (in minutes, between 5 and 15) a notification to be sent to the VM on the instance metadata server till the VM gets deleted. The time duration should be specified in ISO 8601 format. Defaults to PT5M. + Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` +} + +type WindowsVirtualMachineScaleSetTerminationNotificationObservation struct { + + // Should the termination notification be enabled on this Virtual Machine Scale Set? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Length of time (in minutes, between 5 and 15) a notification to be sent to the VM on the instance metadata server till the VM gets deleted. The time duration should be specified in ISO 8601 format. Defaults to PT5M. + Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` +} + +type WindowsVirtualMachineScaleSetTerminationNotificationParameters struct { + + // Should the termination notification be enabled on this Virtual Machine Scale Set? + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` + + // Length of time (in minutes, between 5 and 15) a notification to be sent to the VM on the instance metadata server till the VM gets deleted. The time duration should be specified in ISO 8601 format. Defaults to PT5M. 
+ // +kubebuilder:validation:Optional + Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` +} + +type WindowsVirtualMachineScaleSetWinrmListenerInitParameters struct { + + // The Secret URL of a Key Vault Certificate, which must be specified when protocol is set to Https. Changing this forces a new resource to be created. + CertificateURL *string `json:"certificateUrl,omitempty" tf:"certificate_url,omitempty"` + + // The Protocol of the WinRM Listener. Possible values are Http and Https. Changing this forces a new resource to be created. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` +} + +type WindowsVirtualMachineScaleSetWinrmListenerObservation struct { + + // The Secret URL of a Key Vault Certificate, which must be specified when protocol is set to Https. Changing this forces a new resource to be created. + CertificateURL *string `json:"certificateUrl,omitempty" tf:"certificate_url,omitempty"` + + // The Protocol of the WinRM Listener. Possible values are Http and Https. Changing this forces a new resource to be created. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` +} + +type WindowsVirtualMachineScaleSetWinrmListenerParameters struct { + + // The Secret URL of a Key Vault Certificate, which must be specified when protocol is set to Https. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + CertificateURL *string `json:"certificateUrl,omitempty" tf:"certificate_url,omitempty"` + + // The Protocol of the WinRM Listener. Possible values are Http and Https. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + Protocol *string `json:"protocol" tf:"protocol,omitempty"` +} + +// WindowsVirtualMachineScaleSetSpec defines the desired state of WindowsVirtualMachineScaleSet +type WindowsVirtualMachineScaleSetSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider WindowsVirtualMachineScaleSetParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider WindowsVirtualMachineScaleSetInitParameters `json:"initProvider,omitempty"` +} + +// WindowsVirtualMachineScaleSetStatus defines the observed state of WindowsVirtualMachineScaleSet. +type WindowsVirtualMachineScaleSetStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider WindowsVirtualMachineScaleSetObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// WindowsVirtualMachineScaleSet is the Schema for the WindowsVirtualMachineScaleSets API. Manages a Windows Virtual Machine Scale Set. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type WindowsVirtualMachineScaleSet struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.adminPasswordSecretRef)",message="spec.forProvider.adminPasswordSecretRef is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.adminUsername) || (has(self.initProvider) && has(self.initProvider.adminUsername))",message="spec.forProvider.adminUsername is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.instances) || (has(self.initProvider) && has(self.initProvider.instances))",message="spec.forProvider.instances is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in 
self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.networkInterface) || (has(self.initProvider) && has(self.initProvider.networkInterface))",message="spec.forProvider.networkInterface is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.osDisk) || (has(self.initProvider) && has(self.initProvider.osDisk))",message="spec.forProvider.osDisk is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.sku) || (has(self.initProvider) && has(self.initProvider.sku))",message="spec.forProvider.sku is a required parameter" + Spec WindowsVirtualMachineScaleSetSpec `json:"spec"` + Status WindowsVirtualMachineScaleSetStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// WindowsVirtualMachineScaleSetList contains a list of WindowsVirtualMachineScaleSets +type WindowsVirtualMachineScaleSetList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []WindowsVirtualMachineScaleSet `json:"items"` +} + +// Repository type metadata. +var ( + WindowsVirtualMachineScaleSet_Kind = "WindowsVirtualMachineScaleSet" + WindowsVirtualMachineScaleSet_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: WindowsVirtualMachineScaleSet_Kind}.String() + WindowsVirtualMachineScaleSet_KindAPIVersion = WindowsVirtualMachineScaleSet_Kind + "." 
+ CRDGroupVersion.String() + WindowsVirtualMachineScaleSet_GroupVersionKind = CRDGroupVersion.WithKind(WindowsVirtualMachineScaleSet_Kind) +) + +func init() { + SchemeBuilder.Register(&WindowsVirtualMachineScaleSet{}, &WindowsVirtualMachineScaleSetList{}) +} diff --git a/apis/consumption/v1beta1/zz_generated.conversion_spokes.go b/apis/consumption/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..0e7505ff0 --- /dev/null +++ b/apis/consumption/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,74 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this BudgetManagementGroup to the hub type. +func (tr *BudgetManagementGroup) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the BudgetManagementGroup type. +func (tr *BudgetManagementGroup) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this BudgetResourceGroup to the hub type. 
+func (tr *BudgetResourceGroup) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the BudgetResourceGroup type. +func (tr *BudgetResourceGroup) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this BudgetSubscription to the hub type. +func (tr *BudgetSubscription) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the BudgetSubscription type. 
+func (tr *BudgetSubscription) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/consumption/v1beta2/zz_budgetmanagementgroup_terraformed.go b/apis/consumption/v1beta2/zz_budgetmanagementgroup_terraformed.go new file mode 100755 index 000000000..5c02ad01d --- /dev/null +++ b/apis/consumption/v1beta2/zz_budgetmanagementgroup_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this BudgetManagementGroup +func (mg *BudgetManagementGroup) GetTerraformResourceType() string { + return "azurerm_consumption_budget_management_group" +} + +// GetConnectionDetailsMapping for this BudgetManagementGroup +func (tr *BudgetManagementGroup) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this BudgetManagementGroup +func (tr *BudgetManagementGroup) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this BudgetManagementGroup +func (tr *BudgetManagementGroup) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} 
+ +// GetID returns ID of underlying Terraform resource of this BudgetManagementGroup +func (tr *BudgetManagementGroup) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this BudgetManagementGroup +func (tr *BudgetManagementGroup) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this BudgetManagementGroup +func (tr *BudgetManagementGroup) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this BudgetManagementGroup +func (tr *BudgetManagementGroup) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this BudgetManagementGroup +func (tr *BudgetManagementGroup) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this BudgetManagementGroup using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *BudgetManagementGroup) LateInitialize(attrs []byte) (bool, error) { + params := &BudgetManagementGroupParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *BudgetManagementGroup) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/consumption/v1beta2/zz_budgetmanagementgroup_types.go b/apis/consumption/v1beta2/zz_budgetmanagementgroup_types.go new file mode 100755 index 000000000..60bbeb8d2 --- /dev/null +++ b/apis/consumption/v1beta2/zz_budgetmanagementgroup_types.go @@ -0,0 +1,502 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type BudgetManagementGroupInitParameters struct { + + // The total amount of cost to track with the budget. + Amount *float64 `json:"amount,omitempty" tf:"amount,omitempty"` + + // The ETag of the Management Group Consumption Budget. 
+ Etag *string `json:"etag,omitempty" tf:"etag,omitempty"` + + // A filter block as defined below. + Filter *FilterInitParameters `json:"filter,omitempty" tf:"filter,omitempty"` + + // The ID of the Management Group. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/management/v1beta1.ManagementGroup + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + ManagementGroupID *string `json:"managementGroupId,omitempty" tf:"management_group_id,omitempty"` + + // Reference to a ManagementGroup in management to populate managementGroupId. + // +kubebuilder:validation:Optional + ManagementGroupIDRef *v1.Reference `json:"managementGroupIdRef,omitempty" tf:"-"` + + // Selector for a ManagementGroup in management to populate managementGroupId. + // +kubebuilder:validation:Optional + ManagementGroupIDSelector *v1.Selector `json:"managementGroupIdSelector,omitempty" tf:"-"` + + // The name which should be used for this Management Group Consumption Budget. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // One or more notification blocks as defined below. + Notification []NotificationInitParameters `json:"notification,omitempty" tf:"notification,omitempty"` + + // The time covered by a budget. Tracking of the amount will be reset based on the time grain. Must be one of BillingAnnual, BillingMonth, BillingQuarter, Annually, Monthly and Quarterly. Defaults to Monthly. Changing this forces a new resource to be created. + TimeGrain *string `json:"timeGrain,omitempty" tf:"time_grain,omitempty"` + + // A time_period block as defined below. + TimePeriod *TimePeriodInitParameters `json:"timePeriod,omitempty" tf:"time_period,omitempty"` +} + +type BudgetManagementGroupObservation struct { + + // The total amount of cost to track with the budget. 
+ Amount *float64 `json:"amount,omitempty" tf:"amount,omitempty"` + + // The ETag of the Management Group Consumption Budget. + Etag *string `json:"etag,omitempty" tf:"etag,omitempty"` + + // A filter block as defined below. + Filter *FilterObservation `json:"filter,omitempty" tf:"filter,omitempty"` + + // The ID of the Management Group Consumption Budget. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The ID of the Management Group. Changing this forces a new resource to be created. + ManagementGroupID *string `json:"managementGroupId,omitempty" tf:"management_group_id,omitempty"` + + // The name which should be used for this Management Group Consumption Budget. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // One or more notification blocks as defined below. + Notification []NotificationObservation `json:"notification,omitempty" tf:"notification,omitempty"` + + // The time covered by a budget. Tracking of the amount will be reset based on the time grain. Must be one of BillingAnnual, BillingMonth, BillingQuarter, Annually, Monthly and Quarterly. Defaults to Monthly. Changing this forces a new resource to be created. + TimeGrain *string `json:"timeGrain,omitempty" tf:"time_grain,omitempty"` + + // A time_period block as defined below. + TimePeriod *TimePeriodObservation `json:"timePeriod,omitempty" tf:"time_period,omitempty"` +} + +type BudgetManagementGroupParameters struct { + + // The total amount of cost to track with the budget. + // +kubebuilder:validation:Optional + Amount *float64 `json:"amount,omitempty" tf:"amount,omitempty"` + + // The ETag of the Management Group Consumption Budget. + // +kubebuilder:validation:Optional + Etag *string `json:"etag,omitempty" tf:"etag,omitempty"` + + // A filter block as defined below. + // +kubebuilder:validation:Optional + Filter *FilterParameters `json:"filter,omitempty" tf:"filter,omitempty"` + + // The ID of the Management Group. 
Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/management/v1beta1.ManagementGroup + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + ManagementGroupID *string `json:"managementGroupId,omitempty" tf:"management_group_id,omitempty"` + + // Reference to a ManagementGroup in management to populate managementGroupId. + // +kubebuilder:validation:Optional + ManagementGroupIDRef *v1.Reference `json:"managementGroupIdRef,omitempty" tf:"-"` + + // Selector for a ManagementGroup in management to populate managementGroupId. + // +kubebuilder:validation:Optional + ManagementGroupIDSelector *v1.Selector `json:"managementGroupIdSelector,omitempty" tf:"-"` + + // The name which should be used for this Management Group Consumption Budget. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // One or more notification blocks as defined below. + // +kubebuilder:validation:Optional + Notification []NotificationParameters `json:"notification,omitempty" tf:"notification,omitempty"` + + // The time covered by a budget. Tracking of the amount will be reset based on the time grain. Must be one of BillingAnnual, BillingMonth, BillingQuarter, Annually, Monthly and Quarterly. Defaults to Monthly. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + TimeGrain *string `json:"timeGrain,omitempty" tf:"time_grain,omitempty"` + + // A time_period block as defined below. + // +kubebuilder:validation:Optional + TimePeriod *TimePeriodParameters `json:"timePeriod,omitempty" tf:"time_period,omitempty"` +} + +type DimensionInitParameters struct { + + // The name of the tag to use for the filter. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The operator to use for comparison. The allowed values are In. Defaults to In. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Specifies a list of values for the tag. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type DimensionObservation struct { + + // The name of the tag to use for the filter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The operator to use for comparison. The allowed values are In. Defaults to In. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Specifies a list of values for the tag. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type DimensionParameters struct { + + // The name of the tag to use for the filter. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The operator to use for comparison. The allowed values are In. Defaults to In. + // +kubebuilder:validation:Optional + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Specifies a list of values for the tag. + // +kubebuilder:validation:Optional + Values []*string `json:"values" tf:"values,omitempty"` +} + +type FilterInitParameters struct { + + // One or more dimension blocks as defined below to filter the budget on. + Dimension []DimensionInitParameters `json:"dimension,omitempty" tf:"dimension,omitempty"` + + // A not block as defined below to filter the budget on. This is deprecated as the API no longer supports it and will be removed in version 4.0 of the provider. + Not *NotInitParameters `json:"not,omitempty" tf:"not,omitempty"` + + // One or more tag blocks as defined below to filter the budget on. + Tag []FilterTagInitParameters `json:"tag,omitempty" tf:"tag,omitempty"` +} + +type FilterObservation struct { + + // One or more dimension blocks as defined below to filter the budget on. 
+ Dimension []DimensionObservation `json:"dimension,omitempty" tf:"dimension,omitempty"` + + // A not block as defined below to filter the budget on. This is deprecated as the API no longer supports it and will be removed in version 4.0 of the provider. + Not *NotObservation `json:"not,omitempty" tf:"not,omitempty"` + + // One or more tag blocks as defined below to filter the budget on. + Tag []FilterTagObservation `json:"tag,omitempty" tf:"tag,omitempty"` +} + +type FilterParameters struct { + + // One or more dimension blocks as defined below to filter the budget on. + // +kubebuilder:validation:Optional + Dimension []DimensionParameters `json:"dimension,omitempty" tf:"dimension,omitempty"` + + // A not block as defined below to filter the budget on. This is deprecated as the API no longer supports it and will be removed in version 4.0 of the provider. + // +kubebuilder:validation:Optional + Not *NotParameters `json:"not,omitempty" tf:"not,omitempty"` + + // One or more tag blocks as defined below to filter the budget on. + // +kubebuilder:validation:Optional + Tag []FilterTagParameters `json:"tag,omitempty" tf:"tag,omitempty"` +} + +type FilterTagInitParameters struct { + + // The name of the tag to use for the filter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The operator to use for comparison. The allowed values are In. Defaults to In. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Specifies a list of values for the tag. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type FilterTagObservation struct { + + // The name of the tag to use for the filter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The operator to use for comparison. The allowed values are In. Defaults to In. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Specifies a list of values for the tag. 
+ Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type FilterTagParameters struct { + + // The name of the tag to use for the filter. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The operator to use for comparison. The allowed values are In. Defaults to In. + // +kubebuilder:validation:Optional + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Specifies a list of values for the tag. + // +kubebuilder:validation:Optional + Values []*string `json:"values" tf:"values,omitempty"` +} + +type NotDimensionInitParameters struct { + + // The name of the tag to use for the filter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The operator to use for comparison. The allowed values are In. Defaults to In. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Specifies a list of values for the tag. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type NotDimensionObservation struct { + + // The name of the tag to use for the filter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The operator to use for comparison. The allowed values are In. Defaults to In. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Specifies a list of values for the tag. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type NotDimensionParameters struct { + + // The name of the tag to use for the filter. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The operator to use for comparison. The allowed values are In. Defaults to In. + // +kubebuilder:validation:Optional + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Specifies a list of values for the tag. 
+ // +kubebuilder:validation:Optional + Values []*string `json:"values" tf:"values,omitempty"` +} + +type NotInitParameters struct { + + // One dimension block as defined below to filter the budget on. Conflicts with tag. + Dimension *NotDimensionInitParameters `json:"dimension,omitempty" tf:"dimension,omitempty"` + + // One tag block as defined below to filter the budget on. Conflicts with dimension. + Tag *TagInitParameters `json:"tag,omitempty" tf:"tag,omitempty"` +} + +type NotObservation struct { + + // One dimension block as defined below to filter the budget on. Conflicts with tag. + Dimension *NotDimensionObservation `json:"dimension,omitempty" tf:"dimension,omitempty"` + + // One tag block as defined below to filter the budget on. Conflicts with dimension. + Tag *TagObservation `json:"tag,omitempty" tf:"tag,omitempty"` +} + +type NotParameters struct { + + // One dimension block as defined below to filter the budget on. Conflicts with tag. + // +kubebuilder:validation:Optional + Dimension *NotDimensionParameters `json:"dimension,omitempty" tf:"dimension,omitempty"` + + // One tag block as defined below to filter the budget on. Conflicts with dimension. + // +kubebuilder:validation:Optional + Tag *TagParameters `json:"tag,omitempty" tf:"tag,omitempty"` +} + +type NotificationInitParameters struct { + + // Specifies a list of email addresses to send the budget notification to when the threshold is exceeded. + ContactEmails []*string `json:"contactEmails,omitempty" tf:"contact_emails,omitempty"` + + // Should the notification be enabled? Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The comparison operator for the notification. Must be one of EqualTo, GreaterThan, or GreaterThanOrEqualTo. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Threshold value associated with a notification. Notification is sent when the cost exceeded the threshold. 
It is always percent and has to be between 0 and 1000. + Threshold *float64 `json:"threshold,omitempty" tf:"threshold,omitempty"` + + // The type of threshold for the notification. This determines whether the notification is triggered by forecasted costs or actual costs. The allowed values are Actual and Forecasted. Default is Actual. Changing this forces a new resource to be created. + ThresholdType *string `json:"thresholdType,omitempty" tf:"threshold_type,omitempty"` +} + +type NotificationObservation struct { + + // Specifies a list of email addresses to send the budget notification to when the threshold is exceeded. + ContactEmails []*string `json:"contactEmails,omitempty" tf:"contact_emails,omitempty"` + + // Should the notification be enabled? Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The comparison operator for the notification. Must be one of EqualTo, GreaterThan, or GreaterThanOrEqualTo. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Threshold value associated with a notification. Notification is sent when the cost exceeded the threshold. It is always percent and has to be between 0 and 1000. + Threshold *float64 `json:"threshold,omitempty" tf:"threshold,omitempty"` + + // The type of threshold for the notification. This determines whether the notification is triggered by forecasted costs or actual costs. The allowed values are Actual and Forecasted. Default is Actual. Changing this forces a new resource to be created. + ThresholdType *string `json:"thresholdType,omitempty" tf:"threshold_type,omitempty"` +} + +type NotificationParameters struct { + + // Specifies a list of email addresses to send the budget notification to when the threshold is exceeded. + // +kubebuilder:validation:Optional + ContactEmails []*string `json:"contactEmails" tf:"contact_emails,omitempty"` + + // Should the notification be enabled? Defaults to true. 
+ // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The comparison operator for the notification. Must be one of EqualTo, GreaterThan, or GreaterThanOrEqualTo. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // Threshold value associated with a notification. Notification is sent when the cost exceeded the threshold. It is always percent and has to be between 0 and 1000. + // +kubebuilder:validation:Optional + Threshold *float64 `json:"threshold" tf:"threshold,omitempty"` + + // The type of threshold for the notification. This determines whether the notification is triggered by forecasted costs or actual costs. The allowed values are Actual and Forecasted. Default is Actual. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ThresholdType *string `json:"thresholdType,omitempty" tf:"threshold_type,omitempty"` +} + +type TagInitParameters struct { + + // The name of the tag to use for the filter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The operator to use for comparison. The allowed values are In. Defaults to In. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Specifies a list of values for the tag. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type TagObservation struct { + + // The name of the tag to use for the filter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The operator to use for comparison. The allowed values are In. Defaults to In. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Specifies a list of values for the tag. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type TagParameters struct { + + // The name of the tag to use for the filter. 
+ // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The operator to use for comparison. The allowed values are In. Defaults to In. + // +kubebuilder:validation:Optional + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Specifies a list of values for the tag. + // +kubebuilder:validation:Optional + Values []*string `json:"values" tf:"values,omitempty"` +} + +type TimePeriodInitParameters struct { + + // The end date for the budget. If not set this will be 10 years after the start date. + EndDate *string `json:"endDate,omitempty" tf:"end_date,omitempty"` + + // The start date for the budget. The start date must be first of the month and should be less than the end date. Budget start date must be on or after June 1, 2017. Future start date should not be more than twelve months. Past start date should be selected within the timegrain period. Changing this forces a new resource to be created. + StartDate *string `json:"startDate,omitempty" tf:"start_date,omitempty"` +} + +type TimePeriodObservation struct { + + // The end date for the budget. If not set this will be 10 years after the start date. + EndDate *string `json:"endDate,omitempty" tf:"end_date,omitempty"` + + // The start date for the budget. The start date must be first of the month and should be less than the end date. Budget start date must be on or after June 1, 2017. Future start date should not be more than twelve months. Past start date should be selected within the timegrain period. Changing this forces a new resource to be created. + StartDate *string `json:"startDate,omitempty" tf:"start_date,omitempty"` +} + +type TimePeriodParameters struct { + + // The end date for the budget. If not set this will be 10 years after the start date. + // +kubebuilder:validation:Optional + EndDate *string `json:"endDate,omitempty" tf:"end_date,omitempty"` + + // The start date for the budget. 
The start date must be first of the month and should be less than the end date. Budget start date must be on or after June 1, 2017. Future start date should not be more than twelve months. Past start date should be selected within the timegrain period. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + StartDate *string `json:"startDate" tf:"start_date,omitempty"` +} + +// BudgetManagementGroupSpec defines the desired state of BudgetManagementGroup +type BudgetManagementGroupSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider BudgetManagementGroupParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider BudgetManagementGroupInitParameters `json:"initProvider,omitempty"` +} + +// BudgetManagementGroupStatus defines the observed state of BudgetManagementGroup. +type BudgetManagementGroupStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider BudgetManagementGroupObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// BudgetManagementGroup is the Schema for the BudgetManagementGroups API. Manages a Consumption Budget for a Management Group. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type BudgetManagementGroup struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.amount) || (has(self.initProvider) && has(self.initProvider.amount))",message="spec.forProvider.amount is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.notification) || (has(self.initProvider) && has(self.initProvider.notification))",message="spec.forProvider.notification is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.timePeriod) || (has(self.initProvider) && has(self.initProvider.timePeriod))",message="spec.forProvider.timePeriod is a required parameter" + Spec BudgetManagementGroupSpec `json:"spec"` + Status BudgetManagementGroupStatus 
`json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// BudgetManagementGroupList contains a list of BudgetManagementGroups +type BudgetManagementGroupList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []BudgetManagementGroup `json:"items"` +} + +// Repository type metadata. +var ( + BudgetManagementGroup_Kind = "BudgetManagementGroup" + BudgetManagementGroup_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: BudgetManagementGroup_Kind}.String() + BudgetManagementGroup_KindAPIVersion = BudgetManagementGroup_Kind + "." + CRDGroupVersion.String() + BudgetManagementGroup_GroupVersionKind = CRDGroupVersion.WithKind(BudgetManagementGroup_Kind) +) + +func init() { + SchemeBuilder.Register(&BudgetManagementGroup{}, &BudgetManagementGroupList{}) +} diff --git a/apis/consumption/v1beta2/zz_budgetresourcegroup_terraformed.go b/apis/consumption/v1beta2/zz_budgetresourcegroup_terraformed.go new file mode 100755 index 000000000..b07fc3014 --- /dev/null +++ b/apis/consumption/v1beta2/zz_budgetresourcegroup_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this BudgetResourceGroup +func (mg *BudgetResourceGroup) GetTerraformResourceType() string { + return "azurerm_consumption_budget_resource_group" +} + +// GetConnectionDetailsMapping for this BudgetResourceGroup +func (tr *BudgetResourceGroup) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this BudgetResourceGroup +func (tr *BudgetResourceGroup) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this BudgetResourceGroup +func (tr *BudgetResourceGroup) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this BudgetResourceGroup +func (tr *BudgetResourceGroup) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this BudgetResourceGroup +func (tr *BudgetResourceGroup) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this BudgetResourceGroup +func (tr *BudgetResourceGroup) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this BudgetResourceGroup +func (tr *BudgetResourceGroup) 
GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this BudgetResourceGroup +func (tr *BudgetResourceGroup) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this BudgetResourceGroup using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *BudgetResourceGroup) LateInitialize(attrs []byte) (bool, error) { + params := &BudgetResourceGroupParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *BudgetResourceGroup) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/consumption/v1beta2/zz_budgetresourcegroup_types.go b/apis/consumption/v1beta2/zz_budgetresourcegroup_types.go new file mode 100755 index 000000000..b3b3d0e01 --- /dev/null +++ b/apis/consumption/v1beta2/zz_budgetresourcegroup_types.go @@ -0,0 +1,522 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type BudgetResourceGroupFilterInitParameters struct { + + // One or more dimension blocks as defined below to filter the budget on. + Dimension []FilterDimensionInitParameters `json:"dimension,omitempty" tf:"dimension,omitempty"` + + // A not block as defined below to filter the budget on. This is deprecated as the API no longer supports it and will be removed in version 4.0 of the provider. + Not *FilterNotInitParameters `json:"not,omitempty" tf:"not,omitempty"` + + // One or more tag blocks as defined below to filter the budget on. + Tag []BudgetResourceGroupFilterTagInitParameters `json:"tag,omitempty" tf:"tag,omitempty"` +} + +type BudgetResourceGroupFilterObservation struct { + + // One or more dimension blocks as defined below to filter the budget on. + Dimension []FilterDimensionObservation `json:"dimension,omitempty" tf:"dimension,omitempty"` + + // A not block as defined below to filter the budget on. This is deprecated as the API no longer supports it and will be removed in version 4.0 of the provider. + Not *FilterNotObservation `json:"not,omitempty" tf:"not,omitempty"` + + // One or more tag blocks as defined below to filter the budget on. 
+ Tag []BudgetResourceGroupFilterTagObservation `json:"tag,omitempty" tf:"tag,omitempty"` +} + +type BudgetResourceGroupFilterParameters struct { + + // One or more dimension blocks as defined below to filter the budget on. + // +kubebuilder:validation:Optional + Dimension []FilterDimensionParameters `json:"dimension,omitempty" tf:"dimension,omitempty"` + + // A not block as defined below to filter the budget on. This is deprecated as the API no longer supports it and will be removed in version 4.0 of the provider. + // +kubebuilder:validation:Optional + Not *FilterNotParameters `json:"not,omitempty" tf:"not,omitempty"` + + // One or more tag blocks as defined below to filter the budget on. + // +kubebuilder:validation:Optional + Tag []BudgetResourceGroupFilterTagParameters `json:"tag,omitempty" tf:"tag,omitempty"` +} + +type BudgetResourceGroupFilterTagInitParameters struct { + + // The name of the tag to use for the filter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The operator to use for comparison. The allowed values are In. Defaults to In. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Specifies a list of values for the tag. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type BudgetResourceGroupFilterTagObservation struct { + + // The name of the tag to use for the filter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The operator to use for comparison. The allowed values are In. Defaults to In. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Specifies a list of values for the tag. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type BudgetResourceGroupFilterTagParameters struct { + + // The name of the tag to use for the filter. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The operator to use for comparison. The allowed values are In. Defaults to In. 
+ // +kubebuilder:validation:Optional + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Specifies a list of values for the tag. + // +kubebuilder:validation:Optional + Values []*string `json:"values" tf:"values,omitempty"` +} + +type BudgetResourceGroupInitParameters struct { + + // The total amount of cost to track with the budget. + Amount *float64 `json:"amount,omitempty" tf:"amount,omitempty"` + + // The ETag of the Resource Group Consumption Budget + Etag *string `json:"etag,omitempty" tf:"etag,omitempty"` + + // A filter block as defined below. + Filter *BudgetResourceGroupFilterInitParameters `json:"filter,omitempty" tf:"filter,omitempty"` + + // The name which should be used for this Resource Group Consumption Budget. Changing this forces a new Resource Group Consumption Budget to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // One or more notification blocks as defined below. + Notification []BudgetResourceGroupNotificationInitParameters `json:"notification,omitempty" tf:"notification,omitempty"` + + // The ID of the Resource Group to create the consumption budget for in the form of /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/resourceGroup1. Changing this forces a new Resource Group Consumption Budget to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + ResourceGroupID *string `json:"resourceGroupId,omitempty" tf:"resource_group_id,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupId. + // +kubebuilder:validation:Optional + ResourceGroupIDRef *v1.Reference `json:"resourceGroupIdRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupId. 
+ // +kubebuilder:validation:Optional + ResourceGroupIDSelector *v1.Selector `json:"resourceGroupIdSelector,omitempty" tf:"-"` + + // The time covered by a budget. Tracking of the amount will be reset based on the time grain. Must be one of BillingAnnual, BillingMonth, BillingQuarter, Annually, Monthly and Quarterly. Defaults to Monthly. Changing this forces a new resource to be created. + TimeGrain *string `json:"timeGrain,omitempty" tf:"time_grain,omitempty"` + + // A time_period block as defined below. + TimePeriod *BudgetResourceGroupTimePeriodInitParameters `json:"timePeriod,omitempty" tf:"time_period,omitempty"` +} + +type BudgetResourceGroupNotificationInitParameters struct { + + // Specifies a list of email addresses to send the budget notification to when the threshold is exceeded. + ContactEmails []*string `json:"contactEmails,omitempty" tf:"contact_emails,omitempty"` + + // Specifies a list of Action Group IDs to send the budget notification to when the threshold is exceeded. + ContactGroups []*string `json:"contactGroups,omitempty" tf:"contact_groups,omitempty"` + + // Specifies a list of contact roles to send the budget notification to when the threshold is exceeded. + ContactRoles []*string `json:"contactRoles,omitempty" tf:"contact_roles,omitempty"` + + // Should the notification be enabled? Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The comparison operator for the notification. Must be one of EqualTo, GreaterThan, or GreaterThanOrEqualTo. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Threshold value associated with a notification. Notification is sent when the cost exceeded the threshold. It is always percent and has to be between 0 and 1000. + Threshold *float64 `json:"threshold,omitempty" tf:"threshold,omitempty"` + + // The type of threshold for the notification. This determines whether the notification is triggered by forecasted costs or actual costs. 
The allowed values are Actual and Forecasted. Default is Actual. Changing this forces a new resource to be created. + ThresholdType *string `json:"thresholdType,omitempty" tf:"threshold_type,omitempty"` +} + +type BudgetResourceGroupNotificationObservation struct { + + // Specifies a list of email addresses to send the budget notification to when the threshold is exceeded. + ContactEmails []*string `json:"contactEmails,omitempty" tf:"contact_emails,omitempty"` + + // Specifies a list of Action Group IDs to send the budget notification to when the threshold is exceeded. + ContactGroups []*string `json:"contactGroups,omitempty" tf:"contact_groups,omitempty"` + + // Specifies a list of contact roles to send the budget notification to when the threshold is exceeded. + ContactRoles []*string `json:"contactRoles,omitempty" tf:"contact_roles,omitempty"` + + // Should the notification be enabled? Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The comparison operator for the notification. Must be one of EqualTo, GreaterThan, or GreaterThanOrEqualTo. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Threshold value associated with a notification. Notification is sent when the cost exceeded the threshold. It is always percent and has to be between 0 and 1000. + Threshold *float64 `json:"threshold,omitempty" tf:"threshold,omitempty"` + + // The type of threshold for the notification. This determines whether the notification is triggered by forecasted costs or actual costs. The allowed values are Actual and Forecasted. Default is Actual. Changing this forces a new resource to be created. + ThresholdType *string `json:"thresholdType,omitempty" tf:"threshold_type,omitempty"` +} + +type BudgetResourceGroupNotificationParameters struct { + + // Specifies a list of email addresses to send the budget notification to when the threshold is exceeded. 
+ // +kubebuilder:validation:Optional + ContactEmails []*string `json:"contactEmails,omitempty" tf:"contact_emails,omitempty"` + + // Specifies a list of Action Group IDs to send the budget notification to when the threshold is exceeded. + // +kubebuilder:validation:Optional + ContactGroups []*string `json:"contactGroups,omitempty" tf:"contact_groups,omitempty"` + + // Specifies a list of contact roles to send the budget notification to when the threshold is exceeded. + // +kubebuilder:validation:Optional + ContactRoles []*string `json:"contactRoles,omitempty" tf:"contact_roles,omitempty"` + + // Should the notification be enabled? Defaults to true. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The comparison operator for the notification. Must be one of EqualTo, GreaterThan, or GreaterThanOrEqualTo. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // Threshold value associated with a notification. Notification is sent when the cost exceeded the threshold. It is always percent and has to be between 0 and 1000. + // +kubebuilder:validation:Optional + Threshold *float64 `json:"threshold" tf:"threshold,omitempty"` + + // The type of threshold for the notification. This determines whether the notification is triggered by forecasted costs or actual costs. The allowed values are Actual and Forecasted. Default is Actual. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ThresholdType *string `json:"thresholdType,omitempty" tf:"threshold_type,omitempty"` +} + +type BudgetResourceGroupObservation struct { + + // The total amount of cost to track with the budget. + Amount *float64 `json:"amount,omitempty" tf:"amount,omitempty"` + + // The ETag of the Resource Group Consumption Budget + Etag *string `json:"etag,omitempty" tf:"etag,omitempty"` + + // A filter block as defined below. 
+ Filter *BudgetResourceGroupFilterObservation `json:"filter,omitempty" tf:"filter,omitempty"` + + // The ID of the Resource Group Consumption Budget. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name which should be used for this Resource Group Consumption Budget. Changing this forces a new Resource Group Consumption Budget to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // One or more notification blocks as defined below. + Notification []BudgetResourceGroupNotificationObservation `json:"notification,omitempty" tf:"notification,omitempty"` + + // The ID of the Resource Group to create the consumption budget for in the form of /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/resourceGroup1. Changing this forces a new Resource Group Consumption Budget to be created. + ResourceGroupID *string `json:"resourceGroupId,omitempty" tf:"resource_group_id,omitempty"` + + // The time covered by a budget. Tracking of the amount will be reset based on the time grain. Must be one of BillingAnnual, BillingMonth, BillingQuarter, Annually, Monthly and Quarterly. Defaults to Monthly. Changing this forces a new resource to be created. + TimeGrain *string `json:"timeGrain,omitempty" tf:"time_grain,omitempty"` + + // A time_period block as defined below. + TimePeriod *BudgetResourceGroupTimePeriodObservation `json:"timePeriod,omitempty" tf:"time_period,omitempty"` +} + +type BudgetResourceGroupParameters struct { + + // The total amount of cost to track with the budget. + // +kubebuilder:validation:Optional + Amount *float64 `json:"amount,omitempty" tf:"amount,omitempty"` + + // The ETag of the Resource Group Consumption Budget + // +kubebuilder:validation:Optional + Etag *string `json:"etag,omitempty" tf:"etag,omitempty"` + + // A filter block as defined below. 
+ // +kubebuilder:validation:Optional + Filter *BudgetResourceGroupFilterParameters `json:"filter,omitempty" tf:"filter,omitempty"` + + // The name which should be used for this Resource Group Consumption Budget. Changing this forces a new Resource Group Consumption Budget to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // One or more notification blocks as defined below. + // +kubebuilder:validation:Optional + Notification []BudgetResourceGroupNotificationParameters `json:"notification,omitempty" tf:"notification,omitempty"` + + // The ID of the Resource Group to create the consumption budget for in the form of /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/resourceGroup1. Changing this forces a new Resource Group Consumption Budget to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + ResourceGroupID *string `json:"resourceGroupId,omitempty" tf:"resource_group_id,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupId. + // +kubebuilder:validation:Optional + ResourceGroupIDRef *v1.Reference `json:"resourceGroupIdRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupId. + // +kubebuilder:validation:Optional + ResourceGroupIDSelector *v1.Selector `json:"resourceGroupIdSelector,omitempty" tf:"-"` + + // The time covered by a budget. Tracking of the amount will be reset based on the time grain. Must be one of BillingAnnual, BillingMonth, BillingQuarter, Annually, Monthly and Quarterly. Defaults to Monthly. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + TimeGrain *string `json:"timeGrain,omitempty" tf:"time_grain,omitempty"` + + // A time_period block as defined below. + // +kubebuilder:validation:Optional + TimePeriod *BudgetResourceGroupTimePeriodParameters `json:"timePeriod,omitempty" tf:"time_period,omitempty"` +} + +type BudgetResourceGroupTimePeriodInitParameters struct { + + // The end date for the budget. If not set this will be 10 years after the start date. + EndDate *string `json:"endDate,omitempty" tf:"end_date,omitempty"` + + // The start date for the budget. The start date must be first of the month and should be less than the end date. Budget start date must be on or after June 1, 2017. Future start date should not be more than twelve months. Past start date should be selected within the timegrain period. Changing this forces a new Resource Group Consumption Budget to be created. + StartDate *string `json:"startDate,omitempty" tf:"start_date,omitempty"` +} + +type BudgetResourceGroupTimePeriodObservation struct { + + // The end date for the budget. If not set this will be 10 years after the start date. + EndDate *string `json:"endDate,omitempty" tf:"end_date,omitempty"` + + // The start date for the budget. The start date must be first of the month and should be less than the end date. Budget start date must be on or after June 1, 2017. Future start date should not be more than twelve months. Past start date should be selected within the timegrain period. Changing this forces a new Resource Group Consumption Budget to be created. + StartDate *string `json:"startDate,omitempty" tf:"start_date,omitempty"` +} + +type BudgetResourceGroupTimePeriodParameters struct { + + // The end date for the budget. If not set this will be 10 years after the start date. + // +kubebuilder:validation:Optional + EndDate *string `json:"endDate,omitempty" tf:"end_date,omitempty"` + + // The start date for the budget. 
The start date must be first of the month and should be less than the end date. Budget start date must be on or after June 1, 2017. Future start date should not be more than twelve months. Past start date should be selected within the timegrain period. Changing this forces a new Resource Group Consumption Budget to be created. + // +kubebuilder:validation:Optional + StartDate *string `json:"startDate" tf:"start_date,omitempty"` +} + +type FilterDimensionInitParameters struct { + + // The name of the tag to use for the filter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The operator to use for comparison. The allowed values are In. Defaults to In. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Specifies a list of values for the tag. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type FilterDimensionObservation struct { + + // The name of the tag to use for the filter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The operator to use for comparison. The allowed values are In. Defaults to In. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Specifies a list of values for the tag. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type FilterDimensionParameters struct { + + // The name of the tag to use for the filter. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The operator to use for comparison. The allowed values are In. Defaults to In. + // +kubebuilder:validation:Optional + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Specifies a list of values for the tag. + // +kubebuilder:validation:Optional + Values []*string `json:"values" tf:"values,omitempty"` +} + +type FilterNotDimensionInitParameters struct { + + // The name of the tag to use for the filter. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The operator to use for comparison. The allowed values are In. Defaults to In. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Specifies a list of values for the tag. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type FilterNotDimensionObservation struct { + + // The name of the tag to use for the filter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The operator to use for comparison. The allowed values are In. Defaults to In. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Specifies a list of values for the tag. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type FilterNotDimensionParameters struct { + + // The name of the tag to use for the filter. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The operator to use for comparison. The allowed values are In. Defaults to In. + // +kubebuilder:validation:Optional + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Specifies a list of values for the tag. + // +kubebuilder:validation:Optional + Values []*string `json:"values" tf:"values,omitempty"` +} + +type FilterNotInitParameters struct { + + // One dimension block as defined below to filter the budget on. Conflicts with tag. + Dimension *FilterNotDimensionInitParameters `json:"dimension,omitempty" tf:"dimension,omitempty"` + + // One tag block as defined below to filter the budget on. Conflicts with dimension. + Tag *NotTagInitParameters `json:"tag,omitempty" tf:"tag,omitempty"` +} + +type FilterNotObservation struct { + + // One dimension block as defined below to filter the budget on. Conflicts with tag. + Dimension *FilterNotDimensionObservation `json:"dimension,omitempty" tf:"dimension,omitempty"` + + // One tag block as defined below to filter the budget on. Conflicts with dimension. 
+ Tag *NotTagObservation `json:"tag,omitempty" tf:"tag,omitempty"` +} + +type FilterNotParameters struct { + + // One dimension block as defined below to filter the budget on. Conflicts with tag. + // +kubebuilder:validation:Optional + Dimension *FilterNotDimensionParameters `json:"dimension,omitempty" tf:"dimension,omitempty"` + + // One tag block as defined below to filter the budget on. Conflicts with dimension. + // +kubebuilder:validation:Optional + Tag *NotTagParameters `json:"tag,omitempty" tf:"tag,omitempty"` +} + +type NotTagInitParameters struct { + + // The name of the tag to use for the filter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The operator to use for comparison. The allowed values are In. Defaults to In. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Specifies a list of values for the tag. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type NotTagObservation struct { + + // The name of the tag to use for the filter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The operator to use for comparison. The allowed values are In. Defaults to In. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Specifies a list of values for the tag. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type NotTagParameters struct { + + // The name of the tag to use for the filter. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The operator to use for comparison. The allowed values are In. Defaults to In. + // +kubebuilder:validation:Optional + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Specifies a list of values for the tag. 
+ // +kubebuilder:validation:Optional + Values []*string `json:"values" tf:"values,omitempty"` +} + +// BudgetResourceGroupSpec defines the desired state of BudgetResourceGroup +type BudgetResourceGroupSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider BudgetResourceGroupParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider BudgetResourceGroupInitParameters `json:"initProvider,omitempty"` +} + +// BudgetResourceGroupStatus defines the observed state of BudgetResourceGroup. +type BudgetResourceGroupStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider BudgetResourceGroupObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// BudgetResourceGroup is the Schema for the BudgetResourceGroups API. Manages a Resource Group Consumption Budget. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type BudgetResourceGroup struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.amount) || (has(self.initProvider) && has(self.initProvider.amount))",message="spec.forProvider.amount is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.notification) || (has(self.initProvider) && has(self.initProvider.notification))",message="spec.forProvider.notification is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.timePeriod) || (has(self.initProvider) && has(self.initProvider.timePeriod))",message="spec.forProvider.timePeriod is a required parameter" + Spec BudgetResourceGroupSpec `json:"spec"` + Status BudgetResourceGroupStatus 
`json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// BudgetResourceGroupList contains a list of BudgetResourceGroups +type BudgetResourceGroupList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []BudgetResourceGroup `json:"items"` +} + +// Repository type metadata. +var ( + BudgetResourceGroup_Kind = "BudgetResourceGroup" + BudgetResourceGroup_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: BudgetResourceGroup_Kind}.String() + BudgetResourceGroup_KindAPIVersion = BudgetResourceGroup_Kind + "." + CRDGroupVersion.String() + BudgetResourceGroup_GroupVersionKind = CRDGroupVersion.WithKind(BudgetResourceGroup_Kind) +) + +func init() { + SchemeBuilder.Register(&BudgetResourceGroup{}, &BudgetResourceGroupList{}) +} diff --git a/apis/consumption/v1beta2/zz_budgetsubscription_terraformed.go b/apis/consumption/v1beta2/zz_budgetsubscription_terraformed.go new file mode 100755 index 000000000..a6bfa1336 --- /dev/null +++ b/apis/consumption/v1beta2/zz_budgetsubscription_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this BudgetSubscription +func (mg *BudgetSubscription) GetTerraformResourceType() string { + return "azurerm_consumption_budget_subscription" +} + +// GetConnectionDetailsMapping for this BudgetSubscription +func (tr *BudgetSubscription) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this BudgetSubscription +func (tr *BudgetSubscription) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this BudgetSubscription +func (tr *BudgetSubscription) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this BudgetSubscription +func (tr *BudgetSubscription) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this BudgetSubscription +func (tr *BudgetSubscription) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this BudgetSubscription +func (tr *BudgetSubscription) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this BudgetSubscription +func (tr *BudgetSubscription) GetInitParameters() 
(map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this BudgetSubscription +func (tr *BudgetSubscription) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this BudgetSubscription using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *BudgetSubscription) LateInitialize(attrs []byte) (bool, error) { + params := &BudgetSubscriptionParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *BudgetSubscription) GetTerraformSchemaVersion() int { + return 2 +} diff --git a/apis/consumption/v1beta2/zz_budgetsubscription_types.go b/apis/consumption/v1beta2/zz_budgetsubscription_types.go new file mode 100755 index 000000000..6b499881a --- /dev/null +++ b/apis/consumption/v1beta2/zz_budgetsubscription_types.go @@ -0,0 +1,512 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type BudgetSubscriptionFilterDimensionInitParameters struct { + + // The name of the tag to use for the filter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The operator to use for comparison. The allowed values are In. Defaults to In. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Specifies a list of values for the tag. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type BudgetSubscriptionFilterDimensionObservation struct { + + // The name of the tag to use for the filter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The operator to use for comparison. The allowed values are In. Defaults to In. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Specifies a list of values for the tag. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type BudgetSubscriptionFilterDimensionParameters struct { + + // The name of the tag to use for the filter. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The operator to use for comparison. The allowed values are In. Defaults to In. 
+ // +kubebuilder:validation:Optional + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Specifies a list of values for the tag. + // +kubebuilder:validation:Optional + Values []*string `json:"values" tf:"values,omitempty"` +} + +type BudgetSubscriptionFilterInitParameters struct { + + // One or more dimension blocks as defined below to filter the budget on. + Dimension []BudgetSubscriptionFilterDimensionInitParameters `json:"dimension,omitempty" tf:"dimension,omitempty"` + + // A not block as defined below to filter the budget on. This is deprecated as the API no longer supports it and will be removed in version 4.0 of the provider. + Not *BudgetSubscriptionFilterNotInitParameters `json:"not,omitempty" tf:"not,omitempty"` + + // One or more tag blocks as defined below to filter the budget on. + Tag []BudgetSubscriptionFilterTagInitParameters `json:"tag,omitempty" tf:"tag,omitempty"` +} + +type BudgetSubscriptionFilterNotDimensionInitParameters struct { + + // The name of the tag to use for the filter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The operator to use for comparison. The allowed values are In. Defaults to In. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Specifies a list of values for the tag. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type BudgetSubscriptionFilterNotDimensionObservation struct { + + // The name of the tag to use for the filter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The operator to use for comparison. The allowed values are In. Defaults to In. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Specifies a list of values for the tag. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type BudgetSubscriptionFilterNotDimensionParameters struct { + + // The name of the tag to use for the filter. 
+ // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The operator to use for comparison. The allowed values are In. Defaults to In. + // +kubebuilder:validation:Optional + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Specifies a list of values for the tag. + // +kubebuilder:validation:Optional + Values []*string `json:"values" tf:"values,omitempty"` +} + +type BudgetSubscriptionFilterNotInitParameters struct { + + // One dimension block as defined below to filter the budget on. Conflicts with tag. + Dimension *BudgetSubscriptionFilterNotDimensionInitParameters `json:"dimension,omitempty" tf:"dimension,omitempty"` + + // One tag block as defined below to filter the budget on. Conflicts with dimension. + Tag *FilterNotTagInitParameters `json:"tag,omitempty" tf:"tag,omitempty"` +} + +type BudgetSubscriptionFilterNotObservation struct { + + // One dimension block as defined below to filter the budget on. Conflicts with tag. + Dimension *BudgetSubscriptionFilterNotDimensionObservation `json:"dimension,omitempty" tf:"dimension,omitempty"` + + // One tag block as defined below to filter the budget on. Conflicts with dimension. + Tag *FilterNotTagObservation `json:"tag,omitempty" tf:"tag,omitempty"` +} + +type BudgetSubscriptionFilterNotParameters struct { + + // One dimension block as defined below to filter the budget on. Conflicts with tag. + // +kubebuilder:validation:Optional + Dimension *BudgetSubscriptionFilterNotDimensionParameters `json:"dimension,omitempty" tf:"dimension,omitempty"` + + // One tag block as defined below to filter the budget on. Conflicts with dimension. + // +kubebuilder:validation:Optional + Tag *FilterNotTagParameters `json:"tag,omitempty" tf:"tag,omitempty"` +} + +type BudgetSubscriptionFilterObservation struct { + + // One or more dimension blocks as defined below to filter the budget on. 
+ Dimension []BudgetSubscriptionFilterDimensionObservation `json:"dimension,omitempty" tf:"dimension,omitempty"` + + // A not block as defined below to filter the budget on. This is deprecated as the API no longer supports it and will be removed in version 4.0 of the provider. + Not *BudgetSubscriptionFilterNotObservation `json:"not,omitempty" tf:"not,omitempty"` + + // One or more tag blocks as defined below to filter the budget on. + Tag []BudgetSubscriptionFilterTagObservation `json:"tag,omitempty" tf:"tag,omitempty"` +} + +type BudgetSubscriptionFilterParameters struct { + + // One or more dimension blocks as defined below to filter the budget on. + // +kubebuilder:validation:Optional + Dimension []BudgetSubscriptionFilterDimensionParameters `json:"dimension,omitempty" tf:"dimension,omitempty"` + + // A not block as defined below to filter the budget on. This is deprecated as the API no longer supports it and will be removed in version 4.0 of the provider. + // +kubebuilder:validation:Optional + Not *BudgetSubscriptionFilterNotParameters `json:"not,omitempty" tf:"not,omitempty"` + + // One or more tag blocks as defined below to filter the budget on. + // +kubebuilder:validation:Optional + Tag []BudgetSubscriptionFilterTagParameters `json:"tag,omitempty" tf:"tag,omitempty"` +} + +type BudgetSubscriptionFilterTagInitParameters struct { + + // The name of the tag to use for the filter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The operator to use for comparison. The allowed values are In. Defaults to In. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Specifies a list of values for the tag. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type BudgetSubscriptionFilterTagObservation struct { + + // The name of the tag to use for the filter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The operator to use for comparison. The allowed values are In. Defaults to In. 
+ Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Specifies a list of values for the tag. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type BudgetSubscriptionFilterTagParameters struct { + + // The name of the tag to use for the filter. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The operator to use for comparison. The allowed values are In. Defaults to In. + // +kubebuilder:validation:Optional + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Specifies a list of values for the tag. + // +kubebuilder:validation:Optional + Values []*string `json:"values" tf:"values,omitempty"` +} + +type BudgetSubscriptionInitParameters struct { + + // The total amount of cost to track with the budget. + Amount *float64 `json:"amount,omitempty" tf:"amount,omitempty"` + + // The ETag of the Subscription Consumption Budget. + Etag *string `json:"etag,omitempty" tf:"etag,omitempty"` + + // A filter block as defined below. + Filter *BudgetSubscriptionFilterInitParameters `json:"filter,omitempty" tf:"filter,omitempty"` + + // One or more notification blocks as defined below. + Notification []BudgetSubscriptionNotificationInitParameters `json:"notification,omitempty" tf:"notification,omitempty"` + + // The ID of the Subscription for which to create a Consumption Budget. Changing this forces a new resource to be created. + SubscriptionID *string `json:"subscriptionId,omitempty" tf:"subscription_id,omitempty"` + + // The time covered by a budget. Tracking of the amount will be reset based on the time grain. Must be one of BillingAnnual, BillingMonth, BillingQuarter, Annually, Monthly and Quarterly. Defaults to Monthly. Changing this forces a new resource to be created. + TimeGrain *string `json:"timeGrain,omitempty" tf:"time_grain,omitempty"` + + // A time_period block as defined below. 
+ TimePeriod *BudgetSubscriptionTimePeriodInitParameters `json:"timePeriod,omitempty" tf:"time_period,omitempty"` +} + +type BudgetSubscriptionNotificationInitParameters struct { + + // Specifies a list of email addresses to send the budget notification to when the threshold is exceeded. + ContactEmails []*string `json:"contactEmails,omitempty" tf:"contact_emails,omitempty"` + + // Specifies a list of Action Group IDs to send the budget notification to when the threshold is exceeded. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/insights/v1beta2.MonitorActionGroup + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + ContactGroups []*string `json:"contactGroups,omitempty" tf:"contact_groups,omitempty"` + + // References to MonitorActionGroup in insights to populate contactGroups. + // +kubebuilder:validation:Optional + ContactGroupsRefs []v1.Reference `json:"contactGroupsRefs,omitempty" tf:"-"` + + // Selector for a list of MonitorActionGroup in insights to populate contactGroups. + // +kubebuilder:validation:Optional + ContactGroupsSelector *v1.Selector `json:"contactGroupsSelector,omitempty" tf:"-"` + + // Specifies a list of contact roles to send the budget notification to when the threshold is exceeded. + ContactRoles []*string `json:"contactRoles,omitempty" tf:"contact_roles,omitempty"` + + // Should the notification be enabled? Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The comparison operator for the notification. Must be one of EqualTo, GreaterThan, or GreaterThanOrEqualTo. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Threshold value associated with a notification. Notification is sent when the cost exceeded the threshold. It is always percent and has to be between 0 and 1000. 
+ Threshold *float64 `json:"threshold,omitempty" tf:"threshold,omitempty"` + + // The type of threshold for the notification. This determines whether the notification is triggered by forecasted costs or actual costs. The allowed values are Actual and Forecasted. Default is Actual. Changing this forces a new resource to be created. + ThresholdType *string `json:"thresholdType,omitempty" tf:"threshold_type,omitempty"` +} + +type BudgetSubscriptionNotificationObservation struct { + + // Specifies a list of email addresses to send the budget notification to when the threshold is exceeded. + ContactEmails []*string `json:"contactEmails,omitempty" tf:"contact_emails,omitempty"` + + // Specifies a list of Action Group IDs to send the budget notification to when the threshold is exceeded. + ContactGroups []*string `json:"contactGroups,omitempty" tf:"contact_groups,omitempty"` + + // Specifies a list of contact roles to send the budget notification to when the threshold is exceeded. + ContactRoles []*string `json:"contactRoles,omitempty" tf:"contact_roles,omitempty"` + + // Should the notification be enabled? Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The comparison operator for the notification. Must be one of EqualTo, GreaterThan, or GreaterThanOrEqualTo. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Threshold value associated with a notification. Notification is sent when the cost exceeded the threshold. It is always percent and has to be between 0 and 1000. + Threshold *float64 `json:"threshold,omitempty" tf:"threshold,omitempty"` + + // The type of threshold for the notification. This determines whether the notification is triggered by forecasted costs or actual costs. The allowed values are Actual and Forecasted. Default is Actual. Changing this forces a new resource to be created. 
+ ThresholdType *string `json:"thresholdType,omitempty" tf:"threshold_type,omitempty"` +} + +type BudgetSubscriptionNotificationParameters struct { + + // Specifies a list of email addresses to send the budget notification to when the threshold is exceeded. + // +kubebuilder:validation:Optional + ContactEmails []*string `json:"contactEmails,omitempty" tf:"contact_emails,omitempty"` + + // Specifies a list of Action Group IDs to send the budget notification to when the threshold is exceeded. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/insights/v1beta2.MonitorActionGroup + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + ContactGroups []*string `json:"contactGroups,omitempty" tf:"contact_groups,omitempty"` + + // References to MonitorActionGroup in insights to populate contactGroups. + // +kubebuilder:validation:Optional + ContactGroupsRefs []v1.Reference `json:"contactGroupsRefs,omitempty" tf:"-"` + + // Selector for a list of MonitorActionGroup in insights to populate contactGroups. + // +kubebuilder:validation:Optional + ContactGroupsSelector *v1.Selector `json:"contactGroupsSelector,omitempty" tf:"-"` + + // Specifies a list of contact roles to send the budget notification to when the threshold is exceeded. + // +kubebuilder:validation:Optional + ContactRoles []*string `json:"contactRoles,omitempty" tf:"contact_roles,omitempty"` + + // Should the notification be enabled? Defaults to true. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The comparison operator for the notification. Must be one of EqualTo, GreaterThan, or GreaterThanOrEqualTo. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // Threshold value associated with a notification. Notification is sent when the cost exceeded the threshold. 
It is always percent and has to be between 0 and 1000. + // +kubebuilder:validation:Optional + Threshold *float64 `json:"threshold" tf:"threshold,omitempty"` + + // The type of threshold for the notification. This determines whether the notification is triggered by forecasted costs or actual costs. The allowed values are Actual and Forecasted. Default is Actual. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ThresholdType *string `json:"thresholdType,omitempty" tf:"threshold_type,omitempty"` +} + +type BudgetSubscriptionObservation struct { + + // The total amount of cost to track with the budget. + Amount *float64 `json:"amount,omitempty" tf:"amount,omitempty"` + + // The ETag of the Subscription Consumption Budget. + Etag *string `json:"etag,omitempty" tf:"etag,omitempty"` + + // A filter block as defined below. + Filter *BudgetSubscriptionFilterObservation `json:"filter,omitempty" tf:"filter,omitempty"` + + // The ID of the Subscription Consumption Budget. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // One or more notification blocks as defined below. + Notification []BudgetSubscriptionNotificationObservation `json:"notification,omitempty" tf:"notification,omitempty"` + + // The ID of the Subscription for which to create a Consumption Budget. Changing this forces a new resource to be created. + SubscriptionID *string `json:"subscriptionId,omitempty" tf:"subscription_id,omitempty"` + + // The time covered by a budget. Tracking of the amount will be reset based on the time grain. Must be one of BillingAnnual, BillingMonth, BillingQuarter, Annually, Monthly and Quarterly. Defaults to Monthly. Changing this forces a new resource to be created. + TimeGrain *string `json:"timeGrain,omitempty" tf:"time_grain,omitempty"` + + // A time_period block as defined below. 
+ TimePeriod *BudgetSubscriptionTimePeriodObservation `json:"timePeriod,omitempty" tf:"time_period,omitempty"` +} + +type BudgetSubscriptionParameters struct { + + // The total amount of cost to track with the budget. + // +kubebuilder:validation:Optional + Amount *float64 `json:"amount,omitempty" tf:"amount,omitempty"` + + // The ETag of the Subscription Consumption Budget. + // +kubebuilder:validation:Optional + Etag *string `json:"etag,omitempty" tf:"etag,omitempty"` + + // A filter block as defined below. + // +kubebuilder:validation:Optional + Filter *BudgetSubscriptionFilterParameters `json:"filter,omitempty" tf:"filter,omitempty"` + + // One or more notification blocks as defined below. + // +kubebuilder:validation:Optional + Notification []BudgetSubscriptionNotificationParameters `json:"notification,omitempty" tf:"notification,omitempty"` + + // The ID of the Subscription for which to create a Consumption Budget. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + SubscriptionID *string `json:"subscriptionId,omitempty" tf:"subscription_id,omitempty"` + + // The time covered by a budget. Tracking of the amount will be reset based on the time grain. Must be one of BillingAnnual, BillingMonth, BillingQuarter, Annually, Monthly and Quarterly. Defaults to Monthly. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + TimeGrain *string `json:"timeGrain,omitempty" tf:"time_grain,omitempty"` + + // A time_period block as defined below. + // +kubebuilder:validation:Optional + TimePeriod *BudgetSubscriptionTimePeriodParameters `json:"timePeriod,omitempty" tf:"time_period,omitempty"` +} + +type BudgetSubscriptionTimePeriodInitParameters struct { + + // The end date for the budget. If not set this will be 10 years after the start date. + EndDate *string `json:"endDate,omitempty" tf:"end_date,omitempty"` + + // The start date for the budget. 
The start date must be first of the month and should be less than the end date. Budget start date must be on or after June 1, 2017. Future start date should not be more than twelve months. Past start date should be selected within the timegrain period. Changing this forces a new Subscription Consumption Budget to be created. + StartDate *string `json:"startDate,omitempty" tf:"start_date,omitempty"` +} + +type BudgetSubscriptionTimePeriodObservation struct { + + // The end date for the budget. If not set this will be 10 years after the start date. + EndDate *string `json:"endDate,omitempty" tf:"end_date,omitempty"` + + // The start date for the budget. The start date must be first of the month and should be less than the end date. Budget start date must be on or after June 1, 2017. Future start date should not be more than twelve months. Past start date should be selected within the timegrain period. Changing this forces a new Subscription Consumption Budget to be created. + StartDate *string `json:"startDate,omitempty" tf:"start_date,omitempty"` +} + +type BudgetSubscriptionTimePeriodParameters struct { + + // The end date for the budget. If not set this will be 10 years after the start date. + // +kubebuilder:validation:Optional + EndDate *string `json:"endDate,omitempty" tf:"end_date,omitempty"` + + // The start date for the budget. The start date must be first of the month and should be less than the end date. Budget start date must be on or after June 1, 2017. Future start date should not be more than twelve months. Past start date should be selected within the timegrain period. Changing this forces a new Subscription Consumption Budget to be created. + // +kubebuilder:validation:Optional + StartDate *string `json:"startDate" tf:"start_date,omitempty"` +} + +type FilterNotTagInitParameters struct { + + // The name of the tag to use for the filter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The operator to use for comparison. 
The allowed values are In. Defaults to In. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Specifies a list of values for the tag. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type FilterNotTagObservation struct { + + // The name of the tag to use for the filter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The operator to use for comparison. The allowed values are In. Defaults to In. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Specifies a list of values for the tag. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type FilterNotTagParameters struct { + + // The name of the tag to use for the filter. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The operator to use for comparison. The allowed values are In. Defaults to In. + // +kubebuilder:validation:Optional + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Specifies a list of values for the tag. + // +kubebuilder:validation:Optional + Values []*string `json:"values" tf:"values,omitempty"` +} + +// BudgetSubscriptionSpec defines the desired state of BudgetSubscription +type BudgetSubscriptionSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider BudgetSubscriptionParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider BudgetSubscriptionInitParameters `json:"initProvider,omitempty"` +} + +// BudgetSubscriptionStatus defines the observed state of BudgetSubscription. +type BudgetSubscriptionStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider BudgetSubscriptionObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// BudgetSubscription is the Schema for the BudgetSubscriptions API. Manages a Subscription Consumption Budget. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type BudgetSubscription struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.amount) || (has(self.initProvider) && has(self.initProvider.amount))",message="spec.forProvider.amount is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.notification) || (has(self.initProvider) && has(self.initProvider.notification))",message="spec.forProvider.notification is a required parameter" + // 
+kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.subscriptionId) || (has(self.initProvider) && has(self.initProvider.subscriptionId))",message="spec.forProvider.subscriptionId is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.timePeriod) || (has(self.initProvider) && has(self.initProvider.timePeriod))",message="spec.forProvider.timePeriod is a required parameter" + Spec BudgetSubscriptionSpec `json:"spec"` + Status BudgetSubscriptionStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// BudgetSubscriptionList contains a list of BudgetSubscriptions +type BudgetSubscriptionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []BudgetSubscription `json:"items"` +} + +// Repository type metadata. +var ( + BudgetSubscription_Kind = "BudgetSubscription" + BudgetSubscription_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: BudgetSubscription_Kind}.String() + BudgetSubscription_KindAPIVersion = BudgetSubscription_Kind + "." + CRDGroupVersion.String() + BudgetSubscription_GroupVersionKind = CRDGroupVersion.WithKind(BudgetSubscription_Kind) +) + +func init() { + SchemeBuilder.Register(&BudgetSubscription{}, &BudgetSubscriptionList{}) +} diff --git a/apis/consumption/v1beta1/zz_generated.conversion_hubs.go b/apis/consumption/v1beta2/zz_generated.conversion_hubs.go similarity index 96% rename from apis/consumption/v1beta1/zz_generated.conversion_hubs.go rename to apis/consumption/v1beta2/zz_generated.conversion_hubs.go index 65cb98739..0281e9c97 100755 --- a/apis/consumption/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/consumption/v1beta2/zz_generated.conversion_hubs.go @@ -4,7 +4,7 @@ // Code generated by upjet. 
DO NOT EDIT. -package v1beta1 +package v1beta2 // Hub marks this type as a conversion hub. func (tr *BudgetManagementGroup) Hub() {} diff --git a/apis/consumption/v1beta2/zz_generated.deepcopy.go b/apis/consumption/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..e4cf75baf --- /dev/null +++ b/apis/consumption/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,3471 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BudgetManagementGroup) DeepCopyInto(out *BudgetManagementGroup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetManagementGroup. +func (in *BudgetManagementGroup) DeepCopy() *BudgetManagementGroup { + if in == nil { + return nil + } + out := new(BudgetManagementGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BudgetManagementGroup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BudgetManagementGroupInitParameters) DeepCopyInto(out *BudgetManagementGroupInitParameters) { + *out = *in + if in.Amount != nil { + in, out := &in.Amount, &out.Amount + *out = new(float64) + **out = **in + } + if in.Etag != nil { + in, out := &in.Etag, &out.Etag + *out = new(string) + **out = **in + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(FilterInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ManagementGroupID != nil { + in, out := &in.ManagementGroupID, &out.ManagementGroupID + *out = new(string) + **out = **in + } + if in.ManagementGroupIDRef != nil { + in, out := &in.ManagementGroupIDRef, &out.ManagementGroupIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ManagementGroupIDSelector != nil { + in, out := &in.ManagementGroupIDSelector, &out.ManagementGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Notification != nil { + in, out := &in.Notification, &out.Notification + *out = make([]NotificationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TimeGrain != nil { + in, out := &in.TimeGrain, &out.TimeGrain + *out = new(string) + **out = **in + } + if in.TimePeriod != nil { + in, out := &in.TimePeriod, &out.TimePeriod + *out = new(TimePeriodInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetManagementGroupInitParameters. +func (in *BudgetManagementGroupInitParameters) DeepCopy() *BudgetManagementGroupInitParameters { + if in == nil { + return nil + } + out := new(BudgetManagementGroupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BudgetManagementGroupList) DeepCopyInto(out *BudgetManagementGroupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BudgetManagementGroup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetManagementGroupList. +func (in *BudgetManagementGroupList) DeepCopy() *BudgetManagementGroupList { + if in == nil { + return nil + } + out := new(BudgetManagementGroupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BudgetManagementGroupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BudgetManagementGroupObservation) DeepCopyInto(out *BudgetManagementGroupObservation) { + *out = *in + if in.Amount != nil { + in, out := &in.Amount, &out.Amount + *out = new(float64) + **out = **in + } + if in.Etag != nil { + in, out := &in.Etag, &out.Etag + *out = new(string) + **out = **in + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(FilterObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.ManagementGroupID != nil { + in, out := &in.ManagementGroupID, &out.ManagementGroupID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Notification != nil { + in, out := &in.Notification, &out.Notification + *out = make([]NotificationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TimeGrain != nil { + in, out := &in.TimeGrain, &out.TimeGrain + *out = new(string) + **out = **in + } + if in.TimePeriod != nil { + in, out := &in.TimePeriod, &out.TimePeriod + *out = new(TimePeriodObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetManagementGroupObservation. +func (in *BudgetManagementGroupObservation) DeepCopy() *BudgetManagementGroupObservation { + if in == nil { + return nil + } + out := new(BudgetManagementGroupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BudgetManagementGroupParameters) DeepCopyInto(out *BudgetManagementGroupParameters) { + *out = *in + if in.Amount != nil { + in, out := &in.Amount, &out.Amount + *out = new(float64) + **out = **in + } + if in.Etag != nil { + in, out := &in.Etag, &out.Etag + *out = new(string) + **out = **in + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(FilterParameters) + (*in).DeepCopyInto(*out) + } + if in.ManagementGroupID != nil { + in, out := &in.ManagementGroupID, &out.ManagementGroupID + *out = new(string) + **out = **in + } + if in.ManagementGroupIDRef != nil { + in, out := &in.ManagementGroupIDRef, &out.ManagementGroupIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ManagementGroupIDSelector != nil { + in, out := &in.ManagementGroupIDSelector, &out.ManagementGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Notification != nil { + in, out := &in.Notification, &out.Notification + *out = make([]NotificationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TimeGrain != nil { + in, out := &in.TimeGrain, &out.TimeGrain + *out = new(string) + **out = **in + } + if in.TimePeriod != nil { + in, out := &in.TimePeriod, &out.TimePeriod + *out = new(TimePeriodParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetManagementGroupParameters. +func (in *BudgetManagementGroupParameters) DeepCopy() *BudgetManagementGroupParameters { + if in == nil { + return nil + } + out := new(BudgetManagementGroupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BudgetManagementGroupSpec) DeepCopyInto(out *BudgetManagementGroupSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetManagementGroupSpec. +func (in *BudgetManagementGroupSpec) DeepCopy() *BudgetManagementGroupSpec { + if in == nil { + return nil + } + out := new(BudgetManagementGroupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BudgetManagementGroupStatus) DeepCopyInto(out *BudgetManagementGroupStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetManagementGroupStatus. +func (in *BudgetManagementGroupStatus) DeepCopy() *BudgetManagementGroupStatus { + if in == nil { + return nil + } + out := new(BudgetManagementGroupStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BudgetResourceGroup) DeepCopyInto(out *BudgetResourceGroup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetResourceGroup. +func (in *BudgetResourceGroup) DeepCopy() *BudgetResourceGroup { + if in == nil { + return nil + } + out := new(BudgetResourceGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *BudgetResourceGroup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BudgetResourceGroupFilterInitParameters) DeepCopyInto(out *BudgetResourceGroupFilterInitParameters) { + *out = *in + if in.Dimension != nil { + in, out := &in.Dimension, &out.Dimension + *out = make([]FilterDimensionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Not != nil { + in, out := &in.Not, &out.Not + *out = new(FilterNotInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = make([]BudgetResourceGroupFilterTagInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetResourceGroupFilterInitParameters. +func (in *BudgetResourceGroupFilterInitParameters) DeepCopy() *BudgetResourceGroupFilterInitParameters { + if in == nil { + return nil + } + out := new(BudgetResourceGroupFilterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BudgetResourceGroupFilterObservation) DeepCopyInto(out *BudgetResourceGroupFilterObservation) { + *out = *in + if in.Dimension != nil { + in, out := &in.Dimension, &out.Dimension + *out = make([]FilterDimensionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Not != nil { + in, out := &in.Not, &out.Not + *out = new(FilterNotObservation) + (*in).DeepCopyInto(*out) + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = make([]BudgetResourceGroupFilterTagObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetResourceGroupFilterObservation. +func (in *BudgetResourceGroupFilterObservation) DeepCopy() *BudgetResourceGroupFilterObservation { + if in == nil { + return nil + } + out := new(BudgetResourceGroupFilterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BudgetResourceGroupFilterParameters) DeepCopyInto(out *BudgetResourceGroupFilterParameters) { + *out = *in + if in.Dimension != nil { + in, out := &in.Dimension, &out.Dimension + *out = make([]FilterDimensionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Not != nil { + in, out := &in.Not, &out.Not + *out = new(FilterNotParameters) + (*in).DeepCopyInto(*out) + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = make([]BudgetResourceGroupFilterTagParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetResourceGroupFilterParameters. 
+func (in *BudgetResourceGroupFilterParameters) DeepCopy() *BudgetResourceGroupFilterParameters { + if in == nil { + return nil + } + out := new(BudgetResourceGroupFilterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BudgetResourceGroupFilterTagInitParameters) DeepCopyInto(out *BudgetResourceGroupFilterTagInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetResourceGroupFilterTagInitParameters. +func (in *BudgetResourceGroupFilterTagInitParameters) DeepCopy() *BudgetResourceGroupFilterTagInitParameters { + if in == nil { + return nil + } + out := new(BudgetResourceGroupFilterTagInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BudgetResourceGroupFilterTagObservation) DeepCopyInto(out *BudgetResourceGroupFilterTagObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetResourceGroupFilterTagObservation. +func (in *BudgetResourceGroupFilterTagObservation) DeepCopy() *BudgetResourceGroupFilterTagObservation { + if in == nil { + return nil + } + out := new(BudgetResourceGroupFilterTagObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BudgetResourceGroupFilterTagParameters) DeepCopyInto(out *BudgetResourceGroupFilterTagParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetResourceGroupFilterTagParameters. 
+func (in *BudgetResourceGroupFilterTagParameters) DeepCopy() *BudgetResourceGroupFilterTagParameters { + if in == nil { + return nil + } + out := new(BudgetResourceGroupFilterTagParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BudgetResourceGroupInitParameters) DeepCopyInto(out *BudgetResourceGroupInitParameters) { + *out = *in + if in.Amount != nil { + in, out := &in.Amount, &out.Amount + *out = new(float64) + **out = **in + } + if in.Etag != nil { + in, out := &in.Etag, &out.Etag + *out = new(string) + **out = **in + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(BudgetResourceGroupFilterInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Notification != nil { + in, out := &in.Notification, &out.Notification + *out = make([]BudgetResourceGroupNotificationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceGroupID != nil { + in, out := &in.ResourceGroupID, &out.ResourceGroupID + *out = new(string) + **out = **in + } + if in.ResourceGroupIDRef != nil { + in, out := &in.ResourceGroupIDRef, &out.ResourceGroupIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupIDSelector != nil { + in, out := &in.ResourceGroupIDSelector, &out.ResourceGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.TimeGrain != nil { + in, out := &in.TimeGrain, &out.TimeGrain + *out = new(string) + **out = **in + } + if in.TimePeriod != nil { + in, out := &in.TimePeriod, &out.TimePeriod + *out = new(BudgetResourceGroupTimePeriodInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetResourceGroupInitParameters. 
+func (in *BudgetResourceGroupInitParameters) DeepCopy() *BudgetResourceGroupInitParameters { + if in == nil { + return nil + } + out := new(BudgetResourceGroupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BudgetResourceGroupList) DeepCopyInto(out *BudgetResourceGroupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BudgetResourceGroup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetResourceGroupList. +func (in *BudgetResourceGroupList) DeepCopy() *BudgetResourceGroupList { + if in == nil { + return nil + } + out := new(BudgetResourceGroupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BudgetResourceGroupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BudgetResourceGroupNotificationInitParameters) DeepCopyInto(out *BudgetResourceGroupNotificationInitParameters) { + *out = *in + if in.ContactEmails != nil { + in, out := &in.ContactEmails, &out.ContactEmails + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ContactGroups != nil { + in, out := &in.ContactGroups, &out.ContactGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ContactRoles != nil { + in, out := &in.ContactRoles, &out.ContactRoles + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Threshold != nil { + in, out := &in.Threshold, &out.Threshold + *out = new(float64) + **out = **in + } + if in.ThresholdType != nil { + in, out := &in.ThresholdType, &out.ThresholdType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetResourceGroupNotificationInitParameters. +func (in *BudgetResourceGroupNotificationInitParameters) DeepCopy() *BudgetResourceGroupNotificationInitParameters { + if in == nil { + return nil + } + out := new(BudgetResourceGroupNotificationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BudgetResourceGroupNotificationObservation) DeepCopyInto(out *BudgetResourceGroupNotificationObservation) { + *out = *in + if in.ContactEmails != nil { + in, out := &in.ContactEmails, &out.ContactEmails + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ContactGroups != nil { + in, out := &in.ContactGroups, &out.ContactGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ContactRoles != nil { + in, out := &in.ContactRoles, &out.ContactRoles + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Threshold != nil { + in, out := &in.Threshold, &out.Threshold + *out = new(float64) + **out = **in + } + if in.ThresholdType != nil { + in, out := &in.ThresholdType, &out.ThresholdType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetResourceGroupNotificationObservation. +func (in *BudgetResourceGroupNotificationObservation) DeepCopy() *BudgetResourceGroupNotificationObservation { + if in == nil { + return nil + } + out := new(BudgetResourceGroupNotificationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BudgetResourceGroupNotificationParameters) DeepCopyInto(out *BudgetResourceGroupNotificationParameters) { + *out = *in + if in.ContactEmails != nil { + in, out := &in.ContactEmails, &out.ContactEmails + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ContactGroups != nil { + in, out := &in.ContactGroups, &out.ContactGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ContactRoles != nil { + in, out := &in.ContactRoles, &out.ContactRoles + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Threshold != nil { + in, out := &in.Threshold, &out.Threshold + *out = new(float64) + **out = **in + } + if in.ThresholdType != nil { + in, out := &in.ThresholdType, &out.ThresholdType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetResourceGroupNotificationParameters. +func (in *BudgetResourceGroupNotificationParameters) DeepCopy() *BudgetResourceGroupNotificationParameters { + if in == nil { + return nil + } + out := new(BudgetResourceGroupNotificationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BudgetResourceGroupObservation) DeepCopyInto(out *BudgetResourceGroupObservation) { + *out = *in + if in.Amount != nil { + in, out := &in.Amount, &out.Amount + *out = new(float64) + **out = **in + } + if in.Etag != nil { + in, out := &in.Etag, &out.Etag + *out = new(string) + **out = **in + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(BudgetResourceGroupFilterObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Notification != nil { + in, out := &in.Notification, &out.Notification + *out = make([]BudgetResourceGroupNotificationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceGroupID != nil { + in, out := &in.ResourceGroupID, &out.ResourceGroupID + *out = new(string) + **out = **in + } + if in.TimeGrain != nil { + in, out := &in.TimeGrain, &out.TimeGrain + *out = new(string) + **out = **in + } + if in.TimePeriod != nil { + in, out := &in.TimePeriod, &out.TimePeriod + *out = new(BudgetResourceGroupTimePeriodObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetResourceGroupObservation. +func (in *BudgetResourceGroupObservation) DeepCopy() *BudgetResourceGroupObservation { + if in == nil { + return nil + } + out := new(BudgetResourceGroupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BudgetResourceGroupParameters) DeepCopyInto(out *BudgetResourceGroupParameters) { + *out = *in + if in.Amount != nil { + in, out := &in.Amount, &out.Amount + *out = new(float64) + **out = **in + } + if in.Etag != nil { + in, out := &in.Etag, &out.Etag + *out = new(string) + **out = **in + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(BudgetResourceGroupFilterParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Notification != nil { + in, out := &in.Notification, &out.Notification + *out = make([]BudgetResourceGroupNotificationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceGroupID != nil { + in, out := &in.ResourceGroupID, &out.ResourceGroupID + *out = new(string) + **out = **in + } + if in.ResourceGroupIDRef != nil { + in, out := &in.ResourceGroupIDRef, &out.ResourceGroupIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupIDSelector != nil { + in, out := &in.ResourceGroupIDSelector, &out.ResourceGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.TimeGrain != nil { + in, out := &in.TimeGrain, &out.TimeGrain + *out = new(string) + **out = **in + } + if in.TimePeriod != nil { + in, out := &in.TimePeriod, &out.TimePeriod + *out = new(BudgetResourceGroupTimePeriodParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetResourceGroupParameters. +func (in *BudgetResourceGroupParameters) DeepCopy() *BudgetResourceGroupParameters { + if in == nil { + return nil + } + out := new(BudgetResourceGroupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BudgetResourceGroupSpec) DeepCopyInto(out *BudgetResourceGroupSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetResourceGroupSpec. +func (in *BudgetResourceGroupSpec) DeepCopy() *BudgetResourceGroupSpec { + if in == nil { + return nil + } + out := new(BudgetResourceGroupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BudgetResourceGroupStatus) DeepCopyInto(out *BudgetResourceGroupStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetResourceGroupStatus. +func (in *BudgetResourceGroupStatus) DeepCopy() *BudgetResourceGroupStatus { + if in == nil { + return nil + } + out := new(BudgetResourceGroupStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BudgetResourceGroupTimePeriodInitParameters) DeepCopyInto(out *BudgetResourceGroupTimePeriodInitParameters) { + *out = *in + if in.EndDate != nil { + in, out := &in.EndDate, &out.EndDate + *out = new(string) + **out = **in + } + if in.StartDate != nil { + in, out := &in.StartDate, &out.StartDate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetResourceGroupTimePeriodInitParameters. 
+func (in *BudgetResourceGroupTimePeriodInitParameters) DeepCopy() *BudgetResourceGroupTimePeriodInitParameters { + if in == nil { + return nil + } + out := new(BudgetResourceGroupTimePeriodInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BudgetResourceGroupTimePeriodObservation) DeepCopyInto(out *BudgetResourceGroupTimePeriodObservation) { + *out = *in + if in.EndDate != nil { + in, out := &in.EndDate, &out.EndDate + *out = new(string) + **out = **in + } + if in.StartDate != nil { + in, out := &in.StartDate, &out.StartDate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetResourceGroupTimePeriodObservation. +func (in *BudgetResourceGroupTimePeriodObservation) DeepCopy() *BudgetResourceGroupTimePeriodObservation { + if in == nil { + return nil + } + out := new(BudgetResourceGroupTimePeriodObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BudgetResourceGroupTimePeriodParameters) DeepCopyInto(out *BudgetResourceGroupTimePeriodParameters) { + *out = *in + if in.EndDate != nil { + in, out := &in.EndDate, &out.EndDate + *out = new(string) + **out = **in + } + if in.StartDate != nil { + in, out := &in.StartDate, &out.StartDate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetResourceGroupTimePeriodParameters. 
+func (in *BudgetResourceGroupTimePeriodParameters) DeepCopy() *BudgetResourceGroupTimePeriodParameters { + if in == nil { + return nil + } + out := new(BudgetResourceGroupTimePeriodParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BudgetSubscription) DeepCopyInto(out *BudgetSubscription) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetSubscription. +func (in *BudgetSubscription) DeepCopy() *BudgetSubscription { + if in == nil { + return nil + } + out := new(BudgetSubscription) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BudgetSubscription) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BudgetSubscriptionFilterDimensionInitParameters) DeepCopyInto(out *BudgetSubscriptionFilterDimensionInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetSubscriptionFilterDimensionInitParameters. 
+func (in *BudgetSubscriptionFilterDimensionInitParameters) DeepCopy() *BudgetSubscriptionFilterDimensionInitParameters { + if in == nil { + return nil + } + out := new(BudgetSubscriptionFilterDimensionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BudgetSubscriptionFilterDimensionObservation) DeepCopyInto(out *BudgetSubscriptionFilterDimensionObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetSubscriptionFilterDimensionObservation. +func (in *BudgetSubscriptionFilterDimensionObservation) DeepCopy() *BudgetSubscriptionFilterDimensionObservation { + if in == nil { + return nil + } + out := new(BudgetSubscriptionFilterDimensionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BudgetSubscriptionFilterDimensionParameters) DeepCopyInto(out *BudgetSubscriptionFilterDimensionParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetSubscriptionFilterDimensionParameters. +func (in *BudgetSubscriptionFilterDimensionParameters) DeepCopy() *BudgetSubscriptionFilterDimensionParameters { + if in == nil { + return nil + } + out := new(BudgetSubscriptionFilterDimensionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BudgetSubscriptionFilterInitParameters) DeepCopyInto(out *BudgetSubscriptionFilterInitParameters) { + *out = *in + if in.Dimension != nil { + in, out := &in.Dimension, &out.Dimension + *out = make([]BudgetSubscriptionFilterDimensionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Not != nil { + in, out := &in.Not, &out.Not + *out = new(BudgetSubscriptionFilterNotInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = make([]BudgetSubscriptionFilterTagInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetSubscriptionFilterInitParameters. 
+func (in *BudgetSubscriptionFilterInitParameters) DeepCopy() *BudgetSubscriptionFilterInitParameters { + if in == nil { + return nil + } + out := new(BudgetSubscriptionFilterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BudgetSubscriptionFilterNotDimensionInitParameters) DeepCopyInto(out *BudgetSubscriptionFilterNotDimensionInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetSubscriptionFilterNotDimensionInitParameters. +func (in *BudgetSubscriptionFilterNotDimensionInitParameters) DeepCopy() *BudgetSubscriptionFilterNotDimensionInitParameters { + if in == nil { + return nil + } + out := new(BudgetSubscriptionFilterNotDimensionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BudgetSubscriptionFilterNotDimensionObservation) DeepCopyInto(out *BudgetSubscriptionFilterNotDimensionObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetSubscriptionFilterNotDimensionObservation. +func (in *BudgetSubscriptionFilterNotDimensionObservation) DeepCopy() *BudgetSubscriptionFilterNotDimensionObservation { + if in == nil { + return nil + } + out := new(BudgetSubscriptionFilterNotDimensionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BudgetSubscriptionFilterNotDimensionParameters) DeepCopyInto(out *BudgetSubscriptionFilterNotDimensionParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetSubscriptionFilterNotDimensionParameters. 
+func (in *BudgetSubscriptionFilterNotDimensionParameters) DeepCopy() *BudgetSubscriptionFilterNotDimensionParameters { + if in == nil { + return nil + } + out := new(BudgetSubscriptionFilterNotDimensionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BudgetSubscriptionFilterNotInitParameters) DeepCopyInto(out *BudgetSubscriptionFilterNotInitParameters) { + *out = *in + if in.Dimension != nil { + in, out := &in.Dimension, &out.Dimension + *out = new(BudgetSubscriptionFilterNotDimensionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(FilterNotTagInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetSubscriptionFilterNotInitParameters. +func (in *BudgetSubscriptionFilterNotInitParameters) DeepCopy() *BudgetSubscriptionFilterNotInitParameters { + if in == nil { + return nil + } + out := new(BudgetSubscriptionFilterNotInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BudgetSubscriptionFilterNotObservation) DeepCopyInto(out *BudgetSubscriptionFilterNotObservation) { + *out = *in + if in.Dimension != nil { + in, out := &in.Dimension, &out.Dimension + *out = new(BudgetSubscriptionFilterNotDimensionObservation) + (*in).DeepCopyInto(*out) + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(FilterNotTagObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetSubscriptionFilterNotObservation. 
+func (in *BudgetSubscriptionFilterNotObservation) DeepCopy() *BudgetSubscriptionFilterNotObservation { + if in == nil { + return nil + } + out := new(BudgetSubscriptionFilterNotObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BudgetSubscriptionFilterNotParameters) DeepCopyInto(out *BudgetSubscriptionFilterNotParameters) { + *out = *in + if in.Dimension != nil { + in, out := &in.Dimension, &out.Dimension + *out = new(BudgetSubscriptionFilterNotDimensionParameters) + (*in).DeepCopyInto(*out) + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(FilterNotTagParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetSubscriptionFilterNotParameters. +func (in *BudgetSubscriptionFilterNotParameters) DeepCopy() *BudgetSubscriptionFilterNotParameters { + if in == nil { + return nil + } + out := new(BudgetSubscriptionFilterNotParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BudgetSubscriptionFilterObservation) DeepCopyInto(out *BudgetSubscriptionFilterObservation) { + *out = *in + if in.Dimension != nil { + in, out := &in.Dimension, &out.Dimension + *out = make([]BudgetSubscriptionFilterDimensionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Not != nil { + in, out := &in.Not, &out.Not + *out = new(BudgetSubscriptionFilterNotObservation) + (*in).DeepCopyInto(*out) + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = make([]BudgetSubscriptionFilterTagObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetSubscriptionFilterObservation. +func (in *BudgetSubscriptionFilterObservation) DeepCopy() *BudgetSubscriptionFilterObservation { + if in == nil { + return nil + } + out := new(BudgetSubscriptionFilterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BudgetSubscriptionFilterParameters) DeepCopyInto(out *BudgetSubscriptionFilterParameters) { + *out = *in + if in.Dimension != nil { + in, out := &in.Dimension, &out.Dimension + *out = make([]BudgetSubscriptionFilterDimensionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Not != nil { + in, out := &in.Not, &out.Not + *out = new(BudgetSubscriptionFilterNotParameters) + (*in).DeepCopyInto(*out) + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = make([]BudgetSubscriptionFilterTagParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetSubscriptionFilterParameters. 
+func (in *BudgetSubscriptionFilterParameters) DeepCopy() *BudgetSubscriptionFilterParameters { + if in == nil { + return nil + } + out := new(BudgetSubscriptionFilterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BudgetSubscriptionFilterTagInitParameters) DeepCopyInto(out *BudgetSubscriptionFilterTagInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetSubscriptionFilterTagInitParameters. +func (in *BudgetSubscriptionFilterTagInitParameters) DeepCopy() *BudgetSubscriptionFilterTagInitParameters { + if in == nil { + return nil + } + out := new(BudgetSubscriptionFilterTagInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BudgetSubscriptionFilterTagObservation) DeepCopyInto(out *BudgetSubscriptionFilterTagObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetSubscriptionFilterTagObservation. +func (in *BudgetSubscriptionFilterTagObservation) DeepCopy() *BudgetSubscriptionFilterTagObservation { + if in == nil { + return nil + } + out := new(BudgetSubscriptionFilterTagObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BudgetSubscriptionFilterTagParameters) DeepCopyInto(out *BudgetSubscriptionFilterTagParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetSubscriptionFilterTagParameters. 
+func (in *BudgetSubscriptionFilterTagParameters) DeepCopy() *BudgetSubscriptionFilterTagParameters { + if in == nil { + return nil + } + out := new(BudgetSubscriptionFilterTagParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BudgetSubscriptionInitParameters) DeepCopyInto(out *BudgetSubscriptionInitParameters) { + *out = *in + if in.Amount != nil { + in, out := &in.Amount, &out.Amount + *out = new(float64) + **out = **in + } + if in.Etag != nil { + in, out := &in.Etag, &out.Etag + *out = new(string) + **out = **in + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(BudgetSubscriptionFilterInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Notification != nil { + in, out := &in.Notification, &out.Notification + *out = make([]BudgetSubscriptionNotificationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubscriptionID != nil { + in, out := &in.SubscriptionID, &out.SubscriptionID + *out = new(string) + **out = **in + } + if in.TimeGrain != nil { + in, out := &in.TimeGrain, &out.TimeGrain + *out = new(string) + **out = **in + } + if in.TimePeriod != nil { + in, out := &in.TimePeriod, &out.TimePeriod + *out = new(BudgetSubscriptionTimePeriodInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetSubscriptionInitParameters. +func (in *BudgetSubscriptionInitParameters) DeepCopy() *BudgetSubscriptionInitParameters { + if in == nil { + return nil + } + out := new(BudgetSubscriptionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BudgetSubscriptionList) DeepCopyInto(out *BudgetSubscriptionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BudgetSubscription, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetSubscriptionList. +func (in *BudgetSubscriptionList) DeepCopy() *BudgetSubscriptionList { + if in == nil { + return nil + } + out := new(BudgetSubscriptionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BudgetSubscriptionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BudgetSubscriptionNotificationInitParameters) DeepCopyInto(out *BudgetSubscriptionNotificationInitParameters) { + *out = *in + if in.ContactEmails != nil { + in, out := &in.ContactEmails, &out.ContactEmails + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ContactGroups != nil { + in, out := &in.ContactGroups, &out.ContactGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ContactGroupsRefs != nil { + in, out := &in.ContactGroupsRefs, &out.ContactGroupsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ContactGroupsSelector != nil { + in, out := &in.ContactGroupsSelector, &out.ContactGroupsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if 
in.ContactRoles != nil { + in, out := &in.ContactRoles, &out.ContactRoles + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Threshold != nil { + in, out := &in.Threshold, &out.Threshold + *out = new(float64) + **out = **in + } + if in.ThresholdType != nil { + in, out := &in.ThresholdType, &out.ThresholdType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetSubscriptionNotificationInitParameters. +func (in *BudgetSubscriptionNotificationInitParameters) DeepCopy() *BudgetSubscriptionNotificationInitParameters { + if in == nil { + return nil + } + out := new(BudgetSubscriptionNotificationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BudgetSubscriptionNotificationObservation) DeepCopyInto(out *BudgetSubscriptionNotificationObservation) { + *out = *in + if in.ContactEmails != nil { + in, out := &in.ContactEmails, &out.ContactEmails + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ContactGroups != nil { + in, out := &in.ContactGroups, &out.ContactGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ContactRoles != nil { + in, out := &in.ContactRoles, &out.ContactRoles + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Threshold != nil { + in, out := &in.Threshold, &out.Threshold + *out = new(float64) + **out = **in + } + if in.ThresholdType != nil { + in, out := &in.ThresholdType, &out.ThresholdType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetSubscriptionNotificationObservation. +func (in *BudgetSubscriptionNotificationObservation) DeepCopy() *BudgetSubscriptionNotificationObservation { + if in == nil { + return nil + } + out := new(BudgetSubscriptionNotificationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BudgetSubscriptionNotificationParameters) DeepCopyInto(out *BudgetSubscriptionNotificationParameters) { + *out = *in + if in.ContactEmails != nil { + in, out := &in.ContactEmails, &out.ContactEmails + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ContactGroups != nil { + in, out := &in.ContactGroups, &out.ContactGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ContactGroupsRefs != nil { + in, out := &in.ContactGroupsRefs, &out.ContactGroupsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ContactGroupsSelector != nil { + in, out := &in.ContactGroupsSelector, &out.ContactGroupsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ContactRoles != nil { + in, out := &in.ContactRoles, &out.ContactRoles + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Threshold != nil { + in, out := &in.Threshold, &out.Threshold + *out = new(float64) + **out = **in + } + if in.ThresholdType != nil { + in, out := &in.ThresholdType, &out.ThresholdType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetSubscriptionNotificationParameters. 
+func (in *BudgetSubscriptionNotificationParameters) DeepCopy() *BudgetSubscriptionNotificationParameters { + if in == nil { + return nil + } + out := new(BudgetSubscriptionNotificationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BudgetSubscriptionObservation) DeepCopyInto(out *BudgetSubscriptionObservation) { + *out = *in + if in.Amount != nil { + in, out := &in.Amount, &out.Amount + *out = new(float64) + **out = **in + } + if in.Etag != nil { + in, out := &in.Etag, &out.Etag + *out = new(string) + **out = **in + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(BudgetSubscriptionFilterObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Notification != nil { + in, out := &in.Notification, &out.Notification + *out = make([]BudgetSubscriptionNotificationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubscriptionID != nil { + in, out := &in.SubscriptionID, &out.SubscriptionID + *out = new(string) + **out = **in + } + if in.TimeGrain != nil { + in, out := &in.TimeGrain, &out.TimeGrain + *out = new(string) + **out = **in + } + if in.TimePeriod != nil { + in, out := &in.TimePeriod, &out.TimePeriod + *out = new(BudgetSubscriptionTimePeriodObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetSubscriptionObservation. +func (in *BudgetSubscriptionObservation) DeepCopy() *BudgetSubscriptionObservation { + if in == nil { + return nil + } + out := new(BudgetSubscriptionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BudgetSubscriptionParameters) DeepCopyInto(out *BudgetSubscriptionParameters) { + *out = *in + if in.Amount != nil { + in, out := &in.Amount, &out.Amount + *out = new(float64) + **out = **in + } + if in.Etag != nil { + in, out := &in.Etag, &out.Etag + *out = new(string) + **out = **in + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(BudgetSubscriptionFilterParameters) + (*in).DeepCopyInto(*out) + } + if in.Notification != nil { + in, out := &in.Notification, &out.Notification + *out = make([]BudgetSubscriptionNotificationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubscriptionID != nil { + in, out := &in.SubscriptionID, &out.SubscriptionID + *out = new(string) + **out = **in + } + if in.TimeGrain != nil { + in, out := &in.TimeGrain, &out.TimeGrain + *out = new(string) + **out = **in + } + if in.TimePeriod != nil { + in, out := &in.TimePeriod, &out.TimePeriod + *out = new(BudgetSubscriptionTimePeriodParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetSubscriptionParameters. +func (in *BudgetSubscriptionParameters) DeepCopy() *BudgetSubscriptionParameters { + if in == nil { + return nil + } + out := new(BudgetSubscriptionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BudgetSubscriptionSpec) DeepCopyInto(out *BudgetSubscriptionSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetSubscriptionSpec. 
+func (in *BudgetSubscriptionSpec) DeepCopy() *BudgetSubscriptionSpec { + if in == nil { + return nil + } + out := new(BudgetSubscriptionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BudgetSubscriptionStatus) DeepCopyInto(out *BudgetSubscriptionStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetSubscriptionStatus. +func (in *BudgetSubscriptionStatus) DeepCopy() *BudgetSubscriptionStatus { + if in == nil { + return nil + } + out := new(BudgetSubscriptionStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BudgetSubscriptionTimePeriodInitParameters) DeepCopyInto(out *BudgetSubscriptionTimePeriodInitParameters) { + *out = *in + if in.EndDate != nil { + in, out := &in.EndDate, &out.EndDate + *out = new(string) + **out = **in + } + if in.StartDate != nil { + in, out := &in.StartDate, &out.StartDate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetSubscriptionTimePeriodInitParameters. +func (in *BudgetSubscriptionTimePeriodInitParameters) DeepCopy() *BudgetSubscriptionTimePeriodInitParameters { + if in == nil { + return nil + } + out := new(BudgetSubscriptionTimePeriodInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BudgetSubscriptionTimePeriodObservation) DeepCopyInto(out *BudgetSubscriptionTimePeriodObservation) { + *out = *in + if in.EndDate != nil { + in, out := &in.EndDate, &out.EndDate + *out = new(string) + **out = **in + } + if in.StartDate != nil { + in, out := &in.StartDate, &out.StartDate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetSubscriptionTimePeriodObservation. +func (in *BudgetSubscriptionTimePeriodObservation) DeepCopy() *BudgetSubscriptionTimePeriodObservation { + if in == nil { + return nil + } + out := new(BudgetSubscriptionTimePeriodObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BudgetSubscriptionTimePeriodParameters) DeepCopyInto(out *BudgetSubscriptionTimePeriodParameters) { + *out = *in + if in.EndDate != nil { + in, out := &in.EndDate, &out.EndDate + *out = new(string) + **out = **in + } + if in.StartDate != nil { + in, out := &in.StartDate, &out.StartDate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BudgetSubscriptionTimePeriodParameters. +func (in *BudgetSubscriptionTimePeriodParameters) DeepCopy() *BudgetSubscriptionTimePeriodParameters { + if in == nil { + return nil + } + out := new(BudgetSubscriptionTimePeriodParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DimensionInitParameters) DeepCopyInto(out *DimensionInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DimensionInitParameters. +func (in *DimensionInitParameters) DeepCopy() *DimensionInitParameters { + if in == nil { + return nil + } + out := new(DimensionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DimensionObservation) DeepCopyInto(out *DimensionObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DimensionObservation. +func (in *DimensionObservation) DeepCopy() *DimensionObservation { + if in == nil { + return nil + } + out := new(DimensionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DimensionParameters) DeepCopyInto(out *DimensionParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DimensionParameters. +func (in *DimensionParameters) DeepCopy() *DimensionParameters { + if in == nil { + return nil + } + out := new(DimensionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterDimensionInitParameters) DeepCopyInto(out *FilterDimensionInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterDimensionInitParameters. +func (in *FilterDimensionInitParameters) DeepCopy() *FilterDimensionInitParameters { + if in == nil { + return nil + } + out := new(FilterDimensionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FilterDimensionObservation) DeepCopyInto(out *FilterDimensionObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterDimensionObservation. +func (in *FilterDimensionObservation) DeepCopy() *FilterDimensionObservation { + if in == nil { + return nil + } + out := new(FilterDimensionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterDimensionParameters) DeepCopyInto(out *FilterDimensionParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterDimensionParameters. +func (in *FilterDimensionParameters) DeepCopy() *FilterDimensionParameters { + if in == nil { + return nil + } + out := new(FilterDimensionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FilterInitParameters) DeepCopyInto(out *FilterInitParameters) { + *out = *in + if in.Dimension != nil { + in, out := &in.Dimension, &out.Dimension + *out = make([]DimensionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Not != nil { + in, out := &in.Not, &out.Not + *out = new(NotInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = make([]FilterTagInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterInitParameters. +func (in *FilterInitParameters) DeepCopy() *FilterInitParameters { + if in == nil { + return nil + } + out := new(FilterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterNotDimensionInitParameters) DeepCopyInto(out *FilterNotDimensionInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterNotDimensionInitParameters. +func (in *FilterNotDimensionInitParameters) DeepCopy() *FilterNotDimensionInitParameters { + if in == nil { + return nil + } + out := new(FilterNotDimensionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FilterNotDimensionObservation) DeepCopyInto(out *FilterNotDimensionObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterNotDimensionObservation. +func (in *FilterNotDimensionObservation) DeepCopy() *FilterNotDimensionObservation { + if in == nil { + return nil + } + out := new(FilterNotDimensionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterNotDimensionParameters) DeepCopyInto(out *FilterNotDimensionParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterNotDimensionParameters. +func (in *FilterNotDimensionParameters) DeepCopy() *FilterNotDimensionParameters { + if in == nil { + return nil + } + out := new(FilterNotDimensionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FilterNotInitParameters) DeepCopyInto(out *FilterNotInitParameters) { + *out = *in + if in.Dimension != nil { + in, out := &in.Dimension, &out.Dimension + *out = new(FilterNotDimensionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(NotTagInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterNotInitParameters. +func (in *FilterNotInitParameters) DeepCopy() *FilterNotInitParameters { + if in == nil { + return nil + } + out := new(FilterNotInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterNotObservation) DeepCopyInto(out *FilterNotObservation) { + *out = *in + if in.Dimension != nil { + in, out := &in.Dimension, &out.Dimension + *out = new(FilterNotDimensionObservation) + (*in).DeepCopyInto(*out) + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(NotTagObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterNotObservation. +func (in *FilterNotObservation) DeepCopy() *FilterNotObservation { + if in == nil { + return nil + } + out := new(FilterNotObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterNotParameters) DeepCopyInto(out *FilterNotParameters) { + *out = *in + if in.Dimension != nil { + in, out := &in.Dimension, &out.Dimension + *out = new(FilterNotDimensionParameters) + (*in).DeepCopyInto(*out) + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(NotTagParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterNotParameters. 
+func (in *FilterNotParameters) DeepCopy() *FilterNotParameters { + if in == nil { + return nil + } + out := new(FilterNotParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterNotTagInitParameters) DeepCopyInto(out *FilterNotTagInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterNotTagInitParameters. +func (in *FilterNotTagInitParameters) DeepCopy() *FilterNotTagInitParameters { + if in == nil { + return nil + } + out := new(FilterNotTagInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterNotTagObservation) DeepCopyInto(out *FilterNotTagObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterNotTagObservation. 
+func (in *FilterNotTagObservation) DeepCopy() *FilterNotTagObservation { + if in == nil { + return nil + } + out := new(FilterNotTagObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterNotTagParameters) DeepCopyInto(out *FilterNotTagParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterNotTagParameters. +func (in *FilterNotTagParameters) DeepCopy() *FilterNotTagParameters { + if in == nil { + return nil + } + out := new(FilterNotTagParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterObservation) DeepCopyInto(out *FilterObservation) { + *out = *in + if in.Dimension != nil { + in, out := &in.Dimension, &out.Dimension + *out = make([]DimensionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Not != nil { + in, out := &in.Not, &out.Not + *out = new(NotObservation) + (*in).DeepCopyInto(*out) + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = make([]FilterTagObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterObservation. 
+func (in *FilterObservation) DeepCopy() *FilterObservation { + if in == nil { + return nil + } + out := new(FilterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterParameters) DeepCopyInto(out *FilterParameters) { + *out = *in + if in.Dimension != nil { + in, out := &in.Dimension, &out.Dimension + *out = make([]DimensionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Not != nil { + in, out := &in.Not, &out.Not + *out = new(NotParameters) + (*in).DeepCopyInto(*out) + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = make([]FilterTagParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterParameters. +func (in *FilterParameters) DeepCopy() *FilterParameters { + if in == nil { + return nil + } + out := new(FilterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterTagInitParameters) DeepCopyInto(out *FilterTagInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterTagInitParameters. 
+func (in *FilterTagInitParameters) DeepCopy() *FilterTagInitParameters { + if in == nil { + return nil + } + out := new(FilterTagInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterTagObservation) DeepCopyInto(out *FilterTagObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterTagObservation. +func (in *FilterTagObservation) DeepCopy() *FilterTagObservation { + if in == nil { + return nil + } + out := new(FilterTagObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterTagParameters) DeepCopyInto(out *FilterTagParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterTagParameters. 
+func (in *FilterTagParameters) DeepCopy() *FilterTagParameters { + if in == nil { + return nil + } + out := new(FilterTagParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NotDimensionInitParameters) DeepCopyInto(out *NotDimensionInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotDimensionInitParameters. +func (in *NotDimensionInitParameters) DeepCopy() *NotDimensionInitParameters { + if in == nil { + return nil + } + out := new(NotDimensionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NotDimensionObservation) DeepCopyInto(out *NotDimensionObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotDimensionObservation. 
+func (in *NotDimensionObservation) DeepCopy() *NotDimensionObservation { + if in == nil { + return nil + } + out := new(NotDimensionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NotDimensionParameters) DeepCopyInto(out *NotDimensionParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotDimensionParameters. +func (in *NotDimensionParameters) DeepCopy() *NotDimensionParameters { + if in == nil { + return nil + } + out := new(NotDimensionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NotInitParameters) DeepCopyInto(out *NotInitParameters) { + *out = *in + if in.Dimension != nil { + in, out := &in.Dimension, &out.Dimension + *out = new(NotDimensionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(TagInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotInitParameters. +func (in *NotInitParameters) DeepCopy() *NotInitParameters { + if in == nil { + return nil + } + out := new(NotInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NotObservation) DeepCopyInto(out *NotObservation) { + *out = *in + if in.Dimension != nil { + in, out := &in.Dimension, &out.Dimension + *out = new(NotDimensionObservation) + (*in).DeepCopyInto(*out) + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(TagObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotObservation. +func (in *NotObservation) DeepCopy() *NotObservation { + if in == nil { + return nil + } + out := new(NotObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NotParameters) DeepCopyInto(out *NotParameters) { + *out = *in + if in.Dimension != nil { + in, out := &in.Dimension, &out.Dimension + *out = new(NotDimensionParameters) + (*in).DeepCopyInto(*out) + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(TagParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotParameters. +func (in *NotParameters) DeepCopy() *NotParameters { + if in == nil { + return nil + } + out := new(NotParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NotTagInitParameters) DeepCopyInto(out *NotTagInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotTagInitParameters. +func (in *NotTagInitParameters) DeepCopy() *NotTagInitParameters { + if in == nil { + return nil + } + out := new(NotTagInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NotTagObservation) DeepCopyInto(out *NotTagObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotTagObservation. +func (in *NotTagObservation) DeepCopy() *NotTagObservation { + if in == nil { + return nil + } + out := new(NotTagObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NotTagParameters) DeepCopyInto(out *NotTagParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotTagParameters. +func (in *NotTagParameters) DeepCopy() *NotTagParameters { + if in == nil { + return nil + } + out := new(NotTagParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NotificationInitParameters) DeepCopyInto(out *NotificationInitParameters) { + *out = *in + if in.ContactEmails != nil { + in, out := &in.ContactEmails, &out.ContactEmails + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Threshold != nil { + in, out := &in.Threshold, &out.Threshold + *out = new(float64) + **out = **in + } + if in.ThresholdType != nil { + in, out := &in.ThresholdType, &out.ThresholdType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotificationInitParameters. 
+func (in *NotificationInitParameters) DeepCopy() *NotificationInitParameters { + if in == nil { + return nil + } + out := new(NotificationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NotificationObservation) DeepCopyInto(out *NotificationObservation) { + *out = *in + if in.ContactEmails != nil { + in, out := &in.ContactEmails, &out.ContactEmails + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Threshold != nil { + in, out := &in.Threshold, &out.Threshold + *out = new(float64) + **out = **in + } + if in.ThresholdType != nil { + in, out := &in.ThresholdType, &out.ThresholdType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotificationObservation. +func (in *NotificationObservation) DeepCopy() *NotificationObservation { + if in == nil { + return nil + } + out := new(NotificationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NotificationParameters) DeepCopyInto(out *NotificationParameters) { + *out = *in + if in.ContactEmails != nil { + in, out := &in.ContactEmails, &out.ContactEmails + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Threshold != nil { + in, out := &in.Threshold, &out.Threshold + *out = new(float64) + **out = **in + } + if in.ThresholdType != nil { + in, out := &in.ThresholdType, &out.ThresholdType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotificationParameters. +func (in *NotificationParameters) DeepCopy() *NotificationParameters { + if in == nil { + return nil + } + out := new(NotificationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TagInitParameters) DeepCopyInto(out *TagInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagInitParameters. 
+func (in *TagInitParameters) DeepCopy() *TagInitParameters { + if in == nil { + return nil + } + out := new(TagInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TagObservation) DeepCopyInto(out *TagObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagObservation. +func (in *TagObservation) DeepCopy() *TagObservation { + if in == nil { + return nil + } + out := new(TagObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TagParameters) DeepCopyInto(out *TagParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagParameters. 
+func (in *TagParameters) DeepCopy() *TagParameters { + if in == nil { + return nil + } + out := new(TagParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TimePeriodInitParameters) DeepCopyInto(out *TimePeriodInitParameters) { + *out = *in + if in.EndDate != nil { + in, out := &in.EndDate, &out.EndDate + *out = new(string) + **out = **in + } + if in.StartDate != nil { + in, out := &in.StartDate, &out.StartDate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimePeriodInitParameters. +func (in *TimePeriodInitParameters) DeepCopy() *TimePeriodInitParameters { + if in == nil { + return nil + } + out := new(TimePeriodInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TimePeriodObservation) DeepCopyInto(out *TimePeriodObservation) { + *out = *in + if in.EndDate != nil { + in, out := &in.EndDate, &out.EndDate + *out = new(string) + **out = **in + } + if in.StartDate != nil { + in, out := &in.StartDate, &out.StartDate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimePeriodObservation. +func (in *TimePeriodObservation) DeepCopy() *TimePeriodObservation { + if in == nil { + return nil + } + out := new(TimePeriodObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TimePeriodParameters) DeepCopyInto(out *TimePeriodParameters) { + *out = *in + if in.EndDate != nil { + in, out := &in.EndDate, &out.EndDate + *out = new(string) + **out = **in + } + if in.StartDate != nil { + in, out := &in.StartDate, &out.StartDate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimePeriodParameters. +func (in *TimePeriodParameters) DeepCopy() *TimePeriodParameters { + if in == nil { + return nil + } + out := new(TimePeriodParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/consumption/v1beta2/zz_generated.managed.go b/apis/consumption/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..a21765fca --- /dev/null +++ b/apis/consumption/v1beta2/zz_generated.managed.go @@ -0,0 +1,188 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this BudgetManagementGroup. +func (mg *BudgetManagementGroup) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this BudgetManagementGroup. +func (mg *BudgetManagementGroup) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this BudgetManagementGroup. +func (mg *BudgetManagementGroup) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this BudgetManagementGroup. +func (mg *BudgetManagementGroup) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this BudgetManagementGroup. 
+func (mg *BudgetManagementGroup) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this BudgetManagementGroup. +func (mg *BudgetManagementGroup) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this BudgetManagementGroup. +func (mg *BudgetManagementGroup) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this BudgetManagementGroup. +func (mg *BudgetManagementGroup) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this BudgetManagementGroup. +func (mg *BudgetManagementGroup) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this BudgetManagementGroup. +func (mg *BudgetManagementGroup) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this BudgetManagementGroup. +func (mg *BudgetManagementGroup) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this BudgetManagementGroup. +func (mg *BudgetManagementGroup) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this BudgetResourceGroup. +func (mg *BudgetResourceGroup) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this BudgetResourceGroup. +func (mg *BudgetResourceGroup) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this BudgetResourceGroup. 
+func (mg *BudgetResourceGroup) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this BudgetResourceGroup. +func (mg *BudgetResourceGroup) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this BudgetResourceGroup. +func (mg *BudgetResourceGroup) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this BudgetResourceGroup. +func (mg *BudgetResourceGroup) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this BudgetResourceGroup. +func (mg *BudgetResourceGroup) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this BudgetResourceGroup. +func (mg *BudgetResourceGroup) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this BudgetResourceGroup. +func (mg *BudgetResourceGroup) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this BudgetResourceGroup. +func (mg *BudgetResourceGroup) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this BudgetResourceGroup. +func (mg *BudgetResourceGroup) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this BudgetResourceGroup. +func (mg *BudgetResourceGroup) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this BudgetSubscription. 
+func (mg *BudgetSubscription) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this BudgetSubscription. +func (mg *BudgetSubscription) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this BudgetSubscription. +func (mg *BudgetSubscription) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this BudgetSubscription. +func (mg *BudgetSubscription) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this BudgetSubscription. +func (mg *BudgetSubscription) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this BudgetSubscription. +func (mg *BudgetSubscription) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this BudgetSubscription. +func (mg *BudgetSubscription) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this BudgetSubscription. +func (mg *BudgetSubscription) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this BudgetSubscription. +func (mg *BudgetSubscription) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this BudgetSubscription. +func (mg *BudgetSubscription) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this BudgetSubscription. 
+func (mg *BudgetSubscription) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this BudgetSubscription. +func (mg *BudgetSubscription) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/consumption/v1beta2/zz_generated.managedlist.go b/apis/consumption/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..763989414 --- /dev/null +++ b/apis/consumption/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,35 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this BudgetManagementGroupList. +func (l *BudgetManagementGroupList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this BudgetResourceGroupList. +func (l *BudgetResourceGroupList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this BudgetSubscriptionList. +func (l *BudgetSubscriptionList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/consumption/v1beta2/zz_generated.resolvers.go b/apis/consumption/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..a1a10b696 --- /dev/null +++ b/apis/consumption/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,174 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. 
+// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + rconfig "github.com/upbound/provider-azure/apis/rconfig" + apisresolver "github.com/upbound/provider-azure/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *BudgetManagementGroup) ResolveReferences( // ResolveReferences of this BudgetManagementGroup. + ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("management.azure.upbound.io", "v1beta1", "ManagementGroup", "ManagementGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ManagementGroupID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.ManagementGroupIDRef, + Selector: mg.Spec.ForProvider.ManagementGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ManagementGroupID") + } + mg.Spec.ForProvider.ManagementGroupID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ManagementGroupIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("management.azure.upbound.io", "v1beta1", "ManagementGroup", "ManagementGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + 
CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ManagementGroupID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.ManagementGroupIDRef, + Selector: mg.Spec.InitProvider.ManagementGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ManagementGroupID") + } + mg.Spec.InitProvider.ManagementGroupID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ManagementGroupIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this BudgetResourceGroup. +func (mg *BudgetResourceGroup) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.ResourceGroupIDRef, + Selector: mg.Spec.ForProvider.ResourceGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupID") + } + mg.Spec.ForProvider.ResourceGroupID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + 
CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ResourceGroupID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.ResourceGroupIDRef, + Selector: mg.Spec.InitProvider.ResourceGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ResourceGroupID") + } + mg.Spec.InitProvider.ResourceGroupID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ResourceGroupIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this BudgetSubscription. +func (mg *BudgetSubscription) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var mrsp reference.MultiResolutionResponse + var err error + + for i3 := 0; i3 < len(mg.Spec.ForProvider.Notification); i3++ { + { + m, l, err = apisresolver.GetManagedResource("insights.azure.upbound.io", "v1beta2", "MonitorActionGroup", "MonitorActionGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.Notification[i3].ContactGroups), + Extract: rconfig.ExtractResourceID(), + References: mg.Spec.ForProvider.Notification[i3].ContactGroupsRefs, + Selector: mg.Spec.ForProvider.Notification[i3].ContactGroupsSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Notification[i3].ContactGroups") + } + mg.Spec.ForProvider.Notification[i3].ContactGroups = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.Notification[i3].ContactGroupsRefs = mrsp.ResolvedReferences + + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Notification); i3++ { + { + m, l, err = 
apisresolver.GetManagedResource("insights.azure.upbound.io", "v1beta2", "MonitorActionGroup", "MonitorActionGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.Notification[i3].ContactGroups), + Extract: rconfig.ExtractResourceID(), + References: mg.Spec.InitProvider.Notification[i3].ContactGroupsRefs, + Selector: mg.Spec.InitProvider.Notification[i3].ContactGroupsSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Notification[i3].ContactGroups") + } + mg.Spec.InitProvider.Notification[i3].ContactGroups = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.Notification[i3].ContactGroupsRefs = mrsp.ResolvedReferences + + } + + return nil +} diff --git a/apis/consumption/v1beta2/zz_groupversion_info.go b/apis/consumption/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..9d887d407 --- /dev/null +++ b/apis/consumption/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=consumption.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
+const ( + CRDGroup = "consumption.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/containerapp/v1beta1/zz_environment_types.go b/apis/containerapp/v1beta1/zz_environment_types.go index 2a6dbdbb1..76bd2ed89 100755 --- a/apis/containerapp/v1beta1/zz_environment_types.go +++ b/apis/containerapp/v1beta1/zz_environment_types.go @@ -30,7 +30,7 @@ type EnvironmentInitParameters struct { // The existing Subnet to use for the Container Apps Control Plane. Changing this forces a new resource to be created. // The existing Subnet to use for the Container Apps Control Plane. **NOTE:** The Subnet must have a `/21` or larger address space. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.Subnet + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() InfrastructureSubnetID *string `json:"infrastructureSubnetId,omitempty" tf:"infrastructure_subnet_id,omitempty"` @@ -51,7 +51,7 @@ type EnvironmentInitParameters struct { // The ID for the Log Analytics Workspace to link this Container Apps Managed Environment to. Changing this forces a new resource to be created. // The ID for the Log Analytics Workspace to link this Container Apps Managed Environment to. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/operationalinsights/v1beta1.Workspace + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/operationalinsights/v1beta2.Workspace // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() LogAnalyticsWorkspaceID *string `json:"logAnalyticsWorkspaceId,omitempty" tf:"log_analytics_workspace_id,omitempty"` @@ -155,7 +155,7 @@ type EnvironmentParameters struct { // The existing Subnet to use for the Container Apps Control Plane. Changing this forces a new resource to be created. // The existing Subnet to use for the Container Apps Control Plane. **NOTE:** The Subnet must have a `/21` or larger address space. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.Subnet + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() // +kubebuilder:validation:Optional InfrastructureSubnetID *string `json:"infrastructureSubnetId,omitempty" tf:"infrastructure_subnet_id,omitempty"` @@ -179,7 +179,7 @@ type EnvironmentParameters struct { // The ID for the Log Analytics Workspace to link this Container Apps Managed Environment to. Changing this forces a new resource to be created. // The ID for the Log Analytics Workspace to link this Container Apps Managed Environment to. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/operationalinsights/v1beta1.Workspace + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/operationalinsights/v1beta2.Workspace // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional LogAnalyticsWorkspaceID *string `json:"logAnalyticsWorkspaceId,omitempty" tf:"log_analytics_workspace_id,omitempty"` diff --git a/apis/containerapp/v1beta1/zz_generated.conversion_hubs.go b/apis/containerapp/v1beta1/zz_generated.conversion_hubs.go index aab4f39fb..e7ab6a185 100755 --- a/apis/containerapp/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/containerapp/v1beta1/zz_generated.conversion_hubs.go @@ -6,8 +6,5 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *ContainerApp) Hub() {} - // Hub marks this type as a conversion hub. func (tr *Environment) Hub() {} diff --git a/apis/containerapp/v1beta1/zz_generated.conversion_spokes.go b/apis/containerapp/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..0306987e4 --- /dev/null +++ b/apis/containerapp/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this ContainerApp to the hub type. 
+func (tr *ContainerApp) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ContainerApp type. +func (tr *ContainerApp) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/containerapp/v1beta1/zz_generated.resolvers.go b/apis/containerapp/v1beta1/zz_generated.resolvers.go index 50338d568..99f280b59 100644 --- a/apis/containerapp/v1beta1/zz_generated.resolvers.go +++ b/apis/containerapp/v1beta1/zz_generated.resolvers.go @@ -115,7 +115,7 @@ func (mg *Environment) ResolveReferences(ctx context.Context, c client.Reader) e mg.Spec.ForProvider.InfrastructureResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.InfrastructureResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "Subnet", "SubnetList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -134,7 +134,7 @@ func (mg *Environment) ResolveReferences(ctx context.Context, c client.Reader) e mg.Spec.ForProvider.InfrastructureSubnetID = reference.ToPtrValue(rsp.ResolvedValue) 
mg.Spec.ForProvider.InfrastructureSubnetIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("operationalinsights.azure.upbound.io", "v1beta1", "Workspace", "WorkspaceList") + m, l, err = apisresolver.GetManagedResource("operationalinsights.azure.upbound.io", "v1beta2", "Workspace", "WorkspaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -191,7 +191,7 @@ func (mg *Environment) ResolveReferences(ctx context.Context, c client.Reader) e mg.Spec.InitProvider.InfrastructureResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.InfrastructureResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "Subnet", "SubnetList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -210,7 +210,7 @@ func (mg *Environment) ResolveReferences(ctx context.Context, c client.Reader) e mg.Spec.InitProvider.InfrastructureSubnetID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.InfrastructureSubnetIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("operationalinsights.azure.upbound.io", "v1beta1", "Workspace", "WorkspaceList") + m, l, err = apisresolver.GetManagedResource("operationalinsights.azure.upbound.io", "v1beta2", "Workspace", "WorkspaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/containerapp/v1beta2/zz_containerapp_terraformed.go b/apis/containerapp/v1beta2/zz_containerapp_terraformed.go new file mode 100755 index 000000000..04291143f --- /dev/null +++ b/apis/containerapp/v1beta2/zz_containerapp_terraformed.go @@ 
-0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ContainerApp +func (mg *ContainerApp) GetTerraformResourceType() string { + return "azurerm_container_app" +} + +// GetConnectionDetailsMapping for this ContainerApp +func (tr *ContainerApp) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"custom_domain_verification_id": "status.atProvider.customDomainVerificationId", "secret[*].name": "spec.forProvider.secret[*].nameSecretRef", "secret[*].value": "spec.forProvider.secret[*].valueSecretRef"} +} + +// GetObservation of this ContainerApp +func (tr *ContainerApp) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ContainerApp +func (tr *ContainerApp) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ContainerApp +func (tr *ContainerApp) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ContainerApp +func (tr *ContainerApp) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ContainerApp +func (tr *ContainerApp) SetParameters(params 
map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ContainerApp +func (tr *ContainerApp) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this ContainerApp +func (tr *ContainerApp) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ContainerApp using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *ContainerApp) LateInitialize(attrs []byte) (bool, error) { + params := &ContainerAppParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ContainerApp) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/containerapp/v1beta2/zz_containerapp_types.go b/apis/containerapp/v1beta2/zz_containerapp_types.go new file mode 100755 index 000000000..c0a9f52ce --- /dev/null +++ b/apis/containerapp/v1beta2/zz_containerapp_types.go @@ -0,0 +1,2033 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AuthenticationInitParameters struct { + + // The name of the secret that contains the value for this environment variable. + SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` + + // The Trigger Parameter name to use the supply the value retrieved from the secret_name. + TriggerParameter *string `json:"triggerParameter,omitempty" tf:"trigger_parameter,omitempty"` +} + +type AuthenticationObservation struct { + + // The name of the secret that contains the value for this environment variable. + SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` + + // The Trigger Parameter name to use the supply the value retrieved from the secret_name. 
+ TriggerParameter *string `json:"triggerParameter,omitempty" tf:"trigger_parameter,omitempty"` +} + +type AuthenticationParameters struct { + + // The name of the secret that contains the value for this environment variable. + // +kubebuilder:validation:Optional + SecretName *string `json:"secretName" tf:"secret_name,omitempty"` + + // The Trigger Parameter name to use the supply the value retrieved from the secret_name. + // +kubebuilder:validation:Optional + TriggerParameter *string `json:"triggerParameter" tf:"trigger_parameter,omitempty"` +} + +type AzureQueueScaleRuleInitParameters struct { + + // Zero or more authentication blocks as defined below. + Authentication []AuthenticationInitParameters `json:"authentication,omitempty" tf:"authentication,omitempty"` + + // The name of the Volume to be mounted in the container. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The value of the length of the queue to trigger scaling actions. + QueueLength *float64 `json:"queueLength,omitempty" tf:"queue_length,omitempty"` + + // The name of the Azure Queue + QueueName *string `json:"queueName,omitempty" tf:"queue_name,omitempty"` +} + +type AzureQueueScaleRuleObservation struct { + + // Zero or more authentication blocks as defined below. + Authentication []AuthenticationObservation `json:"authentication,omitempty" tf:"authentication,omitempty"` + + // The name of the Volume to be mounted in the container. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The value of the length of the queue to trigger scaling actions. + QueueLength *float64 `json:"queueLength,omitempty" tf:"queue_length,omitempty"` + + // The name of the Azure Queue + QueueName *string `json:"queueName,omitempty" tf:"queue_name,omitempty"` +} + +type AzureQueueScaleRuleParameters struct { + + // Zero or more authentication blocks as defined below. 
+ // +kubebuilder:validation:Optional + Authentication []AuthenticationParameters `json:"authentication" tf:"authentication,omitempty"` + + // The name of the Volume to be mounted in the container. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The value of the length of the queue to trigger scaling actions. + // +kubebuilder:validation:Optional + QueueLength *float64 `json:"queueLength" tf:"queue_length,omitempty"` + + // The name of the Azure Queue + // +kubebuilder:validation:Optional + QueueName *string `json:"queueName" tf:"queue_name,omitempty"` +} + +type ContainerAppInitParameters struct { + + // The ID of the Container App Environment within which this Container App should exist. Changing this forces a new resource to be created. + // The ID of the Container App Environment to host this Container App. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/containerapp/v1beta1.Environment + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + ContainerAppEnvironmentID *string `json:"containerAppEnvironmentId,omitempty" tf:"container_app_environment_id,omitempty"` + + // Reference to a Environment in containerapp to populate containerAppEnvironmentId. + // +kubebuilder:validation:Optional + ContainerAppEnvironmentIDRef *v1.Reference `json:"containerAppEnvironmentIdRef,omitempty" tf:"-"` + + // Selector for a Environment in containerapp to populate containerAppEnvironmentId. + // +kubebuilder:validation:Optional + ContainerAppEnvironmentIDSelector *v1.Selector `json:"containerAppEnvironmentIdSelector,omitempty" tf:"-"` + + // A dapr block as detailed below. + Dapr *DaprInitParameters `json:"dapr,omitempty" tf:"dapr,omitempty"` + + // An identity block as detailed below. + Identity *IdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // An ingress block as detailed below. 
+ Ingress *IngressInitParameters `json:"ingress,omitempty" tf:"ingress,omitempty"` + + // A registry block as detailed below. + Registry []RegistryInitParameters `json:"registry,omitempty" tf:"registry,omitempty"` + + // The revisions operational mode for the Container App. Possible values include Single and Multiple. In Single mode, a single revision is in operation at any given time. In Multiple mode, more than one revision can be active at a time and can be configured with load distribution via the traffic_weight block in the ingress configuration. + RevisionMode *string `json:"revisionMode,omitempty" tf:"revision_mode,omitempty"` + + // One or more secret block as detailed below. + Secret []SecretInitParameters `json:"secret,omitempty" tf:"secret,omitempty"` + + // A mapping of tags to assign to the Container App. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A template block as detailed below. + Template *TemplateInitParameters `json:"template,omitempty" tf:"template,omitempty"` + + // The name of the Workload Profile in the Container App Environment to place this Container App. + WorkloadProfileName *string `json:"workloadProfileName,omitempty" tf:"workload_profile_name,omitempty"` +} + +type ContainerAppObservation struct { + + // The ID of the Container App Environment within which this Container App should exist. Changing this forces a new resource to be created. + // The ID of the Container App Environment to host this Container App. + ContainerAppEnvironmentID *string `json:"containerAppEnvironmentId,omitempty" tf:"container_app_environment_id,omitempty"` + + // A dapr block as detailed below. + Dapr *DaprObservation `json:"dapr,omitempty" tf:"dapr,omitempty"` + + // The ID of the Container App. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as detailed below. 
+ Identity *IdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // An ingress block as detailed below. + Ingress *IngressObservation `json:"ingress,omitempty" tf:"ingress,omitempty"` + + // The FQDN of the Latest Revision of the Container App. + // The FQDN of the Latest Revision of the Container App. + LatestRevisionFqdn *string `json:"latestRevisionFqdn,omitempty" tf:"latest_revision_fqdn,omitempty"` + + // The name of the latest Container Revision. + // The name of the latest Container Revision. + LatestRevisionName *string `json:"latestRevisionName,omitempty" tf:"latest_revision_name,omitempty"` + + // The location this Container App is deployed in. This is the same as the Environment in which it is deployed. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A list of the Public IP Addresses which the Container App uses for outbound network access. + OutboundIPAddresses []*string `json:"outboundIpAddresses,omitempty" tf:"outbound_ip_addresses,omitempty"` + + // A registry block as detailed below. + Registry []RegistryObservation `json:"registry,omitempty" tf:"registry,omitempty"` + + // The name of the resource group in which the Container App Environment is to be created. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // The revisions operational mode for the Container App. Possible values include Single and Multiple. In Single mode, a single revision is in operation at any given time. In Multiple mode, more than one revision can be active at a time and can be configured with load distribution via the traffic_weight block in the ingress configuration. + RevisionMode *string `json:"revisionMode,omitempty" tf:"revision_mode,omitempty"` + + // One or more secret block as detailed below. 
+ Secret []SecretParameters `json:"secret,omitempty" tf:"secret,omitempty"` + + // A mapping of tags to assign to the Container App. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A template block as detailed below. + Template *TemplateObservation `json:"template,omitempty" tf:"template,omitempty"` + + // The name of the Workload Profile in the Container App Environment to place this Container App. + WorkloadProfileName *string `json:"workloadProfileName,omitempty" tf:"workload_profile_name,omitempty"` +} + +type ContainerAppParameters struct { + + // The ID of the Container App Environment within which this Container App should exist. Changing this forces a new resource to be created. + // The ID of the Container App Environment to host this Container App. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/containerapp/v1beta1.Environment + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + ContainerAppEnvironmentID *string `json:"containerAppEnvironmentId,omitempty" tf:"container_app_environment_id,omitempty"` + + // Reference to a Environment in containerapp to populate containerAppEnvironmentId. + // +kubebuilder:validation:Optional + ContainerAppEnvironmentIDRef *v1.Reference `json:"containerAppEnvironmentIdRef,omitempty" tf:"-"` + + // Selector for a Environment in containerapp to populate containerAppEnvironmentId. + // +kubebuilder:validation:Optional + ContainerAppEnvironmentIDSelector *v1.Selector `json:"containerAppEnvironmentIdSelector,omitempty" tf:"-"` + + // A dapr block as detailed below. + // +kubebuilder:validation:Optional + Dapr *DaprParameters `json:"dapr,omitempty" tf:"dapr,omitempty"` + + // An identity block as detailed below. 
+ // +kubebuilder:validation:Optional + Identity *IdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // An ingress block as detailed below. + // +kubebuilder:validation:Optional + Ingress *IngressParameters `json:"ingress,omitempty" tf:"ingress,omitempty"` + + // A registry block as detailed below. + // +kubebuilder:validation:Optional + Registry []RegistryParameters `json:"registry,omitempty" tf:"registry,omitempty"` + + // The name of the resource group in which the Container App Environment is to be created. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // The revisions operational mode for the Container App. Possible values include Single and Multiple. In Single mode, a single revision is in operation at any given time. In Multiple mode, more than one revision can be active at a time and can be configured with load distribution via the traffic_weight block in the ingress configuration. + // +kubebuilder:validation:Optional + RevisionMode *string `json:"revisionMode,omitempty" tf:"revision_mode,omitempty"` + + // One or more secret block as detailed below. + // +kubebuilder:validation:Optional + Secret []SecretParameters `json:"secret,omitempty" tf:"secret,omitempty"` + + // A mapping of tags to assign to the Container App. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A template block as detailed below. + // +kubebuilder:validation:Optional + Template *TemplateParameters `json:"template,omitempty" tf:"template,omitempty"` + + // The name of the Workload Profile in the Container App Environment to place this Container App. + // +kubebuilder:validation:Optional + WorkloadProfileName *string `json:"workloadProfileName,omitempty" tf:"workload_profile_name,omitempty"` +} + +type ContainerInitParameters struct { + + // A list of extra arguments to pass to the container. + // A list of args to pass to the container. + Args []*string `json:"args,omitempty" tf:"args,omitempty"` + + // The amount of vCPU to allocate to the container. Possible values include 0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 1.75, and 2.0. When there's a workload profile specified, there's no such constraint. + // The amount of vCPU to allocate to the container. Possible values include `0.25`, `0.5`, `0.75`, `1.0`, `1.25`, `1.5`, `1.75`, and `2.0`. **NOTE:** `cpu` and `memory` must be specified in `0.25'/'0.5Gi` combination increments. e.g. `1.0` / `2.0` or `0.5` / `1.0`. When there's a workload profile specified, there's no such constraint. + CPU *float64 `json:"cpu,omitempty" tf:"cpu,omitempty"` + + // A command to pass to the container to override the default. This is provided as a list of command line elements without spaces. + // A command to pass to the container to override the default. This is provided as a list of command line elements without spaces. + Command []*string `json:"command,omitempty" tf:"command,omitempty"` + + // One or more env blocks as detailed below. + Env []EnvInitParameters `json:"env,omitempty" tf:"env,omitempty"` + + // The image to use to create the container. + // The image to use to create the container. 
+ Image *string `json:"image,omitempty" tf:"image,omitempty"` + + // A liveness_probe block as detailed below. + LivenessProbe []LivenessProbeInitParameters `json:"livenessProbe,omitempty" tf:"liveness_probe,omitempty"` + + // The amount of memory to allocate to the container. Possible values are 0.5Gi, 1Gi, 1.5Gi, 2Gi, 2.5Gi, 3Gi, 3.5Gi and 4Gi. When there's a workload profile specified, there's no such constraint. + // The amount of memory to allocate to the container. Possible values include `0.5Gi`, `1.0Gi`, `1.5Gi`, `2.0Gi`, `2.5Gi`, `3.0Gi`, `3.5Gi`, and `4.0Gi`. **NOTE:** `cpu` and `memory` must be specified in `0.25'/'0.5Gi` combination increments. e.g. `1.25` / `2.5Gi` or `0.75` / `1.5Gi`. When there's a workload profile specified, there's no such constraint. + Memory *string `json:"memory,omitempty" tf:"memory,omitempty"` + + // The name of the Volume to be mounted in the container. + // The name of the container. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A readiness_probe block as detailed below. + ReadinessProbe []ReadinessProbeInitParameters `json:"readinessProbe,omitempty" tf:"readiness_probe,omitempty"` + + // A startup_probe block as detailed below. + StartupProbe []StartupProbeInitParameters `json:"startupProbe,omitempty" tf:"startup_probe,omitempty"` + + // A volume_mounts block as detailed below. + VolumeMounts []VolumeMountsInitParameters `json:"volumeMounts,omitempty" tf:"volume_mounts,omitempty"` +} + +type ContainerObservation struct { + + // A list of extra arguments to pass to the container. + // A list of args to pass to the container. + Args []*string `json:"args,omitempty" tf:"args,omitempty"` + + // The amount of vCPU to allocate to the container. Possible values include 0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 1.75, and 2.0. When there's a workload profile specified, there's no such constraint. + // The amount of vCPU to allocate to the container. 
Possible values include `0.25`, `0.5`, `0.75`, `1.0`, `1.25`, `1.5`, `1.75`, and `2.0`. **NOTE:** `cpu` and `memory` must be specified in `0.25'/'0.5Gi` combination increments. e.g. `1.0` / `2.0` or `0.5` / `1.0`. When there's a workload profile specified, there's no such constraint. + CPU *float64 `json:"cpu,omitempty" tf:"cpu,omitempty"` + + // A command to pass to the container to override the default. This is provided as a list of command line elements without spaces. + // A command to pass to the container to override the default. This is provided as a list of command line elements without spaces. + Command []*string `json:"command,omitempty" tf:"command,omitempty"` + + // One or more env blocks as detailed below. + Env []EnvObservation `json:"env,omitempty" tf:"env,omitempty"` + + // The amount of ephemeral storage available to the Container App. + // The amount of ephemeral storage available to the Container App. + EphemeralStorage *string `json:"ephemeralStorage,omitempty" tf:"ephemeral_storage,omitempty"` + + // The image to use to create the container. + // The image to use to create the container. + Image *string `json:"image,omitempty" tf:"image,omitempty"` + + // A liveness_probe block as detailed below. + LivenessProbe []LivenessProbeObservation `json:"livenessProbe,omitempty" tf:"liveness_probe,omitempty"` + + // The amount of memory to allocate to the container. Possible values are 0.5Gi, 1Gi, 1.5Gi, 2Gi, 2.5Gi, 3Gi, 3.5Gi and 4Gi. When there's a workload profile specified, there's no such constraint. + // The amount of memory to allocate to the container. Possible values include `0.5Gi`, `1.0Gi`, `1.5Gi`, `2.0Gi`, `2.5Gi`, `3.0Gi`, `3.5Gi`, and `4.0Gi`. **NOTE:** `cpu` and `memory` must be specified in `0.25'/'0.5Gi` combination increments. e.g. `1.25` / `2.5Gi` or `0.75` / `1.5Gi`. When there's a workload profile specified, there's no such constraint. 
+ Memory *string `json:"memory,omitempty" tf:"memory,omitempty"` + + // The name of the Volume to be mounted in the container. + // The name of the container. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A readiness_probe block as detailed below. + ReadinessProbe []ReadinessProbeObservation `json:"readinessProbe,omitempty" tf:"readiness_probe,omitempty"` + + // A startup_probe block as detailed below. + StartupProbe []StartupProbeObservation `json:"startupProbe,omitempty" tf:"startup_probe,omitempty"` + + // A volume_mounts block as detailed below. + VolumeMounts []VolumeMountsObservation `json:"volumeMounts,omitempty" tf:"volume_mounts,omitempty"` +} + +type ContainerParameters struct { + + // A list of extra arguments to pass to the container. + // A list of args to pass to the container. + // +kubebuilder:validation:Optional + Args []*string `json:"args,omitempty" tf:"args,omitempty"` + + // The amount of vCPU to allocate to the container. Possible values include 0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 1.75, and 2.0. When there's a workload profile specified, there's no such constraint. + // The amount of vCPU to allocate to the container. Possible values include `0.25`, `0.5`, `0.75`, `1.0`, `1.25`, `1.5`, `1.75`, and `2.0`. **NOTE:** `cpu` and `memory` must be specified in `0.25'/'0.5Gi` combination increments. e.g. `1.0` / `2.0` or `0.5` / `1.0`. When there's a workload profile specified, there's no such constraint. + // +kubebuilder:validation:Optional + CPU *float64 `json:"cpu" tf:"cpu,omitempty"` + + // A command to pass to the container to override the default. This is provided as a list of command line elements without spaces. + // A command to pass to the container to override the default. This is provided as a list of command line elements without spaces. + // +kubebuilder:validation:Optional + Command []*string `json:"command,omitempty" tf:"command,omitempty"` + + // One or more env blocks as detailed below. 
+ // +kubebuilder:validation:Optional + Env []EnvParameters `json:"env,omitempty" tf:"env,omitempty"` + + // The image to use to create the container. + // The image to use to create the container. + // +kubebuilder:validation:Optional + Image *string `json:"image" tf:"image,omitempty"` + + // A liveness_probe block as detailed below. + // +kubebuilder:validation:Optional + LivenessProbe []LivenessProbeParameters `json:"livenessProbe,omitempty" tf:"liveness_probe,omitempty"` + + // The amount of memory to allocate to the container. Possible values are 0.5Gi, 1Gi, 1.5Gi, 2Gi, 2.5Gi, 3Gi, 3.5Gi and 4Gi. When there's a workload profile specified, there's no such constraint. + // The amount of memory to allocate to the container. Possible values include `0.5Gi`, `1.0Gi`, `1.5Gi`, `2.0Gi`, `2.5Gi`, `3.0Gi`, `3.5Gi`, and `4.0Gi`. **NOTE:** `cpu` and `memory` must be specified in `0.25'/'0.5Gi` combination increments. e.g. `1.25` / `2.5Gi` or `0.75` / `1.5Gi`. When there's a workload profile specified, there's no such constraint. + // +kubebuilder:validation:Optional + Memory *string `json:"memory" tf:"memory,omitempty"` + + // The name of the Volume to be mounted in the container. + // The name of the container. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // A readiness_probe block as detailed below. + // +kubebuilder:validation:Optional + ReadinessProbe []ReadinessProbeParameters `json:"readinessProbe,omitempty" tf:"readiness_probe,omitempty"` + + // A startup_probe block as detailed below. + // +kubebuilder:validation:Optional + StartupProbe []StartupProbeParameters `json:"startupProbe,omitempty" tf:"startup_probe,omitempty"` + + // A volume_mounts block as detailed below. + // +kubebuilder:validation:Optional + VolumeMounts []VolumeMountsParameters `json:"volumeMounts,omitempty" tf:"volume_mounts,omitempty"` +} + +type CustomDomainInitParameters struct { + + // The Binding type. 
Possible values include Disabled and SniEnabled. Defaults to Disabled. + // The Binding type. Possible values include `Disabled` and `SniEnabled`. Defaults to `Disabled` + CertificateBindingType *string `json:"certificateBindingType,omitempty" tf:"certificate_binding_type,omitempty"` + + // The ID of the Container App Environment Certificate. + CertificateID *string `json:"certificateId,omitempty" tf:"certificate_id,omitempty"` + + // The name of the Volume to be mounted in the container. + // The hostname of the Certificate. Must be the CN or a named SAN in the certificate. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type CustomDomainObservation struct { + + // The Binding type. Possible values include Disabled and SniEnabled. Defaults to Disabled. + // The Binding type. Possible values include `Disabled` and `SniEnabled`. Defaults to `Disabled` + CertificateBindingType *string `json:"certificateBindingType,omitempty" tf:"certificate_binding_type,omitempty"` + + // The ID of the Container App Environment Certificate. + CertificateID *string `json:"certificateId,omitempty" tf:"certificate_id,omitempty"` + + // The name of the Volume to be mounted in the container. + // The hostname of the Certificate. Must be the CN or a named SAN in the certificate. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type CustomDomainParameters struct { + + // The Binding type. Possible values include Disabled and SniEnabled. Defaults to Disabled. + // The Binding type. Possible values include `Disabled` and `SniEnabled`. Defaults to `Disabled` + // +kubebuilder:validation:Optional + CertificateBindingType *string `json:"certificateBindingType,omitempty" tf:"certificate_binding_type,omitempty"` + + // The ID of the Container App Environment Certificate. + // +kubebuilder:validation:Optional + CertificateID *string `json:"certificateId" tf:"certificate_id,omitempty"` + + // The name of the Volume to be mounted in the container. 
+ // The hostname of the Certificate. Must be the CN or a named SAN in the certificate. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type CustomScaleRuleAuthenticationInitParameters struct { + + // The name of the secret that contains the value for this environment variable. + SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` + + // The Trigger Parameter name to use the supply the value retrieved from the secret_name. + TriggerParameter *string `json:"triggerParameter,omitempty" tf:"trigger_parameter,omitempty"` +} + +type CustomScaleRuleAuthenticationObservation struct { + + // The name of the secret that contains the value for this environment variable. + SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` + + // The Trigger Parameter name to use the supply the value retrieved from the secret_name. + TriggerParameter *string `json:"triggerParameter,omitempty" tf:"trigger_parameter,omitempty"` +} + +type CustomScaleRuleAuthenticationParameters struct { + + // The name of the secret that contains the value for this environment variable. + // +kubebuilder:validation:Optional + SecretName *string `json:"secretName" tf:"secret_name,omitempty"` + + // The Trigger Parameter name to use the supply the value retrieved from the secret_name. + // +kubebuilder:validation:Optional + TriggerParameter *string `json:"triggerParameter" tf:"trigger_parameter,omitempty"` +} + +type CustomScaleRuleInitParameters struct { + + // Zero or more authentication blocks as defined below. + Authentication []CustomScaleRuleAuthenticationInitParameters `json:"authentication,omitempty" tf:"authentication,omitempty"` + + // The Custom rule type. 
Possible values include: activemq, artemis-queue, kafka, pulsar, aws-cloudwatch, aws-dynamodb, aws-dynamodb-streams, aws-kinesis-stream, aws-sqs-queue, azure-app-insights, azure-blob, azure-data-explorer, azure-eventhub, azure-log-analytics, azure-monitor, azure-pipelines, azure-servicebus, azure-queue, cassandra, cpu, cron, datadog, elasticsearch, external, external-push, gcp-stackdriver, gcp-storage, gcp-pubsub, graphite, http, huawei-cloudeye, ibmmq, influxdb, kubernetes-workload, liiklus, memory, metrics-api, mongodb, mssql, mysql, nats-jetstream, stan, tcp, new-relic, openstack-metric, openstack-swift, postgresql, predictkube, prometheus, rabbitmq, redis, redis-cluster, redis-sentinel, redis-streams, redis-cluster-streams, redis-sentinel-streams, selenium-grid,solace-event-queue, and github-runner. + CustomRuleType *string `json:"customRuleType,omitempty" tf:"custom_rule_type,omitempty"` + + // - A map of string key-value pairs to configure the Custom Scale Rule. + // +mapType=granular + Metadata map[string]*string `json:"metadata,omitempty" tf:"metadata,omitempty"` + + // The name of the Volume to be mounted in the container. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type CustomScaleRuleObservation struct { + + // Zero or more authentication blocks as defined below. + Authentication []CustomScaleRuleAuthenticationObservation `json:"authentication,omitempty" tf:"authentication,omitempty"` + + // The Custom rule type. 
Possible values include: activemq, artemis-queue, kafka, pulsar, aws-cloudwatch, aws-dynamodb, aws-dynamodb-streams, aws-kinesis-stream, aws-sqs-queue, azure-app-insights, azure-blob, azure-data-explorer, azure-eventhub, azure-log-analytics, azure-monitor, azure-pipelines, azure-servicebus, azure-queue, cassandra, cpu, cron, datadog, elasticsearch, external, external-push, gcp-stackdriver, gcp-storage, gcp-pubsub, graphite, http, huawei-cloudeye, ibmmq, influxdb, kubernetes-workload, liiklus, memory, metrics-api, mongodb, mssql, mysql, nats-jetstream, stan, tcp, new-relic, openstack-metric, openstack-swift, postgresql, predictkube, prometheus, rabbitmq, redis, redis-cluster, redis-sentinel, redis-streams, redis-cluster-streams, redis-sentinel-streams, selenium-grid,solace-event-queue, and github-runner. + CustomRuleType *string `json:"customRuleType,omitempty" tf:"custom_rule_type,omitempty"` + + // - A map of string key-value pairs to configure the Custom Scale Rule. + // +mapType=granular + Metadata map[string]*string `json:"metadata,omitempty" tf:"metadata,omitempty"` + + // The name of the Volume to be mounted in the container. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type CustomScaleRuleParameters struct { + + // Zero or more authentication blocks as defined below. + // +kubebuilder:validation:Optional + Authentication []CustomScaleRuleAuthenticationParameters `json:"authentication,omitempty" tf:"authentication,omitempty"` + + // The Custom rule type. 
Possible values include: activemq, artemis-queue, kafka, pulsar, aws-cloudwatch, aws-dynamodb, aws-dynamodb-streams, aws-kinesis-stream, aws-sqs-queue, azure-app-insights, azure-blob, azure-data-explorer, azure-eventhub, azure-log-analytics, azure-monitor, azure-pipelines, azure-servicebus, azure-queue, cassandra, cpu, cron, datadog, elasticsearch, external, external-push, gcp-stackdriver, gcp-storage, gcp-pubsub, graphite, http, huawei-cloudeye, ibmmq, influxdb, kubernetes-workload, liiklus, memory, metrics-api, mongodb, mssql, mysql, nats-jetstream, stan, tcp, new-relic, openstack-metric, openstack-swift, postgresql, predictkube, prometheus, rabbitmq, redis, redis-cluster, redis-sentinel, redis-streams, redis-cluster-streams, redis-sentinel-streams, selenium-grid,solace-event-queue, and github-runner. + // +kubebuilder:validation:Optional + CustomRuleType *string `json:"customRuleType" tf:"custom_rule_type,omitempty"` + + // - A map of string key-value pairs to configure the Custom Scale Rule. + // +kubebuilder:validation:Optional + // +mapType=granular + Metadata map[string]*string `json:"metadata" tf:"metadata,omitempty"` + + // The name of the Volume to be mounted in the container. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type DaprInitParameters struct { + + // The Dapr Application Identifier. + // The Dapr Application Identifier. + AppID *string `json:"appId,omitempty" tf:"app_id,omitempty"` + + // The port which the application is listening on. This is the same as the ingress port. + // The port which the application is listening on. This is the same as the `ingress` port. + AppPort *float64 `json:"appPort,omitempty" tf:"app_port,omitempty"` + + // The protocol for the app. Possible values include http and grpc. Defaults to http. + // The protocol for the app. Possible values include `http` and `grpc`. Defaults to `http`. 
+ AppProtocol *string `json:"appProtocol,omitempty" tf:"app_protocol,omitempty"` +} + +type DaprObservation struct { + + // The Dapr Application Identifier. + // The Dapr Application Identifier. + AppID *string `json:"appId,omitempty" tf:"app_id,omitempty"` + + // The port which the application is listening on. This is the same as the ingress port. + // The port which the application is listening on. This is the same as the `ingress` port. + AppPort *float64 `json:"appPort,omitempty" tf:"app_port,omitempty"` + + // The protocol for the app. Possible values include http and grpc. Defaults to http. + // The protocol for the app. Possible values include `http` and `grpc`. Defaults to `http`. + AppProtocol *string `json:"appProtocol,omitempty" tf:"app_protocol,omitempty"` +} + +type DaprParameters struct { + + // The Dapr Application Identifier. + // The Dapr Application Identifier. + // +kubebuilder:validation:Optional + AppID *string `json:"appId" tf:"app_id,omitempty"` + + // The port which the application is listening on. This is the same as the ingress port. + // The port which the application is listening on. This is the same as the `ingress` port. + // +kubebuilder:validation:Optional + AppPort *float64 `json:"appPort,omitempty" tf:"app_port,omitempty"` + + // The protocol for the app. Possible values include http and grpc. Defaults to http. + // The protocol for the app. Possible values include `http` and `grpc`. Defaults to `http`. + // +kubebuilder:validation:Optional + AppProtocol *string `json:"appProtocol,omitempty" tf:"app_protocol,omitempty"` +} + +type EnvInitParameters struct { + + // The name of the Volume to be mounted in the container. + // The name of the environment variable for the container. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The name of the secret that contains the value for this environment variable. + // The name of the secret that contains the value for this environment variable. 
+ SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` + + // The value for this secret. + // The value for this environment variable. **NOTE:** This value is ignored if `secret_name` is used + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type EnvObservation struct { + + // The name of the Volume to be mounted in the container. + // The name of the environment variable for the container. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The name of the secret that contains the value for this environment variable. + // The name of the secret that contains the value for this environment variable. + SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` + + // The value for this secret. + // The value for this environment variable. **NOTE:** This value is ignored if `secret_name` is used + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type EnvParameters struct { + + // The name of the Volume to be mounted in the container. + // The name of the environment variable for the container. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The name of the secret that contains the value for this environment variable. + // The name of the secret that contains the value for this environment variable. + // +kubebuilder:validation:Optional + SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` + + // The value for this secret. + // The value for this environment variable. **NOTE:** This value is ignored if `secret_name` is used + // +kubebuilder:validation:Optional + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type HTTPScaleRuleAuthenticationInitParameters struct { + + // The name of the secret that contains the value for this environment variable. 
+ SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` + + // The Trigger Parameter name to use the supply the value retrieved from the secret_name. + TriggerParameter *string `json:"triggerParameter,omitempty" tf:"trigger_parameter,omitempty"` +} + +type HTTPScaleRuleAuthenticationObservation struct { + + // The name of the secret that contains the value for this environment variable. + SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` + + // The Trigger Parameter name to use the supply the value retrieved from the secret_name. + TriggerParameter *string `json:"triggerParameter,omitempty" tf:"trigger_parameter,omitempty"` +} + +type HTTPScaleRuleAuthenticationParameters struct { + + // The name of the secret that contains the value for this environment variable. + // +kubebuilder:validation:Optional + SecretName *string `json:"secretName" tf:"secret_name,omitempty"` + + // The Trigger Parameter name to use the supply the value retrieved from the secret_name. + // +kubebuilder:validation:Optional + TriggerParameter *string `json:"triggerParameter,omitempty" tf:"trigger_parameter,omitempty"` +} + +type HTTPScaleRuleInitParameters struct { + + // Zero or more authentication blocks as defined below. + Authentication []HTTPScaleRuleAuthenticationInitParameters `json:"authentication,omitempty" tf:"authentication,omitempty"` + + // - The number of concurrent requests to trigger scaling. + ConcurrentRequests *string `json:"concurrentRequests,omitempty" tf:"concurrent_requests,omitempty"` + + // The name of the Volume to be mounted in the container. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type HTTPScaleRuleObservation struct { + + // Zero or more authentication blocks as defined below. + Authentication []HTTPScaleRuleAuthenticationObservation `json:"authentication,omitempty" tf:"authentication,omitempty"` + + // - The number of concurrent requests to trigger scaling. 
+ ConcurrentRequests *string `json:"concurrentRequests,omitempty" tf:"concurrent_requests,omitempty"` + + // The name of the Volume to be mounted in the container. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type HTTPScaleRuleParameters struct { + + // Zero or more authentication blocks as defined below. + // +kubebuilder:validation:Optional + Authentication []HTTPScaleRuleAuthenticationParameters `json:"authentication,omitempty" tf:"authentication,omitempty"` + + // - The number of concurrent requests to trigger scaling. + // +kubebuilder:validation:Optional + ConcurrentRequests *string `json:"concurrentRequests" tf:"concurrent_requests,omitempty"` + + // The name of the Volume to be mounted in the container. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type HeaderInitParameters struct { + + // The name of the Volume to be mounted in the container. + // The HTTP Header Name. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The value for this secret. + // The HTTP Header value. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type HeaderObservation struct { + + // The name of the Volume to be mounted in the container. + // The HTTP Header Name. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The value for this secret. + // The HTTP Header value. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type HeaderParameters struct { + + // The name of the Volume to be mounted in the container. + // The HTTP Header Name. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The value for this secret. + // The HTTP Header value. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type IPSecurityRestrictionInitParameters struct { + + // The IP-filter action. Allow or Deny. + // The action. Allow or Deny. 
+ Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // Describe the IP restriction rule that is being sent to the container-app. + // Describe the IP restriction rule that is being sent to the container-app. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // CIDR notation to match incoming IP address. + // CIDR notation to match incoming IP address. + IPAddressRange *string `json:"ipAddressRange,omitempty" tf:"ip_address_range,omitempty"` + + // The name of the Volume to be mounted in the container. + // Name for the IP restriction rule. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type IPSecurityRestrictionObservation struct { + + // The IP-filter action. Allow or Deny. + // The action. Allow or Deny. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // Describe the IP restriction rule that is being sent to the container-app. + // Describe the IP restriction rule that is being sent to the container-app. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // CIDR notation to match incoming IP address. + // CIDR notation to match incoming IP address. + IPAddressRange *string `json:"ipAddressRange,omitempty" tf:"ip_address_range,omitempty"` + + // The name of the Volume to be mounted in the container. + // Name for the IP restriction rule. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type IPSecurityRestrictionParameters struct { + + // The IP-filter action. Allow or Deny. + // The action. Allow or Deny. + // +kubebuilder:validation:Optional + Action *string `json:"action" tf:"action,omitempty"` + + // Describe the IP restriction rule that is being sent to the container-app. + // Describe the IP restriction rule that is being sent to the container-app. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // CIDR notation to match incoming IP address. 
+ // CIDR notation to match incoming IP address. + // +kubebuilder:validation:Optional + IPAddressRange *string `json:"ipAddressRange" tf:"ip_address_range,omitempty"` + + // The name of the Volume to be mounted in the container. + // Name for the IP restriction rule. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type IdentityInitParameters struct { + + // - A list of one or more Resource IDs for User Assigned Managed identities to assign. Required when type is set to UserAssigned or SystemAssigned, UserAssigned. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The type of managed identity to assign. Possible values are SystemAssigned, UserAssigned, and SystemAssigned, UserAssigned (to enable both). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityObservation struct { + + // - A list of one or more Resource IDs for User Assigned Managed identities to assign. Required when type is set to UserAssigned or SystemAssigned, UserAssigned. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The ID of the Container App. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The ID of the Container App. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // The type of managed identity to assign. Possible values are SystemAssigned, UserAssigned, and SystemAssigned, UserAssigned (to enable both). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityParameters struct { + + // - A list of one or more Resource IDs for User Assigned Managed identities to assign. Required when type is set to UserAssigned or SystemAssigned, UserAssigned. 
+ // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The type of managed identity to assign. Possible values are SystemAssigned, UserAssigned, and SystemAssigned, UserAssigned (to enable both). + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type IngressInitParameters struct { + + // Should this ingress allow insecure connections? + // Should this ingress allow insecure connections? + AllowInsecureConnections *bool `json:"allowInsecureConnections,omitempty" tf:"allow_insecure_connections,omitempty"` + + // One or more custom_domain block as detailed below. + CustomDomain *CustomDomainInitParameters `json:"customDomain,omitempty" tf:"custom_domain,omitempty"` + + // The exposed port on the container for the Ingress traffic. + // The exposed port on the container for the Ingress traffic. + ExposedPort *float64 `json:"exposedPort,omitempty" tf:"exposed_port,omitempty"` + + // Are connections to this Ingress from outside the Container App Environment enabled? Defaults to false. + // Is this an external Ingress. + ExternalEnabled *bool `json:"externalEnabled,omitempty" tf:"external_enabled,omitempty"` + + // One or more ip_security_restriction blocks for IP-filtering rules as defined below. + IPSecurityRestriction []IPSecurityRestrictionInitParameters `json:"ipSecurityRestriction,omitempty" tf:"ip_security_restriction,omitempty"` + + // The target port on the container for the Ingress traffic. + // The target port on the container for the Ingress traffic. + TargetPort *float64 `json:"targetPort,omitempty" tf:"target_port,omitempty"` + + // One or more traffic_weight blocks as detailed below. + TrafficWeight []TrafficWeightInitParameters `json:"trafficWeight,omitempty" tf:"traffic_weight,omitempty"` + + // The transport method for the Ingress. Possible values are auto, http, http2 and tcp. Defaults to auto. 
+ // The transport method for the Ingress. Possible values include `auto`, `http`, and `http2`, `tcp`. Defaults to `auto` + Transport *string `json:"transport,omitempty" tf:"transport,omitempty"` +} + +type IngressObservation struct { + + // Should this ingress allow insecure connections? + // Should this ingress allow insecure connections? + AllowInsecureConnections *bool `json:"allowInsecureConnections,omitempty" tf:"allow_insecure_connections,omitempty"` + + // One or more custom_domain block as detailed below. + CustomDomain *CustomDomainObservation `json:"customDomain,omitempty" tf:"custom_domain,omitempty"` + + // The exposed port on the container for the Ingress traffic. + // The exposed port on the container for the Ingress traffic. + ExposedPort *float64 `json:"exposedPort,omitempty" tf:"exposed_port,omitempty"` + + // Are connections to this Ingress from outside the Container App Environment enabled? Defaults to false. + // Is this an external Ingress. + ExternalEnabled *bool `json:"externalEnabled,omitempty" tf:"external_enabled,omitempty"` + + // The FQDN of the ingress. + // The FQDN of the ingress. + Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"` + + // One or more ip_security_restriction blocks for IP-filtering rules as defined below. + IPSecurityRestriction []IPSecurityRestrictionObservation `json:"ipSecurityRestriction,omitempty" tf:"ip_security_restriction,omitempty"` + + // The target port on the container for the Ingress traffic. + // The target port on the container for the Ingress traffic. + TargetPort *float64 `json:"targetPort,omitempty" tf:"target_port,omitempty"` + + // One or more traffic_weight blocks as detailed below. + TrafficWeight []TrafficWeightObservation `json:"trafficWeight,omitempty" tf:"traffic_weight,omitempty"` + + // The transport method for the Ingress. Possible values are auto, http, http2 and tcp. Defaults to auto. + // The transport method for the Ingress. 
Possible values include `auto`, `http`, `http2`, and `tcp`. Defaults to `auto` + Transport *string `json:"transport,omitempty" tf:"transport,omitempty"` +} + +type IngressParameters struct { + + // Should this ingress allow insecure connections? + // Should this ingress allow insecure connections? + // +kubebuilder:validation:Optional + AllowInsecureConnections *bool `json:"allowInsecureConnections,omitempty" tf:"allow_insecure_connections,omitempty"` + + // One or more custom_domain block as detailed below. + // +kubebuilder:validation:Optional + CustomDomain *CustomDomainParameters `json:"customDomain,omitempty" tf:"custom_domain,omitempty"` + + // The exposed port on the container for the Ingress traffic. + // The exposed port on the container for the Ingress traffic. + // +kubebuilder:validation:Optional + ExposedPort *float64 `json:"exposedPort,omitempty" tf:"exposed_port,omitempty"` + + // Are connections to this Ingress from outside the Container App Environment enabled? Defaults to false. + // Is this an external Ingress. + // +kubebuilder:validation:Optional + ExternalEnabled *bool `json:"externalEnabled,omitempty" tf:"external_enabled,omitempty"` + + // One or more ip_security_restriction blocks for IP-filtering rules as defined below. + // +kubebuilder:validation:Optional + IPSecurityRestriction []IPSecurityRestrictionParameters `json:"ipSecurityRestriction,omitempty" tf:"ip_security_restriction,omitempty"` + + // The target port on the container for the Ingress traffic. + // The target port on the container for the Ingress traffic. + // +kubebuilder:validation:Optional + TargetPort *float64 `json:"targetPort" tf:"target_port,omitempty"` + + // One or more traffic_weight blocks as detailed below. + // +kubebuilder:validation:Optional + TrafficWeight []TrafficWeightParameters `json:"trafficWeight" tf:"traffic_weight,omitempty"` + + // The transport method for the Ingress. Possible values are auto, http, http2 and tcp. Defaults to auto. 
+ // The transport method for the Ingress. Possible values include `auto`, `http`, `http2`, and `tcp`. Defaults to `auto` + // +kubebuilder:validation:Optional + Transport *string `json:"transport,omitempty" tf:"transport,omitempty"` +} + +type InitContainerEnvInitParameters struct { + + // The name of the Volume to be mounted in the container. + // The name of the environment variable for the container. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The name of the secret that contains the value for this environment variable. + // The name of the secret that contains the value for this environment variable. + SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` + + // The value for this secret. + // The value for this environment variable. **NOTE:** This value is ignored if `secret_name` is used + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type InitContainerEnvObservation struct { + + // The name of the Volume to be mounted in the container. + // The name of the environment variable for the container. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The name of the secret that contains the value for this environment variable. + // The name of the secret that contains the value for this environment variable. + SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` + + // The value for this secret. + // The value for this environment variable. **NOTE:** This value is ignored if `secret_name` is used + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type InitContainerEnvParameters struct { + + // The name of the Volume to be mounted in the container. + // The name of the environment variable for the container. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The name of the secret that contains the value for this environment variable. 
+ // The name of the secret that contains the value for this environment variable. + // +kubebuilder:validation:Optional + SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` + + // The value for this secret. + // The value for this environment variable. **NOTE:** This value is ignored if `secret_name` is used + // +kubebuilder:validation:Optional + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type InitContainerInitParameters struct { + + // A list of extra arguments to pass to the container. + // A list of args to pass to the container. + Args []*string `json:"args,omitempty" tf:"args,omitempty"` + + // The amount of vCPU to allocate to the container. Possible values include 0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 1.75, and 2.0. When there's a workload profile specified, there's no such constraint. + // The amount of vCPU to allocate to the container. Possible values include `0.25`, `0.5`, `0.75`, `1.0`, `1.25`, `1.5`, `1.75`, and `2.0`. **NOTE:** `cpu` and `memory` must be specified in `0.25'/'0.5Gi` combination increments. e.g. `1.0` / `2.0` or `0.5` / `1.0`. When there's a workload profile specified, there's no such constraint. + CPU *float64 `json:"cpu,omitempty" tf:"cpu,omitempty"` + + // A command to pass to the container to override the default. This is provided as a list of command line elements without spaces. + // A command to pass to the container to override the default. This is provided as a list of command line elements without spaces. + Command []*string `json:"command,omitempty" tf:"command,omitempty"` + + // One or more env blocks as detailed below. + Env []InitContainerEnvInitParameters `json:"env,omitempty" tf:"env,omitempty"` + + // The image to use to create the container. + // The image to use to create the container. + Image *string `json:"image,omitempty" tf:"image,omitempty"` + + // The amount of memory to allocate to the container. Possible values are 0.5Gi, 1Gi, 1.5Gi, 2Gi, 2.5Gi, 3Gi, 3.5Gi and 4Gi. 
When there's a workload profile specified, there's no such constraint. + // The amount of memory to allocate to the container. Possible values include `0.5Gi`, `1.0Gi`, `1.5Gi`, `2.0Gi`, `2.5Gi`, `3.0Gi`, `3.5Gi`, and `4.0Gi`. **NOTE:** `cpu` and `memory` must be specified in `0.25'/'0.5Gi` combination increments. e.g. `1.25` / `2.5Gi` or `0.75` / `1.5Gi`. When there's a workload profile specified, there's no such constraint. + Memory *string `json:"memory,omitempty" tf:"memory,omitempty"` + + // The name of the Volume to be mounted in the container. + // The name of the container. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A volume_mounts block as detailed below. + VolumeMounts []InitContainerVolumeMountsInitParameters `json:"volumeMounts,omitempty" tf:"volume_mounts,omitempty"` +} + +type InitContainerObservation struct { + + // A list of extra arguments to pass to the container. + // A list of args to pass to the container. + Args []*string `json:"args,omitempty" tf:"args,omitempty"` + + // The amount of vCPU to allocate to the container. Possible values include 0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 1.75, and 2.0. When there's a workload profile specified, there's no such constraint. + // The amount of vCPU to allocate to the container. Possible values include `0.25`, `0.5`, `0.75`, `1.0`, `1.25`, `1.5`, `1.75`, and `2.0`. **NOTE:** `cpu` and `memory` must be specified in `0.25'/'0.5Gi` combination increments. e.g. `1.0` / `2.0` or `0.5` / `1.0`. When there's a workload profile specified, there's no such constraint. + CPU *float64 `json:"cpu,omitempty" tf:"cpu,omitempty"` + + // A command to pass to the container to override the default. This is provided as a list of command line elements without spaces. + // A command to pass to the container to override the default. This is provided as a list of command line elements without spaces. 
+ Command []*string `json:"command,omitempty" tf:"command,omitempty"` + + // One or more env blocks as detailed below. + Env []InitContainerEnvObservation `json:"env,omitempty" tf:"env,omitempty"` + + // The amount of ephemeral storage available to the Container App. + // The amount of ephemeral storage available to the Container App. + EphemeralStorage *string `json:"ephemeralStorage,omitempty" tf:"ephemeral_storage,omitempty"` + + // The image to use to create the container. + // The image to use to create the container. + Image *string `json:"image,omitempty" tf:"image,omitempty"` + + // The amount of memory to allocate to the container. Possible values are 0.5Gi, 1Gi, 1.5Gi, 2Gi, 2.5Gi, 3Gi, 3.5Gi and 4Gi. When there's a workload profile specified, there's no such constraint. + // The amount of memory to allocate to the container. Possible values include `0.5Gi`, `1.0Gi`, `1.5Gi`, `2.0Gi`, `2.5Gi`, `3.0Gi`, `3.5Gi`, and `4.0Gi`. **NOTE:** `cpu` and `memory` must be specified in `0.25'/'0.5Gi` combination increments. e.g. `1.25` / `2.5Gi` or `0.75` / `1.5Gi`. When there's a workload profile specified, there's no such constraint. + Memory *string `json:"memory,omitempty" tf:"memory,omitempty"` + + // The name of the Volume to be mounted in the container. + // The name of the container. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A volume_mounts block as detailed below. + VolumeMounts []InitContainerVolumeMountsObservation `json:"volumeMounts,omitempty" tf:"volume_mounts,omitempty"` +} + +type InitContainerParameters struct { + + // A list of extra arguments to pass to the container. + // A list of args to pass to the container. + // +kubebuilder:validation:Optional + Args []*string `json:"args,omitempty" tf:"args,omitempty"` + + // The amount of vCPU to allocate to the container. Possible values include 0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 1.75, and 2.0. When there's a workload profile specified, there's no such constraint. 
+ // The amount of vCPU to allocate to the container. Possible values include `0.25`, `0.5`, `0.75`, `1.0`, `1.25`, `1.5`, `1.75`, and `2.0`. **NOTE:** `cpu` and `memory` must be specified in `0.25'/'0.5Gi` combination increments. e.g. `1.0` / `2.0` or `0.5` / `1.0`. When there's a workload profile specified, there's no such constraint. + // +kubebuilder:validation:Optional + CPU *float64 `json:"cpu,omitempty" tf:"cpu,omitempty"` + + // A command to pass to the container to override the default. This is provided as a list of command line elements without spaces. + // A command to pass to the container to override the default. This is provided as a list of command line elements without spaces. + // +kubebuilder:validation:Optional + Command []*string `json:"command,omitempty" tf:"command,omitempty"` + + // One or more env blocks as detailed below. + // +kubebuilder:validation:Optional + Env []InitContainerEnvParameters `json:"env,omitempty" tf:"env,omitempty"` + + // The image to use to create the container. + // The image to use to create the container. + // +kubebuilder:validation:Optional + Image *string `json:"image" tf:"image,omitempty"` + + // The amount of memory to allocate to the container. Possible values are 0.5Gi, 1Gi, 1.5Gi, 2Gi, 2.5Gi, 3Gi, 3.5Gi and 4Gi. When there's a workload profile specified, there's no such constraint. + // The amount of memory to allocate to the container. Possible values include `0.5Gi`, `1.0Gi`, `1.5Gi`, `2.0Gi`, `2.5Gi`, `3.0Gi`, `3.5Gi`, and `4.0Gi`. **NOTE:** `cpu` and `memory` must be specified in `0.25'/'0.5Gi` combination increments. e.g. `1.25` / `2.5Gi` or `0.75` / `1.5Gi`. When there's a workload profile specified, there's no such constraint. + // +kubebuilder:validation:Optional + Memory *string `json:"memory,omitempty" tf:"memory,omitempty"` + + // The name of the Volume to be mounted in the container. + // The name of the container. 
+ // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // A volume_mounts block as detailed below. + // +kubebuilder:validation:Optional + VolumeMounts []InitContainerVolumeMountsParameters `json:"volumeMounts,omitempty" tf:"volume_mounts,omitempty"` +} + +type InitContainerVolumeMountsInitParameters struct { + + // The name of the Volume to be mounted in the container. + // The name of the Volume to be mounted in the container. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The path in the container at which to mount this volume. + // The path in the container at which to mount this volume. + Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type InitContainerVolumeMountsObservation struct { + + // The name of the Volume to be mounted in the container. + // The name of the Volume to be mounted in the container. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The path in the container at which to mount this volume. + // The path in the container at which to mount this volume. + Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type InitContainerVolumeMountsParameters struct { + + // The name of the Volume to be mounted in the container. + // The name of the Volume to be mounted in the container. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The path in the container at which to mount this volume. + // The path in the container at which to mount this volume. + // +kubebuilder:validation:Optional + Path *string `json:"path" tf:"path,omitempty"` +} + +type LivenessProbeInitParameters struct { + + // The number of consecutive failures required to consider this probe as failed. Possible values are between 1 and 10. Defaults to 3. + // The number of consecutive failures required to consider this probe as failed. Possible values are between `1` and `10`. Defaults to `3`. 
+ FailureCountThreshold *float64 `json:"failureCountThreshold,omitempty" tf:"failure_count_threshold,omitempty"` + + // A header block as detailed below. + Header []HeaderInitParameters `json:"header,omitempty" tf:"header,omitempty"` + + // The value for the host header which should be sent with this probe. If unspecified, the IP Address of the Pod is used as the host header. Setting a value for Host in headers can be used to override this for HTTP and HTTPS type probes. + // The probe hostname. Defaults to the pod IP address. Setting a value for `Host` in `headers` can be used to override this for `http` and `https` type probes. + Host *string `json:"host,omitempty" tf:"host,omitempty"` + + // The time in seconds to wait after the container has started before the probe is started. + // The time in seconds to wait after the container has started before the probe is started. + InitialDelay *float64 `json:"initialDelay,omitempty" tf:"initial_delay,omitempty"` + + // How often, in seconds, the probe should run. Possible values are between 1 and 240. Defaults to 10 + // How often, in seconds, the probe should run. Possible values are between `1` and `240`. Defaults to `10` + IntervalSeconds *float64 `json:"intervalSeconds,omitempty" tf:"interval_seconds,omitempty"` + + // The path in the container at which to mount this volume. + // The URI to use with the `host` for http type probes. Not valid for `TCP` type probes. Defaults to `/`. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // The port number on which to connect. Possible values are between 1 and 65535. + // The port number on which to connect. Possible values are between `1` and `65535`. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Time in seconds after which the probe times out. Possible values are in the range 1 - 240. Defaults to 1. + // Time in seconds after which the probe times out. Possible values are between `1` and `240`. Defaults to `1`. 
+ Timeout *float64 `json:"timeout,omitempty" tf:"timeout,omitempty"` + + // Type of probe. Possible values are TCP, HTTP, and HTTPS. + // Type of probe. Possible values are `TCP`, `HTTP`, and `HTTPS`. + Transport *string `json:"transport,omitempty" tf:"transport,omitempty"` +} + +type LivenessProbeObservation struct { + + // The number of consecutive failures required to consider this probe as failed. Possible values are between 1 and 10. Defaults to 3. + // The number of consecutive failures required to consider this probe as failed. Possible values are between `1` and `10`. Defaults to `3`. + FailureCountThreshold *float64 `json:"failureCountThreshold,omitempty" tf:"failure_count_threshold,omitempty"` + + // A header block as detailed below. + Header []HeaderObservation `json:"header,omitempty" tf:"header,omitempty"` + + // The value for the host header which should be sent with this probe. If unspecified, the IP Address of the Pod is used as the host header. Setting a value for Host in headers can be used to override this for HTTP and HTTPS type probes. + // The probe hostname. Defaults to the pod IP address. Setting a value for `Host` in `headers` can be used to override this for `http` and `https` type probes. + Host *string `json:"host,omitempty" tf:"host,omitempty"` + + // The time in seconds to wait after the container has started before the probe is started. + // The time in seconds to wait after the container has started before the probe is started. + InitialDelay *float64 `json:"initialDelay,omitempty" tf:"initial_delay,omitempty"` + + // How often, in seconds, the probe should run. Possible values are between 1 and 240. Defaults to 10 + // How often, in seconds, the probe should run. Possible values are between `1` and `240`. Defaults to `10` + IntervalSeconds *float64 `json:"intervalSeconds,omitempty" tf:"interval_seconds,omitempty"` + + // The path in the container at which to mount this volume. 
+ // The URI to use with the `host` for http type probes. Not valid for `TCP` type probes. Defaults to `/`. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // The port number on which to connect. Possible values are between 1 and 65535. + // The port number on which to connect. Possible values are between `1` and `65535`. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // The time in seconds after the container is sent the termination signal before the process is forcibly killed. + // The time in seconds after the container is sent the termination signal before the process is forcibly killed. + TerminationGracePeriodSeconds *float64 `json:"terminationGracePeriodSeconds,omitempty" tf:"termination_grace_period_seconds,omitempty"` + + // Time in seconds after which the probe times out. Possible values are in the range 1 - 240. Defaults to 1. + // Time in seconds after which the probe times out. Possible values are between `1` and `240`. Defaults to `1`. + Timeout *float64 `json:"timeout,omitempty" tf:"timeout,omitempty"` + + // Type of probe. Possible values are TCP, HTTP, and HTTPS. + // Type of probe. Possible values are `TCP`, `HTTP`, and `HTTPS`. + Transport *string `json:"transport,omitempty" tf:"transport,omitempty"` +} + +type LivenessProbeParameters struct { + + // The number of consecutive failures required to consider this probe as failed. Possible values are between 1 and 10. Defaults to 3. + // The number of consecutive failures required to consider this probe as failed. Possible values are between `1` and `10`. Defaults to `3`. + // +kubebuilder:validation:Optional + FailureCountThreshold *float64 `json:"failureCountThreshold,omitempty" tf:"failure_count_threshold,omitempty"` + + // A header block as detailed below. + // +kubebuilder:validation:Optional + Header []HeaderParameters `json:"header,omitempty" tf:"header,omitempty"` + + // The value for the host header which should be sent with this probe. 
If unspecified, the IP Address of the Pod is used as the host header. Setting a value for Host in headers can be used to override this for HTTP and HTTPS type probes. + // The probe hostname. Defaults to the pod IP address. Setting a value for `Host` in `headers` can be used to override this for `http` and `https` type probes. + // +kubebuilder:validation:Optional + Host *string `json:"host,omitempty" tf:"host,omitempty"` + + // The time in seconds to wait after the container has started before the probe is started. + // The time in seconds to wait after the container has started before the probe is started. + // +kubebuilder:validation:Optional + InitialDelay *float64 `json:"initialDelay,omitempty" tf:"initial_delay,omitempty"` + + // How often, in seconds, the probe should run. Possible values are between 1 and 240. Defaults to 10 + // How often, in seconds, the probe should run. Possible values are between `1` and `240`. Defaults to `10` + // +kubebuilder:validation:Optional + IntervalSeconds *float64 `json:"intervalSeconds,omitempty" tf:"interval_seconds,omitempty"` + + // The path in the container at which to mount this volume. + // The URI to use with the `host` for http type probes. Not valid for `TCP` type probes. Defaults to `/`. + // +kubebuilder:validation:Optional + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // The port number on which to connect. Possible values are between 1 and 65535. + // The port number on which to connect. Possible values are between `1` and `65535`. + // +kubebuilder:validation:Optional + Port *float64 `json:"port" tf:"port,omitempty"` + + // Time in seconds after which the probe times out. Possible values are in the range 1 - 240. Defaults to 1. + // Time in seconds after which the probe times out. Possible values are between `1` and `240`. Defaults to `1`. + // +kubebuilder:validation:Optional + Timeout *float64 `json:"timeout,omitempty" tf:"timeout,omitempty"` + + // Type of probe. 
Possible values are TCP, HTTP, and HTTPS. + // Type of probe. Possible values are `TCP`, `HTTP`, and `HTTPS`. + // +kubebuilder:validation:Optional + Transport *string `json:"transport" tf:"transport,omitempty"` +} + +type ReadinessProbeHeaderInitParameters struct { + + // The name of the Volume to be mounted in the container. + // The HTTP Header Name. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The value for this secret. + // The HTTP Header value. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ReadinessProbeHeaderObservation struct { + + // The name of the Volume to be mounted in the container. + // The HTTP Header Name. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The value for this secret. + // The HTTP Header value. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ReadinessProbeHeaderParameters struct { + + // The name of the Volume to be mounted in the container. + // The HTTP Header Name. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The value for this secret. + // The HTTP Header value. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type ReadinessProbeInitParameters struct { + + // The number of consecutive failures required to consider this probe as failed. Possible values are between 1 and 10. Defaults to 3. + // The number of consecutive failures required to consider this probe as failed. Possible values are between `1` and `10`. Defaults to `3`. + FailureCountThreshold *float64 `json:"failureCountThreshold,omitempty" tf:"failure_count_threshold,omitempty"` + + // A header block as detailed below. + Header []ReadinessProbeHeaderInitParameters `json:"header,omitempty" tf:"header,omitempty"` + + // The value for the host header which should be sent with this probe. If unspecified, the IP Address of the Pod is used as the host header. 
Setting a value for Host in headers can be used to override this for HTTP and HTTPS type probes. + // The probe hostname. Defaults to the pod IP address. Setting a value for `Host` in `headers` can be used to override this for `http` and `https` type probes. + Host *string `json:"host,omitempty" tf:"host,omitempty"` + + // How often, in seconds, the probe should run. Possible values are between 1 and 240. Defaults to 10 + // How often, in seconds, the probe should run. Possible values are between `1` and `240`. Defaults to `10` + IntervalSeconds *float64 `json:"intervalSeconds,omitempty" tf:"interval_seconds,omitempty"` + + // The path in the container at which to mount this volume. + // The URI to use for http type probes. Not valid for `TCP` type probes. Defaults to `/`. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // The port number on which to connect. Possible values are between 1 and 65535. + // The port number on which to connect. Possible values are between `1` and `65535`. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // The number of consecutive successful responses required to consider this probe as successful. Possible values are between 1 and 10. Defaults to 3. + // The number of consecutive successful responses required to consider this probe as successful. Possible values are between `1` and `10`. Defaults to `3`. + SuccessCountThreshold *float64 `json:"successCountThreshold,omitempty" tf:"success_count_threshold,omitempty"` + + // Time in seconds after which the probe times out. Possible values are in the range 1 - 240. Defaults to 1. + // Time in seconds after which the probe times out. Possible values are between `1` and `240`. Defaults to `1`. + Timeout *float64 `json:"timeout,omitempty" tf:"timeout,omitempty"` + + // Type of probe. Possible values are TCP, HTTP, and HTTPS. + // Type of probe. Possible values are `TCP`, `HTTP`, and `HTTPS`. 
+ Transport *string `json:"transport,omitempty" tf:"transport,omitempty"` +} + +type ReadinessProbeObservation struct { + + // The number of consecutive failures required to consider this probe as failed. Possible values are between 1 and 10. Defaults to 3. + // The number of consecutive failures required to consider this probe as failed. Possible values are between `1` and `10`. Defaults to `3`. + FailureCountThreshold *float64 `json:"failureCountThreshold,omitempty" tf:"failure_count_threshold,omitempty"` + + // A header block as detailed below. + Header []ReadinessProbeHeaderObservation `json:"header,omitempty" tf:"header,omitempty"` + + // The value for the host header which should be sent with this probe. If unspecified, the IP Address of the Pod is used as the host header. Setting a value for Host in headers can be used to override this for HTTP and HTTPS type probes. + // The probe hostname. Defaults to the pod IP address. Setting a value for `Host` in `headers` can be used to override this for `http` and `https` type probes. + Host *string `json:"host,omitempty" tf:"host,omitempty"` + + // How often, in seconds, the probe should run. Possible values are between 1 and 240. Defaults to 10 + // How often, in seconds, the probe should run. Possible values are between `1` and `240`. Defaults to `10` + IntervalSeconds *float64 `json:"intervalSeconds,omitempty" tf:"interval_seconds,omitempty"` + + // The path in the container at which to mount this volume. + // The URI to use for http type probes. Not valid for `TCP` type probes. Defaults to `/`. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // The port number on which to connect. Possible values are between 1 and 65535. + // The port number on which to connect. Possible values are between `1` and `65535`. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // The number of consecutive successful responses required to consider this probe as successful. 
Possible values are between 1 and 10. Defaults to 3. + // The number of consecutive successful responses required to consider this probe as successful. Possible values are between `1` and `10`. Defaults to `3`. + SuccessCountThreshold *float64 `json:"successCountThreshold,omitempty" tf:"success_count_threshold,omitempty"` + + // Time in seconds after which the probe times out. Possible values are in the range 1 - 240. Defaults to 1. + // Time in seconds after which the probe times out. Possible values are between `1` and `240`. Defaults to `1`. + Timeout *float64 `json:"timeout,omitempty" tf:"timeout,omitempty"` + + // Type of probe. Possible values are TCP, HTTP, and HTTPS. + // Type of probe. Possible values are `TCP`, `HTTP`, and `HTTPS`. + Transport *string `json:"transport,omitempty" tf:"transport,omitempty"` +} + +type ReadinessProbeParameters struct { + + // The number of consecutive failures required to consider this probe as failed. Possible values are between 1 and 10. Defaults to 3. + // The number of consecutive failures required to consider this probe as failed. Possible values are between `1` and `10`. Defaults to `3`. + // +kubebuilder:validation:Optional + FailureCountThreshold *float64 `json:"failureCountThreshold,omitempty" tf:"failure_count_threshold,omitempty"` + + // A header block as detailed below. + // +kubebuilder:validation:Optional + Header []ReadinessProbeHeaderParameters `json:"header,omitempty" tf:"header,omitempty"` + + // The value for the host header which should be sent with this probe. If unspecified, the IP Address of the Pod is used as the host header. Setting a value for Host in headers can be used to override this for HTTP and HTTPS type probes. + // The probe hostname. Defaults to the pod IP address. Setting a value for `Host` in `headers` can be used to override this for `http` and `https` type probes. 
+ // +kubebuilder:validation:Optional + Host *string `json:"host,omitempty" tf:"host,omitempty"` + + // How often, in seconds, the probe should run. Possible values are between 1 and 240. Defaults to 10 + // How often, in seconds, the probe should run. Possible values are between `1` and `240`. Defaults to `10` + // +kubebuilder:validation:Optional + IntervalSeconds *float64 `json:"intervalSeconds,omitempty" tf:"interval_seconds,omitempty"` + + // The path in the container at which to mount this volume. + // The URI to use for http type probes. Not valid for `TCP` type probes. Defaults to `/`. + // +kubebuilder:validation:Optional + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // The port number on which to connect. Possible values are between 1 and 65535. + // The port number on which to connect. Possible values are between `1` and `65535`. + // +kubebuilder:validation:Optional + Port *float64 `json:"port" tf:"port,omitempty"` + + // The number of consecutive successful responses required to consider this probe as successful. Possible values are between 1 and 10. Defaults to 3. + // The number of consecutive successful responses required to consider this probe as successful. Possible values are between `1` and `10`. Defaults to `3`. + // +kubebuilder:validation:Optional + SuccessCountThreshold *float64 `json:"successCountThreshold,omitempty" tf:"success_count_threshold,omitempty"` + + // Time in seconds after which the probe times out. Possible values are in the range 1 - 240. Defaults to 1. + // Time in seconds after which the probe times out. Possible values are between `1` and `240`. Defaults to `1`. + // +kubebuilder:validation:Optional + Timeout *float64 `json:"timeout,omitempty" tf:"timeout,omitempty"` + + // Type of probe. Possible values are TCP, HTTP, and HTTPS. + // Type of probe. Possible values are `TCP`, `HTTP`, and `HTTPS`. 
+ // +kubebuilder:validation:Optional + Transport *string `json:"transport" tf:"transport,omitempty"` +} + +type RegistryInitParameters struct { + + // Resource ID for the User Assigned Managed identity to use when pulling from the Container Registry. + // ID of the System or User Managed Identity used to pull images from the Container Registry + Identity *string `json:"identity,omitempty" tf:"identity,omitempty"` + + // The name of the Secret Reference containing the password value for this user on the Container Registry, username must also be supplied. + // The name of the Secret Reference containing the password value for this user on the Container Registry. + PasswordSecretName *string `json:"passwordSecretName,omitempty" tf:"password_secret_name,omitempty"` + + // The hostname for the Container Registry. + // The hostname for the Container Registry. + Server *string `json:"server,omitempty" tf:"server,omitempty"` + + // The username to use for this Container Registry, password_secret_name must also be supplied. + // The username to use for this Container Registry. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type RegistryObservation struct { + + // Resource ID for the User Assigned Managed identity to use when pulling from the Container Registry. + // ID of the System or User Managed Identity used to pull images from the Container Registry + Identity *string `json:"identity,omitempty" tf:"identity,omitempty"` + + // The name of the Secret Reference containing the password value for this user on the Container Registry, username must also be supplied. + // The name of the Secret Reference containing the password value for this user on the Container Registry. + PasswordSecretName *string `json:"passwordSecretName,omitempty" tf:"password_secret_name,omitempty"` + + // The hostname for the Container Registry. + // The hostname for the Container Registry. 
+ Server *string `json:"server,omitempty" tf:"server,omitempty"` + + // The username to use for this Container Registry, password_secret_name must also be supplied.. + // The username to use for this Container Registry. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type RegistryParameters struct { + + // Resource ID for the User Assigned Managed identity to use when pulling from the Container Registry. + // ID of the System or User Managed Identity used to pull images from the Container Registry + // +kubebuilder:validation:Optional + Identity *string `json:"identity,omitempty" tf:"identity,omitempty"` + + // The name of the Secret Reference containing the password value for this user on the Container Registry, username must also be supplied. + // The name of the Secret Reference containing the password value for this user on the Container Registry. + // +kubebuilder:validation:Optional + PasswordSecretName *string `json:"passwordSecretName,omitempty" tf:"password_secret_name,omitempty"` + + // The hostname for the Container Registry. + // The hostname for the Container Registry. + // +kubebuilder:validation:Optional + Server *string `json:"server" tf:"server,omitempty"` + + // The username to use for this Container Registry, password_secret_name must also be supplied.. + // The username to use for this Container Registry. + // +kubebuilder:validation:Optional + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type SecretInitParameters struct { +} + +type SecretObservation struct { +} + +type SecretParameters struct { + + // The Secret name. + // The Secret name. + // +kubebuilder:validation:Required + NameSecretRef v1.SecretKeySelector `json:"nameSecretRef" tf:"-"` + + // The value for this secret. + // The value for this secret. 
+ // +kubebuilder:validation:Required + ValueSecretRef v1.SecretKeySelector `json:"valueSecretRef" tf:"-"` +} + +type StartupProbeHeaderInitParameters struct { + + // The name of the Volume to be mounted in the container. + // The HTTP Header Name. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The value for this secret. + // The HTTP Header value. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type StartupProbeHeaderObservation struct { + + // The name of the Volume to be mounted in the container. + // The HTTP Header Name. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The value for this secret. + // The HTTP Header value. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type StartupProbeHeaderParameters struct { + + // The name of the Volume to be mounted in the container. + // The HTTP Header Name. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The value for this secret. + // The HTTP Header value. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type StartupProbeInitParameters struct { + + // The number of consecutive failures required to consider this probe as failed. Possible values are between 1 and 10. Defaults to 3. + // The number of consecutive failures required to consider this probe as failed. Possible values are between `1` and `10`. Defaults to `3`. + FailureCountThreshold *float64 `json:"failureCountThreshold,omitempty" tf:"failure_count_threshold,omitempty"` + + // A header block as detailed below. + Header []StartupProbeHeaderInitParameters `json:"header,omitempty" tf:"header,omitempty"` + + // The value for the host header which should be sent with this probe. If unspecified, the IP Address of the Pod is used as the host header. Setting a value for Host in headers can be used to override this for HTTP and HTTPS type probes. + // The probe hostname. 
Defaults to the pod IP address. Setting a value for `Host` in `headers` can be used to override this for `http` and `https` type probes. + Host *string `json:"host,omitempty" tf:"host,omitempty"` + + // How often, in seconds, the probe should run. Possible values are between 1 and 240. Defaults to 10 + // How often, in seconds, the probe should run. Possible values are between `1` and `240`. Defaults to `10` + IntervalSeconds *float64 `json:"intervalSeconds,omitempty" tf:"interval_seconds,omitempty"` + + // The path in the container at which to mount this volume. + // The URI to use with the `host` for http type probes. Not valid for `TCP` type probes. Defaults to `/`. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // The port number on which to connect. Possible values are between 1 and 65535. + // The port number on which to connect. Possible values are between `1` and `65535`. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Time in seconds after which the probe times out. Possible values are in the range 1 - 240. Defaults to 1. + // Time in seconds after which the probe times out. Possible values are between `1` an `240`. Defaults to `1`. + Timeout *float64 `json:"timeout,omitempty" tf:"timeout,omitempty"` + + // Type of probe. Possible values are TCP, HTTP, and HTTPS. + // Type of probe. Possible values are `TCP`, `HTTP`, and `HTTPS`. + Transport *string `json:"transport,omitempty" tf:"transport,omitempty"` +} + +type StartupProbeObservation struct { + + // The number of consecutive failures required to consider this probe as failed. Possible values are between 1 and 10. Defaults to 3. + // The number of consecutive failures required to consider this probe as failed. Possible values are between `1` and `10`. Defaults to `3`. + FailureCountThreshold *float64 `json:"failureCountThreshold,omitempty" tf:"failure_count_threshold,omitempty"` + + // A header block as detailed below. 
+ Header []StartupProbeHeaderObservation `json:"header,omitempty" tf:"header,omitempty"` + + // The value for the host header which should be sent with this probe. If unspecified, the IP Address of the Pod is used as the host header. Setting a value for Host in headers can be used to override this for HTTP and HTTPS type probes. + // The probe hostname. Defaults to the pod IP address. Setting a value for `Host` in `headers` can be used to override this for `http` and `https` type probes. + Host *string `json:"host,omitempty" tf:"host,omitempty"` + + // How often, in seconds, the probe should run. Possible values are between 1 and 240. Defaults to 10 + // How often, in seconds, the probe should run. Possible values are between `1` and `240`. Defaults to `10` + IntervalSeconds *float64 `json:"intervalSeconds,omitempty" tf:"interval_seconds,omitempty"` + + // The path in the container at which to mount this volume. + // The URI to use with the `host` for http type probes. Not valid for `TCP` type probes. Defaults to `/`. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // The port number on which to connect. Possible values are between 1 and 65535. + // The port number on which to connect. Possible values are between `1` and `65535`. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // The time in seconds after the container is sent the termination signal before the process if forcibly killed. + // The time in seconds after the container is sent the termination signal before the process if forcibly killed. + TerminationGracePeriodSeconds *float64 `json:"terminationGracePeriodSeconds,omitempty" tf:"termination_grace_period_seconds,omitempty"` + + // Time in seconds after which the probe times out. Possible values are in the range 1 - 240. Defaults to 1. + // Time in seconds after which the probe times out. Possible values are between `1` an `240`. Defaults to `1`. 
+ Timeout *float64 `json:"timeout,omitempty" tf:"timeout,omitempty"` + + // Type of probe. Possible values are TCP, HTTP, and HTTPS. + // Type of probe. Possible values are `TCP`, `HTTP`, and `HTTPS`. + Transport *string `json:"transport,omitempty" tf:"transport,omitempty"` +} + +type StartupProbeParameters struct { + + // The number of consecutive failures required to consider this probe as failed. Possible values are between 1 and 10. Defaults to 3. + // The number of consecutive failures required to consider this probe as failed. Possible values are between `1` and `10`. Defaults to `3`. + // +kubebuilder:validation:Optional + FailureCountThreshold *float64 `json:"failureCountThreshold,omitempty" tf:"failure_count_threshold,omitempty"` + + // A header block as detailed below. + // +kubebuilder:validation:Optional + Header []StartupProbeHeaderParameters `json:"header,omitempty" tf:"header,omitempty"` + + // The value for the host header which should be sent with this probe. If unspecified, the IP Address of the Pod is used as the host header. Setting a value for Host in headers can be used to override this for HTTP and HTTPS type probes. + // The probe hostname. Defaults to the pod IP address. Setting a value for `Host` in `headers` can be used to override this for `http` and `https` type probes. + // +kubebuilder:validation:Optional + Host *string `json:"host,omitempty" tf:"host,omitempty"` + + // How often, in seconds, the probe should run. Possible values are between 1 and 240. Defaults to 10 + // How often, in seconds, the probe should run. Possible values are between `1` and `240`. Defaults to `10` + // +kubebuilder:validation:Optional + IntervalSeconds *float64 `json:"intervalSeconds,omitempty" tf:"interval_seconds,omitempty"` + + // The path in the container at which to mount this volume. + // The URI to use with the `host` for http type probes. Not valid for `TCP` type probes. Defaults to `/`. 
+ // +kubebuilder:validation:Optional + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // The port number on which to connect. Possible values are between 1 and 65535. + // The port number on which to connect. Possible values are between `1` and `65535`. + // +kubebuilder:validation:Optional + Port *float64 `json:"port" tf:"port,omitempty"` + + // Time in seconds after which the probe times out. Possible values are in the range 1 - 240. Defaults to 1. + // Time in seconds after which the probe times out. Possible values are between `1` an `240`. Defaults to `1`. + // +kubebuilder:validation:Optional + Timeout *float64 `json:"timeout,omitempty" tf:"timeout,omitempty"` + + // Type of probe. Possible values are TCP, HTTP, and HTTPS. + // Type of probe. Possible values are `TCP`, `HTTP`, and `HTTPS`. + // +kubebuilder:validation:Optional + Transport *string `json:"transport" tf:"transport,omitempty"` +} + +type TCPScaleRuleAuthenticationInitParameters struct { + + // The name of the secret that contains the value for this environment variable. + SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` + + // The Trigger Parameter name to use the supply the value retrieved from the secret_name. + TriggerParameter *string `json:"triggerParameter,omitempty" tf:"trigger_parameter,omitempty"` +} + +type TCPScaleRuleAuthenticationObservation struct { + + // The name of the secret that contains the value for this environment variable. + SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` + + // The Trigger Parameter name to use the supply the value retrieved from the secret_name. + TriggerParameter *string `json:"triggerParameter,omitempty" tf:"trigger_parameter,omitempty"` +} + +type TCPScaleRuleAuthenticationParameters struct { + + // The name of the secret that contains the value for this environment variable. 
+ // +kubebuilder:validation:Optional + SecretName *string `json:"secretName" tf:"secret_name,omitempty"` + + // The Trigger Parameter name to use the supply the value retrieved from the secret_name. + // +kubebuilder:validation:Optional + TriggerParameter *string `json:"triggerParameter,omitempty" tf:"trigger_parameter,omitempty"` +} + +type TCPScaleRuleInitParameters struct { + + // Zero or more authentication blocks as defined below. + Authentication []TCPScaleRuleAuthenticationInitParameters `json:"authentication,omitempty" tf:"authentication,omitempty"` + + // - The number of concurrent requests to trigger scaling. + ConcurrentRequests *string `json:"concurrentRequests,omitempty" tf:"concurrent_requests,omitempty"` + + // The name of the Volume to be mounted in the container. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type TCPScaleRuleObservation struct { + + // Zero or more authentication blocks as defined below. + Authentication []TCPScaleRuleAuthenticationObservation `json:"authentication,omitempty" tf:"authentication,omitempty"` + + // - The number of concurrent requests to trigger scaling. + ConcurrentRequests *string `json:"concurrentRequests,omitempty" tf:"concurrent_requests,omitempty"` + + // The name of the Volume to be mounted in the container. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type TCPScaleRuleParameters struct { + + // Zero or more authentication blocks as defined below. + // +kubebuilder:validation:Optional + Authentication []TCPScaleRuleAuthenticationParameters `json:"authentication,omitempty" tf:"authentication,omitempty"` + + // - The number of concurrent requests to trigger scaling. + // +kubebuilder:validation:Optional + ConcurrentRequests *string `json:"concurrentRequests" tf:"concurrent_requests,omitempty"` + + // The name of the Volume to be mounted in the container. 
+ // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type TemplateInitParameters struct { + + // One or more azure_queue_scale_rule blocks as defined below. + AzureQueueScaleRule []AzureQueueScaleRuleInitParameters `json:"azureQueueScaleRule,omitempty" tf:"azure_queue_scale_rule,omitempty"` + + // One or more container blocks as detailed below. + Container []ContainerInitParameters `json:"container,omitempty" tf:"container,omitempty"` + + // One or more custom_scale_rule blocks as defined below. + CustomScaleRule []CustomScaleRuleInitParameters `json:"customScaleRule,omitempty" tf:"custom_scale_rule,omitempty"` + + // One or more http_scale_rule blocks as defined below. + HTTPScaleRule []HTTPScaleRuleInitParameters `json:"httpScaleRule,omitempty" tf:"http_scale_rule,omitempty"` + + // The definition of an init container that is part of the group as documented in the init_container block below. + InitContainer []InitContainerInitParameters `json:"initContainer,omitempty" tf:"init_container,omitempty"` + + // The maximum number of replicas for this container. + // The maximum number of replicas for this container. + MaxReplicas *float64 `json:"maxReplicas,omitempty" tf:"max_replicas,omitempty"` + + // The minimum number of replicas for this container. + // The minimum number of replicas for this container. + MinReplicas *float64 `json:"minReplicas,omitempty" tf:"min_replicas,omitempty"` + + // The suffix for the revision. This value must be unique for the lifetime of the Resource. If omitted the service will use a hash function to create one. + // The suffix for the revision. This value must be unique for the lifetime of the Resource. If omitted the service will use a hash function to create one. + RevisionSuffix *string `json:"revisionSuffix,omitempty" tf:"revision_suffix,omitempty"` + + // One or more tcp_scale_rule blocks as defined below. 
+ TCPScaleRule []TCPScaleRuleInitParameters `json:"tcpScaleRule,omitempty" tf:"tcp_scale_rule,omitempty"` + + // A volume block as detailed below. + Volume []VolumeInitParameters `json:"volume,omitempty" tf:"volume,omitempty"` +} + +type TemplateObservation struct { + + // One or more azure_queue_scale_rule blocks as defined below. + AzureQueueScaleRule []AzureQueueScaleRuleObservation `json:"azureQueueScaleRule,omitempty" tf:"azure_queue_scale_rule,omitempty"` + + // One or more container blocks as detailed below. + Container []ContainerObservation `json:"container,omitempty" tf:"container,omitempty"` + + // One or more custom_scale_rule blocks as defined below. + CustomScaleRule []CustomScaleRuleObservation `json:"customScaleRule,omitempty" tf:"custom_scale_rule,omitempty"` + + // One or more http_scale_rule blocks as defined below. + HTTPScaleRule []HTTPScaleRuleObservation `json:"httpScaleRule,omitempty" tf:"http_scale_rule,omitempty"` + + // The definition of an init container that is part of the group as documented in the init_container block below. + InitContainer []InitContainerObservation `json:"initContainer,omitempty" tf:"init_container,omitempty"` + + // The maximum number of replicas for this container. + // The maximum number of replicas for this container. + MaxReplicas *float64 `json:"maxReplicas,omitempty" tf:"max_replicas,omitempty"` + + // The minimum number of replicas for this container. + // The minimum number of replicas for this container. + MinReplicas *float64 `json:"minReplicas,omitempty" tf:"min_replicas,omitempty"` + + // The suffix for the revision. This value must be unique for the lifetime of the Resource. If omitted the service will use a hash function to create one. + // The suffix for the revision. This value must be unique for the lifetime of the Resource. If omitted the service will use a hash function to create one. 
+ RevisionSuffix *string `json:"revisionSuffix,omitempty" tf:"revision_suffix,omitempty"` + + // One or more tcp_scale_rule blocks as defined below. + TCPScaleRule []TCPScaleRuleObservation `json:"tcpScaleRule,omitempty" tf:"tcp_scale_rule,omitempty"` + + // A volume block as detailed below. + Volume []VolumeObservation `json:"volume,omitempty" tf:"volume,omitempty"` +} + +type TemplateParameters struct { + + // One or more azure_queue_scale_rule blocks as defined below. + // +kubebuilder:validation:Optional + AzureQueueScaleRule []AzureQueueScaleRuleParameters `json:"azureQueueScaleRule,omitempty" tf:"azure_queue_scale_rule,omitempty"` + + // One or more container blocks as detailed below. + // +kubebuilder:validation:Optional + Container []ContainerParameters `json:"container" tf:"container,omitempty"` + + // One or more custom_scale_rule blocks as defined below. + // +kubebuilder:validation:Optional + CustomScaleRule []CustomScaleRuleParameters `json:"customScaleRule,omitempty" tf:"custom_scale_rule,omitempty"` + + // One or more http_scale_rule blocks as defined below. + // +kubebuilder:validation:Optional + HTTPScaleRule []HTTPScaleRuleParameters `json:"httpScaleRule,omitempty" tf:"http_scale_rule,omitempty"` + + // The definition of an init container that is part of the group as documented in the init_container block below. + // +kubebuilder:validation:Optional + InitContainer []InitContainerParameters `json:"initContainer,omitempty" tf:"init_container,omitempty"` + + // The maximum number of replicas for this container. + // The maximum number of replicas for this container. + // +kubebuilder:validation:Optional + MaxReplicas *float64 `json:"maxReplicas,omitempty" tf:"max_replicas,omitempty"` + + // The minimum number of replicas for this container. + // The minimum number of replicas for this container. + // +kubebuilder:validation:Optional + MinReplicas *float64 `json:"minReplicas,omitempty" tf:"min_replicas,omitempty"` + + // The suffix for the revision. 
This value must be unique for the lifetime of the Resource. If omitted the service will use a hash function to create one. + // The suffix for the revision. This value must be unique for the lifetime of the Resource. If omitted the service will use a hash function to create one. + // +kubebuilder:validation:Optional + RevisionSuffix *string `json:"revisionSuffix,omitempty" tf:"revision_suffix,omitempty"` + + // One or more tcp_scale_rule blocks as defined below. + // +kubebuilder:validation:Optional + TCPScaleRule []TCPScaleRuleParameters `json:"tcpScaleRule,omitempty" tf:"tcp_scale_rule,omitempty"` + + // A volume block as detailed below. + // +kubebuilder:validation:Optional + Volume []VolumeParameters `json:"volume,omitempty" tf:"volume,omitempty"` +} + +type TrafficWeightInitParameters struct { + + // The label to apply to the revision as a name prefix for routing traffic. + // The label to apply to the revision as a name prefix for routing traffic. + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // This traffic Weight applies to the latest stable Container Revision. At most only one traffic_weight block can have the latest_revision set to true. + // This traffic Weight relates to the latest stable Container Revision. + LatestRevision *bool `json:"latestRevision,omitempty" tf:"latest_revision,omitempty"` + + // The percentage of traffic which should be sent this revision. + // The percentage of traffic to send to this revision. + Percentage *float64 `json:"percentage,omitempty" tf:"percentage,omitempty"` + + // The suffix for the revision. This value must be unique for the lifetime of the Resource. If omitted the service will use a hash function to create one. + // The suffix string to append to the revision. This must be unique for the Container App's lifetime. A default hash created by the service will be used if this value is omitted. 
+ RevisionSuffix *string `json:"revisionSuffix,omitempty" tf:"revision_suffix,omitempty"` +} + +type TrafficWeightObservation struct { + + // The label to apply to the revision as a name prefix for routing traffic. + // The label to apply to the revision as a name prefix for routing traffic. + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // This traffic Weight applies to the latest stable Container Revision. At most only one traffic_weight block can have the latest_revision set to true. + // This traffic Weight relates to the latest stable Container Revision. + LatestRevision *bool `json:"latestRevision,omitempty" tf:"latest_revision,omitempty"` + + // The percentage of traffic which should be sent this revision. + // The percentage of traffic to send to this revision. + Percentage *float64 `json:"percentage,omitempty" tf:"percentage,omitempty"` + + // The suffix for the revision. This value must be unique for the lifetime of the Resource. If omitted the service will use a hash function to create one. + // The suffix string to append to the revision. This must be unique for the Container App's lifetime. A default hash created by the service will be used if this value is omitted. + RevisionSuffix *string `json:"revisionSuffix,omitempty" tf:"revision_suffix,omitempty"` +} + +type TrafficWeightParameters struct { + + // The label to apply to the revision as a name prefix for routing traffic. + // The label to apply to the revision as a name prefix for routing traffic. + // +kubebuilder:validation:Optional + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // This traffic Weight applies to the latest stable Container Revision. At most only one traffic_weight block can have the latest_revision set to true. + // This traffic Weight relates to the latest stable Container Revision. 
+ // +kubebuilder:validation:Optional + LatestRevision *bool `json:"latestRevision,omitempty" tf:"latest_revision,omitempty"` + + // The percentage of traffic which should be sent this revision. + // The percentage of traffic to send to this revision. + // +kubebuilder:validation:Optional + Percentage *float64 `json:"percentage" tf:"percentage,omitempty"` + + // The suffix for the revision. This value must be unique for the lifetime of the Resource. If omitted the service will use a hash function to create one. + // The suffix string to append to the revision. This must be unique for the Container App's lifetime. A default hash created by the service will be used if this value is omitted. + // +kubebuilder:validation:Optional + RevisionSuffix *string `json:"revisionSuffix,omitempty" tf:"revision_suffix,omitempty"` +} + +type VolumeInitParameters struct { + + // The name of the Volume to be mounted in the container. + // The name of the volume. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The name of the AzureFile storage. + // The name of the `AzureFile` storage. Required when `storage_type` is `AzureFile` + StorageName *string `json:"storageName,omitempty" tf:"storage_name,omitempty"` + + // The type of storage volume. Possible values are AzureFile, EmptyDir and Secret. Defaults to EmptyDir. + // The type of storage volume. Possible values include `AzureFile` and `EmptyDir`. Defaults to `EmptyDir`. + StorageType *string `json:"storageType,omitempty" tf:"storage_type,omitempty"` +} + +type VolumeMountsInitParameters struct { + + // The name of the Volume to be mounted in the container. + // The name of the Volume to be mounted in the container. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The path in the container at which to mount this volume. + // The path in the container at which to mount this volume. 
+ Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type VolumeMountsObservation struct { + + // The name of the Volume to be mounted in the container. + // The name of the Volume to be mounted in the container. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The path in the container at which to mount this volume. + // The path in the container at which to mount this volume. + Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type VolumeMountsParameters struct { + + // The name of the Volume to be mounted in the container. + // The name of the Volume to be mounted in the container. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The path in the container at which to mount this volume. + // The path in the container at which to mount this volume. + // +kubebuilder:validation:Optional + Path *string `json:"path" tf:"path,omitempty"` +} + +type VolumeObservation struct { + + // The name of the Volume to be mounted in the container. + // The name of the volume. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The name of the AzureFile storage. + // The name of the `AzureFile` storage. Required when `storage_type` is `AzureFile` + StorageName *string `json:"storageName,omitempty" tf:"storage_name,omitempty"` + + // The type of storage volume. Possible values are AzureFile, EmptyDir and Secret. Defaults to EmptyDir. + // The type of storage volume. Possible values include `AzureFile` and `EmptyDir`. Defaults to `EmptyDir`. + StorageType *string `json:"storageType,omitempty" tf:"storage_type,omitempty"` +} + +type VolumeParameters struct { + + // The name of the Volume to be mounted in the container. + // The name of the volume. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The name of the AzureFile storage. + // The name of the `AzureFile` storage. 
Required when `storage_type` is `AzureFile` + // +kubebuilder:validation:Optional + StorageName *string `json:"storageName,omitempty" tf:"storage_name,omitempty"` + + // The type of storage volume. Possible values are AzureFile, EmptyDir and Secret. Defaults to EmptyDir. + // The type of storage volume. Possible values include `AzureFile` and `EmptyDir`. Defaults to `EmptyDir`. + // +kubebuilder:validation:Optional + StorageType *string `json:"storageType,omitempty" tf:"storage_type,omitempty"` +} + +// ContainerAppSpec defines the desired state of ContainerApp +type ContainerAppSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ContainerAppParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ContainerAppInitParameters `json:"initProvider,omitempty"` +} + +// ContainerAppStatus defines the observed state of ContainerApp. +type ContainerAppStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ContainerAppObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ContainerApp is the Schema for the ContainerApps API. Manages a Container App. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type ContainerApp struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.revisionMode) || (has(self.initProvider) && has(self.initProvider.revisionMode))",message="spec.forProvider.revisionMode is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.template) || (has(self.initProvider) && has(self.initProvider.template))",message="spec.forProvider.template is a required parameter" + Spec ContainerAppSpec `json:"spec"` + Status ContainerAppStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ContainerAppList contains a list of ContainerApps +type ContainerAppList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ContainerApp `json:"items"` +} + +// Repository type metadata. +var ( + ContainerApp_Kind = "ContainerApp" + ContainerApp_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ContainerApp_Kind}.String() + ContainerApp_KindAPIVersion = ContainerApp_Kind + "." 
+ CRDGroupVersion.String() + ContainerApp_GroupVersionKind = CRDGroupVersion.WithKind(ContainerApp_Kind) +) + +func init() { + SchemeBuilder.Register(&ContainerApp{}, &ContainerAppList{}) +} diff --git a/apis/containerapp/v1beta2/zz_generated.conversion_hubs.go b/apis/containerapp/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 000000000..d4a8ea8f5 --- /dev/null +++ b/apis/containerapp/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *ContainerApp) Hub() {} diff --git a/apis/containerapp/v1beta2/zz_generated.deepcopy.go b/apis/containerapp/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..445017c9b --- /dev/null +++ b/apis/containerapp/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,3921 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationInitParameters) DeepCopyInto(out *AuthenticationInitParameters) { + *out = *in + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } + if in.TriggerParameter != nil { + in, out := &in.TriggerParameter, &out.TriggerParameter + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationInitParameters. 
+func (in *AuthenticationInitParameters) DeepCopy() *AuthenticationInitParameters { + if in == nil { + return nil + } + out := new(AuthenticationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationObservation) DeepCopyInto(out *AuthenticationObservation) { + *out = *in + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } + if in.TriggerParameter != nil { + in, out := &in.TriggerParameter, &out.TriggerParameter + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationObservation. +func (in *AuthenticationObservation) DeepCopy() *AuthenticationObservation { + if in == nil { + return nil + } + out := new(AuthenticationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationParameters) DeepCopyInto(out *AuthenticationParameters) { + *out = *in + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } + if in.TriggerParameter != nil { + in, out := &in.TriggerParameter, &out.TriggerParameter + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationParameters. +func (in *AuthenticationParameters) DeepCopy() *AuthenticationParameters { + if in == nil { + return nil + } + out := new(AuthenticationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AzureQueueScaleRuleInitParameters) DeepCopyInto(out *AzureQueueScaleRuleInitParameters) { + *out = *in + if in.Authentication != nil { + in, out := &in.Authentication, &out.Authentication + *out = make([]AuthenticationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.QueueLength != nil { + in, out := &in.QueueLength, &out.QueueLength + *out = new(float64) + **out = **in + } + if in.QueueName != nil { + in, out := &in.QueueName, &out.QueueName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureQueueScaleRuleInitParameters. +func (in *AzureQueueScaleRuleInitParameters) DeepCopy() *AzureQueueScaleRuleInitParameters { + if in == nil { + return nil + } + out := new(AzureQueueScaleRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureQueueScaleRuleObservation) DeepCopyInto(out *AzureQueueScaleRuleObservation) { + *out = *in + if in.Authentication != nil { + in, out := &in.Authentication, &out.Authentication + *out = make([]AuthenticationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.QueueLength != nil { + in, out := &in.QueueLength, &out.QueueLength + *out = new(float64) + **out = **in + } + if in.QueueName != nil { + in, out := &in.QueueName, &out.QueueName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureQueueScaleRuleObservation. 
+func (in *AzureQueueScaleRuleObservation) DeepCopy() *AzureQueueScaleRuleObservation { + if in == nil { + return nil + } + out := new(AzureQueueScaleRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureQueueScaleRuleParameters) DeepCopyInto(out *AzureQueueScaleRuleParameters) { + *out = *in + if in.Authentication != nil { + in, out := &in.Authentication, &out.Authentication + *out = make([]AuthenticationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.QueueLength != nil { + in, out := &in.QueueLength, &out.QueueLength + *out = new(float64) + **out = **in + } + if in.QueueName != nil { + in, out := &in.QueueName, &out.QueueName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureQueueScaleRuleParameters. +func (in *AzureQueueScaleRuleParameters) DeepCopy() *AzureQueueScaleRuleParameters { + if in == nil { + return nil + } + out := new(AzureQueueScaleRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerApp) DeepCopyInto(out *ContainerApp) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerApp. +func (in *ContainerApp) DeepCopy() *ContainerApp { + if in == nil { + return nil + } + out := new(ContainerApp) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ContainerApp) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerAppInitParameters) DeepCopyInto(out *ContainerAppInitParameters) { + *out = *in + if in.ContainerAppEnvironmentID != nil { + in, out := &in.ContainerAppEnvironmentID, &out.ContainerAppEnvironmentID + *out = new(string) + **out = **in + } + if in.ContainerAppEnvironmentIDRef != nil { + in, out := &in.ContainerAppEnvironmentIDRef, &out.ContainerAppEnvironmentIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ContainerAppEnvironmentIDSelector != nil { + in, out := &in.ContainerAppEnvironmentIDSelector, &out.ContainerAppEnvironmentIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Dapr != nil { + in, out := &in.Dapr, &out.Dapr + *out = new(DaprInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Ingress != nil { + in, out := &in.Ingress, &out.Ingress + *out = new(IngressInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Registry != nil { + in, out := &in.Registry, &out.Registry + *out = make([]RegistryInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RevisionMode != nil { + in, out := &in.RevisionMode, &out.RevisionMode + *out = new(string) + **out = **in + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = make([]SecretInitParameters, len(*in)) + copy(*out, *in) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + 
(*out)[key] = outVal + } + } + if in.Template != nil { + in, out := &in.Template, &out.Template + *out = new(TemplateInitParameters) + (*in).DeepCopyInto(*out) + } + if in.WorkloadProfileName != nil { + in, out := &in.WorkloadProfileName, &out.WorkloadProfileName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerAppInitParameters. +func (in *ContainerAppInitParameters) DeepCopy() *ContainerAppInitParameters { + if in == nil { + return nil + } + out := new(ContainerAppInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerAppList) DeepCopyInto(out *ContainerAppList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ContainerApp, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerAppList. +func (in *ContainerAppList) DeepCopy() *ContainerAppList { + if in == nil { + return nil + } + out := new(ContainerAppList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ContainerAppList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContainerAppObservation) DeepCopyInto(out *ContainerAppObservation) { + *out = *in + if in.ContainerAppEnvironmentID != nil { + in, out := &in.ContainerAppEnvironmentID, &out.ContainerAppEnvironmentID + *out = new(string) + **out = **in + } + if in.Dapr != nil { + in, out := &in.Dapr, &out.Dapr + *out = new(DaprObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.Ingress != nil { + in, out := &in.Ingress, &out.Ingress + *out = new(IngressObservation) + (*in).DeepCopyInto(*out) + } + if in.LatestRevisionFqdn != nil { + in, out := &in.LatestRevisionFqdn, &out.LatestRevisionFqdn + *out = new(string) + **out = **in + } + if in.LatestRevisionName != nil { + in, out := &in.LatestRevisionName, &out.LatestRevisionName + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.OutboundIPAddresses != nil { + in, out := &in.OutboundIPAddresses, &out.OutboundIPAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Registry != nil { + in, out := &in.Registry, &out.Registry + *out = make([]RegistryObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.RevisionMode != nil { + in, out := &in.RevisionMode, &out.RevisionMode + *out = new(string) + **out = **in + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = make([]SecretParameters, len(*in)) + copy(*out, *in) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = 
make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Template != nil { + in, out := &in.Template, &out.Template + *out = new(TemplateObservation) + (*in).DeepCopyInto(*out) + } + if in.WorkloadProfileName != nil { + in, out := &in.WorkloadProfileName, &out.WorkloadProfileName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerAppObservation. +func (in *ContainerAppObservation) DeepCopy() *ContainerAppObservation { + if in == nil { + return nil + } + out := new(ContainerAppObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerAppParameters) DeepCopyInto(out *ContainerAppParameters) { + *out = *in + if in.ContainerAppEnvironmentID != nil { + in, out := &in.ContainerAppEnvironmentID, &out.ContainerAppEnvironmentID + *out = new(string) + **out = **in + } + if in.ContainerAppEnvironmentIDRef != nil { + in, out := &in.ContainerAppEnvironmentIDRef, &out.ContainerAppEnvironmentIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ContainerAppEnvironmentIDSelector != nil { + in, out := &in.ContainerAppEnvironmentIDSelector, &out.ContainerAppEnvironmentIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Dapr != nil { + in, out := &in.Dapr, &out.Dapr + *out = new(DaprParameters) + (*in).DeepCopyInto(*out) + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.Ingress != nil { + in, out := &in.Ingress, &out.Ingress + *out = new(IngressParameters) + (*in).DeepCopyInto(*out) + } + if in.Registry != nil { + in, out := 
&in.Registry, &out.Registry + *out = make([]RegistryParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RevisionMode != nil { + in, out := &in.RevisionMode, &out.RevisionMode + *out = new(string) + **out = **in + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = make([]SecretParameters, len(*in)) + copy(*out, *in) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Template != nil { + in, out := &in.Template, &out.Template + *out = new(TemplateParameters) + (*in).DeepCopyInto(*out) + } + if in.WorkloadProfileName != nil { + in, out := &in.WorkloadProfileName, &out.WorkloadProfileName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerAppParameters. +func (in *ContainerAppParameters) DeepCopy() *ContainerAppParameters { + if in == nil { + return nil + } + out := new(ContainerAppParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContainerAppSpec) DeepCopyInto(out *ContainerAppSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerAppSpec. +func (in *ContainerAppSpec) DeepCopy() *ContainerAppSpec { + if in == nil { + return nil + } + out := new(ContainerAppSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerAppStatus) DeepCopyInto(out *ContainerAppStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerAppStatus. +func (in *ContainerAppStatus) DeepCopy() *ContainerAppStatus { + if in == nil { + return nil + } + out := new(ContainerAppStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContainerInitParameters) DeepCopyInto(out *ContainerInitParameters) { + *out = *in + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPU != nil { + in, out := &in.CPU, &out.CPU + *out = new(float64) + **out = **in + } + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]EnvInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(string) + **out = **in + } + if in.LivenessProbe != nil { + in, out := &in.LivenessProbe, &out.LivenessProbe + *out = make([]LivenessProbeInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ReadinessProbe != nil { + in, out := &in.ReadinessProbe, &out.ReadinessProbe + *out = make([]ReadinessProbeInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StartupProbe != nil { + in, out := &in.StartupProbe, &out.StartupProbe + *out = make([]StartupProbeInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]VolumeMountsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
ContainerInitParameters. +func (in *ContainerInitParameters) DeepCopy() *ContainerInitParameters { + if in == nil { + return nil + } + out := new(ContainerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerObservation) DeepCopyInto(out *ContainerObservation) { + *out = *in + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPU != nil { + in, out := &in.CPU, &out.CPU + *out = new(float64) + **out = **in + } + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]EnvObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EphemeralStorage != nil { + in, out := &in.EphemeralStorage, &out.EphemeralStorage + *out = new(string) + **out = **in + } + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(string) + **out = **in + } + if in.LivenessProbe != nil { + in, out := &in.LivenessProbe, &out.LivenessProbe + *out = make([]LivenessProbeObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ReadinessProbe != nil { + in, out := &in.ReadinessProbe, &out.ReadinessProbe + *out = make([]ReadinessProbeObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StartupProbe != nil { + in, out := 
&in.StartupProbe, &out.StartupProbe + *out = make([]StartupProbeObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]VolumeMountsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerObservation. +func (in *ContainerObservation) DeepCopy() *ContainerObservation { + if in == nil { + return nil + } + out := new(ContainerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerParameters) DeepCopyInto(out *ContainerParameters) { + *out = *in + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPU != nil { + in, out := &in.CPU, &out.CPU + *out = new(float64) + **out = **in + } + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]EnvParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(string) + **out = **in + } + if in.LivenessProbe != nil { + in, out := &in.LivenessProbe, &out.LivenessProbe + *out = make([]LivenessProbeParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, 
&out.Name + *out = new(string) + **out = **in + } + if in.ReadinessProbe != nil { + in, out := &in.ReadinessProbe, &out.ReadinessProbe + *out = make([]ReadinessProbeParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StartupProbe != nil { + in, out := &in.StartupProbe, &out.StartupProbe + *out = make([]StartupProbeParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]VolumeMountsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerParameters. +func (in *ContainerParameters) DeepCopy() *ContainerParameters { + if in == nil { + return nil + } + out := new(ContainerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomDomainInitParameters) DeepCopyInto(out *CustomDomainInitParameters) { + *out = *in + if in.CertificateBindingType != nil { + in, out := &in.CertificateBindingType, &out.CertificateBindingType + *out = new(string) + **out = **in + } + if in.CertificateID != nil { + in, out := &in.CertificateID, &out.CertificateID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomDomainInitParameters. +func (in *CustomDomainInitParameters) DeepCopy() *CustomDomainInitParameters { + if in == nil { + return nil + } + out := new(CustomDomainInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomDomainObservation) DeepCopyInto(out *CustomDomainObservation) { + *out = *in + if in.CertificateBindingType != nil { + in, out := &in.CertificateBindingType, &out.CertificateBindingType + *out = new(string) + **out = **in + } + if in.CertificateID != nil { + in, out := &in.CertificateID, &out.CertificateID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomDomainObservation. +func (in *CustomDomainObservation) DeepCopy() *CustomDomainObservation { + if in == nil { + return nil + } + out := new(CustomDomainObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomDomainParameters) DeepCopyInto(out *CustomDomainParameters) { + *out = *in + if in.CertificateBindingType != nil { + in, out := &in.CertificateBindingType, &out.CertificateBindingType + *out = new(string) + **out = **in + } + if in.CertificateID != nil { + in, out := &in.CertificateID, &out.CertificateID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomDomainParameters. +func (in *CustomDomainParameters) DeepCopy() *CustomDomainParameters { + if in == nil { + return nil + } + out := new(CustomDomainParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomScaleRuleAuthenticationInitParameters) DeepCopyInto(out *CustomScaleRuleAuthenticationInitParameters) { + *out = *in + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } + if in.TriggerParameter != nil { + in, out := &in.TriggerParameter, &out.TriggerParameter + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomScaleRuleAuthenticationInitParameters. +func (in *CustomScaleRuleAuthenticationInitParameters) DeepCopy() *CustomScaleRuleAuthenticationInitParameters { + if in == nil { + return nil + } + out := new(CustomScaleRuleAuthenticationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomScaleRuleAuthenticationObservation) DeepCopyInto(out *CustomScaleRuleAuthenticationObservation) { + *out = *in + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } + if in.TriggerParameter != nil { + in, out := &in.TriggerParameter, &out.TriggerParameter + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomScaleRuleAuthenticationObservation. +func (in *CustomScaleRuleAuthenticationObservation) DeepCopy() *CustomScaleRuleAuthenticationObservation { + if in == nil { + return nil + } + out := new(CustomScaleRuleAuthenticationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomScaleRuleAuthenticationParameters) DeepCopyInto(out *CustomScaleRuleAuthenticationParameters) { + *out = *in + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } + if in.TriggerParameter != nil { + in, out := &in.TriggerParameter, &out.TriggerParameter + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomScaleRuleAuthenticationParameters. +func (in *CustomScaleRuleAuthenticationParameters) DeepCopy() *CustomScaleRuleAuthenticationParameters { + if in == nil { + return nil + } + out := new(CustomScaleRuleAuthenticationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomScaleRuleInitParameters) DeepCopyInto(out *CustomScaleRuleInitParameters) { + *out = *in + if in.Authentication != nil { + in, out := &in.Authentication, &out.Authentication + *out = make([]CustomScaleRuleAuthenticationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CustomRuleType != nil { + in, out := &in.CustomRuleType, &out.CustomRuleType + *out = new(string) + **out = **in + } + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomScaleRuleInitParameters. 
+func (in *CustomScaleRuleInitParameters) DeepCopy() *CustomScaleRuleInitParameters { + if in == nil { + return nil + } + out := new(CustomScaleRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomScaleRuleObservation) DeepCopyInto(out *CustomScaleRuleObservation) { + *out = *in + if in.Authentication != nil { + in, out := &in.Authentication, &out.Authentication + *out = make([]CustomScaleRuleAuthenticationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CustomRuleType != nil { + in, out := &in.CustomRuleType, &out.CustomRuleType + *out = new(string) + **out = **in + } + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomScaleRuleObservation. +func (in *CustomScaleRuleObservation) DeepCopy() *CustomScaleRuleObservation { + if in == nil { + return nil + } + out := new(CustomScaleRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomScaleRuleParameters) DeepCopyInto(out *CustomScaleRuleParameters) { + *out = *in + if in.Authentication != nil { + in, out := &in.Authentication, &out.Authentication + *out = make([]CustomScaleRuleAuthenticationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CustomRuleType != nil { + in, out := &in.CustomRuleType, &out.CustomRuleType + *out = new(string) + **out = **in + } + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomScaleRuleParameters. +func (in *CustomScaleRuleParameters) DeepCopy() *CustomScaleRuleParameters { + if in == nil { + return nil + } + out := new(CustomScaleRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaprInitParameters) DeepCopyInto(out *DaprInitParameters) { + *out = *in + if in.AppID != nil { + in, out := &in.AppID, &out.AppID + *out = new(string) + **out = **in + } + if in.AppPort != nil { + in, out := &in.AppPort, &out.AppPort + *out = new(float64) + **out = **in + } + if in.AppProtocol != nil { + in, out := &in.AppProtocol, &out.AppProtocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaprInitParameters. 
+func (in *DaprInitParameters) DeepCopy() *DaprInitParameters { + if in == nil { + return nil + } + out := new(DaprInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaprObservation) DeepCopyInto(out *DaprObservation) { + *out = *in + if in.AppID != nil { + in, out := &in.AppID, &out.AppID + *out = new(string) + **out = **in + } + if in.AppPort != nil { + in, out := &in.AppPort, &out.AppPort + *out = new(float64) + **out = **in + } + if in.AppProtocol != nil { + in, out := &in.AppProtocol, &out.AppProtocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaprObservation. +func (in *DaprObservation) DeepCopy() *DaprObservation { + if in == nil { + return nil + } + out := new(DaprObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaprParameters) DeepCopyInto(out *DaprParameters) { + *out = *in + if in.AppID != nil { + in, out := &in.AppID, &out.AppID + *out = new(string) + **out = **in + } + if in.AppPort != nil { + in, out := &in.AppPort, &out.AppPort + *out = new(float64) + **out = **in + } + if in.AppProtocol != nil { + in, out := &in.AppProtocol, &out.AppProtocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaprParameters. +func (in *DaprParameters) DeepCopy() *DaprParameters { + if in == nil { + return nil + } + out := new(DaprParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EnvInitParameters) DeepCopyInto(out *EnvInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvInitParameters. +func (in *EnvInitParameters) DeepCopy() *EnvInitParameters { + if in == nil { + return nil + } + out := new(EnvInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvObservation) DeepCopyInto(out *EnvObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvObservation. +func (in *EnvObservation) DeepCopy() *EnvObservation { + if in == nil { + return nil + } + out := new(EnvObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EnvParameters) DeepCopyInto(out *EnvParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvParameters. +func (in *EnvParameters) DeepCopy() *EnvParameters { + if in == nil { + return nil + } + out := new(EnvParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPScaleRuleAuthenticationInitParameters) DeepCopyInto(out *HTTPScaleRuleAuthenticationInitParameters) { + *out = *in + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } + if in.TriggerParameter != nil { + in, out := &in.TriggerParameter, &out.TriggerParameter + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPScaleRuleAuthenticationInitParameters. +func (in *HTTPScaleRuleAuthenticationInitParameters) DeepCopy() *HTTPScaleRuleAuthenticationInitParameters { + if in == nil { + return nil + } + out := new(HTTPScaleRuleAuthenticationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPScaleRuleAuthenticationObservation) DeepCopyInto(out *HTTPScaleRuleAuthenticationObservation) { + *out = *in + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } + if in.TriggerParameter != nil { + in, out := &in.TriggerParameter, &out.TriggerParameter + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPScaleRuleAuthenticationObservation. +func (in *HTTPScaleRuleAuthenticationObservation) DeepCopy() *HTTPScaleRuleAuthenticationObservation { + if in == nil { + return nil + } + out := new(HTTPScaleRuleAuthenticationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPScaleRuleAuthenticationParameters) DeepCopyInto(out *HTTPScaleRuleAuthenticationParameters) { + *out = *in + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } + if in.TriggerParameter != nil { + in, out := &in.TriggerParameter, &out.TriggerParameter + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPScaleRuleAuthenticationParameters. +func (in *HTTPScaleRuleAuthenticationParameters) DeepCopy() *HTTPScaleRuleAuthenticationParameters { + if in == nil { + return nil + } + out := new(HTTPScaleRuleAuthenticationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPScaleRuleInitParameters) DeepCopyInto(out *HTTPScaleRuleInitParameters) { + *out = *in + if in.Authentication != nil { + in, out := &in.Authentication, &out.Authentication + *out = make([]HTTPScaleRuleAuthenticationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ConcurrentRequests != nil { + in, out := &in.ConcurrentRequests, &out.ConcurrentRequests + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPScaleRuleInitParameters. +func (in *HTTPScaleRuleInitParameters) DeepCopy() *HTTPScaleRuleInitParameters { + if in == nil { + return nil + } + out := new(HTTPScaleRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPScaleRuleObservation) DeepCopyInto(out *HTTPScaleRuleObservation) { + *out = *in + if in.Authentication != nil { + in, out := &in.Authentication, &out.Authentication + *out = make([]HTTPScaleRuleAuthenticationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ConcurrentRequests != nil { + in, out := &in.ConcurrentRequests, &out.ConcurrentRequests + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPScaleRuleObservation. +func (in *HTTPScaleRuleObservation) DeepCopy() *HTTPScaleRuleObservation { + if in == nil { + return nil + } + out := new(HTTPScaleRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPScaleRuleParameters) DeepCopyInto(out *HTTPScaleRuleParameters) { + *out = *in + if in.Authentication != nil { + in, out := &in.Authentication, &out.Authentication + *out = make([]HTTPScaleRuleAuthenticationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ConcurrentRequests != nil { + in, out := &in.ConcurrentRequests, &out.ConcurrentRequests + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPScaleRuleParameters. +func (in *HTTPScaleRuleParameters) DeepCopy() *HTTPScaleRuleParameters { + if in == nil { + return nil + } + out := new(HTTPScaleRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HeaderInitParameters) DeepCopyInto(out *HeaderInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeaderInitParameters. +func (in *HeaderInitParameters) DeepCopy() *HeaderInitParameters { + if in == nil { + return nil + } + out := new(HeaderInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HeaderObservation) DeepCopyInto(out *HeaderObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeaderObservation. +func (in *HeaderObservation) DeepCopy() *HeaderObservation { + if in == nil { + return nil + } + out := new(HeaderObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HeaderParameters) DeepCopyInto(out *HeaderParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeaderParameters. +func (in *HeaderParameters) DeepCopy() *HeaderParameters { + if in == nil { + return nil + } + out := new(HeaderParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IPSecurityRestrictionInitParameters) DeepCopyInto(out *IPSecurityRestrictionInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.IPAddressRange != nil { + in, out := &in.IPAddressRange, &out.IPAddressRange + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPSecurityRestrictionInitParameters. +func (in *IPSecurityRestrictionInitParameters) DeepCopy() *IPSecurityRestrictionInitParameters { + if in == nil { + return nil + } + out := new(IPSecurityRestrictionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPSecurityRestrictionObservation) DeepCopyInto(out *IPSecurityRestrictionObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.IPAddressRange != nil { + in, out := &in.IPAddressRange, &out.IPAddressRange + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPSecurityRestrictionObservation. 
+func (in *IPSecurityRestrictionObservation) DeepCopy() *IPSecurityRestrictionObservation { + if in == nil { + return nil + } + out := new(IPSecurityRestrictionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPSecurityRestrictionParameters) DeepCopyInto(out *IPSecurityRestrictionParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.IPAddressRange != nil { + in, out := &in.IPAddressRange, &out.IPAddressRange + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPSecurityRestrictionParameters. +func (in *IPSecurityRestrictionParameters) DeepCopy() *IPSecurityRestrictionParameters { + if in == nil { + return nil + } + out := new(IPSecurityRestrictionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityInitParameters) DeepCopyInto(out *IdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityInitParameters. 
+func (in *IdentityInitParameters) DeepCopy() *IdentityInitParameters { + if in == nil { + return nil + } + out := new(IdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityObservation) DeepCopyInto(out *IdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityObservation. +func (in *IdentityObservation) DeepCopy() *IdentityObservation { + if in == nil { + return nil + } + out := new(IdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityParameters) DeepCopyInto(out *IdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityParameters. 
+func (in *IdentityParameters) DeepCopy() *IdentityParameters { + if in == nil { + return nil + } + out := new(IdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressInitParameters) DeepCopyInto(out *IngressInitParameters) { + *out = *in + if in.AllowInsecureConnections != nil { + in, out := &in.AllowInsecureConnections, &out.AllowInsecureConnections + *out = new(bool) + **out = **in + } + if in.CustomDomain != nil { + in, out := &in.CustomDomain, &out.CustomDomain + *out = new(CustomDomainInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ExposedPort != nil { + in, out := &in.ExposedPort, &out.ExposedPort + *out = new(float64) + **out = **in + } + if in.ExternalEnabled != nil { + in, out := &in.ExternalEnabled, &out.ExternalEnabled + *out = new(bool) + **out = **in + } + if in.IPSecurityRestriction != nil { + in, out := &in.IPSecurityRestriction, &out.IPSecurityRestriction + *out = make([]IPSecurityRestrictionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TargetPort != nil { + in, out := &in.TargetPort, &out.TargetPort + *out = new(float64) + **out = **in + } + if in.TrafficWeight != nil { + in, out := &in.TrafficWeight, &out.TrafficWeight + *out = make([]TrafficWeightInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Transport != nil { + in, out := &in.Transport, &out.Transport + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressInitParameters. +func (in *IngressInitParameters) DeepCopy() *IngressInitParameters { + if in == nil { + return nil + } + out := new(IngressInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *IngressObservation) DeepCopyInto(out *IngressObservation) { + *out = *in + if in.AllowInsecureConnections != nil { + in, out := &in.AllowInsecureConnections, &out.AllowInsecureConnections + *out = new(bool) + **out = **in + } + if in.CustomDomain != nil { + in, out := &in.CustomDomain, &out.CustomDomain + *out = new(CustomDomainObservation) + (*in).DeepCopyInto(*out) + } + if in.ExposedPort != nil { + in, out := &in.ExposedPort, &out.ExposedPort + *out = new(float64) + **out = **in + } + if in.ExternalEnabled != nil { + in, out := &in.ExternalEnabled, &out.ExternalEnabled + *out = new(bool) + **out = **in + } + if in.Fqdn != nil { + in, out := &in.Fqdn, &out.Fqdn + *out = new(string) + **out = **in + } + if in.IPSecurityRestriction != nil { + in, out := &in.IPSecurityRestriction, &out.IPSecurityRestriction + *out = make([]IPSecurityRestrictionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TargetPort != nil { + in, out := &in.TargetPort, &out.TargetPort + *out = new(float64) + **out = **in + } + if in.TrafficWeight != nil { + in, out := &in.TrafficWeight, &out.TrafficWeight + *out = make([]TrafficWeightObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Transport != nil { + in, out := &in.Transport, &out.Transport + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressObservation. +func (in *IngressObservation) DeepCopy() *IngressObservation { + if in == nil { + return nil + } + out := new(IngressObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IngressParameters) DeepCopyInto(out *IngressParameters) { + *out = *in + if in.AllowInsecureConnections != nil { + in, out := &in.AllowInsecureConnections, &out.AllowInsecureConnections + *out = new(bool) + **out = **in + } + if in.CustomDomain != nil { + in, out := &in.CustomDomain, &out.CustomDomain + *out = new(CustomDomainParameters) + (*in).DeepCopyInto(*out) + } + if in.ExposedPort != nil { + in, out := &in.ExposedPort, &out.ExposedPort + *out = new(float64) + **out = **in + } + if in.ExternalEnabled != nil { + in, out := &in.ExternalEnabled, &out.ExternalEnabled + *out = new(bool) + **out = **in + } + if in.IPSecurityRestriction != nil { + in, out := &in.IPSecurityRestriction, &out.IPSecurityRestriction + *out = make([]IPSecurityRestrictionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TargetPort != nil { + in, out := &in.TargetPort, &out.TargetPort + *out = new(float64) + **out = **in + } + if in.TrafficWeight != nil { + in, out := &in.TrafficWeight, &out.TrafficWeight + *out = make([]TrafficWeightParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Transport != nil { + in, out := &in.Transport, &out.Transport + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressParameters. +func (in *IngressParameters) DeepCopy() *IngressParameters { + if in == nil { + return nil + } + out := new(IngressParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InitContainerEnvInitParameters) DeepCopyInto(out *InitContainerEnvInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitContainerEnvInitParameters. +func (in *InitContainerEnvInitParameters) DeepCopy() *InitContainerEnvInitParameters { + if in == nil { + return nil + } + out := new(InitContainerEnvInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InitContainerEnvObservation) DeepCopyInto(out *InitContainerEnvObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitContainerEnvObservation. +func (in *InitContainerEnvObservation) DeepCopy() *InitContainerEnvObservation { + if in == nil { + return nil + } + out := new(InitContainerEnvObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InitContainerEnvParameters) DeepCopyInto(out *InitContainerEnvParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitContainerEnvParameters. +func (in *InitContainerEnvParameters) DeepCopy() *InitContainerEnvParameters { + if in == nil { + return nil + } + out := new(InitContainerEnvParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InitContainerInitParameters) DeepCopyInto(out *InitContainerInitParameters) { + *out = *in + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPU != nil { + in, out := &in.CPU, &out.CPU + *out = new(float64) + **out = **in + } + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]InitContainerEnvInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(string) + **out = **in + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.VolumeMounts != nil { + in, out := 
&in.VolumeMounts, &out.VolumeMounts + *out = make([]InitContainerVolumeMountsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitContainerInitParameters. +func (in *InitContainerInitParameters) DeepCopy() *InitContainerInitParameters { + if in == nil { + return nil + } + out := new(InitContainerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InitContainerObservation) DeepCopyInto(out *InitContainerObservation) { + *out = *in + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPU != nil { + in, out := &in.CPU, &out.CPU + *out = new(float64) + **out = **in + } + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]InitContainerEnvObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EphemeralStorage != nil { + in, out := &in.EphemeralStorage, &out.EphemeralStorage + *out = new(string) + **out = **in + } + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(string) + **out = **in + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]InitContainerVolumeMountsObservation, len(*in)) + for i := 
range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitContainerObservation. +func (in *InitContainerObservation) DeepCopy() *InitContainerObservation { + if in == nil { + return nil + } + out := new(InitContainerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InitContainerParameters) DeepCopyInto(out *InitContainerParameters) { + *out = *in + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPU != nil { + in, out := &in.CPU, &out.CPU + *out = new(float64) + **out = **in + } + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]InitContainerEnvParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(string) + **out = **in + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]InitContainerVolumeMountsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitContainerParameters. 
+func (in *InitContainerParameters) DeepCopy() *InitContainerParameters { + if in == nil { + return nil + } + out := new(InitContainerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InitContainerVolumeMountsInitParameters) DeepCopyInto(out *InitContainerVolumeMountsInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitContainerVolumeMountsInitParameters. +func (in *InitContainerVolumeMountsInitParameters) DeepCopy() *InitContainerVolumeMountsInitParameters { + if in == nil { + return nil + } + out := new(InitContainerVolumeMountsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InitContainerVolumeMountsObservation) DeepCopyInto(out *InitContainerVolumeMountsObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitContainerVolumeMountsObservation. +func (in *InitContainerVolumeMountsObservation) DeepCopy() *InitContainerVolumeMountsObservation { + if in == nil { + return nil + } + out := new(InitContainerVolumeMountsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InitContainerVolumeMountsParameters) DeepCopyInto(out *InitContainerVolumeMountsParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitContainerVolumeMountsParameters. +func (in *InitContainerVolumeMountsParameters) DeepCopy() *InitContainerVolumeMountsParameters { + if in == nil { + return nil + } + out := new(InitContainerVolumeMountsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LivenessProbeInitParameters) DeepCopyInto(out *LivenessProbeInitParameters) { + *out = *in + if in.FailureCountThreshold != nil { + in, out := &in.FailureCountThreshold, &out.FailureCountThreshold + *out = new(float64) + **out = **in + } + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = make([]HeaderInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Host != nil { + in, out := &in.Host, &out.Host + *out = new(string) + **out = **in + } + if in.InitialDelay != nil { + in, out := &in.InitialDelay, &out.InitialDelay + *out = new(float64) + **out = **in + } + if in.IntervalSeconds != nil { + in, out := &in.IntervalSeconds, &out.IntervalSeconds + *out = new(float64) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(float64) + **out = **in + } + if in.Transport != nil { + in, out := &in.Transport, &out.Transport + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy 
function, copying the receiver, creating a new LivenessProbeInitParameters. +func (in *LivenessProbeInitParameters) DeepCopy() *LivenessProbeInitParameters { + if in == nil { + return nil + } + out := new(LivenessProbeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LivenessProbeObservation) DeepCopyInto(out *LivenessProbeObservation) { + *out = *in + if in.FailureCountThreshold != nil { + in, out := &in.FailureCountThreshold, &out.FailureCountThreshold + *out = new(float64) + **out = **in + } + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = make([]HeaderObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Host != nil { + in, out := &in.Host, &out.Host + *out = new(string) + **out = **in + } + if in.InitialDelay != nil { + in, out := &in.InitialDelay, &out.InitialDelay + *out = new(float64) + **out = **in + } + if in.IntervalSeconds != nil { + in, out := &in.IntervalSeconds, &out.IntervalSeconds + *out = new(float64) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.TerminationGracePeriodSeconds != nil { + in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds + *out = new(float64) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(float64) + **out = **in + } + if in.Transport != nil { + in, out := &in.Transport, &out.Transport + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LivenessProbeObservation. 
+func (in *LivenessProbeObservation) DeepCopy() *LivenessProbeObservation { + if in == nil { + return nil + } + out := new(LivenessProbeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LivenessProbeParameters) DeepCopyInto(out *LivenessProbeParameters) { + *out = *in + if in.FailureCountThreshold != nil { + in, out := &in.FailureCountThreshold, &out.FailureCountThreshold + *out = new(float64) + **out = **in + } + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = make([]HeaderParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Host != nil { + in, out := &in.Host, &out.Host + *out = new(string) + **out = **in + } + if in.InitialDelay != nil { + in, out := &in.InitialDelay, &out.InitialDelay + *out = new(float64) + **out = **in + } + if in.IntervalSeconds != nil { + in, out := &in.IntervalSeconds, &out.IntervalSeconds + *out = new(float64) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(float64) + **out = **in + } + if in.Transport != nil { + in, out := &in.Transport, &out.Transport + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LivenessProbeParameters. +func (in *LivenessProbeParameters) DeepCopy() *LivenessProbeParameters { + if in == nil { + return nil + } + out := new(LivenessProbeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReadinessProbeHeaderInitParameters) DeepCopyInto(out *ReadinessProbeHeaderInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReadinessProbeHeaderInitParameters. +func (in *ReadinessProbeHeaderInitParameters) DeepCopy() *ReadinessProbeHeaderInitParameters { + if in == nil { + return nil + } + out := new(ReadinessProbeHeaderInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReadinessProbeHeaderObservation) DeepCopyInto(out *ReadinessProbeHeaderObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReadinessProbeHeaderObservation. +func (in *ReadinessProbeHeaderObservation) DeepCopy() *ReadinessProbeHeaderObservation { + if in == nil { + return nil + } + out := new(ReadinessProbeHeaderObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReadinessProbeHeaderParameters) DeepCopyInto(out *ReadinessProbeHeaderParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReadinessProbeHeaderParameters. 
+func (in *ReadinessProbeHeaderParameters) DeepCopy() *ReadinessProbeHeaderParameters { + if in == nil { + return nil + } + out := new(ReadinessProbeHeaderParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReadinessProbeInitParameters) DeepCopyInto(out *ReadinessProbeInitParameters) { + *out = *in + if in.FailureCountThreshold != nil { + in, out := &in.FailureCountThreshold, &out.FailureCountThreshold + *out = new(float64) + **out = **in + } + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = make([]ReadinessProbeHeaderInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Host != nil { + in, out := &in.Host, &out.Host + *out = new(string) + **out = **in + } + if in.IntervalSeconds != nil { + in, out := &in.IntervalSeconds, &out.IntervalSeconds + *out = new(float64) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.SuccessCountThreshold != nil { + in, out := &in.SuccessCountThreshold, &out.SuccessCountThreshold + *out = new(float64) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(float64) + **out = **in + } + if in.Transport != nil { + in, out := &in.Transport, &out.Transport + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReadinessProbeInitParameters. +func (in *ReadinessProbeInitParameters) DeepCopy() *ReadinessProbeInitParameters { + if in == nil { + return nil + } + out := new(ReadinessProbeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReadinessProbeObservation) DeepCopyInto(out *ReadinessProbeObservation) { + *out = *in + if in.FailureCountThreshold != nil { + in, out := &in.FailureCountThreshold, &out.FailureCountThreshold + *out = new(float64) + **out = **in + } + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = make([]ReadinessProbeHeaderObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Host != nil { + in, out := &in.Host, &out.Host + *out = new(string) + **out = **in + } + if in.IntervalSeconds != nil { + in, out := &in.IntervalSeconds, &out.IntervalSeconds + *out = new(float64) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.SuccessCountThreshold != nil { + in, out := &in.SuccessCountThreshold, &out.SuccessCountThreshold + *out = new(float64) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(float64) + **out = **in + } + if in.Transport != nil { + in, out := &in.Transport, &out.Transport + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReadinessProbeObservation. +func (in *ReadinessProbeObservation) DeepCopy() *ReadinessProbeObservation { + if in == nil { + return nil + } + out := new(ReadinessProbeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReadinessProbeParameters) DeepCopyInto(out *ReadinessProbeParameters) { + *out = *in + if in.FailureCountThreshold != nil { + in, out := &in.FailureCountThreshold, &out.FailureCountThreshold + *out = new(float64) + **out = **in + } + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = make([]ReadinessProbeHeaderParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Host != nil { + in, out := &in.Host, &out.Host + *out = new(string) + **out = **in + } + if in.IntervalSeconds != nil { + in, out := &in.IntervalSeconds, &out.IntervalSeconds + *out = new(float64) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.SuccessCountThreshold != nil { + in, out := &in.SuccessCountThreshold, &out.SuccessCountThreshold + *out = new(float64) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(float64) + **out = **in + } + if in.Transport != nil { + in, out := &in.Transport, &out.Transport + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReadinessProbeParameters. +func (in *ReadinessProbeParameters) DeepCopy() *ReadinessProbeParameters { + if in == nil { + return nil + } + out := new(ReadinessProbeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RegistryInitParameters) DeepCopyInto(out *RegistryInitParameters) { + *out = *in + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(string) + **out = **in + } + if in.PasswordSecretName != nil { + in, out := &in.PasswordSecretName, &out.PasswordSecretName + *out = new(string) + **out = **in + } + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryInitParameters. +func (in *RegistryInitParameters) DeepCopy() *RegistryInitParameters { + if in == nil { + return nil + } + out := new(RegistryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegistryObservation) DeepCopyInto(out *RegistryObservation) { + *out = *in + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(string) + **out = **in + } + if in.PasswordSecretName != nil { + in, out := &in.PasswordSecretName, &out.PasswordSecretName + *out = new(string) + **out = **in + } + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryObservation. +func (in *RegistryObservation) DeepCopy() *RegistryObservation { + if in == nil { + return nil + } + out := new(RegistryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RegistryParameters) DeepCopyInto(out *RegistryParameters) { + *out = *in + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(string) + **out = **in + } + if in.PasswordSecretName != nil { + in, out := &in.PasswordSecretName, &out.PasswordSecretName + *out = new(string) + **out = **in + } + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryParameters. +func (in *RegistryParameters) DeepCopy() *RegistryParameters { + if in == nil { + return nil + } + out := new(RegistryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretInitParameters) DeepCopyInto(out *SecretInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretInitParameters. +func (in *SecretInitParameters) DeepCopy() *SecretInitParameters { + if in == nil { + return nil + } + out := new(SecretInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretObservation) DeepCopyInto(out *SecretObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretObservation. +func (in *SecretObservation) DeepCopy() *SecretObservation { + if in == nil { + return nil + } + out := new(SecretObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SecretParameters) DeepCopyInto(out *SecretParameters) { + *out = *in + out.NameSecretRef = in.NameSecretRef + out.ValueSecretRef = in.ValueSecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretParameters. +func (in *SecretParameters) DeepCopy() *SecretParameters { + if in == nil { + return nil + } + out := new(SecretParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StartupProbeHeaderInitParameters) DeepCopyInto(out *StartupProbeHeaderInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StartupProbeHeaderInitParameters. +func (in *StartupProbeHeaderInitParameters) DeepCopy() *StartupProbeHeaderInitParameters { + if in == nil { + return nil + } + out := new(StartupProbeHeaderInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StartupProbeHeaderObservation) DeepCopyInto(out *StartupProbeHeaderObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StartupProbeHeaderObservation. 
+func (in *StartupProbeHeaderObservation) DeepCopy() *StartupProbeHeaderObservation { + if in == nil { + return nil + } + out := new(StartupProbeHeaderObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StartupProbeHeaderParameters) DeepCopyInto(out *StartupProbeHeaderParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StartupProbeHeaderParameters. +func (in *StartupProbeHeaderParameters) DeepCopy() *StartupProbeHeaderParameters { + if in == nil { + return nil + } + out := new(StartupProbeHeaderParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StartupProbeInitParameters) DeepCopyInto(out *StartupProbeInitParameters) { + *out = *in + if in.FailureCountThreshold != nil { + in, out := &in.FailureCountThreshold, &out.FailureCountThreshold + *out = new(float64) + **out = **in + } + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = make([]StartupProbeHeaderInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Host != nil { + in, out := &in.Host, &out.Host + *out = new(string) + **out = **in + } + if in.IntervalSeconds != nil { + in, out := &in.IntervalSeconds, &out.IntervalSeconds + *out = new(float64) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(float64) + **out = **in + } + if in.Transport != nil { + in, out := &in.Transport, &out.Transport + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StartupProbeInitParameters. +func (in *StartupProbeInitParameters) DeepCopy() *StartupProbeInitParameters { + if in == nil { + return nil + } + out := new(StartupProbeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StartupProbeObservation) DeepCopyInto(out *StartupProbeObservation) { + *out = *in + if in.FailureCountThreshold != nil { + in, out := &in.FailureCountThreshold, &out.FailureCountThreshold + *out = new(float64) + **out = **in + } + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = make([]StartupProbeHeaderObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Host != nil { + in, out := &in.Host, &out.Host + *out = new(string) + **out = **in + } + if in.IntervalSeconds != nil { + in, out := &in.IntervalSeconds, &out.IntervalSeconds + *out = new(float64) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.TerminationGracePeriodSeconds != nil { + in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds + *out = new(float64) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(float64) + **out = **in + } + if in.Transport != nil { + in, out := &in.Transport, &out.Transport + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StartupProbeObservation. +func (in *StartupProbeObservation) DeepCopy() *StartupProbeObservation { + if in == nil { + return nil + } + out := new(StartupProbeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StartupProbeParameters) DeepCopyInto(out *StartupProbeParameters) { + *out = *in + if in.FailureCountThreshold != nil { + in, out := &in.FailureCountThreshold, &out.FailureCountThreshold + *out = new(float64) + **out = **in + } + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = make([]StartupProbeHeaderParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Host != nil { + in, out := &in.Host, &out.Host + *out = new(string) + **out = **in + } + if in.IntervalSeconds != nil { + in, out := &in.IntervalSeconds, &out.IntervalSeconds + *out = new(float64) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(float64) + **out = **in + } + if in.Transport != nil { + in, out := &in.Transport, &out.Transport + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StartupProbeParameters. +func (in *StartupProbeParameters) DeepCopy() *StartupProbeParameters { + if in == nil { + return nil + } + out := new(StartupProbeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPScaleRuleAuthenticationInitParameters) DeepCopyInto(out *TCPScaleRuleAuthenticationInitParameters) { + *out = *in + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } + if in.TriggerParameter != nil { + in, out := &in.TriggerParameter, &out.TriggerParameter + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPScaleRuleAuthenticationInitParameters. 
+func (in *TCPScaleRuleAuthenticationInitParameters) DeepCopy() *TCPScaleRuleAuthenticationInitParameters { + if in == nil { + return nil + } + out := new(TCPScaleRuleAuthenticationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPScaleRuleAuthenticationObservation) DeepCopyInto(out *TCPScaleRuleAuthenticationObservation) { + *out = *in + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } + if in.TriggerParameter != nil { + in, out := &in.TriggerParameter, &out.TriggerParameter + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPScaleRuleAuthenticationObservation. +func (in *TCPScaleRuleAuthenticationObservation) DeepCopy() *TCPScaleRuleAuthenticationObservation { + if in == nil { + return nil + } + out := new(TCPScaleRuleAuthenticationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPScaleRuleAuthenticationParameters) DeepCopyInto(out *TCPScaleRuleAuthenticationParameters) { + *out = *in + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } + if in.TriggerParameter != nil { + in, out := &in.TriggerParameter, &out.TriggerParameter + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPScaleRuleAuthenticationParameters. 
+func (in *TCPScaleRuleAuthenticationParameters) DeepCopy() *TCPScaleRuleAuthenticationParameters { + if in == nil { + return nil + } + out := new(TCPScaleRuleAuthenticationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPScaleRuleInitParameters) DeepCopyInto(out *TCPScaleRuleInitParameters) { + *out = *in + if in.Authentication != nil { + in, out := &in.Authentication, &out.Authentication + *out = make([]TCPScaleRuleAuthenticationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ConcurrentRequests != nil { + in, out := &in.ConcurrentRequests, &out.ConcurrentRequests + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPScaleRuleInitParameters. +func (in *TCPScaleRuleInitParameters) DeepCopy() *TCPScaleRuleInitParameters { + if in == nil { + return nil + } + out := new(TCPScaleRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPScaleRuleObservation) DeepCopyInto(out *TCPScaleRuleObservation) { + *out = *in + if in.Authentication != nil { + in, out := &in.Authentication, &out.Authentication + *out = make([]TCPScaleRuleAuthenticationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ConcurrentRequests != nil { + in, out := &in.ConcurrentRequests, &out.ConcurrentRequests + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPScaleRuleObservation. 
+func (in *TCPScaleRuleObservation) DeepCopy() *TCPScaleRuleObservation { + if in == nil { + return nil + } + out := new(TCPScaleRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPScaleRuleParameters) DeepCopyInto(out *TCPScaleRuleParameters) { + *out = *in + if in.Authentication != nil { + in, out := &in.Authentication, &out.Authentication + *out = make([]TCPScaleRuleAuthenticationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ConcurrentRequests != nil { + in, out := &in.ConcurrentRequests, &out.ConcurrentRequests + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPScaleRuleParameters. +func (in *TCPScaleRuleParameters) DeepCopy() *TCPScaleRuleParameters { + if in == nil { + return nil + } + out := new(TCPScaleRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TemplateInitParameters) DeepCopyInto(out *TemplateInitParameters) { + *out = *in + if in.AzureQueueScaleRule != nil { + in, out := &in.AzureQueueScaleRule, &out.AzureQueueScaleRule + *out = make([]AzureQueueScaleRuleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Container != nil { + in, out := &in.Container, &out.Container + *out = make([]ContainerInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CustomScaleRule != nil { + in, out := &in.CustomScaleRule, &out.CustomScaleRule + *out = make([]CustomScaleRuleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HTTPScaleRule != nil { + in, out := &in.HTTPScaleRule, &out.HTTPScaleRule + *out = make([]HTTPScaleRuleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InitContainer != nil { + in, out := &in.InitContainer, &out.InitContainer + *out = make([]InitContainerInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MaxReplicas != nil { + in, out := &in.MaxReplicas, &out.MaxReplicas + *out = new(float64) + **out = **in + } + if in.MinReplicas != nil { + in, out := &in.MinReplicas, &out.MinReplicas + *out = new(float64) + **out = **in + } + if in.RevisionSuffix != nil { + in, out := &in.RevisionSuffix, &out.RevisionSuffix + *out = new(string) + **out = **in + } + if in.TCPScaleRule != nil { + in, out := &in.TCPScaleRule, &out.TCPScaleRule + *out = make([]TCPScaleRuleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Volume != nil { + in, out := &in.Volume, &out.Volume + *out = make([]VolumeInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateInitParameters. 
+func (in *TemplateInitParameters) DeepCopy() *TemplateInitParameters { + if in == nil { + return nil + } + out := new(TemplateInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TemplateObservation) DeepCopyInto(out *TemplateObservation) { + *out = *in + if in.AzureQueueScaleRule != nil { + in, out := &in.AzureQueueScaleRule, &out.AzureQueueScaleRule + *out = make([]AzureQueueScaleRuleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Container != nil { + in, out := &in.Container, &out.Container + *out = make([]ContainerObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CustomScaleRule != nil { + in, out := &in.CustomScaleRule, &out.CustomScaleRule + *out = make([]CustomScaleRuleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HTTPScaleRule != nil { + in, out := &in.HTTPScaleRule, &out.HTTPScaleRule + *out = make([]HTTPScaleRuleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InitContainer != nil { + in, out := &in.InitContainer, &out.InitContainer + *out = make([]InitContainerObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MaxReplicas != nil { + in, out := &in.MaxReplicas, &out.MaxReplicas + *out = new(float64) + **out = **in + } + if in.MinReplicas != nil { + in, out := &in.MinReplicas, &out.MinReplicas + *out = new(float64) + **out = **in + } + if in.RevisionSuffix != nil { + in, out := &in.RevisionSuffix, &out.RevisionSuffix + *out = new(string) + **out = **in + } + if in.TCPScaleRule != nil { + in, out := &in.TCPScaleRule, &out.TCPScaleRule + *out = make([]TCPScaleRuleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Volume != nil { + in, out := 
&in.Volume, &out.Volume + *out = make([]VolumeObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateObservation. +func (in *TemplateObservation) DeepCopy() *TemplateObservation { + if in == nil { + return nil + } + out := new(TemplateObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TemplateParameters) DeepCopyInto(out *TemplateParameters) { + *out = *in + if in.AzureQueueScaleRule != nil { + in, out := &in.AzureQueueScaleRule, &out.AzureQueueScaleRule + *out = make([]AzureQueueScaleRuleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Container != nil { + in, out := &in.Container, &out.Container + *out = make([]ContainerParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CustomScaleRule != nil { + in, out := &in.CustomScaleRule, &out.CustomScaleRule + *out = make([]CustomScaleRuleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HTTPScaleRule != nil { + in, out := &in.HTTPScaleRule, &out.HTTPScaleRule + *out = make([]HTTPScaleRuleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InitContainer != nil { + in, out := &in.InitContainer, &out.InitContainer + *out = make([]InitContainerParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MaxReplicas != nil { + in, out := &in.MaxReplicas, &out.MaxReplicas + *out = new(float64) + **out = **in + } + if in.MinReplicas != nil { + in, out := &in.MinReplicas, &out.MinReplicas + *out = new(float64) + **out = **in + } + if in.RevisionSuffix != nil { + in, out := &in.RevisionSuffix, &out.RevisionSuffix + *out = new(string) + **out = **in + 
} + if in.TCPScaleRule != nil { + in, out := &in.TCPScaleRule, &out.TCPScaleRule + *out = make([]TCPScaleRuleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Volume != nil { + in, out := &in.Volume, &out.Volume + *out = make([]VolumeParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateParameters. +func (in *TemplateParameters) DeepCopy() *TemplateParameters { + if in == nil { + return nil + } + out := new(TemplateParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrafficWeightInitParameters) DeepCopyInto(out *TrafficWeightInitParameters) { + *out = *in + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.LatestRevision != nil { + in, out := &in.LatestRevision, &out.LatestRevision + *out = new(bool) + **out = **in + } + if in.Percentage != nil { + in, out := &in.Percentage, &out.Percentage + *out = new(float64) + **out = **in + } + if in.RevisionSuffix != nil { + in, out := &in.RevisionSuffix, &out.RevisionSuffix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficWeightInitParameters. +func (in *TrafficWeightInitParameters) DeepCopy() *TrafficWeightInitParameters { + if in == nil { + return nil + } + out := new(TrafficWeightInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TrafficWeightObservation) DeepCopyInto(out *TrafficWeightObservation) { + *out = *in + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.LatestRevision != nil { + in, out := &in.LatestRevision, &out.LatestRevision + *out = new(bool) + **out = **in + } + if in.Percentage != nil { + in, out := &in.Percentage, &out.Percentage + *out = new(float64) + **out = **in + } + if in.RevisionSuffix != nil { + in, out := &in.RevisionSuffix, &out.RevisionSuffix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficWeightObservation. +func (in *TrafficWeightObservation) DeepCopy() *TrafficWeightObservation { + if in == nil { + return nil + } + out := new(TrafficWeightObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrafficWeightParameters) DeepCopyInto(out *TrafficWeightParameters) { + *out = *in + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.LatestRevision != nil { + in, out := &in.LatestRevision, &out.LatestRevision + *out = new(bool) + **out = **in + } + if in.Percentage != nil { + in, out := &in.Percentage, &out.Percentage + *out = new(float64) + **out = **in + } + if in.RevisionSuffix != nil { + in, out := &in.RevisionSuffix, &out.RevisionSuffix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficWeightParameters. +func (in *TrafficWeightParameters) DeepCopy() *TrafficWeightParameters { + if in == nil { + return nil + } + out := new(TrafficWeightParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VolumeInitParameters) DeepCopyInto(out *VolumeInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.StorageName != nil { + in, out := &in.StorageName, &out.StorageName + *out = new(string) + **out = **in + } + if in.StorageType != nil { + in, out := &in.StorageType, &out.StorageType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeInitParameters. +func (in *VolumeInitParameters) DeepCopy() *VolumeInitParameters { + if in == nil { + return nil + } + out := new(VolumeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeMountsInitParameters) DeepCopyInto(out *VolumeMountsInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeMountsInitParameters. +func (in *VolumeMountsInitParameters) DeepCopy() *VolumeMountsInitParameters { + if in == nil { + return nil + } + out := new(VolumeMountsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeMountsObservation) DeepCopyInto(out *VolumeMountsObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeMountsObservation. 
+func (in *VolumeMountsObservation) DeepCopy() *VolumeMountsObservation { + if in == nil { + return nil + } + out := new(VolumeMountsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeMountsParameters) DeepCopyInto(out *VolumeMountsParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeMountsParameters. +func (in *VolumeMountsParameters) DeepCopy() *VolumeMountsParameters { + if in == nil { + return nil + } + out := new(VolumeMountsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeObservation) DeepCopyInto(out *VolumeObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.StorageName != nil { + in, out := &in.StorageName, &out.StorageName + *out = new(string) + **out = **in + } + if in.StorageType != nil { + in, out := &in.StorageType, &out.StorageType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeObservation. +func (in *VolumeObservation) DeepCopy() *VolumeObservation { + if in == nil { + return nil + } + out := new(VolumeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VolumeParameters) DeepCopyInto(out *VolumeParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.StorageName != nil { + in, out := &in.StorageName, &out.StorageName + *out = new(string) + **out = **in + } + if in.StorageType != nil { + in, out := &in.StorageType, &out.StorageType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeParameters. +func (in *VolumeParameters) DeepCopy() *VolumeParameters { + if in == nil { + return nil + } + out := new(VolumeParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/containerapp/v1beta2/zz_generated.managed.go b/apis/containerapp/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..f5882b3aa --- /dev/null +++ b/apis/containerapp/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this ContainerApp. +func (mg *ContainerApp) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ContainerApp. +func (mg *ContainerApp) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ContainerApp. +func (mg *ContainerApp) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ContainerApp. +func (mg *ContainerApp) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ContainerApp. 
+func (mg *ContainerApp) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ContainerApp. +func (mg *ContainerApp) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ContainerApp. +func (mg *ContainerApp) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ContainerApp. +func (mg *ContainerApp) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ContainerApp. +func (mg *ContainerApp) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ContainerApp. +func (mg *ContainerApp) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ContainerApp. +func (mg *ContainerApp) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ContainerApp. +func (mg *ContainerApp) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/containerapp/v1beta2/zz_generated.managedlist.go b/apis/containerapp/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..b8a34eb05 --- /dev/null +++ b/apis/containerapp/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this ContainerAppList. 
+func (l *ContainerAppList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/containerapp/v1beta2/zz_generated.resolvers.go b/apis/containerapp/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..d4c180f34 --- /dev/null +++ b/apis/containerapp/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,87 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + apisresolver "github.com/upbound/provider-azure/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *ContainerApp) ResolveReferences( // ResolveReferences of this ContainerApp. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("containerapp.azure.upbound.io", "v1beta1", "Environment", "EnvironmentList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ContainerAppEnvironmentID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.ContainerAppEnvironmentIDRef, + Selector: mg.Spec.ForProvider.ContainerAppEnvironmentIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ContainerAppEnvironmentID") + } + mg.Spec.ForProvider.ContainerAppEnvironmentID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ContainerAppEnvironmentIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = 
apisresolver.GetManagedResource("containerapp.azure.upbound.io", "v1beta1", "Environment", "EnvironmentList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ContainerAppEnvironmentID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.ContainerAppEnvironmentIDRef, + Selector: mg.Spec.InitProvider.ContainerAppEnvironmentIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ContainerAppEnvironmentID") + } + mg.Spec.InitProvider.ContainerAppEnvironmentID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ContainerAppEnvironmentIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/containerapp/v1beta2/zz_groupversion_info.go b/apis/containerapp/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..eea338bb8 --- /dev/null +++ b/apis/containerapp/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=containerapp.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
+const ( + CRDGroup = "containerapp.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/containerregistry/v1beta1/zz_agentpool_types.go b/apis/containerregistry/v1beta1/zz_agentpool_types.go index c1a8cc9c0..cdb2d3127 100755 --- a/apis/containerregistry/v1beta1/zz_agentpool_types.go +++ b/apis/containerregistry/v1beta1/zz_agentpool_types.go @@ -29,7 +29,7 @@ type AgentPoolInitParameters struct { Tier *string `json:"tier,omitempty" tf:"tier,omitempty"` // The ID of the Virtual Network Subnet Resource where the agent machines will be running. Changing this forces a new Azure Container Registry Agent Pool to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.Subnet + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` @@ -73,7 +73,7 @@ type AgentPoolObservation struct { type AgentPoolParameters struct { // Name of Azure Container Registry to create an Agent Pool for. Changing this forces a new Azure Container Registry Agent Pool to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/containerregistry/v1beta1.Registry + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/containerregistry/v1beta2.Registry // +kubebuilder:validation:Optional ContainerRegistryName *string `json:"containerRegistryName,omitempty" tf:"container_registry_name,omitempty"` @@ -116,7 +116,7 @@ type AgentPoolParameters struct { Tier *string `json:"tier,omitempty" tf:"tier,omitempty"` // The ID of the Virtual Network Subnet Resource where the agent machines will be running. Changing this forces a new Azure Container Registry Agent Pool to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.Subnet + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() // +kubebuilder:validation:Optional VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` diff --git a/apis/containerregistry/v1beta1/zz_containerconnectedregistry_types.go b/apis/containerregistry/v1beta1/zz_containerconnectedregistry_types.go index 2df6131de..9517937fe 100755 --- a/apis/containerregistry/v1beta1/zz_containerconnectedregistry_types.go +++ b/apis/containerregistry/v1beta1/zz_containerconnectedregistry_types.go @@ -22,7 +22,7 @@ type ContainerConnectedRegistryInitParameters struct { ClientTokenIds []*string `json:"clientTokenIds,omitempty" tf:"client_token_ids,omitempty"` // The ID of the Container Registry that this Connected Registry will reside in. Changing this forces a new Container Connected Registry to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/containerregistry/v1beta1.Registry + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/containerregistry/v1beta2.Registry // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() ContainerRegistryID *string `json:"containerRegistryId,omitempty" tf:"container_registry_id,omitempty"` @@ -119,7 +119,7 @@ type ContainerConnectedRegistryParameters struct { ClientTokenIds []*string `json:"clientTokenIds,omitempty" tf:"client_token_ids,omitempty"` // The ID of the Container Registry that this Connected Registry will reside in. Changing this forces a new Container Connected Registry to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/containerregistry/v1beta1.Registry + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/containerregistry/v1beta2.Registry // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() // +kubebuilder:validation:Optional ContainerRegistryID *string `json:"containerRegistryId,omitempty" tf:"container_registry_id,omitempty"` diff --git a/apis/containerregistry/v1beta1/zz_generated.conversion_hubs.go b/apis/containerregistry/v1beta1/zz_generated.conversion_hubs.go index d4aaa99d9..6ced9f333 100755 --- a/apis/containerregistry/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/containerregistry/v1beta1/zz_generated.conversion_hubs.go @@ -7,13 +7,10 @@ package v1beta1 // Hub marks this type as a conversion hub. -func (tr *ContainerConnectedRegistry) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Registry) Hub() {} +func (tr *AgentPool) Hub() {} // Hub marks this type as a conversion hub. -func (tr *AgentPool) Hub() {} +func (tr *ContainerConnectedRegistry) Hub() {} // Hub marks this type as a conversion hub. 
func (tr *ScopeMap) Hub() {} @@ -21,8 +18,5 @@ func (tr *ScopeMap) Hub() {} // Hub marks this type as a conversion hub. func (tr *Token) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *TokenPassword) Hub() {} - // Hub marks this type as a conversion hub. func (tr *Webhook) Hub() {} diff --git a/apis/containerregistry/v1beta1/zz_generated.conversion_spokes.go b/apis/containerregistry/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..009ae7868 --- /dev/null +++ b/apis/containerregistry/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,54 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Registry to the hub type. +func (tr *Registry) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Registry type. +func (tr *Registry) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this TokenPassword to the hub type. 
+func (tr *TokenPassword) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the TokenPassword type. +func (tr *TokenPassword) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/containerregistry/v1beta1/zz_generated.resolvers.go b/apis/containerregistry/v1beta1/zz_generated.resolvers.go index 725ac50ed..a88474b9b 100644 --- a/apis/containerregistry/v1beta1/zz_generated.resolvers.go +++ b/apis/containerregistry/v1beta1/zz_generated.resolvers.go @@ -27,7 +27,7 @@ func (mg *AgentPool) ResolveReferences( // ResolveReferences of this AgentPool. var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("containerregistry.azure.upbound.io", "v1beta1", "Registry", "RegistryList") + m, l, err = apisresolver.GetManagedResource("containerregistry.azure.upbound.io", "v1beta2", "Registry", "RegistryList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -65,7 +65,7 @@ func (mg *AgentPool) ResolveReferences( // ResolveReferences of this AgentPool. 
mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "Subnet", "SubnetList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -84,7 +84,7 @@ func (mg *AgentPool) ResolveReferences( // ResolveReferences of this AgentPool. mg.Spec.ForProvider.VirtualNetworkSubnetID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.VirtualNetworkSubnetIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "Subnet", "SubnetList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -115,7 +115,7 @@ func (mg *ContainerConnectedRegistry) ResolveReferences(ctx context.Context, c c var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("containerregistry.azure.upbound.io", "v1beta1", "Registry", "RegistryList") + m, l, err = apisresolver.GetManagedResource("containerregistry.azure.upbound.io", "v1beta2", "Registry", "RegistryList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -153,7 +153,7 @@ func (mg *ContainerConnectedRegistry) ResolveReferences(ctx context.Context, c c mg.Spec.ForProvider.SyncTokenID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.SyncTokenIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("containerregistry.azure.upbound.io", "v1beta1", "Registry", 
"RegistryList") + m, l, err = apisresolver.GetManagedResource("containerregistry.azure.upbound.io", "v1beta2", "Registry", "RegistryList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -323,7 +323,7 @@ func (mg *ScopeMap) ResolveReferences(ctx context.Context, c client.Reader) erro var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("containerregistry.azure.upbound.io", "v1beta1", "Registry", "RegistryList") + m, l, err = apisresolver.GetManagedResource("containerregistry.azure.upbound.io", "v1beta2", "Registry", "RegistryList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -373,7 +373,7 @@ func (mg *Token) ResolveReferences(ctx context.Context, c client.Reader) error { var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("containerregistry.azure.upbound.io", "v1beta1", "Registry", "RegistryList") + m, l, err = apisresolver.GetManagedResource("containerregistry.azure.upbound.io", "v1beta2", "Registry", "RegistryList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -511,7 +511,7 @@ func (mg *Webhook) ResolveReferences(ctx context.Context, c client.Reader) error var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("containerregistry.azure.upbound.io", "v1beta1", "Registry", "RegistryList") + m, l, err = apisresolver.GetManagedResource("containerregistry.azure.upbound.io", "v1beta2", "Registry", "RegistryList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -549,7 +549,7 @@ func (mg *Webhook) ResolveReferences(ctx context.Context, c client.Reader) error 
mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("containerregistry.azure.upbound.io", "v1beta1", "Registry", "RegistryList") + m, l, err = apisresolver.GetManagedResource("containerregistry.azure.upbound.io", "v1beta2", "Registry", "RegistryList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/containerregistry/v1beta1/zz_scopemap_types.go b/apis/containerregistry/v1beta1/zz_scopemap_types.go index d0662a481..ebd1b314d 100755 --- a/apis/containerregistry/v1beta1/zz_scopemap_types.go +++ b/apis/containerregistry/v1beta1/zz_scopemap_types.go @@ -47,7 +47,7 @@ type ScopeMapParameters struct { Actions []*string `json:"actions,omitempty" tf:"actions,omitempty"` // The name of the Container Registry. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/containerregistry/v1beta1.Registry + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/containerregistry/v1beta2.Registry // +kubebuilder:validation:Optional ContainerRegistryName *string `json:"containerRegistryName,omitempty" tf:"container_registry_name,omitempty"` diff --git a/apis/containerregistry/v1beta1/zz_token_types.go b/apis/containerregistry/v1beta1/zz_token_types.go index 83e40c129..6d8d8c6a1 100755 --- a/apis/containerregistry/v1beta1/zz_token_types.go +++ b/apis/containerregistry/v1beta1/zz_token_types.go @@ -53,7 +53,7 @@ type TokenObservation struct { type TokenParameters struct { // The name of the Container Registry. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/containerregistry/v1beta1.Registry + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/containerregistry/v1beta2.Registry // +kubebuilder:validation:Optional ContainerRegistryName *string `json:"containerRegistryName,omitempty" tf:"container_registry_name,omitempty"` diff --git a/apis/containerregistry/v1beta1/zz_webhook_types.go b/apis/containerregistry/v1beta1/zz_webhook_types.go index ea7580a94..b01b813bd 100755 --- a/apis/containerregistry/v1beta1/zz_webhook_types.go +++ b/apis/containerregistry/v1beta1/zz_webhook_types.go @@ -27,7 +27,7 @@ type WebhookInitParameters struct { Location *string `json:"location,omitempty" tf:"location,omitempty"` // The Name of Container registry this Webhook belongs to. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/containerregistry/v1beta1.Registry + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/containerregistry/v1beta2.Registry RegistryName *string `json:"registryName,omitempty" tf:"registry_name,omitempty"` // Reference to a Registry in containerregistry to populate registryName. @@ -105,7 +105,7 @@ type WebhookParameters struct { Location *string `json:"location,omitempty" tf:"location,omitempty"` // The Name of Container registry this Webhook belongs to. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/containerregistry/v1beta1.Registry + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/containerregistry/v1beta2.Registry // +kubebuilder:validation:Optional RegistryName *string `json:"registryName,omitempty" tf:"registry_name,omitempty"` diff --git a/apis/containerregistry/v1beta2/zz_generated.conversion_hubs.go b/apis/containerregistry/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 000000000..45aaca38f --- /dev/null +++ b/apis/containerregistry/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,13 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Registry) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *TokenPassword) Hub() {} diff --git a/apis/containerregistry/v1beta2/zz_generated.deepcopy.go b/apis/containerregistry/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..7af4c1cfa --- /dev/null +++ b/apis/containerregistry/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,1593 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EncryptionInitParameters) DeepCopyInto(out *EncryptionInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.IdentityClientID != nil { + in, out := &in.IdentityClientID, &out.IdentityClientID + *out = new(string) + **out = **in + } + if in.IdentityClientIDRef != nil { + in, out := &in.IdentityClientIDRef, &out.IdentityClientIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IdentityClientIDSelector != nil { + in, out := &in.IdentityClientIDSelector, &out.IdentityClientIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.KeyVaultKeyID != nil { + in, out := &in.KeyVaultKeyID, &out.KeyVaultKeyID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionInitParameters. +func (in *EncryptionInitParameters) DeepCopy() *EncryptionInitParameters { + if in == nil { + return nil + } + out := new(EncryptionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionObservation) DeepCopyInto(out *EncryptionObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.IdentityClientID != nil { + in, out := &in.IdentityClientID, &out.IdentityClientID + *out = new(string) + **out = **in + } + if in.KeyVaultKeyID != nil { + in, out := &in.KeyVaultKeyID, &out.KeyVaultKeyID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionObservation. 
+func (in *EncryptionObservation) DeepCopy() *EncryptionObservation { + if in == nil { + return nil + } + out := new(EncryptionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionParameters) DeepCopyInto(out *EncryptionParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.IdentityClientID != nil { + in, out := &in.IdentityClientID, &out.IdentityClientID + *out = new(string) + **out = **in + } + if in.IdentityClientIDRef != nil { + in, out := &in.IdentityClientIDRef, &out.IdentityClientIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IdentityClientIDSelector != nil { + in, out := &in.IdentityClientIDSelector, &out.IdentityClientIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.KeyVaultKeyID != nil { + in, out := &in.KeyVaultKeyID, &out.KeyVaultKeyID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionParameters. +func (in *EncryptionParameters) DeepCopy() *EncryptionParameters { + if in == nil { + return nil + } + out := new(EncryptionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GeoreplicationsInitParameters) DeepCopyInto(out *GeoreplicationsInitParameters) { + *out = *in + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.RegionalEndpointEnabled != nil { + in, out := &in.RegionalEndpointEnabled, &out.RegionalEndpointEnabled + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ZoneRedundancyEnabled != nil { + in, out := &in.ZoneRedundancyEnabled, &out.ZoneRedundancyEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GeoreplicationsInitParameters. +func (in *GeoreplicationsInitParameters) DeepCopy() *GeoreplicationsInitParameters { + if in == nil { + return nil + } + out := new(GeoreplicationsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GeoreplicationsObservation) DeepCopyInto(out *GeoreplicationsObservation) { + *out = *in + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.RegionalEndpointEnabled != nil { + in, out := &in.RegionalEndpointEnabled, &out.RegionalEndpointEnabled + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ZoneRedundancyEnabled != nil { + in, out := &in.ZoneRedundancyEnabled, &out.ZoneRedundancyEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GeoreplicationsObservation. +func (in *GeoreplicationsObservation) DeepCopy() *GeoreplicationsObservation { + if in == nil { + return nil + } + out := new(GeoreplicationsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GeoreplicationsParameters) DeepCopyInto(out *GeoreplicationsParameters) { + *out = *in + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.RegionalEndpointEnabled != nil { + in, out := &in.RegionalEndpointEnabled, &out.RegionalEndpointEnabled + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ZoneRedundancyEnabled != nil { + in, out := &in.ZoneRedundancyEnabled, &out.ZoneRedundancyEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GeoreplicationsParameters. +func (in *GeoreplicationsParameters) DeepCopy() *GeoreplicationsParameters { + if in == nil { + return nil + } + out := new(GeoreplicationsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPRuleInitParameters) DeepCopyInto(out *IPRuleInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.IPRange != nil { + in, out := &in.IPRange, &out.IPRange + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPRuleInitParameters. +func (in *IPRuleInitParameters) DeepCopy() *IPRuleInitParameters { + if in == nil { + return nil + } + out := new(IPRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IPRuleObservation) DeepCopyInto(out *IPRuleObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.IPRange != nil { + in, out := &in.IPRange, &out.IPRange + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPRuleObservation. +func (in *IPRuleObservation) DeepCopy() *IPRuleObservation { + if in == nil { + return nil + } + out := new(IPRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPRuleParameters) DeepCopyInto(out *IPRuleParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.IPRange != nil { + in, out := &in.IPRange, &out.IPRange + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPRuleParameters. +func (in *IPRuleParameters) DeepCopy() *IPRuleParameters { + if in == nil { + return nil + } + out := new(IPRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityInitParameters) DeepCopyInto(out *IdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityInitParameters. 
+func (in *IdentityInitParameters) DeepCopy() *IdentityInitParameters { + if in == nil { + return nil + } + out := new(IdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityObservation) DeepCopyInto(out *IdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityObservation. +func (in *IdentityObservation) DeepCopy() *IdentityObservation { + if in == nil { + return nil + } + out := new(IdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityParameters) DeepCopyInto(out *IdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityParameters. 
+func (in *IdentityParameters) DeepCopy() *IdentityParameters { + if in == nil { + return nil + } + out := new(IdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkRuleSetInitParameters) DeepCopyInto(out *NetworkRuleSetInitParameters) { + *out = *in + if in.DefaultAction != nil { + in, out := &in.DefaultAction, &out.DefaultAction + *out = new(string) + **out = **in + } + if in.IPRule != nil { + in, out := &in.IPRule, &out.IPRule + *out = make([]IPRuleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VirtualNetwork != nil { + in, out := &in.VirtualNetwork, &out.VirtualNetwork + *out = make([]VirtualNetworkInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkRuleSetInitParameters. +func (in *NetworkRuleSetInitParameters) DeepCopy() *NetworkRuleSetInitParameters { + if in == nil { + return nil + } + out := new(NetworkRuleSetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkRuleSetObservation) DeepCopyInto(out *NetworkRuleSetObservation) { + *out = *in + if in.DefaultAction != nil { + in, out := &in.DefaultAction, &out.DefaultAction + *out = new(string) + **out = **in + } + if in.IPRule != nil { + in, out := &in.IPRule, &out.IPRule + *out = make([]IPRuleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VirtualNetwork != nil { + in, out := &in.VirtualNetwork, &out.VirtualNetwork + *out = make([]VirtualNetworkObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkRuleSetObservation. +func (in *NetworkRuleSetObservation) DeepCopy() *NetworkRuleSetObservation { + if in == nil { + return nil + } + out := new(NetworkRuleSetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkRuleSetParameters) DeepCopyInto(out *NetworkRuleSetParameters) { + *out = *in + if in.DefaultAction != nil { + in, out := &in.DefaultAction, &out.DefaultAction + *out = new(string) + **out = **in + } + if in.IPRule != nil { + in, out := &in.IPRule, &out.IPRule + *out = make([]IPRuleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VirtualNetwork != nil { + in, out := &in.VirtualNetwork, &out.VirtualNetwork + *out = make([]VirtualNetworkParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkRuleSetParameters. 
+func (in *NetworkRuleSetParameters) DeepCopy() *NetworkRuleSetParameters { + if in == nil { + return nil + } + out := new(NetworkRuleSetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Password1InitParameters) DeepCopyInto(out *Password1InitParameters) { + *out = *in + if in.Expiry != nil { + in, out := &in.Expiry, &out.Expiry + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Password1InitParameters. +func (in *Password1InitParameters) DeepCopy() *Password1InitParameters { + if in == nil { + return nil + } + out := new(Password1InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Password1Observation) DeepCopyInto(out *Password1Observation) { + *out = *in + if in.Expiry != nil { + in, out := &in.Expiry, &out.Expiry + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Password1Observation. +func (in *Password1Observation) DeepCopy() *Password1Observation { + if in == nil { + return nil + } + out := new(Password1Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Password1Parameters) DeepCopyInto(out *Password1Parameters) { + *out = *in + if in.Expiry != nil { + in, out := &in.Expiry, &out.Expiry + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Password1Parameters. 
+func (in *Password1Parameters) DeepCopy() *Password1Parameters { + if in == nil { + return nil + } + out := new(Password1Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Password2InitParameters) DeepCopyInto(out *Password2InitParameters) { + *out = *in + if in.Expiry != nil { + in, out := &in.Expiry, &out.Expiry + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Password2InitParameters. +func (in *Password2InitParameters) DeepCopy() *Password2InitParameters { + if in == nil { + return nil + } + out := new(Password2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Password2Observation) DeepCopyInto(out *Password2Observation) { + *out = *in + if in.Expiry != nil { + in, out := &in.Expiry, &out.Expiry + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Password2Observation. +func (in *Password2Observation) DeepCopy() *Password2Observation { + if in == nil { + return nil + } + out := new(Password2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Password2Parameters) DeepCopyInto(out *Password2Parameters) { + *out = *in + if in.Expiry != nil { + in, out := &in.Expiry, &out.Expiry + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Password2Parameters. 
+func (in *Password2Parameters) DeepCopy() *Password2Parameters { + if in == nil { + return nil + } + out := new(Password2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Registry) DeepCopyInto(out *Registry) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Registry. +func (in *Registry) DeepCopy() *Registry { + if in == nil { + return nil + } + out := new(Registry) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Registry) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RegistryInitParameters) DeepCopyInto(out *RegistryInitParameters) { + *out = *in + if in.AdminEnabled != nil { + in, out := &in.AdminEnabled, &out.AdminEnabled + *out = new(bool) + **out = **in + } + if in.AnonymousPullEnabled != nil { + in, out := &in.AnonymousPullEnabled, &out.AnonymousPullEnabled + *out = new(bool) + **out = **in + } + if in.DataEndpointEnabled != nil { + in, out := &in.DataEndpointEnabled, &out.DataEndpointEnabled + *out = new(bool) + **out = **in + } + if in.Encryption != nil { + in, out := &in.Encryption, &out.Encryption + *out = make([]EncryptionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ExportPolicyEnabled != nil { + in, out := &in.ExportPolicyEnabled, &out.ExportPolicyEnabled + *out = new(bool) + **out = **in + } + if in.Georeplications != nil { + in, out := &in.Georeplications, &out.Georeplications + *out = make([]GeoreplicationsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.NetworkRuleBypassOption != nil { + in, out := &in.NetworkRuleBypassOption, &out.NetworkRuleBypassOption + *out = new(string) + **out = **in + } + if in.NetworkRuleSet != nil { + in, out := &in.NetworkRuleSet, &out.NetworkRuleSet + *out = make([]NetworkRuleSetInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.QuarantinePolicyEnabled != nil { + in, out := &in.QuarantinePolicyEnabled, &out.QuarantinePolicyEnabled + *out = new(bool) + **out = **in + } + if in.RetentionPolicy != nil { + in, out := 
&in.RetentionPolicy, &out.RetentionPolicy + *out = make([]RetentionPolicyInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TrustPolicy != nil { + in, out := &in.TrustPolicy, &out.TrustPolicy + *out = make([]TrustPolicyInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ZoneRedundancyEnabled != nil { + in, out := &in.ZoneRedundancyEnabled, &out.ZoneRedundancyEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryInitParameters. +func (in *RegistryInitParameters) DeepCopy() *RegistryInitParameters { + if in == nil { + return nil + } + out := new(RegistryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegistryList) DeepCopyInto(out *RegistryList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Registry, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryList. 
+func (in *RegistryList) DeepCopy() *RegistryList { + if in == nil { + return nil + } + out := new(RegistryList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RegistryList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegistryObservation) DeepCopyInto(out *RegistryObservation) { + *out = *in + if in.AdminEnabled != nil { + in, out := &in.AdminEnabled, &out.AdminEnabled + *out = new(bool) + **out = **in + } + if in.AdminUsername != nil { + in, out := &in.AdminUsername, &out.AdminUsername + *out = new(string) + **out = **in + } + if in.AnonymousPullEnabled != nil { + in, out := &in.AnonymousPullEnabled, &out.AnonymousPullEnabled + *out = new(bool) + **out = **in + } + if in.DataEndpointEnabled != nil { + in, out := &in.DataEndpointEnabled, &out.DataEndpointEnabled + *out = new(bool) + **out = **in + } + if in.Encryption != nil { + in, out := &in.Encryption, &out.Encryption + *out = make([]EncryptionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ExportPolicyEnabled != nil { + in, out := &in.ExportPolicyEnabled, &out.ExportPolicyEnabled + *out = new(bool) + **out = **in + } + if in.Georeplications != nil { + in, out := &in.Georeplications, &out.Georeplications + *out = make([]GeoreplicationsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.LoginServer != 
nil { + in, out := &in.LoginServer, &out.LoginServer + *out = new(string) + **out = **in + } + if in.NetworkRuleBypassOption != nil { + in, out := &in.NetworkRuleBypassOption, &out.NetworkRuleBypassOption + *out = new(string) + **out = **in + } + if in.NetworkRuleSet != nil { + in, out := &in.NetworkRuleSet, &out.NetworkRuleSet + *out = make([]NetworkRuleSetObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.QuarantinePolicyEnabled != nil { + in, out := &in.QuarantinePolicyEnabled, &out.QuarantinePolicyEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.RetentionPolicy != nil { + in, out := &in.RetentionPolicy, &out.RetentionPolicy + *out = make([]RetentionPolicyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TrustPolicy != nil { + in, out := &in.TrustPolicy, &out.TrustPolicy + *out = make([]TrustPolicyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ZoneRedundancyEnabled != nil { + in, out := &in.ZoneRedundancyEnabled, &out.ZoneRedundancyEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryObservation. 
+func (in *RegistryObservation) DeepCopy() *RegistryObservation { + if in == nil { + return nil + } + out := new(RegistryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegistryParameters) DeepCopyInto(out *RegistryParameters) { + *out = *in + if in.AdminEnabled != nil { + in, out := &in.AdminEnabled, &out.AdminEnabled + *out = new(bool) + **out = **in + } + if in.AnonymousPullEnabled != nil { + in, out := &in.AnonymousPullEnabled, &out.AnonymousPullEnabled + *out = new(bool) + **out = **in + } + if in.DataEndpointEnabled != nil { + in, out := &in.DataEndpointEnabled, &out.DataEndpointEnabled + *out = new(bool) + **out = **in + } + if in.Encryption != nil { + in, out := &in.Encryption, &out.Encryption + *out = make([]EncryptionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ExportPolicyEnabled != nil { + in, out := &in.ExportPolicyEnabled, &out.ExportPolicyEnabled + *out = new(bool) + **out = **in + } + if in.Georeplications != nil { + in, out := &in.Georeplications, &out.Georeplications + *out = make([]GeoreplicationsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.NetworkRuleBypassOption != nil { + in, out := &in.NetworkRuleBypassOption, &out.NetworkRuleBypassOption + *out = new(string) + **out = **in + } + if in.NetworkRuleSet != nil { + in, out := &in.NetworkRuleSet, &out.NetworkRuleSet + *out = make([]NetworkRuleSetParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, 
&out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.QuarantinePolicyEnabled != nil { + in, out := &in.QuarantinePolicyEnabled, &out.QuarantinePolicyEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RetentionPolicy != nil { + in, out := &in.RetentionPolicy, &out.RetentionPolicy + *out = make([]RetentionPolicyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TrustPolicy != nil { + in, out := &in.TrustPolicy, &out.TrustPolicy + *out = make([]TrustPolicyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ZoneRedundancyEnabled != nil { + in, out := &in.ZoneRedundancyEnabled, &out.ZoneRedundancyEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryParameters. 
+func (in *RegistryParameters) DeepCopy() *RegistryParameters { + if in == nil { + return nil + } + out := new(RegistryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegistrySpec) DeepCopyInto(out *RegistrySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistrySpec. +func (in *RegistrySpec) DeepCopy() *RegistrySpec { + if in == nil { + return nil + } + out := new(RegistrySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegistryStatus) DeepCopyInto(out *RegistryStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryStatus. +func (in *RegistryStatus) DeepCopy() *RegistryStatus { + if in == nil { + return nil + } + out := new(RegistryStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetentionPolicyInitParameters) DeepCopyInto(out *RetentionPolicyInitParameters) { + *out = *in + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = new(float64) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionPolicyInitParameters. 
+func (in *RetentionPolicyInitParameters) DeepCopy() *RetentionPolicyInitParameters { + if in == nil { + return nil + } + out := new(RetentionPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetentionPolicyObservation) DeepCopyInto(out *RetentionPolicyObservation) { + *out = *in + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = new(float64) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionPolicyObservation. +func (in *RetentionPolicyObservation) DeepCopy() *RetentionPolicyObservation { + if in == nil { + return nil + } + out := new(RetentionPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetentionPolicyParameters) DeepCopyInto(out *RetentionPolicyParameters) { + *out = *in + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = new(float64) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionPolicyParameters. +func (in *RetentionPolicyParameters) DeepCopy() *RetentionPolicyParameters { + if in == nil { + return nil + } + out := new(RetentionPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TokenPassword) DeepCopyInto(out *TokenPassword) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenPassword. +func (in *TokenPassword) DeepCopy() *TokenPassword { + if in == nil { + return nil + } + out := new(TokenPassword) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TokenPassword) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenPasswordInitParameters) DeepCopyInto(out *TokenPasswordInitParameters) { + *out = *in + if in.ContainerRegistryTokenID != nil { + in, out := &in.ContainerRegistryTokenID, &out.ContainerRegistryTokenID + *out = new(string) + **out = **in + } + if in.ContainerRegistryTokenIDRef != nil { + in, out := &in.ContainerRegistryTokenIDRef, &out.ContainerRegistryTokenIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ContainerRegistryTokenIDSelector != nil { + in, out := &in.ContainerRegistryTokenIDSelector, &out.ContainerRegistryTokenIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Password1 != nil { + in, out := &in.Password1, &out.Password1 + *out = new(Password1InitParameters) + (*in).DeepCopyInto(*out) + } + if in.Password2 != nil { + in, out := &in.Password2, &out.Password2 + *out = new(Password2InitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenPasswordInitParameters. 
+func (in *TokenPasswordInitParameters) DeepCopy() *TokenPasswordInitParameters { + if in == nil { + return nil + } + out := new(TokenPasswordInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenPasswordList) DeepCopyInto(out *TokenPasswordList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]TokenPassword, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenPasswordList. +func (in *TokenPasswordList) DeepCopy() *TokenPasswordList { + if in == nil { + return nil + } + out := new(TokenPasswordList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TokenPasswordList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TokenPasswordObservation) DeepCopyInto(out *TokenPasswordObservation) { + *out = *in + if in.ContainerRegistryTokenID != nil { + in, out := &in.ContainerRegistryTokenID, &out.ContainerRegistryTokenID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Password1 != nil { + in, out := &in.Password1, &out.Password1 + *out = new(Password1Observation) + (*in).DeepCopyInto(*out) + } + if in.Password2 != nil { + in, out := &in.Password2, &out.Password2 + *out = new(Password2Observation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenPasswordObservation. +func (in *TokenPasswordObservation) DeepCopy() *TokenPasswordObservation { + if in == nil { + return nil + } + out := new(TokenPasswordObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TokenPasswordParameters) DeepCopyInto(out *TokenPasswordParameters) { + *out = *in + if in.ContainerRegistryTokenID != nil { + in, out := &in.ContainerRegistryTokenID, &out.ContainerRegistryTokenID + *out = new(string) + **out = **in + } + if in.ContainerRegistryTokenIDRef != nil { + in, out := &in.ContainerRegistryTokenIDRef, &out.ContainerRegistryTokenIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ContainerRegistryTokenIDSelector != nil { + in, out := &in.ContainerRegistryTokenIDSelector, &out.ContainerRegistryTokenIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Password1 != nil { + in, out := &in.Password1, &out.Password1 + *out = new(Password1Parameters) + (*in).DeepCopyInto(*out) + } + if in.Password2 != nil { + in, out := &in.Password2, &out.Password2 + *out = new(Password2Parameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenPasswordParameters. +func (in *TokenPasswordParameters) DeepCopy() *TokenPasswordParameters { + if in == nil { + return nil + } + out := new(TokenPasswordParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenPasswordSpec) DeepCopyInto(out *TokenPasswordSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenPasswordSpec. +func (in *TokenPasswordSpec) DeepCopy() *TokenPasswordSpec { + if in == nil { + return nil + } + out := new(TokenPasswordSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TokenPasswordStatus) DeepCopyInto(out *TokenPasswordStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenPasswordStatus. +func (in *TokenPasswordStatus) DeepCopy() *TokenPasswordStatus { + if in == nil { + return nil + } + out := new(TokenPasswordStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrustPolicyInitParameters) DeepCopyInto(out *TrustPolicyInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrustPolicyInitParameters. +func (in *TrustPolicyInitParameters) DeepCopy() *TrustPolicyInitParameters { + if in == nil { + return nil + } + out := new(TrustPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrustPolicyObservation) DeepCopyInto(out *TrustPolicyObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrustPolicyObservation. +func (in *TrustPolicyObservation) DeepCopy() *TrustPolicyObservation { + if in == nil { + return nil + } + out := new(TrustPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TrustPolicyParameters) DeepCopyInto(out *TrustPolicyParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrustPolicyParameters. +func (in *TrustPolicyParameters) DeepCopy() *TrustPolicyParameters { + if in == nil { + return nil + } + out := new(TrustPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualNetworkInitParameters) DeepCopyInto(out *VirtualNetworkInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkInitParameters. +func (in *VirtualNetworkInitParameters) DeepCopy() *VirtualNetworkInitParameters { + if in == nil { + return nil + } + out := new(VirtualNetworkInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualNetworkObservation) DeepCopyInto(out *VirtualNetworkObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkObservation. +func (in *VirtualNetworkObservation) DeepCopy() *VirtualNetworkObservation { + if in == nil { + return nil + } + out := new(VirtualNetworkObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualNetworkParameters) DeepCopyInto(out *VirtualNetworkParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkParameters. 
+func (in *VirtualNetworkParameters) DeepCopy() *VirtualNetworkParameters { + if in == nil { + return nil + } + out := new(VirtualNetworkParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/containerregistry/v1beta2/zz_generated.managed.go b/apis/containerregistry/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..aef25f534 --- /dev/null +++ b/apis/containerregistry/v1beta2/zz_generated.managed.go @@ -0,0 +1,128 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Registry. +func (mg *Registry) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Registry. +func (mg *Registry) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Registry. +func (mg *Registry) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Registry. +func (mg *Registry) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Registry. +func (mg *Registry) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Registry. +func (mg *Registry) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Registry. +func (mg *Registry) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Registry. +func (mg *Registry) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Registry. 
+func (mg *Registry) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Registry. +func (mg *Registry) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Registry. +func (mg *Registry) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Registry. +func (mg *Registry) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this TokenPassword. +func (mg *TokenPassword) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this TokenPassword. +func (mg *TokenPassword) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this TokenPassword. +func (mg *TokenPassword) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this TokenPassword. +func (mg *TokenPassword) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this TokenPassword. +func (mg *TokenPassword) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this TokenPassword. +func (mg *TokenPassword) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this TokenPassword. +func (mg *TokenPassword) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this TokenPassword. 
+func (mg *TokenPassword) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this TokenPassword. +func (mg *TokenPassword) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this TokenPassword. +func (mg *TokenPassword) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this TokenPassword. +func (mg *TokenPassword) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this TokenPassword. +func (mg *TokenPassword) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/containerregistry/v1beta2/zz_generated.managedlist.go b/apis/containerregistry/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..f30f5d383 --- /dev/null +++ b/apis/containerregistry/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,26 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this RegistryList. +func (l *RegistryList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this TokenPasswordList. 
+func (l *TokenPasswordList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/containerregistry/v1beta2/zz_generated.resolvers.go b/apis/containerregistry/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..6328629ed --- /dev/null +++ b/apis/containerregistry/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,190 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + rconfig "github.com/upbound/provider-azure/apis/rconfig" + client "sigs.k8s.io/controller-runtime/pkg/client" + + // ResolveReferences of this Registry. 
+ apisresolver "github.com/upbound/provider-azure/internal/apis" +) + +func (mg *Registry) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + for i3 := 0; i3 < len(mg.Spec.ForProvider.Encryption); i3++ { + { + m, l, err = apisresolver.GetManagedResource("managedidentity.azure.upbound.io", "v1beta1", "UserAssignedIdentity", "UserAssignedIdentityList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Encryption[i3].IdentityClientID), + Extract: resource.ExtractParamPath("client_id", true), + Reference: mg.Spec.ForProvider.Encryption[i3].IdentityClientIDRef, + Selector: mg.Spec.ForProvider.Encryption[i3].IdentityClientIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Encryption[i3].IdentityClientID") + } + mg.Spec.ForProvider.Encryption[i3].IdentityClientID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Encryption[i3].IdentityClientIDRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.NetworkRuleSet); i3++ { + for i4 := 0; i4 < len(mg.Spec.ForProvider.NetworkRuleSet[i3].VirtualNetwork); i4++ { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.NetworkRuleSet[i3].VirtualNetwork[i4].SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: 
mg.Spec.ForProvider.NetworkRuleSet[i3].VirtualNetwork[i4].SubnetIDRef, + Selector: mg.Spec.ForProvider.NetworkRuleSet[i3].VirtualNetwork[i4].SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.NetworkRuleSet[i3].VirtualNetwork[i4].SubnetID") + } + mg.Spec.ForProvider.NetworkRuleSet[i3].VirtualNetwork[i4].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.NetworkRuleSet[i3].VirtualNetwork[i4].SubnetIDRef = rsp.ResolvedReference + + } + } + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.InitProvider.Encryption); i3++ { + { + m, l, err = apisresolver.GetManagedResource("managedidentity.azure.upbound.io", "v1beta1", "UserAssignedIdentity", "UserAssignedIdentityList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Encryption[i3].IdentityClientID), + Extract: resource.ExtractParamPath("client_id", true), + Reference: 
mg.Spec.InitProvider.Encryption[i3].IdentityClientIDRef, + Selector: mg.Spec.InitProvider.Encryption[i3].IdentityClientIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Encryption[i3].IdentityClientID") + } + mg.Spec.InitProvider.Encryption[i3].IdentityClientID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Encryption[i3].IdentityClientIDRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.NetworkRuleSet); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.NetworkRuleSet[i3].VirtualNetwork); i4++ { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.NetworkRuleSet[i3].VirtualNetwork[i4].SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.NetworkRuleSet[i3].VirtualNetwork[i4].SubnetIDRef, + Selector: mg.Spec.InitProvider.NetworkRuleSet[i3].VirtualNetwork[i4].SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.NetworkRuleSet[i3].VirtualNetwork[i4].SubnetID") + } + mg.Spec.InitProvider.NetworkRuleSet[i3].VirtualNetwork[i4].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.NetworkRuleSet[i3].VirtualNetwork[i4].SubnetIDRef = rsp.ResolvedReference + + } + } + + return nil +} + +// ResolveReferences of this TokenPassword. 
+func (mg *TokenPassword) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("containerregistry.azure.upbound.io", "v1beta1", "Token", "TokenList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ContainerRegistryTokenID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.ContainerRegistryTokenIDRef, + Selector: mg.Spec.ForProvider.ContainerRegistryTokenIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ContainerRegistryTokenID") + } + mg.Spec.ForProvider.ContainerRegistryTokenID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ContainerRegistryTokenIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("containerregistry.azure.upbound.io", "v1beta1", "Token", "TokenList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ContainerRegistryTokenID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.ContainerRegistryTokenIDRef, + Selector: mg.Spec.InitProvider.ContainerRegistryTokenIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ContainerRegistryTokenID") + } + mg.Spec.InitProvider.ContainerRegistryTokenID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ContainerRegistryTokenIDRef = 
rsp.ResolvedReference + + return nil +} diff --git a/apis/containerregistry/v1beta2/zz_groupversion_info.go b/apis/containerregistry/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..3823daf71 --- /dev/null +++ b/apis/containerregistry/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=containerregistry.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "containerregistry.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/containerregistry/v1beta2/zz_registry_terraformed.go b/apis/containerregistry/v1beta2/zz_registry_terraformed.go new file mode 100755 index 000000000..aa611dc03 --- /dev/null +++ b/apis/containerregistry/v1beta2/zz_registry_terraformed.go @@ -0,0 +1,130 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Registry +func (mg *Registry) GetTerraformResourceType() string { + return "azurerm_container_registry" +} + +// GetConnectionDetailsMapping for this Registry +func (tr *Registry) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"admin_password": "status.atProvider.adminPassword"} +} + +// GetObservation of this Registry +func (tr *Registry) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Registry +func (tr *Registry) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Registry +func (tr *Registry) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Registry +func (tr *Registry) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Registry +func (tr *Registry) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Registry +func (tr *Registry) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } 
+ base := map[string]any{}
+ return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this Registry
+func (tr *Registry) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+ params, err := tr.GetParameters()
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+ }
+ if !shouldMergeInitProvider {
+ return params, nil
+ }
+
+ initParams, err := tr.GetInitParameters()
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+ }
+
+ // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+ // slices from the initProvider to forProvider. As it also sets
+ // overwrite to true, we need to set it back to false, we don't
+ // want to overwrite the forProvider fields with the initProvider
+ // fields.
+ err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+ c.Overwrite = false
+ })
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+ }
+
+ return params, nil
+}
+
+// LateInitialize this Registry using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *Registry) LateInitialize(attrs []byte) (bool, error) {
+ params := &RegistryParameters{}
+ if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+ return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+ }
+ opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+ opts = append(opts, resource.WithNameFilter("Encryption"))
+
+ li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Registry) GetTerraformSchemaVersion() int { + return 2 +} diff --git a/apis/containerregistry/v1beta2/zz_registry_types.go b/apis/containerregistry/v1beta2/zz_registry_types.go new file mode 100755 index 000000000..cf861f940 --- /dev/null +++ b/apis/containerregistry/v1beta2/zz_registry_types.go @@ -0,0 +1,594 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type EncryptionInitParameters struct { + + // Boolean value that indicates whether encryption is enabled. + Enabled *bool `json:"enabled,omitempty" tf:"enabled"` + + // The client ID of the managed identity associated with the encryption key. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/managedidentity/v1beta1.UserAssignedIdentity + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("client_id",true) + IdentityClientID *string `json:"identityClientId,omitempty" tf:"identity_client_id"` + + // Reference to a UserAssignedIdentity in managedidentity to populate identityClientId. + // +kubebuilder:validation:Optional + IdentityClientIDRef *v1.Reference `json:"identityClientIdRef,omitempty" tf:"-"` + + // Selector for a UserAssignedIdentity in managedidentity to populate identityClientId. + // +kubebuilder:validation:Optional + IdentityClientIDSelector *v1.Selector `json:"identityClientIdSelector,omitempty" tf:"-"` + + // The ID of the Key Vault Key. 
+ KeyVaultKeyID *string `json:"keyVaultKeyId,omitempty" tf:"key_vault_key_id"` +} + +type EncryptionObservation struct { + + // Boolean value that indicates whether encryption is enabled. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The client ID of the managed identity associated with the encryption key. + IdentityClientID *string `json:"identityClientId,omitempty" tf:"identity_client_id,omitempty"` + + // The ID of the Key Vault Key. + KeyVaultKeyID *string `json:"keyVaultKeyId,omitempty" tf:"key_vault_key_id,omitempty"` +} + +type EncryptionParameters struct { + + // Boolean value that indicates whether encryption is enabled. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled"` + + // The client ID of the managed identity associated with the encryption key. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/managedidentity/v1beta1.UserAssignedIdentity + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("client_id",true) + // +kubebuilder:validation:Optional + IdentityClientID *string `json:"identityClientId,omitempty" tf:"identity_client_id"` + + // Reference to a UserAssignedIdentity in managedidentity to populate identityClientId. + // +kubebuilder:validation:Optional + IdentityClientIDRef *v1.Reference `json:"identityClientIdRef,omitempty" tf:"-"` + + // Selector for a UserAssignedIdentity in managedidentity to populate identityClientId. + // +kubebuilder:validation:Optional + IdentityClientIDSelector *v1.Selector `json:"identityClientIdSelector,omitempty" tf:"-"` + + // The ID of the Key Vault Key. + // +kubebuilder:validation:Optional + KeyVaultKeyID *string `json:"keyVaultKeyId,omitempty" tf:"key_vault_key_id"` +} + +type GeoreplicationsInitParameters struct { + + // A location where the container registry should be geo-replicated. 
+ Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Whether regional endpoint is enabled for this Container Registry? + RegionalEndpointEnabled *bool `json:"regionalEndpointEnabled,omitempty" tf:"regional_endpoint_enabled,omitempty"` + + // A mapping of tags to assign to this replication location. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Whether zone redundancy is enabled for this Container Registry? Changing this forces a new resource to be created. Defaults to false. + ZoneRedundancyEnabled *bool `json:"zoneRedundancyEnabled,omitempty" tf:"zone_redundancy_enabled,omitempty"` +} + +type GeoreplicationsObservation struct { + + // A location where the container registry should be geo-replicated. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Whether regional endpoint is enabled for this Container Registry? + RegionalEndpointEnabled *bool `json:"regionalEndpointEnabled,omitempty" tf:"regional_endpoint_enabled,omitempty"` + + // A mapping of tags to assign to this replication location. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Whether zone redundancy is enabled for this Container Registry? Changing this forces a new resource to be created. Defaults to false. + ZoneRedundancyEnabled *bool `json:"zoneRedundancyEnabled,omitempty" tf:"zone_redundancy_enabled,omitempty"` +} + +type GeoreplicationsParameters struct { + + // A location where the container registry should be geo-replicated. + // +kubebuilder:validation:Optional + Location *string `json:"location" tf:"location,omitempty"` + + // Whether regional endpoint is enabled for this Container Registry? + // +kubebuilder:validation:Optional + RegionalEndpointEnabled *bool `json:"regionalEndpointEnabled,omitempty" tf:"regional_endpoint_enabled,omitempty"` + + // A mapping of tags to assign to this replication location. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Whether zone redundancy is enabled for this Container Registry? Changing this forces a new resource to be created. Defaults to false. + // +kubebuilder:validation:Optional + ZoneRedundancyEnabled *bool `json:"zoneRedundancyEnabled,omitempty" tf:"zone_redundancy_enabled,omitempty"` +} + +type IPRuleInitParameters struct { + + // The behaviour for requests matching this rule. At this time the only supported value is Allow + Action *string `json:"action,omitempty" tf:"action"` + + // The CIDR block from which requests will match the rule. + IPRange *string `json:"ipRange,omitempty" tf:"ip_range"` +} + +type IPRuleObservation struct { + + // The behaviour for requests matching this rule. At this time the only supported value is Allow + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The CIDR block from which requests will match the rule. + IPRange *string `json:"ipRange,omitempty" tf:"ip_range,omitempty"` +} + +type IPRuleParameters struct { + + // The behaviour for requests matching this rule. At this time the only supported value is Allow + // +kubebuilder:validation:Optional + Action *string `json:"action,omitempty" tf:"action"` + + // The CIDR block from which requests will match the rule. + // +kubebuilder:validation:Optional + IPRange *string `json:"ipRange,omitempty" tf:"ip_range"` +} + +type IdentityInitParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Container Registry. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Container Registry. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityObservation struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Container Registry. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Principal ID associated with this Managed Service Identity. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID associated with this Managed Service Identity. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Container Registry. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Container Registry. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Container Registry. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type NetworkRuleSetInitParameters struct { + + // The behaviour for requests matching no rules. Either Allow or Deny. Defaults to Allow + DefaultAction *string `json:"defaultAction,omitempty" tf:"default_action"` + + // One or more ip_rule blocks as defined below. + IPRule []IPRuleInitParameters `json:"ipRule,omitempty" tf:"ip_rule"` + + VirtualNetwork []VirtualNetworkInitParameters `json:"virtualNetwork,omitempty" tf:"virtual_network"` +} + +type NetworkRuleSetObservation struct { + + // The behaviour for requests matching no rules. 
Either Allow or Deny. Defaults to Allow + DefaultAction *string `json:"defaultAction,omitempty" tf:"default_action,omitempty"` + + // One or more ip_rule blocks as defined below. + IPRule []IPRuleObservation `json:"ipRule,omitempty" tf:"ip_rule,omitempty"` + + VirtualNetwork []VirtualNetworkObservation `json:"virtualNetwork,omitempty" tf:"virtual_network,omitempty"` +} + +type NetworkRuleSetParameters struct { + + // The behaviour for requests matching no rules. Either Allow or Deny. Defaults to Allow + // +kubebuilder:validation:Optional + DefaultAction *string `json:"defaultAction,omitempty" tf:"default_action"` + + // One or more ip_rule blocks as defined below. + // +kubebuilder:validation:Optional + IPRule []IPRuleParameters `json:"ipRule,omitempty" tf:"ip_rule"` + + // +kubebuilder:validation:Optional + VirtualNetwork []VirtualNetworkParameters `json:"virtualNetwork,omitempty" tf:"virtual_network"` +} + +type RegistryInitParameters struct { + + // Specifies whether the admin user is enabled. Defaults to false. + AdminEnabled *bool `json:"adminEnabled,omitempty" tf:"admin_enabled,omitempty"` + + // Whether allows anonymous (unauthenticated) pull access to this Container Registry? This is only supported on resources with the Standard or Premium SKU. + AnonymousPullEnabled *bool `json:"anonymousPullEnabled,omitempty" tf:"anonymous_pull_enabled,omitempty"` + + // Whether to enable dedicated data endpoints for this Container Registry? This is only supported on resources with the Premium SKU. + DataEndpointEnabled *bool `json:"dataEndpointEnabled,omitempty" tf:"data_endpoint_enabled,omitempty"` + + // An encryption block as documented below. + Encryption []EncryptionInitParameters `json:"encryption,omitempty" tf:"encryption,omitempty"` + + // Boolean value that indicates whether export policy is enabled. Defaults to true. In order to set it to false, make sure the public_network_access_enabled is also set to false. 
+ ExportPolicyEnabled *bool `json:"exportPolicyEnabled,omitempty" tf:"export_policy_enabled,omitempty"` + + // A georeplications block as documented below. + Georeplications []GeoreplicationsInitParameters `json:"georeplications,omitempty" tf:"georeplications,omitempty"` + + // An identity block as defined below. + Identity *IdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Whether to allow trusted Azure services to access a network restricted Container Registry? Possible values are None and AzureServices. Defaults to AzureServices. + NetworkRuleBypassOption *string `json:"networkRuleBypassOption,omitempty" tf:"network_rule_bypass_option,omitempty"` + + // A network_rule_set block as documented below. + NetworkRuleSet []NetworkRuleSetInitParameters `json:"networkRuleSet,omitempty" tf:"network_rule_set,omitempty"` + + // Whether public network access is allowed for the container registry. Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // Boolean value that indicates whether quarantine policy is enabled. + QuarantinePolicyEnabled *bool `json:"quarantinePolicyEnabled,omitempty" tf:"quarantine_policy_enabled,omitempty"` + + // A retention_policy block as documented below. + RetentionPolicy []RetentionPolicyInitParameters `json:"retentionPolicy,omitempty" tf:"retention_policy,omitempty"` + + // The SKU name of the container registry. Possible values are Basic, Standard and Premium. + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A trust_policy block as documented below. 
+ TrustPolicy []TrustPolicyInitParameters `json:"trustPolicy,omitempty" tf:"trust_policy,omitempty"` + + // Whether zone redundancy is enabled for this Container Registry? Changing this forces a new resource to be created. Defaults to false. + ZoneRedundancyEnabled *bool `json:"zoneRedundancyEnabled,omitempty" tf:"zone_redundancy_enabled,omitempty"` +} + +type RegistryObservation struct { + + // Specifies whether the admin user is enabled. Defaults to false. + AdminEnabled *bool `json:"adminEnabled,omitempty" tf:"admin_enabled,omitempty"` + + // The Username associated with the Container Registry Admin account - if the admin account is enabled. + AdminUsername *string `json:"adminUsername,omitempty" tf:"admin_username,omitempty"` + + // Whether allows anonymous (unauthenticated) pull access to this Container Registry? This is only supported on resources with the Standard or Premium SKU. + AnonymousPullEnabled *bool `json:"anonymousPullEnabled,omitempty" tf:"anonymous_pull_enabled,omitempty"` + + // Whether to enable dedicated data endpoints for this Container Registry? This is only supported on resources with the Premium SKU. + DataEndpointEnabled *bool `json:"dataEndpointEnabled,omitempty" tf:"data_endpoint_enabled,omitempty"` + + // An encryption block as documented below. + Encryption []EncryptionObservation `json:"encryption,omitempty" tf:"encryption,omitempty"` + + // Boolean value that indicates whether export policy is enabled. Defaults to true. In order to set it to false, make sure the public_network_access_enabled is also set to false. + ExportPolicyEnabled *bool `json:"exportPolicyEnabled,omitempty" tf:"export_policy_enabled,omitempty"` + + // A georeplications block as documented below. + Georeplications []GeoreplicationsObservation `json:"georeplications,omitempty" tf:"georeplications,omitempty"` + + // The ID of the Container Registry. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. 
+ Identity *IdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The URL that can be used to log into the container registry. + LoginServer *string `json:"loginServer,omitempty" tf:"login_server,omitempty"` + + // Whether to allow trusted Azure services to access a network restricted Container Registry? Possible values are None and AzureServices. Defaults to AzureServices. + NetworkRuleBypassOption *string `json:"networkRuleBypassOption,omitempty" tf:"network_rule_bypass_option,omitempty"` + + // A network_rule_set block as documented below. + NetworkRuleSet []NetworkRuleSetObservation `json:"networkRuleSet,omitempty" tf:"network_rule_set,omitempty"` + + // Whether public network access is allowed for the container registry. Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // Boolean value that indicates whether quarantine policy is enabled. + QuarantinePolicyEnabled *bool `json:"quarantinePolicyEnabled,omitempty" tf:"quarantine_policy_enabled,omitempty"` + + // The name of the resource group in which to create the Container Registry. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A retention_policy block as documented below. + RetentionPolicy []RetentionPolicyObservation `json:"retentionPolicy,omitempty" tf:"retention_policy,omitempty"` + + // The SKU name of the container registry. Possible values are Basic, Standard and Premium. + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // A mapping of tags to assign to the resource. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A trust_policy block as documented below. + TrustPolicy []TrustPolicyObservation `json:"trustPolicy,omitempty" tf:"trust_policy,omitempty"` + + // Whether zone redundancy is enabled for this Container Registry? Changing this forces a new resource to be created. Defaults to false. + ZoneRedundancyEnabled *bool `json:"zoneRedundancyEnabled,omitempty" tf:"zone_redundancy_enabled,omitempty"` +} + +type RegistryParameters struct { + + // Specifies whether the admin user is enabled. Defaults to false. + // +kubebuilder:validation:Optional + AdminEnabled *bool `json:"adminEnabled,omitempty" tf:"admin_enabled,omitempty"` + + // Whether allows anonymous (unauthenticated) pull access to this Container Registry? This is only supported on resources with the Standard or Premium SKU. + // +kubebuilder:validation:Optional + AnonymousPullEnabled *bool `json:"anonymousPullEnabled,omitempty" tf:"anonymous_pull_enabled,omitempty"` + + // Whether to enable dedicated data endpoints for this Container Registry? This is only supported on resources with the Premium SKU. + // +kubebuilder:validation:Optional + DataEndpointEnabled *bool `json:"dataEndpointEnabled,omitempty" tf:"data_endpoint_enabled,omitempty"` + + // An encryption block as documented below. + // +kubebuilder:validation:Optional + Encryption []EncryptionParameters `json:"encryption,omitempty" tf:"encryption,omitempty"` + + // Boolean value that indicates whether export policy is enabled. Defaults to true. In order to set it to false, make sure the public_network_access_enabled is also set to false. + // +kubebuilder:validation:Optional + ExportPolicyEnabled *bool `json:"exportPolicyEnabled,omitempty" tf:"export_policy_enabled,omitempty"` + + // A georeplications block as documented below. 
+ // +kubebuilder:validation:Optional + Georeplications []GeoreplicationsParameters `json:"georeplications,omitempty" tf:"georeplications,omitempty"` + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *IdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Whether to allow trusted Azure services to access a network restricted Container Registry? Possible values are None and AzureServices. Defaults to AzureServices. + // +kubebuilder:validation:Optional + NetworkRuleBypassOption *string `json:"networkRuleBypassOption,omitempty" tf:"network_rule_bypass_option,omitempty"` + + // A network_rule_set block as documented below. + // +kubebuilder:validation:Optional + NetworkRuleSet []NetworkRuleSetParameters `json:"networkRuleSet,omitempty" tf:"network_rule_set,omitempty"` + + // Whether public network access is allowed for the container registry. Defaults to true. + // +kubebuilder:validation:Optional + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // Boolean value that indicates whether quarantine policy is enabled. + // +kubebuilder:validation:Optional + QuarantinePolicyEnabled *bool `json:"quarantinePolicyEnabled,omitempty" tf:"quarantine_policy_enabled,omitempty"` + + // The name of the resource group in which to create the Container Registry. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A retention_policy block as documented below. + // +kubebuilder:validation:Optional + RetentionPolicy []RetentionPolicyParameters `json:"retentionPolicy,omitempty" tf:"retention_policy,omitempty"` + + // The SKU name of the container registry. Possible values are Basic, Standard and Premium. + // +kubebuilder:validation:Optional + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A trust_policy block as documented below. + // +kubebuilder:validation:Optional + TrustPolicy []TrustPolicyParameters `json:"trustPolicy,omitempty" tf:"trust_policy,omitempty"` + + // Whether zone redundancy is enabled for this Container Registry? Changing this forces a new resource to be created. Defaults to false. + // +kubebuilder:validation:Optional + ZoneRedundancyEnabled *bool `json:"zoneRedundancyEnabled,omitempty" tf:"zone_redundancy_enabled,omitempty"` +} + +type RetentionPolicyInitParameters struct { + + // The number of days to retain an untagged manifest after which it gets purged. Default is 7. + Days *float64 `json:"days,omitempty" tf:"days"` + + // Boolean value that indicates whether the policy is enabled. 
+ Enabled *bool `json:"enabled,omitempty" tf:"enabled"` +} + +type RetentionPolicyObservation struct { + + // The number of days to retain an untagged manifest after which it gets purged. Default is 7. + Days *float64 `json:"days,omitempty" tf:"days,omitempty"` + + // Boolean value that indicates whether the policy is enabled. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type RetentionPolicyParameters struct { + + // The number of days to retain an untagged manifest after which it gets purged. Default is 7. + // +kubebuilder:validation:Optional + Days *float64 `json:"days,omitempty" tf:"days"` + + // Boolean value that indicates whether the policy is enabled. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled"` +} + +type TrustPolicyInitParameters struct { + + // Boolean value that indicates whether the policy is enabled. + Enabled *bool `json:"enabled,omitempty" tf:"enabled"` +} + +type TrustPolicyObservation struct { + + // Boolean value that indicates whether the policy is enabled. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type TrustPolicyParameters struct { + + // Boolean value that indicates whether the policy is enabled. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled"` +} + +type VirtualNetworkInitParameters struct { + + // The behaviour for requests matching this rule. At this time the only supported value is Allow + Action *string `json:"action,omitempty" tf:"action"` + + // The ID of the Container Registry. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id"` + + // Reference to a Subnet in network to populate subnetId. 
+ // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` +} + +type VirtualNetworkObservation struct { + + // The behaviour for requests matching this rule. At this time the only supported value is Allow + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The ID of the Container Registry. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` +} + +type VirtualNetworkParameters struct { + + // The behaviour for requests matching this rule. At this time the only supported value is Allow + // +kubebuilder:validation:Optional + Action *string `json:"action,omitempty" tf:"action"` + + // The ID of the Container Registry. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` +} + +// RegistrySpec defines the desired state of Registry +type RegistrySpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider RegistryParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. 
The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider RegistryInitParameters `json:"initProvider,omitempty"` +} + +// RegistryStatus defines the observed state of Registry. +type RegistryStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider RegistryObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Registry is the Schema for the Registrys API. Manages an Azure Container Registry. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type Registry struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.sku) || 
(has(self.initProvider) && has(self.initProvider.sku))",message="spec.forProvider.sku is a required parameter" + Spec RegistrySpec `json:"spec"` + Status RegistryStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// RegistryList contains a list of Registrys +type RegistryList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Registry `json:"items"` +} + +// Repository type metadata. +var ( + Registry_Kind = "Registry" + Registry_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Registry_Kind}.String() + Registry_KindAPIVersion = Registry_Kind + "." + CRDGroupVersion.String() + Registry_GroupVersionKind = CRDGroupVersion.WithKind(Registry_Kind) +) + +func init() { + SchemeBuilder.Register(&Registry{}, &RegistryList{}) +} diff --git a/apis/containerregistry/v1beta2/zz_tokenpassword_terraformed.go b/apis/containerregistry/v1beta2/zz_tokenpassword_terraformed.go new file mode 100755 index 000000000..8dd890ee2 --- /dev/null +++ b/apis/containerregistry/v1beta2/zz_tokenpassword_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this TokenPassword +func (mg *TokenPassword) GetTerraformResourceType() string { + return "azurerm_container_registry_token_password" +} + +// GetConnectionDetailsMapping for this TokenPassword +func (tr *TokenPassword) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"password1[*].value": "status.atProvider.password1[*].value", "password2[*].value": "status.atProvider.password2[*].value"} +} + +// GetObservation of this TokenPassword +func (tr *TokenPassword) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this TokenPassword +func (tr *TokenPassword) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this TokenPassword +func (tr *TokenPassword) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this TokenPassword +func (tr *TokenPassword) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this TokenPassword +func (tr *TokenPassword) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this TokenPassword 
+func (tr *TokenPassword) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this TokenPassword
+func (tr *TokenPassword) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this TokenPassword using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *TokenPassword) LateInitialize(attrs []byte) (bool, error) {
+	params := &TokenPasswordParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *TokenPassword) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/containerregistry/v1beta2/zz_tokenpassword_types.go b/apis/containerregistry/v1beta2/zz_tokenpassword_types.go new file mode 100755 index 000000000..79c3681a6 --- /dev/null +++ b/apis/containerregistry/v1beta2/zz_tokenpassword_types.go @@ -0,0 +1,175 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type Password1InitParameters struct { + + // The expiration date of the password in RFC3339 format. If not specified, the password never expires. Changing this forces a new resource to be created. + Expiry *string `json:"expiry,omitempty" tf:"expiry,omitempty"` +} + +type Password1Observation struct { + + // The expiration date of the password in RFC3339 format. If not specified, the password never expires. Changing this forces a new resource to be created. + Expiry *string `json:"expiry,omitempty" tf:"expiry,omitempty"` +} + +type Password1Parameters struct { + + // The expiration date of the password in RFC3339 format. If not specified, the password never expires. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Expiry *string `json:"expiry,omitempty" tf:"expiry,omitempty"` +} + +type Password2InitParameters struct { + + // The expiration date of the password in RFC3339 format. If not specified, the password never expires. Changing this forces a new resource to be created. 
+ Expiry *string `json:"expiry,omitempty" tf:"expiry,omitempty"` +} + +type Password2Observation struct { + + // The expiration date of the password in RFC3339 format. If not specified, the password never expires. Changing this forces a new resource to be created. + Expiry *string `json:"expiry,omitempty" tf:"expiry,omitempty"` +} + +type Password2Parameters struct { + + // The expiration date of the password in RFC3339 format. If not specified, the password never expires. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Expiry *string `json:"expiry,omitempty" tf:"expiry,omitempty"` +} + +type TokenPasswordInitParameters struct { + + // The ID of the Container Registry Token that this Container Registry Token Password resides in. Changing this forces a new Container Registry Token Password to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/containerregistry/v1beta1.Token + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + ContainerRegistryTokenID *string `json:"containerRegistryTokenId,omitempty" tf:"container_registry_token_id,omitempty"` + + // Reference to a Token in containerregistry to populate containerRegistryTokenId. + // +kubebuilder:validation:Optional + ContainerRegistryTokenIDRef *v1.Reference `json:"containerRegistryTokenIdRef,omitempty" tf:"-"` + + // Selector for a Token in containerregistry to populate containerRegistryTokenId. + // +kubebuilder:validation:Optional + ContainerRegistryTokenIDSelector *v1.Selector `json:"containerRegistryTokenIdSelector,omitempty" tf:"-"` + + // One password block as defined below. + Password1 *Password1InitParameters `json:"password1,omitempty" tf:"password1,omitempty"` + + // One password block as defined below. 
+ Password2 *Password2InitParameters `json:"password2,omitempty" tf:"password2,omitempty"` +} + +type TokenPasswordObservation struct { + + // The ID of the Container Registry Token that this Container Registry Token Password resides in. Changing this forces a new Container Registry Token Password to be created. + ContainerRegistryTokenID *string `json:"containerRegistryTokenId,omitempty" tf:"container_registry_token_id,omitempty"` + + // The ID of the Container Registry Token Password. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // One password block as defined below. + Password1 *Password1Observation `json:"password1,omitempty" tf:"password1,omitempty"` + + // One password block as defined below. + Password2 *Password2Observation `json:"password2,omitempty" tf:"password2,omitempty"` +} + +type TokenPasswordParameters struct { + + // The ID of the Container Registry Token that this Container Registry Token Password resides in. Changing this forces a new Container Registry Token Password to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/containerregistry/v1beta1.Token + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + ContainerRegistryTokenID *string `json:"containerRegistryTokenId,omitempty" tf:"container_registry_token_id,omitempty"` + + // Reference to a Token in containerregistry to populate containerRegistryTokenId. + // +kubebuilder:validation:Optional + ContainerRegistryTokenIDRef *v1.Reference `json:"containerRegistryTokenIdRef,omitempty" tf:"-"` + + // Selector for a Token in containerregistry to populate containerRegistryTokenId. + // +kubebuilder:validation:Optional + ContainerRegistryTokenIDSelector *v1.Selector `json:"containerRegistryTokenIdSelector,omitempty" tf:"-"` + + // One password block as defined below. 
+ // +kubebuilder:validation:Optional + Password1 *Password1Parameters `json:"password1,omitempty" tf:"password1,omitempty"` + + // One password block as defined below. + // +kubebuilder:validation:Optional + Password2 *Password2Parameters `json:"password2,omitempty" tf:"password2,omitempty"` +} + +// TokenPasswordSpec defines the desired state of TokenPassword +type TokenPasswordSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider TokenPasswordParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider TokenPasswordInitParameters `json:"initProvider,omitempty"` +} + +// TokenPasswordStatus defines the observed state of TokenPassword. +type TokenPasswordStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider TokenPasswordObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// TokenPassword is the Schema for the TokenPasswords API. Manages a Container Registry Token Password. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type TokenPassword struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.password1) || (has(self.initProvider) && has(self.initProvider.password1))",message="spec.forProvider.password1 is a required parameter" + Spec TokenPasswordSpec `json:"spec"` + Status TokenPasswordStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// TokenPasswordList contains a list of TokenPasswords +type TokenPasswordList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []TokenPassword `json:"items"` +} + +// Repository type metadata. +var ( + TokenPassword_Kind = "TokenPassword" + TokenPassword_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: TokenPassword_Kind}.String() + TokenPassword_KindAPIVersion = TokenPassword_Kind + "." 
+ CRDGroupVersion.String() + TokenPassword_GroupVersionKind = CRDGroupVersion.WithKind(TokenPassword_Kind) +) + +func init() { + SchemeBuilder.Register(&TokenPassword{}, &TokenPasswordList{}) +} diff --git a/apis/containerservice/v1beta1/zz_generated.conversion_spokes.go b/apis/containerservice/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..2f3475a64 --- /dev/null +++ b/apis/containerservice/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,74 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this KubernetesCluster to the hub type. +func (tr *KubernetesCluster) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the KubernetesCluster type. +func (tr *KubernetesCluster) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this KubernetesClusterNodePool to the hub type. 
+func (tr *KubernetesClusterNodePool) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the KubernetesClusterNodePool type. +func (tr *KubernetesClusterNodePool) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this KubernetesFleetManager to the hub type. +func (tr *KubernetesFleetManager) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the KubernetesFleetManager type. 
+func (tr *KubernetesFleetManager) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/containerservice/v1beta1/zz_generated.conversion_hubs.go b/apis/containerservice/v1beta2/zz_generated.conversion_hubs.go similarity index 96% rename from apis/containerservice/v1beta1/zz_generated.conversion_hubs.go rename to apis/containerservice/v1beta2/zz_generated.conversion_hubs.go index 1271cd938..d5d9e5d09 100755 --- a/apis/containerservice/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/containerservice/v1beta2/zz_generated.conversion_hubs.go @@ -4,7 +4,7 @@ // Code generated by upjet. DO NOT EDIT. -package v1beta1 +package v1beta2 // Hub marks this type as a conversion hub. func (tr *KubernetesCluster) Hub() {} diff --git a/apis/containerservice/v1beta2/zz_generated.deepcopy.go b/apis/containerservice/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..25da02bf1 --- /dev/null +++ b/apis/containerservice/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,9484 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *APIServerAccessProfileInitParameters) DeepCopyInto(out *APIServerAccessProfileInitParameters) { + *out = *in + if in.AuthorizedIPRanges != nil { + in, out := &in.AuthorizedIPRanges, &out.AuthorizedIPRanges + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.VnetIntegrationEnabled != nil { + in, out := &in.VnetIntegrationEnabled, &out.VnetIntegrationEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerAccessProfileInitParameters. +func (in *APIServerAccessProfileInitParameters) DeepCopy() *APIServerAccessProfileInitParameters { + if in == nil { + return nil + } + out := new(APIServerAccessProfileInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *APIServerAccessProfileObservation) DeepCopyInto(out *APIServerAccessProfileObservation) { + *out = *in + if in.AuthorizedIPRanges != nil { + in, out := &in.AuthorizedIPRanges, &out.AuthorizedIPRanges + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.VnetIntegrationEnabled != nil { + in, out := &in.VnetIntegrationEnabled, &out.VnetIntegrationEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerAccessProfileObservation. +func (in *APIServerAccessProfileObservation) DeepCopy() *APIServerAccessProfileObservation { + if in == nil { + return nil + } + out := new(APIServerAccessProfileObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *APIServerAccessProfileParameters) DeepCopyInto(out *APIServerAccessProfileParameters) { + *out = *in + if in.AuthorizedIPRanges != nil { + in, out := &in.AuthorizedIPRanges, &out.AuthorizedIPRanges + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.VnetIntegrationEnabled != nil { + in, out := &in.VnetIntegrationEnabled, &out.VnetIntegrationEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerAccessProfileParameters. +func (in *APIServerAccessProfileParameters) DeepCopy() *APIServerAccessProfileParameters { + if in == nil { + return nil + } + out := new(APIServerAccessProfileParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AciConnectorLinuxInitParameters) DeepCopyInto(out *AciConnectorLinuxInitParameters) { + *out = *in + if in.SubnetName != nil { + in, out := &in.SubnetName, &out.SubnetName + *out = new(string) + **out = **in + } + if in.SubnetNameRef != nil { + in, out := &in.SubnetNameRef, &out.SubnetNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetNameSelector != nil { + in, out := &in.SubnetNameSelector, &out.SubnetNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AciConnectorLinuxInitParameters. +func (in *AciConnectorLinuxInitParameters) DeepCopy() *AciConnectorLinuxInitParameters { + if in == nil { + return nil + } + out := new(AciConnectorLinuxInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AciConnectorLinuxObservation) DeepCopyInto(out *AciConnectorLinuxObservation) { + *out = *in + if in.ConnectorIdentity != nil { + in, out := &in.ConnectorIdentity, &out.ConnectorIdentity + *out = make([]ConnectorIdentityObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetName != nil { + in, out := &in.SubnetName, &out.SubnetName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AciConnectorLinuxObservation. +func (in *AciConnectorLinuxObservation) DeepCopy() *AciConnectorLinuxObservation { + if in == nil { + return nil + } + out := new(AciConnectorLinuxObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AciConnectorLinuxParameters) DeepCopyInto(out *AciConnectorLinuxParameters) { + *out = *in + if in.SubnetName != nil { + in, out := &in.SubnetName, &out.SubnetName + *out = new(string) + **out = **in + } + if in.SubnetNameRef != nil { + in, out := &in.SubnetNameRef, &out.SubnetNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetNameSelector != nil { + in, out := &in.SubnetNameSelector, &out.SubnetNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AciConnectorLinuxParameters. +func (in *AciConnectorLinuxParameters) DeepCopy() *AciConnectorLinuxParameters { + if in == nil { + return nil + } + out := new(AciConnectorLinuxParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllowedHostPortsInitParameters) DeepCopyInto(out *AllowedHostPortsInitParameters) { + *out = *in + if in.PortEnd != nil { + in, out := &in.PortEnd, &out.PortEnd + *out = new(float64) + **out = **in + } + if in.PortStart != nil { + in, out := &in.PortStart, &out.PortStart + *out = new(float64) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedHostPortsInitParameters. +func (in *AllowedHostPortsInitParameters) DeepCopy() *AllowedHostPortsInitParameters { + if in == nil { + return nil + } + out := new(AllowedHostPortsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AllowedHostPortsObservation) DeepCopyInto(out *AllowedHostPortsObservation) { + *out = *in + if in.PortEnd != nil { + in, out := &in.PortEnd, &out.PortEnd + *out = new(float64) + **out = **in + } + if in.PortStart != nil { + in, out := &in.PortStart, &out.PortStart + *out = new(float64) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedHostPortsObservation. +func (in *AllowedHostPortsObservation) DeepCopy() *AllowedHostPortsObservation { + if in == nil { + return nil + } + out := new(AllowedHostPortsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllowedHostPortsParameters) DeepCopyInto(out *AllowedHostPortsParameters) { + *out = *in + if in.PortEnd != nil { + in, out := &in.PortEnd, &out.PortEnd + *out = new(float64) + **out = **in + } + if in.PortStart != nil { + in, out := &in.PortStart, &out.PortStart + *out = new(float64) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedHostPortsParameters. +func (in *AllowedHostPortsParameters) DeepCopy() *AllowedHostPortsParameters { + if in == nil { + return nil + } + out := new(AllowedHostPortsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AllowedInitParameters) DeepCopyInto(out *AllowedInitParameters) { + *out = *in + if in.Day != nil { + in, out := &in.Day, &out.Day + *out = new(string) + **out = **in + } + if in.Hours != nil { + in, out := &in.Hours, &out.Hours + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedInitParameters. +func (in *AllowedInitParameters) DeepCopy() *AllowedInitParameters { + if in == nil { + return nil + } + out := new(AllowedInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllowedObservation) DeepCopyInto(out *AllowedObservation) { + *out = *in + if in.Day != nil { + in, out := &in.Day, &out.Day + *out = new(string) + **out = **in + } + if in.Hours != nil { + in, out := &in.Hours, &out.Hours + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedObservation. +func (in *AllowedObservation) DeepCopy() *AllowedObservation { + if in == nil { + return nil + } + out := new(AllowedObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AllowedParameters) DeepCopyInto(out *AllowedParameters) { + *out = *in + if in.Day != nil { + in, out := &in.Day, &out.Day + *out = new(string) + **out = **in + } + if in.Hours != nil { + in, out := &in.Hours, &out.Hours + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedParameters. +func (in *AllowedParameters) DeepCopy() *AllowedParameters { + if in == nil { + return nil + } + out := new(AllowedParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoScalerProfileInitParameters) DeepCopyInto(out *AutoScalerProfileInitParameters) { + *out = *in + if in.BalanceSimilarNodeGroups != nil { + in, out := &in.BalanceSimilarNodeGroups, &out.BalanceSimilarNodeGroups + *out = new(bool) + **out = **in + } + if in.EmptyBulkDeleteMax != nil { + in, out := &in.EmptyBulkDeleteMax, &out.EmptyBulkDeleteMax + *out = new(string) + **out = **in + } + if in.Expander != nil { + in, out := &in.Expander, &out.Expander + *out = new(string) + **out = **in + } + if in.MaxGracefulTerminationSec != nil { + in, out := &in.MaxGracefulTerminationSec, &out.MaxGracefulTerminationSec + *out = new(string) + **out = **in + } + if in.MaxNodeProvisioningTime != nil { + in, out := &in.MaxNodeProvisioningTime, &out.MaxNodeProvisioningTime + *out = new(string) + **out = **in + } + if in.MaxUnreadyNodes != nil { + in, out := &in.MaxUnreadyNodes, &out.MaxUnreadyNodes + *out = new(float64) + **out = **in + } + if in.MaxUnreadyPercentage != nil { + in, out := &in.MaxUnreadyPercentage, &out.MaxUnreadyPercentage + *out = new(float64) + **out = **in + } + if in.NewPodScaleUpDelay != nil { + in, out := &in.NewPodScaleUpDelay, &out.NewPodScaleUpDelay + *out = 
new(string) + **out = **in + } + if in.ScaleDownDelayAfterAdd != nil { + in, out := &in.ScaleDownDelayAfterAdd, &out.ScaleDownDelayAfterAdd + *out = new(string) + **out = **in + } + if in.ScaleDownDelayAfterDelete != nil { + in, out := &in.ScaleDownDelayAfterDelete, &out.ScaleDownDelayAfterDelete + *out = new(string) + **out = **in + } + if in.ScaleDownDelayAfterFailure != nil { + in, out := &in.ScaleDownDelayAfterFailure, &out.ScaleDownDelayAfterFailure + *out = new(string) + **out = **in + } + if in.ScaleDownUnneeded != nil { + in, out := &in.ScaleDownUnneeded, &out.ScaleDownUnneeded + *out = new(string) + **out = **in + } + if in.ScaleDownUnready != nil { + in, out := &in.ScaleDownUnready, &out.ScaleDownUnready + *out = new(string) + **out = **in + } + if in.ScaleDownUtilizationThreshold != nil { + in, out := &in.ScaleDownUtilizationThreshold, &out.ScaleDownUtilizationThreshold + *out = new(string) + **out = **in + } + if in.ScanInterval != nil { + in, out := &in.ScanInterval, &out.ScanInterval + *out = new(string) + **out = **in + } + if in.SkipNodesWithLocalStorage != nil { + in, out := &in.SkipNodesWithLocalStorage, &out.SkipNodesWithLocalStorage + *out = new(bool) + **out = **in + } + if in.SkipNodesWithSystemPods != nil { + in, out := &in.SkipNodesWithSystemPods, &out.SkipNodesWithSystemPods + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoScalerProfileInitParameters. +func (in *AutoScalerProfileInitParameters) DeepCopy() *AutoScalerProfileInitParameters { + if in == nil { + return nil + } + out := new(AutoScalerProfileInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutoScalerProfileObservation) DeepCopyInto(out *AutoScalerProfileObservation) { + *out = *in + if in.BalanceSimilarNodeGroups != nil { + in, out := &in.BalanceSimilarNodeGroups, &out.BalanceSimilarNodeGroups + *out = new(bool) + **out = **in + } + if in.EmptyBulkDeleteMax != nil { + in, out := &in.EmptyBulkDeleteMax, &out.EmptyBulkDeleteMax + *out = new(string) + **out = **in + } + if in.Expander != nil { + in, out := &in.Expander, &out.Expander + *out = new(string) + **out = **in + } + if in.MaxGracefulTerminationSec != nil { + in, out := &in.MaxGracefulTerminationSec, &out.MaxGracefulTerminationSec + *out = new(string) + **out = **in + } + if in.MaxNodeProvisioningTime != nil { + in, out := &in.MaxNodeProvisioningTime, &out.MaxNodeProvisioningTime + *out = new(string) + **out = **in + } + if in.MaxUnreadyNodes != nil { + in, out := &in.MaxUnreadyNodes, &out.MaxUnreadyNodes + *out = new(float64) + **out = **in + } + if in.MaxUnreadyPercentage != nil { + in, out := &in.MaxUnreadyPercentage, &out.MaxUnreadyPercentage + *out = new(float64) + **out = **in + } + if in.NewPodScaleUpDelay != nil { + in, out := &in.NewPodScaleUpDelay, &out.NewPodScaleUpDelay + *out = new(string) + **out = **in + } + if in.ScaleDownDelayAfterAdd != nil { + in, out := &in.ScaleDownDelayAfterAdd, &out.ScaleDownDelayAfterAdd + *out = new(string) + **out = **in + } + if in.ScaleDownDelayAfterDelete != nil { + in, out := &in.ScaleDownDelayAfterDelete, &out.ScaleDownDelayAfterDelete + *out = new(string) + **out = **in + } + if in.ScaleDownDelayAfterFailure != nil { + in, out := &in.ScaleDownDelayAfterFailure, &out.ScaleDownDelayAfterFailure + *out = new(string) + **out = **in + } + if in.ScaleDownUnneeded != nil { + in, out := &in.ScaleDownUnneeded, &out.ScaleDownUnneeded + *out = new(string) + **out = **in + } + if in.ScaleDownUnready != nil { + in, out := &in.ScaleDownUnready, &out.ScaleDownUnready + *out = new(string) + **out = **in + } + if in.ScaleDownUtilizationThreshold != nil 
{ + in, out := &in.ScaleDownUtilizationThreshold, &out.ScaleDownUtilizationThreshold + *out = new(string) + **out = **in + } + if in.ScanInterval != nil { + in, out := &in.ScanInterval, &out.ScanInterval + *out = new(string) + **out = **in + } + if in.SkipNodesWithLocalStorage != nil { + in, out := &in.SkipNodesWithLocalStorage, &out.SkipNodesWithLocalStorage + *out = new(bool) + **out = **in + } + if in.SkipNodesWithSystemPods != nil { + in, out := &in.SkipNodesWithSystemPods, &out.SkipNodesWithSystemPods + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoScalerProfileObservation. +func (in *AutoScalerProfileObservation) DeepCopy() *AutoScalerProfileObservation { + if in == nil { + return nil + } + out := new(AutoScalerProfileObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoScalerProfileParameters) DeepCopyInto(out *AutoScalerProfileParameters) { + *out = *in + if in.BalanceSimilarNodeGroups != nil { + in, out := &in.BalanceSimilarNodeGroups, &out.BalanceSimilarNodeGroups + *out = new(bool) + **out = **in + } + if in.EmptyBulkDeleteMax != nil { + in, out := &in.EmptyBulkDeleteMax, &out.EmptyBulkDeleteMax + *out = new(string) + **out = **in + } + if in.Expander != nil { + in, out := &in.Expander, &out.Expander + *out = new(string) + **out = **in + } + if in.MaxGracefulTerminationSec != nil { + in, out := &in.MaxGracefulTerminationSec, &out.MaxGracefulTerminationSec + *out = new(string) + **out = **in + } + if in.MaxNodeProvisioningTime != nil { + in, out := &in.MaxNodeProvisioningTime, &out.MaxNodeProvisioningTime + *out = new(string) + **out = **in + } + if in.MaxUnreadyNodes != nil { + in, out := &in.MaxUnreadyNodes, &out.MaxUnreadyNodes + *out = new(float64) + **out = **in + } + if in.MaxUnreadyPercentage != nil { + in, out := 
&in.MaxUnreadyPercentage, &out.MaxUnreadyPercentage + *out = new(float64) + **out = **in + } + if in.NewPodScaleUpDelay != nil { + in, out := &in.NewPodScaleUpDelay, &out.NewPodScaleUpDelay + *out = new(string) + **out = **in + } + if in.ScaleDownDelayAfterAdd != nil { + in, out := &in.ScaleDownDelayAfterAdd, &out.ScaleDownDelayAfterAdd + *out = new(string) + **out = **in + } + if in.ScaleDownDelayAfterDelete != nil { + in, out := &in.ScaleDownDelayAfterDelete, &out.ScaleDownDelayAfterDelete + *out = new(string) + **out = **in + } + if in.ScaleDownDelayAfterFailure != nil { + in, out := &in.ScaleDownDelayAfterFailure, &out.ScaleDownDelayAfterFailure + *out = new(string) + **out = **in + } + if in.ScaleDownUnneeded != nil { + in, out := &in.ScaleDownUnneeded, &out.ScaleDownUnneeded + *out = new(string) + **out = **in + } + if in.ScaleDownUnready != nil { + in, out := &in.ScaleDownUnready, &out.ScaleDownUnready + *out = new(string) + **out = **in + } + if in.ScaleDownUtilizationThreshold != nil { + in, out := &in.ScaleDownUtilizationThreshold, &out.ScaleDownUtilizationThreshold + *out = new(string) + **out = **in + } + if in.ScanInterval != nil { + in, out := &in.ScanInterval, &out.ScanInterval + *out = new(string) + **out = **in + } + if in.SkipNodesWithLocalStorage != nil { + in, out := &in.SkipNodesWithLocalStorage, &out.SkipNodesWithLocalStorage + *out = new(bool) + **out = **in + } + if in.SkipNodesWithSystemPods != nil { + in, out := &in.SkipNodesWithSystemPods, &out.SkipNodesWithSystemPods + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoScalerProfileParameters. +func (in *AutoScalerProfileParameters) DeepCopy() *AutoScalerProfileParameters { + if in == nil { + return nil + } + out := new(AutoScalerProfileParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AzureActiveDirectoryRoleBasedAccessControlInitParameters) DeepCopyInto(out *AzureActiveDirectoryRoleBasedAccessControlInitParameters) { + *out = *in + if in.AdminGroupObjectIds != nil { + in, out := &in.AdminGroupObjectIds, &out.AdminGroupObjectIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AzureRbacEnabled != nil { + in, out := &in.AzureRbacEnabled, &out.AzureRbacEnabled + *out = new(bool) + **out = **in + } + if in.ClientAppID != nil { + in, out := &in.ClientAppID, &out.ClientAppID + *out = new(string) + **out = **in + } + if in.Managed != nil { + in, out := &in.Managed, &out.Managed + *out = new(bool) + **out = **in + } + if in.ServerAppID != nil { + in, out := &in.ServerAppID, &out.ServerAppID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureActiveDirectoryRoleBasedAccessControlInitParameters. +func (in *AzureActiveDirectoryRoleBasedAccessControlInitParameters) DeepCopy() *AzureActiveDirectoryRoleBasedAccessControlInitParameters { + if in == nil { + return nil + } + out := new(AzureActiveDirectoryRoleBasedAccessControlInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AzureActiveDirectoryRoleBasedAccessControlObservation) DeepCopyInto(out *AzureActiveDirectoryRoleBasedAccessControlObservation) { + *out = *in + if in.AdminGroupObjectIds != nil { + in, out := &in.AdminGroupObjectIds, &out.AdminGroupObjectIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AzureRbacEnabled != nil { + in, out := &in.AzureRbacEnabled, &out.AzureRbacEnabled + *out = new(bool) + **out = **in + } + if in.ClientAppID != nil { + in, out := &in.ClientAppID, &out.ClientAppID + *out = new(string) + **out = **in + } + if in.Managed != nil { + in, out := &in.Managed, &out.Managed + *out = new(bool) + **out = **in + } + if in.ServerAppID != nil { + in, out := &in.ServerAppID, &out.ServerAppID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureActiveDirectoryRoleBasedAccessControlObservation. +func (in *AzureActiveDirectoryRoleBasedAccessControlObservation) DeepCopy() *AzureActiveDirectoryRoleBasedAccessControlObservation { + if in == nil { + return nil + } + out := new(AzureActiveDirectoryRoleBasedAccessControlObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AzureActiveDirectoryRoleBasedAccessControlParameters) DeepCopyInto(out *AzureActiveDirectoryRoleBasedAccessControlParameters) { + *out = *in + if in.AdminGroupObjectIds != nil { + in, out := &in.AdminGroupObjectIds, &out.AdminGroupObjectIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AzureRbacEnabled != nil { + in, out := &in.AzureRbacEnabled, &out.AzureRbacEnabled + *out = new(bool) + **out = **in + } + if in.ClientAppID != nil { + in, out := &in.ClientAppID, &out.ClientAppID + *out = new(string) + **out = **in + } + if in.Managed != nil { + in, out := &in.Managed, &out.Managed + *out = new(bool) + **out = **in + } + if in.ServerAppID != nil { + in, out := &in.ServerAppID, &out.ServerAppID + *out = new(string) + **out = **in + } + if in.ServerAppSecretSecretRef != nil { + in, out := &in.ServerAppSecretSecretRef, &out.ServerAppSecretSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureActiveDirectoryRoleBasedAccessControlParameters. +func (in *AzureActiveDirectoryRoleBasedAccessControlParameters) DeepCopy() *AzureActiveDirectoryRoleBasedAccessControlParameters { + if in == nil { + return nil + } + out := new(AzureActiveDirectoryRoleBasedAccessControlParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfidentialComputingInitParameters) DeepCopyInto(out *ConfidentialComputingInitParameters) { + *out = *in + if in.SgxQuoteHelperEnabled != nil { + in, out := &in.SgxQuoteHelperEnabled, &out.SgxQuoteHelperEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfidentialComputingInitParameters. +func (in *ConfidentialComputingInitParameters) DeepCopy() *ConfidentialComputingInitParameters { + if in == nil { + return nil + } + out := new(ConfidentialComputingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfidentialComputingObservation) DeepCopyInto(out *ConfidentialComputingObservation) { + *out = *in + if in.SgxQuoteHelperEnabled != nil { + in, out := &in.SgxQuoteHelperEnabled, &out.SgxQuoteHelperEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfidentialComputingObservation. +func (in *ConfidentialComputingObservation) DeepCopy() *ConfidentialComputingObservation { + if in == nil { + return nil + } + out := new(ConfidentialComputingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfidentialComputingParameters) DeepCopyInto(out *ConfidentialComputingParameters) { + *out = *in + if in.SgxQuoteHelperEnabled != nil { + in, out := &in.SgxQuoteHelperEnabled, &out.SgxQuoteHelperEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfidentialComputingParameters. 
+func (in *ConfidentialComputingParameters) DeepCopy() *ConfidentialComputingParameters { + if in == nil { + return nil + } + out := new(ConfidentialComputingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectorIdentityInitParameters) DeepCopyInto(out *ConnectorIdentityInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectorIdentityInitParameters. +func (in *ConnectorIdentityInitParameters) DeepCopy() *ConnectorIdentityInitParameters { + if in == nil { + return nil + } + out := new(ConnectorIdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectorIdentityObservation) DeepCopyInto(out *ConnectorIdentityObservation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ObjectID != nil { + in, out := &in.ObjectID, &out.ObjectID + *out = new(string) + **out = **in + } + if in.UserAssignedIdentityID != nil { + in, out := &in.UserAssignedIdentityID, &out.UserAssignedIdentityID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectorIdentityObservation. +func (in *ConnectorIdentityObservation) DeepCopy() *ConnectorIdentityObservation { + if in == nil { + return nil + } + out := new(ConnectorIdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConnectorIdentityParameters) DeepCopyInto(out *ConnectorIdentityParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectorIdentityParameters. +func (in *ConnectorIdentityParameters) DeepCopy() *ConnectorIdentityParameters { + if in == nil { + return nil + } + out := new(ConnectorIdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DefaultNodePoolInitParameters) DeepCopyInto(out *DefaultNodePoolInitParameters) { + *out = *in + if in.CapacityReservationGroupID != nil { + in, out := &in.CapacityReservationGroupID, &out.CapacityReservationGroupID + *out = new(string) + **out = **in + } + if in.CustomCATrustEnabled != nil { + in, out := &in.CustomCATrustEnabled, &out.CustomCATrustEnabled + *out = new(bool) + **out = **in + } + if in.EnableAutoScaling != nil { + in, out := &in.EnableAutoScaling, &out.EnableAutoScaling + *out = new(bool) + **out = **in + } + if in.EnableHostEncryption != nil { + in, out := &in.EnableHostEncryption, &out.EnableHostEncryption + *out = new(bool) + **out = **in + } + if in.EnableNodePublicIP != nil { + in, out := &in.EnableNodePublicIP, &out.EnableNodePublicIP + *out = new(bool) + **out = **in + } + if in.FipsEnabled != nil { + in, out := &in.FipsEnabled, &out.FipsEnabled + *out = new(bool) + **out = **in + } + if in.GpuInstance != nil { + in, out := &in.GpuInstance, &out.GpuInstance + *out = new(string) + **out = **in + } + if in.HostGroupID != nil { + in, out := &in.HostGroupID, &out.HostGroupID + *out = new(string) + **out = **in + } + if in.KubeletConfig != nil { + in, out := &in.KubeletConfig, &out.KubeletConfig + *out = new(KubeletConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.KubeletDiskType != nil { + in, out := &in.KubeletDiskType, &out.KubeletDiskType + *out = new(string) + **out = **in + } + if 
in.LinuxOsConfig != nil { + in, out := &in.LinuxOsConfig, &out.LinuxOsConfig + *out = new(LinuxOsConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MaxCount != nil { + in, out := &in.MaxCount, &out.MaxCount + *out = new(float64) + **out = **in + } + if in.MaxPods != nil { + in, out := &in.MaxPods, &out.MaxPods + *out = new(float64) + **out = **in + } + if in.MessageOfTheDay != nil { + in, out := &in.MessageOfTheDay, &out.MessageOfTheDay + *out = new(string) + **out = **in + } + if in.MinCount != nil { + in, out := &in.MinCount, &out.MinCount + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NodeCount != nil { + in, out := &in.NodeCount, &out.NodeCount + *out = new(float64) + **out = **in + } + if in.NodeLabels != nil { + in, out := &in.NodeLabels, &out.NodeLabels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.NodeNetworkProfile != nil { + in, out := &in.NodeNetworkProfile, &out.NodeNetworkProfile + *out = new(NodeNetworkProfileInitParameters) + (*in).DeepCopyInto(*out) + } + if in.NodePublicIPPrefixID != nil { + in, out := &in.NodePublicIPPrefixID, &out.NodePublicIPPrefixID + *out = new(string) + **out = **in + } + if in.NodeTaints != nil { + in, out := &in.NodeTaints, &out.NodeTaints + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OnlyCriticalAddonsEnabled != nil { + in, out := &in.OnlyCriticalAddonsEnabled, &out.OnlyCriticalAddonsEnabled + *out = new(bool) + **out = **in + } + if in.OrchestratorVersion != nil { + in, out := &in.OrchestratorVersion, &out.OrchestratorVersion + *out = new(string) + **out = **in + } + if 
in.OsDiskSizeGb != nil { + in, out := &in.OsDiskSizeGb, &out.OsDiskSizeGb + *out = new(float64) + **out = **in + } + if in.OsDiskType != nil { + in, out := &in.OsDiskType, &out.OsDiskType + *out = new(string) + **out = **in + } + if in.OsSku != nil { + in, out := &in.OsSku, &out.OsSku + *out = new(string) + **out = **in + } + if in.PodSubnetID != nil { + in, out := &in.PodSubnetID, &out.PodSubnetID + *out = new(string) + **out = **in + } + if in.PodSubnetIDRef != nil { + in, out := &in.PodSubnetIDRef, &out.PodSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PodSubnetIDSelector != nil { + in, out := &in.PodSubnetIDSelector, &out.PodSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ProximityPlacementGroupID != nil { + in, out := &in.ProximityPlacementGroupID, &out.ProximityPlacementGroupID + *out = new(string) + **out = **in + } + if in.ScaleDownMode != nil { + in, out := &in.ScaleDownMode, &out.ScaleDownMode + *out = new(string) + **out = **in + } + if in.SnapshotID != nil { + in, out := &in.SnapshotID, &out.SnapshotID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TemporaryNameForRotation != nil { + in, out := &in.TemporaryNameForRotation, &out.TemporaryNameForRotation + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.UltraSsdEnabled != nil { + in, out := &in.UltraSsdEnabled, &out.UltraSsdEnabled + *out = new(bool) + **out = **in + } + if in.UpgradeSettings != nil { + in, out := &in.UpgradeSettings, &out.UpgradeSettings + *out = new(UpgradeSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if 
in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } + if in.VnetSubnetID != nil { + in, out := &in.VnetSubnetID, &out.VnetSubnetID + *out = new(string) + **out = **in + } + if in.VnetSubnetIDRef != nil { + in, out := &in.VnetSubnetIDRef, &out.VnetSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VnetSubnetIDSelector != nil { + in, out := &in.VnetSubnetIDSelector, &out.VnetSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.WorkloadRuntime != nil { + in, out := &in.WorkloadRuntime, &out.WorkloadRuntime + *out = new(string) + **out = **in + } + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultNodePoolInitParameters. +func (in *DefaultNodePoolInitParameters) DeepCopy() *DefaultNodePoolInitParameters { + if in == nil { + return nil + } + out := new(DefaultNodePoolInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DefaultNodePoolObservation) DeepCopyInto(out *DefaultNodePoolObservation) { + *out = *in + if in.CapacityReservationGroupID != nil { + in, out := &in.CapacityReservationGroupID, &out.CapacityReservationGroupID + *out = new(string) + **out = **in + } + if in.CustomCATrustEnabled != nil { + in, out := &in.CustomCATrustEnabled, &out.CustomCATrustEnabled + *out = new(bool) + **out = **in + } + if in.EnableAutoScaling != nil { + in, out := &in.EnableAutoScaling, &out.EnableAutoScaling + *out = new(bool) + **out = **in + } + if in.EnableHostEncryption != nil { + in, out := &in.EnableHostEncryption, &out.EnableHostEncryption + *out = new(bool) + **out = **in + } + if in.EnableNodePublicIP != nil { + in, out := &in.EnableNodePublicIP, &out.EnableNodePublicIP + *out = new(bool) + **out = **in + } + if in.FipsEnabled != nil { + in, out := &in.FipsEnabled, &out.FipsEnabled + *out = new(bool) + **out = **in + } + if in.GpuInstance != nil { + in, out := &in.GpuInstance, &out.GpuInstance + *out = new(string) + **out = **in + } + if in.HostGroupID != nil { + in, out := &in.HostGroupID, &out.HostGroupID + *out = new(string) + **out = **in + } + if in.KubeletConfig != nil { + in, out := &in.KubeletConfig, &out.KubeletConfig + *out = new(KubeletConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.KubeletDiskType != nil { + in, out := &in.KubeletDiskType, &out.KubeletDiskType + *out = new(string) + **out = **in + } + if in.LinuxOsConfig != nil { + in, out := &in.LinuxOsConfig, &out.LinuxOsConfig + *out = new(LinuxOsConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.MaxCount != nil { + in, out := &in.MaxCount, &out.MaxCount + *out = new(float64) + **out = **in + } + if in.MaxPods != nil { + in, out := &in.MaxPods, &out.MaxPods + *out = new(float64) + **out = **in + } + if in.MessageOfTheDay != nil { + in, out := &in.MessageOfTheDay, &out.MessageOfTheDay + *out = new(string) + **out = **in + } + if in.MinCount != nil { + in, out := &in.MinCount, &out.MinCount + 
*out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NodeCount != nil { + in, out := &in.NodeCount, &out.NodeCount + *out = new(float64) + **out = **in + } + if in.NodeLabels != nil { + in, out := &in.NodeLabels, &out.NodeLabels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.NodeNetworkProfile != nil { + in, out := &in.NodeNetworkProfile, &out.NodeNetworkProfile + *out = new(NodeNetworkProfileObservation) + (*in).DeepCopyInto(*out) + } + if in.NodePublicIPPrefixID != nil { + in, out := &in.NodePublicIPPrefixID, &out.NodePublicIPPrefixID + *out = new(string) + **out = **in + } + if in.NodeTaints != nil { + in, out := &in.NodeTaints, &out.NodeTaints + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OnlyCriticalAddonsEnabled != nil { + in, out := &in.OnlyCriticalAddonsEnabled, &out.OnlyCriticalAddonsEnabled + *out = new(bool) + **out = **in + } + if in.OrchestratorVersion != nil { + in, out := &in.OrchestratorVersion, &out.OrchestratorVersion + *out = new(string) + **out = **in + } + if in.OsDiskSizeGb != nil { + in, out := &in.OsDiskSizeGb, &out.OsDiskSizeGb + *out = new(float64) + **out = **in + } + if in.OsDiskType != nil { + in, out := &in.OsDiskType, &out.OsDiskType + *out = new(string) + **out = **in + } + if in.OsSku != nil { + in, out := &in.OsSku, &out.OsSku + *out = new(string) + **out = **in + } + if in.PodSubnetID != nil { + in, out := &in.PodSubnetID, &out.PodSubnetID + *out = new(string) + **out = **in + } + if in.ProximityPlacementGroupID != nil { + in, out := &in.ProximityPlacementGroupID, &out.ProximityPlacementGroupID + 
*out = new(string) + **out = **in + } + if in.ScaleDownMode != nil { + in, out := &in.ScaleDownMode, &out.ScaleDownMode + *out = new(string) + **out = **in + } + if in.SnapshotID != nil { + in, out := &in.SnapshotID, &out.SnapshotID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TemporaryNameForRotation != nil { + in, out := &in.TemporaryNameForRotation, &out.TemporaryNameForRotation + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.UltraSsdEnabled != nil { + in, out := &in.UltraSsdEnabled, &out.UltraSsdEnabled + *out = new(bool) + **out = **in + } + if in.UpgradeSettings != nil { + in, out := &in.UpgradeSettings, &out.UpgradeSettings + *out = new(UpgradeSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } + if in.VnetSubnetID != nil { + in, out := &in.VnetSubnetID, &out.VnetSubnetID + *out = new(string) + **out = **in + } + if in.WorkloadRuntime != nil { + in, out := &in.WorkloadRuntime, &out.WorkloadRuntime + *out = new(string) + **out = **in + } + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultNodePoolObservation. 
+func (in *DefaultNodePoolObservation) DeepCopy() *DefaultNodePoolObservation { + if in == nil { + return nil + } + out := new(DefaultNodePoolObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DefaultNodePoolParameters) DeepCopyInto(out *DefaultNodePoolParameters) { + *out = *in + if in.CapacityReservationGroupID != nil { + in, out := &in.CapacityReservationGroupID, &out.CapacityReservationGroupID + *out = new(string) + **out = **in + } + if in.CustomCATrustEnabled != nil { + in, out := &in.CustomCATrustEnabled, &out.CustomCATrustEnabled + *out = new(bool) + **out = **in + } + if in.EnableAutoScaling != nil { + in, out := &in.EnableAutoScaling, &out.EnableAutoScaling + *out = new(bool) + **out = **in + } + if in.EnableHostEncryption != nil { + in, out := &in.EnableHostEncryption, &out.EnableHostEncryption + *out = new(bool) + **out = **in + } + if in.EnableNodePublicIP != nil { + in, out := &in.EnableNodePublicIP, &out.EnableNodePublicIP + *out = new(bool) + **out = **in + } + if in.FipsEnabled != nil { + in, out := &in.FipsEnabled, &out.FipsEnabled + *out = new(bool) + **out = **in + } + if in.GpuInstance != nil { + in, out := &in.GpuInstance, &out.GpuInstance + *out = new(string) + **out = **in + } + if in.HostGroupID != nil { + in, out := &in.HostGroupID, &out.HostGroupID + *out = new(string) + **out = **in + } + if in.KubeletConfig != nil { + in, out := &in.KubeletConfig, &out.KubeletConfig + *out = new(KubeletConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.KubeletDiskType != nil { + in, out := &in.KubeletDiskType, &out.KubeletDiskType + *out = new(string) + **out = **in + } + if in.LinuxOsConfig != nil { + in, out := &in.LinuxOsConfig, &out.LinuxOsConfig + *out = new(LinuxOsConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.MaxCount != nil { + in, out := &in.MaxCount, &out.MaxCount + *out = new(float64) + **out = **in 
+ } + if in.MaxPods != nil { + in, out := &in.MaxPods, &out.MaxPods + *out = new(float64) + **out = **in + } + if in.MessageOfTheDay != nil { + in, out := &in.MessageOfTheDay, &out.MessageOfTheDay + *out = new(string) + **out = **in + } + if in.MinCount != nil { + in, out := &in.MinCount, &out.MinCount + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NodeCount != nil { + in, out := &in.NodeCount, &out.NodeCount + *out = new(float64) + **out = **in + } + if in.NodeLabels != nil { + in, out := &in.NodeLabels, &out.NodeLabels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.NodeNetworkProfile != nil { + in, out := &in.NodeNetworkProfile, &out.NodeNetworkProfile + *out = new(NodeNetworkProfileParameters) + (*in).DeepCopyInto(*out) + } + if in.NodePublicIPPrefixID != nil { + in, out := &in.NodePublicIPPrefixID, &out.NodePublicIPPrefixID + *out = new(string) + **out = **in + } + if in.NodeTaints != nil { + in, out := &in.NodeTaints, &out.NodeTaints + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OnlyCriticalAddonsEnabled != nil { + in, out := &in.OnlyCriticalAddonsEnabled, &out.OnlyCriticalAddonsEnabled + *out = new(bool) + **out = **in + } + if in.OrchestratorVersion != nil { + in, out := &in.OrchestratorVersion, &out.OrchestratorVersion + *out = new(string) + **out = **in + } + if in.OsDiskSizeGb != nil { + in, out := &in.OsDiskSizeGb, &out.OsDiskSizeGb + *out = new(float64) + **out = **in + } + if in.OsDiskType != nil { + in, out := &in.OsDiskType, &out.OsDiskType + *out = new(string) + **out = **in + } + if in.OsSku != nil { + in, 
out := &in.OsSku, &out.OsSku + *out = new(string) + **out = **in + } + if in.PodSubnetID != nil { + in, out := &in.PodSubnetID, &out.PodSubnetID + *out = new(string) + **out = **in + } + if in.PodSubnetIDRef != nil { + in, out := &in.PodSubnetIDRef, &out.PodSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PodSubnetIDSelector != nil { + in, out := &in.PodSubnetIDSelector, &out.PodSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ProximityPlacementGroupID != nil { + in, out := &in.ProximityPlacementGroupID, &out.ProximityPlacementGroupID + *out = new(string) + **out = **in + } + if in.ScaleDownMode != nil { + in, out := &in.ScaleDownMode, &out.ScaleDownMode + *out = new(string) + **out = **in + } + if in.SnapshotID != nil { + in, out := &in.SnapshotID, &out.SnapshotID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TemporaryNameForRotation != nil { + in, out := &in.TemporaryNameForRotation, &out.TemporaryNameForRotation + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.UltraSsdEnabled != nil { + in, out := &in.UltraSsdEnabled, &out.UltraSsdEnabled + *out = new(bool) + **out = **in + } + if in.UpgradeSettings != nil { + in, out := &in.UpgradeSettings, &out.UpgradeSettings + *out = new(UpgradeSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } + if in.VnetSubnetID != nil { + in, out := &in.VnetSubnetID, &out.VnetSubnetID + *out = new(string) + **out = **in + } + if in.VnetSubnetIDRef != nil { + in, out := 
&in.VnetSubnetIDRef, &out.VnetSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VnetSubnetIDSelector != nil { + in, out := &in.VnetSubnetIDSelector, &out.VnetSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.WorkloadRuntime != nil { + in, out := &in.WorkloadRuntime, &out.WorkloadRuntime + *out = new(string) + **out = **in + } + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultNodePoolParameters. +func (in *DefaultNodePoolParameters) DeepCopy() *DefaultNodePoolParameters { + if in == nil { + return nil + } + out := new(DefaultNodePoolParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GmsaInitParameters) DeepCopyInto(out *GmsaInitParameters) { + *out = *in + if in.DNSServer != nil { + in, out := &in.DNSServer, &out.DNSServer + *out = new(string) + **out = **in + } + if in.RootDomain != nil { + in, out := &in.RootDomain, &out.RootDomain + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GmsaInitParameters. +func (in *GmsaInitParameters) DeepCopy() *GmsaInitParameters { + if in == nil { + return nil + } + out := new(GmsaInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GmsaObservation) DeepCopyInto(out *GmsaObservation) { + *out = *in + if in.DNSServer != nil { + in, out := &in.DNSServer, &out.DNSServer + *out = new(string) + **out = **in + } + if in.RootDomain != nil { + in, out := &in.RootDomain, &out.RootDomain + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GmsaObservation. +func (in *GmsaObservation) DeepCopy() *GmsaObservation { + if in == nil { + return nil + } + out := new(GmsaObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GmsaParameters) DeepCopyInto(out *GmsaParameters) { + *out = *in + if in.DNSServer != nil { + in, out := &in.DNSServer, &out.DNSServer + *out = new(string) + **out = **in + } + if in.RootDomain != nil { + in, out := &in.RootDomain, &out.RootDomain + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GmsaParameters. +func (in *GmsaParameters) DeepCopy() *GmsaParameters { + if in == nil { + return nil + } + out := new(GmsaParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPProxyConfigInitParameters) DeepCopyInto(out *HTTPProxyConfigInitParameters) { + *out = *in + if in.HTTPProxy != nil { + in, out := &in.HTTPProxy, &out.HTTPProxy + *out = new(string) + **out = **in + } + if in.HTTPSProxy != nil { + in, out := &in.HTTPSProxy, &out.HTTPSProxy + *out = new(string) + **out = **in + } + if in.NoProxy != nil { + in, out := &in.NoProxy, &out.NoProxy + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPProxyConfigInitParameters. +func (in *HTTPProxyConfigInitParameters) DeepCopy() *HTTPProxyConfigInitParameters { + if in == nil { + return nil + } + out := new(HTTPProxyConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPProxyConfigObservation) DeepCopyInto(out *HTTPProxyConfigObservation) { + *out = *in + if in.HTTPProxy != nil { + in, out := &in.HTTPProxy, &out.HTTPProxy + *out = new(string) + **out = **in + } + if in.HTTPSProxy != nil { + in, out := &in.HTTPSProxy, &out.HTTPSProxy + *out = new(string) + **out = **in + } + if in.NoProxy != nil { + in, out := &in.NoProxy, &out.NoProxy + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPProxyConfigObservation. +func (in *HTTPProxyConfigObservation) DeepCopy() *HTTPProxyConfigObservation { + if in == nil { + return nil + } + out := new(HTTPProxyConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPProxyConfigParameters) DeepCopyInto(out *HTTPProxyConfigParameters) { + *out = *in + if in.HTTPProxy != nil { + in, out := &in.HTTPProxy, &out.HTTPProxy + *out = new(string) + **out = **in + } + if in.HTTPSProxy != nil { + in, out := &in.HTTPSProxy, &out.HTTPSProxy + *out = new(string) + **out = **in + } + if in.NoProxy != nil { + in, out := &in.NoProxy, &out.NoProxy + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TrustedCASecretRef != nil { + in, out := &in.TrustedCASecretRef, &out.TrustedCASecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPProxyConfigParameters. +func (in *HTTPProxyConfigParameters) DeepCopy() *HTTPProxyConfigParameters { + if in == nil { + return nil + } + out := new(HTTPProxyConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HubProfileInitParameters) DeepCopyInto(out *HubProfileInitParameters) { + *out = *in + if in.DNSPrefix != nil { + in, out := &in.DNSPrefix, &out.DNSPrefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HubProfileInitParameters. +func (in *HubProfileInitParameters) DeepCopy() *HubProfileInitParameters { + if in == nil { + return nil + } + out := new(HubProfileInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HubProfileObservation) DeepCopyInto(out *HubProfileObservation) { + *out = *in + if in.DNSPrefix != nil { + in, out := &in.DNSPrefix, &out.DNSPrefix + *out = new(string) + **out = **in + } + if in.Fqdn != nil { + in, out := &in.Fqdn, &out.Fqdn + *out = new(string) + **out = **in + } + if in.KubernetesVersion != nil { + in, out := &in.KubernetesVersion, &out.KubernetesVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HubProfileObservation. +func (in *HubProfileObservation) DeepCopy() *HubProfileObservation { + if in == nil { + return nil + } + out := new(HubProfileObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HubProfileParameters) DeepCopyInto(out *HubProfileParameters) { + *out = *in + if in.DNSPrefix != nil { + in, out := &in.DNSPrefix, &out.DNSPrefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HubProfileParameters. +func (in *HubProfileParameters) DeepCopy() *HubProfileParameters { + if in == nil { + return nil + } + out := new(HubProfileParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityInitParameters) DeepCopyInto(out *IdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityInitParameters. 
+func (in *IdentityInitParameters) DeepCopy() *IdentityInitParameters { + if in == nil { + return nil + } + out := new(IdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityObservation) DeepCopyInto(out *IdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityObservation. +func (in *IdentityObservation) DeepCopy() *IdentityObservation { + if in == nil { + return nil + } + out := new(IdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityParameters) DeepCopyInto(out *IdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityParameters. 
+func (in *IdentityParameters) DeepCopy() *IdentityParameters { + if in == nil { + return nil + } + out := new(IdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressApplicationGatewayIdentityInitParameters) DeepCopyInto(out *IngressApplicationGatewayIdentityInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressApplicationGatewayIdentityInitParameters. +func (in *IngressApplicationGatewayIdentityInitParameters) DeepCopy() *IngressApplicationGatewayIdentityInitParameters { + if in == nil { + return nil + } + out := new(IngressApplicationGatewayIdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressApplicationGatewayIdentityObservation) DeepCopyInto(out *IngressApplicationGatewayIdentityObservation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ObjectID != nil { + in, out := &in.ObjectID, &out.ObjectID + *out = new(string) + **out = **in + } + if in.UserAssignedIdentityID != nil { + in, out := &in.UserAssignedIdentityID, &out.UserAssignedIdentityID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressApplicationGatewayIdentityObservation. +func (in *IngressApplicationGatewayIdentityObservation) DeepCopy() *IngressApplicationGatewayIdentityObservation { + if in == nil { + return nil + } + out := new(IngressApplicationGatewayIdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IngressApplicationGatewayIdentityParameters) DeepCopyInto(out *IngressApplicationGatewayIdentityParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressApplicationGatewayIdentityParameters. +func (in *IngressApplicationGatewayIdentityParameters) DeepCopy() *IngressApplicationGatewayIdentityParameters { + if in == nil { + return nil + } + out := new(IngressApplicationGatewayIdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressApplicationGatewayInitParameters) DeepCopyInto(out *IngressApplicationGatewayInitParameters) { + *out = *in + if in.GatewayID != nil { + in, out := &in.GatewayID, &out.GatewayID + *out = new(string) + **out = **in + } + if in.GatewayName != nil { + in, out := &in.GatewayName, &out.GatewayName + *out = new(string) + **out = **in + } + if in.SubnetCidr != nil { + in, out := &in.SubnetCidr, &out.SubnetCidr + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressApplicationGatewayInitParameters. +func (in *IngressApplicationGatewayInitParameters) DeepCopy() *IngressApplicationGatewayInitParameters { + if in == nil { + return nil + } + out := new(IngressApplicationGatewayInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IngressApplicationGatewayObservation) DeepCopyInto(out *IngressApplicationGatewayObservation) { + *out = *in + if in.EffectiveGatewayID != nil { + in, out := &in.EffectiveGatewayID, &out.EffectiveGatewayID + *out = new(string) + **out = **in + } + if in.GatewayID != nil { + in, out := &in.GatewayID, &out.GatewayID + *out = new(string) + **out = **in + } + if in.GatewayName != nil { + in, out := &in.GatewayName, &out.GatewayName + *out = new(string) + **out = **in + } + if in.IngressApplicationGatewayIdentity != nil { + in, out := &in.IngressApplicationGatewayIdentity, &out.IngressApplicationGatewayIdentity + *out = make([]IngressApplicationGatewayIdentityObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetCidr != nil { + in, out := &in.SubnetCidr, &out.SubnetCidr + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressApplicationGatewayObservation. +func (in *IngressApplicationGatewayObservation) DeepCopy() *IngressApplicationGatewayObservation { + if in == nil { + return nil + } + out := new(IngressApplicationGatewayObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IngressApplicationGatewayParameters) DeepCopyInto(out *IngressApplicationGatewayParameters) { + *out = *in + if in.GatewayID != nil { + in, out := &in.GatewayID, &out.GatewayID + *out = new(string) + **out = **in + } + if in.GatewayName != nil { + in, out := &in.GatewayName, &out.GatewayName + *out = new(string) + **out = **in + } + if in.SubnetCidr != nil { + in, out := &in.SubnetCidr, &out.SubnetCidr + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressApplicationGatewayParameters. +func (in *IngressApplicationGatewayParameters) DeepCopy() *IngressApplicationGatewayParameters { + if in == nil { + return nil + } + out := new(IngressApplicationGatewayParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KeyManagementServiceInitParameters) DeepCopyInto(out *KeyManagementServiceInitParameters) { + *out = *in + if in.KeyVaultKeyID != nil { + in, out := &in.KeyVaultKeyID, &out.KeyVaultKeyID + *out = new(string) + **out = **in + } + if in.KeyVaultNetworkAccess != nil { + in, out := &in.KeyVaultNetworkAccess, &out.KeyVaultNetworkAccess + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyManagementServiceInitParameters. 
+func (in *KeyManagementServiceInitParameters) DeepCopy() *KeyManagementServiceInitParameters { + if in == nil { + return nil + } + out := new(KeyManagementServiceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KeyManagementServiceObservation) DeepCopyInto(out *KeyManagementServiceObservation) { + *out = *in + if in.KeyVaultKeyID != nil { + in, out := &in.KeyVaultKeyID, &out.KeyVaultKeyID + *out = new(string) + **out = **in + } + if in.KeyVaultNetworkAccess != nil { + in, out := &in.KeyVaultNetworkAccess, &out.KeyVaultNetworkAccess + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyManagementServiceObservation. +func (in *KeyManagementServiceObservation) DeepCopy() *KeyManagementServiceObservation { + if in == nil { + return nil + } + out := new(KeyManagementServiceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KeyManagementServiceParameters) DeepCopyInto(out *KeyManagementServiceParameters) { + *out = *in + if in.KeyVaultKeyID != nil { + in, out := &in.KeyVaultKeyID, &out.KeyVaultKeyID + *out = new(string) + **out = **in + } + if in.KeyVaultNetworkAccess != nil { + in, out := &in.KeyVaultNetworkAccess, &out.KeyVaultNetworkAccess + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyManagementServiceParameters. +func (in *KeyManagementServiceParameters) DeepCopy() *KeyManagementServiceParameters { + if in == nil { + return nil + } + out := new(KeyManagementServiceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KeyVaultSecretsProviderInitParameters) DeepCopyInto(out *KeyVaultSecretsProviderInitParameters) { + *out = *in + if in.SecretRotationEnabled != nil { + in, out := &in.SecretRotationEnabled, &out.SecretRotationEnabled + *out = new(bool) + **out = **in + } + if in.SecretRotationInterval != nil { + in, out := &in.SecretRotationInterval, &out.SecretRotationInterval + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyVaultSecretsProviderInitParameters. +func (in *KeyVaultSecretsProviderInitParameters) DeepCopy() *KeyVaultSecretsProviderInitParameters { + if in == nil { + return nil + } + out := new(KeyVaultSecretsProviderInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KeyVaultSecretsProviderObservation) DeepCopyInto(out *KeyVaultSecretsProviderObservation) { + *out = *in + if in.SecretIdentity != nil { + in, out := &in.SecretIdentity, &out.SecretIdentity + *out = make([]SecretIdentityObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecretRotationEnabled != nil { + in, out := &in.SecretRotationEnabled, &out.SecretRotationEnabled + *out = new(bool) + **out = **in + } + if in.SecretRotationInterval != nil { + in, out := &in.SecretRotationInterval, &out.SecretRotationInterval + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyVaultSecretsProviderObservation. +func (in *KeyVaultSecretsProviderObservation) DeepCopy() *KeyVaultSecretsProviderObservation { + if in == nil { + return nil + } + out := new(KeyVaultSecretsProviderObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KeyVaultSecretsProviderParameters) DeepCopyInto(out *KeyVaultSecretsProviderParameters) { + *out = *in + if in.SecretRotationEnabled != nil { + in, out := &in.SecretRotationEnabled, &out.SecretRotationEnabled + *out = new(bool) + **out = **in + } + if in.SecretRotationInterval != nil { + in, out := &in.SecretRotationInterval, &out.SecretRotationInterval + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyVaultSecretsProviderParameters. +func (in *KeyVaultSecretsProviderParameters) DeepCopy() *KeyVaultSecretsProviderParameters { + if in == nil { + return nil + } + out := new(KeyVaultSecretsProviderParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeAdminConfigInitParameters) DeepCopyInto(out *KubeAdminConfigInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAdminConfigInitParameters. +func (in *KubeAdminConfigInitParameters) DeepCopy() *KubeAdminConfigInitParameters { + if in == nil { + return nil + } + out := new(KubeAdminConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeAdminConfigObservation) DeepCopyInto(out *KubeAdminConfigObservation) { + *out = *in + if in.Host != nil { + in, out := &in.Host, &out.Host + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAdminConfigObservation. 
+func (in *KubeAdminConfigObservation) DeepCopy() *KubeAdminConfigObservation { + if in == nil { + return nil + } + out := new(KubeAdminConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeAdminConfigParameters) DeepCopyInto(out *KubeAdminConfigParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAdminConfigParameters. +func (in *KubeAdminConfigParameters) DeepCopy() *KubeAdminConfigParameters { + if in == nil { + return nil + } + out := new(KubeAdminConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeConfigInitParameters) DeepCopyInto(out *KubeConfigInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeConfigInitParameters. +func (in *KubeConfigInitParameters) DeepCopy() *KubeConfigInitParameters { + if in == nil { + return nil + } + out := new(KubeConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeConfigObservation) DeepCopyInto(out *KubeConfigObservation) { + *out = *in + if in.Host != nil { + in, out := &in.Host, &out.Host + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeConfigObservation. 
+func (in *KubeConfigObservation) DeepCopy() *KubeConfigObservation { + if in == nil { + return nil + } + out := new(KubeConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeConfigParameters) DeepCopyInto(out *KubeConfigParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeConfigParameters. +func (in *KubeConfigParameters) DeepCopy() *KubeConfigParameters { + if in == nil { + return nil + } + out := new(KubeConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeletConfigInitParameters) DeepCopyInto(out *KubeletConfigInitParameters) { + *out = *in + if in.AllowedUnsafeSysctls != nil { + in, out := &in.AllowedUnsafeSysctls, &out.AllowedUnsafeSysctls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUCfsQuotaEnabled != nil { + in, out := &in.CPUCfsQuotaEnabled, &out.CPUCfsQuotaEnabled + *out = new(bool) + **out = **in + } + if in.CPUCfsQuotaPeriod != nil { + in, out := &in.CPUCfsQuotaPeriod, &out.CPUCfsQuotaPeriod + *out = new(string) + **out = **in + } + if in.CPUManagerPolicy != nil { + in, out := &in.CPUManagerPolicy, &out.CPUManagerPolicy + *out = new(string) + **out = **in + } + if in.ContainerLogMaxLine != nil { + in, out := &in.ContainerLogMaxLine, &out.ContainerLogMaxLine + *out = new(float64) + **out = **in + } + if in.ContainerLogMaxSizeMb != nil { + in, out := &in.ContainerLogMaxSizeMb, &out.ContainerLogMaxSizeMb + *out = new(float64) + **out = **in + } + if in.ImageGcHighThreshold != nil { + in, out := &in.ImageGcHighThreshold, &out.ImageGcHighThreshold + *out = new(float64) + **out = **in + } + 
if in.ImageGcLowThreshold != nil { + in, out := &in.ImageGcLowThreshold, &out.ImageGcLowThreshold + *out = new(float64) + **out = **in + } + if in.PodMaxPid != nil { + in, out := &in.PodMaxPid, &out.PodMaxPid + *out = new(float64) + **out = **in + } + if in.TopologyManagerPolicy != nil { + in, out := &in.TopologyManagerPolicy, &out.TopologyManagerPolicy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletConfigInitParameters. +func (in *KubeletConfigInitParameters) DeepCopy() *KubeletConfigInitParameters { + if in == nil { + return nil + } + out := new(KubeletConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeletConfigObservation) DeepCopyInto(out *KubeletConfigObservation) { + *out = *in + if in.AllowedUnsafeSysctls != nil { + in, out := &in.AllowedUnsafeSysctls, &out.AllowedUnsafeSysctls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUCfsQuotaEnabled != nil { + in, out := &in.CPUCfsQuotaEnabled, &out.CPUCfsQuotaEnabled + *out = new(bool) + **out = **in + } + if in.CPUCfsQuotaPeriod != nil { + in, out := &in.CPUCfsQuotaPeriod, &out.CPUCfsQuotaPeriod + *out = new(string) + **out = **in + } + if in.CPUManagerPolicy != nil { + in, out := &in.CPUManagerPolicy, &out.CPUManagerPolicy + *out = new(string) + **out = **in + } + if in.ContainerLogMaxLine != nil { + in, out := &in.ContainerLogMaxLine, &out.ContainerLogMaxLine + *out = new(float64) + **out = **in + } + if in.ContainerLogMaxSizeMb != nil { + in, out := &in.ContainerLogMaxSizeMb, &out.ContainerLogMaxSizeMb + *out = new(float64) + **out = **in + } + if in.ImageGcHighThreshold != nil { + in, out := &in.ImageGcHighThreshold, &out.ImageGcHighThreshold + *out = 
new(float64) + **out = **in + } + if in.ImageGcLowThreshold != nil { + in, out := &in.ImageGcLowThreshold, &out.ImageGcLowThreshold + *out = new(float64) + **out = **in + } + if in.PodMaxPid != nil { + in, out := &in.PodMaxPid, &out.PodMaxPid + *out = new(float64) + **out = **in + } + if in.TopologyManagerPolicy != nil { + in, out := &in.TopologyManagerPolicy, &out.TopologyManagerPolicy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletConfigObservation. +func (in *KubeletConfigObservation) DeepCopy() *KubeletConfigObservation { + if in == nil { + return nil + } + out := new(KubeletConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeletConfigParameters) DeepCopyInto(out *KubeletConfigParameters) { + *out = *in + if in.AllowedUnsafeSysctls != nil { + in, out := &in.AllowedUnsafeSysctls, &out.AllowedUnsafeSysctls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUCfsQuotaEnabled != nil { + in, out := &in.CPUCfsQuotaEnabled, &out.CPUCfsQuotaEnabled + *out = new(bool) + **out = **in + } + if in.CPUCfsQuotaPeriod != nil { + in, out := &in.CPUCfsQuotaPeriod, &out.CPUCfsQuotaPeriod + *out = new(string) + **out = **in + } + if in.CPUManagerPolicy != nil { + in, out := &in.CPUManagerPolicy, &out.CPUManagerPolicy + *out = new(string) + **out = **in + } + if in.ContainerLogMaxLine != nil { + in, out := &in.ContainerLogMaxLine, &out.ContainerLogMaxLine + *out = new(float64) + **out = **in + } + if in.ContainerLogMaxSizeMb != nil { + in, out := &in.ContainerLogMaxSizeMb, &out.ContainerLogMaxSizeMb + *out = new(float64) + **out = **in + } + if in.ImageGcHighThreshold != nil { + in, out := &in.ImageGcHighThreshold, 
&out.ImageGcHighThreshold + *out = new(float64) + **out = **in + } + if in.ImageGcLowThreshold != nil { + in, out := &in.ImageGcLowThreshold, &out.ImageGcLowThreshold + *out = new(float64) + **out = **in + } + if in.PodMaxPid != nil { + in, out := &in.PodMaxPid, &out.PodMaxPid + *out = new(float64) + **out = **in + } + if in.TopologyManagerPolicy != nil { + in, out := &in.TopologyManagerPolicy, &out.TopologyManagerPolicy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletConfigParameters. +func (in *KubeletConfigParameters) DeepCopy() *KubeletConfigParameters { + if in == nil { + return nil + } + out := new(KubeletConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeletIdentityInitParameters) DeepCopyInto(out *KubeletIdentityInitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ObjectID != nil { + in, out := &in.ObjectID, &out.ObjectID + *out = new(string) + **out = **in + } + if in.UserAssignedIdentityID != nil { + in, out := &in.UserAssignedIdentityID, &out.UserAssignedIdentityID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletIdentityInitParameters. +func (in *KubeletIdentityInitParameters) DeepCopy() *KubeletIdentityInitParameters { + if in == nil { + return nil + } + out := new(KubeletIdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KubeletIdentityObservation) DeepCopyInto(out *KubeletIdentityObservation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ObjectID != nil { + in, out := &in.ObjectID, &out.ObjectID + *out = new(string) + **out = **in + } + if in.UserAssignedIdentityID != nil { + in, out := &in.UserAssignedIdentityID, &out.UserAssignedIdentityID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletIdentityObservation. +func (in *KubeletIdentityObservation) DeepCopy() *KubeletIdentityObservation { + if in == nil { + return nil + } + out := new(KubeletIdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeletIdentityParameters) DeepCopyInto(out *KubeletIdentityParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ObjectID != nil { + in, out := &in.ObjectID, &out.ObjectID + *out = new(string) + **out = **in + } + if in.UserAssignedIdentityID != nil { + in, out := &in.UserAssignedIdentityID, &out.UserAssignedIdentityID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletIdentityParameters. +func (in *KubeletIdentityParameters) DeepCopy() *KubeletIdentityParameters { + if in == nil { + return nil + } + out := new(KubeletIdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KubernetesCluster) DeepCopyInto(out *KubernetesCluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesCluster. +func (in *KubernetesCluster) DeepCopy() *KubernetesCluster { + if in == nil { + return nil + } + out := new(KubernetesCluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KubernetesCluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubernetesClusterInitParameters) DeepCopyInto(out *KubernetesClusterInitParameters) { + *out = *in + if in.APIServerAccessProfile != nil { + in, out := &in.APIServerAccessProfile, &out.APIServerAccessProfile + *out = new(APIServerAccessProfileInitParameters) + (*in).DeepCopyInto(*out) + } + if in.APIServerAuthorizedIPRanges != nil { + in, out := &in.APIServerAuthorizedIPRanges, &out.APIServerAuthorizedIPRanges + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AciConnectorLinux != nil { + in, out := &in.AciConnectorLinux, &out.AciConnectorLinux + *out = new(AciConnectorLinuxInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AutoScalerProfile != nil { + in, out := &in.AutoScalerProfile, &out.AutoScalerProfile + *out = new(AutoScalerProfileInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AutomaticChannelUpgrade != nil { + in, out := &in.AutomaticChannelUpgrade, &out.AutomaticChannelUpgrade + *out = new(string) + **out = **in + } + if 
in.AzureActiveDirectoryRoleBasedAccessControl != nil { + in, out := &in.AzureActiveDirectoryRoleBasedAccessControl, &out.AzureActiveDirectoryRoleBasedAccessControl + *out = new(AzureActiveDirectoryRoleBasedAccessControlInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AzurePolicyEnabled != nil { + in, out := &in.AzurePolicyEnabled, &out.AzurePolicyEnabled + *out = new(bool) + **out = **in + } + if in.ConfidentialComputing != nil { + in, out := &in.ConfidentialComputing, &out.ConfidentialComputing + *out = new(ConfidentialComputingInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomCATrustCertificatesBase64 != nil { + in, out := &in.CustomCATrustCertificatesBase64, &out.CustomCATrustCertificatesBase64 + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DNSPrefix != nil { + in, out := &in.DNSPrefix, &out.DNSPrefix + *out = new(string) + **out = **in + } + if in.DNSPrefixPrivateCluster != nil { + in, out := &in.DNSPrefixPrivateCluster, &out.DNSPrefixPrivateCluster + *out = new(string) + **out = **in + } + if in.DefaultNodePool != nil { + in, out := &in.DefaultNodePool, &out.DefaultNodePool + *out = new(DefaultNodePoolInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DiskEncryptionSetID != nil { + in, out := &in.DiskEncryptionSetID, &out.DiskEncryptionSetID + *out = new(string) + **out = **in + } + if in.EdgeZone != nil { + in, out := &in.EdgeZone, &out.EdgeZone + *out = new(string) + **out = **in + } + if in.EnablePodSecurityPolicy != nil { + in, out := &in.EnablePodSecurityPolicy, &out.EnablePodSecurityPolicy + *out = new(bool) + **out = **in + } + if in.HTTPApplicationRoutingEnabled != nil { + in, out := &in.HTTPApplicationRoutingEnabled, &out.HTTPApplicationRoutingEnabled + *out = new(bool) + **out = **in + } + if in.HTTPProxyConfig != nil { + in, out := &in.HTTPProxyConfig, &out.HTTPProxyConfig + *out = 
new(HTTPProxyConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ImageCleanerEnabled != nil { + in, out := &in.ImageCleanerEnabled, &out.ImageCleanerEnabled + *out = new(bool) + **out = **in + } + if in.ImageCleanerIntervalHours != nil { + in, out := &in.ImageCleanerIntervalHours, &out.ImageCleanerIntervalHours + *out = new(float64) + **out = **in + } + if in.IngressApplicationGateway != nil { + in, out := &in.IngressApplicationGateway, &out.IngressApplicationGateway + *out = new(IngressApplicationGatewayInitParameters) + (*in).DeepCopyInto(*out) + } + if in.KeyManagementService != nil { + in, out := &in.KeyManagementService, &out.KeyManagementService + *out = new(KeyManagementServiceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.KeyVaultSecretsProvider != nil { + in, out := &in.KeyVaultSecretsProvider, &out.KeyVaultSecretsProvider + *out = new(KeyVaultSecretsProviderInitParameters) + (*in).DeepCopyInto(*out) + } + if in.KubeletIdentity != nil { + in, out := &in.KubeletIdentity, &out.KubeletIdentity + *out = new(KubeletIdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.KubernetesVersion != nil { + in, out := &in.KubernetesVersion, &out.KubernetesVersion + *out = new(string) + **out = **in + } + if in.LinuxProfile != nil { + in, out := &in.LinuxProfile, &out.LinuxProfile + *out = new(LinuxProfileInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LocalAccountDisabled != nil { + in, out := &in.LocalAccountDisabled, &out.LocalAccountDisabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MaintenanceWindow != nil { + in, out := &in.MaintenanceWindow, &out.MaintenanceWindow + *out = new(MaintenanceWindowInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MaintenanceWindowAutoUpgrade != nil { + in, 
out := &in.MaintenanceWindowAutoUpgrade, &out.MaintenanceWindowAutoUpgrade + *out = new(MaintenanceWindowAutoUpgradeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MaintenanceWindowNodeOs != nil { + in, out := &in.MaintenanceWindowNodeOs, &out.MaintenanceWindowNodeOs + *out = new(MaintenanceWindowNodeOsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MicrosoftDefender != nil { + in, out := &in.MicrosoftDefender, &out.MicrosoftDefender + *out = new(MicrosoftDefenderInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MonitorMetrics != nil { + in, out := &in.MonitorMetrics, &out.MonitorMetrics + *out = new(MonitorMetricsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.NetworkProfile != nil { + in, out := &in.NetworkProfile, &out.NetworkProfile + *out = new(NetworkProfileInitParameters) + (*in).DeepCopyInto(*out) + } + if in.NodeOsChannelUpgrade != nil { + in, out := &in.NodeOsChannelUpgrade, &out.NodeOsChannelUpgrade + *out = new(string) + **out = **in + } + if in.NodeResourceGroup != nil { + in, out := &in.NodeResourceGroup, &out.NodeResourceGroup + *out = new(string) + **out = **in + } + if in.OidcIssuerEnabled != nil { + in, out := &in.OidcIssuerEnabled, &out.OidcIssuerEnabled + *out = new(bool) + **out = **in + } + if in.OmsAgent != nil { + in, out := &in.OmsAgent, &out.OmsAgent + *out = new(OmsAgentInitParameters) + (*in).DeepCopyInto(*out) + } + if in.OpenServiceMeshEnabled != nil { + in, out := &in.OpenServiceMeshEnabled, &out.OpenServiceMeshEnabled + *out = new(bool) + **out = **in + } + if in.PrivateClusterEnabled != nil { + in, out := &in.PrivateClusterEnabled, &out.PrivateClusterEnabled + *out = new(bool) + **out = **in + } + if in.PrivateClusterPublicFqdnEnabled != nil { + in, out := &in.PrivateClusterPublicFqdnEnabled, &out.PrivateClusterPublicFqdnEnabled + *out = new(bool) + **out = **in + } + if in.PrivateDNSZoneID != nil { + in, out := &in.PrivateDNSZoneID, &out.PrivateDNSZoneID + *out = new(string) + **out = **in + } + if 
in.PrivateDNSZoneIDRef != nil { + in, out := &in.PrivateDNSZoneIDRef, &out.PrivateDNSZoneIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PrivateDNSZoneIDSelector != nil { + in, out := &in.PrivateDNSZoneIDSelector, &out.PrivateDNSZoneIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.RoleBasedAccessControlEnabled != nil { + in, out := &in.RoleBasedAccessControlEnabled, &out.RoleBasedAccessControlEnabled + *out = new(bool) + **out = **in + } + if in.RunCommandEnabled != nil { + in, out := &in.RunCommandEnabled, &out.RunCommandEnabled + *out = new(bool) + **out = **in + } + if in.ServiceMeshProfile != nil { + in, out := &in.ServiceMeshProfile, &out.ServiceMeshProfile + *out = new(ServiceMeshProfileInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ServicePrincipal != nil { + in, out := &in.ServicePrincipal, &out.ServicePrincipal + *out = new(ServicePrincipalInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SkuTier != nil { + in, out := &in.SkuTier, &out.SkuTier + *out = new(string) + **out = **in + } + if in.StorageProfile != nil { + in, out := &in.StorageProfile, &out.StorageProfile + *out = new(StorageProfileInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SupportPlan != nil { + in, out := &in.SupportPlan, &out.SupportPlan + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.WebAppRouting != nil { + in, out := &in.WebAppRouting, &out.WebAppRouting + *out = new(WebAppRoutingInitParameters) + (*in).DeepCopyInto(*out) + } + if 
in.WindowsProfile != nil { + in, out := &in.WindowsProfile, &out.WindowsProfile + *out = new(WindowsProfileInitParameters) + (*in).DeepCopyInto(*out) + } + if in.WorkloadAutoscalerProfile != nil { + in, out := &in.WorkloadAutoscalerProfile, &out.WorkloadAutoscalerProfile + *out = new(WorkloadAutoscalerProfileInitParameters) + (*in).DeepCopyInto(*out) + } + if in.WorkloadIdentityEnabled != nil { + in, out := &in.WorkloadIdentityEnabled, &out.WorkloadIdentityEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesClusterInitParameters. +func (in *KubernetesClusterInitParameters) DeepCopy() *KubernetesClusterInitParameters { + if in == nil { + return nil + } + out := new(KubernetesClusterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubernetesClusterList) DeepCopyInto(out *KubernetesClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KubernetesCluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesClusterList. +func (in *KubernetesClusterList) DeepCopy() *KubernetesClusterList { + if in == nil { + return nil + } + out := new(KubernetesClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KubernetesClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KubernetesClusterNodePool) DeepCopyInto(out *KubernetesClusterNodePool) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesClusterNodePool. +func (in *KubernetesClusterNodePool) DeepCopy() *KubernetesClusterNodePool { + if in == nil { + return nil + } + out := new(KubernetesClusterNodePool) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KubernetesClusterNodePool) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubernetesClusterNodePoolInitParameters) DeepCopyInto(out *KubernetesClusterNodePoolInitParameters) { + *out = *in + if in.CapacityReservationGroupID != nil { + in, out := &in.CapacityReservationGroupID, &out.CapacityReservationGroupID + *out = new(string) + **out = **in + } + if in.CustomCATrustEnabled != nil { + in, out := &in.CustomCATrustEnabled, &out.CustomCATrustEnabled + *out = new(bool) + **out = **in + } + if in.EnableAutoScaling != nil { + in, out := &in.EnableAutoScaling, &out.EnableAutoScaling + *out = new(bool) + **out = **in + } + if in.EnableHostEncryption != nil { + in, out := &in.EnableHostEncryption, &out.EnableHostEncryption + *out = new(bool) + **out = **in + } + if in.EnableNodePublicIP != nil { + in, out := &in.EnableNodePublicIP, &out.EnableNodePublicIP + *out = new(bool) + **out = **in + } + if in.EvictionPolicy != nil { + in, out := &in.EvictionPolicy, &out.EvictionPolicy + *out = new(string) + **out = **in + } + if in.FipsEnabled != nil { + in, out := &in.FipsEnabled, &out.FipsEnabled + *out = new(bool) + **out = **in + 
} + if in.GpuInstance != nil { + in, out := &in.GpuInstance, &out.GpuInstance + *out = new(string) + **out = **in + } + if in.HostGroupID != nil { + in, out := &in.HostGroupID, &out.HostGroupID + *out = new(string) + **out = **in + } + if in.KubeletConfig != nil { + in, out := &in.KubeletConfig, &out.KubeletConfig + *out = new(KubernetesClusterNodePoolKubeletConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.KubeletDiskType != nil { + in, out := &in.KubeletDiskType, &out.KubeletDiskType + *out = new(string) + **out = **in + } + if in.LinuxOsConfig != nil { + in, out := &in.LinuxOsConfig, &out.LinuxOsConfig + *out = new(KubernetesClusterNodePoolLinuxOsConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MaxCount != nil { + in, out := &in.MaxCount, &out.MaxCount + *out = new(float64) + **out = **in + } + if in.MaxPods != nil { + in, out := &in.MaxPods, &out.MaxPods + *out = new(float64) + **out = **in + } + if in.MessageOfTheDay != nil { + in, out := &in.MessageOfTheDay, &out.MessageOfTheDay + *out = new(string) + **out = **in + } + if in.MinCount != nil { + in, out := &in.MinCount, &out.MinCount + *out = new(float64) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.NodeCount != nil { + in, out := &in.NodeCount, &out.NodeCount + *out = new(float64) + **out = **in + } + if in.NodeLabels != nil { + in, out := &in.NodeLabels, &out.NodeLabels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.NodeNetworkProfile != nil { + in, out := &in.NodeNetworkProfile, &out.NodeNetworkProfile + *out = new(KubernetesClusterNodePoolNodeNetworkProfileInitParameters) + (*in).DeepCopyInto(*out) + } + if in.NodePublicIPPrefixID != nil { + in, out := &in.NodePublicIPPrefixID, 
&out.NodePublicIPPrefixID + *out = new(string) + **out = **in + } + if in.NodeTaints != nil { + in, out := &in.NodeTaints, &out.NodeTaints + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OrchestratorVersion != nil { + in, out := &in.OrchestratorVersion, &out.OrchestratorVersion + *out = new(string) + **out = **in + } + if in.OsDiskSizeGb != nil { + in, out := &in.OsDiskSizeGb, &out.OsDiskSizeGb + *out = new(float64) + **out = **in + } + if in.OsDiskType != nil { + in, out := &in.OsDiskType, &out.OsDiskType + *out = new(string) + **out = **in + } + if in.OsSku != nil { + in, out := &in.OsSku, &out.OsSku + *out = new(string) + **out = **in + } + if in.OsType != nil { + in, out := &in.OsType, &out.OsType + *out = new(string) + **out = **in + } + if in.PodSubnetID != nil { + in, out := &in.PodSubnetID, &out.PodSubnetID + *out = new(string) + **out = **in + } + if in.PodSubnetIDRef != nil { + in, out := &in.PodSubnetIDRef, &out.PodSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PodSubnetIDSelector != nil { + in, out := &in.PodSubnetIDSelector, &out.PodSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(string) + **out = **in + } + if in.ProximityPlacementGroupID != nil { + in, out := &in.ProximityPlacementGroupID, &out.ProximityPlacementGroupID + *out = new(string) + **out = **in + } + if in.ScaleDownMode != nil { + in, out := &in.ScaleDownMode, &out.ScaleDownMode + *out = new(string) + **out = **in + } + if in.SnapshotID != nil { + in, out := &in.SnapshotID, &out.SnapshotID + *out = new(string) + **out = **in + } + if in.SpotMaxPrice != nil { + in, out := &in.SpotMaxPrice, &out.SpotMaxPrice + *out = new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, 
len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UltraSsdEnabled != nil { + in, out := &in.UltraSsdEnabled, &out.UltraSsdEnabled + *out = new(bool) + **out = **in + } + if in.UpgradeSettings != nil { + in, out := &in.UpgradeSettings, &out.UpgradeSettings + *out = new(KubernetesClusterNodePoolUpgradeSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } + if in.VnetSubnetID != nil { + in, out := &in.VnetSubnetID, &out.VnetSubnetID + *out = new(string) + **out = **in + } + if in.VnetSubnetIDRef != nil { + in, out := &in.VnetSubnetIDRef, &out.VnetSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VnetSubnetIDSelector != nil { + in, out := &in.VnetSubnetIDSelector, &out.VnetSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.WindowsProfile != nil { + in, out := &in.WindowsProfile, &out.WindowsProfile + *out = new(KubernetesClusterNodePoolWindowsProfileInitParameters) + (*in).DeepCopyInto(*out) + } + if in.WorkloadRuntime != nil { + in, out := &in.WorkloadRuntime, &out.WorkloadRuntime + *out = new(string) + **out = **in + } + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesClusterNodePoolInitParameters. 
+func (in *KubernetesClusterNodePoolInitParameters) DeepCopy() *KubernetesClusterNodePoolInitParameters { + if in == nil { + return nil + } + out := new(KubernetesClusterNodePoolInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubernetesClusterNodePoolKubeletConfigInitParameters) DeepCopyInto(out *KubernetesClusterNodePoolKubeletConfigInitParameters) { + *out = *in + if in.AllowedUnsafeSysctls != nil { + in, out := &in.AllowedUnsafeSysctls, &out.AllowedUnsafeSysctls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUCfsQuotaEnabled != nil { + in, out := &in.CPUCfsQuotaEnabled, &out.CPUCfsQuotaEnabled + *out = new(bool) + **out = **in + } + if in.CPUCfsQuotaPeriod != nil { + in, out := &in.CPUCfsQuotaPeriod, &out.CPUCfsQuotaPeriod + *out = new(string) + **out = **in + } + if in.CPUManagerPolicy != nil { + in, out := &in.CPUManagerPolicy, &out.CPUManagerPolicy + *out = new(string) + **out = **in + } + if in.ContainerLogMaxLine != nil { + in, out := &in.ContainerLogMaxLine, &out.ContainerLogMaxLine + *out = new(float64) + **out = **in + } + if in.ContainerLogMaxSizeMb != nil { + in, out := &in.ContainerLogMaxSizeMb, &out.ContainerLogMaxSizeMb + *out = new(float64) + **out = **in + } + if in.ImageGcHighThreshold != nil { + in, out := &in.ImageGcHighThreshold, &out.ImageGcHighThreshold + *out = new(float64) + **out = **in + } + if in.ImageGcLowThreshold != nil { + in, out := &in.ImageGcLowThreshold, &out.ImageGcLowThreshold + *out = new(float64) + **out = **in + } + if in.PodMaxPid != nil { + in, out := &in.PodMaxPid, &out.PodMaxPid + *out = new(float64) + **out = **in + } + if in.TopologyManagerPolicy != nil { + in, out := &in.TopologyManagerPolicy, &out.TopologyManagerPolicy + *out = new(string) + **out = **in + } 
+} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesClusterNodePoolKubeletConfigInitParameters. +func (in *KubernetesClusterNodePoolKubeletConfigInitParameters) DeepCopy() *KubernetesClusterNodePoolKubeletConfigInitParameters { + if in == nil { + return nil + } + out := new(KubernetesClusterNodePoolKubeletConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubernetesClusterNodePoolKubeletConfigObservation) DeepCopyInto(out *KubernetesClusterNodePoolKubeletConfigObservation) { + *out = *in + if in.AllowedUnsafeSysctls != nil { + in, out := &in.AllowedUnsafeSysctls, &out.AllowedUnsafeSysctls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUCfsQuotaEnabled != nil { + in, out := &in.CPUCfsQuotaEnabled, &out.CPUCfsQuotaEnabled + *out = new(bool) + **out = **in + } + if in.CPUCfsQuotaPeriod != nil { + in, out := &in.CPUCfsQuotaPeriod, &out.CPUCfsQuotaPeriod + *out = new(string) + **out = **in + } + if in.CPUManagerPolicy != nil { + in, out := &in.CPUManagerPolicy, &out.CPUManagerPolicy + *out = new(string) + **out = **in + } + if in.ContainerLogMaxLine != nil { + in, out := &in.ContainerLogMaxLine, &out.ContainerLogMaxLine + *out = new(float64) + **out = **in + } + if in.ContainerLogMaxSizeMb != nil { + in, out := &in.ContainerLogMaxSizeMb, &out.ContainerLogMaxSizeMb + *out = new(float64) + **out = **in + } + if in.ImageGcHighThreshold != nil { + in, out := &in.ImageGcHighThreshold, &out.ImageGcHighThreshold + *out = new(float64) + **out = **in + } + if in.ImageGcLowThreshold != nil { + in, out := &in.ImageGcLowThreshold, &out.ImageGcLowThreshold + *out = new(float64) + **out = **in + } + if in.PodMaxPid != nil { + in, out := &in.PodMaxPid, &out.PodMaxPid + 
*out = new(float64) + **out = **in + } + if in.TopologyManagerPolicy != nil { + in, out := &in.TopologyManagerPolicy, &out.TopologyManagerPolicy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesClusterNodePoolKubeletConfigObservation. +func (in *KubernetesClusterNodePoolKubeletConfigObservation) DeepCopy() *KubernetesClusterNodePoolKubeletConfigObservation { + if in == nil { + return nil + } + out := new(KubernetesClusterNodePoolKubeletConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubernetesClusterNodePoolKubeletConfigParameters) DeepCopyInto(out *KubernetesClusterNodePoolKubeletConfigParameters) { + *out = *in + if in.AllowedUnsafeSysctls != nil { + in, out := &in.AllowedUnsafeSysctls, &out.AllowedUnsafeSysctls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CPUCfsQuotaEnabled != nil { + in, out := &in.CPUCfsQuotaEnabled, &out.CPUCfsQuotaEnabled + *out = new(bool) + **out = **in + } + if in.CPUCfsQuotaPeriod != nil { + in, out := &in.CPUCfsQuotaPeriod, &out.CPUCfsQuotaPeriod + *out = new(string) + **out = **in + } + if in.CPUManagerPolicy != nil { + in, out := &in.CPUManagerPolicy, &out.CPUManagerPolicy + *out = new(string) + **out = **in + } + if in.ContainerLogMaxLine != nil { + in, out := &in.ContainerLogMaxLine, &out.ContainerLogMaxLine + *out = new(float64) + **out = **in + } + if in.ContainerLogMaxSizeMb != nil { + in, out := &in.ContainerLogMaxSizeMb, &out.ContainerLogMaxSizeMb + *out = new(float64) + **out = **in + } + if in.ImageGcHighThreshold != nil { + in, out := &in.ImageGcHighThreshold, &out.ImageGcHighThreshold + *out = new(float64) + **out = **in + } + if in.ImageGcLowThreshold != nil { + in, 
out := &in.ImageGcLowThreshold, &out.ImageGcLowThreshold + *out = new(float64) + **out = **in + } + if in.PodMaxPid != nil { + in, out := &in.PodMaxPid, &out.PodMaxPid + *out = new(float64) + **out = **in + } + if in.TopologyManagerPolicy != nil { + in, out := &in.TopologyManagerPolicy, &out.TopologyManagerPolicy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesClusterNodePoolKubeletConfigParameters. +func (in *KubernetesClusterNodePoolKubeletConfigParameters) DeepCopy() *KubernetesClusterNodePoolKubeletConfigParameters { + if in == nil { + return nil + } + out := new(KubernetesClusterNodePoolKubeletConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubernetesClusterNodePoolLinuxOsConfigInitParameters) DeepCopyInto(out *KubernetesClusterNodePoolLinuxOsConfigInitParameters) { + *out = *in + if in.SwapFileSizeMb != nil { + in, out := &in.SwapFileSizeMb, &out.SwapFileSizeMb + *out = new(float64) + **out = **in + } + if in.SysctlConfig != nil { + in, out := &in.SysctlConfig, &out.SysctlConfig + *out = new(LinuxOsConfigSysctlConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TransparentHugePageDefrag != nil { + in, out := &in.TransparentHugePageDefrag, &out.TransparentHugePageDefrag + *out = new(string) + **out = **in + } + if in.TransparentHugePageEnabled != nil { + in, out := &in.TransparentHugePageEnabled, &out.TransparentHugePageEnabled + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesClusterNodePoolLinuxOsConfigInitParameters. 
+func (in *KubernetesClusterNodePoolLinuxOsConfigInitParameters) DeepCopy() *KubernetesClusterNodePoolLinuxOsConfigInitParameters { + if in == nil { + return nil + } + out := new(KubernetesClusterNodePoolLinuxOsConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubernetesClusterNodePoolLinuxOsConfigObservation) DeepCopyInto(out *KubernetesClusterNodePoolLinuxOsConfigObservation) { + *out = *in + if in.SwapFileSizeMb != nil { + in, out := &in.SwapFileSizeMb, &out.SwapFileSizeMb + *out = new(float64) + **out = **in + } + if in.SysctlConfig != nil { + in, out := &in.SysctlConfig, &out.SysctlConfig + *out = new(LinuxOsConfigSysctlConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.TransparentHugePageDefrag != nil { + in, out := &in.TransparentHugePageDefrag, &out.TransparentHugePageDefrag + *out = new(string) + **out = **in + } + if in.TransparentHugePageEnabled != nil { + in, out := &in.TransparentHugePageEnabled, &out.TransparentHugePageEnabled + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesClusterNodePoolLinuxOsConfigObservation. +func (in *KubernetesClusterNodePoolLinuxOsConfigObservation) DeepCopy() *KubernetesClusterNodePoolLinuxOsConfigObservation { + if in == nil { + return nil + } + out := new(KubernetesClusterNodePoolLinuxOsConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KubernetesClusterNodePoolLinuxOsConfigParameters) DeepCopyInto(out *KubernetesClusterNodePoolLinuxOsConfigParameters) { + *out = *in + if in.SwapFileSizeMb != nil { + in, out := &in.SwapFileSizeMb, &out.SwapFileSizeMb + *out = new(float64) + **out = **in + } + if in.SysctlConfig != nil { + in, out := &in.SysctlConfig, &out.SysctlConfig + *out = new(LinuxOsConfigSysctlConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.TransparentHugePageDefrag != nil { + in, out := &in.TransparentHugePageDefrag, &out.TransparentHugePageDefrag + *out = new(string) + **out = **in + } + if in.TransparentHugePageEnabled != nil { + in, out := &in.TransparentHugePageEnabled, &out.TransparentHugePageEnabled + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesClusterNodePoolLinuxOsConfigParameters. +func (in *KubernetesClusterNodePoolLinuxOsConfigParameters) DeepCopy() *KubernetesClusterNodePoolLinuxOsConfigParameters { + if in == nil { + return nil + } + out := new(KubernetesClusterNodePoolLinuxOsConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubernetesClusterNodePoolList) DeepCopyInto(out *KubernetesClusterNodePoolList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KubernetesClusterNodePool, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesClusterNodePoolList. 
+func (in *KubernetesClusterNodePoolList) DeepCopy() *KubernetesClusterNodePoolList { + if in == nil { + return nil + } + out := new(KubernetesClusterNodePoolList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KubernetesClusterNodePoolList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubernetesClusterNodePoolNodeNetworkProfileInitParameters) DeepCopyInto(out *KubernetesClusterNodePoolNodeNetworkProfileInitParameters) { + *out = *in + if in.AllowedHostPorts != nil { + in, out := &in.AllowedHostPorts, &out.AllowedHostPorts + *out = make([]NodeNetworkProfileAllowedHostPortsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ApplicationSecurityGroupIds != nil { + in, out := &in.ApplicationSecurityGroupIds, &out.ApplicationSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NodePublicIPTags != nil { + in, out := &in.NodePublicIPTags, &out.NodePublicIPTags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesClusterNodePoolNodeNetworkProfileInitParameters. 
+func (in *KubernetesClusterNodePoolNodeNetworkProfileInitParameters) DeepCopy() *KubernetesClusterNodePoolNodeNetworkProfileInitParameters { + if in == nil { + return nil + } + out := new(KubernetesClusterNodePoolNodeNetworkProfileInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubernetesClusterNodePoolNodeNetworkProfileObservation) DeepCopyInto(out *KubernetesClusterNodePoolNodeNetworkProfileObservation) { + *out = *in + if in.AllowedHostPorts != nil { + in, out := &in.AllowedHostPorts, &out.AllowedHostPorts + *out = make([]NodeNetworkProfileAllowedHostPortsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ApplicationSecurityGroupIds != nil { + in, out := &in.ApplicationSecurityGroupIds, &out.ApplicationSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NodePublicIPTags != nil { + in, out := &in.NodePublicIPTags, &out.NodePublicIPTags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesClusterNodePoolNodeNetworkProfileObservation. +func (in *KubernetesClusterNodePoolNodeNetworkProfileObservation) DeepCopy() *KubernetesClusterNodePoolNodeNetworkProfileObservation { + if in == nil { + return nil + } + out := new(KubernetesClusterNodePoolNodeNetworkProfileObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KubernetesClusterNodePoolNodeNetworkProfileParameters) DeepCopyInto(out *KubernetesClusterNodePoolNodeNetworkProfileParameters) { + *out = *in + if in.AllowedHostPorts != nil { + in, out := &in.AllowedHostPorts, &out.AllowedHostPorts + *out = make([]NodeNetworkProfileAllowedHostPortsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ApplicationSecurityGroupIds != nil { + in, out := &in.ApplicationSecurityGroupIds, &out.ApplicationSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NodePublicIPTags != nil { + in, out := &in.NodePublicIPTags, &out.NodePublicIPTags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesClusterNodePoolNodeNetworkProfileParameters. +func (in *KubernetesClusterNodePoolNodeNetworkProfileParameters) DeepCopy() *KubernetesClusterNodePoolNodeNetworkProfileParameters { + if in == nil { + return nil + } + out := new(KubernetesClusterNodePoolNodeNetworkProfileParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KubernetesClusterNodePoolObservation) DeepCopyInto(out *KubernetesClusterNodePoolObservation) { + *out = *in + if in.CapacityReservationGroupID != nil { + in, out := &in.CapacityReservationGroupID, &out.CapacityReservationGroupID + *out = new(string) + **out = **in + } + if in.CustomCATrustEnabled != nil { + in, out := &in.CustomCATrustEnabled, &out.CustomCATrustEnabled + *out = new(bool) + **out = **in + } + if in.EnableAutoScaling != nil { + in, out := &in.EnableAutoScaling, &out.EnableAutoScaling + *out = new(bool) + **out = **in + } + if in.EnableHostEncryption != nil { + in, out := &in.EnableHostEncryption, &out.EnableHostEncryption + *out = new(bool) + **out = **in + } + if in.EnableNodePublicIP != nil { + in, out := &in.EnableNodePublicIP, &out.EnableNodePublicIP + *out = new(bool) + **out = **in + } + if in.EvictionPolicy != nil { + in, out := &in.EvictionPolicy, &out.EvictionPolicy + *out = new(string) + **out = **in + } + if in.FipsEnabled != nil { + in, out := &in.FipsEnabled, &out.FipsEnabled + *out = new(bool) + **out = **in + } + if in.GpuInstance != nil { + in, out := &in.GpuInstance, &out.GpuInstance + *out = new(string) + **out = **in + } + if in.HostGroupID != nil { + in, out := &in.HostGroupID, &out.HostGroupID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.KubeletConfig != nil { + in, out := &in.KubeletConfig, &out.KubeletConfig + *out = new(KubernetesClusterNodePoolKubeletConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.KubeletDiskType != nil { + in, out := &in.KubeletDiskType, &out.KubeletDiskType + *out = new(string) + **out = **in + } + if in.KubernetesClusterID != nil { + in, out := &in.KubernetesClusterID, &out.KubernetesClusterID + *out = new(string) + **out = **in + } + if in.LinuxOsConfig != nil { + in, out := &in.LinuxOsConfig, &out.LinuxOsConfig + *out = new(KubernetesClusterNodePoolLinuxOsConfigObservation) + 
(*in).DeepCopyInto(*out) + } + if in.MaxCount != nil { + in, out := &in.MaxCount, &out.MaxCount + *out = new(float64) + **out = **in + } + if in.MaxPods != nil { + in, out := &in.MaxPods, &out.MaxPods + *out = new(float64) + **out = **in + } + if in.MessageOfTheDay != nil { + in, out := &in.MessageOfTheDay, &out.MessageOfTheDay + *out = new(string) + **out = **in + } + if in.MinCount != nil { + in, out := &in.MinCount, &out.MinCount + *out = new(float64) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.NodeCount != nil { + in, out := &in.NodeCount, &out.NodeCount + *out = new(float64) + **out = **in + } + if in.NodeLabels != nil { + in, out := &in.NodeLabels, &out.NodeLabels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.NodeNetworkProfile != nil { + in, out := &in.NodeNetworkProfile, &out.NodeNetworkProfile + *out = new(KubernetesClusterNodePoolNodeNetworkProfileObservation) + (*in).DeepCopyInto(*out) + } + if in.NodePublicIPPrefixID != nil { + in, out := &in.NodePublicIPPrefixID, &out.NodePublicIPPrefixID + *out = new(string) + **out = **in + } + if in.NodeTaints != nil { + in, out := &in.NodeTaints, &out.NodeTaints + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OrchestratorVersion != nil { + in, out := &in.OrchestratorVersion, &out.OrchestratorVersion + *out = new(string) + **out = **in + } + if in.OsDiskSizeGb != nil { + in, out := &in.OsDiskSizeGb, &out.OsDiskSizeGb + *out = new(float64) + **out = **in + } + if in.OsDiskType != nil { + in, out := &in.OsDiskType, &out.OsDiskType + *out = new(string) + **out = **in + } + if in.OsSku != nil { + 
in, out := &in.OsSku, &out.OsSku + *out = new(string) + **out = **in + } + if in.OsType != nil { + in, out := &in.OsType, &out.OsType + *out = new(string) + **out = **in + } + if in.PodSubnetID != nil { + in, out := &in.PodSubnetID, &out.PodSubnetID + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(string) + **out = **in + } + if in.ProximityPlacementGroupID != nil { + in, out := &in.ProximityPlacementGroupID, &out.ProximityPlacementGroupID + *out = new(string) + **out = **in + } + if in.ScaleDownMode != nil { + in, out := &in.ScaleDownMode, &out.ScaleDownMode + *out = new(string) + **out = **in + } + if in.SnapshotID != nil { + in, out := &in.SnapshotID, &out.SnapshotID + *out = new(string) + **out = **in + } + if in.SpotMaxPrice != nil { + in, out := &in.SpotMaxPrice, &out.SpotMaxPrice + *out = new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UltraSsdEnabled != nil { + in, out := &in.UltraSsdEnabled, &out.UltraSsdEnabled + *out = new(bool) + **out = **in + } + if in.UpgradeSettings != nil { + in, out := &in.UpgradeSettings, &out.UpgradeSettings + *out = new(KubernetesClusterNodePoolUpgradeSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } + if in.VnetSubnetID != nil { + in, out := &in.VnetSubnetID, &out.VnetSubnetID + *out = new(string) + **out = **in + } + if in.WindowsProfile != nil { + in, out := &in.WindowsProfile, &out.WindowsProfile + *out = new(KubernetesClusterNodePoolWindowsProfileObservation) + (*in).DeepCopyInto(*out) + } + if in.WorkloadRuntime != nil { + in, out := 
&in.WorkloadRuntime, &out.WorkloadRuntime + *out = new(string) + **out = **in + } + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesClusterNodePoolObservation. +func (in *KubernetesClusterNodePoolObservation) DeepCopy() *KubernetesClusterNodePoolObservation { + if in == nil { + return nil + } + out := new(KubernetesClusterNodePoolObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubernetesClusterNodePoolParameters) DeepCopyInto(out *KubernetesClusterNodePoolParameters) { + *out = *in + if in.CapacityReservationGroupID != nil { + in, out := &in.CapacityReservationGroupID, &out.CapacityReservationGroupID + *out = new(string) + **out = **in + } + if in.CustomCATrustEnabled != nil { + in, out := &in.CustomCATrustEnabled, &out.CustomCATrustEnabled + *out = new(bool) + **out = **in + } + if in.EnableAutoScaling != nil { + in, out := &in.EnableAutoScaling, &out.EnableAutoScaling + *out = new(bool) + **out = **in + } + if in.EnableHostEncryption != nil { + in, out := &in.EnableHostEncryption, &out.EnableHostEncryption + *out = new(bool) + **out = **in + } + if in.EnableNodePublicIP != nil { + in, out := &in.EnableNodePublicIP, &out.EnableNodePublicIP + *out = new(bool) + **out = **in + } + if in.EvictionPolicy != nil { + in, out := &in.EvictionPolicy, &out.EvictionPolicy + *out = new(string) + **out = **in + } + if in.FipsEnabled != nil { + in, out := &in.FipsEnabled, &out.FipsEnabled + *out = new(bool) + **out = **in + } + if in.GpuInstance != nil { + in, out := &in.GpuInstance, &out.GpuInstance + *out = new(string) + **out = **in + } + if in.HostGroupID != nil { + in, out 
:= &in.HostGroupID, &out.HostGroupID + *out = new(string) + **out = **in + } + if in.KubeletConfig != nil { + in, out := &in.KubeletConfig, &out.KubeletConfig + *out = new(KubernetesClusterNodePoolKubeletConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.KubeletDiskType != nil { + in, out := &in.KubeletDiskType, &out.KubeletDiskType + *out = new(string) + **out = **in + } + if in.KubernetesClusterID != nil { + in, out := &in.KubernetesClusterID, &out.KubernetesClusterID + *out = new(string) + **out = **in + } + if in.KubernetesClusterIDRef != nil { + in, out := &in.KubernetesClusterIDRef, &out.KubernetesClusterIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KubernetesClusterIDSelector != nil { + in, out := &in.KubernetesClusterIDSelector, &out.KubernetesClusterIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.LinuxOsConfig != nil { + in, out := &in.LinuxOsConfig, &out.LinuxOsConfig + *out = new(KubernetesClusterNodePoolLinuxOsConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.MaxCount != nil { + in, out := &in.MaxCount, &out.MaxCount + *out = new(float64) + **out = **in + } + if in.MaxPods != nil { + in, out := &in.MaxPods, &out.MaxPods + *out = new(float64) + **out = **in + } + if in.MessageOfTheDay != nil { + in, out := &in.MessageOfTheDay, &out.MessageOfTheDay + *out = new(string) + **out = **in + } + if in.MinCount != nil { + in, out := &in.MinCount, &out.MinCount + *out = new(float64) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.NodeCount != nil { + in, out := &in.NodeCount, &out.NodeCount + *out = new(float64) + **out = **in + } + if in.NodeLabels != nil { + in, out := &in.NodeLabels, &out.NodeLabels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = 
**in + } + (*out)[key] = outVal + } + } + if in.NodeNetworkProfile != nil { + in, out := &in.NodeNetworkProfile, &out.NodeNetworkProfile + *out = new(KubernetesClusterNodePoolNodeNetworkProfileParameters) + (*in).DeepCopyInto(*out) + } + if in.NodePublicIPPrefixID != nil { + in, out := &in.NodePublicIPPrefixID, &out.NodePublicIPPrefixID + *out = new(string) + **out = **in + } + if in.NodeTaints != nil { + in, out := &in.NodeTaints, &out.NodeTaints + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OrchestratorVersion != nil { + in, out := &in.OrchestratorVersion, &out.OrchestratorVersion + *out = new(string) + **out = **in + } + if in.OsDiskSizeGb != nil { + in, out := &in.OsDiskSizeGb, &out.OsDiskSizeGb + *out = new(float64) + **out = **in + } + if in.OsDiskType != nil { + in, out := &in.OsDiskType, &out.OsDiskType + *out = new(string) + **out = **in + } + if in.OsSku != nil { + in, out := &in.OsSku, &out.OsSku + *out = new(string) + **out = **in + } + if in.OsType != nil { + in, out := &in.OsType, &out.OsType + *out = new(string) + **out = **in + } + if in.PodSubnetID != nil { + in, out := &in.PodSubnetID, &out.PodSubnetID + *out = new(string) + **out = **in + } + if in.PodSubnetIDRef != nil { + in, out := &in.PodSubnetIDRef, &out.PodSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PodSubnetIDSelector != nil { + in, out := &in.PodSubnetIDSelector, &out.PodSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(string) + **out = **in + } + if in.ProximityPlacementGroupID != nil { + in, out := &in.ProximityPlacementGroupID, &out.ProximityPlacementGroupID + *out = new(string) + **out = **in + } + if in.ScaleDownMode != nil { + in, out := &in.ScaleDownMode, &out.ScaleDownMode + *out = new(string) + **out = **in + } + if 
in.SnapshotID != nil { + in, out := &in.SnapshotID, &out.SnapshotID + *out = new(string) + **out = **in + } + if in.SpotMaxPrice != nil { + in, out := &in.SpotMaxPrice, &out.SpotMaxPrice + *out = new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UltraSsdEnabled != nil { + in, out := &in.UltraSsdEnabled, &out.UltraSsdEnabled + *out = new(bool) + **out = **in + } + if in.UpgradeSettings != nil { + in, out := &in.UpgradeSettings, &out.UpgradeSettings + *out = new(KubernetesClusterNodePoolUpgradeSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } + if in.VnetSubnetID != nil { + in, out := &in.VnetSubnetID, &out.VnetSubnetID + *out = new(string) + **out = **in + } + if in.VnetSubnetIDRef != nil { + in, out := &in.VnetSubnetIDRef, &out.VnetSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VnetSubnetIDSelector != nil { + in, out := &in.VnetSubnetIDSelector, &out.VnetSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.WindowsProfile != nil { + in, out := &in.WindowsProfile, &out.WindowsProfile + *out = new(KubernetesClusterNodePoolWindowsProfileParameters) + (*in).DeepCopyInto(*out) + } + if in.WorkloadRuntime != nil { + in, out := &in.WorkloadRuntime, &out.WorkloadRuntime + *out = new(string) + **out = **in + } + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, 
creating a new KubernetesClusterNodePoolParameters. +func (in *KubernetesClusterNodePoolParameters) DeepCopy() *KubernetesClusterNodePoolParameters { + if in == nil { + return nil + } + out := new(KubernetesClusterNodePoolParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubernetesClusterNodePoolSpec) DeepCopyInto(out *KubernetesClusterNodePoolSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesClusterNodePoolSpec. +func (in *KubernetesClusterNodePoolSpec) DeepCopy() *KubernetesClusterNodePoolSpec { + if in == nil { + return nil + } + out := new(KubernetesClusterNodePoolSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubernetesClusterNodePoolStatus) DeepCopyInto(out *KubernetesClusterNodePoolStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesClusterNodePoolStatus. +func (in *KubernetesClusterNodePoolStatus) DeepCopy() *KubernetesClusterNodePoolStatus { + if in == nil { + return nil + } + out := new(KubernetesClusterNodePoolStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KubernetesClusterNodePoolUpgradeSettingsInitParameters) DeepCopyInto(out *KubernetesClusterNodePoolUpgradeSettingsInitParameters) { + *out = *in + if in.MaxSurge != nil { + in, out := &in.MaxSurge, &out.MaxSurge + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesClusterNodePoolUpgradeSettingsInitParameters. +func (in *KubernetesClusterNodePoolUpgradeSettingsInitParameters) DeepCopy() *KubernetesClusterNodePoolUpgradeSettingsInitParameters { + if in == nil { + return nil + } + out := new(KubernetesClusterNodePoolUpgradeSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubernetesClusterNodePoolUpgradeSettingsObservation) DeepCopyInto(out *KubernetesClusterNodePoolUpgradeSettingsObservation) { + *out = *in + if in.MaxSurge != nil { + in, out := &in.MaxSurge, &out.MaxSurge + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesClusterNodePoolUpgradeSettingsObservation. +func (in *KubernetesClusterNodePoolUpgradeSettingsObservation) DeepCopy() *KubernetesClusterNodePoolUpgradeSettingsObservation { + if in == nil { + return nil + } + out := new(KubernetesClusterNodePoolUpgradeSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KubernetesClusterNodePoolUpgradeSettingsParameters) DeepCopyInto(out *KubernetesClusterNodePoolUpgradeSettingsParameters) { + *out = *in + if in.MaxSurge != nil { + in, out := &in.MaxSurge, &out.MaxSurge + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesClusterNodePoolUpgradeSettingsParameters. +func (in *KubernetesClusterNodePoolUpgradeSettingsParameters) DeepCopy() *KubernetesClusterNodePoolUpgradeSettingsParameters { + if in == nil { + return nil + } + out := new(KubernetesClusterNodePoolUpgradeSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubernetesClusterNodePoolWindowsProfileInitParameters) DeepCopyInto(out *KubernetesClusterNodePoolWindowsProfileInitParameters) { + *out = *in + if in.OutboundNATEnabled != nil { + in, out := &in.OutboundNATEnabled, &out.OutboundNATEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesClusterNodePoolWindowsProfileInitParameters. +func (in *KubernetesClusterNodePoolWindowsProfileInitParameters) DeepCopy() *KubernetesClusterNodePoolWindowsProfileInitParameters { + if in == nil { + return nil + } + out := new(KubernetesClusterNodePoolWindowsProfileInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KubernetesClusterNodePoolWindowsProfileObservation) DeepCopyInto(out *KubernetesClusterNodePoolWindowsProfileObservation) { + *out = *in + if in.OutboundNATEnabled != nil { + in, out := &in.OutboundNATEnabled, &out.OutboundNATEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesClusterNodePoolWindowsProfileObservation. +func (in *KubernetesClusterNodePoolWindowsProfileObservation) DeepCopy() *KubernetesClusterNodePoolWindowsProfileObservation { + if in == nil { + return nil + } + out := new(KubernetesClusterNodePoolWindowsProfileObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubernetesClusterNodePoolWindowsProfileParameters) DeepCopyInto(out *KubernetesClusterNodePoolWindowsProfileParameters) { + *out = *in + if in.OutboundNATEnabled != nil { + in, out := &in.OutboundNATEnabled, &out.OutboundNATEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesClusterNodePoolWindowsProfileParameters. +func (in *KubernetesClusterNodePoolWindowsProfileParameters) DeepCopy() *KubernetesClusterNodePoolWindowsProfileParameters { + if in == nil { + return nil + } + out := new(KubernetesClusterNodePoolWindowsProfileParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KubernetesClusterObservation) DeepCopyInto(out *KubernetesClusterObservation) { + *out = *in + if in.APIServerAccessProfile != nil { + in, out := &in.APIServerAccessProfile, &out.APIServerAccessProfile + *out = new(APIServerAccessProfileObservation) + (*in).DeepCopyInto(*out) + } + if in.APIServerAuthorizedIPRanges != nil { + in, out := &in.APIServerAuthorizedIPRanges, &out.APIServerAuthorizedIPRanges + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AciConnectorLinux != nil { + in, out := &in.AciConnectorLinux, &out.AciConnectorLinux + *out = new(AciConnectorLinuxObservation) + (*in).DeepCopyInto(*out) + } + if in.AutoScalerProfile != nil { + in, out := &in.AutoScalerProfile, &out.AutoScalerProfile + *out = new(AutoScalerProfileObservation) + (*in).DeepCopyInto(*out) + } + if in.AutomaticChannelUpgrade != nil { + in, out := &in.AutomaticChannelUpgrade, &out.AutomaticChannelUpgrade + *out = new(string) + **out = **in + } + if in.AzureActiveDirectoryRoleBasedAccessControl != nil { + in, out := &in.AzureActiveDirectoryRoleBasedAccessControl, &out.AzureActiveDirectoryRoleBasedAccessControl + *out = new(AzureActiveDirectoryRoleBasedAccessControlObservation) + (*in).DeepCopyInto(*out) + } + if in.AzurePolicyEnabled != nil { + in, out := &in.AzurePolicyEnabled, &out.AzurePolicyEnabled + *out = new(bool) + **out = **in + } + if in.ConfidentialComputing != nil { + in, out := &in.ConfidentialComputing, &out.ConfidentialComputing + *out = new(ConfidentialComputingObservation) + (*in).DeepCopyInto(*out) + } + if in.CurrentKubernetesVersion != nil { + in, out := &in.CurrentKubernetesVersion, &out.CurrentKubernetesVersion + *out = new(string) + **out = **in + } + if in.CustomCATrustCertificatesBase64 != nil { + in, out := &in.CustomCATrustCertificatesBase64, &out.CustomCATrustCertificatesBase64 + *out = make([]*string, len(*in)) + for i := range 
*in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DNSPrefix != nil { + in, out := &in.DNSPrefix, &out.DNSPrefix + *out = new(string) + **out = **in + } + if in.DNSPrefixPrivateCluster != nil { + in, out := &in.DNSPrefixPrivateCluster, &out.DNSPrefixPrivateCluster + *out = new(string) + **out = **in + } + if in.DefaultNodePool != nil { + in, out := &in.DefaultNodePool, &out.DefaultNodePool + *out = new(DefaultNodePoolObservation) + (*in).DeepCopyInto(*out) + } + if in.DiskEncryptionSetID != nil { + in, out := &in.DiskEncryptionSetID, &out.DiskEncryptionSetID + *out = new(string) + **out = **in + } + if in.EdgeZone != nil { + in, out := &in.EdgeZone, &out.EdgeZone + *out = new(string) + **out = **in + } + if in.EnablePodSecurityPolicy != nil { + in, out := &in.EnablePodSecurityPolicy, &out.EnablePodSecurityPolicy + *out = new(bool) + **out = **in + } + if in.Fqdn != nil { + in, out := &in.Fqdn, &out.Fqdn + *out = new(string) + **out = **in + } + if in.HTTPApplicationRoutingEnabled != nil { + in, out := &in.HTTPApplicationRoutingEnabled, &out.HTTPApplicationRoutingEnabled + *out = new(bool) + **out = **in + } + if in.HTTPApplicationRoutingZoneName != nil { + in, out := &in.HTTPApplicationRoutingZoneName, &out.HTTPApplicationRoutingZoneName + *out = new(string) + **out = **in + } + if in.HTTPProxyConfig != nil { + in, out := &in.HTTPProxyConfig, &out.HTTPProxyConfig + *out = new(HTTPProxyConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.ImageCleanerEnabled != nil { + in, out := &in.ImageCleanerEnabled, &out.ImageCleanerEnabled + *out = new(bool) + **out = **in + } + if in.ImageCleanerIntervalHours != nil { + in, out := &in.ImageCleanerIntervalHours, 
&out.ImageCleanerIntervalHours + *out = new(float64) + **out = **in + } + if in.IngressApplicationGateway != nil { + in, out := &in.IngressApplicationGateway, &out.IngressApplicationGateway + *out = new(IngressApplicationGatewayObservation) + (*in).DeepCopyInto(*out) + } + if in.KeyManagementService != nil { + in, out := &in.KeyManagementService, &out.KeyManagementService + *out = new(KeyManagementServiceObservation) + (*in).DeepCopyInto(*out) + } + if in.KeyVaultSecretsProvider != nil { + in, out := &in.KeyVaultSecretsProvider, &out.KeyVaultSecretsProvider + *out = new(KeyVaultSecretsProviderObservation) + (*in).DeepCopyInto(*out) + } + if in.KubeletIdentity != nil { + in, out := &in.KubeletIdentity, &out.KubeletIdentity + *out = new(KubeletIdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.KubernetesVersion != nil { + in, out := &in.KubernetesVersion, &out.KubernetesVersion + *out = new(string) + **out = **in + } + if in.LinuxProfile != nil { + in, out := &in.LinuxProfile, &out.LinuxProfile + *out = new(LinuxProfileObservation) + (*in).DeepCopyInto(*out) + } + if in.LocalAccountDisabled != nil { + in, out := &in.LocalAccountDisabled, &out.LocalAccountDisabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MaintenanceWindow != nil { + in, out := &in.MaintenanceWindow, &out.MaintenanceWindow + *out = new(MaintenanceWindowObservation) + (*in).DeepCopyInto(*out) + } + if in.MaintenanceWindowAutoUpgrade != nil { + in, out := &in.MaintenanceWindowAutoUpgrade, &out.MaintenanceWindowAutoUpgrade + *out = new(MaintenanceWindowAutoUpgradeObservation) + (*in).DeepCopyInto(*out) + } + if in.MaintenanceWindowNodeOs != nil { + in, out := &in.MaintenanceWindowNodeOs, &out.MaintenanceWindowNodeOs + *out = new(MaintenanceWindowNodeOsObservation) + (*in).DeepCopyInto(*out) + } + if in.MicrosoftDefender != nil { + in, out := &in.MicrosoftDefender, &out.MicrosoftDefender 
+ *out = new(MicrosoftDefenderObservation) + (*in).DeepCopyInto(*out) + } + if in.MonitorMetrics != nil { + in, out := &in.MonitorMetrics, &out.MonitorMetrics + *out = new(MonitorMetricsObservation) + (*in).DeepCopyInto(*out) + } + if in.NetworkProfile != nil { + in, out := &in.NetworkProfile, &out.NetworkProfile + *out = new(NetworkProfileObservation) + (*in).DeepCopyInto(*out) + } + if in.NodeOsChannelUpgrade != nil { + in, out := &in.NodeOsChannelUpgrade, &out.NodeOsChannelUpgrade + *out = new(string) + **out = **in + } + if in.NodeResourceGroup != nil { + in, out := &in.NodeResourceGroup, &out.NodeResourceGroup + *out = new(string) + **out = **in + } + if in.NodeResourceGroupID != nil { + in, out := &in.NodeResourceGroupID, &out.NodeResourceGroupID + *out = new(string) + **out = **in + } + if in.OidcIssuerEnabled != nil { + in, out := &in.OidcIssuerEnabled, &out.OidcIssuerEnabled + *out = new(bool) + **out = **in + } + if in.OidcIssuerURL != nil { + in, out := &in.OidcIssuerURL, &out.OidcIssuerURL + *out = new(string) + **out = **in + } + if in.OmsAgent != nil { + in, out := &in.OmsAgent, &out.OmsAgent + *out = new(OmsAgentObservation) + (*in).DeepCopyInto(*out) + } + if in.OpenServiceMeshEnabled != nil { + in, out := &in.OpenServiceMeshEnabled, &out.OpenServiceMeshEnabled + *out = new(bool) + **out = **in + } + if in.PortalFqdn != nil { + in, out := &in.PortalFqdn, &out.PortalFqdn + *out = new(string) + **out = **in + } + if in.PrivateClusterEnabled != nil { + in, out := &in.PrivateClusterEnabled, &out.PrivateClusterEnabled + *out = new(bool) + **out = **in + } + if in.PrivateClusterPublicFqdnEnabled != nil { + in, out := &in.PrivateClusterPublicFqdnEnabled, &out.PrivateClusterPublicFqdnEnabled + *out = new(bool) + **out = **in + } + if in.PrivateDNSZoneID != nil { + in, out := &in.PrivateDNSZoneID, &out.PrivateDNSZoneID + *out = new(string) + **out = **in + } + if in.PrivateFqdn != nil { + in, out := &in.PrivateFqdn, &out.PrivateFqdn + *out = new(string) + 
**out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.RoleBasedAccessControlEnabled != nil { + in, out := &in.RoleBasedAccessControlEnabled, &out.RoleBasedAccessControlEnabled + *out = new(bool) + **out = **in + } + if in.RunCommandEnabled != nil { + in, out := &in.RunCommandEnabled, &out.RunCommandEnabled + *out = new(bool) + **out = **in + } + if in.ServiceMeshProfile != nil { + in, out := &in.ServiceMeshProfile, &out.ServiceMeshProfile + *out = new(ServiceMeshProfileObservation) + (*in).DeepCopyInto(*out) + } + if in.ServicePrincipal != nil { + in, out := &in.ServicePrincipal, &out.ServicePrincipal + *out = new(ServicePrincipalObservation) + (*in).DeepCopyInto(*out) + } + if in.SkuTier != nil { + in, out := &in.SkuTier, &out.SkuTier + *out = new(string) + **out = **in + } + if in.StorageProfile != nil { + in, out := &in.StorageProfile, &out.StorageProfile + *out = new(StorageProfileObservation) + (*in).DeepCopyInto(*out) + } + if in.SupportPlan != nil { + in, out := &in.SupportPlan, &out.SupportPlan + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.WebAppRouting != nil { + in, out := &in.WebAppRouting, &out.WebAppRouting + *out = new(WebAppRoutingObservation) + (*in).DeepCopyInto(*out) + } + if in.WindowsProfile != nil { + in, out := &in.WindowsProfile, &out.WindowsProfile + *out = new(WindowsProfileObservation) + (*in).DeepCopyInto(*out) + } + if in.WorkloadAutoscalerProfile != nil { + in, 
out := &in.WorkloadAutoscalerProfile, &out.WorkloadAutoscalerProfile + *out = new(WorkloadAutoscalerProfileObservation) + (*in).DeepCopyInto(*out) + } + if in.WorkloadIdentityEnabled != nil { + in, out := &in.WorkloadIdentityEnabled, &out.WorkloadIdentityEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesClusterObservation. +func (in *KubernetesClusterObservation) DeepCopy() *KubernetesClusterObservation { + if in == nil { + return nil + } + out := new(KubernetesClusterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubernetesClusterParameters) DeepCopyInto(out *KubernetesClusterParameters) { + *out = *in + if in.APIServerAccessProfile != nil { + in, out := &in.APIServerAccessProfile, &out.APIServerAccessProfile + *out = new(APIServerAccessProfileParameters) + (*in).DeepCopyInto(*out) + } + if in.APIServerAuthorizedIPRanges != nil { + in, out := &in.APIServerAuthorizedIPRanges, &out.APIServerAuthorizedIPRanges + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AciConnectorLinux != nil { + in, out := &in.AciConnectorLinux, &out.AciConnectorLinux + *out = new(AciConnectorLinuxParameters) + (*in).DeepCopyInto(*out) + } + if in.AutoScalerProfile != nil { + in, out := &in.AutoScalerProfile, &out.AutoScalerProfile + *out = new(AutoScalerProfileParameters) + (*in).DeepCopyInto(*out) + } + if in.AutomaticChannelUpgrade != nil { + in, out := &in.AutomaticChannelUpgrade, &out.AutomaticChannelUpgrade + *out = new(string) + **out = **in + } + if in.AzureActiveDirectoryRoleBasedAccessControl != nil { + in, out := &in.AzureActiveDirectoryRoleBasedAccessControl, &out.AzureActiveDirectoryRoleBasedAccessControl + *out = 
new(AzureActiveDirectoryRoleBasedAccessControlParameters) + (*in).DeepCopyInto(*out) + } + if in.AzurePolicyEnabled != nil { + in, out := &in.AzurePolicyEnabled, &out.AzurePolicyEnabled + *out = new(bool) + **out = **in + } + if in.ConfidentialComputing != nil { + in, out := &in.ConfidentialComputing, &out.ConfidentialComputing + *out = new(ConfidentialComputingParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomCATrustCertificatesBase64 != nil { + in, out := &in.CustomCATrustCertificatesBase64, &out.CustomCATrustCertificatesBase64 + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DNSPrefix != nil { + in, out := &in.DNSPrefix, &out.DNSPrefix + *out = new(string) + **out = **in + } + if in.DNSPrefixPrivateCluster != nil { + in, out := &in.DNSPrefixPrivateCluster, &out.DNSPrefixPrivateCluster + *out = new(string) + **out = **in + } + if in.DefaultNodePool != nil { + in, out := &in.DefaultNodePool, &out.DefaultNodePool + *out = new(DefaultNodePoolParameters) + (*in).DeepCopyInto(*out) + } + if in.DiskEncryptionSetID != nil { + in, out := &in.DiskEncryptionSetID, &out.DiskEncryptionSetID + *out = new(string) + **out = **in + } + if in.EdgeZone != nil { + in, out := &in.EdgeZone, &out.EdgeZone + *out = new(string) + **out = **in + } + if in.EnablePodSecurityPolicy != nil { + in, out := &in.EnablePodSecurityPolicy, &out.EnablePodSecurityPolicy + *out = new(bool) + **out = **in + } + if in.HTTPApplicationRoutingEnabled != nil { + in, out := &in.HTTPApplicationRoutingEnabled, &out.HTTPApplicationRoutingEnabled + *out = new(bool) + **out = **in + } + if in.HTTPProxyConfig != nil { + in, out := &in.HTTPProxyConfig, &out.HTTPProxyConfig + *out = new(HTTPProxyConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityParameters) + (*in).DeepCopyInto(*out) + } + if 
in.ImageCleanerEnabled != nil { + in, out := &in.ImageCleanerEnabled, &out.ImageCleanerEnabled + *out = new(bool) + **out = **in + } + if in.ImageCleanerIntervalHours != nil { + in, out := &in.ImageCleanerIntervalHours, &out.ImageCleanerIntervalHours + *out = new(float64) + **out = **in + } + if in.IngressApplicationGateway != nil { + in, out := &in.IngressApplicationGateway, &out.IngressApplicationGateway + *out = new(IngressApplicationGatewayParameters) + (*in).DeepCopyInto(*out) + } + if in.KeyManagementService != nil { + in, out := &in.KeyManagementService, &out.KeyManagementService + *out = new(KeyManagementServiceParameters) + (*in).DeepCopyInto(*out) + } + if in.KeyVaultSecretsProvider != nil { + in, out := &in.KeyVaultSecretsProvider, &out.KeyVaultSecretsProvider + *out = new(KeyVaultSecretsProviderParameters) + (*in).DeepCopyInto(*out) + } + if in.KubeletIdentity != nil { + in, out := &in.KubeletIdentity, &out.KubeletIdentity + *out = new(KubeletIdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.KubernetesVersion != nil { + in, out := &in.KubernetesVersion, &out.KubernetesVersion + *out = new(string) + **out = **in + } + if in.LinuxProfile != nil { + in, out := &in.LinuxProfile, &out.LinuxProfile + *out = new(LinuxProfileParameters) + (*in).DeepCopyInto(*out) + } + if in.LocalAccountDisabled != nil { + in, out := &in.LocalAccountDisabled, &out.LocalAccountDisabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MaintenanceWindow != nil { + in, out := &in.MaintenanceWindow, &out.MaintenanceWindow + *out = new(MaintenanceWindowParameters) + (*in).DeepCopyInto(*out) + } + if in.MaintenanceWindowAutoUpgrade != nil { + in, out := &in.MaintenanceWindowAutoUpgrade, &out.MaintenanceWindowAutoUpgrade + *out = new(MaintenanceWindowAutoUpgradeParameters) + (*in).DeepCopyInto(*out) + } + if in.MaintenanceWindowNodeOs != nil { + in, out := 
&in.MaintenanceWindowNodeOs, &out.MaintenanceWindowNodeOs + *out = new(MaintenanceWindowNodeOsParameters) + (*in).DeepCopyInto(*out) + } + if in.MicrosoftDefender != nil { + in, out := &in.MicrosoftDefender, &out.MicrosoftDefender + *out = new(MicrosoftDefenderParameters) + (*in).DeepCopyInto(*out) + } + if in.MonitorMetrics != nil { + in, out := &in.MonitorMetrics, &out.MonitorMetrics + *out = new(MonitorMetricsParameters) + (*in).DeepCopyInto(*out) + } + if in.NetworkProfile != nil { + in, out := &in.NetworkProfile, &out.NetworkProfile + *out = new(NetworkProfileParameters) + (*in).DeepCopyInto(*out) + } + if in.NodeOsChannelUpgrade != nil { + in, out := &in.NodeOsChannelUpgrade, &out.NodeOsChannelUpgrade + *out = new(string) + **out = **in + } + if in.NodeResourceGroup != nil { + in, out := &in.NodeResourceGroup, &out.NodeResourceGroup + *out = new(string) + **out = **in + } + if in.OidcIssuerEnabled != nil { + in, out := &in.OidcIssuerEnabled, &out.OidcIssuerEnabled + *out = new(bool) + **out = **in + } + if in.OmsAgent != nil { + in, out := &in.OmsAgent, &out.OmsAgent + *out = new(OmsAgentParameters) + (*in).DeepCopyInto(*out) + } + if in.OpenServiceMeshEnabled != nil { + in, out := &in.OpenServiceMeshEnabled, &out.OpenServiceMeshEnabled + *out = new(bool) + **out = **in + } + if in.PrivateClusterEnabled != nil { + in, out := &in.PrivateClusterEnabled, &out.PrivateClusterEnabled + *out = new(bool) + **out = **in + } + if in.PrivateClusterPublicFqdnEnabled != nil { + in, out := &in.PrivateClusterPublicFqdnEnabled, &out.PrivateClusterPublicFqdnEnabled + *out = new(bool) + **out = **in + } + if in.PrivateDNSZoneID != nil { + in, out := &in.PrivateDNSZoneID, &out.PrivateDNSZoneID + *out = new(string) + **out = **in + } + if in.PrivateDNSZoneIDRef != nil { + in, out := &in.PrivateDNSZoneIDRef, &out.PrivateDNSZoneIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PrivateDNSZoneIDSelector != nil { + in, out := &in.PrivateDNSZoneIDSelector, 
&out.PrivateDNSZoneIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RoleBasedAccessControlEnabled != nil { + in, out := &in.RoleBasedAccessControlEnabled, &out.RoleBasedAccessControlEnabled + *out = new(bool) + **out = **in + } + if in.RunCommandEnabled != nil { + in, out := &in.RunCommandEnabled, &out.RunCommandEnabled + *out = new(bool) + **out = **in + } + if in.ServiceMeshProfile != nil { + in, out := &in.ServiceMeshProfile, &out.ServiceMeshProfile + *out = new(ServiceMeshProfileParameters) + (*in).DeepCopyInto(*out) + } + if in.ServicePrincipal != nil { + in, out := &in.ServicePrincipal, &out.ServicePrincipal + *out = new(ServicePrincipalParameters) + (*in).DeepCopyInto(*out) + } + if in.SkuTier != nil { + in, out := &in.SkuTier, &out.SkuTier + *out = new(string) + **out = **in + } + if in.StorageProfile != nil { + in, out := &in.StorageProfile, &out.StorageProfile + *out = new(StorageProfileParameters) + (*in).DeepCopyInto(*out) + } + if in.SupportPlan != nil { + in, out := &in.SupportPlan, &out.SupportPlan + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + 
**out = **in + } + (*out)[key] = outVal + } + } + if in.WebAppRouting != nil { + in, out := &in.WebAppRouting, &out.WebAppRouting + *out = new(WebAppRoutingParameters) + (*in).DeepCopyInto(*out) + } + if in.WindowsProfile != nil { + in, out := &in.WindowsProfile, &out.WindowsProfile + *out = new(WindowsProfileParameters) + (*in).DeepCopyInto(*out) + } + if in.WorkloadAutoscalerProfile != nil { + in, out := &in.WorkloadAutoscalerProfile, &out.WorkloadAutoscalerProfile + *out = new(WorkloadAutoscalerProfileParameters) + (*in).DeepCopyInto(*out) + } + if in.WorkloadIdentityEnabled != nil { + in, out := &in.WorkloadIdentityEnabled, &out.WorkloadIdentityEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesClusterParameters. +func (in *KubernetesClusterParameters) DeepCopy() *KubernetesClusterParameters { + if in == nil { + return nil + } + out := new(KubernetesClusterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubernetesClusterSpec) DeepCopyInto(out *KubernetesClusterSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesClusterSpec. +func (in *KubernetesClusterSpec) DeepCopy() *KubernetesClusterSpec { + if in == nil { + return nil + } + out := new(KubernetesClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *KubernetesClusterStatus) DeepCopyInto(out *KubernetesClusterStatus) {
	// Start from a shallow copy; the embedded structs below own pointer data
	// and are therefore deep-copied explicitly.
	*out = *in
	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
	in.AtProvider.DeepCopyInto(&out.AtProvider)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesClusterStatus.
func (in *KubernetesClusterStatus) DeepCopy() *KubernetesClusterStatus {
	if in == nil {
		return nil
	}
	out := new(KubernetesClusterStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubernetesFleetManager) DeepCopyInto(out *KubernetesFleetManager) {
	*out = *in
	// Plain assignment — the generator emits no DeepCopyInto for TypeMeta,
	// presumably because it holds only value fields (confirm in apimachinery).
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesFleetManager.
func (in *KubernetesFleetManager) DeepCopy() *KubernetesFleetManager {
	if in == nil {
		return nil
	}
	out := new(KubernetesFleetManager)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *KubernetesFleetManager) DeepCopyObject() runtime.Object {
	// Returning a typed nil pointer as runtime.Object would yield a non-nil
	// interface; the explicit nil check avoids that.
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubernetesFleetManagerInitParameters) DeepCopyInto(out *KubernetesFleetManagerInitParameters) {
	// Shallow copy first; each non-nil pointer field is then re-allocated so
	// the clone shares no memory with the source.
	*out = *in
	if in.HubProfile != nil {
		in, out := &in.HubProfile, &out.HubProfile // shadow in/out: standard generated pointer-copy idiom
		*out = new(HubProfileInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Location != nil {
		in, out := &in.Location, &out.Location
		*out = new(string)
		**out = **in
	}
	if in.Tags != nil {
		in, out := &in.Tags, &out.Tags
		*out = make(map[string]*string, len(*in))
		// Each map value gets a freshly allocated string; nil values stay nil.
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesFleetManagerInitParameters.
func (in *KubernetesFleetManagerInitParameters) DeepCopy() *KubernetesFleetManagerInitParameters {
	if in == nil {
		return nil
	}
	out := new(KubernetesFleetManagerInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubernetesFleetManagerList) DeepCopyInto(out *KubernetesFleetManagerList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		// Allocate a new backing array and deep-copy each element in place.
		*out = make([]KubernetesFleetManager, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesFleetManagerList.
func (in *KubernetesFleetManagerList) DeepCopy() *KubernetesFleetManagerList {
	if in == nil {
		return nil
	}
	out := new(KubernetesFleetManagerList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *KubernetesFleetManagerList) DeepCopyObject() runtime.Object {
	// Nil check prevents returning a typed-nil pointer wrapped in a non-nil
	// runtime.Object interface.
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubernetesFleetManagerObservation) DeepCopyInto(out *KubernetesFleetManagerObservation) {
	// Shallow copy, then re-allocate every non-nil pointer field.
	*out = *in
	if in.HubProfile != nil {
		in, out := &in.HubProfile, &out.HubProfile // shadowed in/out: generated pointer-copy idiom
		*out = new(HubProfileObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.ID != nil {
		in, out := &in.ID, &out.ID
		*out = new(string)
		**out = **in
	}
	if in.Location != nil {
		in, out := &in.Location, &out.Location
		*out = new(string)
		**out = **in
	}
	if in.ResourceGroupName != nil {
		in, out := &in.ResourceGroupName, &out.ResourceGroupName
		*out = new(string)
		**out = **in
	}
	if in.Tags != nil {
		in, out := &in.Tags, &out.Tags
		*out = make(map[string]*string, len(*in))
		// Fresh string allocation per value; nil entries are preserved as nil.
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesFleetManagerObservation.
func (in *KubernetesFleetManagerObservation) DeepCopy() *KubernetesFleetManagerObservation {
	if in == nil {
		return nil
	}
	out := new(KubernetesFleetManagerObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubernetesFleetManagerParameters) DeepCopyInto(out *KubernetesFleetManagerParameters) {
	// Shallow copy, then re-allocate every non-nil pointer field so the clone
	// is fully independent of the source.
	*out = *in
	if in.HubProfile != nil {
		in, out := &in.HubProfile, &out.HubProfile // shadowed in/out: generated pointer-copy idiom
		*out = new(HubProfileParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Location != nil {
		in, out := &in.Location, &out.Location
		*out = new(string)
		**out = **in
	}
	if in.ResourceGroupName != nil {
		in, out := &in.ResourceGroupName, &out.ResourceGroupName
		*out = new(string)
		**out = **in
	}
	// Crossplane cross-resource reference and selector for ResourceGroupName.
	if in.ResourceGroupNameRef != nil {
		in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef
		*out = new(v1.Reference)
		(*in).DeepCopyInto(*out)
	}
	if in.ResourceGroupNameSelector != nil {
		in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector
		*out = new(v1.Selector)
		(*in).DeepCopyInto(*out)
	}
	if in.Tags != nil {
		in, out := &in.Tags, &out.Tags
		*out = make(map[string]*string, len(*in))
		// Fresh string allocation per value; nil entries are preserved as nil.
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesFleetManagerParameters.
func (in *KubernetesFleetManagerParameters) DeepCopy() *KubernetesFleetManagerParameters {
	if in == nil {
		return nil
	}
	out := new(KubernetesFleetManagerParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubernetesFleetManagerSpec) DeepCopyInto(out *KubernetesFleetManagerSpec) {
	*out = *in
	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
	in.ForProvider.DeepCopyInto(&out.ForProvider)
	in.InitProvider.DeepCopyInto(&out.InitProvider)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesFleetManagerSpec.
func (in *KubernetesFleetManagerSpec) DeepCopy() *KubernetesFleetManagerSpec {
	if in == nil {
		return nil
	}
	out := new(KubernetesFleetManagerSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubernetesFleetManagerStatus) DeepCopyInto(out *KubernetesFleetManagerStatus) {
	// Shallow copy; the embedded structs are deep-copied explicitly below.
	*out = *in
	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
	in.AtProvider.DeepCopyInto(&out.AtProvider)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesFleetManagerStatus.
func (in *KubernetesFleetManagerStatus) DeepCopy() *KubernetesFleetManagerStatus {
	if in == nil {
		return nil
	}
	out := new(KubernetesFleetManagerStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LinuxOsConfigInitParameters) DeepCopyInto(out *LinuxOsConfigInitParameters) {
	// Shallow copy, then re-allocate every non-nil pointer field.
	*out = *in
	if in.SwapFileSizeMb != nil {
		in, out := &in.SwapFileSizeMb, &out.SwapFileSizeMb // shadowed in/out: generated pointer-copy idiom
		*out = new(float64)
		**out = **in
	}
	if in.SysctlConfig != nil {
		in, out := &in.SysctlConfig, &out.SysctlConfig
		*out = new(SysctlConfigInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.TransparentHugePageDefrag != nil {
		in, out := &in.TransparentHugePageDefrag, &out.TransparentHugePageDefrag
		*out = new(string)
		**out = **in
	}
	if in.TransparentHugePageEnabled != nil {
		in, out := &in.TransparentHugePageEnabled, &out.TransparentHugePageEnabled
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxOsConfigInitParameters.
func (in *LinuxOsConfigInitParameters) DeepCopy() *LinuxOsConfigInitParameters {
	if in == nil {
		return nil
	}
	out := new(LinuxOsConfigInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LinuxOsConfigObservation) DeepCopyInto(out *LinuxOsConfigObservation) {
	// Shallow copy, then re-allocate every non-nil pointer field.
	*out = *in
	if in.SwapFileSizeMb != nil {
		in, out := &in.SwapFileSizeMb, &out.SwapFileSizeMb // shadowed in/out: generated pointer-copy idiom
		*out = new(float64)
		**out = **in
	}
	if in.SysctlConfig != nil {
		in, out := &in.SysctlConfig, &out.SysctlConfig
		*out = new(SysctlConfigObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.TransparentHugePageDefrag != nil {
		in, out := &in.TransparentHugePageDefrag, &out.TransparentHugePageDefrag
		*out = new(string)
		**out = **in
	}
	if in.TransparentHugePageEnabled != nil {
		in, out := &in.TransparentHugePageEnabled, &out.TransparentHugePageEnabled
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxOsConfigObservation.
func (in *LinuxOsConfigObservation) DeepCopy() *LinuxOsConfigObservation {
	if in == nil {
		return nil
	}
	out := new(LinuxOsConfigObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LinuxOsConfigParameters) DeepCopyInto(out *LinuxOsConfigParameters) {
	// Shallow copy, then re-allocate every non-nil pointer field.
	*out = *in
	if in.SwapFileSizeMb != nil {
		in, out := &in.SwapFileSizeMb, &out.SwapFileSizeMb // shadowed in/out: generated pointer-copy idiom
		*out = new(float64)
		**out = **in
	}
	if in.SysctlConfig != nil {
		in, out := &in.SysctlConfig, &out.SysctlConfig
		*out = new(SysctlConfigParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.TransparentHugePageDefrag != nil {
		in, out := &in.TransparentHugePageDefrag, &out.TransparentHugePageDefrag
		*out = new(string)
		**out = **in
	}
	if in.TransparentHugePageEnabled != nil {
		in, out := &in.TransparentHugePageEnabled, &out.TransparentHugePageEnabled
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxOsConfigParameters.
func (in *LinuxOsConfigParameters) DeepCopy() *LinuxOsConfigParameters {
	if in == nil {
		return nil
	}
	out := new(LinuxOsConfigParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LinuxOsConfigSysctlConfigInitParameters) DeepCopyInto(out *LinuxOsConfigSysctlConfigInitParameters) { + *out = *in + if in.FsAioMaxNr != nil { + in, out := &in.FsAioMaxNr, &out.FsAioMaxNr + *out = new(float64) + **out = **in + } + if in.FsFileMax != nil { + in, out := &in.FsFileMax, &out.FsFileMax + *out = new(float64) + **out = **in + } + if in.FsInotifyMaxUserWatches != nil { + in, out := &in.FsInotifyMaxUserWatches, &out.FsInotifyMaxUserWatches + *out = new(float64) + **out = **in + } + if in.FsNrOpen != nil { + in, out := &in.FsNrOpen, &out.FsNrOpen + *out = new(float64) + **out = **in + } + if in.KernelThreadsMax != nil { + in, out := &in.KernelThreadsMax, &out.KernelThreadsMax + *out = new(float64) + **out = **in + } + if in.NetCoreNetdevMaxBacklog != nil { + in, out := &in.NetCoreNetdevMaxBacklog, &out.NetCoreNetdevMaxBacklog + *out = new(float64) + **out = **in + } + if in.NetCoreOptmemMax != nil { + in, out := &in.NetCoreOptmemMax, &out.NetCoreOptmemMax + *out = new(float64) + **out = **in + } + if in.NetCoreRmemDefault != nil { + in, out := &in.NetCoreRmemDefault, &out.NetCoreRmemDefault + *out = new(float64) + **out = **in + } + if in.NetCoreRmemMax != nil { + in, out := &in.NetCoreRmemMax, &out.NetCoreRmemMax + *out = new(float64) + **out = **in + } + if in.NetCoreSomaxconn != nil { + in, out := &in.NetCoreSomaxconn, &out.NetCoreSomaxconn + *out = new(float64) + **out = **in + } + if in.NetCoreWmemDefault != nil { + in, out := &in.NetCoreWmemDefault, &out.NetCoreWmemDefault + *out = new(float64) + **out = **in + } + if in.NetCoreWmemMax != nil { + in, out := &in.NetCoreWmemMax, &out.NetCoreWmemMax + *out = new(float64) + **out = **in + } + if in.NetIPv4IPLocalPortRangeMax != nil { + in, out := &in.NetIPv4IPLocalPortRangeMax, &out.NetIPv4IPLocalPortRangeMax + *out = new(float64) + **out = **in + } + if in.NetIPv4IPLocalPortRangeMin != nil { + in, out := &in.NetIPv4IPLocalPortRangeMin, &out.NetIPv4IPLocalPortRangeMin + *out = new(float64) + 
**out = **in + } + if in.NetIPv4NeighDefaultGcThresh1 != nil { + in, out := &in.NetIPv4NeighDefaultGcThresh1, &out.NetIPv4NeighDefaultGcThresh1 + *out = new(float64) + **out = **in + } + if in.NetIPv4NeighDefaultGcThresh2 != nil { + in, out := &in.NetIPv4NeighDefaultGcThresh2, &out.NetIPv4NeighDefaultGcThresh2 + *out = new(float64) + **out = **in + } + if in.NetIPv4NeighDefaultGcThresh3 != nil { + in, out := &in.NetIPv4NeighDefaultGcThresh3, &out.NetIPv4NeighDefaultGcThresh3 + *out = new(float64) + **out = **in + } + if in.NetIPv4TCPFinTimeout != nil { + in, out := &in.NetIPv4TCPFinTimeout, &out.NetIPv4TCPFinTimeout + *out = new(float64) + **out = **in + } + if in.NetIPv4TCPKeepaliveIntvl != nil { + in, out := &in.NetIPv4TCPKeepaliveIntvl, &out.NetIPv4TCPKeepaliveIntvl + *out = new(float64) + **out = **in + } + if in.NetIPv4TCPKeepaliveProbes != nil { + in, out := &in.NetIPv4TCPKeepaliveProbes, &out.NetIPv4TCPKeepaliveProbes + *out = new(float64) + **out = **in + } + if in.NetIPv4TCPKeepaliveTime != nil { + in, out := &in.NetIPv4TCPKeepaliveTime, &out.NetIPv4TCPKeepaliveTime + *out = new(float64) + **out = **in + } + if in.NetIPv4TCPMaxSynBacklog != nil { + in, out := &in.NetIPv4TCPMaxSynBacklog, &out.NetIPv4TCPMaxSynBacklog + *out = new(float64) + **out = **in + } + if in.NetIPv4TCPMaxTwBuckets != nil { + in, out := &in.NetIPv4TCPMaxTwBuckets, &out.NetIPv4TCPMaxTwBuckets + *out = new(float64) + **out = **in + } + if in.NetIPv4TCPTwReuse != nil { + in, out := &in.NetIPv4TCPTwReuse, &out.NetIPv4TCPTwReuse + *out = new(bool) + **out = **in + } + if in.NetNetfilterNfConntrackBuckets != nil { + in, out := &in.NetNetfilterNfConntrackBuckets, &out.NetNetfilterNfConntrackBuckets + *out = new(float64) + **out = **in + } + if in.NetNetfilterNfConntrackMax != nil { + in, out := &in.NetNetfilterNfConntrackMax, &out.NetNetfilterNfConntrackMax + *out = new(float64) + **out = **in + } + if in.VMMaxMapCount != nil { + in, out := &in.VMMaxMapCount, &out.VMMaxMapCount + *out = 
new(float64) + **out = **in + } + if in.VMSwappiness != nil { + in, out := &in.VMSwappiness, &out.VMSwappiness + *out = new(float64) + **out = **in + } + if in.VMVfsCachePressure != nil { + in, out := &in.VMVfsCachePressure, &out.VMVfsCachePressure + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxOsConfigSysctlConfigInitParameters. +func (in *LinuxOsConfigSysctlConfigInitParameters) DeepCopy() *LinuxOsConfigSysctlConfigInitParameters { + if in == nil { + return nil + } + out := new(LinuxOsConfigSysctlConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxOsConfigSysctlConfigObservation) DeepCopyInto(out *LinuxOsConfigSysctlConfigObservation) { + *out = *in + if in.FsAioMaxNr != nil { + in, out := &in.FsAioMaxNr, &out.FsAioMaxNr + *out = new(float64) + **out = **in + } + if in.FsFileMax != nil { + in, out := &in.FsFileMax, &out.FsFileMax + *out = new(float64) + **out = **in + } + if in.FsInotifyMaxUserWatches != nil { + in, out := &in.FsInotifyMaxUserWatches, &out.FsInotifyMaxUserWatches + *out = new(float64) + **out = **in + } + if in.FsNrOpen != nil { + in, out := &in.FsNrOpen, &out.FsNrOpen + *out = new(float64) + **out = **in + } + if in.KernelThreadsMax != nil { + in, out := &in.KernelThreadsMax, &out.KernelThreadsMax + *out = new(float64) + **out = **in + } + if in.NetCoreNetdevMaxBacklog != nil { + in, out := &in.NetCoreNetdevMaxBacklog, &out.NetCoreNetdevMaxBacklog + *out = new(float64) + **out = **in + } + if in.NetCoreOptmemMax != nil { + in, out := &in.NetCoreOptmemMax, &out.NetCoreOptmemMax + *out = new(float64) + **out = **in + } + if in.NetCoreRmemDefault != nil { + in, out := &in.NetCoreRmemDefault, &out.NetCoreRmemDefault + *out = new(float64) + **out = **in + } + if in.NetCoreRmemMax != nil { + in, out := 
&in.NetCoreRmemMax, &out.NetCoreRmemMax + *out = new(float64) + **out = **in + } + if in.NetCoreSomaxconn != nil { + in, out := &in.NetCoreSomaxconn, &out.NetCoreSomaxconn + *out = new(float64) + **out = **in + } + if in.NetCoreWmemDefault != nil { + in, out := &in.NetCoreWmemDefault, &out.NetCoreWmemDefault + *out = new(float64) + **out = **in + } + if in.NetCoreWmemMax != nil { + in, out := &in.NetCoreWmemMax, &out.NetCoreWmemMax + *out = new(float64) + **out = **in + } + if in.NetIPv4IPLocalPortRangeMax != nil { + in, out := &in.NetIPv4IPLocalPortRangeMax, &out.NetIPv4IPLocalPortRangeMax + *out = new(float64) + **out = **in + } + if in.NetIPv4IPLocalPortRangeMin != nil { + in, out := &in.NetIPv4IPLocalPortRangeMin, &out.NetIPv4IPLocalPortRangeMin + *out = new(float64) + **out = **in + } + if in.NetIPv4NeighDefaultGcThresh1 != nil { + in, out := &in.NetIPv4NeighDefaultGcThresh1, &out.NetIPv4NeighDefaultGcThresh1 + *out = new(float64) + **out = **in + } + if in.NetIPv4NeighDefaultGcThresh2 != nil { + in, out := &in.NetIPv4NeighDefaultGcThresh2, &out.NetIPv4NeighDefaultGcThresh2 + *out = new(float64) + **out = **in + } + if in.NetIPv4NeighDefaultGcThresh3 != nil { + in, out := &in.NetIPv4NeighDefaultGcThresh3, &out.NetIPv4NeighDefaultGcThresh3 + *out = new(float64) + **out = **in + } + if in.NetIPv4TCPFinTimeout != nil { + in, out := &in.NetIPv4TCPFinTimeout, &out.NetIPv4TCPFinTimeout + *out = new(float64) + **out = **in + } + if in.NetIPv4TCPKeepaliveIntvl != nil { + in, out := &in.NetIPv4TCPKeepaliveIntvl, &out.NetIPv4TCPKeepaliveIntvl + *out = new(float64) + **out = **in + } + if in.NetIPv4TCPKeepaliveProbes != nil { + in, out := &in.NetIPv4TCPKeepaliveProbes, &out.NetIPv4TCPKeepaliveProbes + *out = new(float64) + **out = **in + } + if in.NetIPv4TCPKeepaliveTime != nil { + in, out := &in.NetIPv4TCPKeepaliveTime, &out.NetIPv4TCPKeepaliveTime + *out = new(float64) + **out = **in + } + if in.NetIPv4TCPMaxSynBacklog != nil { + in, out := &in.NetIPv4TCPMaxSynBacklog, 
&out.NetIPv4TCPMaxSynBacklog + *out = new(float64) + **out = **in + } + if in.NetIPv4TCPMaxTwBuckets != nil { + in, out := &in.NetIPv4TCPMaxTwBuckets, &out.NetIPv4TCPMaxTwBuckets + *out = new(float64) + **out = **in + } + if in.NetIPv4TCPTwReuse != nil { + in, out := &in.NetIPv4TCPTwReuse, &out.NetIPv4TCPTwReuse + *out = new(bool) + **out = **in + } + if in.NetNetfilterNfConntrackBuckets != nil { + in, out := &in.NetNetfilterNfConntrackBuckets, &out.NetNetfilterNfConntrackBuckets + *out = new(float64) + **out = **in + } + if in.NetNetfilterNfConntrackMax != nil { + in, out := &in.NetNetfilterNfConntrackMax, &out.NetNetfilterNfConntrackMax + *out = new(float64) + **out = **in + } + if in.VMMaxMapCount != nil { + in, out := &in.VMMaxMapCount, &out.VMMaxMapCount + *out = new(float64) + **out = **in + } + if in.VMSwappiness != nil { + in, out := &in.VMSwappiness, &out.VMSwappiness + *out = new(float64) + **out = **in + } + if in.VMVfsCachePressure != nil { + in, out := &in.VMVfsCachePressure, &out.VMVfsCachePressure + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxOsConfigSysctlConfigObservation. +func (in *LinuxOsConfigSysctlConfigObservation) DeepCopy() *LinuxOsConfigSysctlConfigObservation { + if in == nil { + return nil + } + out := new(LinuxOsConfigSysctlConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *LinuxOsConfigSysctlConfigParameters) DeepCopyInto(out *LinuxOsConfigSysctlConfigParameters) {
	*out = *in
	// Every optional sysctl field is a pointer; each non-nil pointer is
	// re-allocated below so the copy shares no memory with the receiver.
	if in.FsAioMaxNr != nil {
		in, out := &in.FsAioMaxNr, &out.FsAioMaxNr
		*out = new(float64)
		**out = **in
	}
	if in.FsFileMax != nil {
		in, out := &in.FsFileMax, &out.FsFileMax
		*out = new(float64)
		**out = **in
	}
	if in.FsInotifyMaxUserWatches != nil {
		in, out := &in.FsInotifyMaxUserWatches, &out.FsInotifyMaxUserWatches
		*out = new(float64)
		**out = **in
	}
	if in.FsNrOpen != nil {
		in, out := &in.FsNrOpen, &out.FsNrOpen
		*out = new(float64)
		**out = **in
	}
	if in.KernelThreadsMax != nil {
		in, out := &in.KernelThreadsMax, &out.KernelThreadsMax
		*out = new(float64)
		**out = **in
	}
	if in.NetCoreNetdevMaxBacklog != nil {
		in, out := &in.NetCoreNetdevMaxBacklog, &out.NetCoreNetdevMaxBacklog
		*out = new(float64)
		**out = **in
	}
	if in.NetCoreOptmemMax != nil {
		in, out := &in.NetCoreOptmemMax, &out.NetCoreOptmemMax
		*out = new(float64)
		**out = **in
	}
	if in.NetCoreRmemDefault != nil {
		in, out := &in.NetCoreRmemDefault, &out.NetCoreRmemDefault
		*out = new(float64)
		**out = **in
	}
	if in.NetCoreRmemMax != nil {
		in, out := &in.NetCoreRmemMax, &out.NetCoreRmemMax
		*out = new(float64)
		**out = **in
	}
	if in.NetCoreSomaxconn != nil {
		in, out := &in.NetCoreSomaxconn, &out.NetCoreSomaxconn
		*out = new(float64)
		**out = **in
	}
	if in.NetCoreWmemDefault != nil {
		in, out := &in.NetCoreWmemDefault, &out.NetCoreWmemDefault
		*out = new(float64)
		**out = **in
	}
	if in.NetCoreWmemMax != nil {
		in, out := &in.NetCoreWmemMax, &out.NetCoreWmemMax
		*out = new(float64)
		**out = **in
	}
	if in.NetIPv4IPLocalPortRangeMax != nil {
		in, out := &in.NetIPv4IPLocalPortRangeMax, &out.NetIPv4IPLocalPortRangeMax
		*out = new(float64)
		**out = **in
	}
	if in.NetIPv4IPLocalPortRangeMin != nil {
		in, out := &in.NetIPv4IPLocalPortRangeMin, &out.NetIPv4IPLocalPortRangeMin
		*out = new(float64)
		**out = **in
	}
	if in.NetIPv4NeighDefaultGcThresh1 != nil {
		in, out := &in.NetIPv4NeighDefaultGcThresh1, &out.NetIPv4NeighDefaultGcThresh1
		*out = new(float64)
		**out = **in
	}
	if in.NetIPv4NeighDefaultGcThresh2 != nil {
		in, out := &in.NetIPv4NeighDefaultGcThresh2, &out.NetIPv4NeighDefaultGcThresh2
		*out = new(float64)
		**out = **in
	}
	if in.NetIPv4NeighDefaultGcThresh3 != nil {
		in, out := &in.NetIPv4NeighDefaultGcThresh3, &out.NetIPv4NeighDefaultGcThresh3
		*out = new(float64)
		**out = **in
	}
	if in.NetIPv4TCPFinTimeout != nil {
		in, out := &in.NetIPv4TCPFinTimeout, &out.NetIPv4TCPFinTimeout
		*out = new(float64)
		**out = **in
	}
	if in.NetIPv4TCPKeepaliveIntvl != nil {
		in, out := &in.NetIPv4TCPKeepaliveIntvl, &out.NetIPv4TCPKeepaliveIntvl
		*out = new(float64)
		**out = **in
	}
	if in.NetIPv4TCPKeepaliveProbes != nil {
		in, out := &in.NetIPv4TCPKeepaliveProbes, &out.NetIPv4TCPKeepaliveProbes
		*out = new(float64)
		**out = **in
	}
	if in.NetIPv4TCPKeepaliveTime != nil {
		in, out := &in.NetIPv4TCPKeepaliveTime, &out.NetIPv4TCPKeepaliveTime
		*out = new(float64)
		**out = **in
	}
	if in.NetIPv4TCPMaxSynBacklog != nil {
		in, out := &in.NetIPv4TCPMaxSynBacklog, &out.NetIPv4TCPMaxSynBacklog
		*out = new(float64)
		**out = **in
	}
	if in.NetIPv4TCPMaxTwBuckets != nil {
		in, out := &in.NetIPv4TCPMaxTwBuckets, &out.NetIPv4TCPMaxTwBuckets
		*out = new(float64)
		**out = **in
	}
	if in.NetIPv4TCPTwReuse != nil {
		in, out := &in.NetIPv4TCPTwReuse, &out.NetIPv4TCPTwReuse
		*out = new(bool)
		**out = **in
	}
	if in.NetNetfilterNfConntrackBuckets != nil {
		in, out := &in.NetNetfilterNfConntrackBuckets, &out.NetNetfilterNfConntrackBuckets
		*out = new(float64)
		**out = **in
	}
	if in.NetNetfilterNfConntrackMax != nil {
		in, out := &in.NetNetfilterNfConntrackMax, &out.NetNetfilterNfConntrackMax
		*out = new(float64)
		**out = **in
	}
	if in.VMMaxMapCount != nil {
		in, out := &in.VMMaxMapCount, &out.VMMaxMapCount
		*out = new(float64)
		**out = **in
	}
	if in.VMSwappiness != nil {
		in, out := &in.VMSwappiness, &out.VMSwappiness
		*out = new(float64)
		**out = **in
	}
	if in.VMVfsCachePressure != nil {
		in, out := &in.VMVfsCachePressure, &out.VMVfsCachePressure
		*out = new(float64)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxOsConfigSysctlConfigParameters.
func (in *LinuxOsConfigSysctlConfigParameters) DeepCopy() *LinuxOsConfigSysctlConfigParameters {
	if in == nil {
		return nil
	}
	out := new(LinuxOsConfigSysctlConfigParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LinuxProfileInitParameters) DeepCopyInto(out *LinuxProfileInitParameters) {
	*out = *in
	if in.AdminUsername != nil {
		in, out := &in.AdminUsername, &out.AdminUsername
		*out = new(string)
		**out = **in
	}
	if in.SSHKey != nil {
		in, out := &in.SSHKey, &out.SSHKey
		*out = new(SSHKeyInitParameters)
		// nested struct: delegate to its own generated deep copy
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxProfileInitParameters.
func (in *LinuxProfileInitParameters) DeepCopy() *LinuxProfileInitParameters {
	if in == nil {
		return nil
	}
	out := new(LinuxProfileInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LinuxProfileObservation) DeepCopyInto(out *LinuxProfileObservation) {
	*out = *in
	if in.AdminUsername != nil {
		in, out := &in.AdminUsername, &out.AdminUsername
		*out = new(string)
		**out = **in
	}
	if in.SSHKey != nil {
		in, out := &in.SSHKey, &out.SSHKey
		*out = new(SSHKeyObservation)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxProfileObservation.
func (in *LinuxProfileObservation) DeepCopy() *LinuxProfileObservation {
	if in == nil {
		return nil
	}
	out := new(LinuxProfileObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LinuxProfileParameters) DeepCopyInto(out *LinuxProfileParameters) {
	*out = *in
	if in.AdminUsername != nil {
		in, out := &in.AdminUsername, &out.AdminUsername
		*out = new(string)
		**out = **in
	}
	if in.SSHKey != nil {
		in, out := &in.SSHKey, &out.SSHKey
		*out = new(SSHKeyParameters)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxProfileParameters.
func (in *LinuxProfileParameters) DeepCopy() *LinuxProfileParameters {
	if in == nil {
		return nil
	}
	out := new(LinuxProfileParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LoadBalancerProfileInitParameters) DeepCopyInto(out *LoadBalancerProfileInitParameters) {
	*out = *in
	// Scalar fields are copied through freshly allocated pointers; the
	// []*string slices are rebuilt element by element so no memory is shared.
	if in.IdleTimeoutInMinutes != nil {
		in, out := &in.IdleTimeoutInMinutes, &out.IdleTimeoutInMinutes
		*out = new(float64)
		**out = **in
	}
	if in.ManagedOutboundIPCount != nil {
		in, out := &in.ManagedOutboundIPCount, &out.ManagedOutboundIPCount
		*out = new(float64)
		**out = **in
	}
	if in.ManagedOutboundIPv6Count != nil {
		in, out := &in.ManagedOutboundIPv6Count, &out.ManagedOutboundIPv6Count
		*out = new(float64)
		**out = **in
	}
	if in.OutboundIPAddressIds != nil {
		in, out := &in.OutboundIPAddressIds, &out.OutboundIPAddressIds
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.OutboundIPPrefixIds != nil {
		in, out := &in.OutboundIPPrefixIds, &out.OutboundIPPrefixIds
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.OutboundPortsAllocated != nil {
		in, out := &in.OutboundPortsAllocated, &out.OutboundPortsAllocated
		*out = new(float64)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerProfileInitParameters.
func (in *LoadBalancerProfileInitParameters) DeepCopy() *LoadBalancerProfileInitParameters {
	if in == nil {
		return nil
	}
	out := new(LoadBalancerProfileInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LoadBalancerProfileObservation) DeepCopyInto(out *LoadBalancerProfileObservation) {
	*out = *in
	// Observation additionally carries the provider-reported
	// EffectiveOutboundIps list, copied element by element like the other slices.
	if in.EffectiveOutboundIps != nil {
		in, out := &in.EffectiveOutboundIps, &out.EffectiveOutboundIps
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.IdleTimeoutInMinutes != nil {
		in, out := &in.IdleTimeoutInMinutes, &out.IdleTimeoutInMinutes
		*out = new(float64)
		**out = **in
	}
	if in.ManagedOutboundIPCount != nil {
		in, out := &in.ManagedOutboundIPCount, &out.ManagedOutboundIPCount
		*out = new(float64)
		**out = **in
	}
	if in.ManagedOutboundIPv6Count != nil {
		in, out := &in.ManagedOutboundIPv6Count, &out.ManagedOutboundIPv6Count
		*out = new(float64)
		**out = **in
	}
	if in.OutboundIPAddressIds != nil {
		in, out := &in.OutboundIPAddressIds, &out.OutboundIPAddressIds
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.OutboundIPPrefixIds != nil {
		in, out := &in.OutboundIPPrefixIds, &out.OutboundIPPrefixIds
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.OutboundPortsAllocated != nil {
		in, out := &in.OutboundPortsAllocated, &out.OutboundPortsAllocated
		*out = new(float64)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerProfileObservation.
func (in *LoadBalancerProfileObservation) DeepCopy() *LoadBalancerProfileObservation {
	if in == nil {
		return nil
	}
	out := new(LoadBalancerProfileObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LoadBalancerProfileParameters) DeepCopyInto(out *LoadBalancerProfileParameters) {
	*out = *in
	// Same field set as the InitParameters variant: pointer scalars are
	// re-allocated, []*string slices rebuilt element by element.
	if in.IdleTimeoutInMinutes != nil {
		in, out := &in.IdleTimeoutInMinutes, &out.IdleTimeoutInMinutes
		*out = new(float64)
		**out = **in
	}
	if in.ManagedOutboundIPCount != nil {
		in, out := &in.ManagedOutboundIPCount, &out.ManagedOutboundIPCount
		*out = new(float64)
		**out = **in
	}
	if in.ManagedOutboundIPv6Count != nil {
		in, out := &in.ManagedOutboundIPv6Count, &out.ManagedOutboundIPv6Count
		*out = new(float64)
		**out = **in
	}
	if in.OutboundIPAddressIds != nil {
		in, out := &in.OutboundIPAddressIds, &out.OutboundIPAddressIds
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.OutboundIPPrefixIds != nil {
		in, out := &in.OutboundIPPrefixIds, &out.OutboundIPPrefixIds
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.OutboundPortsAllocated != nil {
		in, out := &in.OutboundPortsAllocated, &out.OutboundPortsAllocated
		*out = new(float64)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerProfileParameters.
func (in *LoadBalancerProfileParameters) DeepCopy() *LoadBalancerProfileParameters {
	if in == nil {
		return nil
	}
	out := new(LoadBalancerProfileParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MaintenanceWindowAutoUpgradeInitParameters) DeepCopyInto(out *MaintenanceWindowAutoUpgradeInitParameters) {
	*out = *in
	// Pointer scalars are re-allocated; NotAllowed struct slice is deep-copied
	// element-wise via each element's generated DeepCopyInto.
	if in.DayOfMonth != nil {
		in, out := &in.DayOfMonth, &out.DayOfMonth
		*out = new(float64)
		**out = **in
	}
	if in.DayOfWeek != nil {
		in, out := &in.DayOfWeek, &out.DayOfWeek
		*out = new(string)
		**out = **in
	}
	if in.Duration != nil {
		in, out := &in.Duration, &out.Duration
		*out = new(float64)
		**out = **in
	}
	if in.Frequency != nil {
		in, out := &in.Frequency, &out.Frequency
		*out = new(string)
		**out = **in
	}
	if in.Interval != nil {
		in, out := &in.Interval, &out.Interval
		*out = new(float64)
		**out = **in
	}
	if in.NotAllowed != nil {
		in, out := &in.NotAllowed, &out.NotAllowed
		*out = make([]MaintenanceWindowAutoUpgradeNotAllowedInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.StartDate != nil {
		in, out := &in.StartDate, &out.StartDate
		*out = new(string)
		**out = **in
	}
	if in.StartTime != nil {
		in, out := &in.StartTime, &out.StartTime
		*out = new(string)
		**out = **in
	}
	if in.UtcOffset != nil {
		in, out := &in.UtcOffset, &out.UtcOffset
		*out = new(string)
		**out = **in
	}
	if in.WeekIndex != nil {
		in, out := &in.WeekIndex, &out.WeekIndex
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceWindowAutoUpgradeInitParameters.
func (in *MaintenanceWindowAutoUpgradeInitParameters) DeepCopy() *MaintenanceWindowAutoUpgradeInitParameters {
	if in == nil {
		return nil
	}
	out := new(MaintenanceWindowAutoUpgradeInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MaintenanceWindowAutoUpgradeNotAllowedInitParameters) DeepCopyInto(out *MaintenanceWindowAutoUpgradeNotAllowedInitParameters) {
	*out = *in
	// Both fields are optional *string; re-allocate any non-nil pointer.
	if in.End != nil {
		in, out := &in.End, &out.End
		*out = new(string)
		**out = **in
	}
	if in.Start != nil {
		in, out := &in.Start, &out.Start
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceWindowAutoUpgradeNotAllowedInitParameters.
func (in *MaintenanceWindowAutoUpgradeNotAllowedInitParameters) DeepCopy() *MaintenanceWindowAutoUpgradeNotAllowedInitParameters {
	if in == nil {
		return nil
	}
	out := new(MaintenanceWindowAutoUpgradeNotAllowedInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MaintenanceWindowAutoUpgradeNotAllowedObservation) DeepCopyInto(out *MaintenanceWindowAutoUpgradeNotAllowedObservation) {
	*out = *in
	if in.End != nil {
		in, out := &in.End, &out.End
		*out = new(string)
		**out = **in
	}
	if in.Start != nil {
		in, out := &in.Start, &out.Start
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceWindowAutoUpgradeNotAllowedObservation.
func (in *MaintenanceWindowAutoUpgradeNotAllowedObservation) DeepCopy() *MaintenanceWindowAutoUpgradeNotAllowedObservation {
	if in == nil {
		return nil
	}
	out := new(MaintenanceWindowAutoUpgradeNotAllowedObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MaintenanceWindowAutoUpgradeNotAllowedParameters) DeepCopyInto(out *MaintenanceWindowAutoUpgradeNotAllowedParameters) {
	*out = *in
	// Both fields are optional *string; re-allocate any non-nil pointer.
	if in.End != nil {
		in, out := &in.End, &out.End
		*out = new(string)
		**out = **in
	}
	if in.Start != nil {
		in, out := &in.Start, &out.Start
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceWindowAutoUpgradeNotAllowedParameters.
func (in *MaintenanceWindowAutoUpgradeNotAllowedParameters) DeepCopy() *MaintenanceWindowAutoUpgradeNotAllowedParameters {
	if in == nil {
		return nil
	}
	out := new(MaintenanceWindowAutoUpgradeNotAllowedParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MaintenanceWindowAutoUpgradeObservation) DeepCopyInto(out *MaintenanceWindowAutoUpgradeObservation) {
	*out = *in
	if in.DayOfMonth != nil {
		in, out := &in.DayOfMonth, &out.DayOfMonth
		*out = new(float64)
		**out = **in
	}
	if in.DayOfWeek != nil {
		in, out := &in.DayOfWeek, &out.DayOfWeek
		*out = new(string)
		**out = **in
	}
	if in.Duration != nil {
		in, out := &in.Duration, &out.Duration
		*out = new(float64)
		**out = **in
	}
	if in.Frequency != nil {
		in, out := &in.Frequency, &out.Frequency
		*out = new(string)
		**out = **in
	}
	if in.Interval != nil {
		in, out := &in.Interval, &out.Interval
		*out = new(float64)
		**out = **in
	}
	if in.NotAllowed != nil {
		in, out := &in.NotAllowed, &out.NotAllowed
		*out = make([]MaintenanceWindowAutoUpgradeNotAllowedObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.StartDate != nil {
		in, out := &in.StartDate, &out.StartDate
		*out = new(string)
		**out = **in
	}
	if in.StartTime != nil {
		in, out := &in.StartTime, &out.StartTime
		*out = new(string)
		**out = **in
	}
	if in.UtcOffset != nil {
		in, out := &in.UtcOffset, &out.UtcOffset
		*out = new(string)
		**out = **in
	}
	if in.WeekIndex != nil {
		in, out := &in.WeekIndex, &out.WeekIndex
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceWindowAutoUpgradeObservation.
func (in *MaintenanceWindowAutoUpgradeObservation) DeepCopy() *MaintenanceWindowAutoUpgradeObservation {
	if in == nil {
		return nil
	}
	out := new(MaintenanceWindowAutoUpgradeObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MaintenanceWindowAutoUpgradeParameters) DeepCopyInto(out *MaintenanceWindowAutoUpgradeParameters) {
	*out = *in
	if in.DayOfMonth != nil {
		in, out := &in.DayOfMonth, &out.DayOfMonth
		*out = new(float64)
		**out = **in
	}
	if in.DayOfWeek != nil {
		in, out := &in.DayOfWeek, &out.DayOfWeek
		*out = new(string)
		**out = **in
	}
	if in.Duration != nil {
		in, out := &in.Duration, &out.Duration
		*out = new(float64)
		**out = **in
	}
	if in.Frequency != nil {
		in, out := &in.Frequency, &out.Frequency
		*out = new(string)
		**out = **in
	}
	if in.Interval != nil {
		in, out := &in.Interval, &out.Interval
		*out = new(float64)
		**out = **in
	}
	if in.NotAllowed != nil {
		in, out := &in.NotAllowed, &out.NotAllowed
		*out = make([]MaintenanceWindowAutoUpgradeNotAllowedParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.StartDate != nil {
		in, out := &in.StartDate, &out.StartDate
		*out = new(string)
		**out = **in
	}
	if in.StartTime != nil {
		in, out := &in.StartTime, &out.StartTime
		*out = new(string)
		**out = **in
	}
	if in.UtcOffset != nil {
		in, out := &in.UtcOffset, &out.UtcOffset
		*out = new(string)
		**out = **in
	}
	if in.WeekIndex != nil {
		in, out := &in.WeekIndex, &out.WeekIndex
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceWindowAutoUpgradeParameters.
func (in *MaintenanceWindowAutoUpgradeParameters) DeepCopy() *MaintenanceWindowAutoUpgradeParameters {
	if in == nil {
		return nil
	}
	out := new(MaintenanceWindowAutoUpgradeParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MaintenanceWindowInitParameters) DeepCopyInto(out *MaintenanceWindowInitParameters) {
	*out = *in
	if in.Allowed != nil {
		in, out := &in.Allowed, &out.Allowed
		*out = make([]AllowedInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.NotAllowed != nil {
		in, out := &in.NotAllowed, &out.NotAllowed
		*out = make([]NotAllowedInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceWindowInitParameters.
func (in *MaintenanceWindowInitParameters) DeepCopy() *MaintenanceWindowInitParameters {
	if in == nil {
		return nil
	}
	out := new(MaintenanceWindowInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MaintenanceWindowNodeOsInitParameters) DeepCopyInto(out *MaintenanceWindowNodeOsInitParameters) {
	*out = *in
	// Pointer scalars are re-allocated; NotAllowed struct slice is deep-copied
	// element-wise via each element's generated DeepCopyInto.
	if in.DayOfMonth != nil {
		in, out := &in.DayOfMonth, &out.DayOfMonth
		*out = new(float64)
		**out = **in
	}
	if in.DayOfWeek != nil {
		in, out := &in.DayOfWeek, &out.DayOfWeek
		*out = new(string)
		**out = **in
	}
	if in.Duration != nil {
		in, out := &in.Duration, &out.Duration
		*out = new(float64)
		**out = **in
	}
	if in.Frequency != nil {
		in, out := &in.Frequency, &out.Frequency
		*out = new(string)
		**out = **in
	}
	if in.Interval != nil {
		in, out := &in.Interval, &out.Interval
		*out = new(float64)
		**out = **in
	}
	if in.NotAllowed != nil {
		in, out := &in.NotAllowed, &out.NotAllowed
		*out = make([]MaintenanceWindowNodeOsNotAllowedInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.StartDate != nil {
		in, out := &in.StartDate, &out.StartDate
		*out = new(string)
		**out = **in
	}
	if in.StartTime != nil {
		in, out := &in.StartTime, &out.StartTime
		*out = new(string)
		**out = **in
	}
	if in.UtcOffset != nil {
		in, out := &in.UtcOffset, &out.UtcOffset
		*out = new(string)
		**out = **in
	}
	if in.WeekIndex != nil {
		in, out := &in.WeekIndex, &out.WeekIndex
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceWindowNodeOsInitParameters.
func (in *MaintenanceWindowNodeOsInitParameters) DeepCopy() *MaintenanceWindowNodeOsInitParameters {
	if in == nil {
		return nil
	}
	out := new(MaintenanceWindowNodeOsInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MaintenanceWindowNodeOsNotAllowedInitParameters) DeepCopyInto(out *MaintenanceWindowNodeOsNotAllowedInitParameters) {
	*out = *in
	// Both fields are optional *string; re-allocate any non-nil pointer.
	if in.End != nil {
		in, out := &in.End, &out.End
		*out = new(string)
		**out = **in
	}
	if in.Start != nil {
		in, out := &in.Start, &out.Start
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceWindowNodeOsNotAllowedInitParameters.
func (in *MaintenanceWindowNodeOsNotAllowedInitParameters) DeepCopy() *MaintenanceWindowNodeOsNotAllowedInitParameters {
	if in == nil {
		return nil
	}
	out := new(MaintenanceWindowNodeOsNotAllowedInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MaintenanceWindowNodeOsNotAllowedObservation) DeepCopyInto(out *MaintenanceWindowNodeOsNotAllowedObservation) {
	*out = *in
	if in.End != nil {
		in, out := &in.End, &out.End
		*out = new(string)
		**out = **in
	}
	if in.Start != nil {
		in, out := &in.Start, &out.Start
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceWindowNodeOsNotAllowedObservation.
func (in *MaintenanceWindowNodeOsNotAllowedObservation) DeepCopy() *MaintenanceWindowNodeOsNotAllowedObservation {
	if in == nil {
		return nil
	}
	out := new(MaintenanceWindowNodeOsNotAllowedObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MaintenanceWindowNodeOsNotAllowedParameters) DeepCopyInto(out *MaintenanceWindowNodeOsNotAllowedParameters) {
	*out = *in
	// Both fields are optional *string; re-allocate any non-nil pointer.
	if in.End != nil {
		in, out := &in.End, &out.End
		*out = new(string)
		**out = **in
	}
	if in.Start != nil {
		in, out := &in.Start, &out.Start
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceWindowNodeOsNotAllowedParameters.
func (in *MaintenanceWindowNodeOsNotAllowedParameters) DeepCopy() *MaintenanceWindowNodeOsNotAllowedParameters {
	if in == nil {
		return nil
	}
	out := new(MaintenanceWindowNodeOsNotAllowedParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MaintenanceWindowNodeOsObservation) DeepCopyInto(out *MaintenanceWindowNodeOsObservation) {
	*out = *in
	if in.DayOfMonth != nil {
		in, out := &in.DayOfMonth, &out.DayOfMonth
		*out = new(float64)
		**out = **in
	}
	if in.DayOfWeek != nil {
		in, out := &in.DayOfWeek, &out.DayOfWeek
		*out = new(string)
		**out = **in
	}
	if in.Duration != nil {
		in, out := &in.Duration, &out.Duration
		*out = new(float64)
		**out = **in
	}
	if in.Frequency != nil {
		in, out := &in.Frequency, &out.Frequency
		*out = new(string)
		**out = **in
	}
	if in.Interval != nil {
		in, out := &in.Interval, &out.Interval
		*out = new(float64)
		**out = **in
	}
	if in.NotAllowed != nil {
		in, out := &in.NotAllowed, &out.NotAllowed
		*out = make([]MaintenanceWindowNodeOsNotAllowedObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.StartDate != nil {
		in, out := &in.StartDate, &out.StartDate
		*out = new(string)
		**out = **in
	}
	if in.StartTime != nil {
		in, out := &in.StartTime, &out.StartTime
		*out = new(string)
		**out = **in
	}
	if in.UtcOffset != nil {
		in, out := &in.UtcOffset, &out.UtcOffset
		*out = new(string)
		**out = **in
	}
	if in.WeekIndex != nil {
		in, out := &in.WeekIndex, &out.WeekIndex
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceWindowNodeOsObservation.
func (in *MaintenanceWindowNodeOsObservation) DeepCopy() *MaintenanceWindowNodeOsObservation {
	if in == nil {
		return nil
	}
	out := new(MaintenanceWindowNodeOsObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MaintenanceWindowNodeOsParameters) DeepCopyInto(out *MaintenanceWindowNodeOsParameters) {
	*out = *in
	if in.DayOfMonth != nil {
		in, out := &in.DayOfMonth, &out.DayOfMonth
		*out = new(float64)
		**out = **in
	}
	if in.DayOfWeek != nil {
		in, out := &in.DayOfWeek, &out.DayOfWeek
		*out = new(string)
		**out = **in
	}
	if in.Duration != nil {
		in, out := &in.Duration, &out.Duration
		*out = new(float64)
		**out = **in
	}
	if in.Frequency != nil {
		in, out := &in.Frequency, &out.Frequency
		*out = new(string)
		**out = **in
	}
	if in.Interval != nil {
		in, out := &in.Interval, &out.Interval
		*out = new(float64)
		**out = **in
	}
	if in.NotAllowed != nil {
		in, out := &in.NotAllowed, &out.NotAllowed
		*out = make([]MaintenanceWindowNodeOsNotAllowedParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.StartDate != nil {
		in, out := &in.StartDate, &out.StartDate
		*out = new(string)
		**out = **in
	}
	if in.StartTime != nil {
		in, out := &in.StartTime, &out.StartTime
		*out = new(string)
		**out = **in
	}
	if in.UtcOffset != nil {
		in, out := &in.UtcOffset, &out.UtcOffset
		*out = new(string)
		**out = **in
	}
	if in.WeekIndex != nil {
		in, out := &in.WeekIndex, &out.WeekIndex
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceWindowNodeOsParameters.
func (in *MaintenanceWindowNodeOsParameters) DeepCopy() *MaintenanceWindowNodeOsParameters {
	if in == nil {
		return nil
	}
	out := new(MaintenanceWindowNodeOsParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MaintenanceWindowObservation) DeepCopyInto(out *MaintenanceWindowObservation) {
	*out = *in
	if in.Allowed != nil {
		in, out := &in.Allowed, &out.Allowed
		*out = make([]AllowedObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.NotAllowed != nil {
		in, out := &in.NotAllowed, &out.NotAllowed
		*out = make([]NotAllowedObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceWindowObservation.
func (in *MaintenanceWindowObservation) DeepCopy() *MaintenanceWindowObservation {
	if in == nil {
		return nil
	}
	out := new(MaintenanceWindowObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MaintenanceWindowParameters) DeepCopyInto(out *MaintenanceWindowParameters) {
	*out = *in
	if in.Allowed != nil {
		in, out := &in.Allowed, &out.Allowed
		*out = make([]AllowedParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.NotAllowed != nil {
		in, out := &in.NotAllowed, &out.NotAllowed
		*out = make([]NotAllowedParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceWindowParameters.
+func (in *MaintenanceWindowParameters) DeepCopy() *MaintenanceWindowParameters { + if in == nil { + return nil + } + out := new(MaintenanceWindowParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MicrosoftDefenderInitParameters) DeepCopyInto(out *MicrosoftDefenderInitParameters) { + *out = *in + if in.LogAnalyticsWorkspaceID != nil { + in, out := &in.LogAnalyticsWorkspaceID, &out.LogAnalyticsWorkspaceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MicrosoftDefenderInitParameters. +func (in *MicrosoftDefenderInitParameters) DeepCopy() *MicrosoftDefenderInitParameters { + if in == nil { + return nil + } + out := new(MicrosoftDefenderInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MicrosoftDefenderObservation) DeepCopyInto(out *MicrosoftDefenderObservation) { + *out = *in + if in.LogAnalyticsWorkspaceID != nil { + in, out := &in.LogAnalyticsWorkspaceID, &out.LogAnalyticsWorkspaceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MicrosoftDefenderObservation. +func (in *MicrosoftDefenderObservation) DeepCopy() *MicrosoftDefenderObservation { + if in == nil { + return nil + } + out := new(MicrosoftDefenderObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MicrosoftDefenderParameters) DeepCopyInto(out *MicrosoftDefenderParameters) { + *out = *in + if in.LogAnalyticsWorkspaceID != nil { + in, out := &in.LogAnalyticsWorkspaceID, &out.LogAnalyticsWorkspaceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MicrosoftDefenderParameters. +func (in *MicrosoftDefenderParameters) DeepCopy() *MicrosoftDefenderParameters { + if in == nil { + return nil + } + out := new(MicrosoftDefenderParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorMetricsInitParameters) DeepCopyInto(out *MonitorMetricsInitParameters) { + *out = *in + if in.AnnotationsAllowed != nil { + in, out := &in.AnnotationsAllowed, &out.AnnotationsAllowed + *out = new(string) + **out = **in + } + if in.LabelsAllowed != nil { + in, out := &in.LabelsAllowed, &out.LabelsAllowed + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorMetricsInitParameters. +func (in *MonitorMetricsInitParameters) DeepCopy() *MonitorMetricsInitParameters { + if in == nil { + return nil + } + out := new(MonitorMetricsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorMetricsObservation) DeepCopyInto(out *MonitorMetricsObservation) { + *out = *in + if in.AnnotationsAllowed != nil { + in, out := &in.AnnotationsAllowed, &out.AnnotationsAllowed + *out = new(string) + **out = **in + } + if in.LabelsAllowed != nil { + in, out := &in.LabelsAllowed, &out.LabelsAllowed + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorMetricsObservation. 
+func (in *MonitorMetricsObservation) DeepCopy() *MonitorMetricsObservation { + if in == nil { + return nil + } + out := new(MonitorMetricsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorMetricsParameters) DeepCopyInto(out *MonitorMetricsParameters) { + *out = *in + if in.AnnotationsAllowed != nil { + in, out := &in.AnnotationsAllowed, &out.AnnotationsAllowed + *out = new(string) + **out = **in + } + if in.LabelsAllowed != nil { + in, out := &in.LabelsAllowed, &out.LabelsAllowed + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorMetricsParameters. +func (in *MonitorMetricsParameters) DeepCopy() *MonitorMetricsParameters { + if in == nil { + return nil + } + out := new(MonitorMetricsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NATGatewayProfileInitParameters) DeepCopyInto(out *NATGatewayProfileInitParameters) { + *out = *in + if in.IdleTimeoutInMinutes != nil { + in, out := &in.IdleTimeoutInMinutes, &out.IdleTimeoutInMinutes + *out = new(float64) + **out = **in + } + if in.ManagedOutboundIPCount != nil { + in, out := &in.ManagedOutboundIPCount, &out.ManagedOutboundIPCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NATGatewayProfileInitParameters. +func (in *NATGatewayProfileInitParameters) DeepCopy() *NATGatewayProfileInitParameters { + if in == nil { + return nil + } + out := new(NATGatewayProfileInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NATGatewayProfileObservation) DeepCopyInto(out *NATGatewayProfileObservation) { + *out = *in + if in.EffectiveOutboundIps != nil { + in, out := &in.EffectiveOutboundIps, &out.EffectiveOutboundIps + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IdleTimeoutInMinutes != nil { + in, out := &in.IdleTimeoutInMinutes, &out.IdleTimeoutInMinutes + *out = new(float64) + **out = **in + } + if in.ManagedOutboundIPCount != nil { + in, out := &in.ManagedOutboundIPCount, &out.ManagedOutboundIPCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NATGatewayProfileObservation. +func (in *NATGatewayProfileObservation) DeepCopy() *NATGatewayProfileObservation { + if in == nil { + return nil + } + out := new(NATGatewayProfileObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NATGatewayProfileParameters) DeepCopyInto(out *NATGatewayProfileParameters) { + *out = *in + if in.IdleTimeoutInMinutes != nil { + in, out := &in.IdleTimeoutInMinutes, &out.IdleTimeoutInMinutes + *out = new(float64) + **out = **in + } + if in.ManagedOutboundIPCount != nil { + in, out := &in.ManagedOutboundIPCount, &out.ManagedOutboundIPCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NATGatewayProfileParameters. +func (in *NATGatewayProfileParameters) DeepCopy() *NATGatewayProfileParameters { + if in == nil { + return nil + } + out := new(NATGatewayProfileParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkProfileInitParameters) DeepCopyInto(out *NetworkProfileInitParameters) { + *out = *in + if in.DNSServiceIP != nil { + in, out := &in.DNSServiceIP, &out.DNSServiceIP + *out = new(string) + **out = **in + } + if in.DockerBridgeCidr != nil { + in, out := &in.DockerBridgeCidr, &out.DockerBridgeCidr + *out = new(string) + **out = **in + } + if in.EbpfDataPlane != nil { + in, out := &in.EbpfDataPlane, &out.EbpfDataPlane + *out = new(string) + **out = **in + } + if in.IPVersions != nil { + in, out := &in.IPVersions, &out.IPVersions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LoadBalancerProfile != nil { + in, out := &in.LoadBalancerProfile, &out.LoadBalancerProfile + *out = new(LoadBalancerProfileInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LoadBalancerSku != nil { + in, out := &in.LoadBalancerSku, &out.LoadBalancerSku + *out = new(string) + **out = **in + } + if in.NATGatewayProfile != nil { + in, out := &in.NATGatewayProfile, &out.NATGatewayProfile + *out = new(NATGatewayProfileInitParameters) + (*in).DeepCopyInto(*out) + } + if in.NetworkMode != nil { + in, out := &in.NetworkMode, &out.NetworkMode + *out = new(string) + **out = **in + } + if in.NetworkPlugin != nil { + in, out := &in.NetworkPlugin, &out.NetworkPlugin + *out = new(string) + **out = **in + } + if in.NetworkPluginMode != nil { + in, out := &in.NetworkPluginMode, &out.NetworkPluginMode + *out = new(string) + **out = **in + } + if in.NetworkPolicy != nil { + in, out := &in.NetworkPolicy, &out.NetworkPolicy + *out = new(string) + **out = **in + } + if in.OutboundType != nil { + in, out := &in.OutboundType, &out.OutboundType + *out = new(string) + **out = **in + } + if in.PodCidr != nil { + in, out := &in.PodCidr, &out.PodCidr + *out = new(string) + **out = **in + } + if in.PodCidrs != nil { + in, out := &in.PodCidrs, &out.PodCidrs + *out = make([]*string, 
len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ServiceCidr != nil { + in, out := &in.ServiceCidr, &out.ServiceCidr + *out = new(string) + **out = **in + } + if in.ServiceCidrs != nil { + in, out := &in.ServiceCidrs, &out.ServiceCidrs + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkProfileInitParameters. +func (in *NetworkProfileInitParameters) DeepCopy() *NetworkProfileInitParameters { + if in == nil { + return nil + } + out := new(NetworkProfileInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkProfileObservation) DeepCopyInto(out *NetworkProfileObservation) { + *out = *in + if in.DNSServiceIP != nil { + in, out := &in.DNSServiceIP, &out.DNSServiceIP + *out = new(string) + **out = **in + } + if in.DockerBridgeCidr != nil { + in, out := &in.DockerBridgeCidr, &out.DockerBridgeCidr + *out = new(string) + **out = **in + } + if in.EbpfDataPlane != nil { + in, out := &in.EbpfDataPlane, &out.EbpfDataPlane + *out = new(string) + **out = **in + } + if in.IPVersions != nil { + in, out := &in.IPVersions, &out.IPVersions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LoadBalancerProfile != nil { + in, out := &in.LoadBalancerProfile, &out.LoadBalancerProfile + *out = new(LoadBalancerProfileObservation) + (*in).DeepCopyInto(*out) + } + if in.LoadBalancerSku != nil { + in, out := &in.LoadBalancerSku, &out.LoadBalancerSku + *out = new(string) + **out = **in + } + if in.NATGatewayProfile != nil { + in, out := 
&in.NATGatewayProfile, &out.NATGatewayProfile + *out = new(NATGatewayProfileObservation) + (*in).DeepCopyInto(*out) + } + if in.NetworkMode != nil { + in, out := &in.NetworkMode, &out.NetworkMode + *out = new(string) + **out = **in + } + if in.NetworkPlugin != nil { + in, out := &in.NetworkPlugin, &out.NetworkPlugin + *out = new(string) + **out = **in + } + if in.NetworkPluginMode != nil { + in, out := &in.NetworkPluginMode, &out.NetworkPluginMode + *out = new(string) + **out = **in + } + if in.NetworkPolicy != nil { + in, out := &in.NetworkPolicy, &out.NetworkPolicy + *out = new(string) + **out = **in + } + if in.OutboundType != nil { + in, out := &in.OutboundType, &out.OutboundType + *out = new(string) + **out = **in + } + if in.PodCidr != nil { + in, out := &in.PodCidr, &out.PodCidr + *out = new(string) + **out = **in + } + if in.PodCidrs != nil { + in, out := &in.PodCidrs, &out.PodCidrs + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ServiceCidr != nil { + in, out := &in.ServiceCidr, &out.ServiceCidr + *out = new(string) + **out = **in + } + if in.ServiceCidrs != nil { + in, out := &in.ServiceCidrs, &out.ServiceCidrs + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkProfileObservation. +func (in *NetworkProfileObservation) DeepCopy() *NetworkProfileObservation { + if in == nil { + return nil + } + out := new(NetworkProfileObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkProfileParameters) DeepCopyInto(out *NetworkProfileParameters) { + *out = *in + if in.DNSServiceIP != nil { + in, out := &in.DNSServiceIP, &out.DNSServiceIP + *out = new(string) + **out = **in + } + if in.DockerBridgeCidr != nil { + in, out := &in.DockerBridgeCidr, &out.DockerBridgeCidr + *out = new(string) + **out = **in + } + if in.EbpfDataPlane != nil { + in, out := &in.EbpfDataPlane, &out.EbpfDataPlane + *out = new(string) + **out = **in + } + if in.IPVersions != nil { + in, out := &in.IPVersions, &out.IPVersions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LoadBalancerProfile != nil { + in, out := &in.LoadBalancerProfile, &out.LoadBalancerProfile + *out = new(LoadBalancerProfileParameters) + (*in).DeepCopyInto(*out) + } + if in.LoadBalancerSku != nil { + in, out := &in.LoadBalancerSku, &out.LoadBalancerSku + *out = new(string) + **out = **in + } + if in.NATGatewayProfile != nil { + in, out := &in.NATGatewayProfile, &out.NATGatewayProfile + *out = new(NATGatewayProfileParameters) + (*in).DeepCopyInto(*out) + } + if in.NetworkMode != nil { + in, out := &in.NetworkMode, &out.NetworkMode + *out = new(string) + **out = **in + } + if in.NetworkPlugin != nil { + in, out := &in.NetworkPlugin, &out.NetworkPlugin + *out = new(string) + **out = **in + } + if in.NetworkPluginMode != nil { + in, out := &in.NetworkPluginMode, &out.NetworkPluginMode + *out = new(string) + **out = **in + } + if in.NetworkPolicy != nil { + in, out := &in.NetworkPolicy, &out.NetworkPolicy + *out = new(string) + **out = **in + } + if in.OutboundType != nil { + in, out := &in.OutboundType, &out.OutboundType + *out = new(string) + **out = **in + } + if in.PodCidr != nil { + in, out := &in.PodCidr, &out.PodCidr + *out = new(string) + **out = **in + } + if in.PodCidrs != nil { + in, out := &in.PodCidrs, &out.PodCidrs + *out = make([]*string, len(*in)) + for 
i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ServiceCidr != nil { + in, out := &in.ServiceCidr, &out.ServiceCidr + *out = new(string) + **out = **in + } + if in.ServiceCidrs != nil { + in, out := &in.ServiceCidrs, &out.ServiceCidrs + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkProfileParameters. +func (in *NetworkProfileParameters) DeepCopy() *NetworkProfileParameters { + if in == nil { + return nil + } + out := new(NetworkProfileParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeNetworkProfileAllowedHostPortsInitParameters) DeepCopyInto(out *NodeNetworkProfileAllowedHostPortsInitParameters) { + *out = *in + if in.PortEnd != nil { + in, out := &in.PortEnd, &out.PortEnd + *out = new(float64) + **out = **in + } + if in.PortStart != nil { + in, out := &in.PortStart, &out.PortStart + *out = new(float64) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeNetworkProfileAllowedHostPortsInitParameters. +func (in *NodeNetworkProfileAllowedHostPortsInitParameters) DeepCopy() *NodeNetworkProfileAllowedHostPortsInitParameters { + if in == nil { + return nil + } + out := new(NodeNetworkProfileAllowedHostPortsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NodeNetworkProfileAllowedHostPortsObservation) DeepCopyInto(out *NodeNetworkProfileAllowedHostPortsObservation) { + *out = *in + if in.PortEnd != nil { + in, out := &in.PortEnd, &out.PortEnd + *out = new(float64) + **out = **in + } + if in.PortStart != nil { + in, out := &in.PortStart, &out.PortStart + *out = new(float64) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeNetworkProfileAllowedHostPortsObservation. +func (in *NodeNetworkProfileAllowedHostPortsObservation) DeepCopy() *NodeNetworkProfileAllowedHostPortsObservation { + if in == nil { + return nil + } + out := new(NodeNetworkProfileAllowedHostPortsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeNetworkProfileAllowedHostPortsParameters) DeepCopyInto(out *NodeNetworkProfileAllowedHostPortsParameters) { + *out = *in + if in.PortEnd != nil { + in, out := &in.PortEnd, &out.PortEnd + *out = new(float64) + **out = **in + } + if in.PortStart != nil { + in, out := &in.PortStart, &out.PortStart + *out = new(float64) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeNetworkProfileAllowedHostPortsParameters. +func (in *NodeNetworkProfileAllowedHostPortsParameters) DeepCopy() *NodeNetworkProfileAllowedHostPortsParameters { + if in == nil { + return nil + } + out := new(NodeNetworkProfileAllowedHostPortsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NodeNetworkProfileInitParameters) DeepCopyInto(out *NodeNetworkProfileInitParameters) { + *out = *in + if in.AllowedHostPorts != nil { + in, out := &in.AllowedHostPorts, &out.AllowedHostPorts + *out = make([]AllowedHostPortsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ApplicationSecurityGroupIds != nil { + in, out := &in.ApplicationSecurityGroupIds, &out.ApplicationSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NodePublicIPTags != nil { + in, out := &in.NodePublicIPTags, &out.NodePublicIPTags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeNetworkProfileInitParameters. +func (in *NodeNetworkProfileInitParameters) DeepCopy() *NodeNetworkProfileInitParameters { + if in == nil { + return nil + } + out := new(NodeNetworkProfileInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NodeNetworkProfileObservation) DeepCopyInto(out *NodeNetworkProfileObservation) { + *out = *in + if in.AllowedHostPorts != nil { + in, out := &in.AllowedHostPorts, &out.AllowedHostPorts + *out = make([]AllowedHostPortsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ApplicationSecurityGroupIds != nil { + in, out := &in.ApplicationSecurityGroupIds, &out.ApplicationSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NodePublicIPTags != nil { + in, out := &in.NodePublicIPTags, &out.NodePublicIPTags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeNetworkProfileObservation. +func (in *NodeNetworkProfileObservation) DeepCopy() *NodeNetworkProfileObservation { + if in == nil { + return nil + } + out := new(NodeNetworkProfileObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NodeNetworkProfileParameters) DeepCopyInto(out *NodeNetworkProfileParameters) { + *out = *in + if in.AllowedHostPorts != nil { + in, out := &in.AllowedHostPorts, &out.AllowedHostPorts + *out = make([]AllowedHostPortsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ApplicationSecurityGroupIds != nil { + in, out := &in.ApplicationSecurityGroupIds, &out.ApplicationSecurityGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NodePublicIPTags != nil { + in, out := &in.NodePublicIPTags, &out.NodePublicIPTags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeNetworkProfileParameters. +func (in *NodeNetworkProfileParameters) DeepCopy() *NodeNetworkProfileParameters { + if in == nil { + return nil + } + out := new(NodeNetworkProfileParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NotAllowedInitParameters) DeepCopyInto(out *NotAllowedInitParameters) { + *out = *in + if in.End != nil { + in, out := &in.End, &out.End + *out = new(string) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotAllowedInitParameters. 
+func (in *NotAllowedInitParameters) DeepCopy() *NotAllowedInitParameters { + if in == nil { + return nil + } + out := new(NotAllowedInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NotAllowedObservation) DeepCopyInto(out *NotAllowedObservation) { + *out = *in + if in.End != nil { + in, out := &in.End, &out.End + *out = new(string) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotAllowedObservation. +func (in *NotAllowedObservation) DeepCopy() *NotAllowedObservation { + if in == nil { + return nil + } + out := new(NotAllowedObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NotAllowedParameters) DeepCopyInto(out *NotAllowedParameters) { + *out = *in + if in.End != nil { + in, out := &in.End, &out.End + *out = new(string) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotAllowedParameters. +func (in *NotAllowedParameters) DeepCopy() *NotAllowedParameters { + if in == nil { + return nil + } + out := new(NotAllowedParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OmsAgentIdentityInitParameters) DeepCopyInto(out *OmsAgentIdentityInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OmsAgentIdentityInitParameters. 
+func (in *OmsAgentIdentityInitParameters) DeepCopy() *OmsAgentIdentityInitParameters { + if in == nil { + return nil + } + out := new(OmsAgentIdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OmsAgentIdentityObservation) DeepCopyInto(out *OmsAgentIdentityObservation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ObjectID != nil { + in, out := &in.ObjectID, &out.ObjectID + *out = new(string) + **out = **in + } + if in.UserAssignedIdentityID != nil { + in, out := &in.UserAssignedIdentityID, &out.UserAssignedIdentityID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OmsAgentIdentityObservation. +func (in *OmsAgentIdentityObservation) DeepCopy() *OmsAgentIdentityObservation { + if in == nil { + return nil + } + out := new(OmsAgentIdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OmsAgentIdentityParameters) DeepCopyInto(out *OmsAgentIdentityParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OmsAgentIdentityParameters. +func (in *OmsAgentIdentityParameters) DeepCopy() *OmsAgentIdentityParameters { + if in == nil { + return nil + } + out := new(OmsAgentIdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OmsAgentInitParameters) DeepCopyInto(out *OmsAgentInitParameters) { + *out = *in + if in.LogAnalyticsWorkspaceID != nil { + in, out := &in.LogAnalyticsWorkspaceID, &out.LogAnalyticsWorkspaceID + *out = new(string) + **out = **in + } + if in.MsiAuthForMonitoringEnabled != nil { + in, out := &in.MsiAuthForMonitoringEnabled, &out.MsiAuthForMonitoringEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OmsAgentInitParameters. +func (in *OmsAgentInitParameters) DeepCopy() *OmsAgentInitParameters { + if in == nil { + return nil + } + out := new(OmsAgentInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OmsAgentObservation) DeepCopyInto(out *OmsAgentObservation) { + *out = *in + if in.LogAnalyticsWorkspaceID != nil { + in, out := &in.LogAnalyticsWorkspaceID, &out.LogAnalyticsWorkspaceID + *out = new(string) + **out = **in + } + if in.MsiAuthForMonitoringEnabled != nil { + in, out := &in.MsiAuthForMonitoringEnabled, &out.MsiAuthForMonitoringEnabled + *out = new(bool) + **out = **in + } + if in.OmsAgentIdentity != nil { + in, out := &in.OmsAgentIdentity, &out.OmsAgentIdentity + *out = make([]OmsAgentIdentityObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OmsAgentObservation. +func (in *OmsAgentObservation) DeepCopy() *OmsAgentObservation { + if in == nil { + return nil + } + out := new(OmsAgentObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OmsAgentParameters) DeepCopyInto(out *OmsAgentParameters) { + *out = *in + if in.LogAnalyticsWorkspaceID != nil { + in, out := &in.LogAnalyticsWorkspaceID, &out.LogAnalyticsWorkspaceID + *out = new(string) + **out = **in + } + if in.MsiAuthForMonitoringEnabled != nil { + in, out := &in.MsiAuthForMonitoringEnabled, &out.MsiAuthForMonitoringEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OmsAgentParameters. +func (in *OmsAgentParameters) DeepCopy() *OmsAgentParameters { + if in == nil { + return nil + } + out := new(OmsAgentParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SSHKeyInitParameters) DeepCopyInto(out *SSHKeyInitParameters) { + *out = *in + if in.KeyData != nil { + in, out := &in.KeyData, &out.KeyData + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SSHKeyInitParameters. +func (in *SSHKeyInitParameters) DeepCopy() *SSHKeyInitParameters { + if in == nil { + return nil + } + out := new(SSHKeyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SSHKeyObservation) DeepCopyInto(out *SSHKeyObservation) { + *out = *in + if in.KeyData != nil { + in, out := &in.KeyData, &out.KeyData + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SSHKeyObservation. +func (in *SSHKeyObservation) DeepCopy() *SSHKeyObservation { + if in == nil { + return nil + } + out := new(SSHKeyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SSHKeyParameters) DeepCopyInto(out *SSHKeyParameters) { + *out = *in + if in.KeyData != nil { + in, out := &in.KeyData, &out.KeyData + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SSHKeyParameters. +func (in *SSHKeyParameters) DeepCopy() *SSHKeyParameters { + if in == nil { + return nil + } + out := new(SSHKeyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretIdentityInitParameters) DeepCopyInto(out *SecretIdentityInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretIdentityInitParameters. +func (in *SecretIdentityInitParameters) DeepCopy() *SecretIdentityInitParameters { + if in == nil { + return nil + } + out := new(SecretIdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretIdentityObservation) DeepCopyInto(out *SecretIdentityObservation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ObjectID != nil { + in, out := &in.ObjectID, &out.ObjectID + *out = new(string) + **out = **in + } + if in.UserAssignedIdentityID != nil { + in, out := &in.UserAssignedIdentityID, &out.UserAssignedIdentityID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretIdentityObservation. +func (in *SecretIdentityObservation) DeepCopy() *SecretIdentityObservation { + if in == nil { + return nil + } + out := new(SecretIdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *SecretIdentityParameters) DeepCopyInto(out *SecretIdentityParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretIdentityParameters. +func (in *SecretIdentityParameters) DeepCopy() *SecretIdentityParameters { + if in == nil { + return nil + } + out := new(SecretIdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceMeshProfileInitParameters) DeepCopyInto(out *ServiceMeshProfileInitParameters) { + *out = *in + if in.ExternalIngressGatewayEnabled != nil { + in, out := &in.ExternalIngressGatewayEnabled, &out.ExternalIngressGatewayEnabled + *out = new(bool) + **out = **in + } + if in.InternalIngressGatewayEnabled != nil { + in, out := &in.InternalIngressGatewayEnabled, &out.InternalIngressGatewayEnabled + *out = new(bool) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceMeshProfileInitParameters. +func (in *ServiceMeshProfileInitParameters) DeepCopy() *ServiceMeshProfileInitParameters { + if in == nil { + return nil + } + out := new(ServiceMeshProfileInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceMeshProfileObservation) DeepCopyInto(out *ServiceMeshProfileObservation) { + *out = *in + if in.ExternalIngressGatewayEnabled != nil { + in, out := &in.ExternalIngressGatewayEnabled, &out.ExternalIngressGatewayEnabled + *out = new(bool) + **out = **in + } + if in.InternalIngressGatewayEnabled != nil { + in, out := &in.InternalIngressGatewayEnabled, &out.InternalIngressGatewayEnabled + *out = new(bool) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceMeshProfileObservation. +func (in *ServiceMeshProfileObservation) DeepCopy() *ServiceMeshProfileObservation { + if in == nil { + return nil + } + out := new(ServiceMeshProfileObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceMeshProfileParameters) DeepCopyInto(out *ServiceMeshProfileParameters) { + *out = *in + if in.ExternalIngressGatewayEnabled != nil { + in, out := &in.ExternalIngressGatewayEnabled, &out.ExternalIngressGatewayEnabled + *out = new(bool) + **out = **in + } + if in.InternalIngressGatewayEnabled != nil { + in, out := &in.InternalIngressGatewayEnabled, &out.InternalIngressGatewayEnabled + *out = new(bool) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceMeshProfileParameters. +func (in *ServiceMeshProfileParameters) DeepCopy() *ServiceMeshProfileParameters { + if in == nil { + return nil + } + out := new(ServiceMeshProfileParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServicePrincipalInitParameters) DeepCopyInto(out *ServicePrincipalInitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicePrincipalInitParameters. +func (in *ServicePrincipalInitParameters) DeepCopy() *ServicePrincipalInitParameters { + if in == nil { + return nil + } + out := new(ServicePrincipalInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServicePrincipalObservation) DeepCopyInto(out *ServicePrincipalObservation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicePrincipalObservation. +func (in *ServicePrincipalObservation) DeepCopy() *ServicePrincipalObservation { + if in == nil { + return nil + } + out := new(ServicePrincipalObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServicePrincipalParameters) DeepCopyInto(out *ServicePrincipalParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + out.ClientSecretSecretRef = in.ClientSecretSecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicePrincipalParameters. +func (in *ServicePrincipalParameters) DeepCopy() *ServicePrincipalParameters { + if in == nil { + return nil + } + out := new(ServicePrincipalParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *StorageProfileInitParameters) DeepCopyInto(out *StorageProfileInitParameters) { + *out = *in + if in.BlobDriverEnabled != nil { + in, out := &in.BlobDriverEnabled, &out.BlobDriverEnabled + *out = new(bool) + **out = **in + } + if in.DiskDriverEnabled != nil { + in, out := &in.DiskDriverEnabled, &out.DiskDriverEnabled + *out = new(bool) + **out = **in + } + if in.DiskDriverVersion != nil { + in, out := &in.DiskDriverVersion, &out.DiskDriverVersion + *out = new(string) + **out = **in + } + if in.FileDriverEnabled != nil { + in, out := &in.FileDriverEnabled, &out.FileDriverEnabled + *out = new(bool) + **out = **in + } + if in.SnapshotControllerEnabled != nil { + in, out := &in.SnapshotControllerEnabled, &out.SnapshotControllerEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageProfileInitParameters. +func (in *StorageProfileInitParameters) DeepCopy() *StorageProfileInitParameters { + if in == nil { + return nil + } + out := new(StorageProfileInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StorageProfileObservation) DeepCopyInto(out *StorageProfileObservation) { + *out = *in + if in.BlobDriverEnabled != nil { + in, out := &in.BlobDriverEnabled, &out.BlobDriverEnabled + *out = new(bool) + **out = **in + } + if in.DiskDriverEnabled != nil { + in, out := &in.DiskDriverEnabled, &out.DiskDriverEnabled + *out = new(bool) + **out = **in + } + if in.DiskDriverVersion != nil { + in, out := &in.DiskDriverVersion, &out.DiskDriverVersion + *out = new(string) + **out = **in + } + if in.FileDriverEnabled != nil { + in, out := &in.FileDriverEnabled, &out.FileDriverEnabled + *out = new(bool) + **out = **in + } + if in.SnapshotControllerEnabled != nil { + in, out := &in.SnapshotControllerEnabled, &out.SnapshotControllerEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageProfileObservation. +func (in *StorageProfileObservation) DeepCopy() *StorageProfileObservation { + if in == nil { + return nil + } + out := new(StorageProfileObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StorageProfileParameters) DeepCopyInto(out *StorageProfileParameters) { + *out = *in + if in.BlobDriverEnabled != nil { + in, out := &in.BlobDriverEnabled, &out.BlobDriverEnabled + *out = new(bool) + **out = **in + } + if in.DiskDriverEnabled != nil { + in, out := &in.DiskDriverEnabled, &out.DiskDriverEnabled + *out = new(bool) + **out = **in + } + if in.DiskDriverVersion != nil { + in, out := &in.DiskDriverVersion, &out.DiskDriverVersion + *out = new(string) + **out = **in + } + if in.FileDriverEnabled != nil { + in, out := &in.FileDriverEnabled, &out.FileDriverEnabled + *out = new(bool) + **out = **in + } + if in.SnapshotControllerEnabled != nil { + in, out := &in.SnapshotControllerEnabled, &out.SnapshotControllerEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageProfileParameters. +func (in *StorageProfileParameters) DeepCopy() *StorageProfileParameters { + if in == nil { + return nil + } + out := new(StorageProfileParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SysctlConfigInitParameters) DeepCopyInto(out *SysctlConfigInitParameters) { + *out = *in + if in.FsAioMaxNr != nil { + in, out := &in.FsAioMaxNr, &out.FsAioMaxNr + *out = new(float64) + **out = **in + } + if in.FsFileMax != nil { + in, out := &in.FsFileMax, &out.FsFileMax + *out = new(float64) + **out = **in + } + if in.FsInotifyMaxUserWatches != nil { + in, out := &in.FsInotifyMaxUserWatches, &out.FsInotifyMaxUserWatches + *out = new(float64) + **out = **in + } + if in.FsNrOpen != nil { + in, out := &in.FsNrOpen, &out.FsNrOpen + *out = new(float64) + **out = **in + } + if in.KernelThreadsMax != nil { + in, out := &in.KernelThreadsMax, &out.KernelThreadsMax + *out = new(float64) + **out = **in + } + if in.NetCoreNetdevMaxBacklog != nil { + in, out := &in.NetCoreNetdevMaxBacklog, &out.NetCoreNetdevMaxBacklog + *out = new(float64) + **out = **in + } + if in.NetCoreOptmemMax != nil { + in, out := &in.NetCoreOptmemMax, &out.NetCoreOptmemMax + *out = new(float64) + **out = **in + } + if in.NetCoreRmemDefault != nil { + in, out := &in.NetCoreRmemDefault, &out.NetCoreRmemDefault + *out = new(float64) + **out = **in + } + if in.NetCoreRmemMax != nil { + in, out := &in.NetCoreRmemMax, &out.NetCoreRmemMax + *out = new(float64) + **out = **in + } + if in.NetCoreSomaxconn != nil { + in, out := &in.NetCoreSomaxconn, &out.NetCoreSomaxconn + *out = new(float64) + **out = **in + } + if in.NetCoreWmemDefault != nil { + in, out := &in.NetCoreWmemDefault, &out.NetCoreWmemDefault + *out = new(float64) + **out = **in + } + if in.NetCoreWmemMax != nil { + in, out := &in.NetCoreWmemMax, &out.NetCoreWmemMax + *out = new(float64) + **out = **in + } + if in.NetIPv4IPLocalPortRangeMax != nil { + in, out := &in.NetIPv4IPLocalPortRangeMax, &out.NetIPv4IPLocalPortRangeMax + *out = new(float64) + **out = **in + } + if in.NetIPv4IPLocalPortRangeMin != nil { + in, out := &in.NetIPv4IPLocalPortRangeMin, &out.NetIPv4IPLocalPortRangeMin + *out = new(float64) + **out = **in + } + if 
in.NetIPv4NeighDefaultGcThresh1 != nil { + in, out := &in.NetIPv4NeighDefaultGcThresh1, &out.NetIPv4NeighDefaultGcThresh1 + *out = new(float64) + **out = **in + } + if in.NetIPv4NeighDefaultGcThresh2 != nil { + in, out := &in.NetIPv4NeighDefaultGcThresh2, &out.NetIPv4NeighDefaultGcThresh2 + *out = new(float64) + **out = **in + } + if in.NetIPv4NeighDefaultGcThresh3 != nil { + in, out := &in.NetIPv4NeighDefaultGcThresh3, &out.NetIPv4NeighDefaultGcThresh3 + *out = new(float64) + **out = **in + } + if in.NetIPv4TCPFinTimeout != nil { + in, out := &in.NetIPv4TCPFinTimeout, &out.NetIPv4TCPFinTimeout + *out = new(float64) + **out = **in + } + if in.NetIPv4TCPKeepaliveIntvl != nil { + in, out := &in.NetIPv4TCPKeepaliveIntvl, &out.NetIPv4TCPKeepaliveIntvl + *out = new(float64) + **out = **in + } + if in.NetIPv4TCPKeepaliveProbes != nil { + in, out := &in.NetIPv4TCPKeepaliveProbes, &out.NetIPv4TCPKeepaliveProbes + *out = new(float64) + **out = **in + } + if in.NetIPv4TCPKeepaliveTime != nil { + in, out := &in.NetIPv4TCPKeepaliveTime, &out.NetIPv4TCPKeepaliveTime + *out = new(float64) + **out = **in + } + if in.NetIPv4TCPMaxSynBacklog != nil { + in, out := &in.NetIPv4TCPMaxSynBacklog, &out.NetIPv4TCPMaxSynBacklog + *out = new(float64) + **out = **in + } + if in.NetIPv4TCPMaxTwBuckets != nil { + in, out := &in.NetIPv4TCPMaxTwBuckets, &out.NetIPv4TCPMaxTwBuckets + *out = new(float64) + **out = **in + } + if in.NetIPv4TCPTwReuse != nil { + in, out := &in.NetIPv4TCPTwReuse, &out.NetIPv4TCPTwReuse + *out = new(bool) + **out = **in + } + if in.NetNetfilterNfConntrackBuckets != nil { + in, out := &in.NetNetfilterNfConntrackBuckets, &out.NetNetfilterNfConntrackBuckets + *out = new(float64) + **out = **in + } + if in.NetNetfilterNfConntrackMax != nil { + in, out := &in.NetNetfilterNfConntrackMax, &out.NetNetfilterNfConntrackMax + *out = new(float64) + **out = **in + } + if in.VMMaxMapCount != nil { + in, out := &in.VMMaxMapCount, &out.VMMaxMapCount + *out = new(float64) + **out = 
**in + } + if in.VMSwappiness != nil { + in, out := &in.VMSwappiness, &out.VMSwappiness + *out = new(float64) + **out = **in + } + if in.VMVfsCachePressure != nil { + in, out := &in.VMVfsCachePressure, &out.VMVfsCachePressure + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SysctlConfigInitParameters. +func (in *SysctlConfigInitParameters) DeepCopy() *SysctlConfigInitParameters { + if in == nil { + return nil + } + out := new(SysctlConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SysctlConfigObservation) DeepCopyInto(out *SysctlConfigObservation) { + *out = *in + if in.FsAioMaxNr != nil { + in, out := &in.FsAioMaxNr, &out.FsAioMaxNr + *out = new(float64) + **out = **in + } + if in.FsFileMax != nil { + in, out := &in.FsFileMax, &out.FsFileMax + *out = new(float64) + **out = **in + } + if in.FsInotifyMaxUserWatches != nil { + in, out := &in.FsInotifyMaxUserWatches, &out.FsInotifyMaxUserWatches + *out = new(float64) + **out = **in + } + if in.FsNrOpen != nil { + in, out := &in.FsNrOpen, &out.FsNrOpen + *out = new(float64) + **out = **in + } + if in.KernelThreadsMax != nil { + in, out := &in.KernelThreadsMax, &out.KernelThreadsMax + *out = new(float64) + **out = **in + } + if in.NetCoreNetdevMaxBacklog != nil { + in, out := &in.NetCoreNetdevMaxBacklog, &out.NetCoreNetdevMaxBacklog + *out = new(float64) + **out = **in + } + if in.NetCoreOptmemMax != nil { + in, out := &in.NetCoreOptmemMax, &out.NetCoreOptmemMax + *out = new(float64) + **out = **in + } + if in.NetCoreRmemDefault != nil { + in, out := &in.NetCoreRmemDefault, &out.NetCoreRmemDefault + *out = new(float64) + **out = **in + } + if in.NetCoreRmemMax != nil { + in, out := &in.NetCoreRmemMax, &out.NetCoreRmemMax + *out = new(float64) + **out = **in + } + if in.NetCoreSomaxconn != nil { + 
in, out := &in.NetCoreSomaxconn, &out.NetCoreSomaxconn + *out = new(float64) + **out = **in + } + if in.NetCoreWmemDefault != nil { + in, out := &in.NetCoreWmemDefault, &out.NetCoreWmemDefault + *out = new(float64) + **out = **in + } + if in.NetCoreWmemMax != nil { + in, out := &in.NetCoreWmemMax, &out.NetCoreWmemMax + *out = new(float64) + **out = **in + } + if in.NetIPv4IPLocalPortRangeMax != nil { + in, out := &in.NetIPv4IPLocalPortRangeMax, &out.NetIPv4IPLocalPortRangeMax + *out = new(float64) + **out = **in + } + if in.NetIPv4IPLocalPortRangeMin != nil { + in, out := &in.NetIPv4IPLocalPortRangeMin, &out.NetIPv4IPLocalPortRangeMin + *out = new(float64) + **out = **in + } + if in.NetIPv4NeighDefaultGcThresh1 != nil { + in, out := &in.NetIPv4NeighDefaultGcThresh1, &out.NetIPv4NeighDefaultGcThresh1 + *out = new(float64) + **out = **in + } + if in.NetIPv4NeighDefaultGcThresh2 != nil { + in, out := &in.NetIPv4NeighDefaultGcThresh2, &out.NetIPv4NeighDefaultGcThresh2 + *out = new(float64) + **out = **in + } + if in.NetIPv4NeighDefaultGcThresh3 != nil { + in, out := &in.NetIPv4NeighDefaultGcThresh3, &out.NetIPv4NeighDefaultGcThresh3 + *out = new(float64) + **out = **in + } + if in.NetIPv4TCPFinTimeout != nil { + in, out := &in.NetIPv4TCPFinTimeout, &out.NetIPv4TCPFinTimeout + *out = new(float64) + **out = **in + } + if in.NetIPv4TCPKeepaliveIntvl != nil { + in, out := &in.NetIPv4TCPKeepaliveIntvl, &out.NetIPv4TCPKeepaliveIntvl + *out = new(float64) + **out = **in + } + if in.NetIPv4TCPKeepaliveProbes != nil { + in, out := &in.NetIPv4TCPKeepaliveProbes, &out.NetIPv4TCPKeepaliveProbes + *out = new(float64) + **out = **in + } + if in.NetIPv4TCPKeepaliveTime != nil { + in, out := &in.NetIPv4TCPKeepaliveTime, &out.NetIPv4TCPKeepaliveTime + *out = new(float64) + **out = **in + } + if in.NetIPv4TCPMaxSynBacklog != nil { + in, out := &in.NetIPv4TCPMaxSynBacklog, &out.NetIPv4TCPMaxSynBacklog + *out = new(float64) + **out = **in + } + if in.NetIPv4TCPMaxTwBuckets != nil { + in, 
out := &in.NetIPv4TCPMaxTwBuckets, &out.NetIPv4TCPMaxTwBuckets + *out = new(float64) + **out = **in + } + if in.NetIPv4TCPTwReuse != nil { + in, out := &in.NetIPv4TCPTwReuse, &out.NetIPv4TCPTwReuse + *out = new(bool) + **out = **in + } + if in.NetNetfilterNfConntrackBuckets != nil { + in, out := &in.NetNetfilterNfConntrackBuckets, &out.NetNetfilterNfConntrackBuckets + *out = new(float64) + **out = **in + } + if in.NetNetfilterNfConntrackMax != nil { + in, out := &in.NetNetfilterNfConntrackMax, &out.NetNetfilterNfConntrackMax + *out = new(float64) + **out = **in + } + if in.VMMaxMapCount != nil { + in, out := &in.VMMaxMapCount, &out.VMMaxMapCount + *out = new(float64) + **out = **in + } + if in.VMSwappiness != nil { + in, out := &in.VMSwappiness, &out.VMSwappiness + *out = new(float64) + **out = **in + } + if in.VMVfsCachePressure != nil { + in, out := &in.VMVfsCachePressure, &out.VMVfsCachePressure + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SysctlConfigObservation. +func (in *SysctlConfigObservation) DeepCopy() *SysctlConfigObservation { + if in == nil { + return nil + } + out := new(SysctlConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SysctlConfigParameters) DeepCopyInto(out *SysctlConfigParameters) { + *out = *in + if in.FsAioMaxNr != nil { + in, out := &in.FsAioMaxNr, &out.FsAioMaxNr + *out = new(float64) + **out = **in + } + if in.FsFileMax != nil { + in, out := &in.FsFileMax, &out.FsFileMax + *out = new(float64) + **out = **in + } + if in.FsInotifyMaxUserWatches != nil { + in, out := &in.FsInotifyMaxUserWatches, &out.FsInotifyMaxUserWatches + *out = new(float64) + **out = **in + } + if in.FsNrOpen != nil { + in, out := &in.FsNrOpen, &out.FsNrOpen + *out = new(float64) + **out = **in + } + if in.KernelThreadsMax != nil { + in, out := &in.KernelThreadsMax, &out.KernelThreadsMax + *out = new(float64) + **out = **in + } + if in.NetCoreNetdevMaxBacklog != nil { + in, out := &in.NetCoreNetdevMaxBacklog, &out.NetCoreNetdevMaxBacklog + *out = new(float64) + **out = **in + } + if in.NetCoreOptmemMax != nil { + in, out := &in.NetCoreOptmemMax, &out.NetCoreOptmemMax + *out = new(float64) + **out = **in + } + if in.NetCoreRmemDefault != nil { + in, out := &in.NetCoreRmemDefault, &out.NetCoreRmemDefault + *out = new(float64) + **out = **in + } + if in.NetCoreRmemMax != nil { + in, out := &in.NetCoreRmemMax, &out.NetCoreRmemMax + *out = new(float64) + **out = **in + } + if in.NetCoreSomaxconn != nil { + in, out := &in.NetCoreSomaxconn, &out.NetCoreSomaxconn + *out = new(float64) + **out = **in + } + if in.NetCoreWmemDefault != nil { + in, out := &in.NetCoreWmemDefault, &out.NetCoreWmemDefault + *out = new(float64) + **out = **in + } + if in.NetCoreWmemMax != nil { + in, out := &in.NetCoreWmemMax, &out.NetCoreWmemMax + *out = new(float64) + **out = **in + } + if in.NetIPv4IPLocalPortRangeMax != nil { + in, out := &in.NetIPv4IPLocalPortRangeMax, &out.NetIPv4IPLocalPortRangeMax + *out = new(float64) + **out = **in + } + if in.NetIPv4IPLocalPortRangeMin != nil { + in, out := &in.NetIPv4IPLocalPortRangeMin, &out.NetIPv4IPLocalPortRangeMin + *out = new(float64) + **out = **in + } + if 
in.NetIPv4NeighDefaultGcThresh1 != nil { + in, out := &in.NetIPv4NeighDefaultGcThresh1, &out.NetIPv4NeighDefaultGcThresh1 + *out = new(float64) + **out = **in + } + if in.NetIPv4NeighDefaultGcThresh2 != nil { + in, out := &in.NetIPv4NeighDefaultGcThresh2, &out.NetIPv4NeighDefaultGcThresh2 + *out = new(float64) + **out = **in + } + if in.NetIPv4NeighDefaultGcThresh3 != nil { + in, out := &in.NetIPv4NeighDefaultGcThresh3, &out.NetIPv4NeighDefaultGcThresh3 + *out = new(float64) + **out = **in + } + if in.NetIPv4TCPFinTimeout != nil { + in, out := &in.NetIPv4TCPFinTimeout, &out.NetIPv4TCPFinTimeout + *out = new(float64) + **out = **in + } + if in.NetIPv4TCPKeepaliveIntvl != nil { + in, out := &in.NetIPv4TCPKeepaliveIntvl, &out.NetIPv4TCPKeepaliveIntvl + *out = new(float64) + **out = **in + } + if in.NetIPv4TCPKeepaliveProbes != nil { + in, out := &in.NetIPv4TCPKeepaliveProbes, &out.NetIPv4TCPKeepaliveProbes + *out = new(float64) + **out = **in + } + if in.NetIPv4TCPKeepaliveTime != nil { + in, out := &in.NetIPv4TCPKeepaliveTime, &out.NetIPv4TCPKeepaliveTime + *out = new(float64) + **out = **in + } + if in.NetIPv4TCPMaxSynBacklog != nil { + in, out := &in.NetIPv4TCPMaxSynBacklog, &out.NetIPv4TCPMaxSynBacklog + *out = new(float64) + **out = **in + } + if in.NetIPv4TCPMaxTwBuckets != nil { + in, out := &in.NetIPv4TCPMaxTwBuckets, &out.NetIPv4TCPMaxTwBuckets + *out = new(float64) + **out = **in + } + if in.NetIPv4TCPTwReuse != nil { + in, out := &in.NetIPv4TCPTwReuse, &out.NetIPv4TCPTwReuse + *out = new(bool) + **out = **in + } + if in.NetNetfilterNfConntrackBuckets != nil { + in, out := &in.NetNetfilterNfConntrackBuckets, &out.NetNetfilterNfConntrackBuckets + *out = new(float64) + **out = **in + } + if in.NetNetfilterNfConntrackMax != nil { + in, out := &in.NetNetfilterNfConntrackMax, &out.NetNetfilterNfConntrackMax + *out = new(float64) + **out = **in + } + if in.VMMaxMapCount != nil { + in, out := &in.VMMaxMapCount, &out.VMMaxMapCount + *out = new(float64) + **out = 
**in + } + if in.VMSwappiness != nil { + in, out := &in.VMSwappiness, &out.VMSwappiness + *out = new(float64) + **out = **in + } + if in.VMVfsCachePressure != nil { + in, out := &in.VMVfsCachePressure, &out.VMVfsCachePressure + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SysctlConfigParameters. +func (in *SysctlConfigParameters) DeepCopy() *SysctlConfigParameters { + if in == nil { + return nil + } + out := new(SysctlConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UpgradeSettingsInitParameters) DeepCopyInto(out *UpgradeSettingsInitParameters) { + *out = *in + if in.MaxSurge != nil { + in, out := &in.MaxSurge, &out.MaxSurge + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpgradeSettingsInitParameters. +func (in *UpgradeSettingsInitParameters) DeepCopy() *UpgradeSettingsInitParameters { + if in == nil { + return nil + } + out := new(UpgradeSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UpgradeSettingsObservation) DeepCopyInto(out *UpgradeSettingsObservation) { + *out = *in + if in.MaxSurge != nil { + in, out := &in.MaxSurge, &out.MaxSurge + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpgradeSettingsObservation. +func (in *UpgradeSettingsObservation) DeepCopy() *UpgradeSettingsObservation { + if in == nil { + return nil + } + out := new(UpgradeSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UpgradeSettingsParameters) DeepCopyInto(out *UpgradeSettingsParameters) { + *out = *in + if in.MaxSurge != nil { + in, out := &in.MaxSurge, &out.MaxSurge + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpgradeSettingsParameters. +func (in *UpgradeSettingsParameters) DeepCopy() *UpgradeSettingsParameters { + if in == nil { + return nil + } + out := new(UpgradeSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebAppRoutingIdentityInitParameters) DeepCopyInto(out *WebAppRoutingIdentityInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebAppRoutingIdentityInitParameters. +func (in *WebAppRoutingIdentityInitParameters) DeepCopy() *WebAppRoutingIdentityInitParameters { + if in == nil { + return nil + } + out := new(WebAppRoutingIdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebAppRoutingIdentityObservation) DeepCopyInto(out *WebAppRoutingIdentityObservation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ObjectID != nil { + in, out := &in.ObjectID, &out.ObjectID + *out = new(string) + **out = **in + } + if in.UserAssignedIdentityID != nil { + in, out := &in.UserAssignedIdentityID, &out.UserAssignedIdentityID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebAppRoutingIdentityObservation. 
+func (in *WebAppRoutingIdentityObservation) DeepCopy() *WebAppRoutingIdentityObservation { + if in == nil { + return nil + } + out := new(WebAppRoutingIdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebAppRoutingIdentityParameters) DeepCopyInto(out *WebAppRoutingIdentityParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebAppRoutingIdentityParameters. +func (in *WebAppRoutingIdentityParameters) DeepCopy() *WebAppRoutingIdentityParameters { + if in == nil { + return nil + } + out := new(WebAppRoutingIdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebAppRoutingInitParameters) DeepCopyInto(out *WebAppRoutingInitParameters) { + *out = *in + if in.DNSZoneID != nil { + in, out := &in.DNSZoneID, &out.DNSZoneID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebAppRoutingInitParameters. +func (in *WebAppRoutingInitParameters) DeepCopy() *WebAppRoutingInitParameters { + if in == nil { + return nil + } + out := new(WebAppRoutingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WebAppRoutingObservation) DeepCopyInto(out *WebAppRoutingObservation) { + *out = *in + if in.DNSZoneID != nil { + in, out := &in.DNSZoneID, &out.DNSZoneID + *out = new(string) + **out = **in + } + if in.WebAppRoutingIdentity != nil { + in, out := &in.WebAppRoutingIdentity, &out.WebAppRoutingIdentity + *out = make([]WebAppRoutingIdentityObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebAppRoutingObservation. +func (in *WebAppRoutingObservation) DeepCopy() *WebAppRoutingObservation { + if in == nil { + return nil + } + out := new(WebAppRoutingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebAppRoutingParameters) DeepCopyInto(out *WebAppRoutingParameters) { + *out = *in + if in.DNSZoneID != nil { + in, out := &in.DNSZoneID, &out.DNSZoneID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebAppRoutingParameters. +func (in *WebAppRoutingParameters) DeepCopy() *WebAppRoutingParameters { + if in == nil { + return nil + } + out := new(WebAppRoutingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsProfileInitParameters) DeepCopyInto(out *WindowsProfileInitParameters) { + *out = *in + if in.AdminUsername != nil { + in, out := &in.AdminUsername, &out.AdminUsername + *out = new(string) + **out = **in + } + if in.Gmsa != nil { + in, out := &in.Gmsa, &out.Gmsa + *out = new(GmsaInitParameters) + (*in).DeepCopyInto(*out) + } + if in.License != nil { + in, out := &in.License, &out.License + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsProfileInitParameters. +func (in *WindowsProfileInitParameters) DeepCopy() *WindowsProfileInitParameters { + if in == nil { + return nil + } + out := new(WindowsProfileInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsProfileObservation) DeepCopyInto(out *WindowsProfileObservation) { + *out = *in + if in.AdminUsername != nil { + in, out := &in.AdminUsername, &out.AdminUsername + *out = new(string) + **out = **in + } + if in.Gmsa != nil { + in, out := &in.Gmsa, &out.Gmsa + *out = new(GmsaObservation) + (*in).DeepCopyInto(*out) + } + if in.License != nil { + in, out := &in.License, &out.License + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsProfileObservation. +func (in *WindowsProfileObservation) DeepCopy() *WindowsProfileObservation { + if in == nil { + return nil + } + out := new(WindowsProfileObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsProfileParameters) DeepCopyInto(out *WindowsProfileParameters) { + *out = *in + if in.AdminPasswordSecretRef != nil { + in, out := &in.AdminPasswordSecretRef, &out.AdminPasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.AdminUsername != nil { + in, out := &in.AdminUsername, &out.AdminUsername + *out = new(string) + **out = **in + } + if in.Gmsa != nil { + in, out := &in.Gmsa, &out.Gmsa + *out = new(GmsaParameters) + (*in).DeepCopyInto(*out) + } + if in.License != nil { + in, out := &in.License, &out.License + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsProfileParameters. +func (in *WindowsProfileParameters) DeepCopy() *WindowsProfileParameters { + if in == nil { + return nil + } + out := new(WindowsProfileParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkloadAutoscalerProfileInitParameters) DeepCopyInto(out *WorkloadAutoscalerProfileInitParameters) { + *out = *in + if in.KedaEnabled != nil { + in, out := &in.KedaEnabled, &out.KedaEnabled + *out = new(bool) + **out = **in + } + if in.VerticalPodAutoscalerEnabled != nil { + in, out := &in.VerticalPodAutoscalerEnabled, &out.VerticalPodAutoscalerEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadAutoscalerProfileInitParameters. +func (in *WorkloadAutoscalerProfileInitParameters) DeepCopy() *WorkloadAutoscalerProfileInitParameters { + if in == nil { + return nil + } + out := new(WorkloadAutoscalerProfileInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WorkloadAutoscalerProfileObservation) DeepCopyInto(out *WorkloadAutoscalerProfileObservation) { + *out = *in + if in.KedaEnabled != nil { + in, out := &in.KedaEnabled, &out.KedaEnabled + *out = new(bool) + **out = **in + } + if in.VerticalPodAutoscalerControlledValues != nil { + in, out := &in.VerticalPodAutoscalerControlledValues, &out.VerticalPodAutoscalerControlledValues + *out = new(string) + **out = **in + } + if in.VerticalPodAutoscalerEnabled != nil { + in, out := &in.VerticalPodAutoscalerEnabled, &out.VerticalPodAutoscalerEnabled + *out = new(bool) + **out = **in + } + if in.VerticalPodAutoscalerUpdateMode != nil { + in, out := &in.VerticalPodAutoscalerUpdateMode, &out.VerticalPodAutoscalerUpdateMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadAutoscalerProfileObservation. +func (in *WorkloadAutoscalerProfileObservation) DeepCopy() *WorkloadAutoscalerProfileObservation { + if in == nil { + return nil + } + out := new(WorkloadAutoscalerProfileObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkloadAutoscalerProfileParameters) DeepCopyInto(out *WorkloadAutoscalerProfileParameters) { + *out = *in + if in.KedaEnabled != nil { + in, out := &in.KedaEnabled, &out.KedaEnabled + *out = new(bool) + **out = **in + } + if in.VerticalPodAutoscalerEnabled != nil { + in, out := &in.VerticalPodAutoscalerEnabled, &out.VerticalPodAutoscalerEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadAutoscalerProfileParameters. 
+func (in *WorkloadAutoscalerProfileParameters) DeepCopy() *WorkloadAutoscalerProfileParameters { + if in == nil { + return nil + } + out := new(WorkloadAutoscalerProfileParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/containerservice/v1beta2/zz_generated.managed.go b/apis/containerservice/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..b7e1062c8 --- /dev/null +++ b/apis/containerservice/v1beta2/zz_generated.managed.go @@ -0,0 +1,188 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this KubernetesCluster. +func (mg *KubernetesCluster) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this KubernetesCluster. +func (mg *KubernetesCluster) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this KubernetesCluster. +func (mg *KubernetesCluster) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this KubernetesCluster. +func (mg *KubernetesCluster) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this KubernetesCluster. +func (mg *KubernetesCluster) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this KubernetesCluster. +func (mg *KubernetesCluster) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this KubernetesCluster. +func (mg *KubernetesCluster) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) 
+} + +// SetDeletionPolicy of this KubernetesCluster. +func (mg *KubernetesCluster) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this KubernetesCluster. +func (mg *KubernetesCluster) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this KubernetesCluster. +func (mg *KubernetesCluster) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this KubernetesCluster. +func (mg *KubernetesCluster) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this KubernetesCluster. +func (mg *KubernetesCluster) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this KubernetesClusterNodePool. +func (mg *KubernetesClusterNodePool) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this KubernetesClusterNodePool. +func (mg *KubernetesClusterNodePool) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this KubernetesClusterNodePool. +func (mg *KubernetesClusterNodePool) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this KubernetesClusterNodePool. +func (mg *KubernetesClusterNodePool) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this KubernetesClusterNodePool. +func (mg *KubernetesClusterNodePool) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this KubernetesClusterNodePool. 
+func (mg *KubernetesClusterNodePool) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this KubernetesClusterNodePool. +func (mg *KubernetesClusterNodePool) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this KubernetesClusterNodePool. +func (mg *KubernetesClusterNodePool) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this KubernetesClusterNodePool. +func (mg *KubernetesClusterNodePool) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this KubernetesClusterNodePool. +func (mg *KubernetesClusterNodePool) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this KubernetesClusterNodePool. +func (mg *KubernetesClusterNodePool) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this KubernetesClusterNodePool. +func (mg *KubernetesClusterNodePool) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this KubernetesFleetManager. +func (mg *KubernetesFleetManager) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this KubernetesFleetManager. +func (mg *KubernetesFleetManager) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this KubernetesFleetManager. +func (mg *KubernetesFleetManager) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this KubernetesFleetManager. 
+func (mg *KubernetesFleetManager) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this KubernetesFleetManager. +func (mg *KubernetesFleetManager) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this KubernetesFleetManager. +func (mg *KubernetesFleetManager) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this KubernetesFleetManager. +func (mg *KubernetesFleetManager) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this KubernetesFleetManager. +func (mg *KubernetesFleetManager) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this KubernetesFleetManager. +func (mg *KubernetesFleetManager) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this KubernetesFleetManager. +func (mg *KubernetesFleetManager) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this KubernetesFleetManager. +func (mg *KubernetesFleetManager) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this KubernetesFleetManager. 
+func (mg *KubernetesFleetManager) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/containerservice/v1beta2/zz_generated.managedlist.go b/apis/containerservice/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..d5b14dadd --- /dev/null +++ b/apis/containerservice/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,35 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this KubernetesClusterList. +func (l *KubernetesClusterList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this KubernetesClusterNodePoolList. +func (l *KubernetesClusterNodePoolList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this KubernetesFleetManagerList. +func (l *KubernetesFleetManagerList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/containerservice/v1beta2/zz_generated.resolvers.go b/apis/containerservice/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..e656516d7 --- /dev/null +++ b/apis/containerservice/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,436 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + rconfig "github.com/upbound/provider-azure/apis/rconfig" + apisresolver "github.com/upbound/provider-azure/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *KubernetesCluster) ResolveReferences( // ResolveReferences of this KubernetesCluster. + ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + if mg.Spec.ForProvider.APIServerAccessProfile != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.APIServerAccessProfile.SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.APIServerAccessProfile.SubnetIDRef, + Selector: mg.Spec.ForProvider.APIServerAccessProfile.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.APIServerAccessProfile.SubnetID") + } + mg.Spec.ForProvider.APIServerAccessProfile.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.APIServerAccessProfile.SubnetIDRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.AciConnectorLinux != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and 
its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.AciConnectorLinux.SubnetName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.AciConnectorLinux.SubnetNameRef, + Selector: mg.Spec.ForProvider.AciConnectorLinux.SubnetNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.AciConnectorLinux.SubnetName") + } + mg.Spec.ForProvider.AciConnectorLinux.SubnetName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.AciConnectorLinux.SubnetNameRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.DefaultNodePool != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DefaultNodePool.PodSubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.DefaultNodePool.PodSubnetIDRef, + Selector: mg.Spec.ForProvider.DefaultNodePool.PodSubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DefaultNodePool.PodSubnetID") + } + mg.Spec.ForProvider.DefaultNodePool.PodSubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DefaultNodePool.PodSubnetIDRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.DefaultNodePool != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + 
CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DefaultNodePool.VnetSubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.DefaultNodePool.VnetSubnetIDRef, + Selector: mg.Spec.ForProvider.DefaultNodePool.VnetSubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DefaultNodePool.VnetSubnetID") + } + mg.Spec.ForProvider.DefaultNodePool.VnetSubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DefaultNodePool.VnetSubnetIDRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.IngressApplicationGateway != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.IngressApplicationGateway.SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.IngressApplicationGateway.SubnetIDRef, + Selector: mg.Spec.ForProvider.IngressApplicationGateway.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.IngressApplicationGateway.SubnetID") + } + mg.Spec.ForProvider.IngressApplicationGateway.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.IngressApplicationGateway.SubnetIDRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "PrivateDNSZone", "PrivateDNSZoneList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.PrivateDNSZoneID), + 
Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.PrivateDNSZoneIDRef, + Selector: mg.Spec.ForProvider.PrivateDNSZoneIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.PrivateDNSZoneID") + } + mg.Spec.ForProvider.PrivateDNSZoneID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.PrivateDNSZoneIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.APIServerAccessProfile != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.APIServerAccessProfile.SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.APIServerAccessProfile.SubnetIDRef, + Selector: mg.Spec.InitProvider.APIServerAccessProfile.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return 
errors.Wrap(err, "mg.Spec.InitProvider.APIServerAccessProfile.SubnetID") + } + mg.Spec.InitProvider.APIServerAccessProfile.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.APIServerAccessProfile.SubnetIDRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.AciConnectorLinux != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.AciConnectorLinux.SubnetName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.AciConnectorLinux.SubnetNameRef, + Selector: mg.Spec.InitProvider.AciConnectorLinux.SubnetNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.AciConnectorLinux.SubnetName") + } + mg.Spec.InitProvider.AciConnectorLinux.SubnetName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.AciConnectorLinux.SubnetNameRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.DefaultNodePool != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DefaultNodePool.PodSubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.DefaultNodePool.PodSubnetIDRef, + Selector: mg.Spec.InitProvider.DefaultNodePool.PodSubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.DefaultNodePool.PodSubnetID") + } 
+ mg.Spec.InitProvider.DefaultNodePool.PodSubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DefaultNodePool.PodSubnetIDRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.DefaultNodePool != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DefaultNodePool.VnetSubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.DefaultNodePool.VnetSubnetIDRef, + Selector: mg.Spec.InitProvider.DefaultNodePool.VnetSubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.DefaultNodePool.VnetSubnetID") + } + mg.Spec.InitProvider.DefaultNodePool.VnetSubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DefaultNodePool.VnetSubnetIDRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.IngressApplicationGateway != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.IngressApplicationGateway.SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.IngressApplicationGateway.SubnetIDRef, + Selector: mg.Spec.InitProvider.IngressApplicationGateway.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.IngressApplicationGateway.SubnetID") + } + 
mg.Spec.InitProvider.IngressApplicationGateway.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.IngressApplicationGateway.SubnetIDRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "PrivateDNSZone", "PrivateDNSZoneList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.PrivateDNSZoneID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.PrivateDNSZoneIDRef, + Selector: mg.Spec.InitProvider.PrivateDNSZoneIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.PrivateDNSZoneID") + } + mg.Spec.InitProvider.PrivateDNSZoneID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.PrivateDNSZoneIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this KubernetesClusterNodePool. 
+func (mg *KubernetesClusterNodePool) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("containerservice.azure.upbound.io", "v1beta2", "KubernetesCluster", "KubernetesClusterList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.KubernetesClusterID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.KubernetesClusterIDRef, + Selector: mg.Spec.ForProvider.KubernetesClusterIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.KubernetesClusterID") + } + mg.Spec.ForProvider.KubernetesClusterID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.KubernetesClusterIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.PodSubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.PodSubnetIDRef, + Selector: mg.Spec.ForProvider.PodSubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.PodSubnetID") + } + mg.Spec.ForProvider.PodSubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.PodSubnetIDRef = rsp.ResolvedReference + { + m, l, err = 
apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.VnetSubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.VnetSubnetIDRef, + Selector: mg.Spec.ForProvider.VnetSubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VnetSubnetID") + } + mg.Spec.ForProvider.VnetSubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.VnetSubnetIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.PodSubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.PodSubnetIDRef, + Selector: mg.Spec.InitProvider.PodSubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.PodSubnetID") + } + mg.Spec.InitProvider.PodSubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.PodSubnetIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.VnetSubnetID), + Extract: 
rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.VnetSubnetIDRef, + Selector: mg.Spec.InitProvider.VnetSubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.VnetSubnetID") + } + mg.Spec.InitProvider.VnetSubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.VnetSubnetIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this KubernetesFleetManager. +func (mg *KubernetesFleetManager) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/containerservice/v1beta2/zz_groupversion_info.go b/apis/containerservice/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..b30891146 --- /dev/null +++ b/apis/containerservice/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +// +kubebuilder:object:generate=true +// +groupName=containerservice.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "containerservice.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/containerservice/v1beta2/zz_kubernetescluster_terraformed.go b/apis/containerservice/v1beta2/zz_kubernetescluster_terraformed.go new file mode 100755 index 000000000..6c0b0f7b7 --- /dev/null +++ b/apis/containerservice/v1beta2/zz_kubernetescluster_terraformed.go @@ -0,0 +1,133 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this KubernetesCluster +func (mg *KubernetesCluster) GetTerraformResourceType() string { + return "azurerm_kubernetes_cluster" +} + +// GetConnectionDetailsMapping for this KubernetesCluster +func (tr *KubernetesCluster) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"azure_active_directory_role_based_access_control[*].server_app_secret": "spec.forProvider.azureActiveDirectoryRoleBasedAccessControl[*].serverAppSecretSecretRef", "http_proxy_config[*].trusted_ca": "spec.forProvider.httpProxyConfig[*].trustedCaSecretRef", "kube_admin_config[*]": "status.atProvider.kubeAdminConfig[*]", "kube_admin_config[*].client_certificate": "status.atProvider.kubeAdminConfig[*].clientCertificate", "kube_admin_config[*].client_key": "status.atProvider.kubeAdminConfig[*].clientKey", "kube_admin_config[*].cluster_ca_certificate": "status.atProvider.kubeAdminConfig[*].clusterCaCertificate", "kube_admin_config[*].password": "status.atProvider.kubeAdminConfig[*].password", "kube_admin_config_raw": "status.atProvider.kubeAdminConfigRaw", "kube_config[*]": "status.atProvider.kubeConfig[*]", "kube_config[*].client_certificate": "status.atProvider.kubeConfig[*].clientCertificate", "kube_config[*].client_key": "status.atProvider.kubeConfig[*].clientKey", "kube_config[*].cluster_ca_certificate": "status.atProvider.kubeConfig[*].clusterCaCertificate", "kube_config[*].password": "status.atProvider.kubeConfig[*].password", "kube_config_raw": "status.atProvider.kubeConfigRaw", "service_principal[*].client_secret": "spec.forProvider.servicePrincipal[*].clientSecretSecretRef", "windows_profile[*].admin_password": "spec.forProvider.windowsProfile[*].adminPasswordSecretRef"} +} + +// GetObservation of this KubernetesCluster +func 
(tr *KubernetesCluster) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this KubernetesCluster +func (tr *KubernetesCluster) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this KubernetesCluster +func (tr *KubernetesCluster) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this KubernetesCluster +func (tr *KubernetesCluster) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this KubernetesCluster +func (tr *KubernetesCluster) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this KubernetesCluster +func (tr *KubernetesCluster) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this KubernetesCluster +func (tr *KubernetesCluster) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + 
return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this KubernetesCluster using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *KubernetesCluster) LateInitialize(attrs []byte) (bool, error) { + params := &KubernetesClusterParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + opts = append(opts, resource.WithNameFilter("APIServerAuthorizedIPRanges")) + opts = append(opts, resource.WithNameFilter("KubeletIdentity")) + opts = append(opts, resource.WithNameFilter("MicrosoftDefender")) + opts = append(opts, resource.WithNameFilter("OmsAgent")) + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *KubernetesCluster) GetTerraformSchemaVersion() int { + return 2 +} diff --git a/apis/containerservice/v1beta2/zz_kubernetescluster_types.go b/apis/containerservice/v1beta2/zz_kubernetescluster_types.go new file mode 100755 index 000000000..a4705bd80 --- /dev/null +++ b/apis/containerservice/v1beta2/zz_kubernetescluster_types.go @@ -0,0 +1,3483 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type APIServerAccessProfileInitParameters struct { + + // Set of authorized IP ranges to allow access to API server, e.g. ["198.51.100.0/24"]. + // +listType=set + AuthorizedIPRanges []*string `json:"authorizedIpRanges,omitempty" tf:"authorized_ip_ranges,omitempty"` + + // The ID of the Subnet where the API server endpoint is delegated to. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // Should API Server VNet Integration be enabled? For more details please visit Use API Server VNet Integration. 
+ VnetIntegrationEnabled *bool `json:"vnetIntegrationEnabled,omitempty" tf:"vnet_integration_enabled,omitempty"` +} + +type APIServerAccessProfileObservation struct { + + // Set of authorized IP ranges to allow access to API server, e.g. ["198.51.100.0/24"]. + // +listType=set + AuthorizedIPRanges []*string `json:"authorizedIpRanges,omitempty" tf:"authorized_ip_ranges,omitempty"` + + // The ID of the Subnet where the API server endpoint is delegated to. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Should API Server VNet Integration be enabled? For more details please visit Use API Server VNet Integration. + VnetIntegrationEnabled *bool `json:"vnetIntegrationEnabled,omitempty" tf:"vnet_integration_enabled,omitempty"` +} + +type APIServerAccessProfileParameters struct { + + // Set of authorized IP ranges to allow access to API server, e.g. ["198.51.100.0/24"]. + // +kubebuilder:validation:Optional + // +listType=set + AuthorizedIPRanges []*string `json:"authorizedIpRanges,omitempty" tf:"authorized_ip_ranges,omitempty"` + + // The ID of the Subnet where the API server endpoint is delegated to. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // Should API Server VNet Integration be enabled? For more details please visit Use API Server VNet Integration. 
+ // +kubebuilder:validation:Optional + VnetIntegrationEnabled *bool `json:"vnetIntegrationEnabled,omitempty" tf:"vnet_integration_enabled,omitempty"` +} + +type AciConnectorLinuxInitParameters struct { + + // The subnet name for the virtual nodes to run. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + SubnetName *string `json:"subnetName,omitempty" tf:"subnet_name,omitempty"` + + // Reference to a Subnet in network to populate subnetName. + // +kubebuilder:validation:Optional + SubnetNameRef *v1.Reference `json:"subnetNameRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetName. + // +kubebuilder:validation:Optional + SubnetNameSelector *v1.Selector `json:"subnetNameSelector,omitempty" tf:"-"` +} + +type AciConnectorLinuxObservation struct { + + // A connector_identity block is exported. The exported attributes are defined below. + ConnectorIdentity []ConnectorIdentityObservation `json:"connectorIdentity,omitempty" tf:"connector_identity,omitempty"` + + // The subnet name for the virtual nodes to run. + SubnetName *string `json:"subnetName,omitempty" tf:"subnet_name,omitempty"` +} + +type AciConnectorLinuxParameters struct { + + // The subnet name for the virtual nodes to run. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +kubebuilder:validation:Optional + SubnetName *string `json:"subnetName,omitempty" tf:"subnet_name,omitempty"` + + // Reference to a Subnet in network to populate subnetName. + // +kubebuilder:validation:Optional + SubnetNameRef *v1.Reference `json:"subnetNameRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetName. + // +kubebuilder:validation:Optional + SubnetNameSelector *v1.Selector `json:"subnetNameSelector,omitempty" tf:"-"` +} + +type AllowedHostPortsInitParameters struct { + + // Specifies the end of the port range. 
+ PortEnd *float64 `json:"portEnd,omitempty" tf:"port_end,omitempty"` + + // Specifies the start of the port range. + PortStart *float64 `json:"portStart,omitempty" tf:"port_start,omitempty"` + + // Specifies the protocol of the port range. Possible values are TCP and UDP. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` +} + +type AllowedHostPortsObservation struct { + + // Specifies the end of the port range. + PortEnd *float64 `json:"portEnd,omitempty" tf:"port_end,omitempty"` + + // Specifies the start of the port range. + PortStart *float64 `json:"portStart,omitempty" tf:"port_start,omitempty"` + + // Specifies the protocol of the port range. Possible values are TCP and UDP. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` +} + +type AllowedHostPortsParameters struct { + + // Specifies the end of the port range. + // +kubebuilder:validation:Optional + PortEnd *float64 `json:"portEnd,omitempty" tf:"port_end,omitempty"` + + // Specifies the start of the port range. + // +kubebuilder:validation:Optional + PortStart *float64 `json:"portStart,omitempty" tf:"port_start,omitempty"` + + // Specifies the protocol of the port range. Possible values are TCP and UDP. + // +kubebuilder:validation:Optional + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` +} + +type AllowedInitParameters struct { + + // A day in a week. Possible values are Sunday, Monday, Tuesday, Wednesday, Thursday, Friday and Saturday. + Day *string `json:"day,omitempty" tf:"day,omitempty"` + + // An array of hour slots in a day. For example, specifying 1 will allow maintenance from 1:00am to 2:00am. Specifying 1, 2 will allow maintenance from 1:00am to 3:00m. Possible values are between 0 and 23. + // +listType=set + Hours []*float64 `json:"hours,omitempty" tf:"hours,omitempty"` +} + +type AllowedObservation struct { + + // A day in a week. Possible values are Sunday, Monday, Tuesday, Wednesday, Thursday, Friday and Saturday. 
+ Day *string `json:"day,omitempty" tf:"day,omitempty"` + + // An array of hour slots in a day. For example, specifying 1 will allow maintenance from 1:00am to 2:00am. Specifying 1, 2 will allow maintenance from 1:00am to 3:00m. Possible values are between 0 and 23. + // +listType=set + Hours []*float64 `json:"hours,omitempty" tf:"hours,omitempty"` +} + +type AllowedParameters struct { + + // A day in a week. Possible values are Sunday, Monday, Tuesday, Wednesday, Thursday, Friday and Saturday. + // +kubebuilder:validation:Optional + Day *string `json:"day" tf:"day,omitempty"` + + // An array of hour slots in a day. For example, specifying 1 will allow maintenance from 1:00am to 2:00am. Specifying 1, 2 will allow maintenance from 1:00am to 3:00m. Possible values are between 0 and 23. + // +kubebuilder:validation:Optional + // +listType=set + Hours []*float64 `json:"hours" tf:"hours,omitempty"` +} + +type AutoScalerProfileInitParameters struct { + + // Detect similar node groups and balance the number of nodes between them. Defaults to false. + BalanceSimilarNodeGroups *bool `json:"balanceSimilarNodeGroups,omitempty" tf:"balance_similar_node_groups,omitempty"` + + // Maximum number of empty nodes that can be deleted at the same time. Defaults to 10. + EmptyBulkDeleteMax *string `json:"emptyBulkDeleteMax,omitempty" tf:"empty_bulk_delete_max,omitempty"` + + // Expander to use. Possible values are least-waste, priority, most-pods and random. Defaults to random. + Expander *string `json:"expander,omitempty" tf:"expander,omitempty"` + + // Maximum number of seconds the cluster autoscaler waits for pod termination when trying to scale down a node. Defaults to 600. + MaxGracefulTerminationSec *string `json:"maxGracefulTerminationSec,omitempty" tf:"max_graceful_termination_sec,omitempty"` + + // Maximum time the autoscaler waits for a node to be provisioned. Defaults to 15m. 
+ MaxNodeProvisioningTime *string `json:"maxNodeProvisioningTime,omitempty" tf:"max_node_provisioning_time,omitempty"` + + // Maximum Number of allowed unready nodes. Defaults to 3. + MaxUnreadyNodes *float64 `json:"maxUnreadyNodes,omitempty" tf:"max_unready_nodes,omitempty"` + + // Maximum percentage of unready nodes the cluster autoscaler will stop if the percentage is exceeded. Defaults to 45. + MaxUnreadyPercentage *float64 `json:"maxUnreadyPercentage,omitempty" tf:"max_unready_percentage,omitempty"` + + // For scenarios like burst/batch scale where you don't want CA to act before the kubernetes scheduler could schedule all the pods, you can tell CA to ignore unscheduled pods before they're a certain age. Defaults to 10s. + NewPodScaleUpDelay *string `json:"newPodScaleUpDelay,omitempty" tf:"new_pod_scale_up_delay,omitempty"` + + // How long after the scale up of AKS nodes the scale down evaluation resumes. Defaults to 10m. + ScaleDownDelayAfterAdd *string `json:"scaleDownDelayAfterAdd,omitempty" tf:"scale_down_delay_after_add,omitempty"` + + // How long after node deletion that scale down evaluation resumes. Defaults to the value used for scan_interval. + ScaleDownDelayAfterDelete *string `json:"scaleDownDelayAfterDelete,omitempty" tf:"scale_down_delay_after_delete,omitempty"` + + // How long after scale down failure that scale down evaluation resumes. Defaults to 3m. + ScaleDownDelayAfterFailure *string `json:"scaleDownDelayAfterFailure,omitempty" tf:"scale_down_delay_after_failure,omitempty"` + + // How long a node should be unneeded before it is eligible for scale down. Defaults to 10m. + ScaleDownUnneeded *string `json:"scaleDownUnneeded,omitempty" tf:"scale_down_unneeded,omitempty"` + + // How long an unready node should be unneeded before it is eligible for scale down. Defaults to 20m. 
+ ScaleDownUnready *string `json:"scaleDownUnready,omitempty" tf:"scale_down_unready,omitempty"` + + // Node utilization level, defined as sum of requested resources divided by capacity, below which a node can be considered for scale down. Defaults to 0.5. + ScaleDownUtilizationThreshold *string `json:"scaleDownUtilizationThreshold,omitempty" tf:"scale_down_utilization_threshold,omitempty"` + + // How often the AKS Cluster should be re-evaluated for scale up/down. Defaults to 10s. + ScanInterval *string `json:"scanInterval,omitempty" tf:"scan_interval,omitempty"` + + // If true cluster autoscaler will never delete nodes with pods with local storage, for example, EmptyDir or HostPath. Defaults to true. + SkipNodesWithLocalStorage *bool `json:"skipNodesWithLocalStorage,omitempty" tf:"skip_nodes_with_local_storage,omitempty"` + + // If true cluster autoscaler will never delete nodes with pods from kube-system (except for DaemonSet or mirror pods). Defaults to true. + SkipNodesWithSystemPods *bool `json:"skipNodesWithSystemPods,omitempty" tf:"skip_nodes_with_system_pods,omitempty"` +} + +type AutoScalerProfileObservation struct { + + // Detect similar node groups and balance the number of nodes between them. Defaults to false. + BalanceSimilarNodeGroups *bool `json:"balanceSimilarNodeGroups,omitempty" tf:"balance_similar_node_groups,omitempty"` + + // Maximum number of empty nodes that can be deleted at the same time. Defaults to 10. + EmptyBulkDeleteMax *string `json:"emptyBulkDeleteMax,omitempty" tf:"empty_bulk_delete_max,omitempty"` + + // Expander to use. Possible values are least-waste, priority, most-pods and random. Defaults to random. + Expander *string `json:"expander,omitempty" tf:"expander,omitempty"` + + // Maximum number of seconds the cluster autoscaler waits for pod termination when trying to scale down a node. Defaults to 600. 
+ MaxGracefulTerminationSec *string `json:"maxGracefulTerminationSec,omitempty" tf:"max_graceful_termination_sec,omitempty"` + + // Maximum time the autoscaler waits for a node to be provisioned. Defaults to 15m. + MaxNodeProvisioningTime *string `json:"maxNodeProvisioningTime,omitempty" tf:"max_node_provisioning_time,omitempty"` + + // Maximum Number of allowed unready nodes. Defaults to 3. + MaxUnreadyNodes *float64 `json:"maxUnreadyNodes,omitempty" tf:"max_unready_nodes,omitempty"` + + // Maximum percentage of unready nodes the cluster autoscaler will stop if the percentage is exceeded. Defaults to 45. + MaxUnreadyPercentage *float64 `json:"maxUnreadyPercentage,omitempty" tf:"max_unready_percentage,omitempty"` + + // For scenarios like burst/batch scale where you don't want CA to act before the kubernetes scheduler could schedule all the pods, you can tell CA to ignore unscheduled pods before they're a certain age. Defaults to 10s. + NewPodScaleUpDelay *string `json:"newPodScaleUpDelay,omitempty" tf:"new_pod_scale_up_delay,omitempty"` + + // How long after the scale up of AKS nodes the scale down evaluation resumes. Defaults to 10m. + ScaleDownDelayAfterAdd *string `json:"scaleDownDelayAfterAdd,omitempty" tf:"scale_down_delay_after_add,omitempty"` + + // How long after node deletion that scale down evaluation resumes. Defaults to the value used for scan_interval. + ScaleDownDelayAfterDelete *string `json:"scaleDownDelayAfterDelete,omitempty" tf:"scale_down_delay_after_delete,omitempty"` + + // How long after scale down failure that scale down evaluation resumes. Defaults to 3m. + ScaleDownDelayAfterFailure *string `json:"scaleDownDelayAfterFailure,omitempty" tf:"scale_down_delay_after_failure,omitempty"` + + // How long a node should be unneeded before it is eligible for scale down. Defaults to 10m. 
+ ScaleDownUnneeded *string `json:"scaleDownUnneeded,omitempty" tf:"scale_down_unneeded,omitempty"` + + // How long an unready node should be unneeded before it is eligible for scale down. Defaults to 20m. + ScaleDownUnready *string `json:"scaleDownUnready,omitempty" tf:"scale_down_unready,omitempty"` + + // Node utilization level, defined as sum of requested resources divided by capacity, below which a node can be considered for scale down. Defaults to 0.5. + ScaleDownUtilizationThreshold *string `json:"scaleDownUtilizationThreshold,omitempty" tf:"scale_down_utilization_threshold,omitempty"` + + // How often the AKS Cluster should be re-evaluated for scale up/down. Defaults to 10s. + ScanInterval *string `json:"scanInterval,omitempty" tf:"scan_interval,omitempty"` + + // If true cluster autoscaler will never delete nodes with pods with local storage, for example, EmptyDir or HostPath. Defaults to true. + SkipNodesWithLocalStorage *bool `json:"skipNodesWithLocalStorage,omitempty" tf:"skip_nodes_with_local_storage,omitempty"` + + // If true cluster autoscaler will never delete nodes with pods from kube-system (except for DaemonSet or mirror pods). Defaults to true. + SkipNodesWithSystemPods *bool `json:"skipNodesWithSystemPods,omitempty" tf:"skip_nodes_with_system_pods,omitempty"` +} + +type AutoScalerProfileParameters struct { + + // Detect similar node groups and balance the number of nodes between them. Defaults to false. + // +kubebuilder:validation:Optional + BalanceSimilarNodeGroups *bool `json:"balanceSimilarNodeGroups,omitempty" tf:"balance_similar_node_groups,omitempty"` + + // Maximum number of empty nodes that can be deleted at the same time. Defaults to 10. + // +kubebuilder:validation:Optional + EmptyBulkDeleteMax *string `json:"emptyBulkDeleteMax,omitempty" tf:"empty_bulk_delete_max,omitempty"` + + // Expander to use. Possible values are least-waste, priority, most-pods and random. Defaults to random. 
+ // +kubebuilder:validation:Optional + Expander *string `json:"expander,omitempty" tf:"expander,omitempty"` + + // Maximum number of seconds the cluster autoscaler waits for pod termination when trying to scale down a node. Defaults to 600. + // +kubebuilder:validation:Optional + MaxGracefulTerminationSec *string `json:"maxGracefulTerminationSec,omitempty" tf:"max_graceful_termination_sec,omitempty"` + + // Maximum time the autoscaler waits for a node to be provisioned. Defaults to 15m. + // +kubebuilder:validation:Optional + MaxNodeProvisioningTime *string `json:"maxNodeProvisioningTime,omitempty" tf:"max_node_provisioning_time,omitempty"` + + // Maximum Number of allowed unready nodes. Defaults to 3. + // +kubebuilder:validation:Optional + MaxUnreadyNodes *float64 `json:"maxUnreadyNodes,omitempty" tf:"max_unready_nodes,omitempty"` + + // Maximum percentage of unready nodes the cluster autoscaler will stop if the percentage is exceeded. Defaults to 45. + // +kubebuilder:validation:Optional + MaxUnreadyPercentage *float64 `json:"maxUnreadyPercentage,omitempty" tf:"max_unready_percentage,omitempty"` + + // For scenarios like burst/batch scale where you don't want CA to act before the kubernetes scheduler could schedule all the pods, you can tell CA to ignore unscheduled pods before they're a certain age. Defaults to 10s. + // +kubebuilder:validation:Optional + NewPodScaleUpDelay *string `json:"newPodScaleUpDelay,omitempty" tf:"new_pod_scale_up_delay,omitempty"` + + // How long after the scale up of AKS nodes the scale down evaluation resumes. Defaults to 10m. + // +kubebuilder:validation:Optional + ScaleDownDelayAfterAdd *string `json:"scaleDownDelayAfterAdd,omitempty" tf:"scale_down_delay_after_add,omitempty"` + + // How long after node deletion that scale down evaluation resumes. Defaults to the value used for scan_interval. 
+ // +kubebuilder:validation:Optional + ScaleDownDelayAfterDelete *string `json:"scaleDownDelayAfterDelete,omitempty" tf:"scale_down_delay_after_delete,omitempty"` + + // How long after scale down failure that scale down evaluation resumes. Defaults to 3m. + // +kubebuilder:validation:Optional + ScaleDownDelayAfterFailure *string `json:"scaleDownDelayAfterFailure,omitempty" tf:"scale_down_delay_after_failure,omitempty"` + + // How long a node should be unneeded before it is eligible for scale down. Defaults to 10m. + // +kubebuilder:validation:Optional + ScaleDownUnneeded *string `json:"scaleDownUnneeded,omitempty" tf:"scale_down_unneeded,omitempty"` + + // How long an unready node should be unneeded before it is eligible for scale down. Defaults to 20m. + // +kubebuilder:validation:Optional + ScaleDownUnready *string `json:"scaleDownUnready,omitempty" tf:"scale_down_unready,omitempty"` + + // Node utilization level, defined as sum of requested resources divided by capacity, below which a node can be considered for scale down. Defaults to 0.5. + // +kubebuilder:validation:Optional + ScaleDownUtilizationThreshold *string `json:"scaleDownUtilizationThreshold,omitempty" tf:"scale_down_utilization_threshold,omitempty"` + + // How often the AKS Cluster should be re-evaluated for scale up/down. Defaults to 10s. + // +kubebuilder:validation:Optional + ScanInterval *string `json:"scanInterval,omitempty" tf:"scan_interval,omitempty"` + + // If true cluster autoscaler will never delete nodes with pods with local storage, for example, EmptyDir or HostPath. Defaults to true. + // +kubebuilder:validation:Optional + SkipNodesWithLocalStorage *bool `json:"skipNodesWithLocalStorage,omitempty" tf:"skip_nodes_with_local_storage,omitempty"` + + // If true cluster autoscaler will never delete nodes with pods from kube-system (except for DaemonSet or mirror pods). Defaults to true. 
+ // +kubebuilder:validation:Optional + SkipNodesWithSystemPods *bool `json:"skipNodesWithSystemPods,omitempty" tf:"skip_nodes_with_system_pods,omitempty"` +} + +type AzureActiveDirectoryRoleBasedAccessControlInitParameters struct { + + // A list of Object IDs of Azure Active Directory Groups which should have Admin Role on the Cluster. + AdminGroupObjectIds []*string `json:"adminGroupObjectIds,omitempty" tf:"admin_group_object_ids,omitempty"` + + // Is Role Based Access Control based on Azure AD enabled? + AzureRbacEnabled *bool `json:"azureRbacEnabled,omitempty" tf:"azure_rbac_enabled,omitempty"` + + // The Client ID of an Azure Active Directory Application. + ClientAppID *string `json:"clientAppId,omitempty" tf:"client_app_id,omitempty"` + + // Is the Azure Active Directory integration Managed, meaning that Azure will create/manage the Service Principal used for integration. + Managed *bool `json:"managed,omitempty" tf:"managed,omitempty"` + + // The Server ID of an Azure Active Directory Application. + ServerAppID *string `json:"serverAppId,omitempty" tf:"server_app_id,omitempty"` + + // The Tenant ID used for Azure Active Directory Application. If this isn't specified the Tenant ID of the current Subscription is used. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` +} + +type AzureActiveDirectoryRoleBasedAccessControlObservation struct { + + // A list of Object IDs of Azure Active Directory Groups which should have Admin Role on the Cluster. + AdminGroupObjectIds []*string `json:"adminGroupObjectIds,omitempty" tf:"admin_group_object_ids,omitempty"` + + // Is Role Based Access Control based on Azure AD enabled? + AzureRbacEnabled *bool `json:"azureRbacEnabled,omitempty" tf:"azure_rbac_enabled,omitempty"` + + // The Client ID of an Azure Active Directory Application. 
+ ClientAppID *string `json:"clientAppId,omitempty" tf:"client_app_id,omitempty"` + + // Is the Azure Active Directory integration Managed, meaning that Azure will create/manage the Service Principal used for integration. + Managed *bool `json:"managed,omitempty" tf:"managed,omitempty"` + + // The Server ID of an Azure Active Directory Application. + ServerAppID *string `json:"serverAppId,omitempty" tf:"server_app_id,omitempty"` + + // The Tenant ID used for Azure Active Directory Application. If this isn't specified the Tenant ID of the current Subscription is used. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` +} + +type AzureActiveDirectoryRoleBasedAccessControlParameters struct { + + // A list of Object IDs of Azure Active Directory Groups which should have Admin Role on the Cluster. + // +kubebuilder:validation:Optional + AdminGroupObjectIds []*string `json:"adminGroupObjectIds,omitempty" tf:"admin_group_object_ids,omitempty"` + + // Is Role Based Access Control based on Azure AD enabled? + // +kubebuilder:validation:Optional + AzureRbacEnabled *bool `json:"azureRbacEnabled,omitempty" tf:"azure_rbac_enabled,omitempty"` + + // The Client ID of an Azure Active Directory Application. + // +kubebuilder:validation:Optional + ClientAppID *string `json:"clientAppId,omitempty" tf:"client_app_id,omitempty"` + + // Is the Azure Active Directory integration Managed, meaning that Azure will create/manage the Service Principal used for integration. + // +kubebuilder:validation:Optional + Managed *bool `json:"managed,omitempty" tf:"managed,omitempty"` + + // The Server ID of an Azure Active Directory Application. + // +kubebuilder:validation:Optional + ServerAppID *string `json:"serverAppId,omitempty" tf:"server_app_id,omitempty"` + + // The Server Secret of an Azure Active Directory Application. 
+ // +kubebuilder:validation:Optional + ServerAppSecretSecretRef *v1.SecretKeySelector `json:"serverAppSecretSecretRef,omitempty" tf:"-"` + + // The Tenant ID used for Azure Active Directory Application. If this isn't specified the Tenant ID of the current Subscription is used. + // +kubebuilder:validation:Optional + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` +} + +type ConfidentialComputingInitParameters struct { + + // Should the SGX quote helper be enabled? + SgxQuoteHelperEnabled *bool `json:"sgxQuoteHelperEnabled,omitempty" tf:"sgx_quote_helper_enabled,omitempty"` +} + +type ConfidentialComputingObservation struct { + + // Should the SGX quote helper be enabled? + SgxQuoteHelperEnabled *bool `json:"sgxQuoteHelperEnabled,omitempty" tf:"sgx_quote_helper_enabled,omitempty"` +} + +type ConfidentialComputingParameters struct { + + // Should the SGX quote helper be enabled? + // +kubebuilder:validation:Optional + SgxQuoteHelperEnabled *bool `json:"sgxQuoteHelperEnabled" tf:"sgx_quote_helper_enabled,omitempty"` +} + +type ConnectorIdentityInitParameters struct { +} + +type ConnectorIdentityObservation struct { + + // The Client ID of the user-defined Managed Identity used by the ACI Connector. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The Object ID of the user-defined Managed Identity used by the ACI Connector. + ObjectID *string `json:"objectId,omitempty" tf:"object_id,omitempty"` + + // The ID of the User Assigned Identity used by the ACI Connector. + UserAssignedIdentityID *string `json:"userAssignedIdentityId,omitempty" tf:"user_assigned_identity_id,omitempty"` +} + +type ConnectorIdentityParameters struct { +} + +type DefaultNodePoolInitParameters struct { + + // Specifies the ID of the Capacity Reservation Group within which this AKS Cluster should be created. Changing this forces a new resource to be created. 
+ CapacityReservationGroupID *string `json:"capacityReservationGroupId,omitempty" tf:"capacity_reservation_group_id,omitempty"` + + // Specifies whether to trust a Custom CA. + CustomCATrustEnabled *bool `json:"customCaTrustEnabled,omitempty" tf:"custom_ca_trust_enabled,omitempty"` + + // Should the Kubernetes Auto Scaler be enabled for this Node Pool? + EnableAutoScaling *bool `json:"enableAutoScaling,omitempty" tf:"enable_auto_scaling,omitempty"` + + // Should the nodes in the Default Node Pool have host encryption enabled? temporary_name_for_rotation must be specified when changing this property. + EnableHostEncryption *bool `json:"enableHostEncryption,omitempty" tf:"enable_host_encryption,omitempty"` + + // Should nodes in this Node Pool have a Public IP Address? temporary_name_for_rotation must be specified when changing this property. + EnableNodePublicIP *bool `json:"enableNodePublicIp,omitempty" tf:"enable_node_public_ip,omitempty"` + + // Should the nodes in this Node Pool have Federal Information Processing Standard enabled? temporary_name_for_rotation must be specified when changing this block. Changing this forces a new resource to be created. + FipsEnabled *bool `json:"fipsEnabled,omitempty" tf:"fips_enabled,omitempty"` + + // Specifies the GPU MIG instance profile for supported GPU VM SKU. The allowed values are MIG1g, MIG2g, MIG3g, MIG4g and MIG7g. Changing this forces a new resource to be created. + GpuInstance *string `json:"gpuInstance,omitempty" tf:"gpu_instance,omitempty"` + + // Specifies the ID of the Host Group within which this AKS Cluster should be created. Changing this forces a new resource to be created. + HostGroupID *string `json:"hostGroupId,omitempty" tf:"host_group_id,omitempty"` + + // A kubelet_config block as defined below. temporary_name_for_rotation must be specified when changing this block. 
+ KubeletConfig *KubeletConfigInitParameters `json:"kubeletConfig,omitempty" tf:"kubelet_config,omitempty"` + + // The type of disk used by kubelet. Possible values are OS and Temporary. + KubeletDiskType *string `json:"kubeletDiskType,omitempty" tf:"kubelet_disk_type,omitempty"` + + // A linux_os_config block as defined below. temporary_name_for_rotation must be specified when changing this block. + LinuxOsConfig *LinuxOsConfigInitParameters `json:"linuxOsConfig,omitempty" tf:"linux_os_config,omitempty"` + + // The maximum number of nodes which should exist in this Node Pool. If specified this must be between 1 and 1000. + MaxCount *float64 `json:"maxCount,omitempty" tf:"max_count,omitempty"` + + // The maximum number of pods that can run on each agent. temporary_name_for_rotation must be specified when changing this property. + MaxPods *float64 `json:"maxPods,omitempty" tf:"max_pods,omitempty"` + + // A base64-encoded string which will be written to /etc/motd after decoding. This allows customization of the message of the day for Linux nodes. It cannot be specified for Windows nodes and must be a static string (i.e. will be printed raw and not executed as a script). Changing this forces a new resource to be created. + MessageOfTheDay *string `json:"messageOfTheDay,omitempty" tf:"message_of_the_day,omitempty"` + + // The minimum number of nodes which should exist in this Node Pool. If specified this must be between 1 and 1000. + MinCount *float64 `json:"minCount,omitempty" tf:"min_count,omitempty"` + + // The name which should be used for the default Kubernetes Node Pool. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The initial number of nodes which should exist in this Node Pool. If specified this must be between 1 and 1000 and between min_count and max_count. + NodeCount *float64 `json:"nodeCount,omitempty" tf:"node_count,omitempty"` + + // A map of Kubernetes labels which should be applied to nodes in the Default Node Pool. 
+ // +mapType=granular + NodeLabels map[string]*string `json:"nodeLabels,omitempty" tf:"node_labels,omitempty"` + + // A node_network_profile block as documented below. + NodeNetworkProfile *NodeNetworkProfileInitParameters `json:"nodeNetworkProfile,omitempty" tf:"node_network_profile,omitempty"` + + // Resource ID for the Public IP Addresses Prefix for the nodes in this Node Pool. enable_node_public_ip should be true. Changing this forces a new resource to be created. + NodePublicIPPrefixID *string `json:"nodePublicIpPrefixId,omitempty" tf:"node_public_ip_prefix_id,omitempty"` + + NodeTaints []*string `json:"nodeTaints,omitempty" tf:"node_taints,omitempty"` + + // Enabling this option will taint default node pool with CriticalAddonsOnly=true:NoSchedule taint. temporary_name_for_rotation must be specified when changing this property. + OnlyCriticalAddonsEnabled *bool `json:"onlyCriticalAddonsEnabled,omitempty" tf:"only_critical_addons_enabled,omitempty"` + + // Version of Kubernetes used for the Agents. If not specified, the default node pool will be created with the version specified by kubernetes_version. If both are unspecified, the latest recommended version will be used at provisioning time (but won't auto-upgrade). AKS does not require an exact patch version to be specified, minor version aliases such as 1.22 are also supported. - The minor version's latest GA patch is automatically chosen in that case. More details can be found in the documentation. + OrchestratorVersion *string `json:"orchestratorVersion,omitempty" tf:"orchestrator_version,omitempty"` + + // The size of the OS Disk which should be used for each agent in the Node Pool. temporary_name_for_rotation must be specified when attempting a change. + OsDiskSizeGb *float64 `json:"osDiskSizeGb,omitempty" tf:"os_disk_size_gb,omitempty"` + + // The type of disk which should be used for the Operating System. Possible values are Ephemeral and Managed. Defaults to Managed. 
temporary_name_for_rotation must be specified when attempting a change. + OsDiskType *string `json:"osDiskType,omitempty" tf:"os_disk_type,omitempty"` + + // Specifies the OS SKU used by the agent pool. Possible values are AzureLinux, Ubuntu, Windows2019 and Windows2022. If not specified, the default is Ubuntu if OSType=Linux or Windows2019 if OSType=Windows. And the default Windows OSSKU will be changed to Windows2022 after Windows2019 is deprecated. temporary_name_for_rotation must be specified when attempting a change. + OsSku *string `json:"osSku,omitempty" tf:"os_sku,omitempty"` + + // The ID of the Subnet where the pods in the default Node Pool should exist. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + PodSubnetID *string `json:"podSubnetId,omitempty" tf:"pod_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate podSubnetId. + // +kubebuilder:validation:Optional + PodSubnetIDRef *v1.Reference `json:"podSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate podSubnetId. + // +kubebuilder:validation:Optional + PodSubnetIDSelector *v1.Selector `json:"podSubnetIdSelector,omitempty" tf:"-"` + + // The ID of the Proximity Placement Group. Changing this forces a new resource to be created. + ProximityPlacementGroupID *string `json:"proximityPlacementGroupId,omitempty" tf:"proximity_placement_group_id,omitempty"` + + // Specifies the autoscaling behaviour of the Kubernetes Cluster. Allowed values are Delete and Deallocate. Defaults to Delete. + ScaleDownMode *string `json:"scaleDownMode,omitempty" tf:"scale_down_mode,omitempty"` + + // The ID of the Snapshot which should be used to create this default Node Pool. temporary_name_for_rotation must be specified when changing this property. 
+ SnapshotID *string `json:"snapshotId,omitempty" tf:"snapshot_id,omitempty"` + + // A mapping of tags to assign to the Node Pool. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies the name of the temporary node pool used to cycle the default node pool for VM resizing. + TemporaryNameForRotation *string `json:"temporaryNameForRotation,omitempty" tf:"temporary_name_for_rotation,omitempty"` + + // The type of Node Pool which should be created. Possible values are AvailabilitySet and VirtualMachineScaleSets. Defaults to VirtualMachineScaleSets. Changing this forces a new resource to be created. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // Used to specify whether the UltraSSD is enabled in the Default Node Pool. Defaults to false. See the documentation for more information. temporary_name_for_rotation must be specified when attempting a change. + UltraSsdEnabled *bool `json:"ultraSsdEnabled,omitempty" tf:"ultra_ssd_enabled,omitempty"` + + // A upgrade_settings block as documented below. + UpgradeSettings *UpgradeSettingsInitParameters `json:"upgradeSettings,omitempty" tf:"upgrade_settings,omitempty"` + + // The size of the Virtual Machine, such as Standard_DS2_v2. temporary_name_for_rotation must be specified when attempting a resize. + VMSize *string `json:"vmSize,omitempty" tf:"vm_size,omitempty"` + + // The ID of a Subnet where the Kubernetes Node Pool should exist. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + VnetSubnetID *string `json:"vnetSubnetId,omitempty" tf:"vnet_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate vnetSubnetId. 
+ // +kubebuilder:validation:Optional + VnetSubnetIDRef *v1.Reference `json:"vnetSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate vnetSubnetId. + // +kubebuilder:validation:Optional + VnetSubnetIDSelector *v1.Selector `json:"vnetSubnetIdSelector,omitempty" tf:"-"` + + // Specifies the workload runtime used by the node pool. Possible values are OCIContainer and KataMshvVmIsolation. + WorkloadRuntime *string `json:"workloadRuntime,omitempty" tf:"workload_runtime,omitempty"` + + // Specifies a list of Availability Zones in which this Kubernetes Cluster should be located. temporary_name_for_rotation must be specified when changing this property. + // +listType=set + Zones []*string `json:"zones,omitempty" tf:"zones,omitempty"` +} + +type DefaultNodePoolObservation struct { + + // Specifies the ID of the Capacity Reservation Group within which this AKS Cluster should be created. Changing this forces a new resource to be created. + CapacityReservationGroupID *string `json:"capacityReservationGroupId,omitempty" tf:"capacity_reservation_group_id,omitempty"` + + // Specifies whether to trust a Custom CA. + CustomCATrustEnabled *bool `json:"customCaTrustEnabled,omitempty" tf:"custom_ca_trust_enabled,omitempty"` + + // Should the Kubernetes Auto Scaler be enabled for this Node Pool? + EnableAutoScaling *bool `json:"enableAutoScaling,omitempty" tf:"enable_auto_scaling,omitempty"` + + // Should the nodes in the Default Node Pool have host encryption enabled? temporary_name_for_rotation must be specified when changing this property. + EnableHostEncryption *bool `json:"enableHostEncryption,omitempty" tf:"enable_host_encryption,omitempty"` + + // Should nodes in this Node Pool have a Public IP Address? temporary_name_for_rotation must be specified when changing this property. 
+ EnableNodePublicIP *bool `json:"enableNodePublicIp,omitempty" tf:"enable_node_public_ip,omitempty"` + + // Should the nodes in this Node Pool have Federal Information Processing Standard enabled? temporary_name_for_rotation must be specified when changing this block. Changing this forces a new resource to be created. + FipsEnabled *bool `json:"fipsEnabled,omitempty" tf:"fips_enabled,omitempty"` + + // Specifies the GPU MIG instance profile for supported GPU VM SKU. The allowed values are MIG1g, MIG2g, MIG3g, MIG4g and MIG7g. Changing this forces a new resource to be created. + GpuInstance *string `json:"gpuInstance,omitempty" tf:"gpu_instance,omitempty"` + + // Specifies the ID of the Host Group within which this AKS Cluster should be created. Changing this forces a new resource to be created. + HostGroupID *string `json:"hostGroupId,omitempty" tf:"host_group_id,omitempty"` + + // A kubelet_config block as defined below. temporary_name_for_rotation must be specified when changing this block. + KubeletConfig *KubeletConfigObservation `json:"kubeletConfig,omitempty" tf:"kubelet_config,omitempty"` + + // The type of disk used by kubelet. Possible values are OS and Temporary. + KubeletDiskType *string `json:"kubeletDiskType,omitempty" tf:"kubelet_disk_type,omitempty"` + + // A linux_os_config block as defined below. temporary_name_for_rotation must be specified when changing this block. + LinuxOsConfig *LinuxOsConfigObservation `json:"linuxOsConfig,omitempty" tf:"linux_os_config,omitempty"` + + // The maximum number of nodes which should exist in this Node Pool. If specified this must be between 1 and 1000. + MaxCount *float64 `json:"maxCount,omitempty" tf:"max_count,omitempty"` + + // The maximum number of pods that can run on each agent. temporary_name_for_rotation must be specified when changing this property. + MaxPods *float64 `json:"maxPods,omitempty" tf:"max_pods,omitempty"` + + // A base64-encoded string which will be written to /etc/motd after decoding. 
This allows customization of the message of the day for Linux nodes. It cannot be specified for Windows nodes and must be a static string (i.e. will be printed raw and not executed as a script). Changing this forces a new resource to be created. + MessageOfTheDay *string `json:"messageOfTheDay,omitempty" tf:"message_of_the_day,omitempty"` + + // The minimum number of nodes which should exist in this Node Pool. If specified this must be between 1 and 1000. + MinCount *float64 `json:"minCount,omitempty" tf:"min_count,omitempty"` + + // The name which should be used for the default Kubernetes Node Pool. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The initial number of nodes which should exist in this Node Pool. If specified this must be between 1 and 1000 and between min_count and max_count. + NodeCount *float64 `json:"nodeCount,omitempty" tf:"node_count,omitempty"` + + // A map of Kubernetes labels which should be applied to nodes in the Default Node Pool. + // +mapType=granular + NodeLabels map[string]*string `json:"nodeLabels,omitempty" tf:"node_labels,omitempty"` + + // A node_network_profile block as documented below. + NodeNetworkProfile *NodeNetworkProfileObservation `json:"nodeNetworkProfile,omitempty" tf:"node_network_profile,omitempty"` + + // Resource ID for the Public IP Addresses Prefix for the nodes in this Node Pool. enable_node_public_ip should be true. Changing this forces a new resource to be created. + NodePublicIPPrefixID *string `json:"nodePublicIpPrefixId,omitempty" tf:"node_public_ip_prefix_id,omitempty"` + + NodeTaints []*string `json:"nodeTaints,omitempty" tf:"node_taints,omitempty"` + + // Enabling this option will taint default node pool with CriticalAddonsOnly=true:NoSchedule taint. temporary_name_for_rotation must be specified when changing this property. 
+ OnlyCriticalAddonsEnabled *bool `json:"onlyCriticalAddonsEnabled,omitempty" tf:"only_critical_addons_enabled,omitempty"` + + // Version of Kubernetes used for the Agents. If not specified, the default node pool will be created with the version specified by kubernetes_version. If both are unspecified, the latest recommended version will be used at provisioning time (but won't auto-upgrade). AKS does not require an exact patch version to be specified, minor version aliases such as 1.22 are also supported. - The minor version's latest GA patch is automatically chosen in that case. More details can be found in the documentation. + OrchestratorVersion *string `json:"orchestratorVersion,omitempty" tf:"orchestrator_version,omitempty"` + + // The size of the OS Disk which should be used for each agent in the Node Pool. temporary_name_for_rotation must be specified when attempting a change. + OsDiskSizeGb *float64 `json:"osDiskSizeGb,omitempty" tf:"os_disk_size_gb,omitempty"` + + // The type of disk which should be used for the Operating System. Possible values are Ephemeral and Managed. Defaults to Managed. temporary_name_for_rotation must be specified when attempting a change. + OsDiskType *string `json:"osDiskType,omitempty" tf:"os_disk_type,omitempty"` + + // Specifies the OS SKU used by the agent pool. Possible values are AzureLinux, Ubuntu, Windows2019 and Windows2022. If not specified, the default is Ubuntu if OSType=Linux or Windows2019 if OSType=Windows. And the default Windows OSSKU will be changed to Windows2022 after Windows2019 is deprecated. temporary_name_for_rotation must be specified when attempting a change. + OsSku *string `json:"osSku,omitempty" tf:"os_sku,omitempty"` + + // The ID of the Subnet where the pods in the default Node Pool should exist. + PodSubnetID *string `json:"podSubnetId,omitempty" tf:"pod_subnet_id,omitempty"` + + // The ID of the Proximity Placement Group. Changing this forces a new resource to be created. 
+ ProximityPlacementGroupID *string `json:"proximityPlacementGroupId,omitempty" tf:"proximity_placement_group_id,omitempty"` + + // Specifies the autoscaling behaviour of the Kubernetes Cluster. Allowed values are Delete and Deallocate. Defaults to Delete. + ScaleDownMode *string `json:"scaleDownMode,omitempty" tf:"scale_down_mode,omitempty"` + + // The ID of the Snapshot which should be used to create this default Node Pool. temporary_name_for_rotation must be specified when changing this property. + SnapshotID *string `json:"snapshotId,omitempty" tf:"snapshot_id,omitempty"` + + // A mapping of tags to assign to the Node Pool. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies the name of the temporary node pool used to cycle the default node pool for VM resizing. + TemporaryNameForRotation *string `json:"temporaryNameForRotation,omitempty" tf:"temporary_name_for_rotation,omitempty"` + + // The type of Node Pool which should be created. Possible values are AvailabilitySet and VirtualMachineScaleSets. Defaults to VirtualMachineScaleSets. Changing this forces a new resource to be created. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // Used to specify whether the UltraSSD is enabled in the Default Node Pool. Defaults to false. See the documentation for more information. temporary_name_for_rotation must be specified when attempting a change. + UltraSsdEnabled *bool `json:"ultraSsdEnabled,omitempty" tf:"ultra_ssd_enabled,omitempty"` + + // A upgrade_settings block as documented below. + UpgradeSettings *UpgradeSettingsObservation `json:"upgradeSettings,omitempty" tf:"upgrade_settings,omitempty"` + + // The size of the Virtual Machine, such as Standard_DS2_v2. temporary_name_for_rotation must be specified when attempting a resize. + VMSize *string `json:"vmSize,omitempty" tf:"vm_size,omitempty"` + + // The ID of a Subnet where the Kubernetes Node Pool should exist. 
+ VnetSubnetID *string `json:"vnetSubnetId,omitempty" tf:"vnet_subnet_id,omitempty"` + + // Specifies the workload runtime used by the node pool. Possible values are OCIContainer and KataMshvVmIsolation. + WorkloadRuntime *string `json:"workloadRuntime,omitempty" tf:"workload_runtime,omitempty"` + + // Specifies a list of Availability Zones in which this Kubernetes Cluster should be located. temporary_name_for_rotation must be specified when changing this property. + // +listType=set + Zones []*string `json:"zones,omitempty" tf:"zones,omitempty"` +} + +type DefaultNodePoolParameters struct { + + // Specifies the ID of the Capacity Reservation Group within which this AKS Cluster should be created. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + CapacityReservationGroupID *string `json:"capacityReservationGroupId,omitempty" tf:"capacity_reservation_group_id,omitempty"` + + // Specifies whether to trust a Custom CA. + // +kubebuilder:validation:Optional + CustomCATrustEnabled *bool `json:"customCaTrustEnabled,omitempty" tf:"custom_ca_trust_enabled,omitempty"` + + // Should the Kubernetes Auto Scaler be enabled for this Node Pool? + // +kubebuilder:validation:Optional + EnableAutoScaling *bool `json:"enableAutoScaling,omitempty" tf:"enable_auto_scaling,omitempty"` + + // Should the nodes in the Default Node Pool have host encryption enabled? temporary_name_for_rotation must be specified when changing this property. + // +kubebuilder:validation:Optional + EnableHostEncryption *bool `json:"enableHostEncryption,omitempty" tf:"enable_host_encryption,omitempty"` + + // Should nodes in this Node Pool have a Public IP Address? temporary_name_for_rotation must be specified when changing this property. + // +kubebuilder:validation:Optional + EnableNodePublicIP *bool `json:"enableNodePublicIp,omitempty" tf:"enable_node_public_ip,omitempty"` + + // Should the nodes in this Node Pool have Federal Information Processing Standard enabled? 
temporary_name_for_rotation must be specified when changing this block. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + FipsEnabled *bool `json:"fipsEnabled,omitempty" tf:"fips_enabled,omitempty"` + + // Specifies the GPU MIG instance profile for supported GPU VM SKU. The allowed values are MIG1g, MIG2g, MIG3g, MIG4g and MIG7g. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + GpuInstance *string `json:"gpuInstance,omitempty" tf:"gpu_instance,omitempty"` + + // Specifies the ID of the Host Group within which this AKS Cluster should be created. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + HostGroupID *string `json:"hostGroupId,omitempty" tf:"host_group_id,omitempty"` + + // A kubelet_config block as defined below. temporary_name_for_rotation must be specified when changing this block. + // +kubebuilder:validation:Optional + KubeletConfig *KubeletConfigParameters `json:"kubeletConfig,omitempty" tf:"kubelet_config,omitempty"` + + // The type of disk used by kubelet. Possible values are OS and Temporary. + // +kubebuilder:validation:Optional + KubeletDiskType *string `json:"kubeletDiskType,omitempty" tf:"kubelet_disk_type,omitempty"` + + // A linux_os_config block as defined below. temporary_name_for_rotation must be specified when changing this block. + // +kubebuilder:validation:Optional + LinuxOsConfig *LinuxOsConfigParameters `json:"linuxOsConfig,omitempty" tf:"linux_os_config,omitempty"` + + // The maximum number of nodes which should exist in this Node Pool. If specified this must be between 1 and 1000. + // +kubebuilder:validation:Optional + MaxCount *float64 `json:"maxCount,omitempty" tf:"max_count,omitempty"` + + // The maximum number of pods that can run on each agent. temporary_name_for_rotation must be specified when changing this property. 
+ // +kubebuilder:validation:Optional + MaxPods *float64 `json:"maxPods,omitempty" tf:"max_pods,omitempty"` + + // A base64-encoded string which will be written to /etc/motd after decoding. This allows customization of the message of the day for Linux nodes. It cannot be specified for Windows nodes and must be a static string (i.e. will be printed raw and not executed as a script). Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + MessageOfTheDay *string `json:"messageOfTheDay,omitempty" tf:"message_of_the_day,omitempty"` + + // The minimum number of nodes which should exist in this Node Pool. If specified this must be between 1 and 1000. + // +kubebuilder:validation:Optional + MinCount *float64 `json:"minCount,omitempty" tf:"min_count,omitempty"` + + // The name which should be used for the default Kubernetes Node Pool. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The initial number of nodes which should exist in this Node Pool. If specified this must be between 1 and 1000 and between min_count and max_count. + // +kubebuilder:validation:Optional + NodeCount *float64 `json:"nodeCount,omitempty" tf:"node_count,omitempty"` + + // A map of Kubernetes labels which should be applied to nodes in the Default Node Pool. + // +kubebuilder:validation:Optional + // +mapType=granular + NodeLabels map[string]*string `json:"nodeLabels,omitempty" tf:"node_labels,omitempty"` + + // A node_network_profile block as documented below. + // +kubebuilder:validation:Optional + NodeNetworkProfile *NodeNetworkProfileParameters `json:"nodeNetworkProfile,omitempty" tf:"node_network_profile,omitempty"` + + // Resource ID for the Public IP Addresses Prefix for the nodes in this Node Pool. enable_node_public_ip should be true. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + NodePublicIPPrefixID *string `json:"nodePublicIpPrefixId,omitempty" tf:"node_public_ip_prefix_id,omitempty"` + + // +kubebuilder:validation:Optional + NodeTaints []*string `json:"nodeTaints,omitempty" tf:"node_taints,omitempty"` + + // Enabling this option will taint default node pool with CriticalAddonsOnly=true:NoSchedule taint. temporary_name_for_rotation must be specified when changing this property. + // +kubebuilder:validation:Optional + OnlyCriticalAddonsEnabled *bool `json:"onlyCriticalAddonsEnabled,omitempty" tf:"only_critical_addons_enabled,omitempty"` + + // Version of Kubernetes used for the Agents. If not specified, the default node pool will be created with the version specified by kubernetes_version. If both are unspecified, the latest recommended version will be used at provisioning time (but won't auto-upgrade). AKS does not require an exact patch version to be specified, minor version aliases such as 1.22 are also supported. - The minor version's latest GA patch is automatically chosen in that case. More details can be found in the documentation. + // +kubebuilder:validation:Optional + OrchestratorVersion *string `json:"orchestratorVersion,omitempty" tf:"orchestrator_version,omitempty"` + + // The size of the OS Disk which should be used for each agent in the Node Pool. temporary_name_for_rotation must be specified when attempting a change. + // +kubebuilder:validation:Optional + OsDiskSizeGb *float64 `json:"osDiskSizeGb,omitempty" tf:"os_disk_size_gb,omitempty"` + + // The type of disk which should be used for the Operating System. Possible values are Ephemeral and Managed. Defaults to Managed. temporary_name_for_rotation must be specified when attempting a change. + // +kubebuilder:validation:Optional + OsDiskType *string `json:"osDiskType,omitempty" tf:"os_disk_type,omitempty"` + + // Specifies the OS SKU used by the agent pool. Possible values are AzureLinux, Ubuntu, Windows2019 and Windows2022. 
If not specified, the default is Ubuntu if OSType=Linux or Windows2019 if OSType=Windows. And the default Windows OSSKU will be changed to Windows2022 after Windows2019 is deprecated. temporary_name_for_rotation must be specified when attempting a change. + // +kubebuilder:validation:Optional + OsSku *string `json:"osSku,omitempty" tf:"os_sku,omitempty"` + + // The ID of the Subnet where the pods in the default Node Pool should exist. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + PodSubnetID *string `json:"podSubnetId,omitempty" tf:"pod_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate podSubnetId. + // +kubebuilder:validation:Optional + PodSubnetIDRef *v1.Reference `json:"podSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate podSubnetId. + // +kubebuilder:validation:Optional + PodSubnetIDSelector *v1.Selector `json:"podSubnetIdSelector,omitempty" tf:"-"` + + // The ID of the Proximity Placement Group. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ProximityPlacementGroupID *string `json:"proximityPlacementGroupId,omitempty" tf:"proximity_placement_group_id,omitempty"` + + // Specifies the autoscaling behaviour of the Kubernetes Cluster. Allowed values are Delete and Deallocate. Defaults to Delete. + // +kubebuilder:validation:Optional + ScaleDownMode *string `json:"scaleDownMode,omitempty" tf:"scale_down_mode,omitempty"` + + // The ID of the Snapshot which should be used to create this default Node Pool. temporary_name_for_rotation must be specified when changing this property. + // +kubebuilder:validation:Optional + SnapshotID *string `json:"snapshotId,omitempty" tf:"snapshot_id,omitempty"` + + // A mapping of tags to assign to the Node Pool. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies the name of the temporary node pool used to cycle the default node pool for VM resizing. + // +kubebuilder:validation:Optional + TemporaryNameForRotation *string `json:"temporaryNameForRotation,omitempty" tf:"temporary_name_for_rotation,omitempty"` + + // The type of Node Pool which should be created. Possible values are AvailabilitySet and VirtualMachineScaleSets. Defaults to VirtualMachineScaleSets. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // Used to specify whether the UltraSSD is enabled in the Default Node Pool. Defaults to false. See the documentation for more information. temporary_name_for_rotation must be specified when attempting a change. + // +kubebuilder:validation:Optional + UltraSsdEnabled *bool `json:"ultraSsdEnabled,omitempty" tf:"ultra_ssd_enabled,omitempty"` + + // A upgrade_settings block as documented below. + // +kubebuilder:validation:Optional + UpgradeSettings *UpgradeSettingsParameters `json:"upgradeSettings,omitempty" tf:"upgrade_settings,omitempty"` + + // The size of the Virtual Machine, such as Standard_DS2_v2. temporary_name_for_rotation must be specified when attempting a resize. + // +kubebuilder:validation:Optional + VMSize *string `json:"vmSize" tf:"vm_size,omitempty"` + + // The ID of a Subnet where the Kubernetes Node Pool should exist. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + VnetSubnetID *string `json:"vnetSubnetId,omitempty" tf:"vnet_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate vnetSubnetId. 
+ // +kubebuilder:validation:Optional + VnetSubnetIDRef *v1.Reference `json:"vnetSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate vnetSubnetId. + // +kubebuilder:validation:Optional + VnetSubnetIDSelector *v1.Selector `json:"vnetSubnetIdSelector,omitempty" tf:"-"` + + // Specifies the workload runtime used by the node pool. Possible values are OCIContainer and KataMshvVmIsolation. + // +kubebuilder:validation:Optional + WorkloadRuntime *string `json:"workloadRuntime,omitempty" tf:"workload_runtime,omitempty"` + + // Specifies a list of Availability Zones in which this Kubernetes Cluster should be located. temporary_name_for_rotation must be specified when changing this property. + // +kubebuilder:validation:Optional + // +listType=set + Zones []*string `json:"zones,omitempty" tf:"zones,omitempty"` +} + +type GmsaInitParameters struct { + + // Specifies the DNS server for Windows gMSA. Set this to an empty string if you have configured the DNS server in the VNet which was used to create the managed cluster. + DNSServer *string `json:"dnsServer,omitempty" tf:"dns_server,omitempty"` + + // Specifies the root domain name for Windows gMSA. Set this to an empty string if you have configured the DNS server in the VNet which was used to create the managed cluster. + RootDomain *string `json:"rootDomain,omitempty" tf:"root_domain,omitempty"` +} + +type GmsaObservation struct { + + // Specifies the DNS server for Windows gMSA. Set this to an empty string if you have configured the DNS server in the VNet which was used to create the managed cluster. + DNSServer *string `json:"dnsServer,omitempty" tf:"dns_server,omitempty"` + + // Specifies the root domain name for Windows gMSA. Set this to an empty string if you have configured the DNS server in the VNet which was used to create the managed cluster. 
+ RootDomain *string `json:"rootDomain,omitempty" tf:"root_domain,omitempty"` +} + +type GmsaParameters struct { + + // Specifies the DNS server for Windows gMSA. Set this to an empty string if you have configured the DNS server in the VNet which was used to create the managed cluster. + // +kubebuilder:validation:Optional + DNSServer *string `json:"dnsServer" tf:"dns_server,omitempty"` + + // Specifies the root domain name for Windows gMSA. Set this to an empty string if you have configured the DNS server in the VNet which was used to create the managed cluster. + // +kubebuilder:validation:Optional + RootDomain *string `json:"rootDomain" tf:"root_domain,omitempty"` +} + +type HTTPProxyConfigInitParameters struct { + + // The proxy address to be used when communicating over HTTP. + HTTPProxy *string `json:"httpProxy,omitempty" tf:"http_proxy,omitempty"` + + // The proxy address to be used when communicating over HTTPS. + HTTPSProxy *string `json:"httpsProxy,omitempty" tf:"https_proxy,omitempty"` + + // The list of domains that will not use the proxy for communication. + // +listType=set + NoProxy []*string `json:"noProxy,omitempty" tf:"no_proxy,omitempty"` +} + +type HTTPProxyConfigObservation struct { + + // The proxy address to be used when communicating over HTTP. + HTTPProxy *string `json:"httpProxy,omitempty" tf:"http_proxy,omitempty"` + + // The proxy address to be used when communicating over HTTPS. + HTTPSProxy *string `json:"httpsProxy,omitempty" tf:"https_proxy,omitempty"` + + // The list of domains that will not use the proxy for communication. + // +listType=set + NoProxy []*string `json:"noProxy,omitempty" tf:"no_proxy,omitempty"` +} + +type HTTPProxyConfigParameters struct { + + // The proxy address to be used when communicating over HTTP. + // +kubebuilder:validation:Optional + HTTPProxy *string `json:"httpProxy,omitempty" tf:"http_proxy,omitempty"` + + // The proxy address to be used when communicating over HTTPS. 
+ // +kubebuilder:validation:Optional + HTTPSProxy *string `json:"httpsProxy,omitempty" tf:"https_proxy,omitempty"` + + // The list of domains that will not use the proxy for communication. + // +kubebuilder:validation:Optional + // +listType=set + NoProxy []*string `json:"noProxy,omitempty" tf:"no_proxy,omitempty"` + + // The base64 encoded alternative CA certificate content in PEM format. + // +kubebuilder:validation:Optional + TrustedCASecretRef *v1.SecretKeySelector `json:"trustedCaSecretRef,omitempty" tf:"-"` +} + +type IdentityInitParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Kubernetes Cluster. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Kubernetes Cluster. Possible values are SystemAssigned or UserAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityObservation struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Kubernetes Cluster. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Principal ID associated with this Managed Service Identity. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID associated with this Managed Service Identity. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Kubernetes Cluster. Possible values are SystemAssigned or UserAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Kubernetes Cluster. 
+ // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Kubernetes Cluster. Possible values are SystemAssigned or UserAssigned. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type IngressApplicationGatewayIdentityInitParameters struct { +} + +type IngressApplicationGatewayIdentityObservation struct { + + // The Client ID of the user-defined Managed Identity used for Web App Routing. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The Object ID of the user-defined Managed Identity used for Web App Routing + ObjectID *string `json:"objectId,omitempty" tf:"object_id,omitempty"` + + // The ID of the User Assigned Identity used for Web App Routing. + UserAssignedIdentityID *string `json:"userAssignedIdentityId,omitempty" tf:"user_assigned_identity_id,omitempty"` +} + +type IngressApplicationGatewayIdentityParameters struct { +} + +type IngressApplicationGatewayInitParameters struct { + + // The ID of the Application Gateway to integrate with the ingress controller of this Kubernetes Cluster. See this page for further details. + GatewayID *string `json:"gatewayId,omitempty" tf:"gateway_id,omitempty"` + + // The name of the Application Gateway to be used or created in the Nodepool Resource Group, which in turn will be integrated with the ingress controller of this Kubernetes Cluster. See this page for further details. + GatewayName *string `json:"gatewayName,omitempty" tf:"gateway_name,omitempty"` + + // The subnet CIDR to be used to create an Application Gateway, which in turn will be integrated with the ingress controller of this Kubernetes Cluster. See this page for further details. 
+ SubnetCidr *string `json:"subnetCidr,omitempty" tf:"subnet_cidr,omitempty"` + + // The ID of the subnet on which to create an Application Gateway, which in turn will be integrated with the ingress controller of this Kubernetes Cluster. See this page for further details. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` +} + +type IngressApplicationGatewayObservation struct { + + // The ID of the Application Gateway associated with the ingress controller deployed to this Kubernetes Cluster. + EffectiveGatewayID *string `json:"effectiveGatewayId,omitempty" tf:"effective_gateway_id,omitempty"` + + // The ID of the Application Gateway to integrate with the ingress controller of this Kubernetes Cluster. See this page for further details. + GatewayID *string `json:"gatewayId,omitempty" tf:"gateway_id,omitempty"` + + // The name of the Application Gateway to be used or created in the Nodepool Resource Group, which in turn will be integrated with the ingress controller of this Kubernetes Cluster. See this page for further details. + GatewayName *string `json:"gatewayName,omitempty" tf:"gateway_name,omitempty"` + + // An ingress_application_gateway_identity block is exported. The exported attributes are defined below. 
+ IngressApplicationGatewayIdentity []IngressApplicationGatewayIdentityObservation `json:"ingressApplicationGatewayIdentity,omitempty" tf:"ingress_application_gateway_identity,omitempty"` + + // The subnet CIDR to be used to create an Application Gateway, which in turn will be integrated with the ingress controller of this Kubernetes Cluster. See this page for further details. + SubnetCidr *string `json:"subnetCidr,omitempty" tf:"subnet_cidr,omitempty"` + + // The ID of the subnet on which to create an Application Gateway, which in turn will be integrated with the ingress controller of this Kubernetes Cluster. See this page for further details. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` +} + +type IngressApplicationGatewayParameters struct { + + // The ID of the Application Gateway to integrate with the ingress controller of this Kubernetes Cluster. See this page for further details. + // +kubebuilder:validation:Optional + GatewayID *string `json:"gatewayId,omitempty" tf:"gateway_id,omitempty"` + + // The name of the Application Gateway to be used or created in the Nodepool Resource Group, which in turn will be integrated with the ingress controller of this Kubernetes Cluster. See this page for further details. + // +kubebuilder:validation:Optional + GatewayName *string `json:"gatewayName,omitempty" tf:"gateway_name,omitempty"` + + // The subnet CIDR to be used to create an Application Gateway, which in turn will be integrated with the ingress controller of this Kubernetes Cluster. See this page for further details. + // +kubebuilder:validation:Optional + SubnetCidr *string `json:"subnetCidr,omitempty" tf:"subnet_cidr,omitempty"` + + // The ID of the subnet on which to create an Application Gateway, which in turn will be integrated with the ingress controller of this Kubernetes Cluster. See this page for further details. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` +} + +type KeyManagementServiceInitParameters struct { + + // Identifier of Azure Key Vault key. See key identifier format for more details. + KeyVaultKeyID *string `json:"keyVaultKeyId,omitempty" tf:"key_vault_key_id,omitempty"` + + // Network access of the key vault Network access of key vault. The possible values are Public and Private. Public means the key vault allows public access from all networks. Private means the key vault disables public access and enables private link. Defaults to Public. + KeyVaultNetworkAccess *string `json:"keyVaultNetworkAccess,omitempty" tf:"key_vault_network_access,omitempty"` +} + +type KeyManagementServiceObservation struct { + + // Identifier of Azure Key Vault key. See key identifier format for more details. + KeyVaultKeyID *string `json:"keyVaultKeyId,omitempty" tf:"key_vault_key_id,omitempty"` + + // Network access of the key vault Network access of key vault. The possible values are Public and Private. Public means the key vault allows public access from all networks. Private means the key vault disables public access and enables private link. Defaults to Public. + KeyVaultNetworkAccess *string `json:"keyVaultNetworkAccess,omitempty" tf:"key_vault_network_access,omitempty"` +} + +type KeyManagementServiceParameters struct { + + // Identifier of Azure Key Vault key. 
See key identifier format for more details. + // +kubebuilder:validation:Optional + KeyVaultKeyID *string `json:"keyVaultKeyId" tf:"key_vault_key_id,omitempty"` + + // Network access of the key vault Network access of key vault. The possible values are Public and Private. Public means the key vault allows public access from all networks. Private means the key vault disables public access and enables private link. Defaults to Public. + // +kubebuilder:validation:Optional + KeyVaultNetworkAccess *string `json:"keyVaultNetworkAccess,omitempty" tf:"key_vault_network_access,omitempty"` +} + +type KeyVaultSecretsProviderInitParameters struct { + + // Should the secret store CSI driver on the AKS cluster be enabled? + SecretRotationEnabled *bool `json:"secretRotationEnabled,omitempty" tf:"secret_rotation_enabled,omitempty"` + + // The interval to poll for secret rotation. This attribute is only set when secret_rotation is true. Defaults to 2m. + SecretRotationInterval *string `json:"secretRotationInterval,omitempty" tf:"secret_rotation_interval,omitempty"` +} + +type KeyVaultSecretsProviderObservation struct { + + // An secret_identity block is exported. The exported attributes are defined below. + SecretIdentity []SecretIdentityObservation `json:"secretIdentity,omitempty" tf:"secret_identity,omitempty"` + + // Should the secret store CSI driver on the AKS cluster be enabled? + SecretRotationEnabled *bool `json:"secretRotationEnabled,omitempty" tf:"secret_rotation_enabled,omitempty"` + + // The interval to poll for secret rotation. This attribute is only set when secret_rotation is true. Defaults to 2m. + SecretRotationInterval *string `json:"secretRotationInterval,omitempty" tf:"secret_rotation_interval,omitempty"` +} + +type KeyVaultSecretsProviderParameters struct { + + // Should the secret store CSI driver on the AKS cluster be enabled? 
+ // +kubebuilder:validation:Optional + SecretRotationEnabled *bool `json:"secretRotationEnabled,omitempty" tf:"secret_rotation_enabled,omitempty"` + + // The interval to poll for secret rotation. This attribute is only set when secret_rotation is true. Defaults to 2m. + // +kubebuilder:validation:Optional + SecretRotationInterval *string `json:"secretRotationInterval,omitempty" tf:"secret_rotation_interval,omitempty"` +} + +type KubeAdminConfigInitParameters struct { +} + +type KubeAdminConfigObservation struct { + + // The Kubernetes cluster server host. + Host *string `json:"host,omitempty" tf:"host,omitempty"` + + // A username used to authenticate to the Kubernetes cluster. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type KubeAdminConfigParameters struct { +} + +type KubeConfigInitParameters struct { +} + +type KubeConfigObservation struct { + + // The Kubernetes cluster server host. + Host *string `json:"host,omitempty" tf:"host,omitempty"` + + // A username used to authenticate to the Kubernetes cluster. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type KubeConfigParameters struct { +} + +type KubeletConfigInitParameters struct { + + // Specifies the allow list of unsafe sysctls command or patterns (ending in *). + // +listType=set + AllowedUnsafeSysctls []*string `json:"allowedUnsafeSysctls,omitempty" tf:"allowed_unsafe_sysctls,omitempty"` + + // Is CPU CFS quota enforcement for containers enabled? + CPUCfsQuotaEnabled *bool `json:"cpuCfsQuotaEnabled,omitempty" tf:"cpu_cfs_quota_enabled,omitempty"` + + // Specifies the CPU CFS quota period value. + CPUCfsQuotaPeriod *string `json:"cpuCfsQuotaPeriod,omitempty" tf:"cpu_cfs_quota_period,omitempty"` + + // Specifies the CPU Manager policy to use. Possible values are none and static,. 
+ CPUManagerPolicy *string `json:"cpuManagerPolicy,omitempty" tf:"cpu_manager_policy,omitempty"` + + // Specifies the maximum number of container log files that can be present for a container. must be at least 2. + ContainerLogMaxLine *float64 `json:"containerLogMaxLine,omitempty" tf:"container_log_max_line,omitempty"` + + // Specifies the maximum size (e.g. 10MB) of container log file before it is rotated. + ContainerLogMaxSizeMb *float64 `json:"containerLogMaxSizeMb,omitempty" tf:"container_log_max_size_mb,omitempty"` + + // Specifies the percent of disk usage above which image garbage collection is always run. Must be between 0 and 100. + ImageGcHighThreshold *float64 `json:"imageGcHighThreshold,omitempty" tf:"image_gc_high_threshold,omitempty"` + + // Specifies the percent of disk usage lower than which image garbage collection is never run. Must be between 0 and 100. + ImageGcLowThreshold *float64 `json:"imageGcLowThreshold,omitempty" tf:"image_gc_low_threshold,omitempty"` + + // Specifies the maximum number of processes per pod. + PodMaxPid *float64 `json:"podMaxPid,omitempty" tf:"pod_max_pid,omitempty"` + + // Specifies the Topology Manager policy to use. Possible values are none, best-effort, restricted or single-numa-node. + TopologyManagerPolicy *string `json:"topologyManagerPolicy,omitempty" tf:"topology_manager_policy,omitempty"` +} + +type KubeletConfigObservation struct { + + // Specifies the allow list of unsafe sysctls command or patterns (ending in *). + // +listType=set + AllowedUnsafeSysctls []*string `json:"allowedUnsafeSysctls,omitempty" tf:"allowed_unsafe_sysctls,omitempty"` + + // Is CPU CFS quota enforcement for containers enabled? + CPUCfsQuotaEnabled *bool `json:"cpuCfsQuotaEnabled,omitempty" tf:"cpu_cfs_quota_enabled,omitempty"` + + // Specifies the CPU CFS quota period value. + CPUCfsQuotaPeriod *string `json:"cpuCfsQuotaPeriod,omitempty" tf:"cpu_cfs_quota_period,omitempty"` + + // Specifies the CPU Manager policy to use. 
Possible values are none and static,. + CPUManagerPolicy *string `json:"cpuManagerPolicy,omitempty" tf:"cpu_manager_policy,omitempty"` + + // Specifies the maximum number of container log files that can be present for a container. must be at least 2. + ContainerLogMaxLine *float64 `json:"containerLogMaxLine,omitempty" tf:"container_log_max_line,omitempty"` + + // Specifies the maximum size (e.g. 10MB) of container log file before it is rotated. + ContainerLogMaxSizeMb *float64 `json:"containerLogMaxSizeMb,omitempty" tf:"container_log_max_size_mb,omitempty"` + + // Specifies the percent of disk usage above which image garbage collection is always run. Must be between 0 and 100. + ImageGcHighThreshold *float64 `json:"imageGcHighThreshold,omitempty" tf:"image_gc_high_threshold,omitempty"` + + // Specifies the percent of disk usage lower than which image garbage collection is never run. Must be between 0 and 100. + ImageGcLowThreshold *float64 `json:"imageGcLowThreshold,omitempty" tf:"image_gc_low_threshold,omitempty"` + + // Specifies the maximum number of processes per pod. + PodMaxPid *float64 `json:"podMaxPid,omitempty" tf:"pod_max_pid,omitempty"` + + // Specifies the Topology Manager policy to use. Possible values are none, best-effort, restricted or single-numa-node. + TopologyManagerPolicy *string `json:"topologyManagerPolicy,omitempty" tf:"topology_manager_policy,omitempty"` +} + +type KubeletConfigParameters struct { + + // Specifies the allow list of unsafe sysctls command or patterns (ending in *). + // +kubebuilder:validation:Optional + // +listType=set + AllowedUnsafeSysctls []*string `json:"allowedUnsafeSysctls,omitempty" tf:"allowed_unsafe_sysctls,omitempty"` + + // Is CPU CFS quota enforcement for containers enabled? + // +kubebuilder:validation:Optional + CPUCfsQuotaEnabled *bool `json:"cpuCfsQuotaEnabled,omitempty" tf:"cpu_cfs_quota_enabled,omitempty"` + + // Specifies the CPU CFS quota period value. 
+ // +kubebuilder:validation:Optional + CPUCfsQuotaPeriod *string `json:"cpuCfsQuotaPeriod,omitempty" tf:"cpu_cfs_quota_period,omitempty"` + + // Specifies the CPU Manager policy to use. Possible values are none and static,. + // +kubebuilder:validation:Optional + CPUManagerPolicy *string `json:"cpuManagerPolicy,omitempty" tf:"cpu_manager_policy,omitempty"` + + // Specifies the maximum number of container log files that can be present for a container. must be at least 2. + // +kubebuilder:validation:Optional + ContainerLogMaxLine *float64 `json:"containerLogMaxLine,omitempty" tf:"container_log_max_line,omitempty"` + + // Specifies the maximum size (e.g. 10MB) of container log file before it is rotated. + // +kubebuilder:validation:Optional + ContainerLogMaxSizeMb *float64 `json:"containerLogMaxSizeMb,omitempty" tf:"container_log_max_size_mb,omitempty"` + + // Specifies the percent of disk usage above which image garbage collection is always run. Must be between 0 and 100. + // +kubebuilder:validation:Optional + ImageGcHighThreshold *float64 `json:"imageGcHighThreshold,omitempty" tf:"image_gc_high_threshold,omitempty"` + + // Specifies the percent of disk usage lower than which image garbage collection is never run. Must be between 0 and 100. + // +kubebuilder:validation:Optional + ImageGcLowThreshold *float64 `json:"imageGcLowThreshold,omitempty" tf:"image_gc_low_threshold,omitempty"` + + // Specifies the maximum number of processes per pod. + // +kubebuilder:validation:Optional + PodMaxPid *float64 `json:"podMaxPid,omitempty" tf:"pod_max_pid,omitempty"` + + // Specifies the Topology Manager policy to use. Possible values are none, best-effort, restricted or single-numa-node. + // +kubebuilder:validation:Optional + TopologyManagerPolicy *string `json:"topologyManagerPolicy,omitempty" tf:"topology_manager_policy,omitempty"` +} + +type KubeletIdentityInitParameters struct { + + // The Client ID of the user-defined Managed Identity to be assigned to the Kubelets. 
If not specified a Managed Identity is created automatically. Changing this forces a new resource to be created. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The Object ID of the user-defined Managed Identity assigned to the Kubelets.If not specified a Managed Identity is created automatically. Changing this forces a new resource to be created. + ObjectID *string `json:"objectId,omitempty" tf:"object_id,omitempty"` + + // The ID of the User Assigned Identity assigned to the Kubelets. If not specified a Managed Identity is created automatically. Changing this forces a new resource to be created. + UserAssignedIdentityID *string `json:"userAssignedIdentityId,omitempty" tf:"user_assigned_identity_id,omitempty"` +} + +type KubeletIdentityObservation struct { + + // The Client ID of the user-defined Managed Identity to be assigned to the Kubelets. If not specified a Managed Identity is created automatically. Changing this forces a new resource to be created. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The Object ID of the user-defined Managed Identity assigned to the Kubelets.If not specified a Managed Identity is created automatically. Changing this forces a new resource to be created. + ObjectID *string `json:"objectId,omitempty" tf:"object_id,omitempty"` + + // The ID of the User Assigned Identity assigned to the Kubelets. If not specified a Managed Identity is created automatically. Changing this forces a new resource to be created. + UserAssignedIdentityID *string `json:"userAssignedIdentityId,omitempty" tf:"user_assigned_identity_id,omitempty"` +} + +type KubeletIdentityParameters struct { + + // The Client ID of the user-defined Managed Identity to be assigned to the Kubelets. If not specified a Managed Identity is created automatically. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The Object ID of the user-defined Managed Identity assigned to the Kubelets.If not specified a Managed Identity is created automatically. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ObjectID *string `json:"objectId,omitempty" tf:"object_id,omitempty"` + + // The ID of the User Assigned Identity assigned to the Kubelets. If not specified a Managed Identity is created automatically. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + UserAssignedIdentityID *string `json:"userAssignedIdentityId,omitempty" tf:"user_assigned_identity_id,omitempty"` +} + +type KubernetesClusterInitParameters struct { + + // An api_server_access_profile block as defined below. + APIServerAccessProfile *APIServerAccessProfileInitParameters `json:"apiServerAccessProfile,omitempty" tf:"api_server_access_profile,omitempty"` + + // Deprecated in favor of `spec.forProvider.apiServerAccessProfile[0].authorizedIpRanges` + // +listType=set + APIServerAuthorizedIPRanges []*string `json:"apiServerAuthorizedIpRanges,omitempty" tf:"api_server_authorized_ip_ranges,omitempty"` + + // A aci_connector_linux block as defined below. For more details, please visit Create and configure an AKS cluster to use virtual nodes. + AciConnectorLinux *AciConnectorLinuxInitParameters `json:"aciConnectorLinux,omitempty" tf:"aci_connector_linux,omitempty"` + + // A auto_scaler_profile block as defined below. + AutoScalerProfile *AutoScalerProfileInitParameters `json:"autoScalerProfile,omitempty" tf:"auto_scaler_profile,omitempty"` + + // The upgrade channel for this Kubernetes Cluster. Possible values are patch, rapid, node-image and stable. Omitting this field sets this value to none. 
+ AutomaticChannelUpgrade *string `json:"automaticChannelUpgrade,omitempty" tf:"automatic_channel_upgrade,omitempty"` + + // A azure_active_directory_role_based_access_control block as defined below. + AzureActiveDirectoryRoleBasedAccessControl *AzureActiveDirectoryRoleBasedAccessControlInitParameters `json:"azureActiveDirectoryRoleBasedAccessControl,omitempty" tf:"azure_active_directory_role_based_access_control,omitempty"` + + // Should the Azure Policy Add-On be enabled? For more details please visit Understand Azure Policy for Azure Kubernetes Service + AzurePolicyEnabled *bool `json:"azurePolicyEnabled,omitempty" tf:"azure_policy_enabled,omitempty"` + + // A confidential_computing block as defined below. For more details please the documentation + ConfidentialComputing *ConfidentialComputingInitParameters `json:"confidentialComputing,omitempty" tf:"confidential_computing,omitempty"` + + // A list of up to 10 base64 encoded CAs that will be added to the trust store on nodes with the custom_ca_trust_enabled feature enabled. + CustomCATrustCertificatesBase64 []*string `json:"customCaTrustCertificatesBase64,omitempty" tf:"custom_ca_trust_certificates_base64,omitempty"` + + // DNS prefix specified when creating the managed cluster. Possible values must begin and end with a letter or number, contain only letters, numbers, and hyphens and be between 1 and 54 characters in length. Changing this forces a new resource to be created. + DNSPrefix *string `json:"dnsPrefix,omitempty" tf:"dns_prefix,omitempty"` + + // Specifies the DNS prefix to use with private clusters. Changing this forces a new resource to be created. + DNSPrefixPrivateCluster *string `json:"dnsPrefixPrivateCluster,omitempty" tf:"dns_prefix_private_cluster,omitempty"` + + // A default_node_pool block as defined below. 
+ DefaultNodePool *DefaultNodePoolInitParameters `json:"defaultNodePool,omitempty" tf:"default_node_pool,omitempty"` + + // The ID of the Disk Encryption Set which should be used for the Nodes and Volumes. More information can be found in the documentation. Changing this forces a new resource to be created. + DiskEncryptionSetID *string `json:"diskEncryptionSetId,omitempty" tf:"disk_encryption_set_id,omitempty"` + + // Specifies the Edge Zone within the Azure Region where this Managed Kubernetes Cluster should exist. Changing this forces a new resource to be created. + EdgeZone *string `json:"edgeZone,omitempty" tf:"edge_zone,omitempty"` + + EnablePodSecurityPolicy *bool `json:"enablePodSecurityPolicy,omitempty" tf:"enable_pod_security_policy,omitempty"` + + // Should HTTP Application Routing be enabled? + HTTPApplicationRoutingEnabled *bool `json:"httpApplicationRoutingEnabled,omitempty" tf:"http_application_routing_enabled,omitempty"` + + // A http_proxy_config block as defined below. + HTTPProxyConfig *HTTPProxyConfigInitParameters `json:"httpProxyConfig,omitempty" tf:"http_proxy_config,omitempty"` + + // An identity block as defined below. One of either identity or service_principal must be specified. + Identity *IdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Specifies whether Image Cleaner is enabled. + ImageCleanerEnabled *bool `json:"imageCleanerEnabled,omitempty" tf:"image_cleaner_enabled,omitempty"` + + // Specifies the interval in hours when images should be cleaned up. Defaults to 48. + ImageCleanerIntervalHours *float64 `json:"imageCleanerIntervalHours,omitempty" tf:"image_cleaner_interval_hours,omitempty"` + + // An ingress_application_gateway block as defined below. + IngressApplicationGateway *IngressApplicationGatewayInitParameters `json:"ingressApplicationGateway,omitempty" tf:"ingress_application_gateway,omitempty"` + + // A key_management_service block as defined below. 
For more details, please visit Key Management Service (KMS) etcd encryption to an AKS cluster. + KeyManagementService *KeyManagementServiceInitParameters `json:"keyManagementService,omitempty" tf:"key_management_service,omitempty"` + + // A key_vault_secrets_provider block as defined below. + KeyVaultSecretsProvider *KeyVaultSecretsProviderInitParameters `json:"keyVaultSecretsProvider,omitempty" tf:"key_vault_secrets_provider,omitempty"` + + // A kubelet_identity block as defined below. + KubeletIdentity *KubeletIdentityInitParameters `json:"kubeletIdentity,omitempty" tf:"kubelet_identity,omitempty"` + + // Version of Kubernetes specified when creating the AKS managed cluster. If not specified, the latest recommended version will be used at provisioning time (but won't auto-upgrade). AKS does not require an exact patch version to be specified, minor version aliases such as 1.22 are also supported. - The minor version's latest GA patch is automatically chosen in that case. More details can be found in the documentation. + KubernetesVersion *string `json:"kubernetesVersion,omitempty" tf:"kubernetes_version,omitempty"` + + // A linux_profile block as defined below. + LinuxProfile *LinuxProfileInitParameters `json:"linuxProfile,omitempty" tf:"linux_profile,omitempty"` + + // If true local accounts will be disabled. See the documentation for more information. + LocalAccountDisabled *bool `json:"localAccountDisabled,omitempty" tf:"local_account_disabled,omitempty"` + + // The location where the Managed Kubernetes Cluster should be created. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A maintenance_window block as defined below. + MaintenanceWindow *MaintenanceWindowInitParameters `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"` + + // A maintenance_window_auto_upgrade block as defined below. 
+ MaintenanceWindowAutoUpgrade *MaintenanceWindowAutoUpgradeInitParameters `json:"maintenanceWindowAutoUpgrade,omitempty" tf:"maintenance_window_auto_upgrade,omitempty"` + + // A maintenance_window_node_os block as defined below. + MaintenanceWindowNodeOs *MaintenanceWindowNodeOsInitParameters `json:"maintenanceWindowNodeOs,omitempty" tf:"maintenance_window_node_os,omitempty"` + + // A microsoft_defender block as defined below. + MicrosoftDefender *MicrosoftDefenderInitParameters `json:"microsoftDefender,omitempty" tf:"microsoft_defender,omitempty"` + + // Specifies a Prometheus add-on profile for the Kubernetes Cluster. A monitor_metrics block as defined below. + MonitorMetrics *MonitorMetricsInitParameters `json:"monitorMetrics,omitempty" tf:"monitor_metrics,omitempty"` + + // A network_profile block as defined below. + NetworkProfile *NetworkProfileInitParameters `json:"networkProfile,omitempty" tf:"network_profile,omitempty"` + + // The upgrade channel for this Kubernetes Cluster Nodes' OS Image. Possible values are Unmanaged, SecurityPatch, NodeImage and None. + NodeOsChannelUpgrade *string `json:"nodeOsChannelUpgrade,omitempty" tf:"node_os_channel_upgrade,omitempty"` + + // The auto-generated Resource Group which contains the resources for this Managed Kubernetes Cluster. + NodeResourceGroup *string `json:"nodeResourceGroup,omitempty" tf:"node_resource_group,omitempty"` + + // Enable or Disable the OIDC issuer URL + OidcIssuerEnabled *bool `json:"oidcIssuerEnabled,omitempty" tf:"oidc_issuer_enabled,omitempty"` + + // An oms_agent block as defined below. + OmsAgent *OmsAgentInitParameters `json:"omsAgent,omitempty" tf:"oms_agent,omitempty"` + + // Is Open Service Mesh enabled? For more details, please visit Open Service Mesh for AKS. + OpenServiceMeshEnabled *bool `json:"openServiceMeshEnabled,omitempty" tf:"open_service_mesh_enabled,omitempty"` + + // Should this Kubernetes Cluster have its API server only exposed on internal IP addresses? 
This provides a Private IP Address for the Kubernetes API on the Virtual Network where the Kubernetes Cluster is located. Defaults to false. Changing this forces a new resource to be created. + PrivateClusterEnabled *bool `json:"privateClusterEnabled,omitempty" tf:"private_cluster_enabled,omitempty"` + + // Specifies whether a Public FQDN for this Private Cluster should be added. Defaults to false. + PrivateClusterPublicFqdnEnabled *bool `json:"privateClusterPublicFqdnEnabled,omitempty" tf:"private_cluster_public_fqdn_enabled,omitempty"` + + // Either the ID of Private DNS Zone which should be delegated to this Cluster, System to have AKS manage this or None. In case of None you will need to bring your own DNS server and set up resolving, otherwise, the cluster will have issues after provisioning. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.PrivateDNSZone + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + PrivateDNSZoneID *string `json:"privateDnsZoneId,omitempty" tf:"private_dns_zone_id,omitempty"` + + // Reference to a PrivateDNSZone in network to populate privateDnsZoneId. + // +kubebuilder:validation:Optional + PrivateDNSZoneIDRef *v1.Reference `json:"privateDnsZoneIdRef,omitempty" tf:"-"` + + // Selector for a PrivateDNSZone in network to populate privateDnsZoneId. + // +kubebuilder:validation:Optional + PrivateDNSZoneIDSelector *v1.Selector `json:"privateDnsZoneIdSelector,omitempty" tf:"-"` + + // Whether public network access is allowed for this Kubernetes Cluster. Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // Whether Role Based Access Control for the Kubernetes Cluster should be enabled. Defaults to true. Changing this forces a new resource to be created. 
+ RoleBasedAccessControlEnabled *bool `json:"roleBasedAccessControlEnabled,omitempty" tf:"role_based_access_control_enabled,omitempty"` + + // Whether to enable run command for the cluster or not. Defaults to true. + RunCommandEnabled *bool `json:"runCommandEnabled,omitempty" tf:"run_command_enabled,omitempty"` + + // A service_mesh_profile block as defined below. + ServiceMeshProfile *ServiceMeshProfileInitParameters `json:"serviceMeshProfile,omitempty" tf:"service_mesh_profile,omitempty"` + + // A service_principal block as documented below. One of either identity or service_principal must be specified. + ServicePrincipal *ServicePrincipalInitParameters `json:"servicePrincipal,omitempty" tf:"service_principal,omitempty"` + + // The SKU Tier that should be used for this Kubernetes Cluster. Possible values are Free, Standard (which includes the Uptime SLA) and Premium. Defaults to Free. + SkuTier *string `json:"skuTier,omitempty" tf:"sku_tier,omitempty"` + + // A storage_profile block as defined below. + StorageProfile *StorageProfileInitParameters `json:"storageProfile,omitempty" tf:"storage_profile,omitempty"` + + // Specifies the support plan which should be used for this Kubernetes Cluster. Possible values are KubernetesOfficial and AKSLongTermSupport. Defaults to KubernetesOfficial. + SupportPlan *string `json:"supportPlan,omitempty" tf:"support_plan,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A web_app_routing block as defined below. + WebAppRouting *WebAppRoutingInitParameters `json:"webAppRouting,omitempty" tf:"web_app_routing,omitempty"` + + // A windows_profile block as defined below. + WindowsProfile *WindowsProfileInitParameters `json:"windowsProfile,omitempty" tf:"windows_profile,omitempty"` + + // A workload_autoscaler_profile block defined below. 
+ WorkloadAutoscalerProfile *WorkloadAutoscalerProfileInitParameters `json:"workloadAutoscalerProfile,omitempty" tf:"workload_autoscaler_profile,omitempty"` + + // Specifies whether Azure AD Workload Identity should be enabled for the Cluster. Defaults to false. + WorkloadIdentityEnabled *bool `json:"workloadIdentityEnabled,omitempty" tf:"workload_identity_enabled,omitempty"` +} + +type KubernetesClusterObservation struct { + + // An api_server_access_profile block as defined below. + APIServerAccessProfile *APIServerAccessProfileObservation `json:"apiServerAccessProfile,omitempty" tf:"api_server_access_profile,omitempty"` + + // Deprecated in favor of `spec.forProvider.apiServerAccessProfile[0].authorizedIpRanges` + // +listType=set + APIServerAuthorizedIPRanges []*string `json:"apiServerAuthorizedIpRanges,omitempty" tf:"api_server_authorized_ip_ranges,omitempty"` + + // A aci_connector_linux block as defined below. For more details, please visit Create and configure an AKS cluster to use virtual nodes. + AciConnectorLinux *AciConnectorLinuxObservation `json:"aciConnectorLinux,omitempty" tf:"aci_connector_linux,omitempty"` + + // A auto_scaler_profile block as defined below. + AutoScalerProfile *AutoScalerProfileObservation `json:"autoScalerProfile,omitempty" tf:"auto_scaler_profile,omitempty"` + + // The upgrade channel for this Kubernetes Cluster. Possible values are patch, rapid, node-image and stable. Omitting this field sets this value to none. + AutomaticChannelUpgrade *string `json:"automaticChannelUpgrade,omitempty" tf:"automatic_channel_upgrade,omitempty"` + + // A azure_active_directory_role_based_access_control block as defined below. + AzureActiveDirectoryRoleBasedAccessControl *AzureActiveDirectoryRoleBasedAccessControlObservation `json:"azureActiveDirectoryRoleBasedAccessControl,omitempty" tf:"azure_active_directory_role_based_access_control,omitempty"` + + // Should the Azure Policy Add-On be enabled? 
For more details please visit Understand Azure Policy for Azure Kubernetes Service + AzurePolicyEnabled *bool `json:"azurePolicyEnabled,omitempty" tf:"azure_policy_enabled,omitempty"` + + // A confidential_computing block as defined below. For more details please see the documentation + ConfidentialComputing *ConfidentialComputingObservation `json:"confidentialComputing,omitempty" tf:"confidential_computing,omitempty"` + + // The current version running on the Azure Kubernetes Managed Cluster. + CurrentKubernetesVersion *string `json:"currentKubernetesVersion,omitempty" tf:"current_kubernetes_version,omitempty"` + + // A list of up to 10 base64 encoded CAs that will be added to the trust store on nodes with the custom_ca_trust_enabled feature enabled. + CustomCATrustCertificatesBase64 []*string `json:"customCaTrustCertificatesBase64,omitempty" tf:"custom_ca_trust_certificates_base64,omitempty"` + + // DNS prefix specified when creating the managed cluster. Possible values must begin and end with a letter or number, contain only letters, numbers, and hyphens and be between 1 and 54 characters in length. Changing this forces a new resource to be created. + DNSPrefix *string `json:"dnsPrefix,omitempty" tf:"dns_prefix,omitempty"` + + // Specifies the DNS prefix to use with private clusters. Changing this forces a new resource to be created. + DNSPrefixPrivateCluster *string `json:"dnsPrefixPrivateCluster,omitempty" tf:"dns_prefix_private_cluster,omitempty"` + + // A default_node_pool block as defined below. + DefaultNodePool *DefaultNodePoolObservation `json:"defaultNodePool,omitempty" tf:"default_node_pool,omitempty"` + + // The ID of the Disk Encryption Set which should be used for the Nodes and Volumes. More information can be found in the documentation. Changing this forces a new resource to be created. 
+ DiskEncryptionSetID *string `json:"diskEncryptionSetId,omitempty" tf:"disk_encryption_set_id,omitempty"` + + // Specifies the Edge Zone within the Azure Region where this Managed Kubernetes Cluster should exist. Changing this forces a new resource to be created. + EdgeZone *string `json:"edgeZone,omitempty" tf:"edge_zone,omitempty"` + + EnablePodSecurityPolicy *bool `json:"enablePodSecurityPolicy,omitempty" tf:"enable_pod_security_policy,omitempty"` + + // The FQDN of the Azure Kubernetes Managed Cluster. + Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"` + + // Should HTTP Application Routing be enabled? + HTTPApplicationRoutingEnabled *bool `json:"httpApplicationRoutingEnabled,omitempty" tf:"http_application_routing_enabled,omitempty"` + + // The Zone Name of the HTTP Application Routing. + HTTPApplicationRoutingZoneName *string `json:"httpApplicationRoutingZoneName,omitempty" tf:"http_application_routing_zone_name,omitempty"` + + // A http_proxy_config block as defined below. + HTTPProxyConfig *HTTPProxyConfigObservation `json:"httpProxyConfig,omitempty" tf:"http_proxy_config,omitempty"` + + // The Kubernetes Managed Cluster ID. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. One of either identity or service_principal must be specified. + Identity *IdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // Specifies whether Image Cleaner is enabled. + ImageCleanerEnabled *bool `json:"imageCleanerEnabled,omitempty" tf:"image_cleaner_enabled,omitempty"` + + // Specifies the interval in hours when images should be cleaned up. Defaults to 48. + ImageCleanerIntervalHours *float64 `json:"imageCleanerIntervalHours,omitempty" tf:"image_cleaner_interval_hours,omitempty"` + + // An ingress_application_gateway block as defined below. 
+ IngressApplicationGateway *IngressApplicationGatewayObservation `json:"ingressApplicationGateway,omitempty" tf:"ingress_application_gateway,omitempty"` + + // A key_management_service block as defined below. For more details, please visit Key Management Service (KMS) etcd encryption to an AKS cluster. + KeyManagementService *KeyManagementServiceObservation `json:"keyManagementService,omitempty" tf:"key_management_service,omitempty"` + + // A key_vault_secrets_provider block as defined below. + KeyVaultSecretsProvider *KeyVaultSecretsProviderObservation `json:"keyVaultSecretsProvider,omitempty" tf:"key_vault_secrets_provider,omitempty"` + + // A kubelet_identity block as defined below. + KubeletIdentity *KubeletIdentityObservation `json:"kubeletIdentity,omitempty" tf:"kubelet_identity,omitempty"` + + // Version of Kubernetes specified when creating the AKS managed cluster. If not specified, the latest recommended version will be used at provisioning time (but won't auto-upgrade). AKS does not require an exact patch version to be specified, minor version aliases such as 1.22 are also supported. - The minor version's latest GA patch is automatically chosen in that case. More details can be found in the documentation. + KubernetesVersion *string `json:"kubernetesVersion,omitempty" tf:"kubernetes_version,omitempty"` + + // A linux_profile block as defined below. + LinuxProfile *LinuxProfileObservation `json:"linuxProfile,omitempty" tf:"linux_profile,omitempty"` + + // If true local accounts will be disabled. See the documentation for more information. + LocalAccountDisabled *bool `json:"localAccountDisabled,omitempty" tf:"local_account_disabled,omitempty"` + + // The location where the Managed Kubernetes Cluster should be created. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A maintenance_window block as defined below. 
+ MaintenanceWindow *MaintenanceWindowObservation `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"` + + // A maintenance_window_auto_upgrade block as defined below. + MaintenanceWindowAutoUpgrade *MaintenanceWindowAutoUpgradeObservation `json:"maintenanceWindowAutoUpgrade,omitempty" tf:"maintenance_window_auto_upgrade,omitempty"` + + // A maintenance_window_node_os block as defined below. + MaintenanceWindowNodeOs *MaintenanceWindowNodeOsObservation `json:"maintenanceWindowNodeOs,omitempty" tf:"maintenance_window_node_os,omitempty"` + + // A microsoft_defender block as defined below. + MicrosoftDefender *MicrosoftDefenderObservation `json:"microsoftDefender,omitempty" tf:"microsoft_defender,omitempty"` + + // Specifies a Prometheus add-on profile for the Kubernetes Cluster. A monitor_metrics block as defined below. + MonitorMetrics *MonitorMetricsObservation `json:"monitorMetrics,omitempty" tf:"monitor_metrics,omitempty"` + + // A network_profile block as defined below. + NetworkProfile *NetworkProfileObservation `json:"networkProfile,omitempty" tf:"network_profile,omitempty"` + + // The upgrade channel for this Kubernetes Cluster Nodes' OS Image. Possible values are Unmanaged, SecurityPatch, NodeImage and None. + NodeOsChannelUpgrade *string `json:"nodeOsChannelUpgrade,omitempty" tf:"node_os_channel_upgrade,omitempty"` + + // The auto-generated Resource Group which contains the resources for this Managed Kubernetes Cluster. + NodeResourceGroup *string `json:"nodeResourceGroup,omitempty" tf:"node_resource_group,omitempty"` + + // The ID of the Resource Group containing the resources for this Managed Kubernetes Cluster. + NodeResourceGroupID *string `json:"nodeResourceGroupId,omitempty" tf:"node_resource_group_id,omitempty"` + + // Enable or Disable the OIDC issuer URL + OidcIssuerEnabled *bool `json:"oidcIssuerEnabled,omitempty" tf:"oidc_issuer_enabled,omitempty"` + + // The OIDC issuer URL that is associated with the cluster. 
+ OidcIssuerURL *string `json:"oidcIssuerUrl,omitempty" tf:"oidc_issuer_url,omitempty"` + + // An oms_agent block as defined below. + OmsAgent *OmsAgentObservation `json:"omsAgent,omitempty" tf:"oms_agent,omitempty"` + + // Is Open Service Mesh enabled? For more details, please visit Open Service Mesh for AKS. + OpenServiceMeshEnabled *bool `json:"openServiceMeshEnabled,omitempty" tf:"open_service_mesh_enabled,omitempty"` + + // The FQDN for the Azure Portal resources when private link has been enabled, which is only resolvable inside the Virtual Network used by the Kubernetes Cluster. + PortalFqdn *string `json:"portalFqdn,omitempty" tf:"portal_fqdn,omitempty"` + + // Should this Kubernetes Cluster have its API server only exposed on internal IP addresses? This provides a Private IP Address for the Kubernetes API on the Virtual Network where the Kubernetes Cluster is located. Defaults to false. Changing this forces a new resource to be created. + PrivateClusterEnabled *bool `json:"privateClusterEnabled,omitempty" tf:"private_cluster_enabled,omitempty"` + + // Specifies whether a Public FQDN for this Private Cluster should be added. Defaults to false. + PrivateClusterPublicFqdnEnabled *bool `json:"privateClusterPublicFqdnEnabled,omitempty" tf:"private_cluster_public_fqdn_enabled,omitempty"` + + // Either the ID of Private DNS Zone which should be delegated to this Cluster, System to have AKS manage this or None. In case of None you will need to bring your own DNS server and set up resolving, otherwise, the cluster will have issues after provisioning. Changing this forces a new resource to be created. + PrivateDNSZoneID *string `json:"privateDnsZoneId,omitempty" tf:"private_dns_zone_id,omitempty"` + + // The FQDN for the Kubernetes Cluster when private link has been enabled, which is only resolvable inside the Virtual Network used by the Kubernetes Cluster. 
+ PrivateFqdn *string `json:"privateFqdn,omitempty" tf:"private_fqdn,omitempty"` + + // Whether public network access is allowed for this Kubernetes Cluster. Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // Specifies the Resource Group where the Managed Kubernetes Cluster should exist. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Whether Role Based Access Control for the Kubernetes Cluster should be enabled. Defaults to true. Changing this forces a new resource to be created. + RoleBasedAccessControlEnabled *bool `json:"roleBasedAccessControlEnabled,omitempty" tf:"role_based_access_control_enabled,omitempty"` + + // Whether to enable run command for the cluster or not. Defaults to true. + RunCommandEnabled *bool `json:"runCommandEnabled,omitempty" tf:"run_command_enabled,omitempty"` + + // A service_mesh_profile block as defined below. + ServiceMeshProfile *ServiceMeshProfileObservation `json:"serviceMeshProfile,omitempty" tf:"service_mesh_profile,omitempty"` + + // A service_principal block as documented below. One of either identity or service_principal must be specified. + ServicePrincipal *ServicePrincipalObservation `json:"servicePrincipal,omitempty" tf:"service_principal,omitempty"` + + // The SKU Tier that should be used for this Kubernetes Cluster. Possible values are Free, Standard (which includes the Uptime SLA) and Premium. Defaults to Free. + SkuTier *string `json:"skuTier,omitempty" tf:"sku_tier,omitempty"` + + // A storage_profile block as defined below. + StorageProfile *StorageProfileObservation `json:"storageProfile,omitempty" tf:"storage_profile,omitempty"` + + // Specifies the support plan which should be used for this Kubernetes Cluster. Possible values are KubernetesOfficial and AKSLongTermSupport. Defaults to KubernetesOfficial. 
+ SupportPlan *string `json:"supportPlan,omitempty" tf:"support_plan,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A web_app_routing block as defined below. + WebAppRouting *WebAppRoutingObservation `json:"webAppRouting,omitempty" tf:"web_app_routing,omitempty"` + + // A windows_profile block as defined below. + WindowsProfile *WindowsProfileObservation `json:"windowsProfile,omitempty" tf:"windows_profile,omitempty"` + + // A workload_autoscaler_profile block defined below. + WorkloadAutoscalerProfile *WorkloadAutoscalerProfileObservation `json:"workloadAutoscalerProfile,omitempty" tf:"workload_autoscaler_profile,omitempty"` + + // Specifies whether Azure AD Workload Identity should be enabled for the Cluster. Defaults to false. + WorkloadIdentityEnabled *bool `json:"workloadIdentityEnabled,omitempty" tf:"workload_identity_enabled,omitempty"` +} + +type KubernetesClusterParameters struct { + + // An api_server_access_profile block as defined below. + // +kubebuilder:validation:Optional + APIServerAccessProfile *APIServerAccessProfileParameters `json:"apiServerAccessProfile,omitempty" tf:"api_server_access_profile,omitempty"` + + // Deprecated in favor of `spec.forProvider.apiServerAccessProfile[0].authorizedIpRanges` + // +kubebuilder:validation:Optional + // +listType=set + APIServerAuthorizedIPRanges []*string `json:"apiServerAuthorizedIpRanges,omitempty" tf:"api_server_authorized_ip_ranges,omitempty"` + + // A aci_connector_linux block as defined below. For more details, please visit Create and configure an AKS cluster to use virtual nodes. + // +kubebuilder:validation:Optional + AciConnectorLinux *AciConnectorLinuxParameters `json:"aciConnectorLinux,omitempty" tf:"aci_connector_linux,omitempty"` + + // A auto_scaler_profile block as defined below. 
+ // +kubebuilder:validation:Optional + AutoScalerProfile *AutoScalerProfileParameters `json:"autoScalerProfile,omitempty" tf:"auto_scaler_profile,omitempty"` + + // The upgrade channel for this Kubernetes Cluster. Possible values are patch, rapid, node-image and stable. Omitting this field sets this value to none. + // +kubebuilder:validation:Optional + AutomaticChannelUpgrade *string `json:"automaticChannelUpgrade,omitempty" tf:"automatic_channel_upgrade,omitempty"` + + // A azure_active_directory_role_based_access_control block as defined below. + // +kubebuilder:validation:Optional + AzureActiveDirectoryRoleBasedAccessControl *AzureActiveDirectoryRoleBasedAccessControlParameters `json:"azureActiveDirectoryRoleBasedAccessControl,omitempty" tf:"azure_active_directory_role_based_access_control,omitempty"` + + // Should the Azure Policy Add-On be enabled? For more details please visit Understand Azure Policy for Azure Kubernetes Service + // +kubebuilder:validation:Optional + AzurePolicyEnabled *bool `json:"azurePolicyEnabled,omitempty" tf:"azure_policy_enabled,omitempty"` + + // A confidential_computing block as defined below. For more details please see the documentation + // +kubebuilder:validation:Optional + ConfidentialComputing *ConfidentialComputingParameters `json:"confidentialComputing,omitempty" tf:"confidential_computing,omitempty"` + + // A list of up to 10 base64 encoded CAs that will be added to the trust store on nodes with the custom_ca_trust_enabled feature enabled. + // +kubebuilder:validation:Optional + CustomCATrustCertificatesBase64 []*string `json:"customCaTrustCertificatesBase64,omitempty" tf:"custom_ca_trust_certificates_base64,omitempty"` + + // DNS prefix specified when creating the managed cluster. Possible values must begin and end with a letter or number, contain only letters, numbers, and hyphens and be between 1 and 54 characters in length. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + DNSPrefix *string `json:"dnsPrefix,omitempty" tf:"dns_prefix,omitempty"` + + // Specifies the DNS prefix to use with private clusters. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + DNSPrefixPrivateCluster *string `json:"dnsPrefixPrivateCluster,omitempty" tf:"dns_prefix_private_cluster,omitempty"` + + // A default_node_pool block as defined below. + // +kubebuilder:validation:Optional + DefaultNodePool *DefaultNodePoolParameters `json:"defaultNodePool,omitempty" tf:"default_node_pool,omitempty"` + + // The ID of the Disk Encryption Set which should be used for the Nodes and Volumes. More information can be found in the documentation. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + DiskEncryptionSetID *string `json:"diskEncryptionSetId,omitempty" tf:"disk_encryption_set_id,omitempty"` + + // Specifies the Edge Zone within the Azure Region where this Managed Kubernetes Cluster should exist. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + EdgeZone *string `json:"edgeZone,omitempty" tf:"edge_zone,omitempty"` + + // +kubebuilder:validation:Optional + EnablePodSecurityPolicy *bool `json:"enablePodSecurityPolicy,omitempty" tf:"enable_pod_security_policy,omitempty"` + + // Should HTTP Application Routing be enabled? + // +kubebuilder:validation:Optional + HTTPApplicationRoutingEnabled *bool `json:"httpApplicationRoutingEnabled,omitempty" tf:"http_application_routing_enabled,omitempty"` + + // A http_proxy_config block as defined below. + // +kubebuilder:validation:Optional + HTTPProxyConfig *HTTPProxyConfigParameters `json:"httpProxyConfig,omitempty" tf:"http_proxy_config,omitempty"` + + // An identity block as defined below. One of either identity or service_principal must be specified. 
+ // +kubebuilder:validation:Optional + Identity *IdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Specifies whether Image Cleaner is enabled. + // +kubebuilder:validation:Optional + ImageCleanerEnabled *bool `json:"imageCleanerEnabled,omitempty" tf:"image_cleaner_enabled,omitempty"` + + // Specifies the interval in hours when images should be cleaned up. Defaults to 48. + // +kubebuilder:validation:Optional + ImageCleanerIntervalHours *float64 `json:"imageCleanerIntervalHours,omitempty" tf:"image_cleaner_interval_hours,omitempty"` + + // An ingress_application_gateway block as defined below. + // +kubebuilder:validation:Optional + IngressApplicationGateway *IngressApplicationGatewayParameters `json:"ingressApplicationGateway,omitempty" tf:"ingress_application_gateway,omitempty"` + + // A key_management_service block as defined below. For more details, please visit Key Management Service (KMS) etcd encryption to an AKS cluster. + // +kubebuilder:validation:Optional + KeyManagementService *KeyManagementServiceParameters `json:"keyManagementService,omitempty" tf:"key_management_service,omitempty"` + + // A key_vault_secrets_provider block as defined below. + // +kubebuilder:validation:Optional + KeyVaultSecretsProvider *KeyVaultSecretsProviderParameters `json:"keyVaultSecretsProvider,omitempty" tf:"key_vault_secrets_provider,omitempty"` + + // A kubelet_identity block as defined below. + // +kubebuilder:validation:Optional + KubeletIdentity *KubeletIdentityParameters `json:"kubeletIdentity,omitempty" tf:"kubelet_identity,omitempty"` + + // Version of Kubernetes specified when creating the AKS managed cluster. If not specified, the latest recommended version will be used at provisioning time (but won't auto-upgrade). AKS does not require an exact patch version to be specified, minor version aliases such as 1.22 are also supported. - The minor version's latest GA patch is automatically chosen in that case. 
More details can be found in the documentation. + // +kubebuilder:validation:Optional + KubernetesVersion *string `json:"kubernetesVersion,omitempty" tf:"kubernetes_version,omitempty"` + + // A linux_profile block as defined below. + // +kubebuilder:validation:Optional + LinuxProfile *LinuxProfileParameters `json:"linuxProfile,omitempty" tf:"linux_profile,omitempty"` + + // If true local accounts will be disabled. See the documentation for more information. + // +kubebuilder:validation:Optional + LocalAccountDisabled *bool `json:"localAccountDisabled,omitempty" tf:"local_account_disabled,omitempty"` + + // The location where the Managed Kubernetes Cluster should be created. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A maintenance_window block as defined below. + // +kubebuilder:validation:Optional + MaintenanceWindow *MaintenanceWindowParameters `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"` + + // A maintenance_window_auto_upgrade block as defined below. + // +kubebuilder:validation:Optional + MaintenanceWindowAutoUpgrade *MaintenanceWindowAutoUpgradeParameters `json:"maintenanceWindowAutoUpgrade,omitempty" tf:"maintenance_window_auto_upgrade,omitempty"` + + // A maintenance_window_node_os block as defined below. + // +kubebuilder:validation:Optional + MaintenanceWindowNodeOs *MaintenanceWindowNodeOsParameters `json:"maintenanceWindowNodeOs,omitempty" tf:"maintenance_window_node_os,omitempty"` + + // A microsoft_defender block as defined below. + // +kubebuilder:validation:Optional + MicrosoftDefender *MicrosoftDefenderParameters `json:"microsoftDefender,omitempty" tf:"microsoft_defender,omitempty"` + + // Specifies a Prometheus add-on profile for the Kubernetes Cluster. A monitor_metrics block as defined below. 
+ // +kubebuilder:validation:Optional + MonitorMetrics *MonitorMetricsParameters `json:"monitorMetrics,omitempty" tf:"monitor_metrics,omitempty"` + + // A network_profile block as defined below. + // +kubebuilder:validation:Optional + NetworkProfile *NetworkProfileParameters `json:"networkProfile,omitempty" tf:"network_profile,omitempty"` + + // The upgrade channel for this Kubernetes Cluster Nodes' OS Image. Possible values are Unmanaged, SecurityPatch, NodeImage and None. + // +kubebuilder:validation:Optional + NodeOsChannelUpgrade *string `json:"nodeOsChannelUpgrade,omitempty" tf:"node_os_channel_upgrade,omitempty"` + + // The auto-generated Resource Group which contains the resources for this Managed Kubernetes Cluster. + // +kubebuilder:validation:Optional + NodeResourceGroup *string `json:"nodeResourceGroup,omitempty" tf:"node_resource_group,omitempty"` + + // Enable or Disable the OIDC issuer URL + // +kubebuilder:validation:Optional + OidcIssuerEnabled *bool `json:"oidcIssuerEnabled,omitempty" tf:"oidc_issuer_enabled,omitempty"` + + // An oms_agent block as defined below. + // +kubebuilder:validation:Optional + OmsAgent *OmsAgentParameters `json:"omsAgent,omitempty" tf:"oms_agent,omitempty"` + + // Is Open Service Mesh enabled? For more details, please visit Open Service Mesh for AKS. + // +kubebuilder:validation:Optional + OpenServiceMeshEnabled *bool `json:"openServiceMeshEnabled,omitempty" tf:"open_service_mesh_enabled,omitempty"` + + // Should this Kubernetes Cluster have its API server only exposed on internal IP addresses? This provides a Private IP Address for the Kubernetes API on the Virtual Network where the Kubernetes Cluster is located. Defaults to false. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + PrivateClusterEnabled *bool `json:"privateClusterEnabled,omitempty" tf:"private_cluster_enabled,omitempty"` + + // Specifies whether a Public FQDN for this Private Cluster should be added. 
Defaults to false. + // +kubebuilder:validation:Optional + PrivateClusterPublicFqdnEnabled *bool `json:"privateClusterPublicFqdnEnabled,omitempty" tf:"private_cluster_public_fqdn_enabled,omitempty"` + + // Either the ID of Private DNS Zone which should be delegated to this Cluster, System to have AKS manage this or None. In case of None you will need to bring your own DNS server and set up resolving, otherwise, the cluster will have issues after provisioning. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.PrivateDNSZone + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + PrivateDNSZoneID *string `json:"privateDnsZoneId,omitempty" tf:"private_dns_zone_id,omitempty"` + + // Reference to a PrivateDNSZone in network to populate privateDnsZoneId. + // +kubebuilder:validation:Optional + PrivateDNSZoneIDRef *v1.Reference `json:"privateDnsZoneIdRef,omitempty" tf:"-"` + + // Selector for a PrivateDNSZone in network to populate privateDnsZoneId. + // +kubebuilder:validation:Optional + PrivateDNSZoneIDSelector *v1.Selector `json:"privateDnsZoneIdSelector,omitempty" tf:"-"` + + // Whether public network access is allowed for this Kubernetes Cluster. Defaults to true. + // +kubebuilder:validation:Optional + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // Specifies the Resource Group where the Managed Kubernetes Cluster should exist. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. 
+ // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // Whether Role Based Access Control for the Kubernetes Cluster should be enabled. Defaults to true. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + RoleBasedAccessControlEnabled *bool `json:"roleBasedAccessControlEnabled,omitempty" tf:"role_based_access_control_enabled,omitempty"` + + // Whether to enable run command for the cluster or not. Defaults to true. + // +kubebuilder:validation:Optional + RunCommandEnabled *bool `json:"runCommandEnabled,omitempty" tf:"run_command_enabled,omitempty"` + + // A service_mesh_profile block as defined below. + // +kubebuilder:validation:Optional + ServiceMeshProfile *ServiceMeshProfileParameters `json:"serviceMeshProfile,omitempty" tf:"service_mesh_profile,omitempty"` + + // A service_principal block as documented below. One of either identity or service_principal must be specified. + // +kubebuilder:validation:Optional + ServicePrincipal *ServicePrincipalParameters `json:"servicePrincipal,omitempty" tf:"service_principal,omitempty"` + + // The SKU Tier that should be used for this Kubernetes Cluster. Possible values are Free, Standard (which includes the Uptime SLA) and Premium. Defaults to Free. + // +kubebuilder:validation:Optional + SkuTier *string `json:"skuTier,omitempty" tf:"sku_tier,omitempty"` + + // A storage_profile block as defined below. + // +kubebuilder:validation:Optional + StorageProfile *StorageProfileParameters `json:"storageProfile,omitempty" tf:"storage_profile,omitempty"` + + // Specifies the support plan which should be used for this Kubernetes Cluster. Possible values are KubernetesOfficial and AKSLongTermSupport. 
Defaults to KubernetesOfficial. + // +kubebuilder:validation:Optional + SupportPlan *string `json:"supportPlan,omitempty" tf:"support_plan,omitempty"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A web_app_routing block as defined below. + // +kubebuilder:validation:Optional + WebAppRouting *WebAppRoutingParameters `json:"webAppRouting,omitempty" tf:"web_app_routing,omitempty"` + + // A windows_profile block as defined below. + // +kubebuilder:validation:Optional + WindowsProfile *WindowsProfileParameters `json:"windowsProfile,omitempty" tf:"windows_profile,omitempty"` + + // A workload_autoscaler_profile block defined below. + // +kubebuilder:validation:Optional + WorkloadAutoscalerProfile *WorkloadAutoscalerProfileParameters `json:"workloadAutoscalerProfile,omitempty" tf:"workload_autoscaler_profile,omitempty"` + + // Specifies whether Azure AD Workload Identity should be enabled for the Cluster. Defaults to false. + // +kubebuilder:validation:Optional + WorkloadIdentityEnabled *bool `json:"workloadIdentityEnabled,omitempty" tf:"workload_identity_enabled,omitempty"` +} + +type LinuxOsConfigInitParameters struct { + + // Specifies the size of the swap file on each node in MB. + SwapFileSizeMb *float64 `json:"swapFileSizeMb,omitempty" tf:"swap_file_size_mb,omitempty"` + + // A sysctl_config block as defined below. + SysctlConfig *SysctlConfigInitParameters `json:"sysctlConfig,omitempty" tf:"sysctl_config,omitempty"` + + // specifies the defrag configuration for Transparent Huge Page. Possible values are always, defer, defer+madvise, madvise and never. + TransparentHugePageDefrag *string `json:"transparentHugePageDefrag,omitempty" tf:"transparent_huge_page_defrag,omitempty"` + + // Specifies the Transparent Huge Page enabled configuration. Possible values are always, madvise and never. 
+ TransparentHugePageEnabled *string `json:"transparentHugePageEnabled,omitempty" tf:"transparent_huge_page_enabled,omitempty"` +} + +type LinuxOsConfigObservation struct { + + // Specifies the size of the swap file on each node in MB. + SwapFileSizeMb *float64 `json:"swapFileSizeMb,omitempty" tf:"swap_file_size_mb,omitempty"` + + // A sysctl_config block as defined below. + SysctlConfig *SysctlConfigObservation `json:"sysctlConfig,omitempty" tf:"sysctl_config,omitempty"` + + // specifies the defrag configuration for Transparent Huge Page. Possible values are always, defer, defer+madvise, madvise and never. + TransparentHugePageDefrag *string `json:"transparentHugePageDefrag,omitempty" tf:"transparent_huge_page_defrag,omitempty"` + + // Specifies the Transparent Huge Page enabled configuration. Possible values are always, madvise and never. + TransparentHugePageEnabled *string `json:"transparentHugePageEnabled,omitempty" tf:"transparent_huge_page_enabled,omitempty"` +} + +type LinuxOsConfigParameters struct { + + // Specifies the size of the swap file on each node in MB. + // +kubebuilder:validation:Optional + SwapFileSizeMb *float64 `json:"swapFileSizeMb,omitempty" tf:"swap_file_size_mb,omitempty"` + + // A sysctl_config block as defined below. + // +kubebuilder:validation:Optional + SysctlConfig *SysctlConfigParameters `json:"sysctlConfig,omitempty" tf:"sysctl_config,omitempty"` + + // specifies the defrag configuration for Transparent Huge Page. Possible values are always, defer, defer+madvise, madvise and never. + // +kubebuilder:validation:Optional + TransparentHugePageDefrag *string `json:"transparentHugePageDefrag,omitempty" tf:"transparent_huge_page_defrag,omitempty"` + + // Specifies the Transparent Huge Page enabled configuration. Possible values are always, madvise and never. 
+ // +kubebuilder:validation:Optional + TransparentHugePageEnabled *string `json:"transparentHugePageEnabled,omitempty" tf:"transparent_huge_page_enabled,omitempty"` +} + +type LinuxProfileInitParameters struct { + + // The Admin Username for the Cluster. Changing this forces a new resource to be created. + AdminUsername *string `json:"adminUsername,omitempty" tf:"admin_username,omitempty"` + + // An ssh_key block as defined below. Only one is currently allowed. Changing this will update the key on all node pools. More information can be found in the documentation. + SSHKey *SSHKeyInitParameters `json:"sshKey,omitempty" tf:"ssh_key,omitempty"` +} + +type LinuxProfileObservation struct { + + // The Admin Username for the Cluster. Changing this forces a new resource to be created. + AdminUsername *string `json:"adminUsername,omitempty" tf:"admin_username,omitempty"` + + // An ssh_key block as defined below. Only one is currently allowed. Changing this will update the key on all node pools. More information can be found in the documentation. + SSHKey *SSHKeyObservation `json:"sshKey,omitempty" tf:"ssh_key,omitempty"` +} + +type LinuxProfileParameters struct { + + // The Admin Username for the Cluster. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + AdminUsername *string `json:"adminUsername" tf:"admin_username,omitempty"` + + // An ssh_key block as defined below. Only one is currently allowed. Changing this will update the key on all node pools. More information can be found in the documentation. + // +kubebuilder:validation:Optional + SSHKey *SSHKeyParameters `json:"sshKey" tf:"ssh_key,omitempty"` +} + +type LoadBalancerProfileInitParameters struct { + + // Desired outbound flow idle timeout in minutes for the cluster load balancer. Must be between 4 and 120 inclusive. Defaults to 4. 
+ IdleTimeoutInMinutes *float64 `json:"idleTimeoutInMinutes,omitempty" tf:"idle_timeout_in_minutes,omitempty"` + + // Count of desired managed outbound IPs for the cluster load balancer. Must be between 1 and 100 inclusive. + ManagedOutboundIPCount *float64 `json:"managedOutboundIpCount,omitempty" tf:"managed_outbound_ip_count,omitempty"` + + // The desired number of IPv6 outbound IPs created and managed by Azure for the cluster load balancer. Must be in the range of 1 to 100 (inclusive). The default value is 0 for single-stack and 1 for dual-stack. + ManagedOutboundIPv6Count *float64 `json:"managedOutboundIpv6Count,omitempty" tf:"managed_outbound_ipv6_count,omitempty"` + + // The ID of the Public IP Addresses which should be used for outbound communication for the cluster load balancer. + // +listType=set + OutboundIPAddressIds []*string `json:"outboundIpAddressIds,omitempty" tf:"outbound_ip_address_ids,omitempty"` + + // The ID of the outbound Public IP Address Prefixes which should be used for the cluster load balancer. + // +listType=set + OutboundIPPrefixIds []*string `json:"outboundIpPrefixIds,omitempty" tf:"outbound_ip_prefix_ids,omitempty"` + + // Number of desired SNAT port for each VM in the clusters load balancer. Must be between 0 and 64000 inclusive. Defaults to 0. + OutboundPortsAllocated *float64 `json:"outboundPortsAllocated,omitempty" tf:"outbound_ports_allocated,omitempty"` +} + +type LoadBalancerProfileObservation struct { + + // The outcome (resource IDs) of the specified arguments. + // +listType=set + EffectiveOutboundIps []*string `json:"effectiveOutboundIps,omitempty" tf:"effective_outbound_ips,omitempty"` + + // Desired outbound flow idle timeout in minutes for the cluster load balancer. Must be between 4 and 120 inclusive. Defaults to 4. + IdleTimeoutInMinutes *float64 `json:"idleTimeoutInMinutes,omitempty" tf:"idle_timeout_in_minutes,omitempty"` + + // Count of desired managed outbound IPs for the cluster load balancer. 
Must be between 1 and 100 inclusive. + ManagedOutboundIPCount *float64 `json:"managedOutboundIpCount,omitempty" tf:"managed_outbound_ip_count,omitempty"` + + // The desired number of IPv6 outbound IPs created and managed by Azure for the cluster load balancer. Must be in the range of 1 to 100 (inclusive). The default value is 0 for single-stack and 1 for dual-stack. + ManagedOutboundIPv6Count *float64 `json:"managedOutboundIpv6Count,omitempty" tf:"managed_outbound_ipv6_count,omitempty"` + + // The ID of the Public IP Addresses which should be used for outbound communication for the cluster load balancer. + // +listType=set + OutboundIPAddressIds []*string `json:"outboundIpAddressIds,omitempty" tf:"outbound_ip_address_ids,omitempty"` + + // The ID of the outbound Public IP Address Prefixes which should be used for the cluster load balancer. + // +listType=set + OutboundIPPrefixIds []*string `json:"outboundIpPrefixIds,omitempty" tf:"outbound_ip_prefix_ids,omitempty"` + + // Number of desired SNAT port for each VM in the clusters load balancer. Must be between 0 and 64000 inclusive. Defaults to 0. + OutboundPortsAllocated *float64 `json:"outboundPortsAllocated,omitempty" tf:"outbound_ports_allocated,omitempty"` +} + +type LoadBalancerProfileParameters struct { + + // Desired outbound flow idle timeout in minutes for the cluster load balancer. Must be between 4 and 120 inclusive. Defaults to 4. + // +kubebuilder:validation:Optional + IdleTimeoutInMinutes *float64 `json:"idleTimeoutInMinutes,omitempty" tf:"idle_timeout_in_minutes,omitempty"` + + // Count of desired managed outbound IPs for the cluster load balancer. Must be between 1 and 100 inclusive. + // +kubebuilder:validation:Optional + ManagedOutboundIPCount *float64 `json:"managedOutboundIpCount,omitempty" tf:"managed_outbound_ip_count,omitempty"` + + // The desired number of IPv6 outbound IPs created and managed by Azure for the cluster load balancer. Must be in the range of 1 to 100 (inclusive). 
The default value is 0 for single-stack and 1 for dual-stack. + // +kubebuilder:validation:Optional + ManagedOutboundIPv6Count *float64 `json:"managedOutboundIpv6Count,omitempty" tf:"managed_outbound_ipv6_count,omitempty"` + + // The ID of the Public IP Addresses which should be used for outbound communication for the cluster load balancer. + // +kubebuilder:validation:Optional + // +listType=set + OutboundIPAddressIds []*string `json:"outboundIpAddressIds,omitempty" tf:"outbound_ip_address_ids,omitempty"` + + // The ID of the outbound Public IP Address Prefixes which should be used for the cluster load balancer. + // +kubebuilder:validation:Optional + // +listType=set + OutboundIPPrefixIds []*string `json:"outboundIpPrefixIds,omitempty" tf:"outbound_ip_prefix_ids,omitempty"` + + // Number of desired SNAT port for each VM in the clusters load balancer. Must be between 0 and 64000 inclusive. Defaults to 0. + // +kubebuilder:validation:Optional + OutboundPortsAllocated *float64 `json:"outboundPortsAllocated,omitempty" tf:"outbound_ports_allocated,omitempty"` +} + +type MaintenanceWindowAutoUpgradeInitParameters struct { + + // The day of the month for the maintenance run. Required in combination with RelativeMonthly frequency. Value between 0 and 31 (inclusive). + DayOfMonth *float64 `json:"dayOfMonth,omitempty" tf:"day_of_month,omitempty"` + + // The day of the week for the maintenance run. Required in combination with weekly frequency. Possible values are Friday, Monday, Saturday, Sunday, Thursday, Tuesday and Wednesday. + DayOfWeek *string `json:"dayOfWeek,omitempty" tf:"day_of_week,omitempty"` + + // The duration of the window for maintenance to run in hours. + Duration *float64 `json:"duration,omitempty" tf:"duration,omitempty"` + + // Frequency of maintenance. Possible options are Weekly, AbsoluteMonthly and RelativeMonthly. + Frequency *string `json:"frequency,omitempty" tf:"frequency,omitempty"` + + // The interval for maintenance runs. 
Depending on the frequency this interval is week or month based. + Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` + + // One or more not_allowed block as defined below. + NotAllowed []MaintenanceWindowAutoUpgradeNotAllowedInitParameters `json:"notAllowed,omitempty" tf:"not_allowed,omitempty"` + + // The date on which the maintenance window begins to take effect. + StartDate *string `json:"startDate,omitempty" tf:"start_date,omitempty"` + + // The time for maintenance to begin, based on the timezone determined by utc_offset. Format is HH:mm. + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` + + // Used to determine the timezone for cluster maintenance. + UtcOffset *string `json:"utcOffset,omitempty" tf:"utc_offset,omitempty"` + + // Specifies on which instance of the allowed days specified in day_of_week the maintenance occurs. Options are First, Second, Third, Fourth, and Last. + // Required in combination with relative monthly frequency. + WeekIndex *string `json:"weekIndex,omitempty" tf:"week_index,omitempty"` +} + +type MaintenanceWindowAutoUpgradeNotAllowedInitParameters struct { + + // The end of a time span, formatted as an RFC3339 string. + End *string `json:"end,omitempty" tf:"end,omitempty"` + + // The start of a time span, formatted as an RFC3339 string. + Start *string `json:"start,omitempty" tf:"start,omitempty"` +} + +type MaintenanceWindowAutoUpgradeNotAllowedObservation struct { + + // The end of a time span, formatted as an RFC3339 string. + End *string `json:"end,omitempty" tf:"end,omitempty"` + + // The start of a time span, formatted as an RFC3339 string. + Start *string `json:"start,omitempty" tf:"start,omitempty"` +} + +type MaintenanceWindowAutoUpgradeNotAllowedParameters struct { + + // The end of a time span, formatted as an RFC3339 string. + // +kubebuilder:validation:Optional + End *string `json:"end" tf:"end,omitempty"` + + // The start of a time span, formatted as an RFC3339 string. 
+ // +kubebuilder:validation:Optional + Start *string `json:"start" tf:"start,omitempty"` +} + +type MaintenanceWindowAutoUpgradeObservation struct { + + // The day of the month for the maintenance run. Required in combination with RelativeMonthly frequency. Value between 0 and 31 (inclusive). + DayOfMonth *float64 `json:"dayOfMonth,omitempty" tf:"day_of_month,omitempty"` + + // The day of the week for the maintenance run. Required in combination with weekly frequency. Possible values are Friday, Monday, Saturday, Sunday, Thursday, Tuesday and Wednesday. + DayOfWeek *string `json:"dayOfWeek,omitempty" tf:"day_of_week,omitempty"` + + // The duration of the window for maintenance to run in hours. + Duration *float64 `json:"duration,omitempty" tf:"duration,omitempty"` + + // Frequency of maintenance. Possible options are Weekly, AbsoluteMonthly and RelativeMonthly. + Frequency *string `json:"frequency,omitempty" tf:"frequency,omitempty"` + + // The interval for maintenance runs. Depending on the frequency this interval is week or month based. + Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` + + // One or more not_allowed block as defined below. + NotAllowed []MaintenanceWindowAutoUpgradeNotAllowedObservation `json:"notAllowed,omitempty" tf:"not_allowed,omitempty"` + + // The date on which the maintenance window begins to take effect. + StartDate *string `json:"startDate,omitempty" tf:"start_date,omitempty"` + + // The time for maintenance to begin, based on the timezone determined by utc_offset. Format is HH:mm. + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` + + // Used to determine the timezone for cluster maintenance. + UtcOffset *string `json:"utcOffset,omitempty" tf:"utc_offset,omitempty"` + + // Specifies on which instance of the allowed days specified in day_of_week the maintenance occurs. Options are First, Second, Third, Fourth, and Last. + // Required in combination with relative monthly frequency. 
+ WeekIndex *string `json:"weekIndex,omitempty" tf:"week_index,omitempty"` +} + +type MaintenanceWindowAutoUpgradeParameters struct { + + // The day of the month for the maintenance run. Required in combination with RelativeMonthly frequency. Value between 0 and 31 (inclusive). + // +kubebuilder:validation:Optional + DayOfMonth *float64 `json:"dayOfMonth,omitempty" tf:"day_of_month,omitempty"` + + // The day of the week for the maintenance run. Required in combination with weekly frequency. Possible values are Friday, Monday, Saturday, Sunday, Thursday, Tuesday and Wednesday. + // +kubebuilder:validation:Optional + DayOfWeek *string `json:"dayOfWeek,omitempty" tf:"day_of_week,omitempty"` + + // The duration of the window for maintenance to run in hours. + // +kubebuilder:validation:Optional + Duration *float64 `json:"duration" tf:"duration,omitempty"` + + // Frequency of maintenance. Possible options are Weekly, AbsoluteMonthly and RelativeMonthly. + // +kubebuilder:validation:Optional + Frequency *string `json:"frequency" tf:"frequency,omitempty"` + + // The interval for maintenance runs. Depending on the frequency this interval is week or month based. + // +kubebuilder:validation:Optional + Interval *float64 `json:"interval" tf:"interval,omitempty"` + + // One or more not_allowed block as defined below. + // +kubebuilder:validation:Optional + NotAllowed []MaintenanceWindowAutoUpgradeNotAllowedParameters `json:"notAllowed,omitempty" tf:"not_allowed,omitempty"` + + // The date on which the maintenance window begins to take effect. + // +kubebuilder:validation:Optional + StartDate *string `json:"startDate,omitempty" tf:"start_date,omitempty"` + + // The time for maintenance to begin, based on the timezone determined by utc_offset. Format is HH:mm. + // +kubebuilder:validation:Optional + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` + + // Used to determine the timezone for cluster maintenance. 
+ // +kubebuilder:validation:Optional + UtcOffset *string `json:"utcOffset,omitempty" tf:"utc_offset,omitempty"` + + // Specifies on which instance of the allowed days specified in day_of_week the maintenance occurs. Options are First, Second, Third, Fourth, and Last. + // Required in combination with relative monthly frequency. + // +kubebuilder:validation:Optional + WeekIndex *string `json:"weekIndex,omitempty" tf:"week_index,omitempty"` +} + +type MaintenanceWindowInitParameters struct { + + // One or more allowed blocks as defined below. + Allowed []AllowedInitParameters `json:"allowed,omitempty" tf:"allowed,omitempty"` + + // One or more not_allowed block as defined below. + NotAllowed []NotAllowedInitParameters `json:"notAllowed,omitempty" tf:"not_allowed,omitempty"` +} + +type MaintenanceWindowNodeOsInitParameters struct { + + // The day of the month for the maintenance run. Required in combination with RelativeMonthly frequency. Value between 0 and 31 (inclusive). + DayOfMonth *float64 `json:"dayOfMonth,omitempty" tf:"day_of_month,omitempty"` + + // The day of the week for the maintenance run. Required in combination with weekly frequency. Possible values are Friday, Monday, Saturday, Sunday, Thursday, Tuesday and Wednesday. + DayOfWeek *string `json:"dayOfWeek,omitempty" tf:"day_of_week,omitempty"` + + // The duration of the window for maintenance to run in hours. + Duration *float64 `json:"duration,omitempty" tf:"duration,omitempty"` + + // Frequency of maintenance. Possible options are Daily, Weekly, AbsoluteMonthly and RelativeMonthly. + Frequency *string `json:"frequency,omitempty" tf:"frequency,omitempty"` + + // The interval for maintenance runs. Depending on the frequency this interval is week or month based. + Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` + + // One or more not_allowed block as defined below. 
+ NotAllowed []MaintenanceWindowNodeOsNotAllowedInitParameters `json:"notAllowed,omitempty" tf:"not_allowed,omitempty"` + + // The date on which the maintenance window begins to take effect. + StartDate *string `json:"startDate,omitempty" tf:"start_date,omitempty"` + + // The time for maintenance to begin, based on the timezone determined by utc_offset. Format is HH:mm. + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` + + // Used to determine the timezone for cluster maintenance. + UtcOffset *string `json:"utcOffset,omitempty" tf:"utc_offset,omitempty"` + + // The week in the month used for the maintenance run. Options are First, Second, Third, Fourth, and Last. + WeekIndex *string `json:"weekIndex,omitempty" tf:"week_index,omitempty"` +} + +type MaintenanceWindowNodeOsNotAllowedInitParameters struct { + + // The end of a time span, formatted as an RFC3339 string. + End *string `json:"end,omitempty" tf:"end,omitempty"` + + // The start of a time span, formatted as an RFC3339 string. + Start *string `json:"start,omitempty" tf:"start,omitempty"` +} + +type MaintenanceWindowNodeOsNotAllowedObservation struct { + + // The end of a time span, formatted as an RFC3339 string. + End *string `json:"end,omitempty" tf:"end,omitempty"` + + // The start of a time span, formatted as an RFC3339 string. + Start *string `json:"start,omitempty" tf:"start,omitempty"` +} + +type MaintenanceWindowNodeOsNotAllowedParameters struct { + + // The end of a time span, formatted as an RFC3339 string. + // +kubebuilder:validation:Optional + End *string `json:"end" tf:"end,omitempty"` + + // The start of a time span, formatted as an RFC3339 string. + // +kubebuilder:validation:Optional + Start *string `json:"start" tf:"start,omitempty"` +} + +type MaintenanceWindowNodeOsObservation struct { + + // The day of the month for the maintenance run. Required in combination with RelativeMonthly frequency. Value between 0 and 31 (inclusive). 
+ DayOfMonth *float64 `json:"dayOfMonth,omitempty" tf:"day_of_month,omitempty"` + + // The day of the week for the maintenance run. Required in combination with weekly frequency. Possible values are Friday, Monday, Saturday, Sunday, Thursday, Tuesday and Wednesday. + DayOfWeek *string `json:"dayOfWeek,omitempty" tf:"day_of_week,omitempty"` + + // The duration of the window for maintenance to run in hours. + Duration *float64 `json:"duration,omitempty" tf:"duration,omitempty"` + + // Frequency of maintenance. Possible options are Daily, Weekly, AbsoluteMonthly and RelativeMonthly. + Frequency *string `json:"frequency,omitempty" tf:"frequency,omitempty"` + + // The interval for maintenance runs. Depending on the frequency this interval is week or month based. + Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` + + // One or more not_allowed block as defined below. + NotAllowed []MaintenanceWindowNodeOsNotAllowedObservation `json:"notAllowed,omitempty" tf:"not_allowed,omitempty"` + + // The date on which the maintenance window begins to take effect. + StartDate *string `json:"startDate,omitempty" tf:"start_date,omitempty"` + + // The time for maintenance to begin, based on the timezone determined by utc_offset. Format is HH:mm. + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` + + // Used to determine the timezone for cluster maintenance. + UtcOffset *string `json:"utcOffset,omitempty" tf:"utc_offset,omitempty"` + + // The week in the month used for the maintenance run. Options are First, Second, Third, Fourth, and Last. + WeekIndex *string `json:"weekIndex,omitempty" tf:"week_index,omitempty"` +} + +type MaintenanceWindowNodeOsParameters struct { + + // The day of the month for the maintenance run. Required in combination with RelativeMonthly frequency. Value between 0 and 31 (inclusive). 
+ // +kubebuilder:validation:Optional + DayOfMonth *float64 `json:"dayOfMonth,omitempty" tf:"day_of_month,omitempty"` + + // The day of the week for the maintenance run. Required in combination with weekly frequency. Possible values are Friday, Monday, Saturday, Sunday, Thursday, Tuesday and Wednesday. + // +kubebuilder:validation:Optional + DayOfWeek *string `json:"dayOfWeek,omitempty" tf:"day_of_week,omitempty"` + + // The duration of the window for maintenance to run in hours. + // +kubebuilder:validation:Optional + Duration *float64 `json:"duration" tf:"duration,omitempty"` + + // Frequency of maintenance. Possible options are Daily, Weekly, AbsoluteMonthly and RelativeMonthly. + // +kubebuilder:validation:Optional + Frequency *string `json:"frequency" tf:"frequency,omitempty"` + + // The interval for maintenance runs. Depending on the frequency this interval is week or month based. + // +kubebuilder:validation:Optional + Interval *float64 `json:"interval" tf:"interval,omitempty"` + + // One or more not_allowed block as defined below. + // +kubebuilder:validation:Optional + NotAllowed []MaintenanceWindowNodeOsNotAllowedParameters `json:"notAllowed,omitempty" tf:"not_allowed,omitempty"` + + // The date on which the maintenance window begins to take effect. + // +kubebuilder:validation:Optional + StartDate *string `json:"startDate,omitempty" tf:"start_date,omitempty"` + + // The time for maintenance to begin, based on the timezone determined by utc_offset. Format is HH:mm. + // +kubebuilder:validation:Optional + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` + + // Used to determine the timezone for cluster maintenance. + // +kubebuilder:validation:Optional + UtcOffset *string `json:"utcOffset,omitempty" tf:"utc_offset,omitempty"` + + // The week in the month used for the maintenance run. Options are First, Second, Third, Fourth, and Last. 
+ // +kubebuilder:validation:Optional + WeekIndex *string `json:"weekIndex,omitempty" tf:"week_index,omitempty"` +} + +type MaintenanceWindowObservation struct { + + // One or more allowed blocks as defined below. + Allowed []AllowedObservation `json:"allowed,omitempty" tf:"allowed,omitempty"` + + // One or more not_allowed block as defined below. + NotAllowed []NotAllowedObservation `json:"notAllowed,omitempty" tf:"not_allowed,omitempty"` +} + +type MaintenanceWindowParameters struct { + + // One or more allowed blocks as defined below. + // +kubebuilder:validation:Optional + Allowed []AllowedParameters `json:"allowed,omitempty" tf:"allowed,omitempty"` + + // One or more not_allowed block as defined below. + // +kubebuilder:validation:Optional + NotAllowed []NotAllowedParameters `json:"notAllowed,omitempty" tf:"not_allowed,omitempty"` +} + +type MicrosoftDefenderInitParameters struct { + + // Specifies the ID of the Log Analytics Workspace where the audit logs collected by Microsoft Defender should be sent to. + LogAnalyticsWorkspaceID *string `json:"logAnalyticsWorkspaceId,omitempty" tf:"log_analytics_workspace_id,omitempty"` +} + +type MicrosoftDefenderObservation struct { + + // Specifies the ID of the Log Analytics Workspace where the audit logs collected by Microsoft Defender should be sent to. + LogAnalyticsWorkspaceID *string `json:"logAnalyticsWorkspaceId,omitempty" tf:"log_analytics_workspace_id,omitempty"` +} + +type MicrosoftDefenderParameters struct { + + // Specifies the ID of the Log Analytics Workspace where the audit logs collected by Microsoft Defender should be sent to. + // +kubebuilder:validation:Optional + LogAnalyticsWorkspaceID *string `json:"logAnalyticsWorkspaceId" tf:"log_analytics_workspace_id,omitempty"` +} + +type MonitorMetricsInitParameters struct { + + // Specifies a comma-separated list of Kubernetes annotation keys that will be used in the resource's labels metric. 
+ AnnotationsAllowed *string `json:"annotationsAllowed,omitempty" tf:"annotations_allowed,omitempty"` + + // Specifies a Comma-separated list of additional Kubernetes label keys that will be used in the resource's labels metric. + LabelsAllowed *string `json:"labelsAllowed,omitempty" tf:"labels_allowed,omitempty"` +} + +type MonitorMetricsObservation struct { + + // Specifies a comma-separated list of Kubernetes annotation keys that will be used in the resource's labels metric. + AnnotationsAllowed *string `json:"annotationsAllowed,omitempty" tf:"annotations_allowed,omitempty"` + + // Specifies a Comma-separated list of additional Kubernetes label keys that will be used in the resource's labels metric. + LabelsAllowed *string `json:"labelsAllowed,omitempty" tf:"labels_allowed,omitempty"` +} + +type MonitorMetricsParameters struct { + + // Specifies a comma-separated list of Kubernetes annotation keys that will be used in the resource's labels metric. + // +kubebuilder:validation:Optional + AnnotationsAllowed *string `json:"annotationsAllowed,omitempty" tf:"annotations_allowed,omitempty"` + + // Specifies a Comma-separated list of additional Kubernetes label keys that will be used in the resource's labels metric. + // +kubebuilder:validation:Optional + LabelsAllowed *string `json:"labelsAllowed,omitempty" tf:"labels_allowed,omitempty"` +} + +type NATGatewayProfileInitParameters struct { + + // Desired outbound flow idle timeout in minutes for the cluster load balancer. Must be between 4 and 120 inclusive. Defaults to 4. + IdleTimeoutInMinutes *float64 `json:"idleTimeoutInMinutes,omitempty" tf:"idle_timeout_in_minutes,omitempty"` + + // Count of desired managed outbound IPs for the cluster load balancer. Must be between 1 and 100 inclusive. + ManagedOutboundIPCount *float64 `json:"managedOutboundIpCount,omitempty" tf:"managed_outbound_ip_count,omitempty"` +} + +type NATGatewayProfileObservation struct { + + // The outcome (resource IDs) of the specified arguments. 
+ // +listType=set + EffectiveOutboundIps []*string `json:"effectiveOutboundIps,omitempty" tf:"effective_outbound_ips,omitempty"` + + // Desired outbound flow idle timeout in minutes for the cluster load balancer. Must be between 4 and 120 inclusive. Defaults to 4. + IdleTimeoutInMinutes *float64 `json:"idleTimeoutInMinutes,omitempty" tf:"idle_timeout_in_minutes,omitempty"` + + // Count of desired managed outbound IPs for the cluster load balancer. Must be between 1 and 100 inclusive. + ManagedOutboundIPCount *float64 `json:"managedOutboundIpCount,omitempty" tf:"managed_outbound_ip_count,omitempty"` +} + +type NATGatewayProfileParameters struct { + + // Desired outbound flow idle timeout in minutes for the cluster load balancer. Must be between 4 and 120 inclusive. Defaults to 4. + // +kubebuilder:validation:Optional + IdleTimeoutInMinutes *float64 `json:"idleTimeoutInMinutes,omitempty" tf:"idle_timeout_in_minutes,omitempty"` + + // Count of desired managed outbound IPs for the cluster load balancer. Must be between 1 and 100 inclusive. + // +kubebuilder:validation:Optional + ManagedOutboundIPCount *float64 `json:"managedOutboundIpCount,omitempty" tf:"managed_outbound_ip_count,omitempty"` +} + +type NetworkProfileInitParameters struct { + + // IP address within the Kubernetes service address range that will be used by cluster service discovery (kube-dns). Changing this forces a new resource to be created. + DNSServiceIP *string `json:"dnsServiceIp,omitempty" tf:"dns_service_ip,omitempty"` + + // IP address (in CIDR notation) used as the Docker bridge IP address on nodes. Changing this forces a new resource to be created. + DockerBridgeCidr *string `json:"dockerBridgeCidr,omitempty" tf:"docker_bridge_cidr,omitempty"` + + // Specifies the eBPF data plane used for building the Kubernetes network. Possible value is cilium. Disabling this forces a new resource to be created. 
+ EbpfDataPlane *string `json:"ebpfDataPlane,omitempty" tf:"ebpf_data_plane,omitempty"` + + // Specifies a list of IP versions the Kubernetes Cluster will use to assign IP addresses to its nodes and pods. Possible values are IPv4 and/or IPv6. IPv4 must always be specified. Changing this forces a new resource to be created. + IPVersions []*string `json:"ipVersions,omitempty" tf:"ip_versions,omitempty"` + + // A load_balancer_profile block as defined below. This can only be specified when load_balancer_sku is set to standard. Changing this forces a new resource to be created. + LoadBalancerProfile *LoadBalancerProfileInitParameters `json:"loadBalancerProfile,omitempty" tf:"load_balancer_profile,omitempty"` + + // Specifies the SKU of the Load Balancer used for this Kubernetes Cluster. Possible values are basic and standard. Defaults to standard. Changing this forces a new resource to be created. + LoadBalancerSku *string `json:"loadBalancerSku,omitempty" tf:"load_balancer_sku,omitempty"` + + // A nat_gateway_profile block as defined below. This can only be specified when load_balancer_sku is set to standard and outbound_type is set to managedNATGateway or userAssignedNATGateway. Changing this forces a new resource to be created. + NATGatewayProfile *NATGatewayProfileInitParameters `json:"natGatewayProfile,omitempty" tf:"nat_gateway_profile,omitempty"` + + // Network mode to be used with Azure CNI. Possible values are bridge and transparent. Changing this forces a new resource to be created. + NetworkMode *string `json:"networkMode,omitempty" tf:"network_mode,omitempty"` + + // Network plugin to use for networking. Currently supported values are azure, kubenet and none. Changing this forces a new resource to be created. + NetworkPlugin *string `json:"networkPlugin,omitempty" tf:"network_plugin,omitempty"` + + // Specifies the network plugin mode used for building the Kubernetes network. Possible value is overlay. 
+ NetworkPluginMode *string `json:"networkPluginMode,omitempty" tf:"network_plugin_mode,omitempty"` + + // Sets up network policy to be used with Azure CNI. Network policy allows us to control the traffic flow between pods. Currently supported values are calico, azure and cilium. + NetworkPolicy *string `json:"networkPolicy,omitempty" tf:"network_policy,omitempty"` + + // The outbound (egress) routing method which should be used for this Kubernetes Cluster. Possible values are loadBalancer, userDefinedRouting, managedNATGateway and userAssignedNATGateway. Defaults to loadBalancer. More information on supported migration paths for outbound_type can be found in this documentation. + OutboundType *string `json:"outboundType,omitempty" tf:"outbound_type,omitempty"` + + // The CIDR to use for pod IP addresses. This field can only be set when network_plugin is set to kubenet. Changing this forces a new resource to be created. + PodCidr *string `json:"podCidr,omitempty" tf:"pod_cidr,omitempty"` + + // A list of CIDRs to use for pod IP addresses. For single-stack networking a single IPv4 CIDR is expected. For dual-stack networking an IPv4 and IPv6 CIDR are expected. Changing this forces a new resource to be created. + PodCidrs []*string `json:"podCidrs,omitempty" tf:"pod_cidrs,omitempty"` + + // The Network Range used by the Kubernetes service. Changing this forces a new resource to be created. + ServiceCidr *string `json:"serviceCidr,omitempty" tf:"service_cidr,omitempty"` + + // A list of CIDRs to use for Kubernetes services. For single-stack networking a single IPv4 CIDR is expected. For dual-stack networking an IPv4 and IPv6 CIDR are expected. Changing this forces a new resource to be created. + ServiceCidrs []*string `json:"serviceCidrs,omitempty" tf:"service_cidrs,omitempty"` +} + +type NetworkProfileObservation struct { + + // IP address within the Kubernetes service address range that will be used by cluster service discovery (kube-dns). 
Changing this forces a new resource to be created. + DNSServiceIP *string `json:"dnsServiceIp,omitempty" tf:"dns_service_ip,omitempty"` + + // IP address (in CIDR notation) used as the Docker bridge IP address on nodes. Changing this forces a new resource to be created. + DockerBridgeCidr *string `json:"dockerBridgeCidr,omitempty" tf:"docker_bridge_cidr,omitempty"` + + // Specifies the eBPF data plane used for building the Kubernetes network. Possible value is cilium. Disabling this forces a new resource to be created. + EbpfDataPlane *string `json:"ebpfDataPlane,omitempty" tf:"ebpf_data_plane,omitempty"` + + // Specifies a list of IP versions the Kubernetes Cluster will use to assign IP addresses to its nodes and pods. Possible values are IPv4 and/or IPv6. IPv4 must always be specified. Changing this forces a new resource to be created. + IPVersions []*string `json:"ipVersions,omitempty" tf:"ip_versions,omitempty"` + + // A load_balancer_profile block as defined below. This can only be specified when load_balancer_sku is set to standard. Changing this forces a new resource to be created. + LoadBalancerProfile *LoadBalancerProfileObservation `json:"loadBalancerProfile,omitempty" tf:"load_balancer_profile,omitempty"` + + // Specifies the SKU of the Load Balancer used for this Kubernetes Cluster. Possible values are basic and standard. Defaults to standard. Changing this forces a new resource to be created. + LoadBalancerSku *string `json:"loadBalancerSku,omitempty" tf:"load_balancer_sku,omitempty"` + + // A nat_gateway_profile block as defined below. This can only be specified when load_balancer_sku is set to standard and outbound_type is set to managedNATGateway or userAssignedNATGateway. Changing this forces a new resource to be created. + NATGatewayProfile *NATGatewayProfileObservation `json:"natGatewayProfile,omitempty" tf:"nat_gateway_profile,omitempty"` + + // Network mode to be used with Azure CNI. Possible values are bridge and transparent. 
Changing this forces a new resource to be created. + NetworkMode *string `json:"networkMode,omitempty" tf:"network_mode,omitempty"` + + // Network plugin to use for networking. Currently supported values are azure, kubenet and none. Changing this forces a new resource to be created. + NetworkPlugin *string `json:"networkPlugin,omitempty" tf:"network_plugin,omitempty"` + + // Specifies the network plugin mode used for building the Kubernetes network. Possible value is overlay. + NetworkPluginMode *string `json:"networkPluginMode,omitempty" tf:"network_plugin_mode,omitempty"` + + // Sets up network policy to be used with Azure CNI. Network policy allows us to control the traffic flow between pods. Currently supported values are calico, azure and cilium. + NetworkPolicy *string `json:"networkPolicy,omitempty" tf:"network_policy,omitempty"` + + // The outbound (egress) routing method which should be used for this Kubernetes Cluster. Possible values are loadBalancer, userDefinedRouting, managedNATGateway and userAssignedNATGateway. Defaults to loadBalancer. More information on supported migration paths for outbound_type can be found in this documentation. + OutboundType *string `json:"outboundType,omitempty" tf:"outbound_type,omitempty"` + + // The CIDR to use for pod IP addresses. This field can only be set when network_plugin is set to kubenet. Changing this forces a new resource to be created. + PodCidr *string `json:"podCidr,omitempty" tf:"pod_cidr,omitempty"` + + // A list of CIDRs to use for pod IP addresses. For single-stack networking a single IPv4 CIDR is expected. For dual-stack networking an IPv4 and IPv6 CIDR are expected. Changing this forces a new resource to be created. + PodCidrs []*string `json:"podCidrs,omitempty" tf:"pod_cidrs,omitempty"` + + // The Network Range used by the Kubernetes service. Changing this forces a new resource to be created. 
+ ServiceCidr *string `json:"serviceCidr,omitempty" tf:"service_cidr,omitempty"` + + // A list of CIDRs to use for Kubernetes services. For single-stack networking a single IPv4 CIDR is expected. For dual-stack networking an IPv4 and IPv6 CIDR are expected. Changing this forces a new resource to be created. + ServiceCidrs []*string `json:"serviceCidrs,omitempty" tf:"service_cidrs,omitempty"` +} + +type NetworkProfileParameters struct { + + // IP address within the Kubernetes service address range that will be used by cluster service discovery (kube-dns). Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + DNSServiceIP *string `json:"dnsServiceIp,omitempty" tf:"dns_service_ip,omitempty"` + + // IP address (in CIDR notation) used as the Docker bridge IP address on nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + DockerBridgeCidr *string `json:"dockerBridgeCidr,omitempty" tf:"docker_bridge_cidr,omitempty"` + + // Specifies the eBPF data plane used for building the Kubernetes network. Possible value is cilium. Disabling this forces a new resource to be created. + // +kubebuilder:validation:Optional + EbpfDataPlane *string `json:"ebpfDataPlane,omitempty" tf:"ebpf_data_plane,omitempty"` + + // Specifies a list of IP versions the Kubernetes Cluster will use to assign IP addresses to its nodes and pods. Possible values are IPv4 and/or IPv6. IPv4 must always be specified. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + IPVersions []*string `json:"ipVersions,omitempty" tf:"ip_versions,omitempty"` + + // A load_balancer_profile block as defined below. This can only be specified when load_balancer_sku is set to standard. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + LoadBalancerProfile *LoadBalancerProfileParameters `json:"loadBalancerProfile,omitempty" tf:"load_balancer_profile,omitempty"` + + // Specifies the SKU of the Load Balancer used for this Kubernetes Cluster. Possible values are basic and standard. Defaults to standard. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + LoadBalancerSku *string `json:"loadBalancerSku,omitempty" tf:"load_balancer_sku,omitempty"` + + // A nat_gateway_profile block as defined below. This can only be specified when load_balancer_sku is set to standard and outbound_type is set to managedNATGateway or userAssignedNATGateway. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + NATGatewayProfile *NATGatewayProfileParameters `json:"natGatewayProfile,omitempty" tf:"nat_gateway_profile,omitempty"` + + // Network mode to be used with Azure CNI. Possible values are bridge and transparent. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + NetworkMode *string `json:"networkMode,omitempty" tf:"network_mode,omitempty"` + + // Network plugin to use for networking. Currently supported values are azure, kubenet and none. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + NetworkPlugin *string `json:"networkPlugin" tf:"network_plugin,omitempty"` + + // Specifies the network plugin mode used for building the Kubernetes network. Possible value is overlay. + // +kubebuilder:validation:Optional + NetworkPluginMode *string `json:"networkPluginMode,omitempty" tf:"network_plugin_mode,omitempty"` + + // Sets up network policy to be used with Azure CNI. Network policy allows us to control the traffic flow between pods. Currently supported values are calico, azure and cilium. 
+ // +kubebuilder:validation:Optional + NetworkPolicy *string `json:"networkPolicy,omitempty" tf:"network_policy,omitempty"` + + // The outbound (egress) routing method which should be used for this Kubernetes Cluster. Possible values are loadBalancer, userDefinedRouting, managedNATGateway and userAssignedNATGateway. Defaults to loadBalancer. More information on supported migration paths for outbound_type can be found in this documentation. + // +kubebuilder:validation:Optional + OutboundType *string `json:"outboundType,omitempty" tf:"outbound_type,omitempty"` + + // The CIDR to use for pod IP addresses. This field can only be set when network_plugin is set to kubenet. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + PodCidr *string `json:"podCidr,omitempty" tf:"pod_cidr,omitempty"` + + // A list of CIDRs to use for pod IP addresses. For single-stack networking a single IPv4 CIDR is expected. For dual-stack networking an IPv4 and IPv6 CIDR are expected. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + PodCidrs []*string `json:"podCidrs,omitempty" tf:"pod_cidrs,omitempty"` + + // The Network Range used by the Kubernetes service. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ServiceCidr *string `json:"serviceCidr,omitempty" tf:"service_cidr,omitempty"` + + // A list of CIDRs to use for Kubernetes services. For single-stack networking a single IPv4 CIDR is expected. For dual-stack networking an IPv4 and IPv6 CIDR are expected. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ServiceCidrs []*string `json:"serviceCidrs,omitempty" tf:"service_cidrs,omitempty"` +} + +type NodeNetworkProfileInitParameters struct { + + // One or more allowed_host_ports blocks as defined below. 
+ AllowedHostPorts []AllowedHostPortsInitParameters `json:"allowedHostPorts,omitempty" tf:"allowed_host_ports,omitempty"` + + // A list of Application Security Group IDs which should be associated with this Node Pool. + ApplicationSecurityGroupIds []*string `json:"applicationSecurityGroupIds,omitempty" tf:"application_security_group_ids,omitempty"` + + // Specifies a mapping of tags to the instance-level public IPs. Changing this forces a new resource to be created. + // +mapType=granular + NodePublicIPTags map[string]*string `json:"nodePublicIpTags,omitempty" tf:"node_public_ip_tags,omitempty"` +} + +type NodeNetworkProfileObservation struct { + + // One or more allowed_host_ports blocks as defined below. + AllowedHostPorts []AllowedHostPortsObservation `json:"allowedHostPorts,omitempty" tf:"allowed_host_ports,omitempty"` + + // A list of Application Security Group IDs which should be associated with this Node Pool. + ApplicationSecurityGroupIds []*string `json:"applicationSecurityGroupIds,omitempty" tf:"application_security_group_ids,omitempty"` + + // Specifies a mapping of tags to the instance-level public IPs. Changing this forces a new resource to be created. + // +mapType=granular + NodePublicIPTags map[string]*string `json:"nodePublicIpTags,omitempty" tf:"node_public_ip_tags,omitempty"` +} + +type NodeNetworkProfileParameters struct { + + // One or more allowed_host_ports blocks as defined below. + // +kubebuilder:validation:Optional + AllowedHostPorts []AllowedHostPortsParameters `json:"allowedHostPorts,omitempty" tf:"allowed_host_ports,omitempty"` + + // A list of Application Security Group IDs which should be associated with this Node Pool. + // +kubebuilder:validation:Optional + ApplicationSecurityGroupIds []*string `json:"applicationSecurityGroupIds,omitempty" tf:"application_security_group_ids,omitempty"` + + // Specifies a mapping of tags to the instance-level public IPs. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + NodePublicIPTags map[string]*string `json:"nodePublicIpTags,omitempty" tf:"node_public_ip_tags,omitempty"` +} + +type NotAllowedInitParameters struct { + + // The end of a time span, formatted as an RFC3339 string. + End *string `json:"end,omitempty" tf:"end,omitempty"` + + // The start of a time span, formatted as an RFC3339 string. + Start *string `json:"start,omitempty" tf:"start,omitempty"` +} + +type NotAllowedObservation struct { + + // The end of a time span, formatted as an RFC3339 string. + End *string `json:"end,omitempty" tf:"end,omitempty"` + + // The start of a time span, formatted as an RFC3339 string. + Start *string `json:"start,omitempty" tf:"start,omitempty"` +} + +type NotAllowedParameters struct { + + // The end of a time span, formatted as an RFC3339 string. + // +kubebuilder:validation:Optional + End *string `json:"end" tf:"end,omitempty"` + + // The start of a time span, formatted as an RFC3339 string. + // +kubebuilder:validation:Optional + Start *string `json:"start" tf:"start,omitempty"` +} + +type OmsAgentIdentityInitParameters struct { +} + +type OmsAgentIdentityObservation struct { + + // The Client ID of the user-defined Managed Identity used by the OMS Agents. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The Object ID of the user-defined Managed Identity used by the OMS Agents. + ObjectID *string `json:"objectId,omitempty" tf:"object_id,omitempty"` + + // The ID of the User Assigned Identity used by the OMS Agents. + UserAssignedIdentityID *string `json:"userAssignedIdentityId,omitempty" tf:"user_assigned_identity_id,omitempty"` +} + +type OmsAgentIdentityParameters struct { +} + +type OmsAgentInitParameters struct { + + // The ID of the Log Analytics Workspace which the OMS Agent should send data to. 
+ LogAnalyticsWorkspaceID *string `json:"logAnalyticsWorkspaceId,omitempty" tf:"log_analytics_workspace_id,omitempty"` + + // Is managed identity authentication for monitoring enabled? + MsiAuthForMonitoringEnabled *bool `json:"msiAuthForMonitoringEnabled,omitempty" tf:"msi_auth_for_monitoring_enabled,omitempty"` +} + +type OmsAgentObservation struct { + + // The ID of the Log Analytics Workspace which the OMS Agent should send data to. + LogAnalyticsWorkspaceID *string `json:"logAnalyticsWorkspaceId,omitempty" tf:"log_analytics_workspace_id,omitempty"` + + // Is managed identity authentication for monitoring enabled? + MsiAuthForMonitoringEnabled *bool `json:"msiAuthForMonitoringEnabled,omitempty" tf:"msi_auth_for_monitoring_enabled,omitempty"` + + // An oms_agent_identity block is exported. The exported attributes are defined below. + OmsAgentIdentity []OmsAgentIdentityObservation `json:"omsAgentIdentity,omitempty" tf:"oms_agent_identity,omitempty"` +} + +type OmsAgentParameters struct { + + // The ID of the Log Analytics Workspace which the OMS Agent should send data to. + // +kubebuilder:validation:Optional + LogAnalyticsWorkspaceID *string `json:"logAnalyticsWorkspaceId" tf:"log_analytics_workspace_id,omitempty"` + + // Is managed identity authentication for monitoring enabled? + // +kubebuilder:validation:Optional + MsiAuthForMonitoringEnabled *bool `json:"msiAuthForMonitoringEnabled,omitempty" tf:"msi_auth_for_monitoring_enabled,omitempty"` +} + +type SSHKeyInitParameters struct { + + // The Public SSH Key used to access the cluster. + KeyData *string `json:"keyData,omitempty" tf:"key_data,omitempty"` +} + +type SSHKeyObservation struct { + + // The Public SSH Key used to access the cluster. + KeyData *string `json:"keyData,omitempty" tf:"key_data,omitempty"` +} + +type SSHKeyParameters struct { + + // The Public SSH Key used to access the cluster. 
+ // +kubebuilder:validation:Optional + KeyData *string `json:"keyData" tf:"key_data,omitempty"` +} + +type SecretIdentityInitParameters struct { +} + +type SecretIdentityObservation struct { + + // The Client ID of the user-defined Managed Identity used by the Secret Provider. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The Object ID of the user-defined Managed Identity used by the Secret Provider. + ObjectID *string `json:"objectId,omitempty" tf:"object_id,omitempty"` + + // The ID of the User Assigned Identity used by the Secret Provider. + UserAssignedIdentityID *string `json:"userAssignedIdentityId,omitempty" tf:"user_assigned_identity_id,omitempty"` +} + +type SecretIdentityParameters struct { +} + +type ServiceMeshProfileInitParameters struct { + + // Is Istio External Ingress Gateway enabled? + ExternalIngressGatewayEnabled *bool `json:"externalIngressGatewayEnabled,omitempty" tf:"external_ingress_gateway_enabled,omitempty"` + + // Is Istio Internal Ingress Gateway enabled? + InternalIngressGatewayEnabled *bool `json:"internalIngressGatewayEnabled,omitempty" tf:"internal_ingress_gateway_enabled,omitempty"` + + // The mode of the service mesh. Possible value is Istio. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` +} + +type ServiceMeshProfileObservation struct { + + // Is Istio External Ingress Gateway enabled? + ExternalIngressGatewayEnabled *bool `json:"externalIngressGatewayEnabled,omitempty" tf:"external_ingress_gateway_enabled,omitempty"` + + // Is Istio Internal Ingress Gateway enabled? + InternalIngressGatewayEnabled *bool `json:"internalIngressGatewayEnabled,omitempty" tf:"internal_ingress_gateway_enabled,omitempty"` + + // The mode of the service mesh. Possible value is Istio. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` +} + +type ServiceMeshProfileParameters struct { + + // Is Istio External Ingress Gateway enabled? 
+ // +kubebuilder:validation:Optional + ExternalIngressGatewayEnabled *bool `json:"externalIngressGatewayEnabled,omitempty" tf:"external_ingress_gateway_enabled,omitempty"` + + // Is Istio Internal Ingress Gateway enabled? + // +kubebuilder:validation:Optional + InternalIngressGatewayEnabled *bool `json:"internalIngressGatewayEnabled,omitempty" tf:"internal_ingress_gateway_enabled,omitempty"` + + // The mode of the service mesh. Possible value is Istio. + // +kubebuilder:validation:Optional + Mode *string `json:"mode" tf:"mode,omitempty"` +} + +type ServicePrincipalInitParameters struct { + + // The Client ID for the Service Principal. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` +} + +type ServicePrincipalObservation struct { + + // The Client ID for the Service Principal. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` +} + +type ServicePrincipalParameters struct { + + // The Client ID for the Service Principal. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The Client Secret for the Service Principal. + // +kubebuilder:validation:Required + ClientSecretSecretRef v1.SecretKeySelector `json:"clientSecretSecretRef" tf:"-"` +} + +type StorageProfileInitParameters struct { + + // Is the Blob CSI driver enabled? Defaults to false. + BlobDriverEnabled *bool `json:"blobDriverEnabled,omitempty" tf:"blob_driver_enabled,omitempty"` + + // Is the Disk CSI driver enabled? Defaults to true. + DiskDriverEnabled *bool `json:"diskDriverEnabled,omitempty" tf:"disk_driver_enabled,omitempty"` + + // Disk CSI Driver version to be used. Possible values are v1 and v2. Defaults to v1. + DiskDriverVersion *string `json:"diskDriverVersion,omitempty" tf:"disk_driver_version,omitempty"` + + // Is the File CSI driver enabled? Defaults to true. + FileDriverEnabled *bool `json:"fileDriverEnabled,omitempty" tf:"file_driver_enabled,omitempty"` + + // Is the Snapshot Controller enabled? 
Defaults to true. + SnapshotControllerEnabled *bool `json:"snapshotControllerEnabled,omitempty" tf:"snapshot_controller_enabled,omitempty"` +} + +type StorageProfileObservation struct { + + // Is the Blob CSI driver enabled? Defaults to false. + BlobDriverEnabled *bool `json:"blobDriverEnabled,omitempty" tf:"blob_driver_enabled,omitempty"` + + // Is the Disk CSI driver enabled? Defaults to true. + DiskDriverEnabled *bool `json:"diskDriverEnabled,omitempty" tf:"disk_driver_enabled,omitempty"` + + // Disk CSI Driver version to be used. Possible values are v1 and v2. Defaults to v1. + DiskDriverVersion *string `json:"diskDriverVersion,omitempty" tf:"disk_driver_version,omitempty"` + + // Is the File CSI driver enabled? Defaults to true. + FileDriverEnabled *bool `json:"fileDriverEnabled,omitempty" tf:"file_driver_enabled,omitempty"` + + // Is the Snapshot Controller enabled? Defaults to true. + SnapshotControllerEnabled *bool `json:"snapshotControllerEnabled,omitempty" tf:"snapshot_controller_enabled,omitempty"` +} + +type StorageProfileParameters struct { + + // Is the Blob CSI driver enabled? Defaults to false. + // +kubebuilder:validation:Optional + BlobDriverEnabled *bool `json:"blobDriverEnabled,omitempty" tf:"blob_driver_enabled,omitempty"` + + // Is the Disk CSI driver enabled? Defaults to true. + // +kubebuilder:validation:Optional + DiskDriverEnabled *bool `json:"diskDriverEnabled,omitempty" tf:"disk_driver_enabled,omitempty"` + + // Disk CSI Driver version to be used. Possible values are v1 and v2. Defaults to v1. + // +kubebuilder:validation:Optional + DiskDriverVersion *string `json:"diskDriverVersion,omitempty" tf:"disk_driver_version,omitempty"` + + // Is the File CSI driver enabled? Defaults to true. + // +kubebuilder:validation:Optional + FileDriverEnabled *bool `json:"fileDriverEnabled,omitempty" tf:"file_driver_enabled,omitempty"` + + // Is the Snapshot Controller enabled? Defaults to true. 
+ // +kubebuilder:validation:Optional + SnapshotControllerEnabled *bool `json:"snapshotControllerEnabled,omitempty" tf:"snapshot_controller_enabled,omitempty"` +} + +type SysctlConfigInitParameters struct { + + // The sysctl setting fs.aio-max-nr. Must be between 65536 and 6553500. + FsAioMaxNr *float64 `json:"fsAioMaxNr,omitempty" tf:"fs_aio_max_nr,omitempty"` + + // The sysctl setting fs.file-max. Must be between 8192 and 12000500. + FsFileMax *float64 `json:"fsFileMax,omitempty" tf:"fs_file_max,omitempty"` + + // The sysctl setting fs.inotify.max_user_watches. Must be between 781250 and 2097152. + FsInotifyMaxUserWatches *float64 `json:"fsInotifyMaxUserWatches,omitempty" tf:"fs_inotify_max_user_watches,omitempty"` + + // The sysctl setting fs.nr_open. Must be between 8192 and 20000500. + FsNrOpen *float64 `json:"fsNrOpen,omitempty" tf:"fs_nr_open,omitempty"` + + // The sysctl setting kernel.threads-max. Must be between 20 and 513785. + KernelThreadsMax *float64 `json:"kernelThreadsMax,omitempty" tf:"kernel_threads_max,omitempty"` + + // The sysctl setting net.core.netdev_max_backlog. Must be between 1000 and 3240000. + NetCoreNetdevMaxBacklog *float64 `json:"netCoreNetdevMaxBacklog,omitempty" tf:"net_core_netdev_max_backlog,omitempty"` + + // The sysctl setting net.core.optmem_max. Must be between 20480 and 4194304. + NetCoreOptmemMax *float64 `json:"netCoreOptmemMax,omitempty" tf:"net_core_optmem_max,omitempty"` + + // The sysctl setting net.core.rmem_default. Must be between 212992 and 134217728. + NetCoreRmemDefault *float64 `json:"netCoreRmemDefault,omitempty" tf:"net_core_rmem_default,omitempty"` + + // The sysctl setting net.core.rmem_max. Must be between 212992 and 134217728. + NetCoreRmemMax *float64 `json:"netCoreRmemMax,omitempty" tf:"net_core_rmem_max,omitempty"` + + // The sysctl setting net.core.somaxconn. Must be between 4096 and 3240000. 
+ NetCoreSomaxconn *float64 `json:"netCoreSomaxconn,omitempty" tf:"net_core_somaxconn,omitempty"` + + // The sysctl setting net.core.wmem_default. Must be between 212992 and 134217728. + NetCoreWmemDefault *float64 `json:"netCoreWmemDefault,omitempty" tf:"net_core_wmem_default,omitempty"` + + // The sysctl setting net.core.wmem_max. Must be between 212992 and 134217728. + NetCoreWmemMax *float64 `json:"netCoreWmemMax,omitempty" tf:"net_core_wmem_max,omitempty"` + + // The sysctl setting net.ipv4.ip_local_port_range max value. Must be between 32768 and 65535. + NetIPv4IPLocalPortRangeMax *float64 `json:"netIpv4IpLocalPortRangeMax,omitempty" tf:"net_ipv4_ip_local_port_range_max,omitempty"` + + // The sysctl setting net.ipv4.ip_local_port_range min value. Must be between 1024 and 60999. + NetIPv4IPLocalPortRangeMin *float64 `json:"netIpv4IpLocalPortRangeMin,omitempty" tf:"net_ipv4_ip_local_port_range_min,omitempty"` + + // The sysctl setting net.ipv4.neigh.default.gc_thresh1. Must be between 128 and 80000. + NetIPv4NeighDefaultGcThresh1 *float64 `json:"netIpv4NeighDefaultGcThresh1,omitempty" tf:"net_ipv4_neigh_default_gc_thresh1,omitempty"` + + // The sysctl setting net.ipv4.neigh.default.gc_thresh2. Must be between 512 and 90000. + NetIPv4NeighDefaultGcThresh2 *float64 `json:"netIpv4NeighDefaultGcThresh2,omitempty" tf:"net_ipv4_neigh_default_gc_thresh2,omitempty"` + + // The sysctl setting net.ipv4.neigh.default.gc_thresh3. Must be between 1024 and 100000. + NetIPv4NeighDefaultGcThresh3 *float64 `json:"netIpv4NeighDefaultGcThresh3,omitempty" tf:"net_ipv4_neigh_default_gc_thresh3,omitempty"` + + // The sysctl setting net.ipv4.tcp_fin_timeout. Must be between 5 and 120. + NetIPv4TCPFinTimeout *float64 `json:"netIpv4TcpFinTimeout,omitempty" tf:"net_ipv4_tcp_fin_timeout,omitempty"` + + // The sysctl setting net.ipv4.tcp_keepalive_intvl. Must be between 10 and 90. 
+ NetIPv4TCPKeepaliveIntvl *float64 `json:"netIpv4TcpKeepaliveIntvl,omitempty" tf:"net_ipv4_tcp_keepalive_intvl,omitempty"` + + // The sysctl setting net.ipv4.tcp_keepalive_probes. Must be between 1 and 15. + NetIPv4TCPKeepaliveProbes *float64 `json:"netIpv4TcpKeepaliveProbes,omitempty" tf:"net_ipv4_tcp_keepalive_probes,omitempty"` + + // The sysctl setting net.ipv4.tcp_keepalive_time. Must be between 30 and 432000. + NetIPv4TCPKeepaliveTime *float64 `json:"netIpv4TcpKeepaliveTime,omitempty" tf:"net_ipv4_tcp_keepalive_time,omitempty"` + + // The sysctl setting net.ipv4.tcp_max_syn_backlog. Must be between 128 and 3240000. + NetIPv4TCPMaxSynBacklog *float64 `json:"netIpv4TcpMaxSynBacklog,omitempty" tf:"net_ipv4_tcp_max_syn_backlog,omitempty"` + + // The sysctl setting net.ipv4.tcp_max_tw_buckets. Must be between 8000 and 1440000. + NetIPv4TCPMaxTwBuckets *float64 `json:"netIpv4TcpMaxTwBuckets,omitempty" tf:"net_ipv4_tcp_max_tw_buckets,omitempty"` + + // The sysctl setting net.ipv4.tcp_tw_reuse. + NetIPv4TCPTwReuse *bool `json:"netIpv4TcpTwReuse,omitempty" tf:"net_ipv4_tcp_tw_reuse,omitempty"` + + // The sysctl setting net.netfilter.nf_conntrack_buckets. Must be between 65536 and 524288. + NetNetfilterNfConntrackBuckets *float64 `json:"netNetfilterNfConntrackBuckets,omitempty" tf:"net_netfilter_nf_conntrack_buckets,omitempty"` + + // The sysctl setting net.netfilter.nf_conntrack_max. Must be between 131072 and 2097152. + NetNetfilterNfConntrackMax *float64 `json:"netNetfilterNfConntrackMax,omitempty" tf:"net_netfilter_nf_conntrack_max,omitempty"` + + // The sysctl setting vm.max_map_count. Must be between 65530 and 262144. + VMMaxMapCount *float64 `json:"vmMaxMapCount,omitempty" tf:"vm_max_map_count,omitempty"` + + // The sysctl setting vm.swappiness. Must be between 0 and 100. + VMSwappiness *float64 `json:"vmSwappiness,omitempty" tf:"vm_swappiness,omitempty"` + + // The sysctl setting vm.vfs_cache_pressure. Must be between 0 and 100. 
+ VMVfsCachePressure *float64 `json:"vmVfsCachePressure,omitempty" tf:"vm_vfs_cache_pressure,omitempty"` +} + +type SysctlConfigObservation struct { + + // The sysctl setting fs.aio-max-nr. Must be between 65536 and 6553500. + FsAioMaxNr *float64 `json:"fsAioMaxNr,omitempty" tf:"fs_aio_max_nr,omitempty"` + + // The sysctl setting fs.file-max. Must be between 8192 and 12000500. + FsFileMax *float64 `json:"fsFileMax,omitempty" tf:"fs_file_max,omitempty"` + + // The sysctl setting fs.inotify.max_user_watches. Must be between 781250 and 2097152. + FsInotifyMaxUserWatches *float64 `json:"fsInotifyMaxUserWatches,omitempty" tf:"fs_inotify_max_user_watches,omitempty"` + + // The sysctl setting fs.nr_open. Must be between 8192 and 20000500. + FsNrOpen *float64 `json:"fsNrOpen,omitempty" tf:"fs_nr_open,omitempty"` + + // The sysctl setting kernel.threads-max. Must be between 20 and 513785. + KernelThreadsMax *float64 `json:"kernelThreadsMax,omitempty" tf:"kernel_threads_max,omitempty"` + + // The sysctl setting net.core.netdev_max_backlog. Must be between 1000 and 3240000. + NetCoreNetdevMaxBacklog *float64 `json:"netCoreNetdevMaxBacklog,omitempty" tf:"net_core_netdev_max_backlog,omitempty"` + + // The sysctl setting net.core.optmem_max. Must be between 20480 and 4194304. + NetCoreOptmemMax *float64 `json:"netCoreOptmemMax,omitempty" tf:"net_core_optmem_max,omitempty"` + + // The sysctl setting net.core.rmem_default. Must be between 212992 and 134217728. + NetCoreRmemDefault *float64 `json:"netCoreRmemDefault,omitempty" tf:"net_core_rmem_default,omitempty"` + + // The sysctl setting net.core.rmem_max. Must be between 212992 and 134217728. + NetCoreRmemMax *float64 `json:"netCoreRmemMax,omitempty" tf:"net_core_rmem_max,omitempty"` + + // The sysctl setting net.core.somaxconn. Must be between 4096 and 3240000. + NetCoreSomaxconn *float64 `json:"netCoreSomaxconn,omitempty" tf:"net_core_somaxconn,omitempty"` + + // The sysctl setting net.core.wmem_default. 
Must be between 212992 and 134217728. + NetCoreWmemDefault *float64 `json:"netCoreWmemDefault,omitempty" tf:"net_core_wmem_default,omitempty"` + + // The sysctl setting net.core.wmem_max. Must be between 212992 and 134217728. + NetCoreWmemMax *float64 `json:"netCoreWmemMax,omitempty" tf:"net_core_wmem_max,omitempty"` + + // The sysctl setting net.ipv4.ip_local_port_range max value. Must be between 32768 and 65535. + NetIPv4IPLocalPortRangeMax *float64 `json:"netIpv4IpLocalPortRangeMax,omitempty" tf:"net_ipv4_ip_local_port_range_max,omitempty"` + + // The sysctl setting net.ipv4.ip_local_port_range min value. Must be between 1024 and 60999. + NetIPv4IPLocalPortRangeMin *float64 `json:"netIpv4IpLocalPortRangeMin,omitempty" tf:"net_ipv4_ip_local_port_range_min,omitempty"` + + // The sysctl setting net.ipv4.neigh.default.gc_thresh1. Must be between 128 and 80000. + NetIPv4NeighDefaultGcThresh1 *float64 `json:"netIpv4NeighDefaultGcThresh1,omitempty" tf:"net_ipv4_neigh_default_gc_thresh1,omitempty"` + + // The sysctl setting net.ipv4.neigh.default.gc_thresh2. Must be between 512 and 90000. + NetIPv4NeighDefaultGcThresh2 *float64 `json:"netIpv4NeighDefaultGcThresh2,omitempty" tf:"net_ipv4_neigh_default_gc_thresh2,omitempty"` + + // The sysctl setting net.ipv4.neigh.default.gc_thresh3. Must be between 1024 and 100000. + NetIPv4NeighDefaultGcThresh3 *float64 `json:"netIpv4NeighDefaultGcThresh3,omitempty" tf:"net_ipv4_neigh_default_gc_thresh3,omitempty"` + + // The sysctl setting net.ipv4.tcp_fin_timeout. Must be between 5 and 120. + NetIPv4TCPFinTimeout *float64 `json:"netIpv4TcpFinTimeout,omitempty" tf:"net_ipv4_tcp_fin_timeout,omitempty"` + + // The sysctl setting net.ipv4.tcp_keepalive_intvl. Must be between 10 and 90. + NetIPv4TCPKeepaliveIntvl *float64 `json:"netIpv4TcpKeepaliveIntvl,omitempty" tf:"net_ipv4_tcp_keepalive_intvl,omitempty"` + + // The sysctl setting net.ipv4.tcp_keepalive_probes. Must be between 1 and 15. 
+ NetIPv4TCPKeepaliveProbes *float64 `json:"netIpv4TcpKeepaliveProbes,omitempty" tf:"net_ipv4_tcp_keepalive_probes,omitempty"` + + // The sysctl setting net.ipv4.tcp_keepalive_time. Must be between 30 and 432000. + NetIPv4TCPKeepaliveTime *float64 `json:"netIpv4TcpKeepaliveTime,omitempty" tf:"net_ipv4_tcp_keepalive_time,omitempty"` + + // The sysctl setting net.ipv4.tcp_max_syn_backlog. Must be between 128 and 3240000. + NetIPv4TCPMaxSynBacklog *float64 `json:"netIpv4TcpMaxSynBacklog,omitempty" tf:"net_ipv4_tcp_max_syn_backlog,omitempty"` + + // The sysctl setting net.ipv4.tcp_max_tw_buckets. Must be between 8000 and 1440000. + NetIPv4TCPMaxTwBuckets *float64 `json:"netIpv4TcpMaxTwBuckets,omitempty" tf:"net_ipv4_tcp_max_tw_buckets,omitempty"` + + // The sysctl setting net.ipv4.tcp_tw_reuse. + NetIPv4TCPTwReuse *bool `json:"netIpv4TcpTwReuse,omitempty" tf:"net_ipv4_tcp_tw_reuse,omitempty"` + + // The sysctl setting net.netfilter.nf_conntrack_buckets. Must be between 65536 and 524288. + NetNetfilterNfConntrackBuckets *float64 `json:"netNetfilterNfConntrackBuckets,omitempty" tf:"net_netfilter_nf_conntrack_buckets,omitempty"` + + // The sysctl setting net.netfilter.nf_conntrack_max. Must be between 131072 and 2097152. + NetNetfilterNfConntrackMax *float64 `json:"netNetfilterNfConntrackMax,omitempty" tf:"net_netfilter_nf_conntrack_max,omitempty"` + + // The sysctl setting vm.max_map_count. Must be between 65530 and 262144. + VMMaxMapCount *float64 `json:"vmMaxMapCount,omitempty" tf:"vm_max_map_count,omitempty"` + + // The sysctl setting vm.swappiness. Must be between 0 and 100. + VMSwappiness *float64 `json:"vmSwappiness,omitempty" tf:"vm_swappiness,omitempty"` + + // The sysctl setting vm.vfs_cache_pressure. Must be between 0 and 100. + VMVfsCachePressure *float64 `json:"vmVfsCachePressure,omitempty" tf:"vm_vfs_cache_pressure,omitempty"` +} + +type SysctlConfigParameters struct { + + // The sysctl setting fs.aio-max-nr. Must be between 65536 and 6553500. 
+ // +kubebuilder:validation:Optional + FsAioMaxNr *float64 `json:"fsAioMaxNr,omitempty" tf:"fs_aio_max_nr,omitempty"` + + // The sysctl setting fs.file-max. Must be between 8192 and 12000500. + // +kubebuilder:validation:Optional + FsFileMax *float64 `json:"fsFileMax,omitempty" tf:"fs_file_max,omitempty"` + + // The sysctl setting fs.inotify.max_user_watches. Must be between 781250 and 2097152. + // +kubebuilder:validation:Optional + FsInotifyMaxUserWatches *float64 `json:"fsInotifyMaxUserWatches,omitempty" tf:"fs_inotify_max_user_watches,omitempty"` + + // The sysctl setting fs.nr_open. Must be between 8192 and 20000500. + // +kubebuilder:validation:Optional + FsNrOpen *float64 `json:"fsNrOpen,omitempty" tf:"fs_nr_open,omitempty"` + + // The sysctl setting kernel.threads-max. Must be between 20 and 513785. + // +kubebuilder:validation:Optional + KernelThreadsMax *float64 `json:"kernelThreadsMax,omitempty" tf:"kernel_threads_max,omitempty"` + + // The sysctl setting net.core.netdev_max_backlog. Must be between 1000 and 3240000. + // +kubebuilder:validation:Optional + NetCoreNetdevMaxBacklog *float64 `json:"netCoreNetdevMaxBacklog,omitempty" tf:"net_core_netdev_max_backlog,omitempty"` + + // The sysctl setting net.core.optmem_max. Must be between 20480 and 4194304. + // +kubebuilder:validation:Optional + NetCoreOptmemMax *float64 `json:"netCoreOptmemMax,omitempty" tf:"net_core_optmem_max,omitempty"` + + // The sysctl setting net.core.rmem_default. Must be between 212992 and 134217728. + // +kubebuilder:validation:Optional + NetCoreRmemDefault *float64 `json:"netCoreRmemDefault,omitempty" tf:"net_core_rmem_default,omitempty"` + + // The sysctl setting net.core.rmem_max. Must be between 212992 and 134217728. + // +kubebuilder:validation:Optional + NetCoreRmemMax *float64 `json:"netCoreRmemMax,omitempty" tf:"net_core_rmem_max,omitempty"` + + // The sysctl setting net.core.somaxconn. Must be between 4096 and 3240000. 
+ // +kubebuilder:validation:Optional + NetCoreSomaxconn *float64 `json:"netCoreSomaxconn,omitempty" tf:"net_core_somaxconn,omitempty"` + + // The sysctl setting net.core.wmem_default. Must be between 212992 and 134217728. + // +kubebuilder:validation:Optional + NetCoreWmemDefault *float64 `json:"netCoreWmemDefault,omitempty" tf:"net_core_wmem_default,omitempty"` + + // The sysctl setting net.core.wmem_max. Must be between 212992 and 134217728. + // +kubebuilder:validation:Optional + NetCoreWmemMax *float64 `json:"netCoreWmemMax,omitempty" tf:"net_core_wmem_max,omitempty"` + + // The sysctl setting net.ipv4.ip_local_port_range max value. Must be between 32768 and 65535. + // +kubebuilder:validation:Optional + NetIPv4IPLocalPortRangeMax *float64 `json:"netIpv4IpLocalPortRangeMax,omitempty" tf:"net_ipv4_ip_local_port_range_max,omitempty"` + + // The sysctl setting net.ipv4.ip_local_port_range min value. Must be between 1024 and 60999. + // +kubebuilder:validation:Optional + NetIPv4IPLocalPortRangeMin *float64 `json:"netIpv4IpLocalPortRangeMin,omitempty" tf:"net_ipv4_ip_local_port_range_min,omitempty"` + + // The sysctl setting net.ipv4.neigh.default.gc_thresh1. Must be between 128 and 80000. + // +kubebuilder:validation:Optional + NetIPv4NeighDefaultGcThresh1 *float64 `json:"netIpv4NeighDefaultGcThresh1,omitempty" tf:"net_ipv4_neigh_default_gc_thresh1,omitempty"` + + // The sysctl setting net.ipv4.neigh.default.gc_thresh2. Must be between 512 and 90000. + // +kubebuilder:validation:Optional + NetIPv4NeighDefaultGcThresh2 *float64 `json:"netIpv4NeighDefaultGcThresh2,omitempty" tf:"net_ipv4_neigh_default_gc_thresh2,omitempty"` + + // The sysctl setting net.ipv4.neigh.default.gc_thresh3. Must be between 1024 and 100000. + // +kubebuilder:validation:Optional + NetIPv4NeighDefaultGcThresh3 *float64 `json:"netIpv4NeighDefaultGcThresh3,omitempty" tf:"net_ipv4_neigh_default_gc_thresh3,omitempty"` + + // The sysctl setting net.ipv4.tcp_fin_timeout. Must be between 5 and 120. 
+ // +kubebuilder:validation:Optional + NetIPv4TCPFinTimeout *float64 `json:"netIpv4TcpFinTimeout,omitempty" tf:"net_ipv4_tcp_fin_timeout,omitempty"` + + // The sysctl setting net.ipv4.tcp_keepalive_intvl. Must be between 10 and 90. + // +kubebuilder:validation:Optional + NetIPv4TCPKeepaliveIntvl *float64 `json:"netIpv4TcpKeepaliveIntvl,omitempty" tf:"net_ipv4_tcp_keepalive_intvl,omitempty"` + + // The sysctl setting net.ipv4.tcp_keepalive_probes. Must be between 1 and 15. + // +kubebuilder:validation:Optional + NetIPv4TCPKeepaliveProbes *float64 `json:"netIpv4TcpKeepaliveProbes,omitempty" tf:"net_ipv4_tcp_keepalive_probes,omitempty"` + + // The sysctl setting net.ipv4.tcp_keepalive_time. Must be between 30 and 432000. + // +kubebuilder:validation:Optional + NetIPv4TCPKeepaliveTime *float64 `json:"netIpv4TcpKeepaliveTime,omitempty" tf:"net_ipv4_tcp_keepalive_time,omitempty"` + + // The sysctl setting net.ipv4.tcp_max_syn_backlog. Must be between 128 and 3240000. + // +kubebuilder:validation:Optional + NetIPv4TCPMaxSynBacklog *float64 `json:"netIpv4TcpMaxSynBacklog,omitempty" tf:"net_ipv4_tcp_max_syn_backlog,omitempty"` + + // The sysctl setting net.ipv4.tcp_max_tw_buckets. Must be between 8000 and 1440000. + // +kubebuilder:validation:Optional + NetIPv4TCPMaxTwBuckets *float64 `json:"netIpv4TcpMaxTwBuckets,omitempty" tf:"net_ipv4_tcp_max_tw_buckets,omitempty"` + + // The sysctl setting net.ipv4.tcp_tw_reuse. + // +kubebuilder:validation:Optional + NetIPv4TCPTwReuse *bool `json:"netIpv4TcpTwReuse,omitempty" tf:"net_ipv4_tcp_tw_reuse,omitempty"` + + // The sysctl setting net.netfilter.nf_conntrack_buckets. Must be between 65536 and 524288. + // +kubebuilder:validation:Optional + NetNetfilterNfConntrackBuckets *float64 `json:"netNetfilterNfConntrackBuckets,omitempty" tf:"net_netfilter_nf_conntrack_buckets,omitempty"` + + // The sysctl setting net.netfilter.nf_conntrack_max. Must be between 131072 and 2097152. 
+ // +kubebuilder:validation:Optional + NetNetfilterNfConntrackMax *float64 `json:"netNetfilterNfConntrackMax,omitempty" tf:"net_netfilter_nf_conntrack_max,omitempty"` + + // The sysctl setting vm.max_map_count. Must be between 65530 and 262144. + // +kubebuilder:validation:Optional + VMMaxMapCount *float64 `json:"vmMaxMapCount,omitempty" tf:"vm_max_map_count,omitempty"` + + // The sysctl setting vm.swappiness. Must be between 0 and 100. + // +kubebuilder:validation:Optional + VMSwappiness *float64 `json:"vmSwappiness,omitempty" tf:"vm_swappiness,omitempty"` + + // The sysctl setting vm.vfs_cache_pressure. Must be between 0 and 100. + // +kubebuilder:validation:Optional + VMVfsCachePressure *float64 `json:"vmVfsCachePressure,omitempty" tf:"vm_vfs_cache_pressure,omitempty"` +} + +type UpgradeSettingsInitParameters struct { + + // The maximum number or percentage of nodes which will be added to the Node Pool size during an upgrade. + MaxSurge *string `json:"maxSurge,omitempty" tf:"max_surge,omitempty"` +} + +type UpgradeSettingsObservation struct { + + // The maximum number or percentage of nodes which will be added to the Node Pool size during an upgrade. + MaxSurge *string `json:"maxSurge,omitempty" tf:"max_surge,omitempty"` +} + +type UpgradeSettingsParameters struct { + + // The maximum number or percentage of nodes which will be added to the Node Pool size during an upgrade. + // +kubebuilder:validation:Optional + MaxSurge *string `json:"maxSurge" tf:"max_surge,omitempty"` +} + +type WebAppRoutingIdentityInitParameters struct { +} + +type WebAppRoutingIdentityObservation struct { + + // The Client ID of the user-defined Managed Identity used for Web App Routing. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The Object ID of the user-defined Managed Identity used for Web App Routing + ObjectID *string `json:"objectId,omitempty" tf:"object_id,omitempty"` + + // The ID of the User Assigned Identity used for Web App Routing. 
+ UserAssignedIdentityID *string `json:"userAssignedIdentityId,omitempty" tf:"user_assigned_identity_id,omitempty"` +} + +type WebAppRoutingIdentityParameters struct { +} + +type WebAppRoutingInitParameters struct { + + // Specifies the ID of the DNS Zone in which DNS entries are created for applications deployed to the cluster when Web App Routing is enabled. For Bring-Your-Own DNS zones this property should be set to an empty string "". + DNSZoneID *string `json:"dnsZoneId,omitempty" tf:"dns_zone_id,omitempty"` +} + +type WebAppRoutingObservation struct { + + // Specifies the ID of the DNS Zone in which DNS entries are created for applications deployed to the cluster when Web App Routing is enabled. For Bring-Your-Own DNS zones this property should be set to an empty string "". + DNSZoneID *string `json:"dnsZoneId,omitempty" tf:"dns_zone_id,omitempty"` + + // A web_app_routing_identity block is exported. The exported attributes are defined below. + WebAppRoutingIdentity []WebAppRoutingIdentityObservation `json:"webAppRoutingIdentity,omitempty" tf:"web_app_routing_identity,omitempty"` +} + +type WebAppRoutingParameters struct { + + // Specifies the ID of the DNS Zone in which DNS entries are created for applications deployed to the cluster when Web App Routing is enabled. For Bring-Your-Own DNS zones this property should be set to an empty string "". + // +kubebuilder:validation:Optional + DNSZoneID *string `json:"dnsZoneId" tf:"dns_zone_id,omitempty"` +} + +type WindowsProfileInitParameters struct { + + // The Admin Username for Windows VMs. Changing this forces a new resource to be created. + AdminUsername *string `json:"adminUsername,omitempty" tf:"admin_username,omitempty"` + + // A gmsa block as defined below. + Gmsa *GmsaInitParameters `json:"gmsa,omitempty" tf:"gmsa,omitempty"` + + // Specifies the type of on-premise license which should be used for Node Pool Windows Virtual Machine. At this time the only possible value is Windows_Server. 
+ License *string `json:"license,omitempty" tf:"license,omitempty"` +} + +type WindowsProfileObservation struct { + + // The Admin Username for Windows VMs. Changing this forces a new resource to be created. + AdminUsername *string `json:"adminUsername,omitempty" tf:"admin_username,omitempty"` + + // A gmsa block as defined below. + Gmsa *GmsaObservation `json:"gmsa,omitempty" tf:"gmsa,omitempty"` + + // Specifies the type of on-premise license which should be used for Node Pool Windows Virtual Machine. At this time the only possible value is Windows_Server. + License *string `json:"license,omitempty" tf:"license,omitempty"` +} + +type WindowsProfileParameters struct { + + // The Admin Password for Windows VMs. Length must be between 14 and 123 characters. + // +kubebuilder:validation:Optional + AdminPasswordSecretRef *v1.SecretKeySelector `json:"adminPasswordSecretRef,omitempty" tf:"-"` + + // The Admin Username for Windows VMs. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + AdminUsername *string `json:"adminUsername" tf:"admin_username,omitempty"` + + // A gmsa block as defined below. + // +kubebuilder:validation:Optional + Gmsa *GmsaParameters `json:"gmsa,omitempty" tf:"gmsa,omitempty"` + + // Specifies the type of on-premise license which should be used for Node Pool Windows Virtual Machine. At this time the only possible value is Windows_Server. + // +kubebuilder:validation:Optional + License *string `json:"license,omitempty" tf:"license,omitempty"` +} + +type WorkloadAutoscalerProfileInitParameters struct { + + // Specifies whether KEDA Autoscaler can be used for workloads. + KedaEnabled *bool `json:"kedaEnabled,omitempty" tf:"keda_enabled,omitempty"` + + // Specifies whether Vertical Pod Autoscaler should be enabled. 
+ VerticalPodAutoscalerEnabled *bool `json:"verticalPodAutoscalerEnabled,omitempty" tf:"vertical_pod_autoscaler_enabled,omitempty"` +} + +type WorkloadAutoscalerProfileObservation struct { + + // Specifies whether KEDA Autoscaler can be used for workloads. + KedaEnabled *bool `json:"kedaEnabled,omitempty" tf:"keda_enabled,omitempty"` + + // Which resources values should be controlled. + VerticalPodAutoscalerControlledValues *string `json:"verticalPodAutoscalerControlledValues,omitempty" tf:"vertical_pod_autoscaler_controlled_values,omitempty"` + + // Specifies whether Vertical Pod Autoscaler should be enabled. + VerticalPodAutoscalerEnabled *bool `json:"verticalPodAutoscalerEnabled,omitempty" tf:"vertical_pod_autoscaler_enabled,omitempty"` + + // How the autoscaler applies changes to pod resources. + VerticalPodAutoscalerUpdateMode *string `json:"verticalPodAutoscalerUpdateMode,omitempty" tf:"vertical_pod_autoscaler_update_mode,omitempty"` +} + +type WorkloadAutoscalerProfileParameters struct { + + // Specifies whether KEDA Autoscaler can be used for workloads. + // +kubebuilder:validation:Optional + KedaEnabled *bool `json:"kedaEnabled,omitempty" tf:"keda_enabled,omitempty"` + + // Specifies whether Vertical Pod Autoscaler should be enabled. + // +kubebuilder:validation:Optional + VerticalPodAutoscalerEnabled *bool `json:"verticalPodAutoscalerEnabled,omitempty" tf:"vertical_pod_autoscaler_enabled,omitempty"` +} + +// KubernetesClusterSpec defines the desired state of KubernetesCluster +type KubernetesClusterSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider KubernetesClusterParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. 
+ // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider KubernetesClusterInitParameters `json:"initProvider,omitempty"` +} + +// KubernetesClusterStatus defines the observed state of KubernetesCluster. +type KubernetesClusterStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider KubernetesClusterObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// KubernetesCluster is the Schema for the KubernetesClusters API. Manages a managed Kubernetes Cluster (also known as AKS / Azure Kubernetes Service) +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type KubernetesCluster struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.defaultNodePool) || (has(self.initProvider) && has(self.initProvider.defaultNodePool))",message="spec.forProvider.defaultNodePool is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in 
self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + Spec KubernetesClusterSpec `json:"spec"` + Status KubernetesClusterStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// KubernetesClusterList contains a list of KubernetesClusters +type KubernetesClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []KubernetesCluster `json:"items"` +} + +// Repository type metadata. +var ( + KubernetesCluster_Kind = "KubernetesCluster" + KubernetesCluster_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: KubernetesCluster_Kind}.String() + KubernetesCluster_KindAPIVersion = KubernetesCluster_Kind + "." + CRDGroupVersion.String() + KubernetesCluster_GroupVersionKind = CRDGroupVersion.WithKind(KubernetesCluster_Kind) +) + +func init() { + SchemeBuilder.Register(&KubernetesCluster{}, &KubernetesClusterList{}) +} diff --git a/apis/containerservice/v1beta2/zz_kubernetesclusternodepool_terraformed.go b/apis/containerservice/v1beta2/zz_kubernetesclusternodepool_terraformed.go new file mode 100755 index 000000000..67b413ea1 --- /dev/null +++ b/apis/containerservice/v1beta2/zz_kubernetesclusternodepool_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this KubernetesClusterNodePool +func (mg *KubernetesClusterNodePool) GetTerraformResourceType() string { + return "azurerm_kubernetes_cluster_node_pool" +} + +// GetConnectionDetailsMapping for this KubernetesClusterNodePool +func (tr *KubernetesClusterNodePool) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this KubernetesClusterNodePool +func (tr *KubernetesClusterNodePool) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this KubernetesClusterNodePool +func (tr *KubernetesClusterNodePool) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this KubernetesClusterNodePool +func (tr *KubernetesClusterNodePool) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this KubernetesClusterNodePool +func (tr *KubernetesClusterNodePool) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this KubernetesClusterNodePool +func (tr *KubernetesClusterNodePool) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// 
GetInitParameters of this KubernetesClusterNodePool +func (tr *KubernetesClusterNodePool) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this KubernetesClusterNodePool +func (tr *KubernetesClusterNodePool) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this KubernetesClusterNodePool using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *KubernetesClusterNodePool) LateInitialize(attrs []byte) (bool, error) { + params := &KubernetesClusterNodePoolParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *KubernetesClusterNodePool) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/containerservice/v1beta2/zz_kubernetesclusternodepool_types.go b/apis/containerservice/v1beta2/zz_kubernetesclusternodepool_types.go new file mode 100755 index 000000000..585d4abf3 --- /dev/null +++ b/apis/containerservice/v1beta2/zz_kubernetesclusternodepool_types.go @@ -0,0 +1,1142 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type KubernetesClusterNodePoolInitParameters struct { + + // Specifies the ID of the Capacity Reservation Group where this Node Pool should exist. Changing this forces a new resource to be created. + CapacityReservationGroupID *string `json:"capacityReservationGroupId,omitempty" tf:"capacity_reservation_group_id,omitempty"` + + // Specifies whether to trust a Custom CA. + CustomCATrustEnabled *bool `json:"customCaTrustEnabled,omitempty" tf:"custom_ca_trust_enabled,omitempty"` + + // Whether to enable auto-scaler. 
+ EnableAutoScaling *bool `json:"enableAutoScaling,omitempty" tf:"enable_auto_scaling,omitempty"` + + // Should the nodes in this Node Pool have host encryption enabled? Changing this forces a new resource to be created. + EnableHostEncryption *bool `json:"enableHostEncryption,omitempty" tf:"enable_host_encryption,omitempty"` + + // Should each node have a Public IP Address? Changing this forces a new resource to be created. + EnableNodePublicIP *bool `json:"enableNodePublicIp,omitempty" tf:"enable_node_public_ip,omitempty"` + + // The Eviction Policy which should be used for Virtual Machines within the Virtual Machine Scale Set powering this Node Pool. Possible values are Deallocate and Delete. Changing this forces a new resource to be created. + EvictionPolicy *string `json:"evictionPolicy,omitempty" tf:"eviction_policy,omitempty"` + + // Should the nodes in this Node Pool have Federal Information Processing Standard enabled? Changing this forces a new resource to be created. + FipsEnabled *bool `json:"fipsEnabled,omitempty" tf:"fips_enabled,omitempty"` + + // Specifies the GPU MIG instance profile for supported GPU VM SKU. The allowed values are MIG1g, MIG2g, MIG3g, MIG4g and MIG7g. Changing this forces a new resource to be created. + GpuInstance *string `json:"gpuInstance,omitempty" tf:"gpu_instance,omitempty"` + + // The fully qualified resource ID of the Dedicated Host Group to provision virtual machines from. Changing this forces a new resource to be created. + HostGroupID *string `json:"hostGroupId,omitempty" tf:"host_group_id,omitempty"` + + // A kubelet_config block as defined below. Changing this forces a new resource to be created. + KubeletConfig *KubernetesClusterNodePoolKubeletConfigInitParameters `json:"kubeletConfig,omitempty" tf:"kubelet_config,omitempty"` + + // The type of disk used by kubelet. Possible values are OS and Temporary. 
+ KubeletDiskType *string `json:"kubeletDiskType,omitempty" tf:"kubelet_disk_type,omitempty"` + + // A linux_os_config block as defined below. Changing this forces a new resource to be created. + LinuxOsConfig *KubernetesClusterNodePoolLinuxOsConfigInitParameters `json:"linuxOsConfig,omitempty" tf:"linux_os_config,omitempty"` + + // The maximum number of nodes which should exist within this Node Pool. Valid values are between 0 and 1000 and must be greater than or equal to min_count. + MaxCount *float64 `json:"maxCount,omitempty" tf:"max_count,omitempty"` + + // The maximum number of pods that can run on each agent. Changing this forces a new resource to be created. + MaxPods *float64 `json:"maxPods,omitempty" tf:"max_pods,omitempty"` + + // A base64-encoded string which will be written to /etc/motd after decoding. This allows customization of the message of the day for Linux nodes. It cannot be specified for Windows nodes and must be a static string (i.e. will be printed raw and not executed as a script). Changing this forces a new resource to be created. + MessageOfTheDay *string `json:"messageOfTheDay,omitempty" tf:"message_of_the_day,omitempty"` + + // The minimum number of nodes which should exist within this Node Pool. Valid values are between 0 and 1000 and must be less than or equal to max_count. + MinCount *float64 `json:"minCount,omitempty" tf:"min_count,omitempty"` + + // Should this Node Pool be used for System or User resources? Possible values are System and User. Defaults to User. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // The initial number of nodes which should exist within this Node Pool. Valid values are between 0 and 1000 (inclusive) for user pools and between 1 and 1000 (inclusive) for system pools and must be a value in the range min_count - max_count. + NodeCount *float64 `json:"nodeCount,omitempty" tf:"node_count,omitempty"` + + // A map of Kubernetes labels which should be applied to nodes in this Node Pool. 
+ // +mapType=granular + NodeLabels map[string]*string `json:"nodeLabels,omitempty" tf:"node_labels,omitempty"` + + // A node_network_profile block as documented below. + NodeNetworkProfile *KubernetesClusterNodePoolNodeNetworkProfileInitParameters `json:"nodeNetworkProfile,omitempty" tf:"node_network_profile,omitempty"` + + // Resource ID for the Public IP Addresses Prefix for the nodes in this Node Pool. enable_node_public_ip should be true. Changing this forces a new resource to be created. + NodePublicIPPrefixID *string `json:"nodePublicIpPrefixId,omitempty" tf:"node_public_ip_prefix_id,omitempty"` + + // A list of Kubernetes taints which should be applied to nodes in the agent pool (e.g key=value:NoSchedule). + NodeTaints []*string `json:"nodeTaints,omitempty" tf:"node_taints,omitempty"` + + // Version of Kubernetes used for the Agents. If not specified, the latest recommended version will be used at provisioning time (but won't auto-upgrade). AKS does not require an exact patch version to be specified, minor version aliases such as 1.22 are also supported. - The minor version's latest GA patch is automatically chosen in that case. More details can be found in the documentation. + OrchestratorVersion *string `json:"orchestratorVersion,omitempty" tf:"orchestrator_version,omitempty"` + + // The Agent Operating System disk size in GB. Changing this forces a new resource to be created. + OsDiskSizeGb *float64 `json:"osDiskSizeGb,omitempty" tf:"os_disk_size_gb,omitempty"` + + // The type of disk which should be used for the Operating System. Possible values are Ephemeral and Managed. Defaults to Managed. Changing this forces a new resource to be created. + OsDiskType *string `json:"osDiskType,omitempty" tf:"os_disk_type,omitempty"` + + // Specifies the OS SKU used by the agent pool. Possible values are AzureLinux, Ubuntu, Windows2019 and Windows2022. If not specified, the default is Ubuntu if OSType=Linux or Windows2019 if OSType=Windows. 
And the default Windows OSSKU will be changed to Windows2022 after Windows2019 is deprecated. Changing this forces a new resource to be created. + OsSku *string `json:"osSku,omitempty" tf:"os_sku,omitempty"` + + // The Operating System which should be used for this Node Pool. Changing this forces a new resource to be created. Possible values are Linux and Windows. Defaults to Linux. + OsType *string `json:"osType,omitempty" tf:"os_type,omitempty"` + + // The ID of the Subnet where the pods in the Node Pool should exist. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + PodSubnetID *string `json:"podSubnetId,omitempty" tf:"pod_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate podSubnetId. + // +kubebuilder:validation:Optional + PodSubnetIDRef *v1.Reference `json:"podSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate podSubnetId. + // +kubebuilder:validation:Optional + PodSubnetIDSelector *v1.Selector `json:"podSubnetIdSelector,omitempty" tf:"-"` + + // The Priority for Virtual Machines within the Virtual Machine Scale Set that powers this Node Pool. Possible values are Regular and Spot. Defaults to Regular. Changing this forces a new resource to be created. + Priority *string `json:"priority,omitempty" tf:"priority,omitempty"` + + // The ID of the Proximity Placement Group where the Virtual Machine Scale Set that powers this Node Pool will be placed. Changing this forces a new resource to be created. + ProximityPlacementGroupID *string `json:"proximityPlacementGroupId,omitempty" tf:"proximity_placement_group_id,omitempty"` + + // Specifies how the node pool should deal with scaled-down nodes. Allowed values are Delete and Deallocate. Defaults to Delete. 
+ ScaleDownMode *string `json:"scaleDownMode,omitempty" tf:"scale_down_mode,omitempty"` + + // The ID of the Snapshot which should be used to create this Node Pool. Changing this forces a new resource to be created. + SnapshotID *string `json:"snapshotId,omitempty" tf:"snapshot_id,omitempty"` + + // The maximum price you're willing to pay in USD per Virtual Machine. Valid values are -1 (the current on-demand price for a Virtual Machine) or a positive value with up to five decimal places. Changing this forces a new resource to be created. + SpotMaxPrice *float64 `json:"spotMaxPrice,omitempty" tf:"spot_max_price,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Used to specify whether the UltraSSD is enabled in the Node Pool. Defaults to false. See the documentation for more information. Changing this forces a new resource to be created. + UltraSsdEnabled *bool `json:"ultraSsdEnabled,omitempty" tf:"ultra_ssd_enabled,omitempty"` + + // A upgrade_settings block as documented below. + UpgradeSettings *KubernetesClusterNodePoolUpgradeSettingsInitParameters `json:"upgradeSettings,omitempty" tf:"upgrade_settings,omitempty"` + + // The SKU which should be used for the Virtual Machines used in this Node Pool. Changing this forces a new resource to be created. + VMSize *string `json:"vmSize,omitempty" tf:"vm_size,omitempty"` + + // The ID of the Subnet where this Node Pool should exist. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + VnetSubnetID *string `json:"vnetSubnetId,omitempty" tf:"vnet_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate vnetSubnetId. 
+ // +kubebuilder:validation:Optional + VnetSubnetIDRef *v1.Reference `json:"vnetSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate vnetSubnetId. + // +kubebuilder:validation:Optional + VnetSubnetIDSelector *v1.Selector `json:"vnetSubnetIdSelector,omitempty" tf:"-"` + + // A windows_profile block as documented below. Changing this forces a new resource to be created. + WindowsProfile *KubernetesClusterNodePoolWindowsProfileInitParameters `json:"windowsProfile,omitempty" tf:"windows_profile,omitempty"` + + // Used to specify the workload runtime. Allowed values are OCIContainer, WasmWasi and KataMshvVmIsolation. + WorkloadRuntime *string `json:"workloadRuntime,omitempty" tf:"workload_runtime,omitempty"` + + // Specifies a list of Availability Zones in which this Kubernetes Cluster Node Pool should be located. Changing this forces a new Kubernetes Cluster Node Pool to be created. + // +listType=set + Zones []*string `json:"zones,omitempty" tf:"zones,omitempty"` +} + +type KubernetesClusterNodePoolKubeletConfigInitParameters struct { + + // Specifies the allow list of unsafe sysctls command or patterns (ending in *). Changing this forces a new resource to be created. + // +listType=set + AllowedUnsafeSysctls []*string `json:"allowedUnsafeSysctls,omitempty" tf:"allowed_unsafe_sysctls,omitempty"` + + // Is CPU CFS quota enforcement for containers enabled? Changing this forces a new resource to be created. + CPUCfsQuotaEnabled *bool `json:"cpuCfsQuotaEnabled,omitempty" tf:"cpu_cfs_quota_enabled,omitempty"` + + // Specifies the CPU CFS quota period value. Changing this forces a new resource to be created. + CPUCfsQuotaPeriod *string `json:"cpuCfsQuotaPeriod,omitempty" tf:"cpu_cfs_quota_period,omitempty"` + + // Specifies the CPU Manager policy to use. Possible values are none and static, Changing this forces a new resource to be created. 
+ CPUManagerPolicy *string `json:"cpuManagerPolicy,omitempty" tf:"cpu_manager_policy,omitempty"` + + // Specifies the maximum number of container log files that can be present for a container. must be at least 2. Changing this forces a new resource to be created. + ContainerLogMaxLine *float64 `json:"containerLogMaxLine,omitempty" tf:"container_log_max_line,omitempty"` + + // Specifies the maximum size (e.g. 10MB) of container log file before it is rotated. Changing this forces a new resource to be created. + ContainerLogMaxSizeMb *float64 `json:"containerLogMaxSizeMb,omitempty" tf:"container_log_max_size_mb,omitempty"` + + // Specifies the percent of disk usage above which image garbage collection is always run. Must be between 0 and 100. Changing this forces a new resource to be created. + ImageGcHighThreshold *float64 `json:"imageGcHighThreshold,omitempty" tf:"image_gc_high_threshold,omitempty"` + + // Specifies the percent of disk usage lower than which image garbage collection is never run. Must be between 0 and 100. Changing this forces a new resource to be created. + ImageGcLowThreshold *float64 `json:"imageGcLowThreshold,omitempty" tf:"image_gc_low_threshold,omitempty"` + + // Specifies the maximum number of processes per pod. Changing this forces a new resource to be created. + PodMaxPid *float64 `json:"podMaxPid,omitempty" tf:"pod_max_pid,omitempty"` + + // Specifies the Topology Manager policy to use. Possible values are none, best-effort, restricted or single-numa-node. Changing this forces a new resource to be created. + TopologyManagerPolicy *string `json:"topologyManagerPolicy,omitempty" tf:"topology_manager_policy,omitempty"` +} + +type KubernetesClusterNodePoolKubeletConfigObservation struct { + + // Specifies the allow list of unsafe sysctls command or patterns (ending in *). Changing this forces a new resource to be created. 
+ // +listType=set + AllowedUnsafeSysctls []*string `json:"allowedUnsafeSysctls,omitempty" tf:"allowed_unsafe_sysctls,omitempty"` + + // Is CPU CFS quota enforcement for containers enabled? Changing this forces a new resource to be created. + CPUCfsQuotaEnabled *bool `json:"cpuCfsQuotaEnabled,omitempty" tf:"cpu_cfs_quota_enabled,omitempty"` + + // Specifies the CPU CFS quota period value. Changing this forces a new resource to be created. + CPUCfsQuotaPeriod *string `json:"cpuCfsQuotaPeriod,omitempty" tf:"cpu_cfs_quota_period,omitempty"` + + // Specifies the CPU Manager policy to use. Possible values are none and static, Changing this forces a new resource to be created. + CPUManagerPolicy *string `json:"cpuManagerPolicy,omitempty" tf:"cpu_manager_policy,omitempty"` + + // Specifies the maximum number of container log files that can be present for a container. must be at least 2. Changing this forces a new resource to be created. + ContainerLogMaxLine *float64 `json:"containerLogMaxLine,omitempty" tf:"container_log_max_line,omitempty"` + + // Specifies the maximum size (e.g. 10MB) of container log file before it is rotated. Changing this forces a new resource to be created. + ContainerLogMaxSizeMb *float64 `json:"containerLogMaxSizeMb,omitempty" tf:"container_log_max_size_mb,omitempty"` + + // Specifies the percent of disk usage above which image garbage collection is always run. Must be between 0 and 100. Changing this forces a new resource to be created. + ImageGcHighThreshold *float64 `json:"imageGcHighThreshold,omitempty" tf:"image_gc_high_threshold,omitempty"` + + // Specifies the percent of disk usage lower than which image garbage collection is never run. Must be between 0 and 100. Changing this forces a new resource to be created. + ImageGcLowThreshold *float64 `json:"imageGcLowThreshold,omitempty" tf:"image_gc_low_threshold,omitempty"` + + // Specifies the maximum number of processes per pod. Changing this forces a new resource to be created. 
+ PodMaxPid *float64 `json:"podMaxPid,omitempty" tf:"pod_max_pid,omitempty"` + + // Specifies the Topology Manager policy to use. Possible values are none, best-effort, restricted or single-numa-node. Changing this forces a new resource to be created. + TopologyManagerPolicy *string `json:"topologyManagerPolicy,omitempty" tf:"topology_manager_policy,omitempty"` +} + +type KubernetesClusterNodePoolKubeletConfigParameters struct { + + // Specifies the allow list of unsafe sysctls command or patterns (ending in *). Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + // +listType=set + AllowedUnsafeSysctls []*string `json:"allowedUnsafeSysctls,omitempty" tf:"allowed_unsafe_sysctls,omitempty"` + + // Is CPU CFS quota enforcement for containers enabled? Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + CPUCfsQuotaEnabled *bool `json:"cpuCfsQuotaEnabled,omitempty" tf:"cpu_cfs_quota_enabled,omitempty"` + + // Specifies the CPU CFS quota period value. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + CPUCfsQuotaPeriod *string `json:"cpuCfsQuotaPeriod,omitempty" tf:"cpu_cfs_quota_period,omitempty"` + + // Specifies the CPU Manager policy to use. Possible values are none and static, Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + CPUManagerPolicy *string `json:"cpuManagerPolicy,omitempty" tf:"cpu_manager_policy,omitempty"` + + // Specifies the maximum number of container log files that can be present for a container. must be at least 2. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ContainerLogMaxLine *float64 `json:"containerLogMaxLine,omitempty" tf:"container_log_max_line,omitempty"` + + // Specifies the maximum size (e.g. 10MB) of container log file before it is rotated. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + ContainerLogMaxSizeMb *float64 `json:"containerLogMaxSizeMb,omitempty" tf:"container_log_max_size_mb,omitempty"` + + // Specifies the percent of disk usage above which image garbage collection is always run. Must be between 0 and 100. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ImageGcHighThreshold *float64 `json:"imageGcHighThreshold,omitempty" tf:"image_gc_high_threshold,omitempty"` + + // Specifies the percent of disk usage lower than which image garbage collection is never run. Must be between 0 and 100. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ImageGcLowThreshold *float64 `json:"imageGcLowThreshold,omitempty" tf:"image_gc_low_threshold,omitempty"` + + // Specifies the maximum number of processes per pod. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + PodMaxPid *float64 `json:"podMaxPid,omitempty" tf:"pod_max_pid,omitempty"` + + // Specifies the Topology Manager policy to use. Possible values are none, best-effort, restricted or single-numa-node. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + TopologyManagerPolicy *string `json:"topologyManagerPolicy,omitempty" tf:"topology_manager_policy,omitempty"` +} + +type KubernetesClusterNodePoolLinuxOsConfigInitParameters struct { + + // Specifies the size of swap file on each node in MB. Changing this forces a new resource to be created. + SwapFileSizeMb *float64 `json:"swapFileSizeMb,omitempty" tf:"swap_file_size_mb,omitempty"` + + // A sysctl_config block as defined below. Changing this forces a new resource to be created. + SysctlConfig *LinuxOsConfigSysctlConfigInitParameters `json:"sysctlConfig,omitempty" tf:"sysctl_config,omitempty"` + + // specifies the defrag configuration for Transparent Huge Page. Possible values are always, defer, defer+madvise, madvise and never. 
Changing this forces a new resource to be created. + TransparentHugePageDefrag *string `json:"transparentHugePageDefrag,omitempty" tf:"transparent_huge_page_defrag,omitempty"` + + // Specifies the Transparent Huge Page enabled configuration. Possible values are always, madvise and never. Changing this forces a new resource to be created. + TransparentHugePageEnabled *string `json:"transparentHugePageEnabled,omitempty" tf:"transparent_huge_page_enabled,omitempty"` +} + +type KubernetesClusterNodePoolLinuxOsConfigObservation struct { + + // Specifies the size of swap file on each node in MB. Changing this forces a new resource to be created. + SwapFileSizeMb *float64 `json:"swapFileSizeMb,omitempty" tf:"swap_file_size_mb,omitempty"` + + // A sysctl_config block as defined below. Changing this forces a new resource to be created. + SysctlConfig *LinuxOsConfigSysctlConfigObservation `json:"sysctlConfig,omitempty" tf:"sysctl_config,omitempty"` + + // specifies the defrag configuration for Transparent Huge Page. Possible values are always, defer, defer+madvise, madvise and never. Changing this forces a new resource to be created. + TransparentHugePageDefrag *string `json:"transparentHugePageDefrag,omitempty" tf:"transparent_huge_page_defrag,omitempty"` + + // Specifies the Transparent Huge Page enabled configuration. Possible values are always, madvise and never. Changing this forces a new resource to be created. + TransparentHugePageEnabled *string `json:"transparentHugePageEnabled,omitempty" tf:"transparent_huge_page_enabled,omitempty"` +} + +type KubernetesClusterNodePoolLinuxOsConfigParameters struct { + + // Specifies the size of swap file on each node in MB. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + SwapFileSizeMb *float64 `json:"swapFileSizeMb,omitempty" tf:"swap_file_size_mb,omitempty"` + + // A sysctl_config block as defined below. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + SysctlConfig *LinuxOsConfigSysctlConfigParameters `json:"sysctlConfig,omitempty" tf:"sysctl_config,omitempty"` + + // specifies the defrag configuration for Transparent Huge Page. Possible values are always, defer, defer+madvise, madvise and never. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + TransparentHugePageDefrag *string `json:"transparentHugePageDefrag,omitempty" tf:"transparent_huge_page_defrag,omitempty"` + + // Specifies the Transparent Huge Page enabled configuration. Possible values are always, madvise and never. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + TransparentHugePageEnabled *string `json:"transparentHugePageEnabled,omitempty" tf:"transparent_huge_page_enabled,omitempty"` +} + +type KubernetesClusterNodePoolNodeNetworkProfileInitParameters struct { + + // One or more allowed_host_ports blocks as defined below. + AllowedHostPorts []NodeNetworkProfileAllowedHostPortsInitParameters `json:"allowedHostPorts,omitempty" tf:"allowed_host_ports,omitempty"` + + // A list of Application Security Group IDs which should be associated with this Node Pool. + ApplicationSecurityGroupIds []*string `json:"applicationSecurityGroupIds,omitempty" tf:"application_security_group_ids,omitempty"` + + // Specifies a mapping of tags to the instance-level public IPs. Changing this forces a new resource to be created. + // +mapType=granular + NodePublicIPTags map[string]*string `json:"nodePublicIpTags,omitempty" tf:"node_public_ip_tags,omitempty"` +} + +type KubernetesClusterNodePoolNodeNetworkProfileObservation struct { + + // One or more allowed_host_ports blocks as defined below. + AllowedHostPorts []NodeNetworkProfileAllowedHostPortsObservation `json:"allowedHostPorts,omitempty" tf:"allowed_host_ports,omitempty"` + + // A list of Application Security Group IDs which should be associated with this Node Pool. 
+ ApplicationSecurityGroupIds []*string `json:"applicationSecurityGroupIds,omitempty" tf:"application_security_group_ids,omitempty"` + + // Specifies a mapping of tags to the instance-level public IPs. Changing this forces a new resource to be created. + // +mapType=granular + NodePublicIPTags map[string]*string `json:"nodePublicIpTags,omitempty" tf:"node_public_ip_tags,omitempty"` +} + +type KubernetesClusterNodePoolNodeNetworkProfileParameters struct { + + // One or more allowed_host_ports blocks as defined below. + // +kubebuilder:validation:Optional + AllowedHostPorts []NodeNetworkProfileAllowedHostPortsParameters `json:"allowedHostPorts,omitempty" tf:"allowed_host_ports,omitempty"` + + // A list of Application Security Group IDs which should be associated with this Node Pool. + // +kubebuilder:validation:Optional + ApplicationSecurityGroupIds []*string `json:"applicationSecurityGroupIds,omitempty" tf:"application_security_group_ids,omitempty"` + + // Specifies a mapping of tags to the instance-level public IPs. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + // +mapType=granular + NodePublicIPTags map[string]*string `json:"nodePublicIpTags,omitempty" tf:"node_public_ip_tags,omitempty"` +} + +type KubernetesClusterNodePoolObservation struct { + + // Specifies the ID of the Capacity Reservation Group where this Node Pool should exist. Changing this forces a new resource to be created. + CapacityReservationGroupID *string `json:"capacityReservationGroupId,omitempty" tf:"capacity_reservation_group_id,omitempty"` + + // Specifies whether to trust a Custom CA. + CustomCATrustEnabled *bool `json:"customCaTrustEnabled,omitempty" tf:"custom_ca_trust_enabled,omitempty"` + + // Whether to enable auto-scaler. + EnableAutoScaling *bool `json:"enableAutoScaling,omitempty" tf:"enable_auto_scaling,omitempty"` + + // Should the nodes in this Node Pool have host encryption enabled? Changing this forces a new resource to be created. 
+ EnableHostEncryption *bool `json:"enableHostEncryption,omitempty" tf:"enable_host_encryption,omitempty"` + + // Should each node have a Public IP Address? Changing this forces a new resource to be created. + EnableNodePublicIP *bool `json:"enableNodePublicIp,omitempty" tf:"enable_node_public_ip,omitempty"` + + // The Eviction Policy which should be used for Virtual Machines within the Virtual Machine Scale Set powering this Node Pool. Possible values are Deallocate and Delete. Changing this forces a new resource to be created. + EvictionPolicy *string `json:"evictionPolicy,omitempty" tf:"eviction_policy,omitempty"` + + // Should the nodes in this Node Pool have Federal Information Processing Standard enabled? Changing this forces a new resource to be created. + FipsEnabled *bool `json:"fipsEnabled,omitempty" tf:"fips_enabled,omitempty"` + + // Specifies the GPU MIG instance profile for supported GPU VM SKU. The allowed values are MIG1g, MIG2g, MIG3g, MIG4g and MIG7g. Changing this forces a new resource to be created. + GpuInstance *string `json:"gpuInstance,omitempty" tf:"gpu_instance,omitempty"` + + // The fully qualified resource ID of the Dedicated Host Group to provision virtual machines from. Changing this forces a new resource to be created. + HostGroupID *string `json:"hostGroupId,omitempty" tf:"host_group_id,omitempty"` + + // The ID of the Kubernetes Cluster Node Pool. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A kubelet_config block as defined below. Changing this forces a new resource to be created. + KubeletConfig *KubernetesClusterNodePoolKubeletConfigObservation `json:"kubeletConfig,omitempty" tf:"kubelet_config,omitempty"` + + // The type of disk used by kubelet. Possible values are OS and Temporary. + KubeletDiskType *string `json:"kubeletDiskType,omitempty" tf:"kubelet_disk_type,omitempty"` + + // The ID of the Kubernetes Cluster where this Node Pool should exist. Changing this forces a new resource to be created. 
+ KubernetesClusterID *string `json:"kubernetesClusterId,omitempty" tf:"kubernetes_cluster_id,omitempty"` + + // A linux_os_config block as defined below. Changing this forces a new resource to be created. + LinuxOsConfig *KubernetesClusterNodePoolLinuxOsConfigObservation `json:"linuxOsConfig,omitempty" tf:"linux_os_config,omitempty"` + + // The maximum number of nodes which should exist within this Node Pool. Valid values are between 0 and 1000 and must be greater than or equal to min_count. + MaxCount *float64 `json:"maxCount,omitempty" tf:"max_count,omitempty"` + + // The maximum number of pods that can run on each agent. Changing this forces a new resource to be created. + MaxPods *float64 `json:"maxPods,omitempty" tf:"max_pods,omitempty"` + + // A base64-encoded string which will be written to /etc/motd after decoding. This allows customization of the message of the day for Linux nodes. It cannot be specified for Windows nodes and must be a static string (i.e. will be printed raw and not executed as a script). Changing this forces a new resource to be created. + MessageOfTheDay *string `json:"messageOfTheDay,omitempty" tf:"message_of_the_day,omitempty"` + + // The minimum number of nodes which should exist within this Node Pool. Valid values are between 0 and 1000 and must be less than or equal to max_count. + MinCount *float64 `json:"minCount,omitempty" tf:"min_count,omitempty"` + + // Should this Node Pool be used for System or User resources? Possible values are System and User. Defaults to User. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // The initial number of nodes which should exist within this Node Pool. Valid values are between 0 and 1000 (inclusive) for user pools and between 1 and 1000 (inclusive) for system pools and must be a value in the range min_count - max_count. + NodeCount *float64 `json:"nodeCount,omitempty" tf:"node_count,omitempty"` + + // A map of Kubernetes labels which should be applied to nodes in this Node Pool. 
+ // +mapType=granular + NodeLabels map[string]*string `json:"nodeLabels,omitempty" tf:"node_labels,omitempty"` + + // A node_network_profile block as documented below. + NodeNetworkProfile *KubernetesClusterNodePoolNodeNetworkProfileObservation `json:"nodeNetworkProfile,omitempty" tf:"node_network_profile,omitempty"` + + // Resource ID for the Public IP Addresses Prefix for the nodes in this Node Pool. enable_node_public_ip should be true. Changing this forces a new resource to be created. + NodePublicIPPrefixID *string `json:"nodePublicIpPrefixId,omitempty" tf:"node_public_ip_prefix_id,omitempty"` + + // A list of Kubernetes taints which should be applied to nodes in the agent pool (e.g key=value:NoSchedule). + NodeTaints []*string `json:"nodeTaints,omitempty" tf:"node_taints,omitempty"` + + // Version of Kubernetes used for the Agents. If not specified, the latest recommended version will be used at provisioning time (but won't auto-upgrade). AKS does not require an exact patch version to be specified, minor version aliases such as 1.22 are also supported. - The minor version's latest GA patch is automatically chosen in that case. More details can be found in the documentation. + OrchestratorVersion *string `json:"orchestratorVersion,omitempty" tf:"orchestrator_version,omitempty"` + + // The Agent Operating System disk size in GB. Changing this forces a new resource to be created. + OsDiskSizeGb *float64 `json:"osDiskSizeGb,omitempty" tf:"os_disk_size_gb,omitempty"` + + // The type of disk which should be used for the Operating System. Possible values are Ephemeral and Managed. Defaults to Managed. Changing this forces a new resource to be created. + OsDiskType *string `json:"osDiskType,omitempty" tf:"os_disk_type,omitempty"` + + // Specifies the OS SKU used by the agent pool. Possible values are AzureLinux, Ubuntu, Windows2019 and Windows2022. If not specified, the default is Ubuntu if OSType=Linux or Windows2019 if OSType=Windows. 
And the default Windows OSSKU will be changed to Windows2022 after Windows2019 is deprecated. Changing this forces a new resource to be created. + OsSku *string `json:"osSku,omitempty" tf:"os_sku,omitempty"` + + // The Operating System which should be used for this Node Pool. Changing this forces a new resource to be created. Possible values are Linux and Windows. Defaults to Linux. + OsType *string `json:"osType,omitempty" tf:"os_type,omitempty"` + + // The ID of the Subnet where the pods in the Node Pool should exist. Changing this forces a new resource to be created. + PodSubnetID *string `json:"podSubnetId,omitempty" tf:"pod_subnet_id,omitempty"` + + // The Priority for Virtual Machines within the Virtual Machine Scale Set that powers this Node Pool. Possible values are Regular and Spot. Defaults to Regular. Changing this forces a new resource to be created. + Priority *string `json:"priority,omitempty" tf:"priority,omitempty"` + + // The ID of the Proximity Placement Group where the Virtual Machine Scale Set that powers this Node Pool will be placed. Changing this forces a new resource to be created. + ProximityPlacementGroupID *string `json:"proximityPlacementGroupId,omitempty" tf:"proximity_placement_group_id,omitempty"` + + // Specifies how the node pool should deal with scaled-down nodes. Allowed values are Delete and Deallocate. Defaults to Delete. + ScaleDownMode *string `json:"scaleDownMode,omitempty" tf:"scale_down_mode,omitempty"` + + // The ID of the Snapshot which should be used to create this Node Pool. Changing this forces a new resource to be created. + SnapshotID *string `json:"snapshotId,omitempty" tf:"snapshot_id,omitempty"` + + // The maximum price you're willing to pay in USD per Virtual Machine. Valid values are -1 (the current on-demand price for a Virtual Machine) or a positive value with up to five decimal places. Changing this forces a new resource to be created. 
+ SpotMaxPrice *float64 `json:"spotMaxPrice,omitempty" tf:"spot_max_price,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Used to specify whether the UltraSSD is enabled in the Node Pool. Defaults to false. See the documentation for more information. Changing this forces a new resource to be created. + UltraSsdEnabled *bool `json:"ultraSsdEnabled,omitempty" tf:"ultra_ssd_enabled,omitempty"` + + // A upgrade_settings block as documented below. + UpgradeSettings *KubernetesClusterNodePoolUpgradeSettingsObservation `json:"upgradeSettings,omitempty" tf:"upgrade_settings,omitempty"` + + // The SKU which should be used for the Virtual Machines used in this Node Pool. Changing this forces a new resource to be created. + VMSize *string `json:"vmSize,omitempty" tf:"vm_size,omitempty"` + + // The ID of the Subnet where this Node Pool should exist. Changing this forces a new resource to be created. + VnetSubnetID *string `json:"vnetSubnetId,omitempty" tf:"vnet_subnet_id,omitempty"` + + // A windows_profile block as documented below. Changing this forces a new resource to be created. + WindowsProfile *KubernetesClusterNodePoolWindowsProfileObservation `json:"windowsProfile,omitempty" tf:"windows_profile,omitempty"` + + // Used to specify the workload runtime. Allowed values are OCIContainer, WasmWasi and KataMshvVmIsolation. + WorkloadRuntime *string `json:"workloadRuntime,omitempty" tf:"workload_runtime,omitempty"` + + // Specifies a list of Availability Zones in which this Kubernetes Cluster Node Pool should be located. Changing this forces a new Kubernetes Cluster Node Pool to be created. + // +listType=set + Zones []*string `json:"zones,omitempty" tf:"zones,omitempty"` +} + +type KubernetesClusterNodePoolParameters struct { + + // Specifies the ID of the Capacity Reservation Group where this Node Pool should exist. 
Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + CapacityReservationGroupID *string `json:"capacityReservationGroupId,omitempty" tf:"capacity_reservation_group_id,omitempty"` + + // Specifies whether to trust a Custom CA. + // +kubebuilder:validation:Optional + CustomCATrustEnabled *bool `json:"customCaTrustEnabled,omitempty" tf:"custom_ca_trust_enabled,omitempty"` + + // Whether to enable auto-scaler. + // +kubebuilder:validation:Optional + EnableAutoScaling *bool `json:"enableAutoScaling,omitempty" tf:"enable_auto_scaling,omitempty"` + + // Should the nodes in this Node Pool have host encryption enabled? Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + EnableHostEncryption *bool `json:"enableHostEncryption,omitempty" tf:"enable_host_encryption,omitempty"` + + // Should each node have a Public IP Address? Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + EnableNodePublicIP *bool `json:"enableNodePublicIp,omitempty" tf:"enable_node_public_ip,omitempty"` + + // The Eviction Policy which should be used for Virtual Machines within the Virtual Machine Scale Set powering this Node Pool. Possible values are Deallocate and Delete. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + EvictionPolicy *string `json:"evictionPolicy,omitempty" tf:"eviction_policy,omitempty"` + + // Should the nodes in this Node Pool have Federal Information Processing Standard enabled? Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + FipsEnabled *bool `json:"fipsEnabled,omitempty" tf:"fips_enabled,omitempty"` + + // Specifies the GPU MIG instance profile for supported GPU VM SKU. The allowed values are MIG1g, MIG2g, MIG3g, MIG4g and MIG7g. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + GpuInstance *string `json:"gpuInstance,omitempty" tf:"gpu_instance,omitempty"` + + // The fully qualified resource ID of the Dedicated Host Group to provision virtual machines from. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + HostGroupID *string `json:"hostGroupId,omitempty" tf:"host_group_id,omitempty"` + + // A kubelet_config block as defined below. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + KubeletConfig *KubernetesClusterNodePoolKubeletConfigParameters `json:"kubeletConfig,omitempty" tf:"kubelet_config,omitempty"` + + // The type of disk used by kubelet. Possible values are OS and Temporary. + // +kubebuilder:validation:Optional + KubeletDiskType *string `json:"kubeletDiskType,omitempty" tf:"kubelet_disk_type,omitempty"` + + // The ID of the Kubernetes Cluster where this Node Pool should exist. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/containerservice/v1beta2.KubernetesCluster + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + KubernetesClusterID *string `json:"kubernetesClusterId,omitempty" tf:"kubernetes_cluster_id,omitempty"` + + // Reference to a KubernetesCluster in containerservice to populate kubernetesClusterId. + // +kubebuilder:validation:Optional + KubernetesClusterIDRef *v1.Reference `json:"kubernetesClusterIdRef,omitempty" tf:"-"` + + // Selector for a KubernetesCluster in containerservice to populate kubernetesClusterId. + // +kubebuilder:validation:Optional + KubernetesClusterIDSelector *v1.Selector `json:"kubernetesClusterIdSelector,omitempty" tf:"-"` + + // A linux_os_config block as defined below. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + LinuxOsConfig *KubernetesClusterNodePoolLinuxOsConfigParameters `json:"linuxOsConfig,omitempty" tf:"linux_os_config,omitempty"` + + // The maximum number of nodes which should exist within this Node Pool. Valid values are between 0 and 1000 and must be greater than or equal to min_count. + // +kubebuilder:validation:Optional + MaxCount *float64 `json:"maxCount,omitempty" tf:"max_count,omitempty"` + + // The maximum number of pods that can run on each agent. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + MaxPods *float64 `json:"maxPods,omitempty" tf:"max_pods,omitempty"` + + // A base64-encoded string which will be written to /etc/motd after decoding. This allows customization of the message of the day for Linux nodes. It cannot be specified for Windows nodes and must be a static string (i.e. will be printed raw and not executed as a script). Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + MessageOfTheDay *string `json:"messageOfTheDay,omitempty" tf:"message_of_the_day,omitempty"` + + // The minimum number of nodes which should exist within this Node Pool. Valid values are between 0 and 1000 and must be less than or equal to max_count. + // +kubebuilder:validation:Optional + MinCount *float64 `json:"minCount,omitempty" tf:"min_count,omitempty"` + + // Should this Node Pool be used for System or User resources? Possible values are System and User. Defaults to User. + // +kubebuilder:validation:Optional + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // The initial number of nodes which should exist within this Node Pool. Valid values are between 0 and 1000 (inclusive) for user pools and between 1 and 1000 (inclusive) for system pools and must be a value in the range min_count - max_count. 
+ // +kubebuilder:validation:Optional + NodeCount *float64 `json:"nodeCount,omitempty" tf:"node_count,omitempty"` + + // A map of Kubernetes labels which should be applied to nodes in this Node Pool. + // +kubebuilder:validation:Optional + // +mapType=granular + NodeLabels map[string]*string `json:"nodeLabels,omitempty" tf:"node_labels,omitempty"` + + // A node_network_profile block as documented below. + // +kubebuilder:validation:Optional + NodeNetworkProfile *KubernetesClusterNodePoolNodeNetworkProfileParameters `json:"nodeNetworkProfile,omitempty" tf:"node_network_profile,omitempty"` + + // Resource ID for the Public IP Addresses Prefix for the nodes in this Node Pool. enable_node_public_ip should be true. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + NodePublicIPPrefixID *string `json:"nodePublicIpPrefixId,omitempty" tf:"node_public_ip_prefix_id,omitempty"` + + // A list of Kubernetes taints which should be applied to nodes in the agent pool (e.g key=value:NoSchedule). + // +kubebuilder:validation:Optional + NodeTaints []*string `json:"nodeTaints,omitempty" tf:"node_taints,omitempty"` + + // Version of Kubernetes used for the Agents. If not specified, the latest recommended version will be used at provisioning time (but won't auto-upgrade). AKS does not require an exact patch version to be specified, minor version aliases such as 1.22 are also supported. - The minor version's latest GA patch is automatically chosen in that case. More details can be found in the documentation. + // +kubebuilder:validation:Optional + OrchestratorVersion *string `json:"orchestratorVersion,omitempty" tf:"orchestrator_version,omitempty"` + + // The Agent Operating System disk size in GB. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + OsDiskSizeGb *float64 `json:"osDiskSizeGb,omitempty" tf:"os_disk_size_gb,omitempty"` + + // The type of disk which should be used for the Operating System. 
Possible values are Ephemeral and Managed. Defaults to Managed. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + OsDiskType *string `json:"osDiskType,omitempty" tf:"os_disk_type,omitempty"` + + // Specifies the OS SKU used by the agent pool. Possible values are AzureLinux, Ubuntu, Windows2019 and Windows2022. If not specified, the default is Ubuntu if OSType=Linux or Windows2019 if OSType=Windows. And the default Windows OSSKU will be changed to Windows2022 after Windows2019 is deprecated. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + OsSku *string `json:"osSku,omitempty" tf:"os_sku,omitempty"` + + // The Operating System which should be used for this Node Pool. Changing this forces a new resource to be created. Possible values are Linux and Windows. Defaults to Linux. + // +kubebuilder:validation:Optional + OsType *string `json:"osType,omitempty" tf:"os_type,omitempty"` + + // The ID of the Subnet where the pods in the Node Pool should exist. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + PodSubnetID *string `json:"podSubnetId,omitempty" tf:"pod_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate podSubnetId. + // +kubebuilder:validation:Optional + PodSubnetIDRef *v1.Reference `json:"podSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate podSubnetId. + // +kubebuilder:validation:Optional + PodSubnetIDSelector *v1.Selector `json:"podSubnetIdSelector,omitempty" tf:"-"` + + // The Priority for Virtual Machines within the Virtual Machine Scale Set that powers this Node Pool. Possible values are Regular and Spot. Defaults to Regular. 
Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Priority *string `json:"priority,omitempty" tf:"priority,omitempty"` + + // The ID of the Proximity Placement Group where the Virtual Machine Scale Set that powers this Node Pool will be placed. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ProximityPlacementGroupID *string `json:"proximityPlacementGroupId,omitempty" tf:"proximity_placement_group_id,omitempty"` + + // Specifies how the node pool should deal with scaled-down nodes. Allowed values are Delete and Deallocate. Defaults to Delete. + // +kubebuilder:validation:Optional + ScaleDownMode *string `json:"scaleDownMode,omitempty" tf:"scale_down_mode,omitempty"` + + // The ID of the Snapshot which should be used to create this Node Pool. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + SnapshotID *string `json:"snapshotId,omitempty" tf:"snapshot_id,omitempty"` + + // The maximum price you're willing to pay in USD per Virtual Machine. Valid values are -1 (the current on-demand price for a Virtual Machine) or a positive value with up to five decimal places. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + SpotMaxPrice *float64 `json:"spotMaxPrice,omitempty" tf:"spot_max_price,omitempty"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Used to specify whether the UltraSSD is enabled in the Node Pool. Defaults to false. See the documentation for more information. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + UltraSsdEnabled *bool `json:"ultraSsdEnabled,omitempty" tf:"ultra_ssd_enabled,omitempty"` + + // A upgrade_settings block as documented below. 
+ // +kubebuilder:validation:Optional + UpgradeSettings *KubernetesClusterNodePoolUpgradeSettingsParameters `json:"upgradeSettings,omitempty" tf:"upgrade_settings,omitempty"` + + // The SKU which should be used for the Virtual Machines used in this Node Pool. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + VMSize *string `json:"vmSize,omitempty" tf:"vm_size,omitempty"` + + // The ID of the Subnet where this Node Pool should exist. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + VnetSubnetID *string `json:"vnetSubnetId,omitempty" tf:"vnet_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate vnetSubnetId. + // +kubebuilder:validation:Optional + VnetSubnetIDRef *v1.Reference `json:"vnetSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate vnetSubnetId. + // +kubebuilder:validation:Optional + VnetSubnetIDSelector *v1.Selector `json:"vnetSubnetIdSelector,omitempty" tf:"-"` + + // A windows_profile block as documented below. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + WindowsProfile *KubernetesClusterNodePoolWindowsProfileParameters `json:"windowsProfile,omitempty" tf:"windows_profile,omitempty"` + + // Used to specify the workload runtime. Allowed values are OCIContainer, WasmWasi and KataMshvVmIsolation. + // +kubebuilder:validation:Optional + WorkloadRuntime *string `json:"workloadRuntime,omitempty" tf:"workload_runtime,omitempty"` + + // Specifies a list of Availability Zones in which this Kubernetes Cluster Node Pool should be located. Changing this forces a new Kubernetes Cluster Node Pool to be created. 
+ // +kubebuilder:validation:Optional + // +listType=set + Zones []*string `json:"zones,omitempty" tf:"zones,omitempty"` +} + +type KubernetesClusterNodePoolUpgradeSettingsInitParameters struct { + + // The maximum number or percentage of nodes which will be added to the Node Pool size during an upgrade. + MaxSurge *string `json:"maxSurge,omitempty" tf:"max_surge,omitempty"` +} + +type KubernetesClusterNodePoolUpgradeSettingsObservation struct { + + // The maximum number or percentage of nodes which will be added to the Node Pool size during an upgrade. + MaxSurge *string `json:"maxSurge,omitempty" tf:"max_surge,omitempty"` +} + +type KubernetesClusterNodePoolUpgradeSettingsParameters struct { + + // The maximum number or percentage of nodes which will be added to the Node Pool size during an upgrade. + // +kubebuilder:validation:Optional + MaxSurge *string `json:"maxSurge" tf:"max_surge,omitempty"` +} + +type KubernetesClusterNodePoolWindowsProfileInitParameters struct { + + // Should the Windows nodes in this Node Pool have outbound NAT enabled? Defaults to true. Changing this forces a new resource to be created. + OutboundNATEnabled *bool `json:"outboundNatEnabled,omitempty" tf:"outbound_nat_enabled,omitempty"` +} + +type KubernetesClusterNodePoolWindowsProfileObservation struct { + + // Should the Windows nodes in this Node Pool have outbound NAT enabled? Defaults to true. Changing this forces a new resource to be created. + OutboundNATEnabled *bool `json:"outboundNatEnabled,omitempty" tf:"outbound_nat_enabled,omitempty"` +} + +type KubernetesClusterNodePoolWindowsProfileParameters struct { + + // Should the Windows nodes in this Node Pool have outbound NAT enabled? Defaults to true. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + OutboundNATEnabled *bool `json:"outboundNatEnabled,omitempty" tf:"outbound_nat_enabled,omitempty"` +} + +type LinuxOsConfigSysctlConfigInitParameters struct { + + // The sysctl setting fs.aio-max-nr. Must be between 65536 and 6553500. Changing this forces a new resource to be created. + FsAioMaxNr *float64 `json:"fsAioMaxNr,omitempty" tf:"fs_aio_max_nr,omitempty"` + + // The sysctl setting fs.file-max. Must be between 8192 and 12000500. Changing this forces a new resource to be created. + FsFileMax *float64 `json:"fsFileMax,omitempty" tf:"fs_file_max,omitempty"` + + // The sysctl setting fs.inotify.max_user_watches. Must be between 781250 and 2097152. Changing this forces a new resource to be created. + FsInotifyMaxUserWatches *float64 `json:"fsInotifyMaxUserWatches,omitempty" tf:"fs_inotify_max_user_watches,omitempty"` + + // The sysctl setting fs.nr_open. Must be between 8192 and 20000500. Changing this forces a new resource to be created. + FsNrOpen *float64 `json:"fsNrOpen,omitempty" tf:"fs_nr_open,omitempty"` + + // The sysctl setting kernel.threads-max. Must be between 20 and 513785. Changing this forces a new resource to be created. + KernelThreadsMax *float64 `json:"kernelThreadsMax,omitempty" tf:"kernel_threads_max,omitempty"` + + // The sysctl setting net.core.netdev_max_backlog. Must be between 1000 and 3240000. Changing this forces a new resource to be created. + NetCoreNetdevMaxBacklog *float64 `json:"netCoreNetdevMaxBacklog,omitempty" tf:"net_core_netdev_max_backlog,omitempty"` + + // The sysctl setting net.core.optmem_max. Must be between 20480 and 4194304. Changing this forces a new resource to be created. + NetCoreOptmemMax *float64 `json:"netCoreOptmemMax,omitempty" tf:"net_core_optmem_max,omitempty"` + + // The sysctl setting net.core.rmem_default. Must be between 212992 and 134217728. Changing this forces a new resource to be created. 
+ NetCoreRmemDefault *float64 `json:"netCoreRmemDefault,omitempty" tf:"net_core_rmem_default,omitempty"` + + // The sysctl setting net.core.rmem_max. Must be between 212992 and 134217728. Changing this forces a new resource to be created. + NetCoreRmemMax *float64 `json:"netCoreRmemMax,omitempty" tf:"net_core_rmem_max,omitempty"` + + // The sysctl setting net.core.somaxconn. Must be between 4096 and 3240000. Changing this forces a new resource to be created. + NetCoreSomaxconn *float64 `json:"netCoreSomaxconn,omitempty" tf:"net_core_somaxconn,omitempty"` + + // The sysctl setting net.core.wmem_default. Must be between 212992 and 134217728. Changing this forces a new resource to be created. + NetCoreWmemDefault *float64 `json:"netCoreWmemDefault,omitempty" tf:"net_core_wmem_default,omitempty"` + + // The sysctl setting net.core.wmem_max. Must be between 212992 and 134217728. Changing this forces a new resource to be created. + NetCoreWmemMax *float64 `json:"netCoreWmemMax,omitempty" tf:"net_core_wmem_max,omitempty"` + + // The sysctl setting net.ipv4.ip_local_port_range max value. Must be between 32768 and 65535. Changing this forces a new resource to be created. + NetIPv4IPLocalPortRangeMax *float64 `json:"netIpv4IpLocalPortRangeMax,omitempty" tf:"net_ipv4_ip_local_port_range_max,omitempty"` + + // The sysctl setting net.ipv4.ip_local_port_range min value. Must be between 1024 and 60999. Changing this forces a new resource to be created. + NetIPv4IPLocalPortRangeMin *float64 `json:"netIpv4IpLocalPortRangeMin,omitempty" tf:"net_ipv4_ip_local_port_range_min,omitempty"` + + // The sysctl setting net.ipv4.neigh.default.gc_thresh1. Must be between 128 and 80000. Changing this forces a new resource to be created. + NetIPv4NeighDefaultGcThresh1 *float64 `json:"netIpv4NeighDefaultGcThresh1,omitempty" tf:"net_ipv4_neigh_default_gc_thresh1,omitempty"` + + // The sysctl setting net.ipv4.neigh.default.gc_thresh2. Must be between 512 and 90000. 
Changing this forces a new resource to be created. + NetIPv4NeighDefaultGcThresh2 *float64 `json:"netIpv4NeighDefaultGcThresh2,omitempty" tf:"net_ipv4_neigh_default_gc_thresh2,omitempty"` + + // The sysctl setting net.ipv4.neigh.default.gc_thresh3. Must be between 1024 and 100000. Changing this forces a new resource to be created. + NetIPv4NeighDefaultGcThresh3 *float64 `json:"netIpv4NeighDefaultGcThresh3,omitempty" tf:"net_ipv4_neigh_default_gc_thresh3,omitempty"` + + // The sysctl setting net.ipv4.tcp_fin_timeout. Must be between 5 and 120. Changing this forces a new resource to be created. + NetIPv4TCPFinTimeout *float64 `json:"netIpv4TcpFinTimeout,omitempty" tf:"net_ipv4_tcp_fin_timeout,omitempty"` + + // The sysctl setting net.ipv4.tcp_keepalive_intvl. Must be between 10 and 90. Changing this forces a new resource to be created. + NetIPv4TCPKeepaliveIntvl *float64 `json:"netIpv4TcpKeepaliveIntvl,omitempty" tf:"net_ipv4_tcp_keepalive_intvl,omitempty"` + + // The sysctl setting net.ipv4.tcp_keepalive_probes. Must be between 1 and 15. Changing this forces a new resource to be created. + NetIPv4TCPKeepaliveProbes *float64 `json:"netIpv4TcpKeepaliveProbes,omitempty" tf:"net_ipv4_tcp_keepalive_probes,omitempty"` + + // The sysctl setting net.ipv4.tcp_keepalive_time. Must be between 30 and 432000. Changing this forces a new resource to be created. + NetIPv4TCPKeepaliveTime *float64 `json:"netIpv4TcpKeepaliveTime,omitempty" tf:"net_ipv4_tcp_keepalive_time,omitempty"` + + // The sysctl setting net.ipv4.tcp_max_syn_backlog. Must be between 128 and 3240000. Changing this forces a new resource to be created. + NetIPv4TCPMaxSynBacklog *float64 `json:"netIpv4TcpMaxSynBacklog,omitempty" tf:"net_ipv4_tcp_max_syn_backlog,omitempty"` + + // The sysctl setting net.ipv4.tcp_max_tw_buckets. Must be between 8000 and 1440000. Changing this forces a new resource to be created. 
+ NetIPv4TCPMaxTwBuckets *float64 `json:"netIpv4TcpMaxTwBuckets,omitempty" tf:"net_ipv4_tcp_max_tw_buckets,omitempty"` + + // Is sysctl setting net.ipv4.tcp_tw_reuse enabled? Changing this forces a new resource to be created. + NetIPv4TCPTwReuse *bool `json:"netIpv4TcpTwReuse,omitempty" tf:"net_ipv4_tcp_tw_reuse,omitempty"` + + // The sysctl setting net.netfilter.nf_conntrack_buckets. Must be between 65536 and 524288. Changing this forces a new resource to be created. + NetNetfilterNfConntrackBuckets *float64 `json:"netNetfilterNfConntrackBuckets,omitempty" tf:"net_netfilter_nf_conntrack_buckets,omitempty"` + + // The sysctl setting net.netfilter.nf_conntrack_max. Must be between 131072 and 2097152. Changing this forces a new resource to be created. + NetNetfilterNfConntrackMax *float64 `json:"netNetfilterNfConntrackMax,omitempty" tf:"net_netfilter_nf_conntrack_max,omitempty"` + + // The sysctl setting vm.max_map_count. Must be between 65530 and 262144. Changing this forces a new resource to be created. + VMMaxMapCount *float64 `json:"vmMaxMapCount,omitempty" tf:"vm_max_map_count,omitempty"` + + // The sysctl setting vm.swappiness. Must be between 0 and 100. Changing this forces a new resource to be created. + VMSwappiness *float64 `json:"vmSwappiness,omitempty" tf:"vm_swappiness,omitempty"` + + // The sysctl setting vm.vfs_cache_pressure. Must be between 0 and 100. Changing this forces a new resource to be created. + VMVfsCachePressure *float64 `json:"vmVfsCachePressure,omitempty" tf:"vm_vfs_cache_pressure,omitempty"` +} + +type LinuxOsConfigSysctlConfigObservation struct { + + // The sysctl setting fs.aio-max-nr. Must be between 65536 and 6553500. Changing this forces a new resource to be created. + FsAioMaxNr *float64 `json:"fsAioMaxNr,omitempty" tf:"fs_aio_max_nr,omitempty"` + + // The sysctl setting fs.file-max. Must be between 8192 and 12000500. Changing this forces a new resource to be created. 
+ FsFileMax *float64 `json:"fsFileMax,omitempty" tf:"fs_file_max,omitempty"` + + // The sysctl setting fs.inotify.max_user_watches. Must be between 781250 and 2097152. Changing this forces a new resource to be created. + FsInotifyMaxUserWatches *float64 `json:"fsInotifyMaxUserWatches,omitempty" tf:"fs_inotify_max_user_watches,omitempty"` + + // The sysctl setting fs.nr_open. Must be between 8192 and 20000500. Changing this forces a new resource to be created. + FsNrOpen *float64 `json:"fsNrOpen,omitempty" tf:"fs_nr_open,omitempty"` + + // The sysctl setting kernel.threads-max. Must be between 20 and 513785. Changing this forces a new resource to be created. + KernelThreadsMax *float64 `json:"kernelThreadsMax,omitempty" tf:"kernel_threads_max,omitempty"` + + // The sysctl setting net.core.netdev_max_backlog. Must be between 1000 and 3240000. Changing this forces a new resource to be created. + NetCoreNetdevMaxBacklog *float64 `json:"netCoreNetdevMaxBacklog,omitempty" tf:"net_core_netdev_max_backlog,omitempty"` + + // The sysctl setting net.core.optmem_max. Must be between 20480 and 4194304. Changing this forces a new resource to be created. + NetCoreOptmemMax *float64 `json:"netCoreOptmemMax,omitempty" tf:"net_core_optmem_max,omitempty"` + + // The sysctl setting net.core.rmem_default. Must be between 212992 and 134217728. Changing this forces a new resource to be created. + NetCoreRmemDefault *float64 `json:"netCoreRmemDefault,omitempty" tf:"net_core_rmem_default,omitempty"` + + // The sysctl setting net.core.rmem_max. Must be between 212992 and 134217728. Changing this forces a new resource to be created. + NetCoreRmemMax *float64 `json:"netCoreRmemMax,omitempty" tf:"net_core_rmem_max,omitempty"` + + // The sysctl setting net.core.somaxconn. Must be between 4096 and 3240000. Changing this forces a new resource to be created. 
+ NetCoreSomaxconn *float64 `json:"netCoreSomaxconn,omitempty" tf:"net_core_somaxconn,omitempty"` + + // The sysctl setting net.core.wmem_default. Must be between 212992 and 134217728. Changing this forces a new resource to be created. + NetCoreWmemDefault *float64 `json:"netCoreWmemDefault,omitempty" tf:"net_core_wmem_default,omitempty"` + + // The sysctl setting net.core.wmem_max. Must be between 212992 and 134217728. Changing this forces a new resource to be created. + NetCoreWmemMax *float64 `json:"netCoreWmemMax,omitempty" tf:"net_core_wmem_max,omitempty"` + + // The sysctl setting net.ipv4.ip_local_port_range max value. Must be between 32768 and 65535. Changing this forces a new resource to be created. + NetIPv4IPLocalPortRangeMax *float64 `json:"netIpv4IpLocalPortRangeMax,omitempty" tf:"net_ipv4_ip_local_port_range_max,omitempty"` + + // The sysctl setting net.ipv4.ip_local_port_range min value. Must be between 1024 and 60999. Changing this forces a new resource to be created. + NetIPv4IPLocalPortRangeMin *float64 `json:"netIpv4IpLocalPortRangeMin,omitempty" tf:"net_ipv4_ip_local_port_range_min,omitempty"` + + // The sysctl setting net.ipv4.neigh.default.gc_thresh1. Must be between 128 and 80000. Changing this forces a new resource to be created. + NetIPv4NeighDefaultGcThresh1 *float64 `json:"netIpv4NeighDefaultGcThresh1,omitempty" tf:"net_ipv4_neigh_default_gc_thresh1,omitempty"` + + // The sysctl setting net.ipv4.neigh.default.gc_thresh2. Must be between 512 and 90000. Changing this forces a new resource to be created. + NetIPv4NeighDefaultGcThresh2 *float64 `json:"netIpv4NeighDefaultGcThresh2,omitempty" tf:"net_ipv4_neigh_default_gc_thresh2,omitempty"` + + // The sysctl setting net.ipv4.neigh.default.gc_thresh3. Must be between 1024 and 100000. Changing this forces a new resource to be created. 
+ NetIPv4NeighDefaultGcThresh3 *float64 `json:"netIpv4NeighDefaultGcThresh3,omitempty" tf:"net_ipv4_neigh_default_gc_thresh3,omitempty"` + + // The sysctl setting net.ipv4.tcp_fin_timeout. Must be between 5 and 120. Changing this forces a new resource to be created. + NetIPv4TCPFinTimeout *float64 `json:"netIpv4TcpFinTimeout,omitempty" tf:"net_ipv4_tcp_fin_timeout,omitempty"` + + // The sysctl setting net.ipv4.tcp_keepalive_intvl. Must be between 10 and 90. Changing this forces a new resource to be created. + NetIPv4TCPKeepaliveIntvl *float64 `json:"netIpv4TcpKeepaliveIntvl,omitempty" tf:"net_ipv4_tcp_keepalive_intvl,omitempty"` + + // The sysctl setting net.ipv4.tcp_keepalive_probes. Must be between 1 and 15. Changing this forces a new resource to be created. + NetIPv4TCPKeepaliveProbes *float64 `json:"netIpv4TcpKeepaliveProbes,omitempty" tf:"net_ipv4_tcp_keepalive_probes,omitempty"` + + // The sysctl setting net.ipv4.tcp_keepalive_time. Must be between 30 and 432000. Changing this forces a new resource to be created. + NetIPv4TCPKeepaliveTime *float64 `json:"netIpv4TcpKeepaliveTime,omitempty" tf:"net_ipv4_tcp_keepalive_time,omitempty"` + + // The sysctl setting net.ipv4.tcp_max_syn_backlog. Must be between 128 and 3240000. Changing this forces a new resource to be created. + NetIPv4TCPMaxSynBacklog *float64 `json:"netIpv4TcpMaxSynBacklog,omitempty" tf:"net_ipv4_tcp_max_syn_backlog,omitempty"` + + // The sysctl setting net.ipv4.tcp_max_tw_buckets. Must be between 8000 and 1440000. Changing this forces a new resource to be created. + NetIPv4TCPMaxTwBuckets *float64 `json:"netIpv4TcpMaxTwBuckets,omitempty" tf:"net_ipv4_tcp_max_tw_buckets,omitempty"` + + // Is sysctl setting net.ipv4.tcp_tw_reuse enabled? Changing this forces a new resource to be created. + NetIPv4TCPTwReuse *bool `json:"netIpv4TcpTwReuse,omitempty" tf:"net_ipv4_tcp_tw_reuse,omitempty"` + + // The sysctl setting net.netfilter.nf_conntrack_buckets. Must be between 65536 and 524288. 
Changing this forces a new resource to be created. + NetNetfilterNfConntrackBuckets *float64 `json:"netNetfilterNfConntrackBuckets,omitempty" tf:"net_netfilter_nf_conntrack_buckets,omitempty"` + + // The sysctl setting net.netfilter.nf_conntrack_max. Must be between 131072 and 2097152. Changing this forces a new resource to be created. + NetNetfilterNfConntrackMax *float64 `json:"netNetfilterNfConntrackMax,omitempty" tf:"net_netfilter_nf_conntrack_max,omitempty"` + + // The sysctl setting vm.max_map_count. Must be between 65530 and 262144. Changing this forces a new resource to be created. + VMMaxMapCount *float64 `json:"vmMaxMapCount,omitempty" tf:"vm_max_map_count,omitempty"` + + // The sysctl setting vm.swappiness. Must be between 0 and 100. Changing this forces a new resource to be created. + VMSwappiness *float64 `json:"vmSwappiness,omitempty" tf:"vm_swappiness,omitempty"` + + // The sysctl setting vm.vfs_cache_pressure. Must be between 0 and 100. Changing this forces a new resource to be created. + VMVfsCachePressure *float64 `json:"vmVfsCachePressure,omitempty" tf:"vm_vfs_cache_pressure,omitempty"` +} + +type LinuxOsConfigSysctlConfigParameters struct { + + // The sysctl setting fs.aio-max-nr. Must be between 65536 and 6553500. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + FsAioMaxNr *float64 `json:"fsAioMaxNr,omitempty" tf:"fs_aio_max_nr,omitempty"` + + // The sysctl setting fs.file-max. Must be between 8192 and 12000500. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + FsFileMax *float64 `json:"fsFileMax,omitempty" tf:"fs_file_max,omitempty"` + + // The sysctl setting fs.inotify.max_user_watches. Must be between 781250 and 2097152. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + FsInotifyMaxUserWatches *float64 `json:"fsInotifyMaxUserWatches,omitempty" tf:"fs_inotify_max_user_watches,omitempty"` + + // The sysctl setting fs.nr_open. Must be between 8192 and 20000500. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + FsNrOpen *float64 `json:"fsNrOpen,omitempty" tf:"fs_nr_open,omitempty"` + + // The sysctl setting kernel.threads-max. Must be between 20 and 513785. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + KernelThreadsMax *float64 `json:"kernelThreadsMax,omitempty" tf:"kernel_threads_max,omitempty"` + + // The sysctl setting net.core.netdev_max_backlog. Must be between 1000 and 3240000. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + NetCoreNetdevMaxBacklog *float64 `json:"netCoreNetdevMaxBacklog,omitempty" tf:"net_core_netdev_max_backlog,omitempty"` + + // The sysctl setting net.core.optmem_max. Must be between 20480 and 4194304. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + NetCoreOptmemMax *float64 `json:"netCoreOptmemMax,omitempty" tf:"net_core_optmem_max,omitempty"` + + // The sysctl setting net.core.rmem_default. Must be between 212992 and 134217728. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + NetCoreRmemDefault *float64 `json:"netCoreRmemDefault,omitempty" tf:"net_core_rmem_default,omitempty"` + + // The sysctl setting net.core.rmem_max. Must be between 212992 and 134217728. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + NetCoreRmemMax *float64 `json:"netCoreRmemMax,omitempty" tf:"net_core_rmem_max,omitempty"` + + // The sysctl setting net.core.somaxconn. Must be between 4096 and 3240000. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + NetCoreSomaxconn *float64 `json:"netCoreSomaxconn,omitempty" tf:"net_core_somaxconn,omitempty"` + + // The sysctl setting net.core.wmem_default. Must be between 212992 and 134217728. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + NetCoreWmemDefault *float64 `json:"netCoreWmemDefault,omitempty" tf:"net_core_wmem_default,omitempty"` + + // The sysctl setting net.core.wmem_max. Must be between 212992 and 134217728. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + NetCoreWmemMax *float64 `json:"netCoreWmemMax,omitempty" tf:"net_core_wmem_max,omitempty"` + + // The sysctl setting net.ipv4.ip_local_port_range max value. Must be between 32768 and 65535. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + NetIPv4IPLocalPortRangeMax *float64 `json:"netIpv4IpLocalPortRangeMax,omitempty" tf:"net_ipv4_ip_local_port_range_max,omitempty"` + + // The sysctl setting net.ipv4.ip_local_port_range min value. Must be between 1024 and 60999. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + NetIPv4IPLocalPortRangeMin *float64 `json:"netIpv4IpLocalPortRangeMin,omitempty" tf:"net_ipv4_ip_local_port_range_min,omitempty"` + + // The sysctl setting net.ipv4.neigh.default.gc_thresh1. Must be between 128 and 80000. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + NetIPv4NeighDefaultGcThresh1 *float64 `json:"netIpv4NeighDefaultGcThresh1,omitempty" tf:"net_ipv4_neigh_default_gc_thresh1,omitempty"` + + // The sysctl setting net.ipv4.neigh.default.gc_thresh2. Must be between 512 and 90000. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + NetIPv4NeighDefaultGcThresh2 *float64 `json:"netIpv4NeighDefaultGcThresh2,omitempty" tf:"net_ipv4_neigh_default_gc_thresh2,omitempty"` + + // The sysctl setting net.ipv4.neigh.default.gc_thresh3. Must be between 1024 and 100000. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + NetIPv4NeighDefaultGcThresh3 *float64 `json:"netIpv4NeighDefaultGcThresh3,omitempty" tf:"net_ipv4_neigh_default_gc_thresh3,omitempty"` + + // The sysctl setting net.ipv4.tcp_fin_timeout. Must be between 5 and 120. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + NetIPv4TCPFinTimeout *float64 `json:"netIpv4TcpFinTimeout,omitempty" tf:"net_ipv4_tcp_fin_timeout,omitempty"` + + // The sysctl setting net.ipv4.tcp_keepalive_intvl. Must be between 10 and 90. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + NetIPv4TCPKeepaliveIntvl *float64 `json:"netIpv4TcpKeepaliveIntvl,omitempty" tf:"net_ipv4_tcp_keepalive_intvl,omitempty"` + + // The sysctl setting net.ipv4.tcp_keepalive_probes. Must be between 1 and 15. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + NetIPv4TCPKeepaliveProbes *float64 `json:"netIpv4TcpKeepaliveProbes,omitempty" tf:"net_ipv4_tcp_keepalive_probes,omitempty"` + + // The sysctl setting net.ipv4.tcp_keepalive_time. Must be between 30 and 432000. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + NetIPv4TCPKeepaliveTime *float64 `json:"netIpv4TcpKeepaliveTime,omitempty" tf:"net_ipv4_tcp_keepalive_time,omitempty"` + + // The sysctl setting net.ipv4.tcp_max_syn_backlog. Must be between 128 and 3240000. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + NetIPv4TCPMaxSynBacklog *float64 `json:"netIpv4TcpMaxSynBacklog,omitempty" tf:"net_ipv4_tcp_max_syn_backlog,omitempty"` + + // The sysctl setting net.ipv4.tcp_max_tw_buckets. Must be between 8000 and 1440000. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + NetIPv4TCPMaxTwBuckets *float64 `json:"netIpv4TcpMaxTwBuckets,omitempty" tf:"net_ipv4_tcp_max_tw_buckets,omitempty"` + + // Is sysctl setting net.ipv4.tcp_tw_reuse enabled? Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + NetIPv4TCPTwReuse *bool `json:"netIpv4TcpTwReuse,omitempty" tf:"net_ipv4_tcp_tw_reuse,omitempty"` + + // The sysctl setting net.netfilter.nf_conntrack_buckets. Must be between 65536 and 524288. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + NetNetfilterNfConntrackBuckets *float64 `json:"netNetfilterNfConntrackBuckets,omitempty" tf:"net_netfilter_nf_conntrack_buckets,omitempty"` + + // The sysctl setting net.netfilter.nf_conntrack_max. Must be between 131072 and 2097152. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + NetNetfilterNfConntrackMax *float64 `json:"netNetfilterNfConntrackMax,omitempty" tf:"net_netfilter_nf_conntrack_max,omitempty"` + + // The sysctl setting vm.max_map_count. Must be between 65530 and 262144. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + VMMaxMapCount *float64 `json:"vmMaxMapCount,omitempty" tf:"vm_max_map_count,omitempty"` + + // The sysctl setting vm.swappiness. Must be between 0 and 100. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + VMSwappiness *float64 `json:"vmSwappiness,omitempty" tf:"vm_swappiness,omitempty"` + + // The sysctl setting vm.vfs_cache_pressure. Must be between 0 and 100. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + VMVfsCachePressure *float64 `json:"vmVfsCachePressure,omitempty" tf:"vm_vfs_cache_pressure,omitempty"` +} + +type NodeNetworkProfileAllowedHostPortsInitParameters struct { + + // Specifies the end of the port range. + PortEnd *float64 `json:"portEnd,omitempty" tf:"port_end,omitempty"` + + // Specifies the start of the port range. + PortStart *float64 `json:"portStart,omitempty" tf:"port_start,omitempty"` + + // Specifies the protocol of the port range. Possible values are TCP and UDP. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` +} + +type NodeNetworkProfileAllowedHostPortsObservation struct { + + // Specifies the end of the port range. + PortEnd *float64 `json:"portEnd,omitempty" tf:"port_end,omitempty"` + + // Specifies the start of the port range. + PortStart *float64 `json:"portStart,omitempty" tf:"port_start,omitempty"` + + // Specifies the protocol of the port range. Possible values are TCP and UDP. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` +} + +type NodeNetworkProfileAllowedHostPortsParameters struct { + + // Specifies the end of the port range. + // +kubebuilder:validation:Optional + PortEnd *float64 `json:"portEnd,omitempty" tf:"port_end,omitempty"` + + // Specifies the start of the port range. + // +kubebuilder:validation:Optional + PortStart *float64 `json:"portStart,omitempty" tf:"port_start,omitempty"` + + // Specifies the protocol of the port range. Possible values are TCP and UDP. + // +kubebuilder:validation:Optional + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` +} + +// KubernetesClusterNodePoolSpec defines the desired state of KubernetesClusterNodePool +type KubernetesClusterNodePoolSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider KubernetesClusterNodePoolParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. 
+ // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider KubernetesClusterNodePoolInitParameters `json:"initProvider,omitempty"` +} + +// KubernetesClusterNodePoolStatus defines the observed state of KubernetesClusterNodePool. +type KubernetesClusterNodePoolStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider KubernetesClusterNodePoolObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// KubernetesClusterNodePool is the Schema for the KubernetesClusterNodePools API. 
Manages a Node Pool within a Kubernetes Cluster +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type KubernetesClusterNodePool struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.vmSize) || (has(self.initProvider) && has(self.initProvider.vmSize))",message="spec.forProvider.vmSize is a required parameter" + Spec KubernetesClusterNodePoolSpec `json:"spec"` + Status KubernetesClusterNodePoolStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// KubernetesClusterNodePoolList contains a list of KubernetesClusterNodePools +type KubernetesClusterNodePoolList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []KubernetesClusterNodePool `json:"items"` +} + +// Repository type metadata. +var ( + KubernetesClusterNodePool_Kind = "KubernetesClusterNodePool" + KubernetesClusterNodePool_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: KubernetesClusterNodePool_Kind}.String() + KubernetesClusterNodePool_KindAPIVersion = KubernetesClusterNodePool_Kind + "." 
+ CRDGroupVersion.String() + KubernetesClusterNodePool_GroupVersionKind = CRDGroupVersion.WithKind(KubernetesClusterNodePool_Kind) +) + +func init() { + SchemeBuilder.Register(&KubernetesClusterNodePool{}, &KubernetesClusterNodePoolList{}) +} diff --git a/apis/containerservice/v1beta2/zz_kubernetesfleetmanager_terraformed.go b/apis/containerservice/v1beta2/zz_kubernetesfleetmanager_terraformed.go new file mode 100755 index 000000000..1bf6c5d4a --- /dev/null +++ b/apis/containerservice/v1beta2/zz_kubernetesfleetmanager_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this KubernetesFleetManager +func (mg *KubernetesFleetManager) GetTerraformResourceType() string { + return "azurerm_kubernetes_fleet_manager" +} + +// GetConnectionDetailsMapping for this KubernetesFleetManager +func (tr *KubernetesFleetManager) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this KubernetesFleetManager +func (tr *KubernetesFleetManager) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this KubernetesFleetManager +func (tr *KubernetesFleetManager) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this KubernetesFleetManager +func (tr *KubernetesFleetManager) GetID() string { + if tr.Status.AtProvider.ID 
== nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this KubernetesFleetManager +func (tr *KubernetesFleetManager) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this KubernetesFleetManager +func (tr *KubernetesFleetManager) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this KubernetesFleetManager +func (tr *KubernetesFleetManager) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this KubernetesFleetManager +func (tr *KubernetesFleetManager) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this KubernetesFleetManager using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *KubernetesFleetManager) LateInitialize(attrs []byte) (bool, error) { + params := &KubernetesFleetManagerParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *KubernetesFleetManager) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/containerservice/v1beta2/zz_kubernetesfleetmanager_types.go b/apis/containerservice/v1beta2/zz_kubernetesfleetmanager_types.go new file mode 100755 index 000000000..fb0ff0f91 --- /dev/null +++ b/apis/containerservice/v1beta2/zz_kubernetesfleetmanager_types.go @@ -0,0 +1,149 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type HubProfileInitParameters struct { + DNSPrefix *string `json:"dnsPrefix,omitempty" tf:"dns_prefix,omitempty"` +} + +type HubProfileObservation struct { + DNSPrefix *string `json:"dnsPrefix,omitempty" tf:"dns_prefix,omitempty"` + + Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"` + + KubernetesVersion *string `json:"kubernetesVersion,omitempty" tf:"kubernetes_version,omitempty"` +} + +type HubProfileParameters struct { + + // +kubebuilder:validation:Optional + DNSPrefix *string `json:"dnsPrefix" tf:"dns_prefix,omitempty"` +} + +type KubernetesFleetManagerInitParameters struct { + HubProfile *HubProfileInitParameters `json:"hubProfile,omitempty" tf:"hub_profile,omitempty"` + + // The Azure Region where the Kubernetes Fleet Manager should exist. Changing this forces a new Kubernetes Fleet Manager to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A mapping of tags which should be assigned to the Kubernetes Fleet Manager. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type KubernetesFleetManagerObservation struct { + HubProfile *HubProfileObservation `json:"hubProfile,omitempty" tf:"hub_profile,omitempty"` + + // The ID of the Kubernetes Fleet Manager. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The Azure Region where the Kubernetes Fleet Manager should exist. Changing this forces a new Kubernetes Fleet Manager to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the name of the Resource Group within which this Kubernetes Fleet Manager should exist. Changing this forces a new Kubernetes Fleet Manager to be created. 
+ ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A mapping of tags which should be assigned to the Kubernetes Fleet Manager. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type KubernetesFleetManagerParameters struct { + + // +kubebuilder:validation:Optional + HubProfile *HubProfileParameters `json:"hubProfile,omitempty" tf:"hub_profile,omitempty"` + + // The Azure Region where the Kubernetes Fleet Manager should exist. Changing this forces a new Kubernetes Fleet Manager to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the name of the Resource Group within which this Kubernetes Fleet Manager should exist. Changing this forces a new Kubernetes Fleet Manager to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A mapping of tags which should be assigned to the Kubernetes Fleet Manager. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +// KubernetesFleetManagerSpec defines the desired state of KubernetesFleetManager +type KubernetesFleetManagerSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider KubernetesFleetManagerParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider KubernetesFleetManagerInitParameters `json:"initProvider,omitempty"` +} + +// KubernetesFleetManagerStatus defines the observed state of KubernetesFleetManager. +type KubernetesFleetManagerStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider KubernetesFleetManagerObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// KubernetesFleetManager is the Schema for the KubernetesFleetManagers API. Manages a Kubernetes Fleet Manager. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type KubernetesFleetManager struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + Spec KubernetesFleetManagerSpec `json:"spec"` + Status KubernetesFleetManagerStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// KubernetesFleetManagerList contains a list of KubernetesFleetManagers +type KubernetesFleetManagerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []KubernetesFleetManager `json:"items"` +} + +// Repository type metadata. +var ( + KubernetesFleetManager_Kind = "KubernetesFleetManager" + KubernetesFleetManager_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: KubernetesFleetManager_Kind}.String() + KubernetesFleetManager_KindAPIVersion = KubernetesFleetManager_Kind + "." 
+ CRDGroupVersion.String() + KubernetesFleetManager_GroupVersionKind = CRDGroupVersion.WithKind(KubernetesFleetManager_Kind) +) + +func init() { + SchemeBuilder.Register(&KubernetesFleetManager{}, &KubernetesFleetManagerList{}) +} diff --git a/apis/cosmosdb/v1beta1/zz_cassandradatacenter_types.go b/apis/cosmosdb/v1beta1/zz_cassandradatacenter_types.go index 7740d7e26..1741cfb2f 100755 --- a/apis/cosmosdb/v1beta1/zz_cassandradatacenter_types.go +++ b/apis/cosmosdb/v1beta1/zz_cassandradatacenter_types.go @@ -25,7 +25,7 @@ type CassandraDatacenterInitParameters struct { Base64EncodedYamlFragment *string `json:"base64EncodedYamlFragment,omitempty" tf:"base64_encoded_yaml_fragment,omitempty"` // The ID of the delegated management subnet for this Cassandra Datacenter. Changing this forces a new Cassandra Datacenter to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.Subnet + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() DelegatedManagementSubnetID *string `json:"delegatedManagementSubnetId,omitempty" tf:"delegated_management_subnet_id,omitempty"` @@ -113,7 +113,7 @@ type CassandraDatacenterParameters struct { Base64EncodedYamlFragment *string `json:"base64EncodedYamlFragment,omitempty" tf:"base64_encoded_yaml_fragment,omitempty"` // The ID of the Cassandra Cluster. Changing this forces a new Cassandra Datacenter to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cosmosdb/v1beta1.CassandraCluster + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cosmosdb/v1beta2.CassandraCluster // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional CassandraClusterID *string `json:"cassandraClusterId,omitempty" tf:"cassandra_cluster_id,omitempty"` @@ -127,7 +127,7 @@ type CassandraDatacenterParameters struct { CassandraClusterIDSelector *v1.Selector `json:"cassandraClusterIdSelector,omitempty" tf:"-"` // The ID of the delegated management subnet for this Cassandra Datacenter. Changing this forces a new Cassandra Datacenter to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.Subnet + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional DelegatedManagementSubnetID *string `json:"delegatedManagementSubnetId,omitempty" tf:"delegated_management_subnet_id,omitempty"` diff --git a/apis/cosmosdb/v1beta1/zz_generated.conversion_hubs.go b/apis/cosmosdb/v1beta1/zz_generated.conversion_hubs.go index 20ac43a72..dcd1d07ec 100755 --- a/apis/cosmosdb/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/cosmosdb/v1beta1/zz_generated.conversion_hubs.go @@ -6,39 +6,9 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *Account) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *CassandraCluster) Hub() {} - // Hub marks this type as a conversion hub. func (tr *CassandraDatacenter) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *CassandraKeySpace) Hub() {} - -// Hub marks this type as a conversion hub. 
-func (tr *CassandraTable) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *GremlinDatabase) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *GremlinGraph) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *MongoCollection) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *MongoDatabase) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *SQLContainer) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *SQLDatabase) Hub() {} - // Hub marks this type as a conversion hub. func (tr *SQLDedicatedGateway) Hub() {} @@ -56,6 +26,3 @@ func (tr *SQLStoredProcedure) Hub() {} // Hub marks this type as a conversion hub. func (tr *SQLTrigger) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Table) Hub() {} diff --git a/apis/cosmosdb/v1beta1/zz_generated.conversion_spokes.go b/apis/cosmosdb/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..fa985b357 --- /dev/null +++ b/apis/cosmosdb/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,234 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Account to the hub type. +func (tr *Account) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Account type. 
+func (tr *Account) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this CassandraCluster to the hub type. +func (tr *CassandraCluster) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the CassandraCluster type. +func (tr *CassandraCluster) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this CassandraKeySpace to the hub type. +func (tr *CassandraKeySpace) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the CassandraKeySpace type. 
+func (tr *CassandraKeySpace) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this CassandraTable to the hub type. +func (tr *CassandraTable) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the CassandraTable type. +func (tr *CassandraTable) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this GremlinDatabase to the hub type. +func (tr *GremlinDatabase) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the GremlinDatabase type. 
+func (tr *GremlinDatabase) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this GremlinGraph to the hub type. +func (tr *GremlinGraph) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the GremlinGraph type. +func (tr *GremlinGraph) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this MongoCollection to the hub type. +func (tr *MongoCollection) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the MongoCollection type. 
+func (tr *MongoCollection) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this MongoDatabase to the hub type. +func (tr *MongoDatabase) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the MongoDatabase type. +func (tr *MongoDatabase) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this SQLContainer to the hub type. +func (tr *SQLContainer) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the SQLContainer type. 
+func (tr *SQLContainer) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this SQLDatabase to the hub type. +func (tr *SQLDatabase) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the SQLDatabase type. +func (tr *SQLDatabase) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Table to the hub type. +func (tr *Table) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Table type. 
+func (tr *Table) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/cosmosdb/v1beta1/zz_generated.resolvers.go b/apis/cosmosdb/v1beta1/zz_generated.resolvers.go index dec851b39..c81b03444 100644 --- a/apis/cosmosdb/v1beta1/zz_generated.resolvers.go +++ b/apis/cosmosdb/v1beta1/zz_generated.resolvers.go @@ -171,7 +171,7 @@ func (mg *CassandraDatacenter) ResolveReferences(ctx context.Context, c client.R var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("cosmosdb.azure.upbound.io", "v1beta1", "CassandraCluster", "CassandraClusterList") + m, l, err = apisresolver.GetManagedResource("cosmosdb.azure.upbound.io", "v1beta2", "CassandraCluster", "CassandraClusterList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -190,7 +190,7 @@ func (mg *CassandraDatacenter) ResolveReferences(ctx context.Context, c client.R mg.Spec.ForProvider.CassandraClusterID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.CassandraClusterIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "Subnet", "SubnetList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -209,7 +209,7 @@ func (mg *CassandraDatacenter) ResolveReferences(ctx context.Context, c client.R mg.Spec.ForProvider.DelegatedManagementSubnetID = 
reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.DelegatedManagementSubnetIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "Subnet", "SubnetList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -678,7 +678,7 @@ func (mg *SQLDedicatedGateway) ResolveReferences(ctx context.Context, c client.R var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("cosmosdb.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("cosmosdb.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -709,7 +709,7 @@ func (mg *SQLFunction) ResolveReferences(ctx context.Context, c client.Reader) e var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("cosmosdb.azure.upbound.io", "v1beta1", "SQLContainer", "SQLContainerList") + m, l, err = apisresolver.GetManagedResource("cosmosdb.azure.upbound.io", "v1beta2", "SQLContainer", "SQLContainerList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -740,7 +740,7 @@ func (mg *SQLRoleAssignment) ResolveReferences(ctx context.Context, c client.Rea var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("cosmosdb.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("cosmosdb.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list 
for reference resolution") } @@ -797,7 +797,7 @@ func (mg *SQLRoleAssignment) ResolveReferences(ctx context.Context, c client.Rea mg.Spec.ForProvider.RoleDefinitionID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.RoleDefinitionIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("cosmosdb.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("cosmosdb.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -816,7 +816,7 @@ func (mg *SQLRoleAssignment) ResolveReferences(ctx context.Context, c client.Rea mg.Spec.ForProvider.Scope = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ScopeRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("cosmosdb.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("cosmosdb.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -873,7 +873,7 @@ func (mg *SQLRoleAssignment) ResolveReferences(ctx context.Context, c client.Rea mg.Spec.InitProvider.RoleDefinitionID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.RoleDefinitionIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("cosmosdb.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("cosmosdb.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -904,7 +904,7 @@ func (mg *SQLRoleDefinition) ResolveReferences(ctx context.Context, c client.Rea var rsp reference.ResolutionResponse var err error { - m, l, err = 
apisresolver.GetManagedResource("cosmosdb.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("cosmosdb.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -942,7 +942,7 @@ func (mg *SQLRoleDefinition) ResolveReferences(ctx context.Context, c client.Rea mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("cosmosdb.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("cosmosdb.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -992,7 +992,7 @@ func (mg *SQLStoredProcedure) ResolveReferences(ctx context.Context, c client.Re var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("cosmosdb.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("cosmosdb.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1011,7 +1011,7 @@ func (mg *SQLStoredProcedure) ResolveReferences(ctx context.Context, c client.Re mg.Spec.ForProvider.AccountName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.AccountNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("cosmosdb.azure.upbound.io", "v1beta1", "SQLContainer", "SQLContainerList") + m, l, err = apisresolver.GetManagedResource("cosmosdb.azure.upbound.io", "v1beta2", "SQLContainer", "SQLContainerList") if err != nil { return errors.Wrap(err, "failed to 
get the reference target managed resource and its list for reference resolution") } @@ -1030,7 +1030,7 @@ func (mg *SQLStoredProcedure) ResolveReferences(ctx context.Context, c client.Re mg.Spec.ForProvider.ContainerName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ContainerNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("cosmosdb.azure.upbound.io", "v1beta1", "SQLDatabase", "SQLDatabaseList") + m, l, err = apisresolver.GetManagedResource("cosmosdb.azure.upbound.io", "v1beta2", "SQLDatabase", "SQLDatabaseList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1080,7 +1080,7 @@ func (mg *SQLTrigger) ResolveReferences(ctx context.Context, c client.Reader) er var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("cosmosdb.azure.upbound.io", "v1beta1", "SQLContainer", "SQLContainerList") + m, l, err = apisresolver.GetManagedResource("cosmosdb.azure.upbound.io", "v1beta2", "SQLContainer", "SQLContainerList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/cosmosdb/v1beta1/zz_sqldedicatedgateway_types.go b/apis/cosmosdb/v1beta1/zz_sqldedicatedgateway_types.go index b7c45d5e6..2e6dae62f 100755 --- a/apis/cosmosdb/v1beta1/zz_sqldedicatedgateway_types.go +++ b/apis/cosmosdb/v1beta1/zz_sqldedicatedgateway_types.go @@ -40,7 +40,7 @@ type SQLDedicatedGatewayObservation struct { type SQLDedicatedGatewayParameters struct { // The resource ID of the CosmosDB Account. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cosmosdb/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cosmosdb/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional CosmosDBAccountID *string `json:"cosmosdbAccountId,omitempty" tf:"cosmosdb_account_id,omitempty"` diff --git a/apis/cosmosdb/v1beta1/zz_sqlfunction_types.go b/apis/cosmosdb/v1beta1/zz_sqlfunction_types.go index 21bdc343a..89167d967 100755 --- a/apis/cosmosdb/v1beta1/zz_sqlfunction_types.go +++ b/apis/cosmosdb/v1beta1/zz_sqlfunction_types.go @@ -38,7 +38,7 @@ type SQLFunctionParameters struct { Body *string `json:"body,omitempty" tf:"body,omitempty"` // The id of the Cosmos DB SQL Container to create the SQL User Defined Function within. Changing this forces a new SQL User Defined Function to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cosmosdb/v1beta1.SQLContainer + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cosmosdb/v1beta2.SQLContainer // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() // +kubebuilder:validation:Optional ContainerID *string `json:"containerId,omitempty" tf:"container_id,omitempty"` diff --git a/apis/cosmosdb/v1beta1/zz_sqlroleassignment_types.go b/apis/cosmosdb/v1beta1/zz_sqlroleassignment_types.go index a0a39a6af..455fc2815 100755 --- a/apis/cosmosdb/v1beta1/zz_sqlroleassignment_types.go +++ b/apis/cosmosdb/v1beta1/zz_sqlroleassignment_types.go @@ -16,7 +16,7 @@ import ( type SQLRoleAssignmentInitParameters struct { // The name of the Cosmos DB Account. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cosmosdb/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cosmosdb/v1beta2.Account AccountName *string `json:"accountName,omitempty" tf:"account_name,omitempty"` // Reference to a Account in cosmosdb to populate accountName. @@ -59,7 +59,7 @@ type SQLRoleAssignmentInitParameters struct { RoleDefinitionIDSelector *v1.Selector `json:"roleDefinitionIdSelector,omitempty" tf:"-"` // The data plane resource path for which access is being granted through this Cosmos DB SQL Role Assignment. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cosmosdb/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cosmosdb/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() Scope *string `json:"scope,omitempty" tf:"scope,omitempty"` @@ -99,7 +99,7 @@ type SQLRoleAssignmentObservation struct { type SQLRoleAssignmentParameters struct { // The name of the Cosmos DB Account. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cosmosdb/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cosmosdb/v1beta2.Account // +kubebuilder:validation:Optional AccountName *string `json:"accountName,omitempty" tf:"account_name,omitempty"` @@ -147,7 +147,7 @@ type SQLRoleAssignmentParameters struct { RoleDefinitionIDSelector *v1.Selector `json:"roleDefinitionIdSelector,omitempty" tf:"-"` // The data plane resource path for which access is being granted through this Cosmos DB SQL Role Assignment. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cosmosdb/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cosmosdb/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional Scope *string `json:"scope,omitempty" tf:"scope,omitempty"` diff --git a/apis/cosmosdb/v1beta1/zz_sqlroledefinition_types.go b/apis/cosmosdb/v1beta1/zz_sqlroledefinition_types.go index 4f8f487b5..d1a4962e2 100755 --- a/apis/cosmosdb/v1beta1/zz_sqlroledefinition_types.go +++ b/apis/cosmosdb/v1beta1/zz_sqlroledefinition_types.go @@ -38,7 +38,7 @@ type PermissionsParameters struct { type SQLRoleDefinitionInitParameters struct { // The name of the Cosmos DB Account. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cosmosdb/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cosmosdb/v1beta2.Account AccountName *string `json:"accountName,omitempty" tf:"account_name,omitempty"` // Reference to a Account in cosmosdb to populate accountName. @@ -109,7 +109,7 @@ type SQLRoleDefinitionObservation struct { type SQLRoleDefinitionParameters struct { // The name of the Cosmos DB Account. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cosmosdb/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cosmosdb/v1beta2.Account // +kubebuilder:validation:Optional AccountName *string `json:"accountName,omitempty" tf:"account_name,omitempty"` diff --git a/apis/cosmosdb/v1beta1/zz_sqlstoredprocedure_types.go b/apis/cosmosdb/v1beta1/zz_sqlstoredprocedure_types.go index a15970095..c56592ae6 100755 --- a/apis/cosmosdb/v1beta1/zz_sqlstoredprocedure_types.go +++ b/apis/cosmosdb/v1beta1/zz_sqlstoredprocedure_types.go @@ -43,7 +43,7 @@ type SQLStoredProcedureObservation struct { type SQLStoredProcedureParameters struct { // The name of the Cosmos DB Account to create the stored procedure within. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cosmosdb/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cosmosdb/v1beta2.Account // +kubebuilder:validation:Optional AccountName *string `json:"accountName,omitempty" tf:"account_name,omitempty"` @@ -60,7 +60,7 @@ type SQLStoredProcedureParameters struct { Body *string `json:"body,omitempty" tf:"body,omitempty"` // The name of the Cosmos DB SQL Container to create the stored procedure within. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cosmosdb/v1beta1.SQLContainer + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cosmosdb/v1beta2.SQLContainer // +kubebuilder:validation:Optional ContainerName *string `json:"containerName,omitempty" tf:"container_name,omitempty"` @@ -73,7 +73,7 @@ type SQLStoredProcedureParameters struct { ContainerNameSelector *v1.Selector `json:"containerNameSelector,omitempty" tf:"-"` // The name of the Cosmos DB SQL Database to create the stored procedure within. 
Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cosmosdb/v1beta1.SQLDatabase + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cosmosdb/v1beta2.SQLDatabase // +kubebuilder:validation:Optional DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` diff --git a/apis/cosmosdb/v1beta1/zz_sqltrigger_types.go b/apis/cosmosdb/v1beta1/zz_sqltrigger_types.go index 835f0e0de..443d9b719 100755 --- a/apis/cosmosdb/v1beta1/zz_sqltrigger_types.go +++ b/apis/cosmosdb/v1beta1/zz_sqltrigger_types.go @@ -50,7 +50,7 @@ type SQLTriggerParameters struct { Body *string `json:"body,omitempty" tf:"body,omitempty"` // The id of the Cosmos DB SQL Container to create the SQL Trigger within. Changing this forces a new SQL Trigger to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cosmosdb/v1beta1.SQLContainer + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cosmosdb/v1beta2.SQLContainer // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() // +kubebuilder:validation:Optional ContainerID *string `json:"containerId,omitempty" tf:"container_id,omitempty"` diff --git a/apis/cosmosdb/v1beta2/zz_account_terraformed.go b/apis/cosmosdb/v1beta2/zz_account_terraformed.go new file mode 100755 index 000000000..9afbbf0d7 --- /dev/null +++ b/apis/cosmosdb/v1beta2/zz_account_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Account +func (mg *Account) GetTerraformResourceType() string { + return "azurerm_cosmosdb_account" +} + +// GetConnectionDetailsMapping for this Account +func (tr *Account) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"connection_strings[*]": "status.atProvider.connectionStrings[*]", "primary_key": "status.atProvider.primaryKey", "primary_mongodb_connection_string": "status.atProvider.primaryMongodbConnectionString", "primary_readonly_key": "status.atProvider.primaryReadonlyKey", "primary_readonly_mongodb_connection_string": "status.atProvider.primaryReadonlyMongodbConnectionString", "primary_readonly_sql_connection_string": "status.atProvider.primaryReadonlySqlConnectionString", "primary_sql_connection_string": "status.atProvider.primarySqlConnectionString", "secondary_key": "status.atProvider.secondaryKey", "secondary_mongodb_connection_string": "status.atProvider.secondaryMongodbConnectionString", "secondary_readonly_key": "status.atProvider.secondaryReadonlyKey", "secondary_readonly_mongodb_connection_string": "status.atProvider.secondaryReadonlyMongodbConnectionString", "secondary_readonly_sql_connection_string": "status.atProvider.secondaryReadonlySqlConnectionString", "secondary_sql_connection_string": "status.atProvider.secondarySqlConnectionString"} +} + +// GetObservation of this Account +func (tr *Account) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Account +func (tr *Account) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + 
return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Account +func (tr *Account) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Account +func (tr *Account) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Account +func (tr *Account) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Account +func (tr *Account) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Account +func (tr *Account) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Account using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Account) LateInitialize(attrs []byte) (bool, error) { + params := &AccountParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Account) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/cosmosdb/v1beta2/zz_account_types.go b/apis/cosmosdb/v1beta2/zz_account_types.go new file mode 100755 index 000000000..101e402ba --- /dev/null +++ b/apis/cosmosdb/v1beta2/zz_account_types.go @@ -0,0 +1,891 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AccountInitParameters struct { + + // Is write operations on metadata resources (databases, containers, throughput) via account keys enabled? Defaults to true. + AccessKeyMetadataWritesEnabled *bool `json:"accessKeyMetadataWritesEnabled,omitempty" tf:"access_key_metadata_writes_enabled,omitempty"` + + // An analytical_storage block as defined below. 
+ AnalyticalStorage *AnalyticalStorageInitParameters `json:"analyticalStorage,omitempty" tf:"analytical_storage,omitempty"` + + // Enable Analytical Storage option for this Cosmos DB account. Defaults to false. Enabling and then disabling analytical storage forces a new resource to be created. + AnalyticalStorageEnabled *bool `json:"analyticalStorageEnabled,omitempty" tf:"analytical_storage_enabled,omitempty"` + + // A backup block as defined below. + Backup *BackupInitParameters `json:"backup,omitempty" tf:"backup,omitempty"` + + // The capabilities which should be enabled for this Cosmos DB account. Value is a capabilities block as defined below. + Capabilities []CapabilitiesInitParameters `json:"capabilities,omitempty" tf:"capabilities,omitempty"` + + // A capacity block as defined below. + Capacity *CapacityInitParameters `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // Specifies one consistency_policy block as defined below, used to define the consistency policy for this CosmosDB account. + ConsistencyPolicy *ConsistencyPolicyInitParameters `json:"consistencyPolicy,omitempty" tf:"consistency_policy,omitempty"` + + // A cors_rule block as defined below. + CorsRule *CorsRuleInitParameters `json:"corsRule,omitempty" tf:"cors_rule,omitempty"` + + // The creation mode for the CosmosDB Account. Possible values are Default and Restore. Changing this forces a new resource to be created. + CreateMode *string `json:"createMode,omitempty" tf:"create_mode,omitempty"` + + // The default identity for accessing Key Vault. Possible values are FirstPartyIdentity, SystemAssignedIdentity or UserAssignedIdentity. Defaults to FirstPartyIdentity. + DefaultIdentityType *string `json:"defaultIdentityType,omitempty" tf:"default_identity_type,omitempty"` + + // Enable automatic failover for this Cosmos DB account. 
+ EnableAutomaticFailover *bool `json:"enableAutomaticFailover,omitempty" tf:"enable_automatic_failover,omitempty"` + + // Enable the Free Tier pricing option for this Cosmos DB account. Defaults to false. Changing this forces a new resource to be created. + EnableFreeTier *bool `json:"enableFreeTier,omitempty" tf:"enable_free_tier,omitempty"` + + // Enable multiple write locations for this Cosmos DB account. + EnableMultipleWriteLocations *bool `json:"enableMultipleWriteLocations,omitempty" tf:"enable_multiple_write_locations,omitempty"` + + // Specifies a geo_location resource, used to define where data should be replicated with the failover_priority 0 specifying the primary location. Value is a geo_location block as defined below. + GeoLocation []GeoLocationInitParameters `json:"geoLocation,omitempty" tf:"geo_location,omitempty"` + + // CosmosDB Firewall Support: This value specifies the set of IP addresses or IP address ranges in CIDR form to be included as the allowed list of client IPs for a given database account. IP addresses/ranges must be comma separated and must not contain any spaces. + IPRangeFilter *string `json:"ipRangeFilter,omitempty" tf:"ip_range_filter,omitempty"` + + // An identity block as defined below. + Identity *IdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Enables virtual network filtering for this Cosmos DB account. + IsVirtualNetworkFilterEnabled *bool `json:"isVirtualNetworkFilterEnabled,omitempty" tf:"is_virtual_network_filter_enabled,omitempty"` + + // A versionless Key Vault Key ID for CMK encryption. Changing this forces a new resource to be created. + KeyVaultKeyID *string `json:"keyVaultKeyId,omitempty" tf:"key_vault_key_id,omitempty"` + + // Specifies the Kind of CosmosDB to create - possible values are GlobalDocumentDB, MongoDB and Parse. Defaults to GlobalDocumentDB. Changing this forces a new resource to be created. 
+ Kind *string `json:"kind,omitempty" tf:"kind,omitempty"` + + // Disable local authentication and ensure only MSI and AAD can be used exclusively for authentication. Defaults to false. Can be set only when using the SQL API. + LocalAuthenticationDisabled *bool `json:"localAuthenticationDisabled,omitempty" tf:"local_authentication_disabled,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the minimal TLS version for the CosmosDB account. Possible values are: Tls, Tls11, and Tls12. Defaults to Tls12. + MinimalTLSVersion *string `json:"minimalTlsVersion,omitempty" tf:"minimal_tls_version,omitempty"` + + // The Server Version of a MongoDB account. Possible values are 4.2, 4.0, 3.6, and 3.2. + MongoServerVersion *string `json:"mongoServerVersion,omitempty" tf:"mongo_server_version,omitempty"` + + // If Azure services can bypass ACLs. Defaults to false. + NetworkACLBypassForAzureServices *bool `json:"networkAclBypassForAzureServices,omitempty" tf:"network_acl_bypass_for_azure_services,omitempty"` + + // The list of resource Ids for Network Acl Bypass for this Cosmos DB account. + NetworkACLBypassIds []*string `json:"networkAclBypassIds,omitempty" tf:"network_acl_bypass_ids,omitempty"` + + // Specifies the Offer Type to use for this CosmosDB Account; currently, this can only be set to Standard. + OfferType *string `json:"offerType,omitempty" tf:"offer_type,omitempty"` + + // Is partition merge on the Cosmos DB account enabled? Defaults to false. + PartitionMergeEnabled *bool `json:"partitionMergeEnabled,omitempty" tf:"partition_merge_enabled,omitempty"` + + // Whether or not public network access is allowed for this CosmosDB account. Defaults to true. 
+ PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // A restore block as defined below. + Restore *RestoreInitParameters `json:"restore,omitempty" tf:"restore,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies a virtual_network_rule block as defined below, used to define which subnets are allowed to access this CosmosDB account. + VirtualNetworkRule []VirtualNetworkRuleInitParameters `json:"virtualNetworkRule,omitempty" tf:"virtual_network_rule,omitempty"` +} + +type AccountObservation struct { + + // Is write operations on metadata resources (databases, containers, throughput) via account keys enabled? Defaults to true. + AccessKeyMetadataWritesEnabled *bool `json:"accessKeyMetadataWritesEnabled,omitempty" tf:"access_key_metadata_writes_enabled,omitempty"` + + // An analytical_storage block as defined below. + AnalyticalStorage *AnalyticalStorageObservation `json:"analyticalStorage,omitempty" tf:"analytical_storage,omitempty"` + + // Enable Analytical Storage option for this Cosmos DB account. Defaults to false. Enabling and then disabling analytical storage forces a new resource to be created. + AnalyticalStorageEnabled *bool `json:"analyticalStorageEnabled,omitempty" tf:"analytical_storage_enabled,omitempty"` + + // A backup block as defined below. + Backup *BackupObservation `json:"backup,omitempty" tf:"backup,omitempty"` + + // The capabilities which should be enabled for this Cosmos DB account. Value is a capabilities block as defined below. + Capabilities []CapabilitiesObservation `json:"capabilities,omitempty" tf:"capabilities,omitempty"` + + // A capacity block as defined below. 
+ Capacity *CapacityObservation `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // Specifies one consistency_policy block as defined below, used to define the consistency policy for this CosmosDB account. + ConsistencyPolicy *ConsistencyPolicyObservation `json:"consistencyPolicy,omitempty" tf:"consistency_policy,omitempty"` + + // A cors_rule block as defined below. + CorsRule *CorsRuleObservation `json:"corsRule,omitempty" tf:"cors_rule,omitempty"` + + // The creation mode for the CosmosDB Account. Possible values are Default and Restore. Changing this forces a new resource to be created. + CreateMode *string `json:"createMode,omitempty" tf:"create_mode,omitempty"` + + // The default identity for accessing Key Vault. Possible values are FirstPartyIdentity, SystemAssignedIdentity or UserAssignedIdentity. Defaults to FirstPartyIdentity. + DefaultIdentityType *string `json:"defaultIdentityType,omitempty" tf:"default_identity_type,omitempty"` + + // Enable automatic failover for this Cosmos DB account. + EnableAutomaticFailover *bool `json:"enableAutomaticFailover,omitempty" tf:"enable_automatic_failover,omitempty"` + + // Enable the Free Tier pricing option for this Cosmos DB account. Defaults to false. Changing this forces a new resource to be created. + EnableFreeTier *bool `json:"enableFreeTier,omitempty" tf:"enable_free_tier,omitempty"` + + // Enable multiple write locations for this Cosmos DB account. + EnableMultipleWriteLocations *bool `json:"enableMultipleWriteLocations,omitempty" tf:"enable_multiple_write_locations,omitempty"` + + // The endpoint used to connect to the CosmosDB account. + Endpoint *string `json:"endpoint,omitempty" tf:"endpoint,omitempty"` + + // Specifies a geo_location resource, used to define where data should be replicated with the failover_priority 0 specifying the primary location. Value is a geo_location block as defined below. 
+ GeoLocation []GeoLocationObservation `json:"geoLocation,omitempty" tf:"geo_location,omitempty"` + + // The CosmosDB Account ID. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // CosmosDB Firewall Support: This value specifies the set of IP addresses or IP address ranges in CIDR form to be included as the allowed list of client IPs for a given database account. IP addresses/ranges must be comma separated and must not contain any spaces. + IPRangeFilter *string `json:"ipRangeFilter,omitempty" tf:"ip_range_filter,omitempty"` + + // An identity block as defined below. + Identity *IdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // Enables virtual network filtering for this Cosmos DB account. + IsVirtualNetworkFilterEnabled *bool `json:"isVirtualNetworkFilterEnabled,omitempty" tf:"is_virtual_network_filter_enabled,omitempty"` + + // A versionless Key Vault Key ID for CMK encryption. Changing this forces a new resource to be created. + KeyVaultKeyID *string `json:"keyVaultKeyId,omitempty" tf:"key_vault_key_id,omitempty"` + + // Specifies the Kind of CosmosDB to create - possible values are GlobalDocumentDB, MongoDB and Parse. Defaults to GlobalDocumentDB. Changing this forces a new resource to be created. + Kind *string `json:"kind,omitempty" tf:"kind,omitempty"` + + // Disable local authentication and ensure only MSI and AAD can be used exclusively for authentication. Defaults to false. Can be set only when using the SQL API. + LocalAuthenticationDisabled *bool `json:"localAuthenticationDisabled,omitempty" tf:"local_authentication_disabled,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the minimal TLS version for the CosmosDB account. Possible values are: Tls, Tls11, and Tls12. Defaults to Tls12. 
+ MinimalTLSVersion *string `json:"minimalTlsVersion,omitempty" tf:"minimal_tls_version,omitempty"` + + // The Server Version of a MongoDB account. Possible values are 4.2, 4.0, 3.6, and 3.2. + MongoServerVersion *string `json:"mongoServerVersion,omitempty" tf:"mongo_server_version,omitempty"` + + // If Azure services can bypass ACLs. Defaults to false. + NetworkACLBypassForAzureServices *bool `json:"networkAclBypassForAzureServices,omitempty" tf:"network_acl_bypass_for_azure_services,omitempty"` + + // The list of resource Ids for Network Acl Bypass for this Cosmos DB account. + NetworkACLBypassIds []*string `json:"networkAclBypassIds,omitempty" tf:"network_acl_bypass_ids,omitempty"` + + // Specifies the Offer Type to use for this CosmosDB Account; currently, this can only be set to Standard. + OfferType *string `json:"offerType,omitempty" tf:"offer_type,omitempty"` + + // Is partition merge on the Cosmos DB account enabled? Defaults to false. + PartitionMergeEnabled *bool `json:"partitionMergeEnabled,omitempty" tf:"partition_merge_enabled,omitempty"` + + // Whether or not public network access is allowed for this CosmosDB account. Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // A list of read endpoints available for this CosmosDB account. + ReadEndpoints []*string `json:"readEndpoints,omitempty" tf:"read_endpoints,omitempty"` + + // The name of the resource group in which the CosmosDB Account is created. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A restore block as defined below. + Restore *RestoreObservation `json:"restore,omitempty" tf:"restore,omitempty"` + + // A mapping of tags to assign to the resource. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies a virtual_network_rule block as defined below, used to define which subnets are allowed to access this CosmosDB account. + VirtualNetworkRule []VirtualNetworkRuleObservation `json:"virtualNetworkRule,omitempty" tf:"virtual_network_rule,omitempty"` + + // A list of write endpoints available for this CosmosDB account. + WriteEndpoints []*string `json:"writeEndpoints,omitempty" tf:"write_endpoints,omitempty"` +} + +type AccountParameters struct { + + // Is write operations on metadata resources (databases, containers, throughput) via account keys enabled? Defaults to true. + // +kubebuilder:validation:Optional + AccessKeyMetadataWritesEnabled *bool `json:"accessKeyMetadataWritesEnabled,omitempty" tf:"access_key_metadata_writes_enabled,omitempty"` + + // An analytical_storage block as defined below. + // +kubebuilder:validation:Optional + AnalyticalStorage *AnalyticalStorageParameters `json:"analyticalStorage,omitempty" tf:"analytical_storage,omitempty"` + + // Enable Analytical Storage option for this Cosmos DB account. Defaults to false. Enabling and then disabling analytical storage forces a new resource to be created. + // +kubebuilder:validation:Optional + AnalyticalStorageEnabled *bool `json:"analyticalStorageEnabled,omitempty" tf:"analytical_storage_enabled,omitempty"` + + // A backup block as defined below. + // +kubebuilder:validation:Optional + Backup *BackupParameters `json:"backup,omitempty" tf:"backup,omitempty"` + + // The capabilities which should be enabled for this Cosmos DB account. Value is a capabilities block as defined below. + // +kubebuilder:validation:Optional + Capabilities []CapabilitiesParameters `json:"capabilities,omitempty" tf:"capabilities,omitempty"` + + // A capacity block as defined below. 
+ // +kubebuilder:validation:Optional + Capacity *CapacityParameters `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // Specifies one consistency_policy block as defined below, used to define the consistency policy for this CosmosDB account. + // +kubebuilder:validation:Optional + ConsistencyPolicy *ConsistencyPolicyParameters `json:"consistencyPolicy,omitempty" tf:"consistency_policy,omitempty"` + + // A cors_rule block as defined below. + // +kubebuilder:validation:Optional + CorsRule *CorsRuleParameters `json:"corsRule,omitempty" tf:"cors_rule,omitempty"` + + // The creation mode for the CosmosDB Account. Possible values are Default and Restore. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + CreateMode *string `json:"createMode,omitempty" tf:"create_mode,omitempty"` + + // The default identity for accessing Key Vault. Possible values are FirstPartyIdentity, SystemAssignedIdentity or UserAssignedIdentity. Defaults to FirstPartyIdentity. + // +kubebuilder:validation:Optional + DefaultIdentityType *string `json:"defaultIdentityType,omitempty" tf:"default_identity_type,omitempty"` + + // Enable automatic failover for this Cosmos DB account. + // +kubebuilder:validation:Optional + EnableAutomaticFailover *bool `json:"enableAutomaticFailover,omitempty" tf:"enable_automatic_failover,omitempty"` + + // Enable the Free Tier pricing option for this Cosmos DB account. Defaults to false. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + EnableFreeTier *bool `json:"enableFreeTier,omitempty" tf:"enable_free_tier,omitempty"` + + // Enable multiple write locations for this Cosmos DB account. 
+ // +kubebuilder:validation:Optional + EnableMultipleWriteLocations *bool `json:"enableMultipleWriteLocations,omitempty" tf:"enable_multiple_write_locations,omitempty"` + + // Specifies a geo_location resource, used to define where data should be replicated with the failover_priority 0 specifying the primary location. Value is a geo_location block as defined below. + // +kubebuilder:validation:Optional + GeoLocation []GeoLocationParameters `json:"geoLocation,omitempty" tf:"geo_location,omitempty"` + + // CosmosDB Firewall Support: This value specifies the set of IP addresses or IP address ranges in CIDR form to be included as the allowed list of client IPs for a given database account. IP addresses/ranges must be comma separated and must not contain any spaces. + // +kubebuilder:validation:Optional + IPRangeFilter *string `json:"ipRangeFilter,omitempty" tf:"ip_range_filter,omitempty"` + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *IdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Enables virtual network filtering for this Cosmos DB account. + // +kubebuilder:validation:Optional + IsVirtualNetworkFilterEnabled *bool `json:"isVirtualNetworkFilterEnabled,omitempty" tf:"is_virtual_network_filter_enabled,omitempty"` + + // A versionless Key Vault Key ID for CMK encryption. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + KeyVaultKeyID *string `json:"keyVaultKeyId,omitempty" tf:"key_vault_key_id,omitempty"` + + // Specifies the Kind of CosmosDB to create - possible values are GlobalDocumentDB, MongoDB and Parse. Defaults to GlobalDocumentDB. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Kind *string `json:"kind,omitempty" tf:"kind,omitempty"` + + // Disable local authentication and ensure only MSI and AAD can be used exclusively for authentication. Defaults to false. Can be set only when using the SQL API. 
+ // +kubebuilder:validation:Optional + LocalAuthenticationDisabled *bool `json:"localAuthenticationDisabled,omitempty" tf:"local_authentication_disabled,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the minimal TLS version for the CosmosDB account. Possible values are: Tls, Tls11, and Tls12. Defaults to Tls12. + // +kubebuilder:validation:Optional + MinimalTLSVersion *string `json:"minimalTlsVersion,omitempty" tf:"minimal_tls_version,omitempty"` + + // The Server Version of a MongoDB account. Possible values are 4.2, 4.0, 3.6, and 3.2. + // +kubebuilder:validation:Optional + MongoServerVersion *string `json:"mongoServerVersion,omitempty" tf:"mongo_server_version,omitempty"` + + // If Azure services can bypass ACLs. Defaults to false. + // +kubebuilder:validation:Optional + NetworkACLBypassForAzureServices *bool `json:"networkAclBypassForAzureServices,omitempty" tf:"network_acl_bypass_for_azure_services,omitempty"` + + // The list of resource Ids for Network Acl Bypass for this Cosmos DB account. + // +kubebuilder:validation:Optional + NetworkACLBypassIds []*string `json:"networkAclBypassIds,omitempty" tf:"network_acl_bypass_ids,omitempty"` + + // Specifies the Offer Type to use for this CosmosDB Account; currently, this can only be set to Standard. + // +kubebuilder:validation:Optional + OfferType *string `json:"offerType,omitempty" tf:"offer_type,omitempty"` + + // Is partition merge on the Cosmos DB account enabled? Defaults to false. + // +kubebuilder:validation:Optional + PartitionMergeEnabled *bool `json:"partitionMergeEnabled,omitempty" tf:"partition_merge_enabled,omitempty"` + + // Whether or not public network access is allowed for this CosmosDB account. Defaults to true. 
+ // +kubebuilder:validation:Optional + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The name of the resource group in which the CosmosDB Account is created. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A restore block as defined below. + // +kubebuilder:validation:Optional + Restore *RestoreParameters `json:"restore,omitempty" tf:"restore,omitempty"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies a virtual_network_rule block as defined below, used to define which subnets are allowed to access this CosmosDB account. + // +kubebuilder:validation:Optional + VirtualNetworkRule []VirtualNetworkRuleParameters `json:"virtualNetworkRule,omitempty" tf:"virtual_network_rule,omitempty"` +} + +type AnalyticalStorageInitParameters struct { + + // The schema type of the Analytical Storage for this Cosmos DB account. Possible values are FullFidelity and WellDefined. + SchemaType *string `json:"schemaType,omitempty" tf:"schema_type,omitempty"` +} + +type AnalyticalStorageObservation struct { + + // The schema type of the Analytical Storage for this Cosmos DB account. 
Possible values are FullFidelity and WellDefined. + SchemaType *string `json:"schemaType,omitempty" tf:"schema_type,omitempty"` +} + +type AnalyticalStorageParameters struct { + + // The schema type of the Analytical Storage for this Cosmos DB account. Possible values are FullFidelity and WellDefined. + // +kubebuilder:validation:Optional + SchemaType *string `json:"schemaType" tf:"schema_type,omitempty"` +} + +type BackupInitParameters struct { + + // The interval in minutes between two backups. Possible values are between 60 and 1440. Defaults to 240. + IntervalInMinutes *float64 `json:"intervalInMinutes,omitempty" tf:"interval_in_minutes,omitempty"` + + // The time in hours that each backup is retained. Possible values are between 8 and 720. Defaults to 8. + RetentionInHours *float64 `json:"retentionInHours,omitempty" tf:"retention_in_hours,omitempty"` + + // The storage redundancy is used to indicate the type of backup residency. Possible values are Geo, Local and Zone. Defaults to Geo. + StorageRedundancy *string `json:"storageRedundancy,omitempty" tf:"storage_redundancy,omitempty"` + + // The continuous backup tier. Possible values are Continuous7Days and Continuous30Days. + Tier *string `json:"tier,omitempty" tf:"tier,omitempty"` + + // The type of the backup. Possible values are Continuous and Periodic. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type BackupObservation struct { + + // The interval in minutes between two backups. Possible values are between 60 and 1440. Defaults to 240. + IntervalInMinutes *float64 `json:"intervalInMinutes,omitempty" tf:"interval_in_minutes,omitempty"` + + // The time in hours that each backup is retained. Possible values are between 8 and 720. Defaults to 8. + RetentionInHours *float64 `json:"retentionInHours,omitempty" tf:"retention_in_hours,omitempty"` + + // The storage redundancy is used to indicate the type of backup residency. Possible values are Geo, Local and Zone. Defaults to Geo. 
+ StorageRedundancy *string `json:"storageRedundancy,omitempty" tf:"storage_redundancy,omitempty"` + + // The continuous backup tier. Possible values are Continuous7Days and Continuous30Days. + Tier *string `json:"tier,omitempty" tf:"tier,omitempty"` + + // The type of the backup. Possible values are Continuous and Periodic. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type BackupParameters struct { + + // The interval in minutes between two backups. Possible values are between 60 and 1440. Defaults to 240. + // +kubebuilder:validation:Optional + IntervalInMinutes *float64 `json:"intervalInMinutes,omitempty" tf:"interval_in_minutes,omitempty"` + + // The time in hours that each backup is retained. Possible values are between 8 and 720. Defaults to 8. + // +kubebuilder:validation:Optional + RetentionInHours *float64 `json:"retentionInHours,omitempty" tf:"retention_in_hours,omitempty"` + + // The storage redundancy is used to indicate the type of backup residency. Possible values are Geo, Local and Zone. Defaults to Geo. + // +kubebuilder:validation:Optional + StorageRedundancy *string `json:"storageRedundancy,omitempty" tf:"storage_redundancy,omitempty"` + + // The continuous backup tier. Possible values are Continuous7Days and Continuous30Days. + // +kubebuilder:validation:Optional + Tier *string `json:"tier,omitempty" tf:"tier,omitempty"` + + // The type of the backup. Possible values are Continuous and Periodic. 
+ // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type CapabilitiesInitParameters struct { + + // The capability to enable - Possible values are AllowSelfServeUpgradeToMongo36, DisableRateLimitingResponses, EnableAggregationPipeline, EnableCassandra, EnableGremlin, EnableMongo, EnableMongo16MBDocumentSupport, EnableMongoRetryableWrites, EnableMongoRoleBasedAccessControl, EnablePartialUniqueIndex, EnableServerless, EnableTable, EnableTtlOnCustomPath, EnableUniqueCompoundNestedDocs, MongoDBv3.4 and mongoEnableDocLevelTTL. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type CapabilitiesObservation struct { + + // The capability to enable - Possible values are AllowSelfServeUpgradeToMongo36, DisableRateLimitingResponses, EnableAggregationPipeline, EnableCassandra, EnableGremlin, EnableMongo, EnableMongo16MBDocumentSupport, EnableMongoRetryableWrites, EnableMongoRoleBasedAccessControl, EnablePartialUniqueIndex, EnableServerless, EnableTable, EnableTtlOnCustomPath, EnableUniqueCompoundNestedDocs, MongoDBv3.4 and mongoEnableDocLevelTTL. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type CapabilitiesParameters struct { + + // The capability to enable - Possible values are AllowSelfServeUpgradeToMongo36, DisableRateLimitingResponses, EnableAggregationPipeline, EnableCassandra, EnableGremlin, EnableMongo, EnableMongo16MBDocumentSupport, EnableMongoRetryableWrites, EnableMongoRoleBasedAccessControl, EnablePartialUniqueIndex, EnableServerless, EnableTable, EnableTtlOnCustomPath, EnableUniqueCompoundNestedDocs, MongoDBv3.4 and mongoEnableDocLevelTTL. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type CapacityInitParameters struct { + + // The total throughput limit imposed on this Cosmos DB account (RU/s). Possible values are at least -1. -1 means no limit. 
+ TotalThroughputLimit *float64 `json:"totalThroughputLimit,omitempty" tf:"total_throughput_limit,omitempty"` +} + +type CapacityObservation struct { + + // The total throughput limit imposed on this Cosmos DB account (RU/s). Possible values are at least -1. -1 means no limit. + TotalThroughputLimit *float64 `json:"totalThroughputLimit,omitempty" tf:"total_throughput_limit,omitempty"` +} + +type CapacityParameters struct { + + // The total throughput limit imposed on this Cosmos DB account (RU/s). Possible values are at least -1. -1 means no limit. + // +kubebuilder:validation:Optional + TotalThroughputLimit *float64 `json:"totalThroughputLimit" tf:"total_throughput_limit,omitempty"` +} + +type ConsistencyPolicyInitParameters struct { + + // The Consistency Level to use for this CosmosDB Account - can be either BoundedStaleness, Eventual, Session, Strong or ConsistentPrefix. + ConsistencyLevel *string `json:"consistencyLevel,omitempty" tf:"consistency_level,omitempty"` + + // When used with the Bounded Staleness consistency level, this value represents the time amount of staleness (in seconds) tolerated. The accepted range for this value is 5 - 86400 (1 day). Defaults to 5. Required when consistency_level is set to BoundedStaleness. + MaxIntervalInSeconds *float64 `json:"maxIntervalInSeconds,omitempty" tf:"max_interval_in_seconds,omitempty"` + + // When used with the Bounded Staleness consistency level, this value represents the number of stale requests tolerated. The accepted range for this value is 10 – 2147483647. Defaults to 100. Required when consistency_level is set to BoundedStaleness. + MaxStalenessPrefix *float64 `json:"maxStalenessPrefix,omitempty" tf:"max_staleness_prefix,omitempty"` +} + +type ConsistencyPolicyObservation struct { + + // The Consistency Level to use for this CosmosDB Account - can be either BoundedStaleness, Eventual, Session, Strong or ConsistentPrefix. 
+ ConsistencyLevel *string `json:"consistencyLevel,omitempty" tf:"consistency_level,omitempty"` + + // When used with the Bounded Staleness consistency level, this value represents the time amount of staleness (in seconds) tolerated. The accepted range for this value is 5 - 86400 (1 day). Defaults to 5. Required when consistency_level is set to BoundedStaleness. + MaxIntervalInSeconds *float64 `json:"maxIntervalInSeconds,omitempty" tf:"max_interval_in_seconds,omitempty"` + + // When used with the Bounded Staleness consistency level, this value represents the number of stale requests tolerated. The accepted range for this value is 10 – 2147483647. Defaults to 100. Required when consistency_level is set to BoundedStaleness. + MaxStalenessPrefix *float64 `json:"maxStalenessPrefix,omitempty" tf:"max_staleness_prefix,omitempty"` +} + +type ConsistencyPolicyParameters struct { + + // The Consistency Level to use for this CosmosDB Account - can be either BoundedStaleness, Eventual, Session, Strong or ConsistentPrefix. + // +kubebuilder:validation:Optional + ConsistencyLevel *string `json:"consistencyLevel" tf:"consistency_level,omitempty"` + + // When used with the Bounded Staleness consistency level, this value represents the time amount of staleness (in seconds) tolerated. The accepted range for this value is 5 - 86400 (1 day). Defaults to 5. Required when consistency_level is set to BoundedStaleness. + // +kubebuilder:validation:Optional + MaxIntervalInSeconds *float64 `json:"maxIntervalInSeconds,omitempty" tf:"max_interval_in_seconds,omitempty"` + + // When used with the Bounded Staleness consistency level, this value represents the number of stale requests tolerated. The accepted range for this value is 10 – 2147483647. Defaults to 100. Required when consistency_level is set to BoundedStaleness. 
+ // +kubebuilder:validation:Optional + MaxStalenessPrefix *float64 `json:"maxStalenessPrefix,omitempty" tf:"max_staleness_prefix,omitempty"` +} + +type CorsRuleInitParameters struct { + + // A list of headers that are allowed to be a part of the cross-origin request. + AllowedHeaders []*string `json:"allowedHeaders,omitempty" tf:"allowed_headers,omitempty"` + + // A list of HTTP headers that are allowed to be executed by the origin. Valid options are DELETE, GET, HEAD, MERGE, POST, OPTIONS, PUT or PATCH. + AllowedMethods []*string `json:"allowedMethods,omitempty" tf:"allowed_methods,omitempty"` + + // A list of origin domains that will be allowed by CORS. + AllowedOrigins []*string `json:"allowedOrigins,omitempty" tf:"allowed_origins,omitempty"` + + // A list of response headers that are exposed to CORS clients. + ExposedHeaders []*string `json:"exposedHeaders,omitempty" tf:"exposed_headers,omitempty"` + + // The number of seconds the client should cache a preflight response. Possible values are between 1 and 2147483647. + MaxAgeInSeconds *float64 `json:"maxAgeInSeconds,omitempty" tf:"max_age_in_seconds,omitempty"` +} + +type CorsRuleObservation struct { + + // A list of headers that are allowed to be a part of the cross-origin request. + AllowedHeaders []*string `json:"allowedHeaders,omitempty" tf:"allowed_headers,omitempty"` + + // A list of HTTP headers that are allowed to be executed by the origin. Valid options are DELETE, GET, HEAD, MERGE, POST, OPTIONS, PUT or PATCH. + AllowedMethods []*string `json:"allowedMethods,omitempty" tf:"allowed_methods,omitempty"` + + // A list of origin domains that will be allowed by CORS. + AllowedOrigins []*string `json:"allowedOrigins,omitempty" tf:"allowed_origins,omitempty"` + + // A list of response headers that are exposed to CORS clients. + ExposedHeaders []*string `json:"exposedHeaders,omitempty" tf:"exposed_headers,omitempty"` + + // The number of seconds the client should cache a preflight response. 
Possible values are between 1 and 2147483647. + MaxAgeInSeconds *float64 `json:"maxAgeInSeconds,omitempty" tf:"max_age_in_seconds,omitempty"` +} + +type CorsRuleParameters struct { + + // A list of headers that are allowed to be a part of the cross-origin request. + // +kubebuilder:validation:Optional + AllowedHeaders []*string `json:"allowedHeaders" tf:"allowed_headers,omitempty"` + + // A list of HTTP headers that are allowed to be executed by the origin. Valid options are DELETE, GET, HEAD, MERGE, POST, OPTIONS, PUT or PATCH. + // +kubebuilder:validation:Optional + AllowedMethods []*string `json:"allowedMethods" tf:"allowed_methods,omitempty"` + + // A list of origin domains that will be allowed by CORS. + // +kubebuilder:validation:Optional + AllowedOrigins []*string `json:"allowedOrigins" tf:"allowed_origins,omitempty"` + + // A list of response headers that are exposed to CORS clients. + // +kubebuilder:validation:Optional + ExposedHeaders []*string `json:"exposedHeaders" tf:"exposed_headers,omitempty"` + + // The number of seconds the client should cache a preflight response. Possible values are between 1 and 2147483647. + // +kubebuilder:validation:Optional + MaxAgeInSeconds *float64 `json:"maxAgeInSeconds,omitempty" tf:"max_age_in_seconds,omitempty"` +} + +type DatabaseInitParameters struct { + + // A list of the collection names for the restore request. Changing this forces a new resource to be created. + // +listType=set + CollectionNames []*string `json:"collectionNames,omitempty" tf:"collection_names,omitempty"` + + // Specifies the name of the CosmosDB Account. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type DatabaseObservation struct { + + // A list of the collection names for the restore request. Changing this forces a new resource to be created. 
+ // +listType=set + CollectionNames []*string `json:"collectionNames,omitempty" tf:"collection_names,omitempty"` + + // Specifies the name of the CosmosDB Account. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type DatabaseParameters struct { + + // A list of the collection names for the restore request. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + // +listType=set + CollectionNames []*string `json:"collectionNames,omitempty" tf:"collection_names,omitempty"` + + // Specifies the name of the CosmosDB Account. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type GeoLocationInitParameters struct { + + // The failover priority of the region. A failover priority of 0 indicates a write region. The maximum value for a failover priority = (total number of regions - 1). Failover priority values must be unique for each of the regions in which the database account exists. Changing this causes the location to be re-provisioned and cannot be changed for the location with failover priority 0. + FailoverPriority *float64 `json:"failoverPriority,omitempty" tf:"failover_priority,omitempty"` + + // The name of the Azure region to host replicated data. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Should zone redundancy be enabled for this region? Defaults to false. + ZoneRedundant *bool `json:"zoneRedundant,omitempty" tf:"zone_redundant,omitempty"` +} + +type GeoLocationObservation struct { + + // The failover priority of the region. A failover priority of 0 indicates a write region. The maximum value for a failover priority = (total number of regions - 1). Failover priority values must be unique for each of the regions in which the database account exists. 
Changing this causes the location to be re-provisioned and cannot be changed for the location with failover priority 0. + FailoverPriority *float64 `json:"failoverPriority,omitempty" tf:"failover_priority,omitempty"` + + // The ID of the virtual network subnet. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name of the Azure region to host replicated data. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Should zone redundancy be enabled for this region? Defaults to false. + ZoneRedundant *bool `json:"zoneRedundant,omitempty" tf:"zone_redundant,omitempty"` +} + +type GeoLocationParameters struct { + + // The failover priority of the region. A failover priority of 0 indicates a write region. The maximum value for a failover priority = (total number of regions - 1). Failover priority values must be unique for each of the regions in which the database account exists. Changing this causes the location to be re-provisioned and cannot be changed for the location with failover priority 0. + // +kubebuilder:validation:Optional + FailoverPriority *float64 `json:"failoverPriority" tf:"failover_priority,omitempty"` + + // The name of the Azure region to host replicated data. + // +kubebuilder:validation:Optional + Location *string `json:"location" tf:"location,omitempty"` + + // Should zone redundancy be enabled for this region? Defaults to false. + // +kubebuilder:validation:Optional + ZoneRedundant *bool `json:"zoneRedundant,omitempty" tf:"zone_redundant,omitempty"` +} + +type GremlinDatabaseInitParameters struct { + + // A list of the Graph names for the restore request. Changing this forces a new resource to be created. + GraphNames []*string `json:"graphNames,omitempty" tf:"graph_names,omitempty"` + + // Specifies the name of the CosmosDB Account. Changing this forces a new resource to be created. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type GremlinDatabaseObservation struct { + + // A list of the Graph names for the restore request. Changing this forces a new resource to be created. + GraphNames []*string `json:"graphNames,omitempty" tf:"graph_names,omitempty"` + + // Specifies the name of the CosmosDB Account. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type GremlinDatabaseParameters struct { + + // A list of the Graph names for the restore request. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + GraphNames []*string `json:"graphNames,omitempty" tf:"graph_names,omitempty"` + + // Specifies the name of the CosmosDB Account. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type IdentityInitParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Cosmos Account. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Type of Managed Identity assigned to this Cosmos account. Possible values are SystemAssigned, UserAssigned and SystemAssigned, UserAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityObservation struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Cosmos Account. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Principal ID associated with this Managed Service Identity. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID associated with this Managed Service Identity. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // The Type of Managed Identity assigned to this Cosmos account. 
Possible values are SystemAssigned, UserAssigned and SystemAssigned, UserAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Cosmos Account. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Type of Managed Identity assigned to this Cosmos account. Possible values are SystemAssigned, UserAssigned and SystemAssigned, UserAssigned. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type RestoreInitParameters struct { + + // A database block as defined below. Changing this forces a new resource to be created. + Database []DatabaseInitParameters `json:"database,omitempty" tf:"database,omitempty"` + + // One or more gremlin_database blocks as defined below. Changing this forces a new resource to be created. + GremlinDatabase []GremlinDatabaseInitParameters `json:"gremlinDatabase,omitempty" tf:"gremlin_database,omitempty"` + + // The creation time of the database or the collection (Datetime Format RFC 3339). Changing this forces a new resource to be created. + RestoreTimestampInUtc *string `json:"restoreTimestampInUtc,omitempty" tf:"restore_timestamp_in_utc,omitempty"` + + // The resource ID of the restorable database account from which the restore has to be initiated. The example is /subscriptions/{subscriptionId}/providers/Microsoft.DocumentDB/locations/{location}/restorableDatabaseAccounts/{restorableDatabaseAccountName}. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cosmosdb/v1beta2.Account + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + SourceCosmosDBAccountID *string `json:"sourceCosmosdbAccountId,omitempty" tf:"source_cosmosdb_account_id,omitempty"` + + // Reference to a Account in cosmosdb to populate sourceCosmosdbAccountId. + // +kubebuilder:validation:Optional + SourceCosmosDBAccountIDRef *v1.Reference `json:"sourceCosmosdbAccountIdRef,omitempty" tf:"-"` + + // Selector for a Account in cosmosdb to populate sourceCosmosdbAccountId. + // +kubebuilder:validation:Optional + SourceCosmosDBAccountIDSelector *v1.Selector `json:"sourceCosmosdbAccountIdSelector,omitempty" tf:"-"` + + // A list of specific tables available for restore. Changing this forces a new resource to be created. + TablesToRestore []*string `json:"tablesToRestore,omitempty" tf:"tables_to_restore,omitempty"` +} + +type RestoreObservation struct { + + // A database block as defined below. Changing this forces a new resource to be created. + Database []DatabaseObservation `json:"database,omitempty" tf:"database,omitempty"` + + // One or more gremlin_database blocks as defined below. Changing this forces a new resource to be created. + GremlinDatabase []GremlinDatabaseObservation `json:"gremlinDatabase,omitempty" tf:"gremlin_database,omitempty"` + + // The creation time of the database or the collection (Datetime Format RFC 3339). Changing this forces a new resource to be created. + RestoreTimestampInUtc *string `json:"restoreTimestampInUtc,omitempty" tf:"restore_timestamp_in_utc,omitempty"` + + // The resource ID of the restorable database account from which the restore has to be initiated. The example is /subscriptions/{subscriptionId}/providers/Microsoft.DocumentDB/locations/{location}/restorableDatabaseAccounts/{restorableDatabaseAccountName}. Changing this forces a new resource to be created. 
+ SourceCosmosDBAccountID *string `json:"sourceCosmosdbAccountId,omitempty" tf:"source_cosmosdb_account_id,omitempty"` + + // A list of specific tables available for restore. Changing this forces a new resource to be created. + TablesToRestore []*string `json:"tablesToRestore,omitempty" tf:"tables_to_restore,omitempty"` +} + +type RestoreParameters struct { + + // A database block as defined below. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Database []DatabaseParameters `json:"database,omitempty" tf:"database,omitempty"` + + // One or more gremlin_database blocks as defined below. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + GremlinDatabase []GremlinDatabaseParameters `json:"gremlinDatabase,omitempty" tf:"gremlin_database,omitempty"` + + // The creation time of the database or the collection (Datetime Format RFC 3339). Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + RestoreTimestampInUtc *string `json:"restoreTimestampInUtc" tf:"restore_timestamp_in_utc,omitempty"` + + // The resource ID of the restorable database account from which the restore has to be initiated. The example is /subscriptions/{subscriptionId}/providers/Microsoft.DocumentDB/locations/{location}/restorableDatabaseAccounts/{restorableDatabaseAccountName}. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cosmosdb/v1beta2.Account + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + SourceCosmosDBAccountID *string `json:"sourceCosmosdbAccountId,omitempty" tf:"source_cosmosdb_account_id,omitempty"` + + // Reference to a Account in cosmosdb to populate sourceCosmosdbAccountId. 
+ // +kubebuilder:validation:Optional + SourceCosmosDBAccountIDRef *v1.Reference `json:"sourceCosmosdbAccountIdRef,omitempty" tf:"-"` + + // Selector for a Account in cosmosdb to populate sourceCosmosdbAccountId. + // +kubebuilder:validation:Optional + SourceCosmosDBAccountIDSelector *v1.Selector `json:"sourceCosmosdbAccountIdSelector,omitempty" tf:"-"` + + // A list of specific tables available for restore. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + TablesToRestore []*string `json:"tablesToRestore,omitempty" tf:"tables_to_restore,omitempty"` +} + +type VirtualNetworkRuleInitParameters struct { + + // The ID of the virtual network subnet. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // If set to true, the specified subnet will be added as a virtual network rule even if its CosmosDB service endpoint is not active. Defaults to false. + IgnoreMissingVnetServiceEndpoint *bool `json:"ignoreMissingVnetServiceEndpoint,omitempty" tf:"ignore_missing_vnet_service_endpoint,omitempty"` +} + +type VirtualNetworkRuleObservation struct { + + // The ID of the virtual network subnet. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // If set to true, the specified subnet will be added as a virtual network rule even if its CosmosDB service endpoint is not active. Defaults to false. + IgnoreMissingVnetServiceEndpoint *bool `json:"ignoreMissingVnetServiceEndpoint,omitempty" tf:"ignore_missing_vnet_service_endpoint,omitempty"` +} + +type VirtualNetworkRuleParameters struct { + + // The ID of the virtual network subnet. + // +kubebuilder:validation:Optional + ID *string `json:"id" tf:"id,omitempty"` + + // If set to true, the specified subnet will be added as a virtual network rule even if its CosmosDB service endpoint is not active. Defaults to false. 
+ // +kubebuilder:validation:Optional + IgnoreMissingVnetServiceEndpoint *bool `json:"ignoreMissingVnetServiceEndpoint,omitempty" tf:"ignore_missing_vnet_service_endpoint,omitempty"` +} + +// AccountSpec defines the desired state of Account +type AccountSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider AccountParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider AccountInitParameters `json:"initProvider,omitempty"` +} + +// AccountStatus defines the observed state of Account. +type AccountStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider AccountObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Account is the Schema for the Accounts API. Manages a CosmosDB (formally DocumentDB) Account. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type Account struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.consistencyPolicy) || (has(self.initProvider) && has(self.initProvider.consistencyPolicy))",message="spec.forProvider.consistencyPolicy is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.geoLocation) || (has(self.initProvider) && has(self.initProvider.geoLocation))",message="spec.forProvider.geoLocation is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.offerType) || (has(self.initProvider) && has(self.initProvider.offerType))",message="spec.forProvider.offerType is a required parameter" + Spec AccountSpec `json:"spec"` + Status AccountStatus 
`json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// AccountList contains a list of Accounts +type AccountList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Account `json:"items"` +} + +// Repository type metadata. +var ( + Account_Kind = "Account" + Account_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Account_Kind}.String() + Account_KindAPIVersion = Account_Kind + "." + CRDGroupVersion.String() + Account_GroupVersionKind = CRDGroupVersion.WithKind(Account_Kind) +) + +func init() { + SchemeBuilder.Register(&Account{}, &AccountList{}) +} diff --git a/apis/cosmosdb/v1beta2/zz_cassandracluster_terraformed.go b/apis/cosmosdb/v1beta2/zz_cassandracluster_terraformed.go new file mode 100755 index 000000000..25bf7116b --- /dev/null +++ b/apis/cosmosdb/v1beta2/zz_cassandracluster_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this CassandraCluster +func (mg *CassandraCluster) GetTerraformResourceType() string { + return "azurerm_cosmosdb_cassandra_cluster" +} + +// GetConnectionDetailsMapping for this CassandraCluster +func (tr *CassandraCluster) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"default_admin_password": "spec.forProvider.defaultAdminPasswordSecretRef"} +} + +// GetObservation of this CassandraCluster +func (tr *CassandraCluster) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this CassandraCluster +func (tr *CassandraCluster) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this CassandraCluster +func (tr *CassandraCluster) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this CassandraCluster +func (tr *CassandraCluster) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this CassandraCluster +func (tr *CassandraCluster) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this CassandraCluster +func (tr 
*CassandraCluster) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this CassandraCluster +func (tr *CassandraCluster) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this CassandraCluster using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *CassandraCluster) LateInitialize(attrs []byte) (bool, error) { + params := &CassandraClusterParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *CassandraCluster) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/cosmosdb/v1beta2/zz_cassandracluster_types.go b/apis/cosmosdb/v1beta2/zz_cassandracluster_types.go new file mode 100755 index 000000000..099591d65 --- /dev/null +++ b/apis/cosmosdb/v1beta2/zz_cassandracluster_types.go @@ -0,0 +1,266 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CassandraClusterIdentityInitParameters struct { + + // Specifies the type of Managed Service Identity that should be configured on this Cassandra Cluster. The only possible value is SystemAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type CassandraClusterIdentityObservation struct { + + // The ID of the Cassandra Cluster. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The ID of the Cassandra Cluster. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Cassandra Cluster. The only possible value is SystemAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type CassandraClusterIdentityParameters struct { + + // Specifies the type of Managed Service Identity that should be configured on this Cassandra Cluster. The only possible value is SystemAssigned. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type CassandraClusterInitParameters struct { + + // The authentication method that is used to authenticate clients. 
Possible values are None and Cassandra. Defaults to Cassandra. + AuthenticationMethod *string `json:"authenticationMethod,omitempty" tf:"authentication_method,omitempty"` + + // A list of TLS certificates that is used to authorize client connecting to the Cassandra Cluster. + ClientCertificatePems []*string `json:"clientCertificatePems,omitempty" tf:"client_certificate_pems,omitempty"` + + // The ID of the delegated management subnet for this Cassandra Cluster. Changing this forces a new Cassandra Cluster to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + DelegatedManagementSubnetID *string `json:"delegatedManagementSubnetId,omitempty" tf:"delegated_management_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate delegatedManagementSubnetId. + // +kubebuilder:validation:Optional + DelegatedManagementSubnetIDRef *v1.Reference `json:"delegatedManagementSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate delegatedManagementSubnetId. + // +kubebuilder:validation:Optional + DelegatedManagementSubnetIDSelector *v1.Selector `json:"delegatedManagementSubnetIdSelector,omitempty" tf:"-"` + + // A list of TLS certificates that is used to authorize gossip from unmanaged Cassandra Data Center. + ExternalGossipCertificatePems []*string `json:"externalGossipCertificatePems,omitempty" tf:"external_gossip_certificate_pems,omitempty"` + + // A list of IP Addresses of the seed nodes in unmanaged the Cassandra Data Center which will be added to the seed node lists of all managed nodes. + ExternalSeedNodeIPAddresses []*string `json:"externalSeedNodeIpAddresses,omitempty" tf:"external_seed_node_ip_addresses,omitempty"` + + // The number of hours to wait between taking a backup of the Cassandra Cluster. Defaults to 24. 
+ HoursBetweenBackups *float64 `json:"hoursBetweenBackups,omitempty" tf:"hours_between_backups,omitempty"` + + // An identity block as defined below. + Identity *CassandraClusterIdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The Azure Region where the Cassandra Cluster should exist. Changing this forces a new Cassandra Cluster to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Is the automatic repair enabled on the Cassandra Cluster? Defaults to true. + RepairEnabled *bool `json:"repairEnabled,omitempty" tf:"repair_enabled,omitempty"` + + // A mapping of tags assigned to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The version of Cassandra what the Cluster converges to run. Possible values are 3.11 and 4.0. Defaults to 3.11. Changing this forces a new Cassandra Cluster to be created. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type CassandraClusterObservation struct { + + // The authentication method that is used to authenticate clients. Possible values are None and Cassandra. Defaults to Cassandra. + AuthenticationMethod *string `json:"authenticationMethod,omitempty" tf:"authentication_method,omitempty"` + + // A list of TLS certificates that is used to authorize client connecting to the Cassandra Cluster. + ClientCertificatePems []*string `json:"clientCertificatePems,omitempty" tf:"client_certificate_pems,omitempty"` + + // The ID of the delegated management subnet for this Cassandra Cluster. Changing this forces a new Cassandra Cluster to be created. + DelegatedManagementSubnetID *string `json:"delegatedManagementSubnetId,omitempty" tf:"delegated_management_subnet_id,omitempty"` + + // A list of TLS certificates that is used to authorize gossip from unmanaged Cassandra Data Center. 
+ ExternalGossipCertificatePems []*string `json:"externalGossipCertificatePems,omitempty" tf:"external_gossip_certificate_pems,omitempty"` + + // A list of IP Addresses of the seed nodes in unmanaged the Cassandra Data Center which will be added to the seed node lists of all managed nodes. + ExternalSeedNodeIPAddresses []*string `json:"externalSeedNodeIpAddresses,omitempty" tf:"external_seed_node_ip_addresses,omitempty"` + + // The number of hours to wait between taking a backup of the Cassandra Cluster. Defaults to 24. + HoursBetweenBackups *float64 `json:"hoursBetweenBackups,omitempty" tf:"hours_between_backups,omitempty"` + + // The ID of the Cassandra Cluster. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. + Identity *CassandraClusterIdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // The Azure Region where the Cassandra Cluster should exist. Changing this forces a new Cassandra Cluster to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Is the automatic repair enabled on the Cassandra Cluster? Defaults to true. + RepairEnabled *bool `json:"repairEnabled,omitempty" tf:"repair_enabled,omitempty"` + + // The name of the Resource Group where the Cassandra Cluster should exist. Changing this forces a new Cassandra Cluster to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A mapping of tags assigned to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The version of Cassandra what the Cluster converges to run. Possible values are 3.11 and 4.0. Defaults to 3.11. Changing this forces a new Cassandra Cluster to be created. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type CassandraClusterParameters struct { + + // The authentication method that is used to authenticate clients. 
Possible values are None and Cassandra. Defaults to Cassandra. + // +kubebuilder:validation:Optional + AuthenticationMethod *string `json:"authenticationMethod,omitempty" tf:"authentication_method,omitempty"` + + // A list of TLS certificates that is used to authorize client connecting to the Cassandra Cluster. + // +kubebuilder:validation:Optional + ClientCertificatePems []*string `json:"clientCertificatePems,omitempty" tf:"client_certificate_pems,omitempty"` + + // The initial admin password for this Cassandra Cluster. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + DefaultAdminPasswordSecretRef v1.SecretKeySelector `json:"defaultAdminPasswordSecretRef" tf:"-"` + + // The ID of the delegated management subnet for this Cassandra Cluster. Changing this forces a new Cassandra Cluster to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + DelegatedManagementSubnetID *string `json:"delegatedManagementSubnetId,omitempty" tf:"delegated_management_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate delegatedManagementSubnetId. + // +kubebuilder:validation:Optional + DelegatedManagementSubnetIDRef *v1.Reference `json:"delegatedManagementSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate delegatedManagementSubnetId. + // +kubebuilder:validation:Optional + DelegatedManagementSubnetIDSelector *v1.Selector `json:"delegatedManagementSubnetIdSelector,omitempty" tf:"-"` + + // A list of TLS certificates that is used to authorize gossip from unmanaged Cassandra Data Center. 
+ // +kubebuilder:validation:Optional + ExternalGossipCertificatePems []*string `json:"externalGossipCertificatePems,omitempty" tf:"external_gossip_certificate_pems,omitempty"` + + // A list of IP Addresses of the seed nodes in unmanaged the Cassandra Data Center which will be added to the seed node lists of all managed nodes. + // +kubebuilder:validation:Optional + ExternalSeedNodeIPAddresses []*string `json:"externalSeedNodeIpAddresses,omitempty" tf:"external_seed_node_ip_addresses,omitempty"` + + // The number of hours to wait between taking a backup of the Cassandra Cluster. Defaults to 24. + // +kubebuilder:validation:Optional + HoursBetweenBackups *float64 `json:"hoursBetweenBackups,omitempty" tf:"hours_between_backups,omitempty"` + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *CassandraClusterIdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The Azure Region where the Cassandra Cluster should exist. Changing this forces a new Cassandra Cluster to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Is the automatic repair enabled on the Cassandra Cluster? Defaults to true. + // +kubebuilder:validation:Optional + RepairEnabled *bool `json:"repairEnabled,omitempty" tf:"repair_enabled,omitempty"` + + // The name of the Resource Group where the Cassandra Cluster should exist. Changing this forces a new Cassandra Cluster to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. 
+ // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A mapping of tags assigned to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The version of Cassandra what the Cluster converges to run. Possible values are 3.11 and 4.0. Defaults to 3.11. Changing this forces a new Cassandra Cluster to be created. + // +kubebuilder:validation:Optional + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +// CassandraClusterSpec defines the desired state of CassandraCluster +type CassandraClusterSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider CassandraClusterParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider CassandraClusterInitParameters `json:"initProvider,omitempty"` +} + +// CassandraClusterStatus defines the observed state of CassandraCluster. 
+type CassandraClusterStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider CassandraClusterObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// CassandraCluster is the Schema for the CassandraClusters API. Manages a Cassandra Cluster. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type CassandraCluster struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.defaultAdminPasswordSecretRef)",message="spec.forProvider.defaultAdminPasswordSecretRef is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + Spec CassandraClusterSpec `json:"spec"` + Status CassandraClusterStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// CassandraClusterList contains a list of CassandraClusters +type CassandraClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []CassandraCluster `json:"items"` +} + +// Repository type metadata. 
+var ( + CassandraCluster_Kind = "CassandraCluster" + CassandraCluster_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: CassandraCluster_Kind}.String() + CassandraCluster_KindAPIVersion = CassandraCluster_Kind + "." + CRDGroupVersion.String() + CassandraCluster_GroupVersionKind = CRDGroupVersion.WithKind(CassandraCluster_Kind) +) + +func init() { + SchemeBuilder.Register(&CassandraCluster{}, &CassandraClusterList{}) +} diff --git a/apis/cosmosdb/v1beta2/zz_cassandrakeyspace_terraformed.go b/apis/cosmosdb/v1beta2/zz_cassandrakeyspace_terraformed.go new file mode 100755 index 000000000..d1d5635a1 --- /dev/null +++ b/apis/cosmosdb/v1beta2/zz_cassandrakeyspace_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this CassandraKeySpace +func (mg *CassandraKeySpace) GetTerraformResourceType() string { + return "azurerm_cosmosdb_cassandra_keyspace" +} + +// GetConnectionDetailsMapping for this CassandraKeySpace +func (tr *CassandraKeySpace) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this CassandraKeySpace +func (tr *CassandraKeySpace) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this CassandraKeySpace +func (tr *CassandraKeySpace) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of 
this CassandraKeySpace +func (tr *CassandraKeySpace) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this CassandraKeySpace +func (tr *CassandraKeySpace) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this CassandraKeySpace +func (tr *CassandraKeySpace) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this CassandraKeySpace +func (tr *CassandraKeySpace) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this CassandraKeySpace +func (tr *CassandraKeySpace) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this CassandraKeySpace using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *CassandraKeySpace) LateInitialize(attrs []byte) (bool, error) { + params := &CassandraKeySpaceParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *CassandraKeySpace) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/cosmosdb/v1beta2/zz_cassandrakeyspace_types.go b/apis/cosmosdb/v1beta2/zz_cassandrakeyspace_types.go new file mode 100755 index 000000000..461f0edfe --- /dev/null +++ b/apis/cosmosdb/v1beta2/zz_cassandrakeyspace_types.go @@ -0,0 +1,157 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AutoscaleSettingsInitParameters struct { + + // The maximum throughput of the Cassandra KeySpace (RU/s). Must be between 1,000 and 1,000,000. Must be set in increments of 1,000. Conflicts with throughput. 
+ MaxThroughput *float64 `json:"maxThroughput,omitempty" tf:"max_throughput,omitempty"` +} + +type AutoscaleSettingsObservation struct { + + // The maximum throughput of the Cassandra KeySpace (RU/s). Must be between 1,000 and 1,000,000. Must be set in increments of 1,000. Conflicts with throughput. + MaxThroughput *float64 `json:"maxThroughput,omitempty" tf:"max_throughput,omitempty"` +} + +type AutoscaleSettingsParameters struct { + + // The maximum throughput of the Cassandra KeySpace (RU/s). Must be between 1,000 and 1,000,000. Must be set in increments of 1,000. Conflicts with throughput. + // +kubebuilder:validation:Optional + MaxThroughput *float64 `json:"maxThroughput,omitempty" tf:"max_throughput,omitempty"` +} + +type CassandraKeySpaceInitParameters struct { + + // An autoscale_settings block as defined below. + AutoscaleSettings *AutoscaleSettingsInitParameters `json:"autoscaleSettings,omitempty" tf:"autoscale_settings,omitempty"` + + // The throughput of Cassandra KeySpace (RU/s). Must be set in increments of 100. The minimum value is 400. + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` +} + +type CassandraKeySpaceObservation struct { + + // The name of the Cosmos DB Cassandra KeySpace to create the table within. Changing this forces a new resource to be created. + AccountName *string `json:"accountName,omitempty" tf:"account_name,omitempty"` + + // An autoscale_settings block as defined below. + AutoscaleSettings *AutoscaleSettingsObservation `json:"autoscaleSettings,omitempty" tf:"autoscale_settings,omitempty"` + + // the ID of the CosmosDB Cassandra KeySpace. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name of the resource group in which the Cosmos DB Cassandra KeySpace is created. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // The throughput of Cassandra KeySpace (RU/s). 
Must be set in increments of 100. The minimum value is 400. + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` +} + +type CassandraKeySpaceParameters struct { + + // The name of the Cosmos DB Cassandra KeySpace to create the table within. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cosmosdb/v1beta2.Account + // +kubebuilder:validation:Optional + AccountName *string `json:"accountName,omitempty" tf:"account_name,omitempty"` + + // Reference to a Account in cosmosdb to populate accountName. + // +kubebuilder:validation:Optional + AccountNameRef *v1.Reference `json:"accountNameRef,omitempty" tf:"-"` + + // Selector for a Account in cosmosdb to populate accountName. + // +kubebuilder:validation:Optional + AccountNameSelector *v1.Selector `json:"accountNameSelector,omitempty" tf:"-"` + + // An autoscale_settings block as defined below. + // +kubebuilder:validation:Optional + AutoscaleSettings *AutoscaleSettingsParameters `json:"autoscaleSettings,omitempty" tf:"autoscale_settings,omitempty"` + + // The name of the resource group in which the Cosmos DB Cassandra KeySpace is created. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // The throughput of Cassandra KeySpace (RU/s). Must be set in increments of 100. 
The minimum value is 400. + // +kubebuilder:validation:Optional + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` +} + +// CassandraKeySpaceSpec defines the desired state of CassandraKeySpace +type CassandraKeySpaceSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider CassandraKeySpaceParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider CassandraKeySpaceInitParameters `json:"initProvider,omitempty"` +} + +// CassandraKeySpaceStatus defines the observed state of CassandraKeySpace. +type CassandraKeySpaceStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider CassandraKeySpaceObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// CassandraKeySpace is the Schema for the CassandraKeySpaces API. Manages a Cassandra KeySpace within a Cosmos DB Account. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type CassandraKeySpace struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec CassandraKeySpaceSpec `json:"spec"` + Status CassandraKeySpaceStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// CassandraKeySpaceList contains a list of CassandraKeySpaces +type CassandraKeySpaceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []CassandraKeySpace `json:"items"` +} + +// Repository type metadata. +var ( + CassandraKeySpace_Kind = "CassandraKeySpace" + CassandraKeySpace_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: CassandraKeySpace_Kind}.String() + CassandraKeySpace_KindAPIVersion = CassandraKeySpace_Kind + "." + CRDGroupVersion.String() + CassandraKeySpace_GroupVersionKind = CRDGroupVersion.WithKind(CassandraKeySpace_Kind) +) + +func init() { + SchemeBuilder.Register(&CassandraKeySpace{}, &CassandraKeySpaceList{}) +} diff --git a/apis/cosmosdb/v1beta2/zz_cassandratable_terraformed.go b/apis/cosmosdb/v1beta2/zz_cassandratable_terraformed.go new file mode 100755 index 000000000..c4bb7e1c5 --- /dev/null +++ b/apis/cosmosdb/v1beta2/zz_cassandratable_terraformed.go @@ -0,0 +1,130 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this CassandraTable +func (mg *CassandraTable) GetTerraformResourceType() string { + return "azurerm_cosmosdb_cassandra_table" +} + +// GetConnectionDetailsMapping for this CassandraTable +func (tr *CassandraTable) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this CassandraTable +func (tr *CassandraTable) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this CassandraTable +func (tr *CassandraTable) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this CassandraTable +func (tr *CassandraTable) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this CassandraTable +func (tr *CassandraTable) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this CassandraTable +func (tr *CassandraTable) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this CassandraTable +func (tr *CassandraTable) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if 
err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this CassandraTable +func (tr *CassandraTable) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this CassandraTable using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *CassandraTable) LateInitialize(attrs []byte) (bool, error) { + params := &CassandraTableParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + opts = append(opts, resource.WithNameFilter("AnalyticalStorageTTL")) + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *CassandraTable) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/cosmosdb/v1beta2/zz_cassandratable_types.go b/apis/cosmosdb/v1beta2/zz_cassandratable_types.go new file mode 100755 index 000000000..0dea80479 --- /dev/null +++ b/apis/cosmosdb/v1beta2/zz_cassandratable_types.go @@ -0,0 +1,289 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CassandraTableAutoscaleSettingsInitParameters struct { + + // The maximum throughput of the Cassandra Table (RU/s). Must be between 1,000 and 1,000,000. Must be set in increments of 1,000. Conflicts with throughput. + MaxThroughput *float64 `json:"maxThroughput,omitempty" tf:"max_throughput,omitempty"` +} + +type CassandraTableAutoscaleSettingsObservation struct { + + // The maximum throughput of the Cassandra Table (RU/s). Must be between 1,000 and 1,000,000. Must be set in increments of 1,000. Conflicts with throughput. + MaxThroughput *float64 `json:"maxThroughput,omitempty" tf:"max_throughput,omitempty"` +} + +type CassandraTableAutoscaleSettingsParameters struct { + + // The maximum throughput of the Cassandra Table (RU/s). Must be between 1,000 and 1,000,000. Must be set in increments of 1,000. Conflicts with throughput. + // +kubebuilder:validation:Optional + MaxThroughput *float64 `json:"maxThroughput,omitempty" tf:"max_throughput,omitempty"` +} + +type CassandraTableInitParameters struct { + + // Time to live of the Analytical Storage. Possible values are between -1 and 2147483647 except 0. -1 means the Analytical Storage never expires. 
Changing this forces a new resource to be created. + AnalyticalStorageTTL *float64 `json:"analyticalStorageTtl,omitempty" tf:"analytical_storage_ttl,omitempty"` + + // An autoscale_settings block as defined below. + AutoscaleSettings *CassandraTableAutoscaleSettingsInitParameters `json:"autoscaleSettings,omitempty" tf:"autoscale_settings,omitempty"` + + // Time to live of the Cosmos DB Cassandra table. Possible values are at least -1. -1 means the Cassandra table never expires. + DefaultTTL *float64 `json:"defaultTtl,omitempty" tf:"default_ttl,omitempty"` + + // A schema block as defined below. + Schema *SchemaInitParameters `json:"schema,omitempty" tf:"schema,omitempty"` + + // The throughput of Cassandra KeySpace (RU/s). Must be set in increments of 100. The minimum value is 400. + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` +} + +type CassandraTableObservation struct { + + // Time to live of the Analytical Storage. Possible values are between -1 and 2147483647 except 0. -1 means the Analytical Storage never expires. Changing this forces a new resource to be created. + AnalyticalStorageTTL *float64 `json:"analyticalStorageTtl,omitempty" tf:"analytical_storage_ttl,omitempty"` + + // An autoscale_settings block as defined below. + AutoscaleSettings *CassandraTableAutoscaleSettingsObservation `json:"autoscaleSettings,omitempty" tf:"autoscale_settings,omitempty"` + + // The ID of the Cosmos DB Cassandra Keyspace to create the table within. Changing this forces a new resource to be created. + CassandraKeySpaceID *string `json:"cassandraKeyspaceId,omitempty" tf:"cassandra_keyspace_id,omitempty"` + + // Time to live of the Cosmos DB Cassandra table. Possible values are at least -1. -1 means the Cassandra table never expires. + DefaultTTL *float64 `json:"defaultTtl,omitempty" tf:"default_ttl,omitempty"` + + // the ID of the CosmosDB Cassandra Table. 
+ ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A schema block as defined below. + Schema *SchemaObservation `json:"schema,omitempty" tf:"schema,omitempty"` + + // The throughput of Cassandra KeySpace (RU/s). Must be set in increments of 100. The minimum value is 400. + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` +} + +type CassandraTableParameters struct { + + // Time to live of the Analytical Storage. Possible values are between -1 and 2147483647 except 0. -1 means the Analytical Storage never expires. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + AnalyticalStorageTTL *float64 `json:"analyticalStorageTtl,omitempty" tf:"analytical_storage_ttl,omitempty"` + + // An autoscale_settings block as defined below. + // +kubebuilder:validation:Optional + AutoscaleSettings *CassandraTableAutoscaleSettingsParameters `json:"autoscaleSettings,omitempty" tf:"autoscale_settings,omitempty"` + + // The ID of the Cosmos DB Cassandra Keyspace to create the table within. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cosmosdb/v1beta2.CassandraKeySpace + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + CassandraKeySpaceID *string `json:"cassandraKeyspaceId,omitempty" tf:"cassandra_keyspace_id,omitempty"` + + // Reference to a CassandraKeySpace in cosmosdb to populate cassandraKeyspaceId. + // +kubebuilder:validation:Optional + CassandraKeySpaceIDRef *v1.Reference `json:"cassandraKeyspaceIdRef,omitempty" tf:"-"` + + // Selector for a CassandraKeySpace in cosmosdb to populate cassandraKeyspaceId. + // +kubebuilder:validation:Optional + CassandraKeySpaceIDSelector *v1.Selector `json:"cassandraKeyspaceIdSelector,omitempty" tf:"-"` + + // Time to live of the Cosmos DB Cassandra table. Possible values are at least -1. 
-1 means the Cassandra table never expires. + // +kubebuilder:validation:Optional + DefaultTTL *float64 `json:"defaultTtl,omitempty" tf:"default_ttl,omitempty"` + + // A schema block as defined below. + // +kubebuilder:validation:Optional + Schema *SchemaParameters `json:"schema,omitempty" tf:"schema,omitempty"` + + // The throughput of Cassandra KeySpace (RU/s). Must be set in increments of 100. The minimum value is 400. + // +kubebuilder:validation:Optional + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` +} + +type ClusterKeyInitParameters struct { + + // Name of the column to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Order of the key. Currently supported values are Asc and Desc. + OrderBy *string `json:"orderBy,omitempty" tf:"order_by,omitempty"` +} + +type ClusterKeyObservation struct { + + // Name of the column to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Order of the key. Currently supported values are Asc and Desc. + OrderBy *string `json:"orderBy,omitempty" tf:"order_by,omitempty"` +} + +type ClusterKeyParameters struct { + + // Name of the column to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Order of the key. Currently supported values are Asc and Desc. + // +kubebuilder:validation:Optional + OrderBy *string `json:"orderBy" tf:"order_by,omitempty"` +} + +type ColumnInitParameters struct { + + // Name of the column to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Type of the column to be created. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ColumnObservation struct { + + // Name of the column to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Type of the column to be created. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ColumnParameters struct { + + // Name of the column to be created. 
+ // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Type of the column to be created. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type PartitionKeyInitParameters struct { + + // Name of the column to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type PartitionKeyObservation struct { + + // Name of the column to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type PartitionKeyParameters struct { + + // Name of the column to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type SchemaInitParameters struct { + + // One or more cluster_key blocks as defined below. + ClusterKey []ClusterKeyInitParameters `json:"clusterKey,omitempty" tf:"cluster_key,omitempty"` + + // One or more column blocks as defined below. + Column []ColumnInitParameters `json:"column,omitempty" tf:"column,omitempty"` + + // One or more partition_key blocks as defined below. + PartitionKey []PartitionKeyInitParameters `json:"partitionKey,omitempty" tf:"partition_key,omitempty"` +} + +type SchemaObservation struct { + + // One or more cluster_key blocks as defined below. + ClusterKey []ClusterKeyObservation `json:"clusterKey,omitempty" tf:"cluster_key,omitempty"` + + // One or more column blocks as defined below. + Column []ColumnObservation `json:"column,omitempty" tf:"column,omitempty"` + + // One or more partition_key blocks as defined below. + PartitionKey []PartitionKeyObservation `json:"partitionKey,omitempty" tf:"partition_key,omitempty"` +} + +type SchemaParameters struct { + + // One or more cluster_key blocks as defined below. + // +kubebuilder:validation:Optional + ClusterKey []ClusterKeyParameters `json:"clusterKey,omitempty" tf:"cluster_key,omitempty"` + + // One or more column blocks as defined below. 
+ // +kubebuilder:validation:Optional + Column []ColumnParameters `json:"column" tf:"column,omitempty"` + + // One or more partition_key blocks as defined below. + // +kubebuilder:validation:Optional + PartitionKey []PartitionKeyParameters `json:"partitionKey" tf:"partition_key,omitempty"` +} + +// CassandraTableSpec defines the desired state of CassandraTable +type CassandraTableSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider CassandraTableParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider CassandraTableInitParameters `json:"initProvider,omitempty"` +} + +// CassandraTableStatus defines the observed state of CassandraTable. +type CassandraTableStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider CassandraTableObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// CassandraTable is the Schema for the CassandraTables API. Manages a Cassandra Table within a Cosmos DB Cassandra Keyspace. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type CassandraTable struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.schema) || (has(self.initProvider) && has(self.initProvider.schema))",message="spec.forProvider.schema is a required parameter" + Spec CassandraTableSpec `json:"spec"` + Status CassandraTableStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// CassandraTableList contains a list of CassandraTables +type CassandraTableList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []CassandraTable `json:"items"` +} + +// Repository type metadata. +var ( + CassandraTable_Kind = "CassandraTable" + CassandraTable_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: CassandraTable_Kind}.String() + CassandraTable_KindAPIVersion = CassandraTable_Kind + "." 
+ CRDGroupVersion.String() + CassandraTable_GroupVersionKind = CRDGroupVersion.WithKind(CassandraTable_Kind) +) + +func init() { + SchemeBuilder.Register(&CassandraTable{}, &CassandraTableList{}) +} diff --git a/apis/cosmosdb/v1beta2/zz_generated.conversion_hubs.go b/apis/cosmosdb/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 000000000..dcbd76c16 --- /dev/null +++ b/apis/cosmosdb/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,40 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Account) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *CassandraCluster) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *CassandraKeySpace) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *CassandraTable) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *GremlinDatabase) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *GremlinGraph) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *MongoCollection) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *MongoDatabase) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *SQLContainer) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *SQLDatabase) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Table) Hub() {} diff --git a/apis/cosmosdb/v1beta2/zz_generated.deepcopy.go b/apis/cosmosdb/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..ec172f93d --- /dev/null +++ b/apis/cosmosdb/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,6982 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Account) DeepCopyInto(out *Account) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Account. +func (in *Account) DeepCopy() *Account { + if in == nil { + return nil + } + out := new(Account) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Account) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AccountInitParameters) DeepCopyInto(out *AccountInitParameters) { + *out = *in + if in.AccessKeyMetadataWritesEnabled != nil { + in, out := &in.AccessKeyMetadataWritesEnabled, &out.AccessKeyMetadataWritesEnabled + *out = new(bool) + **out = **in + } + if in.AnalyticalStorage != nil { + in, out := &in.AnalyticalStorage, &out.AnalyticalStorage + *out = new(AnalyticalStorageInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AnalyticalStorageEnabled != nil { + in, out := &in.AnalyticalStorageEnabled, &out.AnalyticalStorageEnabled + *out = new(bool) + **out = **in + } + if in.Backup != nil { + in, out := &in.Backup, &out.Backup + *out = new(BackupInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Capabilities != nil { + in, out := &in.Capabilities, &out.Capabilities + *out = make([]CapabilitiesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(CapacityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ConsistencyPolicy != nil { + in, out := &in.ConsistencyPolicy, &out.ConsistencyPolicy + *out = new(ConsistencyPolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CorsRule != nil { + in, out := &in.CorsRule, &out.CorsRule + *out = new(CorsRuleInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CreateMode != nil { + in, out := &in.CreateMode, &out.CreateMode + *out = new(string) + **out = **in + } + if in.DefaultIdentityType != nil { + in, out := &in.DefaultIdentityType, &out.DefaultIdentityType + *out = new(string) + **out = **in + } + if in.EnableAutomaticFailover != nil { + in, out := &in.EnableAutomaticFailover, &out.EnableAutomaticFailover + *out = new(bool) + **out = **in + } + if in.EnableFreeTier != nil { + in, out := &in.EnableFreeTier, &out.EnableFreeTier + *out = new(bool) + **out = **in + } + if in.EnableMultipleWriteLocations != nil { + in, out := &in.EnableMultipleWriteLocations, 
&out.EnableMultipleWriteLocations + *out = new(bool) + **out = **in + } + if in.GeoLocation != nil { + in, out := &in.GeoLocation, &out.GeoLocation + *out = make([]GeoLocationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPRangeFilter != nil { + in, out := &in.IPRangeFilter, &out.IPRangeFilter + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.IsVirtualNetworkFilterEnabled != nil { + in, out := &in.IsVirtualNetworkFilterEnabled, &out.IsVirtualNetworkFilterEnabled + *out = new(bool) + **out = **in + } + if in.KeyVaultKeyID != nil { + in, out := &in.KeyVaultKeyID, &out.KeyVaultKeyID + *out = new(string) + **out = **in + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(string) + **out = **in + } + if in.LocalAuthenticationDisabled != nil { + in, out := &in.LocalAuthenticationDisabled, &out.LocalAuthenticationDisabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MinimalTLSVersion != nil { + in, out := &in.MinimalTLSVersion, &out.MinimalTLSVersion + *out = new(string) + **out = **in + } + if in.MongoServerVersion != nil { + in, out := &in.MongoServerVersion, &out.MongoServerVersion + *out = new(string) + **out = **in + } + if in.NetworkACLBypassForAzureServices != nil { + in, out := &in.NetworkACLBypassForAzureServices, &out.NetworkACLBypassForAzureServices + *out = new(bool) + **out = **in + } + if in.NetworkACLBypassIds != nil { + in, out := &in.NetworkACLBypassIds, &out.NetworkACLBypassIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OfferType != nil { + in, out := &in.OfferType, &out.OfferType + *out = new(string) + **out = **in + 
} + if in.PartitionMergeEnabled != nil { + in, out := &in.PartitionMergeEnabled, &out.PartitionMergeEnabled + *out = new(bool) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.Restore != nil { + in, out := &in.Restore, &out.Restore + *out = new(RestoreInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VirtualNetworkRule != nil { + in, out := &in.VirtualNetworkRule, &out.VirtualNetworkRule + *out = make([]VirtualNetworkRuleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountInitParameters. +func (in *AccountInitParameters) DeepCopy() *AccountInitParameters { + if in == nil { + return nil + } + out := new(AccountInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccountList) DeepCopyInto(out *AccountList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Account, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountList. 
+func (in *AccountList) DeepCopy() *AccountList { + if in == nil { + return nil + } + out := new(AccountList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AccountList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccountObservation) DeepCopyInto(out *AccountObservation) { + *out = *in + if in.AccessKeyMetadataWritesEnabled != nil { + in, out := &in.AccessKeyMetadataWritesEnabled, &out.AccessKeyMetadataWritesEnabled + *out = new(bool) + **out = **in + } + if in.AnalyticalStorage != nil { + in, out := &in.AnalyticalStorage, &out.AnalyticalStorage + *out = new(AnalyticalStorageObservation) + (*in).DeepCopyInto(*out) + } + if in.AnalyticalStorageEnabled != nil { + in, out := &in.AnalyticalStorageEnabled, &out.AnalyticalStorageEnabled + *out = new(bool) + **out = **in + } + if in.Backup != nil { + in, out := &in.Backup, &out.Backup + *out = new(BackupObservation) + (*in).DeepCopyInto(*out) + } + if in.Capabilities != nil { + in, out := &in.Capabilities, &out.Capabilities + *out = make([]CapabilitiesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(CapacityObservation) + (*in).DeepCopyInto(*out) + } + if in.ConsistencyPolicy != nil { + in, out := &in.ConsistencyPolicy, &out.ConsistencyPolicy + *out = new(ConsistencyPolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.CorsRule != nil { + in, out := &in.CorsRule, &out.CorsRule + *out = new(CorsRuleObservation) + (*in).DeepCopyInto(*out) + } + if in.CreateMode != nil { + in, out := &in.CreateMode, &out.CreateMode + *out = new(string) + **out = **in + } + if in.DefaultIdentityType != nil { + in, out := 
&in.DefaultIdentityType, &out.DefaultIdentityType + *out = new(string) + **out = **in + } + if in.EnableAutomaticFailover != nil { + in, out := &in.EnableAutomaticFailover, &out.EnableAutomaticFailover + *out = new(bool) + **out = **in + } + if in.EnableFreeTier != nil { + in, out := &in.EnableFreeTier, &out.EnableFreeTier + *out = new(bool) + **out = **in + } + if in.EnableMultipleWriteLocations != nil { + in, out := &in.EnableMultipleWriteLocations, &out.EnableMultipleWriteLocations + *out = new(bool) + **out = **in + } + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(string) + **out = **in + } + if in.GeoLocation != nil { + in, out := &in.GeoLocation, &out.GeoLocation + *out = make([]GeoLocationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IPRangeFilter != nil { + in, out := &in.IPRangeFilter, &out.IPRangeFilter + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.IsVirtualNetworkFilterEnabled != nil { + in, out := &in.IsVirtualNetworkFilterEnabled, &out.IsVirtualNetworkFilterEnabled + *out = new(bool) + **out = **in + } + if in.KeyVaultKeyID != nil { + in, out := &in.KeyVaultKeyID, &out.KeyVaultKeyID + *out = new(string) + **out = **in + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(string) + **out = **in + } + if in.LocalAuthenticationDisabled != nil { + in, out := &in.LocalAuthenticationDisabled, &out.LocalAuthenticationDisabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MinimalTLSVersion != nil { + in, out := &in.MinimalTLSVersion, &out.MinimalTLSVersion + *out = new(string) + **out = **in + } + if in.MongoServerVersion != nil { + 
in, out := &in.MongoServerVersion, &out.MongoServerVersion + *out = new(string) + **out = **in + } + if in.NetworkACLBypassForAzureServices != nil { + in, out := &in.NetworkACLBypassForAzureServices, &out.NetworkACLBypassForAzureServices + *out = new(bool) + **out = **in + } + if in.NetworkACLBypassIds != nil { + in, out := &in.NetworkACLBypassIds, &out.NetworkACLBypassIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OfferType != nil { + in, out := &in.OfferType, &out.OfferType + *out = new(string) + **out = **in + } + if in.PartitionMergeEnabled != nil { + in, out := &in.PartitionMergeEnabled, &out.PartitionMergeEnabled + *out = new(bool) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ReadEndpoints != nil { + in, out := &in.ReadEndpoints, &out.ReadEndpoints + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Restore != nil { + in, out := &in.Restore, &out.Restore + *out = new(RestoreObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VirtualNetworkRule != nil { + in, out := &in.VirtualNetworkRule, &out.VirtualNetworkRule + *out = make([]VirtualNetworkRuleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } 
+ } + if in.WriteEndpoints != nil { + in, out := &in.WriteEndpoints, &out.WriteEndpoints + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountObservation. +func (in *AccountObservation) DeepCopy() *AccountObservation { + if in == nil { + return nil + } + out := new(AccountObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccountParameters) DeepCopyInto(out *AccountParameters) { + *out = *in + if in.AccessKeyMetadataWritesEnabled != nil { + in, out := &in.AccessKeyMetadataWritesEnabled, &out.AccessKeyMetadataWritesEnabled + *out = new(bool) + **out = **in + } + if in.AnalyticalStorage != nil { + in, out := &in.AnalyticalStorage, &out.AnalyticalStorage + *out = new(AnalyticalStorageParameters) + (*in).DeepCopyInto(*out) + } + if in.AnalyticalStorageEnabled != nil { + in, out := &in.AnalyticalStorageEnabled, &out.AnalyticalStorageEnabled + *out = new(bool) + **out = **in + } + if in.Backup != nil { + in, out := &in.Backup, &out.Backup + *out = new(BackupParameters) + (*in).DeepCopyInto(*out) + } + if in.Capabilities != nil { + in, out := &in.Capabilities, &out.Capabilities + *out = make([]CapabilitiesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(CapacityParameters) + (*in).DeepCopyInto(*out) + } + if in.ConsistencyPolicy != nil { + in, out := &in.ConsistencyPolicy, &out.ConsistencyPolicy + *out = new(ConsistencyPolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.CorsRule != nil { + in, out := &in.CorsRule, &out.CorsRule + *out = new(CorsRuleParameters) + (*in).DeepCopyInto(*out) + } + if in.CreateMode != 
nil { + in, out := &in.CreateMode, &out.CreateMode + *out = new(string) + **out = **in + } + if in.DefaultIdentityType != nil { + in, out := &in.DefaultIdentityType, &out.DefaultIdentityType + *out = new(string) + **out = **in + } + if in.EnableAutomaticFailover != nil { + in, out := &in.EnableAutomaticFailover, &out.EnableAutomaticFailover + *out = new(bool) + **out = **in + } + if in.EnableFreeTier != nil { + in, out := &in.EnableFreeTier, &out.EnableFreeTier + *out = new(bool) + **out = **in + } + if in.EnableMultipleWriteLocations != nil { + in, out := &in.EnableMultipleWriteLocations, &out.EnableMultipleWriteLocations + *out = new(bool) + **out = **in + } + if in.GeoLocation != nil { + in, out := &in.GeoLocation, &out.GeoLocation + *out = make([]GeoLocationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPRangeFilter != nil { + in, out := &in.IPRangeFilter, &out.IPRangeFilter + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.IsVirtualNetworkFilterEnabled != nil { + in, out := &in.IsVirtualNetworkFilterEnabled, &out.IsVirtualNetworkFilterEnabled + *out = new(bool) + **out = **in + } + if in.KeyVaultKeyID != nil { + in, out := &in.KeyVaultKeyID, &out.KeyVaultKeyID + *out = new(string) + **out = **in + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(string) + **out = **in + } + if in.LocalAuthenticationDisabled != nil { + in, out := &in.LocalAuthenticationDisabled, &out.LocalAuthenticationDisabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MinimalTLSVersion != nil { + in, out := &in.MinimalTLSVersion, &out.MinimalTLSVersion + *out = new(string) + **out = **in + } + if in.MongoServerVersion != nil { + in, out := &in.MongoServerVersion, &out.MongoServerVersion 
+ *out = new(string) + **out = **in + } + if in.NetworkACLBypassForAzureServices != nil { + in, out := &in.NetworkACLBypassForAzureServices, &out.NetworkACLBypassForAzureServices + *out = new(bool) + **out = **in + } + if in.NetworkACLBypassIds != nil { + in, out := &in.NetworkACLBypassIds, &out.NetworkACLBypassIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OfferType != nil { + in, out := &in.OfferType, &out.OfferType + *out = new(string) + **out = **in + } + if in.PartitionMergeEnabled != nil { + in, out := &in.PartitionMergeEnabled, &out.PartitionMergeEnabled + *out = new(bool) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Restore != nil { + in, out := &in.Restore, &out.Restore + *out = new(RestoreParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VirtualNetworkRule != nil { + in, out := &in.VirtualNetworkRule, &out.VirtualNetworkRule + *out = make([]VirtualNetworkRuleParameters, len(*in)) + for i := range *in { + 
(*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountParameters. +func (in *AccountParameters) DeepCopy() *AccountParameters { + if in == nil { + return nil + } + out := new(AccountParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccountSpec) DeepCopyInto(out *AccountSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountSpec. +func (in *AccountSpec) DeepCopy() *AccountSpec { + if in == nil { + return nil + } + out := new(AccountSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccountStatus) DeepCopyInto(out *AccountStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountStatus. +func (in *AccountStatus) DeepCopy() *AccountStatus { + if in == nil { + return nil + } + out := new(AccountStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AnalyticalStorageInitParameters) DeepCopyInto(out *AnalyticalStorageInitParameters) { + *out = *in + if in.SchemaType != nil { + in, out := &in.SchemaType, &out.SchemaType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnalyticalStorageInitParameters. 
+func (in *AnalyticalStorageInitParameters) DeepCopy() *AnalyticalStorageInitParameters { + if in == nil { + return nil + } + out := new(AnalyticalStorageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AnalyticalStorageObservation) DeepCopyInto(out *AnalyticalStorageObservation) { + *out = *in + if in.SchemaType != nil { + in, out := &in.SchemaType, &out.SchemaType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnalyticalStorageObservation. +func (in *AnalyticalStorageObservation) DeepCopy() *AnalyticalStorageObservation { + if in == nil { + return nil + } + out := new(AnalyticalStorageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AnalyticalStorageParameters) DeepCopyInto(out *AnalyticalStorageParameters) { + *out = *in + if in.SchemaType != nil { + in, out := &in.SchemaType, &out.SchemaType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnalyticalStorageParameters. +func (in *AnalyticalStorageParameters) DeepCopy() *AnalyticalStorageParameters { + if in == nil { + return nil + } + out := new(AnalyticalStorageParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoscaleSettingsInitParameters) DeepCopyInto(out *AutoscaleSettingsInitParameters) { + *out = *in + if in.MaxThroughput != nil { + in, out := &in.MaxThroughput, &out.MaxThroughput + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscaleSettingsInitParameters. 
+func (in *AutoscaleSettingsInitParameters) DeepCopy() *AutoscaleSettingsInitParameters { + if in == nil { + return nil + } + out := new(AutoscaleSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoscaleSettingsObservation) DeepCopyInto(out *AutoscaleSettingsObservation) { + *out = *in + if in.MaxThroughput != nil { + in, out := &in.MaxThroughput, &out.MaxThroughput + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscaleSettingsObservation. +func (in *AutoscaleSettingsObservation) DeepCopy() *AutoscaleSettingsObservation { + if in == nil { + return nil + } + out := new(AutoscaleSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoscaleSettingsParameters) DeepCopyInto(out *AutoscaleSettingsParameters) { + *out = *in + if in.MaxThroughput != nil { + in, out := &in.MaxThroughput, &out.MaxThroughput + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscaleSettingsParameters. +func (in *AutoscaleSettingsParameters) DeepCopy() *AutoscaleSettingsParameters { + if in == nil { + return nil + } + out := new(AutoscaleSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupInitParameters) DeepCopyInto(out *BackupInitParameters) { + *out = *in + if in.IntervalInMinutes != nil { + in, out := &in.IntervalInMinutes, &out.IntervalInMinutes + *out = new(float64) + **out = **in + } + if in.RetentionInHours != nil { + in, out := &in.RetentionInHours, &out.RetentionInHours + *out = new(float64) + **out = **in + } + if in.StorageRedundancy != nil { + in, out := &in.StorageRedundancy, &out.StorageRedundancy + *out = new(string) + **out = **in + } + if in.Tier != nil { + in, out := &in.Tier, &out.Tier + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupInitParameters. +func (in *BackupInitParameters) DeepCopy() *BackupInitParameters { + if in == nil { + return nil + } + out := new(BackupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupObservation) DeepCopyInto(out *BackupObservation) { + *out = *in + if in.IntervalInMinutes != nil { + in, out := &in.IntervalInMinutes, &out.IntervalInMinutes + *out = new(float64) + **out = **in + } + if in.RetentionInHours != nil { + in, out := &in.RetentionInHours, &out.RetentionInHours + *out = new(float64) + **out = **in + } + if in.StorageRedundancy != nil { + in, out := &in.StorageRedundancy, &out.StorageRedundancy + *out = new(string) + **out = **in + } + if in.Tier != nil { + in, out := &in.Tier, &out.Tier + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupObservation. 
+func (in *BackupObservation) DeepCopy() *BackupObservation { + if in == nil { + return nil + } + out := new(BackupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupParameters) DeepCopyInto(out *BackupParameters) { + *out = *in + if in.IntervalInMinutes != nil { + in, out := &in.IntervalInMinutes, &out.IntervalInMinutes + *out = new(float64) + **out = **in + } + if in.RetentionInHours != nil { + in, out := &in.RetentionInHours, &out.RetentionInHours + *out = new(float64) + **out = **in + } + if in.StorageRedundancy != nil { + in, out := &in.StorageRedundancy, &out.StorageRedundancy + *out = new(string) + **out = **in + } + if in.Tier != nil { + in, out := &in.Tier, &out.Tier + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupParameters. +func (in *BackupParameters) DeepCopy() *BackupParameters { + if in == nil { + return nil + } + out := new(BackupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CapabilitiesInitParameters) DeepCopyInto(out *CapabilitiesInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapabilitiesInitParameters. +func (in *CapabilitiesInitParameters) DeepCopy() *CapabilitiesInitParameters { + if in == nil { + return nil + } + out := new(CapabilitiesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CapabilitiesObservation) DeepCopyInto(out *CapabilitiesObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapabilitiesObservation. +func (in *CapabilitiesObservation) DeepCopy() *CapabilitiesObservation { + if in == nil { + return nil + } + out := new(CapabilitiesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CapabilitiesParameters) DeepCopyInto(out *CapabilitiesParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapabilitiesParameters. +func (in *CapabilitiesParameters) DeepCopy() *CapabilitiesParameters { + if in == nil { + return nil + } + out := new(CapabilitiesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CapacityInitParameters) DeepCopyInto(out *CapacityInitParameters) { + *out = *in + if in.TotalThroughputLimit != nil { + in, out := &in.TotalThroughputLimit, &out.TotalThroughputLimit + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacityInitParameters. +func (in *CapacityInitParameters) DeepCopy() *CapacityInitParameters { + if in == nil { + return nil + } + out := new(CapacityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CapacityObservation) DeepCopyInto(out *CapacityObservation) { + *out = *in + if in.TotalThroughputLimit != nil { + in, out := &in.TotalThroughputLimit, &out.TotalThroughputLimit + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacityObservation. +func (in *CapacityObservation) DeepCopy() *CapacityObservation { + if in == nil { + return nil + } + out := new(CapacityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CapacityParameters) DeepCopyInto(out *CapacityParameters) { + *out = *in + if in.TotalThroughputLimit != nil { + in, out := &in.TotalThroughputLimit, &out.TotalThroughputLimit + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacityParameters. +func (in *CapacityParameters) DeepCopy() *CapacityParameters { + if in == nil { + return nil + } + out := new(CapacityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CassandraCluster) DeepCopyInto(out *CassandraCluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CassandraCluster. +func (in *CassandraCluster) DeepCopy() *CassandraCluster { + if in == nil { + return nil + } + out := new(CassandraCluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *CassandraCluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CassandraClusterIdentityInitParameters) DeepCopyInto(out *CassandraClusterIdentityInitParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CassandraClusterIdentityInitParameters. +func (in *CassandraClusterIdentityInitParameters) DeepCopy() *CassandraClusterIdentityInitParameters { + if in == nil { + return nil + } + out := new(CassandraClusterIdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CassandraClusterIdentityObservation) DeepCopyInto(out *CassandraClusterIdentityObservation) { + *out = *in + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CassandraClusterIdentityObservation. +func (in *CassandraClusterIdentityObservation) DeepCopy() *CassandraClusterIdentityObservation { + if in == nil { + return nil + } + out := new(CassandraClusterIdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CassandraClusterIdentityParameters) DeepCopyInto(out *CassandraClusterIdentityParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CassandraClusterIdentityParameters. +func (in *CassandraClusterIdentityParameters) DeepCopy() *CassandraClusterIdentityParameters { + if in == nil { + return nil + } + out := new(CassandraClusterIdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CassandraClusterInitParameters) DeepCopyInto(out *CassandraClusterInitParameters) { + *out = *in + if in.AuthenticationMethod != nil { + in, out := &in.AuthenticationMethod, &out.AuthenticationMethod + *out = new(string) + **out = **in + } + if in.ClientCertificatePems != nil { + in, out := &in.ClientCertificatePems, &out.ClientCertificatePems + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DelegatedManagementSubnetID != nil { + in, out := &in.DelegatedManagementSubnetID, &out.DelegatedManagementSubnetID + *out = new(string) + **out = **in + } + if in.DelegatedManagementSubnetIDRef != nil { + in, out := &in.DelegatedManagementSubnetIDRef, &out.DelegatedManagementSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DelegatedManagementSubnetIDSelector != nil { + in, out := &in.DelegatedManagementSubnetIDSelector, &out.DelegatedManagementSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ExternalGossipCertificatePems != nil { + in, out := &in.ExternalGossipCertificatePems, &out.ExternalGossipCertificatePems + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], 
&(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ExternalSeedNodeIPAddresses != nil { + in, out := &in.ExternalSeedNodeIPAddresses, &out.ExternalSeedNodeIPAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.HoursBetweenBackups != nil { + in, out := &in.HoursBetweenBackups, &out.HoursBetweenBackups + *out = new(float64) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(CassandraClusterIdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.RepairEnabled != nil { + in, out := &in.RepairEnabled, &out.RepairEnabled + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CassandraClusterInitParameters. +func (in *CassandraClusterInitParameters) DeepCopy() *CassandraClusterInitParameters { + if in == nil { + return nil + } + out := new(CassandraClusterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CassandraClusterList) DeepCopyInto(out *CassandraClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CassandraCluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CassandraClusterList. +func (in *CassandraClusterList) DeepCopy() *CassandraClusterList { + if in == nil { + return nil + } + out := new(CassandraClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CassandraClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CassandraClusterObservation) DeepCopyInto(out *CassandraClusterObservation) { + *out = *in + if in.AuthenticationMethod != nil { + in, out := &in.AuthenticationMethod, &out.AuthenticationMethod + *out = new(string) + **out = **in + } + if in.ClientCertificatePems != nil { + in, out := &in.ClientCertificatePems, &out.ClientCertificatePems + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DelegatedManagementSubnetID != nil { + in, out := &in.DelegatedManagementSubnetID, &out.DelegatedManagementSubnetID + *out = new(string) + **out = **in + } + if in.ExternalGossipCertificatePems != nil { + in, out := &in.ExternalGossipCertificatePems, &out.ExternalGossipCertificatePems + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ExternalSeedNodeIPAddresses != 
nil { + in, out := &in.ExternalSeedNodeIPAddresses, &out.ExternalSeedNodeIPAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.HoursBetweenBackups != nil { + in, out := &in.HoursBetweenBackups, &out.HoursBetweenBackups + *out = new(float64) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(CassandraClusterIdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.RepairEnabled != nil { + in, out := &in.RepairEnabled, &out.RepairEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CassandraClusterObservation. +func (in *CassandraClusterObservation) DeepCopy() *CassandraClusterObservation { + if in == nil { + return nil + } + out := new(CassandraClusterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CassandraClusterParameters) DeepCopyInto(out *CassandraClusterParameters) { + *out = *in + if in.AuthenticationMethod != nil { + in, out := &in.AuthenticationMethod, &out.AuthenticationMethod + *out = new(string) + **out = **in + } + if in.ClientCertificatePems != nil { + in, out := &in.ClientCertificatePems, &out.ClientCertificatePems + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + out.DefaultAdminPasswordSecretRef = in.DefaultAdminPasswordSecretRef + if in.DelegatedManagementSubnetID != nil { + in, out := &in.DelegatedManagementSubnetID, &out.DelegatedManagementSubnetID + *out = new(string) + **out = **in + } + if in.DelegatedManagementSubnetIDRef != nil { + in, out := &in.DelegatedManagementSubnetIDRef, &out.DelegatedManagementSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DelegatedManagementSubnetIDSelector != nil { + in, out := &in.DelegatedManagementSubnetIDSelector, &out.DelegatedManagementSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ExternalGossipCertificatePems != nil { + in, out := &in.ExternalGossipCertificatePems, &out.ExternalGossipCertificatePems + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ExternalSeedNodeIPAddresses != nil { + in, out := &in.ExternalSeedNodeIPAddresses, &out.ExternalSeedNodeIPAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.HoursBetweenBackups != nil { + in, out := &in.HoursBetweenBackups, &out.HoursBetweenBackups + *out = new(float64) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(CassandraClusterIdentityParameters) + 
(*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.RepairEnabled != nil { + in, out := &in.RepairEnabled, &out.RepairEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CassandraClusterParameters. +func (in *CassandraClusterParameters) DeepCopy() *CassandraClusterParameters { + if in == nil { + return nil + } + out := new(CassandraClusterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CassandraClusterSpec) DeepCopyInto(out *CassandraClusterSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CassandraClusterSpec. 
+func (in *CassandraClusterSpec) DeepCopy() *CassandraClusterSpec { + if in == nil { + return nil + } + out := new(CassandraClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CassandraClusterStatus) DeepCopyInto(out *CassandraClusterStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CassandraClusterStatus. +func (in *CassandraClusterStatus) DeepCopy() *CassandraClusterStatus { + if in == nil { + return nil + } + out := new(CassandraClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CassandraKeySpace) DeepCopyInto(out *CassandraKeySpace) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CassandraKeySpace. +func (in *CassandraKeySpace) DeepCopy() *CassandraKeySpace { + if in == nil { + return nil + } + out := new(CassandraKeySpace) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CassandraKeySpace) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CassandraKeySpaceInitParameters) DeepCopyInto(out *CassandraKeySpaceInitParameters) { + *out = *in + if in.AutoscaleSettings != nil { + in, out := &in.AutoscaleSettings, &out.AutoscaleSettings + *out = new(AutoscaleSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CassandraKeySpaceInitParameters. +func (in *CassandraKeySpaceInitParameters) DeepCopy() *CassandraKeySpaceInitParameters { + if in == nil { + return nil + } + out := new(CassandraKeySpaceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CassandraKeySpaceList) DeepCopyInto(out *CassandraKeySpaceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CassandraKeySpace, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CassandraKeySpaceList. +func (in *CassandraKeySpaceList) DeepCopy() *CassandraKeySpaceList { + if in == nil { + return nil + } + out := new(CassandraKeySpaceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CassandraKeySpaceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CassandraKeySpaceObservation) DeepCopyInto(out *CassandraKeySpaceObservation) { + *out = *in + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } + if in.AutoscaleSettings != nil { + in, out := &in.AutoscaleSettings, &out.AutoscaleSettings + *out = new(AutoscaleSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CassandraKeySpaceObservation. +func (in *CassandraKeySpaceObservation) DeepCopy() *CassandraKeySpaceObservation { + if in == nil { + return nil + } + out := new(CassandraKeySpaceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CassandraKeySpaceParameters) DeepCopyInto(out *CassandraKeySpaceParameters) { + *out = *in + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } + if in.AccountNameRef != nil { + in, out := &in.AccountNameRef, &out.AccountNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AccountNameSelector != nil { + in, out := &in.AccountNameSelector, &out.AccountNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.AutoscaleSettings != nil { + in, out := &in.AutoscaleSettings, &out.AutoscaleSettings + *out = new(AutoscaleSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CassandraKeySpaceParameters. +func (in *CassandraKeySpaceParameters) DeepCopy() *CassandraKeySpaceParameters { + if in == nil { + return nil + } + out := new(CassandraKeySpaceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CassandraKeySpaceSpec) DeepCopyInto(out *CassandraKeySpaceSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CassandraKeySpaceSpec. +func (in *CassandraKeySpaceSpec) DeepCopy() *CassandraKeySpaceSpec { + if in == nil { + return nil + } + out := new(CassandraKeySpaceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CassandraKeySpaceStatus) DeepCopyInto(out *CassandraKeySpaceStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CassandraKeySpaceStatus. +func (in *CassandraKeySpaceStatus) DeepCopy() *CassandraKeySpaceStatus { + if in == nil { + return nil + } + out := new(CassandraKeySpaceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CassandraTable) DeepCopyInto(out *CassandraTable) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CassandraTable. +func (in *CassandraTable) DeepCopy() *CassandraTable { + if in == nil { + return nil + } + out := new(CassandraTable) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *CassandraTable) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CassandraTableAutoscaleSettingsInitParameters) DeepCopyInto(out *CassandraTableAutoscaleSettingsInitParameters) { + *out = *in + if in.MaxThroughput != nil { + in, out := &in.MaxThroughput, &out.MaxThroughput + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CassandraTableAutoscaleSettingsInitParameters. +func (in *CassandraTableAutoscaleSettingsInitParameters) DeepCopy() *CassandraTableAutoscaleSettingsInitParameters { + if in == nil { + return nil + } + out := new(CassandraTableAutoscaleSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CassandraTableAutoscaleSettingsObservation) DeepCopyInto(out *CassandraTableAutoscaleSettingsObservation) { + *out = *in + if in.MaxThroughput != nil { + in, out := &in.MaxThroughput, &out.MaxThroughput + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CassandraTableAutoscaleSettingsObservation. +func (in *CassandraTableAutoscaleSettingsObservation) DeepCopy() *CassandraTableAutoscaleSettingsObservation { + if in == nil { + return nil + } + out := new(CassandraTableAutoscaleSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CassandraTableAutoscaleSettingsParameters) DeepCopyInto(out *CassandraTableAutoscaleSettingsParameters) { + *out = *in + if in.MaxThroughput != nil { + in, out := &in.MaxThroughput, &out.MaxThroughput + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CassandraTableAutoscaleSettingsParameters. +func (in *CassandraTableAutoscaleSettingsParameters) DeepCopy() *CassandraTableAutoscaleSettingsParameters { + if in == nil { + return nil + } + out := new(CassandraTableAutoscaleSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CassandraTableInitParameters) DeepCopyInto(out *CassandraTableInitParameters) { + *out = *in + if in.AnalyticalStorageTTL != nil { + in, out := &in.AnalyticalStorageTTL, &out.AnalyticalStorageTTL + *out = new(float64) + **out = **in + } + if in.AutoscaleSettings != nil { + in, out := &in.AutoscaleSettings, &out.AutoscaleSettings + *out = new(CassandraTableAutoscaleSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DefaultTTL != nil { + in, out := &in.DefaultTTL, &out.DefaultTTL + *out = new(float64) + **out = **in + } + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(SchemaInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CassandraTableInitParameters. +func (in *CassandraTableInitParameters) DeepCopy() *CassandraTableInitParameters { + if in == nil { + return nil + } + out := new(CassandraTableInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CassandraTableList) DeepCopyInto(out *CassandraTableList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CassandraTable, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CassandraTableList. +func (in *CassandraTableList) DeepCopy() *CassandraTableList { + if in == nil { + return nil + } + out := new(CassandraTableList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CassandraTableList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CassandraTableObservation) DeepCopyInto(out *CassandraTableObservation) { + *out = *in + if in.AnalyticalStorageTTL != nil { + in, out := &in.AnalyticalStorageTTL, &out.AnalyticalStorageTTL + *out = new(float64) + **out = **in + } + if in.AutoscaleSettings != nil { + in, out := &in.AutoscaleSettings, &out.AutoscaleSettings + *out = new(CassandraTableAutoscaleSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.CassandraKeySpaceID != nil { + in, out := &in.CassandraKeySpaceID, &out.CassandraKeySpaceID + *out = new(string) + **out = **in + } + if in.DefaultTTL != nil { + in, out := &in.DefaultTTL, &out.DefaultTTL + *out = new(float64) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(SchemaObservation) + (*in).DeepCopyInto(*out) + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an 
autogenerated deepcopy function, copying the receiver, creating a new CassandraTableObservation. +func (in *CassandraTableObservation) DeepCopy() *CassandraTableObservation { + if in == nil { + return nil + } + out := new(CassandraTableObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CassandraTableParameters) DeepCopyInto(out *CassandraTableParameters) { + *out = *in + if in.AnalyticalStorageTTL != nil { + in, out := &in.AnalyticalStorageTTL, &out.AnalyticalStorageTTL + *out = new(float64) + **out = **in + } + if in.AutoscaleSettings != nil { + in, out := &in.AutoscaleSettings, &out.AutoscaleSettings + *out = new(CassandraTableAutoscaleSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.CassandraKeySpaceID != nil { + in, out := &in.CassandraKeySpaceID, &out.CassandraKeySpaceID + *out = new(string) + **out = **in + } + if in.CassandraKeySpaceIDRef != nil { + in, out := &in.CassandraKeySpaceIDRef, &out.CassandraKeySpaceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CassandraKeySpaceIDSelector != nil { + in, out := &in.CassandraKeySpaceIDSelector, &out.CassandraKeySpaceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.DefaultTTL != nil { + in, out := &in.DefaultTTL, &out.DefaultTTL + *out = new(float64) + **out = **in + } + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(SchemaParameters) + (*in).DeepCopyInto(*out) + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CassandraTableParameters. 
+func (in *CassandraTableParameters) DeepCopy() *CassandraTableParameters { + if in == nil { + return nil + } + out := new(CassandraTableParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CassandraTableSpec) DeepCopyInto(out *CassandraTableSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CassandraTableSpec. +func (in *CassandraTableSpec) DeepCopy() *CassandraTableSpec { + if in == nil { + return nil + } + out := new(CassandraTableSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CassandraTableStatus) DeepCopyInto(out *CassandraTableStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CassandraTableStatus. +func (in *CassandraTableStatus) DeepCopy() *CassandraTableStatus { + if in == nil { + return nil + } + out := new(CassandraTableStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterKeyInitParameters) DeepCopyInto(out *ClusterKeyInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OrderBy != nil { + in, out := &in.OrderBy, &out.OrderBy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterKeyInitParameters. 
+func (in *ClusterKeyInitParameters) DeepCopy() *ClusterKeyInitParameters { + if in == nil { + return nil + } + out := new(ClusterKeyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterKeyObservation) DeepCopyInto(out *ClusterKeyObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OrderBy != nil { + in, out := &in.OrderBy, &out.OrderBy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterKeyObservation. +func (in *ClusterKeyObservation) DeepCopy() *ClusterKeyObservation { + if in == nil { + return nil + } + out := new(ClusterKeyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterKeyParameters) DeepCopyInto(out *ClusterKeyParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OrderBy != nil { + in, out := &in.OrderBy, &out.OrderBy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterKeyParameters. +func (in *ClusterKeyParameters) DeepCopy() *ClusterKeyParameters { + if in == nil { + return nil + } + out := new(ClusterKeyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ColumnInitParameters) DeepCopyInto(out *ColumnInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ColumnInitParameters. +func (in *ColumnInitParameters) DeepCopy() *ColumnInitParameters { + if in == nil { + return nil + } + out := new(ColumnInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ColumnObservation) DeepCopyInto(out *ColumnObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ColumnObservation. +func (in *ColumnObservation) DeepCopy() *ColumnObservation { + if in == nil { + return nil + } + out := new(ColumnObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ColumnParameters) DeepCopyInto(out *ColumnParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ColumnParameters. 
+func (in *ColumnParameters) DeepCopy() *ColumnParameters { + if in == nil { + return nil + } + out := new(ColumnParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CompositeIndexIndexInitParameters) DeepCopyInto(out *CompositeIndexIndexInitParameters) { + *out = *in + if in.Order != nil { + in, out := &in.Order, &out.Order + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CompositeIndexIndexInitParameters. +func (in *CompositeIndexIndexInitParameters) DeepCopy() *CompositeIndexIndexInitParameters { + if in == nil { + return nil + } + out := new(CompositeIndexIndexInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CompositeIndexIndexObservation) DeepCopyInto(out *CompositeIndexIndexObservation) { + *out = *in + if in.Order != nil { + in, out := &in.Order, &out.Order + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CompositeIndexIndexObservation. +func (in *CompositeIndexIndexObservation) DeepCopy() *CompositeIndexIndexObservation { + if in == nil { + return nil + } + out := new(CompositeIndexIndexObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CompositeIndexIndexParameters) DeepCopyInto(out *CompositeIndexIndexParameters) { + *out = *in + if in.Order != nil { + in, out := &in.Order, &out.Order + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CompositeIndexIndexParameters. +func (in *CompositeIndexIndexParameters) DeepCopy() *CompositeIndexIndexParameters { + if in == nil { + return nil + } + out := new(CompositeIndexIndexParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CompositeIndexInitParameters) DeepCopyInto(out *CompositeIndexInitParameters) { + *out = *in + if in.Index != nil { + in, out := &in.Index, &out.Index + *out = make([]IndexInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CompositeIndexInitParameters. +func (in *CompositeIndexInitParameters) DeepCopy() *CompositeIndexInitParameters { + if in == nil { + return nil + } + out := new(CompositeIndexInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CompositeIndexObservation) DeepCopyInto(out *CompositeIndexObservation) { + *out = *in + if in.Index != nil { + in, out := &in.Index, &out.Index + *out = make([]IndexObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CompositeIndexObservation. 
+func (in *CompositeIndexObservation) DeepCopy() *CompositeIndexObservation { + if in == nil { + return nil + } + out := new(CompositeIndexObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CompositeIndexParameters) DeepCopyInto(out *CompositeIndexParameters) { + *out = *in + if in.Index != nil { + in, out := &in.Index, &out.Index + *out = make([]IndexParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CompositeIndexParameters. +func (in *CompositeIndexParameters) DeepCopy() *CompositeIndexParameters { + if in == nil { + return nil + } + out := new(CompositeIndexParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConflictResolutionPolicyInitParameters) DeepCopyInto(out *ConflictResolutionPolicyInitParameters) { + *out = *in + if in.ConflictResolutionPath != nil { + in, out := &in.ConflictResolutionPath, &out.ConflictResolutionPath + *out = new(string) + **out = **in + } + if in.ConflictResolutionProcedure != nil { + in, out := &in.ConflictResolutionProcedure, &out.ConflictResolutionProcedure + *out = new(string) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConflictResolutionPolicyInitParameters. +func (in *ConflictResolutionPolicyInitParameters) DeepCopy() *ConflictResolutionPolicyInitParameters { + if in == nil { + return nil + } + out := new(ConflictResolutionPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *ConflictResolutionPolicyObservation) DeepCopyInto(out *ConflictResolutionPolicyObservation) { + *out = *in + if in.ConflictResolutionPath != nil { + in, out := &in.ConflictResolutionPath, &out.ConflictResolutionPath + *out = new(string) + **out = **in + } + if in.ConflictResolutionProcedure != nil { + in, out := &in.ConflictResolutionProcedure, &out.ConflictResolutionProcedure + *out = new(string) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConflictResolutionPolicyObservation. +func (in *ConflictResolutionPolicyObservation) DeepCopy() *ConflictResolutionPolicyObservation { + if in == nil { + return nil + } + out := new(ConflictResolutionPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConflictResolutionPolicyParameters) DeepCopyInto(out *ConflictResolutionPolicyParameters) { + *out = *in + if in.ConflictResolutionPath != nil { + in, out := &in.ConflictResolutionPath, &out.ConflictResolutionPath + *out = new(string) + **out = **in + } + if in.ConflictResolutionProcedure != nil { + in, out := &in.ConflictResolutionProcedure, &out.ConflictResolutionProcedure + *out = new(string) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConflictResolutionPolicyParameters. +func (in *ConflictResolutionPolicyParameters) DeepCopy() *ConflictResolutionPolicyParameters { + if in == nil { + return nil + } + out := new(ConflictResolutionPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *ConsistencyPolicyInitParameters) DeepCopyInto(out *ConsistencyPolicyInitParameters) { + *out = *in + if in.ConsistencyLevel != nil { + in, out := &in.ConsistencyLevel, &out.ConsistencyLevel + *out = new(string) + **out = **in + } + if in.MaxIntervalInSeconds != nil { + in, out := &in.MaxIntervalInSeconds, &out.MaxIntervalInSeconds + *out = new(float64) + **out = **in + } + if in.MaxStalenessPrefix != nil { + in, out := &in.MaxStalenessPrefix, &out.MaxStalenessPrefix + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsistencyPolicyInitParameters. +func (in *ConsistencyPolicyInitParameters) DeepCopy() *ConsistencyPolicyInitParameters { + if in == nil { + return nil + } + out := new(ConsistencyPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsistencyPolicyObservation) DeepCopyInto(out *ConsistencyPolicyObservation) { + *out = *in + if in.ConsistencyLevel != nil { + in, out := &in.ConsistencyLevel, &out.ConsistencyLevel + *out = new(string) + **out = **in + } + if in.MaxIntervalInSeconds != nil { + in, out := &in.MaxIntervalInSeconds, &out.MaxIntervalInSeconds + *out = new(float64) + **out = **in + } + if in.MaxStalenessPrefix != nil { + in, out := &in.MaxStalenessPrefix, &out.MaxStalenessPrefix + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsistencyPolicyObservation. +func (in *ConsistencyPolicyObservation) DeepCopy() *ConsistencyPolicyObservation { + if in == nil { + return nil + } + out := new(ConsistencyPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConsistencyPolicyParameters) DeepCopyInto(out *ConsistencyPolicyParameters) { + *out = *in + if in.ConsistencyLevel != nil { + in, out := &in.ConsistencyLevel, &out.ConsistencyLevel + *out = new(string) + **out = **in + } + if in.MaxIntervalInSeconds != nil { + in, out := &in.MaxIntervalInSeconds, &out.MaxIntervalInSeconds + *out = new(float64) + **out = **in + } + if in.MaxStalenessPrefix != nil { + in, out := &in.MaxStalenessPrefix, &out.MaxStalenessPrefix + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsistencyPolicyParameters. +func (in *ConsistencyPolicyParameters) DeepCopy() *ConsistencyPolicyParameters { + if in == nil { + return nil + } + out := new(ConsistencyPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CorsRuleInitParameters) DeepCopyInto(out *CorsRuleInitParameters) { + *out = *in + if in.AllowedHeaders != nil { + in, out := &in.AllowedHeaders, &out.AllowedHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedMethods != nil { + in, out := &in.AllowedMethods, &out.AllowedMethods + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedOrigins != nil { + in, out := &in.AllowedOrigins, &out.AllowedOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ExposedHeaders != nil { + in, out := &in.ExposedHeaders, &out.ExposedHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out 
= new(string) + **out = **in + } + } + } + if in.MaxAgeInSeconds != nil { + in, out := &in.MaxAgeInSeconds, &out.MaxAgeInSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CorsRuleInitParameters. +func (in *CorsRuleInitParameters) DeepCopy() *CorsRuleInitParameters { + if in == nil { + return nil + } + out := new(CorsRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CorsRuleObservation) DeepCopyInto(out *CorsRuleObservation) { + *out = *in + if in.AllowedHeaders != nil { + in, out := &in.AllowedHeaders, &out.AllowedHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedMethods != nil { + in, out := &in.AllowedMethods, &out.AllowedMethods + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedOrigins != nil { + in, out := &in.AllowedOrigins, &out.AllowedOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ExposedHeaders != nil { + in, out := &in.ExposedHeaders, &out.ExposedHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MaxAgeInSeconds != nil { + in, out := &in.MaxAgeInSeconds, &out.MaxAgeInSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CorsRuleObservation. 
+func (in *CorsRuleObservation) DeepCopy() *CorsRuleObservation { + if in == nil { + return nil + } + out := new(CorsRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CorsRuleParameters) DeepCopyInto(out *CorsRuleParameters) { + *out = *in + if in.AllowedHeaders != nil { + in, out := &in.AllowedHeaders, &out.AllowedHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedMethods != nil { + in, out := &in.AllowedMethods, &out.AllowedMethods + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedOrigins != nil { + in, out := &in.AllowedOrigins, &out.AllowedOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ExposedHeaders != nil { + in, out := &in.ExposedHeaders, &out.ExposedHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MaxAgeInSeconds != nil { + in, out := &in.MaxAgeInSeconds, &out.MaxAgeInSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CorsRuleParameters. +func (in *CorsRuleParameters) DeepCopy() *CorsRuleParameters { + if in == nil { + return nil + } + out := new(CorsRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DatabaseInitParameters) DeepCopyInto(out *DatabaseInitParameters) { + *out = *in + if in.CollectionNames != nil { + in, out := &in.CollectionNames, &out.CollectionNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseInitParameters. +func (in *DatabaseInitParameters) DeepCopy() *DatabaseInitParameters { + if in == nil { + return nil + } + out := new(DatabaseInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseObservation) DeepCopyInto(out *DatabaseObservation) { + *out = *in + if in.CollectionNames != nil { + in, out := &in.CollectionNames, &out.CollectionNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseObservation. +func (in *DatabaseObservation) DeepCopy() *DatabaseObservation { + if in == nil { + return nil + } + out := new(DatabaseObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DatabaseParameters) DeepCopyInto(out *DatabaseParameters) { + *out = *in + if in.CollectionNames != nil { + in, out := &in.CollectionNames, &out.CollectionNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseParameters. +func (in *DatabaseParameters) DeepCopy() *DatabaseParameters { + if in == nil { + return nil + } + out := new(DatabaseParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExcludedPathInitParameters) DeepCopyInto(out *ExcludedPathInitParameters) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExcludedPathInitParameters. +func (in *ExcludedPathInitParameters) DeepCopy() *ExcludedPathInitParameters { + if in == nil { + return nil + } + out := new(ExcludedPathInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExcludedPathObservation) DeepCopyInto(out *ExcludedPathObservation) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExcludedPathObservation. 
+func (in *ExcludedPathObservation) DeepCopy() *ExcludedPathObservation { + if in == nil { + return nil + } + out := new(ExcludedPathObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExcludedPathParameters) DeepCopyInto(out *ExcludedPathParameters) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExcludedPathParameters. +func (in *ExcludedPathParameters) DeepCopy() *ExcludedPathParameters { + if in == nil { + return nil + } + out := new(ExcludedPathParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GeoLocationInitParameters) DeepCopyInto(out *GeoLocationInitParameters) { + *out = *in + if in.FailoverPriority != nil { + in, out := &in.FailoverPriority, &out.FailoverPriority + *out = new(float64) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ZoneRedundant != nil { + in, out := &in.ZoneRedundant, &out.ZoneRedundant + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GeoLocationInitParameters. +func (in *GeoLocationInitParameters) DeepCopy() *GeoLocationInitParameters { + if in == nil { + return nil + } + out := new(GeoLocationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GeoLocationObservation) DeepCopyInto(out *GeoLocationObservation) { + *out = *in + if in.FailoverPriority != nil { + in, out := &in.FailoverPriority, &out.FailoverPriority + *out = new(float64) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ZoneRedundant != nil { + in, out := &in.ZoneRedundant, &out.ZoneRedundant + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GeoLocationObservation. +func (in *GeoLocationObservation) DeepCopy() *GeoLocationObservation { + if in == nil { + return nil + } + out := new(GeoLocationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GeoLocationParameters) DeepCopyInto(out *GeoLocationParameters) { + *out = *in + if in.FailoverPriority != nil { + in, out := &in.FailoverPriority, &out.FailoverPriority + *out = new(float64) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ZoneRedundant != nil { + in, out := &in.ZoneRedundant, &out.ZoneRedundant + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GeoLocationParameters. +func (in *GeoLocationParameters) DeepCopy() *GeoLocationParameters { + if in == nil { + return nil + } + out := new(GeoLocationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GremlinDatabase) DeepCopyInto(out *GremlinDatabase) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GremlinDatabase. +func (in *GremlinDatabase) DeepCopy() *GremlinDatabase { + if in == nil { + return nil + } + out := new(GremlinDatabase) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GremlinDatabase) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GremlinDatabaseAutoscaleSettingsInitParameters) DeepCopyInto(out *GremlinDatabaseAutoscaleSettingsInitParameters) { + *out = *in + if in.MaxThroughput != nil { + in, out := &in.MaxThroughput, &out.MaxThroughput + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GremlinDatabaseAutoscaleSettingsInitParameters. +func (in *GremlinDatabaseAutoscaleSettingsInitParameters) DeepCopy() *GremlinDatabaseAutoscaleSettingsInitParameters { + if in == nil { + return nil + } + out := new(GremlinDatabaseAutoscaleSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GremlinDatabaseAutoscaleSettingsObservation) DeepCopyInto(out *GremlinDatabaseAutoscaleSettingsObservation) { + *out = *in + if in.MaxThroughput != nil { + in, out := &in.MaxThroughput, &out.MaxThroughput + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GremlinDatabaseAutoscaleSettingsObservation. +func (in *GremlinDatabaseAutoscaleSettingsObservation) DeepCopy() *GremlinDatabaseAutoscaleSettingsObservation { + if in == nil { + return nil + } + out := new(GremlinDatabaseAutoscaleSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GremlinDatabaseAutoscaleSettingsParameters) DeepCopyInto(out *GremlinDatabaseAutoscaleSettingsParameters) { + *out = *in + if in.MaxThroughput != nil { + in, out := &in.MaxThroughput, &out.MaxThroughput + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GremlinDatabaseAutoscaleSettingsParameters. +func (in *GremlinDatabaseAutoscaleSettingsParameters) DeepCopy() *GremlinDatabaseAutoscaleSettingsParameters { + if in == nil { + return nil + } + out := new(GremlinDatabaseAutoscaleSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GremlinDatabaseInitParameters) DeepCopyInto(out *GremlinDatabaseInitParameters) { + *out = *in + if in.GraphNames != nil { + in, out := &in.GraphNames, &out.GraphNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GremlinDatabaseInitParameters. +func (in *GremlinDatabaseInitParameters) DeepCopy() *GremlinDatabaseInitParameters { + if in == nil { + return nil + } + out := new(GremlinDatabaseInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GremlinDatabaseInitParameters_2) DeepCopyInto(out *GremlinDatabaseInitParameters_2) { + *out = *in + if in.AutoscaleSettings != nil { + in, out := &in.AutoscaleSettings, &out.AutoscaleSettings + *out = new(GremlinDatabaseAutoscaleSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GremlinDatabaseInitParameters_2. +func (in *GremlinDatabaseInitParameters_2) DeepCopy() *GremlinDatabaseInitParameters_2 { + if in == nil { + return nil + } + out := new(GremlinDatabaseInitParameters_2) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GremlinDatabaseList) DeepCopyInto(out *GremlinDatabaseList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]GremlinDatabase, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GremlinDatabaseList. +func (in *GremlinDatabaseList) DeepCopy() *GremlinDatabaseList { + if in == nil { + return nil + } + out := new(GremlinDatabaseList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GremlinDatabaseList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GremlinDatabaseObservation) DeepCopyInto(out *GremlinDatabaseObservation) { + *out = *in + if in.GraphNames != nil { + in, out := &in.GraphNames, &out.GraphNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GremlinDatabaseObservation. +func (in *GremlinDatabaseObservation) DeepCopy() *GremlinDatabaseObservation { + if in == nil { + return nil + } + out := new(GremlinDatabaseObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GremlinDatabaseObservation_2) DeepCopyInto(out *GremlinDatabaseObservation_2) { + *out = *in + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } + if in.AutoscaleSettings != nil { + in, out := &in.AutoscaleSettings, &out.AutoscaleSettings + *out = new(GremlinDatabaseAutoscaleSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GremlinDatabaseObservation_2. +func (in *GremlinDatabaseObservation_2) DeepCopy() *GremlinDatabaseObservation_2 { + if in == nil { + return nil + } + out := new(GremlinDatabaseObservation_2) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GremlinDatabaseParameters) DeepCopyInto(out *GremlinDatabaseParameters) { + *out = *in + if in.GraphNames != nil { + in, out := &in.GraphNames, &out.GraphNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GremlinDatabaseParameters. 
+func (in *GremlinDatabaseParameters) DeepCopy() *GremlinDatabaseParameters { + if in == nil { + return nil + } + out := new(GremlinDatabaseParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GremlinDatabaseParameters_2) DeepCopyInto(out *GremlinDatabaseParameters_2) { + *out = *in + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } + if in.AccountNameRef != nil { + in, out := &in.AccountNameRef, &out.AccountNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AccountNameSelector != nil { + in, out := &in.AccountNameSelector, &out.AccountNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.AutoscaleSettings != nil { + in, out := &in.AutoscaleSettings, &out.AutoscaleSettings + *out = new(GremlinDatabaseAutoscaleSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GremlinDatabaseParameters_2. 
+func (in *GremlinDatabaseParameters_2) DeepCopy() *GremlinDatabaseParameters_2 { + if in == nil { + return nil + } + out := new(GremlinDatabaseParameters_2) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GremlinDatabaseSpec) DeepCopyInto(out *GremlinDatabaseSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GremlinDatabaseSpec. +func (in *GremlinDatabaseSpec) DeepCopy() *GremlinDatabaseSpec { + if in == nil { + return nil + } + out := new(GremlinDatabaseSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GremlinDatabaseStatus) DeepCopyInto(out *GremlinDatabaseStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GremlinDatabaseStatus. +func (in *GremlinDatabaseStatus) DeepCopy() *GremlinDatabaseStatus { + if in == nil { + return nil + } + out := new(GremlinDatabaseStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GremlinGraph) DeepCopyInto(out *GremlinGraph) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GremlinGraph. 
+func (in *GremlinGraph) DeepCopy() *GremlinGraph { + if in == nil { + return nil + } + out := new(GremlinGraph) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GremlinGraph) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GremlinGraphAutoscaleSettingsInitParameters) DeepCopyInto(out *GremlinGraphAutoscaleSettingsInitParameters) { + *out = *in + if in.MaxThroughput != nil { + in, out := &in.MaxThroughput, &out.MaxThroughput + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GremlinGraphAutoscaleSettingsInitParameters. +func (in *GremlinGraphAutoscaleSettingsInitParameters) DeepCopy() *GremlinGraphAutoscaleSettingsInitParameters { + if in == nil { + return nil + } + out := new(GremlinGraphAutoscaleSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GremlinGraphAutoscaleSettingsObservation) DeepCopyInto(out *GremlinGraphAutoscaleSettingsObservation) { + *out = *in + if in.MaxThroughput != nil { + in, out := &in.MaxThroughput, &out.MaxThroughput + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GremlinGraphAutoscaleSettingsObservation. +func (in *GremlinGraphAutoscaleSettingsObservation) DeepCopy() *GremlinGraphAutoscaleSettingsObservation { + if in == nil { + return nil + } + out := new(GremlinGraphAutoscaleSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *GremlinGraphAutoscaleSettingsParameters) DeepCopyInto(out *GremlinGraphAutoscaleSettingsParameters) { + *out = *in + if in.MaxThroughput != nil { + in, out := &in.MaxThroughput, &out.MaxThroughput + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GremlinGraphAutoscaleSettingsParameters. +func (in *GremlinGraphAutoscaleSettingsParameters) DeepCopy() *GremlinGraphAutoscaleSettingsParameters { + if in == nil { + return nil + } + out := new(GremlinGraphAutoscaleSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GremlinGraphInitParameters) DeepCopyInto(out *GremlinGraphInitParameters) { + *out = *in + if in.AnalyticalStorageTTL != nil { + in, out := &in.AnalyticalStorageTTL, &out.AnalyticalStorageTTL + *out = new(float64) + **out = **in + } + if in.AutoscaleSettings != nil { + in, out := &in.AutoscaleSettings, &out.AutoscaleSettings + *out = new(GremlinGraphAutoscaleSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ConflictResolutionPolicy != nil { + in, out := &in.ConflictResolutionPolicy, &out.ConflictResolutionPolicy + *out = new(ConflictResolutionPolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DefaultTTL != nil { + in, out := &in.DefaultTTL, &out.DefaultTTL + *out = new(float64) + **out = **in + } + if in.IndexPolicy != nil { + in, out := &in.IndexPolicy, &out.IndexPolicy + *out = new(IndexPolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PartitionKeyPath != nil { + in, out := &in.PartitionKeyPath, &out.PartitionKeyPath + *out = new(string) + **out = **in + } + if in.PartitionKeyVersion != nil { + in, out := &in.PartitionKeyVersion, &out.PartitionKeyVersion + *out = new(float64) + **out = **in + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = 
new(float64) + **out = **in + } + if in.UniqueKey != nil { + in, out := &in.UniqueKey, &out.UniqueKey + *out = make([]UniqueKeyInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GremlinGraphInitParameters. +func (in *GremlinGraphInitParameters) DeepCopy() *GremlinGraphInitParameters { + if in == nil { + return nil + } + out := new(GremlinGraphInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GremlinGraphList) DeepCopyInto(out *GremlinGraphList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]GremlinGraph, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GremlinGraphList. +func (in *GremlinGraphList) DeepCopy() *GremlinGraphList { + if in == nil { + return nil + } + out := new(GremlinGraphList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GremlinGraphList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GremlinGraphObservation) DeepCopyInto(out *GremlinGraphObservation) { + *out = *in + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } + if in.AnalyticalStorageTTL != nil { + in, out := &in.AnalyticalStorageTTL, &out.AnalyticalStorageTTL + *out = new(float64) + **out = **in + } + if in.AutoscaleSettings != nil { + in, out := &in.AutoscaleSettings, &out.AutoscaleSettings + *out = new(GremlinGraphAutoscaleSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.ConflictResolutionPolicy != nil { + in, out := &in.ConflictResolutionPolicy, &out.ConflictResolutionPolicy + *out = new(ConflictResolutionPolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.DefaultTTL != nil { + in, out := &in.DefaultTTL, &out.DefaultTTL + *out = new(float64) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IndexPolicy != nil { + in, out := &in.IndexPolicy, &out.IndexPolicy + *out = new(IndexPolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.PartitionKeyPath != nil { + in, out := &in.PartitionKeyPath, &out.PartitionKeyPath + *out = new(string) + **out = **in + } + if in.PartitionKeyVersion != nil { + in, out := &in.PartitionKeyVersion, &out.PartitionKeyVersion + *out = new(float64) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } + if in.UniqueKey != nil { + in, out := &in.UniqueKey, &out.UniqueKey + *out = make([]UniqueKeyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
GremlinGraphObservation. +func (in *GremlinGraphObservation) DeepCopy() *GremlinGraphObservation { + if in == nil { + return nil + } + out := new(GremlinGraphObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GremlinGraphParameters) DeepCopyInto(out *GremlinGraphParameters) { + *out = *in + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } + if in.AccountNameRef != nil { + in, out := &in.AccountNameRef, &out.AccountNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AccountNameSelector != nil { + in, out := &in.AccountNameSelector, &out.AccountNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.AnalyticalStorageTTL != nil { + in, out := &in.AnalyticalStorageTTL, &out.AnalyticalStorageTTL + *out = new(float64) + **out = **in + } + if in.AutoscaleSettings != nil { + in, out := &in.AutoscaleSettings, &out.AutoscaleSettings + *out = new(GremlinGraphAutoscaleSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.ConflictResolutionPolicy != nil { + in, out := &in.ConflictResolutionPolicy, &out.ConflictResolutionPolicy + *out = new(ConflictResolutionPolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.DatabaseNameRef != nil { + in, out := &in.DatabaseNameRef, &out.DatabaseNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DatabaseNameSelector != nil { + in, out := &in.DatabaseNameSelector, &out.DatabaseNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.DefaultTTL != nil { + in, out := &in.DefaultTTL, &out.DefaultTTL + *out = new(float64) + **out = **in + } + if in.IndexPolicy != nil { + in, out := &in.IndexPolicy, &out.IndexPolicy + *out = 
new(IndexPolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.PartitionKeyPath != nil { + in, out := &in.PartitionKeyPath, &out.PartitionKeyPath + *out = new(string) + **out = **in + } + if in.PartitionKeyVersion != nil { + in, out := &in.PartitionKeyVersion, &out.PartitionKeyVersion + *out = new(float64) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } + if in.UniqueKey != nil { + in, out := &in.UniqueKey, &out.UniqueKey + *out = make([]UniqueKeyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GremlinGraphParameters. +func (in *GremlinGraphParameters) DeepCopy() *GremlinGraphParameters { + if in == nil { + return nil + } + out := new(GremlinGraphParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GremlinGraphSpec) DeepCopyInto(out *GremlinGraphSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GremlinGraphSpec. 
+func (in *GremlinGraphSpec) DeepCopy() *GremlinGraphSpec { + if in == nil { + return nil + } + out := new(GremlinGraphSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GremlinGraphStatus) DeepCopyInto(out *GremlinGraphStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GremlinGraphStatus. +func (in *GremlinGraphStatus) DeepCopy() *GremlinGraphStatus { + if in == nil { + return nil + } + out := new(GremlinGraphStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityInitParameters) DeepCopyInto(out *IdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityInitParameters. +func (in *IdentityInitParameters) DeepCopy() *IdentityInitParameters { + if in == nil { + return nil + } + out := new(IdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentityObservation) DeepCopyInto(out *IdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityObservation. +func (in *IdentityObservation) DeepCopy() *IdentityObservation { + if in == nil { + return nil + } + out := new(IdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityParameters) DeepCopyInto(out *IdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityParameters. +func (in *IdentityParameters) DeepCopy() *IdentityParameters { + if in == nil { + return nil + } + out := new(IdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IncludedPathInitParameters) DeepCopyInto(out *IncludedPathInitParameters) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IncludedPathInitParameters. +func (in *IncludedPathInitParameters) DeepCopy() *IncludedPathInitParameters { + if in == nil { + return nil + } + out := new(IncludedPathInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IncludedPathObservation) DeepCopyInto(out *IncludedPathObservation) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IncludedPathObservation. +func (in *IncludedPathObservation) DeepCopy() *IncludedPathObservation { + if in == nil { + return nil + } + out := new(IncludedPathObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IncludedPathParameters) DeepCopyInto(out *IncludedPathParameters) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IncludedPathParameters. +func (in *IncludedPathParameters) DeepCopy() *IncludedPathParameters { + if in == nil { + return nil + } + out := new(IncludedPathParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IndexInitParameters) DeepCopyInto(out *IndexInitParameters) { + *out = *in + if in.Order != nil { + in, out := &in.Order, &out.Order + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexInitParameters. +func (in *IndexInitParameters) DeepCopy() *IndexInitParameters { + if in == nil { + return nil + } + out := new(IndexInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IndexObservation) DeepCopyInto(out *IndexObservation) { + *out = *in + if in.Order != nil { + in, out := &in.Order, &out.Order + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexObservation. +func (in *IndexObservation) DeepCopy() *IndexObservation { + if in == nil { + return nil + } + out := new(IndexObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IndexParameters) DeepCopyInto(out *IndexParameters) { + *out = *in + if in.Order != nil { + in, out := &in.Order, &out.Order + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexParameters. +func (in *IndexParameters) DeepCopy() *IndexParameters { + if in == nil { + return nil + } + out := new(IndexParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *IndexPolicyInitParameters) DeepCopyInto(out *IndexPolicyInitParameters) { + *out = *in + if in.Automatic != nil { + in, out := &in.Automatic, &out.Automatic + *out = new(bool) + **out = **in + } + if in.CompositeIndex != nil { + in, out := &in.CompositeIndex, &out.CompositeIndex + *out = make([]CompositeIndexInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ExcludedPaths != nil { + in, out := &in.ExcludedPaths, &out.ExcludedPaths + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IncludedPaths != nil { + in, out := &in.IncludedPaths, &out.IncludedPaths + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IndexingMode != nil { + in, out := &in.IndexingMode, &out.IndexingMode + *out = new(string) + **out = **in + } + if in.SpatialIndex != nil { + in, out := &in.SpatialIndex, &out.SpatialIndex + *out = make([]SpatialIndexInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexPolicyInitParameters. +func (in *IndexPolicyInitParameters) DeepCopy() *IndexPolicyInitParameters { + if in == nil { + return nil + } + out := new(IndexPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IndexPolicyObservation) DeepCopyInto(out *IndexPolicyObservation) { + *out = *in + if in.Automatic != nil { + in, out := &in.Automatic, &out.Automatic + *out = new(bool) + **out = **in + } + if in.CompositeIndex != nil { + in, out := &in.CompositeIndex, &out.CompositeIndex + *out = make([]CompositeIndexObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ExcludedPaths != nil { + in, out := &in.ExcludedPaths, &out.ExcludedPaths + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IncludedPaths != nil { + in, out := &in.IncludedPaths, &out.IncludedPaths + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IndexingMode != nil { + in, out := &in.IndexingMode, &out.IndexingMode + *out = new(string) + **out = **in + } + if in.SpatialIndex != nil { + in, out := &in.SpatialIndex, &out.SpatialIndex + *out = make([]SpatialIndexObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexPolicyObservation. +func (in *IndexPolicyObservation) DeepCopy() *IndexPolicyObservation { + if in == nil { + return nil + } + out := new(IndexPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IndexPolicyParameters) DeepCopyInto(out *IndexPolicyParameters) { + *out = *in + if in.Automatic != nil { + in, out := &in.Automatic, &out.Automatic + *out = new(bool) + **out = **in + } + if in.CompositeIndex != nil { + in, out := &in.CompositeIndex, &out.CompositeIndex + *out = make([]CompositeIndexParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ExcludedPaths != nil { + in, out := &in.ExcludedPaths, &out.ExcludedPaths + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IncludedPaths != nil { + in, out := &in.IncludedPaths, &out.IncludedPaths + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IndexingMode != nil { + in, out := &in.IndexingMode, &out.IndexingMode + *out = new(string) + **out = **in + } + if in.SpatialIndex != nil { + in, out := &in.SpatialIndex, &out.SpatialIndex + *out = make([]SpatialIndexParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexPolicyParameters. +func (in *IndexPolicyParameters) DeepCopy() *IndexPolicyParameters { + if in == nil { + return nil + } + out := new(IndexPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IndexingPolicyCompositeIndexInitParameters) DeepCopyInto(out *IndexingPolicyCompositeIndexInitParameters) { + *out = *in + if in.Index != nil { + in, out := &in.Index, &out.Index + *out = make([]CompositeIndexIndexInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexingPolicyCompositeIndexInitParameters. +func (in *IndexingPolicyCompositeIndexInitParameters) DeepCopy() *IndexingPolicyCompositeIndexInitParameters { + if in == nil { + return nil + } + out := new(IndexingPolicyCompositeIndexInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IndexingPolicyCompositeIndexObservation) DeepCopyInto(out *IndexingPolicyCompositeIndexObservation) { + *out = *in + if in.Index != nil { + in, out := &in.Index, &out.Index + *out = make([]CompositeIndexIndexObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexingPolicyCompositeIndexObservation. +func (in *IndexingPolicyCompositeIndexObservation) DeepCopy() *IndexingPolicyCompositeIndexObservation { + if in == nil { + return nil + } + out := new(IndexingPolicyCompositeIndexObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IndexingPolicyCompositeIndexParameters) DeepCopyInto(out *IndexingPolicyCompositeIndexParameters) { + *out = *in + if in.Index != nil { + in, out := &in.Index, &out.Index + *out = make([]CompositeIndexIndexParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexingPolicyCompositeIndexParameters. +func (in *IndexingPolicyCompositeIndexParameters) DeepCopy() *IndexingPolicyCompositeIndexParameters { + if in == nil { + return nil + } + out := new(IndexingPolicyCompositeIndexParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IndexingPolicyInitParameters) DeepCopyInto(out *IndexingPolicyInitParameters) { + *out = *in + if in.CompositeIndex != nil { + in, out := &in.CompositeIndex, &out.CompositeIndex + *out = make([]IndexingPolicyCompositeIndexInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ExcludedPath != nil { + in, out := &in.ExcludedPath, &out.ExcludedPath + *out = make([]ExcludedPathInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IncludedPath != nil { + in, out := &in.IncludedPath, &out.IncludedPath + *out = make([]IncludedPathInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IndexingMode != nil { + in, out := &in.IndexingMode, &out.IndexingMode + *out = new(string) + **out = **in + } + if in.SpatialIndex != nil { + in, out := &in.SpatialIndex, &out.SpatialIndex + *out = make([]IndexingPolicySpatialIndexInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexingPolicyInitParameters. 
+func (in *IndexingPolicyInitParameters) DeepCopy() *IndexingPolicyInitParameters { + if in == nil { + return nil + } + out := new(IndexingPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IndexingPolicyObservation) DeepCopyInto(out *IndexingPolicyObservation) { + *out = *in + if in.CompositeIndex != nil { + in, out := &in.CompositeIndex, &out.CompositeIndex + *out = make([]IndexingPolicyCompositeIndexObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ExcludedPath != nil { + in, out := &in.ExcludedPath, &out.ExcludedPath + *out = make([]ExcludedPathObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IncludedPath != nil { + in, out := &in.IncludedPath, &out.IncludedPath + *out = make([]IncludedPathObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IndexingMode != nil { + in, out := &in.IndexingMode, &out.IndexingMode + *out = new(string) + **out = **in + } + if in.SpatialIndex != nil { + in, out := &in.SpatialIndex, &out.SpatialIndex + *out = make([]IndexingPolicySpatialIndexObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexingPolicyObservation. +func (in *IndexingPolicyObservation) DeepCopy() *IndexingPolicyObservation { + if in == nil { + return nil + } + out := new(IndexingPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IndexingPolicyParameters) DeepCopyInto(out *IndexingPolicyParameters) { + *out = *in + if in.CompositeIndex != nil { + in, out := &in.CompositeIndex, &out.CompositeIndex + *out = make([]IndexingPolicyCompositeIndexParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ExcludedPath != nil { + in, out := &in.ExcludedPath, &out.ExcludedPath + *out = make([]ExcludedPathParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IncludedPath != nil { + in, out := &in.IncludedPath, &out.IncludedPath + *out = make([]IncludedPathParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IndexingMode != nil { + in, out := &in.IndexingMode, &out.IndexingMode + *out = new(string) + **out = **in + } + if in.SpatialIndex != nil { + in, out := &in.SpatialIndex, &out.SpatialIndex + *out = make([]IndexingPolicySpatialIndexParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexingPolicyParameters. +func (in *IndexingPolicyParameters) DeepCopy() *IndexingPolicyParameters { + if in == nil { + return nil + } + out := new(IndexingPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IndexingPolicySpatialIndexInitParameters) DeepCopyInto(out *IndexingPolicySpatialIndexInitParameters) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexingPolicySpatialIndexInitParameters. 
+func (in *IndexingPolicySpatialIndexInitParameters) DeepCopy() *IndexingPolicySpatialIndexInitParameters { + if in == nil { + return nil + } + out := new(IndexingPolicySpatialIndexInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IndexingPolicySpatialIndexObservation) DeepCopyInto(out *IndexingPolicySpatialIndexObservation) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Types != nil { + in, out := &in.Types, &out.Types + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexingPolicySpatialIndexObservation. +func (in *IndexingPolicySpatialIndexObservation) DeepCopy() *IndexingPolicySpatialIndexObservation { + if in == nil { + return nil + } + out := new(IndexingPolicySpatialIndexObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IndexingPolicySpatialIndexParameters) DeepCopyInto(out *IndexingPolicySpatialIndexParameters) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexingPolicySpatialIndexParameters. +func (in *IndexingPolicySpatialIndexParameters) DeepCopy() *IndexingPolicySpatialIndexParameters { + if in == nil { + return nil + } + out := new(IndexingPolicySpatialIndexParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MongoCollection) DeepCopyInto(out *MongoCollection) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoCollection. +func (in *MongoCollection) DeepCopy() *MongoCollection { + if in == nil { + return nil + } + out := new(MongoCollection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MongoCollection) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MongoCollectionAutoscaleSettingsInitParameters) DeepCopyInto(out *MongoCollectionAutoscaleSettingsInitParameters) { + *out = *in + if in.MaxThroughput != nil { + in, out := &in.MaxThroughput, &out.MaxThroughput + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoCollectionAutoscaleSettingsInitParameters. +func (in *MongoCollectionAutoscaleSettingsInitParameters) DeepCopy() *MongoCollectionAutoscaleSettingsInitParameters { + if in == nil { + return nil + } + out := new(MongoCollectionAutoscaleSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MongoCollectionAutoscaleSettingsObservation) DeepCopyInto(out *MongoCollectionAutoscaleSettingsObservation) { + *out = *in + if in.MaxThroughput != nil { + in, out := &in.MaxThroughput, &out.MaxThroughput + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoCollectionAutoscaleSettingsObservation. +func (in *MongoCollectionAutoscaleSettingsObservation) DeepCopy() *MongoCollectionAutoscaleSettingsObservation { + if in == nil { + return nil + } + out := new(MongoCollectionAutoscaleSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MongoCollectionAutoscaleSettingsParameters) DeepCopyInto(out *MongoCollectionAutoscaleSettingsParameters) { + *out = *in + if in.MaxThroughput != nil { + in, out := &in.MaxThroughput, &out.MaxThroughput + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoCollectionAutoscaleSettingsParameters. +func (in *MongoCollectionAutoscaleSettingsParameters) DeepCopy() *MongoCollectionAutoscaleSettingsParameters { + if in == nil { + return nil + } + out := new(MongoCollectionAutoscaleSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MongoCollectionIndexInitParameters) DeepCopyInto(out *MongoCollectionIndexInitParameters) { + *out = *in + if in.Keys != nil { + in, out := &in.Keys, &out.Keys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Unique != nil { + in, out := &in.Unique, &out.Unique + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoCollectionIndexInitParameters. +func (in *MongoCollectionIndexInitParameters) DeepCopy() *MongoCollectionIndexInitParameters { + if in == nil { + return nil + } + out := new(MongoCollectionIndexInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MongoCollectionIndexObservation) DeepCopyInto(out *MongoCollectionIndexObservation) { + *out = *in + if in.Keys != nil { + in, out := &in.Keys, &out.Keys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Unique != nil { + in, out := &in.Unique, &out.Unique + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoCollectionIndexObservation. +func (in *MongoCollectionIndexObservation) DeepCopy() *MongoCollectionIndexObservation { + if in == nil { + return nil + } + out := new(MongoCollectionIndexObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MongoCollectionIndexParameters) DeepCopyInto(out *MongoCollectionIndexParameters) { + *out = *in + if in.Keys != nil { + in, out := &in.Keys, &out.Keys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Unique != nil { + in, out := &in.Unique, &out.Unique + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoCollectionIndexParameters. +func (in *MongoCollectionIndexParameters) DeepCopy() *MongoCollectionIndexParameters { + if in == nil { + return nil + } + out := new(MongoCollectionIndexParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MongoCollectionInitParameters) DeepCopyInto(out *MongoCollectionInitParameters) { + *out = *in + if in.AnalyticalStorageTTL != nil { + in, out := &in.AnalyticalStorageTTL, &out.AnalyticalStorageTTL + *out = new(float64) + **out = **in + } + if in.AutoscaleSettings != nil { + in, out := &in.AutoscaleSettings, &out.AutoscaleSettings + *out = new(MongoCollectionAutoscaleSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DefaultTTLSeconds != nil { + in, out := &in.DefaultTTLSeconds, &out.DefaultTTLSeconds + *out = new(float64) + **out = **in + } + if in.Index != nil { + in, out := &in.Index, &out.Index + *out = make([]MongoCollectionIndexInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ShardKey != nil { + in, out := &in.ShardKey, &out.ShardKey + *out = new(string) + **out = **in + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoCollectionInitParameters. 
+func (in *MongoCollectionInitParameters) DeepCopy() *MongoCollectionInitParameters { + if in == nil { + return nil + } + out := new(MongoCollectionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MongoCollectionList) DeepCopyInto(out *MongoCollectionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MongoCollection, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoCollectionList. +func (in *MongoCollectionList) DeepCopy() *MongoCollectionList { + if in == nil { + return nil + } + out := new(MongoCollectionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MongoCollectionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MongoCollectionObservation) DeepCopyInto(out *MongoCollectionObservation) { + *out = *in + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } + if in.AnalyticalStorageTTL != nil { + in, out := &in.AnalyticalStorageTTL, &out.AnalyticalStorageTTL + *out = new(float64) + **out = **in + } + if in.AutoscaleSettings != nil { + in, out := &in.AutoscaleSettings, &out.AutoscaleSettings + *out = new(MongoCollectionAutoscaleSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.DefaultTTLSeconds != nil { + in, out := &in.DefaultTTLSeconds, &out.DefaultTTLSeconds + *out = new(float64) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Index != nil { + in, out := &in.Index, &out.Index + *out = make([]MongoCollectionIndexObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ShardKey != nil { + in, out := &in.ShardKey, &out.ShardKey + *out = new(string) + **out = **in + } + if in.SystemIndexes != nil { + in, out := &in.SystemIndexes, &out.SystemIndexes + *out = make([]SystemIndexesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoCollectionObservation. 
+func (in *MongoCollectionObservation) DeepCopy() *MongoCollectionObservation { + if in == nil { + return nil + } + out := new(MongoCollectionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MongoCollectionParameters) DeepCopyInto(out *MongoCollectionParameters) { + *out = *in + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } + if in.AccountNameRef != nil { + in, out := &in.AccountNameRef, &out.AccountNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AccountNameSelector != nil { + in, out := &in.AccountNameSelector, &out.AccountNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.AnalyticalStorageTTL != nil { + in, out := &in.AnalyticalStorageTTL, &out.AnalyticalStorageTTL + *out = new(float64) + **out = **in + } + if in.AutoscaleSettings != nil { + in, out := &in.AutoscaleSettings, &out.AutoscaleSettings + *out = new(MongoCollectionAutoscaleSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.DatabaseNameRef != nil { + in, out := &in.DatabaseNameRef, &out.DatabaseNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DatabaseNameSelector != nil { + in, out := &in.DatabaseNameSelector, &out.DatabaseNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.DefaultTTLSeconds != nil { + in, out := &in.DefaultTTLSeconds, &out.DefaultTTLSeconds + *out = new(float64) + **out = **in + } + if in.Index != nil { + in, out := &in.Index, &out.Index + *out = make([]MongoCollectionIndexParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = 
new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ShardKey != nil { + in, out := &in.ShardKey, &out.ShardKey + *out = new(string) + **out = **in + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoCollectionParameters. +func (in *MongoCollectionParameters) DeepCopy() *MongoCollectionParameters { + if in == nil { + return nil + } + out := new(MongoCollectionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MongoCollectionSpec) DeepCopyInto(out *MongoCollectionSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoCollectionSpec. +func (in *MongoCollectionSpec) DeepCopy() *MongoCollectionSpec { + if in == nil { + return nil + } + out := new(MongoCollectionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MongoCollectionStatus) DeepCopyInto(out *MongoCollectionStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoCollectionStatus. 
+func (in *MongoCollectionStatus) DeepCopy() *MongoCollectionStatus { + if in == nil { + return nil + } + out := new(MongoCollectionStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MongoDatabase) DeepCopyInto(out *MongoDatabase) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoDatabase. +func (in *MongoDatabase) DeepCopy() *MongoDatabase { + if in == nil { + return nil + } + out := new(MongoDatabase) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MongoDatabase) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MongoDatabaseAutoscaleSettingsInitParameters) DeepCopyInto(out *MongoDatabaseAutoscaleSettingsInitParameters) { + *out = *in + if in.MaxThroughput != nil { + in, out := &in.MaxThroughput, &out.MaxThroughput + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoDatabaseAutoscaleSettingsInitParameters. +func (in *MongoDatabaseAutoscaleSettingsInitParameters) DeepCopy() *MongoDatabaseAutoscaleSettingsInitParameters { + if in == nil { + return nil + } + out := new(MongoDatabaseAutoscaleSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MongoDatabaseAutoscaleSettingsObservation) DeepCopyInto(out *MongoDatabaseAutoscaleSettingsObservation) { + *out = *in + if in.MaxThroughput != nil { + in, out := &in.MaxThroughput, &out.MaxThroughput + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoDatabaseAutoscaleSettingsObservation. +func (in *MongoDatabaseAutoscaleSettingsObservation) DeepCopy() *MongoDatabaseAutoscaleSettingsObservation { + if in == nil { + return nil + } + out := new(MongoDatabaseAutoscaleSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MongoDatabaseAutoscaleSettingsParameters) DeepCopyInto(out *MongoDatabaseAutoscaleSettingsParameters) { + *out = *in + if in.MaxThroughput != nil { + in, out := &in.MaxThroughput, &out.MaxThroughput + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoDatabaseAutoscaleSettingsParameters. +func (in *MongoDatabaseAutoscaleSettingsParameters) DeepCopy() *MongoDatabaseAutoscaleSettingsParameters { + if in == nil { + return nil + } + out := new(MongoDatabaseAutoscaleSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MongoDatabaseInitParameters) DeepCopyInto(out *MongoDatabaseInitParameters) { + *out = *in + if in.AutoscaleSettings != nil { + in, out := &in.AutoscaleSettings, &out.AutoscaleSettings + *out = new(MongoDatabaseAutoscaleSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoDatabaseInitParameters. +func (in *MongoDatabaseInitParameters) DeepCopy() *MongoDatabaseInitParameters { + if in == nil { + return nil + } + out := new(MongoDatabaseInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MongoDatabaseList) DeepCopyInto(out *MongoDatabaseList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MongoDatabase, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoDatabaseList. +func (in *MongoDatabaseList) DeepCopy() *MongoDatabaseList { + if in == nil { + return nil + } + out := new(MongoDatabaseList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MongoDatabaseList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MongoDatabaseObservation) DeepCopyInto(out *MongoDatabaseObservation) { + *out = *in + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } + if in.AutoscaleSettings != nil { + in, out := &in.AutoscaleSettings, &out.AutoscaleSettings + *out = new(MongoDatabaseAutoscaleSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoDatabaseObservation. +func (in *MongoDatabaseObservation) DeepCopy() *MongoDatabaseObservation { + if in == nil { + return nil + } + out := new(MongoDatabaseObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MongoDatabaseParameters) DeepCopyInto(out *MongoDatabaseParameters) { + *out = *in + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } + if in.AccountNameRef != nil { + in, out := &in.AccountNameRef, &out.AccountNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AccountNameSelector != nil { + in, out := &in.AccountNameSelector, &out.AccountNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.AutoscaleSettings != nil { + in, out := &in.AutoscaleSettings, &out.AutoscaleSettings + *out = new(MongoDatabaseAutoscaleSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoDatabaseParameters. +func (in *MongoDatabaseParameters) DeepCopy() *MongoDatabaseParameters { + if in == nil { + return nil + } + out := new(MongoDatabaseParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MongoDatabaseSpec) DeepCopyInto(out *MongoDatabaseSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoDatabaseSpec. +func (in *MongoDatabaseSpec) DeepCopy() *MongoDatabaseSpec { + if in == nil { + return nil + } + out := new(MongoDatabaseSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MongoDatabaseStatus) DeepCopyInto(out *MongoDatabaseStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoDatabaseStatus. +func (in *MongoDatabaseStatus) DeepCopy() *MongoDatabaseStatus { + if in == nil { + return nil + } + out := new(MongoDatabaseStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PartitionKeyInitParameters) DeepCopyInto(out *PartitionKeyInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartitionKeyInitParameters. +func (in *PartitionKeyInitParameters) DeepCopy() *PartitionKeyInitParameters { + if in == nil { + return nil + } + out := new(PartitionKeyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PartitionKeyObservation) DeepCopyInto(out *PartitionKeyObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartitionKeyObservation. +func (in *PartitionKeyObservation) DeepCopy() *PartitionKeyObservation { + if in == nil { + return nil + } + out := new(PartitionKeyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PartitionKeyParameters) DeepCopyInto(out *PartitionKeyParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartitionKeyParameters. +func (in *PartitionKeyParameters) DeepCopy() *PartitionKeyParameters { + if in == nil { + return nil + } + out := new(PartitionKeyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RestoreInitParameters) DeepCopyInto(out *RestoreInitParameters) { + *out = *in + if in.Database != nil { + in, out := &in.Database, &out.Database + *out = make([]DatabaseInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.GremlinDatabase != nil { + in, out := &in.GremlinDatabase, &out.GremlinDatabase + *out = make([]GremlinDatabaseInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RestoreTimestampInUtc != nil { + in, out := &in.RestoreTimestampInUtc, &out.RestoreTimestampInUtc + *out = new(string) + **out = **in + } + if in.SourceCosmosDBAccountID != nil { + in, out := &in.SourceCosmosDBAccountID, &out.SourceCosmosDBAccountID + *out = new(string) + **out = **in + } + if in.SourceCosmosDBAccountIDRef != nil { + in, out := &in.SourceCosmosDBAccountIDRef, &out.SourceCosmosDBAccountIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SourceCosmosDBAccountIDSelector != nil { + in, out := &in.SourceCosmosDBAccountIDSelector, &out.SourceCosmosDBAccountIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.TablesToRestore != nil { + in, out := &in.TablesToRestore, &out.TablesToRestore + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreInitParameters. +func (in *RestoreInitParameters) DeepCopy() *RestoreInitParameters { + if in == nil { + return nil + } + out := new(RestoreInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RestoreObservation) DeepCopyInto(out *RestoreObservation) { + *out = *in + if in.Database != nil { + in, out := &in.Database, &out.Database + *out = make([]DatabaseObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.GremlinDatabase != nil { + in, out := &in.GremlinDatabase, &out.GremlinDatabase + *out = make([]GremlinDatabaseObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RestoreTimestampInUtc != nil { + in, out := &in.RestoreTimestampInUtc, &out.RestoreTimestampInUtc + *out = new(string) + **out = **in + } + if in.SourceCosmosDBAccountID != nil { + in, out := &in.SourceCosmosDBAccountID, &out.SourceCosmosDBAccountID + *out = new(string) + **out = **in + } + if in.TablesToRestore != nil { + in, out := &in.TablesToRestore, &out.TablesToRestore + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreObservation. +func (in *RestoreObservation) DeepCopy() *RestoreObservation { + if in == nil { + return nil + } + out := new(RestoreObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RestoreParameters) DeepCopyInto(out *RestoreParameters) { + *out = *in + if in.Database != nil { + in, out := &in.Database, &out.Database + *out = make([]DatabaseParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.GremlinDatabase != nil { + in, out := &in.GremlinDatabase, &out.GremlinDatabase + *out = make([]GremlinDatabaseParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RestoreTimestampInUtc != nil { + in, out := &in.RestoreTimestampInUtc, &out.RestoreTimestampInUtc + *out = new(string) + **out = **in + } + if in.SourceCosmosDBAccountID != nil { + in, out := &in.SourceCosmosDBAccountID, &out.SourceCosmosDBAccountID + *out = new(string) + **out = **in + } + if in.SourceCosmosDBAccountIDRef != nil { + in, out := &in.SourceCosmosDBAccountIDRef, &out.SourceCosmosDBAccountIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SourceCosmosDBAccountIDSelector != nil { + in, out := &in.SourceCosmosDBAccountIDSelector, &out.SourceCosmosDBAccountIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.TablesToRestore != nil { + in, out := &in.TablesToRestore, &out.TablesToRestore + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreParameters. +func (in *RestoreParameters) DeepCopy() *RestoreParameters { + if in == nil { + return nil + } + out := new(RestoreParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SQLContainer) DeepCopyInto(out *SQLContainer) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLContainer. +func (in *SQLContainer) DeepCopy() *SQLContainer { + if in == nil { + return nil + } + out := new(SQLContainer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SQLContainer) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SQLContainerAutoscaleSettingsInitParameters) DeepCopyInto(out *SQLContainerAutoscaleSettingsInitParameters) { + *out = *in + if in.MaxThroughput != nil { + in, out := &in.MaxThroughput, &out.MaxThroughput + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLContainerAutoscaleSettingsInitParameters. +func (in *SQLContainerAutoscaleSettingsInitParameters) DeepCopy() *SQLContainerAutoscaleSettingsInitParameters { + if in == nil { + return nil + } + out := new(SQLContainerAutoscaleSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SQLContainerAutoscaleSettingsObservation) DeepCopyInto(out *SQLContainerAutoscaleSettingsObservation) { + *out = *in + if in.MaxThroughput != nil { + in, out := &in.MaxThroughput, &out.MaxThroughput + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLContainerAutoscaleSettingsObservation. 
+func (in *SQLContainerAutoscaleSettingsObservation) DeepCopy() *SQLContainerAutoscaleSettingsObservation { + if in == nil { + return nil + } + out := new(SQLContainerAutoscaleSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SQLContainerAutoscaleSettingsParameters) DeepCopyInto(out *SQLContainerAutoscaleSettingsParameters) { + *out = *in + if in.MaxThroughput != nil { + in, out := &in.MaxThroughput, &out.MaxThroughput + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLContainerAutoscaleSettingsParameters. +func (in *SQLContainerAutoscaleSettingsParameters) DeepCopy() *SQLContainerAutoscaleSettingsParameters { + if in == nil { + return nil + } + out := new(SQLContainerAutoscaleSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SQLContainerConflictResolutionPolicyInitParameters) DeepCopyInto(out *SQLContainerConflictResolutionPolicyInitParameters) { + *out = *in + if in.ConflictResolutionPath != nil { + in, out := &in.ConflictResolutionPath, &out.ConflictResolutionPath + *out = new(string) + **out = **in + } + if in.ConflictResolutionProcedure != nil { + in, out := &in.ConflictResolutionProcedure, &out.ConflictResolutionProcedure + *out = new(string) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLContainerConflictResolutionPolicyInitParameters. 
+func (in *SQLContainerConflictResolutionPolicyInitParameters) DeepCopy() *SQLContainerConflictResolutionPolicyInitParameters { + if in == nil { + return nil + } + out := new(SQLContainerConflictResolutionPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SQLContainerConflictResolutionPolicyObservation) DeepCopyInto(out *SQLContainerConflictResolutionPolicyObservation) { + *out = *in + if in.ConflictResolutionPath != nil { + in, out := &in.ConflictResolutionPath, &out.ConflictResolutionPath + *out = new(string) + **out = **in + } + if in.ConflictResolutionProcedure != nil { + in, out := &in.ConflictResolutionProcedure, &out.ConflictResolutionProcedure + *out = new(string) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLContainerConflictResolutionPolicyObservation. +func (in *SQLContainerConflictResolutionPolicyObservation) DeepCopy() *SQLContainerConflictResolutionPolicyObservation { + if in == nil { + return nil + } + out := new(SQLContainerConflictResolutionPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SQLContainerConflictResolutionPolicyParameters) DeepCopyInto(out *SQLContainerConflictResolutionPolicyParameters) { + *out = *in + if in.ConflictResolutionPath != nil { + in, out := &in.ConflictResolutionPath, &out.ConflictResolutionPath + *out = new(string) + **out = **in + } + if in.ConflictResolutionProcedure != nil { + in, out := &in.ConflictResolutionProcedure, &out.ConflictResolutionProcedure + *out = new(string) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLContainerConflictResolutionPolicyParameters. +func (in *SQLContainerConflictResolutionPolicyParameters) DeepCopy() *SQLContainerConflictResolutionPolicyParameters { + if in == nil { + return nil + } + out := new(SQLContainerConflictResolutionPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SQLContainerInitParameters) DeepCopyInto(out *SQLContainerInitParameters) { + *out = *in + if in.AnalyticalStorageTTL != nil { + in, out := &in.AnalyticalStorageTTL, &out.AnalyticalStorageTTL + *out = new(float64) + **out = **in + } + if in.AutoscaleSettings != nil { + in, out := &in.AutoscaleSettings, &out.AutoscaleSettings + *out = new(SQLContainerAutoscaleSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ConflictResolutionPolicy != nil { + in, out := &in.ConflictResolutionPolicy, &out.ConflictResolutionPolicy + *out = new(SQLContainerConflictResolutionPolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DefaultTTL != nil { + in, out := &in.DefaultTTL, &out.DefaultTTL + *out = new(float64) + **out = **in + } + if in.IndexingPolicy != nil { + in, out := &in.IndexingPolicy, &out.IndexingPolicy + *out = new(IndexingPolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PartitionKeyPath != nil { + in, out := &in.PartitionKeyPath, &out.PartitionKeyPath + *out = new(string) + **out = **in + } + if in.PartitionKeyVersion != nil { + in, out := &in.PartitionKeyVersion, &out.PartitionKeyVersion + *out = new(float64) + **out = **in + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } + if in.UniqueKey != nil { + in, out := &in.UniqueKey, &out.UniqueKey + *out = make([]SQLContainerUniqueKeyInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLContainerInitParameters. +func (in *SQLContainerInitParameters) DeepCopy() *SQLContainerInitParameters { + if in == nil { + return nil + } + out := new(SQLContainerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SQLContainerList) DeepCopyInto(out *SQLContainerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SQLContainer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLContainerList. +func (in *SQLContainerList) DeepCopy() *SQLContainerList { + if in == nil { + return nil + } + out := new(SQLContainerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SQLContainerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SQLContainerObservation) DeepCopyInto(out *SQLContainerObservation) { + *out = *in + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } + if in.AnalyticalStorageTTL != nil { + in, out := &in.AnalyticalStorageTTL, &out.AnalyticalStorageTTL + *out = new(float64) + **out = **in + } + if in.AutoscaleSettings != nil { + in, out := &in.AutoscaleSettings, &out.AutoscaleSettings + *out = new(SQLContainerAutoscaleSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.ConflictResolutionPolicy != nil { + in, out := &in.ConflictResolutionPolicy, &out.ConflictResolutionPolicy + *out = new(SQLContainerConflictResolutionPolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.DefaultTTL != nil { + in, out := &in.DefaultTTL, &out.DefaultTTL + *out = new(float64) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + 
**out = **in + } + if in.IndexingPolicy != nil { + in, out := &in.IndexingPolicy, &out.IndexingPolicy + *out = new(IndexingPolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.PartitionKeyPath != nil { + in, out := &in.PartitionKeyPath, &out.PartitionKeyPath + *out = new(string) + **out = **in + } + if in.PartitionKeyVersion != nil { + in, out := &in.PartitionKeyVersion, &out.PartitionKeyVersion + *out = new(float64) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } + if in.UniqueKey != nil { + in, out := &in.UniqueKey, &out.UniqueKey + *out = make([]SQLContainerUniqueKeyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLContainerObservation. +func (in *SQLContainerObservation) DeepCopy() *SQLContainerObservation { + if in == nil { + return nil + } + out := new(SQLContainerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SQLContainerParameters) DeepCopyInto(out *SQLContainerParameters) { + *out = *in + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } + if in.AccountNameRef != nil { + in, out := &in.AccountNameRef, &out.AccountNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AccountNameSelector != nil { + in, out := &in.AccountNameSelector, &out.AccountNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.AnalyticalStorageTTL != nil { + in, out := &in.AnalyticalStorageTTL, &out.AnalyticalStorageTTL + *out = new(float64) + **out = **in + } + if in.AutoscaleSettings != nil { + in, out := &in.AutoscaleSettings, &out.AutoscaleSettings + *out = new(SQLContainerAutoscaleSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.ConflictResolutionPolicy != nil { + in, out := &in.ConflictResolutionPolicy, &out.ConflictResolutionPolicy + *out = new(SQLContainerConflictResolutionPolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.DatabaseNameRef != nil { + in, out := &in.DatabaseNameRef, &out.DatabaseNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DatabaseNameSelector != nil { + in, out := &in.DatabaseNameSelector, &out.DatabaseNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.DefaultTTL != nil { + in, out := &in.DefaultTTL, &out.DefaultTTL + *out = new(float64) + **out = **in + } + if in.IndexingPolicy != nil { + in, out := &in.IndexingPolicy, &out.IndexingPolicy + *out = new(IndexingPolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.PartitionKeyPath != nil { + in, out := &in.PartitionKeyPath, &out.PartitionKeyPath + *out = new(string) + **out = **in + } + if in.PartitionKeyVersion != nil { + in, out := &in.PartitionKeyVersion, &out.PartitionKeyVersion + *out = new(float64) + **out = **in 
+ } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } + if in.UniqueKey != nil { + in, out := &in.UniqueKey, &out.UniqueKey + *out = make([]SQLContainerUniqueKeyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLContainerParameters. +func (in *SQLContainerParameters) DeepCopy() *SQLContainerParameters { + if in == nil { + return nil + } + out := new(SQLContainerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SQLContainerSpec) DeepCopyInto(out *SQLContainerSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLContainerSpec. +func (in *SQLContainerSpec) DeepCopy() *SQLContainerSpec { + if in == nil { + return nil + } + out := new(SQLContainerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SQLContainerStatus) DeepCopyInto(out *SQLContainerStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLContainerStatus. +func (in *SQLContainerStatus) DeepCopy() *SQLContainerStatus { + if in == nil { + return nil + } + out := new(SQLContainerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SQLContainerUniqueKeyInitParameters) DeepCopyInto(out *SQLContainerUniqueKeyInitParameters) { + *out = *in + if in.Paths != nil { + in, out := &in.Paths, &out.Paths + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLContainerUniqueKeyInitParameters. +func (in *SQLContainerUniqueKeyInitParameters) DeepCopy() *SQLContainerUniqueKeyInitParameters { + if in == nil { + return nil + } + out := new(SQLContainerUniqueKeyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SQLContainerUniqueKeyObservation) DeepCopyInto(out *SQLContainerUniqueKeyObservation) { + *out = *in + if in.Paths != nil { + in, out := &in.Paths, &out.Paths + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLContainerUniqueKeyObservation. 
+func (in *SQLContainerUniqueKeyObservation) DeepCopy() *SQLContainerUniqueKeyObservation { + if in == nil { + return nil + } + out := new(SQLContainerUniqueKeyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SQLContainerUniqueKeyParameters) DeepCopyInto(out *SQLContainerUniqueKeyParameters) { + *out = *in + if in.Paths != nil { + in, out := &in.Paths, &out.Paths + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLContainerUniqueKeyParameters. +func (in *SQLContainerUniqueKeyParameters) DeepCopy() *SQLContainerUniqueKeyParameters { + if in == nil { + return nil + } + out := new(SQLContainerUniqueKeyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SQLDatabase) DeepCopyInto(out *SQLDatabase) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLDatabase. +func (in *SQLDatabase) DeepCopy() *SQLDatabase { + if in == nil { + return nil + } + out := new(SQLDatabase) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SQLDatabase) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SQLDatabaseAutoscaleSettingsInitParameters) DeepCopyInto(out *SQLDatabaseAutoscaleSettingsInitParameters) { + *out = *in + if in.MaxThroughput != nil { + in, out := &in.MaxThroughput, &out.MaxThroughput + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLDatabaseAutoscaleSettingsInitParameters. +func (in *SQLDatabaseAutoscaleSettingsInitParameters) DeepCopy() *SQLDatabaseAutoscaleSettingsInitParameters { + if in == nil { + return nil + } + out := new(SQLDatabaseAutoscaleSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SQLDatabaseAutoscaleSettingsObservation) DeepCopyInto(out *SQLDatabaseAutoscaleSettingsObservation) { + *out = *in + if in.MaxThroughput != nil { + in, out := &in.MaxThroughput, &out.MaxThroughput + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLDatabaseAutoscaleSettingsObservation. +func (in *SQLDatabaseAutoscaleSettingsObservation) DeepCopy() *SQLDatabaseAutoscaleSettingsObservation { + if in == nil { + return nil + } + out := new(SQLDatabaseAutoscaleSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SQLDatabaseAutoscaleSettingsParameters) DeepCopyInto(out *SQLDatabaseAutoscaleSettingsParameters) { + *out = *in + if in.MaxThroughput != nil { + in, out := &in.MaxThroughput, &out.MaxThroughput + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLDatabaseAutoscaleSettingsParameters. 
+func (in *SQLDatabaseAutoscaleSettingsParameters) DeepCopy() *SQLDatabaseAutoscaleSettingsParameters { + if in == nil { + return nil + } + out := new(SQLDatabaseAutoscaleSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SQLDatabaseInitParameters) DeepCopyInto(out *SQLDatabaseInitParameters) { + *out = *in + if in.AutoscaleSettings != nil { + in, out := &in.AutoscaleSettings, &out.AutoscaleSettings + *out = new(SQLDatabaseAutoscaleSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLDatabaseInitParameters. +func (in *SQLDatabaseInitParameters) DeepCopy() *SQLDatabaseInitParameters { + if in == nil { + return nil + } + out := new(SQLDatabaseInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SQLDatabaseList) DeepCopyInto(out *SQLDatabaseList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SQLDatabase, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLDatabaseList. +func (in *SQLDatabaseList) DeepCopy() *SQLDatabaseList { + if in == nil { + return nil + } + out := new(SQLDatabaseList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *SQLDatabaseList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SQLDatabaseObservation) DeepCopyInto(out *SQLDatabaseObservation) { + *out = *in + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } + if in.AutoscaleSettings != nil { + in, out := &in.AutoscaleSettings, &out.AutoscaleSettings + *out = new(SQLDatabaseAutoscaleSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLDatabaseObservation. +func (in *SQLDatabaseObservation) DeepCopy() *SQLDatabaseObservation { + if in == nil { + return nil + } + out := new(SQLDatabaseObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SQLDatabaseParameters) DeepCopyInto(out *SQLDatabaseParameters) { + *out = *in + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } + if in.AccountNameRef != nil { + in, out := &in.AccountNameRef, &out.AccountNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AccountNameSelector != nil { + in, out := &in.AccountNameSelector, &out.AccountNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.AutoscaleSettings != nil { + in, out := &in.AutoscaleSettings, &out.AutoscaleSettings + *out = new(SQLDatabaseAutoscaleSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLDatabaseParameters. +func (in *SQLDatabaseParameters) DeepCopy() *SQLDatabaseParameters { + if in == nil { + return nil + } + out := new(SQLDatabaseParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SQLDatabaseSpec) DeepCopyInto(out *SQLDatabaseSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLDatabaseSpec. +func (in *SQLDatabaseSpec) DeepCopy() *SQLDatabaseSpec { + if in == nil { + return nil + } + out := new(SQLDatabaseSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SQLDatabaseStatus) DeepCopyInto(out *SQLDatabaseStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLDatabaseStatus. +func (in *SQLDatabaseStatus) DeepCopy() *SQLDatabaseStatus { + if in == nil { + return nil + } + out := new(SQLDatabaseStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchemaInitParameters) DeepCopyInto(out *SchemaInitParameters) { + *out = *in + if in.ClusterKey != nil { + in, out := &in.ClusterKey, &out.ClusterKey + *out = make([]ClusterKeyInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Column != nil { + in, out := &in.Column, &out.Column + *out = make([]ColumnInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PartitionKey != nil { + in, out := &in.PartitionKey, &out.PartitionKey + *out = make([]PartitionKeyInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaInitParameters. 
+func (in *SchemaInitParameters) DeepCopy() *SchemaInitParameters { + if in == nil { + return nil + } + out := new(SchemaInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchemaObservation) DeepCopyInto(out *SchemaObservation) { + *out = *in + if in.ClusterKey != nil { + in, out := &in.ClusterKey, &out.ClusterKey + *out = make([]ClusterKeyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Column != nil { + in, out := &in.Column, &out.Column + *out = make([]ColumnObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PartitionKey != nil { + in, out := &in.PartitionKey, &out.PartitionKey + *out = make([]PartitionKeyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaObservation. +func (in *SchemaObservation) DeepCopy() *SchemaObservation { + if in == nil { + return nil + } + out := new(SchemaObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SchemaParameters) DeepCopyInto(out *SchemaParameters) { + *out = *in + if in.ClusterKey != nil { + in, out := &in.ClusterKey, &out.ClusterKey + *out = make([]ClusterKeyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Column != nil { + in, out := &in.Column, &out.Column + *out = make([]ColumnParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PartitionKey != nil { + in, out := &in.PartitionKey, &out.PartitionKey + *out = make([]PartitionKeyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaParameters. +func (in *SchemaParameters) DeepCopy() *SchemaParameters { + if in == nil { + return nil + } + out := new(SchemaParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpatialIndexInitParameters) DeepCopyInto(out *SpatialIndexInitParameters) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpatialIndexInitParameters. +func (in *SpatialIndexInitParameters) DeepCopy() *SpatialIndexInitParameters { + if in == nil { + return nil + } + out := new(SpatialIndexInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpatialIndexObservation) DeepCopyInto(out *SpatialIndexObservation) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Types != nil { + in, out := &in.Types, &out.Types + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpatialIndexObservation. +func (in *SpatialIndexObservation) DeepCopy() *SpatialIndexObservation { + if in == nil { + return nil + } + out := new(SpatialIndexObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpatialIndexParameters) DeepCopyInto(out *SpatialIndexParameters) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpatialIndexParameters. +func (in *SpatialIndexParameters) DeepCopy() *SpatialIndexParameters { + if in == nil { + return nil + } + out := new(SpatialIndexParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SystemIndexesInitParameters) DeepCopyInto(out *SystemIndexesInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SystemIndexesInitParameters. +func (in *SystemIndexesInitParameters) DeepCopy() *SystemIndexesInitParameters { + if in == nil { + return nil + } + out := new(SystemIndexesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SystemIndexesObservation) DeepCopyInto(out *SystemIndexesObservation) { + *out = *in + if in.Keys != nil { + in, out := &in.Keys, &out.Keys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Unique != nil { + in, out := &in.Unique, &out.Unique + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SystemIndexesObservation. +func (in *SystemIndexesObservation) DeepCopy() *SystemIndexesObservation { + if in == nil { + return nil + } + out := new(SystemIndexesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SystemIndexesParameters) DeepCopyInto(out *SystemIndexesParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SystemIndexesParameters. +func (in *SystemIndexesParameters) DeepCopy() *SystemIndexesParameters { + if in == nil { + return nil + } + out := new(SystemIndexesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Table) DeepCopyInto(out *Table) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Table. +func (in *Table) DeepCopy() *Table { + if in == nil { + return nil + } + out := new(Table) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *Table) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableAutoscaleSettingsInitParameters) DeepCopyInto(out *TableAutoscaleSettingsInitParameters) { + *out = *in + if in.MaxThroughput != nil { + in, out := &in.MaxThroughput, &out.MaxThroughput + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableAutoscaleSettingsInitParameters. +func (in *TableAutoscaleSettingsInitParameters) DeepCopy() *TableAutoscaleSettingsInitParameters { + if in == nil { + return nil + } + out := new(TableAutoscaleSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableAutoscaleSettingsObservation) DeepCopyInto(out *TableAutoscaleSettingsObservation) { + *out = *in + if in.MaxThroughput != nil { + in, out := &in.MaxThroughput, &out.MaxThroughput + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableAutoscaleSettingsObservation. +func (in *TableAutoscaleSettingsObservation) DeepCopy() *TableAutoscaleSettingsObservation { + if in == nil { + return nil + } + out := new(TableAutoscaleSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TableAutoscaleSettingsParameters) DeepCopyInto(out *TableAutoscaleSettingsParameters) { + *out = *in + if in.MaxThroughput != nil { + in, out := &in.MaxThroughput, &out.MaxThroughput + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableAutoscaleSettingsParameters. +func (in *TableAutoscaleSettingsParameters) DeepCopy() *TableAutoscaleSettingsParameters { + if in == nil { + return nil + } + out := new(TableAutoscaleSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableInitParameters) DeepCopyInto(out *TableInitParameters) { + *out = *in + if in.AutoscaleSettings != nil { + in, out := &in.AutoscaleSettings, &out.AutoscaleSettings + *out = new(TableAutoscaleSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableInitParameters. +func (in *TableInitParameters) DeepCopy() *TableInitParameters { + if in == nil { + return nil + } + out := new(TableInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableList) DeepCopyInto(out *TableList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Table, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableList. 
+func (in *TableList) DeepCopy() *TableList { + if in == nil { + return nil + } + out := new(TableList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TableList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableObservation) DeepCopyInto(out *TableObservation) { + *out = *in + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } + if in.AutoscaleSettings != nil { + in, out := &in.AutoscaleSettings, &out.AutoscaleSettings + *out = new(TableAutoscaleSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableObservation. +func (in *TableObservation) DeepCopy() *TableObservation { + if in == nil { + return nil + } + out := new(TableObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TableParameters) DeepCopyInto(out *TableParameters) { + *out = *in + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } + if in.AccountNameRef != nil { + in, out := &in.AccountNameRef, &out.AccountNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AccountNameSelector != nil { + in, out := &in.AccountNameSelector, &out.AccountNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.AutoscaleSettings != nil { + in, out := &in.AutoscaleSettings, &out.AutoscaleSettings + *out = new(TableAutoscaleSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableParameters. +func (in *TableParameters) DeepCopy() *TableParameters { + if in == nil { + return nil + } + out := new(TableParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableSpec) DeepCopyInto(out *TableSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableSpec. 
+func (in *TableSpec) DeepCopy() *TableSpec { + if in == nil { + return nil + } + out := new(TableSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableStatus) DeepCopyInto(out *TableStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableStatus. +func (in *TableStatus) DeepCopy() *TableStatus { + if in == nil { + return nil + } + out := new(TableStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UniqueKeyInitParameters) DeepCopyInto(out *UniqueKeyInitParameters) { + *out = *in + if in.Paths != nil { + in, out := &in.Paths, &out.Paths + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UniqueKeyInitParameters. +func (in *UniqueKeyInitParameters) DeepCopy() *UniqueKeyInitParameters { + if in == nil { + return nil + } + out := new(UniqueKeyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UniqueKeyObservation) DeepCopyInto(out *UniqueKeyObservation) { + *out = *in + if in.Paths != nil { + in, out := &in.Paths, &out.Paths + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UniqueKeyObservation. 
+func (in *UniqueKeyObservation) DeepCopy() *UniqueKeyObservation { + if in == nil { + return nil + } + out := new(UniqueKeyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UniqueKeyParameters) DeepCopyInto(out *UniqueKeyParameters) { + *out = *in + if in.Paths != nil { + in, out := &in.Paths, &out.Paths + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UniqueKeyParameters. +func (in *UniqueKeyParameters) DeepCopy() *UniqueKeyParameters { + if in == nil { + return nil + } + out := new(UniqueKeyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualNetworkRuleInitParameters) DeepCopyInto(out *VirtualNetworkRuleInitParameters) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IgnoreMissingVnetServiceEndpoint != nil { + in, out := &in.IgnoreMissingVnetServiceEndpoint, &out.IgnoreMissingVnetServiceEndpoint + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkRuleInitParameters. +func (in *VirtualNetworkRuleInitParameters) DeepCopy() *VirtualNetworkRuleInitParameters { + if in == nil { + return nil + } + out := new(VirtualNetworkRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualNetworkRuleObservation) DeepCopyInto(out *VirtualNetworkRuleObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IgnoreMissingVnetServiceEndpoint != nil { + in, out := &in.IgnoreMissingVnetServiceEndpoint, &out.IgnoreMissingVnetServiceEndpoint + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkRuleObservation. +func (in *VirtualNetworkRuleObservation) DeepCopy() *VirtualNetworkRuleObservation { + if in == nil { + return nil + } + out := new(VirtualNetworkRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualNetworkRuleParameters) DeepCopyInto(out *VirtualNetworkRuleParameters) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IgnoreMissingVnetServiceEndpoint != nil { + in, out := &in.IgnoreMissingVnetServiceEndpoint, &out.IgnoreMissingVnetServiceEndpoint + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkRuleParameters. +func (in *VirtualNetworkRuleParameters) DeepCopy() *VirtualNetworkRuleParameters { + if in == nil { + return nil + } + out := new(VirtualNetworkRuleParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/cosmosdb/v1beta2/zz_generated.managed.go b/apis/cosmosdb/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..702fa436e --- /dev/null +++ b/apis/cosmosdb/v1beta2/zz_generated.managed.go @@ -0,0 +1,668 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. 
+ +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Account. +func (mg *Account) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Account. +func (mg *Account) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Account. +func (mg *Account) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Account. +func (mg *Account) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Account. +func (mg *Account) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Account. +func (mg *Account) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Account. +func (mg *Account) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Account. +func (mg *Account) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Account. +func (mg *Account) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Account. +func (mg *Account) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Account. +func (mg *Account) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Account. 
+func (mg *Account) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this CassandraCluster. +func (mg *CassandraCluster) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this CassandraCluster. +func (mg *CassandraCluster) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this CassandraCluster. +func (mg *CassandraCluster) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this CassandraCluster. +func (mg *CassandraCluster) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this CassandraCluster. +func (mg *CassandraCluster) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this CassandraCluster. +func (mg *CassandraCluster) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this CassandraCluster. +func (mg *CassandraCluster) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this CassandraCluster. +func (mg *CassandraCluster) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this CassandraCluster. +func (mg *CassandraCluster) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this CassandraCluster. +func (mg *CassandraCluster) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this CassandraCluster. 
+func (mg *CassandraCluster) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this CassandraCluster. +func (mg *CassandraCluster) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this CassandraKeySpace. +func (mg *CassandraKeySpace) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this CassandraKeySpace. +func (mg *CassandraKeySpace) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this CassandraKeySpace. +func (mg *CassandraKeySpace) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this CassandraKeySpace. +func (mg *CassandraKeySpace) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this CassandraKeySpace. +func (mg *CassandraKeySpace) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this CassandraKeySpace. +func (mg *CassandraKeySpace) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this CassandraKeySpace. +func (mg *CassandraKeySpace) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this CassandraKeySpace. +func (mg *CassandraKeySpace) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this CassandraKeySpace. +func (mg *CassandraKeySpace) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this CassandraKeySpace. 
+func (mg *CassandraKeySpace) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this CassandraKeySpace. +func (mg *CassandraKeySpace) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this CassandraKeySpace. +func (mg *CassandraKeySpace) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this CassandraTable. +func (mg *CassandraTable) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this CassandraTable. +func (mg *CassandraTable) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this CassandraTable. +func (mg *CassandraTable) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this CassandraTable. +func (mg *CassandraTable) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this CassandraTable. +func (mg *CassandraTable) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this CassandraTable. +func (mg *CassandraTable) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this CassandraTable. +func (mg *CassandraTable) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this CassandraTable. +func (mg *CassandraTable) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this CassandraTable. 
+func (mg *CassandraTable) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this CassandraTable. +func (mg *CassandraTable) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this CassandraTable. +func (mg *CassandraTable) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this CassandraTable. +func (mg *CassandraTable) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this GremlinDatabase. +func (mg *GremlinDatabase) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this GremlinDatabase. +func (mg *GremlinDatabase) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this GremlinDatabase. +func (mg *GremlinDatabase) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this GremlinDatabase. +func (mg *GremlinDatabase) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this GremlinDatabase. +func (mg *GremlinDatabase) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this GremlinDatabase. +func (mg *GremlinDatabase) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this GremlinDatabase. +func (mg *GremlinDatabase) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this GremlinDatabase. 
+func (mg *GremlinDatabase) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this GremlinDatabase. +func (mg *GremlinDatabase) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this GremlinDatabase. +func (mg *GremlinDatabase) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this GremlinDatabase. +func (mg *GremlinDatabase) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this GremlinDatabase. +func (mg *GremlinDatabase) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this GremlinGraph. +func (mg *GremlinGraph) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this GremlinGraph. +func (mg *GremlinGraph) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this GremlinGraph. +func (mg *GremlinGraph) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this GremlinGraph. +func (mg *GremlinGraph) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this GremlinGraph. +func (mg *GremlinGraph) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this GremlinGraph. +func (mg *GremlinGraph) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this GremlinGraph. 
+func (mg *GremlinGraph) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this GremlinGraph. +func (mg *GremlinGraph) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this GremlinGraph. +func (mg *GremlinGraph) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this GremlinGraph. +func (mg *GremlinGraph) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this GremlinGraph. +func (mg *GremlinGraph) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this GremlinGraph. +func (mg *GremlinGraph) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this MongoCollection. +func (mg *MongoCollection) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this MongoCollection. +func (mg *MongoCollection) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this MongoCollection. +func (mg *MongoCollection) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this MongoCollection. +func (mg *MongoCollection) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this MongoCollection. +func (mg *MongoCollection) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this MongoCollection. 
+func (mg *MongoCollection) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this MongoCollection. +func (mg *MongoCollection) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this MongoCollection. +func (mg *MongoCollection) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this MongoCollection. +func (mg *MongoCollection) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this MongoCollection. +func (mg *MongoCollection) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this MongoCollection. +func (mg *MongoCollection) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this MongoCollection. +func (mg *MongoCollection) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this MongoDatabase. +func (mg *MongoDatabase) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this MongoDatabase. +func (mg *MongoDatabase) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this MongoDatabase. +func (mg *MongoDatabase) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this MongoDatabase. +func (mg *MongoDatabase) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this MongoDatabase. 
+func (mg *MongoDatabase) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this MongoDatabase. +func (mg *MongoDatabase) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this MongoDatabase. +func (mg *MongoDatabase) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this MongoDatabase. +func (mg *MongoDatabase) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this MongoDatabase. +func (mg *MongoDatabase) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this MongoDatabase. +func (mg *MongoDatabase) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this MongoDatabase. +func (mg *MongoDatabase) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this MongoDatabase. +func (mg *MongoDatabase) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this SQLContainer. +func (mg *SQLContainer) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this SQLContainer. +func (mg *SQLContainer) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this SQLContainer. +func (mg *SQLContainer) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this SQLContainer. 
+func (mg *SQLContainer) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this SQLContainer. +func (mg *SQLContainer) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this SQLContainer. +func (mg *SQLContainer) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this SQLContainer. +func (mg *SQLContainer) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this SQLContainer. +func (mg *SQLContainer) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this SQLContainer. +func (mg *SQLContainer) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this SQLContainer. +func (mg *SQLContainer) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this SQLContainer. +func (mg *SQLContainer) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this SQLContainer. +func (mg *SQLContainer) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this SQLDatabase. +func (mg *SQLDatabase) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this SQLDatabase. +func (mg *SQLDatabase) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this SQLDatabase. 
+func (mg *SQLDatabase) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this SQLDatabase. +func (mg *SQLDatabase) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this SQLDatabase. +func (mg *SQLDatabase) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this SQLDatabase. +func (mg *SQLDatabase) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this SQLDatabase. +func (mg *SQLDatabase) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this SQLDatabase. +func (mg *SQLDatabase) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this SQLDatabase. +func (mg *SQLDatabase) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this SQLDatabase. +func (mg *SQLDatabase) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this SQLDatabase. +func (mg *SQLDatabase) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this SQLDatabase. +func (mg *SQLDatabase) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Table. +func (mg *Table) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Table. 
+func (mg *Table) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Table. +func (mg *Table) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Table. +func (mg *Table) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Table. +func (mg *Table) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Table. +func (mg *Table) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Table. +func (mg *Table) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Table. +func (mg *Table) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Table. +func (mg *Table) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Table. +func (mg *Table) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Table. +func (mg *Table) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Table. 
+func (mg *Table) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/cosmosdb/v1beta2/zz_generated.managedlist.go b/apis/cosmosdb/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..a1db2606f --- /dev/null +++ b/apis/cosmosdb/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,107 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this AccountList. +func (l *AccountList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this CassandraClusterList. +func (l *CassandraClusterList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this CassandraKeySpaceList. +func (l *CassandraKeySpaceList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this CassandraTableList. +func (l *CassandraTableList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this GremlinDatabaseList. +func (l *GremlinDatabaseList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this GremlinGraphList. 
+func (l *GremlinGraphList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this MongoCollectionList. +func (l *MongoCollectionList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this MongoDatabaseList. +func (l *MongoDatabaseList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this SQLContainerList. +func (l *SQLContainerList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this SQLDatabaseList. +func (l *SQLDatabaseList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this TableList. +func (l *TableList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/cosmosdb/v1beta2/zz_generated.resolvers.go b/apis/cosmosdb/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..21c4a4f22 --- /dev/null +++ b/apis/cosmosdb/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,651 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + rconfig "github.com/upbound/provider-azure/apis/rconfig" + client "sigs.k8s.io/controller-runtime/pkg/client" + + // ResolveReferences of this Account. + apisresolver "github.com/upbound/provider-azure/internal/apis" +) + +func (mg *Account) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.Restore != nil { + { + m, l, err = apisresolver.GetManagedResource("cosmosdb.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.ForProvider.Restore.SourceCosmosDBAccountID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.Restore.SourceCosmosDBAccountIDRef, + Selector: mg.Spec.ForProvider.Restore.SourceCosmosDBAccountIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Restore.SourceCosmosDBAccountID") + } + mg.Spec.ForProvider.Restore.SourceCosmosDBAccountID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Restore.SourceCosmosDBAccountIDRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.Restore != nil { + { + m, l, err = apisresolver.GetManagedResource("cosmosdb.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Restore.SourceCosmosDBAccountID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.Restore.SourceCosmosDBAccountIDRef, + Selector: mg.Spec.InitProvider.Restore.SourceCosmosDBAccountIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Restore.SourceCosmosDBAccountID") + } + mg.Spec.InitProvider.Restore.SourceCosmosDBAccountID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Restore.SourceCosmosDBAccountIDRef = rsp.ResolvedReference + + } + + return nil +} + +// ResolveReferences of this CassandraCluster. 
+func (mg *CassandraCluster) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DelegatedManagementSubnetID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.DelegatedManagementSubnetIDRef, + Selector: mg.Spec.ForProvider.DelegatedManagementSubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DelegatedManagementSubnetID") + } + mg.Spec.ForProvider.DelegatedManagementSubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DelegatedManagementSubnetIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = 
apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DelegatedManagementSubnetID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.DelegatedManagementSubnetIDRef, + Selector: mg.Spec.InitProvider.DelegatedManagementSubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.DelegatedManagementSubnetID") + } + mg.Spec.InitProvider.DelegatedManagementSubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DelegatedManagementSubnetIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this CassandraKeySpace. +func (mg *CassandraKeySpace) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("cosmosdb.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.AccountName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.AccountNameRef, + Selector: mg.Spec.ForProvider.AccountNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.AccountName") + } + mg.Spec.ForProvider.AccountName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.AccountNameRef = rsp.ResolvedReference + 
{ + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this CassandraTable. +func (mg *CassandraTable) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("cosmosdb.azure.upbound.io", "v1beta2", "CassandraKeySpace", "CassandraKeySpaceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.CassandraKeySpaceID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.CassandraKeySpaceIDRef, + Selector: mg.Spec.ForProvider.CassandraKeySpaceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.CassandraKeySpaceID") + } + mg.Spec.ForProvider.CassandraKeySpaceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.CassandraKeySpaceIDRef = 
rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this GremlinDatabase. +func (mg *GremlinDatabase) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("cosmosdb.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.AccountName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.AccountNameRef, + Selector: mg.Spec.ForProvider.AccountNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.AccountName") + } + mg.Spec.ForProvider.AccountName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.AccountNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} + +// 
ResolveReferences of this GremlinGraph. +func (mg *GremlinGraph) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("cosmosdb.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.AccountName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.AccountNameRef, + Selector: mg.Spec.ForProvider.AccountNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.AccountName") + } + mg.Spec.ForProvider.AccountName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.AccountNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("cosmosdb.azure.upbound.io", "v1beta2", "GremlinDatabase", "GremlinDatabaseList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DatabaseName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.DatabaseNameRef, + Selector: mg.Spec.ForProvider.DatabaseNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DatabaseName") + } + mg.Spec.ForProvider.DatabaseName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DatabaseNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", 
"ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this MongoCollection. +func (mg *MongoCollection) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("cosmosdb.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.AccountName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.AccountNameRef, + Selector: mg.Spec.ForProvider.AccountNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.AccountName") + } + mg.Spec.ForProvider.AccountName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.AccountNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("cosmosdb.azure.upbound.io", "v1beta2", "MongoDatabase", "MongoDatabaseList") + if err != nil { + 
return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DatabaseName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.DatabaseNameRef, + Selector: mg.Spec.ForProvider.DatabaseNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DatabaseName") + } + mg.Spec.ForProvider.DatabaseName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DatabaseNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this MongoDatabase. 
+func (mg *MongoDatabase) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("cosmosdb.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.AccountName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.AccountNameRef, + Selector: mg.Spec.ForProvider.AccountNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.AccountName") + } + mg.Spec.ForProvider.AccountName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.AccountNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this SQLContainer. 
+func (mg *SQLContainer) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("cosmosdb.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.AccountName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.AccountNameRef, + Selector: mg.Spec.ForProvider.AccountNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.AccountName") + } + mg.Spec.ForProvider.AccountName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.AccountNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("cosmosdb.azure.upbound.io", "v1beta2", "SQLDatabase", "SQLDatabaseList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DatabaseName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.DatabaseNameRef, + Selector: mg.Spec.ForProvider.DatabaseNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DatabaseName") + } + mg.Spec.ForProvider.DatabaseName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DatabaseNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return 
errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this SQLDatabase. +func (mg *SQLDatabase) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("cosmosdb.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.AccountName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.AccountNameRef, + Selector: mg.Spec.ForProvider.AccountNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.AccountName") + } + mg.Spec.ForProvider.AccountName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.AccountNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target 
managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Table. +func (mg *Table) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("cosmosdb.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.AccountName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.AccountNameRef, + Selector: mg.Spec.ForProvider.AccountNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.AccountName") + } + mg.Spec.ForProvider.AccountName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.AccountNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, 
err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/cosmosdb/v1beta2/zz_gremlindatabase_terraformed.go b/apis/cosmosdb/v1beta2/zz_gremlindatabase_terraformed.go new file mode 100755 index 000000000..26508e9fa --- /dev/null +++ b/apis/cosmosdb/v1beta2/zz_gremlindatabase_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this GremlinDatabase +func (mg *GremlinDatabase) GetTerraformResourceType() string { + return "azurerm_cosmosdb_gremlin_database" +} + +// GetConnectionDetailsMapping for this GremlinDatabase +func (tr *GremlinDatabase) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this GremlinDatabase +func (tr *GremlinDatabase) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this GremlinDatabase +func (tr *GremlinDatabase) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + 
return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this GremlinDatabase +func (tr *GremlinDatabase) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this GremlinDatabase +func (tr *GremlinDatabase) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this GremlinDatabase +func (tr *GremlinDatabase) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this GremlinDatabase +func (tr *GremlinDatabase) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this GremlinDatabase +func (tr *GremlinDatabase) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this GremlinDatabase using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *GremlinDatabase) LateInitialize(attrs []byte) (bool, error) { + params := &GremlinDatabaseParameters_2{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *GremlinDatabase) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/cosmosdb/v1beta2/zz_gremlindatabase_types.go b/apis/cosmosdb/v1beta2/zz_gremlindatabase_types.go new file mode 100755 index 000000000..1e8da6fb4 --- /dev/null +++ b/apis/cosmosdb/v1beta2/zz_gremlindatabase_types.go @@ -0,0 +1,157 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type GremlinDatabaseAutoscaleSettingsInitParameters struct { + + // The maximum throughput of the Gremlin database (RU/s). Must be between 1,000 and 1,000,000. Must be set in increments of 1,000. Conflicts with throughput. 
+ MaxThroughput *float64 `json:"maxThroughput,omitempty" tf:"max_throughput,omitempty"` +} + +type GremlinDatabaseAutoscaleSettingsObservation struct { + + // The maximum throughput of the Gremlin database (RU/s). Must be between 1,000 and 1,000,000. Must be set in increments of 1,000. Conflicts with throughput. + MaxThroughput *float64 `json:"maxThroughput,omitempty" tf:"max_throughput,omitempty"` +} + +type GremlinDatabaseAutoscaleSettingsParameters struct { + + // The maximum throughput of the Gremlin database (RU/s). Must be between 1,000 and 1,000,000. Must be set in increments of 1,000. Conflicts with throughput. + // +kubebuilder:validation:Optional + MaxThroughput *float64 `json:"maxThroughput,omitempty" tf:"max_throughput,omitempty"` +} + +type GremlinDatabaseInitParameters_2 struct { + + // An autoscale_settings block as defined below. + AutoscaleSettings *GremlinDatabaseAutoscaleSettingsInitParameters `json:"autoscaleSettings,omitempty" tf:"autoscale_settings,omitempty"` + + // The throughput of the Gremlin database (RU/s). Must be set in increments of 100. The minimum value is 400. + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` +} + +type GremlinDatabaseObservation_2 struct { + + // The name of the CosmosDB Account to create the Gremlin Database within. Changing this forces a new resource to be created. + AccountName *string `json:"accountName,omitempty" tf:"account_name,omitempty"` + + // An autoscale_settings block as defined below. + AutoscaleSettings *GremlinDatabaseAutoscaleSettingsObservation `json:"autoscaleSettings,omitempty" tf:"autoscale_settings,omitempty"` + + // The ID of the CosmosDB Gremlin Database. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name of the resource group in which the Cosmos DB Gremlin Database is created. Changing this forces a new resource to be created. 
+ ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // The throughput of the Gremlin database (RU/s). Must be set in increments of 100. The minimum value is 400. + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` +} + +type GremlinDatabaseParameters_2 struct { + + // The name of the CosmosDB Account to create the Gremlin Database within. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cosmosdb/v1beta2.Account + // +kubebuilder:validation:Optional + AccountName *string `json:"accountName,omitempty" tf:"account_name,omitempty"` + + // Reference to a Account in cosmosdb to populate accountName. + // +kubebuilder:validation:Optional + AccountNameRef *v1.Reference `json:"accountNameRef,omitempty" tf:"-"` + + // Selector for a Account in cosmosdb to populate accountName. + // +kubebuilder:validation:Optional + AccountNameSelector *v1.Selector `json:"accountNameSelector,omitempty" tf:"-"` + + // An autoscale_settings block as defined below. + // +kubebuilder:validation:Optional + AutoscaleSettings *GremlinDatabaseAutoscaleSettingsParameters `json:"autoscaleSettings,omitempty" tf:"autoscale_settings,omitempty"` + + // The name of the resource group in which the Cosmos DB Gremlin Database is created. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. 
+ // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // The throughput of the Gremlin database (RU/s). Must be set in increments of 100. The minimum value is 400. + // +kubebuilder:validation:Optional + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` +} + +// GremlinDatabaseSpec defines the desired state of GremlinDatabase +type GremlinDatabaseSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider GremlinDatabaseParameters_2 `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider GremlinDatabaseInitParameters_2 `json:"initProvider,omitempty"` +} + +// GremlinDatabaseStatus defines the observed state of GremlinDatabase. +type GremlinDatabaseStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider GremlinDatabaseObservation_2 `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// GremlinDatabase is the Schema for the GremlinDatabases API. Manages a Gremlin Database within a Cosmos DB Account. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type GremlinDatabase struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec GremlinDatabaseSpec `json:"spec"` + Status GremlinDatabaseStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// GremlinDatabaseList contains a list of GremlinDatabases +type GremlinDatabaseList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []GremlinDatabase `json:"items"` +} + +// Repository type metadata. +var ( + GremlinDatabase_Kind = "GremlinDatabase" + GremlinDatabase_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: GremlinDatabase_Kind}.String() + GremlinDatabase_KindAPIVersion = GremlinDatabase_Kind + "." + CRDGroupVersion.String() + GremlinDatabase_GroupVersionKind = CRDGroupVersion.WithKind(GremlinDatabase_Kind) +) + +func init() { + SchemeBuilder.Register(&GremlinDatabase{}, &GremlinDatabaseList{}) +} diff --git a/apis/cosmosdb/v1beta2/zz_gremlingraph_terraformed.go b/apis/cosmosdb/v1beta2/zz_gremlingraph_terraformed.go new file mode 100755 index 000000000..bf406a730 --- /dev/null +++ b/apis/cosmosdb/v1beta2/zz_gremlingraph_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this GremlinGraph +func (mg *GremlinGraph) GetTerraformResourceType() string { + return "azurerm_cosmosdb_gremlin_graph" +} + +// GetConnectionDetailsMapping for this GremlinGraph +func (tr *GremlinGraph) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this GremlinGraph +func (tr *GremlinGraph) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this GremlinGraph +func (tr *GremlinGraph) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this GremlinGraph +func (tr *GremlinGraph) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this GremlinGraph +func (tr *GremlinGraph) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this GremlinGraph +func (tr *GremlinGraph) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this GremlinGraph +func (tr *GremlinGraph) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + 
} + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this GremlinGraph +func (tr *GremlinGraph) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this GremlinGraph using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *GremlinGraph) LateInitialize(attrs []byte) (bool, error) { + params := &GremlinGraphParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *GremlinGraph) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/cosmosdb/v1beta2/zz_gremlingraph_types.go b/apis/cosmosdb/v1beta2/zz_gremlingraph_types.go new file mode 100755 index 000000000..c0460bdd7 --- /dev/null +++ b/apis/cosmosdb/v1beta2/zz_gremlingraph_types.go @@ -0,0 +1,450 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CompositeIndexInitParameters struct { + + // One or more index blocks as defined below. + Index []IndexInitParameters `json:"index,omitempty" tf:"index,omitempty"` +} + +type CompositeIndexObservation struct { + + // One or more index blocks as defined below. + Index []IndexObservation `json:"index,omitempty" tf:"index,omitempty"` +} + +type CompositeIndexParameters struct { + + // One or more index blocks as defined below. + // +kubebuilder:validation:Optional + Index []IndexParameters `json:"index" tf:"index,omitempty"` +} + +type ConflictResolutionPolicyInitParameters struct { + + // The conflict resolution path in the case of LastWriterWins mode. + ConflictResolutionPath *string `json:"conflictResolutionPath,omitempty" tf:"conflict_resolution_path,omitempty"` + + // The procedure to resolve conflicts in the case of custom mode. + ConflictResolutionProcedure *string `json:"conflictResolutionProcedure,omitempty" tf:"conflict_resolution_procedure,omitempty"` + + // Indicates the conflict resolution mode. Possible values include: LastWriterWins, Custom. 
+ Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` +} + +type ConflictResolutionPolicyObservation struct { + + // The conflict resolution path in the case of LastWriterWins mode. + ConflictResolutionPath *string `json:"conflictResolutionPath,omitempty" tf:"conflict_resolution_path,omitempty"` + + // The procedure to resolve conflicts in the case of custom mode. + ConflictResolutionProcedure *string `json:"conflictResolutionProcedure,omitempty" tf:"conflict_resolution_procedure,omitempty"` + + // Indicates the conflict resolution mode. Possible values include: LastWriterWins, Custom. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` +} + +type ConflictResolutionPolicyParameters struct { + + // The conflict resolution path in the case of LastWriterWins mode. + // +kubebuilder:validation:Optional + ConflictResolutionPath *string `json:"conflictResolutionPath,omitempty" tf:"conflict_resolution_path,omitempty"` + + // The procedure to resolve conflicts in the case of custom mode. + // +kubebuilder:validation:Optional + ConflictResolutionProcedure *string `json:"conflictResolutionProcedure,omitempty" tf:"conflict_resolution_procedure,omitempty"` + + // Indicates the conflict resolution mode. Possible values include: LastWriterWins, Custom. + // +kubebuilder:validation:Optional + Mode *string `json:"mode" tf:"mode,omitempty"` +} + +type GremlinGraphAutoscaleSettingsInitParameters struct { + + // The maximum throughput of the Gremlin graph (RU/s). Must be between 1,000 and 1,000,000. Must be set in increments of 1,000. Conflicts with throughput. + MaxThroughput *float64 `json:"maxThroughput,omitempty" tf:"max_throughput,omitempty"` +} + +type GremlinGraphAutoscaleSettingsObservation struct { + + // The maximum throughput of the Gremlin graph (RU/s). Must be between 1,000 and 1,000,000. Must be set in increments of 1,000. Conflicts with throughput. 
+ MaxThroughput *float64 `json:"maxThroughput,omitempty" tf:"max_throughput,omitempty"` +} + +type GremlinGraphAutoscaleSettingsParameters struct { + + // The maximum throughput of the Gremlin graph (RU/s). Must be between 1,000 and 1,000,000. Must be set in increments of 1,000. Conflicts with throughput. + // +kubebuilder:validation:Optional + MaxThroughput *float64 `json:"maxThroughput,omitempty" tf:"max_throughput,omitempty"` +} + +type GremlinGraphInitParameters struct { + + // The time to live of Analytical Storage for this Cosmos DB Gremlin Graph. Possible values are between -1 to 2147483647 not including 0. If present and the value is set to -1, it means never expire. + AnalyticalStorageTTL *float64 `json:"analyticalStorageTtl,omitempty" tf:"analytical_storage_ttl,omitempty"` + + // An autoscale_settings block as defined below. Requires partition_key_path to be set. + AutoscaleSettings *GremlinGraphAutoscaleSettingsInitParameters `json:"autoscaleSettings,omitempty" tf:"autoscale_settings,omitempty"` + + // A conflict_resolution_policy blocks as defined below. Changing this forces a new resource to be created. + ConflictResolutionPolicy *ConflictResolutionPolicyInitParameters `json:"conflictResolutionPolicy,omitempty" tf:"conflict_resolution_policy,omitempty"` + + // The default time to live (TTL) of the Gremlin graph. If the value is missing or set to "-1", items don’t expire. + DefaultTTL *float64 `json:"defaultTtl,omitempty" tf:"default_ttl,omitempty"` + + // The configuration of the indexing policy. One or more index_policy blocks as defined below. + IndexPolicy *IndexPolicyInitParameters `json:"indexPolicy,omitempty" tf:"index_policy,omitempty"` + + // Define a partition key. Changing this forces a new resource to be created. + PartitionKeyPath *string `json:"partitionKeyPath,omitempty" tf:"partition_key_path,omitempty"` + + // Define a partition key version. Changing this forces a new resource to be created. Possible values are 1and 2. 
This should be set to 2 in order to use large partition keys. + PartitionKeyVersion *float64 `json:"partitionKeyVersion,omitempty" tf:"partition_key_version,omitempty"` + + // The throughput of the Gremlin graph (RU/s). Must be set in increments of 100. The minimum value is 400. + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` + + // One or more unique_key blocks as defined below. Changing this forces a new resource to be created. + UniqueKey []UniqueKeyInitParameters `json:"uniqueKey,omitempty" tf:"unique_key,omitempty"` +} + +type GremlinGraphObservation struct { + + // The name of the CosmosDB Account to create the Gremlin Graph within. Changing this forces a new resource to be created. + AccountName *string `json:"accountName,omitempty" tf:"account_name,omitempty"` + + // The time to live of Analytical Storage for this Cosmos DB Gremlin Graph. Possible values are between -1 to 2147483647 not including 0. If present and the value is set to -1, it means never expire. + AnalyticalStorageTTL *float64 `json:"analyticalStorageTtl,omitempty" tf:"analytical_storage_ttl,omitempty"` + + // An autoscale_settings block as defined below. Requires partition_key_path to be set. + AutoscaleSettings *GremlinGraphAutoscaleSettingsObservation `json:"autoscaleSettings,omitempty" tf:"autoscale_settings,omitempty"` + + // A conflict_resolution_policy blocks as defined below. Changing this forces a new resource to be created. + ConflictResolutionPolicy *ConflictResolutionPolicyObservation `json:"conflictResolutionPolicy,omitempty" tf:"conflict_resolution_policy,omitempty"` + + // The name of the Cosmos DB Graph Database in which the Cosmos DB Gremlin Graph is created. Changing this forces a new resource to be created. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // The default time to live (TTL) of the Gremlin graph. If the value is missing or set to "-1", items don’t expire. 
+ DefaultTTL *float64 `json:"defaultTtl,omitempty" tf:"default_ttl,omitempty"` + + // The ID of the CosmosDB Gremlin Graph. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The configuration of the indexing policy. One or more index_policy blocks as defined below. + IndexPolicy *IndexPolicyObservation `json:"indexPolicy,omitempty" tf:"index_policy,omitempty"` + + // Define a partition key. Changing this forces a new resource to be created. + PartitionKeyPath *string `json:"partitionKeyPath,omitempty" tf:"partition_key_path,omitempty"` + + // Define a partition key version. Changing this forces a new resource to be created. Possible values are 1and 2. This should be set to 2 in order to use large partition keys. + PartitionKeyVersion *float64 `json:"partitionKeyVersion,omitempty" tf:"partition_key_version,omitempty"` + + // The name of the resource group in which the Cosmos DB Gremlin Graph is created. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // The throughput of the Gremlin graph (RU/s). Must be set in increments of 100. The minimum value is 400. + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` + + // One or more unique_key blocks as defined below. Changing this forces a new resource to be created. + UniqueKey []UniqueKeyObservation `json:"uniqueKey,omitempty" tf:"unique_key,omitempty"` +} + +type GremlinGraphParameters struct { + + // The name of the CosmosDB Account to create the Gremlin Graph within. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cosmosdb/v1beta2.Account + // +kubebuilder:validation:Optional + AccountName *string `json:"accountName,omitempty" tf:"account_name,omitempty"` + + // Reference to a Account in cosmosdb to populate accountName. 
+ // +kubebuilder:validation:Optional + AccountNameRef *v1.Reference `json:"accountNameRef,omitempty" tf:"-"` + + // Selector for a Account in cosmosdb to populate accountName. + // +kubebuilder:validation:Optional + AccountNameSelector *v1.Selector `json:"accountNameSelector,omitempty" tf:"-"` + + // The time to live of Analytical Storage for this Cosmos DB Gremlin Graph. Possible values are between -1 to 2147483647 not including 0. If present and the value is set to -1, it means never expire. + // +kubebuilder:validation:Optional + AnalyticalStorageTTL *float64 `json:"analyticalStorageTtl,omitempty" tf:"analytical_storage_ttl,omitempty"` + + // An autoscale_settings block as defined below. Requires partition_key_path to be set. + // +kubebuilder:validation:Optional + AutoscaleSettings *GremlinGraphAutoscaleSettingsParameters `json:"autoscaleSettings,omitempty" tf:"autoscale_settings,omitempty"` + + // A conflict_resolution_policy blocks as defined below. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ConflictResolutionPolicy *ConflictResolutionPolicyParameters `json:"conflictResolutionPolicy,omitempty" tf:"conflict_resolution_policy,omitempty"` + + // The name of the Cosmos DB Graph Database in which the Cosmos DB Gremlin Graph is created. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cosmosdb/v1beta2.GremlinDatabase + // +kubebuilder:validation:Optional + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // Reference to a GremlinDatabase in cosmosdb to populate databaseName. + // +kubebuilder:validation:Optional + DatabaseNameRef *v1.Reference `json:"databaseNameRef,omitempty" tf:"-"` + + // Selector for a GremlinDatabase in cosmosdb to populate databaseName. 
+ // +kubebuilder:validation:Optional + DatabaseNameSelector *v1.Selector `json:"databaseNameSelector,omitempty" tf:"-"` + + // The default time to live (TTL) of the Gremlin graph. If the value is missing or set to "-1", items don’t expire. + // +kubebuilder:validation:Optional + DefaultTTL *float64 `json:"defaultTtl,omitempty" tf:"default_ttl,omitempty"` + + // The configuration of the indexing policy. One or more index_policy blocks as defined below. + // +kubebuilder:validation:Optional + IndexPolicy *IndexPolicyParameters `json:"indexPolicy,omitempty" tf:"index_policy,omitempty"` + + // Define a partition key. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + PartitionKeyPath *string `json:"partitionKeyPath,omitempty" tf:"partition_key_path,omitempty"` + + // Define a partition key version. Changing this forces a new resource to be created. Possible values are 1and 2. This should be set to 2 in order to use large partition keys. + // +kubebuilder:validation:Optional + PartitionKeyVersion *float64 `json:"partitionKeyVersion,omitempty" tf:"partition_key_version,omitempty"` + + // The name of the resource group in which the Cosmos DB Gremlin Graph is created. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // The throughput of the Gremlin graph (RU/s). 
Must be set in increments of 100. The minimum value is 400. + // +kubebuilder:validation:Optional + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` + + // One or more unique_key blocks as defined below. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + UniqueKey []UniqueKeyParameters `json:"uniqueKey,omitempty" tf:"unique_key,omitempty"` +} + +type IndexInitParameters struct { + + // Order of the index. Possible values are Ascending or Descending. + Order *string `json:"order,omitempty" tf:"order,omitempty"` + + // Path for which the indexing behaviour applies to. According to the service design, all spatial types including LineString, MultiPolygon, Point, and Polygon will be applied to the path. + Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type IndexObservation struct { + + // Order of the index. Possible values are Ascending or Descending. + Order *string `json:"order,omitempty" tf:"order,omitempty"` + + // Path for which the indexing behaviour applies to. According to the service design, all spatial types including LineString, MultiPolygon, Point, and Polygon will be applied to the path. + Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type IndexParameters struct { + + // Order of the index. Possible values are Ascending or Descending. + // +kubebuilder:validation:Optional + Order *string `json:"order" tf:"order,omitempty"` + + // Path for which the indexing behaviour applies to. According to the service design, all spatial types including LineString, MultiPolygon, Point, and Polygon will be applied to the path. + // +kubebuilder:validation:Optional + Path *string `json:"path" tf:"path,omitempty"` +} + +type IndexPolicyInitParameters struct { + + // Indicates if the indexing policy is automatic. Defaults to true. + Automatic *bool `json:"automatic,omitempty" tf:"automatic,omitempty"` + + // One or more composite_index blocks as defined below. 
+ CompositeIndex []CompositeIndexInitParameters `json:"compositeIndex,omitempty" tf:"composite_index,omitempty"` + + // List of paths to exclude from indexing. Required if indexing_mode is Consistent or Lazy. + // +listType=set + ExcludedPaths []*string `json:"excludedPaths,omitempty" tf:"excluded_paths,omitempty"` + + // List of paths to include in the indexing. Required if indexing_mode is Consistent or Lazy. + // +listType=set + IncludedPaths []*string `json:"includedPaths,omitempty" tf:"included_paths,omitempty"` + + // Indicates the indexing mode. Possible values include: Consistent, Lazy, None. + IndexingMode *string `json:"indexingMode,omitempty" tf:"indexing_mode,omitempty"` + + // One or more spatial_index blocks as defined below. + SpatialIndex []SpatialIndexInitParameters `json:"spatialIndex,omitempty" tf:"spatial_index,omitempty"` +} + +type IndexPolicyObservation struct { + + // Indicates if the indexing policy is automatic. Defaults to true. + Automatic *bool `json:"automatic,omitempty" tf:"automatic,omitempty"` + + // One or more composite_index blocks as defined below. + CompositeIndex []CompositeIndexObservation `json:"compositeIndex,omitempty" tf:"composite_index,omitempty"` + + // List of paths to exclude from indexing. Required if indexing_mode is Consistent or Lazy. + // +listType=set + ExcludedPaths []*string `json:"excludedPaths,omitempty" tf:"excluded_paths,omitempty"` + + // List of paths to include in the indexing. Required if indexing_mode is Consistent or Lazy. + // +listType=set + IncludedPaths []*string `json:"includedPaths,omitempty" tf:"included_paths,omitempty"` + + // Indicates the indexing mode. Possible values include: Consistent, Lazy, None. + IndexingMode *string `json:"indexingMode,omitempty" tf:"indexing_mode,omitempty"` + + // One or more spatial_index blocks as defined below. 
+ SpatialIndex []SpatialIndexObservation `json:"spatialIndex,omitempty" tf:"spatial_index,omitempty"` +} + +type IndexPolicyParameters struct { + + // Indicates if the indexing policy is automatic. Defaults to true. + // +kubebuilder:validation:Optional + Automatic *bool `json:"automatic,omitempty" tf:"automatic,omitempty"` + + // One or more composite_index blocks as defined below. + // +kubebuilder:validation:Optional + CompositeIndex []CompositeIndexParameters `json:"compositeIndex,omitempty" tf:"composite_index,omitempty"` + + // List of paths to exclude from indexing. Required if indexing_mode is Consistent or Lazy. + // +kubebuilder:validation:Optional + // +listType=set + ExcludedPaths []*string `json:"excludedPaths,omitempty" tf:"excluded_paths,omitempty"` + + // List of paths to include in the indexing. Required if indexing_mode is Consistent or Lazy. + // +kubebuilder:validation:Optional + // +listType=set + IncludedPaths []*string `json:"includedPaths,omitempty" tf:"included_paths,omitempty"` + + // Indicates the indexing mode. Possible values include: Consistent, Lazy, None. + // +kubebuilder:validation:Optional + IndexingMode *string `json:"indexingMode" tf:"indexing_mode,omitempty"` + + // One or more spatial_index blocks as defined below. + // +kubebuilder:validation:Optional + SpatialIndex []SpatialIndexParameters `json:"spatialIndex,omitempty" tf:"spatial_index,omitempty"` +} + +type SpatialIndexInitParameters struct { + + // Path for which the indexing behaviour applies to. According to the service design, all spatial types including LineString, MultiPolygon, Point, and Polygon will be applied to the path. + Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type SpatialIndexObservation struct { + + // Path for which the indexing behaviour applies to. According to the service design, all spatial types including LineString, MultiPolygon, Point, and Polygon will be applied to the path. 
+ Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // +listType=set + Types []*string `json:"types,omitempty" tf:"types,omitempty"` +} + +type SpatialIndexParameters struct { + + // Path for which the indexing behaviour applies to. According to the service design, all spatial types including LineString, MultiPolygon, Point, and Polygon will be applied to the path. + // +kubebuilder:validation:Optional + Path *string `json:"path" tf:"path,omitempty"` +} + +type UniqueKeyInitParameters struct { + + // A list of paths to use for this unique key. Changing this forces a new resource to be created. + // +listType=set + Paths []*string `json:"paths,omitempty" tf:"paths,omitempty"` +} + +type UniqueKeyObservation struct { + + // A list of paths to use for this unique key. Changing this forces a new resource to be created. + // +listType=set + Paths []*string `json:"paths,omitempty" tf:"paths,omitempty"` +} + +type UniqueKeyParameters struct { + + // A list of paths to use for this unique key. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + // +listType=set + Paths []*string `json:"paths" tf:"paths,omitempty"` +} + +// GremlinGraphSpec defines the desired state of GremlinGraph +type GremlinGraphSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider GremlinGraphParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider GremlinGraphInitParameters `json:"initProvider,omitempty"` +} + +// GremlinGraphStatus defines the observed state of GremlinGraph. +type GremlinGraphStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider GremlinGraphObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// GremlinGraph is the Schema for the GremlinGraphs API. Manages a Gremlin Graph within a Cosmos DB Account. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type GremlinGraph struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.partitionKeyPath) || (has(self.initProvider) && has(self.initProvider.partitionKeyPath))",message="spec.forProvider.partitionKeyPath is a required parameter" + Spec GremlinGraphSpec `json:"spec"` + Status GremlinGraphStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// GremlinGraphList contains a list of GremlinGraphs +type GremlinGraphList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []GremlinGraph `json:"items"` +} + 
+// Repository type metadata. +var ( + GremlinGraph_Kind = "GremlinGraph" + GremlinGraph_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: GremlinGraph_Kind}.String() + GremlinGraph_KindAPIVersion = GremlinGraph_Kind + "." + CRDGroupVersion.String() + GremlinGraph_GroupVersionKind = CRDGroupVersion.WithKind(GremlinGraph_Kind) +) + +func init() { + SchemeBuilder.Register(&GremlinGraph{}, &GremlinGraphList{}) +} diff --git a/apis/cosmosdb/v1beta2/zz_groupversion_info.go b/apis/cosmosdb/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..d8e3214cd --- /dev/null +++ b/apis/cosmosdb/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=cosmosdb.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "cosmosdb.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/cosmosdb/v1beta2/zz_mongocollection_terraformed.go b/apis/cosmosdb/v1beta2/zz_mongocollection_terraformed.go new file mode 100755 index 000000000..cf9072755 --- /dev/null +++ b/apis/cosmosdb/v1beta2/zz_mongocollection_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this MongoCollection +func (mg *MongoCollection) GetTerraformResourceType() string { + return "azurerm_cosmosdb_mongo_collection" +} + +// GetConnectionDetailsMapping for this MongoCollection +func (tr *MongoCollection) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this MongoCollection +func (tr *MongoCollection) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this MongoCollection +func (tr *MongoCollection) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this MongoCollection +func (tr *MongoCollection) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this MongoCollection +func (tr *MongoCollection) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this MongoCollection +func (tr *MongoCollection) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this MongoCollection +func (tr *MongoCollection) GetInitParameters() (map[string]any, error) { + p, err := 
json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this MongoCollection +func (tr *MongoCollection) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this MongoCollection using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *MongoCollection) LateInitialize(attrs []byte) (bool, error) { + params := &MongoCollectionParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *MongoCollection) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/cosmosdb/v1beta2/zz_mongocollection_types.go b/apis/cosmosdb/v1beta2/zz_mongocollection_types.go new file mode 100755 index 000000000..3aeda099c --- /dev/null +++ b/apis/cosmosdb/v1beta2/zz_mongocollection_types.go @@ -0,0 +1,260 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type MongoCollectionAutoscaleSettingsInitParameters struct { + + // The maximum throughput of the MongoDB collection (RU/s). Must be between 1,000 and 1,000,000. Must be set in increments of 1,000. Conflicts with throughput. + MaxThroughput *float64 `json:"maxThroughput,omitempty" tf:"max_throughput,omitempty"` +} + +type MongoCollectionAutoscaleSettingsObservation struct { + + // The maximum throughput of the MongoDB collection (RU/s). Must be between 1,000 and 1,000,000. Must be set in increments of 1,000. Conflicts with throughput. + MaxThroughput *float64 `json:"maxThroughput,omitempty" tf:"max_throughput,omitempty"` +} + +type MongoCollectionAutoscaleSettingsParameters struct { + + // The maximum throughput of the MongoDB collection (RU/s). Must be between 1,000 and 1,000,000. Must be set in increments of 1,000. Conflicts with throughput. + // +kubebuilder:validation:Optional + MaxThroughput *float64 `json:"maxThroughput,omitempty" tf:"max_throughput,omitempty"` +} + +type MongoCollectionIndexInitParameters struct { + + // Specifies the list of user settable keys for each Cosmos DB Mongo Collection. 
+ Keys []*string `json:"keys,omitempty" tf:"keys,omitempty"` + + // Is the index unique or not? Defaults to false. + Unique *bool `json:"unique,omitempty" tf:"unique,omitempty"` +} + +type MongoCollectionIndexObservation struct { + + // Specifies the list of user settable keys for each Cosmos DB Mongo Collection. + Keys []*string `json:"keys,omitempty" tf:"keys,omitempty"` + + // Is the index unique or not? Defaults to false. + Unique *bool `json:"unique,omitempty" tf:"unique,omitempty"` +} + +type MongoCollectionIndexParameters struct { + + // Specifies the list of user settable keys for each Cosmos DB Mongo Collection. + // +kubebuilder:validation:Optional + Keys []*string `json:"keys" tf:"keys,omitempty"` + + // Is the index unique or not? Defaults to false. + // +kubebuilder:validation:Optional + Unique *bool `json:"unique,omitempty" tf:"unique,omitempty"` +} + +type MongoCollectionInitParameters struct { + + // The default time to live of Analytical Storage for this Mongo Collection. If present and the value is set to -1, it is equal to infinity, and items don’t expire by default. If present and the value is set to some number n – items will expire n seconds after their last modified time. + AnalyticalStorageTTL *float64 `json:"analyticalStorageTtl,omitempty" tf:"analytical_storage_ttl,omitempty"` + + // An autoscale_settings block as defined below. + AutoscaleSettings *MongoCollectionAutoscaleSettingsInitParameters `json:"autoscaleSettings,omitempty" tf:"autoscale_settings,omitempty"` + + // The default Time To Live in seconds. If the value is -1, items are not automatically expired. + DefaultTTLSeconds *float64 `json:"defaultTtlSeconds,omitempty" tf:"default_ttl_seconds,omitempty"` + + // One or more index blocks as defined below. + Index []MongoCollectionIndexInitParameters `json:"index,omitempty" tf:"index,omitempty"` + + // The name of the key to partition on for sharding. There must not be any other unique index keys. 
Changing this forces a new resource to be created. + ShardKey *string `json:"shardKey,omitempty" tf:"shard_key,omitempty"` + + // The throughput of the MongoDB collection (RU/s). Must be set in increments of 100. The minimum value is 400. + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` +} + +type MongoCollectionObservation struct { + + // The name of the Cosmos DB Account in which the Cosmos DB Mongo Collection is created. Changing this forces a new resource to be created. + AccountName *string `json:"accountName,omitempty" tf:"account_name,omitempty"` + + // The default time to live of Analytical Storage for this Mongo Collection. If present and the value is set to -1, it is equal to infinity, and items don’t expire by default. If present and the value is set to some number n – items will expire n seconds after their last modified time. + AnalyticalStorageTTL *float64 `json:"analyticalStorageTtl,omitempty" tf:"analytical_storage_ttl,omitempty"` + + // An autoscale_settings block as defined below. + AutoscaleSettings *MongoCollectionAutoscaleSettingsObservation `json:"autoscaleSettings,omitempty" tf:"autoscale_settings,omitempty"` + + // The name of the Cosmos DB Mongo Database in which the Cosmos DB Mongo Collection is created. Changing this forces a new resource to be created. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // The default Time To Live in seconds. If the value is -1, items are not automatically expired. + DefaultTTLSeconds *float64 `json:"defaultTtlSeconds,omitempty" tf:"default_ttl_seconds,omitempty"` + + // The ID of the Cosmos DB Mongo Collection. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // One or more index blocks as defined below. + Index []MongoCollectionIndexObservation `json:"index,omitempty" tf:"index,omitempty"` + + // The name of the resource group in which the Cosmos DB Mongo Collection is created. Changing this forces a new resource to be created. 
+ ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // The name of the key to partition on for sharding. There must not be any other unique index keys. Changing this forces a new resource to be created. + ShardKey *string `json:"shardKey,omitempty" tf:"shard_key,omitempty"` + + // One or more system_indexes blocks as defined below. + SystemIndexes []SystemIndexesObservation `json:"systemIndexes,omitempty" tf:"system_indexes,omitempty"` + + // The throughput of the MongoDB collection (RU/s). Must be set in increments of 100. The minimum value is 400. + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` +} + +type MongoCollectionParameters struct { + + // The name of the Cosmos DB Account in which the Cosmos DB Mongo Collection is created. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cosmosdb/v1beta2.Account + // +kubebuilder:validation:Optional + AccountName *string `json:"accountName,omitempty" tf:"account_name,omitempty"` + + // Reference to a Account in cosmosdb to populate accountName. + // +kubebuilder:validation:Optional + AccountNameRef *v1.Reference `json:"accountNameRef,omitempty" tf:"-"` + + // Selector for a Account in cosmosdb to populate accountName. + // +kubebuilder:validation:Optional + AccountNameSelector *v1.Selector `json:"accountNameSelector,omitempty" tf:"-"` + + // The default time to live of Analytical Storage for this Mongo Collection. If present and the value is set to -1, it is equal to infinity, and items don’t expire by default. If present and the value is set to some number n – items will expire n seconds after their last modified time. + // +kubebuilder:validation:Optional + AnalyticalStorageTTL *float64 `json:"analyticalStorageTtl,omitempty" tf:"analytical_storage_ttl,omitempty"` + + // An autoscale_settings block as defined below. 
+ // +kubebuilder:validation:Optional + AutoscaleSettings *MongoCollectionAutoscaleSettingsParameters `json:"autoscaleSettings,omitempty" tf:"autoscale_settings,omitempty"` + + // The name of the Cosmos DB Mongo Database in which the Cosmos DB Mongo Collection is created. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cosmosdb/v1beta2.MongoDatabase + // +kubebuilder:validation:Optional + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // Reference to a MongoDatabase in cosmosdb to populate databaseName. + // +kubebuilder:validation:Optional + DatabaseNameRef *v1.Reference `json:"databaseNameRef,omitempty" tf:"-"` + + // Selector for a MongoDatabase in cosmosdb to populate databaseName. + // +kubebuilder:validation:Optional + DatabaseNameSelector *v1.Selector `json:"databaseNameSelector,omitempty" tf:"-"` + + // The default Time To Live in seconds. If the value is -1, items are not automatically expired. + // +kubebuilder:validation:Optional + DefaultTTLSeconds *float64 `json:"defaultTtlSeconds,omitempty" tf:"default_ttl_seconds,omitempty"` + + // One or more index blocks as defined below. + // +kubebuilder:validation:Optional + Index []MongoCollectionIndexParameters `json:"index,omitempty" tf:"index,omitempty"` + + // The name of the resource group in which the Cosmos DB Mongo Collection is created. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. 
+ // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // The name of the key to partition on for sharding. There must not be any other unique index keys. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ShardKey *string `json:"shardKey,omitempty" tf:"shard_key,omitempty"` + + // The throughput of the MongoDB collection (RU/s). Must be set in increments of 100. The minimum value is 400. + // +kubebuilder:validation:Optional + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` +} + +type SystemIndexesInitParameters struct { +} + +type SystemIndexesObservation struct { + + // The list of system keys which are not settable for each Cosmos DB Mongo Collection. + Keys []*string `json:"keys,omitempty" tf:"keys,omitempty"` + + // Identifies whether the table contains no duplicate values. + Unique *bool `json:"unique,omitempty" tf:"unique,omitempty"` +} + +type SystemIndexesParameters struct { +} + +// MongoCollectionSpec defines the desired state of MongoCollection +type MongoCollectionSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider MongoCollectionParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider MongoCollectionInitParameters `json:"initProvider,omitempty"` +} + +// MongoCollectionStatus defines the observed state of MongoCollection. +type MongoCollectionStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider MongoCollectionObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// MongoCollection is the Schema for the MongoCollections API. Manages a Mongo Collection within a Cosmos DB Account. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type MongoCollection struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec MongoCollectionSpec `json:"spec"` + Status MongoCollectionStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// MongoCollectionList contains a list of MongoCollections +type MongoCollectionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []MongoCollection `json:"items"` +} + +// Repository type metadata. +var ( + MongoCollection_Kind = "MongoCollection" + MongoCollection_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: MongoCollection_Kind}.String() + MongoCollection_KindAPIVersion = MongoCollection_Kind + "." 
+ CRDGroupVersion.String() + MongoCollection_GroupVersionKind = CRDGroupVersion.WithKind(MongoCollection_Kind) +) + +func init() { + SchemeBuilder.Register(&MongoCollection{}, &MongoCollectionList{}) +} diff --git a/apis/cosmosdb/v1beta2/zz_mongodatabase_terraformed.go b/apis/cosmosdb/v1beta2/zz_mongodatabase_terraformed.go new file mode 100755 index 000000000..655eafb3c --- /dev/null +++ b/apis/cosmosdb/v1beta2/zz_mongodatabase_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this MongoDatabase +func (mg *MongoDatabase) GetTerraformResourceType() string { + return "azurerm_cosmosdb_mongo_database" +} + +// GetConnectionDetailsMapping for this MongoDatabase +func (tr *MongoDatabase) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this MongoDatabase +func (tr *MongoDatabase) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this MongoDatabase +func (tr *MongoDatabase) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this MongoDatabase +func (tr *MongoDatabase) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this MongoDatabase +func (tr *MongoDatabase) GetParameters() (map[string]any, error) { + p, err := 
json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this MongoDatabase +func (tr *MongoDatabase) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this MongoDatabase +func (tr *MongoDatabase) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this MongoDatabase +func (tr *MongoDatabase) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this MongoDatabase using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *MongoDatabase) LateInitialize(attrs []byte) (bool, error) { + params := &MongoDatabaseParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *MongoDatabase) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/cosmosdb/v1beta2/zz_mongodatabase_types.go b/apis/cosmosdb/v1beta2/zz_mongodatabase_types.go new file mode 100755 index 000000000..167eaccb3 --- /dev/null +++ b/apis/cosmosdb/v1beta2/zz_mongodatabase_types.go @@ -0,0 +1,157 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type MongoDatabaseAutoscaleSettingsInitParameters struct { + + // The maximum throughput of the MongoDB database (RU/s). Must be between 1,000 and 1,000,000. Must be set in increments of 1,000. Conflicts with throughput. + MaxThroughput *float64 `json:"maxThroughput,omitempty" tf:"max_throughput,omitempty"` +} + +type MongoDatabaseAutoscaleSettingsObservation struct { + + // The maximum throughput of the MongoDB database (RU/s). Must be between 1,000 and 1,000,000. Must be set in increments of 1,000. Conflicts with throughput. + MaxThroughput *float64 `json:"maxThroughput,omitempty" tf:"max_throughput,omitempty"` +} + +type MongoDatabaseAutoscaleSettingsParameters struct { + + // The maximum throughput of the MongoDB database (RU/s). 
Must be between 1,000 and 1,000,000. Must be set in increments of 1,000. Conflicts with throughput. + // +kubebuilder:validation:Optional + MaxThroughput *float64 `json:"maxThroughput,omitempty" tf:"max_throughput,omitempty"` +} + +type MongoDatabaseInitParameters struct { + + // An autoscale_settings block as defined below. + AutoscaleSettings *MongoDatabaseAutoscaleSettingsInitParameters `json:"autoscaleSettings,omitempty" tf:"autoscale_settings,omitempty"` + + // The throughput of the MongoDB database (RU/s). Must be set in increments of 100. The minimum value is 400. + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` +} + +type MongoDatabaseObservation struct { + + // The name of the Cosmos DB Mongo Database to create the table within. Changing this forces a new resource to be created. + AccountName *string `json:"accountName,omitempty" tf:"account_name,omitempty"` + + // An autoscale_settings block as defined below. + AutoscaleSettings *MongoDatabaseAutoscaleSettingsObservation `json:"autoscaleSettings,omitempty" tf:"autoscale_settings,omitempty"` + + // The ID of the Cosmos DB Mongo Database. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name of the resource group in which the Cosmos DB Mongo Database is created. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // The throughput of the MongoDB database (RU/s). Must be set in increments of 100. The minimum value is 400. + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` +} + +type MongoDatabaseParameters struct { + + // The name of the Cosmos DB Mongo Database to create the table within. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cosmosdb/v1beta2.Account + // +kubebuilder:validation:Optional + AccountName *string `json:"accountName,omitempty" tf:"account_name,omitempty"` + + // Reference to a Account in cosmosdb to populate accountName. + // +kubebuilder:validation:Optional + AccountNameRef *v1.Reference `json:"accountNameRef,omitempty" tf:"-"` + + // Selector for a Account in cosmosdb to populate accountName. + // +kubebuilder:validation:Optional + AccountNameSelector *v1.Selector `json:"accountNameSelector,omitempty" tf:"-"` + + // An autoscale_settings block as defined below. + // +kubebuilder:validation:Optional + AutoscaleSettings *MongoDatabaseAutoscaleSettingsParameters `json:"autoscaleSettings,omitempty" tf:"autoscale_settings,omitempty"` + + // The name of the resource group in which the Cosmos DB Mongo Database is created. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // The throughput of the MongoDB database (RU/s). Must be set in increments of 100. The minimum value is 400. 
+ // +kubebuilder:validation:Optional + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` +} + +// MongoDatabaseSpec defines the desired state of MongoDatabase +type MongoDatabaseSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider MongoDatabaseParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider MongoDatabaseInitParameters `json:"initProvider,omitempty"` +} + +// MongoDatabaseStatus defines the observed state of MongoDatabase. +type MongoDatabaseStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider MongoDatabaseObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// MongoDatabase is the Schema for the MongoDatabases API. Manages a Mongo Database within a Cosmos DB Account. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type MongoDatabase struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec MongoDatabaseSpec `json:"spec"` + Status MongoDatabaseStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// MongoDatabaseList contains a list of MongoDatabases +type MongoDatabaseList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []MongoDatabase `json:"items"` +} + +// Repository type metadata. +var ( + MongoDatabase_Kind = "MongoDatabase" + MongoDatabase_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: MongoDatabase_Kind}.String() + MongoDatabase_KindAPIVersion = MongoDatabase_Kind + "." + CRDGroupVersion.String() + MongoDatabase_GroupVersionKind = CRDGroupVersion.WithKind(MongoDatabase_Kind) +) + +func init() { + SchemeBuilder.Register(&MongoDatabase{}, &MongoDatabaseList{}) +} diff --git a/apis/cosmosdb/v1beta2/zz_sqlcontainer_terraformed.go b/apis/cosmosdb/v1beta2/zz_sqlcontainer_terraformed.go new file mode 100755 index 000000000..470762a42 --- /dev/null +++ b/apis/cosmosdb/v1beta2/zz_sqlcontainer_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this SQLContainer +func (mg *SQLContainer) GetTerraformResourceType() string { + return "azurerm_cosmosdb_sql_container" +} + +// GetConnectionDetailsMapping for this SQLContainer +func (tr *SQLContainer) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this SQLContainer +func (tr *SQLContainer) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this SQLContainer +func (tr *SQLContainer) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this SQLContainer +func (tr *SQLContainer) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this SQLContainer +func (tr *SQLContainer) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this SQLContainer +func (tr *SQLContainer) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this SQLContainer +func (tr *SQLContainer) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + 
} + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this SQLContainer +func (tr *SQLContainer) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this SQLContainer using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *SQLContainer) LateInitialize(attrs []byte) (bool, error) { + params := &SQLContainerParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *SQLContainer) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/cosmosdb/v1beta2/zz_sqlcontainer_types.go b/apis/cosmosdb/v1beta2/zz_sqlcontainer_types.go new file mode 100755 index 000000000..6465efd59 --- /dev/null +++ b/apis/cosmosdb/v1beta2/zz_sqlcontainer_types.go @@ -0,0 +1,473 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CompositeIndexIndexInitParameters struct { + + // Order of the index. Possible values are Ascending or Descending. + Order *string `json:"order,omitempty" tf:"order,omitempty"` + + // Path for which the indexing behaviour applies to. According to the service design, all spatial types including LineString, MultiPolygon, Point, and Polygon will be applied to the path. + Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type CompositeIndexIndexObservation struct { + + // Order of the index. Possible values are Ascending or Descending. + Order *string `json:"order,omitempty" tf:"order,omitempty"` + + // Path for which the indexing behaviour applies to. According to the service design, all spatial types including LineString, MultiPolygon, Point, and Polygon will be applied to the path. + Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type CompositeIndexIndexParameters struct { + + // Order of the index. Possible values are Ascending or Descending. + // +kubebuilder:validation:Optional + Order *string `json:"order" tf:"order,omitempty"` + + // Path for which the indexing behaviour applies to. 
According to the service design, all spatial types including LineString, MultiPolygon, Point, and Polygon will be applied to the path. + // +kubebuilder:validation:Optional + Path *string `json:"path" tf:"path,omitempty"` +} + +type ExcludedPathInitParameters struct { + + // Path for which the indexing behaviour applies to. According to the service design, all spatial types including LineString, MultiPolygon, Point, and Polygon will be applied to the path. + Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type ExcludedPathObservation struct { + + // Path for which the indexing behaviour applies to. According to the service design, all spatial types including LineString, MultiPolygon, Point, and Polygon will be applied to the path. + Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type ExcludedPathParameters struct { + + // Path for which the indexing behaviour applies to. According to the service design, all spatial types including LineString, MultiPolygon, Point, and Polygon will be applied to the path. + // +kubebuilder:validation:Optional + Path *string `json:"path" tf:"path,omitempty"` +} + +type IncludedPathInitParameters struct { + + // Path for which the indexing behaviour applies to. According to the service design, all spatial types including LineString, MultiPolygon, Point, and Polygon will be applied to the path. + Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type IncludedPathObservation struct { + + // Path for which the indexing behaviour applies to. According to the service design, all spatial types including LineString, MultiPolygon, Point, and Polygon will be applied to the path. + Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type IncludedPathParameters struct { + + // Path for which the indexing behaviour applies to. According to the service design, all spatial types including LineString, MultiPolygon, Point, and Polygon will be applied to the path. 
+ // +kubebuilder:validation:Optional + Path *string `json:"path" tf:"path,omitempty"` +} + +type IndexingPolicyCompositeIndexInitParameters struct { + + // One or more index blocks as defined below. + Index []CompositeIndexIndexInitParameters `json:"index,omitempty" tf:"index,omitempty"` +} + +type IndexingPolicyCompositeIndexObservation struct { + + // One or more index blocks as defined below. + Index []CompositeIndexIndexObservation `json:"index,omitempty" tf:"index,omitempty"` +} + +type IndexingPolicyCompositeIndexParameters struct { + + // One or more index blocks as defined below. + // +kubebuilder:validation:Optional + Index []CompositeIndexIndexParameters `json:"index" tf:"index,omitempty"` +} + +type IndexingPolicyInitParameters struct { + + // One or more composite_index blocks as defined below. + CompositeIndex []IndexingPolicyCompositeIndexInitParameters `json:"compositeIndex,omitempty" tf:"composite_index,omitempty"` + + // One or more excluded_path blocks as defined below. Either included_path or excluded_path must contain the path /* + ExcludedPath []ExcludedPathInitParameters `json:"excludedPath,omitempty" tf:"excluded_path,omitempty"` + + // One or more included_path blocks as defined below. Either included_path or excluded_path must contain the path /* + IncludedPath []IncludedPathInitParameters `json:"includedPath,omitempty" tf:"included_path,omitempty"` + + // Indicates the indexing mode. Possible values include: consistent and none. Defaults to consistent. + IndexingMode *string `json:"indexingMode,omitempty" tf:"indexing_mode,omitempty"` + + // One or more spatial_index blocks as defined below. + SpatialIndex []IndexingPolicySpatialIndexInitParameters `json:"spatialIndex,omitempty" tf:"spatial_index,omitempty"` +} + +type IndexingPolicyObservation struct { + + // One or more composite_index blocks as defined below. 
+ CompositeIndex []IndexingPolicyCompositeIndexObservation `json:"compositeIndex,omitempty" tf:"composite_index,omitempty"` + + // One or more excluded_path blocks as defined below. Either included_path or excluded_path must contain the path /* + ExcludedPath []ExcludedPathObservation `json:"excludedPath,omitempty" tf:"excluded_path,omitempty"` + + // One or more included_path blocks as defined below. Either included_path or excluded_path must contain the path /* + IncludedPath []IncludedPathObservation `json:"includedPath,omitempty" tf:"included_path,omitempty"` + + // Indicates the indexing mode. Possible values include: consistent and none. Defaults to consistent. + IndexingMode *string `json:"indexingMode,omitempty" tf:"indexing_mode,omitempty"` + + // One or more spatial_index blocks as defined below. + SpatialIndex []IndexingPolicySpatialIndexObservation `json:"spatialIndex,omitempty" tf:"spatial_index,omitempty"` +} + +type IndexingPolicyParameters struct { + + // One or more composite_index blocks as defined below. + // +kubebuilder:validation:Optional + CompositeIndex []IndexingPolicyCompositeIndexParameters `json:"compositeIndex,omitempty" tf:"composite_index,omitempty"` + + // One or more excluded_path blocks as defined below. Either included_path or excluded_path must contain the path /* + // +kubebuilder:validation:Optional + ExcludedPath []ExcludedPathParameters `json:"excludedPath,omitempty" tf:"excluded_path,omitempty"` + + // One or more included_path blocks as defined below. Either included_path or excluded_path must contain the path /* + // +kubebuilder:validation:Optional + IncludedPath []IncludedPathParameters `json:"includedPath,omitempty" tf:"included_path,omitempty"` + + // Indicates the indexing mode. Possible values include: consistent and none. Defaults to consistent. 
+ // +kubebuilder:validation:Optional + IndexingMode *string `json:"indexingMode,omitempty" tf:"indexing_mode,omitempty"` + + // One or more spatial_index blocks as defined below. + // +kubebuilder:validation:Optional + SpatialIndex []IndexingPolicySpatialIndexParameters `json:"spatialIndex,omitempty" tf:"spatial_index,omitempty"` +} + +type IndexingPolicySpatialIndexInitParameters struct { + + // Path for which the indexing behaviour applies to. According to the service design, all spatial types including LineString, MultiPolygon, Point, and Polygon will be applied to the path. + Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type IndexingPolicySpatialIndexObservation struct { + + // Path for which the indexing behaviour applies to. According to the service design, all spatial types including LineString, MultiPolygon, Point, and Polygon will be applied to the path. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // A set of spatial types of the path. + // +listType=set + Types []*string `json:"types,omitempty" tf:"types,omitempty"` +} + +type IndexingPolicySpatialIndexParameters struct { + + // Path for which the indexing behaviour applies to. According to the service design, all spatial types including LineString, MultiPolygon, Point, and Polygon will be applied to the path. + // +kubebuilder:validation:Optional + Path *string `json:"path" tf:"path,omitempty"` +} + +type SQLContainerAutoscaleSettingsInitParameters struct { + + // The maximum throughput of the SQL container (RU/s). Must be between 1,000 and 1,000,000. Must be set in increments of 1,000. Conflicts with throughput. + MaxThroughput *float64 `json:"maxThroughput,omitempty" tf:"max_throughput,omitempty"` +} + +type SQLContainerAutoscaleSettingsObservation struct { + + // The maximum throughput of the SQL container (RU/s). Must be between 1,000 and 1,000,000. Must be set in increments of 1,000. Conflicts with throughput. 
+ MaxThroughput *float64 `json:"maxThroughput,omitempty" tf:"max_throughput,omitempty"` +} + +type SQLContainerAutoscaleSettingsParameters struct { + + // The maximum throughput of the SQL container (RU/s). Must be between 1,000 and 1,000,000. Must be set in increments of 1,000. Conflicts with throughput. + // +kubebuilder:validation:Optional + MaxThroughput *float64 `json:"maxThroughput,omitempty" tf:"max_throughput,omitempty"` +} + +type SQLContainerConflictResolutionPolicyInitParameters struct { + + // The conflict resolution path in the case of LastWriterWins mode. + ConflictResolutionPath *string `json:"conflictResolutionPath,omitempty" tf:"conflict_resolution_path,omitempty"` + + // The procedure to resolve conflicts in the case of Custom mode. + ConflictResolutionProcedure *string `json:"conflictResolutionProcedure,omitempty" tf:"conflict_resolution_procedure,omitempty"` + + // Indicates the conflict resolution mode. Possible values include: LastWriterWins, Custom. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` +} + +type SQLContainerConflictResolutionPolicyObservation struct { + + // The conflict resolution path in the case of LastWriterWins mode. + ConflictResolutionPath *string `json:"conflictResolutionPath,omitempty" tf:"conflict_resolution_path,omitempty"` + + // The procedure to resolve conflicts in the case of Custom mode. + ConflictResolutionProcedure *string `json:"conflictResolutionProcedure,omitempty" tf:"conflict_resolution_procedure,omitempty"` + + // Indicates the conflict resolution mode. Possible values include: LastWriterWins, Custom. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` +} + +type SQLContainerConflictResolutionPolicyParameters struct { + + // The conflict resolution path in the case of LastWriterWins mode. 
+ // +kubebuilder:validation:Optional + ConflictResolutionPath *string `json:"conflictResolutionPath,omitempty" tf:"conflict_resolution_path,omitempty"` + + // The procedure to resolve conflicts in the case of Custom mode. + // +kubebuilder:validation:Optional + ConflictResolutionProcedure *string `json:"conflictResolutionProcedure,omitempty" tf:"conflict_resolution_procedure,omitempty"` + + // Indicates the conflict resolution mode. Possible values include: LastWriterWins, Custom. + // +kubebuilder:validation:Optional + Mode *string `json:"mode" tf:"mode,omitempty"` +} + +type SQLContainerInitParameters struct { + + // The default time to live of Analytical Storage for this SQL container. If present and the value is set to -1, it is equal to infinity, and items don’t expire by default. If present and the value is set to some number n – items will expire n seconds after their last modified time. + AnalyticalStorageTTL *float64 `json:"analyticalStorageTtl,omitempty" tf:"analytical_storage_ttl,omitempty"` + + // An autoscale_settings block as defined below. Requires partition_key_path to be set. + AutoscaleSettings *SQLContainerAutoscaleSettingsInitParameters `json:"autoscaleSettings,omitempty" tf:"autoscale_settings,omitempty"` + + // A conflict_resolution_policy blocks as defined below. Changing this forces a new resource to be created. + ConflictResolutionPolicy *SQLContainerConflictResolutionPolicyInitParameters `json:"conflictResolutionPolicy,omitempty" tf:"conflict_resolution_policy,omitempty"` + + // The default time to live of SQL container. If missing, items are not expired automatically. If present and the value is set to -1, it is equal to infinity, and items don’t expire by default. If present and the value is set to some number n – items will expire n seconds after their last modified time. + DefaultTTL *float64 `json:"defaultTtl,omitempty" tf:"default_ttl,omitempty"` + + // An indexing_policy block as defined below. 
+ IndexingPolicy *IndexingPolicyInitParameters `json:"indexingPolicy,omitempty" tf:"indexing_policy,omitempty"` + + // Define a partition key. Changing this forces a new resource to be created. + PartitionKeyPath *string `json:"partitionKeyPath,omitempty" tf:"partition_key_path,omitempty"` + + // Define a partition key version. Changing this forces a new resource to be created. Possible values are 1and 2. This should be set to 2 in order to use large partition keys. + PartitionKeyVersion *float64 `json:"partitionKeyVersion,omitempty" tf:"partition_key_version,omitempty"` + + // The throughput of SQL container (RU/s). Must be set in increments of 100. The minimum value is 400. + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` + + // One or more unique_key blocks as defined below. Changing this forces a new resource to be created. + UniqueKey []SQLContainerUniqueKeyInitParameters `json:"uniqueKey,omitempty" tf:"unique_key,omitempty"` +} + +type SQLContainerObservation struct { + + // The name of the Cosmos DB Account to create the container within. Changing this forces a new resource to be created. + AccountName *string `json:"accountName,omitempty" tf:"account_name,omitempty"` + + // The default time to live of Analytical Storage for this SQL container. If present and the value is set to -1, it is equal to infinity, and items don’t expire by default. If present and the value is set to some number n – items will expire n seconds after their last modified time. + AnalyticalStorageTTL *float64 `json:"analyticalStorageTtl,omitempty" tf:"analytical_storage_ttl,omitempty"` + + // An autoscale_settings block as defined below. Requires partition_key_path to be set. + AutoscaleSettings *SQLContainerAutoscaleSettingsObservation `json:"autoscaleSettings,omitempty" tf:"autoscale_settings,omitempty"` + + // A conflict_resolution_policy blocks as defined below. Changing this forces a new resource to be created. 
+ ConflictResolutionPolicy *SQLContainerConflictResolutionPolicyObservation `json:"conflictResolutionPolicy,omitempty" tf:"conflict_resolution_policy,omitempty"` + + // The name of the Cosmos DB SQL Database to create the container within. Changing this forces a new resource to be created. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // The default time to live of SQL container. If missing, items are not expired automatically. If present and the value is set to -1, it is equal to infinity, and items don’t expire by default. If present and the value is set to some number n – items will expire n seconds after their last modified time. + DefaultTTL *float64 `json:"defaultTtl,omitempty" tf:"default_ttl,omitempty"` + + // The ID of the CosmosDB SQL Container. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An indexing_policy block as defined below. + IndexingPolicy *IndexingPolicyObservation `json:"indexingPolicy,omitempty" tf:"indexing_policy,omitempty"` + + // Define a partition key. Changing this forces a new resource to be created. + PartitionKeyPath *string `json:"partitionKeyPath,omitempty" tf:"partition_key_path,omitempty"` + + // Define a partition key version. Changing this forces a new resource to be created. Possible values are 1and 2. This should be set to 2 in order to use large partition keys. + PartitionKeyVersion *float64 `json:"partitionKeyVersion,omitempty" tf:"partition_key_version,omitempty"` + + // The name of the resource group in which the Cosmos DB SQL Container is created. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // The throughput of SQL container (RU/s). Must be set in increments of 100. The minimum value is 400. + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` + + // One or more unique_key blocks as defined below. 
Changing this forces a new resource to be created. + UniqueKey []SQLContainerUniqueKeyObservation `json:"uniqueKey,omitempty" tf:"unique_key,omitempty"` +} + +type SQLContainerParameters struct { + + // The name of the Cosmos DB Account to create the container within. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cosmosdb/v1beta2.Account + // +kubebuilder:validation:Optional + AccountName *string `json:"accountName,omitempty" tf:"account_name,omitempty"` + + // Reference to a Account in cosmosdb to populate accountName. + // +kubebuilder:validation:Optional + AccountNameRef *v1.Reference `json:"accountNameRef,omitempty" tf:"-"` + + // Selector for a Account in cosmosdb to populate accountName. + // +kubebuilder:validation:Optional + AccountNameSelector *v1.Selector `json:"accountNameSelector,omitempty" tf:"-"` + + // The default time to live of Analytical Storage for this SQL container. If present and the value is set to -1, it is equal to infinity, and items don’t expire by default. If present and the value is set to some number n – items will expire n seconds after their last modified time. + // +kubebuilder:validation:Optional + AnalyticalStorageTTL *float64 `json:"analyticalStorageTtl,omitempty" tf:"analytical_storage_ttl,omitempty"` + + // An autoscale_settings block as defined below. Requires partition_key_path to be set. + // +kubebuilder:validation:Optional + AutoscaleSettings *SQLContainerAutoscaleSettingsParameters `json:"autoscaleSettings,omitempty" tf:"autoscale_settings,omitempty"` + + // A conflict_resolution_policy blocks as defined below. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ConflictResolutionPolicy *SQLContainerConflictResolutionPolicyParameters `json:"conflictResolutionPolicy,omitempty" tf:"conflict_resolution_policy,omitempty"` + + // The name of the Cosmos DB SQL Database to create the container within. 
Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cosmosdb/v1beta2.SQLDatabase + // +kubebuilder:validation:Optional + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // Reference to a SQLDatabase in cosmosdb to populate databaseName. + // +kubebuilder:validation:Optional + DatabaseNameRef *v1.Reference `json:"databaseNameRef,omitempty" tf:"-"` + + // Selector for a SQLDatabase in cosmosdb to populate databaseName. + // +kubebuilder:validation:Optional + DatabaseNameSelector *v1.Selector `json:"databaseNameSelector,omitempty" tf:"-"` + + // The default time to live of SQL container. If missing, items are not expired automatically. If present and the value is set to -1, it is equal to infinity, and items don’t expire by default. If present and the value is set to some number n – items will expire n seconds after their last modified time. + // +kubebuilder:validation:Optional + DefaultTTL *float64 `json:"defaultTtl,omitempty" tf:"default_ttl,omitempty"` + + // An indexing_policy block as defined below. + // +kubebuilder:validation:Optional + IndexingPolicy *IndexingPolicyParameters `json:"indexingPolicy,omitempty" tf:"indexing_policy,omitempty"` + + // Define a partition key. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + PartitionKeyPath *string `json:"partitionKeyPath,omitempty" tf:"partition_key_path,omitempty"` + + // Define a partition key version. Changing this forces a new resource to be created. Possible values are 1and 2. This should be set to 2 in order to use large partition keys. + // +kubebuilder:validation:Optional + PartitionKeyVersion *float64 `json:"partitionKeyVersion,omitempty" tf:"partition_key_version,omitempty"` + + // The name of the resource group in which the Cosmos DB SQL Container is created. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // The throughput of SQL container (RU/s). Must be set in increments of 100. The minimum value is 400. + // +kubebuilder:validation:Optional + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` + + // One or more unique_key blocks as defined below. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + UniqueKey []SQLContainerUniqueKeyParameters `json:"uniqueKey,omitempty" tf:"unique_key,omitempty"` +} + +type SQLContainerUniqueKeyInitParameters struct { + + // A list of paths to use for this unique key. Changing this forces a new resource to be created. + // +listType=set + Paths []*string `json:"paths,omitempty" tf:"paths,omitempty"` +} + +type SQLContainerUniqueKeyObservation struct { + + // A list of paths to use for this unique key. Changing this forces a new resource to be created. + // +listType=set + Paths []*string `json:"paths,omitempty" tf:"paths,omitempty"` +} + +type SQLContainerUniqueKeyParameters struct { + + // A list of paths to use for this unique key. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + // +listType=set + Paths []*string `json:"paths" tf:"paths,omitempty"` +} + +// SQLContainerSpec defines the desired state of SQLContainer +type SQLContainerSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider SQLContainerParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider SQLContainerInitParameters `json:"initProvider,omitempty"` +} + +// SQLContainerStatus defines the observed state of SQLContainer. +type SQLContainerStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SQLContainerObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// SQLContainer is the Schema for the SQLContainers API. Manages a SQL Container within a Cosmos DB Account. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type SQLContainer struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.partitionKeyPath) || (has(self.initProvider) && has(self.initProvider.partitionKeyPath))",message="spec.forProvider.partitionKeyPath is a required parameter" + Spec SQLContainerSpec `json:"spec"` + Status SQLContainerStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SQLContainerList contains a list of SQLContainers +type SQLContainerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SQLContainer `json:"items"` +} + +// Repository type metadata. +var ( + SQLContainer_Kind = "SQLContainer" + SQLContainer_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: SQLContainer_Kind}.String() + SQLContainer_KindAPIVersion = SQLContainer_Kind + "." 
+ CRDGroupVersion.String() + SQLContainer_GroupVersionKind = CRDGroupVersion.WithKind(SQLContainer_Kind) +) + +func init() { + SchemeBuilder.Register(&SQLContainer{}, &SQLContainerList{}) +} diff --git a/apis/cosmosdb/v1beta2/zz_sqldatabase_terraformed.go b/apis/cosmosdb/v1beta2/zz_sqldatabase_terraformed.go new file mode 100755 index 000000000..4913b116e --- /dev/null +++ b/apis/cosmosdb/v1beta2/zz_sqldatabase_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this SQLDatabase +func (mg *SQLDatabase) GetTerraformResourceType() string { + return "azurerm_cosmosdb_sql_database" +} + +// GetConnectionDetailsMapping for this SQLDatabase +func (tr *SQLDatabase) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this SQLDatabase +func (tr *SQLDatabase) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this SQLDatabase +func (tr *SQLDatabase) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this SQLDatabase +func (tr *SQLDatabase) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this SQLDatabase +func (tr *SQLDatabase) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) 
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this SQLDatabase
+func (tr *SQLDatabase) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this SQLDatabase
+func (tr *SQLDatabase) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this SQLDatabase
+func (tr *SQLDatabase) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this SQLDatabase using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *SQLDatabase) LateInitialize(attrs []byte) (bool, error) { + params := &SQLDatabaseParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *SQLDatabase) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/cosmosdb/v1beta2/zz_sqldatabase_types.go b/apis/cosmosdb/v1beta2/zz_sqldatabase_types.go new file mode 100755 index 000000000..285ebad87 --- /dev/null +++ b/apis/cosmosdb/v1beta2/zz_sqldatabase_types.go @@ -0,0 +1,157 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type SQLDatabaseAutoscaleSettingsInitParameters struct { + + // The maximum throughput of the SQL database (RU/s). Must be between 1,000 and 1,000,000. Must be set in increments of 1,000. Conflicts with throughput. + MaxThroughput *float64 `json:"maxThroughput,omitempty" tf:"max_throughput,omitempty"` +} + +type SQLDatabaseAutoscaleSettingsObservation struct { + + // The maximum throughput of the SQL database (RU/s). Must be between 1,000 and 1,000,000. Must be set in increments of 1,000. Conflicts with throughput. + MaxThroughput *float64 `json:"maxThroughput,omitempty" tf:"max_throughput,omitempty"` +} + +type SQLDatabaseAutoscaleSettingsParameters struct { + + // The maximum throughput of the SQL database (RU/s). 
Must be between 1,000 and 1,000,000. Must be set in increments of 1,000. Conflicts with throughput. + // +kubebuilder:validation:Optional + MaxThroughput *float64 `json:"maxThroughput,omitempty" tf:"max_throughput,omitempty"` +} + +type SQLDatabaseInitParameters struct { + + // An autoscale_settings block as defined below. + AutoscaleSettings *SQLDatabaseAutoscaleSettingsInitParameters `json:"autoscaleSettings,omitempty" tf:"autoscale_settings,omitempty"` + + // The throughput of SQL database (RU/s). Must be set in increments of 100. The minimum value is 400. Do not set when azurerm_cosmosdb_account is configured with EnableServerless capability. + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` +} + +type SQLDatabaseObservation struct { + + // The name of the Cosmos DB SQL Database to create the table within. Changing this forces a new resource to be created. + AccountName *string `json:"accountName,omitempty" tf:"account_name,omitempty"` + + // An autoscale_settings block as defined below. + AutoscaleSettings *SQLDatabaseAutoscaleSettingsObservation `json:"autoscaleSettings,omitempty" tf:"autoscale_settings,omitempty"` + + // The ID of the CosmosDB SQL Database. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name of the resource group in which the Cosmos DB SQL Database is created. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // The throughput of SQL database (RU/s). Must be set in increments of 100. The minimum value is 400. Do not set when azurerm_cosmosdb_account is configured with EnableServerless capability. + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` +} + +type SQLDatabaseParameters struct { + + // The name of the Cosmos DB SQL Database to create the table within. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cosmosdb/v1beta2.Account + // +kubebuilder:validation:Optional + AccountName *string `json:"accountName,omitempty" tf:"account_name,omitempty"` + + // Reference to a Account in cosmosdb to populate accountName. + // +kubebuilder:validation:Optional + AccountNameRef *v1.Reference `json:"accountNameRef,omitempty" tf:"-"` + + // Selector for a Account in cosmosdb to populate accountName. + // +kubebuilder:validation:Optional + AccountNameSelector *v1.Selector `json:"accountNameSelector,omitempty" tf:"-"` + + // An autoscale_settings block as defined below. + // +kubebuilder:validation:Optional + AutoscaleSettings *SQLDatabaseAutoscaleSettingsParameters `json:"autoscaleSettings,omitempty" tf:"autoscale_settings,omitempty"` + + // The name of the resource group in which the Cosmos DB SQL Database is created. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // The throughput of SQL database (RU/s). Must be set in increments of 100. The minimum value is 400. Do not set when azurerm_cosmosdb_account is configured with EnableServerless capability. 
+ // +kubebuilder:validation:Optional + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` +} + +// SQLDatabaseSpec defines the desired state of SQLDatabase +type SQLDatabaseSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider SQLDatabaseParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider SQLDatabaseInitParameters `json:"initProvider,omitempty"` +} + +// SQLDatabaseStatus defines the observed state of SQLDatabase. +type SQLDatabaseStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SQLDatabaseObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// SQLDatabase is the Schema for the SQLDatabases API. Manages a SQL Database within a Cosmos DB Account. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type SQLDatabase struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec SQLDatabaseSpec `json:"spec"` + Status SQLDatabaseStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SQLDatabaseList contains a list of SQLDatabases +type SQLDatabaseList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SQLDatabase `json:"items"` +} + +// Repository type metadata. +var ( + SQLDatabase_Kind = "SQLDatabase" + SQLDatabase_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: SQLDatabase_Kind}.String() + SQLDatabase_KindAPIVersion = SQLDatabase_Kind + "." + CRDGroupVersion.String() + SQLDatabase_GroupVersionKind = CRDGroupVersion.WithKind(SQLDatabase_Kind) +) + +func init() { + SchemeBuilder.Register(&SQLDatabase{}, &SQLDatabaseList{}) +} diff --git a/apis/cosmosdb/v1beta2/zz_table_terraformed.go b/apis/cosmosdb/v1beta2/zz_table_terraformed.go new file mode 100755 index 000000000..ca0740e75 --- /dev/null +++ b/apis/cosmosdb/v1beta2/zz_table_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Table +func (mg *Table) GetTerraformResourceType() string { + return "azurerm_cosmosdb_table" +} + +// GetConnectionDetailsMapping for this Table +func (tr *Table) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Table +func (tr *Table) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Table +func (tr *Table) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Table +func (tr *Table) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Table +func (tr *Table) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Table +func (tr *Table) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Table +func (tr *Table) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Table 
+func (tr *Table) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this Table using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *Table) LateInitialize(attrs []byte) (bool, error) {
+	params := &TableParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Table) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/cosmosdb/v1beta2/zz_table_types.go b/apis/cosmosdb/v1beta2/zz_table_types.go new file mode 100755 index 000000000..8daeb7632 --- /dev/null +++ b/apis/cosmosdb/v1beta2/zz_table_types.go @@ -0,0 +1,157 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type TableAutoscaleSettingsInitParameters struct { + + // The maximum throughput of the Table (RU/s). Must be between 1,000 and 1,000,000. Must be set in increments of 1,000. Conflicts with throughput. + MaxThroughput *float64 `json:"maxThroughput,omitempty" tf:"max_throughput,omitempty"` +} + +type TableAutoscaleSettingsObservation struct { + + // The maximum throughput of the Table (RU/s). Must be between 1,000 and 1,000,000. Must be set in increments of 1,000. Conflicts with throughput. + MaxThroughput *float64 `json:"maxThroughput,omitempty" tf:"max_throughput,omitempty"` +} + +type TableAutoscaleSettingsParameters struct { + + // The maximum throughput of the Table (RU/s). Must be between 1,000 and 1,000,000. Must be set in increments of 1,000. Conflicts with throughput. + // +kubebuilder:validation:Optional + MaxThroughput *float64 `json:"maxThroughput,omitempty" tf:"max_throughput,omitempty"` +} + +type TableInitParameters struct { + + // An autoscale_settings block as defined below. + AutoscaleSettings *TableAutoscaleSettingsInitParameters `json:"autoscaleSettings,omitempty" tf:"autoscale_settings,omitempty"` + + // The throughput of Table (RU/s). Must be set in increments of 100. The minimum value is 400. 
+ Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` +} + +type TableObservation struct { + + // The name of the Cosmos DB Table to create the table within. Changing this forces a new resource to be created. + AccountName *string `json:"accountName,omitempty" tf:"account_name,omitempty"` + + // An autoscale_settings block as defined below. + AutoscaleSettings *TableAutoscaleSettingsObservation `json:"autoscaleSettings,omitempty" tf:"autoscale_settings,omitempty"` + + // The ID of the CosmosDB Table. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name of the resource group in which the Cosmos DB Table is created. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // The throughput of Table (RU/s). Must be set in increments of 100. The minimum value is 400. + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` +} + +type TableParameters struct { + + // The name of the Cosmos DB Table to create the table within. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cosmosdb/v1beta2.Account + // +kubebuilder:validation:Optional + AccountName *string `json:"accountName,omitempty" tf:"account_name,omitempty"` + + // Reference to a Account in cosmosdb to populate accountName. + // +kubebuilder:validation:Optional + AccountNameRef *v1.Reference `json:"accountNameRef,omitempty" tf:"-"` + + // Selector for a Account in cosmosdb to populate accountName. + // +kubebuilder:validation:Optional + AccountNameSelector *v1.Selector `json:"accountNameSelector,omitempty" tf:"-"` + + // An autoscale_settings block as defined below. 
+ // +kubebuilder:validation:Optional + AutoscaleSettings *TableAutoscaleSettingsParameters `json:"autoscaleSettings,omitempty" tf:"autoscale_settings,omitempty"` + + // The name of the resource group in which the Cosmos DB Table is created. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // The throughput of Table (RU/s). Must be set in increments of 100. The minimum value is 400. + // +kubebuilder:validation:Optional + Throughput *float64 `json:"throughput,omitempty" tf:"throughput,omitempty"` +} + +// TableSpec defines the desired state of Table +type TableSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider TableParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider TableInitParameters `json:"initProvider,omitempty"` +} + +// TableStatus defines the observed state of Table. +type TableStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider TableObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Table is the Schema for the Tables API. Manages a Table within a Cosmos DB Account. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type Table struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec TableSpec `json:"spec"` + Status TableStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// TableList contains a list of Tables +type TableList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Table `json:"items"` +} + +// Repository type metadata. +var ( + Table_Kind = "Table" + Table_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Table_Kind}.String() + Table_KindAPIVersion = Table_Kind + "." 
+ CRDGroupVersion.String() + Table_GroupVersionKind = CRDGroupVersion.WithKind(Table_Kind) +) + +func init() { + SchemeBuilder.Register(&Table{}, &TableList{}) +} diff --git a/apis/costmanagement/v1beta1/zz_generated.conversion_hubs.go b/apis/costmanagement/v1beta1/zz_generated.conversion_hubs.go index 1b3d53996..a8dd7e8d2 100755 --- a/apis/costmanagement/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/costmanagement/v1beta1/zz_generated.conversion_hubs.go @@ -8,9 +8,3 @@ package v1beta1 // Hub marks this type as a conversion hub. func (tr *CostAnomalyAlert) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *ResourceGroupCostManagementExport) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *SubscriptionCostManagementExport) Hub() {} diff --git a/apis/costmanagement/v1beta1/zz_generated.conversion_spokes.go b/apis/costmanagement/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..7df9a4068 --- /dev/null +++ b/apis/costmanagement/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,54 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this ResourceGroupCostManagementExport to the hub type. 
+func (tr *ResourceGroupCostManagementExport) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ResourceGroupCostManagementExport type. +func (tr *ResourceGroupCostManagementExport) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this SubscriptionCostManagementExport to the hub type. +func (tr *SubscriptionCostManagementExport) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the SubscriptionCostManagementExport type. 
+func (tr *SubscriptionCostManagementExport) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/costmanagement/v1beta2/zz_generated.conversion_hubs.go b/apis/costmanagement/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 000000000..ffafa8bb0 --- /dev/null +++ b/apis/costmanagement/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,13 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *ResourceGroupCostManagementExport) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *SubscriptionCostManagementExport) Hub() {} diff --git a/apis/costmanagement/v1beta2/zz_generated.deepcopy.go b/apis/costmanagement/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..f368c80b3 --- /dev/null +++ b/apis/costmanagement/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,907 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExportDataOptionsInitParameters) DeepCopyInto(out *ExportDataOptionsInitParameters) { + *out = *in + if in.TimeFrame != nil { + in, out := &in.TimeFrame, &out.TimeFrame + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExportDataOptionsInitParameters. +func (in *ExportDataOptionsInitParameters) DeepCopy() *ExportDataOptionsInitParameters { + if in == nil { + return nil + } + out := new(ExportDataOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExportDataOptionsObservation) DeepCopyInto(out *ExportDataOptionsObservation) { + *out = *in + if in.TimeFrame != nil { + in, out := &in.TimeFrame, &out.TimeFrame + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExportDataOptionsObservation. +func (in *ExportDataOptionsObservation) DeepCopy() *ExportDataOptionsObservation { + if in == nil { + return nil + } + out := new(ExportDataOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExportDataOptionsParameters) DeepCopyInto(out *ExportDataOptionsParameters) { + *out = *in + if in.TimeFrame != nil { + in, out := &in.TimeFrame, &out.TimeFrame + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExportDataOptionsParameters. 
+func (in *ExportDataOptionsParameters) DeepCopy() *ExportDataOptionsParameters { + if in == nil { + return nil + } + out := new(ExportDataOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExportDataStorageLocationInitParameters) DeepCopyInto(out *ExportDataStorageLocationInitParameters) { + *out = *in + if in.ContainerID != nil { + in, out := &in.ContainerID, &out.ContainerID + *out = new(string) + **out = **in + } + if in.ContainerIDRef != nil { + in, out := &in.ContainerIDRef, &out.ContainerIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ContainerIDSelector != nil { + in, out := &in.ContainerIDSelector, &out.ContainerIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RootFolderPath != nil { + in, out := &in.RootFolderPath, &out.RootFolderPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExportDataStorageLocationInitParameters. +func (in *ExportDataStorageLocationInitParameters) DeepCopy() *ExportDataStorageLocationInitParameters { + if in == nil { + return nil + } + out := new(ExportDataStorageLocationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExportDataStorageLocationObservation) DeepCopyInto(out *ExportDataStorageLocationObservation) { + *out = *in + if in.ContainerID != nil { + in, out := &in.ContainerID, &out.ContainerID + *out = new(string) + **out = **in + } + if in.RootFolderPath != nil { + in, out := &in.RootFolderPath, &out.RootFolderPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExportDataStorageLocationObservation. 
+func (in *ExportDataStorageLocationObservation) DeepCopy() *ExportDataStorageLocationObservation { + if in == nil { + return nil + } + out := new(ExportDataStorageLocationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExportDataStorageLocationParameters) DeepCopyInto(out *ExportDataStorageLocationParameters) { + *out = *in + if in.ContainerID != nil { + in, out := &in.ContainerID, &out.ContainerID + *out = new(string) + **out = **in + } + if in.ContainerIDRef != nil { + in, out := &in.ContainerIDRef, &out.ContainerIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ContainerIDSelector != nil { + in, out := &in.ContainerIDSelector, &out.ContainerIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RootFolderPath != nil { + in, out := &in.RootFolderPath, &out.RootFolderPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExportDataStorageLocationParameters. +func (in *ExportDataStorageLocationParameters) DeepCopy() *ExportDataStorageLocationParameters { + if in == nil { + return nil + } + out := new(ExportDataStorageLocationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceGroupCostManagementExport) DeepCopyInto(out *ResourceGroupCostManagementExport) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceGroupCostManagementExport. 
+func (in *ResourceGroupCostManagementExport) DeepCopy() *ResourceGroupCostManagementExport { + if in == nil { + return nil + } + out := new(ResourceGroupCostManagementExport) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceGroupCostManagementExport) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceGroupCostManagementExportInitParameters) DeepCopyInto(out *ResourceGroupCostManagementExportInitParameters) { + *out = *in + if in.Active != nil { + in, out := &in.Active, &out.Active + *out = new(bool) + **out = **in + } + if in.ExportDataOptions != nil { + in, out := &in.ExportDataOptions, &out.ExportDataOptions + *out = new(ExportDataOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ExportDataStorageLocation != nil { + in, out := &in.ExportDataStorageLocation, &out.ExportDataStorageLocation + *out = new(ExportDataStorageLocationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RecurrencePeriodEndDate != nil { + in, out := &in.RecurrencePeriodEndDate, &out.RecurrencePeriodEndDate + *out = new(string) + **out = **in + } + if in.RecurrencePeriodStartDate != nil { + in, out := &in.RecurrencePeriodStartDate, &out.RecurrencePeriodStartDate + *out = new(string) + **out = **in + } + if in.RecurrenceType != nil { + in, out := &in.RecurrenceType, &out.RecurrenceType + *out = new(string) + **out = **in + } + if in.ResourceGroupID != nil { + in, out := &in.ResourceGroupID, &out.ResourceGroupID + *out = new(string) + **out = **in + } + if in.ResourceGroupIDRef != nil { + in, out := &in.ResourceGroupIDRef, &out.ResourceGroupIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupIDSelector != nil { + in, out := 
&in.ResourceGroupIDSelector, &out.ResourceGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceGroupCostManagementExportInitParameters. +func (in *ResourceGroupCostManagementExportInitParameters) DeepCopy() *ResourceGroupCostManagementExportInitParameters { + if in == nil { + return nil + } + out := new(ResourceGroupCostManagementExportInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceGroupCostManagementExportList) DeepCopyInto(out *ResourceGroupCostManagementExportList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ResourceGroupCostManagementExport, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceGroupCostManagementExportList. +func (in *ResourceGroupCostManagementExportList) DeepCopy() *ResourceGroupCostManagementExportList { + if in == nil { + return nil + } + out := new(ResourceGroupCostManagementExportList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceGroupCostManagementExportList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceGroupCostManagementExportObservation) DeepCopyInto(out *ResourceGroupCostManagementExportObservation) { + *out = *in + if in.Active != nil { + in, out := &in.Active, &out.Active + *out = new(bool) + **out = **in + } + if in.ExportDataOptions != nil { + in, out := &in.ExportDataOptions, &out.ExportDataOptions + *out = new(ExportDataOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.ExportDataStorageLocation != nil { + in, out := &in.ExportDataStorageLocation, &out.ExportDataStorageLocation + *out = new(ExportDataStorageLocationObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.RecurrencePeriodEndDate != nil { + in, out := &in.RecurrencePeriodEndDate, &out.RecurrencePeriodEndDate + *out = new(string) + **out = **in + } + if in.RecurrencePeriodStartDate != nil { + in, out := &in.RecurrencePeriodStartDate, &out.RecurrencePeriodStartDate + *out = new(string) + **out = **in + } + if in.RecurrenceType != nil { + in, out := &in.RecurrenceType, &out.RecurrenceType + *out = new(string) + **out = **in + } + if in.ResourceGroupID != nil { + in, out := &in.ResourceGroupID, &out.ResourceGroupID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceGroupCostManagementExportObservation. +func (in *ResourceGroupCostManagementExportObservation) DeepCopy() *ResourceGroupCostManagementExportObservation { + if in == nil { + return nil + } + out := new(ResourceGroupCostManagementExportObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceGroupCostManagementExportParameters) DeepCopyInto(out *ResourceGroupCostManagementExportParameters) { + *out = *in + if in.Active != nil { + in, out := &in.Active, &out.Active + *out = new(bool) + **out = **in + } + if in.ExportDataOptions != nil { + in, out := &in.ExportDataOptions, &out.ExportDataOptions + *out = new(ExportDataOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.ExportDataStorageLocation != nil { + in, out := &in.ExportDataStorageLocation, &out.ExportDataStorageLocation + *out = new(ExportDataStorageLocationParameters) + (*in).DeepCopyInto(*out) + } + if in.RecurrencePeriodEndDate != nil { + in, out := &in.RecurrencePeriodEndDate, &out.RecurrencePeriodEndDate + *out = new(string) + **out = **in + } + if in.RecurrencePeriodStartDate != nil { + in, out := &in.RecurrencePeriodStartDate, &out.RecurrencePeriodStartDate + *out = new(string) + **out = **in + } + if in.RecurrenceType != nil { + in, out := &in.RecurrenceType, &out.RecurrenceType + *out = new(string) + **out = **in + } + if in.ResourceGroupID != nil { + in, out := &in.ResourceGroupID, &out.ResourceGroupID + *out = new(string) + **out = **in + } + if in.ResourceGroupIDRef != nil { + in, out := &in.ResourceGroupIDRef, &out.ResourceGroupIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupIDSelector != nil { + in, out := &in.ResourceGroupIDSelector, &out.ResourceGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceGroupCostManagementExportParameters. +func (in *ResourceGroupCostManagementExportParameters) DeepCopy() *ResourceGroupCostManagementExportParameters { + if in == nil { + return nil + } + out := new(ResourceGroupCostManagementExportParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceGroupCostManagementExportSpec) DeepCopyInto(out *ResourceGroupCostManagementExportSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceGroupCostManagementExportSpec. +func (in *ResourceGroupCostManagementExportSpec) DeepCopy() *ResourceGroupCostManagementExportSpec { + if in == nil { + return nil + } + out := new(ResourceGroupCostManagementExportSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceGroupCostManagementExportStatus) DeepCopyInto(out *ResourceGroupCostManagementExportStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceGroupCostManagementExportStatus. +func (in *ResourceGroupCostManagementExportStatus) DeepCopy() *ResourceGroupCostManagementExportStatus { + if in == nil { + return nil + } + out := new(ResourceGroupCostManagementExportStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubscriptionCostManagementExport) DeepCopyInto(out *SubscriptionCostManagementExport) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionCostManagementExport. 
+func (in *SubscriptionCostManagementExport) DeepCopy() *SubscriptionCostManagementExport { + if in == nil { + return nil + } + out := new(SubscriptionCostManagementExport) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SubscriptionCostManagementExport) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubscriptionCostManagementExportExportDataOptionsInitParameters) DeepCopyInto(out *SubscriptionCostManagementExportExportDataOptionsInitParameters) { + *out = *in + if in.TimeFrame != nil { + in, out := &in.TimeFrame, &out.TimeFrame + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionCostManagementExportExportDataOptionsInitParameters. +func (in *SubscriptionCostManagementExportExportDataOptionsInitParameters) DeepCopy() *SubscriptionCostManagementExportExportDataOptionsInitParameters { + if in == nil { + return nil + } + out := new(SubscriptionCostManagementExportExportDataOptionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SubscriptionCostManagementExportExportDataOptionsObservation) DeepCopyInto(out *SubscriptionCostManagementExportExportDataOptionsObservation) { + *out = *in + if in.TimeFrame != nil { + in, out := &in.TimeFrame, &out.TimeFrame + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionCostManagementExportExportDataOptionsObservation. +func (in *SubscriptionCostManagementExportExportDataOptionsObservation) DeepCopy() *SubscriptionCostManagementExportExportDataOptionsObservation { + if in == nil { + return nil + } + out := new(SubscriptionCostManagementExportExportDataOptionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubscriptionCostManagementExportExportDataOptionsParameters) DeepCopyInto(out *SubscriptionCostManagementExportExportDataOptionsParameters) { + *out = *in + if in.TimeFrame != nil { + in, out := &in.TimeFrame, &out.TimeFrame + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionCostManagementExportExportDataOptionsParameters. +func (in *SubscriptionCostManagementExportExportDataOptionsParameters) DeepCopy() *SubscriptionCostManagementExportExportDataOptionsParameters { + if in == nil { + return nil + } + out := new(SubscriptionCostManagementExportExportDataOptionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SubscriptionCostManagementExportExportDataStorageLocationInitParameters) DeepCopyInto(out *SubscriptionCostManagementExportExportDataStorageLocationInitParameters) { + *out = *in + if in.ContainerID != nil { + in, out := &in.ContainerID, &out.ContainerID + *out = new(string) + **out = **in + } + if in.ContainerIDRef != nil { + in, out := &in.ContainerIDRef, &out.ContainerIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ContainerIDSelector != nil { + in, out := &in.ContainerIDSelector, &out.ContainerIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RootFolderPath != nil { + in, out := &in.RootFolderPath, &out.RootFolderPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionCostManagementExportExportDataStorageLocationInitParameters. +func (in *SubscriptionCostManagementExportExportDataStorageLocationInitParameters) DeepCopy() *SubscriptionCostManagementExportExportDataStorageLocationInitParameters { + if in == nil { + return nil + } + out := new(SubscriptionCostManagementExportExportDataStorageLocationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubscriptionCostManagementExportExportDataStorageLocationObservation) DeepCopyInto(out *SubscriptionCostManagementExportExportDataStorageLocationObservation) { + *out = *in + if in.ContainerID != nil { + in, out := &in.ContainerID, &out.ContainerID + *out = new(string) + **out = **in + } + if in.RootFolderPath != nil { + in, out := &in.RootFolderPath, &out.RootFolderPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionCostManagementExportExportDataStorageLocationObservation. 
+func (in *SubscriptionCostManagementExportExportDataStorageLocationObservation) DeepCopy() *SubscriptionCostManagementExportExportDataStorageLocationObservation { + if in == nil { + return nil + } + out := new(SubscriptionCostManagementExportExportDataStorageLocationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubscriptionCostManagementExportExportDataStorageLocationParameters) DeepCopyInto(out *SubscriptionCostManagementExportExportDataStorageLocationParameters) { + *out = *in + if in.ContainerID != nil { + in, out := &in.ContainerID, &out.ContainerID + *out = new(string) + **out = **in + } + if in.ContainerIDRef != nil { + in, out := &in.ContainerIDRef, &out.ContainerIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ContainerIDSelector != nil { + in, out := &in.ContainerIDSelector, &out.ContainerIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RootFolderPath != nil { + in, out := &in.RootFolderPath, &out.RootFolderPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionCostManagementExportExportDataStorageLocationParameters. +func (in *SubscriptionCostManagementExportExportDataStorageLocationParameters) DeepCopy() *SubscriptionCostManagementExportExportDataStorageLocationParameters { + if in == nil { + return nil + } + out := new(SubscriptionCostManagementExportExportDataStorageLocationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SubscriptionCostManagementExportInitParameters) DeepCopyInto(out *SubscriptionCostManagementExportInitParameters) { + *out = *in + if in.Active != nil { + in, out := &in.Active, &out.Active + *out = new(bool) + **out = **in + } + if in.ExportDataOptions != nil { + in, out := &in.ExportDataOptions, &out.ExportDataOptions + *out = new(SubscriptionCostManagementExportExportDataOptionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ExportDataStorageLocation != nil { + in, out := &in.ExportDataStorageLocation, &out.ExportDataStorageLocation + *out = new(SubscriptionCostManagementExportExportDataStorageLocationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RecurrencePeriodEndDate != nil { + in, out := &in.RecurrencePeriodEndDate, &out.RecurrencePeriodEndDate + *out = new(string) + **out = **in + } + if in.RecurrencePeriodStartDate != nil { + in, out := &in.RecurrencePeriodStartDate, &out.RecurrencePeriodStartDate + *out = new(string) + **out = **in + } + if in.RecurrenceType != nil { + in, out := &in.RecurrenceType, &out.RecurrenceType + *out = new(string) + **out = **in + } + if in.SubscriptionID != nil { + in, out := &in.SubscriptionID, &out.SubscriptionID + *out = new(string) + **out = **in + } + if in.SubscriptionIDRef != nil { + in, out := &in.SubscriptionIDRef, &out.SubscriptionIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubscriptionIDSelector != nil { + in, out := &in.SubscriptionIDSelector, &out.SubscriptionIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionCostManagementExportInitParameters. 
+func (in *SubscriptionCostManagementExportInitParameters) DeepCopy() *SubscriptionCostManagementExportInitParameters { + if in == nil { + return nil + } + out := new(SubscriptionCostManagementExportInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubscriptionCostManagementExportList) DeepCopyInto(out *SubscriptionCostManagementExportList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SubscriptionCostManagementExport, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionCostManagementExportList. +func (in *SubscriptionCostManagementExportList) DeepCopy() *SubscriptionCostManagementExportList { + if in == nil { + return nil + } + out := new(SubscriptionCostManagementExportList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SubscriptionCostManagementExportList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SubscriptionCostManagementExportObservation) DeepCopyInto(out *SubscriptionCostManagementExportObservation) { + *out = *in + if in.Active != nil { + in, out := &in.Active, &out.Active + *out = new(bool) + **out = **in + } + if in.ExportDataOptions != nil { + in, out := &in.ExportDataOptions, &out.ExportDataOptions + *out = new(SubscriptionCostManagementExportExportDataOptionsObservation) + (*in).DeepCopyInto(*out) + } + if in.ExportDataStorageLocation != nil { + in, out := &in.ExportDataStorageLocation, &out.ExportDataStorageLocation + *out = new(SubscriptionCostManagementExportExportDataStorageLocationObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RecurrencePeriodEndDate != nil { + in, out := &in.RecurrencePeriodEndDate, &out.RecurrencePeriodEndDate + *out = new(string) + **out = **in + } + if in.RecurrencePeriodStartDate != nil { + in, out := &in.RecurrencePeriodStartDate, &out.RecurrencePeriodStartDate + *out = new(string) + **out = **in + } + if in.RecurrenceType != nil { + in, out := &in.RecurrenceType, &out.RecurrenceType + *out = new(string) + **out = **in + } + if in.SubscriptionID != nil { + in, out := &in.SubscriptionID, &out.SubscriptionID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionCostManagementExportObservation. +func (in *SubscriptionCostManagementExportObservation) DeepCopy() *SubscriptionCostManagementExportObservation { + if in == nil { + return nil + } + out := new(SubscriptionCostManagementExportObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SubscriptionCostManagementExportParameters) DeepCopyInto(out *SubscriptionCostManagementExportParameters) { + *out = *in + if in.Active != nil { + in, out := &in.Active, &out.Active + *out = new(bool) + **out = **in + } + if in.ExportDataOptions != nil { + in, out := &in.ExportDataOptions, &out.ExportDataOptions + *out = new(SubscriptionCostManagementExportExportDataOptionsParameters) + (*in).DeepCopyInto(*out) + } + if in.ExportDataStorageLocation != nil { + in, out := &in.ExportDataStorageLocation, &out.ExportDataStorageLocation + *out = new(SubscriptionCostManagementExportExportDataStorageLocationParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RecurrencePeriodEndDate != nil { + in, out := &in.RecurrencePeriodEndDate, &out.RecurrencePeriodEndDate + *out = new(string) + **out = **in + } + if in.RecurrencePeriodStartDate != nil { + in, out := &in.RecurrencePeriodStartDate, &out.RecurrencePeriodStartDate + *out = new(string) + **out = **in + } + if in.RecurrenceType != nil { + in, out := &in.RecurrenceType, &out.RecurrenceType + *out = new(string) + **out = **in + } + if in.SubscriptionID != nil { + in, out := &in.SubscriptionID, &out.SubscriptionID + *out = new(string) + **out = **in + } + if in.SubscriptionIDRef != nil { + in, out := &in.SubscriptionIDRef, &out.SubscriptionIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubscriptionIDSelector != nil { + in, out := &in.SubscriptionIDSelector, &out.SubscriptionIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionCostManagementExportParameters. 
+func (in *SubscriptionCostManagementExportParameters) DeepCopy() *SubscriptionCostManagementExportParameters { + if in == nil { + return nil + } + out := new(SubscriptionCostManagementExportParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubscriptionCostManagementExportSpec) DeepCopyInto(out *SubscriptionCostManagementExportSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionCostManagementExportSpec. +func (in *SubscriptionCostManagementExportSpec) DeepCopy() *SubscriptionCostManagementExportSpec { + if in == nil { + return nil + } + out := new(SubscriptionCostManagementExportSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubscriptionCostManagementExportStatus) DeepCopyInto(out *SubscriptionCostManagementExportStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionCostManagementExportStatus. 
+func (in *SubscriptionCostManagementExportStatus) DeepCopy() *SubscriptionCostManagementExportStatus { + if in == nil { + return nil + } + out := new(SubscriptionCostManagementExportStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/costmanagement/v1beta2/zz_generated.managed.go b/apis/costmanagement/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..9a5d129a8 --- /dev/null +++ b/apis/costmanagement/v1beta2/zz_generated.managed.go @@ -0,0 +1,128 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this ResourceGroupCostManagementExport. +func (mg *ResourceGroupCostManagementExport) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ResourceGroupCostManagementExport. +func (mg *ResourceGroupCostManagementExport) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ResourceGroupCostManagementExport. +func (mg *ResourceGroupCostManagementExport) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ResourceGroupCostManagementExport. +func (mg *ResourceGroupCostManagementExport) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ResourceGroupCostManagementExport. +func (mg *ResourceGroupCostManagementExport) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ResourceGroupCostManagementExport. 
+func (mg *ResourceGroupCostManagementExport) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ResourceGroupCostManagementExport. +func (mg *ResourceGroupCostManagementExport) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ResourceGroupCostManagementExport. +func (mg *ResourceGroupCostManagementExport) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ResourceGroupCostManagementExport. +func (mg *ResourceGroupCostManagementExport) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ResourceGroupCostManagementExport. +func (mg *ResourceGroupCostManagementExport) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ResourceGroupCostManagementExport. +func (mg *ResourceGroupCostManagementExport) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ResourceGroupCostManagementExport. +func (mg *ResourceGroupCostManagementExport) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this SubscriptionCostManagementExport. +func (mg *SubscriptionCostManagementExport) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this SubscriptionCostManagementExport. +func (mg *SubscriptionCostManagementExport) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this SubscriptionCostManagementExport. 
+func (mg *SubscriptionCostManagementExport) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this SubscriptionCostManagementExport. +func (mg *SubscriptionCostManagementExport) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this SubscriptionCostManagementExport. +func (mg *SubscriptionCostManagementExport) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this SubscriptionCostManagementExport. +func (mg *SubscriptionCostManagementExport) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this SubscriptionCostManagementExport. +func (mg *SubscriptionCostManagementExport) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this SubscriptionCostManagementExport. +func (mg *SubscriptionCostManagementExport) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this SubscriptionCostManagementExport. +func (mg *SubscriptionCostManagementExport) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this SubscriptionCostManagementExport. +func (mg *SubscriptionCostManagementExport) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this SubscriptionCostManagementExport. +func (mg *SubscriptionCostManagementExport) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this SubscriptionCostManagementExport. 
+func (mg *SubscriptionCostManagementExport) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/costmanagement/v1beta2/zz_generated.managedlist.go b/apis/costmanagement/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..cb20a2299 --- /dev/null +++ b/apis/costmanagement/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,26 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this ResourceGroupCostManagementExportList. +func (l *ResourceGroupCostManagementExportList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this SubscriptionCostManagementExportList. +func (l *SubscriptionCostManagementExportList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/costmanagement/v1beta2/zz_generated.resolvers.go b/apis/costmanagement/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..8d15dc58e --- /dev/null +++ b/apis/costmanagement/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,202 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + apisresolver "github.com/upbound/provider-azure/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *ResourceGroupCostManagementExport) ResolveReferences( // ResolveReferences of this ResourceGroupCostManagementExport. + ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + if mg.Spec.ForProvider.ExportDataStorageLocation != nil { + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Container", "ContainerList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ExportDataStorageLocation.ContainerID), + Extract: resource.ExtractParamPath("resource_manager_id", true), + Reference: mg.Spec.ForProvider.ExportDataStorageLocation.ContainerIDRef, + Selector: mg.Spec.ForProvider.ExportDataStorageLocation.ContainerIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ExportDataStorageLocation.ContainerID") + } + mg.Spec.ForProvider.ExportDataStorageLocation.ContainerID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ExportDataStorageLocation.ContainerIDRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and 
its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.ResourceGroupIDRef, + Selector: mg.Spec.ForProvider.ResourceGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupID") + } + mg.Spec.ForProvider.ResourceGroupID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupIDRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.ExportDataStorageLocation != nil { + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Container", "ContainerList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ExportDataStorageLocation.ContainerID), + Extract: resource.ExtractParamPath("resource_manager_id", true), + Reference: mg.Spec.InitProvider.ExportDataStorageLocation.ContainerIDRef, + Selector: mg.Spec.InitProvider.ExportDataStorageLocation.ContainerIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ExportDataStorageLocation.ContainerID") + } + mg.Spec.InitProvider.ExportDataStorageLocation.ContainerID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ExportDataStorageLocation.ContainerIDRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.InitProvider.ResourceGroupID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.ResourceGroupIDRef, + Selector: mg.Spec.InitProvider.ResourceGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ResourceGroupID") + } + mg.Spec.InitProvider.ResourceGroupID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ResourceGroupIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this SubscriptionCostManagementExport. +func (mg *SubscriptionCostManagementExport) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + if mg.Spec.ForProvider.ExportDataStorageLocation != nil { + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Container", "ContainerList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ExportDataStorageLocation.ContainerID), + Extract: resource.ExtractParamPath("resource_manager_id", true), + Reference: mg.Spec.ForProvider.ExportDataStorageLocation.ContainerIDRef, + Selector: mg.Spec.ForProvider.ExportDataStorageLocation.ContainerIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ExportDataStorageLocation.ContainerID") + } + mg.Spec.ForProvider.ExportDataStorageLocation.ContainerID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ExportDataStorageLocation.ContainerIDRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "Subscription", 
"SubscriptionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SubscriptionID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.SubscriptionIDRef, + Selector: mg.Spec.ForProvider.SubscriptionIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SubscriptionID") + } + mg.Spec.ForProvider.SubscriptionID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SubscriptionIDRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.ExportDataStorageLocation != nil { + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Container", "ContainerList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ExportDataStorageLocation.ContainerID), + Extract: resource.ExtractParamPath("resource_manager_id", true), + Reference: mg.Spec.InitProvider.ExportDataStorageLocation.ContainerIDRef, + Selector: mg.Spec.InitProvider.ExportDataStorageLocation.ContainerIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ExportDataStorageLocation.ContainerID") + } + mg.Spec.InitProvider.ExportDataStorageLocation.ContainerID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ExportDataStorageLocation.ContainerIDRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "Subscription", "SubscriptionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and 
its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SubscriptionID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.SubscriptionIDRef, + Selector: mg.Spec.InitProvider.SubscriptionIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SubscriptionID") + } + mg.Spec.InitProvider.SubscriptionID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SubscriptionIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/costmanagement/v1beta2/zz_groupversion_info.go b/apis/costmanagement/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..3b722e927 --- /dev/null +++ b/apis/costmanagement/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=costmanagement.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "costmanagement.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/costmanagement/v1beta2/zz_resourcegroupcostmanagementexport_terraformed.go b/apis/costmanagement/v1beta2/zz_resourcegroupcostmanagementexport_terraformed.go new file mode 100755 index 000000000..869aa3577 --- /dev/null +++ b/apis/costmanagement/v1beta2/zz_resourcegroupcostmanagementexport_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ResourceGroupCostManagementExport +func (mg *ResourceGroupCostManagementExport) GetTerraformResourceType() string { + return "azurerm_resource_group_cost_management_export" +} + +// GetConnectionDetailsMapping for this ResourceGroupCostManagementExport +func (tr *ResourceGroupCostManagementExport) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ResourceGroupCostManagementExport +func (tr *ResourceGroupCostManagementExport) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ResourceGroupCostManagementExport +func (tr *ResourceGroupCostManagementExport) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ResourceGroupCostManagementExport +func (tr *ResourceGroupCostManagementExport) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return 
*tr.Status.AtProvider.ID +} + +// GetParameters of this ResourceGroupCostManagementExport +func (tr *ResourceGroupCostManagementExport) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ResourceGroupCostManagementExport +func (tr *ResourceGroupCostManagementExport) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ResourceGroupCostManagementExport +func (tr *ResourceGroupCostManagementExport) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this ResourceGroupCostManagementExport +func (tr *ResourceGroupCostManagementExport) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ResourceGroupCostManagementExport using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ResourceGroupCostManagementExport) LateInitialize(attrs []byte) (bool, error) { + params := &ResourceGroupCostManagementExportParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ResourceGroupCostManagementExport) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/costmanagement/v1beta2/zz_resourcegroupcostmanagementexport_types.go b/apis/costmanagement/v1beta2/zz_resourcegroupcostmanagementexport_types.go new file mode 100755 index 000000000..7aef0772c --- /dev/null +++ b/apis/costmanagement/v1beta2/zz_resourcegroupcostmanagementexport_types.go @@ -0,0 +1,259 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ExportDataOptionsInitParameters struct { + + // The time frame for pulling data for the query. If custom, then a specific time period must be provided. 
Possible values include: WeekToDate, MonthToDate, BillingMonthToDate, TheLast7Days, TheLastMonth, TheLastBillingMonth, Custom. + TimeFrame *string `json:"timeFrame,omitempty" tf:"time_frame,omitempty"` + + // The type of the query. Possible values are ActualCost, AmortizedCost and Usage. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ExportDataOptionsObservation struct { + + // The time frame for pulling data for the query. If custom, then a specific time period must be provided. Possible values include: WeekToDate, MonthToDate, BillingMonthToDate, TheLast7Days, TheLastMonth, TheLastBillingMonth, Custom. + TimeFrame *string `json:"timeFrame,omitempty" tf:"time_frame,omitempty"` + + // The type of the query. Possible values are ActualCost, AmortizedCost and Usage. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ExportDataOptionsParameters struct { + + // The time frame for pulling data for the query. If custom, then a specific time period must be provided. Possible values include: WeekToDate, MonthToDate, BillingMonthToDate, TheLast7Days, TheLastMonth, TheLastBillingMonth, Custom. + // +kubebuilder:validation:Optional + TimeFrame *string `json:"timeFrame" tf:"time_frame,omitempty"` + + // The type of the query. Possible values are ActualCost, AmortizedCost and Usage. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type ExportDataStorageLocationInitParameters struct { + + // The Resource Manager ID of the container where exports will be uploaded. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Container + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("resource_manager_id",true) + ContainerID *string `json:"containerId,omitempty" tf:"container_id,omitempty"` + + // Reference to a Container in storage to populate containerId. 
+ // +kubebuilder:validation:Optional + ContainerIDRef *v1.Reference `json:"containerIdRef,omitempty" tf:"-"` + + // Selector for a Container in storage to populate containerId. + // +kubebuilder:validation:Optional + ContainerIDSelector *v1.Selector `json:"containerIdSelector,omitempty" tf:"-"` + + // The path of the directory where exports will be uploaded. Changing this forces a new resource to be created. + RootFolderPath *string `json:"rootFolderPath,omitempty" tf:"root_folder_path,omitempty"` +} + +type ExportDataStorageLocationObservation struct { + + // The Resource Manager ID of the container where exports will be uploaded. Changing this forces a new resource to be created. + ContainerID *string `json:"containerId,omitempty" tf:"container_id,omitempty"` + + // The path of the directory where exports will be uploaded. Changing this forces a new resource to be created. + RootFolderPath *string `json:"rootFolderPath,omitempty" tf:"root_folder_path,omitempty"` +} + +type ExportDataStorageLocationParameters struct { + + // The Resource Manager ID of the container where exports will be uploaded. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Container + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("resource_manager_id",true) + // +kubebuilder:validation:Optional + ContainerID *string `json:"containerId,omitempty" tf:"container_id,omitempty"` + + // Reference to a Container in storage to populate containerId. + // +kubebuilder:validation:Optional + ContainerIDRef *v1.Reference `json:"containerIdRef,omitempty" tf:"-"` + + // Selector for a Container in storage to populate containerId. + // +kubebuilder:validation:Optional + ContainerIDSelector *v1.Selector `json:"containerIdSelector,omitempty" tf:"-"` + + // The path of the directory where exports will be uploaded. 
Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + RootFolderPath *string `json:"rootFolderPath" tf:"root_folder_path,omitempty"` +} + +type ResourceGroupCostManagementExportInitParameters struct { + + // Is the cost management export active? Default is true. + Active *bool `json:"active,omitempty" tf:"active,omitempty"` + + // A export_data_options block as defined below. + ExportDataOptions *ExportDataOptionsInitParameters `json:"exportDataOptions,omitempty" tf:"export_data_options,omitempty"` + + // A export_data_storage_location block as defined below. + ExportDataStorageLocation *ExportDataStorageLocationInitParameters `json:"exportDataStorageLocation,omitempty" tf:"export_data_storage_location,omitempty"` + + // The date the export will stop capturing information. + RecurrencePeriodEndDate *string `json:"recurrencePeriodEndDate,omitempty" tf:"recurrence_period_end_date,omitempty"` + + // The date the export will start capturing information. + RecurrencePeriodStartDate *string `json:"recurrencePeriodStartDate,omitempty" tf:"recurrence_period_start_date,omitempty"` + + // How often the requested information will be exported. Valid values include Annually, Daily, Monthly, Weekly. + RecurrenceType *string `json:"recurrenceType,omitempty" tf:"recurrence_type,omitempty"` + + // The id of the resource group on which to create an export. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + ResourceGroupID *string `json:"resourceGroupId,omitempty" tf:"resource_group_id,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupId. 
+ // +kubebuilder:validation:Optional + ResourceGroupIDRef *v1.Reference `json:"resourceGroupIdRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupId. + // +kubebuilder:validation:Optional + ResourceGroupIDSelector *v1.Selector `json:"resourceGroupIdSelector,omitempty" tf:"-"` +} + +type ResourceGroupCostManagementExportObservation struct { + + // Is the cost management export active? Default is true. + Active *bool `json:"active,omitempty" tf:"active,omitempty"` + + // A export_data_options block as defined below. + ExportDataOptions *ExportDataOptionsObservation `json:"exportDataOptions,omitempty" tf:"export_data_options,omitempty"` + + // A export_data_storage_location block as defined below. + ExportDataStorageLocation *ExportDataStorageLocationObservation `json:"exportDataStorageLocation,omitempty" tf:"export_data_storage_location,omitempty"` + + // The ID of the Cost Management Export for this Resource Group. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The date the export will stop capturing information. + RecurrencePeriodEndDate *string `json:"recurrencePeriodEndDate,omitempty" tf:"recurrence_period_end_date,omitempty"` + + // The date the export will start capturing information. + RecurrencePeriodStartDate *string `json:"recurrencePeriodStartDate,omitempty" tf:"recurrence_period_start_date,omitempty"` + + // How often the requested information will be exported. Valid values include Annually, Daily, Monthly, Weekly. + RecurrenceType *string `json:"recurrenceType,omitempty" tf:"recurrence_type,omitempty"` + + // The id of the resource group on which to create an export. Changing this forces a new resource to be created. + ResourceGroupID *string `json:"resourceGroupId,omitempty" tf:"resource_group_id,omitempty"` +} + +type ResourceGroupCostManagementExportParameters struct { + + // Is the cost management export active? Default is true. 
+ // +kubebuilder:validation:Optional + Active *bool `json:"active,omitempty" tf:"active,omitempty"` + + // A export_data_options block as defined below. + // +kubebuilder:validation:Optional + ExportDataOptions *ExportDataOptionsParameters `json:"exportDataOptions,omitempty" tf:"export_data_options,omitempty"` + + // A export_data_storage_location block as defined below. + // +kubebuilder:validation:Optional + ExportDataStorageLocation *ExportDataStorageLocationParameters `json:"exportDataStorageLocation,omitempty" tf:"export_data_storage_location,omitempty"` + + // The date the export will stop capturing information. + // +kubebuilder:validation:Optional + RecurrencePeriodEndDate *string `json:"recurrencePeriodEndDate,omitempty" tf:"recurrence_period_end_date,omitempty"` + + // The date the export will start capturing information. + // +kubebuilder:validation:Optional + RecurrencePeriodStartDate *string `json:"recurrencePeriodStartDate,omitempty" tf:"recurrence_period_start_date,omitempty"` + + // How often the requested information will be exported. Valid values include Annually, Daily, Monthly, Weekly. + // +kubebuilder:validation:Optional + RecurrenceType *string `json:"recurrenceType,omitempty" tf:"recurrence_type,omitempty"` + + // The id of the resource group on which to create an export. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + ResourceGroupID *string `json:"resourceGroupId,omitempty" tf:"resource_group_id,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupId. + // +kubebuilder:validation:Optional + ResourceGroupIDRef *v1.Reference `json:"resourceGroupIdRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupId. 
+ // +kubebuilder:validation:Optional + ResourceGroupIDSelector *v1.Selector `json:"resourceGroupIdSelector,omitempty" tf:"-"` +} + +// ResourceGroupCostManagementExportSpec defines the desired state of ResourceGroupCostManagementExport +type ResourceGroupCostManagementExportSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ResourceGroupCostManagementExportParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ResourceGroupCostManagementExportInitParameters `json:"initProvider,omitempty"` +} + +// ResourceGroupCostManagementExportStatus defines the observed state of ResourceGroupCostManagementExport. +type ResourceGroupCostManagementExportStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ResourceGroupCostManagementExportObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ResourceGroupCostManagementExport is the Schema for the ResourceGroupCostManagementExports API. Manages an Azure Cost Management Export for a Resource Group. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type ResourceGroupCostManagementExport struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.exportDataOptions) || (has(self.initProvider) && has(self.initProvider.exportDataOptions))",message="spec.forProvider.exportDataOptions is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.exportDataStorageLocation) || (has(self.initProvider) && has(self.initProvider.exportDataStorageLocation))",message="spec.forProvider.exportDataStorageLocation is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.recurrencePeriodEndDate) || (has(self.initProvider) && has(self.initProvider.recurrencePeriodEndDate))",message="spec.forProvider.recurrencePeriodEndDate is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.recurrencePeriodStartDate) || (has(self.initProvider) && 
has(self.initProvider.recurrencePeriodStartDate))",message="spec.forProvider.recurrencePeriodStartDate is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.recurrenceType) || (has(self.initProvider) && has(self.initProvider.recurrenceType))",message="spec.forProvider.recurrenceType is a required parameter" + Spec ResourceGroupCostManagementExportSpec `json:"spec"` + Status ResourceGroupCostManagementExportStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ResourceGroupCostManagementExportList contains a list of ResourceGroupCostManagementExports +type ResourceGroupCostManagementExportList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ResourceGroupCostManagementExport `json:"items"` +} + +// Repository type metadata. +var ( + ResourceGroupCostManagementExport_Kind = "ResourceGroupCostManagementExport" + ResourceGroupCostManagementExport_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ResourceGroupCostManagementExport_Kind}.String() + ResourceGroupCostManagementExport_KindAPIVersion = ResourceGroupCostManagementExport_Kind + "." 
+ CRDGroupVersion.String() + ResourceGroupCostManagementExport_GroupVersionKind = CRDGroupVersion.WithKind(ResourceGroupCostManagementExport_Kind) +) + +func init() { + SchemeBuilder.Register(&ResourceGroupCostManagementExport{}, &ResourceGroupCostManagementExportList{}) +} diff --git a/apis/costmanagement/v1beta2/zz_subscriptioncostmanagementexport_terraformed.go b/apis/costmanagement/v1beta2/zz_subscriptioncostmanagementexport_terraformed.go new file mode 100755 index 000000000..c877ab94f --- /dev/null +++ b/apis/costmanagement/v1beta2/zz_subscriptioncostmanagementexport_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this SubscriptionCostManagementExport +func (mg *SubscriptionCostManagementExport) GetTerraformResourceType() string { + return "azurerm_subscription_cost_management_export" +} + +// GetConnectionDetailsMapping for this SubscriptionCostManagementExport +func (tr *SubscriptionCostManagementExport) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this SubscriptionCostManagementExport +func (tr *SubscriptionCostManagementExport) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this SubscriptionCostManagementExport +func (tr *SubscriptionCostManagementExport) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID 
of underlying Terraform resource of this SubscriptionCostManagementExport +func (tr *SubscriptionCostManagementExport) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this SubscriptionCostManagementExport +func (tr *SubscriptionCostManagementExport) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this SubscriptionCostManagementExport +func (tr *SubscriptionCostManagementExport) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this SubscriptionCostManagementExport +func (tr *SubscriptionCostManagementExport) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this SubscriptionCostManagementExport +func (tr *SubscriptionCostManagementExport) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider.
As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this SubscriptionCostManagementExport using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *SubscriptionCostManagementExport) LateInitialize(attrs []byte) (bool, error) { + params := &SubscriptionCostManagementExportParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *SubscriptionCostManagementExport) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/costmanagement/v1beta2/zz_subscriptioncostmanagementexport_types.go b/apis/costmanagement/v1beta2/zz_subscriptioncostmanagementexport_types.go new file mode 100755 index 000000000..3baa6ffbe --- /dev/null +++ b/apis/costmanagement/v1beta2/zz_subscriptioncostmanagementexport_types.go @@ -0,0 +1,270 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type SubscriptionCostManagementExportExportDataOptionsInitParameters struct { + + // The time frame for pulling data for the query. If custom, then a specific time period must be provided. Possible values include: WeekToDate, MonthToDate, BillingMonthToDate, TheLast7Days, TheLastMonth, TheLastBillingMonth, Custom. + TimeFrame *string `json:"timeFrame,omitempty" tf:"time_frame,omitempty"` + + // The type of the query. Possible values are ActualCost, AmortizedCost and Usage. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type SubscriptionCostManagementExportExportDataOptionsObservation struct { + + // The time frame for pulling data for the query. If custom, then a specific time period must be provided. Possible values include: WeekToDate, MonthToDate, BillingMonthToDate, TheLast7Days, TheLastMonth, TheLastBillingMonth, Custom. + TimeFrame *string `json:"timeFrame,omitempty" tf:"time_frame,omitempty"` + + // The type of the query. Possible values are ActualCost, AmortizedCost and Usage. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type SubscriptionCostManagementExportExportDataOptionsParameters struct { + + // The time frame for pulling data for the query. If custom, then a specific time period must be provided. Possible values include: WeekToDate, MonthToDate, BillingMonthToDate, TheLast7Days, TheLastMonth, TheLastBillingMonth, Custom. + // +kubebuilder:validation:Optional + TimeFrame *string `json:"timeFrame" tf:"time_frame,omitempty"` + + // The type of the query. Possible values are ActualCost, AmortizedCost and Usage. 
+ // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type SubscriptionCostManagementExportExportDataStorageLocationInitParameters struct { + + // The Resource Manager ID of the container where exports will be uploaded. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Container + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("resource_manager_id",true) + ContainerID *string `json:"containerId,omitempty" tf:"container_id,omitempty"` + + // Reference to a Container in storage to populate containerId. + // +kubebuilder:validation:Optional + ContainerIDRef *v1.Reference `json:"containerIdRef,omitempty" tf:"-"` + + // Selector for a Container in storage to populate containerId. + // +kubebuilder:validation:Optional + ContainerIDSelector *v1.Selector `json:"containerIdSelector,omitempty" tf:"-"` + + // The path of the directory where exports will be uploaded. Changing this forces a new resource to be created. + RootFolderPath *string `json:"rootFolderPath,omitempty" tf:"root_folder_path,omitempty"` +} + +type SubscriptionCostManagementExportExportDataStorageLocationObservation struct { + + // The Resource Manager ID of the container where exports will be uploaded. Changing this forces a new resource to be created. + ContainerID *string `json:"containerId,omitempty" tf:"container_id,omitempty"` + + // The path of the directory where exports will be uploaded. Changing this forces a new resource to be created. + RootFolderPath *string `json:"rootFolderPath,omitempty" tf:"root_folder_path,omitempty"` +} + +type SubscriptionCostManagementExportExportDataStorageLocationParameters struct { + + // The Resource Manager ID of the container where exports will be uploaded. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Container + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("resource_manager_id",true) + // +kubebuilder:validation:Optional + ContainerID *string `json:"containerId,omitempty" tf:"container_id,omitempty"` + + // Reference to a Container in storage to populate containerId. + // +kubebuilder:validation:Optional + ContainerIDRef *v1.Reference `json:"containerIdRef,omitempty" tf:"-"` + + // Selector for a Container in storage to populate containerId. + // +kubebuilder:validation:Optional + ContainerIDSelector *v1.Selector `json:"containerIdSelector,omitempty" tf:"-"` + + // The path of the directory where exports will be uploaded. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + RootFolderPath *string `json:"rootFolderPath" tf:"root_folder_path,omitempty"` +} + +type SubscriptionCostManagementExportInitParameters struct { + + // Is the cost management export active? Default is true. + Active *bool `json:"active,omitempty" tf:"active,omitempty"` + + // A export_data_options block as defined below. + ExportDataOptions *SubscriptionCostManagementExportExportDataOptionsInitParameters `json:"exportDataOptions,omitempty" tf:"export_data_options,omitempty"` + + // A export_data_storage_location block as defined below. + ExportDataStorageLocation *SubscriptionCostManagementExportExportDataStorageLocationInitParameters `json:"exportDataStorageLocation,omitempty" tf:"export_data_storage_location,omitempty"` + + // Specifies the name of the Cost Management Export. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The date the export will stop capturing information. 
+ RecurrencePeriodEndDate *string `json:"recurrencePeriodEndDate,omitempty" tf:"recurrence_period_end_date,omitempty"` + + // The date the export will start capturing information. + RecurrencePeriodStartDate *string `json:"recurrencePeriodStartDate,omitempty" tf:"recurrence_period_start_date,omitempty"` + + // How often the requested information will be exported. Valid values include Annually, Daily, Monthly, Weekly. + RecurrenceType *string `json:"recurrenceType,omitempty" tf:"recurrence_type,omitempty"` + + // The id of the subscription on which to create an export. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.Subscription + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + SubscriptionID *string `json:"subscriptionId,omitempty" tf:"subscription_id,omitempty"` + + // Reference to a Subscription in azure to populate subscriptionId. + // +kubebuilder:validation:Optional + SubscriptionIDRef *v1.Reference `json:"subscriptionIdRef,omitempty" tf:"-"` + + // Selector for a Subscription in azure to populate subscriptionId. + // +kubebuilder:validation:Optional + SubscriptionIDSelector *v1.Selector `json:"subscriptionIdSelector,omitempty" tf:"-"` +} + +type SubscriptionCostManagementExportObservation struct { + + // Is the cost management export active? Default is true. + Active *bool `json:"active,omitempty" tf:"active,omitempty"` + + // A export_data_options block as defined below. + ExportDataOptions *SubscriptionCostManagementExportExportDataOptionsObservation `json:"exportDataOptions,omitempty" tf:"export_data_options,omitempty"` + + // A export_data_storage_location block as defined below. 
+ ExportDataStorageLocation *SubscriptionCostManagementExportExportDataStorageLocationObservation `json:"exportDataStorageLocation,omitempty" tf:"export_data_storage_location,omitempty"` + + // The ID of the Cost Management Export for this Subscription. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies the name of the Cost Management Export. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The date the export will stop capturing information. + RecurrencePeriodEndDate *string `json:"recurrencePeriodEndDate,omitempty" tf:"recurrence_period_end_date,omitempty"` + + // The date the export will start capturing information. + RecurrencePeriodStartDate *string `json:"recurrencePeriodStartDate,omitempty" tf:"recurrence_period_start_date,omitempty"` + + // How often the requested information will be exported. Valid values include Annually, Daily, Monthly, Weekly. + RecurrenceType *string `json:"recurrenceType,omitempty" tf:"recurrence_type,omitempty"` + + // The id of the subscription on which to create an export. Changing this forces a new resource to be created. + SubscriptionID *string `json:"subscriptionId,omitempty" tf:"subscription_id,omitempty"` +} + +type SubscriptionCostManagementExportParameters struct { + + // Is the cost management export active? Default is true. + // +kubebuilder:validation:Optional + Active *bool `json:"active,omitempty" tf:"active,omitempty"` + + // A export_data_options block as defined below. + // +kubebuilder:validation:Optional + ExportDataOptions *SubscriptionCostManagementExportExportDataOptionsParameters `json:"exportDataOptions,omitempty" tf:"export_data_options,omitempty"` + + // A export_data_storage_location block as defined below. 
+ // +kubebuilder:validation:Optional + ExportDataStorageLocation *SubscriptionCostManagementExportExportDataStorageLocationParameters `json:"exportDataStorageLocation,omitempty" tf:"export_data_storage_location,omitempty"` + + // Specifies the name of the Cost Management Export. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The date the export will stop capturing information. + // +kubebuilder:validation:Optional + RecurrencePeriodEndDate *string `json:"recurrencePeriodEndDate,omitempty" tf:"recurrence_period_end_date,omitempty"` + + // The date the export will start capturing information. + // +kubebuilder:validation:Optional + RecurrencePeriodStartDate *string `json:"recurrencePeriodStartDate,omitempty" tf:"recurrence_period_start_date,omitempty"` + + // How often the requested information will be exported. Valid values include Annually, Daily, Monthly, Weekly. + // +kubebuilder:validation:Optional + RecurrenceType *string `json:"recurrenceType,omitempty" tf:"recurrence_type,omitempty"` + + // The id of the subscription on which to create an export. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.Subscription + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + SubscriptionID *string `json:"subscriptionId,omitempty" tf:"subscription_id,omitempty"` + + // Reference to a Subscription in azure to populate subscriptionId. + // +kubebuilder:validation:Optional + SubscriptionIDRef *v1.Reference `json:"subscriptionIdRef,omitempty" tf:"-"` + + // Selector for a Subscription in azure to populate subscriptionId. 
+ // +kubebuilder:validation:Optional + SubscriptionIDSelector *v1.Selector `json:"subscriptionIdSelector,omitempty" tf:"-"` +} + +// SubscriptionCostManagementExportSpec defines the desired state of SubscriptionCostManagementExport +type SubscriptionCostManagementExportSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider SubscriptionCostManagementExportParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider SubscriptionCostManagementExportInitParameters `json:"initProvider,omitempty"` +} + +// SubscriptionCostManagementExportStatus defines the observed state of SubscriptionCostManagementExport. +type SubscriptionCostManagementExportStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SubscriptionCostManagementExportObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// SubscriptionCostManagementExport is the Schema for the SubscriptionCostManagementExports API. Manages an Azure Cost Management Export for a Subscription. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type SubscriptionCostManagementExport struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.exportDataOptions) || (has(self.initProvider) && has(self.initProvider.exportDataOptions))",message="spec.forProvider.exportDataOptions is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.exportDataStorageLocation) || (has(self.initProvider) && has(self.initProvider.exportDataStorageLocation))",message="spec.forProvider.exportDataStorageLocation is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.recurrencePeriodEndDate) || (has(self.initProvider) && 
has(self.initProvider.recurrencePeriodEndDate))",message="spec.forProvider.recurrencePeriodEndDate is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.recurrencePeriodStartDate) || (has(self.initProvider) && has(self.initProvider.recurrencePeriodStartDate))",message="spec.forProvider.recurrencePeriodStartDate is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.recurrenceType) || (has(self.initProvider) && has(self.initProvider.recurrenceType))",message="spec.forProvider.recurrenceType is a required parameter" + Spec SubscriptionCostManagementExportSpec `json:"spec"` + Status SubscriptionCostManagementExportStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SubscriptionCostManagementExportList contains a list of SubscriptionCostManagementExports +type SubscriptionCostManagementExportList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SubscriptionCostManagementExport `json:"items"` +} + +// Repository type metadata. +var ( + SubscriptionCostManagementExport_Kind = "SubscriptionCostManagementExport" + SubscriptionCostManagementExport_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: SubscriptionCostManagementExport_Kind}.String() + SubscriptionCostManagementExport_KindAPIVersion = SubscriptionCostManagementExport_Kind + "." 
+ CRDGroupVersion.String() + SubscriptionCostManagementExport_GroupVersionKind = CRDGroupVersion.WithKind(SubscriptionCostManagementExport_Kind) +) + +func init() { + SchemeBuilder.Register(&SubscriptionCostManagementExport{}, &SubscriptionCostManagementExportList{}) +} diff --git a/apis/databricks/v1beta1/zz_generated.conversion_hubs.go b/apis/databricks/v1beta1/zz_generated.conversion_hubs.go index 412448442..2b4164676 100755 --- a/apis/databricks/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/databricks/v1beta1/zz_generated.conversion_hubs.go @@ -6,12 +6,6 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *AccessConnector) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Workspace) Hub() {} - // Hub marks this type as a conversion hub. func (tr *WorkspaceCustomerManagedKey) Hub() {} diff --git a/apis/databricks/v1beta1/zz_generated.conversion_spokes.go b/apis/databricks/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..b355010f9 --- /dev/null +++ b/apis/databricks/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,54 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this AccessConnector to the hub type. 
+func (tr *AccessConnector) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the AccessConnector type. +func (tr *AccessConnector) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Workspace to the hub type. +func (tr *Workspace) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Workspace type. 
+func (tr *Workspace) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/databricks/v1beta1/zz_generated.resolvers.go b/apis/databricks/v1beta1/zz_generated.resolvers.go index ce3e7b78e..3a40e691b 100644 --- a/apis/databricks/v1beta1/zz_generated.resolvers.go +++ b/apis/databricks/v1beta1/zz_generated.resolvers.go @@ -211,7 +211,7 @@ func (mg *WorkspaceCustomerManagedKey) ResolveReferences(ctx context.Context, c var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta1", "Key", "KeyList") + m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta2", "Key", "KeyList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -230,7 +230,7 @@ func (mg *WorkspaceCustomerManagedKey) ResolveReferences(ctx context.Context, c mg.Spec.ForProvider.KeyVaultKeyID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.KeyVaultKeyIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("databricks.azure.upbound.io", "v1beta1", "Workspace", "WorkspaceList") + m, l, err = apisresolver.GetManagedResource("databricks.azure.upbound.io", "v1beta2", "Workspace", "WorkspaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -249,7 +249,7 @@ func (mg *WorkspaceCustomerManagedKey) ResolveReferences(ctx context.Context, c mg.Spec.ForProvider.WorkspaceID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.WorkspaceIDRef = 
rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta1", "Key", "KeyList") + m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta2", "Key", "KeyList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -280,7 +280,7 @@ func (mg *WorkspaceRootDbfsCustomerManagedKey) ResolveReferences(ctx context.Con var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta1", "Key", "KeyList") + m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta2", "Key", "KeyList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -299,7 +299,7 @@ func (mg *WorkspaceRootDbfsCustomerManagedKey) ResolveReferences(ctx context.Con mg.Spec.ForProvider.KeyVaultKeyID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.KeyVaultKeyIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("databricks.azure.upbound.io", "v1beta1", "Workspace", "WorkspaceList") + m, l, err = apisresolver.GetManagedResource("databricks.azure.upbound.io", "v1beta2", "Workspace", "WorkspaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -318,7 +318,7 @@ func (mg *WorkspaceRootDbfsCustomerManagedKey) ResolveReferences(ctx context.Con mg.Spec.ForProvider.WorkspaceID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.WorkspaceIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta1", "Key", "KeyList") + m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta2", "Key", "KeyList") if err != nil { return errors.Wrap(err, "failed to get the reference target 
managed resource and its list for reference resolution") } diff --git a/apis/databricks/v1beta1/zz_workspacecustomermanagedkey_types.go b/apis/databricks/v1beta1/zz_workspacecustomermanagedkey_types.go index 17b31de1c..e24ef0eee 100755 --- a/apis/databricks/v1beta1/zz_workspacecustomermanagedkey_types.go +++ b/apis/databricks/v1beta1/zz_workspacecustomermanagedkey_types.go @@ -16,7 +16,7 @@ import ( type WorkspaceCustomerManagedKeyInitParameters struct { // The ID of the Key Vault. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta1.Key + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta2.Key // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() KeyVaultKeyID *string `json:"keyVaultKeyId,omitempty" tf:"key_vault_key_id,omitempty"` @@ -44,7 +44,7 @@ type WorkspaceCustomerManagedKeyObservation struct { type WorkspaceCustomerManagedKeyParameters struct { // The ID of the Key Vault. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta1.Key + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta2.Key // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() // +kubebuilder:validation:Optional KeyVaultKeyID *string `json:"keyVaultKeyId,omitempty" tf:"key_vault_key_id,omitempty"` @@ -58,7 +58,7 @@ type WorkspaceCustomerManagedKeyParameters struct { KeyVaultKeyIDSelector *v1.Selector `json:"keyVaultKeyIdSelector,omitempty" tf:"-"` // The ID of the Databricks Workspace.. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/databricks/v1beta1.Workspace + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/databricks/v1beta2.Workspace // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() // +kubebuilder:validation:Optional WorkspaceID *string `json:"workspaceId,omitempty" tf:"workspace_id,omitempty"` diff --git a/apis/databricks/v1beta1/zz_workspacerootdbfscustomermanagedkey_types.go b/apis/databricks/v1beta1/zz_workspacerootdbfscustomermanagedkey_types.go index 16598e660..5bb0ecca8 100755 --- a/apis/databricks/v1beta1/zz_workspacerootdbfscustomermanagedkey_types.go +++ b/apis/databricks/v1beta1/zz_workspacerootdbfscustomermanagedkey_types.go @@ -16,7 +16,7 @@ import ( type WorkspaceRootDbfsCustomerManagedKeyInitParameters struct { // The resource ID of the Key Vault Key to be used. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta1.Key + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta2.Key // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() KeyVaultKeyID *string `json:"keyVaultKeyId,omitempty" tf:"key_vault_key_id,omitempty"` @@ -44,7 +44,7 @@ type WorkspaceRootDbfsCustomerManagedKeyObservation struct { type WorkspaceRootDbfsCustomerManagedKeyParameters struct { // The resource ID of the Key Vault Key to be used. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta1.Key + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta2.Key // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional KeyVaultKeyID *string `json:"keyVaultKeyId,omitempty" tf:"key_vault_key_id,omitempty"` @@ -58,7 +58,7 @@ type WorkspaceRootDbfsCustomerManagedKeyParameters struct { KeyVaultKeyIDSelector *v1.Selector `json:"keyVaultKeyIdSelector,omitempty" tf:"-"` // The resource ID of the Databricks Workspace. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/databricks/v1beta1.Workspace + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/databricks/v1beta2.Workspace // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional WorkspaceID *string `json:"workspaceId,omitempty" tf:"workspace_id,omitempty"` diff --git a/apis/databricks/v1beta2/zz_accessconnector_terraformed.go b/apis/databricks/v1beta2/zz_accessconnector_terraformed.go new file mode 100755 index 000000000..5a17d2ac6 --- /dev/null +++ b/apis/databricks/v1beta2/zz_accessconnector_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this AccessConnector +func (mg *AccessConnector) GetTerraformResourceType() string { + return "azurerm_databricks_access_connector" +} + +// GetConnectionDetailsMapping for this AccessConnector +func (tr *AccessConnector) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this AccessConnector +func (tr *AccessConnector) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this AccessConnector +func (tr *AccessConnector) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this AccessConnector +func (tr *AccessConnector) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this AccessConnector +func (tr *AccessConnector) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this AccessConnector +func (tr *AccessConnector) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this AccessConnector +func (tr *AccessConnector) GetInitParameters() (map[string]any, error) { + p, err := 
json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this AccessConnector +func (tr *AccessConnector) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this AccessConnector using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *AccessConnector) LateInitialize(attrs []byte) (bool, error) { + params := &AccessConnectorParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *AccessConnector) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/databricks/v1beta2/zz_accessconnector_types.go b/apis/databricks/v1beta2/zz_accessconnector_types.go new file mode 100755 index 000000000..8ef00602d --- /dev/null +++ b/apis/databricks/v1beta2/zz_accessconnector_types.go @@ -0,0 +1,174 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AccessConnectorInitParameters struct { + + // An identity block as defined below. + Identity *IdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Specifies the supported Azure location where the resource has to be created. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type AccessConnectorObservation struct { + + // The ID of the Databricks Access Connector in the Azure management plane. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. + Identity *IdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // Specifies the supported Azure location where the resource has to be created. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the Resource Group in which the Databricks Access Connector should exist. Changing this forces a new resource to be created. 
+ ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type AccessConnectorParameters struct { + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *IdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Specifies the supported Azure location where the resource has to be created. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the Resource Group in which the Databricks Access Connector should exist. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type IdentityInitParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to the Databricks Access Connector. Only one User Assigned Managed Identity ID is supported per Databricks Access Connector resource. 
+ // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on the Databricks Access Connector. Possible values include SystemAssigned or UserAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityObservation struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to the Databricks Access Connector. Only one User Assigned Managed Identity ID is supported per Databricks Access Connector resource. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Principal ID of the System Assigned Managed Service Identity that is configured on this Access Connector. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID of the System Assigned Managed Service Identity that is configured on this Access Connector. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on the Databricks Access Connector. Possible values include SystemAssigned or UserAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to the Databricks Access Connector. Only one User Assigned Managed Identity ID is supported per Databricks Access Connector resource. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on the Databricks Access Connector. Possible values include SystemAssigned or UserAssigned. 
+ // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +// AccessConnectorSpec defines the desired state of AccessConnector +type AccessConnectorSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider AccessConnectorParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider AccessConnectorInitParameters `json:"initProvider,omitempty"` +} + +// AccessConnectorStatus defines the observed state of AccessConnector. +type AccessConnectorStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider AccessConnectorObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// AccessConnector is the Schema for the AccessConnectors API. 
Manages a Databricks Access Connector +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type AccessConnector struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + Spec AccessConnectorSpec `json:"spec"` + Status AccessConnectorStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// AccessConnectorList contains a list of AccessConnectors +type AccessConnectorList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []AccessConnector `json:"items"` +} + +// Repository type metadata. +var ( + AccessConnector_Kind = "AccessConnector" + AccessConnector_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: AccessConnector_Kind}.String() + AccessConnector_KindAPIVersion = AccessConnector_Kind + "." 
+ CRDGroupVersion.String() + AccessConnector_GroupVersionKind = CRDGroupVersion.WithKind(AccessConnector_Kind) +) + +func init() { + SchemeBuilder.Register(&AccessConnector{}, &AccessConnectorList{}) +} diff --git a/apis/databricks/v1beta2/zz_generated.conversion_hubs.go b/apis/databricks/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 000000000..92ad79ddd --- /dev/null +++ b/apis/databricks/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,13 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *AccessConnector) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Workspace) Hub() {} diff --git a/apis/databricks/v1beta2/zz_generated.deepcopy.go b/apis/databricks/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..1dc067422 --- /dev/null +++ b/apis/databricks/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,1190 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessConnector) DeepCopyInto(out *AccessConnector) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessConnector. 
+func (in *AccessConnector) DeepCopy() *AccessConnector { + if in == nil { + return nil + } + out := new(AccessConnector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AccessConnector) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessConnectorInitParameters) DeepCopyInto(out *AccessConnectorInitParameters) { + *out = *in + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessConnectorInitParameters. +func (in *AccessConnectorInitParameters) DeepCopy() *AccessConnectorInitParameters { + if in == nil { + return nil + } + out := new(AccessConnectorInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AccessConnectorList) DeepCopyInto(out *AccessConnectorList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AccessConnector, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessConnectorList. +func (in *AccessConnectorList) DeepCopy() *AccessConnectorList { + if in == nil { + return nil + } + out := new(AccessConnectorList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AccessConnectorList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessConnectorObservation) DeepCopyInto(out *AccessConnectorObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessConnectorObservation. 
+func (in *AccessConnectorObservation) DeepCopy() *AccessConnectorObservation { + if in == nil { + return nil + } + out := new(AccessConnectorObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessConnectorParameters) DeepCopyInto(out *AccessConnectorParameters) { + *out = *in + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessConnectorParameters. +func (in *AccessConnectorParameters) DeepCopy() *AccessConnectorParameters { + if in == nil { + return nil + } + out := new(AccessConnectorParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AccessConnectorSpec) DeepCopyInto(out *AccessConnectorSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessConnectorSpec. +func (in *AccessConnectorSpec) DeepCopy() *AccessConnectorSpec { + if in == nil { + return nil + } + out := new(AccessConnectorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessConnectorStatus) DeepCopyInto(out *AccessConnectorStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessConnectorStatus. +func (in *AccessConnectorStatus) DeepCopy() *AccessConnectorStatus { + if in == nil { + return nil + } + out := new(AccessConnectorStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomParametersInitParameters) DeepCopyInto(out *CustomParametersInitParameters) { + *out = *in + if in.MachineLearningWorkspaceID != nil { + in, out := &in.MachineLearningWorkspaceID, &out.MachineLearningWorkspaceID + *out = new(string) + **out = **in + } + if in.NATGatewayName != nil { + in, out := &in.NATGatewayName, &out.NATGatewayName + *out = new(string) + **out = **in + } + if in.NoPublicIP != nil { + in, out := &in.NoPublicIP, &out.NoPublicIP + *out = new(bool) + **out = **in + } + if in.PrivateSubnetName != nil { + in, out := &in.PrivateSubnetName, &out.PrivateSubnetName + *out = new(string) + **out = **in + } + if in.PrivateSubnetNameRef != nil { + in, out := &in.PrivateSubnetNameRef, &out.PrivateSubnetNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PrivateSubnetNameSelector != nil { + in, out := &in.PrivateSubnetNameSelector, &out.PrivateSubnetNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PrivateSubnetNetworkSecurityGroupAssociationID != nil { + in, out := &in.PrivateSubnetNetworkSecurityGroupAssociationID, &out.PrivateSubnetNetworkSecurityGroupAssociationID + *out = new(string) + **out = **in + } + if in.PublicIPName != nil { + in, out := &in.PublicIPName, &out.PublicIPName + *out = new(string) + **out = **in + } + if in.PublicSubnetName != nil { + in, out := &in.PublicSubnetName, &out.PublicSubnetName + *out = new(string) + **out = **in + } + if in.PublicSubnetNameRef != nil { + in, out := &in.PublicSubnetNameRef, &out.PublicSubnetNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PublicSubnetNameSelector != nil { + in, out := &in.PublicSubnetNameSelector, &out.PublicSubnetNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PublicSubnetNetworkSecurityGroupAssociationID != nil { + in, out := &in.PublicSubnetNetworkSecurityGroupAssociationID, &out.PublicSubnetNetworkSecurityGroupAssociationID + *out = new(string) + **out = **in + } + if 
in.StorageAccountName != nil { + in, out := &in.StorageAccountName, &out.StorageAccountName + *out = new(string) + **out = **in + } + if in.StorageAccountSkuName != nil { + in, out := &in.StorageAccountSkuName, &out.StorageAccountSkuName + *out = new(string) + **out = **in + } + if in.VirtualNetworkID != nil { + in, out := &in.VirtualNetworkID, &out.VirtualNetworkID + *out = new(string) + **out = **in + } + if in.VnetAddressPrefix != nil { + in, out := &in.VnetAddressPrefix, &out.VnetAddressPrefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomParametersInitParameters. +func (in *CustomParametersInitParameters) DeepCopy() *CustomParametersInitParameters { + if in == nil { + return nil + } + out := new(CustomParametersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomParametersObservation) DeepCopyInto(out *CustomParametersObservation) { + *out = *in + if in.MachineLearningWorkspaceID != nil { + in, out := &in.MachineLearningWorkspaceID, &out.MachineLearningWorkspaceID + *out = new(string) + **out = **in + } + if in.NATGatewayName != nil { + in, out := &in.NATGatewayName, &out.NATGatewayName + *out = new(string) + **out = **in + } + if in.NoPublicIP != nil { + in, out := &in.NoPublicIP, &out.NoPublicIP + *out = new(bool) + **out = **in + } + if in.PrivateSubnetName != nil { + in, out := &in.PrivateSubnetName, &out.PrivateSubnetName + *out = new(string) + **out = **in + } + if in.PrivateSubnetNetworkSecurityGroupAssociationID != nil { + in, out := &in.PrivateSubnetNetworkSecurityGroupAssociationID, &out.PrivateSubnetNetworkSecurityGroupAssociationID + *out = new(string) + **out = **in + } + if in.PublicIPName != nil { + in, out := &in.PublicIPName, &out.PublicIPName + *out = new(string) + **out = **in + } + if in.PublicSubnetName != nil { 
+ in, out := &in.PublicSubnetName, &out.PublicSubnetName + *out = new(string) + **out = **in + } + if in.PublicSubnetNetworkSecurityGroupAssociationID != nil { + in, out := &in.PublicSubnetNetworkSecurityGroupAssociationID, &out.PublicSubnetNetworkSecurityGroupAssociationID + *out = new(string) + **out = **in + } + if in.StorageAccountName != nil { + in, out := &in.StorageAccountName, &out.StorageAccountName + *out = new(string) + **out = **in + } + if in.StorageAccountSkuName != nil { + in, out := &in.StorageAccountSkuName, &out.StorageAccountSkuName + *out = new(string) + **out = **in + } + if in.VirtualNetworkID != nil { + in, out := &in.VirtualNetworkID, &out.VirtualNetworkID + *out = new(string) + **out = **in + } + if in.VnetAddressPrefix != nil { + in, out := &in.VnetAddressPrefix, &out.VnetAddressPrefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomParametersObservation. +func (in *CustomParametersObservation) DeepCopy() *CustomParametersObservation { + if in == nil { + return nil + } + out := new(CustomParametersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomParametersParameters) DeepCopyInto(out *CustomParametersParameters) { + *out = *in + if in.MachineLearningWorkspaceID != nil { + in, out := &in.MachineLearningWorkspaceID, &out.MachineLearningWorkspaceID + *out = new(string) + **out = **in + } + if in.NATGatewayName != nil { + in, out := &in.NATGatewayName, &out.NATGatewayName + *out = new(string) + **out = **in + } + if in.NoPublicIP != nil { + in, out := &in.NoPublicIP, &out.NoPublicIP + *out = new(bool) + **out = **in + } + if in.PrivateSubnetName != nil { + in, out := &in.PrivateSubnetName, &out.PrivateSubnetName + *out = new(string) + **out = **in + } + if in.PrivateSubnetNameRef != nil { + in, out := &in.PrivateSubnetNameRef, &out.PrivateSubnetNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PrivateSubnetNameSelector != nil { + in, out := &in.PrivateSubnetNameSelector, &out.PrivateSubnetNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PrivateSubnetNetworkSecurityGroupAssociationID != nil { + in, out := &in.PrivateSubnetNetworkSecurityGroupAssociationID, &out.PrivateSubnetNetworkSecurityGroupAssociationID + *out = new(string) + **out = **in + } + if in.PublicIPName != nil { + in, out := &in.PublicIPName, &out.PublicIPName + *out = new(string) + **out = **in + } + if in.PublicSubnetName != nil { + in, out := &in.PublicSubnetName, &out.PublicSubnetName + *out = new(string) + **out = **in + } + if in.PublicSubnetNameRef != nil { + in, out := &in.PublicSubnetNameRef, &out.PublicSubnetNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PublicSubnetNameSelector != nil { + in, out := &in.PublicSubnetNameSelector, &out.PublicSubnetNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PublicSubnetNetworkSecurityGroupAssociationID != nil { + in, out := &in.PublicSubnetNetworkSecurityGroupAssociationID, &out.PublicSubnetNetworkSecurityGroupAssociationID + *out = new(string) + **out = **in + } + if 
in.StorageAccountName != nil { + in, out := &in.StorageAccountName, &out.StorageAccountName + *out = new(string) + **out = **in + } + if in.StorageAccountSkuName != nil { + in, out := &in.StorageAccountSkuName, &out.StorageAccountSkuName + *out = new(string) + **out = **in + } + if in.VirtualNetworkID != nil { + in, out := &in.VirtualNetworkID, &out.VirtualNetworkID + *out = new(string) + **out = **in + } + if in.VnetAddressPrefix != nil { + in, out := &in.VnetAddressPrefix, &out.VnetAddressPrefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomParametersParameters. +func (in *CustomParametersParameters) DeepCopy() *CustomParametersParameters { + if in == nil { + return nil + } + out := new(CustomParametersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityInitParameters) DeepCopyInto(out *IdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityInitParameters. +func (in *IdentityInitParameters) DeepCopy() *IdentityInitParameters { + if in == nil { + return nil + } + out := new(IdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentityObservation) DeepCopyInto(out *IdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityObservation. +func (in *IdentityObservation) DeepCopy() *IdentityObservation { + if in == nil { + return nil + } + out := new(IdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityParameters) DeepCopyInto(out *IdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityParameters. +func (in *IdentityParameters) DeepCopy() *IdentityParameters { + if in == nil { + return nil + } + out := new(IdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ManagedDiskIdentityInitParameters) DeepCopyInto(out *ManagedDiskIdentityInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedDiskIdentityInitParameters. +func (in *ManagedDiskIdentityInitParameters) DeepCopy() *ManagedDiskIdentityInitParameters { + if in == nil { + return nil + } + out := new(ManagedDiskIdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedDiskIdentityObservation) DeepCopyInto(out *ManagedDiskIdentityObservation) { + *out = *in + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedDiskIdentityObservation. +func (in *ManagedDiskIdentityObservation) DeepCopy() *ManagedDiskIdentityObservation { + if in == nil { + return nil + } + out := new(ManagedDiskIdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedDiskIdentityParameters) DeepCopyInto(out *ManagedDiskIdentityParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedDiskIdentityParameters. +func (in *ManagedDiskIdentityParameters) DeepCopy() *ManagedDiskIdentityParameters { + if in == nil { + return nil + } + out := new(ManagedDiskIdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *StorageAccountIdentityInitParameters) DeepCopyInto(out *StorageAccountIdentityInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageAccountIdentityInitParameters. +func (in *StorageAccountIdentityInitParameters) DeepCopy() *StorageAccountIdentityInitParameters { + if in == nil { + return nil + } + out := new(StorageAccountIdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageAccountIdentityObservation) DeepCopyInto(out *StorageAccountIdentityObservation) { + *out = *in + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageAccountIdentityObservation. +func (in *StorageAccountIdentityObservation) DeepCopy() *StorageAccountIdentityObservation { + if in == nil { + return nil + } + out := new(StorageAccountIdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageAccountIdentityParameters) DeepCopyInto(out *StorageAccountIdentityParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageAccountIdentityParameters. 
+func (in *StorageAccountIdentityParameters) DeepCopy() *StorageAccountIdentityParameters { + if in == nil { + return nil + } + out := new(StorageAccountIdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Workspace) DeepCopyInto(out *Workspace) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Workspace. +func (in *Workspace) DeepCopy() *Workspace { + if in == nil { + return nil + } + out := new(Workspace) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Workspace) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WorkspaceInitParameters) DeepCopyInto(out *WorkspaceInitParameters) { + *out = *in + if in.CustomParameters != nil { + in, out := &in.CustomParameters, &out.CustomParameters + *out = new(CustomParametersInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomerManagedKeyEnabled != nil { + in, out := &in.CustomerManagedKeyEnabled, &out.CustomerManagedKeyEnabled + *out = new(bool) + **out = **in + } + if in.InfrastructureEncryptionEnabled != nil { + in, out := &in.InfrastructureEncryptionEnabled, &out.InfrastructureEncryptionEnabled + *out = new(bool) + **out = **in + } + if in.LoadBalancerBackendAddressPoolID != nil { + in, out := &in.LoadBalancerBackendAddressPoolID, &out.LoadBalancerBackendAddressPoolID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ManagedDiskCmkKeyVaultKeyID != nil { + in, out := &in.ManagedDiskCmkKeyVaultKeyID, &out.ManagedDiskCmkKeyVaultKeyID + *out = new(string) + **out = **in + } + if in.ManagedDiskCmkRotationToLatestVersionEnabled != nil { + in, out := &in.ManagedDiskCmkRotationToLatestVersionEnabled, &out.ManagedDiskCmkRotationToLatestVersionEnabled + *out = new(bool) + **out = **in + } + if in.ManagedResourceGroupName != nil { + in, out := &in.ManagedResourceGroupName, &out.ManagedResourceGroupName + *out = new(string) + **out = **in + } + if in.ManagedResourceGroupNameRef != nil { + in, out := &in.ManagedResourceGroupNameRef, &out.ManagedResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ManagedResourceGroupNameSelector != nil { + in, out := &in.ManagedResourceGroupNameSelector, &out.ManagedResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ManagedServicesCmkKeyVaultKeyID != nil { + in, out := &in.ManagedServicesCmkKeyVaultKeyID, &out.ManagedServicesCmkKeyVaultKeyID + *out = new(string) + **out = **in + } + if 
in.NetworkSecurityGroupRulesRequired != nil { + in, out := &in.NetworkSecurityGroupRulesRequired, &out.NetworkSecurityGroupRulesRequired + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceInitParameters. +func (in *WorkspaceInitParameters) DeepCopy() *WorkspaceInitParameters { + if in == nil { + return nil + } + out := new(WorkspaceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkspaceList) DeepCopyInto(out *WorkspaceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Workspace, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceList. +func (in *WorkspaceList) DeepCopy() *WorkspaceList { + if in == nil { + return nil + } + out := new(WorkspaceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *WorkspaceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkspaceObservation) DeepCopyInto(out *WorkspaceObservation) { + *out = *in + if in.CustomParameters != nil { + in, out := &in.CustomParameters, &out.CustomParameters + *out = new(CustomParametersObservation) + (*in).DeepCopyInto(*out) + } + if in.CustomerManagedKeyEnabled != nil { + in, out := &in.CustomerManagedKeyEnabled, &out.CustomerManagedKeyEnabled + *out = new(bool) + **out = **in + } + if in.DiskEncryptionSetID != nil { + in, out := &in.DiskEncryptionSetID, &out.DiskEncryptionSetID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InfrastructureEncryptionEnabled != nil { + in, out := &in.InfrastructureEncryptionEnabled, &out.InfrastructureEncryptionEnabled + *out = new(bool) + **out = **in + } + if in.LoadBalancerBackendAddressPoolID != nil { + in, out := &in.LoadBalancerBackendAddressPoolID, &out.LoadBalancerBackendAddressPoolID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ManagedDiskCmkKeyVaultKeyID != nil { + in, out := &in.ManagedDiskCmkKeyVaultKeyID, &out.ManagedDiskCmkKeyVaultKeyID + *out = new(string) + **out = **in + } + if in.ManagedDiskCmkRotationToLatestVersionEnabled != nil { + in, out := &in.ManagedDiskCmkRotationToLatestVersionEnabled, &out.ManagedDiskCmkRotationToLatestVersionEnabled + *out = new(bool) + **out = **in + } + if in.ManagedDiskIdentity != nil { + in, out := &in.ManagedDiskIdentity, &out.ManagedDiskIdentity + *out = make([]ManagedDiskIdentityObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ManagedResourceGroupID != nil { + in, out := 
&in.ManagedResourceGroupID, &out.ManagedResourceGroupID + *out = new(string) + **out = **in + } + if in.ManagedResourceGroupName != nil { + in, out := &in.ManagedResourceGroupName, &out.ManagedResourceGroupName + *out = new(string) + **out = **in + } + if in.ManagedServicesCmkKeyVaultKeyID != nil { + in, out := &in.ManagedServicesCmkKeyVaultKeyID, &out.ManagedServicesCmkKeyVaultKeyID + *out = new(string) + **out = **in + } + if in.NetworkSecurityGroupRulesRequired != nil { + in, out := &in.NetworkSecurityGroupRulesRequired, &out.NetworkSecurityGroupRulesRequired + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.StorageAccountIdentity != nil { + in, out := &in.StorageAccountIdentity, &out.StorageAccountIdentity + *out = make([]StorageAccountIdentityObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.WorkspaceID != nil { + in, out := &in.WorkspaceID, &out.WorkspaceID + *out = new(string) + **out = **in + } + if in.WorkspaceURL != nil { + in, out := &in.WorkspaceURL, &out.WorkspaceURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceObservation. 
+func (in *WorkspaceObservation) DeepCopy() *WorkspaceObservation { + if in == nil { + return nil + } + out := new(WorkspaceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkspaceParameters) DeepCopyInto(out *WorkspaceParameters) { + *out = *in + if in.CustomParameters != nil { + in, out := &in.CustomParameters, &out.CustomParameters + *out = new(CustomParametersParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomerManagedKeyEnabled != nil { + in, out := &in.CustomerManagedKeyEnabled, &out.CustomerManagedKeyEnabled + *out = new(bool) + **out = **in + } + if in.InfrastructureEncryptionEnabled != nil { + in, out := &in.InfrastructureEncryptionEnabled, &out.InfrastructureEncryptionEnabled + *out = new(bool) + **out = **in + } + if in.LoadBalancerBackendAddressPoolID != nil { + in, out := &in.LoadBalancerBackendAddressPoolID, &out.LoadBalancerBackendAddressPoolID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ManagedDiskCmkKeyVaultKeyID != nil { + in, out := &in.ManagedDiskCmkKeyVaultKeyID, &out.ManagedDiskCmkKeyVaultKeyID + *out = new(string) + **out = **in + } + if in.ManagedDiskCmkRotationToLatestVersionEnabled != nil { + in, out := &in.ManagedDiskCmkRotationToLatestVersionEnabled, &out.ManagedDiskCmkRotationToLatestVersionEnabled + *out = new(bool) + **out = **in + } + if in.ManagedResourceGroupName != nil { + in, out := &in.ManagedResourceGroupName, &out.ManagedResourceGroupName + *out = new(string) + **out = **in + } + if in.ManagedResourceGroupNameRef != nil { + in, out := &in.ManagedResourceGroupNameRef, &out.ManagedResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ManagedResourceGroupNameSelector != nil { + in, out := &in.ManagedResourceGroupNameSelector, 
&out.ManagedResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ManagedServicesCmkKeyVaultKeyID != nil { + in, out := &in.ManagedServicesCmkKeyVaultKeyID, &out.ManagedServicesCmkKeyVaultKeyID + *out = new(string) + **out = **in + } + if in.NetworkSecurityGroupRulesRequired != nil { + in, out := &in.NetworkSecurityGroupRulesRequired, &out.NetworkSecurityGroupRulesRequired + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceParameters. +func (in *WorkspaceParameters) DeepCopy() *WorkspaceParameters { + if in == nil { + return nil + } + out := new(WorkspaceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WorkspaceSpec) DeepCopyInto(out *WorkspaceSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceSpec. +func (in *WorkspaceSpec) DeepCopy() *WorkspaceSpec { + if in == nil { + return nil + } + out := new(WorkspaceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkspaceStatus) DeepCopyInto(out *WorkspaceStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceStatus. +func (in *WorkspaceStatus) DeepCopy() *WorkspaceStatus { + if in == nil { + return nil + } + out := new(WorkspaceStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/databricks/v1beta2/zz_generated.managed.go b/apis/databricks/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..f2bde1469 --- /dev/null +++ b/apis/databricks/v1beta2/zz_generated.managed.go @@ -0,0 +1,128 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this AccessConnector. +func (mg *AccessConnector) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this AccessConnector. +func (mg *AccessConnector) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this AccessConnector. 
+func (mg *AccessConnector) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this AccessConnector. +func (mg *AccessConnector) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this AccessConnector. +func (mg *AccessConnector) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this AccessConnector. +func (mg *AccessConnector) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this AccessConnector. +func (mg *AccessConnector) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this AccessConnector. +func (mg *AccessConnector) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this AccessConnector. +func (mg *AccessConnector) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this AccessConnector. +func (mg *AccessConnector) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this AccessConnector. +func (mg *AccessConnector) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this AccessConnector. +func (mg *AccessConnector) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Workspace. +func (mg *Workspace) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Workspace. 
+func (mg *Workspace) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Workspace. +func (mg *Workspace) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Workspace. +func (mg *Workspace) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Workspace. +func (mg *Workspace) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Workspace. +func (mg *Workspace) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Workspace. +func (mg *Workspace) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Workspace. +func (mg *Workspace) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Workspace. +func (mg *Workspace) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Workspace. +func (mg *Workspace) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Workspace. +func (mg *Workspace) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Workspace. 
+func (mg *Workspace) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/databricks/v1beta2/zz_generated.managedlist.go b/apis/databricks/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..010d0e226 --- /dev/null +++ b/apis/databricks/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,26 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this AccessConnectorList. +func (l *AccessConnectorList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this WorkspaceList. +func (l *WorkspaceList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/databricks/v1beta2/zz_generated.resolvers.go b/apis/databricks/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..9f8edd036 --- /dev/null +++ b/apis/databricks/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,201 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + apisresolver "github.com/upbound/provider-azure/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *AccessConnector) ResolveReferences( // ResolveReferences of this AccessConnector. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Workspace. 
+func (mg *Workspace) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + if mg.Spec.ForProvider.CustomParameters != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.CustomParameters.PrivateSubnetName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.CustomParameters.PrivateSubnetNameRef, + Selector: mg.Spec.ForProvider.CustomParameters.PrivateSubnetNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.CustomParameters.PrivateSubnetName") + } + mg.Spec.ForProvider.CustomParameters.PrivateSubnetName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.CustomParameters.PrivateSubnetNameRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.CustomParameters != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.CustomParameters.PublicSubnetName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.CustomParameters.PublicSubnetNameRef, + Selector: mg.Spec.ForProvider.CustomParameters.PublicSubnetNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, 
"mg.Spec.ForProvider.CustomParameters.PublicSubnetName") + } + mg.Spec.ForProvider.CustomParameters.PublicSubnetName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.CustomParameters.PublicSubnetNameRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ManagedResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ManagedResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ManagedResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ManagedResourceGroupName") + } + mg.Spec.ForProvider.ManagedResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ManagedResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + if 
mg.Spec.InitProvider.CustomParameters != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.CustomParameters.PrivateSubnetName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.CustomParameters.PrivateSubnetNameRef, + Selector: mg.Spec.InitProvider.CustomParameters.PrivateSubnetNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.CustomParameters.PrivateSubnetName") + } + mg.Spec.InitProvider.CustomParameters.PrivateSubnetName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.CustomParameters.PrivateSubnetNameRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.CustomParameters != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.CustomParameters.PublicSubnetName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.CustomParameters.PublicSubnetNameRef, + Selector: mg.Spec.InitProvider.CustomParameters.PublicSubnetNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.CustomParameters.PublicSubnetName") + } + mg.Spec.InitProvider.CustomParameters.PublicSubnetName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.CustomParameters.PublicSubnetNameRef = rsp.ResolvedReference + + } + { + m, l, err = 
apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ManagedResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ManagedResourceGroupNameRef, + Selector: mg.Spec.InitProvider.ManagedResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ManagedResourceGroupName") + } + mg.Spec.InitProvider.ManagedResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ManagedResourceGroupNameRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/databricks/v1beta2/zz_groupversion_info.go b/apis/databricks/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..50c07f0f8 --- /dev/null +++ b/apis/databricks/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=databricks.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "databricks.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/databricks/v1beta2/zz_workspace_terraformed.go b/apis/databricks/v1beta2/zz_workspace_terraformed.go new file mode 100755 index 000000000..a29940335 --- /dev/null +++ b/apis/databricks/v1beta2/zz_workspace_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Workspace +func (mg *Workspace) GetTerraformResourceType() string { + return "azurerm_databricks_workspace" +} + +// GetConnectionDetailsMapping for this Workspace +func (tr *Workspace) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Workspace +func (tr *Workspace) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Workspace +func (tr *Workspace) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Workspace +func (tr *Workspace) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Workspace +func (tr *Workspace) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Workspace +func (tr 
*Workspace) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Workspace +func (tr *Workspace) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this Workspace +func (tr *Workspace) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Workspace using its observed tfState. +// returns True if there are any spec changes for the resource.
+func (tr *Workspace) LateInitialize(attrs []byte) (bool, error) { + params := &WorkspaceParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Workspace) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/databricks/v1beta2/zz_workspace_types.go b/apis/databricks/v1beta2/zz_workspace_types.go new file mode 100755 index 000000000..34b2e5b45 --- /dev/null +++ b/apis/databricks/v1beta2/zz_workspace_types.go @@ -0,0 +1,474 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CustomParametersInitParameters struct { + + // The ID of a Azure Machine Learning workspace to link with Databricks workspace. Changing this forces a new resource to be created. + MachineLearningWorkspaceID *string `json:"machineLearningWorkspaceId,omitempty" tf:"machine_learning_workspace_id,omitempty"` + + // Name of the NAT gateway for Secure Cluster Connectivity (No Public IP) workspace subnets. Defaults to nat-gateway. Changing this forces a new resource to be created. + NATGatewayName *string `json:"natGatewayName,omitempty" tf:"nat_gateway_name,omitempty"` + + // Are public IP Addresses not allowed? Possible values are true or false. Defaults to false. 
+ NoPublicIP *bool `json:"noPublicIp,omitempty" tf:"no_public_ip,omitempty"` + + // The name of the Private Subnet within the Virtual Network. Required if virtual_network_id is set. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + PrivateSubnetName *string `json:"privateSubnetName,omitempty" tf:"private_subnet_name,omitempty"` + + // Reference to a Subnet in network to populate privateSubnetName. + // +kubebuilder:validation:Optional + PrivateSubnetNameRef *v1.Reference `json:"privateSubnetNameRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate privateSubnetName. + // +kubebuilder:validation:Optional + PrivateSubnetNameSelector *v1.Selector `json:"privateSubnetNameSelector,omitempty" tf:"-"` + + // The resource ID of the azurerm_subnet_network_security_group_association resource which is referred to by the private_subnet_name field. This is the same as the ID of the subnet referred to by the private_subnet_name field. Required if virtual_network_id is set. + PrivateSubnetNetworkSecurityGroupAssociationID *string `json:"privateSubnetNetworkSecurityGroupAssociationId,omitempty" tf:"private_subnet_network_security_group_association_id,omitempty"` + + // Name of the Public IP for No Public IP workspace with managed vNet. Defaults to nat-gw-public-ip. Changing this forces a new resource to be created. + PublicIPName *string `json:"publicIpName,omitempty" tf:"public_ip_name,omitempty"` + + // The name of the Public Subnet within the Virtual Network. Required if virtual_network_id is set. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + PublicSubnetName *string `json:"publicSubnetName,omitempty" tf:"public_subnet_name,omitempty"` + + // Reference to a Subnet in network to populate publicSubnetName. 
+ // +kubebuilder:validation:Optional + PublicSubnetNameRef *v1.Reference `json:"publicSubnetNameRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate publicSubnetName. + // +kubebuilder:validation:Optional + PublicSubnetNameSelector *v1.Selector `json:"publicSubnetNameSelector,omitempty" tf:"-"` + + // The resource ID of the azurerm_subnet_network_security_group_association resource which is referred to by the public_subnet_name field. This is the same as the ID of the subnet referred to by the public_subnet_name field. Required if virtual_network_id is set. + PublicSubnetNetworkSecurityGroupAssociationID *string `json:"publicSubnetNetworkSecurityGroupAssociationId,omitempty" tf:"public_subnet_network_security_group_association_id,omitempty"` + + // Default Databricks File Storage account name. Defaults to a randomized name(e.g. dbstoragel6mfeghoe5kxu). Changing this forces a new resource to be created. + StorageAccountName *string `json:"storageAccountName,omitempty" tf:"storage_account_name,omitempty"` + + // Storage account SKU name. Possible values include Standard_LRS, Standard_GRS, Standard_RAGRS, Standard_GZRS, Standard_RAGZRS, Standard_ZRS, Premium_LRS or Premium_ZRS. Defaults to Standard_GRS. Changing this forces a new resource to be created. + StorageAccountSkuName *string `json:"storageAccountSkuName,omitempty" tf:"storage_account_sku_name,omitempty"` + + // The ID of a Virtual Network where this Databricks Cluster should be created. Changing this forces a new resource to be created. + VirtualNetworkID *string `json:"virtualNetworkId,omitempty" tf:"virtual_network_id,omitempty"` + + // Address prefix for Managed virtual network. Defaults to 10.139. Changing this forces a new resource to be created. + VnetAddressPrefix *string `json:"vnetAddressPrefix,omitempty" tf:"vnet_address_prefix,omitempty"` +} + +type CustomParametersObservation struct { + + // The ID of a Azure Machine Learning workspace to link with Databricks workspace. 
Changing this forces a new resource to be created. + MachineLearningWorkspaceID *string `json:"machineLearningWorkspaceId,omitempty" tf:"machine_learning_workspace_id,omitempty"` + + // Name of the NAT gateway for Secure Cluster Connectivity (No Public IP) workspace subnets. Defaults to nat-gateway. Changing this forces a new resource to be created. + NATGatewayName *string `json:"natGatewayName,omitempty" tf:"nat_gateway_name,omitempty"` + + // Are public IP Addresses not allowed? Possible values are true or false. Defaults to false. + NoPublicIP *bool `json:"noPublicIp,omitempty" tf:"no_public_ip,omitempty"` + + // The name of the Private Subnet within the Virtual Network. Required if virtual_network_id is set. Changing this forces a new resource to be created. + PrivateSubnetName *string `json:"privateSubnetName,omitempty" tf:"private_subnet_name,omitempty"` + + // The resource ID of the azurerm_subnet_network_security_group_association resource which is referred to by the private_subnet_name field. This is the same as the ID of the subnet referred to by the private_subnet_name field. Required if virtual_network_id is set. + PrivateSubnetNetworkSecurityGroupAssociationID *string `json:"privateSubnetNetworkSecurityGroupAssociationId,omitempty" tf:"private_subnet_network_security_group_association_id,omitempty"` + + // Name of the Public IP for No Public IP workspace with managed vNet. Defaults to nat-gw-public-ip. Changing this forces a new resource to be created. + PublicIPName *string `json:"publicIpName,omitempty" tf:"public_ip_name,omitempty"` + + // The name of the Public Subnet within the Virtual Network. Required if virtual_network_id is set. Changing this forces a new resource to be created. + PublicSubnetName *string `json:"publicSubnetName,omitempty" tf:"public_subnet_name,omitempty"` + + // The resource ID of the azurerm_subnet_network_security_group_association resource which is referred to by the public_subnet_name field. 
This is the same as the ID of the subnet referred to by the public_subnet_name field. Required if virtual_network_id is set. + PublicSubnetNetworkSecurityGroupAssociationID *string `json:"publicSubnetNetworkSecurityGroupAssociationId,omitempty" tf:"public_subnet_network_security_group_association_id,omitempty"` + + // Default Databricks File Storage account name. Defaults to a randomized name(e.g. dbstoragel6mfeghoe5kxu). Changing this forces a new resource to be created. + StorageAccountName *string `json:"storageAccountName,omitempty" tf:"storage_account_name,omitempty"` + + // Storage account SKU name. Possible values include Standard_LRS, Standard_GRS, Standard_RAGRS, Standard_GZRS, Standard_RAGZRS, Standard_ZRS, Premium_LRS or Premium_ZRS. Defaults to Standard_GRS. Changing this forces a new resource to be created. + StorageAccountSkuName *string `json:"storageAccountSkuName,omitempty" tf:"storage_account_sku_name,omitempty"` + + // The ID of a Virtual Network where this Databricks Cluster should be created. Changing this forces a new resource to be created. + VirtualNetworkID *string `json:"virtualNetworkId,omitempty" tf:"virtual_network_id,omitempty"` + + // Address prefix for Managed virtual network. Defaults to 10.139. Changing this forces a new resource to be created. + VnetAddressPrefix *string `json:"vnetAddressPrefix,omitempty" tf:"vnet_address_prefix,omitempty"` +} + +type CustomParametersParameters struct { + + // The ID of a Azure Machine Learning workspace to link with Databricks workspace. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + MachineLearningWorkspaceID *string `json:"machineLearningWorkspaceId,omitempty" tf:"machine_learning_workspace_id,omitempty"` + + // Name of the NAT gateway for Secure Cluster Connectivity (No Public IP) workspace subnets. Defaults to nat-gateway. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + NATGatewayName *string `json:"natGatewayName,omitempty" tf:"nat_gateway_name,omitempty"` + + // Are public IP Addresses not allowed? Possible values are true or false. Defaults to false. + // +kubebuilder:validation:Optional + NoPublicIP *bool `json:"noPublicIp,omitempty" tf:"no_public_ip,omitempty"` + + // The name of the Private Subnet within the Virtual Network. Required if virtual_network_id is set. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +kubebuilder:validation:Optional + PrivateSubnetName *string `json:"privateSubnetName,omitempty" tf:"private_subnet_name,omitempty"` + + // Reference to a Subnet in network to populate privateSubnetName. + // +kubebuilder:validation:Optional + PrivateSubnetNameRef *v1.Reference `json:"privateSubnetNameRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate privateSubnetName. + // +kubebuilder:validation:Optional + PrivateSubnetNameSelector *v1.Selector `json:"privateSubnetNameSelector,omitempty" tf:"-"` + + // The resource ID of the azurerm_subnet_network_security_group_association resource which is referred to by the private_subnet_name field. This is the same as the ID of the subnet referred to by the private_subnet_name field. Required if virtual_network_id is set. + // +kubebuilder:validation:Optional + PrivateSubnetNetworkSecurityGroupAssociationID *string `json:"privateSubnetNetworkSecurityGroupAssociationId,omitempty" tf:"private_subnet_network_security_group_association_id,omitempty"` + + // Name of the Public IP for No Public IP workspace with managed vNet. Defaults to nat-gw-public-ip. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + PublicIPName *string `json:"publicIpName,omitempty" tf:"public_ip_name,omitempty"` + + // The name of the Public Subnet within the Virtual Network. 
Required if virtual_network_id is set. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +kubebuilder:validation:Optional + PublicSubnetName *string `json:"publicSubnetName,omitempty" tf:"public_subnet_name,omitempty"` + + // Reference to a Subnet in network to populate publicSubnetName. + // +kubebuilder:validation:Optional + PublicSubnetNameRef *v1.Reference `json:"publicSubnetNameRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate publicSubnetName. + // +kubebuilder:validation:Optional + PublicSubnetNameSelector *v1.Selector `json:"publicSubnetNameSelector,omitempty" tf:"-"` + + // The resource ID of the azurerm_subnet_network_security_group_association resource which is referred to by the public_subnet_name field. This is the same as the ID of the subnet referred to by the public_subnet_name field. Required if virtual_network_id is set. + // +kubebuilder:validation:Optional + PublicSubnetNetworkSecurityGroupAssociationID *string `json:"publicSubnetNetworkSecurityGroupAssociationId,omitempty" tf:"public_subnet_network_security_group_association_id,omitempty"` + + // Default Databricks File Storage account name. Defaults to a randomized name(e.g. dbstoragel6mfeghoe5kxu). Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + StorageAccountName *string `json:"storageAccountName,omitempty" tf:"storage_account_name,omitempty"` + + // Storage account SKU name. Possible values include Standard_LRS, Standard_GRS, Standard_RAGRS, Standard_GZRS, Standard_RAGZRS, Standard_ZRS, Premium_LRS or Premium_ZRS. Defaults to Standard_GRS. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + StorageAccountSkuName *string `json:"storageAccountSkuName,omitempty" tf:"storage_account_sku_name,omitempty"` + + // The ID of a Virtual Network where this Databricks Cluster should be created. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + VirtualNetworkID *string `json:"virtualNetworkId,omitempty" tf:"virtual_network_id,omitempty"` + + // Address prefix for Managed virtual network. Defaults to 10.139. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + VnetAddressPrefix *string `json:"vnetAddressPrefix,omitempty" tf:"vnet_address_prefix,omitempty"` +} + +type ManagedDiskIdentityInitParameters struct { +} + +type ManagedDiskIdentityObservation struct { + + // The principal UUID for the internal databricks disks identity needed to provide access to the workspace for enabling Customer Managed Keys. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The UUID of the tenant where the internal databricks disks identity was created. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // The type of the internal databricks disks identity. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ManagedDiskIdentityParameters struct { +} + +type StorageAccountIdentityInitParameters struct { +} + +type StorageAccountIdentityObservation struct { + + // The principal UUID for the internal databricks storage account needed to provide access to the workspace for enabling Customer Managed Keys. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The UUID of the tenant where the internal databricks storage account was created. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // The type of the internal databricks storage account. 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type StorageAccountIdentityParameters struct { +} + +type WorkspaceInitParameters struct { + + // A custom_parameters block as documented below. + CustomParameters *CustomParametersInitParameters `json:"customParameters,omitempty" tf:"custom_parameters,omitempty"` + + // Is the workspace enabled for customer managed key encryption? If true this enables the Managed Identity for the managed storage account. Possible values are true or false. Defaults to false. This field is only valid if the Databricks Workspace sku is set to premium. + CustomerManagedKeyEnabled *bool `json:"customerManagedKeyEnabled,omitempty" tf:"customer_managed_key_enabled,omitempty"` + + // Is the Databricks File System root file system enabled with a secondary layer of encryption with platform managed keys? Possible values are true or false. Defaults to false. This field is only valid if the Databricks Workspace sku is set to premium. Changing this forces a new resource to be created. + InfrastructureEncryptionEnabled *bool `json:"infrastructureEncryptionEnabled,omitempty" tf:"infrastructure_encryption_enabled,omitempty"` + + // Resource ID of the Outbound Load balancer Backend Address Pool for Secure Cluster Connectivity (No Public IP) workspace. Changing this forces a new resource to be created. + LoadBalancerBackendAddressPoolID *string `json:"loadBalancerBackendAddressPoolId,omitempty" tf:"load_balancer_backend_address_pool_id,omitempty"` + + // Specifies the supported Azure location where the resource has to be created. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Customer managed encryption properties for the Databricks Workspace managed disks. 
+ ManagedDiskCmkKeyVaultKeyID *string `json:"managedDiskCmkKeyVaultKeyId,omitempty" tf:"managed_disk_cmk_key_vault_key_id,omitempty"` + + // Whether customer managed keys for disk encryption will automatically be rotated to the latest version. + ManagedDiskCmkRotationToLatestVersionEnabled *bool `json:"managedDiskCmkRotationToLatestVersionEnabled,omitempty" tf:"managed_disk_cmk_rotation_to_latest_version_enabled,omitempty"` + + // The name of the resource group where Azure should place the managed Databricks resources. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + ManagedResourceGroupName *string `json:"managedResourceGroupName,omitempty" tf:"managed_resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate managedResourceGroupName. + // +kubebuilder:validation:Optional + ManagedResourceGroupNameRef *v1.Reference `json:"managedResourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate managedResourceGroupName. + // +kubebuilder:validation:Optional + ManagedResourceGroupNameSelector *v1.Selector `json:"managedResourceGroupNameSelector,omitempty" tf:"-"` + + // Customer managed encryption properties for the Databricks Workspace managed resources(e.g. Notebooks and Artifacts). + ManagedServicesCmkKeyVaultKeyID *string `json:"managedServicesCmkKeyVaultKeyId,omitempty" tf:"managed_services_cmk_key_vault_key_id,omitempty"` + + // Does the data plane (clusters) to control plane communication happen over private link endpoint only or publicly? Possible values AllRules, NoAzureDatabricksRules or NoAzureServiceRules. Required when public_network_access_enabled is set to false. + NetworkSecurityGroupRulesRequired *string `json:"networkSecurityGroupRulesRequired,omitempty" tf:"network_security_group_rules_required,omitempty"` + + // Allow public access for accessing workspace. 
Set value to false to access workspace only via private link endpoint. Possible values include true or false. Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The sku to use for the Databricks Workspace. Possible values are standard, premium, or trial. + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type WorkspaceObservation struct { + + // A custom_parameters block as documented below. + CustomParameters *CustomParametersObservation `json:"customParameters,omitempty" tf:"custom_parameters,omitempty"` + + // Is the workspace enabled for customer managed key encryption? If true this enables the Managed Identity for the managed storage account. Possible values are true or false. Defaults to false. This field is only valid if the Databricks Workspace sku is set to premium. + CustomerManagedKeyEnabled *bool `json:"customerManagedKeyEnabled,omitempty" tf:"customer_managed_key_enabled,omitempty"` + + // The ID of Managed Disk Encryption Set created by the Databricks Workspace. + DiskEncryptionSetID *string `json:"diskEncryptionSetId,omitempty" tf:"disk_encryption_set_id,omitempty"` + + // The ID of the Databricks Workspace in the Azure management plane. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Is the Databricks File System root file system enabled with a secondary layer of encryption with platform managed keys? Possible values are true or false. Defaults to false. This field is only valid if the Databricks Workspace sku is set to premium. Changing this forces a new resource to be created. 
+ InfrastructureEncryptionEnabled *bool `json:"infrastructureEncryptionEnabled,omitempty" tf:"infrastructure_encryption_enabled,omitempty"` + + // Resource ID of the Outbound Load balancer Backend Address Pool for Secure Cluster Connectivity (No Public IP) workspace. Changing this forces a new resource to be created. + LoadBalancerBackendAddressPoolID *string `json:"loadBalancerBackendAddressPoolId,omitempty" tf:"load_balancer_backend_address_pool_id,omitempty"` + + // Specifies the supported Azure location where the resource has to be created. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Customer managed encryption properties for the Databricks Workspace managed disks. + ManagedDiskCmkKeyVaultKeyID *string `json:"managedDiskCmkKeyVaultKeyId,omitempty" tf:"managed_disk_cmk_key_vault_key_id,omitempty"` + + // Whether customer managed keys for disk encryption will automatically be rotated to the latest version. + ManagedDiskCmkRotationToLatestVersionEnabled *bool `json:"managedDiskCmkRotationToLatestVersionEnabled,omitempty" tf:"managed_disk_cmk_rotation_to_latest_version_enabled,omitempty"` + + // A managed_disk_identity block as documented below. + ManagedDiskIdentity []ManagedDiskIdentityObservation `json:"managedDiskIdentity,omitempty" tf:"managed_disk_identity,omitempty"` + + // The ID of the Managed Resource Group created by the Databricks Workspace. + ManagedResourceGroupID *string `json:"managedResourceGroupId,omitempty" tf:"managed_resource_group_id,omitempty"` + + // The name of the resource group where Azure should place the managed Databricks resources. Changing this forces a new resource to be created. + ManagedResourceGroupName *string `json:"managedResourceGroupName,omitempty" tf:"managed_resource_group_name,omitempty"` + + // Customer managed encryption properties for the Databricks Workspace managed resources(e.g. Notebooks and Artifacts). 
+ ManagedServicesCmkKeyVaultKeyID *string `json:"managedServicesCmkKeyVaultKeyId,omitempty" tf:"managed_services_cmk_key_vault_key_id,omitempty"` + + // Does the data plane (clusters) to control plane communication happen over private link endpoint only or publicly? Possible values AllRules, NoAzureDatabricksRules or NoAzureServiceRules. Required when public_network_access_enabled is set to false. + NetworkSecurityGroupRulesRequired *string `json:"networkSecurityGroupRulesRequired,omitempty" tf:"network_security_group_rules_required,omitempty"` + + // Allow public access for accessing workspace. Set value to false to access workspace only via private link endpoint. Possible values include true or false. Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The name of the Resource Group in which the Databricks Workspace should exist. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // The sku to use for the Databricks Workspace. Possible values are standard, premium, or trial. + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // A storage_account_identity block as documented below. + StorageAccountIdentity []StorageAccountIdentityObservation `json:"storageAccountIdentity,omitempty" tf:"storage_account_identity,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The unique identifier of the databricks workspace in Databricks control plane. 
+ WorkspaceID *string `json:"workspaceId,omitempty" tf:"workspace_id,omitempty"` + + // The workspace URL which is of the format 'adb-{workspaceId}.{random}.azuredatabricks.net' + WorkspaceURL *string `json:"workspaceUrl,omitempty" tf:"workspace_url,omitempty"` +} + +type WorkspaceParameters struct { + + // A custom_parameters block as documented below. + // +kubebuilder:validation:Optional + CustomParameters *CustomParametersParameters `json:"customParameters,omitempty" tf:"custom_parameters,omitempty"` + + // Is the workspace enabled for customer managed key encryption? If true this enables the Managed Identity for the managed storage account. Possible values are true or false. Defaults to false. This field is only valid if the Databricks Workspace sku is set to premium. + // +kubebuilder:validation:Optional + CustomerManagedKeyEnabled *bool `json:"customerManagedKeyEnabled,omitempty" tf:"customer_managed_key_enabled,omitempty"` + + // Is the Databricks File System root file system enabled with a secondary layer of encryption with platform managed keys? Possible values are true or false. Defaults to false. This field is only valid if the Databricks Workspace sku is set to premium. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + InfrastructureEncryptionEnabled *bool `json:"infrastructureEncryptionEnabled,omitempty" tf:"infrastructure_encryption_enabled,omitempty"` + + // Resource ID of the Outbound Load balancer Backend Address Pool for Secure Cluster Connectivity (No Public IP) workspace. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + LoadBalancerBackendAddressPoolID *string `json:"loadBalancerBackendAddressPoolId,omitempty" tf:"load_balancer_backend_address_pool_id,omitempty"` + + // Specifies the supported Azure location where the resource has to be created. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Customer managed encryption properties for the Databricks Workspace managed disks. + // +kubebuilder:validation:Optional + ManagedDiskCmkKeyVaultKeyID *string `json:"managedDiskCmkKeyVaultKeyId,omitempty" tf:"managed_disk_cmk_key_vault_key_id,omitempty"` + + // Whether customer managed keys for disk encryption will automatically be rotated to the latest version. + // +kubebuilder:validation:Optional + ManagedDiskCmkRotationToLatestVersionEnabled *bool `json:"managedDiskCmkRotationToLatestVersionEnabled,omitempty" tf:"managed_disk_cmk_rotation_to_latest_version_enabled,omitempty"` + + // The name of the resource group where Azure should place the managed Databricks resources. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ManagedResourceGroupName *string `json:"managedResourceGroupName,omitempty" tf:"managed_resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate managedResourceGroupName. + // +kubebuilder:validation:Optional + ManagedResourceGroupNameRef *v1.Reference `json:"managedResourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate managedResourceGroupName. + // +kubebuilder:validation:Optional + ManagedResourceGroupNameSelector *v1.Selector `json:"managedResourceGroupNameSelector,omitempty" tf:"-"` + + // Customer managed encryption properties for the Databricks Workspace managed resources(e.g. Notebooks and Artifacts). + // +kubebuilder:validation:Optional + ManagedServicesCmkKeyVaultKeyID *string `json:"managedServicesCmkKeyVaultKeyId,omitempty" tf:"managed_services_cmk_key_vault_key_id,omitempty"` + + // Does the data plane (clusters) to control plane communication happen over private link endpoint only or publicly? 
Possible values AllRules, NoAzureDatabricksRules or NoAzureServiceRules. Required when public_network_access_enabled is set to false. + // +kubebuilder:validation:Optional + NetworkSecurityGroupRulesRequired *string `json:"networkSecurityGroupRulesRequired,omitempty" tf:"network_security_group_rules_required,omitempty"` + + // Allow public access for accessing workspace. Set value to false to access workspace only via private link endpoint. Possible values include true or false. Defaults to true. + // +kubebuilder:validation:Optional + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The name of the Resource Group in which the Databricks Workspace should exist. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // The sku to use for the Databricks Workspace. Possible values are standard, premium, or trial. + // +kubebuilder:validation:Optional + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // A mapping of tags to assign to the resource. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +// WorkspaceSpec defines the desired state of Workspace +type WorkspaceSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider WorkspaceParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider WorkspaceInitParameters `json:"initProvider,omitempty"` +} + +// WorkspaceStatus defines the observed state of Workspace. +type WorkspaceStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider WorkspaceObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Workspace is the Schema for the Workspaces API. 
Manages a Databricks Workspace +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type Workspace struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.sku) || (has(self.initProvider) && has(self.initProvider.sku))",message="spec.forProvider.sku is a required parameter" + Spec WorkspaceSpec `json:"spec"` + Status WorkspaceStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// WorkspaceList contains a list of Workspaces +type WorkspaceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Workspace `json:"items"` +} + +// Repository type metadata. +var ( + Workspace_Kind = "Workspace" + Workspace_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Workspace_Kind}.String() + Workspace_KindAPIVersion = Workspace_Kind + "." 
+ CRDGroupVersion.String() + Workspace_GroupVersionKind = CRDGroupVersion.WithKind(Workspace_Kind) +) + +func init() { + SchemeBuilder.Register(&Workspace{}, &WorkspaceList{}) +} diff --git a/apis/datafactory/v1beta1/zz_datasetazureblob_types.go b/apis/datafactory/v1beta1/zz_datasetazureblob_types.go index 654f2016a..afb4a619f 100755 --- a/apis/datafactory/v1beta1/zz_datasetazureblob_types.go +++ b/apis/datafactory/v1beta1/zz_datasetazureblob_types.go @@ -38,7 +38,7 @@ type DataSetAzureBlobInitParameters struct { Folder *string `json:"folder,omitempty" tf:"folder,omitempty"` // The Data Factory Linked Service name in which to associate the Dataset with. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta1.LinkedServiceAzureBlobStorage + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta2.LinkedServiceAzureBlobStorage LinkedServiceName *string `json:"linkedServiceName,omitempty" tf:"linked_service_name,omitempty"` // Reference to a LinkedServiceAzureBlobStorage in datafactory to populate linkedServiceName. @@ -116,7 +116,7 @@ type DataSetAzureBlobParameters struct { Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` // The Data Factory ID in which to associate the Linked Service with. Changing this forces a new resource. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta1.Factory + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta2.Factory // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` @@ -150,7 +150,7 @@ type DataSetAzureBlobParameters struct { Folder *string `json:"folder,omitempty" tf:"folder,omitempty"` // The Data Factory Linked Service name in which to associate the Dataset with. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta1.LinkedServiceAzureBlobStorage + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta2.LinkedServiceAzureBlobStorage // +kubebuilder:validation:Optional LinkedServiceName *string `json:"linkedServiceName,omitempty" tf:"linked_service_name,omitempty"` diff --git a/apis/datafactory/v1beta1/zz_datasetcosmosdbsqlapi_types.go b/apis/datafactory/v1beta1/zz_datasetcosmosdbsqlapi_types.go index 9231e2a8a..c3b233f22 100755 --- a/apis/datafactory/v1beta1/zz_datasetcosmosdbsqlapi_types.go +++ b/apis/datafactory/v1beta1/zz_datasetcosmosdbsqlapi_types.go @@ -102,7 +102,7 @@ type DataSetCosmosDBSQLAPIParameters struct { CollectionName *string `json:"collectionName,omitempty" tf:"collection_name,omitempty"` // The Data Factory ID in which to associate the Linked Service with. Changing this forces a new resource. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta1.Factory + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta2.Factory // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` diff --git a/apis/datafactory/v1beta1/zz_datasethttp_types.go b/apis/datafactory/v1beta1/zz_datasethttp_types.go index 17e8d0e58..b04f4bb64 100755 --- a/apis/datafactory/v1beta1/zz_datasethttp_types.go +++ b/apis/datafactory/v1beta1/zz_datasethttp_types.go @@ -110,7 +110,7 @@ type DataSetHTTPParameters struct { Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` // The Data Factory ID in which to associate the Linked Service with. Changing this forces a new resource. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta1.Factory + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta2.Factory // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` diff --git a/apis/datafactory/v1beta1/zz_datasetmysql_types.go b/apis/datafactory/v1beta1/zz_datasetmysql_types.go index fea13bf57..cb0bd9761 100755 --- a/apis/datafactory/v1beta1/zz_datasetmysql_types.go +++ b/apis/datafactory/v1beta1/zz_datasetmysql_types.go @@ -98,7 +98,7 @@ type DataSetMySQLParameters struct { Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` // The Data Factory ID in which to associate the Linked Service with. Changing this forces a new resource. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta1.Factory + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta2.Factory // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` diff --git a/apis/datafactory/v1beta1/zz_datasetpostgresql_types.go b/apis/datafactory/v1beta1/zz_datasetpostgresql_types.go index af871c7a7..f06a2fac7 100755 --- a/apis/datafactory/v1beta1/zz_datasetpostgresql_types.go +++ b/apis/datafactory/v1beta1/zz_datasetpostgresql_types.go @@ -98,7 +98,7 @@ type DataSetPostgreSQLParameters struct { Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` // The Data Factory ID in which to associate the Linked Service with. Changing this forces a new resource. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta1.Factory + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta2.Factory // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` diff --git a/apis/datafactory/v1beta1/zz_datasetsnowflake_types.go b/apis/datafactory/v1beta1/zz_datasetsnowflake_types.go index 50f16ca96..69a3ada50 100755 --- a/apis/datafactory/v1beta1/zz_datasetsnowflake_types.go +++ b/apis/datafactory/v1beta1/zz_datasetsnowflake_types.go @@ -29,7 +29,7 @@ type DataSetSnowflakeInitParameters struct { Folder *string `json:"folder,omitempty" tf:"folder,omitempty"` // The Data Factory Linked Service name in which to associate the Dataset with. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta1.LinkedServiceSnowflake + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta2.LinkedServiceSnowflake LinkedServiceName *string `json:"linkedServiceName,omitempty" tf:"linked_service_name,omitempty"` // Reference to a LinkedServiceSnowflake in datafactory to populate linkedServiceName. @@ -104,7 +104,7 @@ type DataSetSnowflakeParameters struct { Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` // The Data Factory ID in which to associate the Linked Service with. Changing this forces a new resource. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta1.Factory + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta2.Factory // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` @@ -126,7 +126,7 @@ type DataSetSnowflakeParameters struct { Folder *string `json:"folder,omitempty" tf:"folder,omitempty"` // The Data Factory Linked Service name in which to associate the Dataset with. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta1.LinkedServiceSnowflake + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta2.LinkedServiceSnowflake // +kubebuilder:validation:Optional LinkedServiceName *string `json:"linkedServiceName,omitempty" tf:"linked_service_name,omitempty"` diff --git a/apis/datafactory/v1beta1/zz_datasetsqlservertable_types.go b/apis/datafactory/v1beta1/zz_datasetsqlservertable_types.go index 02927c08a..578cb3d80 100755 --- a/apis/datafactory/v1beta1/zz_datasetsqlservertable_types.go +++ b/apis/datafactory/v1beta1/zz_datasetsqlservertable_types.go @@ -29,7 +29,7 @@ type DataSetSQLServerTableInitParameters struct { Folder *string `json:"folder,omitempty" tf:"folder,omitempty"` // The Data Factory Linked Service name in which to associate the Dataset with. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta1.LinkedServiceSQLServer + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta2.LinkedServiceSQLServer LinkedServiceName *string `json:"linkedServiceName,omitempty" tf:"linked_service_name,omitempty"` // Reference to a LinkedServiceSQLServer in datafactory to populate linkedServiceName. @@ -98,7 +98,7 @@ type DataSetSQLServerTableParameters struct { Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` // The Data Factory ID in which to associate the Linked Service with. Changing this forces a new resource. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta1.Factory + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta2.Factory // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` @@ -120,7 +120,7 @@ type DataSetSQLServerTableParameters struct { Folder *string `json:"folder,omitempty" tf:"folder,omitempty"` // The Data Factory Linked Service name in which to associate the Dataset with. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta1.LinkedServiceSQLServer + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta2.LinkedServiceSQLServer // +kubebuilder:validation:Optional LinkedServiceName *string `json:"linkedServiceName,omitempty" tf:"linked_service_name,omitempty"` diff --git a/apis/datafactory/v1beta1/zz_generated.conversion_hubs.go b/apis/datafactory/v1beta1/zz_generated.conversion_hubs.go index 789f94ad5..6eed71e48 100755 --- a/apis/datafactory/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/datafactory/v1beta1/zz_generated.conversion_hubs.go @@ -6,39 +6,18 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *Factory) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *CustomDataSet) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *DataFlow) Hub() {} - // Hub marks this type as a conversion hub. func (tr *DataSetAzureBlob) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *DataSetBinary) Hub() {} - // Hub marks this type as a conversion hub. func (tr *DataSetCosmosDBSQLAPI) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *DataSetDelimitedText) Hub() {} - // Hub marks this type as a conversion hub. 
func (tr *DataSetHTTP) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *DataSetJSON) Hub() {} - // Hub marks this type as a conversion hub. func (tr *DataSetMySQL) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *DataSetParquet) Hub() {} - // Hub marks this type as a conversion hub. func (tr *DataSetPostgreSQL) Hub() {} @@ -51,36 +30,12 @@ func (tr *DataSetSQLServerTable) Hub() {} // Hub marks this type as a conversion hub. func (tr *IntegrationRuntimeAzure) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *IntegrationRuntimeAzureSSIS) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *IntegrationRuntimeManaged) Hub() {} - // Hub marks this type as a conversion hub. func (tr *IntegrationRuntimeSelfHosted) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *LinkedCustomService) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *LinkedServiceAzureBlobStorage) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *LinkedServiceAzureDatabricks) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *LinkedServiceAzureFileStorage) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *LinkedServiceAzureFunction) Hub() {} - // Hub marks this type as a conversion hub. func (tr *LinkedServiceAzureSearch) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *LinkedServiceAzureSQLDatabase) Hub() {} - // Hub marks this type as a conversion hub. func (tr *LinkedServiceAzureTableStorage) Hub() {} @@ -102,27 +57,12 @@ func (tr *LinkedServiceKusto) Hub() {} // Hub marks this type as a conversion hub. func (tr *LinkedServiceMySQL) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *LinkedServiceOData) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *LinkedServiceOdbc) Hub() {} - // Hub marks this type as a conversion hub. func (tr *LinkedServicePostgreSQL) Hub() {} // Hub marks this type as a conversion hub. 
func (tr *LinkedServiceSFTP) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *LinkedServiceSnowflake) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *LinkedServiceSQLServer) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *LinkedServiceSynapse) Hub() {} - // Hub marks this type as a conversion hub. func (tr *LinkedServiceWeb) Hub() {} @@ -137,6 +77,3 @@ func (tr *TriggerBlobEvent) Hub() {} // Hub marks this type as a conversion hub. func (tr *TriggerCustomEvent) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *TriggerSchedule) Hub() {} diff --git a/apis/datafactory/v1beta1/zz_generated.conversion_spokes.go b/apis/datafactory/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..04f9f5303 --- /dev/null +++ b/apis/datafactory/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,434 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this CustomDataSet to the hub type. +func (tr *CustomDataSet) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the CustomDataSet type. 
+func (tr *CustomDataSet) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this DataFlow to the hub type. +func (tr *DataFlow) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the DataFlow type. +func (tr *DataFlow) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this DataSetBinary to the hub type. +func (tr *DataSetBinary) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the DataSetBinary type. 
+func (tr *DataSetBinary) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this DataSetDelimitedText to the hub type. +func (tr *DataSetDelimitedText) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the DataSetDelimitedText type. +func (tr *DataSetDelimitedText) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this DataSetJSON to the hub type. +func (tr *DataSetJSON) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the DataSetJSON type. 
+func (tr *DataSetJSON) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this DataSetParquet to the hub type. +func (tr *DataSetParquet) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the DataSetParquet type. +func (tr *DataSetParquet) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Factory to the hub type. +func (tr *Factory) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Factory type. 
+func (tr *Factory) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this IntegrationRuntimeAzureSSIS to the hub type. +func (tr *IntegrationRuntimeAzureSSIS) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the IntegrationRuntimeAzureSSIS type. +func (tr *IntegrationRuntimeAzureSSIS) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this IntegrationRuntimeManaged to the hub type. 
+func (tr *IntegrationRuntimeManaged) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the IntegrationRuntimeManaged type. +func (tr *IntegrationRuntimeManaged) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this LinkedCustomService to the hub type. +func (tr *LinkedCustomService) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the LinkedCustomService type. 
+func (tr *LinkedCustomService) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this LinkedServiceAzureBlobStorage to the hub type. +func (tr *LinkedServiceAzureBlobStorage) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the LinkedServiceAzureBlobStorage type. +func (tr *LinkedServiceAzureBlobStorage) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this LinkedServiceAzureDatabricks to the hub type. 
+func (tr *LinkedServiceAzureDatabricks) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the LinkedServiceAzureDatabricks type. +func (tr *LinkedServiceAzureDatabricks) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this LinkedServiceAzureFileStorage to the hub type. +func (tr *LinkedServiceAzureFileStorage) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the LinkedServiceAzureFileStorage type. 
+func (tr *LinkedServiceAzureFileStorage) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this LinkedServiceAzureFunction to the hub type. +func (tr *LinkedServiceAzureFunction) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the LinkedServiceAzureFunction type. +func (tr *LinkedServiceAzureFunction) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this LinkedServiceAzureSQLDatabase to the hub type. 
+func (tr *LinkedServiceAzureSQLDatabase) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the LinkedServiceAzureSQLDatabase type. +func (tr *LinkedServiceAzureSQLDatabase) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this LinkedServiceOData to the hub type. +func (tr *LinkedServiceOData) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the LinkedServiceOData type. 
+func (tr *LinkedServiceOData) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this LinkedServiceOdbc to the hub type. +func (tr *LinkedServiceOdbc) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the LinkedServiceOdbc type. +func (tr *LinkedServiceOdbc) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this LinkedServiceSnowflake to the hub type. +func (tr *LinkedServiceSnowflake) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the LinkedServiceSnowflake type. 
+func (tr *LinkedServiceSnowflake) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this LinkedServiceSQLServer to the hub type. +func (tr *LinkedServiceSQLServer) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the LinkedServiceSQLServer type. +func (tr *LinkedServiceSQLServer) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this LinkedServiceSynapse to the hub type. 
+func (tr *LinkedServiceSynapse) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the LinkedServiceSynapse type. +func (tr *LinkedServiceSynapse) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this TriggerSchedule to the hub type. +func (tr *TriggerSchedule) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the TriggerSchedule type. 
+func (tr *TriggerSchedule) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/datafactory/v1beta1/zz_generated.resolvers.go b/apis/datafactory/v1beta1/zz_generated.resolvers.go index 7dab2e874..d978efcaf 100644 --- a/apis/datafactory/v1beta1/zz_generated.resolvers.go +++ b/apis/datafactory/v1beta1/zz_generated.resolvers.go @@ -226,7 +226,7 @@ func (mg *DataSetAzureBlob) ResolveReferences(ctx context.Context, c client.Read var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta1", "Factory", "FactoryList") + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta2", "Factory", "FactoryList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -245,7 +245,7 @@ func (mg *DataSetAzureBlob) ResolveReferences(ctx context.Context, c client.Read mg.Spec.ForProvider.DataFactoryID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.DataFactoryIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta1", "LinkedServiceAzureBlobStorage", "LinkedServiceAzureBlobStorageList") + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta2", "LinkedServiceAzureBlobStorage", "LinkedServiceAzureBlobStorageList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -264,7 +264,7 @@ func (mg *DataSetAzureBlob) ResolveReferences(ctx context.Context, c 
client.Read mg.Spec.ForProvider.LinkedServiceName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.LinkedServiceNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta1", "LinkedServiceAzureBlobStorage", "LinkedServiceAzureBlobStorageList") + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta2", "LinkedServiceAzureBlobStorage", "LinkedServiceAzureBlobStorageList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -364,7 +364,7 @@ func (mg *DataSetCosmosDBSQLAPI) ResolveReferences(ctx context.Context, c client var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta1", "Factory", "FactoryList") + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta2", "Factory", "FactoryList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -502,7 +502,7 @@ func (mg *DataSetHTTP) ResolveReferences(ctx context.Context, c client.Reader) e var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta1", "Factory", "FactoryList") + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta2", "Factory", "FactoryList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -640,7 +640,7 @@ func (mg *DataSetMySQL) ResolveReferences(ctx context.Context, c client.Reader) var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta1", "Factory", "FactoryList") + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", 
"v1beta2", "Factory", "FactoryList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -778,7 +778,7 @@ func (mg *DataSetPostgreSQL) ResolveReferences(ctx context.Context, c client.Rea var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta1", "Factory", "FactoryList") + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta2", "Factory", "FactoryList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -847,7 +847,7 @@ func (mg *DataSetSQLServerTable) ResolveReferences(ctx context.Context, c client var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta1", "Factory", "FactoryList") + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta2", "Factory", "FactoryList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -866,7 +866,7 @@ func (mg *DataSetSQLServerTable) ResolveReferences(ctx context.Context, c client mg.Spec.ForProvider.DataFactoryID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.DataFactoryIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta1", "LinkedServiceSQLServer", "LinkedServiceSQLServerList") + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta2", "LinkedServiceSQLServer", "LinkedServiceSQLServerList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -885,7 +885,7 @@ func (mg *DataSetSQLServerTable) ResolveReferences(ctx context.Context, c client 
mg.Spec.ForProvider.LinkedServiceName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.LinkedServiceNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta1", "LinkedServiceSQLServer", "LinkedServiceSQLServerList") + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta2", "LinkedServiceSQLServer", "LinkedServiceSQLServerList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -916,7 +916,7 @@ func (mg *DataSetSnowflake) ResolveReferences(ctx context.Context, c client.Read var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta1", "Factory", "FactoryList") + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta2", "Factory", "FactoryList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -935,7 +935,7 @@ func (mg *DataSetSnowflake) ResolveReferences(ctx context.Context, c client.Read mg.Spec.ForProvider.DataFactoryID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.DataFactoryIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta1", "LinkedServiceSnowflake", "LinkedServiceSnowflakeList") + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta2", "LinkedServiceSnowflake", "LinkedServiceSnowflakeList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -954,7 +954,7 @@ func (mg *DataSetSnowflake) ResolveReferences(ctx context.Context, c client.Read mg.Spec.ForProvider.LinkedServiceName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.LinkedServiceNameRef = 
rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta1", "LinkedServiceSnowflake", "LinkedServiceSnowflakeList") + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta2", "LinkedServiceSnowflake", "LinkedServiceSnowflakeList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1016,7 +1016,7 @@ func (mg *IntegrationRuntimeAzure) ResolveReferences(ctx context.Context, c clie var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta1", "Factory", "FactoryList") + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta2", "Factory", "FactoryList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1279,7 +1279,7 @@ func (mg *IntegrationRuntimeSelfHosted) ResolveReferences(ctx context.Context, c var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta1", "Factory", "FactoryList") + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta2", "Factory", "FactoryList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1619,7 +1619,7 @@ func (mg *LinkedServiceAzureSearch) ResolveReferences(ctx context.Context, c cli var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta1", "Factory", "FactoryList") + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta2", "Factory", "FactoryList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference 
resolution") } @@ -1638,7 +1638,7 @@ func (mg *LinkedServiceAzureSearch) ResolveReferences(ctx context.Context, c cli mg.Spec.ForProvider.DataFactoryID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.DataFactoryIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("search.azure.upbound.io", "v1beta1", "Service", "ServiceList") + m, l, err = apisresolver.GetManagedResource("search.azure.upbound.io", "v1beta2", "Service", "ServiceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1657,7 +1657,7 @@ func (mg *LinkedServiceAzureSearch) ResolveReferences(ctx context.Context, c cli mg.Spec.ForProvider.SearchServiceKey = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.SearchServiceKeyRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("search.azure.upbound.io", "v1beta1", "Service", "ServiceList") + m, l, err = apisresolver.GetManagedResource("search.azure.upbound.io", "v1beta2", "Service", "ServiceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1688,7 +1688,7 @@ func (mg *LinkedServiceAzureTableStorage) ResolveReferences(ctx context.Context, var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta1", "Factory", "FactoryList") + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta2", "Factory", "FactoryList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1719,7 +1719,7 @@ func (mg *LinkedServiceCosmosDB) ResolveReferences(ctx context.Context, c client var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta1", "Factory", 
"FactoryList") + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta2", "Factory", "FactoryList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1750,7 +1750,7 @@ func (mg *LinkedServiceCosmosDBMongoapi) ResolveReferences(ctx context.Context, var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta1", "Factory", "FactoryList") + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta2", "Factory", "FactoryList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1781,7 +1781,7 @@ func (mg *LinkedServiceDataLakeStorageGen2) ResolveReferences(ctx context.Contex var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta1", "Factory", "FactoryList") + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta2", "Factory", "FactoryList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1812,7 +1812,7 @@ func (mg *LinkedServiceKeyVault) ResolveReferences(ctx context.Context, c client var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta1", "Factory", "FactoryList") + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta2", "Factory", "FactoryList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1831,7 +1831,7 @@ func (mg *LinkedServiceKeyVault) ResolveReferences(ctx context.Context, c client mg.Spec.ForProvider.DataFactoryID = 
reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.DataFactoryIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta1", "Vault", "VaultList") + m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta2", "Vault", "VaultList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1850,7 +1850,7 @@ func (mg *LinkedServiceKeyVault) ResolveReferences(ctx context.Context, c client mg.Spec.ForProvider.KeyVaultID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.KeyVaultIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta1", "Vault", "VaultList") + m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta2", "Vault", "VaultList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1881,7 +1881,7 @@ func (mg *LinkedServiceKusto) ResolveReferences(ctx context.Context, c client.Re var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta1", "Factory", "FactoryList") + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta2", "Factory", "FactoryList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1919,7 +1919,7 @@ func (mg *LinkedServiceKusto) ResolveReferences(ctx context.Context, c client.Re mg.Spec.ForProvider.KustoDatabaseName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.KustoDatabaseNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("kusto.azure.upbound.io", "v1beta1", "Cluster", "ClusterList") + m, l, err = apisresolver.GetManagedResource("kusto.azure.upbound.io", 
"v1beta2", "Cluster", "ClusterList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1957,7 +1957,7 @@ func (mg *LinkedServiceKusto) ResolveReferences(ctx context.Context, c client.Re mg.Spec.InitProvider.KustoDatabaseName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.KustoDatabaseNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("kusto.azure.upbound.io", "v1beta1", "Cluster", "ClusterList") + m, l, err = apisresolver.GetManagedResource("kusto.azure.upbound.io", "v1beta2", "Cluster", "ClusterList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1988,7 +1988,7 @@ func (mg *LinkedServiceMySQL) ResolveReferences(ctx context.Context, c client.Re var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta1", "Factory", "FactoryList") + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta2", "Factory", "FactoryList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -2081,7 +2081,7 @@ func (mg *LinkedServicePostgreSQL) ResolveReferences(ctx context.Context, c clie var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta1", "Factory", "FactoryList") + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta2", "Factory", "FactoryList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -2112,7 +2112,7 @@ func (mg *LinkedServiceSFTP) ResolveReferences(ctx context.Context, c client.Rea var rsp reference.ResolutionResponse var err error { - m, l, err = 
apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta1", "Factory", "FactoryList") + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta2", "Factory", "FactoryList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -2365,7 +2365,7 @@ func (mg *LinkedServiceWeb) ResolveReferences(ctx context.Context, c client.Read var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta1", "Factory", "FactoryList") + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta2", "Factory", "FactoryList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -2396,7 +2396,7 @@ func (mg *ManagedPrivateEndpoint) ResolveReferences(ctx context.Context, c clien var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta1", "Factory", "FactoryList") + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta2", "Factory", "FactoryList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -2415,7 +2415,7 @@ func (mg *ManagedPrivateEndpoint) ResolveReferences(ctx context.Context, c clien mg.Spec.ForProvider.DataFactoryID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.DataFactoryIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ 
-2434,7 +2434,7 @@ func (mg *ManagedPrivateEndpoint) ResolveReferences(ctx context.Context, c clien mg.Spec.ForProvider.TargetResourceID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.TargetResourceIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta1", "Factory", "FactoryList") + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta2", "Factory", "FactoryList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -2453,7 +2453,7 @@ func (mg *ManagedPrivateEndpoint) ResolveReferences(ctx context.Context, c clien mg.Spec.InitProvider.DataFactoryID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.DataFactoryIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -2484,7 +2484,7 @@ func (mg *Pipeline) ResolveReferences(ctx context.Context, c client.Reader) erro var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta1", "Factory", "FactoryList") + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta2", "Factory", "FactoryList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -2515,7 +2515,7 @@ func (mg *TriggerBlobEvent) ResolveReferences(ctx context.Context, c client.Read var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta1", "Factory", 
"FactoryList") + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta2", "Factory", "FactoryList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -2556,7 +2556,7 @@ func (mg *TriggerBlobEvent) ResolveReferences(ctx context.Context, c client.Read } { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -2596,7 +2596,7 @@ func (mg *TriggerBlobEvent) ResolveReferences(ctx context.Context, c client.Read } { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -2626,7 +2626,7 @@ func (mg *TriggerCustomEvent) ResolveReferences(ctx context.Context, c client.Re var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta1", "Factory", "FactoryList") + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta2", "Factory", "FactoryList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -2645,7 +2645,7 @@ func (mg *TriggerCustomEvent) ResolveReferences(ctx context.Context, c client.Re mg.Spec.ForProvider.DataFactoryID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.DataFactoryIDRef = rsp.ResolvedReference { - m, l, err = 
apisresolver.GetManagedResource("eventgrid.azure.upbound.io", "v1beta1", "Topic", "TopicList") + m, l, err = apisresolver.GetManagedResource("eventgrid.azure.upbound.io", "v1beta2", "Topic", "TopicList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -2686,7 +2686,7 @@ func (mg *TriggerCustomEvent) ResolveReferences(ctx context.Context, c client.Re } { - m, l, err = apisresolver.GetManagedResource("eventgrid.azure.upbound.io", "v1beta1", "Topic", "TopicList") + m, l, err = apisresolver.GetManagedResource("eventgrid.azure.upbound.io", "v1beta2", "Topic", "TopicList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/datafactory/v1beta1/zz_integrationruntimeazure_types.go b/apis/datafactory/v1beta1/zz_integrationruntimeazure_types.go index f2d97ca5b..810435584 100755 --- a/apis/datafactory/v1beta1/zz_integrationruntimeazure_types.go +++ b/apis/datafactory/v1beta1/zz_integrationruntimeazure_types.go @@ -81,7 +81,7 @@ type IntegrationRuntimeAzureParameters struct { CoreCount *float64 `json:"coreCount,omitempty" tf:"core_count,omitempty"` // The Data Factory ID in which to associate the Linked Service with. Changing this forces a new resource. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta1.Factory + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta2.Factory // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` diff --git a/apis/datafactory/v1beta1/zz_integrationruntimeselfhosted_types.go b/apis/datafactory/v1beta1/zz_integrationruntimeselfhosted_types.go index bc0fb5b8a..3037e6b9e 100755 --- a/apis/datafactory/v1beta1/zz_integrationruntimeselfhosted_types.go +++ b/apis/datafactory/v1beta1/zz_integrationruntimeselfhosted_types.go @@ -46,7 +46,7 @@ type IntegrationRuntimeSelfHostedObservation struct { type IntegrationRuntimeSelfHostedParameters struct { // The Data Factory ID in which to associate the Linked Service with. Changing this forces a new resource. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta1.Factory + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta2.Factory // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` diff --git a/apis/datafactory/v1beta1/zz_linkedserviceazuresearch_types.go b/apis/datafactory/v1beta1/zz_linkedserviceazuresearch_types.go index f6302b002..7c060ff52 100755 --- a/apis/datafactory/v1beta1/zz_linkedserviceazuresearch_types.go +++ b/apis/datafactory/v1beta1/zz_linkedserviceazuresearch_types.go @@ -33,7 +33,7 @@ type LinkedServiceAzureSearchInitParameters struct { Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` // The key of the Azure Search Service. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/search/v1beta1.Service + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/search/v1beta2.Service // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("primary_key",true) SearchServiceKey *string `json:"searchServiceKey,omitempty" tf:"search_service_key,omitempty"` @@ -96,7 +96,7 @@ type LinkedServiceAzureSearchParameters struct { Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` // The Data Factory ID in which to associate the Linked Service with. Changing this forces a new resource. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta1.Factory + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta2.Factory // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` @@ -123,7 +123,7 @@ type LinkedServiceAzureSearchParameters struct { Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` // The key of the Azure Search Service. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/search/v1beta1.Service + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/search/v1beta2.Service // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("primary_key",true) // +kubebuilder:validation:Optional SearchServiceKey *string `json:"searchServiceKey,omitempty" tf:"search_service_key,omitempty"` diff --git a/apis/datafactory/v1beta1/zz_linkedserviceazuretablestorage_types.go b/apis/datafactory/v1beta1/zz_linkedserviceazuretablestorage_types.go index e64c649fd..58ea879e3 100755 --- a/apis/datafactory/v1beta1/zz_linkedserviceazuretablestorage_types.go +++ b/apis/datafactory/v1beta1/zz_linkedserviceazuretablestorage_types.go @@ -75,7 +75,7 @@ type LinkedServiceAzureTableStorageParameters struct { ConnectionStringSecretRef v1.SecretKeySelector `json:"connectionStringSecretRef" tf:"-"` // The Data Factory ID in which to associate the Linked Service with. Changing this forces a new resource. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta1.Factory + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta2.Factory // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` diff --git a/apis/datafactory/v1beta1/zz_linkedservicecosmosdb_types.go b/apis/datafactory/v1beta1/zz_linkedservicecosmosdb_types.go index b26039d47..ad454ba0c 100755 --- a/apis/datafactory/v1beta1/zz_linkedservicecosmosdb_types.go +++ b/apis/datafactory/v1beta1/zz_linkedservicecosmosdb_types.go @@ -95,7 +95,7 @@ type LinkedServiceCosmosDBParameters struct { ConnectionStringSecretRef *v1.SecretKeySelector `json:"connectionStringSecretRef,omitempty" tf:"-"` // The Data Factory ID in which to associate the Linked Service with. Changing this forces a new resource. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta1.Factory + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta2.Factory // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` diff --git a/apis/datafactory/v1beta1/zz_linkedservicecosmosdbmongoapi_types.go b/apis/datafactory/v1beta1/zz_linkedservicecosmosdbmongoapi_types.go index efc8762de..0da20eaf5 100755 --- a/apis/datafactory/v1beta1/zz_linkedservicecosmosdbmongoapi_types.go +++ b/apis/datafactory/v1beta1/zz_linkedservicecosmosdbmongoapi_types.go @@ -87,7 +87,7 @@ type LinkedServiceCosmosDBMongoapiParameters struct { ConnectionStringSecretRef *v1.SecretKeySelector `json:"connectionStringSecretRef,omitempty" tf:"-"` // The Data Factory ID in which to associate the Linked Service with. Changing this forces a new resource. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta1.Factory + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta2.Factory // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` diff --git a/apis/datafactory/v1beta1/zz_linkedservicedatalakestoragegen2_types.go b/apis/datafactory/v1beta1/zz_linkedservicedatalakestoragegen2_types.go index 3b9c74e48..2b5b5cb71 100755 --- a/apis/datafactory/v1beta1/zz_linkedservicedatalakestoragegen2_types.go +++ b/apis/datafactory/v1beta1/zz_linkedservicedatalakestoragegen2_types.go @@ -107,7 +107,7 @@ type LinkedServiceDataLakeStorageGen2Parameters struct { Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` // The Data Factory ID in which to associate the Linked Service with. Changing this forces a new resource. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta1.Factory + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta2.Factory // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` diff --git a/apis/datafactory/v1beta1/zz_linkedservicekeyvault_types.go b/apis/datafactory/v1beta1/zz_linkedservicekeyvault_types.go index 215ac0bff..d5d28c6e9 100755 --- a/apis/datafactory/v1beta1/zz_linkedservicekeyvault_types.go +++ b/apis/datafactory/v1beta1/zz_linkedservicekeyvault_types.go @@ -29,7 +29,7 @@ type LinkedServiceKeyVaultInitParameters struct { IntegrationRuntimeName *string `json:"integrationRuntimeName,omitempty" tf:"integration_runtime_name,omitempty"` // The ID the Azure Key Vault resource. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta1.Vault + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta2.Vault // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() KeyVaultID *string `json:"keyVaultId,omitempty" tf:"key_vault_id,omitempty"` @@ -87,7 +87,7 @@ type LinkedServiceKeyVaultParameters struct { Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` // The Data Factory ID in which to associate the Linked Service with. Changing this forces a new resource. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta1.Factory + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta2.Factory // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` @@ -109,7 +109,7 @@ type LinkedServiceKeyVaultParameters struct { IntegrationRuntimeName *string `json:"integrationRuntimeName,omitempty" tf:"integration_runtime_name,omitempty"` // The ID the Azure Key Vault resource. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta1.Vault + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta2.Vault // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional KeyVaultID *string `json:"keyVaultId,omitempty" tf:"key_vault_id,omitempty"` diff --git a/apis/datafactory/v1beta1/zz_linkedservicekusto_types.go b/apis/datafactory/v1beta1/zz_linkedservicekusto_types.go index 25243a5e6..39243374f 100755 --- a/apis/datafactory/v1beta1/zz_linkedservicekusto_types.go +++ b/apis/datafactory/v1beta1/zz_linkedservicekusto_types.go @@ -41,7 +41,7 @@ type LinkedServiceKustoInitParameters struct { KustoDatabaseNameSelector *v1.Selector `json:"kustoDatabaseNameSelector,omitempty" tf:"-"` // The URI of the Kusto Cluster endpoint. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/kusto/v1beta1.Cluster + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/kusto/v1beta2.Cluster // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("uri",true) KustoEndpoint *string `json:"kustoEndpoint,omitempty" tf:"kusto_endpoint,omitempty"` @@ -120,7 +120,7 @@ type LinkedServiceKustoParameters struct { Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` // The Data Factory ID in which to associate the Linked Service with. Changing this forces a new resource. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta1.Factory + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta2.Factory // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` @@ -155,7 +155,7 @@ type LinkedServiceKustoParameters struct { KustoDatabaseNameSelector *v1.Selector `json:"kustoDatabaseNameSelector,omitempty" tf:"-"` // The URI of the Kusto Cluster endpoint. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/kusto/v1beta1.Cluster + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/kusto/v1beta2.Cluster // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("uri",true) // +kubebuilder:validation:Optional KustoEndpoint *string `json:"kustoEndpoint,omitempty" tf:"kusto_endpoint,omitempty"` diff --git a/apis/datafactory/v1beta1/zz_linkedservicemysql_types.go b/apis/datafactory/v1beta1/zz_linkedservicemysql_types.go index ae49acff1..85dcae3e7 100755 --- a/apis/datafactory/v1beta1/zz_linkedservicemysql_types.go +++ b/apis/datafactory/v1beta1/zz_linkedservicemysql_types.go @@ -81,7 +81,7 @@ type LinkedServiceMySQLParameters struct { ConnectionString *string `json:"connectionString,omitempty" tf:"connection_string,omitempty"` // The Data Factory ID in which to associate the Linked Service with. Changing this forces a new resource. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta1.Factory + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta2.Factory // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` diff --git a/apis/datafactory/v1beta1/zz_linkedservicepostgresql_types.go b/apis/datafactory/v1beta1/zz_linkedservicepostgresql_types.go index 2e4aa69e6..5bdb5e201 100755 --- a/apis/datafactory/v1beta1/zz_linkedservicepostgresql_types.go +++ b/apis/datafactory/v1beta1/zz_linkedservicepostgresql_types.go @@ -81,7 +81,7 @@ type LinkedServicePostgreSQLParameters struct { ConnectionString *string `json:"connectionString,omitempty" tf:"connection_string,omitempty"` // The Data Factory ID in which to associate the Linked Service with. 
Changing this forces a new resource. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta1.Factory + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta2.Factory // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` diff --git a/apis/datafactory/v1beta1/zz_linkedservicesftp_types.go b/apis/datafactory/v1beta1/zz_linkedservicesftp_types.go index add61e8a1..a537a6998 100755 --- a/apis/datafactory/v1beta1/zz_linkedservicesftp_types.go +++ b/apis/datafactory/v1beta1/zz_linkedservicesftp_types.go @@ -111,7 +111,7 @@ type LinkedServiceSFTPParameters struct { AuthenticationType *string `json:"authenticationType,omitempty" tf:"authentication_type,omitempty"` // The Data Factory ID in which to associate the Linked Service with. Changing this forces a new resource. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta1.Factory + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta2.Factory // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` diff --git a/apis/datafactory/v1beta1/zz_linkedserviceweb_types.go b/apis/datafactory/v1beta1/zz_linkedserviceweb_types.go index 60dfea494..d8d7c565e 100755 --- a/apis/datafactory/v1beta1/zz_linkedserviceweb_types.go +++ b/apis/datafactory/v1beta1/zz_linkedserviceweb_types.go @@ -93,7 +93,7 @@ type LinkedServiceWebParameters struct { AuthenticationType *string `json:"authenticationType,omitempty" tf:"authentication_type,omitempty"` // The Data Factory ID in which to associate the Linked Service with. 
Changing this forces a new resource. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta1.Factory + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta2.Factory // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` diff --git a/apis/datafactory/v1beta1/zz_managedprivateendpoint_types.go b/apis/datafactory/v1beta1/zz_managedprivateendpoint_types.go index 502f0db82..c4826d84a 100755 --- a/apis/datafactory/v1beta1/zz_managedprivateendpoint_types.go +++ b/apis/datafactory/v1beta1/zz_managedprivateendpoint_types.go @@ -16,7 +16,7 @@ import ( type ManagedPrivateEndpointInitParameters struct { // The ID of the Data Factory on which to create the Managed Private Endpoint. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta1.Factory + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta2.Factory // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` @@ -38,7 +38,7 @@ type ManagedPrivateEndpointInitParameters struct { SubresourceName *string `json:"subresourceName,omitempty" tf:"subresource_name,omitempty"` // The ID of the Private Link Enabled Remote Resource which this Data Factory Private Endpoint should be connected to. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() TargetResourceID *string `json:"targetResourceId,omitempty" tf:"target_resource_id,omitempty"` @@ -75,7 +75,7 @@ type ManagedPrivateEndpointObservation struct { type ManagedPrivateEndpointParameters struct { // The ID of the Data Factory on which to create the Managed Private Endpoint. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta1.Factory + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta2.Factory // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` @@ -101,7 +101,7 @@ type ManagedPrivateEndpointParameters struct { SubresourceName *string `json:"subresourceName,omitempty" tf:"subresource_name,omitempty"` // The ID of the Private Link Enabled Remote Resource which this Data Factory Private Endpoint should be connected to. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional TargetResourceID *string `json:"targetResourceId,omitempty" tf:"target_resource_id,omitempty"` diff --git a/apis/datafactory/v1beta1/zz_pipeline_types.go b/apis/datafactory/v1beta1/zz_pipeline_types.go index bd5cdcc11..a9b6f7991 100755 --- a/apis/datafactory/v1beta1/zz_pipeline_types.go +++ b/apis/datafactory/v1beta1/zz_pipeline_types.go @@ -92,7 +92,7 @@ type PipelineParameters struct { Concurrency *float64 `json:"concurrency,omitempty" tf:"concurrency,omitempty"` // The Data Factory ID in which to associate the Linked Service with. Changing this forces a new resource. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta1.Factory + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta2.Factory // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` diff --git a/apis/datafactory/v1beta1/zz_triggerblobevent_types.go b/apis/datafactory/v1beta1/zz_triggerblobevent_types.go index 15fd1a229..9318e17fb 100755 --- a/apis/datafactory/v1beta1/zz_triggerblobevent_types.go +++ b/apis/datafactory/v1beta1/zz_triggerblobevent_types.go @@ -45,7 +45,7 @@ type TriggerBlobEventInitParameters struct { Pipeline []TriggerBlobEventPipelineInitParameters `json:"pipeline,omitempty" tf:"pipeline,omitempty"` // The ID of Storage Account in which blob event will be listened. Changing this forces a new resource. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` @@ -123,7 +123,7 @@ type TriggerBlobEventParameters struct { BlobPathEndsWith *string `json:"blobPathEndsWith,omitempty" tf:"blob_path_ends_with,omitempty"` // The ID of Data Factory in which to associate the Trigger with. Changing this forces a new resource. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta1.Factory + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta2.Factory // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` @@ -154,7 +154,7 @@ type TriggerBlobEventParameters struct { Pipeline []TriggerBlobEventPipelineParameters `json:"pipeline,omitempty" tf:"pipeline,omitempty"` // The ID of Storage Account in which blob event will be listened. Changing this forces a new resource. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` diff --git a/apis/datafactory/v1beta1/zz_triggercustomevent_types.go b/apis/datafactory/v1beta1/zz_triggercustomevent_types.go index 87002ffd7..88b75070d 100755 --- a/apis/datafactory/v1beta1/zz_triggercustomevent_types.go +++ b/apis/datafactory/v1beta1/zz_triggercustomevent_types.go @@ -29,7 +29,7 @@ type TriggerCustomEventInitParameters struct { Description *string `json:"description,omitempty" tf:"description,omitempty"` // The ID of Event Grid Topic in which event will be listened. Changing this forces a new resource. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventgrid/v1beta1.Topic + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventgrid/v1beta2.Topic // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() EventGridTopicID *string `json:"eventgridTopicId,omitempty" tf:"eventgrid_topic_id,omitempty"` @@ -109,7 +109,7 @@ type TriggerCustomEventParameters struct { Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` // The ID of Data Factory in which to associate the Trigger with. Changing this forces a new resource. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta1.Factory + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta2.Factory // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` @@ -127,7 +127,7 @@ type TriggerCustomEventParameters struct { Description *string `json:"description,omitempty" tf:"description,omitempty"` // The ID of Event Grid Topic in which event will be listened. Changing this forces a new resource. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventgrid/v1beta1.Topic + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventgrid/v1beta2.Topic // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional EventGridTopicID *string `json:"eventgridTopicId,omitempty" tf:"eventgrid_topic_id,omitempty"` diff --git a/apis/datafactory/v1beta2/zz_customdataset_terraformed.go b/apis/datafactory/v1beta2/zz_customdataset_terraformed.go new file mode 100755 index 000000000..f4d26526d --- /dev/null +++ b/apis/datafactory/v1beta2/zz_customdataset_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this CustomDataSet +func (mg *CustomDataSet) GetTerraformResourceType() string { + return "azurerm_data_factory_custom_dataset" +} + +// GetConnectionDetailsMapping for this CustomDataSet +func (tr *CustomDataSet) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this CustomDataSet +func (tr *CustomDataSet) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this CustomDataSet +func (tr *CustomDataSet) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this CustomDataSet +func (tr *CustomDataSet) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this CustomDataSet +func (tr *CustomDataSet) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this CustomDataSet +func (tr *CustomDataSet) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this CustomDataSet +func (tr *CustomDataSet) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { 
+ return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this CustomDataSet +func (tr *CustomDataSet) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this CustomDataSet using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *CustomDataSet) LateInitialize(attrs []byte) (bool, error) { + params := &CustomDataSetParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *CustomDataSet) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/datafactory/v1beta2/zz_customdataset_types.go b/apis/datafactory/v1beta2/zz_customdataset_types.go new file mode 100755 index 000000000..2874803af --- /dev/null +++ b/apis/datafactory/v1beta2/zz_customdataset_types.go @@ -0,0 +1,252 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CustomDataSetInitParameters struct { + + // A map of additional properties to associate with the Data Factory Dataset. + // +mapType=granular + AdditionalProperties map[string]*string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"` + + // List of tags that can be used for describing the Data Factory Dataset. + Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // The description for the Data Factory Dataset. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The folder that this Dataset is in. If not specified, the Dataset will appear at the root level. + Folder *string `json:"folder,omitempty" tf:"folder,omitempty"` + + // A linked_service block as defined below. + LinkedService *LinkedServiceInitParameters `json:"linkedService,omitempty" tf:"linked_service,omitempty"` + + // A map of parameters to associate with the Data Factory Dataset. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // A JSON object that contains the schema of the Data Factory Dataset. 
+ SchemaJSON *string `json:"schemaJson,omitempty" tf:"schema_json,omitempty"` + + // The type of dataset that will be associated with Data Factory. Changing this forces a new resource to be created. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // A JSON object that contains the properties of the Data Factory Dataset. + TypePropertiesJSON *string `json:"typePropertiesJson,omitempty" tf:"type_properties_json,omitempty"` +} + +type CustomDataSetObservation struct { + + // A map of additional properties to associate with the Data Factory Dataset. + // +mapType=granular + AdditionalProperties map[string]*string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"` + + // List of tags that can be used for describing the Data Factory Dataset. + Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // The Data Factory ID in which to associate the Dataset with. Changing this forces a new resource. + DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` + + // The description for the Data Factory Dataset. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The folder that this Dataset is in. If not specified, the Dataset will appear at the root level. + Folder *string `json:"folder,omitempty" tf:"folder,omitempty"` + + // The ID of the Data Factory Dataset. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A linked_service block as defined below. + LinkedService *LinkedServiceObservation `json:"linkedService,omitempty" tf:"linked_service,omitempty"` + + // A map of parameters to associate with the Data Factory Dataset. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // A JSON object that contains the schema of the Data Factory Dataset. 
+ SchemaJSON *string `json:"schemaJson,omitempty" tf:"schema_json,omitempty"` + + // The type of dataset that will be associated with Data Factory. Changing this forces a new resource to be created. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // A JSON object that contains the properties of the Data Factory Dataset. + TypePropertiesJSON *string `json:"typePropertiesJson,omitempty" tf:"type_properties_json,omitempty"` +} + +type CustomDataSetParameters struct { + + // A map of additional properties to associate with the Data Factory Dataset. + // +kubebuilder:validation:Optional + // +mapType=granular + AdditionalProperties map[string]*string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"` + + // List of tags that can be used for describing the Data Factory Dataset. + // +kubebuilder:validation:Optional + Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // The Data Factory ID in which to associate the Dataset with. Changing this forces a new resource. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta2.Factory + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` + + // Reference to a Factory in datafactory to populate dataFactoryId. + // +kubebuilder:validation:Optional + DataFactoryIDRef *v1.Reference `json:"dataFactoryIdRef,omitempty" tf:"-"` + + // Selector for a Factory in datafactory to populate dataFactoryId. + // +kubebuilder:validation:Optional + DataFactoryIDSelector *v1.Selector `json:"dataFactoryIdSelector,omitempty" tf:"-"` + + // The description for the Data Factory Dataset. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The folder that this Dataset is in. 
If not specified, the Dataset will appear at the root level. + // +kubebuilder:validation:Optional + Folder *string `json:"folder,omitempty" tf:"folder,omitempty"` + + // A linked_service block as defined below. + // +kubebuilder:validation:Optional + LinkedService *LinkedServiceParameters `json:"linkedService,omitempty" tf:"linked_service,omitempty"` + + // A map of parameters to associate with the Data Factory Dataset. + // +kubebuilder:validation:Optional + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // A JSON object that contains the schema of the Data Factory Dataset. + // +kubebuilder:validation:Optional + SchemaJSON *string `json:"schemaJson,omitempty" tf:"schema_json,omitempty"` + + // The type of dataset that will be associated with Data Factory. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // A JSON object that contains the properties of the Data Factory Dataset. + // +kubebuilder:validation:Optional + TypePropertiesJSON *string `json:"typePropertiesJson,omitempty" tf:"type_properties_json,omitempty"` +} + +type LinkedServiceInitParameters struct { + + // The name of the Data Factory Linked Service. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta2.LinkedCustomService + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Reference to a LinkedCustomService in datafactory to populate name. + // +kubebuilder:validation:Optional + NameRef *v1.Reference `json:"nameRef,omitempty" tf:"-"` + + // Selector for a LinkedCustomService in datafactory to populate name. + // +kubebuilder:validation:Optional + NameSelector *v1.Selector `json:"nameSelector,omitempty" tf:"-"` + + // A map of parameters to associate with the Data Factory Linked Service. 
+ // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type LinkedServiceObservation struct { + + // The name of the Data Factory Linked Service. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type LinkedServiceParameters struct { + + // The name of the Data Factory Linked Service. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta2.LinkedCustomService + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Reference to a LinkedCustomService in datafactory to populate name. + // +kubebuilder:validation:Optional + NameRef *v1.Reference `json:"nameRef,omitempty" tf:"-"` + + // Selector for a LinkedCustomService in datafactory to populate name. + // +kubebuilder:validation:Optional + NameSelector *v1.Selector `json:"nameSelector,omitempty" tf:"-"` + + // A map of parameters to associate with the Data Factory Linked Service. + // +kubebuilder:validation:Optional + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +// CustomDataSetSpec defines the desired state of CustomDataSet +type CustomDataSetSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider CustomDataSetParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider CustomDataSetInitParameters `json:"initProvider,omitempty"` +} + +// CustomDataSetStatus defines the observed state of CustomDataSet. +type CustomDataSetStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider CustomDataSetObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// CustomDataSet is the Schema for the CustomDataSets API. Manages a Dataset inside an Azure Data Factory. This is a generic resource that supports all different Dataset Types. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type CustomDataSet struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.linkedService) || (has(self.initProvider) && has(self.initProvider.linkedService))",message="spec.forProvider.linkedService is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.type) || (has(self.initProvider) && has(self.initProvider.type))",message="spec.forProvider.type 
is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.typePropertiesJson) || (has(self.initProvider) && has(self.initProvider.typePropertiesJson))",message="spec.forProvider.typePropertiesJson is a required parameter" + Spec CustomDataSetSpec `json:"spec"` + Status CustomDataSetStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// CustomDataSetList contains a list of CustomDataSets +type CustomDataSetList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []CustomDataSet `json:"items"` +} + +// Repository type metadata. +var ( + CustomDataSet_Kind = "CustomDataSet" + CustomDataSet_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: CustomDataSet_Kind}.String() + CustomDataSet_KindAPIVersion = CustomDataSet_Kind + "." + CRDGroupVersion.String() + CustomDataSet_GroupVersionKind = CRDGroupVersion.WithKind(CustomDataSet_Kind) +) + +func init() { + SchemeBuilder.Register(&CustomDataSet{}, &CustomDataSetList{}) +} diff --git a/apis/datafactory/v1beta2/zz_dataflow_terraformed.go b/apis/datafactory/v1beta2/zz_dataflow_terraformed.go new file mode 100755 index 000000000..632616802 --- /dev/null +++ b/apis/datafactory/v1beta2/zz_dataflow_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this DataFlow +func (mg *DataFlow) GetTerraformResourceType() string { + return "azurerm_data_factory_data_flow" +} + +// GetConnectionDetailsMapping for this DataFlow +func (tr *DataFlow) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this DataFlow +func (tr *DataFlow) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this DataFlow +func (tr *DataFlow) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this DataFlow +func (tr *DataFlow) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this DataFlow +func (tr *DataFlow) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this DataFlow +func (tr *DataFlow) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this DataFlow +func (tr *DataFlow) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, 
json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this DataFlow +func (tr *DataFlow) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this DataFlow using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *DataFlow) LateInitialize(attrs []byte) (bool, error) { + params := &DataFlowParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *DataFlow) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/datafactory/v1beta2/zz_dataflow_types.go b/apis/datafactory/v1beta2/zz_dataflow_types.go new file mode 100755 index 000000000..dc210e157 --- /dev/null +++ b/apis/datafactory/v1beta2/zz_dataflow_types.go @@ -0,0 +1,884 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DataFlowInitParameters struct { + + // List of tags that can be used for describing the Data Factory Data Flow. + Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // The description for the Data Factory Data Flow. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The folder that this Data Flow is in. If not specified, the Data Flow will appear at the root level. + Folder *string `json:"folder,omitempty" tf:"folder,omitempty"` + + // The script for the Data Factory Data Flow. + Script *string `json:"script,omitempty" tf:"script,omitempty"` + + // The script lines for the Data Factory Data Flow. + ScriptLines []*string `json:"scriptLines,omitempty" tf:"script_lines,omitempty"` + + // One or more sink blocks as defined below. + Sink []SinkInitParameters `json:"sink,omitempty" tf:"sink,omitempty"` + + // One or more source blocks as defined below. + Source []SourceInitParameters `json:"source,omitempty" tf:"source,omitempty"` + + // One or more transformation blocks as defined below. 
+ Transformation []TransformationInitParameters `json:"transformation,omitempty" tf:"transformation,omitempty"` +} + +type DataFlowObservation struct { + + // List of tags that can be used for describing the Data Factory Data Flow. + Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // The ID of Data Factory in which to associate the Data Flow with. Changing this forces a new resource. + DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` + + // The description for the Data Factory Data Flow. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The folder that this Data Flow is in. If not specified, the Data Flow will appear at the root level. + Folder *string `json:"folder,omitempty" tf:"folder,omitempty"` + + // The ID of the Data Factory Data Flow. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The script for the Data Factory Data Flow. + Script *string `json:"script,omitempty" tf:"script,omitempty"` + + // The script lines for the Data Factory Data Flow. + ScriptLines []*string `json:"scriptLines,omitempty" tf:"script_lines,omitempty"` + + // One or more sink blocks as defined below. + Sink []SinkObservation `json:"sink,omitempty" tf:"sink,omitempty"` + + // One or more source blocks as defined below. + Source []SourceObservation `json:"source,omitempty" tf:"source,omitempty"` + + // One or more transformation blocks as defined below. + Transformation []TransformationObservation `json:"transformation,omitempty" tf:"transformation,omitempty"` +} + +type DataFlowParameters struct { + + // List of tags that can be used for describing the Data Factory Data Flow. + // +kubebuilder:validation:Optional + Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // The ID of Data Factory in which to associate the Data Flow with. Changing this forces a new resource. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta2.Factory + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` + + // Reference to a Factory in datafactory to populate dataFactoryId. + // +kubebuilder:validation:Optional + DataFactoryIDRef *v1.Reference `json:"dataFactoryIdRef,omitempty" tf:"-"` + + // Selector for a Factory in datafactory to populate dataFactoryId. + // +kubebuilder:validation:Optional + DataFactoryIDSelector *v1.Selector `json:"dataFactoryIdSelector,omitempty" tf:"-"` + + // The description for the Data Factory Data Flow. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The folder that this Data Flow is in. If not specified, the Data Flow will appear at the root level. + // +kubebuilder:validation:Optional + Folder *string `json:"folder,omitempty" tf:"folder,omitempty"` + + // The script for the Data Factory Data Flow. + // +kubebuilder:validation:Optional + Script *string `json:"script,omitempty" tf:"script,omitempty"` + + // The script lines for the Data Factory Data Flow. + // +kubebuilder:validation:Optional + ScriptLines []*string `json:"scriptLines,omitempty" tf:"script_lines,omitempty"` + + // One or more sink blocks as defined below. + // +kubebuilder:validation:Optional + Sink []SinkParameters `json:"sink,omitempty" tf:"sink,omitempty"` + + // One or more source blocks as defined below. + // +kubebuilder:validation:Optional + Source []SourceParameters `json:"source,omitempty" tf:"source,omitempty"` + + // One or more transformation blocks as defined below. 
+ // +kubebuilder:validation:Optional + Transformation []TransformationParameters `json:"transformation,omitempty" tf:"transformation,omitempty"` +} + +type DataSetInitParameters struct { + + // The name for the Data Flow transformation. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta2.DataSetJSON + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Reference to a DataSetJSON in datafactory to populate name. + // +kubebuilder:validation:Optional + NameRef *v1.Reference `json:"nameRef,omitempty" tf:"-"` + + // Selector for a DataSetJSON in datafactory to populate name. + // +kubebuilder:validation:Optional + NameSelector *v1.Selector `json:"nameSelector,omitempty" tf:"-"` + + // A map of parameters to associate with the Data Factory Linked Service. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type DataSetObservation struct { + + // The name for the Data Flow transformation. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type DataSetParameters struct { + + // The name for the Data Flow transformation. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta2.DataSetJSON + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Reference to a DataSetJSON in datafactory to populate name. + // +kubebuilder:validation:Optional + NameRef *v1.Reference `json:"nameRef,omitempty" tf:"-"` + + // Selector for a DataSetJSON in datafactory to populate name. + // +kubebuilder:validation:Optional + NameSelector *v1.Selector `json:"nameSelector,omitempty" tf:"-"` + + // A map of parameters to associate with the Data Factory Linked Service. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type FlowletInitParameters struct { + + // Specifies the reference data flow parameters from dataset. + DataSetParameters *string `json:"datasetParameters,omitempty" tf:"dataset_parameters,omitempty"` + + // The name for the Data Flow transformation. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type FlowletObservation struct { + + // Specifies the reference data flow parameters from dataset. + DataSetParameters *string `json:"datasetParameters,omitempty" tf:"dataset_parameters,omitempty"` + + // The name for the Data Flow transformation. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type FlowletParameters struct { + + // Specifies the reference data flow parameters from dataset. + // +kubebuilder:validation:Optional + DataSetParameters *string `json:"datasetParameters,omitempty" tf:"dataset_parameters,omitempty"` + + // The name for the Data Flow transformation. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service. + // +kubebuilder:validation:Optional + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type RejectedLinkedServiceInitParameters struct { + + // The name for the Data Flow transformation. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type RejectedLinkedServiceObservation struct { + + // The name for the Data Flow transformation. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type RejectedLinkedServiceParameters struct { + + // The name for the Data Flow transformation. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service. + // +kubebuilder:validation:Optional + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type SchemaLinkedServiceInitParameters struct { + + // The name for the Data Flow transformation. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type SchemaLinkedServiceObservation struct { + + // The name for the Data Flow transformation. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type SchemaLinkedServiceParameters struct { + + // The name for the Data Flow transformation. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type SinkInitParameters struct { + + // A dataset block as defined below. + DataSet *DataSetInitParameters `json:"dataset,omitempty" tf:"dataset,omitempty"` + + // The description for the Data Flow Source. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A flowlet block as defined below. + Flowlet *FlowletInitParameters `json:"flowlet,omitempty" tf:"flowlet,omitempty"` + + // A linked_service block as defined below. + LinkedService *SinkLinkedServiceInitParameters `json:"linkedService,omitempty" tf:"linked_service,omitempty"` + + // The name for the Data Flow Source. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A rejected_linked_service block as defined below. + RejectedLinkedService *RejectedLinkedServiceInitParameters `json:"rejectedLinkedService,omitempty" tf:"rejected_linked_service,omitempty"` + + // A schema_linked_service block as defined below. + SchemaLinkedService *SchemaLinkedServiceInitParameters `json:"schemaLinkedService,omitempty" tf:"schema_linked_service,omitempty"` +} + +type SinkLinkedServiceInitParameters struct { + + // The name for the Data Flow transformation. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type SinkLinkedServiceObservation struct { + + // The name for the Data Flow transformation. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service. 
+ // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type SinkLinkedServiceParameters struct { + + // The name for the Data Flow transformation. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service. + // +kubebuilder:validation:Optional + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type SinkObservation struct { + + // A dataset block as defined below. + DataSet *DataSetObservation `json:"dataset,omitempty" tf:"dataset,omitempty"` + + // The description for the Data Flow Source. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A flowlet block as defined below. + Flowlet *FlowletObservation `json:"flowlet,omitempty" tf:"flowlet,omitempty"` + + // A linked_service block as defined below. + LinkedService *SinkLinkedServiceObservation `json:"linkedService,omitempty" tf:"linked_service,omitempty"` + + // The name for the Data Flow Source. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A rejected_linked_service block as defined below. + RejectedLinkedService *RejectedLinkedServiceObservation `json:"rejectedLinkedService,omitempty" tf:"rejected_linked_service,omitempty"` + + // A schema_linked_service block as defined below. + SchemaLinkedService *SchemaLinkedServiceObservation `json:"schemaLinkedService,omitempty" tf:"schema_linked_service,omitempty"` +} + +type SinkParameters struct { + + // A dataset block as defined below. + // +kubebuilder:validation:Optional + DataSet *DataSetParameters `json:"dataset,omitempty" tf:"dataset,omitempty"` + + // The description for the Data Flow Source. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A flowlet block as defined below. 
+ // +kubebuilder:validation:Optional + Flowlet *FlowletParameters `json:"flowlet,omitempty" tf:"flowlet,omitempty"` + + // A linked_service block as defined below. + // +kubebuilder:validation:Optional + LinkedService *SinkLinkedServiceParameters `json:"linkedService,omitempty" tf:"linked_service,omitempty"` + + // The name for the Data Flow Source. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // A rejected_linked_service block as defined below. + // +kubebuilder:validation:Optional + RejectedLinkedService *RejectedLinkedServiceParameters `json:"rejectedLinkedService,omitempty" tf:"rejected_linked_service,omitempty"` + + // A schema_linked_service block as defined below. + // +kubebuilder:validation:Optional + SchemaLinkedService *SchemaLinkedServiceParameters `json:"schemaLinkedService,omitempty" tf:"schema_linked_service,omitempty"` +} + +type SourceDataSetInitParameters struct { + + // The name for the Data Flow transformation. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta2.DataSetJSON + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Reference to a DataSetJSON in datafactory to populate name. + // +kubebuilder:validation:Optional + NameRef *v1.Reference `json:"nameRef,omitempty" tf:"-"` + + // Selector for a DataSetJSON in datafactory to populate name. + // +kubebuilder:validation:Optional + NameSelector *v1.Selector `json:"nameSelector,omitempty" tf:"-"` + + // A map of parameters to associate with the Data Factory Linked Service. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type SourceDataSetObservation struct { + + // The name for the Data Flow transformation. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service. 
+ // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type SourceDataSetParameters struct { + + // The name for the Data Flow transformation. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta2.DataSetJSON + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Reference to a DataSetJSON in datafactory to populate name. + // +kubebuilder:validation:Optional + NameRef *v1.Reference `json:"nameRef,omitempty" tf:"-"` + + // Selector for a DataSetJSON in datafactory to populate name. + // +kubebuilder:validation:Optional + NameSelector *v1.Selector `json:"nameSelector,omitempty" tf:"-"` + + // A map of parameters to associate with the Data Factory Linked Service. + // +kubebuilder:validation:Optional + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type SourceFlowletInitParameters struct { + + // Specifies the reference data flow parameters from dataset. + DataSetParameters *string `json:"datasetParameters,omitempty" tf:"dataset_parameters,omitempty"` + + // The name for the Data Flow transformation. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type SourceFlowletObservation struct { + + // Specifies the reference data flow parameters from dataset. + DataSetParameters *string `json:"datasetParameters,omitempty" tf:"dataset_parameters,omitempty"` + + // The name for the Data Flow transformation. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service. 
+ // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type SourceFlowletParameters struct { + + // Specifies the reference data flow parameters from dataset. + // +kubebuilder:validation:Optional + DataSetParameters *string `json:"datasetParameters,omitempty" tf:"dataset_parameters,omitempty"` + + // The name for the Data Flow transformation. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service. + // +kubebuilder:validation:Optional + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type SourceInitParameters struct { + + // A dataset block as defined below. + DataSet *SourceDataSetInitParameters `json:"dataset,omitempty" tf:"dataset,omitempty"` + + // The description for the Data Flow Source. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A flowlet block as defined below. + Flowlet *SourceFlowletInitParameters `json:"flowlet,omitempty" tf:"flowlet,omitempty"` + + // A linked_service block as defined below. + LinkedService *SourceLinkedServiceInitParameters `json:"linkedService,omitempty" tf:"linked_service,omitempty"` + + // The name for the Data Flow Source. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A rejected_linked_service block as defined below. + RejectedLinkedService *SourceRejectedLinkedServiceInitParameters `json:"rejectedLinkedService,omitempty" tf:"rejected_linked_service,omitempty"` + + // A schema_linked_service block as defined below. + SchemaLinkedService *SourceSchemaLinkedServiceInitParameters `json:"schemaLinkedService,omitempty" tf:"schema_linked_service,omitempty"` +} + +type SourceLinkedServiceInitParameters struct { + + // The name for the Data Flow transformation. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type SourceLinkedServiceObservation struct { + + // The name for the Data Flow transformation. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type SourceLinkedServiceParameters struct { + + // The name for the Data Flow transformation. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service. + // +kubebuilder:validation:Optional + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type SourceObservation struct { + + // A dataset block as defined below. + DataSet *SourceDataSetObservation `json:"dataset,omitempty" tf:"dataset,omitempty"` + + // The description for the Data Flow Source. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A flowlet block as defined below. + Flowlet *SourceFlowletObservation `json:"flowlet,omitempty" tf:"flowlet,omitempty"` + + // A linked_service block as defined below. + LinkedService *SourceLinkedServiceObservation `json:"linkedService,omitempty" tf:"linked_service,omitempty"` + + // The name for the Data Flow Source. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A rejected_linked_service block as defined below. + RejectedLinkedService *SourceRejectedLinkedServiceObservation `json:"rejectedLinkedService,omitempty" tf:"rejected_linked_service,omitempty"` + + // A schema_linked_service block as defined below. 
+ SchemaLinkedService *SourceSchemaLinkedServiceObservation `json:"schemaLinkedService,omitempty" tf:"schema_linked_service,omitempty"` +} + +type SourceParameters struct { + + // A dataset block as defined below. + // +kubebuilder:validation:Optional + DataSet *SourceDataSetParameters `json:"dataset,omitempty" tf:"dataset,omitempty"` + + // The description for the Data Flow Source. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A flowlet block as defined below. + // +kubebuilder:validation:Optional + Flowlet *SourceFlowletParameters `json:"flowlet,omitempty" tf:"flowlet,omitempty"` + + // A linked_service block as defined below. + // +kubebuilder:validation:Optional + LinkedService *SourceLinkedServiceParameters `json:"linkedService,omitempty" tf:"linked_service,omitempty"` + + // The name for the Data Flow Source. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // A rejected_linked_service block as defined below. + // +kubebuilder:validation:Optional + RejectedLinkedService *SourceRejectedLinkedServiceParameters `json:"rejectedLinkedService,omitempty" tf:"rejected_linked_service,omitempty"` + + // A schema_linked_service block as defined below. + // +kubebuilder:validation:Optional + SchemaLinkedService *SourceSchemaLinkedServiceParameters `json:"schemaLinkedService,omitempty" tf:"schema_linked_service,omitempty"` +} + +type SourceRejectedLinkedServiceInitParameters struct { + + // The name for the Data Flow transformation. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type SourceRejectedLinkedServiceObservation struct { + + // The name for the Data Flow transformation. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type SourceRejectedLinkedServiceParameters struct { + + // The name for the Data Flow transformation. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service. + // +kubebuilder:validation:Optional + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type SourceSchemaLinkedServiceInitParameters struct { + + // The name for the Data Flow transformation. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type SourceSchemaLinkedServiceObservation struct { + + // The name for the Data Flow transformation. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type SourceSchemaLinkedServiceParameters struct { + + // The name for the Data Flow transformation. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service. + // +kubebuilder:validation:Optional + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type TransformationDataSetInitParameters struct { + + // The name for the Data Flow transformation. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type TransformationDataSetObservation struct { + + // The name for the Data Flow transformation. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type TransformationDataSetParameters struct { + + // The name for the Data Flow transformation. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service. + // +kubebuilder:validation:Optional + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type TransformationFlowletInitParameters struct { + + // Specifies the reference data flow parameters from dataset. + DataSetParameters *string `json:"datasetParameters,omitempty" tf:"dataset_parameters,omitempty"` + + // The name for the Data Flow transformation. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type TransformationFlowletObservation struct { + + // Specifies the reference data flow parameters from dataset. + DataSetParameters *string `json:"datasetParameters,omitempty" tf:"dataset_parameters,omitempty"` + + // The name for the Data Flow transformation. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service. 
+ // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type TransformationFlowletParameters struct { + + // Specifies the reference data flow parameters from dataset. + // +kubebuilder:validation:Optional + DataSetParameters *string `json:"datasetParameters,omitempty" tf:"dataset_parameters,omitempty"` + + // The name for the Data Flow transformation. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service. + // +kubebuilder:validation:Optional + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type TransformationInitParameters struct { + + // A dataset block as defined below. + DataSet *TransformationDataSetInitParameters `json:"dataset,omitempty" tf:"dataset,omitempty"` + + // The description for the Data Flow transformation. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A flowlet block as defined below. + Flowlet *TransformationFlowletInitParameters `json:"flowlet,omitempty" tf:"flowlet,omitempty"` + + // A linked_service block as defined below. + LinkedService *TransformationLinkedServiceInitParameters `json:"linkedService,omitempty" tf:"linked_service,omitempty"` + + // The name for the Data Flow transformation. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type TransformationLinkedServiceInitParameters struct { + + // The name for the Data Flow transformation. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type TransformationLinkedServiceObservation struct { + + // The name for the Data Flow transformation. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type TransformationLinkedServiceParameters struct { + + // The name for the Data Flow transformation. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service. + // +kubebuilder:validation:Optional + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type TransformationObservation struct { + + // A dataset block as defined below. + DataSet *TransformationDataSetObservation `json:"dataset,omitempty" tf:"dataset,omitempty"` + + // The description for the Data Flow transformation. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A flowlet block as defined below. + Flowlet *TransformationFlowletObservation `json:"flowlet,omitempty" tf:"flowlet,omitempty"` + + // A linked_service block as defined below. + LinkedService *TransformationLinkedServiceObservation `json:"linkedService,omitempty" tf:"linked_service,omitempty"` + + // The name for the Data Flow transformation. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type TransformationParameters struct { + + // A dataset block as defined below. + // +kubebuilder:validation:Optional + DataSet *TransformationDataSetParameters `json:"dataset,omitempty" tf:"dataset,omitempty"` + + // The description for the Data Flow transformation. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A flowlet block as defined below. 
+ // +kubebuilder:validation:Optional + Flowlet *TransformationFlowletParameters `json:"flowlet,omitempty" tf:"flowlet,omitempty"` + + // A linked_service block as defined below. + // +kubebuilder:validation:Optional + LinkedService *TransformationLinkedServiceParameters `json:"linkedService,omitempty" tf:"linked_service,omitempty"` + + // The name for the Data Flow transformation. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +// DataFlowSpec defines the desired state of DataFlow +type DataFlowSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider DataFlowParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider DataFlowInitParameters `json:"initProvider,omitempty"` +} + +// DataFlowStatus defines the observed state of DataFlow. +type DataFlowStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider DataFlowObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// DataFlow is the Schema for the DataFlows API. Manages a Data Flow inside an Azure Data Factory. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type DataFlow struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.sink) || (has(self.initProvider) && has(self.initProvider.sink))",message="spec.forProvider.sink is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.source) || (has(self.initProvider) && has(self.initProvider.source))",message="spec.forProvider.source is a required parameter" + Spec DataFlowSpec `json:"spec"` + Status DataFlowStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// DataFlowList contains a list of DataFlows +type DataFlowList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DataFlow `json:"items"` +} + +// Repository type metadata. +var ( + DataFlow_Kind = "DataFlow" + DataFlow_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: DataFlow_Kind}.String() + DataFlow_KindAPIVersion = DataFlow_Kind + "." 
+ CRDGroupVersion.String() + DataFlow_GroupVersionKind = CRDGroupVersion.WithKind(DataFlow_Kind) +) + +func init() { + SchemeBuilder.Register(&DataFlow{}, &DataFlowList{}) +} diff --git a/apis/datafactory/v1beta2/zz_datasetbinary_terraformed.go b/apis/datafactory/v1beta2/zz_datasetbinary_terraformed.go new file mode 100755 index 000000000..3995adde0 --- /dev/null +++ b/apis/datafactory/v1beta2/zz_datasetbinary_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this DataSetBinary +func (mg *DataSetBinary) GetTerraformResourceType() string { + return "azurerm_data_factory_dataset_binary" +} + +// GetConnectionDetailsMapping for this DataSetBinary +func (tr *DataSetBinary) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this DataSetBinary +func (tr *DataSetBinary) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this DataSetBinary +func (tr *DataSetBinary) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this DataSetBinary +func (tr *DataSetBinary) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this DataSetBinary +func (tr *DataSetBinary) GetParameters() (map[string]any, error) { + p, err := 
json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this DataSetBinary +func (tr *DataSetBinary) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this DataSetBinary +func (tr *DataSetBinary) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this DataSetBinary +func (tr *DataSetBinary) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this DataSetBinary using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *DataSetBinary) LateInitialize(attrs []byte) (bool, error) { + params := &DataSetBinaryParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *DataSetBinary) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/datafactory/v1beta2/zz_datasetbinary_types.go b/apis/datafactory/v1beta2/zz_datasetbinary_types.go new file mode 100755 index 000000000..8f4b150e0 --- /dev/null +++ b/apis/datafactory/v1beta2/zz_datasetbinary_types.go @@ -0,0 +1,433 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AzureBlobStorageLocationInitParameters struct { + + // The container on the Azure Blob Storage Account hosting the file. + Container *string `json:"container,omitempty" tf:"container,omitempty"` + + // Is the container using dynamic expression, function or system variables? Defaults to false. + DynamicContainerEnabled *bool `json:"dynamicContainerEnabled,omitempty" tf:"dynamic_container_enabled,omitempty"` + + // Is the filename using dynamic expression, function or system variables? Defaults to false. + DynamicFilenameEnabled *bool `json:"dynamicFilenameEnabled,omitempty" tf:"dynamic_filename_enabled,omitempty"` + + // Is the path using dynamic expression, function or system variables? Defaults to false. 
+ DynamicPathEnabled *bool `json:"dynamicPathEnabled,omitempty" tf:"dynamic_path_enabled,omitempty"` + + // The filename of the file in the blob container. + Filename *string `json:"filename,omitempty" tf:"filename,omitempty"` + + // The folder path to the file in the blob container. + Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type AzureBlobStorageLocationObservation struct { + + // The container on the Azure Blob Storage Account hosting the file. + Container *string `json:"container,omitempty" tf:"container,omitempty"` + + // Is the container using dynamic expression, function or system variables? Defaults to false. + DynamicContainerEnabled *bool `json:"dynamicContainerEnabled,omitempty" tf:"dynamic_container_enabled,omitempty"` + + // Is the filename using dynamic expression, function or system variables? Defaults to false. + DynamicFilenameEnabled *bool `json:"dynamicFilenameEnabled,omitempty" tf:"dynamic_filename_enabled,omitempty"` + + // Is the path using dynamic expression, function or system variables? Defaults to false. + DynamicPathEnabled *bool `json:"dynamicPathEnabled,omitempty" tf:"dynamic_path_enabled,omitempty"` + + // The filename of the file in the blob container. + Filename *string `json:"filename,omitempty" tf:"filename,omitempty"` + + // The folder path to the file in the blob container. + Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type AzureBlobStorageLocationParameters struct { + + // The container on the Azure Blob Storage Account hosting the file. + // +kubebuilder:validation:Optional + Container *string `json:"container" tf:"container,omitempty"` + + // Is the container using dynamic expression, function or system variables? Defaults to false. + // +kubebuilder:validation:Optional + DynamicContainerEnabled *bool `json:"dynamicContainerEnabled,omitempty" tf:"dynamic_container_enabled,omitempty"` + + // Is the filename using dynamic expression, function or system variables? Defaults to false. 
+ // +kubebuilder:validation:Optional + DynamicFilenameEnabled *bool `json:"dynamicFilenameEnabled,omitempty" tf:"dynamic_filename_enabled,omitempty"` + + // Is the path using dynamic expression, function or system variables? Defaults to false. + // +kubebuilder:validation:Optional + DynamicPathEnabled *bool `json:"dynamicPathEnabled,omitempty" tf:"dynamic_path_enabled,omitempty"` + + // The filename of the file in the blob container. + // +kubebuilder:validation:Optional + Filename *string `json:"filename,omitempty" tf:"filename,omitempty"` + + // The folder path to the file in the blob container. + // +kubebuilder:validation:Optional + Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type CompressionInitParameters struct { + + // The level of compression. Possible values are Fastest and Optimal. + Level *string `json:"level,omitempty" tf:"level,omitempty"` + + // The type of compression used during transport. Possible values are BZip2, Deflate, GZip, Tar, TarGZip and ZipDeflate. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type CompressionObservation struct { + + // The level of compression. Possible values are Fastest and Optimal. + Level *string `json:"level,omitempty" tf:"level,omitempty"` + + // The type of compression used during transport. Possible values are BZip2, Deflate, GZip, Tar, TarGZip and ZipDeflate. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type CompressionParameters struct { + + // The level of compression. Possible values are Fastest and Optimal. + // +kubebuilder:validation:Optional + Level *string `json:"level,omitempty" tf:"level,omitempty"` + + // The type of compression used during transport. Possible values are BZip2, Deflate, GZip, Tar, TarGZip and ZipDeflate. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type DataSetBinaryInitParameters struct { + + // A map of additional properties to associate with the Data Factory Binary Dataset. 
+ // +mapType=granular + AdditionalProperties map[string]*string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"` + + // List of tags that can be used for describing the Data Factory Binary Dataset. + Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // A azure_blob_storage_location block as defined below. + AzureBlobStorageLocation *AzureBlobStorageLocationInitParameters `json:"azureBlobStorageLocation,omitempty" tf:"azure_blob_storage_location,omitempty"` + + // A compression block as defined below. + Compression *CompressionInitParameters `json:"compression,omitempty" tf:"compression,omitempty"` + + // The description for the Data Factory Dataset. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The folder that this Dataset is in. If not specified, the Dataset will appear at the root level. + Folder *string `json:"folder,omitempty" tf:"folder,omitempty"` + + // A http_server_location block as defined below. + HTTPServerLocation *HTTPServerLocationInitParameters `json:"httpServerLocation,omitempty" tf:"http_server_location,omitempty"` + + // The Data Factory Linked Service name in which to associate the Binary Dataset with. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta1.LinkedServiceSFTP + LinkedServiceName *string `json:"linkedServiceName,omitempty" tf:"linked_service_name,omitempty"` + + // Reference to a LinkedServiceSFTP in datafactory to populate linkedServiceName. + // +kubebuilder:validation:Optional + LinkedServiceNameRef *v1.Reference `json:"linkedServiceNameRef,omitempty" tf:"-"` + + // Selector for a LinkedServiceSFTP in datafactory to populate linkedServiceName. + // +kubebuilder:validation:Optional + LinkedServiceNameSelector *v1.Selector `json:"linkedServiceNameSelector,omitempty" tf:"-"` + + // Specifies a list of parameters to associate with the Data Factory Binary Dataset. 
+ // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // A sftp_server_location block as defined below. + SFTPServerLocation *SFTPServerLocationInitParameters `json:"sftpServerLocation,omitempty" tf:"sftp_server_location,omitempty"` +} + +type DataSetBinaryObservation struct { + + // A map of additional properties to associate with the Data Factory Binary Dataset. + // +mapType=granular + AdditionalProperties map[string]*string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"` + + // List of tags that can be used for describing the Data Factory Binary Dataset. + Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // A azure_blob_storage_location block as defined below. + AzureBlobStorageLocation *AzureBlobStorageLocationObservation `json:"azureBlobStorageLocation,omitempty" tf:"azure_blob_storage_location,omitempty"` + + // A compression block as defined below. + Compression *CompressionObservation `json:"compression,omitempty" tf:"compression,omitempty"` + + // The Data Factory ID in which to associate the Linked Service with. Changing this forces a new resource. + DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` + + // The description for the Data Factory Dataset. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The folder that this Dataset is in. If not specified, the Dataset will appear at the root level. + Folder *string `json:"folder,omitempty" tf:"folder,omitempty"` + + // A http_server_location block as defined below. + HTTPServerLocation *HTTPServerLocationObservation `json:"httpServerLocation,omitempty" tf:"http_server_location,omitempty"` + + // The ID of the Data Factory Dataset. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The Data Factory Linked Service name in which to associate the Binary Dataset with. 
+ LinkedServiceName *string `json:"linkedServiceName,omitempty" tf:"linked_service_name,omitempty"` + + // Specifies a list of parameters to associate with the Data Factory Binary Dataset. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // A sftp_server_location block as defined below. + SFTPServerLocation *SFTPServerLocationObservation `json:"sftpServerLocation,omitempty" tf:"sftp_server_location,omitempty"` +} + +type DataSetBinaryParameters struct { + + // A map of additional properties to associate with the Data Factory Binary Dataset. + // +kubebuilder:validation:Optional + // +mapType=granular + AdditionalProperties map[string]*string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"` + + // List of tags that can be used for describing the Data Factory Binary Dataset. + // +kubebuilder:validation:Optional + Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // A azure_blob_storage_location block as defined below. + // +kubebuilder:validation:Optional + AzureBlobStorageLocation *AzureBlobStorageLocationParameters `json:"azureBlobStorageLocation,omitempty" tf:"azure_blob_storage_location,omitempty"` + + // A compression block as defined below. + // +kubebuilder:validation:Optional + Compression *CompressionParameters `json:"compression,omitempty" tf:"compression,omitempty"` + + // The Data Factory ID in which to associate the Linked Service with. Changing this forces a new resource. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta2.Factory + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` + + // Reference to a Factory in datafactory to populate dataFactoryId. 
+ // +kubebuilder:validation:Optional + DataFactoryIDRef *v1.Reference `json:"dataFactoryIdRef,omitempty" tf:"-"` + + // Selector for a Factory in datafactory to populate dataFactoryId. + // +kubebuilder:validation:Optional + DataFactoryIDSelector *v1.Selector `json:"dataFactoryIdSelector,omitempty" tf:"-"` + + // The description for the Data Factory Dataset. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The folder that this Dataset is in. If not specified, the Dataset will appear at the root level. + // +kubebuilder:validation:Optional + Folder *string `json:"folder,omitempty" tf:"folder,omitempty"` + + // A http_server_location block as defined below. + // +kubebuilder:validation:Optional + HTTPServerLocation *HTTPServerLocationParameters `json:"httpServerLocation,omitempty" tf:"http_server_location,omitempty"` + + // The Data Factory Linked Service name in which to associate the Binary Dataset with. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta1.LinkedServiceSFTP + // +kubebuilder:validation:Optional + LinkedServiceName *string `json:"linkedServiceName,omitempty" tf:"linked_service_name,omitempty"` + + // Reference to a LinkedServiceSFTP in datafactory to populate linkedServiceName. + // +kubebuilder:validation:Optional + LinkedServiceNameRef *v1.Reference `json:"linkedServiceNameRef,omitempty" tf:"-"` + + // Selector for a LinkedServiceSFTP in datafactory to populate linkedServiceName. + // +kubebuilder:validation:Optional + LinkedServiceNameSelector *v1.Selector `json:"linkedServiceNameSelector,omitempty" tf:"-"` + + // Specifies a list of parameters to associate with the Data Factory Binary Dataset. + // +kubebuilder:validation:Optional + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // A sftp_server_location block as defined below. 
+ // +kubebuilder:validation:Optional + SFTPServerLocation *SFTPServerLocationParameters `json:"sftpServerLocation,omitempty" tf:"sftp_server_location,omitempty"` +} + +type HTTPServerLocationInitParameters struct { + + // Is the filename using dynamic expression, function or system variables? Defaults to false. + DynamicFilenameEnabled *bool `json:"dynamicFilenameEnabled,omitempty" tf:"dynamic_filename_enabled,omitempty"` + + // Is the path using dynamic expression, function or system variables? Defaults to false. + DynamicPathEnabled *bool `json:"dynamicPathEnabled,omitempty" tf:"dynamic_path_enabled,omitempty"` + + // The filename of the file on the web server. + Filename *string `json:"filename,omitempty" tf:"filename,omitempty"` + + // The folder path to the file on the web server. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // The base URL to the web server hosting the file. + RelativeURL *string `json:"relativeUrl,omitempty" tf:"relative_url,omitempty"` +} + +type HTTPServerLocationObservation struct { + + // Is the filename using dynamic expression, function or system variables? Defaults to false. + DynamicFilenameEnabled *bool `json:"dynamicFilenameEnabled,omitempty" tf:"dynamic_filename_enabled,omitempty"` + + // Is the path using dynamic expression, function or system variables? Defaults to false. + DynamicPathEnabled *bool `json:"dynamicPathEnabled,omitempty" tf:"dynamic_path_enabled,omitempty"` + + // The filename of the file on the web server. + Filename *string `json:"filename,omitempty" tf:"filename,omitempty"` + + // The folder path to the file on the web server. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // The base URL to the web server hosting the file. + RelativeURL *string `json:"relativeUrl,omitempty" tf:"relative_url,omitempty"` +} + +type HTTPServerLocationParameters struct { + + // Is the filename using dynamic expression, function or system variables? Defaults to false. 
+ // +kubebuilder:validation:Optional + DynamicFilenameEnabled *bool `json:"dynamicFilenameEnabled,omitempty" tf:"dynamic_filename_enabled,omitempty"` + + // Is the path using dynamic expression, function or system variables? Defaults to false. + // +kubebuilder:validation:Optional + DynamicPathEnabled *bool `json:"dynamicPathEnabled,omitempty" tf:"dynamic_path_enabled,omitempty"` + + // The filename of the file on the web server. + // +kubebuilder:validation:Optional + Filename *string `json:"filename" tf:"filename,omitempty"` + + // The folder path to the file on the web server. + // +kubebuilder:validation:Optional + Path *string `json:"path" tf:"path,omitempty"` + + // The base URL to the web server hosting the file. + // +kubebuilder:validation:Optional + RelativeURL *string `json:"relativeUrl" tf:"relative_url,omitempty"` +} + +type SFTPServerLocationInitParameters struct { + + // Is the filename using dynamic expression, function or system variables? Defaults to false. + DynamicFilenameEnabled *bool `json:"dynamicFilenameEnabled,omitempty" tf:"dynamic_filename_enabled,omitempty"` + + // Is the path using dynamic expression, function or system variables? Defaults to false. + DynamicPathEnabled *bool `json:"dynamicPathEnabled,omitempty" tf:"dynamic_path_enabled,omitempty"` + + // The filename of the file on the SFTP server. + Filename *string `json:"filename,omitempty" tf:"filename,omitempty"` + + // The folder path to the file on the SFTP server. + Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type SFTPServerLocationObservation struct { + + // Is the filename using dynamic expression, function or system variables? Defaults to false. + DynamicFilenameEnabled *bool `json:"dynamicFilenameEnabled,omitempty" tf:"dynamic_filename_enabled,omitempty"` + + // Is the path using dynamic expression, function or system variables? Defaults to false. 
+ DynamicPathEnabled *bool `json:"dynamicPathEnabled,omitempty" tf:"dynamic_path_enabled,omitempty"` + + // The filename of the file on the SFTP server. + Filename *string `json:"filename,omitempty" tf:"filename,omitempty"` + + // The folder path to the file on the SFTP server. + Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type SFTPServerLocationParameters struct { + + // Is the filename using dynamic expression, function or system variables? Defaults to false. + // +kubebuilder:validation:Optional + DynamicFilenameEnabled *bool `json:"dynamicFilenameEnabled,omitempty" tf:"dynamic_filename_enabled,omitempty"` + + // Is the path using dynamic expression, function or system variables? Defaults to false. + // +kubebuilder:validation:Optional + DynamicPathEnabled *bool `json:"dynamicPathEnabled,omitempty" tf:"dynamic_path_enabled,omitempty"` + + // The filename of the file on the SFTP server. + // +kubebuilder:validation:Optional + Filename *string `json:"filename" tf:"filename,omitempty"` + + // The folder path to the file on the SFTP server. + // +kubebuilder:validation:Optional + Path *string `json:"path" tf:"path,omitempty"` +} + +// DataSetBinarySpec defines the desired state of DataSetBinary +type DataSetBinarySpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider DataSetBinaryParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider DataSetBinaryInitParameters `json:"initProvider,omitempty"` +} + +// DataSetBinaryStatus defines the observed state of DataSetBinary. +type DataSetBinaryStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider DataSetBinaryObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// DataSetBinary is the Schema for the DataSetBinarys API. Manages a Data Factory Binary Dataset inside an Azure Data Factory. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type DataSetBinary struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec DataSetBinarySpec `json:"spec"` + Status DataSetBinaryStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// DataSetBinaryList contains a list of DataSetBinarys +type DataSetBinaryList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DataSetBinary `json:"items"` +} + +// Repository type metadata. +var ( + DataSetBinary_Kind = "DataSetBinary" + DataSetBinary_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: DataSetBinary_Kind}.String() + DataSetBinary_KindAPIVersion = DataSetBinary_Kind + "." 
+ CRDGroupVersion.String() + DataSetBinary_GroupVersionKind = CRDGroupVersion.WithKind(DataSetBinary_Kind) +) + +func init() { + SchemeBuilder.Register(&DataSetBinary{}, &DataSetBinaryList{}) +} diff --git a/apis/datafactory/v1beta2/zz_datasetdelimitedtext_terraformed.go b/apis/datafactory/v1beta2/zz_datasetdelimitedtext_terraformed.go new file mode 100755 index 000000000..4076d6418 --- /dev/null +++ b/apis/datafactory/v1beta2/zz_datasetdelimitedtext_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this DataSetDelimitedText +func (mg *DataSetDelimitedText) GetTerraformResourceType() string { + return "azurerm_data_factory_dataset_delimited_text" +} + +// GetConnectionDetailsMapping for this DataSetDelimitedText +func (tr *DataSetDelimitedText) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this DataSetDelimitedText +func (tr *DataSetDelimitedText) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this DataSetDelimitedText +func (tr *DataSetDelimitedText) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this DataSetDelimitedText +func (tr *DataSetDelimitedText) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// 
GetParameters of this DataSetDelimitedText +func (tr *DataSetDelimitedText) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this DataSetDelimitedText +func (tr *DataSetDelimitedText) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this DataSetDelimitedText +func (tr *DataSetDelimitedText) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this DataSetDelimitedText +func (tr *DataSetDelimitedText) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this DataSetDelimitedText using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *DataSetDelimitedText) LateInitialize(attrs []byte) (bool, error) { + params := &DataSetDelimitedTextParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *DataSetDelimitedText) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/datafactory/v1beta2/zz_datasetdelimitedtext_types.go b/apis/datafactory/v1beta2/zz_datasetdelimitedtext_types.go new file mode 100755 index 000000000..8e3984b1e --- /dev/null +++ b/apis/datafactory/v1beta2/zz_datasetdelimitedtext_types.go @@ -0,0 +1,553 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AzureBlobFsLocationInitParameters struct { + + // Is the file_system using dynamic expression, function or system variables? Defaults to false. 
+ DynamicFileSystemEnabled *bool `json:"dynamicFileSystemEnabled,omitempty" tf:"dynamic_file_system_enabled,omitempty"` + + // Is the filename using dynamic expression, function or system variables? Defaults to false. + DynamicFilenameEnabled *bool `json:"dynamicFilenameEnabled,omitempty" tf:"dynamic_filename_enabled,omitempty"` + + // Is the path using dynamic expression, function or system variables? Defaults to false. + DynamicPathEnabled *bool `json:"dynamicPathEnabled,omitempty" tf:"dynamic_path_enabled,omitempty"` + + // The storage data lake gen2 file system on the Azure Blob Storage Account hosting the file. + FileSystem *string `json:"fileSystem,omitempty" tf:"file_system,omitempty"` + + // The filename of the file. + Filename *string `json:"filename,omitempty" tf:"filename,omitempty"` + + // The folder path to the file. + Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type AzureBlobFsLocationObservation struct { + + // Is the file_system using dynamic expression, function or system variables? Defaults to false. + DynamicFileSystemEnabled *bool `json:"dynamicFileSystemEnabled,omitempty" tf:"dynamic_file_system_enabled,omitempty"` + + // Is the filename using dynamic expression, function or system variables? Defaults to false. + DynamicFilenameEnabled *bool `json:"dynamicFilenameEnabled,omitempty" tf:"dynamic_filename_enabled,omitempty"` + + // Is the path using dynamic expression, function or system variables? Defaults to false. + DynamicPathEnabled *bool `json:"dynamicPathEnabled,omitempty" tf:"dynamic_path_enabled,omitempty"` + + // The storage data lake gen2 file system on the Azure Blob Storage Account hosting the file. + FileSystem *string `json:"fileSystem,omitempty" tf:"file_system,omitempty"` + + // The filename of the file. + Filename *string `json:"filename,omitempty" tf:"filename,omitempty"` + + // The folder path to the file. 
+ Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type AzureBlobFsLocationParameters struct { + + // Is the file_system using dynamic expression, function or system variables? Defaults to false. + // +kubebuilder:validation:Optional + DynamicFileSystemEnabled *bool `json:"dynamicFileSystemEnabled,omitempty" tf:"dynamic_file_system_enabled,omitempty"` + + // Is the filename using dynamic expression, function or system variables? Defaults to false. + // +kubebuilder:validation:Optional + DynamicFilenameEnabled *bool `json:"dynamicFilenameEnabled,omitempty" tf:"dynamic_filename_enabled,omitempty"` + + // Is the path using dynamic expression, function or system variables? Defaults to false. + // +kubebuilder:validation:Optional + DynamicPathEnabled *bool `json:"dynamicPathEnabled,omitempty" tf:"dynamic_path_enabled,omitempty"` + + // The storage data lake gen2 file system on the Azure Blob Storage Account hosting the file. + // +kubebuilder:validation:Optional + FileSystem *string `json:"fileSystem,omitempty" tf:"file_system,omitempty"` + + // The filename of the file. + // +kubebuilder:validation:Optional + Filename *string `json:"filename,omitempty" tf:"filename,omitempty"` + + // The folder path to the file. + // +kubebuilder:validation:Optional + Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type DataSetDelimitedTextAzureBlobStorageLocationInitParameters struct { + + // The container on the Azure Blob Storage Account hosting the file. + Container *string `json:"container,omitempty" tf:"container,omitempty"` + + // Is the container using dynamic expression, function or system variables? Defaults to false. + DynamicContainerEnabled *bool `json:"dynamicContainerEnabled,omitempty" tf:"dynamic_container_enabled,omitempty"` + + // Is the filename using dynamic expression, function or system variables? Defaults to false. 
+ DynamicFilenameEnabled *bool `json:"dynamicFilenameEnabled,omitempty" tf:"dynamic_filename_enabled,omitempty"` + + // Is the path using dynamic expression, function or system variables? Defaults to false. + DynamicPathEnabled *bool `json:"dynamicPathEnabled,omitempty" tf:"dynamic_path_enabled,omitempty"` + + // The filename of the file. + Filename *string `json:"filename,omitempty" tf:"filename,omitempty"` + + // The folder path to the file. This can be an empty string. + Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type DataSetDelimitedTextAzureBlobStorageLocationObservation struct { + + // The container on the Azure Blob Storage Account hosting the file. + Container *string `json:"container,omitempty" tf:"container,omitempty"` + + // Is the container using dynamic expression, function or system variables? Defaults to false. + DynamicContainerEnabled *bool `json:"dynamicContainerEnabled,omitempty" tf:"dynamic_container_enabled,omitempty"` + + // Is the filename using dynamic expression, function or system variables? Defaults to false. + DynamicFilenameEnabled *bool `json:"dynamicFilenameEnabled,omitempty" tf:"dynamic_filename_enabled,omitempty"` + + // Is the path using dynamic expression, function or system variables? Defaults to false. + DynamicPathEnabled *bool `json:"dynamicPathEnabled,omitempty" tf:"dynamic_path_enabled,omitempty"` + + // The filename of the file. + Filename *string `json:"filename,omitempty" tf:"filename,omitempty"` + + // The folder path to the file. This can be an empty string. + Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type DataSetDelimitedTextAzureBlobStorageLocationParameters struct { + + // The container on the Azure Blob Storage Account hosting the file. + // +kubebuilder:validation:Optional + Container *string `json:"container" tf:"container,omitempty"` + + // Is the container using dynamic expression, function or system variables? Defaults to false. 
+ // +kubebuilder:validation:Optional + DynamicContainerEnabled *bool `json:"dynamicContainerEnabled,omitempty" tf:"dynamic_container_enabled,omitempty"` + + // Is the filename using dynamic expression, function or system variables? Defaults to false. + // +kubebuilder:validation:Optional + DynamicFilenameEnabled *bool `json:"dynamicFilenameEnabled,omitempty" tf:"dynamic_filename_enabled,omitempty"` + + // Is the path using dynamic expression, function or system variables? Defaults to false. + // +kubebuilder:validation:Optional + DynamicPathEnabled *bool `json:"dynamicPathEnabled,omitempty" tf:"dynamic_path_enabled,omitempty"` + + // The filename of the file. + // +kubebuilder:validation:Optional + Filename *string `json:"filename,omitempty" tf:"filename,omitempty"` + + // The folder path to the file. This can be an empty string. + // +kubebuilder:validation:Optional + Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type DataSetDelimitedTextHTTPServerLocationInitParameters struct { + + // Is the filename using dynamic expression, function or system variables? Defaults to false. + DynamicFilenameEnabled *bool `json:"dynamicFilenameEnabled,omitempty" tf:"dynamic_filename_enabled,omitempty"` + + // Is the path using dynamic expression, function or system variables? Defaults to false. + DynamicPathEnabled *bool `json:"dynamicPathEnabled,omitempty" tf:"dynamic_path_enabled,omitempty"` + + // The filename of the file on the web server. + Filename *string `json:"filename,omitempty" tf:"filename,omitempty"` + + // The folder path to the file on the web server. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // The base URL to the web server hosting the file. + RelativeURL *string `json:"relativeUrl,omitempty" tf:"relative_url,omitempty"` +} + +type DataSetDelimitedTextHTTPServerLocationObservation struct { + + // Is the filename using dynamic expression, function or system variables? Defaults to false. 
+ DynamicFilenameEnabled *bool `json:"dynamicFilenameEnabled,omitempty" tf:"dynamic_filename_enabled,omitempty"` + + // Is the path using dynamic expression, function or system variables? Defaults to false. + DynamicPathEnabled *bool `json:"dynamicPathEnabled,omitempty" tf:"dynamic_path_enabled,omitempty"` + + // The filename of the file on the web server. + Filename *string `json:"filename,omitempty" tf:"filename,omitempty"` + + // The folder path to the file on the web server. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // The base URL to the web server hosting the file. + RelativeURL *string `json:"relativeUrl,omitempty" tf:"relative_url,omitempty"` +} + +type DataSetDelimitedTextHTTPServerLocationParameters struct { + + // Is the filename using dynamic expression, function or system variables? Defaults to false. + // +kubebuilder:validation:Optional + DynamicFilenameEnabled *bool `json:"dynamicFilenameEnabled,omitempty" tf:"dynamic_filename_enabled,omitempty"` + + // Is the path using dynamic expression, function or system variables? Defaults to false. + // +kubebuilder:validation:Optional + DynamicPathEnabled *bool `json:"dynamicPathEnabled,omitempty" tf:"dynamic_path_enabled,omitempty"` + + // The filename of the file on the web server. + // +kubebuilder:validation:Optional + Filename *string `json:"filename" tf:"filename,omitempty"` + + // The folder path to the file on the web server. + // +kubebuilder:validation:Optional + Path *string `json:"path" tf:"path,omitempty"` + + // The base URL to the web server hosting the file. + // +kubebuilder:validation:Optional + RelativeURL *string `json:"relativeUrl" tf:"relative_url,omitempty"` +} + +type DataSetDelimitedTextInitParameters struct { + + // A map of additional properties to associate with the Data Factory Dataset. 
+ // +mapType=granular + AdditionalProperties map[string]*string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"` + + // List of tags that can be used for describing the Data Factory Dataset. + Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // An azure_blob_fs_location block as defined below. + AzureBlobFsLocation *AzureBlobFsLocationInitParameters `json:"azureBlobFsLocation,omitempty" tf:"azure_blob_fs_location,omitempty"` + + // An azure_blob_storage_location block as defined below. + AzureBlobStorageLocation *DataSetDelimitedTextAzureBlobStorageLocationInitParameters `json:"azureBlobStorageLocation,omitempty" tf:"azure_blob_storage_location,omitempty"` + + // The column delimiter. Defaults to ,. + ColumnDelimiter *string `json:"columnDelimiter,omitempty" tf:"column_delimiter,omitempty"` + + // The compression codec used to read/write text files. Valid values are None, bzip2, gzip, deflate, ZipDeflate, TarGzip, Tar, snappy and lz4. Please note these values are case sensitive. + CompressionCodec *string `json:"compressionCodec,omitempty" tf:"compression_codec,omitempty"` + + // The compression ratio for the Data Factory Dataset. Valid values are Fastest or Optimal. Please note these values are case sensitive. + CompressionLevel *string `json:"compressionLevel,omitempty" tf:"compression_level,omitempty"` + + // The description for the Data Factory Dataset. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The encoding format for the file. + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + // The escape character. Defaults to \. + EscapeCharacter *string `json:"escapeCharacter,omitempty" tf:"escape_character,omitempty"` + + // When used as input, treat the first row of data as headers. When used as output, write the headers into the output as the first row of data. Defaults to false. 
+ FirstRowAsHeader *bool `json:"firstRowAsHeader,omitempty" tf:"first_row_as_header,omitempty"` + + // The folder that this Dataset is in. If not specified, the Dataset will appear at the root level. + Folder *string `json:"folder,omitempty" tf:"folder,omitempty"` + + // A http_server_location block as defined below. + HTTPServerLocation *DataSetDelimitedTextHTTPServerLocationInitParameters `json:"httpServerLocation,omitempty" tf:"http_server_location,omitempty"` + + // The Data Factory Linked Service name in which to associate the Dataset with. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta1.LinkedServiceWeb + LinkedServiceName *string `json:"linkedServiceName,omitempty" tf:"linked_service_name,omitempty"` + + // Reference to a LinkedServiceWeb in datafactory to populate linkedServiceName. + // +kubebuilder:validation:Optional + LinkedServiceNameRef *v1.Reference `json:"linkedServiceNameRef,omitempty" tf:"-"` + + // Selector for a LinkedServiceWeb in datafactory to populate linkedServiceName. + // +kubebuilder:validation:Optional + LinkedServiceNameSelector *v1.Selector `json:"linkedServiceNameSelector,omitempty" tf:"-"` + + // The null value string. Defaults to "". + NullValue *string `json:"nullValue,omitempty" tf:"null_value,omitempty"` + + // A map of parameters to associate with the Data Factory Dataset. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The quote character. Defaults to ". + QuoteCharacter *string `json:"quoteCharacter,omitempty" tf:"quote_character,omitempty"` + + // The row delimiter. Defaults to any of the following values on read: \r\n, \r, \n, and \n or \r\n on write by mapping data flow and Copy activity respectively. + RowDelimiter *string `json:"rowDelimiter,omitempty" tf:"row_delimiter,omitempty"` + + // A schema_column block as defined below. 
+ SchemaColumn []SchemaColumnInitParameters `json:"schemaColumn,omitempty" tf:"schema_column,omitempty"` +} + +type DataSetDelimitedTextObservation struct { + + // A map of additional properties to associate with the Data Factory Dataset. + // +mapType=granular + AdditionalProperties map[string]*string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"` + + // List of tags that can be used for describing the Data Factory Dataset. + Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // An azure_blob_fs_location block as defined below. + AzureBlobFsLocation *AzureBlobFsLocationObservation `json:"azureBlobFsLocation,omitempty" tf:"azure_blob_fs_location,omitempty"` + + // An azure_blob_storage_location block as defined below. + AzureBlobStorageLocation *DataSetDelimitedTextAzureBlobStorageLocationObservation `json:"azureBlobStorageLocation,omitempty" tf:"azure_blob_storage_location,omitempty"` + + // The column delimiter. Defaults to ,. + ColumnDelimiter *string `json:"columnDelimiter,omitempty" tf:"column_delimiter,omitempty"` + + // The compression codec used to read/write text files. Valid values are None, bzip2, gzip, deflate, ZipDeflate, TarGzip, Tar, snappy and lz4. Please note these values are case sensitive. + CompressionCodec *string `json:"compressionCodec,omitempty" tf:"compression_codec,omitempty"` + + // The compression ratio for the Data Factory Dataset. Valid values are Fastest or Optimal. Please note these values are case sensitive. + CompressionLevel *string `json:"compressionLevel,omitempty" tf:"compression_level,omitempty"` + + // The Data Factory ID in which to associate the Linked Service with. Changing this forces a new resource. + DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` + + // The description for the Data Factory Dataset. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The encoding format for the file. 
+ Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + // The escape character. Defaults to \. + EscapeCharacter *string `json:"escapeCharacter,omitempty" tf:"escape_character,omitempty"` + + // When used as input, treat the first row of data as headers. When used as output, write the headers into the output as the first row of data. Defaults to false. + FirstRowAsHeader *bool `json:"firstRowAsHeader,omitempty" tf:"first_row_as_header,omitempty"` + + // The folder that this Dataset is in. If not specified, the Dataset will appear at the root level. + Folder *string `json:"folder,omitempty" tf:"folder,omitempty"` + + // A http_server_location block as defined below. + HTTPServerLocation *DataSetDelimitedTextHTTPServerLocationObservation `json:"httpServerLocation,omitempty" tf:"http_server_location,omitempty"` + + // The ID of the Data Factory Dataset. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The Data Factory Linked Service name in which to associate the Dataset with. + LinkedServiceName *string `json:"linkedServiceName,omitempty" tf:"linked_service_name,omitempty"` + + // The null value string. Defaults to "". + NullValue *string `json:"nullValue,omitempty" tf:"null_value,omitempty"` + + // A map of parameters to associate with the Data Factory Dataset. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The quote character. Defaults to ". + QuoteCharacter *string `json:"quoteCharacter,omitempty" tf:"quote_character,omitempty"` + + // The row delimiter. Defaults to any of the following values on read: \r\n, \r, \n, and \n or \r\n on write by mapping data flow and Copy activity respectively. + RowDelimiter *string `json:"rowDelimiter,omitempty" tf:"row_delimiter,omitempty"` + + // A schema_column block as defined below. 
+ SchemaColumn []SchemaColumnObservation `json:"schemaColumn,omitempty" tf:"schema_column,omitempty"` +} + +type DataSetDelimitedTextParameters struct { + + // A map of additional properties to associate with the Data Factory Dataset. + // +kubebuilder:validation:Optional + // +mapType=granular + AdditionalProperties map[string]*string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"` + + // List of tags that can be used for describing the Data Factory Dataset. + // +kubebuilder:validation:Optional + Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // An azure_blob_fs_location block as defined below. + // +kubebuilder:validation:Optional + AzureBlobFsLocation *AzureBlobFsLocationParameters `json:"azureBlobFsLocation,omitempty" tf:"azure_blob_fs_location,omitempty"` + + // An azure_blob_storage_location block as defined below. + // +kubebuilder:validation:Optional + AzureBlobStorageLocation *DataSetDelimitedTextAzureBlobStorageLocationParameters `json:"azureBlobStorageLocation,omitempty" tf:"azure_blob_storage_location,omitempty"` + + // The column delimiter. Defaults to ,. + // +kubebuilder:validation:Optional + ColumnDelimiter *string `json:"columnDelimiter,omitempty" tf:"column_delimiter,omitempty"` + + // The compression codec used to read/write text files. Valid values are None, bzip2, gzip, deflate, ZipDeflate, TarGzip, Tar, snappy and lz4. Please note these values are case sensitive. + // +kubebuilder:validation:Optional + CompressionCodec *string `json:"compressionCodec,omitempty" tf:"compression_codec,omitempty"` + + // The compression ratio for the Data Factory Dataset. Valid values are Fastest or Optimal. Please note these values are case sensitive. + // +kubebuilder:validation:Optional + CompressionLevel *string `json:"compressionLevel,omitempty" tf:"compression_level,omitempty"` + + // The Data Factory ID in which to associate the Linked Service with. Changing this forces a new resource. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta2.Factory + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` + + // Reference to a Factory in datafactory to populate dataFactoryId. + // +kubebuilder:validation:Optional + DataFactoryIDRef *v1.Reference `json:"dataFactoryIdRef,omitempty" tf:"-"` + + // Selector for a Factory in datafactory to populate dataFactoryId. + // +kubebuilder:validation:Optional + DataFactoryIDSelector *v1.Selector `json:"dataFactoryIdSelector,omitempty" tf:"-"` + + // The description for the Data Factory Dataset. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The encoding format for the file. + // +kubebuilder:validation:Optional + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + // The escape character. Defaults to \. + // +kubebuilder:validation:Optional + EscapeCharacter *string `json:"escapeCharacter,omitempty" tf:"escape_character,omitempty"` + + // When used as input, treat the first row of data as headers. When used as output, write the headers into the output as the first row of data. Defaults to false. + // +kubebuilder:validation:Optional + FirstRowAsHeader *bool `json:"firstRowAsHeader,omitempty" tf:"first_row_as_header,omitempty"` + + // The folder that this Dataset is in. If not specified, the Dataset will appear at the root level. + // +kubebuilder:validation:Optional + Folder *string `json:"folder,omitempty" tf:"folder,omitempty"` + + // A http_server_location block as defined below. 
+ // +kubebuilder:validation:Optional + HTTPServerLocation *DataSetDelimitedTextHTTPServerLocationParameters `json:"httpServerLocation,omitempty" tf:"http_server_location,omitempty"` + + // The Data Factory Linked Service name in which to associate the Dataset with. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta1.LinkedServiceWeb + // +kubebuilder:validation:Optional + LinkedServiceName *string `json:"linkedServiceName,omitempty" tf:"linked_service_name,omitempty"` + + // Reference to a LinkedServiceWeb in datafactory to populate linkedServiceName. + // +kubebuilder:validation:Optional + LinkedServiceNameRef *v1.Reference `json:"linkedServiceNameRef,omitempty" tf:"-"` + + // Selector for a LinkedServiceWeb in datafactory to populate linkedServiceName. + // +kubebuilder:validation:Optional + LinkedServiceNameSelector *v1.Selector `json:"linkedServiceNameSelector,omitempty" tf:"-"` + + // The null value string. Defaults to "". + // +kubebuilder:validation:Optional + NullValue *string `json:"nullValue,omitempty" tf:"null_value,omitempty"` + + // A map of parameters to associate with the Data Factory Dataset. + // +kubebuilder:validation:Optional + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The quote character. Defaults to ". + // +kubebuilder:validation:Optional + QuoteCharacter *string `json:"quoteCharacter,omitempty" tf:"quote_character,omitempty"` + + // The row delimiter. Defaults to any of the following values on read: \r\n, \r, \n, and \n or \r\n on write by mapping data flow and Copy activity respectively. + // +kubebuilder:validation:Optional + RowDelimiter *string `json:"rowDelimiter,omitempty" tf:"row_delimiter,omitempty"` + + // A schema_column block as defined below. 
+ // +kubebuilder:validation:Optional + SchemaColumn []SchemaColumnParameters `json:"schemaColumn,omitempty" tf:"schema_column,omitempty"` +} + +type SchemaColumnInitParameters struct { + + // The description of the column. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The name of the column. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Type of the column. Valid values are Byte, Byte[], Boolean, Date, DateTime,DateTimeOffset, Decimal, Double, Guid, Int16, Int32, Int64, Single, String, TimeSpan. Please note these values are case sensitive. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type SchemaColumnObservation struct { + + // The description of the column. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The name of the column. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Type of the column. Valid values are Byte, Byte[], Boolean, Date, DateTime,DateTimeOffset, Decimal, Double, Guid, Int16, Int32, Int64, Single, String, TimeSpan. Please note these values are case sensitive. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type SchemaColumnParameters struct { + + // The description of the column. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The name of the column. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Type of the column. Valid values are Byte, Byte[], Boolean, Date, DateTime,DateTimeOffset, Decimal, Double, Guid, Int16, Int32, Int64, Single, String, TimeSpan. Please note these values are case sensitive. 
+ // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +// DataSetDelimitedTextSpec defines the desired state of DataSetDelimitedText +type DataSetDelimitedTextSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider DataSetDelimitedTextParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider DataSetDelimitedTextInitParameters `json:"initProvider,omitempty"` +} + +// DataSetDelimitedTextStatus defines the observed state of DataSetDelimitedText. +type DataSetDelimitedTextStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider DataSetDelimitedTextObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// DataSetDelimitedText is the Schema for the DataSetDelimitedTexts API. Manages an Azure Delimited Text Dataset inside an Azure Data Factory. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type DataSetDelimitedText struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec DataSetDelimitedTextSpec `json:"spec"` + Status DataSetDelimitedTextStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// DataSetDelimitedTextList contains a list of DataSetDelimitedTexts +type DataSetDelimitedTextList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DataSetDelimitedText `json:"items"` +} + +// Repository type metadata. +var ( + DataSetDelimitedText_Kind = "DataSetDelimitedText" + DataSetDelimitedText_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: DataSetDelimitedText_Kind}.String() + DataSetDelimitedText_KindAPIVersion = DataSetDelimitedText_Kind + "." + CRDGroupVersion.String() + DataSetDelimitedText_GroupVersionKind = CRDGroupVersion.WithKind(DataSetDelimitedText_Kind) +) + +func init() { + SchemeBuilder.Register(&DataSetDelimitedText{}, &DataSetDelimitedTextList{}) +} diff --git a/apis/datafactory/v1beta2/zz_datasetjson_terraformed.go b/apis/datafactory/v1beta2/zz_datasetjson_terraformed.go new file mode 100755 index 000000000..7d3f17683 --- /dev/null +++ b/apis/datafactory/v1beta2/zz_datasetjson_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this DataSetJSON +func (mg *DataSetJSON) GetTerraformResourceType() string { + return "azurerm_data_factory_dataset_json" +} + +// GetConnectionDetailsMapping for this DataSetJSON +func (tr *DataSetJSON) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this DataSetJSON +func (tr *DataSetJSON) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this DataSetJSON +func (tr *DataSetJSON) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this DataSetJSON +func (tr *DataSetJSON) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this DataSetJSON +func (tr *DataSetJSON) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this DataSetJSON +func (tr *DataSetJSON) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this DataSetJSON +func (tr *DataSetJSON) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := 
map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this DataSetJSON
+func (tr *DataSetJSON) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this DataSetJSON using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *DataSetJSON) LateInitialize(attrs []byte) (bool, error) {
+	params := &DataSetJSONParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *DataSetJSON) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/datafactory/v1beta2/zz_datasetjson_types.go b/apis/datafactory/v1beta2/zz_datasetjson_types.go new file mode 100755 index 000000000..02b7ac80e --- /dev/null +++ b/apis/datafactory/v1beta2/zz_datasetjson_types.go @@ -0,0 +1,394 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DataSetJSONAzureBlobStorageLocationInitParameters struct { + + // The container on the Azure Blob Storage Account hosting the file. + Container *string `json:"container,omitempty" tf:"container,omitempty"` + + // Is the container using dynamic expression, function or system variables? Defaults to false. + DynamicContainerEnabled *bool `json:"dynamicContainerEnabled,omitempty" tf:"dynamic_container_enabled,omitempty"` + + // Is the filename using dynamic expression, function or system variables? Defaults to false. + DynamicFilenameEnabled *bool `json:"dynamicFilenameEnabled,omitempty" tf:"dynamic_filename_enabled,omitempty"` + + // Is the path using dynamic expression, function or system variables? Defaults to false. + DynamicPathEnabled *bool `json:"dynamicPathEnabled,omitempty" tf:"dynamic_path_enabled,omitempty"` + + // The filename of the file on the web server. + Filename *string `json:"filename,omitempty" tf:"filename,omitempty"` + + // The folder path to the file on the web server. 
+ Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type DataSetJSONAzureBlobStorageLocationObservation struct { + + // The container on the Azure Blob Storage Account hosting the file. + Container *string `json:"container,omitempty" tf:"container,omitempty"` + + // Is the container using dynamic expression, function or system variables? Defaults to false. + DynamicContainerEnabled *bool `json:"dynamicContainerEnabled,omitempty" tf:"dynamic_container_enabled,omitempty"` + + // Is the filename using dynamic expression, function or system variables? Defaults to false. + DynamicFilenameEnabled *bool `json:"dynamicFilenameEnabled,omitempty" tf:"dynamic_filename_enabled,omitempty"` + + // Is the path using dynamic expression, function or system variables? Defaults to false. + DynamicPathEnabled *bool `json:"dynamicPathEnabled,omitempty" tf:"dynamic_path_enabled,omitempty"` + + // The filename of the file on the web server. + Filename *string `json:"filename,omitempty" tf:"filename,omitempty"` + + // The folder path to the file on the web server. + Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type DataSetJSONAzureBlobStorageLocationParameters struct { + + // The container on the Azure Blob Storage Account hosting the file. + // +kubebuilder:validation:Optional + Container *string `json:"container" tf:"container,omitempty"` + + // Is the container using dynamic expression, function or system variables? Defaults to false. + // +kubebuilder:validation:Optional + DynamicContainerEnabled *bool `json:"dynamicContainerEnabled,omitempty" tf:"dynamic_container_enabled,omitempty"` + + // Is the filename using dynamic expression, function or system variables? Defaults to false. + // +kubebuilder:validation:Optional + DynamicFilenameEnabled *bool `json:"dynamicFilenameEnabled,omitempty" tf:"dynamic_filename_enabled,omitempty"` + + // Is the path using dynamic expression, function or system variables? Defaults to false. 
+ // +kubebuilder:validation:Optional + DynamicPathEnabled *bool `json:"dynamicPathEnabled,omitempty" tf:"dynamic_path_enabled,omitempty"` + + // The filename of the file on the web server. + // +kubebuilder:validation:Optional + Filename *string `json:"filename" tf:"filename,omitempty"` + + // The folder path to the file on the web server. + // +kubebuilder:validation:Optional + Path *string `json:"path" tf:"path,omitempty"` +} + +type DataSetJSONHTTPServerLocationInitParameters struct { + + // Is the filename using dynamic expression, function or system variables? Defaults to false. + DynamicFilenameEnabled *bool `json:"dynamicFilenameEnabled,omitempty" tf:"dynamic_filename_enabled,omitempty"` + + // Is the path using dynamic expression, function or system variables? Defaults to false. + DynamicPathEnabled *bool `json:"dynamicPathEnabled,omitempty" tf:"dynamic_path_enabled,omitempty"` + + // The filename of the file on the web server. + Filename *string `json:"filename,omitempty" tf:"filename,omitempty"` + + // The folder path to the file on the web server. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // The base URL to the web server hosting the file. + RelativeURL *string `json:"relativeUrl,omitempty" tf:"relative_url,omitempty"` +} + +type DataSetJSONHTTPServerLocationObservation struct { + + // Is the filename using dynamic expression, function or system variables? Defaults to false. + DynamicFilenameEnabled *bool `json:"dynamicFilenameEnabled,omitempty" tf:"dynamic_filename_enabled,omitempty"` + + // Is the path using dynamic expression, function or system variables? Defaults to false. + DynamicPathEnabled *bool `json:"dynamicPathEnabled,omitempty" tf:"dynamic_path_enabled,omitempty"` + + // The filename of the file on the web server. + Filename *string `json:"filename,omitempty" tf:"filename,omitempty"` + + // The folder path to the file on the web server. 
+ Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // The base URL to the web server hosting the file. + RelativeURL *string `json:"relativeUrl,omitempty" tf:"relative_url,omitempty"` +} + +type DataSetJSONHTTPServerLocationParameters struct { + + // Is the filename using dynamic expression, function or system variables? Defaults to false. + // +kubebuilder:validation:Optional + DynamicFilenameEnabled *bool `json:"dynamicFilenameEnabled,omitempty" tf:"dynamic_filename_enabled,omitempty"` + + // Is the path using dynamic expression, function or system variables? Defaults to false. + // +kubebuilder:validation:Optional + DynamicPathEnabled *bool `json:"dynamicPathEnabled,omitempty" tf:"dynamic_path_enabled,omitempty"` + + // The filename of the file on the web server. + // +kubebuilder:validation:Optional + Filename *string `json:"filename" tf:"filename,omitempty"` + + // The folder path to the file on the web server. + // +kubebuilder:validation:Optional + Path *string `json:"path" tf:"path,omitempty"` + + // The base URL to the web server hosting the file. + // +kubebuilder:validation:Optional + RelativeURL *string `json:"relativeUrl" tf:"relative_url,omitempty"` +} + +type DataSetJSONInitParameters struct { + + // A map of additional properties to associate with the Data Factory Dataset. + // +mapType=granular + AdditionalProperties map[string]*string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"` + + // List of tags that can be used for describing the Data Factory Dataset. + Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // A azure_blob_storage_location block as defined below. + AzureBlobStorageLocation *DataSetJSONAzureBlobStorageLocationInitParameters `json:"azureBlobStorageLocation,omitempty" tf:"azure_blob_storage_location,omitempty"` + + // The description for the Data Factory Dataset. 
+ Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The encoding format for the file. + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + // The folder that this Dataset is in. If not specified, the Dataset will appear at the root level. + Folder *string `json:"folder,omitempty" tf:"folder,omitempty"` + + // A http_server_location block as defined below. + HTTPServerLocation *DataSetJSONHTTPServerLocationInitParameters `json:"httpServerLocation,omitempty" tf:"http_server_location,omitempty"` + + // The Data Factory Linked Service name in which to associate the Dataset with. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta1.LinkedServiceWeb + LinkedServiceName *string `json:"linkedServiceName,omitempty" tf:"linked_service_name,omitempty"` + + // Reference to a LinkedServiceWeb in datafactory to populate linkedServiceName. + // +kubebuilder:validation:Optional + LinkedServiceNameRef *v1.Reference `json:"linkedServiceNameRef,omitempty" tf:"-"` + + // Selector for a LinkedServiceWeb in datafactory to populate linkedServiceName. + // +kubebuilder:validation:Optional + LinkedServiceNameSelector *v1.Selector `json:"linkedServiceNameSelector,omitempty" tf:"-"` + + // A map of parameters to associate with the Data Factory Dataset. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // A schema_column block as defined below. + SchemaColumn []DataSetJSONSchemaColumnInitParameters `json:"schemaColumn,omitempty" tf:"schema_column,omitempty"` +} + +type DataSetJSONObservation struct { + + // A map of additional properties to associate with the Data Factory Dataset. + // +mapType=granular + AdditionalProperties map[string]*string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"` + + // List of tags that can be used for describing the Data Factory Dataset. 
+ Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // A azure_blob_storage_location block as defined below. + AzureBlobStorageLocation *DataSetJSONAzureBlobStorageLocationObservation `json:"azureBlobStorageLocation,omitempty" tf:"azure_blob_storage_location,omitempty"` + + // The Data Factory ID in which to associate the Linked Service with. Changing this forces a new resource. + DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` + + // The description for the Data Factory Dataset. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The encoding format for the file. + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + // The folder that this Dataset is in. If not specified, the Dataset will appear at the root level. + Folder *string `json:"folder,omitempty" tf:"folder,omitempty"` + + // A http_server_location block as defined below. + HTTPServerLocation *DataSetJSONHTTPServerLocationObservation `json:"httpServerLocation,omitempty" tf:"http_server_location,omitempty"` + + // The ID of the Data Factory Dataset. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The Data Factory Linked Service name in which to associate the Dataset with. + LinkedServiceName *string `json:"linkedServiceName,omitempty" tf:"linked_service_name,omitempty"` + + // A map of parameters to associate with the Data Factory Dataset. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // A schema_column block as defined below. + SchemaColumn []DataSetJSONSchemaColumnObservation `json:"schemaColumn,omitempty" tf:"schema_column,omitempty"` +} + +type DataSetJSONParameters struct { + + // A map of additional properties to associate with the Data Factory Dataset. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + AdditionalProperties map[string]*string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"` + + // List of tags that can be used for describing the Data Factory Dataset. + // +kubebuilder:validation:Optional + Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // A azure_blob_storage_location block as defined below. + // +kubebuilder:validation:Optional + AzureBlobStorageLocation *DataSetJSONAzureBlobStorageLocationParameters `json:"azureBlobStorageLocation,omitempty" tf:"azure_blob_storage_location,omitempty"` + + // The Data Factory ID in which to associate the Linked Service with. Changing this forces a new resource. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta2.Factory + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` + + // Reference to a Factory in datafactory to populate dataFactoryId. + // +kubebuilder:validation:Optional + DataFactoryIDRef *v1.Reference `json:"dataFactoryIdRef,omitempty" tf:"-"` + + // Selector for a Factory in datafactory to populate dataFactoryId. + // +kubebuilder:validation:Optional + DataFactoryIDSelector *v1.Selector `json:"dataFactoryIdSelector,omitempty" tf:"-"` + + // The description for the Data Factory Dataset. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The encoding format for the file. + // +kubebuilder:validation:Optional + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + // The folder that this Dataset is in. If not specified, the Dataset will appear at the root level. 
+ // +kubebuilder:validation:Optional + Folder *string `json:"folder,omitempty" tf:"folder,omitempty"` + + // A http_server_location block as defined below. + // +kubebuilder:validation:Optional + HTTPServerLocation *DataSetJSONHTTPServerLocationParameters `json:"httpServerLocation,omitempty" tf:"http_server_location,omitempty"` + + // The Data Factory Linked Service name in which to associate the Dataset with. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta1.LinkedServiceWeb + // +kubebuilder:validation:Optional + LinkedServiceName *string `json:"linkedServiceName,omitempty" tf:"linked_service_name,omitempty"` + + // Reference to a LinkedServiceWeb in datafactory to populate linkedServiceName. + // +kubebuilder:validation:Optional + LinkedServiceNameRef *v1.Reference `json:"linkedServiceNameRef,omitempty" tf:"-"` + + // Selector for a LinkedServiceWeb in datafactory to populate linkedServiceName. + // +kubebuilder:validation:Optional + LinkedServiceNameSelector *v1.Selector `json:"linkedServiceNameSelector,omitempty" tf:"-"` + + // A map of parameters to associate with the Data Factory Dataset. + // +kubebuilder:validation:Optional + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // A schema_column block as defined below. + // +kubebuilder:validation:Optional + SchemaColumn []DataSetJSONSchemaColumnParameters `json:"schemaColumn,omitempty" tf:"schema_column,omitempty"` +} + +type DataSetJSONSchemaColumnInitParameters struct { + + // The description of the column. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The name of the column. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Type of the column. Valid values are Byte, Byte[], Boolean, Date, DateTime,DateTimeOffset, Decimal, Double, Guid, Int16, Int32, Int64, Single, String, TimeSpan. Please note these values are case sensitive. 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type DataSetJSONSchemaColumnObservation struct { + + // The description of the column. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The name of the column. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Type of the column. Valid values are Byte, Byte[], Boolean, Date, DateTime,DateTimeOffset, Decimal, Double, Guid, Int16, Int32, Int64, Single, String, TimeSpan. Please note these values are case sensitive. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type DataSetJSONSchemaColumnParameters struct { + + // The description of the column. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The name of the column. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Type of the column. Valid values are Byte, Byte[], Boolean, Date, DateTime,DateTimeOffset, Decimal, Double, Guid, Int16, Int32, Int64, Single, String, TimeSpan. Please note these values are case sensitive. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +// DataSetJSONSpec defines the desired state of DataSetJSON +type DataSetJSONSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider DataSetJSONParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider DataSetJSONInitParameters `json:"initProvider,omitempty"` +} + +// DataSetJSONStatus defines the observed state of DataSetJSON. +type DataSetJSONStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider DataSetJSONObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// DataSetJSON is the Schema for the DataSetJSONs API. Manages an Azure JSON Dataset inside an Azure Data Factory. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type DataSetJSON struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec DataSetJSONSpec `json:"spec"` + Status DataSetJSONStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// DataSetJSONList contains a list of DataSetJSONs +type DataSetJSONList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DataSetJSON `json:"items"` +} + +// Repository type metadata. +var ( + DataSetJSON_Kind = "DataSetJSON" + DataSetJSON_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: DataSetJSON_Kind}.String() + DataSetJSON_KindAPIVersion = DataSetJSON_Kind + "." 
+ CRDGroupVersion.String() + DataSetJSON_GroupVersionKind = CRDGroupVersion.WithKind(DataSetJSON_Kind) +) + +func init() { + SchemeBuilder.Register(&DataSetJSON{}, &DataSetJSONList{}) +} diff --git a/apis/datafactory/v1beta2/zz_datasetparquet_terraformed.go b/apis/datafactory/v1beta2/zz_datasetparquet_terraformed.go new file mode 100755 index 000000000..9f318c347 --- /dev/null +++ b/apis/datafactory/v1beta2/zz_datasetparquet_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this DataSetParquet +func (mg *DataSetParquet) GetTerraformResourceType() string { + return "azurerm_data_factory_dataset_parquet" +} + +// GetConnectionDetailsMapping for this DataSetParquet +func (tr *DataSetParquet) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this DataSetParquet +func (tr *DataSetParquet) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this DataSetParquet +func (tr *DataSetParquet) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this DataSetParquet +func (tr *DataSetParquet) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this DataSetParquet +func (tr *DataSetParquet) GetParameters() (map[string]any, error) 
{ + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this DataSetParquet +func (tr *DataSetParquet) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this DataSetParquet +func (tr *DataSetParquet) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this DataSetParquet +func (tr *DataSetParquet) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this DataSetParquet using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *DataSetParquet) LateInitialize(attrs []byte) (bool, error) { + params := &DataSetParquetParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *DataSetParquet) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/datafactory/v1beta2/zz_datasetparquet_types.go b/apis/datafactory/v1beta2/zz_datasetparquet_types.go new file mode 100755 index 000000000..123d2adb5 --- /dev/null +++ b/apis/datafactory/v1beta2/zz_datasetparquet_types.go @@ -0,0 +1,483 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DataSetParquetAzureBlobFsLocationInitParameters struct { + + // Is the file_system using dynamic expression, function or system variables? Defaults to false. + DynamicFileSystemEnabled *bool `json:"dynamicFileSystemEnabled,omitempty" tf:"dynamic_file_system_enabled,omitempty"` + + // Is the filename using dynamic expression, function or system variables? Defaults to false. + DynamicFilenameEnabled *bool `json:"dynamicFilenameEnabled,omitempty" tf:"dynamic_filename_enabled,omitempty"` + + // Is the path using dynamic expression, function or system variables? Defaults to false. 
+ DynamicPathEnabled *bool `json:"dynamicPathEnabled,omitempty" tf:"dynamic_path_enabled,omitempty"` + + // The container on the Azure Data Lake Storage Account hosting the file. + FileSystem *string `json:"fileSystem,omitempty" tf:"file_system,omitempty"` + + // The filename of the file on the Azure Data Lake Storage Account. + Filename *string `json:"filename,omitempty" tf:"filename,omitempty"` + + // The folder path to the file on the Azure Data Lake Storage Account. + Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type DataSetParquetAzureBlobFsLocationObservation struct { + + // Is the file_system using dynamic expression, function or system variables? Defaults to false. + DynamicFileSystemEnabled *bool `json:"dynamicFileSystemEnabled,omitempty" tf:"dynamic_file_system_enabled,omitempty"` + + // Is the filename using dynamic expression, function or system variables? Defaults to false. + DynamicFilenameEnabled *bool `json:"dynamicFilenameEnabled,omitempty" tf:"dynamic_filename_enabled,omitempty"` + + // Is the path using dynamic expression, function or system variables? Defaults to false. + DynamicPathEnabled *bool `json:"dynamicPathEnabled,omitempty" tf:"dynamic_path_enabled,omitempty"` + + // The container on the Azure Data Lake Storage Account hosting the file. + FileSystem *string `json:"fileSystem,omitempty" tf:"file_system,omitempty"` + + // The filename of the file on the Azure Data Lake Storage Account. + Filename *string `json:"filename,omitempty" tf:"filename,omitempty"` + + // The folder path to the file on the Azure Data Lake Storage Account. + Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type DataSetParquetAzureBlobFsLocationParameters struct { + + // Is the file_system using dynamic expression, function or system variables? Defaults to false. 
+ // +kubebuilder:validation:Optional + DynamicFileSystemEnabled *bool `json:"dynamicFileSystemEnabled,omitempty" tf:"dynamic_file_system_enabled,omitempty"` + + // Is the filename using dynamic expression, function or system variables? Defaults to false. + // +kubebuilder:validation:Optional + DynamicFilenameEnabled *bool `json:"dynamicFilenameEnabled,omitempty" tf:"dynamic_filename_enabled,omitempty"` + + // Is the path using dynamic expression, function or system variables? Defaults to false. + // +kubebuilder:validation:Optional + DynamicPathEnabled *bool `json:"dynamicPathEnabled,omitempty" tf:"dynamic_path_enabled,omitempty"` + + // The container on the Azure Data Lake Storage Account hosting the file. + // +kubebuilder:validation:Optional + FileSystem *string `json:"fileSystem,omitempty" tf:"file_system,omitempty"` + + // The filename of the file on the Azure Data Lake Storage Account. + // +kubebuilder:validation:Optional + Filename *string `json:"filename,omitempty" tf:"filename,omitempty"` + + // The folder path to the file on the Azure Data Lake Storage Account. + // +kubebuilder:validation:Optional + Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type DataSetParquetAzureBlobStorageLocationInitParameters struct { + + // The container on the Azure Blob Storage Account hosting the file. + Container *string `json:"container,omitempty" tf:"container,omitempty"` + + // Is the container using dynamic expression, function or system variables? Defaults to false. + DynamicContainerEnabled *bool `json:"dynamicContainerEnabled,omitempty" tf:"dynamic_container_enabled,omitempty"` + + // Is the filename using dynamic expression, function or system variables? Defaults to false. + DynamicFilenameEnabled *bool `json:"dynamicFilenameEnabled,omitempty" tf:"dynamic_filename_enabled,omitempty"` + + // Is the path using dynamic expression, function or system variables? Defaults to false. 
+ DynamicPathEnabled *bool `json:"dynamicPathEnabled,omitempty" tf:"dynamic_path_enabled,omitempty"` + + // The filename of the file on the Azure Blob Storage Account. + Filename *string `json:"filename,omitempty" tf:"filename,omitempty"` + + // The folder path to the file on the Azure Blob Storage Account. + Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type DataSetParquetAzureBlobStorageLocationObservation struct { + + // The container on the Azure Blob Storage Account hosting the file. + Container *string `json:"container,omitempty" tf:"container,omitempty"` + + // Is the container using dynamic expression, function or system variables? Defaults to false. + DynamicContainerEnabled *bool `json:"dynamicContainerEnabled,omitempty" tf:"dynamic_container_enabled,omitempty"` + + // Is the filename using dynamic expression, function or system variables? Defaults to false. + DynamicFilenameEnabled *bool `json:"dynamicFilenameEnabled,omitempty" tf:"dynamic_filename_enabled,omitempty"` + + // Is the path using dynamic expression, function or system variables? Defaults to false. + DynamicPathEnabled *bool `json:"dynamicPathEnabled,omitempty" tf:"dynamic_path_enabled,omitempty"` + + // The filename of the file on the Azure Blob Storage Account. + Filename *string `json:"filename,omitempty" tf:"filename,omitempty"` + + // The folder path to the file on the Azure Blob Storage Account. + Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type DataSetParquetAzureBlobStorageLocationParameters struct { + + // The container on the Azure Blob Storage Account hosting the file. + // +kubebuilder:validation:Optional + Container *string `json:"container" tf:"container,omitempty"` + + // Is the container using dynamic expression, function or system variables? Defaults to false. 
+ // +kubebuilder:validation:Optional + DynamicContainerEnabled *bool `json:"dynamicContainerEnabled,omitempty" tf:"dynamic_container_enabled,omitempty"` + + // Is the filename using dynamic expression, function or system variables? Defaults to false. + // +kubebuilder:validation:Optional + DynamicFilenameEnabled *bool `json:"dynamicFilenameEnabled,omitempty" tf:"dynamic_filename_enabled,omitempty"` + + // Is the path using dynamic expression, function or system variables? Defaults to false. + // +kubebuilder:validation:Optional + DynamicPathEnabled *bool `json:"dynamicPathEnabled,omitempty" tf:"dynamic_path_enabled,omitempty"` + + // The filename of the file on the Azure Blob Storage Account. + // +kubebuilder:validation:Optional + Filename *string `json:"filename,omitempty" tf:"filename,omitempty"` + + // The folder path to the file on the Azure Blob Storage Account. + // +kubebuilder:validation:Optional + Path *string `json:"path,omitempty" tf:"path,omitempty"` +} + +type DataSetParquetHTTPServerLocationInitParameters struct { + + // Is the filename using dynamic expression, function or system variables? Defaults to false. + DynamicFilenameEnabled *bool `json:"dynamicFilenameEnabled,omitempty" tf:"dynamic_filename_enabled,omitempty"` + + // Is the path using dynamic expression, function or system variables? Defaults to false. + DynamicPathEnabled *bool `json:"dynamicPathEnabled,omitempty" tf:"dynamic_path_enabled,omitempty"` + + // The filename of the file on the web server. + Filename *string `json:"filename,omitempty" tf:"filename,omitempty"` + + // The folder path to the file on the web server. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // The base URL to the web server hosting the file. + RelativeURL *string `json:"relativeUrl,omitempty" tf:"relative_url,omitempty"` +} + +type DataSetParquetHTTPServerLocationObservation struct { + + // Is the filename using dynamic expression, function or system variables? Defaults to false. 
+ DynamicFilenameEnabled *bool `json:"dynamicFilenameEnabled,omitempty" tf:"dynamic_filename_enabled,omitempty"` + + // Is the path using dynamic expression, function or system variables? Defaults to false. + DynamicPathEnabled *bool `json:"dynamicPathEnabled,omitempty" tf:"dynamic_path_enabled,omitempty"` + + // The filename of the file on the web server. + Filename *string `json:"filename,omitempty" tf:"filename,omitempty"` + + // The folder path to the file on the web server. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // The base URL to the web server hosting the file. + RelativeURL *string `json:"relativeUrl,omitempty" tf:"relative_url,omitempty"` +} + +type DataSetParquetHTTPServerLocationParameters struct { + + // Is the filename using dynamic expression, function or system variables? Defaults to false. + // +kubebuilder:validation:Optional + DynamicFilenameEnabled *bool `json:"dynamicFilenameEnabled,omitempty" tf:"dynamic_filename_enabled,omitempty"` + + // Is the path using dynamic expression, function or system variables? Defaults to false. + // +kubebuilder:validation:Optional + DynamicPathEnabled *bool `json:"dynamicPathEnabled,omitempty" tf:"dynamic_path_enabled,omitempty"` + + // The filename of the file on the web server. + // +kubebuilder:validation:Optional + Filename *string `json:"filename" tf:"filename,omitempty"` + + // The folder path to the file on the web server. + // +kubebuilder:validation:Optional + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // The base URL to the web server hosting the file. + // +kubebuilder:validation:Optional + RelativeURL *string `json:"relativeUrl" tf:"relative_url,omitempty"` +} + +type DataSetParquetInitParameters struct { + + // A map of additional properties to associate with the Data Factory Dataset. 
+ // +mapType=granular + AdditionalProperties map[string]*string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"` + + // List of tags that can be used for describing the Data Factory Dataset. + Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // A azure_blob_fs_location block as defined below. + AzureBlobFsLocation *DataSetParquetAzureBlobFsLocationInitParameters `json:"azureBlobFsLocation,omitempty" tf:"azure_blob_fs_location,omitempty"` + + // A azure_blob_storage_location block as defined below. + AzureBlobStorageLocation *DataSetParquetAzureBlobStorageLocationInitParameters `json:"azureBlobStorageLocation,omitempty" tf:"azure_blob_storage_location,omitempty"` + + // The compression codec used to read/write text files. Valid values are bzip2, gzip, deflate, ZipDeflate, TarGzip, Tar, snappy, or lz4. Please note these values are case-sensitive. + CompressionCodec *string `json:"compressionCodec,omitempty" tf:"compression_codec,omitempty"` + + // Specifies the compression level. Possible values are Optimal and Fastest, + CompressionLevel *string `json:"compressionLevel,omitempty" tf:"compression_level,omitempty"` + + // The description for the Data Factory Dataset. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The folder that this Dataset is in. If not specified, the Dataset will appear at the root level. + Folder *string `json:"folder,omitempty" tf:"folder,omitempty"` + + // A http_server_location block as defined below. + HTTPServerLocation *DataSetParquetHTTPServerLocationInitParameters `json:"httpServerLocation,omitempty" tf:"http_server_location,omitempty"` + + // The Data Factory Linked Service name in which to associate the Dataset with. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta1.LinkedServiceWeb + LinkedServiceName *string `json:"linkedServiceName,omitempty" tf:"linked_service_name,omitempty"` + + // Reference to a LinkedServiceWeb in datafactory to populate linkedServiceName. + // +kubebuilder:validation:Optional + LinkedServiceNameRef *v1.Reference `json:"linkedServiceNameRef,omitempty" tf:"-"` + + // Selector for a LinkedServiceWeb in datafactory to populate linkedServiceName. + // +kubebuilder:validation:Optional + LinkedServiceNameSelector *v1.Selector `json:"linkedServiceNameSelector,omitempty" tf:"-"` + + // A map of parameters to associate with the Data Factory Dataset. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // A schema_column block as defined below. + SchemaColumn []DataSetParquetSchemaColumnInitParameters `json:"schemaColumn,omitempty" tf:"schema_column,omitempty"` +} + +type DataSetParquetObservation struct { + + // A map of additional properties to associate with the Data Factory Dataset. + // +mapType=granular + AdditionalProperties map[string]*string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"` + + // List of tags that can be used for describing the Data Factory Dataset. + Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // A azure_blob_fs_location block as defined below. + AzureBlobFsLocation *DataSetParquetAzureBlobFsLocationObservation `json:"azureBlobFsLocation,omitempty" tf:"azure_blob_fs_location,omitempty"` + + // A azure_blob_storage_location block as defined below. + AzureBlobStorageLocation *DataSetParquetAzureBlobStorageLocationObservation `json:"azureBlobStorageLocation,omitempty" tf:"azure_blob_storage_location,omitempty"` + + // The compression codec used to read/write text files. Valid values are bzip2, gzip, deflate, ZipDeflate, TarGzip, Tar, snappy, or lz4. 
Please note these values are case-sensitive. + CompressionCodec *string `json:"compressionCodec,omitempty" tf:"compression_codec,omitempty"` + + // Specifies the compression level. Possible values are Optimal and Fastest, + CompressionLevel *string `json:"compressionLevel,omitempty" tf:"compression_level,omitempty"` + + // The Data Factory ID in which to associate the Dataset with. Changing this forces a new resource. + DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` + + // The description for the Data Factory Dataset. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The folder that this Dataset is in. If not specified, the Dataset will appear at the root level. + Folder *string `json:"folder,omitempty" tf:"folder,omitempty"` + + // A http_server_location block as defined below. + HTTPServerLocation *DataSetParquetHTTPServerLocationObservation `json:"httpServerLocation,omitempty" tf:"http_server_location,omitempty"` + + // The ID of the Data Factory Dataset. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The Data Factory Linked Service name in which to associate the Dataset with. + LinkedServiceName *string `json:"linkedServiceName,omitempty" tf:"linked_service_name,omitempty"` + + // A map of parameters to associate with the Data Factory Dataset. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // A schema_column block as defined below. + SchemaColumn []DataSetParquetSchemaColumnObservation `json:"schemaColumn,omitempty" tf:"schema_column,omitempty"` +} + +type DataSetParquetParameters struct { + + // A map of additional properties to associate with the Data Factory Dataset. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + AdditionalProperties map[string]*string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"` + + // List of tags that can be used for describing the Data Factory Dataset. + // +kubebuilder:validation:Optional + Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // A azure_blob_fs_location block as defined below. + // +kubebuilder:validation:Optional + AzureBlobFsLocation *DataSetParquetAzureBlobFsLocationParameters `json:"azureBlobFsLocation,omitempty" tf:"azure_blob_fs_location,omitempty"` + + // A azure_blob_storage_location block as defined below. + // +kubebuilder:validation:Optional + AzureBlobStorageLocation *DataSetParquetAzureBlobStorageLocationParameters `json:"azureBlobStorageLocation,omitempty" tf:"azure_blob_storage_location,omitempty"` + + // The compression codec used to read/write text files. Valid values are bzip2, gzip, deflate, ZipDeflate, TarGzip, Tar, snappy, or lz4. Please note these values are case-sensitive. + // +kubebuilder:validation:Optional + CompressionCodec *string `json:"compressionCodec,omitempty" tf:"compression_codec,omitempty"` + + // Specifies the compression level. Possible values are Optimal and Fastest, + // +kubebuilder:validation:Optional + CompressionLevel *string `json:"compressionLevel,omitempty" tf:"compression_level,omitempty"` + + // The Data Factory ID in which to associate the Dataset with. Changing this forces a new resource. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta2.Factory + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` + + // Reference to a Factory in datafactory to populate dataFactoryId. 
+ // +kubebuilder:validation:Optional + DataFactoryIDRef *v1.Reference `json:"dataFactoryIdRef,omitempty" tf:"-"` + + // Selector for a Factory in datafactory to populate dataFactoryId. + // +kubebuilder:validation:Optional + DataFactoryIDSelector *v1.Selector `json:"dataFactoryIdSelector,omitempty" tf:"-"` + + // The description for the Data Factory Dataset. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The folder that this Dataset is in. If not specified, the Dataset will appear at the root level. + // +kubebuilder:validation:Optional + Folder *string `json:"folder,omitempty" tf:"folder,omitempty"` + + // A http_server_location block as defined below. + // +kubebuilder:validation:Optional + HTTPServerLocation *DataSetParquetHTTPServerLocationParameters `json:"httpServerLocation,omitempty" tf:"http_server_location,omitempty"` + + // The Data Factory Linked Service name in which to associate the Dataset with. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta1.LinkedServiceWeb + // +kubebuilder:validation:Optional + LinkedServiceName *string `json:"linkedServiceName,omitempty" tf:"linked_service_name,omitempty"` + + // Reference to a LinkedServiceWeb in datafactory to populate linkedServiceName. + // +kubebuilder:validation:Optional + LinkedServiceNameRef *v1.Reference `json:"linkedServiceNameRef,omitempty" tf:"-"` + + // Selector for a LinkedServiceWeb in datafactory to populate linkedServiceName. + // +kubebuilder:validation:Optional + LinkedServiceNameSelector *v1.Selector `json:"linkedServiceNameSelector,omitempty" tf:"-"` + + // A map of parameters to associate with the Data Factory Dataset. + // +kubebuilder:validation:Optional + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // A schema_column block as defined below. 
+ // +kubebuilder:validation:Optional + SchemaColumn []DataSetParquetSchemaColumnParameters `json:"schemaColumn,omitempty" tf:"schema_column,omitempty"` +} + +type DataSetParquetSchemaColumnInitParameters struct { + + // The description of the column. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The name of the column. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Type of the column. Valid values are Byte, Byte[], Boolean, Date, DateTime,DateTimeOffset, Decimal, Double, Guid, Int16, Int32, Int64, Single, String, TimeSpan. Please note these values are case sensitive. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type DataSetParquetSchemaColumnObservation struct { + + // The description of the column. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The name of the column. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Type of the column. Valid values are Byte, Byte[], Boolean, Date, DateTime,DateTimeOffset, Decimal, Double, Guid, Int16, Int32, Int64, Single, String, TimeSpan. Please note these values are case sensitive. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type DataSetParquetSchemaColumnParameters struct { + + // The description of the column. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The name of the column. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Type of the column. Valid values are Byte, Byte[], Boolean, Date, DateTime,DateTimeOffset, Decimal, Double, Guid, Int16, Int32, Int64, Single, String, TimeSpan. Please note these values are case sensitive. 
+ // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +// DataSetParquetSpec defines the desired state of DataSetParquet +type DataSetParquetSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider DataSetParquetParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider DataSetParquetInitParameters `json:"initProvider,omitempty"` +} + +// DataSetParquetStatus defines the observed state of DataSetParquet. +type DataSetParquetStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider DataSetParquetObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// DataSetParquet is the Schema for the DataSetParquets API. Manages an Azure Parquet Dataset inside an Azure Data Factory. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type DataSetParquet struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec DataSetParquetSpec `json:"spec"` + Status DataSetParquetStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// DataSetParquetList contains a list of DataSetParquets +type DataSetParquetList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DataSetParquet `json:"items"` +} + +// Repository type metadata. +var ( + DataSetParquet_Kind = "DataSetParquet" + DataSetParquet_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: DataSetParquet_Kind}.String() + DataSetParquet_KindAPIVersion = DataSetParquet_Kind + "." + CRDGroupVersion.String() + DataSetParquet_GroupVersionKind = CRDGroupVersion.WithKind(DataSetParquet_Kind) +) + +func init() { + SchemeBuilder.Register(&DataSetParquet{}, &DataSetParquetList{}) +} diff --git a/apis/datafactory/v1beta2/zz_factory_terraformed.go b/apis/datafactory/v1beta2/zz_factory_terraformed.go new file mode 100755 index 000000000..cc1dd3bc8 --- /dev/null +++ b/apis/datafactory/v1beta2/zz_factory_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Factory +func (mg *Factory) GetTerraformResourceType() string { + return "azurerm_data_factory" +} + +// GetConnectionDetailsMapping for this Factory +func (tr *Factory) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Factory +func (tr *Factory) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Factory +func (tr *Factory) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Factory +func (tr *Factory) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Factory +func (tr *Factory) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Factory +func (tr *Factory) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Factory +func (tr *Factory) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// 
GetInitParameters of this Factory +func (tr *Factory) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Factory using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Factory) LateInitialize(attrs []byte) (bool, error) { + params := &FactoryParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Factory) GetTerraformSchemaVersion() int { + return 2 +} diff --git a/apis/datafactory/v1beta2/zz_factory_types.go b/apis/datafactory/v1beta2/zz_factory_types.go new file mode 100755 index 000000000..189b4fba3 --- /dev/null +++ b/apis/datafactory/v1beta2/zz_factory_types.go @@ -0,0 +1,441 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type FactoryInitParameters struct { + + // Specifies the Azure Key Vault Key ID to be used as the Customer Managed Key (CMK) for double encryption. Required with user assigned identity. + CustomerManagedKeyID *string `json:"customerManagedKeyId,omitempty" tf:"customer_managed_key_id,omitempty"` + + // Specifies the ID of the user assigned identity associated with the Customer Managed Key. Must be supplied if customer_managed_key_id is set. + CustomerManagedKeyIdentityID *string `json:"customerManagedKeyIdentityId,omitempty" tf:"customer_managed_key_identity_id,omitempty"` + + // A github_configuration block as defined below. + GithubConfiguration *GithubConfigurationInitParameters `json:"githubConfiguration,omitempty" tf:"github_configuration,omitempty"` + + // A list of global_parameter blocks as defined above. + GlobalParameter []GlobalParameterInitParameters `json:"globalParameter,omitempty" tf:"global_parameter,omitempty"` + + // An identity block as defined below. + Identity *IdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. 
+ Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Is Managed Virtual Network enabled? + ManagedVirtualNetworkEnabled *bool `json:"managedVirtualNetworkEnabled,omitempty" tf:"managed_virtual_network_enabled,omitempty"` + + // Is the Data Factory visible to the public network? Defaults to true. + PublicNetworkEnabled *bool `json:"publicNetworkEnabled,omitempty" tf:"public_network_enabled,omitempty"` + + // Specifies the ID of the purview account resource associated with the Data Factory. + PurviewID *string `json:"purviewId,omitempty" tf:"purview_id,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A vsts_configuration block as defined below. + VstsConfiguration *VstsConfigurationInitParameters `json:"vstsConfiguration,omitempty" tf:"vsts_configuration,omitempty"` +} + +type FactoryObservation struct { + + // Specifies the Azure Key Vault Key ID to be used as the Customer Managed Key (CMK) for double encryption. Required with user assigned identity. + CustomerManagedKeyID *string `json:"customerManagedKeyId,omitempty" tf:"customer_managed_key_id,omitempty"` + + // Specifies the ID of the user assigned identity associated with the Customer Managed Key. Must be supplied if customer_managed_key_id is set. + CustomerManagedKeyIdentityID *string `json:"customerManagedKeyIdentityId,omitempty" tf:"customer_managed_key_identity_id,omitempty"` + + // A github_configuration block as defined below. + GithubConfiguration *GithubConfigurationObservation `json:"githubConfiguration,omitempty" tf:"github_configuration,omitempty"` + + // A list of global_parameter blocks as defined above. + GlobalParameter []GlobalParameterObservation `json:"globalParameter,omitempty" tf:"global_parameter,omitempty"` + + // The ID of the Data Factory. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. 
+ Identity *IdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Is Managed Virtual Network enabled? + ManagedVirtualNetworkEnabled *bool `json:"managedVirtualNetworkEnabled,omitempty" tf:"managed_virtual_network_enabled,omitempty"` + + // Is the Data Factory visible to the public network? Defaults to true. + PublicNetworkEnabled *bool `json:"publicNetworkEnabled,omitempty" tf:"public_network_enabled,omitempty"` + + // Specifies the ID of the purview account resource associated with the Data Factory. + PurviewID *string `json:"purviewId,omitempty" tf:"purview_id,omitempty"` + + // The name of the resource group in which to create the Data Factory. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A vsts_configuration block as defined below. + VstsConfiguration *VstsConfigurationObservation `json:"vstsConfiguration,omitempty" tf:"vsts_configuration,omitempty"` +} + +type FactoryParameters struct { + + // Specifies the Azure Key Vault Key ID to be used as the Customer Managed Key (CMK) for double encryption. Required with user assigned identity. + // +kubebuilder:validation:Optional + CustomerManagedKeyID *string `json:"customerManagedKeyId,omitempty" tf:"customer_managed_key_id,omitempty"` + + // Specifies the ID of the user assigned identity associated with the Customer Managed Key. Must be supplied if customer_managed_key_id is set. 
+ // +kubebuilder:validation:Optional + CustomerManagedKeyIdentityID *string `json:"customerManagedKeyIdentityId,omitempty" tf:"customer_managed_key_identity_id,omitempty"` + + // A github_configuration block as defined below. + // +kubebuilder:validation:Optional + GithubConfiguration *GithubConfigurationParameters `json:"githubConfiguration,omitempty" tf:"github_configuration,omitempty"` + + // A list of global_parameter blocks as defined above. + // +kubebuilder:validation:Optional + GlobalParameter []GlobalParameterParameters `json:"globalParameter,omitempty" tf:"global_parameter,omitempty"` + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *IdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Is Managed Virtual Network enabled? + // +kubebuilder:validation:Optional + ManagedVirtualNetworkEnabled *bool `json:"managedVirtualNetworkEnabled,omitempty" tf:"managed_virtual_network_enabled,omitempty"` + + // Is the Data Factory visible to the public network? Defaults to true. + // +kubebuilder:validation:Optional + PublicNetworkEnabled *bool `json:"publicNetworkEnabled,omitempty" tf:"public_network_enabled,omitempty"` + + // Specifies the ID of the purview account resource associated with the Data Factory. + // +kubebuilder:validation:Optional + PurviewID *string `json:"purviewId,omitempty" tf:"purview_id,omitempty"` + + // The name of the resource group in which to create the Data Factory. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A vsts_configuration block as defined below. + // +kubebuilder:validation:Optional + VstsConfiguration *VstsConfigurationParameters `json:"vstsConfiguration,omitempty" tf:"vsts_configuration,omitempty"` +} + +type GithubConfigurationInitParameters struct { + + // Specifies the GitHub account name. + AccountName *string `json:"accountName,omitempty" tf:"account_name,omitempty"` + + // Specifies the branch of the repository to get code from. + BranchName *string `json:"branchName,omitempty" tf:"branch_name,omitempty"` + + // Specifies the GitHub Enterprise host name. For example: https://github.mydomain.com. Use https://github.com for open source repositories. + GitURL *string `json:"gitUrl,omitempty" tf:"git_url,omitempty"` + + // Is automated publishing enabled? Defaults to true. + PublishingEnabled *bool `json:"publishingEnabled,omitempty" tf:"publishing_enabled,omitempty"` + + // Specifies the name of the git repository. + RepositoryName *string `json:"repositoryName,omitempty" tf:"repository_name,omitempty"` + + // Specifies the root folder within the repository. Set to / for the top level. 
+ RootFolder *string `json:"rootFolder,omitempty" tf:"root_folder,omitempty"` +} + +type GithubConfigurationObservation struct { + + // Specifies the GitHub account name. + AccountName *string `json:"accountName,omitempty" tf:"account_name,omitempty"` + + // Specifies the branch of the repository to get code from. + BranchName *string `json:"branchName,omitempty" tf:"branch_name,omitempty"` + + // Specifies the GitHub Enterprise host name. For example: https://github.mydomain.com. Use https://github.com for open source repositories. + GitURL *string `json:"gitUrl,omitempty" tf:"git_url,omitempty"` + + // Is automated publishing enabled? Defaults to true. + PublishingEnabled *bool `json:"publishingEnabled,omitempty" tf:"publishing_enabled,omitempty"` + + // Specifies the name of the git repository. + RepositoryName *string `json:"repositoryName,omitempty" tf:"repository_name,omitempty"` + + // Specifies the root folder within the repository. Set to / for the top level. + RootFolder *string `json:"rootFolder,omitempty" tf:"root_folder,omitempty"` +} + +type GithubConfigurationParameters struct { + + // Specifies the GitHub account name. + // +kubebuilder:validation:Optional + AccountName *string `json:"accountName" tf:"account_name,omitempty"` + + // Specifies the branch of the repository to get code from. + // +kubebuilder:validation:Optional + BranchName *string `json:"branchName" tf:"branch_name,omitempty"` + + // Specifies the GitHub Enterprise host name. For example: https://github.mydomain.com. Use https://github.com for open source repositories. + // +kubebuilder:validation:Optional + GitURL *string `json:"gitUrl,omitempty" tf:"git_url,omitempty"` + + // Is automated publishing enabled? Defaults to true. + // +kubebuilder:validation:Optional + PublishingEnabled *bool `json:"publishingEnabled,omitempty" tf:"publishing_enabled,omitempty"` + + // Specifies the name of the git repository. 
+ // +kubebuilder:validation:Optional + RepositoryName *string `json:"repositoryName" tf:"repository_name,omitempty"` + + // Specifies the root folder within the repository. Set to / for the top level. + // +kubebuilder:validation:Optional + RootFolder *string `json:"rootFolder" tf:"root_folder,omitempty"` +} + +type GlobalParameterInitParameters struct { + + // Specifies the global parameter name. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies the global parameter type. Possible Values are Array, Bool, Float, Int, Object or String. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // Specifies the global parameter value. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type GlobalParameterObservation struct { + + // Specifies the global parameter name. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies the global parameter type. Possible Values are Array, Bool, Float, Int, Object or String. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // Specifies the global parameter value. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type GlobalParameterParameters struct { + + // Specifies the global parameter name. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Specifies the global parameter type. Possible Values are Array, Bool, Float, Int, Object or String. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` + + // Specifies the global parameter value. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type IdentityInitParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Data Factory. 
+ // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Data Factory. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityObservation struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Data Factory. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Principal ID associated with this Managed Service Identity. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID associated with this Managed Service Identity. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Data Factory. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Data Factory. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Data Factory. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type VstsConfigurationInitParameters struct { + + // Specifies the VSTS account name. + AccountName *string `json:"accountName,omitempty" tf:"account_name,omitempty"` + + // Specifies the branch of the repository to get code from. 
+ BranchName *string `json:"branchName,omitempty" tf:"branch_name,omitempty"` + + // Specifies the name of the VSTS project. + ProjectName *string `json:"projectName,omitempty" tf:"project_name,omitempty"` + + // Is automated publishing enabled? Defaults to true. + PublishingEnabled *bool `json:"publishingEnabled,omitempty" tf:"publishing_enabled,omitempty"` + + // Specifies the name of the git repository. + RepositoryName *string `json:"repositoryName,omitempty" tf:"repository_name,omitempty"` + + // Specifies the root folder within the repository. Set to / for the top level. + RootFolder *string `json:"rootFolder,omitempty" tf:"root_folder,omitempty"` + + // Specifies the Tenant ID associated with the VSTS account. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` +} + +type VstsConfigurationObservation struct { + + // Specifies the VSTS account name. + AccountName *string `json:"accountName,omitempty" tf:"account_name,omitempty"` + + // Specifies the branch of the repository to get code from. + BranchName *string `json:"branchName,omitempty" tf:"branch_name,omitempty"` + + // Specifies the name of the VSTS project. + ProjectName *string `json:"projectName,omitempty" tf:"project_name,omitempty"` + + // Is automated publishing enabled? Defaults to true. + PublishingEnabled *bool `json:"publishingEnabled,omitempty" tf:"publishing_enabled,omitempty"` + + // Specifies the name of the git repository. + RepositoryName *string `json:"repositoryName,omitempty" tf:"repository_name,omitempty"` + + // Specifies the root folder within the repository. Set to / for the top level. + RootFolder *string `json:"rootFolder,omitempty" tf:"root_folder,omitempty"` + + // Specifies the Tenant ID associated with the VSTS account. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` +} + +type VstsConfigurationParameters struct { + + // Specifies the VSTS account name. 
+ // +kubebuilder:validation:Optional + AccountName *string `json:"accountName" tf:"account_name,omitempty"` + + // Specifies the branch of the repository to get code from. + // +kubebuilder:validation:Optional + BranchName *string `json:"branchName" tf:"branch_name,omitempty"` + + // Specifies the name of the VSTS project. + // +kubebuilder:validation:Optional + ProjectName *string `json:"projectName" tf:"project_name,omitempty"` + + // Is automated publishing enabled? Defaults to true. + // +kubebuilder:validation:Optional + PublishingEnabled *bool `json:"publishingEnabled,omitempty" tf:"publishing_enabled,omitempty"` + + // Specifies the name of the git repository. + // +kubebuilder:validation:Optional + RepositoryName *string `json:"repositoryName" tf:"repository_name,omitempty"` + + // Specifies the root folder within the repository. Set to / for the top level. + // +kubebuilder:validation:Optional + RootFolder *string `json:"rootFolder" tf:"root_folder,omitempty"` + + // Specifies the Tenant ID associated with the VSTS account. + // +kubebuilder:validation:Optional + TenantID *string `json:"tenantId" tf:"tenant_id,omitempty"` +} + +// FactorySpec defines the desired state of Factory +type FactorySpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider FactoryParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider FactoryInitParameters `json:"initProvider,omitempty"` +} + +// FactoryStatus defines the observed state of Factory. +type FactoryStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider FactoryObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Factory is the Schema for the Factorys API. Manages an Azure Data Factory (Version 2). +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type Factory struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + Spec FactorySpec `json:"spec"` + Status FactoryStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// FactoryList contains a list of Factorys +type FactoryList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Factory `json:"items"` +} + +// Repository type metadata. +var ( + Factory_Kind = "Factory" + Factory_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Factory_Kind}.String() + Factory_KindAPIVersion = Factory_Kind + "." 
+ CRDGroupVersion.String() + Factory_GroupVersionKind = CRDGroupVersion.WithKind(Factory_Kind) +) + +func init() { + SchemeBuilder.Register(&Factory{}, &FactoryList{}) +} diff --git a/apis/datafactory/v1beta2/zz_generated.conversion_hubs.go b/apis/datafactory/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 000000000..93ea3797d --- /dev/null +++ b/apis/datafactory/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,70 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *CustomDataSet) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *DataFlow) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *DataSetBinary) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *DataSetDelimitedText) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *DataSetJSON) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *DataSetParquet) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Factory) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *IntegrationRuntimeAzureSSIS) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *IntegrationRuntimeManaged) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *LinkedCustomService) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *LinkedServiceAzureBlobStorage) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *LinkedServiceAzureDatabricks) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *LinkedServiceAzureFileStorage) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *LinkedServiceAzureFunction) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *LinkedServiceAzureSQLDatabase) Hub() {} + +// Hub marks this type as a conversion hub. 
+func (tr *LinkedServiceOData) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *LinkedServiceOdbc) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *LinkedServiceSnowflake) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *LinkedServiceSQLServer) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *LinkedServiceSynapse) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *TriggerSchedule) Hub() {} diff --git a/apis/datafactory/v1beta2/zz_generated.deepcopy.go b/apis/datafactory/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..72a83b43f --- /dev/null +++ b/apis/datafactory/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,15976 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AzureBlobFsLocationInitParameters) DeepCopyInto(out *AzureBlobFsLocationInitParameters) { + *out = *in + if in.DynamicFileSystemEnabled != nil { + in, out := &in.DynamicFileSystemEnabled, &out.DynamicFileSystemEnabled + *out = new(bool) + **out = **in + } + if in.DynamicFilenameEnabled != nil { + in, out := &in.DynamicFilenameEnabled, &out.DynamicFilenameEnabled + *out = new(bool) + **out = **in + } + if in.DynamicPathEnabled != nil { + in, out := &in.DynamicPathEnabled, &out.DynamicPathEnabled + *out = new(bool) + **out = **in + } + if in.FileSystem != nil { + in, out := &in.FileSystem, &out.FileSystem + *out = new(string) + **out = **in + } + if in.Filename != nil { + in, out := &in.Filename, &out.Filename + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureBlobFsLocationInitParameters. +func (in *AzureBlobFsLocationInitParameters) DeepCopy() *AzureBlobFsLocationInitParameters { + if in == nil { + return nil + } + out := new(AzureBlobFsLocationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AzureBlobFsLocationObservation) DeepCopyInto(out *AzureBlobFsLocationObservation) { + *out = *in + if in.DynamicFileSystemEnabled != nil { + in, out := &in.DynamicFileSystemEnabled, &out.DynamicFileSystemEnabled + *out = new(bool) + **out = **in + } + if in.DynamicFilenameEnabled != nil { + in, out := &in.DynamicFilenameEnabled, &out.DynamicFilenameEnabled + *out = new(bool) + **out = **in + } + if in.DynamicPathEnabled != nil { + in, out := &in.DynamicPathEnabled, &out.DynamicPathEnabled + *out = new(bool) + **out = **in + } + if in.FileSystem != nil { + in, out := &in.FileSystem, &out.FileSystem + *out = new(string) + **out = **in + } + if in.Filename != nil { + in, out := &in.Filename, &out.Filename + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureBlobFsLocationObservation. +func (in *AzureBlobFsLocationObservation) DeepCopy() *AzureBlobFsLocationObservation { + if in == nil { + return nil + } + out := new(AzureBlobFsLocationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AzureBlobFsLocationParameters) DeepCopyInto(out *AzureBlobFsLocationParameters) { + *out = *in + if in.DynamicFileSystemEnabled != nil { + in, out := &in.DynamicFileSystemEnabled, &out.DynamicFileSystemEnabled + *out = new(bool) + **out = **in + } + if in.DynamicFilenameEnabled != nil { + in, out := &in.DynamicFilenameEnabled, &out.DynamicFilenameEnabled + *out = new(bool) + **out = **in + } + if in.DynamicPathEnabled != nil { + in, out := &in.DynamicPathEnabled, &out.DynamicPathEnabled + *out = new(bool) + **out = **in + } + if in.FileSystem != nil { + in, out := &in.FileSystem, &out.FileSystem + *out = new(string) + **out = **in + } + if in.Filename != nil { + in, out := &in.Filename, &out.Filename + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureBlobFsLocationParameters. +func (in *AzureBlobFsLocationParameters) DeepCopy() *AzureBlobFsLocationParameters { + if in == nil { + return nil + } + out := new(AzureBlobFsLocationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AzureBlobStorageLocationInitParameters) DeepCopyInto(out *AzureBlobStorageLocationInitParameters) { + *out = *in + if in.Container != nil { + in, out := &in.Container, &out.Container + *out = new(string) + **out = **in + } + if in.DynamicContainerEnabled != nil { + in, out := &in.DynamicContainerEnabled, &out.DynamicContainerEnabled + *out = new(bool) + **out = **in + } + if in.DynamicFilenameEnabled != nil { + in, out := &in.DynamicFilenameEnabled, &out.DynamicFilenameEnabled + *out = new(bool) + **out = **in + } + if in.DynamicPathEnabled != nil { + in, out := &in.DynamicPathEnabled, &out.DynamicPathEnabled + *out = new(bool) + **out = **in + } + if in.Filename != nil { + in, out := &in.Filename, &out.Filename + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureBlobStorageLocationInitParameters. +func (in *AzureBlobStorageLocationInitParameters) DeepCopy() *AzureBlobStorageLocationInitParameters { + if in == nil { + return nil + } + out := new(AzureBlobStorageLocationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AzureBlobStorageLocationObservation) DeepCopyInto(out *AzureBlobStorageLocationObservation) { + *out = *in + if in.Container != nil { + in, out := &in.Container, &out.Container + *out = new(string) + **out = **in + } + if in.DynamicContainerEnabled != nil { + in, out := &in.DynamicContainerEnabled, &out.DynamicContainerEnabled + *out = new(bool) + **out = **in + } + if in.DynamicFilenameEnabled != nil { + in, out := &in.DynamicFilenameEnabled, &out.DynamicFilenameEnabled + *out = new(bool) + **out = **in + } + if in.DynamicPathEnabled != nil { + in, out := &in.DynamicPathEnabled, &out.DynamicPathEnabled + *out = new(bool) + **out = **in + } + if in.Filename != nil { + in, out := &in.Filename, &out.Filename + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureBlobStorageLocationObservation. +func (in *AzureBlobStorageLocationObservation) DeepCopy() *AzureBlobStorageLocationObservation { + if in == nil { + return nil + } + out := new(AzureBlobStorageLocationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AzureBlobStorageLocationParameters) DeepCopyInto(out *AzureBlobStorageLocationParameters) { + *out = *in + if in.Container != nil { + in, out := &in.Container, &out.Container + *out = new(string) + **out = **in + } + if in.DynamicContainerEnabled != nil { + in, out := &in.DynamicContainerEnabled, &out.DynamicContainerEnabled + *out = new(bool) + **out = **in + } + if in.DynamicFilenameEnabled != nil { + in, out := &in.DynamicFilenameEnabled, &out.DynamicFilenameEnabled + *out = new(bool) + **out = **in + } + if in.DynamicPathEnabled != nil { + in, out := &in.DynamicPathEnabled, &out.DynamicPathEnabled + *out = new(bool) + **out = **in + } + if in.Filename != nil { + in, out := &in.Filename, &out.Filename + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureBlobStorageLocationParameters. +func (in *AzureBlobStorageLocationParameters) DeepCopy() *AzureBlobStorageLocationParameters { + if in == nil { + return nil + } + out := new(AzureBlobStorageLocationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BasicAuthenticationInitParameters) DeepCopyInto(out *BasicAuthenticationInitParameters) { + *out = *in + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasicAuthenticationInitParameters. +func (in *BasicAuthenticationInitParameters) DeepCopy() *BasicAuthenticationInitParameters { + if in == nil { + return nil + } + out := new(BasicAuthenticationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *BasicAuthenticationObservation) DeepCopyInto(out *BasicAuthenticationObservation) { + *out = *in + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasicAuthenticationObservation. +func (in *BasicAuthenticationObservation) DeepCopy() *BasicAuthenticationObservation { + if in == nil { + return nil + } + out := new(BasicAuthenticationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BasicAuthenticationParameters) DeepCopyInto(out *BasicAuthenticationParameters) { + *out = *in + out.PasswordSecretRef = in.PasswordSecretRef + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasicAuthenticationParameters. +func (in *BasicAuthenticationParameters) DeepCopy() *BasicAuthenticationParameters { + if in == nil { + return nil + } + out := new(BasicAuthenticationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CatalogInfoInitParameters) DeepCopyInto(out *CatalogInfoInitParameters) { + *out = *in + if in.AdministratorLogin != nil { + in, out := &in.AdministratorLogin, &out.AdministratorLogin + *out = new(string) + **out = **in + } + if in.DualStandbyPairName != nil { + in, out := &in.DualStandbyPairName, &out.DualStandbyPairName + *out = new(string) + **out = **in + } + if in.ElasticPoolName != nil { + in, out := &in.ElasticPoolName, &out.ElasticPoolName + *out = new(string) + **out = **in + } + if in.PricingTier != nil { + in, out := &in.PricingTier, &out.PricingTier + *out = new(string) + **out = **in + } + if in.ServerEndpoint != nil { + in, out := &in.ServerEndpoint, &out.ServerEndpoint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogInfoInitParameters. +func (in *CatalogInfoInitParameters) DeepCopy() *CatalogInfoInitParameters { + if in == nil { + return nil + } + out := new(CatalogInfoInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CatalogInfoObservation) DeepCopyInto(out *CatalogInfoObservation) { + *out = *in + if in.AdministratorLogin != nil { + in, out := &in.AdministratorLogin, &out.AdministratorLogin + *out = new(string) + **out = **in + } + if in.DualStandbyPairName != nil { + in, out := &in.DualStandbyPairName, &out.DualStandbyPairName + *out = new(string) + **out = **in + } + if in.ElasticPoolName != nil { + in, out := &in.ElasticPoolName, &out.ElasticPoolName + *out = new(string) + **out = **in + } + if in.PricingTier != nil { + in, out := &in.PricingTier, &out.PricingTier + *out = new(string) + **out = **in + } + if in.ServerEndpoint != nil { + in, out := &in.ServerEndpoint, &out.ServerEndpoint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogInfoObservation. +func (in *CatalogInfoObservation) DeepCopy() *CatalogInfoObservation { + if in == nil { + return nil + } + out := new(CatalogInfoObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CatalogInfoParameters) DeepCopyInto(out *CatalogInfoParameters) { + *out = *in + if in.AdministratorLogin != nil { + in, out := &in.AdministratorLogin, &out.AdministratorLogin + *out = new(string) + **out = **in + } + if in.AdministratorPasswordSecretRef != nil { + in, out := &in.AdministratorPasswordSecretRef, &out.AdministratorPasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.DualStandbyPairName != nil { + in, out := &in.DualStandbyPairName, &out.DualStandbyPairName + *out = new(string) + **out = **in + } + if in.ElasticPoolName != nil { + in, out := &in.ElasticPoolName, &out.ElasticPoolName + *out = new(string) + **out = **in + } + if in.PricingTier != nil { + in, out := &in.PricingTier, &out.PricingTier + *out = new(string) + **out = **in + } + if in.ServerEndpoint != nil { + in, out := &in.ServerEndpoint, &out.ServerEndpoint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogInfoParameters. +func (in *CatalogInfoParameters) DeepCopy() *CatalogInfoParameters { + if in == nil { + return nil + } + out := new(CatalogInfoParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CommandKeyInitParameters) DeepCopyInto(out *CommandKeyInitParameters) { + *out = *in + if in.KeyVaultPassword != nil { + in, out := &in.KeyVaultPassword, &out.KeyVaultPassword + *out = new(KeyVaultPasswordInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TargetName != nil { + in, out := &in.TargetName, &out.TargetName + *out = new(string) + **out = **in + } + if in.UserName != nil { + in, out := &in.UserName, &out.UserName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommandKeyInitParameters. 
+func (in *CommandKeyInitParameters) DeepCopy() *CommandKeyInitParameters { + if in == nil { + return nil + } + out := new(CommandKeyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CommandKeyObservation) DeepCopyInto(out *CommandKeyObservation) { + *out = *in + if in.KeyVaultPassword != nil { + in, out := &in.KeyVaultPassword, &out.KeyVaultPassword + *out = new(KeyVaultPasswordObservation) + (*in).DeepCopyInto(*out) + } + if in.TargetName != nil { + in, out := &in.TargetName, &out.TargetName + *out = new(string) + **out = **in + } + if in.UserName != nil { + in, out := &in.UserName, &out.UserName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommandKeyObservation. +func (in *CommandKeyObservation) DeepCopy() *CommandKeyObservation { + if in == nil { + return nil + } + out := new(CommandKeyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CommandKeyParameters) DeepCopyInto(out *CommandKeyParameters) { + *out = *in + if in.KeyVaultPassword != nil { + in, out := &in.KeyVaultPassword, &out.KeyVaultPassword + *out = new(KeyVaultPasswordParameters) + (*in).DeepCopyInto(*out) + } + if in.PasswordSecretRef != nil { + in, out := &in.PasswordSecretRef, &out.PasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.TargetName != nil { + in, out := &in.TargetName, &out.TargetName + *out = new(string) + **out = **in + } + if in.UserName != nil { + in, out := &in.UserName, &out.UserName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommandKeyParameters. 
+func (in *CommandKeyParameters) DeepCopy() *CommandKeyParameters { + if in == nil { + return nil + } + out := new(CommandKeyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComponentInitParameters) DeepCopyInto(out *ComponentInitParameters) { + *out = *in + if in.KeyVaultLicense != nil { + in, out := &in.KeyVaultLicense, &out.KeyVaultLicense + *out = new(KeyVaultLicenseInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentInitParameters. +func (in *ComponentInitParameters) DeepCopy() *ComponentInitParameters { + if in == nil { + return nil + } + out := new(ComponentInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComponentObservation) DeepCopyInto(out *ComponentObservation) { + *out = *in + if in.KeyVaultLicense != nil { + in, out := &in.KeyVaultLicense, &out.KeyVaultLicense + *out = new(KeyVaultLicenseObservation) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentObservation. +func (in *ComponentObservation) DeepCopy() *ComponentObservation { + if in == nil { + return nil + } + out := new(ComponentObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ComponentParameters) DeepCopyInto(out *ComponentParameters) { + *out = *in + if in.KeyVaultLicense != nil { + in, out := &in.KeyVaultLicense, &out.KeyVaultLicense + *out = new(KeyVaultLicenseParameters) + (*in).DeepCopyInto(*out) + } + if in.LicenseSecretRef != nil { + in, out := &in.LicenseSecretRef, &out.LicenseSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentParameters. +func (in *ComponentParameters) DeepCopy() *ComponentParameters { + if in == nil { + return nil + } + out := new(ComponentParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CompressionInitParameters) DeepCopyInto(out *CompressionInitParameters) { + *out = *in + if in.Level != nil { + in, out := &in.Level, &out.Level + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CompressionInitParameters. +func (in *CompressionInitParameters) DeepCopy() *CompressionInitParameters { + if in == nil { + return nil + } + out := new(CompressionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CompressionObservation) DeepCopyInto(out *CompressionObservation) { + *out = *in + if in.Level != nil { + in, out := &in.Level, &out.Level + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CompressionObservation. +func (in *CompressionObservation) DeepCopy() *CompressionObservation { + if in == nil { + return nil + } + out := new(CompressionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CompressionParameters) DeepCopyInto(out *CompressionParameters) { + *out = *in + if in.Level != nil { + in, out := &in.Level, &out.Level + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CompressionParameters. +func (in *CompressionParameters) DeepCopy() *CompressionParameters { + if in == nil { + return nil + } + out := new(CompressionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomDataSet) DeepCopyInto(out *CustomDataSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomDataSet. +func (in *CustomDataSet) DeepCopy() *CustomDataSet { + if in == nil { + return nil + } + out := new(CustomDataSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *CustomDataSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomDataSetInitParameters) DeepCopyInto(out *CustomDataSetInitParameters) { + *out = *in + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Folder != nil { + in, out := &in.Folder, &out.Folder + *out = new(string) + **out = **in + } + if in.LinkedService != nil { + in, out := &in.LinkedService, &out.LinkedService + *out = new(LinkedServiceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.SchemaJSON != nil { + in, out := &in.SchemaJSON, &out.SchemaJSON + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.TypePropertiesJSON != nil { + in, out := &in.TypePropertiesJSON, &out.TypePropertiesJSON + *out = new(string) + 
**out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomDataSetInitParameters. +func (in *CustomDataSetInitParameters) DeepCopy() *CustomDataSetInitParameters { + if in == nil { + return nil + } + out := new(CustomDataSetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomDataSetList) DeepCopyInto(out *CustomDataSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CustomDataSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomDataSetList. +func (in *CustomDataSetList) DeepCopy() *CustomDataSetList { + if in == nil { + return nil + } + out := new(CustomDataSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CustomDataSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomDataSetObservation) DeepCopyInto(out *CustomDataSetObservation) { + *out = *in + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DataFactoryID != nil { + in, out := &in.DataFactoryID, &out.DataFactoryID + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Folder != nil { + in, out := &in.Folder, &out.Folder + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LinkedService != nil { + in, out := &in.LinkedService, &out.LinkedService + *out = new(LinkedServiceObservation) + (*in).DeepCopyInto(*out) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.SchemaJSON != nil { + in, out := &in.SchemaJSON, &out.SchemaJSON + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.TypePropertiesJSON != nil { + in, out := &in.TypePropertiesJSON, &out.TypePropertiesJSON + *out = new(string) + **out = **in + } +} + +// DeepCopy is an 
autogenerated deepcopy function, copying the receiver, creating a new CustomDataSetObservation. +func (in *CustomDataSetObservation) DeepCopy() *CustomDataSetObservation { + if in == nil { + return nil + } + out := new(CustomDataSetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomDataSetParameters) DeepCopyInto(out *CustomDataSetParameters) { + *out = *in + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DataFactoryID != nil { + in, out := &in.DataFactoryID, &out.DataFactoryID + *out = new(string) + **out = **in + } + if in.DataFactoryIDRef != nil { + in, out := &in.DataFactoryIDRef, &out.DataFactoryIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DataFactoryIDSelector != nil { + in, out := &in.DataFactoryIDSelector, &out.DataFactoryIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Folder != nil { + in, out := &in.Folder, &out.Folder + *out = new(string) + **out = **in + } + if in.LinkedService != nil { + in, out := &in.LinkedService, &out.LinkedService + *out = new(LinkedServiceParameters) + (*in).DeepCopyInto(*out) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = 
make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.SchemaJSON != nil { + in, out := &in.SchemaJSON, &out.SchemaJSON + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.TypePropertiesJSON != nil { + in, out := &in.TypePropertiesJSON, &out.TypePropertiesJSON + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomDataSetParameters. +func (in *CustomDataSetParameters) DeepCopy() *CustomDataSetParameters { + if in == nil { + return nil + } + out := new(CustomDataSetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomDataSetSpec) DeepCopyInto(out *CustomDataSetSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomDataSetSpec. +func (in *CustomDataSetSpec) DeepCopy() *CustomDataSetSpec { + if in == nil { + return nil + } + out := new(CustomDataSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomDataSetStatus) DeepCopyInto(out *CustomDataSetStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomDataSetStatus. 
+func (in *CustomDataSetStatus) DeepCopy() *CustomDataSetStatus { + if in == nil { + return nil + } + out := new(CustomDataSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomSetupScriptInitParameters) DeepCopyInto(out *CustomSetupScriptInitParameters) { + *out = *in + if in.BlobContainerURI != nil { + in, out := &in.BlobContainerURI, &out.BlobContainerURI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomSetupScriptInitParameters. +func (in *CustomSetupScriptInitParameters) DeepCopy() *CustomSetupScriptInitParameters { + if in == nil { + return nil + } + out := new(CustomSetupScriptInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomSetupScriptObservation) DeepCopyInto(out *CustomSetupScriptObservation) { + *out = *in + if in.BlobContainerURI != nil { + in, out := &in.BlobContainerURI, &out.BlobContainerURI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomSetupScriptObservation. +func (in *CustomSetupScriptObservation) DeepCopy() *CustomSetupScriptObservation { + if in == nil { + return nil + } + out := new(CustomSetupScriptObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomSetupScriptParameters) DeepCopyInto(out *CustomSetupScriptParameters) { + *out = *in + if in.BlobContainerURI != nil { + in, out := &in.BlobContainerURI, &out.BlobContainerURI + *out = new(string) + **out = **in + } + out.SASTokenSecretRef = in.SASTokenSecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomSetupScriptParameters. +func (in *CustomSetupScriptParameters) DeepCopy() *CustomSetupScriptParameters { + if in == nil { + return nil + } + out := new(CustomSetupScriptParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataFlow) DeepCopyInto(out *DataFlow) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataFlow. +func (in *DataFlow) DeepCopy() *DataFlow { + if in == nil { + return nil + } + out := new(DataFlow) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DataFlow) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataFlowInitParameters) DeepCopyInto(out *DataFlowInitParameters) { + *out = *in + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Folder != nil { + in, out := &in.Folder, &out.Folder + *out = new(string) + **out = **in + } + if in.Script != nil { + in, out := &in.Script, &out.Script + *out = new(string) + **out = **in + } + if in.ScriptLines != nil { + in, out := &in.ScriptLines, &out.ScriptLines + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Sink != nil { + in, out := &in.Sink, &out.Sink + *out = make([]SinkInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = make([]SourceInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Transformation != nil { + in, out := &in.Transformation, &out.Transformation + *out = make([]TransformationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataFlowInitParameters. +func (in *DataFlowInitParameters) DeepCopy() *DataFlowInitParameters { + if in == nil { + return nil + } + out := new(DataFlowInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataFlowList) DeepCopyInto(out *DataFlowList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DataFlow, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataFlowList. +func (in *DataFlowList) DeepCopy() *DataFlowList { + if in == nil { + return nil + } + out := new(DataFlowList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DataFlowList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataFlowObservation) DeepCopyInto(out *DataFlowObservation) { + *out = *in + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DataFactoryID != nil { + in, out := &in.DataFactoryID, &out.DataFactoryID + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Folder != nil { + in, out := &in.Folder, &out.Folder + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Script != nil { + in, out := &in.Script, &out.Script + *out = new(string) + **out = **in + } + if in.ScriptLines != nil { + in, out := &in.ScriptLines, &out.ScriptLines + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = 
**in + } + } + } + if in.Sink != nil { + in, out := &in.Sink, &out.Sink + *out = make([]SinkObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = make([]SourceObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Transformation != nil { + in, out := &in.Transformation, &out.Transformation + *out = make([]TransformationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataFlowObservation. +func (in *DataFlowObservation) DeepCopy() *DataFlowObservation { + if in == nil { + return nil + } + out := new(DataFlowObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataFlowParameters) DeepCopyInto(out *DataFlowParameters) { + *out = *in + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DataFactoryID != nil { + in, out := &in.DataFactoryID, &out.DataFactoryID + *out = new(string) + **out = **in + } + if in.DataFactoryIDRef != nil { + in, out := &in.DataFactoryIDRef, &out.DataFactoryIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DataFactoryIDSelector != nil { + in, out := &in.DataFactoryIDSelector, &out.DataFactoryIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Folder != nil { + in, out := &in.Folder, &out.Folder + *out = new(string) + **out = **in + } + if in.Script != nil { + in, out := &in.Script, 
&out.Script + *out = new(string) + **out = **in + } + if in.ScriptLines != nil { + in, out := &in.ScriptLines, &out.ScriptLines + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Sink != nil { + in, out := &in.Sink, &out.Sink + *out = make([]SinkParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = make([]SourceParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Transformation != nil { + in, out := &in.Transformation, &out.Transformation + *out = make([]TransformationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataFlowParameters. +func (in *DataFlowParameters) DeepCopy() *DataFlowParameters { + if in == nil { + return nil + } + out := new(DataFlowParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataFlowSpec) DeepCopyInto(out *DataFlowSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataFlowSpec. +func (in *DataFlowSpec) DeepCopy() *DataFlowSpec { + if in == nil { + return nil + } + out := new(DataFlowSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataFlowStatus) DeepCopyInto(out *DataFlowStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataFlowStatus. +func (in *DataFlowStatus) DeepCopy() *DataFlowStatus { + if in == nil { + return nil + } + out := new(DataFlowStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataSetBinary) DeepCopyInto(out *DataSetBinary) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetBinary. +func (in *DataSetBinary) DeepCopy() *DataSetBinary { + if in == nil { + return nil + } + out := new(DataSetBinary) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DataSetBinary) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataSetBinaryInitParameters) DeepCopyInto(out *DataSetBinaryInitParameters) { + *out = *in + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AzureBlobStorageLocation != nil { + in, out := &in.AzureBlobStorageLocation, &out.AzureBlobStorageLocation + *out = new(AzureBlobStorageLocationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Compression != nil { + in, out := &in.Compression, &out.Compression + *out = new(CompressionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Folder != nil { + in, out := &in.Folder, &out.Folder + *out = new(string) + **out = **in + } + if in.HTTPServerLocation != nil { + in, out := &in.HTTPServerLocation, &out.HTTPServerLocation + *out = new(HTTPServerLocationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LinkedServiceName != nil { + in, out := &in.LinkedServiceName, &out.LinkedServiceName + *out = new(string) + **out = **in + } + if in.LinkedServiceNameRef != nil { + in, out := &in.LinkedServiceNameRef, &out.LinkedServiceNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LinkedServiceNameSelector != nil { + in, out := &in.LinkedServiceNameSelector, &out.LinkedServiceNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out 
= make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.SFTPServerLocation != nil { + in, out := &in.SFTPServerLocation, &out.SFTPServerLocation + *out = new(SFTPServerLocationInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetBinaryInitParameters. +func (in *DataSetBinaryInitParameters) DeepCopy() *DataSetBinaryInitParameters { + if in == nil { + return nil + } + out := new(DataSetBinaryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataSetBinaryList) DeepCopyInto(out *DataSetBinaryList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DataSetBinary, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetBinaryList. +func (in *DataSetBinaryList) DeepCopy() *DataSetBinaryList { + if in == nil { + return nil + } + out := new(DataSetBinaryList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DataSetBinaryList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataSetBinaryObservation) DeepCopyInto(out *DataSetBinaryObservation) { + *out = *in + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AzureBlobStorageLocation != nil { + in, out := &in.AzureBlobStorageLocation, &out.AzureBlobStorageLocation + *out = new(AzureBlobStorageLocationObservation) + (*in).DeepCopyInto(*out) + } + if in.Compression != nil { + in, out := &in.Compression, &out.Compression + *out = new(CompressionObservation) + (*in).DeepCopyInto(*out) + } + if in.DataFactoryID != nil { + in, out := &in.DataFactoryID, &out.DataFactoryID + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Folder != nil { + in, out := &in.Folder, &out.Folder + *out = new(string) + **out = **in + } + if in.HTTPServerLocation != nil { + in, out := &in.HTTPServerLocation, &out.HTTPServerLocation + *out = new(HTTPServerLocationObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LinkedServiceName != nil { + in, out := &in.LinkedServiceName, &out.LinkedServiceName + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + 
inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.SFTPServerLocation != nil { + in, out := &in.SFTPServerLocation, &out.SFTPServerLocation + *out = new(SFTPServerLocationObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetBinaryObservation. +func (in *DataSetBinaryObservation) DeepCopy() *DataSetBinaryObservation { + if in == nil { + return nil + } + out := new(DataSetBinaryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataSetBinaryParameters) DeepCopyInto(out *DataSetBinaryParameters) { + *out = *in + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AzureBlobStorageLocation != nil { + in, out := &in.AzureBlobStorageLocation, &out.AzureBlobStorageLocation + *out = new(AzureBlobStorageLocationParameters) + (*in).DeepCopyInto(*out) + } + if in.Compression != nil { + in, out := &in.Compression, &out.Compression + *out = new(CompressionParameters) + (*in).DeepCopyInto(*out) + } + if in.DataFactoryID != nil { + in, out := &in.DataFactoryID, &out.DataFactoryID + *out = new(string) + **out = **in + } + if in.DataFactoryIDRef != nil { + in, out := &in.DataFactoryIDRef, &out.DataFactoryIDRef + *out = new(v1.Reference) 
+ (*in).DeepCopyInto(*out) + } + if in.DataFactoryIDSelector != nil { + in, out := &in.DataFactoryIDSelector, &out.DataFactoryIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Folder != nil { + in, out := &in.Folder, &out.Folder + *out = new(string) + **out = **in + } + if in.HTTPServerLocation != nil { + in, out := &in.HTTPServerLocation, &out.HTTPServerLocation + *out = new(HTTPServerLocationParameters) + (*in).DeepCopyInto(*out) + } + if in.LinkedServiceName != nil { + in, out := &in.LinkedServiceName, &out.LinkedServiceName + *out = new(string) + **out = **in + } + if in.LinkedServiceNameRef != nil { + in, out := &in.LinkedServiceNameRef, &out.LinkedServiceNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LinkedServiceNameSelector != nil { + in, out := &in.LinkedServiceNameSelector, &out.LinkedServiceNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.SFTPServerLocation != nil { + in, out := &in.SFTPServerLocation, &out.SFTPServerLocation + *out = new(SFTPServerLocationParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetBinaryParameters. +func (in *DataSetBinaryParameters) DeepCopy() *DataSetBinaryParameters { + if in == nil { + return nil + } + out := new(DataSetBinaryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataSetBinarySpec) DeepCopyInto(out *DataSetBinarySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetBinarySpec. +func (in *DataSetBinarySpec) DeepCopy() *DataSetBinarySpec { + if in == nil { + return nil + } + out := new(DataSetBinarySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataSetBinaryStatus) DeepCopyInto(out *DataSetBinaryStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetBinaryStatus. +func (in *DataSetBinaryStatus) DeepCopy() *DataSetBinaryStatus { + if in == nil { + return nil + } + out := new(DataSetBinaryStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataSetDelimitedText) DeepCopyInto(out *DataSetDelimitedText) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetDelimitedText. +func (in *DataSetDelimitedText) DeepCopy() *DataSetDelimitedText { + if in == nil { + return nil + } + out := new(DataSetDelimitedText) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *DataSetDelimitedText) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataSetDelimitedTextAzureBlobStorageLocationInitParameters) DeepCopyInto(out *DataSetDelimitedTextAzureBlobStorageLocationInitParameters) { + *out = *in + if in.Container != nil { + in, out := &in.Container, &out.Container + *out = new(string) + **out = **in + } + if in.DynamicContainerEnabled != nil { + in, out := &in.DynamicContainerEnabled, &out.DynamicContainerEnabled + *out = new(bool) + **out = **in + } + if in.DynamicFilenameEnabled != nil { + in, out := &in.DynamicFilenameEnabled, &out.DynamicFilenameEnabled + *out = new(bool) + **out = **in + } + if in.DynamicPathEnabled != nil { + in, out := &in.DynamicPathEnabled, &out.DynamicPathEnabled + *out = new(bool) + **out = **in + } + if in.Filename != nil { + in, out := &in.Filename, &out.Filename + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetDelimitedTextAzureBlobStorageLocationInitParameters. +func (in *DataSetDelimitedTextAzureBlobStorageLocationInitParameters) DeepCopy() *DataSetDelimitedTextAzureBlobStorageLocationInitParameters { + if in == nil { + return nil + } + out := new(DataSetDelimitedTextAzureBlobStorageLocationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataSetDelimitedTextAzureBlobStorageLocationObservation) DeepCopyInto(out *DataSetDelimitedTextAzureBlobStorageLocationObservation) { + *out = *in + if in.Container != nil { + in, out := &in.Container, &out.Container + *out = new(string) + **out = **in + } + if in.DynamicContainerEnabled != nil { + in, out := &in.DynamicContainerEnabled, &out.DynamicContainerEnabled + *out = new(bool) + **out = **in + } + if in.DynamicFilenameEnabled != nil { + in, out := &in.DynamicFilenameEnabled, &out.DynamicFilenameEnabled + *out = new(bool) + **out = **in + } + if in.DynamicPathEnabled != nil { + in, out := &in.DynamicPathEnabled, &out.DynamicPathEnabled + *out = new(bool) + **out = **in + } + if in.Filename != nil { + in, out := &in.Filename, &out.Filename + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetDelimitedTextAzureBlobStorageLocationObservation. +func (in *DataSetDelimitedTextAzureBlobStorageLocationObservation) DeepCopy() *DataSetDelimitedTextAzureBlobStorageLocationObservation { + if in == nil { + return nil + } + out := new(DataSetDelimitedTextAzureBlobStorageLocationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataSetDelimitedTextAzureBlobStorageLocationParameters) DeepCopyInto(out *DataSetDelimitedTextAzureBlobStorageLocationParameters) { + *out = *in + if in.Container != nil { + in, out := &in.Container, &out.Container + *out = new(string) + **out = **in + } + if in.DynamicContainerEnabled != nil { + in, out := &in.DynamicContainerEnabled, &out.DynamicContainerEnabled + *out = new(bool) + **out = **in + } + if in.DynamicFilenameEnabled != nil { + in, out := &in.DynamicFilenameEnabled, &out.DynamicFilenameEnabled + *out = new(bool) + **out = **in + } + if in.DynamicPathEnabled != nil { + in, out := &in.DynamicPathEnabled, &out.DynamicPathEnabled + *out = new(bool) + **out = **in + } + if in.Filename != nil { + in, out := &in.Filename, &out.Filename + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetDelimitedTextAzureBlobStorageLocationParameters. +func (in *DataSetDelimitedTextAzureBlobStorageLocationParameters) DeepCopy() *DataSetDelimitedTextAzureBlobStorageLocationParameters { + if in == nil { + return nil + } + out := new(DataSetDelimitedTextAzureBlobStorageLocationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataSetDelimitedTextHTTPServerLocationInitParameters) DeepCopyInto(out *DataSetDelimitedTextHTTPServerLocationInitParameters) { + *out = *in + if in.DynamicFilenameEnabled != nil { + in, out := &in.DynamicFilenameEnabled, &out.DynamicFilenameEnabled + *out = new(bool) + **out = **in + } + if in.DynamicPathEnabled != nil { + in, out := &in.DynamicPathEnabled, &out.DynamicPathEnabled + *out = new(bool) + **out = **in + } + if in.Filename != nil { + in, out := &in.Filename, &out.Filename + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.RelativeURL != nil { + in, out := &in.RelativeURL, &out.RelativeURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetDelimitedTextHTTPServerLocationInitParameters. +func (in *DataSetDelimitedTextHTTPServerLocationInitParameters) DeepCopy() *DataSetDelimitedTextHTTPServerLocationInitParameters { + if in == nil { + return nil + } + out := new(DataSetDelimitedTextHTTPServerLocationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataSetDelimitedTextHTTPServerLocationObservation) DeepCopyInto(out *DataSetDelimitedTextHTTPServerLocationObservation) { + *out = *in + if in.DynamicFilenameEnabled != nil { + in, out := &in.DynamicFilenameEnabled, &out.DynamicFilenameEnabled + *out = new(bool) + **out = **in + } + if in.DynamicPathEnabled != nil { + in, out := &in.DynamicPathEnabled, &out.DynamicPathEnabled + *out = new(bool) + **out = **in + } + if in.Filename != nil { + in, out := &in.Filename, &out.Filename + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.RelativeURL != nil { + in, out := &in.RelativeURL, &out.RelativeURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetDelimitedTextHTTPServerLocationObservation. +func (in *DataSetDelimitedTextHTTPServerLocationObservation) DeepCopy() *DataSetDelimitedTextHTTPServerLocationObservation { + if in == nil { + return nil + } + out := new(DataSetDelimitedTextHTTPServerLocationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataSetDelimitedTextHTTPServerLocationParameters) DeepCopyInto(out *DataSetDelimitedTextHTTPServerLocationParameters) { + *out = *in + if in.DynamicFilenameEnabled != nil { + in, out := &in.DynamicFilenameEnabled, &out.DynamicFilenameEnabled + *out = new(bool) + **out = **in + } + if in.DynamicPathEnabled != nil { + in, out := &in.DynamicPathEnabled, &out.DynamicPathEnabled + *out = new(bool) + **out = **in + } + if in.Filename != nil { + in, out := &in.Filename, &out.Filename + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.RelativeURL != nil { + in, out := &in.RelativeURL, &out.RelativeURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetDelimitedTextHTTPServerLocationParameters. +func (in *DataSetDelimitedTextHTTPServerLocationParameters) DeepCopy() *DataSetDelimitedTextHTTPServerLocationParameters { + if in == nil { + return nil + } + out := new(DataSetDelimitedTextHTTPServerLocationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataSetDelimitedTextInitParameters) DeepCopyInto(out *DataSetDelimitedTextInitParameters) { + *out = *in + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AzureBlobFsLocation != nil { + in, out := &in.AzureBlobFsLocation, &out.AzureBlobFsLocation + *out = new(AzureBlobFsLocationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AzureBlobStorageLocation != nil { + in, out := &in.AzureBlobStorageLocation, &out.AzureBlobStorageLocation + *out = new(DataSetDelimitedTextAzureBlobStorageLocationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ColumnDelimiter != nil { + in, out := &in.ColumnDelimiter, &out.ColumnDelimiter + *out = new(string) + **out = **in + } + if in.CompressionCodec != nil { + in, out := &in.CompressionCodec, &out.CompressionCodec + *out = new(string) + **out = **in + } + if in.CompressionLevel != nil { + in, out := &in.CompressionLevel, &out.CompressionLevel + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.EscapeCharacter != nil { + in, out := &in.EscapeCharacter, &out.EscapeCharacter + *out = new(string) + **out = **in + } + if in.FirstRowAsHeader != nil { + in, out := &in.FirstRowAsHeader, &out.FirstRowAsHeader + *out = new(bool) + **out = **in + } + if 
in.Folder != nil { + in, out := &in.Folder, &out.Folder + *out = new(string) + **out = **in + } + if in.HTTPServerLocation != nil { + in, out := &in.HTTPServerLocation, &out.HTTPServerLocation + *out = new(DataSetDelimitedTextHTTPServerLocationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LinkedServiceName != nil { + in, out := &in.LinkedServiceName, &out.LinkedServiceName + *out = new(string) + **out = **in + } + if in.LinkedServiceNameRef != nil { + in, out := &in.LinkedServiceNameRef, &out.LinkedServiceNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LinkedServiceNameSelector != nil { + in, out := &in.LinkedServiceNameSelector, &out.LinkedServiceNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.NullValue != nil { + in, out := &in.NullValue, &out.NullValue + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.QuoteCharacter != nil { + in, out := &in.QuoteCharacter, &out.QuoteCharacter + *out = new(string) + **out = **in + } + if in.RowDelimiter != nil { + in, out := &in.RowDelimiter, &out.RowDelimiter + *out = new(string) + **out = **in + } + if in.SchemaColumn != nil { + in, out := &in.SchemaColumn, &out.SchemaColumn + *out = make([]SchemaColumnInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetDelimitedTextInitParameters. 
+func (in *DataSetDelimitedTextInitParameters) DeepCopy() *DataSetDelimitedTextInitParameters { + if in == nil { + return nil + } + out := new(DataSetDelimitedTextInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataSetDelimitedTextList) DeepCopyInto(out *DataSetDelimitedTextList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DataSetDelimitedText, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetDelimitedTextList. +func (in *DataSetDelimitedTextList) DeepCopy() *DataSetDelimitedTextList { + if in == nil { + return nil + } + out := new(DataSetDelimitedTextList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DataSetDelimitedTextList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataSetDelimitedTextObservation) DeepCopyInto(out *DataSetDelimitedTextObservation) { + *out = *in + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AzureBlobFsLocation != nil { + in, out := &in.AzureBlobFsLocation, &out.AzureBlobFsLocation + *out = new(AzureBlobFsLocationObservation) + (*in).DeepCopyInto(*out) + } + if in.AzureBlobStorageLocation != nil { + in, out := &in.AzureBlobStorageLocation, &out.AzureBlobStorageLocation + *out = new(DataSetDelimitedTextAzureBlobStorageLocationObservation) + (*in).DeepCopyInto(*out) + } + if in.ColumnDelimiter != nil { + in, out := &in.ColumnDelimiter, &out.ColumnDelimiter + *out = new(string) + **out = **in + } + if in.CompressionCodec != nil { + in, out := &in.CompressionCodec, &out.CompressionCodec + *out = new(string) + **out = **in + } + if in.CompressionLevel != nil { + in, out := &in.CompressionLevel, &out.CompressionLevel + *out = new(string) + **out = **in + } + if in.DataFactoryID != nil { + in, out := &in.DataFactoryID, &out.DataFactoryID + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.EscapeCharacter != nil { + in, out := &in.EscapeCharacter, &out.EscapeCharacter + *out = new(string) + **out = **in + } + if in.FirstRowAsHeader 
!= nil { + in, out := &in.FirstRowAsHeader, &out.FirstRowAsHeader + *out = new(bool) + **out = **in + } + if in.Folder != nil { + in, out := &in.Folder, &out.Folder + *out = new(string) + **out = **in + } + if in.HTTPServerLocation != nil { + in, out := &in.HTTPServerLocation, &out.HTTPServerLocation + *out = new(DataSetDelimitedTextHTTPServerLocationObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LinkedServiceName != nil { + in, out := &in.LinkedServiceName, &out.LinkedServiceName + *out = new(string) + **out = **in + } + if in.NullValue != nil { + in, out := &in.NullValue, &out.NullValue + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.QuoteCharacter != nil { + in, out := &in.QuoteCharacter, &out.QuoteCharacter + *out = new(string) + **out = **in + } + if in.RowDelimiter != nil { + in, out := &in.RowDelimiter, &out.RowDelimiter + *out = new(string) + **out = **in + } + if in.SchemaColumn != nil { + in, out := &in.SchemaColumn, &out.SchemaColumn + *out = make([]SchemaColumnObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetDelimitedTextObservation. +func (in *DataSetDelimitedTextObservation) DeepCopy() *DataSetDelimitedTextObservation { + if in == nil { + return nil + } + out := new(DataSetDelimitedTextObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataSetDelimitedTextParameters) DeepCopyInto(out *DataSetDelimitedTextParameters) { + *out = *in + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AzureBlobFsLocation != nil { + in, out := &in.AzureBlobFsLocation, &out.AzureBlobFsLocation + *out = new(AzureBlobFsLocationParameters) + (*in).DeepCopyInto(*out) + } + if in.AzureBlobStorageLocation != nil { + in, out := &in.AzureBlobStorageLocation, &out.AzureBlobStorageLocation + *out = new(DataSetDelimitedTextAzureBlobStorageLocationParameters) + (*in).DeepCopyInto(*out) + } + if in.ColumnDelimiter != nil { + in, out := &in.ColumnDelimiter, &out.ColumnDelimiter + *out = new(string) + **out = **in + } + if in.CompressionCodec != nil { + in, out := &in.CompressionCodec, &out.CompressionCodec + *out = new(string) + **out = **in + } + if in.CompressionLevel != nil { + in, out := &in.CompressionLevel, &out.CompressionLevel + *out = new(string) + **out = **in + } + if in.DataFactoryID != nil { + in, out := &in.DataFactoryID, &out.DataFactoryID + *out = new(string) + **out = **in + } + if in.DataFactoryIDRef != nil { + in, out := &in.DataFactoryIDRef, &out.DataFactoryIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DataFactoryIDSelector != nil { + in, out := &in.DataFactoryIDSelector, &out.DataFactoryIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, 
&out.Description + *out = new(string) + **out = **in + } + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.EscapeCharacter != nil { + in, out := &in.EscapeCharacter, &out.EscapeCharacter + *out = new(string) + **out = **in + } + if in.FirstRowAsHeader != nil { + in, out := &in.FirstRowAsHeader, &out.FirstRowAsHeader + *out = new(bool) + **out = **in + } + if in.Folder != nil { + in, out := &in.Folder, &out.Folder + *out = new(string) + **out = **in + } + if in.HTTPServerLocation != nil { + in, out := &in.HTTPServerLocation, &out.HTTPServerLocation + *out = new(DataSetDelimitedTextHTTPServerLocationParameters) + (*in).DeepCopyInto(*out) + } + if in.LinkedServiceName != nil { + in, out := &in.LinkedServiceName, &out.LinkedServiceName + *out = new(string) + **out = **in + } + if in.LinkedServiceNameRef != nil { + in, out := &in.LinkedServiceNameRef, &out.LinkedServiceNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LinkedServiceNameSelector != nil { + in, out := &in.LinkedServiceNameSelector, &out.LinkedServiceNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.NullValue != nil { + in, out := &in.NullValue, &out.NullValue + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.QuoteCharacter != nil { + in, out := &in.QuoteCharacter, &out.QuoteCharacter + *out = new(string) + **out = **in + } + if in.RowDelimiter != nil { + in, out := &in.RowDelimiter, &out.RowDelimiter + *out = new(string) + **out = **in + } + if in.SchemaColumn != nil { + in, out := &in.SchemaColumn, &out.SchemaColumn + *out = make([]SchemaColumnParameters, 
len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetDelimitedTextParameters. +func (in *DataSetDelimitedTextParameters) DeepCopy() *DataSetDelimitedTextParameters { + if in == nil { + return nil + } + out := new(DataSetDelimitedTextParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataSetDelimitedTextSpec) DeepCopyInto(out *DataSetDelimitedTextSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetDelimitedTextSpec. +func (in *DataSetDelimitedTextSpec) DeepCopy() *DataSetDelimitedTextSpec { + if in == nil { + return nil + } + out := new(DataSetDelimitedTextSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataSetDelimitedTextStatus) DeepCopyInto(out *DataSetDelimitedTextStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetDelimitedTextStatus. +func (in *DataSetDelimitedTextStatus) DeepCopy() *DataSetDelimitedTextStatus { + if in == nil { + return nil + } + out := new(DataSetDelimitedTextStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataSetInitParameters) DeepCopyInto(out *DataSetInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NameRef != nil { + in, out := &in.NameRef, &out.NameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NameSelector != nil { + in, out := &in.NameSelector, &out.NameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetInitParameters. +func (in *DataSetInitParameters) DeepCopy() *DataSetInitParameters { + if in == nil { + return nil + } + out := new(DataSetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataSetJSON) DeepCopyInto(out *DataSetJSON) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetJSON. +func (in *DataSetJSON) DeepCopy() *DataSetJSON { + if in == nil { + return nil + } + out := new(DataSetJSON) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *DataSetJSON) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataSetJSONAzureBlobStorageLocationInitParameters) DeepCopyInto(out *DataSetJSONAzureBlobStorageLocationInitParameters) { + *out = *in + if in.Container != nil { + in, out := &in.Container, &out.Container + *out = new(string) + **out = **in + } + if in.DynamicContainerEnabled != nil { + in, out := &in.DynamicContainerEnabled, &out.DynamicContainerEnabled + *out = new(bool) + **out = **in + } + if in.DynamicFilenameEnabled != nil { + in, out := &in.DynamicFilenameEnabled, &out.DynamicFilenameEnabled + *out = new(bool) + **out = **in + } + if in.DynamicPathEnabled != nil { + in, out := &in.DynamicPathEnabled, &out.DynamicPathEnabled + *out = new(bool) + **out = **in + } + if in.Filename != nil { + in, out := &in.Filename, &out.Filename + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetJSONAzureBlobStorageLocationInitParameters. +func (in *DataSetJSONAzureBlobStorageLocationInitParameters) DeepCopy() *DataSetJSONAzureBlobStorageLocationInitParameters { + if in == nil { + return nil + } + out := new(DataSetJSONAzureBlobStorageLocationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataSetJSONAzureBlobStorageLocationObservation) DeepCopyInto(out *DataSetJSONAzureBlobStorageLocationObservation) { + *out = *in + if in.Container != nil { + in, out := &in.Container, &out.Container + *out = new(string) + **out = **in + } + if in.DynamicContainerEnabled != nil { + in, out := &in.DynamicContainerEnabled, &out.DynamicContainerEnabled + *out = new(bool) + **out = **in + } + if in.DynamicFilenameEnabled != nil { + in, out := &in.DynamicFilenameEnabled, &out.DynamicFilenameEnabled + *out = new(bool) + **out = **in + } + if in.DynamicPathEnabled != nil { + in, out := &in.DynamicPathEnabled, &out.DynamicPathEnabled + *out = new(bool) + **out = **in + } + if in.Filename != nil { + in, out := &in.Filename, &out.Filename + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetJSONAzureBlobStorageLocationObservation. +func (in *DataSetJSONAzureBlobStorageLocationObservation) DeepCopy() *DataSetJSONAzureBlobStorageLocationObservation { + if in == nil { + return nil + } + out := new(DataSetJSONAzureBlobStorageLocationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataSetJSONAzureBlobStorageLocationParameters) DeepCopyInto(out *DataSetJSONAzureBlobStorageLocationParameters) { + *out = *in + if in.Container != nil { + in, out := &in.Container, &out.Container + *out = new(string) + **out = **in + } + if in.DynamicContainerEnabled != nil { + in, out := &in.DynamicContainerEnabled, &out.DynamicContainerEnabled + *out = new(bool) + **out = **in + } + if in.DynamicFilenameEnabled != nil { + in, out := &in.DynamicFilenameEnabled, &out.DynamicFilenameEnabled + *out = new(bool) + **out = **in + } + if in.DynamicPathEnabled != nil { + in, out := &in.DynamicPathEnabled, &out.DynamicPathEnabled + *out = new(bool) + **out = **in + } + if in.Filename != nil { + in, out := &in.Filename, &out.Filename + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetJSONAzureBlobStorageLocationParameters. +func (in *DataSetJSONAzureBlobStorageLocationParameters) DeepCopy() *DataSetJSONAzureBlobStorageLocationParameters { + if in == nil { + return nil + } + out := new(DataSetJSONAzureBlobStorageLocationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataSetJSONHTTPServerLocationInitParameters) DeepCopyInto(out *DataSetJSONHTTPServerLocationInitParameters) { + *out = *in + if in.DynamicFilenameEnabled != nil { + in, out := &in.DynamicFilenameEnabled, &out.DynamicFilenameEnabled + *out = new(bool) + **out = **in + } + if in.DynamicPathEnabled != nil { + in, out := &in.DynamicPathEnabled, &out.DynamicPathEnabled + *out = new(bool) + **out = **in + } + if in.Filename != nil { + in, out := &in.Filename, &out.Filename + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.RelativeURL != nil { + in, out := &in.RelativeURL, &out.RelativeURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetJSONHTTPServerLocationInitParameters. +func (in *DataSetJSONHTTPServerLocationInitParameters) DeepCopy() *DataSetJSONHTTPServerLocationInitParameters { + if in == nil { + return nil + } + out := new(DataSetJSONHTTPServerLocationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataSetJSONHTTPServerLocationObservation) DeepCopyInto(out *DataSetJSONHTTPServerLocationObservation) { + *out = *in + if in.DynamicFilenameEnabled != nil { + in, out := &in.DynamicFilenameEnabled, &out.DynamicFilenameEnabled + *out = new(bool) + **out = **in + } + if in.DynamicPathEnabled != nil { + in, out := &in.DynamicPathEnabled, &out.DynamicPathEnabled + *out = new(bool) + **out = **in + } + if in.Filename != nil { + in, out := &in.Filename, &out.Filename + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.RelativeURL != nil { + in, out := &in.RelativeURL, &out.RelativeURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetJSONHTTPServerLocationObservation. +func (in *DataSetJSONHTTPServerLocationObservation) DeepCopy() *DataSetJSONHTTPServerLocationObservation { + if in == nil { + return nil + } + out := new(DataSetJSONHTTPServerLocationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataSetJSONHTTPServerLocationParameters) DeepCopyInto(out *DataSetJSONHTTPServerLocationParameters) { + *out = *in + if in.DynamicFilenameEnabled != nil { + in, out := &in.DynamicFilenameEnabled, &out.DynamicFilenameEnabled + *out = new(bool) + **out = **in + } + if in.DynamicPathEnabled != nil { + in, out := &in.DynamicPathEnabled, &out.DynamicPathEnabled + *out = new(bool) + **out = **in + } + if in.Filename != nil { + in, out := &in.Filename, &out.Filename + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.RelativeURL != nil { + in, out := &in.RelativeURL, &out.RelativeURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetJSONHTTPServerLocationParameters. +func (in *DataSetJSONHTTPServerLocationParameters) DeepCopy() *DataSetJSONHTTPServerLocationParameters { + if in == nil { + return nil + } + out := new(DataSetJSONHTTPServerLocationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataSetJSONInitParameters) DeepCopyInto(out *DataSetJSONInitParameters) { + *out = *in + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AzureBlobStorageLocation != nil { + in, out := &in.AzureBlobStorageLocation, &out.AzureBlobStorageLocation + *out = new(DataSetJSONAzureBlobStorageLocationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.Folder != nil { + in, out := &in.Folder, &out.Folder + *out = new(string) + **out = **in + } + if in.HTTPServerLocation != nil { + in, out := &in.HTTPServerLocation, &out.HTTPServerLocation + *out = new(DataSetJSONHTTPServerLocationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LinkedServiceName != nil { + in, out := &in.LinkedServiceName, &out.LinkedServiceName + *out = new(string) + **out = **in + } + if in.LinkedServiceNameRef != nil { + in, out := &in.LinkedServiceNameRef, &out.LinkedServiceNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LinkedServiceNameSelector != nil { + in, out := &in.LinkedServiceNameSelector, &out.LinkedServiceNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = 
make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.SchemaColumn != nil { + in, out := &in.SchemaColumn, &out.SchemaColumn + *out = make([]DataSetJSONSchemaColumnInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetJSONInitParameters. +func (in *DataSetJSONInitParameters) DeepCopy() *DataSetJSONInitParameters { + if in == nil { + return nil + } + out := new(DataSetJSONInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataSetJSONList) DeepCopyInto(out *DataSetJSONList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DataSetJSON, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetJSONList. +func (in *DataSetJSONList) DeepCopy() *DataSetJSONList { + if in == nil { + return nil + } + out := new(DataSetJSONList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DataSetJSONList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataSetJSONObservation) DeepCopyInto(out *DataSetJSONObservation) { + *out = *in + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AzureBlobStorageLocation != nil { + in, out := &in.AzureBlobStorageLocation, &out.AzureBlobStorageLocation + *out = new(DataSetJSONAzureBlobStorageLocationObservation) + (*in).DeepCopyInto(*out) + } + if in.DataFactoryID != nil { + in, out := &in.DataFactoryID, &out.DataFactoryID + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.Folder != nil { + in, out := &in.Folder, &out.Folder + *out = new(string) + **out = **in + } + if in.HTTPServerLocation != nil { + in, out := &in.HTTPServerLocation, &out.HTTPServerLocation + *out = new(DataSetJSONHTTPServerLocationObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LinkedServiceName != nil { + in, out := &in.LinkedServiceName, &out.LinkedServiceName + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] 
+ in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.SchemaColumn != nil { + in, out := &in.SchemaColumn, &out.SchemaColumn + *out = make([]DataSetJSONSchemaColumnObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetJSONObservation. +func (in *DataSetJSONObservation) DeepCopy() *DataSetJSONObservation { + if in == nil { + return nil + } + out := new(DataSetJSONObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataSetJSONParameters) DeepCopyInto(out *DataSetJSONParameters) { + *out = *in + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AzureBlobStorageLocation != nil { + in, out := &in.AzureBlobStorageLocation, &out.AzureBlobStorageLocation + *out = new(DataSetJSONAzureBlobStorageLocationParameters) + (*in).DeepCopyInto(*out) + } + if in.DataFactoryID != nil { + in, out := &in.DataFactoryID, &out.DataFactoryID + *out = new(string) + **out = **in + } + if in.DataFactoryIDRef != nil { + in, out := &in.DataFactoryIDRef, &out.DataFactoryIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DataFactoryIDSelector != nil { + in, out := &in.DataFactoryIDSelector, 
&out.DataFactoryIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.Folder != nil { + in, out := &in.Folder, &out.Folder + *out = new(string) + **out = **in + } + if in.HTTPServerLocation != nil { + in, out := &in.HTTPServerLocation, &out.HTTPServerLocation + *out = new(DataSetJSONHTTPServerLocationParameters) + (*in).DeepCopyInto(*out) + } + if in.LinkedServiceName != nil { + in, out := &in.LinkedServiceName, &out.LinkedServiceName + *out = new(string) + **out = **in + } + if in.LinkedServiceNameRef != nil { + in, out := &in.LinkedServiceNameRef, &out.LinkedServiceNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LinkedServiceNameSelector != nil { + in, out := &in.LinkedServiceNameSelector, &out.LinkedServiceNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.SchemaColumn != nil { + in, out := &in.SchemaColumn, &out.SchemaColumn + *out = make([]DataSetJSONSchemaColumnParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetJSONParameters. +func (in *DataSetJSONParameters) DeepCopy() *DataSetJSONParameters { + if in == nil { + return nil + } + out := new(DataSetJSONParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *DataSetJSONSchemaColumnInitParameters) DeepCopyInto(out *DataSetJSONSchemaColumnInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetJSONSchemaColumnInitParameters. +func (in *DataSetJSONSchemaColumnInitParameters) DeepCopy() *DataSetJSONSchemaColumnInitParameters { + if in == nil { + return nil + } + out := new(DataSetJSONSchemaColumnInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataSetJSONSchemaColumnObservation) DeepCopyInto(out *DataSetJSONSchemaColumnObservation) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetJSONSchemaColumnObservation. +func (in *DataSetJSONSchemaColumnObservation) DeepCopy() *DataSetJSONSchemaColumnObservation { + if in == nil { + return nil + } + out := new(DataSetJSONSchemaColumnObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataSetJSONSchemaColumnParameters) DeepCopyInto(out *DataSetJSONSchemaColumnParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetJSONSchemaColumnParameters. +func (in *DataSetJSONSchemaColumnParameters) DeepCopy() *DataSetJSONSchemaColumnParameters { + if in == nil { + return nil + } + out := new(DataSetJSONSchemaColumnParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataSetJSONSpec) DeepCopyInto(out *DataSetJSONSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetJSONSpec. +func (in *DataSetJSONSpec) DeepCopy() *DataSetJSONSpec { + if in == nil { + return nil + } + out := new(DataSetJSONSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataSetJSONStatus) DeepCopyInto(out *DataSetJSONStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetJSONStatus. 
+func (in *DataSetJSONStatus) DeepCopy() *DataSetJSONStatus { + if in == nil { + return nil + } + out := new(DataSetJSONStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataSetObservation) DeepCopyInto(out *DataSetObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetObservation. +func (in *DataSetObservation) DeepCopy() *DataSetObservation { + if in == nil { + return nil + } + out := new(DataSetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataSetParameters) DeepCopyInto(out *DataSetParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NameRef != nil { + in, out := &in.NameRef, &out.NameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NameSelector != nil { + in, out := &in.NameSelector, &out.NameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetParameters. +func (in *DataSetParameters) DeepCopy() *DataSetParameters { + if in == nil { + return nil + } + out := new(DataSetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataSetParquet) DeepCopyInto(out *DataSetParquet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetParquet. +func (in *DataSetParquet) DeepCopy() *DataSetParquet { + if in == nil { + return nil + } + out := new(DataSetParquet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *DataSetParquet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataSetParquetAzureBlobFsLocationInitParameters) DeepCopyInto(out *DataSetParquetAzureBlobFsLocationInitParameters) { + *out = *in + if in.DynamicFileSystemEnabled != nil { + in, out := &in.DynamicFileSystemEnabled, &out.DynamicFileSystemEnabled + *out = new(bool) + **out = **in + } + if in.DynamicFilenameEnabled != nil { + in, out := &in.DynamicFilenameEnabled, &out.DynamicFilenameEnabled + *out = new(bool) + **out = **in + } + if in.DynamicPathEnabled != nil { + in, out := &in.DynamicPathEnabled, &out.DynamicPathEnabled + *out = new(bool) + **out = **in + } + if in.FileSystem != nil { + in, out := &in.FileSystem, &out.FileSystem + *out = new(string) + **out = **in + } + if in.Filename != nil { + in, out := &in.Filename, &out.Filename + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetParquetAzureBlobFsLocationInitParameters. +func (in *DataSetParquetAzureBlobFsLocationInitParameters) DeepCopy() *DataSetParquetAzureBlobFsLocationInitParameters { + if in == nil { + return nil + } + out := new(DataSetParquetAzureBlobFsLocationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataSetParquetAzureBlobFsLocationObservation) DeepCopyInto(out *DataSetParquetAzureBlobFsLocationObservation) { + *out = *in + if in.DynamicFileSystemEnabled != nil { + in, out := &in.DynamicFileSystemEnabled, &out.DynamicFileSystemEnabled + *out = new(bool) + **out = **in + } + if in.DynamicFilenameEnabled != nil { + in, out := &in.DynamicFilenameEnabled, &out.DynamicFilenameEnabled + *out = new(bool) + **out = **in + } + if in.DynamicPathEnabled != nil { + in, out := &in.DynamicPathEnabled, &out.DynamicPathEnabled + *out = new(bool) + **out = **in + } + if in.FileSystem != nil { + in, out := &in.FileSystem, &out.FileSystem + *out = new(string) + **out = **in + } + if in.Filename != nil { + in, out := &in.Filename, &out.Filename + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetParquetAzureBlobFsLocationObservation. +func (in *DataSetParquetAzureBlobFsLocationObservation) DeepCopy() *DataSetParquetAzureBlobFsLocationObservation { + if in == nil { + return nil + } + out := new(DataSetParquetAzureBlobFsLocationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataSetParquetAzureBlobFsLocationParameters) DeepCopyInto(out *DataSetParquetAzureBlobFsLocationParameters) { + *out = *in + if in.DynamicFileSystemEnabled != nil { + in, out := &in.DynamicFileSystemEnabled, &out.DynamicFileSystemEnabled + *out = new(bool) + **out = **in + } + if in.DynamicFilenameEnabled != nil { + in, out := &in.DynamicFilenameEnabled, &out.DynamicFilenameEnabled + *out = new(bool) + **out = **in + } + if in.DynamicPathEnabled != nil { + in, out := &in.DynamicPathEnabled, &out.DynamicPathEnabled + *out = new(bool) + **out = **in + } + if in.FileSystem != nil { + in, out := &in.FileSystem, &out.FileSystem + *out = new(string) + **out = **in + } + if in.Filename != nil { + in, out := &in.Filename, &out.Filename + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetParquetAzureBlobFsLocationParameters. +func (in *DataSetParquetAzureBlobFsLocationParameters) DeepCopy() *DataSetParquetAzureBlobFsLocationParameters { + if in == nil { + return nil + } + out := new(DataSetParquetAzureBlobFsLocationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataSetParquetAzureBlobStorageLocationInitParameters) DeepCopyInto(out *DataSetParquetAzureBlobStorageLocationInitParameters) { + *out = *in + if in.Container != nil { + in, out := &in.Container, &out.Container + *out = new(string) + **out = **in + } + if in.DynamicContainerEnabled != nil { + in, out := &in.DynamicContainerEnabled, &out.DynamicContainerEnabled + *out = new(bool) + **out = **in + } + if in.DynamicFilenameEnabled != nil { + in, out := &in.DynamicFilenameEnabled, &out.DynamicFilenameEnabled + *out = new(bool) + **out = **in + } + if in.DynamicPathEnabled != nil { + in, out := &in.DynamicPathEnabled, &out.DynamicPathEnabled + *out = new(bool) + **out = **in + } + if in.Filename != nil { + in, out := &in.Filename, &out.Filename + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetParquetAzureBlobStorageLocationInitParameters. +func (in *DataSetParquetAzureBlobStorageLocationInitParameters) DeepCopy() *DataSetParquetAzureBlobStorageLocationInitParameters { + if in == nil { + return nil + } + out := new(DataSetParquetAzureBlobStorageLocationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataSetParquetAzureBlobStorageLocationObservation) DeepCopyInto(out *DataSetParquetAzureBlobStorageLocationObservation) { + *out = *in + if in.Container != nil { + in, out := &in.Container, &out.Container + *out = new(string) + **out = **in + } + if in.DynamicContainerEnabled != nil { + in, out := &in.DynamicContainerEnabled, &out.DynamicContainerEnabled + *out = new(bool) + **out = **in + } + if in.DynamicFilenameEnabled != nil { + in, out := &in.DynamicFilenameEnabled, &out.DynamicFilenameEnabled + *out = new(bool) + **out = **in + } + if in.DynamicPathEnabled != nil { + in, out := &in.DynamicPathEnabled, &out.DynamicPathEnabled + *out = new(bool) + **out = **in + } + if in.Filename != nil { + in, out := &in.Filename, &out.Filename + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetParquetAzureBlobStorageLocationObservation. +func (in *DataSetParquetAzureBlobStorageLocationObservation) DeepCopy() *DataSetParquetAzureBlobStorageLocationObservation { + if in == nil { + return nil + } + out := new(DataSetParquetAzureBlobStorageLocationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataSetParquetAzureBlobStorageLocationParameters) DeepCopyInto(out *DataSetParquetAzureBlobStorageLocationParameters) { + *out = *in + if in.Container != nil { + in, out := &in.Container, &out.Container + *out = new(string) + **out = **in + } + if in.DynamicContainerEnabled != nil { + in, out := &in.DynamicContainerEnabled, &out.DynamicContainerEnabled + *out = new(bool) + **out = **in + } + if in.DynamicFilenameEnabled != nil { + in, out := &in.DynamicFilenameEnabled, &out.DynamicFilenameEnabled + *out = new(bool) + **out = **in + } + if in.DynamicPathEnabled != nil { + in, out := &in.DynamicPathEnabled, &out.DynamicPathEnabled + *out = new(bool) + **out = **in + } + if in.Filename != nil { + in, out := &in.Filename, &out.Filename + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetParquetAzureBlobStorageLocationParameters. +func (in *DataSetParquetAzureBlobStorageLocationParameters) DeepCopy() *DataSetParquetAzureBlobStorageLocationParameters { + if in == nil { + return nil + } + out := new(DataSetParquetAzureBlobStorageLocationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataSetParquetHTTPServerLocationInitParameters) DeepCopyInto(out *DataSetParquetHTTPServerLocationInitParameters) { + *out = *in + if in.DynamicFilenameEnabled != nil { + in, out := &in.DynamicFilenameEnabled, &out.DynamicFilenameEnabled + *out = new(bool) + **out = **in + } + if in.DynamicPathEnabled != nil { + in, out := &in.DynamicPathEnabled, &out.DynamicPathEnabled + *out = new(bool) + **out = **in + } + if in.Filename != nil { + in, out := &in.Filename, &out.Filename + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.RelativeURL != nil { + in, out := &in.RelativeURL, &out.RelativeURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetParquetHTTPServerLocationInitParameters. +func (in *DataSetParquetHTTPServerLocationInitParameters) DeepCopy() *DataSetParquetHTTPServerLocationInitParameters { + if in == nil { + return nil + } + out := new(DataSetParquetHTTPServerLocationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataSetParquetHTTPServerLocationObservation) DeepCopyInto(out *DataSetParquetHTTPServerLocationObservation) { + *out = *in + if in.DynamicFilenameEnabled != nil { + in, out := &in.DynamicFilenameEnabled, &out.DynamicFilenameEnabled + *out = new(bool) + **out = **in + } + if in.DynamicPathEnabled != nil { + in, out := &in.DynamicPathEnabled, &out.DynamicPathEnabled + *out = new(bool) + **out = **in + } + if in.Filename != nil { + in, out := &in.Filename, &out.Filename + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.RelativeURL != nil { + in, out := &in.RelativeURL, &out.RelativeURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetParquetHTTPServerLocationObservation. +func (in *DataSetParquetHTTPServerLocationObservation) DeepCopy() *DataSetParquetHTTPServerLocationObservation { + if in == nil { + return nil + } + out := new(DataSetParquetHTTPServerLocationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataSetParquetHTTPServerLocationParameters) DeepCopyInto(out *DataSetParquetHTTPServerLocationParameters) { + *out = *in + if in.DynamicFilenameEnabled != nil { + in, out := &in.DynamicFilenameEnabled, &out.DynamicFilenameEnabled + *out = new(bool) + **out = **in + } + if in.DynamicPathEnabled != nil { + in, out := &in.DynamicPathEnabled, &out.DynamicPathEnabled + *out = new(bool) + **out = **in + } + if in.Filename != nil { + in, out := &in.Filename, &out.Filename + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.RelativeURL != nil { + in, out := &in.RelativeURL, &out.RelativeURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetParquetHTTPServerLocationParameters. +func (in *DataSetParquetHTTPServerLocationParameters) DeepCopy() *DataSetParquetHTTPServerLocationParameters { + if in == nil { + return nil + } + out := new(DataSetParquetHTTPServerLocationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataSetParquetInitParameters) DeepCopyInto(out *DataSetParquetInitParameters) { + *out = *in + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AzureBlobFsLocation != nil { + in, out := &in.AzureBlobFsLocation, &out.AzureBlobFsLocation + *out = new(DataSetParquetAzureBlobFsLocationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AzureBlobStorageLocation != nil { + in, out := &in.AzureBlobStorageLocation, &out.AzureBlobStorageLocation + *out = new(DataSetParquetAzureBlobStorageLocationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CompressionCodec != nil { + in, out := &in.CompressionCodec, &out.CompressionCodec + *out = new(string) + **out = **in + } + if in.CompressionLevel != nil { + in, out := &in.CompressionLevel, &out.CompressionLevel + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Folder != nil { + in, out := &in.Folder, &out.Folder + *out = new(string) + **out = **in + } + if in.HTTPServerLocation != nil { + in, out := &in.HTTPServerLocation, &out.HTTPServerLocation + *out = new(DataSetParquetHTTPServerLocationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LinkedServiceName != nil { + in, out := &in.LinkedServiceName, &out.LinkedServiceName + *out = new(string) + **out = **in + } + if in.LinkedServiceNameRef != nil { + in, out := &in.LinkedServiceNameRef, 
&out.LinkedServiceNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LinkedServiceNameSelector != nil { + in, out := &in.LinkedServiceNameSelector, &out.LinkedServiceNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.SchemaColumn != nil { + in, out := &in.SchemaColumn, &out.SchemaColumn + *out = make([]DataSetParquetSchemaColumnInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetParquetInitParameters. +func (in *DataSetParquetInitParameters) DeepCopy() *DataSetParquetInitParameters { + if in == nil { + return nil + } + out := new(DataSetParquetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataSetParquetList) DeepCopyInto(out *DataSetParquetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DataSetParquet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetParquetList. +func (in *DataSetParquetList) DeepCopy() *DataSetParquetList { + if in == nil { + return nil + } + out := new(DataSetParquetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *DataSetParquetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataSetParquetObservation) DeepCopyInto(out *DataSetParquetObservation) { + *out = *in + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AzureBlobFsLocation != nil { + in, out := &in.AzureBlobFsLocation, &out.AzureBlobFsLocation + *out = new(DataSetParquetAzureBlobFsLocationObservation) + (*in).DeepCopyInto(*out) + } + if in.AzureBlobStorageLocation != nil { + in, out := &in.AzureBlobStorageLocation, &out.AzureBlobStorageLocation + *out = new(DataSetParquetAzureBlobStorageLocationObservation) + (*in).DeepCopyInto(*out) + } + if in.CompressionCodec != nil { + in, out := &in.CompressionCodec, &out.CompressionCodec + *out = new(string) + **out = **in + } + if in.CompressionLevel != nil { + in, out := &in.CompressionLevel, &out.CompressionLevel + *out = new(string) + **out = **in + } + if in.DataFactoryID != nil { + in, out := &in.DataFactoryID, &out.DataFactoryID + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Folder != nil { + in, out := &in.Folder, &out.Folder + *out = new(string) + **out = **in + } + if in.HTTPServerLocation != nil { + in, 
out := &in.HTTPServerLocation, &out.HTTPServerLocation + *out = new(DataSetParquetHTTPServerLocationObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LinkedServiceName != nil { + in, out := &in.LinkedServiceName, &out.LinkedServiceName + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.SchemaColumn != nil { + in, out := &in.SchemaColumn, &out.SchemaColumn + *out = make([]DataSetParquetSchemaColumnObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetParquetObservation. +func (in *DataSetParquetObservation) DeepCopy() *DataSetParquetObservation { + if in == nil { + return nil + } + out := new(DataSetParquetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataSetParquetParameters) DeepCopyInto(out *DataSetParquetParameters) { + *out = *in + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AzureBlobFsLocation != nil { + in, out := &in.AzureBlobFsLocation, &out.AzureBlobFsLocation + *out = new(DataSetParquetAzureBlobFsLocationParameters) + (*in).DeepCopyInto(*out) + } + if in.AzureBlobStorageLocation != nil { + in, out := &in.AzureBlobStorageLocation, &out.AzureBlobStorageLocation + *out = new(DataSetParquetAzureBlobStorageLocationParameters) + (*in).DeepCopyInto(*out) + } + if in.CompressionCodec != nil { + in, out := &in.CompressionCodec, &out.CompressionCodec + *out = new(string) + **out = **in + } + if in.CompressionLevel != nil { + in, out := &in.CompressionLevel, &out.CompressionLevel + *out = new(string) + **out = **in + } + if in.DataFactoryID != nil { + in, out := &in.DataFactoryID, &out.DataFactoryID + *out = new(string) + **out = **in + } + if in.DataFactoryIDRef != nil { + in, out := &in.DataFactoryIDRef, &out.DataFactoryIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DataFactoryIDSelector != nil { + in, out := &in.DataFactoryIDSelector, &out.DataFactoryIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Folder != nil { + in, out := &in.Folder, &out.Folder + *out = new(string) + 
**out = **in + } + if in.HTTPServerLocation != nil { + in, out := &in.HTTPServerLocation, &out.HTTPServerLocation + *out = new(DataSetParquetHTTPServerLocationParameters) + (*in).DeepCopyInto(*out) + } + if in.LinkedServiceName != nil { + in, out := &in.LinkedServiceName, &out.LinkedServiceName + *out = new(string) + **out = **in + } + if in.LinkedServiceNameRef != nil { + in, out := &in.LinkedServiceNameRef, &out.LinkedServiceNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LinkedServiceNameSelector != nil { + in, out := &in.LinkedServiceNameSelector, &out.LinkedServiceNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.SchemaColumn != nil { + in, out := &in.SchemaColumn, &out.SchemaColumn + *out = make([]DataSetParquetSchemaColumnParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetParquetParameters. +func (in *DataSetParquetParameters) DeepCopy() *DataSetParquetParameters { + if in == nil { + return nil + } + out := new(DataSetParquetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataSetParquetSchemaColumnInitParameters) DeepCopyInto(out *DataSetParquetSchemaColumnInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetParquetSchemaColumnInitParameters. +func (in *DataSetParquetSchemaColumnInitParameters) DeepCopy() *DataSetParquetSchemaColumnInitParameters { + if in == nil { + return nil + } + out := new(DataSetParquetSchemaColumnInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataSetParquetSchemaColumnObservation) DeepCopyInto(out *DataSetParquetSchemaColumnObservation) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetParquetSchemaColumnObservation. +func (in *DataSetParquetSchemaColumnObservation) DeepCopy() *DataSetParquetSchemaColumnObservation { + if in == nil { + return nil + } + out := new(DataSetParquetSchemaColumnObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataSetParquetSchemaColumnParameters) DeepCopyInto(out *DataSetParquetSchemaColumnParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetParquetSchemaColumnParameters. +func (in *DataSetParquetSchemaColumnParameters) DeepCopy() *DataSetParquetSchemaColumnParameters { + if in == nil { + return nil + } + out := new(DataSetParquetSchemaColumnParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataSetParquetSpec) DeepCopyInto(out *DataSetParquetSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetParquetSpec. +func (in *DataSetParquetSpec) DeepCopy() *DataSetParquetSpec { + if in == nil { + return nil + } + out := new(DataSetParquetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataSetParquetStatus) DeepCopyInto(out *DataSetParquetStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetParquetStatus. 
+func (in *DataSetParquetStatus) DeepCopy() *DataSetParquetStatus { + if in == nil { + return nil + } + out := new(DataSetParquetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExpressCustomSetupInitParameters) DeepCopyInto(out *ExpressCustomSetupInitParameters) { + *out = *in + if in.CommandKey != nil { + in, out := &in.CommandKey, &out.CommandKey + *out = make([]CommandKeyInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Component != nil { + in, out := &in.Component, &out.Component + *out = make([]ComponentInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Environment != nil { + in, out := &in.Environment, &out.Environment + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.PowershellVersion != nil { + in, out := &in.PowershellVersion, &out.PowershellVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpressCustomSetupInitParameters. +func (in *ExpressCustomSetupInitParameters) DeepCopy() *ExpressCustomSetupInitParameters { + if in == nil { + return nil + } + out := new(ExpressCustomSetupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExpressCustomSetupObservation) DeepCopyInto(out *ExpressCustomSetupObservation) { + *out = *in + if in.CommandKey != nil { + in, out := &in.CommandKey, &out.CommandKey + *out = make([]CommandKeyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Component != nil { + in, out := &in.Component, &out.Component + *out = make([]ComponentObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Environment != nil { + in, out := &in.Environment, &out.Environment + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.PowershellVersion != nil { + in, out := &in.PowershellVersion, &out.PowershellVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpressCustomSetupObservation. +func (in *ExpressCustomSetupObservation) DeepCopy() *ExpressCustomSetupObservation { + if in == nil { + return nil + } + out := new(ExpressCustomSetupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExpressCustomSetupParameters) DeepCopyInto(out *ExpressCustomSetupParameters) { + *out = *in + if in.CommandKey != nil { + in, out := &in.CommandKey, &out.CommandKey + *out = make([]CommandKeyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Component != nil { + in, out := &in.Component, &out.Component + *out = make([]ComponentParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Environment != nil { + in, out := &in.Environment, &out.Environment + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.PowershellVersion != nil { + in, out := &in.PowershellVersion, &out.PowershellVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpressCustomSetupParameters. +func (in *ExpressCustomSetupParameters) DeepCopy() *ExpressCustomSetupParameters { + if in == nil { + return nil + } + out := new(ExpressCustomSetupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExpressVnetIntegrationInitParameters) DeepCopyInto(out *ExpressVnetIntegrationInitParameters) { + *out = *in + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpressVnetIntegrationInitParameters. +func (in *ExpressVnetIntegrationInitParameters) DeepCopy() *ExpressVnetIntegrationInitParameters { + if in == nil { + return nil + } + out := new(ExpressVnetIntegrationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExpressVnetIntegrationObservation) DeepCopyInto(out *ExpressVnetIntegrationObservation) { + *out = *in + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpressVnetIntegrationObservation. +func (in *ExpressVnetIntegrationObservation) DeepCopy() *ExpressVnetIntegrationObservation { + if in == nil { + return nil + } + out := new(ExpressVnetIntegrationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExpressVnetIntegrationParameters) DeepCopyInto(out *ExpressVnetIntegrationParameters) { + *out = *in + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpressVnetIntegrationParameters. +func (in *ExpressVnetIntegrationParameters) DeepCopy() *ExpressVnetIntegrationParameters { + if in == nil { + return nil + } + out := new(ExpressVnetIntegrationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Factory) DeepCopyInto(out *Factory) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Factory. +func (in *Factory) DeepCopy() *Factory { + if in == nil { + return nil + } + out := new(Factory) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Factory) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *FactoryInitParameters) DeepCopyInto(out *FactoryInitParameters) {
	*out = *in
	if in.CustomerManagedKeyID != nil {
		in, out := &in.CustomerManagedKeyID, &out.CustomerManagedKeyID
		*out = new(string)
		**out = **in
	}
	if in.CustomerManagedKeyIdentityID != nil {
		in, out := &in.CustomerManagedKeyIdentityID, &out.CustomerManagedKeyIdentityID
		*out = new(string)
		**out = **in
	}
	if in.GithubConfiguration != nil {
		in, out := &in.GithubConfiguration, &out.GithubConfiguration
		*out = new(GithubConfigurationInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.GlobalParameter != nil {
		in, out := &in.GlobalParameter, &out.GlobalParameter
		*out = make([]GlobalParameterInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Identity != nil {
		in, out := &in.Identity, &out.Identity
		*out = new(IdentityInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Location != nil {
		in, out := &in.Location, &out.Location
		*out = new(string)
		**out = **in
	}
	if in.ManagedVirtualNetworkEnabled != nil {
		in, out := &in.ManagedVirtualNetworkEnabled, &out.ManagedVirtualNetworkEnabled
		*out = new(bool)
		**out = **in
	}
	if in.PublicNetworkEnabled != nil {
		in, out := &in.PublicNetworkEnabled, &out.PublicNetworkEnabled
		*out = new(bool)
		**out = **in
	}
	if in.PurviewID != nil {
		in, out := &in.PurviewID, &out.PurviewID
		*out = new(string)
		**out = **in
	}
	if in.Tags != nil {
		in, out := &in.Tags, &out.Tags
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
	if in.VstsConfiguration != nil {
		in, out := &in.VstsConfiguration, &out.VstsConfiguration
		*out = new(VstsConfigurationInitParameters)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FactoryInitParameters.
func (in *FactoryInitParameters) DeepCopy() *FactoryInitParameters {
	if in == nil {
		return nil
	}
	out := new(FactoryInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FactoryList) DeepCopyInto(out *FactoryList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]Factory, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FactoryList.
func (in *FactoryList) DeepCopy() *FactoryList {
	if in == nil {
		return nil
	}
	out := new(FactoryList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *FactoryList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FactoryObservation) DeepCopyInto(out *FactoryObservation) {
	*out = *in
	if in.CustomerManagedKeyID != nil {
		in, out := &in.CustomerManagedKeyID, &out.CustomerManagedKeyID
		*out = new(string)
		**out = **in
	}
	if in.CustomerManagedKeyIdentityID != nil {
		in, out := &in.CustomerManagedKeyIdentityID, &out.CustomerManagedKeyIdentityID
		*out = new(string)
		**out = **in
	}
	if in.GithubConfiguration != nil {
		in, out := &in.GithubConfiguration, &out.GithubConfiguration
		*out = new(GithubConfigurationObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.GlobalParameter != nil {
		in, out := &in.GlobalParameter, &out.GlobalParameter
		*out = make([]GlobalParameterObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ID != nil {
		in, out := &in.ID, &out.ID
		*out = new(string)
		**out = **in
	}
	if in.Identity != nil {
		in, out := &in.Identity, &out.Identity
		*out = new(IdentityObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.Location != nil {
		in, out := &in.Location, &out.Location
		*out = new(string)
		**out = **in
	}
	if in.ManagedVirtualNetworkEnabled != nil {
		in, out := &in.ManagedVirtualNetworkEnabled, &out.ManagedVirtualNetworkEnabled
		*out = new(bool)
		**out = **in
	}
	if in.PublicNetworkEnabled != nil {
		in, out := &in.PublicNetworkEnabled, &out.PublicNetworkEnabled
		*out = new(bool)
		**out = **in
	}
	if in.PurviewID != nil {
		in, out := &in.PurviewID, &out.PurviewID
		*out = new(string)
		**out = **in
	}
	if in.ResourceGroupName != nil {
		in, out := &in.ResourceGroupName, &out.ResourceGroupName
		*out = new(string)
		**out = **in
	}
	if in.Tags != nil {
		in, out := &in.Tags, &out.Tags
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
	if in.VstsConfiguration != nil {
		in, out := &in.VstsConfiguration, &out.VstsConfiguration
		*out = new(VstsConfigurationObservation)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FactoryObservation.
func (in *FactoryObservation) DeepCopy() *FactoryObservation {
	if in == nil {
		return nil
	}
	out := new(FactoryObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FactoryParameters) DeepCopyInto(out *FactoryParameters) {
	*out = *in
	if in.CustomerManagedKeyID != nil {
		in, out := &in.CustomerManagedKeyID, &out.CustomerManagedKeyID
		*out = new(string)
		**out = **in
	}
	if in.CustomerManagedKeyIdentityID != nil {
		in, out := &in.CustomerManagedKeyIdentityID, &out.CustomerManagedKeyIdentityID
		*out = new(string)
		**out = **in
	}
	if in.GithubConfiguration != nil {
		in, out := &in.GithubConfiguration, &out.GithubConfiguration
		*out = new(GithubConfigurationParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.GlobalParameter != nil {
		in, out := &in.GlobalParameter, &out.GlobalParameter
		*out = make([]GlobalParameterParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Identity != nil {
		in, out := &in.Identity, &out.Identity
		*out = new(IdentityParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Location != nil {
		in, out := &in.Location, &out.Location
		*out = new(string)
		**out = **in
	}
	if in.ManagedVirtualNetworkEnabled != nil {
		in, out := &in.ManagedVirtualNetworkEnabled, &out.ManagedVirtualNetworkEnabled
		*out = new(bool)
		**out = **in
	}
	if in.PublicNetworkEnabled != nil {
		in, out := &in.PublicNetworkEnabled, &out.PublicNetworkEnabled
		*out = new(bool)
		**out = **in
	}
	if in.PurviewID != nil {
		in, out := &in.PurviewID, &out.PurviewID
		*out = new(string)
		**out = **in
	}
	if in.ResourceGroupName != nil {
		in, out := &in.ResourceGroupName, &out.ResourceGroupName
		*out = new(string)
		**out = **in
	}
	if in.ResourceGroupNameRef != nil {
		in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef
		*out = new(v1.Reference)
		(*in).DeepCopyInto(*out)
	}
	if in.ResourceGroupNameSelector != nil {
		in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector
		*out = new(v1.Selector)
		(*in).DeepCopyInto(*out)
	}
	if in.Tags != nil {
		in, out := &in.Tags, &out.Tags
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
	if in.VstsConfiguration != nil {
		in, out := &in.VstsConfiguration, &out.VstsConfiguration
		*out = new(VstsConfigurationParameters)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FactoryParameters.
func (in *FactoryParameters) DeepCopy() *FactoryParameters {
	if in == nil {
		return nil
	}
	out := new(FactoryParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FactorySpec) DeepCopyInto(out *FactorySpec) {
	*out = *in
	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
	in.ForProvider.DeepCopyInto(&out.ForProvider)
	in.InitProvider.DeepCopyInto(&out.InitProvider)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FactorySpec.
func (in *FactorySpec) DeepCopy() *FactorySpec {
	if in == nil {
		return nil
	}
	out := new(FactorySpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FactoryStatus) DeepCopyInto(out *FactoryStatus) {
	*out = *in
	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
	in.AtProvider.DeepCopyInto(&out.AtProvider)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FactoryStatus.
func (in *FactoryStatus) DeepCopy() *FactoryStatus {
	if in == nil {
		return nil
	}
	out := new(FactoryStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FlowletInitParameters) DeepCopyInto(out *FlowletInitParameters) {
	*out = *in
	if in.DataSetParameters != nil {
		in, out := &in.DataSetParameters, &out.DataSetParameters
		*out = new(string)
		**out = **in
	}
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
	if in.Parameters != nil {
		in, out := &in.Parameters, &out.Parameters
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowletInitParameters.
func (in *FlowletInitParameters) DeepCopy() *FlowletInitParameters {
	if in == nil {
		return nil
	}
	out := new(FlowletInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FlowletObservation) DeepCopyInto(out *FlowletObservation) {
	*out = *in
	if in.DataSetParameters != nil {
		in, out := &in.DataSetParameters, &out.DataSetParameters
		*out = new(string)
		**out = **in
	}
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
	if in.Parameters != nil {
		in, out := &in.Parameters, &out.Parameters
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowletObservation.
func (in *FlowletObservation) DeepCopy() *FlowletObservation {
	if in == nil {
		return nil
	}
	out := new(FlowletObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FlowletParameters) DeepCopyInto(out *FlowletParameters) {
	*out = *in
	if in.DataSetParameters != nil {
		in, out := &in.DataSetParameters, &out.DataSetParameters
		*out = new(string)
		**out = **in
	}
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
	if in.Parameters != nil {
		in, out := &in.Parameters, &out.Parameters
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowletParameters.
func (in *FlowletParameters) DeepCopy() *FlowletParameters {
	if in == nil {
		return nil
	}
	out := new(FlowletParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GithubConfigurationInitParameters) DeepCopyInto(out *GithubConfigurationInitParameters) {
	*out = *in
	if in.AccountName != nil {
		in, out := &in.AccountName, &out.AccountName
		*out = new(string)
		**out = **in
	}
	if in.BranchName != nil {
		in, out := &in.BranchName, &out.BranchName
		*out = new(string)
		**out = **in
	}
	if in.GitURL != nil {
		in, out := &in.GitURL, &out.GitURL
		*out = new(string)
		**out = **in
	}
	if in.PublishingEnabled != nil {
		in, out := &in.PublishingEnabled, &out.PublishingEnabled
		*out = new(bool)
		**out = **in
	}
	if in.RepositoryName != nil {
		in, out := &in.RepositoryName, &out.RepositoryName
		*out = new(string)
		**out = **in
	}
	if in.RootFolder != nil {
		in, out := &in.RootFolder, &out.RootFolder
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GithubConfigurationInitParameters.
func (in *GithubConfigurationInitParameters) DeepCopy() *GithubConfigurationInitParameters {
	if in == nil {
		return nil
	}
	out := new(GithubConfigurationInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GithubConfigurationObservation) DeepCopyInto(out *GithubConfigurationObservation) {
	*out = *in
	if in.AccountName != nil {
		in, out := &in.AccountName, &out.AccountName
		*out = new(string)
		**out = **in
	}
	if in.BranchName != nil {
		in, out := &in.BranchName, &out.BranchName
		*out = new(string)
		**out = **in
	}
	if in.GitURL != nil {
		in, out := &in.GitURL, &out.GitURL
		*out = new(string)
		**out = **in
	}
	if in.PublishingEnabled != nil {
		in, out := &in.PublishingEnabled, &out.PublishingEnabled
		*out = new(bool)
		**out = **in
	}
	if in.RepositoryName != nil {
		in, out := &in.RepositoryName, &out.RepositoryName
		*out = new(string)
		**out = **in
	}
	if in.RootFolder != nil {
		in, out := &in.RootFolder, &out.RootFolder
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GithubConfigurationObservation.
func (in *GithubConfigurationObservation) DeepCopy() *GithubConfigurationObservation {
	if in == nil {
		return nil
	}
	out := new(GithubConfigurationObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GithubConfigurationParameters) DeepCopyInto(out *GithubConfigurationParameters) {
	*out = *in
	if in.AccountName != nil {
		in, out := &in.AccountName, &out.AccountName
		*out = new(string)
		**out = **in
	}
	if in.BranchName != nil {
		in, out := &in.BranchName, &out.BranchName
		*out = new(string)
		**out = **in
	}
	if in.GitURL != nil {
		in, out := &in.GitURL, &out.GitURL
		*out = new(string)
		**out = **in
	}
	if in.PublishingEnabled != nil {
		in, out := &in.PublishingEnabled, &out.PublishingEnabled
		*out = new(bool)
		**out = **in
	}
	if in.RepositoryName != nil {
		in, out := &in.RepositoryName, &out.RepositoryName
		*out = new(string)
		**out = **in
	}
	if in.RootFolder != nil {
		in, out := &in.RootFolder, &out.RootFolder
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GithubConfigurationParameters.
func (in *GithubConfigurationParameters) DeepCopy() *GithubConfigurationParameters {
	if in == nil {
		return nil
	}
	out := new(GithubConfigurationParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GlobalParameterInitParameters) DeepCopyInto(out *GlobalParameterInitParameters) {
	*out = *in
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
	if in.Type != nil {
		in, out := &in.Type, &out.Type
		*out = new(string)
		**out = **in
	}
	if in.Value != nil {
		in, out := &in.Value, &out.Value
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalParameterInitParameters.
func (in *GlobalParameterInitParameters) DeepCopy() *GlobalParameterInitParameters {
	if in == nil {
		return nil
	}
	out := new(GlobalParameterInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GlobalParameterObservation) DeepCopyInto(out *GlobalParameterObservation) {
	*out = *in
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
	if in.Type != nil {
		in, out := &in.Type, &out.Type
		*out = new(string)
		**out = **in
	}
	if in.Value != nil {
		in, out := &in.Value, &out.Value
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalParameterObservation.
func (in *GlobalParameterObservation) DeepCopy() *GlobalParameterObservation {
	if in == nil {
		return nil
	}
	out := new(GlobalParameterObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GlobalParameterParameters) DeepCopyInto(out *GlobalParameterParameters) {
	*out = *in
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
	if in.Type != nil {
		in, out := &in.Type, &out.Type
		*out = new(string)
		**out = **in
	}
	if in.Value != nil {
		in, out := &in.Value, &out.Value
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalParameterParameters.
func (in *GlobalParameterParameters) DeepCopy() *GlobalParameterParameters {
	if in == nil {
		return nil
	}
	out := new(GlobalParameterParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPServerLocationInitParameters) DeepCopyInto(out *HTTPServerLocationInitParameters) {
	*out = *in
	if in.DynamicFilenameEnabled != nil {
		in, out := &in.DynamicFilenameEnabled, &out.DynamicFilenameEnabled
		*out = new(bool)
		**out = **in
	}
	if in.DynamicPathEnabled != nil {
		in, out := &in.DynamicPathEnabled, &out.DynamicPathEnabled
		*out = new(bool)
		**out = **in
	}
	if in.Filename != nil {
		in, out := &in.Filename, &out.Filename
		*out = new(string)
		**out = **in
	}
	if in.Path != nil {
		in, out := &in.Path, &out.Path
		*out = new(string)
		**out = **in
	}
	if in.RelativeURL != nil {
		in, out := &in.RelativeURL, &out.RelativeURL
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPServerLocationInitParameters.
func (in *HTTPServerLocationInitParameters) DeepCopy() *HTTPServerLocationInitParameters {
	if in == nil {
		return nil
	}
	out := new(HTTPServerLocationInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPServerLocationObservation) DeepCopyInto(out *HTTPServerLocationObservation) {
	*out = *in
	if in.DynamicFilenameEnabled != nil {
		in, out := &in.DynamicFilenameEnabled, &out.DynamicFilenameEnabled
		*out = new(bool)
		**out = **in
	}
	if in.DynamicPathEnabled != nil {
		in, out := &in.DynamicPathEnabled, &out.DynamicPathEnabled
		*out = new(bool)
		**out = **in
	}
	if in.Filename != nil {
		in, out := &in.Filename, &out.Filename
		*out = new(string)
		**out = **in
	}
	if in.Path != nil {
		in, out := &in.Path, &out.Path
		*out = new(string)
		**out = **in
	}
	if in.RelativeURL != nil {
		in, out := &in.RelativeURL, &out.RelativeURL
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPServerLocationObservation.
func (in *HTTPServerLocationObservation) DeepCopy() *HTTPServerLocationObservation {
	if in == nil {
		return nil
	}
	out := new(HTTPServerLocationObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPServerLocationParameters) DeepCopyInto(out *HTTPServerLocationParameters) {
	*out = *in
	if in.DynamicFilenameEnabled != nil {
		in, out := &in.DynamicFilenameEnabled, &out.DynamicFilenameEnabled
		*out = new(bool)
		**out = **in
	}
	if in.DynamicPathEnabled != nil {
		in, out := &in.DynamicPathEnabled, &out.DynamicPathEnabled
		*out = new(bool)
		**out = **in
	}
	if in.Filename != nil {
		in, out := &in.Filename, &out.Filename
		*out = new(string)
		**out = **in
	}
	if in.Path != nil {
		in, out := &in.Path, &out.Path
		*out = new(string)
		**out = **in
	}
	if in.RelativeURL != nil {
		in, out := &in.RelativeURL, &out.RelativeURL
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPServerLocationParameters.
func (in *HTTPServerLocationParameters) DeepCopy() *HTTPServerLocationParameters {
	if in == nil {
		return nil
	}
	out := new(HTTPServerLocationParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IdentityInitParameters) DeepCopyInto(out *IdentityInitParameters) {
	*out = *in
	if in.IdentityIds != nil {
		in, out := &in.IdentityIds, &out.IdentityIds
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.Type != nil {
		in, out := &in.Type, &out.Type
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityInitParameters.
func (in *IdentityInitParameters) DeepCopy() *IdentityInitParameters {
	if in == nil {
		return nil
	}
	out := new(IdentityInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IdentityObservation) DeepCopyInto(out *IdentityObservation) {
	*out = *in
	if in.IdentityIds != nil {
		in, out := &in.IdentityIds, &out.IdentityIds
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.PrincipalID != nil {
		in, out := &in.PrincipalID, &out.PrincipalID
		*out = new(string)
		**out = **in
	}
	if in.TenantID != nil {
		in, out := &in.TenantID, &out.TenantID
		*out = new(string)
		**out = **in
	}
	if in.Type != nil {
		in, out := &in.Type, &out.Type
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityObservation.
func (in *IdentityObservation) DeepCopy() *IdentityObservation {
	if in == nil {
		return nil
	}
	out := new(IdentityObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IdentityParameters) DeepCopyInto(out *IdentityParameters) {
	*out = *in
	if in.IdentityIds != nil {
		in, out := &in.IdentityIds, &out.IdentityIds
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.Type != nil {
		in, out := &in.Type, &out.Type
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityParameters.
func (in *IdentityParameters) DeepCopy() *IdentityParameters {
	if in == nil {
		return nil
	}
	out := new(IdentityParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *InstancePoolInitParameters) DeepCopyInto(out *InstancePoolInitParameters) {
	*out = *in
	if in.ClusterVersion != nil {
		in, out := &in.ClusterVersion, &out.ClusterVersion
		*out = new(string)
		**out = **in
	}
	if in.InstancePoolID != nil {
		in, out := &in.InstancePoolID, &out.InstancePoolID
		*out = new(string)
		**out = **in
	}
	if in.MaxNumberOfWorkers != nil {
		in, out := &in.MaxNumberOfWorkers, &out.MaxNumberOfWorkers
		*out = new(float64)
		**out = **in
	}
	if in.MinNumberOfWorkers != nil {
		in, out := &in.MinNumberOfWorkers, &out.MinNumberOfWorkers
		*out = new(float64)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstancePoolInitParameters.
func (in *InstancePoolInitParameters) DeepCopy() *InstancePoolInitParameters {
	if in == nil {
		return nil
	}
	out := new(InstancePoolInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *InstancePoolObservation) DeepCopyInto(out *InstancePoolObservation) {
	*out = *in
	if in.ClusterVersion != nil {
		in, out := &in.ClusterVersion, &out.ClusterVersion
		*out = new(string)
		**out = **in
	}
	if in.InstancePoolID != nil {
		in, out := &in.InstancePoolID, &out.InstancePoolID
		*out = new(string)
		**out = **in
	}
	if in.MaxNumberOfWorkers != nil {
		in, out := &in.MaxNumberOfWorkers, &out.MaxNumberOfWorkers
		*out = new(float64)
		**out = **in
	}
	if in.MinNumberOfWorkers != nil {
		in, out := &in.MinNumberOfWorkers, &out.MinNumberOfWorkers
		*out = new(float64)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstancePoolObservation.
func (in *InstancePoolObservation) DeepCopy() *InstancePoolObservation {
	if in == nil {
		return nil
	}
	out := new(InstancePoolObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *InstancePoolParameters) DeepCopyInto(out *InstancePoolParameters) {
	*out = *in
	if in.ClusterVersion != nil {
		in, out := &in.ClusterVersion, &out.ClusterVersion
		*out = new(string)
		**out = **in
	}
	if in.InstancePoolID != nil {
		in, out := &in.InstancePoolID, &out.InstancePoolID
		*out = new(string)
		**out = **in
	}
	if in.MaxNumberOfWorkers != nil {
		in, out := &in.MaxNumberOfWorkers, &out.MaxNumberOfWorkers
		*out = new(float64)
		**out = **in
	}
	if in.MinNumberOfWorkers != nil {
		in, out := &in.MinNumberOfWorkers, &out.MinNumberOfWorkers
		*out = new(float64)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstancePoolParameters.
func (in *InstancePoolParameters) DeepCopy() *InstancePoolParameters {
	if in == nil {
		return nil
	}
	out := new(InstancePoolParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IntegrationRuntimeAzureSSIS) DeepCopyInto(out *IntegrationRuntimeAzureSSIS) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationRuntimeAzureSSIS.
func (in *IntegrationRuntimeAzureSSIS) DeepCopy() *IntegrationRuntimeAzureSSIS {
	if in == nil {
		return nil
	}
	out := new(IntegrationRuntimeAzureSSIS)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *IntegrationRuntimeAzureSSIS) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IntegrationRuntimeAzureSSISInitParameters) DeepCopyInto(out *IntegrationRuntimeAzureSSISInitParameters) {
	*out = *in
	if in.CatalogInfo != nil {
		in, out := &in.CatalogInfo, &out.CatalogInfo
		*out = new(CatalogInfoInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.CredentialName != nil {
		in, out := &in.CredentialName, &out.CredentialName
		*out = new(string)
		**out = **in
	}
	if in.CustomSetupScript != nil {
		in, out := &in.CustomSetupScript, &out.CustomSetupScript
		*out = new(CustomSetupScriptInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Description != nil {
		in, out := &in.Description, &out.Description
		*out = new(string)
		**out = **in
	}
	if in.Edition != nil {
		in, out := &in.Edition, &out.Edition
		*out = new(string)
		**out = **in
	}
	if in.ExpressCustomSetup != nil {
		in, out := &in.ExpressCustomSetup, &out.ExpressCustomSetup
		*out = new(ExpressCustomSetupInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.ExpressVnetIntegration != nil {
		in, out := &in.ExpressVnetIntegration, &out.ExpressVnetIntegration
		*out = new(ExpressVnetIntegrationInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.LicenseType != nil {
		in, out := &in.LicenseType, &out.LicenseType
		*out = new(string)
		**out = **in
	}
	if in.Location != nil {
		in, out := &in.Location, &out.Location
		*out = new(string)
		**out = **in
	}
	if in.MaxParallelExecutionsPerNode != nil {
		in, out := &in.MaxParallelExecutionsPerNode, &out.MaxParallelExecutionsPerNode
		*out = new(float64)
		**out = **in
	}
	if in.NodeSize != nil {
		in, out := &in.NodeSize, &out.NodeSize
		*out = new(string)
		**out = **in
	}
	if in.NumberOfNodes != nil {
		in, out := &in.NumberOfNodes, &out.NumberOfNodes
		*out = new(float64)
		**out = **in
	}
	if in.PackageStore != nil {
		in, out := &in.PackageStore, &out.PackageStore
		*out = make([]PackageStoreInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Proxy != nil {
		in, out := &in.Proxy, &out.Proxy
		*out = new(ProxyInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.VnetIntegration != nil {
		in, out := &in.VnetIntegration, &out.VnetIntegration
		*out = new(VnetIntegrationInitParameters)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationRuntimeAzureSSISInitParameters.
func (in *IntegrationRuntimeAzureSSISInitParameters) DeepCopy() *IntegrationRuntimeAzureSSISInitParameters {
	if in == nil {
		return nil
	}
	out := new(IntegrationRuntimeAzureSSISInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IntegrationRuntimeAzureSSISList) DeepCopyInto(out *IntegrationRuntimeAzureSSISList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]IntegrationRuntimeAzureSSIS, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationRuntimeAzureSSISList.
func (in *IntegrationRuntimeAzureSSISList) DeepCopy() *IntegrationRuntimeAzureSSISList {
	if in == nil {
		return nil
	}
	out := new(IntegrationRuntimeAzureSSISList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *IntegrationRuntimeAzureSSISList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IntegrationRuntimeAzureSSISObservation) DeepCopyInto(out *IntegrationRuntimeAzureSSISObservation) {
	*out = *in
	if in.CatalogInfo != nil {
		in, out := &in.CatalogInfo, &out.CatalogInfo
		*out = new(CatalogInfoObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.CredentialName != nil {
		in, out := &in.CredentialName, &out.CredentialName
		*out = new(string)
		**out = **in
	}
	if in.CustomSetupScript != nil {
		in, out := &in.CustomSetupScript, &out.CustomSetupScript
		*out = new(CustomSetupScriptObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.DataFactoryID != nil {
		in, out := &in.DataFactoryID, &out.DataFactoryID
		*out = new(string)
		**out = **in
	}
	if in.Description != nil {
		in, out := &in.Description, &out.Description
		*out = new(string)
		**out = **in
	}
	if in.Edition != nil {
		in, out := &in.Edition, &out.Edition
		*out = new(string)
		**out = **in
	}
	if in.ExpressCustomSetup != nil {
		in, out := &in.ExpressCustomSetup, &out.ExpressCustomSetup
		*out = new(ExpressCustomSetupObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.ExpressVnetIntegration != nil {
		in, out := &in.ExpressVnetIntegration, &out.ExpressVnetIntegration
		*out = new(ExpressVnetIntegrationObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.ID != nil {
		in, out := &in.ID, &out.ID
		*out = new(string)
		**out = **in
	}
	if in.LicenseType != nil {
		in, out := &in.LicenseType, &out.LicenseType
		*out = new(string)
		**out = **in
	}
	if in.Location != nil {
		in, out := &in.Location, &out.Location
		*out = new(string)
		**out = **in
	}
	if in.MaxParallelExecutionsPerNode != nil {
		in, out := &in.MaxParallelExecutionsPerNode, &out.MaxParallelExecutionsPerNode
		*out = new(float64)
		**out = **in
	}
	if in.NodeSize != nil {
		in, out := &in.NodeSize, &out.NodeSize
		*out = new(string)
		**out = **in
	}
	if in.NumberOfNodes != nil {
		in, out := &in.NumberOfNodes, &out.NumberOfNodes
		*out = new(float64)
		**out = **in
	}
	if in.PackageStore != nil {
		in, out := &in.PackageStore, &out.PackageStore
		*out = make([]PackageStoreObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Proxy != nil {
		in, out := &in.Proxy, &out.Proxy
		*out = new(ProxyObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.VnetIntegration != nil {
		in, out := &in.VnetIntegration, &out.VnetIntegration
		*out = new(VnetIntegrationObservation)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationRuntimeAzureSSISObservation.
func (in *IntegrationRuntimeAzureSSISObservation) DeepCopy() *IntegrationRuntimeAzureSSISObservation {
	if in == nil {
		return nil
	}
	out := new(IntegrationRuntimeAzureSSISObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IntegrationRuntimeAzureSSISParameters) DeepCopyInto(out *IntegrationRuntimeAzureSSISParameters) {
	*out = *in
	if in.CatalogInfo != nil {
		in, out := &in.CatalogInfo, &out.CatalogInfo
		*out = new(CatalogInfoParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.CredentialName != nil {
		in, out := &in.CredentialName, &out.CredentialName
		*out = new(string)
		**out = **in
	}
	if in.CustomSetupScript != nil {
		in, out := &in.CustomSetupScript, &out.CustomSetupScript
		*out = new(CustomSetupScriptParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.DataFactoryID != nil {
		in, out := &in.DataFactoryID, &out.DataFactoryID
		*out = new(string)
		**out = **in
	}
	if in.DataFactoryIDRef != nil {
		in, out := &in.DataFactoryIDRef, &out.DataFactoryIDRef
		*out = new(v1.Reference)
		(*in).DeepCopyInto(*out)
	}
	if in.DataFactoryIDSelector != nil {
		in, out := &in.DataFactoryIDSelector, &out.DataFactoryIDSelector
		*out = new(v1.Selector)
		(*in).DeepCopyInto(*out)
	}
	if in.Description != nil {
		in, out := &in.Description, &out.Description
		*out = new(string)
		**out = **in
	}
	if in.Edition != nil {
		in, out := &in.Edition, &out.Edition
		*out = new(string)
		**out = **in
	}
	if in.ExpressCustomSetup != nil {
		in, out := &in.ExpressCustomSetup, &out.ExpressCustomSetup
		*out = new(ExpressCustomSetupParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.ExpressVnetIntegration != nil {
		in, out := &in.ExpressVnetIntegration, &out.ExpressVnetIntegration
		*out = new(ExpressVnetIntegrationParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.LicenseType != nil {
		in, out := &in.LicenseType, &out.LicenseType
		*out = new(string)
		**out = **in
	}
	if in.Location != nil {
		in, out := &in.Location, &out.Location
		*out = new(string)
		**out = **in
	}
	if in.MaxParallelExecutionsPerNode != nil {
		in, out := &in.MaxParallelExecutionsPerNode, &out.MaxParallelExecutionsPerNode
		*out = new(float64)
		**out = **in
	}
	if in.NodeSize != nil {
		in, out := &in.NodeSize, &out.NodeSize
		*out = new(string)
		**out = **in
	}
	if in.NumberOfNodes != nil {
		in, out := &in.NumberOfNodes, &out.NumberOfNodes
		*out = new(float64)
		**out = **in
	}
	if in.PackageStore != nil {
		in, out := &in.PackageStore, &out.PackageStore
		*out = make([]PackageStoreParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Proxy != nil {
		in, out := &in.Proxy, &out.Proxy
		*out = new(ProxyParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.VnetIntegration != nil {
		in, out := &in.VnetIntegration, &out.VnetIntegration
		*out = new(VnetIntegrationParameters)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationRuntimeAzureSSISParameters.
+func (in *IntegrationRuntimeAzureSSISParameters) DeepCopy() *IntegrationRuntimeAzureSSISParameters { + if in == nil { + return nil + } + out := new(IntegrationRuntimeAzureSSISParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IntegrationRuntimeAzureSSISSpec) DeepCopyInto(out *IntegrationRuntimeAzureSSISSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationRuntimeAzureSSISSpec. +func (in *IntegrationRuntimeAzureSSISSpec) DeepCopy() *IntegrationRuntimeAzureSSISSpec { + if in == nil { + return nil + } + out := new(IntegrationRuntimeAzureSSISSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IntegrationRuntimeAzureSSISStatus) DeepCopyInto(out *IntegrationRuntimeAzureSSISStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationRuntimeAzureSSISStatus. +func (in *IntegrationRuntimeAzureSSISStatus) DeepCopy() *IntegrationRuntimeAzureSSISStatus { + if in == nil { + return nil + } + out := new(IntegrationRuntimeAzureSSISStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IntegrationRuntimeInitParameters) DeepCopyInto(out *IntegrationRuntimeInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationRuntimeInitParameters. +func (in *IntegrationRuntimeInitParameters) DeepCopy() *IntegrationRuntimeInitParameters { + if in == nil { + return nil + } + out := new(IntegrationRuntimeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IntegrationRuntimeManaged) DeepCopyInto(out *IntegrationRuntimeManaged) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationRuntimeManaged. +func (in *IntegrationRuntimeManaged) DeepCopy() *IntegrationRuntimeManaged { + if in == nil { + return nil + } + out := new(IntegrationRuntimeManaged) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IntegrationRuntimeManaged) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IntegrationRuntimeManagedCatalogInfoInitParameters) DeepCopyInto(out *IntegrationRuntimeManagedCatalogInfoInitParameters) { + *out = *in + if in.AdministratorLogin != nil { + in, out := &in.AdministratorLogin, &out.AdministratorLogin + *out = new(string) + **out = **in + } + if in.PricingTier != nil { + in, out := &in.PricingTier, &out.PricingTier + *out = new(string) + **out = **in + } + if in.ServerEndpoint != nil { + in, out := &in.ServerEndpoint, &out.ServerEndpoint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationRuntimeManagedCatalogInfoInitParameters. +func (in *IntegrationRuntimeManagedCatalogInfoInitParameters) DeepCopy() *IntegrationRuntimeManagedCatalogInfoInitParameters { + if in == nil { + return nil + } + out := new(IntegrationRuntimeManagedCatalogInfoInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IntegrationRuntimeManagedCatalogInfoObservation) DeepCopyInto(out *IntegrationRuntimeManagedCatalogInfoObservation) { + *out = *in + if in.AdministratorLogin != nil { + in, out := &in.AdministratorLogin, &out.AdministratorLogin + *out = new(string) + **out = **in + } + if in.PricingTier != nil { + in, out := &in.PricingTier, &out.PricingTier + *out = new(string) + **out = **in + } + if in.ServerEndpoint != nil { + in, out := &in.ServerEndpoint, &out.ServerEndpoint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationRuntimeManagedCatalogInfoObservation. 
+func (in *IntegrationRuntimeManagedCatalogInfoObservation) DeepCopy() *IntegrationRuntimeManagedCatalogInfoObservation { + if in == nil { + return nil + } + out := new(IntegrationRuntimeManagedCatalogInfoObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IntegrationRuntimeManagedCatalogInfoParameters) DeepCopyInto(out *IntegrationRuntimeManagedCatalogInfoParameters) { + *out = *in + if in.AdministratorLogin != nil { + in, out := &in.AdministratorLogin, &out.AdministratorLogin + *out = new(string) + **out = **in + } + if in.AdministratorPasswordSecretRef != nil { + in, out := &in.AdministratorPasswordSecretRef, &out.AdministratorPasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.PricingTier != nil { + in, out := &in.PricingTier, &out.PricingTier + *out = new(string) + **out = **in + } + if in.ServerEndpoint != nil { + in, out := &in.ServerEndpoint, &out.ServerEndpoint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationRuntimeManagedCatalogInfoParameters. +func (in *IntegrationRuntimeManagedCatalogInfoParameters) DeepCopy() *IntegrationRuntimeManagedCatalogInfoParameters { + if in == nil { + return nil + } + out := new(IntegrationRuntimeManagedCatalogInfoParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IntegrationRuntimeManagedCustomSetupScriptInitParameters) DeepCopyInto(out *IntegrationRuntimeManagedCustomSetupScriptInitParameters) { + *out = *in + if in.BlobContainerURI != nil { + in, out := &in.BlobContainerURI, &out.BlobContainerURI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationRuntimeManagedCustomSetupScriptInitParameters. +func (in *IntegrationRuntimeManagedCustomSetupScriptInitParameters) DeepCopy() *IntegrationRuntimeManagedCustomSetupScriptInitParameters { + if in == nil { + return nil + } + out := new(IntegrationRuntimeManagedCustomSetupScriptInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IntegrationRuntimeManagedCustomSetupScriptObservation) DeepCopyInto(out *IntegrationRuntimeManagedCustomSetupScriptObservation) { + *out = *in + if in.BlobContainerURI != nil { + in, out := &in.BlobContainerURI, &out.BlobContainerURI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationRuntimeManagedCustomSetupScriptObservation. +func (in *IntegrationRuntimeManagedCustomSetupScriptObservation) DeepCopy() *IntegrationRuntimeManagedCustomSetupScriptObservation { + if in == nil { + return nil + } + out := new(IntegrationRuntimeManagedCustomSetupScriptObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IntegrationRuntimeManagedCustomSetupScriptParameters) DeepCopyInto(out *IntegrationRuntimeManagedCustomSetupScriptParameters) { + *out = *in + if in.BlobContainerURI != nil { + in, out := &in.BlobContainerURI, &out.BlobContainerURI + *out = new(string) + **out = **in + } + out.SASTokenSecretRef = in.SASTokenSecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationRuntimeManagedCustomSetupScriptParameters. +func (in *IntegrationRuntimeManagedCustomSetupScriptParameters) DeepCopy() *IntegrationRuntimeManagedCustomSetupScriptParameters { + if in == nil { + return nil + } + out := new(IntegrationRuntimeManagedCustomSetupScriptParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IntegrationRuntimeManagedInitParameters) DeepCopyInto(out *IntegrationRuntimeManagedInitParameters) { + *out = *in + if in.CatalogInfo != nil { + in, out := &in.CatalogInfo, &out.CatalogInfo + *out = new(IntegrationRuntimeManagedCatalogInfoInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CredentialName != nil { + in, out := &in.CredentialName, &out.CredentialName + *out = new(string) + **out = **in + } + if in.CustomSetupScript != nil { + in, out := &in.CustomSetupScript, &out.CustomSetupScript + *out = new(IntegrationRuntimeManagedCustomSetupScriptInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Edition != nil { + in, out := &in.Edition, &out.Edition + *out = new(string) + **out = **in + } + if in.LicenseType != nil { + in, out := &in.LicenseType, &out.LicenseType + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MaxParallelExecutionsPerNode != nil { + in, out := 
&in.MaxParallelExecutionsPerNode, &out.MaxParallelExecutionsPerNode + *out = new(float64) + **out = **in + } + if in.NodeSize != nil { + in, out := &in.NodeSize, &out.NodeSize + *out = new(string) + **out = **in + } + if in.NumberOfNodes != nil { + in, out := &in.NumberOfNodes, &out.NumberOfNodes + *out = new(float64) + **out = **in + } + if in.VnetIntegration != nil { + in, out := &in.VnetIntegration, &out.VnetIntegration + *out = new(IntegrationRuntimeManagedVnetIntegrationInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationRuntimeManagedInitParameters. +func (in *IntegrationRuntimeManagedInitParameters) DeepCopy() *IntegrationRuntimeManagedInitParameters { + if in == nil { + return nil + } + out := new(IntegrationRuntimeManagedInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IntegrationRuntimeManagedList) DeepCopyInto(out *IntegrationRuntimeManagedList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]IntegrationRuntimeManaged, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationRuntimeManagedList. +func (in *IntegrationRuntimeManagedList) DeepCopy() *IntegrationRuntimeManagedList { + if in == nil { + return nil + } + out := new(IntegrationRuntimeManagedList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *IntegrationRuntimeManagedList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IntegrationRuntimeManagedObservation) DeepCopyInto(out *IntegrationRuntimeManagedObservation) { + *out = *in + if in.CatalogInfo != nil { + in, out := &in.CatalogInfo, &out.CatalogInfo + *out = new(IntegrationRuntimeManagedCatalogInfoObservation) + (*in).DeepCopyInto(*out) + } + if in.CredentialName != nil { + in, out := &in.CredentialName, &out.CredentialName + *out = new(string) + **out = **in + } + if in.CustomSetupScript != nil { + in, out := &in.CustomSetupScript, &out.CustomSetupScript + *out = new(IntegrationRuntimeManagedCustomSetupScriptObservation) + (*in).DeepCopyInto(*out) + } + if in.DataFactoryID != nil { + in, out := &in.DataFactoryID, &out.DataFactoryID + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Edition != nil { + in, out := &in.Edition, &out.Edition + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LicenseType != nil { + in, out := &in.LicenseType, &out.LicenseType + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MaxParallelExecutionsPerNode != nil { + in, out := &in.MaxParallelExecutionsPerNode, &out.MaxParallelExecutionsPerNode + *out = new(float64) + **out = **in + } + if in.NodeSize != nil { + in, out := &in.NodeSize, &out.NodeSize + *out = new(string) + **out = **in + } + if in.NumberOfNodes != nil { + in, out := &in.NumberOfNodes, &out.NumberOfNodes + *out = new(float64) + **out = **in + } + if in.VnetIntegration != nil { + in, out := &in.VnetIntegration, 
&out.VnetIntegration + *out = new(IntegrationRuntimeManagedVnetIntegrationObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationRuntimeManagedObservation. +func (in *IntegrationRuntimeManagedObservation) DeepCopy() *IntegrationRuntimeManagedObservation { + if in == nil { + return nil + } + out := new(IntegrationRuntimeManagedObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IntegrationRuntimeManagedParameters) DeepCopyInto(out *IntegrationRuntimeManagedParameters) { + *out = *in + if in.CatalogInfo != nil { + in, out := &in.CatalogInfo, &out.CatalogInfo + *out = new(IntegrationRuntimeManagedCatalogInfoParameters) + (*in).DeepCopyInto(*out) + } + if in.CredentialName != nil { + in, out := &in.CredentialName, &out.CredentialName + *out = new(string) + **out = **in + } + if in.CustomSetupScript != nil { + in, out := &in.CustomSetupScript, &out.CustomSetupScript + *out = new(IntegrationRuntimeManagedCustomSetupScriptParameters) + (*in).DeepCopyInto(*out) + } + if in.DataFactoryID != nil { + in, out := &in.DataFactoryID, &out.DataFactoryID + *out = new(string) + **out = **in + } + if in.DataFactoryIDRef != nil { + in, out := &in.DataFactoryIDRef, &out.DataFactoryIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DataFactoryIDSelector != nil { + in, out := &in.DataFactoryIDSelector, &out.DataFactoryIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Edition != nil { + in, out := &in.Edition, &out.Edition + *out = new(string) + **out = **in + } + if in.LicenseType != nil { + in, out := &in.LicenseType, &out.LicenseType + *out = new(string) + **out = **in + } + if in.Location != nil { + in, 
out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MaxParallelExecutionsPerNode != nil { + in, out := &in.MaxParallelExecutionsPerNode, &out.MaxParallelExecutionsPerNode + *out = new(float64) + **out = **in + } + if in.NodeSize != nil { + in, out := &in.NodeSize, &out.NodeSize + *out = new(string) + **out = **in + } + if in.NumberOfNodes != nil { + in, out := &in.NumberOfNodes, &out.NumberOfNodes + *out = new(float64) + **out = **in + } + if in.VnetIntegration != nil { + in, out := &in.VnetIntegration, &out.VnetIntegration + *out = new(IntegrationRuntimeManagedVnetIntegrationParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationRuntimeManagedParameters. +func (in *IntegrationRuntimeManagedParameters) DeepCopy() *IntegrationRuntimeManagedParameters { + if in == nil { + return nil + } + out := new(IntegrationRuntimeManagedParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IntegrationRuntimeManagedSpec) DeepCopyInto(out *IntegrationRuntimeManagedSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationRuntimeManagedSpec. +func (in *IntegrationRuntimeManagedSpec) DeepCopy() *IntegrationRuntimeManagedSpec { + if in == nil { + return nil + } + out := new(IntegrationRuntimeManagedSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IntegrationRuntimeManagedStatus) DeepCopyInto(out *IntegrationRuntimeManagedStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationRuntimeManagedStatus. +func (in *IntegrationRuntimeManagedStatus) DeepCopy() *IntegrationRuntimeManagedStatus { + if in == nil { + return nil + } + out := new(IntegrationRuntimeManagedStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IntegrationRuntimeManagedVnetIntegrationInitParameters) DeepCopyInto(out *IntegrationRuntimeManagedVnetIntegrationInitParameters) { + *out = *in + if in.SubnetName != nil { + in, out := &in.SubnetName, &out.SubnetName + *out = new(string) + **out = **in + } + if in.SubnetNameRef != nil { + in, out := &in.SubnetNameRef, &out.SubnetNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetNameSelector != nil { + in, out := &in.SubnetNameSelector, &out.SubnetNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.VnetID != nil { + in, out := &in.VnetID, &out.VnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationRuntimeManagedVnetIntegrationInitParameters. +func (in *IntegrationRuntimeManagedVnetIntegrationInitParameters) DeepCopy() *IntegrationRuntimeManagedVnetIntegrationInitParameters { + if in == nil { + return nil + } + out := new(IntegrationRuntimeManagedVnetIntegrationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IntegrationRuntimeManagedVnetIntegrationObservation) DeepCopyInto(out *IntegrationRuntimeManagedVnetIntegrationObservation) { + *out = *in + if in.SubnetName != nil { + in, out := &in.SubnetName, &out.SubnetName + *out = new(string) + **out = **in + } + if in.VnetID != nil { + in, out := &in.VnetID, &out.VnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationRuntimeManagedVnetIntegrationObservation. +func (in *IntegrationRuntimeManagedVnetIntegrationObservation) DeepCopy() *IntegrationRuntimeManagedVnetIntegrationObservation { + if in == nil { + return nil + } + out := new(IntegrationRuntimeManagedVnetIntegrationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IntegrationRuntimeManagedVnetIntegrationParameters) DeepCopyInto(out *IntegrationRuntimeManagedVnetIntegrationParameters) { + *out = *in + if in.SubnetName != nil { + in, out := &in.SubnetName, &out.SubnetName + *out = new(string) + **out = **in + } + if in.SubnetNameRef != nil { + in, out := &in.SubnetNameRef, &out.SubnetNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetNameSelector != nil { + in, out := &in.SubnetNameSelector, &out.SubnetNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.VnetID != nil { + in, out := &in.VnetID, &out.VnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationRuntimeManagedVnetIntegrationParameters. 
+func (in *IntegrationRuntimeManagedVnetIntegrationParameters) DeepCopy() *IntegrationRuntimeManagedVnetIntegrationParameters { + if in == nil { + return nil + } + out := new(IntegrationRuntimeManagedVnetIntegrationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IntegrationRuntimeObservation) DeepCopyInto(out *IntegrationRuntimeObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationRuntimeObservation. +func (in *IntegrationRuntimeObservation) DeepCopy() *IntegrationRuntimeObservation { + if in == nil { + return nil + } + out := new(IntegrationRuntimeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IntegrationRuntimeParameters) DeepCopyInto(out *IntegrationRuntimeParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationRuntimeParameters. +func (in *IntegrationRuntimeParameters) DeepCopy() *IntegrationRuntimeParameters { + if in == nil { + return nil + } + out := new(IntegrationRuntimeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KeyVaultConnectionStringInitParameters) DeepCopyInto(out *KeyVaultConnectionStringInitParameters) { + *out = *in + if in.LinkedServiceName != nil { + in, out := &in.LinkedServiceName, &out.LinkedServiceName + *out = new(string) + **out = **in + } + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyVaultConnectionStringInitParameters. +func (in *KeyVaultConnectionStringInitParameters) DeepCopy() *KeyVaultConnectionStringInitParameters { + if in == nil { + return nil + } + out := new(KeyVaultConnectionStringInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KeyVaultConnectionStringObservation) DeepCopyInto(out *KeyVaultConnectionStringObservation) { + *out = *in + if in.LinkedServiceName != nil { + in, out := &in.LinkedServiceName, &out.LinkedServiceName + *out = new(string) + **out = **in + } + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyVaultConnectionStringObservation. +func (in *KeyVaultConnectionStringObservation) DeepCopy() *KeyVaultConnectionStringObservation { + if in == nil { + return nil + } + out := new(KeyVaultConnectionStringObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KeyVaultConnectionStringParameters) DeepCopyInto(out *KeyVaultConnectionStringParameters) { + *out = *in + if in.LinkedServiceName != nil { + in, out := &in.LinkedServiceName, &out.LinkedServiceName + *out = new(string) + **out = **in + } + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyVaultConnectionStringParameters. +func (in *KeyVaultConnectionStringParameters) DeepCopy() *KeyVaultConnectionStringParameters { + if in == nil { + return nil + } + out := new(KeyVaultConnectionStringParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KeyVaultKeyInitParameters) DeepCopyInto(out *KeyVaultKeyInitParameters) { + *out = *in + if in.LinkedServiceName != nil { + in, out := &in.LinkedServiceName, &out.LinkedServiceName + *out = new(string) + **out = **in + } + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyVaultKeyInitParameters. +func (in *KeyVaultKeyInitParameters) DeepCopy() *KeyVaultKeyInitParameters { + if in == nil { + return nil + } + out := new(KeyVaultKeyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KeyVaultKeyObservation) DeepCopyInto(out *KeyVaultKeyObservation) { + *out = *in + if in.LinkedServiceName != nil { + in, out := &in.LinkedServiceName, &out.LinkedServiceName + *out = new(string) + **out = **in + } + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyVaultKeyObservation. +func (in *KeyVaultKeyObservation) DeepCopy() *KeyVaultKeyObservation { + if in == nil { + return nil + } + out := new(KeyVaultKeyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KeyVaultKeyParameters) DeepCopyInto(out *KeyVaultKeyParameters) { + *out = *in + if in.LinkedServiceName != nil { + in, out := &in.LinkedServiceName, &out.LinkedServiceName + *out = new(string) + **out = **in + } + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyVaultKeyParameters. 
+func (in *KeyVaultKeyParameters) DeepCopy() *KeyVaultKeyParameters { + if in == nil { + return nil + } + out := new(KeyVaultKeyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KeyVaultLicenseInitParameters) DeepCopyInto(out *KeyVaultLicenseInitParameters) { + *out = *in + if in.LinkedServiceName != nil { + in, out := &in.LinkedServiceName, &out.LinkedServiceName + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } + if in.SecretVersion != nil { + in, out := &in.SecretVersion, &out.SecretVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyVaultLicenseInitParameters. +func (in *KeyVaultLicenseInitParameters) DeepCopy() *KeyVaultLicenseInitParameters { + if in == nil { + return nil + } + out := new(KeyVaultLicenseInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KeyVaultLicenseObservation) DeepCopyInto(out *KeyVaultLicenseObservation) { + *out = *in + if in.LinkedServiceName != nil { + in, out := &in.LinkedServiceName, &out.LinkedServiceName + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } + if in.SecretVersion != nil { + in, out := &in.SecretVersion, &out.SecretVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyVaultLicenseObservation. +func (in *KeyVaultLicenseObservation) DeepCopy() *KeyVaultLicenseObservation { + if in == nil { + return nil + } + out := new(KeyVaultLicenseObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KeyVaultLicenseParameters) DeepCopyInto(out *KeyVaultLicenseParameters) { + *out = *in + if in.LinkedServiceName != nil { + in, out := &in.LinkedServiceName, &out.LinkedServiceName + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } + if in.SecretVersion != nil { + in, out := &in.SecretVersion, &out.SecretVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyVaultLicenseParameters. +func (in *KeyVaultLicenseParameters) DeepCopy() *KeyVaultLicenseParameters { + if in == nil { + return nil + } + out := new(KeyVaultLicenseParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KeyVaultPasswordInitParameters) DeepCopyInto(out *KeyVaultPasswordInitParameters) { + *out = *in + if in.LinkedServiceName != nil { + in, out := &in.LinkedServiceName, &out.LinkedServiceName + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } + if in.SecretVersion != nil { + in, out := &in.SecretVersion, &out.SecretVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyVaultPasswordInitParameters. +func (in *KeyVaultPasswordInitParameters) DeepCopy() *KeyVaultPasswordInitParameters { + if in == nil { + return nil + } + out := new(KeyVaultPasswordInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KeyVaultPasswordObservation) DeepCopyInto(out *KeyVaultPasswordObservation) { + *out = *in + if in.LinkedServiceName != nil { + in, out := &in.LinkedServiceName, &out.LinkedServiceName + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } + if in.SecretVersion != nil { + in, out := &in.SecretVersion, &out.SecretVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyVaultPasswordObservation. +func (in *KeyVaultPasswordObservation) DeepCopy() *KeyVaultPasswordObservation { + if in == nil { + return nil + } + out := new(KeyVaultPasswordObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KeyVaultPasswordParameters) DeepCopyInto(out *KeyVaultPasswordParameters) { + *out = *in + if in.LinkedServiceName != nil { + in, out := &in.LinkedServiceName, &out.LinkedServiceName + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } + if in.SecretVersion != nil { + in, out := &in.SecretVersion, &out.SecretVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyVaultPasswordParameters. +func (in *KeyVaultPasswordParameters) DeepCopy() *KeyVaultPasswordParameters { + if in == nil { + return nil + } + out := new(KeyVaultPasswordParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KeyVaultSASTokenInitParameters) DeepCopyInto(out *KeyVaultSASTokenInitParameters) { + *out = *in + if in.LinkedServiceName != nil { + in, out := &in.LinkedServiceName, &out.LinkedServiceName + *out = new(string) + **out = **in + } + if in.LinkedServiceNameRef != nil { + in, out := &in.LinkedServiceNameRef, &out.LinkedServiceNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LinkedServiceNameSelector != nil { + in, out := &in.LinkedServiceNameSelector, &out.LinkedServiceNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyVaultSASTokenInitParameters. +func (in *KeyVaultSASTokenInitParameters) DeepCopy() *KeyVaultSASTokenInitParameters { + if in == nil { + return nil + } + out := new(KeyVaultSASTokenInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KeyVaultSASTokenObservation) DeepCopyInto(out *KeyVaultSASTokenObservation) { + *out = *in + if in.LinkedServiceName != nil { + in, out := &in.LinkedServiceName, &out.LinkedServiceName + *out = new(string) + **out = **in + } + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyVaultSASTokenObservation. +func (in *KeyVaultSASTokenObservation) DeepCopy() *KeyVaultSASTokenObservation { + if in == nil { + return nil + } + out := new(KeyVaultSASTokenObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KeyVaultSASTokenParameters) DeepCopyInto(out *KeyVaultSASTokenParameters) { + *out = *in + if in.LinkedServiceName != nil { + in, out := &in.LinkedServiceName, &out.LinkedServiceName + *out = new(string) + **out = **in + } + if in.LinkedServiceNameRef != nil { + in, out := &in.LinkedServiceNameRef, &out.LinkedServiceNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LinkedServiceNameSelector != nil { + in, out := &in.LinkedServiceNameSelector, &out.LinkedServiceNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyVaultSASTokenParameters. +func (in *KeyVaultSASTokenParameters) DeepCopy() *KeyVaultSASTokenParameters { + if in == nil { + return nil + } + out := new(KeyVaultSASTokenParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkedCustomService) DeepCopyInto(out *LinkedCustomService) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedCustomService. +func (in *LinkedCustomService) DeepCopy() *LinkedCustomService { + if in == nil { + return nil + } + out := new(LinkedCustomService) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LinkedCustomService) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *LinkedCustomServiceInitParameters) DeepCopyInto(out *LinkedCustomServiceInitParameters) { + *out = *in + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.IntegrationRuntime != nil { + in, out := &in.IntegrationRuntime, &out.IntegrationRuntime + *out = new(IntegrationRuntimeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.TypePropertiesJSON != nil { + in, out := &in.TypePropertiesJSON, &out.TypePropertiesJSON + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedCustomServiceInitParameters. 
+func (in *LinkedCustomServiceInitParameters) DeepCopy() *LinkedCustomServiceInitParameters { + if in == nil { + return nil + } + out := new(LinkedCustomServiceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkedCustomServiceList) DeepCopyInto(out *LinkedCustomServiceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LinkedCustomService, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedCustomServiceList. +func (in *LinkedCustomServiceList) DeepCopy() *LinkedCustomServiceList { + if in == nil { + return nil + } + out := new(LinkedCustomServiceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LinkedCustomServiceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinkedCustomServiceObservation) DeepCopyInto(out *LinkedCustomServiceObservation) { + *out = *in + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DataFactoryID != nil { + in, out := &in.DataFactoryID, &out.DataFactoryID + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IntegrationRuntime != nil { + in, out := &in.IntegrationRuntime, &out.IntegrationRuntime + *out = new(IntegrationRuntimeObservation) + (*in).DeepCopyInto(*out) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.TypePropertiesJSON != nil { + in, out := &in.TypePropertiesJSON, &out.TypePropertiesJSON + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedCustomServiceObservation. 
+func (in *LinkedCustomServiceObservation) DeepCopy() *LinkedCustomServiceObservation { + if in == nil { + return nil + } + out := new(LinkedCustomServiceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkedCustomServiceParameters) DeepCopyInto(out *LinkedCustomServiceParameters) { + *out = *in + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DataFactoryID != nil { + in, out := &in.DataFactoryID, &out.DataFactoryID + *out = new(string) + **out = **in + } + if in.DataFactoryIDRef != nil { + in, out := &in.DataFactoryIDRef, &out.DataFactoryIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DataFactoryIDSelector != nil { + in, out := &in.DataFactoryIDSelector, &out.DataFactoryIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.IntegrationRuntime != nil { + in, out := &in.IntegrationRuntime, &out.IntegrationRuntime + *out = new(IntegrationRuntimeParameters) + (*in).DeepCopyInto(*out) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] 
+ in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.TypePropertiesJSON != nil { + in, out := &in.TypePropertiesJSON, &out.TypePropertiesJSON + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedCustomServiceParameters. +func (in *LinkedCustomServiceParameters) DeepCopy() *LinkedCustomServiceParameters { + if in == nil { + return nil + } + out := new(LinkedCustomServiceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkedCustomServiceSpec) DeepCopyInto(out *LinkedCustomServiceSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedCustomServiceSpec. +func (in *LinkedCustomServiceSpec) DeepCopy() *LinkedCustomServiceSpec { + if in == nil { + return nil + } + out := new(LinkedCustomServiceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkedCustomServiceStatus) DeepCopyInto(out *LinkedCustomServiceStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedCustomServiceStatus. 
+func (in *LinkedCustomServiceStatus) DeepCopy() *LinkedCustomServiceStatus { + if in == nil { + return nil + } + out := new(LinkedCustomServiceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkedServiceAzureBlobStorage) DeepCopyInto(out *LinkedServiceAzureBlobStorage) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceAzureBlobStorage. +func (in *LinkedServiceAzureBlobStorage) DeepCopy() *LinkedServiceAzureBlobStorage { + if in == nil { + return nil + } + out := new(LinkedServiceAzureBlobStorage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LinkedServiceAzureBlobStorage) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinkedServiceAzureBlobStorageInitParameters) DeepCopyInto(out *LinkedServiceAzureBlobStorageInitParameters) { + *out = *in + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ConnectionStringInsecure != nil { + in, out := &in.ConnectionStringInsecure, &out.ConnectionStringInsecure + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.IntegrationRuntimeName != nil { + in, out := &in.IntegrationRuntimeName, &out.IntegrationRuntimeName + *out = new(string) + **out = **in + } + if in.KeyVaultSASToken != nil { + in, out := &in.KeyVaultSASToken, &out.KeyVaultSASToken + *out = new(KeyVaultSASTokenInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ServicePrincipalID != nil { + in, out := &in.ServicePrincipalID, &out.ServicePrincipalID + *out = new(string) + **out = **in + } + if in.ServicePrincipalKey != nil { + in, out := &in.ServicePrincipalKey, &out.ServicePrincipalKey + *out = new(string) + **out = **in + } + if in.ServicePrincipalLinkedKeyVaultKey != nil { + 
in, out := &in.ServicePrincipalLinkedKeyVaultKey, &out.ServicePrincipalLinkedKeyVaultKey + *out = new(ServicePrincipalLinkedKeyVaultKeyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StorageKind != nil { + in, out := &in.StorageKind, &out.StorageKind + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.UseManagedIdentity != nil { + in, out := &in.UseManagedIdentity, &out.UseManagedIdentity + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceAzureBlobStorageInitParameters. +func (in *LinkedServiceAzureBlobStorageInitParameters) DeepCopy() *LinkedServiceAzureBlobStorageInitParameters { + if in == nil { + return nil + } + out := new(LinkedServiceAzureBlobStorageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkedServiceAzureBlobStorageList) DeepCopyInto(out *LinkedServiceAzureBlobStorageList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LinkedServiceAzureBlobStorage, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceAzureBlobStorageList. +func (in *LinkedServiceAzureBlobStorageList) DeepCopy() *LinkedServiceAzureBlobStorageList { + if in == nil { + return nil + } + out := new(LinkedServiceAzureBlobStorageList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *LinkedServiceAzureBlobStorageList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkedServiceAzureBlobStorageObservation) DeepCopyInto(out *LinkedServiceAzureBlobStorageObservation) { + *out = *in + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ConnectionStringInsecure != nil { + in, out := &in.ConnectionStringInsecure, &out.ConnectionStringInsecure + *out = new(string) + **out = **in + } + if in.DataFactoryID != nil { + in, out := &in.DataFactoryID, &out.DataFactoryID + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IntegrationRuntimeName != nil { + in, out := &in.IntegrationRuntimeName, &out.IntegrationRuntimeName + *out = new(string) + **out = **in + } + if in.KeyVaultSASToken != nil { + in, out := &in.KeyVaultSASToken, &out.KeyVaultSASToken + *out = new(KeyVaultSASTokenObservation) + (*in).DeepCopyInto(*out) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = 
nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ServicePrincipalID != nil { + in, out := &in.ServicePrincipalID, &out.ServicePrincipalID + *out = new(string) + **out = **in + } + if in.ServicePrincipalKey != nil { + in, out := &in.ServicePrincipalKey, &out.ServicePrincipalKey + *out = new(string) + **out = **in + } + if in.ServicePrincipalLinkedKeyVaultKey != nil { + in, out := &in.ServicePrincipalLinkedKeyVaultKey, &out.ServicePrincipalLinkedKeyVaultKey + *out = new(ServicePrincipalLinkedKeyVaultKeyObservation) + (*in).DeepCopyInto(*out) + } + if in.StorageKind != nil { + in, out := &in.StorageKind, &out.StorageKind + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.UseManagedIdentity != nil { + in, out := &in.UseManagedIdentity, &out.UseManagedIdentity + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceAzureBlobStorageObservation. +func (in *LinkedServiceAzureBlobStorageObservation) DeepCopy() *LinkedServiceAzureBlobStorageObservation { + if in == nil { + return nil + } + out := new(LinkedServiceAzureBlobStorageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinkedServiceAzureBlobStorageParameters) DeepCopyInto(out *LinkedServiceAzureBlobStorageParameters) { + *out = *in + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ConnectionStringInsecure != nil { + in, out := &in.ConnectionStringInsecure, &out.ConnectionStringInsecure + *out = new(string) + **out = **in + } + if in.ConnectionStringSecretRef != nil { + in, out := &in.ConnectionStringSecretRef, &out.ConnectionStringSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.DataFactoryID != nil { + in, out := &in.DataFactoryID, &out.DataFactoryID + *out = new(string) + **out = **in + } + if in.DataFactoryIDRef != nil { + in, out := &in.DataFactoryIDRef, &out.DataFactoryIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DataFactoryIDSelector != nil { + in, out := &in.DataFactoryIDSelector, &out.DataFactoryIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.IntegrationRuntimeName != nil { + in, out := &in.IntegrationRuntimeName, &out.IntegrationRuntimeName + *out = new(string) + **out = **in + } + if in.KeyVaultSASToken != nil { + in, out := &in.KeyVaultSASToken, &out.KeyVaultSASToken + *out = new(KeyVaultSASTokenParameters) + (*in).DeepCopyInto(*out) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + 
*out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.SASURISecretRef != nil { + in, out := &in.SASURISecretRef, &out.SASURISecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ServiceEndpointSecretRef != nil { + in, out := &in.ServiceEndpointSecretRef, &out.ServiceEndpointSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ServicePrincipalID != nil { + in, out := &in.ServicePrincipalID, &out.ServicePrincipalID + *out = new(string) + **out = **in + } + if in.ServicePrincipalKey != nil { + in, out := &in.ServicePrincipalKey, &out.ServicePrincipalKey + *out = new(string) + **out = **in + } + if in.ServicePrincipalLinkedKeyVaultKey != nil { + in, out := &in.ServicePrincipalLinkedKeyVaultKey, &out.ServicePrincipalLinkedKeyVaultKey + *out = new(ServicePrincipalLinkedKeyVaultKeyParameters) + (*in).DeepCopyInto(*out) + } + if in.StorageKind != nil { + in, out := &in.StorageKind, &out.StorageKind + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.UseManagedIdentity != nil { + in, out := &in.UseManagedIdentity, &out.UseManagedIdentity + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceAzureBlobStorageParameters. +func (in *LinkedServiceAzureBlobStorageParameters) DeepCopy() *LinkedServiceAzureBlobStorageParameters { + if in == nil { + return nil + } + out := new(LinkedServiceAzureBlobStorageParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinkedServiceAzureBlobStorageSpec) DeepCopyInto(out *LinkedServiceAzureBlobStorageSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceAzureBlobStorageSpec. +func (in *LinkedServiceAzureBlobStorageSpec) DeepCopy() *LinkedServiceAzureBlobStorageSpec { + if in == nil { + return nil + } + out := new(LinkedServiceAzureBlobStorageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkedServiceAzureBlobStorageStatus) DeepCopyInto(out *LinkedServiceAzureBlobStorageStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceAzureBlobStorageStatus. +func (in *LinkedServiceAzureBlobStorageStatus) DeepCopy() *LinkedServiceAzureBlobStorageStatus { + if in == nil { + return nil + } + out := new(LinkedServiceAzureBlobStorageStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkedServiceAzureDatabricks) DeepCopyInto(out *LinkedServiceAzureDatabricks) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceAzureDatabricks. 
+func (in *LinkedServiceAzureDatabricks) DeepCopy() *LinkedServiceAzureDatabricks { + if in == nil { + return nil + } + out := new(LinkedServiceAzureDatabricks) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LinkedServiceAzureDatabricks) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkedServiceAzureDatabricksInitParameters) DeepCopyInto(out *LinkedServiceAzureDatabricksInitParameters) { + *out = *in + if in.AdbDomain != nil { + in, out := &in.AdbDomain, &out.AdbDomain + *out = new(string) + **out = **in + } + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ExistingClusterID != nil { + in, out := &in.ExistingClusterID, &out.ExistingClusterID + *out = new(string) + **out = **in + } + if in.InstancePool != nil { + in, out := &in.InstancePool, &out.InstancePool + *out = new(InstancePoolInitParameters) + (*in).DeepCopyInto(*out) + } + if in.IntegrationRuntimeName != nil { + in, out := &in.IntegrationRuntimeName, &out.IntegrationRuntimeName + *out = new(string) + **out = **in + } + if in.KeyVaultPassword 
!= nil { + in, out := &in.KeyVaultPassword, &out.KeyVaultPassword + *out = new(LinkedServiceAzureDatabricksKeyVaultPasswordInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MsiWorkSpaceResourceID != nil { + in, out := &in.MsiWorkSpaceResourceID, &out.MsiWorkSpaceResourceID + *out = new(string) + **out = **in + } + if in.MsiWorkSpaceResourceIDRef != nil { + in, out := &in.MsiWorkSpaceResourceIDRef, &out.MsiWorkSpaceResourceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.MsiWorkSpaceResourceIDSelector != nil { + in, out := &in.MsiWorkSpaceResourceIDSelector, &out.MsiWorkSpaceResourceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.NewClusterConfig != nil { + in, out := &in.NewClusterConfig, &out.NewClusterConfig + *out = new(NewClusterConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceAzureDatabricksInitParameters. +func (in *LinkedServiceAzureDatabricksInitParameters) DeepCopy() *LinkedServiceAzureDatabricksInitParameters { + if in == nil { + return nil + } + out := new(LinkedServiceAzureDatabricksInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinkedServiceAzureDatabricksKeyVaultPasswordInitParameters) DeepCopyInto(out *LinkedServiceAzureDatabricksKeyVaultPasswordInitParameters) { + *out = *in + if in.LinkedServiceName != nil { + in, out := &in.LinkedServiceName, &out.LinkedServiceName + *out = new(string) + **out = **in + } + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceAzureDatabricksKeyVaultPasswordInitParameters. +func (in *LinkedServiceAzureDatabricksKeyVaultPasswordInitParameters) DeepCopy() *LinkedServiceAzureDatabricksKeyVaultPasswordInitParameters { + if in == nil { + return nil + } + out := new(LinkedServiceAzureDatabricksKeyVaultPasswordInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkedServiceAzureDatabricksKeyVaultPasswordObservation) DeepCopyInto(out *LinkedServiceAzureDatabricksKeyVaultPasswordObservation) { + *out = *in + if in.LinkedServiceName != nil { + in, out := &in.LinkedServiceName, &out.LinkedServiceName + *out = new(string) + **out = **in + } + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceAzureDatabricksKeyVaultPasswordObservation. +func (in *LinkedServiceAzureDatabricksKeyVaultPasswordObservation) DeepCopy() *LinkedServiceAzureDatabricksKeyVaultPasswordObservation { + if in == nil { + return nil + } + out := new(LinkedServiceAzureDatabricksKeyVaultPasswordObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinkedServiceAzureDatabricksKeyVaultPasswordParameters) DeepCopyInto(out *LinkedServiceAzureDatabricksKeyVaultPasswordParameters) { + *out = *in + if in.LinkedServiceName != nil { + in, out := &in.LinkedServiceName, &out.LinkedServiceName + *out = new(string) + **out = **in + } + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceAzureDatabricksKeyVaultPasswordParameters. +func (in *LinkedServiceAzureDatabricksKeyVaultPasswordParameters) DeepCopy() *LinkedServiceAzureDatabricksKeyVaultPasswordParameters { + if in == nil { + return nil + } + out := new(LinkedServiceAzureDatabricksKeyVaultPasswordParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkedServiceAzureDatabricksList) DeepCopyInto(out *LinkedServiceAzureDatabricksList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LinkedServiceAzureDatabricks, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceAzureDatabricksList. +func (in *LinkedServiceAzureDatabricksList) DeepCopy() *LinkedServiceAzureDatabricksList { + if in == nil { + return nil + } + out := new(LinkedServiceAzureDatabricksList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *LinkedServiceAzureDatabricksList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkedServiceAzureDatabricksObservation) DeepCopyInto(out *LinkedServiceAzureDatabricksObservation) { + *out = *in + if in.AdbDomain != nil { + in, out := &in.AdbDomain, &out.AdbDomain + *out = new(string) + **out = **in + } + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DataFactoryID != nil { + in, out := &in.DataFactoryID, &out.DataFactoryID + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ExistingClusterID != nil { + in, out := &in.ExistingClusterID, &out.ExistingClusterID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InstancePool != nil { + in, out := &in.InstancePool, &out.InstancePool + *out = new(InstancePoolObservation) + (*in).DeepCopyInto(*out) + } + if in.IntegrationRuntimeName != nil { + in, out := &in.IntegrationRuntimeName, &out.IntegrationRuntimeName + *out = new(string) + **out = **in + } + if in.KeyVaultPassword != nil { + in, out := &in.KeyVaultPassword, &out.KeyVaultPassword + *out = 
new(LinkedServiceAzureDatabricksKeyVaultPasswordObservation) + (*in).DeepCopyInto(*out) + } + if in.MsiWorkSpaceResourceID != nil { + in, out := &in.MsiWorkSpaceResourceID, &out.MsiWorkSpaceResourceID + *out = new(string) + **out = **in + } + if in.NewClusterConfig != nil { + in, out := &in.NewClusterConfig, &out.NewClusterConfig + *out = new(NewClusterConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceAzureDatabricksObservation. +func (in *LinkedServiceAzureDatabricksObservation) DeepCopy() *LinkedServiceAzureDatabricksObservation { + if in == nil { + return nil + } + out := new(LinkedServiceAzureDatabricksObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinkedServiceAzureDatabricksParameters) DeepCopyInto(out *LinkedServiceAzureDatabricksParameters) { + *out = *in + if in.AccessTokenSecretRef != nil { + in, out := &in.AccessTokenSecretRef, &out.AccessTokenSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.AdbDomain != nil { + in, out := &in.AdbDomain, &out.AdbDomain + *out = new(string) + **out = **in + } + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DataFactoryID != nil { + in, out := &in.DataFactoryID, &out.DataFactoryID + *out = new(string) + **out = **in + } + if in.DataFactoryIDRef != nil { + in, out := &in.DataFactoryIDRef, &out.DataFactoryIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DataFactoryIDSelector != nil { + in, out := &in.DataFactoryIDSelector, &out.DataFactoryIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ExistingClusterID != nil { + in, out := &in.ExistingClusterID, &out.ExistingClusterID + *out = new(string) + **out = **in + } + if in.InstancePool != nil { + in, out := &in.InstancePool, &out.InstancePool + *out = new(InstancePoolParameters) + (*in).DeepCopyInto(*out) + } + if in.IntegrationRuntimeName != nil { + in, out := &in.IntegrationRuntimeName, &out.IntegrationRuntimeName + *out = new(string) + **out = **in + } + if 
in.KeyVaultPassword != nil { + in, out := &in.KeyVaultPassword, &out.KeyVaultPassword + *out = new(LinkedServiceAzureDatabricksKeyVaultPasswordParameters) + (*in).DeepCopyInto(*out) + } + if in.MsiWorkSpaceResourceID != nil { + in, out := &in.MsiWorkSpaceResourceID, &out.MsiWorkSpaceResourceID + *out = new(string) + **out = **in + } + if in.MsiWorkSpaceResourceIDRef != nil { + in, out := &in.MsiWorkSpaceResourceIDRef, &out.MsiWorkSpaceResourceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.MsiWorkSpaceResourceIDSelector != nil { + in, out := &in.MsiWorkSpaceResourceIDSelector, &out.MsiWorkSpaceResourceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.NewClusterConfig != nil { + in, out := &in.NewClusterConfig, &out.NewClusterConfig + *out = new(NewClusterConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceAzureDatabricksParameters. +func (in *LinkedServiceAzureDatabricksParameters) DeepCopy() *LinkedServiceAzureDatabricksParameters { + if in == nil { + return nil + } + out := new(LinkedServiceAzureDatabricksParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinkedServiceAzureDatabricksSpec) DeepCopyInto(out *LinkedServiceAzureDatabricksSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceAzureDatabricksSpec. +func (in *LinkedServiceAzureDatabricksSpec) DeepCopy() *LinkedServiceAzureDatabricksSpec { + if in == nil { + return nil + } + out := new(LinkedServiceAzureDatabricksSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkedServiceAzureDatabricksStatus) DeepCopyInto(out *LinkedServiceAzureDatabricksStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceAzureDatabricksStatus. +func (in *LinkedServiceAzureDatabricksStatus) DeepCopy() *LinkedServiceAzureDatabricksStatus { + if in == nil { + return nil + } + out := new(LinkedServiceAzureDatabricksStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkedServiceAzureFileStorage) DeepCopyInto(out *LinkedServiceAzureFileStorage) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceAzureFileStorage. 
+func (in *LinkedServiceAzureFileStorage) DeepCopy() *LinkedServiceAzureFileStorage { + if in == nil { + return nil + } + out := new(LinkedServiceAzureFileStorage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LinkedServiceAzureFileStorage) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkedServiceAzureFileStorageInitParameters) DeepCopyInto(out *LinkedServiceAzureFileStorageInitParameters) { + *out = *in + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FileShare != nil { + in, out := &in.FileShare, &out.FileShare + *out = new(string) + **out = **in + } + if in.Host != nil { + in, out := &in.Host, &out.Host + *out = new(string) + **out = **in + } + if in.IntegrationRuntimeName != nil { + in, out := &in.IntegrationRuntimeName, &out.IntegrationRuntimeName + *out = new(string) + **out = **in + } + if in.KeyVaultPassword != nil { + in, out := &in.KeyVaultPassword, &out.KeyVaultPassword + *out = new(LinkedServiceAzureFileStorageKeyVaultPasswordInitParameters) + (*in).DeepCopyInto(*out) + } + if 
in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UserID != nil { + in, out := &in.UserID, &out.UserID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceAzureFileStorageInitParameters. +func (in *LinkedServiceAzureFileStorageInitParameters) DeepCopy() *LinkedServiceAzureFileStorageInitParameters { + if in == nil { + return nil + } + out := new(LinkedServiceAzureFileStorageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkedServiceAzureFileStorageKeyVaultPasswordInitParameters) DeepCopyInto(out *LinkedServiceAzureFileStorageKeyVaultPasswordInitParameters) { + *out = *in + if in.LinkedServiceName != nil { + in, out := &in.LinkedServiceName, &out.LinkedServiceName + *out = new(string) + **out = **in + } + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceAzureFileStorageKeyVaultPasswordInitParameters. +func (in *LinkedServiceAzureFileStorageKeyVaultPasswordInitParameters) DeepCopy() *LinkedServiceAzureFileStorageKeyVaultPasswordInitParameters { + if in == nil { + return nil + } + out := new(LinkedServiceAzureFileStorageKeyVaultPasswordInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinkedServiceAzureFileStorageKeyVaultPasswordObservation) DeepCopyInto(out *LinkedServiceAzureFileStorageKeyVaultPasswordObservation) { + *out = *in + if in.LinkedServiceName != nil { + in, out := &in.LinkedServiceName, &out.LinkedServiceName + *out = new(string) + **out = **in + } + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceAzureFileStorageKeyVaultPasswordObservation. +func (in *LinkedServiceAzureFileStorageKeyVaultPasswordObservation) DeepCopy() *LinkedServiceAzureFileStorageKeyVaultPasswordObservation { + if in == nil { + return nil + } + out := new(LinkedServiceAzureFileStorageKeyVaultPasswordObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkedServiceAzureFileStorageKeyVaultPasswordParameters) DeepCopyInto(out *LinkedServiceAzureFileStorageKeyVaultPasswordParameters) { + *out = *in + if in.LinkedServiceName != nil { + in, out := &in.LinkedServiceName, &out.LinkedServiceName + *out = new(string) + **out = **in + } + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceAzureFileStorageKeyVaultPasswordParameters. +func (in *LinkedServiceAzureFileStorageKeyVaultPasswordParameters) DeepCopy() *LinkedServiceAzureFileStorageKeyVaultPasswordParameters { + if in == nil { + return nil + } + out := new(LinkedServiceAzureFileStorageKeyVaultPasswordParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinkedServiceAzureFileStorageList) DeepCopyInto(out *LinkedServiceAzureFileStorageList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LinkedServiceAzureFileStorage, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceAzureFileStorageList. +func (in *LinkedServiceAzureFileStorageList) DeepCopy() *LinkedServiceAzureFileStorageList { + if in == nil { + return nil + } + out := new(LinkedServiceAzureFileStorageList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LinkedServiceAzureFileStorageList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinkedServiceAzureFileStorageObservation) DeepCopyInto(out *LinkedServiceAzureFileStorageObservation) { + *out = *in + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DataFactoryID != nil { + in, out := &in.DataFactoryID, &out.DataFactoryID + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FileShare != nil { + in, out := &in.FileShare, &out.FileShare + *out = new(string) + **out = **in + } + if in.Host != nil { + in, out := &in.Host, &out.Host + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IntegrationRuntimeName != nil { + in, out := &in.IntegrationRuntimeName, &out.IntegrationRuntimeName + *out = new(string) + **out = **in + } + if in.KeyVaultPassword != nil { + in, out := &in.KeyVaultPassword, &out.KeyVaultPassword + *out = new(LinkedServiceAzureFileStorageKeyVaultPasswordObservation) + (*in).DeepCopyInto(*out) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UserID != nil { + in, out := &in.UserID, 
&out.UserID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceAzureFileStorageObservation. +func (in *LinkedServiceAzureFileStorageObservation) DeepCopy() *LinkedServiceAzureFileStorageObservation { + if in == nil { + return nil + } + out := new(LinkedServiceAzureFileStorageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkedServiceAzureFileStorageParameters) DeepCopyInto(out *LinkedServiceAzureFileStorageParameters) { + *out = *in + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + out.ConnectionStringSecretRef = in.ConnectionStringSecretRef + if in.DataFactoryID != nil { + in, out := &in.DataFactoryID, &out.DataFactoryID + *out = new(string) + **out = **in + } + if in.DataFactoryIDRef != nil { + in, out := &in.DataFactoryIDRef, &out.DataFactoryIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DataFactoryIDSelector != nil { + in, out := &in.DataFactoryIDSelector, &out.DataFactoryIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.FileShare != nil { + in, out := &in.FileShare, &out.FileShare + *out = new(string) + **out = **in + } 
+ if in.Host != nil { + in, out := &in.Host, &out.Host + *out = new(string) + **out = **in + } + if in.IntegrationRuntimeName != nil { + in, out := &in.IntegrationRuntimeName, &out.IntegrationRuntimeName + *out = new(string) + **out = **in + } + if in.KeyVaultPassword != nil { + in, out := &in.KeyVaultPassword, &out.KeyVaultPassword + *out = new(LinkedServiceAzureFileStorageKeyVaultPasswordParameters) + (*in).DeepCopyInto(*out) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.PasswordSecretRef != nil { + in, out := &in.PasswordSecretRef, &out.PasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.UserID != nil { + in, out := &in.UserID, &out.UserID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceAzureFileStorageParameters. +func (in *LinkedServiceAzureFileStorageParameters) DeepCopy() *LinkedServiceAzureFileStorageParameters { + if in == nil { + return nil + } + out := new(LinkedServiceAzureFileStorageParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkedServiceAzureFileStorageSpec) DeepCopyInto(out *LinkedServiceAzureFileStorageSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceAzureFileStorageSpec. 
+func (in *LinkedServiceAzureFileStorageSpec) DeepCopy() *LinkedServiceAzureFileStorageSpec { + if in == nil { + return nil + } + out := new(LinkedServiceAzureFileStorageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkedServiceAzureFileStorageStatus) DeepCopyInto(out *LinkedServiceAzureFileStorageStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceAzureFileStorageStatus. +func (in *LinkedServiceAzureFileStorageStatus) DeepCopy() *LinkedServiceAzureFileStorageStatus { + if in == nil { + return nil + } + out := new(LinkedServiceAzureFileStorageStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkedServiceAzureFunction) DeepCopyInto(out *LinkedServiceAzureFunction) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceAzureFunction. +func (in *LinkedServiceAzureFunction) DeepCopy() *LinkedServiceAzureFunction { + if in == nil { + return nil + } + out := new(LinkedServiceAzureFunction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LinkedServiceAzureFunction) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinkedServiceAzureFunctionInitParameters) DeepCopyInto(out *LinkedServiceAzureFunctionInitParameters) { + *out = *in + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.IntegrationRuntimeName != nil { + in, out := &in.IntegrationRuntimeName, &out.IntegrationRuntimeName + *out = new(string) + **out = **in + } + if in.KeyVaultKey != nil { + in, out := &in.KeyVaultKey, &out.KeyVaultKey + *out = new(KeyVaultKeyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceAzureFunctionInitParameters. 
+func (in *LinkedServiceAzureFunctionInitParameters) DeepCopy() *LinkedServiceAzureFunctionInitParameters { + if in == nil { + return nil + } + out := new(LinkedServiceAzureFunctionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkedServiceAzureFunctionList) DeepCopyInto(out *LinkedServiceAzureFunctionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LinkedServiceAzureFunction, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceAzureFunctionList. +func (in *LinkedServiceAzureFunctionList) DeepCopy() *LinkedServiceAzureFunctionList { + if in == nil { + return nil + } + out := new(LinkedServiceAzureFunctionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LinkedServiceAzureFunctionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinkedServiceAzureFunctionObservation) DeepCopyInto(out *LinkedServiceAzureFunctionObservation) { + *out = *in + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DataFactoryID != nil { + in, out := &in.DataFactoryID, &out.DataFactoryID + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IntegrationRuntimeName != nil { + in, out := &in.IntegrationRuntimeName, &out.IntegrationRuntimeName + *out = new(string) + **out = **in + } + if in.KeyVaultKey != nil { + in, out := &in.KeyVaultKey, &out.KeyVaultKey + *out = new(KeyVaultKeyObservation) + (*in).DeepCopyInto(*out) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceAzureFunctionObservation. 
+func (in *LinkedServiceAzureFunctionObservation) DeepCopy() *LinkedServiceAzureFunctionObservation { + if in == nil { + return nil + } + out := new(LinkedServiceAzureFunctionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkedServiceAzureFunctionParameters) DeepCopyInto(out *LinkedServiceAzureFunctionParameters) { + *out = *in + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DataFactoryID != nil { + in, out := &in.DataFactoryID, &out.DataFactoryID + *out = new(string) + **out = **in + } + if in.DataFactoryIDRef != nil { + in, out := &in.DataFactoryIDRef, &out.DataFactoryIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DataFactoryIDSelector != nil { + in, out := &in.DataFactoryIDSelector, &out.DataFactoryIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.IntegrationRuntimeName != nil { + in, out := &in.IntegrationRuntimeName, &out.IntegrationRuntimeName + *out = new(string) + **out = **in + } + if in.KeySecretRef != nil { + in, out := &in.KeySecretRef, &out.KeySecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.KeyVaultKey != nil { + in, out := &in.KeyVaultKey, &out.KeyVaultKey + *out = 
new(KeyVaultKeyParameters) + (*in).DeepCopyInto(*out) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceAzureFunctionParameters. +func (in *LinkedServiceAzureFunctionParameters) DeepCopy() *LinkedServiceAzureFunctionParameters { + if in == nil { + return nil + } + out := new(LinkedServiceAzureFunctionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkedServiceAzureFunctionSpec) DeepCopyInto(out *LinkedServiceAzureFunctionSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceAzureFunctionSpec. +func (in *LinkedServiceAzureFunctionSpec) DeepCopy() *LinkedServiceAzureFunctionSpec { + if in == nil { + return nil + } + out := new(LinkedServiceAzureFunctionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinkedServiceAzureFunctionStatus) DeepCopyInto(out *LinkedServiceAzureFunctionStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceAzureFunctionStatus. +func (in *LinkedServiceAzureFunctionStatus) DeepCopy() *LinkedServiceAzureFunctionStatus { + if in == nil { + return nil + } + out := new(LinkedServiceAzureFunctionStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkedServiceAzureSQLDatabase) DeepCopyInto(out *LinkedServiceAzureSQLDatabase) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceAzureSQLDatabase. +func (in *LinkedServiceAzureSQLDatabase) DeepCopy() *LinkedServiceAzureSQLDatabase { + if in == nil { + return nil + } + out := new(LinkedServiceAzureSQLDatabase) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LinkedServiceAzureSQLDatabase) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinkedServiceAzureSQLDatabaseInitParameters) DeepCopyInto(out *LinkedServiceAzureSQLDatabaseInitParameters) { + *out = *in + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ConnectionString != nil { + in, out := &in.ConnectionString, &out.ConnectionString + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.IntegrationRuntimeName != nil { + in, out := &in.IntegrationRuntimeName, &out.IntegrationRuntimeName + *out = new(string) + **out = **in + } + if in.KeyVaultConnectionString != nil { + in, out := &in.KeyVaultConnectionString, &out.KeyVaultConnectionString + *out = new(KeyVaultConnectionStringInitParameters) + (*in).DeepCopyInto(*out) + } + if in.KeyVaultPassword != nil { + in, out := &in.KeyVaultPassword, &out.KeyVaultPassword + *out = new(LinkedServiceAzureSQLDatabaseKeyVaultPasswordInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ServicePrincipalID != nil { + in, out := &in.ServicePrincipalID, &out.ServicePrincipalID + *out = new(string) + **out 
= **in + } + if in.ServicePrincipalKey != nil { + in, out := &in.ServicePrincipalKey, &out.ServicePrincipalKey + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.UseManagedIdentity != nil { + in, out := &in.UseManagedIdentity, &out.UseManagedIdentity + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceAzureSQLDatabaseInitParameters. +func (in *LinkedServiceAzureSQLDatabaseInitParameters) DeepCopy() *LinkedServiceAzureSQLDatabaseInitParameters { + if in == nil { + return nil + } + out := new(LinkedServiceAzureSQLDatabaseInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkedServiceAzureSQLDatabaseKeyVaultPasswordInitParameters) DeepCopyInto(out *LinkedServiceAzureSQLDatabaseKeyVaultPasswordInitParameters) { + *out = *in + if in.LinkedServiceName != nil { + in, out := &in.LinkedServiceName, &out.LinkedServiceName + *out = new(string) + **out = **in + } + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceAzureSQLDatabaseKeyVaultPasswordInitParameters. +func (in *LinkedServiceAzureSQLDatabaseKeyVaultPasswordInitParameters) DeepCopy() *LinkedServiceAzureSQLDatabaseKeyVaultPasswordInitParameters { + if in == nil { + return nil + } + out := new(LinkedServiceAzureSQLDatabaseKeyVaultPasswordInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinkedServiceAzureSQLDatabaseKeyVaultPasswordObservation) DeepCopyInto(out *LinkedServiceAzureSQLDatabaseKeyVaultPasswordObservation) { + *out = *in + if in.LinkedServiceName != nil { + in, out := &in.LinkedServiceName, &out.LinkedServiceName + *out = new(string) + **out = **in + } + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceAzureSQLDatabaseKeyVaultPasswordObservation. +func (in *LinkedServiceAzureSQLDatabaseKeyVaultPasswordObservation) DeepCopy() *LinkedServiceAzureSQLDatabaseKeyVaultPasswordObservation { + if in == nil { + return nil + } + out := new(LinkedServiceAzureSQLDatabaseKeyVaultPasswordObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkedServiceAzureSQLDatabaseKeyVaultPasswordParameters) DeepCopyInto(out *LinkedServiceAzureSQLDatabaseKeyVaultPasswordParameters) { + *out = *in + if in.LinkedServiceName != nil { + in, out := &in.LinkedServiceName, &out.LinkedServiceName + *out = new(string) + **out = **in + } + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceAzureSQLDatabaseKeyVaultPasswordParameters. +func (in *LinkedServiceAzureSQLDatabaseKeyVaultPasswordParameters) DeepCopy() *LinkedServiceAzureSQLDatabaseKeyVaultPasswordParameters { + if in == nil { + return nil + } + out := new(LinkedServiceAzureSQLDatabaseKeyVaultPasswordParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinkedServiceAzureSQLDatabaseList) DeepCopyInto(out *LinkedServiceAzureSQLDatabaseList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LinkedServiceAzureSQLDatabase, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceAzureSQLDatabaseList. +func (in *LinkedServiceAzureSQLDatabaseList) DeepCopy() *LinkedServiceAzureSQLDatabaseList { + if in == nil { + return nil + } + out := new(LinkedServiceAzureSQLDatabaseList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LinkedServiceAzureSQLDatabaseList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinkedServiceAzureSQLDatabaseObservation) DeepCopyInto(out *LinkedServiceAzureSQLDatabaseObservation) { + *out = *in + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ConnectionString != nil { + in, out := &in.ConnectionString, &out.ConnectionString + *out = new(string) + **out = **in + } + if in.DataFactoryID != nil { + in, out := &in.DataFactoryID, &out.DataFactoryID + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IntegrationRuntimeName != nil { + in, out := &in.IntegrationRuntimeName, &out.IntegrationRuntimeName + *out = new(string) + **out = **in + } + if in.KeyVaultConnectionString != nil { + in, out := &in.KeyVaultConnectionString, &out.KeyVaultConnectionString + *out = new(KeyVaultConnectionStringObservation) + (*in).DeepCopyInto(*out) + } + if in.KeyVaultPassword != nil { + in, out := &in.KeyVaultPassword, &out.KeyVaultPassword + *out = new(LinkedServiceAzureSQLDatabaseKeyVaultPasswordObservation) + (*in).DeepCopyInto(*out) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, 
&outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ServicePrincipalID != nil { + in, out := &in.ServicePrincipalID, &out.ServicePrincipalID + *out = new(string) + **out = **in + } + if in.ServicePrincipalKey != nil { + in, out := &in.ServicePrincipalKey, &out.ServicePrincipalKey + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.UseManagedIdentity != nil { + in, out := &in.UseManagedIdentity, &out.UseManagedIdentity + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceAzureSQLDatabaseObservation. +func (in *LinkedServiceAzureSQLDatabaseObservation) DeepCopy() *LinkedServiceAzureSQLDatabaseObservation { + if in == nil { + return nil + } + out := new(LinkedServiceAzureSQLDatabaseObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinkedServiceAzureSQLDatabaseParameters) DeepCopyInto(out *LinkedServiceAzureSQLDatabaseParameters) { + *out = *in + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ConnectionString != nil { + in, out := &in.ConnectionString, &out.ConnectionString + *out = new(string) + **out = **in + } + if in.DataFactoryID != nil { + in, out := &in.DataFactoryID, &out.DataFactoryID + *out = new(string) + **out = **in + } + if in.DataFactoryIDRef != nil { + in, out := &in.DataFactoryIDRef, &out.DataFactoryIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DataFactoryIDSelector != nil { + in, out := &in.DataFactoryIDSelector, &out.DataFactoryIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.IntegrationRuntimeName != nil { + in, out := &in.IntegrationRuntimeName, &out.IntegrationRuntimeName + *out = new(string) + **out = **in + } + if in.KeyVaultConnectionString != nil { + in, out := &in.KeyVaultConnectionString, &out.KeyVaultConnectionString + *out = new(KeyVaultConnectionStringParameters) + (*in).DeepCopyInto(*out) + } + if in.KeyVaultPassword != nil { + in, out := &in.KeyVaultPassword, &out.KeyVaultPassword + *out = new(LinkedServiceAzureSQLDatabaseKeyVaultPasswordParameters) + (*in).DeepCopyInto(*out) + } + if in.Parameters != nil { + in, out := 
&in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ServicePrincipalID != nil { + in, out := &in.ServicePrincipalID, &out.ServicePrincipalID + *out = new(string) + **out = **in + } + if in.ServicePrincipalKey != nil { + in, out := &in.ServicePrincipalKey, &out.ServicePrincipalKey + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.UseManagedIdentity != nil { + in, out := &in.UseManagedIdentity, &out.UseManagedIdentity + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceAzureSQLDatabaseParameters. +func (in *LinkedServiceAzureSQLDatabaseParameters) DeepCopy() *LinkedServiceAzureSQLDatabaseParameters { + if in == nil { + return nil + } + out := new(LinkedServiceAzureSQLDatabaseParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkedServiceAzureSQLDatabaseSpec) DeepCopyInto(out *LinkedServiceAzureSQLDatabaseSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceAzureSQLDatabaseSpec. 
+func (in *LinkedServiceAzureSQLDatabaseSpec) DeepCopy() *LinkedServiceAzureSQLDatabaseSpec { + if in == nil { + return nil + } + out := new(LinkedServiceAzureSQLDatabaseSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkedServiceAzureSQLDatabaseStatus) DeepCopyInto(out *LinkedServiceAzureSQLDatabaseStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceAzureSQLDatabaseStatus. +func (in *LinkedServiceAzureSQLDatabaseStatus) DeepCopy() *LinkedServiceAzureSQLDatabaseStatus { + if in == nil { + return nil + } + out := new(LinkedServiceAzureSQLDatabaseStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkedServiceInitParameters) DeepCopyInto(out *LinkedServiceInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NameRef != nil { + in, out := &in.NameRef, &out.NameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NameSelector != nil { + in, out := &in.NameSelector, &out.NameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceInitParameters. 
+func (in *LinkedServiceInitParameters) DeepCopy() *LinkedServiceInitParameters { + if in == nil { + return nil + } + out := new(LinkedServiceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkedServiceOData) DeepCopyInto(out *LinkedServiceOData) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceOData. +func (in *LinkedServiceOData) DeepCopy() *LinkedServiceOData { + if in == nil { + return nil + } + out := new(LinkedServiceOData) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LinkedServiceOData) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinkedServiceODataInitParameters) DeepCopyInto(out *LinkedServiceODataInitParameters) { + *out = *in + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.BasicAuthentication != nil { + in, out := &in.BasicAuthentication, &out.BasicAuthentication + *out = new(BasicAuthenticationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.IntegrationRuntimeName != nil { + in, out := &in.IntegrationRuntimeName, &out.IntegrationRuntimeName + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceODataInitParameters. 
+func (in *LinkedServiceODataInitParameters) DeepCopy() *LinkedServiceODataInitParameters { + if in == nil { + return nil + } + out := new(LinkedServiceODataInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkedServiceODataList) DeepCopyInto(out *LinkedServiceODataList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LinkedServiceOData, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceODataList. +func (in *LinkedServiceODataList) DeepCopy() *LinkedServiceODataList { + if in == nil { + return nil + } + out := new(LinkedServiceODataList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LinkedServiceODataList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinkedServiceODataObservation) DeepCopyInto(out *LinkedServiceODataObservation) { + *out = *in + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.BasicAuthentication != nil { + in, out := &in.BasicAuthentication, &out.BasicAuthentication + *out = new(BasicAuthenticationObservation) + (*in).DeepCopyInto(*out) + } + if in.DataFactoryID != nil { + in, out := &in.DataFactoryID, &out.DataFactoryID + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IntegrationRuntimeName != nil { + in, out := &in.IntegrationRuntimeName, &out.IntegrationRuntimeName + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceODataObservation. 
func (in *LinkedServiceODataObservation) DeepCopy() *LinkedServiceODataObservation {
	if in == nil {
		return nil
	}
	out := new(LinkedServiceODataObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LinkedServiceODataParameters) DeepCopyInto(out *LinkedServiceODataParameters) {
	*out = *in
	if in.AdditionalProperties != nil {
		// Clone each map entry; nil values stay nil, non-nil strings are re-allocated.
		in, out := &in.AdditionalProperties, &out.AdditionalProperties
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
	if in.Annotations != nil {
		in, out := &in.Annotations, &out.Annotations
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.BasicAuthentication != nil {
		in, out := &in.BasicAuthentication, &out.BasicAuthentication
		*out = new(BasicAuthenticationParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.DataFactoryID != nil {
		in, out := &in.DataFactoryID, &out.DataFactoryID
		*out = new(string)
		**out = **in
	}
	if in.DataFactoryIDRef != nil {
		in, out := &in.DataFactoryIDRef, &out.DataFactoryIDRef
		*out = new(v1.Reference)
		(*in).DeepCopyInto(*out)
	}
	if in.DataFactoryIDSelector != nil {
		in, out := &in.DataFactoryIDSelector, &out.DataFactoryIDSelector
		*out = new(v1.Selector)
		(*in).DeepCopyInto(*out)
	}
	if in.Description != nil {
		in, out := &in.Description, &out.Description
		*out = new(string)
		**out = **in
	}
	if in.IntegrationRuntimeName != nil {
		in, out := &in.IntegrationRuntimeName, &out.IntegrationRuntimeName
		*out = new(string)
		**out = **in
	}
	if in.Parameters != nil {
		in, out := &in.Parameters, &out.Parameters
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
	if in.URL != nil {
		in, out := &in.URL, &out.URL
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceODataParameters.
func (in *LinkedServiceODataParameters) DeepCopy() *LinkedServiceODataParameters {
	if in == nil {
		return nil
	}
	out := new(LinkedServiceODataParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LinkedServiceODataSpec) DeepCopyInto(out *LinkedServiceODataSpec) {
	*out = *in
	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
	in.ForProvider.DeepCopyInto(&out.ForProvider)
	in.InitProvider.DeepCopyInto(&out.InitProvider)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceODataSpec.
func (in *LinkedServiceODataSpec) DeepCopy() *LinkedServiceODataSpec {
	if in == nil {
		return nil
	}
	out := new(LinkedServiceODataSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LinkedServiceODataStatus) DeepCopyInto(out *LinkedServiceODataStatus) {
	*out = *in
	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
	in.AtProvider.DeepCopyInto(&out.AtProvider)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceODataStatus.
func (in *LinkedServiceODataStatus) DeepCopy() *LinkedServiceODataStatus {
	if in == nil {
		return nil
	}
	out := new(LinkedServiceODataStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LinkedServiceObservation) DeepCopyInto(out *LinkedServiceObservation) {
	*out = *in
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
	if in.Parameters != nil {
		// Clone each map entry; nil values stay nil, non-nil strings are re-allocated.
		in, out := &in.Parameters, &out.Parameters
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceObservation.
func (in *LinkedServiceObservation) DeepCopy() *LinkedServiceObservation {
	if in == nil {
		return nil
	}
	out := new(LinkedServiceObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LinkedServiceOdbc) DeepCopyInto(out *LinkedServiceOdbc) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceOdbc.
func (in *LinkedServiceOdbc) DeepCopy() *LinkedServiceOdbc {
	if in == nil {
		return nil
	}
	out := new(LinkedServiceOdbc)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *LinkedServiceOdbc) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LinkedServiceOdbcBasicAuthenticationInitParameters) DeepCopyInto(out *LinkedServiceOdbcBasicAuthenticationInitParameters) {
	*out = *in
	if in.Username != nil {
		// Re-allocate the string so the copy owns its own pointer.
		in, out := &in.Username, &out.Username
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceOdbcBasicAuthenticationInitParameters.
func (in *LinkedServiceOdbcBasicAuthenticationInitParameters) DeepCopy() *LinkedServiceOdbcBasicAuthenticationInitParameters {
	if in == nil {
		return nil
	}
	out := new(LinkedServiceOdbcBasicAuthenticationInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LinkedServiceOdbcBasicAuthenticationObservation) DeepCopyInto(out *LinkedServiceOdbcBasicAuthenticationObservation) {
	*out = *in
	if in.Username != nil {
		in, out := &in.Username, &out.Username
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceOdbcBasicAuthenticationObservation.
func (in *LinkedServiceOdbcBasicAuthenticationObservation) DeepCopy() *LinkedServiceOdbcBasicAuthenticationObservation {
	if in == nil {
		return nil
	}
	out := new(LinkedServiceOdbcBasicAuthenticationObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LinkedServiceOdbcBasicAuthenticationParameters) DeepCopyInto(out *LinkedServiceOdbcBasicAuthenticationParameters) {
	*out = *in
	// PasswordSecretRef is a value type, so plain assignment is a full copy.
	out.PasswordSecretRef = in.PasswordSecretRef
	if in.Username != nil {
		in, out := &in.Username, &out.Username
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceOdbcBasicAuthenticationParameters.
func (in *LinkedServiceOdbcBasicAuthenticationParameters) DeepCopy() *LinkedServiceOdbcBasicAuthenticationParameters {
	if in == nil {
		return nil
	}
	out := new(LinkedServiceOdbcBasicAuthenticationParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LinkedServiceOdbcInitParameters) DeepCopyInto(out *LinkedServiceOdbcInitParameters) {
	*out = *in
	if in.AdditionalProperties != nil {
		in, out := &in.AdditionalProperties, &out.AdditionalProperties
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
	if in.Annotations != nil {
		in, out := &in.Annotations, &out.Annotations
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.BasicAuthentication != nil {
		in, out := &in.BasicAuthentication, &out.BasicAuthentication
		*out = new(LinkedServiceOdbcBasicAuthenticationInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.ConnectionString != nil {
		in, out := &in.ConnectionString, &out.ConnectionString
		*out = new(string)
		**out = **in
	}
	if in.Description != nil {
		in, out := &in.Description, &out.Description
		*out = new(string)
		**out = **in
	}
	if in.IntegrationRuntimeName != nil {
		in, out := &in.IntegrationRuntimeName, &out.IntegrationRuntimeName
		*out = new(string)
		**out = **in
	}
	if in.Parameters != nil {
		in, out := &in.Parameters, &out.Parameters
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceOdbcInitParameters.
func (in *LinkedServiceOdbcInitParameters) DeepCopy() *LinkedServiceOdbcInitParameters {
	if in == nil {
		return nil
	}
	out := new(LinkedServiceOdbcInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LinkedServiceOdbcList) DeepCopyInto(out *LinkedServiceOdbcList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]LinkedServiceOdbc, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceOdbcList.
func (in *LinkedServiceOdbcList) DeepCopy() *LinkedServiceOdbcList {
	if in == nil {
		return nil
	}
	out := new(LinkedServiceOdbcList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *LinkedServiceOdbcList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LinkedServiceOdbcObservation) DeepCopyInto(out *LinkedServiceOdbcObservation) {
	*out = *in
	if in.AdditionalProperties != nil {
		// Clone each map entry; nil values stay nil, non-nil strings are re-allocated.
		in, out := &in.AdditionalProperties, &out.AdditionalProperties
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
	if in.Annotations != nil {
		in, out := &in.Annotations, &out.Annotations
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.BasicAuthentication != nil {
		in, out := &in.BasicAuthentication, &out.BasicAuthentication
		*out = new(LinkedServiceOdbcBasicAuthenticationObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.ConnectionString != nil {
		in, out := &in.ConnectionString, &out.ConnectionString
		*out = new(string)
		**out = **in
	}
	if in.DataFactoryID != nil {
		in, out := &in.DataFactoryID, &out.DataFactoryID
		*out = new(string)
		**out = **in
	}
	if in.Description != nil {
		in, out := &in.Description, &out.Description
		*out = new(string)
		**out = **in
	}
	if in.ID != nil {
		in, out := &in.ID, &out.ID
		*out = new(string)
		**out = **in
	}
	if in.IntegrationRuntimeName != nil {
		in, out := &in.IntegrationRuntimeName, &out.IntegrationRuntimeName
		*out = new(string)
		**out = **in
	}
	if in.Parameters != nil {
		in, out := &in.Parameters, &out.Parameters
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceOdbcObservation.
func (in *LinkedServiceOdbcObservation) DeepCopy() *LinkedServiceOdbcObservation {
	if in == nil {
		return nil
	}
	out := new(LinkedServiceOdbcObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LinkedServiceOdbcParameters) DeepCopyInto(out *LinkedServiceOdbcParameters) {
	*out = *in
	if in.AdditionalProperties != nil {
		// Clone each map entry; nil values stay nil, non-nil strings are re-allocated.
		in, out := &in.AdditionalProperties, &out.AdditionalProperties
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
	if in.Annotations != nil {
		in, out := &in.Annotations, &out.Annotations
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.BasicAuthentication != nil {
		in, out := &in.BasicAuthentication, &out.BasicAuthentication
		*out = new(LinkedServiceOdbcBasicAuthenticationParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.ConnectionString != nil {
		in, out := &in.ConnectionString, &out.ConnectionString
		*out = new(string)
		**out = **in
	}
	if in.DataFactoryID != nil {
		in, out := &in.DataFactoryID, &out.DataFactoryID
		*out = new(string)
		**out = **in
	}
	if in.DataFactoryIDRef != nil {
		in, out := &in.DataFactoryIDRef, &out.DataFactoryIDRef
		*out = new(v1.Reference)
		(*in).DeepCopyInto(*out)
	}
	if in.DataFactoryIDSelector != nil {
		in, out := &in.DataFactoryIDSelector, &out.DataFactoryIDSelector
		*out = new(v1.Selector)
		(*in).DeepCopyInto(*out)
	}
	if in.Description != nil {
		in, out := &in.Description, &out.Description
		*out = new(string)
		**out = **in
	}
	if in.IntegrationRuntimeName != nil {
		in, out := &in.IntegrationRuntimeName, &out.IntegrationRuntimeName
		*out = new(string)
		**out = **in
	}
	if in.Parameters != nil {
		in, out := &in.Parameters, &out.Parameters
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceOdbcParameters.
func (in *LinkedServiceOdbcParameters) DeepCopy() *LinkedServiceOdbcParameters {
	if in == nil {
		return nil
	}
	out := new(LinkedServiceOdbcParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LinkedServiceOdbcSpec) DeepCopyInto(out *LinkedServiceOdbcSpec) {
	*out = *in
	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
	in.ForProvider.DeepCopyInto(&out.ForProvider)
	in.InitProvider.DeepCopyInto(&out.InitProvider)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceOdbcSpec.
func (in *LinkedServiceOdbcSpec) DeepCopy() *LinkedServiceOdbcSpec {
	if in == nil {
		return nil
	}
	out := new(LinkedServiceOdbcSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LinkedServiceOdbcStatus) DeepCopyInto(out *LinkedServiceOdbcStatus) {
	*out = *in
	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
	in.AtProvider.DeepCopyInto(&out.AtProvider)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceOdbcStatus.
func (in *LinkedServiceOdbcStatus) DeepCopy() *LinkedServiceOdbcStatus {
	if in == nil {
		return nil
	}
	out := new(LinkedServiceOdbcStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LinkedServiceParameters) DeepCopyInto(out *LinkedServiceParameters) {
	*out = *in
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
	if in.NameRef != nil {
		in, out := &in.NameRef, &out.NameRef
		*out = new(v1.Reference)
		(*in).DeepCopyInto(*out)
	}
	if in.NameSelector != nil {
		in, out := &in.NameSelector, &out.NameSelector
		*out = new(v1.Selector)
		(*in).DeepCopyInto(*out)
	}
	if in.Parameters != nil {
		// Clone each map entry; nil values stay nil, non-nil strings are re-allocated.
		in, out := &in.Parameters, &out.Parameters
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceParameters.
func (in *LinkedServiceParameters) DeepCopy() *LinkedServiceParameters {
	if in == nil {
		return nil
	}
	out := new(LinkedServiceParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LinkedServiceSQLServer) DeepCopyInto(out *LinkedServiceSQLServer) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceSQLServer.
func (in *LinkedServiceSQLServer) DeepCopy() *LinkedServiceSQLServer {
	if in == nil {
		return nil
	}
	out := new(LinkedServiceSQLServer)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *LinkedServiceSQLServer) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LinkedServiceSQLServerInitParameters) DeepCopyInto(out *LinkedServiceSQLServerInitParameters) {
	*out = *in
	if in.AdditionalProperties != nil {
		// Clone each map entry; nil values stay nil, non-nil strings are re-allocated.
		in, out := &in.AdditionalProperties, &out.AdditionalProperties
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
	if in.Annotations != nil {
		in, out := &in.Annotations, &out.Annotations
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.ConnectionString != nil {
		in, out := &in.ConnectionString, &out.ConnectionString
		*out = new(string)
		**out = **in
	}
	if in.Description != nil {
		in, out := &in.Description, &out.Description
		*out = new(string)
		**out = **in
	}
	if in.IntegrationRuntimeName != nil {
		in, out := &in.IntegrationRuntimeName, &out.IntegrationRuntimeName
		*out = new(string)
		**out = **in
	}
	if in.KeyVaultConnectionString != nil {
		in, out := &in.KeyVaultConnectionString, &out.KeyVaultConnectionString
		*out = new(LinkedServiceSQLServerKeyVaultConnectionStringInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.KeyVaultPassword != nil {
		in, out := &in.KeyVaultPassword, &out.KeyVaultPassword
		*out = new(LinkedServiceSQLServerKeyVaultPasswordInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Parameters != nil {
		in, out := &in.Parameters, &out.Parameters
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
	if in.UserName != nil {
		in, out := &in.UserName, &out.UserName
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceSQLServerInitParameters.
func (in *LinkedServiceSQLServerInitParameters) DeepCopy() *LinkedServiceSQLServerInitParameters {
	if in == nil {
		return nil
	}
	out := new(LinkedServiceSQLServerInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LinkedServiceSQLServerKeyVaultConnectionStringInitParameters) DeepCopyInto(out *LinkedServiceSQLServerKeyVaultConnectionStringInitParameters) {
	*out = *in
	if in.LinkedServiceName != nil {
		in, out := &in.LinkedServiceName, &out.LinkedServiceName
		*out = new(string)
		**out = **in
	}
	if in.SecretName != nil {
		in, out := &in.SecretName, &out.SecretName
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceSQLServerKeyVaultConnectionStringInitParameters.
func (in *LinkedServiceSQLServerKeyVaultConnectionStringInitParameters) DeepCopy() *LinkedServiceSQLServerKeyVaultConnectionStringInitParameters {
	if in == nil {
		return nil
	}
	out := new(LinkedServiceSQLServerKeyVaultConnectionStringInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LinkedServiceSQLServerKeyVaultConnectionStringObservation) DeepCopyInto(out *LinkedServiceSQLServerKeyVaultConnectionStringObservation) {
	*out = *in
	// Re-allocate both string pointers so the copy shares no memory with the source.
	if in.LinkedServiceName != nil {
		in, out := &in.LinkedServiceName, &out.LinkedServiceName
		*out = new(string)
		**out = **in
	}
	if in.SecretName != nil {
		in, out := &in.SecretName, &out.SecretName
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceSQLServerKeyVaultConnectionStringObservation.
func (in *LinkedServiceSQLServerKeyVaultConnectionStringObservation) DeepCopy() *LinkedServiceSQLServerKeyVaultConnectionStringObservation {
	if in == nil {
		return nil
	}
	out := new(LinkedServiceSQLServerKeyVaultConnectionStringObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LinkedServiceSQLServerKeyVaultConnectionStringParameters) DeepCopyInto(out *LinkedServiceSQLServerKeyVaultConnectionStringParameters) {
	*out = *in
	if in.LinkedServiceName != nil {
		in, out := &in.LinkedServiceName, &out.LinkedServiceName
		*out = new(string)
		**out = **in
	}
	if in.SecretName != nil {
		in, out := &in.SecretName, &out.SecretName
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceSQLServerKeyVaultConnectionStringParameters.
func (in *LinkedServiceSQLServerKeyVaultConnectionStringParameters) DeepCopy() *LinkedServiceSQLServerKeyVaultConnectionStringParameters {
	if in == nil {
		return nil
	}
	out := new(LinkedServiceSQLServerKeyVaultConnectionStringParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LinkedServiceSQLServerKeyVaultPasswordInitParameters) DeepCopyInto(out *LinkedServiceSQLServerKeyVaultPasswordInitParameters) {
	*out = *in
	if in.LinkedServiceName != nil {
		in, out := &in.LinkedServiceName, &out.LinkedServiceName
		*out = new(string)
		**out = **in
	}
	// Reference/Selector carry their own DeepCopyInto; delegate to them.
	if in.LinkedServiceNameRef != nil {
		in, out := &in.LinkedServiceNameRef, &out.LinkedServiceNameRef
		*out = new(v1.Reference)
		(*in).DeepCopyInto(*out)
	}
	if in.LinkedServiceNameSelector != nil {
		in, out := &in.LinkedServiceNameSelector, &out.LinkedServiceNameSelector
		*out = new(v1.Selector)
		(*in).DeepCopyInto(*out)
	}
	if in.SecretName != nil {
		in, out := &in.SecretName, &out.SecretName
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceSQLServerKeyVaultPasswordInitParameters.
func (in *LinkedServiceSQLServerKeyVaultPasswordInitParameters) DeepCopy() *LinkedServiceSQLServerKeyVaultPasswordInitParameters {
	if in == nil {
		return nil
	}
	out := new(LinkedServiceSQLServerKeyVaultPasswordInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LinkedServiceSQLServerKeyVaultPasswordObservation) DeepCopyInto(out *LinkedServiceSQLServerKeyVaultPasswordObservation) {
	*out = *in
	if in.LinkedServiceName != nil {
		in, out := &in.LinkedServiceName, &out.LinkedServiceName
		*out = new(string)
		**out = **in
	}
	if in.SecretName != nil {
		in, out := &in.SecretName, &out.SecretName
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceSQLServerKeyVaultPasswordObservation.
func (in *LinkedServiceSQLServerKeyVaultPasswordObservation) DeepCopy() *LinkedServiceSQLServerKeyVaultPasswordObservation {
	if in == nil {
		return nil
	}
	out := new(LinkedServiceSQLServerKeyVaultPasswordObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LinkedServiceSQLServerKeyVaultPasswordParameters) DeepCopyInto(out *LinkedServiceSQLServerKeyVaultPasswordParameters) {
	*out = *in
	if in.LinkedServiceName != nil {
		in, out := &in.LinkedServiceName, &out.LinkedServiceName
		*out = new(string)
		**out = **in
	}
	// Reference/Selector carry their own DeepCopyInto; delegate to them.
	if in.LinkedServiceNameRef != nil {
		in, out := &in.LinkedServiceNameRef, &out.LinkedServiceNameRef
		*out = new(v1.Reference)
		(*in).DeepCopyInto(*out)
	}
	if in.LinkedServiceNameSelector != nil {
		in, out := &in.LinkedServiceNameSelector, &out.LinkedServiceNameSelector
		*out = new(v1.Selector)
		(*in).DeepCopyInto(*out)
	}
	if in.SecretName != nil {
		in, out := &in.SecretName, &out.SecretName
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceSQLServerKeyVaultPasswordParameters.
func (in *LinkedServiceSQLServerKeyVaultPasswordParameters) DeepCopy() *LinkedServiceSQLServerKeyVaultPasswordParameters {
	if in == nil {
		return nil
	}
	out := new(LinkedServiceSQLServerKeyVaultPasswordParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LinkedServiceSQLServerList) DeepCopyInto(out *LinkedServiceSQLServerList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		// Element-wise deep copy so list items share no pointers with the source.
		in, out := &in.Items, &out.Items
		*out = make([]LinkedServiceSQLServer, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceSQLServerList.
func (in *LinkedServiceSQLServerList) DeepCopy() *LinkedServiceSQLServerList {
	if in == nil {
		return nil
	}
	out := new(LinkedServiceSQLServerList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *LinkedServiceSQLServerList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LinkedServiceSQLServerObservation) DeepCopyInto(out *LinkedServiceSQLServerObservation) {
	*out = *in
	if in.AdditionalProperties != nil {
		// Clone each map entry; nil values stay nil, non-nil strings are re-allocated.
		in, out := &in.AdditionalProperties, &out.AdditionalProperties
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
	if in.Annotations != nil {
		in, out := &in.Annotations, &out.Annotations
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.ConnectionString != nil {
		in, out := &in.ConnectionString, &out.ConnectionString
		*out = new(string)
		**out = **in
	}
	if in.DataFactoryID != nil {
		in, out := &in.DataFactoryID, &out.DataFactoryID
		*out = new(string)
		**out = **in
	}
	if in.Description != nil {
		in, out := &in.Description, &out.Description
		*out = new(string)
		**out = **in
	}
	if in.ID != nil {
		in, out := &in.ID, &out.ID
		*out = new(string)
		**out = **in
	}
	if in.IntegrationRuntimeName != nil {
		in, out := &in.IntegrationRuntimeName, &out.IntegrationRuntimeName
		*out = new(string)
		**out = **in
	}
	if in.KeyVaultConnectionString != nil {
		in, out := &in.KeyVaultConnectionString, &out.KeyVaultConnectionString
		*out = new(LinkedServiceSQLServerKeyVaultConnectionStringObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.KeyVaultPassword != nil {
		in, out := &in.KeyVaultPassword, &out.KeyVaultPassword
		*out = new(LinkedServiceSQLServerKeyVaultPasswordObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.Parameters != nil {
		in, out := &in.Parameters, &out.Parameters
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
	if in.UserName != nil {
		in, out := &in.UserName, &out.UserName
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceSQLServerObservation.
func (in *LinkedServiceSQLServerObservation) DeepCopy() *LinkedServiceSQLServerObservation {
	if in == nil {
		return nil
	}
	out := new(LinkedServiceSQLServerObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LinkedServiceSQLServerParameters) DeepCopyInto(out *LinkedServiceSQLServerParameters) {
	*out = *in
	if in.AdditionalProperties != nil {
		in, out := &in.AdditionalProperties, &out.AdditionalProperties
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
	if in.Annotations != nil {
		in, out := &in.Annotations, &out.Annotations
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.ConnectionString != nil {
		in, out := &in.ConnectionString, &out.ConnectionString
		*out = new(string)
		**out = **in
	}
	if in.DataFactoryID != nil {
		in, out := &in.DataFactoryID, &out.DataFactoryID
		*out = new(string)
		**out = **in
	}
	if in.DataFactoryIDRef != nil {
		in, out := &in.DataFactoryIDRef, &out.DataFactoryIDRef
		*out = new(v1.Reference)
		(*in).DeepCopyInto(*out)
	}
	if in.DataFactoryIDSelector != nil {
		in, out := &in.DataFactoryIDSelector, &out.DataFactoryIDSelector
		*out = new(v1.Selector)
		(*in).DeepCopyInto(*out)
	}
	if in.Description != nil {
		in, out := &in.Description, &out.Description
		*out = new(string)
		**out = **in
	}
	if in.IntegrationRuntimeName != nil {
		in, out := &in.IntegrationRuntimeName, &out.IntegrationRuntimeName
		*out = new(string)
		**out = **in
	}
	if in.KeyVaultConnectionString != nil {
		in, out := &in.KeyVaultConnectionString, &out.KeyVaultConnectionString
		*out = new(LinkedServiceSQLServerKeyVaultConnectionStringParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.KeyVaultPassword != nil {
		in, out := &in.KeyVaultPassword, &out.KeyVaultPassword
		*out = new(LinkedServiceSQLServerKeyVaultPasswordParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Parameters != nil {
		in, out := &in.Parameters, &out.Parameters
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
	if in.UserName != nil {
		in, out := &in.UserName, &out.UserName
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceSQLServerParameters.
func (in *LinkedServiceSQLServerParameters) DeepCopy() *LinkedServiceSQLServerParameters {
	if in == nil {
		return nil
	}
	out := new(LinkedServiceSQLServerParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LinkedServiceSQLServerSpec) DeepCopyInto(out *LinkedServiceSQLServerSpec) {
	*out = *in
	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
	in.ForProvider.DeepCopyInto(&out.ForProvider)
	in.InitProvider.DeepCopyInto(&out.InitProvider)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceSQLServerSpec.
+func (in *LinkedServiceSQLServerSpec) DeepCopy() *LinkedServiceSQLServerSpec { + if in == nil { + return nil + } + out := new(LinkedServiceSQLServerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkedServiceSQLServerStatus) DeepCopyInto(out *LinkedServiceSQLServerStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceSQLServerStatus. +func (in *LinkedServiceSQLServerStatus) DeepCopy() *LinkedServiceSQLServerStatus { + if in == nil { + return nil + } + out := new(LinkedServiceSQLServerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkedServiceSnowflake) DeepCopyInto(out *LinkedServiceSnowflake) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceSnowflake. +func (in *LinkedServiceSnowflake) DeepCopy() *LinkedServiceSnowflake { + if in == nil { + return nil + } + out := new(LinkedServiceSnowflake) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LinkedServiceSnowflake) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinkedServiceSnowflakeInitParameters) DeepCopyInto(out *LinkedServiceSnowflakeInitParameters) { + *out = *in + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ConnectionString != nil { + in, out := &in.ConnectionString, &out.ConnectionString + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.IntegrationRuntimeName != nil { + in, out := &in.IntegrationRuntimeName, &out.IntegrationRuntimeName + *out = new(string) + **out = **in + } + if in.KeyVaultPassword != nil { + in, out := &in.KeyVaultPassword, &out.KeyVaultPassword + *out = new(LinkedServiceSnowflakeKeyVaultPasswordInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceSnowflakeInitParameters. 
+func (in *LinkedServiceSnowflakeInitParameters) DeepCopy() *LinkedServiceSnowflakeInitParameters { + if in == nil { + return nil + } + out := new(LinkedServiceSnowflakeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkedServiceSnowflakeKeyVaultPasswordInitParameters) DeepCopyInto(out *LinkedServiceSnowflakeKeyVaultPasswordInitParameters) { + *out = *in + if in.LinkedServiceName != nil { + in, out := &in.LinkedServiceName, &out.LinkedServiceName + *out = new(string) + **out = **in + } + if in.LinkedServiceNameRef != nil { + in, out := &in.LinkedServiceNameRef, &out.LinkedServiceNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LinkedServiceNameSelector != nil { + in, out := &in.LinkedServiceNameSelector, &out.LinkedServiceNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceSnowflakeKeyVaultPasswordInitParameters. +func (in *LinkedServiceSnowflakeKeyVaultPasswordInitParameters) DeepCopy() *LinkedServiceSnowflakeKeyVaultPasswordInitParameters { + if in == nil { + return nil + } + out := new(LinkedServiceSnowflakeKeyVaultPasswordInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinkedServiceSnowflakeKeyVaultPasswordObservation) DeepCopyInto(out *LinkedServiceSnowflakeKeyVaultPasswordObservation) { + *out = *in + if in.LinkedServiceName != nil { + in, out := &in.LinkedServiceName, &out.LinkedServiceName + *out = new(string) + **out = **in + } + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceSnowflakeKeyVaultPasswordObservation. +func (in *LinkedServiceSnowflakeKeyVaultPasswordObservation) DeepCopy() *LinkedServiceSnowflakeKeyVaultPasswordObservation { + if in == nil { + return nil + } + out := new(LinkedServiceSnowflakeKeyVaultPasswordObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkedServiceSnowflakeKeyVaultPasswordParameters) DeepCopyInto(out *LinkedServiceSnowflakeKeyVaultPasswordParameters) { + *out = *in + if in.LinkedServiceName != nil { + in, out := &in.LinkedServiceName, &out.LinkedServiceName + *out = new(string) + **out = **in + } + if in.LinkedServiceNameRef != nil { + in, out := &in.LinkedServiceNameRef, &out.LinkedServiceNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LinkedServiceNameSelector != nil { + in, out := &in.LinkedServiceNameSelector, &out.LinkedServiceNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceSnowflakeKeyVaultPasswordParameters. 
+func (in *LinkedServiceSnowflakeKeyVaultPasswordParameters) DeepCopy() *LinkedServiceSnowflakeKeyVaultPasswordParameters { + if in == nil { + return nil + } + out := new(LinkedServiceSnowflakeKeyVaultPasswordParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkedServiceSnowflakeList) DeepCopyInto(out *LinkedServiceSnowflakeList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LinkedServiceSnowflake, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceSnowflakeList. +func (in *LinkedServiceSnowflakeList) DeepCopy() *LinkedServiceSnowflakeList { + if in == nil { + return nil + } + out := new(LinkedServiceSnowflakeList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LinkedServiceSnowflakeList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinkedServiceSnowflakeObservation) DeepCopyInto(out *LinkedServiceSnowflakeObservation) { + *out = *in + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ConnectionString != nil { + in, out := &in.ConnectionString, &out.ConnectionString + *out = new(string) + **out = **in + } + if in.DataFactoryID != nil { + in, out := &in.DataFactoryID, &out.DataFactoryID + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IntegrationRuntimeName != nil { + in, out := &in.IntegrationRuntimeName, &out.IntegrationRuntimeName + *out = new(string) + **out = **in + } + if in.KeyVaultPassword != nil { + in, out := &in.KeyVaultPassword, &out.KeyVaultPassword + *out = new(LinkedServiceSnowflakeKeyVaultPasswordObservation) + (*in).DeepCopyInto(*out) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceSnowflakeObservation. 
+func (in *LinkedServiceSnowflakeObservation) DeepCopy() *LinkedServiceSnowflakeObservation { + if in == nil { + return nil + } + out := new(LinkedServiceSnowflakeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkedServiceSnowflakeParameters) DeepCopyInto(out *LinkedServiceSnowflakeParameters) { + *out = *in + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ConnectionString != nil { + in, out := &in.ConnectionString, &out.ConnectionString + *out = new(string) + **out = **in + } + if in.DataFactoryID != nil { + in, out := &in.DataFactoryID, &out.DataFactoryID + *out = new(string) + **out = **in + } + if in.DataFactoryIDRef != nil { + in, out := &in.DataFactoryIDRef, &out.DataFactoryIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DataFactoryIDSelector != nil { + in, out := &in.DataFactoryIDSelector, &out.DataFactoryIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.IntegrationRuntimeName != nil { + in, out := &in.IntegrationRuntimeName, &out.IntegrationRuntimeName + *out = new(string) + **out = **in + } + if in.KeyVaultPassword != nil { + in, out := &in.KeyVaultPassword, &out.KeyVaultPassword + *out = 
new(LinkedServiceSnowflakeKeyVaultPasswordParameters) + (*in).DeepCopyInto(*out) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceSnowflakeParameters. +func (in *LinkedServiceSnowflakeParameters) DeepCopy() *LinkedServiceSnowflakeParameters { + if in == nil { + return nil + } + out := new(LinkedServiceSnowflakeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkedServiceSnowflakeSpec) DeepCopyInto(out *LinkedServiceSnowflakeSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceSnowflakeSpec. +func (in *LinkedServiceSnowflakeSpec) DeepCopy() *LinkedServiceSnowflakeSpec { + if in == nil { + return nil + } + out := new(LinkedServiceSnowflakeSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkedServiceSnowflakeStatus) DeepCopyInto(out *LinkedServiceSnowflakeStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceSnowflakeStatus. 
+func (in *LinkedServiceSnowflakeStatus) DeepCopy() *LinkedServiceSnowflakeStatus { + if in == nil { + return nil + } + out := new(LinkedServiceSnowflakeStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkedServiceSynapse) DeepCopyInto(out *LinkedServiceSynapse) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceSynapse. +func (in *LinkedServiceSynapse) DeepCopy() *LinkedServiceSynapse { + if in == nil { + return nil + } + out := new(LinkedServiceSynapse) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LinkedServiceSynapse) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinkedServiceSynapseInitParameters) DeepCopyInto(out *LinkedServiceSynapseInitParameters) { + *out = *in + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ConnectionString != nil { + in, out := &in.ConnectionString, &out.ConnectionString + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.IntegrationRuntimeName != nil { + in, out := &in.IntegrationRuntimeName, &out.IntegrationRuntimeName + *out = new(string) + **out = **in + } + if in.KeyVaultPassword != nil { + in, out := &in.KeyVaultPassword, &out.KeyVaultPassword + *out = new(LinkedServiceSynapseKeyVaultPasswordInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceSynapseInitParameters. 
+func (in *LinkedServiceSynapseInitParameters) DeepCopy() *LinkedServiceSynapseInitParameters { + if in == nil { + return nil + } + out := new(LinkedServiceSynapseInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkedServiceSynapseKeyVaultPasswordInitParameters) DeepCopyInto(out *LinkedServiceSynapseKeyVaultPasswordInitParameters) { + *out = *in + if in.LinkedServiceName != nil { + in, out := &in.LinkedServiceName, &out.LinkedServiceName + *out = new(string) + **out = **in + } + if in.LinkedServiceNameRef != nil { + in, out := &in.LinkedServiceNameRef, &out.LinkedServiceNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LinkedServiceNameSelector != nil { + in, out := &in.LinkedServiceNameSelector, &out.LinkedServiceNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceSynapseKeyVaultPasswordInitParameters. +func (in *LinkedServiceSynapseKeyVaultPasswordInitParameters) DeepCopy() *LinkedServiceSynapseKeyVaultPasswordInitParameters { + if in == nil { + return nil + } + out := new(LinkedServiceSynapseKeyVaultPasswordInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinkedServiceSynapseKeyVaultPasswordObservation) DeepCopyInto(out *LinkedServiceSynapseKeyVaultPasswordObservation) { + *out = *in + if in.LinkedServiceName != nil { + in, out := &in.LinkedServiceName, &out.LinkedServiceName + *out = new(string) + **out = **in + } + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceSynapseKeyVaultPasswordObservation. +func (in *LinkedServiceSynapseKeyVaultPasswordObservation) DeepCopy() *LinkedServiceSynapseKeyVaultPasswordObservation { + if in == nil { + return nil + } + out := new(LinkedServiceSynapseKeyVaultPasswordObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkedServiceSynapseKeyVaultPasswordParameters) DeepCopyInto(out *LinkedServiceSynapseKeyVaultPasswordParameters) { + *out = *in + if in.LinkedServiceName != nil { + in, out := &in.LinkedServiceName, &out.LinkedServiceName + *out = new(string) + **out = **in + } + if in.LinkedServiceNameRef != nil { + in, out := &in.LinkedServiceNameRef, &out.LinkedServiceNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LinkedServiceNameSelector != nil { + in, out := &in.LinkedServiceNameSelector, &out.LinkedServiceNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceSynapseKeyVaultPasswordParameters. 
+func (in *LinkedServiceSynapseKeyVaultPasswordParameters) DeepCopy() *LinkedServiceSynapseKeyVaultPasswordParameters { + if in == nil { + return nil + } + out := new(LinkedServiceSynapseKeyVaultPasswordParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkedServiceSynapseList) DeepCopyInto(out *LinkedServiceSynapseList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LinkedServiceSynapse, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceSynapseList. +func (in *LinkedServiceSynapseList) DeepCopy() *LinkedServiceSynapseList { + if in == nil { + return nil + } + out := new(LinkedServiceSynapseList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LinkedServiceSynapseList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinkedServiceSynapseObservation) DeepCopyInto(out *LinkedServiceSynapseObservation) { + *out = *in + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ConnectionString != nil { + in, out := &in.ConnectionString, &out.ConnectionString + *out = new(string) + **out = **in + } + if in.DataFactoryID != nil { + in, out := &in.DataFactoryID, &out.DataFactoryID + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IntegrationRuntimeName != nil { + in, out := &in.IntegrationRuntimeName, &out.IntegrationRuntimeName + *out = new(string) + **out = **in + } + if in.KeyVaultPassword != nil { + in, out := &in.KeyVaultPassword, &out.KeyVaultPassword + *out = new(LinkedServiceSynapseKeyVaultPasswordObservation) + (*in).DeepCopyInto(*out) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceSynapseObservation. 
+func (in *LinkedServiceSynapseObservation) DeepCopy() *LinkedServiceSynapseObservation { + if in == nil { + return nil + } + out := new(LinkedServiceSynapseObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkedServiceSynapseParameters) DeepCopyInto(out *LinkedServiceSynapseParameters) { + *out = *in + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ConnectionString != nil { + in, out := &in.ConnectionString, &out.ConnectionString + *out = new(string) + **out = **in + } + if in.DataFactoryID != nil { + in, out := &in.DataFactoryID, &out.DataFactoryID + *out = new(string) + **out = **in + } + if in.DataFactoryIDRef != nil { + in, out := &in.DataFactoryIDRef, &out.DataFactoryIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DataFactoryIDSelector != nil { + in, out := &in.DataFactoryIDSelector, &out.DataFactoryIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.IntegrationRuntimeName != nil { + in, out := &in.IntegrationRuntimeName, &out.IntegrationRuntimeName + *out = new(string) + **out = **in + } + if in.KeyVaultPassword != nil { + in, out := &in.KeyVaultPassword, &out.KeyVaultPassword + *out = 
new(LinkedServiceSynapseKeyVaultPasswordParameters) + (*in).DeepCopyInto(*out) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceSynapseParameters. +func (in *LinkedServiceSynapseParameters) DeepCopy() *LinkedServiceSynapseParameters { + if in == nil { + return nil + } + out := new(LinkedServiceSynapseParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkedServiceSynapseSpec) DeepCopyInto(out *LinkedServiceSynapseSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceSynapseSpec. +func (in *LinkedServiceSynapseSpec) DeepCopy() *LinkedServiceSynapseSpec { + if in == nil { + return nil + } + out := new(LinkedServiceSynapseSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkedServiceSynapseStatus) DeepCopyInto(out *LinkedServiceSynapseStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceSynapseStatus. 
+func (in *LinkedServiceSynapseStatus) DeepCopy() *LinkedServiceSynapseStatus { + if in == nil { + return nil + } + out := new(LinkedServiceSynapseStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonthlyInitParameters) DeepCopyInto(out *MonthlyInitParameters) { + *out = *in + if in.Week != nil { + in, out := &in.Week, &out.Week + *out = new(float64) + **out = **in + } + if in.Weekday != nil { + in, out := &in.Weekday, &out.Weekday + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonthlyInitParameters. +func (in *MonthlyInitParameters) DeepCopy() *MonthlyInitParameters { + if in == nil { + return nil + } + out := new(MonthlyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonthlyObservation) DeepCopyInto(out *MonthlyObservation) { + *out = *in + if in.Week != nil { + in, out := &in.Week, &out.Week + *out = new(float64) + **out = **in + } + if in.Weekday != nil { + in, out := &in.Weekday, &out.Weekday + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonthlyObservation. +func (in *MonthlyObservation) DeepCopy() *MonthlyObservation { + if in == nil { + return nil + } + out := new(MonthlyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonthlyParameters) DeepCopyInto(out *MonthlyParameters) { + *out = *in + if in.Week != nil { + in, out := &in.Week, &out.Week + *out = new(float64) + **out = **in + } + if in.Weekday != nil { + in, out := &in.Weekday, &out.Weekday + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonthlyParameters. +func (in *MonthlyParameters) DeepCopy() *MonthlyParameters { + if in == nil { + return nil + } + out := new(MonthlyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NewClusterConfigInitParameters) DeepCopyInto(out *NewClusterConfigInitParameters) { + *out = *in + if in.ClusterVersion != nil { + in, out := &in.ClusterVersion, &out.ClusterVersion + *out = new(string) + **out = **in + } + if in.CustomTags != nil { + in, out := &in.CustomTags, &out.CustomTags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.DriverNodeType != nil { + in, out := &in.DriverNodeType, &out.DriverNodeType + *out = new(string) + **out = **in + } + if in.InitScripts != nil { + in, out := &in.InitScripts, &out.InitScripts + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LogDestination != nil { + in, out := &in.LogDestination, &out.LogDestination + *out = new(string) + **out = **in + } + if in.MaxNumberOfWorkers != nil { + in, out := &in.MaxNumberOfWorkers, &out.MaxNumberOfWorkers + *out = new(float64) + **out = **in + } + if in.MinNumberOfWorkers != nil { + in, out := &in.MinNumberOfWorkers, &out.MinNumberOfWorkers + *out = new(float64) 
+ **out = **in + } + if in.NodeType != nil { + in, out := &in.NodeType, &out.NodeType + *out = new(string) + **out = **in + } + if in.SparkConfig != nil { + in, out := &in.SparkConfig, &out.SparkConfig + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.SparkEnvironmentVariables != nil { + in, out := &in.SparkEnvironmentVariables, &out.SparkEnvironmentVariables + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NewClusterConfigInitParameters. +func (in *NewClusterConfigInitParameters) DeepCopy() *NewClusterConfigInitParameters { + if in == nil { + return nil + } + out := new(NewClusterConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NewClusterConfigObservation) DeepCopyInto(out *NewClusterConfigObservation) { + *out = *in + if in.ClusterVersion != nil { + in, out := &in.ClusterVersion, &out.ClusterVersion + *out = new(string) + **out = **in + } + if in.CustomTags != nil { + in, out := &in.CustomTags, &out.CustomTags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.DriverNodeType != nil { + in, out := &in.DriverNodeType, &out.DriverNodeType + *out = new(string) + **out = **in + } + if in.InitScripts != nil { + in, out := &in.InitScripts, &out.InitScripts + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LogDestination != nil { + in, out := &in.LogDestination, &out.LogDestination + *out = new(string) + **out = **in + } + if in.MaxNumberOfWorkers != nil { + in, out := &in.MaxNumberOfWorkers, &out.MaxNumberOfWorkers + *out = new(float64) + **out = **in + } + if in.MinNumberOfWorkers != nil { + in, out := &in.MinNumberOfWorkers, &out.MinNumberOfWorkers + *out = new(float64) + **out = **in + } + if in.NodeType != nil { + in, out := &in.NodeType, &out.NodeType + *out = new(string) + **out = **in + } + if in.SparkConfig != nil { + in, out := &in.SparkConfig, &out.SparkConfig + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.SparkEnvironmentVariables != nil { + in, out := &in.SparkEnvironmentVariables, &out.SparkEnvironmentVariables + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if 
val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NewClusterConfigObservation. +func (in *NewClusterConfigObservation) DeepCopy() *NewClusterConfigObservation { + if in == nil { + return nil + } + out := new(NewClusterConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NewClusterConfigParameters) DeepCopyInto(out *NewClusterConfigParameters) { + *out = *in + if in.ClusterVersion != nil { + in, out := &in.ClusterVersion, &out.ClusterVersion + *out = new(string) + **out = **in + } + if in.CustomTags != nil { + in, out := &in.CustomTags, &out.CustomTags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.DriverNodeType != nil { + in, out := &in.DriverNodeType, &out.DriverNodeType + *out = new(string) + **out = **in + } + if in.InitScripts != nil { + in, out := &in.InitScripts, &out.InitScripts + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LogDestination != nil { + in, out := &in.LogDestination, &out.LogDestination + *out = new(string) + **out = **in + } + if in.MaxNumberOfWorkers != nil { + in, out := &in.MaxNumberOfWorkers, &out.MaxNumberOfWorkers + *out = new(float64) + **out = **in + } + if in.MinNumberOfWorkers != nil { + in, out := &in.MinNumberOfWorkers, &out.MinNumberOfWorkers + *out = new(float64) + **out = **in + } + if in.NodeType != nil { + in, out := &in.NodeType, 
&out.NodeType + *out = new(string) + **out = **in + } + if in.SparkConfig != nil { + in, out := &in.SparkConfig, &out.SparkConfig + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.SparkEnvironmentVariables != nil { + in, out := &in.SparkEnvironmentVariables, &out.SparkEnvironmentVariables + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NewClusterConfigParameters. +func (in *NewClusterConfigParameters) DeepCopy() *NewClusterConfigParameters { + if in == nil { + return nil + } + out := new(NewClusterConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PackageStoreInitParameters) DeepCopyInto(out *PackageStoreInitParameters) { + *out = *in + if in.LinkedServiceName != nil { + in, out := &in.LinkedServiceName, &out.LinkedServiceName + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PackageStoreInitParameters. +func (in *PackageStoreInitParameters) DeepCopy() *PackageStoreInitParameters { + if in == nil { + return nil + } + out := new(PackageStoreInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PackageStoreObservation) DeepCopyInto(out *PackageStoreObservation) { + *out = *in + if in.LinkedServiceName != nil { + in, out := &in.LinkedServiceName, &out.LinkedServiceName + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PackageStoreObservation. +func (in *PackageStoreObservation) DeepCopy() *PackageStoreObservation { + if in == nil { + return nil + } + out := new(PackageStoreObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PackageStoreParameters) DeepCopyInto(out *PackageStoreParameters) { + *out = *in + if in.LinkedServiceName != nil { + in, out := &in.LinkedServiceName, &out.LinkedServiceName + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PackageStoreParameters. +func (in *PackageStoreParameters) DeepCopy() *PackageStoreParameters { + if in == nil { + return nil + } + out := new(PackageStoreParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PipelineInitParameters) DeepCopyInto(out *PipelineInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineInitParameters. +func (in *PipelineInitParameters) DeepCopy() *PipelineInitParameters { + if in == nil { + return nil + } + out := new(PipelineInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PipelineObservation) DeepCopyInto(out *PipelineObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineObservation. +func (in *PipelineObservation) DeepCopy() *PipelineObservation { + if in == nil { + return nil + } + out := new(PipelineObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PipelineParameters) DeepCopyInto(out *PipelineParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineParameters. +func (in *PipelineParameters) DeepCopy() *PipelineParameters { + if in == nil { + return nil + } + out := new(PipelineParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxyInitParameters) DeepCopyInto(out *ProxyInitParameters) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.SelfHostedIntegrationRuntimeName != nil { + in, out := &in.SelfHostedIntegrationRuntimeName, &out.SelfHostedIntegrationRuntimeName + *out = new(string) + **out = **in + } + if in.StagingStorageLinkedServiceName != nil { + in, out := &in.StagingStorageLinkedServiceName, &out.StagingStorageLinkedServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyInitParameters. +func (in *ProxyInitParameters) DeepCopy() *ProxyInitParameters { + if in == nil { + return nil + } + out := new(ProxyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProxyObservation) DeepCopyInto(out *ProxyObservation) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.SelfHostedIntegrationRuntimeName != nil { + in, out := &in.SelfHostedIntegrationRuntimeName, &out.SelfHostedIntegrationRuntimeName + *out = new(string) + **out = **in + } + if in.StagingStorageLinkedServiceName != nil { + in, out := &in.StagingStorageLinkedServiceName, &out.StagingStorageLinkedServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyObservation. +func (in *ProxyObservation) DeepCopy() *ProxyObservation { + if in == nil { + return nil + } + out := new(ProxyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxyParameters) DeepCopyInto(out *ProxyParameters) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.SelfHostedIntegrationRuntimeName != nil { + in, out := &in.SelfHostedIntegrationRuntimeName, &out.SelfHostedIntegrationRuntimeName + *out = new(string) + **out = **in + } + if in.StagingStorageLinkedServiceName != nil { + in, out := &in.StagingStorageLinkedServiceName, &out.StagingStorageLinkedServiceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyParameters. +func (in *ProxyParameters) DeepCopy() *ProxyParameters { + if in == nil { + return nil + } + out := new(ProxyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RejectedLinkedServiceInitParameters) DeepCopyInto(out *RejectedLinkedServiceInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RejectedLinkedServiceInitParameters. +func (in *RejectedLinkedServiceInitParameters) DeepCopy() *RejectedLinkedServiceInitParameters { + if in == nil { + return nil + } + out := new(RejectedLinkedServiceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RejectedLinkedServiceObservation) DeepCopyInto(out *RejectedLinkedServiceObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RejectedLinkedServiceObservation. 
+func (in *RejectedLinkedServiceObservation) DeepCopy() *RejectedLinkedServiceObservation { + if in == nil { + return nil + } + out := new(RejectedLinkedServiceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RejectedLinkedServiceParameters) DeepCopyInto(out *RejectedLinkedServiceParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RejectedLinkedServiceParameters. +func (in *RejectedLinkedServiceParameters) DeepCopy() *RejectedLinkedServiceParameters { + if in == nil { + return nil + } + out := new(RejectedLinkedServiceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SFTPServerLocationInitParameters) DeepCopyInto(out *SFTPServerLocationInitParameters) { + *out = *in + if in.DynamicFilenameEnabled != nil { + in, out := &in.DynamicFilenameEnabled, &out.DynamicFilenameEnabled + *out = new(bool) + **out = **in + } + if in.DynamicPathEnabled != nil { + in, out := &in.DynamicPathEnabled, &out.DynamicPathEnabled + *out = new(bool) + **out = **in + } + if in.Filename != nil { + in, out := &in.Filename, &out.Filename + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SFTPServerLocationInitParameters. +func (in *SFTPServerLocationInitParameters) DeepCopy() *SFTPServerLocationInitParameters { + if in == nil { + return nil + } + out := new(SFTPServerLocationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SFTPServerLocationObservation) DeepCopyInto(out *SFTPServerLocationObservation) { + *out = *in + if in.DynamicFilenameEnabled != nil { + in, out := &in.DynamicFilenameEnabled, &out.DynamicFilenameEnabled + *out = new(bool) + **out = **in + } + if in.DynamicPathEnabled != nil { + in, out := &in.DynamicPathEnabled, &out.DynamicPathEnabled + *out = new(bool) + **out = **in + } + if in.Filename != nil { + in, out := &in.Filename, &out.Filename + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SFTPServerLocationObservation. 
+func (in *SFTPServerLocationObservation) DeepCopy() *SFTPServerLocationObservation { + if in == nil { + return nil + } + out := new(SFTPServerLocationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SFTPServerLocationParameters) DeepCopyInto(out *SFTPServerLocationParameters) { + *out = *in + if in.DynamicFilenameEnabled != nil { + in, out := &in.DynamicFilenameEnabled, &out.DynamicFilenameEnabled + *out = new(bool) + **out = **in + } + if in.DynamicPathEnabled != nil { + in, out := &in.DynamicPathEnabled, &out.DynamicPathEnabled + *out = new(bool) + **out = **in + } + if in.Filename != nil { + in, out := &in.Filename, &out.Filename + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SFTPServerLocationParameters. +func (in *SFTPServerLocationParameters) DeepCopy() *SFTPServerLocationParameters { + if in == nil { + return nil + } + out := new(SFTPServerLocationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScheduleInitParameters) DeepCopyInto(out *ScheduleInitParameters) { + *out = *in + if in.DaysOfMonth != nil { + in, out := &in.DaysOfMonth, &out.DaysOfMonth + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.DaysOfWeek != nil { + in, out := &in.DaysOfWeek, &out.DaysOfWeek + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Hours != nil { + in, out := &in.Hours, &out.Hours + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.Minutes != nil { + in, out := &in.Minutes, &out.Minutes + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.Monthly != nil { + in, out := &in.Monthly, &out.Monthly + *out = make([]MonthlyInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleInitParameters. +func (in *ScheduleInitParameters) DeepCopy() *ScheduleInitParameters { + if in == nil { + return nil + } + out := new(ScheduleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScheduleObservation) DeepCopyInto(out *ScheduleObservation) { + *out = *in + if in.DaysOfMonth != nil { + in, out := &in.DaysOfMonth, &out.DaysOfMonth + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.DaysOfWeek != nil { + in, out := &in.DaysOfWeek, &out.DaysOfWeek + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Hours != nil { + in, out := &in.Hours, &out.Hours + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.Minutes != nil { + in, out := &in.Minutes, &out.Minutes + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.Monthly != nil { + in, out := &in.Monthly, &out.Monthly + *out = make([]MonthlyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleObservation. +func (in *ScheduleObservation) DeepCopy() *ScheduleObservation { + if in == nil { + return nil + } + out := new(ScheduleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScheduleParameters) DeepCopyInto(out *ScheduleParameters) { + *out = *in + if in.DaysOfMonth != nil { + in, out := &in.DaysOfMonth, &out.DaysOfMonth + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.DaysOfWeek != nil { + in, out := &in.DaysOfWeek, &out.DaysOfWeek + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Hours != nil { + in, out := &in.Hours, &out.Hours + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.Minutes != nil { + in, out := &in.Minutes, &out.Minutes + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.Monthly != nil { + in, out := &in.Monthly, &out.Monthly + *out = make([]MonthlyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleParameters. +func (in *ScheduleParameters) DeepCopy() *ScheduleParameters { + if in == nil { + return nil + } + out := new(ScheduleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SchemaColumnInitParameters) DeepCopyInto(out *SchemaColumnInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaColumnInitParameters. +func (in *SchemaColumnInitParameters) DeepCopy() *SchemaColumnInitParameters { + if in == nil { + return nil + } + out := new(SchemaColumnInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchemaColumnObservation) DeepCopyInto(out *SchemaColumnObservation) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaColumnObservation. +func (in *SchemaColumnObservation) DeepCopy() *SchemaColumnObservation { + if in == nil { + return nil + } + out := new(SchemaColumnObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SchemaColumnParameters) DeepCopyInto(out *SchemaColumnParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaColumnParameters. +func (in *SchemaColumnParameters) DeepCopy() *SchemaColumnParameters { + if in == nil { + return nil + } + out := new(SchemaColumnParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchemaLinkedServiceInitParameters) DeepCopyInto(out *SchemaLinkedServiceInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaLinkedServiceInitParameters. +func (in *SchemaLinkedServiceInitParameters) DeepCopy() *SchemaLinkedServiceInitParameters { + if in == nil { + return nil + } + out := new(SchemaLinkedServiceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SchemaLinkedServiceObservation) DeepCopyInto(out *SchemaLinkedServiceObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaLinkedServiceObservation. +func (in *SchemaLinkedServiceObservation) DeepCopy() *SchemaLinkedServiceObservation { + if in == nil { + return nil + } + out := new(SchemaLinkedServiceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchemaLinkedServiceParameters) DeepCopyInto(out *SchemaLinkedServiceParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaLinkedServiceParameters. +func (in *SchemaLinkedServiceParameters) DeepCopy() *SchemaLinkedServiceParameters { + if in == nil { + return nil + } + out := new(SchemaLinkedServiceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServicePrincipalLinkedKeyVaultKeyInitParameters) DeepCopyInto(out *ServicePrincipalLinkedKeyVaultKeyInitParameters) { + *out = *in + if in.LinkedServiceName != nil { + in, out := &in.LinkedServiceName, &out.LinkedServiceName + *out = new(string) + **out = **in + } + if in.LinkedServiceNameRef != nil { + in, out := &in.LinkedServiceNameRef, &out.LinkedServiceNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LinkedServiceNameSelector != nil { + in, out := &in.LinkedServiceNameSelector, &out.LinkedServiceNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicePrincipalLinkedKeyVaultKeyInitParameters. +func (in *ServicePrincipalLinkedKeyVaultKeyInitParameters) DeepCopy() *ServicePrincipalLinkedKeyVaultKeyInitParameters { + if in == nil { + return nil + } + out := new(ServicePrincipalLinkedKeyVaultKeyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServicePrincipalLinkedKeyVaultKeyObservation) DeepCopyInto(out *ServicePrincipalLinkedKeyVaultKeyObservation) { + *out = *in + if in.LinkedServiceName != nil { + in, out := &in.LinkedServiceName, &out.LinkedServiceName + *out = new(string) + **out = **in + } + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicePrincipalLinkedKeyVaultKeyObservation. 
+func (in *ServicePrincipalLinkedKeyVaultKeyObservation) DeepCopy() *ServicePrincipalLinkedKeyVaultKeyObservation { + if in == nil { + return nil + } + out := new(ServicePrincipalLinkedKeyVaultKeyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServicePrincipalLinkedKeyVaultKeyParameters) DeepCopyInto(out *ServicePrincipalLinkedKeyVaultKeyParameters) { + *out = *in + if in.LinkedServiceName != nil { + in, out := &in.LinkedServiceName, &out.LinkedServiceName + *out = new(string) + **out = **in + } + if in.LinkedServiceNameRef != nil { + in, out := &in.LinkedServiceNameRef, &out.LinkedServiceNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LinkedServiceNameSelector != nil { + in, out := &in.LinkedServiceNameSelector, &out.LinkedServiceNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicePrincipalLinkedKeyVaultKeyParameters. +func (in *ServicePrincipalLinkedKeyVaultKeyParameters) DeepCopy() *ServicePrincipalLinkedKeyVaultKeyParameters { + if in == nil { + return nil + } + out := new(ServicePrincipalLinkedKeyVaultKeyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SinkInitParameters) DeepCopyInto(out *SinkInitParameters) { + *out = *in + if in.DataSet != nil { + in, out := &in.DataSet, &out.DataSet + *out = new(DataSetInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Flowlet != nil { + in, out := &in.Flowlet, &out.Flowlet + *out = new(FlowletInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LinkedService != nil { + in, out := &in.LinkedService, &out.LinkedService + *out = new(SinkLinkedServiceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RejectedLinkedService != nil { + in, out := &in.RejectedLinkedService, &out.RejectedLinkedService + *out = new(RejectedLinkedServiceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SchemaLinkedService != nil { + in, out := &in.SchemaLinkedService, &out.SchemaLinkedService + *out = new(SchemaLinkedServiceInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SinkInitParameters. +func (in *SinkInitParameters) DeepCopy() *SinkInitParameters { + if in == nil { + return nil + } + out := new(SinkInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SinkLinkedServiceInitParameters) DeepCopyInto(out *SinkLinkedServiceInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SinkLinkedServiceInitParameters. +func (in *SinkLinkedServiceInitParameters) DeepCopy() *SinkLinkedServiceInitParameters { + if in == nil { + return nil + } + out := new(SinkLinkedServiceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SinkLinkedServiceObservation) DeepCopyInto(out *SinkLinkedServiceObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SinkLinkedServiceObservation. +func (in *SinkLinkedServiceObservation) DeepCopy() *SinkLinkedServiceObservation { + if in == nil { + return nil + } + out := new(SinkLinkedServiceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SinkLinkedServiceParameters) DeepCopyInto(out *SinkLinkedServiceParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SinkLinkedServiceParameters. +func (in *SinkLinkedServiceParameters) DeepCopy() *SinkLinkedServiceParameters { + if in == nil { + return nil + } + out := new(SinkLinkedServiceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SinkObservation) DeepCopyInto(out *SinkObservation) { + *out = *in + if in.DataSet != nil { + in, out := &in.DataSet, &out.DataSet + *out = new(DataSetObservation) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Flowlet != nil { + in, out := &in.Flowlet, &out.Flowlet + *out = new(FlowletObservation) + (*in).DeepCopyInto(*out) + } + if in.LinkedService != nil { + in, out := &in.LinkedService, &out.LinkedService + *out = new(SinkLinkedServiceObservation) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RejectedLinkedService != nil { + in, out := &in.RejectedLinkedService, &out.RejectedLinkedService + *out = new(RejectedLinkedServiceObservation) + (*in).DeepCopyInto(*out) + } + if in.SchemaLinkedService != nil { + in, out := &in.SchemaLinkedService, &out.SchemaLinkedService + *out = 
new(SchemaLinkedServiceObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SinkObservation. +func (in *SinkObservation) DeepCopy() *SinkObservation { + if in == nil { + return nil + } + out := new(SinkObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SinkParameters) DeepCopyInto(out *SinkParameters) { + *out = *in + if in.DataSet != nil { + in, out := &in.DataSet, &out.DataSet + *out = new(DataSetParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Flowlet != nil { + in, out := &in.Flowlet, &out.Flowlet + *out = new(FlowletParameters) + (*in).DeepCopyInto(*out) + } + if in.LinkedService != nil { + in, out := &in.LinkedService, &out.LinkedService + *out = new(SinkLinkedServiceParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RejectedLinkedService != nil { + in, out := &in.RejectedLinkedService, &out.RejectedLinkedService + *out = new(RejectedLinkedServiceParameters) + (*in).DeepCopyInto(*out) + } + if in.SchemaLinkedService != nil { + in, out := &in.SchemaLinkedService, &out.SchemaLinkedService + *out = new(SchemaLinkedServiceParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SinkParameters. +func (in *SinkParameters) DeepCopy() *SinkParameters { + if in == nil { + return nil + } + out := new(SinkParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SourceDataSetInitParameters) DeepCopyInto(out *SourceDataSetInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NameRef != nil { + in, out := &in.NameRef, &out.NameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NameSelector != nil { + in, out := &in.NameSelector, &out.NameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceDataSetInitParameters. +func (in *SourceDataSetInitParameters) DeepCopy() *SourceDataSetInitParameters { + if in == nil { + return nil + } + out := new(SourceDataSetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceDataSetObservation) DeepCopyInto(out *SourceDataSetObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceDataSetObservation. 
+func (in *SourceDataSetObservation) DeepCopy() *SourceDataSetObservation { + if in == nil { + return nil + } + out := new(SourceDataSetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceDataSetParameters) DeepCopyInto(out *SourceDataSetParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NameRef != nil { + in, out := &in.NameRef, &out.NameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NameSelector != nil { + in, out := &in.NameSelector, &out.NameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceDataSetParameters. +func (in *SourceDataSetParameters) DeepCopy() *SourceDataSetParameters { + if in == nil { + return nil + } + out := new(SourceDataSetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SourceFlowletInitParameters) DeepCopyInto(out *SourceFlowletInitParameters) { + *out = *in + if in.DataSetParameters != nil { + in, out := &in.DataSetParameters, &out.DataSetParameters + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceFlowletInitParameters. +func (in *SourceFlowletInitParameters) DeepCopy() *SourceFlowletInitParameters { + if in == nil { + return nil + } + out := new(SourceFlowletInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceFlowletObservation) DeepCopyInto(out *SourceFlowletObservation) { + *out = *in + if in.DataSetParameters != nil { + in, out := &in.DataSetParameters, &out.DataSetParameters + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceFlowletObservation. 
+func (in *SourceFlowletObservation) DeepCopy() *SourceFlowletObservation { + if in == nil { + return nil + } + out := new(SourceFlowletObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceFlowletParameters) DeepCopyInto(out *SourceFlowletParameters) { + *out = *in + if in.DataSetParameters != nil { + in, out := &in.DataSetParameters, &out.DataSetParameters + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceFlowletParameters. +func (in *SourceFlowletParameters) DeepCopy() *SourceFlowletParameters { + if in == nil { + return nil + } + out := new(SourceFlowletParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SourceInitParameters) DeepCopyInto(out *SourceInitParameters) { + *out = *in + if in.DataSet != nil { + in, out := &in.DataSet, &out.DataSet + *out = new(SourceDataSetInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Flowlet != nil { + in, out := &in.Flowlet, &out.Flowlet + *out = new(SourceFlowletInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LinkedService != nil { + in, out := &in.LinkedService, &out.LinkedService + *out = new(SourceLinkedServiceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RejectedLinkedService != nil { + in, out := &in.RejectedLinkedService, &out.RejectedLinkedService + *out = new(SourceRejectedLinkedServiceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SchemaLinkedService != nil { + in, out := &in.SchemaLinkedService, &out.SchemaLinkedService + *out = new(SourceSchemaLinkedServiceInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceInitParameters. +func (in *SourceInitParameters) DeepCopy() *SourceInitParameters { + if in == nil { + return nil + } + out := new(SourceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SourceLinkedServiceInitParameters) DeepCopyInto(out *SourceLinkedServiceInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceLinkedServiceInitParameters. +func (in *SourceLinkedServiceInitParameters) DeepCopy() *SourceLinkedServiceInitParameters { + if in == nil { + return nil + } + out := new(SourceLinkedServiceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceLinkedServiceObservation) DeepCopyInto(out *SourceLinkedServiceObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceLinkedServiceObservation. +func (in *SourceLinkedServiceObservation) DeepCopy() *SourceLinkedServiceObservation { + if in == nil { + return nil + } + out := new(SourceLinkedServiceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *SourceLinkedServiceParameters) DeepCopyInto(out *SourceLinkedServiceParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceLinkedServiceParameters. +func (in *SourceLinkedServiceParameters) DeepCopy() *SourceLinkedServiceParameters { + if in == nil { + return nil + } + out := new(SourceLinkedServiceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SourceObservation) DeepCopyInto(out *SourceObservation) { + *out = *in + if in.DataSet != nil { + in, out := &in.DataSet, &out.DataSet + *out = new(SourceDataSetObservation) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Flowlet != nil { + in, out := &in.Flowlet, &out.Flowlet + *out = new(SourceFlowletObservation) + (*in).DeepCopyInto(*out) + } + if in.LinkedService != nil { + in, out := &in.LinkedService, &out.LinkedService + *out = new(SourceLinkedServiceObservation) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RejectedLinkedService != nil { + in, out := &in.RejectedLinkedService, &out.RejectedLinkedService + *out = new(SourceRejectedLinkedServiceObservation) + (*in).DeepCopyInto(*out) + } + if in.SchemaLinkedService != nil { + in, out := &in.SchemaLinkedService, &out.SchemaLinkedService + *out = new(SourceSchemaLinkedServiceObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceObservation. +func (in *SourceObservation) DeepCopy() *SourceObservation { + if in == nil { + return nil + } + out := new(SourceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SourceParameters) DeepCopyInto(out *SourceParameters) { + *out = *in + if in.DataSet != nil { + in, out := &in.DataSet, &out.DataSet + *out = new(SourceDataSetParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Flowlet != nil { + in, out := &in.Flowlet, &out.Flowlet + *out = new(SourceFlowletParameters) + (*in).DeepCopyInto(*out) + } + if in.LinkedService != nil { + in, out := &in.LinkedService, &out.LinkedService + *out = new(SourceLinkedServiceParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RejectedLinkedService != nil { + in, out := &in.RejectedLinkedService, &out.RejectedLinkedService + *out = new(SourceRejectedLinkedServiceParameters) + (*in).DeepCopyInto(*out) + } + if in.SchemaLinkedService != nil { + in, out := &in.SchemaLinkedService, &out.SchemaLinkedService + *out = new(SourceSchemaLinkedServiceParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceParameters. +func (in *SourceParameters) DeepCopy() *SourceParameters { + if in == nil { + return nil + } + out := new(SourceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SourceRejectedLinkedServiceInitParameters) DeepCopyInto(out *SourceRejectedLinkedServiceInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceRejectedLinkedServiceInitParameters. +func (in *SourceRejectedLinkedServiceInitParameters) DeepCopy() *SourceRejectedLinkedServiceInitParameters { + if in == nil { + return nil + } + out := new(SourceRejectedLinkedServiceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceRejectedLinkedServiceObservation) DeepCopyInto(out *SourceRejectedLinkedServiceObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceRejectedLinkedServiceObservation. 
+func (in *SourceRejectedLinkedServiceObservation) DeepCopy() *SourceRejectedLinkedServiceObservation { + if in == nil { + return nil + } + out := new(SourceRejectedLinkedServiceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceRejectedLinkedServiceParameters) DeepCopyInto(out *SourceRejectedLinkedServiceParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceRejectedLinkedServiceParameters. +func (in *SourceRejectedLinkedServiceParameters) DeepCopy() *SourceRejectedLinkedServiceParameters { + if in == nil { + return nil + } + out := new(SourceRejectedLinkedServiceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SourceSchemaLinkedServiceInitParameters) DeepCopyInto(out *SourceSchemaLinkedServiceInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceSchemaLinkedServiceInitParameters. +func (in *SourceSchemaLinkedServiceInitParameters) DeepCopy() *SourceSchemaLinkedServiceInitParameters { + if in == nil { + return nil + } + out := new(SourceSchemaLinkedServiceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceSchemaLinkedServiceObservation) DeepCopyInto(out *SourceSchemaLinkedServiceObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceSchemaLinkedServiceObservation. 
+func (in *SourceSchemaLinkedServiceObservation) DeepCopy() *SourceSchemaLinkedServiceObservation { + if in == nil { + return nil + } + out := new(SourceSchemaLinkedServiceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceSchemaLinkedServiceParameters) DeepCopyInto(out *SourceSchemaLinkedServiceParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceSchemaLinkedServiceParameters. +func (in *SourceSchemaLinkedServiceParameters) DeepCopy() *SourceSchemaLinkedServiceParameters { + if in == nil { + return nil + } + out := new(SourceSchemaLinkedServiceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TransformationDataSetInitParameters) DeepCopyInto(out *TransformationDataSetInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransformationDataSetInitParameters. +func (in *TransformationDataSetInitParameters) DeepCopy() *TransformationDataSetInitParameters { + if in == nil { + return nil + } + out := new(TransformationDataSetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TransformationDataSetObservation) DeepCopyInto(out *TransformationDataSetObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransformationDataSetObservation. 
+func (in *TransformationDataSetObservation) DeepCopy() *TransformationDataSetObservation { + if in == nil { + return nil + } + out := new(TransformationDataSetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TransformationDataSetParameters) DeepCopyInto(out *TransformationDataSetParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransformationDataSetParameters. +func (in *TransformationDataSetParameters) DeepCopy() *TransformationDataSetParameters { + if in == nil { + return nil + } + out := new(TransformationDataSetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TransformationFlowletInitParameters) DeepCopyInto(out *TransformationFlowletInitParameters) { + *out = *in + if in.DataSetParameters != nil { + in, out := &in.DataSetParameters, &out.DataSetParameters + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransformationFlowletInitParameters. +func (in *TransformationFlowletInitParameters) DeepCopy() *TransformationFlowletInitParameters { + if in == nil { + return nil + } + out := new(TransformationFlowletInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TransformationFlowletObservation) DeepCopyInto(out *TransformationFlowletObservation) { + *out = *in + if in.DataSetParameters != nil { + in, out := &in.DataSetParameters, &out.DataSetParameters + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransformationFlowletObservation. 
+func (in *TransformationFlowletObservation) DeepCopy() *TransformationFlowletObservation { + if in == nil { + return nil + } + out := new(TransformationFlowletObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TransformationFlowletParameters) DeepCopyInto(out *TransformationFlowletParameters) { + *out = *in + if in.DataSetParameters != nil { + in, out := &in.DataSetParameters, &out.DataSetParameters + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransformationFlowletParameters. +func (in *TransformationFlowletParameters) DeepCopy() *TransformationFlowletParameters { + if in == nil { + return nil + } + out := new(TransformationFlowletParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TransformationInitParameters) DeepCopyInto(out *TransformationInitParameters) { + *out = *in + if in.DataSet != nil { + in, out := &in.DataSet, &out.DataSet + *out = new(TransformationDataSetInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Flowlet != nil { + in, out := &in.Flowlet, &out.Flowlet + *out = new(TransformationFlowletInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LinkedService != nil { + in, out := &in.LinkedService, &out.LinkedService + *out = new(TransformationLinkedServiceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransformationInitParameters. +func (in *TransformationInitParameters) DeepCopy() *TransformationInitParameters { + if in == nil { + return nil + } + out := new(TransformationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TransformationLinkedServiceInitParameters) DeepCopyInto(out *TransformationLinkedServiceInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransformationLinkedServiceInitParameters. 
+func (in *TransformationLinkedServiceInitParameters) DeepCopy() *TransformationLinkedServiceInitParameters { + if in == nil { + return nil + } + out := new(TransformationLinkedServiceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TransformationLinkedServiceObservation) DeepCopyInto(out *TransformationLinkedServiceObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransformationLinkedServiceObservation. +func (in *TransformationLinkedServiceObservation) DeepCopy() *TransformationLinkedServiceObservation { + if in == nil { + return nil + } + out := new(TransformationLinkedServiceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TransformationLinkedServiceParameters) DeepCopyInto(out *TransformationLinkedServiceParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransformationLinkedServiceParameters. +func (in *TransformationLinkedServiceParameters) DeepCopy() *TransformationLinkedServiceParameters { + if in == nil { + return nil + } + out := new(TransformationLinkedServiceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TransformationObservation) DeepCopyInto(out *TransformationObservation) { + *out = *in + if in.DataSet != nil { + in, out := &in.DataSet, &out.DataSet + *out = new(TransformationDataSetObservation) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Flowlet != nil { + in, out := &in.Flowlet, &out.Flowlet + *out = new(TransformationFlowletObservation) + (*in).DeepCopyInto(*out) + } + if in.LinkedService != nil { + in, out := &in.LinkedService, &out.LinkedService + *out = new(TransformationLinkedServiceObservation) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransformationObservation. 
+func (in *TransformationObservation) DeepCopy() *TransformationObservation { + if in == nil { + return nil + } + out := new(TransformationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TransformationParameters) DeepCopyInto(out *TransformationParameters) { + *out = *in + if in.DataSet != nil { + in, out := &in.DataSet, &out.DataSet + *out = new(TransformationDataSetParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Flowlet != nil { + in, out := &in.Flowlet, &out.Flowlet + *out = new(TransformationFlowletParameters) + (*in).DeepCopyInto(*out) + } + if in.LinkedService != nil { + in, out := &in.LinkedService, &out.LinkedService + *out = new(TransformationLinkedServiceParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransformationParameters. +func (in *TransformationParameters) DeepCopy() *TransformationParameters { + if in == nil { + return nil + } + out := new(TransformationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TriggerSchedule) DeepCopyInto(out *TriggerSchedule) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerSchedule. 
+func (in *TriggerSchedule) DeepCopy() *TriggerSchedule { + if in == nil { + return nil + } + out := new(TriggerSchedule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TriggerSchedule) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TriggerScheduleInitParameters) DeepCopyInto(out *TriggerScheduleInitParameters) { + *out = *in + if in.Activated != nil { + in, out := &in.Activated, &out.Activated + *out = new(bool) + **out = **in + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.EndTime != nil { + in, out := &in.EndTime, &out.EndTime + *out = new(string) + **out = **in + } + if in.Frequency != nil { + in, out := &in.Frequency, &out.Frequency + *out = new(string) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.Pipeline != nil { + in, out := &in.Pipeline, &out.Pipeline + *out = make([]PipelineInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PipelineName != nil { + in, out := &in.PipelineName, &out.PipelineName + *out = new(string) + **out = **in + } + if in.PipelineNameRef != nil { + in, out := &in.PipelineNameRef, &out.PipelineNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PipelineNameSelector != nil { + in, out := &in.PipelineNameSelector, &out.PipelineNameSelector + *out = new(v1.Selector) + 
(*in).DeepCopyInto(*out) + } + if in.PipelineParameters != nil { + in, out := &in.PipelineParameters, &out.PipelineParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(ScheduleInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerScheduleInitParameters. +func (in *TriggerScheduleInitParameters) DeepCopy() *TriggerScheduleInitParameters { + if in == nil { + return nil + } + out := new(TriggerScheduleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TriggerScheduleList) DeepCopyInto(out *TriggerScheduleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]TriggerSchedule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerScheduleList. +func (in *TriggerScheduleList) DeepCopy() *TriggerScheduleList { + if in == nil { + return nil + } + out := new(TriggerScheduleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *TriggerScheduleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TriggerScheduleObservation) DeepCopyInto(out *TriggerScheduleObservation) { + *out = *in + if in.Activated != nil { + in, out := &in.Activated, &out.Activated + *out = new(bool) + **out = **in + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DataFactoryID != nil { + in, out := &in.DataFactoryID, &out.DataFactoryID + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.EndTime != nil { + in, out := &in.EndTime, &out.EndTime + *out = new(string) + **out = **in + } + if in.Frequency != nil { + in, out := &in.Frequency, &out.Frequency + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.Pipeline != nil { + in, out := &in.Pipeline, &out.Pipeline + *out = make([]PipelineObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PipelineName != nil { + in, out := &in.PipelineName, &out.PipelineName + *out = new(string) + **out = **in + } + if in.PipelineParameters != nil { + in, out := &in.PipelineParameters, &out.PipelineParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = 
outVal + } + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(ScheduleObservation) + (*in).DeepCopyInto(*out) + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerScheduleObservation. +func (in *TriggerScheduleObservation) DeepCopy() *TriggerScheduleObservation { + if in == nil { + return nil + } + out := new(TriggerScheduleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TriggerScheduleParameters) DeepCopyInto(out *TriggerScheduleParameters) { + *out = *in + if in.Activated != nil { + in, out := &in.Activated, &out.Activated + *out = new(bool) + **out = **in + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DataFactoryID != nil { + in, out := &in.DataFactoryID, &out.DataFactoryID + *out = new(string) + **out = **in + } + if in.DataFactoryIDRef != nil { + in, out := &in.DataFactoryIDRef, &out.DataFactoryIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DataFactoryIDSelector != nil { + in, out := &in.DataFactoryIDSelector, &out.DataFactoryIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.EndTime != nil { + in, out := &in.EndTime, &out.EndTime + *out = new(string) + **out = **in + } + if in.Frequency != nil { + in, out := &in.Frequency, &out.Frequency + *out = new(string) + **out = 
**in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.Pipeline != nil { + in, out := &in.Pipeline, &out.Pipeline + *out = make([]PipelineParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PipelineName != nil { + in, out := &in.PipelineName, &out.PipelineName + *out = new(string) + **out = **in + } + if in.PipelineNameRef != nil { + in, out := &in.PipelineNameRef, &out.PipelineNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PipelineNameSelector != nil { + in, out := &in.PipelineNameSelector, &out.PipelineNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PipelineParameters != nil { + in, out := &in.PipelineParameters, &out.PipelineParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(ScheduleParameters) + (*in).DeepCopyInto(*out) + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerScheduleParameters. +func (in *TriggerScheduleParameters) DeepCopy() *TriggerScheduleParameters { + if in == nil { + return nil + } + out := new(TriggerScheduleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TriggerScheduleSpec) DeepCopyInto(out *TriggerScheduleSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerScheduleSpec. +func (in *TriggerScheduleSpec) DeepCopy() *TriggerScheduleSpec { + if in == nil { + return nil + } + out := new(TriggerScheduleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TriggerScheduleStatus) DeepCopyInto(out *TriggerScheduleStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerScheduleStatus. +func (in *TriggerScheduleStatus) DeepCopy() *TriggerScheduleStatus { + if in == nil { + return nil + } + out := new(TriggerScheduleStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VnetIntegrationInitParameters) DeepCopyInto(out *VnetIntegrationInitParameters) { + *out = *in + if in.PublicIps != nil { + in, out := &in.PublicIps, &out.PublicIps + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SubnetName != nil { + in, out := &in.SubnetName, &out.SubnetName + *out = new(string) + **out = **in + } + if in.SubnetNameRef != nil { + in, out := &in.SubnetNameRef, &out.SubnetNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetNameSelector != nil { + in, out := &in.SubnetNameSelector, &out.SubnetNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.VnetID != nil { + in, out := &in.VnetID, &out.VnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VnetIntegrationInitParameters. +func (in *VnetIntegrationInitParameters) DeepCopy() *VnetIntegrationInitParameters { + if in == nil { + return nil + } + out := new(VnetIntegrationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VnetIntegrationObservation) DeepCopyInto(out *VnetIntegrationObservation) { + *out = *in + if in.PublicIps != nil { + in, out := &in.PublicIps, &out.PublicIps + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetName != nil { + in, out := &in.SubnetName, &out.SubnetName + *out = new(string) + **out = **in + } + if in.VnetID != nil { + in, out := &in.VnetID, &out.VnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VnetIntegrationObservation. +func (in *VnetIntegrationObservation) DeepCopy() *VnetIntegrationObservation { + if in == nil { + return nil + } + out := new(VnetIntegrationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VnetIntegrationParameters) DeepCopyInto(out *VnetIntegrationParameters) { + *out = *in + if in.PublicIps != nil { + in, out := &in.PublicIps, &out.PublicIps + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SubnetName != nil { + in, out := &in.SubnetName, &out.SubnetName + *out = new(string) + **out = **in + } + if in.SubnetNameRef != nil { + in, out := &in.SubnetNameRef, &out.SubnetNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetNameSelector != nil { + in, out := &in.SubnetNameSelector, &out.SubnetNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.VnetID != nil { + in, out := &in.VnetID, &out.VnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VnetIntegrationParameters. +func (in *VnetIntegrationParameters) DeepCopy() *VnetIntegrationParameters { + if in == nil { + return nil + } + out := new(VnetIntegrationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VstsConfigurationInitParameters) DeepCopyInto(out *VstsConfigurationInitParameters) { + *out = *in + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } + if in.BranchName != nil { + in, out := &in.BranchName, &out.BranchName + *out = new(string) + **out = **in + } + if in.ProjectName != nil { + in, out := &in.ProjectName, &out.ProjectName + *out = new(string) + **out = **in + } + if in.PublishingEnabled != nil { + in, out := &in.PublishingEnabled, &out.PublishingEnabled + *out = new(bool) + **out = **in + } + if in.RepositoryName != nil { + in, out := &in.RepositoryName, &out.RepositoryName + *out = new(string) + **out = **in + } + if in.RootFolder != nil { + in, out := &in.RootFolder, &out.RootFolder + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VstsConfigurationInitParameters. +func (in *VstsConfigurationInitParameters) DeepCopy() *VstsConfigurationInitParameters { + if in == nil { + return nil + } + out := new(VstsConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VstsConfigurationObservation) DeepCopyInto(out *VstsConfigurationObservation) { + *out = *in + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } + if in.BranchName != nil { + in, out := &in.BranchName, &out.BranchName + *out = new(string) + **out = **in + } + if in.ProjectName != nil { + in, out := &in.ProjectName, &out.ProjectName + *out = new(string) + **out = **in + } + if in.PublishingEnabled != nil { + in, out := &in.PublishingEnabled, &out.PublishingEnabled + *out = new(bool) + **out = **in + } + if in.RepositoryName != nil { + in, out := &in.RepositoryName, &out.RepositoryName + *out = new(string) + **out = **in + } + if in.RootFolder != nil { + in, out := &in.RootFolder, &out.RootFolder + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VstsConfigurationObservation. +func (in *VstsConfigurationObservation) DeepCopy() *VstsConfigurationObservation { + if in == nil { + return nil + } + out := new(VstsConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VstsConfigurationParameters) DeepCopyInto(out *VstsConfigurationParameters) { + *out = *in + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } + if in.BranchName != nil { + in, out := &in.BranchName, &out.BranchName + *out = new(string) + **out = **in + } + if in.ProjectName != nil { + in, out := &in.ProjectName, &out.ProjectName + *out = new(string) + **out = **in + } + if in.PublishingEnabled != nil { + in, out := &in.PublishingEnabled, &out.PublishingEnabled + *out = new(bool) + **out = **in + } + if in.RepositoryName != nil { + in, out := &in.RepositoryName, &out.RepositoryName + *out = new(string) + **out = **in + } + if in.RootFolder != nil { + in, out := &in.RootFolder, &out.RootFolder + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VstsConfigurationParameters. +func (in *VstsConfigurationParameters) DeepCopy() *VstsConfigurationParameters { + if in == nil { + return nil + } + out := new(VstsConfigurationParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/datafactory/v1beta2/zz_generated.managed.go b/apis/datafactory/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..2c7cfde1d --- /dev/null +++ b/apis/datafactory/v1beta2/zz_generated.managed.go @@ -0,0 +1,1268 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this CustomDataSet. +func (mg *CustomDataSet) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this CustomDataSet. 
+func (mg *CustomDataSet) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this CustomDataSet. +func (mg *CustomDataSet) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this CustomDataSet. +func (mg *CustomDataSet) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this CustomDataSet. +func (mg *CustomDataSet) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this CustomDataSet. +func (mg *CustomDataSet) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this CustomDataSet. +func (mg *CustomDataSet) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this CustomDataSet. +func (mg *CustomDataSet) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this CustomDataSet. +func (mg *CustomDataSet) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this CustomDataSet. +func (mg *CustomDataSet) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this CustomDataSet. +func (mg *CustomDataSet) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this CustomDataSet. +func (mg *CustomDataSet) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this DataFlow. 
+func (mg *DataFlow) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this DataFlow. +func (mg *DataFlow) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this DataFlow. +func (mg *DataFlow) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this DataFlow. +func (mg *DataFlow) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this DataFlow. +func (mg *DataFlow) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this DataFlow. +func (mg *DataFlow) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this DataFlow. +func (mg *DataFlow) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this DataFlow. +func (mg *DataFlow) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this DataFlow. +func (mg *DataFlow) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this DataFlow. +func (mg *DataFlow) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this DataFlow. +func (mg *DataFlow) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this DataFlow. +func (mg *DataFlow) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this DataSetBinary. 
+func (mg *DataSetBinary) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this DataSetBinary. +func (mg *DataSetBinary) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this DataSetBinary. +func (mg *DataSetBinary) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this DataSetBinary. +func (mg *DataSetBinary) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this DataSetBinary. +func (mg *DataSetBinary) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this DataSetBinary. +func (mg *DataSetBinary) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this DataSetBinary. +func (mg *DataSetBinary) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this DataSetBinary. +func (mg *DataSetBinary) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this DataSetBinary. +func (mg *DataSetBinary) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this DataSetBinary. +func (mg *DataSetBinary) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this DataSetBinary. +func (mg *DataSetBinary) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this DataSetBinary. 
+func (mg *DataSetBinary) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this DataSetDelimitedText. +func (mg *DataSetDelimitedText) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this DataSetDelimitedText. +func (mg *DataSetDelimitedText) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this DataSetDelimitedText. +func (mg *DataSetDelimitedText) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this DataSetDelimitedText. +func (mg *DataSetDelimitedText) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this DataSetDelimitedText. +func (mg *DataSetDelimitedText) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this DataSetDelimitedText. +func (mg *DataSetDelimitedText) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this DataSetDelimitedText. +func (mg *DataSetDelimitedText) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this DataSetDelimitedText. +func (mg *DataSetDelimitedText) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this DataSetDelimitedText. +func (mg *DataSetDelimitedText) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this DataSetDelimitedText. 
+func (mg *DataSetDelimitedText) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this DataSetDelimitedText. +func (mg *DataSetDelimitedText) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this DataSetDelimitedText. +func (mg *DataSetDelimitedText) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this DataSetJSON. +func (mg *DataSetJSON) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this DataSetJSON. +func (mg *DataSetJSON) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this DataSetJSON. +func (mg *DataSetJSON) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this DataSetJSON. +func (mg *DataSetJSON) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this DataSetJSON. +func (mg *DataSetJSON) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this DataSetJSON. +func (mg *DataSetJSON) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this DataSetJSON. +func (mg *DataSetJSON) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this DataSetJSON. +func (mg *DataSetJSON) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this DataSetJSON. 
+func (mg *DataSetJSON) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this DataSetJSON. +func (mg *DataSetJSON) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this DataSetJSON. +func (mg *DataSetJSON) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this DataSetJSON. +func (mg *DataSetJSON) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this DataSetParquet. +func (mg *DataSetParquet) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this DataSetParquet. +func (mg *DataSetParquet) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this DataSetParquet. +func (mg *DataSetParquet) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this DataSetParquet. +func (mg *DataSetParquet) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this DataSetParquet. +func (mg *DataSetParquet) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this DataSetParquet. +func (mg *DataSetParquet) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this DataSetParquet. +func (mg *DataSetParquet) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this DataSetParquet. 
+func (mg *DataSetParquet) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this DataSetParquet. +func (mg *DataSetParquet) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this DataSetParquet. +func (mg *DataSetParquet) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this DataSetParquet. +func (mg *DataSetParquet) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this DataSetParquet. +func (mg *DataSetParquet) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Factory. +func (mg *Factory) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Factory. +func (mg *Factory) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Factory. +func (mg *Factory) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Factory. +func (mg *Factory) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Factory. +func (mg *Factory) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Factory. +func (mg *Factory) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Factory. +func (mg *Factory) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Factory. 
+func (mg *Factory) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Factory. +func (mg *Factory) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Factory. +func (mg *Factory) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Factory. +func (mg *Factory) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Factory. +func (mg *Factory) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this IntegrationRuntimeAzureSSIS. +func (mg *IntegrationRuntimeAzureSSIS) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this IntegrationRuntimeAzureSSIS. +func (mg *IntegrationRuntimeAzureSSIS) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this IntegrationRuntimeAzureSSIS. +func (mg *IntegrationRuntimeAzureSSIS) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this IntegrationRuntimeAzureSSIS. +func (mg *IntegrationRuntimeAzureSSIS) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this IntegrationRuntimeAzureSSIS. +func (mg *IntegrationRuntimeAzureSSIS) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this IntegrationRuntimeAzureSSIS. 
+func (mg *IntegrationRuntimeAzureSSIS) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this IntegrationRuntimeAzureSSIS. +func (mg *IntegrationRuntimeAzureSSIS) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this IntegrationRuntimeAzureSSIS. +func (mg *IntegrationRuntimeAzureSSIS) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this IntegrationRuntimeAzureSSIS. +func (mg *IntegrationRuntimeAzureSSIS) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this IntegrationRuntimeAzureSSIS. +func (mg *IntegrationRuntimeAzureSSIS) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this IntegrationRuntimeAzureSSIS. +func (mg *IntegrationRuntimeAzureSSIS) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this IntegrationRuntimeAzureSSIS. +func (mg *IntegrationRuntimeAzureSSIS) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this IntegrationRuntimeManaged. +func (mg *IntegrationRuntimeManaged) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this IntegrationRuntimeManaged. +func (mg *IntegrationRuntimeManaged) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this IntegrationRuntimeManaged. +func (mg *IntegrationRuntimeManaged) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this IntegrationRuntimeManaged. 
+func (mg *IntegrationRuntimeManaged) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this IntegrationRuntimeManaged. +func (mg *IntegrationRuntimeManaged) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this IntegrationRuntimeManaged. +func (mg *IntegrationRuntimeManaged) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this IntegrationRuntimeManaged. +func (mg *IntegrationRuntimeManaged) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this IntegrationRuntimeManaged. +func (mg *IntegrationRuntimeManaged) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this IntegrationRuntimeManaged. +func (mg *IntegrationRuntimeManaged) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this IntegrationRuntimeManaged. +func (mg *IntegrationRuntimeManaged) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this IntegrationRuntimeManaged. +func (mg *IntegrationRuntimeManaged) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this IntegrationRuntimeManaged. +func (mg *IntegrationRuntimeManaged) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this LinkedCustomService. +func (mg *LinkedCustomService) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this LinkedCustomService. 
+func (mg *LinkedCustomService) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this LinkedCustomService. +func (mg *LinkedCustomService) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this LinkedCustomService. +func (mg *LinkedCustomService) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this LinkedCustomService. +func (mg *LinkedCustomService) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this LinkedCustomService. +func (mg *LinkedCustomService) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this LinkedCustomService. +func (mg *LinkedCustomService) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this LinkedCustomService. +func (mg *LinkedCustomService) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this LinkedCustomService. +func (mg *LinkedCustomService) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this LinkedCustomService. +func (mg *LinkedCustomService) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this LinkedCustomService. +func (mg *LinkedCustomService) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this LinkedCustomService. 
+func (mg *LinkedCustomService) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this LinkedServiceAzureBlobStorage. +func (mg *LinkedServiceAzureBlobStorage) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this LinkedServiceAzureBlobStorage. +func (mg *LinkedServiceAzureBlobStorage) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this LinkedServiceAzureBlobStorage. +func (mg *LinkedServiceAzureBlobStorage) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this LinkedServiceAzureBlobStorage. +func (mg *LinkedServiceAzureBlobStorage) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this LinkedServiceAzureBlobStorage. +func (mg *LinkedServiceAzureBlobStorage) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this LinkedServiceAzureBlobStorage. +func (mg *LinkedServiceAzureBlobStorage) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this LinkedServiceAzureBlobStorage. +func (mg *LinkedServiceAzureBlobStorage) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this LinkedServiceAzureBlobStorage. +func (mg *LinkedServiceAzureBlobStorage) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this LinkedServiceAzureBlobStorage. 
+func (mg *LinkedServiceAzureBlobStorage) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this LinkedServiceAzureBlobStorage. +func (mg *LinkedServiceAzureBlobStorage) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this LinkedServiceAzureBlobStorage. +func (mg *LinkedServiceAzureBlobStorage) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this LinkedServiceAzureBlobStorage. +func (mg *LinkedServiceAzureBlobStorage) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this LinkedServiceAzureDatabricks. +func (mg *LinkedServiceAzureDatabricks) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this LinkedServiceAzureDatabricks. +func (mg *LinkedServiceAzureDatabricks) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this LinkedServiceAzureDatabricks. +func (mg *LinkedServiceAzureDatabricks) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this LinkedServiceAzureDatabricks. +func (mg *LinkedServiceAzureDatabricks) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this LinkedServiceAzureDatabricks. +func (mg *LinkedServiceAzureDatabricks) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this LinkedServiceAzureDatabricks. 
+func (mg *LinkedServiceAzureDatabricks) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this LinkedServiceAzureDatabricks. +func (mg *LinkedServiceAzureDatabricks) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this LinkedServiceAzureDatabricks. +func (mg *LinkedServiceAzureDatabricks) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this LinkedServiceAzureDatabricks. +func (mg *LinkedServiceAzureDatabricks) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this LinkedServiceAzureDatabricks. +func (mg *LinkedServiceAzureDatabricks) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this LinkedServiceAzureDatabricks. +func (mg *LinkedServiceAzureDatabricks) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this LinkedServiceAzureDatabricks. +func (mg *LinkedServiceAzureDatabricks) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this LinkedServiceAzureFileStorage. +func (mg *LinkedServiceAzureFileStorage) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this LinkedServiceAzureFileStorage. +func (mg *LinkedServiceAzureFileStorage) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this LinkedServiceAzureFileStorage. 
+func (mg *LinkedServiceAzureFileStorage) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this LinkedServiceAzureFileStorage. +func (mg *LinkedServiceAzureFileStorage) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this LinkedServiceAzureFileStorage. +func (mg *LinkedServiceAzureFileStorage) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this LinkedServiceAzureFileStorage. +func (mg *LinkedServiceAzureFileStorage) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this LinkedServiceAzureFileStorage. +func (mg *LinkedServiceAzureFileStorage) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this LinkedServiceAzureFileStorage. +func (mg *LinkedServiceAzureFileStorage) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this LinkedServiceAzureFileStorage. +func (mg *LinkedServiceAzureFileStorage) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this LinkedServiceAzureFileStorage. +func (mg *LinkedServiceAzureFileStorage) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this LinkedServiceAzureFileStorage. +func (mg *LinkedServiceAzureFileStorage) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this LinkedServiceAzureFileStorage. 
+func (mg *LinkedServiceAzureFileStorage) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this LinkedServiceAzureFunction. +func (mg *LinkedServiceAzureFunction) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this LinkedServiceAzureFunction. +func (mg *LinkedServiceAzureFunction) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this LinkedServiceAzureFunction. +func (mg *LinkedServiceAzureFunction) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this LinkedServiceAzureFunction. +func (mg *LinkedServiceAzureFunction) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this LinkedServiceAzureFunction. +func (mg *LinkedServiceAzureFunction) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this LinkedServiceAzureFunction. +func (mg *LinkedServiceAzureFunction) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this LinkedServiceAzureFunction. +func (mg *LinkedServiceAzureFunction) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this LinkedServiceAzureFunction. +func (mg *LinkedServiceAzureFunction) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this LinkedServiceAzureFunction. +func (mg *LinkedServiceAzureFunction) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this LinkedServiceAzureFunction. 
+func (mg *LinkedServiceAzureFunction) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this LinkedServiceAzureFunction. +func (mg *LinkedServiceAzureFunction) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this LinkedServiceAzureFunction. +func (mg *LinkedServiceAzureFunction) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this LinkedServiceAzureSQLDatabase. +func (mg *LinkedServiceAzureSQLDatabase) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this LinkedServiceAzureSQLDatabase. +func (mg *LinkedServiceAzureSQLDatabase) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this LinkedServiceAzureSQLDatabase. +func (mg *LinkedServiceAzureSQLDatabase) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this LinkedServiceAzureSQLDatabase. +func (mg *LinkedServiceAzureSQLDatabase) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this LinkedServiceAzureSQLDatabase. +func (mg *LinkedServiceAzureSQLDatabase) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this LinkedServiceAzureSQLDatabase. +func (mg *LinkedServiceAzureSQLDatabase) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this LinkedServiceAzureSQLDatabase. +func (mg *LinkedServiceAzureSQLDatabase) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) 
+} + +// SetDeletionPolicy of this LinkedServiceAzureSQLDatabase. +func (mg *LinkedServiceAzureSQLDatabase) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this LinkedServiceAzureSQLDatabase. +func (mg *LinkedServiceAzureSQLDatabase) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this LinkedServiceAzureSQLDatabase. +func (mg *LinkedServiceAzureSQLDatabase) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this LinkedServiceAzureSQLDatabase. +func (mg *LinkedServiceAzureSQLDatabase) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this LinkedServiceAzureSQLDatabase. +func (mg *LinkedServiceAzureSQLDatabase) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this LinkedServiceOData. +func (mg *LinkedServiceOData) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this LinkedServiceOData. +func (mg *LinkedServiceOData) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this LinkedServiceOData. +func (mg *LinkedServiceOData) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this LinkedServiceOData. +func (mg *LinkedServiceOData) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this LinkedServiceOData. 
+func (mg *LinkedServiceOData) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this LinkedServiceOData. +func (mg *LinkedServiceOData) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this LinkedServiceOData. +func (mg *LinkedServiceOData) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this LinkedServiceOData. +func (mg *LinkedServiceOData) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this LinkedServiceOData. +func (mg *LinkedServiceOData) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this LinkedServiceOData. +func (mg *LinkedServiceOData) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this LinkedServiceOData. +func (mg *LinkedServiceOData) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this LinkedServiceOData. +func (mg *LinkedServiceOData) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this LinkedServiceOdbc. +func (mg *LinkedServiceOdbc) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this LinkedServiceOdbc. +func (mg *LinkedServiceOdbc) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this LinkedServiceOdbc. 
+func (mg *LinkedServiceOdbc) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this LinkedServiceOdbc. +func (mg *LinkedServiceOdbc) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this LinkedServiceOdbc. +func (mg *LinkedServiceOdbc) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this LinkedServiceOdbc. +func (mg *LinkedServiceOdbc) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this LinkedServiceOdbc. +func (mg *LinkedServiceOdbc) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this LinkedServiceOdbc. +func (mg *LinkedServiceOdbc) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this LinkedServiceOdbc. +func (mg *LinkedServiceOdbc) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this LinkedServiceOdbc. +func (mg *LinkedServiceOdbc) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this LinkedServiceOdbc. +func (mg *LinkedServiceOdbc) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this LinkedServiceOdbc. +func (mg *LinkedServiceOdbc) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this LinkedServiceSQLServer. 
+func (mg *LinkedServiceSQLServer) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this LinkedServiceSQLServer. +func (mg *LinkedServiceSQLServer) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this LinkedServiceSQLServer. +func (mg *LinkedServiceSQLServer) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this LinkedServiceSQLServer. +func (mg *LinkedServiceSQLServer) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this LinkedServiceSQLServer. +func (mg *LinkedServiceSQLServer) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this LinkedServiceSQLServer. +func (mg *LinkedServiceSQLServer) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this LinkedServiceSQLServer. +func (mg *LinkedServiceSQLServer) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this LinkedServiceSQLServer. +func (mg *LinkedServiceSQLServer) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this LinkedServiceSQLServer. +func (mg *LinkedServiceSQLServer) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this LinkedServiceSQLServer. +func (mg *LinkedServiceSQLServer) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this LinkedServiceSQLServer. 
+func (mg *LinkedServiceSQLServer) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this LinkedServiceSQLServer. +func (mg *LinkedServiceSQLServer) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this LinkedServiceSnowflake. +func (mg *LinkedServiceSnowflake) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this LinkedServiceSnowflake. +func (mg *LinkedServiceSnowflake) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this LinkedServiceSnowflake. +func (mg *LinkedServiceSnowflake) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this LinkedServiceSnowflake. +func (mg *LinkedServiceSnowflake) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this LinkedServiceSnowflake. +func (mg *LinkedServiceSnowflake) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this LinkedServiceSnowflake. +func (mg *LinkedServiceSnowflake) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this LinkedServiceSnowflake. +func (mg *LinkedServiceSnowflake) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this LinkedServiceSnowflake. +func (mg *LinkedServiceSnowflake) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this LinkedServiceSnowflake. 
+func (mg *LinkedServiceSnowflake) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this LinkedServiceSnowflake. +func (mg *LinkedServiceSnowflake) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this LinkedServiceSnowflake. +func (mg *LinkedServiceSnowflake) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this LinkedServiceSnowflake. +func (mg *LinkedServiceSnowflake) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this LinkedServiceSynapse. +func (mg *LinkedServiceSynapse) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this LinkedServiceSynapse. +func (mg *LinkedServiceSynapse) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this LinkedServiceSynapse. +func (mg *LinkedServiceSynapse) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this LinkedServiceSynapse. +func (mg *LinkedServiceSynapse) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this LinkedServiceSynapse. +func (mg *LinkedServiceSynapse) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this LinkedServiceSynapse. +func (mg *LinkedServiceSynapse) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this LinkedServiceSynapse. 
+func (mg *LinkedServiceSynapse) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this LinkedServiceSynapse. +func (mg *LinkedServiceSynapse) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this LinkedServiceSynapse. +func (mg *LinkedServiceSynapse) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this LinkedServiceSynapse. +func (mg *LinkedServiceSynapse) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this LinkedServiceSynapse. +func (mg *LinkedServiceSynapse) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this LinkedServiceSynapse. +func (mg *LinkedServiceSynapse) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this TriggerSchedule. +func (mg *TriggerSchedule) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this TriggerSchedule. +func (mg *TriggerSchedule) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this TriggerSchedule. +func (mg *TriggerSchedule) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this TriggerSchedule. +func (mg *TriggerSchedule) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this TriggerSchedule. +func (mg *TriggerSchedule) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this TriggerSchedule. 
+func (mg *TriggerSchedule) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this TriggerSchedule. +func (mg *TriggerSchedule) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this TriggerSchedule. +func (mg *TriggerSchedule) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this TriggerSchedule. +func (mg *TriggerSchedule) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this TriggerSchedule. +func (mg *TriggerSchedule) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this TriggerSchedule. +func (mg *TriggerSchedule) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this TriggerSchedule. +func (mg *TriggerSchedule) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/datafactory/v1beta2/zz_generated.managedlist.go b/apis/datafactory/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..85685403e --- /dev/null +++ b/apis/datafactory/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,197 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this CustomDataSetList. +func (l *CustomDataSetList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this DataFlowList. 
+func (l *DataFlowList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this DataSetBinaryList. +func (l *DataSetBinaryList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this DataSetDelimitedTextList. +func (l *DataSetDelimitedTextList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this DataSetJSONList. +func (l *DataSetJSONList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this DataSetParquetList. +func (l *DataSetParquetList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this FactoryList. +func (l *FactoryList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this IntegrationRuntimeAzureSSISList. +func (l *IntegrationRuntimeAzureSSISList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this IntegrationRuntimeManagedList. +func (l *IntegrationRuntimeManagedList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this LinkedCustomServiceList. 
+func (l *LinkedCustomServiceList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this LinkedServiceAzureBlobStorageList. +func (l *LinkedServiceAzureBlobStorageList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this LinkedServiceAzureDatabricksList. +func (l *LinkedServiceAzureDatabricksList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this LinkedServiceAzureFileStorageList. +func (l *LinkedServiceAzureFileStorageList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this LinkedServiceAzureFunctionList. +func (l *LinkedServiceAzureFunctionList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this LinkedServiceAzureSQLDatabaseList. +func (l *LinkedServiceAzureSQLDatabaseList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this LinkedServiceODataList. +func (l *LinkedServiceODataList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this LinkedServiceOdbcList. 
+func (l *LinkedServiceOdbcList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this LinkedServiceSQLServerList. +func (l *LinkedServiceSQLServerList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this LinkedServiceSnowflakeList. +func (l *LinkedServiceSnowflakeList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this LinkedServiceSynapseList. +func (l *LinkedServiceSynapseList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this TriggerScheduleList. +func (l *TriggerScheduleList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/datafactory/v1beta2/zz_generated.resolvers.go b/apis/datafactory/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..caff5bcd3 --- /dev/null +++ b/apis/datafactory/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,1418 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + rconfig "github.com/upbound/provider-azure/apis/rconfig" + apisresolver "github.com/upbound/provider-azure/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *CustomDataSet) ResolveReferences( // ResolveReferences of this CustomDataSet. + ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta2", "Factory", "FactoryList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DataFactoryID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.DataFactoryIDRef, + Selector: mg.Spec.ForProvider.DataFactoryIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DataFactoryID") + } + mg.Spec.ForProvider.DataFactoryID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DataFactoryIDRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.LinkedService != nil { + { + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta2", "LinkedCustomService", "LinkedCustomServiceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.ForProvider.LinkedService.Name), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.LinkedService.NameRef, + Selector: mg.Spec.ForProvider.LinkedService.NameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.LinkedService.Name") + } + mg.Spec.ForProvider.LinkedService.Name = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.LinkedService.NameRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.LinkedService != nil { + { + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta2", "LinkedCustomService", "LinkedCustomServiceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.LinkedService.Name), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.LinkedService.NameRef, + Selector: mg.Spec.InitProvider.LinkedService.NameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.LinkedService.Name") + } + mg.Spec.InitProvider.LinkedService.Name = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.LinkedService.NameRef = rsp.ResolvedReference + + } + + return nil +} + +// ResolveReferences of this DataFlow. 
+func (mg *DataFlow) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta2", "Factory", "FactoryList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DataFactoryID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.DataFactoryIDRef, + Selector: mg.Spec.ForProvider.DataFactoryIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DataFactoryID") + } + mg.Spec.ForProvider.DataFactoryID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DataFactoryIDRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.ForProvider.Sink); i3++ { + if mg.Spec.ForProvider.Sink[i3].DataSet != nil { + { + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta2", "DataSetJSON", "DataSetJSONList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Sink[i3].DataSet.Name), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.Sink[i3].DataSet.NameRef, + Selector: mg.Spec.ForProvider.Sink[i3].DataSet.NameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Sink[i3].DataSet.Name") + } + mg.Spec.ForProvider.Sink[i3].DataSet.Name = reference.ToPtrValue(rsp.ResolvedValue) + 
mg.Spec.ForProvider.Sink[i3].DataSet.NameRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.Source); i3++ { + if mg.Spec.ForProvider.Source[i3].DataSet != nil { + { + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta2", "DataSetJSON", "DataSetJSONList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Source[i3].DataSet.Name), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.Source[i3].DataSet.NameRef, + Selector: mg.Spec.ForProvider.Source[i3].DataSet.NameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Source[i3].DataSet.Name") + } + mg.Spec.ForProvider.Source[i3].DataSet.Name = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Source[i3].DataSet.NameRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Sink); i3++ { + if mg.Spec.InitProvider.Sink[i3].DataSet != nil { + { + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta2", "DataSetJSON", "DataSetJSONList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Sink[i3].DataSet.Name), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.Sink[i3].DataSet.NameRef, + Selector: mg.Spec.InitProvider.Sink[i3].DataSet.NameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Sink[i3].DataSet.Name") + } + mg.Spec.InitProvider.Sink[i3].DataSet.Name = reference.ToPtrValue(rsp.ResolvedValue) + 
mg.Spec.InitProvider.Sink[i3].DataSet.NameRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.Source); i3++ { + if mg.Spec.InitProvider.Source[i3].DataSet != nil { + { + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta2", "DataSetJSON", "DataSetJSONList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Source[i3].DataSet.Name), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.Source[i3].DataSet.NameRef, + Selector: mg.Spec.InitProvider.Source[i3].DataSet.NameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Source[i3].DataSet.Name") + } + mg.Spec.InitProvider.Source[i3].DataSet.Name = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Source[i3].DataSet.NameRef = rsp.ResolvedReference + + } + } + + return nil +} + +// ResolveReferences of this DataSetBinary. 
+func (mg *DataSetBinary) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta2", "Factory", "FactoryList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DataFactoryID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.DataFactoryIDRef, + Selector: mg.Spec.ForProvider.DataFactoryIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DataFactoryID") + } + mg.Spec.ForProvider.DataFactoryID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DataFactoryIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta1", "LinkedServiceSFTP", "LinkedServiceSFTPList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.LinkedServiceName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.LinkedServiceNameRef, + Selector: mg.Spec.ForProvider.LinkedServiceNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.LinkedServiceName") + } + mg.Spec.ForProvider.LinkedServiceName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.LinkedServiceNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", 
"v1beta1", "LinkedServiceSFTP", "LinkedServiceSFTPList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.LinkedServiceName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.LinkedServiceNameRef, + Selector: mg.Spec.InitProvider.LinkedServiceNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.LinkedServiceName") + } + mg.Spec.InitProvider.LinkedServiceName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.LinkedServiceNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this DataSetDelimitedText. +func (mg *DataSetDelimitedText) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta2", "Factory", "FactoryList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DataFactoryID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.DataFactoryIDRef, + Selector: mg.Spec.ForProvider.DataFactoryIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DataFactoryID") + } + mg.Spec.ForProvider.DataFactoryID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DataFactoryIDRef = rsp.ResolvedReference + { + m, l, err = 
apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta1", "LinkedServiceWeb", "LinkedServiceWebList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.LinkedServiceName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.LinkedServiceNameRef, + Selector: mg.Spec.ForProvider.LinkedServiceNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.LinkedServiceName") + } + mg.Spec.ForProvider.LinkedServiceName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.LinkedServiceNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta1", "LinkedServiceWeb", "LinkedServiceWebList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.LinkedServiceName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.LinkedServiceNameRef, + Selector: mg.Spec.InitProvider.LinkedServiceNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.LinkedServiceName") + } + mg.Spec.InitProvider.LinkedServiceName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.LinkedServiceNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this DataSetJSON. 
+func (mg *DataSetJSON) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta2", "Factory", "FactoryList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DataFactoryID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.DataFactoryIDRef, + Selector: mg.Spec.ForProvider.DataFactoryIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DataFactoryID") + } + mg.Spec.ForProvider.DataFactoryID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DataFactoryIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta1", "LinkedServiceWeb", "LinkedServiceWebList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.LinkedServiceName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.LinkedServiceNameRef, + Selector: mg.Spec.ForProvider.LinkedServiceNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.LinkedServiceName") + } + mg.Spec.ForProvider.LinkedServiceName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.LinkedServiceNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", 
"v1beta1", "LinkedServiceWeb", "LinkedServiceWebList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.LinkedServiceName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.LinkedServiceNameRef, + Selector: mg.Spec.InitProvider.LinkedServiceNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.LinkedServiceName") + } + mg.Spec.InitProvider.LinkedServiceName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.LinkedServiceNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this DataSetParquet. +func (mg *DataSetParquet) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta2", "Factory", "FactoryList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DataFactoryID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.DataFactoryIDRef, + Selector: mg.Spec.ForProvider.DataFactoryIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DataFactoryID") + } + mg.Spec.ForProvider.DataFactoryID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DataFactoryIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta1", 
"LinkedServiceWeb", "LinkedServiceWebList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.LinkedServiceName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.LinkedServiceNameRef, + Selector: mg.Spec.ForProvider.LinkedServiceNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.LinkedServiceName") + } + mg.Spec.ForProvider.LinkedServiceName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.LinkedServiceNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta1", "LinkedServiceWeb", "LinkedServiceWebList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.LinkedServiceName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.LinkedServiceNameRef, + Selector: mg.Spec.InitProvider.LinkedServiceNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.LinkedServiceName") + } + mg.Spec.InitProvider.LinkedServiceName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.LinkedServiceNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Factory. 
+func (mg *Factory) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this IntegrationRuntimeAzureSSIS. 
+func (mg *IntegrationRuntimeAzureSSIS) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta2", "Factory", "FactoryList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DataFactoryID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.DataFactoryIDRef, + Selector: mg.Spec.ForProvider.DataFactoryIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DataFactoryID") + } + mg.Spec.ForProvider.DataFactoryID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DataFactoryIDRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.ExpressVnetIntegration != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ExpressVnetIntegration.SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.ExpressVnetIntegration.SubnetIDRef, + Selector: mg.Spec.ForProvider.ExpressVnetIntegration.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ExpressVnetIntegration.SubnetID") + } + mg.Spec.ForProvider.ExpressVnetIntegration.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + 
mg.Spec.ForProvider.ExpressVnetIntegration.SubnetIDRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.VnetIntegration != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.VnetIntegration.SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.VnetIntegration.SubnetIDRef, + Selector: mg.Spec.ForProvider.VnetIntegration.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VnetIntegration.SubnetID") + } + mg.Spec.ForProvider.VnetIntegration.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.VnetIntegration.SubnetIDRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.VnetIntegration != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.VnetIntegration.SubnetName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.VnetIntegration.SubnetNameRef, + Selector: mg.Spec.ForProvider.VnetIntegration.SubnetNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VnetIntegration.SubnetName") + } + mg.Spec.ForProvider.VnetIntegration.SubnetName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.VnetIntegration.SubnetNameRef = rsp.ResolvedReference + + } + if 
mg.Spec.InitProvider.ExpressVnetIntegration != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ExpressVnetIntegration.SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.ExpressVnetIntegration.SubnetIDRef, + Selector: mg.Spec.InitProvider.ExpressVnetIntegration.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ExpressVnetIntegration.SubnetID") + } + mg.Spec.InitProvider.ExpressVnetIntegration.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ExpressVnetIntegration.SubnetIDRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.VnetIntegration != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.VnetIntegration.SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.VnetIntegration.SubnetIDRef, + Selector: mg.Spec.InitProvider.VnetIntegration.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.VnetIntegration.SubnetID") + } + mg.Spec.InitProvider.VnetIntegration.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.VnetIntegration.SubnetIDRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.VnetIntegration != nil { + { + m, l, err = 
apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.VnetIntegration.SubnetName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.VnetIntegration.SubnetNameRef, + Selector: mg.Spec.InitProvider.VnetIntegration.SubnetNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.VnetIntegration.SubnetName") + } + mg.Spec.InitProvider.VnetIntegration.SubnetName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.VnetIntegration.SubnetNameRef = rsp.ResolvedReference + + } + + return nil +} + +// ResolveReferences of this IntegrationRuntimeManaged. +func (mg *IntegrationRuntimeManaged) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta2", "Factory", "FactoryList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DataFactoryID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.DataFactoryIDRef, + Selector: mg.Spec.ForProvider.DataFactoryIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DataFactoryID") + } + mg.Spec.ForProvider.DataFactoryID = reference.ToPtrValue(rsp.ResolvedValue) + 
mg.Spec.ForProvider.DataFactoryIDRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.VnetIntegration != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.VnetIntegration.SubnetName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.VnetIntegration.SubnetNameRef, + Selector: mg.Spec.ForProvider.VnetIntegration.SubnetNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VnetIntegration.SubnetName") + } + mg.Spec.ForProvider.VnetIntegration.SubnetName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.VnetIntegration.SubnetNameRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.VnetIntegration != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.VnetIntegration.SubnetName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.VnetIntegration.SubnetNameRef, + Selector: mg.Spec.InitProvider.VnetIntegration.SubnetNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.VnetIntegration.SubnetName") + } + mg.Spec.InitProvider.VnetIntegration.SubnetName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.VnetIntegration.SubnetNameRef = rsp.ResolvedReference + + } + + return nil +} + +// ResolveReferences of this 
LinkedCustomService. +func (mg *LinkedCustomService) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta2", "Factory", "FactoryList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DataFactoryID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.DataFactoryIDRef, + Selector: mg.Spec.ForProvider.DataFactoryIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DataFactoryID") + } + mg.Spec.ForProvider.DataFactoryID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DataFactoryIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this LinkedServiceAzureBlobStorage. 
+func (mg *LinkedServiceAzureBlobStorage) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta2", "Factory", "FactoryList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DataFactoryID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.DataFactoryIDRef, + Selector: mg.Spec.ForProvider.DataFactoryIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DataFactoryID") + } + mg.Spec.ForProvider.DataFactoryID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DataFactoryIDRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.KeyVaultSASToken != nil { + { + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta1", "LinkedServiceKeyVault", "LinkedServiceKeyVaultList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.KeyVaultSASToken.LinkedServiceName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.KeyVaultSASToken.LinkedServiceNameRef, + Selector: mg.Spec.ForProvider.KeyVaultSASToken.LinkedServiceNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.KeyVaultSASToken.LinkedServiceName") + } + mg.Spec.ForProvider.KeyVaultSASToken.LinkedServiceName = 
reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.KeyVaultSASToken.LinkedServiceNameRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.ServicePrincipalLinkedKeyVaultKey != nil { + { + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta1", "LinkedServiceKeyVault", "LinkedServiceKeyVaultList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ServicePrincipalLinkedKeyVaultKey.LinkedServiceName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ServicePrincipalLinkedKeyVaultKey.LinkedServiceNameRef, + Selector: mg.Spec.ForProvider.ServicePrincipalLinkedKeyVaultKey.LinkedServiceNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ServicePrincipalLinkedKeyVaultKey.LinkedServiceName") + } + mg.Spec.ForProvider.ServicePrincipalLinkedKeyVaultKey.LinkedServiceName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ServicePrincipalLinkedKeyVaultKey.LinkedServiceNameRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.KeyVaultSASToken != nil { + { + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta1", "LinkedServiceKeyVault", "LinkedServiceKeyVaultList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.KeyVaultSASToken.LinkedServiceName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.KeyVaultSASToken.LinkedServiceNameRef, + Selector: mg.Spec.InitProvider.KeyVaultSASToken.LinkedServiceNameSelector, + To: reference.To{List: l, Managed: m}, + }) + 
} + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.KeyVaultSASToken.LinkedServiceName") + } + mg.Spec.InitProvider.KeyVaultSASToken.LinkedServiceName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.KeyVaultSASToken.LinkedServiceNameRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.ServicePrincipalLinkedKeyVaultKey != nil { + { + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta1", "LinkedServiceKeyVault", "LinkedServiceKeyVaultList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ServicePrincipalLinkedKeyVaultKey.LinkedServiceName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ServicePrincipalLinkedKeyVaultKey.LinkedServiceNameRef, + Selector: mg.Spec.InitProvider.ServicePrincipalLinkedKeyVaultKey.LinkedServiceNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ServicePrincipalLinkedKeyVaultKey.LinkedServiceName") + } + mg.Spec.InitProvider.ServicePrincipalLinkedKeyVaultKey.LinkedServiceName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ServicePrincipalLinkedKeyVaultKey.LinkedServiceNameRef = rsp.ResolvedReference + + } + + return nil +} + +// ResolveReferences of this LinkedServiceAzureDatabricks. 
+func (mg *LinkedServiceAzureDatabricks) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta2", "Factory", "FactoryList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DataFactoryID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.DataFactoryIDRef, + Selector: mg.Spec.ForProvider.DataFactoryIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DataFactoryID") + } + mg.Spec.ForProvider.DataFactoryID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DataFactoryIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("databricks.azure.upbound.io", "v1beta2", "Workspace", "WorkspaceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.MsiWorkSpaceResourceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.MsiWorkSpaceResourceIDRef, + Selector: mg.Spec.ForProvider.MsiWorkSpaceResourceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.MsiWorkSpaceResourceID") + } + mg.Spec.ForProvider.MsiWorkSpaceResourceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.MsiWorkSpaceResourceIDRef = rsp.ResolvedReference + { + m, l, err = 
apisresolver.GetManagedResource("databricks.azure.upbound.io", "v1beta2", "Workspace", "WorkspaceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.MsiWorkSpaceResourceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.MsiWorkSpaceResourceIDRef, + Selector: mg.Spec.InitProvider.MsiWorkSpaceResourceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.MsiWorkSpaceResourceID") + } + mg.Spec.InitProvider.MsiWorkSpaceResourceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.MsiWorkSpaceResourceIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this LinkedServiceAzureFileStorage. +func (mg *LinkedServiceAzureFileStorage) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta2", "Factory", "FactoryList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DataFactoryID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.DataFactoryIDRef, + Selector: mg.Spec.ForProvider.DataFactoryIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DataFactoryID") + } + mg.Spec.ForProvider.DataFactoryID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DataFactoryIDRef = 
rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this LinkedServiceAzureFunction. +func (mg *LinkedServiceAzureFunction) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta2", "Factory", "FactoryList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DataFactoryID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.DataFactoryIDRef, + Selector: mg.Spec.ForProvider.DataFactoryIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DataFactoryID") + } + mg.Spec.ForProvider.DataFactoryID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DataFactoryIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this LinkedServiceAzureSQLDatabase. 
+func (mg *LinkedServiceAzureSQLDatabase) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta2", "Factory", "FactoryList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DataFactoryID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.DataFactoryIDRef, + Selector: mg.Spec.ForProvider.DataFactoryIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DataFactoryID") + } + mg.Spec.ForProvider.DataFactoryID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DataFactoryIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this LinkedServiceOData. 
+func (mg *LinkedServiceOData) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta2", "Factory", "FactoryList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DataFactoryID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.DataFactoryIDRef, + Selector: mg.Spec.ForProvider.DataFactoryIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DataFactoryID") + } + mg.Spec.ForProvider.DataFactoryID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DataFactoryIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this LinkedServiceOdbc. 
+func (mg *LinkedServiceOdbc) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta2", "Factory", "FactoryList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DataFactoryID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.DataFactoryIDRef, + Selector: mg.Spec.ForProvider.DataFactoryIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DataFactoryID") + } + mg.Spec.ForProvider.DataFactoryID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DataFactoryIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this LinkedServiceSQLServer. 
+func (mg *LinkedServiceSQLServer) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta2", "Factory", "FactoryList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DataFactoryID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.DataFactoryIDRef, + Selector: mg.Spec.ForProvider.DataFactoryIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DataFactoryID") + } + mg.Spec.ForProvider.DataFactoryID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DataFactoryIDRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.KeyVaultPassword != nil { + { + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta1", "LinkedServiceKeyVault", "LinkedServiceKeyVaultList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.KeyVaultPassword.LinkedServiceName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.KeyVaultPassword.LinkedServiceNameRef, + Selector: mg.Spec.ForProvider.KeyVaultPassword.LinkedServiceNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.KeyVaultPassword.LinkedServiceName") + } + mg.Spec.ForProvider.KeyVaultPassword.LinkedServiceName = 
reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.KeyVaultPassword.LinkedServiceNameRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.KeyVaultPassword != nil { + { + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta1", "LinkedServiceKeyVault", "LinkedServiceKeyVaultList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.KeyVaultPassword.LinkedServiceName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.KeyVaultPassword.LinkedServiceNameRef, + Selector: mg.Spec.InitProvider.KeyVaultPassword.LinkedServiceNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.KeyVaultPassword.LinkedServiceName") + } + mg.Spec.InitProvider.KeyVaultPassword.LinkedServiceName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.KeyVaultPassword.LinkedServiceNameRef = rsp.ResolvedReference + + } + + return nil +} + +// ResolveReferences of this LinkedServiceSnowflake. 
+func (mg *LinkedServiceSnowflake) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta2", "Factory", "FactoryList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DataFactoryID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.DataFactoryIDRef, + Selector: mg.Spec.ForProvider.DataFactoryIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DataFactoryID") + } + mg.Spec.ForProvider.DataFactoryID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DataFactoryIDRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.KeyVaultPassword != nil { + { + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta1", "LinkedServiceKeyVault", "LinkedServiceKeyVaultList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.KeyVaultPassword.LinkedServiceName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.KeyVaultPassword.LinkedServiceNameRef, + Selector: mg.Spec.ForProvider.KeyVaultPassword.LinkedServiceNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.KeyVaultPassword.LinkedServiceName") + } + mg.Spec.ForProvider.KeyVaultPassword.LinkedServiceName = 
reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.KeyVaultPassword.LinkedServiceNameRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.KeyVaultPassword != nil { + { + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta1", "LinkedServiceKeyVault", "LinkedServiceKeyVaultList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.KeyVaultPassword.LinkedServiceName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.KeyVaultPassword.LinkedServiceNameRef, + Selector: mg.Spec.InitProvider.KeyVaultPassword.LinkedServiceNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.KeyVaultPassword.LinkedServiceName") + } + mg.Spec.InitProvider.KeyVaultPassword.LinkedServiceName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.KeyVaultPassword.LinkedServiceNameRef = rsp.ResolvedReference + + } + + return nil +} + +// ResolveReferences of this LinkedServiceSynapse. 
+func (mg *LinkedServiceSynapse) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta2", "Factory", "FactoryList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DataFactoryID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.DataFactoryIDRef, + Selector: mg.Spec.ForProvider.DataFactoryIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DataFactoryID") + } + mg.Spec.ForProvider.DataFactoryID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DataFactoryIDRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.KeyVaultPassword != nil { + { + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta1", "LinkedServiceKeyVault", "LinkedServiceKeyVaultList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.KeyVaultPassword.LinkedServiceName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.KeyVaultPassword.LinkedServiceNameRef, + Selector: mg.Spec.ForProvider.KeyVaultPassword.LinkedServiceNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.KeyVaultPassword.LinkedServiceName") + } + mg.Spec.ForProvider.KeyVaultPassword.LinkedServiceName = 
reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.KeyVaultPassword.LinkedServiceNameRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.KeyVaultPassword != nil { + { + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta1", "LinkedServiceKeyVault", "LinkedServiceKeyVaultList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.KeyVaultPassword.LinkedServiceName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.KeyVaultPassword.LinkedServiceNameRef, + Selector: mg.Spec.InitProvider.KeyVaultPassword.LinkedServiceNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.KeyVaultPassword.LinkedServiceName") + } + mg.Spec.InitProvider.KeyVaultPassword.LinkedServiceName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.KeyVaultPassword.LinkedServiceNameRef = rsp.ResolvedReference + + } + + return nil +} + +// ResolveReferences of this TriggerSchedule. 
+func (mg *TriggerSchedule) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta2", "Factory", "FactoryList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DataFactoryID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.DataFactoryIDRef, + Selector: mg.Spec.ForProvider.DataFactoryIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DataFactoryID") + } + mg.Spec.ForProvider.DataFactoryID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DataFactoryIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta1", "Pipeline", "PipelineList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.PipelineName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.PipelineNameRef, + Selector: mg.Spec.ForProvider.PipelineNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.PipelineName") + } + mg.Spec.ForProvider.PipelineName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.PipelineNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("datafactory.azure.upbound.io", "v1beta1", "Pipeline", "PipelineList") + if 
err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.PipelineName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.PipelineNameRef, + Selector: mg.Spec.InitProvider.PipelineNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.PipelineName") + } + mg.Spec.InitProvider.PipelineName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.PipelineNameRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/datafactory/v1beta2/zz_groupversion_info.go b/apis/datafactory/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..14d8b1fc5 --- /dev/null +++ b/apis/datafactory/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=datafactory.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "datafactory.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/datafactory/v1beta2/zz_integrationruntimeazuressis_terraformed.go b/apis/datafactory/v1beta2/zz_integrationruntimeazuressis_terraformed.go new file mode 100755 index 000000000..0deef375e --- /dev/null +++ b/apis/datafactory/v1beta2/zz_integrationruntimeazuressis_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this IntegrationRuntimeAzureSSIS +func (mg *IntegrationRuntimeAzureSSIS) GetTerraformResourceType() string { + return "azurerm_data_factory_integration_runtime_azure_ssis" +} + +// GetConnectionDetailsMapping for this IntegrationRuntimeAzureSSIS +func (tr *IntegrationRuntimeAzureSSIS) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"catalog_info[*].administrator_password": "spec.forProvider.catalogInfo[*].administratorPasswordSecretRef", "custom_setup_script[*].sas_token": "spec.forProvider.customSetupScript[*].sasTokenSecretRef", "express_custom_setup[*].command_key[*].password": "spec.forProvider.expressCustomSetup[*].commandKey[*].passwordSecretRef", "express_custom_setup[*].component[*].license": "spec.forProvider.expressCustomSetup[*].component[*].licenseSecretRef"} +} + +// GetObservation of this IntegrationRuntimeAzureSSIS +func (tr *IntegrationRuntimeAzureSSIS) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this IntegrationRuntimeAzureSSIS +func (tr *IntegrationRuntimeAzureSSIS) SetObservation(obs 
map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this IntegrationRuntimeAzureSSIS +func (tr *IntegrationRuntimeAzureSSIS) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this IntegrationRuntimeAzureSSIS +func (tr *IntegrationRuntimeAzureSSIS) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this IntegrationRuntimeAzureSSIS +func (tr *IntegrationRuntimeAzureSSIS) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this IntegrationRuntimeAzureSSIS +func (tr *IntegrationRuntimeAzureSSIS) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this IntegrationRuntimeAzureSSIS +func (tr *IntegrationRuntimeAzureSSIS) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. 
As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this IntegrationRuntimeAzureSSIS using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *IntegrationRuntimeAzureSSIS) LateInitialize(attrs []byte) (bool, error) { + params := &IntegrationRuntimeAzureSSISParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *IntegrationRuntimeAzureSSIS) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/datafactory/v1beta2/zz_integrationruntimeazuressis_types.go b/apis/datafactory/v1beta2/zz_integrationruntimeazuressis_types.go new file mode 100755 index 000000000..02a8cfd74 --- /dev/null +++ b/apis/datafactory/v1beta2/zz_integrationruntimeazuressis_types.go @@ -0,0 +1,767 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CatalogInfoInitParameters struct { + + // Administrator login name for the SQL Server. + AdministratorLogin *string `json:"administratorLogin,omitempty" tf:"administrator_login,omitempty"` + + // The dual standby Azure-SSIS Integration Runtime pair with SSISDB failover. + DualStandbyPairName *string `json:"dualStandbyPairName,omitempty" tf:"dual_standby_pair_name,omitempty"` + + // The name of SQL elastic pool where the database will be created for the SSIS catalog. Mutually exclusive with pricing_tier. + ElasticPoolName *string `json:"elasticPoolName,omitempty" tf:"elastic_pool_name,omitempty"` + + // Pricing tier for the database that will be created for the SSIS catalog. Valid values are: Basic, S0, S1, S2, S3, S4, S6, S7, S9, S12, P1, P2, P4, P6, P11, P15, GP_S_Gen5_1, GP_S_Gen5_2, GP_S_Gen5_4, GP_S_Gen5_6, GP_S_Gen5_8, GP_S_Gen5_10, GP_S_Gen5_12, GP_S_Gen5_14, GP_S_Gen5_16, GP_S_Gen5_18, GP_S_Gen5_20, GP_S_Gen5_24, GP_S_Gen5_32, GP_S_Gen5_40, GP_Gen5_2, GP_Gen5_4, GP_Gen5_6, GP_Gen5_8, GP_Gen5_10, GP_Gen5_12, GP_Gen5_14, GP_Gen5_16, GP_Gen5_18, GP_Gen5_20, GP_Gen5_24, GP_Gen5_32, GP_Gen5_40, GP_Gen5_80, BC_Gen5_2, BC_Gen5_4, BC_Gen5_6, BC_Gen5_8, BC_Gen5_10, BC_Gen5_12, BC_Gen5_14, BC_Gen5_16, BC_Gen5_18, BC_Gen5_20, BC_Gen5_24, BC_Gen5_32, BC_Gen5_40, BC_Gen5_80, HS_Gen5_2, HS_Gen5_4, HS_Gen5_6, HS_Gen5_8, HS_Gen5_10, HS_Gen5_12, HS_Gen5_14, HS_Gen5_16, HS_Gen5_18, HS_Gen5_20, HS_Gen5_24, HS_Gen5_32, HS_Gen5_40 and HS_Gen5_80. Mutually exclusive with elastic_pool_name. + PricingTier *string `json:"pricingTier,omitempty" tf:"pricing_tier,omitempty"` + + // The endpoint of an Azure SQL Server that will be used to host the SSIS catalog. 
+ ServerEndpoint *string `json:"serverEndpoint,omitempty" tf:"server_endpoint,omitempty"` +} + +type CatalogInfoObservation struct { + + // Administrator login name for the SQL Server. + AdministratorLogin *string `json:"administratorLogin,omitempty" tf:"administrator_login,omitempty"` + + // The dual standby Azure-SSIS Integration Runtime pair with SSISDB failover. + DualStandbyPairName *string `json:"dualStandbyPairName,omitempty" tf:"dual_standby_pair_name,omitempty"` + + // The name of SQL elastic pool where the database will be created for the SSIS catalog. Mutually exclusive with pricing_tier. + ElasticPoolName *string `json:"elasticPoolName,omitempty" tf:"elastic_pool_name,omitempty"` + + // Pricing tier for the database that will be created for the SSIS catalog. Valid values are: Basic, S0, S1, S2, S3, S4, S6, S7, S9, S12, P1, P2, P4, P6, P11, P15, GP_S_Gen5_1, GP_S_Gen5_2, GP_S_Gen5_4, GP_S_Gen5_6, GP_S_Gen5_8, GP_S_Gen5_10, GP_S_Gen5_12, GP_S_Gen5_14, GP_S_Gen5_16, GP_S_Gen5_18, GP_S_Gen5_20, GP_S_Gen5_24, GP_S_Gen5_32, GP_S_Gen5_40, GP_Gen5_2, GP_Gen5_4, GP_Gen5_6, GP_Gen5_8, GP_Gen5_10, GP_Gen5_12, GP_Gen5_14, GP_Gen5_16, GP_Gen5_18, GP_Gen5_20, GP_Gen5_24, GP_Gen5_32, GP_Gen5_40, GP_Gen5_80, BC_Gen5_2, BC_Gen5_4, BC_Gen5_6, BC_Gen5_8, BC_Gen5_10, BC_Gen5_12, BC_Gen5_14, BC_Gen5_16, BC_Gen5_18, BC_Gen5_20, BC_Gen5_24, BC_Gen5_32, BC_Gen5_40, BC_Gen5_80, HS_Gen5_2, HS_Gen5_4, HS_Gen5_6, HS_Gen5_8, HS_Gen5_10, HS_Gen5_12, HS_Gen5_14, HS_Gen5_16, HS_Gen5_18, HS_Gen5_20, HS_Gen5_24, HS_Gen5_32, HS_Gen5_40 and HS_Gen5_80. Mutually exclusive with elastic_pool_name. + PricingTier *string `json:"pricingTier,omitempty" tf:"pricing_tier,omitempty"` + + // The endpoint of an Azure SQL Server that will be used to host the SSIS catalog. + ServerEndpoint *string `json:"serverEndpoint,omitempty" tf:"server_endpoint,omitempty"` +} + +type CatalogInfoParameters struct { + + // Administrator login name for the SQL Server. 
+ // +kubebuilder:validation:Optional + AdministratorLogin *string `json:"administratorLogin,omitempty" tf:"administrator_login,omitempty"` + + // Administrator login password for the SQL Server. + // +kubebuilder:validation:Optional + AdministratorPasswordSecretRef *v1.SecretKeySelector `json:"administratorPasswordSecretRef,omitempty" tf:"-"` + + // The dual standby Azure-SSIS Integration Runtime pair with SSISDB failover. + // +kubebuilder:validation:Optional + DualStandbyPairName *string `json:"dualStandbyPairName,omitempty" tf:"dual_standby_pair_name,omitempty"` + + // The name of SQL elastic pool where the database will be created for the SSIS catalog. Mutually exclusive with pricing_tier. + // +kubebuilder:validation:Optional + ElasticPoolName *string `json:"elasticPoolName,omitempty" tf:"elastic_pool_name,omitempty"` + + // Pricing tier for the database that will be created for the SSIS catalog. Valid values are: Basic, S0, S1, S2, S3, S4, S6, S7, S9, S12, P1, P2, P4, P6, P11, P15, GP_S_Gen5_1, GP_S_Gen5_2, GP_S_Gen5_4, GP_S_Gen5_6, GP_S_Gen5_8, GP_S_Gen5_10, GP_S_Gen5_12, GP_S_Gen5_14, GP_S_Gen5_16, GP_S_Gen5_18, GP_S_Gen5_20, GP_S_Gen5_24, GP_S_Gen5_32, GP_S_Gen5_40, GP_Gen5_2, GP_Gen5_4, GP_Gen5_6, GP_Gen5_8, GP_Gen5_10, GP_Gen5_12, GP_Gen5_14, GP_Gen5_16, GP_Gen5_18, GP_Gen5_20, GP_Gen5_24, GP_Gen5_32, GP_Gen5_40, GP_Gen5_80, BC_Gen5_2, BC_Gen5_4, BC_Gen5_6, BC_Gen5_8, BC_Gen5_10, BC_Gen5_12, BC_Gen5_14, BC_Gen5_16, BC_Gen5_18, BC_Gen5_20, BC_Gen5_24, BC_Gen5_32, BC_Gen5_40, BC_Gen5_80, HS_Gen5_2, HS_Gen5_4, HS_Gen5_6, HS_Gen5_8, HS_Gen5_10, HS_Gen5_12, HS_Gen5_14, HS_Gen5_16, HS_Gen5_18, HS_Gen5_20, HS_Gen5_24, HS_Gen5_32, HS_Gen5_40 and HS_Gen5_80. Mutually exclusive with elastic_pool_name. + // +kubebuilder:validation:Optional + PricingTier *string `json:"pricingTier,omitempty" tf:"pricing_tier,omitempty"` + + // The endpoint of an Azure SQL Server that will be used to host the SSIS catalog. 
+ // +kubebuilder:validation:Optional + ServerEndpoint *string `json:"serverEndpoint" tf:"server_endpoint,omitempty"` +} + +type CommandKeyInitParameters struct { + + // A key_vault_secret_reference block as defined below. + KeyVaultPassword *KeyVaultPasswordInitParameters `json:"keyVaultPassword,omitempty" tf:"key_vault_password,omitempty"` + + // The target computer or domain name. + TargetName *string `json:"targetName,omitempty" tf:"target_name,omitempty"` + + // The username for the target device. + UserName *string `json:"userName,omitempty" tf:"user_name,omitempty"` +} + +type CommandKeyObservation struct { + + // A key_vault_secret_reference block as defined below. + KeyVaultPassword *KeyVaultPasswordObservation `json:"keyVaultPassword,omitempty" tf:"key_vault_password,omitempty"` + + // The target computer or domain name. + TargetName *string `json:"targetName,omitempty" tf:"target_name,omitempty"` + + // The username for the target device. + UserName *string `json:"userName,omitempty" tf:"user_name,omitempty"` +} + +type CommandKeyParameters struct { + + // A key_vault_secret_reference block as defined below. + // +kubebuilder:validation:Optional + KeyVaultPassword *KeyVaultPasswordParameters `json:"keyVaultPassword,omitempty" tf:"key_vault_password,omitempty"` + + // The password for the target device. + // +kubebuilder:validation:Optional + PasswordSecretRef *v1.SecretKeySelector `json:"passwordSecretRef,omitempty" tf:"-"` + + // The target computer or domain name. + // +kubebuilder:validation:Optional + TargetName *string `json:"targetName" tf:"target_name,omitempty"` + + // The username for the target device. + // +kubebuilder:validation:Optional + UserName *string `json:"userName" tf:"user_name,omitempty"` +} + +type ComponentInitParameters struct { + + // A key_vault_secret_reference block as defined below. 
+ KeyVaultLicense *KeyVaultLicenseInitParameters `json:"keyVaultLicense,omitempty" tf:"key_vault_license,omitempty"` + + // Name of the package store. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type ComponentObservation struct { + + // A key_vault_secret_reference block as defined below. + KeyVaultLicense *KeyVaultLicenseObservation `json:"keyVaultLicense,omitempty" tf:"key_vault_license,omitempty"` + + // Name of the package store. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type ComponentParameters struct { + + // A key_vault_secret_reference block as defined below. + // +kubebuilder:validation:Optional + KeyVaultLicense *KeyVaultLicenseParameters `json:"keyVaultLicense,omitempty" tf:"key_vault_license,omitempty"` + + // The license used for the Component. + // +kubebuilder:validation:Optional + LicenseSecretRef *v1.SecretKeySelector `json:"licenseSecretRef,omitempty" tf:"-"` + + // Name of the package store. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type CustomSetupScriptInitParameters struct { + + // The blob endpoint for the container which contains a custom setup script that will be run on every node on startup. See https://docs.microsoft.com/azure/data-factory/how-to-configure-azure-ssis-ir-custom-setup for more information. + BlobContainerURI *string `json:"blobContainerUri,omitempty" tf:"blob_container_uri,omitempty"` +} + +type CustomSetupScriptObservation struct { + + // The blob endpoint for the container which contains a custom setup script that will be run on every node on startup. See https://docs.microsoft.com/azure/data-factory/how-to-configure-azure-ssis-ir-custom-setup for more information. + BlobContainerURI *string `json:"blobContainerUri,omitempty" tf:"blob_container_uri,omitempty"` +} + +type CustomSetupScriptParameters struct { + + // The blob endpoint for the container which contains a custom setup script that will be run on every node on startup. 
See https://docs.microsoft.com/azure/data-factory/how-to-configure-azure-ssis-ir-custom-setup for more information. + // +kubebuilder:validation:Optional + BlobContainerURI *string `json:"blobContainerUri" tf:"blob_container_uri,omitempty"` + + // A container SAS token that gives access to the files. See https://docs.microsoft.com/azure/data-factory/how-to-configure-azure-ssis-ir-custom-setup for more information. + // +kubebuilder:validation:Required + SASTokenSecretRef v1.SecretKeySelector `json:"sasTokenSecretRef" tf:"-"` +} + +type ExpressCustomSetupInitParameters struct { + + // One or more command_key blocks as defined below. + CommandKey []CommandKeyInitParameters `json:"commandKey,omitempty" tf:"command_key,omitempty"` + + // One or more component blocks as defined below. + Component []ComponentInitParameters `json:"component,omitempty" tf:"component,omitempty"` + + // The Environment Variables for the Azure-SSIS Integration Runtime. + // +mapType=granular + Environment map[string]*string `json:"environment,omitempty" tf:"environment,omitempty"` + + // The version of Azure Powershell installed for the Azure-SSIS Integration Runtime. + PowershellVersion *string `json:"powershellVersion,omitempty" tf:"powershell_version,omitempty"` +} + +type ExpressCustomSetupObservation struct { + + // One or more command_key blocks as defined below. + CommandKey []CommandKeyObservation `json:"commandKey,omitempty" tf:"command_key,omitempty"` + + // One or more component blocks as defined below. + Component []ComponentObservation `json:"component,omitempty" tf:"component,omitempty"` + + // The Environment Variables for the Azure-SSIS Integration Runtime. + // +mapType=granular + Environment map[string]*string `json:"environment,omitempty" tf:"environment,omitempty"` + + // The version of Azure Powershell installed for the Azure-SSIS Integration Runtime. 
+ PowershellVersion *string `json:"powershellVersion,omitempty" tf:"powershell_version,omitempty"` +} + +type ExpressCustomSetupParameters struct { + + // One or more command_key blocks as defined below. + // +kubebuilder:validation:Optional + CommandKey []CommandKeyParameters `json:"commandKey,omitempty" tf:"command_key,omitempty"` + + // One or more component blocks as defined below. + // +kubebuilder:validation:Optional + Component []ComponentParameters `json:"component,omitempty" tf:"component,omitempty"` + + // The Environment Variables for the Azure-SSIS Integration Runtime. + // +kubebuilder:validation:Optional + // +mapType=granular + Environment map[string]*string `json:"environment,omitempty" tf:"environment,omitempty"` + + // The version of Azure Powershell installed for the Azure-SSIS Integration Runtime. + // +kubebuilder:validation:Optional + PowershellVersion *string `json:"powershellVersion,omitempty" tf:"powershell_version,omitempty"` +} + +type ExpressVnetIntegrationInitParameters struct { + + // id of the subnet to which the nodes of the Azure-SSIS Integration Runtime will be added. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` +} + +type ExpressVnetIntegrationObservation struct { + + // id of the subnet to which the nodes of the Azure-SSIS Integration Runtime will be added. 
+ SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` +} + +type ExpressVnetIntegrationParameters struct { + + // id of the subnet to which the nodes of the Azure-SSIS Integration Runtime will be added. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` +} + +type IntegrationRuntimeAzureSSISInitParameters struct { + + // A catalog_info block as defined below. + CatalogInfo *CatalogInfoInitParameters `json:"catalogInfo,omitempty" tf:"catalog_info,omitempty"` + + // The name of a Data Factory Credential that the SSIS integration will use to access data sources. For example, azurerm_data_factory_credential_user_managed_identity + CredentialName *string `json:"credentialName,omitempty" tf:"credential_name,omitempty"` + + // A custom_setup_script block as defined below. + CustomSetupScript *CustomSetupScriptInitParameters `json:"customSetupScript,omitempty" tf:"custom_setup_script,omitempty"` + + // Integration runtime description. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The Azure-SSIS Integration Runtime edition. Valid values are Standard and Enterprise. Defaults to Standard. + Edition *string `json:"edition,omitempty" tf:"edition,omitempty"` + + // An express_custom_setup block as defined below. 
+ ExpressCustomSetup *ExpressCustomSetupInitParameters `json:"expressCustomSetup,omitempty" tf:"express_custom_setup,omitempty"` + + // A express_vnet_integration block as defined below. + ExpressVnetIntegration *ExpressVnetIntegrationInitParameters `json:"expressVnetIntegration,omitempty" tf:"express_vnet_integration,omitempty"` + + // The type of the license that is used. Valid values are LicenseIncluded and BasePrice. Defaults to LicenseIncluded. + LicenseType *string `json:"licenseType,omitempty" tf:"license_type,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Defines the maximum parallel executions per node. Defaults to 1. Max is 1. + MaxParallelExecutionsPerNode *float64 `json:"maxParallelExecutionsPerNode,omitempty" tf:"max_parallel_executions_per_node,omitempty"` + + // The size of the nodes on which the Azure-SSIS Integration Runtime runs. Valid values are: Standard_D2_v3, Standard_D4_v3, Standard_D8_v3, Standard_D16_v3, Standard_D32_v3, Standard_D64_v3, Standard_E2_v3, Standard_E4_v3, Standard_E8_v3, Standard_E16_v3, Standard_E32_v3, Standard_E64_v3, Standard_D1_v2, Standard_D2_v2, Standard_D3_v2, Standard_D4_v2, Standard_A4_v2 and Standard_A8_v2 + NodeSize *string `json:"nodeSize,omitempty" tf:"node_size,omitempty"` + + // Number of nodes for the Azure-SSIS Integration Runtime. Max is 10. Defaults to 1. + NumberOfNodes *float64 `json:"numberOfNodes,omitempty" tf:"number_of_nodes,omitempty"` + + // One or more package_store block as defined below. + PackageStore []PackageStoreInitParameters `json:"packageStore,omitempty" tf:"package_store,omitempty"` + + // A proxy block as defined below. + Proxy *ProxyInitParameters `json:"proxy,omitempty" tf:"proxy,omitempty"` + + // A vnet_integration block as defined below. 
+ VnetIntegration *VnetIntegrationInitParameters `json:"vnetIntegration,omitempty" tf:"vnet_integration,omitempty"` +} + +type IntegrationRuntimeAzureSSISObservation struct { + + // A catalog_info block as defined below. + CatalogInfo *CatalogInfoObservation `json:"catalogInfo,omitempty" tf:"catalog_info,omitempty"` + + // The name of a Data Factory Credential that the SSIS integration will use to access data sources. For example, azurerm_data_factory_credential_user_managed_identity + CredentialName *string `json:"credentialName,omitempty" tf:"credential_name,omitempty"` + + // A custom_setup_script block as defined below. + CustomSetupScript *CustomSetupScriptObservation `json:"customSetupScript,omitempty" tf:"custom_setup_script,omitempty"` + + // The Data Factory ID in which to associate the Linked Service with. Changing this forces a new resource. + DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` + + // Integration runtime description. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The Azure-SSIS Integration Runtime edition. Valid values are Standard and Enterprise. Defaults to Standard. + Edition *string `json:"edition,omitempty" tf:"edition,omitempty"` + + // An express_custom_setup block as defined below. + ExpressCustomSetup *ExpressCustomSetupObservation `json:"expressCustomSetup,omitempty" tf:"express_custom_setup,omitempty"` + + // A express_vnet_integration block as defined below. + ExpressVnetIntegration *ExpressVnetIntegrationObservation `json:"expressVnetIntegration,omitempty" tf:"express_vnet_integration,omitempty"` + + // The ID of the Data Factory Azure-SSIS Integration Runtime. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The type of the license that is used. Valid values are LicenseIncluded and BasePrice. Defaults to LicenseIncluded. 
+ LicenseType *string `json:"licenseType,omitempty" tf:"license_type,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Defines the maximum parallel executions per node. Defaults to 1. Max is 1. + MaxParallelExecutionsPerNode *float64 `json:"maxParallelExecutionsPerNode,omitempty" tf:"max_parallel_executions_per_node,omitempty"` + + // The size of the nodes on which the Azure-SSIS Integration Runtime runs. Valid values are: Standard_D2_v3, Standard_D4_v3, Standard_D8_v3, Standard_D16_v3, Standard_D32_v3, Standard_D64_v3, Standard_E2_v3, Standard_E4_v3, Standard_E8_v3, Standard_E16_v3, Standard_E32_v3, Standard_E64_v3, Standard_D1_v2, Standard_D2_v2, Standard_D3_v2, Standard_D4_v2, Standard_A4_v2 and Standard_A8_v2 + NodeSize *string `json:"nodeSize,omitempty" tf:"node_size,omitempty"` + + // Number of nodes for the Azure-SSIS Integration Runtime. Max is 10. Defaults to 1. + NumberOfNodes *float64 `json:"numberOfNodes,omitempty" tf:"number_of_nodes,omitempty"` + + // One or more package_store block as defined below. + PackageStore []PackageStoreObservation `json:"packageStore,omitempty" tf:"package_store,omitempty"` + + // A proxy block as defined below. + Proxy *ProxyObservation `json:"proxy,omitempty" tf:"proxy,omitempty"` + + // A vnet_integration block as defined below. + VnetIntegration *VnetIntegrationObservation `json:"vnetIntegration,omitempty" tf:"vnet_integration,omitempty"` +} + +type IntegrationRuntimeAzureSSISParameters struct { + + // A catalog_info block as defined below. + // +kubebuilder:validation:Optional + CatalogInfo *CatalogInfoParameters `json:"catalogInfo,omitempty" tf:"catalog_info,omitempty"` + + // The name of a Data Factory Credential that the SSIS integration will use to access data sources. 
For example, azurerm_data_factory_credential_user_managed_identity + // +kubebuilder:validation:Optional + CredentialName *string `json:"credentialName,omitempty" tf:"credential_name,omitempty"` + + // A custom_setup_script block as defined below. + // +kubebuilder:validation:Optional + CustomSetupScript *CustomSetupScriptParameters `json:"customSetupScript,omitempty" tf:"custom_setup_script,omitempty"` + + // The Data Factory ID in which to associate the Linked Service with. Changing this forces a new resource. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta2.Factory + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` + + // Reference to a Factory in datafactory to populate dataFactoryId. + // +kubebuilder:validation:Optional + DataFactoryIDRef *v1.Reference `json:"dataFactoryIdRef,omitempty" tf:"-"` + + // Selector for a Factory in datafactory to populate dataFactoryId. + // +kubebuilder:validation:Optional + DataFactoryIDSelector *v1.Selector `json:"dataFactoryIdSelector,omitempty" tf:"-"` + + // Integration runtime description. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The Azure-SSIS Integration Runtime edition. Valid values are Standard and Enterprise. Defaults to Standard. + // +kubebuilder:validation:Optional + Edition *string `json:"edition,omitempty" tf:"edition,omitempty"` + + // An express_custom_setup block as defined below. + // +kubebuilder:validation:Optional + ExpressCustomSetup *ExpressCustomSetupParameters `json:"expressCustomSetup,omitempty" tf:"express_custom_setup,omitempty"` + + // A express_vnet_integration block as defined below. 
+ // +kubebuilder:validation:Optional + ExpressVnetIntegration *ExpressVnetIntegrationParameters `json:"expressVnetIntegration,omitempty" tf:"express_vnet_integration,omitempty"` + + // The type of the license that is used. Valid values are LicenseIncluded and BasePrice. Defaults to LicenseIncluded. + // +kubebuilder:validation:Optional + LicenseType *string `json:"licenseType,omitempty" tf:"license_type,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Defines the maximum parallel executions per node. Defaults to 1. Max is 1. + // +kubebuilder:validation:Optional + MaxParallelExecutionsPerNode *float64 `json:"maxParallelExecutionsPerNode,omitempty" tf:"max_parallel_executions_per_node,omitempty"` + + // The size of the nodes on which the Azure-SSIS Integration Runtime runs. Valid values are: Standard_D2_v3, Standard_D4_v3, Standard_D8_v3, Standard_D16_v3, Standard_D32_v3, Standard_D64_v3, Standard_E2_v3, Standard_E4_v3, Standard_E8_v3, Standard_E16_v3, Standard_E32_v3, Standard_E64_v3, Standard_D1_v2, Standard_D2_v2, Standard_D3_v2, Standard_D4_v2, Standard_A4_v2 and Standard_A8_v2 + // +kubebuilder:validation:Optional + NodeSize *string `json:"nodeSize,omitempty" tf:"node_size,omitempty"` + + // Number of nodes for the Azure-SSIS Integration Runtime. Max is 10. Defaults to 1. + // +kubebuilder:validation:Optional + NumberOfNodes *float64 `json:"numberOfNodes,omitempty" tf:"number_of_nodes,omitempty"` + + // One or more package_store block as defined below. + // +kubebuilder:validation:Optional + PackageStore []PackageStoreParameters `json:"packageStore,omitempty" tf:"package_store,omitempty"` + + // A proxy block as defined below. 
+ // +kubebuilder:validation:Optional + Proxy *ProxyParameters `json:"proxy,omitempty" tf:"proxy,omitempty"` + + // A vnet_integration block as defined below. + // +kubebuilder:validation:Optional + VnetIntegration *VnetIntegrationParameters `json:"vnetIntegration,omitempty" tf:"vnet_integration,omitempty"` +} + +type KeyVaultLicenseInitParameters struct { + + // Name of the Linked Service to associate with the packages. + LinkedServiceName *string `json:"linkedServiceName,omitempty" tf:"linked_service_name,omitempty"` + + // A map of parameters to associate with the Key Vault Data Factory Linked Service. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // Specifies the secret name in Azure Key Vault. + SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` + + // Specifies the secret version in Azure Key Vault. + SecretVersion *string `json:"secretVersion,omitempty" tf:"secret_version,omitempty"` +} + +type KeyVaultLicenseObservation struct { + + // Name of the Linked Service to associate with the packages. + LinkedServiceName *string `json:"linkedServiceName,omitempty" tf:"linked_service_name,omitempty"` + + // A map of parameters to associate with the Key Vault Data Factory Linked Service. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // Specifies the secret name in Azure Key Vault. + SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` + + // Specifies the secret version in Azure Key Vault. + SecretVersion *string `json:"secretVersion,omitempty" tf:"secret_version,omitempty"` +} + +type KeyVaultLicenseParameters struct { + + // Name of the Linked Service to associate with the packages. 
+ // +kubebuilder:validation:Optional + LinkedServiceName *string `json:"linkedServiceName" tf:"linked_service_name,omitempty"` + + // A map of parameters to associate with the Key Vault Data Factory Linked Service. + // +kubebuilder:validation:Optional + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // Specifies the secret name in Azure Key Vault. + // +kubebuilder:validation:Optional + SecretName *string `json:"secretName" tf:"secret_name,omitempty"` + + // Specifies the secret version in Azure Key Vault. + // +kubebuilder:validation:Optional + SecretVersion *string `json:"secretVersion,omitempty" tf:"secret_version,omitempty"` +} + +type KeyVaultPasswordInitParameters struct { + + // Name of the Linked Service to associate with the packages. + LinkedServiceName *string `json:"linkedServiceName,omitempty" tf:"linked_service_name,omitempty"` + + // A map of parameters to associate with the Key Vault Data Factory Linked Service. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // Specifies the secret name in Azure Key Vault. + SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` + + // Specifies the secret version in Azure Key Vault. + SecretVersion *string `json:"secretVersion,omitempty" tf:"secret_version,omitempty"` +} + +type KeyVaultPasswordObservation struct { + + // Name of the Linked Service to associate with the packages. + LinkedServiceName *string `json:"linkedServiceName,omitempty" tf:"linked_service_name,omitempty"` + + // A map of parameters to associate with the Key Vault Data Factory Linked Service. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // Specifies the secret name in Azure Key Vault. + SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` + + // Specifies the secret version in Azure Key Vault. 
+ SecretVersion *string `json:"secretVersion,omitempty" tf:"secret_version,omitempty"` +} + +type KeyVaultPasswordParameters struct { + + // Name of the Linked Service to associate with the packages. + // +kubebuilder:validation:Optional + LinkedServiceName *string `json:"linkedServiceName" tf:"linked_service_name,omitempty"` + + // A map of parameters to associate with the Key Vault Data Factory Linked Service. + // +kubebuilder:validation:Optional + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // Specifies the secret name in Azure Key Vault. + // +kubebuilder:validation:Optional + SecretName *string `json:"secretName" tf:"secret_name,omitempty"` + + // Specifies the secret version in Azure Key Vault. + // +kubebuilder:validation:Optional + SecretVersion *string `json:"secretVersion,omitempty" tf:"secret_version,omitempty"` +} + +type PackageStoreInitParameters struct { + + // Name of the Linked Service to associate with the packages. + LinkedServiceName *string `json:"linkedServiceName,omitempty" tf:"linked_service_name,omitempty"` + + // Name of the package store. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type PackageStoreObservation struct { + + // Name of the Linked Service to associate with the packages. + LinkedServiceName *string `json:"linkedServiceName,omitempty" tf:"linked_service_name,omitempty"` + + // Name of the package store. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type PackageStoreParameters struct { + + // Name of the Linked Service to associate with the packages. + // +kubebuilder:validation:Optional + LinkedServiceName *string `json:"linkedServiceName" tf:"linked_service_name,omitempty"` + + // Name of the package store. 
+ // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type ProxyInitParameters struct { + + // The path in the data store to be used when moving data between Self-Hosted and Azure-SSIS Integration Runtimes. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // Name of Self Hosted Integration Runtime as a proxy. + SelfHostedIntegrationRuntimeName *string `json:"selfHostedIntegrationRuntimeName,omitempty" tf:"self_hosted_integration_runtime_name,omitempty"` + + // Name of Azure Blob Storage linked service to reference the staging data store to be used when moving data between self-hosted and Azure-SSIS integration runtimes. + StagingStorageLinkedServiceName *string `json:"stagingStorageLinkedServiceName,omitempty" tf:"staging_storage_linked_service_name,omitempty"` +} + +type ProxyObservation struct { + + // The path in the data store to be used when moving data between Self-Hosted and Azure-SSIS Integration Runtimes. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // Name of Self Hosted Integration Runtime as a proxy. + SelfHostedIntegrationRuntimeName *string `json:"selfHostedIntegrationRuntimeName,omitempty" tf:"self_hosted_integration_runtime_name,omitempty"` + + // Name of Azure Blob Storage linked service to reference the staging data store to be used when moving data between self-hosted and Azure-SSIS integration runtimes. + StagingStorageLinkedServiceName *string `json:"stagingStorageLinkedServiceName,omitempty" tf:"staging_storage_linked_service_name,omitempty"` +} + +type ProxyParameters struct { + + // The path in the data store to be used when moving data between Self-Hosted and Azure-SSIS Integration Runtimes. + // +kubebuilder:validation:Optional + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // Name of Self Hosted Integration Runtime as a proxy. 
+ // +kubebuilder:validation:Optional + SelfHostedIntegrationRuntimeName *string `json:"selfHostedIntegrationRuntimeName" tf:"self_hosted_integration_runtime_name,omitempty"` + + // Name of Azure Blob Storage linked service to reference the staging data store to be used when moving data between self-hosted and Azure-SSIS integration runtimes. + // +kubebuilder:validation:Optional + StagingStorageLinkedServiceName *string `json:"stagingStorageLinkedServiceName" tf:"staging_storage_linked_service_name,omitempty"` +} + +type VnetIntegrationInitParameters struct { + + // Static public IP addresses for the Azure-SSIS Integration Runtime. The size must be 2. + PublicIps []*string `json:"publicIps,omitempty" tf:"public_ips,omitempty"` + + // id of the subnet to which the nodes of the Azure-SSIS Integration Runtime will be added. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // Name of the subnet to which the nodes of the Azure-SSIS Integration Runtime will be added. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + SubnetName *string `json:"subnetName,omitempty" tf:"subnet_name,omitempty"` + + // Reference to a Subnet in network to populate subnetName. + // +kubebuilder:validation:Optional + SubnetNameRef *v1.Reference `json:"subnetNameRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetName. 
+ // +kubebuilder:validation:Optional + SubnetNameSelector *v1.Selector `json:"subnetNameSelector,omitempty" tf:"-"` + + // ID of the virtual network to which the nodes of the Azure-SSIS Integration Runtime will be added. + VnetID *string `json:"vnetId,omitempty" tf:"vnet_id,omitempty"` +} + +type VnetIntegrationObservation struct { + + // Static public IP addresses for the Azure-SSIS Integration Runtime. The size must be 2. + PublicIps []*string `json:"publicIps,omitempty" tf:"public_ips,omitempty"` + + // id of the subnet to which the nodes of the Azure-SSIS Integration Runtime will be added. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Name of the subnet to which the nodes of the Azure-SSIS Integration Runtime will be added. + SubnetName *string `json:"subnetName,omitempty" tf:"subnet_name,omitempty"` + + // ID of the virtual network to which the nodes of the Azure-SSIS Integration Runtime will be added. + VnetID *string `json:"vnetId,omitempty" tf:"vnet_id,omitempty"` +} + +type VnetIntegrationParameters struct { + + // Static public IP addresses for the Azure-SSIS Integration Runtime. The size must be 2. + // +kubebuilder:validation:Optional + PublicIps []*string `json:"publicIps,omitempty" tf:"public_ips,omitempty"` + + // id of the subnet to which the nodes of the Azure-SSIS Integration Runtime will be added. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. 
+ // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // Name of the subnet to which the nodes of the Azure-SSIS Integration Runtime will be added. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +kubebuilder:validation:Optional + SubnetName *string `json:"subnetName,omitempty" tf:"subnet_name,omitempty"` + + // Reference to a Subnet in network to populate subnetName. + // +kubebuilder:validation:Optional + SubnetNameRef *v1.Reference `json:"subnetNameRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetName. + // +kubebuilder:validation:Optional + SubnetNameSelector *v1.Selector `json:"subnetNameSelector,omitempty" tf:"-"` + + // ID of the virtual network to which the nodes of the Azure-SSIS Integration Runtime will be added. + // +kubebuilder:validation:Optional + VnetID *string `json:"vnetId,omitempty" tf:"vnet_id,omitempty"` +} + +// IntegrationRuntimeAzureSSISSpec defines the desired state of IntegrationRuntimeAzureSSIS +type IntegrationRuntimeAzureSSISSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider IntegrationRuntimeAzureSSISParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider IntegrationRuntimeAzureSSISInitParameters `json:"initProvider,omitempty"` +} + +// IntegrationRuntimeAzureSSISStatus defines the observed state of IntegrationRuntimeAzureSSIS. +type IntegrationRuntimeAzureSSISStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider IntegrationRuntimeAzureSSISObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// IntegrationRuntimeAzureSSIS is the Schema for the IntegrationRuntimeAzureSSISs API. Manages a Data Factory Azure-SSIS Integration Runtime. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure},path=integrationruntimeazuressis +type IntegrationRuntimeAzureSSIS struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.nodeSize) || (has(self.initProvider) && has(self.initProvider.nodeSize))",message="spec.forProvider.nodeSize is a required parameter" + Spec IntegrationRuntimeAzureSSISSpec `json:"spec"` + Status IntegrationRuntimeAzureSSISStatus 
`json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// IntegrationRuntimeAzureSSISList contains a list of IntegrationRuntimeAzureSSISs +type IntegrationRuntimeAzureSSISList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []IntegrationRuntimeAzureSSIS `json:"items"` +} + +// Repository type metadata. +var ( + IntegrationRuntimeAzureSSIS_Kind = "IntegrationRuntimeAzureSSIS" + IntegrationRuntimeAzureSSIS_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: IntegrationRuntimeAzureSSIS_Kind}.String() + IntegrationRuntimeAzureSSIS_KindAPIVersion = IntegrationRuntimeAzureSSIS_Kind + "." + CRDGroupVersion.String() + IntegrationRuntimeAzureSSIS_GroupVersionKind = CRDGroupVersion.WithKind(IntegrationRuntimeAzureSSIS_Kind) +) + +func init() { + SchemeBuilder.Register(&IntegrationRuntimeAzureSSIS{}, &IntegrationRuntimeAzureSSISList{}) +} diff --git a/apis/datafactory/v1beta2/zz_integrationruntimemanaged_terraformed.go b/apis/datafactory/v1beta2/zz_integrationruntimemanaged_terraformed.go new file mode 100755 index 000000000..073718ccb --- /dev/null +++ b/apis/datafactory/v1beta2/zz_integrationruntimemanaged_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this IntegrationRuntimeManaged +func (mg *IntegrationRuntimeManaged) GetTerraformResourceType() string { + return "azurerm_data_factory_integration_runtime_managed" +} + +// GetConnectionDetailsMapping for this IntegrationRuntimeManaged +func (tr *IntegrationRuntimeManaged) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"catalog_info[*].administrator_password": "spec.forProvider.catalogInfo[*].administratorPasswordSecretRef", "custom_setup_script[*].sas_token": "spec.forProvider.customSetupScript[*].sasTokenSecretRef"} +} + +// GetObservation of this IntegrationRuntimeManaged +func (tr *IntegrationRuntimeManaged) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this IntegrationRuntimeManaged +func (tr *IntegrationRuntimeManaged) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this IntegrationRuntimeManaged +func (tr *IntegrationRuntimeManaged) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this IntegrationRuntimeManaged +func (tr *IntegrationRuntimeManaged) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this IntegrationRuntimeManaged +func (tr 
*IntegrationRuntimeManaged) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this IntegrationRuntimeManaged +func (tr *IntegrationRuntimeManaged) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this IntegrationRuntimeManaged +func (tr *IntegrationRuntimeManaged) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this IntegrationRuntimeManaged using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *IntegrationRuntimeManaged) LateInitialize(attrs []byte) (bool, error) { + params := &IntegrationRuntimeManagedParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *IntegrationRuntimeManaged) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/datafactory/v1beta2/zz_integrationruntimemanaged_types.go b/apis/datafactory/v1beta2/zz_integrationruntimemanaged_types.go new file mode 100755 index 000000000..e7495a223 --- /dev/null +++ b/apis/datafactory/v1beta2/zz_integrationruntimemanaged_types.go @@ -0,0 +1,328 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type IntegrationRuntimeManagedCatalogInfoInitParameters struct { + + // Administrator login name for the SQL Server. + AdministratorLogin *string `json:"administratorLogin,omitempty" tf:"administrator_login,omitempty"` + + // Pricing tier for the database that will be created for the SSIS catalog. Valid values are: Basic, Standard, Premium and PremiumRS. Defaults to Basic. + PricingTier *string `json:"pricingTier,omitempty" tf:"pricing_tier,omitempty"` + + // The endpoint of an Azure SQL Server that will be used to host the SSIS catalog. 
+ ServerEndpoint *string `json:"serverEndpoint,omitempty" tf:"server_endpoint,omitempty"` +} + +type IntegrationRuntimeManagedCatalogInfoObservation struct { + + // Administrator login name for the SQL Server. + AdministratorLogin *string `json:"administratorLogin,omitempty" tf:"administrator_login,omitempty"` + + // Pricing tier for the database that will be created for the SSIS catalog. Valid values are: Basic, Standard, Premium and PremiumRS. Defaults to Basic. + PricingTier *string `json:"pricingTier,omitempty" tf:"pricing_tier,omitempty"` + + // The endpoint of an Azure SQL Server that will be used to host the SSIS catalog. + ServerEndpoint *string `json:"serverEndpoint,omitempty" tf:"server_endpoint,omitempty"` +} + +type IntegrationRuntimeManagedCatalogInfoParameters struct { + + // Administrator login name for the SQL Server. + // +kubebuilder:validation:Optional + AdministratorLogin *string `json:"administratorLogin,omitempty" tf:"administrator_login,omitempty"` + + // Administrator login password for the SQL Server. + // +kubebuilder:validation:Optional + AdministratorPasswordSecretRef *v1.SecretKeySelector `json:"administratorPasswordSecretRef,omitempty" tf:"-"` + + // Pricing tier for the database that will be created for the SSIS catalog. Valid values are: Basic, Standard, Premium and PremiumRS. Defaults to Basic. + // +kubebuilder:validation:Optional + PricingTier *string `json:"pricingTier,omitempty" tf:"pricing_tier,omitempty"` + + // The endpoint of an Azure SQL Server that will be used to host the SSIS catalog. + // +kubebuilder:validation:Optional + ServerEndpoint *string `json:"serverEndpoint" tf:"server_endpoint,omitempty"` +} + +type IntegrationRuntimeManagedCustomSetupScriptInitParameters struct { + + // The blob endpoint for the container which contains a custom setup script that will be run on every node on startup. See https://docs.microsoft.com/azure/data-factory/how-to-configure-azure-ssis-ir-custom-setup for more information. 
+ BlobContainerURI *string `json:"blobContainerUri,omitempty" tf:"blob_container_uri,omitempty"` +} + +type IntegrationRuntimeManagedCustomSetupScriptObservation struct { + + // The blob endpoint for the container which contains a custom setup script that will be run on every node on startup. See https://docs.microsoft.com/azure/data-factory/how-to-configure-azure-ssis-ir-custom-setup for more information. + BlobContainerURI *string `json:"blobContainerUri,omitempty" tf:"blob_container_uri,omitempty"` +} + +type IntegrationRuntimeManagedCustomSetupScriptParameters struct { + + // The blob endpoint for the container which contains a custom setup script that will be run on every node on startup. See https://docs.microsoft.com/azure/data-factory/how-to-configure-azure-ssis-ir-custom-setup for more information. + // +kubebuilder:validation:Optional + BlobContainerURI *string `json:"blobContainerUri" tf:"blob_container_uri,omitempty"` + + // A container SAS token that gives access to the files. See https://docs.microsoft.com/azure/data-factory/how-to-configure-azure-ssis-ir-custom-setup for more information. + // +kubebuilder:validation:Required + SASTokenSecretRef v1.SecretKeySelector `json:"sasTokenSecretRef" tf:"-"` +} + +type IntegrationRuntimeManagedInitParameters struct { + + // A catalog_info block as defined below. + CatalogInfo *IntegrationRuntimeManagedCatalogInfoInitParameters `json:"catalogInfo,omitempty" tf:"catalog_info,omitempty"` + + // The name of the credential to use for the Managed Integration Runtime. + CredentialName *string `json:"credentialName,omitempty" tf:"credential_name,omitempty"` + + // A custom_setup_script block as defined below. + CustomSetupScript *IntegrationRuntimeManagedCustomSetupScriptInitParameters `json:"customSetupScript,omitempty" tf:"custom_setup_script,omitempty"` + + // Integration runtime description. 
+ Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The Managed Integration Runtime edition. Valid values are Standard and Enterprise. Defaults to Standard. + Edition *string `json:"edition,omitempty" tf:"edition,omitempty"` + + // The type of the license that is used. Valid values are LicenseIncluded and BasePrice. Defaults to LicenseIncluded. + LicenseType *string `json:"licenseType,omitempty" tf:"license_type,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Defines the maximum parallel executions per node. Defaults to 1. Max is 1. + MaxParallelExecutionsPerNode *float64 `json:"maxParallelExecutionsPerNode,omitempty" tf:"max_parallel_executions_per_node,omitempty"` + + // The size of the nodes on which the Managed Integration Runtime runs. Valid values are: Standard_D2_v3, Standard_D4_v3, Standard_D8_v3, Standard_D16_v3, Standard_D32_v3, Standard_D64_v3, Standard_E2_v3, Standard_E4_v3, Standard_E8_v3, Standard_E16_v3, Standard_E32_v3, Standard_E64_v3, Standard_D1_v2, Standard_D2_v2, Standard_D3_v2, Standard_D4_v2, Standard_A4_v2 and Standard_A8_v2 + NodeSize *string `json:"nodeSize,omitempty" tf:"node_size,omitempty"` + + // Number of nodes for the Managed Integration Runtime. Max is 10. Defaults to 1. + NumberOfNodes *float64 `json:"numberOfNodes,omitempty" tf:"number_of_nodes,omitempty"` + + // A vnet_integration block as defined below. + VnetIntegration *IntegrationRuntimeManagedVnetIntegrationInitParameters `json:"vnetIntegration,omitempty" tf:"vnet_integration,omitempty"` +} + +type IntegrationRuntimeManagedObservation struct { + + // A catalog_info block as defined below. 
+ CatalogInfo *IntegrationRuntimeManagedCatalogInfoObservation `json:"catalogInfo,omitempty" tf:"catalog_info,omitempty"` + + // The name of the credential to use for the Managed Integration Runtime. + CredentialName *string `json:"credentialName,omitempty" tf:"credential_name,omitempty"` + + // A custom_setup_script block as defined below. + CustomSetupScript *IntegrationRuntimeManagedCustomSetupScriptObservation `json:"customSetupScript,omitempty" tf:"custom_setup_script,omitempty"` + + // The Data Factory ID in which to associate the Linked Service with. Changing this forces a new resource. + DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` + + // Integration runtime description. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The Managed Integration Runtime edition. Valid values are Standard and Enterprise. Defaults to Standard. + Edition *string `json:"edition,omitempty" tf:"edition,omitempty"` + + // The ID of the Data Factory Integration Managed Runtime. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The type of the license that is used. Valid values are LicenseIncluded and BasePrice. Defaults to LicenseIncluded. + LicenseType *string `json:"licenseType,omitempty" tf:"license_type,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Defines the maximum parallel executions per node. Defaults to 1. Max is 1. + MaxParallelExecutionsPerNode *float64 `json:"maxParallelExecutionsPerNode,omitempty" tf:"max_parallel_executions_per_node,omitempty"` + + // The size of the nodes on which the Managed Integration Runtime runs. 
Valid values are: Standard_D2_v3, Standard_D4_v3, Standard_D8_v3, Standard_D16_v3, Standard_D32_v3, Standard_D64_v3, Standard_E2_v3, Standard_E4_v3, Standard_E8_v3, Standard_E16_v3, Standard_E32_v3, Standard_E64_v3, Standard_D1_v2, Standard_D2_v2, Standard_D3_v2, Standard_D4_v2, Standard_A4_v2 and Standard_A8_v2 + NodeSize *string `json:"nodeSize,omitempty" tf:"node_size,omitempty"` + + // Number of nodes for the Managed Integration Runtime. Max is 10. Defaults to 1. + NumberOfNodes *float64 `json:"numberOfNodes,omitempty" tf:"number_of_nodes,omitempty"` + + // A vnet_integration block as defined below. + VnetIntegration *IntegrationRuntimeManagedVnetIntegrationObservation `json:"vnetIntegration,omitempty" tf:"vnet_integration,omitempty"` +} + +type IntegrationRuntimeManagedParameters struct { + + // A catalog_info block as defined below. + // +kubebuilder:validation:Optional + CatalogInfo *IntegrationRuntimeManagedCatalogInfoParameters `json:"catalogInfo,omitempty" tf:"catalog_info,omitempty"` + + // The name of the credential to use for the Managed Integration Runtime. + // +kubebuilder:validation:Optional + CredentialName *string `json:"credentialName,omitempty" tf:"credential_name,omitempty"` + + // A custom_setup_script block as defined below. + // +kubebuilder:validation:Optional + CustomSetupScript *IntegrationRuntimeManagedCustomSetupScriptParameters `json:"customSetupScript,omitempty" tf:"custom_setup_script,omitempty"` + + // The Data Factory ID in which to associate the Linked Service with. Changing this forces a new resource. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta2.Factory + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` + + // Reference to a Factory in datafactory to populate dataFactoryId. 
+ // +kubebuilder:validation:Optional + DataFactoryIDRef *v1.Reference `json:"dataFactoryIdRef,omitempty" tf:"-"` + + // Selector for a Factory in datafactory to populate dataFactoryId. + // +kubebuilder:validation:Optional + DataFactoryIDSelector *v1.Selector `json:"dataFactoryIdSelector,omitempty" tf:"-"` + + // Integration runtime description. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The Managed Integration Runtime edition. Valid values are Standard and Enterprise. Defaults to Standard. + // +kubebuilder:validation:Optional + Edition *string `json:"edition,omitempty" tf:"edition,omitempty"` + + // The type of the license that is used. Valid values are LicenseIncluded and BasePrice. Defaults to LicenseIncluded. + // +kubebuilder:validation:Optional + LicenseType *string `json:"licenseType,omitempty" tf:"license_type,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Defines the maximum parallel executions per node. Defaults to 1. Max is 1. + // +kubebuilder:validation:Optional + MaxParallelExecutionsPerNode *float64 `json:"maxParallelExecutionsPerNode,omitempty" tf:"max_parallel_executions_per_node,omitempty"` + + // The size of the nodes on which the Managed Integration Runtime runs. Valid values are: Standard_D2_v3, Standard_D4_v3, Standard_D8_v3, Standard_D16_v3, Standard_D32_v3, Standard_D64_v3, Standard_E2_v3, Standard_E4_v3, Standard_E8_v3, Standard_E16_v3, Standard_E32_v3, Standard_E64_v3, Standard_D1_v2, Standard_D2_v2, Standard_D3_v2, Standard_D4_v2, Standard_A4_v2 and Standard_A8_v2 + // +kubebuilder:validation:Optional + NodeSize *string `json:"nodeSize,omitempty" tf:"node_size,omitempty"` + + // Number of nodes for the Managed Integration Runtime. Max is 10. 
Defaults to 1. + // +kubebuilder:validation:Optional + NumberOfNodes *float64 `json:"numberOfNodes,omitempty" tf:"number_of_nodes,omitempty"` + + // A vnet_integration block as defined below. + // +kubebuilder:validation:Optional + VnetIntegration *IntegrationRuntimeManagedVnetIntegrationParameters `json:"vnetIntegration,omitempty" tf:"vnet_integration,omitempty"` +} + +type IntegrationRuntimeManagedVnetIntegrationInitParameters struct { + + // Name of the subnet to which the nodes of the Managed Integration Runtime will be added. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + SubnetName *string `json:"subnetName,omitempty" tf:"subnet_name,omitempty"` + + // Reference to a Subnet in network to populate subnetName. + // +kubebuilder:validation:Optional + SubnetNameRef *v1.Reference `json:"subnetNameRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetName. + // +kubebuilder:validation:Optional + SubnetNameSelector *v1.Selector `json:"subnetNameSelector,omitempty" tf:"-"` + + // ID of the virtual network to which the nodes of the Managed Integration Runtime will be added. + VnetID *string `json:"vnetId,omitempty" tf:"vnet_id,omitempty"` +} + +type IntegrationRuntimeManagedVnetIntegrationObservation struct { + + // Name of the subnet to which the nodes of the Managed Integration Runtime will be added. + SubnetName *string `json:"subnetName,omitempty" tf:"subnet_name,omitempty"` + + // ID of the virtual network to which the nodes of the Managed Integration Runtime will be added. + VnetID *string `json:"vnetId,omitempty" tf:"vnet_id,omitempty"` +} + +type IntegrationRuntimeManagedVnetIntegrationParameters struct { + + // Name of the subnet to which the nodes of the Managed Integration Runtime will be added. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +kubebuilder:validation:Optional + SubnetName *string `json:"subnetName,omitempty" tf:"subnet_name,omitempty"` + + // Reference to a Subnet in network to populate subnetName. + // +kubebuilder:validation:Optional + SubnetNameRef *v1.Reference `json:"subnetNameRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetName. + // +kubebuilder:validation:Optional + SubnetNameSelector *v1.Selector `json:"subnetNameSelector,omitempty" tf:"-"` + + // ID of the virtual network to which the nodes of the Managed Integration Runtime will be added. + // +kubebuilder:validation:Optional + VnetID *string `json:"vnetId" tf:"vnet_id,omitempty"` +} + +// IntegrationRuntimeManagedSpec defines the desired state of IntegrationRuntimeManaged +type IntegrationRuntimeManagedSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider IntegrationRuntimeManagedParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider IntegrationRuntimeManagedInitParameters `json:"initProvider,omitempty"` +} + +// IntegrationRuntimeManagedStatus defines the observed state of IntegrationRuntimeManaged. 
+type IntegrationRuntimeManagedStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider IntegrationRuntimeManagedObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// IntegrationRuntimeManaged is the Schema for the IntegrationRuntimeManageds API. Manages an Azure Data Factory Managed Integration Runtime. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type IntegrationRuntimeManaged struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.nodeSize) || (has(self.initProvider) && has(self.initProvider.nodeSize))",message="spec.forProvider.nodeSize is a required parameter" + Spec IntegrationRuntimeManagedSpec `json:"spec"` + Status IntegrationRuntimeManagedStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// IntegrationRuntimeManagedList contains a list of IntegrationRuntimeManageds +type IntegrationRuntimeManagedList struct { + metav1.TypeMeta `json:",inline"` + 
metav1.ListMeta `json:"metadata,omitempty"` + Items []IntegrationRuntimeManaged `json:"items"` +} + +// Repository type metadata. +var ( + IntegrationRuntimeManaged_Kind = "IntegrationRuntimeManaged" + IntegrationRuntimeManaged_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: IntegrationRuntimeManaged_Kind}.String() + IntegrationRuntimeManaged_KindAPIVersion = IntegrationRuntimeManaged_Kind + "." + CRDGroupVersion.String() + IntegrationRuntimeManaged_GroupVersionKind = CRDGroupVersion.WithKind(IntegrationRuntimeManaged_Kind) +) + +func init() { + SchemeBuilder.Register(&IntegrationRuntimeManaged{}, &IntegrationRuntimeManagedList{}) +} diff --git a/apis/datafactory/v1beta2/zz_linkedcustomservice_terraformed.go b/apis/datafactory/v1beta2/zz_linkedcustomservice_terraformed.go new file mode 100755 index 000000000..ddfe566d2 --- /dev/null +++ b/apis/datafactory/v1beta2/zz_linkedcustomservice_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this LinkedCustomService +func (mg *LinkedCustomService) GetTerraformResourceType() string { + return "azurerm_data_factory_linked_custom_service" +} + +// GetConnectionDetailsMapping for this LinkedCustomService +func (tr *LinkedCustomService) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this LinkedCustomService +func (tr *LinkedCustomService) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this LinkedCustomService +func (tr *LinkedCustomService) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this LinkedCustomService +func (tr *LinkedCustomService) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this LinkedCustomService +func (tr *LinkedCustomService) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this LinkedCustomService +func (tr *LinkedCustomService) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this LinkedCustomService +func (tr *LinkedCustomService) 
GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) + } + + // GetMergedParameters of this LinkedCustomService + func (tr *LinkedCustomService) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil + } + + // LateInitialize this LinkedCustomService using its observed tfState. + // returns True if there are any spec changes for the resource. + func (tr *LinkedCustomService) LateInitialize(attrs []byte) (bool, error) { + params := &LinkedCustomServiceParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *LinkedCustomService) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/datafactory/v1beta2/zz_linkedcustomservice_types.go b/apis/datafactory/v1beta2/zz_linkedcustomservice_types.go new file mode 100755 index 000000000..88c5865a2 --- /dev/null +++ b/apis/datafactory/v1beta2/zz_linkedcustomservice_types.go @@ -0,0 +1,213 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type IntegrationRuntimeInitParameters struct { + + // The integration runtime reference to associate with the Data Factory Linked Service. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A map of parameters to associate with the integration runtime. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type IntegrationRuntimeObservation struct { + + // The integration runtime reference to associate with the Data Factory Linked Service. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A map of parameters to associate with the integration runtime. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type IntegrationRuntimeParameters struct { + + // The integration runtime reference to associate with the Data Factory Linked Service. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // A map of parameters to associate with the integration runtime. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type LinkedCustomServiceInitParameters struct { + + // A map of additional properties to associate with the Data Factory Linked Service. + // +mapType=granular + AdditionalProperties map[string]*string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"` + + // List of tags that can be used for describing the Data Factory Linked Service. + Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // The description for the Data Factory Linked Service. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // An integration_runtime block as defined below. + IntegrationRuntime *IntegrationRuntimeInitParameters `json:"integrationRuntime,omitempty" tf:"integration_runtime,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The type of data stores that will be connected to Data Factory. For full list of supported data stores, please refer to Azure Data Factory connector. Changing this forces a new resource to be created. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // A JSON object that contains the properties of the Data Factory Linked Service. + TypePropertiesJSON *string `json:"typePropertiesJson,omitempty" tf:"type_properties_json,omitempty"` +} + +type LinkedCustomServiceObservation struct { + + // A map of additional properties to associate with the Data Factory Linked Service. + // +mapType=granular + AdditionalProperties map[string]*string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"` + + // List of tags that can be used for describing the Data Factory Linked Service. 
+ Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // The Data Factory ID in which to associate the Linked Service with. Changing this forces a new resource. + DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` + + // The description for the Data Factory Linked Service. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The ID of the Data Factory Linked Service. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An integration_runtime block as defined below. + IntegrationRuntime *IntegrationRuntimeObservation `json:"integrationRuntime,omitempty" tf:"integration_runtime,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The type of data stores that will be connected to Data Factory. For full list of supported data stores, please refer to Azure Data Factory connector. Changing this forces a new resource to be created. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // A JSON object that contains the properties of the Data Factory Linked Service. + TypePropertiesJSON *string `json:"typePropertiesJson,omitempty" tf:"type_properties_json,omitempty"` +} + +type LinkedCustomServiceParameters struct { + + // A map of additional properties to associate with the Data Factory Linked Service. + // +kubebuilder:validation:Optional + // +mapType=granular + AdditionalProperties map[string]*string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"` + + // List of tags that can be used for describing the Data Factory Linked Service. + // +kubebuilder:validation:Optional + Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // The Data Factory ID in which to associate the Linked Service with. Changing this forces a new resource. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta2.Factory + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` + + // Reference to a Factory in datafactory to populate dataFactoryId. + // +kubebuilder:validation:Optional + DataFactoryIDRef *v1.Reference `json:"dataFactoryIdRef,omitempty" tf:"-"` + + // Selector for a Factory in datafactory to populate dataFactoryId. + // +kubebuilder:validation:Optional + DataFactoryIDSelector *v1.Selector `json:"dataFactoryIdSelector,omitempty" tf:"-"` + + // The description for the Data Factory Linked Service. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // An integration_runtime block as defined below. + // +kubebuilder:validation:Optional + IntegrationRuntime *IntegrationRuntimeParameters `json:"integrationRuntime,omitempty" tf:"integration_runtime,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service. + // +kubebuilder:validation:Optional + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The type of data stores that will be connected to Data Factory. For full list of supported data stores, please refer to Azure Data Factory connector. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // A JSON object that contains the properties of the Data Factory Linked Service. 
+ // +kubebuilder:validation:Optional + TypePropertiesJSON *string `json:"typePropertiesJson,omitempty" tf:"type_properties_json,omitempty"` +} + +// LinkedCustomServiceSpec defines the desired state of LinkedCustomService +type LinkedCustomServiceSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider LinkedCustomServiceParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider LinkedCustomServiceInitParameters `json:"initProvider,omitempty"` +} + +// LinkedCustomServiceStatus defines the observed state of LinkedCustomService. +type LinkedCustomServiceStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider LinkedCustomServiceObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// LinkedCustomService is the Schema for the LinkedCustomServices API. Manages a Linked Service (connection) between a resource and Azure Data Factory. This is a generic resource that supports all different Linked Service Types. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type LinkedCustomService struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.type) || (has(self.initProvider) && has(self.initProvider.type))",message="spec.forProvider.type is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.typePropertiesJson) || (has(self.initProvider) && has(self.initProvider.typePropertiesJson))",message="spec.forProvider.typePropertiesJson is a required parameter" + Spec LinkedCustomServiceSpec `json:"spec"` + Status LinkedCustomServiceStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// LinkedCustomServiceList contains a list of LinkedCustomServices +type LinkedCustomServiceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []LinkedCustomService `json:"items"` +} + +// Repository type metadata. +var ( + LinkedCustomService_Kind = "LinkedCustomService" + LinkedCustomService_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: LinkedCustomService_Kind}.String() + LinkedCustomService_KindAPIVersion = LinkedCustomService_Kind + "." 
+ CRDGroupVersion.String() + LinkedCustomService_GroupVersionKind = CRDGroupVersion.WithKind(LinkedCustomService_Kind) +) + +func init() { + SchemeBuilder.Register(&LinkedCustomService{}, &LinkedCustomServiceList{}) +} diff --git a/apis/datafactory/v1beta2/zz_linkedserviceazureblobstorage_terraformed.go b/apis/datafactory/v1beta2/zz_linkedserviceazureblobstorage_terraformed.go new file mode 100755 index 000000000..5a6cd3985 --- /dev/null +++ b/apis/datafactory/v1beta2/zz_linkedserviceazureblobstorage_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this LinkedServiceAzureBlobStorage +func (mg *LinkedServiceAzureBlobStorage) GetTerraformResourceType() string { + return "azurerm_data_factory_linked_service_azure_blob_storage" +} + +// GetConnectionDetailsMapping for this LinkedServiceAzureBlobStorage +func (tr *LinkedServiceAzureBlobStorage) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"connection_string": "spec.forProvider.connectionStringSecretRef", "sas_uri": "spec.forProvider.sasUriSecretRef", "service_endpoint": "spec.forProvider.serviceEndpointSecretRef"} +} + +// GetObservation of this LinkedServiceAzureBlobStorage +func (tr *LinkedServiceAzureBlobStorage) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this LinkedServiceAzureBlobStorage +func (tr *LinkedServiceAzureBlobStorage) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != 
nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this LinkedServiceAzureBlobStorage +func (tr *LinkedServiceAzureBlobStorage) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this LinkedServiceAzureBlobStorage +func (tr *LinkedServiceAzureBlobStorage) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this LinkedServiceAzureBlobStorage +func (tr *LinkedServiceAzureBlobStorage) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this LinkedServiceAzureBlobStorage +func (tr *LinkedServiceAzureBlobStorage) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this LinkedServiceAzureBlobStorage +func (tr *LinkedServiceAzureBlobStorage) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. 
As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this LinkedServiceAzureBlobStorage using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *LinkedServiceAzureBlobStorage) LateInitialize(attrs []byte) (bool, error) { + params := &LinkedServiceAzureBlobStorageParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *LinkedServiceAzureBlobStorage) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/datafactory/v1beta2/zz_linkedserviceazureblobstorage_types.go b/apis/datafactory/v1beta2/zz_linkedserviceazureblobstorage_types.go new file mode 100755 index 000000000..145d9738e --- /dev/null +++ b/apis/datafactory/v1beta2/zz_linkedserviceazureblobstorage_types.go @@ -0,0 +1,345 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type KeyVaultSASTokenInitParameters struct { + + // Specifies the name of an existing Key Vault Data Factory Linked Service. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta1.LinkedServiceKeyVault + LinkedServiceName *string `json:"linkedServiceName,omitempty" tf:"linked_service_name,omitempty"` + + // Reference to a LinkedServiceKeyVault in datafactory to populate linkedServiceName. + // +kubebuilder:validation:Optional + LinkedServiceNameRef *v1.Reference `json:"linkedServiceNameRef,omitempty" tf:"-"` + + // Selector for a LinkedServiceKeyVault in datafactory to populate linkedServiceName. + // +kubebuilder:validation:Optional + LinkedServiceNameSelector *v1.Selector `json:"linkedServiceNameSelector,omitempty" tf:"-"` + + // Specifies the secret name in Azure Key Vault that stores the SAS token. + SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` +} + +type KeyVaultSASTokenObservation struct { + + // Specifies the name of an existing Key Vault Data Factory Linked Service. + LinkedServiceName *string `json:"linkedServiceName,omitempty" tf:"linked_service_name,omitempty"` + + // Specifies the secret name in Azure Key Vault that stores the SAS token. + SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` +} + +type KeyVaultSASTokenParameters struct { + + // Specifies the name of an existing Key Vault Data Factory Linked Service. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta1.LinkedServiceKeyVault + // +kubebuilder:validation:Optional + LinkedServiceName *string `json:"linkedServiceName,omitempty" tf:"linked_service_name,omitempty"` + + // Reference to a LinkedServiceKeyVault in datafactory to populate linkedServiceName. 
+ // +kubebuilder:validation:Optional + LinkedServiceNameRef *v1.Reference `json:"linkedServiceNameRef,omitempty" tf:"-"` + + // Selector for a LinkedServiceKeyVault in datafactory to populate linkedServiceName. + // +kubebuilder:validation:Optional + LinkedServiceNameSelector *v1.Selector `json:"linkedServiceNameSelector,omitempty" tf:"-"` + + // Specifies the secret name in Azure Key Vault that stores the SAS token. + // +kubebuilder:validation:Optional + SecretName *string `json:"secretName" tf:"secret_name,omitempty"` +} + +type LinkedServiceAzureBlobStorageInitParameters struct { + + // A map of additional properties to associate with the Data Factory Linked Service. + // +mapType=granular + AdditionalProperties map[string]*string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"` + + // List of tags that can be used for describing the Data Factory Linked Service. + Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // The connection string sent insecurely. Conflicts with connection_string, sas_uri and service_endpoint. + ConnectionStringInsecure *string `json:"connectionStringInsecure,omitempty" tf:"connection_string_insecure,omitempty"` + + // The description for the Data Factory Linked Service. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The integration runtime reference to associate with the Data Factory Linked Service. + IntegrationRuntimeName *string `json:"integrationRuntimeName,omitempty" tf:"integration_runtime_name,omitempty"` + + // A key_vault_sas_token block as defined below. Use this argument to store SAS Token in an existing Key Vault. It needs an existing Key Vault Data Factory Linked Service. A sas_uri is required. + KeyVaultSASToken *KeyVaultSASTokenInitParameters `json:"keyVaultSasToken,omitempty" tf:"key_vault_sas_token,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service. 
+ // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The service principal id in which to authenticate against the Azure Blob Storage account. + ServicePrincipalID *string `json:"servicePrincipalId,omitempty" tf:"service_principal_id,omitempty"` + + // The service principal key in which to authenticate against the AAzure Blob Storage account. + ServicePrincipalKey *string `json:"servicePrincipalKey,omitempty" tf:"service_principal_key,omitempty"` + + // A service_principal_linked_key_vault_key block as defined below. Use this argument to store Service Principal key in an existing Key Vault. It needs an existing Key Vault Data Factory Linked Service. + ServicePrincipalLinkedKeyVaultKey *ServicePrincipalLinkedKeyVaultKeyInitParameters `json:"servicePrincipalLinkedKeyVaultKey,omitempty" tf:"service_principal_linked_key_vault_key,omitempty"` + + // Specify the kind of the storage account. Allowed values are Storage, StorageV2, BlobStorage and BlockBlobStorage. + StorageKind *string `json:"storageKind,omitempty" tf:"storage_kind,omitempty"` + + // The tenant id or name in which to authenticate against the Azure Blob Storage account. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Whether to use the Data Factory's managed identity to authenticate against the Azure Blob Storage account. Incompatible with service_principal_id and service_principal_key. + UseManagedIdentity *bool `json:"useManagedIdentity,omitempty" tf:"use_managed_identity,omitempty"` +} + +type LinkedServiceAzureBlobStorageObservation struct { + + // A map of additional properties to associate with the Data Factory Linked Service. + // +mapType=granular + AdditionalProperties map[string]*string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"` + + // List of tags that can be used for describing the Data Factory Linked Service. 
+ Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // The connection string sent insecurely. Conflicts with connection_string, sas_uri and service_endpoint. + ConnectionStringInsecure *string `json:"connectionStringInsecure,omitempty" tf:"connection_string_insecure,omitempty"` + + // The Data Factory ID in which to associate the Linked Service with. Changing this forces a new resource. + DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` + + // The description for the Data Factory Linked Service. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The ID of the Data Factory Linked Service. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The integration runtime reference to associate with the Data Factory Linked Service. + IntegrationRuntimeName *string `json:"integrationRuntimeName,omitempty" tf:"integration_runtime_name,omitempty"` + + // A key_vault_sas_token block as defined below. Use this argument to store SAS Token in an existing Key Vault. It needs an existing Key Vault Data Factory Linked Service. A sas_uri is required. + KeyVaultSASToken *KeyVaultSASTokenObservation `json:"keyVaultSasToken,omitempty" tf:"key_vault_sas_token,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The service principal id in which to authenticate against the Azure Blob Storage account. + ServicePrincipalID *string `json:"servicePrincipalId,omitempty" tf:"service_principal_id,omitempty"` + + // The service principal key in which to authenticate against the AAzure Blob Storage account. + ServicePrincipalKey *string `json:"servicePrincipalKey,omitempty" tf:"service_principal_key,omitempty"` + + // A service_principal_linked_key_vault_key block as defined below. 
Use this argument to store Service Principal key in an existing Key Vault. It needs an existing Key Vault Data Factory Linked Service. + ServicePrincipalLinkedKeyVaultKey *ServicePrincipalLinkedKeyVaultKeyObservation `json:"servicePrincipalLinkedKeyVaultKey,omitempty" tf:"service_principal_linked_key_vault_key,omitempty"` + + // Specify the kind of the storage account. Allowed values are Storage, StorageV2, BlobStorage and BlockBlobStorage. + StorageKind *string `json:"storageKind,omitempty" tf:"storage_kind,omitempty"` + + // The tenant id or name in which to authenticate against the Azure Blob Storage account. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Whether to use the Data Factory's managed identity to authenticate against the Azure Blob Storage account. Incompatible with service_principal_id and service_principal_key. + UseManagedIdentity *bool `json:"useManagedIdentity,omitempty" tf:"use_managed_identity,omitempty"` +} + +type LinkedServiceAzureBlobStorageParameters struct { + + // A map of additional properties to associate with the Data Factory Linked Service. + // +kubebuilder:validation:Optional + // +mapType=granular + AdditionalProperties map[string]*string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"` + + // List of tags that can be used for describing the Data Factory Linked Service. + // +kubebuilder:validation:Optional + Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // The connection string sent insecurely. Conflicts with connection_string, sas_uri and service_endpoint. + // +kubebuilder:validation:Optional + ConnectionStringInsecure *string `json:"connectionStringInsecure,omitempty" tf:"connection_string_insecure,omitempty"` + + // The connection string. Conflicts with connection_string_insecure, sas_uri and service_endpoint. 
+ // +kubebuilder:validation:Optional + ConnectionStringSecretRef *v1.SecretKeySelector `json:"connectionStringSecretRef,omitempty" tf:"-"` + + // The Data Factory ID in which to associate the Linked Service with. Changing this forces a new resource. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta2.Factory + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` + + // Reference to a Factory in datafactory to populate dataFactoryId. + // +kubebuilder:validation:Optional + DataFactoryIDRef *v1.Reference `json:"dataFactoryIdRef,omitempty" tf:"-"` + + // Selector for a Factory in datafactory to populate dataFactoryId. + // +kubebuilder:validation:Optional + DataFactoryIDSelector *v1.Selector `json:"dataFactoryIdSelector,omitempty" tf:"-"` + + // The description for the Data Factory Linked Service. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The integration runtime reference to associate with the Data Factory Linked Service. + // +kubebuilder:validation:Optional + IntegrationRuntimeName *string `json:"integrationRuntimeName,omitempty" tf:"integration_runtime_name,omitempty"` + + // A key_vault_sas_token block as defined below. Use this argument to store SAS Token in an existing Key Vault. It needs an existing Key Vault Data Factory Linked Service. A sas_uri is required. + // +kubebuilder:validation:Optional + KeyVaultSASToken *KeyVaultSASTokenParameters `json:"keyVaultSasToken,omitempty" tf:"key_vault_sas_token,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service. + // +kubebuilder:validation:Optional + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The SAS URI. 
Conflicts with connection_string_insecure, connection_string and service_endpoint. + // +kubebuilder:validation:Optional + SASURISecretRef *v1.SecretKeySelector `json:"sasuriSecretRef,omitempty" tf:"-"` + + // The Service Endpoint. Conflicts with connection_string, connection_string_insecure and sas_uri. + // +kubebuilder:validation:Optional + ServiceEndpointSecretRef *v1.SecretKeySelector `json:"serviceEndpointSecretRef,omitempty" tf:"-"` + + // The service principal id in which to authenticate against the Azure Blob Storage account. + // +kubebuilder:validation:Optional + ServicePrincipalID *string `json:"servicePrincipalId,omitempty" tf:"service_principal_id,omitempty"` + + // The service principal key in which to authenticate against the AAzure Blob Storage account. + // +kubebuilder:validation:Optional + ServicePrincipalKey *string `json:"servicePrincipalKey,omitempty" tf:"service_principal_key,omitempty"` + + // A service_principal_linked_key_vault_key block as defined below. Use this argument to store Service Principal key in an existing Key Vault. It needs an existing Key Vault Data Factory Linked Service. + // +kubebuilder:validation:Optional + ServicePrincipalLinkedKeyVaultKey *ServicePrincipalLinkedKeyVaultKeyParameters `json:"servicePrincipalLinkedKeyVaultKey,omitempty" tf:"service_principal_linked_key_vault_key,omitempty"` + + // Specify the kind of the storage account. Allowed values are Storage, StorageV2, BlobStorage and BlockBlobStorage. + // +kubebuilder:validation:Optional + StorageKind *string `json:"storageKind,omitempty" tf:"storage_kind,omitempty"` + + // The tenant id or name in which to authenticate against the Azure Blob Storage account. + // +kubebuilder:validation:Optional + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Whether to use the Data Factory's managed identity to authenticate against the Azure Blob Storage account. Incompatible with service_principal_id and service_principal_key. 
+ // +kubebuilder:validation:Optional + UseManagedIdentity *bool `json:"useManagedIdentity,omitempty" tf:"use_managed_identity,omitempty"` +} + +type ServicePrincipalLinkedKeyVaultKeyInitParameters struct { + + // Specifies the name of an existing Key Vault Data Factory Linked Service. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta1.LinkedServiceKeyVault + LinkedServiceName *string `json:"linkedServiceName,omitempty" tf:"linked_service_name,omitempty"` + + // Reference to a LinkedServiceKeyVault in datafactory to populate linkedServiceName. + // +kubebuilder:validation:Optional + LinkedServiceNameRef *v1.Reference `json:"linkedServiceNameRef,omitempty" tf:"-"` + + // Selector for a LinkedServiceKeyVault in datafactory to populate linkedServiceName. + // +kubebuilder:validation:Optional + LinkedServiceNameSelector *v1.Selector `json:"linkedServiceNameSelector,omitempty" tf:"-"` + + // Specifies the secret name in Azure Key Vault that stores the Service Principal key. + SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` +} + +type ServicePrincipalLinkedKeyVaultKeyObservation struct { + + // Specifies the name of an existing Key Vault Data Factory Linked Service. + LinkedServiceName *string `json:"linkedServiceName,omitempty" tf:"linked_service_name,omitempty"` + + // Specifies the secret name in Azure Key Vault that stores the Service Principal key. + SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` +} + +type ServicePrincipalLinkedKeyVaultKeyParameters struct { + + // Specifies the name of an existing Key Vault Data Factory Linked Service. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta1.LinkedServiceKeyVault + // +kubebuilder:validation:Optional + LinkedServiceName *string `json:"linkedServiceName,omitempty" tf:"linked_service_name,omitempty"` + + // Reference to a LinkedServiceKeyVault in datafactory to populate linkedServiceName. + // +kubebuilder:validation:Optional + LinkedServiceNameRef *v1.Reference `json:"linkedServiceNameRef,omitempty" tf:"-"` + + // Selector for a LinkedServiceKeyVault in datafactory to populate linkedServiceName. + // +kubebuilder:validation:Optional + LinkedServiceNameSelector *v1.Selector `json:"linkedServiceNameSelector,omitempty" tf:"-"` + + // Specifies the secret name in Azure Key Vault that stores the Service Principal key. + // +kubebuilder:validation:Optional + SecretName *string `json:"secretName" tf:"secret_name,omitempty"` +} + +// LinkedServiceAzureBlobStorageSpec defines the desired state of LinkedServiceAzureBlobStorage +type LinkedServiceAzureBlobStorageSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider LinkedServiceAzureBlobStorageParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider LinkedServiceAzureBlobStorageInitParameters `json:"initProvider,omitempty"` +} + +// LinkedServiceAzureBlobStorageStatus defines the observed state of LinkedServiceAzureBlobStorage. +type LinkedServiceAzureBlobStorageStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider LinkedServiceAzureBlobStorageObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// LinkedServiceAzureBlobStorage is the Schema for the LinkedServiceAzureBlobStorages API. Manages a Linked Service (connection) between an Azure Blob Storage Account and Azure Data Factory. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type LinkedServiceAzureBlobStorage struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec LinkedServiceAzureBlobStorageSpec `json:"spec"` + Status LinkedServiceAzureBlobStorageStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// LinkedServiceAzureBlobStorageList contains a list of LinkedServiceAzureBlobStorages +type LinkedServiceAzureBlobStorageList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []LinkedServiceAzureBlobStorage `json:"items"` +} + +// Repository type metadata. 
+var ( + LinkedServiceAzureBlobStorage_Kind = "LinkedServiceAzureBlobStorage" + LinkedServiceAzureBlobStorage_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: LinkedServiceAzureBlobStorage_Kind}.String() + LinkedServiceAzureBlobStorage_KindAPIVersion = LinkedServiceAzureBlobStorage_Kind + "." + CRDGroupVersion.String() + LinkedServiceAzureBlobStorage_GroupVersionKind = CRDGroupVersion.WithKind(LinkedServiceAzureBlobStorage_Kind) +) + +func init() { + SchemeBuilder.Register(&LinkedServiceAzureBlobStorage{}, &LinkedServiceAzureBlobStorageList{}) +} diff --git a/apis/datafactory/v1beta2/zz_linkedserviceazuredatabricks_terraformed.go b/apis/datafactory/v1beta2/zz_linkedserviceazuredatabricks_terraformed.go new file mode 100755 index 000000000..866d9fdd8 --- /dev/null +++ b/apis/datafactory/v1beta2/zz_linkedserviceazuredatabricks_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this LinkedServiceAzureDatabricks +func (mg *LinkedServiceAzureDatabricks) GetTerraformResourceType() string { + return "azurerm_data_factory_linked_service_azure_databricks" +} + +// GetConnectionDetailsMapping for this LinkedServiceAzureDatabricks +func (tr *LinkedServiceAzureDatabricks) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"access_token": "spec.forProvider.accessTokenSecretRef"} +} + +// GetObservation of this LinkedServiceAzureDatabricks +func (tr *LinkedServiceAzureDatabricks) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this LinkedServiceAzureDatabricks +func (tr *LinkedServiceAzureDatabricks) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this LinkedServiceAzureDatabricks +func (tr *LinkedServiceAzureDatabricks) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this LinkedServiceAzureDatabricks +func (tr *LinkedServiceAzureDatabricks) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this LinkedServiceAzureDatabricks +func (tr *LinkedServiceAzureDatabricks) SetParameters(params map[string]any) error { + p, err := 
json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this LinkedServiceAzureDatabricks +func (tr *LinkedServiceAzureDatabricks) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this LinkedServiceAzureDatabricks +func (tr *LinkedServiceAzureDatabricks) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this LinkedServiceAzureDatabricks using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *LinkedServiceAzureDatabricks) LateInitialize(attrs []byte) (bool, error) { + params := &LinkedServiceAzureDatabricksParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *LinkedServiceAzureDatabricks) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/datafactory/v1beta2/zz_linkedserviceazuredatabricks_types.go b/apis/datafactory/v1beta2/zz_linkedserviceazuredatabricks_types.go new file mode 100755 index 000000000..e5e712392 --- /dev/null +++ b/apis/datafactory/v1beta2/zz_linkedserviceazuredatabricks_types.go @@ -0,0 +1,440 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type InstancePoolInitParameters struct { + + // Spark version of a the cluster. + ClusterVersion *string `json:"clusterVersion,omitempty" tf:"cluster_version,omitempty"` + + // Identifier of the instance pool within the linked ADB instance. + InstancePoolID *string `json:"instancePoolId,omitempty" tf:"instance_pool_id,omitempty"` + + // The max number of worker nodes. Set this value if you want to enable autoscaling between the min_number_of_workers and this value. Omit this value to use a fixed number of workers defined in the min_number_of_workers property. 
+ MaxNumberOfWorkers *float64 `json:"maxNumberOfWorkers,omitempty" tf:"max_number_of_workers,omitempty"` + + // The minimum number of worker nodes. Defaults to 1. + MinNumberOfWorkers *float64 `json:"minNumberOfWorkers,omitempty" tf:"min_number_of_workers,omitempty"` +} + +type InstancePoolObservation struct { + + // Spark version of a the cluster. + ClusterVersion *string `json:"clusterVersion,omitempty" tf:"cluster_version,omitempty"` + + // Identifier of the instance pool within the linked ADB instance. + InstancePoolID *string `json:"instancePoolId,omitempty" tf:"instance_pool_id,omitempty"` + + // The max number of worker nodes. Set this value if you want to enable autoscaling between the min_number_of_workers and this value. Omit this value to use a fixed number of workers defined in the min_number_of_workers property. + MaxNumberOfWorkers *float64 `json:"maxNumberOfWorkers,omitempty" tf:"max_number_of_workers,omitempty"` + + // The minimum number of worker nodes. Defaults to 1. + MinNumberOfWorkers *float64 `json:"minNumberOfWorkers,omitempty" tf:"min_number_of_workers,omitempty"` +} + +type InstancePoolParameters struct { + + // Spark version of a the cluster. + // +kubebuilder:validation:Optional + ClusterVersion *string `json:"clusterVersion" tf:"cluster_version,omitempty"` + + // Identifier of the instance pool within the linked ADB instance. + // +kubebuilder:validation:Optional + InstancePoolID *string `json:"instancePoolId" tf:"instance_pool_id,omitempty"` + + // The max number of worker nodes. Set this value if you want to enable autoscaling between the min_number_of_workers and this value. Omit this value to use a fixed number of workers defined in the min_number_of_workers property. + // +kubebuilder:validation:Optional + MaxNumberOfWorkers *float64 `json:"maxNumberOfWorkers,omitempty" tf:"max_number_of_workers,omitempty"` + + // The minimum number of worker nodes. Defaults to 1. 
+ // +kubebuilder:validation:Optional + MinNumberOfWorkers *float64 `json:"minNumberOfWorkers,omitempty" tf:"min_number_of_workers,omitempty"` +} + +type LinkedServiceAzureDatabricksInitParameters struct { + + // The domain URL of the databricks instance. + AdbDomain *string `json:"adbDomain,omitempty" tf:"adb_domain,omitempty"` + + // A map of additional properties to associate with the Data Factory Linked Service. + // +mapType=granular + AdditionalProperties map[string]*string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"` + + // List of tags that can be used for describing the Data Factory Linked Service. + Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // The description for the Data Factory Linked Service. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The cluster_id of an existing cluster within the linked ADB instance. + ExistingClusterID *string `json:"existingClusterId,omitempty" tf:"existing_cluster_id,omitempty"` + + // Leverages an instance pool within the linked ADB instance as one instance_pool block defined below. + InstancePool *InstancePoolInitParameters `json:"instancePool,omitempty" tf:"instance_pool,omitempty"` + + // The integration runtime reference to associate with the Data Factory Linked Service. + IntegrationRuntimeName *string `json:"integrationRuntimeName,omitempty" tf:"integration_runtime_name,omitempty"` + + // Authenticate to ADB via Azure Key Vault Linked Service as defined in the key_vault_password block below. + KeyVaultPassword *LinkedServiceAzureDatabricksKeyVaultPasswordInitParameters `json:"keyVaultPassword,omitempty" tf:"key_vault_password,omitempty"` + + // Authenticate to ADB via managed service identity. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/databricks/v1beta2.Workspace + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + MsiWorkSpaceResourceID *string `json:"msiWorkSpaceResourceId,omitempty" tf:"msi_work_space_resource_id,omitempty"` + + // Reference to a Workspace in databricks to populate msiWorkSpaceResourceId. + // +kubebuilder:validation:Optional + MsiWorkSpaceResourceIDRef *v1.Reference `json:"msiWorkSpaceResourceIdRef,omitempty" tf:"-"` + + // Selector for a Workspace in databricks to populate msiWorkSpaceResourceId. + // +kubebuilder:validation:Optional + MsiWorkSpaceResourceIDSelector *v1.Selector `json:"msiWorkSpaceResourceIdSelector,omitempty" tf:"-"` + + // Creates new clusters within the linked ADB instance as defined in the new_cluster_config block below. + NewClusterConfig *NewClusterConfigInitParameters `json:"newClusterConfig,omitempty" tf:"new_cluster_config,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type LinkedServiceAzureDatabricksKeyVaultPasswordInitParameters struct { + + // Specifies the name of an existing Key Vault Data Factory Linked Service. + LinkedServiceName *string `json:"linkedServiceName,omitempty" tf:"linked_service_name,omitempty"` + + // Specifies the secret name in Azure Key Vault that stores ADB access token. + SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` +} + +type LinkedServiceAzureDatabricksKeyVaultPasswordObservation struct { + + // Specifies the name of an existing Key Vault Data Factory Linked Service. + LinkedServiceName *string `json:"linkedServiceName,omitempty" tf:"linked_service_name,omitempty"` + + // Specifies the secret name in Azure Key Vault that stores ADB access token. 
+ SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` +} + +type LinkedServiceAzureDatabricksKeyVaultPasswordParameters struct { + + // Specifies the name of an existing Key Vault Data Factory Linked Service. + // +kubebuilder:validation:Optional + LinkedServiceName *string `json:"linkedServiceName" tf:"linked_service_name,omitempty"` + + // Specifies the secret name in Azure Key Vault that stores ADB access token. + // +kubebuilder:validation:Optional + SecretName *string `json:"secretName" tf:"secret_name,omitempty"` +} + +type LinkedServiceAzureDatabricksObservation struct { + + // The domain URL of the databricks instance. + AdbDomain *string `json:"adbDomain,omitempty" tf:"adb_domain,omitempty"` + + // A map of additional properties to associate with the Data Factory Linked Service. + // +mapType=granular + AdditionalProperties map[string]*string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"` + + // List of tags that can be used for describing the Data Factory Linked Service. + Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // The Data Factory ID in which to associate the Linked Service with. Changing this forces a new resource. + DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` + + // The description for the Data Factory Linked Service. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The cluster_id of an existing cluster within the linked ADB instance. + ExistingClusterID *string `json:"existingClusterId,omitempty" tf:"existing_cluster_id,omitempty"` + + // The ID of the Data Factory Linked Service. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Leverages an instance pool within the linked ADB instance as one instance_pool block defined below. 
+ InstancePool *InstancePoolObservation `json:"instancePool,omitempty" tf:"instance_pool,omitempty"` + + // The integration runtime reference to associate with the Data Factory Linked Service. + IntegrationRuntimeName *string `json:"integrationRuntimeName,omitempty" tf:"integration_runtime_name,omitempty"` + + // Authenticate to ADB via Azure Key Vault Linked Service as defined in the key_vault_password block below. + KeyVaultPassword *LinkedServiceAzureDatabricksKeyVaultPasswordObservation `json:"keyVaultPassword,omitempty" tf:"key_vault_password,omitempty"` + + // Authenticate to ADB via managed service identity. + MsiWorkSpaceResourceID *string `json:"msiWorkSpaceResourceId,omitempty" tf:"msi_work_space_resource_id,omitempty"` + + // Creates new clusters within the linked ADB instance as defined in the new_cluster_config block below. + NewClusterConfig *NewClusterConfigObservation `json:"newClusterConfig,omitempty" tf:"new_cluster_config,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type LinkedServiceAzureDatabricksParameters struct { + + // Authenticate to ADB via an access token. + // +kubebuilder:validation:Optional + AccessTokenSecretRef *v1.SecretKeySelector `json:"accessTokenSecretRef,omitempty" tf:"-"` + + // The domain URL of the databricks instance. + // +kubebuilder:validation:Optional + AdbDomain *string `json:"adbDomain,omitempty" tf:"adb_domain,omitempty"` + + // A map of additional properties to associate with the Data Factory Linked Service. + // +kubebuilder:validation:Optional + // +mapType=granular + AdditionalProperties map[string]*string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"` + + // List of tags that can be used for describing the Data Factory Linked Service. 
+ // +kubebuilder:validation:Optional + Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // The Data Factory ID in which to associate the Linked Service with. Changing this forces a new resource. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta2.Factory + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` + + // Reference to a Factory in datafactory to populate dataFactoryId. + // +kubebuilder:validation:Optional + DataFactoryIDRef *v1.Reference `json:"dataFactoryIdRef,omitempty" tf:"-"` + + // Selector for a Factory in datafactory to populate dataFactoryId. + // +kubebuilder:validation:Optional + DataFactoryIDSelector *v1.Selector `json:"dataFactoryIdSelector,omitempty" tf:"-"` + + // The description for the Data Factory Linked Service. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The cluster_id of an existing cluster within the linked ADB instance. + // +kubebuilder:validation:Optional + ExistingClusterID *string `json:"existingClusterId,omitempty" tf:"existing_cluster_id,omitempty"` + + // Leverages an instance pool within the linked ADB instance as one instance_pool block defined below. + // +kubebuilder:validation:Optional + InstancePool *InstancePoolParameters `json:"instancePool,omitempty" tf:"instance_pool,omitempty"` + + // The integration runtime reference to associate with the Data Factory Linked Service. + // +kubebuilder:validation:Optional + IntegrationRuntimeName *string `json:"integrationRuntimeName,omitempty" tf:"integration_runtime_name,omitempty"` + + // Authenticate to ADB via Azure Key Vault Linked Service as defined in the key_vault_password block below. 
+ // +kubebuilder:validation:Optional + KeyVaultPassword *LinkedServiceAzureDatabricksKeyVaultPasswordParameters `json:"keyVaultPassword,omitempty" tf:"key_vault_password,omitempty"` + + // Authenticate to ADB via managed service identity. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/databricks/v1beta2.Workspace + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + MsiWorkSpaceResourceID *string `json:"msiWorkSpaceResourceId,omitempty" tf:"msi_work_space_resource_id,omitempty"` + + // Reference to a Workspace in databricks to populate msiWorkSpaceResourceId. + // +kubebuilder:validation:Optional + MsiWorkSpaceResourceIDRef *v1.Reference `json:"msiWorkSpaceResourceIdRef,omitempty" tf:"-"` + + // Selector for a Workspace in databricks to populate msiWorkSpaceResourceId. + // +kubebuilder:validation:Optional + MsiWorkSpaceResourceIDSelector *v1.Selector `json:"msiWorkSpaceResourceIdSelector,omitempty" tf:"-"` + + // Creates new clusters within the linked ADB instance as defined in the new_cluster_config block below. + // +kubebuilder:validation:Optional + NewClusterConfig *NewClusterConfigParameters `json:"newClusterConfig,omitempty" tf:"new_cluster_config,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service. + // +kubebuilder:validation:Optional + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type NewClusterConfigInitParameters struct { + + // Spark version of a the cluster. + ClusterVersion *string `json:"clusterVersion,omitempty" tf:"cluster_version,omitempty"` + + // Tags for the cluster resource. + // +mapType=granular + CustomTags map[string]*string `json:"customTags,omitempty" tf:"custom_tags,omitempty"` + + // Driver node type for the cluster. 
+ DriverNodeType *string `json:"driverNodeType,omitempty" tf:"driver_node_type,omitempty"` + + // User defined initialization scripts for the cluster. + InitScripts []*string `json:"initScripts,omitempty" tf:"init_scripts,omitempty"` + + // Location to deliver Spark driver, worker, and event logs. + LogDestination *string `json:"logDestination,omitempty" tf:"log_destination,omitempty"` + + // Specifies the maximum number of worker nodes. It should be between 1 and 25000. + MaxNumberOfWorkers *float64 `json:"maxNumberOfWorkers,omitempty" tf:"max_number_of_workers,omitempty"` + + // Specifies the minimum number of worker nodes. It should be between 1 and 25000. It defaults to 1. + MinNumberOfWorkers *float64 `json:"minNumberOfWorkers,omitempty" tf:"min_number_of_workers,omitempty"` + + // Node type for the new cluster. + NodeType *string `json:"nodeType,omitempty" tf:"node_type,omitempty"` + + // User-specified Spark configuration variables key-value pairs. + // +mapType=granular + SparkConfig map[string]*string `json:"sparkConfig,omitempty" tf:"spark_config,omitempty"` + + // User-specified Spark environment variables key-value pairs. + // +mapType=granular + SparkEnvironmentVariables map[string]*string `json:"sparkEnvironmentVariables,omitempty" tf:"spark_environment_variables,omitempty"` +} + +type NewClusterConfigObservation struct { + + // Spark version of a the cluster. + ClusterVersion *string `json:"clusterVersion,omitempty" tf:"cluster_version,omitempty"` + + // Tags for the cluster resource. + // +mapType=granular + CustomTags map[string]*string `json:"customTags,omitempty" tf:"custom_tags,omitempty"` + + // Driver node type for the cluster. + DriverNodeType *string `json:"driverNodeType,omitempty" tf:"driver_node_type,omitempty"` + + // User defined initialization scripts for the cluster. + InitScripts []*string `json:"initScripts,omitempty" tf:"init_scripts,omitempty"` + + // Location to deliver Spark driver, worker, and event logs. 
+ LogDestination *string `json:"logDestination,omitempty" tf:"log_destination,omitempty"` + + // Specifies the maximum number of worker nodes. It should be between 1 and 25000. + MaxNumberOfWorkers *float64 `json:"maxNumberOfWorkers,omitempty" tf:"max_number_of_workers,omitempty"` + + // Specifies the minimum number of worker nodes. It should be between 1 and 25000. It defaults to 1. + MinNumberOfWorkers *float64 `json:"minNumberOfWorkers,omitempty" tf:"min_number_of_workers,omitempty"` + + // Node type for the new cluster. + NodeType *string `json:"nodeType,omitempty" tf:"node_type,omitempty"` + + // User-specified Spark configuration variables key-value pairs. + // +mapType=granular + SparkConfig map[string]*string `json:"sparkConfig,omitempty" tf:"spark_config,omitempty"` + + // User-specified Spark environment variables key-value pairs. + // +mapType=granular + SparkEnvironmentVariables map[string]*string `json:"sparkEnvironmentVariables,omitempty" tf:"spark_environment_variables,omitempty"` +} + +type NewClusterConfigParameters struct { + + // Spark version of a the cluster. + // +kubebuilder:validation:Optional + ClusterVersion *string `json:"clusterVersion" tf:"cluster_version,omitempty"` + + // Tags for the cluster resource. + // +kubebuilder:validation:Optional + // +mapType=granular + CustomTags map[string]*string `json:"customTags,omitempty" tf:"custom_tags,omitempty"` + + // Driver node type for the cluster. + // +kubebuilder:validation:Optional + DriverNodeType *string `json:"driverNodeType,omitempty" tf:"driver_node_type,omitempty"` + + // User defined initialization scripts for the cluster. + // +kubebuilder:validation:Optional + InitScripts []*string `json:"initScripts,omitempty" tf:"init_scripts,omitempty"` + + // Location to deliver Spark driver, worker, and event logs. + // +kubebuilder:validation:Optional + LogDestination *string `json:"logDestination,omitempty" tf:"log_destination,omitempty"` + + // Specifies the maximum number of worker nodes. 
It should be between 1 and 25000. + // +kubebuilder:validation:Optional + MaxNumberOfWorkers *float64 `json:"maxNumberOfWorkers,omitempty" tf:"max_number_of_workers,omitempty"` + + // Specifies the minimum number of worker nodes. It should be between 1 and 25000. It defaults to 1. + // +kubebuilder:validation:Optional + MinNumberOfWorkers *float64 `json:"minNumberOfWorkers,omitempty" tf:"min_number_of_workers,omitempty"` + + // Node type for the new cluster. + // +kubebuilder:validation:Optional + NodeType *string `json:"nodeType" tf:"node_type,omitempty"` + + // User-specified Spark configuration variables key-value pairs. + // +kubebuilder:validation:Optional + // +mapType=granular + SparkConfig map[string]*string `json:"sparkConfig,omitempty" tf:"spark_config,omitempty"` + + // User-specified Spark environment variables key-value pairs. + // +kubebuilder:validation:Optional + // +mapType=granular + SparkEnvironmentVariables map[string]*string `json:"sparkEnvironmentVariables,omitempty" tf:"spark_environment_variables,omitempty"` +} + +// LinkedServiceAzureDatabricksSpec defines the desired state of LinkedServiceAzureDatabricks +type LinkedServiceAzureDatabricksSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider LinkedServiceAzureDatabricksParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider LinkedServiceAzureDatabricksInitParameters `json:"initProvider,omitempty"` +} + +// LinkedServiceAzureDatabricksStatus defines the observed state of LinkedServiceAzureDatabricks. +type LinkedServiceAzureDatabricksStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider LinkedServiceAzureDatabricksObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// LinkedServiceAzureDatabricks is the Schema for the LinkedServiceAzureDatabrickss API. Manages a Linked Service (connection) between Azure Databricks and Azure Data Factory. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type LinkedServiceAzureDatabricks struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.adbDomain) || (has(self.initProvider) && has(self.initProvider.adbDomain))",message="spec.forProvider.adbDomain is a required parameter" + Spec LinkedServiceAzureDatabricksSpec `json:"spec"` + Status LinkedServiceAzureDatabricksStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// LinkedServiceAzureDatabricksList contains a list of LinkedServiceAzureDatabrickss +type LinkedServiceAzureDatabricksList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items 
[]LinkedServiceAzureDatabricks `json:"items"` +} + +// Repository type metadata. +var ( + LinkedServiceAzureDatabricks_Kind = "LinkedServiceAzureDatabricks" + LinkedServiceAzureDatabricks_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: LinkedServiceAzureDatabricks_Kind}.String() + LinkedServiceAzureDatabricks_KindAPIVersion = LinkedServiceAzureDatabricks_Kind + "." + CRDGroupVersion.String() + LinkedServiceAzureDatabricks_GroupVersionKind = CRDGroupVersion.WithKind(LinkedServiceAzureDatabricks_Kind) +) + +func init() { + SchemeBuilder.Register(&LinkedServiceAzureDatabricks{}, &LinkedServiceAzureDatabricksList{}) +} diff --git a/apis/datafactory/v1beta2/zz_linkedserviceazurefilestorage_terraformed.go b/apis/datafactory/v1beta2/zz_linkedserviceazurefilestorage_terraformed.go new file mode 100755 index 000000000..4bc6774ea --- /dev/null +++ b/apis/datafactory/v1beta2/zz_linkedserviceazurefilestorage_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this LinkedServiceAzureFileStorage +func (mg *LinkedServiceAzureFileStorage) GetTerraformResourceType() string { + return "azurerm_data_factory_linked_service_azure_file_storage" +} + +// GetConnectionDetailsMapping for this LinkedServiceAzureFileStorage +func (tr *LinkedServiceAzureFileStorage) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"connection_string": "spec.forProvider.connectionStringSecretRef", "password": "spec.forProvider.passwordSecretRef"} +} + +// GetObservation of this LinkedServiceAzureFileStorage +func (tr *LinkedServiceAzureFileStorage) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this LinkedServiceAzureFileStorage +func (tr *LinkedServiceAzureFileStorage) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this LinkedServiceAzureFileStorage +func (tr *LinkedServiceAzureFileStorage) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this LinkedServiceAzureFileStorage +func (tr *LinkedServiceAzureFileStorage) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this LinkedServiceAzureFileStorage +func (tr *LinkedServiceAzureFileStorage) 
SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this LinkedServiceAzureFileStorage +func (tr *LinkedServiceAzureFileStorage) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this LinkedServiceAzureFileStorage +func (tr *LinkedServiceAzureFileStorage) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this LinkedServiceAzureFileStorage using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *LinkedServiceAzureFileStorage) LateInitialize(attrs []byte) (bool, error) { + params := &LinkedServiceAzureFileStorageParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *LinkedServiceAzureFileStorage) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/datafactory/v1beta2/zz_linkedserviceazurefilestorage_types.go b/apis/datafactory/v1beta2/zz_linkedserviceazurefilestorage_types.go new file mode 100755 index 000000000..c2525d3f5 --- /dev/null +++ b/apis/datafactory/v1beta2/zz_linkedserviceazurefilestorage_types.go @@ -0,0 +1,237 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type LinkedServiceAzureFileStorageInitParameters struct { + + // A map of additional properties to associate with the Data Factory Linked Service. + // +mapType=granular + AdditionalProperties map[string]*string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"` + + // List of tags that can be used for describing the Data Factory Linked Service. + Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // The description for the Data Factory Linked Service. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The name of the file share. 
+ FileShare *string `json:"fileShare,omitempty" tf:"file_share,omitempty"` + + // The Host name of the server. + Host *string `json:"host,omitempty" tf:"host,omitempty"` + + // The integration runtime reference to associate with the Data Factory Linked Service. + IntegrationRuntimeName *string `json:"integrationRuntimeName,omitempty" tf:"integration_runtime_name,omitempty"` + + // A key_vault_password block as defined below. Use this argument to store Azure File Storage password in an existing Key Vault. It needs an existing Key Vault Data Factory Linked Service. + KeyVaultPassword *LinkedServiceAzureFileStorageKeyVaultPasswordInitParameters `json:"keyVaultPassword,omitempty" tf:"key_vault_password,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The user ID to log in the server. + UserID *string `json:"userId,omitempty" tf:"user_id,omitempty"` +} + +type LinkedServiceAzureFileStorageKeyVaultPasswordInitParameters struct { + + // Specifies the name of an existing Key Vault Data Factory Linked Service. + LinkedServiceName *string `json:"linkedServiceName,omitempty" tf:"linked_service_name,omitempty"` + + // Specifies the secret name in Azure Key Vault that stores Azure File Storage password. + SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` +} + +type LinkedServiceAzureFileStorageKeyVaultPasswordObservation struct { + + // Specifies the name of an existing Key Vault Data Factory Linked Service. + LinkedServiceName *string `json:"linkedServiceName,omitempty" tf:"linked_service_name,omitempty"` + + // Specifies the secret name in Azure Key Vault that stores Azure File Storage password. 
+ SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` +} + +type LinkedServiceAzureFileStorageKeyVaultPasswordParameters struct { + + // Specifies the name of an existing Key Vault Data Factory Linked Service. + // +kubebuilder:validation:Optional + LinkedServiceName *string `json:"linkedServiceName" tf:"linked_service_name,omitempty"` + + // Specifies the secret name in Azure Key Vault that stores Azure File Storage password. + // +kubebuilder:validation:Optional + SecretName *string `json:"secretName" tf:"secret_name,omitempty"` +} + +type LinkedServiceAzureFileStorageObservation struct { + + // A map of additional properties to associate with the Data Factory Linked Service. + // +mapType=granular + AdditionalProperties map[string]*string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"` + + // List of tags that can be used for describing the Data Factory Linked Service. + Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // The Data Factory ID in which to associate the Linked Service with. Changing this forces a new resource. + DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` + + // The description for the Data Factory Linked Service. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The name of the file share. + FileShare *string `json:"fileShare,omitempty" tf:"file_share,omitempty"` + + // The Host name of the server. + Host *string `json:"host,omitempty" tf:"host,omitempty"` + + // The ID of the Data Factory Linked Service. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The integration runtime reference to associate with the Data Factory Linked Service. + IntegrationRuntimeName *string `json:"integrationRuntimeName,omitempty" tf:"integration_runtime_name,omitempty"` + + // A key_vault_password block as defined below. 
Use this argument to store Azure File Storage password in an existing Key Vault. It needs an existing Key Vault Data Factory Linked Service. + KeyVaultPassword *LinkedServiceAzureFileStorageKeyVaultPasswordObservation `json:"keyVaultPassword,omitempty" tf:"key_vault_password,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The user ID to log in the server. + UserID *string `json:"userId,omitempty" tf:"user_id,omitempty"` +} + +type LinkedServiceAzureFileStorageParameters struct { + + // A map of additional properties to associate with the Data Factory Linked Service. + // +kubebuilder:validation:Optional + // +mapType=granular + AdditionalProperties map[string]*string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"` + + // List of tags that can be used for describing the Data Factory Linked Service. + // +kubebuilder:validation:Optional + Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // The connection string. + // +kubebuilder:validation:Optional + ConnectionStringSecretRef v1.SecretKeySelector `json:"connectionStringSecretRef" tf:"-"` + + // The Data Factory ID in which to associate the Linked Service with. Changing this forces a new resource. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta2.Factory + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` + + // Reference to a Factory in datafactory to populate dataFactoryId. + // +kubebuilder:validation:Optional + DataFactoryIDRef *v1.Reference `json:"dataFactoryIdRef,omitempty" tf:"-"` + + // Selector for a Factory in datafactory to populate dataFactoryId. 
+ // +kubebuilder:validation:Optional + DataFactoryIDSelector *v1.Selector `json:"dataFactoryIdSelector,omitempty" tf:"-"` + + // The description for the Data Factory Linked Service. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The name of the file share. + // +kubebuilder:validation:Optional + FileShare *string `json:"fileShare,omitempty" tf:"file_share,omitempty"` + + // The Host name of the server. + // +kubebuilder:validation:Optional + Host *string `json:"host,omitempty" tf:"host,omitempty"` + + // The integration runtime reference to associate with the Data Factory Linked Service. + // +kubebuilder:validation:Optional + IntegrationRuntimeName *string `json:"integrationRuntimeName,omitempty" tf:"integration_runtime_name,omitempty"` + + // A key_vault_password block as defined below. Use this argument to store Azure File Storage password in an existing Key Vault. It needs an existing Key Vault Data Factory Linked Service. + // +kubebuilder:validation:Optional + KeyVaultPassword *LinkedServiceAzureFileStorageKeyVaultPasswordParameters `json:"keyVaultPassword,omitempty" tf:"key_vault_password,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service. + // +kubebuilder:validation:Optional + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The password to log in the server. + // +kubebuilder:validation:Optional + PasswordSecretRef *v1.SecretKeySelector `json:"passwordSecretRef,omitempty" tf:"-"` + + // The user ID to log in the server. 
+ // +kubebuilder:validation:Optional
+ UserID *string `json:"userId,omitempty" tf:"user_id,omitempty"`
+}
+
+// LinkedServiceAzureFileStorageSpec defines the desired state of LinkedServiceAzureFileStorage
+type LinkedServiceAzureFileStorageSpec struct {
+ v1.ResourceSpec `json:",inline"`
+ ForProvider LinkedServiceAzureFileStorageParameters `json:"forProvider"`
+ // THIS IS A BETA FIELD. It will be honored
+ // unless the Management Policies feature flag is disabled.
+ // InitProvider holds the same fields as ForProvider, with the exception
+ // of Identifier and other resource reference fields. The fields that are
+ // in InitProvider are merged into ForProvider when the resource is created.
+ // The same fields are also added to the terraform ignore_changes hook, to
+ // avoid updating them after creation. This is useful for fields that are
+ // required on creation, but we do not desire to update them after creation,
+ // for example because an external controller is managing them, like an
+ // autoscaler.
+ InitProvider LinkedServiceAzureFileStorageInitParameters `json:"initProvider,omitempty"`
+}
+
+// LinkedServiceAzureFileStorageStatus defines the observed state of LinkedServiceAzureFileStorage.
+type LinkedServiceAzureFileStorageStatus struct {
+ v1.ResourceStatus `json:",inline"`
+ AtProvider LinkedServiceAzureFileStorageObservation `json:"atProvider,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+
+// LinkedServiceAzureFileStorage is the Schema for the LinkedServiceAzureFileStorages API. Manages a Linked Service (connection) between an Azure File Storage Account and Azure Data Factory. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type LinkedServiceAzureFileStorage struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.connectionStringSecretRef)",message="spec.forProvider.connectionStringSecretRef is a required parameter" + Spec LinkedServiceAzureFileStorageSpec `json:"spec"` + Status LinkedServiceAzureFileStorageStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// LinkedServiceAzureFileStorageList contains a list of LinkedServiceAzureFileStorages +type LinkedServiceAzureFileStorageList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []LinkedServiceAzureFileStorage `json:"items"` +} + +// Repository type metadata. +var ( + LinkedServiceAzureFileStorage_Kind = "LinkedServiceAzureFileStorage" + LinkedServiceAzureFileStorage_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: LinkedServiceAzureFileStorage_Kind}.String() + LinkedServiceAzureFileStorage_KindAPIVersion = LinkedServiceAzureFileStorage_Kind + "." 
+ CRDGroupVersion.String() + LinkedServiceAzureFileStorage_GroupVersionKind = CRDGroupVersion.WithKind(LinkedServiceAzureFileStorage_Kind) +) + +func init() { + SchemeBuilder.Register(&LinkedServiceAzureFileStorage{}, &LinkedServiceAzureFileStorageList{}) +} diff --git a/apis/datafactory/v1beta2/zz_linkedserviceazurefunction_terraformed.go b/apis/datafactory/v1beta2/zz_linkedserviceazurefunction_terraformed.go new file mode 100755 index 000000000..68e8ec2b4 --- /dev/null +++ b/apis/datafactory/v1beta2/zz_linkedserviceazurefunction_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this LinkedServiceAzureFunction +func (mg *LinkedServiceAzureFunction) GetTerraformResourceType() string { + return "azurerm_data_factory_linked_service_azure_function" +} + +// GetConnectionDetailsMapping for this LinkedServiceAzureFunction +func (tr *LinkedServiceAzureFunction) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"key": "spec.forProvider.keySecretRef"} +} + +// GetObservation of this LinkedServiceAzureFunction +func (tr *LinkedServiceAzureFunction) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this LinkedServiceAzureFunction +func (tr *LinkedServiceAzureFunction) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform 
resource of this LinkedServiceAzureFunction
+func (tr *LinkedServiceAzureFunction) GetID() string {
+ if tr.Status.AtProvider.ID == nil {
+ return ""
+ }
+ return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this LinkedServiceAzureFunction
+func (tr *LinkedServiceAzureFunction) GetParameters() (map[string]any, error) {
+ p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+ if err != nil {
+ return nil, err
+ }
+ base := map[string]any{}
+ return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this LinkedServiceAzureFunction
+func (tr *LinkedServiceAzureFunction) SetParameters(params map[string]any) error {
+ p, err := json.TFParser.Marshal(params)
+ if err != nil {
+ return err
+ }
+ return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this LinkedServiceAzureFunction
+func (tr *LinkedServiceAzureFunction) GetInitParameters() (map[string]any, error) {
+ p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+ if err != nil {
+ return nil, err
+ }
+ base := map[string]any{}
+ return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this LinkedServiceAzureFunction
+func (tr *LinkedServiceAzureFunction) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+ params, err := tr.GetParameters()
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+ }
+ if !shouldMergeInitProvider {
+ return params, nil
+ }
+
+ initParams, err := tr.GetInitParameters()
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+ }
+
+ // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+ // slices from the initProvider to forProvider. As it also sets
+ // overwrite to true, we need to set it back to false, we don't
+ // want to overwrite the forProvider fields with the initProvider
+ // fields. 
+ err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+ c.Overwrite = false
+ })
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+ }
+
+ return params, nil
+}
+
+// LateInitialize this LinkedServiceAzureFunction using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *LinkedServiceAzureFunction) LateInitialize(attrs []byte) (bool, error) {
+ params := &LinkedServiceAzureFunctionParameters{}
+ if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+ return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+ }
+ opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+ li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params)
+}
+
+// GetTerraformSchemaVersion returns the associated Terraform schema version
+func (tr *LinkedServiceAzureFunction) GetTerraformSchemaVersion() int {
+ return 0
+}
diff --git a/apis/datafactory/v1beta2/zz_linkedserviceazurefunction_types.go b/apis/datafactory/v1beta2/zz_linkedserviceazurefunction_types.go
new file mode 100755
index 000000000..8e96c93d3
--- /dev/null
+++ b/apis/datafactory/v1beta2/zz_linkedserviceazurefunction_types.go
@@ -0,0 +1,213 @@
+// SPDX-FileCopyrightText: 2024 The Crossplane Authors
+//
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by upjet. DO NOT EDIT.
+
+package v1beta2
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+
+ v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
+)
+
+type KeyVaultKeyInitParameters struct {
+
+ // Specifies the name of an existing Key Vault Data Factory Linked Service. 
+ LinkedServiceName *string `json:"linkedServiceName,omitempty" tf:"linked_service_name,omitempty"` + + // Specifies the secret name in Azure Key Vault that stores the system key of the Azure Function. + SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` +} + +type KeyVaultKeyObservation struct { + + // Specifies the name of an existing Key Vault Data Factory Linked Service. + LinkedServiceName *string `json:"linkedServiceName,omitempty" tf:"linked_service_name,omitempty"` + + // Specifies the secret name in Azure Key Vault that stores the system key of the Azure Function. + SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` +} + +type KeyVaultKeyParameters struct { + + // Specifies the name of an existing Key Vault Data Factory Linked Service. + // +kubebuilder:validation:Optional + LinkedServiceName *string `json:"linkedServiceName" tf:"linked_service_name,omitempty"` + + // Specifies the secret name in Azure Key Vault that stores the system key of the Azure Function. + // +kubebuilder:validation:Optional + SecretName *string `json:"secretName" tf:"secret_name,omitempty"` +} + +type LinkedServiceAzureFunctionInitParameters struct { + + // A map of additional properties to associate with the Data Factory Linked Service. + // +mapType=granular + AdditionalProperties map[string]*string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"` + + // List of tags that can be used for describing the Data Factory Linked Service. + Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // The description for the Data Factory Linked Service. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The integration runtime reference to associate with the Data Factory Linked Service. + IntegrationRuntimeName *string `json:"integrationRuntimeName,omitempty" tf:"integration_runtime_name,omitempty"` + + // A key_vault_key block as defined below. 
Use this Argument to store the system key of the Azure Function in an existing Key Vault. It needs an existing Key Vault Data Factory Linked Service. Exactly one of either key or key_vault_key is required. + KeyVaultKey *KeyVaultKeyInitParameters `json:"keyVaultKey,omitempty" tf:"key_vault_key,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The url of the Azure Function. + URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +type LinkedServiceAzureFunctionObservation struct { + + // A map of additional properties to associate with the Data Factory Linked Service. + // +mapType=granular + AdditionalProperties map[string]*string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"` + + // List of tags that can be used for describing the Data Factory Linked Service. + Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // The Data Factory ID in which to associate the Linked Service with. Changing this forces a new resource. + DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` + + // The description for the Data Factory Linked Service. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The ID of the Data Factory Linked Service. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The integration runtime reference to associate with the Data Factory Linked Service. + IntegrationRuntimeName *string `json:"integrationRuntimeName,omitempty" tf:"integration_runtime_name,omitempty"` + + // A key_vault_key block as defined below. Use this Argument to store the system key of the Azure Function in an existing Key Vault. It needs an existing Key Vault Data Factory Linked Service. Exactly one of either key or key_vault_key is required. 
+ KeyVaultKey *KeyVaultKeyObservation `json:"keyVaultKey,omitempty" tf:"key_vault_key,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The url of the Azure Function. + URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +type LinkedServiceAzureFunctionParameters struct { + + // A map of additional properties to associate with the Data Factory Linked Service. + // +kubebuilder:validation:Optional + // +mapType=granular + AdditionalProperties map[string]*string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"` + + // List of tags that can be used for describing the Data Factory Linked Service. + // +kubebuilder:validation:Optional + Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // The Data Factory ID in which to associate the Linked Service with. Changing this forces a new resource. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta2.Factory + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` + + // Reference to a Factory in datafactory to populate dataFactoryId. + // +kubebuilder:validation:Optional + DataFactoryIDRef *v1.Reference `json:"dataFactoryIdRef,omitempty" tf:"-"` + + // Selector for a Factory in datafactory to populate dataFactoryId. + // +kubebuilder:validation:Optional + DataFactoryIDSelector *v1.Selector `json:"dataFactoryIdSelector,omitempty" tf:"-"` + + // The description for the Data Factory Linked Service. 
+ // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The integration runtime reference to associate with the Data Factory Linked Service. + // +kubebuilder:validation:Optional + IntegrationRuntimeName *string `json:"integrationRuntimeName,omitempty" tf:"integration_runtime_name,omitempty"` + + // The system key of the Azure Function. Exactly one of either key or key_vault_key is required + // +kubebuilder:validation:Optional + KeySecretRef *v1.SecretKeySelector `json:"keySecretRef,omitempty" tf:"-"` + + // A key_vault_key block as defined below. Use this Argument to store the system key of the Azure Function in an existing Key Vault. It needs an existing Key Vault Data Factory Linked Service. Exactly one of either key or key_vault_key is required. + // +kubebuilder:validation:Optional + KeyVaultKey *KeyVaultKeyParameters `json:"keyVaultKey,omitempty" tf:"key_vault_key,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service. + // +kubebuilder:validation:Optional + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The url of the Azure Function. + // +kubebuilder:validation:Optional + URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +// LinkedServiceAzureFunctionSpec defines the desired state of LinkedServiceAzureFunction +type LinkedServiceAzureFunctionSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider LinkedServiceAzureFunctionParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. 
+ // The same fields are also added to the terraform ignore_changes hook, to
+ // avoid updating them after creation. This is useful for fields that are
+ // required on creation, but we do not desire to update them after creation,
+ // for example because an external controller is managing them, like an
+ // autoscaler.
+ InitProvider LinkedServiceAzureFunctionInitParameters `json:"initProvider,omitempty"`
+}
+
+// LinkedServiceAzureFunctionStatus defines the observed state of LinkedServiceAzureFunction.
+type LinkedServiceAzureFunctionStatus struct {
+ v1.ResourceStatus `json:",inline"`
+ AtProvider LinkedServiceAzureFunctionObservation `json:"atProvider,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+
+// LinkedServiceAzureFunction is the Schema for the LinkedServiceAzureFunctions API. Manages a Linked Service (connection) between an Azure Function Account and Azure Data Factory.
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
+// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name"
+// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp"
+// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure}
+type LinkedServiceAzureFunction struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+ // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.url) || (has(self.initProvider) && has(self.initProvider.url))",message="spec.forProvider.url is a required parameter"
+ Spec LinkedServiceAzureFunctionSpec `json:"spec"`
+ Status LinkedServiceAzureFunctionStatus 
`json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// LinkedServiceAzureFunctionList contains a list of LinkedServiceAzureFunctions +type LinkedServiceAzureFunctionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []LinkedServiceAzureFunction `json:"items"` +} + +// Repository type metadata. +var ( + LinkedServiceAzureFunction_Kind = "LinkedServiceAzureFunction" + LinkedServiceAzureFunction_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: LinkedServiceAzureFunction_Kind}.String() + LinkedServiceAzureFunction_KindAPIVersion = LinkedServiceAzureFunction_Kind + "." + CRDGroupVersion.String() + LinkedServiceAzureFunction_GroupVersionKind = CRDGroupVersion.WithKind(LinkedServiceAzureFunction_Kind) +) + +func init() { + SchemeBuilder.Register(&LinkedServiceAzureFunction{}, &LinkedServiceAzureFunctionList{}) +} diff --git a/apis/datafactory/v1beta2/zz_linkedserviceazuresqldatabase_terraformed.go b/apis/datafactory/v1beta2/zz_linkedserviceazuresqldatabase_terraformed.go new file mode 100755 index 000000000..0eaec3c38 --- /dev/null +++ b/apis/datafactory/v1beta2/zz_linkedserviceazuresqldatabase_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this LinkedServiceAzureSQLDatabase +func (mg *LinkedServiceAzureSQLDatabase) GetTerraformResourceType() string { + return "azurerm_data_factory_linked_service_azure_sql_database" +} + +// GetConnectionDetailsMapping for this LinkedServiceAzureSQLDatabase +func (tr *LinkedServiceAzureSQLDatabase) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this LinkedServiceAzureSQLDatabase +func (tr *LinkedServiceAzureSQLDatabase) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this LinkedServiceAzureSQLDatabase +func (tr *LinkedServiceAzureSQLDatabase) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this LinkedServiceAzureSQLDatabase +func (tr *LinkedServiceAzureSQLDatabase) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this LinkedServiceAzureSQLDatabase +func (tr *LinkedServiceAzureSQLDatabase) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this LinkedServiceAzureSQLDatabase +func (tr *LinkedServiceAzureSQLDatabase) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return 
json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this LinkedServiceAzureSQLDatabase
+func (tr *LinkedServiceAzureSQLDatabase) GetInitParameters() (map[string]any, error) {
+ p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+ if err != nil {
+ return nil, err
+ }
+ base := map[string]any{}
+ return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this LinkedServiceAzureSQLDatabase
+func (tr *LinkedServiceAzureSQLDatabase) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+ params, err := tr.GetParameters()
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+ }
+ if !shouldMergeInitProvider {
+ return params, nil
+ }
+
+ initParams, err := tr.GetInitParameters()
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+ }
+
+ // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+ // slices from the initProvider to forProvider. As it also sets
+ // overwrite to true, we need to set it back to false, we don't
+ // want to overwrite the forProvider fields with the initProvider
+ // fields.
+ err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+ c.Overwrite = false
+ })
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+ }
+
+ return params, nil
+}
+
+// LateInitialize this LinkedServiceAzureSQLDatabase using its observed tfState.
+// returns True if there are any spec changes for the resource. 
+func (tr *LinkedServiceAzureSQLDatabase) LateInitialize(attrs []byte) (bool, error) { + params := &LinkedServiceAzureSQLDatabaseParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *LinkedServiceAzureSQLDatabase) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/datafactory/v1beta2/zz_linkedserviceazuresqldatabase_types.go b/apis/datafactory/v1beta2/zz_linkedserviceazuresqldatabase_types.go new file mode 100755 index 000000000..57d80591b --- /dev/null +++ b/apis/datafactory/v1beta2/zz_linkedserviceazuresqldatabase_types.go @@ -0,0 +1,287 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type KeyVaultConnectionStringInitParameters struct { + + // Specifies the name of an existing Key Vault Data Factory Linked Service. + LinkedServiceName *string `json:"linkedServiceName,omitempty" tf:"linked_service_name,omitempty"` + + // Specifies the secret name in Azure Key Vault that stores SQL Server connection string. + SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` +} + +type KeyVaultConnectionStringObservation struct { + + // Specifies the name of an existing Key Vault Data Factory Linked Service. 
+ LinkedServiceName *string `json:"linkedServiceName,omitempty" tf:"linked_service_name,omitempty"` + + // Specifies the secret name in Azure Key Vault that stores SQL Server connection string. + SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` +} + +type KeyVaultConnectionStringParameters struct { + + // Specifies the name of an existing Key Vault Data Factory Linked Service. + // +kubebuilder:validation:Optional + LinkedServiceName *string `json:"linkedServiceName" tf:"linked_service_name,omitempty"` + + // Specifies the secret name in Azure Key Vault that stores SQL Server connection string. + // +kubebuilder:validation:Optional + SecretName *string `json:"secretName" tf:"secret_name,omitempty"` +} + +type LinkedServiceAzureSQLDatabaseInitParameters struct { + + // A map of additional properties to associate with the Data Factory Linked Service Azure SQL Database. + // +mapType=granular + AdditionalProperties map[string]*string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"` + + // List of tags that can be used for describing the Data Factory Linked Service Azure SQL Database. + Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // The connection string in which to authenticate with Azure SQL Database. Exactly one of either connection_string or key_vault_connection_string is required. + ConnectionString *string `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // The description for the Data Factory Linked Service Azure SQL Database. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The integration runtime reference to associate with the Data Factory Linked Service Azure SQL Database. + IntegrationRuntimeName *string `json:"integrationRuntimeName,omitempty" tf:"integration_runtime_name,omitempty"` + + // A key_vault_connection_string block as defined below. 
Use this argument to store Azure SQL Database connection string in an existing Key Vault. It needs an existing Key Vault Data Factory Linked Service. Exactly one of either connection_string or key_vault_connection_string is required. + KeyVaultConnectionString *KeyVaultConnectionStringInitParameters `json:"keyVaultConnectionString,omitempty" tf:"key_vault_connection_string,omitempty"` + + // A key_vault_password block as defined below. Use this argument to store SQL Server password in an existing Key Vault. It needs an existing Key Vault Data Factory Linked Service. + KeyVaultPassword *LinkedServiceAzureSQLDatabaseKeyVaultPasswordInitParameters `json:"keyVaultPassword,omitempty" tf:"key_vault_password,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service Azure SQL Database. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The service principal id in which to authenticate against the Azure SQL Database. Required if service_principal_key is set. + ServicePrincipalID *string `json:"servicePrincipalId,omitempty" tf:"service_principal_id,omitempty"` + + // The service principal key in which to authenticate against the Azure SQL Database. Required if service_principal_id is set. + ServicePrincipalKey *string `json:"servicePrincipalKey,omitempty" tf:"service_principal_key,omitempty"` + + // The tenant id or name in which to authenticate against the Azure SQL Database. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Whether to use the Data Factory's managed identity to authenticate against the Azure SQL Database. Incompatible with service_principal_id and service_principal_key + UseManagedIdentity *bool `json:"useManagedIdentity,omitempty" tf:"use_managed_identity,omitempty"` +} + +type LinkedServiceAzureSQLDatabaseKeyVaultPasswordInitParameters struct { + + // Specifies the name of an existing Key Vault Data Factory Linked Service. 
+ LinkedServiceName *string `json:"linkedServiceName,omitempty" tf:"linked_service_name,omitempty"` + + // Specifies the secret name in Azure Key Vault that stores SQL Server password. + SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` +} + +type LinkedServiceAzureSQLDatabaseKeyVaultPasswordObservation struct { + + // Specifies the name of an existing Key Vault Data Factory Linked Service. + LinkedServiceName *string `json:"linkedServiceName,omitempty" tf:"linked_service_name,omitempty"` + + // Specifies the secret name in Azure Key Vault that stores SQL Server password. + SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` +} + +type LinkedServiceAzureSQLDatabaseKeyVaultPasswordParameters struct { + + // Specifies the name of an existing Key Vault Data Factory Linked Service. + // +kubebuilder:validation:Optional + LinkedServiceName *string `json:"linkedServiceName" tf:"linked_service_name,omitempty"` + + // Specifies the secret name in Azure Key Vault that stores SQL Server password. + // +kubebuilder:validation:Optional + SecretName *string `json:"secretName" tf:"secret_name,omitempty"` +} + +type LinkedServiceAzureSQLDatabaseObservation struct { + + // A map of additional properties to associate with the Data Factory Linked Service Azure SQL Database. + // +mapType=granular + AdditionalProperties map[string]*string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"` + + // List of tags that can be used for describing the Data Factory Linked Service Azure SQL Database. + Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // The connection string in which to authenticate with Azure SQL Database. Exactly one of either connection_string or key_vault_connection_string is required. + ConnectionString *string `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // The Data Factory ID in which to associate the Linked Service with. 
Changing this forces a new resource. + DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` + + // The description for the Data Factory Linked Service Azure SQL Database. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The ID of the Data Factory Azure SQL Database Linked Service. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The integration runtime reference to associate with the Data Factory Linked Service Azure SQL Database. + IntegrationRuntimeName *string `json:"integrationRuntimeName,omitempty" tf:"integration_runtime_name,omitempty"` + + // A key_vault_connection_string block as defined below. Use this argument to store Azure SQL Database connection string in an existing Key Vault. It needs an existing Key Vault Data Factory Linked Service. Exactly one of either connection_string or key_vault_connection_string is required. + KeyVaultConnectionString *KeyVaultConnectionStringObservation `json:"keyVaultConnectionString,omitempty" tf:"key_vault_connection_string,omitempty"` + + // A key_vault_password block as defined below. Use this argument to store SQL Server password in an existing Key Vault. It needs an existing Key Vault Data Factory Linked Service. + KeyVaultPassword *LinkedServiceAzureSQLDatabaseKeyVaultPasswordObservation `json:"keyVaultPassword,omitempty" tf:"key_vault_password,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service Azure SQL Database. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The service principal id in which to authenticate against the Azure SQL Database. Required if service_principal_key is set. + ServicePrincipalID *string `json:"servicePrincipalId,omitempty" tf:"service_principal_id,omitempty"` + + // The service principal key in which to authenticate against the Azure SQL Database. Required if service_principal_id is set. 
+ ServicePrincipalKey *string `json:"servicePrincipalKey,omitempty" tf:"service_principal_key,omitempty"` + + // The tenant id or name in which to authenticate against the Azure SQL Database. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Whether to use the Data Factory's managed identity to authenticate against the Azure SQL Database. Incompatible with service_principal_id and service_principal_key + UseManagedIdentity *bool `json:"useManagedIdentity,omitempty" tf:"use_managed_identity,omitempty"` +} + +type LinkedServiceAzureSQLDatabaseParameters struct { + + // A map of additional properties to associate with the Data Factory Linked Service Azure SQL Database. + // +kubebuilder:validation:Optional + // +mapType=granular + AdditionalProperties map[string]*string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"` + + // List of tags that can be used for describing the Data Factory Linked Service Azure SQL Database. + // +kubebuilder:validation:Optional + Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // The connection string in which to authenticate with Azure SQL Database. Exactly one of either connection_string or key_vault_connection_string is required. + // +kubebuilder:validation:Optional + ConnectionString *string `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // The Data Factory ID in which to associate the Linked Service with. Changing this forces a new resource. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta2.Factory + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` + + // Reference to a Factory in datafactory to populate dataFactoryId. 
+ // +kubebuilder:validation:Optional + DataFactoryIDRef *v1.Reference `json:"dataFactoryIdRef,omitempty" tf:"-"` + + // Selector for a Factory in datafactory to populate dataFactoryId. + // +kubebuilder:validation:Optional + DataFactoryIDSelector *v1.Selector `json:"dataFactoryIdSelector,omitempty" tf:"-"` + + // The description for the Data Factory Linked Service Azure SQL Database. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The integration runtime reference to associate with the Data Factory Linked Service Azure SQL Database. + // +kubebuilder:validation:Optional + IntegrationRuntimeName *string `json:"integrationRuntimeName,omitempty" tf:"integration_runtime_name,omitempty"` + + // A key_vault_connection_string block as defined below. Use this argument to store Azure SQL Database connection string in an existing Key Vault. It needs an existing Key Vault Data Factory Linked Service. Exactly one of either connection_string or key_vault_connection_string is required. + // +kubebuilder:validation:Optional + KeyVaultConnectionString *KeyVaultConnectionStringParameters `json:"keyVaultConnectionString,omitempty" tf:"key_vault_connection_string,omitempty"` + + // A key_vault_password block as defined below. Use this argument to store SQL Server password in an existing Key Vault. It needs an existing Key Vault Data Factory Linked Service. + // +kubebuilder:validation:Optional + KeyVaultPassword *LinkedServiceAzureSQLDatabaseKeyVaultPasswordParameters `json:"keyVaultPassword,omitempty" tf:"key_vault_password,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service Azure SQL Database. + // +kubebuilder:validation:Optional + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The service principal id in which to authenticate against the Azure SQL Database. Required if service_principal_key is set. 
+ // +kubebuilder:validation:Optional + ServicePrincipalID *string `json:"servicePrincipalId,omitempty" tf:"service_principal_id,omitempty"` + + // The service principal key in which to authenticate against the Azure SQL Database. Required if service_principal_id is set. + // +kubebuilder:validation:Optional + ServicePrincipalKey *string `json:"servicePrincipalKey,omitempty" tf:"service_principal_key,omitempty"` + + // The tenant id or name in which to authenticate against the Azure SQL Database. + // +kubebuilder:validation:Optional + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Whether to use the Data Factory's managed identity to authenticate against the Azure SQL Database. Incompatible with service_principal_id and service_principal_key + // +kubebuilder:validation:Optional + UseManagedIdentity *bool `json:"useManagedIdentity,omitempty" tf:"use_managed_identity,omitempty"` +} + +// LinkedServiceAzureSQLDatabaseSpec defines the desired state of LinkedServiceAzureSQLDatabase +type LinkedServiceAzureSQLDatabaseSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider LinkedServiceAzureSQLDatabaseParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider LinkedServiceAzureSQLDatabaseInitParameters `json:"initProvider,omitempty"` +} + +// LinkedServiceAzureSQLDatabaseStatus defines the observed state of LinkedServiceAzureSQLDatabase. +type LinkedServiceAzureSQLDatabaseStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider LinkedServiceAzureSQLDatabaseObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// LinkedServiceAzureSQLDatabase is the Schema for the LinkedServiceAzureSQLDatabases API. Manages a Linked Service (connection) between Azure SQL Database and Azure Data Factory. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type LinkedServiceAzureSQLDatabase struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec LinkedServiceAzureSQLDatabaseSpec `json:"spec"` + Status LinkedServiceAzureSQLDatabaseStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// LinkedServiceAzureSQLDatabaseList contains a list of LinkedServiceAzureSQLDatabases +type LinkedServiceAzureSQLDatabaseList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []LinkedServiceAzureSQLDatabase `json:"items"` +} + +// Repository type metadata. 
+var ( + LinkedServiceAzureSQLDatabase_Kind = "LinkedServiceAzureSQLDatabase" + LinkedServiceAzureSQLDatabase_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: LinkedServiceAzureSQLDatabase_Kind}.String() + LinkedServiceAzureSQLDatabase_KindAPIVersion = LinkedServiceAzureSQLDatabase_Kind + "." + CRDGroupVersion.String() + LinkedServiceAzureSQLDatabase_GroupVersionKind = CRDGroupVersion.WithKind(LinkedServiceAzureSQLDatabase_Kind) +) + +func init() { + SchemeBuilder.Register(&LinkedServiceAzureSQLDatabase{}, &LinkedServiceAzureSQLDatabaseList{}) +} diff --git a/apis/datafactory/v1beta2/zz_linkedserviceodata_terraformed.go b/apis/datafactory/v1beta2/zz_linkedserviceodata_terraformed.go new file mode 100755 index 000000000..c54d9696a --- /dev/null +++ b/apis/datafactory/v1beta2/zz_linkedserviceodata_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this LinkedServiceOData +func (mg *LinkedServiceOData) GetTerraformResourceType() string { + return "azurerm_data_factory_linked_service_odata" +} + +// GetConnectionDetailsMapping for this LinkedServiceOData +func (tr *LinkedServiceOData) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"basic_authentication[*].password": "spec.forProvider.basicAuthentication[*].passwordSecretRef"} +} + +// GetObservation of this LinkedServiceOData +func (tr *LinkedServiceOData) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this LinkedServiceOData +func 
(tr *LinkedServiceOData) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this LinkedServiceOData +func (tr *LinkedServiceOData) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this LinkedServiceOData +func (tr *LinkedServiceOData) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this LinkedServiceOData +func (tr *LinkedServiceOData) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this LinkedServiceOData +func (tr *LinkedServiceOData) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this LinkedServiceOData +func (tr *LinkedServiceOData) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider.
As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this LinkedServiceOData using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *LinkedServiceOData) LateInitialize(attrs []byte) (bool, error) { + params := &LinkedServiceODataParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *LinkedServiceOData) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/datafactory/v1beta2/zz_linkedserviceodata_types.go b/apis/datafactory/v1beta2/zz_linkedserviceodata_types.go new file mode 100755 index 000000000..ca7c2a5b9 --- /dev/null +++ b/apis/datafactory/v1beta2/zz_linkedserviceodata_types.go @@ -0,0 +1,203 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT.
+ +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type BasicAuthenticationInitParameters struct { + + // The username which can be used to authenticate to the OData endpoint. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type BasicAuthenticationObservation struct { + + // The username which can be used to authenticate to the OData endpoint. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type BasicAuthenticationParameters struct { + + // The password associated with the username, which can be used to authenticate to the OData endpoint. + // +kubebuilder:validation:Required + PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` + + // The username which can be used to authenticate to the OData endpoint. + // +kubebuilder:validation:Optional + Username *string `json:"username" tf:"username,omitempty"` +} + +type LinkedServiceODataInitParameters struct { + + // A map of additional properties to associate with the Data Factory Linked Service OData. + // +mapType=granular + AdditionalProperties map[string]*string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"` + + // List of tags that can be used for describing the Data Factory Linked Service OData. + Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // A basic_authentication block as defined below. + BasicAuthentication *BasicAuthenticationInitParameters `json:"basicAuthentication,omitempty" tf:"basic_authentication,omitempty"` + + // The description for the Data Factory Linked Service OData. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The integration runtime reference to associate with the Data Factory Linked Service OData. 
+ IntegrationRuntimeName *string `json:"integrationRuntimeName,omitempty" tf:"integration_runtime_name,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service OData. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The URL of the OData service endpoint. + URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +type LinkedServiceODataObservation struct { + + // A map of additional properties to associate with the Data Factory Linked Service OData. + // +mapType=granular + AdditionalProperties map[string]*string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"` + + // List of tags that can be used for describing the Data Factory Linked Service OData. + Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // A basic_authentication block as defined below. + BasicAuthentication *BasicAuthenticationObservation `json:"basicAuthentication,omitempty" tf:"basic_authentication,omitempty"` + + // The Data Factory ID in which to associate the Linked Service with. Changing this forces a new resource. + DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` + + // The description for the Data Factory Linked Service OData. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The ID of the Data Factory OData Linked Service. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The integration runtime reference to associate with the Data Factory Linked Service OData. + IntegrationRuntimeName *string `json:"integrationRuntimeName,omitempty" tf:"integration_runtime_name,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service OData. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The URL of the OData service endpoint. 
+ URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +type LinkedServiceODataParameters struct { + + // A map of additional properties to associate with the Data Factory Linked Service OData. + // +kubebuilder:validation:Optional + // +mapType=granular + AdditionalProperties map[string]*string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"` + + // List of tags that can be used for describing the Data Factory Linked Service OData. + // +kubebuilder:validation:Optional + Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // A basic_authentication block as defined below. + // +kubebuilder:validation:Optional + BasicAuthentication *BasicAuthenticationParameters `json:"basicAuthentication,omitempty" tf:"basic_authentication,omitempty"` + + // The Data Factory ID in which to associate the Linked Service with. Changing this forces a new resource. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta2.Factory + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` + + // Reference to a Factory in datafactory to populate dataFactoryId. + // +kubebuilder:validation:Optional + DataFactoryIDRef *v1.Reference `json:"dataFactoryIdRef,omitempty" tf:"-"` + + // Selector for a Factory in datafactory to populate dataFactoryId. + // +kubebuilder:validation:Optional + DataFactoryIDSelector *v1.Selector `json:"dataFactoryIdSelector,omitempty" tf:"-"` + + // The description for the Data Factory Linked Service OData. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The integration runtime reference to associate with the Data Factory Linked Service OData. 
+ // +kubebuilder:validation:Optional + IntegrationRuntimeName *string `json:"integrationRuntimeName,omitempty" tf:"integration_runtime_name,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service OData. + // +kubebuilder:validation:Optional + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The URL of the OData service endpoint. + // +kubebuilder:validation:Optional + URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +// LinkedServiceODataSpec defines the desired state of LinkedServiceOData +type LinkedServiceODataSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider LinkedServiceODataParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider LinkedServiceODataInitParameters `json:"initProvider,omitempty"` +} + +// LinkedServiceODataStatus defines the observed state of LinkedServiceOData. +type LinkedServiceODataStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider LinkedServiceODataObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// LinkedServiceOData is the Schema for the LinkedServiceODatas API. Manages a Linked Service (connection) between a Database and Azure Data Factory through OData protocol. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type LinkedServiceOData struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.url) || (has(self.initProvider) && has(self.initProvider.url))",message="spec.forProvider.url is a required parameter" + Spec LinkedServiceODataSpec `json:"spec"` + Status LinkedServiceODataStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// LinkedServiceODataList contains a list of LinkedServiceODatas +type LinkedServiceODataList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []LinkedServiceOData `json:"items"` +} + +// Repository type metadata. +var ( + LinkedServiceOData_Kind = "LinkedServiceOData" + LinkedServiceOData_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: LinkedServiceOData_Kind}.String() + LinkedServiceOData_KindAPIVersion = LinkedServiceOData_Kind + "." 
+ CRDGroupVersion.String() + LinkedServiceOData_GroupVersionKind = CRDGroupVersion.WithKind(LinkedServiceOData_Kind) +) + +func init() { + SchemeBuilder.Register(&LinkedServiceOData{}, &LinkedServiceODataList{}) +} diff --git a/apis/datafactory/v1beta2/zz_linkedserviceodbc_terraformed.go b/apis/datafactory/v1beta2/zz_linkedserviceodbc_terraformed.go new file mode 100755 index 000000000..973f926c6 --- /dev/null +++ b/apis/datafactory/v1beta2/zz_linkedserviceodbc_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this LinkedServiceOdbc +func (mg *LinkedServiceOdbc) GetTerraformResourceType() string { + return "azurerm_data_factory_linked_service_odbc" +} + +// GetConnectionDetailsMapping for this LinkedServiceOdbc +func (tr *LinkedServiceOdbc) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"basic_authentication[*].password": "spec.forProvider.basicAuthentication[*].passwordSecretRef"} +} + +// GetObservation of this LinkedServiceOdbc +func (tr *LinkedServiceOdbc) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this LinkedServiceOdbc +func (tr *LinkedServiceOdbc) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this LinkedServiceOdbc +func (tr *LinkedServiceOdbc) GetID() string { + if 
tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this LinkedServiceOdbc +func (tr *LinkedServiceOdbc) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this LinkedServiceOdbc +func (tr *LinkedServiceOdbc) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this LinkedServiceOdbc +func (tr *LinkedServiceOdbc) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this LinkedServiceOdbc +func (tr *LinkedServiceOdbc) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields.
+ err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this LinkedServiceOdbc using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *LinkedServiceOdbc) LateInitialize(attrs []byte) (bool, error) { + params := &LinkedServiceOdbcParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *LinkedServiceOdbc) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/datafactory/v1beta2/zz_linkedserviceodbc_types.go b/apis/datafactory/v1beta2/zz_linkedserviceodbc_types.go new file mode 100755 index 000000000..f430eaefe --- /dev/null +++ b/apis/datafactory/v1beta2/zz_linkedserviceodbc_types.go @@ -0,0 +1,203 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type LinkedServiceOdbcBasicAuthenticationInitParameters struct { + + // The username which can be used to authenticate to the ODBC endpoint.
+ Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type LinkedServiceOdbcBasicAuthenticationObservation struct { + + // The username which can be used to authenticate to the ODBC endpoint. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type LinkedServiceOdbcBasicAuthenticationParameters struct { + + // The password associated with the username, which can be used to authenticate to the ODBC endpoint. + // +kubebuilder:validation:Required + PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` + + // The username which can be used to authenticate to the ODBC endpoint. + // +kubebuilder:validation:Optional + Username *string `json:"username" tf:"username,omitempty"` +} + +type LinkedServiceOdbcInitParameters struct { + + // A map of additional properties to associate with the Data Factory Linked Service ODBC. + // +mapType=granular + AdditionalProperties map[string]*string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"` + + // List of tags that can be used for describing the Data Factory Linked Service ODBC. + Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // A basic_authentication block as defined below. + BasicAuthentication *LinkedServiceOdbcBasicAuthenticationInitParameters `json:"basicAuthentication,omitempty" tf:"basic_authentication,omitempty"` + + // The connection string in which to authenticate with ODBC. + ConnectionString *string `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // The description for the Data Factory Linked Service ODBC. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The integration runtime reference to associate with the Data Factory Linked Service ODBC. 
+ IntegrationRuntimeName *string `json:"integrationRuntimeName,omitempty" tf:"integration_runtime_name,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service ODBC. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type LinkedServiceOdbcObservation struct { + + // A map of additional properties to associate with the Data Factory Linked Service ODBC. + // +mapType=granular + AdditionalProperties map[string]*string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"` + + // List of tags that can be used for describing the Data Factory Linked Service ODBC. + Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // A basic_authentication block as defined below. + BasicAuthentication *LinkedServiceOdbcBasicAuthenticationObservation `json:"basicAuthentication,omitempty" tf:"basic_authentication,omitempty"` + + // The connection string in which to authenticate with ODBC. + ConnectionString *string `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // The Data Factory ID in which to associate the Linked Service with. Changing this forces a new resource. + DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` + + // The description for the Data Factory Linked Service ODBC. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The ID of the Data Factory ODBC Linked Service. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The integration runtime reference to associate with the Data Factory Linked Service ODBC. + IntegrationRuntimeName *string `json:"integrationRuntimeName,omitempty" tf:"integration_runtime_name,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service ODBC. 
+ // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type LinkedServiceOdbcParameters struct { + + // A map of additional properties to associate with the Data Factory Linked Service ODBC. + // +kubebuilder:validation:Optional + // +mapType=granular + AdditionalProperties map[string]*string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"` + + // List of tags that can be used for describing the Data Factory Linked Service ODBC. + // +kubebuilder:validation:Optional + Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // A basic_authentication block as defined below. + // +kubebuilder:validation:Optional + BasicAuthentication *LinkedServiceOdbcBasicAuthenticationParameters `json:"basicAuthentication,omitempty" tf:"basic_authentication,omitempty"` + + // The connection string in which to authenticate with ODBC. + // +kubebuilder:validation:Optional + ConnectionString *string `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // The Data Factory ID in which to associate the Linked Service with. Changing this forces a new resource. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta2.Factory + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` + + // Reference to a Factory in datafactory to populate dataFactoryId. + // +kubebuilder:validation:Optional + DataFactoryIDRef *v1.Reference `json:"dataFactoryIdRef,omitempty" tf:"-"` + + // Selector for a Factory in datafactory to populate dataFactoryId. + // +kubebuilder:validation:Optional + DataFactoryIDSelector *v1.Selector `json:"dataFactoryIdSelector,omitempty" tf:"-"` + + // The description for the Data Factory Linked Service ODBC. 
+ // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The integration runtime reference to associate with the Data Factory Linked Service ODBC. + // +kubebuilder:validation:Optional + IntegrationRuntimeName *string `json:"integrationRuntimeName,omitempty" tf:"integration_runtime_name,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service ODBC. + // +kubebuilder:validation:Optional + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +// LinkedServiceOdbcSpec defines the desired state of LinkedServiceOdbc +type LinkedServiceOdbcSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider LinkedServiceOdbcParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider LinkedServiceOdbcInitParameters `json:"initProvider,omitempty"` +} + +// LinkedServiceOdbcStatus defines the observed state of LinkedServiceOdbc. +type LinkedServiceOdbcStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider LinkedServiceOdbcObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// LinkedServiceOdbc is the Schema for the LinkedServiceOdbcs API. 
Manages a Linked Service (connection) between a Database and Azure Data Factory through ODBC protocol. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type LinkedServiceOdbc struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.connectionString) || (has(self.initProvider) && has(self.initProvider.connectionString))",message="spec.forProvider.connectionString is a required parameter" + Spec LinkedServiceOdbcSpec `json:"spec"` + Status LinkedServiceOdbcStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// LinkedServiceOdbcList contains a list of LinkedServiceOdbcs +type LinkedServiceOdbcList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []LinkedServiceOdbc `json:"items"` +} + +// Repository type metadata. +var ( + LinkedServiceOdbc_Kind = "LinkedServiceOdbc" + LinkedServiceOdbc_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: LinkedServiceOdbc_Kind}.String() + LinkedServiceOdbc_KindAPIVersion = LinkedServiceOdbc_Kind + "." 
+ CRDGroupVersion.String() + LinkedServiceOdbc_GroupVersionKind = CRDGroupVersion.WithKind(LinkedServiceOdbc_Kind) +) + +func init() { + SchemeBuilder.Register(&LinkedServiceOdbc{}, &LinkedServiceOdbcList{}) +} diff --git a/apis/datafactory/v1beta2/zz_linkedservicesnowflake_terraformed.go b/apis/datafactory/v1beta2/zz_linkedservicesnowflake_terraformed.go new file mode 100755 index 000000000..04b56a642 --- /dev/null +++ b/apis/datafactory/v1beta2/zz_linkedservicesnowflake_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this LinkedServiceSnowflake +func (mg *LinkedServiceSnowflake) GetTerraformResourceType() string { + return "azurerm_data_factory_linked_service_snowflake" +} + +// GetConnectionDetailsMapping for this LinkedServiceSnowflake +func (tr *LinkedServiceSnowflake) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this LinkedServiceSnowflake +func (tr *LinkedServiceSnowflake) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this LinkedServiceSnowflake +func (tr *LinkedServiceSnowflake) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this LinkedServiceSnowflake +func (tr *LinkedServiceSnowflake) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return 
*tr.Status.AtProvider.ID +} + +// GetParameters of this LinkedServiceSnowflake +func (tr *LinkedServiceSnowflake) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this LinkedServiceSnowflake +func (tr *LinkedServiceSnowflake) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this LinkedServiceSnowflake +func (tr *LinkedServiceSnowflake) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this LinkedServiceSnowflake +func (tr *LinkedServiceSnowflake) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields.
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this LinkedServiceSnowflake using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *LinkedServiceSnowflake) LateInitialize(attrs []byte) (bool, error) { + params := &LinkedServiceSnowflakeParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *LinkedServiceSnowflake) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/datafactory/v1beta2/zz_linkedservicesnowflake_types.go b/apis/datafactory/v1beta2/zz_linkedservicesnowflake_types.go new file mode 100755 index 000000000..f13900337 --- /dev/null +++ b/apis/datafactory/v1beta2/zz_linkedservicesnowflake_types.go @@ -0,0 +1,227 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type LinkedServiceSnowflakeInitParameters struct { + + // A map of additional properties to associate with the Data Factory Linked Service. 
+ // +mapType=granular + AdditionalProperties map[string]*string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"` + + // List of tags that can be used for describing the Data Factory Linked Service. + Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // The connection string in which to authenticate with Snowflake. + ConnectionString *string `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // The description for the Data Factory Linked Service. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The integration runtime reference to associate with the Data Factory Linked Service. + IntegrationRuntimeName *string `json:"integrationRuntimeName,omitempty" tf:"integration_runtime_name,omitempty"` + + // A key_vault_password block as defined below. Use this argument to store Snowflake password in an existing Key Vault. It needs an existing Key Vault Data Factory Linked Service. + KeyVaultPassword *LinkedServiceSnowflakeKeyVaultPasswordInitParameters `json:"keyVaultPassword,omitempty" tf:"key_vault_password,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type LinkedServiceSnowflakeKeyVaultPasswordInitParameters struct { + + // Specifies the name of an existing Key Vault Data Factory Linked Service. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta1.LinkedServiceKeyVault + LinkedServiceName *string `json:"linkedServiceName,omitempty" tf:"linked_service_name,omitempty"` + + // Reference to a LinkedServiceKeyVault in datafactory to populate linkedServiceName. 
+ // +kubebuilder:validation:Optional + LinkedServiceNameRef *v1.Reference `json:"linkedServiceNameRef,omitempty" tf:"-"` + + // Selector for a LinkedServiceKeyVault in datafactory to populate linkedServiceName. + // +kubebuilder:validation:Optional + LinkedServiceNameSelector *v1.Selector `json:"linkedServiceNameSelector,omitempty" tf:"-"` + + // Specifies the secret name in Azure Key Vault that stores Snowflake password. + SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` +} + +type LinkedServiceSnowflakeKeyVaultPasswordObservation struct { + + // Specifies the name of an existing Key Vault Data Factory Linked Service. + LinkedServiceName *string `json:"linkedServiceName,omitempty" tf:"linked_service_name,omitempty"` + + // Specifies the secret name in Azure Key Vault that stores Snowflake password. + SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` +} + +type LinkedServiceSnowflakeKeyVaultPasswordParameters struct { + + // Specifies the name of an existing Key Vault Data Factory Linked Service. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta1.LinkedServiceKeyVault + // +kubebuilder:validation:Optional + LinkedServiceName *string `json:"linkedServiceName,omitempty" tf:"linked_service_name,omitempty"` + + // Reference to a LinkedServiceKeyVault in datafactory to populate linkedServiceName. + // +kubebuilder:validation:Optional + LinkedServiceNameRef *v1.Reference `json:"linkedServiceNameRef,omitempty" tf:"-"` + + // Selector for a LinkedServiceKeyVault in datafactory to populate linkedServiceName. + // +kubebuilder:validation:Optional + LinkedServiceNameSelector *v1.Selector `json:"linkedServiceNameSelector,omitempty" tf:"-"` + + // Specifies the secret name in Azure Key Vault that stores Snowflake password. 
+ // +kubebuilder:validation:Optional + SecretName *string `json:"secretName" tf:"secret_name,omitempty"` +} + +type LinkedServiceSnowflakeObservation struct { + + // A map of additional properties to associate with the Data Factory Linked Service. + // +mapType=granular + AdditionalProperties map[string]*string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"` + + // List of tags that can be used for describing the Data Factory Linked Service. + Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // The connection string in which to authenticate with Snowflake. + ConnectionString *string `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // The Data Factory ID in which to associate the Linked Service with. Changing this forces a new resource. + DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` + + // The description for the Data Factory Linked Service. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The ID of the Data Factory Snowflake Linked Service. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The integration runtime reference to associate with the Data Factory Linked Service. + IntegrationRuntimeName *string `json:"integrationRuntimeName,omitempty" tf:"integration_runtime_name,omitempty"` + + // A key_vault_password block as defined below. Use this argument to store Snowflake password in an existing Key Vault. It needs an existing Key Vault Data Factory Linked Service. + KeyVaultPassword *LinkedServiceSnowflakeKeyVaultPasswordObservation `json:"keyVaultPassword,omitempty" tf:"key_vault_password,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service. 
+ // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type LinkedServiceSnowflakeParameters struct { + + // A map of additional properties to associate with the Data Factory Linked Service. + // +kubebuilder:validation:Optional + // +mapType=granular + AdditionalProperties map[string]*string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"` + + // List of tags that can be used for describing the Data Factory Linked Service. + // +kubebuilder:validation:Optional + Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // The connection string in which to authenticate with Snowflake. + // +kubebuilder:validation:Optional + ConnectionString *string `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // The Data Factory ID in which to associate the Linked Service with. Changing this forces a new resource. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta2.Factory + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` + + // Reference to a Factory in datafactory to populate dataFactoryId. + // +kubebuilder:validation:Optional + DataFactoryIDRef *v1.Reference `json:"dataFactoryIdRef,omitempty" tf:"-"` + + // Selector for a Factory in datafactory to populate dataFactoryId. + // +kubebuilder:validation:Optional + DataFactoryIDSelector *v1.Selector `json:"dataFactoryIdSelector,omitempty" tf:"-"` + + // The description for the Data Factory Linked Service. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The integration runtime reference to associate with the Data Factory Linked Service. 
+ // +kubebuilder:validation:Optional + IntegrationRuntimeName *string `json:"integrationRuntimeName,omitempty" tf:"integration_runtime_name,omitempty"` + + // A key_vault_password block as defined below. Use this argument to store Snowflake password in an existing Key Vault. It needs an existing Key Vault Data Factory Linked Service. + // +kubebuilder:validation:Optional + KeyVaultPassword *LinkedServiceSnowflakeKeyVaultPasswordParameters `json:"keyVaultPassword,omitempty" tf:"key_vault_password,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service. + // +kubebuilder:validation:Optional + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +// LinkedServiceSnowflakeSpec defines the desired state of LinkedServiceSnowflake +type LinkedServiceSnowflakeSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider LinkedServiceSnowflakeParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider LinkedServiceSnowflakeInitParameters `json:"initProvider,omitempty"` +} + +// LinkedServiceSnowflakeStatus defines the observed state of LinkedServiceSnowflake. 
+type LinkedServiceSnowflakeStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider LinkedServiceSnowflakeObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// LinkedServiceSnowflake is the Schema for the LinkedServiceSnowflakes API. Manages a Linked Service (connection) between Snowflake and Azure Data Factory. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type LinkedServiceSnowflake struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.connectionString) || (has(self.initProvider) && has(self.initProvider.connectionString))",message="spec.forProvider.connectionString is a required parameter" + Spec LinkedServiceSnowflakeSpec `json:"spec"` + Status LinkedServiceSnowflakeStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// LinkedServiceSnowflakeList contains a list of LinkedServiceSnowflakes +type LinkedServiceSnowflakeList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []LinkedServiceSnowflake `json:"items"` +} + +// Repository type metadata. 
+var ( + LinkedServiceSnowflake_Kind = "LinkedServiceSnowflake" + LinkedServiceSnowflake_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: LinkedServiceSnowflake_Kind}.String() + LinkedServiceSnowflake_KindAPIVersion = LinkedServiceSnowflake_Kind + "." + CRDGroupVersion.String() + LinkedServiceSnowflake_GroupVersionKind = CRDGroupVersion.WithKind(LinkedServiceSnowflake_Kind) +) + +func init() { + SchemeBuilder.Register(&LinkedServiceSnowflake{}, &LinkedServiceSnowflakeList{}) +} diff --git a/apis/datafactory/v1beta2/zz_linkedservicesqlserver_terraformed.go b/apis/datafactory/v1beta2/zz_linkedservicesqlserver_terraformed.go new file mode 100755 index 000000000..e83306c5b --- /dev/null +++ b/apis/datafactory/v1beta2/zz_linkedservicesqlserver_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this LinkedServiceSQLServer +func (mg *LinkedServiceSQLServer) GetTerraformResourceType() string { + return "azurerm_data_factory_linked_service_sql_server" +} + +// GetConnectionDetailsMapping for this LinkedServiceSQLServer +func (tr *LinkedServiceSQLServer) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this LinkedServiceSQLServer +func (tr *LinkedServiceSQLServer) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this LinkedServiceSQLServer +func (tr *LinkedServiceSQLServer) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + 
return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this LinkedServiceSQLServer +func (tr *LinkedServiceSQLServer) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this LinkedServiceSQLServer +func (tr *LinkedServiceSQLServer) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this LinkedServiceSQLServer +func (tr *LinkedServiceSQLServer) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this LinkedServiceSQLServer +func (tr *LinkedServiceSQLServer) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this LinkedServiceSQLServer +func (tr *LinkedServiceSQLServer) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider.
As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this LinkedServiceSQLServer using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *LinkedServiceSQLServer) LateInitialize(attrs []byte) (bool, error) { + params := &LinkedServiceSQLServerParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *LinkedServiceSQLServer) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/datafactory/v1beta2/zz_linkedservicesqlserver_types.go b/apis/datafactory/v1beta2/zz_linkedservicesqlserver_types.go new file mode 100755 index 000000000..2a5bb85a4 --- /dev/null +++ b/apis/datafactory/v1beta2/zz_linkedservicesqlserver_types.go @@ -0,0 +1,275 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type LinkedServiceSQLServerInitParameters struct { + + // A map of additional properties to associate with the Data Factory Linked Service SQL Server. + // +mapType=granular + AdditionalProperties map[string]*string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"` + + // List of tags that can be used for describing the Data Factory Linked Service SQL Server. + Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // The connection string in which to authenticate with the SQL Server. Exactly one of either connection_string or key_vault_connection_string is required. + ConnectionString *string `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // The description for the Data Factory Linked Service SQL Server. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The integration runtime reference to associate with the Data Factory Linked Service SQL Server. + IntegrationRuntimeName *string `json:"integrationRuntimeName,omitempty" tf:"integration_runtime_name,omitempty"` + + // A key_vault_connection_string block as defined below. Use this argument to store SQL Server connection string in an existing Key Vault. It needs an existing Key Vault Data Factory Linked Service. Exactly one of either connection_string or key_vault_connection_string is required. + KeyVaultConnectionString *LinkedServiceSQLServerKeyVaultConnectionStringInitParameters `json:"keyVaultConnectionString,omitempty" tf:"key_vault_connection_string,omitempty"` + + // A key_vault_password block as defined below. Use this argument to store SQL Server password in an existing Key Vault. It needs an existing Key Vault Data Factory Linked Service. 
+ KeyVaultPassword *LinkedServiceSQLServerKeyVaultPasswordInitParameters `json:"keyVaultPassword,omitempty" tf:"key_vault_password,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service SQL Server. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The on-premises Windows authentication user name. + UserName *string `json:"userName,omitempty" tf:"user_name,omitempty"` +} + +type LinkedServiceSQLServerKeyVaultConnectionStringInitParameters struct { + + // Specifies the name of an existing Key Vault Data Factory Linked Service. + LinkedServiceName *string `json:"linkedServiceName,omitempty" tf:"linked_service_name,omitempty"` + + // Specifies the secret name in Azure Key Vault that stores SQL Server connection string. + SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` +} + +type LinkedServiceSQLServerKeyVaultConnectionStringObservation struct { + + // Specifies the name of an existing Key Vault Data Factory Linked Service. + LinkedServiceName *string `json:"linkedServiceName,omitempty" tf:"linked_service_name,omitempty"` + + // Specifies the secret name in Azure Key Vault that stores SQL Server connection string. + SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` +} + +type LinkedServiceSQLServerKeyVaultConnectionStringParameters struct { + + // Specifies the name of an existing Key Vault Data Factory Linked Service. + // +kubebuilder:validation:Optional + LinkedServiceName *string `json:"linkedServiceName" tf:"linked_service_name,omitempty"` + + // Specifies the secret name in Azure Key Vault that stores SQL Server connection string. + // +kubebuilder:validation:Optional + SecretName *string `json:"secretName" tf:"secret_name,omitempty"` +} + +type LinkedServiceSQLServerKeyVaultPasswordInitParameters struct { + + // Specifies the name of an existing Key Vault Data Factory Linked Service. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta1.LinkedServiceKeyVault + LinkedServiceName *string `json:"linkedServiceName,omitempty" tf:"linked_service_name,omitempty"` + + // Reference to a LinkedServiceKeyVault in datafactory to populate linkedServiceName. + // +kubebuilder:validation:Optional + LinkedServiceNameRef *v1.Reference `json:"linkedServiceNameRef,omitempty" tf:"-"` + + // Selector for a LinkedServiceKeyVault in datafactory to populate linkedServiceName. + // +kubebuilder:validation:Optional + LinkedServiceNameSelector *v1.Selector `json:"linkedServiceNameSelector,omitempty" tf:"-"` + + // Specifies the secret name in Azure Key Vault that stores SQL Server password. + SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` +} + +type LinkedServiceSQLServerKeyVaultPasswordObservation struct { + + // Specifies the name of an existing Key Vault Data Factory Linked Service. + LinkedServiceName *string `json:"linkedServiceName,omitempty" tf:"linked_service_name,omitempty"` + + // Specifies the secret name in Azure Key Vault that stores SQL Server password. + SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` +} + +type LinkedServiceSQLServerKeyVaultPasswordParameters struct { + + // Specifies the name of an existing Key Vault Data Factory Linked Service. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta1.LinkedServiceKeyVault + // +kubebuilder:validation:Optional + LinkedServiceName *string `json:"linkedServiceName,omitempty" tf:"linked_service_name,omitempty"` + + // Reference to a LinkedServiceKeyVault in datafactory to populate linkedServiceName. + // +kubebuilder:validation:Optional + LinkedServiceNameRef *v1.Reference `json:"linkedServiceNameRef,omitempty" tf:"-"` + + // Selector for a LinkedServiceKeyVault in datafactory to populate linkedServiceName. 
+ // +kubebuilder:validation:Optional + LinkedServiceNameSelector *v1.Selector `json:"linkedServiceNameSelector,omitempty" tf:"-"` + + // Specifies the secret name in Azure Key Vault that stores SQL Server password. + // +kubebuilder:validation:Optional + SecretName *string `json:"secretName" tf:"secret_name,omitempty"` +} + +type LinkedServiceSQLServerObservation struct { + + // A map of additional properties to associate with the Data Factory Linked Service SQL Server. + // +mapType=granular + AdditionalProperties map[string]*string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"` + + // List of tags that can be used for describing the Data Factory Linked Service SQL Server. + Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // The connection string in which to authenticate with the SQL Server. Exactly one of either connection_string or key_vault_connection_string is required. + ConnectionString *string `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // The Data Factory ID in which to associate the Linked Service with. Changing this forces a new resource. + DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` + + // The description for the Data Factory Linked Service SQL Server. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The ID of the Data Factory SQL Server Linked Service. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The integration runtime reference to associate with the Data Factory Linked Service SQL Server. + IntegrationRuntimeName *string `json:"integrationRuntimeName,omitempty" tf:"integration_runtime_name,omitempty"` + + // A key_vault_connection_string block as defined below. Use this argument to store SQL Server connection string in an existing Key Vault. It needs an existing Key Vault Data Factory Linked Service. 
Exactly one of either connection_string or key_vault_connection_string is required. + KeyVaultConnectionString *LinkedServiceSQLServerKeyVaultConnectionStringObservation `json:"keyVaultConnectionString,omitempty" tf:"key_vault_connection_string,omitempty"` + + // A key_vault_password block as defined below. Use this argument to store SQL Server password in an existing Key Vault. It needs an existing Key Vault Data Factory Linked Service. + KeyVaultPassword *LinkedServiceSQLServerKeyVaultPasswordObservation `json:"keyVaultPassword,omitempty" tf:"key_vault_password,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service SQL Server. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The on-premises Windows authentication user name. + UserName *string `json:"userName,omitempty" tf:"user_name,omitempty"` +} + +type LinkedServiceSQLServerParameters struct { + + // A map of additional properties to associate with the Data Factory Linked Service SQL Server. + // +kubebuilder:validation:Optional + // +mapType=granular + AdditionalProperties map[string]*string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"` + + // List of tags that can be used for describing the Data Factory Linked Service SQL Server. + // +kubebuilder:validation:Optional + Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // The connection string in which to authenticate with the SQL Server. Exactly one of either connection_string or key_vault_connection_string is required. + // +kubebuilder:validation:Optional + ConnectionString *string `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // The Data Factory ID in which to associate the Linked Service with. Changing this forces a new resource. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta2.Factory + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` + + // Reference to a Factory in datafactory to populate dataFactoryId. + // +kubebuilder:validation:Optional + DataFactoryIDRef *v1.Reference `json:"dataFactoryIdRef,omitempty" tf:"-"` + + // Selector for a Factory in datafactory to populate dataFactoryId. + // +kubebuilder:validation:Optional + DataFactoryIDSelector *v1.Selector `json:"dataFactoryIdSelector,omitempty" tf:"-"` + + // The description for the Data Factory Linked Service SQL Server. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The integration runtime reference to associate with the Data Factory Linked Service SQL Server. + // +kubebuilder:validation:Optional + IntegrationRuntimeName *string `json:"integrationRuntimeName,omitempty" tf:"integration_runtime_name,omitempty"` + + // A key_vault_connection_string block as defined below. Use this argument to store SQL Server connection string in an existing Key Vault. It needs an existing Key Vault Data Factory Linked Service. Exactly one of either connection_string or key_vault_connection_string is required. + // +kubebuilder:validation:Optional + KeyVaultConnectionString *LinkedServiceSQLServerKeyVaultConnectionStringParameters `json:"keyVaultConnectionString,omitempty" tf:"key_vault_connection_string,omitempty"` + + // A key_vault_password block as defined below. Use this argument to store SQL Server password in an existing Key Vault. It needs an existing Key Vault Data Factory Linked Service. 
+ // +kubebuilder:validation:Optional + KeyVaultPassword *LinkedServiceSQLServerKeyVaultPasswordParameters `json:"keyVaultPassword,omitempty" tf:"key_vault_password,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service SQL Server. + // +kubebuilder:validation:Optional + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The on-premises Windows authentication user name. + // +kubebuilder:validation:Optional + UserName *string `json:"userName,omitempty" tf:"user_name,omitempty"` +} + +// LinkedServiceSQLServerSpec defines the desired state of LinkedServiceSQLServer +type LinkedServiceSQLServerSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider LinkedServiceSQLServerParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider LinkedServiceSQLServerInitParameters `json:"initProvider,omitempty"` +} + +// LinkedServiceSQLServerStatus defines the observed state of LinkedServiceSQLServer. +type LinkedServiceSQLServerStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider LinkedServiceSQLServerObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// LinkedServiceSQLServer is the Schema for the LinkedServiceSQLServers API. 
Manages a Linked Service (connection) between a SQL Server and Azure Data Factory. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type LinkedServiceSQLServer struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec LinkedServiceSQLServerSpec `json:"spec"` + Status LinkedServiceSQLServerStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// LinkedServiceSQLServerList contains a list of LinkedServiceSQLServers +type LinkedServiceSQLServerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []LinkedServiceSQLServer `json:"items"` +} + +// Repository type metadata. +var ( + LinkedServiceSQLServer_Kind = "LinkedServiceSQLServer" + LinkedServiceSQLServer_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: LinkedServiceSQLServer_Kind}.String() + LinkedServiceSQLServer_KindAPIVersion = LinkedServiceSQLServer_Kind + "." 
+ CRDGroupVersion.String() + LinkedServiceSQLServer_GroupVersionKind = CRDGroupVersion.WithKind(LinkedServiceSQLServer_Kind) +) + +func init() { + SchemeBuilder.Register(&LinkedServiceSQLServer{}, &LinkedServiceSQLServerList{}) +} diff --git a/apis/datafactory/v1beta2/zz_linkedservicesynapse_terraformed.go b/apis/datafactory/v1beta2/zz_linkedservicesynapse_terraformed.go new file mode 100755 index 000000000..e861733e5 --- /dev/null +++ b/apis/datafactory/v1beta2/zz_linkedservicesynapse_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this LinkedServiceSynapse +func (mg *LinkedServiceSynapse) GetTerraformResourceType() string { + return "azurerm_data_factory_linked_service_synapse" +} + +// GetConnectionDetailsMapping for this LinkedServiceSynapse +func (tr *LinkedServiceSynapse) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this LinkedServiceSynapse +func (tr *LinkedServiceSynapse) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this LinkedServiceSynapse +func (tr *LinkedServiceSynapse) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this LinkedServiceSynapse +func (tr *LinkedServiceSynapse) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return 
*tr.Status.AtProvider.ID +} + +// GetParameters of this LinkedServiceSynapse +func (tr *LinkedServiceSynapse) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this LinkedServiceSynapse +func (tr *LinkedServiceSynapse) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this LinkedServiceSynapse +func (tr *LinkedServiceSynapse) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this LinkedServiceSynapse +func (tr *LinkedServiceSynapse) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this LinkedServiceSynapse using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *LinkedServiceSynapse) LateInitialize(attrs []byte) (bool, error) { + params := &LinkedServiceSynapseParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *LinkedServiceSynapse) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/datafactory/v1beta2/zz_linkedservicesynapse_types.go b/apis/datafactory/v1beta2/zz_linkedservicesynapse_types.go new file mode 100755 index 000000000..ca4c22abc --- /dev/null +++ b/apis/datafactory/v1beta2/zz_linkedservicesynapse_types.go @@ -0,0 +1,227 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type LinkedServiceSynapseInitParameters struct { + + // A map of additional properties to associate with the Data Factory Linked Service Synapse. 
+ // +mapType=granular + AdditionalProperties map[string]*string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"` + + // List of tags that can be used for describing the Data Factory Linked Service Synapse. + Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // The connection string in which to authenticate with the Synapse. + ConnectionString *string `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // The description for the Data Factory Linked Service Synapse. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The integration runtime reference to associate with the Data Factory Linked Service Synapse. + IntegrationRuntimeName *string `json:"integrationRuntimeName,omitempty" tf:"integration_runtime_name,omitempty"` + + // A key_vault_password block as defined below. Use this argument to store Synapse password in an existing Key Vault. It needs an existing Key Vault Data Factory Linked Service. + KeyVaultPassword *LinkedServiceSynapseKeyVaultPasswordInitParameters `json:"keyVaultPassword,omitempty" tf:"key_vault_password,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service Synapse. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type LinkedServiceSynapseKeyVaultPasswordInitParameters struct { + + // Specifies the name of an existing Key Vault Data Factory Linked Service. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta1.LinkedServiceKeyVault + LinkedServiceName *string `json:"linkedServiceName,omitempty" tf:"linked_service_name,omitempty"` + + // Reference to a LinkedServiceKeyVault in datafactory to populate linkedServiceName. 
+ // +kubebuilder:validation:Optional + LinkedServiceNameRef *v1.Reference `json:"linkedServiceNameRef,omitempty" tf:"-"` + + // Selector for a LinkedServiceKeyVault in datafactory to populate linkedServiceName. + // +kubebuilder:validation:Optional + LinkedServiceNameSelector *v1.Selector `json:"linkedServiceNameSelector,omitempty" tf:"-"` + + // Specifies the secret name in Azure Key Vault that stores Synapse password. + SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` +} + +type LinkedServiceSynapseKeyVaultPasswordObservation struct { + + // Specifies the name of an existing Key Vault Data Factory Linked Service. + LinkedServiceName *string `json:"linkedServiceName,omitempty" tf:"linked_service_name,omitempty"` + + // Specifies the secret name in Azure Key Vault that stores Synapse password. + SecretName *string `json:"secretName,omitempty" tf:"secret_name,omitempty"` +} + +type LinkedServiceSynapseKeyVaultPasswordParameters struct { + + // Specifies the name of an existing Key Vault Data Factory Linked Service. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta1.LinkedServiceKeyVault + // +kubebuilder:validation:Optional + LinkedServiceName *string `json:"linkedServiceName,omitempty" tf:"linked_service_name,omitempty"` + + // Reference to a LinkedServiceKeyVault in datafactory to populate linkedServiceName. + // +kubebuilder:validation:Optional + LinkedServiceNameRef *v1.Reference `json:"linkedServiceNameRef,omitempty" tf:"-"` + + // Selector for a LinkedServiceKeyVault in datafactory to populate linkedServiceName. + // +kubebuilder:validation:Optional + LinkedServiceNameSelector *v1.Selector `json:"linkedServiceNameSelector,omitempty" tf:"-"` + + // Specifies the secret name in Azure Key Vault that stores Synapse password. 
+ // +kubebuilder:validation:Optional + SecretName *string `json:"secretName" tf:"secret_name,omitempty"` +} + +type LinkedServiceSynapseObservation struct { + + // A map of additional properties to associate with the Data Factory Linked Service Synapse. + // +mapType=granular + AdditionalProperties map[string]*string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"` + + // List of tags that can be used for describing the Data Factory Linked Service Synapse. + Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // The connection string in which to authenticate with the Synapse. + ConnectionString *string `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // The Data Factory ID in which to associate the Linked Service with. Changing this forces a new resource. + DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` + + // The description for the Data Factory Linked Service Synapse. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The ID of the Data Factory Synapse Linked Service. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The integration runtime reference to associate with the Data Factory Linked Service Synapse. + IntegrationRuntimeName *string `json:"integrationRuntimeName,omitempty" tf:"integration_runtime_name,omitempty"` + + // A key_vault_password block as defined below. Use this argument to store Synapse password in an existing Key Vault. It needs an existing Key Vault Data Factory Linked Service. + KeyVaultPassword *LinkedServiceSynapseKeyVaultPasswordObservation `json:"keyVaultPassword,omitempty" tf:"key_vault_password,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service Synapse. 
+ // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type LinkedServiceSynapseParameters struct { + + // A map of additional properties to associate with the Data Factory Linked Service Synapse. + // +kubebuilder:validation:Optional + // +mapType=granular + AdditionalProperties map[string]*string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"` + + // List of tags that can be used for describing the Data Factory Linked Service Synapse. + // +kubebuilder:validation:Optional + Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // The connection string in which to authenticate with the Synapse. + // +kubebuilder:validation:Optional + ConnectionString *string `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // The Data Factory ID in which to associate the Linked Service with. Changing this forces a new resource. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta2.Factory + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` + + // Reference to a Factory in datafactory to populate dataFactoryId. + // +kubebuilder:validation:Optional + DataFactoryIDRef *v1.Reference `json:"dataFactoryIdRef,omitempty" tf:"-"` + + // Selector for a Factory in datafactory to populate dataFactoryId. + // +kubebuilder:validation:Optional + DataFactoryIDSelector *v1.Selector `json:"dataFactoryIdSelector,omitempty" tf:"-"` + + // The description for the Data Factory Linked Service Synapse. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The integration runtime reference to associate with the Data Factory Linked Service Synapse. 
+ // +kubebuilder:validation:Optional + IntegrationRuntimeName *string `json:"integrationRuntimeName,omitempty" tf:"integration_runtime_name,omitempty"` + + // A key_vault_password block as defined below. Use this argument to store Synapse password in an existing Key Vault. It needs an existing Key Vault Data Factory Linked Service. + // +kubebuilder:validation:Optional + KeyVaultPassword *LinkedServiceSynapseKeyVaultPasswordParameters `json:"keyVaultPassword,omitempty" tf:"key_vault_password,omitempty"` + + // A map of parameters to associate with the Data Factory Linked Service Synapse. + // +kubebuilder:validation:Optional + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +// LinkedServiceSynapseSpec defines the desired state of LinkedServiceSynapse +type LinkedServiceSynapseSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider LinkedServiceSynapseParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider LinkedServiceSynapseInitParameters `json:"initProvider,omitempty"` +} + +// LinkedServiceSynapseStatus defines the observed state of LinkedServiceSynapse. 
+type LinkedServiceSynapseStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider LinkedServiceSynapseObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// LinkedServiceSynapse is the Schema for the LinkedServiceSynapses API. Manages a Linked Service (connection) between Synapse and Azure Data Factory. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type LinkedServiceSynapse struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.connectionString) || (has(self.initProvider) && has(self.initProvider.connectionString))",message="spec.forProvider.connectionString is a required parameter" + Spec LinkedServiceSynapseSpec `json:"spec"` + Status LinkedServiceSynapseStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// LinkedServiceSynapseList contains a list of LinkedServiceSynapses +type LinkedServiceSynapseList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []LinkedServiceSynapse `json:"items"` +} + +// Repository type metadata. 
+var ( + LinkedServiceSynapse_Kind = "LinkedServiceSynapse" + LinkedServiceSynapse_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: LinkedServiceSynapse_Kind}.String() + LinkedServiceSynapse_KindAPIVersion = LinkedServiceSynapse_Kind + "." + CRDGroupVersion.String() + LinkedServiceSynapse_GroupVersionKind = CRDGroupVersion.WithKind(LinkedServiceSynapse_Kind) +) + +func init() { + SchemeBuilder.Register(&LinkedServiceSynapse{}, &LinkedServiceSynapseList{}) +} diff --git a/apis/datafactory/v1beta2/zz_triggerschedule_terraformed.go b/apis/datafactory/v1beta2/zz_triggerschedule_terraformed.go new file mode 100755 index 000000000..751408ed2 --- /dev/null +++ b/apis/datafactory/v1beta2/zz_triggerschedule_terraformed.go @@ -0,0 +1,131 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this TriggerSchedule +func (mg *TriggerSchedule) GetTerraformResourceType() string { + return "azurerm_data_factory_trigger_schedule" +} + +// GetConnectionDetailsMapping for this TriggerSchedule +func (tr *TriggerSchedule) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this TriggerSchedule +func (tr *TriggerSchedule) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this TriggerSchedule +func (tr *TriggerSchedule) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of 
underlying Terraform resource of this TriggerSchedule +func (tr *TriggerSchedule) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this TriggerSchedule +func (tr *TriggerSchedule) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this TriggerSchedule +func (tr *TriggerSchedule) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this TriggerSchedule +func (tr *TriggerSchedule) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this TriggerSchedule +func (tr *TriggerSchedule) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this TriggerSchedule using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *TriggerSchedule) LateInitialize(attrs []byte) (bool, error) { + params := &TriggerScheduleParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + opts = append(opts, resource.WithNameFilter("Pipeline")) + opts = append(opts, resource.WithNameFilter("PipelineName")) + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *TriggerSchedule) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/datafactory/v1beta2/zz_triggerschedule_types.go b/apis/datafactory/v1beta2/zz_triggerschedule_types.go new file mode 100755 index 000000000..b8adba1a4 --- /dev/null +++ b/apis/datafactory/v1beta2/zz_triggerschedule_types.go @@ -0,0 +1,364 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type MonthlyInitParameters struct { + + // The occurrence of the specified day during the month. 
For example, a monthly property with weekday and week values of Sunday, -1 means the last Sunday of the month. + Week *float64 `json:"week,omitempty" tf:"week,omitempty"` + + // The day of the week on which the trigger runs. For example, a monthly property with a weekday value of Sunday means every Sunday of the month. + Weekday *string `json:"weekday,omitempty" tf:"weekday,omitempty"` +} + +type MonthlyObservation struct { + + // The occurrence of the specified day during the month. For example, a monthly property with weekday and week values of Sunday, -1 means the last Sunday of the month. + Week *float64 `json:"week,omitempty" tf:"week,omitempty"` + + // The day of the week on which the trigger runs. For example, a monthly property with a weekday value of Sunday means every Sunday of the month. + Weekday *string `json:"weekday,omitempty" tf:"weekday,omitempty"` +} + +type MonthlyParameters struct { + + // The occurrence of the specified day during the month. For example, a monthly property with weekday and week values of Sunday, -1 means the last Sunday of the month. + // +kubebuilder:validation:Optional + Week *float64 `json:"week,omitempty" tf:"week,omitempty"` + + // The day of the week on which the trigger runs. For example, a monthly property with a weekday value of Sunday means every Sunday of the month. + // +kubebuilder:validation:Optional + Weekday *string `json:"weekday" tf:"weekday,omitempty"` +} + +type PipelineInitParameters struct { + + // Reference pipeline name. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The pipeline parameters that the trigger will act upon. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type PipelineObservation struct { + + // Reference pipeline name. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The pipeline parameters that the trigger will act upon. 
+ // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type PipelineParameters struct { + + // Reference pipeline name. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The pipeline parameters that the trigger will act upon. + // +kubebuilder:validation:Optional + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type ScheduleInitParameters struct { + + // Day(s) of the month on which the trigger is scheduled. This value can be specified with a monthly frequency only. + DaysOfMonth []*float64 `json:"daysOfMonth,omitempty" tf:"days_of_month,omitempty"` + + // Days of the week on which the trigger is scheduled. This value can be specified only with a weekly frequency. + DaysOfWeek []*string `json:"daysOfWeek,omitempty" tf:"days_of_week,omitempty"` + + // Hours of the day on which the trigger is scheduled. + Hours []*float64 `json:"hours,omitempty" tf:"hours,omitempty"` + + // Minutes of the hour on which the trigger is scheduled. + Minutes []*float64 `json:"minutes,omitempty" tf:"minutes,omitempty"` + + // A monthly block as documented below, which specifies the days of the month on which the trigger is scheduled. The value can be specified only with a monthly frequency. + Monthly []MonthlyInitParameters `json:"monthly,omitempty" tf:"monthly,omitempty"` +} + +type ScheduleObservation struct { + + // Day(s) of the month on which the trigger is scheduled. This value can be specified with a monthly frequency only. + DaysOfMonth []*float64 `json:"daysOfMonth,omitempty" tf:"days_of_month,omitempty"` + + // Days of the week on which the trigger is scheduled. This value can be specified only with a weekly frequency. + DaysOfWeek []*string `json:"daysOfWeek,omitempty" tf:"days_of_week,omitempty"` + + // Hours of the day on which the trigger is scheduled. 
+ Hours []*float64 `json:"hours,omitempty" tf:"hours,omitempty"` + + // Minutes of the hour on which the trigger is scheduled. + Minutes []*float64 `json:"minutes,omitempty" tf:"minutes,omitempty"` + + // A monthly block as documented below, which specifies the days of the month on which the trigger is scheduled. The value can be specified only with a monthly frequency. + Monthly []MonthlyObservation `json:"monthly,omitempty" tf:"monthly,omitempty"` +} + +type ScheduleParameters struct { + + // Day(s) of the month on which the trigger is scheduled. This value can be specified with a monthly frequency only. + // +kubebuilder:validation:Optional + DaysOfMonth []*float64 `json:"daysOfMonth,omitempty" tf:"days_of_month,omitempty"` + + // Days of the week on which the trigger is scheduled. This value can be specified only with a weekly frequency. + // +kubebuilder:validation:Optional + DaysOfWeek []*string `json:"daysOfWeek,omitempty" tf:"days_of_week,omitempty"` + + // Hours of the day on which the trigger is scheduled. + // +kubebuilder:validation:Optional + Hours []*float64 `json:"hours,omitempty" tf:"hours,omitempty"` + + // Minutes of the hour on which the trigger is scheduled. + // +kubebuilder:validation:Optional + Minutes []*float64 `json:"minutes,omitempty" tf:"minutes,omitempty"` + + // A monthly block as documented below, which specifies the days of the month on which the trigger is scheduled. The value can be specified only with a monthly frequency. + // +kubebuilder:validation:Optional + Monthly []MonthlyParameters `json:"monthly,omitempty" tf:"monthly,omitempty"` +} + +type TriggerScheduleInitParameters struct { + + // Specifies if the Data Factory Schedule Trigger is activated. Defaults to true. + Activated *bool `json:"activated,omitempty" tf:"activated,omitempty"` + + // List of tags that can be used for describing the Data Factory Schedule Trigger. 
+ Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // The Schedule Trigger's description. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The time the Schedule Trigger should end. The time will be represented in UTC. + EndTime *string `json:"endTime,omitempty" tf:"end_time,omitempty"` + + // The trigger frequency. Valid values include Minute, Hour, Day, Week, Month. Defaults to Minute. + Frequency *string `json:"frequency,omitempty" tf:"frequency,omitempty"` + + // The interval for how often the trigger occurs. This defaults to 1. + Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` + + // A pipeline block as defined below. + Pipeline []PipelineInitParameters `json:"pipeline,omitempty" tf:"pipeline,omitempty"` + + // The Data Factory Pipeline name that the trigger will act on. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta1.Pipeline + PipelineName *string `json:"pipelineName,omitempty" tf:"pipeline_name,omitempty"` + + // Reference to a Pipeline in datafactory to populate pipelineName. + // +kubebuilder:validation:Optional + PipelineNameRef *v1.Reference `json:"pipelineNameRef,omitempty" tf:"-"` + + // Selector for a Pipeline in datafactory to populate pipelineName. + // +kubebuilder:validation:Optional + PipelineNameSelector *v1.Selector `json:"pipelineNameSelector,omitempty" tf:"-"` + + // The pipeline parameters that the trigger will act upon. + // +mapType=granular + PipelineParameters map[string]*string `json:"pipelineParameters,omitempty" tf:"pipeline_parameters,omitempty"` + + // A schedule block as defined below, which further specifies the recurrence schedule for the trigger. A schedule is capable of limiting or increasing the number of trigger executions specified by the frequency and interval properties. 
+ Schedule *ScheduleInitParameters `json:"schedule,omitempty" tf:"schedule,omitempty"` + + // The time the Schedule Trigger will start. This defaults to the current time. The time will be represented in UTC. + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` + + // The timezone of the start/end time. + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +type TriggerScheduleObservation struct { + + // Specifies if the Data Factory Schedule Trigger is activated. Defaults to true. + Activated *bool `json:"activated,omitempty" tf:"activated,omitempty"` + + // List of tags that can be used for describing the Data Factory Schedule Trigger. + Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // The Data Factory ID in which to associate the Linked Service with. Changing this forces a new resource. + DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` + + // The Schedule Trigger's description. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The time the Schedule Trigger should end. The time will be represented in UTC. + EndTime *string `json:"endTime,omitempty" tf:"end_time,omitempty"` + + // The trigger frequency. Valid values include Minute, Hour, Day, Week, Month. Defaults to Minute. + Frequency *string `json:"frequency,omitempty" tf:"frequency,omitempty"` + + // The ID of the Data Factory Schedule Trigger. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The interval for how often the trigger occurs. This defaults to 1. + Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` + + // A pipeline block as defined below. + Pipeline []PipelineObservation `json:"pipeline,omitempty" tf:"pipeline,omitempty"` + + // The Data Factory Pipeline name that the trigger will act on. 
+ PipelineName *string `json:"pipelineName,omitempty" tf:"pipeline_name,omitempty"` + + // The pipeline parameters that the trigger will act upon. + // +mapType=granular + PipelineParameters map[string]*string `json:"pipelineParameters,omitempty" tf:"pipeline_parameters,omitempty"` + + // A schedule block as defined below, which further specifies the recurrence schedule for the trigger. A schedule is capable of limiting or increasing the number of trigger executions specified by the frequency and interval properties. + Schedule *ScheduleObservation `json:"schedule,omitempty" tf:"schedule,omitempty"` + + // The time the Schedule Trigger will start. This defaults to the current time. The time will be represented in UTC. + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` + + // The timezone of the start/end time. + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +type TriggerScheduleParameters struct { + + // Specifies if the Data Factory Schedule Trigger is activated. Defaults to true. + // +kubebuilder:validation:Optional + Activated *bool `json:"activated,omitempty" tf:"activated,omitempty"` + + // List of tags that can be used for describing the Data Factory Schedule Trigger. + // +kubebuilder:validation:Optional + Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // The Data Factory ID in which to associate the Linked Service with. Changing this forces a new resource. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta2.Factory + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + DataFactoryID *string `json:"dataFactoryId,omitempty" tf:"data_factory_id,omitempty"` + + // Reference to a Factory in datafactory to populate dataFactoryId. 
+ // +kubebuilder:validation:Optional + DataFactoryIDRef *v1.Reference `json:"dataFactoryIdRef,omitempty" tf:"-"` + + // Selector for a Factory in datafactory to populate dataFactoryId. + // +kubebuilder:validation:Optional + DataFactoryIDSelector *v1.Selector `json:"dataFactoryIdSelector,omitempty" tf:"-"` + + // The Schedule Trigger's description. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The time the Schedule Trigger should end. The time will be represented in UTC. + // +kubebuilder:validation:Optional + EndTime *string `json:"endTime,omitempty" tf:"end_time,omitempty"` + + // The trigger frequency. Valid values include Minute, Hour, Day, Week, Month. Defaults to Minute. + // +kubebuilder:validation:Optional + Frequency *string `json:"frequency,omitempty" tf:"frequency,omitempty"` + + // The interval for how often the trigger occurs. This defaults to 1. + // +kubebuilder:validation:Optional + Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` + + // A pipeline block as defined below. + // +kubebuilder:validation:Optional + Pipeline []PipelineParameters `json:"pipeline,omitempty" tf:"pipeline,omitempty"` + + // The Data Factory Pipeline name that the trigger will act on. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datafactory/v1beta1.Pipeline + // +kubebuilder:validation:Optional + PipelineName *string `json:"pipelineName,omitempty" tf:"pipeline_name,omitempty"` + + // Reference to a Pipeline in datafactory to populate pipelineName. + // +kubebuilder:validation:Optional + PipelineNameRef *v1.Reference `json:"pipelineNameRef,omitempty" tf:"-"` + + // Selector for a Pipeline in datafactory to populate pipelineName. + // +kubebuilder:validation:Optional + PipelineNameSelector *v1.Selector `json:"pipelineNameSelector,omitempty" tf:"-"` + + // The pipeline parameters that the trigger will act upon. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + PipelineParameters map[string]*string `json:"pipelineParameters,omitempty" tf:"pipeline_parameters,omitempty"` + + // A schedule block as defined below, which further specifies the recurrence schedule for the trigger. A schedule is capable of limiting or increasing the number of trigger executions specified by the frequency and interval properties. + // +kubebuilder:validation:Optional + Schedule *ScheduleParameters `json:"schedule,omitempty" tf:"schedule,omitempty"` + + // The time the Schedule Trigger will start. This defaults to the current time. The time will be represented in UTC. + // +kubebuilder:validation:Optional + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` + + // The timezone of the start/end time. + // +kubebuilder:validation:Optional + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +// TriggerScheduleSpec defines the desired state of TriggerSchedule +type TriggerScheduleSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider TriggerScheduleParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider TriggerScheduleInitParameters `json:"initProvider,omitempty"` +} + +// TriggerScheduleStatus defines the observed state of TriggerSchedule. 
+type TriggerScheduleStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider TriggerScheduleObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// TriggerSchedule is the Schema for the TriggerSchedules API. Manages a Trigger Schedule inside a Azure Data Factory. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type TriggerSchedule struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec TriggerScheduleSpec `json:"spec"` + Status TriggerScheduleStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// TriggerScheduleList contains a list of TriggerSchedules +type TriggerScheduleList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []TriggerSchedule `json:"items"` +} + +// Repository type metadata. +var ( + TriggerSchedule_Kind = "TriggerSchedule" + TriggerSchedule_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: TriggerSchedule_Kind}.String() + TriggerSchedule_KindAPIVersion = TriggerSchedule_Kind + "." 
+ CRDGroupVersion.String() + TriggerSchedule_GroupVersionKind = CRDGroupVersion.WithKind(TriggerSchedule_Kind) +) + +func init() { + SchemeBuilder.Register(&TriggerSchedule{}, &TriggerScheduleList{}) +} diff --git a/apis/datamigration/v1beta1/zz_databasemigrationservice_types.go b/apis/datamigration/v1beta1/zz_databasemigrationservice_types.go index a5a26494e..aa6a7fab6 100755 --- a/apis/datamigration/v1beta1/zz_databasemigrationservice_types.go +++ b/apis/datamigration/v1beta1/zz_databasemigrationservice_types.go @@ -25,7 +25,7 @@ type DatabaseMigrationServiceInitParameters struct { SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` // The ID of the virtual subnet resource to which the database migration service should be joined. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.Subnet + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` @@ -95,7 +95,7 @@ type DatabaseMigrationServiceParameters struct { SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` // The ID of the virtual subnet resource to which the database migration service should be joined. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.Subnet + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` diff --git a/apis/datamigration/v1beta1/zz_generated.resolvers.go b/apis/datamigration/v1beta1/zz_generated.resolvers.go index 9326b322b..ebf22c53f 100644 --- a/apis/datamigration/v1beta1/zz_generated.resolvers.go +++ b/apis/datamigration/v1beta1/zz_generated.resolvers.go @@ -95,7 +95,7 @@ func (mg *DatabaseMigrationService) ResolveReferences(ctx context.Context, c cli mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "Subnet", "SubnetList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -114,7 +114,7 @@ func (mg *DatabaseMigrationService) ResolveReferences(ctx context.Context, c cli mg.Spec.ForProvider.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.SubnetIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "Subnet", "SubnetList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/dataprotection/v1beta1/zz_backupinstanceblobstorage_types.go 
b/apis/dataprotection/v1beta1/zz_backupinstanceblobstorage_types.go index da5b757b0..757bbc52d 100755 --- a/apis/dataprotection/v1beta1/zz_backupinstanceblobstorage_types.go +++ b/apis/dataprotection/v1beta1/zz_backupinstanceblobstorage_types.go @@ -32,7 +32,7 @@ type BackupInstanceBlobStorageInitParameters struct { Location *string `json:"location,omitempty" tf:"location,omitempty"` // The ID of the source Storage Account. Changing this forces a new Backup Instance Blob Storage to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` @@ -84,7 +84,7 @@ type BackupInstanceBlobStorageParameters struct { Location *string `json:"location,omitempty" tf:"location,omitempty"` // The ID of the source Storage Account. Changing this forces a new Backup Instance Blob Storage to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` @@ -98,7 +98,7 @@ type BackupInstanceBlobStorageParameters struct { StorageAccountIDSelector *v1.Selector `json:"storageAccountIdSelector,omitempty" tf:"-"` // The ID of the Backup Vault within which the Backup Instance Blob Storage should exist. Changing this forces a new Backup Instance Blob Storage to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dataprotection/v1beta1.BackupVault + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dataprotection/v1beta2.BackupVault // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional VaultID *string `json:"vaultId,omitempty" tf:"vault_id,omitempty"` diff --git a/apis/dataprotection/v1beta1/zz_backupinstancedisk_types.go b/apis/dataprotection/v1beta1/zz_backupinstancedisk_types.go index 781c3052a..45e7912f4 100755 --- a/apis/dataprotection/v1beta1/zz_backupinstancedisk_types.go +++ b/apis/dataprotection/v1beta1/zz_backupinstancedisk_types.go @@ -16,7 +16,7 @@ import ( type BackupInstanceDiskInitParameters struct { // The ID of the Backup Policy. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dataprotection/v1beta1.BackupPolicyDisk + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dataprotection/v1beta2.BackupPolicyDisk // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() BackupPolicyID *string `json:"backupPolicyId,omitempty" tf:"backup_policy_id,omitempty"` @@ -29,7 +29,7 @@ type BackupInstanceDiskInitParameters struct { BackupPolicyIDSelector *v1.Selector `json:"backupPolicyIdSelector,omitempty" tf:"-"` // The ID of the source Disk. Changing this forces a new Backup Instance Disk to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/compute/v1beta1.ManagedDisk + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/compute/v1beta2.ManagedDisk // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() DiskID *string `json:"diskId,omitempty" tf:"disk_id,omitempty"` @@ -81,7 +81,7 @@ type BackupInstanceDiskObservation struct { type BackupInstanceDiskParameters struct { // The ID of the Backup Policy. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dataprotection/v1beta1.BackupPolicyDisk + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dataprotection/v1beta2.BackupPolicyDisk // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional BackupPolicyID *string `json:"backupPolicyId,omitempty" tf:"backup_policy_id,omitempty"` @@ -95,7 +95,7 @@ type BackupInstanceDiskParameters struct { BackupPolicyIDSelector *v1.Selector `json:"backupPolicyIdSelector,omitempty" tf:"-"` // The ID of the source Disk. Changing this forces a new Backup Instance Disk to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/compute/v1beta1.ManagedDisk + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/compute/v1beta2.ManagedDisk // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional DiskID *string `json:"diskId,omitempty" tf:"disk_id,omitempty"` @@ -126,7 +126,7 @@ type BackupInstanceDiskParameters struct { SnapshotResourceGroupNameSelector *v1.Selector `json:"snapshotResourceGroupNameSelector,omitempty" tf:"-"` // The ID of the Backup Vault within which the Backup Instance Disk should exist. Changing this forces a new Backup Instance Disk to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dataprotection/v1beta1.BackupVault + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dataprotection/v1beta2.BackupVault // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional VaultID *string `json:"vaultId,omitempty" tf:"vault_id,omitempty"` diff --git a/apis/dataprotection/v1beta1/zz_backupinstancepostgresql_types.go b/apis/dataprotection/v1beta1/zz_backupinstancepostgresql_types.go index a54eebe45..76fff8d25 100755 --- a/apis/dataprotection/v1beta1/zz_backupinstancepostgresql_types.go +++ b/apis/dataprotection/v1beta1/zz_backupinstancepostgresql_types.go @@ -16,7 +16,7 @@ import ( type BackupInstancePostgreSQLInitParameters struct { // The ID of the Backup Policy. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dataprotection/v1beta1.BackupPolicyPostgreSQL + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dataprotection/v1beta2.BackupPolicyPostgreSQL // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() BackupPolicyID *string `json:"backupPolicyId,omitempty" tf:"backup_policy_id,omitempty"` @@ -82,7 +82,7 @@ type BackupInstancePostgreSQLObservation struct { type BackupInstancePostgreSQLParameters struct { // The ID of the Backup Policy. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dataprotection/v1beta1.BackupPolicyPostgreSQL + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dataprotection/v1beta2.BackupPolicyPostgreSQL // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional BackupPolicyID *string `json:"backupPolicyId,omitempty" tf:"backup_policy_id,omitempty"` @@ -128,7 +128,7 @@ type BackupInstancePostgreSQLParameters struct { Location *string `json:"location,omitempty" tf:"location,omitempty"` // The ID of the Backup Vault within which the PostgreSQL Backup Instance should exist. Changing this forces a new Backup Instance PostgreSQL to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dataprotection/v1beta1.BackupVault + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dataprotection/v1beta2.BackupVault // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional VaultID *string `json:"vaultId,omitempty" tf:"vault_id,omitempty"` diff --git a/apis/dataprotection/v1beta1/zz_backuppolicyblobstorage_types.go b/apis/dataprotection/v1beta1/zz_backuppolicyblobstorage_types.go index 1ab878bcd..efaa0f226 100755 --- a/apis/dataprotection/v1beta1/zz_backuppolicyblobstorage_types.go +++ b/apis/dataprotection/v1beta1/zz_backuppolicyblobstorage_types.go @@ -38,7 +38,7 @@ type BackupPolicyBlobStorageParameters struct { RetentionDuration *string `json:"retentionDuration,omitempty" tf:"retention_duration,omitempty"` // The ID of the Backup Vault within which the Backup Policy Blob Storage should exist. Changing this forces a new Backup Policy Blob Storage to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dataprotection/v1beta1.BackupVault + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dataprotection/v1beta2.BackupVault // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() // +kubebuilder:validation:Optional VaultID *string `json:"vaultId,omitempty" tf:"vault_id,omitempty"` diff --git a/apis/dataprotection/v1beta1/zz_generated.conversion_hubs.go b/apis/dataprotection/v1beta1/zz_generated.conversion_hubs.go index c7ca501c4..ca26b6d97 100755 --- a/apis/dataprotection/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/dataprotection/v1beta1/zz_generated.conversion_hubs.go @@ -18,14 +18,5 @@ func (tr *BackupInstancePostgreSQL) Hub() {} // Hub marks this type as a conversion hub. func (tr *BackupPolicyBlobStorage) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *BackupPolicyDisk) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *BackupPolicyPostgreSQL) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *BackupVault) Hub() {} - // Hub marks this type as a conversion hub. func (tr *ResourceGuard) Hub() {} diff --git a/apis/dataprotection/v1beta1/zz_generated.conversion_spokes.go b/apis/dataprotection/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..b4490b075 --- /dev/null +++ b/apis/dataprotection/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,74 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this BackupPolicyDisk to the hub type. 
+func (tr *BackupPolicyDisk) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the BackupPolicyDisk type. +func (tr *BackupPolicyDisk) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this BackupPolicyPostgreSQL to the hub type. +func (tr *BackupPolicyPostgreSQL) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the BackupPolicyPostgreSQL type. +func (tr *BackupPolicyPostgreSQL) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this BackupVault to the hub type. 
+func (tr *BackupVault) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the BackupVault type. +func (tr *BackupVault) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/dataprotection/v1beta1/zz_generated.resolvers.go b/apis/dataprotection/v1beta1/zz_generated.resolvers.go index 5599af7a5..746d3a874 100644 --- a/apis/dataprotection/v1beta1/zz_generated.resolvers.go +++ b/apis/dataprotection/v1beta1/zz_generated.resolvers.go @@ -47,7 +47,7 @@ func (mg *BackupInstanceBlobStorage) ResolveReferences( // ResolveReferences of mg.Spec.ForProvider.BackupPolicyID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.BackupPolicyIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -66,7 +66,7 @@ func (mg *BackupInstanceBlobStorage) ResolveReferences( // ResolveReferences of mg.Spec.ForProvider.StorageAccountID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.StorageAccountIDRef = 
rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("dataprotection.azure.upbound.io", "v1beta1", "BackupVault", "BackupVaultList") + m, l, err = apisresolver.GetManagedResource("dataprotection.azure.upbound.io", "v1beta2", "BackupVault", "BackupVaultList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -104,7 +104,7 @@ func (mg *BackupInstanceBlobStorage) ResolveReferences( // ResolveReferences of mg.Spec.InitProvider.BackupPolicyID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.BackupPolicyIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -135,7 +135,7 @@ func (mg *BackupInstanceDisk) ResolveReferences(ctx context.Context, c client.Re var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("dataprotection.azure.upbound.io", "v1beta1", "BackupPolicyDisk", "BackupPolicyDiskList") + m, l, err = apisresolver.GetManagedResource("dataprotection.azure.upbound.io", "v1beta2", "BackupPolicyDisk", "BackupPolicyDiskList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -154,7 +154,7 @@ func (mg *BackupInstanceDisk) ResolveReferences(ctx context.Context, c client.Re mg.Spec.ForProvider.BackupPolicyID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.BackupPolicyIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("compute.azure.upbound.io", "v1beta1", "ManagedDisk", "ManagedDiskList") + m, l, err = 
apisresolver.GetManagedResource("compute.azure.upbound.io", "v1beta2", "ManagedDisk", "ManagedDiskList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -192,7 +192,7 @@ func (mg *BackupInstanceDisk) ResolveReferences(ctx context.Context, c client.Re mg.Spec.ForProvider.SnapshotResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.SnapshotResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("dataprotection.azure.upbound.io", "v1beta1", "BackupVault", "BackupVaultList") + m, l, err = apisresolver.GetManagedResource("dataprotection.azure.upbound.io", "v1beta2", "BackupVault", "BackupVaultList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -211,7 +211,7 @@ func (mg *BackupInstanceDisk) ResolveReferences(ctx context.Context, c client.Re mg.Spec.ForProvider.VaultID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.VaultIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("dataprotection.azure.upbound.io", "v1beta1", "BackupPolicyDisk", "BackupPolicyDiskList") + m, l, err = apisresolver.GetManagedResource("dataprotection.azure.upbound.io", "v1beta2", "BackupPolicyDisk", "BackupPolicyDiskList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -230,7 +230,7 @@ func (mg *BackupInstanceDisk) ResolveReferences(ctx context.Context, c client.Re mg.Spec.InitProvider.BackupPolicyID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.BackupPolicyIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("compute.azure.upbound.io", "v1beta1", "ManagedDisk", "ManagedDiskList") + m, l, err = apisresolver.GetManagedResource("compute.azure.upbound.io", "v1beta2", "ManagedDisk", 
"ManagedDiskList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -280,7 +280,7 @@ func (mg *BackupInstancePostgreSQL) ResolveReferences(ctx context.Context, c cli var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("dataprotection.azure.upbound.io", "v1beta1", "BackupPolicyPostgreSQL", "BackupPolicyPostgreSQLList") + m, l, err = apisresolver.GetManagedResource("dataprotection.azure.upbound.io", "v1beta2", "BackupPolicyPostgreSQL", "BackupPolicyPostgreSQLList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -337,7 +337,7 @@ func (mg *BackupInstancePostgreSQL) ResolveReferences(ctx context.Context, c cli mg.Spec.ForProvider.DatabaseID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.DatabaseIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("dataprotection.azure.upbound.io", "v1beta1", "BackupVault", "BackupVaultList") + m, l, err = apisresolver.GetManagedResource("dataprotection.azure.upbound.io", "v1beta2", "BackupVault", "BackupVaultList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -356,7 +356,7 @@ func (mg *BackupInstancePostgreSQL) ResolveReferences(ctx context.Context, c cli mg.Spec.ForProvider.VaultID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.VaultIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("dataprotection.azure.upbound.io", "v1beta1", "BackupPolicyPostgreSQL", "BackupPolicyPostgreSQLList") + m, l, err = apisresolver.GetManagedResource("dataprotection.azure.upbound.io", "v1beta2", "BackupPolicyPostgreSQL", "BackupPolicyPostgreSQLList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for 
reference resolution") } @@ -425,7 +425,7 @@ func (mg *BackupPolicyBlobStorage) ResolveReferences(ctx context.Context, c clie var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("dataprotection.azure.upbound.io", "v1beta1", "BackupVault", "BackupVaultList") + m, l, err = apisresolver.GetManagedResource("dataprotection.azure.upbound.io", "v1beta2", "BackupVault", "BackupVaultList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/dataprotection/v1beta2/zz_backuppolicydisk_terraformed.go b/apis/dataprotection/v1beta2/zz_backuppolicydisk_terraformed.go new file mode 100755 index 000000000..6274500a5 --- /dev/null +++ b/apis/dataprotection/v1beta2/zz_backuppolicydisk_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this BackupPolicyDisk +func (mg *BackupPolicyDisk) GetTerraformResourceType() string { + return "azurerm_data_protection_backup_policy_disk" +} + +// GetConnectionDetailsMapping for this BackupPolicyDisk +func (tr *BackupPolicyDisk) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this BackupPolicyDisk +func (tr *BackupPolicyDisk) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this BackupPolicyDisk +func (tr *BackupPolicyDisk) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if 
err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this BackupPolicyDisk +func (tr *BackupPolicyDisk) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this BackupPolicyDisk +func (tr *BackupPolicyDisk) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this BackupPolicyDisk +func (tr *BackupPolicyDisk) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this BackupPolicyDisk +func (tr *BackupPolicyDisk) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this BackupPolicyDisk +func (tr *BackupPolicyDisk) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this BackupPolicyDisk using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *BackupPolicyDisk) LateInitialize(attrs []byte) (bool, error) { + params := &BackupPolicyDiskParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *BackupPolicyDisk) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/dataprotection/v1beta2/zz_backuppolicydisk_types.go b/apis/dataprotection/v1beta2/zz_backuppolicydisk_types.go new file mode 100755 index 000000000..885c513f8 --- /dev/null +++ b/apis/dataprotection/v1beta2/zz_backuppolicydisk_types.go @@ -0,0 +1,213 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type BackupPolicyDiskInitParameters struct { + + // Specifies a list of repeating time interval. It should follow ISO 8601 repeating time interval . Changing this forces a new Backup Policy Disk to be created. 
+ BackupRepeatingTimeIntervals []*string `json:"backupRepeatingTimeIntervals,omitempty" tf:"backup_repeating_time_intervals,omitempty"` + + // The duration of default retention rule. It should follow ISO 8601 duration format. Changing this forces a new Backup Policy Disk to be created. + DefaultRetentionDuration *string `json:"defaultRetentionDuration,omitempty" tf:"default_retention_duration,omitempty"` + + // One or more retention_rule blocks as defined below. Changing this forces a new Backup Policy Disk to be created. + RetentionRule []RetentionRuleInitParameters `json:"retentionRule,omitempty" tf:"retention_rule,omitempty"` + + // Specifies the Time Zone which should be used by the backup schedule. Changing this forces a new Backup Policy Disk to be created. + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +type BackupPolicyDiskObservation struct { + + // Specifies a list of repeating time interval. It should follow ISO 8601 repeating time interval . Changing this forces a new Backup Policy Disk to be created. + BackupRepeatingTimeIntervals []*string `json:"backupRepeatingTimeIntervals,omitempty" tf:"backup_repeating_time_intervals,omitempty"` + + // The duration of default retention rule. It should follow ISO 8601 duration format. Changing this forces a new Backup Policy Disk to be created. + DefaultRetentionDuration *string `json:"defaultRetentionDuration,omitempty" tf:"default_retention_duration,omitempty"` + + // The ID of the Backup Policy Disk. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // One or more retention_rule blocks as defined below. Changing this forces a new Backup Policy Disk to be created. + RetentionRule []RetentionRuleObservation `json:"retentionRule,omitempty" tf:"retention_rule,omitempty"` + + // Specifies the Time Zone which should be used by the backup schedule. Changing this forces a new Backup Policy Disk to be created. 
+ TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` + + // The ID of the Backup Vault within which the Backup Policy Disk should exist. Changing this forces a new Backup Policy Disk to be created. + VaultID *string `json:"vaultId,omitempty" tf:"vault_id,omitempty"` +} + +type BackupPolicyDiskParameters struct { + + // Specifies a list of repeating time interval. It should follow ISO 8601 repeating time interval . Changing this forces a new Backup Policy Disk to be created. + // +kubebuilder:validation:Optional + BackupRepeatingTimeIntervals []*string `json:"backupRepeatingTimeIntervals,omitempty" tf:"backup_repeating_time_intervals,omitempty"` + + // The duration of default retention rule. It should follow ISO 8601 duration format. Changing this forces a new Backup Policy Disk to be created. + // +kubebuilder:validation:Optional + DefaultRetentionDuration *string `json:"defaultRetentionDuration,omitempty" tf:"default_retention_duration,omitempty"` + + // One or more retention_rule blocks as defined below. Changing this forces a new Backup Policy Disk to be created. + // +kubebuilder:validation:Optional + RetentionRule []RetentionRuleParameters `json:"retentionRule,omitempty" tf:"retention_rule,omitempty"` + + // Specifies the Time Zone which should be used by the backup schedule. Changing this forces a new Backup Policy Disk to be created. + // +kubebuilder:validation:Optional + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` + + // The ID of the Backup Vault within which the Backup Policy Disk should exist. Changing this forces a new Backup Policy Disk to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dataprotection/v1beta2.BackupVault + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + VaultID *string `json:"vaultId,omitempty" tf:"vault_id,omitempty"` + + // Reference to a BackupVault in dataprotection to populate vaultId. + // +kubebuilder:validation:Optional + VaultIDRef *v1.Reference `json:"vaultIdRef,omitempty" tf:"-"` + + // Selector for a BackupVault in dataprotection to populate vaultId. + // +kubebuilder:validation:Optional + VaultIDSelector *v1.Selector `json:"vaultIdSelector,omitempty" tf:"-"` +} + +type CriteriaInitParameters struct { + + // Possible values are FirstOfDay and FirstOfWeek. Changing this forces a new Backup Policy Disk to be created. + AbsoluteCriteria *string `json:"absoluteCriteria,omitempty" tf:"absolute_criteria,omitempty"` +} + +type CriteriaObservation struct { + + // Possible values are FirstOfDay and FirstOfWeek. Changing this forces a new Backup Policy Disk to be created. + AbsoluteCriteria *string `json:"absoluteCriteria,omitempty" tf:"absolute_criteria,omitempty"` +} + +type CriteriaParameters struct { + + // Possible values are FirstOfDay and FirstOfWeek. Changing this forces a new Backup Policy Disk to be created. + // +kubebuilder:validation:Optional + AbsoluteCriteria *string `json:"absoluteCriteria,omitempty" tf:"absolute_criteria,omitempty"` +} + +type RetentionRuleInitParameters struct { + + // A criteria block as defined below. Changing this forces a new Backup Policy Disk to be created. + Criteria *CriteriaInitParameters `json:"criteria,omitempty" tf:"criteria,omitempty"` + + // Duration of deletion after given timespan. It should follow ISO 8601 duration format. Changing this forces a new Backup Policy Disk to be created. 
+ Duration *string `json:"duration,omitempty" tf:"duration,omitempty"` + + // The name which should be used for this retention rule. Changing this forces a new Backup Policy Disk to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Retention Tag priority. Changing this forces a new Backup Policy Disk to be created. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` +} + +type RetentionRuleObservation struct { + + // A criteria block as defined below. Changing this forces a new Backup Policy Disk to be created. + Criteria *CriteriaObservation `json:"criteria,omitempty" tf:"criteria,omitempty"` + + // Duration of deletion after given timespan. It should follow ISO 8601 duration format. Changing this forces a new Backup Policy Disk to be created. + Duration *string `json:"duration,omitempty" tf:"duration,omitempty"` + + // The name which should be used for this retention rule. Changing this forces a new Backup Policy Disk to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Retention Tag priority. Changing this forces a new Backup Policy Disk to be created. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` +} + +type RetentionRuleParameters struct { + + // A criteria block as defined below. Changing this forces a new Backup Policy Disk to be created. + // +kubebuilder:validation:Optional + Criteria *CriteriaParameters `json:"criteria" tf:"criteria,omitempty"` + + // Duration of deletion after given timespan. It should follow ISO 8601 duration format. Changing this forces a new Backup Policy Disk to be created. + // +kubebuilder:validation:Optional + Duration *string `json:"duration" tf:"duration,omitempty"` + + // The name which should be used for this retention rule. Changing this forces a new Backup Policy Disk to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Retention Tag priority. 
Changing this forces a new Backup Policy Disk to be created. + // +kubebuilder:validation:Optional + Priority *float64 `json:"priority" tf:"priority,omitempty"` +} + +// BackupPolicyDiskSpec defines the desired state of BackupPolicyDisk +type BackupPolicyDiskSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider BackupPolicyDiskParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider BackupPolicyDiskInitParameters `json:"initProvider,omitempty"` +} + +// BackupPolicyDiskStatus defines the observed state of BackupPolicyDisk. +type BackupPolicyDiskStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider BackupPolicyDiskObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// BackupPolicyDisk is the Schema for the BackupPolicyDisks API. Manages a Backup Policy Disk. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type BackupPolicyDisk struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.backupRepeatingTimeIntervals) || (has(self.initProvider) && has(self.initProvider.backupRepeatingTimeIntervals))",message="spec.forProvider.backupRepeatingTimeIntervals is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.defaultRetentionDuration) || (has(self.initProvider) && has(self.initProvider.defaultRetentionDuration))",message="spec.forProvider.defaultRetentionDuration is a required parameter" + Spec BackupPolicyDiskSpec `json:"spec"` + Status BackupPolicyDiskStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// BackupPolicyDiskList contains a list of BackupPolicyDisks +type BackupPolicyDiskList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []BackupPolicyDisk `json:"items"` +} + +// Repository type metadata. +var ( + BackupPolicyDisk_Kind = "BackupPolicyDisk" + BackupPolicyDisk_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: BackupPolicyDisk_Kind}.String() + BackupPolicyDisk_KindAPIVersion = BackupPolicyDisk_Kind + "." 
+ CRDGroupVersion.String() + BackupPolicyDisk_GroupVersionKind = CRDGroupVersion.WithKind(BackupPolicyDisk_Kind) +) + +func init() { + SchemeBuilder.Register(&BackupPolicyDisk{}, &BackupPolicyDiskList{}) +} diff --git a/apis/dataprotection/v1beta2/zz_backuppolicypostgresql_terraformed.go b/apis/dataprotection/v1beta2/zz_backuppolicypostgresql_terraformed.go new file mode 100755 index 000000000..fbeae1287 --- /dev/null +++ b/apis/dataprotection/v1beta2/zz_backuppolicypostgresql_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this BackupPolicyPostgreSQL +func (mg *BackupPolicyPostgreSQL) GetTerraformResourceType() string { + return "azurerm_data_protection_backup_policy_postgresql" +} + +// GetConnectionDetailsMapping for this BackupPolicyPostgreSQL +func (tr *BackupPolicyPostgreSQL) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this BackupPolicyPostgreSQL +func (tr *BackupPolicyPostgreSQL) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this BackupPolicyPostgreSQL +func (tr *BackupPolicyPostgreSQL) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this BackupPolicyPostgreSQL +func (tr *BackupPolicyPostgreSQL) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + 
return *tr.Status.AtProvider.ID +} + +// GetParameters of this BackupPolicyPostgreSQL +func (tr *BackupPolicyPostgreSQL) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this BackupPolicyPostgreSQL +func (tr *BackupPolicyPostgreSQL) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this BackupPolicyPostgreSQL +func (tr *BackupPolicyPostgreSQL) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this BackupPolicyPostgreSQL +func (tr *BackupPolicyPostgreSQL) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this BackupPolicyPostgreSQL using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *BackupPolicyPostgreSQL) LateInitialize(attrs []byte) (bool, error) { + params := &BackupPolicyPostgreSQLParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *BackupPolicyPostgreSQL) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/dataprotection/v1beta2/zz_backuppolicypostgresql_types.go b/apis/dataprotection/v1beta2/zz_backuppolicypostgresql_types.go new file mode 100755 index 000000000..2adac6606 --- /dev/null +++ b/apis/dataprotection/v1beta2/zz_backuppolicypostgresql_types.go @@ -0,0 +1,280 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type BackupPolicyPostgreSQLInitParameters struct { + + // Specifies a list of repeating time interval. It supports weekly back. It should follow ISO 8601 repeating time interval. 
Changing this forces a new Backup Policy PostgreSQL to be created. + BackupRepeatingTimeIntervals []*string `json:"backupRepeatingTimeIntervals,omitempty" tf:"backup_repeating_time_intervals,omitempty"` + + // The duration of default retention rule. It should follow ISO 8601 duration format. Changing this forces a new Backup Policy PostgreSQL to be created. + DefaultRetentionDuration *string `json:"defaultRetentionDuration,omitempty" tf:"default_retention_duration,omitempty"` + + // One or more retention_rule blocks as defined below. Changing this forces a new Backup Policy PostgreSQL to be created. + RetentionRule []BackupPolicyPostgreSQLRetentionRuleInitParameters `json:"retentionRule,omitempty" tf:"retention_rule,omitempty"` + + // Specifies the Time Zone which should be used by the backup schedule. Changing this forces a new Backup Policy PostgreSQL to be created. + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +type BackupPolicyPostgreSQLObservation struct { + + // Specifies a list of repeating time interval. It supports weekly back. It should follow ISO 8601 repeating time interval. Changing this forces a new Backup Policy PostgreSQL to be created. + BackupRepeatingTimeIntervals []*string `json:"backupRepeatingTimeIntervals,omitempty" tf:"backup_repeating_time_intervals,omitempty"` + + // The duration of default retention rule. It should follow ISO 8601 duration format. Changing this forces a new Backup Policy PostgreSQL to be created. + DefaultRetentionDuration *string `json:"defaultRetentionDuration,omitempty" tf:"default_retention_duration,omitempty"` + + // The ID of the Backup Policy PostgreSQL. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name of the Resource Group where the Backup Policy PostgreSQL should exist. Changing this forces a new Backup Policy PostgreSQL to be created. 
+ ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // One or more retention_rule blocks as defined below. Changing this forces a new Backup Policy PostgreSQL to be created. + RetentionRule []BackupPolicyPostgreSQLRetentionRuleObservation `json:"retentionRule,omitempty" tf:"retention_rule,omitempty"` + + // Specifies the Time Zone which should be used by the backup schedule. Changing this forces a new Backup Policy PostgreSQL to be created. + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` + + // The name of the Backup Vault where the Backup Policy PostgreSQL should exist. Changing this forces a new Backup Policy PostgreSQL to be created. + VaultName *string `json:"vaultName,omitempty" tf:"vault_name,omitempty"` +} + +type BackupPolicyPostgreSQLParameters struct { + + // Specifies a list of repeating time interval. It supports weekly back. It should follow ISO 8601 repeating time interval. Changing this forces a new Backup Policy PostgreSQL to be created. + // +kubebuilder:validation:Optional + BackupRepeatingTimeIntervals []*string `json:"backupRepeatingTimeIntervals,omitempty" tf:"backup_repeating_time_intervals,omitempty"` + + // The duration of default retention rule. It should follow ISO 8601 duration format. Changing this forces a new Backup Policy PostgreSQL to be created. + // +kubebuilder:validation:Optional + DefaultRetentionDuration *string `json:"defaultRetentionDuration,omitempty" tf:"default_retention_duration,omitempty"` + + // The name of the Resource Group where the Backup Policy PostgreSQL should exist. Changing this forces a new Backup Policy PostgreSQL to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // One or more retention_rule blocks as defined below. Changing this forces a new Backup Policy PostgreSQL to be created. + // +kubebuilder:validation:Optional + RetentionRule []BackupPolicyPostgreSQLRetentionRuleParameters `json:"retentionRule,omitempty" tf:"retention_rule,omitempty"` + + // Specifies the Time Zone which should be used by the backup schedule. Changing this forces a new Backup Policy PostgreSQL to be created. + // +kubebuilder:validation:Optional + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` + + // The name of the Backup Vault where the Backup Policy PostgreSQL should exist. Changing this forces a new Backup Policy PostgreSQL to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dataprotection/v1beta2.BackupVault + // +kubebuilder:validation:Optional + VaultName *string `json:"vaultName,omitempty" tf:"vault_name,omitempty"` + + // Reference to a BackupVault in dataprotection to populate vaultName. + // +kubebuilder:validation:Optional + VaultNameRef *v1.Reference `json:"vaultNameRef,omitempty" tf:"-"` + + // Selector for a BackupVault in dataprotection to populate vaultName. 
+ // +kubebuilder:validation:Optional + VaultNameSelector *v1.Selector `json:"vaultNameSelector,omitempty" tf:"-"` +} + +type BackupPolicyPostgreSQLRetentionRuleInitParameters struct { + + // A criteria block as defined below. Changing this forces a new Backup Policy PostgreSQL to be created. + Criteria *RetentionRuleCriteriaInitParameters `json:"criteria,omitempty" tf:"criteria,omitempty"` + + // Duration after which the backup is deleted. It should follow ISO 8601 duration format. Changing this forces a new Backup Policy PostgreSQL to be created. + Duration *string `json:"duration,omitempty" tf:"duration,omitempty"` + + // The name which should be used for this retention rule. Changing this forces a new Backup Policy PostgreSQL to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies the priority of the rule. The priority number must be unique for each rule. The lower the priority number, the higher the priority of the rule. Changing this forces a new Backup Policy PostgreSQL to be created. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` +} + +type BackupPolicyPostgreSQLRetentionRuleObservation struct { + + // A criteria block as defined below. Changing this forces a new Backup Policy PostgreSQL to be created. + Criteria *RetentionRuleCriteriaObservation `json:"criteria,omitempty" tf:"criteria,omitempty"` + + // Duration after which the backup is deleted. It should follow ISO 8601 duration format. Changing this forces a new Backup Policy PostgreSQL to be created. + Duration *string `json:"duration,omitempty" tf:"duration,omitempty"` + + // The name which should be used for this retention rule. Changing this forces a new Backup Policy PostgreSQL to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies the priority of the rule. The priority number must be unique for each rule. The lower the priority number, the higher the priority of the rule. 
Changing this forces a new Backup Policy PostgreSQL to be created. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` +} + +type BackupPolicyPostgreSQLRetentionRuleParameters struct { + + // A criteria block as defined below. Changing this forces a new Backup Policy PostgreSQL to be created. + // +kubebuilder:validation:Optional + Criteria *RetentionRuleCriteriaParameters `json:"criteria" tf:"criteria,omitempty"` + + // Duration after which the backup is deleted. It should follow ISO 8601 duration format. Changing this forces a new Backup Policy PostgreSQL to be created. + // +kubebuilder:validation:Optional + Duration *string `json:"duration" tf:"duration,omitempty"` + + // The name which should be used for this retention rule. Changing this forces a new Backup Policy PostgreSQL to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Specifies the priority of the rule. The priority number must be unique for each rule. The lower the priority number, the higher the priority of the rule. Changing this forces a new Backup Policy PostgreSQL to be created. + // +kubebuilder:validation:Optional + Priority *float64 `json:"priority" tf:"priority,omitempty"` +} + +type RetentionRuleCriteriaInitParameters struct { + + // Possible values are AllBackup, FirstOfDay, FirstOfWeek, FirstOfMonth and FirstOfYear. These values mean the first successful backup of the day/week/month/year. Changing this forces a new Backup Policy PostgreSQL to be created. + AbsoluteCriteria *string `json:"absoluteCriteria,omitempty" tf:"absolute_criteria,omitempty"` + + // Possible values are Monday, Tuesday, Thursday, Friday, Saturday and Sunday. Changing this forces a new Backup Policy PostgreSQL to be created. 
+ // +listType=set + DaysOfWeek []*string `json:"daysOfWeek,omitempty" tf:"days_of_week,omitempty"` + + // Possible values are January, February, March, April, May, June, July, August, September, October, November and December. Changing this forces a new Backup Policy PostgreSQL to be created. + // +listType=set + MonthsOfYear []*string `json:"monthsOfYear,omitempty" tf:"months_of_year,omitempty"` + + // Specifies a list of backup times for backup in the RFC3339 format. Changing this forces a new Backup Policy PostgreSQL to be created. + // +listType=set + ScheduledBackupTimes []*string `json:"scheduledBackupTimes,omitempty" tf:"scheduled_backup_times,omitempty"` + + // Possible values are First, Second, Third, Fourth and Last. Changing this forces a new Backup Policy PostgreSQL to be created. + // +listType=set + WeeksOfMonth []*string `json:"weeksOfMonth,omitempty" tf:"weeks_of_month,omitempty"` +} + +type RetentionRuleCriteriaObservation struct { + + // Possible values are AllBackup, FirstOfDay, FirstOfWeek, FirstOfMonth and FirstOfYear. These values mean the first successful backup of the day/week/month/year. Changing this forces a new Backup Policy PostgreSQL to be created. + AbsoluteCriteria *string `json:"absoluteCriteria,omitempty" tf:"absolute_criteria,omitempty"` + + // Possible values are Monday, Tuesday, Thursday, Friday, Saturday and Sunday. Changing this forces a new Backup Policy PostgreSQL to be created. + // +listType=set + DaysOfWeek []*string `json:"daysOfWeek,omitempty" tf:"days_of_week,omitempty"` + + // Possible values are January, February, March, April, May, June, July, August, September, October, November and December. Changing this forces a new Backup Policy PostgreSQL to be created. + // +listType=set + MonthsOfYear []*string `json:"monthsOfYear,omitempty" tf:"months_of_year,omitempty"` + + // Specifies a list of backup times for backup in the RFC3339 format. Changing this forces a new Backup Policy PostgreSQL to be created. 
+ // +listType=set + ScheduledBackupTimes []*string `json:"scheduledBackupTimes,omitempty" tf:"scheduled_backup_times,omitempty"` + + // Possible values are First, Second, Third, Fourth and Last. Changing this forces a new Backup Policy PostgreSQL to be created. + // +listType=set + WeeksOfMonth []*string `json:"weeksOfMonth,omitempty" tf:"weeks_of_month,omitempty"` +} + +type RetentionRuleCriteriaParameters struct { + + // Possible values are AllBackup, FirstOfDay, FirstOfWeek, FirstOfMonth and FirstOfYear. These values mean the first successful backup of the day/week/month/year. Changing this forces a new Backup Policy PostgreSQL to be created. + // +kubebuilder:validation:Optional + AbsoluteCriteria *string `json:"absoluteCriteria,omitempty" tf:"absolute_criteria,omitempty"` + + // Possible values are Monday, Tuesday, Thursday, Friday, Saturday and Sunday. Changing this forces a new Backup Policy PostgreSQL to be created. + // +kubebuilder:validation:Optional + // +listType=set + DaysOfWeek []*string `json:"daysOfWeek,omitempty" tf:"days_of_week,omitempty"` + + // Possible values are January, February, March, April, May, June, July, August, September, October, November and December. Changing this forces a new Backup Policy PostgreSQL to be created. + // +kubebuilder:validation:Optional + // +listType=set + MonthsOfYear []*string `json:"monthsOfYear,omitempty" tf:"months_of_year,omitempty"` + + // Specifies a list of backup times for backup in the RFC3339 format. Changing this forces a new Backup Policy PostgreSQL to be created. + // +kubebuilder:validation:Optional + // +listType=set + ScheduledBackupTimes []*string `json:"scheduledBackupTimes,omitempty" tf:"scheduled_backup_times,omitempty"` + + // Possible values are First, Second, Third, Fourth and Last. Changing this forces a new Backup Policy PostgreSQL to be created. 
+ // +kubebuilder:validation:Optional + // +listType=set + WeeksOfMonth []*string `json:"weeksOfMonth,omitempty" tf:"weeks_of_month,omitempty"` +} + +// BackupPolicyPostgreSQLSpec defines the desired state of BackupPolicyPostgreSQL +type BackupPolicyPostgreSQLSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider BackupPolicyPostgreSQLParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider BackupPolicyPostgreSQLInitParameters `json:"initProvider,omitempty"` +} + +// BackupPolicyPostgreSQLStatus defines the observed state of BackupPolicyPostgreSQL. +type BackupPolicyPostgreSQLStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider BackupPolicyPostgreSQLObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// BackupPolicyPostgreSQL is the Schema for the BackupPolicyPostgreSQLs API. Manages a Backup Policy to back up PostgreSQL. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type BackupPolicyPostgreSQL struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.backupRepeatingTimeIntervals) || (has(self.initProvider) && has(self.initProvider.backupRepeatingTimeIntervals))",message="spec.forProvider.backupRepeatingTimeIntervals is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.defaultRetentionDuration) || (has(self.initProvider) && has(self.initProvider.defaultRetentionDuration))",message="spec.forProvider.defaultRetentionDuration is a required parameter" + Spec BackupPolicyPostgreSQLSpec `json:"spec"` + Status BackupPolicyPostgreSQLStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// BackupPolicyPostgreSQLList contains a list of BackupPolicyPostgreSQLs +type BackupPolicyPostgreSQLList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []BackupPolicyPostgreSQL `json:"items"` +} + +// Repository type metadata. 
+var ( + BackupPolicyPostgreSQL_Kind = "BackupPolicyPostgreSQL" + BackupPolicyPostgreSQL_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: BackupPolicyPostgreSQL_Kind}.String() + BackupPolicyPostgreSQL_KindAPIVersion = BackupPolicyPostgreSQL_Kind + "." + CRDGroupVersion.String() + BackupPolicyPostgreSQL_GroupVersionKind = CRDGroupVersion.WithKind(BackupPolicyPostgreSQL_Kind) +) + +func init() { + SchemeBuilder.Register(&BackupPolicyPostgreSQL{}, &BackupPolicyPostgreSQLList{}) +} diff --git a/apis/dataprotection/v1beta2/zz_backupvault_terraformed.go b/apis/dataprotection/v1beta2/zz_backupvault_terraformed.go new file mode 100755 index 000000000..373ac1eee --- /dev/null +++ b/apis/dataprotection/v1beta2/zz_backupvault_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this BackupVault +func (mg *BackupVault) GetTerraformResourceType() string { + return "azurerm_data_protection_backup_vault" +} + +// GetConnectionDetailsMapping for this BackupVault +func (tr *BackupVault) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this BackupVault +func (tr *BackupVault) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this BackupVault +func (tr *BackupVault) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform 
resource of this BackupVault +func (tr *BackupVault) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this BackupVault +func (tr *BackupVault) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this BackupVault +func (tr *BackupVault) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this BackupVault +func (tr *BackupVault) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this BackupVault +func (tr *BackupVault) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this BackupVault using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *BackupVault) LateInitialize(attrs []byte) (bool, error) { + params := &BackupVaultParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *BackupVault) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/dataprotection/v1beta2/zz_backupvault_types.go b/apis/dataprotection/v1beta2/zz_backupvault_types.go new file mode 100755 index 000000000..1964759d8 --- /dev/null +++ b/apis/dataprotection/v1beta2/zz_backupvault_types.go @@ -0,0 +1,203 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type BackupVaultInitParameters struct { + + // Specifies the type of the data store. Possible values are ArchiveStore, OperationalStore, SnapshotStore and VaultStore. Changing this forces a new resource to be created. 
+ DatastoreType *string `json:"datastoreType,omitempty" tf:"datastore_type,omitempty"` + + // An identity block as defined below. + Identity *IdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The Azure Region where the Backup Vault should exist. Changing this forces a new Backup Vault to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the backup storage redundancy. Possible values are GeoRedundant, LocallyRedundant and ZoneRedundant. Changing this forces a new Backup Vault to be created. + Redundancy *string `json:"redundancy,omitempty" tf:"redundancy,omitempty"` + + // The soft delete retention duration for this Backup Vault. Possible values are between 14 and 180. Defaults to 14. + RetentionDurationInDays *float64 `json:"retentionDurationInDays,omitempty" tf:"retention_duration_in_days,omitempty"` + + // The state of soft delete for this Backup Vault. Possible values are AlwaysOn, Off and On. Defaults to On. + SoftDelete *string `json:"softDelete,omitempty" tf:"soft_delete,omitempty"` + + // A mapping of tags which should be assigned to the Backup Vault. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type BackupVaultObservation struct { + + // Specifies the type of the data store. Possible values are ArchiveStore, OperationalStore, SnapshotStore and VaultStore. Changing this forces a new resource to be created. + DatastoreType *string `json:"datastoreType,omitempty" tf:"datastore_type,omitempty"` + + // The ID of the Backup Vault. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. + Identity *IdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // The Azure Region where the Backup Vault should exist. Changing this forces a new Backup Vault to be created. 
+ Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the backup storage redundancy. Possible values are GeoRedundant, LocallyRedundant and ZoneRedundant. Changing this forces a new Backup Vault to be created. + Redundancy *string `json:"redundancy,omitempty" tf:"redundancy,omitempty"` + + // The name of the Resource Group where the Backup Vault should exist. Changing this forces a new Backup Vault to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // The soft delete retention duration for this Backup Vault. Possible values are between 14 and 180. Defaults to 14. + RetentionDurationInDays *float64 `json:"retentionDurationInDays,omitempty" tf:"retention_duration_in_days,omitempty"` + + // The state of soft delete for this Backup Vault. Possible values are AlwaysOn, Off and On. Defaults to On. + SoftDelete *string `json:"softDelete,omitempty" tf:"soft_delete,omitempty"` + + // A mapping of tags which should be assigned to the Backup Vault. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type BackupVaultParameters struct { + + // Specifies the type of the data store. Possible values are ArchiveStore, OperationalStore, SnapshotStore and VaultStore. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + DatastoreType *string `json:"datastoreType,omitempty" tf:"datastore_type,omitempty"` + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *IdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The Azure Region where the Backup Vault should exist. Changing this forces a new Backup Vault to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the backup storage redundancy. Possible values are GeoRedundant, LocallyRedundant and ZoneRedundant. 
Changing this forces a new Backup Vault to be created. + // +kubebuilder:validation:Optional + Redundancy *string `json:"redundancy,omitempty" tf:"redundancy,omitempty"` + + // The name of the Resource Group where the Backup Vault should exist. Changing this forces a new Backup Vault to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // The soft delete retention duration for this Backup Vault. Possible values are between 14 and 180. Defaults to 14. + // +kubebuilder:validation:Optional + RetentionDurationInDays *float64 `json:"retentionDurationInDays,omitempty" tf:"retention_duration_in_days,omitempty"` + + // The state of soft delete for this Backup Vault. Possible values are AlwaysOn, Off and On. Defaults to On. + // +kubebuilder:validation:Optional + SoftDelete *string `json:"softDelete,omitempty" tf:"soft_delete,omitempty"` + + // A mapping of tags which should be assigned to the Backup Vault. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type IdentityInitParameters struct { + + // Specifies the type of Managed Service Identity that should be configured on this Backup Vault. The only possible value is SystemAssigned. 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityObservation struct { + + // The Principal ID for the Service Principal associated with the Identity of this Backup Vault. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID for the Service Principal associated with the Identity of this Backup Vault. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Backup Vault. The only possible value is SystemAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityParameters struct { + + // Specifies the type of Managed Service Identity that should be configured on this Backup Vault. The only possible value is SystemAssigned. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +// BackupVaultSpec defines the desired state of BackupVault +type BackupVaultSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider BackupVaultParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider BackupVaultInitParameters `json:"initProvider,omitempty"` +} + +// BackupVaultStatus defines the observed state of BackupVault. 
+type BackupVaultStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider BackupVaultObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// BackupVault is the Schema for the BackupVaults API. Manages a Backup Vault. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type BackupVault struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.datastoreType) || (has(self.initProvider) && has(self.initProvider.datastoreType))",message="spec.forProvider.datastoreType is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.redundancy) || (has(self.initProvider) && has(self.initProvider.redundancy))",message="spec.forProvider.redundancy is a required parameter" + Spec BackupVaultSpec `json:"spec"` + Status BackupVaultStatus `json:"status,omitempty"` +} + +// 
+kubebuilder:object:root=true + +// BackupVaultList contains a list of BackupVaults +type BackupVaultList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []BackupVault `json:"items"` +} + +// Repository type metadata. +var ( + BackupVault_Kind = "BackupVault" + BackupVault_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: BackupVault_Kind}.String() + BackupVault_KindAPIVersion = BackupVault_Kind + "." + CRDGroupVersion.String() + BackupVault_GroupVersionKind = CRDGroupVersion.WithKind(BackupVault_Kind) +) + +func init() { + SchemeBuilder.Register(&BackupVault{}, &BackupVaultList{}) +} diff --git a/apis/dataprotection/v1beta2/zz_generated.conversion_hubs.go b/apis/dataprotection/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 000000000..5726d8bc1 --- /dev/null +++ b/apis/dataprotection/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,16 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *BackupPolicyDisk) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *BackupPolicyPostgreSQL) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *BackupVault) Hub() {} diff --git a/apis/dataprotection/v1beta2/zz_generated.deepcopy.go b/apis/dataprotection/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..c3f2daae7 --- /dev/null +++ b/apis/dataprotection/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,1364 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupPolicyDisk) DeepCopyInto(out *BackupPolicyDisk) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyDisk. +func (in *BackupPolicyDisk) DeepCopy() *BackupPolicyDisk { + if in == nil { + return nil + } + out := new(BackupPolicyDisk) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BackupPolicyDisk) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupPolicyDiskInitParameters) DeepCopyInto(out *BackupPolicyDiskInitParameters) { + *out = *in + if in.BackupRepeatingTimeIntervals != nil { + in, out := &in.BackupRepeatingTimeIntervals, &out.BackupRepeatingTimeIntervals + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DefaultRetentionDuration != nil { + in, out := &in.DefaultRetentionDuration, &out.DefaultRetentionDuration + *out = new(string) + **out = **in + } + if in.RetentionRule != nil { + in, out := &in.RetentionRule, &out.RetentionRule + *out = make([]RetentionRuleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyDiskInitParameters. +func (in *BackupPolicyDiskInitParameters) DeepCopy() *BackupPolicyDiskInitParameters { + if in == nil { + return nil + } + out := new(BackupPolicyDiskInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupPolicyDiskList) DeepCopyInto(out *BackupPolicyDiskList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BackupPolicyDisk, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyDiskList. 
+func (in *BackupPolicyDiskList) DeepCopy() *BackupPolicyDiskList { + if in == nil { + return nil + } + out := new(BackupPolicyDiskList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BackupPolicyDiskList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupPolicyDiskObservation) DeepCopyInto(out *BackupPolicyDiskObservation) { + *out = *in + if in.BackupRepeatingTimeIntervals != nil { + in, out := &in.BackupRepeatingTimeIntervals, &out.BackupRepeatingTimeIntervals + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DefaultRetentionDuration != nil { + in, out := &in.DefaultRetentionDuration, &out.DefaultRetentionDuration + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.RetentionRule != nil { + in, out := &in.RetentionRule, &out.RetentionRule + *out = make([]RetentionRuleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } + if in.VaultID != nil { + in, out := &in.VaultID, &out.VaultID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyDiskObservation. 
+func (in *BackupPolicyDiskObservation) DeepCopy() *BackupPolicyDiskObservation { + if in == nil { + return nil + } + out := new(BackupPolicyDiskObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupPolicyDiskParameters) DeepCopyInto(out *BackupPolicyDiskParameters) { + *out = *in + if in.BackupRepeatingTimeIntervals != nil { + in, out := &in.BackupRepeatingTimeIntervals, &out.BackupRepeatingTimeIntervals + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DefaultRetentionDuration != nil { + in, out := &in.DefaultRetentionDuration, &out.DefaultRetentionDuration + *out = new(string) + **out = **in + } + if in.RetentionRule != nil { + in, out := &in.RetentionRule, &out.RetentionRule + *out = make([]RetentionRuleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } + if in.VaultID != nil { + in, out := &in.VaultID, &out.VaultID + *out = new(string) + **out = **in + } + if in.VaultIDRef != nil { + in, out := &in.VaultIDRef, &out.VaultIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VaultIDSelector != nil { + in, out := &in.VaultIDSelector, &out.VaultIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyDiskParameters. +func (in *BackupPolicyDiskParameters) DeepCopy() *BackupPolicyDiskParameters { + if in == nil { + return nil + } + out := new(BackupPolicyDiskParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupPolicyDiskSpec) DeepCopyInto(out *BackupPolicyDiskSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyDiskSpec. +func (in *BackupPolicyDiskSpec) DeepCopy() *BackupPolicyDiskSpec { + if in == nil { + return nil + } + out := new(BackupPolicyDiskSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupPolicyDiskStatus) DeepCopyInto(out *BackupPolicyDiskStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyDiskStatus. +func (in *BackupPolicyDiskStatus) DeepCopy() *BackupPolicyDiskStatus { + if in == nil { + return nil + } + out := new(BackupPolicyDiskStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupPolicyPostgreSQL) DeepCopyInto(out *BackupPolicyPostgreSQL) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyPostgreSQL. +func (in *BackupPolicyPostgreSQL) DeepCopy() *BackupPolicyPostgreSQL { + if in == nil { + return nil + } + out := new(BackupPolicyPostgreSQL) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *BackupPolicyPostgreSQL) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupPolicyPostgreSQLInitParameters) DeepCopyInto(out *BackupPolicyPostgreSQLInitParameters) { + *out = *in + if in.BackupRepeatingTimeIntervals != nil { + in, out := &in.BackupRepeatingTimeIntervals, &out.BackupRepeatingTimeIntervals + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DefaultRetentionDuration != nil { + in, out := &in.DefaultRetentionDuration, &out.DefaultRetentionDuration + *out = new(string) + **out = **in + } + if in.RetentionRule != nil { + in, out := &in.RetentionRule, &out.RetentionRule + *out = make([]BackupPolicyPostgreSQLRetentionRuleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyPostgreSQLInitParameters. +func (in *BackupPolicyPostgreSQLInitParameters) DeepCopy() *BackupPolicyPostgreSQLInitParameters { + if in == nil { + return nil + } + out := new(BackupPolicyPostgreSQLInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupPolicyPostgreSQLList) DeepCopyInto(out *BackupPolicyPostgreSQLList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BackupPolicyPostgreSQL, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyPostgreSQLList. +func (in *BackupPolicyPostgreSQLList) DeepCopy() *BackupPolicyPostgreSQLList { + if in == nil { + return nil + } + out := new(BackupPolicyPostgreSQLList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BackupPolicyPostgreSQLList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupPolicyPostgreSQLObservation) DeepCopyInto(out *BackupPolicyPostgreSQLObservation) { + *out = *in + if in.BackupRepeatingTimeIntervals != nil { + in, out := &in.BackupRepeatingTimeIntervals, &out.BackupRepeatingTimeIntervals + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DefaultRetentionDuration != nil { + in, out := &in.DefaultRetentionDuration, &out.DefaultRetentionDuration + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.RetentionRule != nil { + in, out := &in.RetentionRule, &out.RetentionRule + *out = make([]BackupPolicyPostgreSQLRetentionRuleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } + if in.VaultName != nil { + in, out := &in.VaultName, &out.VaultName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyPostgreSQLObservation. +func (in *BackupPolicyPostgreSQLObservation) DeepCopy() *BackupPolicyPostgreSQLObservation { + if in == nil { + return nil + } + out := new(BackupPolicyPostgreSQLObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupPolicyPostgreSQLParameters) DeepCopyInto(out *BackupPolicyPostgreSQLParameters) { + *out = *in + if in.BackupRepeatingTimeIntervals != nil { + in, out := &in.BackupRepeatingTimeIntervals, &out.BackupRepeatingTimeIntervals + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DefaultRetentionDuration != nil { + in, out := &in.DefaultRetentionDuration, &out.DefaultRetentionDuration + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RetentionRule != nil { + in, out := &in.RetentionRule, &out.RetentionRule + *out = make([]BackupPolicyPostgreSQLRetentionRuleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } + if in.VaultName != nil { + in, out := &in.VaultName, &out.VaultName + *out = new(string) + **out = **in + } + if in.VaultNameRef != nil { + in, out := &in.VaultNameRef, &out.VaultNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VaultNameSelector != nil { + in, out := &in.VaultNameSelector, &out.VaultNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyPostgreSQLParameters. 
+func (in *BackupPolicyPostgreSQLParameters) DeepCopy() *BackupPolicyPostgreSQLParameters { + if in == nil { + return nil + } + out := new(BackupPolicyPostgreSQLParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupPolicyPostgreSQLRetentionRuleInitParameters) DeepCopyInto(out *BackupPolicyPostgreSQLRetentionRuleInitParameters) { + *out = *in + if in.Criteria != nil { + in, out := &in.Criteria, &out.Criteria + *out = new(RetentionRuleCriteriaInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyPostgreSQLRetentionRuleInitParameters. +func (in *BackupPolicyPostgreSQLRetentionRuleInitParameters) DeepCopy() *BackupPolicyPostgreSQLRetentionRuleInitParameters { + if in == nil { + return nil + } + out := new(BackupPolicyPostgreSQLRetentionRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupPolicyPostgreSQLRetentionRuleObservation) DeepCopyInto(out *BackupPolicyPostgreSQLRetentionRuleObservation) { + *out = *in + if in.Criteria != nil { + in, out := &in.Criteria, &out.Criteria + *out = new(RetentionRuleCriteriaObservation) + (*in).DeepCopyInto(*out) + } + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyPostgreSQLRetentionRuleObservation. +func (in *BackupPolicyPostgreSQLRetentionRuleObservation) DeepCopy() *BackupPolicyPostgreSQLRetentionRuleObservation { + if in == nil { + return nil + } + out := new(BackupPolicyPostgreSQLRetentionRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupPolicyPostgreSQLRetentionRuleParameters) DeepCopyInto(out *BackupPolicyPostgreSQLRetentionRuleParameters) { + *out = *in + if in.Criteria != nil { + in, out := &in.Criteria, &out.Criteria + *out = new(RetentionRuleCriteriaParameters) + (*in).DeepCopyInto(*out) + } + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyPostgreSQLRetentionRuleParameters. 
+func (in *BackupPolicyPostgreSQLRetentionRuleParameters) DeepCopy() *BackupPolicyPostgreSQLRetentionRuleParameters { + if in == nil { + return nil + } + out := new(BackupPolicyPostgreSQLRetentionRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupPolicyPostgreSQLSpec) DeepCopyInto(out *BackupPolicyPostgreSQLSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyPostgreSQLSpec. +func (in *BackupPolicyPostgreSQLSpec) DeepCopy() *BackupPolicyPostgreSQLSpec { + if in == nil { + return nil + } + out := new(BackupPolicyPostgreSQLSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupPolicyPostgreSQLStatus) DeepCopyInto(out *BackupPolicyPostgreSQLStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyPostgreSQLStatus. +func (in *BackupPolicyPostgreSQLStatus) DeepCopy() *BackupPolicyPostgreSQLStatus { + if in == nil { + return nil + } + out := new(BackupPolicyPostgreSQLStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupVault) DeepCopyInto(out *BackupVault) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupVault. +func (in *BackupVault) DeepCopy() *BackupVault { + if in == nil { + return nil + } + out := new(BackupVault) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BackupVault) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupVaultInitParameters) DeepCopyInto(out *BackupVaultInitParameters) { + *out = *in + if in.DatastoreType != nil { + in, out := &in.DatastoreType, &out.DatastoreType + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Redundancy != nil { + in, out := &in.Redundancy, &out.Redundancy + *out = new(string) + **out = **in + } + if in.RetentionDurationInDays != nil { + in, out := &in.RetentionDurationInDays, &out.RetentionDurationInDays + *out = new(float64) + **out = **in + } + if in.SoftDelete != nil { + in, out := &in.SoftDelete, &out.SoftDelete + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// 
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupVaultInitParameters. +func (in *BackupVaultInitParameters) DeepCopy() *BackupVaultInitParameters { + if in == nil { + return nil + } + out := new(BackupVaultInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupVaultList) DeepCopyInto(out *BackupVaultList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BackupVault, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupVaultList. +func (in *BackupVaultList) DeepCopy() *BackupVaultList { + if in == nil { + return nil + } + out := new(BackupVaultList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BackupVaultList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupVaultObservation) DeepCopyInto(out *BackupVaultObservation) { + *out = *in + if in.DatastoreType != nil { + in, out := &in.DatastoreType, &out.DatastoreType + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Redundancy != nil { + in, out := &in.Redundancy, &out.Redundancy + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.RetentionDurationInDays != nil { + in, out := &in.RetentionDurationInDays, &out.RetentionDurationInDays + *out = new(float64) + **out = **in + } + if in.SoftDelete != nil { + in, out := &in.SoftDelete, &out.SoftDelete + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupVaultObservation. +func (in *BackupVaultObservation) DeepCopy() *BackupVaultObservation { + if in == nil { + return nil + } + out := new(BackupVaultObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupVaultParameters) DeepCopyInto(out *BackupVaultParameters) { + *out = *in + if in.DatastoreType != nil { + in, out := &in.DatastoreType, &out.DatastoreType + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Redundancy != nil { + in, out := &in.Redundancy, &out.Redundancy + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RetentionDurationInDays != nil { + in, out := &in.RetentionDurationInDays, &out.RetentionDurationInDays + *out = new(float64) + **out = **in + } + if in.SoftDelete != nil { + in, out := &in.SoftDelete, &out.SoftDelete + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupVaultParameters. 
+func (in *BackupVaultParameters) DeepCopy() *BackupVaultParameters { + if in == nil { + return nil + } + out := new(BackupVaultParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupVaultSpec) DeepCopyInto(out *BackupVaultSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupVaultSpec. +func (in *BackupVaultSpec) DeepCopy() *BackupVaultSpec { + if in == nil { + return nil + } + out := new(BackupVaultSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupVaultStatus) DeepCopyInto(out *BackupVaultStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupVaultStatus. +func (in *BackupVaultStatus) DeepCopy() *BackupVaultStatus { + if in == nil { + return nil + } + out := new(BackupVaultStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CriteriaInitParameters) DeepCopyInto(out *CriteriaInitParameters) { + *out = *in + if in.AbsoluteCriteria != nil { + in, out := &in.AbsoluteCriteria, &out.AbsoluteCriteria + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CriteriaInitParameters. 
+func (in *CriteriaInitParameters) DeepCopy() *CriteriaInitParameters { + if in == nil { + return nil + } + out := new(CriteriaInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CriteriaObservation) DeepCopyInto(out *CriteriaObservation) { + *out = *in + if in.AbsoluteCriteria != nil { + in, out := &in.AbsoluteCriteria, &out.AbsoluteCriteria + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CriteriaObservation. +func (in *CriteriaObservation) DeepCopy() *CriteriaObservation { + if in == nil { + return nil + } + out := new(CriteriaObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CriteriaParameters) DeepCopyInto(out *CriteriaParameters) { + *out = *in + if in.AbsoluteCriteria != nil { + in, out := &in.AbsoluteCriteria, &out.AbsoluteCriteria + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CriteriaParameters. +func (in *CriteriaParameters) DeepCopy() *CriteriaParameters { + if in == nil { + return nil + } + out := new(CriteriaParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityInitParameters) DeepCopyInto(out *IdentityInitParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityInitParameters. 
+func (in *IdentityInitParameters) DeepCopy() *IdentityInitParameters { + if in == nil { + return nil + } + out := new(IdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityObservation) DeepCopyInto(out *IdentityObservation) { + *out = *in + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityObservation. +func (in *IdentityObservation) DeepCopy() *IdentityObservation { + if in == nil { + return nil + } + out := new(IdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityParameters) DeepCopyInto(out *IdentityParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityParameters. +func (in *IdentityParameters) DeepCopy() *IdentityParameters { + if in == nil { + return nil + } + out := new(IdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RetentionRuleCriteriaInitParameters) DeepCopyInto(out *RetentionRuleCriteriaInitParameters) { + *out = *in + if in.AbsoluteCriteria != nil { + in, out := &in.AbsoluteCriteria, &out.AbsoluteCriteria + *out = new(string) + **out = **in + } + if in.DaysOfWeek != nil { + in, out := &in.DaysOfWeek, &out.DaysOfWeek + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MonthsOfYear != nil { + in, out := &in.MonthsOfYear, &out.MonthsOfYear + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScheduledBackupTimes != nil { + in, out := &in.ScheduledBackupTimes, &out.ScheduledBackupTimes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.WeeksOfMonth != nil { + in, out := &in.WeeksOfMonth, &out.WeeksOfMonth + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionRuleCriteriaInitParameters. +func (in *RetentionRuleCriteriaInitParameters) DeepCopy() *RetentionRuleCriteriaInitParameters { + if in == nil { + return nil + } + out := new(RetentionRuleCriteriaInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RetentionRuleCriteriaObservation) DeepCopyInto(out *RetentionRuleCriteriaObservation) { + *out = *in + if in.AbsoluteCriteria != nil { + in, out := &in.AbsoluteCriteria, &out.AbsoluteCriteria + *out = new(string) + **out = **in + } + if in.DaysOfWeek != nil { + in, out := &in.DaysOfWeek, &out.DaysOfWeek + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MonthsOfYear != nil { + in, out := &in.MonthsOfYear, &out.MonthsOfYear + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScheduledBackupTimes != nil { + in, out := &in.ScheduledBackupTimes, &out.ScheduledBackupTimes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.WeeksOfMonth != nil { + in, out := &in.WeeksOfMonth, &out.WeeksOfMonth + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionRuleCriteriaObservation. +func (in *RetentionRuleCriteriaObservation) DeepCopy() *RetentionRuleCriteriaObservation { + if in == nil { + return nil + } + out := new(RetentionRuleCriteriaObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RetentionRuleCriteriaParameters) DeepCopyInto(out *RetentionRuleCriteriaParameters) { + *out = *in + if in.AbsoluteCriteria != nil { + in, out := &in.AbsoluteCriteria, &out.AbsoluteCriteria + *out = new(string) + **out = **in + } + if in.DaysOfWeek != nil { + in, out := &in.DaysOfWeek, &out.DaysOfWeek + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MonthsOfYear != nil { + in, out := &in.MonthsOfYear, &out.MonthsOfYear + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScheduledBackupTimes != nil { + in, out := &in.ScheduledBackupTimes, &out.ScheduledBackupTimes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.WeeksOfMonth != nil { + in, out := &in.WeeksOfMonth, &out.WeeksOfMonth + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionRuleCriteriaParameters. +func (in *RetentionRuleCriteriaParameters) DeepCopy() *RetentionRuleCriteriaParameters { + if in == nil { + return nil + } + out := new(RetentionRuleCriteriaParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RetentionRuleInitParameters) DeepCopyInto(out *RetentionRuleInitParameters) { + *out = *in + if in.Criteria != nil { + in, out := &in.Criteria, &out.Criteria + *out = new(CriteriaInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionRuleInitParameters. +func (in *RetentionRuleInitParameters) DeepCopy() *RetentionRuleInitParameters { + if in == nil { + return nil + } + out := new(RetentionRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetentionRuleObservation) DeepCopyInto(out *RetentionRuleObservation) { + *out = *in + if in.Criteria != nil { + in, out := &in.Criteria, &out.Criteria + *out = new(CriteriaObservation) + (*in).DeepCopyInto(*out) + } + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionRuleObservation. +func (in *RetentionRuleObservation) DeepCopy() *RetentionRuleObservation { + if in == nil { + return nil + } + out := new(RetentionRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RetentionRuleParameters) DeepCopyInto(out *RetentionRuleParameters) { + *out = *in + if in.Criteria != nil { + in, out := &in.Criteria, &out.Criteria + *out = new(CriteriaParameters) + (*in).DeepCopyInto(*out) + } + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionRuleParameters. +func (in *RetentionRuleParameters) DeepCopy() *RetentionRuleParameters { + if in == nil { + return nil + } + out := new(RetentionRuleParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/dataprotection/v1beta2/zz_generated.managed.go b/apis/dataprotection/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..d90c2de0e --- /dev/null +++ b/apis/dataprotection/v1beta2/zz_generated.managed.go @@ -0,0 +1,188 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this BackupPolicyDisk. +func (mg *BackupPolicyDisk) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this BackupPolicyDisk. +func (mg *BackupPolicyDisk) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this BackupPolicyDisk. +func (mg *BackupPolicyDisk) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this BackupPolicyDisk. 
+func (mg *BackupPolicyDisk) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this BackupPolicyDisk. +func (mg *BackupPolicyDisk) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this BackupPolicyDisk. +func (mg *BackupPolicyDisk) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this BackupPolicyDisk. +func (mg *BackupPolicyDisk) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this BackupPolicyDisk. +func (mg *BackupPolicyDisk) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this BackupPolicyDisk. +func (mg *BackupPolicyDisk) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this BackupPolicyDisk. +func (mg *BackupPolicyDisk) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this BackupPolicyDisk. +func (mg *BackupPolicyDisk) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this BackupPolicyDisk. +func (mg *BackupPolicyDisk) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this BackupPolicyPostgreSQL. +func (mg *BackupPolicyPostgreSQL) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this BackupPolicyPostgreSQL. 
+func (mg *BackupPolicyPostgreSQL) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this BackupPolicyPostgreSQL. +func (mg *BackupPolicyPostgreSQL) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this BackupPolicyPostgreSQL. +func (mg *BackupPolicyPostgreSQL) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this BackupPolicyPostgreSQL. +func (mg *BackupPolicyPostgreSQL) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this BackupPolicyPostgreSQL. +func (mg *BackupPolicyPostgreSQL) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this BackupPolicyPostgreSQL. +func (mg *BackupPolicyPostgreSQL) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this BackupPolicyPostgreSQL. +func (mg *BackupPolicyPostgreSQL) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this BackupPolicyPostgreSQL. +func (mg *BackupPolicyPostgreSQL) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this BackupPolicyPostgreSQL. +func (mg *BackupPolicyPostgreSQL) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this BackupPolicyPostgreSQL. +func (mg *BackupPolicyPostgreSQL) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this BackupPolicyPostgreSQL. 
+func (mg *BackupPolicyPostgreSQL) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this BackupVault. +func (mg *BackupVault) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this BackupVault. +func (mg *BackupVault) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this BackupVault. +func (mg *BackupVault) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this BackupVault. +func (mg *BackupVault) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this BackupVault. +func (mg *BackupVault) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this BackupVault. +func (mg *BackupVault) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this BackupVault. +func (mg *BackupVault) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this BackupVault. +func (mg *BackupVault) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this BackupVault. +func (mg *BackupVault) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this BackupVault. +func (mg *BackupVault) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this BackupVault. 
+func (mg *BackupVault) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this BackupVault. +func (mg *BackupVault) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/dataprotection/v1beta2/zz_generated.managedlist.go b/apis/dataprotection/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..8b557ea28 --- /dev/null +++ b/apis/dataprotection/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,35 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this BackupPolicyDiskList. +func (l *BackupPolicyDiskList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this BackupPolicyPostgreSQLList. +func (l *BackupPolicyPostgreSQLList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this BackupVaultList. +func (l *BackupVaultList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/dataprotection/v1beta2/zz_generated.resolvers.go b/apis/dataprotection/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..c159843e2 --- /dev/null +++ b/apis/dataprotection/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,130 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + rconfig "github.com/upbound/provider-azure/apis/rconfig" + apisresolver "github.com/upbound/provider-azure/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *BackupPolicyDisk) ResolveReferences( // ResolveReferences of this BackupPolicyDisk. + ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("dataprotection.azure.upbound.io", "v1beta2", "BackupVault", "BackupVaultList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.VaultID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.VaultIDRef, + Selector: mg.Spec.ForProvider.VaultIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VaultID") + } + mg.Spec.ForProvider.VaultID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.VaultIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this BackupPolicyPostgreSQL. 
+func (mg *BackupPolicyPostgreSQL) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("dataprotection.azure.upbound.io", "v1beta2", "BackupVault", "BackupVaultList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.VaultName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.VaultNameRef, + Selector: mg.Spec.ForProvider.VaultNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VaultName") + } + mg.Spec.ForProvider.VaultName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.VaultNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this BackupVault. 
+func (mg *BackupVault) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/dataprotection/v1beta2/zz_groupversion_info.go b/apis/dataprotection/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..e11e9dbef --- /dev/null +++ b/apis/dataprotection/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=dataprotection.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
+const ( + CRDGroup = "dataprotection.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/datashare/v1beta1/zz_datasetdatalakegen2_types.go b/apis/datashare/v1beta1/zz_datasetdatalakegen2_types.go index 53303f460..a8905bf2e 100755 --- a/apis/datashare/v1beta1/zz_datasetdatalakegen2_types.go +++ b/apis/datashare/v1beta1/zz_datasetdatalakegen2_types.go @@ -34,7 +34,7 @@ type DataSetDataLakeGen2InitParameters struct { FolderPath *string `json:"folderPath,omitempty" tf:"folder_path,omitempty"` // The resource id of the storage account of the data lake file system to be shared with the receiver. Changing this forces a new Data Share Data Lake Gen2 Dataset to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` @@ -95,7 +95,7 @@ type DataSetDataLakeGen2Parameters struct { FolderPath *string `json:"folderPath,omitempty" tf:"folder_path,omitempty"` // The resource ID of the Data Share where this Data Share Data Lake Gen2 Dataset should be created. Changing this forces a new Data Share Data Lake Gen2 Dataset to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datashare/v1beta1.DataShare + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datashare/v1beta2.DataShare // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() // +kubebuilder:validation:Optional ShareID *string `json:"shareId,omitempty" tf:"share_id,omitempty"` @@ -109,7 +109,7 @@ type DataSetDataLakeGen2Parameters struct { ShareIDSelector *v1.Selector `json:"shareIdSelector,omitempty" tf:"-"` // The resource id of the storage account of the data lake file system to be shared with the receiver. Changing this forces a new Data Share Data Lake Gen2 Dataset to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() // +kubebuilder:validation:Optional StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` diff --git a/apis/datashare/v1beta1/zz_datasetkustocluster_types.go b/apis/datashare/v1beta1/zz_datasetkustocluster_types.go index 82eef3787..898a36add 100755 --- a/apis/datashare/v1beta1/zz_datasetkustocluster_types.go +++ b/apis/datashare/v1beta1/zz_datasetkustocluster_types.go @@ -16,7 +16,7 @@ import ( type DataSetKustoClusterInitParameters struct { // The resource ID of the Kusto Cluster to be shared with the receiver. Changing this forces a new Data Share Kusto Cluster Dataset to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/kusto/v1beta1.Cluster + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/kusto/v1beta2.Cluster // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() KustoClusterID *string `json:"kustoClusterId,omitempty" tf:"kusto_cluster_id,omitempty"` @@ -50,7 +50,7 @@ type DataSetKustoClusterObservation struct { type DataSetKustoClusterParameters struct { // The resource ID of the Kusto Cluster to be shared with the receiver. Changing this forces a new Data Share Kusto Cluster Dataset to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/kusto/v1beta1.Cluster + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/kusto/v1beta2.Cluster // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() // +kubebuilder:validation:Optional KustoClusterID *string `json:"kustoClusterId,omitempty" tf:"kusto_cluster_id,omitempty"` @@ -64,7 +64,7 @@ type DataSetKustoClusterParameters struct { KustoClusterIDSelector *v1.Selector `json:"kustoClusterIdSelector,omitempty" tf:"-"` // The resource ID of the Data Share where this Data Share Kusto Cluster Dataset should be created. Changing this forces a new Data Share Kusto Cluster Dataset to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datashare/v1beta1.DataShare + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datashare/v1beta2.DataShare // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() // +kubebuilder:validation:Optional ShareID *string `json:"shareId,omitempty" tf:"share_id,omitempty"` diff --git a/apis/datashare/v1beta1/zz_datasetkustodatabase_types.go b/apis/datashare/v1beta1/zz_datasetkustodatabase_types.go index c1c18fc19..c1c10ea7d 100755 --- a/apis/datashare/v1beta1/zz_datasetkustodatabase_types.go +++ b/apis/datashare/v1beta1/zz_datasetkustodatabase_types.go @@ -64,7 +64,7 @@ type DataSetKustoDatabaseParameters struct { KustoDatabaseIDSelector *v1.Selector `json:"kustoDatabaseIdSelector,omitempty" tf:"-"` // The resource ID of the Data Share where this Data Share Kusto Database Dataset should be created. Changing this forces a new Data Share Kusto Database Dataset to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datashare/v1beta1.DataShare + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datashare/v1beta2.DataShare // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() // +kubebuilder:validation:Optional ShareID *string `json:"shareId,omitempty" tf:"share_id,omitempty"` diff --git a/apis/datashare/v1beta1/zz_generated.conversion_hubs.go b/apis/datashare/v1beta1/zz_generated.conversion_hubs.go index 4c65fd87f..f0bdcae96 100755 --- a/apis/datashare/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/datashare/v1beta1/zz_generated.conversion_hubs.go @@ -6,15 +6,6 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *DataShare) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Account) Hub() {} - -// Hub marks this type as a conversion hub. 
-func (tr *DataSetBlobStorage) Hub() {} - // Hub marks this type as a conversion hub. func (tr *DataSetDataLakeGen2) Hub() {} diff --git a/apis/datashare/v1beta1/zz_generated.conversion_spokes.go b/apis/datashare/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..a4d4aeae4 --- /dev/null +++ b/apis/datashare/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,74 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Account to the hub type. +func (tr *Account) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Account type. +func (tr *Account) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this DataSetBlobStorage to the hub type. 
+func (tr *DataSetBlobStorage) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the DataSetBlobStorage type. +func (tr *DataSetBlobStorage) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this DataShare to the hub type. +func (tr *DataShare) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the DataShare type. 
+func (tr *DataShare) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/datashare/v1beta1/zz_generated.resolvers.go b/apis/datashare/v1beta1/zz_generated.resolvers.go index c97dcca3f..de5e0aaf2 100644 --- a/apis/datashare/v1beta1/zz_generated.resolvers.go +++ b/apis/datashare/v1beta1/zz_generated.resolvers.go @@ -232,7 +232,7 @@ func (mg *DataSetDataLakeGen2) ResolveReferences(ctx context.Context, c client.R mg.Spec.ForProvider.FileSystemName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.FileSystemNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("datashare.azure.upbound.io", "v1beta1", "DataShare", "DataShareList") + m, l, err = apisresolver.GetManagedResource("datashare.azure.upbound.io", "v1beta2", "DataShare", "DataShareList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -251,7 +251,7 @@ func (mg *DataSetDataLakeGen2) ResolveReferences(ctx context.Context, c client.R mg.Spec.ForProvider.ShareID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ShareIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -289,7 +289,7 @@ func (mg *DataSetDataLakeGen2) ResolveReferences(ctx context.Context, c client.R 
mg.Spec.InitProvider.FileSystemName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.FileSystemNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -320,7 +320,7 @@ func (mg *DataSetKustoCluster) ResolveReferences(ctx context.Context, c client.R var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("kusto.azure.upbound.io", "v1beta1", "Cluster", "ClusterList") + m, l, err = apisresolver.GetManagedResource("kusto.azure.upbound.io", "v1beta2", "Cluster", "ClusterList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -339,7 +339,7 @@ func (mg *DataSetKustoCluster) ResolveReferences(ctx context.Context, c client.R mg.Spec.ForProvider.KustoClusterID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.KustoClusterIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("datashare.azure.upbound.io", "v1beta1", "DataShare", "DataShareList") + m, l, err = apisresolver.GetManagedResource("datashare.azure.upbound.io", "v1beta2", "DataShare", "DataShareList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -358,7 +358,7 @@ func (mg *DataSetKustoCluster) ResolveReferences(ctx context.Context, c client.R mg.Spec.ForProvider.ShareID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ShareIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("kusto.azure.upbound.io", "v1beta1", "Cluster", "ClusterList") + m, l, err = 
apisresolver.GetManagedResource("kusto.azure.upbound.io", "v1beta2", "Cluster", "ClusterList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -408,7 +408,7 @@ func (mg *DataSetKustoDatabase) ResolveReferences(ctx context.Context, c client. mg.Spec.ForProvider.KustoDatabaseID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.KustoDatabaseIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("datashare.azure.upbound.io", "v1beta1", "DataShare", "DataShareList") + m, l, err = apisresolver.GetManagedResource("datashare.azure.upbound.io", "v1beta2", "DataShare", "DataShareList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/datashare/v1beta2/zz_account_terraformed.go b/apis/datashare/v1beta2/zz_account_terraformed.go new file mode 100755 index 000000000..4e4203eb4 --- /dev/null +++ b/apis/datashare/v1beta2/zz_account_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Account +func (mg *Account) GetTerraformResourceType() string { + return "azurerm_data_share_account" +} + +// GetConnectionDetailsMapping for this Account +func (tr *Account) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Account +func (tr *Account) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Account +func (tr *Account) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Account +func (tr *Account) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Account +func (tr *Account) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Account +func (tr *Account) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Account +func (tr *Account) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + 
+// GetInitParameters of this Account +func (tr *Account) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Account using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Account) LateInitialize(attrs []byte) (bool, error) { + params := &AccountParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Account) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/datashare/v1beta2/zz_account_types.go b/apis/datashare/v1beta2/zz_account_types.go new file mode 100755 index 000000000..eed922943 --- /dev/null +++ b/apis/datashare/v1beta2/zz_account_types.go @@ -0,0 +1,162 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AccountInitParameters struct { + + // An identity block as defined below. Changing this forces a new resource to be created. + Identity *IdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The Azure Region where the Data Share Account should exist. Changing this forces a new Data Share Account to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A mapping of tags which should be assigned to the Data Share Account. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type AccountObservation struct { + + // The ID of the Data Share Account. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. Changing this forces a new resource to be created. + Identity *IdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // The Azure Region where the Data Share Account should exist. Changing this forces a new Data Share Account to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the Resource Group where the Data Share Account should exist. 
Changing this forces a new Data Share Account to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A mapping of tags which should be assigned to the Data Share Account. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type AccountParameters struct { + + // An identity block as defined below. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Identity *IdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The Azure Region where the Data Share Account should exist. Changing this forces a new Data Share Account to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the Resource Group where the Data Share Account should exist. Changing this forces a new Data Share Account to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A mapping of tags which should be assigned to the Data Share Account. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type IdentityInitParameters struct { + + // Specifies the type of Managed Service Identity that should be configured on this Data Share Account. 
The only possible value is SystemAssigned. Changing this forces a new resource to be created. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityObservation struct { + + // The Principal ID for the Service Principal associated with the Identity of this Data Share Account. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID for the Service Principal associated with the Identity of this Data Share Account. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Data Share Account. The only possible value is SystemAssigned. Changing this forces a new resource to be created. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityParameters struct { + + // Specifies the type of Managed Service Identity that should be configured on this Data Share Account. The only possible value is SystemAssigned. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +// AccountSpec defines the desired state of Account +type AccountSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider AccountParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider AccountInitParameters `json:"initProvider,omitempty"` +} + +// AccountStatus defines the observed state of Account. +type AccountStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider AccountObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Account is the Schema for the Accounts API. Manages a Data Share Account. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type Account struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.identity) || (has(self.initProvider) && has(self.initProvider.identity))",message="spec.forProvider.identity is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + Spec AccountSpec `json:"spec"` + Status AccountStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// AccountList contains a list of Accounts +type AccountList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Account `json:"items"` +} + +// 
Repository type metadata. +var ( + Account_Kind = "Account" + Account_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Account_Kind}.String() + Account_KindAPIVersion = Account_Kind + "." + CRDGroupVersion.String() + Account_GroupVersionKind = CRDGroupVersion.WithKind(Account_Kind) +) + +func init() { + SchemeBuilder.Register(&Account{}, &AccountList{}) +} diff --git a/apis/datashare/v1beta2/zz_datasetblobstorage_terraformed.go b/apis/datashare/v1beta2/zz_datasetblobstorage_terraformed.go new file mode 100755 index 000000000..f53251d55 --- /dev/null +++ b/apis/datashare/v1beta2/zz_datasetblobstorage_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this DataSetBlobStorage +func (mg *DataSetBlobStorage) GetTerraformResourceType() string { + return "azurerm_data_share_dataset_blob_storage" +} + +// GetConnectionDetailsMapping for this DataSetBlobStorage +func (tr *DataSetBlobStorage) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this DataSetBlobStorage +func (tr *DataSetBlobStorage) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this DataSetBlobStorage +func (tr *DataSetBlobStorage) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this DataSetBlobStorage +func (tr 
*DataSetBlobStorage) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this DataSetBlobStorage +func (tr *DataSetBlobStorage) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this DataSetBlobStorage +func (tr *DataSetBlobStorage) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this DataSetBlobStorage +func (tr *DataSetBlobStorage) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this DataSetBlobStorage +func (tr *DataSetBlobStorage) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this DataSetBlobStorage using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *DataSetBlobStorage) LateInitialize(attrs []byte) (bool, error) { + params := &DataSetBlobStorageParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *DataSetBlobStorage) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/datashare/v1beta2/zz_datasetblobstorage_types.go b/apis/datashare/v1beta2/zz_datasetblobstorage_types.go new file mode 100755 index 000000000..436bc498a --- /dev/null +++ b/apis/datashare/v1beta2/zz_datasetblobstorage_types.go @@ -0,0 +1,240 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DataSetBlobStorageInitParameters struct { + + // The name of the storage account container to be shared with the receiver. Changing this forces a new Data Share Blob Storage Dataset to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Container + ContainerName *string `json:"containerName,omitempty" tf:"container_name,omitempty"` + + // Reference to a Container in storage to populate containerName. + // +kubebuilder:validation:Optional + ContainerNameRef *v1.Reference `json:"containerNameRef,omitempty" tf:"-"` + + // Selector for a Container in storage to populate containerName. + // +kubebuilder:validation:Optional + ContainerNameSelector *v1.Selector `json:"containerNameSelector,omitempty" tf:"-"` + + // The path of the file in the storage container to be shared with the receiver. Changing this forces a new Data Share Blob Storage Dataset to be created. + FilePath *string `json:"filePath,omitempty" tf:"file_path,omitempty"` + + // The path of the folder in the storage container to be shared with the receiver. Changing this forces a new Data Share Blob Storage Dataset to be created. + FolderPath *string `json:"folderPath,omitempty" tf:"folder_path,omitempty"` + + // A storage_account block as defined below. Changing this forces a new resource to be created. + StorageAccount *StorageAccountInitParameters `json:"storageAccount,omitempty" tf:"storage_account,omitempty"` +} + +type DataSetBlobStorageObservation struct { + + // The name of the storage account container to be shared with the receiver. Changing this forces a new Data Share Blob Storage Dataset to be created. + ContainerName *string `json:"containerName,omitempty" tf:"container_name,omitempty"` + + // The ID of the Data Share in which this Data Share Blob Storage Dataset should be created. Changing this forces a new Data Share Blob Storage Dataset to be created. + DataShareID *string `json:"dataShareId,omitempty" tf:"data_share_id,omitempty"` + + // The name of the Data Share Dataset. 
+ DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // The path of the file in the storage container to be shared with the receiver. Changing this forces a new Data Share Blob Storage Dataset to be created. + FilePath *string `json:"filePath,omitempty" tf:"file_path,omitempty"` + + // The path of the folder in the storage container to be shared with the receiver. Changing this forces a new Data Share Blob Storage Dataset to be created. + FolderPath *string `json:"folderPath,omitempty" tf:"folder_path,omitempty"` + + // The ID of the Data Share Blob Storage Dataset. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A storage_account block as defined below. Changing this forces a new resource to be created. + StorageAccount *StorageAccountObservation `json:"storageAccount,omitempty" tf:"storage_account,omitempty"` +} + +type DataSetBlobStorageParameters struct { + + // The name of the storage account container to be shared with the receiver. Changing this forces a new Data Share Blob Storage Dataset to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Container + // +kubebuilder:validation:Optional + ContainerName *string `json:"containerName,omitempty" tf:"container_name,omitempty"` + + // Reference to a Container in storage to populate containerName. + // +kubebuilder:validation:Optional + ContainerNameRef *v1.Reference `json:"containerNameRef,omitempty" tf:"-"` + + // Selector for a Container in storage to populate containerName. + // +kubebuilder:validation:Optional + ContainerNameSelector *v1.Selector `json:"containerNameSelector,omitempty" tf:"-"` + + // The ID of the Data Share in which this Data Share Blob Storage Dataset should be created. Changing this forces a new Data Share Blob Storage Dataset to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datashare/v1beta2.DataShare + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + DataShareID *string `json:"dataShareId,omitempty" tf:"data_share_id,omitempty"` + + // Reference to a DataShare in datashare to populate dataShareId. + // +kubebuilder:validation:Optional + DataShareIDRef *v1.Reference `json:"dataShareIdRef,omitempty" tf:"-"` + + // Selector for a DataShare in datashare to populate dataShareId. + // +kubebuilder:validation:Optional + DataShareIDSelector *v1.Selector `json:"dataShareIdSelector,omitempty" tf:"-"` + + // The path of the file in the storage container to be shared with the receiver. Changing this forces a new Data Share Blob Storage Dataset to be created. + // +kubebuilder:validation:Optional + FilePath *string `json:"filePath,omitempty" tf:"file_path,omitempty"` + + // The path of the folder in the storage container to be shared with the receiver. Changing this forces a new Data Share Blob Storage Dataset to be created. + // +kubebuilder:validation:Optional + FolderPath *string `json:"folderPath,omitempty" tf:"folder_path,omitempty"` + + // A storage_account block as defined below. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + StorageAccount *StorageAccountParameters `json:"storageAccount,omitempty" tf:"storage_account,omitempty"` +} + +type StorageAccountInitParameters struct { + + // The name of the storage account to be shared with the receiver. Changing this forces a new Data Share Blob Storage Dataset to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Reference to a Account in storage to populate name. 
+ // +kubebuilder:validation:Optional + NameRef *v1.Reference `json:"nameRef,omitempty" tf:"-"` + + // Selector for a Account in storage to populate name. + // +kubebuilder:validation:Optional + NameSelector *v1.Selector `json:"nameSelector,omitempty" tf:"-"` + + // The resource group name of the storage account to be shared with the receiver. Changing this forces a new Data Share Blob Storage Dataset to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // The subscription id of the storage account to be shared with the receiver. Changing this forces a new Data Share Blob Storage Dataset to be created. + SubscriptionID *string `json:"subscriptionId,omitempty" tf:"subscription_id,omitempty"` +} + +type StorageAccountObservation struct { + + // The name of the storage account to be shared with the receiver. Changing this forces a new Data Share Blob Storage Dataset to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The resource group name of the storage account to be shared with the receiver. Changing this forces a new Data Share Blob Storage Dataset to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // The subscription id of the storage account to be shared with the receiver. Changing this forces a new Data Share Blob Storage Dataset to be created. 
+ SubscriptionID *string `json:"subscriptionId,omitempty" tf:"subscription_id,omitempty"` +} + +type StorageAccountParameters struct { + + // The name of the storage account to be shared with the receiver. Changing this forces a new Data Share Blob Storage Dataset to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Reference to a Account in storage to populate name. + // +kubebuilder:validation:Optional + NameRef *v1.Reference `json:"nameRef,omitempty" tf:"-"` + + // Selector for a Account in storage to populate name. + // +kubebuilder:validation:Optional + NameSelector *v1.Selector `json:"nameSelector,omitempty" tf:"-"` + + // The resource group name of the storage account to be shared with the receiver. Changing this forces a new Data Share Blob Storage Dataset to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // The subscription id of the storage account to be shared with the receiver. Changing this forces a new Data Share Blob Storage Dataset to be created. 
+ // +kubebuilder:validation:Optional + SubscriptionID *string `json:"subscriptionId" tf:"subscription_id,omitempty"` +} + +// DataSetBlobStorageSpec defines the desired state of DataSetBlobStorage +type DataSetBlobStorageSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider DataSetBlobStorageParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider DataSetBlobStorageInitParameters `json:"initProvider,omitempty"` +} + +// DataSetBlobStorageStatus defines the observed state of DataSetBlobStorage. +type DataSetBlobStorageStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider DataSetBlobStorageObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// DataSetBlobStorage is the Schema for the DataSetBlobStorages API. Manages a Data Share Blob Storage Dataset. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type DataSetBlobStorage struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.storageAccount) || (has(self.initProvider) && has(self.initProvider.storageAccount))",message="spec.forProvider.storageAccount is a required parameter" + Spec DataSetBlobStorageSpec `json:"spec"` + Status DataSetBlobStorageStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// DataSetBlobStorageList contains a list of DataSetBlobStorages +type DataSetBlobStorageList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DataSetBlobStorage `json:"items"` +} + +// Repository type metadata. +var ( + DataSetBlobStorage_Kind = "DataSetBlobStorage" + DataSetBlobStorage_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: DataSetBlobStorage_Kind}.String() + DataSetBlobStorage_KindAPIVersion = DataSetBlobStorage_Kind + "." 
+ CRDGroupVersion.String() + DataSetBlobStorage_GroupVersionKind = CRDGroupVersion.WithKind(DataSetBlobStorage_Kind) +) + +func init() { + SchemeBuilder.Register(&DataSetBlobStorage{}, &DataSetBlobStorageList{}) +} diff --git a/apis/datashare/v1beta2/zz_datashare_terraformed.go b/apis/datashare/v1beta2/zz_datashare_terraformed.go new file mode 100755 index 000000000..cbd3550e7 --- /dev/null +++ b/apis/datashare/v1beta2/zz_datashare_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this DataShare +func (mg *DataShare) GetTerraformResourceType() string { + return "azurerm_data_share" +} + +// GetConnectionDetailsMapping for this DataShare +func (tr *DataShare) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this DataShare +func (tr *DataShare) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this DataShare +func (tr *DataShare) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this DataShare +func (tr *DataShare) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this DataShare +func (tr *DataShare) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != 
nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this DataShare +func (tr *DataShare) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this DataShare +func (tr *DataShare) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this DataShare +func (tr *DataShare) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this DataShare using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *DataShare) LateInitialize(attrs []byte) (bool, error) { + params := &DataShareParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *DataShare) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/datashare/v1beta2/zz_datashare_types.go b/apis/datashare/v1beta2/zz_datashare_types.go new file mode 100755 index 000000000..906c8ca5f --- /dev/null +++ b/apis/datashare/v1beta2/zz_datashare_types.go @@ -0,0 +1,183 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DataShareInitParameters struct { + + // The Data Share's description. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The kind of the Data Share. Possible values are CopyBased and InPlace. Changing this forces a new Data Share to be created. + Kind *string `json:"kind,omitempty" tf:"kind,omitempty"` + + // A snapshot_schedule block as defined below. + SnapshotSchedule *SnapshotScheduleInitParameters `json:"snapshotSchedule,omitempty" tf:"snapshot_schedule,omitempty"` + + // The terms of the Data Share. + Terms *string `json:"terms,omitempty" tf:"terms,omitempty"` +} + +type DataShareObservation struct { + + // The ID of the Data Share account in which the Data Share is created. 
Changing this forces a new Data Share to be created. + AccountID *string `json:"accountId,omitempty" tf:"account_id,omitempty"` + + // The Data Share's description. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The ID of the Data Share. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The kind of the Data Share. Possible values are CopyBased and InPlace. Changing this forces a new Data Share to be created. + Kind *string `json:"kind,omitempty" tf:"kind,omitempty"` + + // A snapshot_schedule block as defined below. + SnapshotSchedule *SnapshotScheduleObservation `json:"snapshotSchedule,omitempty" tf:"snapshot_schedule,omitempty"` + + // The terms of the Data Share. + Terms *string `json:"terms,omitempty" tf:"terms,omitempty"` +} + +type DataShareParameters struct { + + // The ID of the Data Share account in which the Data Share is created. Changing this forces a new Data Share to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/datashare/v1beta2.Account + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + AccountID *string `json:"accountId,omitempty" tf:"account_id,omitempty"` + + // Reference to a Account in datashare to populate accountId. + // +kubebuilder:validation:Optional + AccountIDRef *v1.Reference `json:"accountIdRef,omitempty" tf:"-"` + + // Selector for a Account in datashare to populate accountId. + // +kubebuilder:validation:Optional + AccountIDSelector *v1.Selector `json:"accountIdSelector,omitempty" tf:"-"` + + // The Data Share's description. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The kind of the Data Share. Possible values are CopyBased and InPlace. Changing this forces a new Data Share to be created. 
+ // +kubebuilder:validation:Optional + Kind *string `json:"kind,omitempty" tf:"kind,omitempty"` + + // A snapshot_schedule block as defined below. + // +kubebuilder:validation:Optional + SnapshotSchedule *SnapshotScheduleParameters `json:"snapshotSchedule,omitempty" tf:"snapshot_schedule,omitempty"` + + // The terms of the Data Share. + // +kubebuilder:validation:Optional + Terms *string `json:"terms,omitempty" tf:"terms,omitempty"` +} + +type SnapshotScheduleInitParameters struct { + + // The name of the snapshot schedule. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The interval of the synchronization with the source data. Possible values are Hour and Day. + Recurrence *string `json:"recurrence,omitempty" tf:"recurrence,omitempty"` + + // The synchronization with the source data's start time. + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` +} + +type SnapshotScheduleObservation struct { + + // The name of the snapshot schedule. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The interval of the synchronization with the source data. Possible values are Hour and Day. + Recurrence *string `json:"recurrence,omitempty" tf:"recurrence,omitempty"` + + // The synchronization with the source data's start time. + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` +} + +type SnapshotScheduleParameters struct { + + // The name of the snapshot schedule. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The interval of the synchronization with the source data. Possible values are Hour and Day. + // +kubebuilder:validation:Optional + Recurrence *string `json:"recurrence" tf:"recurrence,omitempty"` + + // The synchronization with the source data's start time. 
+ // +kubebuilder:validation:Optional + StartTime *string `json:"startTime" tf:"start_time,omitempty"` +} + +// DataShareSpec defines the desired state of DataShare +type DataShareSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider DataShareParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider DataShareInitParameters `json:"initProvider,omitempty"` +} + +// DataShareStatus defines the observed state of DataShare. +type DataShareStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider DataShareObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// DataShare is the Schema for the DataShares API. Manages a Data Share. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type DataShare struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.kind) || (has(self.initProvider) && has(self.initProvider.kind))",message="spec.forProvider.kind is a required parameter" + Spec DataShareSpec `json:"spec"` + Status DataShareStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// DataShareList contains a list of DataShares +type DataShareList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DataShare `json:"items"` +} + +// Repository type metadata. +var ( + DataShare_Kind = "DataShare" + DataShare_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: DataShare_Kind}.String() + DataShare_KindAPIVersion = DataShare_Kind + "." 
+ CRDGroupVersion.String() + DataShare_GroupVersionKind = CRDGroupVersion.WithKind(DataShare_Kind) +) + +func init() { + SchemeBuilder.Register(&DataShare{}, &DataShareList{}) +} diff --git a/apis/datashare/v1beta2/zz_generated.conversion_hubs.go b/apis/datashare/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 000000000..6feebb65b --- /dev/null +++ b/apis/datashare/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,16 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Account) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *DataSetBlobStorage) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *DataShare) Hub() {} diff --git a/apis/datashare/v1beta2/zz_generated.deepcopy.go b/apis/datashare/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..39849f825 --- /dev/null +++ b/apis/datashare/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,1019 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Account) DeepCopyInto(out *Account) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Account. 
+func (in *Account) DeepCopy() *Account { + if in == nil { + return nil + } + out := new(Account) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Account) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccountInitParameters) DeepCopyInto(out *AccountInitParameters) { + *out = *in + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountInitParameters. +func (in *AccountInitParameters) DeepCopy() *AccountInitParameters { + if in == nil { + return nil + } + out := new(AccountInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccountList) DeepCopyInto(out *AccountList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Account, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountList. 
+func (in *AccountList) DeepCopy() *AccountList { + if in == nil { + return nil + } + out := new(AccountList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AccountList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccountObservation) DeepCopyInto(out *AccountObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountObservation. +func (in *AccountObservation) DeepCopy() *AccountObservation { + if in == nil { + return nil + } + out := new(AccountObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AccountParameters) DeepCopyInto(out *AccountParameters) { + *out = *in + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountParameters. +func (in *AccountParameters) DeepCopy() *AccountParameters { + if in == nil { + return nil + } + out := new(AccountParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccountSpec) DeepCopyInto(out *AccountSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountSpec. 
+func (in *AccountSpec) DeepCopy() *AccountSpec { + if in == nil { + return nil + } + out := new(AccountSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccountStatus) DeepCopyInto(out *AccountStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountStatus. +func (in *AccountStatus) DeepCopy() *AccountStatus { + if in == nil { + return nil + } + out := new(AccountStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataSetBlobStorage) DeepCopyInto(out *DataSetBlobStorage) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetBlobStorage. +func (in *DataSetBlobStorage) DeepCopy() *DataSetBlobStorage { + if in == nil { + return nil + } + out := new(DataSetBlobStorage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DataSetBlobStorage) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataSetBlobStorageInitParameters) DeepCopyInto(out *DataSetBlobStorageInitParameters) { + *out = *in + if in.ContainerName != nil { + in, out := &in.ContainerName, &out.ContainerName + *out = new(string) + **out = **in + } + if in.ContainerNameRef != nil { + in, out := &in.ContainerNameRef, &out.ContainerNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ContainerNameSelector != nil { + in, out := &in.ContainerNameSelector, &out.ContainerNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.FilePath != nil { + in, out := &in.FilePath, &out.FilePath + *out = new(string) + **out = **in + } + if in.FolderPath != nil { + in, out := &in.FolderPath, &out.FolderPath + *out = new(string) + **out = **in + } + if in.StorageAccount != nil { + in, out := &in.StorageAccount, &out.StorageAccount + *out = new(StorageAccountInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetBlobStorageInitParameters. +func (in *DataSetBlobStorageInitParameters) DeepCopy() *DataSetBlobStorageInitParameters { + if in == nil { + return nil + } + out := new(DataSetBlobStorageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataSetBlobStorageList) DeepCopyInto(out *DataSetBlobStorageList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DataSetBlobStorage, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetBlobStorageList. 
+func (in *DataSetBlobStorageList) DeepCopy() *DataSetBlobStorageList { + if in == nil { + return nil + } + out := new(DataSetBlobStorageList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DataSetBlobStorageList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataSetBlobStorageObservation) DeepCopyInto(out *DataSetBlobStorageObservation) { + *out = *in + if in.ContainerName != nil { + in, out := &in.ContainerName, &out.ContainerName + *out = new(string) + **out = **in + } + if in.DataShareID != nil { + in, out := &in.DataShareID, &out.DataShareID + *out = new(string) + **out = **in + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.FilePath != nil { + in, out := &in.FilePath, &out.FilePath + *out = new(string) + **out = **in + } + if in.FolderPath != nil { + in, out := &in.FolderPath, &out.FolderPath + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.StorageAccount != nil { + in, out := &in.StorageAccount, &out.StorageAccount + *out = new(StorageAccountObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetBlobStorageObservation. +func (in *DataSetBlobStorageObservation) DeepCopy() *DataSetBlobStorageObservation { + if in == nil { + return nil + } + out := new(DataSetBlobStorageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataSetBlobStorageParameters) DeepCopyInto(out *DataSetBlobStorageParameters) { + *out = *in + if in.ContainerName != nil { + in, out := &in.ContainerName, &out.ContainerName + *out = new(string) + **out = **in + } + if in.ContainerNameRef != nil { + in, out := &in.ContainerNameRef, &out.ContainerNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ContainerNameSelector != nil { + in, out := &in.ContainerNameSelector, &out.ContainerNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.DataShareID != nil { + in, out := &in.DataShareID, &out.DataShareID + *out = new(string) + **out = **in + } + if in.DataShareIDRef != nil { + in, out := &in.DataShareIDRef, &out.DataShareIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DataShareIDSelector != nil { + in, out := &in.DataShareIDSelector, &out.DataShareIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.FilePath != nil { + in, out := &in.FilePath, &out.FilePath + *out = new(string) + **out = **in + } + if in.FolderPath != nil { + in, out := &in.FolderPath, &out.FolderPath + *out = new(string) + **out = **in + } + if in.StorageAccount != nil { + in, out := &in.StorageAccount, &out.StorageAccount + *out = new(StorageAccountParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetBlobStorageParameters. +func (in *DataSetBlobStorageParameters) DeepCopy() *DataSetBlobStorageParameters { + if in == nil { + return nil + } + out := new(DataSetBlobStorageParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataSetBlobStorageSpec) DeepCopyInto(out *DataSetBlobStorageSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetBlobStorageSpec. +func (in *DataSetBlobStorageSpec) DeepCopy() *DataSetBlobStorageSpec { + if in == nil { + return nil + } + out := new(DataSetBlobStorageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataSetBlobStorageStatus) DeepCopyInto(out *DataSetBlobStorageStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSetBlobStorageStatus. +func (in *DataSetBlobStorageStatus) DeepCopy() *DataSetBlobStorageStatus { + if in == nil { + return nil + } + out := new(DataSetBlobStorageStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataShare) DeepCopyInto(out *DataShare) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataShare. +func (in *DataShare) DeepCopy() *DataShare { + if in == nil { + return nil + } + out := new(DataShare) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *DataShare) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataShareInitParameters) DeepCopyInto(out *DataShareInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(string) + **out = **in + } + if in.SnapshotSchedule != nil { + in, out := &in.SnapshotSchedule, &out.SnapshotSchedule + *out = new(SnapshotScheduleInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Terms != nil { + in, out := &in.Terms, &out.Terms + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataShareInitParameters. +func (in *DataShareInitParameters) DeepCopy() *DataShareInitParameters { + if in == nil { + return nil + } + out := new(DataShareInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataShareList) DeepCopyInto(out *DataShareList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DataShare, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataShareList. +func (in *DataShareList) DeepCopy() *DataShareList { + if in == nil { + return nil + } + out := new(DataShareList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *DataShareList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataShareObservation) DeepCopyInto(out *DataShareObservation) { + *out = *in + if in.AccountID != nil { + in, out := &in.AccountID, &out.AccountID + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(string) + **out = **in + } + if in.SnapshotSchedule != nil { + in, out := &in.SnapshotSchedule, &out.SnapshotSchedule + *out = new(SnapshotScheduleObservation) + (*in).DeepCopyInto(*out) + } + if in.Terms != nil { + in, out := &in.Terms, &out.Terms + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataShareObservation. +func (in *DataShareObservation) DeepCopy() *DataShareObservation { + if in == nil { + return nil + } + out := new(DataShareObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataShareParameters) DeepCopyInto(out *DataShareParameters) { + *out = *in + if in.AccountID != nil { + in, out := &in.AccountID, &out.AccountID + *out = new(string) + **out = **in + } + if in.AccountIDRef != nil { + in, out := &in.AccountIDRef, &out.AccountIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AccountIDSelector != nil { + in, out := &in.AccountIDSelector, &out.AccountIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(string) + **out = **in + } + if in.SnapshotSchedule != nil { + in, out := &in.SnapshotSchedule, &out.SnapshotSchedule + *out = new(SnapshotScheduleParameters) + (*in).DeepCopyInto(*out) + } + if in.Terms != nil { + in, out := &in.Terms, &out.Terms + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataShareParameters. +func (in *DataShareParameters) DeepCopy() *DataShareParameters { + if in == nil { + return nil + } + out := new(DataShareParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataShareSpec) DeepCopyInto(out *DataShareSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataShareSpec. +func (in *DataShareSpec) DeepCopy() *DataShareSpec { + if in == nil { + return nil + } + out := new(DataShareSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataShareStatus) DeepCopyInto(out *DataShareStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataShareStatus. +func (in *DataShareStatus) DeepCopy() *DataShareStatus { + if in == nil { + return nil + } + out := new(DataShareStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityInitParameters) DeepCopyInto(out *IdentityInitParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityInitParameters. +func (in *IdentityInitParameters) DeepCopy() *IdentityInitParameters { + if in == nil { + return nil + } + out := new(IdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityObservation) DeepCopyInto(out *IdentityObservation) { + *out = *in + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityObservation. +func (in *IdentityObservation) DeepCopy() *IdentityObservation { + if in == nil { + return nil + } + out := new(IdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentityParameters) DeepCopyInto(out *IdentityParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityParameters. +func (in *IdentityParameters) DeepCopy() *IdentityParameters { + if in == nil { + return nil + } + out := new(IdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotScheduleInitParameters) DeepCopyInto(out *SnapshotScheduleInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Recurrence != nil { + in, out := &in.Recurrence, &out.Recurrence + *out = new(string) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotScheduleInitParameters. +func (in *SnapshotScheduleInitParameters) DeepCopy() *SnapshotScheduleInitParameters { + if in == nil { + return nil + } + out := new(SnapshotScheduleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotScheduleObservation) DeepCopyInto(out *SnapshotScheduleObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Recurrence != nil { + in, out := &in.Recurrence, &out.Recurrence + *out = new(string) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotScheduleObservation. 
+func (in *SnapshotScheduleObservation) DeepCopy() *SnapshotScheduleObservation { + if in == nil { + return nil + } + out := new(SnapshotScheduleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotScheduleParameters) DeepCopyInto(out *SnapshotScheduleParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Recurrence != nil { + in, out := &in.Recurrence, &out.Recurrence + *out = new(string) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotScheduleParameters. +func (in *SnapshotScheduleParameters) DeepCopy() *SnapshotScheduleParameters { + if in == nil { + return nil + } + out := new(SnapshotScheduleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StorageAccountInitParameters) DeepCopyInto(out *StorageAccountInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NameRef != nil { + in, out := &in.NameRef, &out.NameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NameSelector != nil { + in, out := &in.NameSelector, &out.NameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SubscriptionID != nil { + in, out := &in.SubscriptionID, &out.SubscriptionID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageAccountInitParameters. +func (in *StorageAccountInitParameters) DeepCopy() *StorageAccountInitParameters { + if in == nil { + return nil + } + out := new(StorageAccountInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StorageAccountObservation) DeepCopyInto(out *StorageAccountObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.SubscriptionID != nil { + in, out := &in.SubscriptionID, &out.SubscriptionID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageAccountObservation. +func (in *StorageAccountObservation) DeepCopy() *StorageAccountObservation { + if in == nil { + return nil + } + out := new(StorageAccountObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageAccountParameters) DeepCopyInto(out *StorageAccountParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NameRef != nil { + in, out := &in.NameRef, &out.NameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NameSelector != nil { + in, out := &in.NameSelector, &out.NameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SubscriptionID != nil { + in, out := &in.SubscriptionID, &out.SubscriptionID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the 
receiver, creating a new StorageAccountParameters. +func (in *StorageAccountParameters) DeepCopy() *StorageAccountParameters { + if in == nil { + return nil + } + out := new(StorageAccountParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/datashare/v1beta2/zz_generated.managed.go b/apis/datashare/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..75b07f326 --- /dev/null +++ b/apis/datashare/v1beta2/zz_generated.managed.go @@ -0,0 +1,188 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Account. +func (mg *Account) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Account. +func (mg *Account) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Account. +func (mg *Account) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Account. +func (mg *Account) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Account. +func (mg *Account) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Account. +func (mg *Account) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Account. +func (mg *Account) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Account. 
+func (mg *Account) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Account. +func (mg *Account) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Account. +func (mg *Account) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Account. +func (mg *Account) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Account. +func (mg *Account) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this DataSetBlobStorage. +func (mg *DataSetBlobStorage) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this DataSetBlobStorage. +func (mg *DataSetBlobStorage) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this DataSetBlobStorage. +func (mg *DataSetBlobStorage) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this DataSetBlobStorage. +func (mg *DataSetBlobStorage) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this DataSetBlobStorage. +func (mg *DataSetBlobStorage) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this DataSetBlobStorage. +func (mg *DataSetBlobStorage) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this DataSetBlobStorage. 
+func (mg *DataSetBlobStorage) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this DataSetBlobStorage. +func (mg *DataSetBlobStorage) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this DataSetBlobStorage. +func (mg *DataSetBlobStorage) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this DataSetBlobStorage. +func (mg *DataSetBlobStorage) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this DataSetBlobStorage. +func (mg *DataSetBlobStorage) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this DataSetBlobStorage. +func (mg *DataSetBlobStorage) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this DataShare. +func (mg *DataShare) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this DataShare. +func (mg *DataShare) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this DataShare. +func (mg *DataShare) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this DataShare. +func (mg *DataShare) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this DataShare. +func (mg *DataShare) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this DataShare. 
+func (mg *DataShare) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this DataShare. +func (mg *DataShare) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this DataShare. +func (mg *DataShare) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this DataShare. +func (mg *DataShare) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this DataShare. +func (mg *DataShare) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this DataShare. +func (mg *DataShare) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this DataShare. +func (mg *DataShare) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/datashare/v1beta2/zz_generated.managedlist.go b/apis/datashare/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..7f5a5f4a8 --- /dev/null +++ b/apis/datashare/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,35 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this AccountList. +func (l *AccountList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this DataSetBlobStorageList. 
+func (l *DataSetBlobStorageList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this DataShareList. +func (l *DataShareList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/datashare/v1beta2/zz_generated.resolvers.go b/apis/datashare/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..e67603c02 --- /dev/null +++ b/apis/datashare/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,236 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + + rconfig "github.com/upbound/provider-azure/apis/rconfig" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + client "sigs.k8s.io/controller-runtime/pkg/client" + + // ResolveReferences of this Account. 
+ apisresolver "github.com/upbound/provider-azure/internal/apis" +) + +func (mg *Account) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this DataSetBlobStorage. 
+func (mg *DataSetBlobStorage) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Container", "ContainerList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ContainerName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ContainerNameRef, + Selector: mg.Spec.ForProvider.ContainerNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ContainerName") + } + mg.Spec.ForProvider.ContainerName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ContainerNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("datashare.azure.upbound.io", "v1beta2", "DataShare", "DataShareList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DataShareID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.DataShareIDRef, + Selector: mg.Spec.ForProvider.DataShareIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DataShareID") + } + mg.Spec.ForProvider.DataShareID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DataShareIDRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.StorageAccount != nil { + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", 
"v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StorageAccount.Name), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.StorageAccount.NameRef, + Selector: mg.Spec.ForProvider.StorageAccount.NameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StorageAccount.Name") + } + mg.Spec.ForProvider.StorageAccount.Name = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StorageAccount.NameRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.StorageAccount != nil { + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StorageAccount.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.StorageAccount.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.StorageAccount.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StorageAccount.ResourceGroupName") + } + mg.Spec.ForProvider.StorageAccount.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StorageAccount.ResourceGroupNameRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Container", "ContainerList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference 
resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ContainerName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ContainerNameRef, + Selector: mg.Spec.InitProvider.ContainerNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ContainerName") + } + mg.Spec.InitProvider.ContainerName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ContainerNameRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.StorageAccount != nil { + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StorageAccount.Name), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.StorageAccount.NameRef, + Selector: mg.Spec.InitProvider.StorageAccount.NameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StorageAccount.Name") + } + mg.Spec.InitProvider.StorageAccount.Name = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StorageAccount.NameRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.StorageAccount != nil { + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StorageAccount.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: 
mg.Spec.InitProvider.StorageAccount.ResourceGroupNameRef, + Selector: mg.Spec.InitProvider.StorageAccount.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StorageAccount.ResourceGroupName") + } + mg.Spec.InitProvider.StorageAccount.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StorageAccount.ResourceGroupNameRef = rsp.ResolvedReference + + } + + return nil +} + +// ResolveReferences of this DataShare. +func (mg *DataShare) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("datashare.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.AccountID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.AccountIDRef, + Selector: mg.Spec.ForProvider.AccountIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.AccountID") + } + mg.Spec.ForProvider.AccountID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.AccountIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/datashare/v1beta2/zz_groupversion_info.go b/apis/datashare/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..4d5382d78 --- /dev/null +++ b/apis/datashare/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +// +kubebuilder:object:generate=true +// +groupName=datashare.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "datashare.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/dbformariadb/v1beta1/zz_generated.resolvers.go b/apis/dbformariadb/v1beta1/zz_generated.resolvers.go index beb634ba2..f87975f81 100644 --- a/apis/dbformariadb/v1beta1/zz_generated.resolvers.go +++ b/apis/dbformariadb/v1beta1/zz_generated.resolvers.go @@ -322,7 +322,7 @@ func (mg *VirtualNetworkRule) ResolveReferences(ctx context.Context, c client.Re mg.Spec.ForProvider.ServerName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ServerNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "Subnet", "SubnetList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -341,7 +341,7 @@ func (mg *VirtualNetworkRule) ResolveReferences(ctx context.Context, c client.Re mg.Spec.ForProvider.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.SubnetIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "Subnet", "SubnetList") + m, l, err = 
apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/dbformariadb/v1beta1/zz_virtualnetworkrule_types.go b/apis/dbformariadb/v1beta1/zz_virtualnetworkrule_types.go index 4d2f0eb15..b676c0540 100755 --- a/apis/dbformariadb/v1beta1/zz_virtualnetworkrule_types.go +++ b/apis/dbformariadb/v1beta1/zz_virtualnetworkrule_types.go @@ -16,7 +16,7 @@ import ( type VirtualNetworkRuleInitParameters struct { // The ID of the subnet that the MariaDB server will be connected to. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.Subnet + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` @@ -73,7 +73,7 @@ type VirtualNetworkRuleParameters struct { ServerNameSelector *v1.Selector `json:"serverNameSelector,omitempty" tf:"-"` // The ID of the subnet that the MariaDB server will be connected to. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.Subnet + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` diff --git a/apis/dbformysql/v1beta1/zz_activedirectoryadministrator_types.go b/apis/dbformysql/v1beta1/zz_activedirectoryadministrator_types.go index 64cc5cb58..611073d7e 100755 --- a/apis/dbformysql/v1beta1/zz_activedirectoryadministrator_types.go +++ b/apis/dbformysql/v1beta1/zz_activedirectoryadministrator_types.go @@ -34,7 +34,7 @@ type ActiveDirectoryAdministratorInitParameters struct { ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` // The name of the MySQL Server on which to set the administrator. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dbformysql/v1beta1.Server + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dbformysql/v1beta2.Server ServerName *string `json:"serverName,omitempty" tf:"server_name,omitempty"` // Reference to a Server in dbformysql to populate serverName. @@ -94,7 +94,7 @@ type ActiveDirectoryAdministratorParameters struct { ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` // The name of the MySQL Server on which to set the administrator. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dbformysql/v1beta1.Server + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dbformysql/v1beta2.Server // +kubebuilder:validation:Optional ServerName *string `json:"serverName,omitempty" tf:"server_name,omitempty"` diff --git a/apis/dbformysql/v1beta1/zz_configuration_types.go b/apis/dbformysql/v1beta1/zz_configuration_types.go index fef4eae43..3c99ebd5a 100755 --- a/apis/dbformysql/v1beta1/zz_configuration_types.go +++ b/apis/dbformysql/v1beta1/zz_configuration_types.go @@ -31,7 +31,7 @@ type ConfigurationInitParameters struct { ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` // Specifies the name of the MySQL Server. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dbformysql/v1beta1.Server + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dbformysql/v1beta2.Server ServerName *string `json:"serverName,omitempty" tf:"server_name,omitempty"` // Reference to a Server in dbformysql to populate serverName. @@ -84,7 +84,7 @@ type ConfigurationParameters struct { ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` // Specifies the name of the MySQL Server. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dbformysql/v1beta1.Server + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dbformysql/v1beta2.Server // +kubebuilder:validation:Optional ServerName *string `json:"serverName,omitempty" tf:"server_name,omitempty"` diff --git a/apis/dbformysql/v1beta1/zz_database_types.go b/apis/dbformysql/v1beta1/zz_database_types.go index c1b505842..dc53579fd 100755 --- a/apis/dbformysql/v1beta1/zz_database_types.go +++ b/apis/dbformysql/v1beta1/zz_database_types.go @@ -64,7 +64,7 @@ type DatabaseParameters struct { ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` // Specifies the name of the MySQL Server. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dbformysql/v1beta1.Server + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dbformysql/v1beta2.Server // +kubebuilder:validation:Optional ServerName *string `json:"serverName,omitempty" tf:"server_name,omitempty"` diff --git a/apis/dbformysql/v1beta1/zz_firewallrule_types.go b/apis/dbformysql/v1beta1/zz_firewallrule_types.go index 289af26a7..2e3c096c1 100755 --- a/apis/dbformysql/v1beta1/zz_firewallrule_types.go +++ b/apis/dbformysql/v1beta1/zz_firewallrule_types.go @@ -60,7 +60,7 @@ type FirewallRuleParameters struct { ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` // Specifies the name of the MySQL Server. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dbformysql/v1beta1.Server + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dbformysql/v1beta2.Server // +kubebuilder:validation:Optional ServerName *string `json:"serverName,omitempty" tf:"server_name,omitempty"` diff --git a/apis/dbformysql/v1beta1/zz_flexibledatabase_types.go b/apis/dbformysql/v1beta1/zz_flexibledatabase_types.go index 2b332d9a1..7156458b4 100755 --- a/apis/dbformysql/v1beta1/zz_flexibledatabase_types.go +++ b/apis/dbformysql/v1beta1/zz_flexibledatabase_types.go @@ -64,7 +64,7 @@ type FlexibleDatabaseParameters struct { ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` // Specifies the name of the MySQL Flexible Server. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dbformysql/v1beta1.FlexibleServer + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dbformysql/v1beta2.FlexibleServer // +kubebuilder:validation:Optional ServerName *string `json:"serverName,omitempty" tf:"server_name,omitempty"` diff --git a/apis/dbformysql/v1beta1/zz_flexibleserverconfiguration_types.go b/apis/dbformysql/v1beta1/zz_flexibleserverconfiguration_types.go index d0d63b581..2b9a6c849 100755 --- a/apis/dbformysql/v1beta1/zz_flexibleserverconfiguration_types.go +++ b/apis/dbformysql/v1beta1/zz_flexibleserverconfiguration_types.go @@ -50,7 +50,7 @@ type FlexibleServerConfigurationParameters struct { ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` // Specifies the name of the MySQL Flexible Server. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dbformysql/v1beta1.FlexibleServer + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dbformysql/v1beta2.FlexibleServer // +kubebuilder:validation:Optional ServerName *string `json:"serverName,omitempty" tf:"server_name,omitempty"` diff --git a/apis/dbformysql/v1beta1/zz_flexibleserverfirewallrule_types.go b/apis/dbformysql/v1beta1/zz_flexibleserverfirewallrule_types.go index 55a458e81..4f4b7fbd6 100755 --- a/apis/dbformysql/v1beta1/zz_flexibleserverfirewallrule_types.go +++ b/apis/dbformysql/v1beta1/zz_flexibleserverfirewallrule_types.go @@ -60,7 +60,7 @@ type FlexibleServerFirewallRuleParameters struct { ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` // Specifies the name of the MySQL Flexible Server. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dbformysql/v1beta1.FlexibleServer + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dbformysql/v1beta2.FlexibleServer // +kubebuilder:validation:Optional ServerName *string `json:"serverName,omitempty" tf:"server_name,omitempty"` diff --git a/apis/dbformysql/v1beta1/zz_generated.conversion_hubs.go b/apis/dbformysql/v1beta1/zz_generated.conversion_hubs.go index 6c0a352cf..a087599bd 100755 --- a/apis/dbformysql/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/dbformysql/v1beta1/zz_generated.conversion_hubs.go @@ -21,17 +21,11 @@ func (tr *FirewallRule) Hub() {} // Hub marks this type as a conversion hub. func (tr *FlexibleDatabase) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *FlexibleServer) Hub() {} - // Hub marks this type as a conversion hub. func (tr *FlexibleServerConfiguration) Hub() {} // Hub marks this type as a conversion hub. func (tr *FlexibleServerFirewallRule) Hub() {} -// Hub marks this type as a conversion hub. 
-func (tr *Server) Hub() {} - // Hub marks this type as a conversion hub. func (tr *VirtualNetworkRule) Hub() {} diff --git a/apis/dbformysql/v1beta1/zz_generated.conversion_spokes.go b/apis/dbformysql/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..26d7416bc --- /dev/null +++ b/apis/dbformysql/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,54 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this FlexibleServer to the hub type. +func (tr *FlexibleServer) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the FlexibleServer type. +func (tr *FlexibleServer) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Server to the hub type. 
+func (tr *Server) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Server type. +func (tr *Server) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/dbformysql/v1beta1/zz_generated.resolvers.go b/apis/dbformysql/v1beta1/zz_generated.resolvers.go index ebd7ccb4e..bbe896ff1 100644 --- a/apis/dbformysql/v1beta1/zz_generated.resolvers.go +++ b/apis/dbformysql/v1beta1/zz_generated.resolvers.go @@ -46,7 +46,7 @@ func (mg *ActiveDirectoryAdministrator) ResolveReferences( // ResolveReferences mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("dbformysql.azure.upbound.io", "v1beta1", "Server", "ServerList") + m, l, err = apisresolver.GetManagedResource("dbformysql.azure.upbound.io", "v1beta2", "Server", "ServerList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -84,7 +84,7 @@ func (mg *ActiveDirectoryAdministrator) ResolveReferences( // ResolveReferences mg.Spec.InitProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.ResourceGroupNameRef = 
rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("dbformysql.azure.upbound.io", "v1beta1", "Server", "ServerList") + m, l, err = apisresolver.GetManagedResource("dbformysql.azure.upbound.io", "v1beta2", "Server", "ServerList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -134,7 +134,7 @@ func (mg *Configuration) ResolveReferences(ctx context.Context, c client.Reader) mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("dbformysql.azure.upbound.io", "v1beta1", "Server", "ServerList") + m, l, err = apisresolver.GetManagedResource("dbformysql.azure.upbound.io", "v1beta2", "Server", "ServerList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -172,7 +172,7 @@ func (mg *Configuration) ResolveReferences(ctx context.Context, c client.Reader) mg.Spec.InitProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("dbformysql.azure.upbound.io", "v1beta1", "Server", "ServerList") + m, l, err = apisresolver.GetManagedResource("dbformysql.azure.upbound.io", "v1beta2", "Server", "ServerList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -222,7 +222,7 @@ func (mg *Database) ResolveReferences(ctx context.Context, c client.Reader) erro mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("dbformysql.azure.upbound.io", "v1beta1", "Server", "ServerList") + m, l, err = 
apisresolver.GetManagedResource("dbformysql.azure.upbound.io", "v1beta2", "Server", "ServerList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -272,7 +272,7 @@ func (mg *FirewallRule) ResolveReferences(ctx context.Context, c client.Reader) mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("dbformysql.azure.upbound.io", "v1beta1", "Server", "ServerList") + m, l, err = apisresolver.GetManagedResource("dbformysql.azure.upbound.io", "v1beta2", "Server", "ServerList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -322,7 +322,7 @@ func (mg *FlexibleDatabase) ResolveReferences(ctx context.Context, c client.Read mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("dbformysql.azure.upbound.io", "v1beta1", "FlexibleServer", "FlexibleServerList") + m, l, err = apisresolver.GetManagedResource("dbformysql.azure.upbound.io", "v1beta2", "FlexibleServer", "FlexibleServerList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -479,7 +479,7 @@ func (mg *FlexibleServerConfiguration) ResolveReferences(ctx context.Context, c mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("dbformysql.azure.upbound.io", "v1beta1", "FlexibleServer", "FlexibleServerList") + m, l, err = apisresolver.GetManagedResource("dbformysql.azure.upbound.io", "v1beta2", "FlexibleServer", "FlexibleServerList") if err != nil { 
return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -529,7 +529,7 @@ func (mg *FlexibleServerFirewallRule) ResolveReferences(ctx context.Context, c c mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("dbformysql.azure.upbound.io", "v1beta1", "FlexibleServer", "FlexibleServerList") + m, l, err = apisresolver.GetManagedResource("dbformysql.azure.upbound.io", "v1beta2", "FlexibleServer", "FlexibleServerList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -610,7 +610,7 @@ func (mg *VirtualNetworkRule) ResolveReferences(ctx context.Context, c client.Re mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("dbformysql.azure.upbound.io", "v1beta1", "Server", "ServerList") + m, l, err = apisresolver.GetManagedResource("dbformysql.azure.upbound.io", "v1beta2", "Server", "ServerList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -629,7 +629,7 @@ func (mg *VirtualNetworkRule) ResolveReferences(ctx context.Context, c client.Re mg.Spec.ForProvider.ServerName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ServerNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "Subnet", "SubnetList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -648,7 +648,7 @@ func (mg 
*VirtualNetworkRule) ResolveReferences(ctx context.Context, c client.Re mg.Spec.ForProvider.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.SubnetIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "Subnet", "SubnetList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/dbformysql/v1beta1/zz_virtualnetworkrule_types.go b/apis/dbformysql/v1beta1/zz_virtualnetworkrule_types.go index ffd452d79..dce81d9e4 100755 --- a/apis/dbformysql/v1beta1/zz_virtualnetworkrule_types.go +++ b/apis/dbformysql/v1beta1/zz_virtualnetworkrule_types.go @@ -16,7 +16,7 @@ import ( type VirtualNetworkRuleInitParameters struct { // The ID of the subnet that the MySQL server will be connected to. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.Subnet + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` @@ -60,7 +60,7 @@ type VirtualNetworkRuleParameters struct { ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` // The name of the SQL Server to which this MySQL virtual network rule will be applied to. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dbformysql/v1beta1.Server + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dbformysql/v1beta2.Server // +kubebuilder:validation:Optional ServerName *string `json:"serverName,omitempty" tf:"server_name,omitempty"` @@ -73,7 +73,7 @@ type VirtualNetworkRuleParameters struct { ServerNameSelector *v1.Selector `json:"serverNameSelector,omitempty" tf:"-"` // The ID of the subnet that the MySQL server will be connected to. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.Subnet + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` diff --git a/apis/dbformysql/v1beta2/zz_flexibleserver_terraformed.go b/apis/dbformysql/v1beta2/zz_flexibleserver_terraformed.go new file mode 100755 index 000000000..318bcb318 --- /dev/null +++ b/apis/dbformysql/v1beta2/zz_flexibleserver_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this FlexibleServer +func (mg *FlexibleServer) GetTerraformResourceType() string { + return "azurerm_mysql_flexible_server" +} + +// GetConnectionDetailsMapping for this FlexibleServer +func (tr *FlexibleServer) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"administrator_password": "spec.forProvider.administratorPasswordSecretRef"} +} + +// GetObservation of this FlexibleServer +func (tr *FlexibleServer) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this FlexibleServer +func (tr *FlexibleServer) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this FlexibleServer +func (tr *FlexibleServer) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this FlexibleServer +func (tr *FlexibleServer) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this FlexibleServer +func (tr *FlexibleServer) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this FlexibleServer +func (tr *FlexibleServer) 
GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this FlexibleServer +func (tr *FlexibleServer) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this FlexibleServer using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *FlexibleServer) LateInitialize(attrs []byte) (bool, error) { + params := &FlexibleServerParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *FlexibleServer) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/dbformysql/v1beta2/zz_flexibleserver_types.go b/apis/dbformysql/v1beta2/zz_flexibleserver_types.go new file mode 100755 index 000000000..97d47395e --- /dev/null +++ b/apis/dbformysql/v1beta2/zz_flexibleserver_types.go @@ -0,0 +1,547 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CustomerManagedKeyInitParameters struct { + + // The ID of the geo backup Key Vault Key. It can't cross region and need Customer Managed Key in same region as geo backup. + GeoBackupKeyVaultKeyID *string `json:"geoBackupKeyVaultKeyId,omitempty" tf:"geo_backup_key_vault_key_id,omitempty"` + + // The geo backup user managed identity id for a Customer Managed Key. Should be added with identity_ids. It can't cross region and need identity in same region as geo backup. + GeoBackupUserAssignedIdentityID *string `json:"geoBackupUserAssignedIdentityId,omitempty" tf:"geo_backup_user_assigned_identity_id,omitempty"` + + // The ID of the Key Vault Key. + KeyVaultKeyID *string `json:"keyVaultKeyId,omitempty" tf:"key_vault_key_id,omitempty"` + + // Specifies the primary user managed identity id for a Customer Managed Key. Should be added with identity_ids. + PrimaryUserAssignedIdentityID *string `json:"primaryUserAssignedIdentityId,omitempty" tf:"primary_user_assigned_identity_id,omitempty"` +} + +type CustomerManagedKeyObservation struct { + + // The ID of the geo backup Key Vault Key. It can't cross region and need Customer Managed Key in same region as geo backup. 
+ GeoBackupKeyVaultKeyID *string `json:"geoBackupKeyVaultKeyId,omitempty" tf:"geo_backup_key_vault_key_id,omitempty"` + + // The geo backup user managed identity id for a Customer Managed Key. Should be added with identity_ids. It can't cross region and need identity in same region as geo backup. + GeoBackupUserAssignedIdentityID *string `json:"geoBackupUserAssignedIdentityId,omitempty" tf:"geo_backup_user_assigned_identity_id,omitempty"` + + // The ID of the Key Vault Key. + KeyVaultKeyID *string `json:"keyVaultKeyId,omitempty" tf:"key_vault_key_id,omitempty"` + + // Specifies the primary user managed identity id for a Customer Managed Key. Should be added with identity_ids. + PrimaryUserAssignedIdentityID *string `json:"primaryUserAssignedIdentityId,omitempty" tf:"primary_user_assigned_identity_id,omitempty"` +} + +type CustomerManagedKeyParameters struct { + + // The ID of the geo backup Key Vault Key. It can't cross region and need Customer Managed Key in same region as geo backup. + // +kubebuilder:validation:Optional + GeoBackupKeyVaultKeyID *string `json:"geoBackupKeyVaultKeyId,omitempty" tf:"geo_backup_key_vault_key_id,omitempty"` + + // The geo backup user managed identity id for a Customer Managed Key. Should be added with identity_ids. It can't cross region and need identity in same region as geo backup. + // +kubebuilder:validation:Optional + GeoBackupUserAssignedIdentityID *string `json:"geoBackupUserAssignedIdentityId,omitempty" tf:"geo_backup_user_assigned_identity_id,omitempty"` + + // The ID of the Key Vault Key. + // +kubebuilder:validation:Optional + KeyVaultKeyID *string `json:"keyVaultKeyId,omitempty" tf:"key_vault_key_id,omitempty"` + + // Specifies the primary user managed identity id for a Customer Managed Key. Should be added with identity_ids. 
+ // +kubebuilder:validation:Optional + PrimaryUserAssignedIdentityID *string `json:"primaryUserAssignedIdentityId,omitempty" tf:"primary_user_assigned_identity_id,omitempty"` +} + +type FlexibleServerInitParameters struct { + + // The Administrator login for the MySQL Flexible Server. Required when create_mode is Default. Changing this forces a new MySQL Flexible Server to be created. + AdministratorLogin *string `json:"administratorLogin,omitempty" tf:"administrator_login,omitempty"` + + // The backup retention days for the MySQL Flexible Server. Possible values are between 1 and 35 days. Defaults to 7. + BackupRetentionDays *float64 `json:"backupRetentionDays,omitempty" tf:"backup_retention_days,omitempty"` + + // The creation mode which can be used to restore or replicate existing servers. Possible values are Default, PointInTimeRestore, GeoRestore, and Replica. Changing this forces a new MySQL Flexible Server to be created. + CreateMode *string `json:"createMode,omitempty" tf:"create_mode,omitempty"` + + // A customer_managed_key block as defined below. + CustomerManagedKey *CustomerManagedKeyInitParameters `json:"customerManagedKey,omitempty" tf:"customer_managed_key,omitempty"` + + // The ID of the virtual network subnet to create the MySQL Flexible Server. Changing this forces a new MySQL Flexible Server to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + DelegatedSubnetID *string `json:"delegatedSubnetId,omitempty" tf:"delegated_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate delegatedSubnetId. + // +kubebuilder:validation:Optional + DelegatedSubnetIDRef *v1.Reference `json:"delegatedSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate delegatedSubnetId. 
+ // +kubebuilder:validation:Optional + DelegatedSubnetIDSelector *v1.Selector `json:"delegatedSubnetIdSelector,omitempty" tf:"-"` + + // Should geo redundant backup enabled? Defaults to false. Changing this forces a new MySQL Flexible Server to be created. + GeoRedundantBackupEnabled *bool `json:"geoRedundantBackupEnabled,omitempty" tf:"geo_redundant_backup_enabled,omitempty"` + + // A high_availability block as defined below. + HighAvailability *HighAvailabilityInitParameters `json:"highAvailability,omitempty" tf:"high_availability,omitempty"` + + // An identity block as defined below. + Identity *IdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The Azure Region where the MySQL Flexible Server should exist. Changing this forces a new MySQL Flexible Server to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A maintenance_window block as defined below. + MaintenanceWindow *MaintenanceWindowInitParameters `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"` + + // The point in time to restore from creation_source_server_id when create_mode is PointInTimeRestore. Changing this forces a new MySQL Flexible Server to be created. + PointInTimeRestoreTimeInUtc *string `json:"pointInTimeRestoreTimeInUtc,omitempty" tf:"point_in_time_restore_time_in_utc,omitempty"` + + // The ID of the private DNS zone to create the MySQL Flexible Server. Changing this forces a new MySQL Flexible Server to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.PrivateDNSZone + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + PrivateDNSZoneID *string `json:"privateDnsZoneId,omitempty" tf:"private_dns_zone_id,omitempty"` + + // Reference to a PrivateDNSZone in network to populate privateDnsZoneId. 
+ // +kubebuilder:validation:Optional + PrivateDNSZoneIDRef *v1.Reference `json:"privateDnsZoneIdRef,omitempty" tf:"-"` + + // Selector for a PrivateDNSZone in network to populate privateDnsZoneId. + // +kubebuilder:validation:Optional + PrivateDNSZoneIDSelector *v1.Selector `json:"privateDnsZoneIdSelector,omitempty" tf:"-"` + + // The replication role. Possible value is None. + ReplicationRole *string `json:"replicationRole,omitempty" tf:"replication_role,omitempty"` + + // The SKU Name for the MySQL Flexible Server. + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // The resource ID of the source MySQL Flexible Server to be restored. Required when create_mode is PointInTimeRestore, GeoRestore, and Replica. Changing this forces a new MySQL Flexible Server to be created. + SourceServerID *string `json:"sourceServerId,omitempty" tf:"source_server_id,omitempty"` + + // A storage block as defined below. + Storage *StorageInitParameters `json:"storage,omitempty" tf:"storage,omitempty"` + + // A mapping of tags which should be assigned to the MySQL Flexible Server. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The version of the MySQL Flexible Server to use. Possible values are 5.7, and 8.0.21. Changing this forces a new MySQL Flexible Server to be created. + Version *string `json:"version,omitempty" tf:"version,omitempty"` + + // Specifies the Availability Zone in which this MySQL Flexible Server should be located. Possible values are 1, 2 and 3. + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` +} + +type FlexibleServerObservation struct { + + // The Administrator login for the MySQL Flexible Server. Required when create_mode is Default. Changing this forces a new MySQL Flexible Server to be created. + AdministratorLogin *string `json:"administratorLogin,omitempty" tf:"administrator_login,omitempty"` + + // The backup retention days for the MySQL Flexible Server. 
Possible values are between 1 and 35 days. Defaults to 7. + BackupRetentionDays *float64 `json:"backupRetentionDays,omitempty" tf:"backup_retention_days,omitempty"` + + // The creation mode which can be used to restore or replicate existing servers. Possible values are Default, PointInTimeRestore, GeoRestore, and Replica. Changing this forces a new MySQL Flexible Server to be created. + CreateMode *string `json:"createMode,omitempty" tf:"create_mode,omitempty"` + + // A customer_managed_key block as defined below. + CustomerManagedKey *CustomerManagedKeyObservation `json:"customerManagedKey,omitempty" tf:"customer_managed_key,omitempty"` + + // The ID of the virtual network subnet to create the MySQL Flexible Server. Changing this forces a new MySQL Flexible Server to be created. + DelegatedSubnetID *string `json:"delegatedSubnetId,omitempty" tf:"delegated_subnet_id,omitempty"` + + // The fully qualified domain name of the MySQL Flexible Server. + Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"` + + // Should geo redundant backup enabled? Defaults to false. Changing this forces a new MySQL Flexible Server to be created. + GeoRedundantBackupEnabled *bool `json:"geoRedundantBackupEnabled,omitempty" tf:"geo_redundant_backup_enabled,omitempty"` + + // A high_availability block as defined below. + HighAvailability *HighAvailabilityObservation `json:"highAvailability,omitempty" tf:"high_availability,omitempty"` + + // The ID of the MySQL Flexible Server. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. + Identity *IdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // The Azure Region where the MySQL Flexible Server should exist. Changing this forces a new MySQL Flexible Server to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A maintenance_window block as defined below. 
+ MaintenanceWindow *MaintenanceWindowObservation `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"` + + // The point in time to restore from creation_source_server_id when create_mode is PointInTimeRestore. Changing this forces a new MySQL Flexible Server to be created. + PointInTimeRestoreTimeInUtc *string `json:"pointInTimeRestoreTimeInUtc,omitempty" tf:"point_in_time_restore_time_in_utc,omitempty"` + + // The ID of the private DNS zone to create the MySQL Flexible Server. Changing this forces a new MySQL Flexible Server to be created. + PrivateDNSZoneID *string `json:"privateDnsZoneId,omitempty" tf:"private_dns_zone_id,omitempty"` + + // Is the public network access enabled? + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The maximum number of replicas that a primary MySQL Flexible Server can have. + ReplicaCapacity *float64 `json:"replicaCapacity,omitempty" tf:"replica_capacity,omitempty"` + + // The replication role. Possible value is None. + ReplicationRole *string `json:"replicationRole,omitempty" tf:"replication_role,omitempty"` + + // The name of the Resource Group where the MySQL Flexible Server should exist. Changing this forces a new MySQL Flexible Server to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // The SKU Name for the MySQL Flexible Server. + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // The resource ID of the source MySQL Flexible Server to be restored. Required when create_mode is PointInTimeRestore, GeoRestore, and Replica. Changing this forces a new MySQL Flexible Server to be created. + SourceServerID *string `json:"sourceServerId,omitempty" tf:"source_server_id,omitempty"` + + // A storage block as defined below. 
+ Storage *StorageObservation `json:"storage,omitempty" tf:"storage,omitempty"` + + // A mapping of tags which should be assigned to the MySQL Flexible Server. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The version of the MySQL Flexible Server to use. Possible values are 5.7, and 8.0.21. Changing this forces a new MySQL Flexible Server to be created. + Version *string `json:"version,omitempty" tf:"version,omitempty"` + + // Specifies the Availability Zone in which this MySQL Flexible Server should be located. Possible values are 1, 2 and 3. + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` +} + +type FlexibleServerParameters struct { + + // The Administrator login for the MySQL Flexible Server. Required when create_mode is Default. Changing this forces a new MySQL Flexible Server to be created. + // +kubebuilder:validation:Optional + AdministratorLogin *string `json:"administratorLogin,omitempty" tf:"administrator_login,omitempty"` + + // The Password associated with the administrator_login for the MySQL Flexible Server. Required when create_mode is Default. + // +kubebuilder:validation:Optional + AdministratorPasswordSecretRef *v1.SecretKeySelector `json:"administratorPasswordSecretRef,omitempty" tf:"-"` + + // The backup retention days for the MySQL Flexible Server. Possible values are between 1 and 35 days. Defaults to 7. + // +kubebuilder:validation:Optional + BackupRetentionDays *float64 `json:"backupRetentionDays,omitempty" tf:"backup_retention_days,omitempty"` + + // The creation mode which can be used to restore or replicate existing servers. Possible values are Default, PointInTimeRestore, GeoRestore, and Replica. Changing this forces a new MySQL Flexible Server to be created. + // +kubebuilder:validation:Optional + CreateMode *string `json:"createMode,omitempty" tf:"create_mode,omitempty"` + + // A customer_managed_key block as defined below. 
+ // +kubebuilder:validation:Optional + CustomerManagedKey *CustomerManagedKeyParameters `json:"customerManagedKey,omitempty" tf:"customer_managed_key,omitempty"` + + // The ID of the virtual network subnet to create the MySQL Flexible Server. Changing this forces a new MySQL Flexible Server to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + DelegatedSubnetID *string `json:"delegatedSubnetId,omitempty" tf:"delegated_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate delegatedSubnetId. + // +kubebuilder:validation:Optional + DelegatedSubnetIDRef *v1.Reference `json:"delegatedSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate delegatedSubnetId. + // +kubebuilder:validation:Optional + DelegatedSubnetIDSelector *v1.Selector `json:"delegatedSubnetIdSelector,omitempty" tf:"-"` + + // Should geo redundant backup enabled? Defaults to false. Changing this forces a new MySQL Flexible Server to be created. + // +kubebuilder:validation:Optional + GeoRedundantBackupEnabled *bool `json:"geoRedundantBackupEnabled,omitempty" tf:"geo_redundant_backup_enabled,omitempty"` + + // A high_availability block as defined below. + // +kubebuilder:validation:Optional + HighAvailability *HighAvailabilityParameters `json:"highAvailability,omitempty" tf:"high_availability,omitempty"` + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *IdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The Azure Region where the MySQL Flexible Server should exist. Changing this forces a new MySQL Flexible Server to be created. 
+ // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A maintenance_window block as defined below. + // +kubebuilder:validation:Optional + MaintenanceWindow *MaintenanceWindowParameters `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"` + + // The point in time to restore from creation_source_server_id when create_mode is PointInTimeRestore. Changing this forces a new MySQL Flexible Server to be created. + // +kubebuilder:validation:Optional + PointInTimeRestoreTimeInUtc *string `json:"pointInTimeRestoreTimeInUtc,omitempty" tf:"point_in_time_restore_time_in_utc,omitempty"` + + // The ID of the private DNS zone to create the MySQL Flexible Server. Changing this forces a new MySQL Flexible Server to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.PrivateDNSZone + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + PrivateDNSZoneID *string `json:"privateDnsZoneId,omitempty" tf:"private_dns_zone_id,omitempty"` + + // Reference to a PrivateDNSZone in network to populate privateDnsZoneId. + // +kubebuilder:validation:Optional + PrivateDNSZoneIDRef *v1.Reference `json:"privateDnsZoneIdRef,omitempty" tf:"-"` + + // Selector for a PrivateDNSZone in network to populate privateDnsZoneId. + // +kubebuilder:validation:Optional + PrivateDNSZoneIDSelector *v1.Selector `json:"privateDnsZoneIdSelector,omitempty" tf:"-"` + + // The replication role. Possible value is None. + // +kubebuilder:validation:Optional + ReplicationRole *string `json:"replicationRole,omitempty" tf:"replication_role,omitempty"` + + // The name of the Resource Group where the MySQL Flexible Server should exist. Changing this forces a new MySQL Flexible Server to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // The SKU Name for the MySQL Flexible Server. + // +kubebuilder:validation:Optional + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // The resource ID of the source MySQL Flexible Server to be restored. Required when create_mode is PointInTimeRestore, GeoRestore, and Replica. Changing this forces a new MySQL Flexible Server to be created. + // +kubebuilder:validation:Optional + SourceServerID *string `json:"sourceServerId,omitempty" tf:"source_server_id,omitempty"` + + // A storage block as defined below. + // +kubebuilder:validation:Optional + Storage *StorageParameters `json:"storage,omitempty" tf:"storage,omitempty"` + + // A mapping of tags which should be assigned to the MySQL Flexible Server. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The version of the MySQL Flexible Server to use. Possible values are 5.7, and 8.0.21. Changing this forces a new MySQL Flexible Server to be created. + // +kubebuilder:validation:Optional + Version *string `json:"version,omitempty" tf:"version,omitempty"` + + // Specifies the Availability Zone in which this MySQL Flexible Server should be located. Possible values are 1, 2 and 3. 
+ // +kubebuilder:validation:Optional + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` +} + +type HighAvailabilityInitParameters struct { + + // The high availability mode for the MySQL Flexible Server. Possibles values are SameZone and ZoneRedundant. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // Specifies the Availability Zone in which the standby Flexible Server should be located. Possible values are 1, 2 and 3. + StandbyAvailabilityZone *string `json:"standbyAvailabilityZone,omitempty" tf:"standby_availability_zone,omitempty"` +} + +type HighAvailabilityObservation struct { + + // The high availability mode for the MySQL Flexible Server. Possibles values are SameZone and ZoneRedundant. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // Specifies the Availability Zone in which the standby Flexible Server should be located. Possible values are 1, 2 and 3. + StandbyAvailabilityZone *string `json:"standbyAvailabilityZone,omitempty" tf:"standby_availability_zone,omitempty"` +} + +type HighAvailabilityParameters struct { + + // The high availability mode for the MySQL Flexible Server. Possibles values are SameZone and ZoneRedundant. + // +kubebuilder:validation:Optional + Mode *string `json:"mode" tf:"mode,omitempty"` + + // Specifies the Availability Zone in which the standby Flexible Server should be located. Possible values are 1, 2 and 3. + // +kubebuilder:validation:Optional + StandbyAvailabilityZone *string `json:"standbyAvailabilityZone,omitempty" tf:"standby_availability_zone,omitempty"` +} + +type IdentityInitParameters struct { + + // A list of User Assigned Managed Identity IDs to be assigned to this MySQL Flexible Server. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this MySQL Flexible Server. The only possible value is UserAssigned. 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityObservation struct { + + // A list of User Assigned Managed Identity IDs to be assigned to this MySQL Flexible Server. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this MySQL Flexible Server. The only possible value is UserAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityParameters struct { + + // A list of User Assigned Managed Identity IDs to be assigned to this MySQL Flexible Server. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this MySQL Flexible Server. The only possible value is UserAssigned. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type MaintenanceWindowInitParameters struct { + + // The day of week for maintenance window. Defaults to 0. + DayOfWeek *float64 `json:"dayOfWeek,omitempty" tf:"day_of_week,omitempty"` + + // The start hour for maintenance window. Defaults to 0. + StartHour *float64 `json:"startHour,omitempty" tf:"start_hour,omitempty"` + + // The start minute for maintenance window. Defaults to 0. + StartMinute *float64 `json:"startMinute,omitempty" tf:"start_minute,omitempty"` +} + +type MaintenanceWindowObservation struct { + + // The day of week for maintenance window. Defaults to 0. + DayOfWeek *float64 `json:"dayOfWeek,omitempty" tf:"day_of_week,omitempty"` + + // The start hour for maintenance window. Defaults to 0. + StartHour *float64 `json:"startHour,omitempty" tf:"start_hour,omitempty"` + + // The start minute for maintenance window. Defaults to 0. 
+ StartMinute *float64 `json:"startMinute,omitempty" tf:"start_minute,omitempty"` +} + +type MaintenanceWindowParameters struct { + + // The day of week for maintenance window. Defaults to 0. + // +kubebuilder:validation:Optional + DayOfWeek *float64 `json:"dayOfWeek,omitempty" tf:"day_of_week,omitempty"` + + // The start hour for maintenance window. Defaults to 0. + // +kubebuilder:validation:Optional + StartHour *float64 `json:"startHour,omitempty" tf:"start_hour,omitempty"` + + // The start minute for maintenance window. Defaults to 0. + // +kubebuilder:validation:Optional + StartMinute *float64 `json:"startMinute,omitempty" tf:"start_minute,omitempty"` +} + +type StorageInitParameters struct { + + // Should Storage Auto Grow be enabled? Defaults to true. + AutoGrowEnabled *bool `json:"autoGrowEnabled,omitempty" tf:"auto_grow_enabled,omitempty"` + + // Should IOPS be scaled automatically? If true, iops can not be set. Defaults to false. + IoScalingEnabled *bool `json:"ioScalingEnabled,omitempty" tf:"io_scaling_enabled,omitempty"` + + // The storage IOPS for the MySQL Flexible Server. Possible values are between 360 and 20000. + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // The max storage allowed for the MySQL Flexible Server. Possible values are between 20 and 16384. + SizeGb *float64 `json:"sizeGb,omitempty" tf:"size_gb,omitempty"` +} + +type StorageObservation struct { + + // Should Storage Auto Grow be enabled? Defaults to true. + AutoGrowEnabled *bool `json:"autoGrowEnabled,omitempty" tf:"auto_grow_enabled,omitempty"` + + // Should IOPS be scaled automatically? If true, iops can not be set. Defaults to false. + IoScalingEnabled *bool `json:"ioScalingEnabled,omitempty" tf:"io_scaling_enabled,omitempty"` + + // The storage IOPS for the MySQL Flexible Server. Possible values are between 360 and 20000. + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // The max storage allowed for the MySQL Flexible Server. 
Possible values are between 20 and 16384. + SizeGb *float64 `json:"sizeGb,omitempty" tf:"size_gb,omitempty"` +} + +type StorageParameters struct { + + // Should Storage Auto Grow be enabled? Defaults to true. + // +kubebuilder:validation:Optional + AutoGrowEnabled *bool `json:"autoGrowEnabled,omitempty" tf:"auto_grow_enabled,omitempty"` + + // Should IOPS be scaled automatically? If true, iops can not be set. Defaults to false. + // +kubebuilder:validation:Optional + IoScalingEnabled *bool `json:"ioScalingEnabled,omitempty" tf:"io_scaling_enabled,omitempty"` + + // The storage IOPS for the MySQL Flexible Server. Possible values are between 360 and 20000. + // +kubebuilder:validation:Optional + Iops *float64 `json:"iops,omitempty" tf:"iops,omitempty"` + + // The max storage allowed for the MySQL Flexible Server. Possible values are between 20 and 16384. + // +kubebuilder:validation:Optional + SizeGb *float64 `json:"sizeGb,omitempty" tf:"size_gb,omitempty"` +} + +// FlexibleServerSpec defines the desired state of FlexibleServer +type FlexibleServerSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider FlexibleServerParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider FlexibleServerInitParameters `json:"initProvider,omitempty"` +} + +// FlexibleServerStatus defines the observed state of FlexibleServer. 
+type FlexibleServerStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider FlexibleServerObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// FlexibleServer is the Schema for the FlexibleServers API. Manages a MySQL Flexible Server. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type FlexibleServer struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + Spec FlexibleServerSpec `json:"spec"` + Status FlexibleServerStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// FlexibleServerList contains a list of FlexibleServers +type FlexibleServerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []FlexibleServer `json:"items"` +} + +// Repository type metadata. +var ( + FlexibleServer_Kind = "FlexibleServer" + FlexibleServer_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: FlexibleServer_Kind}.String() + FlexibleServer_KindAPIVersion = FlexibleServer_Kind + "." 
+ CRDGroupVersion.String() + FlexibleServer_GroupVersionKind = CRDGroupVersion.WithKind(FlexibleServer_Kind) +) + +func init() { + SchemeBuilder.Register(&FlexibleServer{}, &FlexibleServerList{}) +} diff --git a/apis/dbformysql/v1beta2/zz_generated.conversion_hubs.go b/apis/dbformysql/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 000000000..58c88158a --- /dev/null +++ b/apis/dbformysql/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,13 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *FlexibleServer) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Server) Hub() {} diff --git a/apis/dbformysql/v1beta2/zz_generated.deepcopy.go b/apis/dbformysql/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..f24fc6534 --- /dev/null +++ b/apis/dbformysql/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,1747 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomerManagedKeyInitParameters) DeepCopyInto(out *CustomerManagedKeyInitParameters) { + *out = *in + if in.GeoBackupKeyVaultKeyID != nil { + in, out := &in.GeoBackupKeyVaultKeyID, &out.GeoBackupKeyVaultKeyID + *out = new(string) + **out = **in + } + if in.GeoBackupUserAssignedIdentityID != nil { + in, out := &in.GeoBackupUserAssignedIdentityID, &out.GeoBackupUserAssignedIdentityID + *out = new(string) + **out = **in + } + if in.KeyVaultKeyID != nil { + in, out := &in.KeyVaultKeyID, &out.KeyVaultKeyID + *out = new(string) + **out = **in + } + if in.PrimaryUserAssignedIdentityID != nil { + in, out := &in.PrimaryUserAssignedIdentityID, &out.PrimaryUserAssignedIdentityID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomerManagedKeyInitParameters. +func (in *CustomerManagedKeyInitParameters) DeepCopy() *CustomerManagedKeyInitParameters { + if in == nil { + return nil + } + out := new(CustomerManagedKeyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomerManagedKeyObservation) DeepCopyInto(out *CustomerManagedKeyObservation) { + *out = *in + if in.GeoBackupKeyVaultKeyID != nil { + in, out := &in.GeoBackupKeyVaultKeyID, &out.GeoBackupKeyVaultKeyID + *out = new(string) + **out = **in + } + if in.GeoBackupUserAssignedIdentityID != nil { + in, out := &in.GeoBackupUserAssignedIdentityID, &out.GeoBackupUserAssignedIdentityID + *out = new(string) + **out = **in + } + if in.KeyVaultKeyID != nil { + in, out := &in.KeyVaultKeyID, &out.KeyVaultKeyID + *out = new(string) + **out = **in + } + if in.PrimaryUserAssignedIdentityID != nil { + in, out := &in.PrimaryUserAssignedIdentityID, &out.PrimaryUserAssignedIdentityID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomerManagedKeyObservation. +func (in *CustomerManagedKeyObservation) DeepCopy() *CustomerManagedKeyObservation { + if in == nil { + return nil + } + out := new(CustomerManagedKeyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomerManagedKeyParameters) DeepCopyInto(out *CustomerManagedKeyParameters) { + *out = *in + if in.GeoBackupKeyVaultKeyID != nil { + in, out := &in.GeoBackupKeyVaultKeyID, &out.GeoBackupKeyVaultKeyID + *out = new(string) + **out = **in + } + if in.GeoBackupUserAssignedIdentityID != nil { + in, out := &in.GeoBackupUserAssignedIdentityID, &out.GeoBackupUserAssignedIdentityID + *out = new(string) + **out = **in + } + if in.KeyVaultKeyID != nil { + in, out := &in.KeyVaultKeyID, &out.KeyVaultKeyID + *out = new(string) + **out = **in + } + if in.PrimaryUserAssignedIdentityID != nil { + in, out := &in.PrimaryUserAssignedIdentityID, &out.PrimaryUserAssignedIdentityID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomerManagedKeyParameters. +func (in *CustomerManagedKeyParameters) DeepCopy() *CustomerManagedKeyParameters { + if in == nil { + return nil + } + out := new(CustomerManagedKeyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlexibleServer) DeepCopyInto(out *FlexibleServer) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlexibleServer. +func (in *FlexibleServer) DeepCopy() *FlexibleServer { + if in == nil { + return nil + } + out := new(FlexibleServer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FlexibleServer) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *FlexibleServerInitParameters) DeepCopyInto(out *FlexibleServerInitParameters) { + *out = *in + if in.AdministratorLogin != nil { + in, out := &in.AdministratorLogin, &out.AdministratorLogin + *out = new(string) + **out = **in + } + if in.BackupRetentionDays != nil { + in, out := &in.BackupRetentionDays, &out.BackupRetentionDays + *out = new(float64) + **out = **in + } + if in.CreateMode != nil { + in, out := &in.CreateMode, &out.CreateMode + *out = new(string) + **out = **in + } + if in.CustomerManagedKey != nil { + in, out := &in.CustomerManagedKey, &out.CustomerManagedKey + *out = new(CustomerManagedKeyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DelegatedSubnetID != nil { + in, out := &in.DelegatedSubnetID, &out.DelegatedSubnetID + *out = new(string) + **out = **in + } + if in.DelegatedSubnetIDRef != nil { + in, out := &in.DelegatedSubnetIDRef, &out.DelegatedSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DelegatedSubnetIDSelector != nil { + in, out := &in.DelegatedSubnetIDSelector, &out.DelegatedSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.GeoRedundantBackupEnabled != nil { + in, out := &in.GeoRedundantBackupEnabled, &out.GeoRedundantBackupEnabled + *out = new(bool) + **out = **in + } + if in.HighAvailability != nil { + in, out := &in.HighAvailability, &out.HighAvailability + *out = new(HighAvailabilityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MaintenanceWindow != nil { + in, out := &in.MaintenanceWindow, &out.MaintenanceWindow + *out = new(MaintenanceWindowInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PointInTimeRestoreTimeInUtc != nil { + in, out := &in.PointInTimeRestoreTimeInUtc, 
&out.PointInTimeRestoreTimeInUtc + *out = new(string) + **out = **in + } + if in.PrivateDNSZoneID != nil { + in, out := &in.PrivateDNSZoneID, &out.PrivateDNSZoneID + *out = new(string) + **out = **in + } + if in.PrivateDNSZoneIDRef != nil { + in, out := &in.PrivateDNSZoneIDRef, &out.PrivateDNSZoneIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PrivateDNSZoneIDSelector != nil { + in, out := &in.PrivateDNSZoneIDSelector, &out.PrivateDNSZoneIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ReplicationRole != nil { + in, out := &in.ReplicationRole, &out.ReplicationRole + *out = new(string) + **out = **in + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.SourceServerID != nil { + in, out := &in.SourceServerID, &out.SourceServerID + *out = new(string) + **out = **in + } + if in.Storage != nil { + in, out := &in.Storage, &out.Storage + *out = new(StorageInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlexibleServerInitParameters. +func (in *FlexibleServerInitParameters) DeepCopy() *FlexibleServerInitParameters { + if in == nil { + return nil + } + out := new(FlexibleServerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FlexibleServerList) DeepCopyInto(out *FlexibleServerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]FlexibleServer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlexibleServerList. +func (in *FlexibleServerList) DeepCopy() *FlexibleServerList { + if in == nil { + return nil + } + out := new(FlexibleServerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FlexibleServerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlexibleServerObservation) DeepCopyInto(out *FlexibleServerObservation) { + *out = *in + if in.AdministratorLogin != nil { + in, out := &in.AdministratorLogin, &out.AdministratorLogin + *out = new(string) + **out = **in + } + if in.BackupRetentionDays != nil { + in, out := &in.BackupRetentionDays, &out.BackupRetentionDays + *out = new(float64) + **out = **in + } + if in.CreateMode != nil { + in, out := &in.CreateMode, &out.CreateMode + *out = new(string) + **out = **in + } + if in.CustomerManagedKey != nil { + in, out := &in.CustomerManagedKey, &out.CustomerManagedKey + *out = new(CustomerManagedKeyObservation) + (*in).DeepCopyInto(*out) + } + if in.DelegatedSubnetID != nil { + in, out := &in.DelegatedSubnetID, &out.DelegatedSubnetID + *out = new(string) + **out = **in + } + if in.Fqdn != nil { + in, out := &in.Fqdn, &out.Fqdn + *out = new(string) + **out = **in + } + if in.GeoRedundantBackupEnabled != nil { + in, out := &in.GeoRedundantBackupEnabled, &out.GeoRedundantBackupEnabled + *out = new(bool) + 
**out = **in + } + if in.HighAvailability != nil { + in, out := &in.HighAvailability, &out.HighAvailability + *out = new(HighAvailabilityObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MaintenanceWindow != nil { + in, out := &in.MaintenanceWindow, &out.MaintenanceWindow + *out = new(MaintenanceWindowObservation) + (*in).DeepCopyInto(*out) + } + if in.PointInTimeRestoreTimeInUtc != nil { + in, out := &in.PointInTimeRestoreTimeInUtc, &out.PointInTimeRestoreTimeInUtc + *out = new(string) + **out = **in + } + if in.PrivateDNSZoneID != nil { + in, out := &in.PrivateDNSZoneID, &out.PrivateDNSZoneID + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ReplicaCapacity != nil { + in, out := &in.ReplicaCapacity, &out.ReplicaCapacity + *out = new(float64) + **out = **in + } + if in.ReplicationRole != nil { + in, out := &in.ReplicationRole, &out.ReplicationRole + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.SourceServerID != nil { + in, out := &in.SourceServerID, &out.SourceServerID + *out = new(string) + **out = **in + } + if in.Storage != nil { + in, out := &in.Storage, &out.Storage + *out = new(StorageObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := 
range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlexibleServerObservation. +func (in *FlexibleServerObservation) DeepCopy() *FlexibleServerObservation { + if in == nil { + return nil + } + out := new(FlexibleServerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlexibleServerParameters) DeepCopyInto(out *FlexibleServerParameters) { + *out = *in + if in.AdministratorLogin != nil { + in, out := &in.AdministratorLogin, &out.AdministratorLogin + *out = new(string) + **out = **in + } + if in.AdministratorPasswordSecretRef != nil { + in, out := &in.AdministratorPasswordSecretRef, &out.AdministratorPasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.BackupRetentionDays != nil { + in, out := &in.BackupRetentionDays, &out.BackupRetentionDays + *out = new(float64) + **out = **in + } + if in.CreateMode != nil { + in, out := &in.CreateMode, &out.CreateMode + *out = new(string) + **out = **in + } + if in.CustomerManagedKey != nil { + in, out := &in.CustomerManagedKey, &out.CustomerManagedKey + *out = new(CustomerManagedKeyParameters) + (*in).DeepCopyInto(*out) + } + if in.DelegatedSubnetID != nil { + in, out := &in.DelegatedSubnetID, &out.DelegatedSubnetID + *out = new(string) + **out = **in + } + if in.DelegatedSubnetIDRef != nil { + in, out := &in.DelegatedSubnetIDRef, &out.DelegatedSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if 
in.DelegatedSubnetIDSelector != nil { + in, out := &in.DelegatedSubnetIDSelector, &out.DelegatedSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.GeoRedundantBackupEnabled != nil { + in, out := &in.GeoRedundantBackupEnabled, &out.GeoRedundantBackupEnabled + *out = new(bool) + **out = **in + } + if in.HighAvailability != nil { + in, out := &in.HighAvailability, &out.HighAvailability + *out = new(HighAvailabilityParameters) + (*in).DeepCopyInto(*out) + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MaintenanceWindow != nil { + in, out := &in.MaintenanceWindow, &out.MaintenanceWindow + *out = new(MaintenanceWindowParameters) + (*in).DeepCopyInto(*out) + } + if in.PointInTimeRestoreTimeInUtc != nil { + in, out := &in.PointInTimeRestoreTimeInUtc, &out.PointInTimeRestoreTimeInUtc + *out = new(string) + **out = **in + } + if in.PrivateDNSZoneID != nil { + in, out := &in.PrivateDNSZoneID, &out.PrivateDNSZoneID + *out = new(string) + **out = **in + } + if in.PrivateDNSZoneIDRef != nil { + in, out := &in.PrivateDNSZoneIDRef, &out.PrivateDNSZoneIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PrivateDNSZoneIDSelector != nil { + in, out := &in.PrivateDNSZoneIDSelector, &out.PrivateDNSZoneIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ReplicationRole != nil { + in, out := &in.ReplicationRole, &out.ReplicationRole + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := 
&in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.SourceServerID != nil { + in, out := &in.SourceServerID, &out.SourceServerID + *out = new(string) + **out = **in + } + if in.Storage != nil { + in, out := &in.Storage, &out.Storage + *out = new(StorageParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlexibleServerParameters. +func (in *FlexibleServerParameters) DeepCopy() *FlexibleServerParameters { + if in == nil { + return nil + } + out := new(FlexibleServerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlexibleServerSpec) DeepCopyInto(out *FlexibleServerSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlexibleServerSpec. 
+func (in *FlexibleServerSpec) DeepCopy() *FlexibleServerSpec { + if in == nil { + return nil + } + out := new(FlexibleServerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlexibleServerStatus) DeepCopyInto(out *FlexibleServerStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlexibleServerStatus. +func (in *FlexibleServerStatus) DeepCopy() *FlexibleServerStatus { + if in == nil { + return nil + } + out := new(FlexibleServerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HighAvailabilityInitParameters) DeepCopyInto(out *HighAvailabilityInitParameters) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.StandbyAvailabilityZone != nil { + in, out := &in.StandbyAvailabilityZone, &out.StandbyAvailabilityZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HighAvailabilityInitParameters. +func (in *HighAvailabilityInitParameters) DeepCopy() *HighAvailabilityInitParameters { + if in == nil { + return nil + } + out := new(HighAvailabilityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HighAvailabilityObservation) DeepCopyInto(out *HighAvailabilityObservation) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.StandbyAvailabilityZone != nil { + in, out := &in.StandbyAvailabilityZone, &out.StandbyAvailabilityZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HighAvailabilityObservation. +func (in *HighAvailabilityObservation) DeepCopy() *HighAvailabilityObservation { + if in == nil { + return nil + } + out := new(HighAvailabilityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HighAvailabilityParameters) DeepCopyInto(out *HighAvailabilityParameters) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.StandbyAvailabilityZone != nil { + in, out := &in.StandbyAvailabilityZone, &out.StandbyAvailabilityZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HighAvailabilityParameters. +func (in *HighAvailabilityParameters) DeepCopy() *HighAvailabilityParameters { + if in == nil { + return nil + } + out := new(HighAvailabilityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentityInitParameters) DeepCopyInto(out *IdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityInitParameters. +func (in *IdentityInitParameters) DeepCopy() *IdentityInitParameters { + if in == nil { + return nil + } + out := new(IdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityObservation) DeepCopyInto(out *IdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityObservation. +func (in *IdentityObservation) DeepCopy() *IdentityObservation { + if in == nil { + return nil + } + out := new(IdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentityParameters) DeepCopyInto(out *IdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityParameters. +func (in *IdentityParameters) DeepCopy() *IdentityParameters { + if in == nil { + return nil + } + out := new(IdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaintenanceWindowInitParameters) DeepCopyInto(out *MaintenanceWindowInitParameters) { + *out = *in + if in.DayOfWeek != nil { + in, out := &in.DayOfWeek, &out.DayOfWeek + *out = new(float64) + **out = **in + } + if in.StartHour != nil { + in, out := &in.StartHour, &out.StartHour + *out = new(float64) + **out = **in + } + if in.StartMinute != nil { + in, out := &in.StartMinute, &out.StartMinute + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceWindowInitParameters. +func (in *MaintenanceWindowInitParameters) DeepCopy() *MaintenanceWindowInitParameters { + if in == nil { + return nil + } + out := new(MaintenanceWindowInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MaintenanceWindowObservation) DeepCopyInto(out *MaintenanceWindowObservation) { + *out = *in + if in.DayOfWeek != nil { + in, out := &in.DayOfWeek, &out.DayOfWeek + *out = new(float64) + **out = **in + } + if in.StartHour != nil { + in, out := &in.StartHour, &out.StartHour + *out = new(float64) + **out = **in + } + if in.StartMinute != nil { + in, out := &in.StartMinute, &out.StartMinute + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceWindowObservation. +func (in *MaintenanceWindowObservation) DeepCopy() *MaintenanceWindowObservation { + if in == nil { + return nil + } + out := new(MaintenanceWindowObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaintenanceWindowParameters) DeepCopyInto(out *MaintenanceWindowParameters) { + *out = *in + if in.DayOfWeek != nil { + in, out := &in.DayOfWeek, &out.DayOfWeek + *out = new(float64) + **out = **in + } + if in.StartHour != nil { + in, out := &in.StartHour, &out.StartHour + *out = new(float64) + **out = **in + } + if in.StartMinute != nil { + in, out := &in.StartMinute, &out.StartMinute + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceWindowParameters. +func (in *MaintenanceWindowParameters) DeepCopy() *MaintenanceWindowParameters { + if in == nil { + return nil + } + out := new(MaintenanceWindowParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Server) DeepCopyInto(out *Server) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Server. +func (in *Server) DeepCopy() *Server { + if in == nil { + return nil + } + out := new(Server) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Server) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerIdentityInitParameters) DeepCopyInto(out *ServerIdentityInitParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerIdentityInitParameters. +func (in *ServerIdentityInitParameters) DeepCopy() *ServerIdentityInitParameters { + if in == nil { + return nil + } + out := new(ServerIdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerIdentityObservation) DeepCopyInto(out *ServerIdentityObservation) { + *out = *in + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerIdentityObservation. 
+func (in *ServerIdentityObservation) DeepCopy() *ServerIdentityObservation { + if in == nil { + return nil + } + out := new(ServerIdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerIdentityParameters) DeepCopyInto(out *ServerIdentityParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerIdentityParameters. +func (in *ServerIdentityParameters) DeepCopy() *ServerIdentityParameters { + if in == nil { + return nil + } + out := new(ServerIdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerInitParameters) DeepCopyInto(out *ServerInitParameters) { + *out = *in + if in.AdministratorLogin != nil { + in, out := &in.AdministratorLogin, &out.AdministratorLogin + *out = new(string) + **out = **in + } + if in.AutoGrowEnabled != nil { + in, out := &in.AutoGrowEnabled, &out.AutoGrowEnabled + *out = new(bool) + **out = **in + } + if in.BackupRetentionDays != nil { + in, out := &in.BackupRetentionDays, &out.BackupRetentionDays + *out = new(float64) + **out = **in + } + if in.CreateMode != nil { + in, out := &in.CreateMode, &out.CreateMode + *out = new(string) + **out = **in + } + if in.CreationSourceServerID != nil { + in, out := &in.CreationSourceServerID, &out.CreationSourceServerID + *out = new(string) + **out = **in + } + if in.GeoRedundantBackupEnabled != nil { + in, out := &in.GeoRedundantBackupEnabled, &out.GeoRedundantBackupEnabled + *out = new(bool) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(ServerIdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if 
in.InfrastructureEncryptionEnabled != nil { + in, out := &in.InfrastructureEncryptionEnabled, &out.InfrastructureEncryptionEnabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.RestorePointInTime != nil { + in, out := &in.RestorePointInTime, &out.RestorePointInTime + *out = new(string) + **out = **in + } + if in.SSLEnforcementEnabled != nil { + in, out := &in.SSLEnforcementEnabled, &out.SSLEnforcementEnabled + *out = new(bool) + **out = **in + } + if in.SSLMinimalTLSVersionEnforced != nil { + in, out := &in.SSLMinimalTLSVersionEnforced, &out.SSLMinimalTLSVersionEnforced + *out = new(string) + **out = **in + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.StorageMb != nil { + in, out := &in.StorageMb, &out.StorageMb + *out = new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ThreatDetectionPolicy != nil { + in, out := &in.ThreatDetectionPolicy, &out.ThreatDetectionPolicy + *out = new(ThreatDetectionPolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerInitParameters. 
+func (in *ServerInitParameters) DeepCopy() *ServerInitParameters { + if in == nil { + return nil + } + out := new(ServerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerList) DeepCopyInto(out *ServerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Server, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerList. +func (in *ServerList) DeepCopy() *ServerList { + if in == nil { + return nil + } + out := new(ServerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServerObservation) DeepCopyInto(out *ServerObservation) { + *out = *in + if in.AdministratorLogin != nil { + in, out := &in.AdministratorLogin, &out.AdministratorLogin + *out = new(string) + **out = **in + } + if in.AutoGrowEnabled != nil { + in, out := &in.AutoGrowEnabled, &out.AutoGrowEnabled + *out = new(bool) + **out = **in + } + if in.BackupRetentionDays != nil { + in, out := &in.BackupRetentionDays, &out.BackupRetentionDays + *out = new(float64) + **out = **in + } + if in.CreateMode != nil { + in, out := &in.CreateMode, &out.CreateMode + *out = new(string) + **out = **in + } + if in.CreationSourceServerID != nil { + in, out := &in.CreationSourceServerID, &out.CreationSourceServerID + *out = new(string) + **out = **in + } + if in.Fqdn != nil { + in, out := &in.Fqdn, &out.Fqdn + *out = new(string) + **out = **in + } + if in.GeoRedundantBackupEnabled != nil { + in, out := &in.GeoRedundantBackupEnabled, &out.GeoRedundantBackupEnabled + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(ServerIdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.InfrastructureEncryptionEnabled != nil { + in, out := &in.InfrastructureEncryptionEnabled, &out.InfrastructureEncryptionEnabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.RestorePointInTime != nil { + in, out := &in.RestorePointInTime, &out.RestorePointInTime + *out = new(string) + **out = **in + } + if in.SSLEnforcementEnabled != nil { + in, out := 
&in.SSLEnforcementEnabled, &out.SSLEnforcementEnabled + *out = new(bool) + **out = **in + } + if in.SSLMinimalTLSVersionEnforced != nil { + in, out := &in.SSLMinimalTLSVersionEnforced, &out.SSLMinimalTLSVersionEnforced + *out = new(string) + **out = **in + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.StorageMb != nil { + in, out := &in.StorageMb, &out.StorageMb + *out = new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ThreatDetectionPolicy != nil { + in, out := &in.ThreatDetectionPolicy, &out.ThreatDetectionPolicy + *out = new(ThreatDetectionPolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerObservation. +func (in *ServerObservation) DeepCopy() *ServerObservation { + if in == nil { + return nil + } + out := new(ServerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServerParameters) DeepCopyInto(out *ServerParameters) { + *out = *in + if in.AdministratorLogin != nil { + in, out := &in.AdministratorLogin, &out.AdministratorLogin + *out = new(string) + **out = **in + } + if in.AdministratorLoginPasswordSecretRef != nil { + in, out := &in.AdministratorLoginPasswordSecretRef, &out.AdministratorLoginPasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.AutoGrowEnabled != nil { + in, out := &in.AutoGrowEnabled, &out.AutoGrowEnabled + *out = new(bool) + **out = **in + } + if in.BackupRetentionDays != nil { + in, out := &in.BackupRetentionDays, &out.BackupRetentionDays + *out = new(float64) + **out = **in + } + if in.CreateMode != nil { + in, out := &in.CreateMode, &out.CreateMode + *out = new(string) + **out = **in + } + if in.CreationSourceServerID != nil { + in, out := &in.CreationSourceServerID, &out.CreationSourceServerID + *out = new(string) + **out = **in + } + if in.GeoRedundantBackupEnabled != nil { + in, out := &in.GeoRedundantBackupEnabled, &out.GeoRedundantBackupEnabled + *out = new(bool) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(ServerIdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.InfrastructureEncryptionEnabled != nil { + in, out := &in.InfrastructureEncryptionEnabled, &out.InfrastructureEncryptionEnabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if 
in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RestorePointInTime != nil { + in, out := &in.RestorePointInTime, &out.RestorePointInTime + *out = new(string) + **out = **in + } + if in.SSLEnforcementEnabled != nil { + in, out := &in.SSLEnforcementEnabled, &out.SSLEnforcementEnabled + *out = new(bool) + **out = **in + } + if in.SSLMinimalTLSVersionEnforced != nil { + in, out := &in.SSLMinimalTLSVersionEnforced, &out.SSLMinimalTLSVersionEnforced + *out = new(string) + **out = **in + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.StorageMb != nil { + in, out := &in.StorageMb, &out.StorageMb + *out = new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ThreatDetectionPolicy != nil { + in, out := &in.ThreatDetectionPolicy, &out.ThreatDetectionPolicy + *out = new(ThreatDetectionPolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerParameters. +func (in *ServerParameters) DeepCopy() *ServerParameters { + if in == nil { + return nil + } + out := new(ServerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServerSpec) DeepCopyInto(out *ServerSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerSpec. +func (in *ServerSpec) DeepCopy() *ServerSpec { + if in == nil { + return nil + } + out := new(ServerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerStatus) DeepCopyInto(out *ServerStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerStatus. +func (in *ServerStatus) DeepCopy() *ServerStatus { + if in == nil { + return nil + } + out := new(ServerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageInitParameters) DeepCopyInto(out *StorageInitParameters) { + *out = *in + if in.AutoGrowEnabled != nil { + in, out := &in.AutoGrowEnabled, &out.AutoGrowEnabled + *out = new(bool) + **out = **in + } + if in.IoScalingEnabled != nil { + in, out := &in.IoScalingEnabled, &out.IoScalingEnabled + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.SizeGb != nil { + in, out := &in.SizeGb, &out.SizeGb + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageInitParameters. 
+func (in *StorageInitParameters) DeepCopy() *StorageInitParameters { + if in == nil { + return nil + } + out := new(StorageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageObservation) DeepCopyInto(out *StorageObservation) { + *out = *in + if in.AutoGrowEnabled != nil { + in, out := &in.AutoGrowEnabled, &out.AutoGrowEnabled + *out = new(bool) + **out = **in + } + if in.IoScalingEnabled != nil { + in, out := &in.IoScalingEnabled, &out.IoScalingEnabled + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.SizeGb != nil { + in, out := &in.SizeGb, &out.SizeGb + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageObservation. +func (in *StorageObservation) DeepCopy() *StorageObservation { + if in == nil { + return nil + } + out := new(StorageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageParameters) DeepCopyInto(out *StorageParameters) { + *out = *in + if in.AutoGrowEnabled != nil { + in, out := &in.AutoGrowEnabled, &out.AutoGrowEnabled + *out = new(bool) + **out = **in + } + if in.IoScalingEnabled != nil { + in, out := &in.IoScalingEnabled, &out.IoScalingEnabled + *out = new(bool) + **out = **in + } + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(float64) + **out = **in + } + if in.SizeGb != nil { + in, out := &in.SizeGb, &out.SizeGb + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageParameters. 
+func (in *StorageParameters) DeepCopy() *StorageParameters { + if in == nil { + return nil + } + out := new(StorageParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ThreatDetectionPolicyInitParameters) DeepCopyInto(out *ThreatDetectionPolicyInitParameters) { + *out = *in + if in.DisabledAlerts != nil { + in, out := &in.DisabledAlerts, &out.DisabledAlerts + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.EmailAccountAdmins != nil { + in, out := &in.EmailAccountAdmins, &out.EmailAccountAdmins + *out = new(bool) + **out = **in + } + if in.EmailAddresses != nil { + in, out := &in.EmailAddresses, &out.EmailAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.RetentionDays != nil { + in, out := &in.RetentionDays, &out.RetentionDays + *out = new(float64) + **out = **in + } + if in.StorageEndpoint != nil { + in, out := &in.StorageEndpoint, &out.StorageEndpoint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThreatDetectionPolicyInitParameters. +func (in *ThreatDetectionPolicyInitParameters) DeepCopy() *ThreatDetectionPolicyInitParameters { + if in == nil { + return nil + } + out := new(ThreatDetectionPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ThreatDetectionPolicyObservation) DeepCopyInto(out *ThreatDetectionPolicyObservation) { + *out = *in + if in.DisabledAlerts != nil { + in, out := &in.DisabledAlerts, &out.DisabledAlerts + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.EmailAccountAdmins != nil { + in, out := &in.EmailAccountAdmins, &out.EmailAccountAdmins + *out = new(bool) + **out = **in + } + if in.EmailAddresses != nil { + in, out := &in.EmailAddresses, &out.EmailAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.RetentionDays != nil { + in, out := &in.RetentionDays, &out.RetentionDays + *out = new(float64) + **out = **in + } + if in.StorageEndpoint != nil { + in, out := &in.StorageEndpoint, &out.StorageEndpoint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThreatDetectionPolicyObservation. +func (in *ThreatDetectionPolicyObservation) DeepCopy() *ThreatDetectionPolicyObservation { + if in == nil { + return nil + } + out := new(ThreatDetectionPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ThreatDetectionPolicyParameters) DeepCopyInto(out *ThreatDetectionPolicyParameters) { + *out = *in + if in.DisabledAlerts != nil { + in, out := &in.DisabledAlerts, &out.DisabledAlerts + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.EmailAccountAdmins != nil { + in, out := &in.EmailAccountAdmins, &out.EmailAccountAdmins + *out = new(bool) + **out = **in + } + if in.EmailAddresses != nil { + in, out := &in.EmailAddresses, &out.EmailAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.RetentionDays != nil { + in, out := &in.RetentionDays, &out.RetentionDays + *out = new(float64) + **out = **in + } + if in.StorageAccountAccessKeySecretRef != nil { + in, out := &in.StorageAccountAccessKeySecretRef, &out.StorageAccountAccessKeySecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.StorageEndpoint != nil { + in, out := &in.StorageEndpoint, &out.StorageEndpoint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThreatDetectionPolicyParameters. 
+func (in *ThreatDetectionPolicyParameters) DeepCopy() *ThreatDetectionPolicyParameters { + if in == nil { + return nil + } + out := new(ThreatDetectionPolicyParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/dbformysql/v1beta2/zz_generated.managed.go b/apis/dbformysql/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..aad62c8b4 --- /dev/null +++ b/apis/dbformysql/v1beta2/zz_generated.managed.go @@ -0,0 +1,128 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this FlexibleServer. +func (mg *FlexibleServer) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this FlexibleServer. +func (mg *FlexibleServer) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this FlexibleServer. +func (mg *FlexibleServer) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this FlexibleServer. +func (mg *FlexibleServer) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this FlexibleServer. +func (mg *FlexibleServer) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this FlexibleServer. +func (mg *FlexibleServer) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this FlexibleServer. +func (mg *FlexibleServer) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this FlexibleServer. 
+func (mg *FlexibleServer) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this FlexibleServer. +func (mg *FlexibleServer) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this FlexibleServer. +func (mg *FlexibleServer) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this FlexibleServer. +func (mg *FlexibleServer) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this FlexibleServer. +func (mg *FlexibleServer) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Server. +func (mg *Server) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Server. +func (mg *Server) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Server. +func (mg *Server) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Server. +func (mg *Server) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Server. +func (mg *Server) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Server. +func (mg *Server) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Server. +func (mg *Server) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Server. 
+func (mg *Server) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Server. +func (mg *Server) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Server. +func (mg *Server) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Server. +func (mg *Server) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Server. +func (mg *Server) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/dbformysql/v1beta2/zz_generated.managedlist.go b/apis/dbformysql/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..379943ff1 --- /dev/null +++ b/apis/dbformysql/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,26 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this FlexibleServerList. +func (l *FlexibleServerList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ServerList. 
+func (l *ServerList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/dbformysql/v1beta2/zz_generated.resolvers.go b/apis/dbformysql/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..84cc0d525 --- /dev/null +++ b/apis/dbformysql/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,157 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + rconfig "github.com/upbound/provider-azure/apis/rconfig" + apisresolver "github.com/upbound/provider-azure/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *FlexibleServer) ResolveReferences( // ResolveReferences of this FlexibleServer. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DelegatedSubnetID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.DelegatedSubnetIDRef, + Selector: mg.Spec.ForProvider.DelegatedSubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DelegatedSubnetID") + } + mg.Spec.ForProvider.DelegatedSubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DelegatedSubnetIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "PrivateDNSZone", "PrivateDNSZoneList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.PrivateDNSZoneID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.PrivateDNSZoneIDRef, + Selector: mg.Spec.ForProvider.PrivateDNSZoneIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.PrivateDNSZoneID") + } + mg.Spec.ForProvider.PrivateDNSZoneID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.PrivateDNSZoneIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") 
+ if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DelegatedSubnetID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.DelegatedSubnetIDRef, + Selector: mg.Spec.InitProvider.DelegatedSubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.DelegatedSubnetID") + } + mg.Spec.InitProvider.DelegatedSubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DelegatedSubnetIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "PrivateDNSZone", "PrivateDNSZoneList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.PrivateDNSZoneID), + Extract: 
rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.PrivateDNSZoneIDRef, + Selector: mg.Spec.InitProvider.PrivateDNSZoneIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.PrivateDNSZoneID") + } + mg.Spec.InitProvider.PrivateDNSZoneID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.PrivateDNSZoneIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Server. +func (mg *Server) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/dbformysql/v1beta2/zz_groupversion_info.go b/apis/dbformysql/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..07cea497e --- /dev/null +++ b/apis/dbformysql/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +// +kubebuilder:object:generate=true +// +groupName=dbformysql.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "dbformysql.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/dbformysql/v1beta2/zz_server_terraformed.go b/apis/dbformysql/v1beta2/zz_server_terraformed.go new file mode 100755 index 000000000..9dec365f7 --- /dev/null +++ b/apis/dbformysql/v1beta2/zz_server_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Server +func (mg *Server) GetTerraformResourceType() string { + return "azurerm_mysql_server" +} + +// GetConnectionDetailsMapping for this Server +func (tr *Server) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"administrator_login_password": "spec.forProvider.administratorLoginPasswordSecretRef", "threat_detection_policy[*].storage_account_access_key": "spec.forProvider.threatDetectionPolicy[*].storageAccountAccessKeySecretRef"} +} + +// GetObservation of this Server +func (tr *Server) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Server +func (tr *Server) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Server +func (tr *Server) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Server +func (tr *Server) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Server +func (tr *Server) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Server +func (tr *Server) 
GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this Server
+func (tr *Server) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this Server using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *Server) LateInitialize(attrs []byte) (bool, error) {
+	params := &ServerParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Server) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/dbformysql/v1beta2/zz_server_types.go b/apis/dbformysql/v1beta2/zz_server_types.go new file mode 100755 index 000000000..2fc52a42a --- /dev/null +++ b/apis/dbformysql/v1beta2/zz_server_types.go @@ -0,0 +1,400 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ServerIdentityInitParameters struct { + + // Specifies the type of Managed Service Identity that should be configured on this MySQL Server. The only possible value is SystemAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ServerIdentityObservation struct { + + // The Principal ID associated with this Managed Service Identity. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID associated with this Managed Service Identity. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this MySQL Server. The only possible value is SystemAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ServerIdentityParameters struct { + + // Specifies the type of Managed Service Identity that should be configured on this MySQL Server. The only possible value is SystemAssigned. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type ServerInitParameters struct { + + // The Administrator login for the MySQL Server. Required when create_mode is Default. 
Changing this forces a new resource to be created. + AdministratorLogin *string `json:"administratorLogin,omitempty" tf:"administrator_login,omitempty"` + + // Enable/Disable auto-growing of the storage. Storage auto-grow prevents your server from running out of storage and becoming read-only. If storage auto grow is enabled, the storage automatically grows without impacting the workload. Defaults to true. + AutoGrowEnabled *bool `json:"autoGrowEnabled,omitempty" tf:"auto_grow_enabled,omitempty"` + + // Backup retention days for the server, supported values are between 7 and 35 days. + BackupRetentionDays *float64 `json:"backupRetentionDays,omitempty" tf:"backup_retention_days,omitempty"` + + // The creation mode. Can be used to restore or replicate existing servers. Possible values are Default, Replica, GeoRestore, and PointInTimeRestore. Defaults to Default. + CreateMode *string `json:"createMode,omitempty" tf:"create_mode,omitempty"` + + // For creation modes other than Default, the source server ID to use. + CreationSourceServerID *string `json:"creationSourceServerId,omitempty" tf:"creation_source_server_id,omitempty"` + + // Turn Geo-redundant server backups on/off. This allows you to choose between locally redundant or geo-redundant backup storage in the General Purpose and Memory Optimized tiers. When the backups are stored in geo-redundant backup storage, they are not only stored within the region in which your server is hosted, but are also replicated to a paired data center. This provides better protection and ability to restore your server in a different region in the event of a disaster. This is not supported for the Basic tier. + GeoRedundantBackupEnabled *bool `json:"geoRedundantBackupEnabled,omitempty" tf:"geo_redundant_backup_enabled,omitempty"` + + // An identity block as defined below. + Identity *ServerIdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Whether or not infrastructure is encrypted for this server. 
Changing this forces a new resource to be created.
+	InfrastructureEncryptionEnabled *bool `json:"infrastructureEncryptionEnabled,omitempty" tf:"infrastructure_encryption_enabled,omitempty"`
+
+	// Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
+	Location *string `json:"location,omitempty" tf:"location,omitempty"`
+
+	// Whether or not public network access is allowed for this server. Defaults to true.
+	PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"`
+
+	// When create_mode is PointInTimeRestore, specifies the point in time to restore from creation_source_server_id. It should be provided in RFC3339 format, e.g. 2013-11-08T22:00:40Z.
+	RestorePointInTime *string `json:"restorePointInTime,omitempty" tf:"restore_point_in_time,omitempty"`
+
+	// Specifies if SSL should be enforced on connections. Possible values are true and false.
+	SSLEnforcementEnabled *bool `json:"sslEnforcementEnabled,omitempty" tf:"ssl_enforcement_enabled,omitempty"`
+
+	// The minimum TLS version to support on the server. Possible values are TLSEnforcementDisabled, TLS1_0, TLS1_1, and TLS1_2. Defaults to TLS1_2.
+	SSLMinimalTLSVersionEnforced *string `json:"sslMinimalTlsVersionEnforced,omitempty" tf:"ssl_minimal_tls_version_enforced,omitempty"`
+
+	// Specifies the SKU Name for this MySQL Server. The name of the SKU, follows the tier + family + cores pattern (e.g. B_Gen4_1, GP_Gen5_8). For more information see the product documentation. Possible values are B_Gen4_1, B_Gen4_2, B_Gen5_1, B_Gen5_2, GP_Gen4_2, GP_Gen4_4, GP_Gen4_8, GP_Gen4_16, GP_Gen4_32, GP_Gen5_2, GP_Gen5_4, GP_Gen5_8, GP_Gen5_16, GP_Gen5_32, GP_Gen5_64, MO_Gen5_2, MO_Gen5_4, MO_Gen5_8, MO_Gen5_16 and MO_Gen5_32.
+	SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"`
+
+	// Max storage allowed for a server.
Possible values are between 5120 MB(5GB) and 1048576 MB(1TB) for the Basic SKU and between 5120 MB(5GB) and 16777216 MB(16TB) for General Purpose/Memory Optimized SKUs. For more information see the product documentation. + StorageMb *float64 `json:"storageMb,omitempty" tf:"storage_mb,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Threat detection policy configuration, known in the API as Server Security Alerts Policy. The threat_detection_policy block supports fields documented below. + ThreatDetectionPolicy *ThreatDetectionPolicyInitParameters `json:"threatDetectionPolicy,omitempty" tf:"threat_detection_policy,omitempty"` + + // Specifies the version of MySQL to use. Valid values are 5.7, or 8.0. Changing this forces a new resource to be created. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type ServerObservation struct { + + // The Administrator login for the MySQL Server. Required when create_mode is Default. Changing this forces a new resource to be created. + AdministratorLogin *string `json:"administratorLogin,omitempty" tf:"administrator_login,omitempty"` + + // Enable/Disable auto-growing of the storage. Storage auto-grow prevents your server from running out of storage and becoming read-only. If storage auto grow is enabled, the storage automatically grows without impacting the workload. Defaults to true. + AutoGrowEnabled *bool `json:"autoGrowEnabled,omitempty" tf:"auto_grow_enabled,omitempty"` + + // Backup retention days for the server, supported values are between 7 and 35 days. + BackupRetentionDays *float64 `json:"backupRetentionDays,omitempty" tf:"backup_retention_days,omitempty"` + + // The creation mode. Can be used to restore or replicate existing servers. Possible values are Default, Replica, GeoRestore, and PointInTimeRestore. Defaults to Default. 
+ CreateMode *string `json:"createMode,omitempty" tf:"create_mode,omitempty"` + + // For creation modes other than Default, the source server ID to use. + CreationSourceServerID *string `json:"creationSourceServerId,omitempty" tf:"creation_source_server_id,omitempty"` + + // The FQDN of the MySQL Server. + Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"` + + // Turn Geo-redundant server backups on/off. This allows you to choose between locally redundant or geo-redundant backup storage in the General Purpose and Memory Optimized tiers. When the backups are stored in geo-redundant backup storage, they are not only stored within the region in which your server is hosted, but are also replicated to a paired data center. This provides better protection and ability to restore your server in a different region in the event of a disaster. This is not supported for the Basic tier. + GeoRedundantBackupEnabled *bool `json:"geoRedundantBackupEnabled,omitempty" tf:"geo_redundant_backup_enabled,omitempty"` + + // The ID of the MySQL Server. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. + Identity *ServerIdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // Whether or not infrastructure is encrypted for this server. Changing this forces a new resource to be created. + InfrastructureEncryptionEnabled *bool `json:"infrastructureEncryptionEnabled,omitempty" tf:"infrastructure_encryption_enabled,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Whether or not public network access is allowed for this server. Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The name of the resource group in which to create the MySQL Server. 
Changing this forces a new resource to be created.
+	ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"`
+
+	// When create_mode is PointInTimeRestore, specifies the point in time to restore from creation_source_server_id. It should be provided in RFC3339 format, e.g. 2013-11-08T22:00:40Z.
+	RestorePointInTime *string `json:"restorePointInTime,omitempty" tf:"restore_point_in_time,omitempty"`
+
+	// Specifies if SSL should be enforced on connections. Possible values are true and false.
+	SSLEnforcementEnabled *bool `json:"sslEnforcementEnabled,omitempty" tf:"ssl_enforcement_enabled,omitempty"`
+
+	// The minimum TLS version to support on the server. Possible values are TLSEnforcementDisabled, TLS1_0, TLS1_1, and TLS1_2. Defaults to TLS1_2.
+	SSLMinimalTLSVersionEnforced *string `json:"sslMinimalTlsVersionEnforced,omitempty" tf:"ssl_minimal_tls_version_enforced,omitempty"`
+
+	// Specifies the SKU Name for this MySQL Server. The name of the SKU, follows the tier + family + cores pattern (e.g. B_Gen4_1, GP_Gen5_8). For more information see the product documentation. Possible values are B_Gen4_1, B_Gen4_2, B_Gen5_1, B_Gen5_2, GP_Gen4_2, GP_Gen4_4, GP_Gen4_8, GP_Gen4_16, GP_Gen4_32, GP_Gen5_2, GP_Gen5_4, GP_Gen5_8, GP_Gen5_16, GP_Gen5_32, GP_Gen5_64, MO_Gen5_2, MO_Gen5_4, MO_Gen5_8, MO_Gen5_16 and MO_Gen5_32.
+	SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"`
+
+	// Max storage allowed for a server. Possible values are between 5120 MB(5GB) and 1048576 MB(1TB) for the Basic SKU and between 5120 MB(5GB) and 16777216 MB(16TB) for General Purpose/Memory Optimized SKUs. For more information see the product documentation.
+	StorageMb *float64 `json:"storageMb,omitempty" tf:"storage_mb,omitempty"`
+
+	// A mapping of tags to assign to the resource.
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Threat detection policy configuration, known in the API as Server Security Alerts Policy. The threat_detection_policy block supports fields documented below. + ThreatDetectionPolicy *ThreatDetectionPolicyObservation `json:"threatDetectionPolicy,omitempty" tf:"threat_detection_policy,omitempty"` + + // Specifies the version of MySQL to use. Valid values are 5.7, or 8.0. Changing this forces a new resource to be created. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type ServerParameters struct { + + // The Administrator login for the MySQL Server. Required when create_mode is Default. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + AdministratorLogin *string `json:"administratorLogin,omitempty" tf:"administrator_login,omitempty"` + + // The Password associated with the administrator_login for the MySQL Server. Required when create_mode is Default. + // +kubebuilder:validation:Optional + AdministratorLoginPasswordSecretRef *v1.SecretKeySelector `json:"administratorLoginPasswordSecretRef,omitempty" tf:"-"` + + // Enable/Disable auto-growing of the storage. Storage auto-grow prevents your server from running out of storage and becoming read-only. If storage auto grow is enabled, the storage automatically grows without impacting the workload. Defaults to true. + // +kubebuilder:validation:Optional + AutoGrowEnabled *bool `json:"autoGrowEnabled,omitempty" tf:"auto_grow_enabled,omitempty"` + + // Backup retention days for the server, supported values are between 7 and 35 days. + // +kubebuilder:validation:Optional + BackupRetentionDays *float64 `json:"backupRetentionDays,omitempty" tf:"backup_retention_days,omitempty"` + + // The creation mode. Can be used to restore or replicate existing servers. Possible values are Default, Replica, GeoRestore, and PointInTimeRestore. Defaults to Default. 
+ // +kubebuilder:validation:Optional + CreateMode *string `json:"createMode,omitempty" tf:"create_mode,omitempty"` + + // For creation modes other than Default, the source server ID to use. + // +kubebuilder:validation:Optional + CreationSourceServerID *string `json:"creationSourceServerId,omitempty" tf:"creation_source_server_id,omitempty"` + + // Turn Geo-redundant server backups on/off. This allows you to choose between locally redundant or geo-redundant backup storage in the General Purpose and Memory Optimized tiers. When the backups are stored in geo-redundant backup storage, they are not only stored within the region in which your server is hosted, but are also replicated to a paired data center. This provides better protection and ability to restore your server in a different region in the event of a disaster. This is not supported for the Basic tier. + // +kubebuilder:validation:Optional + GeoRedundantBackupEnabled *bool `json:"geoRedundantBackupEnabled,omitempty" tf:"geo_redundant_backup_enabled,omitempty"` + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *ServerIdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Whether or not infrastructure is encrypted for this server. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + InfrastructureEncryptionEnabled *bool `json:"infrastructureEncryptionEnabled,omitempty" tf:"infrastructure_encryption_enabled,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Whether or not public network access is allowed for this server. Defaults to true. 
+ // +kubebuilder:validation:Optional + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The name of the resource group in which to create the MySQL Server. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // When create_mode is PointInTimeRestore, specifies the point in time to restore from creation_source_server_id. It should be provided in RFC3339 format, e.g. 2013-11-08T22:00:40Z. + // +kubebuilder:validation:Optional + RestorePointInTime *string `json:"restorePointInTime,omitempty" tf:"restore_point_in_time,omitempty"` + + // Specifies if SSL should be enforced on connections. Possible values are true and false. + // +kubebuilder:validation:Optional + SSLEnforcementEnabled *bool `json:"sslEnforcementEnabled,omitempty" tf:"ssl_enforcement_enabled,omitempty"` + + // The minimum TLS version to support on the sever. Possible values are TLSEnforcementDisabled, TLS1_0, TLS1_1, and TLS1_2. Defaults to TLS1_2. + // +kubebuilder:validation:Optional + SSLMinimalTLSVersionEnforced *string `json:"sslMinimalTlsVersionEnforced,omitempty" tf:"ssl_minimal_tls_version_enforced,omitempty"` + + // Specifies the SKU Name for this MySQL Server. The name of the SKU, follows the tier + family + cores pattern (e.g. B_Gen4_1, GP_Gen5_8). 
For more information see the product documentation. Possible values are B_Gen4_1, B_Gen4_2, B_Gen5_1, B_Gen5_2, GP_Gen4_2, GP_Gen4_4, GP_Gen4_8, GP_Gen4_16, GP_Gen4_32, GP_Gen5_2, GP_Gen5_4, GP_Gen5_8, GP_Gen5_16, GP_Gen5_32, GP_Gen5_64, MO_Gen5_2, MO_Gen5_4, MO_Gen5_8, MO_Gen5_16 and MO_Gen5_32. + // +kubebuilder:validation:Optional + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // Max storage allowed for a server. Possible values are between 5120 MB(5GB) and 1048576 MB(1TB) for the Basic SKU and between 5120 MB(5GB) and 16777216 MB(16TB) for General Purpose/Memory Optimized SKUs. For more information see the product documentation. + // +kubebuilder:validation:Optional + StorageMb *float64 `json:"storageMb,omitempty" tf:"storage_mb,omitempty"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Threat detection policy configuration, known in the API as Server Security Alerts Policy. The threat_detection_policy block supports fields documented below. + // +kubebuilder:validation:Optional + ThreatDetectionPolicy *ThreatDetectionPolicyParameters `json:"threatDetectionPolicy,omitempty" tf:"threat_detection_policy,omitempty"` + + // Specifies the version of MySQL to use. Valid values are 5.7, or 8.0. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type ThreatDetectionPolicyInitParameters struct { + + // Specifies a list of alerts which should be disabled. Possible values are Sql_Injection, Sql_Injection_Vulnerability, Access_Anomaly, Data_Exfiltration and Unsafe_Action. + // +listType=set + DisabledAlerts []*string `json:"disabledAlerts,omitempty" tf:"disabled_alerts,omitempty"` + + // Should the account administrators be emailed when this alert is triggered? 
+ EmailAccountAdmins *bool `json:"emailAccountAdmins,omitempty" tf:"email_account_admins,omitempty"` + + // A list of email addresses which alerts should be sent to. + // +listType=set + EmailAddresses []*string `json:"emailAddresses,omitempty" tf:"email_addresses,omitempty"` + + // Is the policy enabled? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Specifies the number of days to keep in the Threat Detection audit logs. + RetentionDays *float64 `json:"retentionDays,omitempty" tf:"retention_days,omitempty"` + + // Specifies the blob storage endpoint (e.g. https://example.blob.core.windows.net). This blob storage will hold all Threat Detection audit logs. + StorageEndpoint *string `json:"storageEndpoint,omitempty" tf:"storage_endpoint,omitempty"` +} + +type ThreatDetectionPolicyObservation struct { + + // Specifies a list of alerts which should be disabled. Possible values are Sql_Injection, Sql_Injection_Vulnerability, Access_Anomaly, Data_Exfiltration and Unsafe_Action. + // +listType=set + DisabledAlerts []*string `json:"disabledAlerts,omitempty" tf:"disabled_alerts,omitempty"` + + // Should the account administrators be emailed when this alert is triggered? + EmailAccountAdmins *bool `json:"emailAccountAdmins,omitempty" tf:"email_account_admins,omitempty"` + + // A list of email addresses which alerts should be sent to. + // +listType=set + EmailAddresses []*string `json:"emailAddresses,omitempty" tf:"email_addresses,omitempty"` + + // Is the policy enabled? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Specifies the number of days to keep in the Threat Detection audit logs. + RetentionDays *float64 `json:"retentionDays,omitempty" tf:"retention_days,omitempty"` + + // Specifies the blob storage endpoint (e.g. https://example.blob.core.windows.net). This blob storage will hold all Threat Detection audit logs. 
+ StorageEndpoint *string `json:"storageEndpoint,omitempty" tf:"storage_endpoint,omitempty"` +} + +type ThreatDetectionPolicyParameters struct { + + // Specifies a list of alerts which should be disabled. Possible values are Sql_Injection, Sql_Injection_Vulnerability, Access_Anomaly, Data_Exfiltration and Unsafe_Action. + // +kubebuilder:validation:Optional + // +listType=set + DisabledAlerts []*string `json:"disabledAlerts,omitempty" tf:"disabled_alerts,omitempty"` + + // Should the account administrators be emailed when this alert is triggered? + // +kubebuilder:validation:Optional + EmailAccountAdmins *bool `json:"emailAccountAdmins,omitempty" tf:"email_account_admins,omitempty"` + + // A list of email addresses which alerts should be sent to. + // +kubebuilder:validation:Optional + // +listType=set + EmailAddresses []*string `json:"emailAddresses,omitempty" tf:"email_addresses,omitempty"` + + // Is the policy enabled? + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Specifies the number of days to keep in the Threat Detection audit logs. + // +kubebuilder:validation:Optional + RetentionDays *float64 `json:"retentionDays,omitempty" tf:"retention_days,omitempty"` + + // Specifies the identifier key of the Threat Detection audit storage account. + // +kubebuilder:validation:Optional + StorageAccountAccessKeySecretRef *v1.SecretKeySelector `json:"storageAccountAccessKeySecretRef,omitempty" tf:"-"` + + // Specifies the blob storage endpoint (e.g. https://example.blob.core.windows.net). This blob storage will hold all Threat Detection audit logs. + // +kubebuilder:validation:Optional + StorageEndpoint *string `json:"storageEndpoint,omitempty" tf:"storage_endpoint,omitempty"` +} + +// ServerSpec defines the desired state of Server +type ServerSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ServerParameters `json:"forProvider"` + // THIS IS A BETA FIELD. 
It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ServerInitParameters `json:"initProvider,omitempty"` +} + +// ServerStatus defines the observed state of Server. +type ServerStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ServerObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Server is the Schema for the Servers API. Manages a MySQL Server. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type Server struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.skuName) || (has(self.initProvider) && has(self.initProvider.skuName))",message="spec.forProvider.skuName is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.sslEnforcementEnabled) || (has(self.initProvider) && has(self.initProvider.sslEnforcementEnabled))",message="spec.forProvider.sslEnforcementEnabled is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.version) || (has(self.initProvider) && has(self.initProvider.version))",message="spec.forProvider.version is a required parameter" + Spec ServerSpec `json:"spec"` + Status ServerStatus 
`json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ServerList contains a list of Servers +type ServerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Server `json:"items"` +} + +// Repository type metadata. +var ( + Server_Kind = "Server" + Server_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Server_Kind}.String() + Server_KindAPIVersion = Server_Kind + "." + CRDGroupVersion.String() + Server_GroupVersionKind = CRDGroupVersion.WithKind(Server_Kind) +) + +func init() { + SchemeBuilder.Register(&Server{}, &ServerList{}) +} diff --git a/apis/dbforpostgresql/v1beta1/zz_activedirectoryadministrator_types.go b/apis/dbforpostgresql/v1beta1/zz_activedirectoryadministrator_types.go index 88448d772..d755dc6cb 100755 --- a/apis/dbforpostgresql/v1beta1/zz_activedirectoryadministrator_types.go +++ b/apis/dbforpostgresql/v1beta1/zz_activedirectoryadministrator_types.go @@ -70,7 +70,7 @@ type ActiveDirectoryAdministratorParameters struct { ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` // The name of the PostgreSQL Server on which to set the administrator. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dbforpostgresql/v1beta1.Server + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dbforpostgresql/v1beta2.Server // +kubebuilder:validation:Optional ServerName *string `json:"serverName,omitempty" tf:"server_name,omitempty"` diff --git a/apis/dbforpostgresql/v1beta1/zz_configuration_types.go b/apis/dbforpostgresql/v1beta1/zz_configuration_types.go index 3a6a783f1..8ab7dc3dd 100755 --- a/apis/dbforpostgresql/v1beta1/zz_configuration_types.go +++ b/apis/dbforpostgresql/v1beta1/zz_configuration_types.go @@ -31,7 +31,7 @@ type ConfigurationInitParameters struct { ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` // Specifies the name of the PostgreSQL Server. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dbforpostgresql/v1beta1.Server + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dbforpostgresql/v1beta2.Server ServerName *string `json:"serverName,omitempty" tf:"server_name,omitempty"` // Reference to a Server in dbforpostgresql to populate serverName. @@ -84,7 +84,7 @@ type ConfigurationParameters struct { ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` // Specifies the name of the PostgreSQL Server. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dbforpostgresql/v1beta1.Server + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dbforpostgresql/v1beta2.Server // +kubebuilder:validation:Optional ServerName *string `json:"serverName,omitempty" tf:"server_name,omitempty"` diff --git a/apis/dbforpostgresql/v1beta1/zz_database_types.go b/apis/dbforpostgresql/v1beta1/zz_database_types.go index d74dd4ea7..1a5770097 100755 --- a/apis/dbforpostgresql/v1beta1/zz_database_types.go +++ b/apis/dbforpostgresql/v1beta1/zz_database_types.go @@ -64,7 +64,7 @@ type DatabaseParameters struct { ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` // Specifies the name of the PostgreSQL Server. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dbforpostgresql/v1beta1.Server + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dbforpostgresql/v1beta2.Server // +kubebuilder:validation:Optional ServerName *string `json:"serverName,omitempty" tf:"server_name,omitempty"` diff --git a/apis/dbforpostgresql/v1beta1/zz_firewallrule_types.go b/apis/dbforpostgresql/v1beta1/zz_firewallrule_types.go index 1bc65f477..099830112 100755 --- a/apis/dbforpostgresql/v1beta1/zz_firewallrule_types.go +++ b/apis/dbforpostgresql/v1beta1/zz_firewallrule_types.go @@ -60,7 +60,7 @@ type FirewallRuleParameters struct { ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` // Specifies the name of the PostgreSQL Server. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dbforpostgresql/v1beta1.Server + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dbforpostgresql/v1beta2.Server // +kubebuilder:validation:Optional ServerName *string `json:"serverName,omitempty" tf:"server_name,omitempty"` diff --git a/apis/dbforpostgresql/v1beta1/zz_flexibleserveractivedirectoryadministrator_types.go b/apis/dbforpostgresql/v1beta1/zz_flexibleserveractivedirectoryadministrator_types.go index e8f168aa0..a1f318700 100755 --- a/apis/dbforpostgresql/v1beta1/zz_flexibleserveractivedirectoryadministrator_types.go +++ b/apis/dbforpostgresql/v1beta1/zz_flexibleserveractivedirectoryadministrator_types.go @@ -77,7 +77,7 @@ type FlexibleServerActiveDirectoryAdministratorParameters struct { ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` // The name of the PostgreSQL Flexible Server on which to set the administrator. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dbforpostgresql/v1beta1.FlexibleServer + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dbforpostgresql/v1beta2.FlexibleServer // +kubebuilder:validation:Optional ServerName *string `json:"serverName,omitempty" tf:"server_name,omitempty"` diff --git a/apis/dbforpostgresql/v1beta1/zz_flexibleserverconfiguration_types.go b/apis/dbforpostgresql/v1beta1/zz_flexibleserverconfiguration_types.go index dbce52d21..5e8498d76 100755 --- a/apis/dbforpostgresql/v1beta1/zz_flexibleserverconfiguration_types.go +++ b/apis/dbforpostgresql/v1beta1/zz_flexibleserverconfiguration_types.go @@ -19,7 +19,7 @@ type FlexibleServerConfigurationInitParameters struct { Name *string `json:"name,omitempty" tf:"name,omitempty"` // The ID of the PostgreSQL Flexible Server where we want to change configuration. 
Changing this forces a new PostgreSQL Flexible Server Configuration resource. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dbforpostgresql/v1beta1.FlexibleServer + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dbforpostgresql/v1beta2.FlexibleServer // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() ServerID *string `json:"serverId,omitempty" tf:"server_id,omitempty"` @@ -57,7 +57,7 @@ type FlexibleServerConfigurationParameters struct { Name *string `json:"name,omitempty" tf:"name,omitempty"` // The ID of the PostgreSQL Flexible Server where we want to change configuration. Changing this forces a new PostgreSQL Flexible Server Configuration resource. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dbforpostgresql/v1beta1.FlexibleServer + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dbforpostgresql/v1beta2.FlexibleServer // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() // +kubebuilder:validation:Optional ServerID *string `json:"serverId,omitempty" tf:"server_id,omitempty"` diff --git a/apis/dbforpostgresql/v1beta1/zz_flexibleserverdatabase_types.go b/apis/dbforpostgresql/v1beta1/zz_flexibleserverdatabase_types.go index a2298137d..53590863c 100755 --- a/apis/dbforpostgresql/v1beta1/zz_flexibleserverdatabase_types.go +++ b/apis/dbforpostgresql/v1beta1/zz_flexibleserverdatabase_types.go @@ -48,7 +48,7 @@ type FlexibleServerDatabaseParameters struct { Collation *string `json:"collation,omitempty" tf:"collation,omitempty"` // The ID of the Azure PostgreSQL Flexible Server from which to create this PostgreSQL Flexible Server Database. Changing this forces a new Azure PostgreSQL Flexible Server Database to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dbforpostgresql/v1beta1.FlexibleServer + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dbforpostgresql/v1beta2.FlexibleServer // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() // +kubebuilder:validation:Optional ServerID *string `json:"serverId,omitempty" tf:"server_id,omitempty"` diff --git a/apis/dbforpostgresql/v1beta1/zz_flexibleserverfirewallrule_types.go b/apis/dbforpostgresql/v1beta1/zz_flexibleserverfirewallrule_types.go index d92060b1a..bea91845c 100755 --- a/apis/dbforpostgresql/v1beta1/zz_flexibleserverfirewallrule_types.go +++ b/apis/dbforpostgresql/v1beta1/zz_flexibleserverfirewallrule_types.go @@ -44,7 +44,7 @@ type FlexibleServerFirewallRuleParameters struct { EndIPAddress *string `json:"endIpAddress,omitempty" tf:"end_ip_address,omitempty"` // The ID of the PostgreSQL Flexible Server from which to create this PostgreSQL Flexible Server Firewall Rule. Changing this forces a new PostgreSQL Flexible Server Firewall Rule to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dbforpostgresql/v1beta1.FlexibleServer + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dbforpostgresql/v1beta2.FlexibleServer // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() // +kubebuilder:validation:Optional ServerID *string `json:"serverId,omitempty" tf:"server_id,omitempty"` diff --git a/apis/dbforpostgresql/v1beta1/zz_generated.conversion_hubs.go b/apis/dbforpostgresql/v1beta1/zz_generated.conversion_hubs.go index cdebeaef7..c01899215 100755 --- a/apis/dbforpostgresql/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/dbforpostgresql/v1beta1/zz_generated.conversion_hubs.go @@ -18,9 +18,6 @@ func (tr *Database) Hub() {} // Hub marks this type as a conversion hub. 
func (tr *FirewallRule) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *FlexibleServer) Hub() {} - // Hub marks this type as a conversion hub. func (tr *FlexibleServerActiveDirectoryAdministrator) Hub() {} @@ -33,9 +30,6 @@ func (tr *FlexibleServerDatabase) Hub() {} // Hub marks this type as a conversion hub. func (tr *FlexibleServerFirewallRule) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *Server) Hub() {} - // Hub marks this type as a conversion hub. func (tr *ServerKey) Hub() {} diff --git a/apis/dbforpostgresql/v1beta1/zz_generated.conversion_spokes.go b/apis/dbforpostgresql/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..26d7416bc --- /dev/null +++ b/apis/dbforpostgresql/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,54 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this FlexibleServer to the hub type. +func (tr *FlexibleServer) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the FlexibleServer type. 
+func (tr *FlexibleServer) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Server to the hub type. +func (tr *Server) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Server type. +func (tr *Server) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/dbforpostgresql/v1beta1/zz_generated.resolvers.go b/apis/dbforpostgresql/v1beta1/zz_generated.resolvers.go index 1e2e9d410..41f97eff1 100644 --- a/apis/dbforpostgresql/v1beta1/zz_generated.resolvers.go +++ b/apis/dbforpostgresql/v1beta1/zz_generated.resolvers.go @@ -46,7 +46,7 @@ func (mg *ActiveDirectoryAdministrator) ResolveReferences( // ResolveReferences mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("dbforpostgresql.azure.upbound.io", "v1beta1", "Server", 
"ServerList") + m, l, err = apisresolver.GetManagedResource("dbforpostgresql.azure.upbound.io", "v1beta2", "Server", "ServerList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -96,7 +96,7 @@ func (mg *Configuration) ResolveReferences(ctx context.Context, c client.Reader) mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("dbforpostgresql.azure.upbound.io", "v1beta1", "Server", "ServerList") + m, l, err = apisresolver.GetManagedResource("dbforpostgresql.azure.upbound.io", "v1beta2", "Server", "ServerList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -134,7 +134,7 @@ func (mg *Configuration) ResolveReferences(ctx context.Context, c client.Reader) mg.Spec.InitProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("dbforpostgresql.azure.upbound.io", "v1beta1", "Server", "ServerList") + m, l, err = apisresolver.GetManagedResource("dbforpostgresql.azure.upbound.io", "v1beta2", "Server", "ServerList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -184,7 +184,7 @@ func (mg *Database) ResolveReferences(ctx context.Context, c client.Reader) erro mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("dbforpostgresql.azure.upbound.io", "v1beta1", "Server", "ServerList") + m, l, err = apisresolver.GetManagedResource("dbforpostgresql.azure.upbound.io", "v1beta2", "Server", "ServerList") if err != nil { 
return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -234,7 +234,7 @@ func (mg *FirewallRule) ResolveReferences(ctx context.Context, c client.Reader) mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("dbforpostgresql.azure.upbound.io", "v1beta1", "Server", "ServerList") + m, l, err = apisresolver.GetManagedResource("dbforpostgresql.azure.upbound.io", "v1beta2", "Server", "ServerList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -391,7 +391,7 @@ func (mg *FlexibleServerActiveDirectoryAdministrator) ResolveReferences(ctx cont mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("dbforpostgresql.azure.upbound.io", "v1beta1", "FlexibleServer", "FlexibleServerList") + m, l, err = apisresolver.GetManagedResource("dbforpostgresql.azure.upbound.io", "v1beta2", "FlexibleServer", "FlexibleServerList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -422,7 +422,7 @@ func (mg *FlexibleServerConfiguration) ResolveReferences(ctx context.Context, c var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("dbforpostgresql.azure.upbound.io", "v1beta1", "FlexibleServer", "FlexibleServerList") + m, l, err = apisresolver.GetManagedResource("dbforpostgresql.azure.upbound.io", "v1beta2", "FlexibleServer", "FlexibleServerList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -441,7 +441,7 @@ func (mg *FlexibleServerConfiguration) 
ResolveReferences(ctx context.Context, c mg.Spec.ForProvider.ServerID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ServerIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("dbforpostgresql.azure.upbound.io", "v1beta1", "FlexibleServer", "FlexibleServerList") + m, l, err = apisresolver.GetManagedResource("dbforpostgresql.azure.upbound.io", "v1beta2", "FlexibleServer", "FlexibleServerList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -472,7 +472,7 @@ func (mg *FlexibleServerDatabase) ResolveReferences(ctx context.Context, c clien var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("dbforpostgresql.azure.upbound.io", "v1beta1", "FlexibleServer", "FlexibleServerList") + m, l, err = apisresolver.GetManagedResource("dbforpostgresql.azure.upbound.io", "v1beta2", "FlexibleServer", "FlexibleServerList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -503,7 +503,7 @@ func (mg *FlexibleServerFirewallRule) ResolveReferences(ctx context.Context, c c var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("dbforpostgresql.azure.upbound.io", "v1beta1", "FlexibleServer", "FlexibleServerList") + m, l, err = apisresolver.GetManagedResource("dbforpostgresql.azure.upbound.io", "v1beta2", "FlexibleServer", "FlexibleServerList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -565,7 +565,7 @@ func (mg *ServerKey) ResolveReferences(ctx context.Context, c client.Reader) err var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta1", "Key", "KeyList") + m, l, err = 
apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta2", "Key", "KeyList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -584,7 +584,7 @@ func (mg *ServerKey) ResolveReferences(ctx context.Context, c client.Reader) err mg.Spec.ForProvider.KeyVaultKeyID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.KeyVaultKeyIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("dbforpostgresql.azure.upbound.io", "v1beta1", "Server", "ServerList") + m, l, err = apisresolver.GetManagedResource("dbforpostgresql.azure.upbound.io", "v1beta2", "Server", "ServerList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -603,7 +603,7 @@ func (mg *ServerKey) ResolveReferences(ctx context.Context, c client.Reader) err mg.Spec.ForProvider.ServerID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ServerIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta1", "Key", "KeyList") + m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta2", "Key", "KeyList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -622,7 +622,7 @@ func (mg *ServerKey) ResolveReferences(ctx context.Context, c client.Reader) err mg.Spec.InitProvider.KeyVaultKeyID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.KeyVaultKeyIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("dbforpostgresql.azure.upbound.io", "v1beta1", "Server", "ServerList") + m, l, err = apisresolver.GetManagedResource("dbforpostgresql.azure.upbound.io", "v1beta2", "Server", "ServerList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for 
reference resolution") } @@ -672,7 +672,7 @@ func (mg *VirtualNetworkRule) ResolveReferences(ctx context.Context, c client.Re mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("dbforpostgresql.azure.upbound.io", "v1beta1", "Server", "ServerList") + m, l, err = apisresolver.GetManagedResource("dbforpostgresql.azure.upbound.io", "v1beta2", "Server", "ServerList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -691,7 +691,7 @@ func (mg *VirtualNetworkRule) ResolveReferences(ctx context.Context, c client.Re mg.Spec.ForProvider.ServerName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ServerNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "Subnet", "SubnetList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -710,7 +710,7 @@ func (mg *VirtualNetworkRule) ResolveReferences(ctx context.Context, c client.Re mg.Spec.ForProvider.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.SubnetIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "Subnet", "SubnetList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/dbforpostgresql/v1beta1/zz_serverkey_types.go b/apis/dbforpostgresql/v1beta1/zz_serverkey_types.go index 3264db3c6..961374613 100755 --- 
a/apis/dbforpostgresql/v1beta1/zz_serverkey_types.go +++ b/apis/dbforpostgresql/v1beta1/zz_serverkey_types.go @@ -16,7 +16,7 @@ import ( type ServerKeyInitParameters struct { // The URL to a Key Vault Key. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta1.Key + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta2.Key // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() KeyVaultKeyID *string `json:"keyVaultKeyId,omitempty" tf:"key_vault_key_id,omitempty"` @@ -29,7 +29,7 @@ type ServerKeyInitParameters struct { KeyVaultKeyIDSelector *v1.Selector `json:"keyVaultKeyIdSelector,omitempty" tf:"-"` // The ID of the PostgreSQL Server. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dbforpostgresql/v1beta1.Server + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dbforpostgresql/v1beta2.Server // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() ServerID *string `json:"serverId,omitempty" tf:"server_id,omitempty"` @@ -57,7 +57,7 @@ type ServerKeyObservation struct { type ServerKeyParameters struct { // The URL to a Key Vault Key. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta1.Key + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta2.Key // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() // +kubebuilder:validation:Optional KeyVaultKeyID *string `json:"keyVaultKeyId,omitempty" tf:"key_vault_key_id,omitempty"` @@ -71,7 +71,7 @@ type ServerKeyParameters struct { KeyVaultKeyIDSelector *v1.Selector `json:"keyVaultKeyIdSelector,omitempty" tf:"-"` // The ID of the PostgreSQL Server. 
Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dbforpostgresql/v1beta1.Server + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dbforpostgresql/v1beta2.Server // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() // +kubebuilder:validation:Optional ServerID *string `json:"serverId,omitempty" tf:"server_id,omitempty"` diff --git a/apis/dbforpostgresql/v1beta1/zz_virtualnetworkrule_types.go b/apis/dbforpostgresql/v1beta1/zz_virtualnetworkrule_types.go index c8ce907a6..e74ff0b22 100755 --- a/apis/dbforpostgresql/v1beta1/zz_virtualnetworkrule_types.go +++ b/apis/dbforpostgresql/v1beta1/zz_virtualnetworkrule_types.go @@ -19,7 +19,7 @@ type VirtualNetworkRuleInitParameters struct { IgnoreMissingVnetServiceEndpoint *bool `json:"ignoreMissingVnetServiceEndpoint,omitempty" tf:"ignore_missing_vnet_service_endpoint,omitempty"` // The ID of the subnet that the PostgreSQL server will be connected to. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.Subnet + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` @@ -70,7 +70,7 @@ type VirtualNetworkRuleParameters struct { ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` // The name of the SQL Server to which this PostgreSQL virtual network rule will be applied to. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dbforpostgresql/v1beta1.Server + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/dbforpostgresql/v1beta2.Server // +kubebuilder:validation:Optional ServerName *string `json:"serverName,omitempty" tf:"server_name,omitempty"` @@ -83,7 +83,7 @@ type VirtualNetworkRuleParameters struct { ServerNameSelector *v1.Selector `json:"serverNameSelector,omitempty" tf:"-"` // The ID of the subnet that the PostgreSQL server will be connected to. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.Subnet + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` diff --git a/apis/dbforpostgresql/v1beta2/zz_flexibleserver_terraformed.go b/apis/dbforpostgresql/v1beta2/zz_flexibleserver_terraformed.go new file mode 100755 index 000000000..28e294c7c --- /dev/null +++ b/apis/dbforpostgresql/v1beta2/zz_flexibleserver_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this FlexibleServer +func (mg *FlexibleServer) GetTerraformResourceType() string { + return "azurerm_postgresql_flexible_server" +} + +// GetConnectionDetailsMapping for this FlexibleServer +func (tr *FlexibleServer) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"administrator_password": "spec.forProvider.administratorPasswordSecretRef"} +} + +// GetObservation of this FlexibleServer +func (tr *FlexibleServer) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this FlexibleServer +func (tr *FlexibleServer) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this FlexibleServer +func (tr *FlexibleServer) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this FlexibleServer +func (tr *FlexibleServer) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this FlexibleServer +func (tr *FlexibleServer) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this FlexibleServer +func (tr *FlexibleServer) 
GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this FlexibleServer +func (tr *FlexibleServer) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this FlexibleServer using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *FlexibleServer) LateInitialize(attrs []byte) (bool, error) { + params := &FlexibleServerParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *FlexibleServer) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/dbforpostgresql/v1beta2/zz_flexibleserver_types.go b/apis/dbforpostgresql/v1beta2/zz_flexibleserver_types.go new file mode 100755 index 000000000..fb04787f5 --- /dev/null +++ b/apis/dbforpostgresql/v1beta2/zz_flexibleserver_types.go @@ -0,0 +1,570 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AuthenticationInitParameters struct { + + // Whether or not Active Directory authentication is allowed to access the PostgreSQL Flexible Server. Defaults to false. + ActiveDirectoryAuthEnabled *bool `json:"activeDirectoryAuthEnabled,omitempty" tf:"active_directory_auth_enabled,omitempty"` + + // Whether or not password authentication is allowed to access the PostgreSQL Flexible Server. Defaults to true. + PasswordAuthEnabled *bool `json:"passwordAuthEnabled,omitempty" tf:"password_auth_enabled,omitempty"` + + // The Tenant ID of the Azure Active Directory which is used by the Active Directory authentication. active_directory_auth_enabled must be set to true. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` +} + +type AuthenticationObservation struct { + + // Whether or not Active Directory authentication is allowed to access the PostgreSQL Flexible Server. Defaults to false. + ActiveDirectoryAuthEnabled *bool `json:"activeDirectoryAuthEnabled,omitempty" tf:"active_directory_auth_enabled,omitempty"` + + // Whether or not password authentication is allowed to access the PostgreSQL Flexible Server. Defaults to true. 
+ PasswordAuthEnabled *bool `json:"passwordAuthEnabled,omitempty" tf:"password_auth_enabled,omitempty"` + + // The Tenant ID of the Azure Active Directory which is used by the Active Directory authentication. active_directory_auth_enabled must be set to true. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` +} + +type AuthenticationParameters struct { + + // Whether or not Active Directory authentication is allowed to access the PostgreSQL Flexible Server. Defaults to false. + // +kubebuilder:validation:Optional + ActiveDirectoryAuthEnabled *bool `json:"activeDirectoryAuthEnabled,omitempty" tf:"active_directory_auth_enabled,omitempty"` + + // Whether or not password authentication is allowed to access the PostgreSQL Flexible Server. Defaults to true. + // +kubebuilder:validation:Optional + PasswordAuthEnabled *bool `json:"passwordAuthEnabled,omitempty" tf:"password_auth_enabled,omitempty"` + + // The Tenant ID of the Azure Active Directory which is used by the Active Directory authentication. active_directory_auth_enabled must be set to true. + // +kubebuilder:validation:Optional + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` +} + +type CustomerManagedKeyInitParameters struct { + + // The ID of the geo backup Key Vault Key. It can't cross region and need Customer Managed Key in same region as geo backup. + GeoBackupKeyVaultKeyID *string `json:"geoBackupKeyVaultKeyId,omitempty" tf:"geo_backup_key_vault_key_id,omitempty"` + + // The geo backup user managed identity id for a Customer Managed Key. Should be added with identity_ids. It can't cross region and need identity in same region as geo backup. + GeoBackupUserAssignedIdentityID *string `json:"geoBackupUserAssignedIdentityId,omitempty" tf:"geo_backup_user_assigned_identity_id,omitempty"` + + // The ID of the Key Vault Key. 
+ KeyVaultKeyID *string `json:"keyVaultKeyId,omitempty" tf:"key_vault_key_id,omitempty"` + + // Specifies the primary user managed identity id for a Customer Managed Key. Should be added with identity_ids. + PrimaryUserAssignedIdentityID *string `json:"primaryUserAssignedIdentityId,omitempty" tf:"primary_user_assigned_identity_id,omitempty"` +} + +type CustomerManagedKeyObservation struct { + + // The ID of the geo backup Key Vault Key. It can't cross region and need Customer Managed Key in same region as geo backup. + GeoBackupKeyVaultKeyID *string `json:"geoBackupKeyVaultKeyId,omitempty" tf:"geo_backup_key_vault_key_id,omitempty"` + + // The geo backup user managed identity id for a Customer Managed Key. Should be added with identity_ids. It can't cross region and need identity in same region as geo backup. + GeoBackupUserAssignedIdentityID *string `json:"geoBackupUserAssignedIdentityId,omitempty" tf:"geo_backup_user_assigned_identity_id,omitempty"` + + // The ID of the Key Vault Key. + KeyVaultKeyID *string `json:"keyVaultKeyId,omitempty" tf:"key_vault_key_id,omitempty"` + + // Specifies the primary user managed identity id for a Customer Managed Key. Should be added with identity_ids. + PrimaryUserAssignedIdentityID *string `json:"primaryUserAssignedIdentityId,omitempty" tf:"primary_user_assigned_identity_id,omitempty"` +} + +type CustomerManagedKeyParameters struct { + + // The ID of the geo backup Key Vault Key. It can't cross region and need Customer Managed Key in same region as geo backup. + // +kubebuilder:validation:Optional + GeoBackupKeyVaultKeyID *string `json:"geoBackupKeyVaultKeyId,omitempty" tf:"geo_backup_key_vault_key_id,omitempty"` + + // The geo backup user managed identity id for a Customer Managed Key. Should be added with identity_ids. It can't cross region and need identity in same region as geo backup. 
+ // +kubebuilder:validation:Optional + GeoBackupUserAssignedIdentityID *string `json:"geoBackupUserAssignedIdentityId,omitempty" tf:"geo_backup_user_assigned_identity_id,omitempty"` + + // The ID of the Key Vault Key. + // +kubebuilder:validation:Optional + KeyVaultKeyID *string `json:"keyVaultKeyId" tf:"key_vault_key_id,omitempty"` + + // Specifies the primary user managed identity id for a Customer Managed Key. Should be added with identity_ids. + // +kubebuilder:validation:Optional + PrimaryUserAssignedIdentityID *string `json:"primaryUserAssignedIdentityId,omitempty" tf:"primary_user_assigned_identity_id,omitempty"` +} + +type FlexibleServerInitParameters struct { + + // The Administrator login for the PostgreSQL Flexible Server. Required when create_mode is Default and authentication.password_auth_enabled is true. + AdministratorLogin *string `json:"administratorLogin,omitempty" tf:"administrator_login,omitempty"` + + // An authentication block as defined below. + Authentication *AuthenticationInitParameters `json:"authentication,omitempty" tf:"authentication,omitempty"` + + // Is the storage auto grow for PostgreSQL Flexible Server enabled? Defaults to false. + AutoGrowEnabled *bool `json:"autoGrowEnabled,omitempty" tf:"auto_grow_enabled,omitempty"` + + // The backup retention days for the PostgreSQL Flexible Server. Possible values are between 7 and 35 days. + BackupRetentionDays *float64 `json:"backupRetentionDays,omitempty" tf:"backup_retention_days,omitempty"` + + // The creation mode which can be used to restore or replicate existing servers. Possible values are Default, PointInTimeRestore, Replica and Update. + CreateMode *string `json:"createMode,omitempty" tf:"create_mode,omitempty"` + + // A customer_managed_key block as defined below. Changing this forces a new resource to be created. 
+ CustomerManagedKey *CustomerManagedKeyInitParameters `json:"customerManagedKey,omitempty" tf:"customer_managed_key,omitempty"` + + // The ID of the virtual network subnet to create the PostgreSQL Flexible Server. The provided subnet should not have any other resource deployed in it and this subnet will be delegated to the PostgreSQL Flexible Server, if not already delegated. Changing this forces a new PostgreSQL Flexible Server to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + DelegatedSubnetID *string `json:"delegatedSubnetId,omitempty" tf:"delegated_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate delegatedSubnetId. + // +kubebuilder:validation:Optional + DelegatedSubnetIDRef *v1.Reference `json:"delegatedSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate delegatedSubnetId. + // +kubebuilder:validation:Optional + DelegatedSubnetIDSelector *v1.Selector `json:"delegatedSubnetIdSelector,omitempty" tf:"-"` + + // Is Geo-Redundant backup enabled on the PostgreSQL Flexible Server. Defaults to false. Changing this forces a new PostgreSQL Flexible Server to be created. + GeoRedundantBackupEnabled *bool `json:"geoRedundantBackupEnabled,omitempty" tf:"geo_redundant_backup_enabled,omitempty"` + + // A high_availability block as defined below. + HighAvailability *HighAvailabilityInitParameters `json:"highAvailability,omitempty" tf:"high_availability,omitempty"` + + // An identity block as defined below. + Identity *IdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The Azure Region where the PostgreSQL Flexible Server should exist. Changing this forces a new PostgreSQL Flexible Server to be created. 
+ Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A maintenance_window block as defined below. + MaintenanceWindow *MaintenanceWindowInitParameters `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"` + + // The point in time to restore from source_server_id when create_mode is PointInTimeRestore. Changing this forces a new PostgreSQL Flexible Server to be created. + PointInTimeRestoreTimeInUtc *string `json:"pointInTimeRestoreTimeInUtc,omitempty" tf:"point_in_time_restore_time_in_utc,omitempty"` + + // The ID of the private DNS zone to create the PostgreSQL Flexible Server. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.PrivateDNSZone + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + PrivateDNSZoneID *string `json:"privateDnsZoneId,omitempty" tf:"private_dns_zone_id,omitempty"` + + // Reference to a PrivateDNSZone in network to populate privateDnsZoneId. + // +kubebuilder:validation:Optional + PrivateDNSZoneIDRef *v1.Reference `json:"privateDnsZoneIdRef,omitempty" tf:"-"` + + // Selector for a PrivateDNSZone in network to populate privateDnsZoneId. + // +kubebuilder:validation:Optional + PrivateDNSZoneIDSelector *v1.Selector `json:"privateDnsZoneIdSelector,omitempty" tf:"-"` + + // The replication role for the PostgreSQL Flexible Server. Possible value is None. + ReplicationRole *string `json:"replicationRole,omitempty" tf:"replication_role,omitempty"` + + // The SKU Name for the PostgreSQL Flexible Server. The name of the SKU, follows the tier + name pattern (e.g. B_Standard_B1ms, GP_Standard_D2s_v3, MO_Standard_E4s_v3). + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // The resource ID of the source PostgreSQL Flexible Server to be restored. Required when create_mode is PointInTimeRestore or Replica. Changing this forces a new PostgreSQL Flexible Server to be created. 
+ SourceServerID *string `json:"sourceServerId,omitempty" tf:"source_server_id,omitempty"` + + // The max storage allowed for the PostgreSQL Flexible Server. Possible values are 32768, 65536, 131072, 262144, 524288, 1048576, 2097152, 4193280, 4194304, 8388608, 16777216 and 33553408. + StorageMb *float64 `json:"storageMb,omitempty" tf:"storage_mb,omitempty"` + + // The name of storage performance tier for IOPS of the PostgreSQL Flexible Server. Possible values are P4, P6, P10, P15,P20, P30,P40, P50,P60, P70 or P80. Default value is dependant on the storage_mb value. Please see the storage_tier defaults based on storage_mb table below. + StorageTier *string `json:"storageTier,omitempty" tf:"storage_tier,omitempty"` + + // A mapping of tags which should be assigned to the PostgreSQL Flexible Server. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The version of PostgreSQL Flexible Server to use. Possible values are 11,12, 13, 14, 15 and 16. Required when create_mode is Default. + Version *string `json:"version,omitempty" tf:"version,omitempty"` + + // Specifies the Availability Zone in which the PostgreSQL Flexible Server should be located. + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` +} + +type FlexibleServerObservation struct { + + // The Administrator login for the PostgreSQL Flexible Server. Required when create_mode is Default and authentication.password_auth_enabled is true. + AdministratorLogin *string `json:"administratorLogin,omitempty" tf:"administrator_login,omitempty"` + + // An authentication block as defined below. + Authentication *AuthenticationObservation `json:"authentication,omitempty" tf:"authentication,omitempty"` + + // Is the storage auto grow for PostgreSQL Flexible Server enabled? Defaults to false. + AutoGrowEnabled *bool `json:"autoGrowEnabled,omitempty" tf:"auto_grow_enabled,omitempty"` + + // The backup retention days for the PostgreSQL Flexible Server. 
Possible values are between 7 and 35 days. + BackupRetentionDays *float64 `json:"backupRetentionDays,omitempty" tf:"backup_retention_days,omitempty"` + + // The creation mode which can be used to restore or replicate existing servers. Possible values are Default, PointInTimeRestore, Replica and Update. + CreateMode *string `json:"createMode,omitempty" tf:"create_mode,omitempty"` + + // A customer_managed_key block as defined below. Changing this forces a new resource to be created. + CustomerManagedKey *CustomerManagedKeyObservation `json:"customerManagedKey,omitempty" tf:"customer_managed_key,omitempty"` + + // The ID of the virtual network subnet to create the PostgreSQL Flexible Server. The provided subnet should not have any other resource deployed in it and this subnet will be delegated to the PostgreSQL Flexible Server, if not already delegated. Changing this forces a new PostgreSQL Flexible Server to be created. + DelegatedSubnetID *string `json:"delegatedSubnetId,omitempty" tf:"delegated_subnet_id,omitempty"` + + // The FQDN of the PostgreSQL Flexible Server. + Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"` + + // Is Geo-Redundant backup enabled on the PostgreSQL Flexible Server. Defaults to false. Changing this forces a new PostgreSQL Flexible Server to be created. + GeoRedundantBackupEnabled *bool `json:"geoRedundantBackupEnabled,omitempty" tf:"geo_redundant_backup_enabled,omitempty"` + + // A high_availability block as defined below. + HighAvailability *HighAvailabilityObservation `json:"highAvailability,omitempty" tf:"high_availability,omitempty"` + + // The ID of the PostgreSQL Flexible Server. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. + Identity *IdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // The Azure Region where the PostgreSQL Flexible Server should exist. Changing this forces a new PostgreSQL Flexible Server to be created. 
+ Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A maintenance_window block as defined below. + MaintenanceWindow *MaintenanceWindowObservation `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"` + + // The point in time to restore from source_server_id when create_mode is PointInTimeRestore. Changing this forces a new PostgreSQL Flexible Server to be created. + PointInTimeRestoreTimeInUtc *string `json:"pointInTimeRestoreTimeInUtc,omitempty" tf:"point_in_time_restore_time_in_utc,omitempty"` + + // The ID of the private DNS zone to create the PostgreSQL Flexible Server. + PrivateDNSZoneID *string `json:"privateDnsZoneId,omitempty" tf:"private_dns_zone_id,omitempty"` + + // Is public network access enabled? + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The replication role for the PostgreSQL Flexible Server. Possible value is None. + ReplicationRole *string `json:"replicationRole,omitempty" tf:"replication_role,omitempty"` + + // The name of the Resource Group where the PostgreSQL Flexible Server should exist. Changing this forces a new PostgreSQL Flexible Server to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // The SKU Name for the PostgreSQL Flexible Server. The name of the SKU, follows the tier + name pattern (e.g. B_Standard_B1ms, GP_Standard_D2s_v3, MO_Standard_E4s_v3). + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // The resource ID of the source PostgreSQL Flexible Server to be restored. Required when create_mode is PointInTimeRestore or Replica. Changing this forces a new PostgreSQL Flexible Server to be created. + SourceServerID *string `json:"sourceServerId,omitempty" tf:"source_server_id,omitempty"` + + // The max storage allowed for the PostgreSQL Flexible Server. 
Possible values are 32768, 65536, 131072, 262144, 524288, 1048576, 2097152, 4193280, 4194304, 8388608, 16777216 and 33553408. + StorageMb *float64 `json:"storageMb,omitempty" tf:"storage_mb,omitempty"` + + // The name of storage performance tier for IOPS of the PostgreSQL Flexible Server. Possible values are P4, P6, P10, P15,P20, P30,P40, P50,P60, P70 or P80. Default value is dependant on the storage_mb value. Please see the storage_tier defaults based on storage_mb table below. + StorageTier *string `json:"storageTier,omitempty" tf:"storage_tier,omitempty"` + + // A mapping of tags which should be assigned to the PostgreSQL Flexible Server. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The version of PostgreSQL Flexible Server to use. Possible values are 11,12, 13, 14, 15 and 16. Required when create_mode is Default. + Version *string `json:"version,omitempty" tf:"version,omitempty"` + + // Specifies the Availability Zone in which the PostgreSQL Flexible Server should be located. + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` +} + +type FlexibleServerParameters struct { + + // The Administrator login for the PostgreSQL Flexible Server. Required when create_mode is Default and authentication.password_auth_enabled is true. + // +kubebuilder:validation:Optional + AdministratorLogin *string `json:"administratorLogin,omitempty" tf:"administrator_login,omitempty"` + + // The Password associated with the administrator_login for the PostgreSQL Flexible Server. Required when create_mode is Default and authentication.password_auth_enabled is true. + // Password for the master DB user. If you set autoGeneratePassword to true, the Secret referenced here will be created or updated with generated password if it does not already contain one. 
+ // +kubebuilder:validation:Optional + AdministratorPasswordSecretRef *v1.SecretKeySelector `json:"administratorPasswordSecretRef,omitempty" tf:"-"` + + // An authentication block as defined below. + // +kubebuilder:validation:Optional + Authentication *AuthenticationParameters `json:"authentication,omitempty" tf:"authentication,omitempty"` + + // If true, the password will be auto-generated and stored in the Secret referenced by the administratorPasswordSecretRef field. + // +upjet:crd:field:TFTag=- + // +kubebuilder:validation:Optional + AutoGeneratePassword *bool `json:"autoGeneratePassword,omitempty" tf:"-"` + + // Is the storage auto grow for PostgreSQL Flexible Server enabled? Defaults to false. + // +kubebuilder:validation:Optional + AutoGrowEnabled *bool `json:"autoGrowEnabled,omitempty" tf:"auto_grow_enabled,omitempty"` + + // The backup retention days for the PostgreSQL Flexible Server. Possible values are between 7 and 35 days. + // +kubebuilder:validation:Optional + BackupRetentionDays *float64 `json:"backupRetentionDays,omitempty" tf:"backup_retention_days,omitempty"` + + // The creation mode which can be used to restore or replicate existing servers. Possible values are Default, PointInTimeRestore, Replica and Update. + // +kubebuilder:validation:Optional + CreateMode *string `json:"createMode,omitempty" tf:"create_mode,omitempty"` + + // A customer_managed_key block as defined below. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + CustomerManagedKey *CustomerManagedKeyParameters `json:"customerManagedKey,omitempty" tf:"customer_managed_key,omitempty"` + + // The ID of the virtual network subnet to create the PostgreSQL Flexible Server. The provided subnet should not have any other resource deployed in it and this subnet will be delegated to the PostgreSQL Flexible Server, if not already delegated. Changing this forces a new PostgreSQL Flexible Server to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + DelegatedSubnetID *string `json:"delegatedSubnetId,omitempty" tf:"delegated_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate delegatedSubnetId. + // +kubebuilder:validation:Optional + DelegatedSubnetIDRef *v1.Reference `json:"delegatedSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate delegatedSubnetId. + // +kubebuilder:validation:Optional + DelegatedSubnetIDSelector *v1.Selector `json:"delegatedSubnetIdSelector,omitempty" tf:"-"` + + // Is Geo-Redundant backup enabled on the PostgreSQL Flexible Server. Defaults to false. Changing this forces a new PostgreSQL Flexible Server to be created. + // +kubebuilder:validation:Optional + GeoRedundantBackupEnabled *bool `json:"geoRedundantBackupEnabled,omitempty" tf:"geo_redundant_backup_enabled,omitempty"` + + // A high_availability block as defined below. + // +kubebuilder:validation:Optional + HighAvailability *HighAvailabilityParameters `json:"highAvailability,omitempty" tf:"high_availability,omitempty"` + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *IdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The Azure Region where the PostgreSQL Flexible Server should exist. Changing this forces a new PostgreSQL Flexible Server to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A maintenance_window block as defined below. + // +kubebuilder:validation:Optional + MaintenanceWindow *MaintenanceWindowParameters `json:"maintenanceWindow,omitempty" tf:"maintenance_window,omitempty"` + + // The point in time to restore from source_server_id when create_mode is PointInTimeRestore. 
Changing this forces a new PostgreSQL Flexible Server to be created. + // +kubebuilder:validation:Optional + PointInTimeRestoreTimeInUtc *string `json:"pointInTimeRestoreTimeInUtc,omitempty" tf:"point_in_time_restore_time_in_utc,omitempty"` + + // The ID of the private DNS zone to create the PostgreSQL Flexible Server. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.PrivateDNSZone + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + PrivateDNSZoneID *string `json:"privateDnsZoneId,omitempty" tf:"private_dns_zone_id,omitempty"` + + // Reference to a PrivateDNSZone in network to populate privateDnsZoneId. + // +kubebuilder:validation:Optional + PrivateDNSZoneIDRef *v1.Reference `json:"privateDnsZoneIdRef,omitempty" tf:"-"` + + // Selector for a PrivateDNSZone in network to populate privateDnsZoneId. + // +kubebuilder:validation:Optional + PrivateDNSZoneIDSelector *v1.Selector `json:"privateDnsZoneIdSelector,omitempty" tf:"-"` + + // The replication role for the PostgreSQL Flexible Server. Possible value is None. + // +kubebuilder:validation:Optional + ReplicationRole *string `json:"replicationRole,omitempty" tf:"replication_role,omitempty"` + + // The name of the Resource Group where the PostgreSQL Flexible Server should exist. Changing this forces a new PostgreSQL Flexible Server to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. 
+ // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // The SKU Name for the PostgreSQL Flexible Server. The name of the SKU, follows the tier + name pattern (e.g. B_Standard_B1ms, GP_Standard_D2s_v3, MO_Standard_E4s_v3). + // +kubebuilder:validation:Optional + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // The resource ID of the source PostgreSQL Flexible Server to be restored. Required when create_mode is PointInTimeRestore or Replica. Changing this forces a new PostgreSQL Flexible Server to be created. + // +kubebuilder:validation:Optional + SourceServerID *string `json:"sourceServerId,omitempty" tf:"source_server_id,omitempty"` + + // The max storage allowed for the PostgreSQL Flexible Server. Possible values are 32768, 65536, 131072, 262144, 524288, 1048576, 2097152, 4193280, 4194304, 8388608, 16777216 and 33553408. + // +kubebuilder:validation:Optional + StorageMb *float64 `json:"storageMb,omitempty" tf:"storage_mb,omitempty"` + + // The name of storage performance tier for IOPS of the PostgreSQL Flexible Server. Possible values are P4, P6, P10, P15,P20, P30,P40, P50,P60, P70 or P80. Default value is dependant on the storage_mb value. Please see the storage_tier defaults based on storage_mb table below. + // +kubebuilder:validation:Optional + StorageTier *string `json:"storageTier,omitempty" tf:"storage_tier,omitempty"` + + // A mapping of tags which should be assigned to the PostgreSQL Flexible Server. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The version of PostgreSQL Flexible Server to use. Possible values are 11,12, 13, 14, 15 and 16. Required when create_mode is Default. 
+ // +kubebuilder:validation:Optional + Version *string `json:"version,omitempty" tf:"version,omitempty"` + + // Specifies the Availability Zone in which the PostgreSQL Flexible Server should be located. + // +kubebuilder:validation:Optional + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` +} + +type HighAvailabilityInitParameters struct { + + // The high availability mode for the PostgreSQL Flexible Server. Possible value are SameZone or ZoneRedundant. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // Specifies the Availability Zone in which the standby Flexible Server should be located. + StandbyAvailabilityZone *string `json:"standbyAvailabilityZone,omitempty" tf:"standby_availability_zone,omitempty"` +} + +type HighAvailabilityObservation struct { + + // The high availability mode for the PostgreSQL Flexible Server. Possible value are SameZone or ZoneRedundant. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // Specifies the Availability Zone in which the standby Flexible Server should be located. + StandbyAvailabilityZone *string `json:"standbyAvailabilityZone,omitempty" tf:"standby_availability_zone,omitempty"` +} + +type HighAvailabilityParameters struct { + + // The high availability mode for the PostgreSQL Flexible Server. Possible value are SameZone or ZoneRedundant. + // +kubebuilder:validation:Optional + Mode *string `json:"mode" tf:"mode,omitempty"` + + // Specifies the Availability Zone in which the standby Flexible Server should be located. + // +kubebuilder:validation:Optional + StandbyAvailabilityZone *string `json:"standbyAvailabilityZone,omitempty" tf:"standby_availability_zone,omitempty"` +} + +type IdentityInitParameters struct { + + // A list of User Assigned Managed Identity IDs to be assigned to this PostgreSQL Flexible Server. Required if used together with customer_managed_key block. 
+ // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this PostgreSQL Flexible Server. The only possible value is UserAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityObservation struct { + + // A list of User Assigned Managed Identity IDs to be assigned to this PostgreSQL Flexible Server. Required if used together with customer_managed_key block. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this PostgreSQL Flexible Server. The only possible value is UserAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityParameters struct { + + // A list of User Assigned Managed Identity IDs to be assigned to this PostgreSQL Flexible Server. Required if used together with customer_managed_key block. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this PostgreSQL Flexible Server. The only possible value is UserAssigned. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type MaintenanceWindowInitParameters struct { + + // The day of week for maintenance window, where the week starts on a Sunday, i.e. Sunday = 0, Monday = 1. Defaults to 0. + DayOfWeek *float64 `json:"dayOfWeek,omitempty" tf:"day_of_week,omitempty"` + + // The start hour for maintenance window. Defaults to 0. + StartHour *float64 `json:"startHour,omitempty" tf:"start_hour,omitempty"` + + // The start minute for maintenance window. Defaults to 0. 
+ StartMinute *float64 `json:"startMinute,omitempty" tf:"start_minute,omitempty"` +} + +type MaintenanceWindowObservation struct { + + // The day of week for maintenance window, where the week starts on a Sunday, i.e. Sunday = 0, Monday = 1. Defaults to 0. + DayOfWeek *float64 `json:"dayOfWeek,omitempty" tf:"day_of_week,omitempty"` + + // The start hour for maintenance window. Defaults to 0. + StartHour *float64 `json:"startHour,omitempty" tf:"start_hour,omitempty"` + + // The start minute for maintenance window. Defaults to 0. + StartMinute *float64 `json:"startMinute,omitempty" tf:"start_minute,omitempty"` +} + +type MaintenanceWindowParameters struct { + + // The day of week for maintenance window, where the week starts on a Sunday, i.e. Sunday = 0, Monday = 1. Defaults to 0. + // +kubebuilder:validation:Optional + DayOfWeek *float64 `json:"dayOfWeek,omitempty" tf:"day_of_week,omitempty"` + + // The start hour for maintenance window. Defaults to 0. + // +kubebuilder:validation:Optional + StartHour *float64 `json:"startHour,omitempty" tf:"start_hour,omitempty"` + + // The start minute for maintenance window. Defaults to 0. + // +kubebuilder:validation:Optional + StartMinute *float64 `json:"startMinute,omitempty" tf:"start_minute,omitempty"` +} + +// FlexibleServerSpec defines the desired state of FlexibleServer +type FlexibleServerSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider FlexibleServerParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider FlexibleServerInitParameters `json:"initProvider,omitempty"` +} + +// FlexibleServerStatus defines the observed state of FlexibleServer. +type FlexibleServerStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider FlexibleServerObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// FlexibleServer is the Schema for the FlexibleServers API. Manages a PostgreSQL Flexible Server. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type FlexibleServer struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + Spec FlexibleServerSpec `json:"spec"` + Status FlexibleServerStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// FlexibleServerList contains a list of FlexibleServers +type FlexibleServerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []FlexibleServer `json:"items"` +} + +// 
Repository type metadata. +var ( + FlexibleServer_Kind = "FlexibleServer" + FlexibleServer_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: FlexibleServer_Kind}.String() + FlexibleServer_KindAPIVersion = FlexibleServer_Kind + "." + CRDGroupVersion.String() + FlexibleServer_GroupVersionKind = CRDGroupVersion.WithKind(FlexibleServer_Kind) +) + +func init() { + SchemeBuilder.Register(&FlexibleServer{}, &FlexibleServerList{}) +} diff --git a/apis/dbforpostgresql/v1beta2/zz_generated.conversion_hubs.go b/apis/dbforpostgresql/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 000000000..58c88158a --- /dev/null +++ b/apis/dbforpostgresql/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,13 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *FlexibleServer) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Server) Hub() {} diff --git a/apis/dbforpostgresql/v1beta2/zz_generated.deepcopy.go b/apis/dbforpostgresql/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..441feeae3 --- /dev/null +++ b/apis/dbforpostgresql/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,1777 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuthenticationInitParameters) DeepCopyInto(out *AuthenticationInitParameters) { + *out = *in + if in.ActiveDirectoryAuthEnabled != nil { + in, out := &in.ActiveDirectoryAuthEnabled, &out.ActiveDirectoryAuthEnabled + *out = new(bool) + **out = **in + } + if in.PasswordAuthEnabled != nil { + in, out := &in.PasswordAuthEnabled, &out.PasswordAuthEnabled + *out = new(bool) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationInitParameters. +func (in *AuthenticationInitParameters) DeepCopy() *AuthenticationInitParameters { + if in == nil { + return nil + } + out := new(AuthenticationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationObservation) DeepCopyInto(out *AuthenticationObservation) { + *out = *in + if in.ActiveDirectoryAuthEnabled != nil { + in, out := &in.ActiveDirectoryAuthEnabled, &out.ActiveDirectoryAuthEnabled + *out = new(bool) + **out = **in + } + if in.PasswordAuthEnabled != nil { + in, out := &in.PasswordAuthEnabled, &out.PasswordAuthEnabled + *out = new(bool) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationObservation. +func (in *AuthenticationObservation) DeepCopy() *AuthenticationObservation { + if in == nil { + return nil + } + out := new(AuthenticationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuthenticationParameters) DeepCopyInto(out *AuthenticationParameters) { + *out = *in + if in.ActiveDirectoryAuthEnabled != nil { + in, out := &in.ActiveDirectoryAuthEnabled, &out.ActiveDirectoryAuthEnabled + *out = new(bool) + **out = **in + } + if in.PasswordAuthEnabled != nil { + in, out := &in.PasswordAuthEnabled, &out.PasswordAuthEnabled + *out = new(bool) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationParameters. +func (in *AuthenticationParameters) DeepCopy() *AuthenticationParameters { + if in == nil { + return nil + } + out := new(AuthenticationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomerManagedKeyInitParameters) DeepCopyInto(out *CustomerManagedKeyInitParameters) { + *out = *in + if in.GeoBackupKeyVaultKeyID != nil { + in, out := &in.GeoBackupKeyVaultKeyID, &out.GeoBackupKeyVaultKeyID + *out = new(string) + **out = **in + } + if in.GeoBackupUserAssignedIdentityID != nil { + in, out := &in.GeoBackupUserAssignedIdentityID, &out.GeoBackupUserAssignedIdentityID + *out = new(string) + **out = **in + } + if in.KeyVaultKeyID != nil { + in, out := &in.KeyVaultKeyID, &out.KeyVaultKeyID + *out = new(string) + **out = **in + } + if in.PrimaryUserAssignedIdentityID != nil { + in, out := &in.PrimaryUserAssignedIdentityID, &out.PrimaryUserAssignedIdentityID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomerManagedKeyInitParameters. 
+func (in *CustomerManagedKeyInitParameters) DeepCopy() *CustomerManagedKeyInitParameters { + if in == nil { + return nil + } + out := new(CustomerManagedKeyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomerManagedKeyObservation) DeepCopyInto(out *CustomerManagedKeyObservation) { + *out = *in + if in.GeoBackupKeyVaultKeyID != nil { + in, out := &in.GeoBackupKeyVaultKeyID, &out.GeoBackupKeyVaultKeyID + *out = new(string) + **out = **in + } + if in.GeoBackupUserAssignedIdentityID != nil { + in, out := &in.GeoBackupUserAssignedIdentityID, &out.GeoBackupUserAssignedIdentityID + *out = new(string) + **out = **in + } + if in.KeyVaultKeyID != nil { + in, out := &in.KeyVaultKeyID, &out.KeyVaultKeyID + *out = new(string) + **out = **in + } + if in.PrimaryUserAssignedIdentityID != nil { + in, out := &in.PrimaryUserAssignedIdentityID, &out.PrimaryUserAssignedIdentityID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomerManagedKeyObservation. +func (in *CustomerManagedKeyObservation) DeepCopy() *CustomerManagedKeyObservation { + if in == nil { + return nil + } + out := new(CustomerManagedKeyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomerManagedKeyParameters) DeepCopyInto(out *CustomerManagedKeyParameters) { + *out = *in + if in.GeoBackupKeyVaultKeyID != nil { + in, out := &in.GeoBackupKeyVaultKeyID, &out.GeoBackupKeyVaultKeyID + *out = new(string) + **out = **in + } + if in.GeoBackupUserAssignedIdentityID != nil { + in, out := &in.GeoBackupUserAssignedIdentityID, &out.GeoBackupUserAssignedIdentityID + *out = new(string) + **out = **in + } + if in.KeyVaultKeyID != nil { + in, out := &in.KeyVaultKeyID, &out.KeyVaultKeyID + *out = new(string) + **out = **in + } + if in.PrimaryUserAssignedIdentityID != nil { + in, out := &in.PrimaryUserAssignedIdentityID, &out.PrimaryUserAssignedIdentityID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomerManagedKeyParameters. +func (in *CustomerManagedKeyParameters) DeepCopy() *CustomerManagedKeyParameters { + if in == nil { + return nil + } + out := new(CustomerManagedKeyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlexibleServer) DeepCopyInto(out *FlexibleServer) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlexibleServer. +func (in *FlexibleServer) DeepCopy() *FlexibleServer { + if in == nil { + return nil + } + out := new(FlexibleServer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FlexibleServer) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *FlexibleServerInitParameters) DeepCopyInto(out *FlexibleServerInitParameters) { + *out = *in + if in.AdministratorLogin != nil { + in, out := &in.AdministratorLogin, &out.AdministratorLogin + *out = new(string) + **out = **in + } + if in.Authentication != nil { + in, out := &in.Authentication, &out.Authentication + *out = new(AuthenticationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AutoGrowEnabled != nil { + in, out := &in.AutoGrowEnabled, &out.AutoGrowEnabled + *out = new(bool) + **out = **in + } + if in.BackupRetentionDays != nil { + in, out := &in.BackupRetentionDays, &out.BackupRetentionDays + *out = new(float64) + **out = **in + } + if in.CreateMode != nil { + in, out := &in.CreateMode, &out.CreateMode + *out = new(string) + **out = **in + } + if in.CustomerManagedKey != nil { + in, out := &in.CustomerManagedKey, &out.CustomerManagedKey + *out = new(CustomerManagedKeyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DelegatedSubnetID != nil { + in, out := &in.DelegatedSubnetID, &out.DelegatedSubnetID + *out = new(string) + **out = **in + } + if in.DelegatedSubnetIDRef != nil { + in, out := &in.DelegatedSubnetIDRef, &out.DelegatedSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DelegatedSubnetIDSelector != nil { + in, out := &in.DelegatedSubnetIDSelector, &out.DelegatedSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.GeoRedundantBackupEnabled != nil { + in, out := &in.GeoRedundantBackupEnabled, &out.GeoRedundantBackupEnabled + *out = new(bool) + **out = **in + } + if in.HighAvailability != nil { + in, out := &in.HighAvailability, &out.HighAvailability + *out = new(HighAvailabilityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out 
= **in + } + if in.MaintenanceWindow != nil { + in, out := &in.MaintenanceWindow, &out.MaintenanceWindow + *out = new(MaintenanceWindowInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PointInTimeRestoreTimeInUtc != nil { + in, out := &in.PointInTimeRestoreTimeInUtc, &out.PointInTimeRestoreTimeInUtc + *out = new(string) + **out = **in + } + if in.PrivateDNSZoneID != nil { + in, out := &in.PrivateDNSZoneID, &out.PrivateDNSZoneID + *out = new(string) + **out = **in + } + if in.PrivateDNSZoneIDRef != nil { + in, out := &in.PrivateDNSZoneIDRef, &out.PrivateDNSZoneIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PrivateDNSZoneIDSelector != nil { + in, out := &in.PrivateDNSZoneIDSelector, &out.PrivateDNSZoneIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ReplicationRole != nil { + in, out := &in.ReplicationRole, &out.ReplicationRole + *out = new(string) + **out = **in + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.SourceServerID != nil { + in, out := &in.SourceServerID, &out.SourceServerID + *out = new(string) + **out = **in + } + if in.StorageMb != nil { + in, out := &in.StorageMb, &out.StorageMb + *out = new(float64) + **out = **in + } + if in.StorageTier != nil { + in, out := &in.StorageTier, &out.StorageTier + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating 
a new FlexibleServerInitParameters. +func (in *FlexibleServerInitParameters) DeepCopy() *FlexibleServerInitParameters { + if in == nil { + return nil + } + out := new(FlexibleServerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlexibleServerList) DeepCopyInto(out *FlexibleServerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]FlexibleServer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlexibleServerList. +func (in *FlexibleServerList) DeepCopy() *FlexibleServerList { + if in == nil { + return nil + } + out := new(FlexibleServerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FlexibleServerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FlexibleServerObservation) DeepCopyInto(out *FlexibleServerObservation) { + *out = *in + if in.AdministratorLogin != nil { + in, out := &in.AdministratorLogin, &out.AdministratorLogin + *out = new(string) + **out = **in + } + if in.Authentication != nil { + in, out := &in.Authentication, &out.Authentication + *out = new(AuthenticationObservation) + (*in).DeepCopyInto(*out) + } + if in.AutoGrowEnabled != nil { + in, out := &in.AutoGrowEnabled, &out.AutoGrowEnabled + *out = new(bool) + **out = **in + } + if in.BackupRetentionDays != nil { + in, out := &in.BackupRetentionDays, &out.BackupRetentionDays + *out = new(float64) + **out = **in + } + if in.CreateMode != nil { + in, out := &in.CreateMode, &out.CreateMode + *out = new(string) + **out = **in + } + if in.CustomerManagedKey != nil { + in, out := &in.CustomerManagedKey, &out.CustomerManagedKey + *out = new(CustomerManagedKeyObservation) + (*in).DeepCopyInto(*out) + } + if in.DelegatedSubnetID != nil { + in, out := &in.DelegatedSubnetID, &out.DelegatedSubnetID + *out = new(string) + **out = **in + } + if in.Fqdn != nil { + in, out := &in.Fqdn, &out.Fqdn + *out = new(string) + **out = **in + } + if in.GeoRedundantBackupEnabled != nil { + in, out := &in.GeoRedundantBackupEnabled, &out.GeoRedundantBackupEnabled + *out = new(bool) + **out = **in + } + if in.HighAvailability != nil { + in, out := &in.HighAvailability, &out.HighAvailability + *out = new(HighAvailabilityObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MaintenanceWindow != nil { + in, out := &in.MaintenanceWindow, &out.MaintenanceWindow + *out = new(MaintenanceWindowObservation) + (*in).DeepCopyInto(*out) + } + if 
in.PointInTimeRestoreTimeInUtc != nil { + in, out := &in.PointInTimeRestoreTimeInUtc, &out.PointInTimeRestoreTimeInUtc + *out = new(string) + **out = **in + } + if in.PrivateDNSZoneID != nil { + in, out := &in.PrivateDNSZoneID, &out.PrivateDNSZoneID + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ReplicationRole != nil { + in, out := &in.ReplicationRole, &out.ReplicationRole + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.SourceServerID != nil { + in, out := &in.SourceServerID, &out.SourceServerID + *out = new(string) + **out = **in + } + if in.StorageMb != nil { + in, out := &in.StorageMb, &out.StorageMb + *out = new(float64) + **out = **in + } + if in.StorageTier != nil { + in, out := &in.StorageTier, &out.StorageTier + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlexibleServerObservation. 
+func (in *FlexibleServerObservation) DeepCopy() *FlexibleServerObservation { + if in == nil { + return nil + } + out := new(FlexibleServerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlexibleServerParameters) DeepCopyInto(out *FlexibleServerParameters) { + *out = *in + if in.AdministratorLogin != nil { + in, out := &in.AdministratorLogin, &out.AdministratorLogin + *out = new(string) + **out = **in + } + if in.AdministratorPasswordSecretRef != nil { + in, out := &in.AdministratorPasswordSecretRef, &out.AdministratorPasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.Authentication != nil { + in, out := &in.Authentication, &out.Authentication + *out = new(AuthenticationParameters) + (*in).DeepCopyInto(*out) + } + if in.AutoGeneratePassword != nil { + in, out := &in.AutoGeneratePassword, &out.AutoGeneratePassword + *out = new(bool) + **out = **in + } + if in.AutoGrowEnabled != nil { + in, out := &in.AutoGrowEnabled, &out.AutoGrowEnabled + *out = new(bool) + **out = **in + } + if in.BackupRetentionDays != nil { + in, out := &in.BackupRetentionDays, &out.BackupRetentionDays + *out = new(float64) + **out = **in + } + if in.CreateMode != nil { + in, out := &in.CreateMode, &out.CreateMode + *out = new(string) + **out = **in + } + if in.CustomerManagedKey != nil { + in, out := &in.CustomerManagedKey, &out.CustomerManagedKey + *out = new(CustomerManagedKeyParameters) + (*in).DeepCopyInto(*out) + } + if in.DelegatedSubnetID != nil { + in, out := &in.DelegatedSubnetID, &out.DelegatedSubnetID + *out = new(string) + **out = **in + } + if in.DelegatedSubnetIDRef != nil { + in, out := &in.DelegatedSubnetIDRef, &out.DelegatedSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DelegatedSubnetIDSelector != nil { + in, out := &in.DelegatedSubnetIDSelector, &out.DelegatedSubnetIDSelector + *out = 
new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.GeoRedundantBackupEnabled != nil { + in, out := &in.GeoRedundantBackupEnabled, &out.GeoRedundantBackupEnabled + *out = new(bool) + **out = **in + } + if in.HighAvailability != nil { + in, out := &in.HighAvailability, &out.HighAvailability + *out = new(HighAvailabilityParameters) + (*in).DeepCopyInto(*out) + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MaintenanceWindow != nil { + in, out := &in.MaintenanceWindow, &out.MaintenanceWindow + *out = new(MaintenanceWindowParameters) + (*in).DeepCopyInto(*out) + } + if in.PointInTimeRestoreTimeInUtc != nil { + in, out := &in.PointInTimeRestoreTimeInUtc, &out.PointInTimeRestoreTimeInUtc + *out = new(string) + **out = **in + } + if in.PrivateDNSZoneID != nil { + in, out := &in.PrivateDNSZoneID, &out.PrivateDNSZoneID + *out = new(string) + **out = **in + } + if in.PrivateDNSZoneIDRef != nil { + in, out := &in.PrivateDNSZoneIDRef, &out.PrivateDNSZoneIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PrivateDNSZoneIDSelector != nil { + in, out := &in.PrivateDNSZoneIDSelector, &out.PrivateDNSZoneIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ReplicationRole != nil { + in, out := &in.ReplicationRole, &out.ReplicationRole + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if 
in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.SourceServerID != nil { + in, out := &in.SourceServerID, &out.SourceServerID + *out = new(string) + **out = **in + } + if in.StorageMb != nil { + in, out := &in.StorageMb, &out.StorageMb + *out = new(float64) + **out = **in + } + if in.StorageTier != nil { + in, out := &in.StorageTier, &out.StorageTier + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlexibleServerParameters. +func (in *FlexibleServerParameters) DeepCopy() *FlexibleServerParameters { + if in == nil { + return nil + } + out := new(FlexibleServerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlexibleServerSpec) DeepCopyInto(out *FlexibleServerSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlexibleServerSpec. 
+func (in *FlexibleServerSpec) DeepCopy() *FlexibleServerSpec { + if in == nil { + return nil + } + out := new(FlexibleServerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlexibleServerStatus) DeepCopyInto(out *FlexibleServerStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlexibleServerStatus. +func (in *FlexibleServerStatus) DeepCopy() *FlexibleServerStatus { + if in == nil { + return nil + } + out := new(FlexibleServerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HighAvailabilityInitParameters) DeepCopyInto(out *HighAvailabilityInitParameters) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.StandbyAvailabilityZone != nil { + in, out := &in.StandbyAvailabilityZone, &out.StandbyAvailabilityZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HighAvailabilityInitParameters. +func (in *HighAvailabilityInitParameters) DeepCopy() *HighAvailabilityInitParameters { + if in == nil { + return nil + } + out := new(HighAvailabilityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HighAvailabilityObservation) DeepCopyInto(out *HighAvailabilityObservation) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.StandbyAvailabilityZone != nil { + in, out := &in.StandbyAvailabilityZone, &out.StandbyAvailabilityZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HighAvailabilityObservation. +func (in *HighAvailabilityObservation) DeepCopy() *HighAvailabilityObservation { + if in == nil { + return nil + } + out := new(HighAvailabilityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HighAvailabilityParameters) DeepCopyInto(out *HighAvailabilityParameters) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.StandbyAvailabilityZone != nil { + in, out := &in.StandbyAvailabilityZone, &out.StandbyAvailabilityZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HighAvailabilityParameters. +func (in *HighAvailabilityParameters) DeepCopy() *HighAvailabilityParameters { + if in == nil { + return nil + } + out := new(HighAvailabilityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentityInitParameters) DeepCopyInto(out *IdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityInitParameters. +func (in *IdentityInitParameters) DeepCopy() *IdentityInitParameters { + if in == nil { + return nil + } + out := new(IdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityObservation) DeepCopyInto(out *IdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityObservation. +func (in *IdentityObservation) DeepCopy() *IdentityObservation { + if in == nil { + return nil + } + out := new(IdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentityParameters) DeepCopyInto(out *IdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityParameters. +func (in *IdentityParameters) DeepCopy() *IdentityParameters { + if in == nil { + return nil + } + out := new(IdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaintenanceWindowInitParameters) DeepCopyInto(out *MaintenanceWindowInitParameters) { + *out = *in + if in.DayOfWeek != nil { + in, out := &in.DayOfWeek, &out.DayOfWeek + *out = new(float64) + **out = **in + } + if in.StartHour != nil { + in, out := &in.StartHour, &out.StartHour + *out = new(float64) + **out = **in + } + if in.StartMinute != nil { + in, out := &in.StartMinute, &out.StartMinute + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceWindowInitParameters. +func (in *MaintenanceWindowInitParameters) DeepCopy() *MaintenanceWindowInitParameters { + if in == nil { + return nil + } + out := new(MaintenanceWindowInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MaintenanceWindowObservation) DeepCopyInto(out *MaintenanceWindowObservation) { + *out = *in + if in.DayOfWeek != nil { + in, out := &in.DayOfWeek, &out.DayOfWeek + *out = new(float64) + **out = **in + } + if in.StartHour != nil { + in, out := &in.StartHour, &out.StartHour + *out = new(float64) + **out = **in + } + if in.StartMinute != nil { + in, out := &in.StartMinute, &out.StartMinute + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceWindowObservation. +func (in *MaintenanceWindowObservation) DeepCopy() *MaintenanceWindowObservation { + if in == nil { + return nil + } + out := new(MaintenanceWindowObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaintenanceWindowParameters) DeepCopyInto(out *MaintenanceWindowParameters) { + *out = *in + if in.DayOfWeek != nil { + in, out := &in.DayOfWeek, &out.DayOfWeek + *out = new(float64) + **out = **in + } + if in.StartHour != nil { + in, out := &in.StartHour, &out.StartHour + *out = new(float64) + **out = **in + } + if in.StartMinute != nil { + in, out := &in.StartMinute, &out.StartMinute + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceWindowParameters. +func (in *MaintenanceWindowParameters) DeepCopy() *MaintenanceWindowParameters { + if in == nil { + return nil + } + out := new(MaintenanceWindowParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Server) DeepCopyInto(out *Server) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Server. +func (in *Server) DeepCopy() *Server { + if in == nil { + return nil + } + out := new(Server) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Server) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerIdentityInitParameters) DeepCopyInto(out *ServerIdentityInitParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerIdentityInitParameters. +func (in *ServerIdentityInitParameters) DeepCopy() *ServerIdentityInitParameters { + if in == nil { + return nil + } + out := new(ServerIdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerIdentityObservation) DeepCopyInto(out *ServerIdentityObservation) { + *out = *in + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerIdentityObservation. 
+func (in *ServerIdentityObservation) DeepCopy() *ServerIdentityObservation { + if in == nil { + return nil + } + out := new(ServerIdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerIdentityParameters) DeepCopyInto(out *ServerIdentityParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerIdentityParameters. +func (in *ServerIdentityParameters) DeepCopy() *ServerIdentityParameters { + if in == nil { + return nil + } + out := new(ServerIdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerInitParameters) DeepCopyInto(out *ServerInitParameters) { + *out = *in + if in.AdministratorLogin != nil { + in, out := &in.AdministratorLogin, &out.AdministratorLogin + *out = new(string) + **out = **in + } + if in.AutoGrowEnabled != nil { + in, out := &in.AutoGrowEnabled, &out.AutoGrowEnabled + *out = new(bool) + **out = **in + } + if in.BackupRetentionDays != nil { + in, out := &in.BackupRetentionDays, &out.BackupRetentionDays + *out = new(float64) + **out = **in + } + if in.CreateMode != nil { + in, out := &in.CreateMode, &out.CreateMode + *out = new(string) + **out = **in + } + if in.CreationSourceServerID != nil { + in, out := &in.CreationSourceServerID, &out.CreationSourceServerID + *out = new(string) + **out = **in + } + if in.GeoRedundantBackupEnabled != nil { + in, out := &in.GeoRedundantBackupEnabled, &out.GeoRedundantBackupEnabled + *out = new(bool) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(ServerIdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if 
in.InfrastructureEncryptionEnabled != nil { + in, out := &in.InfrastructureEncryptionEnabled, &out.InfrastructureEncryptionEnabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.RestorePointInTime != nil { + in, out := &in.RestorePointInTime, &out.RestorePointInTime + *out = new(string) + **out = **in + } + if in.SSLEnforcementEnabled != nil { + in, out := &in.SSLEnforcementEnabled, &out.SSLEnforcementEnabled + *out = new(bool) + **out = **in + } + if in.SSLMinimalTLSVersionEnforced != nil { + in, out := &in.SSLMinimalTLSVersionEnforced, &out.SSLMinimalTLSVersionEnforced + *out = new(string) + **out = **in + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.StorageMb != nil { + in, out := &in.StorageMb, &out.StorageMb + *out = new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ThreatDetectionPolicy != nil { + in, out := &in.ThreatDetectionPolicy, &out.ThreatDetectionPolicy + *out = new(ThreatDetectionPolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerInitParameters. 
+func (in *ServerInitParameters) DeepCopy() *ServerInitParameters { + if in == nil { + return nil + } + out := new(ServerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerList) DeepCopyInto(out *ServerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Server, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerList. +func (in *ServerList) DeepCopy() *ServerList { + if in == nil { + return nil + } + out := new(ServerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServerObservation) DeepCopyInto(out *ServerObservation) { + *out = *in + if in.AdministratorLogin != nil { + in, out := &in.AdministratorLogin, &out.AdministratorLogin + *out = new(string) + **out = **in + } + if in.AutoGrowEnabled != nil { + in, out := &in.AutoGrowEnabled, &out.AutoGrowEnabled + *out = new(bool) + **out = **in + } + if in.BackupRetentionDays != nil { + in, out := &in.BackupRetentionDays, &out.BackupRetentionDays + *out = new(float64) + **out = **in + } + if in.CreateMode != nil { + in, out := &in.CreateMode, &out.CreateMode + *out = new(string) + **out = **in + } + if in.CreationSourceServerID != nil { + in, out := &in.CreationSourceServerID, &out.CreationSourceServerID + *out = new(string) + **out = **in + } + if in.Fqdn != nil { + in, out := &in.Fqdn, &out.Fqdn + *out = new(string) + **out = **in + } + if in.GeoRedundantBackupEnabled != nil { + in, out := &in.GeoRedundantBackupEnabled, &out.GeoRedundantBackupEnabled + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(ServerIdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.InfrastructureEncryptionEnabled != nil { + in, out := &in.InfrastructureEncryptionEnabled, &out.InfrastructureEncryptionEnabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.RestorePointInTime != nil { + in, out := &in.RestorePointInTime, &out.RestorePointInTime + *out = new(string) + **out = **in + } + if in.SSLEnforcementEnabled != nil { + in, out := 
&in.SSLEnforcementEnabled, &out.SSLEnforcementEnabled + *out = new(bool) + **out = **in + } + if in.SSLMinimalTLSVersionEnforced != nil { + in, out := &in.SSLMinimalTLSVersionEnforced, &out.SSLMinimalTLSVersionEnforced + *out = new(string) + **out = **in + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.StorageMb != nil { + in, out := &in.StorageMb, &out.StorageMb + *out = new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ThreatDetectionPolicy != nil { + in, out := &in.ThreatDetectionPolicy, &out.ThreatDetectionPolicy + *out = new(ThreatDetectionPolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerObservation. +func (in *ServerObservation) DeepCopy() *ServerObservation { + if in == nil { + return nil + } + out := new(ServerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServerParameters) DeepCopyInto(out *ServerParameters) { + *out = *in + if in.AdministratorLogin != nil { + in, out := &in.AdministratorLogin, &out.AdministratorLogin + *out = new(string) + **out = **in + } + if in.AdministratorLoginPasswordSecretRef != nil { + in, out := &in.AdministratorLoginPasswordSecretRef, &out.AdministratorLoginPasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.AutoGrowEnabled != nil { + in, out := &in.AutoGrowEnabled, &out.AutoGrowEnabled + *out = new(bool) + **out = **in + } + if in.BackupRetentionDays != nil { + in, out := &in.BackupRetentionDays, &out.BackupRetentionDays + *out = new(float64) + **out = **in + } + if in.CreateMode != nil { + in, out := &in.CreateMode, &out.CreateMode + *out = new(string) + **out = **in + } + if in.CreationSourceServerID != nil { + in, out := &in.CreationSourceServerID, &out.CreationSourceServerID + *out = new(string) + **out = **in + } + if in.GeoRedundantBackupEnabled != nil { + in, out := &in.GeoRedundantBackupEnabled, &out.GeoRedundantBackupEnabled + *out = new(bool) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(ServerIdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.InfrastructureEncryptionEnabled != nil { + in, out := &in.InfrastructureEncryptionEnabled, &out.InfrastructureEncryptionEnabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if 
in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RestorePointInTime != nil { + in, out := &in.RestorePointInTime, &out.RestorePointInTime + *out = new(string) + **out = **in + } + if in.SSLEnforcementEnabled != nil { + in, out := &in.SSLEnforcementEnabled, &out.SSLEnforcementEnabled + *out = new(bool) + **out = **in + } + if in.SSLMinimalTLSVersionEnforced != nil { + in, out := &in.SSLMinimalTLSVersionEnforced, &out.SSLMinimalTLSVersionEnforced + *out = new(string) + **out = **in + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.StorageMb != nil { + in, out := &in.StorageMb, &out.StorageMb + *out = new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ThreatDetectionPolicy != nil { + in, out := &in.ThreatDetectionPolicy, &out.ThreatDetectionPolicy + *out = new(ThreatDetectionPolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerParameters. +func (in *ServerParameters) DeepCopy() *ServerParameters { + if in == nil { + return nil + } + out := new(ServerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServerSpec) DeepCopyInto(out *ServerSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerSpec. +func (in *ServerSpec) DeepCopy() *ServerSpec { + if in == nil { + return nil + } + out := new(ServerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerStatus) DeepCopyInto(out *ServerStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerStatus. +func (in *ServerStatus) DeepCopy() *ServerStatus { + if in == nil { + return nil + } + out := new(ServerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ThreatDetectionPolicyInitParameters) DeepCopyInto(out *ThreatDetectionPolicyInitParameters) { + *out = *in + if in.DisabledAlerts != nil { + in, out := &in.DisabledAlerts, &out.DisabledAlerts + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.EmailAccountAdmins != nil { + in, out := &in.EmailAccountAdmins, &out.EmailAccountAdmins + *out = new(bool) + **out = **in + } + if in.EmailAddresses != nil { + in, out := &in.EmailAddresses, &out.EmailAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.RetentionDays != nil { + in, out := &in.RetentionDays, &out.RetentionDays + *out = new(float64) + **out = **in + } + if in.StorageEndpoint != nil { + in, out := &in.StorageEndpoint, &out.StorageEndpoint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThreatDetectionPolicyInitParameters. +func (in *ThreatDetectionPolicyInitParameters) DeepCopy() *ThreatDetectionPolicyInitParameters { + if in == nil { + return nil + } + out := new(ThreatDetectionPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ThreatDetectionPolicyObservation) DeepCopyInto(out *ThreatDetectionPolicyObservation) { + *out = *in + if in.DisabledAlerts != nil { + in, out := &in.DisabledAlerts, &out.DisabledAlerts + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.EmailAccountAdmins != nil { + in, out := &in.EmailAccountAdmins, &out.EmailAccountAdmins + *out = new(bool) + **out = **in + } + if in.EmailAddresses != nil { + in, out := &in.EmailAddresses, &out.EmailAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.RetentionDays != nil { + in, out := &in.RetentionDays, &out.RetentionDays + *out = new(float64) + **out = **in + } + if in.StorageEndpoint != nil { + in, out := &in.StorageEndpoint, &out.StorageEndpoint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThreatDetectionPolicyObservation. +func (in *ThreatDetectionPolicyObservation) DeepCopy() *ThreatDetectionPolicyObservation { + if in == nil { + return nil + } + out := new(ThreatDetectionPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ThreatDetectionPolicyParameters) DeepCopyInto(out *ThreatDetectionPolicyParameters) { + *out = *in + if in.DisabledAlerts != nil { + in, out := &in.DisabledAlerts, &out.DisabledAlerts + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.EmailAccountAdmins != nil { + in, out := &in.EmailAccountAdmins, &out.EmailAccountAdmins + *out = new(bool) + **out = **in + } + if in.EmailAddresses != nil { + in, out := &in.EmailAddresses, &out.EmailAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.RetentionDays != nil { + in, out := &in.RetentionDays, &out.RetentionDays + *out = new(float64) + **out = **in + } + if in.StorageAccountAccessKeySecretRef != nil { + in, out := &in.StorageAccountAccessKeySecretRef, &out.StorageAccountAccessKeySecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.StorageEndpoint != nil { + in, out := &in.StorageEndpoint, &out.StorageEndpoint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThreatDetectionPolicyParameters. 
+func (in *ThreatDetectionPolicyParameters) DeepCopy() *ThreatDetectionPolicyParameters { + if in == nil { + return nil + } + out := new(ThreatDetectionPolicyParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/dbforpostgresql/v1beta2/zz_generated.managed.go b/apis/dbforpostgresql/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..aad62c8b4 --- /dev/null +++ b/apis/dbforpostgresql/v1beta2/zz_generated.managed.go @@ -0,0 +1,128 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this FlexibleServer. +func (mg *FlexibleServer) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this FlexibleServer. +func (mg *FlexibleServer) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this FlexibleServer. +func (mg *FlexibleServer) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this FlexibleServer. +func (mg *FlexibleServer) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this FlexibleServer. +func (mg *FlexibleServer) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this FlexibleServer. +func (mg *FlexibleServer) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this FlexibleServer. +func (mg *FlexibleServer) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this FlexibleServer. 
+func (mg *FlexibleServer) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this FlexibleServer. +func (mg *FlexibleServer) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this FlexibleServer. +func (mg *FlexibleServer) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this FlexibleServer. +func (mg *FlexibleServer) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this FlexibleServer. +func (mg *FlexibleServer) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Server. +func (mg *Server) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Server. +func (mg *Server) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Server. +func (mg *Server) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Server. +func (mg *Server) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Server. +func (mg *Server) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Server. +func (mg *Server) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Server. +func (mg *Server) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Server. 
+func (mg *Server) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Server. +func (mg *Server) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Server. +func (mg *Server) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Server. +func (mg *Server) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Server. +func (mg *Server) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/dbforpostgresql/v1beta2/zz_generated.managedlist.go b/apis/dbforpostgresql/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..379943ff1 --- /dev/null +++ b/apis/dbforpostgresql/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,26 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this FlexibleServerList. +func (l *FlexibleServerList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ServerList. 
+func (l *ServerList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/dbforpostgresql/v1beta2/zz_generated.resolvers.go b/apis/dbforpostgresql/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..811381d22 --- /dev/null +++ b/apis/dbforpostgresql/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,156 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + apisresolver "github.com/upbound/provider-azure/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *FlexibleServer) ResolveReferences( // ResolveReferences of this FlexibleServer. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DelegatedSubnetID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.DelegatedSubnetIDRef, + Selector: mg.Spec.ForProvider.DelegatedSubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DelegatedSubnetID") + } + mg.Spec.ForProvider.DelegatedSubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DelegatedSubnetIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "PrivateDNSZone", "PrivateDNSZoneList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.PrivateDNSZoneID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.PrivateDNSZoneIDRef, + Selector: mg.Spec.ForProvider.PrivateDNSZoneIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.PrivateDNSZoneID") + } + mg.Spec.ForProvider.PrivateDNSZoneID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.PrivateDNSZoneIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", 
"ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DelegatedSubnetID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.DelegatedSubnetIDRef, + Selector: mg.Spec.InitProvider.DelegatedSubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.DelegatedSubnetID") + } + mg.Spec.InitProvider.DelegatedSubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DelegatedSubnetIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "PrivateDNSZone", "PrivateDNSZoneList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.PrivateDNSZoneID), + Extract: 
resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.PrivateDNSZoneIDRef, + Selector: mg.Spec.InitProvider.PrivateDNSZoneIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.PrivateDNSZoneID") + } + mg.Spec.InitProvider.PrivateDNSZoneID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.PrivateDNSZoneIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Server. +func (mg *Server) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/dbforpostgresql/v1beta2/zz_groupversion_info.go b/apis/dbforpostgresql/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..eecadca27 --- /dev/null +++ b/apis/dbforpostgresql/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +// +kubebuilder:object:generate=true +// +groupName=dbforpostgresql.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "dbforpostgresql.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/dbforpostgresql/v1beta2/zz_server_terraformed.go b/apis/dbforpostgresql/v1beta2/zz_server_terraformed.go new file mode 100755 index 000000000..0cce611f2 --- /dev/null +++ b/apis/dbforpostgresql/v1beta2/zz_server_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Server +func (mg *Server) GetTerraformResourceType() string { + return "azurerm_postgresql_server" +} + +// GetConnectionDetailsMapping for this Server +func (tr *Server) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"administrator_login_password": "spec.forProvider.administratorLoginPasswordSecretRef", "threat_detection_policy[*].storage_account_access_key": "spec.forProvider.threatDetectionPolicy[*].storageAccountAccessKeySecretRef"} +} + +// GetObservation of this Server +func (tr *Server) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Server +func (tr *Server) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Server +func (tr *Server) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Server +func (tr *Server) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Server +func (tr *Server) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Server +func (tr *Server) 
GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Server +func (tr *Server) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Server using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Server) LateInitialize(attrs []byte) (bool, error) { + params := &ServerParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Server) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/dbforpostgresql/v1beta2/zz_server_types.go b/apis/dbforpostgresql/v1beta2/zz_server_types.go new file mode 100755 index 000000000..1f5e4085f --- /dev/null +++ b/apis/dbforpostgresql/v1beta2/zz_server_types.go @@ -0,0 +1,400 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ServerIdentityInitParameters struct { + + // Specifies the type of Managed Service Identity that should be configured on this PostgreSQL Server. The only possible value is SystemAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ServerIdentityObservation struct { + + // The Principal ID associated with this Managed Service Identity. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID associated with this Managed Service Identity. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this PostgreSQL Server. The only possible value is SystemAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ServerIdentityParameters struct { + + // Specifies the type of Managed Service Identity that should be configured on this PostgreSQL Server. The only possible value is SystemAssigned. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type ServerInitParameters struct { + + // The Administrator login for the PostgreSQL Server. 
Required when create_mode is Default. Changing this forces a new resource to be created. + AdministratorLogin *string `json:"administratorLogin,omitempty" tf:"administrator_login,omitempty"` + + // Enable/Disable auto-growing of the storage. Storage auto-grow prevents your server from running out of storage and becoming read-only. If storage auto grow is enabled, the storage automatically grows without impacting the workload. Defaults to true. + AutoGrowEnabled *bool `json:"autoGrowEnabled,omitempty" tf:"auto_grow_enabled,omitempty"` + + // Backup retention days for the server, supported values are between 7 and 35 days. + BackupRetentionDays *float64 `json:"backupRetentionDays,omitempty" tf:"backup_retention_days,omitempty"` + + // The creation mode. Can be used to restore or replicate existing servers. Possible values are Default, Replica, GeoRestore, and PointInTimeRestore. Defaults to Default. + CreateMode *string `json:"createMode,omitempty" tf:"create_mode,omitempty"` + + // For creation modes other than Default, the source server ID to use. + CreationSourceServerID *string `json:"creationSourceServerId,omitempty" tf:"creation_source_server_id,omitempty"` + + // Turn Geo-redundant server backups on/off. This allows you to choose between locally redundant or geo-redundant backup storage in the General Purpose and Memory Optimized tiers. When the backups are stored in geo-redundant backup storage, they are not only stored within the region in which your server is hosted, but are also replicated to a paired data center. This provides better protection and ability to restore your server in a different region in the event of a disaster. This is not support for the Basic tier. Changing this forces a new resource to be created. + GeoRedundantBackupEnabled *bool `json:"geoRedundantBackupEnabled,omitempty" tf:"geo_redundant_backup_enabled,omitempty"` + + // An identity block as defined below. 
+ Identity *ServerIdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Whether or not infrastructure is encrypted for this server. Changing this forces a new resource to be created. + InfrastructureEncryptionEnabled *bool `json:"infrastructureEncryptionEnabled,omitempty" tf:"infrastructure_encryption_enabled,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Whether or not public network access is allowed for this server. Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // When create_mode is PointInTimeRestore the point in time to restore from creation_source_server_id. It should be provided in RFC3339 format, e.g. 2013-11-08T22:00:40Z. + RestorePointInTime *string `json:"restorePointInTime,omitempty" tf:"restore_point_in_time,omitempty"` + + // Specifies if SSL should be enforced on connections. Possible values are true and false. + SSLEnforcementEnabled *bool `json:"sslEnforcementEnabled,omitempty" tf:"ssl_enforcement_enabled,omitempty"` + + // The minimum TLS version to support on the sever. Possible values are TLSEnforcementDisabled, TLS1_0, TLS1_1, and TLS1_2. Defaults to TLS1_2. + SSLMinimalTLSVersionEnforced *string `json:"sslMinimalTlsVersionEnforced,omitempty" tf:"ssl_minimal_tls_version_enforced,omitempty"` + + // Specifies the SKU Name for this PostgreSQL Server. The name of the SKU, follows the tier + family + cores pattern (e.g. B_Gen4_1, GP_Gen5_8). For more information see the product documentation. Possible values are B_Gen4_1, B_Gen4_2, B_Gen5_1, B_Gen5_2, GP_Gen4_2, GP_Gen4_4, GP_Gen4_8, GP_Gen4_16, GP_Gen4_32, GP_Gen5_2, GP_Gen5_4, GP_Gen5_8, GP_Gen5_16, GP_Gen5_32, GP_Gen5_64, MO_Gen5_2, MO_Gen5_4, MO_Gen5_8, MO_Gen5_16 and MO_Gen5_32. 
+ SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // Max storage allowed for a server. Possible values are between 5120 MB(5GB) and 1048576 MB(1TB) for the Basic SKU and between 5120 MB(5GB) and 16777216 MB(16TB) for General Purpose/Memory Optimized SKUs. For more information see the product documentation. + StorageMb *float64 `json:"storageMb,omitempty" tf:"storage_mb,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Threat detection policy configuration, known in the API as Server Security Alerts Policy. The threat_detection_policy block supports fields documented below. + ThreatDetectionPolicy *ThreatDetectionPolicyInitParameters `json:"threatDetectionPolicy,omitempty" tf:"threat_detection_policy,omitempty"` + + // Specifies the version of PostgreSQL to use. Valid values are 9.5, 9.6, 10, 10.0, 10.2 and 11. Changing this forces a new resource to be created. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type ServerObservation struct { + + // The Administrator login for the PostgreSQL Server. Required when create_mode is Default. Changing this forces a new resource to be created. + AdministratorLogin *string `json:"administratorLogin,omitempty" tf:"administrator_login,omitempty"` + + // Enable/Disable auto-growing of the storage. Storage auto-grow prevents your server from running out of storage and becoming read-only. If storage auto grow is enabled, the storage automatically grows without impacting the workload. Defaults to true. + AutoGrowEnabled *bool `json:"autoGrowEnabled,omitempty" tf:"auto_grow_enabled,omitempty"` + + // Backup retention days for the server, supported values are between 7 and 35 days. + BackupRetentionDays *float64 `json:"backupRetentionDays,omitempty" tf:"backup_retention_days,omitempty"` + + // The creation mode. Can be used to restore or replicate existing servers. 
Possible values are Default, Replica, GeoRestore, and PointInTimeRestore. Defaults to Default. + CreateMode *string `json:"createMode,omitempty" tf:"create_mode,omitempty"` + + // For creation modes other than Default, the source server ID to use. + CreationSourceServerID *string `json:"creationSourceServerId,omitempty" tf:"creation_source_server_id,omitempty"` + + // The FQDN of the PostgreSQL Server. + Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"` + + // Turn Geo-redundant server backups on/off. This allows you to choose between locally redundant or geo-redundant backup storage in the General Purpose and Memory Optimized tiers. When the backups are stored in geo-redundant backup storage, they are not only stored within the region in which your server is hosted, but are also replicated to a paired data center. This provides better protection and ability to restore your server in a different region in the event of a disaster. This is not support for the Basic tier. Changing this forces a new resource to be created. + GeoRedundantBackupEnabled *bool `json:"geoRedundantBackupEnabled,omitempty" tf:"geo_redundant_backup_enabled,omitempty"` + + // The ID of the PostgreSQL Server. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. + Identity *ServerIdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // Whether or not infrastructure is encrypted for this server. Changing this forces a new resource to be created. + InfrastructureEncryptionEnabled *bool `json:"infrastructureEncryptionEnabled,omitempty" tf:"infrastructure_encryption_enabled,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Whether or not public network access is allowed for this server. Defaults to true. 
+ PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The name of the resource group in which to create the PostgreSQL Server. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // When create_mode is PointInTimeRestore the point in time to restore from creation_source_server_id. It should be provided in RFC3339 format, e.g. 2013-11-08T22:00:40Z. + RestorePointInTime *string `json:"restorePointInTime,omitempty" tf:"restore_point_in_time,omitempty"` + + // Specifies if SSL should be enforced on connections. Possible values are true and false. + SSLEnforcementEnabled *bool `json:"sslEnforcementEnabled,omitempty" tf:"ssl_enforcement_enabled,omitempty"` + + // The minimum TLS version to support on the sever. Possible values are TLSEnforcementDisabled, TLS1_0, TLS1_1, and TLS1_2. Defaults to TLS1_2. + SSLMinimalTLSVersionEnforced *string `json:"sslMinimalTlsVersionEnforced,omitempty" tf:"ssl_minimal_tls_version_enforced,omitempty"` + + // Specifies the SKU Name for this PostgreSQL Server. The name of the SKU, follows the tier + family + cores pattern (e.g. B_Gen4_1, GP_Gen5_8). For more information see the product documentation. Possible values are B_Gen4_1, B_Gen4_2, B_Gen5_1, B_Gen5_2, GP_Gen4_2, GP_Gen4_4, GP_Gen4_8, GP_Gen4_16, GP_Gen4_32, GP_Gen5_2, GP_Gen5_4, GP_Gen5_8, GP_Gen5_16, GP_Gen5_32, GP_Gen5_64, MO_Gen5_2, MO_Gen5_4, MO_Gen5_8, MO_Gen5_16 and MO_Gen5_32. + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // Max storage allowed for a server. Possible values are between 5120 MB(5GB) and 1048576 MB(1TB) for the Basic SKU and between 5120 MB(5GB) and 16777216 MB(16TB) for General Purpose/Memory Optimized SKUs. For more information see the product documentation. 
+ StorageMb *float64 `json:"storageMb,omitempty" tf:"storage_mb,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Threat detection policy configuration, known in the API as Server Security Alerts Policy. The threat_detection_policy block supports fields documented below. + ThreatDetectionPolicy *ThreatDetectionPolicyObservation `json:"threatDetectionPolicy,omitempty" tf:"threat_detection_policy,omitempty"` + + // Specifies the version of PostgreSQL to use. Valid values are 9.5, 9.6, 10, 10.0, 10.2 and 11. Changing this forces a new resource to be created. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type ServerParameters struct { + + // The Administrator login for the PostgreSQL Server. Required when create_mode is Default. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + AdministratorLogin *string `json:"administratorLogin,omitempty" tf:"administrator_login,omitempty"` + + // The Password associated with the administrator_login for the PostgreSQL Server. Required when create_mode is Default. + // +kubebuilder:validation:Optional + AdministratorLoginPasswordSecretRef *v1.SecretKeySelector `json:"administratorLoginPasswordSecretRef,omitempty" tf:"-"` + + // Enable/Disable auto-growing of the storage. Storage auto-grow prevents your server from running out of storage and becoming read-only. If storage auto grow is enabled, the storage automatically grows without impacting the workload. Defaults to true. + // +kubebuilder:validation:Optional + AutoGrowEnabled *bool `json:"autoGrowEnabled,omitempty" tf:"auto_grow_enabled,omitempty"` + + // Backup retention days for the server, supported values are between 7 and 35 days. + // +kubebuilder:validation:Optional + BackupRetentionDays *float64 `json:"backupRetentionDays,omitempty" tf:"backup_retention_days,omitempty"` + + // The creation mode. 
Can be used to restore or replicate existing servers. Possible values are Default, Replica, GeoRestore, and PointInTimeRestore. Defaults to Default. + // +kubebuilder:validation:Optional + CreateMode *string `json:"createMode,omitempty" tf:"create_mode,omitempty"` + + // For creation modes other than Default, the source server ID to use. + // +kubebuilder:validation:Optional + CreationSourceServerID *string `json:"creationSourceServerId,omitempty" tf:"creation_source_server_id,omitempty"` + + // Turn Geo-redundant server backups on/off. This allows you to choose between locally redundant or geo-redundant backup storage in the General Purpose and Memory Optimized tiers. When the backups are stored in geo-redundant backup storage, they are not only stored within the region in which your server is hosted, but are also replicated to a paired data center. This provides better protection and ability to restore your server in a different region in the event of a disaster. This is not support for the Basic tier. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + GeoRedundantBackupEnabled *bool `json:"geoRedundantBackupEnabled,omitempty" tf:"geo_redundant_backup_enabled,omitempty"` + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *ServerIdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Whether or not infrastructure is encrypted for this server. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + InfrastructureEncryptionEnabled *bool `json:"infrastructureEncryptionEnabled,omitempty" tf:"infrastructure_encryption_enabled,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Whether or not public network access is allowed for this server. Defaults to true. + // +kubebuilder:validation:Optional + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The name of the resource group in which to create the PostgreSQL Server. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // When create_mode is PointInTimeRestore the point in time to restore from creation_source_server_id. It should be provided in RFC3339 format, e.g. 2013-11-08T22:00:40Z. + // +kubebuilder:validation:Optional + RestorePointInTime *string `json:"restorePointInTime,omitempty" tf:"restore_point_in_time,omitempty"` + + // Specifies if SSL should be enforced on connections. Possible values are true and false. + // +kubebuilder:validation:Optional + SSLEnforcementEnabled *bool `json:"sslEnforcementEnabled,omitempty" tf:"ssl_enforcement_enabled,omitempty"` + + // The minimum TLS version to support on the sever. Possible values are TLSEnforcementDisabled, TLS1_0, TLS1_1, and TLS1_2. Defaults to TLS1_2. 
+ // +kubebuilder:validation:Optional + SSLMinimalTLSVersionEnforced *string `json:"sslMinimalTlsVersionEnforced,omitempty" tf:"ssl_minimal_tls_version_enforced,omitempty"` + + // Specifies the SKU Name for this PostgreSQL Server. The name of the SKU, follows the tier + family + cores pattern (e.g. B_Gen4_1, GP_Gen5_8). For more information see the product documentation. Possible values are B_Gen4_1, B_Gen4_2, B_Gen5_1, B_Gen5_2, GP_Gen4_2, GP_Gen4_4, GP_Gen4_8, GP_Gen4_16, GP_Gen4_32, GP_Gen5_2, GP_Gen5_4, GP_Gen5_8, GP_Gen5_16, GP_Gen5_32, GP_Gen5_64, MO_Gen5_2, MO_Gen5_4, MO_Gen5_8, MO_Gen5_16 and MO_Gen5_32. + // +kubebuilder:validation:Optional + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // Max storage allowed for a server. Possible values are between 5120 MB(5GB) and 1048576 MB(1TB) for the Basic SKU and between 5120 MB(5GB) and 16777216 MB(16TB) for General Purpose/Memory Optimized SKUs. For more information see the product documentation. + // +kubebuilder:validation:Optional + StorageMb *float64 `json:"storageMb,omitempty" tf:"storage_mb,omitempty"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Threat detection policy configuration, known in the API as Server Security Alerts Policy. The threat_detection_policy block supports fields documented below. + // +kubebuilder:validation:Optional + ThreatDetectionPolicy *ThreatDetectionPolicyParameters `json:"threatDetectionPolicy,omitempty" tf:"threat_detection_policy,omitempty"` + + // Specifies the version of PostgreSQL to use. Valid values are 9.5, 9.6, 10, 10.0, 10.2 and 11. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type ThreatDetectionPolicyInitParameters struct { + + // Specifies a list of alerts which should be disabled. 
Possible values are Sql_Injection, Sql_Injection_Vulnerability, Access_Anomaly, Data_Exfiltration and Unsafe_Action. + // +listType=set + DisabledAlerts []*string `json:"disabledAlerts,omitempty" tf:"disabled_alerts,omitempty"` + + // Should the account administrators be emailed when this alert is triggered? + EmailAccountAdmins *bool `json:"emailAccountAdmins,omitempty" tf:"email_account_admins,omitempty"` + + // A list of email addresses which alerts should be sent to. + // +listType=set + EmailAddresses []*string `json:"emailAddresses,omitempty" tf:"email_addresses,omitempty"` + + // Is the policy enabled? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Specifies the number of days to keep in the Threat Detection audit logs. + RetentionDays *float64 `json:"retentionDays,omitempty" tf:"retention_days,omitempty"` + + // Specifies the blob storage endpoint (e.g. https://example.blob.core.windows.net). This blob storage will hold all Threat Detection audit logs. + StorageEndpoint *string `json:"storageEndpoint,omitempty" tf:"storage_endpoint,omitempty"` +} + +type ThreatDetectionPolicyObservation struct { + + // Specifies a list of alerts which should be disabled. Possible values are Sql_Injection, Sql_Injection_Vulnerability, Access_Anomaly, Data_Exfiltration and Unsafe_Action. + // +listType=set + DisabledAlerts []*string `json:"disabledAlerts,omitempty" tf:"disabled_alerts,omitempty"` + + // Should the account administrators be emailed when this alert is triggered? + EmailAccountAdmins *bool `json:"emailAccountAdmins,omitempty" tf:"email_account_admins,omitempty"` + + // A list of email addresses which alerts should be sent to. + // +listType=set + EmailAddresses []*string `json:"emailAddresses,omitempty" tf:"email_addresses,omitempty"` + + // Is the policy enabled? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Specifies the number of days to keep in the Threat Detection audit logs. 
+ RetentionDays *float64 `json:"retentionDays,omitempty" tf:"retention_days,omitempty"` + + // Specifies the blob storage endpoint (e.g. https://example.blob.core.windows.net). This blob storage will hold all Threat Detection audit logs. + StorageEndpoint *string `json:"storageEndpoint,omitempty" tf:"storage_endpoint,omitempty"` +} + +type ThreatDetectionPolicyParameters struct { + + // Specifies a list of alerts which should be disabled. Possible values are Sql_Injection, Sql_Injection_Vulnerability, Access_Anomaly, Data_Exfiltration and Unsafe_Action. + // +kubebuilder:validation:Optional + // +listType=set + DisabledAlerts []*string `json:"disabledAlerts,omitempty" tf:"disabled_alerts,omitempty"` + + // Should the account administrators be emailed when this alert is triggered? + // +kubebuilder:validation:Optional + EmailAccountAdmins *bool `json:"emailAccountAdmins,omitempty" tf:"email_account_admins,omitempty"` + + // A list of email addresses which alerts should be sent to. + // +kubebuilder:validation:Optional + // +listType=set + EmailAddresses []*string `json:"emailAddresses,omitempty" tf:"email_addresses,omitempty"` + + // Is the policy enabled? + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Specifies the number of days to keep in the Threat Detection audit logs. + // +kubebuilder:validation:Optional + RetentionDays *float64 `json:"retentionDays,omitempty" tf:"retention_days,omitempty"` + + // Specifies the identifier key of the Threat Detection audit storage account. + // +kubebuilder:validation:Optional + StorageAccountAccessKeySecretRef *v1.SecretKeySelector `json:"storageAccountAccessKeySecretRef,omitempty" tf:"-"` + + // Specifies the blob storage endpoint (e.g. https://example.blob.core.windows.net). This blob storage will hold all Threat Detection audit logs. 
+ // +kubebuilder:validation:Optional + StorageEndpoint *string `json:"storageEndpoint,omitempty" tf:"storage_endpoint,omitempty"` +} + +// ServerSpec defines the desired state of Server +type ServerSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ServerParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ServerInitParameters `json:"initProvider,omitempty"` +} + +// ServerStatus defines the observed state of Server. +type ServerStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ServerObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Server is the Schema for the Servers API. Manages a PostgreSQL Server. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type Server struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.skuName) || (has(self.initProvider) && has(self.initProvider.skuName))",message="spec.forProvider.skuName is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.sslEnforcementEnabled) || (has(self.initProvider) && has(self.initProvider.sslEnforcementEnabled))",message="spec.forProvider.sslEnforcementEnabled is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.version) || (has(self.initProvider) && has(self.initProvider.version))",message="spec.forProvider.version is a required parameter" + Spec ServerSpec `json:"spec"` + Status ServerStatus 
`json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ServerList contains a list of Servers +type ServerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Server `json:"items"` +} + +// Repository type metadata. +var ( + Server_Kind = "Server" + Server_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Server_Kind}.String() + Server_KindAPIVersion = Server_Kind + "." + CRDGroupVersion.String() + Server_GroupVersionKind = CRDGroupVersion.WithKind(Server_Kind) +) + +func init() { + SchemeBuilder.Register(&Server{}, &ServerList{}) +} diff --git a/apis/devices/v1beta1/zz_generated.conversion_hubs.go b/apis/devices/v1beta1/zz_generated.conversion_hubs.go index ed8ddba6d..9de691ef2 100755 --- a/apis/devices/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/devices/v1beta1/zz_generated.conversion_hubs.go @@ -6,18 +6,12 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *IOTHub) Hub() {} - // Hub marks this type as a conversion hub. func (tr *IOTHubCertificate) Hub() {} // Hub marks this type as a conversion hub. func (tr *IOTHubConsumerGroup) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *IOTHubDPS) Hub() {} - // Hub marks this type as a conversion hub. func (tr *IOTHubDPSCertificate) Hub() {} diff --git a/apis/devices/v1beta1/zz_generated.conversion_spokes.go b/apis/devices/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..e8a6dfb47 --- /dev/null +++ b/apis/devices/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,54 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this IOTHub to the hub type. +func (tr *IOTHub) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the IOTHub type. +func (tr *IOTHub) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this IOTHubDPS to the hub type. +func (tr *IOTHubDPS) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the IOTHubDPS type. 
+func (tr *IOTHubDPS) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/devices/v1beta1/zz_generated.resolvers.go b/apis/devices/v1beta1/zz_generated.resolvers.go index 96b9b8580..a5bceb92e 100644 --- a/apis/devices/v1beta1/zz_generated.resolvers.go +++ b/apis/devices/v1beta1/zz_generated.resolvers.go @@ -58,7 +58,7 @@ func (mg *IOTHubCertificate) ResolveReferences(ctx context.Context, c client.Rea var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("devices.azure.upbound.io", "v1beta1", "IOTHub", "IOTHubList") + m, l, err = apisresolver.GetManagedResource("devices.azure.upbound.io", "v1beta2", "IOTHub", "IOTHubList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -108,7 +108,7 @@ func (mg *IOTHubConsumerGroup) ResolveReferences(ctx context.Context, c client.R var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("devices.azure.upbound.io", "v1beta1", "IOTHub", "IOTHubList") + m, l, err = apisresolver.GetManagedResource("devices.azure.upbound.io", "v1beta2", "IOTHub", "IOTHubList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -189,7 +189,7 @@ func (mg *IOTHubDPSCertificate) ResolveReferences(ctx context.Context, c client. 
var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("devices.azure.upbound.io", "v1beta1", "IOTHubDPS", "IOTHubDPSList") + m, l, err = apisresolver.GetManagedResource("devices.azure.upbound.io", "v1beta2", "IOTHubDPS", "IOTHubDPSList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -227,7 +227,7 @@ func (mg *IOTHubDPSCertificate) ResolveReferences(ctx context.Context, c client. mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("devices.azure.upbound.io", "v1beta1", "IOTHubDPS", "IOTHubDPSList") + m, l, err = apisresolver.GetManagedResource("devices.azure.upbound.io", "v1beta2", "IOTHubDPS", "IOTHubDPSList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -258,7 +258,7 @@ func (mg *IOTHubDPSSharedAccessPolicy) ResolveReferences(ctx context.Context, c var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("devices.azure.upbound.io", "v1beta1", "IOTHubDPS", "IOTHubDPSList") + m, l, err = apisresolver.GetManagedResource("devices.azure.upbound.io", "v1beta2", "IOTHubDPS", "IOTHubDPSList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -308,7 +308,7 @@ func (mg *IOTHubEndpointEventHub) ResolveReferences(ctx context.Context, c clien var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("devices.azure.upbound.io", "v1beta1", "IOTHub", "IOTHubList") + m, l, err = apisresolver.GetManagedResource("devices.azure.upbound.io", "v1beta2", "IOTHub", "IOTHubList") if err != nil { return errors.Wrap(err, "failed to get the reference target 
managed resource and its list for reference resolution") } @@ -377,7 +377,7 @@ func (mg *IOTHubEndpointServiceBusQueue) ResolveReferences(ctx context.Context, var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("devices.azure.upbound.io", "v1beta1", "IOTHub", "IOTHubList") + m, l, err = apisresolver.GetManagedResource("devices.azure.upbound.io", "v1beta2", "IOTHub", "IOTHubList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -446,7 +446,7 @@ func (mg *IOTHubEndpointServiceBusTopic) ResolveReferences(ctx context.Context, var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("devices.azure.upbound.io", "v1beta1", "IOTHub", "IOTHubList") + m, l, err = apisresolver.GetManagedResource("devices.azure.upbound.io", "v1beta2", "IOTHub", "IOTHubList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -534,7 +534,7 @@ func (mg *IOTHubEndpointStorageContainer) ResolveReferences(ctx context.Context, mg.Spec.ForProvider.ContainerName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ContainerNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("devices.azure.upbound.io", "v1beta1", "IOTHub", "IOTHubList") + m, l, err = apisresolver.GetManagedResource("devices.azure.upbound.io", "v1beta2", "IOTHub", "IOTHubList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -622,7 +622,7 @@ func (mg *IOTHubEnrichment) ResolveReferences(ctx context.Context, c client.Read var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("devices.azure.upbound.io", "v1beta1", "IOTHub", "IOTHubList") + m, l, err = 
apisresolver.GetManagedResource("devices.azure.upbound.io", "v1beta2", "IOTHub", "IOTHubList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -660,7 +660,7 @@ func (mg *IOTHubEnrichment) ResolveReferences(ctx context.Context, c client.Read mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("devices.azure.upbound.io", "v1beta1", "IOTHub", "IOTHubList") + m, l, err = apisresolver.GetManagedResource("devices.azure.upbound.io", "v1beta2", "IOTHub", "IOTHubList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -730,7 +730,7 @@ func (mg *IOTHubFallbackRoute) ResolveReferences(ctx context.Context, c client.R mg.Spec.ForProvider.EndpointNames = reference.ToPtrValues(mrsp.ResolvedValues) mg.Spec.ForProvider.EndpointNamesRefs = mrsp.ResolvedReferences { - m, l, err = apisresolver.GetManagedResource("devices.azure.upbound.io", "v1beta1", "IOTHub", "IOTHubList") + m, l, err = apisresolver.GetManagedResource("devices.azure.upbound.io", "v1beta2", "IOTHub", "IOTHubList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -799,7 +799,7 @@ func (mg *IOTHubRoute) ResolveReferences(ctx context.Context, c client.Reader) e var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("devices.azure.upbound.io", "v1beta1", "IOTHub", "IOTHubList") + m, l, err = apisresolver.GetManagedResource("devices.azure.upbound.io", "v1beta2", "IOTHub", "IOTHubList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -849,7 +849,7 @@ func (mg *IOTHubSharedAccessPolicy) 
ResolveReferences(ctx context.Context, c cli var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("devices.azure.upbound.io", "v1beta1", "IOTHub", "IOTHubList") + m, l, err = apisresolver.GetManagedResource("devices.azure.upbound.io", "v1beta2", "IOTHub", "IOTHubList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/devices/v1beta1/zz_iothubcertificate_types.go b/apis/devices/v1beta1/zz_iothubcertificate_types.go index 1d01a9fdf..fc77a6eaa 100755 --- a/apis/devices/v1beta1/zz_iothubcertificate_types.go +++ b/apis/devices/v1beta1/zz_iothubcertificate_types.go @@ -41,7 +41,7 @@ type IOTHubCertificateParameters struct { CertificateContentSecretRef v1.SecretKeySelector `json:"certificateContentSecretRef" tf:"-"` // The name of the IoTHub that this certificate will be attached to. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devices/v1beta1.IOTHub + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devices/v1beta2.IOTHub // +kubebuilder:validation:Optional IOTHubName *string `json:"iothubName,omitempty" tf:"iothub_name,omitempty"` diff --git a/apis/devices/v1beta1/zz_iothubconsumergroup_types.go b/apis/devices/v1beta1/zz_iothubconsumergroup_types.go index 2c4c46683..5cfe4ac5d 100755 --- a/apis/devices/v1beta1/zz_iothubconsumergroup_types.go +++ b/apis/devices/v1beta1/zz_iothubconsumergroup_types.go @@ -38,7 +38,7 @@ type IOTHubConsumerGroupParameters struct { EventHubEndpointName *string `json:"eventhubEndpointName" tf:"eventhub_endpoint_name,omitempty"` // The name of the IoT Hub. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devices/v1beta1.IOTHub + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devices/v1beta2.IOTHub // +kubebuilder:validation:Optional IOTHubName *string `json:"iothubName,omitempty" tf:"iothub_name,omitempty"` diff --git a/apis/devices/v1beta1/zz_iothubdpscertificate_types.go b/apis/devices/v1beta1/zz_iothubdpscertificate_types.go index 6f488eb0a..c2133b5c8 100755 --- a/apis/devices/v1beta1/zz_iothubdpscertificate_types.go +++ b/apis/devices/v1beta1/zz_iothubdpscertificate_types.go @@ -16,7 +16,7 @@ import ( type IOTHubDPSCertificateInitParameters struct { // The name of the IoT Device Provisioning Service that this certificate will be attached to. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devices/v1beta1.IOTHubDPS + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devices/v1beta2.IOTHubDPS IOTDPSName *string `json:"iotDpsName,omitempty" tf:"iot_dps_name,omitempty"` // Reference to a IOTHubDPS in devices to populate iotDpsName. @@ -53,7 +53,7 @@ type IOTHubDPSCertificateParameters struct { CertificateContentSecretRef v1.SecretKeySelector `json:"certificateContentSecretRef" tf:"-"` // The name of the IoT Device Provisioning Service that this certificate will be attached to. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devices/v1beta1.IOTHubDPS + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devices/v1beta2.IOTHubDPS // +kubebuilder:validation:Optional IOTDPSName *string `json:"iotDpsName,omitempty" tf:"iot_dps_name,omitempty"` diff --git a/apis/devices/v1beta1/zz_iothubdpssharedaccesspolicy_types.go b/apis/devices/v1beta1/zz_iothubdpssharedaccesspolicy_types.go index 1b6c14223..9bb2b223e 100755 --- a/apis/devices/v1beta1/zz_iothubdpssharedaccesspolicy_types.go +++ b/apis/devices/v1beta1/zz_iothubdpssharedaccesspolicy_types.go @@ -69,7 +69,7 @@ type IOTHubDPSSharedAccessPolicyParameters struct { EnrollmentWrite *bool `json:"enrollmentWrite,omitempty" tf:"enrollment_write,omitempty"` // The name of the IoT Hub Device Provisioning service to which this Shared Access Policy belongs. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devices/v1beta1.IOTHubDPS + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devices/v1beta2.IOTHubDPS // +kubebuilder:validation:Optional IOTHubDPSName *string `json:"iothubDpsName,omitempty" tf:"iothub_dps_name,omitempty"` diff --git a/apis/devices/v1beta1/zz_iothubendpointeventhub_types.go b/apis/devices/v1beta1/zz_iothubendpointeventhub_types.go index cb1808d06..6fc94da87 100755 --- a/apis/devices/v1beta1/zz_iothubendpointeventhub_types.go +++ b/apis/devices/v1beta1/zz_iothubendpointeventhub_types.go @@ -83,7 +83,7 @@ type IOTHubEndpointEventHubParameters struct { EntityPath *string `json:"entityPath,omitempty" tf:"entity_path,omitempty"` // The IoTHub ID for the endpoint. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devices/v1beta1.IOTHub + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devices/v1beta2.IOTHub // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional IOTHubID *string `json:"iothubId,omitempty" tf:"iothub_id,omitempty"` diff --git a/apis/devices/v1beta1/zz_iothubendpointservicebusqueue_types.go b/apis/devices/v1beta1/zz_iothubendpointservicebusqueue_types.go index 891c4291a..ec1ca0e27 100755 --- a/apis/devices/v1beta1/zz_iothubendpointservicebusqueue_types.go +++ b/apis/devices/v1beta1/zz_iothubendpointservicebusqueue_types.go @@ -83,7 +83,7 @@ type IOTHubEndpointServiceBusQueueParameters struct { EntityPath *string `json:"entityPath,omitempty" tf:"entity_path,omitempty"` // The IoTHub ID for the endpoint. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devices/v1beta1.IOTHub + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devices/v1beta2.IOTHub // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional IOTHubID *string `json:"iothubId,omitempty" tf:"iothub_id,omitempty"` diff --git a/apis/devices/v1beta1/zz_iothubendpointservicebustopic_types.go b/apis/devices/v1beta1/zz_iothubendpointservicebustopic_types.go index a6da54fab..7610577a2 100755 --- a/apis/devices/v1beta1/zz_iothubendpointservicebustopic_types.go +++ b/apis/devices/v1beta1/zz_iothubendpointservicebustopic_types.go @@ -83,7 +83,7 @@ type IOTHubEndpointServiceBusTopicParameters struct { EntityPath *string `json:"entityPath,omitempty" tf:"entity_path,omitempty"` // The IoTHub ID for the endpoint. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devices/v1beta1.IOTHub + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devices/v1beta2.IOTHub // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional IOTHubID *string `json:"iothubId,omitempty" tf:"iothub_id,omitempty"` diff --git a/apis/devices/v1beta1/zz_iothubendpointstoragecontainer_types.go b/apis/devices/v1beta1/zz_iothubendpointstoragecontainer_types.go index 68f26e0ce..74e45363c 100755 --- a/apis/devices/v1beta1/zz_iothubendpointstoragecontainer_types.go +++ b/apis/devices/v1beta1/zz_iothubendpointstoragecontainer_types.go @@ -137,7 +137,7 @@ type IOTHubEndpointStorageContainerParameters struct { FileNameFormat *string `json:"fileNameFormat,omitempty" tf:"file_name_format,omitempty"` // The IoTHub ID for the endpoint. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devices/v1beta1.IOTHub + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devices/v1beta2.IOTHub // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional IOTHubID *string `json:"iothubId,omitempty" tf:"iothub_id,omitempty"` diff --git a/apis/devices/v1beta1/zz_iothubenrichment_types.go b/apis/devices/v1beta1/zz_iothubenrichment_types.go index 7838ed4dc..55a718fed 100755 --- a/apis/devices/v1beta1/zz_iothubenrichment_types.go +++ b/apis/devices/v1beta1/zz_iothubenrichment_types.go @@ -19,7 +19,7 @@ type IOTHubEnrichmentInitParameters struct { EndpointNames []*string `json:"endpointNames,omitempty" tf:"endpoint_names,omitempty"` // The IoTHub name of the enrichment. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devices/v1beta1.IOTHub + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devices/v1beta2.IOTHub IOTHubName *string `json:"iothubName,omitempty" tf:"iothub_name,omitempty"` // Reference to a IOTHub in devices to populate iothubName. @@ -77,7 +77,7 @@ type IOTHubEnrichmentParameters struct { EndpointNames []*string `json:"endpointNames,omitempty" tf:"endpoint_names,omitempty"` // The IoTHub name of the enrichment. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devices/v1beta1.IOTHub + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devices/v1beta2.IOTHub // +kubebuilder:validation:Optional IOTHubName *string `json:"iothubName,omitempty" tf:"iothub_name,omitempty"` diff --git a/apis/devices/v1beta1/zz_iothubfallbackroute_types.go b/apis/devices/v1beta1/zz_iothubfallbackroute_types.go index 67d0ad91e..0dbf56e7d 100755 --- a/apis/devices/v1beta1/zz_iothubfallbackroute_types.go +++ b/apis/devices/v1beta1/zz_iothubfallbackroute_types.go @@ -85,7 +85,7 @@ type IOTHubFallbackRouteParameters struct { EndpointNamesSelector *v1.Selector `json:"endpointNamesSelector,omitempty" tf:"-"` // The name of the IoTHub to which this Fallback Route belongs. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devices/v1beta1.IOTHub + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devices/v1beta2.IOTHub // +kubebuilder:validation:Optional IOTHubName *string `json:"iothubName,omitempty" tf:"iothub_name,omitempty"` diff --git a/apis/devices/v1beta1/zz_iothubroute_types.go b/apis/devices/v1beta1/zz_iothubroute_types.go index 8094a682c..0cad62555 100755 --- a/apis/devices/v1beta1/zz_iothubroute_types.go +++ b/apis/devices/v1beta1/zz_iothubroute_types.go @@ -67,7 +67,7 @@ type IOTHubRouteParameters struct { EndpointNames []*string `json:"endpointNames,omitempty" tf:"endpoint_names,omitempty"` // The name of the IoTHub to which this Route belongs. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devices/v1beta1.IOTHub + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devices/v1beta2.IOTHub // +kubebuilder:validation:Optional IOTHubName *string `json:"iothubName,omitempty" tf:"iothub_name,omitempty"` diff --git a/apis/devices/v1beta1/zz_iothubsharedaccesspolicy_types.go b/apis/devices/v1beta1/zz_iothubsharedaccesspolicy_types.go index 725202995..1cbcda7b0 100755 --- a/apis/devices/v1beta1/zz_iothubsharedaccesspolicy_types.go +++ b/apis/devices/v1beta1/zz_iothubsharedaccesspolicy_types.go @@ -59,7 +59,7 @@ type IOTHubSharedAccessPolicyParameters struct { DeviceConnect *bool `json:"deviceConnect,omitempty" tf:"device_connect,omitempty"` // The name of the IoTHub to which this Shared Access Policy belongs. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devices/v1beta1.IOTHub + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devices/v1beta2.IOTHub // +kubebuilder:validation:Optional IOTHubName *string `json:"iothubName,omitempty" tf:"iothub_name,omitempty"` diff --git a/apis/devices/v1beta2/zz_generated.conversion_hubs.go b/apis/devices/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 000000000..bc51b7168 --- /dev/null +++ b/apis/devices/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,13 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *IOTHub) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *IOTHubDPS) Hub() {} diff --git a/apis/devices/v1beta2/zz_generated.deepcopy.go b/apis/devices/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..c1309093e --- /dev/null +++ b/apis/devices/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,2174 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CloudToDeviceInitParameters) DeepCopyInto(out *CloudToDeviceInitParameters) { + *out = *in + if in.DefaultTTL != nil { + in, out := &in.DefaultTTL, &out.DefaultTTL + *out = new(string) + **out = **in + } + if in.Feedback != nil { + in, out := &in.Feedback, &out.Feedback + *out = make([]FeedbackInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MaxDeliveryCount != nil { + in, out := &in.MaxDeliveryCount, &out.MaxDeliveryCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudToDeviceInitParameters. +func (in *CloudToDeviceInitParameters) DeepCopy() *CloudToDeviceInitParameters { + if in == nil { + return nil + } + out := new(CloudToDeviceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudToDeviceObservation) DeepCopyInto(out *CloudToDeviceObservation) { + *out = *in + if in.DefaultTTL != nil { + in, out := &in.DefaultTTL, &out.DefaultTTL + *out = new(string) + **out = **in + } + if in.Feedback != nil { + in, out := &in.Feedback, &out.Feedback + *out = make([]FeedbackObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MaxDeliveryCount != nil { + in, out := &in.MaxDeliveryCount, &out.MaxDeliveryCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudToDeviceObservation. +func (in *CloudToDeviceObservation) DeepCopy() *CloudToDeviceObservation { + if in == nil { + return nil + } + out := new(CloudToDeviceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CloudToDeviceParameters) DeepCopyInto(out *CloudToDeviceParameters) { + *out = *in + if in.DefaultTTL != nil { + in, out := &in.DefaultTTL, &out.DefaultTTL + *out = new(string) + **out = **in + } + if in.Feedback != nil { + in, out := &in.Feedback, &out.Feedback + *out = make([]FeedbackParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MaxDeliveryCount != nil { + in, out := &in.MaxDeliveryCount, &out.MaxDeliveryCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudToDeviceParameters. +func (in *CloudToDeviceParameters) DeepCopy() *CloudToDeviceParameters { + if in == nil { + return nil + } + out := new(CloudToDeviceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointInitParameters) DeepCopyInto(out *EndpointInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointInitParameters. +func (in *EndpointInitParameters) DeepCopy() *EndpointInitParameters { + if in == nil { + return nil + } + out := new(EndpointInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EndpointObservation) DeepCopyInto(out *EndpointObservation) { + *out = *in + if in.AuthenticationType != nil { + in, out := &in.AuthenticationType, &out.AuthenticationType + *out = new(string) + **out = **in + } + if in.BatchFrequencyInSeconds != nil { + in, out := &in.BatchFrequencyInSeconds, &out.BatchFrequencyInSeconds + *out = new(float64) + **out = **in + } + if in.ContainerName != nil { + in, out := &in.ContainerName, &out.ContainerName + *out = new(string) + **out = **in + } + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.EndpointURI != nil { + in, out := &in.EndpointURI, &out.EndpointURI + *out = new(string) + **out = **in + } + if in.EntityPath != nil { + in, out := &in.EntityPath, &out.EntityPath + *out = new(string) + **out = **in + } + if in.FileNameFormat != nil { + in, out := &in.FileNameFormat, &out.FileNameFormat + *out = new(string) + **out = **in + } + if in.IdentityID != nil { + in, out := &in.IdentityID, &out.IdentityID + *out = new(string) + **out = **in + } + if in.MaxChunkSizeInBytes != nil { + in, out := &in.MaxChunkSizeInBytes, &out.MaxChunkSizeInBytes + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointObservation. +func (in *EndpointObservation) DeepCopy() *EndpointObservation { + if in == nil { + return nil + } + out := new(EndpointObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EndpointParameters) DeepCopyInto(out *EndpointParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointParameters. +func (in *EndpointParameters) DeepCopy() *EndpointParameters { + if in == nil { + return nil + } + out := new(EndpointParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnrichmentInitParameters) DeepCopyInto(out *EnrichmentInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnrichmentInitParameters. +func (in *EnrichmentInitParameters) DeepCopy() *EnrichmentInitParameters { + if in == nil { + return nil + } + out := new(EnrichmentInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnrichmentObservation) DeepCopyInto(out *EnrichmentObservation) { + *out = *in + if in.EndpointNames != nil { + in, out := &in.EndpointNames, &out.EndpointNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnrichmentObservation. +func (in *EnrichmentObservation) DeepCopy() *EnrichmentObservation { + if in == nil { + return nil + } + out := new(EnrichmentObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EnrichmentParameters) DeepCopyInto(out *EnrichmentParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnrichmentParameters. +func (in *EnrichmentParameters) DeepCopy() *EnrichmentParameters { + if in == nil { + return nil + } + out := new(EnrichmentParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FallbackRouteInitParameters) DeepCopyInto(out *FallbackRouteInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FallbackRouteInitParameters. +func (in *FallbackRouteInitParameters) DeepCopy() *FallbackRouteInitParameters { + if in == nil { + return nil + } + out := new(FallbackRouteInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FallbackRouteObservation) DeepCopyInto(out *FallbackRouteObservation) { + *out = *in + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.EndpointNames != nil { + in, out := &in.EndpointNames, &out.EndpointNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FallbackRouteObservation. 
+func (in *FallbackRouteObservation) DeepCopy() *FallbackRouteObservation { + if in == nil { + return nil + } + out := new(FallbackRouteObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FallbackRouteParameters) DeepCopyInto(out *FallbackRouteParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FallbackRouteParameters. +func (in *FallbackRouteParameters) DeepCopy() *FallbackRouteParameters { + if in == nil { + return nil + } + out := new(FallbackRouteParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeedbackInitParameters) DeepCopyInto(out *FeedbackInitParameters) { + *out = *in + if in.LockDuration != nil { + in, out := &in.LockDuration, &out.LockDuration + *out = new(string) + **out = **in + } + if in.MaxDeliveryCount != nil { + in, out := &in.MaxDeliveryCount, &out.MaxDeliveryCount + *out = new(float64) + **out = **in + } + if in.TimeToLive != nil { + in, out := &in.TimeToLive, &out.TimeToLive + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeedbackInitParameters. +func (in *FeedbackInitParameters) DeepCopy() *FeedbackInitParameters { + if in == nil { + return nil + } + out := new(FeedbackInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FeedbackObservation) DeepCopyInto(out *FeedbackObservation) { + *out = *in + if in.LockDuration != nil { + in, out := &in.LockDuration, &out.LockDuration + *out = new(string) + **out = **in + } + if in.MaxDeliveryCount != nil { + in, out := &in.MaxDeliveryCount, &out.MaxDeliveryCount + *out = new(float64) + **out = **in + } + if in.TimeToLive != nil { + in, out := &in.TimeToLive, &out.TimeToLive + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeedbackObservation. +func (in *FeedbackObservation) DeepCopy() *FeedbackObservation { + if in == nil { + return nil + } + out := new(FeedbackObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeedbackParameters) DeepCopyInto(out *FeedbackParameters) { + *out = *in + if in.LockDuration != nil { + in, out := &in.LockDuration, &out.LockDuration + *out = new(string) + **out = **in + } + if in.MaxDeliveryCount != nil { + in, out := &in.MaxDeliveryCount, &out.MaxDeliveryCount + *out = new(float64) + **out = **in + } + if in.TimeToLive != nil { + in, out := &in.TimeToLive, &out.TimeToLive + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeedbackParameters. +func (in *FeedbackParameters) DeepCopy() *FeedbackParameters { + if in == nil { + return nil + } + out := new(FeedbackParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FileUploadInitParameters) DeepCopyInto(out *FileUploadInitParameters) { + *out = *in + if in.AuthenticationType != nil { + in, out := &in.AuthenticationType, &out.AuthenticationType + *out = new(string) + **out = **in + } + if in.ContainerName != nil { + in, out := &in.ContainerName, &out.ContainerName + *out = new(string) + **out = **in + } + if in.DefaultTTL != nil { + in, out := &in.DefaultTTL, &out.DefaultTTL + *out = new(string) + **out = **in + } + if in.IdentityID != nil { + in, out := &in.IdentityID, &out.IdentityID + *out = new(string) + **out = **in + } + if in.LockDuration != nil { + in, out := &in.LockDuration, &out.LockDuration + *out = new(string) + **out = **in + } + if in.MaxDeliveryCount != nil { + in, out := &in.MaxDeliveryCount, &out.MaxDeliveryCount + *out = new(float64) + **out = **in + } + if in.Notifications != nil { + in, out := &in.Notifications, &out.Notifications + *out = new(bool) + **out = **in + } + if in.SASTTL != nil { + in, out := &in.SASTTL, &out.SASTTL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileUploadInitParameters. +func (in *FileUploadInitParameters) DeepCopy() *FileUploadInitParameters { + if in == nil { + return nil + } + out := new(FileUploadInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FileUploadObservation) DeepCopyInto(out *FileUploadObservation) { + *out = *in + if in.AuthenticationType != nil { + in, out := &in.AuthenticationType, &out.AuthenticationType + *out = new(string) + **out = **in + } + if in.ContainerName != nil { + in, out := &in.ContainerName, &out.ContainerName + *out = new(string) + **out = **in + } + if in.DefaultTTL != nil { + in, out := &in.DefaultTTL, &out.DefaultTTL + *out = new(string) + **out = **in + } + if in.IdentityID != nil { + in, out := &in.IdentityID, &out.IdentityID + *out = new(string) + **out = **in + } + if in.LockDuration != nil { + in, out := &in.LockDuration, &out.LockDuration + *out = new(string) + **out = **in + } + if in.MaxDeliveryCount != nil { + in, out := &in.MaxDeliveryCount, &out.MaxDeliveryCount + *out = new(float64) + **out = **in + } + if in.Notifications != nil { + in, out := &in.Notifications, &out.Notifications + *out = new(bool) + **out = **in + } + if in.SASTTL != nil { + in, out := &in.SASTTL, &out.SASTTL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileUploadObservation. +func (in *FileUploadObservation) DeepCopy() *FileUploadObservation { + if in == nil { + return nil + } + out := new(FileUploadObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FileUploadParameters) DeepCopyInto(out *FileUploadParameters) { + *out = *in + if in.AuthenticationType != nil { + in, out := &in.AuthenticationType, &out.AuthenticationType + *out = new(string) + **out = **in + } + out.ConnectionStringSecretRef = in.ConnectionStringSecretRef + if in.ContainerName != nil { + in, out := &in.ContainerName, &out.ContainerName + *out = new(string) + **out = **in + } + if in.DefaultTTL != nil { + in, out := &in.DefaultTTL, &out.DefaultTTL + *out = new(string) + **out = **in + } + if in.IdentityID != nil { + in, out := &in.IdentityID, &out.IdentityID + *out = new(string) + **out = **in + } + if in.LockDuration != nil { + in, out := &in.LockDuration, &out.LockDuration + *out = new(string) + **out = **in + } + if in.MaxDeliveryCount != nil { + in, out := &in.MaxDeliveryCount, &out.MaxDeliveryCount + *out = new(float64) + **out = **in + } + if in.Notifications != nil { + in, out := &in.Notifications, &out.Notifications + *out = new(bool) + **out = **in + } + if in.SASTTL != nil { + in, out := &in.SASTTL, &out.SASTTL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileUploadParameters. +func (in *FileUploadParameters) DeepCopy() *FileUploadParameters { + if in == nil { + return nil + } + out := new(FileUploadParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IOTHub) DeepCopyInto(out *IOTHub) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IOTHub. 
+func (in *IOTHub) DeepCopy() *IOTHub { + if in == nil { + return nil + } + out := new(IOTHub) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IOTHub) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IOTHubDPS) DeepCopyInto(out *IOTHubDPS) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IOTHubDPS. +func (in *IOTHubDPS) DeepCopy() *IOTHubDPS { + if in == nil { + return nil + } + out := new(IOTHubDPS) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IOTHubDPS) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IOTHubDPSInitParameters) DeepCopyInto(out *IOTHubDPSInitParameters) { + *out = *in + if in.AllocationPolicy != nil { + in, out := &in.AllocationPolicy, &out.AllocationPolicy + *out = new(string) + **out = **in + } + if in.DataResidencyEnabled != nil { + in, out := &in.DataResidencyEnabled, &out.DataResidencyEnabled + *out = new(bool) + **out = **in + } + if in.IPFilterRule != nil { + in, out := &in.IPFilterRule, &out.IPFilterRule + *out = make([]IPFilterRuleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LinkedHub != nil { + in, out := &in.LinkedHub, &out.LinkedHub + *out = make([]LinkedHubInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(IOTHubDPSSkuInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IOTHubDPSInitParameters. +func (in *IOTHubDPSInitParameters) DeepCopy() *IOTHubDPSInitParameters { + if in == nil { + return nil + } + out := new(IOTHubDPSInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IOTHubDPSList) DeepCopyInto(out *IOTHubDPSList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]IOTHubDPS, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IOTHubDPSList. +func (in *IOTHubDPSList) DeepCopy() *IOTHubDPSList { + if in == nil { + return nil + } + out := new(IOTHubDPSList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IOTHubDPSList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IOTHubDPSObservation) DeepCopyInto(out *IOTHubDPSObservation) { + *out = *in + if in.AllocationPolicy != nil { + in, out := &in.AllocationPolicy, &out.AllocationPolicy + *out = new(string) + **out = **in + } + if in.DataResidencyEnabled != nil { + in, out := &in.DataResidencyEnabled, &out.DataResidencyEnabled + *out = new(bool) + **out = **in + } + if in.DeviceProvisioningHostName != nil { + in, out := &in.DeviceProvisioningHostName, &out.DeviceProvisioningHostName + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IDScope != nil { + in, out := &in.IDScope, &out.IDScope + *out = new(string) + **out = **in + } + if in.IPFilterRule != nil { + in, out := &in.IPFilterRule, &out.IPFilterRule + *out = make([]IPFilterRuleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LinkedHub != nil { + in, out := &in.LinkedHub, &out.LinkedHub + *out = make([]LinkedHubObservation, len(*in)) + for i := range *in { + 
(*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ServiceOperationsHostName != nil { + in, out := &in.ServiceOperationsHostName, &out.ServiceOperationsHostName + *out = new(string) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(IOTHubDPSSkuObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IOTHubDPSObservation. +func (in *IOTHubDPSObservation) DeepCopy() *IOTHubDPSObservation { + if in == nil { + return nil + } + out := new(IOTHubDPSObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IOTHubDPSParameters) DeepCopyInto(out *IOTHubDPSParameters) { + *out = *in + if in.AllocationPolicy != nil { + in, out := &in.AllocationPolicy, &out.AllocationPolicy + *out = new(string) + **out = **in + } + if in.DataResidencyEnabled != nil { + in, out := &in.DataResidencyEnabled, &out.DataResidencyEnabled + *out = new(bool) + **out = **in + } + if in.IPFilterRule != nil { + in, out := &in.IPFilterRule, &out.IPFilterRule + *out = make([]IPFilterRuleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LinkedHub != nil { + in, out := &in.LinkedHub, &out.LinkedHub + *out = make([]LinkedHubParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(IOTHubDPSSkuParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, 
creating a new IOTHubDPSParameters. +func (in *IOTHubDPSParameters) DeepCopy() *IOTHubDPSParameters { + if in == nil { + return nil + } + out := new(IOTHubDPSParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IOTHubDPSSkuInitParameters) DeepCopyInto(out *IOTHubDPSSkuInitParameters) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IOTHubDPSSkuInitParameters. +func (in *IOTHubDPSSkuInitParameters) DeepCopy() *IOTHubDPSSkuInitParameters { + if in == nil { + return nil + } + out := new(IOTHubDPSSkuInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IOTHubDPSSkuObservation) DeepCopyInto(out *IOTHubDPSSkuObservation) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IOTHubDPSSkuObservation. +func (in *IOTHubDPSSkuObservation) DeepCopy() *IOTHubDPSSkuObservation { + if in == nil { + return nil + } + out := new(IOTHubDPSSkuObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IOTHubDPSSkuParameters) DeepCopyInto(out *IOTHubDPSSkuParameters) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IOTHubDPSSkuParameters. +func (in *IOTHubDPSSkuParameters) DeepCopy() *IOTHubDPSSkuParameters { + if in == nil { + return nil + } + out := new(IOTHubDPSSkuParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IOTHubDPSSpec) DeepCopyInto(out *IOTHubDPSSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IOTHubDPSSpec. +func (in *IOTHubDPSSpec) DeepCopy() *IOTHubDPSSpec { + if in == nil { + return nil + } + out := new(IOTHubDPSSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IOTHubDPSStatus) DeepCopyInto(out *IOTHubDPSStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IOTHubDPSStatus. +func (in *IOTHubDPSStatus) DeepCopy() *IOTHubDPSStatus { + if in == nil { + return nil + } + out := new(IOTHubDPSStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IOTHubInitParameters) DeepCopyInto(out *IOTHubInitParameters) { + *out = *in + if in.CloudToDevice != nil { + in, out := &in.CloudToDevice, &out.CloudToDevice + *out = new(CloudToDeviceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.EventHubPartitionCount != nil { + in, out := &in.EventHubPartitionCount, &out.EventHubPartitionCount + *out = new(float64) + **out = **in + } + if in.EventHubRetentionInDays != nil { + in, out := &in.EventHubRetentionInDays, &out.EventHubRetentionInDays + *out = new(float64) + **out = **in + } + if in.FileUpload != nil { + in, out := &in.FileUpload, &out.FileUpload + *out = new(FileUploadInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LocalAuthenticationEnabled != nil { + in, out := &in.LocalAuthenticationEnabled, &out.LocalAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MinTLSVersion != nil { + in, out := &in.MinTLSVersion, &out.MinTLSVersion + *out = new(string) + **out = **in + } + if in.NetworkRuleSet != nil { + in, out := &in.NetworkRuleSet, &out.NetworkRuleSet + *out = make([]NetworkRuleSetInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(SkuInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = 
outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IOTHubInitParameters. +func (in *IOTHubInitParameters) DeepCopy() *IOTHubInitParameters { + if in == nil { + return nil + } + out := new(IOTHubInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IOTHubList) DeepCopyInto(out *IOTHubList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]IOTHub, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IOTHubList. +func (in *IOTHubList) DeepCopy() *IOTHubList { + if in == nil { + return nil + } + out := new(IOTHubList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IOTHubList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IOTHubObservation) DeepCopyInto(out *IOTHubObservation) { + *out = *in + if in.CloudToDevice != nil { + in, out := &in.CloudToDevice, &out.CloudToDevice + *out = new(CloudToDeviceObservation) + (*in).DeepCopyInto(*out) + } + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = make([]EndpointObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Enrichment != nil { + in, out := &in.Enrichment, &out.Enrichment + *out = make([]EnrichmentObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EventHubEventsEndpoint != nil { + in, out := &in.EventHubEventsEndpoint, &out.EventHubEventsEndpoint + *out = new(string) + **out = **in + } + if in.EventHubEventsNamespace != nil { + in, out := &in.EventHubEventsNamespace, &out.EventHubEventsNamespace + *out = new(string) + **out = **in + } + if in.EventHubEventsPath != nil { + in, out := &in.EventHubEventsPath, &out.EventHubEventsPath + *out = new(string) + **out = **in + } + if in.EventHubOperationsEndpoint != nil { + in, out := &in.EventHubOperationsEndpoint, &out.EventHubOperationsEndpoint + *out = new(string) + **out = **in + } + if in.EventHubOperationsPath != nil { + in, out := &in.EventHubOperationsPath, &out.EventHubOperationsPath + *out = new(string) + **out = **in + } + if in.EventHubPartitionCount != nil { + in, out := &in.EventHubPartitionCount, &out.EventHubPartitionCount + *out = new(float64) + **out = **in + } + if in.EventHubRetentionInDays != nil { + in, out := &in.EventHubRetentionInDays, &out.EventHubRetentionInDays + *out = new(float64) + **out = **in + } + if in.FallbackRoute != nil { + in, out := &in.FallbackRoute, &out.FallbackRoute + *out = new(FallbackRouteObservation) + (*in).DeepCopyInto(*out) + } + if in.FileUpload != nil { + in, out := &in.FileUpload, &out.FileUpload + *out = new(FileUploadObservation) + (*in).DeepCopyInto(*out) + } + if in.HostName != nil { + in, out := &in.HostName, 
&out.HostName + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.LocalAuthenticationEnabled != nil { + in, out := &in.LocalAuthenticationEnabled, &out.LocalAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MinTLSVersion != nil { + in, out := &in.MinTLSVersion, &out.MinTLSVersion + *out = new(string) + **out = **in + } + if in.NetworkRuleSet != nil { + in, out := &in.NetworkRuleSet, &out.NetworkRuleSet + *out = make([]NetworkRuleSetObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Route != nil { + in, out := &in.Route, &out.Route + *out = make([]RouteObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SharedAccessPolicy != nil { + in, out := &in.SharedAccessPolicy, &out.SharedAccessPolicy + *out = make([]SharedAccessPolicyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(SkuObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Type != 
nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IOTHubObservation. +func (in *IOTHubObservation) DeepCopy() *IOTHubObservation { + if in == nil { + return nil + } + out := new(IOTHubObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IOTHubParameters) DeepCopyInto(out *IOTHubParameters) { + *out = *in + if in.CloudToDevice != nil { + in, out := &in.CloudToDevice, &out.CloudToDevice + *out = new(CloudToDeviceParameters) + (*in).DeepCopyInto(*out) + } + if in.EventHubPartitionCount != nil { + in, out := &in.EventHubPartitionCount, &out.EventHubPartitionCount + *out = new(float64) + **out = **in + } + if in.EventHubRetentionInDays != nil { + in, out := &in.EventHubRetentionInDays, &out.EventHubRetentionInDays + *out = new(float64) + **out = **in + } + if in.FileUpload != nil { + in, out := &in.FileUpload, &out.FileUpload + *out = new(FileUploadParameters) + (*in).DeepCopyInto(*out) + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.LocalAuthenticationEnabled != nil { + in, out := &in.LocalAuthenticationEnabled, &out.LocalAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MinTLSVersion != nil { + in, out := &in.MinTLSVersion, &out.MinTLSVersion + *out = new(string) + **out = **in + } + if in.NetworkRuleSet != nil { + in, out := &in.NetworkRuleSet, &out.NetworkRuleSet + *out = make([]NetworkRuleSetParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + 
*out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(SkuParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IOTHubParameters. +func (in *IOTHubParameters) DeepCopy() *IOTHubParameters { + if in == nil { + return nil + } + out := new(IOTHubParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IOTHubSpec) DeepCopyInto(out *IOTHubSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IOTHubSpec. +func (in *IOTHubSpec) DeepCopy() *IOTHubSpec { + if in == nil { + return nil + } + out := new(IOTHubSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IOTHubStatus) DeepCopyInto(out *IOTHubStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IOTHubStatus. +func (in *IOTHubStatus) DeepCopy() *IOTHubStatus { + if in == nil { + return nil + } + out := new(IOTHubStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPFilterRuleInitParameters) DeepCopyInto(out *IPFilterRuleInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.IPMask != nil { + in, out := &in.IPMask, &out.IPMask + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPFilterRuleInitParameters. +func (in *IPFilterRuleInitParameters) DeepCopy() *IPFilterRuleInitParameters { + if in == nil { + return nil + } + out := new(IPFilterRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IPFilterRuleObservation) DeepCopyInto(out *IPFilterRuleObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.IPMask != nil { + in, out := &in.IPMask, &out.IPMask + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPFilterRuleObservation. +func (in *IPFilterRuleObservation) DeepCopy() *IPFilterRuleObservation { + if in == nil { + return nil + } + out := new(IPFilterRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPFilterRuleParameters) DeepCopyInto(out *IPFilterRuleParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.IPMask != nil { + in, out := &in.IPMask, &out.IPMask + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPFilterRuleParameters. +func (in *IPFilterRuleParameters) DeepCopy() *IPFilterRuleParameters { + if in == nil { + return nil + } + out := new(IPFilterRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IPRuleInitParameters) DeepCopyInto(out *IPRuleInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.IPMask != nil { + in, out := &in.IPMask, &out.IPMask + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPRuleInitParameters. +func (in *IPRuleInitParameters) DeepCopy() *IPRuleInitParameters { + if in == nil { + return nil + } + out := new(IPRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPRuleObservation) DeepCopyInto(out *IPRuleObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.IPMask != nil { + in, out := &in.IPMask, &out.IPMask + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPRuleObservation. +func (in *IPRuleObservation) DeepCopy() *IPRuleObservation { + if in == nil { + return nil + } + out := new(IPRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IPRuleParameters) DeepCopyInto(out *IPRuleParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.IPMask != nil { + in, out := &in.IPMask, &out.IPMask + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPRuleParameters. +func (in *IPRuleParameters) DeepCopy() *IPRuleParameters { + if in == nil { + return nil + } + out := new(IPRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityInitParameters) DeepCopyInto(out *IdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityInitParameters. +func (in *IdentityInitParameters) DeepCopy() *IdentityInitParameters { + if in == nil { + return nil + } + out := new(IdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentityObservation) DeepCopyInto(out *IdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityObservation. +func (in *IdentityObservation) DeepCopy() *IdentityObservation { + if in == nil { + return nil + } + out := new(IdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityParameters) DeepCopyInto(out *IdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityParameters. +func (in *IdentityParameters) DeepCopy() *IdentityParameters { + if in == nil { + return nil + } + out := new(IdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinkedHubInitParameters) DeepCopyInto(out *LinkedHubInitParameters) { + *out = *in + if in.AllocationWeight != nil { + in, out := &in.AllocationWeight, &out.AllocationWeight + *out = new(float64) + **out = **in + } + if in.ApplyAllocationPolicy != nil { + in, out := &in.ApplyAllocationPolicy, &out.ApplyAllocationPolicy + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedHubInitParameters. +func (in *LinkedHubInitParameters) DeepCopy() *LinkedHubInitParameters { + if in == nil { + return nil + } + out := new(LinkedHubInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkedHubObservation) DeepCopyInto(out *LinkedHubObservation) { + *out = *in + if in.AllocationWeight != nil { + in, out := &in.AllocationWeight, &out.AllocationWeight + *out = new(float64) + **out = **in + } + if in.ApplyAllocationPolicy != nil { + in, out := &in.ApplyAllocationPolicy, &out.ApplyAllocationPolicy + *out = new(bool) + **out = **in + } + if in.HostName != nil { + in, out := &in.HostName, &out.HostName + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedHubObservation. +func (in *LinkedHubObservation) DeepCopy() *LinkedHubObservation { + if in == nil { + return nil + } + out := new(LinkedHubObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinkedHubParameters) DeepCopyInto(out *LinkedHubParameters) { + *out = *in + if in.AllocationWeight != nil { + in, out := &in.AllocationWeight, &out.AllocationWeight + *out = new(float64) + **out = **in + } + if in.ApplyAllocationPolicy != nil { + in, out := &in.ApplyAllocationPolicy, &out.ApplyAllocationPolicy + *out = new(bool) + **out = **in + } + out.ConnectionStringSecretRef = in.ConnectionStringSecretRef + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedHubParameters. +func (in *LinkedHubParameters) DeepCopy() *LinkedHubParameters { + if in == nil { + return nil + } + out := new(LinkedHubParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkRuleSetInitParameters) DeepCopyInto(out *NetworkRuleSetInitParameters) { + *out = *in + if in.ApplyToBuiltinEventHubEndpoint != nil { + in, out := &in.ApplyToBuiltinEventHubEndpoint, &out.ApplyToBuiltinEventHubEndpoint + *out = new(bool) + **out = **in + } + if in.DefaultAction != nil { + in, out := &in.DefaultAction, &out.DefaultAction + *out = new(string) + **out = **in + } + if in.IPRule != nil { + in, out := &in.IPRule, &out.IPRule + *out = make([]IPRuleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkRuleSetInitParameters. +func (in *NetworkRuleSetInitParameters) DeepCopy() *NetworkRuleSetInitParameters { + if in == nil { + return nil + } + out := new(NetworkRuleSetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkRuleSetObservation) DeepCopyInto(out *NetworkRuleSetObservation) { + *out = *in + if in.ApplyToBuiltinEventHubEndpoint != nil { + in, out := &in.ApplyToBuiltinEventHubEndpoint, &out.ApplyToBuiltinEventHubEndpoint + *out = new(bool) + **out = **in + } + if in.DefaultAction != nil { + in, out := &in.DefaultAction, &out.DefaultAction + *out = new(string) + **out = **in + } + if in.IPRule != nil { + in, out := &in.IPRule, &out.IPRule + *out = make([]IPRuleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkRuleSetObservation. +func (in *NetworkRuleSetObservation) DeepCopy() *NetworkRuleSetObservation { + if in == nil { + return nil + } + out := new(NetworkRuleSetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkRuleSetParameters) DeepCopyInto(out *NetworkRuleSetParameters) { + *out = *in + if in.ApplyToBuiltinEventHubEndpoint != nil { + in, out := &in.ApplyToBuiltinEventHubEndpoint, &out.ApplyToBuiltinEventHubEndpoint + *out = new(bool) + **out = **in + } + if in.DefaultAction != nil { + in, out := &in.DefaultAction, &out.DefaultAction + *out = new(string) + **out = **in + } + if in.IPRule != nil { + in, out := &in.IPRule, &out.IPRule + *out = make([]IPRuleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkRuleSetParameters. +func (in *NetworkRuleSetParameters) DeepCopy() *NetworkRuleSetParameters { + if in == nil { + return nil + } + out := new(NetworkRuleSetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RouteInitParameters) DeepCopyInto(out *RouteInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteInitParameters. +func (in *RouteInitParameters) DeepCopy() *RouteInitParameters { + if in == nil { + return nil + } + out := new(RouteInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteObservation) DeepCopyInto(out *RouteObservation) { + *out = *in + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.EndpointNames != nil { + in, out := &in.EndpointNames, &out.EndpointNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteObservation. +func (in *RouteObservation) DeepCopy() *RouteObservation { + if in == nil { + return nil + } + out := new(RouteObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteParameters) DeepCopyInto(out *RouteParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteParameters. 
+func (in *RouteParameters) DeepCopy() *RouteParameters { + if in == nil { + return nil + } + out := new(RouteParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SharedAccessPolicyInitParameters) DeepCopyInto(out *SharedAccessPolicyInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharedAccessPolicyInitParameters. +func (in *SharedAccessPolicyInitParameters) DeepCopy() *SharedAccessPolicyInitParameters { + if in == nil { + return nil + } + out := new(SharedAccessPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SharedAccessPolicyObservation) DeepCopyInto(out *SharedAccessPolicyObservation) { + *out = *in + if in.KeyName != nil { + in, out := &in.KeyName, &out.KeyName + *out = new(string) + **out = **in + } + if in.Permissions != nil { + in, out := &in.Permissions, &out.Permissions + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharedAccessPolicyObservation. +func (in *SharedAccessPolicyObservation) DeepCopy() *SharedAccessPolicyObservation { + if in == nil { + return nil + } + out := new(SharedAccessPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SharedAccessPolicyParameters) DeepCopyInto(out *SharedAccessPolicyParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharedAccessPolicyParameters. 
+func (in *SharedAccessPolicyParameters) DeepCopy() *SharedAccessPolicyParameters { + if in == nil { + return nil + } + out := new(SharedAccessPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SkuInitParameters) DeepCopyInto(out *SkuInitParameters) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SkuInitParameters. +func (in *SkuInitParameters) DeepCopy() *SkuInitParameters { + if in == nil { + return nil + } + out := new(SkuInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SkuObservation) DeepCopyInto(out *SkuObservation) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SkuObservation. +func (in *SkuObservation) DeepCopy() *SkuObservation { + if in == nil { + return nil + } + out := new(SkuObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SkuParameters) DeepCopyInto(out *SkuParameters) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SkuParameters. +func (in *SkuParameters) DeepCopy() *SkuParameters { + if in == nil { + return nil + } + out := new(SkuParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/devices/v1beta2/zz_generated.managed.go b/apis/devices/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..ee81d5555 --- /dev/null +++ b/apis/devices/v1beta2/zz_generated.managed.go @@ -0,0 +1,128 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this IOTHub. +func (mg *IOTHub) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this IOTHub. +func (mg *IOTHub) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this IOTHub. +func (mg *IOTHub) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this IOTHub. +func (mg *IOTHub) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this IOTHub. +func (mg *IOTHub) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this IOTHub. 
+func (mg *IOTHub) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this IOTHub. +func (mg *IOTHub) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this IOTHub. +func (mg *IOTHub) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this IOTHub. +func (mg *IOTHub) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this IOTHub. +func (mg *IOTHub) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this IOTHub. +func (mg *IOTHub) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this IOTHub. +func (mg *IOTHub) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this IOTHubDPS. +func (mg *IOTHubDPS) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this IOTHubDPS. +func (mg *IOTHubDPS) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this IOTHubDPS. +func (mg *IOTHubDPS) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this IOTHubDPS. +func (mg *IOTHubDPS) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this IOTHubDPS. +func (mg *IOTHubDPS) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this IOTHubDPS. 
+func (mg *IOTHubDPS) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this IOTHubDPS. +func (mg *IOTHubDPS) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this IOTHubDPS. +func (mg *IOTHubDPS) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this IOTHubDPS. +func (mg *IOTHubDPS) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this IOTHubDPS. +func (mg *IOTHubDPS) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this IOTHubDPS. +func (mg *IOTHubDPS) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this IOTHubDPS. +func (mg *IOTHubDPS) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/devices/v1beta2/zz_generated.managedlist.go b/apis/devices/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..3aa7abf9c --- /dev/null +++ b/apis/devices/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,26 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this IOTHubDPSList. +func (l *IOTHubDPSList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this IOTHubList. 
+func (l *IOTHubList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/devices/v1beta2/zz_generated.resolvers.go b/apis/devices/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..71fba078c --- /dev/null +++ b/apis/devices/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,80 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + client "sigs.k8s.io/controller-runtime/pkg/client" + + // ResolveReferences of this IOTHub. + apisresolver "github.com/upbound/provider-azure/internal/apis" +) + +func (mg *IOTHub) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + 
mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this IOTHubDPS. +func (mg *IOTHubDPS) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/devices/v1beta2/zz_groupversion_info.go b/apis/devices/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..4806a7bb2 --- /dev/null +++ b/apis/devices/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=devices.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
+const ( + CRDGroup = "devices.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/devices/v1beta2/zz_iothub_terraformed.go b/apis/devices/v1beta2/zz_iothub_terraformed.go new file mode 100755 index 000000000..839b98bae --- /dev/null +++ b/apis/devices/v1beta2/zz_iothub_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this IOTHub +func (mg *IOTHub) GetTerraformResourceType() string { + return "azurerm_iothub" +} + +// GetConnectionDetailsMapping for this IOTHub +func (tr *IOTHub) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"endpoint[*].connection_string": "status.atProvider.endpoint[*].connectionString", "file_upload[*].connection_string": "spec.forProvider.fileUpload[*].connectionStringSecretRef", "shared_access_policy[*].primary_key": "status.atProvider.sharedAccessPolicy[*].primaryKey", "shared_access_policy[*].secondary_key": "status.atProvider.sharedAccessPolicy[*].secondaryKey"} +} + +// GetObservation of this IOTHub +func (tr *IOTHub) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, 
json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this IOTHub +func (tr *IOTHub) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this IOTHub +func (tr *IOTHub) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this IOTHub +func (tr *IOTHub) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this IOTHub +func (tr *IOTHub) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this IOTHub +func (tr *IOTHub) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this IOTHub +func (tr *IOTHub) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. 
As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this IOTHub using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *IOTHub) LateInitialize(attrs []byte) (bool, error) { + params := &IOTHubParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *IOTHub) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/devices/v1beta2/zz_iothub_types.go b/apis/devices/v1beta2/zz_iothub_types.go new file mode 100755 index 000000000..599fe139d --- /dev/null +++ b/apis/devices/v1beta2/zz_iothub_types.go @@ -0,0 +1,702 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CloudToDeviceInitParameters struct { + + // The default time to live for cloud-to-device messages, specified as an ISO 8601 timespan duration. This value must be between 1 minute and 48 hours. 
Defaults to PT1H. + DefaultTTL *string `json:"defaultTtl,omitempty" tf:"default_ttl,omitempty"` + + // A feedback block as defined below. + Feedback []FeedbackInitParameters `json:"feedback,omitempty" tf:"feedback,omitempty"` + + // The maximum delivery count for cloud-to-device per-device queues. This value must be between 1 and 100. Defaults to 10. + MaxDeliveryCount *float64 `json:"maxDeliveryCount,omitempty" tf:"max_delivery_count,omitempty"` +} + +type CloudToDeviceObservation struct { + + // The default time to live for cloud-to-device messages, specified as an ISO 8601 timespan duration. This value must be between 1 minute and 48 hours. Defaults to PT1H. + DefaultTTL *string `json:"defaultTtl,omitempty" tf:"default_ttl,omitempty"` + + // A feedback block as defined below. + Feedback []FeedbackObservation `json:"feedback,omitempty" tf:"feedback,omitempty"` + + // The maximum delivery count for cloud-to-device per-device queues. This value must be between 1 and 100. Defaults to 10. + MaxDeliveryCount *float64 `json:"maxDeliveryCount,omitempty" tf:"max_delivery_count,omitempty"` +} + +type CloudToDeviceParameters struct { + + // The default time to live for cloud-to-device messages, specified as an ISO 8601 timespan duration. This value must be between 1 minute and 48 hours. Defaults to PT1H. + // +kubebuilder:validation:Optional + DefaultTTL *string `json:"defaultTtl,omitempty" tf:"default_ttl,omitempty"` + + // A feedback block as defined below. + // +kubebuilder:validation:Optional + Feedback []FeedbackParameters `json:"feedback,omitempty" tf:"feedback,omitempty"` + + // The maximum delivery count for cloud-to-device per-device queues. This value must be between 1 and 100. Defaults to 10. 
+ // +kubebuilder:validation:Optional + MaxDeliveryCount *float64 `json:"maxDeliveryCount,omitempty" tf:"max_delivery_count,omitempty"` +} + +type EndpointInitParameters struct { +} + +type EndpointObservation struct { + + // The type used to authenticate against the endpoint. Possible values are keyBased and identityBased. Defaults to keyBased. + AuthenticationType *string `json:"authenticationType,omitempty" tf:"authentication_type,omitempty"` + + // Time interval at which blobs are written to storage. Value should be between 60 and 720 seconds. Default value is 300 seconds. This attribute is applicable for endpoint type AzureIotHub.StorageContainer. + BatchFrequencyInSeconds *float64 `json:"batchFrequencyInSeconds,omitempty" tf:"batch_frequency_in_seconds,omitempty"` + + // The name of storage container in the storage account. This attribute is mandatory for endpoint type AzureIotHub.StorageContainer. + ContainerName *string `json:"containerName,omitempty" tf:"container_name,omitempty"` + + // Encoding that is used to serialize messages to blobs. Supported values are Avro, AvroDeflate and JSON. Default value is Avro. This attribute is applicable for endpoint type AzureIotHub.StorageContainer. Changing this forces a new resource to be created. + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + // URI of the Service Bus or Event Hubs Namespace endpoint. This attribute can only be specified and is mandatory when authentication_type is identityBased for endpoint type AzureIotHub.ServiceBusQueue, AzureIotHub.ServiceBusTopic or AzureIotHub.EventHub. + EndpointURI *string `json:"endpointUri,omitempty" tf:"endpoint_uri,omitempty"` + + // Name of the Service Bus Queue/Topic or Event Hub. This attribute can only be specified and is mandatory when authentication_type is identityBased for endpoint type AzureIotHub.ServiceBusQueue, AzureIotHub.ServiceBusTopic or AzureIotHub.EventHub. 
+ EntityPath *string `json:"entityPath,omitempty" tf:"entity_path,omitempty"` + + // File name format for the blob. All parameters are mandatory but can be reordered. This attribute is applicable for endpoint type AzureIotHub.StorageContainer. Defaults to {iothub}/{partition}/{YYYY}/{MM}/{DD}/{HH}/{mm}. + FileNameFormat *string `json:"fileNameFormat,omitempty" tf:"file_name_format,omitempty"` + + // The ID of the User Managed Identity used to authenticate against the endpoint. + IdentityID *string `json:"identityId,omitempty" tf:"identity_id,omitempty"` + + // Maximum number of bytes for each blob written to storage. Value should be between 10485760(10MB) and 524288000(500MB). Default value is 314572800(300MB). This attribute is applicable for endpoint type AzureIotHub.StorageContainer. + MaxChunkSizeInBytes *float64 `json:"maxChunkSizeInBytes,omitempty" tf:"max_chunk_size_in_bytes,omitempty"` + + // The name of the endpoint. The name must be unique across endpoint types. The following names are reserved: events, operationsMonitoringEvents, fileNotifications and $default. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The resource group in which the endpoint will be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // The type of the endpoint. Possible values are AzureIotHub.StorageContainer, AzureIotHub.ServiceBusQueue, AzureIotHub.ServiceBusTopic or AzureIotHub.EventHub. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type EndpointParameters struct { +} + +type EnrichmentInitParameters struct { +} + +type EnrichmentObservation struct { + + // The list of endpoints which will be enriched. + EndpointNames []*string `json:"endpointNames,omitempty" tf:"endpoint_names,omitempty"` + + // The key of the enrichment. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The value of the enrichment. 
Value can be any static string, the name of the IoT Hub sending the message (use $iothubname) or information from the device twin (ex: $twin.tags.latitude) + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type EnrichmentParameters struct { +} + +type FallbackRouteInitParameters struct { +} + +type FallbackRouteObservation struct { + + // The condition that is evaluated to apply the routing rule. Defaults to true. For grammar, see: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-query-language. + Condition *string `json:"condition,omitempty" tf:"condition,omitempty"` + + // Used to specify whether the fallback route is enabled. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The endpoints to which messages that satisfy the condition are routed. Currently only 1 endpoint is allowed. + EndpointNames []*string `json:"endpointNames,omitempty" tf:"endpoint_names,omitempty"` + + // The source that the routing rule is to be applied to, such as DeviceMessages. Possible values include: Invalid, DeviceMessages, TwinChangeEvents, DeviceLifecycleEvents, DeviceConnectionStateEvents, DeviceJobLifecycleEvents and DigitalTwinChangeEvents. Defaults to DeviceMessages. + Source *string `json:"source,omitempty" tf:"source,omitempty"` +} + +type FallbackRouteParameters struct { +} + +type FeedbackInitParameters struct { + + // The lock duration for the file upload notifications queue, specified as an ISO 8601 timespan duration. This value must be between 5 and 300 seconds. Defaults to PT1M. + LockDuration *string `json:"lockDuration,omitempty" tf:"lock_duration,omitempty"` + + // The number of times the IoT Hub attempts to deliver a file upload notification message. Defaults to 10. + MaxDeliveryCount *float64 `json:"maxDeliveryCount,omitempty" tf:"max_delivery_count,omitempty"` + + // The retention time for service-bound feedback messages, specified as an ISO 8601 timespan duration. This value must be between 1 minute and 48 hours. 
Defaults to PT1H. + TimeToLive *string `json:"timeToLive,omitempty" tf:"time_to_live,omitempty"` +} + +type FeedbackObservation struct { + + // The lock duration for the file upload notifications queue, specified as an ISO 8601 timespan duration. This value must be between 5 and 300 seconds. Defaults to PT1M. + LockDuration *string `json:"lockDuration,omitempty" tf:"lock_duration,omitempty"` + + // The number of times the IoT Hub attempts to deliver a file upload notification message. Defaults to 10. + MaxDeliveryCount *float64 `json:"maxDeliveryCount,omitempty" tf:"max_delivery_count,omitempty"` + + // The retention time for service-bound feedback messages, specified as an ISO 8601 timespan duration. This value must be between 1 minute and 48 hours. Defaults to PT1H. + TimeToLive *string `json:"timeToLive,omitempty" tf:"time_to_live,omitempty"` +} + +type FeedbackParameters struct { + + // The lock duration for the file upload notifications queue, specified as an ISO 8601 timespan duration. This value must be between 5 and 300 seconds. Defaults to PT1M. + // +kubebuilder:validation:Optional + LockDuration *string `json:"lockDuration,omitempty" tf:"lock_duration,omitempty"` + + // The number of times the IoT Hub attempts to deliver a file upload notification message. Defaults to 10. + // +kubebuilder:validation:Optional + MaxDeliveryCount *float64 `json:"maxDeliveryCount,omitempty" tf:"max_delivery_count,omitempty"` + + // The retention time for service-bound feedback messages, specified as an ISO 8601 timespan duration. This value must be between 1 minute and 48 hours. Defaults to PT1H. + // +kubebuilder:validation:Optional + TimeToLive *string `json:"timeToLive,omitempty" tf:"time_to_live,omitempty"` +} + +type FileUploadInitParameters struct { + + // The type used to authenticate against the storage account. Possible values are keyBased and identityBased. Defaults to keyBased. 
+ AuthenticationType *string `json:"authenticationType,omitempty" tf:"authentication_type,omitempty"` + + // The name of the root container where the files should be uploaded to. The container need not exist but should be creatable using the connection_string specified. + ContainerName *string `json:"containerName,omitempty" tf:"container_name,omitempty"` + + // The period of time for which a file upload notification message is available to consume before it expires, specified as an ISO 8601 timespan duration. This value must be between 1 minute and 48 hours. Defaults to PT1H. + DefaultTTL *string `json:"defaultTtl,omitempty" tf:"default_ttl,omitempty"` + + // The ID of the User Managed Identity used to authenticate against the storage account. + IdentityID *string `json:"identityId,omitempty" tf:"identity_id,omitempty"` + + // The lock duration for the file upload notifications queue, specified as an ISO 8601 timespan duration. This value must be between 5 and 300 seconds. Defaults to PT1M. + LockDuration *string `json:"lockDuration,omitempty" tf:"lock_duration,omitempty"` + + // The number of times the IoT Hub attempts to deliver a file upload notification message. Defaults to 10. + MaxDeliveryCount *float64 `json:"maxDeliveryCount,omitempty" tf:"max_delivery_count,omitempty"` + + // Used to specify whether file notifications are sent to IoT Hub on upload. Defaults to false. + Notifications *bool `json:"notifications,omitempty" tf:"notifications,omitempty"` + + // The period of time for which the SAS URI generated by IoT Hub for file upload is valid, specified as an ISO 8601 timespan duration. This value must be between 1 minute and 24 hours. Defaults to PT1H. + SASTTL *string `json:"sasTtl,omitempty" tf:"sas_ttl,omitempty"` +} + +type FileUploadObservation struct { + + // The type used to authenticate against the storage account. Possible values are keyBased and identityBased. Defaults to keyBased. 
+ AuthenticationType *string `json:"authenticationType,omitempty" tf:"authentication_type,omitempty"` + + // The name of the root container where the files should be uploaded to. The container need not exist but should be creatable using the connection_string specified. + ContainerName *string `json:"containerName,omitempty" tf:"container_name,omitempty"` + + // The period of time for which a file upload notification message is available to consume before it expires, specified as an ISO 8601 timespan duration. This value must be between 1 minute and 48 hours. Defaults to PT1H. + DefaultTTL *string `json:"defaultTtl,omitempty" tf:"default_ttl,omitempty"` + + // The ID of the User Managed Identity used to authenticate against the storage account. + IdentityID *string `json:"identityId,omitempty" tf:"identity_id,omitempty"` + + // The lock duration for the file upload notifications queue, specified as an ISO 8601 timespan duration. This value must be between 5 and 300 seconds. Defaults to PT1M. + LockDuration *string `json:"lockDuration,omitempty" tf:"lock_duration,omitempty"` + + // The number of times the IoT Hub attempts to deliver a file upload notification message. Defaults to 10. + MaxDeliveryCount *float64 `json:"maxDeliveryCount,omitempty" tf:"max_delivery_count,omitempty"` + + // Used to specify whether file notifications are sent to IoT Hub on upload. Defaults to false. + Notifications *bool `json:"notifications,omitempty" tf:"notifications,omitempty"` + + // The period of time for which the SAS URI generated by IoT Hub for file upload is valid, specified as an ISO 8601 timespan duration. This value must be between 1 minute and 24 hours. Defaults to PT1H. + SASTTL *string `json:"sasTtl,omitempty" tf:"sas_ttl,omitempty"` +} + +type FileUploadParameters struct { + + // The type used to authenticate against the storage account. Possible values are keyBased and identityBased. Defaults to keyBased. 
+ // +kubebuilder:validation:Optional + AuthenticationType *string `json:"authenticationType,omitempty" tf:"authentication_type,omitempty"` + + // The connection string for the Azure Storage account to which files are uploaded. + // +kubebuilder:validation:Required + ConnectionStringSecretRef v1.SecretKeySelector `json:"connectionStringSecretRef" tf:"-"` + + // The name of the root container where the files should be uploaded to. The container need not exist but should be creatable using the connection_string specified. + // +kubebuilder:validation:Optional + ContainerName *string `json:"containerName" tf:"container_name,omitempty"` + + // The period of time for which a file upload notification message is available to consume before it expires, specified as an ISO 8601 timespan duration. This value must be between 1 minute and 48 hours. Defaults to PT1H. + // +kubebuilder:validation:Optional + DefaultTTL *string `json:"defaultTtl,omitempty" tf:"default_ttl,omitempty"` + + // The ID of the User Managed Identity used to authenticate against the storage account. + // +kubebuilder:validation:Optional + IdentityID *string `json:"identityId,omitempty" tf:"identity_id,omitempty"` + + // The lock duration for the file upload notifications queue, specified as an ISO 8601 timespan duration. This value must be between 5 and 300 seconds. Defaults to PT1M. + // +kubebuilder:validation:Optional + LockDuration *string `json:"lockDuration,omitempty" tf:"lock_duration,omitempty"` + + // The number of times the IoT Hub attempts to deliver a file upload notification message. Defaults to 10. + // +kubebuilder:validation:Optional + MaxDeliveryCount *float64 `json:"maxDeliveryCount,omitempty" tf:"max_delivery_count,omitempty"` + + // Used to specify whether file notifications are sent to IoT Hub on upload. Defaults to false. 
+ // +kubebuilder:validation:Optional + Notifications *bool `json:"notifications,omitempty" tf:"notifications,omitempty"` + + // The period of time for which the SAS URI generated by IoT Hub for file upload is valid, specified as an ISO 8601 timespan duration. This value must be between 1 minute and 24 hours. Defaults to PT1H. + // +kubebuilder:validation:Optional + SASTTL *string `json:"sasTtl,omitempty" tf:"sas_ttl,omitempty"` +} + +type IOTHubInitParameters struct { + + // A cloud_to_device block as defined below. + CloudToDevice *CloudToDeviceInitParameters `json:"cloudToDevice,omitempty" tf:"cloud_to_device,omitempty"` + + // The number of device-to-cloud partitions used by backing event hubs. Must be between 2 and 128. + EventHubPartitionCount *float64 `json:"eventHubPartitionCount,omitempty" tf:"event_hub_partition_count,omitempty"` + + // The event hub retention to use in days. Must be between 1 and 7. + EventHubRetentionInDays *float64 `json:"eventHubRetentionInDays,omitempty" tf:"event_hub_retention_in_days,omitempty"` + + // A file_upload block as defined below. + FileUpload *FileUploadInitParameters `json:"fileUpload,omitempty" tf:"file_upload,omitempty"` + + // An identity block as defined below. + Identity *IdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // If false, SAS tokens with Iot hub scoped SAS keys cannot be used for authentication. Defaults to true. + LocalAuthenticationEnabled *bool `json:"localAuthenticationEnabled,omitempty" tf:"local_authentication_enabled,omitempty"` + + // Specifies the supported Azure location where the resource has to be created. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the minimum TLS version to support for this hub. The only valid value is 1.2. Changing this forces a new resource to be created. 
+ MinTLSVersion *string `json:"minTlsVersion,omitempty" tf:"min_tls_version,omitempty"` + + // A network_rule_set block as defined below. + NetworkRuleSet []NetworkRuleSetInitParameters `json:"networkRuleSet,omitempty" tf:"network_rule_set,omitempty"` + + // Is the IotHub resource accessible from a public network? + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // A sku block as defined below. + Sku *SkuInitParameters `json:"sku,omitempty" tf:"sku,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type IOTHubObservation struct { + + // A cloud_to_device block as defined below. + CloudToDevice *CloudToDeviceObservation `json:"cloudToDevice,omitempty" tf:"cloud_to_device,omitempty"` + + // An endpoint block as defined below. + Endpoint []EndpointObservation `json:"endpoint,omitempty" tf:"endpoint,omitempty"` + + // A enrichment block as defined below. 
+ Enrichment []EnrichmentObservation `json:"enrichment,omitempty" tf:"enrichment,omitempty"` + + // The EventHub compatible endpoint for events data + EventHubEventsEndpoint *string `json:"eventHubEventsEndpoint,omitempty" tf:"event_hub_events_endpoint,omitempty"` + + // The EventHub namespace for events data + EventHubEventsNamespace *string `json:"eventHubEventsNamespace,omitempty" tf:"event_hub_events_namespace,omitempty"` + + // The EventHub compatible path for events data + EventHubEventsPath *string `json:"eventHubEventsPath,omitempty" tf:"event_hub_events_path,omitempty"` + + // The EventHub compatible endpoint for operational data + EventHubOperationsEndpoint *string `json:"eventHubOperationsEndpoint,omitempty" tf:"event_hub_operations_endpoint,omitempty"` + + // The EventHub compatible path for operational data + EventHubOperationsPath *string `json:"eventHubOperationsPath,omitempty" tf:"event_hub_operations_path,omitempty"` + + // The number of device-to-cloud partitions used by backing event hubs. Must be between 2 and 128. + EventHubPartitionCount *float64 `json:"eventHubPartitionCount,omitempty" tf:"event_hub_partition_count,omitempty"` + + // The event hub retention to use in days. Must be between 1 and 7. + EventHubRetentionInDays *float64 `json:"eventHubRetentionInDays,omitempty" tf:"event_hub_retention_in_days,omitempty"` + + // A fallback_route block as defined below. If the fallback route is enabled, messages that don't match any of the supplied routes are automatically sent to this route. Defaults to messages/events. + FallbackRoute *FallbackRouteObservation `json:"fallbackRoute,omitempty" tf:"fallback_route,omitempty"` + + // A file_upload block as defined below. + FileUpload *FileUploadObservation `json:"fileUpload,omitempty" tf:"file_upload,omitempty"` + + // The hostname of the IotHub Resource. + HostName *string `json:"hostname,omitempty" tf:"hostname,omitempty"` + + // The ID of the IoTHub. 
+ ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. + Identity *IdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // If false, SAS tokens with Iot hub scoped SAS keys cannot be used for authentication. Defaults to true. + LocalAuthenticationEnabled *bool `json:"localAuthenticationEnabled,omitempty" tf:"local_authentication_enabled,omitempty"` + + // Specifies the supported Azure location where the resource has to be created. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the minimum TLS version to support for this hub. The only valid value is 1.2. Changing this forces a new resource to be created. + MinTLSVersion *string `json:"minTlsVersion,omitempty" tf:"min_tls_version,omitempty"` + + // A network_rule_set block as defined below. + NetworkRuleSet []NetworkRuleSetObservation `json:"networkRuleSet,omitempty" tf:"network_rule_set,omitempty"` + + // Is the IotHub resource accessible from a public network? + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The name of the resource group under which the IotHub resource has to be created. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A route block as defined below. + Route []RouteObservation `json:"route,omitempty" tf:"route,omitempty"` + + // One or more shared_access_policy blocks as defined below. + SharedAccessPolicy []SharedAccessPolicyObservation `json:"sharedAccessPolicy,omitempty" tf:"shared_access_policy,omitempty"` + + // A sku block as defined below. + Sku *SkuObservation `json:"sku,omitempty" tf:"sku,omitempty"` + + // A mapping of tags to assign to the resource. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this IoT Hub. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IOTHubParameters struct { + + // A cloud_to_device block as defined below. + // +kubebuilder:validation:Optional + CloudToDevice *CloudToDeviceParameters `json:"cloudToDevice,omitempty" tf:"cloud_to_device,omitempty"` + + // The number of device-to-cloud partitions used by backing event hubs. Must be between 2 and 128. + // +kubebuilder:validation:Optional + EventHubPartitionCount *float64 `json:"eventHubPartitionCount,omitempty" tf:"event_hub_partition_count,omitempty"` + + // The event hub retention to use in days. Must be between 1 and 7. + // +kubebuilder:validation:Optional + EventHubRetentionInDays *float64 `json:"eventHubRetentionInDays,omitempty" tf:"event_hub_retention_in_days,omitempty"` + + // A file_upload block as defined below. + // +kubebuilder:validation:Optional + FileUpload *FileUploadParameters `json:"fileUpload,omitempty" tf:"file_upload,omitempty"` + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *IdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // If false, SAS tokens with Iot hub scoped SAS keys cannot be used for authentication. Defaults to true. + // +kubebuilder:validation:Optional + LocalAuthenticationEnabled *bool `json:"localAuthenticationEnabled,omitempty" tf:"local_authentication_enabled,omitempty"` + + // Specifies the supported Azure location where the resource has to be created. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the minimum TLS version to support for this hub. 
The only valid value is 1.2. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + MinTLSVersion *string `json:"minTlsVersion,omitempty" tf:"min_tls_version,omitempty"` + + // A network_rule_set block as defined below. + // +kubebuilder:validation:Optional + NetworkRuleSet []NetworkRuleSetParameters `json:"networkRuleSet,omitempty" tf:"network_rule_set,omitempty"` + + // Is the IotHub resource accessible from a public network? + // +kubebuilder:validation:Optional + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The name of the resource group under which the IotHub resource has to be created. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A sku block as defined below. + // +kubebuilder:validation:Optional + Sku *SkuParameters `json:"sku,omitempty" tf:"sku,omitempty"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type IPRuleInitParameters struct { + + // The desired action for requests captured by this rule. Possible values are Allow. Defaults to Allow. 
+ Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The IP address range in CIDR notation for the IP rule. + IPMask *string `json:"ipMask,omitempty" tf:"ip_mask,omitempty"` + + // The name of the sku. Possible values are B1, B2, B3, F1, S1, S2, and S3. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type IPRuleObservation struct { + + // The desired action for requests captured by this rule. Possible values are Allow. Defaults to Allow. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The IP address range in CIDR notation for the IP rule. + IPMask *string `json:"ipMask,omitempty" tf:"ip_mask,omitempty"` + + // The name of the sku. Possible values are B1, B2, B3, F1, S1, S2, and S3. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type IPRuleParameters struct { + + // The desired action for requests captured by this rule. Possible values are Allow. Defaults to Allow. + // +kubebuilder:validation:Optional + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The IP address range in CIDR notation for the IP rule. + // +kubebuilder:validation:Optional + IPMask *string `json:"ipMask" tf:"ip_mask,omitempty"` + + // The name of the sku. Possible values are B1, B2, B3, F1, S1, S2, and S3. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type IdentityInitParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this IoT Hub. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this IoT Hub. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityObservation struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this IoT Hub. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Principal ID associated with this Managed Service Identity. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID associated with this Managed Service Identity. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this IoT Hub. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this IoT Hub. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this IoT Hub. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type NetworkRuleSetInitParameters struct { + + // Determines if Network Rule Set is also applied to the BuiltIn EventHub EndPoint of the IotHub. Defaults to false. + ApplyToBuiltinEventHubEndpoint *bool `json:"applyToBuiltinEventhubEndpoint,omitempty" tf:"apply_to_builtin_eventhub_endpoint,omitempty"` + + // Default Action for Network Rule Set. Possible values are Deny, Allow. Defaults to Deny. + DefaultAction *string `json:"defaultAction,omitempty" tf:"default_action,omitempty"` + + // One or more ip_rule blocks as defined below. 
+ IPRule []IPRuleInitParameters `json:"ipRule,omitempty" tf:"ip_rule,omitempty"` +} + +type NetworkRuleSetObservation struct { + + // Determines if Network Rule Set is also applied to the BuiltIn EventHub EndPoint of the IotHub. Defaults to false. + ApplyToBuiltinEventHubEndpoint *bool `json:"applyToBuiltinEventhubEndpoint,omitempty" tf:"apply_to_builtin_eventhub_endpoint,omitempty"` + + // Default Action for Network Rule Set. Possible values are Deny, Allow. Defaults to Deny. + DefaultAction *string `json:"defaultAction,omitempty" tf:"default_action,omitempty"` + + // One or more ip_rule blocks as defined below. + IPRule []IPRuleObservation `json:"ipRule,omitempty" tf:"ip_rule,omitempty"` +} + +type NetworkRuleSetParameters struct { + + // Determines if Network Rule Set is also applied to the BuiltIn EventHub EndPoint of the IotHub. Defaults to false. + // +kubebuilder:validation:Optional + ApplyToBuiltinEventHubEndpoint *bool `json:"applyToBuiltinEventhubEndpoint,omitempty" tf:"apply_to_builtin_eventhub_endpoint,omitempty"` + + // Default Action for Network Rule Set. Possible values are Deny, Allow. Defaults to Deny. + // +kubebuilder:validation:Optional + DefaultAction *string `json:"defaultAction,omitempty" tf:"default_action,omitempty"` + + // One or more ip_rule blocks as defined below. + // +kubebuilder:validation:Optional + IPRule []IPRuleParameters `json:"ipRule,omitempty" tf:"ip_rule,omitempty"` +} + +type RouteInitParameters struct { +} + +type RouteObservation struct { + + // The condition that is evaluated to apply the routing rule. Defaults to true. For grammar, see: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-query-language. + Condition *string `json:"condition,omitempty" tf:"condition,omitempty"` + + // Used to specify whether a route is enabled. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The list of endpoints to which messages that satisfy the condition are routed. 
+ EndpointNames []*string `json:"endpointNames,omitempty" tf:"endpoint_names,omitempty"` + + // The name of the route. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The source that the routing rule is to be applied to, such as DeviceMessages. Possible values include: Invalid, DeviceMessages, TwinChangeEvents, DeviceLifecycleEvents, DeviceConnectionStateEvents, DeviceJobLifecycleEvents and DigitalTwinChangeEvents. + Source *string `json:"source,omitempty" tf:"source,omitempty"` +} + +type RouteParameters struct { +} + +type SharedAccessPolicyInitParameters struct { +} + +type SharedAccessPolicyObservation struct { + + // The name of the shared access policy. + KeyName *string `json:"keyName,omitempty" tf:"key_name,omitempty"` + + // The permissions assigned to the shared access policy. + Permissions *string `json:"permissions,omitempty" tf:"permissions,omitempty"` +} + +type SharedAccessPolicyParameters struct { +} + +type SkuInitParameters struct { + + // The number of provisioned IoT Hub units. + Capacity *float64 `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // The name of the sku. Possible values are B1, B2, B3, F1, S1, S2, and S3. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type SkuObservation struct { + + // The number of provisioned IoT Hub units. + Capacity *float64 `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // The name of the sku. Possible values are B1, B2, B3, F1, S1, S2, and S3. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type SkuParameters struct { + + // The number of provisioned IoT Hub units. + // +kubebuilder:validation:Optional + Capacity *float64 `json:"capacity" tf:"capacity,omitempty"` + + // The name of the sku. Possible values are B1, B2, B3, F1, S1, S2, and S3. 
+ // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +// IOTHubSpec defines the desired state of IOTHub +type IOTHubSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider IOTHubParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider IOTHubInitParameters `json:"initProvider,omitempty"` +} + +// IOTHubStatus defines the observed state of IOTHub. +type IOTHubStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider IOTHubObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// IOTHub is the Schema for the IOTHubs API. 
Manages an IotHub +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type IOTHub struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.sku) || (has(self.initProvider) && has(self.initProvider.sku))",message="spec.forProvider.sku is a required parameter" + Spec IOTHubSpec `json:"spec"` + Status IOTHubStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// IOTHubList contains a list of IOTHubs +type IOTHubList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []IOTHub `json:"items"` +} + +// Repository type metadata. +var ( + IOTHub_Kind = "IOTHub" + IOTHub_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: IOTHub_Kind}.String() + IOTHub_KindAPIVersion = IOTHub_Kind + "." 
+ CRDGroupVersion.String() + IOTHub_GroupVersionKind = CRDGroupVersion.WithKind(IOTHub_Kind) +) + +func init() { + SchemeBuilder.Register(&IOTHub{}, &IOTHubList{}) +} diff --git a/apis/devices/v1beta2/zz_iothubdps_terraformed.go b/apis/devices/v1beta2/zz_iothubdps_terraformed.go new file mode 100755 index 000000000..5113b83ce --- /dev/null +++ b/apis/devices/v1beta2/zz_iothubdps_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this IOTHubDPS +func (mg *IOTHubDPS) GetTerraformResourceType() string { + return "azurerm_iothub_dps" +} + +// GetConnectionDetailsMapping for this IOTHubDPS +func (tr *IOTHubDPS) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"linked_hub[*].connection_string": "spec.forProvider.linkedHub[*].connectionStringSecretRef"} +} + +// GetObservation of this IOTHubDPS +func (tr *IOTHubDPS) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this IOTHubDPS +func (tr *IOTHubDPS) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this IOTHubDPS +func (tr *IOTHubDPS) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this IOTHubDPS +func (tr *IOTHubDPS) GetParameters() (map[string]any, error) { + p, err := 
json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this IOTHubDPS +func (tr *IOTHubDPS) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this IOTHubDPS +func (tr *IOTHubDPS) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this IOTHubDPS +func (tr *IOTHubDPS) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this IOTHubDPS using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *IOTHubDPS) LateInitialize(attrs []byte) (bool, error) { + params := &IOTHubDPSParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *IOTHubDPS) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/devices/v1beta2/zz_iothubdps_types.go b/apis/devices/v1beta2/zz_iothubdps_types.go new file mode 100755 index 000000000..c4b675c45 --- /dev/null +++ b/apis/devices/v1beta2/zz_iothubdps_types.go @@ -0,0 +1,320 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type IOTHubDPSInitParameters struct { + + // The allocation policy of the IoT Device Provisioning Service (Hashed, GeoLatency or Static). Defaults to Hashed. + AllocationPolicy *string `json:"allocationPolicy,omitempty" tf:"allocation_policy,omitempty"` + + // Specifies if the IoT Device Provisioning Service has data residency and disaster recovery enabled. Defaults to false. Changing this forces a new resource to be created. + DataResidencyEnabled *bool `json:"dataResidencyEnabled,omitempty" tf:"data_residency_enabled,omitempty"` + + // An ip_filter_rule block as defined below. + IPFilterRule []IPFilterRuleInitParameters `json:"ipFilterRule,omitempty" tf:"ip_filter_rule,omitempty"` + + // A linked_hub block as defined below. 
+ LinkedHub []LinkedHubInitParameters `json:"linkedHub,omitempty" tf:"linked_hub,omitempty"` + + // Specifies the supported Azure location where the resource has to be created. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Whether requests from Public Network are allowed. Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // A sku block as defined below. + Sku *IOTHubDPSSkuInitParameters `json:"sku,omitempty" tf:"sku,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type IOTHubDPSObservation struct { + + // The allocation policy of the IoT Device Provisioning Service (Hashed, GeoLatency or Static). Defaults to Hashed. + AllocationPolicy *string `json:"allocationPolicy,omitempty" tf:"allocation_policy,omitempty"` + + // Specifies if the IoT Device Provisioning Service has data residency and disaster recovery enabled. Defaults to false. Changing this forces a new resource to be created. + DataResidencyEnabled *bool `json:"dataResidencyEnabled,omitempty" tf:"data_residency_enabled,omitempty"` + + // The device endpoint of the IoT Device Provisioning Service. + DeviceProvisioningHostName *string `json:"deviceProvisioningHostName,omitempty" tf:"device_provisioning_host_name,omitempty"` + + // The ID of the IoT Device Provisioning Service. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The unique identifier of the IoT Device Provisioning Service. + IDScope *string `json:"idScope,omitempty" tf:"id_scope,omitempty"` + + // An ip_filter_rule block as defined below. + IPFilterRule []IPFilterRuleObservation `json:"ipFilterRule,omitempty" tf:"ip_filter_rule,omitempty"` + + // A linked_hub block as defined below. 
+ LinkedHub []LinkedHubObservation `json:"linkedHub,omitempty" tf:"linked_hub,omitempty"` + + // Specifies the supported Azure location where the resource has to be created. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Whether requests from Public Network are allowed. Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The name of the resource group under which the Iot Device Provisioning Service resource has to be created. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // The service endpoint of the IoT Device Provisioning Service. + ServiceOperationsHostName *string `json:"serviceOperationsHostName,omitempty" tf:"service_operations_host_name,omitempty"` + + // A sku block as defined below. + Sku *IOTHubDPSSkuObservation `json:"sku,omitempty" tf:"sku,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type IOTHubDPSParameters struct { + + // The allocation policy of the IoT Device Provisioning Service (Hashed, GeoLatency or Static). Defaults to Hashed. + // +kubebuilder:validation:Optional + AllocationPolicy *string `json:"allocationPolicy,omitempty" tf:"allocation_policy,omitempty"` + + // Specifies if the IoT Device Provisioning Service has data residency and disaster recovery enabled. Defaults to false. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + DataResidencyEnabled *bool `json:"dataResidencyEnabled,omitempty" tf:"data_residency_enabled,omitempty"` + + // An ip_filter_rule block as defined below. 
+ // +kubebuilder:validation:Optional + IPFilterRule []IPFilterRuleParameters `json:"ipFilterRule,omitempty" tf:"ip_filter_rule,omitempty"` + + // A linked_hub block as defined below. + // +kubebuilder:validation:Optional + LinkedHub []LinkedHubParameters `json:"linkedHub,omitempty" tf:"linked_hub,omitempty"` + + // Specifies the supported Azure location where the resource has to be created. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Whether requests from Public Network are allowed. Defaults to true. + // +kubebuilder:validation:Optional + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The name of the resource group under which the Iot Device Provisioning Service resource has to be created. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A sku block as defined below. + // +kubebuilder:validation:Optional + Sku *IOTHubDPSSkuParameters `json:"sku,omitempty" tf:"sku,omitempty"` + + // A mapping of tags to assign to the resource. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type IOTHubDPSSkuInitParameters struct { + + // The number of provisioned IoT Device Provisioning Service units. + Capacity *float64 `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // The name of the sku. Currently can only be set to S1. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type IOTHubDPSSkuObservation struct { + + // The number of provisioned IoT Device Provisioning Service units. + Capacity *float64 `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // The name of the sku. Currently can only be set to S1. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type IOTHubDPSSkuParameters struct { + + // The number of provisioned IoT Device Provisioning Service units. + // +kubebuilder:validation:Optional + Capacity *float64 `json:"capacity" tf:"capacity,omitempty"` + + // The name of the sku. Currently can only be set to S1. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type IPFilterRuleInitParameters struct { + + // The desired action for requests captured by this rule. Possible values are Accept, Reject + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The IP address range in CIDR notation for the rule. + IPMask *string `json:"ipMask,omitempty" tf:"ip_mask,omitempty"` + + // The name of the filter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Target for requests captured by this rule. Possible values are all, deviceApi and serviceApi. + Target *string `json:"target,omitempty" tf:"target,omitempty"` +} + +type IPFilterRuleObservation struct { + + // The desired action for requests captured by this rule. Possible values are Accept, Reject + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The IP address range in CIDR notation for the rule. 
+ IPMask *string `json:"ipMask,omitempty" tf:"ip_mask,omitempty"` + + // The name of the filter. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Target for requests captured by this rule. Possible values are all, deviceApi and serviceApi. + Target *string `json:"target,omitempty" tf:"target,omitempty"` +} + +type IPFilterRuleParameters struct { + + // The desired action for requests captured by this rule. Possible values are Accept, Reject + // +kubebuilder:validation:Optional + Action *string `json:"action" tf:"action,omitempty"` + + // The IP address range in CIDR notation for the rule. + // +kubebuilder:validation:Optional + IPMask *string `json:"ipMask" tf:"ip_mask,omitempty"` + + // The name of the filter. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Target for requests captured by this rule. Possible values are all, deviceApi and serviceApi. + // +kubebuilder:validation:Optional + Target *string `json:"target,omitempty" tf:"target,omitempty"` +} + +type LinkedHubInitParameters struct { + + // The weight applied to the IoT Hub. Defaults to 1. + AllocationWeight *float64 `json:"allocationWeight,omitempty" tf:"allocation_weight,omitempty"` + + // Determines whether to apply allocation policies to the IoT Hub. Defaults to true. + ApplyAllocationPolicy *bool `json:"applyAllocationPolicy,omitempty" tf:"apply_allocation_policy,omitempty"` + + // The location of the IoT hub. + Location *string `json:"location,omitempty" tf:"location,omitempty"` +} + +type LinkedHubObservation struct { + + // The weight applied to the IoT Hub. Defaults to 1. + AllocationWeight *float64 `json:"allocationWeight,omitempty" tf:"allocation_weight,omitempty"` + + // Determines whether to apply allocation policies to the IoT Hub. Defaults to true. + ApplyAllocationPolicy *bool `json:"applyAllocationPolicy,omitempty" tf:"apply_allocation_policy,omitempty"` + + // (Computed) The IoT Hub hostname. 
+ HostName *string `json:"hostname,omitempty" tf:"hostname,omitempty"` + + // The location of the IoT hub. + Location *string `json:"location,omitempty" tf:"location,omitempty"` +} + +type LinkedHubParameters struct { + + // The weight applied to the IoT Hub. Defaults to 1. + // +kubebuilder:validation:Optional + AllocationWeight *float64 `json:"allocationWeight,omitempty" tf:"allocation_weight,omitempty"` + + // Determines whether to apply allocation policies to the IoT Hub. Defaults to true. + // +kubebuilder:validation:Optional + ApplyAllocationPolicy *bool `json:"applyAllocationPolicy,omitempty" tf:"apply_allocation_policy,omitempty"` + + // The connection string to connect to the IoT Hub. + // +kubebuilder:validation:Required + ConnectionStringSecretRef v1.SecretKeySelector `json:"connectionStringSecretRef" tf:"-"` + + // The location of the IoT hub. + // +kubebuilder:validation:Optional + Location *string `json:"location" tf:"location,omitempty"` +} + +// IOTHubDPSSpec defines the desired state of IOTHubDPS +type IOTHubDPSSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider IOTHubDPSParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider IOTHubDPSInitParameters `json:"initProvider,omitempty"` +} + +// IOTHubDPSStatus defines the observed state of IOTHubDPS. 
+type IOTHubDPSStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider IOTHubDPSObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// IOTHubDPS is the Schema for the IOTHubDPSs API. Manages an IoT Device Provisioning Service. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure},path=iothubdps +type IOTHubDPS struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.sku) || (has(self.initProvider) && has(self.initProvider.sku))",message="spec.forProvider.sku is a required parameter" + Spec IOTHubDPSSpec `json:"spec"` + Status IOTHubDPSStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// IOTHubDPSList contains a list of IOTHubDPSs +type IOTHubDPSList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []IOTHubDPS `json:"items"` +} + +// Repository type metadata. 
+var ( + IOTHubDPS_Kind = "IOTHubDPS" + IOTHubDPS_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: IOTHubDPS_Kind}.String() + IOTHubDPS_KindAPIVersion = IOTHubDPS_Kind + "." + CRDGroupVersion.String() + IOTHubDPS_GroupVersionKind = CRDGroupVersion.WithKind(IOTHubDPS_Kind) +) + +func init() { + SchemeBuilder.Register(&IOTHubDPS{}, &IOTHubDPSList{}) +} diff --git a/apis/deviceupdate/v1beta1/zz_generated.conversion_spokes.go b/apis/deviceupdate/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..8c78ef67b --- /dev/null +++ b/apis/deviceupdate/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,54 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this IOTHubDeviceUpdateAccount to the hub type. +func (tr *IOTHubDeviceUpdateAccount) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the IOTHubDeviceUpdateAccount type. 
+func (tr *IOTHubDeviceUpdateAccount) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this IOTHubDeviceUpdateInstance to the hub type. +func (tr *IOTHubDeviceUpdateInstance) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the IOTHubDeviceUpdateInstance type. +func (tr *IOTHubDeviceUpdateInstance) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/deviceupdate/v1beta1/zz_generated.conversion_hubs.go b/apis/deviceupdate/v1beta2/zz_generated.conversion_hubs.go similarity index 95% rename from apis/deviceupdate/v1beta1/zz_generated.conversion_hubs.go rename to apis/deviceupdate/v1beta2/zz_generated.conversion_hubs.go index 4c446c5ee..8252e362e 100755 --- a/apis/deviceupdate/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/deviceupdate/v1beta2/zz_generated.conversion_hubs.go @@ -4,7 +4,7 @@ // Code generated by upjet. DO NOT EDIT. 
-package v1beta1 +package v1beta2 // Hub marks this type as a conversion hub. func (tr *IOTHubDeviceUpdateAccount) Hub() {} diff --git a/apis/deviceupdate/v1beta2/zz_generated.deepcopy.go b/apis/deviceupdate/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..b35231ec8 --- /dev/null +++ b/apis/deviceupdate/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,752 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiagnosticStorageAccountInitParameters) DeepCopyInto(out *DiagnosticStorageAccountInitParameters) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IDRef != nil { + in, out := &in.IDRef, &out.IDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IDSelector != nil { + in, out := &in.IDSelector, &out.IDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiagnosticStorageAccountInitParameters. +func (in *DiagnosticStorageAccountInitParameters) DeepCopy() *DiagnosticStorageAccountInitParameters { + if in == nil { + return nil + } + out := new(DiagnosticStorageAccountInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DiagnosticStorageAccountObservation) DeepCopyInto(out *DiagnosticStorageAccountObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiagnosticStorageAccountObservation. +func (in *DiagnosticStorageAccountObservation) DeepCopy() *DiagnosticStorageAccountObservation { + if in == nil { + return nil + } + out := new(DiagnosticStorageAccountObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiagnosticStorageAccountParameters) DeepCopyInto(out *DiagnosticStorageAccountParameters) { + *out = *in + out.ConnectionStringSecretRef = in.ConnectionStringSecretRef + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IDRef != nil { + in, out := &in.IDRef, &out.IDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IDSelector != nil { + in, out := &in.IDSelector, &out.IDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiagnosticStorageAccountParameters. +func (in *DiagnosticStorageAccountParameters) DeepCopy() *DiagnosticStorageAccountParameters { + if in == nil { + return nil + } + out := new(DiagnosticStorageAccountParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IOTHubDeviceUpdateAccount) DeepCopyInto(out *IOTHubDeviceUpdateAccount) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IOTHubDeviceUpdateAccount. +func (in *IOTHubDeviceUpdateAccount) DeepCopy() *IOTHubDeviceUpdateAccount { + if in == nil { + return nil + } + out := new(IOTHubDeviceUpdateAccount) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IOTHubDeviceUpdateAccount) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IOTHubDeviceUpdateAccountInitParameters) DeepCopyInto(out *IOTHubDeviceUpdateAccountInitParameters) { + *out = *in + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
IOTHubDeviceUpdateAccountInitParameters. +func (in *IOTHubDeviceUpdateAccountInitParameters) DeepCopy() *IOTHubDeviceUpdateAccountInitParameters { + if in == nil { + return nil + } + out := new(IOTHubDeviceUpdateAccountInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IOTHubDeviceUpdateAccountList) DeepCopyInto(out *IOTHubDeviceUpdateAccountList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]IOTHubDeviceUpdateAccount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IOTHubDeviceUpdateAccountList. +func (in *IOTHubDeviceUpdateAccountList) DeepCopy() *IOTHubDeviceUpdateAccountList { + if in == nil { + return nil + } + out := new(IOTHubDeviceUpdateAccountList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IOTHubDeviceUpdateAccountList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IOTHubDeviceUpdateAccountObservation) DeepCopyInto(out *IOTHubDeviceUpdateAccountObservation) { + *out = *in + if in.HostName != nil { + in, out := &in.HostName, &out.HostName + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IOTHubDeviceUpdateAccountObservation. +func (in *IOTHubDeviceUpdateAccountObservation) DeepCopy() *IOTHubDeviceUpdateAccountObservation { + if in == nil { + return nil + } + out := new(IOTHubDeviceUpdateAccountObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IOTHubDeviceUpdateAccountParameters) DeepCopyInto(out *IOTHubDeviceUpdateAccountParameters) { + *out = *in + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IOTHubDeviceUpdateAccountParameters. +func (in *IOTHubDeviceUpdateAccountParameters) DeepCopy() *IOTHubDeviceUpdateAccountParameters { + if in == nil { + return nil + } + out := new(IOTHubDeviceUpdateAccountParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IOTHubDeviceUpdateAccountSpec) DeepCopyInto(out *IOTHubDeviceUpdateAccountSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IOTHubDeviceUpdateAccountSpec. +func (in *IOTHubDeviceUpdateAccountSpec) DeepCopy() *IOTHubDeviceUpdateAccountSpec { + if in == nil { + return nil + } + out := new(IOTHubDeviceUpdateAccountSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IOTHubDeviceUpdateAccountStatus) DeepCopyInto(out *IOTHubDeviceUpdateAccountStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IOTHubDeviceUpdateAccountStatus. +func (in *IOTHubDeviceUpdateAccountStatus) DeepCopy() *IOTHubDeviceUpdateAccountStatus { + if in == nil { + return nil + } + out := new(IOTHubDeviceUpdateAccountStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IOTHubDeviceUpdateInstance) DeepCopyInto(out *IOTHubDeviceUpdateInstance) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IOTHubDeviceUpdateInstance. 
+func (in *IOTHubDeviceUpdateInstance) DeepCopy() *IOTHubDeviceUpdateInstance { + if in == nil { + return nil + } + out := new(IOTHubDeviceUpdateInstance) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IOTHubDeviceUpdateInstance) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IOTHubDeviceUpdateInstanceInitParameters) DeepCopyInto(out *IOTHubDeviceUpdateInstanceInitParameters) { + *out = *in + if in.DiagnosticEnabled != nil { + in, out := &in.DiagnosticEnabled, &out.DiagnosticEnabled + *out = new(bool) + **out = **in + } + if in.DiagnosticStorageAccount != nil { + in, out := &in.DiagnosticStorageAccount, &out.DiagnosticStorageAccount + *out = new(DiagnosticStorageAccountInitParameters) + (*in).DeepCopyInto(*out) + } + if in.IOTHubID != nil { + in, out := &in.IOTHubID, &out.IOTHubID + *out = new(string) + **out = **in + } + if in.IOTHubIDRef != nil { + in, out := &in.IOTHubIDRef, &out.IOTHubIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IOTHubIDSelector != nil { + in, out := &in.IOTHubIDSelector, &out.IOTHubIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IOTHubDeviceUpdateInstanceInitParameters. 
+func (in *IOTHubDeviceUpdateInstanceInitParameters) DeepCopy() *IOTHubDeviceUpdateInstanceInitParameters { + if in == nil { + return nil + } + out := new(IOTHubDeviceUpdateInstanceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IOTHubDeviceUpdateInstanceList) DeepCopyInto(out *IOTHubDeviceUpdateInstanceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]IOTHubDeviceUpdateInstance, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IOTHubDeviceUpdateInstanceList. +func (in *IOTHubDeviceUpdateInstanceList) DeepCopy() *IOTHubDeviceUpdateInstanceList { + if in == nil { + return nil + } + out := new(IOTHubDeviceUpdateInstanceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IOTHubDeviceUpdateInstanceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IOTHubDeviceUpdateInstanceObservation) DeepCopyInto(out *IOTHubDeviceUpdateInstanceObservation) { + *out = *in + if in.DeviceUpdateAccountID != nil { + in, out := &in.DeviceUpdateAccountID, &out.DeviceUpdateAccountID + *out = new(string) + **out = **in + } + if in.DiagnosticEnabled != nil { + in, out := &in.DiagnosticEnabled, &out.DiagnosticEnabled + *out = new(bool) + **out = **in + } + if in.DiagnosticStorageAccount != nil { + in, out := &in.DiagnosticStorageAccount, &out.DiagnosticStorageAccount + *out = new(DiagnosticStorageAccountObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IOTHubID != nil { + in, out := &in.IOTHubID, &out.IOTHubID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IOTHubDeviceUpdateInstanceObservation. +func (in *IOTHubDeviceUpdateInstanceObservation) DeepCopy() *IOTHubDeviceUpdateInstanceObservation { + if in == nil { + return nil + } + out := new(IOTHubDeviceUpdateInstanceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IOTHubDeviceUpdateInstanceParameters) DeepCopyInto(out *IOTHubDeviceUpdateInstanceParameters) { + *out = *in + if in.DeviceUpdateAccountID != nil { + in, out := &in.DeviceUpdateAccountID, &out.DeviceUpdateAccountID + *out = new(string) + **out = **in + } + if in.DeviceUpdateAccountIDRef != nil { + in, out := &in.DeviceUpdateAccountIDRef, &out.DeviceUpdateAccountIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DeviceUpdateAccountIDSelector != nil { + in, out := &in.DeviceUpdateAccountIDSelector, &out.DeviceUpdateAccountIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.DiagnosticEnabled != nil { + in, out := &in.DiagnosticEnabled, &out.DiagnosticEnabled + *out = new(bool) + **out = **in + } + if in.DiagnosticStorageAccount != nil { + in, out := &in.DiagnosticStorageAccount, &out.DiagnosticStorageAccount + *out = new(DiagnosticStorageAccountParameters) + (*in).DeepCopyInto(*out) + } + if in.IOTHubID != nil { + in, out := &in.IOTHubID, &out.IOTHubID + *out = new(string) + **out = **in + } + if in.IOTHubIDRef != nil { + in, out := &in.IOTHubIDRef, &out.IOTHubIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IOTHubIDSelector != nil { + in, out := &in.IOTHubIDSelector, &out.IOTHubIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IOTHubDeviceUpdateInstanceParameters. 
+func (in *IOTHubDeviceUpdateInstanceParameters) DeepCopy() *IOTHubDeviceUpdateInstanceParameters { + if in == nil { + return nil + } + out := new(IOTHubDeviceUpdateInstanceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IOTHubDeviceUpdateInstanceSpec) DeepCopyInto(out *IOTHubDeviceUpdateInstanceSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IOTHubDeviceUpdateInstanceSpec. +func (in *IOTHubDeviceUpdateInstanceSpec) DeepCopy() *IOTHubDeviceUpdateInstanceSpec { + if in == nil { + return nil + } + out := new(IOTHubDeviceUpdateInstanceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IOTHubDeviceUpdateInstanceStatus) DeepCopyInto(out *IOTHubDeviceUpdateInstanceStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IOTHubDeviceUpdateInstanceStatus. +func (in *IOTHubDeviceUpdateInstanceStatus) DeepCopy() *IOTHubDeviceUpdateInstanceStatus { + if in == nil { + return nil + } + out := new(IOTHubDeviceUpdateInstanceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentityInitParameters) DeepCopyInto(out *IdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityInitParameters. +func (in *IdentityInitParameters) DeepCopy() *IdentityInitParameters { + if in == nil { + return nil + } + out := new(IdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityObservation) DeepCopyInto(out *IdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityObservation. +func (in *IdentityObservation) DeepCopy() *IdentityObservation { + if in == nil { + return nil + } + out := new(IdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentityParameters) DeepCopyInto(out *IdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityParameters. +func (in *IdentityParameters) DeepCopy() *IdentityParameters { + if in == nil { + return nil + } + out := new(IdentityParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/deviceupdate/v1beta2/zz_generated.managed.go b/apis/deviceupdate/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..f08829dfa --- /dev/null +++ b/apis/deviceupdate/v1beta2/zz_generated.managed.go @@ -0,0 +1,128 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this IOTHubDeviceUpdateAccount. +func (mg *IOTHubDeviceUpdateAccount) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this IOTHubDeviceUpdateAccount. +func (mg *IOTHubDeviceUpdateAccount) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this IOTHubDeviceUpdateAccount. +func (mg *IOTHubDeviceUpdateAccount) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this IOTHubDeviceUpdateAccount. 
+func (mg *IOTHubDeviceUpdateAccount) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this IOTHubDeviceUpdateAccount. +func (mg *IOTHubDeviceUpdateAccount) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this IOTHubDeviceUpdateAccount. +func (mg *IOTHubDeviceUpdateAccount) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this IOTHubDeviceUpdateAccount. +func (mg *IOTHubDeviceUpdateAccount) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this IOTHubDeviceUpdateAccount. +func (mg *IOTHubDeviceUpdateAccount) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this IOTHubDeviceUpdateAccount. +func (mg *IOTHubDeviceUpdateAccount) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this IOTHubDeviceUpdateAccount. +func (mg *IOTHubDeviceUpdateAccount) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this IOTHubDeviceUpdateAccount. +func (mg *IOTHubDeviceUpdateAccount) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this IOTHubDeviceUpdateAccount. +func (mg *IOTHubDeviceUpdateAccount) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this IOTHubDeviceUpdateInstance. +func (mg *IOTHubDeviceUpdateInstance) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this IOTHubDeviceUpdateInstance. 
+func (mg *IOTHubDeviceUpdateInstance) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this IOTHubDeviceUpdateInstance. +func (mg *IOTHubDeviceUpdateInstance) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this IOTHubDeviceUpdateInstance. +func (mg *IOTHubDeviceUpdateInstance) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this IOTHubDeviceUpdateInstance. +func (mg *IOTHubDeviceUpdateInstance) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this IOTHubDeviceUpdateInstance. +func (mg *IOTHubDeviceUpdateInstance) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this IOTHubDeviceUpdateInstance. +func (mg *IOTHubDeviceUpdateInstance) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this IOTHubDeviceUpdateInstance. +func (mg *IOTHubDeviceUpdateInstance) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this IOTHubDeviceUpdateInstance. +func (mg *IOTHubDeviceUpdateInstance) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this IOTHubDeviceUpdateInstance. +func (mg *IOTHubDeviceUpdateInstance) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this IOTHubDeviceUpdateInstance. +func (mg *IOTHubDeviceUpdateInstance) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this IOTHubDeviceUpdateInstance. 
+func (mg *IOTHubDeviceUpdateInstance) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/deviceupdate/v1beta2/zz_generated.managedlist.go b/apis/deviceupdate/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..6f77df975 --- /dev/null +++ b/apis/deviceupdate/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,26 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this IOTHubDeviceUpdateAccountList. +func (l *IOTHubDeviceUpdateAccountList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this IOTHubDeviceUpdateInstanceList. +func (l *IOTHubDeviceUpdateInstanceList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/deviceupdate/v1beta2/zz_generated.resolvers.go b/apis/deviceupdate/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..e787c81d1 --- /dev/null +++ b/apis/deviceupdate/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,160 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + apisresolver "github.com/upbound/provider-azure/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *IOTHubDeviceUpdateAccount) ResolveReferences( // ResolveReferences of this IOTHubDeviceUpdateAccount. + ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this IOTHubDeviceUpdateInstance. 
+func (mg *IOTHubDeviceUpdateInstance) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("deviceupdate.azure.upbound.io", "v1beta2", "IOTHubDeviceUpdateAccount", "IOTHubDeviceUpdateAccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DeviceUpdateAccountID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.DeviceUpdateAccountIDRef, + Selector: mg.Spec.ForProvider.DeviceUpdateAccountIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DeviceUpdateAccountID") + } + mg.Spec.ForProvider.DeviceUpdateAccountID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DeviceUpdateAccountIDRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.DiagnosticStorageAccount != nil { + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DiagnosticStorageAccount.ID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.DiagnosticStorageAccount.IDRef, + Selector: mg.Spec.ForProvider.DiagnosticStorageAccount.IDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DiagnosticStorageAccount.ID") + } + mg.Spec.ForProvider.DiagnosticStorageAccount.ID = 
reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DiagnosticStorageAccount.IDRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("devices.azure.upbound.io", "v1beta2", "IOTHub", "IOTHubList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.IOTHubID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.IOTHubIDRef, + Selector: mg.Spec.ForProvider.IOTHubIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.IOTHubID") + } + mg.Spec.ForProvider.IOTHubID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.IOTHubIDRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.DiagnosticStorageAccount != nil { + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DiagnosticStorageAccount.ID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.DiagnosticStorageAccount.IDRef, + Selector: mg.Spec.InitProvider.DiagnosticStorageAccount.IDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.DiagnosticStorageAccount.ID") + } + mg.Spec.InitProvider.DiagnosticStorageAccount.ID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DiagnosticStorageAccount.IDRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("devices.azure.upbound.io", "v1beta2", "IOTHub", "IOTHubList") + if err != nil 
{ + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.IOTHubID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.IOTHubIDRef, + Selector: mg.Spec.InitProvider.IOTHubIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.IOTHubID") + } + mg.Spec.InitProvider.IOTHubID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.IOTHubIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/deviceupdate/v1beta2/zz_groupversion_info.go b/apis/deviceupdate/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..2bdd77cf1 --- /dev/null +++ b/apis/deviceupdate/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=deviceupdate.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "deviceupdate.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/deviceupdate/v1beta2/zz_iothubdeviceupdateaccount_terraformed.go b/apis/deviceupdate/v1beta2/zz_iothubdeviceupdateaccount_terraformed.go new file mode 100755 index 000000000..337079391 --- /dev/null +++ b/apis/deviceupdate/v1beta2/zz_iothubdeviceupdateaccount_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this IOTHubDeviceUpdateAccount +func (mg *IOTHubDeviceUpdateAccount) GetTerraformResourceType() string { + return "azurerm_iothub_device_update_account" +} + +// GetConnectionDetailsMapping for this IOTHubDeviceUpdateAccount +func (tr *IOTHubDeviceUpdateAccount) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this IOTHubDeviceUpdateAccount +func (tr *IOTHubDeviceUpdateAccount) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this IOTHubDeviceUpdateAccount +func (tr *IOTHubDeviceUpdateAccount) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this IOTHubDeviceUpdateAccount +func (tr *IOTHubDeviceUpdateAccount) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this IOTHubDeviceUpdateAccount +func (tr *IOTHubDeviceUpdateAccount) 
GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this IOTHubDeviceUpdateAccount
+func (tr *IOTHubDeviceUpdateAccount) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this IOTHubDeviceUpdateAccount
+func (tr *IOTHubDeviceUpdateAccount) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this IOTHubDeviceUpdateAccount
+func (tr *IOTHubDeviceUpdateAccount) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this IOTHubDeviceUpdateAccount using its observed tfState.
+// returns True if there are any spec changes for the resource. +func (tr *IOTHubDeviceUpdateAccount) LateInitialize(attrs []byte) (bool, error) { + params := &IOTHubDeviceUpdateAccountParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *IOTHubDeviceUpdateAccount) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/deviceupdate/v1beta2/zz_iothubdeviceupdateaccount_types.go b/apis/deviceupdate/v1beta2/zz_iothubdeviceupdateaccount_types.go new file mode 100755 index 000000000..3e7d68f02 --- /dev/null +++ b/apis/deviceupdate/v1beta2/zz_iothubdeviceupdateaccount_types.go @@ -0,0 +1,197 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type IOTHubDeviceUpdateAccountInitParameters struct { + + // An identity block as defined below. + Identity *IdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Specifies the Azure Region where the IoT Hub Device Update Account should exist. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies whether the public network access is enabled for the IoT Hub Device Update Account. Possible values are true and false. Defaults to true. 
+ PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // Sku of the IoT Hub Device Update Account. Possible values are Free and Standard. Defaults to Standard. Changing this forces a new resource to be created. + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // A mapping of tags which should be assigned to the IoT Hub Device Update Account. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type IOTHubDeviceUpdateAccountObservation struct { + + // The API host name of the IoT Hub Device Update Account. + HostName *string `json:"hostName,omitempty" tf:"host_name,omitempty"` + + // The ID of the IoT Hub Device Update Account. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. + Identity *IdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // Specifies the Azure Region where the IoT Hub Device Update Account should exist. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies whether the public network access is enabled for the IoT Hub Device Update Account. Possible values are true and false. Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // Specifies the name of the Resource Group where the IoT Hub Device Update Account should exist. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Sku of the IoT Hub Device Update Account. Possible values are Free and Standard. Defaults to Standard. Changing this forces a new resource to be created. + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // A mapping of tags which should be assigned to the IoT Hub Device Update Account. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type IOTHubDeviceUpdateAccountParameters struct { + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *IdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Specifies the Azure Region where the IoT Hub Device Update Account should exist. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies whether the public network access is enabled for the IoT Hub Device Update Account. Possible values are true and false. Defaults to true. + // +kubebuilder:validation:Optional + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // Specifies the name of the Resource Group where the IoT Hub Device Update Account should exist. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // Sku of the IoT Hub Device Update Account. Possible values are Free and Standard. Defaults to Standard. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // A mapping of tags which should be assigned to the IoT Hub Device Update Account. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type IdentityInitParameters struct { + + // A list of User Assigned Managed Identity IDs to be assigned to this IoT Hub Device Update Account. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this IoT Hub Device Update Account. Possible values are SystemAssigned, UserAssigned and SystemAssigned, UserAssigned (to enable both). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityObservation struct { + + // A list of User Assigned Managed Identity IDs to be assigned to this IoT Hub Device Update Account. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Principal ID for the Service Principal associated with the Managed Service Identity of this IoT Hub Device Update Account. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID for the Service Principal associated with the Managed Service Identity of this IoT Hub Device Update Account. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this IoT Hub Device Update Account. Possible values are SystemAssigned, UserAssigned and SystemAssigned, UserAssigned (to enable both). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityParameters struct { + + // A list of User Assigned Managed Identity IDs to be assigned to this IoT Hub Device Update Account. 
+ // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this IoT Hub Device Update Account. Possible values are SystemAssigned, UserAssigned and SystemAssigned, UserAssigned (to enable both). + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +// IOTHubDeviceUpdateAccountSpec defines the desired state of IOTHubDeviceUpdateAccount +type IOTHubDeviceUpdateAccountSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider IOTHubDeviceUpdateAccountParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider IOTHubDeviceUpdateAccountInitParameters `json:"initProvider,omitempty"` +} + +// IOTHubDeviceUpdateAccountStatus defines the observed state of IOTHubDeviceUpdateAccount. +type IOTHubDeviceUpdateAccountStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider IOTHubDeviceUpdateAccountObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// IOTHubDeviceUpdateAccount is the Schema for the IOTHubDeviceUpdateAccounts API. Manages an IoT Hub Device Update Account. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type IOTHubDeviceUpdateAccount struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + Spec IOTHubDeviceUpdateAccountSpec `json:"spec"` + Status IOTHubDeviceUpdateAccountStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// IOTHubDeviceUpdateAccountList contains a list of IOTHubDeviceUpdateAccounts +type IOTHubDeviceUpdateAccountList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []IOTHubDeviceUpdateAccount `json:"items"` +} + +// Repository type metadata. +var ( + IOTHubDeviceUpdateAccount_Kind = "IOTHubDeviceUpdateAccount" + IOTHubDeviceUpdateAccount_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: IOTHubDeviceUpdateAccount_Kind}.String() + IOTHubDeviceUpdateAccount_KindAPIVersion = IOTHubDeviceUpdateAccount_Kind + "." 
+ CRDGroupVersion.String() + IOTHubDeviceUpdateAccount_GroupVersionKind = CRDGroupVersion.WithKind(IOTHubDeviceUpdateAccount_Kind) +) + +func init() { + SchemeBuilder.Register(&IOTHubDeviceUpdateAccount{}, &IOTHubDeviceUpdateAccountList{}) +} diff --git a/apis/deviceupdate/v1beta2/zz_iothubdeviceupdateinstance_terraformed.go b/apis/deviceupdate/v1beta2/zz_iothubdeviceupdateinstance_terraformed.go new file mode 100755 index 000000000..4a3092136 --- /dev/null +++ b/apis/deviceupdate/v1beta2/zz_iothubdeviceupdateinstance_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this IOTHubDeviceUpdateInstance +func (mg *IOTHubDeviceUpdateInstance) GetTerraformResourceType() string { + return "azurerm_iothub_device_update_instance" +} + +// GetConnectionDetailsMapping for this IOTHubDeviceUpdateInstance +func (tr *IOTHubDeviceUpdateInstance) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"diagnostic_storage_account[*].connection_string": "spec.forProvider.diagnosticStorageAccount[*].connectionStringSecretRef"} +} + +// GetObservation of this IOTHubDeviceUpdateInstance +func (tr *IOTHubDeviceUpdateInstance) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this IOTHubDeviceUpdateInstance +func (tr *IOTHubDeviceUpdateInstance) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, 
&tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this IOTHubDeviceUpdateInstance +func (tr *IOTHubDeviceUpdateInstance) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this IOTHubDeviceUpdateInstance +func (tr *IOTHubDeviceUpdateInstance) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this IOTHubDeviceUpdateInstance +func (tr *IOTHubDeviceUpdateInstance) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this IOTHubDeviceUpdateInstance +func (tr *IOTHubDeviceUpdateInstance) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this IOTHubDeviceUpdateInstance +func (tr *IOTHubDeviceUpdateInstance) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this IOTHubDeviceUpdateInstance using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *IOTHubDeviceUpdateInstance) LateInitialize(attrs []byte) (bool, error) { + params := &IOTHubDeviceUpdateInstanceParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *IOTHubDeviceUpdateInstance) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/deviceupdate/v1beta2/zz_iothubdeviceupdateinstance_types.go b/apis/deviceupdate/v1beta2/zz_iothubdeviceupdateinstance_types.go new file mode 100755 index 000000000..16880915e --- /dev/null +++ b/apis/deviceupdate/v1beta2/zz_iothubdeviceupdateinstance_types.go @@ -0,0 +1,209 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DiagnosticStorageAccountInitParameters struct { + + // Resource ID of the Diagnostic Storage Account. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Reference to a Account in storage to populate id. + // +kubebuilder:validation:Optional + IDRef *v1.Reference `json:"idRef,omitempty" tf:"-"` + + // Selector for a Account in storage to populate id. + // +kubebuilder:validation:Optional + IDSelector *v1.Selector `json:"idSelector,omitempty" tf:"-"` +} + +type DiagnosticStorageAccountObservation struct { + + // Resource ID of the Diagnostic Storage Account. + ID *string `json:"id,omitempty" tf:"id,omitempty"` +} + +type DiagnosticStorageAccountParameters struct { + + // Connection String of the Diagnostic Storage Account. + // +kubebuilder:validation:Required + ConnectionStringSecretRef v1.SecretKeySelector `json:"connectionStringSecretRef" tf:"-"` + + // Resource ID of the Diagnostic Storage Account. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Reference to a Account in storage to populate id. + // +kubebuilder:validation:Optional + IDRef *v1.Reference `json:"idRef,omitempty" tf:"-"` + + // Selector for a Account in storage to populate id. + // +kubebuilder:validation:Optional + IDSelector *v1.Selector `json:"idSelector,omitempty" tf:"-"` +} + +type IOTHubDeviceUpdateInstanceInitParameters struct { + + // Whether the diagnostic log collection is enabled. Possible values are true and false. Defaults to false. + DiagnosticEnabled *bool `json:"diagnosticEnabled,omitempty" tf:"diagnostic_enabled,omitempty"` + + // A diagnostic_storage_account block as defined below. 
+ DiagnosticStorageAccount *DiagnosticStorageAccountInitParameters `json:"diagnosticStorageAccount,omitempty" tf:"diagnostic_storage_account,omitempty"` + + // Specifies the ID of the IoT Hub associated with the IoT Hub Device Update Instance. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devices/v1beta2.IOTHub + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + IOTHubID *string `json:"iothubId,omitempty" tf:"iothub_id,omitempty"` + + // Reference to a IOTHub in devices to populate iothubId. + // +kubebuilder:validation:Optional + IOTHubIDRef *v1.Reference `json:"iothubIdRef,omitempty" tf:"-"` + + // Selector for a IOTHub in devices to populate iothubId. + // +kubebuilder:validation:Optional + IOTHubIDSelector *v1.Selector `json:"iothubIdSelector,omitempty" tf:"-"` + + // A mapping of tags which should be assigned to the IoT Hub Device Update Instance. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type IOTHubDeviceUpdateInstanceObservation struct { + + // Specifies the ID of the IoT Hub Device Update Account where the IoT Hub Device Update Instance exists. Changing this forces a new resource to be created. + DeviceUpdateAccountID *string `json:"deviceUpdateAccountId,omitempty" tf:"device_update_account_id,omitempty"` + + // Whether the diagnostic log collection is enabled. Possible values are true and false. Defaults to false. + DiagnosticEnabled *bool `json:"diagnosticEnabled,omitempty" tf:"diagnostic_enabled,omitempty"` + + // A diagnostic_storage_account block as defined below. + DiagnosticStorageAccount *DiagnosticStorageAccountObservation `json:"diagnosticStorageAccount,omitempty" tf:"diagnostic_storage_account,omitempty"` + + // The ID of the IoT Hub Device Update Instance. 
+ ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies the ID of the IoT Hub associated with the IoT Hub Device Update Instance. Changing this forces a new resource to be created. + IOTHubID *string `json:"iothubId,omitempty" tf:"iothub_id,omitempty"` + + // A mapping of tags which should be assigned to the IoT Hub Device Update Instance. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type IOTHubDeviceUpdateInstanceParameters struct { + + // Specifies the ID of the IoT Hub Device Update Account where the IoT Hub Device Update Instance exists. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/deviceupdate/v1beta2.IOTHubDeviceUpdateAccount + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + DeviceUpdateAccountID *string `json:"deviceUpdateAccountId,omitempty" tf:"device_update_account_id,omitempty"` + + // Reference to a IOTHubDeviceUpdateAccount in deviceupdate to populate deviceUpdateAccountId. + // +kubebuilder:validation:Optional + DeviceUpdateAccountIDRef *v1.Reference `json:"deviceUpdateAccountIdRef,omitempty" tf:"-"` + + // Selector for a IOTHubDeviceUpdateAccount in deviceupdate to populate deviceUpdateAccountId. + // +kubebuilder:validation:Optional + DeviceUpdateAccountIDSelector *v1.Selector `json:"deviceUpdateAccountIdSelector,omitempty" tf:"-"` + + // Whether the diagnostic log collection is enabled. Possible values are true and false. Defaults to false. + // +kubebuilder:validation:Optional + DiagnosticEnabled *bool `json:"diagnosticEnabled,omitempty" tf:"diagnostic_enabled,omitempty"` + + // A diagnostic_storage_account block as defined below. 
+ // +kubebuilder:validation:Optional + DiagnosticStorageAccount *DiagnosticStorageAccountParameters `json:"diagnosticStorageAccount,omitempty" tf:"diagnostic_storage_account,omitempty"` + + // Specifies the ID of the IoT Hub associated with the IoT Hub Device Update Instance. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devices/v1beta2.IOTHub + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + IOTHubID *string `json:"iothubId,omitempty" tf:"iothub_id,omitempty"` + + // Reference to a IOTHub in devices to populate iothubId. + // +kubebuilder:validation:Optional + IOTHubIDRef *v1.Reference `json:"iothubIdRef,omitempty" tf:"-"` + + // Selector for a IOTHub in devices to populate iothubId. + // +kubebuilder:validation:Optional + IOTHubIDSelector *v1.Selector `json:"iothubIdSelector,omitempty" tf:"-"` + + // A mapping of tags which should be assigned to the IoT Hub Device Update Instance. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +// IOTHubDeviceUpdateInstanceSpec defines the desired state of IOTHubDeviceUpdateInstance +type IOTHubDeviceUpdateInstanceSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider IOTHubDeviceUpdateInstanceParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider IOTHubDeviceUpdateInstanceInitParameters `json:"initProvider,omitempty"` +} + +// IOTHubDeviceUpdateInstanceStatus defines the observed state of IOTHubDeviceUpdateInstance. +type IOTHubDeviceUpdateInstanceStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider IOTHubDeviceUpdateInstanceObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// IOTHubDeviceUpdateInstance is the Schema for the IOTHubDeviceUpdateInstances API. Manages an IoT Hub Device Update Instance. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type IOTHubDeviceUpdateInstance struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec IOTHubDeviceUpdateInstanceSpec `json:"spec"` + Status IOTHubDeviceUpdateInstanceStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// IOTHubDeviceUpdateInstanceList contains a list of IOTHubDeviceUpdateInstances +type IOTHubDeviceUpdateInstanceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []IOTHubDeviceUpdateInstance `json:"items"` +} + +// Repository type metadata. 
+var ( + IOTHubDeviceUpdateInstance_Kind = "IOTHubDeviceUpdateInstance" + IOTHubDeviceUpdateInstance_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: IOTHubDeviceUpdateInstance_Kind}.String() + IOTHubDeviceUpdateInstance_KindAPIVersion = IOTHubDeviceUpdateInstance_Kind + "." + CRDGroupVersion.String() + IOTHubDeviceUpdateInstance_GroupVersionKind = CRDGroupVersion.WithKind(IOTHubDeviceUpdateInstance_Kind) +) + +func init() { + SchemeBuilder.Register(&IOTHubDeviceUpdateInstance{}, &IOTHubDeviceUpdateInstanceList{}) +} diff --git a/apis/devtestlab/v1beta1/zz_generated.conversion_hubs.go b/apis/devtestlab/v1beta1/zz_generated.conversion_hubs.go index 8c0803487..e9e272604 100755 --- a/apis/devtestlab/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/devtestlab/v1beta1/zz_generated.conversion_hubs.go @@ -6,23 +6,8 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *GlobalVMShutdownSchedule) Hub() {} - // Hub marks this type as a conversion hub. func (tr *Lab) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *LinuxVirtualMachine) Hub() {} - // Hub marks this type as a conversion hub. func (tr *Policy) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Schedule) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *VirtualNetwork) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *WindowsVirtualMachine) Hub() {} diff --git a/apis/devtestlab/v1beta1/zz_generated.conversion_spokes.go b/apis/devtestlab/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..c25459c0c --- /dev/null +++ b/apis/devtestlab/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,114 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this GlobalVMShutdownSchedule to the hub type. +func (tr *GlobalVMShutdownSchedule) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the GlobalVMShutdownSchedule type. +func (tr *GlobalVMShutdownSchedule) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this LinuxVirtualMachine to the hub type. +func (tr *LinuxVirtualMachine) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the LinuxVirtualMachine type. 
+func (tr *LinuxVirtualMachine) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Schedule to the hub type. +func (tr *Schedule) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Schedule type. +func (tr *Schedule) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this VirtualNetwork to the hub type. +func (tr *VirtualNetwork) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the VirtualNetwork type. 
+func (tr *VirtualNetwork) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this WindowsVirtualMachine to the hub type. +func (tr *WindowsVirtualMachine) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the WindowsVirtualMachine type. +func (tr *WindowsVirtualMachine) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/devtestlab/v1beta2/zz_generated.conversion_hubs.go b/apis/devtestlab/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 000000000..00e36e7b6 --- /dev/null +++ b/apis/devtestlab/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,22 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. 
+func (tr *GlobalVMShutdownSchedule) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *LinuxVirtualMachine) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Schedule) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *VirtualNetwork) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *WindowsVirtualMachine) Hub() {} diff --git a/apis/devtestlab/v1beta2/zz_generated.deepcopy.go b/apis/devtestlab/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..38cd7ffa2 --- /dev/null +++ b/apis/devtestlab/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,2855 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DailyRecurrenceInitParameters) DeepCopyInto(out *DailyRecurrenceInitParameters) { + *out = *in + if in.Time != nil { + in, out := &in.Time, &out.Time + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DailyRecurrenceInitParameters. +func (in *DailyRecurrenceInitParameters) DeepCopy() *DailyRecurrenceInitParameters { + if in == nil { + return nil + } + out := new(DailyRecurrenceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DailyRecurrenceObservation) DeepCopyInto(out *DailyRecurrenceObservation) { + *out = *in + if in.Time != nil { + in, out := &in.Time, &out.Time + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DailyRecurrenceObservation. +func (in *DailyRecurrenceObservation) DeepCopy() *DailyRecurrenceObservation { + if in == nil { + return nil + } + out := new(DailyRecurrenceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DailyRecurrenceParameters) DeepCopyInto(out *DailyRecurrenceParameters) { + *out = *in + if in.Time != nil { + in, out := &in.Time, &out.Time + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DailyRecurrenceParameters. +func (in *DailyRecurrenceParameters) DeepCopy() *DailyRecurrenceParameters { + if in == nil { + return nil + } + out := new(DailyRecurrenceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GalleryImageReferenceInitParameters) DeepCopyInto(out *GalleryImageReferenceInitParameters) { + *out = *in + if in.Offer != nil { + in, out := &in.Offer, &out.Offer + *out = new(string) + **out = **in + } + if in.Publisher != nil { + in, out := &in.Publisher, &out.Publisher + *out = new(string) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GalleryImageReferenceInitParameters. 
+func (in *GalleryImageReferenceInitParameters) DeepCopy() *GalleryImageReferenceInitParameters { + if in == nil { + return nil + } + out := new(GalleryImageReferenceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GalleryImageReferenceObservation) DeepCopyInto(out *GalleryImageReferenceObservation) { + *out = *in + if in.Offer != nil { + in, out := &in.Offer, &out.Offer + *out = new(string) + **out = **in + } + if in.Publisher != nil { + in, out := &in.Publisher, &out.Publisher + *out = new(string) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GalleryImageReferenceObservation. +func (in *GalleryImageReferenceObservation) DeepCopy() *GalleryImageReferenceObservation { + if in == nil { + return nil + } + out := new(GalleryImageReferenceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GalleryImageReferenceParameters) DeepCopyInto(out *GalleryImageReferenceParameters) { + *out = *in + if in.Offer != nil { + in, out := &in.Offer, &out.Offer + *out = new(string) + **out = **in + } + if in.Publisher != nil { + in, out := &in.Publisher, &out.Publisher + *out = new(string) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GalleryImageReferenceParameters. 
+func (in *GalleryImageReferenceParameters) DeepCopy() *GalleryImageReferenceParameters { + if in == nil { + return nil + } + out := new(GalleryImageReferenceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GlobalVMShutdownSchedule) DeepCopyInto(out *GlobalVMShutdownSchedule) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalVMShutdownSchedule. +func (in *GlobalVMShutdownSchedule) DeepCopy() *GlobalVMShutdownSchedule { + if in == nil { + return nil + } + out := new(GlobalVMShutdownSchedule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GlobalVMShutdownSchedule) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GlobalVMShutdownScheduleInitParameters) DeepCopyInto(out *GlobalVMShutdownScheduleInitParameters) { + *out = *in + if in.DailyRecurrenceTime != nil { + in, out := &in.DailyRecurrenceTime, &out.DailyRecurrenceTime + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.NotificationSettings != nil { + in, out := &in.NotificationSettings, &out.NotificationSettings + *out = new(NotificationSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Timezone != nil { + in, out := &in.Timezone, &out.Timezone + *out = new(string) + **out = **in + } + if in.VirtualMachineID != nil { + in, out := &in.VirtualMachineID, &out.VirtualMachineID + *out = new(string) + **out = **in + } + if in.VirtualMachineIDRef != nil { + in, out := &in.VirtualMachineIDRef, &out.VirtualMachineIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualMachineIDSelector != nil { + in, out := &in.VirtualMachineIDSelector, &out.VirtualMachineIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalVMShutdownScheduleInitParameters. 
+func (in *GlobalVMShutdownScheduleInitParameters) DeepCopy() *GlobalVMShutdownScheduleInitParameters { + if in == nil { + return nil + } + out := new(GlobalVMShutdownScheduleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GlobalVMShutdownScheduleList) DeepCopyInto(out *GlobalVMShutdownScheduleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]GlobalVMShutdownSchedule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalVMShutdownScheduleList. +func (in *GlobalVMShutdownScheduleList) DeepCopy() *GlobalVMShutdownScheduleList { + if in == nil { + return nil + } + out := new(GlobalVMShutdownScheduleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GlobalVMShutdownScheduleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GlobalVMShutdownScheduleObservation) DeepCopyInto(out *GlobalVMShutdownScheduleObservation) { + *out = *in + if in.DailyRecurrenceTime != nil { + in, out := &in.DailyRecurrenceTime, &out.DailyRecurrenceTime + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.NotificationSettings != nil { + in, out := &in.NotificationSettings, &out.NotificationSettings + *out = new(NotificationSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Timezone != nil { + in, out := &in.Timezone, &out.Timezone + *out = new(string) + **out = **in + } + if in.VirtualMachineID != nil { + in, out := &in.VirtualMachineID, &out.VirtualMachineID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalVMShutdownScheduleObservation. +func (in *GlobalVMShutdownScheduleObservation) DeepCopy() *GlobalVMShutdownScheduleObservation { + if in == nil { + return nil + } + out := new(GlobalVMShutdownScheduleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GlobalVMShutdownScheduleParameters) DeepCopyInto(out *GlobalVMShutdownScheduleParameters) { + *out = *in + if in.DailyRecurrenceTime != nil { + in, out := &in.DailyRecurrenceTime, &out.DailyRecurrenceTime + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.NotificationSettings != nil { + in, out := &in.NotificationSettings, &out.NotificationSettings + *out = new(NotificationSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Timezone != nil { + in, out := &in.Timezone, &out.Timezone + *out = new(string) + **out = **in + } + if in.VirtualMachineID != nil { + in, out := &in.VirtualMachineID, &out.VirtualMachineID + *out = new(string) + **out = **in + } + if in.VirtualMachineIDRef != nil { + in, out := &in.VirtualMachineIDRef, &out.VirtualMachineIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualMachineIDSelector != nil { + in, out := &in.VirtualMachineIDSelector, &out.VirtualMachineIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalVMShutdownScheduleParameters. +func (in *GlobalVMShutdownScheduleParameters) DeepCopy() *GlobalVMShutdownScheduleParameters { + if in == nil { + return nil + } + out := new(GlobalVMShutdownScheduleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GlobalVMShutdownScheduleSpec) DeepCopyInto(out *GlobalVMShutdownScheduleSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalVMShutdownScheduleSpec. +func (in *GlobalVMShutdownScheduleSpec) DeepCopy() *GlobalVMShutdownScheduleSpec { + if in == nil { + return nil + } + out := new(GlobalVMShutdownScheduleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GlobalVMShutdownScheduleStatus) DeepCopyInto(out *GlobalVMShutdownScheduleStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalVMShutdownScheduleStatus. +func (in *GlobalVMShutdownScheduleStatus) DeepCopy() *GlobalVMShutdownScheduleStatus { + if in == nil { + return nil + } + out := new(GlobalVMShutdownScheduleStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HourlyRecurrenceInitParameters) DeepCopyInto(out *HourlyRecurrenceInitParameters) { + *out = *in + if in.Minute != nil { + in, out := &in.Minute, &out.Minute + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HourlyRecurrenceInitParameters. +func (in *HourlyRecurrenceInitParameters) DeepCopy() *HourlyRecurrenceInitParameters { + if in == nil { + return nil + } + out := new(HourlyRecurrenceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *HourlyRecurrenceObservation) DeepCopyInto(out *HourlyRecurrenceObservation) { + *out = *in + if in.Minute != nil { + in, out := &in.Minute, &out.Minute + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HourlyRecurrenceObservation. +func (in *HourlyRecurrenceObservation) DeepCopy() *HourlyRecurrenceObservation { + if in == nil { + return nil + } + out := new(HourlyRecurrenceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HourlyRecurrenceParameters) DeepCopyInto(out *HourlyRecurrenceParameters) { + *out = *in + if in.Minute != nil { + in, out := &in.Minute, &out.Minute + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HourlyRecurrenceParameters. +func (in *HourlyRecurrenceParameters) DeepCopy() *HourlyRecurrenceParameters { + if in == nil { + return nil + } + out := new(HourlyRecurrenceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InboundNATRuleInitParameters) DeepCopyInto(out *InboundNATRuleInitParameters) { + *out = *in + if in.BackendPort != nil { + in, out := &in.BackendPort, &out.BackendPort + *out = new(float64) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InboundNATRuleInitParameters. 
+func (in *InboundNATRuleInitParameters) DeepCopy() *InboundNATRuleInitParameters { + if in == nil { + return nil + } + out := new(InboundNATRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InboundNATRuleObservation) DeepCopyInto(out *InboundNATRuleObservation) { + *out = *in + if in.BackendPort != nil { + in, out := &in.BackendPort, &out.BackendPort + *out = new(float64) + **out = **in + } + if in.FrontendPort != nil { + in, out := &in.FrontendPort, &out.FrontendPort + *out = new(float64) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InboundNATRuleObservation. +func (in *InboundNATRuleObservation) DeepCopy() *InboundNATRuleObservation { + if in == nil { + return nil + } + out := new(InboundNATRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InboundNATRuleParameters) DeepCopyInto(out *InboundNATRuleParameters) { + *out = *in + if in.BackendPort != nil { + in, out := &in.BackendPort, &out.BackendPort + *out = new(float64) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InboundNATRuleParameters. +func (in *InboundNATRuleParameters) DeepCopy() *InboundNATRuleParameters { + if in == nil { + return nil + } + out := new(InboundNATRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxVirtualMachine) DeepCopyInto(out *LinuxVirtualMachine) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachine. +func (in *LinuxVirtualMachine) DeepCopy() *LinuxVirtualMachine { + if in == nil { + return nil + } + out := new(LinuxVirtualMachine) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LinuxVirtualMachine) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxVirtualMachineInitParameters) DeepCopyInto(out *LinuxVirtualMachineInitParameters) { + *out = *in + if in.AllowClaim != nil { + in, out := &in.AllowClaim, &out.AllowClaim + *out = new(bool) + **out = **in + } + if in.DisallowPublicIPAddress != nil { + in, out := &in.DisallowPublicIPAddress, &out.DisallowPublicIPAddress + *out = new(bool) + **out = **in + } + if in.GalleryImageReference != nil { + in, out := &in.GalleryImageReference, &out.GalleryImageReference + *out = new(GalleryImageReferenceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.InboundNATRule != nil { + in, out := &in.InboundNATRule, &out.InboundNATRule + *out = make([]InboundNATRuleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LabName != nil { + in, out := &in.LabName, &out.LabName + *out = new(string) + **out = **in + } + if in.LabNameRef != nil { + in, out := &in.LabNameRef, &out.LabNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LabNameSelector != nil { + in, out := &in.LabNameSelector, &out.LabNameSelector + *out = 
new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.LabSubnetName != nil { + in, out := &in.LabSubnetName, &out.LabSubnetName + *out = new(string) + **out = **in + } + if in.LabSubnetNameRef != nil { + in, out := &in.LabSubnetNameRef, &out.LabSubnetNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LabSubnetNameSelector != nil { + in, out := &in.LabSubnetNameSelector, &out.LabSubnetNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.LabVirtualNetworkID != nil { + in, out := &in.LabVirtualNetworkID, &out.LabVirtualNetworkID + *out = new(string) + **out = **in + } + if in.LabVirtualNetworkIDRef != nil { + in, out := &in.LabVirtualNetworkIDRef, &out.LabVirtualNetworkIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LabVirtualNetworkIDSelector != nil { + in, out := &in.LabVirtualNetworkIDSelector, &out.LabVirtualNetworkIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Notes != nil { + in, out := &in.Notes, &out.Notes + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SSHKey != nil { + in, out := &in.SSHKey, &out.SSHKey + *out = new(string) + **out = **in + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(string) + **out = **in + } + if in.StorageType != nil { + in, out := &in.StorageType, 
&out.StorageType + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachineInitParameters. +func (in *LinuxVirtualMachineInitParameters) DeepCopy() *LinuxVirtualMachineInitParameters { + if in == nil { + return nil + } + out := new(LinuxVirtualMachineInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxVirtualMachineList) DeepCopyInto(out *LinuxVirtualMachineList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LinuxVirtualMachine, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachineList. +func (in *LinuxVirtualMachineList) DeepCopy() *LinuxVirtualMachineList { + if in == nil { + return nil + } + out := new(LinuxVirtualMachineList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LinuxVirtualMachineList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxVirtualMachineObservation) DeepCopyInto(out *LinuxVirtualMachineObservation) { + *out = *in + if in.AllowClaim != nil { + in, out := &in.AllowClaim, &out.AllowClaim + *out = new(bool) + **out = **in + } + if in.DisallowPublicIPAddress != nil { + in, out := &in.DisallowPublicIPAddress, &out.DisallowPublicIPAddress + *out = new(bool) + **out = **in + } + if in.Fqdn != nil { + in, out := &in.Fqdn, &out.Fqdn + *out = new(string) + **out = **in + } + if in.GalleryImageReference != nil { + in, out := &in.GalleryImageReference, &out.GalleryImageReference + *out = new(GalleryImageReferenceObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InboundNATRule != nil { + in, out := &in.InboundNATRule, &out.InboundNATRule + *out = make([]InboundNATRuleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LabName != nil { + in, out := &in.LabName, &out.LabName + *out = new(string) + **out = **in + } + if in.LabSubnetName != nil { + in, out := &in.LabSubnetName, &out.LabSubnetName + *out = new(string) + **out = **in + } + if in.LabVirtualNetworkID != nil { + in, out := &in.LabVirtualNetworkID, &out.LabVirtualNetworkID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Notes != nil { + in, out := &in.Notes, &out.Notes + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.SSHKey != nil { + in, out := &in.SSHKey, &out.SSHKey + *out = new(string) + **out = **in + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(string) + **out = **in + } + if in.StorageType != nil { + in, out := &in.StorageType, 
&out.StorageType + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UniqueIdentifier != nil { + in, out := &in.UniqueIdentifier, &out.UniqueIdentifier + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachineObservation. +func (in *LinuxVirtualMachineObservation) DeepCopy() *LinuxVirtualMachineObservation { + if in == nil { + return nil + } + out := new(LinuxVirtualMachineObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxVirtualMachineParameters) DeepCopyInto(out *LinuxVirtualMachineParameters) { + *out = *in + if in.AllowClaim != nil { + in, out := &in.AllowClaim, &out.AllowClaim + *out = new(bool) + **out = **in + } + if in.DisallowPublicIPAddress != nil { + in, out := &in.DisallowPublicIPAddress, &out.DisallowPublicIPAddress + *out = new(bool) + **out = **in + } + if in.GalleryImageReference != nil { + in, out := &in.GalleryImageReference, &out.GalleryImageReference + *out = new(GalleryImageReferenceParameters) + (*in).DeepCopyInto(*out) + } + if in.InboundNATRule != nil { + in, out := &in.InboundNATRule, &out.InboundNATRule + *out = make([]InboundNATRuleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LabName != nil { + in, out := &in.LabName, &out.LabName + *out = new(string) + **out = **in + } + if in.LabNameRef != nil { + in, out := &in.LabNameRef, &out.LabNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LabNameSelector != nil { + in, out := &in.LabNameSelector, &out.LabNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.LabSubnetName != nil { + in, out := &in.LabSubnetName, &out.LabSubnetName + *out = new(string) + **out = **in + } + if in.LabSubnetNameRef != nil { + in, out := &in.LabSubnetNameRef, &out.LabSubnetNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LabSubnetNameSelector != nil { + in, out := &in.LabSubnetNameSelector, &out.LabSubnetNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.LabVirtualNetworkID != nil { + in, out := &in.LabVirtualNetworkID, &out.LabVirtualNetworkID + *out = new(string) + **out = **in + } + if in.LabVirtualNetworkIDRef != nil { + in, out := &in.LabVirtualNetworkIDRef, &out.LabVirtualNetworkIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LabVirtualNetworkIDSelector != nil { + in, out := &in.LabVirtualNetworkIDSelector, &out.LabVirtualNetworkIDSelector 
+ *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Notes != nil { + in, out := &in.Notes, &out.Notes + *out = new(string) + **out = **in + } + if in.PasswordSecretRef != nil { + in, out := &in.PasswordSecretRef, &out.PasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SSHKey != nil { + in, out := &in.SSHKey, &out.SSHKey + *out = new(string) + **out = **in + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(string) + **out = **in + } + if in.StorageType != nil { + in, out := &in.StorageType, &out.StorageType + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachineParameters. 
+func (in *LinuxVirtualMachineParameters) DeepCopy() *LinuxVirtualMachineParameters { + if in == nil { + return nil + } + out := new(LinuxVirtualMachineParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxVirtualMachineSpec) DeepCopyInto(out *LinuxVirtualMachineSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachineSpec. +func (in *LinuxVirtualMachineSpec) DeepCopy() *LinuxVirtualMachineSpec { + if in == nil { + return nil + } + out := new(LinuxVirtualMachineSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxVirtualMachineStatus) DeepCopyInto(out *LinuxVirtualMachineStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxVirtualMachineStatus. +func (in *LinuxVirtualMachineStatus) DeepCopy() *LinuxVirtualMachineStatus { + if in == nil { + return nil + } + out := new(LinuxVirtualMachineStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NotificationSettingsInitParameters) DeepCopyInto(out *NotificationSettingsInitParameters) { + *out = *in + if in.Email != nil { + in, out := &in.Email, &out.Email + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.TimeInMinutes != nil { + in, out := &in.TimeInMinutes, &out.TimeInMinutes + *out = new(float64) + **out = **in + } + if in.WebhookURL != nil { + in, out := &in.WebhookURL, &out.WebhookURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotificationSettingsInitParameters. +func (in *NotificationSettingsInitParameters) DeepCopy() *NotificationSettingsInitParameters { + if in == nil { + return nil + } + out := new(NotificationSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NotificationSettingsObservation) DeepCopyInto(out *NotificationSettingsObservation) { + *out = *in + if in.Email != nil { + in, out := &in.Email, &out.Email + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.TimeInMinutes != nil { + in, out := &in.TimeInMinutes, &out.TimeInMinutes + *out = new(float64) + **out = **in + } + if in.WebhookURL != nil { + in, out := &in.WebhookURL, &out.WebhookURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotificationSettingsObservation. +func (in *NotificationSettingsObservation) DeepCopy() *NotificationSettingsObservation { + if in == nil { + return nil + } + out := new(NotificationSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *NotificationSettingsParameters) DeepCopyInto(out *NotificationSettingsParameters) { + *out = *in + if in.Email != nil { + in, out := &in.Email, &out.Email + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.TimeInMinutes != nil { + in, out := &in.TimeInMinutes, &out.TimeInMinutes + *out = new(float64) + **out = **in + } + if in.WebhookURL != nil { + in, out := &in.WebhookURL, &out.WebhookURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotificationSettingsParameters. +func (in *NotificationSettingsParameters) DeepCopy() *NotificationSettingsParameters { + if in == nil { + return nil + } + out := new(NotificationSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Schedule) DeepCopyInto(out *Schedule) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Schedule. +func (in *Schedule) DeepCopy() *Schedule { + if in == nil { + return nil + } + out := new(Schedule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Schedule) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScheduleInitParameters) DeepCopyInto(out *ScheduleInitParameters) { + *out = *in + if in.DailyRecurrence != nil { + in, out := &in.DailyRecurrence, &out.DailyRecurrence + *out = new(DailyRecurrenceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.HourlyRecurrence != nil { + in, out := &in.HourlyRecurrence, &out.HourlyRecurrence + *out = new(HourlyRecurrenceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.NotificationSettings != nil { + in, out := &in.NotificationSettings, &out.NotificationSettings + *out = new(ScheduleNotificationSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TaskType != nil { + in, out := &in.TaskType, &out.TaskType + *out = new(string) + **out = **in + } + if in.TimeZoneID != nil { + in, out := &in.TimeZoneID, &out.TimeZoneID + *out = new(string) + **out = **in + } + if in.WeeklyRecurrence != nil { + in, out := &in.WeeklyRecurrence, &out.WeeklyRecurrence + *out = new(WeeklyRecurrenceInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleInitParameters. +func (in *ScheduleInitParameters) DeepCopy() *ScheduleInitParameters { + if in == nil { + return nil + } + out := new(ScheduleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScheduleList) DeepCopyInto(out *ScheduleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Schedule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleList. +func (in *ScheduleList) DeepCopy() *ScheduleList { + if in == nil { + return nil + } + out := new(ScheduleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ScheduleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScheduleNotificationSettingsInitParameters) DeepCopyInto(out *ScheduleNotificationSettingsInitParameters) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.TimeInMinutes != nil { + in, out := &in.TimeInMinutes, &out.TimeInMinutes + *out = new(float64) + **out = **in + } + if in.WebhookURL != nil { + in, out := &in.WebhookURL, &out.WebhookURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleNotificationSettingsInitParameters. +func (in *ScheduleNotificationSettingsInitParameters) DeepCopy() *ScheduleNotificationSettingsInitParameters { + if in == nil { + return nil + } + out := new(ScheduleNotificationSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScheduleNotificationSettingsObservation) DeepCopyInto(out *ScheduleNotificationSettingsObservation) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.TimeInMinutes != nil { + in, out := &in.TimeInMinutes, &out.TimeInMinutes + *out = new(float64) + **out = **in + } + if in.WebhookURL != nil { + in, out := &in.WebhookURL, &out.WebhookURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleNotificationSettingsObservation. +func (in *ScheduleNotificationSettingsObservation) DeepCopy() *ScheduleNotificationSettingsObservation { + if in == nil { + return nil + } + out := new(ScheduleNotificationSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScheduleNotificationSettingsParameters) DeepCopyInto(out *ScheduleNotificationSettingsParameters) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.TimeInMinutes != nil { + in, out := &in.TimeInMinutes, &out.TimeInMinutes + *out = new(float64) + **out = **in + } + if in.WebhookURL != nil { + in, out := &in.WebhookURL, &out.WebhookURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleNotificationSettingsParameters. +func (in *ScheduleNotificationSettingsParameters) DeepCopy() *ScheduleNotificationSettingsParameters { + if in == nil { + return nil + } + out := new(ScheduleNotificationSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScheduleObservation) DeepCopyInto(out *ScheduleObservation) { + *out = *in + if in.DailyRecurrence != nil { + in, out := &in.DailyRecurrence, &out.DailyRecurrence + *out = new(DailyRecurrenceObservation) + (*in).DeepCopyInto(*out) + } + if in.HourlyRecurrence != nil { + in, out := &in.HourlyRecurrence, &out.HourlyRecurrence + *out = new(HourlyRecurrenceObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LabName != nil { + in, out := &in.LabName, &out.LabName + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.NotificationSettings != nil { + in, out := &in.NotificationSettings, &out.NotificationSettings + *out = new(ScheduleNotificationSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TaskType != nil { + in, out := &in.TaskType, &out.TaskType + *out = new(string) + **out = **in + } + if in.TimeZoneID != nil { + in, out := &in.TimeZoneID, &out.TimeZoneID + *out = new(string) + **out = **in + } + if in.WeeklyRecurrence != nil { + in, out := &in.WeeklyRecurrence, &out.WeeklyRecurrence + *out = new(WeeklyRecurrenceObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleObservation. 
+func (in *ScheduleObservation) DeepCopy() *ScheduleObservation { + if in == nil { + return nil + } + out := new(ScheduleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScheduleParameters) DeepCopyInto(out *ScheduleParameters) { + *out = *in + if in.DailyRecurrence != nil { + in, out := &in.DailyRecurrence, &out.DailyRecurrence + *out = new(DailyRecurrenceParameters) + (*in).DeepCopyInto(*out) + } + if in.HourlyRecurrence != nil { + in, out := &in.HourlyRecurrence, &out.HourlyRecurrence + *out = new(HourlyRecurrenceParameters) + (*in).DeepCopyInto(*out) + } + if in.LabName != nil { + in, out := &in.LabName, &out.LabName + *out = new(string) + **out = **in + } + if in.LabNameRef != nil { + in, out := &in.LabNameRef, &out.LabNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LabNameSelector != nil { + in, out := &in.LabNameSelector, &out.LabNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.NotificationSettings != nil { + in, out := &in.NotificationSettings, &out.NotificationSettings + *out = new(ScheduleNotificationSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + 
*out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TaskType != nil { + in, out := &in.TaskType, &out.TaskType + *out = new(string) + **out = **in + } + if in.TimeZoneID != nil { + in, out := &in.TimeZoneID, &out.TimeZoneID + *out = new(string) + **out = **in + } + if in.WeeklyRecurrence != nil { + in, out := &in.WeeklyRecurrence, &out.WeeklyRecurrence + *out = new(WeeklyRecurrenceParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleParameters. +func (in *ScheduleParameters) DeepCopy() *ScheduleParameters { + if in == nil { + return nil + } + out := new(ScheduleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScheduleSpec) DeepCopyInto(out *ScheduleSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleSpec. +func (in *ScheduleSpec) DeepCopy() *ScheduleSpec { + if in == nil { + return nil + } + out := new(ScheduleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScheduleStatus) DeepCopyInto(out *ScheduleStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleStatus. 
+func (in *ScheduleStatus) DeepCopy() *ScheduleStatus { + if in == nil { + return nil + } + out := new(ScheduleStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubnetInitParameters) DeepCopyInto(out *SubnetInitParameters) { + *out = *in + if in.UseInVirtualMachineCreation != nil { + in, out := &in.UseInVirtualMachineCreation, &out.UseInVirtualMachineCreation + *out = new(string) + **out = **in + } + if in.UsePublicIPAddress != nil { + in, out := &in.UsePublicIPAddress, &out.UsePublicIPAddress + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubnetInitParameters. +func (in *SubnetInitParameters) DeepCopy() *SubnetInitParameters { + if in == nil { + return nil + } + out := new(SubnetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubnetObservation) DeepCopyInto(out *SubnetObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.UseInVirtualMachineCreation != nil { + in, out := &in.UseInVirtualMachineCreation, &out.UseInVirtualMachineCreation + *out = new(string) + **out = **in + } + if in.UsePublicIPAddress != nil { + in, out := &in.UsePublicIPAddress, &out.UsePublicIPAddress + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubnetObservation. +func (in *SubnetObservation) DeepCopy() *SubnetObservation { + if in == nil { + return nil + } + out := new(SubnetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SubnetParameters) DeepCopyInto(out *SubnetParameters) { + *out = *in + if in.UseInVirtualMachineCreation != nil { + in, out := &in.UseInVirtualMachineCreation, &out.UseInVirtualMachineCreation + *out = new(string) + **out = **in + } + if in.UsePublicIPAddress != nil { + in, out := &in.UsePublicIPAddress, &out.UsePublicIPAddress + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubnetParameters. +func (in *SubnetParameters) DeepCopy() *SubnetParameters { + if in == nil { + return nil + } + out := new(SubnetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualNetwork) DeepCopyInto(out *VirtualNetwork) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetwork. +func (in *VirtualNetwork) DeepCopy() *VirtualNetwork { + if in == nil { + return nil + } + out := new(VirtualNetwork) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VirtualNetwork) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualNetworkInitParameters) DeepCopyInto(out *VirtualNetworkInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.LabName != nil { + in, out := &in.LabName, &out.LabName + *out = new(string) + **out = **in + } + if in.LabNameRef != nil { + in, out := &in.LabNameRef, &out.LabNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LabNameSelector != nil { + in, out := &in.LabNameSelector, &out.LabNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Subnet != nil { + in, out := &in.Subnet, &out.Subnet + *out = new(SubnetInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkInitParameters. 
+func (in *VirtualNetworkInitParameters) DeepCopy() *VirtualNetworkInitParameters { + if in == nil { + return nil + } + out := new(VirtualNetworkInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualNetworkList) DeepCopyInto(out *VirtualNetworkList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VirtualNetwork, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkList. +func (in *VirtualNetworkList) DeepCopy() *VirtualNetworkList { + if in == nil { + return nil + } + out := new(VirtualNetworkList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VirtualNetworkList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualNetworkObservation) DeepCopyInto(out *VirtualNetworkObservation) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LabName != nil { + in, out := &in.LabName, &out.LabName + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Subnet != nil { + in, out := &in.Subnet, &out.Subnet + *out = new(SubnetObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UniqueIdentifier != nil { + in, out := &in.UniqueIdentifier, &out.UniqueIdentifier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkObservation. +func (in *VirtualNetworkObservation) DeepCopy() *VirtualNetworkObservation { + if in == nil { + return nil + } + out := new(VirtualNetworkObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualNetworkParameters) DeepCopyInto(out *VirtualNetworkParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.LabName != nil { + in, out := &in.LabName, &out.LabName + *out = new(string) + **out = **in + } + if in.LabNameRef != nil { + in, out := &in.LabNameRef, &out.LabNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LabNameSelector != nil { + in, out := &in.LabNameSelector, &out.LabNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Subnet != nil { + in, out := &in.Subnet, &out.Subnet + *out = new(SubnetParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkParameters. 
+func (in *VirtualNetworkParameters) DeepCopy() *VirtualNetworkParameters { + if in == nil { + return nil + } + out := new(VirtualNetworkParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualNetworkSpec) DeepCopyInto(out *VirtualNetworkSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkSpec. +func (in *VirtualNetworkSpec) DeepCopy() *VirtualNetworkSpec { + if in == nil { + return nil + } + out := new(VirtualNetworkSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualNetworkStatus) DeepCopyInto(out *VirtualNetworkStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkStatus. +func (in *VirtualNetworkStatus) DeepCopy() *VirtualNetworkStatus { + if in == nil { + return nil + } + out := new(VirtualNetworkStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WeeklyRecurrenceInitParameters) DeepCopyInto(out *WeeklyRecurrenceInitParameters) { + *out = *in + if in.Time != nil { + in, out := &in.Time, &out.Time + *out = new(string) + **out = **in + } + if in.WeekDays != nil { + in, out := &in.WeekDays, &out.WeekDays + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WeeklyRecurrenceInitParameters. +func (in *WeeklyRecurrenceInitParameters) DeepCopy() *WeeklyRecurrenceInitParameters { + if in == nil { + return nil + } + out := new(WeeklyRecurrenceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WeeklyRecurrenceObservation) DeepCopyInto(out *WeeklyRecurrenceObservation) { + *out = *in + if in.Time != nil { + in, out := &in.Time, &out.Time + *out = new(string) + **out = **in + } + if in.WeekDays != nil { + in, out := &in.WeekDays, &out.WeekDays + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WeeklyRecurrenceObservation. +func (in *WeeklyRecurrenceObservation) DeepCopy() *WeeklyRecurrenceObservation { + if in == nil { + return nil + } + out := new(WeeklyRecurrenceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WeeklyRecurrenceParameters) DeepCopyInto(out *WeeklyRecurrenceParameters) { + *out = *in + if in.Time != nil { + in, out := &in.Time, &out.Time + *out = new(string) + **out = **in + } + if in.WeekDays != nil { + in, out := &in.WeekDays, &out.WeekDays + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WeeklyRecurrenceParameters. +func (in *WeeklyRecurrenceParameters) DeepCopy() *WeeklyRecurrenceParameters { + if in == nil { + return nil + } + out := new(WeeklyRecurrenceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachine) DeepCopyInto(out *WindowsVirtualMachine) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachine. +func (in *WindowsVirtualMachine) DeepCopy() *WindowsVirtualMachine { + if in == nil { + return nil + } + out := new(WindowsVirtualMachine) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WindowsVirtualMachine) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsVirtualMachineGalleryImageReferenceInitParameters) DeepCopyInto(out *WindowsVirtualMachineGalleryImageReferenceInitParameters) { + *out = *in + if in.Offer != nil { + in, out := &in.Offer, &out.Offer + *out = new(string) + **out = **in + } + if in.Publisher != nil { + in, out := &in.Publisher, &out.Publisher + *out = new(string) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineGalleryImageReferenceInitParameters. +func (in *WindowsVirtualMachineGalleryImageReferenceInitParameters) DeepCopy() *WindowsVirtualMachineGalleryImageReferenceInitParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineGalleryImageReferenceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineGalleryImageReferenceObservation) DeepCopyInto(out *WindowsVirtualMachineGalleryImageReferenceObservation) { + *out = *in + if in.Offer != nil { + in, out := &in.Offer, &out.Offer + *out = new(string) + **out = **in + } + if in.Publisher != nil { + in, out := &in.Publisher, &out.Publisher + *out = new(string) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineGalleryImageReferenceObservation. 
+func (in *WindowsVirtualMachineGalleryImageReferenceObservation) DeepCopy() *WindowsVirtualMachineGalleryImageReferenceObservation { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineGalleryImageReferenceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineGalleryImageReferenceParameters) DeepCopyInto(out *WindowsVirtualMachineGalleryImageReferenceParameters) { + *out = *in + if in.Offer != nil { + in, out := &in.Offer, &out.Offer + *out = new(string) + **out = **in + } + if in.Publisher != nil { + in, out := &in.Publisher, &out.Publisher + *out = new(string) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineGalleryImageReferenceParameters. +func (in *WindowsVirtualMachineGalleryImageReferenceParameters) DeepCopy() *WindowsVirtualMachineGalleryImageReferenceParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineGalleryImageReferenceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsVirtualMachineInboundNATRuleInitParameters) DeepCopyInto(out *WindowsVirtualMachineInboundNATRuleInitParameters) { + *out = *in + if in.BackendPort != nil { + in, out := &in.BackendPort, &out.BackendPort + *out = new(float64) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineInboundNATRuleInitParameters. +func (in *WindowsVirtualMachineInboundNATRuleInitParameters) DeepCopy() *WindowsVirtualMachineInboundNATRuleInitParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineInboundNATRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineInboundNATRuleObservation) DeepCopyInto(out *WindowsVirtualMachineInboundNATRuleObservation) { + *out = *in + if in.BackendPort != nil { + in, out := &in.BackendPort, &out.BackendPort + *out = new(float64) + **out = **in + } + if in.FrontendPort != nil { + in, out := &in.FrontendPort, &out.FrontendPort + *out = new(float64) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineInboundNATRuleObservation. +func (in *WindowsVirtualMachineInboundNATRuleObservation) DeepCopy() *WindowsVirtualMachineInboundNATRuleObservation { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineInboundNATRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsVirtualMachineInboundNATRuleParameters) DeepCopyInto(out *WindowsVirtualMachineInboundNATRuleParameters) { + *out = *in + if in.BackendPort != nil { + in, out := &in.BackendPort, &out.BackendPort + *out = new(float64) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineInboundNATRuleParameters. +func (in *WindowsVirtualMachineInboundNATRuleParameters) DeepCopy() *WindowsVirtualMachineInboundNATRuleParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineInboundNATRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineInitParameters) DeepCopyInto(out *WindowsVirtualMachineInitParameters) { + *out = *in + if in.AllowClaim != nil { + in, out := &in.AllowClaim, &out.AllowClaim + *out = new(bool) + **out = **in + } + if in.DisallowPublicIPAddress != nil { + in, out := &in.DisallowPublicIPAddress, &out.DisallowPublicIPAddress + *out = new(bool) + **out = **in + } + if in.GalleryImageReference != nil { + in, out := &in.GalleryImageReference, &out.GalleryImageReference + *out = new(WindowsVirtualMachineGalleryImageReferenceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.InboundNATRule != nil { + in, out := &in.InboundNATRule, &out.InboundNATRule + *out = make([]WindowsVirtualMachineInboundNATRuleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LabName != nil { + in, out := &in.LabName, &out.LabName + *out = new(string) + **out = **in + } + if in.LabNameRef != nil { + in, out := &in.LabNameRef, &out.LabNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LabNameSelector != nil { + in, out := &in.LabNameSelector, 
&out.LabNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.LabSubnetName != nil { + in, out := &in.LabSubnetName, &out.LabSubnetName + *out = new(string) + **out = **in + } + if in.LabSubnetNameRef != nil { + in, out := &in.LabSubnetNameRef, &out.LabSubnetNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LabSubnetNameSelector != nil { + in, out := &in.LabSubnetNameSelector, &out.LabSubnetNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.LabVirtualNetworkID != nil { + in, out := &in.LabVirtualNetworkID, &out.LabVirtualNetworkID + *out = new(string) + **out = **in + } + if in.LabVirtualNetworkIDRef != nil { + in, out := &in.LabVirtualNetworkIDRef, &out.LabVirtualNetworkIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LabVirtualNetworkIDSelector != nil { + in, out := &in.LabVirtualNetworkIDSelector, &out.LabVirtualNetworkIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Notes != nil { + in, out := &in.Notes, &out.Notes + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(string) + **out = **in + } + if in.StorageType != nil { + in, out := &in.StorageType, &out.StorageType + *out = new(string) + **out = **in + } + if in.Tags != 
nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineInitParameters. +func (in *WindowsVirtualMachineInitParameters) DeepCopy() *WindowsVirtualMachineInitParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineList) DeepCopyInto(out *WindowsVirtualMachineList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]WindowsVirtualMachine, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineList. +func (in *WindowsVirtualMachineList) DeepCopy() *WindowsVirtualMachineList { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WindowsVirtualMachineList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsVirtualMachineObservation) DeepCopyInto(out *WindowsVirtualMachineObservation) { + *out = *in + if in.AllowClaim != nil { + in, out := &in.AllowClaim, &out.AllowClaim + *out = new(bool) + **out = **in + } + if in.DisallowPublicIPAddress != nil { + in, out := &in.DisallowPublicIPAddress, &out.DisallowPublicIPAddress + *out = new(bool) + **out = **in + } + if in.Fqdn != nil { + in, out := &in.Fqdn, &out.Fqdn + *out = new(string) + **out = **in + } + if in.GalleryImageReference != nil { + in, out := &in.GalleryImageReference, &out.GalleryImageReference + *out = new(WindowsVirtualMachineGalleryImageReferenceObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InboundNATRule != nil { + in, out := &in.InboundNATRule, &out.InboundNATRule + *out = make([]WindowsVirtualMachineInboundNATRuleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LabName != nil { + in, out := &in.LabName, &out.LabName + *out = new(string) + **out = **in + } + if in.LabSubnetName != nil { + in, out := &in.LabSubnetName, &out.LabSubnetName + *out = new(string) + **out = **in + } + if in.LabVirtualNetworkID != nil { + in, out := &in.LabVirtualNetworkID, &out.LabVirtualNetworkID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Notes != nil { + in, out := &in.Notes, &out.Notes + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(string) + **out = **in + } + if in.StorageType != nil { + in, out := &in.StorageType, &out.StorageType + *out = new(string) + **out = **in + } 
+ if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UniqueIdentifier != nil { + in, out := &in.UniqueIdentifier, &out.UniqueIdentifier + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineObservation. +func (in *WindowsVirtualMachineObservation) DeepCopy() *WindowsVirtualMachineObservation { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsVirtualMachineParameters) DeepCopyInto(out *WindowsVirtualMachineParameters) { + *out = *in + if in.AllowClaim != nil { + in, out := &in.AllowClaim, &out.AllowClaim + *out = new(bool) + **out = **in + } + if in.DisallowPublicIPAddress != nil { + in, out := &in.DisallowPublicIPAddress, &out.DisallowPublicIPAddress + *out = new(bool) + **out = **in + } + if in.GalleryImageReference != nil { + in, out := &in.GalleryImageReference, &out.GalleryImageReference + *out = new(WindowsVirtualMachineGalleryImageReferenceParameters) + (*in).DeepCopyInto(*out) + } + if in.InboundNATRule != nil { + in, out := &in.InboundNATRule, &out.InboundNATRule + *out = make([]WindowsVirtualMachineInboundNATRuleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LabName != nil { + in, out := &in.LabName, &out.LabName + *out = new(string) + **out = **in + } + if in.LabNameRef != nil { + in, out := &in.LabNameRef, &out.LabNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LabNameSelector != nil { + in, out := &in.LabNameSelector, &out.LabNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.LabSubnetName != nil { + in, out := &in.LabSubnetName, &out.LabSubnetName + *out = new(string) + **out = **in + } + if in.LabSubnetNameRef != nil { + in, out := &in.LabSubnetNameRef, &out.LabSubnetNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LabSubnetNameSelector != nil { + in, out := &in.LabSubnetNameSelector, &out.LabSubnetNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.LabVirtualNetworkID != nil { + in, out := &in.LabVirtualNetworkID, &out.LabVirtualNetworkID + *out = new(string) + **out = **in + } + if in.LabVirtualNetworkIDRef != nil { + in, out := &in.LabVirtualNetworkIDRef, &out.LabVirtualNetworkIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LabVirtualNetworkIDSelector != nil { + in, out := 
&in.LabVirtualNetworkIDSelector, &out.LabVirtualNetworkIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Notes != nil { + in, out := &in.Notes, &out.Notes + *out = new(string) + **out = **in + } + out.PasswordSecretRef = in.PasswordSecretRef + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(string) + **out = **in + } + if in.StorageType != nil { + in, out := &in.StorageType, &out.StorageType + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineParameters. 
+func (in *WindowsVirtualMachineParameters) DeepCopy() *WindowsVirtualMachineParameters { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineSpec) DeepCopyInto(out *WindowsVirtualMachineSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineSpec. +func (in *WindowsVirtualMachineSpec) DeepCopy() *WindowsVirtualMachineSpec { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsVirtualMachineStatus) DeepCopyInto(out *WindowsVirtualMachineStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsVirtualMachineStatus. +func (in *WindowsVirtualMachineStatus) DeepCopy() *WindowsVirtualMachineStatus { + if in == nil { + return nil + } + out := new(WindowsVirtualMachineStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/devtestlab/v1beta2/zz_generated.managed.go b/apis/devtestlab/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..f9414f878 --- /dev/null +++ b/apis/devtestlab/v1beta2/zz_generated.managed.go @@ -0,0 +1,308 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. 
+ +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this GlobalVMShutdownSchedule. +func (mg *GlobalVMShutdownSchedule) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this GlobalVMShutdownSchedule. +func (mg *GlobalVMShutdownSchedule) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this GlobalVMShutdownSchedule. +func (mg *GlobalVMShutdownSchedule) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this GlobalVMShutdownSchedule. +func (mg *GlobalVMShutdownSchedule) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this GlobalVMShutdownSchedule. +func (mg *GlobalVMShutdownSchedule) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this GlobalVMShutdownSchedule. +func (mg *GlobalVMShutdownSchedule) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this GlobalVMShutdownSchedule. +func (mg *GlobalVMShutdownSchedule) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this GlobalVMShutdownSchedule. +func (mg *GlobalVMShutdownSchedule) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this GlobalVMShutdownSchedule. +func (mg *GlobalVMShutdownSchedule) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this GlobalVMShutdownSchedule. 
+func (mg *GlobalVMShutdownSchedule) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this GlobalVMShutdownSchedule. +func (mg *GlobalVMShutdownSchedule) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this GlobalVMShutdownSchedule. +func (mg *GlobalVMShutdownSchedule) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this LinuxVirtualMachine. +func (mg *LinuxVirtualMachine) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this LinuxVirtualMachine. +func (mg *LinuxVirtualMachine) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this LinuxVirtualMachine. +func (mg *LinuxVirtualMachine) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this LinuxVirtualMachine. +func (mg *LinuxVirtualMachine) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this LinuxVirtualMachine. +func (mg *LinuxVirtualMachine) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this LinuxVirtualMachine. +func (mg *LinuxVirtualMachine) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this LinuxVirtualMachine. +func (mg *LinuxVirtualMachine) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this LinuxVirtualMachine. 
+func (mg *LinuxVirtualMachine) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this LinuxVirtualMachine. +func (mg *LinuxVirtualMachine) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this LinuxVirtualMachine. +func (mg *LinuxVirtualMachine) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this LinuxVirtualMachine. +func (mg *LinuxVirtualMachine) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this LinuxVirtualMachine. +func (mg *LinuxVirtualMachine) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Schedule. +func (mg *Schedule) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Schedule. +func (mg *Schedule) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Schedule. +func (mg *Schedule) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Schedule. +func (mg *Schedule) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Schedule. +func (mg *Schedule) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Schedule. +func (mg *Schedule) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Schedule. 
+func (mg *Schedule) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Schedule. +func (mg *Schedule) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Schedule. +func (mg *Schedule) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Schedule. +func (mg *Schedule) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Schedule. +func (mg *Schedule) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Schedule. +func (mg *Schedule) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this VirtualNetwork. +func (mg *VirtualNetwork) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this VirtualNetwork. +func (mg *VirtualNetwork) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this VirtualNetwork. +func (mg *VirtualNetwork) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this VirtualNetwork. +func (mg *VirtualNetwork) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this VirtualNetwork. +func (mg *VirtualNetwork) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this VirtualNetwork. 
+func (mg *VirtualNetwork) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this VirtualNetwork. +func (mg *VirtualNetwork) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this VirtualNetwork. +func (mg *VirtualNetwork) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this VirtualNetwork. +func (mg *VirtualNetwork) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this VirtualNetwork. +func (mg *VirtualNetwork) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this VirtualNetwork. +func (mg *VirtualNetwork) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this VirtualNetwork. +func (mg *VirtualNetwork) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this WindowsVirtualMachine. +func (mg *WindowsVirtualMachine) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this WindowsVirtualMachine. +func (mg *WindowsVirtualMachine) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this WindowsVirtualMachine. +func (mg *WindowsVirtualMachine) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this WindowsVirtualMachine. +func (mg *WindowsVirtualMachine) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this WindowsVirtualMachine. 
+func (mg *WindowsVirtualMachine) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this WindowsVirtualMachine. +func (mg *WindowsVirtualMachine) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this WindowsVirtualMachine. +func (mg *WindowsVirtualMachine) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this WindowsVirtualMachine. +func (mg *WindowsVirtualMachine) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this WindowsVirtualMachine. +func (mg *WindowsVirtualMachine) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this WindowsVirtualMachine. +func (mg *WindowsVirtualMachine) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this WindowsVirtualMachine. +func (mg *WindowsVirtualMachine) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this WindowsVirtualMachine. +func (mg *WindowsVirtualMachine) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/devtestlab/v1beta2/zz_generated.managedlist.go b/apis/devtestlab/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..eaedb624c --- /dev/null +++ b/apis/devtestlab/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,53 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. 
+ +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this GlobalVMShutdownScheduleList. +func (l *GlobalVMShutdownScheduleList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this LinuxVirtualMachineList. +func (l *LinuxVirtualMachineList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ScheduleList. +func (l *ScheduleList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this VirtualNetworkList. +func (l *VirtualNetworkList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this WindowsVirtualMachineList. +func (l *WindowsVirtualMachineList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/devtestlab/v1beta2/zz_generated.resolvers.go b/apis/devtestlab/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..9bcd614c9 --- /dev/null +++ b/apis/devtestlab/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,534 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + apisresolver "github.com/upbound/provider-azure/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *GlobalVMShutdownSchedule) ResolveReferences( // ResolveReferences of this GlobalVMShutdownSchedule. + ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("compute.azure.upbound.io", "v1beta2", "LinuxVirtualMachine", "LinuxVirtualMachineList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.VirtualMachineID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.VirtualMachineIDRef, + Selector: mg.Spec.ForProvider.VirtualMachineIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VirtualMachineID") + } + mg.Spec.ForProvider.VirtualMachineID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.VirtualMachineIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("compute.azure.upbound.io", "v1beta2", "LinuxVirtualMachine", "LinuxVirtualMachineList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.VirtualMachineID), + Extract: 
resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.VirtualMachineIDRef, + Selector: mg.Spec.InitProvider.VirtualMachineIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.VirtualMachineID") + } + mg.Spec.InitProvider.VirtualMachineID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.VirtualMachineIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this LinuxVirtualMachine. +func (mg *LinuxVirtualMachine) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("devtestlab.azure.upbound.io", "v1beta1", "Lab", "LabList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.LabName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.LabNameRef, + Selector: mg.Spec.ForProvider.LabNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.LabName") + } + mg.Spec.ForProvider.LabName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.LabNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.LabSubnetName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.LabSubnetNameRef, + 
Selector: mg.Spec.ForProvider.LabSubnetNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.LabSubnetName") + } + mg.Spec.ForProvider.LabSubnetName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.LabSubnetNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("devtestlab.azure.upbound.io", "v1beta2", "VirtualNetwork", "VirtualNetworkList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.LabVirtualNetworkID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.LabVirtualNetworkIDRef, + Selector: mg.Spec.ForProvider.LabVirtualNetworkIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.LabVirtualNetworkID") + } + mg.Spec.ForProvider.LabVirtualNetworkID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.LabVirtualNetworkIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + 
mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("devtestlab.azure.upbound.io", "v1beta1", "Lab", "LabList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.LabName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.LabNameRef, + Selector: mg.Spec.InitProvider.LabNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.LabName") + } + mg.Spec.InitProvider.LabName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.LabNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.LabSubnetName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.LabSubnetNameRef, + Selector: mg.Spec.InitProvider.LabSubnetNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.LabSubnetName") + } + mg.Spec.InitProvider.LabSubnetName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.LabSubnetNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("devtestlab.azure.upbound.io", "v1beta2", "VirtualNetwork", "VirtualNetworkList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.InitProvider.LabVirtualNetworkID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.LabVirtualNetworkIDRef, + Selector: mg.Spec.InitProvider.LabVirtualNetworkIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.LabVirtualNetworkID") + } + mg.Spec.InitProvider.LabVirtualNetworkID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.LabVirtualNetworkIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ResourceGroupNameRef, + Selector: mg.Spec.InitProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ResourceGroupName") + } + mg.Spec.InitProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Schedule. 
+func (mg *Schedule) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("devtestlab.azure.upbound.io", "v1beta1", "Lab", "LabList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.LabName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.LabNameRef, + Selector: mg.Spec.ForProvider.LabNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.LabName") + } + mg.Spec.ForProvider.LabName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.LabNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this VirtualNetwork. 
+func (mg *VirtualNetwork) ResolveReferences(ctx context.Context, c client.Reader) error {
+	var m xpresource.Managed
+	var l xpresource.ManagedList
+	r := reference.NewAPIResolver(c, mg)
+
+	var rsp reference.ResolutionResponse
+	var err error
+	{
+		m, l, err = apisresolver.GetManagedResource("devtestlab.azure.upbound.io", "v1beta1", "Lab", "LabList")
+		if err != nil {
+			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
+		}
+
+		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+			CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.LabName),
+			Extract:      reference.ExternalName(),
+			Reference:    mg.Spec.ForProvider.LabNameRef,
+			Selector:     mg.Spec.ForProvider.LabNameSelector,
+			To:           reference.To{List: l, Managed: m},
+		})
+	}
+	if err != nil {
+		return errors.Wrap(err, "mg.Spec.ForProvider.LabName")
+	}
+	mg.Spec.ForProvider.LabName = reference.ToPtrValue(rsp.ResolvedValue)
+	mg.Spec.ForProvider.LabNameRef = rsp.ResolvedReference
+	{
+		m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList")
+		if err != nil {
+			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
+		}
+
+		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+			CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName),
+			Extract:      reference.ExternalName(),
+			Reference:    mg.Spec.ForProvider.ResourceGroupNameRef,
+			Selector:     mg.Spec.ForProvider.ResourceGroupNameSelector,
+			To:           reference.To{List: l, Managed: m},
+		})
+	}
+	if err != nil {
+		return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName")
+	}
+	mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue)
+	mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference
+	{
+		m, l, err = apisresolver.GetManagedResource("devtestlab.azure.upbound.io", "v1beta1", "Lab", "LabList")
+		if err != nil {
+			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
+		}
+
+		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+			CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.LabName),
+			Extract:      reference.ExternalName(),
+			Reference:    mg.Spec.InitProvider.LabNameRef,
+			Selector:     mg.Spec.InitProvider.LabNameSelector,
+			To:           reference.To{List: l, Managed: m},
+		})
+	}
+	if err != nil {
+		return errors.Wrap(err, "mg.Spec.InitProvider.LabName")
+	}
+	mg.Spec.InitProvider.LabName = reference.ToPtrValue(rsp.ResolvedValue)
+	mg.Spec.InitProvider.LabNameRef = rsp.ResolvedReference
+	{
+		m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList")
+		if err != nil {
+			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
+		}
+
+		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+			CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ResourceGroupName),
+			Extract:      reference.ExternalName(),
+			Reference:    mg.Spec.InitProvider.ResourceGroupNameRef,
+			Selector:     mg.Spec.InitProvider.ResourceGroupNameSelector,
+			To:           reference.To{List: l, Managed: m},
+		})
+	}
+	if err != nil {
+		return errors.Wrap(err, "mg.Spec.InitProvider.ResourceGroupName")
+	}
+	mg.Spec.InitProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue)
+	mg.Spec.InitProvider.ResourceGroupNameRef = rsp.ResolvedReference
+
+	return nil
+}
+
+// ResolveReferences of this WindowsVirtualMachine, resolving Lab, Subnet, VirtualNetwork and ResourceGroup cross-resource references for both spec.forProvider and spec.initProvider.
+func (mg *WindowsVirtualMachine) ResolveReferences(ctx context.Context, c client.Reader) error {
+	var m xpresource.Managed
+	var l xpresource.ManagedList
+	r := reference.NewAPIResolver(c, mg)
+
+	var rsp reference.ResolutionResponse
+	var err error
+	{
+		m, l, err = apisresolver.GetManagedResource("devtestlab.azure.upbound.io", "v1beta1", "Lab", "LabList")
+		if err != nil {
+			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
+		}
+
+		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+			CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.LabName),
+			Extract:      reference.ExternalName(),
+			Reference:    mg.Spec.ForProvider.LabNameRef,
+			Selector:     mg.Spec.ForProvider.LabNameSelector,
+			To:           reference.To{List: l, Managed: m},
+		})
+	}
+	if err != nil {
+		return errors.Wrap(err, "mg.Spec.ForProvider.LabName")
+	}
+	mg.Spec.ForProvider.LabName = reference.ToPtrValue(rsp.ResolvedValue)
+	mg.Spec.ForProvider.LabNameRef = rsp.ResolvedReference
+	{
+		m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList")
+		if err != nil {
+			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
+		}
+
+		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+			CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.LabSubnetName),
+			Extract:      reference.ExternalName(),
+			Reference:    mg.Spec.ForProvider.LabSubnetNameRef,
+			Selector:     mg.Spec.ForProvider.LabSubnetNameSelector,
+			To:           reference.To{List: l, Managed: m},
+		})
+	}
+	if err != nil {
+		return errors.Wrap(err, "mg.Spec.ForProvider.LabSubnetName")
+	}
+	mg.Spec.ForProvider.LabSubnetName = reference.ToPtrValue(rsp.ResolvedValue)
+	mg.Spec.ForProvider.LabSubnetNameRef = rsp.ResolvedReference
+	// NOTE(review): unlike the name-based references above, LabVirtualNetworkID is
+	// resolved with ExtractResourceID(), i.e. from the referenced VirtualNetwork's
+	// Azure resource ID rather than its crossplane.io/external-name annotation.
+	{
+		m, l, err = apisresolver.GetManagedResource("devtestlab.azure.upbound.io", "v1beta2", "VirtualNetwork", "VirtualNetworkList")
+		if err != nil {
+			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
+		}
+
+		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+			CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.LabVirtualNetworkID),
+			Extract:      resource.ExtractResourceID(),
+			Reference:    mg.Spec.ForProvider.LabVirtualNetworkIDRef,
+			Selector:     mg.Spec.ForProvider.LabVirtualNetworkIDSelector,
+			To:           reference.To{List: l, Managed: m},
+		})
+	}
+	if err != nil {
+		return errors.Wrap(err, "mg.Spec.ForProvider.LabVirtualNetworkID")
+	}
+	mg.Spec.ForProvider.LabVirtualNetworkID = reference.ToPtrValue(rsp.ResolvedValue)
+	mg.Spec.ForProvider.LabVirtualNetworkIDRef = rsp.ResolvedReference
+	{
+		m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList")
+		if err != nil {
+			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
+		}
+
+		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+			CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName),
+			Extract:      reference.ExternalName(),
+			Reference:    mg.Spec.ForProvider.ResourceGroupNameRef,
+			Selector:     mg.Spec.ForProvider.ResourceGroupNameSelector,
+			To:           reference.To{List: l, Managed: m},
+		})
+	}
+	if err != nil {
+		return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName")
+	}
+	mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue)
+	mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference
+	{
+		m, l, err = apisresolver.GetManagedResource("devtestlab.azure.upbound.io", "v1beta1", "Lab", "LabList")
+		if err != nil {
+			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
+		}
+
+		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+			CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.LabName),
+			Extract:      reference.ExternalName(),
+			Reference:    mg.Spec.InitProvider.LabNameRef,
+			Selector:     mg.Spec.InitProvider.LabNameSelector,
+			To:           reference.To{List: l, Managed: m},
+		})
+	}
+	if err != nil {
+		return errors.Wrap(err, "mg.Spec.InitProvider.LabName")
+	}
+	mg.Spec.InitProvider.LabName = reference.ToPtrValue(rsp.ResolvedValue)
+	mg.Spec.InitProvider.LabNameRef = rsp.ResolvedReference
+	{
+		m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList")
+		if err != nil {
+			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
+		}
+
+		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+			CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.LabSubnetName),
+			Extract:      reference.ExternalName(),
+			Reference:    mg.Spec.InitProvider.LabSubnetNameRef,
+			Selector:     mg.Spec.InitProvider.LabSubnetNameSelector,
+			To:           reference.To{List: l, Managed: m},
+		})
+	}
+	if err != nil {
+		return errors.Wrap(err, "mg.Spec.InitProvider.LabSubnetName")
+	}
+	mg.Spec.InitProvider.LabSubnetName = reference.ToPtrValue(rsp.ResolvedValue)
+	mg.Spec.InitProvider.LabSubnetNameRef = rsp.ResolvedReference
+	{
+		m, l, err = apisresolver.GetManagedResource("devtestlab.azure.upbound.io", "v1beta2", "VirtualNetwork", "VirtualNetworkList")
+		if err != nil {
+			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
+		}
+
+		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+			CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.LabVirtualNetworkID),
+			Extract:      resource.ExtractResourceID(),
+			Reference:    mg.Spec.InitProvider.LabVirtualNetworkIDRef,
+			Selector:     mg.Spec.InitProvider.LabVirtualNetworkIDSelector,
+			To:           reference.To{List: l, Managed: m},
+		})
+	}
+	if err != nil {
+		return errors.Wrap(err, "mg.Spec.InitProvider.LabVirtualNetworkID")
+	}
+	mg.Spec.InitProvider.LabVirtualNetworkID = reference.ToPtrValue(rsp.ResolvedValue)
+	mg.Spec.InitProvider.LabVirtualNetworkIDRef = rsp.ResolvedReference
+	{
+		m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList")
+		if err != nil {
+			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
+		}
+
+		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
+			CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ResourceGroupName),
+			Extract:      reference.ExternalName(),
+			Reference:    mg.Spec.InitProvider.ResourceGroupNameRef,
+			Selector:     mg.Spec.InitProvider.ResourceGroupNameSelector,
+			To:           reference.To{List: l, Managed: m},
+		})
+	}
+	if err != nil {
+		return errors.Wrap(err, "mg.Spec.InitProvider.ResourceGroupName")
+	}
+	mg.Spec.InitProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue)
+	mg.Spec.InitProvider.ResourceGroupNameRef = rsp.ResolvedReference
+
+	return nil
+}
diff --git a/apis/devtestlab/v1beta2/zz_globalvmshutdownschedule_terraformed.go b/apis/devtestlab/v1beta2/zz_globalvmshutdownschedule_terraformed.go
new file mode 100755
index 000000000..d0890aa2f
--- /dev/null
+++ b/apis/devtestlab/v1beta2/zz_globalvmshutdownschedule_terraformed.go
@@ -0,0 +1,129 @@
+// SPDX-FileCopyrightText: 2024 The Crossplane Authors
+//
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by upjet. DO NOT EDIT.
+
+package v1beta2
+
+import (
+	"dario.cat/mergo"
+	"github.com/pkg/errors"
+
+	"github.com/crossplane/upjet/pkg/resource"
+	"github.com/crossplane/upjet/pkg/resource/json"
+)
+
+// GetTerraformResourceType returns Terraform resource type for this GlobalVMShutdownSchedule
+func (mg *GlobalVMShutdownSchedule) GetTerraformResourceType() string {
+	return "azurerm_dev_test_global_vm_shutdown_schedule"
+}
+
+// GetConnectionDetailsMapping for this GlobalVMShutdownSchedule
+func (tr *GlobalVMShutdownSchedule) GetConnectionDetailsMapping() map[string]string {
+	return nil
+}
+
+// GetObservation of this GlobalVMShutdownSchedule
+func (tr *GlobalVMShutdownSchedule) GetObservation() (map[string]any, error) {
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
+}
+
+// SetObservation for this GlobalVMShutdownSchedule
+func (tr *GlobalVMShutdownSchedule) SetObservation(obs map[string]any) error {
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this GlobalVMShutdownSchedule
+func (tr *GlobalVMShutdownSchedule) GetID() string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this GlobalVMShutdownSchedule
+func (tr *GlobalVMShutdownSchedule) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this GlobalVMShutdownSchedule
+func (tr *GlobalVMShutdownSchedule) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this GlobalVMShutdownSchedule
+func (tr *GlobalVMShutdownSchedule) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this GlobalVMShutdownSchedule: merges spec.initProvider into spec.forProvider when shouldMergeInitProvider is set, without overwriting forProvider values
+func (tr *GlobalVMShutdownSchedule) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this GlobalVMShutdownSchedule using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *GlobalVMShutdownSchedule) LateInitialize(attrs []byte) (bool, error) {
+	params := &GlobalVMShutdownScheduleParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+	return li.LateInitialize(&tr.Spec.ForProvider, params)
+}
+
+// GetTerraformSchemaVersion returns the associated Terraform schema version
+func (tr *GlobalVMShutdownSchedule) GetTerraformSchemaVersion() int {
+	return 0
+}
diff --git a/apis/devtestlab/v1beta2/zz_globalvmshutdownschedule_types.go b/apis/devtestlab/v1beta2/zz_globalvmshutdownschedule_types.go
new file mode 100755
index 000000000..1f9e04cbe
--- /dev/null
+++ b/apis/devtestlab/v1beta2/zz_globalvmshutdownschedule_types.go
@@ -0,0 +1,232 @@
+// SPDX-FileCopyrightText: 2024 The Crossplane Authors
+//
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by upjet. DO NOT EDIT.
+
+package v1beta2
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
+)
+
+type GlobalVMShutdownScheduleInitParameters struct {
+
+	// The time each day when the schedule takes effect. Must match the format HHmm where HH is 00-23 and mm is 00-59 (e.g. 0930, 2300, etc.)
+	DailyRecurrenceTime *string `json:"dailyRecurrenceTime,omitempty" tf:"daily_recurrence_time,omitempty"`
+
+	// Whether to enable the schedule. Possible values are true and false. Defaults to true.
+	Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
+
+	// The location where the schedule is created. Changing this forces a new resource to be created.
+	Location *string `json:"location,omitempty" tf:"location,omitempty"`
+
+	// The notification setting of a schedule. A notification_settings block as defined below.
+	NotificationSettings *NotificationSettingsInitParameters `json:"notificationSettings,omitempty" tf:"notification_settings,omitempty"`
+
+	// A mapping of tags to assign to the resource.
+	// +mapType=granular
+	Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"`
+
+	// The time zone ID (e.g. Pacific Standard time). Refer to this guide for a full list of accepted time zone names.
+	Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"`
+
+	// The resource ID of the target ARM-based Virtual Machine. Changing this forces a new resource to be created.
+	// +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/compute/v1beta2.LinuxVirtualMachine
+	// +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID()
+	VirtualMachineID *string `json:"virtualMachineId,omitempty" tf:"virtual_machine_id,omitempty"`
+
+	// Reference to a LinuxVirtualMachine in compute to populate virtualMachineId.
+	// +kubebuilder:validation:Optional
+	VirtualMachineIDRef *v1.Reference `json:"virtualMachineIdRef,omitempty" tf:"-"`
+
+	// Selector for a LinuxVirtualMachine in compute to populate virtualMachineId.
+	// +kubebuilder:validation:Optional
+	VirtualMachineIDSelector *v1.Selector `json:"virtualMachineIdSelector,omitempty" tf:"-"`
+}
+
+type GlobalVMShutdownScheduleObservation struct {
+
+	// The time each day when the schedule takes effect. Must match the format HHmm where HH is 00-23 and mm is 00-59 (e.g. 0930, 2300, etc.)
+	DailyRecurrenceTime *string `json:"dailyRecurrenceTime,omitempty" tf:"daily_recurrence_time,omitempty"`
+
+	// Whether to enable the schedule. Possible values are true and false. Defaults to true.
+	Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
+
+	// The Dev Test Global Schedule ID.
+	ID *string `json:"id,omitempty" tf:"id,omitempty"`
+
+	// The location where the schedule is created. Changing this forces a new resource to be created.
+	Location *string `json:"location,omitempty" tf:"location,omitempty"`
+
+	// The notification setting of a schedule. A notification_settings block as defined below.
+	NotificationSettings *NotificationSettingsObservation `json:"notificationSettings,omitempty" tf:"notification_settings,omitempty"`
+
+	// A mapping of tags to assign to the resource.
+	// +mapType=granular
+	Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"`
+
+	// The time zone ID (e.g. Pacific Standard time). Refer to this guide for a full list of accepted time zone names.
+	Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"`
+
+	// The resource ID of the target ARM-based Virtual Machine. Changing this forces a new resource to be created.
+	VirtualMachineID *string `json:"virtualMachineId,omitempty" tf:"virtual_machine_id,omitempty"`
+}
+
+type GlobalVMShutdownScheduleParameters struct {
+
+	// The time each day when the schedule takes effect. Must match the format HHmm where HH is 00-23 and mm is 00-59 (e.g. 0930, 2300, etc.)
+	// +kubebuilder:validation:Optional
+	DailyRecurrenceTime *string `json:"dailyRecurrenceTime,omitempty" tf:"daily_recurrence_time,omitempty"`
+
+	// Whether to enable the schedule. Possible values are true and false. Defaults to true.
+	// +kubebuilder:validation:Optional
+	Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
+
+	// The location where the schedule is created. Changing this forces a new resource to be created.
+	// +kubebuilder:validation:Optional
+	Location *string `json:"location,omitempty" tf:"location,omitempty"`
+
+	// The notification setting of a schedule. A notification_settings block as defined below.
+	// +kubebuilder:validation:Optional
+	NotificationSettings *NotificationSettingsParameters `json:"notificationSettings,omitempty" tf:"notification_settings,omitempty"`
+
+	// A mapping of tags to assign to the resource.
+	// +kubebuilder:validation:Optional
+	// +mapType=granular
+	Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"`
+
+	// The time zone ID (e.g. Pacific Standard time). Refer to this guide for a full list of accepted time zone names.
+	// +kubebuilder:validation:Optional
+	Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"`
+
+	// The resource ID of the target ARM-based Virtual Machine. Changing this forces a new resource to be created.
+	// +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/compute/v1beta2.LinuxVirtualMachine
+	// +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID()
+	// +kubebuilder:validation:Optional
+	VirtualMachineID *string `json:"virtualMachineId,omitempty" tf:"virtual_machine_id,omitempty"`
+
+	// Reference to a LinuxVirtualMachine in compute to populate virtualMachineId.
+	// +kubebuilder:validation:Optional
+	VirtualMachineIDRef *v1.Reference `json:"virtualMachineIdRef,omitempty" tf:"-"`
+
+	// Selector for a LinuxVirtualMachine in compute to populate virtualMachineId.
+	// +kubebuilder:validation:Optional
+	VirtualMachineIDSelector *v1.Selector `json:"virtualMachineIdSelector,omitempty" tf:"-"`
+}
+
+type NotificationSettingsInitParameters struct {
+
+	// E-mail address to which the notification will be sent.
+	Email *string `json:"email,omitempty" tf:"email,omitempty"`
+
+	// Whether to enable pre-shutdown notifications. Possible values are true and false.
+	Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
+
+	// Time in minutes between 15 and 120 before a shutdown event at which a notification will be sent. Defaults to 30.
+	TimeInMinutes *float64 `json:"timeInMinutes,omitempty" tf:"time_in_minutes,omitempty"`
+
+	// The webhook URL to which the notification will be sent.
+	WebhookURL *string `json:"webhookUrl,omitempty" tf:"webhook_url,omitempty"`
+}
+
+type NotificationSettingsObservation struct {
+
+	// E-mail address to which the notification will be sent.
+	Email *string `json:"email,omitempty" tf:"email,omitempty"`
+
+	// Whether to enable pre-shutdown notifications. Possible values are true and false.
+	Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"`
+
+	// Time in minutes between 15 and 120 before a shutdown event at which a notification will be sent. Defaults to 30.
+	TimeInMinutes *float64 `json:"timeInMinutes,omitempty" tf:"time_in_minutes,omitempty"`
+
+	// The webhook URL to which the notification will be sent.
+	WebhookURL *string `json:"webhookUrl,omitempty" tf:"webhook_url,omitempty"`
+}
+
+type NotificationSettingsParameters struct {
+
+	// E-mail address to which the notification will be sent.
+	// +kubebuilder:validation:Optional
+	Email *string `json:"email,omitempty" tf:"email,omitempty"`
+
+	// Whether to enable pre-shutdown notifications. Possible values are true and false.
+	// +kubebuilder:validation:Optional
+	Enabled *bool `json:"enabled" tf:"enabled,omitempty"`
+
+	// Time in minutes between 15 and 120 before a shutdown event at which a notification will be sent. Defaults to 30.
+	// +kubebuilder:validation:Optional
+	TimeInMinutes *float64 `json:"timeInMinutes,omitempty" tf:"time_in_minutes,omitempty"`
+
+	// The webhook URL to which the notification will be sent.
+	// +kubebuilder:validation:Optional
+	WebhookURL *string `json:"webhookUrl,omitempty" tf:"webhook_url,omitempty"`
+}
+
+// GlobalVMShutdownScheduleSpec defines the desired state of GlobalVMShutdownSchedule
+type GlobalVMShutdownScheduleSpec struct {
+	v1.ResourceSpec `json:",inline"`
+	ForProvider     GlobalVMShutdownScheduleParameters `json:"forProvider"`
+	// THIS IS A BETA FIELD. It will be honored
+	// unless the Management Policies feature flag is disabled.
+	// InitProvider holds the same fields as ForProvider, with the exception
+	// of Identifier and other resource reference fields. The fields that are
+	// in InitProvider are merged into ForProvider when the resource is created.
+	// The same fields are also added to the terraform ignore_changes hook, to
+	// avoid updating them after creation. This is useful for fields that are
+	// required on creation, but we do not desire to update them after creation,
+	// for example because of an external controller is managing them, like an
+	// autoscaler.
+	InitProvider GlobalVMShutdownScheduleInitParameters `json:"initProvider,omitempty"`
+}
+
+// GlobalVMShutdownScheduleStatus defines the observed state of GlobalVMShutdownSchedule.
+type GlobalVMShutdownScheduleStatus struct {
+	v1.ResourceStatus `json:",inline"`
+	AtProvider        GlobalVMShutdownScheduleObservation `json:"atProvider,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+
+// GlobalVMShutdownSchedule is the Schema for the GlobalVMShutdownSchedules API. Manages automated shutdown schedules for Azure Resource Manager VMs outside of Dev Test Labs.
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
+// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name"
+// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp"
+// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure}
+type GlobalVMShutdownSchedule struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.dailyRecurrenceTime) || (has(self.initProvider) && has(self.initProvider.dailyRecurrenceTime))",message="spec.forProvider.dailyRecurrenceTime is a required parameter"
+	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter"
+	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.notificationSettings) || (has(self.initProvider) && has(self.initProvider.notificationSettings))",message="spec.forProvider.notificationSettings is a required parameter"
+	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.timezone) || (has(self.initProvider) && has(self.initProvider.timezone))",message="spec.forProvider.timezone is a required parameter"
+	Spec   GlobalVMShutdownScheduleSpec   `json:"spec"`
+	Status GlobalVMShutdownScheduleStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// GlobalVMShutdownScheduleList contains a list of GlobalVMShutdownSchedules
+type GlobalVMShutdownScheduleList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []GlobalVMShutdownSchedule `json:"items"`
+}
+
+// Repository type metadata.
+var (
+	GlobalVMShutdownSchedule_Kind             = "GlobalVMShutdownSchedule"
+	GlobalVMShutdownSchedule_GroupKind        = schema.GroupKind{Group: CRDGroup, Kind: GlobalVMShutdownSchedule_Kind}.String()
+	GlobalVMShutdownSchedule_KindAPIVersion   = GlobalVMShutdownSchedule_Kind + "." + CRDGroupVersion.String()
+	GlobalVMShutdownSchedule_GroupVersionKind = CRDGroupVersion.WithKind(GlobalVMShutdownSchedule_Kind)
+)
+
+func init() {
+	SchemeBuilder.Register(&GlobalVMShutdownSchedule{}, &GlobalVMShutdownScheduleList{})
+}
diff --git a/apis/devtestlab/v1beta2/zz_groupversion_info.go b/apis/devtestlab/v1beta2/zz_groupversion_info.go
new file mode 100755
index 000000000..ca67bb122
--- /dev/null
+++ b/apis/devtestlab/v1beta2/zz_groupversion_info.go
@@ -0,0 +1,32 @@
+// SPDX-FileCopyrightText: 2024 The Crossplane Authors
+//
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by upjet. DO NOT EDIT.
+
+// +kubebuilder:object:generate=true
+// +groupName=devtestlab.azure.upbound.io
+// +versionName=v1beta2
+package v1beta2
+
+import (
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"sigs.k8s.io/controller-runtime/pkg/scheme"
+)
+
+// Package type metadata for the devtestlab.azure.upbound.io/v1beta2 API group.
+const (
+	CRDGroup   = "devtestlab.azure.upbound.io"
+	CRDVersion = "v1beta2"
+)
+
+var (
+	// CRDGroupVersion is the API Group Version used to register the objects
+	CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion}
+
+	// SchemeBuilder is used to add go types to the GroupVersionKind scheme
+	SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion}
+
+	// AddToScheme adds the types in this group-version to the given scheme.
+	AddToScheme = SchemeBuilder.AddToScheme
+)
diff --git a/apis/devtestlab/v1beta2/zz_linuxvirtualmachine_terraformed.go b/apis/devtestlab/v1beta2/zz_linuxvirtualmachine_terraformed.go
new file mode 100755
index 000000000..522aa05e0
--- /dev/null
+++ b/apis/devtestlab/v1beta2/zz_linuxvirtualmachine_terraformed.go
@@ -0,0 +1,129 @@
+// SPDX-FileCopyrightText: 2024 The Crossplane Authors
+//
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by upjet. DO NOT EDIT.
+
+package v1beta2
+
+import (
+	"dario.cat/mergo"
+	"github.com/pkg/errors"
+
+	"github.com/crossplane/upjet/pkg/resource"
+	"github.com/crossplane/upjet/pkg/resource/json"
+)
+
+// GetTerraformResourceType returns Terraform resource type for this LinuxVirtualMachine
+func (mg *LinuxVirtualMachine) GetTerraformResourceType() string {
+	return "azurerm_dev_test_linux_virtual_machine"
+}
+
+// GetConnectionDetailsMapping for this LinuxVirtualMachine
+func (tr *LinuxVirtualMachine) GetConnectionDetailsMapping() map[string]string {
+	return map[string]string{"password": "spec.forProvider.passwordSecretRef"}
+}
+
+// GetObservation of this LinuxVirtualMachine
+func (tr *LinuxVirtualMachine) GetObservation() (map[string]any, error) {
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
+}
+
+// SetObservation for this LinuxVirtualMachine
+func (tr *LinuxVirtualMachine) SetObservation(obs map[string]any) error {
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this LinuxVirtualMachine
+func (tr *LinuxVirtualMachine) GetID() string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this LinuxVirtualMachine
+func (tr *LinuxVirtualMachine) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this LinuxVirtualMachine
+func (tr *LinuxVirtualMachine) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this LinuxVirtualMachine
+func (tr *LinuxVirtualMachine) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this LinuxVirtualMachine: merges spec.initProvider into spec.forProvider when shouldMergeInitProvider is set, without overwriting forProvider values
+func (tr *LinuxVirtualMachine) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this LinuxVirtualMachine using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *LinuxVirtualMachine) LateInitialize(attrs []byte) (bool, error) {
+	params := &LinuxVirtualMachineParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+	return li.LateInitialize(&tr.Spec.ForProvider, params)
+}
+
+// GetTerraformSchemaVersion returns the associated Terraform schema version
+func (tr *LinuxVirtualMachine) GetTerraformSchemaVersion() int {
+	return 1
+}
diff --git a/apis/devtestlab/v1beta2/zz_linuxvirtualmachine_types.go b/apis/devtestlab/v1beta2/zz_linuxvirtualmachine_types.go
new file mode 100755
index 000000000..a36d42bba
--- /dev/null
+++ b/apis/devtestlab/v1beta2/zz_linuxvirtualmachine_types.go
@@ -0,0 +1,420 @@
+// SPDX-FileCopyrightText: 2024 The Crossplane Authors
+//
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by upjet. DO NOT EDIT.
+
+package v1beta2
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
+)
+
+type GalleryImageReferenceInitParameters struct {
+
+	// The Offer of the Gallery Image. Changing this forces a new resource to be created.
+	Offer *string `json:"offer,omitempty" tf:"offer,omitempty"`
+
+	// The Publisher of the Gallery Image. Changing this forces a new resource to be created.
+	Publisher *string `json:"publisher,omitempty" tf:"publisher,omitempty"`
+
+	// The SKU of the Gallery Image. Changing this forces a new resource to be created.
+	Sku *string `json:"sku,omitempty" tf:"sku,omitempty"`
+
+	// The Version of the Gallery Image. Changing this forces a new resource to be created.
+	Version *string `json:"version,omitempty" tf:"version,omitempty"`
+}
+
+type GalleryImageReferenceObservation struct {
+
+	// The Offer of the Gallery Image. Changing this forces a new resource to be created.
+	Offer *string `json:"offer,omitempty" tf:"offer,omitempty"`
+
+	// The Publisher of the Gallery Image. Changing this forces a new resource to be created.
+	Publisher *string `json:"publisher,omitempty" tf:"publisher,omitempty"`
+
+	// The SKU of the Gallery Image. Changing this forces a new resource to be created.
+	Sku *string `json:"sku,omitempty" tf:"sku,omitempty"`
+
+	// The Version of the Gallery Image. Changing this forces a new resource to be created.
+	Version *string `json:"version,omitempty" tf:"version,omitempty"`
+}
+
+type GalleryImageReferenceParameters struct {
+
+	// The Offer of the Gallery Image. Changing this forces a new resource to be created.
+	// +kubebuilder:validation:Optional
+	Offer *string `json:"offer" tf:"offer,omitempty"`
+
+	// The Publisher of the Gallery Image. Changing this forces a new resource to be created.
+	// +kubebuilder:validation:Optional
+	Publisher *string `json:"publisher" tf:"publisher,omitempty"`
+
+	// The SKU of the Gallery Image. Changing this forces a new resource to be created.
+	// +kubebuilder:validation:Optional
+	Sku *string `json:"sku" tf:"sku,omitempty"`
+
+	// The Version of the Gallery Image. Changing this forces a new resource to be created.
+	// +kubebuilder:validation:Optional
+	Version *string `json:"version" tf:"version,omitempty"`
+}
+
+type InboundNATRuleInitParameters struct {
+
+	// The Backend Port associated with this NAT Rule. Changing this forces a new resource to be created.
+	BackendPort *float64 `json:"backendPort,omitempty" tf:"backend_port,omitempty"`
+
+	// The Protocol used for this NAT Rule. Possible values are Tcp and Udp.
+	Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"`
+}
+
+type InboundNATRuleObservation struct {
+
+	// The Backend Port associated with this NAT Rule. Changing this forces a new resource to be created.
+	BackendPort *float64 `json:"backendPort,omitempty" tf:"backend_port,omitempty"`
+
+	// The frontend port associated with this Inbound NAT Rule.
+	FrontendPort *float64 `json:"frontendPort,omitempty" tf:"frontend_port,omitempty"`
+
+	// The Protocol used for this NAT Rule. Possible values are Tcp and Udp.
+	Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"`
+}
+
+type InboundNATRuleParameters struct {
+
+	// The Backend Port associated with this NAT Rule. Changing this forces a new resource to be created.
+	// +kubebuilder:validation:Optional
+	BackendPort *float64 `json:"backendPort" tf:"backend_port,omitempty"`
+
+	// The Protocol used for this NAT Rule. Possible values are Tcp and Udp.
+	// +kubebuilder:validation:Optional
+	Protocol *string `json:"protocol" tf:"protocol,omitempty"`
+}
+
+type LinuxVirtualMachineInitParameters struct {
+
+	// Can this Virtual Machine be claimed by users? Defaults to true.
+	AllowClaim *bool `json:"allowClaim,omitempty" tf:"allow_claim,omitempty"`
+
+	// Should the Virtual Machine be created without a Public IP Address? Changing this forces a new resource to be created.
+	DisallowPublicIPAddress *bool `json:"disallowPublicIpAddress,omitempty" tf:"disallow_public_ip_address,omitempty"`
+
+	// A gallery_image_reference block as defined below.
+ GalleryImageReference *GalleryImageReferenceInitParameters `json:"galleryImageReference,omitempty" tf:"gallery_image_reference,omitempty"` + + // One or more inbound_nat_rule blocks as defined below. Changing this forces a new resource to be created. + InboundNATRule []InboundNATRuleInitParameters `json:"inboundNatRule,omitempty" tf:"inbound_nat_rule,omitempty"` + + // Specifies the name of the Dev Test Lab in which the Virtual Machine should be created. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devtestlab/v1beta1.Lab + LabName *string `json:"labName,omitempty" tf:"lab_name,omitempty"` + + // Reference to a Lab in devtestlab to populate labName. + // +kubebuilder:validation:Optional + LabNameRef *v1.Reference `json:"labNameRef,omitempty" tf:"-"` + + // Selector for a Lab in devtestlab to populate labName. + // +kubebuilder:validation:Optional + LabNameSelector *v1.Selector `json:"labNameSelector,omitempty" tf:"-"` + + // The name of a Subnet within the Dev Test Virtual Network where this machine should exist. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + LabSubnetName *string `json:"labSubnetName,omitempty" tf:"lab_subnet_name,omitempty"` + + // Reference to a Subnet in network to populate labSubnetName. + // +kubebuilder:validation:Optional + LabSubnetNameRef *v1.Reference `json:"labSubnetNameRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate labSubnetName. + // +kubebuilder:validation:Optional + LabSubnetNameSelector *v1.Selector `json:"labSubnetNameSelector,omitempty" tf:"-"` + + // The ID of the Dev Test Virtual Network where this Virtual Machine should be created. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devtestlab/v1beta2.VirtualNetwork + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + LabVirtualNetworkID *string `json:"labVirtualNetworkId,omitempty" tf:"lab_virtual_network_id,omitempty"` + + // Reference to a VirtualNetwork in devtestlab to populate labVirtualNetworkId. + // +kubebuilder:validation:Optional + LabVirtualNetworkIDRef *v1.Reference `json:"labVirtualNetworkIdRef,omitempty" tf:"-"` + + // Selector for a VirtualNetwork in devtestlab to populate labVirtualNetworkId. + // +kubebuilder:validation:Optional + LabVirtualNetworkIDSelector *v1.Selector `json:"labVirtualNetworkIdSelector,omitempty" tf:"-"` + + // Specifies the supported Azure location where the Dev Test Lab exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the name of the Dev Test Machine. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Any notes about the Virtual Machine. + Notes *string `json:"notes,omitempty" tf:"notes,omitempty"` + + // The name of the resource group in which the Dev Test Lab resource exists. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. 
+ // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // The SSH Key associated with the username used to login to this Virtual Machine. Changing this forces a new resource to be created. + SSHKey *string `json:"sshKey,omitempty" tf:"ssh_key,omitempty"` + + // The Machine Size to use for this Virtual Machine, such as Standard_F2. Changing this forces a new resource to be created. + Size *string `json:"size,omitempty" tf:"size,omitempty"` + + // The type of Storage to use on this Virtual Machine. Possible values are Standard and Premium. Changing this forces a new resource to be created. + StorageType *string `json:"storageType,omitempty" tf:"storage_type,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The Username associated with the local administrator on this Virtual Machine. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type LinuxVirtualMachineObservation struct { + + // Can this Virtual Machine be claimed by users? Defaults to true. + AllowClaim *bool `json:"allowClaim,omitempty" tf:"allow_claim,omitempty"` + + // Should the Virtual Machine be created without a Public IP Address? Changing this forces a new resource to be created. + DisallowPublicIPAddress *bool `json:"disallowPublicIpAddress,omitempty" tf:"disallow_public_ip_address,omitempty"` + + // The FQDN of the Virtual Machine. + Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"` + + // A gallery_image_reference block as defined below. + GalleryImageReference *GalleryImageReferenceObservation `json:"galleryImageReference,omitempty" tf:"gallery_image_reference,omitempty"` + + // The ID of the Virtual Machine. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // One or more inbound_nat_rule blocks as defined below. 
Changing this forces a new resource to be created. + InboundNATRule []InboundNATRuleObservation `json:"inboundNatRule,omitempty" tf:"inbound_nat_rule,omitempty"` + + // Specifies the name of the Dev Test Lab in which the Virtual Machine should be created. Changing this forces a new resource to be created. + LabName *string `json:"labName,omitempty" tf:"lab_name,omitempty"` + + // The name of a Subnet within the Dev Test Virtual Network where this machine should exist. Changing this forces a new resource to be created. + LabSubnetName *string `json:"labSubnetName,omitempty" tf:"lab_subnet_name,omitempty"` + + // The ID of the Dev Test Virtual Network where this Virtual Machine should be created. Changing this forces a new resource to be created. + LabVirtualNetworkID *string `json:"labVirtualNetworkId,omitempty" tf:"lab_virtual_network_id,omitempty"` + + // Specifies the supported Azure location where the Dev Test Lab exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the name of the Dev Test Machine. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Any notes about the Virtual Machine. + Notes *string `json:"notes,omitempty" tf:"notes,omitempty"` + + // The name of the resource group in which the Dev Test Lab resource exists. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // The SSH Key associated with the username used to login to this Virtual Machine. Changing this forces a new resource to be created. + SSHKey *string `json:"sshKey,omitempty" tf:"ssh_key,omitempty"` + + // The Machine Size to use for this Virtual Machine, such as Standard_F2. Changing this forces a new resource to be created. 
+ Size *string `json:"size,omitempty" tf:"size,omitempty"` + + // The type of Storage to use on this Virtual Machine. Possible values are Standard and Premium. Changing this forces a new resource to be created. + StorageType *string `json:"storageType,omitempty" tf:"storage_type,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The unique immutable identifier of the Virtual Machine. + UniqueIdentifier *string `json:"uniqueIdentifier,omitempty" tf:"unique_identifier,omitempty"` + + // The Username associated with the local administrator on this Virtual Machine. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type LinuxVirtualMachineParameters struct { + + // Can this Virtual Machine be claimed by users? Defaults to true. + // +kubebuilder:validation:Optional + AllowClaim *bool `json:"allowClaim,omitempty" tf:"allow_claim,omitempty"` + + // Should the Virtual Machine be created without a Public IP Address? Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + DisallowPublicIPAddress *bool `json:"disallowPublicIpAddress,omitempty" tf:"disallow_public_ip_address,omitempty"` + + // A gallery_image_reference block as defined below. + // +kubebuilder:validation:Optional + GalleryImageReference *GalleryImageReferenceParameters `json:"galleryImageReference,omitempty" tf:"gallery_image_reference,omitempty"` + + // One or more inbound_nat_rule blocks as defined below. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + InboundNATRule []InboundNATRuleParameters `json:"inboundNatRule,omitempty" tf:"inbound_nat_rule,omitempty"` + + // Specifies the name of the Dev Test Lab in which the Virtual Machine should be created. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devtestlab/v1beta1.Lab + // +kubebuilder:validation:Optional + LabName *string `json:"labName,omitempty" tf:"lab_name,omitempty"` + + // Reference to a Lab in devtestlab to populate labName. + // +kubebuilder:validation:Optional + LabNameRef *v1.Reference `json:"labNameRef,omitempty" tf:"-"` + + // Selector for a Lab in devtestlab to populate labName. + // +kubebuilder:validation:Optional + LabNameSelector *v1.Selector `json:"labNameSelector,omitempty" tf:"-"` + + // The name of a Subnet within the Dev Test Virtual Network where this machine should exist. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +kubebuilder:validation:Optional + LabSubnetName *string `json:"labSubnetName,omitempty" tf:"lab_subnet_name,omitempty"` + + // Reference to a Subnet in network to populate labSubnetName. + // +kubebuilder:validation:Optional + LabSubnetNameRef *v1.Reference `json:"labSubnetNameRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate labSubnetName. + // +kubebuilder:validation:Optional + LabSubnetNameSelector *v1.Selector `json:"labSubnetNameSelector,omitempty" tf:"-"` + + // The ID of the Dev Test Virtual Network where this Virtual Machine should be created. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devtestlab/v1beta2.VirtualNetwork + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + LabVirtualNetworkID *string `json:"labVirtualNetworkId,omitempty" tf:"lab_virtual_network_id,omitempty"` + + // Reference to a VirtualNetwork in devtestlab to populate labVirtualNetworkId. 
+ // +kubebuilder:validation:Optional + LabVirtualNetworkIDRef *v1.Reference `json:"labVirtualNetworkIdRef,omitempty" tf:"-"` + + // Selector for a VirtualNetwork in devtestlab to populate labVirtualNetworkId. + // +kubebuilder:validation:Optional + LabVirtualNetworkIDSelector *v1.Selector `json:"labVirtualNetworkIdSelector,omitempty" tf:"-"` + + // Specifies the supported Azure location where the Dev Test Lab exists. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the name of the Dev Test Machine. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Any notes about the Virtual Machine. + // +kubebuilder:validation:Optional + Notes *string `json:"notes,omitempty" tf:"notes,omitempty"` + + // The Password associated with the username used to login to this Virtual Machine. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + PasswordSecretRef *v1.SecretKeySelector `json:"passwordSecretRef,omitempty" tf:"-"` + + // The name of the resource group in which the Dev Test Lab resource exists. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. 
+ // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // The SSH Key associated with the username used to login to this Virtual Machine. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + SSHKey *string `json:"sshKey,omitempty" tf:"ssh_key,omitempty"` + + // The Machine Size to use for this Virtual Machine, such as Standard_F2. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Size *string `json:"size,omitempty" tf:"size,omitempty"` + + // The type of Storage to use on this Virtual Machine. Possible values are Standard and Premium. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + StorageType *string `json:"storageType,omitempty" tf:"storage_type,omitempty"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The Username associated with the local administrator on this Virtual Machine. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +// LinuxVirtualMachineSpec defines the desired state of LinuxVirtualMachine +type LinuxVirtualMachineSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider LinuxVirtualMachineParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider LinuxVirtualMachineInitParameters `json:"initProvider,omitempty"` +} + +// LinuxVirtualMachineStatus defines the observed state of LinuxVirtualMachine. +type LinuxVirtualMachineStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider LinuxVirtualMachineObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// LinuxVirtualMachine is the Schema for the LinuxVirtualMachines API. Manages a Linux Virtual Machine within a Dev Test Lab. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type LinuxVirtualMachine struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.galleryImageReference) || (has(self.initProvider) && has(self.initProvider.galleryImageReference))",message="spec.forProvider.galleryImageReference is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && 
has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.size) || (has(self.initProvider) && has(self.initProvider.size))",message="spec.forProvider.size is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.storageType) || (has(self.initProvider) && has(self.initProvider.storageType))",message="spec.forProvider.storageType is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.username) || (has(self.initProvider) && has(self.initProvider.username))",message="spec.forProvider.username is a required parameter" + Spec LinuxVirtualMachineSpec `json:"spec"` + Status LinuxVirtualMachineStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// LinuxVirtualMachineList contains a list of LinuxVirtualMachines +type LinuxVirtualMachineList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []LinuxVirtualMachine `json:"items"` +} + +// Repository type metadata. +var ( + LinuxVirtualMachine_Kind = "LinuxVirtualMachine" + LinuxVirtualMachine_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: LinuxVirtualMachine_Kind}.String() + LinuxVirtualMachine_KindAPIVersion = LinuxVirtualMachine_Kind + "." 
+ CRDGroupVersion.String() + LinuxVirtualMachine_GroupVersionKind = CRDGroupVersion.WithKind(LinuxVirtualMachine_Kind) +) + +func init() { + SchemeBuilder.Register(&LinuxVirtualMachine{}, &LinuxVirtualMachineList{}) +} diff --git a/apis/devtestlab/v1beta2/zz_schedule_terraformed.go b/apis/devtestlab/v1beta2/zz_schedule_terraformed.go new file mode 100755 index 000000000..50a6aedd6 --- /dev/null +++ b/apis/devtestlab/v1beta2/zz_schedule_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Schedule +func (mg *Schedule) GetTerraformResourceType() string { + return "azurerm_dev_test_schedule" +} + +// GetConnectionDetailsMapping for this Schedule +func (tr *Schedule) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Schedule +func (tr *Schedule) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Schedule +func (tr *Schedule) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Schedule +func (tr *Schedule) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Schedule +func (tr *Schedule) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil 
{ + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Schedule +func (tr *Schedule) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Schedule +func (tr *Schedule) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Schedule +func (tr *Schedule) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Schedule using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *Schedule) LateInitialize(attrs []byte) (bool, error) { + params := &ScheduleParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Schedule) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/devtestlab/v1beta2/zz_schedule_types.go b/apis/devtestlab/v1beta2/zz_schedule_types.go new file mode 100755 index 000000000..2dca2dd89 --- /dev/null +++ b/apis/devtestlab/v1beta2/zz_schedule_types.go @@ -0,0 +1,321 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DailyRecurrenceInitParameters struct { + + // The time each day when the schedule takes effect. + Time *string `json:"time,omitempty" tf:"time,omitempty"` +} + +type DailyRecurrenceObservation struct { + + // The time each day when the schedule takes effect. + Time *string `json:"time,omitempty" tf:"time,omitempty"` +} + +type DailyRecurrenceParameters struct { + + // The time each day when the schedule takes effect. + // +kubebuilder:validation:Optional + Time *string `json:"time" tf:"time,omitempty"` +} + +type HourlyRecurrenceInitParameters struct { + + // Minutes of the hour the schedule will run. 
+ Minute *float64 `json:"minute,omitempty" tf:"minute,omitempty"` +} + +type HourlyRecurrenceObservation struct { + + // Minutes of the hour the schedule will run. + Minute *float64 `json:"minute,omitempty" tf:"minute,omitempty"` +} + +type HourlyRecurrenceParameters struct { + + // Minutes of the hour the schedule will run. + // +kubebuilder:validation:Optional + Minute *float64 `json:"minute" tf:"minute,omitempty"` +} + +type ScheduleInitParameters struct { + + // The properties of a daily schedule. If the schedule occurs once each day of the week, specify the daily recurrence. A daily_recurrence block as defined below. + DailyRecurrence *DailyRecurrenceInitParameters `json:"dailyRecurrence,omitempty" tf:"daily_recurrence,omitempty"` + + // The properties of an hourly schedule. If the schedule occurs multiple times a day, specify the hourly recurrence. A hourly_recurrence block as defined below. + HourlyRecurrence *HourlyRecurrenceInitParameters `json:"hourlyRecurrence,omitempty" tf:"hourly_recurrence,omitempty"` + + // The location where the schedule is created. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The notification setting of a schedule. A notification_settings block as defined below. + NotificationSettings *ScheduleNotificationSettingsInitParameters `json:"notificationSettings,omitempty" tf:"notification_settings,omitempty"` + + // The status of this schedule. Possible values are Enabled and Disabled. Defaults to Disabled. + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The task type of the schedule. Possible values include LabVmsShutdownTask and LabVmAutoStart. + TaskType *string `json:"taskType,omitempty" tf:"task_type,omitempty"` + + // The time zone ID (e.g. Pacific Standard time). 
+ TimeZoneID *string `json:"timeZoneId,omitempty" tf:"time_zone_id,omitempty"` + + // The properties of a weekly schedule. If the schedule occurs only some days of the week, specify the weekly recurrence. A weekly_recurrence block as defined below. + WeeklyRecurrence *WeeklyRecurrenceInitParameters `json:"weeklyRecurrence,omitempty" tf:"weekly_recurrence,omitempty"` +} + +type ScheduleNotificationSettingsInitParameters struct { + + // The status of the notification. Possible values are Enabled and Disabled. Defaults to Disabled + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // Time in minutes before event at which notification will be sent. + TimeInMinutes *float64 `json:"timeInMinutes,omitempty" tf:"time_in_minutes,omitempty"` + + // The webhook URL to which the notification will be sent. + WebhookURL *string `json:"webhookUrl,omitempty" tf:"webhook_url,omitempty"` +} + +type ScheduleNotificationSettingsObservation struct { + + // The status of the notification. Possible values are Enabled and Disabled. Defaults to Disabled + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // Time in minutes before event at which notification will be sent. + TimeInMinutes *float64 `json:"timeInMinutes,omitempty" tf:"time_in_minutes,omitempty"` + + // The webhook URL to which the notification will be sent. + WebhookURL *string `json:"webhookUrl,omitempty" tf:"webhook_url,omitempty"` +} + +type ScheduleNotificationSettingsParameters struct { + + // The status of the notification. Possible values are Enabled and Disabled. Defaults to Disabled + // +kubebuilder:validation:Optional + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // Time in minutes before event at which notification will be sent. + // +kubebuilder:validation:Optional + TimeInMinutes *float64 `json:"timeInMinutes,omitempty" tf:"time_in_minutes,omitempty"` + + // The webhook URL to which the notification will be sent. 
+ // +kubebuilder:validation:Optional + WebhookURL *string `json:"webhookUrl,omitempty" tf:"webhook_url,omitempty"` +} + +type ScheduleObservation struct { + + // The properties of a daily schedule. If the schedule occurs once each day of the week, specify the daily recurrence. A daily_recurrence block as defined below. + DailyRecurrence *DailyRecurrenceObservation `json:"dailyRecurrence,omitempty" tf:"daily_recurrence,omitempty"` + + // The properties of an hourly schedule. If the schedule occurs multiple times a day, specify the hourly recurrence. A hourly_recurrence block as defined below. + HourlyRecurrence *HourlyRecurrenceObservation `json:"hourlyRecurrence,omitempty" tf:"hourly_recurrence,omitempty"` + + // The ID of the DevTest Schedule. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name of the dev test lab. Changing this forces a new resource to be created. + LabName *string `json:"labName,omitempty" tf:"lab_name,omitempty"` + + // The location where the schedule is created. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The notification setting of a schedule. A notification_settings block as defined below. + NotificationSettings *ScheduleNotificationSettingsObservation `json:"notificationSettings,omitempty" tf:"notification_settings,omitempty"` + + // The name of the resource group in which to create the dev test lab schedule. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // The status of this schedule. Possible values are Enabled and Disabled. Defaults to Disabled. + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The task type of the schedule. 
Possible values include LabVmsShutdownTask and LabVmAutoStart. + TaskType *string `json:"taskType,omitempty" tf:"task_type,omitempty"` + + // The time zone ID (e.g. Pacific Standard time). + TimeZoneID *string `json:"timeZoneId,omitempty" tf:"time_zone_id,omitempty"` + + // The properties of a weekly schedule. If the schedule occurs only some days of the week, specify the weekly recurrence. A weekly_recurrence block as defined below. + WeeklyRecurrence *WeeklyRecurrenceObservation `json:"weeklyRecurrence,omitempty" tf:"weekly_recurrence,omitempty"` +} + +type ScheduleParameters struct { + + // The properties of a daily schedule. If the schedule occurs once each day of the week, specify the daily recurrence. A daily_recurrence block as defined below. + // +kubebuilder:validation:Optional + DailyRecurrence *DailyRecurrenceParameters `json:"dailyRecurrence,omitempty" tf:"daily_recurrence,omitempty"` + + // The properties of an hourly schedule. If the schedule occurs multiple times a day, specify the hourly recurrence. A hourly_recurrence block as defined below. + // +kubebuilder:validation:Optional + HourlyRecurrence *HourlyRecurrenceParameters `json:"hourlyRecurrence,omitempty" tf:"hourly_recurrence,omitempty"` + + // The name of the dev test lab. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devtestlab/v1beta1.Lab + // +kubebuilder:validation:Optional + LabName *string `json:"labName,omitempty" tf:"lab_name,omitempty"` + + // Reference to a Lab in devtestlab to populate labName. + // +kubebuilder:validation:Optional + LabNameRef *v1.Reference `json:"labNameRef,omitempty" tf:"-"` + + // Selector for a Lab in devtestlab to populate labName. + // +kubebuilder:validation:Optional + LabNameSelector *v1.Selector `json:"labNameSelector,omitempty" tf:"-"` + + // The location where the schedule is created. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The notification setting of a schedule. A notification_settings block as defined below. + // +kubebuilder:validation:Optional + NotificationSettings *ScheduleNotificationSettingsParameters `json:"notificationSettings,omitempty" tf:"notification_settings,omitempty"` + + // The name of the resource group in which to create the dev test lab schedule. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // The status of this schedule. Possible values are Enabled and Disabled. Defaults to Disabled. + // +kubebuilder:validation:Optional + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The task type of the schedule. Possible values include LabVmsShutdownTask and LabVmAutoStart. + // +kubebuilder:validation:Optional + TaskType *string `json:"taskType,omitempty" tf:"task_type,omitempty"` + + // The time zone ID (e.g. Pacific Standard time). + // +kubebuilder:validation:Optional + TimeZoneID *string `json:"timeZoneId,omitempty" tf:"time_zone_id,omitempty"` + + // The properties of a weekly schedule. 
If the schedule occurs only some days of the week, specify the weekly recurrence. A weekly_recurrence block as defined below. + // +kubebuilder:validation:Optional + WeeklyRecurrence *WeeklyRecurrenceParameters `json:"weeklyRecurrence,omitempty" tf:"weekly_recurrence,omitempty"` +} + +type WeeklyRecurrenceInitParameters struct { + + // The time when the schedule takes effect. + Time *string `json:"time,omitempty" tf:"time,omitempty"` + + // A list of days that this schedule takes effect . Possible values include Monday, Tuesday, Wednesday, Thursday, Friday, Saturday and Sunday. + WeekDays []*string `json:"weekDays,omitempty" tf:"week_days,omitempty"` +} + +type WeeklyRecurrenceObservation struct { + + // The time when the schedule takes effect. + Time *string `json:"time,omitempty" tf:"time,omitempty"` + + // A list of days that this schedule takes effect . Possible values include Monday, Tuesday, Wednesday, Thursday, Friday, Saturday and Sunday. + WeekDays []*string `json:"weekDays,omitempty" tf:"week_days,omitempty"` +} + +type WeeklyRecurrenceParameters struct { + + // The time when the schedule takes effect. + // +kubebuilder:validation:Optional + Time *string `json:"time" tf:"time,omitempty"` + + // A list of days that this schedule takes effect . Possible values include Monday, Tuesday, Wednesday, Thursday, Friday, Saturday and Sunday. + // +kubebuilder:validation:Optional + WeekDays []*string `json:"weekDays,omitempty" tf:"week_days,omitempty"` +} + +// ScheduleSpec defines the desired state of Schedule +type ScheduleSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ScheduleParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. 
+ // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ScheduleInitParameters `json:"initProvider,omitempty"` +} + +// ScheduleStatus defines the observed state of Schedule. +type ScheduleStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ScheduleObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Schedule is the Schema for the Schedules API. Manages automated startup and shutdown schedules for Azure Dev Test Lab. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type Schedule struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.notificationSettings) || (has(self.initProvider) && 
has(self.initProvider.notificationSettings))",message="spec.forProvider.notificationSettings is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.taskType) || (has(self.initProvider) && has(self.initProvider.taskType))",message="spec.forProvider.taskType is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.timeZoneId) || (has(self.initProvider) && has(self.initProvider.timeZoneId))",message="spec.forProvider.timeZoneId is a required parameter" + Spec ScheduleSpec `json:"spec"` + Status ScheduleStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ScheduleList contains a list of Schedules +type ScheduleList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Schedule `json:"items"` +} + +// Repository type metadata. +var ( + Schedule_Kind = "Schedule" + Schedule_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Schedule_Kind}.String() + Schedule_KindAPIVersion = Schedule_Kind + "." + CRDGroupVersion.String() + Schedule_GroupVersionKind = CRDGroupVersion.WithKind(Schedule_Kind) +) + +func init() { + SchemeBuilder.Register(&Schedule{}, &ScheduleList{}) +} diff --git a/apis/devtestlab/v1beta2/zz_virtualnetwork_terraformed.go b/apis/devtestlab/v1beta2/zz_virtualnetwork_terraformed.go new file mode 100755 index 000000000..b0143e631 --- /dev/null +++ b/apis/devtestlab/v1beta2/zz_virtualnetwork_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this VirtualNetwork +func (mg *VirtualNetwork) GetTerraformResourceType() string { + return "azurerm_dev_test_virtual_network" +} + +// GetConnectionDetailsMapping for this VirtualNetwork +func (tr *VirtualNetwork) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this VirtualNetwork +func (tr *VirtualNetwork) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this VirtualNetwork +func (tr *VirtualNetwork) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this VirtualNetwork +func (tr *VirtualNetwork) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this VirtualNetwork +func (tr *VirtualNetwork) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this VirtualNetwork +func (tr *VirtualNetwork) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this VirtualNetwork +func (tr *VirtualNetwork) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if 
err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this VirtualNetwork +func (tr *VirtualNetwork) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this VirtualNetwork using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *VirtualNetwork) LateInitialize(attrs []byte) (bool, error) { + params := &VirtualNetworkParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *VirtualNetwork) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/devtestlab/v1beta2/zz_virtualnetwork_types.go b/apis/devtestlab/v1beta2/zz_virtualnetwork_types.go new file mode 100755 index 000000000..e0cb2f98f --- /dev/null +++ b/apis/devtestlab/v1beta2/zz_virtualnetwork_types.go @@ -0,0 +1,221 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type SubnetInitParameters struct { + + // Can this subnet be used for creating Virtual Machines? Possible values are Allow, Default and Deny. Defaults to Allow. + UseInVirtualMachineCreation *string `json:"useInVirtualMachineCreation,omitempty" tf:"use_in_virtual_machine_creation,omitempty"` + + // Can Virtual Machines in this Subnet use Public IP Addresses? Possible values are Allow, Default and Deny. Defaults to Allow. + UsePublicIPAddress *string `json:"usePublicIpAddress,omitempty" tf:"use_public_ip_address,omitempty"` +} + +type SubnetObservation struct { + + // The name of the Subnet for this Virtual Network. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Can this subnet be used for creating Virtual Machines? Possible values are Allow, Default and Deny. Defaults to Allow. + UseInVirtualMachineCreation *string `json:"useInVirtualMachineCreation,omitempty" tf:"use_in_virtual_machine_creation,omitempty"` + + // Can Virtual Machines in this Subnet use Public IP Addresses? Possible values are Allow, Default and Deny. Defaults to Allow. 
+ UsePublicIPAddress *string `json:"usePublicIpAddress,omitempty" tf:"use_public_ip_address,omitempty"` +} + +type SubnetParameters struct { + + // Can this subnet be used for creating Virtual Machines? Possible values are Allow, Default and Deny. Defaults to Allow. + // +kubebuilder:validation:Optional + UseInVirtualMachineCreation *string `json:"useInVirtualMachineCreation,omitempty" tf:"use_in_virtual_machine_creation,omitempty"` + + // Can Virtual Machines in this Subnet use Public IP Addresses? Possible values are Allow, Default and Deny. Defaults to Allow. + // +kubebuilder:validation:Optional + UsePublicIPAddress *string `json:"usePublicIpAddress,omitempty" tf:"use_public_ip_address,omitempty"` +} + +type VirtualNetworkInitParameters struct { + + // A description for the Virtual Network. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Specifies the name of the Dev Test Lab in which the Virtual Network should be created. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devtestlab/v1beta1.Lab + LabName *string `json:"labName,omitempty" tf:"lab_name,omitempty"` + + // Reference to a Lab in devtestlab to populate labName. + // +kubebuilder:validation:Optional + LabNameRef *v1.Reference `json:"labNameRef,omitempty" tf:"-"` + + // Selector for a Lab in devtestlab to populate labName. + // +kubebuilder:validation:Optional + LabNameSelector *v1.Selector `json:"labNameSelector,omitempty" tf:"-"` + + // Specifies the name of the Dev Test Virtual Network. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The name of the resource group in which the Dev Test Lab resource exists. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A subnet block as defined below. + Subnet *SubnetInitParameters `json:"subnet,omitempty" tf:"subnet,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type VirtualNetworkObservation struct { + + // A description for the Virtual Network. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The ID of the Dev Test Virtual Network. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies the name of the Dev Test Lab in which the Virtual Network should be created. Changing this forces a new resource to be created. + LabName *string `json:"labName,omitempty" tf:"lab_name,omitempty"` + + // Specifies the name of the Dev Test Virtual Network. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The name of the resource group in which the Dev Test Lab resource exists. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A subnet block as defined below. + Subnet *SubnetObservation `json:"subnet,omitempty" tf:"subnet,omitempty"` + + // A mapping of tags to assign to the resource. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The unique immutable identifier of the Dev Test Virtual Network. + UniqueIdentifier *string `json:"uniqueIdentifier,omitempty" tf:"unique_identifier,omitempty"` +} + +type VirtualNetworkParameters struct { + + // A description for the Virtual Network. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Specifies the name of the Dev Test Lab in which the Virtual Network should be created. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devtestlab/v1beta1.Lab + // +kubebuilder:validation:Optional + LabName *string `json:"labName,omitempty" tf:"lab_name,omitempty"` + + // Reference to a Lab in devtestlab to populate labName. + // +kubebuilder:validation:Optional + LabNameRef *v1.Reference `json:"labNameRef,omitempty" tf:"-"` + + // Selector for a Lab in devtestlab to populate labName. + // +kubebuilder:validation:Optional + LabNameSelector *v1.Selector `json:"labNameSelector,omitempty" tf:"-"` + + // Specifies the name of the Dev Test Virtual Network. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The name of the resource group in which the Dev Test Lab resource exists. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. 
+ // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A subnet block as defined below. + // +kubebuilder:validation:Optional + Subnet *SubnetParameters `json:"subnet,omitempty" tf:"subnet,omitempty"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +// VirtualNetworkSpec defines the desired state of VirtualNetwork +type VirtualNetworkSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider VirtualNetworkParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider VirtualNetworkInitParameters `json:"initProvider,omitempty"` +} + +// VirtualNetworkStatus defines the observed state of VirtualNetwork. +type VirtualNetworkStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider VirtualNetworkObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// VirtualNetwork is the Schema for the VirtualNetworks API. 
Manages a Virtual Network within a DevTest Lab. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type VirtualNetwork struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec VirtualNetworkSpec `json:"spec"` + Status VirtualNetworkStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// VirtualNetworkList contains a list of VirtualNetworks +type VirtualNetworkList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []VirtualNetwork `json:"items"` +} + +// Repository type metadata. +var ( + VirtualNetwork_Kind = "VirtualNetwork" + VirtualNetwork_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: VirtualNetwork_Kind}.String() + VirtualNetwork_KindAPIVersion = VirtualNetwork_Kind + "." 
+ CRDGroupVersion.String() + VirtualNetwork_GroupVersionKind = CRDGroupVersion.WithKind(VirtualNetwork_Kind) +) + +func init() { + SchemeBuilder.Register(&VirtualNetwork{}, &VirtualNetworkList{}) +} diff --git a/apis/devtestlab/v1beta2/zz_windowsvirtualmachine_terraformed.go b/apis/devtestlab/v1beta2/zz_windowsvirtualmachine_terraformed.go new file mode 100755 index 000000000..f0dbfca16 --- /dev/null +++ b/apis/devtestlab/v1beta2/zz_windowsvirtualmachine_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this WindowsVirtualMachine +func (mg *WindowsVirtualMachine) GetTerraformResourceType() string { + return "azurerm_dev_test_windows_virtual_machine" +} + +// GetConnectionDetailsMapping for this WindowsVirtualMachine +func (tr *WindowsVirtualMachine) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"password": "spec.forProvider.passwordSecretRef"} +} + +// GetObservation of this WindowsVirtualMachine +func (tr *WindowsVirtualMachine) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this WindowsVirtualMachine +func (tr *WindowsVirtualMachine) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this WindowsVirtualMachine +func (tr *WindowsVirtualMachine) GetID() string { + if tr.Status.AtProvider.ID == 
nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this WindowsVirtualMachine +func (tr *WindowsVirtualMachine) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this WindowsVirtualMachine +func (tr *WindowsVirtualMachine) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this WindowsVirtualMachine +func (tr *WindowsVirtualMachine) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this WindowsVirtualMachine +func (tr *WindowsVirtualMachine) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this WindowsVirtualMachine using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *WindowsVirtualMachine) LateInitialize(attrs []byte) (bool, error) { + params := &WindowsVirtualMachineParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *WindowsVirtualMachine) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/devtestlab/v1beta2/zz_windowsvirtualmachine_types.go b/apis/devtestlab/v1beta2/zz_windowsvirtualmachine_types.go new file mode 100755 index 000000000..b5b055c79 --- /dev/null +++ b/apis/devtestlab/v1beta2/zz_windowsvirtualmachine_types.go @@ -0,0 +1,411 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type WindowsVirtualMachineGalleryImageReferenceInitParameters struct { + + // The Offer of the Gallery Image. Changing this forces a new resource to be created. 
+ Offer *string `json:"offer,omitempty" tf:"offer,omitempty"` + + // The Publisher of the Gallery Image. Changing this forces a new resource to be created. + Publisher *string `json:"publisher,omitempty" tf:"publisher,omitempty"` + + // The SKU of the Gallery Image. Changing this forces a new resource to be created. + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // The Version of the Gallery Image. Changing this forces a new resource to be created. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type WindowsVirtualMachineGalleryImageReferenceObservation struct { + + // The Offer of the Gallery Image. Changing this forces a new resource to be created. + Offer *string `json:"offer,omitempty" tf:"offer,omitempty"` + + // The Publisher of the Gallery Image. Changing this forces a new resource to be created. + Publisher *string `json:"publisher,omitempty" tf:"publisher,omitempty"` + + // The SKU of the Gallery Image. Changing this forces a new resource to be created. + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // The Version of the Gallery Image. Changing this forces a new resource to be created. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type WindowsVirtualMachineGalleryImageReferenceParameters struct { + + // The Offer of the Gallery Image. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Offer *string `json:"offer" tf:"offer,omitempty"` + + // The Publisher of the Gallery Image. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Publisher *string `json:"publisher" tf:"publisher,omitempty"` + + // The SKU of the Gallery Image. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Sku *string `json:"sku" tf:"sku,omitempty"` + + // The Version of the Gallery Image. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + Version *string `json:"version" tf:"version,omitempty"` +} + +type WindowsVirtualMachineInboundNATRuleInitParameters struct { + + // The Backend Port associated with this NAT Rule. Changing this forces a new resource to be created. + BackendPort *float64 `json:"backendPort,omitempty" tf:"backend_port,omitempty"` + + // The Protocol used for this NAT Rule. Possible values are Tcp and Udp. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` +} + +type WindowsVirtualMachineInboundNATRuleObservation struct { + + // The Backend Port associated with this NAT Rule. Changing this forces a new resource to be created. + BackendPort *float64 `json:"backendPort,omitempty" tf:"backend_port,omitempty"` + + // The frontend port associated with this Inbound NAT Rule. + FrontendPort *float64 `json:"frontendPort,omitempty" tf:"frontend_port,omitempty"` + + // The Protocol used for this NAT Rule. Possible values are Tcp and Udp. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` +} + +type WindowsVirtualMachineInboundNATRuleParameters struct { + + // The Backend Port associated with this NAT Rule. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + BackendPort *float64 `json:"backendPort" tf:"backend_port,omitempty"` + + // The Protocol used for this NAT Rule. Possible values are Tcp and Udp. + // +kubebuilder:validation:Optional + Protocol *string `json:"protocol" tf:"protocol,omitempty"` +} + +type WindowsVirtualMachineInitParameters struct { + + // Can this Virtual Machine be claimed by users? Defaults to true. + AllowClaim *bool `json:"allowClaim,omitempty" tf:"allow_claim,omitempty"` + + // Should the Virtual Machine be created without a Public IP Address? Changing this forces a new resource to be created. 
+ DisallowPublicIPAddress *bool `json:"disallowPublicIpAddress,omitempty" tf:"disallow_public_ip_address,omitempty"` + + // A gallery_image_reference block as defined below. + GalleryImageReference *WindowsVirtualMachineGalleryImageReferenceInitParameters `json:"galleryImageReference,omitempty" tf:"gallery_image_reference,omitempty"` + + // One or more inbound_nat_rule blocks as defined below. Changing this forces a new resource to be created. + InboundNATRule []WindowsVirtualMachineInboundNATRuleInitParameters `json:"inboundNatRule,omitempty" tf:"inbound_nat_rule,omitempty"` + + // Specifies the name of the Dev Test Lab in which the Virtual Machine should be created. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devtestlab/v1beta1.Lab + LabName *string `json:"labName,omitempty" tf:"lab_name,omitempty"` + + // Reference to a Lab in devtestlab to populate labName. + // +kubebuilder:validation:Optional + LabNameRef *v1.Reference `json:"labNameRef,omitempty" tf:"-"` + + // Selector for a Lab in devtestlab to populate labName. + // +kubebuilder:validation:Optional + LabNameSelector *v1.Selector `json:"labNameSelector,omitempty" tf:"-"` + + // The name of a Subnet within the Dev Test Virtual Network where this machine should exist. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + LabSubnetName *string `json:"labSubnetName,omitempty" tf:"lab_subnet_name,omitempty"` + + // Reference to a Subnet in network to populate labSubnetName. + // +kubebuilder:validation:Optional + LabSubnetNameRef *v1.Reference `json:"labSubnetNameRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate labSubnetName. 
+ // +kubebuilder:validation:Optional + LabSubnetNameSelector *v1.Selector `json:"labSubnetNameSelector,omitempty" tf:"-"` + + // The ID of the Dev Test Virtual Network where this Virtual Machine should be created. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devtestlab/v1beta2.VirtualNetwork + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + LabVirtualNetworkID *string `json:"labVirtualNetworkId,omitempty" tf:"lab_virtual_network_id,omitempty"` + + // Reference to a VirtualNetwork in devtestlab to populate labVirtualNetworkId. + // +kubebuilder:validation:Optional + LabVirtualNetworkIDRef *v1.Reference `json:"labVirtualNetworkIdRef,omitempty" tf:"-"` + + // Selector for a VirtualNetwork in devtestlab to populate labVirtualNetworkId. + // +kubebuilder:validation:Optional + LabVirtualNetworkIDSelector *v1.Selector `json:"labVirtualNetworkIdSelector,omitempty" tf:"-"` + + // Specifies the supported Azure location where the Dev Test Lab exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the name of the Dev Test Machine. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Any notes about the Virtual Machine. + Notes *string `json:"notes,omitempty" tf:"notes,omitempty"` + + // The name of the resource group in which the Dev Test Lab resource exists. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. 
+ // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // The Machine Size to use for this Virtual Machine, such as Standard_F2. Changing this forces a new resource to be created. + Size *string `json:"size,omitempty" tf:"size,omitempty"` + + // The type of Storage to use on this Virtual Machine. Possible values are Standard and Premium. Changing this forces a new resource to be created. + StorageType *string `json:"storageType,omitempty" tf:"storage_type,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The Username associated with the local administrator on this Virtual Machine. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type WindowsVirtualMachineObservation struct { + + // Can this Virtual Machine be claimed by users? Defaults to true. + AllowClaim *bool `json:"allowClaim,omitempty" tf:"allow_claim,omitempty"` + + // Should the Virtual Machine be created without a Public IP Address? Changing this forces a new resource to be created. + DisallowPublicIPAddress *bool `json:"disallowPublicIpAddress,omitempty" tf:"disallow_public_ip_address,omitempty"` + + // The FQDN of the Virtual Machine. + Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"` + + // A gallery_image_reference block as defined below. + GalleryImageReference *WindowsVirtualMachineGalleryImageReferenceObservation `json:"galleryImageReference,omitempty" tf:"gallery_image_reference,omitempty"` + + // The ID of the Virtual Machine. 
+ ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // One or more inbound_nat_rule blocks as defined below. Changing this forces a new resource to be created. + InboundNATRule []WindowsVirtualMachineInboundNATRuleObservation `json:"inboundNatRule,omitempty" tf:"inbound_nat_rule,omitempty"` + + // Specifies the name of the Dev Test Lab in which the Virtual Machine should be created. Changing this forces a new resource to be created. + LabName *string `json:"labName,omitempty" tf:"lab_name,omitempty"` + + // The name of a Subnet within the Dev Test Virtual Network where this machine should exist. Changing this forces a new resource to be created. + LabSubnetName *string `json:"labSubnetName,omitempty" tf:"lab_subnet_name,omitempty"` + + // The ID of the Dev Test Virtual Network where this Virtual Machine should be created. Changing this forces a new resource to be created. + LabVirtualNetworkID *string `json:"labVirtualNetworkId,omitempty" tf:"lab_virtual_network_id,omitempty"` + + // Specifies the supported Azure location where the Dev Test Lab exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the name of the Dev Test Machine. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Any notes about the Virtual Machine. + Notes *string `json:"notes,omitempty" tf:"notes,omitempty"` + + // The name of the resource group in which the Dev Test Lab resource exists. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // The Machine Size to use for this Virtual Machine, such as Standard_F2. Changing this forces a new resource to be created. + Size *string `json:"size,omitempty" tf:"size,omitempty"` + + // The type of Storage to use on this Virtual Machine. Possible values are Standard and Premium. 
Changing this forces a new resource to be created. + StorageType *string `json:"storageType,omitempty" tf:"storage_type,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The unique immutable identifier of the Virtual Machine. + UniqueIdentifier *string `json:"uniqueIdentifier,omitempty" tf:"unique_identifier,omitempty"` + + // The Username associated with the local administrator on this Virtual Machine. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type WindowsVirtualMachineParameters struct { + + // Can this Virtual Machine be claimed by users? Defaults to true. + // +kubebuilder:validation:Optional + AllowClaim *bool `json:"allowClaim,omitempty" tf:"allow_claim,omitempty"` + + // Should the Virtual Machine be created without a Public IP Address? Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + DisallowPublicIPAddress *bool `json:"disallowPublicIpAddress,omitempty" tf:"disallow_public_ip_address,omitempty"` + + // A gallery_image_reference block as defined below. + // +kubebuilder:validation:Optional + GalleryImageReference *WindowsVirtualMachineGalleryImageReferenceParameters `json:"galleryImageReference,omitempty" tf:"gallery_image_reference,omitempty"` + + // One or more inbound_nat_rule blocks as defined below. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + InboundNATRule []WindowsVirtualMachineInboundNATRuleParameters `json:"inboundNatRule,omitempty" tf:"inbound_nat_rule,omitempty"` + + // Specifies the name of the Dev Test Lab in which the Virtual Machine should be created. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devtestlab/v1beta1.Lab + // +kubebuilder:validation:Optional + LabName *string `json:"labName,omitempty" tf:"lab_name,omitempty"` + + // Reference to a Lab in devtestlab to populate labName. + // +kubebuilder:validation:Optional + LabNameRef *v1.Reference `json:"labNameRef,omitempty" tf:"-"` + + // Selector for a Lab in devtestlab to populate labName. + // +kubebuilder:validation:Optional + LabNameSelector *v1.Selector `json:"labNameSelector,omitempty" tf:"-"` + + // The name of a Subnet within the Dev Test Virtual Network where this machine should exist. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +kubebuilder:validation:Optional + LabSubnetName *string `json:"labSubnetName,omitempty" tf:"lab_subnet_name,omitempty"` + + // Reference to a Subnet in network to populate labSubnetName. + // +kubebuilder:validation:Optional + LabSubnetNameRef *v1.Reference `json:"labSubnetNameRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate labSubnetName. + // +kubebuilder:validation:Optional + LabSubnetNameSelector *v1.Selector `json:"labSubnetNameSelector,omitempty" tf:"-"` + + // The ID of the Dev Test Virtual Network where this Virtual Machine should be created. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devtestlab/v1beta2.VirtualNetwork + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + LabVirtualNetworkID *string `json:"labVirtualNetworkId,omitempty" tf:"lab_virtual_network_id,omitempty"` + + // Reference to a VirtualNetwork in devtestlab to populate labVirtualNetworkId. 
+ // +kubebuilder:validation:Optional + LabVirtualNetworkIDRef *v1.Reference `json:"labVirtualNetworkIdRef,omitempty" tf:"-"` + + // Selector for a VirtualNetwork in devtestlab to populate labVirtualNetworkId. + // +kubebuilder:validation:Optional + LabVirtualNetworkIDSelector *v1.Selector `json:"labVirtualNetworkIdSelector,omitempty" tf:"-"` + + // Specifies the supported Azure location where the Dev Test Lab exists. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the name of the Dev Test Machine. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Any notes about the Virtual Machine. + // +kubebuilder:validation:Optional + Notes *string `json:"notes,omitempty" tf:"notes,omitempty"` + + // The Password associated with the username used to login to this Virtual Machine. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` + + // The name of the resource group in which the Dev Test Lab resource exists. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. 
+ // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // The Machine Size to use for this Virtual Machine, such as Standard_F2. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Size *string `json:"size,omitempty" tf:"size,omitempty"` + + // The type of Storage to use on this Virtual Machine. Possible values are Standard and Premium. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + StorageType *string `json:"storageType,omitempty" tf:"storage_type,omitempty"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The Username associated with the local administrator on this Virtual Machine. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +// WindowsVirtualMachineSpec defines the desired state of WindowsVirtualMachine +type WindowsVirtualMachineSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider WindowsVirtualMachineParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider WindowsVirtualMachineInitParameters `json:"initProvider,omitempty"` +} + +// WindowsVirtualMachineStatus defines the observed state of WindowsVirtualMachine. +type WindowsVirtualMachineStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider WindowsVirtualMachineObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// WindowsVirtualMachine is the Schema for the WindowsVirtualMachines API. Manages a Windows Virtual Machine within a Dev Test Lab. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type WindowsVirtualMachine struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.galleryImageReference) || (has(self.initProvider) && has(self.initProvider.galleryImageReference))",message="spec.forProvider.galleryImageReference is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in 
self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.passwordSecretRef)",message="spec.forProvider.passwordSecretRef is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.size) || (has(self.initProvider) && has(self.initProvider.size))",message="spec.forProvider.size is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.storageType) || (has(self.initProvider) && has(self.initProvider.storageType))",message="spec.forProvider.storageType is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.username) || (has(self.initProvider) && has(self.initProvider.username))",message="spec.forProvider.username is a required parameter" + Spec WindowsVirtualMachineSpec `json:"spec"` + Status WindowsVirtualMachineStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// WindowsVirtualMachineList contains a list of WindowsVirtualMachines +type WindowsVirtualMachineList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []WindowsVirtualMachine `json:"items"` +} + +// Repository type metadata. 
+var ( + WindowsVirtualMachine_Kind = "WindowsVirtualMachine" + WindowsVirtualMachine_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: WindowsVirtualMachine_Kind}.String() + WindowsVirtualMachine_KindAPIVersion = WindowsVirtualMachine_Kind + "." + CRDGroupVersion.String() + WindowsVirtualMachine_GroupVersionKind = CRDGroupVersion.WithKind(WindowsVirtualMachine_Kind) +) + +func init() { + SchemeBuilder.Register(&WindowsVirtualMachine{}, &WindowsVirtualMachineList{}) +} diff --git a/apis/digitaltwins/v1beta1/zz_generated.conversion_spokes.go b/apis/digitaltwins/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..ad52b5df4 --- /dev/null +++ b/apis/digitaltwins/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Instance to the hub type. +func (tr *Instance) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Instance type. 
+func (tr *Instance) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/digitaltwins/v1beta1/zz_generated.conversion_hubs.go b/apis/digitaltwins/v1beta2/zz_generated.conversion_hubs.go similarity index 93% rename from apis/digitaltwins/v1beta1/zz_generated.conversion_hubs.go rename to apis/digitaltwins/v1beta2/zz_generated.conversion_hubs.go index 61a4a671c..d7a1e7065 100755 --- a/apis/digitaltwins/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/digitaltwins/v1beta2/zz_generated.conversion_hubs.go @@ -4,7 +4,7 @@ // Code generated by upjet. DO NOT EDIT. -package v1beta1 +package v1beta2 // Hub marks this type as a conversion hub. func (tr *Instance) Hub() {} diff --git a/apis/digitaltwins/v1beta2/zz_generated.deepcopy.go b/apis/digitaltwins/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..22cd798eb --- /dev/null +++ b/apis/digitaltwins/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,364 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentityInitParameters) DeepCopyInto(out *IdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityInitParameters. +func (in *IdentityInitParameters) DeepCopy() *IdentityInitParameters { + if in == nil { + return nil + } + out := new(IdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityObservation) DeepCopyInto(out *IdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityObservation. +func (in *IdentityObservation) DeepCopy() *IdentityObservation { + if in == nil { + return nil + } + out := new(IdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentityParameters) DeepCopyInto(out *IdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityParameters. +func (in *IdentityParameters) DeepCopy() *IdentityParameters { + if in == nil { + return nil + } + out := new(IdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Instance) DeepCopyInto(out *Instance) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Instance. +func (in *Instance) DeepCopy() *Instance { + if in == nil { + return nil + } + out := new(Instance) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Instance) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InstanceInitParameters) DeepCopyInto(out *InstanceInitParameters) { + *out = *in + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceInitParameters. +func (in *InstanceInitParameters) DeepCopy() *InstanceInitParameters { + if in == nil { + return nil + } + out := new(InstanceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceList) DeepCopyInto(out *InstanceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Instance, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceList. +func (in *InstanceList) DeepCopy() *InstanceList { + if in == nil { + return nil + } + out := new(InstanceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *InstanceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InstanceObservation) DeepCopyInto(out *InstanceObservation) { + *out = *in + if in.HostName != nil { + in, out := &in.HostName, &out.HostName + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceObservation. +func (in *InstanceObservation) DeepCopy() *InstanceObservation { + if in == nil { + return nil + } + out := new(InstanceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InstanceParameters) DeepCopyInto(out *InstanceParameters) { + *out = *in + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceParameters. +func (in *InstanceParameters) DeepCopy() *InstanceParameters { + if in == nil { + return nil + } + out := new(InstanceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceSpec) DeepCopyInto(out *InstanceSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceSpec. 
+func (in *InstanceSpec) DeepCopy() *InstanceSpec { + if in == nil { + return nil + } + out := new(InstanceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceStatus) DeepCopyInto(out *InstanceStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceStatus. +func (in *InstanceStatus) DeepCopy() *InstanceStatus { + if in == nil { + return nil + } + out := new(InstanceStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/digitaltwins/v1beta2/zz_generated.managed.go b/apis/digitaltwins/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..37ec7f5cf --- /dev/null +++ b/apis/digitaltwins/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Instance. +func (mg *Instance) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Instance. +func (mg *Instance) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Instance. +func (mg *Instance) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Instance. +func (mg *Instance) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Instance. 
+func (mg *Instance) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Instance. +func (mg *Instance) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Instance. +func (mg *Instance) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Instance. +func (mg *Instance) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Instance. +func (mg *Instance) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Instance. +func (mg *Instance) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Instance. +func (mg *Instance) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Instance. +func (mg *Instance) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/digitaltwins/v1beta2/zz_generated.managedlist.go b/apis/digitaltwins/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..28d925e64 --- /dev/null +++ b/apis/digitaltwins/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this InstanceList. 
+func (l *InstanceList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/digitaltwins/v1beta2/zz_generated.resolvers.go b/apis/digitaltwins/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..36035e681 --- /dev/null +++ b/apis/digitaltwins/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,49 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + client "sigs.k8s.io/controller-runtime/pkg/client" + + // ResolveReferences of this Instance. + apisresolver "github.com/upbound/provider-azure/internal/apis" +) + +func (mg *Instance) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = 
reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/digitaltwins/v1beta2/zz_groupversion_info.go b/apis/digitaltwins/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..7e9613106 --- /dev/null +++ b/apis/digitaltwins/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=digitaltwins.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "digitaltwins.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/digitaltwins/v1beta2/zz_instance_terraformed.go b/apis/digitaltwins/v1beta2/zz_instance_terraformed.go new file mode 100755 index 000000000..80a7f2018 --- /dev/null +++ b/apis/digitaltwins/v1beta2/zz_instance_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Instance +func (mg *Instance) GetTerraformResourceType() string { + return "azurerm_digital_twins_instance" +} + +// GetConnectionDetailsMapping for this Instance +func (tr *Instance) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Instance +func (tr *Instance) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Instance +func (tr *Instance) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Instance +func (tr *Instance) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Instance +func (tr *Instance) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Instance +func (tr *Instance) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Instance +func (tr *Instance) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, 
json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Instance +func (tr *Instance) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Instance using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Instance) LateInitialize(attrs []byte) (bool, error) { + params := &InstanceParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Instance) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/digitaltwins/v1beta2/zz_instance_types.go b/apis/digitaltwins/v1beta2/zz_instance_types.go new file mode 100755 index 000000000..27663851b --- /dev/null +++ b/apis/digitaltwins/v1beta2/zz_instance_types.go @@ -0,0 +1,177 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type IdentityInitParameters struct { + + // A list of User Assigned Managed Identity IDs to be assigned to this Digital Twins instance. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Digital Twins instance. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityObservation struct { + + // A list of User Assigned Managed Identity IDs to be assigned to this Digital Twins instance. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Principal ID associated with this Managed Service Identity. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID associated with this Managed Service Identity. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Digital Twins instance. 
Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityParameters struct { + + // A list of User Assigned Managed Identity IDs to be assigned to this Digital Twins instance. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Digital Twins instance. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type InstanceInitParameters struct { + + // An identity block as defined below. + Identity *IdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The Azure Region where the Digital Twins instance should exist. Changing this forces a new Digital Twins instance to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A mapping of tags which should be assigned to the Digital Twins instance. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type InstanceObservation struct { + + // The API endpoint to work with this Digital Twins instance. + HostName *string `json:"hostName,omitempty" tf:"host_name,omitempty"` + + // The ID of the Digital Twins instance. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. + Identity *IdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // The Azure Region where the Digital Twins instance should exist. Changing this forces a new Digital Twins instance to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the Resource Group where the Digital Twins instance should exist. 
Changing this forces a new Digital Twins instance to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A mapping of tags which should be assigned to the Digital Twins instance. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type InstanceParameters struct { + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *IdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The Azure Region where the Digital Twins instance should exist. Changing this forces a new Digital Twins instance to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the Resource Group where the Digital Twins instance should exist. Changing this forces a new Digital Twins instance to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A mapping of tags which should be assigned to the Digital Twins instance. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +// InstanceSpec defines the desired state of Instance +type InstanceSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider InstanceParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider InstanceInitParameters `json:"initProvider,omitempty"` +} + +// InstanceStatus defines the observed state of Instance. +type InstanceStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider InstanceObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Instance is the Schema for the Instances API. Manages a Digital Twins instance. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type Instance struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + Spec InstanceSpec `json:"spec"` + Status InstanceStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// InstanceList contains a list of Instances +type InstanceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Instance `json:"items"` +} + +// Repository type metadata. +var ( + Instance_Kind = "Instance" + Instance_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Instance_Kind}.String() + Instance_KindAPIVersion = Instance_Kind + "." 
+ CRDGroupVersion.String() + Instance_GroupVersionKind = CRDGroupVersion.WithKind(Instance_Kind) +) + +func init() { + SchemeBuilder.Register(&Instance{}, &InstanceList{}) +} diff --git a/apis/elastic/v1beta1/zz_generated.conversion_spokes.go b/apis/elastic/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..13d0c76bf --- /dev/null +++ b/apis/elastic/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this CloudElasticsearch to the hub type. +func (tr *CloudElasticsearch) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the CloudElasticsearch type. 
+func (tr *CloudElasticsearch) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/elastic/v1beta2/zz_cloudelasticsearch_terraformed.go b/apis/elastic/v1beta2/zz_cloudelasticsearch_terraformed.go new file mode 100755 index 000000000..8366af3a8 --- /dev/null +++ b/apis/elastic/v1beta2/zz_cloudelasticsearch_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this CloudElasticsearch +func (mg *CloudElasticsearch) GetTerraformResourceType() string { + return "azurerm_elastic_cloud_elasticsearch" +} + +// GetConnectionDetailsMapping for this CloudElasticsearch +func (tr *CloudElasticsearch) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this CloudElasticsearch +func (tr *CloudElasticsearch) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this CloudElasticsearch +func (tr *CloudElasticsearch) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform 
resource of this CloudElasticsearch +func (tr *CloudElasticsearch) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this CloudElasticsearch +func (tr *CloudElasticsearch) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this CloudElasticsearch +func (tr *CloudElasticsearch) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this CloudElasticsearch +func (tr *CloudElasticsearch) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this CloudElasticsearch +func (tr *CloudElasticsearch) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this CloudElasticsearch using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *CloudElasticsearch) LateInitialize(attrs []byte) (bool, error) { + params := &CloudElasticsearchParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *CloudElasticsearch) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/elastic/v1beta2/zz_cloudelasticsearch_types.go b/apis/elastic/v1beta2/zz_cloudelasticsearch_types.go new file mode 100755 index 000000000..bafec5005 --- /dev/null +++ b/apis/elastic/v1beta2/zz_cloudelasticsearch_types.go @@ -0,0 +1,274 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CloudElasticsearchInitParameters struct { + + // Specifies the Email Address which should be associated with this Elasticsearch account. Changing this forces a new Elasticsearch to be created. 
+ ElasticCloudEmailAddress *string `json:"elasticCloudEmailAddress,omitempty" tf:"elastic_cloud_email_address,omitempty"` + + // The Azure Region where the Elasticsearch resource should exist. Changing this forces a new Elasticsearch to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A logs block as defined below. + Logs *LogsInitParameters `json:"logs,omitempty" tf:"logs,omitempty"` + + // Specifies if the Elasticsearch should have monitoring configured? Defaults to true. Changing this forces a new Elasticsearch to be created. + MonitoringEnabled *bool `json:"monitoringEnabled,omitempty" tf:"monitoring_enabled,omitempty"` + + // Specifies the name of the SKU for this Elasticsearch. Changing this forces a new Elasticsearch to be created. + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // A mapping of tags which should be assigned to the Elasticsearch resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type CloudElasticsearchObservation struct { + + // The ID of the Deployment within Elastic Cloud. + ElasticCloudDeploymentID *string `json:"elasticCloudDeploymentId,omitempty" tf:"elastic_cloud_deployment_id,omitempty"` + + // Specifies the Email Address which should be associated with this Elasticsearch account. Changing this forces a new Elasticsearch to be created. + ElasticCloudEmailAddress *string `json:"elasticCloudEmailAddress,omitempty" tf:"elastic_cloud_email_address,omitempty"` + + // The Default URL used for Single Sign On (SSO) to Elastic Cloud. + ElasticCloudSsoDefaultURL *string `json:"elasticCloudSsoDefaultUrl,omitempty" tf:"elastic_cloud_sso_default_url,omitempty"` + + // The ID of the User Account within Elastic Cloud. + ElasticCloudUserID *string `json:"elasticCloudUserId,omitempty" tf:"elastic_cloud_user_id,omitempty"` + + // The URL to the Elasticsearch Service associated with this Elasticsearch. 
+ ElasticsearchServiceURL *string `json:"elasticsearchServiceUrl,omitempty" tf:"elasticsearch_service_url,omitempty"` + + // The ID of the Elasticsearch. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The URL to the Kibana Dashboard associated with this Elasticsearch. + KibanaServiceURL *string `json:"kibanaServiceUrl,omitempty" tf:"kibana_service_url,omitempty"` + + // The URI used for SSO to the Kibana Dashboard associated with this Elasticsearch. + KibanaSsoURI *string `json:"kibanaSsoUri,omitempty" tf:"kibana_sso_uri,omitempty"` + + // The Azure Region where the Elasticsearch resource should exist. Changing this forces a new Elasticsearch to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A logs block as defined below. + Logs *LogsObservation `json:"logs,omitempty" tf:"logs,omitempty"` + + // Specifies if the Elasticsearch should have monitoring configured? Defaults to true. Changing this forces a new Elasticsearch to be created. + MonitoringEnabled *bool `json:"monitoringEnabled,omitempty" tf:"monitoring_enabled,omitempty"` + + // The name of the Resource Group where the Elasticsearch resource should exist. Changing this forces a new Elasticsearch to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Specifies the name of the SKU for this Elasticsearch. Changing this forces a new Elasticsearch to be created. + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // A mapping of tags which should be assigned to the Elasticsearch resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type CloudElasticsearchParameters struct { + + // Specifies the Email Address which should be associated with this Elasticsearch account. Changing this forces a new Elasticsearch to be created. 
+ // +kubebuilder:validation:Optional + ElasticCloudEmailAddress *string `json:"elasticCloudEmailAddress,omitempty" tf:"elastic_cloud_email_address,omitempty"` + + // The Azure Region where the Elasticsearch resource should exist. Changing this forces a new Elasticsearch to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A logs block as defined below. + // +kubebuilder:validation:Optional + Logs *LogsParameters `json:"logs,omitempty" tf:"logs,omitempty"` + + // Specifies if the Elasticsearch should have monitoring configured? Defaults to true. Changing this forces a new Elasticsearch to be created. + // +kubebuilder:validation:Optional + MonitoringEnabled *bool `json:"monitoringEnabled,omitempty" tf:"monitoring_enabled,omitempty"` + + // The name of the Resource Group where the Elasticsearch resource should exist. Changing this forces a new Elasticsearch to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // Specifies the name of the SKU for this Elasticsearch. Changing this forces a new Elasticsearch to be created. + // +kubebuilder:validation:Optional + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // A mapping of tags which should be assigned to the Elasticsearch resource. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type FilteringTagInitParameters struct { + + // Specifies the type of action which should be taken when the Tag matches the name and value. Possible values are Exclude and Include. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The name which should be used for this Elasticsearch resource. Changing this forces a new Elasticsearch to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies the value of the Tag which should be filtered. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type FilteringTagObservation struct { + + // Specifies the type of action which should be taken when the Tag matches the name and value. Possible values are Exclude and Include. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The name which should be used for this Elasticsearch resource. Changing this forces a new Elasticsearch to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies the value of the Tag which should be filtered. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type FilteringTagParameters struct { + + // Specifies the type of action which should be taken when the Tag matches the name and value. Possible values are Exclude and Include. + // +kubebuilder:validation:Optional + Action *string `json:"action" tf:"action,omitempty"` + + // The name which should be used for this Elasticsearch resource. Changing this forces a new Elasticsearch to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Specifies the value of the Tag which should be filtered. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type LogsInitParameters struct { + + // A list of filtering_tag blocks as defined above. 
+ FilteringTag []FilteringTagInitParameters `json:"filteringTag,omitempty" tf:"filtering_tag,omitempty"` + + // Specifies if the Azure Activity Logs should be sent to the Elasticsearch cluster. Defaults to false. + SendActivityLogs *bool `json:"sendActivityLogs,omitempty" tf:"send_activity_logs,omitempty"` + + // Specifies if the AzureAD Logs should be sent to the Elasticsearch cluster. Defaults to false. + SendAzureadLogs *bool `json:"sendAzureadLogs,omitempty" tf:"send_azuread_logs,omitempty"` + + // Specifies if the Azure Subscription Logs should be sent to the Elasticsearch cluster. Defaults to false. + SendSubscriptionLogs *bool `json:"sendSubscriptionLogs,omitempty" tf:"send_subscription_logs,omitempty"` +} + +type LogsObservation struct { + + // A list of filtering_tag blocks as defined above. + FilteringTag []FilteringTagObservation `json:"filteringTag,omitempty" tf:"filtering_tag,omitempty"` + + // Specifies if the Azure Activity Logs should be sent to the Elasticsearch cluster. Defaults to false. + SendActivityLogs *bool `json:"sendActivityLogs,omitempty" tf:"send_activity_logs,omitempty"` + + // Specifies if the AzureAD Logs should be sent to the Elasticsearch cluster. Defaults to false. + SendAzureadLogs *bool `json:"sendAzureadLogs,omitempty" tf:"send_azuread_logs,omitempty"` + + // Specifies if the Azure Subscription Logs should be sent to the Elasticsearch cluster. Defaults to false. + SendSubscriptionLogs *bool `json:"sendSubscriptionLogs,omitempty" tf:"send_subscription_logs,omitempty"` +} + +type LogsParameters struct { + + // A list of filtering_tag blocks as defined above. + // +kubebuilder:validation:Optional + FilteringTag []FilteringTagParameters `json:"filteringTag,omitempty" tf:"filtering_tag,omitempty"` + + // Specifies if the Azure Activity Logs should be sent to the Elasticsearch cluster. Defaults to false. 
+ // +kubebuilder:validation:Optional + SendActivityLogs *bool `json:"sendActivityLogs,omitempty" tf:"send_activity_logs,omitempty"` + + // Specifies if the AzureAD Logs should be sent to the Elasticsearch cluster. Defaults to false. + // +kubebuilder:validation:Optional + SendAzureadLogs *bool `json:"sendAzureadLogs,omitempty" tf:"send_azuread_logs,omitempty"` + + // Specifies if the Azure Subscription Logs should be sent to the Elasticsearch cluster. Defaults to false. + // +kubebuilder:validation:Optional + SendSubscriptionLogs *bool `json:"sendSubscriptionLogs,omitempty" tf:"send_subscription_logs,omitempty"` +} + +// CloudElasticsearchSpec defines the desired state of CloudElasticsearch +type CloudElasticsearchSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider CloudElasticsearchParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider CloudElasticsearchInitParameters `json:"initProvider,omitempty"` +} + +// CloudElasticsearchStatus defines the observed state of CloudElasticsearch. +type CloudElasticsearchStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider CloudElasticsearchObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// CloudElasticsearch is the Schema for the CloudElasticsearchs API. 
Manages an Elasticsearch cluster in Elastic Cloud. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type CloudElasticsearch struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.elasticCloudEmailAddress) || (has(self.initProvider) && has(self.initProvider.elasticCloudEmailAddress))",message="spec.forProvider.elasticCloudEmailAddress is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.skuName) || (has(self.initProvider) && has(self.initProvider.skuName))",message="spec.forProvider.skuName is a required parameter" + Spec CloudElasticsearchSpec `json:"spec"` + Status CloudElasticsearchStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// CloudElasticsearchList contains a list of CloudElasticsearchs +type CloudElasticsearchList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta 
`json:"metadata,omitempty"` + Items []CloudElasticsearch `json:"items"` +} + +// Repository type metadata. +var ( + CloudElasticsearch_Kind = "CloudElasticsearch" + CloudElasticsearch_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: CloudElasticsearch_Kind}.String() + CloudElasticsearch_KindAPIVersion = CloudElasticsearch_Kind + "." + CRDGroupVersion.String() + CloudElasticsearch_GroupVersionKind = CRDGroupVersion.WithKind(CloudElasticsearch_Kind) +) + +func init() { + SchemeBuilder.Register(&CloudElasticsearch{}, &CloudElasticsearchList{}) +} diff --git a/apis/elastic/v1beta1/zz_generated.conversion_hubs.go b/apis/elastic/v1beta2/zz_generated.conversion_hubs.go similarity index 93% rename from apis/elastic/v1beta1/zz_generated.conversion_hubs.go rename to apis/elastic/v1beta2/zz_generated.conversion_hubs.go index 80e4c72e0..7639cf8a8 100755 --- a/apis/elastic/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/elastic/v1beta2/zz_generated.conversion_hubs.go @@ -4,7 +4,7 @@ // Code generated by upjet. DO NOT EDIT. -package v1beta1 +package v1beta2 // Hub marks this type as a conversion hub. func (tr *CloudElasticsearch) Hub() {} diff --git a/apis/elastic/v1beta2/zz_generated.deepcopy.go b/apis/elastic/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..2701c5a47 --- /dev/null +++ b/apis/elastic/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,532 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CloudElasticsearch) DeepCopyInto(out *CloudElasticsearch) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudElasticsearch. +func (in *CloudElasticsearch) DeepCopy() *CloudElasticsearch { + if in == nil { + return nil + } + out := new(CloudElasticsearch) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CloudElasticsearch) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudElasticsearchInitParameters) DeepCopyInto(out *CloudElasticsearchInitParameters) { + *out = *in + if in.ElasticCloudEmailAddress != nil { + in, out := &in.ElasticCloudEmailAddress, &out.ElasticCloudEmailAddress + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Logs != nil { + in, out := &in.Logs, &out.Logs + *out = new(LogsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MonitoringEnabled != nil { + in, out := &in.MonitoringEnabled, &out.MonitoringEnabled + *out = new(bool) + **out = **in + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the 
receiver, creating a new CloudElasticsearchInitParameters. +func (in *CloudElasticsearchInitParameters) DeepCopy() *CloudElasticsearchInitParameters { + if in == nil { + return nil + } + out := new(CloudElasticsearchInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudElasticsearchList) DeepCopyInto(out *CloudElasticsearchList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CloudElasticsearch, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudElasticsearchList. +func (in *CloudElasticsearchList) DeepCopy() *CloudElasticsearchList { + if in == nil { + return nil + } + out := new(CloudElasticsearchList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CloudElasticsearchList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CloudElasticsearchObservation) DeepCopyInto(out *CloudElasticsearchObservation) { + *out = *in + if in.ElasticCloudDeploymentID != nil { + in, out := &in.ElasticCloudDeploymentID, &out.ElasticCloudDeploymentID + *out = new(string) + **out = **in + } + if in.ElasticCloudEmailAddress != nil { + in, out := &in.ElasticCloudEmailAddress, &out.ElasticCloudEmailAddress + *out = new(string) + **out = **in + } + if in.ElasticCloudSsoDefaultURL != nil { + in, out := &in.ElasticCloudSsoDefaultURL, &out.ElasticCloudSsoDefaultURL + *out = new(string) + **out = **in + } + if in.ElasticCloudUserID != nil { + in, out := &in.ElasticCloudUserID, &out.ElasticCloudUserID + *out = new(string) + **out = **in + } + if in.ElasticsearchServiceURL != nil { + in, out := &in.ElasticsearchServiceURL, &out.ElasticsearchServiceURL + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.KibanaServiceURL != nil { + in, out := &in.KibanaServiceURL, &out.KibanaServiceURL + *out = new(string) + **out = **in + } + if in.KibanaSsoURI != nil { + in, out := &in.KibanaSsoURI, &out.KibanaSsoURI + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Logs != nil { + in, out := &in.Logs, &out.Logs + *out = new(LogsObservation) + (*in).DeepCopyInto(*out) + } + if in.MonitoringEnabled != nil { + in, out := &in.MonitoringEnabled, &out.MonitoringEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + 
inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudElasticsearchObservation. +func (in *CloudElasticsearchObservation) DeepCopy() *CloudElasticsearchObservation { + if in == nil { + return nil + } + out := new(CloudElasticsearchObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudElasticsearchParameters) DeepCopyInto(out *CloudElasticsearchParameters) { + *out = *in + if in.ElasticCloudEmailAddress != nil { + in, out := &in.ElasticCloudEmailAddress, &out.ElasticCloudEmailAddress + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Logs != nil { + in, out := &in.Logs, &out.Logs + *out = new(LogsParameters) + (*in).DeepCopyInto(*out) + } + if in.MonitoringEnabled != nil { + in, out := &in.MonitoringEnabled, &out.MonitoringEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, 
&outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudElasticsearchParameters. +func (in *CloudElasticsearchParameters) DeepCopy() *CloudElasticsearchParameters { + if in == nil { + return nil + } + out := new(CloudElasticsearchParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudElasticsearchSpec) DeepCopyInto(out *CloudElasticsearchSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudElasticsearchSpec. +func (in *CloudElasticsearchSpec) DeepCopy() *CloudElasticsearchSpec { + if in == nil { + return nil + } + out := new(CloudElasticsearchSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudElasticsearchStatus) DeepCopyInto(out *CloudElasticsearchStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudElasticsearchStatus. +func (in *CloudElasticsearchStatus) DeepCopy() *CloudElasticsearchStatus { + if in == nil { + return nil + } + out := new(CloudElasticsearchStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FilteringTagInitParameters) DeepCopyInto(out *FilteringTagInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilteringTagInitParameters. +func (in *FilteringTagInitParameters) DeepCopy() *FilteringTagInitParameters { + if in == nil { + return nil + } + out := new(FilteringTagInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilteringTagObservation) DeepCopyInto(out *FilteringTagObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilteringTagObservation. +func (in *FilteringTagObservation) DeepCopy() *FilteringTagObservation { + if in == nil { + return nil + } + out := new(FilteringTagObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FilteringTagParameters) DeepCopyInto(out *FilteringTagParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilteringTagParameters. +func (in *FilteringTagParameters) DeepCopy() *FilteringTagParameters { + if in == nil { + return nil + } + out := new(FilteringTagParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogsInitParameters) DeepCopyInto(out *LogsInitParameters) { + *out = *in + if in.FilteringTag != nil { + in, out := &in.FilteringTag, &out.FilteringTag + *out = make([]FilteringTagInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SendActivityLogs != nil { + in, out := &in.SendActivityLogs, &out.SendActivityLogs + *out = new(bool) + **out = **in + } + if in.SendAzureadLogs != nil { + in, out := &in.SendAzureadLogs, &out.SendAzureadLogs + *out = new(bool) + **out = **in + } + if in.SendSubscriptionLogs != nil { + in, out := &in.SendSubscriptionLogs, &out.SendSubscriptionLogs + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogsInitParameters. +func (in *LogsInitParameters) DeepCopy() *LogsInitParameters { + if in == nil { + return nil + } + out := new(LogsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LogsObservation) DeepCopyInto(out *LogsObservation) { + *out = *in + if in.FilteringTag != nil { + in, out := &in.FilteringTag, &out.FilteringTag + *out = make([]FilteringTagObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SendActivityLogs != nil { + in, out := &in.SendActivityLogs, &out.SendActivityLogs + *out = new(bool) + **out = **in + } + if in.SendAzureadLogs != nil { + in, out := &in.SendAzureadLogs, &out.SendAzureadLogs + *out = new(bool) + **out = **in + } + if in.SendSubscriptionLogs != nil { + in, out := &in.SendSubscriptionLogs, &out.SendSubscriptionLogs + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogsObservation. +func (in *LogsObservation) DeepCopy() *LogsObservation { + if in == nil { + return nil + } + out := new(LogsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogsParameters) DeepCopyInto(out *LogsParameters) { + *out = *in + if in.FilteringTag != nil { + in, out := &in.FilteringTag, &out.FilteringTag + *out = make([]FilteringTagParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SendActivityLogs != nil { + in, out := &in.SendActivityLogs, &out.SendActivityLogs + *out = new(bool) + **out = **in + } + if in.SendAzureadLogs != nil { + in, out := &in.SendAzureadLogs, &out.SendAzureadLogs + *out = new(bool) + **out = **in + } + if in.SendSubscriptionLogs != nil { + in, out := &in.SendSubscriptionLogs, &out.SendSubscriptionLogs + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogsParameters. 
+func (in *LogsParameters) DeepCopy() *LogsParameters { + if in == nil { + return nil + } + out := new(LogsParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/elastic/v1beta2/zz_generated.managed.go b/apis/elastic/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..5a5934bed --- /dev/null +++ b/apis/elastic/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this CloudElasticsearch. +func (mg *CloudElasticsearch) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this CloudElasticsearch. +func (mg *CloudElasticsearch) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this CloudElasticsearch. +func (mg *CloudElasticsearch) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this CloudElasticsearch. +func (mg *CloudElasticsearch) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this CloudElasticsearch. +func (mg *CloudElasticsearch) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this CloudElasticsearch. +func (mg *CloudElasticsearch) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this CloudElasticsearch. +func (mg *CloudElasticsearch) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this CloudElasticsearch. 
+func (mg *CloudElasticsearch) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this CloudElasticsearch. +func (mg *CloudElasticsearch) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this CloudElasticsearch. +func (mg *CloudElasticsearch) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this CloudElasticsearch. +func (mg *CloudElasticsearch) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this CloudElasticsearch. +func (mg *CloudElasticsearch) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/elastic/v1beta2/zz_generated.managedlist.go b/apis/elastic/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..003e25aab --- /dev/null +++ b/apis/elastic/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this CloudElasticsearchList. +func (l *CloudElasticsearchList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/elastic/v1beta2/zz_generated.resolvers.go b/apis/elastic/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..4a569e0f1 --- /dev/null +++ b/apis/elastic/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,48 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. 
+// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + apisresolver "github.com/upbound/provider-azure/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *CloudElasticsearch) ResolveReferences( // ResolveReferences of this CloudElasticsearch. + ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/elastic/v1beta2/zz_groupversion_info.go b/apis/elastic/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..f8e253a77 --- /dev/null +++ b/apis/elastic/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +// +kubebuilder:object:generate=true +// +groupName=elastic.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "elastic.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/eventgrid/v1beta1/zz_domaintopic_types.go b/apis/eventgrid/v1beta1/zz_domaintopic_types.go index 883171476..38b3fbb38 100755 --- a/apis/eventgrid/v1beta1/zz_domaintopic_types.go +++ b/apis/eventgrid/v1beta1/zz_domaintopic_types.go @@ -31,7 +31,7 @@ type DomainTopicObservation struct { type DomainTopicParameters struct { // Specifies the name of the EventGrid Domain. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventgrid/v1beta1.Domain + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventgrid/v1beta2.Domain // +kubebuilder:validation:Optional DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` diff --git a/apis/eventgrid/v1beta1/zz_generated.conversion_hubs.go b/apis/eventgrid/v1beta1/zz_generated.conversion_hubs.go index efcc8f84f..487a5a2d7 100755 --- a/apis/eventgrid/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/eventgrid/v1beta1/zz_generated.conversion_hubs.go @@ -6,17 +6,5 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *Domain) Hub() {} - // Hub marks this type as a conversion hub. 
func (tr *DomainTopic) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *EventSubscription) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *SystemTopic) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Topic) Hub() {} diff --git a/apis/eventgrid/v1beta1/zz_generated.conversion_spokes.go b/apis/eventgrid/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..605c53e95 --- /dev/null +++ b/apis/eventgrid/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,94 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Domain to the hub type. +func (tr *Domain) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Domain type. +func (tr *Domain) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this EventSubscription to the hub type. 
+func (tr *EventSubscription) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the EventSubscription type. +func (tr *EventSubscription) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this SystemTopic to the hub type. +func (tr *SystemTopic) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the SystemTopic type. +func (tr *SystemTopic) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Topic to the hub type. 
+func (tr *Topic) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Topic type. +func (tr *Topic) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/eventgrid/v1beta1/zz_generated.resolvers.go b/apis/eventgrid/v1beta1/zz_generated.resolvers.go index 0b90c2052..e364193be 100644 --- a/apis/eventgrid/v1beta1/zz_generated.resolvers.go +++ b/apis/eventgrid/v1beta1/zz_generated.resolvers.go @@ -58,7 +58,7 @@ func (mg *DomainTopic) ResolveReferences(ctx context.Context, c client.Reader) e var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("eventgrid.azure.upbound.io", "v1beta1", "Domain", "DomainList") + m, l, err = apisresolver.GetManagedResource("eventgrid.azure.upbound.io", "v1beta2", "Domain", "DomainList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/eventgrid/v1beta2/zz_domain_terraformed.go b/apis/eventgrid/v1beta2/zz_domain_terraformed.go new file mode 100755 index 000000000..644270797 --- /dev/null +++ b/apis/eventgrid/v1beta2/zz_domain_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: 
Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Domain +func (mg *Domain) GetTerraformResourceType() string { + return "azurerm_eventgrid_domain" +} + +// GetConnectionDetailsMapping for this Domain +func (tr *Domain) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"primary_access_key": "status.atProvider.primaryAccessKey", "secondary_access_key": "status.atProvider.secondaryAccessKey"} +} + +// GetObservation of this Domain +func (tr *Domain) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Domain +func (tr *Domain) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Domain +func (tr *Domain) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Domain +func (tr *Domain) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Domain +func (tr *Domain) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Domain +func (tr *Domain) GetInitParameters() (map[string]any, error) { + 
p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Domain +func (tr *Domain) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Domain using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Domain) LateInitialize(attrs []byte) (bool, error) { + params := &DomainParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Domain) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/eventgrid/v1beta2/zz_domain_types.go b/apis/eventgrid/v1beta2/zz_domain_types.go new file mode 100755 index 000000000..8076b65ab --- /dev/null +++ b/apis/eventgrid/v1beta2/zz_domain_types.go @@ -0,0 +1,394 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DomainInitParameters struct { + + // Whether to create the domain topic when the first event subscription at the scope of the domain topic is created. Defaults to true. + AutoCreateTopicWithFirstSubscription *bool `json:"autoCreateTopicWithFirstSubscription,omitempty" tf:"auto_create_topic_with_first_subscription,omitempty"` + + // Whether to delete the domain topic when the last event subscription at the scope of the domain topic is deleted. Defaults to true. + AutoDeleteTopicWithLastSubscription *bool `json:"autoDeleteTopicWithLastSubscription,omitempty" tf:"auto_delete_topic_with_last_subscription,omitempty"` + + // An identity block as defined below. + Identity *IdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // One or more inbound_ip_rule blocks as defined below. + InboundIPRule []InboundIPRuleInitParameters `json:"inboundIpRule,omitempty" tf:"inbound_ip_rule,omitempty"` + + // A input_mapping_default_values block as defined below. Changing this forces a new resource to be created. 
+ InputMappingDefaultValues *InputMappingDefaultValuesInitParameters `json:"inputMappingDefaultValues,omitempty" tf:"input_mapping_default_values,omitempty"` + + // A input_mapping_fields block as defined below. Changing this forces a new resource to be created. + InputMappingFields *InputMappingFieldsInitParameters `json:"inputMappingFields,omitempty" tf:"input_mapping_fields,omitempty"` + + // Specifies the schema in which incoming events will be published to this domain. Allowed values are CloudEventSchemaV1_0, CustomEventSchema, or EventGridSchema. Defaults to EventGridSchema. Changing this forces a new resource to be created. + InputSchema *string `json:"inputSchema,omitempty" tf:"input_schema,omitempty"` + + // Whether local authentication methods is enabled for the EventGrid Domain. Defaults to true. + LocalAuthEnabled *bool `json:"localAuthEnabled,omitempty" tf:"local_auth_enabled,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Whether or not public network access is allowed for this server. Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type DomainObservation struct { + + // Whether to create the domain topic when the first event subscription at the scope of the domain topic is created. Defaults to true. + AutoCreateTopicWithFirstSubscription *bool `json:"autoCreateTopicWithFirstSubscription,omitempty" tf:"auto_create_topic_with_first_subscription,omitempty"` + + // Whether to delete the domain topic when the last event subscription at the scope of the domain topic is deleted. Defaults to true. 
+ AutoDeleteTopicWithLastSubscription *bool `json:"autoDeleteTopicWithLastSubscription,omitempty" tf:"auto_delete_topic_with_last_subscription,omitempty"` + + // The Endpoint associated with the EventGrid Domain. + Endpoint *string `json:"endpoint,omitempty" tf:"endpoint,omitempty"` + + // The ID of the EventGrid Domain. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. + Identity *IdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // One or more inbound_ip_rule blocks as defined below. + InboundIPRule []InboundIPRuleObservation `json:"inboundIpRule,omitempty" tf:"inbound_ip_rule,omitempty"` + + // A input_mapping_default_values block as defined below. Changing this forces a new resource to be created. + InputMappingDefaultValues *InputMappingDefaultValuesObservation `json:"inputMappingDefaultValues,omitempty" tf:"input_mapping_default_values,omitempty"` + + // A input_mapping_fields block as defined below. Changing this forces a new resource to be created. + InputMappingFields *InputMappingFieldsObservation `json:"inputMappingFields,omitempty" tf:"input_mapping_fields,omitempty"` + + // Specifies the schema in which incoming events will be published to this domain. Allowed values are CloudEventSchemaV1_0, CustomEventSchema, or EventGridSchema. Defaults to EventGridSchema. Changing this forces a new resource to be created. + InputSchema *string `json:"inputSchema,omitempty" tf:"input_schema,omitempty"` + + // Whether local authentication methods is enabled for the EventGrid Domain. Defaults to true. + LocalAuthEnabled *bool `json:"localAuthEnabled,omitempty" tf:"local_auth_enabled,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Whether or not public network access is allowed for this server. Defaults to true. 
+ PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The name of the resource group in which the EventGrid Domain exists. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type DomainParameters struct { + + // Whether to create the domain topic when the first event subscription at the scope of the domain topic is created. Defaults to true. + // +kubebuilder:validation:Optional + AutoCreateTopicWithFirstSubscription *bool `json:"autoCreateTopicWithFirstSubscription,omitempty" tf:"auto_create_topic_with_first_subscription,omitempty"` + + // Whether to delete the domain topic when the last event subscription at the scope of the domain topic is deleted. Defaults to true. + // +kubebuilder:validation:Optional + AutoDeleteTopicWithLastSubscription *bool `json:"autoDeleteTopicWithLastSubscription,omitempty" tf:"auto_delete_topic_with_last_subscription,omitempty"` + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *IdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // One or more inbound_ip_rule blocks as defined below. + // +kubebuilder:validation:Optional + InboundIPRule []InboundIPRuleParameters `json:"inboundIpRule,omitempty" tf:"inbound_ip_rule,omitempty"` + + // A input_mapping_default_values block as defined below. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + InputMappingDefaultValues *InputMappingDefaultValuesParameters `json:"inputMappingDefaultValues,omitempty" tf:"input_mapping_default_values,omitempty"` + + // A input_mapping_fields block as defined below. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + InputMappingFields *InputMappingFieldsParameters `json:"inputMappingFields,omitempty" tf:"input_mapping_fields,omitempty"` + + // Specifies the schema in which incoming events will be published to this domain. Allowed values are CloudEventSchemaV1_0, CustomEventSchema, or EventGridSchema. Defaults to EventGridSchema. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + InputSchema *string `json:"inputSchema,omitempty" tf:"input_schema,omitempty"` + + // Whether local authentication methods is enabled for the EventGrid Domain. Defaults to true. + // +kubebuilder:validation:Optional + LocalAuthEnabled *bool `json:"localAuthEnabled,omitempty" tf:"local_auth_enabled,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Whether or not public network access is allowed for this server. Defaults to true. + // +kubebuilder:validation:Optional + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The name of the resource group in which the EventGrid Domain exists. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. 
+ // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type IdentityInitParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Event Grid Domain. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Event Grid Domain. Possible values are SystemAssigned, UserAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityObservation struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Event Grid Domain. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Principal ID associated with this Managed Service Identity. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID associated with this Managed Service Identity. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Event Grid Domain. Possible values are SystemAssigned, UserAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Event Grid Domain. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Event Grid Domain. Possible values are SystemAssigned, UserAssigned. 
+ // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type InboundIPRuleInitParameters struct { + + // The action to take when the rule is matched. Possible values are Allow. Defaults to Allow. + Action *string `json:"action,omitempty" tf:"action"` + + // The IP mask (CIDR) to match on. + IPMask *string `json:"ipMask,omitempty" tf:"ip_mask"` +} + +type InboundIPRuleObservation struct { + + // The action to take when the rule is matched. Possible values are Allow. Defaults to Allow. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The IP mask (CIDR) to match on. + IPMask *string `json:"ipMask,omitempty" tf:"ip_mask,omitempty"` +} + +type InboundIPRuleParameters struct { + + // The action to take when the rule is matched. Possible values are Allow. Defaults to Allow. + // +kubebuilder:validation:Optional + Action *string `json:"action,omitempty" tf:"action"` + + // The IP mask (CIDR) to match on. + // +kubebuilder:validation:Optional + IPMask *string `json:"ipMask,omitempty" tf:"ip_mask"` +} + +type InputMappingDefaultValuesInitParameters struct { + + // Specifies the default data version of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. + DataVersion *string `json:"dataVersion,omitempty" tf:"data_version,omitempty"` + + // Specifies the default event type of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. + EventType *string `json:"eventType,omitempty" tf:"event_type,omitempty"` + + // Specifies the default subject of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. + Subject *string `json:"subject,omitempty" tf:"subject,omitempty"` +} + +type InputMappingDefaultValuesObservation struct { + + // Specifies the default data version of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. 
+ DataVersion *string `json:"dataVersion,omitempty" tf:"data_version,omitempty"` + + // Specifies the default event type of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. + EventType *string `json:"eventType,omitempty" tf:"event_type,omitempty"` + + // Specifies the default subject of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. + Subject *string `json:"subject,omitempty" tf:"subject,omitempty"` +} + +type InputMappingDefaultValuesParameters struct { + + // Specifies the default data version of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + DataVersion *string `json:"dataVersion,omitempty" tf:"data_version,omitempty"` + + // Specifies the default event type of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + EventType *string `json:"eventType,omitempty" tf:"event_type,omitempty"` + + // Specifies the default subject of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Subject *string `json:"subject,omitempty" tf:"subject,omitempty"` +} + +type InputMappingFieldsInitParameters struct { + + // Specifies the data version of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. + DataVersion *string `json:"dataVersion,omitempty" tf:"data_version,omitempty"` + + // Specifies the event time of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. + EventTime *string `json:"eventTime,omitempty" tf:"event_time,omitempty"` + + // Specifies the event type of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. 
+ EventType *string `json:"eventType,omitempty" tf:"event_type,omitempty"` + + // Specifies the id of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies the subject of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. + Subject *string `json:"subject,omitempty" tf:"subject,omitempty"` + + // Specifies the topic of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. + Topic *string `json:"topic,omitempty" tf:"topic,omitempty"` +} + +type InputMappingFieldsObservation struct { + + // Specifies the data version of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. + DataVersion *string `json:"dataVersion,omitempty" tf:"data_version,omitempty"` + + // Specifies the event time of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. + EventTime *string `json:"eventTime,omitempty" tf:"event_time,omitempty"` + + // Specifies the event type of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. + EventType *string `json:"eventType,omitempty" tf:"event_type,omitempty"` + + // Specifies the id of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies the subject of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. + Subject *string `json:"subject,omitempty" tf:"subject,omitempty"` + + // Specifies the topic of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. 
+ Topic *string `json:"topic,omitempty" tf:"topic,omitempty"` +} + +type InputMappingFieldsParameters struct { + + // Specifies the data version of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + DataVersion *string `json:"dataVersion,omitempty" tf:"data_version,omitempty"` + + // Specifies the event time of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + EventTime *string `json:"eventTime,omitempty" tf:"event_time,omitempty"` + + // Specifies the event type of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + EventType *string `json:"eventType,omitempty" tf:"event_type,omitempty"` + + // Specifies the id of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies the subject of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Subject *string `json:"subject,omitempty" tf:"subject,omitempty"` + + // Specifies the topic of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Topic *string `json:"topic,omitempty" tf:"topic,omitempty"` +} + +// DomainSpec defines the desired state of Domain +type DomainSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider DomainParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. 
The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider DomainInitParameters `json:"initProvider,omitempty"` +} + +// DomainStatus defines the observed state of Domain. +type DomainStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider DomainObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Domain is the Schema for the Domains API. Manages an EventGrid Domain +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type Domain struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + Spec DomainSpec `json:"spec"` + Status DomainStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// DomainList contains a list of Domains +type DomainList struct { + metav1.TypeMeta `json:",inline"` 
+ metav1.ListMeta `json:"metadata,omitempty"` + Items []Domain `json:"items"` +} + +// Repository type metadata. +var ( + Domain_Kind = "Domain" + Domain_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Domain_Kind}.String() + Domain_KindAPIVersion = Domain_Kind + "." + CRDGroupVersion.String() + Domain_GroupVersionKind = CRDGroupVersion.WithKind(Domain_Kind) +) + +func init() { + SchemeBuilder.Register(&Domain{}, &DomainList{}) +} diff --git a/apis/eventgrid/v1beta2/zz_eventsubscription_terraformed.go b/apis/eventgrid/v1beta2/zz_eventsubscription_terraformed.go new file mode 100755 index 000000000..35e510bfb --- /dev/null +++ b/apis/eventgrid/v1beta2/zz_eventsubscription_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this EventSubscription +func (mg *EventSubscription) GetTerraformResourceType() string { + return "azurerm_eventgrid_event_subscription" +} + +// GetConnectionDetailsMapping for this EventSubscription +func (tr *EventSubscription) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"delivery_property[*].value": "spec.forProvider.deliveryProperty[*].valueSecretRef"} +} + +// GetObservation of this EventSubscription +func (tr *EventSubscription) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this EventSubscription +func (tr *EventSubscription) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return 
json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this EventSubscription +func (tr *EventSubscription) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this EventSubscription +func (tr *EventSubscription) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this EventSubscription +func (tr *EventSubscription) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this EventSubscription +func (tr *EventSubscription) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this EventSubscription +func (tr *EventSubscription) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this EventSubscription using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *EventSubscription) LateInitialize(attrs []byte) (bool, error) { + params := &EventSubscriptionParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *EventSubscription) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/eventgrid/v1beta2/zz_eventsubscription_types.go b/apis/eventgrid/v1beta2/zz_eventsubscription_types.go new file mode 100755 index 000000000..fed817af6 --- /dev/null +++ b/apis/eventgrid/v1beta2/zz_eventsubscription_types.go @@ -0,0 +1,1433 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AdvancedFilterInitParameters struct { + + // Compares a value of an event using a single boolean value. + BoolEquals []BoolEqualsInitParameters `json:"boolEquals,omitempty" tf:"bool_equals,omitempty"` + + // Evaluates if a value of an event isn't NULL or undefined. 
+ IsNotNull []IsNotNullInitParameters `json:"isNotNull,omitempty" tf:"is_not_null,omitempty"` + + // Evaluates if a value of an event is NULL or undefined. + IsNullOrUndefined []IsNullOrUndefinedInitParameters `json:"isNullOrUndefined,omitempty" tf:"is_null_or_undefined,omitempty"` + + // Compares a value of an event using a single floating point number. + NumberGreaterThan []NumberGreaterThanInitParameters `json:"numberGreaterThan,omitempty" tf:"number_greater_than,omitempty"` + + // Compares a value of an event using a single floating point number. + NumberGreaterThanOrEquals []NumberGreaterThanOrEqualsInitParameters `json:"numberGreaterThanOrEquals,omitempty" tf:"number_greater_than_or_equals,omitempty"` + + // Compares a value of an event using multiple floating point numbers. + NumberIn []NumberInInitParameters `json:"numberIn,omitempty" tf:"number_in,omitempty"` + + // Compares a value of an event using multiple floating point number ranges. + NumberInRange []NumberInRangeInitParameters `json:"numberInRange,omitempty" tf:"number_in_range,omitempty"` + + // Compares a value of an event using a single floating point number. + NumberLessThan []NumberLessThanInitParameters `json:"numberLessThan,omitempty" tf:"number_less_than,omitempty"` + + // Compares a value of an event using a single floating point number. + NumberLessThanOrEquals []NumberLessThanOrEqualsInitParameters `json:"numberLessThanOrEquals,omitempty" tf:"number_less_than_or_equals,omitempty"` + + // Compares a value of an event using multiple floating point numbers. + NumberNotIn []NumberNotInInitParameters `json:"numberNotIn,omitempty" tf:"number_not_in,omitempty"` + + // Compares a value of an event using multiple floating point number ranges. + NumberNotInRange []NumberNotInRangeInitParameters `json:"numberNotInRange,omitempty" tf:"number_not_in_range,omitempty"` + + // Compares a value of an event using multiple string values. 
+ StringBeginsWith []StringBeginsWithInitParameters `json:"stringBeginsWith,omitempty" tf:"string_begins_with,omitempty"` + + // Compares a value of an event using multiple string values. + StringContains []StringContainsInitParameters `json:"stringContains,omitempty" tf:"string_contains,omitempty"` + + // Compares a value of an event using multiple string values. + StringEndsWith []StringEndsWithInitParameters `json:"stringEndsWith,omitempty" tf:"string_ends_with,omitempty"` + + // Compares a value of an event using multiple string values. + StringIn []StringInInitParameters `json:"stringIn,omitempty" tf:"string_in,omitempty"` + + // Compares a value of an event using multiple string values. + StringNotBeginsWith []StringNotBeginsWithInitParameters `json:"stringNotBeginsWith,omitempty" tf:"string_not_begins_with,omitempty"` + + // Compares a value of an event using multiple string values. + StringNotContains []StringNotContainsInitParameters `json:"stringNotContains,omitempty" tf:"string_not_contains,omitempty"` + + // Compares a value of an event using multiple string values. + StringNotEndsWith []StringNotEndsWithInitParameters `json:"stringNotEndsWith,omitempty" tf:"string_not_ends_with,omitempty"` + + // Compares a value of an event using multiple string values. + StringNotIn []StringNotInInitParameters `json:"stringNotIn,omitempty" tf:"string_not_in,omitempty"` +} + +type AdvancedFilterObservation struct { + + // Compares a value of an event using a single boolean value. + BoolEquals []BoolEqualsObservation `json:"boolEquals,omitempty" tf:"bool_equals,omitempty"` + + // Evaluates if a value of an event isn't NULL or undefined. + IsNotNull []IsNotNullObservation `json:"isNotNull,omitempty" tf:"is_not_null,omitempty"` + + // Evaluates if a value of an event is NULL or undefined. 
+ IsNullOrUndefined []IsNullOrUndefinedObservation `json:"isNullOrUndefined,omitempty" tf:"is_null_or_undefined,omitempty"` + + // Compares a value of an event using a single floating point number. + NumberGreaterThan []NumberGreaterThanObservation `json:"numberGreaterThan,omitempty" tf:"number_greater_than,omitempty"` + + // Compares a value of an event using a single floating point number. + NumberGreaterThanOrEquals []NumberGreaterThanOrEqualsObservation `json:"numberGreaterThanOrEquals,omitempty" tf:"number_greater_than_or_equals,omitempty"` + + // Compares a value of an event using multiple floating point numbers. + NumberIn []NumberInObservation `json:"numberIn,omitempty" tf:"number_in,omitempty"` + + // Compares a value of an event using multiple floating point number ranges. + NumberInRange []NumberInRangeObservation `json:"numberInRange,omitempty" tf:"number_in_range,omitempty"` + + // Compares a value of an event using a single floating point number. + NumberLessThan []NumberLessThanObservation `json:"numberLessThan,omitempty" tf:"number_less_than,omitempty"` + + // Compares a value of an event using a single floating point number. + NumberLessThanOrEquals []NumberLessThanOrEqualsObservation `json:"numberLessThanOrEquals,omitempty" tf:"number_less_than_or_equals,omitempty"` + + // Compares a value of an event using multiple floating point numbers. + NumberNotIn []NumberNotInObservation `json:"numberNotIn,omitempty" tf:"number_not_in,omitempty"` + + // Compares a value of an event using multiple floating point number ranges. + NumberNotInRange []NumberNotInRangeObservation `json:"numberNotInRange,omitempty" tf:"number_not_in_range,omitempty"` + + // Compares a value of an event using multiple string values. + StringBeginsWith []StringBeginsWithObservation `json:"stringBeginsWith,omitempty" tf:"string_begins_with,omitempty"` + + // Compares a value of an event using multiple string values. 
+ StringContains []StringContainsObservation `json:"stringContains,omitempty" tf:"string_contains,omitempty"` + + // Compares a value of an event using multiple string values. + StringEndsWith []StringEndsWithObservation `json:"stringEndsWith,omitempty" tf:"string_ends_with,omitempty"` + + // Compares a value of an event using multiple string values. + StringIn []StringInObservation `json:"stringIn,omitempty" tf:"string_in,omitempty"` + + // Compares a value of an event using multiple string values. + StringNotBeginsWith []StringNotBeginsWithObservation `json:"stringNotBeginsWith,omitempty" tf:"string_not_begins_with,omitempty"` + + // Compares a value of an event using multiple string values. + StringNotContains []StringNotContainsObservation `json:"stringNotContains,omitempty" tf:"string_not_contains,omitempty"` + + // Compares a value of an event using multiple string values. + StringNotEndsWith []StringNotEndsWithObservation `json:"stringNotEndsWith,omitempty" tf:"string_not_ends_with,omitempty"` + + // Compares a value of an event using multiple string values. + StringNotIn []StringNotInObservation `json:"stringNotIn,omitempty" tf:"string_not_in,omitempty"` +} + +type AdvancedFilterParameters struct { + + // Compares a value of an event using a single boolean value. + // +kubebuilder:validation:Optional + BoolEquals []BoolEqualsParameters `json:"boolEquals,omitempty" tf:"bool_equals,omitempty"` + + // Evaluates if a value of an event isn't NULL or undefined. + // +kubebuilder:validation:Optional + IsNotNull []IsNotNullParameters `json:"isNotNull,omitempty" tf:"is_not_null,omitempty"` + + // Evaluates if a value of an event is NULL or undefined. + // +kubebuilder:validation:Optional + IsNullOrUndefined []IsNullOrUndefinedParameters `json:"isNullOrUndefined,omitempty" tf:"is_null_or_undefined,omitempty"` + + // Compares a value of an event using a single floating point number. 
+ // +kubebuilder:validation:Optional + NumberGreaterThan []NumberGreaterThanParameters `json:"numberGreaterThan,omitempty" tf:"number_greater_than,omitempty"` + + // Compares a value of an event using a single floating point number. + // +kubebuilder:validation:Optional + NumberGreaterThanOrEquals []NumberGreaterThanOrEqualsParameters `json:"numberGreaterThanOrEquals,omitempty" tf:"number_greater_than_or_equals,omitempty"` + + // Compares a value of an event using multiple floating point numbers. + // +kubebuilder:validation:Optional + NumberIn []NumberInParameters `json:"numberIn,omitempty" tf:"number_in,omitempty"` + + // Compares a value of an event using multiple floating point number ranges. + // +kubebuilder:validation:Optional + NumberInRange []NumberInRangeParameters `json:"numberInRange,omitempty" tf:"number_in_range,omitempty"` + + // Compares a value of an event using a single floating point number. + // +kubebuilder:validation:Optional + NumberLessThan []NumberLessThanParameters `json:"numberLessThan,omitempty" tf:"number_less_than,omitempty"` + + // Compares a value of an event using a single floating point number. + // +kubebuilder:validation:Optional + NumberLessThanOrEquals []NumberLessThanOrEqualsParameters `json:"numberLessThanOrEquals,omitempty" tf:"number_less_than_or_equals,omitempty"` + + // Compares a value of an event using multiple floating point numbers. + // +kubebuilder:validation:Optional + NumberNotIn []NumberNotInParameters `json:"numberNotIn,omitempty" tf:"number_not_in,omitempty"` + + // Compares a value of an event using multiple floating point number ranges. + // +kubebuilder:validation:Optional + NumberNotInRange []NumberNotInRangeParameters `json:"numberNotInRange,omitempty" tf:"number_not_in_range,omitempty"` + + // Compares a value of an event using multiple string values. 
+ // +kubebuilder:validation:Optional + StringBeginsWith []StringBeginsWithParameters `json:"stringBeginsWith,omitempty" tf:"string_begins_with,omitempty"` + + // Compares a value of an event using multiple string values. + // +kubebuilder:validation:Optional + StringContains []StringContainsParameters `json:"stringContains,omitempty" tf:"string_contains,omitempty"` + + // Compares a value of an event using multiple string values. + // +kubebuilder:validation:Optional + StringEndsWith []StringEndsWithParameters `json:"stringEndsWith,omitempty" tf:"string_ends_with,omitempty"` + + // Compares a value of an event using multiple string values. + // +kubebuilder:validation:Optional + StringIn []StringInParameters `json:"stringIn,omitempty" tf:"string_in,omitempty"` + + // Compares a value of an event using multiple string values. + // +kubebuilder:validation:Optional + StringNotBeginsWith []StringNotBeginsWithParameters `json:"stringNotBeginsWith,omitempty" tf:"string_not_begins_with,omitempty"` + + // Compares a value of an event using multiple string values. + // +kubebuilder:validation:Optional + StringNotContains []StringNotContainsParameters `json:"stringNotContains,omitempty" tf:"string_not_contains,omitempty"` + + // Compares a value of an event using multiple string values. + // +kubebuilder:validation:Optional + StringNotEndsWith []StringNotEndsWithParameters `json:"stringNotEndsWith,omitempty" tf:"string_not_ends_with,omitempty"` + + // Compares a value of an event using multiple string values. + // +kubebuilder:validation:Optional + StringNotIn []StringNotInParameters `json:"stringNotIn,omitempty" tf:"string_not_in,omitempty"` +} + +type AzureFunctionEndpointInitParameters struct { + + // Specifies the ID of the Function where the Event Subscription will receive events. This must be the functions ID in format {function_app.id}/functions/{name}. 
+ FunctionID *string `json:"functionId,omitempty" tf:"function_id,omitempty"` + + // Maximum number of events per batch. + MaxEventsPerBatch *float64 `json:"maxEventsPerBatch,omitempty" tf:"max_events_per_batch,omitempty"` + + // Preferred batch size in Kilobytes. + PreferredBatchSizeInKilobytes *float64 `json:"preferredBatchSizeInKilobytes,omitempty" tf:"preferred_batch_size_in_kilobytes,omitempty"` +} + +type AzureFunctionEndpointObservation struct { + + // Specifies the ID of the Function where the Event Subscription will receive events. This must be the functions ID in format {function_app.id}/functions/{name}. + FunctionID *string `json:"functionId,omitempty" tf:"function_id,omitempty"` + + // Maximum number of events per batch. + MaxEventsPerBatch *float64 `json:"maxEventsPerBatch,omitempty" tf:"max_events_per_batch,omitempty"` + + // Preferred batch size in Kilobytes. + PreferredBatchSizeInKilobytes *float64 `json:"preferredBatchSizeInKilobytes,omitempty" tf:"preferred_batch_size_in_kilobytes,omitempty"` +} + +type AzureFunctionEndpointParameters struct { + + // Specifies the ID of the Function where the Event Subscription will receive events. This must be the functions ID in format {function_app.id}/functions/{name}. + // +kubebuilder:validation:Optional + FunctionID *string `json:"functionId" tf:"function_id,omitempty"` + + // Maximum number of events per batch. + // +kubebuilder:validation:Optional + MaxEventsPerBatch *float64 `json:"maxEventsPerBatch,omitempty" tf:"max_events_per_batch,omitempty"` + + // Preferred batch size in Kilobytes. + // +kubebuilder:validation:Optional + PreferredBatchSizeInKilobytes *float64 `json:"preferredBatchSizeInKilobytes,omitempty" tf:"preferred_batch_size_in_kilobytes,omitempty"` +} + +type BoolEqualsInitParameters struct { + + // Specifies the field within the event data that you want to use for filtering. Type of the field can be a number, boolean, or string. 
+ Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Specifies a single value to compare to when using a single value operator. + Value *bool `json:"value,omitempty" tf:"value,omitempty"` +} + +type BoolEqualsObservation struct { + + // Specifies the field within the event data that you want to use for filtering. Type of the field can be a number, boolean, or string. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Specifies a single value to compare to when using a single value operator. + Value *bool `json:"value,omitempty" tf:"value,omitempty"` +} + +type BoolEqualsParameters struct { + + // Specifies the field within the event data that you want to use for filtering. Type of the field can be a number, boolean, or string. + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` + + // Specifies a single value to compare to when using a single value operator. + // +kubebuilder:validation:Optional + Value *bool `json:"value" tf:"value,omitempty"` +} + +type DeadLetterIdentityInitParameters struct { + + // Specifies the type of Managed Service Identity that is used for dead lettering. Allowed value is SystemAssigned, UserAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // The user identity associated with the resource. + UserAssignedIdentity *string `json:"userAssignedIdentity,omitempty" tf:"user_assigned_identity,omitempty"` +} + +type DeadLetterIdentityObservation struct { + + // Specifies the type of Managed Service Identity that is used for dead lettering. Allowed value is SystemAssigned, UserAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // The user identity associated with the resource. + UserAssignedIdentity *string `json:"userAssignedIdentity,omitempty" tf:"user_assigned_identity,omitempty"` +} + +type DeadLetterIdentityParameters struct { + + // Specifies the type of Managed Service Identity that is used for dead lettering. 
Allowed value is SystemAssigned, UserAssigned. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` + + // The user identity associated with the resource. + // +kubebuilder:validation:Optional + UserAssignedIdentity *string `json:"userAssignedIdentity,omitempty" tf:"user_assigned_identity,omitempty"` +} + +type DeliveryIdentityInitParameters struct { + + // Specifies the type of Managed Service Identity that is used for event delivery. Allowed value is SystemAssigned, UserAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // The user identity associated with the resource. + UserAssignedIdentity *string `json:"userAssignedIdentity,omitempty" tf:"user_assigned_identity,omitempty"` +} + +type DeliveryIdentityObservation struct { + + // Specifies the type of Managed Service Identity that is used for event delivery. Allowed value is SystemAssigned, UserAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // The user identity associated with the resource. + UserAssignedIdentity *string `json:"userAssignedIdentity,omitempty" tf:"user_assigned_identity,omitempty"` +} + +type DeliveryIdentityParameters struct { + + // Specifies the type of Managed Service Identity that is used for event delivery. Allowed value is SystemAssigned, UserAssigned. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` + + // The user identity associated with the resource. + // +kubebuilder:validation:Optional + UserAssignedIdentity *string `json:"userAssignedIdentity,omitempty" tf:"user_assigned_identity,omitempty"` +} + +type DeliveryPropertyInitParameters struct { + + // The name of the header to send on to the destination + HeaderName *string `json:"headerName,omitempty" tf:"header_name,omitempty"` + + // True if the value is a secret and should be protected, otherwise false. 
If True, then this value won't be returned from Azure API calls + Secret *bool `json:"secret,omitempty" tf:"secret,omitempty"` + + // If the type is Dynamic, then provide the payload field to be used as the value. Valid source fields differ by subscription type. + SourceField *string `json:"sourceField,omitempty" tf:"source_field,omitempty"` + + // Either Static or Dynamic + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type DeliveryPropertyObservation struct { + + // The name of the header to send on to the destination + HeaderName *string `json:"headerName,omitempty" tf:"header_name,omitempty"` + + // True if the value is a secret and should be protected, otherwise false. If True, then this value won't be returned from Azure API calls + Secret *bool `json:"secret,omitempty" tf:"secret,omitempty"` + + // If the type is Dynamic, then provide the payload field to be used as the value. Valid source fields differ by subscription type. + SourceField *string `json:"sourceField,omitempty" tf:"source_field,omitempty"` + + // Either Static or Dynamic + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type DeliveryPropertyParameters struct { + + // The name of the header to send on to the destination + // +kubebuilder:validation:Optional + HeaderName *string `json:"headerName" tf:"header_name,omitempty"` + + // True if the value is a secret and should be protected, otherwise false. If True, then this value won't be returned from Azure API calls + // +kubebuilder:validation:Optional + Secret *bool `json:"secret,omitempty" tf:"secret,omitempty"` + + // If the type is Dynamic, then provide the payload field to be used as the value. Valid source fields differ by subscription type. 
+ // +kubebuilder:validation:Optional + SourceField *string `json:"sourceField,omitempty" tf:"source_field,omitempty"` + + // Either Static or Dynamic + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` + + // If the type is Static, then provide the value to use + // +kubebuilder:validation:Optional + ValueSecretRef *v1.SecretKeySelector `json:"valueSecretRef,omitempty" tf:"-"` +} + +type EventSubscriptionInitParameters struct { + + // A advanced_filter block as defined below. + AdvancedFilter *AdvancedFilterInitParameters `json:"advancedFilter,omitempty" tf:"advanced_filter,omitempty"` + + // Specifies whether advanced filters should be evaluated against an array of values instead of expecting a singular value. Defaults to false. + AdvancedFilteringOnArraysEnabled *bool `json:"advancedFilteringOnArraysEnabled,omitempty" tf:"advanced_filtering_on_arrays_enabled,omitempty"` + + // An azure_function_endpoint block as defined below. + AzureFunctionEndpoint *AzureFunctionEndpointInitParameters `json:"azureFunctionEndpoint,omitempty" tf:"azure_function_endpoint,omitempty"` + + // A dead_letter_identity block as defined below. + DeadLetterIdentity *DeadLetterIdentityInitParameters `json:"deadLetterIdentity,omitempty" tf:"dead_letter_identity,omitempty"` + + // A delivery_identity block as defined below. + DeliveryIdentity *DeliveryIdentityInitParameters `json:"deliveryIdentity,omitempty" tf:"delivery_identity,omitempty"` + + // One or more delivery_property blocks as defined below. + DeliveryProperty []DeliveryPropertyInitParameters `json:"deliveryProperty,omitempty" tf:"delivery_property,omitempty"` + + // Specifies the event delivery schema for the event subscription. Possible values include: EventGridSchema, CloudEventSchemaV1_0, CustomInputSchema. Defaults to EventGridSchema. Changing this forces a new resource to be created. 
+ EventDeliverySchema *string `json:"eventDeliverySchema,omitempty" tf:"event_delivery_schema,omitempty"` + + // Specifies the id where the Event Hub is located. + EventHubEndpointID *string `json:"eventhubEndpointId,omitempty" tf:"eventhub_endpoint_id,omitempty"` + + // Specifies the expiration time of the event subscription (Datetime Format RFC 3339). + ExpirationTimeUtc *string `json:"expirationTimeUtc,omitempty" tf:"expiration_time_utc,omitempty"` + + // Specifies the id where the Hybrid Connection is located. + HybridConnectionEndpointID *string `json:"hybridConnectionEndpointId,omitempty" tf:"hybrid_connection_endpoint_id,omitempty"` + + // A list of applicable event types that need to be part of the event subscription. + IncludedEventTypes []*string `json:"includedEventTypes,omitempty" tf:"included_event_types,omitempty"` + + // A list of labels to assign to the event subscription. + Labels []*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Specifies the name of the EventGrid Event Subscription resource. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A retry_policy block as defined below. + RetryPolicy *RetryPolicyInitParameters `json:"retryPolicy,omitempty" tf:"retry_policy,omitempty"` + + // Specifies the scope at which the EventGrid Event Subscription should be created. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + Scope *string `json:"scope,omitempty" tf:"scope,omitempty"` + + // Reference to a ResourceGroup in azure to populate scope. + // +kubebuilder:validation:Optional + ScopeRef *v1.Reference `json:"scopeRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate scope. 
+ // +kubebuilder:validation:Optional + ScopeSelector *v1.Selector `json:"scopeSelector,omitempty" tf:"-"` + + // Specifies the id where the Service Bus Queue is located. + ServiceBusQueueEndpointID *string `json:"serviceBusQueueEndpointId,omitempty" tf:"service_bus_queue_endpoint_id,omitempty"` + + // Specifies the id where the Service Bus Topic is located. + ServiceBusTopicEndpointID *string `json:"serviceBusTopicEndpointId,omitempty" tf:"service_bus_topic_endpoint_id,omitempty"` + + // A storage_blob_dead_letter_destination block as defined below. + StorageBlobDeadLetterDestination *StorageBlobDeadLetterDestinationInitParameters `json:"storageBlobDeadLetterDestination,omitempty" tf:"storage_blob_dead_letter_destination,omitempty"` + + // A storage_queue_endpoint block as defined below. + StorageQueueEndpoint *StorageQueueEndpointInitParameters `json:"storageQueueEndpoint,omitempty" tf:"storage_queue_endpoint,omitempty"` + + // A subject_filter block as defined below. + SubjectFilter *SubjectFilterInitParameters `json:"subjectFilter,omitempty" tf:"subject_filter,omitempty"` + + // A webhook_endpoint block as defined below. + WebhookEndpoint *WebhookEndpointInitParameters `json:"webhookEndpoint,omitempty" tf:"webhook_endpoint,omitempty"` +} + +type EventSubscriptionObservation struct { + + // A advanced_filter block as defined below. + AdvancedFilter *AdvancedFilterObservation `json:"advancedFilter,omitempty" tf:"advanced_filter,omitempty"` + + // Specifies whether advanced filters should be evaluated against an array of values instead of expecting a singular value. Defaults to false. + AdvancedFilteringOnArraysEnabled *bool `json:"advancedFilteringOnArraysEnabled,omitempty" tf:"advanced_filtering_on_arrays_enabled,omitempty"` + + // An azure_function_endpoint block as defined below. 
+ AzureFunctionEndpoint *AzureFunctionEndpointObservation `json:"azureFunctionEndpoint,omitempty" tf:"azure_function_endpoint,omitempty"` + + // A dead_letter_identity block as defined below. + DeadLetterIdentity *DeadLetterIdentityObservation `json:"deadLetterIdentity,omitempty" tf:"dead_letter_identity,omitempty"` + + // A delivery_identity block as defined below. + DeliveryIdentity *DeliveryIdentityObservation `json:"deliveryIdentity,omitempty" tf:"delivery_identity,omitempty"` + + // One or more delivery_property blocks as defined below. + DeliveryProperty []DeliveryPropertyObservation `json:"deliveryProperty,omitempty" tf:"delivery_property,omitempty"` + + // Specifies the event delivery schema for the event subscription. Possible values include: EventGridSchema, CloudEventSchemaV1_0, CustomInputSchema. Defaults to EventGridSchema. Changing this forces a new resource to be created. + EventDeliverySchema *string `json:"eventDeliverySchema,omitempty" tf:"event_delivery_schema,omitempty"` + + // Specifies the id where the Event Hub is located. + EventHubEndpointID *string `json:"eventhubEndpointId,omitempty" tf:"eventhub_endpoint_id,omitempty"` + + // Specifies the expiration time of the event subscription (Datetime Format RFC 3339). + ExpirationTimeUtc *string `json:"expirationTimeUtc,omitempty" tf:"expiration_time_utc,omitempty"` + + // Specifies the id where the Hybrid Connection is located. + HybridConnectionEndpointID *string `json:"hybridConnectionEndpointId,omitempty" tf:"hybrid_connection_endpoint_id,omitempty"` + + // The ID of the EventGrid Event Subscription. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A list of applicable event types that need to be part of the event subscription. + IncludedEventTypes []*string `json:"includedEventTypes,omitempty" tf:"included_event_types,omitempty"` + + // A list of labels to assign to the event subscription. 
+ Labels []*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Specifies the name of the EventGrid Event Subscription resource. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A retry_policy block as defined below. + RetryPolicy *RetryPolicyObservation `json:"retryPolicy,omitempty" tf:"retry_policy,omitempty"` + + // Specifies the scope at which the EventGrid Event Subscription should be created. Changing this forces a new resource to be created. + Scope *string `json:"scope,omitempty" tf:"scope,omitempty"` + + // Specifies the id where the Service Bus Queue is located. + ServiceBusQueueEndpointID *string `json:"serviceBusQueueEndpointId,omitempty" tf:"service_bus_queue_endpoint_id,omitempty"` + + // Specifies the id where the Service Bus Topic is located. + ServiceBusTopicEndpointID *string `json:"serviceBusTopicEndpointId,omitempty" tf:"service_bus_topic_endpoint_id,omitempty"` + + // A storage_blob_dead_letter_destination block as defined below. + StorageBlobDeadLetterDestination *StorageBlobDeadLetterDestinationObservation `json:"storageBlobDeadLetterDestination,omitempty" tf:"storage_blob_dead_letter_destination,omitempty"` + + // A storage_queue_endpoint block as defined below. + StorageQueueEndpoint *StorageQueueEndpointObservation `json:"storageQueueEndpoint,omitempty" tf:"storage_queue_endpoint,omitempty"` + + // A subject_filter block as defined below. + SubjectFilter *SubjectFilterObservation `json:"subjectFilter,omitempty" tf:"subject_filter,omitempty"` + + // A webhook_endpoint block as defined below. + WebhookEndpoint *WebhookEndpointObservation `json:"webhookEndpoint,omitempty" tf:"webhook_endpoint,omitempty"` +} + +type EventSubscriptionParameters struct { + + // A advanced_filter block as defined below. 
+ // +kubebuilder:validation:Optional + AdvancedFilter *AdvancedFilterParameters `json:"advancedFilter,omitempty" tf:"advanced_filter,omitempty"` + + // Specifies whether advanced filters should be evaluated against an array of values instead of expecting a singular value. Defaults to false. + // +kubebuilder:validation:Optional + AdvancedFilteringOnArraysEnabled *bool `json:"advancedFilteringOnArraysEnabled,omitempty" tf:"advanced_filtering_on_arrays_enabled,omitempty"` + + // An azure_function_endpoint block as defined below. + // +kubebuilder:validation:Optional + AzureFunctionEndpoint *AzureFunctionEndpointParameters `json:"azureFunctionEndpoint,omitempty" tf:"azure_function_endpoint,omitempty"` + + // A dead_letter_identity block as defined below. + // +kubebuilder:validation:Optional + DeadLetterIdentity *DeadLetterIdentityParameters `json:"deadLetterIdentity,omitempty" tf:"dead_letter_identity,omitempty"` + + // A delivery_identity block as defined below. + // +kubebuilder:validation:Optional + DeliveryIdentity *DeliveryIdentityParameters `json:"deliveryIdentity,omitempty" tf:"delivery_identity,omitempty"` + + // One or more delivery_property blocks as defined below. + // +kubebuilder:validation:Optional + DeliveryProperty []DeliveryPropertyParameters `json:"deliveryProperty,omitempty" tf:"delivery_property,omitempty"` + + // Specifies the event delivery schema for the event subscription. Possible values include: EventGridSchema, CloudEventSchemaV1_0, CustomInputSchema. Defaults to EventGridSchema. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + EventDeliverySchema *string `json:"eventDeliverySchema,omitempty" tf:"event_delivery_schema,omitempty"` + + // Specifies the id where the Event Hub is located. 
+ // +kubebuilder:validation:Optional + EventHubEndpointID *string `json:"eventhubEndpointId,omitempty" tf:"eventhub_endpoint_id,omitempty"` + + // Specifies the expiration time of the event subscription (Datetime Format RFC 3339). + // +kubebuilder:validation:Optional + ExpirationTimeUtc *string `json:"expirationTimeUtc,omitempty" tf:"expiration_time_utc,omitempty"` + + // Specifies the id where the Hybrid Connection is located. + // +kubebuilder:validation:Optional + HybridConnectionEndpointID *string `json:"hybridConnectionEndpointId,omitempty" tf:"hybrid_connection_endpoint_id,omitempty"` + + // A list of applicable event types that need to be part of the event subscription. + // +kubebuilder:validation:Optional + IncludedEventTypes []*string `json:"includedEventTypes,omitempty" tf:"included_event_types,omitempty"` + + // A list of labels to assign to the event subscription. + // +kubebuilder:validation:Optional + Labels []*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Specifies the name of the EventGrid Event Subscription resource. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A retry_policy block as defined below. + // +kubebuilder:validation:Optional + RetryPolicy *RetryPolicyParameters `json:"retryPolicy,omitempty" tf:"retry_policy,omitempty"` + + // Specifies the scope at which the EventGrid Event Subscription should be created. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + Scope *string `json:"scope,omitempty" tf:"scope,omitempty"` + + // Reference to a ResourceGroup in azure to populate scope. 
+ // +kubebuilder:validation:Optional + ScopeRef *v1.Reference `json:"scopeRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate scope. + // +kubebuilder:validation:Optional + ScopeSelector *v1.Selector `json:"scopeSelector,omitempty" tf:"-"` + + // Specifies the id where the Service Bus Queue is located. + // +kubebuilder:validation:Optional + ServiceBusQueueEndpointID *string `json:"serviceBusQueueEndpointId,omitempty" tf:"service_bus_queue_endpoint_id,omitempty"` + + // Specifies the id where the Service Bus Topic is located. + // +kubebuilder:validation:Optional + ServiceBusTopicEndpointID *string `json:"serviceBusTopicEndpointId,omitempty" tf:"service_bus_topic_endpoint_id,omitempty"` + + // A storage_blob_dead_letter_destination block as defined below. + // +kubebuilder:validation:Optional + StorageBlobDeadLetterDestination *StorageBlobDeadLetterDestinationParameters `json:"storageBlobDeadLetterDestination,omitempty" tf:"storage_blob_dead_letter_destination,omitempty"` + + // A storage_queue_endpoint block as defined below. + // +kubebuilder:validation:Optional + StorageQueueEndpoint *StorageQueueEndpointParameters `json:"storageQueueEndpoint,omitempty" tf:"storage_queue_endpoint,omitempty"` + + // A subject_filter block as defined below. + // +kubebuilder:validation:Optional + SubjectFilter *SubjectFilterParameters `json:"subjectFilter,omitempty" tf:"subject_filter,omitempty"` + + // A webhook_endpoint block as defined below. + // +kubebuilder:validation:Optional + WebhookEndpoint *WebhookEndpointParameters `json:"webhookEndpoint,omitempty" tf:"webhook_endpoint,omitempty"` +} + +type IsNotNullInitParameters struct { + + // Specifies the field within the event data that you want to use for filtering. Type of the field can be a number, boolean, or string. + Key *string `json:"key,omitempty" tf:"key,omitempty"` +} + +type IsNotNullObservation struct { + + // Specifies the field within the event data that you want to use for filtering. 
Type of the field can be a number, boolean, or string. + Key *string `json:"key,omitempty" tf:"key,omitempty"` +} + +type IsNotNullParameters struct { + + // Specifies the field within the event data that you want to use for filtering. Type of the field can be a number, boolean, or string. + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` +} + +type IsNullOrUndefinedInitParameters struct { + + // Specifies the field within the event data that you want to use for filtering. Type of the field can be a number, boolean, or string. + Key *string `json:"key,omitempty" tf:"key,omitempty"` +} + +type IsNullOrUndefinedObservation struct { + + // Specifies the field within the event data that you want to use for filtering. Type of the field can be a number, boolean, or string. + Key *string `json:"key,omitempty" tf:"key,omitempty"` +} + +type IsNullOrUndefinedParameters struct { + + // Specifies the field within the event data that you want to use for filtering. Type of the field can be a number, boolean, or string. + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` +} + +type NumberGreaterThanInitParameters struct { + + // Specifies the field within the event data that you want to use for filtering. Type of the field can be a number, boolean, or string. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Specifies a single value to compare to when using a single value operator. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type NumberGreaterThanObservation struct { + + // Specifies the field within the event data that you want to use for filtering. Type of the field can be a number, boolean, or string. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Specifies a single value to compare to when using a single value operator. 
+ Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type NumberGreaterThanOrEqualsInitParameters struct { + + // Specifies the field within the event data that you want to use for filtering. Type of the field can be a number, boolean, or string. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Specifies a single value to compare to when using a single value operator. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type NumberGreaterThanOrEqualsObservation struct { + + // Specifies the field within the event data that you want to use for filtering. Type of the field can be a number, boolean, or string. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Specifies a single value to compare to when using a single value operator. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type NumberGreaterThanOrEqualsParameters struct { + + // Specifies the field within the event data that you want to use for filtering. Type of the field can be a number, boolean, or string. + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` + + // Specifies a single value to compare to when using a single value operator. + // +kubebuilder:validation:Optional + Value *float64 `json:"value" tf:"value,omitempty"` +} + +type NumberGreaterThanParameters struct { + + // Specifies the field within the event data that you want to use for filtering. Type of the field can be a number, boolean, or string. + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` + + // Specifies a single value to compare to when using a single value operator. + // +kubebuilder:validation:Optional + Value *float64 `json:"value" tf:"value,omitempty"` +} + +type NumberInInitParameters struct { + + // Specifies the field within the event data that you want to use for filtering. Type of the field can be a number, boolean, or string. 
+ Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Specifies an array of values to compare to when using a multiple values operator. + Values []*float64 `json:"values,omitempty" tf:"values,omitempty"` +} + +type NumberInObservation struct { + + // Specifies the field within the event data that you want to use for filtering. Type of the field can be a number, boolean, or string. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Specifies an array of values to compare to when using a multiple values operator. + Values []*float64 `json:"values,omitempty" tf:"values,omitempty"` +} + +type NumberInParameters struct { + + // Specifies the field within the event data that you want to use for filtering. Type of the field can be a number, boolean, or string. + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` + + // Specifies an array of values to compare to when using a multiple values operator. + // +kubebuilder:validation:Optional + Values []*float64 `json:"values" tf:"values,omitempty"` +} + +type NumberInRangeInitParameters struct { + + // Specifies the field within the event data that you want to use for filtering. Type of the field can be a number, boolean, or string. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Specifies an array of values to compare to when using a multiple values operator. + Values [][]*float64 `json:"values,omitempty" tf:"values,omitempty"` +} + +type NumberInRangeObservation struct { + + // Specifies the field within the event data that you want to use for filtering. Type of the field can be a number, boolean, or string. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Specifies an array of values to compare to when using a multiple values operator. + Values [][]*float64 `json:"values,omitempty" tf:"values,omitempty"` +} + +type NumberInRangeParameters struct { + + // Specifies the field within the event data that you want to use for filtering. 
Type of the field can be a number, boolean, or string. + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` + + // Specifies an array of values to compare to when using a multiple values operator. + // +kubebuilder:validation:Optional + Values [][]*float64 `json:"values" tf:"values,omitempty"` +} + +type NumberLessThanInitParameters struct { + + // Specifies the field within the event data that you want to use for filtering. Type of the field can be a number, boolean, or string. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Specifies a single value to compare to when using a single value operator. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type NumberLessThanObservation struct { + + // Specifies the field within the event data that you want to use for filtering. Type of the field can be a number, boolean, or string. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Specifies a single value to compare to when using a single value operator. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type NumberLessThanOrEqualsInitParameters struct { + + // Specifies the field within the event data that you want to use for filtering. Type of the field can be a number, boolean, or string. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Specifies a single value to compare to when using a single value operator. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type NumberLessThanOrEqualsObservation struct { + + // Specifies the field within the event data that you want to use for filtering. Type of the field can be a number, boolean, or string. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Specifies a single value to compare to when using a single value operator. 
+ Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type NumberLessThanOrEqualsParameters struct { + + // Specifies the field within the event data that you want to use for filtering. Type of the field can be a number, boolean, or string. + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` + + // Specifies a single value to compare to when using a single value operator. + // +kubebuilder:validation:Optional + Value *float64 `json:"value" tf:"value,omitempty"` +} + +type NumberLessThanParameters struct { + + // Specifies the field within the event data that you want to use for filtering. Type of the field can be a number, boolean, or string. + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` + + // Specifies a single value to compare to when using a single value operator. + // +kubebuilder:validation:Optional + Value *float64 `json:"value" tf:"value,omitempty"` +} + +type NumberNotInInitParameters struct { + + // Specifies the field within the event data that you want to use for filtering. Type of the field can be a number, boolean, or string. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Specifies an array of values to compare to when using a multiple values operator. + Values []*float64 `json:"values,omitempty" tf:"values,omitempty"` +} + +type NumberNotInObservation struct { + + // Specifies the field within the event data that you want to use for filtering. Type of the field can be a number, boolean, or string. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Specifies an array of values to compare to when using a multiple values operator. + Values []*float64 `json:"values,omitempty" tf:"values,omitempty"` +} + +type NumberNotInParameters struct { + + // Specifies the field within the event data that you want to use for filtering. Type of the field can be a number, boolean, or string. 
+ // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` + + // Specifies an array of values to compare to when using a multiple values operator. + // +kubebuilder:validation:Optional + Values []*float64 `json:"values" tf:"values,omitempty"` +} + +type NumberNotInRangeInitParameters struct { + + // Specifies the field within the event data that you want to use for filtering. Type of the field can be a number, boolean, or string. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Specifies an array of values to compare to when using a multiple values operator. + Values [][]*float64 `json:"values,omitempty" tf:"values,omitempty"` +} + +type NumberNotInRangeObservation struct { + + // Specifies the field within the event data that you want to use for filtering. Type of the field can be a number, boolean, or string. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Specifies an array of values to compare to when using a multiple values operator. + Values [][]*float64 `json:"values,omitempty" tf:"values,omitempty"` +} + +type NumberNotInRangeParameters struct { + + // Specifies the field within the event data that you want to use for filtering. Type of the field can be a number, boolean, or string. + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` + + // Specifies an array of values to compare to when using a multiple values operator. + // +kubebuilder:validation:Optional + Values [][]*float64 `json:"values" tf:"values,omitempty"` +} + +type RetryPolicyInitParameters struct { + + // Specifies the time to live (in minutes) for events. Supported range is 1 to 1440. See official documentation for more details. + EventTimeToLive *float64 `json:"eventTimeToLive,omitempty" tf:"event_time_to_live,omitempty"` + + // Specifies the maximum number of delivery retry attempts for events. 
+ MaxDeliveryAttempts *float64 `json:"maxDeliveryAttempts,omitempty" tf:"max_delivery_attempts,omitempty"` +} + +type RetryPolicyObservation struct { + + // Specifies the time to live (in minutes) for events. Supported range is 1 to 1440. See official documentation for more details. + EventTimeToLive *float64 `json:"eventTimeToLive,omitempty" tf:"event_time_to_live,omitempty"` + + // Specifies the maximum number of delivery retry attempts for events. + MaxDeliveryAttempts *float64 `json:"maxDeliveryAttempts,omitempty" tf:"max_delivery_attempts,omitempty"` +} + +type RetryPolicyParameters struct { + + // Specifies the time to live (in minutes) for events. Supported range is 1 to 1440. See official documentation for more details. + // +kubebuilder:validation:Optional + EventTimeToLive *float64 `json:"eventTimeToLive" tf:"event_time_to_live,omitempty"` + + // Specifies the maximum number of delivery retry attempts for events. + // +kubebuilder:validation:Optional + MaxDeliveryAttempts *float64 `json:"maxDeliveryAttempts" tf:"max_delivery_attempts,omitempty"` +} + +type StorageBlobDeadLetterDestinationInitParameters struct { + + // Specifies the id of the storage account id where the storage blob is located. + StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` + + // Specifies the name of the Storage blob container that is the destination of the deadletter events. + StorageBlobContainerName *string `json:"storageBlobContainerName,omitempty" tf:"storage_blob_container_name,omitempty"` +} + +type StorageBlobDeadLetterDestinationObservation struct { + + // Specifies the id of the storage account id where the storage blob is located. + StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` + + // Specifies the name of the Storage blob container that is the destination of the deadletter events. 
+ StorageBlobContainerName *string `json:"storageBlobContainerName,omitempty" tf:"storage_blob_container_name,omitempty"` +} + +type StorageBlobDeadLetterDestinationParameters struct { + + // Specifies the id of the storage account id where the storage blob is located. + // +kubebuilder:validation:Optional + StorageAccountID *string `json:"storageAccountId" tf:"storage_account_id,omitempty"` + + // Specifies the name of the Storage blob container that is the destination of the deadletter events. + // +kubebuilder:validation:Optional + StorageBlobContainerName *string `json:"storageBlobContainerName" tf:"storage_blob_container_name,omitempty"` +} + +type StorageQueueEndpointInitParameters struct { + + // Storage queue message time to live in seconds. + QueueMessageTimeToLiveInSeconds *float64 `json:"queueMessageTimeToLiveInSeconds,omitempty" tf:"queue_message_time_to_live_in_seconds,omitempty"` + + // Specifies the name of the storage queue where the Event Subscription will receive events. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Queue + QueueName *string `json:"queueName,omitempty" tf:"queue_name,omitempty"` + + // Reference to a Queue in storage to populate queueName. + // +kubebuilder:validation:Optional + QueueNameRef *v1.Reference `json:"queueNameRef,omitempty" tf:"-"` + + // Selector for a Queue in storage to populate queueName. + // +kubebuilder:validation:Optional + QueueNameSelector *v1.Selector `json:"queueNameSelector,omitempty" tf:"-"` + + // Specifies the id of the storage account id where the storage queue is located. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` + + // Reference to a Account in storage to populate storageAccountId. 
+ // +kubebuilder:validation:Optional + StorageAccountIDRef *v1.Reference `json:"storageAccountIdRef,omitempty" tf:"-"` + + // Selector for a Account in storage to populate storageAccountId. + // +kubebuilder:validation:Optional + StorageAccountIDSelector *v1.Selector `json:"storageAccountIdSelector,omitempty" tf:"-"` +} + +type StorageQueueEndpointObservation struct { + + // Storage queue message time to live in seconds. + QueueMessageTimeToLiveInSeconds *float64 `json:"queueMessageTimeToLiveInSeconds,omitempty" tf:"queue_message_time_to_live_in_seconds,omitempty"` + + // Specifies the name of the storage queue where the Event Subscription will receive events. + QueueName *string `json:"queueName,omitempty" tf:"queue_name,omitempty"` + + // Specifies the id of the storage account id where the storage queue is located. + StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` +} + +type StorageQueueEndpointParameters struct { + + // Storage queue message time to live in seconds. + // +kubebuilder:validation:Optional + QueueMessageTimeToLiveInSeconds *float64 `json:"queueMessageTimeToLiveInSeconds,omitempty" tf:"queue_message_time_to_live_in_seconds,omitempty"` + + // Specifies the name of the storage queue where the Event Subscription will receive events. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Queue + // +kubebuilder:validation:Optional + QueueName *string `json:"queueName,omitempty" tf:"queue_name,omitempty"` + + // Reference to a Queue in storage to populate queueName. + // +kubebuilder:validation:Optional + QueueNameRef *v1.Reference `json:"queueNameRef,omitempty" tf:"-"` + + // Selector for a Queue in storage to populate queueName. + // +kubebuilder:validation:Optional + QueueNameSelector *v1.Selector `json:"queueNameSelector,omitempty" tf:"-"` + + // Specifies the id of the storage account id where the storage queue is located. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` + + // Reference to a Account in storage to populate storageAccountId. + // +kubebuilder:validation:Optional + StorageAccountIDRef *v1.Reference `json:"storageAccountIdRef,omitempty" tf:"-"` + + // Selector for a Account in storage to populate storageAccountId. + // +kubebuilder:validation:Optional + StorageAccountIDSelector *v1.Selector `json:"storageAccountIdSelector,omitempty" tf:"-"` +} + +type StringBeginsWithInitParameters struct { + + // Specifies the field within the event data that you want to use for filtering. Type of the field can be a number, boolean, or string. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Specifies an array of values to compare to when using a multiple values operator. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type StringBeginsWithObservation struct { + + // Specifies the field within the event data that you want to use for filtering. Type of the field can be a number, boolean, or string. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Specifies an array of values to compare to when using a multiple values operator. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type StringBeginsWithParameters struct { + + // Specifies the field within the event data that you want to use for filtering. Type of the field can be a number, boolean, or string. + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` + + // Specifies an array of values to compare to when using a multiple values operator. 
+ // +kubebuilder:validation:Optional + Values []*string `json:"values" tf:"values,omitempty"` +} + +type StringContainsInitParameters struct { + + // Specifies the field within the event data that you want to use for filtering. Type of the field can be a number, boolean, or string. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Specifies an array of values to compare to when using a multiple values operator. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type StringContainsObservation struct { + + // Specifies the field within the event data that you want to use for filtering. Type of the field can be a number, boolean, or string. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Specifies an array of values to compare to when using a multiple values operator. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type StringContainsParameters struct { + + // Specifies the field within the event data that you want to use for filtering. Type of the field can be a number, boolean, or string. + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` + + // Specifies an array of values to compare to when using a multiple values operator. + // +kubebuilder:validation:Optional + Values []*string `json:"values" tf:"values,omitempty"` +} + +type StringEndsWithInitParameters struct { + + // Specifies the field within the event data that you want to use for filtering. Type of the field can be a number, boolean, or string. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Specifies an array of values to compare to when using a multiple values operator. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type StringEndsWithObservation struct { + + // Specifies the field within the event data that you want to use for filtering. Type of the field can be a number, boolean, or string. 
+ Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Specifies an array of values to compare to when using a multiple values operator. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type StringEndsWithParameters struct { + + // Specifies the field within the event data that you want to use for filtering. Type of the field can be a number, boolean, or string. + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` + + // Specifies an array of values to compare to when using a multiple values operator. + // +kubebuilder:validation:Optional + Values []*string `json:"values" tf:"values,omitempty"` +} + +type StringInInitParameters struct { + + // Specifies the field within the event data that you want to use for filtering. Type of the field can be a number, boolean, or string. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Specifies an array of values to compare to when using a multiple values operator. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type StringInObservation struct { + + // Specifies the field within the event data that you want to use for filtering. Type of the field can be a number, boolean, or string. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Specifies an array of values to compare to when using a multiple values operator. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type StringInParameters struct { + + // Specifies the field within the event data that you want to use for filtering. Type of the field can be a number, boolean, or string. + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` + + // Specifies an array of values to compare to when using a multiple values operator. 
+ // +kubebuilder:validation:Optional + Values []*string `json:"values" tf:"values,omitempty"` +} + +type StringNotBeginsWithInitParameters struct { + + // Specifies the field within the event data that you want to use for filtering. Type of the field can be a number, boolean, or string. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Specifies an array of values to compare to when using a multiple values operator. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type StringNotBeginsWithObservation struct { + + // Specifies the field within the event data that you want to use for filtering. Type of the field can be a number, boolean, or string. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Specifies an array of values to compare to when using a multiple values operator. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type StringNotBeginsWithParameters struct { + + // Specifies the field within the event data that you want to use for filtering. Type of the field can be a number, boolean, or string. + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` + + // Specifies an array of values to compare to when using a multiple values operator. + // +kubebuilder:validation:Optional + Values []*string `json:"values" tf:"values,omitempty"` +} + +type StringNotContainsInitParameters struct { + + // Specifies the field within the event data that you want to use for filtering. Type of the field can be a number, boolean, or string. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Specifies an array of values to compare to when using a multiple values operator. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type StringNotContainsObservation struct { + + // Specifies the field within the event data that you want to use for filtering. Type of the field can be a number, boolean, or string. 
+ Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Specifies an array of values to compare to when using a multiple values operator. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type StringNotContainsParameters struct { + + // Specifies the field within the event data that you want to use for filtering. Type of the field can be a number, boolean, or string. + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` + + // Specifies an array of values to compare to when using a multiple values operator. + // +kubebuilder:validation:Optional + Values []*string `json:"values" tf:"values,omitempty"` +} + +type StringNotEndsWithInitParameters struct { + + // Specifies the field within the event data that you want to use for filtering. Type of the field can be a number, boolean, or string. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Specifies an array of values to compare to when using a multiple values operator. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type StringNotEndsWithObservation struct { + + // Specifies the field within the event data that you want to use for filtering. Type of the field can be a number, boolean, or string. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Specifies an array of values to compare to when using a multiple values operator. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type StringNotEndsWithParameters struct { + + // Specifies the field within the event data that you want to use for filtering. Type of the field can be a number, boolean, or string. + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` + + // Specifies an array of values to compare to when using a multiple values operator. 
+ // +kubebuilder:validation:Optional + Values []*string `json:"values" tf:"values,omitempty"` +} + +type StringNotInInitParameters struct { + + // Specifies the field within the event data that you want to use for filtering. Type of the field can be a number, boolean, or string. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Specifies an array of values to compare to when using a multiple values operator. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type StringNotInObservation struct { + + // Specifies the field within the event data that you want to use for filtering. Type of the field can be a number, boolean, or string. + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Specifies an array of values to compare to when using a multiple values operator. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type StringNotInParameters struct { + + // Specifies the field within the event data that you want to use for filtering. Type of the field can be a number, boolean, or string. + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` + + // Specifies an array of values to compare to when using a multiple values operator. + // +kubebuilder:validation:Optional + Values []*string `json:"values" tf:"values,omitempty"` +} + +type SubjectFilterInitParameters struct { + + // Specifies if subject_begins_with and subject_ends_with case sensitive. This value + CaseSensitive *bool `json:"caseSensitive,omitempty" tf:"case_sensitive,omitempty"` + + // A string to filter events for an event subscription based on a resource path prefix. + SubjectBeginsWith *string `json:"subjectBeginsWith,omitempty" tf:"subject_begins_with,omitempty"` + + // A string to filter events for an event subscription based on a resource path suffix. 
+ SubjectEndsWith *string `json:"subjectEndsWith,omitempty" tf:"subject_ends_with,omitempty"` +} + +type SubjectFilterObservation struct { + + // Specifies if subject_begins_with and subject_ends_with case sensitive. This value + CaseSensitive *bool `json:"caseSensitive,omitempty" tf:"case_sensitive,omitempty"` + + // A string to filter events for an event subscription based on a resource path prefix. + SubjectBeginsWith *string `json:"subjectBeginsWith,omitempty" tf:"subject_begins_with,omitempty"` + + // A string to filter events for an event subscription based on a resource path suffix. + SubjectEndsWith *string `json:"subjectEndsWith,omitempty" tf:"subject_ends_with,omitempty"` +} + +type SubjectFilterParameters struct { + + // Specifies if subject_begins_with and subject_ends_with case sensitive. This value + // +kubebuilder:validation:Optional + CaseSensitive *bool `json:"caseSensitive,omitempty" tf:"case_sensitive,omitempty"` + + // A string to filter events for an event subscription based on a resource path prefix. + // +kubebuilder:validation:Optional + SubjectBeginsWith *string `json:"subjectBeginsWith,omitempty" tf:"subject_begins_with,omitempty"` + + // A string to filter events for an event subscription based on a resource path suffix. + // +kubebuilder:validation:Optional + SubjectEndsWith *string `json:"subjectEndsWith,omitempty" tf:"subject_ends_with,omitempty"` +} + +type WebhookEndpointInitParameters struct { + + // The Azure Active Directory Application ID or URI to get the access token that will be included as the bearer token in delivery requests. + ActiveDirectoryAppIDOrURI *string `json:"activeDirectoryAppIdOrUri,omitempty" tf:"active_directory_app_id_or_uri,omitempty"` + + // The Azure Active Directory Tenant ID to get the access token that will be included as the bearer token in delivery requests. 
+ ActiveDirectoryTenantID *string `json:"activeDirectoryTenantId,omitempty" tf:"active_directory_tenant_id,omitempty"` + + // Maximum number of events per batch. + MaxEventsPerBatch *float64 `json:"maxEventsPerBatch,omitempty" tf:"max_events_per_batch,omitempty"` + + // Preferred batch size in Kilobytes. + PreferredBatchSizeInKilobytes *float64 `json:"preferredBatchSizeInKilobytes,omitempty" tf:"preferred_batch_size_in_kilobytes,omitempty"` + + // Specifies the url of the webhook where the Event Subscription will receive events. + URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +type WebhookEndpointObservation struct { + + // The Azure Active Directory Application ID or URI to get the access token that will be included as the bearer token in delivery requests. + ActiveDirectoryAppIDOrURI *string `json:"activeDirectoryAppIdOrUri,omitempty" tf:"active_directory_app_id_or_uri,omitempty"` + + // The Azure Active Directory Tenant ID to get the access token that will be included as the bearer token in delivery requests. + ActiveDirectoryTenantID *string `json:"activeDirectoryTenantId,omitempty" tf:"active_directory_tenant_id,omitempty"` + + // (Computed) The base url of the webhook where the Event Subscription will receive events. + BaseURL *string `json:"baseUrl,omitempty" tf:"base_url,omitempty"` + + // Maximum number of events per batch. + MaxEventsPerBatch *float64 `json:"maxEventsPerBatch,omitempty" tf:"max_events_per_batch,omitempty"` + + // Preferred batch size in Kilobytes. + PreferredBatchSizeInKilobytes *float64 `json:"preferredBatchSizeInKilobytes,omitempty" tf:"preferred_batch_size_in_kilobytes,omitempty"` + + // Specifies the url of the webhook where the Event Subscription will receive events. + URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +type WebhookEndpointParameters struct { + + // The Azure Active Directory Application ID or URI to get the access token that will be included as the bearer token in delivery requests. 
+ // +kubebuilder:validation:Optional + ActiveDirectoryAppIDOrURI *string `json:"activeDirectoryAppIdOrUri,omitempty" tf:"active_directory_app_id_or_uri,omitempty"` + + // The Azure Active Directory Tenant ID to get the access token that will be included as the bearer token in delivery requests. + // +kubebuilder:validation:Optional + ActiveDirectoryTenantID *string `json:"activeDirectoryTenantId,omitempty" tf:"active_directory_tenant_id,omitempty"` + + // Maximum number of events per batch. + // +kubebuilder:validation:Optional + MaxEventsPerBatch *float64 `json:"maxEventsPerBatch,omitempty" tf:"max_events_per_batch,omitempty"` + + // Preferred batch size in Kilobytes. + // +kubebuilder:validation:Optional + PreferredBatchSizeInKilobytes *float64 `json:"preferredBatchSizeInKilobytes,omitempty" tf:"preferred_batch_size_in_kilobytes,omitempty"` + + // Specifies the url of the webhook where the Event Subscription will receive events. + // +kubebuilder:validation:Optional + URL *string `json:"url" tf:"url,omitempty"` +} + +// EventSubscriptionSpec defines the desired state of EventSubscription +type EventSubscriptionSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider EventSubscriptionParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider EventSubscriptionInitParameters `json:"initProvider,omitempty"` +} + +// EventSubscriptionStatus defines the observed state of EventSubscription. +type EventSubscriptionStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider EventSubscriptionObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// EventSubscription is the Schema for the EventSubscriptions API. Manages an EventGrid Event Subscription +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type EventSubscription struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec EventSubscriptionSpec `json:"spec"` + Status EventSubscriptionStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// EventSubscriptionList contains a list of EventSubscriptions +type EventSubscriptionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []EventSubscription `json:"items"` +} + +// Repository type metadata. 
+var ( + EventSubscription_Kind = "EventSubscription" + EventSubscription_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: EventSubscription_Kind}.String() + EventSubscription_KindAPIVersion = EventSubscription_Kind + "." + CRDGroupVersion.String() + EventSubscription_GroupVersionKind = CRDGroupVersion.WithKind(EventSubscription_Kind) +) + +func init() { + SchemeBuilder.Register(&EventSubscription{}, &EventSubscriptionList{}) +} diff --git a/apis/eventgrid/v1beta2/zz_generated.conversion_hubs.go b/apis/eventgrid/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 000000000..0c189197b --- /dev/null +++ b/apis/eventgrid/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,19 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Domain) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *EventSubscription) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *SystemTopic) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Topic) Hub() {} diff --git a/apis/eventgrid/v1beta2/zz_generated.deepcopy.go b/apis/eventgrid/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..4b34256ac --- /dev/null +++ b/apis/eventgrid/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,5393 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AdvancedFilterInitParameters) DeepCopyInto(out *AdvancedFilterInitParameters) { + *out = *in + if in.BoolEquals != nil { + in, out := &in.BoolEquals, &out.BoolEquals + *out = make([]BoolEqualsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IsNotNull != nil { + in, out := &in.IsNotNull, &out.IsNotNull + *out = make([]IsNotNullInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IsNullOrUndefined != nil { + in, out := &in.IsNullOrUndefined, &out.IsNullOrUndefined + *out = make([]IsNullOrUndefinedInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NumberGreaterThan != nil { + in, out := &in.NumberGreaterThan, &out.NumberGreaterThan + *out = make([]NumberGreaterThanInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NumberGreaterThanOrEquals != nil { + in, out := &in.NumberGreaterThanOrEquals, &out.NumberGreaterThanOrEquals + *out = make([]NumberGreaterThanOrEqualsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NumberIn != nil { + in, out := &in.NumberIn, &out.NumberIn + *out = make([]NumberInInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NumberInRange != nil { + in, out := &in.NumberInRange, &out.NumberInRange + *out = make([]NumberInRangeInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NumberLessThan != nil { + in, out := &in.NumberLessThan, &out.NumberLessThan + *out = make([]NumberLessThanInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NumberLessThanOrEquals != nil { + in, out := &in.NumberLessThanOrEquals, &out.NumberLessThanOrEquals + *out = make([]NumberLessThanOrEqualsInitParameters, len(*in)) + for i := range *in { + 
(*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NumberNotIn != nil { + in, out := &in.NumberNotIn, &out.NumberNotIn + *out = make([]NumberNotInInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NumberNotInRange != nil { + in, out := &in.NumberNotInRange, &out.NumberNotInRange + *out = make([]NumberNotInRangeInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StringBeginsWith != nil { + in, out := &in.StringBeginsWith, &out.StringBeginsWith + *out = make([]StringBeginsWithInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StringContains != nil { + in, out := &in.StringContains, &out.StringContains + *out = make([]StringContainsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StringEndsWith != nil { + in, out := &in.StringEndsWith, &out.StringEndsWith + *out = make([]StringEndsWithInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StringIn != nil { + in, out := &in.StringIn, &out.StringIn + *out = make([]StringInInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StringNotBeginsWith != nil { + in, out := &in.StringNotBeginsWith, &out.StringNotBeginsWith + *out = make([]StringNotBeginsWithInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StringNotContains != nil { + in, out := &in.StringNotContains, &out.StringNotContains + *out = make([]StringNotContainsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StringNotEndsWith != nil { + in, out := &in.StringNotEndsWith, &out.StringNotEndsWith + *out = make([]StringNotEndsWithInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StringNotIn != nil { + in, out := &in.StringNotIn, 
&out.StringNotIn + *out = make([]StringNotInInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdvancedFilterInitParameters. +func (in *AdvancedFilterInitParameters) DeepCopy() *AdvancedFilterInitParameters { + if in == nil { + return nil + } + out := new(AdvancedFilterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdvancedFilterObservation) DeepCopyInto(out *AdvancedFilterObservation) { + *out = *in + if in.BoolEquals != nil { + in, out := &in.BoolEquals, &out.BoolEquals + *out = make([]BoolEqualsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IsNotNull != nil { + in, out := &in.IsNotNull, &out.IsNotNull + *out = make([]IsNotNullObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IsNullOrUndefined != nil { + in, out := &in.IsNullOrUndefined, &out.IsNullOrUndefined + *out = make([]IsNullOrUndefinedObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NumberGreaterThan != nil { + in, out := &in.NumberGreaterThan, &out.NumberGreaterThan + *out = make([]NumberGreaterThanObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NumberGreaterThanOrEquals != nil { + in, out := &in.NumberGreaterThanOrEquals, &out.NumberGreaterThanOrEquals + *out = make([]NumberGreaterThanOrEqualsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NumberIn != nil { + in, out := &in.NumberIn, &out.NumberIn + *out = make([]NumberInObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NumberInRange != nil { + in, out := &in.NumberInRange, &out.NumberInRange + 
*out = make([]NumberInRangeObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NumberLessThan != nil { + in, out := &in.NumberLessThan, &out.NumberLessThan + *out = make([]NumberLessThanObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NumberLessThanOrEquals != nil { + in, out := &in.NumberLessThanOrEquals, &out.NumberLessThanOrEquals + *out = make([]NumberLessThanOrEqualsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NumberNotIn != nil { + in, out := &in.NumberNotIn, &out.NumberNotIn + *out = make([]NumberNotInObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NumberNotInRange != nil { + in, out := &in.NumberNotInRange, &out.NumberNotInRange + *out = make([]NumberNotInRangeObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StringBeginsWith != nil { + in, out := &in.StringBeginsWith, &out.StringBeginsWith + *out = make([]StringBeginsWithObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StringContains != nil { + in, out := &in.StringContains, &out.StringContains + *out = make([]StringContainsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StringEndsWith != nil { + in, out := &in.StringEndsWith, &out.StringEndsWith + *out = make([]StringEndsWithObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StringIn != nil { + in, out := &in.StringIn, &out.StringIn + *out = make([]StringInObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StringNotBeginsWith != nil { + in, out := &in.StringNotBeginsWith, &out.StringNotBeginsWith + *out = make([]StringNotBeginsWithObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if 
in.StringNotContains != nil { + in, out := &in.StringNotContains, &out.StringNotContains + *out = make([]StringNotContainsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StringNotEndsWith != nil { + in, out := &in.StringNotEndsWith, &out.StringNotEndsWith + *out = make([]StringNotEndsWithObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StringNotIn != nil { + in, out := &in.StringNotIn, &out.StringNotIn + *out = make([]StringNotInObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdvancedFilterObservation. +func (in *AdvancedFilterObservation) DeepCopy() *AdvancedFilterObservation { + if in == nil { + return nil + } + out := new(AdvancedFilterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AdvancedFilterParameters) DeepCopyInto(out *AdvancedFilterParameters) { + *out = *in + if in.BoolEquals != nil { + in, out := &in.BoolEquals, &out.BoolEquals + *out = make([]BoolEqualsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IsNotNull != nil { + in, out := &in.IsNotNull, &out.IsNotNull + *out = make([]IsNotNullParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IsNullOrUndefined != nil { + in, out := &in.IsNullOrUndefined, &out.IsNullOrUndefined + *out = make([]IsNullOrUndefinedParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NumberGreaterThan != nil { + in, out := &in.NumberGreaterThan, &out.NumberGreaterThan + *out = make([]NumberGreaterThanParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NumberGreaterThanOrEquals != nil { + in, out := &in.NumberGreaterThanOrEquals, &out.NumberGreaterThanOrEquals + *out = make([]NumberGreaterThanOrEqualsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NumberIn != nil { + in, out := &in.NumberIn, &out.NumberIn + *out = make([]NumberInParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NumberInRange != nil { + in, out := &in.NumberInRange, &out.NumberInRange + *out = make([]NumberInRangeParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NumberLessThan != nil { + in, out := &in.NumberLessThan, &out.NumberLessThan + *out = make([]NumberLessThanParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NumberLessThanOrEquals != nil { + in, out := &in.NumberLessThanOrEquals, &out.NumberLessThanOrEquals + *out = make([]NumberLessThanOrEqualsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NumberNotIn != nil { + 
in, out := &in.NumberNotIn, &out.NumberNotIn + *out = make([]NumberNotInParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NumberNotInRange != nil { + in, out := &in.NumberNotInRange, &out.NumberNotInRange + *out = make([]NumberNotInRangeParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StringBeginsWith != nil { + in, out := &in.StringBeginsWith, &out.StringBeginsWith + *out = make([]StringBeginsWithParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StringContains != nil { + in, out := &in.StringContains, &out.StringContains + *out = make([]StringContainsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StringEndsWith != nil { + in, out := &in.StringEndsWith, &out.StringEndsWith + *out = make([]StringEndsWithParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StringIn != nil { + in, out := &in.StringIn, &out.StringIn + *out = make([]StringInParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StringNotBeginsWith != nil { + in, out := &in.StringNotBeginsWith, &out.StringNotBeginsWith + *out = make([]StringNotBeginsWithParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StringNotContains != nil { + in, out := &in.StringNotContains, &out.StringNotContains + *out = make([]StringNotContainsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StringNotEndsWith != nil { + in, out := &in.StringNotEndsWith, &out.StringNotEndsWith + *out = make([]StringNotEndsWithParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StringNotIn != nil { + in, out := &in.StringNotIn, &out.StringNotIn + *out = make([]StringNotInParameters, len(*in)) + for i := range *in { + 
(*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdvancedFilterParameters. +func (in *AdvancedFilterParameters) DeepCopy() *AdvancedFilterParameters { + if in == nil { + return nil + } + out := new(AdvancedFilterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureFunctionEndpointInitParameters) DeepCopyInto(out *AzureFunctionEndpointInitParameters) { + *out = *in + if in.FunctionID != nil { + in, out := &in.FunctionID, &out.FunctionID + *out = new(string) + **out = **in + } + if in.MaxEventsPerBatch != nil { + in, out := &in.MaxEventsPerBatch, &out.MaxEventsPerBatch + *out = new(float64) + **out = **in + } + if in.PreferredBatchSizeInKilobytes != nil { + in, out := &in.PreferredBatchSizeInKilobytes, &out.PreferredBatchSizeInKilobytes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureFunctionEndpointInitParameters. +func (in *AzureFunctionEndpointInitParameters) DeepCopy() *AzureFunctionEndpointInitParameters { + if in == nil { + return nil + } + out := new(AzureFunctionEndpointInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AzureFunctionEndpointObservation) DeepCopyInto(out *AzureFunctionEndpointObservation) { + *out = *in + if in.FunctionID != nil { + in, out := &in.FunctionID, &out.FunctionID + *out = new(string) + **out = **in + } + if in.MaxEventsPerBatch != nil { + in, out := &in.MaxEventsPerBatch, &out.MaxEventsPerBatch + *out = new(float64) + **out = **in + } + if in.PreferredBatchSizeInKilobytes != nil { + in, out := &in.PreferredBatchSizeInKilobytes, &out.PreferredBatchSizeInKilobytes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureFunctionEndpointObservation. +func (in *AzureFunctionEndpointObservation) DeepCopy() *AzureFunctionEndpointObservation { + if in == nil { + return nil + } + out := new(AzureFunctionEndpointObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureFunctionEndpointParameters) DeepCopyInto(out *AzureFunctionEndpointParameters) { + *out = *in + if in.FunctionID != nil { + in, out := &in.FunctionID, &out.FunctionID + *out = new(string) + **out = **in + } + if in.MaxEventsPerBatch != nil { + in, out := &in.MaxEventsPerBatch, &out.MaxEventsPerBatch + *out = new(float64) + **out = **in + } + if in.PreferredBatchSizeInKilobytes != nil { + in, out := &in.PreferredBatchSizeInKilobytes, &out.PreferredBatchSizeInKilobytes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureFunctionEndpointParameters. +func (in *AzureFunctionEndpointParameters) DeepCopy() *AzureFunctionEndpointParameters { + if in == nil { + return nil + } + out := new(AzureFunctionEndpointParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BoolEqualsInitParameters) DeepCopyInto(out *BoolEqualsInitParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BoolEqualsInitParameters. +func (in *BoolEqualsInitParameters) DeepCopy() *BoolEqualsInitParameters { + if in == nil { + return nil + } + out := new(BoolEqualsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BoolEqualsObservation) DeepCopyInto(out *BoolEqualsObservation) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BoolEqualsObservation. +func (in *BoolEqualsObservation) DeepCopy() *BoolEqualsObservation { + if in == nil { + return nil + } + out := new(BoolEqualsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BoolEqualsParameters) DeepCopyInto(out *BoolEqualsParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BoolEqualsParameters. 
+func (in *BoolEqualsParameters) DeepCopy() *BoolEqualsParameters { + if in == nil { + return nil + } + out := new(BoolEqualsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeadLetterIdentityInitParameters) DeepCopyInto(out *DeadLetterIdentityInitParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.UserAssignedIdentity != nil { + in, out := &in.UserAssignedIdentity, &out.UserAssignedIdentity + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeadLetterIdentityInitParameters. +func (in *DeadLetterIdentityInitParameters) DeepCopy() *DeadLetterIdentityInitParameters { + if in == nil { + return nil + } + out := new(DeadLetterIdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeadLetterIdentityObservation) DeepCopyInto(out *DeadLetterIdentityObservation) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.UserAssignedIdentity != nil { + in, out := &in.UserAssignedIdentity, &out.UserAssignedIdentity + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeadLetterIdentityObservation. +func (in *DeadLetterIdentityObservation) DeepCopy() *DeadLetterIdentityObservation { + if in == nil { + return nil + } + out := new(DeadLetterIdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeadLetterIdentityParameters) DeepCopyInto(out *DeadLetterIdentityParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.UserAssignedIdentity != nil { + in, out := &in.UserAssignedIdentity, &out.UserAssignedIdentity + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeadLetterIdentityParameters. +func (in *DeadLetterIdentityParameters) DeepCopy() *DeadLetterIdentityParameters { + if in == nil { + return nil + } + out := new(DeadLetterIdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeliveryIdentityInitParameters) DeepCopyInto(out *DeliveryIdentityInitParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.UserAssignedIdentity != nil { + in, out := &in.UserAssignedIdentity, &out.UserAssignedIdentity + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeliveryIdentityInitParameters. +func (in *DeliveryIdentityInitParameters) DeepCopy() *DeliveryIdentityInitParameters { + if in == nil { + return nil + } + out := new(DeliveryIdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeliveryIdentityObservation) DeepCopyInto(out *DeliveryIdentityObservation) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.UserAssignedIdentity != nil { + in, out := &in.UserAssignedIdentity, &out.UserAssignedIdentity + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeliveryIdentityObservation. +func (in *DeliveryIdentityObservation) DeepCopy() *DeliveryIdentityObservation { + if in == nil { + return nil + } + out := new(DeliveryIdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeliveryIdentityParameters) DeepCopyInto(out *DeliveryIdentityParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.UserAssignedIdentity != nil { + in, out := &in.UserAssignedIdentity, &out.UserAssignedIdentity + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeliveryIdentityParameters. +func (in *DeliveryIdentityParameters) DeepCopy() *DeliveryIdentityParameters { + if in == nil { + return nil + } + out := new(DeliveryIdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeliveryPropertyInitParameters) DeepCopyInto(out *DeliveryPropertyInitParameters) { + *out = *in + if in.HeaderName != nil { + in, out := &in.HeaderName, &out.HeaderName + *out = new(string) + **out = **in + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(bool) + **out = **in + } + if in.SourceField != nil { + in, out := &in.SourceField, &out.SourceField + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeliveryPropertyInitParameters. +func (in *DeliveryPropertyInitParameters) DeepCopy() *DeliveryPropertyInitParameters { + if in == nil { + return nil + } + out := new(DeliveryPropertyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeliveryPropertyObservation) DeepCopyInto(out *DeliveryPropertyObservation) { + *out = *in + if in.HeaderName != nil { + in, out := &in.HeaderName, &out.HeaderName + *out = new(string) + **out = **in + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(bool) + **out = **in + } + if in.SourceField != nil { + in, out := &in.SourceField, &out.SourceField + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeliveryPropertyObservation. +func (in *DeliveryPropertyObservation) DeepCopy() *DeliveryPropertyObservation { + if in == nil { + return nil + } + out := new(DeliveryPropertyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeliveryPropertyParameters) DeepCopyInto(out *DeliveryPropertyParameters) { + *out = *in + if in.HeaderName != nil { + in, out := &in.HeaderName, &out.HeaderName + *out = new(string) + **out = **in + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(bool) + **out = **in + } + if in.SourceField != nil { + in, out := &in.SourceField, &out.SourceField + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.ValueSecretRef != nil { + in, out := &in.ValueSecretRef, &out.ValueSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeliveryPropertyParameters. +func (in *DeliveryPropertyParameters) DeepCopy() *DeliveryPropertyParameters { + if in == nil { + return nil + } + out := new(DeliveryPropertyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Domain) DeepCopyInto(out *Domain) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Domain. +func (in *Domain) DeepCopy() *Domain { + if in == nil { + return nil + } + out := new(Domain) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Domain) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DomainInitParameters) DeepCopyInto(out *DomainInitParameters) { + *out = *in + if in.AutoCreateTopicWithFirstSubscription != nil { + in, out := &in.AutoCreateTopicWithFirstSubscription, &out.AutoCreateTopicWithFirstSubscription + *out = new(bool) + **out = **in + } + if in.AutoDeleteTopicWithLastSubscription != nil { + in, out := &in.AutoDeleteTopicWithLastSubscription, &out.AutoDeleteTopicWithLastSubscription + *out = new(bool) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.InboundIPRule != nil { + in, out := &in.InboundIPRule, &out.InboundIPRule + *out = make([]InboundIPRuleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InputMappingDefaultValues != nil { + in, out := &in.InputMappingDefaultValues, &out.InputMappingDefaultValues + *out = new(InputMappingDefaultValuesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.InputMappingFields != nil { + in, out := &in.InputMappingFields, &out.InputMappingFields + *out = new(InputMappingFieldsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.InputSchema != nil { + in, out := &in.InputSchema, &out.InputSchema + *out = new(string) + **out = **in + } + if in.LocalAuthEnabled != nil { + in, out := &in.LocalAuthEnabled, &out.LocalAuthEnabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = 
outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainInitParameters. +func (in *DomainInitParameters) DeepCopy() *DomainInitParameters { + if in == nil { + return nil + } + out := new(DomainInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DomainList) DeepCopyInto(out *DomainList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Domain, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainList. +func (in *DomainList) DeepCopy() *DomainList { + if in == nil { + return nil + } + out := new(DomainList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DomainList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DomainObservation) DeepCopyInto(out *DomainObservation) { + *out = *in + if in.AutoCreateTopicWithFirstSubscription != nil { + in, out := &in.AutoCreateTopicWithFirstSubscription, &out.AutoCreateTopicWithFirstSubscription + *out = new(bool) + **out = **in + } + if in.AutoDeleteTopicWithLastSubscription != nil { + in, out := &in.AutoDeleteTopicWithLastSubscription, &out.AutoDeleteTopicWithLastSubscription + *out = new(bool) + **out = **in + } + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.InboundIPRule != nil { + in, out := &in.InboundIPRule, &out.InboundIPRule + *out = make([]InboundIPRuleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InputMappingDefaultValues != nil { + in, out := &in.InputMappingDefaultValues, &out.InputMappingDefaultValues + *out = new(InputMappingDefaultValuesObservation) + (*in).DeepCopyInto(*out) + } + if in.InputMappingFields != nil { + in, out := &in.InputMappingFields, &out.InputMappingFields + *out = new(InputMappingFieldsObservation) + (*in).DeepCopyInto(*out) + } + if in.InputSchema != nil { + in, out := &in.InputSchema, &out.InputSchema + *out = new(string) + **out = **in + } + if in.LocalAuthEnabled != nil { + in, out := &in.LocalAuthEnabled, &out.LocalAuthEnabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in 
+ } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainObservation. +func (in *DomainObservation) DeepCopy() *DomainObservation { + if in == nil { + return nil + } + out := new(DomainObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DomainParameters) DeepCopyInto(out *DomainParameters) { + *out = *in + if in.AutoCreateTopicWithFirstSubscription != nil { + in, out := &in.AutoCreateTopicWithFirstSubscription, &out.AutoCreateTopicWithFirstSubscription + *out = new(bool) + **out = **in + } + if in.AutoDeleteTopicWithLastSubscription != nil { + in, out := &in.AutoDeleteTopicWithLastSubscription, &out.AutoDeleteTopicWithLastSubscription + *out = new(bool) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.InboundIPRule != nil { + in, out := &in.InboundIPRule, &out.InboundIPRule + *out = make([]InboundIPRuleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InputMappingDefaultValues != nil { + in, out := &in.InputMappingDefaultValues, &out.InputMappingDefaultValues + *out = new(InputMappingDefaultValuesParameters) + (*in).DeepCopyInto(*out) + } + if in.InputMappingFields != nil { + in, out := &in.InputMappingFields, &out.InputMappingFields + *out = new(InputMappingFieldsParameters) + (*in).DeepCopyInto(*out) + } + if in.InputSchema != nil { + in, out := &in.InputSchema, &out.InputSchema + *out = new(string) + **out = **in + } + if 
in.LocalAuthEnabled != nil { + in, out := &in.LocalAuthEnabled, &out.LocalAuthEnabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainParameters. +func (in *DomainParameters) DeepCopy() *DomainParameters { + if in == nil { + return nil + } + out := new(DomainParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DomainSpec) DeepCopyInto(out *DomainSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainSpec. 
+func (in *DomainSpec) DeepCopy() *DomainSpec { + if in == nil { + return nil + } + out := new(DomainSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DomainStatus) DeepCopyInto(out *DomainStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainStatus. +func (in *DomainStatus) DeepCopy() *DomainStatus { + if in == nil { + return nil + } + out := new(DomainStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventSubscription) DeepCopyInto(out *EventSubscription) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSubscription. +func (in *EventSubscription) DeepCopy() *EventSubscription { + if in == nil { + return nil + } + out := new(EventSubscription) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EventSubscription) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EventSubscriptionInitParameters) DeepCopyInto(out *EventSubscriptionInitParameters) { + *out = *in + if in.AdvancedFilter != nil { + in, out := &in.AdvancedFilter, &out.AdvancedFilter + *out = new(AdvancedFilterInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AdvancedFilteringOnArraysEnabled != nil { + in, out := &in.AdvancedFilteringOnArraysEnabled, &out.AdvancedFilteringOnArraysEnabled + *out = new(bool) + **out = **in + } + if in.AzureFunctionEndpoint != nil { + in, out := &in.AzureFunctionEndpoint, &out.AzureFunctionEndpoint + *out = new(AzureFunctionEndpointInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DeadLetterIdentity != nil { + in, out := &in.DeadLetterIdentity, &out.DeadLetterIdentity + *out = new(DeadLetterIdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DeliveryIdentity != nil { + in, out := &in.DeliveryIdentity, &out.DeliveryIdentity + *out = new(DeliveryIdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DeliveryProperty != nil { + in, out := &in.DeliveryProperty, &out.DeliveryProperty + *out = make([]DeliveryPropertyInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EventDeliverySchema != nil { + in, out := &in.EventDeliverySchema, &out.EventDeliverySchema + *out = new(string) + **out = **in + } + if in.EventHubEndpointID != nil { + in, out := &in.EventHubEndpointID, &out.EventHubEndpointID + *out = new(string) + **out = **in + } + if in.ExpirationTimeUtc != nil { + in, out := &in.ExpirationTimeUtc, &out.ExpirationTimeUtc + *out = new(string) + **out = **in + } + if in.HybridConnectionEndpointID != nil { + in, out := &in.HybridConnectionEndpointID, &out.HybridConnectionEndpointID + *out = new(string) + **out = **in + } + if in.IncludedEventTypes != nil { + in, out := &in.IncludedEventTypes, &out.IncludedEventTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + 
**out = **in + } + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RetryPolicy != nil { + in, out := &in.RetryPolicy, &out.RetryPolicy + *out = new(RetryPolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = new(string) + **out = **in + } + if in.ScopeRef != nil { + in, out := &in.ScopeRef, &out.ScopeRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ScopeSelector != nil { + in, out := &in.ScopeSelector, &out.ScopeSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ServiceBusQueueEndpointID != nil { + in, out := &in.ServiceBusQueueEndpointID, &out.ServiceBusQueueEndpointID + *out = new(string) + **out = **in + } + if in.ServiceBusTopicEndpointID != nil { + in, out := &in.ServiceBusTopicEndpointID, &out.ServiceBusTopicEndpointID + *out = new(string) + **out = **in + } + if in.StorageBlobDeadLetterDestination != nil { + in, out := &in.StorageBlobDeadLetterDestination, &out.StorageBlobDeadLetterDestination + *out = new(StorageBlobDeadLetterDestinationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StorageQueueEndpoint != nil { + in, out := &in.StorageQueueEndpoint, &out.StorageQueueEndpoint + *out = new(StorageQueueEndpointInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SubjectFilter != nil { + in, out := &in.SubjectFilter, &out.SubjectFilter + *out = new(SubjectFilterInitParameters) + (*in).DeepCopyInto(*out) + } + if in.WebhookEndpoint != nil { + in, out := &in.WebhookEndpoint, &out.WebhookEndpoint + *out = new(WebhookEndpointInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
EventSubscriptionInitParameters. +func (in *EventSubscriptionInitParameters) DeepCopy() *EventSubscriptionInitParameters { + if in == nil { + return nil + } + out := new(EventSubscriptionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventSubscriptionList) DeepCopyInto(out *EventSubscriptionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]EventSubscription, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSubscriptionList. +func (in *EventSubscriptionList) DeepCopy() *EventSubscriptionList { + if in == nil { + return nil + } + out := new(EventSubscriptionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EventSubscriptionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EventSubscriptionObservation) DeepCopyInto(out *EventSubscriptionObservation) { + *out = *in + if in.AdvancedFilter != nil { + in, out := &in.AdvancedFilter, &out.AdvancedFilter + *out = new(AdvancedFilterObservation) + (*in).DeepCopyInto(*out) + } + if in.AdvancedFilteringOnArraysEnabled != nil { + in, out := &in.AdvancedFilteringOnArraysEnabled, &out.AdvancedFilteringOnArraysEnabled + *out = new(bool) + **out = **in + } + if in.AzureFunctionEndpoint != nil { + in, out := &in.AzureFunctionEndpoint, &out.AzureFunctionEndpoint + *out = new(AzureFunctionEndpointObservation) + (*in).DeepCopyInto(*out) + } + if in.DeadLetterIdentity != nil { + in, out := &in.DeadLetterIdentity, &out.DeadLetterIdentity + *out = new(DeadLetterIdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.DeliveryIdentity != nil { + in, out := &in.DeliveryIdentity, &out.DeliveryIdentity + *out = new(DeliveryIdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.DeliveryProperty != nil { + in, out := &in.DeliveryProperty, &out.DeliveryProperty + *out = make([]DeliveryPropertyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EventDeliverySchema != nil { + in, out := &in.EventDeliverySchema, &out.EventDeliverySchema + *out = new(string) + **out = **in + } + if in.EventHubEndpointID != nil { + in, out := &in.EventHubEndpointID, &out.EventHubEndpointID + *out = new(string) + **out = **in + } + if in.ExpirationTimeUtc != nil { + in, out := &in.ExpirationTimeUtc, &out.ExpirationTimeUtc + *out = new(string) + **out = **in + } + if in.HybridConnectionEndpointID != nil { + in, out := &in.HybridConnectionEndpointID, &out.HybridConnectionEndpointID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IncludedEventTypes != nil { + in, out := &in.IncludedEventTypes, &out.IncludedEventTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] 
!= nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RetryPolicy != nil { + in, out := &in.RetryPolicy, &out.RetryPolicy + *out = new(RetryPolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = new(string) + **out = **in + } + if in.ServiceBusQueueEndpointID != nil { + in, out := &in.ServiceBusQueueEndpointID, &out.ServiceBusQueueEndpointID + *out = new(string) + **out = **in + } + if in.ServiceBusTopicEndpointID != nil { + in, out := &in.ServiceBusTopicEndpointID, &out.ServiceBusTopicEndpointID + *out = new(string) + **out = **in + } + if in.StorageBlobDeadLetterDestination != nil { + in, out := &in.StorageBlobDeadLetterDestination, &out.StorageBlobDeadLetterDestination + *out = new(StorageBlobDeadLetterDestinationObservation) + (*in).DeepCopyInto(*out) + } + if in.StorageQueueEndpoint != nil { + in, out := &in.StorageQueueEndpoint, &out.StorageQueueEndpoint + *out = new(StorageQueueEndpointObservation) + (*in).DeepCopyInto(*out) + } + if in.SubjectFilter != nil { + in, out := &in.SubjectFilter, &out.SubjectFilter + *out = new(SubjectFilterObservation) + (*in).DeepCopyInto(*out) + } + if in.WebhookEndpoint != nil { + in, out := &in.WebhookEndpoint, &out.WebhookEndpoint + *out = new(WebhookEndpointObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSubscriptionObservation. 
+func (in *EventSubscriptionObservation) DeepCopy() *EventSubscriptionObservation { + if in == nil { + return nil + } + out := new(EventSubscriptionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventSubscriptionParameters) DeepCopyInto(out *EventSubscriptionParameters) { + *out = *in + if in.AdvancedFilter != nil { + in, out := &in.AdvancedFilter, &out.AdvancedFilter + *out = new(AdvancedFilterParameters) + (*in).DeepCopyInto(*out) + } + if in.AdvancedFilteringOnArraysEnabled != nil { + in, out := &in.AdvancedFilteringOnArraysEnabled, &out.AdvancedFilteringOnArraysEnabled + *out = new(bool) + **out = **in + } + if in.AzureFunctionEndpoint != nil { + in, out := &in.AzureFunctionEndpoint, &out.AzureFunctionEndpoint + *out = new(AzureFunctionEndpointParameters) + (*in).DeepCopyInto(*out) + } + if in.DeadLetterIdentity != nil { + in, out := &in.DeadLetterIdentity, &out.DeadLetterIdentity + *out = new(DeadLetterIdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.DeliveryIdentity != nil { + in, out := &in.DeliveryIdentity, &out.DeliveryIdentity + *out = new(DeliveryIdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.DeliveryProperty != nil { + in, out := &in.DeliveryProperty, &out.DeliveryProperty + *out = make([]DeliveryPropertyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EventDeliverySchema != nil { + in, out := &in.EventDeliverySchema, &out.EventDeliverySchema + *out = new(string) + **out = **in + } + if in.EventHubEndpointID != nil { + in, out := &in.EventHubEndpointID, &out.EventHubEndpointID + *out = new(string) + **out = **in + } + if in.ExpirationTimeUtc != nil { + in, out := &in.ExpirationTimeUtc, &out.ExpirationTimeUtc + *out = new(string) + **out = **in + } + if in.HybridConnectionEndpointID != nil { + in, out := &in.HybridConnectionEndpointID, 
&out.HybridConnectionEndpointID + *out = new(string) + **out = **in + } + if in.IncludedEventTypes != nil { + in, out := &in.IncludedEventTypes, &out.IncludedEventTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RetryPolicy != nil { + in, out := &in.RetryPolicy, &out.RetryPolicy + *out = new(RetryPolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = new(string) + **out = **in + } + if in.ScopeRef != nil { + in, out := &in.ScopeRef, &out.ScopeRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ScopeSelector != nil { + in, out := &in.ScopeSelector, &out.ScopeSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ServiceBusQueueEndpointID != nil { + in, out := &in.ServiceBusQueueEndpointID, &out.ServiceBusQueueEndpointID + *out = new(string) + **out = **in + } + if in.ServiceBusTopicEndpointID != nil { + in, out := &in.ServiceBusTopicEndpointID, &out.ServiceBusTopicEndpointID + *out = new(string) + **out = **in + } + if in.StorageBlobDeadLetterDestination != nil { + in, out := &in.StorageBlobDeadLetterDestination, &out.StorageBlobDeadLetterDestination + *out = new(StorageBlobDeadLetterDestinationParameters) + (*in).DeepCopyInto(*out) + } + if in.StorageQueueEndpoint != nil { + in, out := &in.StorageQueueEndpoint, &out.StorageQueueEndpoint + *out = new(StorageQueueEndpointParameters) + (*in).DeepCopyInto(*out) + } + if in.SubjectFilter != nil { + in, out := &in.SubjectFilter, &out.SubjectFilter + *out = new(SubjectFilterParameters) 
+ (*in).DeepCopyInto(*out) + } + if in.WebhookEndpoint != nil { + in, out := &in.WebhookEndpoint, &out.WebhookEndpoint + *out = new(WebhookEndpointParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSubscriptionParameters. +func (in *EventSubscriptionParameters) DeepCopy() *EventSubscriptionParameters { + if in == nil { + return nil + } + out := new(EventSubscriptionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventSubscriptionSpec) DeepCopyInto(out *EventSubscriptionSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSubscriptionSpec. +func (in *EventSubscriptionSpec) DeepCopy() *EventSubscriptionSpec { + if in == nil { + return nil + } + out := new(EventSubscriptionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventSubscriptionStatus) DeepCopyInto(out *EventSubscriptionStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSubscriptionStatus. +func (in *EventSubscriptionStatus) DeepCopy() *EventSubscriptionStatus { + if in == nil { + return nil + } + out := new(EventSubscriptionStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentityInitParameters) DeepCopyInto(out *IdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityInitParameters. +func (in *IdentityInitParameters) DeepCopy() *IdentityInitParameters { + if in == nil { + return nil + } + out := new(IdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityObservation) DeepCopyInto(out *IdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityObservation. +func (in *IdentityObservation) DeepCopy() *IdentityObservation { + if in == nil { + return nil + } + out := new(IdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentityParameters) DeepCopyInto(out *IdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityParameters. +func (in *IdentityParameters) DeepCopy() *IdentityParameters { + if in == nil { + return nil + } + out := new(IdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InboundIPRuleInitParameters) DeepCopyInto(out *InboundIPRuleInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.IPMask != nil { + in, out := &in.IPMask, &out.IPMask + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InboundIPRuleInitParameters. +func (in *InboundIPRuleInitParameters) DeepCopy() *InboundIPRuleInitParameters { + if in == nil { + return nil + } + out := new(InboundIPRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InboundIPRuleObservation) DeepCopyInto(out *InboundIPRuleObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.IPMask != nil { + in, out := &in.IPMask, &out.IPMask + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InboundIPRuleObservation. 
+func (in *InboundIPRuleObservation) DeepCopy() *InboundIPRuleObservation { + if in == nil { + return nil + } + out := new(InboundIPRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InboundIPRuleParameters) DeepCopyInto(out *InboundIPRuleParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.IPMask != nil { + in, out := &in.IPMask, &out.IPMask + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InboundIPRuleParameters. +func (in *InboundIPRuleParameters) DeepCopy() *InboundIPRuleParameters { + if in == nil { + return nil + } + out := new(InboundIPRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InputMappingDefaultValuesInitParameters) DeepCopyInto(out *InputMappingDefaultValuesInitParameters) { + *out = *in + if in.DataVersion != nil { + in, out := &in.DataVersion, &out.DataVersion + *out = new(string) + **out = **in + } + if in.EventType != nil { + in, out := &in.EventType, &out.EventType + *out = new(string) + **out = **in + } + if in.Subject != nil { + in, out := &in.Subject, &out.Subject + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputMappingDefaultValuesInitParameters. +func (in *InputMappingDefaultValuesInitParameters) DeepCopy() *InputMappingDefaultValuesInitParameters { + if in == nil { + return nil + } + out := new(InputMappingDefaultValuesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InputMappingDefaultValuesObservation) DeepCopyInto(out *InputMappingDefaultValuesObservation) { + *out = *in + if in.DataVersion != nil { + in, out := &in.DataVersion, &out.DataVersion + *out = new(string) + **out = **in + } + if in.EventType != nil { + in, out := &in.EventType, &out.EventType + *out = new(string) + **out = **in + } + if in.Subject != nil { + in, out := &in.Subject, &out.Subject + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputMappingDefaultValuesObservation. +func (in *InputMappingDefaultValuesObservation) DeepCopy() *InputMappingDefaultValuesObservation { + if in == nil { + return nil + } + out := new(InputMappingDefaultValuesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InputMappingDefaultValuesParameters) DeepCopyInto(out *InputMappingDefaultValuesParameters) { + *out = *in + if in.DataVersion != nil { + in, out := &in.DataVersion, &out.DataVersion + *out = new(string) + **out = **in + } + if in.EventType != nil { + in, out := &in.EventType, &out.EventType + *out = new(string) + **out = **in + } + if in.Subject != nil { + in, out := &in.Subject, &out.Subject + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputMappingDefaultValuesParameters. +func (in *InputMappingDefaultValuesParameters) DeepCopy() *InputMappingDefaultValuesParameters { + if in == nil { + return nil + } + out := new(InputMappingDefaultValuesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InputMappingFieldsInitParameters) DeepCopyInto(out *InputMappingFieldsInitParameters) { + *out = *in + if in.DataVersion != nil { + in, out := &in.DataVersion, &out.DataVersion + *out = new(string) + **out = **in + } + if in.EventTime != nil { + in, out := &in.EventTime, &out.EventTime + *out = new(string) + **out = **in + } + if in.EventType != nil { + in, out := &in.EventType, &out.EventType + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Subject != nil { + in, out := &in.Subject, &out.Subject + *out = new(string) + **out = **in + } + if in.Topic != nil { + in, out := &in.Topic, &out.Topic + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputMappingFieldsInitParameters. +func (in *InputMappingFieldsInitParameters) DeepCopy() *InputMappingFieldsInitParameters { + if in == nil { + return nil + } + out := new(InputMappingFieldsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InputMappingFieldsObservation) DeepCopyInto(out *InputMappingFieldsObservation) { + *out = *in + if in.DataVersion != nil { + in, out := &in.DataVersion, &out.DataVersion + *out = new(string) + **out = **in + } + if in.EventTime != nil { + in, out := &in.EventTime, &out.EventTime + *out = new(string) + **out = **in + } + if in.EventType != nil { + in, out := &in.EventType, &out.EventType + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Subject != nil { + in, out := &in.Subject, &out.Subject + *out = new(string) + **out = **in + } + if in.Topic != nil { + in, out := &in.Topic, &out.Topic + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputMappingFieldsObservation. +func (in *InputMappingFieldsObservation) DeepCopy() *InputMappingFieldsObservation { + if in == nil { + return nil + } + out := new(InputMappingFieldsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InputMappingFieldsParameters) DeepCopyInto(out *InputMappingFieldsParameters) { + *out = *in + if in.DataVersion != nil { + in, out := &in.DataVersion, &out.DataVersion + *out = new(string) + **out = **in + } + if in.EventTime != nil { + in, out := &in.EventTime, &out.EventTime + *out = new(string) + **out = **in + } + if in.EventType != nil { + in, out := &in.EventType, &out.EventType + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Subject != nil { + in, out := &in.Subject, &out.Subject + *out = new(string) + **out = **in + } + if in.Topic != nil { + in, out := &in.Topic, &out.Topic + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputMappingFieldsParameters. +func (in *InputMappingFieldsParameters) DeepCopy() *InputMappingFieldsParameters { + if in == nil { + return nil + } + out := new(InputMappingFieldsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IsNotNullInitParameters) DeepCopyInto(out *IsNotNullInitParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IsNotNullInitParameters. +func (in *IsNotNullInitParameters) DeepCopy() *IsNotNullInitParameters { + if in == nil { + return nil + } + out := new(IsNotNullInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IsNotNullObservation) DeepCopyInto(out *IsNotNullObservation) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IsNotNullObservation. +func (in *IsNotNullObservation) DeepCopy() *IsNotNullObservation { + if in == nil { + return nil + } + out := new(IsNotNullObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IsNotNullParameters) DeepCopyInto(out *IsNotNullParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IsNotNullParameters. +func (in *IsNotNullParameters) DeepCopy() *IsNotNullParameters { + if in == nil { + return nil + } + out := new(IsNotNullParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IsNullOrUndefinedInitParameters) DeepCopyInto(out *IsNullOrUndefinedInitParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IsNullOrUndefinedInitParameters. +func (in *IsNullOrUndefinedInitParameters) DeepCopy() *IsNullOrUndefinedInitParameters { + if in == nil { + return nil + } + out := new(IsNullOrUndefinedInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IsNullOrUndefinedObservation) DeepCopyInto(out *IsNullOrUndefinedObservation) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IsNullOrUndefinedObservation. +func (in *IsNullOrUndefinedObservation) DeepCopy() *IsNullOrUndefinedObservation { + if in == nil { + return nil + } + out := new(IsNullOrUndefinedObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IsNullOrUndefinedParameters) DeepCopyInto(out *IsNullOrUndefinedParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IsNullOrUndefinedParameters. +func (in *IsNullOrUndefinedParameters) DeepCopy() *IsNullOrUndefinedParameters { + if in == nil { + return nil + } + out := new(IsNullOrUndefinedParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NumberGreaterThanInitParameters) DeepCopyInto(out *NumberGreaterThanInitParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NumberGreaterThanInitParameters. 
+func (in *NumberGreaterThanInitParameters) DeepCopy() *NumberGreaterThanInitParameters { + if in == nil { + return nil + } + out := new(NumberGreaterThanInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NumberGreaterThanObservation) DeepCopyInto(out *NumberGreaterThanObservation) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NumberGreaterThanObservation. +func (in *NumberGreaterThanObservation) DeepCopy() *NumberGreaterThanObservation { + if in == nil { + return nil + } + out := new(NumberGreaterThanObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NumberGreaterThanOrEqualsInitParameters) DeepCopyInto(out *NumberGreaterThanOrEqualsInitParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NumberGreaterThanOrEqualsInitParameters. +func (in *NumberGreaterThanOrEqualsInitParameters) DeepCopy() *NumberGreaterThanOrEqualsInitParameters { + if in == nil { + return nil + } + out := new(NumberGreaterThanOrEqualsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NumberGreaterThanOrEqualsObservation) DeepCopyInto(out *NumberGreaterThanOrEqualsObservation) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NumberGreaterThanOrEqualsObservation. +func (in *NumberGreaterThanOrEqualsObservation) DeepCopy() *NumberGreaterThanOrEqualsObservation { + if in == nil { + return nil + } + out := new(NumberGreaterThanOrEqualsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NumberGreaterThanOrEqualsParameters) DeepCopyInto(out *NumberGreaterThanOrEqualsParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NumberGreaterThanOrEqualsParameters. +func (in *NumberGreaterThanOrEqualsParameters) DeepCopy() *NumberGreaterThanOrEqualsParameters { + if in == nil { + return nil + } + out := new(NumberGreaterThanOrEqualsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NumberGreaterThanParameters) DeepCopyInto(out *NumberGreaterThanParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NumberGreaterThanParameters. 
+func (in *NumberGreaterThanParameters) DeepCopy() *NumberGreaterThanParameters { + if in == nil { + return nil + } + out := new(NumberGreaterThanParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NumberInInitParameters) DeepCopyInto(out *NumberInInitParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NumberInInitParameters. +func (in *NumberInInitParameters) DeepCopy() *NumberInInitParameters { + if in == nil { + return nil + } + out := new(NumberInInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NumberInObservation) DeepCopyInto(out *NumberInObservation) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NumberInObservation. +func (in *NumberInObservation) DeepCopy() *NumberInObservation { + if in == nil { + return nil + } + out := new(NumberInObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NumberInParameters) DeepCopyInto(out *NumberInParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NumberInParameters. +func (in *NumberInParameters) DeepCopy() *NumberInParameters { + if in == nil { + return nil + } + out := new(NumberInParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NumberInRangeInitParameters) DeepCopyInto(out *NumberInRangeInitParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([][]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NumberInRangeInitParameters. +func (in *NumberInRangeInitParameters) DeepCopy() *NumberInRangeInitParameters { + if in == nil { + return nil + } + out := new(NumberInRangeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NumberInRangeObservation) DeepCopyInto(out *NumberInRangeObservation) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([][]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NumberInRangeObservation. +func (in *NumberInRangeObservation) DeepCopy() *NumberInRangeObservation { + if in == nil { + return nil + } + out := new(NumberInRangeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NumberInRangeParameters) DeepCopyInto(out *NumberInRangeParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([][]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NumberInRangeParameters. +func (in *NumberInRangeParameters) DeepCopy() *NumberInRangeParameters { + if in == nil { + return nil + } + out := new(NumberInRangeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NumberLessThanInitParameters) DeepCopyInto(out *NumberLessThanInitParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NumberLessThanInitParameters. +func (in *NumberLessThanInitParameters) DeepCopy() *NumberLessThanInitParameters { + if in == nil { + return nil + } + out := new(NumberLessThanInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NumberLessThanObservation) DeepCopyInto(out *NumberLessThanObservation) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NumberLessThanObservation. +func (in *NumberLessThanObservation) DeepCopy() *NumberLessThanObservation { + if in == nil { + return nil + } + out := new(NumberLessThanObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NumberLessThanOrEqualsInitParameters) DeepCopyInto(out *NumberLessThanOrEqualsInitParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NumberLessThanOrEqualsInitParameters. 
+func (in *NumberLessThanOrEqualsInitParameters) DeepCopy() *NumberLessThanOrEqualsInitParameters { + if in == nil { + return nil + } + out := new(NumberLessThanOrEqualsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NumberLessThanOrEqualsObservation) DeepCopyInto(out *NumberLessThanOrEqualsObservation) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NumberLessThanOrEqualsObservation. +func (in *NumberLessThanOrEqualsObservation) DeepCopy() *NumberLessThanOrEqualsObservation { + if in == nil { + return nil + } + out := new(NumberLessThanOrEqualsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NumberLessThanOrEqualsParameters) DeepCopyInto(out *NumberLessThanOrEqualsParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NumberLessThanOrEqualsParameters. +func (in *NumberLessThanOrEqualsParameters) DeepCopy() *NumberLessThanOrEqualsParameters { + if in == nil { + return nil + } + out := new(NumberLessThanOrEqualsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NumberLessThanParameters) DeepCopyInto(out *NumberLessThanParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NumberLessThanParameters. +func (in *NumberLessThanParameters) DeepCopy() *NumberLessThanParameters { + if in == nil { + return nil + } + out := new(NumberLessThanParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NumberNotInInitParameters) DeepCopyInto(out *NumberNotInInitParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NumberNotInInitParameters. +func (in *NumberNotInInitParameters) DeepCopy() *NumberNotInInitParameters { + if in == nil { + return nil + } + out := new(NumberNotInInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NumberNotInObservation) DeepCopyInto(out *NumberNotInObservation) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NumberNotInObservation. +func (in *NumberNotInObservation) DeepCopy() *NumberNotInObservation { + if in == nil { + return nil + } + out := new(NumberNotInObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NumberNotInParameters) DeepCopyInto(out *NumberNotInParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NumberNotInParameters. +func (in *NumberNotInParameters) DeepCopy() *NumberNotInParameters { + if in == nil { + return nil + } + out := new(NumberNotInParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NumberNotInRangeInitParameters) DeepCopyInto(out *NumberNotInRangeInitParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([][]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NumberNotInRangeInitParameters. +func (in *NumberNotInRangeInitParameters) DeepCopy() *NumberNotInRangeInitParameters { + if in == nil { + return nil + } + out := new(NumberNotInRangeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NumberNotInRangeObservation) DeepCopyInto(out *NumberNotInRangeObservation) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([][]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NumberNotInRangeObservation. +func (in *NumberNotInRangeObservation) DeepCopy() *NumberNotInRangeObservation { + if in == nil { + return nil + } + out := new(NumberNotInRangeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NumberNotInRangeParameters) DeepCopyInto(out *NumberNotInRangeParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([][]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NumberNotInRangeParameters. +func (in *NumberNotInRangeParameters) DeepCopy() *NumberNotInRangeParameters { + if in == nil { + return nil + } + out := new(NumberNotInRangeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetryPolicyInitParameters) DeepCopyInto(out *RetryPolicyInitParameters) { + *out = *in + if in.EventTimeToLive != nil { + in, out := &in.EventTimeToLive, &out.EventTimeToLive + *out = new(float64) + **out = **in + } + if in.MaxDeliveryAttempts != nil { + in, out := &in.MaxDeliveryAttempts, &out.MaxDeliveryAttempts + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetryPolicyInitParameters. +func (in *RetryPolicyInitParameters) DeepCopy() *RetryPolicyInitParameters { + if in == nil { + return nil + } + out := new(RetryPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RetryPolicyObservation) DeepCopyInto(out *RetryPolicyObservation) { + *out = *in + if in.EventTimeToLive != nil { + in, out := &in.EventTimeToLive, &out.EventTimeToLive + *out = new(float64) + **out = **in + } + if in.MaxDeliveryAttempts != nil { + in, out := &in.MaxDeliveryAttempts, &out.MaxDeliveryAttempts + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetryPolicyObservation. +func (in *RetryPolicyObservation) DeepCopy() *RetryPolicyObservation { + if in == nil { + return nil + } + out := new(RetryPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetryPolicyParameters) DeepCopyInto(out *RetryPolicyParameters) { + *out = *in + if in.EventTimeToLive != nil { + in, out := &in.EventTimeToLive, &out.EventTimeToLive + *out = new(float64) + **out = **in + } + if in.MaxDeliveryAttempts != nil { + in, out := &in.MaxDeliveryAttempts, &out.MaxDeliveryAttempts + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetryPolicyParameters. +func (in *RetryPolicyParameters) DeepCopy() *RetryPolicyParameters { + if in == nil { + return nil + } + out := new(RetryPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StorageBlobDeadLetterDestinationInitParameters) DeepCopyInto(out *StorageBlobDeadLetterDestinationInitParameters) { + *out = *in + if in.StorageAccountID != nil { + in, out := &in.StorageAccountID, &out.StorageAccountID + *out = new(string) + **out = **in + } + if in.StorageBlobContainerName != nil { + in, out := &in.StorageBlobContainerName, &out.StorageBlobContainerName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageBlobDeadLetterDestinationInitParameters. +func (in *StorageBlobDeadLetterDestinationInitParameters) DeepCopy() *StorageBlobDeadLetterDestinationInitParameters { + if in == nil { + return nil + } + out := new(StorageBlobDeadLetterDestinationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageBlobDeadLetterDestinationObservation) DeepCopyInto(out *StorageBlobDeadLetterDestinationObservation) { + *out = *in + if in.StorageAccountID != nil { + in, out := &in.StorageAccountID, &out.StorageAccountID + *out = new(string) + **out = **in + } + if in.StorageBlobContainerName != nil { + in, out := &in.StorageBlobContainerName, &out.StorageBlobContainerName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageBlobDeadLetterDestinationObservation. +func (in *StorageBlobDeadLetterDestinationObservation) DeepCopy() *StorageBlobDeadLetterDestinationObservation { + if in == nil { + return nil + } + out := new(StorageBlobDeadLetterDestinationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StorageBlobDeadLetterDestinationParameters) DeepCopyInto(out *StorageBlobDeadLetterDestinationParameters) { + *out = *in + if in.StorageAccountID != nil { + in, out := &in.StorageAccountID, &out.StorageAccountID + *out = new(string) + **out = **in + } + if in.StorageBlobContainerName != nil { + in, out := &in.StorageBlobContainerName, &out.StorageBlobContainerName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageBlobDeadLetterDestinationParameters. +func (in *StorageBlobDeadLetterDestinationParameters) DeepCopy() *StorageBlobDeadLetterDestinationParameters { + if in == nil { + return nil + } + out := new(StorageBlobDeadLetterDestinationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageQueueEndpointInitParameters) DeepCopyInto(out *StorageQueueEndpointInitParameters) { + *out = *in + if in.QueueMessageTimeToLiveInSeconds != nil { + in, out := &in.QueueMessageTimeToLiveInSeconds, &out.QueueMessageTimeToLiveInSeconds + *out = new(float64) + **out = **in + } + if in.QueueName != nil { + in, out := &in.QueueName, &out.QueueName + *out = new(string) + **out = **in + } + if in.QueueNameRef != nil { + in, out := &in.QueueNameRef, &out.QueueNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.QueueNameSelector != nil { + in, out := &in.QueueNameSelector, &out.QueueNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StorageAccountID != nil { + in, out := &in.StorageAccountID, &out.StorageAccountID + *out = new(string) + **out = **in + } + if in.StorageAccountIDRef != nil { + in, out := &in.StorageAccountIDRef, &out.StorageAccountIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StorageAccountIDSelector != nil { + in, out := &in.StorageAccountIDSelector, 
&out.StorageAccountIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageQueueEndpointInitParameters. +func (in *StorageQueueEndpointInitParameters) DeepCopy() *StorageQueueEndpointInitParameters { + if in == nil { + return nil + } + out := new(StorageQueueEndpointInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageQueueEndpointObservation) DeepCopyInto(out *StorageQueueEndpointObservation) { + *out = *in + if in.QueueMessageTimeToLiveInSeconds != nil { + in, out := &in.QueueMessageTimeToLiveInSeconds, &out.QueueMessageTimeToLiveInSeconds + *out = new(float64) + **out = **in + } + if in.QueueName != nil { + in, out := &in.QueueName, &out.QueueName + *out = new(string) + **out = **in + } + if in.StorageAccountID != nil { + in, out := &in.StorageAccountID, &out.StorageAccountID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageQueueEndpointObservation. +func (in *StorageQueueEndpointObservation) DeepCopy() *StorageQueueEndpointObservation { + if in == nil { + return nil + } + out := new(StorageQueueEndpointObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StorageQueueEndpointParameters) DeepCopyInto(out *StorageQueueEndpointParameters) { + *out = *in + if in.QueueMessageTimeToLiveInSeconds != nil { + in, out := &in.QueueMessageTimeToLiveInSeconds, &out.QueueMessageTimeToLiveInSeconds + *out = new(float64) + **out = **in + } + if in.QueueName != nil { + in, out := &in.QueueName, &out.QueueName + *out = new(string) + **out = **in + } + if in.QueueNameRef != nil { + in, out := &in.QueueNameRef, &out.QueueNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.QueueNameSelector != nil { + in, out := &in.QueueNameSelector, &out.QueueNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StorageAccountID != nil { + in, out := &in.StorageAccountID, &out.StorageAccountID + *out = new(string) + **out = **in + } + if in.StorageAccountIDRef != nil { + in, out := &in.StorageAccountIDRef, &out.StorageAccountIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StorageAccountIDSelector != nil { + in, out := &in.StorageAccountIDSelector, &out.StorageAccountIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageQueueEndpointParameters. +func (in *StorageQueueEndpointParameters) DeepCopy() *StorageQueueEndpointParameters { + if in == nil { + return nil + } + out := new(StorageQueueEndpointParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StringBeginsWithInitParameters) DeepCopyInto(out *StringBeginsWithInitParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StringBeginsWithInitParameters. +func (in *StringBeginsWithInitParameters) DeepCopy() *StringBeginsWithInitParameters { + if in == nil { + return nil + } + out := new(StringBeginsWithInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StringBeginsWithObservation) DeepCopyInto(out *StringBeginsWithObservation) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StringBeginsWithObservation. +func (in *StringBeginsWithObservation) DeepCopy() *StringBeginsWithObservation { + if in == nil { + return nil + } + out := new(StringBeginsWithObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StringBeginsWithParameters) DeepCopyInto(out *StringBeginsWithParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StringBeginsWithParameters. +func (in *StringBeginsWithParameters) DeepCopy() *StringBeginsWithParameters { + if in == nil { + return nil + } + out := new(StringBeginsWithParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StringContainsInitParameters) DeepCopyInto(out *StringContainsInitParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StringContainsInitParameters. +func (in *StringContainsInitParameters) DeepCopy() *StringContainsInitParameters { + if in == nil { + return nil + } + out := new(StringContainsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StringContainsObservation) DeepCopyInto(out *StringContainsObservation) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StringContainsObservation. +func (in *StringContainsObservation) DeepCopy() *StringContainsObservation { + if in == nil { + return nil + } + out := new(StringContainsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StringContainsParameters) DeepCopyInto(out *StringContainsParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StringContainsParameters. +func (in *StringContainsParameters) DeepCopy() *StringContainsParameters { + if in == nil { + return nil + } + out := new(StringContainsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StringEndsWithInitParameters) DeepCopyInto(out *StringEndsWithInitParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StringEndsWithInitParameters. +func (in *StringEndsWithInitParameters) DeepCopy() *StringEndsWithInitParameters { + if in == nil { + return nil + } + out := new(StringEndsWithInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StringEndsWithObservation) DeepCopyInto(out *StringEndsWithObservation) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StringEndsWithObservation. +func (in *StringEndsWithObservation) DeepCopy() *StringEndsWithObservation { + if in == nil { + return nil + } + out := new(StringEndsWithObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StringEndsWithParameters) DeepCopyInto(out *StringEndsWithParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StringEndsWithParameters. +func (in *StringEndsWithParameters) DeepCopy() *StringEndsWithParameters { + if in == nil { + return nil + } + out := new(StringEndsWithParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StringInInitParameters) DeepCopyInto(out *StringInInitParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StringInInitParameters. +func (in *StringInInitParameters) DeepCopy() *StringInInitParameters { + if in == nil { + return nil + } + out := new(StringInInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StringInObservation) DeepCopyInto(out *StringInObservation) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StringInObservation. +func (in *StringInObservation) DeepCopy() *StringInObservation { + if in == nil { + return nil + } + out := new(StringInObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StringInParameters) DeepCopyInto(out *StringInParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StringInParameters. +func (in *StringInParameters) DeepCopy() *StringInParameters { + if in == nil { + return nil + } + out := new(StringInParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StringNotBeginsWithInitParameters) DeepCopyInto(out *StringNotBeginsWithInitParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StringNotBeginsWithInitParameters. +func (in *StringNotBeginsWithInitParameters) DeepCopy() *StringNotBeginsWithInitParameters { + if in == nil { + return nil + } + out := new(StringNotBeginsWithInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StringNotBeginsWithObservation) DeepCopyInto(out *StringNotBeginsWithObservation) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StringNotBeginsWithObservation. +func (in *StringNotBeginsWithObservation) DeepCopy() *StringNotBeginsWithObservation { + if in == nil { + return nil + } + out := new(StringNotBeginsWithObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StringNotBeginsWithParameters) DeepCopyInto(out *StringNotBeginsWithParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StringNotBeginsWithParameters. +func (in *StringNotBeginsWithParameters) DeepCopy() *StringNotBeginsWithParameters { + if in == nil { + return nil + } + out := new(StringNotBeginsWithParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StringNotContainsInitParameters) DeepCopyInto(out *StringNotContainsInitParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StringNotContainsInitParameters. +func (in *StringNotContainsInitParameters) DeepCopy() *StringNotContainsInitParameters { + if in == nil { + return nil + } + out := new(StringNotContainsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StringNotContainsObservation) DeepCopyInto(out *StringNotContainsObservation) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StringNotContainsObservation. +func (in *StringNotContainsObservation) DeepCopy() *StringNotContainsObservation { + if in == nil { + return nil + } + out := new(StringNotContainsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StringNotContainsParameters) DeepCopyInto(out *StringNotContainsParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StringNotContainsParameters. +func (in *StringNotContainsParameters) DeepCopy() *StringNotContainsParameters { + if in == nil { + return nil + } + out := new(StringNotContainsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StringNotEndsWithInitParameters) DeepCopyInto(out *StringNotEndsWithInitParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StringNotEndsWithInitParameters. +func (in *StringNotEndsWithInitParameters) DeepCopy() *StringNotEndsWithInitParameters { + if in == nil { + return nil + } + out := new(StringNotEndsWithInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StringNotEndsWithObservation) DeepCopyInto(out *StringNotEndsWithObservation) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StringNotEndsWithObservation. +func (in *StringNotEndsWithObservation) DeepCopy() *StringNotEndsWithObservation { + if in == nil { + return nil + } + out := new(StringNotEndsWithObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StringNotEndsWithParameters) DeepCopyInto(out *StringNotEndsWithParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StringNotEndsWithParameters. +func (in *StringNotEndsWithParameters) DeepCopy() *StringNotEndsWithParameters { + if in == nil { + return nil + } + out := new(StringNotEndsWithParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StringNotInInitParameters) DeepCopyInto(out *StringNotInInitParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StringNotInInitParameters. +func (in *StringNotInInitParameters) DeepCopy() *StringNotInInitParameters { + if in == nil { + return nil + } + out := new(StringNotInInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StringNotInObservation) DeepCopyInto(out *StringNotInObservation) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StringNotInObservation. +func (in *StringNotInObservation) DeepCopy() *StringNotInObservation { + if in == nil { + return nil + } + out := new(StringNotInObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StringNotInParameters) DeepCopyInto(out *StringNotInParameters) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StringNotInParameters. +func (in *StringNotInParameters) DeepCopy() *StringNotInParameters { + if in == nil { + return nil + } + out := new(StringNotInParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SubjectFilterInitParameters) DeepCopyInto(out *SubjectFilterInitParameters) { + *out = *in + if in.CaseSensitive != nil { + in, out := &in.CaseSensitive, &out.CaseSensitive + *out = new(bool) + **out = **in + } + if in.SubjectBeginsWith != nil { + in, out := &in.SubjectBeginsWith, &out.SubjectBeginsWith + *out = new(string) + **out = **in + } + if in.SubjectEndsWith != nil { + in, out := &in.SubjectEndsWith, &out.SubjectEndsWith + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectFilterInitParameters. +func (in *SubjectFilterInitParameters) DeepCopy() *SubjectFilterInitParameters { + if in == nil { + return nil + } + out := new(SubjectFilterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubjectFilterObservation) DeepCopyInto(out *SubjectFilterObservation) { + *out = *in + if in.CaseSensitive != nil { + in, out := &in.CaseSensitive, &out.CaseSensitive + *out = new(bool) + **out = **in + } + if in.SubjectBeginsWith != nil { + in, out := &in.SubjectBeginsWith, &out.SubjectBeginsWith + *out = new(string) + **out = **in + } + if in.SubjectEndsWith != nil { + in, out := &in.SubjectEndsWith, &out.SubjectEndsWith + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectFilterObservation. +func (in *SubjectFilterObservation) DeepCopy() *SubjectFilterObservation { + if in == nil { + return nil + } + out := new(SubjectFilterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SubjectFilterParameters) DeepCopyInto(out *SubjectFilterParameters) { + *out = *in + if in.CaseSensitive != nil { + in, out := &in.CaseSensitive, &out.CaseSensitive + *out = new(bool) + **out = **in + } + if in.SubjectBeginsWith != nil { + in, out := &in.SubjectBeginsWith, &out.SubjectBeginsWith + *out = new(string) + **out = **in + } + if in.SubjectEndsWith != nil { + in, out := &in.SubjectEndsWith, &out.SubjectEndsWith + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectFilterParameters. +func (in *SubjectFilterParameters) DeepCopy() *SubjectFilterParameters { + if in == nil { + return nil + } + out := new(SubjectFilterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SystemTopic) DeepCopyInto(out *SystemTopic) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SystemTopic. +func (in *SystemTopic) DeepCopy() *SystemTopic { + if in == nil { + return nil + } + out := new(SystemTopic) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SystemTopic) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SystemTopicIdentityInitParameters) DeepCopyInto(out *SystemTopicIdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SystemTopicIdentityInitParameters. +func (in *SystemTopicIdentityInitParameters) DeepCopy() *SystemTopicIdentityInitParameters { + if in == nil { + return nil + } + out := new(SystemTopicIdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SystemTopicIdentityObservation) DeepCopyInto(out *SystemTopicIdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SystemTopicIdentityObservation. 
+func (in *SystemTopicIdentityObservation) DeepCopy() *SystemTopicIdentityObservation { + if in == nil { + return nil + } + out := new(SystemTopicIdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SystemTopicIdentityParameters) DeepCopyInto(out *SystemTopicIdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SystemTopicIdentityParameters. +func (in *SystemTopicIdentityParameters) DeepCopy() *SystemTopicIdentityParameters { + if in == nil { + return nil + } + out := new(SystemTopicIdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SystemTopicInitParameters) DeepCopyInto(out *SystemTopicInitParameters) { + *out = *in + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(SystemTopicIdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.SourceArmResourceID != nil { + in, out := &in.SourceArmResourceID, &out.SourceArmResourceID + *out = new(string) + **out = **in + } + if in.SourceArmResourceIDRef != nil { + in, out := &in.SourceArmResourceIDRef, &out.SourceArmResourceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SourceArmResourceIDSelector != nil { + in, out := &in.SourceArmResourceIDSelector, &out.SourceArmResourceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TopicType != nil { + in, out := &in.TopicType, &out.TopicType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SystemTopicInitParameters. +func (in *SystemTopicInitParameters) DeepCopy() *SystemTopicInitParameters { + if in == nil { + return nil + } + out := new(SystemTopicInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SystemTopicList) DeepCopyInto(out *SystemTopicList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SystemTopic, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SystemTopicList. +func (in *SystemTopicList) DeepCopy() *SystemTopicList { + if in == nil { + return nil + } + out := new(SystemTopicList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SystemTopicList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SystemTopicObservation) DeepCopyInto(out *SystemTopicObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(SystemTopicIdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MetricArmResourceID != nil { + in, out := &in.MetricArmResourceID, &out.MetricArmResourceID + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.SourceArmResourceID != nil { + in, out := &in.SourceArmResourceID, &out.SourceArmResourceID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := 
(*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TopicType != nil { + in, out := &in.TopicType, &out.TopicType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SystemTopicObservation. +func (in *SystemTopicObservation) DeepCopy() *SystemTopicObservation { + if in == nil { + return nil + } + out := new(SystemTopicObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SystemTopicParameters) DeepCopyInto(out *SystemTopicParameters) { + *out = *in + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(SystemTopicIdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SourceArmResourceID != nil { + in, out := &in.SourceArmResourceID, &out.SourceArmResourceID + *out = new(string) + **out = **in + } + if in.SourceArmResourceIDRef != nil { + in, out := &in.SourceArmResourceIDRef, &out.SourceArmResourceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SourceArmResourceIDSelector != nil { + in, out := &in.SourceArmResourceIDSelector, &out.SourceArmResourceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = 
make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TopicType != nil { + in, out := &in.TopicType, &out.TopicType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SystemTopicParameters. +func (in *SystemTopicParameters) DeepCopy() *SystemTopicParameters { + if in == nil { + return nil + } + out := new(SystemTopicParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SystemTopicSpec) DeepCopyInto(out *SystemTopicSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SystemTopicSpec. +func (in *SystemTopicSpec) DeepCopy() *SystemTopicSpec { + if in == nil { + return nil + } + out := new(SystemTopicSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SystemTopicStatus) DeepCopyInto(out *SystemTopicStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SystemTopicStatus. +func (in *SystemTopicStatus) DeepCopy() *SystemTopicStatus { + if in == nil { + return nil + } + out := new(SystemTopicStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Topic) DeepCopyInto(out *Topic) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Topic. +func (in *Topic) DeepCopy() *Topic { + if in == nil { + return nil + } + out := new(Topic) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Topic) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopicIdentityInitParameters) DeepCopyInto(out *TopicIdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicIdentityInitParameters. +func (in *TopicIdentityInitParameters) DeepCopy() *TopicIdentityInitParameters { + if in == nil { + return nil + } + out := new(TopicIdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TopicIdentityObservation) DeepCopyInto(out *TopicIdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicIdentityObservation. +func (in *TopicIdentityObservation) DeepCopy() *TopicIdentityObservation { + if in == nil { + return nil + } + out := new(TopicIdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopicIdentityParameters) DeepCopyInto(out *TopicIdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicIdentityParameters. +func (in *TopicIdentityParameters) DeepCopy() *TopicIdentityParameters { + if in == nil { + return nil + } + out := new(TopicIdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TopicInboundIPRuleInitParameters) DeepCopyInto(out *TopicInboundIPRuleInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.IPMask != nil { + in, out := &in.IPMask, &out.IPMask + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicInboundIPRuleInitParameters. +func (in *TopicInboundIPRuleInitParameters) DeepCopy() *TopicInboundIPRuleInitParameters { + if in == nil { + return nil + } + out := new(TopicInboundIPRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopicInboundIPRuleObservation) DeepCopyInto(out *TopicInboundIPRuleObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.IPMask != nil { + in, out := &in.IPMask, &out.IPMask + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicInboundIPRuleObservation. +func (in *TopicInboundIPRuleObservation) DeepCopy() *TopicInboundIPRuleObservation { + if in == nil { + return nil + } + out := new(TopicInboundIPRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopicInboundIPRuleParameters) DeepCopyInto(out *TopicInboundIPRuleParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.IPMask != nil { + in, out := &in.IPMask, &out.IPMask + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicInboundIPRuleParameters. 
+func (in *TopicInboundIPRuleParameters) DeepCopy() *TopicInboundIPRuleParameters { + if in == nil { + return nil + } + out := new(TopicInboundIPRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopicInitParameters) DeepCopyInto(out *TopicInitParameters) { + *out = *in + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(TopicIdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.InboundIPRule != nil { + in, out := &in.InboundIPRule, &out.InboundIPRule + *out = make([]TopicInboundIPRuleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InputMappingDefaultValues != nil { + in, out := &in.InputMappingDefaultValues, &out.InputMappingDefaultValues + *out = new(TopicInputMappingDefaultValuesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.InputMappingFields != nil { + in, out := &in.InputMappingFields, &out.InputMappingFields + *out = new(TopicInputMappingFieldsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.InputSchema != nil { + in, out := &in.InputSchema, &out.InputSchema + *out = new(string) + **out = **in + } + if in.LocalAuthEnabled != nil { + in, out := &in.LocalAuthEnabled, &out.LocalAuthEnabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an 
autogenerated deepcopy function, copying the receiver, creating a new TopicInitParameters. +func (in *TopicInitParameters) DeepCopy() *TopicInitParameters { + if in == nil { + return nil + } + out := new(TopicInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopicInputMappingDefaultValuesInitParameters) DeepCopyInto(out *TopicInputMappingDefaultValuesInitParameters) { + *out = *in + if in.DataVersion != nil { + in, out := &in.DataVersion, &out.DataVersion + *out = new(string) + **out = **in + } + if in.EventType != nil { + in, out := &in.EventType, &out.EventType + *out = new(string) + **out = **in + } + if in.Subject != nil { + in, out := &in.Subject, &out.Subject + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicInputMappingDefaultValuesInitParameters. +func (in *TopicInputMappingDefaultValuesInitParameters) DeepCopy() *TopicInputMappingDefaultValuesInitParameters { + if in == nil { + return nil + } + out := new(TopicInputMappingDefaultValuesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopicInputMappingDefaultValuesObservation) DeepCopyInto(out *TopicInputMappingDefaultValuesObservation) { + *out = *in + if in.DataVersion != nil { + in, out := &in.DataVersion, &out.DataVersion + *out = new(string) + **out = **in + } + if in.EventType != nil { + in, out := &in.EventType, &out.EventType + *out = new(string) + **out = **in + } + if in.Subject != nil { + in, out := &in.Subject, &out.Subject + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicInputMappingDefaultValuesObservation. 
+func (in *TopicInputMappingDefaultValuesObservation) DeepCopy() *TopicInputMappingDefaultValuesObservation { + if in == nil { + return nil + } + out := new(TopicInputMappingDefaultValuesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopicInputMappingDefaultValuesParameters) DeepCopyInto(out *TopicInputMappingDefaultValuesParameters) { + *out = *in + if in.DataVersion != nil { + in, out := &in.DataVersion, &out.DataVersion + *out = new(string) + **out = **in + } + if in.EventType != nil { + in, out := &in.EventType, &out.EventType + *out = new(string) + **out = **in + } + if in.Subject != nil { + in, out := &in.Subject, &out.Subject + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicInputMappingDefaultValuesParameters. +func (in *TopicInputMappingDefaultValuesParameters) DeepCopy() *TopicInputMappingDefaultValuesParameters { + if in == nil { + return nil + } + out := new(TopicInputMappingDefaultValuesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TopicInputMappingFieldsInitParameters) DeepCopyInto(out *TopicInputMappingFieldsInitParameters) { + *out = *in + if in.DataVersion != nil { + in, out := &in.DataVersion, &out.DataVersion + *out = new(string) + **out = **in + } + if in.EventTime != nil { + in, out := &in.EventTime, &out.EventTime + *out = new(string) + **out = **in + } + if in.EventType != nil { + in, out := &in.EventType, &out.EventType + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Subject != nil { + in, out := &in.Subject, &out.Subject + *out = new(string) + **out = **in + } + if in.Topic != nil { + in, out := &in.Topic, &out.Topic + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicInputMappingFieldsInitParameters. +func (in *TopicInputMappingFieldsInitParameters) DeepCopy() *TopicInputMappingFieldsInitParameters { + if in == nil { + return nil + } + out := new(TopicInputMappingFieldsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TopicInputMappingFieldsObservation) DeepCopyInto(out *TopicInputMappingFieldsObservation) { + *out = *in + if in.DataVersion != nil { + in, out := &in.DataVersion, &out.DataVersion + *out = new(string) + **out = **in + } + if in.EventTime != nil { + in, out := &in.EventTime, &out.EventTime + *out = new(string) + **out = **in + } + if in.EventType != nil { + in, out := &in.EventType, &out.EventType + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Subject != nil { + in, out := &in.Subject, &out.Subject + *out = new(string) + **out = **in + } + if in.Topic != nil { + in, out := &in.Topic, &out.Topic + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicInputMappingFieldsObservation. +func (in *TopicInputMappingFieldsObservation) DeepCopy() *TopicInputMappingFieldsObservation { + if in == nil { + return nil + } + out := new(TopicInputMappingFieldsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TopicInputMappingFieldsParameters) DeepCopyInto(out *TopicInputMappingFieldsParameters) { + *out = *in + if in.DataVersion != nil { + in, out := &in.DataVersion, &out.DataVersion + *out = new(string) + **out = **in + } + if in.EventTime != nil { + in, out := &in.EventTime, &out.EventTime + *out = new(string) + **out = **in + } + if in.EventType != nil { + in, out := &in.EventType, &out.EventType + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Subject != nil { + in, out := &in.Subject, &out.Subject + *out = new(string) + **out = **in + } + if in.Topic != nil { + in, out := &in.Topic, &out.Topic + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicInputMappingFieldsParameters. +func (in *TopicInputMappingFieldsParameters) DeepCopy() *TopicInputMappingFieldsParameters { + if in == nil { + return nil + } + out := new(TopicInputMappingFieldsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopicList) DeepCopyInto(out *TopicList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Topic, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicList. +func (in *TopicList) DeepCopy() *TopicList { + if in == nil { + return nil + } + out := new(TopicList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *TopicList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopicObservation) DeepCopyInto(out *TopicObservation) { + *out = *in + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(TopicIdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.InboundIPRule != nil { + in, out := &in.InboundIPRule, &out.InboundIPRule + *out = make([]TopicInboundIPRuleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InputMappingDefaultValues != nil { + in, out := &in.InputMappingDefaultValues, &out.InputMappingDefaultValues + *out = new(TopicInputMappingDefaultValuesObservation) + (*in).DeepCopyInto(*out) + } + if in.InputMappingFields != nil { + in, out := &in.InputMappingFields, &out.InputMappingFields + *out = new(TopicInputMappingFieldsObservation) + (*in).DeepCopyInto(*out) + } + if in.InputSchema != nil { + in, out := &in.InputSchema, &out.InputSchema + *out = new(string) + **out = **in + } + if in.LocalAuthEnabled != nil { + in, out := &in.LocalAuthEnabled, &out.LocalAuthEnabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, 
val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicObservation. +func (in *TopicObservation) DeepCopy() *TopicObservation { + if in == nil { + return nil + } + out := new(TopicObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopicParameters) DeepCopyInto(out *TopicParameters) { + *out = *in + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(TopicIdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.InboundIPRule != nil { + in, out := &in.InboundIPRule, &out.InboundIPRule + *out = make([]TopicInboundIPRuleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InputMappingDefaultValues != nil { + in, out := &in.InputMappingDefaultValues, &out.InputMappingDefaultValues + *out = new(TopicInputMappingDefaultValuesParameters) + (*in).DeepCopyInto(*out) + } + if in.InputMappingFields != nil { + in, out := &in.InputMappingFields, &out.InputMappingFields + *out = new(TopicInputMappingFieldsParameters) + (*in).DeepCopyInto(*out) + } + if in.InputSchema != nil { + in, out := &in.InputSchema, &out.InputSchema + *out = new(string) + **out = **in + } + if in.LocalAuthEnabled != nil { + in, out := &in.LocalAuthEnabled, &out.LocalAuthEnabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, 
&out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicParameters. +func (in *TopicParameters) DeepCopy() *TopicParameters { + if in == nil { + return nil + } + out := new(TopicParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopicSpec) DeepCopyInto(out *TopicSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicSpec. +func (in *TopicSpec) DeepCopy() *TopicSpec { + if in == nil { + return nil + } + out := new(TopicSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopicStatus) DeepCopyInto(out *TopicStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicStatus. 
+func (in *TopicStatus) DeepCopy() *TopicStatus { + if in == nil { + return nil + } + out := new(TopicStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookEndpointInitParameters) DeepCopyInto(out *WebhookEndpointInitParameters) { + *out = *in + if in.ActiveDirectoryAppIDOrURI != nil { + in, out := &in.ActiveDirectoryAppIDOrURI, &out.ActiveDirectoryAppIDOrURI + *out = new(string) + **out = **in + } + if in.ActiveDirectoryTenantID != nil { + in, out := &in.ActiveDirectoryTenantID, &out.ActiveDirectoryTenantID + *out = new(string) + **out = **in + } + if in.MaxEventsPerBatch != nil { + in, out := &in.MaxEventsPerBatch, &out.MaxEventsPerBatch + *out = new(float64) + **out = **in + } + if in.PreferredBatchSizeInKilobytes != nil { + in, out := &in.PreferredBatchSizeInKilobytes, &out.PreferredBatchSizeInKilobytes + *out = new(float64) + **out = **in + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookEndpointInitParameters. +func (in *WebhookEndpointInitParameters) DeepCopy() *WebhookEndpointInitParameters { + if in == nil { + return nil + } + out := new(WebhookEndpointInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WebhookEndpointObservation) DeepCopyInto(out *WebhookEndpointObservation) { + *out = *in + if in.ActiveDirectoryAppIDOrURI != nil { + in, out := &in.ActiveDirectoryAppIDOrURI, &out.ActiveDirectoryAppIDOrURI + *out = new(string) + **out = **in + } + if in.ActiveDirectoryTenantID != nil { + in, out := &in.ActiveDirectoryTenantID, &out.ActiveDirectoryTenantID + *out = new(string) + **out = **in + } + if in.BaseURL != nil { + in, out := &in.BaseURL, &out.BaseURL + *out = new(string) + **out = **in + } + if in.MaxEventsPerBatch != nil { + in, out := &in.MaxEventsPerBatch, &out.MaxEventsPerBatch + *out = new(float64) + **out = **in + } + if in.PreferredBatchSizeInKilobytes != nil { + in, out := &in.PreferredBatchSizeInKilobytes, &out.PreferredBatchSizeInKilobytes + *out = new(float64) + **out = **in + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookEndpointObservation. +func (in *WebhookEndpointObservation) DeepCopy() *WebhookEndpointObservation { + if in == nil { + return nil + } + out := new(WebhookEndpointObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WebhookEndpointParameters) DeepCopyInto(out *WebhookEndpointParameters) { + *out = *in + if in.ActiveDirectoryAppIDOrURI != nil { + in, out := &in.ActiveDirectoryAppIDOrURI, &out.ActiveDirectoryAppIDOrURI + *out = new(string) + **out = **in + } + if in.ActiveDirectoryTenantID != nil { + in, out := &in.ActiveDirectoryTenantID, &out.ActiveDirectoryTenantID + *out = new(string) + **out = **in + } + if in.MaxEventsPerBatch != nil { + in, out := &in.MaxEventsPerBatch, &out.MaxEventsPerBatch + *out = new(float64) + **out = **in + } + if in.PreferredBatchSizeInKilobytes != nil { + in, out := &in.PreferredBatchSizeInKilobytes, &out.PreferredBatchSizeInKilobytes + *out = new(float64) + **out = **in + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookEndpointParameters. +func (in *WebhookEndpointParameters) DeepCopy() *WebhookEndpointParameters { + if in == nil { + return nil + } + out := new(WebhookEndpointParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/eventgrid/v1beta2/zz_generated.managed.go b/apis/eventgrid/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..335df2a14 --- /dev/null +++ b/apis/eventgrid/v1beta2/zz_generated.managed.go @@ -0,0 +1,248 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Domain. +func (mg *Domain) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Domain. +func (mg *Domain) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Domain. 
+func (mg *Domain) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Domain. +func (mg *Domain) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Domain. +func (mg *Domain) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Domain. +func (mg *Domain) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Domain. +func (mg *Domain) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Domain. +func (mg *Domain) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Domain. +func (mg *Domain) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Domain. +func (mg *Domain) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Domain. +func (mg *Domain) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Domain. +func (mg *Domain) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this EventSubscription. +func (mg *EventSubscription) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this EventSubscription. +func (mg *EventSubscription) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this EventSubscription. 
+func (mg *EventSubscription) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this EventSubscription. +func (mg *EventSubscription) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this EventSubscription. +func (mg *EventSubscription) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this EventSubscription. +func (mg *EventSubscription) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this EventSubscription. +func (mg *EventSubscription) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this EventSubscription. +func (mg *EventSubscription) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this EventSubscription. +func (mg *EventSubscription) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this EventSubscription. +func (mg *EventSubscription) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this EventSubscription. +func (mg *EventSubscription) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this EventSubscription. +func (mg *EventSubscription) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this SystemTopic. +func (mg *SystemTopic) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this SystemTopic. 
+func (mg *SystemTopic) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this SystemTopic. +func (mg *SystemTopic) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this SystemTopic. +func (mg *SystemTopic) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this SystemTopic. +func (mg *SystemTopic) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this SystemTopic. +func (mg *SystemTopic) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this SystemTopic. +func (mg *SystemTopic) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this SystemTopic. +func (mg *SystemTopic) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this SystemTopic. +func (mg *SystemTopic) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this SystemTopic. +func (mg *SystemTopic) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this SystemTopic. +func (mg *SystemTopic) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this SystemTopic. +func (mg *SystemTopic) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Topic. 
+func (mg *Topic) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Topic. +func (mg *Topic) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Topic. +func (mg *Topic) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Topic. +func (mg *Topic) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Topic. +func (mg *Topic) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Topic. +func (mg *Topic) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Topic. +func (mg *Topic) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Topic. +func (mg *Topic) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Topic. +func (mg *Topic) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Topic. +func (mg *Topic) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Topic. +func (mg *Topic) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Topic. 
+func (mg *Topic) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/eventgrid/v1beta2/zz_generated.managedlist.go b/apis/eventgrid/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..e818cd8d6 --- /dev/null +++ b/apis/eventgrid/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,44 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this DomainList. +func (l *DomainList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this EventSubscriptionList. +func (l *EventSubscriptionList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this SystemTopicList. +func (l *SystemTopicList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this TopicList. +func (l *TopicList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/eventgrid/v1beta2/zz_generated.resolvers.go b/apis/eventgrid/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..4e06618dd --- /dev/null +++ b/apis/eventgrid/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,285 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + client "sigs.k8s.io/controller-runtime/pkg/client" + + // ResolveReferences of this Domain. + apisresolver "github.com/upbound/provider-azure/internal/apis" +) + +func (mg *Domain) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this EventSubscription. 
+func (mg *EventSubscription) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Scope), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.ScopeRef, + Selector: mg.Spec.ForProvider.ScopeSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Scope") + } + mg.Spec.ForProvider.Scope = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ScopeRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.StorageQueueEndpoint != nil { + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Queue", "QueueList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StorageQueueEndpoint.QueueName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.StorageQueueEndpoint.QueueNameRef, + Selector: mg.Spec.ForProvider.StorageQueueEndpoint.QueueNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StorageQueueEndpoint.QueueName") + } + mg.Spec.ForProvider.StorageQueueEndpoint.QueueName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StorageQueueEndpoint.QueueNameRef = rsp.ResolvedReference + + } + if 
mg.Spec.ForProvider.StorageQueueEndpoint != nil { + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StorageQueueEndpoint.StorageAccountID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.StorageQueueEndpoint.StorageAccountIDRef, + Selector: mg.Spec.ForProvider.StorageQueueEndpoint.StorageAccountIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StorageQueueEndpoint.StorageAccountID") + } + mg.Spec.ForProvider.StorageQueueEndpoint.StorageAccountID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StorageQueueEndpoint.StorageAccountIDRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Scope), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.ScopeRef, + Selector: mg.Spec.InitProvider.ScopeSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Scope") + } + mg.Spec.InitProvider.Scope = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ScopeRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.StorageQueueEndpoint != nil { + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Queue", "QueueList") + if err != nil { + return errors.Wrap(err, 
"failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StorageQueueEndpoint.QueueName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.StorageQueueEndpoint.QueueNameRef, + Selector: mg.Spec.InitProvider.StorageQueueEndpoint.QueueNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StorageQueueEndpoint.QueueName") + } + mg.Spec.InitProvider.StorageQueueEndpoint.QueueName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StorageQueueEndpoint.QueueNameRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.StorageQueueEndpoint != nil { + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StorageQueueEndpoint.StorageAccountID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.StorageQueueEndpoint.StorageAccountIDRef, + Selector: mg.Spec.InitProvider.StorageQueueEndpoint.StorageAccountIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StorageQueueEndpoint.StorageAccountID") + } + mg.Spec.InitProvider.StorageQueueEndpoint.StorageAccountID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StorageQueueEndpoint.StorageAccountIDRef = rsp.ResolvedReference + + } + + return nil +} + +// ResolveReferences of this SystemTopic. 
+func (mg *SystemTopic) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SourceArmResourceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.SourceArmResourceIDRef, + Selector: mg.Spec.ForProvider.SourceArmResourceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SourceArmResourceID") + } + mg.Spec.ForProvider.SourceArmResourceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SourceArmResourceIDRef = rsp.ResolvedReference + { + m, l, err = 
apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SourceArmResourceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.SourceArmResourceIDRef, + Selector: mg.Spec.InitProvider.SourceArmResourceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SourceArmResourceID") + } + mg.Spec.InitProvider.SourceArmResourceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SourceArmResourceIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Topic. +func (mg *Topic) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} diff --git 
a/apis/eventgrid/v1beta2/zz_groupversion_info.go b/apis/eventgrid/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..f6c3d24a8 --- /dev/null +++ b/apis/eventgrid/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=eventgrid.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "eventgrid.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/eventgrid/v1beta2/zz_systemtopic_terraformed.go b/apis/eventgrid/v1beta2/zz_systemtopic_terraformed.go new file mode 100755 index 000000000..8308a18ab --- /dev/null +++ b/apis/eventgrid/v1beta2/zz_systemtopic_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this SystemTopic +func (mg *SystemTopic) GetTerraformResourceType() string { + return "azurerm_eventgrid_system_topic" +} + +// GetConnectionDetailsMapping for this SystemTopic +func (tr *SystemTopic) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this SystemTopic +func (tr *SystemTopic) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this SystemTopic +func (tr *SystemTopic) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this SystemTopic +func (tr *SystemTopic) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this SystemTopic +func (tr *SystemTopic) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this SystemTopic +func (tr *SystemTopic) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this SystemTopic +func (tr *SystemTopic) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := 
map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this SystemTopic +func (tr *SystemTopic) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this SystemTopic using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *SystemTopic) LateInitialize(attrs []byte) (bool, error) { + params := &SystemTopicParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *SystemTopic) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/eventgrid/v1beta2/zz_systemtopic_types.go b/apis/eventgrid/v1beta2/zz_systemtopic_types.go new file mode 100755 index 000000000..b23ecdfc1 --- /dev/null +++ b/apis/eventgrid/v1beta2/zz_systemtopic_types.go @@ -0,0 +1,218 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type SystemTopicIdentityInitParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Event Grid System Topic. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Event Grid System Topic. Possible values are SystemAssigned, UserAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type SystemTopicIdentityObservation struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Event Grid System Topic. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Principal ID associated with this Managed Service Identity. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID associated with this Managed Service Identity. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Event Grid System Topic. Possible values are SystemAssigned, UserAssigned. 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type SystemTopicIdentityParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Event Grid System Topic. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Event Grid System Topic. Possible values are SystemAssigned, UserAssigned. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type SystemTopicInitParameters struct { + + // An identity block as defined below. + Identity *SystemTopicIdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The Azure Region where the Event Grid System Topic should exist. Changing this forces a new Event Grid System Topic to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The ID of the Event Grid System Topic ARM Source. Changing this forces a new Event Grid System Topic to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + SourceArmResourceID *string `json:"sourceArmResourceId,omitempty" tf:"source_arm_resource_id,omitempty"` + + // Reference to a Account in storage to populate sourceArmResourceId. + // +kubebuilder:validation:Optional + SourceArmResourceIDRef *v1.Reference `json:"sourceArmResourceIdRef,omitempty" tf:"-"` + + // Selector for a Account in storage to populate sourceArmResourceId. + // +kubebuilder:validation:Optional + SourceArmResourceIDSelector *v1.Selector `json:"sourceArmResourceIdSelector,omitempty" tf:"-"` + + // A mapping of tags which should be assigned to the Event Grid System Topic. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The Topic Type of the Event Grid System Topic. The topic type is validated by Azure and there may be additional topic types beyond the following: Microsoft.AppConfiguration.ConfigurationStores, Microsoft.Communication.CommunicationServices, Microsoft.ContainerRegistry.Registries, Microsoft.Devices.IoTHubs, Microsoft.EventGrid.Domains, Microsoft.EventGrid.Topics, Microsoft.Eventhub.Namespaces, Microsoft.KeyVault.vaults, Microsoft.MachineLearningServices.Workspaces, Microsoft.Maps.Accounts, Microsoft.Media.MediaServices, Microsoft.Resources.ResourceGroups, Microsoft.Resources.Subscriptions, Microsoft.ServiceBus.Namespaces, Microsoft.SignalRService.SignalR, Microsoft.Storage.StorageAccounts, Microsoft.Web.ServerFarms and Microsoft.Web.Sites. Changing this forces a new Event Grid System Topic to be created. + TopicType *string `json:"topicType,omitempty" tf:"topic_type,omitempty"` +} + +type SystemTopicObservation struct { + + // The ID of the Event Grid System Topic. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. + Identity *SystemTopicIdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // The Azure Region where the Event Grid System Topic should exist. Changing this forces a new Event Grid System Topic to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The Metric ARM Resource ID of the Event Grid System Topic. + MetricArmResourceID *string `json:"metricArmResourceId,omitempty" tf:"metric_arm_resource_id,omitempty"` + + // The name of the Resource Group where the Event Grid System Topic should exist. Changing this forces a new Event Grid System Topic to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // The ID of the Event Grid System Topic ARM Source. 
Changing this forces a new Event Grid System Topic to be created. + SourceArmResourceID *string `json:"sourceArmResourceId,omitempty" tf:"source_arm_resource_id,omitempty"` + + // A mapping of tags which should be assigned to the Event Grid System Topic. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The Topic Type of the Event Grid System Topic. The topic type is validated by Azure and there may be additional topic types beyond the following: Microsoft.AppConfiguration.ConfigurationStores, Microsoft.Communication.CommunicationServices, Microsoft.ContainerRegistry.Registries, Microsoft.Devices.IoTHubs, Microsoft.EventGrid.Domains, Microsoft.EventGrid.Topics, Microsoft.Eventhub.Namespaces, Microsoft.KeyVault.vaults, Microsoft.MachineLearningServices.Workspaces, Microsoft.Maps.Accounts, Microsoft.Media.MediaServices, Microsoft.Resources.ResourceGroups, Microsoft.Resources.Subscriptions, Microsoft.ServiceBus.Namespaces, Microsoft.SignalRService.SignalR, Microsoft.Storage.StorageAccounts, Microsoft.Web.ServerFarms and Microsoft.Web.Sites. Changing this forces a new Event Grid System Topic to be created. + TopicType *string `json:"topicType,omitempty" tf:"topic_type,omitempty"` +} + +type SystemTopicParameters struct { + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *SystemTopicIdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The Azure Region where the Event Grid System Topic should exist. Changing this forces a new Event Grid System Topic to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the Resource Group where the Event Grid System Topic should exist. Changing this forces a new Event Grid System Topic to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // The ID of the Event Grid System Topic ARM Source. Changing this forces a new Event Grid System Topic to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + SourceArmResourceID *string `json:"sourceArmResourceId,omitempty" tf:"source_arm_resource_id,omitempty"` + + // Reference to a Account in storage to populate sourceArmResourceId. + // +kubebuilder:validation:Optional + SourceArmResourceIDRef *v1.Reference `json:"sourceArmResourceIdRef,omitempty" tf:"-"` + + // Selector for a Account in storage to populate sourceArmResourceId. + // +kubebuilder:validation:Optional + SourceArmResourceIDSelector *v1.Selector `json:"sourceArmResourceIdSelector,omitempty" tf:"-"` + + // A mapping of tags which should be assigned to the Event Grid System Topic. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The Topic Type of the Event Grid System Topic. 
The topic type is validated by Azure and there may be additional topic types beyond the following: Microsoft.AppConfiguration.ConfigurationStores, Microsoft.Communication.CommunicationServices, Microsoft.ContainerRegistry.Registries, Microsoft.Devices.IoTHubs, Microsoft.EventGrid.Domains, Microsoft.EventGrid.Topics, Microsoft.Eventhub.Namespaces, Microsoft.KeyVault.vaults, Microsoft.MachineLearningServices.Workspaces, Microsoft.Maps.Accounts, Microsoft.Media.MediaServices, Microsoft.Resources.ResourceGroups, Microsoft.Resources.Subscriptions, Microsoft.ServiceBus.Namespaces, Microsoft.SignalRService.SignalR, Microsoft.Storage.StorageAccounts, Microsoft.Web.ServerFarms and Microsoft.Web.Sites. Changing this forces a new Event Grid System Topic to be created. + // +kubebuilder:validation:Optional + TopicType *string `json:"topicType,omitempty" tf:"topic_type,omitempty"` +} + +// SystemTopicSpec defines the desired state of SystemTopic +type SystemTopicSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider SystemTopicParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider SystemTopicInitParameters `json:"initProvider,omitempty"` +} + +// SystemTopicStatus defines the observed state of SystemTopic. 
+type SystemTopicStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SystemTopicObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// SystemTopic is the Schema for the SystemTopics API. Manages an Event Grid System Topic +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type SystemTopic struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.topicType) || (has(self.initProvider) && has(self.initProvider.topicType))",message="spec.forProvider.topicType is a required parameter" + Spec SystemTopicSpec `json:"spec"` + Status SystemTopicStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SystemTopicList contains a list of SystemTopics +type SystemTopicList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SystemTopic `json:"items"` +} + +// Repository type metadata. 
+var ( + SystemTopic_Kind = "SystemTopic" + SystemTopic_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: SystemTopic_Kind}.String() + SystemTopic_KindAPIVersion = SystemTopic_Kind + "." + CRDGroupVersion.String() + SystemTopic_GroupVersionKind = CRDGroupVersion.WithKind(SystemTopic_Kind) +) + +func init() { + SchemeBuilder.Register(&SystemTopic{}, &SystemTopicList{}) +} diff --git a/apis/eventgrid/v1beta2/zz_topic_terraformed.go b/apis/eventgrid/v1beta2/zz_topic_terraformed.go new file mode 100755 index 000000000..788b52fe6 --- /dev/null +++ b/apis/eventgrid/v1beta2/zz_topic_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Topic +func (mg *Topic) GetTerraformResourceType() string { + return "azurerm_eventgrid_topic" +} + +// GetConnectionDetailsMapping for this Topic +func (tr *Topic) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"primary_access_key": "status.atProvider.primaryAccessKey", "secondary_access_key": "status.atProvider.secondaryAccessKey"} +} + +// GetObservation of this Topic +func (tr *Topic) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Topic +func (tr *Topic) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Topic +func (tr *Topic) GetID() string { + if 
tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Topic +func (tr *Topic) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Topic +func (tr *Topic) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Topic +func (tr *Topic) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Topic +func (tr *Topic) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Topic using its observed tfState. 
+// returns True if there are any spec changes for the resource. +func (tr *Topic) LateInitialize(attrs []byte) (bool, error) { + params := &TopicParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Topic) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/eventgrid/v1beta2/zz_topic_types.go b/apis/eventgrid/v1beta2/zz_topic_types.go new file mode 100755 index 000000000..045da87b5 --- /dev/null +++ b/apis/eventgrid/v1beta2/zz_topic_types.go @@ -0,0 +1,374 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type TopicIdentityInitParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Event Grid Topic. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Event Grid Topic. Possible values are SystemAssigned, UserAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type TopicIdentityObservation struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Event Grid Topic. 
+ // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Principal ID associated with this Managed Service Identity. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID associated with this Managed Service Identity. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Event Grid Topic. Possible values are SystemAssigned, UserAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type TopicIdentityParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Event Grid Topic. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Event Grid Topic. Possible values are SystemAssigned, UserAssigned. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type TopicInboundIPRuleInitParameters struct { + + // The action to take when the rule is matched. Possible values are Allow. Defaults to Allow. + Action *string `json:"action,omitempty" tf:"action"` + + // The IP mask (CIDR) to match on. + IPMask *string `json:"ipMask,omitempty" tf:"ip_mask"` +} + +type TopicInboundIPRuleObservation struct { + + // The action to take when the rule is matched. Possible values are Allow. Defaults to Allow. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The IP mask (CIDR) to match on. + IPMask *string `json:"ipMask,omitempty" tf:"ip_mask,omitempty"` +} + +type TopicInboundIPRuleParameters struct { + + // The action to take when the rule is matched. Possible values are Allow. Defaults to Allow. 
+ // +kubebuilder:validation:Optional + Action *string `json:"action,omitempty" tf:"action"` + + // The IP mask (CIDR) to match on. + // +kubebuilder:validation:Optional + IPMask *string `json:"ipMask,omitempty" tf:"ip_mask"` +} + +type TopicInitParameters struct { + + // An identity block as defined below. + Identity *TopicIdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // One or more inbound_ip_rule blocks as defined below. + InboundIPRule []TopicInboundIPRuleInitParameters `json:"inboundIpRule,omitempty" tf:"inbound_ip_rule,omitempty"` + + // A input_mapping_default_values block as defined below. Changing this forces a new resource to be created. + InputMappingDefaultValues *TopicInputMappingDefaultValuesInitParameters `json:"inputMappingDefaultValues,omitempty" tf:"input_mapping_default_values,omitempty"` + + // A input_mapping_fields block as defined below. Changing this forces a new resource to be created. + InputMappingFields *TopicInputMappingFieldsInitParameters `json:"inputMappingFields,omitempty" tf:"input_mapping_fields,omitempty"` + + // Specifies the schema in which incoming events will be published to this domain. Allowed values are CloudEventSchemaV1_0, CustomEventSchema, or EventGridSchema. Defaults to EventGridSchema. Changing this forces a new resource to be created. + InputSchema *string `json:"inputSchema,omitempty" tf:"input_schema,omitempty"` + + // Whether local authentication methods is enabled for the EventGrid Topic. Defaults to true. + LocalAuthEnabled *bool `json:"localAuthEnabled,omitempty" tf:"local_auth_enabled,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Whether or not public network access is allowed for this server. Defaults to true. 
+ PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type TopicInputMappingDefaultValuesInitParameters struct { + + // Specifies the default data version of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. + DataVersion *string `json:"dataVersion,omitempty" tf:"data_version,omitempty"` + + // Specifies the default event type of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. + EventType *string `json:"eventType,omitempty" tf:"event_type,omitempty"` + + // Specifies the default subject of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. + Subject *string `json:"subject,omitempty" tf:"subject,omitempty"` +} + +type TopicInputMappingDefaultValuesObservation struct { + + // Specifies the default data version of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. + DataVersion *string `json:"dataVersion,omitempty" tf:"data_version,omitempty"` + + // Specifies the default event type of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. + EventType *string `json:"eventType,omitempty" tf:"event_type,omitempty"` + + // Specifies the default subject of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. + Subject *string `json:"subject,omitempty" tf:"subject,omitempty"` +} + +type TopicInputMappingDefaultValuesParameters struct { + + // Specifies the default data version of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + DataVersion *string `json:"dataVersion,omitempty" tf:"data_version,omitempty"` + + // Specifies the default event type of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + EventType *string `json:"eventType,omitempty" tf:"event_type,omitempty"` + + // Specifies the default subject of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Subject *string `json:"subject,omitempty" tf:"subject,omitempty"` +} + +type TopicInputMappingFieldsInitParameters struct { + + // Specifies the data version of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. + DataVersion *string `json:"dataVersion,omitempty" tf:"data_version,omitempty"` + + // Specifies the event time of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. + EventTime *string `json:"eventTime,omitempty" tf:"event_time,omitempty"` + + // Specifies the event type of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. + EventType *string `json:"eventType,omitempty" tf:"event_type,omitempty"` + + // Specifies the id of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies the subject of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. + Subject *string `json:"subject,omitempty" tf:"subject,omitempty"` + + // Specifies the topic of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. 
+ Topic *string `json:"topic,omitempty" tf:"topic,omitempty"` +} + +type TopicInputMappingFieldsObservation struct { + + // Specifies the data version of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. + DataVersion *string `json:"dataVersion,omitempty" tf:"data_version,omitempty"` + + // Specifies the event time of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. + EventTime *string `json:"eventTime,omitempty" tf:"event_time,omitempty"` + + // Specifies the event type of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. + EventType *string `json:"eventType,omitempty" tf:"event_type,omitempty"` + + // Specifies the id of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies the subject of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. + Subject *string `json:"subject,omitempty" tf:"subject,omitempty"` + + // Specifies the topic of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. + Topic *string `json:"topic,omitempty" tf:"topic,omitempty"` +} + +type TopicInputMappingFieldsParameters struct { + + // Specifies the data version of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + DataVersion *string `json:"dataVersion,omitempty" tf:"data_version,omitempty"` + + // Specifies the event time of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + EventTime *string `json:"eventTime,omitempty" tf:"event_time,omitempty"` + + // Specifies the event type of the EventGrid Event to associate with the domain. 
Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + EventType *string `json:"eventType,omitempty" tf:"event_type,omitempty"` + + // Specifies the id of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies the subject of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Subject *string `json:"subject,omitempty" tf:"subject,omitempty"` + + // Specifies the topic of the EventGrid Event to associate with the domain. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Topic *string `json:"topic,omitempty" tf:"topic,omitempty"` +} + +type TopicObservation struct { + + // The Endpoint associated with the EventGrid Topic. + Endpoint *string `json:"endpoint,omitempty" tf:"endpoint,omitempty"` + + // The EventGrid Topic ID. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. + Identity *TopicIdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // One or more inbound_ip_rule blocks as defined below. + InboundIPRule []TopicInboundIPRuleObservation `json:"inboundIpRule,omitempty" tf:"inbound_ip_rule,omitempty"` + + // A input_mapping_default_values block as defined below. Changing this forces a new resource to be created. + InputMappingDefaultValues *TopicInputMappingDefaultValuesObservation `json:"inputMappingDefaultValues,omitempty" tf:"input_mapping_default_values,omitempty"` + + // A input_mapping_fields block as defined below. Changing this forces a new resource to be created. 
+ InputMappingFields *TopicInputMappingFieldsObservation `json:"inputMappingFields,omitempty" tf:"input_mapping_fields,omitempty"` + + // Specifies the schema in which incoming events will be published to this domain. Allowed values are CloudEventSchemaV1_0, CustomEventSchema, or EventGridSchema. Defaults to EventGridSchema. Changing this forces a new resource to be created. + InputSchema *string `json:"inputSchema,omitempty" tf:"input_schema,omitempty"` + + // Whether local authentication methods is enabled for the EventGrid Topic. Defaults to true. + LocalAuthEnabled *bool `json:"localAuthEnabled,omitempty" tf:"local_auth_enabled,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Whether or not public network access is allowed for this server. Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The name of the resource group in which the EventGrid Topic exists. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type TopicParameters struct { + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *TopicIdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // One or more inbound_ip_rule blocks as defined below. + // +kubebuilder:validation:Optional + InboundIPRule []TopicInboundIPRuleParameters `json:"inboundIpRule,omitempty" tf:"inbound_ip_rule,omitempty"` + + // A input_mapping_default_values block as defined below. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + InputMappingDefaultValues *TopicInputMappingDefaultValuesParameters `json:"inputMappingDefaultValues,omitempty" tf:"input_mapping_default_values,omitempty"` + + // A input_mapping_fields block as defined below. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + InputMappingFields *TopicInputMappingFieldsParameters `json:"inputMappingFields,omitempty" tf:"input_mapping_fields,omitempty"` + + // Specifies the schema in which incoming events will be published to this domain. Allowed values are CloudEventSchemaV1_0, CustomEventSchema, or EventGridSchema. Defaults to EventGridSchema. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + InputSchema *string `json:"inputSchema,omitempty" tf:"input_schema,omitempty"` + + // Whether local authentication methods is enabled for the EventGrid Topic. Defaults to true. + // +kubebuilder:validation:Optional + LocalAuthEnabled *bool `json:"localAuthEnabled,omitempty" tf:"local_auth_enabled,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Whether or not public network access is allowed for this server. Defaults to true. + // +kubebuilder:validation:Optional + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The name of the resource group in which the EventGrid Topic exists. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +// TopicSpec defines the desired state of Topic +type TopicSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider TopicParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider TopicInitParameters `json:"initProvider,omitempty"` +} + +// TopicStatus defines the observed state of Topic. 
+type TopicStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider TopicObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Topic is the Schema for the Topics API. Manages an EventGrid Topic +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type Topic struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + Spec TopicSpec `json:"spec"` + Status TopicStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// TopicList contains a list of Topics +type TopicList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Topic `json:"items"` +} + +// Repository type metadata. +var ( + Topic_Kind = "Topic" + Topic_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Topic_Kind}.String() + Topic_KindAPIVersion = Topic_Kind + "." 
+ CRDGroupVersion.String() + Topic_GroupVersionKind = CRDGroupVersion.WithKind(Topic_Kind) +) + +func init() { + SchemeBuilder.Register(&Topic{}, &TopicList{}) +} diff --git a/apis/eventhub/v1beta1/zz_authorizationrule_types.go b/apis/eventhub/v1beta1/zz_authorizationrule_types.go index da0441f1a..1f36fa651 100755 --- a/apis/eventhub/v1beta1/zz_authorizationrule_types.go +++ b/apis/eventhub/v1beta1/zz_authorizationrule_types.go @@ -52,7 +52,7 @@ type AuthorizationRuleObservation struct { type AuthorizationRuleParameters struct { // Specifies the name of the EventHub. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta1.EventHub + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta2.EventHub // +kubebuilder:validation:Optional EventHubName *string `json:"eventhubName,omitempty" tf:"eventhub_name,omitempty"` @@ -73,7 +73,7 @@ type AuthorizationRuleParameters struct { Manage *bool `json:"manage,omitempty" tf:"manage,omitempty"` // Specifies the name of the grandparent EventHub Namespace. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta1.EventHubNamespace + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta2.EventHubNamespace // +kubebuilder:validation:Optional NamespaceName *string `json:"namespaceName,omitempty" tf:"namespace_name,omitempty"` diff --git a/apis/eventhub/v1beta1/zz_consumergroup_types.go b/apis/eventhub/v1beta1/zz_consumergroup_types.go index 001147317..2507e0bb1 100755 --- a/apis/eventhub/v1beta1/zz_consumergroup_types.go +++ b/apis/eventhub/v1beta1/zz_consumergroup_types.go @@ -40,7 +40,7 @@ type ConsumerGroupObservation struct { type ConsumerGroupParameters struct { // Specifies the name of the EventHub. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta1.EventHub + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta2.EventHub // +kubebuilder:validation:Optional EventHubName *string `json:"eventhubName,omitempty" tf:"eventhub_name,omitempty"` @@ -53,7 +53,7 @@ type ConsumerGroupParameters struct { EventHubNameSelector *v1.Selector `json:"eventhubNameSelector,omitempty" tf:"-"` // Specifies the name of the grandparent EventHub Namespace. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta1.EventHubNamespace + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta2.EventHubNamespace // +kubebuilder:validation:Optional NamespaceName *string `json:"namespaceName,omitempty" tf:"namespace_name,omitempty"` diff --git a/apis/eventhub/v1beta1/zz_generated.conversion_hubs.go b/apis/eventhub/v1beta1/zz_generated.conversion_hubs.go index 3b8dce3bd..dddbb6dbf 100755 --- a/apis/eventhub/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/eventhub/v1beta1/zz_generated.conversion_hubs.go @@ -6,18 +6,12 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *EventHub) Hub() {} - // Hub marks this type as a conversion hub. func (tr *AuthorizationRule) Hub() {} // Hub marks this type as a conversion hub. func (tr *ConsumerGroup) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *EventHubNamespace) Hub() {} - // Hub marks this type as a conversion hub. 
func (tr *NamespaceAuthorizationRule) Hub() {} diff --git a/apis/eventhub/v1beta1/zz_generated.conversion_spokes.go b/apis/eventhub/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..307101b19 --- /dev/null +++ b/apis/eventhub/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,54 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this EventHub to the hub type. +func (tr *EventHub) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the EventHub type. +func (tr *EventHub) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this EventHubNamespace to the hub type. 
+func (tr *EventHubNamespace) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the EventHubNamespace type. +func (tr *EventHubNamespace) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/eventhub/v1beta1/zz_generated.resolvers.go b/apis/eventhub/v1beta1/zz_generated.resolvers.go index 401515a45..310d88380 100644 --- a/apis/eventhub/v1beta1/zz_generated.resolvers.go +++ b/apis/eventhub/v1beta1/zz_generated.resolvers.go @@ -27,7 +27,7 @@ func (mg *AuthorizationRule) ResolveReferences( // ResolveReferences of this Aut var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta1", "EventHub", "EventHubList") + m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta2", "EventHub", "EventHubList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -46,7 +46,7 @@ func (mg *AuthorizationRule) ResolveReferences( // ResolveReferences of this Aut mg.Spec.ForProvider.EventHubName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.EventHubNameRef = rsp.ResolvedReference { - m, l, err = 
apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta1", "EventHubNamespace", "EventHubNamespaceList") + m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta2", "EventHubNamespace", "EventHubNamespaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -96,7 +96,7 @@ func (mg *ConsumerGroup) ResolveReferences(ctx context.Context, c client.Reader) var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta1", "EventHub", "EventHubList") + m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta2", "EventHub", "EventHubList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -115,7 +115,7 @@ func (mg *ConsumerGroup) ResolveReferences(ctx context.Context, c client.Reader) mg.Spec.ForProvider.EventHubName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.EventHubNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta1", "EventHubNamespace", "EventHubNamespaceList") + m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta2", "EventHubNamespace", "EventHubNamespaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -293,7 +293,7 @@ func (mg *NamespaceAuthorizationRule) ResolveReferences(ctx context.Context, c c var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta1", "EventHubNamespace", "EventHubNamespaceList") + m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta2", "EventHubNamespace", "EventHubNamespaceList") if err != nil { return errors.Wrap(err, 
"failed to get the reference target managed resource and its list for reference resolution") } @@ -343,7 +343,7 @@ func (mg *NamespaceDisasterRecoveryConfig) ResolveReferences(ctx context.Context var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta1", "EventHubNamespace", "EventHubNamespaceList") + m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta2", "EventHubNamespace", "EventHubNamespaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -362,7 +362,7 @@ func (mg *NamespaceDisasterRecoveryConfig) ResolveReferences(ctx context.Context mg.Spec.ForProvider.NamespaceName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.NamespaceNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta1", "EventHubNamespace", "EventHubNamespaceList") + m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta2", "EventHubNamespace", "EventHubNamespaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -400,7 +400,7 @@ func (mg *NamespaceDisasterRecoveryConfig) ResolveReferences(ctx context.Context mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta1", "EventHubNamespace", "EventHubNamespaceList") + m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta2", "EventHubNamespace", "EventHubNamespaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -431,7 +431,7 @@ func (mg *NamespaceSchemaGroup) 
ResolveReferences(ctx context.Context, c client. var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta1", "EventHubNamespace", "EventHubNamespaceList") + m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta2", "EventHubNamespace", "EventHubNamespaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/eventhub/v1beta1/zz_namespaceauthorizationrule_types.go b/apis/eventhub/v1beta1/zz_namespaceauthorizationrule_types.go index b3d902953..b8e00bcb1 100755 --- a/apis/eventhub/v1beta1/zz_namespaceauthorizationrule_types.go +++ b/apis/eventhub/v1beta1/zz_namespaceauthorizationrule_types.go @@ -57,7 +57,7 @@ type NamespaceAuthorizationRuleParameters struct { Manage *bool `json:"manage,omitempty" tf:"manage,omitempty"` // Specifies the name of the EventHub Namespace. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta1.EventHubNamespace + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta2.EventHubNamespace // +kubebuilder:validation:Optional NamespaceName *string `json:"namespaceName,omitempty" tf:"namespace_name,omitempty"` diff --git a/apis/eventhub/v1beta1/zz_namespacedisasterrecoveryconfig_types.go b/apis/eventhub/v1beta1/zz_namespacedisasterrecoveryconfig_types.go index 528c923ea..9308fae0c 100755 --- a/apis/eventhub/v1beta1/zz_namespacedisasterrecoveryconfig_types.go +++ b/apis/eventhub/v1beta1/zz_namespacedisasterrecoveryconfig_types.go @@ -16,7 +16,7 @@ import ( type NamespaceDisasterRecoveryConfigInitParameters struct { // The ID of the EventHub Namespace to replicate to. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta1.EventHubNamespace + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta2.EventHubNamespace // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() PartnerNamespaceID *string `json:"partnerNamespaceId,omitempty" tf:"partner_namespace_id,omitempty"` @@ -47,7 +47,7 @@ type NamespaceDisasterRecoveryConfigObservation struct { type NamespaceDisasterRecoveryConfigParameters struct { // Specifies the name of the primary EventHub Namespace to replicate. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta1.EventHubNamespace + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta2.EventHubNamespace // +kubebuilder:validation:Optional NamespaceName *string `json:"namespaceName,omitempty" tf:"namespace_name,omitempty"` @@ -60,7 +60,7 @@ type NamespaceDisasterRecoveryConfigParameters struct { NamespaceNameSelector *v1.Selector `json:"namespaceNameSelector,omitempty" tf:"-"` // The ID of the EventHub Namespace to replicate to. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta1.EventHubNamespace + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta2.EventHubNamespace // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional PartnerNamespaceID *string `json:"partnerNamespaceId,omitempty" tf:"partner_namespace_id,omitempty"` diff --git a/apis/eventhub/v1beta1/zz_namespaceschemagroup_types.go b/apis/eventhub/v1beta1/zz_namespaceschemagroup_types.go index 1a1f21cb6..12a5228fc 100755 --- a/apis/eventhub/v1beta1/zz_namespaceschemagroup_types.go +++ b/apis/eventhub/v1beta1/zz_namespaceschemagroup_types.go @@ -40,7 +40,7 @@ type NamespaceSchemaGroupObservation struct { type NamespaceSchemaGroupParameters struct { // Specifies the ID of the EventHub Namespace. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta1.EventHubNamespace + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta2.EventHubNamespace // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional NamespaceID *string `json:"namespaceId,omitempty" tf:"namespace_id,omitempty"` diff --git a/apis/eventhub/v1beta2/zz_eventhub_terraformed.go b/apis/eventhub/v1beta2/zz_eventhub_terraformed.go new file mode 100755 index 000000000..a2a8489de --- /dev/null +++ b/apis/eventhub/v1beta2/zz_eventhub_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this EventHub +func (mg *EventHub) GetTerraformResourceType() string { + return "azurerm_eventhub" +} + +// GetConnectionDetailsMapping for this EventHub +func (tr *EventHub) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this EventHub +func (tr *EventHub) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this EventHub +func (tr *EventHub) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this EventHub +func (tr *EventHub) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this EventHub +func (tr *EventHub) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this EventHub +func (tr *EventHub) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this EventHub +func (tr *EventHub) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) 
+} + +// GetInitParameters of this EventHub +func (tr *EventHub) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this EventHub using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *EventHub) LateInitialize(attrs []byte) (bool, error) { + params := &EventHubParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *EventHub) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/eventhub/v1beta2/zz_eventhub_types.go b/apis/eventhub/v1beta2/zz_eventhub_types.go new file mode 100755 index 000000000..ee1a036e1 --- /dev/null +++ b/apis/eventhub/v1beta2/zz_eventhub_types.go @@ -0,0 +1,282 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CaptureDescriptionInitParameters struct { + + // A destination block as defined below. + Destination *DestinationInitParameters `json:"destination,omitempty" tf:"destination,omitempty"` + + // Specifies if the Capture Description is Enabled. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Specifies the Encoding used for the Capture Description. Possible values are Avro and AvroDeflate. + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + // Specifies the time interval in seconds at which the capture will happen. Values can be between 60 and 900 seconds. Defaults to 300 seconds. + IntervalInSeconds *float64 `json:"intervalInSeconds,omitempty" tf:"interval_in_seconds,omitempty"` + + // Specifies the amount of data built up in your EventHub before a Capture Operation occurs. Value should be between 10485760 and 524288000 bytes. Defaults to 314572800 bytes. + SizeLimitInBytes *float64 `json:"sizeLimitInBytes,omitempty" tf:"size_limit_in_bytes,omitempty"` + + // Specifies if empty files should not be emitted if no events occur during the Capture time window. Defaults to false. 
+ SkipEmptyArchives *bool `json:"skipEmptyArchives,omitempty" tf:"skip_empty_archives,omitempty"` +} + +type CaptureDescriptionObservation struct { + + // A destination block as defined below. + Destination *DestinationObservation `json:"destination,omitempty" tf:"destination,omitempty"` + + // Specifies if the Capture Description is Enabled. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Specifies the Encoding used for the Capture Description. Possible values are Avro and AvroDeflate. + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + // Specifies the time interval in seconds at which the capture will happen. Values can be between 60 and 900 seconds. Defaults to 300 seconds. + IntervalInSeconds *float64 `json:"intervalInSeconds,omitempty" tf:"interval_in_seconds,omitempty"` + + // Specifies the amount of data built up in your EventHub before a Capture Operation occurs. Value should be between 10485760 and 524288000 bytes. Defaults to 314572800 bytes. + SizeLimitInBytes *float64 `json:"sizeLimitInBytes,omitempty" tf:"size_limit_in_bytes,omitempty"` + + // Specifies if empty files should not be emitted if no events occur during the Capture time window. Defaults to false. + SkipEmptyArchives *bool `json:"skipEmptyArchives,omitempty" tf:"skip_empty_archives,omitempty"` +} + +type CaptureDescriptionParameters struct { + + // A destination block as defined below. + // +kubebuilder:validation:Optional + Destination *DestinationParameters `json:"destination" tf:"destination,omitempty"` + + // Specifies if the Capture Description is Enabled. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` + + // Specifies the Encoding used for the Capture Description. Possible values are Avro and AvroDeflate. + // +kubebuilder:validation:Optional + Encoding *string `json:"encoding" tf:"encoding,omitempty"` + + // Specifies the time interval in seconds at which the capture will happen. 
Values can be between 60 and 900 seconds. Defaults to 300 seconds. + // +kubebuilder:validation:Optional + IntervalInSeconds *float64 `json:"intervalInSeconds,omitempty" tf:"interval_in_seconds,omitempty"` + + // Specifies the amount of data built up in your EventHub before a Capture Operation occurs. Value should be between 10485760 and 524288000 bytes. Defaults to 314572800 bytes. + // +kubebuilder:validation:Optional + SizeLimitInBytes *float64 `json:"sizeLimitInBytes,omitempty" tf:"size_limit_in_bytes,omitempty"` + + // Specifies if empty files should not be emitted if no events occur during the Capture time window. Defaults to false. + // +kubebuilder:validation:Optional + SkipEmptyArchives *bool `json:"skipEmptyArchives,omitempty" tf:"skip_empty_archives,omitempty"` +} + +type DestinationInitParameters struct { + + // The Blob naming convention for archiving. e.g. {Namespace}/{EventHub}/{PartitionId}/{Year}/{Month}/{Day}/{Hour}/{Minute}/{Second}. Here all the parameters (Namespace,EventHub .. etc) are mandatory irrespective of order + ArchiveNameFormat *string `json:"archiveNameFormat,omitempty" tf:"archive_name_format,omitempty"` + + // The name of the Container within the Blob Storage Account where messages should be archived. + BlobContainerName *string `json:"blobContainerName,omitempty" tf:"blob_container_name,omitempty"` + + // Specifies the name of the EventHub resource. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The ID of the Blob Storage Account where messages should be archived. + StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` +} + +type DestinationObservation struct { + + // The Blob naming convention for archiving. e.g. {Namespace}/{EventHub}/{PartitionId}/{Year}/{Month}/{Day}/{Hour}/{Minute}/{Second}. Here all the parameters (Namespace,EventHub .. 
etc) are mandatory irrespective of order + ArchiveNameFormat *string `json:"archiveNameFormat,omitempty" tf:"archive_name_format,omitempty"` + + // The name of the Container within the Blob Storage Account where messages should be archived. + BlobContainerName *string `json:"blobContainerName,omitempty" tf:"blob_container_name,omitempty"` + + // Specifies the name of the EventHub resource. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The ID of the Blob Storage Account where messages should be archived. + StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` +} + +type DestinationParameters struct { + + // The Blob naming convention for archiving. e.g. {Namespace}/{EventHub}/{PartitionId}/{Year}/{Month}/{Day}/{Hour}/{Minute}/{Second}. Here all the parameters (Namespace,EventHub .. etc) are mandatory irrespective of order + // +kubebuilder:validation:Optional + ArchiveNameFormat *string `json:"archiveNameFormat" tf:"archive_name_format,omitempty"` + + // The name of the Container within the Blob Storage Account where messages should be archived. + // +kubebuilder:validation:Optional + BlobContainerName *string `json:"blobContainerName" tf:"blob_container_name,omitempty"` + + // Specifies the name of the EventHub resource. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The ID of the Blob Storage Account where messages should be archived. + // +kubebuilder:validation:Optional + StorageAccountID *string `json:"storageAccountId" tf:"storage_account_id,omitempty"` +} + +type EventHubInitParameters struct { + + // A capture_description block as defined below. + CaptureDescription *CaptureDescriptionInitParameters `json:"captureDescription,omitempty" tf:"capture_description,omitempty"` + + // Specifies the number of days to retain the events for this Event Hub. 
+ MessageRetention *float64 `json:"messageRetention,omitempty" tf:"message_retention,omitempty"` + + // Specifies the current number of shards on the Event Hub. + PartitionCount *float64 `json:"partitionCount,omitempty" tf:"partition_count,omitempty"` + + // Specifies the status of the Event Hub resource. Possible values are Active, Disabled and SendDisabled. Defaults to Active. + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type EventHubObservation struct { + + // A capture_description block as defined below. + CaptureDescription *CaptureDescriptionObservation `json:"captureDescription,omitempty" tf:"capture_description,omitempty"` + + // The ID of the EventHub. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies the number of days to retain the events for this Event Hub. + MessageRetention *float64 `json:"messageRetention,omitempty" tf:"message_retention,omitempty"` + + // Specifies the name of the EventHub Namespace. Changing this forces a new resource to be created. + NamespaceName *string `json:"namespaceName,omitempty" tf:"namespace_name,omitempty"` + + // Specifies the current number of shards on the Event Hub. + PartitionCount *float64 `json:"partitionCount,omitempty" tf:"partition_count,omitempty"` + + // The identifiers for partitions created for Event Hubs. + // +listType=set + PartitionIds []*string `json:"partitionIds,omitempty" tf:"partition_ids,omitempty"` + + // The name of the resource group in which the EventHub's parent Namespace exists. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Specifies the status of the Event Hub resource. Possible values are Active, Disabled and SendDisabled. Defaults to Active. + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type EventHubParameters struct { + + // A capture_description block as defined below. 
+ // +kubebuilder:validation:Optional + CaptureDescription *CaptureDescriptionParameters `json:"captureDescription,omitempty" tf:"capture_description,omitempty"` + + // Specifies the number of days to retain the events for this Event Hub. + // +kubebuilder:validation:Optional + MessageRetention *float64 `json:"messageRetention,omitempty" tf:"message_retention,omitempty"` + + // Specifies the name of the EventHub Namespace. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta2.EventHubNamespace + // +kubebuilder:validation:Optional + NamespaceName *string `json:"namespaceName,omitempty" tf:"namespace_name,omitempty"` + + // Reference to a EventHubNamespace in eventhub to populate namespaceName. + // +kubebuilder:validation:Optional + NamespaceNameRef *v1.Reference `json:"namespaceNameRef,omitempty" tf:"-"` + + // Selector for a EventHubNamespace in eventhub to populate namespaceName. + // +kubebuilder:validation:Optional + NamespaceNameSelector *v1.Selector `json:"namespaceNameSelector,omitempty" tf:"-"` + + // Specifies the current number of shards on the Event Hub. + // +kubebuilder:validation:Optional + PartitionCount *float64 `json:"partitionCount,omitempty" tf:"partition_count,omitempty"` + + // The name of the resource group in which the EventHub's parent Namespace exists. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. 
+ // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // Specifies the status of the Event Hub resource. Possible values are Active, Disabled and SendDisabled. Defaults to Active. + // +kubebuilder:validation:Optional + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +// EventHubSpec defines the desired state of EventHub +type EventHubSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider EventHubParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider EventHubInitParameters `json:"initProvider,omitempty"` +} + +// EventHubStatus defines the observed state of EventHub. +type EventHubStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider EventHubObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// EventHub is the Schema for the EventHubs API. Manages a Event Hubs as a nested resource within an Event Hubs namespace. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type EventHub struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.messageRetention) || (has(self.initProvider) && has(self.initProvider.messageRetention))",message="spec.forProvider.messageRetention is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.partitionCount) || (has(self.initProvider) && has(self.initProvider.partitionCount))",message="spec.forProvider.partitionCount is a required parameter" + Spec EventHubSpec `json:"spec"` + Status EventHubStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// EventHubList contains a list of EventHubs +type EventHubList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []EventHub `json:"items"` +} + +// Repository type metadata. +var ( + EventHub_Kind = "EventHub" + EventHub_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: EventHub_Kind}.String() + EventHub_KindAPIVersion = EventHub_Kind + "." 
+ CRDGroupVersion.String() + EventHub_GroupVersionKind = CRDGroupVersion.WithKind(EventHub_Kind) +) + +func init() { + SchemeBuilder.Register(&EventHub{}, &EventHubList{}) +} diff --git a/apis/eventhub/v1beta2/zz_eventhubnamespace_terraformed.go b/apis/eventhub/v1beta2/zz_eventhubnamespace_terraformed.go new file mode 100755 index 000000000..809acafb4 --- /dev/null +++ b/apis/eventhub/v1beta2/zz_eventhubnamespace_terraformed.go @@ -0,0 +1,130 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this EventHubNamespace +func (mg *EventHubNamespace) GetTerraformResourceType() string { + return "azurerm_eventhub_namespace" +} + +// GetConnectionDetailsMapping for this EventHubNamespace +func (tr *EventHubNamespace) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"default_primary_connection_string": "status.atProvider.defaultPrimaryConnectionString", "default_primary_connection_string_alias": "status.atProvider.defaultPrimaryConnectionStringAlias", "default_primary_key": "status.atProvider.defaultPrimaryKey", "default_secondary_connection_string": "status.atProvider.defaultSecondaryConnectionString", "default_secondary_connection_string_alias": "status.atProvider.defaultSecondaryConnectionStringAlias", "default_secondary_key": "status.atProvider.defaultSecondaryKey"} +} + +// GetObservation of this EventHubNamespace +func (tr *EventHubNamespace) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this EventHubNamespace 
+func (tr *EventHubNamespace) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this EventHubNamespace +func (tr *EventHubNamespace) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this EventHubNamespace +func (tr *EventHubNamespace) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this EventHubNamespace +func (tr *EventHubNamespace) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this EventHubNamespace +func (tr *EventHubNamespace) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this EventHubNamespace +func (tr *EventHubNamespace) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. 
As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this EventHubNamespace using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *EventHubNamespace) LateInitialize(attrs []byte) (bool, error) { + params := &EventHubNamespaceParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + opts = append(opts, resource.WithNameFilter("NetworkRulesets")) + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *EventHubNamespace) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/eventhub/v1beta2/zz_eventhubnamespace_types.go b/apis/eventhub/v1beta2/zz_eventhubnamespace_types.go new file mode 100755 index 000000000..e87646a32 --- /dev/null +++ b/apis/eventhub/v1beta2/zz_eventhubnamespace_types.go @@ -0,0 +1,412 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type EventHubNamespaceInitParameters struct { + + // Is Auto Inflate enabled for the EventHub Namespace? + AutoInflateEnabled *bool `json:"autoInflateEnabled,omitempty" tf:"auto_inflate_enabled,omitempty"` + + // Specifies the Capacity / Throughput Units for a Standard SKU namespace. Default capacity has a maximum of 2, but can be increased in blocks of 2 on a committed purchase basis. Defaults to 1. + Capacity *float64 `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // Specifies the ID of the EventHub Dedicated Cluster where this Namespace should created. Changing this forces a new resource to be created. + DedicatedClusterID *string `json:"dedicatedClusterId,omitempty" tf:"dedicated_cluster_id,omitempty"` + + // An identity block as defined below. + Identity *IdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Is SAS authentication enabled for the EventHub Namespace? Defaults to true. + LocalAuthenticationEnabled *bool `json:"localAuthenticationEnabled,omitempty" tf:"local_authentication_enabled,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the maximum number of throughput units when Auto Inflate is Enabled. Valid values range from 1 - 20. + MaximumThroughputUnits *float64 `json:"maximumThroughputUnits,omitempty" tf:"maximum_throughput_units,omitempty"` + + // The minimum supported TLS version for this EventHub Namespace. Valid values are: 1.0, 1.1 and 1.2. The current default minimum TLS version is 1.2. + MinimumTLSVersion *string `json:"minimumTlsVersion,omitempty" tf:"minimum_tls_version,omitempty"` + + // A network_rulesets block as defined below. 
+ NetworkRulesets []NetworkRulesetsInitParameters `json:"networkRulesets,omitempty" tf:"network_rulesets,omitempty"` + + // Is public network access enabled for the EventHub Namespace? Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // Defines which tier to use. Valid options are Basic, Standard, and Premium. Please note that setting this field to Premium will force the creation of a new resource. + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies if the EventHub Namespace should be Zone Redundant (created across Availability Zones). Changing this forces a new resource to be created. Defaults to false. + ZoneRedundant *bool `json:"zoneRedundant,omitempty" tf:"zone_redundant,omitempty"` +} + +type EventHubNamespaceObservation struct { + + // Is Auto Inflate enabled for the EventHub Namespace? + AutoInflateEnabled *bool `json:"autoInflateEnabled,omitempty" tf:"auto_inflate_enabled,omitempty"` + + // Specifies the Capacity / Throughput Units for a Standard SKU namespace. Default capacity has a maximum of 2, but can be increased in blocks of 2 on a committed purchase basis. Defaults to 1. + Capacity *float64 `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // Specifies the ID of the EventHub Dedicated Cluster where this Namespace should created. Changing this forces a new resource to be created. + DedicatedClusterID *string `json:"dedicatedClusterId,omitempty" tf:"dedicated_cluster_id,omitempty"` + + // The EventHub Namespace ID. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. + Identity *IdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // Is SAS authentication enabled for the EventHub Namespace? Defaults to true. 
+ LocalAuthenticationEnabled *bool `json:"localAuthenticationEnabled,omitempty" tf:"local_authentication_enabled,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the maximum number of throughput units when Auto Inflate is Enabled. Valid values range from 1 - 20. + MaximumThroughputUnits *float64 `json:"maximumThroughputUnits,omitempty" tf:"maximum_throughput_units,omitempty"` + + // The minimum supported TLS version for this EventHub Namespace. Valid values are: 1.0, 1.1 and 1.2. The current default minimum TLS version is 1.2. + MinimumTLSVersion *string `json:"minimumTlsVersion,omitempty" tf:"minimum_tls_version,omitempty"` + + // A network_rulesets block as defined below. + NetworkRulesets []NetworkRulesetsObservation `json:"networkRulesets,omitempty" tf:"network_rulesets,omitempty"` + + // Is public network access enabled for the EventHub Namespace? Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The name of the resource group in which to create the namespace. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Defines which tier to use. Valid options are Basic, Standard, and Premium. Please note that setting this field to Premium will force the creation of a new resource. + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies if the EventHub Namespace should be Zone Redundant (created across Availability Zones). Changing this forces a new resource to be created. Defaults to false. 
+ ZoneRedundant *bool `json:"zoneRedundant,omitempty" tf:"zone_redundant,omitempty"` +} + +type EventHubNamespaceParameters struct { + + // Is Auto Inflate enabled for the EventHub Namespace? + // +kubebuilder:validation:Optional + AutoInflateEnabled *bool `json:"autoInflateEnabled,omitempty" tf:"auto_inflate_enabled,omitempty"` + + // Specifies the Capacity / Throughput Units for a Standard SKU namespace. Default capacity has a maximum of 2, but can be increased in blocks of 2 on a committed purchase basis. Defaults to 1. + // +kubebuilder:validation:Optional + Capacity *float64 `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // Specifies the ID of the EventHub Dedicated Cluster where this Namespace should created. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + DedicatedClusterID *string `json:"dedicatedClusterId,omitempty" tf:"dedicated_cluster_id,omitempty"` + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *IdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Is SAS authentication enabled for the EventHub Namespace? Defaults to true. + // +kubebuilder:validation:Optional + LocalAuthenticationEnabled *bool `json:"localAuthenticationEnabled,omitempty" tf:"local_authentication_enabled,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the maximum number of throughput units when Auto Inflate is Enabled. Valid values range from 1 - 20. + // +kubebuilder:validation:Optional + MaximumThroughputUnits *float64 `json:"maximumThroughputUnits,omitempty" tf:"maximum_throughput_units,omitempty"` + + // The minimum supported TLS version for this EventHub Namespace. Valid values are: 1.0, 1.1 and 1.2. The current default minimum TLS version is 1.2. 
+ // +kubebuilder:validation:Optional + MinimumTLSVersion *string `json:"minimumTlsVersion,omitempty" tf:"minimum_tls_version,omitempty"` + + // A network_rulesets block as defined below. + // +kubebuilder:validation:Optional + NetworkRulesets []NetworkRulesetsParameters `json:"networkRulesets,omitempty" tf:"network_rulesets,omitempty"` + + // Is public network access enabled for the EventHub Namespace? Defaults to true. + // +kubebuilder:validation:Optional + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The name of the resource group in which to create the namespace. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // Defines which tier to use. Valid options are Basic, Standard, and Premium. Please note that setting this field to Premium will force the creation of a new resource. + // +kubebuilder:validation:Optional + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies if the EventHub Namespace should be Zone Redundant (created across Availability Zones). Changing this forces a new resource to be created. Defaults to false. 
+ // +kubebuilder:validation:Optional + ZoneRedundant *bool `json:"zoneRedundant,omitempty" tf:"zone_redundant,omitempty"` +} + +type IPRuleInitParameters struct { + + // The action to take when the rule is matched. Possible values are Allow. Defaults to Allow. + Action *string `json:"action,omitempty" tf:"action"` + + // The IP mask to match on. + IPMask *string `json:"ipMask,omitempty" tf:"ip_mask"` +} + +type IPRuleObservation struct { + + // The action to take when the rule is matched. Possible values are Allow. Defaults to Allow. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The IP mask to match on. + IPMask *string `json:"ipMask,omitempty" tf:"ip_mask,omitempty"` +} + +type IPRuleParameters struct { + + // The action to take when the rule is matched. Possible values are Allow. Defaults to Allow. + // +kubebuilder:validation:Optional + Action *string `json:"action,omitempty" tf:"action"` + + // The IP mask to match on. + // +kubebuilder:validation:Optional + IPMask *string `json:"ipMask,omitempty" tf:"ip_mask"` +} + +type IdentityInitParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this EventHub namespace. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Event Hub Namespace. Possible values are SystemAssigned or UserAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityObservation struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this EventHub namespace. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Principal ID associated with this Managed Service Identity. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID associated with this Managed Service Identity. 
+ TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Event Hub Namespace. Possible values are SystemAssigned or UserAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this EventHub namespace. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Event Hub Namespace. Possible values are SystemAssigned or UserAssigned. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type NetworkRulesetsInitParameters struct { + + // The default action to take when a rule is not matched. Possible values are Allow and Deny. + DefaultAction *string `json:"defaultAction,omitempty" tf:"default_action"` + + // One or more ip_rule blocks as defined below. + IPRule []IPRuleInitParameters `json:"ipRule,omitempty" tf:"ip_rule"` + + // Is public network access enabled for the EventHub Namespace? Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled"` + + // Whether Trusted Microsoft Services are allowed to bypass firewall. + TrustedServiceAccessEnabled *bool `json:"trustedServiceAccessEnabled,omitempty" tf:"trusted_service_access_enabled"` + + // One or more virtual_network_rule blocks as defined below. + VirtualNetworkRule []VirtualNetworkRuleInitParameters `json:"virtualNetworkRule,omitempty" tf:"virtual_network_rule"` +} + +type NetworkRulesetsObservation struct { + + // The default action to take when a rule is not matched. Possible values are Allow and Deny. 
+ DefaultAction *string `json:"defaultAction,omitempty" tf:"default_action,omitempty"` + + // One or more ip_rule blocks as defined below. + IPRule []IPRuleObservation `json:"ipRule,omitempty" tf:"ip_rule,omitempty"` + + // Is public network access enabled for the EventHub Namespace? Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // Whether Trusted Microsoft Services are allowed to bypass firewall. + TrustedServiceAccessEnabled *bool `json:"trustedServiceAccessEnabled,omitempty" tf:"trusted_service_access_enabled,omitempty"` + + // One or more virtual_network_rule blocks as defined below. + VirtualNetworkRule []VirtualNetworkRuleObservation `json:"virtualNetworkRule,omitempty" tf:"virtual_network_rule,omitempty"` +} + +type NetworkRulesetsParameters struct { + + // The default action to take when a rule is not matched. Possible values are Allow and Deny. + // +kubebuilder:validation:Optional + DefaultAction *string `json:"defaultAction,omitempty" tf:"default_action"` + + // One or more ip_rule blocks as defined below. + // +kubebuilder:validation:Optional + IPRule []IPRuleParameters `json:"ipRule,omitempty" tf:"ip_rule"` + + // Is public network access enabled for the EventHub Namespace? Defaults to true. + // +kubebuilder:validation:Optional + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled"` + + // Whether Trusted Microsoft Services are allowed to bypass firewall. + // +kubebuilder:validation:Optional + TrustedServiceAccessEnabled *bool `json:"trustedServiceAccessEnabled,omitempty" tf:"trusted_service_access_enabled"` + + // One or more virtual_network_rule blocks as defined below. 
+ // +kubebuilder:validation:Optional + VirtualNetworkRule []VirtualNetworkRuleParameters `json:"virtualNetworkRule,omitempty" tf:"virtual_network_rule"` +} + +type VirtualNetworkRuleInitParameters struct { + + // Are missing virtual network service endpoints ignored? + IgnoreMissingVirtualNetworkServiceEndpoint *bool `json:"ignoreMissingVirtualNetworkServiceEndpoint,omitempty" tf:"ignore_missing_virtual_network_service_endpoint"` + + // The id of the subnet to match on. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` +} + +type VirtualNetworkRuleObservation struct { + + // Are missing virtual network service endpoints ignored? + IgnoreMissingVirtualNetworkServiceEndpoint *bool `json:"ignoreMissingVirtualNetworkServiceEndpoint,omitempty" tf:"ignore_missing_virtual_network_service_endpoint,omitempty"` + + // The id of the subnet to match on. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` +} + +type VirtualNetworkRuleParameters struct { + + // Are missing virtual network service endpoints ignored? + // +kubebuilder:validation:Optional + IgnoreMissingVirtualNetworkServiceEndpoint *bool `json:"ignoreMissingVirtualNetworkServiceEndpoint,omitempty" tf:"ignore_missing_virtual_network_service_endpoint"` + + // The id of the subnet to match on. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` +} + +// EventHubNamespaceSpec defines the desired state of EventHubNamespace +type EventHubNamespaceSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider EventHubNamespaceParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider EventHubNamespaceInitParameters `json:"initProvider,omitempty"` +} + +// EventHubNamespaceStatus defines the observed state of EventHubNamespace. +type EventHubNamespaceStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider EventHubNamespaceObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// EventHubNamespace is the Schema for the EventHubNamespaces API. 
Manages an EventHub Namespace. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type EventHubNamespace struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.sku) || (has(self.initProvider) && has(self.initProvider.sku))",message="spec.forProvider.sku is a required parameter" + Spec EventHubNamespaceSpec `json:"spec"` + Status EventHubNamespaceStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// EventHubNamespaceList contains a list of EventHubNamespaces +type EventHubNamespaceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []EventHubNamespace `json:"items"` +} + +// Repository type metadata. +var ( + EventHubNamespace_Kind = "EventHubNamespace" + EventHubNamespace_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: EventHubNamespace_Kind}.String() + EventHubNamespace_KindAPIVersion = EventHubNamespace_Kind + "." 
+ CRDGroupVersion.String() + EventHubNamespace_GroupVersionKind = CRDGroupVersion.WithKind(EventHubNamespace_Kind) +) + +func init() { + SchemeBuilder.Register(&EventHubNamespace{}, &EventHubNamespaceList{}) +} diff --git a/apis/eventhub/v1beta2/zz_generated.conversion_hubs.go b/apis/eventhub/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 000000000..18603f503 --- /dev/null +++ b/apis/eventhub/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,13 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *EventHub) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *EventHubNamespace) Hub() {} diff --git a/apis/eventhub/v1beta2/zz_generated.deepcopy.go b/apis/eventhub/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..cb95fbb42 --- /dev/null +++ b/apis/eventhub/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,1312 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CaptureDescriptionInitParameters) DeepCopyInto(out *CaptureDescriptionInitParameters) { + *out = *in + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(DestinationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.IntervalInSeconds != nil { + in, out := &in.IntervalInSeconds, &out.IntervalInSeconds + *out = new(float64) + **out = **in + } + if in.SizeLimitInBytes != nil { + in, out := &in.SizeLimitInBytes, &out.SizeLimitInBytes + *out = new(float64) + **out = **in + } + if in.SkipEmptyArchives != nil { + in, out := &in.SkipEmptyArchives, &out.SkipEmptyArchives + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CaptureDescriptionInitParameters. +func (in *CaptureDescriptionInitParameters) DeepCopy() *CaptureDescriptionInitParameters { + if in == nil { + return nil + } + out := new(CaptureDescriptionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CaptureDescriptionObservation) DeepCopyInto(out *CaptureDescriptionObservation) { + *out = *in + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(DestinationObservation) + (*in).DeepCopyInto(*out) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.IntervalInSeconds != nil { + in, out := &in.IntervalInSeconds, &out.IntervalInSeconds + *out = new(float64) + **out = **in + } + if in.SizeLimitInBytes != nil { + in, out := &in.SizeLimitInBytes, &out.SizeLimitInBytes + *out = new(float64) + **out = **in + } + if in.SkipEmptyArchives != nil { + in, out := &in.SkipEmptyArchives, &out.SkipEmptyArchives + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CaptureDescriptionObservation. +func (in *CaptureDescriptionObservation) DeepCopy() *CaptureDescriptionObservation { + if in == nil { + return nil + } + out := new(CaptureDescriptionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CaptureDescriptionParameters) DeepCopyInto(out *CaptureDescriptionParameters) { + *out = *in + if in.Destination != nil { + in, out := &in.Destination, &out.Destination + *out = new(DestinationParameters) + (*in).DeepCopyInto(*out) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.IntervalInSeconds != nil { + in, out := &in.IntervalInSeconds, &out.IntervalInSeconds + *out = new(float64) + **out = **in + } + if in.SizeLimitInBytes != nil { + in, out := &in.SizeLimitInBytes, &out.SizeLimitInBytes + *out = new(float64) + **out = **in + } + if in.SkipEmptyArchives != nil { + in, out := &in.SkipEmptyArchives, &out.SkipEmptyArchives + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CaptureDescriptionParameters. +func (in *CaptureDescriptionParameters) DeepCopy() *CaptureDescriptionParameters { + if in == nil { + return nil + } + out := new(CaptureDescriptionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DestinationInitParameters) DeepCopyInto(out *DestinationInitParameters) { + *out = *in + if in.ArchiveNameFormat != nil { + in, out := &in.ArchiveNameFormat, &out.ArchiveNameFormat + *out = new(string) + **out = **in + } + if in.BlobContainerName != nil { + in, out := &in.BlobContainerName, &out.BlobContainerName + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.StorageAccountID != nil { + in, out := &in.StorageAccountID, &out.StorageAccountID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationInitParameters. +func (in *DestinationInitParameters) DeepCopy() *DestinationInitParameters { + if in == nil { + return nil + } + out := new(DestinationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DestinationObservation) DeepCopyInto(out *DestinationObservation) { + *out = *in + if in.ArchiveNameFormat != nil { + in, out := &in.ArchiveNameFormat, &out.ArchiveNameFormat + *out = new(string) + **out = **in + } + if in.BlobContainerName != nil { + in, out := &in.BlobContainerName, &out.BlobContainerName + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.StorageAccountID != nil { + in, out := &in.StorageAccountID, &out.StorageAccountID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationObservation. 
+func (in *DestinationObservation) DeepCopy() *DestinationObservation { + if in == nil { + return nil + } + out := new(DestinationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DestinationParameters) DeepCopyInto(out *DestinationParameters) { + *out = *in + if in.ArchiveNameFormat != nil { + in, out := &in.ArchiveNameFormat, &out.ArchiveNameFormat + *out = new(string) + **out = **in + } + if in.BlobContainerName != nil { + in, out := &in.BlobContainerName, &out.BlobContainerName + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.StorageAccountID != nil { + in, out := &in.StorageAccountID, &out.StorageAccountID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationParameters. +func (in *DestinationParameters) DeepCopy() *DestinationParameters { + if in == nil { + return nil + } + out := new(DestinationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventHub) DeepCopyInto(out *EventHub) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventHub. +func (in *EventHub) DeepCopy() *EventHub { + if in == nil { + return nil + } + out := new(EventHub) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *EventHub) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventHubInitParameters) DeepCopyInto(out *EventHubInitParameters) { + *out = *in + if in.CaptureDescription != nil { + in, out := &in.CaptureDescription, &out.CaptureDescription + *out = new(CaptureDescriptionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MessageRetention != nil { + in, out := &in.MessageRetention, &out.MessageRetention + *out = new(float64) + **out = **in + } + if in.PartitionCount != nil { + in, out := &in.PartitionCount, &out.PartitionCount + *out = new(float64) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventHubInitParameters. +func (in *EventHubInitParameters) DeepCopy() *EventHubInitParameters { + if in == nil { + return nil + } + out := new(EventHubInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventHubList) DeepCopyInto(out *EventHubList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]EventHub, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventHubList. +func (in *EventHubList) DeepCopy() *EventHubList { + if in == nil { + return nil + } + out := new(EventHubList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *EventHubList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventHubNamespace) DeepCopyInto(out *EventHubNamespace) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventHubNamespace. +func (in *EventHubNamespace) DeepCopy() *EventHubNamespace { + if in == nil { + return nil + } + out := new(EventHubNamespace) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EventHubNamespace) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EventHubNamespaceInitParameters) DeepCopyInto(out *EventHubNamespaceInitParameters) { + *out = *in + if in.AutoInflateEnabled != nil { + in, out := &in.AutoInflateEnabled, &out.AutoInflateEnabled + *out = new(bool) + **out = **in + } + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(float64) + **out = **in + } + if in.DedicatedClusterID != nil { + in, out := &in.DedicatedClusterID, &out.DedicatedClusterID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LocalAuthenticationEnabled != nil { + in, out := &in.LocalAuthenticationEnabled, &out.LocalAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MaximumThroughputUnits != nil { + in, out := &in.MaximumThroughputUnits, &out.MaximumThroughputUnits + *out = new(float64) + **out = **in + } + if in.MinimumTLSVersion != nil { + in, out := &in.MinimumTLSVersion, &out.MinimumTLSVersion + *out = new(string) + **out = **in + } + if in.NetworkRulesets != nil { + in, out := &in.NetworkRulesets, &out.NetworkRulesets + *out = make([]NetworkRulesetsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ZoneRedundant != nil { + in, out 
:= &in.ZoneRedundant, &out.ZoneRedundant + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventHubNamespaceInitParameters. +func (in *EventHubNamespaceInitParameters) DeepCopy() *EventHubNamespaceInitParameters { + if in == nil { + return nil + } + out := new(EventHubNamespaceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventHubNamespaceList) DeepCopyInto(out *EventHubNamespaceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]EventHubNamespace, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventHubNamespaceList. +func (in *EventHubNamespaceList) DeepCopy() *EventHubNamespaceList { + if in == nil { + return nil + } + out := new(EventHubNamespaceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EventHubNamespaceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EventHubNamespaceObservation) DeepCopyInto(out *EventHubNamespaceObservation) { + *out = *in + if in.AutoInflateEnabled != nil { + in, out := &in.AutoInflateEnabled, &out.AutoInflateEnabled + *out = new(bool) + **out = **in + } + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(float64) + **out = **in + } + if in.DedicatedClusterID != nil { + in, out := &in.DedicatedClusterID, &out.DedicatedClusterID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.LocalAuthenticationEnabled != nil { + in, out := &in.LocalAuthenticationEnabled, &out.LocalAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MaximumThroughputUnits != nil { + in, out := &in.MaximumThroughputUnits, &out.MaximumThroughputUnits + *out = new(float64) + **out = **in + } + if in.MinimumTLSVersion != nil { + in, out := &in.MinimumTLSVersion, &out.MinimumTLSVersion + *out = new(string) + **out = **in + } + if in.NetworkRulesets != nil { + in, out := &in.NetworkRulesets, &out.NetworkRulesets + *out = make([]NetworkRulesetsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string 
+ if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ZoneRedundant != nil { + in, out := &in.ZoneRedundant, &out.ZoneRedundant + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventHubNamespaceObservation. +func (in *EventHubNamespaceObservation) DeepCopy() *EventHubNamespaceObservation { + if in == nil { + return nil + } + out := new(EventHubNamespaceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventHubNamespaceParameters) DeepCopyInto(out *EventHubNamespaceParameters) { + *out = *in + if in.AutoInflateEnabled != nil { + in, out := &in.AutoInflateEnabled, &out.AutoInflateEnabled + *out = new(bool) + **out = **in + } + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(float64) + **out = **in + } + if in.DedicatedClusterID != nil { + in, out := &in.DedicatedClusterID, &out.DedicatedClusterID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.LocalAuthenticationEnabled != nil { + in, out := &in.LocalAuthenticationEnabled, &out.LocalAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MaximumThroughputUnits != nil { + in, out := &in.MaximumThroughputUnits, &out.MaximumThroughputUnits + *out = new(float64) + **out = **in + } + if in.MinimumTLSVersion != nil { + in, out := &in.MinimumTLSVersion, &out.MinimumTLSVersion + *out = new(string) + **out = **in + } + if in.NetworkRulesets != nil { + in, out := &in.NetworkRulesets, &out.NetworkRulesets + *out = 
make([]NetworkRulesetsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ZoneRedundant != nil { + in, out := &in.ZoneRedundant, &out.ZoneRedundant + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventHubNamespaceParameters. +func (in *EventHubNamespaceParameters) DeepCopy() *EventHubNamespaceParameters { + if in == nil { + return nil + } + out := new(EventHubNamespaceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EventHubNamespaceSpec) DeepCopyInto(out *EventHubNamespaceSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventHubNamespaceSpec. +func (in *EventHubNamespaceSpec) DeepCopy() *EventHubNamespaceSpec { + if in == nil { + return nil + } + out := new(EventHubNamespaceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventHubNamespaceStatus) DeepCopyInto(out *EventHubNamespaceStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventHubNamespaceStatus. +func (in *EventHubNamespaceStatus) DeepCopy() *EventHubNamespaceStatus { + if in == nil { + return nil + } + out := new(EventHubNamespaceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EventHubObservation) DeepCopyInto(out *EventHubObservation) { + *out = *in + if in.CaptureDescription != nil { + in, out := &in.CaptureDescription, &out.CaptureDescription + *out = new(CaptureDescriptionObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.MessageRetention != nil { + in, out := &in.MessageRetention, &out.MessageRetention + *out = new(float64) + **out = **in + } + if in.NamespaceName != nil { + in, out := &in.NamespaceName, &out.NamespaceName + *out = new(string) + **out = **in + } + if in.PartitionCount != nil { + in, out := &in.PartitionCount, &out.PartitionCount + *out = new(float64) + **out = **in + } + if in.PartitionIds != nil { + in, out := &in.PartitionIds, &out.PartitionIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventHubObservation. +func (in *EventHubObservation) DeepCopy() *EventHubObservation { + if in == nil { + return nil + } + out := new(EventHubObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EventHubParameters) DeepCopyInto(out *EventHubParameters) { + *out = *in + if in.CaptureDescription != nil { + in, out := &in.CaptureDescription, &out.CaptureDescription + *out = new(CaptureDescriptionParameters) + (*in).DeepCopyInto(*out) + } + if in.MessageRetention != nil { + in, out := &in.MessageRetention, &out.MessageRetention + *out = new(float64) + **out = **in + } + if in.NamespaceName != nil { + in, out := &in.NamespaceName, &out.NamespaceName + *out = new(string) + **out = **in + } + if in.NamespaceNameRef != nil { + in, out := &in.NamespaceNameRef, &out.NamespaceNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NamespaceNameSelector != nil { + in, out := &in.NamespaceNameSelector, &out.NamespaceNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PartitionCount != nil { + in, out := &in.PartitionCount, &out.PartitionCount + *out = new(float64) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventHubParameters. +func (in *EventHubParameters) DeepCopy() *EventHubParameters { + if in == nil { + return nil + } + out := new(EventHubParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EventHubSpec) DeepCopyInto(out *EventHubSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventHubSpec. +func (in *EventHubSpec) DeepCopy() *EventHubSpec { + if in == nil { + return nil + } + out := new(EventHubSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventHubStatus) DeepCopyInto(out *EventHubStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventHubStatus. +func (in *EventHubStatus) DeepCopy() *EventHubStatus { + if in == nil { + return nil + } + out := new(EventHubStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPRuleInitParameters) DeepCopyInto(out *IPRuleInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.IPMask != nil { + in, out := &in.IPMask, &out.IPMask + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPRuleInitParameters. +func (in *IPRuleInitParameters) DeepCopy() *IPRuleInitParameters { + if in == nil { + return nil + } + out := new(IPRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IPRuleObservation) DeepCopyInto(out *IPRuleObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.IPMask != nil { + in, out := &in.IPMask, &out.IPMask + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPRuleObservation. +func (in *IPRuleObservation) DeepCopy() *IPRuleObservation { + if in == nil { + return nil + } + out := new(IPRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPRuleParameters) DeepCopyInto(out *IPRuleParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.IPMask != nil { + in, out := &in.IPMask, &out.IPMask + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPRuleParameters. +func (in *IPRuleParameters) DeepCopy() *IPRuleParameters { + if in == nil { + return nil + } + out := new(IPRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityInitParameters) DeepCopyInto(out *IdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityInitParameters. 
+func (in *IdentityInitParameters) DeepCopy() *IdentityInitParameters { + if in == nil { + return nil + } + out := new(IdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityObservation) DeepCopyInto(out *IdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityObservation. +func (in *IdentityObservation) DeepCopy() *IdentityObservation { + if in == nil { + return nil + } + out := new(IdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityParameters) DeepCopyInto(out *IdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityParameters. 
+func (in *IdentityParameters) DeepCopy() *IdentityParameters { + if in == nil { + return nil + } + out := new(IdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkRulesetsInitParameters) DeepCopyInto(out *NetworkRulesetsInitParameters) { + *out = *in + if in.DefaultAction != nil { + in, out := &in.DefaultAction, &out.DefaultAction + *out = new(string) + **out = **in + } + if in.IPRule != nil { + in, out := &in.IPRule, &out.IPRule + *out = make([]IPRuleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.TrustedServiceAccessEnabled != nil { + in, out := &in.TrustedServiceAccessEnabled, &out.TrustedServiceAccessEnabled + *out = new(bool) + **out = **in + } + if in.VirtualNetworkRule != nil { + in, out := &in.VirtualNetworkRule, &out.VirtualNetworkRule + *out = make([]VirtualNetworkRuleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkRulesetsInitParameters. +func (in *NetworkRulesetsInitParameters) DeepCopy() *NetworkRulesetsInitParameters { + if in == nil { + return nil + } + out := new(NetworkRulesetsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkRulesetsObservation) DeepCopyInto(out *NetworkRulesetsObservation) { + *out = *in + if in.DefaultAction != nil { + in, out := &in.DefaultAction, &out.DefaultAction + *out = new(string) + **out = **in + } + if in.IPRule != nil { + in, out := &in.IPRule, &out.IPRule + *out = make([]IPRuleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.TrustedServiceAccessEnabled != nil { + in, out := &in.TrustedServiceAccessEnabled, &out.TrustedServiceAccessEnabled + *out = new(bool) + **out = **in + } + if in.VirtualNetworkRule != nil { + in, out := &in.VirtualNetworkRule, &out.VirtualNetworkRule + *out = make([]VirtualNetworkRuleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkRulesetsObservation. +func (in *NetworkRulesetsObservation) DeepCopy() *NetworkRulesetsObservation { + if in == nil { + return nil + } + out := new(NetworkRulesetsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkRulesetsParameters) DeepCopyInto(out *NetworkRulesetsParameters) { + *out = *in + if in.DefaultAction != nil { + in, out := &in.DefaultAction, &out.DefaultAction + *out = new(string) + **out = **in + } + if in.IPRule != nil { + in, out := &in.IPRule, &out.IPRule + *out = make([]IPRuleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.TrustedServiceAccessEnabled != nil { + in, out := &in.TrustedServiceAccessEnabled, &out.TrustedServiceAccessEnabled + *out = new(bool) + **out = **in + } + if in.VirtualNetworkRule != nil { + in, out := &in.VirtualNetworkRule, &out.VirtualNetworkRule + *out = make([]VirtualNetworkRuleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkRulesetsParameters. +func (in *NetworkRulesetsParameters) DeepCopy() *NetworkRulesetsParameters { + if in == nil { + return nil + } + out := new(NetworkRulesetsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualNetworkRuleInitParameters) DeepCopyInto(out *VirtualNetworkRuleInitParameters) { + *out = *in + if in.IgnoreMissingVirtualNetworkServiceEndpoint != nil { + in, out := &in.IgnoreMissingVirtualNetworkServiceEndpoint, &out.IgnoreMissingVirtualNetworkServiceEndpoint + *out = new(bool) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkRuleInitParameters. +func (in *VirtualNetworkRuleInitParameters) DeepCopy() *VirtualNetworkRuleInitParameters { + if in == nil { + return nil + } + out := new(VirtualNetworkRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualNetworkRuleObservation) DeepCopyInto(out *VirtualNetworkRuleObservation) { + *out = *in + if in.IgnoreMissingVirtualNetworkServiceEndpoint != nil { + in, out := &in.IgnoreMissingVirtualNetworkServiceEndpoint, &out.IgnoreMissingVirtualNetworkServiceEndpoint + *out = new(bool) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkRuleObservation. 
+func (in *VirtualNetworkRuleObservation) DeepCopy() *VirtualNetworkRuleObservation { + if in == nil { + return nil + } + out := new(VirtualNetworkRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualNetworkRuleParameters) DeepCopyInto(out *VirtualNetworkRuleParameters) { + *out = *in + if in.IgnoreMissingVirtualNetworkServiceEndpoint != nil { + in, out := &in.IgnoreMissingVirtualNetworkServiceEndpoint, &out.IgnoreMissingVirtualNetworkServiceEndpoint + *out = new(bool) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkRuleParameters. +func (in *VirtualNetworkRuleParameters) DeepCopy() *VirtualNetworkRuleParameters { + if in == nil { + return nil + } + out := new(VirtualNetworkRuleParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/eventhub/v1beta2/zz_generated.managed.go b/apis/eventhub/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..e94f2d26d --- /dev/null +++ b/apis/eventhub/v1beta2/zz_generated.managed.go @@ -0,0 +1,128 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this EventHub. 
+func (mg *EventHub) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this EventHub. +func (mg *EventHub) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this EventHub. +func (mg *EventHub) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this EventHub. +func (mg *EventHub) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this EventHub. +func (mg *EventHub) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this EventHub. +func (mg *EventHub) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this EventHub. +func (mg *EventHub) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this EventHub. +func (mg *EventHub) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this EventHub. +func (mg *EventHub) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this EventHub. +func (mg *EventHub) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this EventHub. +func (mg *EventHub) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this EventHub. +func (mg *EventHub) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this EventHubNamespace. 
+func (mg *EventHubNamespace) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this EventHubNamespace. +func (mg *EventHubNamespace) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this EventHubNamespace. +func (mg *EventHubNamespace) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this EventHubNamespace. +func (mg *EventHubNamespace) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this EventHubNamespace. +func (mg *EventHubNamespace) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this EventHubNamespace. +func (mg *EventHubNamespace) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this EventHubNamespace. +func (mg *EventHubNamespace) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this EventHubNamespace. +func (mg *EventHubNamespace) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this EventHubNamespace. +func (mg *EventHubNamespace) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this EventHubNamespace. +func (mg *EventHubNamespace) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this EventHubNamespace. +func (mg *EventHubNamespace) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this EventHubNamespace. 
+func (mg *EventHubNamespace) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/eventhub/v1beta2/zz_generated.managedlist.go b/apis/eventhub/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..02cc95674 --- /dev/null +++ b/apis/eventhub/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,26 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this EventHubList. +func (l *EventHubList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this EventHubNamespaceList. +func (l *EventHubNamespaceList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/eventhub/v1beta2/zz_generated.resolvers.go b/apis/eventhub/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..c63c279a3 --- /dev/null +++ b/apis/eventhub/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,147 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + rconfig "github.com/upbound/provider-azure/apis/rconfig" + client "sigs.k8s.io/controller-runtime/pkg/client" + + // ResolveReferences of this EventHub. 
+ apisresolver "github.com/upbound/provider-azure/internal/apis" +) + +func (mg *EventHub) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta2", "EventHubNamespace", "EventHubNamespaceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.NamespaceName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.NamespaceNameRef, + Selector: mg.Spec.ForProvider.NamespaceNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.NamespaceName") + } + mg.Spec.ForProvider.NamespaceName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.NamespaceNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} + +// 
ResolveReferences of this EventHubNamespace. +func (mg *EventHubNamespace) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + for i3 := 0; i3 < len(mg.Spec.ForProvider.NetworkRulesets); i3++ { + for i4 := 0; i4 < len(mg.Spec.ForProvider.NetworkRulesets[i3].VirtualNetworkRule); i4++ { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.NetworkRulesets[i3].VirtualNetworkRule[i4].SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.NetworkRulesets[i3].VirtualNetworkRule[i4].SubnetIDRef, + Selector: mg.Spec.ForProvider.NetworkRulesets[i3].VirtualNetworkRule[i4].SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.NetworkRulesets[i3].VirtualNetworkRule[i4].SubnetID") + } + mg.Spec.ForProvider.NetworkRulesets[i3].VirtualNetworkRule[i4].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.NetworkRulesets[i3].VirtualNetworkRule[i4].SubnetIDRef = rsp.ResolvedReference + + } + } + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + 
Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.InitProvider.NetworkRulesets); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.NetworkRulesets[i3].VirtualNetworkRule); i4++ { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.NetworkRulesets[i3].VirtualNetworkRule[i4].SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.NetworkRulesets[i3].VirtualNetworkRule[i4].SubnetIDRef, + Selector: mg.Spec.InitProvider.NetworkRulesets[i3].VirtualNetworkRule[i4].SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.NetworkRulesets[i3].VirtualNetworkRule[i4].SubnetID") + } + mg.Spec.InitProvider.NetworkRulesets[i3].VirtualNetworkRule[i4].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.NetworkRulesets[i3].VirtualNetworkRule[i4].SubnetIDRef = rsp.ResolvedReference + + } + } + + return nil +} diff --git a/apis/eventhub/v1beta2/zz_groupversion_info.go b/apis/eventhub/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..d590f18c8 --- /dev/null +++ b/apis/eventhub/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +// +kubebuilder:object:generate=true +// +groupName=eventhub.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "eventhub.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/fluidrelay/v1beta1/zz_generated.conversion_spokes.go b/apis/fluidrelay/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..a8b6d9edd --- /dev/null +++ b/apis/fluidrelay/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Server to the hub type. +func (tr *Server) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Server type. 
+func (tr *Server) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/fluidrelay/v1beta1/zz_generated.conversion_hubs.go b/apis/fluidrelay/v1beta2/zz_generated.conversion_hubs.go similarity index 93% rename from apis/fluidrelay/v1beta1/zz_generated.conversion_hubs.go rename to apis/fluidrelay/v1beta2/zz_generated.conversion_hubs.go index bf1e481c2..a9dbb58e9 100755 --- a/apis/fluidrelay/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/fluidrelay/v1beta2/zz_generated.conversion_hubs.go @@ -4,7 +4,7 @@ // Code generated by upjet. DO NOT EDIT. -package v1beta1 +package v1beta2 // Hub marks this type as a conversion hub. func (tr *Server) Hub() {} diff --git a/apis/fluidrelay/v1beta2/zz_generated.deepcopy.go b/apis/fluidrelay/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..b24533549 --- /dev/null +++ b/apis/fluidrelay/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,442 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentityInitParameters) DeepCopyInto(out *IdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityInitParameters. +func (in *IdentityInitParameters) DeepCopy() *IdentityInitParameters { + if in == nil { + return nil + } + out := new(IdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityObservation) DeepCopyInto(out *IdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityObservation. +func (in *IdentityObservation) DeepCopy() *IdentityObservation { + if in == nil { + return nil + } + out := new(IdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentityParameters) DeepCopyInto(out *IdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityParameters. +func (in *IdentityParameters) DeepCopy() *IdentityParameters { + if in == nil { + return nil + } + out := new(IdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Server) DeepCopyInto(out *Server) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Server. +func (in *Server) DeepCopy() *Server { + if in == nil { + return nil + } + out := new(Server) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Server) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServerInitParameters) DeepCopyInto(out *ServerInitParameters) { + *out = *in + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StorageSku != nil { + in, out := &in.StorageSku, &out.StorageSku + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerInitParameters. +func (in *ServerInitParameters) DeepCopy() *ServerInitParameters { + if in == nil { + return nil + } + out := new(ServerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServerList) DeepCopyInto(out *ServerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Server, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerList. +func (in *ServerList) DeepCopy() *ServerList { + if in == nil { + return nil + } + out := new(ServerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerObservation) DeepCopyInto(out *ServerObservation) { + *out = *in + if in.FrsTenantID != nil { + in, out := &in.FrsTenantID, &out.FrsTenantID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OrdererEndpoints != nil { + in, out := &in.OrdererEndpoints, &out.OrdererEndpoints + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ServiceEndpoints != nil { + in, out := &in.ServiceEndpoints, 
&out.ServiceEndpoints + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.StorageEndpoints != nil { + in, out := &in.StorageEndpoints, &out.StorageEndpoints + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.StorageSku != nil { + in, out := &in.StorageSku, &out.StorageSku + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerObservation. +func (in *ServerObservation) DeepCopy() *ServerObservation { + if in == nil { + return nil + } + out := new(ServerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServerParameters) DeepCopyInto(out *ServerParameters) { + *out = *in + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StorageSku != nil { + in, out := &in.StorageSku, &out.StorageSku + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerParameters. +func (in *ServerParameters) DeepCopy() *ServerParameters { + if in == nil { + return nil + } + out := new(ServerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServerSpec) DeepCopyInto(out *ServerSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerSpec. +func (in *ServerSpec) DeepCopy() *ServerSpec { + if in == nil { + return nil + } + out := new(ServerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerStatus) DeepCopyInto(out *ServerStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerStatus. +func (in *ServerStatus) DeepCopy() *ServerStatus { + if in == nil { + return nil + } + out := new(ServerStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/fluidrelay/v1beta2/zz_generated.managed.go b/apis/fluidrelay/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..8102dbc4a --- /dev/null +++ b/apis/fluidrelay/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Server. +func (mg *Server) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Server. +func (mg *Server) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Server. +func (mg *Server) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Server. 
+func (mg *Server) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Server. +func (mg *Server) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Server. +func (mg *Server) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Server. +func (mg *Server) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Server. +func (mg *Server) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Server. +func (mg *Server) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Server. +func (mg *Server) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Server. +func (mg *Server) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Server. +func (mg *Server) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/fluidrelay/v1beta2/zz_generated.managedlist.go b/apis/fluidrelay/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..ae7fceedb --- /dev/null +++ b/apis/fluidrelay/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this ServerList. 
+func (l *ServerList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/fluidrelay/v1beta2/zz_generated.resolvers.go b/apis/fluidrelay/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..2526b62c4 --- /dev/null +++ b/apis/fluidrelay/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + client "sigs.k8s.io/controller-runtime/pkg/client" + + // ResolveReferences of this Server. + apisresolver "github.com/upbound/provider-azure/internal/apis" +) + +func (mg *Server) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = 
reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ResourceGroupNameRef, + Selector: mg.Spec.InitProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ResourceGroupName") + } + mg.Spec.InitProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/fluidrelay/v1beta2/zz_groupversion_info.go b/apis/fluidrelay/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..c924476d7 --- /dev/null +++ b/apis/fluidrelay/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=fluidrelay.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
+const ( + CRDGroup = "fluidrelay.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/fluidrelay/v1beta2/zz_server_terraformed.go b/apis/fluidrelay/v1beta2/zz_server_terraformed.go new file mode 100755 index 000000000..96555f822 --- /dev/null +++ b/apis/fluidrelay/v1beta2/zz_server_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Server +func (mg *Server) GetTerraformResourceType() string { + return "azurerm_fluid_relay_server" +} + +// GetConnectionDetailsMapping for this Server +func (tr *Server) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"primary_key": "status.atProvider.primaryKey", "secondary_key": "status.atProvider.secondaryKey"} +} + +// GetObservation of this Server +func (tr *Server) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Server +func (tr *Server) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, 
&tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this Server
+func (tr *Server) GetID() string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this Server
+func (tr *Server) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this Server
+func (tr *Server) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this Server
+func (tr *Server) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this Server
+func (tr *Server) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this Server using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *Server) LateInitialize(attrs []byte) (bool, error) {
+	params := &ServerParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+	return li.LateInitialize(&tr.Spec.ForProvider, params)
+}
+
+// GetTerraformSchemaVersion returns the associated Terraform schema version
+func (tr *Server) GetTerraformSchemaVersion() int {
+	return 0
+}
diff --git a/apis/fluidrelay/v1beta2/zz_server_types.go b/apis/fluidrelay/v1beta2/zz_server_types.go
new file mode 100755
index 000000000..9aa058ca0
--- /dev/null
+++ b/apis/fluidrelay/v1beta2/zz_server_types.go
@@ -0,0 +1,219 @@
+// SPDX-FileCopyrightText: 2024 The Crossplane Authors
+//
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by upjet. DO NOT EDIT.
+
+package v1beta2
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
+)
+
+type IdentityInitParameters struct {
+
+	// Specifies a list of User Assigned Managed Identity IDs to be assigned to this Fluid Relay Service.
+	// +listType=set
+	IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"`
+
+	// Specifies the type of Managed Service Identity that should be configured on this Fluid Relay Service.
Possible values are SystemAssigned,UserAssigned and SystemAssigned, UserAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityObservation struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Fluid Relay Service. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Principal ID for the Service Principal associated with the Identity of this Fluid Relay Server. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID for the Service Principal associated with the Identity of this Fluid Relay Server. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Fluid Relay Service. Possible values are SystemAssigned,UserAssigned and SystemAssigned, UserAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Fluid Relay Service. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Fluid Relay Service. Possible values are SystemAssigned,UserAssigned and SystemAssigned, UserAssigned. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type ServerInitParameters struct { + + // An identity block as defined below. + Identity *IdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The Azure Region where the Fluid Relay Server should exist. Changing this forces a new Fluid Relay Server to be created. 
+ Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name which should be used for this Fluid Relay Server. Changing this forces a new Fluid Relay Server to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The name of the Resource Group where the Fluid Relay Server should exist. Changing this forces a new Fluid Relay Server to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // Sku of the storage associated with the resource, Possible values are standard and basic. Changing this forces a new Fluid Relay Server to be created. + StorageSku *string `json:"storageSku,omitempty" tf:"storage_sku,omitempty"` + + // A mapping of tags which should be assigned to the Fluid Relay Server. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ServerObservation struct { + + // The Fluid tenantId for this server. + FrsTenantID *string `json:"frsTenantId,omitempty" tf:"frs_tenant_id,omitempty"` + + // The ID of the Fluid Relay Server. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. + Identity *IdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // The Azure Region where the Fluid Relay Server should exist. Changing this forces a new Fluid Relay Server to be created. 
+ Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name which should be used for this Fluid Relay Server. Changing this forces a new Fluid Relay Server to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // An array of the Fluid Relay Orderer endpoints. This will be deprecated in future version of fluid relay server and will always be empty, more details. + OrdererEndpoints []*string `json:"ordererEndpoints,omitempty" tf:"orderer_endpoints,omitempty"` + + // The name of the Resource Group where the Fluid Relay Server should exist. Changing this forces a new Fluid Relay Server to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // An array of service endpoints for this Fluid Relay Server. + ServiceEndpoints []*string `json:"serviceEndpoints,omitempty" tf:"service_endpoints,omitempty"` + + // An array of storage endpoints for this Fluid Relay Server. This will be deprecated in future version of fluid relay server and will always be empty, more details. + StorageEndpoints []*string `json:"storageEndpoints,omitempty" tf:"storage_endpoints,omitempty"` + + // Sku of the storage associated with the resource, Possible values are standard and basic. Changing this forces a new Fluid Relay Server to be created. + StorageSku *string `json:"storageSku,omitempty" tf:"storage_sku,omitempty"` + + // A mapping of tags which should be assigned to the Fluid Relay Server. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ServerParameters struct { + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *IdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The Azure Region where the Fluid Relay Server should exist. Changing this forces a new Fluid Relay Server to be created. 
+ // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name which should be used for this Fluid Relay Server. Changing this forces a new Fluid Relay Server to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The name of the Resource Group where the Fluid Relay Server should exist. Changing this forces a new Fluid Relay Server to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // Sku of the storage associated with the resource, Possible values are standard and basic. Changing this forces a new Fluid Relay Server to be created. + // +kubebuilder:validation:Optional + StorageSku *string `json:"storageSku,omitempty" tf:"storage_sku,omitempty"` + + // A mapping of tags which should be assigned to the Fluid Relay Server. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +// ServerSpec defines the desired state of Server +type ServerSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ServerParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. 
+ // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ServerInitParameters `json:"initProvider,omitempty"` +} + +// ServerStatus defines the observed state of Server. +type ServerStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ServerObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Server is the Schema for the Servers API. Manages a Fluid Relay Server. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type Server struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 
'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec ServerSpec `json:"spec"` + Status ServerStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ServerList contains a list of Servers +type ServerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Server `json:"items"` +} + +// Repository type metadata. +var ( + Server_Kind = "Server" + Server_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Server_Kind}.String() + Server_KindAPIVersion = Server_Kind + "." + CRDGroupVersion.String() + Server_GroupVersionKind = CRDGroupVersion.WithKind(Server_Kind) +) + +func init() { + SchemeBuilder.Register(&Server{}, &ServerList{}) +} diff --git a/apis/generate.go b/apis/generate.go index 301ca4a00..55e5952f6 100644 --- a/apis/generate.go +++ b/apis/generate.go @@ -11,6 +11,7 @@ //go:generate rm -rf ../package/crds // Remove generated files +//go:generate bash -c "find . \\( -iname 'zz_generated.conversion_hubs.go' -o -iname 'zz_generated.conversion_spokes.go' \\) -delete" //go:generate bash -c "find . -type d -empty -delete" //go:generate bash -c "find ../internal/controller -iname 'zz_*' -delete" //go:generate bash -c "find ../internal/controller -type d -empty -delete" diff --git a/apis/guestconfiguration/v1beta1/zz_generated.conversion_spokes.go b/apis/guestconfiguration/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..800385aed --- /dev/null +++ b/apis/guestconfiguration/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this PolicyVirtualMachineConfigurationAssignment to the hub type. +func (tr *PolicyVirtualMachineConfigurationAssignment) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the PolicyVirtualMachineConfigurationAssignment type. +func (tr *PolicyVirtualMachineConfigurationAssignment) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/guestconfiguration/v1beta1/zz_generated.conversion_hubs.go b/apis/guestconfiguration/v1beta2/zz_generated.conversion_hubs.go similarity index 94% rename from apis/guestconfiguration/v1beta1/zz_generated.conversion_hubs.go rename to apis/guestconfiguration/v1beta2/zz_generated.conversion_hubs.go index c952d61e0..57e912ba1 100755 --- a/apis/guestconfiguration/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/guestconfiguration/v1beta2/zz_generated.conversion_hubs.go @@ -4,7 +4,7 @@ // Code generated by upjet. DO NOT EDIT. -package v1beta1 +package v1beta2 // Hub marks this type as a conversion hub. 
func (tr *PolicyVirtualMachineConfigurationAssignment) Hub() {} diff --git a/apis/guestconfiguration/v1beta2/zz_generated.deepcopy.go b/apis/guestconfiguration/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..09bcf749e --- /dev/null +++ b/apis/guestconfiguration/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,409 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigurationInitParameters) DeepCopyInto(out *ConfigurationInitParameters) { + *out = *in + if in.AssignmentType != nil { + in, out := &in.AssignmentType, &out.AssignmentType + *out = new(string) + **out = **in + } + if in.ContentHash != nil { + in, out := &in.ContentHash, &out.ContentHash + *out = new(string) + **out = **in + } + if in.ContentURI != nil { + in, out := &in.ContentURI, &out.ContentURI + *out = new(string) + **out = **in + } + if in.Parameter != nil { + in, out := &in.Parameter, &out.Parameter + *out = make([]ParameterInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationInitParameters. +func (in *ConfigurationInitParameters) DeepCopy() *ConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigurationObservation) DeepCopyInto(out *ConfigurationObservation) { + *out = *in + if in.AssignmentType != nil { + in, out := &in.AssignmentType, &out.AssignmentType + *out = new(string) + **out = **in + } + if in.ContentHash != nil { + in, out := &in.ContentHash, &out.ContentHash + *out = new(string) + **out = **in + } + if in.ContentURI != nil { + in, out := &in.ContentURI, &out.ContentURI + *out = new(string) + **out = **in + } + if in.Parameter != nil { + in, out := &in.Parameter, &out.Parameter + *out = make([]ParameterObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationObservation. +func (in *ConfigurationObservation) DeepCopy() *ConfigurationObservation { + if in == nil { + return nil + } + out := new(ConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigurationParameters) DeepCopyInto(out *ConfigurationParameters) { + *out = *in + if in.AssignmentType != nil { + in, out := &in.AssignmentType, &out.AssignmentType + *out = new(string) + **out = **in + } + if in.ContentHash != nil { + in, out := &in.ContentHash, &out.ContentHash + *out = new(string) + **out = **in + } + if in.ContentURI != nil { + in, out := &in.ContentURI, &out.ContentURI + *out = new(string) + **out = **in + } + if in.Parameter != nil { + in, out := &in.Parameter, &out.Parameter + *out = make([]ParameterParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationParameters. +func (in *ConfigurationParameters) DeepCopy() *ConfigurationParameters { + if in == nil { + return nil + } + out := new(ConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParameterInitParameters) DeepCopyInto(out *ParameterInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParameterInitParameters. +func (in *ParameterInitParameters) DeepCopy() *ParameterInitParameters { + if in == nil { + return nil + } + out := new(ParameterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ParameterObservation) DeepCopyInto(out *ParameterObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParameterObservation. +func (in *ParameterObservation) DeepCopy() *ParameterObservation { + if in == nil { + return nil + } + out := new(ParameterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParameterParameters) DeepCopyInto(out *ParameterParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParameterParameters. +func (in *ParameterParameters) DeepCopy() *ParameterParameters { + if in == nil { + return nil + } + out := new(ParameterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyVirtualMachineConfigurationAssignment) DeepCopyInto(out *PolicyVirtualMachineConfigurationAssignment) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyVirtualMachineConfigurationAssignment. 
+func (in *PolicyVirtualMachineConfigurationAssignment) DeepCopy() *PolicyVirtualMachineConfigurationAssignment { + if in == nil { + return nil + } + out := new(PolicyVirtualMachineConfigurationAssignment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PolicyVirtualMachineConfigurationAssignment) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyVirtualMachineConfigurationAssignmentInitParameters) DeepCopyInto(out *PolicyVirtualMachineConfigurationAssignmentInitParameters) { + *out = *in + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + *out = new(ConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyVirtualMachineConfigurationAssignmentInitParameters. +func (in *PolicyVirtualMachineConfigurationAssignmentInitParameters) DeepCopy() *PolicyVirtualMachineConfigurationAssignmentInitParameters { + if in == nil { + return nil + } + out := new(PolicyVirtualMachineConfigurationAssignmentInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PolicyVirtualMachineConfigurationAssignmentList) DeepCopyInto(out *PolicyVirtualMachineConfigurationAssignmentList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PolicyVirtualMachineConfigurationAssignment, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyVirtualMachineConfigurationAssignmentList. +func (in *PolicyVirtualMachineConfigurationAssignmentList) DeepCopy() *PolicyVirtualMachineConfigurationAssignmentList { + if in == nil { + return nil + } + out := new(PolicyVirtualMachineConfigurationAssignmentList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PolicyVirtualMachineConfigurationAssignmentList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyVirtualMachineConfigurationAssignmentObservation) DeepCopyInto(out *PolicyVirtualMachineConfigurationAssignmentObservation) { + *out = *in + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + *out = new(ConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.VirtualMachineID != nil { + in, out := &in.VirtualMachineID, &out.VirtualMachineID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyVirtualMachineConfigurationAssignmentObservation. 
+func (in *PolicyVirtualMachineConfigurationAssignmentObservation) DeepCopy() *PolicyVirtualMachineConfigurationAssignmentObservation { + if in == nil { + return nil + } + out := new(PolicyVirtualMachineConfigurationAssignmentObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyVirtualMachineConfigurationAssignmentParameters) DeepCopyInto(out *PolicyVirtualMachineConfigurationAssignmentParameters) { + *out = *in + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + *out = new(ConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.VirtualMachineID != nil { + in, out := &in.VirtualMachineID, &out.VirtualMachineID + *out = new(string) + **out = **in + } + if in.VirtualMachineIDRef != nil { + in, out := &in.VirtualMachineIDRef, &out.VirtualMachineIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualMachineIDSelector != nil { + in, out := &in.VirtualMachineIDSelector, &out.VirtualMachineIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyVirtualMachineConfigurationAssignmentParameters. +func (in *PolicyVirtualMachineConfigurationAssignmentParameters) DeepCopy() *PolicyVirtualMachineConfigurationAssignmentParameters { + if in == nil { + return nil + } + out := new(PolicyVirtualMachineConfigurationAssignmentParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PolicyVirtualMachineConfigurationAssignmentSpec) DeepCopyInto(out *PolicyVirtualMachineConfigurationAssignmentSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyVirtualMachineConfigurationAssignmentSpec. +func (in *PolicyVirtualMachineConfigurationAssignmentSpec) DeepCopy() *PolicyVirtualMachineConfigurationAssignmentSpec { + if in == nil { + return nil + } + out := new(PolicyVirtualMachineConfigurationAssignmentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyVirtualMachineConfigurationAssignmentStatus) DeepCopyInto(out *PolicyVirtualMachineConfigurationAssignmentStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyVirtualMachineConfigurationAssignmentStatus. +func (in *PolicyVirtualMachineConfigurationAssignmentStatus) DeepCopy() *PolicyVirtualMachineConfigurationAssignmentStatus { + if in == nil { + return nil + } + out := new(PolicyVirtualMachineConfigurationAssignmentStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/guestconfiguration/v1beta2/zz_generated.managed.go b/apis/guestconfiguration/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..d915897ea --- /dev/null +++ b/apis/guestconfiguration/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. 
+ +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this PolicyVirtualMachineConfigurationAssignment. +func (mg *PolicyVirtualMachineConfigurationAssignment) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this PolicyVirtualMachineConfigurationAssignment. +func (mg *PolicyVirtualMachineConfigurationAssignment) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this PolicyVirtualMachineConfigurationAssignment. +func (mg *PolicyVirtualMachineConfigurationAssignment) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this PolicyVirtualMachineConfigurationAssignment. +func (mg *PolicyVirtualMachineConfigurationAssignment) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this PolicyVirtualMachineConfigurationAssignment. +func (mg *PolicyVirtualMachineConfigurationAssignment) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this PolicyVirtualMachineConfigurationAssignment. +func (mg *PolicyVirtualMachineConfigurationAssignment) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this PolicyVirtualMachineConfigurationAssignment. +func (mg *PolicyVirtualMachineConfigurationAssignment) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this PolicyVirtualMachineConfigurationAssignment. +func (mg *PolicyVirtualMachineConfigurationAssignment) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this PolicyVirtualMachineConfigurationAssignment. 
+func (mg *PolicyVirtualMachineConfigurationAssignment) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this PolicyVirtualMachineConfigurationAssignment. +func (mg *PolicyVirtualMachineConfigurationAssignment) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this PolicyVirtualMachineConfigurationAssignment. +func (mg *PolicyVirtualMachineConfigurationAssignment) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this PolicyVirtualMachineConfigurationAssignment. +func (mg *PolicyVirtualMachineConfigurationAssignment) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/guestconfiguration/v1beta2/zz_generated.managedlist.go b/apis/guestconfiguration/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..4260cd39d --- /dev/null +++ b/apis/guestconfiguration/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this PolicyVirtualMachineConfigurationAssignmentList. 
+func (l *PolicyVirtualMachineConfigurationAssignmentList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/guestconfiguration/v1beta2/zz_generated.resolvers.go b/apis/guestconfiguration/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..f426c042a --- /dev/null +++ b/apis/guestconfiguration/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,49 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + apisresolver "github.com/upbound/provider-azure/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *PolicyVirtualMachineConfigurationAssignment) ResolveReferences( // ResolveReferences of this PolicyVirtualMachineConfigurationAssignment. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("compute.azure.upbound.io", "v1beta2", "WindowsVirtualMachine", "WindowsVirtualMachineList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.VirtualMachineID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.VirtualMachineIDRef, + Selector: mg.Spec.ForProvider.VirtualMachineIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VirtualMachineID") + } + mg.Spec.ForProvider.VirtualMachineID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.VirtualMachineIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/guestconfiguration/v1beta2/zz_groupversion_info.go b/apis/guestconfiguration/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..761347299 --- /dev/null +++ b/apis/guestconfiguration/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=guestconfiguration.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
+const ( + CRDGroup = "guestconfiguration.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/guestconfiguration/v1beta2/zz_policyvirtualmachineconfigurationassignment_terraformed.go b/apis/guestconfiguration/v1beta2/zz_policyvirtualmachineconfigurationassignment_terraformed.go new file mode 100755 index 000000000..ce73f7263 --- /dev/null +++ b/apis/guestconfiguration/v1beta2/zz_policyvirtualmachineconfigurationassignment_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this PolicyVirtualMachineConfigurationAssignment +func (mg *PolicyVirtualMachineConfigurationAssignment) GetTerraformResourceType() string { + return "azurerm_policy_virtual_machine_configuration_assignment" +} + +// GetConnectionDetailsMapping for this PolicyVirtualMachineConfigurationAssignment +func (tr *PolicyVirtualMachineConfigurationAssignment) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this PolicyVirtualMachineConfigurationAssignment +func (tr *PolicyVirtualMachineConfigurationAssignment) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this PolicyVirtualMachineConfigurationAssignment +func (tr *PolicyVirtualMachineConfigurationAssignment) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this PolicyVirtualMachineConfigurationAssignment +func (tr *PolicyVirtualMachineConfigurationAssignment) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this PolicyVirtualMachineConfigurationAssignment +func (tr *PolicyVirtualMachineConfigurationAssignment) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this 
PolicyVirtualMachineConfigurationAssignment +func (tr *PolicyVirtualMachineConfigurationAssignment) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this PolicyVirtualMachineConfigurationAssignment +func (tr *PolicyVirtualMachineConfigurationAssignment) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this PolicyVirtualMachineConfigurationAssignment +func (tr *PolicyVirtualMachineConfigurationAssignment) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this PolicyVirtualMachineConfigurationAssignment using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *PolicyVirtualMachineConfigurationAssignment) LateInitialize(attrs []byte) (bool, error) { + params := &PolicyVirtualMachineConfigurationAssignmentParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *PolicyVirtualMachineConfigurationAssignment) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/guestconfiguration/v1beta2/zz_policyvirtualmachineconfigurationassignment_types.go b/apis/guestconfiguration/v1beta2/zz_policyvirtualmachineconfigurationassignment_types.go new file mode 100755 index 000000000..a5c097396 --- /dev/null +++ b/apis/guestconfiguration/v1beta2/zz_policyvirtualmachineconfigurationassignment_types.go @@ -0,0 +1,213 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ConfigurationInitParameters struct { + + // The assignment type for the Guest Configuration Assignment. Possible values are Audit, ApplyAndAutoCorrect, ApplyAndMonitor and DeployAndAutoCorrect. + AssignmentType *string `json:"assignmentType,omitempty" tf:"assignment_type,omitempty"` + + // The content hash for the Guest Configuration package. + ContentHash *string `json:"contentHash,omitempty" tf:"content_hash,omitempty"` + + // The content URI where the Guest Configuration package is stored. 
+ ContentURI *string `json:"contentUri,omitempty" tf:"content_uri,omitempty"` + + // One or more parameter blocks as defined below which define what configuration parameters and values against. + Parameter []ParameterInitParameters `json:"parameter,omitempty" tf:"parameter,omitempty"` + + // The version of the Guest Configuration that will be assigned in this Guest Configuration Assignment. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type ConfigurationObservation struct { + + // The assignment type for the Guest Configuration Assignment. Possible values are Audit, ApplyAndAutoCorrect, ApplyAndMonitor and DeployAndAutoCorrect. + AssignmentType *string `json:"assignmentType,omitempty" tf:"assignment_type,omitempty"` + + // The content hash for the Guest Configuration package. + ContentHash *string `json:"contentHash,omitempty" tf:"content_hash,omitempty"` + + // The content URI where the Guest Configuration package is stored. + ContentURI *string `json:"contentUri,omitempty" tf:"content_uri,omitempty"` + + // One or more parameter blocks as defined below which define what configuration parameters and values against. + Parameter []ParameterObservation `json:"parameter,omitempty" tf:"parameter,omitempty"` + + // The version of the Guest Configuration that will be assigned in this Guest Configuration Assignment. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type ConfigurationParameters struct { + + // The assignment type for the Guest Configuration Assignment. Possible values are Audit, ApplyAndAutoCorrect, ApplyAndMonitor and DeployAndAutoCorrect. + // +kubebuilder:validation:Optional + AssignmentType *string `json:"assignmentType,omitempty" tf:"assignment_type,omitempty"` + + // The content hash for the Guest Configuration package. 
+ // +kubebuilder:validation:Optional + ContentHash *string `json:"contentHash,omitempty" tf:"content_hash,omitempty"` + + // The content URI where the Guest Configuration package is stored. + // +kubebuilder:validation:Optional + ContentURI *string `json:"contentUri,omitempty" tf:"content_uri,omitempty"` + + // One or more parameter blocks as defined below which define what configuration parameters and values against. + // +kubebuilder:validation:Optional + Parameter []ParameterParameters `json:"parameter,omitempty" tf:"parameter,omitempty"` + + // The version of the Guest Configuration that will be assigned in this Guest Configuration Assignment. + // +kubebuilder:validation:Optional + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type ParameterInitParameters struct { + + // The name of the configuration parameter to check. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The value to check the configuration parameter with. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ParameterObservation struct { + + // The name of the configuration parameter to check. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The value to check the configuration parameter with. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ParameterParameters struct { + + // The name of the configuration parameter to check. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The value to check the configuration parameter with. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type PolicyVirtualMachineConfigurationAssignmentInitParameters struct { + + // A configuration block as defined below. + Configuration *ConfigurationInitParameters `json:"configuration,omitempty" tf:"configuration,omitempty"` + + // The Azure location where the Policy Virtual Machine Configuration Assignment should exist. 
Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` +} + +type PolicyVirtualMachineConfigurationAssignmentObservation struct { + + // A configuration block as defined below. + Configuration *ConfigurationObservation `json:"configuration,omitempty" tf:"configuration,omitempty"` + + // The ID of the Policy Virtual Machine Configuration Assignment. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The Azure location where the Policy Virtual Machine Configuration Assignment should exist. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The resource ID of the Policy Virtual Machine which this Guest Configuration Assignment should apply to. Changing this forces a new resource to be created. + VirtualMachineID *string `json:"virtualMachineId,omitempty" tf:"virtual_machine_id,omitempty"` +} + +type PolicyVirtualMachineConfigurationAssignmentParameters struct { + + // A configuration block as defined below. + // +kubebuilder:validation:Optional + Configuration *ConfigurationParameters `json:"configuration,omitempty" tf:"configuration,omitempty"` + + // The Azure location where the Policy Virtual Machine Configuration Assignment should exist. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The resource ID of the Policy Virtual Machine which this Guest Configuration Assignment should apply to. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/compute/v1beta2.WindowsVirtualMachine + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + VirtualMachineID *string `json:"virtualMachineId,omitempty" tf:"virtual_machine_id,omitempty"` + + // Reference to a WindowsVirtualMachine in compute to populate virtualMachineId. + // +kubebuilder:validation:Optional + VirtualMachineIDRef *v1.Reference `json:"virtualMachineIdRef,omitempty" tf:"-"` + + // Selector for a WindowsVirtualMachine in compute to populate virtualMachineId. + // +kubebuilder:validation:Optional + VirtualMachineIDSelector *v1.Selector `json:"virtualMachineIdSelector,omitempty" tf:"-"` +} + +// PolicyVirtualMachineConfigurationAssignmentSpec defines the desired state of PolicyVirtualMachineConfigurationAssignment +type PolicyVirtualMachineConfigurationAssignmentSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider PolicyVirtualMachineConfigurationAssignmentParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider PolicyVirtualMachineConfigurationAssignmentInitParameters `json:"initProvider,omitempty"` +} + +// PolicyVirtualMachineConfigurationAssignmentStatus defines the observed state of PolicyVirtualMachineConfigurationAssignment. 
+type PolicyVirtualMachineConfigurationAssignmentStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider PolicyVirtualMachineConfigurationAssignmentObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// PolicyVirtualMachineConfigurationAssignment is the Schema for the PolicyVirtualMachineConfigurationAssignments API. Applies a Guest Configuration Policy to a Virtual Machine. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type PolicyVirtualMachineConfigurationAssignment struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.configuration) || (has(self.initProvider) && has(self.initProvider.configuration))",message="spec.forProvider.configuration is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + Spec PolicyVirtualMachineConfigurationAssignmentSpec `json:"spec"` + Status PolicyVirtualMachineConfigurationAssignmentStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// 
PolicyVirtualMachineConfigurationAssignmentList contains a list of PolicyVirtualMachineConfigurationAssignments +type PolicyVirtualMachineConfigurationAssignmentList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []PolicyVirtualMachineConfigurationAssignment `json:"items"` +} + +// Repository type metadata. +var ( + PolicyVirtualMachineConfigurationAssignment_Kind = "PolicyVirtualMachineConfigurationAssignment" + PolicyVirtualMachineConfigurationAssignment_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: PolicyVirtualMachineConfigurationAssignment_Kind}.String() + PolicyVirtualMachineConfigurationAssignment_KindAPIVersion = PolicyVirtualMachineConfigurationAssignment_Kind + "." + CRDGroupVersion.String() + PolicyVirtualMachineConfigurationAssignment_GroupVersionKind = CRDGroupVersion.WithKind(PolicyVirtualMachineConfigurationAssignment_Kind) +) + +func init() { + SchemeBuilder.Register(&PolicyVirtualMachineConfigurationAssignment{}, &PolicyVirtualMachineConfigurationAssignmentList{}) +} diff --git a/apis/hdinsight/v1beta1/zz_generated.conversion_spokes.go b/apis/hdinsight/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..5dfe35a2e --- /dev/null +++ b/apis/hdinsight/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,114 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this HadoopCluster to the hub type. 
+func (tr *HadoopCluster) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the HadoopCluster type. +func (tr *HadoopCluster) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this HBaseCluster to the hub type. +func (tr *HBaseCluster) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the HBaseCluster type. +func (tr *HBaseCluster) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this InteractiveQueryCluster to the hub type. 
+func (tr *InteractiveQueryCluster) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the InteractiveQueryCluster type. +func (tr *InteractiveQueryCluster) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this KafkaCluster to the hub type. +func (tr *KafkaCluster) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the KafkaCluster type. +func (tr *KafkaCluster) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this SparkCluster to the hub type. 
+func (tr *SparkCluster) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the SparkCluster type. +func (tr *SparkCluster) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/hdinsight/v1beta1/zz_generated.conversion_hubs.go b/apis/hdinsight/v1beta2/zz_generated.conversion_hubs.go similarity index 97% rename from apis/hdinsight/v1beta1/zz_generated.conversion_hubs.go rename to apis/hdinsight/v1beta2/zz_generated.conversion_hubs.go index fffac601b..0bc1dabfb 100755 --- a/apis/hdinsight/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/hdinsight/v1beta2/zz_generated.conversion_hubs.go @@ -4,7 +4,7 @@ // Code generated by upjet. DO NOT EDIT. -package v1beta1 +package v1beta2 // Hub marks this type as a conversion hub. func (tr *HadoopCluster) Hub() {} diff --git a/apis/hdinsight/v1beta2/zz_generated.deepcopy.go b/apis/hdinsight/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..9e3fc3fdf --- /dev/null +++ b/apis/hdinsight/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,15505 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AmbariInitParameters) DeepCopyInto(out *AmbariInitParameters) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AmbariInitParameters. +func (in *AmbariInitParameters) DeepCopy() *AmbariInitParameters { + if in == nil { + return nil + } + out := new(AmbariInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AmbariObservation) DeepCopyInto(out *AmbariObservation) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AmbariObservation. +func (in *AmbariObservation) DeepCopy() *AmbariObservation { + if in == nil { + return nil + } + out := new(AmbariObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AmbariParameters) DeepCopyInto(out *AmbariParameters) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + out.PasswordSecretRef = in.PasswordSecretRef + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AmbariParameters. +func (in *AmbariParameters) DeepCopy() *AmbariParameters { + if in == nil { + return nil + } + out := new(AmbariParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoscaleCapacityInitParameters) DeepCopyInto(out *AutoscaleCapacityInitParameters) { + *out = *in + if in.MaxInstanceCount != nil { + in, out := &in.MaxInstanceCount, &out.MaxInstanceCount + *out = new(float64) + **out = **in + } + if in.MinInstanceCount != nil { + in, out := &in.MinInstanceCount, &out.MinInstanceCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscaleCapacityInitParameters. +func (in *AutoscaleCapacityInitParameters) DeepCopy() *AutoscaleCapacityInitParameters { + if in == nil { + return nil + } + out := new(AutoscaleCapacityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutoscaleCapacityObservation) DeepCopyInto(out *AutoscaleCapacityObservation) { + *out = *in + if in.MaxInstanceCount != nil { + in, out := &in.MaxInstanceCount, &out.MaxInstanceCount + *out = new(float64) + **out = **in + } + if in.MinInstanceCount != nil { + in, out := &in.MinInstanceCount, &out.MinInstanceCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscaleCapacityObservation. +func (in *AutoscaleCapacityObservation) DeepCopy() *AutoscaleCapacityObservation { + if in == nil { + return nil + } + out := new(AutoscaleCapacityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoscaleCapacityParameters) DeepCopyInto(out *AutoscaleCapacityParameters) { + *out = *in + if in.MaxInstanceCount != nil { + in, out := &in.MaxInstanceCount, &out.MaxInstanceCount + *out = new(float64) + **out = **in + } + if in.MinInstanceCount != nil { + in, out := &in.MinInstanceCount, &out.MinInstanceCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscaleCapacityParameters. +func (in *AutoscaleCapacityParameters) DeepCopy() *AutoscaleCapacityParameters { + if in == nil { + return nil + } + out := new(AutoscaleCapacityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutoscaleInitParameters) DeepCopyInto(out *AutoscaleInitParameters) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(CapacityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Recurrence != nil { + in, out := &in.Recurrence, &out.Recurrence + *out = new(RecurrenceInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscaleInitParameters. +func (in *AutoscaleInitParameters) DeepCopy() *AutoscaleInitParameters { + if in == nil { + return nil + } + out := new(AutoscaleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoscaleObservation) DeepCopyInto(out *AutoscaleObservation) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(CapacityObservation) + (*in).DeepCopyInto(*out) + } + if in.Recurrence != nil { + in, out := &in.Recurrence, &out.Recurrence + *out = new(RecurrenceObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscaleObservation. +func (in *AutoscaleObservation) DeepCopy() *AutoscaleObservation { + if in == nil { + return nil + } + out := new(AutoscaleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutoscaleParameters) DeepCopyInto(out *AutoscaleParameters) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(CapacityParameters) + (*in).DeepCopyInto(*out) + } + if in.Recurrence != nil { + in, out := &in.Recurrence, &out.Recurrence + *out = new(RecurrenceParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscaleParameters. +func (in *AutoscaleParameters) DeepCopy() *AutoscaleParameters { + if in == nil { + return nil + } + out := new(AutoscaleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoscaleRecurrenceInitParameters) DeepCopyInto(out *AutoscaleRecurrenceInitParameters) { + *out = *in + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = make([]RecurrenceScheduleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Timezone != nil { + in, out := &in.Timezone, &out.Timezone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscaleRecurrenceInitParameters. +func (in *AutoscaleRecurrenceInitParameters) DeepCopy() *AutoscaleRecurrenceInitParameters { + if in == nil { + return nil + } + out := new(AutoscaleRecurrenceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutoscaleRecurrenceObservation) DeepCopyInto(out *AutoscaleRecurrenceObservation) { + *out = *in + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = make([]RecurrenceScheduleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Timezone != nil { + in, out := &in.Timezone, &out.Timezone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscaleRecurrenceObservation. +func (in *AutoscaleRecurrenceObservation) DeepCopy() *AutoscaleRecurrenceObservation { + if in == nil { + return nil + } + out := new(AutoscaleRecurrenceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoscaleRecurrenceParameters) DeepCopyInto(out *AutoscaleRecurrenceParameters) { + *out = *in + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = make([]RecurrenceScheduleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Timezone != nil { + in, out := &in.Timezone, &out.Timezone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscaleRecurrenceParameters. +func (in *AutoscaleRecurrenceParameters) DeepCopy() *AutoscaleRecurrenceParameters { + if in == nil { + return nil + } + out := new(AutoscaleRecurrenceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutoscaleRecurrenceScheduleInitParameters) DeepCopyInto(out *AutoscaleRecurrenceScheduleInitParameters) { + *out = *in + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TargetInstanceCount != nil { + in, out := &in.TargetInstanceCount, &out.TargetInstanceCount + *out = new(float64) + **out = **in + } + if in.Time != nil { + in, out := &in.Time, &out.Time + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscaleRecurrenceScheduleInitParameters. +func (in *AutoscaleRecurrenceScheduleInitParameters) DeepCopy() *AutoscaleRecurrenceScheduleInitParameters { + if in == nil { + return nil + } + out := new(AutoscaleRecurrenceScheduleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoscaleRecurrenceScheduleObservation) DeepCopyInto(out *AutoscaleRecurrenceScheduleObservation) { + *out = *in + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TargetInstanceCount != nil { + in, out := &in.TargetInstanceCount, &out.TargetInstanceCount + *out = new(float64) + **out = **in + } + if in.Time != nil { + in, out := &in.Time, &out.Time + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscaleRecurrenceScheduleObservation. 
+func (in *AutoscaleRecurrenceScheduleObservation) DeepCopy() *AutoscaleRecurrenceScheduleObservation { + if in == nil { + return nil + } + out := new(AutoscaleRecurrenceScheduleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoscaleRecurrenceScheduleParameters) DeepCopyInto(out *AutoscaleRecurrenceScheduleParameters) { + *out = *in + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TargetInstanceCount != nil { + in, out := &in.TargetInstanceCount, &out.TargetInstanceCount + *out = new(float64) + **out = **in + } + if in.Time != nil { + in, out := &in.Time, &out.Time + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscaleRecurrenceScheduleParameters. +func (in *AutoscaleRecurrenceScheduleParameters) DeepCopy() *AutoscaleRecurrenceScheduleParameters { + if in == nil { + return nil + } + out := new(AutoscaleRecurrenceScheduleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CapacityInitParameters) DeepCopyInto(out *CapacityInitParameters) { + *out = *in + if in.MaxInstanceCount != nil { + in, out := &in.MaxInstanceCount, &out.MaxInstanceCount + *out = new(float64) + **out = **in + } + if in.MinInstanceCount != nil { + in, out := &in.MinInstanceCount, &out.MinInstanceCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacityInitParameters. 
+func (in *CapacityInitParameters) DeepCopy() *CapacityInitParameters { + if in == nil { + return nil + } + out := new(CapacityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CapacityObservation) DeepCopyInto(out *CapacityObservation) { + *out = *in + if in.MaxInstanceCount != nil { + in, out := &in.MaxInstanceCount, &out.MaxInstanceCount + *out = new(float64) + **out = **in + } + if in.MinInstanceCount != nil { + in, out := &in.MinInstanceCount, &out.MinInstanceCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacityObservation. +func (in *CapacityObservation) DeepCopy() *CapacityObservation { + if in == nil { + return nil + } + out := new(CapacityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CapacityParameters) DeepCopyInto(out *CapacityParameters) { + *out = *in + if in.MaxInstanceCount != nil { + in, out := &in.MaxInstanceCount, &out.MaxInstanceCount + *out = new(float64) + **out = **in + } + if in.MinInstanceCount != nil { + in, out := &in.MinInstanceCount, &out.MinInstanceCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacityParameters. +func (in *CapacityParameters) DeepCopy() *CapacityParameters { + if in == nil { + return nil + } + out := new(CapacityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ComponentVersionInitParameters) DeepCopyInto(out *ComponentVersionInitParameters) { + *out = *in + if in.Hadoop != nil { + in, out := &in.Hadoop, &out.Hadoop + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentVersionInitParameters. +func (in *ComponentVersionInitParameters) DeepCopy() *ComponentVersionInitParameters { + if in == nil { + return nil + } + out := new(ComponentVersionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComponentVersionObservation) DeepCopyInto(out *ComponentVersionObservation) { + *out = *in + if in.Hadoop != nil { + in, out := &in.Hadoop, &out.Hadoop + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentVersionObservation. +func (in *ComponentVersionObservation) DeepCopy() *ComponentVersionObservation { + if in == nil { + return nil + } + out := new(ComponentVersionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComponentVersionParameters) DeepCopyInto(out *ComponentVersionParameters) { + *out = *in + if in.Hadoop != nil { + in, out := &in.Hadoop, &out.Hadoop + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentVersionParameters. +func (in *ComponentVersionParameters) DeepCopy() *ComponentVersionParameters { + if in == nil { + return nil + } + out := new(ComponentVersionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ComputeIsolationInitParameters) DeepCopyInto(out *ComputeIsolationInitParameters) { + *out = *in + if in.ComputeIsolationEnabled != nil { + in, out := &in.ComputeIsolationEnabled, &out.ComputeIsolationEnabled + *out = new(bool) + **out = **in + } + if in.HostSku != nil { + in, out := &in.HostSku, &out.HostSku + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputeIsolationInitParameters. +func (in *ComputeIsolationInitParameters) DeepCopy() *ComputeIsolationInitParameters { + if in == nil { + return nil + } + out := new(ComputeIsolationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComputeIsolationObservation) DeepCopyInto(out *ComputeIsolationObservation) { + *out = *in + if in.ComputeIsolationEnabled != nil { + in, out := &in.ComputeIsolationEnabled, &out.ComputeIsolationEnabled + *out = new(bool) + **out = **in + } + if in.HostSku != nil { + in, out := &in.HostSku, &out.HostSku + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputeIsolationObservation. +func (in *ComputeIsolationObservation) DeepCopy() *ComputeIsolationObservation { + if in == nil { + return nil + } + out := new(ComputeIsolationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ComputeIsolationParameters) DeepCopyInto(out *ComputeIsolationParameters) { + *out = *in + if in.ComputeIsolationEnabled != nil { + in, out := &in.ComputeIsolationEnabled, &out.ComputeIsolationEnabled + *out = new(bool) + **out = **in + } + if in.HostSku != nil { + in, out := &in.HostSku, &out.HostSku + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputeIsolationParameters. +func (in *ComputeIsolationParameters) DeepCopy() *ComputeIsolationParameters { + if in == nil { + return nil + } + out := new(ComputeIsolationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiskEncryptionInitParameters) DeepCopyInto(out *DiskEncryptionInitParameters) { + *out = *in + if in.EncryptionAlgorithm != nil { + in, out := &in.EncryptionAlgorithm, &out.EncryptionAlgorithm + *out = new(string) + **out = **in + } + if in.EncryptionAtHostEnabled != nil { + in, out := &in.EncryptionAtHostEnabled, &out.EncryptionAtHostEnabled + *out = new(bool) + **out = **in + } + if in.KeyVaultKeyID != nil { + in, out := &in.KeyVaultKeyID, &out.KeyVaultKeyID + *out = new(string) + **out = **in + } + if in.KeyVaultManagedIdentityID != nil { + in, out := &in.KeyVaultManagedIdentityID, &out.KeyVaultManagedIdentityID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskEncryptionInitParameters. +func (in *DiskEncryptionInitParameters) DeepCopy() *DiskEncryptionInitParameters { + if in == nil { + return nil + } + out := new(DiskEncryptionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DiskEncryptionObservation) DeepCopyInto(out *DiskEncryptionObservation) { + *out = *in + if in.EncryptionAlgorithm != nil { + in, out := &in.EncryptionAlgorithm, &out.EncryptionAlgorithm + *out = new(string) + **out = **in + } + if in.EncryptionAtHostEnabled != nil { + in, out := &in.EncryptionAtHostEnabled, &out.EncryptionAtHostEnabled + *out = new(bool) + **out = **in + } + if in.KeyVaultKeyID != nil { + in, out := &in.KeyVaultKeyID, &out.KeyVaultKeyID + *out = new(string) + **out = **in + } + if in.KeyVaultManagedIdentityID != nil { + in, out := &in.KeyVaultManagedIdentityID, &out.KeyVaultManagedIdentityID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskEncryptionObservation. +func (in *DiskEncryptionObservation) DeepCopy() *DiskEncryptionObservation { + if in == nil { + return nil + } + out := new(DiskEncryptionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiskEncryptionParameters) DeepCopyInto(out *DiskEncryptionParameters) { + *out = *in + if in.EncryptionAlgorithm != nil { + in, out := &in.EncryptionAlgorithm, &out.EncryptionAlgorithm + *out = new(string) + **out = **in + } + if in.EncryptionAtHostEnabled != nil { + in, out := &in.EncryptionAtHostEnabled, &out.EncryptionAtHostEnabled + *out = new(bool) + **out = **in + } + if in.KeyVaultKeyID != nil { + in, out := &in.KeyVaultKeyID, &out.KeyVaultKeyID + *out = new(string) + **out = **in + } + if in.KeyVaultManagedIdentityID != nil { + in, out := &in.KeyVaultManagedIdentityID, &out.KeyVaultManagedIdentityID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskEncryptionParameters. 
+func (in *DiskEncryptionParameters) DeepCopy() *DiskEncryptionParameters { + if in == nil { + return nil + } + out := new(DiskEncryptionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EdgeNodeInitParameters) DeepCopyInto(out *EdgeNodeInitParameters) { + *out = *in + if in.HTTPSEndpoints != nil { + in, out := &in.HTTPSEndpoints, &out.HTTPSEndpoints + *out = make([]HTTPSEndpointsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InstallScriptAction != nil { + in, out := &in.InstallScriptAction, &out.InstallScriptAction + *out = make([]InstallScriptActionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TargetInstanceCount != nil { + in, out := &in.TargetInstanceCount, &out.TargetInstanceCount + *out = new(float64) + **out = **in + } + if in.UninstallScriptActions != nil { + in, out := &in.UninstallScriptActions, &out.UninstallScriptActions + *out = make([]UninstallScriptActionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EdgeNodeInitParameters. +func (in *EdgeNodeInitParameters) DeepCopy() *EdgeNodeInitParameters { + if in == nil { + return nil + } + out := new(EdgeNodeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EdgeNodeObservation) DeepCopyInto(out *EdgeNodeObservation) { + *out = *in + if in.HTTPSEndpoints != nil { + in, out := &in.HTTPSEndpoints, &out.HTTPSEndpoints + *out = make([]HTTPSEndpointsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InstallScriptAction != nil { + in, out := &in.InstallScriptAction, &out.InstallScriptAction + *out = make([]InstallScriptActionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TargetInstanceCount != nil { + in, out := &in.TargetInstanceCount, &out.TargetInstanceCount + *out = new(float64) + **out = **in + } + if in.UninstallScriptActions != nil { + in, out := &in.UninstallScriptActions, &out.UninstallScriptActions + *out = make([]UninstallScriptActionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EdgeNodeObservation. +func (in *EdgeNodeObservation) DeepCopy() *EdgeNodeObservation { + if in == nil { + return nil + } + out := new(EdgeNodeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EdgeNodeParameters) DeepCopyInto(out *EdgeNodeParameters) { + *out = *in + if in.HTTPSEndpoints != nil { + in, out := &in.HTTPSEndpoints, &out.HTTPSEndpoints + *out = make([]HTTPSEndpointsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InstallScriptAction != nil { + in, out := &in.InstallScriptAction, &out.InstallScriptAction + *out = make([]InstallScriptActionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TargetInstanceCount != nil { + in, out := &in.TargetInstanceCount, &out.TargetInstanceCount + *out = new(float64) + **out = **in + } + if in.UninstallScriptActions != nil { + in, out := &in.UninstallScriptActions, &out.UninstallScriptActions + *out = make([]UninstallScriptActionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EdgeNodeParameters. +func (in *EdgeNodeParameters) DeepCopy() *EdgeNodeParameters { + if in == nil { + return nil + } + out := new(EdgeNodeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExtensionInitParameters) DeepCopyInto(out *ExtensionInitParameters) { + *out = *in + if in.LogAnalyticsWorkspaceID != nil { + in, out := &in.LogAnalyticsWorkspaceID, &out.LogAnalyticsWorkspaceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtensionInitParameters. 
+func (in *ExtensionInitParameters) DeepCopy() *ExtensionInitParameters { + if in == nil { + return nil + } + out := new(ExtensionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExtensionObservation) DeepCopyInto(out *ExtensionObservation) { + *out = *in + if in.LogAnalyticsWorkspaceID != nil { + in, out := &in.LogAnalyticsWorkspaceID, &out.LogAnalyticsWorkspaceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtensionObservation. +func (in *ExtensionObservation) DeepCopy() *ExtensionObservation { + if in == nil { + return nil + } + out := new(ExtensionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExtensionParameters) DeepCopyInto(out *ExtensionParameters) { + *out = *in + if in.LogAnalyticsWorkspaceID != nil { + in, out := &in.LogAnalyticsWorkspaceID, &out.LogAnalyticsWorkspaceID + *out = new(string) + **out = **in + } + out.PrimaryKeySecretRef = in.PrimaryKeySecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtensionParameters. +func (in *ExtensionParameters) DeepCopy() *ExtensionParameters { + if in == nil { + return nil + } + out := new(ExtensionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatewayInitParameters) DeepCopyInto(out *GatewayInitParameters) { + *out = *in + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayInitParameters. 
+func (in *GatewayInitParameters) DeepCopy() *GatewayInitParameters { + if in == nil { + return nil + } + out := new(GatewayInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatewayObservation) DeepCopyInto(out *GatewayObservation) { + *out = *in + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayObservation. +func (in *GatewayObservation) DeepCopy() *GatewayObservation { + if in == nil { + return nil + } + out := new(GatewayObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatewayParameters) DeepCopyInto(out *GatewayParameters) { + *out = *in + out.PasswordSecretRef = in.PasswordSecretRef + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayParameters. +func (in *GatewayParameters) DeepCopy() *GatewayParameters { + if in == nil { + return nil + } + out := new(GatewayParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HBaseCluster) DeepCopyInto(out *HBaseCluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HBaseCluster. 
+func (in *HBaseCluster) DeepCopy() *HBaseCluster { + if in == nil { + return nil + } + out := new(HBaseCluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HBaseCluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HBaseClusterComponentVersionInitParameters) DeepCopyInto(out *HBaseClusterComponentVersionInitParameters) { + *out = *in + if in.HBase != nil { + in, out := &in.HBase, &out.HBase + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HBaseClusterComponentVersionInitParameters. +func (in *HBaseClusterComponentVersionInitParameters) DeepCopy() *HBaseClusterComponentVersionInitParameters { + if in == nil { + return nil + } + out := new(HBaseClusterComponentVersionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HBaseClusterComponentVersionObservation) DeepCopyInto(out *HBaseClusterComponentVersionObservation) { + *out = *in + if in.HBase != nil { + in, out := &in.HBase, &out.HBase + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HBaseClusterComponentVersionObservation. +func (in *HBaseClusterComponentVersionObservation) DeepCopy() *HBaseClusterComponentVersionObservation { + if in == nil { + return nil + } + out := new(HBaseClusterComponentVersionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HBaseClusterComponentVersionParameters) DeepCopyInto(out *HBaseClusterComponentVersionParameters) { + *out = *in + if in.HBase != nil { + in, out := &in.HBase, &out.HBase + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HBaseClusterComponentVersionParameters. +func (in *HBaseClusterComponentVersionParameters) DeepCopy() *HBaseClusterComponentVersionParameters { + if in == nil { + return nil + } + out := new(HBaseClusterComponentVersionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HBaseClusterComputeIsolationInitParameters) DeepCopyInto(out *HBaseClusterComputeIsolationInitParameters) { + *out = *in + if in.ComputeIsolationEnabled != nil { + in, out := &in.ComputeIsolationEnabled, &out.ComputeIsolationEnabled + *out = new(bool) + **out = **in + } + if in.HostSku != nil { + in, out := &in.HostSku, &out.HostSku + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HBaseClusterComputeIsolationInitParameters. +func (in *HBaseClusterComputeIsolationInitParameters) DeepCopy() *HBaseClusterComputeIsolationInitParameters { + if in == nil { + return nil + } + out := new(HBaseClusterComputeIsolationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HBaseClusterComputeIsolationObservation) DeepCopyInto(out *HBaseClusterComputeIsolationObservation) { + *out = *in + if in.ComputeIsolationEnabled != nil { + in, out := &in.ComputeIsolationEnabled, &out.ComputeIsolationEnabled + *out = new(bool) + **out = **in + } + if in.HostSku != nil { + in, out := &in.HostSku, &out.HostSku + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HBaseClusterComputeIsolationObservation. +func (in *HBaseClusterComputeIsolationObservation) DeepCopy() *HBaseClusterComputeIsolationObservation { + if in == nil { + return nil + } + out := new(HBaseClusterComputeIsolationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HBaseClusterComputeIsolationParameters) DeepCopyInto(out *HBaseClusterComputeIsolationParameters) { + *out = *in + if in.ComputeIsolationEnabled != nil { + in, out := &in.ComputeIsolationEnabled, &out.ComputeIsolationEnabled + *out = new(bool) + **out = **in + } + if in.HostSku != nil { + in, out := &in.HostSku, &out.HostSku + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HBaseClusterComputeIsolationParameters. +func (in *HBaseClusterComputeIsolationParameters) DeepCopy() *HBaseClusterComputeIsolationParameters { + if in == nil { + return nil + } + out := new(HBaseClusterComputeIsolationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HBaseClusterDiskEncryptionInitParameters) DeepCopyInto(out *HBaseClusterDiskEncryptionInitParameters) { + *out = *in + if in.EncryptionAlgorithm != nil { + in, out := &in.EncryptionAlgorithm, &out.EncryptionAlgorithm + *out = new(string) + **out = **in + } + if in.EncryptionAtHostEnabled != nil { + in, out := &in.EncryptionAtHostEnabled, &out.EncryptionAtHostEnabled + *out = new(bool) + **out = **in + } + if in.KeyVaultKeyID != nil { + in, out := &in.KeyVaultKeyID, &out.KeyVaultKeyID + *out = new(string) + **out = **in + } + if in.KeyVaultManagedIdentityID != nil { + in, out := &in.KeyVaultManagedIdentityID, &out.KeyVaultManagedIdentityID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HBaseClusterDiskEncryptionInitParameters. +func (in *HBaseClusterDiskEncryptionInitParameters) DeepCopy() *HBaseClusterDiskEncryptionInitParameters { + if in == nil { + return nil + } + out := new(HBaseClusterDiskEncryptionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HBaseClusterDiskEncryptionObservation) DeepCopyInto(out *HBaseClusterDiskEncryptionObservation) { + *out = *in + if in.EncryptionAlgorithm != nil { + in, out := &in.EncryptionAlgorithm, &out.EncryptionAlgorithm + *out = new(string) + **out = **in + } + if in.EncryptionAtHostEnabled != nil { + in, out := &in.EncryptionAtHostEnabled, &out.EncryptionAtHostEnabled + *out = new(bool) + **out = **in + } + if in.KeyVaultKeyID != nil { + in, out := &in.KeyVaultKeyID, &out.KeyVaultKeyID + *out = new(string) + **out = **in + } + if in.KeyVaultManagedIdentityID != nil { + in, out := &in.KeyVaultManagedIdentityID, &out.KeyVaultManagedIdentityID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HBaseClusterDiskEncryptionObservation. +func (in *HBaseClusterDiskEncryptionObservation) DeepCopy() *HBaseClusterDiskEncryptionObservation { + if in == nil { + return nil + } + out := new(HBaseClusterDiskEncryptionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HBaseClusterDiskEncryptionParameters) DeepCopyInto(out *HBaseClusterDiskEncryptionParameters) { + *out = *in + if in.EncryptionAlgorithm != nil { + in, out := &in.EncryptionAlgorithm, &out.EncryptionAlgorithm + *out = new(string) + **out = **in + } + if in.EncryptionAtHostEnabled != nil { + in, out := &in.EncryptionAtHostEnabled, &out.EncryptionAtHostEnabled + *out = new(bool) + **out = **in + } + if in.KeyVaultKeyID != nil { + in, out := &in.KeyVaultKeyID, &out.KeyVaultKeyID + *out = new(string) + **out = **in + } + if in.KeyVaultManagedIdentityID != nil { + in, out := &in.KeyVaultManagedIdentityID, &out.KeyVaultManagedIdentityID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HBaseClusterDiskEncryptionParameters. 
+func (in *HBaseClusterDiskEncryptionParameters) DeepCopy() *HBaseClusterDiskEncryptionParameters { + if in == nil { + return nil + } + out := new(HBaseClusterDiskEncryptionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HBaseClusterExtensionInitParameters) DeepCopyInto(out *HBaseClusterExtensionInitParameters) { + *out = *in + if in.LogAnalyticsWorkspaceID != nil { + in, out := &in.LogAnalyticsWorkspaceID, &out.LogAnalyticsWorkspaceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HBaseClusterExtensionInitParameters. +func (in *HBaseClusterExtensionInitParameters) DeepCopy() *HBaseClusterExtensionInitParameters { + if in == nil { + return nil + } + out := new(HBaseClusterExtensionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HBaseClusterExtensionObservation) DeepCopyInto(out *HBaseClusterExtensionObservation) { + *out = *in + if in.LogAnalyticsWorkspaceID != nil { + in, out := &in.LogAnalyticsWorkspaceID, &out.LogAnalyticsWorkspaceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HBaseClusterExtensionObservation. +func (in *HBaseClusterExtensionObservation) DeepCopy() *HBaseClusterExtensionObservation { + if in == nil { + return nil + } + out := new(HBaseClusterExtensionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HBaseClusterExtensionParameters) DeepCopyInto(out *HBaseClusterExtensionParameters) { + *out = *in + if in.LogAnalyticsWorkspaceID != nil { + in, out := &in.LogAnalyticsWorkspaceID, &out.LogAnalyticsWorkspaceID + *out = new(string) + **out = **in + } + out.PrimaryKeySecretRef = in.PrimaryKeySecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HBaseClusterExtensionParameters. +func (in *HBaseClusterExtensionParameters) DeepCopy() *HBaseClusterExtensionParameters { + if in == nil { + return nil + } + out := new(HBaseClusterExtensionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HBaseClusterGatewayInitParameters) DeepCopyInto(out *HBaseClusterGatewayInitParameters) { + *out = *in + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HBaseClusterGatewayInitParameters. +func (in *HBaseClusterGatewayInitParameters) DeepCopy() *HBaseClusterGatewayInitParameters { + if in == nil { + return nil + } + out := new(HBaseClusterGatewayInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HBaseClusterGatewayObservation) DeepCopyInto(out *HBaseClusterGatewayObservation) { + *out = *in + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HBaseClusterGatewayObservation. 
+func (in *HBaseClusterGatewayObservation) DeepCopy() *HBaseClusterGatewayObservation { + if in == nil { + return nil + } + out := new(HBaseClusterGatewayObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HBaseClusterGatewayParameters) DeepCopyInto(out *HBaseClusterGatewayParameters) { + *out = *in + out.PasswordSecretRef = in.PasswordSecretRef + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HBaseClusterGatewayParameters. +func (in *HBaseClusterGatewayParameters) DeepCopy() *HBaseClusterGatewayParameters { + if in == nil { + return nil + } + out := new(HBaseClusterGatewayParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HBaseClusterInitParameters) DeepCopyInto(out *HBaseClusterInitParameters) { + *out = *in + if in.ClusterVersion != nil { + in, out := &in.ClusterVersion, &out.ClusterVersion + *out = new(string) + **out = **in + } + if in.ComponentVersion != nil { + in, out := &in.ComponentVersion, &out.ComponentVersion + *out = new(HBaseClusterComponentVersionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ComputeIsolation != nil { + in, out := &in.ComputeIsolation, &out.ComputeIsolation + *out = new(HBaseClusterComputeIsolationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DiskEncryption != nil { + in, out := &in.DiskEncryption, &out.DiskEncryption + *out = make([]HBaseClusterDiskEncryptionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Extension != nil { + in, out := &in.Extension, &out.Extension + *out = new(HBaseClusterExtensionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Gateway != nil { + in, out := &in.Gateway, &out.Gateway + *out = new(HBaseClusterGatewayInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Metastores != nil { + in, out := &in.Metastores, &out.Metastores + *out = new(HBaseClusterMetastoresInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Monitor != nil { + in, out := &in.Monitor, &out.Monitor + *out = new(HBaseClusterMonitorInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Network != nil { + in, out := &in.Network, &out.Network + *out = new(HBaseClusterNetworkInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Roles != nil { + in, out := &in.Roles, &out.Roles + *out = new(HBaseClusterRolesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SecurityProfile != nil { + in, out := &in.SecurityProfile, &out.SecurityProfile + *out = new(HBaseClusterSecurityProfileInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StorageAccount != nil { + in, out := 
&in.StorageAccount, &out.StorageAccount + *out = make([]HBaseClusterStorageAccountInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StorageAccountGen2 != nil { + in, out := &in.StorageAccountGen2, &out.StorageAccountGen2 + *out = new(HBaseClusterStorageAccountGen2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.TLSMinVersion != nil { + in, out := &in.TLSMinVersion, &out.TLSMinVersion + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Tier != nil { + in, out := &in.Tier, &out.Tier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HBaseClusterInitParameters. +func (in *HBaseClusterInitParameters) DeepCopy() *HBaseClusterInitParameters { + if in == nil { + return nil + } + out := new(HBaseClusterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HBaseClusterList) DeepCopyInto(out *HBaseClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HBaseCluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HBaseClusterList. 
+func (in *HBaseClusterList) DeepCopy() *HBaseClusterList { + if in == nil { + return nil + } + out := new(HBaseClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HBaseClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HBaseClusterMetastoresInitParameters) DeepCopyInto(out *HBaseClusterMetastoresInitParameters) { + *out = *in + if in.Ambari != nil { + in, out := &in.Ambari, &out.Ambari + *out = new(MetastoresAmbariInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Hive != nil { + in, out := &in.Hive, &out.Hive + *out = new(MetastoresHiveInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Oozie != nil { + in, out := &in.Oozie, &out.Oozie + *out = new(MetastoresOozieInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HBaseClusterMetastoresInitParameters. +func (in *HBaseClusterMetastoresInitParameters) DeepCopy() *HBaseClusterMetastoresInitParameters { + if in == nil { + return nil + } + out := new(HBaseClusterMetastoresInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HBaseClusterMetastoresObservation) DeepCopyInto(out *HBaseClusterMetastoresObservation) { + *out = *in + if in.Ambari != nil { + in, out := &in.Ambari, &out.Ambari + *out = new(MetastoresAmbariObservation) + (*in).DeepCopyInto(*out) + } + if in.Hive != nil { + in, out := &in.Hive, &out.Hive + *out = new(MetastoresHiveObservation) + (*in).DeepCopyInto(*out) + } + if in.Oozie != nil { + in, out := &in.Oozie, &out.Oozie + *out = new(MetastoresOozieObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HBaseClusterMetastoresObservation. +func (in *HBaseClusterMetastoresObservation) DeepCopy() *HBaseClusterMetastoresObservation { + if in == nil { + return nil + } + out := new(HBaseClusterMetastoresObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HBaseClusterMetastoresParameters) DeepCopyInto(out *HBaseClusterMetastoresParameters) { + *out = *in + if in.Ambari != nil { + in, out := &in.Ambari, &out.Ambari + *out = new(MetastoresAmbariParameters) + (*in).DeepCopyInto(*out) + } + if in.Hive != nil { + in, out := &in.Hive, &out.Hive + *out = new(MetastoresHiveParameters) + (*in).DeepCopyInto(*out) + } + if in.Oozie != nil { + in, out := &in.Oozie, &out.Oozie + *out = new(MetastoresOozieParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HBaseClusterMetastoresParameters. +func (in *HBaseClusterMetastoresParameters) DeepCopy() *HBaseClusterMetastoresParameters { + if in == nil { + return nil + } + out := new(HBaseClusterMetastoresParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HBaseClusterMonitorInitParameters) DeepCopyInto(out *HBaseClusterMonitorInitParameters) { + *out = *in + if in.LogAnalyticsWorkspaceID != nil { + in, out := &in.LogAnalyticsWorkspaceID, &out.LogAnalyticsWorkspaceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HBaseClusterMonitorInitParameters. +func (in *HBaseClusterMonitorInitParameters) DeepCopy() *HBaseClusterMonitorInitParameters { + if in == nil { + return nil + } + out := new(HBaseClusterMonitorInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HBaseClusterMonitorObservation) DeepCopyInto(out *HBaseClusterMonitorObservation) { + *out = *in + if in.LogAnalyticsWorkspaceID != nil { + in, out := &in.LogAnalyticsWorkspaceID, &out.LogAnalyticsWorkspaceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HBaseClusterMonitorObservation. +func (in *HBaseClusterMonitorObservation) DeepCopy() *HBaseClusterMonitorObservation { + if in == nil { + return nil + } + out := new(HBaseClusterMonitorObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HBaseClusterMonitorParameters) DeepCopyInto(out *HBaseClusterMonitorParameters) { + *out = *in + if in.LogAnalyticsWorkspaceID != nil { + in, out := &in.LogAnalyticsWorkspaceID, &out.LogAnalyticsWorkspaceID + *out = new(string) + **out = **in + } + out.PrimaryKeySecretRef = in.PrimaryKeySecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HBaseClusterMonitorParameters. 
+func (in *HBaseClusterMonitorParameters) DeepCopy() *HBaseClusterMonitorParameters { + if in == nil { + return nil + } + out := new(HBaseClusterMonitorParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HBaseClusterNetworkInitParameters) DeepCopyInto(out *HBaseClusterNetworkInitParameters) { + *out = *in + if in.ConnectionDirection != nil { + in, out := &in.ConnectionDirection, &out.ConnectionDirection + *out = new(string) + **out = **in + } + if in.PrivateLinkEnabled != nil { + in, out := &in.PrivateLinkEnabled, &out.PrivateLinkEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HBaseClusterNetworkInitParameters. +func (in *HBaseClusterNetworkInitParameters) DeepCopy() *HBaseClusterNetworkInitParameters { + if in == nil { + return nil + } + out := new(HBaseClusterNetworkInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HBaseClusterNetworkObservation) DeepCopyInto(out *HBaseClusterNetworkObservation) { + *out = *in + if in.ConnectionDirection != nil { + in, out := &in.ConnectionDirection, &out.ConnectionDirection + *out = new(string) + **out = **in + } + if in.PrivateLinkEnabled != nil { + in, out := &in.PrivateLinkEnabled, &out.PrivateLinkEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HBaseClusterNetworkObservation. +func (in *HBaseClusterNetworkObservation) DeepCopy() *HBaseClusterNetworkObservation { + if in == nil { + return nil + } + out := new(HBaseClusterNetworkObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *HBaseClusterNetworkParameters) DeepCopyInto(out *HBaseClusterNetworkParameters) { + *out = *in + if in.ConnectionDirection != nil { + in, out := &in.ConnectionDirection, &out.ConnectionDirection + *out = new(string) + **out = **in + } + if in.PrivateLinkEnabled != nil { + in, out := &in.PrivateLinkEnabled, &out.PrivateLinkEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HBaseClusterNetworkParameters. +func (in *HBaseClusterNetworkParameters) DeepCopy() *HBaseClusterNetworkParameters { + if in == nil { + return nil + } + out := new(HBaseClusterNetworkParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HBaseClusterObservation) DeepCopyInto(out *HBaseClusterObservation) { + *out = *in + if in.ClusterVersion != nil { + in, out := &in.ClusterVersion, &out.ClusterVersion + *out = new(string) + **out = **in + } + if in.ComponentVersion != nil { + in, out := &in.ComponentVersion, &out.ComponentVersion + *out = new(HBaseClusterComponentVersionObservation) + (*in).DeepCopyInto(*out) + } + if in.ComputeIsolation != nil { + in, out := &in.ComputeIsolation, &out.ComputeIsolation + *out = new(HBaseClusterComputeIsolationObservation) + (*in).DeepCopyInto(*out) + } + if in.DiskEncryption != nil { + in, out := &in.DiskEncryption, &out.DiskEncryption + *out = make([]HBaseClusterDiskEncryptionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Extension != nil { + in, out := &in.Extension, &out.Extension + *out = new(HBaseClusterExtensionObservation) + (*in).DeepCopyInto(*out) + } + if in.Gateway != nil { + in, out := &in.Gateway, &out.Gateway + *out = new(HBaseClusterGatewayObservation) + (*in).DeepCopyInto(*out) + } + if in.HTTPSEndpoint != nil { + in, out := &in.HTTPSEndpoint, 
&out.HTTPSEndpoint + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Metastores != nil { + in, out := &in.Metastores, &out.Metastores + *out = new(HBaseClusterMetastoresObservation) + (*in).DeepCopyInto(*out) + } + if in.Monitor != nil { + in, out := &in.Monitor, &out.Monitor + *out = new(HBaseClusterMonitorObservation) + (*in).DeepCopyInto(*out) + } + if in.Network != nil { + in, out := &in.Network, &out.Network + *out = new(HBaseClusterNetworkObservation) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Roles != nil { + in, out := &in.Roles, &out.Roles + *out = new(HBaseClusterRolesObservation) + (*in).DeepCopyInto(*out) + } + if in.SSHEndpoint != nil { + in, out := &in.SSHEndpoint, &out.SSHEndpoint + *out = new(string) + **out = **in + } + if in.SecurityProfile != nil { + in, out := &in.SecurityProfile, &out.SecurityProfile + *out = new(HBaseClusterSecurityProfileObservation) + (*in).DeepCopyInto(*out) + } + if in.StorageAccount != nil { + in, out := &in.StorageAccount, &out.StorageAccount + *out = make([]HBaseClusterStorageAccountObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StorageAccountGen2 != nil { + in, out := &in.StorageAccountGen2, &out.StorageAccountGen2 + *out = new(HBaseClusterStorageAccountGen2Observation) + (*in).DeepCopyInto(*out) + } + if in.TLSMinVersion != nil { + in, out := &in.TLSMinVersion, &out.TLSMinVersion + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := 
&inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Tier != nil { + in, out := &in.Tier, &out.Tier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HBaseClusterObservation. +func (in *HBaseClusterObservation) DeepCopy() *HBaseClusterObservation { + if in == nil { + return nil + } + out := new(HBaseClusterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HBaseClusterParameters) DeepCopyInto(out *HBaseClusterParameters) { + *out = *in + if in.ClusterVersion != nil { + in, out := &in.ClusterVersion, &out.ClusterVersion + *out = new(string) + **out = **in + } + if in.ComponentVersion != nil { + in, out := &in.ComponentVersion, &out.ComponentVersion + *out = new(HBaseClusterComponentVersionParameters) + (*in).DeepCopyInto(*out) + } + if in.ComputeIsolation != nil { + in, out := &in.ComputeIsolation, &out.ComputeIsolation + *out = new(HBaseClusterComputeIsolationParameters) + (*in).DeepCopyInto(*out) + } + if in.DiskEncryption != nil { + in, out := &in.DiskEncryption, &out.DiskEncryption + *out = make([]HBaseClusterDiskEncryptionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Extension != nil { + in, out := &in.Extension, &out.Extension + *out = new(HBaseClusterExtensionParameters) + (*in).DeepCopyInto(*out) + } + if in.Gateway != nil { + in, out := &in.Gateway, &out.Gateway + *out = new(HBaseClusterGatewayParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Metastores != nil { + in, out := &in.Metastores, &out.Metastores + *out = new(HBaseClusterMetastoresParameters) + (*in).DeepCopyInto(*out) + } + if in.Monitor != nil { + in, out := &in.Monitor, &out.Monitor + 
*out = new(HBaseClusterMonitorParameters) + (*in).DeepCopyInto(*out) + } + if in.Network != nil { + in, out := &in.Network, &out.Network + *out = new(HBaseClusterNetworkParameters) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Roles != nil { + in, out := &in.Roles, &out.Roles + *out = new(HBaseClusterRolesParameters) + (*in).DeepCopyInto(*out) + } + if in.SecurityProfile != nil { + in, out := &in.SecurityProfile, &out.SecurityProfile + *out = new(HBaseClusterSecurityProfileParameters) + (*in).DeepCopyInto(*out) + } + if in.StorageAccount != nil { + in, out := &in.StorageAccount, &out.StorageAccount + *out = make([]HBaseClusterStorageAccountParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StorageAccountGen2 != nil { + in, out := &in.StorageAccountGen2, &out.StorageAccountGen2 + *out = new(HBaseClusterStorageAccountGen2Parameters) + (*in).DeepCopyInto(*out) + } + if in.TLSMinVersion != nil { + in, out := &in.TLSMinVersion, &out.TLSMinVersion + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Tier != nil { + in, out := &in.Tier, &out.Tier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, 
creating a new HBaseClusterParameters. +func (in *HBaseClusterParameters) DeepCopy() *HBaseClusterParameters { + if in == nil { + return nil + } + out := new(HBaseClusterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HBaseClusterRolesInitParameters) DeepCopyInto(out *HBaseClusterRolesInitParameters) { + *out = *in + if in.HeadNode != nil { + in, out := &in.HeadNode, &out.HeadNode + *out = new(RolesHeadNodeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.WorkerNode != nil { + in, out := &in.WorkerNode, &out.WorkerNode + *out = new(RolesWorkerNodeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ZookeeperNode != nil { + in, out := &in.ZookeeperNode, &out.ZookeeperNode + *out = new(RolesZookeeperNodeInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HBaseClusterRolesInitParameters. +func (in *HBaseClusterRolesInitParameters) DeepCopy() *HBaseClusterRolesInitParameters { + if in == nil { + return nil + } + out := new(HBaseClusterRolesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HBaseClusterRolesObservation) DeepCopyInto(out *HBaseClusterRolesObservation) { + *out = *in + if in.HeadNode != nil { + in, out := &in.HeadNode, &out.HeadNode + *out = new(RolesHeadNodeObservation) + (*in).DeepCopyInto(*out) + } + if in.WorkerNode != nil { + in, out := &in.WorkerNode, &out.WorkerNode + *out = new(RolesWorkerNodeObservation) + (*in).DeepCopyInto(*out) + } + if in.ZookeeperNode != nil { + in, out := &in.ZookeeperNode, &out.ZookeeperNode + *out = new(RolesZookeeperNodeObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HBaseClusterRolesObservation. +func (in *HBaseClusterRolesObservation) DeepCopy() *HBaseClusterRolesObservation { + if in == nil { + return nil + } + out := new(HBaseClusterRolesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HBaseClusterRolesParameters) DeepCopyInto(out *HBaseClusterRolesParameters) { + *out = *in + if in.HeadNode != nil { + in, out := &in.HeadNode, &out.HeadNode + *out = new(RolesHeadNodeParameters) + (*in).DeepCopyInto(*out) + } + if in.WorkerNode != nil { + in, out := &in.WorkerNode, &out.WorkerNode + *out = new(RolesWorkerNodeParameters) + (*in).DeepCopyInto(*out) + } + if in.ZookeeperNode != nil { + in, out := &in.ZookeeperNode, &out.ZookeeperNode + *out = new(RolesZookeeperNodeParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HBaseClusterRolesParameters. +func (in *HBaseClusterRolesParameters) DeepCopy() *HBaseClusterRolesParameters { + if in == nil { + return nil + } + out := new(HBaseClusterRolesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HBaseClusterSecurityProfileInitParameters) DeepCopyInto(out *HBaseClusterSecurityProfileInitParameters) { + *out = *in + if in.AaddsResourceID != nil { + in, out := &in.AaddsResourceID, &out.AaddsResourceID + *out = new(string) + **out = **in + } + if in.ClusterUsersGroupDNS != nil { + in, out := &in.ClusterUsersGroupDNS, &out.ClusterUsersGroupDNS + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.DomainUsername != nil { + in, out := &in.DomainUsername, &out.DomainUsername + *out = new(string) + **out = **in + } + if in.LdapsUrls != nil { + in, out := &in.LdapsUrls, &out.LdapsUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MsiResourceID != nil { + in, out := &in.MsiResourceID, &out.MsiResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HBaseClusterSecurityProfileInitParameters. +func (in *HBaseClusterSecurityProfileInitParameters) DeepCopy() *HBaseClusterSecurityProfileInitParameters { + if in == nil { + return nil + } + out := new(HBaseClusterSecurityProfileInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HBaseClusterSecurityProfileObservation) DeepCopyInto(out *HBaseClusterSecurityProfileObservation) { + *out = *in + if in.AaddsResourceID != nil { + in, out := &in.AaddsResourceID, &out.AaddsResourceID + *out = new(string) + **out = **in + } + if in.ClusterUsersGroupDNS != nil { + in, out := &in.ClusterUsersGroupDNS, &out.ClusterUsersGroupDNS + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.DomainUsername != nil { + in, out := &in.DomainUsername, &out.DomainUsername + *out = new(string) + **out = **in + } + if in.LdapsUrls != nil { + in, out := &in.LdapsUrls, &out.LdapsUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MsiResourceID != nil { + in, out := &in.MsiResourceID, &out.MsiResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HBaseClusterSecurityProfileObservation. +func (in *HBaseClusterSecurityProfileObservation) DeepCopy() *HBaseClusterSecurityProfileObservation { + if in == nil { + return nil + } + out := new(HBaseClusterSecurityProfileObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HBaseClusterSecurityProfileParameters) DeepCopyInto(out *HBaseClusterSecurityProfileParameters) { + *out = *in + if in.AaddsResourceID != nil { + in, out := &in.AaddsResourceID, &out.AaddsResourceID + *out = new(string) + **out = **in + } + if in.ClusterUsersGroupDNS != nil { + in, out := &in.ClusterUsersGroupDNS, &out.ClusterUsersGroupDNS + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + out.DomainUserPasswordSecretRef = in.DomainUserPasswordSecretRef + if in.DomainUsername != nil { + in, out := &in.DomainUsername, &out.DomainUsername + *out = new(string) + **out = **in + } + if in.LdapsUrls != nil { + in, out := &in.LdapsUrls, &out.LdapsUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MsiResourceID != nil { + in, out := &in.MsiResourceID, &out.MsiResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HBaseClusterSecurityProfileParameters. +func (in *HBaseClusterSecurityProfileParameters) DeepCopy() *HBaseClusterSecurityProfileParameters { + if in == nil { + return nil + } + out := new(HBaseClusterSecurityProfileParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HBaseClusterSpec) DeepCopyInto(out *HBaseClusterSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HBaseClusterSpec. 
+func (in *HBaseClusterSpec) DeepCopy() *HBaseClusterSpec { + if in == nil { + return nil + } + out := new(HBaseClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HBaseClusterStatus) DeepCopyInto(out *HBaseClusterStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HBaseClusterStatus. +func (in *HBaseClusterStatus) DeepCopy() *HBaseClusterStatus { + if in == nil { + return nil + } + out := new(HBaseClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HBaseClusterStorageAccountGen2InitParameters) DeepCopyInto(out *HBaseClusterStorageAccountGen2InitParameters) { + *out = *in + if in.FileSystemID != nil { + in, out := &in.FileSystemID, &out.FileSystemID + *out = new(string) + **out = **in + } + if in.IsDefault != nil { + in, out := &in.IsDefault, &out.IsDefault + *out = new(bool) + **out = **in + } + if in.ManagedIdentityResourceID != nil { + in, out := &in.ManagedIdentityResourceID, &out.ManagedIdentityResourceID + *out = new(string) + **out = **in + } + if in.StorageResourceID != nil { + in, out := &in.StorageResourceID, &out.StorageResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HBaseClusterStorageAccountGen2InitParameters. 
+func (in *HBaseClusterStorageAccountGen2InitParameters) DeepCopy() *HBaseClusterStorageAccountGen2InitParameters { + if in == nil { + return nil + } + out := new(HBaseClusterStorageAccountGen2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HBaseClusterStorageAccountGen2Observation) DeepCopyInto(out *HBaseClusterStorageAccountGen2Observation) { + *out = *in + if in.FileSystemID != nil { + in, out := &in.FileSystemID, &out.FileSystemID + *out = new(string) + **out = **in + } + if in.IsDefault != nil { + in, out := &in.IsDefault, &out.IsDefault + *out = new(bool) + **out = **in + } + if in.ManagedIdentityResourceID != nil { + in, out := &in.ManagedIdentityResourceID, &out.ManagedIdentityResourceID + *out = new(string) + **out = **in + } + if in.StorageResourceID != nil { + in, out := &in.StorageResourceID, &out.StorageResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HBaseClusterStorageAccountGen2Observation. +func (in *HBaseClusterStorageAccountGen2Observation) DeepCopy() *HBaseClusterStorageAccountGen2Observation { + if in == nil { + return nil + } + out := new(HBaseClusterStorageAccountGen2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HBaseClusterStorageAccountGen2Parameters) DeepCopyInto(out *HBaseClusterStorageAccountGen2Parameters) { + *out = *in + if in.FileSystemID != nil { + in, out := &in.FileSystemID, &out.FileSystemID + *out = new(string) + **out = **in + } + if in.IsDefault != nil { + in, out := &in.IsDefault, &out.IsDefault + *out = new(bool) + **out = **in + } + if in.ManagedIdentityResourceID != nil { + in, out := &in.ManagedIdentityResourceID, &out.ManagedIdentityResourceID + *out = new(string) + **out = **in + } + if in.StorageResourceID != nil { + in, out := &in.StorageResourceID, &out.StorageResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HBaseClusterStorageAccountGen2Parameters. +func (in *HBaseClusterStorageAccountGen2Parameters) DeepCopy() *HBaseClusterStorageAccountGen2Parameters { + if in == nil { + return nil + } + out := new(HBaseClusterStorageAccountGen2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HBaseClusterStorageAccountInitParameters) DeepCopyInto(out *HBaseClusterStorageAccountInitParameters) { + *out = *in + if in.IsDefault != nil { + in, out := &in.IsDefault, &out.IsDefault + *out = new(bool) + **out = **in + } + if in.StorageContainerID != nil { + in, out := &in.StorageContainerID, &out.StorageContainerID + *out = new(string) + **out = **in + } + if in.StorageContainerIDRef != nil { + in, out := &in.StorageContainerIDRef, &out.StorageContainerIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StorageContainerIDSelector != nil { + in, out := &in.StorageContainerIDSelector, &out.StorageContainerIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StorageResourceID != nil { + in, out := &in.StorageResourceID, &out.StorageResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HBaseClusterStorageAccountInitParameters. +func (in *HBaseClusterStorageAccountInitParameters) DeepCopy() *HBaseClusterStorageAccountInitParameters { + if in == nil { + return nil + } + out := new(HBaseClusterStorageAccountInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HBaseClusterStorageAccountObservation) DeepCopyInto(out *HBaseClusterStorageAccountObservation) { + *out = *in + if in.IsDefault != nil { + in, out := &in.IsDefault, &out.IsDefault + *out = new(bool) + **out = **in + } + if in.StorageContainerID != nil { + in, out := &in.StorageContainerID, &out.StorageContainerID + *out = new(string) + **out = **in + } + if in.StorageResourceID != nil { + in, out := &in.StorageResourceID, &out.StorageResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HBaseClusterStorageAccountObservation. 
+func (in *HBaseClusterStorageAccountObservation) DeepCopy() *HBaseClusterStorageAccountObservation { + if in == nil { + return nil + } + out := new(HBaseClusterStorageAccountObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HBaseClusterStorageAccountParameters) DeepCopyInto(out *HBaseClusterStorageAccountParameters) { + *out = *in + if in.IsDefault != nil { + in, out := &in.IsDefault, &out.IsDefault + *out = new(bool) + **out = **in + } + out.StorageAccountKeySecretRef = in.StorageAccountKeySecretRef + if in.StorageContainerID != nil { + in, out := &in.StorageContainerID, &out.StorageContainerID + *out = new(string) + **out = **in + } + if in.StorageContainerIDRef != nil { + in, out := &in.StorageContainerIDRef, &out.StorageContainerIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StorageContainerIDSelector != nil { + in, out := &in.StorageContainerIDSelector, &out.StorageContainerIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StorageResourceID != nil { + in, out := &in.StorageResourceID, &out.StorageResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HBaseClusterStorageAccountParameters. +func (in *HBaseClusterStorageAccountParameters) DeepCopy() *HBaseClusterStorageAccountParameters { + if in == nil { + return nil + } + out := new(HBaseClusterStorageAccountParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPSEndpointsInitParameters) DeepCopyInto(out *HTTPSEndpointsInitParameters) { + *out = *in + if in.AccessModes != nil { + in, out := &in.AccessModes, &out.AccessModes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DestinationPort != nil { + in, out := &in.DestinationPort, &out.DestinationPort + *out = new(float64) + **out = **in + } + if in.DisableGatewayAuth != nil { + in, out := &in.DisableGatewayAuth, &out.DisableGatewayAuth + *out = new(bool) + **out = **in + } + if in.PrivateIPAddress != nil { + in, out := &in.PrivateIPAddress, &out.PrivateIPAddress + *out = new(string) + **out = **in + } + if in.SubDomainSuffix != nil { + in, out := &in.SubDomainSuffix, &out.SubDomainSuffix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPSEndpointsInitParameters. +func (in *HTTPSEndpointsInitParameters) DeepCopy() *HTTPSEndpointsInitParameters { + if in == nil { + return nil + } + out := new(HTTPSEndpointsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPSEndpointsObservation) DeepCopyInto(out *HTTPSEndpointsObservation) { + *out = *in + if in.AccessModes != nil { + in, out := &in.AccessModes, &out.AccessModes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DestinationPort != nil { + in, out := &in.DestinationPort, &out.DestinationPort + *out = new(float64) + **out = **in + } + if in.DisableGatewayAuth != nil { + in, out := &in.DisableGatewayAuth, &out.DisableGatewayAuth + *out = new(bool) + **out = **in + } + if in.PrivateIPAddress != nil { + in, out := &in.PrivateIPAddress, &out.PrivateIPAddress + *out = new(string) + **out = **in + } + if in.SubDomainSuffix != nil { + in, out := &in.SubDomainSuffix, &out.SubDomainSuffix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPSEndpointsObservation. +func (in *HTTPSEndpointsObservation) DeepCopy() *HTTPSEndpointsObservation { + if in == nil { + return nil + } + out := new(HTTPSEndpointsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPSEndpointsParameters) DeepCopyInto(out *HTTPSEndpointsParameters) { + *out = *in + if in.AccessModes != nil { + in, out := &in.AccessModes, &out.AccessModes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DestinationPort != nil { + in, out := &in.DestinationPort, &out.DestinationPort + *out = new(float64) + **out = **in + } + if in.DisableGatewayAuth != nil { + in, out := &in.DisableGatewayAuth, &out.DisableGatewayAuth + *out = new(bool) + **out = **in + } + if in.PrivateIPAddress != nil { + in, out := &in.PrivateIPAddress, &out.PrivateIPAddress + *out = new(string) + **out = **in + } + if in.SubDomainSuffix != nil { + in, out := &in.SubDomainSuffix, &out.SubDomainSuffix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPSEndpointsParameters. +func (in *HTTPSEndpointsParameters) DeepCopy() *HTTPSEndpointsParameters { + if in == nil { + return nil + } + out := new(HTTPSEndpointsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HadoopCluster) DeepCopyInto(out *HadoopCluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HadoopCluster. +func (in *HadoopCluster) DeepCopy() *HadoopCluster { + if in == nil { + return nil + } + out := new(HadoopCluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *HadoopCluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HadoopClusterInitParameters) DeepCopyInto(out *HadoopClusterInitParameters) { + *out = *in + if in.ClusterVersion != nil { + in, out := &in.ClusterVersion, &out.ClusterVersion + *out = new(string) + **out = **in + } + if in.ComponentVersion != nil { + in, out := &in.ComponentVersion, &out.ComponentVersion + *out = new(ComponentVersionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ComputeIsolation != nil { + in, out := &in.ComputeIsolation, &out.ComputeIsolation + *out = new(ComputeIsolationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DiskEncryption != nil { + in, out := &in.DiskEncryption, &out.DiskEncryption + *out = make([]DiskEncryptionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Extension != nil { + in, out := &in.Extension, &out.Extension + *out = new(ExtensionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Gateway != nil { + in, out := &in.Gateway, &out.Gateway + *out = new(GatewayInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Metastores != nil { + in, out := &in.Metastores, &out.Metastores + *out = new(MetastoresInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Monitor != nil { + in, out := &in.Monitor, &out.Monitor + *out = new(MonitorInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Network != nil { + in, out := &in.Network, &out.Network + *out = new(NetworkInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Roles != nil { + in, out := &in.Roles, &out.Roles + *out = new(RolesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SecurityProfile != nil { + in, out := &in.SecurityProfile, 
&out.SecurityProfile + *out = new(SecurityProfileInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StorageAccount != nil { + in, out := &in.StorageAccount, &out.StorageAccount + *out = make([]StorageAccountInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StorageAccountGen2 != nil { + in, out := &in.StorageAccountGen2, &out.StorageAccountGen2 + *out = new(StorageAccountGen2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.TLSMinVersion != nil { + in, out := &in.TLSMinVersion, &out.TLSMinVersion + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Tier != nil { + in, out := &in.Tier, &out.Tier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HadoopClusterInitParameters. +func (in *HadoopClusterInitParameters) DeepCopy() *HadoopClusterInitParameters { + if in == nil { + return nil + } + out := new(HadoopClusterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HadoopClusterList) DeepCopyInto(out *HadoopClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HadoopCluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HadoopClusterList. 
+func (in *HadoopClusterList) DeepCopy() *HadoopClusterList { + if in == nil { + return nil + } + out := new(HadoopClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HadoopClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HadoopClusterObservation) DeepCopyInto(out *HadoopClusterObservation) { + *out = *in + if in.ClusterVersion != nil { + in, out := &in.ClusterVersion, &out.ClusterVersion + *out = new(string) + **out = **in + } + if in.ComponentVersion != nil { + in, out := &in.ComponentVersion, &out.ComponentVersion + *out = new(ComponentVersionObservation) + (*in).DeepCopyInto(*out) + } + if in.ComputeIsolation != nil { + in, out := &in.ComputeIsolation, &out.ComputeIsolation + *out = new(ComputeIsolationObservation) + (*in).DeepCopyInto(*out) + } + if in.DiskEncryption != nil { + in, out := &in.DiskEncryption, &out.DiskEncryption + *out = make([]DiskEncryptionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Extension != nil { + in, out := &in.Extension, &out.Extension + *out = new(ExtensionObservation) + (*in).DeepCopyInto(*out) + } + if in.Gateway != nil { + in, out := &in.Gateway, &out.Gateway + *out = new(GatewayObservation) + (*in).DeepCopyInto(*out) + } + if in.HTTPSEndpoint != nil { + in, out := &in.HTTPSEndpoint, &out.HTTPSEndpoint + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Metastores != nil { + in, out := &in.Metastores, &out.Metastores + *out = new(MetastoresObservation) + 
(*in).DeepCopyInto(*out) + } + if in.Monitor != nil { + in, out := &in.Monitor, &out.Monitor + *out = new(MonitorObservation) + (*in).DeepCopyInto(*out) + } + if in.Network != nil { + in, out := &in.Network, &out.Network + *out = new(NetworkObservation) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Roles != nil { + in, out := &in.Roles, &out.Roles + *out = new(RolesObservation) + (*in).DeepCopyInto(*out) + } + if in.SSHEndpoint != nil { + in, out := &in.SSHEndpoint, &out.SSHEndpoint + *out = new(string) + **out = **in + } + if in.SecurityProfile != nil { + in, out := &in.SecurityProfile, &out.SecurityProfile + *out = new(SecurityProfileObservation) + (*in).DeepCopyInto(*out) + } + if in.StorageAccount != nil { + in, out := &in.StorageAccount, &out.StorageAccount + *out = make([]StorageAccountObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StorageAccountGen2 != nil { + in, out := &in.StorageAccountGen2, &out.StorageAccountGen2 + *out = new(StorageAccountGen2Observation) + (*in).DeepCopyInto(*out) + } + if in.TLSMinVersion != nil { + in, out := &in.TLSMinVersion, &out.TLSMinVersion + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Tier != nil { + in, out := &in.Tier, &out.Tier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HadoopClusterObservation. 
+func (in *HadoopClusterObservation) DeepCopy() *HadoopClusterObservation { + if in == nil { + return nil + } + out := new(HadoopClusterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HadoopClusterParameters) DeepCopyInto(out *HadoopClusterParameters) { + *out = *in + if in.ClusterVersion != nil { + in, out := &in.ClusterVersion, &out.ClusterVersion + *out = new(string) + **out = **in + } + if in.ComponentVersion != nil { + in, out := &in.ComponentVersion, &out.ComponentVersion + *out = new(ComponentVersionParameters) + (*in).DeepCopyInto(*out) + } + if in.ComputeIsolation != nil { + in, out := &in.ComputeIsolation, &out.ComputeIsolation + *out = new(ComputeIsolationParameters) + (*in).DeepCopyInto(*out) + } + if in.DiskEncryption != nil { + in, out := &in.DiskEncryption, &out.DiskEncryption + *out = make([]DiskEncryptionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Extension != nil { + in, out := &in.Extension, &out.Extension + *out = new(ExtensionParameters) + (*in).DeepCopyInto(*out) + } + if in.Gateway != nil { + in, out := &in.Gateway, &out.Gateway + *out = new(GatewayParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Metastores != nil { + in, out := &in.Metastores, &out.Metastores + *out = new(MetastoresParameters) + (*in).DeepCopyInto(*out) + } + if in.Monitor != nil { + in, out := &in.Monitor, &out.Monitor + *out = new(MonitorParameters) + (*in).DeepCopyInto(*out) + } + if in.Network != nil { + in, out := &in.Network, &out.Network + *out = new(NetworkParameters) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out 
:= &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Roles != nil { + in, out := &in.Roles, &out.Roles + *out = new(RolesParameters) + (*in).DeepCopyInto(*out) + } + if in.SecurityProfile != nil { + in, out := &in.SecurityProfile, &out.SecurityProfile + *out = new(SecurityProfileParameters) + (*in).DeepCopyInto(*out) + } + if in.StorageAccount != nil { + in, out := &in.StorageAccount, &out.StorageAccount + *out = make([]StorageAccountParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StorageAccountGen2 != nil { + in, out := &in.StorageAccountGen2, &out.StorageAccountGen2 + *out = new(StorageAccountGen2Parameters) + (*in).DeepCopyInto(*out) + } + if in.TLSMinVersion != nil { + in, out := &in.TLSMinVersion, &out.TLSMinVersion + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Tier != nil { + in, out := &in.Tier, &out.Tier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HadoopClusterParameters. +func (in *HadoopClusterParameters) DeepCopy() *HadoopClusterParameters { + if in == nil { + return nil + } + out := new(HadoopClusterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HadoopClusterSpec) DeepCopyInto(out *HadoopClusterSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HadoopClusterSpec. +func (in *HadoopClusterSpec) DeepCopy() *HadoopClusterSpec { + if in == nil { + return nil + } + out := new(HadoopClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HadoopClusterStatus) DeepCopyInto(out *HadoopClusterStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HadoopClusterStatus. +func (in *HadoopClusterStatus) DeepCopy() *HadoopClusterStatus { + if in == nil { + return nil + } + out := new(HadoopClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HeadNodeInitParameters) DeepCopyInto(out *HeadNodeInitParameters) { + *out = *in + if in.SSHKeys != nil { + in, out := &in.SSHKeys, &out.SSHKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScriptActions != nil { + in, out := &in.ScriptActions, &out.ScriptActions + *out = make([]ScriptActionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } + if in.VirtualNetworkID != nil { + in, out := &in.VirtualNetworkID, &out.VirtualNetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeadNodeInitParameters. +func (in *HeadNodeInitParameters) DeepCopy() *HeadNodeInitParameters { + if in == nil { + return nil + } + out := new(HeadNodeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HeadNodeObservation) DeepCopyInto(out *HeadNodeObservation) { + *out = *in + if in.SSHKeys != nil { + in, out := &in.SSHKeys, &out.SSHKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScriptActions != nil { + in, out := &in.ScriptActions, &out.ScriptActions + *out = make([]ScriptActionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } + if in.VirtualNetworkID != nil { + in, out := &in.VirtualNetworkID, &out.VirtualNetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeadNodeObservation. +func (in *HeadNodeObservation) DeepCopy() *HeadNodeObservation { + if in == nil { + return nil + } + out := new(HeadNodeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HeadNodeParameters) DeepCopyInto(out *HeadNodeParameters) { + *out = *in + if in.PasswordSecretRef != nil { + in, out := &in.PasswordSecretRef, &out.PasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.SSHKeys != nil { + in, out := &in.SSHKeys, &out.SSHKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScriptActions != nil { + in, out := &in.ScriptActions, &out.ScriptActions + *out = make([]ScriptActionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } + if in.VirtualNetworkID != nil { + in, out := &in.VirtualNetworkID, &out.VirtualNetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeadNodeParameters. +func (in *HeadNodeParameters) DeepCopy() *HeadNodeParameters { + if in == nil { + return nil + } + out := new(HeadNodeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HeadNodeScriptActionsInitParameters) DeepCopyInto(out *HeadNodeScriptActionsInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeadNodeScriptActionsInitParameters. +func (in *HeadNodeScriptActionsInitParameters) DeepCopy() *HeadNodeScriptActionsInitParameters { + if in == nil { + return nil + } + out := new(HeadNodeScriptActionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HeadNodeScriptActionsObservation) DeepCopyInto(out *HeadNodeScriptActionsObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeadNodeScriptActionsObservation. +func (in *HeadNodeScriptActionsObservation) DeepCopy() *HeadNodeScriptActionsObservation { + if in == nil { + return nil + } + out := new(HeadNodeScriptActionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HeadNodeScriptActionsParameters) DeepCopyInto(out *HeadNodeScriptActionsParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeadNodeScriptActionsParameters. +func (in *HeadNodeScriptActionsParameters) DeepCopy() *HeadNodeScriptActionsParameters { + if in == nil { + return nil + } + out := new(HeadNodeScriptActionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HiveInitParameters) DeepCopyInto(out *HiveInitParameters) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HiveInitParameters. +func (in *HiveInitParameters) DeepCopy() *HiveInitParameters { + if in == nil { + return nil + } + out := new(HiveInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HiveObservation) DeepCopyInto(out *HiveObservation) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HiveObservation. +func (in *HiveObservation) DeepCopy() *HiveObservation { + if in == nil { + return nil + } + out := new(HiveObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HiveParameters) DeepCopyInto(out *HiveParameters) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + out.PasswordSecretRef = in.PasswordSecretRef + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HiveParameters. +func (in *HiveParameters) DeepCopy() *HiveParameters { + if in == nil { + return nil + } + out := new(HiveParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InstallScriptActionInitParameters) DeepCopyInto(out *InstallScriptActionInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallScriptActionInitParameters. +func (in *InstallScriptActionInitParameters) DeepCopy() *InstallScriptActionInitParameters { + if in == nil { + return nil + } + out := new(InstallScriptActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstallScriptActionObservation) DeepCopyInto(out *InstallScriptActionObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallScriptActionObservation. +func (in *InstallScriptActionObservation) DeepCopy() *InstallScriptActionObservation { + if in == nil { + return nil + } + out := new(InstallScriptActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InstallScriptActionParameters) DeepCopyInto(out *InstallScriptActionParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallScriptActionParameters. +func (in *InstallScriptActionParameters) DeepCopy() *InstallScriptActionParameters { + if in == nil { + return nil + } + out := new(InstallScriptActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InteractiveQueryCluster) DeepCopyInto(out *InteractiveQueryCluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryCluster. +func (in *InteractiveQueryCluster) DeepCopy() *InteractiveQueryCluster { + if in == nil { + return nil + } + out := new(InteractiveQueryCluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *InteractiveQueryCluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InteractiveQueryClusterComponentVersionInitParameters) DeepCopyInto(out *InteractiveQueryClusterComponentVersionInitParameters) { + *out = *in + if in.InteractiveHive != nil { + in, out := &in.InteractiveHive, &out.InteractiveHive + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterComponentVersionInitParameters. +func (in *InteractiveQueryClusterComponentVersionInitParameters) DeepCopy() *InteractiveQueryClusterComponentVersionInitParameters { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterComponentVersionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InteractiveQueryClusterComponentVersionObservation) DeepCopyInto(out *InteractiveQueryClusterComponentVersionObservation) { + *out = *in + if in.InteractiveHive != nil { + in, out := &in.InteractiveHive, &out.InteractiveHive + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterComponentVersionObservation. +func (in *InteractiveQueryClusterComponentVersionObservation) DeepCopy() *InteractiveQueryClusterComponentVersionObservation { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterComponentVersionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InteractiveQueryClusterComponentVersionParameters) DeepCopyInto(out *InteractiveQueryClusterComponentVersionParameters) { + *out = *in + if in.InteractiveHive != nil { + in, out := &in.InteractiveHive, &out.InteractiveHive + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterComponentVersionParameters. +func (in *InteractiveQueryClusterComponentVersionParameters) DeepCopy() *InteractiveQueryClusterComponentVersionParameters { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterComponentVersionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InteractiveQueryClusterComputeIsolationInitParameters) DeepCopyInto(out *InteractiveQueryClusterComputeIsolationInitParameters) { + *out = *in + if in.ComputeIsolationEnabled != nil { + in, out := &in.ComputeIsolationEnabled, &out.ComputeIsolationEnabled + *out = new(bool) + **out = **in + } + if in.HostSku != nil { + in, out := &in.HostSku, &out.HostSku + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterComputeIsolationInitParameters. +func (in *InteractiveQueryClusterComputeIsolationInitParameters) DeepCopy() *InteractiveQueryClusterComputeIsolationInitParameters { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterComputeIsolationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InteractiveQueryClusterComputeIsolationObservation) DeepCopyInto(out *InteractiveQueryClusterComputeIsolationObservation) { + *out = *in + if in.ComputeIsolationEnabled != nil { + in, out := &in.ComputeIsolationEnabled, &out.ComputeIsolationEnabled + *out = new(bool) + **out = **in + } + if in.HostSku != nil { + in, out := &in.HostSku, &out.HostSku + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterComputeIsolationObservation. +func (in *InteractiveQueryClusterComputeIsolationObservation) DeepCopy() *InteractiveQueryClusterComputeIsolationObservation { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterComputeIsolationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InteractiveQueryClusterComputeIsolationParameters) DeepCopyInto(out *InteractiveQueryClusterComputeIsolationParameters) { + *out = *in + if in.ComputeIsolationEnabled != nil { + in, out := &in.ComputeIsolationEnabled, &out.ComputeIsolationEnabled + *out = new(bool) + **out = **in + } + if in.HostSku != nil { + in, out := &in.HostSku, &out.HostSku + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterComputeIsolationParameters. +func (in *InteractiveQueryClusterComputeIsolationParameters) DeepCopy() *InteractiveQueryClusterComputeIsolationParameters { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterComputeIsolationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InteractiveQueryClusterDiskEncryptionInitParameters) DeepCopyInto(out *InteractiveQueryClusterDiskEncryptionInitParameters) { + *out = *in + if in.EncryptionAlgorithm != nil { + in, out := &in.EncryptionAlgorithm, &out.EncryptionAlgorithm + *out = new(string) + **out = **in + } + if in.EncryptionAtHostEnabled != nil { + in, out := &in.EncryptionAtHostEnabled, &out.EncryptionAtHostEnabled + *out = new(bool) + **out = **in + } + if in.KeyVaultKeyID != nil { + in, out := &in.KeyVaultKeyID, &out.KeyVaultKeyID + *out = new(string) + **out = **in + } + if in.KeyVaultManagedIdentityID != nil { + in, out := &in.KeyVaultManagedIdentityID, &out.KeyVaultManagedIdentityID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterDiskEncryptionInitParameters. +func (in *InteractiveQueryClusterDiskEncryptionInitParameters) DeepCopy() *InteractiveQueryClusterDiskEncryptionInitParameters { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterDiskEncryptionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InteractiveQueryClusterDiskEncryptionObservation) DeepCopyInto(out *InteractiveQueryClusterDiskEncryptionObservation) { + *out = *in + if in.EncryptionAlgorithm != nil { + in, out := &in.EncryptionAlgorithm, &out.EncryptionAlgorithm + *out = new(string) + **out = **in + } + if in.EncryptionAtHostEnabled != nil { + in, out := &in.EncryptionAtHostEnabled, &out.EncryptionAtHostEnabled + *out = new(bool) + **out = **in + } + if in.KeyVaultKeyID != nil { + in, out := &in.KeyVaultKeyID, &out.KeyVaultKeyID + *out = new(string) + **out = **in + } + if in.KeyVaultManagedIdentityID != nil { + in, out := &in.KeyVaultManagedIdentityID, &out.KeyVaultManagedIdentityID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterDiskEncryptionObservation. +func (in *InteractiveQueryClusterDiskEncryptionObservation) DeepCopy() *InteractiveQueryClusterDiskEncryptionObservation { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterDiskEncryptionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InteractiveQueryClusterDiskEncryptionParameters) DeepCopyInto(out *InteractiveQueryClusterDiskEncryptionParameters) { + *out = *in + if in.EncryptionAlgorithm != nil { + in, out := &in.EncryptionAlgorithm, &out.EncryptionAlgorithm + *out = new(string) + **out = **in + } + if in.EncryptionAtHostEnabled != nil { + in, out := &in.EncryptionAtHostEnabled, &out.EncryptionAtHostEnabled + *out = new(bool) + **out = **in + } + if in.KeyVaultKeyID != nil { + in, out := &in.KeyVaultKeyID, &out.KeyVaultKeyID + *out = new(string) + **out = **in + } + if in.KeyVaultManagedIdentityID != nil { + in, out := &in.KeyVaultManagedIdentityID, &out.KeyVaultManagedIdentityID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterDiskEncryptionParameters. +func (in *InteractiveQueryClusterDiskEncryptionParameters) DeepCopy() *InteractiveQueryClusterDiskEncryptionParameters { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterDiskEncryptionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InteractiveQueryClusterExtensionInitParameters) DeepCopyInto(out *InteractiveQueryClusterExtensionInitParameters) { + *out = *in + if in.LogAnalyticsWorkspaceID != nil { + in, out := &in.LogAnalyticsWorkspaceID, &out.LogAnalyticsWorkspaceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterExtensionInitParameters. 
+func (in *InteractiveQueryClusterExtensionInitParameters) DeepCopy() *InteractiveQueryClusterExtensionInitParameters { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterExtensionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InteractiveQueryClusterExtensionObservation) DeepCopyInto(out *InteractiveQueryClusterExtensionObservation) { + *out = *in + if in.LogAnalyticsWorkspaceID != nil { + in, out := &in.LogAnalyticsWorkspaceID, &out.LogAnalyticsWorkspaceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterExtensionObservation. +func (in *InteractiveQueryClusterExtensionObservation) DeepCopy() *InteractiveQueryClusterExtensionObservation { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterExtensionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InteractiveQueryClusterExtensionParameters) DeepCopyInto(out *InteractiveQueryClusterExtensionParameters) { + *out = *in + if in.LogAnalyticsWorkspaceID != nil { + in, out := &in.LogAnalyticsWorkspaceID, &out.LogAnalyticsWorkspaceID + *out = new(string) + **out = **in + } + out.PrimaryKeySecretRef = in.PrimaryKeySecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterExtensionParameters. +func (in *InteractiveQueryClusterExtensionParameters) DeepCopy() *InteractiveQueryClusterExtensionParameters { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterExtensionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InteractiveQueryClusterGatewayInitParameters) DeepCopyInto(out *InteractiveQueryClusterGatewayInitParameters) { + *out = *in + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterGatewayInitParameters. +func (in *InteractiveQueryClusterGatewayInitParameters) DeepCopy() *InteractiveQueryClusterGatewayInitParameters { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterGatewayInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InteractiveQueryClusterGatewayObservation) DeepCopyInto(out *InteractiveQueryClusterGatewayObservation) { + *out = *in + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterGatewayObservation. +func (in *InteractiveQueryClusterGatewayObservation) DeepCopy() *InteractiveQueryClusterGatewayObservation { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterGatewayObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InteractiveQueryClusterGatewayParameters) DeepCopyInto(out *InteractiveQueryClusterGatewayParameters) { + *out = *in + out.PasswordSecretRef = in.PasswordSecretRef + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterGatewayParameters. 
+func (in *InteractiveQueryClusterGatewayParameters) DeepCopy() *InteractiveQueryClusterGatewayParameters { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterGatewayParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InteractiveQueryClusterInitParameters) DeepCopyInto(out *InteractiveQueryClusterInitParameters) { + *out = *in + if in.ClusterVersion != nil { + in, out := &in.ClusterVersion, &out.ClusterVersion + *out = new(string) + **out = **in + } + if in.ComponentVersion != nil { + in, out := &in.ComponentVersion, &out.ComponentVersion + *out = new(InteractiveQueryClusterComponentVersionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ComputeIsolation != nil { + in, out := &in.ComputeIsolation, &out.ComputeIsolation + *out = new(InteractiveQueryClusterComputeIsolationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DiskEncryption != nil { + in, out := &in.DiskEncryption, &out.DiskEncryption + *out = make([]InteractiveQueryClusterDiskEncryptionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EncryptionInTransitEnabled != nil { + in, out := &in.EncryptionInTransitEnabled, &out.EncryptionInTransitEnabled + *out = new(bool) + **out = **in + } + if in.Extension != nil { + in, out := &in.Extension, &out.Extension + *out = new(InteractiveQueryClusterExtensionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Gateway != nil { + in, out := &in.Gateway, &out.Gateway + *out = new(InteractiveQueryClusterGatewayInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Metastores != nil { + in, out := &in.Metastores, &out.Metastores + *out = new(InteractiveQueryClusterMetastoresInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Monitor != nil { + in, out := 
&in.Monitor, &out.Monitor + *out = new(InteractiveQueryClusterMonitorInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Network != nil { + in, out := &in.Network, &out.Network + *out = new(InteractiveQueryClusterNetworkInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Roles != nil { + in, out := &in.Roles, &out.Roles + *out = new(InteractiveQueryClusterRolesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SecurityProfile != nil { + in, out := &in.SecurityProfile, &out.SecurityProfile + *out = new(InteractiveQueryClusterSecurityProfileInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StorageAccount != nil { + in, out := &in.StorageAccount, &out.StorageAccount + *out = make([]InteractiveQueryClusterStorageAccountInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StorageAccountGen2 != nil { + in, out := &in.StorageAccountGen2, &out.StorageAccountGen2 + *out = new(InteractiveQueryClusterStorageAccountGen2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.TLSMinVersion != nil { + in, out := &in.TLSMinVersion, &out.TLSMinVersion + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Tier != nil { + in, out := &in.Tier, &out.Tier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterInitParameters. 
+func (in *InteractiveQueryClusterInitParameters) DeepCopy() *InteractiveQueryClusterInitParameters { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InteractiveQueryClusterList) DeepCopyInto(out *InteractiveQueryClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]InteractiveQueryCluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterList. +func (in *InteractiveQueryClusterList) DeepCopy() *InteractiveQueryClusterList { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *InteractiveQueryClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InteractiveQueryClusterMetastoresAmbariInitParameters) DeepCopyInto(out *InteractiveQueryClusterMetastoresAmbariInitParameters) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterMetastoresAmbariInitParameters. +func (in *InteractiveQueryClusterMetastoresAmbariInitParameters) DeepCopy() *InteractiveQueryClusterMetastoresAmbariInitParameters { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterMetastoresAmbariInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InteractiveQueryClusterMetastoresAmbariObservation) DeepCopyInto(out *InteractiveQueryClusterMetastoresAmbariObservation) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterMetastoresAmbariObservation. 
+func (in *InteractiveQueryClusterMetastoresAmbariObservation) DeepCopy() *InteractiveQueryClusterMetastoresAmbariObservation { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterMetastoresAmbariObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InteractiveQueryClusterMetastoresAmbariParameters) DeepCopyInto(out *InteractiveQueryClusterMetastoresAmbariParameters) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + out.PasswordSecretRef = in.PasswordSecretRef + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterMetastoresAmbariParameters. +func (in *InteractiveQueryClusterMetastoresAmbariParameters) DeepCopy() *InteractiveQueryClusterMetastoresAmbariParameters { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterMetastoresAmbariParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InteractiveQueryClusterMetastoresHiveInitParameters) DeepCopyInto(out *InteractiveQueryClusterMetastoresHiveInitParameters) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterMetastoresHiveInitParameters. +func (in *InteractiveQueryClusterMetastoresHiveInitParameters) DeepCopy() *InteractiveQueryClusterMetastoresHiveInitParameters { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterMetastoresHiveInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InteractiveQueryClusterMetastoresHiveObservation) DeepCopyInto(out *InteractiveQueryClusterMetastoresHiveObservation) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterMetastoresHiveObservation. +func (in *InteractiveQueryClusterMetastoresHiveObservation) DeepCopy() *InteractiveQueryClusterMetastoresHiveObservation { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterMetastoresHiveObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *InteractiveQueryClusterMetastoresHiveParameters) DeepCopyInto(out *InteractiveQueryClusterMetastoresHiveParameters) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + out.PasswordSecretRef = in.PasswordSecretRef + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterMetastoresHiveParameters. +func (in *InteractiveQueryClusterMetastoresHiveParameters) DeepCopy() *InteractiveQueryClusterMetastoresHiveParameters { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterMetastoresHiveParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InteractiveQueryClusterMetastoresInitParameters) DeepCopyInto(out *InteractiveQueryClusterMetastoresInitParameters) { + *out = *in + if in.Ambari != nil { + in, out := &in.Ambari, &out.Ambari + *out = new(InteractiveQueryClusterMetastoresAmbariInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Hive != nil { + in, out := &in.Hive, &out.Hive + *out = new(InteractiveQueryClusterMetastoresHiveInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Oozie != nil { + in, out := &in.Oozie, &out.Oozie + *out = new(InteractiveQueryClusterMetastoresOozieInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterMetastoresInitParameters. 
+func (in *InteractiveQueryClusterMetastoresInitParameters) DeepCopy() *InteractiveQueryClusterMetastoresInitParameters { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterMetastoresInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InteractiveQueryClusterMetastoresObservation) DeepCopyInto(out *InteractiveQueryClusterMetastoresObservation) { + *out = *in + if in.Ambari != nil { + in, out := &in.Ambari, &out.Ambari + *out = new(InteractiveQueryClusterMetastoresAmbariObservation) + (*in).DeepCopyInto(*out) + } + if in.Hive != nil { + in, out := &in.Hive, &out.Hive + *out = new(InteractiveQueryClusterMetastoresHiveObservation) + (*in).DeepCopyInto(*out) + } + if in.Oozie != nil { + in, out := &in.Oozie, &out.Oozie + *out = new(InteractiveQueryClusterMetastoresOozieObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterMetastoresObservation. +func (in *InteractiveQueryClusterMetastoresObservation) DeepCopy() *InteractiveQueryClusterMetastoresObservation { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterMetastoresObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InteractiveQueryClusterMetastoresOozieInitParameters) DeepCopyInto(out *InteractiveQueryClusterMetastoresOozieInitParameters) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterMetastoresOozieInitParameters. +func (in *InteractiveQueryClusterMetastoresOozieInitParameters) DeepCopy() *InteractiveQueryClusterMetastoresOozieInitParameters { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterMetastoresOozieInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InteractiveQueryClusterMetastoresOozieObservation) DeepCopyInto(out *InteractiveQueryClusterMetastoresOozieObservation) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterMetastoresOozieObservation. +func (in *InteractiveQueryClusterMetastoresOozieObservation) DeepCopy() *InteractiveQueryClusterMetastoresOozieObservation { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterMetastoresOozieObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *InteractiveQueryClusterMetastoresOozieParameters) DeepCopyInto(out *InteractiveQueryClusterMetastoresOozieParameters) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + out.PasswordSecretRef = in.PasswordSecretRef + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterMetastoresOozieParameters. +func (in *InteractiveQueryClusterMetastoresOozieParameters) DeepCopy() *InteractiveQueryClusterMetastoresOozieParameters { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterMetastoresOozieParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InteractiveQueryClusterMetastoresParameters) DeepCopyInto(out *InteractiveQueryClusterMetastoresParameters) { + *out = *in + if in.Ambari != nil { + in, out := &in.Ambari, &out.Ambari + *out = new(InteractiveQueryClusterMetastoresAmbariParameters) + (*in).DeepCopyInto(*out) + } + if in.Hive != nil { + in, out := &in.Hive, &out.Hive + *out = new(InteractiveQueryClusterMetastoresHiveParameters) + (*in).DeepCopyInto(*out) + } + if in.Oozie != nil { + in, out := &in.Oozie, &out.Oozie + *out = new(InteractiveQueryClusterMetastoresOozieParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterMetastoresParameters. 
+func (in *InteractiveQueryClusterMetastoresParameters) DeepCopy() *InteractiveQueryClusterMetastoresParameters { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterMetastoresParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InteractiveQueryClusterMonitorInitParameters) DeepCopyInto(out *InteractiveQueryClusterMonitorInitParameters) { + *out = *in + if in.LogAnalyticsWorkspaceID != nil { + in, out := &in.LogAnalyticsWorkspaceID, &out.LogAnalyticsWorkspaceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterMonitorInitParameters. +func (in *InteractiveQueryClusterMonitorInitParameters) DeepCopy() *InteractiveQueryClusterMonitorInitParameters { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterMonitorInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InteractiveQueryClusterMonitorObservation) DeepCopyInto(out *InteractiveQueryClusterMonitorObservation) { + *out = *in + if in.LogAnalyticsWorkspaceID != nil { + in, out := &in.LogAnalyticsWorkspaceID, &out.LogAnalyticsWorkspaceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterMonitorObservation. +func (in *InteractiveQueryClusterMonitorObservation) DeepCopy() *InteractiveQueryClusterMonitorObservation { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterMonitorObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InteractiveQueryClusterMonitorParameters) DeepCopyInto(out *InteractiveQueryClusterMonitorParameters) { + *out = *in + if in.LogAnalyticsWorkspaceID != nil { + in, out := &in.LogAnalyticsWorkspaceID, &out.LogAnalyticsWorkspaceID + *out = new(string) + **out = **in + } + out.PrimaryKeySecretRef = in.PrimaryKeySecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterMonitorParameters. +func (in *InteractiveQueryClusterMonitorParameters) DeepCopy() *InteractiveQueryClusterMonitorParameters { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterMonitorParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InteractiveQueryClusterNetworkInitParameters) DeepCopyInto(out *InteractiveQueryClusterNetworkInitParameters) { + *out = *in + if in.ConnectionDirection != nil { + in, out := &in.ConnectionDirection, &out.ConnectionDirection + *out = new(string) + **out = **in + } + if in.PrivateLinkEnabled != nil { + in, out := &in.PrivateLinkEnabled, &out.PrivateLinkEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterNetworkInitParameters. +func (in *InteractiveQueryClusterNetworkInitParameters) DeepCopy() *InteractiveQueryClusterNetworkInitParameters { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterNetworkInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InteractiveQueryClusterNetworkObservation) DeepCopyInto(out *InteractiveQueryClusterNetworkObservation) { + *out = *in + if in.ConnectionDirection != nil { + in, out := &in.ConnectionDirection, &out.ConnectionDirection + *out = new(string) + **out = **in + } + if in.PrivateLinkEnabled != nil { + in, out := &in.PrivateLinkEnabled, &out.PrivateLinkEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterNetworkObservation. +func (in *InteractiveQueryClusterNetworkObservation) DeepCopy() *InteractiveQueryClusterNetworkObservation { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterNetworkObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InteractiveQueryClusterNetworkParameters) DeepCopyInto(out *InteractiveQueryClusterNetworkParameters) { + *out = *in + if in.ConnectionDirection != nil { + in, out := &in.ConnectionDirection, &out.ConnectionDirection + *out = new(string) + **out = **in + } + if in.PrivateLinkEnabled != nil { + in, out := &in.PrivateLinkEnabled, &out.PrivateLinkEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterNetworkParameters. +func (in *InteractiveQueryClusterNetworkParameters) DeepCopy() *InteractiveQueryClusterNetworkParameters { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterNetworkParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InteractiveQueryClusterObservation) DeepCopyInto(out *InteractiveQueryClusterObservation) { + *out = *in + if in.ClusterVersion != nil { + in, out := &in.ClusterVersion, &out.ClusterVersion + *out = new(string) + **out = **in + } + if in.ComponentVersion != nil { + in, out := &in.ComponentVersion, &out.ComponentVersion + *out = new(InteractiveQueryClusterComponentVersionObservation) + (*in).DeepCopyInto(*out) + } + if in.ComputeIsolation != nil { + in, out := &in.ComputeIsolation, &out.ComputeIsolation + *out = new(InteractiveQueryClusterComputeIsolationObservation) + (*in).DeepCopyInto(*out) + } + if in.DiskEncryption != nil { + in, out := &in.DiskEncryption, &out.DiskEncryption + *out = make([]InteractiveQueryClusterDiskEncryptionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EncryptionInTransitEnabled != nil { + in, out := &in.EncryptionInTransitEnabled, &out.EncryptionInTransitEnabled + *out = new(bool) + **out = **in + } + if in.Extension != nil { + in, out := &in.Extension, &out.Extension + *out = new(InteractiveQueryClusterExtensionObservation) + (*in).DeepCopyInto(*out) + } + if in.Gateway != nil { + in, out := &in.Gateway, &out.Gateway + *out = new(InteractiveQueryClusterGatewayObservation) + (*in).DeepCopyInto(*out) + } + if in.HTTPSEndpoint != nil { + in, out := &in.HTTPSEndpoint, &out.HTTPSEndpoint + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Metastores != nil { + in, out := &in.Metastores, &out.Metastores + *out = new(InteractiveQueryClusterMetastoresObservation) + (*in).DeepCopyInto(*out) + } + if in.Monitor != nil { + in, out := &in.Monitor, &out.Monitor + *out = new(InteractiveQueryClusterMonitorObservation) + (*in).DeepCopyInto(*out) + } + if in.Network != nil { + in, out := &in.Network, 
&out.Network + *out = new(InteractiveQueryClusterNetworkObservation) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Roles != nil { + in, out := &in.Roles, &out.Roles + *out = new(InteractiveQueryClusterRolesObservation) + (*in).DeepCopyInto(*out) + } + if in.SSHEndpoint != nil { + in, out := &in.SSHEndpoint, &out.SSHEndpoint + *out = new(string) + **out = **in + } + if in.SecurityProfile != nil { + in, out := &in.SecurityProfile, &out.SecurityProfile + *out = new(InteractiveQueryClusterSecurityProfileObservation) + (*in).DeepCopyInto(*out) + } + if in.StorageAccount != nil { + in, out := &in.StorageAccount, &out.StorageAccount + *out = make([]InteractiveQueryClusterStorageAccountObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StorageAccountGen2 != nil { + in, out := &in.StorageAccountGen2, &out.StorageAccountGen2 + *out = new(InteractiveQueryClusterStorageAccountGen2Observation) + (*in).DeepCopyInto(*out) + } + if in.TLSMinVersion != nil { + in, out := &in.TLSMinVersion, &out.TLSMinVersion + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Tier != nil { + in, out := &in.Tier, &out.Tier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterObservation. 
+func (in *InteractiveQueryClusterObservation) DeepCopy() *InteractiveQueryClusterObservation { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InteractiveQueryClusterParameters) DeepCopyInto(out *InteractiveQueryClusterParameters) { + *out = *in + if in.ClusterVersion != nil { + in, out := &in.ClusterVersion, &out.ClusterVersion + *out = new(string) + **out = **in + } + if in.ComponentVersion != nil { + in, out := &in.ComponentVersion, &out.ComponentVersion + *out = new(InteractiveQueryClusterComponentVersionParameters) + (*in).DeepCopyInto(*out) + } + if in.ComputeIsolation != nil { + in, out := &in.ComputeIsolation, &out.ComputeIsolation + *out = new(InteractiveQueryClusterComputeIsolationParameters) + (*in).DeepCopyInto(*out) + } + if in.DiskEncryption != nil { + in, out := &in.DiskEncryption, &out.DiskEncryption + *out = make([]InteractiveQueryClusterDiskEncryptionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EncryptionInTransitEnabled != nil { + in, out := &in.EncryptionInTransitEnabled, &out.EncryptionInTransitEnabled + *out = new(bool) + **out = **in + } + if in.Extension != nil { + in, out := &in.Extension, &out.Extension + *out = new(InteractiveQueryClusterExtensionParameters) + (*in).DeepCopyInto(*out) + } + if in.Gateway != nil { + in, out := &in.Gateway, &out.Gateway + *out = new(InteractiveQueryClusterGatewayParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Metastores != nil { + in, out := &in.Metastores, &out.Metastores + *out = new(InteractiveQueryClusterMetastoresParameters) + (*in).DeepCopyInto(*out) + } + if in.Monitor != nil { + in, out := &in.Monitor, &out.Monitor + *out = 
new(InteractiveQueryClusterMonitorParameters) + (*in).DeepCopyInto(*out) + } + if in.Network != nil { + in, out := &in.Network, &out.Network + *out = new(InteractiveQueryClusterNetworkParameters) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Roles != nil { + in, out := &in.Roles, &out.Roles + *out = new(InteractiveQueryClusterRolesParameters) + (*in).DeepCopyInto(*out) + } + if in.SecurityProfile != nil { + in, out := &in.SecurityProfile, &out.SecurityProfile + *out = new(InteractiveQueryClusterSecurityProfileParameters) + (*in).DeepCopyInto(*out) + } + if in.StorageAccount != nil { + in, out := &in.StorageAccount, &out.StorageAccount + *out = make([]InteractiveQueryClusterStorageAccountParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StorageAccountGen2 != nil { + in, out := &in.StorageAccountGen2, &out.StorageAccountGen2 + *out = new(InteractiveQueryClusterStorageAccountGen2Parameters) + (*in).DeepCopyInto(*out) + } + if in.TLSMinVersion != nil { + in, out := &in.TLSMinVersion, &out.TLSMinVersion + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Tier != nil { + in, out := &in.Tier, &out.Tier + *out = new(string) + **out = **in + } +} + +// DeepCopy 
is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterParameters. +func (in *InteractiveQueryClusterParameters) DeepCopy() *InteractiveQueryClusterParameters { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InteractiveQueryClusterRolesHeadNodeInitParameters) DeepCopyInto(out *InteractiveQueryClusterRolesHeadNodeInitParameters) { + *out = *in + if in.SSHKeys != nil { + in, out := &in.SSHKeys, &out.SSHKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScriptActions != nil { + in, out := &in.ScriptActions, &out.ScriptActions + *out = make([]RolesHeadNodeScriptActionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } + if in.VirtualNetworkID != nil { + in, out := &in.VirtualNetworkID, &out.VirtualNetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterRolesHeadNodeInitParameters. 
+func (in *InteractiveQueryClusterRolesHeadNodeInitParameters) DeepCopy() *InteractiveQueryClusterRolesHeadNodeInitParameters { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterRolesHeadNodeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InteractiveQueryClusterRolesHeadNodeObservation) DeepCopyInto(out *InteractiveQueryClusterRolesHeadNodeObservation) { + *out = *in + if in.SSHKeys != nil { + in, out := &in.SSHKeys, &out.SSHKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScriptActions != nil { + in, out := &in.ScriptActions, &out.ScriptActions + *out = make([]RolesHeadNodeScriptActionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } + if in.VirtualNetworkID != nil { + in, out := &in.VirtualNetworkID, &out.VirtualNetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterRolesHeadNodeObservation. +func (in *InteractiveQueryClusterRolesHeadNodeObservation) DeepCopy() *InteractiveQueryClusterRolesHeadNodeObservation { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterRolesHeadNodeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InteractiveQueryClusterRolesHeadNodeParameters) DeepCopyInto(out *InteractiveQueryClusterRolesHeadNodeParameters) { + *out = *in + if in.PasswordSecretRef != nil { + in, out := &in.PasswordSecretRef, &out.PasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.SSHKeys != nil { + in, out := &in.SSHKeys, &out.SSHKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScriptActions != nil { + in, out := &in.ScriptActions, &out.ScriptActions + *out = make([]RolesHeadNodeScriptActionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } + if in.VirtualNetworkID != nil { + in, out := &in.VirtualNetworkID, &out.VirtualNetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterRolesHeadNodeParameters. +func (in *InteractiveQueryClusterRolesHeadNodeParameters) DeepCopy() *InteractiveQueryClusterRolesHeadNodeParameters { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterRolesHeadNodeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InteractiveQueryClusterRolesInitParameters) DeepCopyInto(out *InteractiveQueryClusterRolesInitParameters) { + *out = *in + if in.HeadNode != nil { + in, out := &in.HeadNode, &out.HeadNode + *out = new(InteractiveQueryClusterRolesHeadNodeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.WorkerNode != nil { + in, out := &in.WorkerNode, &out.WorkerNode + *out = new(InteractiveQueryClusterRolesWorkerNodeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ZookeeperNode != nil { + in, out := &in.ZookeeperNode, &out.ZookeeperNode + *out = new(InteractiveQueryClusterRolesZookeeperNodeInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterRolesInitParameters. +func (in *InteractiveQueryClusterRolesInitParameters) DeepCopy() *InteractiveQueryClusterRolesInitParameters { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterRolesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InteractiveQueryClusterRolesObservation) DeepCopyInto(out *InteractiveQueryClusterRolesObservation) { + *out = *in + if in.HeadNode != nil { + in, out := &in.HeadNode, &out.HeadNode + *out = new(InteractiveQueryClusterRolesHeadNodeObservation) + (*in).DeepCopyInto(*out) + } + if in.WorkerNode != nil { + in, out := &in.WorkerNode, &out.WorkerNode + *out = new(InteractiveQueryClusterRolesWorkerNodeObservation) + (*in).DeepCopyInto(*out) + } + if in.ZookeeperNode != nil { + in, out := &in.ZookeeperNode, &out.ZookeeperNode + *out = new(InteractiveQueryClusterRolesZookeeperNodeObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterRolesObservation. 
+func (in *InteractiveQueryClusterRolesObservation) DeepCopy() *InteractiveQueryClusterRolesObservation { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterRolesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InteractiveQueryClusterRolesParameters) DeepCopyInto(out *InteractiveQueryClusterRolesParameters) { + *out = *in + if in.HeadNode != nil { + in, out := &in.HeadNode, &out.HeadNode + *out = new(InteractiveQueryClusterRolesHeadNodeParameters) + (*in).DeepCopyInto(*out) + } + if in.WorkerNode != nil { + in, out := &in.WorkerNode, &out.WorkerNode + *out = new(InteractiveQueryClusterRolesWorkerNodeParameters) + (*in).DeepCopyInto(*out) + } + if in.ZookeeperNode != nil { + in, out := &in.ZookeeperNode, &out.ZookeeperNode + *out = new(InteractiveQueryClusterRolesZookeeperNodeParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterRolesParameters. +func (in *InteractiveQueryClusterRolesParameters) DeepCopy() *InteractiveQueryClusterRolesParameters { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterRolesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InteractiveQueryClusterRolesWorkerNodeInitParameters) DeepCopyInto(out *InteractiveQueryClusterRolesWorkerNodeInitParameters) { + *out = *in + if in.Autoscale != nil { + in, out := &in.Autoscale, &out.Autoscale + *out = new(RolesWorkerNodeAutoscaleInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SSHKeys != nil { + in, out := &in.SSHKeys, &out.SSHKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScriptActions != nil { + in, out := &in.ScriptActions, &out.ScriptActions + *out = make([]InteractiveQueryClusterRolesWorkerNodeScriptActionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.TargetInstanceCount != nil { + in, out := &in.TargetInstanceCount, &out.TargetInstanceCount + *out = new(float64) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } + if in.VirtualNetworkID != nil { + in, out := &in.VirtualNetworkID, &out.VirtualNetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterRolesWorkerNodeInitParameters. 
+func (in *InteractiveQueryClusterRolesWorkerNodeInitParameters) DeepCopy() *InteractiveQueryClusterRolesWorkerNodeInitParameters { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterRolesWorkerNodeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InteractiveQueryClusterRolesWorkerNodeObservation) DeepCopyInto(out *InteractiveQueryClusterRolesWorkerNodeObservation) { + *out = *in + if in.Autoscale != nil { + in, out := &in.Autoscale, &out.Autoscale + *out = new(RolesWorkerNodeAutoscaleObservation) + (*in).DeepCopyInto(*out) + } + if in.SSHKeys != nil { + in, out := &in.SSHKeys, &out.SSHKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScriptActions != nil { + in, out := &in.ScriptActions, &out.ScriptActions + *out = make([]InteractiveQueryClusterRolesWorkerNodeScriptActionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.TargetInstanceCount != nil { + in, out := &in.TargetInstanceCount, &out.TargetInstanceCount + *out = new(float64) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } + if in.VirtualNetworkID != nil { + in, out := &in.VirtualNetworkID, &out.VirtualNetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterRolesWorkerNodeObservation. 
+func (in *InteractiveQueryClusterRolesWorkerNodeObservation) DeepCopy() *InteractiveQueryClusterRolesWorkerNodeObservation { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterRolesWorkerNodeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InteractiveQueryClusterRolesWorkerNodeParameters) DeepCopyInto(out *InteractiveQueryClusterRolesWorkerNodeParameters) { + *out = *in + if in.Autoscale != nil { + in, out := &in.Autoscale, &out.Autoscale + *out = new(RolesWorkerNodeAutoscaleParameters) + (*in).DeepCopyInto(*out) + } + if in.PasswordSecretRef != nil { + in, out := &in.PasswordSecretRef, &out.PasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.SSHKeys != nil { + in, out := &in.SSHKeys, &out.SSHKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScriptActions != nil { + in, out := &in.ScriptActions, &out.ScriptActions + *out = make([]InteractiveQueryClusterRolesWorkerNodeScriptActionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.TargetInstanceCount != nil { + in, out := &in.TargetInstanceCount, &out.TargetInstanceCount + *out = new(float64) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = 
new(string) + **out = **in + } + if in.VirtualNetworkID != nil { + in, out := &in.VirtualNetworkID, &out.VirtualNetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterRolesWorkerNodeParameters. +func (in *InteractiveQueryClusterRolesWorkerNodeParameters) DeepCopy() *InteractiveQueryClusterRolesWorkerNodeParameters { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterRolesWorkerNodeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InteractiveQueryClusterRolesWorkerNodeScriptActionsInitParameters) DeepCopyInto(out *InteractiveQueryClusterRolesWorkerNodeScriptActionsInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterRolesWorkerNodeScriptActionsInitParameters. +func (in *InteractiveQueryClusterRolesWorkerNodeScriptActionsInitParameters) DeepCopy() *InteractiveQueryClusterRolesWorkerNodeScriptActionsInitParameters { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterRolesWorkerNodeScriptActionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InteractiveQueryClusterRolesWorkerNodeScriptActionsObservation) DeepCopyInto(out *InteractiveQueryClusterRolesWorkerNodeScriptActionsObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterRolesWorkerNodeScriptActionsObservation. +func (in *InteractiveQueryClusterRolesWorkerNodeScriptActionsObservation) DeepCopy() *InteractiveQueryClusterRolesWorkerNodeScriptActionsObservation { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterRolesWorkerNodeScriptActionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InteractiveQueryClusterRolesWorkerNodeScriptActionsParameters) DeepCopyInto(out *InteractiveQueryClusterRolesWorkerNodeScriptActionsParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterRolesWorkerNodeScriptActionsParameters. 
+func (in *InteractiveQueryClusterRolesWorkerNodeScriptActionsParameters) DeepCopy() *InteractiveQueryClusterRolesWorkerNodeScriptActionsParameters { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterRolesWorkerNodeScriptActionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InteractiveQueryClusterRolesZookeeperNodeInitParameters) DeepCopyInto(out *InteractiveQueryClusterRolesZookeeperNodeInitParameters) { + *out = *in + if in.SSHKeys != nil { + in, out := &in.SSHKeys, &out.SSHKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScriptActions != nil { + in, out := &in.ScriptActions, &out.ScriptActions + *out = make([]InteractiveQueryClusterRolesZookeeperNodeScriptActionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } + if in.VirtualNetworkID != nil { + in, out := &in.VirtualNetworkID, &out.VirtualNetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterRolesZookeeperNodeInitParameters. 
+func (in *InteractiveQueryClusterRolesZookeeperNodeInitParameters) DeepCopy() *InteractiveQueryClusterRolesZookeeperNodeInitParameters { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterRolesZookeeperNodeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InteractiveQueryClusterRolesZookeeperNodeObservation) DeepCopyInto(out *InteractiveQueryClusterRolesZookeeperNodeObservation) { + *out = *in + if in.SSHKeys != nil { + in, out := &in.SSHKeys, &out.SSHKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScriptActions != nil { + in, out := &in.ScriptActions, &out.ScriptActions + *out = make([]InteractiveQueryClusterRolesZookeeperNodeScriptActionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } + if in.VirtualNetworkID != nil { + in, out := &in.VirtualNetworkID, &out.VirtualNetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterRolesZookeeperNodeObservation. +func (in *InteractiveQueryClusterRolesZookeeperNodeObservation) DeepCopy() *InteractiveQueryClusterRolesZookeeperNodeObservation { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterRolesZookeeperNodeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *InteractiveQueryClusterRolesZookeeperNodeParameters) DeepCopyInto(out *InteractiveQueryClusterRolesZookeeperNodeParameters) { + *out = *in + if in.PasswordSecretRef != nil { + in, out := &in.PasswordSecretRef, &out.PasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.SSHKeys != nil { + in, out := &in.SSHKeys, &out.SSHKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScriptActions != nil { + in, out := &in.ScriptActions, &out.ScriptActions + *out = make([]InteractiveQueryClusterRolesZookeeperNodeScriptActionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } + if in.VirtualNetworkID != nil { + in, out := &in.VirtualNetworkID, &out.VirtualNetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterRolesZookeeperNodeParameters. 
+func (in *InteractiveQueryClusterRolesZookeeperNodeParameters) DeepCopy() *InteractiveQueryClusterRolesZookeeperNodeParameters { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterRolesZookeeperNodeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InteractiveQueryClusterRolesZookeeperNodeScriptActionsInitParameters) DeepCopyInto(out *InteractiveQueryClusterRolesZookeeperNodeScriptActionsInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterRolesZookeeperNodeScriptActionsInitParameters. +func (in *InteractiveQueryClusterRolesZookeeperNodeScriptActionsInitParameters) DeepCopy() *InteractiveQueryClusterRolesZookeeperNodeScriptActionsInitParameters { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterRolesZookeeperNodeScriptActionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InteractiveQueryClusterRolesZookeeperNodeScriptActionsObservation) DeepCopyInto(out *InteractiveQueryClusterRolesZookeeperNodeScriptActionsObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterRolesZookeeperNodeScriptActionsObservation. +func (in *InteractiveQueryClusterRolesZookeeperNodeScriptActionsObservation) DeepCopy() *InteractiveQueryClusterRolesZookeeperNodeScriptActionsObservation { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterRolesZookeeperNodeScriptActionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InteractiveQueryClusterRolesZookeeperNodeScriptActionsParameters) DeepCopyInto(out *InteractiveQueryClusterRolesZookeeperNodeScriptActionsParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterRolesZookeeperNodeScriptActionsParameters. 
+func (in *InteractiveQueryClusterRolesZookeeperNodeScriptActionsParameters) DeepCopy() *InteractiveQueryClusterRolesZookeeperNodeScriptActionsParameters { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterRolesZookeeperNodeScriptActionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InteractiveQueryClusterSecurityProfileInitParameters) DeepCopyInto(out *InteractiveQueryClusterSecurityProfileInitParameters) { + *out = *in + if in.AaddsResourceID != nil { + in, out := &in.AaddsResourceID, &out.AaddsResourceID + *out = new(string) + **out = **in + } + if in.ClusterUsersGroupDNS != nil { + in, out := &in.ClusterUsersGroupDNS, &out.ClusterUsersGroupDNS + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.DomainUsername != nil { + in, out := &in.DomainUsername, &out.DomainUsername + *out = new(string) + **out = **in + } + if in.LdapsUrls != nil { + in, out := &in.LdapsUrls, &out.LdapsUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MsiResourceID != nil { + in, out := &in.MsiResourceID, &out.MsiResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterSecurityProfileInitParameters. 
+func (in *InteractiveQueryClusterSecurityProfileInitParameters) DeepCopy() *InteractiveQueryClusterSecurityProfileInitParameters { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterSecurityProfileInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InteractiveQueryClusterSecurityProfileObservation) DeepCopyInto(out *InteractiveQueryClusterSecurityProfileObservation) { + *out = *in + if in.AaddsResourceID != nil { + in, out := &in.AaddsResourceID, &out.AaddsResourceID + *out = new(string) + **out = **in + } + if in.ClusterUsersGroupDNS != nil { + in, out := &in.ClusterUsersGroupDNS, &out.ClusterUsersGroupDNS + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.DomainUsername != nil { + in, out := &in.DomainUsername, &out.DomainUsername + *out = new(string) + **out = **in + } + if in.LdapsUrls != nil { + in, out := &in.LdapsUrls, &out.LdapsUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MsiResourceID != nil { + in, out := &in.MsiResourceID, &out.MsiResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterSecurityProfileObservation. 
+func (in *InteractiveQueryClusterSecurityProfileObservation) DeepCopy() *InteractiveQueryClusterSecurityProfileObservation { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterSecurityProfileObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InteractiveQueryClusterSecurityProfileParameters) DeepCopyInto(out *InteractiveQueryClusterSecurityProfileParameters) { + *out = *in + if in.AaddsResourceID != nil { + in, out := &in.AaddsResourceID, &out.AaddsResourceID + *out = new(string) + **out = **in + } + if in.ClusterUsersGroupDNS != nil { + in, out := &in.ClusterUsersGroupDNS, &out.ClusterUsersGroupDNS + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + out.DomainUserPasswordSecretRef = in.DomainUserPasswordSecretRef + if in.DomainUsername != nil { + in, out := &in.DomainUsername, &out.DomainUsername + *out = new(string) + **out = **in + } + if in.LdapsUrls != nil { + in, out := &in.LdapsUrls, &out.LdapsUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MsiResourceID != nil { + in, out := &in.MsiResourceID, &out.MsiResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterSecurityProfileParameters. 
+func (in *InteractiveQueryClusterSecurityProfileParameters) DeepCopy() *InteractiveQueryClusterSecurityProfileParameters { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterSecurityProfileParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InteractiveQueryClusterSpec) DeepCopyInto(out *InteractiveQueryClusterSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterSpec. +func (in *InteractiveQueryClusterSpec) DeepCopy() *InteractiveQueryClusterSpec { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InteractiveQueryClusterStatus) DeepCopyInto(out *InteractiveQueryClusterStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterStatus. +func (in *InteractiveQueryClusterStatus) DeepCopy() *InteractiveQueryClusterStatus { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InteractiveQueryClusterStorageAccountGen2InitParameters) DeepCopyInto(out *InteractiveQueryClusterStorageAccountGen2InitParameters) { + *out = *in + if in.FileSystemID != nil { + in, out := &in.FileSystemID, &out.FileSystemID + *out = new(string) + **out = **in + } + if in.IsDefault != nil { + in, out := &in.IsDefault, &out.IsDefault + *out = new(bool) + **out = **in + } + if in.ManagedIdentityResourceID != nil { + in, out := &in.ManagedIdentityResourceID, &out.ManagedIdentityResourceID + *out = new(string) + **out = **in + } + if in.StorageResourceID != nil { + in, out := &in.StorageResourceID, &out.StorageResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterStorageAccountGen2InitParameters. +func (in *InteractiveQueryClusterStorageAccountGen2InitParameters) DeepCopy() *InteractiveQueryClusterStorageAccountGen2InitParameters { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterStorageAccountGen2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InteractiveQueryClusterStorageAccountGen2Observation) DeepCopyInto(out *InteractiveQueryClusterStorageAccountGen2Observation) { + *out = *in + if in.FileSystemID != nil { + in, out := &in.FileSystemID, &out.FileSystemID + *out = new(string) + **out = **in + } + if in.IsDefault != nil { + in, out := &in.IsDefault, &out.IsDefault + *out = new(bool) + **out = **in + } + if in.ManagedIdentityResourceID != nil { + in, out := &in.ManagedIdentityResourceID, &out.ManagedIdentityResourceID + *out = new(string) + **out = **in + } + if in.StorageResourceID != nil { + in, out := &in.StorageResourceID, &out.StorageResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterStorageAccountGen2Observation. +func (in *InteractiveQueryClusterStorageAccountGen2Observation) DeepCopy() *InteractiveQueryClusterStorageAccountGen2Observation { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterStorageAccountGen2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InteractiveQueryClusterStorageAccountGen2Parameters) DeepCopyInto(out *InteractiveQueryClusterStorageAccountGen2Parameters) { + *out = *in + if in.FileSystemID != nil { + in, out := &in.FileSystemID, &out.FileSystemID + *out = new(string) + **out = **in + } + if in.IsDefault != nil { + in, out := &in.IsDefault, &out.IsDefault + *out = new(bool) + **out = **in + } + if in.ManagedIdentityResourceID != nil { + in, out := &in.ManagedIdentityResourceID, &out.ManagedIdentityResourceID + *out = new(string) + **out = **in + } + if in.StorageResourceID != nil { + in, out := &in.StorageResourceID, &out.StorageResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterStorageAccountGen2Parameters. +func (in *InteractiveQueryClusterStorageAccountGen2Parameters) DeepCopy() *InteractiveQueryClusterStorageAccountGen2Parameters { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterStorageAccountGen2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InteractiveQueryClusterStorageAccountInitParameters) DeepCopyInto(out *InteractiveQueryClusterStorageAccountInitParameters) { + *out = *in + if in.IsDefault != nil { + in, out := &in.IsDefault, &out.IsDefault + *out = new(bool) + **out = **in + } + if in.StorageContainerID != nil { + in, out := &in.StorageContainerID, &out.StorageContainerID + *out = new(string) + **out = **in + } + if in.StorageContainerIDRef != nil { + in, out := &in.StorageContainerIDRef, &out.StorageContainerIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StorageContainerIDSelector != nil { + in, out := &in.StorageContainerIDSelector, &out.StorageContainerIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StorageResourceID != nil { + in, out := &in.StorageResourceID, &out.StorageResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterStorageAccountInitParameters. +func (in *InteractiveQueryClusterStorageAccountInitParameters) DeepCopy() *InteractiveQueryClusterStorageAccountInitParameters { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterStorageAccountInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InteractiveQueryClusterStorageAccountObservation) DeepCopyInto(out *InteractiveQueryClusterStorageAccountObservation) { + *out = *in + if in.IsDefault != nil { + in, out := &in.IsDefault, &out.IsDefault + *out = new(bool) + **out = **in + } + if in.StorageContainerID != nil { + in, out := &in.StorageContainerID, &out.StorageContainerID + *out = new(string) + **out = **in + } + if in.StorageResourceID != nil { + in, out := &in.StorageResourceID, &out.StorageResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterStorageAccountObservation. +func (in *InteractiveQueryClusterStorageAccountObservation) DeepCopy() *InteractiveQueryClusterStorageAccountObservation { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterStorageAccountObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InteractiveQueryClusterStorageAccountParameters) DeepCopyInto(out *InteractiveQueryClusterStorageAccountParameters) { + *out = *in + if in.IsDefault != nil { + in, out := &in.IsDefault, &out.IsDefault + *out = new(bool) + **out = **in + } + out.StorageAccountKeySecretRef = in.StorageAccountKeySecretRef + if in.StorageContainerID != nil { + in, out := &in.StorageContainerID, &out.StorageContainerID + *out = new(string) + **out = **in + } + if in.StorageContainerIDRef != nil { + in, out := &in.StorageContainerIDRef, &out.StorageContainerIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StorageContainerIDSelector != nil { + in, out := &in.StorageContainerIDSelector, &out.StorageContainerIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StorageResourceID != nil { + in, out := &in.StorageResourceID, &out.StorageResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InteractiveQueryClusterStorageAccountParameters. +func (in *InteractiveQueryClusterStorageAccountParameters) DeepCopy() *InteractiveQueryClusterStorageAccountParameters { + if in == nil { + return nil + } + out := new(InteractiveQueryClusterStorageAccountParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaCluster) DeepCopyInto(out *KafkaCluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaCluster. 
+func (in *KafkaCluster) DeepCopy() *KafkaCluster { + if in == nil { + return nil + } + out := new(KafkaCluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KafkaCluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaClusterComponentVersionInitParameters) DeepCopyInto(out *KafkaClusterComponentVersionInitParameters) { + *out = *in + if in.Kafka != nil { + in, out := &in.Kafka, &out.Kafka + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterComponentVersionInitParameters. +func (in *KafkaClusterComponentVersionInitParameters) DeepCopy() *KafkaClusterComponentVersionInitParameters { + if in == nil { + return nil + } + out := new(KafkaClusterComponentVersionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaClusterComponentVersionObservation) DeepCopyInto(out *KafkaClusterComponentVersionObservation) { + *out = *in + if in.Kafka != nil { + in, out := &in.Kafka, &out.Kafka + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterComponentVersionObservation. +func (in *KafkaClusterComponentVersionObservation) DeepCopy() *KafkaClusterComponentVersionObservation { + if in == nil { + return nil + } + out := new(KafkaClusterComponentVersionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KafkaClusterComponentVersionParameters) DeepCopyInto(out *KafkaClusterComponentVersionParameters) { + *out = *in + if in.Kafka != nil { + in, out := &in.Kafka, &out.Kafka + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterComponentVersionParameters. +func (in *KafkaClusterComponentVersionParameters) DeepCopy() *KafkaClusterComponentVersionParameters { + if in == nil { + return nil + } + out := new(KafkaClusterComponentVersionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaClusterComputeIsolationInitParameters) DeepCopyInto(out *KafkaClusterComputeIsolationInitParameters) { + *out = *in + if in.ComputeIsolationEnabled != nil { + in, out := &in.ComputeIsolationEnabled, &out.ComputeIsolationEnabled + *out = new(bool) + **out = **in + } + if in.HostSku != nil { + in, out := &in.HostSku, &out.HostSku + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterComputeIsolationInitParameters. +func (in *KafkaClusterComputeIsolationInitParameters) DeepCopy() *KafkaClusterComputeIsolationInitParameters { + if in == nil { + return nil + } + out := new(KafkaClusterComputeIsolationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KafkaClusterComputeIsolationObservation) DeepCopyInto(out *KafkaClusterComputeIsolationObservation) { + *out = *in + if in.ComputeIsolationEnabled != nil { + in, out := &in.ComputeIsolationEnabled, &out.ComputeIsolationEnabled + *out = new(bool) + **out = **in + } + if in.HostSku != nil { + in, out := &in.HostSku, &out.HostSku + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterComputeIsolationObservation. +func (in *KafkaClusterComputeIsolationObservation) DeepCopy() *KafkaClusterComputeIsolationObservation { + if in == nil { + return nil + } + out := new(KafkaClusterComputeIsolationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaClusterComputeIsolationParameters) DeepCopyInto(out *KafkaClusterComputeIsolationParameters) { + *out = *in + if in.ComputeIsolationEnabled != nil { + in, out := &in.ComputeIsolationEnabled, &out.ComputeIsolationEnabled + *out = new(bool) + **out = **in + } + if in.HostSku != nil { + in, out := &in.HostSku, &out.HostSku + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterComputeIsolationParameters. +func (in *KafkaClusterComputeIsolationParameters) DeepCopy() *KafkaClusterComputeIsolationParameters { + if in == nil { + return nil + } + out := new(KafkaClusterComputeIsolationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KafkaClusterDiskEncryptionInitParameters) DeepCopyInto(out *KafkaClusterDiskEncryptionInitParameters) { + *out = *in + if in.EncryptionAlgorithm != nil { + in, out := &in.EncryptionAlgorithm, &out.EncryptionAlgorithm + *out = new(string) + **out = **in + } + if in.EncryptionAtHostEnabled != nil { + in, out := &in.EncryptionAtHostEnabled, &out.EncryptionAtHostEnabled + *out = new(bool) + **out = **in + } + if in.KeyVaultKeyID != nil { + in, out := &in.KeyVaultKeyID, &out.KeyVaultKeyID + *out = new(string) + **out = **in + } + if in.KeyVaultManagedIdentityID != nil { + in, out := &in.KeyVaultManagedIdentityID, &out.KeyVaultManagedIdentityID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterDiskEncryptionInitParameters. +func (in *KafkaClusterDiskEncryptionInitParameters) DeepCopy() *KafkaClusterDiskEncryptionInitParameters { + if in == nil { + return nil + } + out := new(KafkaClusterDiskEncryptionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KafkaClusterDiskEncryptionObservation) DeepCopyInto(out *KafkaClusterDiskEncryptionObservation) { + *out = *in + if in.EncryptionAlgorithm != nil { + in, out := &in.EncryptionAlgorithm, &out.EncryptionAlgorithm + *out = new(string) + **out = **in + } + if in.EncryptionAtHostEnabled != nil { + in, out := &in.EncryptionAtHostEnabled, &out.EncryptionAtHostEnabled + *out = new(bool) + **out = **in + } + if in.KeyVaultKeyID != nil { + in, out := &in.KeyVaultKeyID, &out.KeyVaultKeyID + *out = new(string) + **out = **in + } + if in.KeyVaultManagedIdentityID != nil { + in, out := &in.KeyVaultManagedIdentityID, &out.KeyVaultManagedIdentityID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterDiskEncryptionObservation. +func (in *KafkaClusterDiskEncryptionObservation) DeepCopy() *KafkaClusterDiskEncryptionObservation { + if in == nil { + return nil + } + out := new(KafkaClusterDiskEncryptionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaClusterDiskEncryptionParameters) DeepCopyInto(out *KafkaClusterDiskEncryptionParameters) { + *out = *in + if in.EncryptionAlgorithm != nil { + in, out := &in.EncryptionAlgorithm, &out.EncryptionAlgorithm + *out = new(string) + **out = **in + } + if in.EncryptionAtHostEnabled != nil { + in, out := &in.EncryptionAtHostEnabled, &out.EncryptionAtHostEnabled + *out = new(bool) + **out = **in + } + if in.KeyVaultKeyID != nil { + in, out := &in.KeyVaultKeyID, &out.KeyVaultKeyID + *out = new(string) + **out = **in + } + if in.KeyVaultManagedIdentityID != nil { + in, out := &in.KeyVaultManagedIdentityID, &out.KeyVaultManagedIdentityID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterDiskEncryptionParameters. 
+func (in *KafkaClusterDiskEncryptionParameters) DeepCopy() *KafkaClusterDiskEncryptionParameters { + if in == nil { + return nil + } + out := new(KafkaClusterDiskEncryptionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaClusterExtensionInitParameters) DeepCopyInto(out *KafkaClusterExtensionInitParameters) { + *out = *in + if in.LogAnalyticsWorkspaceID != nil { + in, out := &in.LogAnalyticsWorkspaceID, &out.LogAnalyticsWorkspaceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterExtensionInitParameters. +func (in *KafkaClusterExtensionInitParameters) DeepCopy() *KafkaClusterExtensionInitParameters { + if in == nil { + return nil + } + out := new(KafkaClusterExtensionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaClusterExtensionObservation) DeepCopyInto(out *KafkaClusterExtensionObservation) { + *out = *in + if in.LogAnalyticsWorkspaceID != nil { + in, out := &in.LogAnalyticsWorkspaceID, &out.LogAnalyticsWorkspaceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterExtensionObservation. +func (in *KafkaClusterExtensionObservation) DeepCopy() *KafkaClusterExtensionObservation { + if in == nil { + return nil + } + out := new(KafkaClusterExtensionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KafkaClusterExtensionParameters) DeepCopyInto(out *KafkaClusterExtensionParameters) { + *out = *in + if in.LogAnalyticsWorkspaceID != nil { + in, out := &in.LogAnalyticsWorkspaceID, &out.LogAnalyticsWorkspaceID + *out = new(string) + **out = **in + } + out.PrimaryKeySecretRef = in.PrimaryKeySecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterExtensionParameters. +func (in *KafkaClusterExtensionParameters) DeepCopy() *KafkaClusterExtensionParameters { + if in == nil { + return nil + } + out := new(KafkaClusterExtensionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaClusterGatewayInitParameters) DeepCopyInto(out *KafkaClusterGatewayInitParameters) { + *out = *in + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterGatewayInitParameters. +func (in *KafkaClusterGatewayInitParameters) DeepCopy() *KafkaClusterGatewayInitParameters { + if in == nil { + return nil + } + out := new(KafkaClusterGatewayInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaClusterGatewayObservation) DeepCopyInto(out *KafkaClusterGatewayObservation) { + *out = *in + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterGatewayObservation. 
+func (in *KafkaClusterGatewayObservation) DeepCopy() *KafkaClusterGatewayObservation { + if in == nil { + return nil + } + out := new(KafkaClusterGatewayObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaClusterGatewayParameters) DeepCopyInto(out *KafkaClusterGatewayParameters) { + *out = *in + out.PasswordSecretRef = in.PasswordSecretRef + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterGatewayParameters. +func (in *KafkaClusterGatewayParameters) DeepCopy() *KafkaClusterGatewayParameters { + if in == nil { + return nil + } + out := new(KafkaClusterGatewayParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KafkaClusterInitParameters) DeepCopyInto(out *KafkaClusterInitParameters) { + *out = *in + if in.ClusterVersion != nil { + in, out := &in.ClusterVersion, &out.ClusterVersion + *out = new(string) + **out = **in + } + if in.ComponentVersion != nil { + in, out := &in.ComponentVersion, &out.ComponentVersion + *out = new(KafkaClusterComponentVersionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ComputeIsolation != nil { + in, out := &in.ComputeIsolation, &out.ComputeIsolation + *out = new(KafkaClusterComputeIsolationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DiskEncryption != nil { + in, out := &in.DiskEncryption, &out.DiskEncryption + *out = make([]KafkaClusterDiskEncryptionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EncryptionInTransitEnabled != nil { + in, out := &in.EncryptionInTransitEnabled, &out.EncryptionInTransitEnabled + *out = new(bool) + **out = **in + } + if in.Extension != nil { + in, out := &in.Extension, &out.Extension + *out = new(KafkaClusterExtensionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Gateway != nil { + in, out := &in.Gateway, &out.Gateway + *out = new(KafkaClusterGatewayInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Metastores != nil { + in, out := &in.Metastores, &out.Metastores + *out = new(KafkaClusterMetastoresInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Monitor != nil { + in, out := &in.Monitor, &out.Monitor + *out = new(KafkaClusterMonitorInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Network != nil { + in, out := &in.Network, &out.Network + *out = new(KafkaClusterNetworkInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RestProxy != nil { + in, out := &in.RestProxy, &out.RestProxy + *out = new(RestProxyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Roles != nil { + in, out := &in.Roles, 
&out.Roles + *out = new(KafkaClusterRolesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SecurityProfile != nil { + in, out := &in.SecurityProfile, &out.SecurityProfile + *out = new(KafkaClusterSecurityProfileInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StorageAccount != nil { + in, out := &in.StorageAccount, &out.StorageAccount + *out = make([]KafkaClusterStorageAccountInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StorageAccountGen2 != nil { + in, out := &in.StorageAccountGen2, &out.StorageAccountGen2 + *out = new(KafkaClusterStorageAccountGen2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.TLSMinVersion != nil { + in, out := &in.TLSMinVersion, &out.TLSMinVersion + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Tier != nil { + in, out := &in.Tier, &out.Tier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterInitParameters. +func (in *KafkaClusterInitParameters) DeepCopy() *KafkaClusterInitParameters { + if in == nil { + return nil + } + out := new(KafkaClusterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KafkaClusterList) DeepCopyInto(out *KafkaClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KafkaCluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterList. +func (in *KafkaClusterList) DeepCopy() *KafkaClusterList { + if in == nil { + return nil + } + out := new(KafkaClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KafkaClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaClusterMetastoresAmbariInitParameters) DeepCopyInto(out *KafkaClusterMetastoresAmbariInitParameters) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterMetastoresAmbariInitParameters. +func (in *KafkaClusterMetastoresAmbariInitParameters) DeepCopy() *KafkaClusterMetastoresAmbariInitParameters { + if in == nil { + return nil + } + out := new(KafkaClusterMetastoresAmbariInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KafkaClusterMetastoresAmbariObservation) DeepCopyInto(out *KafkaClusterMetastoresAmbariObservation) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterMetastoresAmbariObservation. +func (in *KafkaClusterMetastoresAmbariObservation) DeepCopy() *KafkaClusterMetastoresAmbariObservation { + if in == nil { + return nil + } + out := new(KafkaClusterMetastoresAmbariObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaClusterMetastoresAmbariParameters) DeepCopyInto(out *KafkaClusterMetastoresAmbariParameters) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + out.PasswordSecretRef = in.PasswordSecretRef + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterMetastoresAmbariParameters. +func (in *KafkaClusterMetastoresAmbariParameters) DeepCopy() *KafkaClusterMetastoresAmbariParameters { + if in == nil { + return nil + } + out := new(KafkaClusterMetastoresAmbariParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KafkaClusterMetastoresHiveInitParameters) DeepCopyInto(out *KafkaClusterMetastoresHiveInitParameters) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterMetastoresHiveInitParameters. +func (in *KafkaClusterMetastoresHiveInitParameters) DeepCopy() *KafkaClusterMetastoresHiveInitParameters { + if in == nil { + return nil + } + out := new(KafkaClusterMetastoresHiveInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaClusterMetastoresHiveObservation) DeepCopyInto(out *KafkaClusterMetastoresHiveObservation) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterMetastoresHiveObservation. +func (in *KafkaClusterMetastoresHiveObservation) DeepCopy() *KafkaClusterMetastoresHiveObservation { + if in == nil { + return nil + } + out := new(KafkaClusterMetastoresHiveObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KafkaClusterMetastoresHiveParameters) DeepCopyInto(out *KafkaClusterMetastoresHiveParameters) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + out.PasswordSecretRef = in.PasswordSecretRef + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterMetastoresHiveParameters. +func (in *KafkaClusterMetastoresHiveParameters) DeepCopy() *KafkaClusterMetastoresHiveParameters { + if in == nil { + return nil + } + out := new(KafkaClusterMetastoresHiveParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaClusterMetastoresInitParameters) DeepCopyInto(out *KafkaClusterMetastoresInitParameters) { + *out = *in + if in.Ambari != nil { + in, out := &in.Ambari, &out.Ambari + *out = new(KafkaClusterMetastoresAmbariInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Hive != nil { + in, out := &in.Hive, &out.Hive + *out = new(KafkaClusterMetastoresHiveInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Oozie != nil { + in, out := &in.Oozie, &out.Oozie + *out = new(KafkaClusterMetastoresOozieInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterMetastoresInitParameters. +func (in *KafkaClusterMetastoresInitParameters) DeepCopy() *KafkaClusterMetastoresInitParameters { + if in == nil { + return nil + } + out := new(KafkaClusterMetastoresInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KafkaClusterMetastoresObservation) DeepCopyInto(out *KafkaClusterMetastoresObservation) { + *out = *in + if in.Ambari != nil { + in, out := &in.Ambari, &out.Ambari + *out = new(KafkaClusterMetastoresAmbariObservation) + (*in).DeepCopyInto(*out) + } + if in.Hive != nil { + in, out := &in.Hive, &out.Hive + *out = new(KafkaClusterMetastoresHiveObservation) + (*in).DeepCopyInto(*out) + } + if in.Oozie != nil { + in, out := &in.Oozie, &out.Oozie + *out = new(KafkaClusterMetastoresOozieObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterMetastoresObservation. +func (in *KafkaClusterMetastoresObservation) DeepCopy() *KafkaClusterMetastoresObservation { + if in == nil { + return nil + } + out := new(KafkaClusterMetastoresObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaClusterMetastoresOozieInitParameters) DeepCopyInto(out *KafkaClusterMetastoresOozieInitParameters) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterMetastoresOozieInitParameters. +func (in *KafkaClusterMetastoresOozieInitParameters) DeepCopy() *KafkaClusterMetastoresOozieInitParameters { + if in == nil { + return nil + } + out := new(KafkaClusterMetastoresOozieInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KafkaClusterMetastoresOozieObservation) DeepCopyInto(out *KafkaClusterMetastoresOozieObservation) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterMetastoresOozieObservation. +func (in *KafkaClusterMetastoresOozieObservation) DeepCopy() *KafkaClusterMetastoresOozieObservation { + if in == nil { + return nil + } + out := new(KafkaClusterMetastoresOozieObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaClusterMetastoresOozieParameters) DeepCopyInto(out *KafkaClusterMetastoresOozieParameters) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + out.PasswordSecretRef = in.PasswordSecretRef + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterMetastoresOozieParameters. +func (in *KafkaClusterMetastoresOozieParameters) DeepCopy() *KafkaClusterMetastoresOozieParameters { + if in == nil { + return nil + } + out := new(KafkaClusterMetastoresOozieParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KafkaClusterMetastoresParameters) DeepCopyInto(out *KafkaClusterMetastoresParameters) { + *out = *in + if in.Ambari != nil { + in, out := &in.Ambari, &out.Ambari + *out = new(KafkaClusterMetastoresAmbariParameters) + (*in).DeepCopyInto(*out) + } + if in.Hive != nil { + in, out := &in.Hive, &out.Hive + *out = new(KafkaClusterMetastoresHiveParameters) + (*in).DeepCopyInto(*out) + } + if in.Oozie != nil { + in, out := &in.Oozie, &out.Oozie + *out = new(KafkaClusterMetastoresOozieParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterMetastoresParameters. +func (in *KafkaClusterMetastoresParameters) DeepCopy() *KafkaClusterMetastoresParameters { + if in == nil { + return nil + } + out := new(KafkaClusterMetastoresParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaClusterMonitorInitParameters) DeepCopyInto(out *KafkaClusterMonitorInitParameters) { + *out = *in + if in.LogAnalyticsWorkspaceID != nil { + in, out := &in.LogAnalyticsWorkspaceID, &out.LogAnalyticsWorkspaceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterMonitorInitParameters. +func (in *KafkaClusterMonitorInitParameters) DeepCopy() *KafkaClusterMonitorInitParameters { + if in == nil { + return nil + } + out := new(KafkaClusterMonitorInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KafkaClusterMonitorObservation) DeepCopyInto(out *KafkaClusterMonitorObservation) { + *out = *in + if in.LogAnalyticsWorkspaceID != nil { + in, out := &in.LogAnalyticsWorkspaceID, &out.LogAnalyticsWorkspaceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterMonitorObservation. +func (in *KafkaClusterMonitorObservation) DeepCopy() *KafkaClusterMonitorObservation { + if in == nil { + return nil + } + out := new(KafkaClusterMonitorObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaClusterMonitorParameters) DeepCopyInto(out *KafkaClusterMonitorParameters) { + *out = *in + if in.LogAnalyticsWorkspaceID != nil { + in, out := &in.LogAnalyticsWorkspaceID, &out.LogAnalyticsWorkspaceID + *out = new(string) + **out = **in + } + out.PrimaryKeySecretRef = in.PrimaryKeySecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterMonitorParameters. +func (in *KafkaClusterMonitorParameters) DeepCopy() *KafkaClusterMonitorParameters { + if in == nil { + return nil + } + out := new(KafkaClusterMonitorParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaClusterNetworkInitParameters) DeepCopyInto(out *KafkaClusterNetworkInitParameters) { + *out = *in + if in.ConnectionDirection != nil { + in, out := &in.ConnectionDirection, &out.ConnectionDirection + *out = new(string) + **out = **in + } + if in.PrivateLinkEnabled != nil { + in, out := &in.PrivateLinkEnabled, &out.PrivateLinkEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterNetworkInitParameters. 
+func (in *KafkaClusterNetworkInitParameters) DeepCopy() *KafkaClusterNetworkInitParameters { + if in == nil { + return nil + } + out := new(KafkaClusterNetworkInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaClusterNetworkObservation) DeepCopyInto(out *KafkaClusterNetworkObservation) { + *out = *in + if in.ConnectionDirection != nil { + in, out := &in.ConnectionDirection, &out.ConnectionDirection + *out = new(string) + **out = **in + } + if in.PrivateLinkEnabled != nil { + in, out := &in.PrivateLinkEnabled, &out.PrivateLinkEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterNetworkObservation. +func (in *KafkaClusterNetworkObservation) DeepCopy() *KafkaClusterNetworkObservation { + if in == nil { + return nil + } + out := new(KafkaClusterNetworkObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaClusterNetworkParameters) DeepCopyInto(out *KafkaClusterNetworkParameters) { + *out = *in + if in.ConnectionDirection != nil { + in, out := &in.ConnectionDirection, &out.ConnectionDirection + *out = new(string) + **out = **in + } + if in.PrivateLinkEnabled != nil { + in, out := &in.PrivateLinkEnabled, &out.PrivateLinkEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterNetworkParameters. +func (in *KafkaClusterNetworkParameters) DeepCopy() *KafkaClusterNetworkParameters { + if in == nil { + return nil + } + out := new(KafkaClusterNetworkParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KafkaClusterObservation) DeepCopyInto(out *KafkaClusterObservation) { + *out = *in + if in.ClusterVersion != nil { + in, out := &in.ClusterVersion, &out.ClusterVersion + *out = new(string) + **out = **in + } + if in.ComponentVersion != nil { + in, out := &in.ComponentVersion, &out.ComponentVersion + *out = new(KafkaClusterComponentVersionObservation) + (*in).DeepCopyInto(*out) + } + if in.ComputeIsolation != nil { + in, out := &in.ComputeIsolation, &out.ComputeIsolation + *out = new(KafkaClusterComputeIsolationObservation) + (*in).DeepCopyInto(*out) + } + if in.DiskEncryption != nil { + in, out := &in.DiskEncryption, &out.DiskEncryption + *out = make([]KafkaClusterDiskEncryptionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EncryptionInTransitEnabled != nil { + in, out := &in.EncryptionInTransitEnabled, &out.EncryptionInTransitEnabled + *out = new(bool) + **out = **in + } + if in.Extension != nil { + in, out := &in.Extension, &out.Extension + *out = new(KafkaClusterExtensionObservation) + (*in).DeepCopyInto(*out) + } + if in.Gateway != nil { + in, out := &in.Gateway, &out.Gateway + *out = new(KafkaClusterGatewayObservation) + (*in).DeepCopyInto(*out) + } + if in.HTTPSEndpoint != nil { + in, out := &in.HTTPSEndpoint, &out.HTTPSEndpoint + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.KafkaRestProxyEndpoint != nil { + in, out := &in.KafkaRestProxyEndpoint, &out.KafkaRestProxyEndpoint + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Metastores != nil { + in, out := &in.Metastores, &out.Metastores + *out = new(KafkaClusterMetastoresObservation) + (*in).DeepCopyInto(*out) + } + if in.Monitor != nil { + in, out := &in.Monitor, &out.Monitor + *out = new(KafkaClusterMonitorObservation) + (*in).DeepCopyInto(*out) + } + if 
in.Network != nil { + in, out := &in.Network, &out.Network + *out = new(KafkaClusterNetworkObservation) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.RestProxy != nil { + in, out := &in.RestProxy, &out.RestProxy + *out = new(RestProxyObservation) + (*in).DeepCopyInto(*out) + } + if in.Roles != nil { + in, out := &in.Roles, &out.Roles + *out = new(KafkaClusterRolesObservation) + (*in).DeepCopyInto(*out) + } + if in.SSHEndpoint != nil { + in, out := &in.SSHEndpoint, &out.SSHEndpoint + *out = new(string) + **out = **in + } + if in.SecurityProfile != nil { + in, out := &in.SecurityProfile, &out.SecurityProfile + *out = new(KafkaClusterSecurityProfileObservation) + (*in).DeepCopyInto(*out) + } + if in.StorageAccount != nil { + in, out := &in.StorageAccount, &out.StorageAccount + *out = make([]KafkaClusterStorageAccountObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StorageAccountGen2 != nil { + in, out := &in.StorageAccountGen2, &out.StorageAccountGen2 + *out = new(KafkaClusterStorageAccountGen2Observation) + (*in).DeepCopyInto(*out) + } + if in.TLSMinVersion != nil { + in, out := &in.TLSMinVersion, &out.TLSMinVersion + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Tier != nil { + in, out := &in.Tier, &out.Tier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterObservation. 
+func (in *KafkaClusterObservation) DeepCopy() *KafkaClusterObservation { + if in == nil { + return nil + } + out := new(KafkaClusterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaClusterParameters) DeepCopyInto(out *KafkaClusterParameters) { + *out = *in + if in.ClusterVersion != nil { + in, out := &in.ClusterVersion, &out.ClusterVersion + *out = new(string) + **out = **in + } + if in.ComponentVersion != nil { + in, out := &in.ComponentVersion, &out.ComponentVersion + *out = new(KafkaClusterComponentVersionParameters) + (*in).DeepCopyInto(*out) + } + if in.ComputeIsolation != nil { + in, out := &in.ComputeIsolation, &out.ComputeIsolation + *out = new(KafkaClusterComputeIsolationParameters) + (*in).DeepCopyInto(*out) + } + if in.DiskEncryption != nil { + in, out := &in.DiskEncryption, &out.DiskEncryption + *out = make([]KafkaClusterDiskEncryptionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EncryptionInTransitEnabled != nil { + in, out := &in.EncryptionInTransitEnabled, &out.EncryptionInTransitEnabled + *out = new(bool) + **out = **in + } + if in.Extension != nil { + in, out := &in.Extension, &out.Extension + *out = new(KafkaClusterExtensionParameters) + (*in).DeepCopyInto(*out) + } + if in.Gateway != nil { + in, out := &in.Gateway, &out.Gateway + *out = new(KafkaClusterGatewayParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Metastores != nil { + in, out := &in.Metastores, &out.Metastores + *out = new(KafkaClusterMetastoresParameters) + (*in).DeepCopyInto(*out) + } + if in.Monitor != nil { + in, out := &in.Monitor, &out.Monitor + *out = new(KafkaClusterMonitorParameters) + (*in).DeepCopyInto(*out) + } + if in.Network != nil { + in, out := &in.Network, &out.Network + *out 
= new(KafkaClusterNetworkParameters) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RestProxy != nil { + in, out := &in.RestProxy, &out.RestProxy + *out = new(RestProxyParameters) + (*in).DeepCopyInto(*out) + } + if in.Roles != nil { + in, out := &in.Roles, &out.Roles + *out = new(KafkaClusterRolesParameters) + (*in).DeepCopyInto(*out) + } + if in.SecurityProfile != nil { + in, out := &in.SecurityProfile, &out.SecurityProfile + *out = new(KafkaClusterSecurityProfileParameters) + (*in).DeepCopyInto(*out) + } + if in.StorageAccount != nil { + in, out := &in.StorageAccount, &out.StorageAccount + *out = make([]KafkaClusterStorageAccountParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StorageAccountGen2 != nil { + in, out := &in.StorageAccountGen2, &out.StorageAccountGen2 + *out = new(KafkaClusterStorageAccountGen2Parameters) + (*in).DeepCopyInto(*out) + } + if in.TLSMinVersion != nil { + in, out := &in.TLSMinVersion, &out.TLSMinVersion + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Tier != nil { + in, out := &in.Tier, &out.Tier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating 
a new KafkaClusterParameters. +func (in *KafkaClusterParameters) DeepCopy() *KafkaClusterParameters { + if in == nil { + return nil + } + out := new(KafkaClusterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaClusterRolesHeadNodeInitParameters) DeepCopyInto(out *KafkaClusterRolesHeadNodeInitParameters) { + *out = *in + if in.SSHKeys != nil { + in, out := &in.SSHKeys, &out.SSHKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScriptActions != nil { + in, out := &in.ScriptActions, &out.ScriptActions + *out = make([]KafkaClusterRolesHeadNodeScriptActionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } + if in.VirtualNetworkID != nil { + in, out := &in.VirtualNetworkID, &out.VirtualNetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterRolesHeadNodeInitParameters. 
+func (in *KafkaClusterRolesHeadNodeInitParameters) DeepCopy() *KafkaClusterRolesHeadNodeInitParameters { + if in == nil { + return nil + } + out := new(KafkaClusterRolesHeadNodeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaClusterRolesHeadNodeObservation) DeepCopyInto(out *KafkaClusterRolesHeadNodeObservation) { + *out = *in + if in.SSHKeys != nil { + in, out := &in.SSHKeys, &out.SSHKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScriptActions != nil { + in, out := &in.ScriptActions, &out.ScriptActions + *out = make([]KafkaClusterRolesHeadNodeScriptActionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } + if in.VirtualNetworkID != nil { + in, out := &in.VirtualNetworkID, &out.VirtualNetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterRolesHeadNodeObservation. +func (in *KafkaClusterRolesHeadNodeObservation) DeepCopy() *KafkaClusterRolesHeadNodeObservation { + if in == nil { + return nil + } + out := new(KafkaClusterRolesHeadNodeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KafkaClusterRolesHeadNodeParameters) DeepCopyInto(out *KafkaClusterRolesHeadNodeParameters) { + *out = *in + if in.PasswordSecretRef != nil { + in, out := &in.PasswordSecretRef, &out.PasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.SSHKeys != nil { + in, out := &in.SSHKeys, &out.SSHKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScriptActions != nil { + in, out := &in.ScriptActions, &out.ScriptActions + *out = make([]KafkaClusterRolesHeadNodeScriptActionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } + if in.VirtualNetworkID != nil { + in, out := &in.VirtualNetworkID, &out.VirtualNetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterRolesHeadNodeParameters. +func (in *KafkaClusterRolesHeadNodeParameters) DeepCopy() *KafkaClusterRolesHeadNodeParameters { + if in == nil { + return nil + } + out := new(KafkaClusterRolesHeadNodeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KafkaClusterRolesHeadNodeScriptActionsInitParameters) DeepCopyInto(out *KafkaClusterRolesHeadNodeScriptActionsInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterRolesHeadNodeScriptActionsInitParameters. +func (in *KafkaClusterRolesHeadNodeScriptActionsInitParameters) DeepCopy() *KafkaClusterRolesHeadNodeScriptActionsInitParameters { + if in == nil { + return nil + } + out := new(KafkaClusterRolesHeadNodeScriptActionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaClusterRolesHeadNodeScriptActionsObservation) DeepCopyInto(out *KafkaClusterRolesHeadNodeScriptActionsObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterRolesHeadNodeScriptActionsObservation. +func (in *KafkaClusterRolesHeadNodeScriptActionsObservation) DeepCopy() *KafkaClusterRolesHeadNodeScriptActionsObservation { + if in == nil { + return nil + } + out := new(KafkaClusterRolesHeadNodeScriptActionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KafkaClusterRolesHeadNodeScriptActionsParameters) DeepCopyInto(out *KafkaClusterRolesHeadNodeScriptActionsParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterRolesHeadNodeScriptActionsParameters. +func (in *KafkaClusterRolesHeadNodeScriptActionsParameters) DeepCopy() *KafkaClusterRolesHeadNodeScriptActionsParameters { + if in == nil { + return nil + } + out := new(KafkaClusterRolesHeadNodeScriptActionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaClusterRolesInitParameters) DeepCopyInto(out *KafkaClusterRolesInitParameters) { + *out = *in + if in.HeadNode != nil { + in, out := &in.HeadNode, &out.HeadNode + *out = new(KafkaClusterRolesHeadNodeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.KafkaManagementNode != nil { + in, out := &in.KafkaManagementNode, &out.KafkaManagementNode + *out = new(KafkaManagementNodeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.WorkerNode != nil { + in, out := &in.WorkerNode, &out.WorkerNode + *out = new(KafkaClusterRolesWorkerNodeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ZookeeperNode != nil { + in, out := &in.ZookeeperNode, &out.ZookeeperNode + *out = new(KafkaClusterRolesZookeeperNodeInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterRolesInitParameters. 
+func (in *KafkaClusterRolesInitParameters) DeepCopy() *KafkaClusterRolesInitParameters { + if in == nil { + return nil + } + out := new(KafkaClusterRolesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaClusterRolesObservation) DeepCopyInto(out *KafkaClusterRolesObservation) { + *out = *in + if in.HeadNode != nil { + in, out := &in.HeadNode, &out.HeadNode + *out = new(KafkaClusterRolesHeadNodeObservation) + (*in).DeepCopyInto(*out) + } + if in.KafkaManagementNode != nil { + in, out := &in.KafkaManagementNode, &out.KafkaManagementNode + *out = new(KafkaManagementNodeObservation) + (*in).DeepCopyInto(*out) + } + if in.WorkerNode != nil { + in, out := &in.WorkerNode, &out.WorkerNode + *out = new(KafkaClusterRolesWorkerNodeObservation) + (*in).DeepCopyInto(*out) + } + if in.ZookeeperNode != nil { + in, out := &in.ZookeeperNode, &out.ZookeeperNode + *out = new(KafkaClusterRolesZookeeperNodeObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterRolesObservation. +func (in *KafkaClusterRolesObservation) DeepCopy() *KafkaClusterRolesObservation { + if in == nil { + return nil + } + out := new(KafkaClusterRolesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KafkaClusterRolesParameters) DeepCopyInto(out *KafkaClusterRolesParameters) { + *out = *in + if in.HeadNode != nil { + in, out := &in.HeadNode, &out.HeadNode + *out = new(KafkaClusterRolesHeadNodeParameters) + (*in).DeepCopyInto(*out) + } + if in.KafkaManagementNode != nil { + in, out := &in.KafkaManagementNode, &out.KafkaManagementNode + *out = new(KafkaManagementNodeParameters) + (*in).DeepCopyInto(*out) + } + if in.WorkerNode != nil { + in, out := &in.WorkerNode, &out.WorkerNode + *out = new(KafkaClusterRolesWorkerNodeParameters) + (*in).DeepCopyInto(*out) + } + if in.ZookeeperNode != nil { + in, out := &in.ZookeeperNode, &out.ZookeeperNode + *out = new(KafkaClusterRolesZookeeperNodeParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterRolesParameters. +func (in *KafkaClusterRolesParameters) DeepCopy() *KafkaClusterRolesParameters { + if in == nil { + return nil + } + out := new(KafkaClusterRolesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KafkaClusterRolesWorkerNodeInitParameters) DeepCopyInto(out *KafkaClusterRolesWorkerNodeInitParameters) { + *out = *in + if in.NumberOfDisksPerNode != nil { + in, out := &in.NumberOfDisksPerNode, &out.NumberOfDisksPerNode + *out = new(float64) + **out = **in + } + if in.SSHKeys != nil { + in, out := &in.SSHKeys, &out.SSHKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScriptActions != nil { + in, out := &in.ScriptActions, &out.ScriptActions + *out = make([]KafkaClusterRolesWorkerNodeScriptActionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.TargetInstanceCount != nil { + in, out := &in.TargetInstanceCount, &out.TargetInstanceCount + *out = new(float64) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } + if in.VirtualNetworkID != nil { + in, out := &in.VirtualNetworkID, &out.VirtualNetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterRolesWorkerNodeInitParameters. 
+func (in *KafkaClusterRolesWorkerNodeInitParameters) DeepCopy() *KafkaClusterRolesWorkerNodeInitParameters { + if in == nil { + return nil + } + out := new(KafkaClusterRolesWorkerNodeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaClusterRolesWorkerNodeObservation) DeepCopyInto(out *KafkaClusterRolesWorkerNodeObservation) { + *out = *in + if in.NumberOfDisksPerNode != nil { + in, out := &in.NumberOfDisksPerNode, &out.NumberOfDisksPerNode + *out = new(float64) + **out = **in + } + if in.SSHKeys != nil { + in, out := &in.SSHKeys, &out.SSHKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScriptActions != nil { + in, out := &in.ScriptActions, &out.ScriptActions + *out = make([]KafkaClusterRolesWorkerNodeScriptActionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.TargetInstanceCount != nil { + in, out := &in.TargetInstanceCount, &out.TargetInstanceCount + *out = new(float64) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } + if in.VirtualNetworkID != nil { + in, out := &in.VirtualNetworkID, &out.VirtualNetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterRolesWorkerNodeObservation. 
+func (in *KafkaClusterRolesWorkerNodeObservation) DeepCopy() *KafkaClusterRolesWorkerNodeObservation { + if in == nil { + return nil + } + out := new(KafkaClusterRolesWorkerNodeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaClusterRolesWorkerNodeParameters) DeepCopyInto(out *KafkaClusterRolesWorkerNodeParameters) { + *out = *in + if in.NumberOfDisksPerNode != nil { + in, out := &in.NumberOfDisksPerNode, &out.NumberOfDisksPerNode + *out = new(float64) + **out = **in + } + if in.PasswordSecretRef != nil { + in, out := &in.PasswordSecretRef, &out.PasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.SSHKeys != nil { + in, out := &in.SSHKeys, &out.SSHKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScriptActions != nil { + in, out := &in.ScriptActions, &out.ScriptActions + *out = make([]KafkaClusterRolesWorkerNodeScriptActionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.TargetInstanceCount != nil { + in, out := &in.TargetInstanceCount, &out.TargetInstanceCount + *out = new(float64) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } + if in.VirtualNetworkID != nil { + in, out 
:= &in.VirtualNetworkID, &out.VirtualNetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterRolesWorkerNodeParameters. +func (in *KafkaClusterRolesWorkerNodeParameters) DeepCopy() *KafkaClusterRolesWorkerNodeParameters { + if in == nil { + return nil + } + out := new(KafkaClusterRolesWorkerNodeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaClusterRolesWorkerNodeScriptActionsInitParameters) DeepCopyInto(out *KafkaClusterRolesWorkerNodeScriptActionsInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterRolesWorkerNodeScriptActionsInitParameters. +func (in *KafkaClusterRolesWorkerNodeScriptActionsInitParameters) DeepCopy() *KafkaClusterRolesWorkerNodeScriptActionsInitParameters { + if in == nil { + return nil + } + out := new(KafkaClusterRolesWorkerNodeScriptActionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KafkaClusterRolesWorkerNodeScriptActionsObservation) DeepCopyInto(out *KafkaClusterRolesWorkerNodeScriptActionsObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterRolesWorkerNodeScriptActionsObservation. +func (in *KafkaClusterRolesWorkerNodeScriptActionsObservation) DeepCopy() *KafkaClusterRolesWorkerNodeScriptActionsObservation { + if in == nil { + return nil + } + out := new(KafkaClusterRolesWorkerNodeScriptActionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaClusterRolesWorkerNodeScriptActionsParameters) DeepCopyInto(out *KafkaClusterRolesWorkerNodeScriptActionsParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterRolesWorkerNodeScriptActionsParameters. +func (in *KafkaClusterRolesWorkerNodeScriptActionsParameters) DeepCopy() *KafkaClusterRolesWorkerNodeScriptActionsParameters { + if in == nil { + return nil + } + out := new(KafkaClusterRolesWorkerNodeScriptActionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KafkaClusterRolesZookeeperNodeInitParameters) DeepCopyInto(out *KafkaClusterRolesZookeeperNodeInitParameters) { + *out = *in + if in.SSHKeys != nil { + in, out := &in.SSHKeys, &out.SSHKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScriptActions != nil { + in, out := &in.ScriptActions, &out.ScriptActions + *out = make([]KafkaClusterRolesZookeeperNodeScriptActionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } + if in.VirtualNetworkID != nil { + in, out := &in.VirtualNetworkID, &out.VirtualNetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterRolesZookeeperNodeInitParameters. +func (in *KafkaClusterRolesZookeeperNodeInitParameters) DeepCopy() *KafkaClusterRolesZookeeperNodeInitParameters { + if in == nil { + return nil + } + out := new(KafkaClusterRolesZookeeperNodeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KafkaClusterRolesZookeeperNodeObservation) DeepCopyInto(out *KafkaClusterRolesZookeeperNodeObservation) { + *out = *in + if in.SSHKeys != nil { + in, out := &in.SSHKeys, &out.SSHKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScriptActions != nil { + in, out := &in.ScriptActions, &out.ScriptActions + *out = make([]KafkaClusterRolesZookeeperNodeScriptActionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } + if in.VirtualNetworkID != nil { + in, out := &in.VirtualNetworkID, &out.VirtualNetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterRolesZookeeperNodeObservation. +func (in *KafkaClusterRolesZookeeperNodeObservation) DeepCopy() *KafkaClusterRolesZookeeperNodeObservation { + if in == nil { + return nil + } + out := new(KafkaClusterRolesZookeeperNodeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KafkaClusterRolesZookeeperNodeParameters) DeepCopyInto(out *KafkaClusterRolesZookeeperNodeParameters) { + *out = *in + if in.PasswordSecretRef != nil { + in, out := &in.PasswordSecretRef, &out.PasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.SSHKeys != nil { + in, out := &in.SSHKeys, &out.SSHKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScriptActions != nil { + in, out := &in.ScriptActions, &out.ScriptActions + *out = make([]KafkaClusterRolesZookeeperNodeScriptActionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } + if in.VirtualNetworkID != nil { + in, out := &in.VirtualNetworkID, &out.VirtualNetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterRolesZookeeperNodeParameters. +func (in *KafkaClusterRolesZookeeperNodeParameters) DeepCopy() *KafkaClusterRolesZookeeperNodeParameters { + if in == nil { + return nil + } + out := new(KafkaClusterRolesZookeeperNodeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KafkaClusterRolesZookeeperNodeScriptActionsInitParameters) DeepCopyInto(out *KafkaClusterRolesZookeeperNodeScriptActionsInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterRolesZookeeperNodeScriptActionsInitParameters. +func (in *KafkaClusterRolesZookeeperNodeScriptActionsInitParameters) DeepCopy() *KafkaClusterRolesZookeeperNodeScriptActionsInitParameters { + if in == nil { + return nil + } + out := new(KafkaClusterRolesZookeeperNodeScriptActionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaClusterRolesZookeeperNodeScriptActionsObservation) DeepCopyInto(out *KafkaClusterRolesZookeeperNodeScriptActionsObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterRolesZookeeperNodeScriptActionsObservation. 
+func (in *KafkaClusterRolesZookeeperNodeScriptActionsObservation) DeepCopy() *KafkaClusterRolesZookeeperNodeScriptActionsObservation { + if in == nil { + return nil + } + out := new(KafkaClusterRolesZookeeperNodeScriptActionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaClusterRolesZookeeperNodeScriptActionsParameters) DeepCopyInto(out *KafkaClusterRolesZookeeperNodeScriptActionsParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterRolesZookeeperNodeScriptActionsParameters. +func (in *KafkaClusterRolesZookeeperNodeScriptActionsParameters) DeepCopy() *KafkaClusterRolesZookeeperNodeScriptActionsParameters { + if in == nil { + return nil + } + out := new(KafkaClusterRolesZookeeperNodeScriptActionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KafkaClusterSecurityProfileInitParameters) DeepCopyInto(out *KafkaClusterSecurityProfileInitParameters) { + *out = *in + if in.AaddsResourceID != nil { + in, out := &in.AaddsResourceID, &out.AaddsResourceID + *out = new(string) + **out = **in + } + if in.ClusterUsersGroupDNS != nil { + in, out := &in.ClusterUsersGroupDNS, &out.ClusterUsersGroupDNS + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.DomainUsername != nil { + in, out := &in.DomainUsername, &out.DomainUsername + *out = new(string) + **out = **in + } + if in.LdapsUrls != nil { + in, out := &in.LdapsUrls, &out.LdapsUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MsiResourceID != nil { + in, out := &in.MsiResourceID, &out.MsiResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterSecurityProfileInitParameters. +func (in *KafkaClusterSecurityProfileInitParameters) DeepCopy() *KafkaClusterSecurityProfileInitParameters { + if in == nil { + return nil + } + out := new(KafkaClusterSecurityProfileInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KafkaClusterSecurityProfileObservation) DeepCopyInto(out *KafkaClusterSecurityProfileObservation) { + *out = *in + if in.AaddsResourceID != nil { + in, out := &in.AaddsResourceID, &out.AaddsResourceID + *out = new(string) + **out = **in + } + if in.ClusterUsersGroupDNS != nil { + in, out := &in.ClusterUsersGroupDNS, &out.ClusterUsersGroupDNS + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.DomainUsername != nil { + in, out := &in.DomainUsername, &out.DomainUsername + *out = new(string) + **out = **in + } + if in.LdapsUrls != nil { + in, out := &in.LdapsUrls, &out.LdapsUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MsiResourceID != nil { + in, out := &in.MsiResourceID, &out.MsiResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterSecurityProfileObservation. +func (in *KafkaClusterSecurityProfileObservation) DeepCopy() *KafkaClusterSecurityProfileObservation { + if in == nil { + return nil + } + out := new(KafkaClusterSecurityProfileObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KafkaClusterSecurityProfileParameters) DeepCopyInto(out *KafkaClusterSecurityProfileParameters) { + *out = *in + if in.AaddsResourceID != nil { + in, out := &in.AaddsResourceID, &out.AaddsResourceID + *out = new(string) + **out = **in + } + if in.ClusterUsersGroupDNS != nil { + in, out := &in.ClusterUsersGroupDNS, &out.ClusterUsersGroupDNS + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + out.DomainUserPasswordSecretRef = in.DomainUserPasswordSecretRef + if in.DomainUsername != nil { + in, out := &in.DomainUsername, &out.DomainUsername + *out = new(string) + **out = **in + } + if in.LdapsUrls != nil { + in, out := &in.LdapsUrls, &out.LdapsUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MsiResourceID != nil { + in, out := &in.MsiResourceID, &out.MsiResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterSecurityProfileParameters. +func (in *KafkaClusterSecurityProfileParameters) DeepCopy() *KafkaClusterSecurityProfileParameters { + if in == nil { + return nil + } + out := new(KafkaClusterSecurityProfileParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaClusterSpec) DeepCopyInto(out *KafkaClusterSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterSpec. 
+func (in *KafkaClusterSpec) DeepCopy() *KafkaClusterSpec { + if in == nil { + return nil + } + out := new(KafkaClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaClusterStatus) DeepCopyInto(out *KafkaClusterStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterStatus. +func (in *KafkaClusterStatus) DeepCopy() *KafkaClusterStatus { + if in == nil { + return nil + } + out := new(KafkaClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaClusterStorageAccountGen2InitParameters) DeepCopyInto(out *KafkaClusterStorageAccountGen2InitParameters) { + *out = *in + if in.FileSystemID != nil { + in, out := &in.FileSystemID, &out.FileSystemID + *out = new(string) + **out = **in + } + if in.IsDefault != nil { + in, out := &in.IsDefault, &out.IsDefault + *out = new(bool) + **out = **in + } + if in.ManagedIdentityResourceID != nil { + in, out := &in.ManagedIdentityResourceID, &out.ManagedIdentityResourceID + *out = new(string) + **out = **in + } + if in.StorageResourceID != nil { + in, out := &in.StorageResourceID, &out.StorageResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterStorageAccountGen2InitParameters. 
+func (in *KafkaClusterStorageAccountGen2InitParameters) DeepCopy() *KafkaClusterStorageAccountGen2InitParameters { + if in == nil { + return nil + } + out := new(KafkaClusterStorageAccountGen2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaClusterStorageAccountGen2Observation) DeepCopyInto(out *KafkaClusterStorageAccountGen2Observation) { + *out = *in + if in.FileSystemID != nil { + in, out := &in.FileSystemID, &out.FileSystemID + *out = new(string) + **out = **in + } + if in.IsDefault != nil { + in, out := &in.IsDefault, &out.IsDefault + *out = new(bool) + **out = **in + } + if in.ManagedIdentityResourceID != nil { + in, out := &in.ManagedIdentityResourceID, &out.ManagedIdentityResourceID + *out = new(string) + **out = **in + } + if in.StorageResourceID != nil { + in, out := &in.StorageResourceID, &out.StorageResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterStorageAccountGen2Observation. +func (in *KafkaClusterStorageAccountGen2Observation) DeepCopy() *KafkaClusterStorageAccountGen2Observation { + if in == nil { + return nil + } + out := new(KafkaClusterStorageAccountGen2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KafkaClusterStorageAccountGen2Parameters) DeepCopyInto(out *KafkaClusterStorageAccountGen2Parameters) { + *out = *in + if in.FileSystemID != nil { + in, out := &in.FileSystemID, &out.FileSystemID + *out = new(string) + **out = **in + } + if in.IsDefault != nil { + in, out := &in.IsDefault, &out.IsDefault + *out = new(bool) + **out = **in + } + if in.ManagedIdentityResourceID != nil { + in, out := &in.ManagedIdentityResourceID, &out.ManagedIdentityResourceID + *out = new(string) + **out = **in + } + if in.StorageResourceID != nil { + in, out := &in.StorageResourceID, &out.StorageResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterStorageAccountGen2Parameters. +func (in *KafkaClusterStorageAccountGen2Parameters) DeepCopy() *KafkaClusterStorageAccountGen2Parameters { + if in == nil { + return nil + } + out := new(KafkaClusterStorageAccountGen2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KafkaClusterStorageAccountInitParameters) DeepCopyInto(out *KafkaClusterStorageAccountInitParameters) { + *out = *in + if in.IsDefault != nil { + in, out := &in.IsDefault, &out.IsDefault + *out = new(bool) + **out = **in + } + if in.StorageContainerID != nil { + in, out := &in.StorageContainerID, &out.StorageContainerID + *out = new(string) + **out = **in + } + if in.StorageContainerIDRef != nil { + in, out := &in.StorageContainerIDRef, &out.StorageContainerIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StorageContainerIDSelector != nil { + in, out := &in.StorageContainerIDSelector, &out.StorageContainerIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StorageResourceID != nil { + in, out := &in.StorageResourceID, &out.StorageResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterStorageAccountInitParameters. +func (in *KafkaClusterStorageAccountInitParameters) DeepCopy() *KafkaClusterStorageAccountInitParameters { + if in == nil { + return nil + } + out := new(KafkaClusterStorageAccountInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaClusterStorageAccountObservation) DeepCopyInto(out *KafkaClusterStorageAccountObservation) { + *out = *in + if in.IsDefault != nil { + in, out := &in.IsDefault, &out.IsDefault + *out = new(bool) + **out = **in + } + if in.StorageContainerID != nil { + in, out := &in.StorageContainerID, &out.StorageContainerID + *out = new(string) + **out = **in + } + if in.StorageResourceID != nil { + in, out := &in.StorageResourceID, &out.StorageResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterStorageAccountObservation. 
+func (in *KafkaClusterStorageAccountObservation) DeepCopy() *KafkaClusterStorageAccountObservation { + if in == nil { + return nil + } + out := new(KafkaClusterStorageAccountObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaClusterStorageAccountParameters) DeepCopyInto(out *KafkaClusterStorageAccountParameters) { + *out = *in + if in.IsDefault != nil { + in, out := &in.IsDefault, &out.IsDefault + *out = new(bool) + **out = **in + } + out.StorageAccountKeySecretRef = in.StorageAccountKeySecretRef + if in.StorageContainerID != nil { + in, out := &in.StorageContainerID, &out.StorageContainerID + *out = new(string) + **out = **in + } + if in.StorageContainerIDRef != nil { + in, out := &in.StorageContainerIDRef, &out.StorageContainerIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StorageContainerIDSelector != nil { + in, out := &in.StorageContainerIDSelector, &out.StorageContainerIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StorageResourceID != nil { + in, out := &in.StorageResourceID, &out.StorageResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaClusterStorageAccountParameters. +func (in *KafkaClusterStorageAccountParameters) DeepCopy() *KafkaClusterStorageAccountParameters { + if in == nil { + return nil + } + out := new(KafkaClusterStorageAccountParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KafkaManagementNodeInitParameters) DeepCopyInto(out *KafkaManagementNodeInitParameters) { + *out = *in + if in.SSHKeys != nil { + in, out := &in.SSHKeys, &out.SSHKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScriptActions != nil { + in, out := &in.ScriptActions, &out.ScriptActions + *out = make([]KafkaManagementNodeScriptActionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } + if in.VirtualNetworkID != nil { + in, out := &in.VirtualNetworkID, &out.VirtualNetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaManagementNodeInitParameters. +func (in *KafkaManagementNodeInitParameters) DeepCopy() *KafkaManagementNodeInitParameters { + if in == nil { + return nil + } + out := new(KafkaManagementNodeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KafkaManagementNodeObservation) DeepCopyInto(out *KafkaManagementNodeObservation) { + *out = *in + if in.SSHKeys != nil { + in, out := &in.SSHKeys, &out.SSHKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScriptActions != nil { + in, out := &in.ScriptActions, &out.ScriptActions + *out = make([]KafkaManagementNodeScriptActionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } + if in.VirtualNetworkID != nil { + in, out := &in.VirtualNetworkID, &out.VirtualNetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaManagementNodeObservation. +func (in *KafkaManagementNodeObservation) DeepCopy() *KafkaManagementNodeObservation { + if in == nil { + return nil + } + out := new(KafkaManagementNodeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KafkaManagementNodeParameters) DeepCopyInto(out *KafkaManagementNodeParameters) { + *out = *in + if in.PasswordSecretRef != nil { + in, out := &in.PasswordSecretRef, &out.PasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.SSHKeys != nil { + in, out := &in.SSHKeys, &out.SSHKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScriptActions != nil { + in, out := &in.ScriptActions, &out.ScriptActions + *out = make([]KafkaManagementNodeScriptActionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } + if in.VirtualNetworkID != nil { + in, out := &in.VirtualNetworkID, &out.VirtualNetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaManagementNodeParameters. +func (in *KafkaManagementNodeParameters) DeepCopy() *KafkaManagementNodeParameters { + if in == nil { + return nil + } + out := new(KafkaManagementNodeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KafkaManagementNodeScriptActionsInitParameters) DeepCopyInto(out *KafkaManagementNodeScriptActionsInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaManagementNodeScriptActionsInitParameters. +func (in *KafkaManagementNodeScriptActionsInitParameters) DeepCopy() *KafkaManagementNodeScriptActionsInitParameters { + if in == nil { + return nil + } + out := new(KafkaManagementNodeScriptActionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaManagementNodeScriptActionsObservation) DeepCopyInto(out *KafkaManagementNodeScriptActionsObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaManagementNodeScriptActionsObservation. +func (in *KafkaManagementNodeScriptActionsObservation) DeepCopy() *KafkaManagementNodeScriptActionsObservation { + if in == nil { + return nil + } + out := new(KafkaManagementNodeScriptActionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KafkaManagementNodeScriptActionsParameters) DeepCopyInto(out *KafkaManagementNodeScriptActionsParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaManagementNodeScriptActionsParameters. +func (in *KafkaManagementNodeScriptActionsParameters) DeepCopy() *KafkaManagementNodeScriptActionsParameters { + if in == nil { + return nil + } + out := new(KafkaManagementNodeScriptActionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetastoresAmbariInitParameters) DeepCopyInto(out *MetastoresAmbariInitParameters) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetastoresAmbariInitParameters. +func (in *MetastoresAmbariInitParameters) DeepCopy() *MetastoresAmbariInitParameters { + if in == nil { + return nil + } + out := new(MetastoresAmbariInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetastoresAmbariObservation) DeepCopyInto(out *MetastoresAmbariObservation) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetastoresAmbariObservation. +func (in *MetastoresAmbariObservation) DeepCopy() *MetastoresAmbariObservation { + if in == nil { + return nil + } + out := new(MetastoresAmbariObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetastoresAmbariParameters) DeepCopyInto(out *MetastoresAmbariParameters) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + out.PasswordSecretRef = in.PasswordSecretRef + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetastoresAmbariParameters. +func (in *MetastoresAmbariParameters) DeepCopy() *MetastoresAmbariParameters { + if in == nil { + return nil + } + out := new(MetastoresAmbariParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetastoresHiveInitParameters) DeepCopyInto(out *MetastoresHiveInitParameters) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetastoresHiveInitParameters. +func (in *MetastoresHiveInitParameters) DeepCopy() *MetastoresHiveInitParameters { + if in == nil { + return nil + } + out := new(MetastoresHiveInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetastoresHiveObservation) DeepCopyInto(out *MetastoresHiveObservation) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetastoresHiveObservation. +func (in *MetastoresHiveObservation) DeepCopy() *MetastoresHiveObservation { + if in == nil { + return nil + } + out := new(MetastoresHiveObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetastoresHiveParameters) DeepCopyInto(out *MetastoresHiveParameters) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + out.PasswordSecretRef = in.PasswordSecretRef + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetastoresHiveParameters. +func (in *MetastoresHiveParameters) DeepCopy() *MetastoresHiveParameters { + if in == nil { + return nil + } + out := new(MetastoresHiveParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetastoresInitParameters) DeepCopyInto(out *MetastoresInitParameters) { + *out = *in + if in.Ambari != nil { + in, out := &in.Ambari, &out.Ambari + *out = new(AmbariInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Hive != nil { + in, out := &in.Hive, &out.Hive + *out = new(HiveInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Oozie != nil { + in, out := &in.Oozie, &out.Oozie + *out = new(OozieInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetastoresInitParameters. +func (in *MetastoresInitParameters) DeepCopy() *MetastoresInitParameters { + if in == nil { + return nil + } + out := new(MetastoresInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetastoresObservation) DeepCopyInto(out *MetastoresObservation) { + *out = *in + if in.Ambari != nil { + in, out := &in.Ambari, &out.Ambari + *out = new(AmbariObservation) + (*in).DeepCopyInto(*out) + } + if in.Hive != nil { + in, out := &in.Hive, &out.Hive + *out = new(HiveObservation) + (*in).DeepCopyInto(*out) + } + if in.Oozie != nil { + in, out := &in.Oozie, &out.Oozie + *out = new(OozieObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetastoresObservation. +func (in *MetastoresObservation) DeepCopy() *MetastoresObservation { + if in == nil { + return nil + } + out := new(MetastoresObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetastoresOozieInitParameters) DeepCopyInto(out *MetastoresOozieInitParameters) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetastoresOozieInitParameters. +func (in *MetastoresOozieInitParameters) DeepCopy() *MetastoresOozieInitParameters { + if in == nil { + return nil + } + out := new(MetastoresOozieInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetastoresOozieObservation) DeepCopyInto(out *MetastoresOozieObservation) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetastoresOozieObservation. +func (in *MetastoresOozieObservation) DeepCopy() *MetastoresOozieObservation { + if in == nil { + return nil + } + out := new(MetastoresOozieObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetastoresOozieParameters) DeepCopyInto(out *MetastoresOozieParameters) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + out.PasswordSecretRef = in.PasswordSecretRef + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetastoresOozieParameters. +func (in *MetastoresOozieParameters) DeepCopy() *MetastoresOozieParameters { + if in == nil { + return nil + } + out := new(MetastoresOozieParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetastoresParameters) DeepCopyInto(out *MetastoresParameters) { + *out = *in + if in.Ambari != nil { + in, out := &in.Ambari, &out.Ambari + *out = new(AmbariParameters) + (*in).DeepCopyInto(*out) + } + if in.Hive != nil { + in, out := &in.Hive, &out.Hive + *out = new(HiveParameters) + (*in).DeepCopyInto(*out) + } + if in.Oozie != nil { + in, out := &in.Oozie, &out.Oozie + *out = new(OozieParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetastoresParameters. +func (in *MetastoresParameters) DeepCopy() *MetastoresParameters { + if in == nil { + return nil + } + out := new(MetastoresParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorInitParameters) DeepCopyInto(out *MonitorInitParameters) { + *out = *in + if in.LogAnalyticsWorkspaceID != nil { + in, out := &in.LogAnalyticsWorkspaceID, &out.LogAnalyticsWorkspaceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorInitParameters. +func (in *MonitorInitParameters) DeepCopy() *MonitorInitParameters { + if in == nil { + return nil + } + out := new(MonitorInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorObservation) DeepCopyInto(out *MonitorObservation) { + *out = *in + if in.LogAnalyticsWorkspaceID != nil { + in, out := &in.LogAnalyticsWorkspaceID, &out.LogAnalyticsWorkspaceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorObservation. 
+func (in *MonitorObservation) DeepCopy() *MonitorObservation { + if in == nil { + return nil + } + out := new(MonitorObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorParameters) DeepCopyInto(out *MonitorParameters) { + *out = *in + if in.LogAnalyticsWorkspaceID != nil { + in, out := &in.LogAnalyticsWorkspaceID, &out.LogAnalyticsWorkspaceID + *out = new(string) + **out = **in + } + out.PrimaryKeySecretRef = in.PrimaryKeySecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorParameters. +func (in *MonitorParameters) DeepCopy() *MonitorParameters { + if in == nil { + return nil + } + out := new(MonitorParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkInitParameters) DeepCopyInto(out *NetworkInitParameters) { + *out = *in + if in.ConnectionDirection != nil { + in, out := &in.ConnectionDirection, &out.ConnectionDirection + *out = new(string) + **out = **in + } + if in.PrivateLinkEnabled != nil { + in, out := &in.PrivateLinkEnabled, &out.PrivateLinkEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInitParameters. +func (in *NetworkInitParameters) DeepCopy() *NetworkInitParameters { + if in == nil { + return nil + } + out := new(NetworkInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkObservation) DeepCopyInto(out *NetworkObservation) { + *out = *in + if in.ConnectionDirection != nil { + in, out := &in.ConnectionDirection, &out.ConnectionDirection + *out = new(string) + **out = **in + } + if in.PrivateLinkEnabled != nil { + in, out := &in.PrivateLinkEnabled, &out.PrivateLinkEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkObservation. +func (in *NetworkObservation) DeepCopy() *NetworkObservation { + if in == nil { + return nil + } + out := new(NetworkObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkParameters) DeepCopyInto(out *NetworkParameters) { + *out = *in + if in.ConnectionDirection != nil { + in, out := &in.ConnectionDirection, &out.ConnectionDirection + *out = new(string) + **out = **in + } + if in.PrivateLinkEnabled != nil { + in, out := &in.PrivateLinkEnabled, &out.PrivateLinkEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkParameters. +func (in *NetworkParameters) DeepCopy() *NetworkParameters { + if in == nil { + return nil + } + out := new(NetworkParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OozieInitParameters) DeepCopyInto(out *OozieInitParameters) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OozieInitParameters. +func (in *OozieInitParameters) DeepCopy() *OozieInitParameters { + if in == nil { + return nil + } + out := new(OozieInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OozieObservation) DeepCopyInto(out *OozieObservation) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OozieObservation. +func (in *OozieObservation) DeepCopy() *OozieObservation { + if in == nil { + return nil + } + out := new(OozieObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OozieParameters) DeepCopyInto(out *OozieParameters) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + out.PasswordSecretRef = in.PasswordSecretRef + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OozieParameters. +func (in *OozieParameters) DeepCopy() *OozieParameters { + if in == nil { + return nil + } + out := new(OozieParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecurrenceInitParameters) DeepCopyInto(out *RecurrenceInitParameters) { + *out = *in + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = make([]ScheduleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Timezone != nil { + in, out := &in.Timezone, &out.Timezone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecurrenceInitParameters. +func (in *RecurrenceInitParameters) DeepCopy() *RecurrenceInitParameters { + if in == nil { + return nil + } + out := new(RecurrenceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RecurrenceObservation) DeepCopyInto(out *RecurrenceObservation) { + *out = *in + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = make([]ScheduleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Timezone != nil { + in, out := &in.Timezone, &out.Timezone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecurrenceObservation. +func (in *RecurrenceObservation) DeepCopy() *RecurrenceObservation { + if in == nil { + return nil + } + out := new(RecurrenceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecurrenceParameters) DeepCopyInto(out *RecurrenceParameters) { + *out = *in + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = make([]ScheduleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Timezone != nil { + in, out := &in.Timezone, &out.Timezone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecurrenceParameters. +func (in *RecurrenceParameters) DeepCopy() *RecurrenceParameters { + if in == nil { + return nil + } + out := new(RecurrenceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RecurrenceScheduleInitParameters) DeepCopyInto(out *RecurrenceScheduleInitParameters) { + *out = *in + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TargetInstanceCount != nil { + in, out := &in.TargetInstanceCount, &out.TargetInstanceCount + *out = new(float64) + **out = **in + } + if in.Time != nil { + in, out := &in.Time, &out.Time + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecurrenceScheduleInitParameters. +func (in *RecurrenceScheduleInitParameters) DeepCopy() *RecurrenceScheduleInitParameters { + if in == nil { + return nil + } + out := new(RecurrenceScheduleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecurrenceScheduleObservation) DeepCopyInto(out *RecurrenceScheduleObservation) { + *out = *in + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TargetInstanceCount != nil { + in, out := &in.TargetInstanceCount, &out.TargetInstanceCount + *out = new(float64) + **out = **in + } + if in.Time != nil { + in, out := &in.Time, &out.Time + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecurrenceScheduleObservation. 
+func (in *RecurrenceScheduleObservation) DeepCopy() *RecurrenceScheduleObservation { + if in == nil { + return nil + } + out := new(RecurrenceScheduleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecurrenceScheduleParameters) DeepCopyInto(out *RecurrenceScheduleParameters) { + *out = *in + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TargetInstanceCount != nil { + in, out := &in.TargetInstanceCount, &out.TargetInstanceCount + *out = new(float64) + **out = **in + } + if in.Time != nil { + in, out := &in.Time, &out.Time + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecurrenceScheduleParameters. +func (in *RecurrenceScheduleParameters) DeepCopy() *RecurrenceScheduleParameters { + if in == nil { + return nil + } + out := new(RecurrenceScheduleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RestProxyInitParameters) DeepCopyInto(out *RestProxyInitParameters) { + *out = *in + if in.SecurityGroupID != nil { + in, out := &in.SecurityGroupID, &out.SecurityGroupID + *out = new(string) + **out = **in + } + if in.SecurityGroupName != nil { + in, out := &in.SecurityGroupName, &out.SecurityGroupName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestProxyInitParameters. 
+func (in *RestProxyInitParameters) DeepCopy() *RestProxyInitParameters { + if in == nil { + return nil + } + out := new(RestProxyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RestProxyObservation) DeepCopyInto(out *RestProxyObservation) { + *out = *in + if in.SecurityGroupID != nil { + in, out := &in.SecurityGroupID, &out.SecurityGroupID + *out = new(string) + **out = **in + } + if in.SecurityGroupName != nil { + in, out := &in.SecurityGroupName, &out.SecurityGroupName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestProxyObservation. +func (in *RestProxyObservation) DeepCopy() *RestProxyObservation { + if in == nil { + return nil + } + out := new(RestProxyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RestProxyParameters) DeepCopyInto(out *RestProxyParameters) { + *out = *in + if in.SecurityGroupID != nil { + in, out := &in.SecurityGroupID, &out.SecurityGroupID + *out = new(string) + **out = **in + } + if in.SecurityGroupName != nil { + in, out := &in.SecurityGroupName, &out.SecurityGroupName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestProxyParameters. +func (in *RestProxyParameters) DeepCopy() *RestProxyParameters { + if in == nil { + return nil + } + out := new(RestProxyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RolesHeadNodeInitParameters) DeepCopyInto(out *RolesHeadNodeInitParameters) { + *out = *in + if in.SSHKeys != nil { + in, out := &in.SSHKeys, &out.SSHKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScriptActions != nil { + in, out := &in.ScriptActions, &out.ScriptActions + *out = make([]HeadNodeScriptActionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } + if in.VirtualNetworkID != nil { + in, out := &in.VirtualNetworkID, &out.VirtualNetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolesHeadNodeInitParameters. +func (in *RolesHeadNodeInitParameters) DeepCopy() *RolesHeadNodeInitParameters { + if in == nil { + return nil + } + out := new(RolesHeadNodeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RolesHeadNodeObservation) DeepCopyInto(out *RolesHeadNodeObservation) { + *out = *in + if in.SSHKeys != nil { + in, out := &in.SSHKeys, &out.SSHKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScriptActions != nil { + in, out := &in.ScriptActions, &out.ScriptActions + *out = make([]HeadNodeScriptActionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } + if in.VirtualNetworkID != nil { + in, out := &in.VirtualNetworkID, &out.VirtualNetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolesHeadNodeObservation. +func (in *RolesHeadNodeObservation) DeepCopy() *RolesHeadNodeObservation { + if in == nil { + return nil + } + out := new(RolesHeadNodeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RolesHeadNodeParameters) DeepCopyInto(out *RolesHeadNodeParameters) { + *out = *in + if in.PasswordSecretRef != nil { + in, out := &in.PasswordSecretRef, &out.PasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.SSHKeys != nil { + in, out := &in.SSHKeys, &out.SSHKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScriptActions != nil { + in, out := &in.ScriptActions, &out.ScriptActions + *out = make([]HeadNodeScriptActionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } + if in.VirtualNetworkID != nil { + in, out := &in.VirtualNetworkID, &out.VirtualNetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolesHeadNodeParameters. +func (in *RolesHeadNodeParameters) DeepCopy() *RolesHeadNodeParameters { + if in == nil { + return nil + } + out := new(RolesHeadNodeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RolesHeadNodeScriptActionsInitParameters) DeepCopyInto(out *RolesHeadNodeScriptActionsInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolesHeadNodeScriptActionsInitParameters. +func (in *RolesHeadNodeScriptActionsInitParameters) DeepCopy() *RolesHeadNodeScriptActionsInitParameters { + if in == nil { + return nil + } + out := new(RolesHeadNodeScriptActionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RolesHeadNodeScriptActionsObservation) DeepCopyInto(out *RolesHeadNodeScriptActionsObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolesHeadNodeScriptActionsObservation. +func (in *RolesHeadNodeScriptActionsObservation) DeepCopy() *RolesHeadNodeScriptActionsObservation { + if in == nil { + return nil + } + out := new(RolesHeadNodeScriptActionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RolesHeadNodeScriptActionsParameters) DeepCopyInto(out *RolesHeadNodeScriptActionsParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolesHeadNodeScriptActionsParameters. +func (in *RolesHeadNodeScriptActionsParameters) DeepCopy() *RolesHeadNodeScriptActionsParameters { + if in == nil { + return nil + } + out := new(RolesHeadNodeScriptActionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RolesInitParameters) DeepCopyInto(out *RolesInitParameters) { + *out = *in + if in.EdgeNode != nil { + in, out := &in.EdgeNode, &out.EdgeNode + *out = new(EdgeNodeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.HeadNode != nil { + in, out := &in.HeadNode, &out.HeadNode + *out = new(HeadNodeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.WorkerNode != nil { + in, out := &in.WorkerNode, &out.WorkerNode + *out = new(WorkerNodeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ZookeeperNode != nil { + in, out := &in.ZookeeperNode, &out.ZookeeperNode + *out = new(ZookeeperNodeInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolesInitParameters. +func (in *RolesInitParameters) DeepCopy() *RolesInitParameters { + if in == nil { + return nil + } + out := new(RolesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RolesObservation) DeepCopyInto(out *RolesObservation) { + *out = *in + if in.EdgeNode != nil { + in, out := &in.EdgeNode, &out.EdgeNode + *out = new(EdgeNodeObservation) + (*in).DeepCopyInto(*out) + } + if in.HeadNode != nil { + in, out := &in.HeadNode, &out.HeadNode + *out = new(HeadNodeObservation) + (*in).DeepCopyInto(*out) + } + if in.WorkerNode != nil { + in, out := &in.WorkerNode, &out.WorkerNode + *out = new(WorkerNodeObservation) + (*in).DeepCopyInto(*out) + } + if in.ZookeeperNode != nil { + in, out := &in.ZookeeperNode, &out.ZookeeperNode + *out = new(ZookeeperNodeObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolesObservation. +func (in *RolesObservation) DeepCopy() *RolesObservation { + if in == nil { + return nil + } + out := new(RolesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RolesParameters) DeepCopyInto(out *RolesParameters) { + *out = *in + if in.EdgeNode != nil { + in, out := &in.EdgeNode, &out.EdgeNode + *out = new(EdgeNodeParameters) + (*in).DeepCopyInto(*out) + } + if in.HeadNode != nil { + in, out := &in.HeadNode, &out.HeadNode + *out = new(HeadNodeParameters) + (*in).DeepCopyInto(*out) + } + if in.WorkerNode != nil { + in, out := &in.WorkerNode, &out.WorkerNode + *out = new(WorkerNodeParameters) + (*in).DeepCopyInto(*out) + } + if in.ZookeeperNode != nil { + in, out := &in.ZookeeperNode, &out.ZookeeperNode + *out = new(ZookeeperNodeParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolesParameters. 
+func (in *RolesParameters) DeepCopy() *RolesParameters { + if in == nil { + return nil + } + out := new(RolesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RolesWorkerNodeAutoscaleInitParameters) DeepCopyInto(out *RolesWorkerNodeAutoscaleInitParameters) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(AutoscaleCapacityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Recurrence != nil { + in, out := &in.Recurrence, &out.Recurrence + *out = new(WorkerNodeAutoscaleRecurrenceInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolesWorkerNodeAutoscaleInitParameters. +func (in *RolesWorkerNodeAutoscaleInitParameters) DeepCopy() *RolesWorkerNodeAutoscaleInitParameters { + if in == nil { + return nil + } + out := new(RolesWorkerNodeAutoscaleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RolesWorkerNodeAutoscaleObservation) DeepCopyInto(out *RolesWorkerNodeAutoscaleObservation) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(AutoscaleCapacityObservation) + (*in).DeepCopyInto(*out) + } + if in.Recurrence != nil { + in, out := &in.Recurrence, &out.Recurrence + *out = new(WorkerNodeAutoscaleRecurrenceObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolesWorkerNodeAutoscaleObservation. 
+func (in *RolesWorkerNodeAutoscaleObservation) DeepCopy() *RolesWorkerNodeAutoscaleObservation { + if in == nil { + return nil + } + out := new(RolesWorkerNodeAutoscaleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RolesWorkerNodeAutoscaleParameters) DeepCopyInto(out *RolesWorkerNodeAutoscaleParameters) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(AutoscaleCapacityParameters) + (*in).DeepCopyInto(*out) + } + if in.Recurrence != nil { + in, out := &in.Recurrence, &out.Recurrence + *out = new(WorkerNodeAutoscaleRecurrenceParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolesWorkerNodeAutoscaleParameters. +func (in *RolesWorkerNodeAutoscaleParameters) DeepCopy() *RolesWorkerNodeAutoscaleParameters { + if in == nil { + return nil + } + out := new(RolesWorkerNodeAutoscaleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RolesWorkerNodeAutoscaleRecurrenceInitParameters) DeepCopyInto(out *RolesWorkerNodeAutoscaleRecurrenceInitParameters) { + *out = *in + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = make([]WorkerNodeAutoscaleRecurrenceScheduleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Timezone != nil { + in, out := &in.Timezone, &out.Timezone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolesWorkerNodeAutoscaleRecurrenceInitParameters. 
+func (in *RolesWorkerNodeAutoscaleRecurrenceInitParameters) DeepCopy() *RolesWorkerNodeAutoscaleRecurrenceInitParameters { + if in == nil { + return nil + } + out := new(RolesWorkerNodeAutoscaleRecurrenceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RolesWorkerNodeAutoscaleRecurrenceObservation) DeepCopyInto(out *RolesWorkerNodeAutoscaleRecurrenceObservation) { + *out = *in + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = make([]WorkerNodeAutoscaleRecurrenceScheduleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Timezone != nil { + in, out := &in.Timezone, &out.Timezone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolesWorkerNodeAutoscaleRecurrenceObservation. +func (in *RolesWorkerNodeAutoscaleRecurrenceObservation) DeepCopy() *RolesWorkerNodeAutoscaleRecurrenceObservation { + if in == nil { + return nil + } + out := new(RolesWorkerNodeAutoscaleRecurrenceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RolesWorkerNodeAutoscaleRecurrenceParameters) DeepCopyInto(out *RolesWorkerNodeAutoscaleRecurrenceParameters) { + *out = *in + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = make([]WorkerNodeAutoscaleRecurrenceScheduleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Timezone != nil { + in, out := &in.Timezone, &out.Timezone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolesWorkerNodeAutoscaleRecurrenceParameters. 
+func (in *RolesWorkerNodeAutoscaleRecurrenceParameters) DeepCopy() *RolesWorkerNodeAutoscaleRecurrenceParameters { + if in == nil { + return nil + } + out := new(RolesWorkerNodeAutoscaleRecurrenceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RolesWorkerNodeInitParameters) DeepCopyInto(out *RolesWorkerNodeInitParameters) { + *out = *in + if in.Autoscale != nil { + in, out := &in.Autoscale, &out.Autoscale + *out = new(WorkerNodeAutoscaleInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SSHKeys != nil { + in, out := &in.SSHKeys, &out.SSHKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScriptActions != nil { + in, out := &in.ScriptActions, &out.ScriptActions + *out = make([]RolesWorkerNodeScriptActionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.TargetInstanceCount != nil { + in, out := &in.TargetInstanceCount, &out.TargetInstanceCount + *out = new(float64) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } + if in.VirtualNetworkID != nil { + in, out := &in.VirtualNetworkID, &out.VirtualNetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, 
copying the receiver, creating a new RolesWorkerNodeInitParameters. +func (in *RolesWorkerNodeInitParameters) DeepCopy() *RolesWorkerNodeInitParameters { + if in == nil { + return nil + } + out := new(RolesWorkerNodeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RolesWorkerNodeObservation) DeepCopyInto(out *RolesWorkerNodeObservation) { + *out = *in + if in.Autoscale != nil { + in, out := &in.Autoscale, &out.Autoscale + *out = new(WorkerNodeAutoscaleObservation) + (*in).DeepCopyInto(*out) + } + if in.SSHKeys != nil { + in, out := &in.SSHKeys, &out.SSHKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScriptActions != nil { + in, out := &in.ScriptActions, &out.ScriptActions + *out = make([]RolesWorkerNodeScriptActionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.TargetInstanceCount != nil { + in, out := &in.TargetInstanceCount, &out.TargetInstanceCount + *out = new(float64) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } + if in.VirtualNetworkID != nil { + in, out := &in.VirtualNetworkID, &out.VirtualNetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolesWorkerNodeObservation. 
+func (in *RolesWorkerNodeObservation) DeepCopy() *RolesWorkerNodeObservation { + if in == nil { + return nil + } + out := new(RolesWorkerNodeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RolesWorkerNodeParameters) DeepCopyInto(out *RolesWorkerNodeParameters) { + *out = *in + if in.Autoscale != nil { + in, out := &in.Autoscale, &out.Autoscale + *out = new(WorkerNodeAutoscaleParameters) + (*in).DeepCopyInto(*out) + } + if in.PasswordSecretRef != nil { + in, out := &in.PasswordSecretRef, &out.PasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.SSHKeys != nil { + in, out := &in.SSHKeys, &out.SSHKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScriptActions != nil { + in, out := &in.ScriptActions, &out.ScriptActions + *out = make([]RolesWorkerNodeScriptActionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.TargetInstanceCount != nil { + in, out := &in.TargetInstanceCount, &out.TargetInstanceCount + *out = new(float64) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } + if in.VirtualNetworkID != nil { + in, out := &in.VirtualNetworkID, &out.VirtualNetworkID + *out = new(string) + 
**out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolesWorkerNodeParameters. +func (in *RolesWorkerNodeParameters) DeepCopy() *RolesWorkerNodeParameters { + if in == nil { + return nil + } + out := new(RolesWorkerNodeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RolesWorkerNodeScriptActionsInitParameters) DeepCopyInto(out *RolesWorkerNodeScriptActionsInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolesWorkerNodeScriptActionsInitParameters. +func (in *RolesWorkerNodeScriptActionsInitParameters) DeepCopy() *RolesWorkerNodeScriptActionsInitParameters { + if in == nil { + return nil + } + out := new(RolesWorkerNodeScriptActionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RolesWorkerNodeScriptActionsObservation) DeepCopyInto(out *RolesWorkerNodeScriptActionsObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolesWorkerNodeScriptActionsObservation. 
+func (in *RolesWorkerNodeScriptActionsObservation) DeepCopy() *RolesWorkerNodeScriptActionsObservation { + if in == nil { + return nil + } + out := new(RolesWorkerNodeScriptActionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RolesWorkerNodeScriptActionsParameters) DeepCopyInto(out *RolesWorkerNodeScriptActionsParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolesWorkerNodeScriptActionsParameters. +func (in *RolesWorkerNodeScriptActionsParameters) DeepCopy() *RolesWorkerNodeScriptActionsParameters { + if in == nil { + return nil + } + out := new(RolesWorkerNodeScriptActionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RolesZookeeperNodeInitParameters) DeepCopyInto(out *RolesZookeeperNodeInitParameters) { + *out = *in + if in.SSHKeys != nil { + in, out := &in.SSHKeys, &out.SSHKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScriptActions != nil { + in, out := &in.ScriptActions, &out.ScriptActions + *out = make([]RolesZookeeperNodeScriptActionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } + if in.VirtualNetworkID != nil { + in, out := &in.VirtualNetworkID, &out.VirtualNetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolesZookeeperNodeInitParameters. +func (in *RolesZookeeperNodeInitParameters) DeepCopy() *RolesZookeeperNodeInitParameters { + if in == nil { + return nil + } + out := new(RolesZookeeperNodeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RolesZookeeperNodeObservation) DeepCopyInto(out *RolesZookeeperNodeObservation) { + *out = *in + if in.SSHKeys != nil { + in, out := &in.SSHKeys, &out.SSHKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScriptActions != nil { + in, out := &in.ScriptActions, &out.ScriptActions + *out = make([]RolesZookeeperNodeScriptActionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } + if in.VirtualNetworkID != nil { + in, out := &in.VirtualNetworkID, &out.VirtualNetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolesZookeeperNodeObservation. +func (in *RolesZookeeperNodeObservation) DeepCopy() *RolesZookeeperNodeObservation { + if in == nil { + return nil + } + out := new(RolesZookeeperNodeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RolesZookeeperNodeParameters) DeepCopyInto(out *RolesZookeeperNodeParameters) { + *out = *in + if in.PasswordSecretRef != nil { + in, out := &in.PasswordSecretRef, &out.PasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.SSHKeys != nil { + in, out := &in.SSHKeys, &out.SSHKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScriptActions != nil { + in, out := &in.ScriptActions, &out.ScriptActions + *out = make([]RolesZookeeperNodeScriptActionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } + if in.VirtualNetworkID != nil { + in, out := &in.VirtualNetworkID, &out.VirtualNetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolesZookeeperNodeParameters. +func (in *RolesZookeeperNodeParameters) DeepCopy() *RolesZookeeperNodeParameters { + if in == nil { + return nil + } + out := new(RolesZookeeperNodeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RolesZookeeperNodeScriptActionsInitParameters) DeepCopyInto(out *RolesZookeeperNodeScriptActionsInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolesZookeeperNodeScriptActionsInitParameters. +func (in *RolesZookeeperNodeScriptActionsInitParameters) DeepCopy() *RolesZookeeperNodeScriptActionsInitParameters { + if in == nil { + return nil + } + out := new(RolesZookeeperNodeScriptActionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RolesZookeeperNodeScriptActionsObservation) DeepCopyInto(out *RolesZookeeperNodeScriptActionsObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolesZookeeperNodeScriptActionsObservation. +func (in *RolesZookeeperNodeScriptActionsObservation) DeepCopy() *RolesZookeeperNodeScriptActionsObservation { + if in == nil { + return nil + } + out := new(RolesZookeeperNodeScriptActionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RolesZookeeperNodeScriptActionsParameters) DeepCopyInto(out *RolesZookeeperNodeScriptActionsParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolesZookeeperNodeScriptActionsParameters. +func (in *RolesZookeeperNodeScriptActionsParameters) DeepCopy() *RolesZookeeperNodeScriptActionsParameters { + if in == nil { + return nil + } + out := new(RolesZookeeperNodeScriptActionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScheduleInitParameters) DeepCopyInto(out *ScheduleInitParameters) { + *out = *in + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TargetInstanceCount != nil { + in, out := &in.TargetInstanceCount, &out.TargetInstanceCount + *out = new(float64) + **out = **in + } + if in.Time != nil { + in, out := &in.Time, &out.Time + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleInitParameters. +func (in *ScheduleInitParameters) DeepCopy() *ScheduleInitParameters { + if in == nil { + return nil + } + out := new(ScheduleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScheduleObservation) DeepCopyInto(out *ScheduleObservation) { + *out = *in + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TargetInstanceCount != nil { + in, out := &in.TargetInstanceCount, &out.TargetInstanceCount + *out = new(float64) + **out = **in + } + if in.Time != nil { + in, out := &in.Time, &out.Time + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleObservation. +func (in *ScheduleObservation) DeepCopy() *ScheduleObservation { + if in == nil { + return nil + } + out := new(ScheduleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScheduleParameters) DeepCopyInto(out *ScheduleParameters) { + *out = *in + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TargetInstanceCount != nil { + in, out := &in.TargetInstanceCount, &out.TargetInstanceCount + *out = new(float64) + **out = **in + } + if in.Time != nil { + in, out := &in.Time, &out.Time + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleParameters. +func (in *ScheduleParameters) DeepCopy() *ScheduleParameters { + if in == nil { + return nil + } + out := new(ScheduleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScriptActionsInitParameters) DeepCopyInto(out *ScriptActionsInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScriptActionsInitParameters. +func (in *ScriptActionsInitParameters) DeepCopy() *ScriptActionsInitParameters { + if in == nil { + return nil + } + out := new(ScriptActionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScriptActionsObservation) DeepCopyInto(out *ScriptActionsObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScriptActionsObservation. +func (in *ScriptActionsObservation) DeepCopy() *ScriptActionsObservation { + if in == nil { + return nil + } + out := new(ScriptActionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScriptActionsParameters) DeepCopyInto(out *ScriptActionsParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScriptActionsParameters. +func (in *ScriptActionsParameters) DeepCopy() *ScriptActionsParameters { + if in == nil { + return nil + } + out := new(ScriptActionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecurityProfileInitParameters) DeepCopyInto(out *SecurityProfileInitParameters) { + *out = *in + if in.AaddsResourceID != nil { + in, out := &in.AaddsResourceID, &out.AaddsResourceID + *out = new(string) + **out = **in + } + if in.ClusterUsersGroupDNS != nil { + in, out := &in.ClusterUsersGroupDNS, &out.ClusterUsersGroupDNS + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.DomainUsername != nil { + in, out := &in.DomainUsername, &out.DomainUsername + *out = new(string) + **out = **in + } + if in.LdapsUrls != nil { + in, out := &in.LdapsUrls, &out.LdapsUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MsiResourceID != nil { + in, out := &in.MsiResourceID, &out.MsiResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating 
a new SecurityProfileInitParameters. +func (in *SecurityProfileInitParameters) DeepCopy() *SecurityProfileInitParameters { + if in == nil { + return nil + } + out := new(SecurityProfileInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecurityProfileObservation) DeepCopyInto(out *SecurityProfileObservation) { + *out = *in + if in.AaddsResourceID != nil { + in, out := &in.AaddsResourceID, &out.AaddsResourceID + *out = new(string) + **out = **in + } + if in.ClusterUsersGroupDNS != nil { + in, out := &in.ClusterUsersGroupDNS, &out.ClusterUsersGroupDNS + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.DomainUsername != nil { + in, out := &in.DomainUsername, &out.DomainUsername + *out = new(string) + **out = **in + } + if in.LdapsUrls != nil { + in, out := &in.LdapsUrls, &out.LdapsUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MsiResourceID != nil { + in, out := &in.MsiResourceID, &out.MsiResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityProfileObservation. +func (in *SecurityProfileObservation) DeepCopy() *SecurityProfileObservation { + if in == nil { + return nil + } + out := new(SecurityProfileObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SecurityProfileParameters) DeepCopyInto(out *SecurityProfileParameters) { + *out = *in + if in.AaddsResourceID != nil { + in, out := &in.AaddsResourceID, &out.AaddsResourceID + *out = new(string) + **out = **in + } + if in.ClusterUsersGroupDNS != nil { + in, out := &in.ClusterUsersGroupDNS, &out.ClusterUsersGroupDNS + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + out.DomainUserPasswordSecretRef = in.DomainUserPasswordSecretRef + if in.DomainUsername != nil { + in, out := &in.DomainUsername, &out.DomainUsername + *out = new(string) + **out = **in + } + if in.LdapsUrls != nil { + in, out := &in.LdapsUrls, &out.LdapsUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MsiResourceID != nil { + in, out := &in.MsiResourceID, &out.MsiResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityProfileParameters. +func (in *SecurityProfileParameters) DeepCopy() *SecurityProfileParameters { + if in == nil { + return nil + } + out := new(SecurityProfileParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkCluster) DeepCopyInto(out *SparkCluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkCluster. 
+func (in *SparkCluster) DeepCopy() *SparkCluster { + if in == nil { + return nil + } + out := new(SparkCluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SparkCluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkClusterComponentVersionInitParameters) DeepCopyInto(out *SparkClusterComponentVersionInitParameters) { + *out = *in + if in.Spark != nil { + in, out := &in.Spark, &out.Spark + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterComponentVersionInitParameters. +func (in *SparkClusterComponentVersionInitParameters) DeepCopy() *SparkClusterComponentVersionInitParameters { + if in == nil { + return nil + } + out := new(SparkClusterComponentVersionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkClusterComponentVersionObservation) DeepCopyInto(out *SparkClusterComponentVersionObservation) { + *out = *in + if in.Spark != nil { + in, out := &in.Spark, &out.Spark + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterComponentVersionObservation. +func (in *SparkClusterComponentVersionObservation) DeepCopy() *SparkClusterComponentVersionObservation { + if in == nil { + return nil + } + out := new(SparkClusterComponentVersionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SparkClusterComponentVersionParameters) DeepCopyInto(out *SparkClusterComponentVersionParameters) { + *out = *in + if in.Spark != nil { + in, out := &in.Spark, &out.Spark + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterComponentVersionParameters. +func (in *SparkClusterComponentVersionParameters) DeepCopy() *SparkClusterComponentVersionParameters { + if in == nil { + return nil + } + out := new(SparkClusterComponentVersionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkClusterComputeIsolationInitParameters) DeepCopyInto(out *SparkClusterComputeIsolationInitParameters) { + *out = *in + if in.ComputeIsolationEnabled != nil { + in, out := &in.ComputeIsolationEnabled, &out.ComputeIsolationEnabled + *out = new(bool) + **out = **in + } + if in.HostSku != nil { + in, out := &in.HostSku, &out.HostSku + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterComputeIsolationInitParameters. +func (in *SparkClusterComputeIsolationInitParameters) DeepCopy() *SparkClusterComputeIsolationInitParameters { + if in == nil { + return nil + } + out := new(SparkClusterComputeIsolationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SparkClusterComputeIsolationObservation) DeepCopyInto(out *SparkClusterComputeIsolationObservation) { + *out = *in + if in.ComputeIsolationEnabled != nil { + in, out := &in.ComputeIsolationEnabled, &out.ComputeIsolationEnabled + *out = new(bool) + **out = **in + } + if in.HostSku != nil { + in, out := &in.HostSku, &out.HostSku + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterComputeIsolationObservation. +func (in *SparkClusterComputeIsolationObservation) DeepCopy() *SparkClusterComputeIsolationObservation { + if in == nil { + return nil + } + out := new(SparkClusterComputeIsolationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkClusterComputeIsolationParameters) DeepCopyInto(out *SparkClusterComputeIsolationParameters) { + *out = *in + if in.ComputeIsolationEnabled != nil { + in, out := &in.ComputeIsolationEnabled, &out.ComputeIsolationEnabled + *out = new(bool) + **out = **in + } + if in.HostSku != nil { + in, out := &in.HostSku, &out.HostSku + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterComputeIsolationParameters. +func (in *SparkClusterComputeIsolationParameters) DeepCopy() *SparkClusterComputeIsolationParameters { + if in == nil { + return nil + } + out := new(SparkClusterComputeIsolationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SparkClusterDiskEncryptionInitParameters) DeepCopyInto(out *SparkClusterDiskEncryptionInitParameters) { + *out = *in + if in.EncryptionAlgorithm != nil { + in, out := &in.EncryptionAlgorithm, &out.EncryptionAlgorithm + *out = new(string) + **out = **in + } + if in.EncryptionAtHostEnabled != nil { + in, out := &in.EncryptionAtHostEnabled, &out.EncryptionAtHostEnabled + *out = new(bool) + **out = **in + } + if in.KeyVaultKeyID != nil { + in, out := &in.KeyVaultKeyID, &out.KeyVaultKeyID + *out = new(string) + **out = **in + } + if in.KeyVaultManagedIdentityID != nil { + in, out := &in.KeyVaultManagedIdentityID, &out.KeyVaultManagedIdentityID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterDiskEncryptionInitParameters. +func (in *SparkClusterDiskEncryptionInitParameters) DeepCopy() *SparkClusterDiskEncryptionInitParameters { + if in == nil { + return nil + } + out := new(SparkClusterDiskEncryptionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SparkClusterDiskEncryptionObservation) DeepCopyInto(out *SparkClusterDiskEncryptionObservation) { + *out = *in + if in.EncryptionAlgorithm != nil { + in, out := &in.EncryptionAlgorithm, &out.EncryptionAlgorithm + *out = new(string) + **out = **in + } + if in.EncryptionAtHostEnabled != nil { + in, out := &in.EncryptionAtHostEnabled, &out.EncryptionAtHostEnabled + *out = new(bool) + **out = **in + } + if in.KeyVaultKeyID != nil { + in, out := &in.KeyVaultKeyID, &out.KeyVaultKeyID + *out = new(string) + **out = **in + } + if in.KeyVaultManagedIdentityID != nil { + in, out := &in.KeyVaultManagedIdentityID, &out.KeyVaultManagedIdentityID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterDiskEncryptionObservation. +func (in *SparkClusterDiskEncryptionObservation) DeepCopy() *SparkClusterDiskEncryptionObservation { + if in == nil { + return nil + } + out := new(SparkClusterDiskEncryptionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkClusterDiskEncryptionParameters) DeepCopyInto(out *SparkClusterDiskEncryptionParameters) { + *out = *in + if in.EncryptionAlgorithm != nil { + in, out := &in.EncryptionAlgorithm, &out.EncryptionAlgorithm + *out = new(string) + **out = **in + } + if in.EncryptionAtHostEnabled != nil { + in, out := &in.EncryptionAtHostEnabled, &out.EncryptionAtHostEnabled + *out = new(bool) + **out = **in + } + if in.KeyVaultKeyID != nil { + in, out := &in.KeyVaultKeyID, &out.KeyVaultKeyID + *out = new(string) + **out = **in + } + if in.KeyVaultManagedIdentityID != nil { + in, out := &in.KeyVaultManagedIdentityID, &out.KeyVaultManagedIdentityID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterDiskEncryptionParameters. 
+func (in *SparkClusterDiskEncryptionParameters) DeepCopy() *SparkClusterDiskEncryptionParameters { + if in == nil { + return nil + } + out := new(SparkClusterDiskEncryptionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkClusterExtensionInitParameters) DeepCopyInto(out *SparkClusterExtensionInitParameters) { + *out = *in + if in.LogAnalyticsWorkspaceID != nil { + in, out := &in.LogAnalyticsWorkspaceID, &out.LogAnalyticsWorkspaceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterExtensionInitParameters. +func (in *SparkClusterExtensionInitParameters) DeepCopy() *SparkClusterExtensionInitParameters { + if in == nil { + return nil + } + out := new(SparkClusterExtensionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkClusterExtensionObservation) DeepCopyInto(out *SparkClusterExtensionObservation) { + *out = *in + if in.LogAnalyticsWorkspaceID != nil { + in, out := &in.LogAnalyticsWorkspaceID, &out.LogAnalyticsWorkspaceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterExtensionObservation. +func (in *SparkClusterExtensionObservation) DeepCopy() *SparkClusterExtensionObservation { + if in == nil { + return nil + } + out := new(SparkClusterExtensionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SparkClusterExtensionParameters) DeepCopyInto(out *SparkClusterExtensionParameters) { + *out = *in + if in.LogAnalyticsWorkspaceID != nil { + in, out := &in.LogAnalyticsWorkspaceID, &out.LogAnalyticsWorkspaceID + *out = new(string) + **out = **in + } + out.PrimaryKeySecretRef = in.PrimaryKeySecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterExtensionParameters. +func (in *SparkClusterExtensionParameters) DeepCopy() *SparkClusterExtensionParameters { + if in == nil { + return nil + } + out := new(SparkClusterExtensionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkClusterGatewayInitParameters) DeepCopyInto(out *SparkClusterGatewayInitParameters) { + *out = *in + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterGatewayInitParameters. +func (in *SparkClusterGatewayInitParameters) DeepCopy() *SparkClusterGatewayInitParameters { + if in == nil { + return nil + } + out := new(SparkClusterGatewayInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkClusterGatewayObservation) DeepCopyInto(out *SparkClusterGatewayObservation) { + *out = *in + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterGatewayObservation. 
+func (in *SparkClusterGatewayObservation) DeepCopy() *SparkClusterGatewayObservation { + if in == nil { + return nil + } + out := new(SparkClusterGatewayObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkClusterGatewayParameters) DeepCopyInto(out *SparkClusterGatewayParameters) { + *out = *in + out.PasswordSecretRef = in.PasswordSecretRef + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterGatewayParameters. +func (in *SparkClusterGatewayParameters) DeepCopy() *SparkClusterGatewayParameters { + if in == nil { + return nil + } + out := new(SparkClusterGatewayParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SparkClusterInitParameters) DeepCopyInto(out *SparkClusterInitParameters) { + *out = *in + if in.ClusterVersion != nil { + in, out := &in.ClusterVersion, &out.ClusterVersion + *out = new(string) + **out = **in + } + if in.ComponentVersion != nil { + in, out := &in.ComponentVersion, &out.ComponentVersion + *out = new(SparkClusterComponentVersionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ComputeIsolation != nil { + in, out := &in.ComputeIsolation, &out.ComputeIsolation + *out = new(SparkClusterComputeIsolationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DiskEncryption != nil { + in, out := &in.DiskEncryption, &out.DiskEncryption + *out = make([]SparkClusterDiskEncryptionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EncryptionInTransitEnabled != nil { + in, out := &in.EncryptionInTransitEnabled, &out.EncryptionInTransitEnabled + *out = new(bool) + **out = **in + } + if in.Extension != nil { + in, out := &in.Extension, &out.Extension + *out = new(SparkClusterExtensionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Gateway != nil { + in, out := &in.Gateway, &out.Gateway + *out = new(SparkClusterGatewayInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Metastores != nil { + in, out := &in.Metastores, &out.Metastores + *out = new(SparkClusterMetastoresInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Monitor != nil { + in, out := &in.Monitor, &out.Monitor + *out = new(SparkClusterMonitorInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Network != nil { + in, out := &in.Network, &out.Network + *out = new(SparkClusterNetworkInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Roles != nil { + in, out := &in.Roles, &out.Roles + *out = new(SparkClusterRolesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SecurityProfile != nil { + in, out := 
&in.SecurityProfile, &out.SecurityProfile + *out = new(SparkClusterSecurityProfileInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StorageAccount != nil { + in, out := &in.StorageAccount, &out.StorageAccount + *out = make([]SparkClusterStorageAccountInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StorageAccountGen2 != nil { + in, out := &in.StorageAccountGen2, &out.StorageAccountGen2 + *out = new(SparkClusterStorageAccountGen2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.TLSMinVersion != nil { + in, out := &in.TLSMinVersion, &out.TLSMinVersion + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Tier != nil { + in, out := &in.Tier, &out.Tier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterInitParameters. +func (in *SparkClusterInitParameters) DeepCopy() *SparkClusterInitParameters { + if in == nil { + return nil + } + out := new(SparkClusterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkClusterList) DeepCopyInto(out *SparkClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SparkCluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterList. 
+func (in *SparkClusterList) DeepCopy() *SparkClusterList { + if in == nil { + return nil + } + out := new(SparkClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SparkClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkClusterMetastoresAmbariInitParameters) DeepCopyInto(out *SparkClusterMetastoresAmbariInitParameters) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterMetastoresAmbariInitParameters. +func (in *SparkClusterMetastoresAmbariInitParameters) DeepCopy() *SparkClusterMetastoresAmbariInitParameters { + if in == nil { + return nil + } + out := new(SparkClusterMetastoresAmbariInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SparkClusterMetastoresAmbariObservation) DeepCopyInto(out *SparkClusterMetastoresAmbariObservation) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterMetastoresAmbariObservation. +func (in *SparkClusterMetastoresAmbariObservation) DeepCopy() *SparkClusterMetastoresAmbariObservation { + if in == nil { + return nil + } + out := new(SparkClusterMetastoresAmbariObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkClusterMetastoresAmbariParameters) DeepCopyInto(out *SparkClusterMetastoresAmbariParameters) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + out.PasswordSecretRef = in.PasswordSecretRef + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterMetastoresAmbariParameters. +func (in *SparkClusterMetastoresAmbariParameters) DeepCopy() *SparkClusterMetastoresAmbariParameters { + if in == nil { + return nil + } + out := new(SparkClusterMetastoresAmbariParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SparkClusterMetastoresHiveInitParameters) DeepCopyInto(out *SparkClusterMetastoresHiveInitParameters) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterMetastoresHiveInitParameters. +func (in *SparkClusterMetastoresHiveInitParameters) DeepCopy() *SparkClusterMetastoresHiveInitParameters { + if in == nil { + return nil + } + out := new(SparkClusterMetastoresHiveInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkClusterMetastoresHiveObservation) DeepCopyInto(out *SparkClusterMetastoresHiveObservation) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterMetastoresHiveObservation. +func (in *SparkClusterMetastoresHiveObservation) DeepCopy() *SparkClusterMetastoresHiveObservation { + if in == nil { + return nil + } + out := new(SparkClusterMetastoresHiveObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SparkClusterMetastoresHiveParameters) DeepCopyInto(out *SparkClusterMetastoresHiveParameters) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + out.PasswordSecretRef = in.PasswordSecretRef + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterMetastoresHiveParameters. +func (in *SparkClusterMetastoresHiveParameters) DeepCopy() *SparkClusterMetastoresHiveParameters { + if in == nil { + return nil + } + out := new(SparkClusterMetastoresHiveParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkClusterMetastoresInitParameters) DeepCopyInto(out *SparkClusterMetastoresInitParameters) { + *out = *in + if in.Ambari != nil { + in, out := &in.Ambari, &out.Ambari + *out = new(SparkClusterMetastoresAmbariInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Hive != nil { + in, out := &in.Hive, &out.Hive + *out = new(SparkClusterMetastoresHiveInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Oozie != nil { + in, out := &in.Oozie, &out.Oozie + *out = new(SparkClusterMetastoresOozieInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterMetastoresInitParameters. +func (in *SparkClusterMetastoresInitParameters) DeepCopy() *SparkClusterMetastoresInitParameters { + if in == nil { + return nil + } + out := new(SparkClusterMetastoresInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SparkClusterMetastoresObservation) DeepCopyInto(out *SparkClusterMetastoresObservation) { + *out = *in + if in.Ambari != nil { + in, out := &in.Ambari, &out.Ambari + *out = new(SparkClusterMetastoresAmbariObservation) + (*in).DeepCopyInto(*out) + } + if in.Hive != nil { + in, out := &in.Hive, &out.Hive + *out = new(SparkClusterMetastoresHiveObservation) + (*in).DeepCopyInto(*out) + } + if in.Oozie != nil { + in, out := &in.Oozie, &out.Oozie + *out = new(SparkClusterMetastoresOozieObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterMetastoresObservation. +func (in *SparkClusterMetastoresObservation) DeepCopy() *SparkClusterMetastoresObservation { + if in == nil { + return nil + } + out := new(SparkClusterMetastoresObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkClusterMetastoresOozieInitParameters) DeepCopyInto(out *SparkClusterMetastoresOozieInitParameters) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterMetastoresOozieInitParameters. +func (in *SparkClusterMetastoresOozieInitParameters) DeepCopy() *SparkClusterMetastoresOozieInitParameters { + if in == nil { + return nil + } + out := new(SparkClusterMetastoresOozieInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SparkClusterMetastoresOozieObservation) DeepCopyInto(out *SparkClusterMetastoresOozieObservation) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterMetastoresOozieObservation. +func (in *SparkClusterMetastoresOozieObservation) DeepCopy() *SparkClusterMetastoresOozieObservation { + if in == nil { + return nil + } + out := new(SparkClusterMetastoresOozieObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkClusterMetastoresOozieParameters) DeepCopyInto(out *SparkClusterMetastoresOozieParameters) { + *out = *in + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + out.PasswordSecretRef = in.PasswordSecretRef + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterMetastoresOozieParameters. +func (in *SparkClusterMetastoresOozieParameters) DeepCopy() *SparkClusterMetastoresOozieParameters { + if in == nil { + return nil + } + out := new(SparkClusterMetastoresOozieParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SparkClusterMetastoresParameters) DeepCopyInto(out *SparkClusterMetastoresParameters) { + *out = *in + if in.Ambari != nil { + in, out := &in.Ambari, &out.Ambari + *out = new(SparkClusterMetastoresAmbariParameters) + (*in).DeepCopyInto(*out) + } + if in.Hive != nil { + in, out := &in.Hive, &out.Hive + *out = new(SparkClusterMetastoresHiveParameters) + (*in).DeepCopyInto(*out) + } + if in.Oozie != nil { + in, out := &in.Oozie, &out.Oozie + *out = new(SparkClusterMetastoresOozieParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterMetastoresParameters. +func (in *SparkClusterMetastoresParameters) DeepCopy() *SparkClusterMetastoresParameters { + if in == nil { + return nil + } + out := new(SparkClusterMetastoresParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkClusterMonitorInitParameters) DeepCopyInto(out *SparkClusterMonitorInitParameters) { + *out = *in + if in.LogAnalyticsWorkspaceID != nil { + in, out := &in.LogAnalyticsWorkspaceID, &out.LogAnalyticsWorkspaceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterMonitorInitParameters. +func (in *SparkClusterMonitorInitParameters) DeepCopy() *SparkClusterMonitorInitParameters { + if in == nil { + return nil + } + out := new(SparkClusterMonitorInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SparkClusterMonitorObservation) DeepCopyInto(out *SparkClusterMonitorObservation) { + *out = *in + if in.LogAnalyticsWorkspaceID != nil { + in, out := &in.LogAnalyticsWorkspaceID, &out.LogAnalyticsWorkspaceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterMonitorObservation. +func (in *SparkClusterMonitorObservation) DeepCopy() *SparkClusterMonitorObservation { + if in == nil { + return nil + } + out := new(SparkClusterMonitorObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkClusterMonitorParameters) DeepCopyInto(out *SparkClusterMonitorParameters) { + *out = *in + if in.LogAnalyticsWorkspaceID != nil { + in, out := &in.LogAnalyticsWorkspaceID, &out.LogAnalyticsWorkspaceID + *out = new(string) + **out = **in + } + out.PrimaryKeySecretRef = in.PrimaryKeySecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterMonitorParameters. +func (in *SparkClusterMonitorParameters) DeepCopy() *SparkClusterMonitorParameters { + if in == nil { + return nil + } + out := new(SparkClusterMonitorParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkClusterNetworkInitParameters) DeepCopyInto(out *SparkClusterNetworkInitParameters) { + *out = *in + if in.ConnectionDirection != nil { + in, out := &in.ConnectionDirection, &out.ConnectionDirection + *out = new(string) + **out = **in + } + if in.PrivateLinkEnabled != nil { + in, out := &in.PrivateLinkEnabled, &out.PrivateLinkEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterNetworkInitParameters. 
+func (in *SparkClusterNetworkInitParameters) DeepCopy() *SparkClusterNetworkInitParameters { + if in == nil { + return nil + } + out := new(SparkClusterNetworkInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkClusterNetworkObservation) DeepCopyInto(out *SparkClusterNetworkObservation) { + *out = *in + if in.ConnectionDirection != nil { + in, out := &in.ConnectionDirection, &out.ConnectionDirection + *out = new(string) + **out = **in + } + if in.PrivateLinkEnabled != nil { + in, out := &in.PrivateLinkEnabled, &out.PrivateLinkEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterNetworkObservation. +func (in *SparkClusterNetworkObservation) DeepCopy() *SparkClusterNetworkObservation { + if in == nil { + return nil + } + out := new(SparkClusterNetworkObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkClusterNetworkParameters) DeepCopyInto(out *SparkClusterNetworkParameters) { + *out = *in + if in.ConnectionDirection != nil { + in, out := &in.ConnectionDirection, &out.ConnectionDirection + *out = new(string) + **out = **in + } + if in.PrivateLinkEnabled != nil { + in, out := &in.PrivateLinkEnabled, &out.PrivateLinkEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterNetworkParameters. +func (in *SparkClusterNetworkParameters) DeepCopy() *SparkClusterNetworkParameters { + if in == nil { + return nil + } + out := new(SparkClusterNetworkParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SparkClusterObservation) DeepCopyInto(out *SparkClusterObservation) { + *out = *in + if in.ClusterVersion != nil { + in, out := &in.ClusterVersion, &out.ClusterVersion + *out = new(string) + **out = **in + } + if in.ComponentVersion != nil { + in, out := &in.ComponentVersion, &out.ComponentVersion + *out = new(SparkClusterComponentVersionObservation) + (*in).DeepCopyInto(*out) + } + if in.ComputeIsolation != nil { + in, out := &in.ComputeIsolation, &out.ComputeIsolation + *out = new(SparkClusterComputeIsolationObservation) + (*in).DeepCopyInto(*out) + } + if in.DiskEncryption != nil { + in, out := &in.DiskEncryption, &out.DiskEncryption + *out = make([]SparkClusterDiskEncryptionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EncryptionInTransitEnabled != nil { + in, out := &in.EncryptionInTransitEnabled, &out.EncryptionInTransitEnabled + *out = new(bool) + **out = **in + } + if in.Extension != nil { + in, out := &in.Extension, &out.Extension + *out = new(SparkClusterExtensionObservation) + (*in).DeepCopyInto(*out) + } + if in.Gateway != nil { + in, out := &in.Gateway, &out.Gateway + *out = new(SparkClusterGatewayObservation) + (*in).DeepCopyInto(*out) + } + if in.HTTPSEndpoint != nil { + in, out := &in.HTTPSEndpoint, &out.HTTPSEndpoint + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Metastores != nil { + in, out := &in.Metastores, &out.Metastores + *out = new(SparkClusterMetastoresObservation) + (*in).DeepCopyInto(*out) + } + if in.Monitor != nil { + in, out := &in.Monitor, &out.Monitor + *out = new(SparkClusterMonitorObservation) + (*in).DeepCopyInto(*out) + } + if in.Network != nil { + in, out := &in.Network, &out.Network + *out = new(SparkClusterNetworkObservation) + (*in).DeepCopyInto(*out) + } + if 
in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Roles != nil { + in, out := &in.Roles, &out.Roles + *out = new(SparkClusterRolesObservation) + (*in).DeepCopyInto(*out) + } + if in.SSHEndpoint != nil { + in, out := &in.SSHEndpoint, &out.SSHEndpoint + *out = new(string) + **out = **in + } + if in.SecurityProfile != nil { + in, out := &in.SecurityProfile, &out.SecurityProfile + *out = new(SparkClusterSecurityProfileObservation) + (*in).DeepCopyInto(*out) + } + if in.StorageAccount != nil { + in, out := &in.StorageAccount, &out.StorageAccount + *out = make([]SparkClusterStorageAccountObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StorageAccountGen2 != nil { + in, out := &in.StorageAccountGen2, &out.StorageAccountGen2 + *out = new(SparkClusterStorageAccountGen2Observation) + (*in).DeepCopyInto(*out) + } + if in.TLSMinVersion != nil { + in, out := &in.TLSMinVersion, &out.TLSMinVersion + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Tier != nil { + in, out := &in.Tier, &out.Tier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterObservation. +func (in *SparkClusterObservation) DeepCopy() *SparkClusterObservation { + if in == nil { + return nil + } + out := new(SparkClusterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SparkClusterParameters) DeepCopyInto(out *SparkClusterParameters) { + *out = *in + if in.ClusterVersion != nil { + in, out := &in.ClusterVersion, &out.ClusterVersion + *out = new(string) + **out = **in + } + if in.ComponentVersion != nil { + in, out := &in.ComponentVersion, &out.ComponentVersion + *out = new(SparkClusterComponentVersionParameters) + (*in).DeepCopyInto(*out) + } + if in.ComputeIsolation != nil { + in, out := &in.ComputeIsolation, &out.ComputeIsolation + *out = new(SparkClusterComputeIsolationParameters) + (*in).DeepCopyInto(*out) + } + if in.DiskEncryption != nil { + in, out := &in.DiskEncryption, &out.DiskEncryption + *out = make([]SparkClusterDiskEncryptionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EncryptionInTransitEnabled != nil { + in, out := &in.EncryptionInTransitEnabled, &out.EncryptionInTransitEnabled + *out = new(bool) + **out = **in + } + if in.Extension != nil { + in, out := &in.Extension, &out.Extension + *out = new(SparkClusterExtensionParameters) + (*in).DeepCopyInto(*out) + } + if in.Gateway != nil { + in, out := &in.Gateway, &out.Gateway + *out = new(SparkClusterGatewayParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Metastores != nil { + in, out := &in.Metastores, &out.Metastores + *out = new(SparkClusterMetastoresParameters) + (*in).DeepCopyInto(*out) + } + if in.Monitor != nil { + in, out := &in.Monitor, &out.Monitor + *out = new(SparkClusterMonitorParameters) + (*in).DeepCopyInto(*out) + } + if in.Network != nil { + in, out := &in.Network, &out.Network + *out = new(SparkClusterNetworkParameters) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, 
&out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Roles != nil { + in, out := &in.Roles, &out.Roles + *out = new(SparkClusterRolesParameters) + (*in).DeepCopyInto(*out) + } + if in.SecurityProfile != nil { + in, out := &in.SecurityProfile, &out.SecurityProfile + *out = new(SparkClusterSecurityProfileParameters) + (*in).DeepCopyInto(*out) + } + if in.StorageAccount != nil { + in, out := &in.StorageAccount, &out.StorageAccount + *out = make([]SparkClusterStorageAccountParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StorageAccountGen2 != nil { + in, out := &in.StorageAccountGen2, &out.StorageAccountGen2 + *out = new(SparkClusterStorageAccountGen2Parameters) + (*in).DeepCopyInto(*out) + } + if in.TLSMinVersion != nil { + in, out := &in.TLSMinVersion, &out.TLSMinVersion + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Tier != nil { + in, out := &in.Tier, &out.Tier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterParameters. +func (in *SparkClusterParameters) DeepCopy() *SparkClusterParameters { + if in == nil { + return nil + } + out := new(SparkClusterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SparkClusterRolesHeadNodeInitParameters) DeepCopyInto(out *SparkClusterRolesHeadNodeInitParameters) { + *out = *in + if in.SSHKeys != nil { + in, out := &in.SSHKeys, &out.SSHKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScriptActions != nil { + in, out := &in.ScriptActions, &out.ScriptActions + *out = make([]SparkClusterRolesHeadNodeScriptActionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } + if in.VirtualNetworkID != nil { + in, out := &in.VirtualNetworkID, &out.VirtualNetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterRolesHeadNodeInitParameters. +func (in *SparkClusterRolesHeadNodeInitParameters) DeepCopy() *SparkClusterRolesHeadNodeInitParameters { + if in == nil { + return nil + } + out := new(SparkClusterRolesHeadNodeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SparkClusterRolesHeadNodeObservation) DeepCopyInto(out *SparkClusterRolesHeadNodeObservation) { + *out = *in + if in.SSHKeys != nil { + in, out := &in.SSHKeys, &out.SSHKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScriptActions != nil { + in, out := &in.ScriptActions, &out.ScriptActions + *out = make([]SparkClusterRolesHeadNodeScriptActionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } + if in.VirtualNetworkID != nil { + in, out := &in.VirtualNetworkID, &out.VirtualNetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterRolesHeadNodeObservation. +func (in *SparkClusterRolesHeadNodeObservation) DeepCopy() *SparkClusterRolesHeadNodeObservation { + if in == nil { + return nil + } + out := new(SparkClusterRolesHeadNodeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SparkClusterRolesHeadNodeParameters) DeepCopyInto(out *SparkClusterRolesHeadNodeParameters) { + *out = *in + if in.PasswordSecretRef != nil { + in, out := &in.PasswordSecretRef, &out.PasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.SSHKeys != nil { + in, out := &in.SSHKeys, &out.SSHKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScriptActions != nil { + in, out := &in.ScriptActions, &out.ScriptActions + *out = make([]SparkClusterRolesHeadNodeScriptActionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } + if in.VirtualNetworkID != nil { + in, out := &in.VirtualNetworkID, &out.VirtualNetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterRolesHeadNodeParameters. +func (in *SparkClusterRolesHeadNodeParameters) DeepCopy() *SparkClusterRolesHeadNodeParameters { + if in == nil { + return nil + } + out := new(SparkClusterRolesHeadNodeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SparkClusterRolesHeadNodeScriptActionsInitParameters) DeepCopyInto(out *SparkClusterRolesHeadNodeScriptActionsInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterRolesHeadNodeScriptActionsInitParameters. +func (in *SparkClusterRolesHeadNodeScriptActionsInitParameters) DeepCopy() *SparkClusterRolesHeadNodeScriptActionsInitParameters { + if in == nil { + return nil + } + out := new(SparkClusterRolesHeadNodeScriptActionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkClusterRolesHeadNodeScriptActionsObservation) DeepCopyInto(out *SparkClusterRolesHeadNodeScriptActionsObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterRolesHeadNodeScriptActionsObservation. +func (in *SparkClusterRolesHeadNodeScriptActionsObservation) DeepCopy() *SparkClusterRolesHeadNodeScriptActionsObservation { + if in == nil { + return nil + } + out := new(SparkClusterRolesHeadNodeScriptActionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SparkClusterRolesHeadNodeScriptActionsParameters) DeepCopyInto(out *SparkClusterRolesHeadNodeScriptActionsParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterRolesHeadNodeScriptActionsParameters. +func (in *SparkClusterRolesHeadNodeScriptActionsParameters) DeepCopy() *SparkClusterRolesHeadNodeScriptActionsParameters { + if in == nil { + return nil + } + out := new(SparkClusterRolesHeadNodeScriptActionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkClusterRolesInitParameters) DeepCopyInto(out *SparkClusterRolesInitParameters) { + *out = *in + if in.HeadNode != nil { + in, out := &in.HeadNode, &out.HeadNode + *out = new(SparkClusterRolesHeadNodeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.WorkerNode != nil { + in, out := &in.WorkerNode, &out.WorkerNode + *out = new(SparkClusterRolesWorkerNodeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ZookeeperNode != nil { + in, out := &in.ZookeeperNode, &out.ZookeeperNode + *out = new(SparkClusterRolesZookeeperNodeInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterRolesInitParameters. +func (in *SparkClusterRolesInitParameters) DeepCopy() *SparkClusterRolesInitParameters { + if in == nil { + return nil + } + out := new(SparkClusterRolesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *SparkClusterRolesObservation) DeepCopyInto(out *SparkClusterRolesObservation) { + *out = *in + if in.HeadNode != nil { + in, out := &in.HeadNode, &out.HeadNode + *out = new(SparkClusterRolesHeadNodeObservation) + (*in).DeepCopyInto(*out) + } + if in.WorkerNode != nil { + in, out := &in.WorkerNode, &out.WorkerNode + *out = new(SparkClusterRolesWorkerNodeObservation) + (*in).DeepCopyInto(*out) + } + if in.ZookeeperNode != nil { + in, out := &in.ZookeeperNode, &out.ZookeeperNode + *out = new(SparkClusterRolesZookeeperNodeObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterRolesObservation. +func (in *SparkClusterRolesObservation) DeepCopy() *SparkClusterRolesObservation { + if in == nil { + return nil + } + out := new(SparkClusterRolesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkClusterRolesParameters) DeepCopyInto(out *SparkClusterRolesParameters) { + *out = *in + if in.HeadNode != nil { + in, out := &in.HeadNode, &out.HeadNode + *out = new(SparkClusterRolesHeadNodeParameters) + (*in).DeepCopyInto(*out) + } + if in.WorkerNode != nil { + in, out := &in.WorkerNode, &out.WorkerNode + *out = new(SparkClusterRolesWorkerNodeParameters) + (*in).DeepCopyInto(*out) + } + if in.ZookeeperNode != nil { + in, out := &in.ZookeeperNode, &out.ZookeeperNode + *out = new(SparkClusterRolesZookeeperNodeParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterRolesParameters. 
+func (in *SparkClusterRolesParameters) DeepCopy() *SparkClusterRolesParameters { + if in == nil { + return nil + } + out := new(SparkClusterRolesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkClusterRolesWorkerNodeAutoscaleInitParameters) DeepCopyInto(out *SparkClusterRolesWorkerNodeAutoscaleInitParameters) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(WorkerNodeAutoscaleCapacityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Recurrence != nil { + in, out := &in.Recurrence, &out.Recurrence + *out = new(RolesWorkerNodeAutoscaleRecurrenceInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterRolesWorkerNodeAutoscaleInitParameters. +func (in *SparkClusterRolesWorkerNodeAutoscaleInitParameters) DeepCopy() *SparkClusterRolesWorkerNodeAutoscaleInitParameters { + if in == nil { + return nil + } + out := new(SparkClusterRolesWorkerNodeAutoscaleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkClusterRolesWorkerNodeAutoscaleObservation) DeepCopyInto(out *SparkClusterRolesWorkerNodeAutoscaleObservation) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(WorkerNodeAutoscaleCapacityObservation) + (*in).DeepCopyInto(*out) + } + if in.Recurrence != nil { + in, out := &in.Recurrence, &out.Recurrence + *out = new(RolesWorkerNodeAutoscaleRecurrenceObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterRolesWorkerNodeAutoscaleObservation. 
+func (in *SparkClusterRolesWorkerNodeAutoscaleObservation) DeepCopy() *SparkClusterRolesWorkerNodeAutoscaleObservation { + if in == nil { + return nil + } + out := new(SparkClusterRolesWorkerNodeAutoscaleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkClusterRolesWorkerNodeAutoscaleParameters) DeepCopyInto(out *SparkClusterRolesWorkerNodeAutoscaleParameters) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(WorkerNodeAutoscaleCapacityParameters) + (*in).DeepCopyInto(*out) + } + if in.Recurrence != nil { + in, out := &in.Recurrence, &out.Recurrence + *out = new(RolesWorkerNodeAutoscaleRecurrenceParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterRolesWorkerNodeAutoscaleParameters. +func (in *SparkClusterRolesWorkerNodeAutoscaleParameters) DeepCopy() *SparkClusterRolesWorkerNodeAutoscaleParameters { + if in == nil { + return nil + } + out := new(SparkClusterRolesWorkerNodeAutoscaleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SparkClusterRolesWorkerNodeInitParameters) DeepCopyInto(out *SparkClusterRolesWorkerNodeInitParameters) { + *out = *in + if in.Autoscale != nil { + in, out := &in.Autoscale, &out.Autoscale + *out = new(SparkClusterRolesWorkerNodeAutoscaleInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SSHKeys != nil { + in, out := &in.SSHKeys, &out.SSHKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScriptActions != nil { + in, out := &in.ScriptActions, &out.ScriptActions + *out = make([]SparkClusterRolesWorkerNodeScriptActionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.TargetInstanceCount != nil { + in, out := &in.TargetInstanceCount, &out.TargetInstanceCount + *out = new(float64) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } + if in.VirtualNetworkID != nil { + in, out := &in.VirtualNetworkID, &out.VirtualNetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterRolesWorkerNodeInitParameters. 
+func (in *SparkClusterRolesWorkerNodeInitParameters) DeepCopy() *SparkClusterRolesWorkerNodeInitParameters { + if in == nil { + return nil + } + out := new(SparkClusterRolesWorkerNodeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkClusterRolesWorkerNodeObservation) DeepCopyInto(out *SparkClusterRolesWorkerNodeObservation) { + *out = *in + if in.Autoscale != nil { + in, out := &in.Autoscale, &out.Autoscale + *out = new(SparkClusterRolesWorkerNodeAutoscaleObservation) + (*in).DeepCopyInto(*out) + } + if in.SSHKeys != nil { + in, out := &in.SSHKeys, &out.SSHKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScriptActions != nil { + in, out := &in.ScriptActions, &out.ScriptActions + *out = make([]SparkClusterRolesWorkerNodeScriptActionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.TargetInstanceCount != nil { + in, out := &in.TargetInstanceCount, &out.TargetInstanceCount + *out = new(float64) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } + if in.VirtualNetworkID != nil { + in, out := &in.VirtualNetworkID, &out.VirtualNetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterRolesWorkerNodeObservation. 
+func (in *SparkClusterRolesWorkerNodeObservation) DeepCopy() *SparkClusterRolesWorkerNodeObservation { + if in == nil { + return nil + } + out := new(SparkClusterRolesWorkerNodeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkClusterRolesWorkerNodeParameters) DeepCopyInto(out *SparkClusterRolesWorkerNodeParameters) { + *out = *in + if in.Autoscale != nil { + in, out := &in.Autoscale, &out.Autoscale + *out = new(SparkClusterRolesWorkerNodeAutoscaleParameters) + (*in).DeepCopyInto(*out) + } + if in.PasswordSecretRef != nil { + in, out := &in.PasswordSecretRef, &out.PasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.SSHKeys != nil { + in, out := &in.SSHKeys, &out.SSHKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScriptActions != nil { + in, out := &in.ScriptActions, &out.ScriptActions + *out = make([]SparkClusterRolesWorkerNodeScriptActionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.TargetInstanceCount != nil { + in, out := &in.TargetInstanceCount, &out.TargetInstanceCount + *out = new(float64) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } + if in.VirtualNetworkID 
!= nil { + in, out := &in.VirtualNetworkID, &out.VirtualNetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterRolesWorkerNodeParameters. +func (in *SparkClusterRolesWorkerNodeParameters) DeepCopy() *SparkClusterRolesWorkerNodeParameters { + if in == nil { + return nil + } + out := new(SparkClusterRolesWorkerNodeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkClusterRolesWorkerNodeScriptActionsInitParameters) DeepCopyInto(out *SparkClusterRolesWorkerNodeScriptActionsInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterRolesWorkerNodeScriptActionsInitParameters. +func (in *SparkClusterRolesWorkerNodeScriptActionsInitParameters) DeepCopy() *SparkClusterRolesWorkerNodeScriptActionsInitParameters { + if in == nil { + return nil + } + out := new(SparkClusterRolesWorkerNodeScriptActionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SparkClusterRolesWorkerNodeScriptActionsObservation) DeepCopyInto(out *SparkClusterRolesWorkerNodeScriptActionsObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterRolesWorkerNodeScriptActionsObservation. +func (in *SparkClusterRolesWorkerNodeScriptActionsObservation) DeepCopy() *SparkClusterRolesWorkerNodeScriptActionsObservation { + if in == nil { + return nil + } + out := new(SparkClusterRolesWorkerNodeScriptActionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkClusterRolesWorkerNodeScriptActionsParameters) DeepCopyInto(out *SparkClusterRolesWorkerNodeScriptActionsParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterRolesWorkerNodeScriptActionsParameters. +func (in *SparkClusterRolesWorkerNodeScriptActionsParameters) DeepCopy() *SparkClusterRolesWorkerNodeScriptActionsParameters { + if in == nil { + return nil + } + out := new(SparkClusterRolesWorkerNodeScriptActionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SparkClusterRolesZookeeperNodeInitParameters) DeepCopyInto(out *SparkClusterRolesZookeeperNodeInitParameters) { + *out = *in + if in.SSHKeys != nil { + in, out := &in.SSHKeys, &out.SSHKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScriptActions != nil { + in, out := &in.ScriptActions, &out.ScriptActions + *out = make([]SparkClusterRolesZookeeperNodeScriptActionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } + if in.VirtualNetworkID != nil { + in, out := &in.VirtualNetworkID, &out.VirtualNetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterRolesZookeeperNodeInitParameters. +func (in *SparkClusterRolesZookeeperNodeInitParameters) DeepCopy() *SparkClusterRolesZookeeperNodeInitParameters { + if in == nil { + return nil + } + out := new(SparkClusterRolesZookeeperNodeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SparkClusterRolesZookeeperNodeObservation) DeepCopyInto(out *SparkClusterRolesZookeeperNodeObservation) { + *out = *in + if in.SSHKeys != nil { + in, out := &in.SSHKeys, &out.SSHKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScriptActions != nil { + in, out := &in.ScriptActions, &out.ScriptActions + *out = make([]SparkClusterRolesZookeeperNodeScriptActionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } + if in.VirtualNetworkID != nil { + in, out := &in.VirtualNetworkID, &out.VirtualNetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterRolesZookeeperNodeObservation. +func (in *SparkClusterRolesZookeeperNodeObservation) DeepCopy() *SparkClusterRolesZookeeperNodeObservation { + if in == nil { + return nil + } + out := new(SparkClusterRolesZookeeperNodeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SparkClusterRolesZookeeperNodeParameters) DeepCopyInto(out *SparkClusterRolesZookeeperNodeParameters) { + *out = *in + if in.PasswordSecretRef != nil { + in, out := &in.PasswordSecretRef, &out.PasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.SSHKeys != nil { + in, out := &in.SSHKeys, &out.SSHKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScriptActions != nil { + in, out := &in.ScriptActions, &out.ScriptActions + *out = make([]SparkClusterRolesZookeeperNodeScriptActionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } + if in.VirtualNetworkID != nil { + in, out := &in.VirtualNetworkID, &out.VirtualNetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterRolesZookeeperNodeParameters. +func (in *SparkClusterRolesZookeeperNodeParameters) DeepCopy() *SparkClusterRolesZookeeperNodeParameters { + if in == nil { + return nil + } + out := new(SparkClusterRolesZookeeperNodeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SparkClusterRolesZookeeperNodeScriptActionsInitParameters) DeepCopyInto(out *SparkClusterRolesZookeeperNodeScriptActionsInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterRolesZookeeperNodeScriptActionsInitParameters. +func (in *SparkClusterRolesZookeeperNodeScriptActionsInitParameters) DeepCopy() *SparkClusterRolesZookeeperNodeScriptActionsInitParameters { + if in == nil { + return nil + } + out := new(SparkClusterRolesZookeeperNodeScriptActionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkClusterRolesZookeeperNodeScriptActionsObservation) DeepCopyInto(out *SparkClusterRolesZookeeperNodeScriptActionsObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterRolesZookeeperNodeScriptActionsObservation. 
+func (in *SparkClusterRolesZookeeperNodeScriptActionsObservation) DeepCopy() *SparkClusterRolesZookeeperNodeScriptActionsObservation { + if in == nil { + return nil + } + out := new(SparkClusterRolesZookeeperNodeScriptActionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkClusterRolesZookeeperNodeScriptActionsParameters) DeepCopyInto(out *SparkClusterRolesZookeeperNodeScriptActionsParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterRolesZookeeperNodeScriptActionsParameters. +func (in *SparkClusterRolesZookeeperNodeScriptActionsParameters) DeepCopy() *SparkClusterRolesZookeeperNodeScriptActionsParameters { + if in == nil { + return nil + } + out := new(SparkClusterRolesZookeeperNodeScriptActionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SparkClusterSecurityProfileInitParameters) DeepCopyInto(out *SparkClusterSecurityProfileInitParameters) { + *out = *in + if in.AaddsResourceID != nil { + in, out := &in.AaddsResourceID, &out.AaddsResourceID + *out = new(string) + **out = **in + } + if in.ClusterUsersGroupDNS != nil { + in, out := &in.ClusterUsersGroupDNS, &out.ClusterUsersGroupDNS + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.DomainUsername != nil { + in, out := &in.DomainUsername, &out.DomainUsername + *out = new(string) + **out = **in + } + if in.LdapsUrls != nil { + in, out := &in.LdapsUrls, &out.LdapsUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MsiResourceID != nil { + in, out := &in.MsiResourceID, &out.MsiResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterSecurityProfileInitParameters. +func (in *SparkClusterSecurityProfileInitParameters) DeepCopy() *SparkClusterSecurityProfileInitParameters { + if in == nil { + return nil + } + out := new(SparkClusterSecurityProfileInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SparkClusterSecurityProfileObservation) DeepCopyInto(out *SparkClusterSecurityProfileObservation) { + *out = *in + if in.AaddsResourceID != nil { + in, out := &in.AaddsResourceID, &out.AaddsResourceID + *out = new(string) + **out = **in + } + if in.ClusterUsersGroupDNS != nil { + in, out := &in.ClusterUsersGroupDNS, &out.ClusterUsersGroupDNS + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.DomainUsername != nil { + in, out := &in.DomainUsername, &out.DomainUsername + *out = new(string) + **out = **in + } + if in.LdapsUrls != nil { + in, out := &in.LdapsUrls, &out.LdapsUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MsiResourceID != nil { + in, out := &in.MsiResourceID, &out.MsiResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterSecurityProfileObservation. +func (in *SparkClusterSecurityProfileObservation) DeepCopy() *SparkClusterSecurityProfileObservation { + if in == nil { + return nil + } + out := new(SparkClusterSecurityProfileObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SparkClusterSecurityProfileParameters) DeepCopyInto(out *SparkClusterSecurityProfileParameters) { + *out = *in + if in.AaddsResourceID != nil { + in, out := &in.AaddsResourceID, &out.AaddsResourceID + *out = new(string) + **out = **in + } + if in.ClusterUsersGroupDNS != nil { + in, out := &in.ClusterUsersGroupDNS, &out.ClusterUsersGroupDNS + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + out.DomainUserPasswordSecretRef = in.DomainUserPasswordSecretRef + if in.DomainUsername != nil { + in, out := &in.DomainUsername, &out.DomainUsername + *out = new(string) + **out = **in + } + if in.LdapsUrls != nil { + in, out := &in.LdapsUrls, &out.LdapsUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MsiResourceID != nil { + in, out := &in.MsiResourceID, &out.MsiResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterSecurityProfileParameters. +func (in *SparkClusterSecurityProfileParameters) DeepCopy() *SparkClusterSecurityProfileParameters { + if in == nil { + return nil + } + out := new(SparkClusterSecurityProfileParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkClusterSpec) DeepCopyInto(out *SparkClusterSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterSpec. 
+func (in *SparkClusterSpec) DeepCopy() *SparkClusterSpec { + if in == nil { + return nil + } + out := new(SparkClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkClusterStatus) DeepCopyInto(out *SparkClusterStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterStatus. +func (in *SparkClusterStatus) DeepCopy() *SparkClusterStatus { + if in == nil { + return nil + } + out := new(SparkClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkClusterStorageAccountGen2InitParameters) DeepCopyInto(out *SparkClusterStorageAccountGen2InitParameters) { + *out = *in + if in.FileSystemID != nil { + in, out := &in.FileSystemID, &out.FileSystemID + *out = new(string) + **out = **in + } + if in.IsDefault != nil { + in, out := &in.IsDefault, &out.IsDefault + *out = new(bool) + **out = **in + } + if in.ManagedIdentityResourceID != nil { + in, out := &in.ManagedIdentityResourceID, &out.ManagedIdentityResourceID + *out = new(string) + **out = **in + } + if in.StorageResourceID != nil { + in, out := &in.StorageResourceID, &out.StorageResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterStorageAccountGen2InitParameters. 
+func (in *SparkClusterStorageAccountGen2InitParameters) DeepCopy() *SparkClusterStorageAccountGen2InitParameters { + if in == nil { + return nil + } + out := new(SparkClusterStorageAccountGen2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkClusterStorageAccountGen2Observation) DeepCopyInto(out *SparkClusterStorageAccountGen2Observation) { + *out = *in + if in.FileSystemID != nil { + in, out := &in.FileSystemID, &out.FileSystemID + *out = new(string) + **out = **in + } + if in.IsDefault != nil { + in, out := &in.IsDefault, &out.IsDefault + *out = new(bool) + **out = **in + } + if in.ManagedIdentityResourceID != nil { + in, out := &in.ManagedIdentityResourceID, &out.ManagedIdentityResourceID + *out = new(string) + **out = **in + } + if in.StorageResourceID != nil { + in, out := &in.StorageResourceID, &out.StorageResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterStorageAccountGen2Observation. +func (in *SparkClusterStorageAccountGen2Observation) DeepCopy() *SparkClusterStorageAccountGen2Observation { + if in == nil { + return nil + } + out := new(SparkClusterStorageAccountGen2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SparkClusterStorageAccountGen2Parameters) DeepCopyInto(out *SparkClusterStorageAccountGen2Parameters) { + *out = *in + if in.FileSystemID != nil { + in, out := &in.FileSystemID, &out.FileSystemID + *out = new(string) + **out = **in + } + if in.IsDefault != nil { + in, out := &in.IsDefault, &out.IsDefault + *out = new(bool) + **out = **in + } + if in.ManagedIdentityResourceID != nil { + in, out := &in.ManagedIdentityResourceID, &out.ManagedIdentityResourceID + *out = new(string) + **out = **in + } + if in.StorageResourceID != nil { + in, out := &in.StorageResourceID, &out.StorageResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterStorageAccountGen2Parameters. +func (in *SparkClusterStorageAccountGen2Parameters) DeepCopy() *SparkClusterStorageAccountGen2Parameters { + if in == nil { + return nil + } + out := new(SparkClusterStorageAccountGen2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SparkClusterStorageAccountInitParameters) DeepCopyInto(out *SparkClusterStorageAccountInitParameters) { + *out = *in + if in.IsDefault != nil { + in, out := &in.IsDefault, &out.IsDefault + *out = new(bool) + **out = **in + } + if in.StorageContainerID != nil { + in, out := &in.StorageContainerID, &out.StorageContainerID + *out = new(string) + **out = **in + } + if in.StorageContainerIDRef != nil { + in, out := &in.StorageContainerIDRef, &out.StorageContainerIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StorageContainerIDSelector != nil { + in, out := &in.StorageContainerIDSelector, &out.StorageContainerIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StorageResourceID != nil { + in, out := &in.StorageResourceID, &out.StorageResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterStorageAccountInitParameters. +func (in *SparkClusterStorageAccountInitParameters) DeepCopy() *SparkClusterStorageAccountInitParameters { + if in == nil { + return nil + } + out := new(SparkClusterStorageAccountInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkClusterStorageAccountObservation) DeepCopyInto(out *SparkClusterStorageAccountObservation) { + *out = *in + if in.IsDefault != nil { + in, out := &in.IsDefault, &out.IsDefault + *out = new(bool) + **out = **in + } + if in.StorageContainerID != nil { + in, out := &in.StorageContainerID, &out.StorageContainerID + *out = new(string) + **out = **in + } + if in.StorageResourceID != nil { + in, out := &in.StorageResourceID, &out.StorageResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterStorageAccountObservation. 
+func (in *SparkClusterStorageAccountObservation) DeepCopy() *SparkClusterStorageAccountObservation { + if in == nil { + return nil + } + out := new(SparkClusterStorageAccountObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkClusterStorageAccountParameters) DeepCopyInto(out *SparkClusterStorageAccountParameters) { + *out = *in + if in.IsDefault != nil { + in, out := &in.IsDefault, &out.IsDefault + *out = new(bool) + **out = **in + } + out.StorageAccountKeySecretRef = in.StorageAccountKeySecretRef + if in.StorageContainerID != nil { + in, out := &in.StorageContainerID, &out.StorageContainerID + *out = new(string) + **out = **in + } + if in.StorageContainerIDRef != nil { + in, out := &in.StorageContainerIDRef, &out.StorageContainerIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StorageContainerIDSelector != nil { + in, out := &in.StorageContainerIDSelector, &out.StorageContainerIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StorageResourceID != nil { + in, out := &in.StorageResourceID, &out.StorageResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkClusterStorageAccountParameters. +func (in *SparkClusterStorageAccountParameters) DeepCopy() *SparkClusterStorageAccountParameters { + if in == nil { + return nil + } + out := new(SparkClusterStorageAccountParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StorageAccountGen2InitParameters) DeepCopyInto(out *StorageAccountGen2InitParameters) { + *out = *in + if in.FileSystemID != nil { + in, out := &in.FileSystemID, &out.FileSystemID + *out = new(string) + **out = **in + } + if in.IsDefault != nil { + in, out := &in.IsDefault, &out.IsDefault + *out = new(bool) + **out = **in + } + if in.ManagedIdentityResourceID != nil { + in, out := &in.ManagedIdentityResourceID, &out.ManagedIdentityResourceID + *out = new(string) + **out = **in + } + if in.StorageResourceID != nil { + in, out := &in.StorageResourceID, &out.StorageResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageAccountGen2InitParameters. +func (in *StorageAccountGen2InitParameters) DeepCopy() *StorageAccountGen2InitParameters { + if in == nil { + return nil + } + out := new(StorageAccountGen2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageAccountGen2Observation) DeepCopyInto(out *StorageAccountGen2Observation) { + *out = *in + if in.FileSystemID != nil { + in, out := &in.FileSystemID, &out.FileSystemID + *out = new(string) + **out = **in + } + if in.IsDefault != nil { + in, out := &in.IsDefault, &out.IsDefault + *out = new(bool) + **out = **in + } + if in.ManagedIdentityResourceID != nil { + in, out := &in.ManagedIdentityResourceID, &out.ManagedIdentityResourceID + *out = new(string) + **out = **in + } + if in.StorageResourceID != nil { + in, out := &in.StorageResourceID, &out.StorageResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageAccountGen2Observation. 
+func (in *StorageAccountGen2Observation) DeepCopy() *StorageAccountGen2Observation { + if in == nil { + return nil + } + out := new(StorageAccountGen2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageAccountGen2Parameters) DeepCopyInto(out *StorageAccountGen2Parameters) { + *out = *in + if in.FileSystemID != nil { + in, out := &in.FileSystemID, &out.FileSystemID + *out = new(string) + **out = **in + } + if in.IsDefault != nil { + in, out := &in.IsDefault, &out.IsDefault + *out = new(bool) + **out = **in + } + if in.ManagedIdentityResourceID != nil { + in, out := &in.ManagedIdentityResourceID, &out.ManagedIdentityResourceID + *out = new(string) + **out = **in + } + if in.StorageResourceID != nil { + in, out := &in.StorageResourceID, &out.StorageResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageAccountGen2Parameters. +func (in *StorageAccountGen2Parameters) DeepCopy() *StorageAccountGen2Parameters { + if in == nil { + return nil + } + out := new(StorageAccountGen2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StorageAccountInitParameters) DeepCopyInto(out *StorageAccountInitParameters) { + *out = *in + if in.IsDefault != nil { + in, out := &in.IsDefault, &out.IsDefault + *out = new(bool) + **out = **in + } + if in.StorageContainerID != nil { + in, out := &in.StorageContainerID, &out.StorageContainerID + *out = new(string) + **out = **in + } + if in.StorageContainerIDRef != nil { + in, out := &in.StorageContainerIDRef, &out.StorageContainerIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StorageContainerIDSelector != nil { + in, out := &in.StorageContainerIDSelector, &out.StorageContainerIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StorageResourceID != nil { + in, out := &in.StorageResourceID, &out.StorageResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageAccountInitParameters. +func (in *StorageAccountInitParameters) DeepCopy() *StorageAccountInitParameters { + if in == nil { + return nil + } + out := new(StorageAccountInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageAccountObservation) DeepCopyInto(out *StorageAccountObservation) { + *out = *in + if in.IsDefault != nil { + in, out := &in.IsDefault, &out.IsDefault + *out = new(bool) + **out = **in + } + if in.StorageContainerID != nil { + in, out := &in.StorageContainerID, &out.StorageContainerID + *out = new(string) + **out = **in + } + if in.StorageResourceID != nil { + in, out := &in.StorageResourceID, &out.StorageResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageAccountObservation. 
+func (in *StorageAccountObservation) DeepCopy() *StorageAccountObservation { + if in == nil { + return nil + } + out := new(StorageAccountObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageAccountParameters) DeepCopyInto(out *StorageAccountParameters) { + *out = *in + if in.IsDefault != nil { + in, out := &in.IsDefault, &out.IsDefault + *out = new(bool) + **out = **in + } + out.StorageAccountKeySecretRef = in.StorageAccountKeySecretRef + if in.StorageContainerID != nil { + in, out := &in.StorageContainerID, &out.StorageContainerID + *out = new(string) + **out = **in + } + if in.StorageContainerIDRef != nil { + in, out := &in.StorageContainerIDRef, &out.StorageContainerIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StorageContainerIDSelector != nil { + in, out := &in.StorageContainerIDSelector, &out.StorageContainerIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StorageResourceID != nil { + in, out := &in.StorageResourceID, &out.StorageResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageAccountParameters. +func (in *StorageAccountParameters) DeepCopy() *StorageAccountParameters { + if in == nil { + return nil + } + out := new(StorageAccountParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UninstallScriptActionsInitParameters) DeepCopyInto(out *UninstallScriptActionsInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UninstallScriptActionsInitParameters. +func (in *UninstallScriptActionsInitParameters) DeepCopy() *UninstallScriptActionsInitParameters { + if in == nil { + return nil + } + out := new(UninstallScriptActionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UninstallScriptActionsObservation) DeepCopyInto(out *UninstallScriptActionsObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UninstallScriptActionsObservation. +func (in *UninstallScriptActionsObservation) DeepCopy() *UninstallScriptActionsObservation { + if in == nil { + return nil + } + out := new(UninstallScriptActionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UninstallScriptActionsParameters) DeepCopyInto(out *UninstallScriptActionsParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UninstallScriptActionsParameters. +func (in *UninstallScriptActionsParameters) DeepCopy() *UninstallScriptActionsParameters { + if in == nil { + return nil + } + out := new(UninstallScriptActionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkerNodeAutoscaleCapacityInitParameters) DeepCopyInto(out *WorkerNodeAutoscaleCapacityInitParameters) { + *out = *in + if in.MaxInstanceCount != nil { + in, out := &in.MaxInstanceCount, &out.MaxInstanceCount + *out = new(float64) + **out = **in + } + if in.MinInstanceCount != nil { + in, out := &in.MinInstanceCount, &out.MinInstanceCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerNodeAutoscaleCapacityInitParameters. +func (in *WorkerNodeAutoscaleCapacityInitParameters) DeepCopy() *WorkerNodeAutoscaleCapacityInitParameters { + if in == nil { + return nil + } + out := new(WorkerNodeAutoscaleCapacityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WorkerNodeAutoscaleCapacityObservation) DeepCopyInto(out *WorkerNodeAutoscaleCapacityObservation) { + *out = *in + if in.MaxInstanceCount != nil { + in, out := &in.MaxInstanceCount, &out.MaxInstanceCount + *out = new(float64) + **out = **in + } + if in.MinInstanceCount != nil { + in, out := &in.MinInstanceCount, &out.MinInstanceCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerNodeAutoscaleCapacityObservation. +func (in *WorkerNodeAutoscaleCapacityObservation) DeepCopy() *WorkerNodeAutoscaleCapacityObservation { + if in == nil { + return nil + } + out := new(WorkerNodeAutoscaleCapacityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkerNodeAutoscaleCapacityParameters) DeepCopyInto(out *WorkerNodeAutoscaleCapacityParameters) { + *out = *in + if in.MaxInstanceCount != nil { + in, out := &in.MaxInstanceCount, &out.MaxInstanceCount + *out = new(float64) + **out = **in + } + if in.MinInstanceCount != nil { + in, out := &in.MinInstanceCount, &out.MinInstanceCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerNodeAutoscaleCapacityParameters. +func (in *WorkerNodeAutoscaleCapacityParameters) DeepCopy() *WorkerNodeAutoscaleCapacityParameters { + if in == nil { + return nil + } + out := new(WorkerNodeAutoscaleCapacityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WorkerNodeAutoscaleInitParameters) DeepCopyInto(out *WorkerNodeAutoscaleInitParameters) { + *out = *in + if in.Recurrence != nil { + in, out := &in.Recurrence, &out.Recurrence + *out = new(AutoscaleRecurrenceInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerNodeAutoscaleInitParameters. +func (in *WorkerNodeAutoscaleInitParameters) DeepCopy() *WorkerNodeAutoscaleInitParameters { + if in == nil { + return nil + } + out := new(WorkerNodeAutoscaleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkerNodeAutoscaleObservation) DeepCopyInto(out *WorkerNodeAutoscaleObservation) { + *out = *in + if in.Recurrence != nil { + in, out := &in.Recurrence, &out.Recurrence + *out = new(AutoscaleRecurrenceObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerNodeAutoscaleObservation. +func (in *WorkerNodeAutoscaleObservation) DeepCopy() *WorkerNodeAutoscaleObservation { + if in == nil { + return nil + } + out := new(WorkerNodeAutoscaleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkerNodeAutoscaleParameters) DeepCopyInto(out *WorkerNodeAutoscaleParameters) { + *out = *in + if in.Recurrence != nil { + in, out := &in.Recurrence, &out.Recurrence + *out = new(AutoscaleRecurrenceParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerNodeAutoscaleParameters. 
+func (in *WorkerNodeAutoscaleParameters) DeepCopy() *WorkerNodeAutoscaleParameters { + if in == nil { + return nil + } + out := new(WorkerNodeAutoscaleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkerNodeAutoscaleRecurrenceInitParameters) DeepCopyInto(out *WorkerNodeAutoscaleRecurrenceInitParameters) { + *out = *in + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = make([]AutoscaleRecurrenceScheduleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Timezone != nil { + in, out := &in.Timezone, &out.Timezone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerNodeAutoscaleRecurrenceInitParameters. +func (in *WorkerNodeAutoscaleRecurrenceInitParameters) DeepCopy() *WorkerNodeAutoscaleRecurrenceInitParameters { + if in == nil { + return nil + } + out := new(WorkerNodeAutoscaleRecurrenceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkerNodeAutoscaleRecurrenceObservation) DeepCopyInto(out *WorkerNodeAutoscaleRecurrenceObservation) { + *out = *in + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = make([]AutoscaleRecurrenceScheduleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Timezone != nil { + in, out := &in.Timezone, &out.Timezone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerNodeAutoscaleRecurrenceObservation. 
+func (in *WorkerNodeAutoscaleRecurrenceObservation) DeepCopy() *WorkerNodeAutoscaleRecurrenceObservation { + if in == nil { + return nil + } + out := new(WorkerNodeAutoscaleRecurrenceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkerNodeAutoscaleRecurrenceParameters) DeepCopyInto(out *WorkerNodeAutoscaleRecurrenceParameters) { + *out = *in + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = make([]AutoscaleRecurrenceScheduleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Timezone != nil { + in, out := &in.Timezone, &out.Timezone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerNodeAutoscaleRecurrenceParameters. +func (in *WorkerNodeAutoscaleRecurrenceParameters) DeepCopy() *WorkerNodeAutoscaleRecurrenceParameters { + if in == nil { + return nil + } + out := new(WorkerNodeAutoscaleRecurrenceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WorkerNodeAutoscaleRecurrenceScheduleInitParameters) DeepCopyInto(out *WorkerNodeAutoscaleRecurrenceScheduleInitParameters) { + *out = *in + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TargetInstanceCount != nil { + in, out := &in.TargetInstanceCount, &out.TargetInstanceCount + *out = new(float64) + **out = **in + } + if in.Time != nil { + in, out := &in.Time, &out.Time + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerNodeAutoscaleRecurrenceScheduleInitParameters. +func (in *WorkerNodeAutoscaleRecurrenceScheduleInitParameters) DeepCopy() *WorkerNodeAutoscaleRecurrenceScheduleInitParameters { + if in == nil { + return nil + } + out := new(WorkerNodeAutoscaleRecurrenceScheduleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkerNodeAutoscaleRecurrenceScheduleObservation) DeepCopyInto(out *WorkerNodeAutoscaleRecurrenceScheduleObservation) { + *out = *in + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TargetInstanceCount != nil { + in, out := &in.TargetInstanceCount, &out.TargetInstanceCount + *out = new(float64) + **out = **in + } + if in.Time != nil { + in, out := &in.Time, &out.Time + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerNodeAutoscaleRecurrenceScheduleObservation. 
+func (in *WorkerNodeAutoscaleRecurrenceScheduleObservation) DeepCopy() *WorkerNodeAutoscaleRecurrenceScheduleObservation { + if in == nil { + return nil + } + out := new(WorkerNodeAutoscaleRecurrenceScheduleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkerNodeAutoscaleRecurrenceScheduleParameters) DeepCopyInto(out *WorkerNodeAutoscaleRecurrenceScheduleParameters) { + *out = *in + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TargetInstanceCount != nil { + in, out := &in.TargetInstanceCount, &out.TargetInstanceCount + *out = new(float64) + **out = **in + } + if in.Time != nil { + in, out := &in.Time, &out.Time + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerNodeAutoscaleRecurrenceScheduleParameters. +func (in *WorkerNodeAutoscaleRecurrenceScheduleParameters) DeepCopy() *WorkerNodeAutoscaleRecurrenceScheduleParameters { + if in == nil { + return nil + } + out := new(WorkerNodeAutoscaleRecurrenceScheduleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WorkerNodeInitParameters) DeepCopyInto(out *WorkerNodeInitParameters) { + *out = *in + if in.Autoscale != nil { + in, out := &in.Autoscale, &out.Autoscale + *out = new(AutoscaleInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SSHKeys != nil { + in, out := &in.SSHKeys, &out.SSHKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScriptActions != nil { + in, out := &in.ScriptActions, &out.ScriptActions + *out = make([]WorkerNodeScriptActionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.TargetInstanceCount != nil { + in, out := &in.TargetInstanceCount, &out.TargetInstanceCount + *out = new(float64) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } + if in.VirtualNetworkID != nil { + in, out := &in.VirtualNetworkID, &out.VirtualNetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerNodeInitParameters. +func (in *WorkerNodeInitParameters) DeepCopy() *WorkerNodeInitParameters { + if in == nil { + return nil + } + out := new(WorkerNodeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WorkerNodeObservation) DeepCopyInto(out *WorkerNodeObservation) { + *out = *in + if in.Autoscale != nil { + in, out := &in.Autoscale, &out.Autoscale + *out = new(AutoscaleObservation) + (*in).DeepCopyInto(*out) + } + if in.SSHKeys != nil { + in, out := &in.SSHKeys, &out.SSHKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScriptActions != nil { + in, out := &in.ScriptActions, &out.ScriptActions + *out = make([]WorkerNodeScriptActionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.TargetInstanceCount != nil { + in, out := &in.TargetInstanceCount, &out.TargetInstanceCount + *out = new(float64) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } + if in.VirtualNetworkID != nil { + in, out := &in.VirtualNetworkID, &out.VirtualNetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerNodeObservation. +func (in *WorkerNodeObservation) DeepCopy() *WorkerNodeObservation { + if in == nil { + return nil + } + out := new(WorkerNodeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WorkerNodeParameters) DeepCopyInto(out *WorkerNodeParameters) { + *out = *in + if in.Autoscale != nil { + in, out := &in.Autoscale, &out.Autoscale + *out = new(AutoscaleParameters) + (*in).DeepCopyInto(*out) + } + if in.PasswordSecretRef != nil { + in, out := &in.PasswordSecretRef, &out.PasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.SSHKeys != nil { + in, out := &in.SSHKeys, &out.SSHKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScriptActions != nil { + in, out := &in.ScriptActions, &out.ScriptActions + *out = make([]WorkerNodeScriptActionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.TargetInstanceCount != nil { + in, out := &in.TargetInstanceCount, &out.TargetInstanceCount + *out = new(float64) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } + if in.VirtualNetworkID != nil { + in, out := &in.VirtualNetworkID, &out.VirtualNetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerNodeParameters. 
+func (in *WorkerNodeParameters) DeepCopy() *WorkerNodeParameters { + if in == nil { + return nil + } + out := new(WorkerNodeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkerNodeScriptActionsInitParameters) DeepCopyInto(out *WorkerNodeScriptActionsInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerNodeScriptActionsInitParameters. +func (in *WorkerNodeScriptActionsInitParameters) DeepCopy() *WorkerNodeScriptActionsInitParameters { + if in == nil { + return nil + } + out := new(WorkerNodeScriptActionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkerNodeScriptActionsObservation) DeepCopyInto(out *WorkerNodeScriptActionsObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerNodeScriptActionsObservation. 
+func (in *WorkerNodeScriptActionsObservation) DeepCopy() *WorkerNodeScriptActionsObservation { + if in == nil { + return nil + } + out := new(WorkerNodeScriptActionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkerNodeScriptActionsParameters) DeepCopyInto(out *WorkerNodeScriptActionsParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerNodeScriptActionsParameters. +func (in *WorkerNodeScriptActionsParameters) DeepCopy() *WorkerNodeScriptActionsParameters { + if in == nil { + return nil + } + out := new(WorkerNodeScriptActionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ZookeeperNodeInitParameters) DeepCopyInto(out *ZookeeperNodeInitParameters) { + *out = *in + if in.SSHKeys != nil { + in, out := &in.SSHKeys, &out.SSHKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScriptActions != nil { + in, out := &in.ScriptActions, &out.ScriptActions + *out = make([]ZookeeperNodeScriptActionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } + if in.VirtualNetworkID != nil { + in, out := &in.VirtualNetworkID, &out.VirtualNetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZookeeperNodeInitParameters. +func (in *ZookeeperNodeInitParameters) DeepCopy() *ZookeeperNodeInitParameters { + if in == nil { + return nil + } + out := new(ZookeeperNodeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ZookeeperNodeObservation) DeepCopyInto(out *ZookeeperNodeObservation) { + *out = *in + if in.SSHKeys != nil { + in, out := &in.SSHKeys, &out.SSHKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScriptActions != nil { + in, out := &in.ScriptActions, &out.ScriptActions + *out = make([]ZookeeperNodeScriptActionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } + if in.VirtualNetworkID != nil { + in, out := &in.VirtualNetworkID, &out.VirtualNetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZookeeperNodeObservation. +func (in *ZookeeperNodeObservation) DeepCopy() *ZookeeperNodeObservation { + if in == nil { + return nil + } + out := new(ZookeeperNodeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ZookeeperNodeParameters) DeepCopyInto(out *ZookeeperNodeParameters) { + *out = *in + if in.PasswordSecretRef != nil { + in, out := &in.PasswordSecretRef, &out.PasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.SSHKeys != nil { + in, out := &in.SSHKeys, &out.SSHKeys + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScriptActions != nil { + in, out := &in.ScriptActions, &out.ScriptActions + *out = make([]ZookeeperNodeScriptActionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } + if in.VirtualNetworkID != nil { + in, out := &in.VirtualNetworkID, &out.VirtualNetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZookeeperNodeParameters. +func (in *ZookeeperNodeParameters) DeepCopy() *ZookeeperNodeParameters { + if in == nil { + return nil + } + out := new(ZookeeperNodeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ZookeeperNodeScriptActionsInitParameters) DeepCopyInto(out *ZookeeperNodeScriptActionsInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZookeeperNodeScriptActionsInitParameters. +func (in *ZookeeperNodeScriptActionsInitParameters) DeepCopy() *ZookeeperNodeScriptActionsInitParameters { + if in == nil { + return nil + } + out := new(ZookeeperNodeScriptActionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ZookeeperNodeScriptActionsObservation) DeepCopyInto(out *ZookeeperNodeScriptActionsObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZookeeperNodeScriptActionsObservation. +func (in *ZookeeperNodeScriptActionsObservation) DeepCopy() *ZookeeperNodeScriptActionsObservation { + if in == nil { + return nil + } + out := new(ZookeeperNodeScriptActionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ZookeeperNodeScriptActionsParameters) DeepCopyInto(out *ZookeeperNodeScriptActionsParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZookeeperNodeScriptActionsParameters. +func (in *ZookeeperNodeScriptActionsParameters) DeepCopy() *ZookeeperNodeScriptActionsParameters { + if in == nil { + return nil + } + out := new(ZookeeperNodeScriptActionsParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/hdinsight/v1beta2/zz_generated.managed.go b/apis/hdinsight/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..94bf20782 --- /dev/null +++ b/apis/hdinsight/v1beta2/zz_generated.managed.go @@ -0,0 +1,308 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this HBaseCluster. +func (mg *HBaseCluster) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this HBaseCluster. +func (mg *HBaseCluster) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this HBaseCluster. +func (mg *HBaseCluster) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this HBaseCluster. +func (mg *HBaseCluster) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this HBaseCluster. 
+func (mg *HBaseCluster) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this HBaseCluster. +func (mg *HBaseCluster) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this HBaseCluster. +func (mg *HBaseCluster) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this HBaseCluster. +func (mg *HBaseCluster) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this HBaseCluster. +func (mg *HBaseCluster) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this HBaseCluster. +func (mg *HBaseCluster) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this HBaseCluster. +func (mg *HBaseCluster) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this HBaseCluster. +func (mg *HBaseCluster) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this HadoopCluster. +func (mg *HadoopCluster) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this HadoopCluster. +func (mg *HadoopCluster) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this HadoopCluster. +func (mg *HadoopCluster) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this HadoopCluster. 
+func (mg *HadoopCluster) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this HadoopCluster. +func (mg *HadoopCluster) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this HadoopCluster. +func (mg *HadoopCluster) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this HadoopCluster. +func (mg *HadoopCluster) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this HadoopCluster. +func (mg *HadoopCluster) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this HadoopCluster. +func (mg *HadoopCluster) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this HadoopCluster. +func (mg *HadoopCluster) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this HadoopCluster. +func (mg *HadoopCluster) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this HadoopCluster. +func (mg *HadoopCluster) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this InteractiveQueryCluster. +func (mg *InteractiveQueryCluster) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this InteractiveQueryCluster. +func (mg *InteractiveQueryCluster) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this InteractiveQueryCluster. 
+func (mg *InteractiveQueryCluster) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this InteractiveQueryCluster. +func (mg *InteractiveQueryCluster) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this InteractiveQueryCluster. +func (mg *InteractiveQueryCluster) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this InteractiveQueryCluster. +func (mg *InteractiveQueryCluster) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this InteractiveQueryCluster. +func (mg *InteractiveQueryCluster) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this InteractiveQueryCluster. +func (mg *InteractiveQueryCluster) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this InteractiveQueryCluster. +func (mg *InteractiveQueryCluster) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this InteractiveQueryCluster. +func (mg *InteractiveQueryCluster) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this InteractiveQueryCluster. +func (mg *InteractiveQueryCluster) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this InteractiveQueryCluster. +func (mg *InteractiveQueryCluster) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this KafkaCluster. 
+func (mg *KafkaCluster) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this KafkaCluster. +func (mg *KafkaCluster) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this KafkaCluster. +func (mg *KafkaCluster) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this KafkaCluster. +func (mg *KafkaCluster) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this KafkaCluster. +func (mg *KafkaCluster) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this KafkaCluster. +func (mg *KafkaCluster) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this KafkaCluster. +func (mg *KafkaCluster) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this KafkaCluster. +func (mg *KafkaCluster) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this KafkaCluster. +func (mg *KafkaCluster) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this KafkaCluster. +func (mg *KafkaCluster) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this KafkaCluster. +func (mg *KafkaCluster) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this KafkaCluster. 
+func (mg *KafkaCluster) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this SparkCluster. +func (mg *SparkCluster) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this SparkCluster. +func (mg *SparkCluster) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this SparkCluster. +func (mg *SparkCluster) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this SparkCluster. +func (mg *SparkCluster) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this SparkCluster. +func (mg *SparkCluster) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this SparkCluster. +func (mg *SparkCluster) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this SparkCluster. +func (mg *SparkCluster) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this SparkCluster. +func (mg *SparkCluster) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this SparkCluster. +func (mg *SparkCluster) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this SparkCluster. +func (mg *SparkCluster) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this SparkCluster. 
+func (mg *SparkCluster) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this SparkCluster. +func (mg *SparkCluster) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/hdinsight/v1beta2/zz_generated.managedlist.go b/apis/hdinsight/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..3eeedd0ab --- /dev/null +++ b/apis/hdinsight/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,53 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this HBaseClusterList. +func (l *HBaseClusterList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this HadoopClusterList. +func (l *HadoopClusterList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this InteractiveQueryClusterList. +func (l *InteractiveQueryClusterList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this KafkaClusterList. +func (l *KafkaClusterList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this SparkClusterList. 
+func (l *SparkClusterList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/hdinsight/v1beta2/zz_generated.resolvers.go b/apis/hdinsight/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..245a73a52 --- /dev/null +++ b/apis/hdinsight/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,1126 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + rconfig "github.com/upbound/provider-azure/apis/rconfig" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + apisresolver "github.com/upbound/provider-azure/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *HBaseCluster) ResolveReferences( // ResolveReferences of this HBaseCluster. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.Roles != nil { + if mg.Spec.ForProvider.Roles.HeadNode != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Roles.HeadNode.SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.Roles.HeadNode.SubnetIDRef, + Selector: mg.Spec.ForProvider.Roles.HeadNode.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Roles.HeadNode.SubnetID") + } + mg.Spec.ForProvider.Roles.HeadNode.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Roles.HeadNode.SubnetIDRef = rsp.ResolvedReference + 
+ } + } + if mg.Spec.ForProvider.Roles != nil { + if mg.Spec.ForProvider.Roles.WorkerNode != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Roles.WorkerNode.SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.Roles.WorkerNode.SubnetIDRef, + Selector: mg.Spec.ForProvider.Roles.WorkerNode.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Roles.WorkerNode.SubnetID") + } + mg.Spec.ForProvider.Roles.WorkerNode.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Roles.WorkerNode.SubnetIDRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.Roles != nil { + if mg.Spec.ForProvider.Roles.ZookeeperNode != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Roles.ZookeeperNode.SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.Roles.ZookeeperNode.SubnetIDRef, + Selector: mg.Spec.ForProvider.Roles.ZookeeperNode.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Roles.ZookeeperNode.SubnetID") + } + mg.Spec.ForProvider.Roles.ZookeeperNode.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Roles.ZookeeperNode.SubnetIDRef = rsp.ResolvedReference + + } + } + for i3 
:= 0; i3 < len(mg.Spec.ForProvider.StorageAccount); i3++ { + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Container", "ContainerList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StorageAccount[i3].StorageContainerID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.StorageAccount[i3].StorageContainerIDRef, + Selector: mg.Spec.ForProvider.StorageAccount[i3].StorageContainerIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StorageAccount[i3].StorageContainerID") + } + mg.Spec.ForProvider.StorageAccount[i3].StorageContainerID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StorageAccount[i3].StorageContainerIDRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.Roles != nil { + if mg.Spec.InitProvider.Roles.HeadNode != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Roles.HeadNode.SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.Roles.HeadNode.SubnetIDRef, + Selector: mg.Spec.InitProvider.Roles.HeadNode.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Roles.HeadNode.SubnetID") + } + mg.Spec.InitProvider.Roles.HeadNode.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Roles.HeadNode.SubnetIDRef = rsp.ResolvedReference + + } + } 
+ if mg.Spec.InitProvider.Roles != nil { + if mg.Spec.InitProvider.Roles.WorkerNode != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Roles.WorkerNode.SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.Roles.WorkerNode.SubnetIDRef, + Selector: mg.Spec.InitProvider.Roles.WorkerNode.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Roles.WorkerNode.SubnetID") + } + mg.Spec.InitProvider.Roles.WorkerNode.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Roles.WorkerNode.SubnetIDRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.Roles != nil { + if mg.Spec.InitProvider.Roles.ZookeeperNode != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Roles.ZookeeperNode.SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.Roles.ZookeeperNode.SubnetIDRef, + Selector: mg.Spec.InitProvider.Roles.ZookeeperNode.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Roles.ZookeeperNode.SubnetID") + } + mg.Spec.InitProvider.Roles.ZookeeperNode.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Roles.ZookeeperNode.SubnetIDRef = rsp.ResolvedReference + + } + } 
+ for i3 := 0; i3 < len(mg.Spec.InitProvider.StorageAccount); i3++ { + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Container", "ContainerList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StorageAccount[i3].StorageContainerID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.StorageAccount[i3].StorageContainerIDRef, + Selector: mg.Spec.InitProvider.StorageAccount[i3].StorageContainerIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StorageAccount[i3].StorageContainerID") + } + mg.Spec.InitProvider.StorageAccount[i3].StorageContainerID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StorageAccount[i3].StorageContainerIDRef = rsp.ResolvedReference + + } + + return nil +} + +// ResolveReferences of this HadoopCluster. 
+func (mg *HadoopCluster) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.Roles != nil { + if mg.Spec.ForProvider.Roles.HeadNode != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Roles.HeadNode.SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.Roles.HeadNode.SubnetIDRef, + Selector: mg.Spec.ForProvider.Roles.HeadNode.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Roles.HeadNode.SubnetID") + } + mg.Spec.ForProvider.Roles.HeadNode.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + 
mg.Spec.ForProvider.Roles.HeadNode.SubnetIDRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.Roles != nil { + if mg.Spec.ForProvider.Roles.WorkerNode != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Roles.WorkerNode.SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.Roles.WorkerNode.SubnetIDRef, + Selector: mg.Spec.ForProvider.Roles.WorkerNode.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Roles.WorkerNode.SubnetID") + } + mg.Spec.ForProvider.Roles.WorkerNode.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Roles.WorkerNode.SubnetIDRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.Roles != nil { + if mg.Spec.ForProvider.Roles.ZookeeperNode != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Roles.ZookeeperNode.SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.Roles.ZookeeperNode.SubnetIDRef, + Selector: mg.Spec.ForProvider.Roles.ZookeeperNode.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Roles.ZookeeperNode.SubnetID") + } + mg.Spec.ForProvider.Roles.ZookeeperNode.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + 
mg.Spec.ForProvider.Roles.ZookeeperNode.SubnetIDRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.StorageAccount); i3++ { + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Container", "ContainerList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StorageAccount[i3].StorageContainerID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.StorageAccount[i3].StorageContainerIDRef, + Selector: mg.Spec.ForProvider.StorageAccount[i3].StorageContainerIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StorageAccount[i3].StorageContainerID") + } + mg.Spec.ForProvider.StorageAccount[i3].StorageContainerID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StorageAccount[i3].StorageContainerIDRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.Roles != nil { + if mg.Spec.InitProvider.Roles.HeadNode != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Roles.HeadNode.SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.Roles.HeadNode.SubnetIDRef, + Selector: mg.Spec.InitProvider.Roles.HeadNode.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Roles.HeadNode.SubnetID") + } + mg.Spec.InitProvider.Roles.HeadNode.SubnetID = 
reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Roles.HeadNode.SubnetIDRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.Roles != nil { + if mg.Spec.InitProvider.Roles.WorkerNode != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Roles.WorkerNode.SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.Roles.WorkerNode.SubnetIDRef, + Selector: mg.Spec.InitProvider.Roles.WorkerNode.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Roles.WorkerNode.SubnetID") + } + mg.Spec.InitProvider.Roles.WorkerNode.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Roles.WorkerNode.SubnetIDRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.Roles != nil { + if mg.Spec.InitProvider.Roles.ZookeeperNode != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Roles.ZookeeperNode.SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.Roles.ZookeeperNode.SubnetIDRef, + Selector: mg.Spec.InitProvider.Roles.ZookeeperNode.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Roles.ZookeeperNode.SubnetID") + } + mg.Spec.InitProvider.Roles.ZookeeperNode.SubnetID = 
reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Roles.ZookeeperNode.SubnetIDRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.StorageAccount); i3++ { + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Container", "ContainerList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StorageAccount[i3].StorageContainerID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.StorageAccount[i3].StorageContainerIDRef, + Selector: mg.Spec.InitProvider.StorageAccount[i3].StorageContainerIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StorageAccount[i3].StorageContainerID") + } + mg.Spec.InitProvider.StorageAccount[i3].StorageContainerID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StorageAccount[i3].StorageContainerIDRef = rsp.ResolvedReference + + } + + return nil +} + +// ResolveReferences of this InteractiveQueryCluster. 
+func (mg *InteractiveQueryCluster) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.Roles != nil { + if mg.Spec.ForProvider.Roles.HeadNode != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Roles.HeadNode.SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.Roles.HeadNode.SubnetIDRef, + Selector: mg.Spec.ForProvider.Roles.HeadNode.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Roles.HeadNode.SubnetID") + } + mg.Spec.ForProvider.Roles.HeadNode.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + 
mg.Spec.ForProvider.Roles.HeadNode.SubnetIDRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.Roles != nil { + if mg.Spec.ForProvider.Roles.WorkerNode != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Roles.WorkerNode.SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.Roles.WorkerNode.SubnetIDRef, + Selector: mg.Spec.ForProvider.Roles.WorkerNode.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Roles.WorkerNode.SubnetID") + } + mg.Spec.ForProvider.Roles.WorkerNode.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Roles.WorkerNode.SubnetIDRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.Roles != nil { + if mg.Spec.ForProvider.Roles.ZookeeperNode != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Roles.ZookeeperNode.SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.Roles.ZookeeperNode.SubnetIDRef, + Selector: mg.Spec.ForProvider.Roles.ZookeeperNode.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Roles.ZookeeperNode.SubnetID") + } + mg.Spec.ForProvider.Roles.ZookeeperNode.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + 
mg.Spec.ForProvider.Roles.ZookeeperNode.SubnetIDRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.StorageAccount); i3++ { + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Container", "ContainerList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StorageAccount[i3].StorageContainerID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.StorageAccount[i3].StorageContainerIDRef, + Selector: mg.Spec.ForProvider.StorageAccount[i3].StorageContainerIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StorageAccount[i3].StorageContainerID") + } + mg.Spec.ForProvider.StorageAccount[i3].StorageContainerID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StorageAccount[i3].StorageContainerIDRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.Roles != nil { + if mg.Spec.InitProvider.Roles.HeadNode != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Roles.HeadNode.SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.Roles.HeadNode.SubnetIDRef, + Selector: mg.Spec.InitProvider.Roles.HeadNode.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Roles.HeadNode.SubnetID") + } + mg.Spec.InitProvider.Roles.HeadNode.SubnetID = 
reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Roles.HeadNode.SubnetIDRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.Roles != nil { + if mg.Spec.InitProvider.Roles.WorkerNode != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Roles.WorkerNode.SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.Roles.WorkerNode.SubnetIDRef, + Selector: mg.Spec.InitProvider.Roles.WorkerNode.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Roles.WorkerNode.SubnetID") + } + mg.Spec.InitProvider.Roles.WorkerNode.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Roles.WorkerNode.SubnetIDRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.Roles != nil { + if mg.Spec.InitProvider.Roles.ZookeeperNode != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Roles.ZookeeperNode.SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.Roles.ZookeeperNode.SubnetIDRef, + Selector: mg.Spec.InitProvider.Roles.ZookeeperNode.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Roles.ZookeeperNode.SubnetID") + } + mg.Spec.InitProvider.Roles.ZookeeperNode.SubnetID = 
reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Roles.ZookeeperNode.SubnetIDRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.StorageAccount); i3++ { + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Container", "ContainerList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StorageAccount[i3].StorageContainerID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.StorageAccount[i3].StorageContainerIDRef, + Selector: mg.Spec.InitProvider.StorageAccount[i3].StorageContainerIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StorageAccount[i3].StorageContainerID") + } + mg.Spec.InitProvider.StorageAccount[i3].StorageContainerID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StorageAccount[i3].StorageContainerIDRef = rsp.ResolvedReference + + } + + return nil +} + +// ResolveReferences of this KafkaCluster. 
+func (mg *KafkaCluster) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.Roles != nil { + if mg.Spec.ForProvider.Roles.HeadNode != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Roles.HeadNode.SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.Roles.HeadNode.SubnetIDRef, + Selector: mg.Spec.ForProvider.Roles.HeadNode.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Roles.HeadNode.SubnetID") + } + mg.Spec.ForProvider.Roles.HeadNode.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + 
mg.Spec.ForProvider.Roles.HeadNode.SubnetIDRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.Roles != nil { + if mg.Spec.ForProvider.Roles.KafkaManagementNode != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Roles.KafkaManagementNode.SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.Roles.KafkaManagementNode.SubnetIDRef, + Selector: mg.Spec.ForProvider.Roles.KafkaManagementNode.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Roles.KafkaManagementNode.SubnetID") + } + mg.Spec.ForProvider.Roles.KafkaManagementNode.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Roles.KafkaManagementNode.SubnetIDRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.Roles != nil { + if mg.Spec.ForProvider.Roles.WorkerNode != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Roles.WorkerNode.SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.Roles.WorkerNode.SubnetIDRef, + Selector: mg.Spec.ForProvider.Roles.WorkerNode.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Roles.WorkerNode.SubnetID") + } + mg.Spec.ForProvider.Roles.WorkerNode.SubnetID = 
reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Roles.WorkerNode.SubnetIDRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.Roles != nil { + if mg.Spec.ForProvider.Roles.ZookeeperNode != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Roles.ZookeeperNode.SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.Roles.ZookeeperNode.SubnetIDRef, + Selector: mg.Spec.ForProvider.Roles.ZookeeperNode.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Roles.ZookeeperNode.SubnetID") + } + mg.Spec.ForProvider.Roles.ZookeeperNode.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Roles.ZookeeperNode.SubnetIDRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.StorageAccount); i3++ { + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Container", "ContainerList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StorageAccount[i3].StorageContainerID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.StorageAccount[i3].StorageContainerIDRef, + Selector: mg.Spec.ForProvider.StorageAccount[i3].StorageContainerIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StorageAccount[i3].StorageContainerID") + } + 
mg.Spec.ForProvider.StorageAccount[i3].StorageContainerID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StorageAccount[i3].StorageContainerIDRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.Roles != nil { + if mg.Spec.InitProvider.Roles.HeadNode != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Roles.HeadNode.SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.Roles.HeadNode.SubnetIDRef, + Selector: mg.Spec.InitProvider.Roles.HeadNode.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Roles.HeadNode.SubnetID") + } + mg.Spec.InitProvider.Roles.HeadNode.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Roles.HeadNode.SubnetIDRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.Roles != nil { + if mg.Spec.InitProvider.Roles.KafkaManagementNode != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Roles.KafkaManagementNode.SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.Roles.KafkaManagementNode.SubnetIDRef, + Selector: mg.Spec.InitProvider.Roles.KafkaManagementNode.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, 
"mg.Spec.InitProvider.Roles.KafkaManagementNode.SubnetID") + } + mg.Spec.InitProvider.Roles.KafkaManagementNode.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Roles.KafkaManagementNode.SubnetIDRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.Roles != nil { + if mg.Spec.InitProvider.Roles.WorkerNode != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Roles.WorkerNode.SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.Roles.WorkerNode.SubnetIDRef, + Selector: mg.Spec.InitProvider.Roles.WorkerNode.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Roles.WorkerNode.SubnetID") + } + mg.Spec.InitProvider.Roles.WorkerNode.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Roles.WorkerNode.SubnetIDRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.Roles != nil { + if mg.Spec.InitProvider.Roles.ZookeeperNode != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Roles.ZookeeperNode.SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.Roles.ZookeeperNode.SubnetIDRef, + Selector: mg.Spec.InitProvider.Roles.ZookeeperNode.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return 
errors.Wrap(err, "mg.Spec.InitProvider.Roles.ZookeeperNode.SubnetID") + } + mg.Spec.InitProvider.Roles.ZookeeperNode.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Roles.ZookeeperNode.SubnetIDRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.StorageAccount); i3++ { + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Container", "ContainerList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StorageAccount[i3].StorageContainerID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.StorageAccount[i3].StorageContainerIDRef, + Selector: mg.Spec.InitProvider.StorageAccount[i3].StorageContainerIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StorageAccount[i3].StorageContainerID") + } + mg.Spec.InitProvider.StorageAccount[i3].StorageContainerID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StorageAccount[i3].StorageContainerIDRef = rsp.ResolvedReference + + } + + return nil +} + +// ResolveReferences of this SparkCluster. 
+func (mg *SparkCluster) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.Roles != nil { + if mg.Spec.ForProvider.Roles.HeadNode != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Roles.HeadNode.SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.Roles.HeadNode.SubnetIDRef, + Selector: mg.Spec.ForProvider.Roles.HeadNode.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Roles.HeadNode.SubnetID") + } + mg.Spec.ForProvider.Roles.HeadNode.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + 
mg.Spec.ForProvider.Roles.HeadNode.SubnetIDRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.Roles != nil { + if mg.Spec.ForProvider.Roles.WorkerNode != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Roles.WorkerNode.SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.Roles.WorkerNode.SubnetIDRef, + Selector: mg.Spec.ForProvider.Roles.WorkerNode.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Roles.WorkerNode.SubnetID") + } + mg.Spec.ForProvider.Roles.WorkerNode.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Roles.WorkerNode.SubnetIDRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.Roles != nil { + if mg.Spec.ForProvider.Roles.ZookeeperNode != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Roles.ZookeeperNode.SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.Roles.ZookeeperNode.SubnetIDRef, + Selector: mg.Spec.ForProvider.Roles.ZookeeperNode.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Roles.ZookeeperNode.SubnetID") + } + mg.Spec.ForProvider.Roles.ZookeeperNode.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + 
mg.Spec.ForProvider.Roles.ZookeeperNode.SubnetIDRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.StorageAccount); i3++ { + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Container", "ContainerList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StorageAccount[i3].StorageContainerID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.StorageAccount[i3].StorageContainerIDRef, + Selector: mg.Spec.ForProvider.StorageAccount[i3].StorageContainerIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StorageAccount[i3].StorageContainerID") + } + mg.Spec.ForProvider.StorageAccount[i3].StorageContainerID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StorageAccount[i3].StorageContainerIDRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.Roles != nil { + if mg.Spec.InitProvider.Roles.HeadNode != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Roles.HeadNode.SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.Roles.HeadNode.SubnetIDRef, + Selector: mg.Spec.InitProvider.Roles.HeadNode.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Roles.HeadNode.SubnetID") + } + mg.Spec.InitProvider.Roles.HeadNode.SubnetID = 
reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Roles.HeadNode.SubnetIDRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.Roles != nil { + if mg.Spec.InitProvider.Roles.WorkerNode != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Roles.WorkerNode.SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.Roles.WorkerNode.SubnetIDRef, + Selector: mg.Spec.InitProvider.Roles.WorkerNode.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Roles.WorkerNode.SubnetID") + } + mg.Spec.InitProvider.Roles.WorkerNode.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Roles.WorkerNode.SubnetIDRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.Roles != nil { + if mg.Spec.InitProvider.Roles.ZookeeperNode != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Roles.ZookeeperNode.SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.Roles.ZookeeperNode.SubnetIDRef, + Selector: mg.Spec.InitProvider.Roles.ZookeeperNode.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Roles.ZookeeperNode.SubnetID") + } + mg.Spec.InitProvider.Roles.ZookeeperNode.SubnetID = 
reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Roles.ZookeeperNode.SubnetIDRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.StorageAccount); i3++ { + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Container", "ContainerList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StorageAccount[i3].StorageContainerID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.StorageAccount[i3].StorageContainerIDRef, + Selector: mg.Spec.InitProvider.StorageAccount[i3].StorageContainerIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StorageAccount[i3].StorageContainerID") + } + mg.Spec.InitProvider.StorageAccount[i3].StorageContainerID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StorageAccount[i3].StorageContainerIDRef = rsp.ResolvedReference + + } + + return nil +} diff --git a/apis/hdinsight/v1beta2/zz_groupversion_info.go b/apis/hdinsight/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..bc820f3bd --- /dev/null +++ b/apis/hdinsight/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=hdinsight.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
+const ( + CRDGroup = "hdinsight.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/hdinsight/v1beta2/zz_hadoopcluster_terraformed.go b/apis/hdinsight/v1beta2/zz_hadoopcluster_terraformed.go new file mode 100755 index 000000000..656d376cf --- /dev/null +++ b/apis/hdinsight/v1beta2/zz_hadoopcluster_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this HadoopCluster +func (mg *HadoopCluster) GetTerraformResourceType() string { + return "azurerm_hdinsight_hadoop_cluster" +} + +// GetConnectionDetailsMapping for this HadoopCluster +func (tr *HadoopCluster) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"extension[*].primary_key": "spec.forProvider.extension[*].primaryKeySecretRef", "gateway[*].password": "spec.forProvider.gateway[*].passwordSecretRef", "metastores[*].ambari[*].password": "spec.forProvider.metastores[*].ambari[*].passwordSecretRef", "metastores[*].hive[*].password": "spec.forProvider.metastores[*].hive[*].passwordSecretRef", "metastores[*].oozie[*].password": "spec.forProvider.metastores[*].oozie[*].passwordSecretRef", "monitor[*].primary_key": "spec.forProvider.monitor[*].primaryKeySecretRef", "roles[*].head_node[*].password": 
"spec.forProvider.roles[*].headNode[*].passwordSecretRef", "roles[*].worker_node[*].password": "spec.forProvider.roles[*].workerNode[*].passwordSecretRef", "roles[*].zookeeper_node[*].password": "spec.forProvider.roles[*].zookeeperNode[*].passwordSecretRef", "security_profile[*].domain_user_password": "spec.forProvider.securityProfile[*].domainUserPasswordSecretRef", "storage_account[*].storage_account_key": "spec.forProvider.storageAccount[*].storageAccountKeySecretRef"} +} + +// GetObservation of this HadoopCluster +func (tr *HadoopCluster) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this HadoopCluster +func (tr *HadoopCluster) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this HadoopCluster +func (tr *HadoopCluster) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this HadoopCluster +func (tr *HadoopCluster) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this HadoopCluster +func (tr *HadoopCluster) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this HadoopCluster +func (tr *HadoopCluster) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := 
map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this HadoopCluster +func (tr *HadoopCluster) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this HadoopCluster using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *HadoopCluster) LateInitialize(attrs []byte) (bool, error) { + params := &HadoopClusterParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *HadoopCluster) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/hdinsight/v1beta2/zz_hadoopcluster_types.go b/apis/hdinsight/v1beta2/zz_hadoopcluster_types.go new file mode 100755 index 000000000..e0c20c0af --- /dev/null +++ b/apis/hdinsight/v1beta2/zz_hadoopcluster_types.go @@ -0,0 +1,1637 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AmbariInitParameters struct { + + // The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created. + Server *string `json:"server,omitempty" tf:"server,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type AmbariObservation struct { + + // The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created. + Server *string `json:"server,omitempty" tf:"server,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. 
Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type AmbariParameters struct { + + // The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + DatabaseName *string `json:"databaseName" tf:"database_name,omitempty"` + + // The Password associated with the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Required + PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` + + // The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Server *string `json:"server" tf:"server,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Username *string `json:"username" tf:"username,omitempty"` +} + +type AutoscaleInitParameters struct { + + // A capacity block as defined below. + Capacity *CapacityInitParameters `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // A recurrence block as defined below. + Recurrence *RecurrenceInitParameters `json:"recurrence,omitempty" tf:"recurrence,omitempty"` +} + +type AutoscaleObservation struct { + + // A capacity block as defined below. + Capacity *CapacityObservation `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // A recurrence block as defined below. + Recurrence *RecurrenceObservation `json:"recurrence,omitempty" tf:"recurrence,omitempty"` +} + +type AutoscaleParameters struct { + + // A capacity block as defined below. + // +kubebuilder:validation:Optional + Capacity *CapacityParameters `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // A recurrence block as defined below. 
+ // +kubebuilder:validation:Optional + Recurrence *RecurrenceParameters `json:"recurrence,omitempty" tf:"recurrence,omitempty"` +} + +type CapacityInitParameters struct { + + // The maximum number of worker nodes to autoscale to based on the cluster's activity. + MaxInstanceCount *float64 `json:"maxInstanceCount,omitempty" tf:"max_instance_count,omitempty"` + + // The minimum number of worker nodes to autoscale to based on the cluster's activity. + MinInstanceCount *float64 `json:"minInstanceCount,omitempty" tf:"min_instance_count,omitempty"` +} + +type CapacityObservation struct { + + // The maximum number of worker nodes to autoscale to based on the cluster's activity. + MaxInstanceCount *float64 `json:"maxInstanceCount,omitempty" tf:"max_instance_count,omitempty"` + + // The minimum number of worker nodes to autoscale to based on the cluster's activity. + MinInstanceCount *float64 `json:"minInstanceCount,omitempty" tf:"min_instance_count,omitempty"` +} + +type CapacityParameters struct { + + // The maximum number of worker nodes to autoscale to based on the cluster's activity. + // +kubebuilder:validation:Optional + MaxInstanceCount *float64 `json:"maxInstanceCount" tf:"max_instance_count,omitempty"` + + // The minimum number of worker nodes to autoscale to based on the cluster's activity. + // +kubebuilder:validation:Optional + MinInstanceCount *float64 `json:"minInstanceCount" tf:"min_instance_count,omitempty"` +} + +type ComponentVersionInitParameters struct { + + // The version of Hadoop which should be used for this HDInsight Hadoop Cluster. Changing this forces a new resource to be created. + Hadoop *string `json:"hadoop,omitempty" tf:"hadoop,omitempty"` +} + +type ComponentVersionObservation struct { + + // The version of Hadoop which should be used for this HDInsight Hadoop Cluster. Changing this forces a new resource to be created. 
+ Hadoop *string `json:"hadoop,omitempty" tf:"hadoop,omitempty"` +} + +type ComponentVersionParameters struct { + + // The version of Hadoop which should be used for this HDInsight Hadoop Cluster. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Hadoop *string `json:"hadoop" tf:"hadoop,omitempty"` +} + +type ComputeIsolationInitParameters struct { + + // This field indicates whether enable compute isolation or not. Possible values are true or false. + ComputeIsolationEnabled *bool `json:"computeIsolationEnabled,omitempty" tf:"compute_isolation_enabled,omitempty"` + + // The name of the host SKU. + HostSku *string `json:"hostSku,omitempty" tf:"host_sku,omitempty"` +} + +type ComputeIsolationObservation struct { + + // This field indicates whether enable compute isolation or not. Possible values are true or false. + ComputeIsolationEnabled *bool `json:"computeIsolationEnabled,omitempty" tf:"compute_isolation_enabled,omitempty"` + + // The name of the host SKU. + HostSku *string `json:"hostSku,omitempty" tf:"host_sku,omitempty"` +} + +type ComputeIsolationParameters struct { + + // This field indicates whether enable compute isolation or not. Possible values are true or false. + // +kubebuilder:validation:Optional + ComputeIsolationEnabled *bool `json:"computeIsolationEnabled,omitempty" tf:"compute_isolation_enabled,omitempty"` + + // The name of the host SKU. + // +kubebuilder:validation:Optional + HostSku *string `json:"hostSku,omitempty" tf:"host_sku,omitempty"` +} + +type DiskEncryptionInitParameters struct { + + // This is an algorithm identifier for encryption. Possible values are RSA1_5, RSA-OAEP, RSA-OAEP-256. + EncryptionAlgorithm *string `json:"encryptionAlgorithm,omitempty" tf:"encryption_algorithm,omitempty"` + + // This is indicator to show whether resource disk encryption is enabled. 
+ EncryptionAtHostEnabled *bool `json:"encryptionAtHostEnabled,omitempty" tf:"encryption_at_host_enabled,omitempty"` + + // The ID of the key vault key. + KeyVaultKeyID *string `json:"keyVaultKeyId,omitempty" tf:"key_vault_key_id,omitempty"` + + // This is the resource ID of Managed Identity used to access the key vault. + KeyVaultManagedIdentityID *string `json:"keyVaultManagedIdentityId,omitempty" tf:"key_vault_managed_identity_id,omitempty"` +} + +type DiskEncryptionObservation struct { + + // This is an algorithm identifier for encryption. Possible values are RSA1_5, RSA-OAEP, RSA-OAEP-256. + EncryptionAlgorithm *string `json:"encryptionAlgorithm,omitempty" tf:"encryption_algorithm,omitempty"` + + // This is indicator to show whether resource disk encryption is enabled. + EncryptionAtHostEnabled *bool `json:"encryptionAtHostEnabled,omitempty" tf:"encryption_at_host_enabled,omitempty"` + + // The ID of the key vault key. + KeyVaultKeyID *string `json:"keyVaultKeyId,omitempty" tf:"key_vault_key_id,omitempty"` + + // This is the resource ID of Managed Identity used to access the key vault. + KeyVaultManagedIdentityID *string `json:"keyVaultManagedIdentityId,omitempty" tf:"key_vault_managed_identity_id,omitempty"` +} + +type DiskEncryptionParameters struct { + + // This is an algorithm identifier for encryption. Possible values are RSA1_5, RSA-OAEP, RSA-OAEP-256. + // +kubebuilder:validation:Optional + EncryptionAlgorithm *string `json:"encryptionAlgorithm,omitempty" tf:"encryption_algorithm,omitempty"` + + // This is indicator to show whether resource disk encryption is enabled. + // +kubebuilder:validation:Optional + EncryptionAtHostEnabled *bool `json:"encryptionAtHostEnabled,omitempty" tf:"encryption_at_host_enabled,omitempty"` + + // The ID of the key vault key. 
+	// +kubebuilder:validation:Optional
+	KeyVaultKeyID *string `json:"keyVaultKeyId,omitempty" tf:"key_vault_key_id,omitempty"`
+
+	// This is the resource ID of the Managed Identity used to access the key vault.
+	// +kubebuilder:validation:Optional
+	KeyVaultManagedIdentityID *string `json:"keyVaultManagedIdentityId,omitempty" tf:"key_vault_managed_identity_id,omitempty"`
+}
+
+type EdgeNodeInitParameters struct {
+
+	// The HTTPS Connectivity Endpoint for this HDInsight Hadoop Cluster. One or more https_endpoints blocks as defined below.
+	HTTPSEndpoints []HTTPSEndpointsInitParameters `json:"httpsEndpoints,omitempty" tf:"https_endpoints,omitempty"`
+
+	// A install_script_action block as defined below.
+	InstallScriptAction []InstallScriptActionInitParameters `json:"installScriptAction,omitempty" tf:"install_script_action,omitempty"`
+
+	// The number of instances which should be run for the Edge Nodes.
+	TargetInstanceCount *float64 `json:"targetInstanceCount,omitempty" tf:"target_instance_count,omitempty"`
+
+	// A uninstall_script_actions block as defined below.
+	UninstallScriptActions []UninstallScriptActionsInitParameters `json:"uninstallScriptActions,omitempty" tf:"uninstall_script_actions,omitempty"`
+
+	// The Size of the Virtual Machine which should be used as the Edge Nodes.
Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created. + VMSize *string `json:"vmSize,omitempty" tf:"vm_size,omitempty"` +} + +type EdgeNodeObservation struct { + + // The HTTPS Connectivity Endpoint for this HDInsight Hadoop Cluster. One or more https_endpoints blocks as defined below. + HTTPSEndpoints []HTTPSEndpointsObservation `json:"httpsEndpoints,omitempty" tf:"https_endpoints,omitempty"` + + // A install_script_action block as defined below. 
+ InstallScriptAction []InstallScriptActionObservation `json:"installScriptAction,omitempty" tf:"install_script_action,omitempty"` + + // The number of instances which should be run for the Worker Nodes. + TargetInstanceCount *float64 `json:"targetInstanceCount,omitempty" tf:"target_instance_count,omitempty"` + + // A uninstall_script_actions block as defined below. + UninstallScriptActions []UninstallScriptActionsObservation `json:"uninstallScriptActions,omitempty" tf:"uninstall_script_actions,omitempty"` + + // The Size of the Virtual Machine which should be used as the Zookeeper Nodes. Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, 
Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created. + VMSize *string `json:"vmSize,omitempty" tf:"vm_size,omitempty"` +} + +type EdgeNodeParameters struct { + + // The HTTPS Connectivity Endpoint for this HDInsight Hadoop Cluster. One or more https_endpoints blocks as defined below. + // +kubebuilder:validation:Optional + HTTPSEndpoints []HTTPSEndpointsParameters `json:"httpsEndpoints,omitempty" tf:"https_endpoints,omitempty"` + + // A install_script_action block as defined below. + // +kubebuilder:validation:Optional + InstallScriptAction []InstallScriptActionParameters `json:"installScriptAction" tf:"install_script_action,omitempty"` + + // The number of instances which should be run for the Worker Nodes. + // +kubebuilder:validation:Optional + TargetInstanceCount *float64 `json:"targetInstanceCount" tf:"target_instance_count,omitempty"` + + // A uninstall_script_actions block as defined below. + // +kubebuilder:validation:Optional + UninstallScriptActions []UninstallScriptActionsParameters `json:"uninstallScriptActions,omitempty" tf:"uninstall_script_actions,omitempty"` + + // The Size of the Virtual Machine which should be used as the Zookeeper Nodes. 
Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + VMSize *string `json:"vmSize" tf:"vm_size,omitempty"` +} + +type ExtensionInitParameters struct { + + // The workspace ID of the log analytics extension. + LogAnalyticsWorkspaceID *string `json:"logAnalyticsWorkspaceId,omitempty" tf:"log_analytics_workspace_id,omitempty"` +} + +type ExtensionObservation struct { + + // The workspace ID of the log analytics extension. 
+ LogAnalyticsWorkspaceID *string `json:"logAnalyticsWorkspaceId,omitempty" tf:"log_analytics_workspace_id,omitempty"` +} + +type ExtensionParameters struct { + + // The workspace ID of the log analytics extension. + // +kubebuilder:validation:Optional + LogAnalyticsWorkspaceID *string `json:"logAnalyticsWorkspaceId" tf:"log_analytics_workspace_id,omitempty"` + + // The workspace key of the log analytics extension. + // +kubebuilder:validation:Required + PrimaryKeySecretRef v1.SecretKeySelector `json:"primaryKeySecretRef" tf:"-"` +} + +type GatewayInitParameters struct { + + // The username used for the Ambari Portal. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type GatewayObservation struct { + + // The username used for the Ambari Portal. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type GatewayParameters struct { + + // The password used for the Ambari Portal. + // +kubebuilder:validation:Required + PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` + + // The username used for the Ambari Portal. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Username *string `json:"username" tf:"username,omitempty"` +} + +type HTTPSEndpointsInitParameters struct { + + // A list of access modes for the application. + AccessModes []*string `json:"accessModes,omitempty" tf:"access_modes,omitempty"` + + // The destination port to connect to. + DestinationPort *float64 `json:"destinationPort,omitempty" tf:"destination_port,omitempty"` + + // The value indicates whether the gateway authentication is enabled or not. + DisableGatewayAuth *bool `json:"disableGatewayAuth,omitempty" tf:"disable_gateway_auth,omitempty"` + + // The private ip address of the endpoint. 
+ PrivateIPAddress *string `json:"privateIpAddress,omitempty" tf:"private_ip_address,omitempty"` + + // The application's subdomain suffix. + SubDomainSuffix *string `json:"subDomainSuffix,omitempty" tf:"sub_domain_suffix,omitempty"` +} + +type HTTPSEndpointsObservation struct { + + // A list of access modes for the application. + AccessModes []*string `json:"accessModes,omitempty" tf:"access_modes,omitempty"` + + // The destination port to connect to. + DestinationPort *float64 `json:"destinationPort,omitempty" tf:"destination_port,omitempty"` + + // The value indicates whether the gateway authentication is enabled or not. + DisableGatewayAuth *bool `json:"disableGatewayAuth,omitempty" tf:"disable_gateway_auth,omitempty"` + + // The private ip address of the endpoint. + PrivateIPAddress *string `json:"privateIpAddress,omitempty" tf:"private_ip_address,omitempty"` + + // The application's subdomain suffix. + SubDomainSuffix *string `json:"subDomainSuffix,omitempty" tf:"sub_domain_suffix,omitempty"` +} + +type HTTPSEndpointsParameters struct { + + // A list of access modes for the application. + // +kubebuilder:validation:Optional + AccessModes []*string `json:"accessModes,omitempty" tf:"access_modes,omitempty"` + + // The destination port to connect to. + // +kubebuilder:validation:Optional + DestinationPort *float64 `json:"destinationPort,omitempty" tf:"destination_port,omitempty"` + + // The value indicates whether the gateway authentication is enabled or not. + // +kubebuilder:validation:Optional + DisableGatewayAuth *bool `json:"disableGatewayAuth,omitempty" tf:"disable_gateway_auth,omitempty"` + + // The private ip address of the endpoint. + // +kubebuilder:validation:Optional + PrivateIPAddress *string `json:"privateIpAddress,omitempty" tf:"private_ip_address,omitempty"` + + // The application's subdomain suffix. 
+ // +kubebuilder:validation:Optional + SubDomainSuffix *string `json:"subDomainSuffix,omitempty" tf:"sub_domain_suffix,omitempty"` +} + +type HadoopClusterInitParameters struct { + + // Specifies the Version of HDInsights which should be used for this Cluster. Changing this forces a new resource to be created. + ClusterVersion *string `json:"clusterVersion,omitempty" tf:"cluster_version,omitempty"` + + // A component_version block as defined below. + ComponentVersion *ComponentVersionInitParameters `json:"componentVersion,omitempty" tf:"component_version,omitempty"` + + // A compute_isolation block as defined below. + ComputeIsolation *ComputeIsolationInitParameters `json:"computeIsolation,omitempty" tf:"compute_isolation,omitempty"` + + // One or more disk_encryption block as defined below. + DiskEncryption []DiskEncryptionInitParameters `json:"diskEncryption,omitempty" tf:"disk_encryption,omitempty"` + + // An extension block as defined below. + Extension *ExtensionInitParameters `json:"extension,omitempty" tf:"extension,omitempty"` + + // A gateway block as defined below. + Gateway *GatewayInitParameters `json:"gateway,omitempty" tf:"gateway,omitempty"` + + // Specifies the Azure Region which this HDInsight Hadoop Cluster should exist. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A metastores block as defined below. + Metastores *MetastoresInitParameters `json:"metastores,omitempty" tf:"metastores,omitempty"` + + // A monitor block as defined below. + Monitor *MonitorInitParameters `json:"monitor,omitempty" tf:"monitor,omitempty"` + + // A network block as defined below. + Network *NetworkInitParameters `json:"network,omitempty" tf:"network,omitempty"` + + // A roles block as defined below. + Roles *RolesInitParameters `json:"roles,omitempty" tf:"roles,omitempty"` + + // A security_profile block as defined below. Changing this forces a new resource to be created. 
+ SecurityProfile *SecurityProfileInitParameters `json:"securityProfile,omitempty" tf:"security_profile,omitempty"` + + // One or more storage_account block as defined below. + StorageAccount []StorageAccountInitParameters `json:"storageAccount,omitempty" tf:"storage_account,omitempty"` + + // A storage_account_gen2 block as defined below. + StorageAccountGen2 *StorageAccountGen2InitParameters `json:"storageAccountGen2,omitempty" tf:"storage_account_gen2,omitempty"` + + // The minimal supported TLS version. Possible values are 1.0, 1.1 or 1.2. Changing this forces a new resource to be created. + TLSMinVersion *string `json:"tlsMinVersion,omitempty" tf:"tls_min_version,omitempty"` + + // A map of Tags which should be assigned to this HDInsight Hadoop Cluster. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies the Tier which should be used for this HDInsight Hadoop Cluster. Possible values are Standard or Premium. Changing this forces a new resource to be created. + Tier *string `json:"tier,omitempty" tf:"tier,omitempty"` +} + +type HadoopClusterObservation struct { + + // Specifies the Version of HDInsights which should be used for this Cluster. Changing this forces a new resource to be created. + ClusterVersion *string `json:"clusterVersion,omitempty" tf:"cluster_version,omitempty"` + + // A component_version block as defined below. + ComponentVersion *ComponentVersionObservation `json:"componentVersion,omitempty" tf:"component_version,omitempty"` + + // A compute_isolation block as defined below. + ComputeIsolation *ComputeIsolationObservation `json:"computeIsolation,omitempty" tf:"compute_isolation,omitempty"` + + // One or more disk_encryption block as defined below. + DiskEncryption []DiskEncryptionObservation `json:"diskEncryption,omitempty" tf:"disk_encryption,omitempty"` + + // An extension block as defined below. 
+ Extension *ExtensionObservation `json:"extension,omitempty" tf:"extension,omitempty"` + + // A gateway block as defined below. + Gateway *GatewayObservation `json:"gateway,omitempty" tf:"gateway,omitempty"` + + // The HTTPS Connectivity Endpoint for this HDInsight Hadoop Cluster. + HTTPSEndpoint *string `json:"httpsEndpoint,omitempty" tf:"https_endpoint,omitempty"` + + // The ID of the HDInsight Hadoop Cluster. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies the Azure Region which this HDInsight Hadoop Cluster should exist. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A metastores block as defined below. + Metastores *MetastoresObservation `json:"metastores,omitempty" tf:"metastores,omitempty"` + + // A monitor block as defined below. + Monitor *MonitorObservation `json:"monitor,omitempty" tf:"monitor,omitempty"` + + // A network block as defined below. + Network *NetworkObservation `json:"network,omitempty" tf:"network,omitempty"` + + // Specifies the name of the Resource Group in which this HDInsight Hadoop Cluster should exist. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A roles block as defined below. + Roles *RolesObservation `json:"roles,omitempty" tf:"roles,omitempty"` + + // The SSH Connectivity Endpoint for this HDInsight Hadoop Cluster. + SSHEndpoint *string `json:"sshEndpoint,omitempty" tf:"ssh_endpoint,omitempty"` + + // A security_profile block as defined below. Changing this forces a new resource to be created. + SecurityProfile *SecurityProfileObservation `json:"securityProfile,omitempty" tf:"security_profile,omitempty"` + + // One or more storage_account block as defined below. 
+ StorageAccount []StorageAccountObservation `json:"storageAccount,omitempty" tf:"storage_account,omitempty"` + + // A storage_account_gen2 block as defined below. + StorageAccountGen2 *StorageAccountGen2Observation `json:"storageAccountGen2,omitempty" tf:"storage_account_gen2,omitempty"` + + // The minimal supported TLS version. Possible values are 1.0, 1.1 or 1.2. Changing this forces a new resource to be created. + TLSMinVersion *string `json:"tlsMinVersion,omitempty" tf:"tls_min_version,omitempty"` + + // A map of Tags which should be assigned to this HDInsight Hadoop Cluster. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies the Tier which should be used for this HDInsight Hadoop Cluster. Possible values are Standard or Premium. Changing this forces a new resource to be created. + Tier *string `json:"tier,omitempty" tf:"tier,omitempty"` +} + +type HadoopClusterParameters struct { + + // Specifies the Version of HDInsights which should be used for this Cluster. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ClusterVersion *string `json:"clusterVersion,omitempty" tf:"cluster_version,omitempty"` + + // A component_version block as defined below. + // +kubebuilder:validation:Optional + ComponentVersion *ComponentVersionParameters `json:"componentVersion,omitempty" tf:"component_version,omitempty"` + + // A compute_isolation block as defined below. + // +kubebuilder:validation:Optional + ComputeIsolation *ComputeIsolationParameters `json:"computeIsolation,omitempty" tf:"compute_isolation,omitempty"` + + // One or more disk_encryption block as defined below. + // +kubebuilder:validation:Optional + DiskEncryption []DiskEncryptionParameters `json:"diskEncryption,omitempty" tf:"disk_encryption,omitempty"` + + // An extension block as defined below. 
+ // +kubebuilder:validation:Optional + Extension *ExtensionParameters `json:"extension,omitempty" tf:"extension,omitempty"` + + // A gateway block as defined below. + // +kubebuilder:validation:Optional + Gateway *GatewayParameters `json:"gateway,omitempty" tf:"gateway,omitempty"` + + // Specifies the Azure Region which this HDInsight Hadoop Cluster should exist. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A metastores block as defined below. + // +kubebuilder:validation:Optional + Metastores *MetastoresParameters `json:"metastores,omitempty" tf:"metastores,omitempty"` + + // A monitor block as defined below. + // +kubebuilder:validation:Optional + Monitor *MonitorParameters `json:"monitor,omitempty" tf:"monitor,omitempty"` + + // A network block as defined below. + // +kubebuilder:validation:Optional + Network *NetworkParameters `json:"network,omitempty" tf:"network,omitempty"` + + // Specifies the name of the Resource Group in which this HDInsight Hadoop Cluster should exist. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A roles block as defined below. 
+ // +kubebuilder:validation:Optional + Roles *RolesParameters `json:"roles,omitempty" tf:"roles,omitempty"` + + // A security_profile block as defined below. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + SecurityProfile *SecurityProfileParameters `json:"securityProfile,omitempty" tf:"security_profile,omitempty"` + + // One or more storage_account block as defined below. + // +kubebuilder:validation:Optional + StorageAccount []StorageAccountParameters `json:"storageAccount,omitempty" tf:"storage_account,omitempty"` + + // A storage_account_gen2 block as defined below. + // +kubebuilder:validation:Optional + StorageAccountGen2 *StorageAccountGen2Parameters `json:"storageAccountGen2,omitempty" tf:"storage_account_gen2,omitempty"` + + // The minimal supported TLS version. Possible values are 1.0, 1.1 or 1.2. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + TLSMinVersion *string `json:"tlsMinVersion,omitempty" tf:"tls_min_version,omitempty"` + + // A map of Tags which should be assigned to this HDInsight Hadoop Cluster. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies the Tier which should be used for this HDInsight Hadoop Cluster. Possible values are Standard or Premium. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Tier *string `json:"tier,omitempty" tf:"tier,omitempty"` +} + +type HeadNodeInitParameters struct { + + // A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created. + // +listType=set + SSHKeys []*string `json:"sshKeys,omitempty" tf:"ssh_keys,omitempty"` + + // The script action which will run on the cluster. One or more script_actions blocks as defined above. 
+ ScriptActions []ScriptActionsInitParameters `json:"scriptActions,omitempty" tf:"script_actions,omitempty"` + + // The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` + + // The Size of the Virtual Machine which should be used as the Zookeeper Nodes. 
Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created. + VMSize *string `json:"vmSize,omitempty" tf:"vm_size,omitempty"` + + // The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + VirtualNetworkID *string `json:"virtualNetworkId,omitempty" tf:"virtual_network_id,omitempty"` +} + +type HeadNodeObservation struct { + + // A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. 
Changing this forces a new resource to be created. + // +listType=set + SSHKeys []*string `json:"sshKeys,omitempty" tf:"ssh_keys,omitempty"` + + // The script action which will run on the cluster. One or more script_actions blocks as defined above. + ScriptActions []ScriptActionsObservation `json:"scriptActions,omitempty" tf:"script_actions,omitempty"` + + // The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` + + // The Size of the Virtual Machine which should be used as the Zookeeper Nodes. Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, 
Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created. + VMSize *string `json:"vmSize,omitempty" tf:"vm_size,omitempty"` + + // The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + VirtualNetworkID *string `json:"virtualNetworkId,omitempty" tf:"virtual_network_id,omitempty"` +} + +type HeadNodeParameters struct { + + // The Password associated with the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + PasswordSecretRef *v1.SecretKeySelector `json:"passwordSecretRef,omitempty" tf:"-"` + + // A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + // +listType=set + SSHKeys []*string `json:"sshKeys,omitempty" tf:"ssh_keys,omitempty"` + + // The script action which will run on the cluster. One or more script_actions blocks as defined above. + // +kubebuilder:validation:Optional + ScriptActions []ScriptActionsParameters `json:"scriptActions,omitempty" tf:"script_actions,omitempty"` + + // The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Username *string `json:"username" tf:"username,omitempty"` + + // The Size of the Virtual Machine which should be used as the Zookeeper Nodes. Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, 
Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + VMSize *string `json:"vmSize" tf:"vm_size,omitempty"` + + // The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + VirtualNetworkID *string `json:"virtualNetworkId,omitempty" tf:"virtual_network_id,omitempty"` +} + +type HiveInitParameters struct { + + // The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created. + Server *string `json:"server,omitempty" tf:"server,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type HiveObservation struct { + + // The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created. 
+ Server *string `json:"server,omitempty" tf:"server,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type HiveParameters struct { + + // The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + DatabaseName *string `json:"databaseName" tf:"database_name,omitempty"` + + // The Password associated with the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Required + PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` + + // The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Server *string `json:"server" tf:"server,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Username *string `json:"username" tf:"username,omitempty"` +} + +type InstallScriptActionInitParameters struct { + + // The name of the uninstall script action. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The parameters for the script. + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The URI pointing to the script to run during the installation of the edge node. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type InstallScriptActionObservation struct { + + // The name of the uninstall script action. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The parameters for the script. 
+ Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The URI pointing to the script to run during the installation of the edge node. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type InstallScriptActionParameters struct { + + // The name of the uninstall script action. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The parameters for the script. + // +kubebuilder:validation:Optional + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The URI pointing to the script to run during the installation of the edge node. + // +kubebuilder:validation:Optional + URI *string `json:"uri" tf:"uri,omitempty"` +} + +type MetastoresInitParameters struct { + + // An ambari block as defined below. + Ambari *AmbariInitParameters `json:"ambari,omitempty" tf:"ambari,omitempty"` + + // A hive block as defined below. + Hive *HiveInitParameters `json:"hive,omitempty" tf:"hive,omitempty"` + + // An oozie block as defined below. + Oozie *OozieInitParameters `json:"oozie,omitempty" tf:"oozie,omitempty"` +} + +type MetastoresObservation struct { + + // An ambari block as defined below. + Ambari *AmbariObservation `json:"ambari,omitempty" tf:"ambari,omitempty"` + + // A hive block as defined below. + Hive *HiveObservation `json:"hive,omitempty" tf:"hive,omitempty"` + + // An oozie block as defined below. + Oozie *OozieObservation `json:"oozie,omitempty" tf:"oozie,omitempty"` +} + +type MetastoresParameters struct { + + // An ambari block as defined below. + // +kubebuilder:validation:Optional + Ambari *AmbariParameters `json:"ambari,omitempty" tf:"ambari,omitempty"` + + // A hive block as defined below. + // +kubebuilder:validation:Optional + Hive *HiveParameters `json:"hive,omitempty" tf:"hive,omitempty"` + + // An oozie block as defined below. 
+ // +kubebuilder:validation:Optional + Oozie *OozieParameters `json:"oozie,omitempty" tf:"oozie,omitempty"` +} + +type MonitorInitParameters struct { + + // The Operations Management Suite (OMS) workspace ID. + LogAnalyticsWorkspaceID *string `json:"logAnalyticsWorkspaceId,omitempty" tf:"log_analytics_workspace_id,omitempty"` +} + +type MonitorObservation struct { + + // The Operations Management Suite (OMS) workspace ID. + LogAnalyticsWorkspaceID *string `json:"logAnalyticsWorkspaceId,omitempty" tf:"log_analytics_workspace_id,omitempty"` +} + +type MonitorParameters struct { + + // The Operations Management Suite (OMS) workspace ID. + // +kubebuilder:validation:Optional + LogAnalyticsWorkspaceID *string `json:"logAnalyticsWorkspaceId" tf:"log_analytics_workspace_id,omitempty"` + + // The Operations Management Suite (OMS) workspace key. + // +kubebuilder:validation:Required + PrimaryKeySecretRef v1.SecretKeySelector `json:"primaryKeySecretRef" tf:"-"` +} + +type NetworkInitParameters struct { + + // The direction of the resource provider connection. Possible values include Inbound or Outbound. Defaults to Inbound. Changing this forces a new resource to be created. + ConnectionDirection *string `json:"connectionDirection,omitempty" tf:"connection_direction,omitempty"` + + // Is the private link enabled? Possible values include true or false. Defaults to false. Changing this forces a new resource to be created. + PrivateLinkEnabled *bool `json:"privateLinkEnabled,omitempty" tf:"private_link_enabled,omitempty"` +} + +type NetworkObservation struct { + + // The direction of the resource provider connection. Possible values include Inbound or Outbound. Defaults to Inbound. Changing this forces a new resource to be created. + ConnectionDirection *string `json:"connectionDirection,omitempty" tf:"connection_direction,omitempty"` + + // Is the private link enabled? Possible values include true or false. Defaults to false. Changing this forces a new resource to be created. 
+ PrivateLinkEnabled *bool `json:"privateLinkEnabled,omitempty" tf:"private_link_enabled,omitempty"` +} + +type NetworkParameters struct { + + // The direction of the resource provider connection. Possible values include Inbound or Outbound. Defaults to Inbound. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ConnectionDirection *string `json:"connectionDirection,omitempty" tf:"connection_direction,omitempty"` + + // Is the private link enabled? Possible values include true or false. Defaults to false. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + PrivateLinkEnabled *bool `json:"privateLinkEnabled,omitempty" tf:"private_link_enabled,omitempty"` +} + +type OozieInitParameters struct { + + // The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created. + Server *string `json:"server,omitempty" tf:"server,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type OozieObservation struct { + + // The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created. + Server *string `json:"server,omitempty" tf:"server,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. 
+ Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type OozieParameters struct { + + // The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + DatabaseName *string `json:"databaseName" tf:"database_name,omitempty"` + + // The Password associated with the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Required + PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` + + // The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Server *string `json:"server" tf:"server,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Username *string `json:"username" tf:"username,omitempty"` +} + +type RecurrenceInitParameters struct { + + // A list of schedule blocks as defined below. + Schedule []ScheduleInitParameters `json:"schedule,omitempty" tf:"schedule,omitempty"` + + // The time zone for the autoscale schedule times. + Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` +} + +type RecurrenceObservation struct { + + // A list of schedule blocks as defined below. + Schedule []ScheduleObservation `json:"schedule,omitempty" tf:"schedule,omitempty"` + + // The time zone for the autoscale schedule times. + Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` +} + +type RecurrenceParameters struct { + + // A list of schedule blocks as defined below. + // +kubebuilder:validation:Optional + Schedule []ScheduleParameters `json:"schedule" tf:"schedule,omitempty"` + + // The time zone for the autoscale schedule times. 
+ // +kubebuilder:validation:Optional + Timezone *string `json:"timezone" tf:"timezone,omitempty"` +} + +type RolesInitParameters struct { + + // A edge_node block as defined below. + EdgeNode *EdgeNodeInitParameters `json:"edgeNode,omitempty" tf:"edge_node,omitempty"` + + // A head_node block as defined above. + HeadNode *HeadNodeInitParameters `json:"headNode,omitempty" tf:"head_node,omitempty"` + + // A worker_node block as defined below. + WorkerNode *WorkerNodeInitParameters `json:"workerNode,omitempty" tf:"worker_node,omitempty"` + + // A zookeeper_node block as defined below. + ZookeeperNode *ZookeeperNodeInitParameters `json:"zookeeperNode,omitempty" tf:"zookeeper_node,omitempty"` +} + +type RolesObservation struct { + + // A edge_node block as defined below. + EdgeNode *EdgeNodeObservation `json:"edgeNode,omitempty" tf:"edge_node,omitempty"` + + // A head_node block as defined above. + HeadNode *HeadNodeObservation `json:"headNode,omitempty" tf:"head_node,omitempty"` + + // A worker_node block as defined below. + WorkerNode *WorkerNodeObservation `json:"workerNode,omitempty" tf:"worker_node,omitempty"` + + // A zookeeper_node block as defined below. + ZookeeperNode *ZookeeperNodeObservation `json:"zookeeperNode,omitempty" tf:"zookeeper_node,omitempty"` +} + +type RolesParameters struct { + + // A edge_node block as defined below. + // +kubebuilder:validation:Optional + EdgeNode *EdgeNodeParameters `json:"edgeNode,omitempty" tf:"edge_node,omitempty"` + + // A head_node block as defined above. + // +kubebuilder:validation:Optional + HeadNode *HeadNodeParameters `json:"headNode" tf:"head_node,omitempty"` + + // A worker_node block as defined below. + // +kubebuilder:validation:Optional + WorkerNode *WorkerNodeParameters `json:"workerNode" tf:"worker_node,omitempty"` + + // A zookeeper_node block as defined below. 
+ // +kubebuilder:validation:Optional + ZookeeperNode *ZookeeperNodeParameters `json:"zookeeperNode" tf:"zookeeper_node,omitempty"` +} + +type ScheduleInitParameters struct { + + // The days of the week to perform autoscale. Possible values are Monday, Tuesday, Wednesday, Thursday, Friday, Saturday and Sunday. + Days []*string `json:"days,omitempty" tf:"days,omitempty"` + + // The number of instances which should be run for the Worker Nodes. + TargetInstanceCount *float64 `json:"targetInstanceCount,omitempty" tf:"target_instance_count,omitempty"` + + // The time of day to perform the autoscale in 24hour format. + Time *string `json:"time,omitempty" tf:"time,omitempty"` +} + +type ScheduleObservation struct { + + // The days of the week to perform autoscale. Possible values are Monday, Tuesday, Wednesday, Thursday, Friday, Saturday and Sunday. + Days []*string `json:"days,omitempty" tf:"days,omitempty"` + + // The number of instances which should be run for the Worker Nodes. + TargetInstanceCount *float64 `json:"targetInstanceCount,omitempty" tf:"target_instance_count,omitempty"` + + // The time of day to perform the autoscale in 24hour format. + Time *string `json:"time,omitempty" tf:"time,omitempty"` +} + +type ScheduleParameters struct { + + // The days of the week to perform autoscale. Possible values are Monday, Tuesday, Wednesday, Thursday, Friday, Saturday and Sunday. + // +kubebuilder:validation:Optional + Days []*string `json:"days" tf:"days,omitempty"` + + // The number of instances which should be run for the Worker Nodes. + // +kubebuilder:validation:Optional + TargetInstanceCount *float64 `json:"targetInstanceCount" tf:"target_instance_count,omitempty"` + + // The time of day to perform the autoscale in 24hour format. + // +kubebuilder:validation:Optional + Time *string `json:"time" tf:"time,omitempty"` +} + +type ScriptActionsInitParameters struct { + + // The name of the uninstall script action. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The parameters for the script. + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The URI pointing to the script to run during the installation of the edge node. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type ScriptActionsObservation struct { + + // The name of the uninstall script action. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The parameters for the script. + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The URI pointing to the script to run during the installation of the edge node. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type ScriptActionsParameters struct { + + // The name of the uninstall script action. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The parameters for the script. + // +kubebuilder:validation:Optional + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The URI pointing to the script to run during the installation of the edge node. + // +kubebuilder:validation:Optional + URI *string `json:"uri" tf:"uri,omitempty"` +} + +type SecurityProfileInitParameters struct { + + // The resource ID of the Azure Active Directory Domain Service. Changing this forces a new resource to be created. + AaddsResourceID *string `json:"aaddsResourceId,omitempty" tf:"aadds_resource_id,omitempty"` + + // A list of the distinguished names for the cluster user groups. Changing this forces a new resource to be created. + // +listType=set + ClusterUsersGroupDNS []*string `json:"clusterUsersGroupDns,omitempty" tf:"cluster_users_group_dns,omitempty"` + + // The name of the Azure Active Directory Domain. Changing this forces a new resource to be created. + DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` + + // The username of the Azure Active Directory Domain. 
Changing this forces a new resource to be created. + DomainUsername *string `json:"domainUsername,omitempty" tf:"domain_username,omitempty"` + + // A list of the LDAPS URLs to communicate with the Azure Active Directory. Changing this forces a new resource to be created. + // +listType=set + LdapsUrls []*string `json:"ldapsUrls,omitempty" tf:"ldaps_urls,omitempty"` + + // The User Assigned Identity for the HDInsight Cluster. Changing this forces a new resource to be created. + MsiResourceID *string `json:"msiResourceId,omitempty" tf:"msi_resource_id,omitempty"` +} + +type SecurityProfileObservation struct { + + // The resource ID of the Azure Active Directory Domain Service. Changing this forces a new resource to be created. + AaddsResourceID *string `json:"aaddsResourceId,omitempty" tf:"aadds_resource_id,omitempty"` + + // A list of the distinguished names for the cluster user groups. Changing this forces a new resource to be created. + // +listType=set + ClusterUsersGroupDNS []*string `json:"clusterUsersGroupDns,omitempty" tf:"cluster_users_group_dns,omitempty"` + + // The name of the Azure Active Directory Domain. Changing this forces a new resource to be created. + DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` + + // The username of the Azure Active Directory Domain. Changing this forces a new resource to be created. + DomainUsername *string `json:"domainUsername,omitempty" tf:"domain_username,omitempty"` + + // A list of the LDAPS URLs to communicate with the Azure Active Directory. Changing this forces a new resource to be created. + // +listType=set + LdapsUrls []*string `json:"ldapsUrls,omitempty" tf:"ldaps_urls,omitempty"` + + // The User Assigned Identity for the HDInsight Cluster. Changing this forces a new resource to be created. 
+ MsiResourceID *string `json:"msiResourceId,omitempty" tf:"msi_resource_id,omitempty"` +} + +type SecurityProfileParameters struct { + + // The resource ID of the Azure Active Directory Domain Service. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + AaddsResourceID *string `json:"aaddsResourceId" tf:"aadds_resource_id,omitempty"` + + // A list of the distinguished names for the cluster user groups. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + // +listType=set + ClusterUsersGroupDNS []*string `json:"clusterUsersGroupDns,omitempty" tf:"cluster_users_group_dns,omitempty"` + + // The name of the Azure Active Directory Domain. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + DomainName *string `json:"domainName" tf:"domain_name,omitempty"` + + // The user password of the Azure Active Directory Domain. Changing this forces a new resource to be created. + // +kubebuilder:validation:Required + DomainUserPasswordSecretRef v1.SecretKeySelector `json:"domainUserPasswordSecretRef" tf:"-"` + + // The username of the Azure Active Directory Domain. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + DomainUsername *string `json:"domainUsername" tf:"domain_username,omitempty"` + + // A list of the LDAPS URLs to communicate with the Azure Active Directory. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + // +listType=set + LdapsUrls []*string `json:"ldapsUrls" tf:"ldaps_urls,omitempty"` + + // The User Assigned Identity for the HDInsight Cluster. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + MsiResourceID *string `json:"msiResourceId" tf:"msi_resource_id,omitempty"` +} + +type StorageAccountGen2InitParameters struct { + + // The ID of the Gen2 Filesystem. Changing this forces a new resource to be created. 
+ FileSystemID *string `json:"filesystemId,omitempty" tf:"filesystem_id,omitempty"` + + // Is this the Default Storage Account for the HDInsight Hadoop Cluster? Changing this forces a new resource to be created. + IsDefault *bool `json:"isDefault,omitempty" tf:"is_default,omitempty"` + + // The ID of Managed Identity to use for accessing the Gen2 filesystem. Changing this forces a new resource to be created. + ManagedIdentityResourceID *string `json:"managedIdentityResourceId,omitempty" tf:"managed_identity_resource_id,omitempty"` + + // The ID of the Storage Account. Changing this forces a new resource to be created. + StorageResourceID *string `json:"storageResourceId,omitempty" tf:"storage_resource_id,omitempty"` +} + +type StorageAccountGen2Observation struct { + + // The ID of the Gen2 Filesystem. Changing this forces a new resource to be created. + FileSystemID *string `json:"filesystemId,omitempty" tf:"filesystem_id,omitempty"` + + // Is this the Default Storage Account for the HDInsight Hadoop Cluster? Changing this forces a new resource to be created. + IsDefault *bool `json:"isDefault,omitempty" tf:"is_default,omitempty"` + + // The ID of Managed Identity to use for accessing the Gen2 filesystem. Changing this forces a new resource to be created. + ManagedIdentityResourceID *string `json:"managedIdentityResourceId,omitempty" tf:"managed_identity_resource_id,omitempty"` + + // The ID of the Storage Account. Changing this forces a new resource to be created. + StorageResourceID *string `json:"storageResourceId,omitempty" tf:"storage_resource_id,omitempty"` +} + +type StorageAccountGen2Parameters struct { + + // The ID of the Gen2 Filesystem. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + FileSystemID *string `json:"filesystemId" tf:"filesystem_id,omitempty"` + + // Is this the Default Storage Account for the HDInsight Hadoop Cluster? Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + IsDefault *bool `json:"isDefault" tf:"is_default,omitempty"` + + // The ID of Managed Identity to use for accessing the Gen2 filesystem. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ManagedIdentityResourceID *string `json:"managedIdentityResourceId" tf:"managed_identity_resource_id,omitempty"` + + // The ID of the Storage Account. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + StorageResourceID *string `json:"storageResourceId" tf:"storage_resource_id,omitempty"` +} + +type StorageAccountInitParameters struct { + + // Is this the Default Storage Account for the HDInsight Hadoop Cluster? Changing this forces a new resource to be created. + IsDefault *bool `json:"isDefault,omitempty" tf:"is_default,omitempty"` + + // The ID of the Storage Container. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Container + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + StorageContainerID *string `json:"storageContainerId,omitempty" tf:"storage_container_id,omitempty"` + + // Reference to a Container in storage to populate storageContainerId. + // +kubebuilder:validation:Optional + StorageContainerIDRef *v1.Reference `json:"storageContainerIdRef,omitempty" tf:"-"` + + // Selector for a Container in storage to populate storageContainerId. + // +kubebuilder:validation:Optional + StorageContainerIDSelector *v1.Selector `json:"storageContainerIdSelector,omitempty" tf:"-"` + + // The ID of the Storage Account. Changing this forces a new resource to be created. + StorageResourceID *string `json:"storageResourceId,omitempty" tf:"storage_resource_id,omitempty"` +} + +type StorageAccountObservation struct { + + // Is this the Default Storage Account for the HDInsight Hadoop Cluster? 
Changing this forces a new resource to be created. + IsDefault *bool `json:"isDefault,omitempty" tf:"is_default,omitempty"` + + // The ID of the Storage Container. Changing this forces a new resource to be created. + StorageContainerID *string `json:"storageContainerId,omitempty" tf:"storage_container_id,omitempty"` + + // The ID of the Storage Account. Changing this forces a new resource to be created. + StorageResourceID *string `json:"storageResourceId,omitempty" tf:"storage_resource_id,omitempty"` +} + +type StorageAccountParameters struct { + + // Is this the Default Storage Account for the HDInsight Hadoop Cluster? Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + IsDefault *bool `json:"isDefault" tf:"is_default,omitempty"` + + // The Access Key which should be used to connect to the Storage Account. Changing this forces a new resource to be created. + // +kubebuilder:validation:Required + StorageAccountKeySecretRef v1.SecretKeySelector `json:"storageAccountKeySecretRef" tf:"-"` + + // The ID of the Storage Container. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Container + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + StorageContainerID *string `json:"storageContainerId,omitempty" tf:"storage_container_id,omitempty"` + + // Reference to a Container in storage to populate storageContainerId. + // +kubebuilder:validation:Optional + StorageContainerIDRef *v1.Reference `json:"storageContainerIdRef,omitempty" tf:"-"` + + // Selector for a Container in storage to populate storageContainerId. + // +kubebuilder:validation:Optional + StorageContainerIDSelector *v1.Selector `json:"storageContainerIdSelector,omitempty" tf:"-"` + + // The ID of the Storage Account. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + StorageResourceID *string `json:"storageResourceId,omitempty" tf:"storage_resource_id,omitempty"` +} + +type UninstallScriptActionsInitParameters struct { + + // The name of the uninstall script action. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The parameters for the script. + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The URI pointing to the script to run during the installation of the edge node. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type UninstallScriptActionsObservation struct { + + // The name of the uninstall script action. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The parameters for the script. + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The URI pointing to the script to run during the installation of the edge node. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type UninstallScriptActionsParameters struct { + + // The name of the uninstall script action. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The parameters for the script. + // +kubebuilder:validation:Optional + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The URI pointing to the script to run during the installation of the edge node. + // +kubebuilder:validation:Optional + URI *string `json:"uri" tf:"uri,omitempty"` +} + +type WorkerNodeInitParameters struct { + + // A autoscale block as defined below. + Autoscale *AutoscaleInitParameters `json:"autoscale,omitempty" tf:"autoscale,omitempty"` + + // A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created. + // +listType=set + SSHKeys []*string `json:"sshKeys,omitempty" tf:"ssh_keys,omitempty"` + + // The script action which will run on the cluster. 
One or more script_actions blocks as defined above. + ScriptActions []WorkerNodeScriptActionsInitParameters `json:"scriptActions,omitempty" tf:"script_actions,omitempty"` + + // The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // The number of instances which should be run for the Worker Nodes. + TargetInstanceCount *float64 `json:"targetInstanceCount,omitempty" tf:"target_instance_count,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` + + // The Size of the Virtual Machine which should be used as the Zookeeper Nodes. 
Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created. + VMSize *string `json:"vmSize,omitempty" tf:"vm_size,omitempty"` + + // The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + VirtualNetworkID *string `json:"virtualNetworkId,omitempty" tf:"virtual_network_id,omitempty"` +} + +type WorkerNodeObservation struct { + + // A autoscale block as defined below. 
+ Autoscale *AutoscaleObservation `json:"autoscale,omitempty" tf:"autoscale,omitempty"` + + // A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created. + // +listType=set + SSHKeys []*string `json:"sshKeys,omitempty" tf:"ssh_keys,omitempty"` + + // The script action which will run on the cluster. One or more script_actions blocks as defined above. + ScriptActions []WorkerNodeScriptActionsObservation `json:"scriptActions,omitempty" tf:"script_actions,omitempty"` + + // The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // The number of instances which should be run for the Worker Nodes. + TargetInstanceCount *float64 `json:"targetInstanceCount,omitempty" tf:"target_instance_count,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` + + // The Size of the Virtual Machine which should be used as the Zookeeper Nodes. 
Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created. + VMSize *string `json:"vmSize,omitempty" tf:"vm_size,omitempty"` + + // The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + VirtualNetworkID *string `json:"virtualNetworkId,omitempty" tf:"virtual_network_id,omitempty"` +} + +type WorkerNodeParameters struct { + + // A autoscale block as defined below. 
+ // +kubebuilder:validation:Optional + Autoscale *AutoscaleParameters `json:"autoscale,omitempty" tf:"autoscale,omitempty"` + + // The Password associated with the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + PasswordSecretRef *v1.SecretKeySelector `json:"passwordSecretRef,omitempty" tf:"-"` + + // A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + // +listType=set + SSHKeys []*string `json:"sshKeys,omitempty" tf:"ssh_keys,omitempty"` + + // The script action which will run on the cluster. One or more script_actions blocks as defined above. + // +kubebuilder:validation:Optional + ScriptActions []WorkerNodeScriptActionsParameters `json:"scriptActions,omitempty" tf:"script_actions,omitempty"` + + // The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // The number of instances which should be run for the Worker Nodes. 
+ // +kubebuilder:validation:Optional + TargetInstanceCount *float64 `json:"targetInstanceCount" tf:"target_instance_count,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Username *string `json:"username" tf:"username,omitempty"` + + // The Size of the Virtual Machine which should be used as the Zookeeper Nodes. Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + VMSize *string `json:"vmSize" tf:"vm_size,omitempty"` + + // The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + VirtualNetworkID *string `json:"virtualNetworkId,omitempty" tf:"virtual_network_id,omitempty"` +} + +type WorkerNodeScriptActionsInitParameters struct { + + // The name of the uninstall script action. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The parameters for the script. + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The URI pointing to the script to run during the installation of the edge node. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type WorkerNodeScriptActionsObservation struct { + + // The name of the uninstall script action. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The parameters for the script. + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The URI pointing to the script to run during the installation of the edge node. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type WorkerNodeScriptActionsParameters struct { + + // The name of the uninstall script action. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The parameters for the script. + // +kubebuilder:validation:Optional + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The URI pointing to the script to run during the installation of the edge node. + // +kubebuilder:validation:Optional + URI *string `json:"uri" tf:"uri,omitempty"` +} + +type ZookeeperNodeInitParameters struct { + + // A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created. 
+ // +listType=set + SSHKeys []*string `json:"sshKeys,omitempty" tf:"ssh_keys,omitempty"` + + // The script action which will run on the cluster. One or more script_actions blocks as defined above. + ScriptActions []ZookeeperNodeScriptActionsInitParameters `json:"scriptActions,omitempty" tf:"script_actions,omitempty"` + + // The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` + + // The Size of the Virtual Machine which should be used as the Zookeeper Nodes. 
Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created. + VMSize *string `json:"vmSize,omitempty" tf:"vm_size,omitempty"` + + // The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + VirtualNetworkID *string `json:"virtualNetworkId,omitempty" tf:"virtual_network_id,omitempty"` +} + +type ZookeeperNodeObservation struct { + + // A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. 
Changing this forces a new resource to be created. + // +listType=set + SSHKeys []*string `json:"sshKeys,omitempty" tf:"ssh_keys,omitempty"` + + // The script action which will run on the cluster. One or more script_actions blocks as defined above. + ScriptActions []ZookeeperNodeScriptActionsObservation `json:"scriptActions,omitempty" tf:"script_actions,omitempty"` + + // The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` + + // The Size of the Virtual Machine which should be used as the Zookeeper Nodes. Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, 
Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created. + VMSize *string `json:"vmSize,omitempty" tf:"vm_size,omitempty"` + + // The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + VirtualNetworkID *string `json:"virtualNetworkId,omitempty" tf:"virtual_network_id,omitempty"` +} + +type ZookeeperNodeParameters struct { + + // The Password associated with the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + PasswordSecretRef *v1.SecretKeySelector `json:"passwordSecretRef,omitempty" tf:"-"` + + // A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + // +listType=set + SSHKeys []*string `json:"sshKeys,omitempty" tf:"ssh_keys,omitempty"` + + // The script action which will run on the cluster. One or more script_actions blocks as defined above. + // +kubebuilder:validation:Optional + ScriptActions []ZookeeperNodeScriptActionsParameters `json:"scriptActions,omitempty" tf:"script_actions,omitempty"` + + // The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Username *string `json:"username" tf:"username,omitempty"` + + // The Size of the Virtual Machine which should be used as the Zookeeper Nodes. Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, 
Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + VMSize *string `json:"vmSize" tf:"vm_size,omitempty"` + + // The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + VirtualNetworkID *string `json:"virtualNetworkId,omitempty" tf:"virtual_network_id,omitempty"` +} + +type ZookeeperNodeScriptActionsInitParameters struct { + + // The name of the uninstall script action. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The parameters for the script. + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The URI pointing to the script to run during the installation of the edge node. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type ZookeeperNodeScriptActionsObservation struct { + + // The name of the uninstall script action. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The parameters for the script. + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The URI pointing to the script to run during the installation of the edge node. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type ZookeeperNodeScriptActionsParameters struct { + + // The name of the uninstall script action. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The parameters for the script. 
+ // +kubebuilder:validation:Optional + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The URI pointing to the script to run during the installation of the edge node. + // +kubebuilder:validation:Optional + URI *string `json:"uri" tf:"uri,omitempty"` +} + +// HadoopClusterSpec defines the desired state of HadoopCluster +type HadoopClusterSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider HadoopClusterParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider HadoopClusterInitParameters `json:"initProvider,omitempty"` +} + +// HadoopClusterStatus defines the observed state of HadoopCluster. +type HadoopClusterStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider HadoopClusterObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// HadoopCluster is the Schema for the HadoopClusters API. Manages a HDInsight Hadoop Cluster. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type HadoopCluster struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.clusterVersion) || (has(self.initProvider) && has(self.initProvider.clusterVersion))",message="spec.forProvider.clusterVersion is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.componentVersion) || (has(self.initProvider) && has(self.initProvider.componentVersion))",message="spec.forProvider.componentVersion is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.gateway) || (has(self.initProvider) && has(self.initProvider.gateway))",message="spec.forProvider.gateway is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in 
self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.roles) || (has(self.initProvider) && has(self.initProvider.roles))",message="spec.forProvider.roles is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.tier) || (has(self.initProvider) && has(self.initProvider.tier))",message="spec.forProvider.tier is a required parameter" + Spec HadoopClusterSpec `json:"spec"` + Status HadoopClusterStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// HadoopClusterList contains a list of HadoopClusters +type HadoopClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []HadoopCluster `json:"items"` +} + +// Repository type metadata. +var ( + HadoopCluster_Kind = "HadoopCluster" + HadoopCluster_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: HadoopCluster_Kind}.String() + HadoopCluster_KindAPIVersion = HadoopCluster_Kind + "." + CRDGroupVersion.String() + HadoopCluster_GroupVersionKind = CRDGroupVersion.WithKind(HadoopCluster_Kind) +) + +func init() { + SchemeBuilder.Register(&HadoopCluster{}, &HadoopClusterList{}) +} diff --git a/apis/hdinsight/v1beta2/zz_hbasecluster_terraformed.go b/apis/hdinsight/v1beta2/zz_hbasecluster_terraformed.go new file mode 100755 index 000000000..7009af923 --- /dev/null +++ b/apis/hdinsight/v1beta2/zz_hbasecluster_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this HBaseCluster +func (mg *HBaseCluster) GetTerraformResourceType() string { + return "azurerm_hdinsight_hbase_cluster" +} + +// GetConnectionDetailsMapping for this HBaseCluster +func (tr *HBaseCluster) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"extension[*].primary_key": "spec.forProvider.extension[*].primaryKeySecretRef", "gateway[*].password": "spec.forProvider.gateway[*].passwordSecretRef", "metastores[*].ambari[*].password": "spec.forProvider.metastores[*].ambari[*].passwordSecretRef", "metastores[*].hive[*].password": "spec.forProvider.metastores[*].hive[*].passwordSecretRef", "metastores[*].oozie[*].password": "spec.forProvider.metastores[*].oozie[*].passwordSecretRef", "monitor[*].primary_key": "spec.forProvider.monitor[*].primaryKeySecretRef", "roles[*].head_node[*].password": "spec.forProvider.roles[*].headNode[*].passwordSecretRef", "roles[*].worker_node[*].password": "spec.forProvider.roles[*].workerNode[*].passwordSecretRef", "roles[*].zookeeper_node[*].password": "spec.forProvider.roles[*].zookeeperNode[*].passwordSecretRef", "security_profile[*].domain_user_password": "spec.forProvider.securityProfile[*].domainUserPasswordSecretRef", "storage_account[*].storage_account_key": "spec.forProvider.storageAccount[*].storageAccountKeySecretRef"} +} + +// GetObservation of this HBaseCluster +func (tr *HBaseCluster) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this HBaseCluster +func (tr *HBaseCluster) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if 
err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this HBaseCluster +func (tr *HBaseCluster) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this HBaseCluster +func (tr *HBaseCluster) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this HBaseCluster +func (tr *HBaseCluster) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this HBaseCluster +func (tr *HBaseCluster) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this HBaseCluster +func (tr *HBaseCluster) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this HBaseCluster using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *HBaseCluster) LateInitialize(attrs []byte) (bool, error) { + params := &HBaseClusterParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *HBaseCluster) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/hdinsight/v1beta2/zz_hbasecluster_types.go b/apis/hdinsight/v1beta2/zz_hbasecluster_types.go new file mode 100755 index 000000000..2ef05e457 --- /dev/null +++ b/apis/hdinsight/v1beta2/zz_hbasecluster_types.go @@ -0,0 +1,1392 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AutoscaleRecurrenceInitParameters struct { + + // A list of schedule blocks as defined below. + Schedule []RecurrenceScheduleInitParameters `json:"schedule,omitempty" tf:"schedule,omitempty"` + + // The time zone for the autoscale schedule times. 
+ Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` +} + +type AutoscaleRecurrenceObservation struct { + + // A list of schedule blocks as defined below. + Schedule []RecurrenceScheduleObservation `json:"schedule,omitempty" tf:"schedule,omitempty"` + + // The time zone for the autoscale schedule times. + Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` +} + +type AutoscaleRecurrenceParameters struct { + + // A list of schedule blocks as defined below. + // +kubebuilder:validation:Optional + Schedule []RecurrenceScheduleParameters `json:"schedule" tf:"schedule,omitempty"` + + // The time zone for the autoscale schedule times. + // +kubebuilder:validation:Optional + Timezone *string `json:"timezone" tf:"timezone,omitempty"` +} + +type HBaseClusterComponentVersionInitParameters struct { + + // The version of HBase which should be used for this HDInsight HBase Cluster. Changing this forces a new resource to be created. + HBase *string `json:"hbase,omitempty" tf:"hbase,omitempty"` +} + +type HBaseClusterComponentVersionObservation struct { + + // The version of HBase which should be used for this HDInsight HBase Cluster. Changing this forces a new resource to be created. + HBase *string `json:"hbase,omitempty" tf:"hbase,omitempty"` +} + +type HBaseClusterComponentVersionParameters struct { + + // The version of HBase which should be used for this HDInsight HBase Cluster. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + HBase *string `json:"hbase" tf:"hbase,omitempty"` +} + +type HBaseClusterComputeIsolationInitParameters struct { + + // This field indicates whether enable compute isolation or not. Possible values are true or false. + ComputeIsolationEnabled *bool `json:"computeIsolationEnabled,omitempty" tf:"compute_isolation_enabled,omitempty"` + + // The name of the host SKU. 
+	HostSku *string `json:"hostSku,omitempty" tf:"host_sku,omitempty"` +} + +type HBaseClusterComputeIsolationObservation struct { + +	// This field indicates whether to enable compute isolation or not. Possible values are true or false. +	ComputeIsolationEnabled *bool `json:"computeIsolationEnabled,omitempty" tf:"compute_isolation_enabled,omitempty"` + +	// The name of the host SKU. +	HostSku *string `json:"hostSku,omitempty" tf:"host_sku,omitempty"` +} + +type HBaseClusterComputeIsolationParameters struct { + +	// This field indicates whether to enable compute isolation or not. Possible values are true or false. +	// +kubebuilder:validation:Optional +	ComputeIsolationEnabled *bool `json:"computeIsolationEnabled,omitempty" tf:"compute_isolation_enabled,omitempty"` + +	// The name of the host SKU. +	// +kubebuilder:validation:Optional +	HostSku *string `json:"hostSku,omitempty" tf:"host_sku,omitempty"` +} + +type HBaseClusterDiskEncryptionInitParameters struct { + +	// This is an algorithm identifier for encryption. Possible values are RSA1_5, RSA-OAEP, RSA-OAEP-256. +	EncryptionAlgorithm *string `json:"encryptionAlgorithm,omitempty" tf:"encryption_algorithm,omitempty"` + +	// This is an indicator to show whether resource disk encryption is enabled. +	EncryptionAtHostEnabled *bool `json:"encryptionAtHostEnabled,omitempty" tf:"encryption_at_host_enabled,omitempty"` + +	// The ID of the key vault key. +	KeyVaultKeyID *string `json:"keyVaultKeyId,omitempty" tf:"key_vault_key_id,omitempty"` + +	// This is the resource ID of Managed Identity used to access the key vault. +	KeyVaultManagedIdentityID *string `json:"keyVaultManagedIdentityId,omitempty" tf:"key_vault_managed_identity_id,omitempty"` +} + +type HBaseClusterDiskEncryptionObservation struct { + +	// This is an algorithm identifier for encryption. Possible values are RSA1_5, RSA-OAEP, RSA-OAEP-256. 
+ EncryptionAlgorithm *string `json:"encryptionAlgorithm,omitempty" tf:"encryption_algorithm,omitempty"` + + // This is indicator to show whether resource disk encryption is enabled. + EncryptionAtHostEnabled *bool `json:"encryptionAtHostEnabled,omitempty" tf:"encryption_at_host_enabled,omitempty"` + + // The ID of the key vault key. + KeyVaultKeyID *string `json:"keyVaultKeyId,omitempty" tf:"key_vault_key_id,omitempty"` + + // This is the resource ID of Managed Identity used to access the key vault. + KeyVaultManagedIdentityID *string `json:"keyVaultManagedIdentityId,omitempty" tf:"key_vault_managed_identity_id,omitempty"` +} + +type HBaseClusterDiskEncryptionParameters struct { + + // This is an algorithm identifier for encryption. Possible values are RSA1_5, RSA-OAEP, RSA-OAEP-256. + // +kubebuilder:validation:Optional + EncryptionAlgorithm *string `json:"encryptionAlgorithm,omitempty" tf:"encryption_algorithm,omitempty"` + + // This is indicator to show whether resource disk encryption is enabled. + // +kubebuilder:validation:Optional + EncryptionAtHostEnabled *bool `json:"encryptionAtHostEnabled,omitempty" tf:"encryption_at_host_enabled,omitempty"` + + // The ID of the key vault key. + // +kubebuilder:validation:Optional + KeyVaultKeyID *string `json:"keyVaultKeyId,omitempty" tf:"key_vault_key_id,omitempty"` + + // This is the resource ID of Managed Identity used to access the key vault. + // +kubebuilder:validation:Optional + KeyVaultManagedIdentityID *string `json:"keyVaultManagedIdentityId,omitempty" tf:"key_vault_managed_identity_id,omitempty"` +} + +type HBaseClusterExtensionInitParameters struct { + + // The workspace ID of the log analytics extension. + LogAnalyticsWorkspaceID *string `json:"logAnalyticsWorkspaceId,omitempty" tf:"log_analytics_workspace_id,omitempty"` +} + +type HBaseClusterExtensionObservation struct { + + // The workspace ID of the log analytics extension. 
+ LogAnalyticsWorkspaceID *string `json:"logAnalyticsWorkspaceId,omitempty" tf:"log_analytics_workspace_id,omitempty"` +} + +type HBaseClusterExtensionParameters struct { + + // The workspace ID of the log analytics extension. + // +kubebuilder:validation:Optional + LogAnalyticsWorkspaceID *string `json:"logAnalyticsWorkspaceId" tf:"log_analytics_workspace_id,omitempty"` + + // The workspace key of the log analytics extension. + // +kubebuilder:validation:Required + PrimaryKeySecretRef v1.SecretKeySelector `json:"primaryKeySecretRef" tf:"-"` +} + +type HBaseClusterGatewayInitParameters struct { + + // The username used for the Ambari Portal. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type HBaseClusterGatewayObservation struct { + + // The username used for the Ambari Portal. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type HBaseClusterGatewayParameters struct { + + // The password used for the Ambari Portal. + // +kubebuilder:validation:Required + PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` + + // The username used for the Ambari Portal. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Username *string `json:"username" tf:"username,omitempty"` +} + +type HBaseClusterInitParameters struct { + + // Specifies the Version of HDInsights which should be used for this Cluster. Changing this forces a new resource to be created. + ClusterVersion *string `json:"clusterVersion,omitempty" tf:"cluster_version,omitempty"` + + // A component_version block as defined below. + ComponentVersion *HBaseClusterComponentVersionInitParameters `json:"componentVersion,omitempty" tf:"component_version,omitempty"` + + // A compute_isolation block as defined below. 
+ ComputeIsolation *HBaseClusterComputeIsolationInitParameters `json:"computeIsolation,omitempty" tf:"compute_isolation,omitempty"` + + // One or more disk_encryption block as defined below. + DiskEncryption []HBaseClusterDiskEncryptionInitParameters `json:"diskEncryption,omitempty" tf:"disk_encryption,omitempty"` + + // An extension block as defined below. + Extension *HBaseClusterExtensionInitParameters `json:"extension,omitempty" tf:"extension,omitempty"` + + // A gateway block as defined below. + Gateway *HBaseClusterGatewayInitParameters `json:"gateway,omitempty" tf:"gateway,omitempty"` + + // Specifies the Azure Region which this HDInsight HBase Cluster should exist. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A metastores block as defined below. + Metastores *HBaseClusterMetastoresInitParameters `json:"metastores,omitempty" tf:"metastores,omitempty"` + + // A monitor block as defined below. + Monitor *HBaseClusterMonitorInitParameters `json:"monitor,omitempty" tf:"monitor,omitempty"` + + // A network block as defined below. + Network *HBaseClusterNetworkInitParameters `json:"network,omitempty" tf:"network,omitempty"` + + // A roles block as defined below. + Roles *HBaseClusterRolesInitParameters `json:"roles,omitempty" tf:"roles,omitempty"` + + // A security_profile block as defined below. Changing this forces a new resource to be created. + SecurityProfile *HBaseClusterSecurityProfileInitParameters `json:"securityProfile,omitempty" tf:"security_profile,omitempty"` + + // One or more storage_account block as defined below. + StorageAccount []HBaseClusterStorageAccountInitParameters `json:"storageAccount,omitempty" tf:"storage_account,omitempty"` + + // A storage_account_gen2 block as defined below. 
+ StorageAccountGen2 *HBaseClusterStorageAccountGen2InitParameters `json:"storageAccountGen2,omitempty" tf:"storage_account_gen2,omitempty"` + + // The minimal supported TLS version. Possible values are 1.0, 1.1 or 1.2. Changing this forces a new resource to be created. + TLSMinVersion *string `json:"tlsMinVersion,omitempty" tf:"tls_min_version,omitempty"` + + // A map of Tags which should be assigned to this HDInsight HBase Cluster. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies the Tier which should be used for this HDInsight HBase Cluster. Possible values are Standard or Premium. Changing this forces a new resource to be created. + Tier *string `json:"tier,omitempty" tf:"tier,omitempty"` +} + +type HBaseClusterMetastoresInitParameters struct { + + // An ambari block as defined below. + Ambari *MetastoresAmbariInitParameters `json:"ambari,omitempty" tf:"ambari,omitempty"` + + // A hive block as defined below. + Hive *MetastoresHiveInitParameters `json:"hive,omitempty" tf:"hive,omitempty"` + + // An oozie block as defined below. + Oozie *MetastoresOozieInitParameters `json:"oozie,omitempty" tf:"oozie,omitempty"` +} + +type HBaseClusterMetastoresObservation struct { + + // An ambari block as defined below. + Ambari *MetastoresAmbariObservation `json:"ambari,omitempty" tf:"ambari,omitempty"` + + // A hive block as defined below. + Hive *MetastoresHiveObservation `json:"hive,omitempty" tf:"hive,omitempty"` + + // An oozie block as defined below. + Oozie *MetastoresOozieObservation `json:"oozie,omitempty" tf:"oozie,omitempty"` +} + +type HBaseClusterMetastoresParameters struct { + + // An ambari block as defined below. + // +kubebuilder:validation:Optional + Ambari *MetastoresAmbariParameters `json:"ambari,omitempty" tf:"ambari,omitempty"` + + // A hive block as defined below. 
+ // +kubebuilder:validation:Optional + Hive *MetastoresHiveParameters `json:"hive,omitempty" tf:"hive,omitempty"` + + // An oozie block as defined below. + // +kubebuilder:validation:Optional + Oozie *MetastoresOozieParameters `json:"oozie,omitempty" tf:"oozie,omitempty"` +} + +type HBaseClusterMonitorInitParameters struct { + + // The Operations Management Suite (OMS) workspace ID. + LogAnalyticsWorkspaceID *string `json:"logAnalyticsWorkspaceId,omitempty" tf:"log_analytics_workspace_id,omitempty"` +} + +type HBaseClusterMonitorObservation struct { + + // The Operations Management Suite (OMS) workspace ID. + LogAnalyticsWorkspaceID *string `json:"logAnalyticsWorkspaceId,omitempty" tf:"log_analytics_workspace_id,omitempty"` +} + +type HBaseClusterMonitorParameters struct { + + // The Operations Management Suite (OMS) workspace ID. + // +kubebuilder:validation:Optional + LogAnalyticsWorkspaceID *string `json:"logAnalyticsWorkspaceId" tf:"log_analytics_workspace_id,omitempty"` + + // The Operations Management Suite (OMS) workspace key. + // +kubebuilder:validation:Required + PrimaryKeySecretRef v1.SecretKeySelector `json:"primaryKeySecretRef" tf:"-"` +} + +type HBaseClusterNetworkInitParameters struct { + + // The direction of the resource provider connection. Possible values include Inbound or Outbound. Defaults to Inbound. Changing this forces a new resource to be created. + ConnectionDirection *string `json:"connectionDirection,omitempty" tf:"connection_direction,omitempty"` + + // Is the private link enabled? Possible values include true or false. Defaults to false. Changing this forces a new resource to be created. + PrivateLinkEnabled *bool `json:"privateLinkEnabled,omitempty" tf:"private_link_enabled,omitempty"` +} + +type HBaseClusterNetworkObservation struct { + + // The direction of the resource provider connection. Possible values include Inbound or Outbound. Defaults to Inbound. Changing this forces a new resource to be created. 
+ ConnectionDirection *string `json:"connectionDirection,omitempty" tf:"connection_direction,omitempty"` + + // Is the private link enabled? Possible values include true or false. Defaults to false. Changing this forces a new resource to be created. + PrivateLinkEnabled *bool `json:"privateLinkEnabled,omitempty" tf:"private_link_enabled,omitempty"` +} + +type HBaseClusterNetworkParameters struct { + + // The direction of the resource provider connection. Possible values include Inbound or Outbound. Defaults to Inbound. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ConnectionDirection *string `json:"connectionDirection,omitempty" tf:"connection_direction,omitempty"` + + // Is the private link enabled? Possible values include true or false. Defaults to false. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + PrivateLinkEnabled *bool `json:"privateLinkEnabled,omitempty" tf:"private_link_enabled,omitempty"` +} + +type HBaseClusterObservation struct { + + // Specifies the Version of HDInsights which should be used for this Cluster. Changing this forces a new resource to be created. + ClusterVersion *string `json:"clusterVersion,omitempty" tf:"cluster_version,omitempty"` + + // A component_version block as defined below. + ComponentVersion *HBaseClusterComponentVersionObservation `json:"componentVersion,omitempty" tf:"component_version,omitempty"` + + // A compute_isolation block as defined below. + ComputeIsolation *HBaseClusterComputeIsolationObservation `json:"computeIsolation,omitempty" tf:"compute_isolation,omitempty"` + + // One or more disk_encryption block as defined below. + DiskEncryption []HBaseClusterDiskEncryptionObservation `json:"diskEncryption,omitempty" tf:"disk_encryption,omitempty"` + + // An extension block as defined below. + Extension *HBaseClusterExtensionObservation `json:"extension,omitempty" tf:"extension,omitempty"` + + // A gateway block as defined below. 
+ Gateway *HBaseClusterGatewayObservation `json:"gateway,omitempty" tf:"gateway,omitempty"` + + // The HTTPS Connectivity Endpoint for this HDInsight HBase Cluster. + HTTPSEndpoint *string `json:"httpsEndpoint,omitempty" tf:"https_endpoint,omitempty"` + + // The ID of the HDInsight HBase Cluster. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies the Azure Region which this HDInsight HBase Cluster should exist. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A metastores block as defined below. + Metastores *HBaseClusterMetastoresObservation `json:"metastores,omitempty" tf:"metastores,omitempty"` + + // A monitor block as defined below. + Monitor *HBaseClusterMonitorObservation `json:"monitor,omitempty" tf:"monitor,omitempty"` + + // A network block as defined below. + Network *HBaseClusterNetworkObservation `json:"network,omitempty" tf:"network,omitempty"` + + // Specifies the name of the Resource Group in which this HDInsight HBase Cluster should exist. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A roles block as defined below. + Roles *HBaseClusterRolesObservation `json:"roles,omitempty" tf:"roles,omitempty"` + + // The SSH Connectivity Endpoint for this HDInsight HBase Cluster. + SSHEndpoint *string `json:"sshEndpoint,omitempty" tf:"ssh_endpoint,omitempty"` + + // A security_profile block as defined below. Changing this forces a new resource to be created. + SecurityProfile *HBaseClusterSecurityProfileObservation `json:"securityProfile,omitempty" tf:"security_profile,omitempty"` + + // One or more storage_account block as defined below. + StorageAccount []HBaseClusterStorageAccountObservation `json:"storageAccount,omitempty" tf:"storage_account,omitempty"` + + // A storage_account_gen2 block as defined below. 
+ StorageAccountGen2 *HBaseClusterStorageAccountGen2Observation `json:"storageAccountGen2,omitempty" tf:"storage_account_gen2,omitempty"` + + // The minimal supported TLS version. Possible values are 1.0, 1.1 or 1.2. Changing this forces a new resource to be created. + TLSMinVersion *string `json:"tlsMinVersion,omitempty" tf:"tls_min_version,omitempty"` + + // A map of Tags which should be assigned to this HDInsight HBase Cluster. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies the Tier which should be used for this HDInsight HBase Cluster. Possible values are Standard or Premium. Changing this forces a new resource to be created. + Tier *string `json:"tier,omitempty" tf:"tier,omitempty"` +} + +type HBaseClusterParameters struct { + + // Specifies the Version of HDInsights which should be used for this Cluster. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ClusterVersion *string `json:"clusterVersion,omitempty" tf:"cluster_version,omitempty"` + + // A component_version block as defined below. + // +kubebuilder:validation:Optional + ComponentVersion *HBaseClusterComponentVersionParameters `json:"componentVersion,omitempty" tf:"component_version,omitempty"` + + // A compute_isolation block as defined below. + // +kubebuilder:validation:Optional + ComputeIsolation *HBaseClusterComputeIsolationParameters `json:"computeIsolation,omitempty" tf:"compute_isolation,omitempty"` + + // One or more disk_encryption block as defined below. + // +kubebuilder:validation:Optional + DiskEncryption []HBaseClusterDiskEncryptionParameters `json:"diskEncryption,omitempty" tf:"disk_encryption,omitempty"` + + // An extension block as defined below. + // +kubebuilder:validation:Optional + Extension *HBaseClusterExtensionParameters `json:"extension,omitempty" tf:"extension,omitempty"` + + // A gateway block as defined below. 
+ // +kubebuilder:validation:Optional + Gateway *HBaseClusterGatewayParameters `json:"gateway,omitempty" tf:"gateway,omitempty"` + + // Specifies the Azure Region which this HDInsight HBase Cluster should exist. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A metastores block as defined below. + // +kubebuilder:validation:Optional + Metastores *HBaseClusterMetastoresParameters `json:"metastores,omitempty" tf:"metastores,omitempty"` + + // A monitor block as defined below. + // +kubebuilder:validation:Optional + Monitor *HBaseClusterMonitorParameters `json:"monitor,omitempty" tf:"monitor,omitempty"` + + // A network block as defined below. + // +kubebuilder:validation:Optional + Network *HBaseClusterNetworkParameters `json:"network,omitempty" tf:"network,omitempty"` + + // Specifies the name of the Resource Group in which this HDInsight HBase Cluster should exist. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A roles block as defined below. + // +kubebuilder:validation:Optional + Roles *HBaseClusterRolesParameters `json:"roles,omitempty" tf:"roles,omitempty"` + + // A security_profile block as defined below. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + SecurityProfile *HBaseClusterSecurityProfileParameters `json:"securityProfile,omitempty" tf:"security_profile,omitempty"` + + // One or more storage_account block as defined below. + // +kubebuilder:validation:Optional + StorageAccount []HBaseClusterStorageAccountParameters `json:"storageAccount,omitempty" tf:"storage_account,omitempty"` + + // A storage_account_gen2 block as defined below. + // +kubebuilder:validation:Optional + StorageAccountGen2 *HBaseClusterStorageAccountGen2Parameters `json:"storageAccountGen2,omitempty" tf:"storage_account_gen2,omitempty"` + + // The minimal supported TLS version. Possible values are 1.0, 1.1 or 1.2. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + TLSMinVersion *string `json:"tlsMinVersion,omitempty" tf:"tls_min_version,omitempty"` + + // A map of Tags which should be assigned to this HDInsight HBase Cluster. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies the Tier which should be used for this HDInsight HBase Cluster. Possible values are Standard or Premium. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Tier *string `json:"tier,omitempty" tf:"tier,omitempty"` +} + +type HBaseClusterRolesInitParameters struct { + + // A head_node block as defined above. + HeadNode *RolesHeadNodeInitParameters `json:"headNode,omitempty" tf:"head_node,omitempty"` + + // A worker_node block as defined below. + WorkerNode *RolesWorkerNodeInitParameters `json:"workerNode,omitempty" tf:"worker_node,omitempty"` + + // A zookeeper_node block as defined below. + ZookeeperNode *RolesZookeeperNodeInitParameters `json:"zookeeperNode,omitempty" tf:"zookeeper_node,omitempty"` +} + +type HBaseClusterRolesObservation struct { + + // A head_node block as defined above. 
+ HeadNode *RolesHeadNodeObservation `json:"headNode,omitempty" tf:"head_node,omitempty"` + + // A worker_node block as defined below. + WorkerNode *RolesWorkerNodeObservation `json:"workerNode,omitempty" tf:"worker_node,omitempty"` + + // A zookeeper_node block as defined below. + ZookeeperNode *RolesZookeeperNodeObservation `json:"zookeeperNode,omitempty" tf:"zookeeper_node,omitempty"` +} + +type HBaseClusterRolesParameters struct { + + // A head_node block as defined above. + // +kubebuilder:validation:Optional + HeadNode *RolesHeadNodeParameters `json:"headNode" tf:"head_node,omitempty"` + + // A worker_node block as defined below. + // +kubebuilder:validation:Optional + WorkerNode *RolesWorkerNodeParameters `json:"workerNode" tf:"worker_node,omitempty"` + + // A zookeeper_node block as defined below. + // +kubebuilder:validation:Optional + ZookeeperNode *RolesZookeeperNodeParameters `json:"zookeeperNode" tf:"zookeeper_node,omitempty"` +} + +type HBaseClusterSecurityProfileInitParameters struct { + + // The resource ID of the Azure Active Directory Domain Service. Changing this forces a new resource to be created. + AaddsResourceID *string `json:"aaddsResourceId,omitempty" tf:"aadds_resource_id,omitempty"` + + // A list of the distinguished names for the cluster user groups. Changing this forces a new resource to be created. + // +listType=set + ClusterUsersGroupDNS []*string `json:"clusterUsersGroupDns,omitempty" tf:"cluster_users_group_dns,omitempty"` + + // The name of the Azure Active Directory Domain. Changing this forces a new resource to be created. + DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` + + // The username of the Azure Active Directory Domain. Changing this forces a new resource to be created. + DomainUsername *string `json:"domainUsername,omitempty" tf:"domain_username,omitempty"` + + // A list of the LDAPS URLs to communicate with the Azure Active Directory. Changing this forces a new resource to be created. 
+ // +listType=set + LdapsUrls []*string `json:"ldapsUrls,omitempty" tf:"ldaps_urls,omitempty"` + + // The User Assigned Identity for the HDInsight Cluster. Changing this forces a new resource to be created. + MsiResourceID *string `json:"msiResourceId,omitempty" tf:"msi_resource_id,omitempty"` +} + +type HBaseClusterSecurityProfileObservation struct { + + // The resource ID of the Azure Active Directory Domain Service. Changing this forces a new resource to be created. + AaddsResourceID *string `json:"aaddsResourceId,omitempty" tf:"aadds_resource_id,omitempty"` + + // A list of the distinguished names for the cluster user groups. Changing this forces a new resource to be created. + // +listType=set + ClusterUsersGroupDNS []*string `json:"clusterUsersGroupDns,omitempty" tf:"cluster_users_group_dns,omitempty"` + + // The name of the Azure Active Directory Domain. Changing this forces a new resource to be created. + DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` + + // The username of the Azure Active Directory Domain. Changing this forces a new resource to be created. + DomainUsername *string `json:"domainUsername,omitempty" tf:"domain_username,omitempty"` + + // A list of the LDAPS URLs to communicate with the Azure Active Directory. Changing this forces a new resource to be created. + // +listType=set + LdapsUrls []*string `json:"ldapsUrls,omitempty" tf:"ldaps_urls,omitempty"` + + // The User Assigned Identity for the HDInsight Cluster. Changing this forces a new resource to be created. + MsiResourceID *string `json:"msiResourceId,omitempty" tf:"msi_resource_id,omitempty"` +} + +type HBaseClusterSecurityProfileParameters struct { + + // The resource ID of the Azure Active Directory Domain Service. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + AaddsResourceID *string `json:"aaddsResourceId" tf:"aadds_resource_id,omitempty"` + + // A list of the distinguished names for the cluster user groups. 
Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + // +listType=set + ClusterUsersGroupDNS []*string `json:"clusterUsersGroupDns,omitempty" tf:"cluster_users_group_dns,omitempty"` + + // The name of the Azure Active Directory Domain. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + DomainName *string `json:"domainName" tf:"domain_name,omitempty"` + + // The user password of the Azure Active Directory Domain. Changing this forces a new resource to be created. + // +kubebuilder:validation:Required + DomainUserPasswordSecretRef v1.SecretKeySelector `json:"domainUserPasswordSecretRef" tf:"-"` + + // The username of the Azure Active Directory Domain. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + DomainUsername *string `json:"domainUsername" tf:"domain_username,omitempty"` + + // A list of the LDAPS URLs to communicate with the Azure Active Directory. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + // +listType=set + LdapsUrls []*string `json:"ldapsUrls" tf:"ldaps_urls,omitempty"` + + // The User Assigned Identity for the HDInsight Cluster. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + MsiResourceID *string `json:"msiResourceId" tf:"msi_resource_id,omitempty"` +} + +type HBaseClusterStorageAccountGen2InitParameters struct { + + // The ID of the Gen2 Filesystem. Changing this forces a new resource to be created. + FileSystemID *string `json:"filesystemId,omitempty" tf:"filesystem_id,omitempty"` + + // Is this the Default Storage Account for the HDInsight Hadoop Cluster? Changing this forces a new resource to be created. + IsDefault *bool `json:"isDefault,omitempty" tf:"is_default,omitempty"` + + // The ID of Managed Identity to use for accessing the Gen2 filesystem. Changing this forces a new resource to be created. 
+ ManagedIdentityResourceID *string `json:"managedIdentityResourceId,omitempty" tf:"managed_identity_resource_id,omitempty"` + + // The ID of the Storage Account. Changing this forces a new resource to be created. + StorageResourceID *string `json:"storageResourceId,omitempty" tf:"storage_resource_id,omitempty"` +} + +type HBaseClusterStorageAccountGen2Observation struct { + + // The ID of the Gen2 Filesystem. Changing this forces a new resource to be created. + FileSystemID *string `json:"filesystemId,omitempty" tf:"filesystem_id,omitempty"` + + // Is this the Default Storage Account for the HDInsight Hadoop Cluster? Changing this forces a new resource to be created. + IsDefault *bool `json:"isDefault,omitempty" tf:"is_default,omitempty"` + + // The ID of Managed Identity to use for accessing the Gen2 filesystem. Changing this forces a new resource to be created. + ManagedIdentityResourceID *string `json:"managedIdentityResourceId,omitempty" tf:"managed_identity_resource_id,omitempty"` + + // The ID of the Storage Account. Changing this forces a new resource to be created. + StorageResourceID *string `json:"storageResourceId,omitempty" tf:"storage_resource_id,omitempty"` +} + +type HBaseClusterStorageAccountGen2Parameters struct { + + // The ID of the Gen2 Filesystem. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + FileSystemID *string `json:"filesystemId" tf:"filesystem_id,omitempty"` + + // Is this the Default Storage Account for the HDInsight Hadoop Cluster? Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + IsDefault *bool `json:"isDefault" tf:"is_default,omitempty"` + + // The ID of Managed Identity to use for accessing the Gen2 filesystem. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ManagedIdentityResourceID *string `json:"managedIdentityResourceId" tf:"managed_identity_resource_id,omitempty"` + + // The ID of the Storage Account. 
Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + StorageResourceID *string `json:"storageResourceId" tf:"storage_resource_id,omitempty"` +} + +type HBaseClusterStorageAccountInitParameters struct { + + // Is this the Default Storage Account for the HDInsight Hadoop Cluster? Changing this forces a new resource to be created. + IsDefault *bool `json:"isDefault,omitempty" tf:"is_default,omitempty"` + + // The ID of the Storage Container. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Container + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + StorageContainerID *string `json:"storageContainerId,omitempty" tf:"storage_container_id,omitempty"` + + // Reference to a Container in storage to populate storageContainerId. + // +kubebuilder:validation:Optional + StorageContainerIDRef *v1.Reference `json:"storageContainerIdRef,omitempty" tf:"-"` + + // Selector for a Container in storage to populate storageContainerId. + // +kubebuilder:validation:Optional + StorageContainerIDSelector *v1.Selector `json:"storageContainerIdSelector,omitempty" tf:"-"` + + // The ID of the Storage Account. Changing this forces a new resource to be created. + StorageResourceID *string `json:"storageResourceId,omitempty" tf:"storage_resource_id,omitempty"` +} + +type HBaseClusterStorageAccountObservation struct { + + // Is this the Default Storage Account for the HDInsight Hadoop Cluster? Changing this forces a new resource to be created. + IsDefault *bool `json:"isDefault,omitempty" tf:"is_default,omitempty"` + + // The ID of the Storage Container. Changing this forces a new resource to be created. + StorageContainerID *string `json:"storageContainerId,omitempty" tf:"storage_container_id,omitempty"` + + // The ID of the Storage Account. Changing this forces a new resource to be created. 
+ StorageResourceID *string `json:"storageResourceId,omitempty" tf:"storage_resource_id,omitempty"` +} + +type HBaseClusterStorageAccountParameters struct { + + // Is this the Default Storage Account for the HDInsight Hadoop Cluster? Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + IsDefault *bool `json:"isDefault" tf:"is_default,omitempty"` + + // The Access Key which should be used to connect to the Storage Account. Changing this forces a new resource to be created. + // +kubebuilder:validation:Required + StorageAccountKeySecretRef v1.SecretKeySelector `json:"storageAccountKeySecretRef" tf:"-"` + + // The ID of the Storage Container. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Container + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + StorageContainerID *string `json:"storageContainerId,omitempty" tf:"storage_container_id,omitempty"` + + // Reference to a Container in storage to populate storageContainerId. + // +kubebuilder:validation:Optional + StorageContainerIDRef *v1.Reference `json:"storageContainerIdRef,omitempty" tf:"-"` + + // Selector for a Container in storage to populate storageContainerId. + // +kubebuilder:validation:Optional + StorageContainerIDSelector *v1.Selector `json:"storageContainerIdSelector,omitempty" tf:"-"` + + // The ID of the Storage Account. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + StorageResourceID *string `json:"storageResourceId,omitempty" tf:"storage_resource_id,omitempty"` +} + +type HeadNodeScriptActionsInitParameters struct { + + // The name of the script action. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The parameters for the script provided. 
+ Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The URI to the script. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type HeadNodeScriptActionsObservation struct { + + // The name of the script action. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The parameters for the script provided. + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The URI to the script. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type HeadNodeScriptActionsParameters struct { + + // The name of the script action. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The parameters for the script provided. + // +kubebuilder:validation:Optional + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The URI to the script. + // +kubebuilder:validation:Optional + URI *string `json:"uri" tf:"uri,omitempty"` +} + +type MetastoresAmbariInitParameters struct { + + // The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created. + Server *string `json:"server,omitempty" tf:"server,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type MetastoresAmbariObservation struct { + + // The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. 
Changing this forces a new resource to be created. + Server *string `json:"server,omitempty" tf:"server,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type MetastoresAmbariParameters struct { + + // The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + DatabaseName *string `json:"databaseName" tf:"database_name,omitempty"` + + // The Password associated with the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Required + PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` + + // The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Server *string `json:"server" tf:"server,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Username *string `json:"username" tf:"username,omitempty"` +} + +type MetastoresHiveInitParameters struct { + + // The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created. + Server *string `json:"server,omitempty" tf:"server,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. 
+ Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type MetastoresHiveObservation struct { + + // The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created. + Server *string `json:"server,omitempty" tf:"server,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type MetastoresHiveParameters struct { + + // The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + DatabaseName *string `json:"databaseName" tf:"database_name,omitempty"` + + // The Password associated with the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Required + PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` + + // The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Server *string `json:"server" tf:"server,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Username *string `json:"username" tf:"username,omitempty"` +} + +type MetastoresOozieInitParameters struct { + + // The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created. 
+ DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created. + Server *string `json:"server,omitempty" tf:"server,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type MetastoresOozieObservation struct { + + // The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created. + Server *string `json:"server,omitempty" tf:"server,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type MetastoresOozieParameters struct { + + // The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + DatabaseName *string `json:"databaseName" tf:"database_name,omitempty"` + + // The Password associated with the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Required + PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` + + // The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Server *string `json:"server" tf:"server,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. 
Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Username *string `json:"username" tf:"username,omitempty"` +} + +type RecurrenceScheduleInitParameters struct { + + // The days of the week to perform autoscale. Possible values are Monday, Tuesday, Wednesday, Thursday, Friday, Saturday and Sunday. + Days []*string `json:"days,omitempty" tf:"days,omitempty"` + + // The number of instances which should be run for the Worker Nodes. + TargetInstanceCount *float64 `json:"targetInstanceCount,omitempty" tf:"target_instance_count,omitempty"` + + // The time of day to perform the autoscale in 24hour format. + Time *string `json:"time,omitempty" tf:"time,omitempty"` +} + +type RecurrenceScheduleObservation struct { + + // The days of the week to perform autoscale. Possible values are Monday, Tuesday, Wednesday, Thursday, Friday, Saturday and Sunday. + Days []*string `json:"days,omitempty" tf:"days,omitempty"` + + // The number of instances which should be run for the Worker Nodes. + TargetInstanceCount *float64 `json:"targetInstanceCount,omitempty" tf:"target_instance_count,omitempty"` + + // The time of day to perform the autoscale in 24hour format. + Time *string `json:"time,omitempty" tf:"time,omitempty"` +} + +type RecurrenceScheduleParameters struct { + + // The days of the week to perform autoscale. Possible values are Monday, Tuesday, Wednesday, Thursday, Friday, Saturday and Sunday. + // +kubebuilder:validation:Optional + Days []*string `json:"days" tf:"days,omitempty"` + + // The number of instances which should be run for the Worker Nodes. + // +kubebuilder:validation:Optional + TargetInstanceCount *float64 `json:"targetInstanceCount" tf:"target_instance_count,omitempty"` + + // The time of day to perform the autoscale in 24hour format. 
+ // +kubebuilder:validation:Optional + Time *string `json:"time" tf:"time,omitempty"` +} + +type RolesHeadNodeInitParameters struct { + + // A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created. + // +listType=set + SSHKeys []*string `json:"sshKeys,omitempty" tf:"ssh_keys,omitempty"` + + // The script action which will run on the cluster. One or more script_actions blocks as defined above. + ScriptActions []HeadNodeScriptActionsInitParameters `json:"scriptActions,omitempty" tf:"script_actions,omitempty"` + + // The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` + + // The Size of the Virtual Machine which should be used as the Zookeeper Nodes. 
Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created. + VMSize *string `json:"vmSize,omitempty" tf:"vm_size,omitempty"` + + // The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + VirtualNetworkID *string `json:"virtualNetworkId,omitempty" tf:"virtual_network_id,omitempty"` +} + +type RolesHeadNodeObservation struct { + + // A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. 
Changing this forces a new resource to be created. + // +listType=set + SSHKeys []*string `json:"sshKeys,omitempty" tf:"ssh_keys,omitempty"` + + // The script action which will run on the cluster. One or more script_actions blocks as defined above. + ScriptActions []HeadNodeScriptActionsObservation `json:"scriptActions,omitempty" tf:"script_actions,omitempty"` + + // The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` + + // The Size of the Virtual Machine which should be used as the Zookeeper Nodes. Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, 
Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created. + VMSize *string `json:"vmSize,omitempty" tf:"vm_size,omitempty"` + + // The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + VirtualNetworkID *string `json:"virtualNetworkId,omitempty" tf:"virtual_network_id,omitempty"` +} + +type RolesHeadNodeParameters struct { + + // The Password associated with the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + PasswordSecretRef *v1.SecretKeySelector `json:"passwordSecretRef,omitempty" tf:"-"` + + // A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + // +listType=set + SSHKeys []*string `json:"sshKeys,omitempty" tf:"ssh_keys,omitempty"` + + // The script action which will run on the cluster. One or more script_actions blocks as defined above. + // +kubebuilder:validation:Optional + ScriptActions []HeadNodeScriptActionsParameters `json:"scriptActions,omitempty" tf:"script_actions,omitempty"` + + // The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Username *string `json:"username" tf:"username,omitempty"` + + // The Size of the Virtual Machine which should be used as the Zookeeper Nodes. Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, 
Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + VMSize *string `json:"vmSize" tf:"vm_size,omitempty"` + + // The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + VirtualNetworkID *string `json:"virtualNetworkId,omitempty" tf:"virtual_network_id,omitempty"` +} + +type RolesWorkerNodeInitParameters struct { + + // A autoscale block as defined below. + Autoscale *WorkerNodeAutoscaleInitParameters `json:"autoscale,omitempty" tf:"autoscale,omitempty"` + + // A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created. + // +listType=set + SSHKeys []*string `json:"sshKeys,omitempty" tf:"ssh_keys,omitempty"` + + // The script action which will run on the cluster. One or more script_actions blocks as defined above. + ScriptActions []RolesWorkerNodeScriptActionsInitParameters `json:"scriptActions,omitempty" tf:"script_actions,omitempty"` + + // The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // The number of instances which should be run for the Worker Nodes. + TargetInstanceCount *float64 `json:"targetInstanceCount,omitempty" tf:"target_instance_count,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` + + // The Size of the Virtual Machine which should be used as the Zookeeper Nodes. 
Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created. + VMSize *string `json:"vmSize,omitempty" tf:"vm_size,omitempty"` + + // The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + VirtualNetworkID *string `json:"virtualNetworkId,omitempty" tf:"virtual_network_id,omitempty"` +} + +type RolesWorkerNodeObservation struct { + + // A autoscale block as defined below. 
+ Autoscale *WorkerNodeAutoscaleObservation `json:"autoscale,omitempty" tf:"autoscale,omitempty"` + + // A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created. + // +listType=set + SSHKeys []*string `json:"sshKeys,omitempty" tf:"ssh_keys,omitempty"` + + // The script action which will run on the cluster. One or more script_actions blocks as defined above. + ScriptActions []RolesWorkerNodeScriptActionsObservation `json:"scriptActions,omitempty" tf:"script_actions,omitempty"` + + // The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // The number of instances which should be run for the Worker Nodes. + TargetInstanceCount *float64 `json:"targetInstanceCount,omitempty" tf:"target_instance_count,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` + + // The Size of the Virtual Machine which should be used as the Zookeeper Nodes. 
Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created. + VMSize *string `json:"vmSize,omitempty" tf:"vm_size,omitempty"` + + // The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + VirtualNetworkID *string `json:"virtualNetworkId,omitempty" tf:"virtual_network_id,omitempty"` +} + +type RolesWorkerNodeParameters struct { + + // A autoscale block as defined below. 
+ // +kubebuilder:validation:Optional + Autoscale *WorkerNodeAutoscaleParameters `json:"autoscale,omitempty" tf:"autoscale,omitempty"` + + // The Password associated with the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + PasswordSecretRef *v1.SecretKeySelector `json:"passwordSecretRef,omitempty" tf:"-"` + + // A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + // +listType=set + SSHKeys []*string `json:"sshKeys,omitempty" tf:"ssh_keys,omitempty"` + + // The script action which will run on the cluster. One or more script_actions blocks as defined above. + // +kubebuilder:validation:Optional + ScriptActions []RolesWorkerNodeScriptActionsParameters `json:"scriptActions,omitempty" tf:"script_actions,omitempty"` + + // The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // The number of instances which should be run for the Worker Nodes. 
+ // +kubebuilder:validation:Optional + TargetInstanceCount *float64 `json:"targetInstanceCount" tf:"target_instance_count,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Username *string `json:"username" tf:"username,omitempty"` + + // The Size of the Virtual Machine which should be used as the Zookeeper Nodes. Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + VMSize *string `json:"vmSize" tf:"vm_size,omitempty"` + + // The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + VirtualNetworkID *string `json:"virtualNetworkId,omitempty" tf:"virtual_network_id,omitempty"` +} + +type RolesWorkerNodeScriptActionsInitParameters struct { + + // The name of the script action. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The parameters for the script provided. + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The URI to the script. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type RolesWorkerNodeScriptActionsObservation struct { + + // The name of the script action. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The parameters for the script provided. + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The URI to the script. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type RolesWorkerNodeScriptActionsParameters struct { + + // The name of the script action. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The parameters for the script provided. + // +kubebuilder:validation:Optional + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The URI to the script. + // +kubebuilder:validation:Optional + URI *string `json:"uri" tf:"uri,omitempty"` +} + +type RolesZookeeperNodeInitParameters struct { + + // A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created. + // +listType=set + SSHKeys []*string `json:"sshKeys,omitempty" tf:"ssh_keys,omitempty"` + + // The script action which will run on the cluster. One or more script_actions blocks as defined above. 
+ ScriptActions []RolesZookeeperNodeScriptActionsInitParameters `json:"scriptActions,omitempty" tf:"script_actions,omitempty"` + + // The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` + + // The Size of the Virtual Machine which should be used as the Zookeeper Nodes. 
Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created. + VMSize *string `json:"vmSize,omitempty" tf:"vm_size,omitempty"` + + // The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + VirtualNetworkID *string `json:"virtualNetworkId,omitempty" tf:"virtual_network_id,omitempty"` +} + +type RolesZookeeperNodeObservation struct { + + // A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. 
Changing this forces a new resource to be created. + // +listType=set + SSHKeys []*string `json:"sshKeys,omitempty" tf:"ssh_keys,omitempty"` + + // The script action which will run on the cluster. One or more script_actions blocks as defined above. + ScriptActions []RolesZookeeperNodeScriptActionsObservation `json:"scriptActions,omitempty" tf:"script_actions,omitempty"` + + // The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` + + // The Size of the Virtual Machine which should be used as the Zookeeper Nodes. Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, 
Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created. + VMSize *string `json:"vmSize,omitempty" tf:"vm_size,omitempty"` + + // The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + VirtualNetworkID *string `json:"virtualNetworkId,omitempty" tf:"virtual_network_id,omitempty"` +} + +type RolesZookeeperNodeParameters struct { + + // The Password associated with the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + PasswordSecretRef *v1.SecretKeySelector `json:"passwordSecretRef,omitempty" tf:"-"` + + // A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + // +listType=set + SSHKeys []*string `json:"sshKeys,omitempty" tf:"ssh_keys,omitempty"` + + // The script action which will run on the cluster. One or more script_actions blocks as defined above. + // +kubebuilder:validation:Optional + ScriptActions []RolesZookeeperNodeScriptActionsParameters `json:"scriptActions,omitempty" tf:"script_actions,omitempty"` + + // The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Username *string `json:"username" tf:"username,omitempty"` + + // The Size of the Virtual Machine which should be used as the Zookeeper Nodes. Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, 
Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + VMSize *string `json:"vmSize" tf:"vm_size,omitempty"` + + // The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + VirtualNetworkID *string `json:"virtualNetworkId,omitempty" tf:"virtual_network_id,omitempty"` +} + +type RolesZookeeperNodeScriptActionsInitParameters struct { + + // The name of the script action. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The parameters for the script provided. + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The URI to the script. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type RolesZookeeperNodeScriptActionsObservation struct { + + // The name of the script action. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The parameters for the script provided. + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The URI to the script. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type RolesZookeeperNodeScriptActionsParameters struct { + + // The name of the script action. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The parameters for the script provided. 
+ // +kubebuilder:validation:Optional + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The URI to the script. + // +kubebuilder:validation:Optional + URI *string `json:"uri" tf:"uri,omitempty"` +} + +type WorkerNodeAutoscaleInitParameters struct { + + // A recurrence block as defined below. + Recurrence *AutoscaleRecurrenceInitParameters `json:"recurrence,omitempty" tf:"recurrence,omitempty"` +} + +type WorkerNodeAutoscaleObservation struct { + + // A recurrence block as defined below. + Recurrence *AutoscaleRecurrenceObservation `json:"recurrence,omitempty" tf:"recurrence,omitempty"` +} + +type WorkerNodeAutoscaleParameters struct { + + // A recurrence block as defined below. + // +kubebuilder:validation:Optional + Recurrence *AutoscaleRecurrenceParameters `json:"recurrence,omitempty" tf:"recurrence,omitempty"` +} + +// HBaseClusterSpec defines the desired state of HBaseCluster +type HBaseClusterSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider HBaseClusterParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider HBaseClusterInitParameters `json:"initProvider,omitempty"` +} + +// HBaseClusterStatus defines the observed state of HBaseCluster. 
+type HBaseClusterStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider HBaseClusterObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// HBaseCluster is the Schema for the HBaseClusters API. Manages a HDInsight HBase Cluster. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type HBaseCluster struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.clusterVersion) || (has(self.initProvider) && has(self.initProvider.clusterVersion))",message="spec.forProvider.clusterVersion is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.componentVersion) || (has(self.initProvider) && has(self.initProvider.componentVersion))",message="spec.forProvider.componentVersion is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.gateway) || (has(self.initProvider) && has(self.initProvider.gateway))",message="spec.forProvider.gateway is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in 
self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.roles) || (has(self.initProvider) && has(self.initProvider.roles))",message="spec.forProvider.roles is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.tier) || (has(self.initProvider) && has(self.initProvider.tier))",message="spec.forProvider.tier is a required parameter" + Spec HBaseClusterSpec `json:"spec"` + Status HBaseClusterStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// HBaseClusterList contains a list of HBaseClusters +type HBaseClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []HBaseCluster `json:"items"` +} + +// Repository type metadata. +var ( + HBaseCluster_Kind = "HBaseCluster" + HBaseCluster_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: HBaseCluster_Kind}.String() + HBaseCluster_KindAPIVersion = HBaseCluster_Kind + "." 
+ CRDGroupVersion.String() + HBaseCluster_GroupVersionKind = CRDGroupVersion.WithKind(HBaseCluster_Kind) +) + +func init() { + SchemeBuilder.Register(&HBaseCluster{}, &HBaseClusterList{}) +} diff --git a/apis/hdinsight/v1beta2/zz_interactivequerycluster_terraformed.go b/apis/hdinsight/v1beta2/zz_interactivequerycluster_terraformed.go new file mode 100755 index 000000000..5b9634919 --- /dev/null +++ b/apis/hdinsight/v1beta2/zz_interactivequerycluster_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this InteractiveQueryCluster +func (mg *InteractiveQueryCluster) GetTerraformResourceType() string { + return "azurerm_hdinsight_interactive_query_cluster" +} + +// GetConnectionDetailsMapping for this InteractiveQueryCluster +func (tr *InteractiveQueryCluster) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"extension[*].primary_key": "spec.forProvider.extension[*].primaryKeySecretRef", "gateway[*].password": "spec.forProvider.gateway[*].passwordSecretRef", "metastores[*].ambari[*].password": "spec.forProvider.metastores[*].ambari[*].passwordSecretRef", "metastores[*].hive[*].password": "spec.forProvider.metastores[*].hive[*].passwordSecretRef", "metastores[*].oozie[*].password": "spec.forProvider.metastores[*].oozie[*].passwordSecretRef", "monitor[*].primary_key": "spec.forProvider.monitor[*].primaryKeySecretRef", "roles[*].head_node[*].password": "spec.forProvider.roles[*].headNode[*].passwordSecretRef", "roles[*].worker_node[*].password": "spec.forProvider.roles[*].workerNode[*].passwordSecretRef", "roles[*].zookeeper_node[*].password": 
"spec.forProvider.roles[*].zookeeperNode[*].passwordSecretRef", "security_profile[*].domain_user_password": "spec.forProvider.securityProfile[*].domainUserPasswordSecretRef", "storage_account[*].storage_account_key": "spec.forProvider.storageAccount[*].storageAccountKeySecretRef"} +} + +// GetObservation of this InteractiveQueryCluster +func (tr *InteractiveQueryCluster) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this InteractiveQueryCluster +func (tr *InteractiveQueryCluster) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this InteractiveQueryCluster +func (tr *InteractiveQueryCluster) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this InteractiveQueryCluster +func (tr *InteractiveQueryCluster) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this InteractiveQueryCluster +func (tr *InteractiveQueryCluster) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this InteractiveQueryCluster +func (tr *InteractiveQueryCluster) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// 
GetMergedParameters of this InteractiveQueryCluster +func (tr *InteractiveQueryCluster) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this InteractiveQueryCluster using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *InteractiveQueryCluster) LateInitialize(attrs []byte) (bool, error) { + params := &InteractiveQueryClusterParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *InteractiveQueryCluster) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/hdinsight/v1beta2/zz_interactivequerycluster_types.go b/apis/hdinsight/v1beta2/zz_interactivequerycluster_types.go new file mode 100755 index 000000000..4e2c08443 --- /dev/null +++ b/apis/hdinsight/v1beta2/zz_interactivequerycluster_types.go @@ -0,0 +1,1428 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AutoscaleCapacityInitParameters struct { + MaxInstanceCount *float64 `json:"maxInstanceCount,omitempty" tf:"max_instance_count,omitempty"` + + MinInstanceCount *float64 `json:"minInstanceCount,omitempty" tf:"min_instance_count,omitempty"` +} + +type AutoscaleCapacityObservation struct { + MaxInstanceCount *float64 `json:"maxInstanceCount,omitempty" tf:"max_instance_count,omitempty"` + + MinInstanceCount *float64 `json:"minInstanceCount,omitempty" tf:"min_instance_count,omitempty"` +} + +type AutoscaleCapacityParameters struct { + + // +kubebuilder:validation:Optional + MaxInstanceCount *float64 `json:"maxInstanceCount" tf:"max_instance_count,omitempty"` + + // +kubebuilder:validation:Optional + MinInstanceCount *float64 `json:"minInstanceCount" tf:"min_instance_count,omitempty"` +} + +type AutoscaleRecurrenceScheduleInitParameters struct { + + // The days of the week to perform autoscale. Possible values are Monday, Tuesday, Wednesday, Thursday, Friday, Saturday and Sunday. + Days []*string `json:"days,omitempty" tf:"days,omitempty"` + + // The number of instances which should be run for the Worker Nodes. 
+ TargetInstanceCount *float64 `json:"targetInstanceCount,omitempty" tf:"target_instance_count,omitempty"` + + // The time of day to perform the autoscale in 24hour format. + Time *string `json:"time,omitempty" tf:"time,omitempty"` +} + +type AutoscaleRecurrenceScheduleObservation struct { + + // The days of the week to perform autoscale. Possible values are Monday, Tuesday, Wednesday, Thursday, Friday, Saturday and Sunday. + Days []*string `json:"days,omitempty" tf:"days,omitempty"` + + // The number of instances which should be run for the Worker Nodes. + TargetInstanceCount *float64 `json:"targetInstanceCount,omitempty" tf:"target_instance_count,omitempty"` + + // The time of day to perform the autoscale in 24hour format. + Time *string `json:"time,omitempty" tf:"time,omitempty"` +} + +type AutoscaleRecurrenceScheduleParameters struct { + + // The days of the week to perform autoscale. Possible values are Monday, Tuesday, Wednesday, Thursday, Friday, Saturday and Sunday. + // +kubebuilder:validation:Optional + Days []*string `json:"days" tf:"days,omitempty"` + + // The number of instances which should be run for the Worker Nodes. + // +kubebuilder:validation:Optional + TargetInstanceCount *float64 `json:"targetInstanceCount" tf:"target_instance_count,omitempty"` + + // The time of day to perform the autoscale in 24hour format. + // +kubebuilder:validation:Optional + Time *string `json:"time" tf:"time,omitempty"` +} + +type InteractiveQueryClusterComponentVersionInitParameters struct { + + // The version of Interactive Query which should be used for this HDInsight Interactive Query Cluster. Changing this forces a new resource to be created. + InteractiveHive *string `json:"interactiveHive,omitempty" tf:"interactive_hive,omitempty"` +} + +type InteractiveQueryClusterComponentVersionObservation struct { + + // The version of Interactive Query which should be used for this HDInsight Interactive Query Cluster. Changing this forces a new resource to be created. 
+ InteractiveHive *string `json:"interactiveHive,omitempty" tf:"interactive_hive,omitempty"` +} + +type InteractiveQueryClusterComponentVersionParameters struct { + + // The version of Interactive Query which should be used for this HDInsight Interactive Query Cluster. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + InteractiveHive *string `json:"interactiveHive" tf:"interactive_hive,omitempty"` +} + +type InteractiveQueryClusterComputeIsolationInitParameters struct { + + // This field indicates whether enable compute isolation or not. Possible values are true or false. + ComputeIsolationEnabled *bool `json:"computeIsolationEnabled,omitempty" tf:"compute_isolation_enabled,omitempty"` + + // The name of the host SKU. + HostSku *string `json:"hostSku,omitempty" tf:"host_sku,omitempty"` +} + +type InteractiveQueryClusterComputeIsolationObservation struct { + + // This field indicates whether enable compute isolation or not. Possible values are true or false. + ComputeIsolationEnabled *bool `json:"computeIsolationEnabled,omitempty" tf:"compute_isolation_enabled,omitempty"` + + // The name of the host SKU. + HostSku *string `json:"hostSku,omitempty" tf:"host_sku,omitempty"` +} + +type InteractiveQueryClusterComputeIsolationParameters struct { + + // This field indicates whether enable compute isolation or not. Possible values are true or false. + // +kubebuilder:validation:Optional + ComputeIsolationEnabled *bool `json:"computeIsolationEnabled,omitempty" tf:"compute_isolation_enabled,omitempty"` + + // The name of the host SKU. + // +kubebuilder:validation:Optional + HostSku *string `json:"hostSku,omitempty" tf:"host_sku,omitempty"` +} + +type InteractiveQueryClusterDiskEncryptionInitParameters struct { + + // This is an algorithm identifier for encryption. Possible values are RSA1_5, RSA-OAEP, RSA-OAEP-256. 
+ EncryptionAlgorithm *string `json:"encryptionAlgorithm,omitempty" tf:"encryption_algorithm,omitempty"` + + // This is indicator to show whether resource disk encryption is enabled. + EncryptionAtHostEnabled *bool `json:"encryptionAtHostEnabled,omitempty" tf:"encryption_at_host_enabled,omitempty"` + + // The ID of the key vault key. + KeyVaultKeyID *string `json:"keyVaultKeyId,omitempty" tf:"key_vault_key_id,omitempty"` + + // This is the resource ID of Managed Identity used to access the key vault. + KeyVaultManagedIdentityID *string `json:"keyVaultManagedIdentityId,omitempty" tf:"key_vault_managed_identity_id,omitempty"` +} + +type InteractiveQueryClusterDiskEncryptionObservation struct { + + // This is an algorithm identifier for encryption. Possible values are RSA1_5, RSA-OAEP, RSA-OAEP-256. + EncryptionAlgorithm *string `json:"encryptionAlgorithm,omitempty" tf:"encryption_algorithm,omitempty"` + + // This is indicator to show whether resource disk encryption is enabled. + EncryptionAtHostEnabled *bool `json:"encryptionAtHostEnabled,omitempty" tf:"encryption_at_host_enabled,omitempty"` + + // The ID of the key vault key. + KeyVaultKeyID *string `json:"keyVaultKeyId,omitempty" tf:"key_vault_key_id,omitempty"` + + // This is the resource ID of Managed Identity used to access the key vault. + KeyVaultManagedIdentityID *string `json:"keyVaultManagedIdentityId,omitempty" tf:"key_vault_managed_identity_id,omitempty"` +} + +type InteractiveQueryClusterDiskEncryptionParameters struct { + + // This is an algorithm identifier for encryption. Possible values are RSA1_5, RSA-OAEP, RSA-OAEP-256. + // +kubebuilder:validation:Optional + EncryptionAlgorithm *string `json:"encryptionAlgorithm,omitempty" tf:"encryption_algorithm,omitempty"` + + // This is indicator to show whether resource disk encryption is enabled. 
+ // +kubebuilder:validation:Optional + EncryptionAtHostEnabled *bool `json:"encryptionAtHostEnabled,omitempty" tf:"encryption_at_host_enabled,omitempty"` + + // The ID of the key vault key. + // +kubebuilder:validation:Optional + KeyVaultKeyID *string `json:"keyVaultKeyId,omitempty" tf:"key_vault_key_id,omitempty"` + + // This is the resource ID of Managed Identity used to access the key vault. + // +kubebuilder:validation:Optional + KeyVaultManagedIdentityID *string `json:"keyVaultManagedIdentityId,omitempty" tf:"key_vault_managed_identity_id,omitempty"` +} + +type InteractiveQueryClusterExtensionInitParameters struct { + + // The workspace ID of the log analytics extension. + LogAnalyticsWorkspaceID *string `json:"logAnalyticsWorkspaceId,omitempty" tf:"log_analytics_workspace_id,omitempty"` +} + +type InteractiveQueryClusterExtensionObservation struct { + + // The workspace ID of the log analytics extension. + LogAnalyticsWorkspaceID *string `json:"logAnalyticsWorkspaceId,omitempty" tf:"log_analytics_workspace_id,omitempty"` +} + +type InteractiveQueryClusterExtensionParameters struct { + + // The workspace ID of the log analytics extension. + // +kubebuilder:validation:Optional + LogAnalyticsWorkspaceID *string `json:"logAnalyticsWorkspaceId" tf:"log_analytics_workspace_id,omitempty"` + + // The workspace key of the log analytics extension. + // +kubebuilder:validation:Required + PrimaryKeySecretRef v1.SecretKeySelector `json:"primaryKeySecretRef" tf:"-"` +} + +type InteractiveQueryClusterGatewayInitParameters struct { + + // The username used for the Ambari Portal. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type InteractiveQueryClusterGatewayObservation struct { + + // The username used for the Ambari Portal. Changing this forces a new resource to be created. 
+ Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type InteractiveQueryClusterGatewayParameters struct { + + // The password used for the Ambari Portal. + // +kubebuilder:validation:Required + PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` + + // The username used for the Ambari Portal. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Username *string `json:"username" tf:"username,omitempty"` +} + +type InteractiveQueryClusterInitParameters struct { + + // Specifies the Version of HDInsights which should be used for this Cluster. Changing this forces a new resource to be created. + ClusterVersion *string `json:"clusterVersion,omitempty" tf:"cluster_version,omitempty"` + + // A component_version block as defined below. + ComponentVersion *InteractiveQueryClusterComponentVersionInitParameters `json:"componentVersion,omitempty" tf:"component_version,omitempty"` + + // A compute_isolation block as defined below. + ComputeIsolation *InteractiveQueryClusterComputeIsolationInitParameters `json:"computeIsolation,omitempty" tf:"compute_isolation,omitempty"` + + // A disk_encryption block as defined below. + DiskEncryption []InteractiveQueryClusterDiskEncryptionInitParameters `json:"diskEncryption,omitempty" tf:"disk_encryption,omitempty"` + + // Whether encryption in transit is enabled for this Cluster. Changing this forces a new resource to be created. + EncryptionInTransitEnabled *bool `json:"encryptionInTransitEnabled,omitempty" tf:"encryption_in_transit_enabled,omitempty"` + + // An extension block as defined below. + Extension *InteractiveQueryClusterExtensionInitParameters `json:"extension,omitempty" tf:"extension,omitempty"` + + // A gateway block as defined below. + Gateway *InteractiveQueryClusterGatewayInitParameters `json:"gateway,omitempty" tf:"gateway,omitempty"` + + // Specifies the Azure Region which this HDInsight Interactive Query Cluster should exist. 
Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A metastores block as defined below. + Metastores *InteractiveQueryClusterMetastoresInitParameters `json:"metastores,omitempty" tf:"metastores,omitempty"` + + // A monitor block as defined below. + Monitor *InteractiveQueryClusterMonitorInitParameters `json:"monitor,omitempty" tf:"monitor,omitempty"` + + // A network block as defined below. + Network *InteractiveQueryClusterNetworkInitParameters `json:"network,omitempty" tf:"network,omitempty"` + + // A roles block as defined below. + Roles *InteractiveQueryClusterRolesInitParameters `json:"roles,omitempty" tf:"roles,omitempty"` + + // A security_profile block as defined below. Changing this forces a new resource to be created. + SecurityProfile *InteractiveQueryClusterSecurityProfileInitParameters `json:"securityProfile,omitempty" tf:"security_profile,omitempty"` + + // One or more storage_account block as defined below. + StorageAccount []InteractiveQueryClusterStorageAccountInitParameters `json:"storageAccount,omitempty" tf:"storage_account,omitempty"` + + // A storage_account_gen2 block as defined below. + StorageAccountGen2 *InteractiveQueryClusterStorageAccountGen2InitParameters `json:"storageAccountGen2,omitempty" tf:"storage_account_gen2,omitempty"` + + // The minimal supported TLS version. Possible values are 1.0, 1.1 or 1.2. Changing this forces a new resource to be created. + TLSMinVersion *string `json:"tlsMinVersion,omitempty" tf:"tls_min_version,omitempty"` + + // A map of Tags which should be assigned to this HDInsight Interactive Query Cluster. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies the Tier which should be used for this HDInsight Interactive Query Cluster. Possible values are Standard or Premium. Changing this forces a new resource to be created. 
+ Tier *string `json:"tier,omitempty" tf:"tier,omitempty"` +} + +type InteractiveQueryClusterMetastoresAmbariInitParameters struct { + + // The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created. + Server *string `json:"server,omitempty" tf:"server,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type InteractiveQueryClusterMetastoresAmbariObservation struct { + + // The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created. + Server *string `json:"server,omitempty" tf:"server,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type InteractiveQueryClusterMetastoresAmbariParameters struct { + + // The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + DatabaseName *string `json:"databaseName" tf:"database_name,omitempty"` + + // The Password associated with the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Required + PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` + + // The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Server *string `json:"server" tf:"server,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Username *string `json:"username" tf:"username,omitempty"` +} + +type InteractiveQueryClusterMetastoresHiveInitParameters struct { + + // The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created. + Server *string `json:"server,omitempty" tf:"server,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type InteractiveQueryClusterMetastoresHiveObservation struct { + + // The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created. + Server *string `json:"server,omitempty" tf:"server,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. 
+ Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type InteractiveQueryClusterMetastoresHiveParameters struct { + + // The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + DatabaseName *string `json:"databaseName" tf:"database_name,omitempty"` + + // The Password associated with the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Required + PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` + + // The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Server *string `json:"server" tf:"server,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Username *string `json:"username" tf:"username,omitempty"` +} + +type InteractiveQueryClusterMetastoresInitParameters struct { + + // An ambari block as defined below. + Ambari *InteractiveQueryClusterMetastoresAmbariInitParameters `json:"ambari,omitempty" tf:"ambari,omitempty"` + + // A hive block as defined below. + Hive *InteractiveQueryClusterMetastoresHiveInitParameters `json:"hive,omitempty" tf:"hive,omitempty"` + + // An oozie block as defined below. + Oozie *InteractiveQueryClusterMetastoresOozieInitParameters `json:"oozie,omitempty" tf:"oozie,omitempty"` +} + +type InteractiveQueryClusterMetastoresObservation struct { + + // An ambari block as defined below. + Ambari *InteractiveQueryClusterMetastoresAmbariObservation `json:"ambari,omitempty" tf:"ambari,omitempty"` + + // A hive block as defined below. + Hive *InteractiveQueryClusterMetastoresHiveObservation `json:"hive,omitempty" tf:"hive,omitempty"` + + // An oozie block as defined below. 
+ Oozie *InteractiveQueryClusterMetastoresOozieObservation `json:"oozie,omitempty" tf:"oozie,omitempty"` +} + +type InteractiveQueryClusterMetastoresOozieInitParameters struct { + + // The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created. + Server *string `json:"server,omitempty" tf:"server,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type InteractiveQueryClusterMetastoresOozieObservation struct { + + // The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created. + Server *string `json:"server,omitempty" tf:"server,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type InteractiveQueryClusterMetastoresOozieParameters struct { + + // The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + DatabaseName *string `json:"databaseName" tf:"database_name,omitempty"` + + // The Password associated with the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Required + PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` + + // The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Server *string `json:"server" tf:"server,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Username *string `json:"username" tf:"username,omitempty"` +} + +type InteractiveQueryClusterMetastoresParameters struct { + + // An ambari block as defined below. + // +kubebuilder:validation:Optional + Ambari *InteractiveQueryClusterMetastoresAmbariParameters `json:"ambari,omitempty" tf:"ambari,omitempty"` + + // A hive block as defined below. + // +kubebuilder:validation:Optional + Hive *InteractiveQueryClusterMetastoresHiveParameters `json:"hive,omitempty" tf:"hive,omitempty"` + + // An oozie block as defined below. + // +kubebuilder:validation:Optional + Oozie *InteractiveQueryClusterMetastoresOozieParameters `json:"oozie,omitempty" tf:"oozie,omitempty"` +} + +type InteractiveQueryClusterMonitorInitParameters struct { + + // The Operations Management Suite (OMS) workspace ID. + LogAnalyticsWorkspaceID *string `json:"logAnalyticsWorkspaceId,omitempty" tf:"log_analytics_workspace_id,omitempty"` +} + +type InteractiveQueryClusterMonitorObservation struct { + + // The Operations Management Suite (OMS) workspace ID. + LogAnalyticsWorkspaceID *string `json:"logAnalyticsWorkspaceId,omitempty" tf:"log_analytics_workspace_id,omitempty"` +} + +type InteractiveQueryClusterMonitorParameters struct { + + // The Operations Management Suite (OMS) workspace ID. 
+ // +kubebuilder:validation:Optional + LogAnalyticsWorkspaceID *string `json:"logAnalyticsWorkspaceId" tf:"log_analytics_workspace_id,omitempty"` + + // The Operations Management Suite (OMS) workspace key. + // +kubebuilder:validation:Required + PrimaryKeySecretRef v1.SecretKeySelector `json:"primaryKeySecretRef" tf:"-"` +} + +type InteractiveQueryClusterNetworkInitParameters struct { + + // The direction of the resource provider connection. Possible values include Inbound or Outbound. Defaults to Inbound. Changing this forces a new resource to be created. + ConnectionDirection *string `json:"connectionDirection,omitempty" tf:"connection_direction,omitempty"` + + // Is the private link enabled? Possible values include true or false. Defaults to false. Changing this forces a new resource to be created. + PrivateLinkEnabled *bool `json:"privateLinkEnabled,omitempty" tf:"private_link_enabled,omitempty"` +} + +type InteractiveQueryClusterNetworkObservation struct { + + // The direction of the resource provider connection. Possible values include Inbound or Outbound. Defaults to Inbound. Changing this forces a new resource to be created. + ConnectionDirection *string `json:"connectionDirection,omitempty" tf:"connection_direction,omitempty"` + + // Is the private link enabled? Possible values include true or false. Defaults to false. Changing this forces a new resource to be created. + PrivateLinkEnabled *bool `json:"privateLinkEnabled,omitempty" tf:"private_link_enabled,omitempty"` +} + +type InteractiveQueryClusterNetworkParameters struct { + + // The direction of the resource provider connection. Possible values include Inbound or Outbound. Defaults to Inbound. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ConnectionDirection *string `json:"connectionDirection,omitempty" tf:"connection_direction,omitempty"` + + // Is the private link enabled? Possible values include true or false. Defaults to false. 
Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + PrivateLinkEnabled *bool `json:"privateLinkEnabled,omitempty" tf:"private_link_enabled,omitempty"` +} + +type InteractiveQueryClusterObservation struct { + + // Specifies the Version of HDInsights which should be used for this Cluster. Changing this forces a new resource to be created. + ClusterVersion *string `json:"clusterVersion,omitempty" tf:"cluster_version,omitempty"` + + // A component_version block as defined below. + ComponentVersion *InteractiveQueryClusterComponentVersionObservation `json:"componentVersion,omitempty" tf:"component_version,omitempty"` + + // A compute_isolation block as defined below. + ComputeIsolation *InteractiveQueryClusterComputeIsolationObservation `json:"computeIsolation,omitempty" tf:"compute_isolation,omitempty"` + + // A disk_encryption block as defined below. + DiskEncryption []InteractiveQueryClusterDiskEncryptionObservation `json:"diskEncryption,omitempty" tf:"disk_encryption,omitempty"` + + // Whether encryption in transit is enabled for this Cluster. Changing this forces a new resource to be created. + EncryptionInTransitEnabled *bool `json:"encryptionInTransitEnabled,omitempty" tf:"encryption_in_transit_enabled,omitempty"` + + // An extension block as defined below. + Extension *InteractiveQueryClusterExtensionObservation `json:"extension,omitempty" tf:"extension,omitempty"` + + // A gateway block as defined below. + Gateway *InteractiveQueryClusterGatewayObservation `json:"gateway,omitempty" tf:"gateway,omitempty"` + + // The HTTPS Connectivity Endpoint for this HDInsight Interactive Query Cluster. + HTTPSEndpoint *string `json:"httpsEndpoint,omitempty" tf:"https_endpoint,omitempty"` + + // The ID of the HDInsight Interactive Query Cluster. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies the Azure Region which this HDInsight Interactive Query Cluster should exist. 
Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A metastores block as defined below. + Metastores *InteractiveQueryClusterMetastoresObservation `json:"metastores,omitempty" tf:"metastores,omitempty"` + + // A monitor block as defined below. + Monitor *InteractiveQueryClusterMonitorObservation `json:"monitor,omitempty" tf:"monitor,omitempty"` + + // A network block as defined below. + Network *InteractiveQueryClusterNetworkObservation `json:"network,omitempty" tf:"network,omitempty"` + + // Specifies the name of the Resource Group in which this HDInsight Interactive Query Cluster should exist. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A roles block as defined below. + Roles *InteractiveQueryClusterRolesObservation `json:"roles,omitempty" tf:"roles,omitempty"` + + // The SSH Connectivity Endpoint for this HDInsight Interactive Query Cluster. + SSHEndpoint *string `json:"sshEndpoint,omitempty" tf:"ssh_endpoint,omitempty"` + + // A security_profile block as defined below. Changing this forces a new resource to be created. + SecurityProfile *InteractiveQueryClusterSecurityProfileObservation `json:"securityProfile,omitempty" tf:"security_profile,omitempty"` + + // One or more storage_account block as defined below. + StorageAccount []InteractiveQueryClusterStorageAccountObservation `json:"storageAccount,omitempty" tf:"storage_account,omitempty"` + + // A storage_account_gen2 block as defined below. + StorageAccountGen2 *InteractiveQueryClusterStorageAccountGen2Observation `json:"storageAccountGen2,omitempty" tf:"storage_account_gen2,omitempty"` + + // The minimal supported TLS version. Possible values are 1.0, 1.1 or 1.2. Changing this forces a new resource to be created. 
+ TLSMinVersion *string `json:"tlsMinVersion,omitempty" tf:"tls_min_version,omitempty"` + + // A map of Tags which should be assigned to this HDInsight Interactive Query Cluster. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies the Tier which should be used for this HDInsight Interactive Query Cluster. Possible values are Standard or Premium. Changing this forces a new resource to be created. + Tier *string `json:"tier,omitempty" tf:"tier,omitempty"` +} + +type InteractiveQueryClusterParameters struct { + + // Specifies the Version of HDInsights which should be used for this Cluster. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ClusterVersion *string `json:"clusterVersion,omitempty" tf:"cluster_version,omitempty"` + + // A component_version block as defined below. + // +kubebuilder:validation:Optional + ComponentVersion *InteractiveQueryClusterComponentVersionParameters `json:"componentVersion,omitempty" tf:"component_version,omitempty"` + + // A compute_isolation block as defined below. + // +kubebuilder:validation:Optional + ComputeIsolation *InteractiveQueryClusterComputeIsolationParameters `json:"computeIsolation,omitempty" tf:"compute_isolation,omitempty"` + + // A disk_encryption block as defined below. + // +kubebuilder:validation:Optional + DiskEncryption []InteractiveQueryClusterDiskEncryptionParameters `json:"diskEncryption,omitempty" tf:"disk_encryption,omitempty"` + + // Whether encryption in transit is enabled for this Cluster. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + EncryptionInTransitEnabled *bool `json:"encryptionInTransitEnabled,omitempty" tf:"encryption_in_transit_enabled,omitempty"` + + // An extension block as defined below. 
+ // +kubebuilder:validation:Optional + Extension *InteractiveQueryClusterExtensionParameters `json:"extension,omitempty" tf:"extension,omitempty"` + + // A gateway block as defined below. + // +kubebuilder:validation:Optional + Gateway *InteractiveQueryClusterGatewayParameters `json:"gateway,omitempty" tf:"gateway,omitempty"` + + // Specifies the Azure Region which this HDInsight Interactive Query Cluster should exist. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A metastores block as defined below. + // +kubebuilder:validation:Optional + Metastores *InteractiveQueryClusterMetastoresParameters `json:"metastores,omitempty" tf:"metastores,omitempty"` + + // A monitor block as defined below. + // +kubebuilder:validation:Optional + Monitor *InteractiveQueryClusterMonitorParameters `json:"monitor,omitempty" tf:"monitor,omitempty"` + + // A network block as defined below. + // +kubebuilder:validation:Optional + Network *InteractiveQueryClusterNetworkParameters `json:"network,omitempty" tf:"network,omitempty"` + + // Specifies the name of the Resource Group in which this HDInsight Interactive Query Cluster should exist. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A roles block as defined below. 
+ // +kubebuilder:validation:Optional + Roles *InteractiveQueryClusterRolesParameters `json:"roles,omitempty" tf:"roles,omitempty"` + + // A security_profile block as defined below. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + SecurityProfile *InteractiveQueryClusterSecurityProfileParameters `json:"securityProfile,omitempty" tf:"security_profile,omitempty"` + + // One or more storage_account block as defined below. + // +kubebuilder:validation:Optional + StorageAccount []InteractiveQueryClusterStorageAccountParameters `json:"storageAccount,omitempty" tf:"storage_account,omitempty"` + + // A storage_account_gen2 block as defined below. + // +kubebuilder:validation:Optional + StorageAccountGen2 *InteractiveQueryClusterStorageAccountGen2Parameters `json:"storageAccountGen2,omitempty" tf:"storage_account_gen2,omitempty"` + + // The minimal supported TLS version. Possible values are 1.0, 1.1 or 1.2. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + TLSMinVersion *string `json:"tlsMinVersion,omitempty" tf:"tls_min_version,omitempty"` + + // A map of Tags which should be assigned to this HDInsight Interactive Query Cluster. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies the Tier which should be used for this HDInsight Interactive Query Cluster. Possible values are Standard or Premium. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Tier *string `json:"tier,omitempty" tf:"tier,omitempty"` +} + +type InteractiveQueryClusterRolesHeadNodeInitParameters struct { + + // A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created. 
+ // +listType=set + SSHKeys []*string `json:"sshKeys,omitempty" tf:"ssh_keys,omitempty"` + + // The script action which will run on the cluster. One or more script_actions blocks as defined above. + ScriptActions []RolesHeadNodeScriptActionsInitParameters `json:"scriptActions,omitempty" tf:"script_actions,omitempty"` + + // The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` + + // The Size of the Virtual Machine which should be used as the Zookeeper Nodes. 
Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created. + VMSize *string `json:"vmSize,omitempty" tf:"vm_size,omitempty"` + + // The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. 
+ VirtualNetworkID *string `json:"virtualNetworkId,omitempty" tf:"virtual_network_id,omitempty"` +} + +type InteractiveQueryClusterRolesHeadNodeObservation struct { + + // A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created. + // +listType=set + SSHKeys []*string `json:"sshKeys,omitempty" tf:"ssh_keys,omitempty"` + + // The script action which will run on the cluster. One or more script_actions blocks as defined above. + ScriptActions []RolesHeadNodeScriptActionsObservation `json:"scriptActions,omitempty" tf:"script_actions,omitempty"` + + // The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` + + // The Size of the Virtual Machine which should be used as the Zookeeper Nodes. 
Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created. + VMSize *string `json:"vmSize,omitempty" tf:"vm_size,omitempty"` + + // The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + VirtualNetworkID *string `json:"virtualNetworkId,omitempty" tf:"virtual_network_id,omitempty"` +} + +type InteractiveQueryClusterRolesHeadNodeParameters struct { + + // The Password associated with the local administrator for the Zookeeper Nodes. 
Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + PasswordSecretRef *v1.SecretKeySelector `json:"passwordSecretRef,omitempty" tf:"-"` + + // A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + // +listType=set + SSHKeys []*string `json:"sshKeys,omitempty" tf:"ssh_keys,omitempty"` + + // The script action which will run on the cluster. One or more script_actions blocks as defined above. + // +kubebuilder:validation:Optional + ScriptActions []RolesHeadNodeScriptActionsParameters `json:"scriptActions,omitempty" tf:"script_actions,omitempty"` + + // The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Username *string `json:"username" tf:"username,omitempty"` + + // The Size of the Virtual Machine which should be used as the Zookeeper Nodes. 
Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + VMSize *string `json:"vmSize" tf:"vm_size,omitempty"` + + // The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + VirtualNetworkID *string `json:"virtualNetworkId,omitempty" tf:"virtual_network_id,omitempty"` +} + +type InteractiveQueryClusterRolesInitParameters struct { + + // A head_node block as defined above. + HeadNode *InteractiveQueryClusterRolesHeadNodeInitParameters `json:"headNode,omitempty" tf:"head_node,omitempty"` + + // A worker_node block as defined below. + WorkerNode *InteractiveQueryClusterRolesWorkerNodeInitParameters `json:"workerNode,omitempty" tf:"worker_node,omitempty"` + + // A zookeeper_node block as defined below. + ZookeeperNode *InteractiveQueryClusterRolesZookeeperNodeInitParameters `json:"zookeeperNode,omitempty" tf:"zookeeper_node,omitempty"` +} + +type InteractiveQueryClusterRolesObservation struct { + + // A head_node block as defined above. + HeadNode *InteractiveQueryClusterRolesHeadNodeObservation `json:"headNode,omitempty" tf:"head_node,omitempty"` + + // A worker_node block as defined below. + WorkerNode *InteractiveQueryClusterRolesWorkerNodeObservation `json:"workerNode,omitempty" tf:"worker_node,omitempty"` + + // A zookeeper_node block as defined below. + ZookeeperNode *InteractiveQueryClusterRolesZookeeperNodeObservation `json:"zookeeperNode,omitempty" tf:"zookeeper_node,omitempty"` +} + +type InteractiveQueryClusterRolesParameters struct { + + // A head_node block as defined above. + // +kubebuilder:validation:Optional + HeadNode *InteractiveQueryClusterRolesHeadNodeParameters `json:"headNode" tf:"head_node,omitempty"` + + // A worker_node block as defined below. + // +kubebuilder:validation:Optional + WorkerNode *InteractiveQueryClusterRolesWorkerNodeParameters `json:"workerNode" tf:"worker_node,omitempty"` + + // A zookeeper_node block as defined below. 
+ // +kubebuilder:validation:Optional + ZookeeperNode *InteractiveQueryClusterRolesZookeeperNodeParameters `json:"zookeeperNode" tf:"zookeeper_node,omitempty"` +} + +type InteractiveQueryClusterRolesWorkerNodeInitParameters struct { + + // A autoscale block as defined below. + Autoscale *RolesWorkerNodeAutoscaleInitParameters `json:"autoscale,omitempty" tf:"autoscale,omitempty"` + + // A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created. + // +listType=set + SSHKeys []*string `json:"sshKeys,omitempty" tf:"ssh_keys,omitempty"` + + // The script action which will run on the cluster. One or more script_actions blocks as defined above. + ScriptActions []InteractiveQueryClusterRolesWorkerNodeScriptActionsInitParameters `json:"scriptActions,omitempty" tf:"script_actions,omitempty"` + + // The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // The number of instances which should be run for the Worker Nodes. + TargetInstanceCount *float64 `json:"targetInstanceCount,omitempty" tf:"target_instance_count,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. 
+ Username *string `json:"username,omitempty" tf:"username,omitempty"` + + // The Size of the Virtual Machine which should be used as the Zookeeper Nodes. Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created. + VMSize *string `json:"vmSize,omitempty" tf:"vm_size,omitempty"` + + // The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. 
+ VirtualNetworkID *string `json:"virtualNetworkId,omitempty" tf:"virtual_network_id,omitempty"` +} + +type InteractiveQueryClusterRolesWorkerNodeObservation struct { + + // A autoscale block as defined below. + Autoscale *RolesWorkerNodeAutoscaleObservation `json:"autoscale,omitempty" tf:"autoscale,omitempty"` + + // A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created. + // +listType=set + SSHKeys []*string `json:"sshKeys,omitempty" tf:"ssh_keys,omitempty"` + + // The script action which will run on the cluster. One or more script_actions blocks as defined above. + ScriptActions []InteractiveQueryClusterRolesWorkerNodeScriptActionsObservation `json:"scriptActions,omitempty" tf:"script_actions,omitempty"` + + // The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // The number of instances which should be run for the Worker Nodes. + TargetInstanceCount *float64 `json:"targetInstanceCount,omitempty" tf:"target_instance_count,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` + + // The Size of the Virtual Machine which should be used as the Zookeeper Nodes. 
Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created. + VMSize *string `json:"vmSize,omitempty" tf:"vm_size,omitempty"` + + // The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + VirtualNetworkID *string `json:"virtualNetworkId,omitempty" tf:"virtual_network_id,omitempty"` +} + +type InteractiveQueryClusterRolesWorkerNodeParameters struct { + + // A autoscale block as defined below. 
+ // +kubebuilder:validation:Optional + Autoscale *RolesWorkerNodeAutoscaleParameters `json:"autoscale,omitempty" tf:"autoscale,omitempty"` + + // The Password associated with the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + PasswordSecretRef *v1.SecretKeySelector `json:"passwordSecretRef,omitempty" tf:"-"` + + // A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + // +listType=set + SSHKeys []*string `json:"sshKeys,omitempty" tf:"ssh_keys,omitempty"` + + // The script action which will run on the cluster. One or more script_actions blocks as defined above. + // +kubebuilder:validation:Optional + ScriptActions []InteractiveQueryClusterRolesWorkerNodeScriptActionsParameters `json:"scriptActions,omitempty" tf:"script_actions,omitempty"` + + // The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // The number of instances which should be run for the Worker Nodes. 
+ // +kubebuilder:validation:Optional + TargetInstanceCount *float64 `json:"targetInstanceCount" tf:"target_instance_count,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Username *string `json:"username" tf:"username,omitempty"` + + // The Size of the Virtual Machine which should be used as the Zookeeper Nodes. Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + VMSize *string `json:"vmSize" tf:"vm_size,omitempty"` + + // The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + VirtualNetworkID *string `json:"virtualNetworkId,omitempty" tf:"virtual_network_id,omitempty"` +} + +type InteractiveQueryClusterRolesWorkerNodeScriptActionsInitParameters struct { + + // The name of the script action. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The parameters for the script provided. + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The URI to the script. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type InteractiveQueryClusterRolesWorkerNodeScriptActionsObservation struct { + + // The name of the script action. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The parameters for the script provided. + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The URI to the script. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type InteractiveQueryClusterRolesWorkerNodeScriptActionsParameters struct { + + // The name of the script action. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The parameters for the script provided. + // +kubebuilder:validation:Optional + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The URI to the script. + // +kubebuilder:validation:Optional + URI *string `json:"uri" tf:"uri,omitempty"` +} + +type InteractiveQueryClusterRolesZookeeperNodeInitParameters struct { + + // A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created. 
+ // +listType=set + SSHKeys []*string `json:"sshKeys,omitempty" tf:"ssh_keys,omitempty"` + + // The script action which will run on the cluster. One or more script_actions blocks as defined above. + ScriptActions []InteractiveQueryClusterRolesZookeeperNodeScriptActionsInitParameters `json:"scriptActions,omitempty" tf:"script_actions,omitempty"` + + // The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` + + // The Size of the Virtual Machine which should be used as the Zookeeper Nodes. 
Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created. + VMSize *string `json:"vmSize,omitempty" tf:"vm_size,omitempty"` + + // The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. 
+ VirtualNetworkID *string `json:"virtualNetworkId,omitempty" tf:"virtual_network_id,omitempty"` +} + +type InteractiveQueryClusterRolesZookeeperNodeObservation struct { + + // A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created. + // +listType=set + SSHKeys []*string `json:"sshKeys,omitempty" tf:"ssh_keys,omitempty"` + + // The script action which will run on the cluster. One or more script_actions blocks as defined above. + ScriptActions []InteractiveQueryClusterRolesZookeeperNodeScriptActionsObservation `json:"scriptActions,omitempty" tf:"script_actions,omitempty"` + + // The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` + + // The Size of the Virtual Machine which should be used as the Zookeeper Nodes. 
Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created. + VMSize *string `json:"vmSize,omitempty" tf:"vm_size,omitempty"` + + // The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. 
+ VirtualNetworkID *string `json:"virtualNetworkId,omitempty" tf:"virtual_network_id,omitempty"` +} + +type InteractiveQueryClusterRolesZookeeperNodeParameters struct { + + // The Password associated with the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + PasswordSecretRef *v1.SecretKeySelector `json:"passwordSecretRef,omitempty" tf:"-"` + + // A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + // +listType=set + SSHKeys []*string `json:"sshKeys,omitempty" tf:"ssh_keys,omitempty"` + + // The script action which will run on the cluster. One or more script_actions blocks as defined above. + // +kubebuilder:validation:Optional + ScriptActions []InteractiveQueryClusterRolesZookeeperNodeScriptActionsParameters `json:"scriptActions,omitempty" tf:"script_actions,omitempty"` + + // The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + Username *string `json:"username" tf:"username,omitempty"` + + // The Size of the Virtual Machine which should be used as the Zookeeper Nodes. Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + VMSize *string `json:"vmSize" tf:"vm_size,omitempty"` + + // The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + VirtualNetworkID *string `json:"virtualNetworkId,omitempty" tf:"virtual_network_id,omitempty"` +} + +type InteractiveQueryClusterRolesZookeeperNodeScriptActionsInitParameters struct { + + // The name of the script action. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The parameters for the script provided. + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The URI to the script. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type InteractiveQueryClusterRolesZookeeperNodeScriptActionsObservation struct { + + // The name of the script action. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The parameters for the script provided. + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The URI to the script. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type InteractiveQueryClusterRolesZookeeperNodeScriptActionsParameters struct { + + // The name of the script action. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The parameters for the script provided. + // +kubebuilder:validation:Optional + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The URI to the script. + // +kubebuilder:validation:Optional + URI *string `json:"uri" tf:"uri,omitempty"` +} + +type InteractiveQueryClusterSecurityProfileInitParameters struct { + + // The resource ID of the Azure Active Directory Domain Service. Changing this forces a new resource to be created. + AaddsResourceID *string `json:"aaddsResourceId,omitempty" tf:"aadds_resource_id,omitempty"` + + // A list of the distinguished names for the cluster user groups. Changing this forces a new resource to be created. 
+ // +listType=set + ClusterUsersGroupDNS []*string `json:"clusterUsersGroupDns,omitempty" tf:"cluster_users_group_dns,omitempty"` + + // The name of the Azure Active Directory Domain. Changing this forces a new resource to be created. + DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` + + // The username of the Azure Active Directory Domain. Changing this forces a new resource to be created. + DomainUsername *string `json:"domainUsername,omitempty" tf:"domain_username,omitempty"` + + // A list of the LDAPS URLs to communicate with the Azure Active Directory. Changing this forces a new resource to be created. + // +listType=set + LdapsUrls []*string `json:"ldapsUrls,omitempty" tf:"ldaps_urls,omitempty"` + + // The User Assigned Identity for the HDInsight Cluster. Changing this forces a new resource to be created. + MsiResourceID *string `json:"msiResourceId,omitempty" tf:"msi_resource_id,omitempty"` +} + +type InteractiveQueryClusterSecurityProfileObservation struct { + + // The resource ID of the Azure Active Directory Domain Service. Changing this forces a new resource to be created. + AaddsResourceID *string `json:"aaddsResourceId,omitempty" tf:"aadds_resource_id,omitempty"` + + // A list of the distinguished names for the cluster user groups. Changing this forces a new resource to be created. + // +listType=set + ClusterUsersGroupDNS []*string `json:"clusterUsersGroupDns,omitempty" tf:"cluster_users_group_dns,omitempty"` + + // The name of the Azure Active Directory Domain. Changing this forces a new resource to be created. + DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` + + // The username of the Azure Active Directory Domain. Changing this forces a new resource to be created. + DomainUsername *string `json:"domainUsername,omitempty" tf:"domain_username,omitempty"` + + // A list of the LDAPS URLs to communicate with the Azure Active Directory. Changing this forces a new resource to be created. 
+ // +listType=set + LdapsUrls []*string `json:"ldapsUrls,omitempty" tf:"ldaps_urls,omitempty"` + + // The User Assigned Identity for the HDInsight Cluster. Changing this forces a new resource to be created. + MsiResourceID *string `json:"msiResourceId,omitempty" tf:"msi_resource_id,omitempty"` +} + +type InteractiveQueryClusterSecurityProfileParameters struct { + + // The resource ID of the Azure Active Directory Domain Service. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + AaddsResourceID *string `json:"aaddsResourceId" tf:"aadds_resource_id,omitempty"` + + // A list of the distinguished names for the cluster user groups. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + // +listType=set + ClusterUsersGroupDNS []*string `json:"clusterUsersGroupDns,omitempty" tf:"cluster_users_group_dns,omitempty"` + + // The name of the Azure Active Directory Domain. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + DomainName *string `json:"domainName" tf:"domain_name,omitempty"` + + // The user password of the Azure Active Directory Domain. Changing this forces a new resource to be created. + // +kubebuilder:validation:Required + DomainUserPasswordSecretRef v1.SecretKeySelector `json:"domainUserPasswordSecretRef" tf:"-"` + + // The username of the Azure Active Directory Domain. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + DomainUsername *string `json:"domainUsername" tf:"domain_username,omitempty"` + + // A list of the LDAPS URLs to communicate with the Azure Active Directory. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + // +listType=set + LdapsUrls []*string `json:"ldapsUrls" tf:"ldaps_urls,omitempty"` + + // The User Assigned Identity for the HDInsight Cluster. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + MsiResourceID *string `json:"msiResourceId" tf:"msi_resource_id,omitempty"` +} + +type InteractiveQueryClusterStorageAccountGen2InitParameters struct { + + // The ID of the Gen2 Filesystem. Changing this forces a new resource to be created. + FileSystemID *string `json:"filesystemId,omitempty" tf:"filesystem_id,omitempty"` + + // Is this the Default Storage Account for the HDInsight Hadoop Cluster? Changing this forces a new resource to be created. + IsDefault *bool `json:"isDefault,omitempty" tf:"is_default,omitempty"` + + // The ID of Managed Identity to use for accessing the Gen2 filesystem. Changing this forces a new resource to be created. + ManagedIdentityResourceID *string `json:"managedIdentityResourceId,omitempty" tf:"managed_identity_resource_id,omitempty"` + + // The ID of the Storage Account. Changing this forces a new resource to be created. + StorageResourceID *string `json:"storageResourceId,omitempty" tf:"storage_resource_id,omitempty"` +} + +type InteractiveQueryClusterStorageAccountGen2Observation struct { + + // The ID of the Gen2 Filesystem. Changing this forces a new resource to be created. + FileSystemID *string `json:"filesystemId,omitempty" tf:"filesystem_id,omitempty"` + + // Is this the Default Storage Account for the HDInsight Hadoop Cluster? Changing this forces a new resource to be created. + IsDefault *bool `json:"isDefault,omitempty" tf:"is_default,omitempty"` + + // The ID of Managed Identity to use for accessing the Gen2 filesystem. Changing this forces a new resource to be created. + ManagedIdentityResourceID *string `json:"managedIdentityResourceId,omitempty" tf:"managed_identity_resource_id,omitempty"` + + // The ID of the Storage Account. Changing this forces a new resource to be created. 
+ StorageResourceID *string `json:"storageResourceId,omitempty" tf:"storage_resource_id,omitempty"` +} + +type InteractiveQueryClusterStorageAccountGen2Parameters struct { + + // The ID of the Gen2 Filesystem. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + FileSystemID *string `json:"filesystemId" tf:"filesystem_id,omitempty"` + + // Is this the Default Storage Account for the HDInsight Hadoop Cluster? Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + IsDefault *bool `json:"isDefault" tf:"is_default,omitempty"` + + // The ID of Managed Identity to use for accessing the Gen2 filesystem. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ManagedIdentityResourceID *string `json:"managedIdentityResourceId" tf:"managed_identity_resource_id,omitempty"` + + // The ID of the Storage Account. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + StorageResourceID *string `json:"storageResourceId" tf:"storage_resource_id,omitempty"` +} + +type InteractiveQueryClusterStorageAccountInitParameters struct { + + // Is this the Default Storage Account for the HDInsight Hadoop Cluster? Changing this forces a new resource to be created. + IsDefault *bool `json:"isDefault,omitempty" tf:"is_default,omitempty"` + + // The ID of the Storage Container. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Container + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + StorageContainerID *string `json:"storageContainerId,omitempty" tf:"storage_container_id,omitempty"` + + // Reference to a Container in storage to populate storageContainerId. 
+ // +kubebuilder:validation:Optional + StorageContainerIDRef *v1.Reference `json:"storageContainerIdRef,omitempty" tf:"-"` + + // Selector for a Container in storage to populate storageContainerId. + // +kubebuilder:validation:Optional + StorageContainerIDSelector *v1.Selector `json:"storageContainerIdSelector,omitempty" tf:"-"` + + // The ID of the Storage Account. Changing this forces a new resource to be created. + StorageResourceID *string `json:"storageResourceId,omitempty" tf:"storage_resource_id,omitempty"` +} + +type InteractiveQueryClusterStorageAccountObservation struct { + + // Is this the Default Storage Account for the HDInsight Hadoop Cluster? Changing this forces a new resource to be created. + IsDefault *bool `json:"isDefault,omitempty" tf:"is_default,omitempty"` + + // The ID of the Storage Container. Changing this forces a new resource to be created. + StorageContainerID *string `json:"storageContainerId,omitempty" tf:"storage_container_id,omitempty"` + + // The ID of the Storage Account. Changing this forces a new resource to be created. + StorageResourceID *string `json:"storageResourceId,omitempty" tf:"storage_resource_id,omitempty"` +} + +type InteractiveQueryClusterStorageAccountParameters struct { + + // Is this the Default Storage Account for the HDInsight Hadoop Cluster? Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + IsDefault *bool `json:"isDefault" tf:"is_default,omitempty"` + + // The Access Key which should be used to connect to the Storage Account. Changing this forces a new resource to be created. + // +kubebuilder:validation:Required + StorageAccountKeySecretRef v1.SecretKeySelector `json:"storageAccountKeySecretRef" tf:"-"` + + // The ID of the Storage Container. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Container + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + StorageContainerID *string `json:"storageContainerId,omitempty" tf:"storage_container_id,omitempty"` + + // Reference to a Container in storage to populate storageContainerId. + // +kubebuilder:validation:Optional + StorageContainerIDRef *v1.Reference `json:"storageContainerIdRef,omitempty" tf:"-"` + + // Selector for a Container in storage to populate storageContainerId. + // +kubebuilder:validation:Optional + StorageContainerIDSelector *v1.Selector `json:"storageContainerIdSelector,omitempty" tf:"-"` + + // The ID of the Storage Account. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + StorageResourceID *string `json:"storageResourceId,omitempty" tf:"storage_resource_id,omitempty"` +} + +type RolesHeadNodeScriptActionsInitParameters struct { + + // The name of the script action. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The parameters for the script provided. + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The URI to the script. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type RolesHeadNodeScriptActionsObservation struct { + + // The name of the script action. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The parameters for the script provided. + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The URI to the script. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type RolesHeadNodeScriptActionsParameters struct { + + // The name of the script action. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The parameters for the script provided. 
+ // +kubebuilder:validation:Optional + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The URI to the script. + // +kubebuilder:validation:Optional + URI *string `json:"uri" tf:"uri,omitempty"` +} + +type RolesWorkerNodeAutoscaleInitParameters struct { + Capacity *AutoscaleCapacityInitParameters `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // A recurrence block as defined below. + Recurrence *WorkerNodeAutoscaleRecurrenceInitParameters `json:"recurrence,omitempty" tf:"recurrence,omitempty"` +} + +type RolesWorkerNodeAutoscaleObservation struct { + Capacity *AutoscaleCapacityObservation `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // A recurrence block as defined below. + Recurrence *WorkerNodeAutoscaleRecurrenceObservation `json:"recurrence,omitempty" tf:"recurrence,omitempty"` +} + +type RolesWorkerNodeAutoscaleParameters struct { + + // +kubebuilder:validation:Optional + Capacity *AutoscaleCapacityParameters `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // A recurrence block as defined below. + // +kubebuilder:validation:Optional + Recurrence *WorkerNodeAutoscaleRecurrenceParameters `json:"recurrence,omitempty" tf:"recurrence,omitempty"` +} + +type WorkerNodeAutoscaleRecurrenceInitParameters struct { + + // A list of schedule blocks as defined below. + Schedule []AutoscaleRecurrenceScheduleInitParameters `json:"schedule,omitempty" tf:"schedule,omitempty"` + + // The time zone for the autoscale schedule times. + Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` +} + +type WorkerNodeAutoscaleRecurrenceObservation struct { + + // A list of schedule blocks as defined below. + Schedule []AutoscaleRecurrenceScheduleObservation `json:"schedule,omitempty" tf:"schedule,omitempty"` + + // The time zone for the autoscale schedule times. 
+ Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` +} + +type WorkerNodeAutoscaleRecurrenceParameters struct { + + // A list of schedule blocks as defined below. + // +kubebuilder:validation:Optional + Schedule []AutoscaleRecurrenceScheduleParameters `json:"schedule" tf:"schedule,omitempty"` + + // The time zone for the autoscale schedule times. + // +kubebuilder:validation:Optional + Timezone *string `json:"timezone" tf:"timezone,omitempty"` +} + +// InteractiveQueryClusterSpec defines the desired state of InteractiveQueryCluster +type InteractiveQueryClusterSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider InteractiveQueryClusterParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider InteractiveQueryClusterInitParameters `json:"initProvider,omitempty"` +} + +// InteractiveQueryClusterStatus defines the observed state of InteractiveQueryCluster. +type InteractiveQueryClusterStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider InteractiveQueryClusterObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// InteractiveQueryCluster is the Schema for the InteractiveQueryClusters API. Manages a HDInsight Interactive Query Cluster. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type InteractiveQueryCluster struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.clusterVersion) || (has(self.initProvider) && has(self.initProvider.clusterVersion))",message="spec.forProvider.clusterVersion is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.componentVersion) || (has(self.initProvider) && has(self.initProvider.componentVersion))",message="spec.forProvider.componentVersion is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.gateway) || (has(self.initProvider) && has(self.initProvider.gateway))",message="spec.forProvider.gateway is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // 
+kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.roles) || (has(self.initProvider) && has(self.initProvider.roles))",message="spec.forProvider.roles is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.tier) || (has(self.initProvider) && has(self.initProvider.tier))",message="spec.forProvider.tier is a required parameter" + Spec InteractiveQueryClusterSpec `json:"spec"` + Status InteractiveQueryClusterStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// InteractiveQueryClusterList contains a list of InteractiveQueryClusters +type InteractiveQueryClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []InteractiveQueryCluster `json:"items"` +} + +// Repository type metadata. +var ( + InteractiveQueryCluster_Kind = "InteractiveQueryCluster" + InteractiveQueryCluster_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: InteractiveQueryCluster_Kind}.String() + InteractiveQueryCluster_KindAPIVersion = InteractiveQueryCluster_Kind + "." + CRDGroupVersion.String() + InteractiveQueryCluster_GroupVersionKind = CRDGroupVersion.WithKind(InteractiveQueryCluster_Kind) +) + +func init() { + SchemeBuilder.Register(&InteractiveQueryCluster{}, &InteractiveQueryClusterList{}) +} diff --git a/apis/hdinsight/v1beta2/zz_kafkacluster_terraformed.go b/apis/hdinsight/v1beta2/zz_kafkacluster_terraformed.go new file mode 100755 index 000000000..69665697a --- /dev/null +++ b/apis/hdinsight/v1beta2/zz_kafkacluster_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this KafkaCluster +func (mg *KafkaCluster) GetTerraformResourceType() string { + return "azurerm_hdinsight_kafka_cluster" +} + +// GetConnectionDetailsMapping for this KafkaCluster +func (tr *KafkaCluster) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"extension[*].primary_key": "spec.forProvider.extension[*].primaryKeySecretRef", "gateway[*].password": "spec.forProvider.gateway[*].passwordSecretRef", "metastores[*].ambari[*].password": "spec.forProvider.metastores[*].ambari[*].passwordSecretRef", "metastores[*].hive[*].password": "spec.forProvider.metastores[*].hive[*].passwordSecretRef", "metastores[*].oozie[*].password": "spec.forProvider.metastores[*].oozie[*].passwordSecretRef", "monitor[*].primary_key": "spec.forProvider.monitor[*].primaryKeySecretRef", "roles[*].head_node[*].password": "spec.forProvider.roles[*].headNode[*].passwordSecretRef", "roles[*].kafka_management_node[*].password": "spec.forProvider.roles[*].kafkaManagementNode[*].passwordSecretRef", "roles[*].worker_node[*].password": "spec.forProvider.roles[*].workerNode[*].passwordSecretRef", "roles[*].zookeeper_node[*].password": "spec.forProvider.roles[*].zookeeperNode[*].passwordSecretRef", "security_profile[*].domain_user_password": "spec.forProvider.securityProfile[*].domainUserPasswordSecretRef", "storage_account[*].storage_account_key": "spec.forProvider.storageAccount[*].storageAccountKeySecretRef"} +} + +// GetObservation of this KafkaCluster +func (tr *KafkaCluster) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this 
KafkaCluster +func (tr *KafkaCluster) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this KafkaCluster +func (tr *KafkaCluster) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this KafkaCluster +func (tr *KafkaCluster) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this KafkaCluster +func (tr *KafkaCluster) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this KafkaCluster +func (tr *KafkaCluster) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this KafkaCluster +func (tr *KafkaCluster) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. 
As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this KafkaCluster using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *KafkaCluster) LateInitialize(attrs []byte) (bool, error) { + params := &KafkaClusterParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *KafkaCluster) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/hdinsight/v1beta2/zz_kafkacluster_types.go b/apis/hdinsight/v1beta2/zz_kafkacluster_types.go new file mode 100755 index 000000000..917f5c642 --- /dev/null +++ b/apis/hdinsight/v1beta2/zz_kafkacluster_types.go @@ -0,0 +1,1502 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type KafkaClusterComponentVersionInitParameters struct { + + // The version of Kafka which should be used for this HDInsight Kafka Cluster. 
Changing this forces a new resource to be created. + Kafka *string `json:"kafka,omitempty" tf:"kafka,omitempty"` +} + +type KafkaClusterComponentVersionObservation struct { + + // The version of Kafka which should be used for this HDInsight Kafka Cluster. Changing this forces a new resource to be created. + Kafka *string `json:"kafka,omitempty" tf:"kafka,omitempty"` +} + +type KafkaClusterComponentVersionParameters struct { + + // The version of Kafka which should be used for this HDInsight Kafka Cluster. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Kafka *string `json:"kafka" tf:"kafka,omitempty"` +} + +type KafkaClusterComputeIsolationInitParameters struct { + + // This field indicates whether enable compute isolation or not. Possible values are true or false. + ComputeIsolationEnabled *bool `json:"computeIsolationEnabled,omitempty" tf:"compute_isolation_enabled,omitempty"` + + // The name of the host SKU. + HostSku *string `json:"hostSku,omitempty" tf:"host_sku,omitempty"` +} + +type KafkaClusterComputeIsolationObservation struct { + + // This field indicates whether enable compute isolation or not. Possible values are true or false. + ComputeIsolationEnabled *bool `json:"computeIsolationEnabled,omitempty" tf:"compute_isolation_enabled,omitempty"` + + // The name of the host SKU. + HostSku *string `json:"hostSku,omitempty" tf:"host_sku,omitempty"` +} + +type KafkaClusterComputeIsolationParameters struct { + + // This field indicates whether enable compute isolation or not. Possible values are true or false. + // +kubebuilder:validation:Optional + ComputeIsolationEnabled *bool `json:"computeIsolationEnabled,omitempty" tf:"compute_isolation_enabled,omitempty"` + + // The name of the host SKU. + // +kubebuilder:validation:Optional + HostSku *string `json:"hostSku,omitempty" tf:"host_sku,omitempty"` +} + +type KafkaClusterDiskEncryptionInitParameters struct { + + // This is an algorithm identifier for encryption. 
Possible values are RSA1_5, RSA-OAEP, RSA-OAEP-256. + EncryptionAlgorithm *string `json:"encryptionAlgorithm,omitempty" tf:"encryption_algorithm,omitempty"` + + // This is indicator to show whether resource disk encryption is enabled. + EncryptionAtHostEnabled *bool `json:"encryptionAtHostEnabled,omitempty" tf:"encryption_at_host_enabled,omitempty"` + + // The ID of the key vault key. + KeyVaultKeyID *string `json:"keyVaultKeyId,omitempty" tf:"key_vault_key_id,omitempty"` + + // This is the resource ID of Managed Identity used to access the key vault. + KeyVaultManagedIdentityID *string `json:"keyVaultManagedIdentityId,omitempty" tf:"key_vault_managed_identity_id,omitempty"` +} + +type KafkaClusterDiskEncryptionObservation struct { + + // This is an algorithm identifier for encryption. Possible values are RSA1_5, RSA-OAEP, RSA-OAEP-256. + EncryptionAlgorithm *string `json:"encryptionAlgorithm,omitempty" tf:"encryption_algorithm,omitempty"` + + // This is indicator to show whether resource disk encryption is enabled. + EncryptionAtHostEnabled *bool `json:"encryptionAtHostEnabled,omitempty" tf:"encryption_at_host_enabled,omitempty"` + + // The ID of the key vault key. + KeyVaultKeyID *string `json:"keyVaultKeyId,omitempty" tf:"key_vault_key_id,omitempty"` + + // This is the resource ID of Managed Identity used to access the key vault. + KeyVaultManagedIdentityID *string `json:"keyVaultManagedIdentityId,omitempty" tf:"key_vault_managed_identity_id,omitempty"` +} + +type KafkaClusterDiskEncryptionParameters struct { + + // This is an algorithm identifier for encryption. Possible values are RSA1_5, RSA-OAEP, RSA-OAEP-256. + // +kubebuilder:validation:Optional + EncryptionAlgorithm *string `json:"encryptionAlgorithm,omitempty" tf:"encryption_algorithm,omitempty"` + + // This is indicator to show whether resource disk encryption is enabled. 
+ // +kubebuilder:validation:Optional + EncryptionAtHostEnabled *bool `json:"encryptionAtHostEnabled,omitempty" tf:"encryption_at_host_enabled,omitempty"` + + // The ID of the key vault key. + // +kubebuilder:validation:Optional + KeyVaultKeyID *string `json:"keyVaultKeyId,omitempty" tf:"key_vault_key_id,omitempty"` + + // This is the resource ID of Managed Identity used to access the key vault. + // +kubebuilder:validation:Optional + KeyVaultManagedIdentityID *string `json:"keyVaultManagedIdentityId,omitempty" tf:"key_vault_managed_identity_id,omitempty"` +} + +type KafkaClusterExtensionInitParameters struct { + + // The workspace ID of the log analytics extension. + LogAnalyticsWorkspaceID *string `json:"logAnalyticsWorkspaceId,omitempty" tf:"log_analytics_workspace_id,omitempty"` +} + +type KafkaClusterExtensionObservation struct { + + // The workspace ID of the log analytics extension. + LogAnalyticsWorkspaceID *string `json:"logAnalyticsWorkspaceId,omitempty" tf:"log_analytics_workspace_id,omitempty"` +} + +type KafkaClusterExtensionParameters struct { + + // The workspace ID of the log analytics extension. + // +kubebuilder:validation:Optional + LogAnalyticsWorkspaceID *string `json:"logAnalyticsWorkspaceId" tf:"log_analytics_workspace_id,omitempty"` + + // The workspace key of the log analytics extension. + // +kubebuilder:validation:Required + PrimaryKeySecretRef v1.SecretKeySelector `json:"primaryKeySecretRef" tf:"-"` +} + +type KafkaClusterGatewayInitParameters struct { + + // The username used for the Ambari Portal. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type KafkaClusterGatewayObservation struct { + + // The username used for the Ambari Portal. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type KafkaClusterGatewayParameters struct { + + // The password used for the Ambari Portal. 
+ // +kubebuilder:validation:Required + PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` + + // The username used for the Ambari Portal. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Username *string `json:"username" tf:"username,omitempty"` +} + +type KafkaClusterInitParameters struct { + + // Specifies the Version of HDInsights which should be used for this Cluster. Changing this forces a new resource to be created. + ClusterVersion *string `json:"clusterVersion,omitempty" tf:"cluster_version,omitempty"` + + // A component_version block as defined below. + ComponentVersion *KafkaClusterComponentVersionInitParameters `json:"componentVersion,omitempty" tf:"component_version,omitempty"` + + // A compute_isolation block as defined below. + ComputeIsolation *KafkaClusterComputeIsolationInitParameters `json:"computeIsolation,omitempty" tf:"compute_isolation,omitempty"` + + // One or more disk_encryption block as defined below. + DiskEncryption []KafkaClusterDiskEncryptionInitParameters `json:"diskEncryption,omitempty" tf:"disk_encryption,omitempty"` + + // Whether encryption in transit is enabled for this HDInsight Kafka Cluster. Changing this forces a new resource to be created. + EncryptionInTransitEnabled *bool `json:"encryptionInTransitEnabled,omitempty" tf:"encryption_in_transit_enabled,omitempty"` + + // An extension block as defined below. + Extension *KafkaClusterExtensionInitParameters `json:"extension,omitempty" tf:"extension,omitempty"` + + // A gateway block as defined below. + Gateway *KafkaClusterGatewayInitParameters `json:"gateway,omitempty" tf:"gateway,omitempty"` + + // Specifies the Azure Region which this HDInsight Kafka Cluster should exist. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A metastores block as defined below. 
+ Metastores *KafkaClusterMetastoresInitParameters `json:"metastores,omitempty" tf:"metastores,omitempty"` + + // A monitor block as defined below. + Monitor *KafkaClusterMonitorInitParameters `json:"monitor,omitempty" tf:"monitor,omitempty"` + + // A network block as defined below. + Network *KafkaClusterNetworkInitParameters `json:"network,omitempty" tf:"network,omitempty"` + + // A rest_proxy block as defined below. + RestProxy *RestProxyInitParameters `json:"restProxy,omitempty" tf:"rest_proxy,omitempty"` + + // A roles block as defined below. + Roles *KafkaClusterRolesInitParameters `json:"roles,omitempty" tf:"roles,omitempty"` + + // A security_profile block as defined below. Changing this forces a new resource to be created. + SecurityProfile *KafkaClusterSecurityProfileInitParameters `json:"securityProfile,omitempty" tf:"security_profile,omitempty"` + + // One or more storage_account block as defined below. + StorageAccount []KafkaClusterStorageAccountInitParameters `json:"storageAccount,omitempty" tf:"storage_account,omitempty"` + + // A storage_account_gen2 block as defined below. + StorageAccountGen2 *KafkaClusterStorageAccountGen2InitParameters `json:"storageAccountGen2,omitempty" tf:"storage_account_gen2,omitempty"` + + // The minimal supported TLS version. Possible values are 1.0, 1.1 or 1.2. Changing this forces a new resource to be created. + TLSMinVersion *string `json:"tlsMinVersion,omitempty" tf:"tls_min_version,omitempty"` + + // A map of Tags which should be assigned to this HDInsight Kafka Cluster. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies the Tier which should be used for this HDInsight Kafka Cluster. Possible values are Standard or Premium. Changing this forces a new resource to be created. + Tier *string `json:"tier,omitempty" tf:"tier,omitempty"` +} + +type KafkaClusterMetastoresAmbariInitParameters struct { + + // The external Oozie metastore's existing SQL database. 
Changing this forces a new resource to be created. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created. + Server *string `json:"server,omitempty" tf:"server,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type KafkaClusterMetastoresAmbariObservation struct { + + // The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created. + Server *string `json:"server,omitempty" tf:"server,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type KafkaClusterMetastoresAmbariParameters struct { + + // The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + DatabaseName *string `json:"databaseName" tf:"database_name,omitempty"` + + // The Password associated with the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Required + PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` + + // The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + Server *string `json:"server" tf:"server,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Username *string `json:"username" tf:"username,omitempty"` +} + +type KafkaClusterMetastoresHiveInitParameters struct { + + // The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created. + Server *string `json:"server,omitempty" tf:"server,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type KafkaClusterMetastoresHiveObservation struct { + + // The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created. + Server *string `json:"server,omitempty" tf:"server,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type KafkaClusterMetastoresHiveParameters struct { + + // The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + DatabaseName *string `json:"databaseName" tf:"database_name,omitempty"` + + // The Password associated with the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Required + PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` + + // The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Server *string `json:"server" tf:"server,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Username *string `json:"username" tf:"username,omitempty"` +} + +type KafkaClusterMetastoresInitParameters struct { + + // An ambari block as defined below. + Ambari *KafkaClusterMetastoresAmbariInitParameters `json:"ambari,omitempty" tf:"ambari,omitempty"` + + // A hive block as defined below. + Hive *KafkaClusterMetastoresHiveInitParameters `json:"hive,omitempty" tf:"hive,omitempty"` + + // An oozie block as defined below. + Oozie *KafkaClusterMetastoresOozieInitParameters `json:"oozie,omitempty" tf:"oozie,omitempty"` +} + +type KafkaClusterMetastoresObservation struct { + + // An ambari block as defined below. + Ambari *KafkaClusterMetastoresAmbariObservation `json:"ambari,omitempty" tf:"ambari,omitempty"` + + // A hive block as defined below. + Hive *KafkaClusterMetastoresHiveObservation `json:"hive,omitempty" tf:"hive,omitempty"` + + // An oozie block as defined below. + Oozie *KafkaClusterMetastoresOozieObservation `json:"oozie,omitempty" tf:"oozie,omitempty"` +} + +type KafkaClusterMetastoresOozieInitParameters struct { + + // The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created. 
+ DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created. + Server *string `json:"server,omitempty" tf:"server,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type KafkaClusterMetastoresOozieObservation struct { + + // The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created. + Server *string `json:"server,omitempty" tf:"server,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type KafkaClusterMetastoresOozieParameters struct { + + // The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + DatabaseName *string `json:"databaseName" tf:"database_name,omitempty"` + + // The Password associated with the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Required + PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` + + // The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + Server *string `json:"server" tf:"server,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Username *string `json:"username" tf:"username,omitempty"` +} + +type KafkaClusterMetastoresParameters struct { + + // An ambari block as defined below. + // +kubebuilder:validation:Optional + Ambari *KafkaClusterMetastoresAmbariParameters `json:"ambari,omitempty" tf:"ambari,omitempty"` + + // A hive block as defined below. + // +kubebuilder:validation:Optional + Hive *KafkaClusterMetastoresHiveParameters `json:"hive,omitempty" tf:"hive,omitempty"` + + // An oozie block as defined below. + // +kubebuilder:validation:Optional + Oozie *KafkaClusterMetastoresOozieParameters `json:"oozie,omitempty" tf:"oozie,omitempty"` +} + +type KafkaClusterMonitorInitParameters struct { + + // The Operations Management Suite (OMS) workspace ID. + LogAnalyticsWorkspaceID *string `json:"logAnalyticsWorkspaceId,omitempty" tf:"log_analytics_workspace_id,omitempty"` +} + +type KafkaClusterMonitorObservation struct { + + // The Operations Management Suite (OMS) workspace ID. + LogAnalyticsWorkspaceID *string `json:"logAnalyticsWorkspaceId,omitempty" tf:"log_analytics_workspace_id,omitempty"` +} + +type KafkaClusterMonitorParameters struct { + + // The Operations Management Suite (OMS) workspace ID. + // +kubebuilder:validation:Optional + LogAnalyticsWorkspaceID *string `json:"logAnalyticsWorkspaceId" tf:"log_analytics_workspace_id,omitempty"` + + // The Operations Management Suite (OMS) workspace key. + // +kubebuilder:validation:Required + PrimaryKeySecretRef v1.SecretKeySelector `json:"primaryKeySecretRef" tf:"-"` +} + +type KafkaClusterNetworkInitParameters struct { + + // The direction of the resource provider connection. Possible values include Inbound or Outbound. Defaults to Inbound. 
Changing this forces a new resource to be created. + ConnectionDirection *string `json:"connectionDirection,omitempty" tf:"connection_direction,omitempty"` + + // Is the private link enabled? Possible values include true or false. Defaults to false. Changing this forces a new resource to be created. + PrivateLinkEnabled *bool `json:"privateLinkEnabled,omitempty" tf:"private_link_enabled,omitempty"` +} + +type KafkaClusterNetworkObservation struct { + + // The direction of the resource provider connection. Possible values include Inbound or Outbound. Defaults to Inbound. Changing this forces a new resource to be created. + ConnectionDirection *string `json:"connectionDirection,omitempty" tf:"connection_direction,omitempty"` + + // Is the private link enabled? Possible values include true or false. Defaults to false. Changing this forces a new resource to be created. + PrivateLinkEnabled *bool `json:"privateLinkEnabled,omitempty" tf:"private_link_enabled,omitempty"` +} + +type KafkaClusterNetworkParameters struct { + + // The direction of the resource provider connection. Possible values include Inbound or Outbound. Defaults to Inbound. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ConnectionDirection *string `json:"connectionDirection,omitempty" tf:"connection_direction,omitempty"` + + // Is the private link enabled? Possible values include true or false. Defaults to false. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + PrivateLinkEnabled *bool `json:"privateLinkEnabled,omitempty" tf:"private_link_enabled,omitempty"` +} + +type KafkaClusterObservation struct { + + // Specifies the Version of HDInsights which should be used for this Cluster. Changing this forces a new resource to be created. + ClusterVersion *string `json:"clusterVersion,omitempty" tf:"cluster_version,omitempty"` + + // A component_version block as defined below. 
+ ComponentVersion *KafkaClusterComponentVersionObservation `json:"componentVersion,omitempty" tf:"component_version,omitempty"` + + // A compute_isolation block as defined below. + ComputeIsolation *KafkaClusterComputeIsolationObservation `json:"computeIsolation,omitempty" tf:"compute_isolation,omitempty"` + + // One or more disk_encryption block as defined below. + DiskEncryption []KafkaClusterDiskEncryptionObservation `json:"diskEncryption,omitempty" tf:"disk_encryption,omitempty"` + + // Whether encryption in transit is enabled for this HDInsight Kafka Cluster. Changing this forces a new resource to be created. + EncryptionInTransitEnabled *bool `json:"encryptionInTransitEnabled,omitempty" tf:"encryption_in_transit_enabled,omitempty"` + + // An extension block as defined below. + Extension *KafkaClusterExtensionObservation `json:"extension,omitempty" tf:"extension,omitempty"` + + // A gateway block as defined below. + Gateway *KafkaClusterGatewayObservation `json:"gateway,omitempty" tf:"gateway,omitempty"` + + // The HTTPS Connectivity Endpoint for this HDInsight Kafka Cluster. + HTTPSEndpoint *string `json:"httpsEndpoint,omitempty" tf:"https_endpoint,omitempty"` + + // The ID of the HDInsight Kafka Cluster. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The Kafka Rest Proxy Endpoint for this HDInsight Kafka Cluster. + KafkaRestProxyEndpoint *string `json:"kafkaRestProxyEndpoint,omitempty" tf:"kafka_rest_proxy_endpoint,omitempty"` + + // Specifies the Azure Region which this HDInsight Kafka Cluster should exist. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A metastores block as defined below. + Metastores *KafkaClusterMetastoresObservation `json:"metastores,omitempty" tf:"metastores,omitempty"` + + // A monitor block as defined below. + Monitor *KafkaClusterMonitorObservation `json:"monitor,omitempty" tf:"monitor,omitempty"` + + // A network block as defined below. 
+ Network *KafkaClusterNetworkObservation `json:"network,omitempty" tf:"network,omitempty"` + + // Specifies the name of the Resource Group in which this HDInsight Kafka Cluster should exist. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A rest_proxy block as defined below. + RestProxy *RestProxyObservation `json:"restProxy,omitempty" tf:"rest_proxy,omitempty"` + + // A roles block as defined below. + Roles *KafkaClusterRolesObservation `json:"roles,omitempty" tf:"roles,omitempty"` + + // The SSH Connectivity Endpoint for this HDInsight Kafka Cluster. + SSHEndpoint *string `json:"sshEndpoint,omitempty" tf:"ssh_endpoint,omitempty"` + + // A security_profile block as defined below. Changing this forces a new resource to be created. + SecurityProfile *KafkaClusterSecurityProfileObservation `json:"securityProfile,omitempty" tf:"security_profile,omitempty"` + + // One or more storage_account block as defined below. + StorageAccount []KafkaClusterStorageAccountObservation `json:"storageAccount,omitempty" tf:"storage_account,omitempty"` + + // A storage_account_gen2 block as defined below. + StorageAccountGen2 *KafkaClusterStorageAccountGen2Observation `json:"storageAccountGen2,omitempty" tf:"storage_account_gen2,omitempty"` + + // The minimal supported TLS version. Possible values are 1.0, 1.1 or 1.2. Changing this forces a new resource to be created. + TLSMinVersion *string `json:"tlsMinVersion,omitempty" tf:"tls_min_version,omitempty"` + + // A map of Tags which should be assigned to this HDInsight Kafka Cluster. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies the Tier which should be used for this HDInsight Kafka Cluster. Possible values are Standard or Premium. Changing this forces a new resource to be created. 
+ Tier *string `json:"tier,omitempty" tf:"tier,omitempty"` +} + +type KafkaClusterParameters struct { + + // Specifies the Version of HDInsights which should be used for this Cluster. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ClusterVersion *string `json:"clusterVersion,omitempty" tf:"cluster_version,omitempty"` + + // A component_version block as defined below. + // +kubebuilder:validation:Optional + ComponentVersion *KafkaClusterComponentVersionParameters `json:"componentVersion,omitempty" tf:"component_version,omitempty"` + + // A compute_isolation block as defined below. + // +kubebuilder:validation:Optional + ComputeIsolation *KafkaClusterComputeIsolationParameters `json:"computeIsolation,omitempty" tf:"compute_isolation,omitempty"` + + // One or more disk_encryption block as defined below. + // +kubebuilder:validation:Optional + DiskEncryption []KafkaClusterDiskEncryptionParameters `json:"diskEncryption,omitempty" tf:"disk_encryption,omitempty"` + + // Whether encryption in transit is enabled for this HDInsight Kafka Cluster. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + EncryptionInTransitEnabled *bool `json:"encryptionInTransitEnabled,omitempty" tf:"encryption_in_transit_enabled,omitempty"` + + // An extension block as defined below. + // +kubebuilder:validation:Optional + Extension *KafkaClusterExtensionParameters `json:"extension,omitempty" tf:"extension,omitempty"` + + // A gateway block as defined below. + // +kubebuilder:validation:Optional + Gateway *KafkaClusterGatewayParameters `json:"gateway,omitempty" tf:"gateway,omitempty"` + + // Specifies the Azure Region which this HDInsight Kafka Cluster should exist. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A metastores block as defined below. 
+ // +kubebuilder:validation:Optional + Metastores *KafkaClusterMetastoresParameters `json:"metastores,omitempty" tf:"metastores,omitempty"` + + // A monitor block as defined below. + // +kubebuilder:validation:Optional + Monitor *KafkaClusterMonitorParameters `json:"monitor,omitempty" tf:"monitor,omitempty"` + + // A network block as defined below. + // +kubebuilder:validation:Optional + Network *KafkaClusterNetworkParameters `json:"network,omitempty" tf:"network,omitempty"` + + // Specifies the name of the Resource Group in which this HDInsight Kafka Cluster should exist. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A rest_proxy block as defined below. + // +kubebuilder:validation:Optional + RestProxy *RestProxyParameters `json:"restProxy,omitempty" tf:"rest_proxy,omitempty"` + + // A roles block as defined below. + // +kubebuilder:validation:Optional + Roles *KafkaClusterRolesParameters `json:"roles,omitempty" tf:"roles,omitempty"` + + // A security_profile block as defined below. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + SecurityProfile *KafkaClusterSecurityProfileParameters `json:"securityProfile,omitempty" tf:"security_profile,omitempty"` + + // One or more storage_account block as defined below. 
+ // +kubebuilder:validation:Optional + StorageAccount []KafkaClusterStorageAccountParameters `json:"storageAccount,omitempty" tf:"storage_account,omitempty"` + + // A storage_account_gen2 block as defined below. + // +kubebuilder:validation:Optional + StorageAccountGen2 *KafkaClusterStorageAccountGen2Parameters `json:"storageAccountGen2,omitempty" tf:"storage_account_gen2,omitempty"` + + // The minimal supported TLS version. Possible values are 1.0, 1.1 or 1.2. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + TLSMinVersion *string `json:"tlsMinVersion,omitempty" tf:"tls_min_version,omitempty"` + + // A map of Tags which should be assigned to this HDInsight Kafka Cluster. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies the Tier which should be used for this HDInsight Kafka Cluster. Possible values are Standard or Premium. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Tier *string `json:"tier,omitempty" tf:"tier,omitempty"` +} + +type KafkaClusterRolesHeadNodeInitParameters struct { + + // A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created. + // +listType=set + SSHKeys []*string `json:"sshKeys,omitempty" tf:"ssh_keys,omitempty"` + + // The script action which will run on the cluster. One or more script_actions blocks as defined below. + ScriptActions []KafkaClusterRolesHeadNodeScriptActionsInitParameters `json:"scriptActions,omitempty" tf:"script_actions,omitempty"` + + // The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` + + // The Size of the Virtual Machine which should be used as the Zookeeper Nodes. Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, 
Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created. + VMSize *string `json:"vmSize,omitempty" tf:"vm_size,omitempty"` + + // The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + VirtualNetworkID *string `json:"virtualNetworkId,omitempty" tf:"virtual_network_id,omitempty"` +} + +type KafkaClusterRolesHeadNodeObservation struct { + + // A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created. + // +listType=set + SSHKeys []*string `json:"sshKeys,omitempty" tf:"ssh_keys,omitempty"` + + // The script action which will run on the cluster. One or more script_actions blocks as defined below. + ScriptActions []KafkaClusterRolesHeadNodeScriptActionsObservation `json:"scriptActions,omitempty" tf:"script_actions,omitempty"` + + // The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` + + // The Size of the Virtual Machine which should be used as the Zookeeper Nodes. 
Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created. + VMSize *string `json:"vmSize,omitempty" tf:"vm_size,omitempty"` + + // The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + VirtualNetworkID *string `json:"virtualNetworkId,omitempty" tf:"virtual_network_id,omitempty"` +} + +type KafkaClusterRolesHeadNodeParameters struct { + + // The Password associated with the local administrator for the Zookeeper Nodes. 
Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + PasswordSecretRef *v1.SecretKeySelector `json:"passwordSecretRef,omitempty" tf:"-"` + + // A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + // +listType=set + SSHKeys []*string `json:"sshKeys,omitempty" tf:"ssh_keys,omitempty"` + + // The script action which will run on the cluster. One or more script_actions blocks as defined below. + // +kubebuilder:validation:Optional + ScriptActions []KafkaClusterRolesHeadNodeScriptActionsParameters `json:"scriptActions,omitempty" tf:"script_actions,omitempty"` + + // The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Username *string `json:"username" tf:"username,omitempty"` + + // The Size of the Virtual Machine which should be used as the Zookeeper Nodes. 
Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + VMSize *string `json:"vmSize" tf:"vm_size,omitempty"` + + // The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + VirtualNetworkID *string `json:"virtualNetworkId,omitempty" tf:"virtual_network_id,omitempty"` +} + +type KafkaClusterRolesHeadNodeScriptActionsInitParameters struct { + + // The name of the script action. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The parameters for the script provided. + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The URI to the script. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type KafkaClusterRolesHeadNodeScriptActionsObservation struct { + + // The name of the script action. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The parameters for the script provided. + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The URI to the script. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type KafkaClusterRolesHeadNodeScriptActionsParameters struct { + + // The name of the script action. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The parameters for the script provided. + // +kubebuilder:validation:Optional + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The URI to the script. + // +kubebuilder:validation:Optional + URI *string `json:"uri" tf:"uri,omitempty"` +} + +type KafkaClusterRolesInitParameters struct { + + // A head_node block as defined above. + HeadNode *KafkaClusterRolesHeadNodeInitParameters `json:"headNode,omitempty" tf:"head_node,omitempty"` + + // A kafka_management_node block as defined below. + KafkaManagementNode *KafkaManagementNodeInitParameters `json:"kafkaManagementNode,omitempty" tf:"kafka_management_node,omitempty"` + + // A worker_node block as defined below. + WorkerNode *KafkaClusterRolesWorkerNodeInitParameters `json:"workerNode,omitempty" tf:"worker_node,omitempty"` + + // A zookeeper_node block as defined below. 
+ ZookeeperNode *KafkaClusterRolesZookeeperNodeInitParameters `json:"zookeeperNode,omitempty" tf:"zookeeper_node,omitempty"` +} + +type KafkaClusterRolesObservation struct { + + // A head_node block as defined above. + HeadNode *KafkaClusterRolesHeadNodeObservation `json:"headNode,omitempty" tf:"head_node,omitempty"` + + // A kafka_management_node block as defined below. + KafkaManagementNode *KafkaManagementNodeObservation `json:"kafkaManagementNode,omitempty" tf:"kafka_management_node,omitempty"` + + // A worker_node block as defined below. + WorkerNode *KafkaClusterRolesWorkerNodeObservation `json:"workerNode,omitempty" tf:"worker_node,omitempty"` + + // A zookeeper_node block as defined below. + ZookeeperNode *KafkaClusterRolesZookeeperNodeObservation `json:"zookeeperNode,omitempty" tf:"zookeeper_node,omitempty"` +} + +type KafkaClusterRolesParameters struct { + + // A head_node block as defined above. + // +kubebuilder:validation:Optional + HeadNode *KafkaClusterRolesHeadNodeParameters `json:"headNode" tf:"head_node,omitempty"` + + // A kafka_management_node block as defined below. + // +kubebuilder:validation:Optional + KafkaManagementNode *KafkaManagementNodeParameters `json:"kafkaManagementNode,omitempty" tf:"kafka_management_node,omitempty"` + + // A worker_node block as defined below. + // +kubebuilder:validation:Optional + WorkerNode *KafkaClusterRolesWorkerNodeParameters `json:"workerNode" tf:"worker_node,omitempty"` + + // A zookeeper_node block as defined below. + // +kubebuilder:validation:Optional + ZookeeperNode *KafkaClusterRolesZookeeperNodeParameters `json:"zookeeperNode" tf:"zookeeper_node,omitempty"` +} + +type KafkaClusterRolesWorkerNodeInitParameters struct { + + // The number of Data Disks which should be assigned to each Worker Node, which can be between 1 and 8. Changing this forces a new resource to be created. 
+ NumberOfDisksPerNode *float64 `json:"numberOfDisksPerNode,omitempty" tf:"number_of_disks_per_node,omitempty"` + + // A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created. + // +listType=set + SSHKeys []*string `json:"sshKeys,omitempty" tf:"ssh_keys,omitempty"` + + // The script action which will run on the cluster. One or more script_actions blocks as defined below. + ScriptActions []KafkaClusterRolesWorkerNodeScriptActionsInitParameters `json:"scriptActions,omitempty" tf:"script_actions,omitempty"` + + // The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // The number of instances which should be run for the Worker Nodes. + TargetInstanceCount *float64 `json:"targetInstanceCount,omitempty" tf:"target_instance_count,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` + + // The Size of the Virtual Machine which should be used as the Zookeeper Nodes. 
Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created. + VMSize *string `json:"vmSize,omitempty" tf:"vm_size,omitempty"` + + // The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. 
+ VirtualNetworkID *string `json:"virtualNetworkId,omitempty" tf:"virtual_network_id,omitempty"` +} + +type KafkaClusterRolesWorkerNodeObservation struct { + + // The number of Data Disks which should be assigned to each Worker Node, which can be between 1 and 8. Changing this forces a new resource to be created. + NumberOfDisksPerNode *float64 `json:"numberOfDisksPerNode,omitempty" tf:"number_of_disks_per_node,omitempty"` + + // A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created. + // +listType=set + SSHKeys []*string `json:"sshKeys,omitempty" tf:"ssh_keys,omitempty"` + + // The script action which will run on the cluster. One or more script_actions blocks as defined below. + ScriptActions []KafkaClusterRolesWorkerNodeScriptActionsObservation `json:"scriptActions,omitempty" tf:"script_actions,omitempty"` + + // The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // The number of instances which should be run for the Worker Nodes. + TargetInstanceCount *float64 `json:"targetInstanceCount,omitempty" tf:"target_instance_count,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` + + // The Size of the Virtual Machine which should be used as the Zookeeper Nodes. 
Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created. + VMSize *string `json:"vmSize,omitempty" tf:"vm_size,omitempty"` + + // The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. 
+ VirtualNetworkID *string `json:"virtualNetworkId,omitempty" tf:"virtual_network_id,omitempty"` +} + +type KafkaClusterRolesWorkerNodeParameters struct { + + // The number of Data Disks which should be assigned to each Worker Node, which can be between 1 and 8. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + NumberOfDisksPerNode *float64 `json:"numberOfDisksPerNode" tf:"number_of_disks_per_node,omitempty"` + + // The Password associated with the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + PasswordSecretRef *v1.SecretKeySelector `json:"passwordSecretRef,omitempty" tf:"-"` + + // A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + // +listType=set + SSHKeys []*string `json:"sshKeys,omitempty" tf:"ssh_keys,omitempty"` + + // The script action which will run on the cluster. One or more script_actions blocks as defined below. + // +kubebuilder:validation:Optional + ScriptActions []KafkaClusterRolesWorkerNodeScriptActionsParameters `json:"scriptActions,omitempty" tf:"script_actions,omitempty"` + + // The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. 
+ // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // The number of instances which should be run for the Worker Nodes. + // +kubebuilder:validation:Optional + TargetInstanceCount *float64 `json:"targetInstanceCount" tf:"target_instance_count,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Username *string `json:"username" tf:"username,omitempty"` + + // The Size of the Virtual Machine which should be used as the Zookeeper Nodes. Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, 
Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + VMSize *string `json:"vmSize" tf:"vm_size,omitempty"` + + // The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + VirtualNetworkID *string `json:"virtualNetworkId,omitempty" tf:"virtual_network_id,omitempty"` +} + +type KafkaClusterRolesWorkerNodeScriptActionsInitParameters struct { + + // The name of the script action. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The parameters for the script provided. + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The URI to the script. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type KafkaClusterRolesWorkerNodeScriptActionsObservation struct { + + // The name of the script action. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The parameters for the script provided. + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The URI to the script. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type KafkaClusterRolesWorkerNodeScriptActionsParameters struct { + + // The name of the script action. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The parameters for the script provided. + // +kubebuilder:validation:Optional + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The URI to the script. + // +kubebuilder:validation:Optional + URI *string `json:"uri" tf:"uri,omitempty"` +} + +type KafkaClusterRolesZookeeperNodeInitParameters struct { + + // A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created. 
+ // +listType=set + SSHKeys []*string `json:"sshKeys,omitempty" tf:"ssh_keys,omitempty"` + + // The script action which will run on the cluster. One or more script_actions blocks as defined below. + ScriptActions []KafkaClusterRolesZookeeperNodeScriptActionsInitParameters `json:"scriptActions,omitempty" tf:"script_actions,omitempty"` + + // The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` + + // The Size of the Virtual Machine which should be used as the Zookeeper Nodes. 
Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created. + VMSize *string `json:"vmSize,omitempty" tf:"vm_size,omitempty"` + + // The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. 
+ VirtualNetworkID *string `json:"virtualNetworkId,omitempty" tf:"virtual_network_id,omitempty"` +} + +type KafkaClusterRolesZookeeperNodeObservation struct { + + // A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created. + // +listType=set + SSHKeys []*string `json:"sshKeys,omitempty" tf:"ssh_keys,omitempty"` + + // The script action which will run on the cluster. One or more script_actions blocks as defined below. + ScriptActions []KafkaClusterRolesZookeeperNodeScriptActionsObservation `json:"scriptActions,omitempty" tf:"script_actions,omitempty"` + + // The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` + + // The Size of the Virtual Machine which should be used as the Zookeeper Nodes. 
Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created. + VMSize *string `json:"vmSize,omitempty" tf:"vm_size,omitempty"` + + // The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + VirtualNetworkID *string `json:"virtualNetworkId,omitempty" tf:"virtual_network_id,omitempty"` +} + +type KafkaClusterRolesZookeeperNodeParameters struct { + + // The Password associated with the local administrator for the Zookeeper Nodes. 
Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + PasswordSecretRef *v1.SecretKeySelector `json:"passwordSecretRef,omitempty" tf:"-"` + + // A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + // +listType=set + SSHKeys []*string `json:"sshKeys,omitempty" tf:"ssh_keys,omitempty"` + + // The script action which will run on the cluster. One or more script_actions blocks as defined below. + // +kubebuilder:validation:Optional + ScriptActions []KafkaClusterRolesZookeeperNodeScriptActionsParameters `json:"scriptActions,omitempty" tf:"script_actions,omitempty"` + + // The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Username *string `json:"username" tf:"username,omitempty"` + + // The Size of the Virtual Machine which should be used as the Zookeeper Nodes. 
Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + VMSize *string `json:"vmSize" tf:"vm_size,omitempty"` + + // The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + VirtualNetworkID *string `json:"virtualNetworkId,omitempty" tf:"virtual_network_id,omitempty"` +} + +type KafkaClusterRolesZookeeperNodeScriptActionsInitParameters struct { + + // The name of the script action. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The parameters for the script provided. + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The URI to the script. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type KafkaClusterRolesZookeeperNodeScriptActionsObservation struct { + + // The name of the script action. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The parameters for the script provided. + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The URI to the script. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type KafkaClusterRolesZookeeperNodeScriptActionsParameters struct { + + // The name of the script action. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The parameters for the script provided. + // +kubebuilder:validation:Optional + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The URI to the script. + // +kubebuilder:validation:Optional + URI *string `json:"uri" tf:"uri,omitempty"` +} + +type KafkaClusterSecurityProfileInitParameters struct { + + // The resource ID of the Azure Active Directory Domain Service. Changing this forces a new resource to be created. + AaddsResourceID *string `json:"aaddsResourceId,omitempty" tf:"aadds_resource_id,omitempty"` + + // A list of the distinguished names for the cluster user groups. Changing this forces a new resource to be created. + // +listType=set + ClusterUsersGroupDNS []*string `json:"clusterUsersGroupDns,omitempty" tf:"cluster_users_group_dns,omitempty"` + + // The name of the Azure Active Directory Domain. 
Changing this forces a new resource to be created. + DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` + + // The username of the Azure Active Directory Domain. Changing this forces a new resource to be created. + DomainUsername *string `json:"domainUsername,omitempty" tf:"domain_username,omitempty"` + + // A list of the LDAPS URLs to communicate with the Azure Active Directory. Changing this forces a new resource to be created. + // +listType=set + LdapsUrls []*string `json:"ldapsUrls,omitempty" tf:"ldaps_urls,omitempty"` + + // The User Assigned Identity for the HDInsight Cluster. Changing this forces a new resource to be created. + MsiResourceID *string `json:"msiResourceId,omitempty" tf:"msi_resource_id,omitempty"` +} + +type KafkaClusterSecurityProfileObservation struct { + + // The resource ID of the Azure Active Directory Domain Service. Changing this forces a new resource to be created. + AaddsResourceID *string `json:"aaddsResourceId,omitempty" tf:"aadds_resource_id,omitempty"` + + // A list of the distinguished names for the cluster user groups. Changing this forces a new resource to be created. + // +listType=set + ClusterUsersGroupDNS []*string `json:"clusterUsersGroupDns,omitempty" tf:"cluster_users_group_dns,omitempty"` + + // The name of the Azure Active Directory Domain. Changing this forces a new resource to be created. + DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` + + // The username of the Azure Active Directory Domain. Changing this forces a new resource to be created. + DomainUsername *string `json:"domainUsername,omitempty" tf:"domain_username,omitempty"` + + // A list of the LDAPS URLs to communicate with the Azure Active Directory. Changing this forces a new resource to be created. + // +listType=set + LdapsUrls []*string `json:"ldapsUrls,omitempty" tf:"ldaps_urls,omitempty"` + + // The User Assigned Identity for the HDInsight Cluster. 
Changing this forces a new resource to be created. + MsiResourceID *string `json:"msiResourceId,omitempty" tf:"msi_resource_id,omitempty"` +} + +type KafkaClusterSecurityProfileParameters struct { + + // The resource ID of the Azure Active Directory Domain Service. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + AaddsResourceID *string `json:"aaddsResourceId" tf:"aadds_resource_id,omitempty"` + + // A list of the distinguished names for the cluster user groups. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + // +listType=set + ClusterUsersGroupDNS []*string `json:"clusterUsersGroupDns,omitempty" tf:"cluster_users_group_dns,omitempty"` + + // The name of the Azure Active Directory Domain. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + DomainName *string `json:"domainName" tf:"domain_name,omitempty"` + + // The user password of the Azure Active Directory Domain. Changing this forces a new resource to be created. + // +kubebuilder:validation:Required + DomainUserPasswordSecretRef v1.SecretKeySelector `json:"domainUserPasswordSecretRef" tf:"-"` + + // The username of the Azure Active Directory Domain. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + DomainUsername *string `json:"domainUsername" tf:"domain_username,omitempty"` + + // A list of the LDAPS URLs to communicate with the Azure Active Directory. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + // +listType=set + LdapsUrls []*string `json:"ldapsUrls" tf:"ldaps_urls,omitempty"` + + // The User Assigned Identity for the HDInsight Cluster. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + MsiResourceID *string `json:"msiResourceId" tf:"msi_resource_id,omitempty"` +} + +type KafkaClusterStorageAccountGen2InitParameters struct { + + // The ID of the Gen2 Filesystem. 
Changing this forces a new resource to be created. + FileSystemID *string `json:"filesystemId,omitempty" tf:"filesystem_id,omitempty"` + + // Is this the Default Storage Account for the HDInsight Hadoop Cluster? Changing this forces a new resource to be created. + IsDefault *bool `json:"isDefault,omitempty" tf:"is_default,omitempty"` + + // The ID of Managed Identity to use for accessing the Gen2 filesystem. Changing this forces a new resource to be created. + ManagedIdentityResourceID *string `json:"managedIdentityResourceId,omitempty" tf:"managed_identity_resource_id,omitempty"` + + // The ID of the Storage Account. Changing this forces a new resource to be created. + StorageResourceID *string `json:"storageResourceId,omitempty" tf:"storage_resource_id,omitempty"` +} + +type KafkaClusterStorageAccountGen2Observation struct { + + // The ID of the Gen2 Filesystem. Changing this forces a new resource to be created. + FileSystemID *string `json:"filesystemId,omitempty" tf:"filesystem_id,omitempty"` + + // Is this the Default Storage Account for the HDInsight Hadoop Cluster? Changing this forces a new resource to be created. + IsDefault *bool `json:"isDefault,omitempty" tf:"is_default,omitempty"` + + // The ID of Managed Identity to use for accessing the Gen2 filesystem. Changing this forces a new resource to be created. + ManagedIdentityResourceID *string `json:"managedIdentityResourceId,omitempty" tf:"managed_identity_resource_id,omitempty"` + + // The ID of the Storage Account. Changing this forces a new resource to be created. + StorageResourceID *string `json:"storageResourceId,omitempty" tf:"storage_resource_id,omitempty"` +} + +type KafkaClusterStorageAccountGen2Parameters struct { + + // The ID of the Gen2 Filesystem. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + FileSystemID *string `json:"filesystemId" tf:"filesystem_id,omitempty"` + + // Is this the Default Storage Account for the HDInsight Hadoop Cluster? 
Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + IsDefault *bool `json:"isDefault" tf:"is_default,omitempty"` + + // The ID of Managed Identity to use for accessing the Gen2 filesystem. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ManagedIdentityResourceID *string `json:"managedIdentityResourceId" tf:"managed_identity_resource_id,omitempty"` + + // The ID of the Storage Account. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + StorageResourceID *string `json:"storageResourceId" tf:"storage_resource_id,omitempty"` +} + +type KafkaClusterStorageAccountInitParameters struct { + + // Is this the Default Storage Account for the HDInsight Hadoop Cluster? Changing this forces a new resource to be created. + IsDefault *bool `json:"isDefault,omitempty" tf:"is_default,omitempty"` + + // The ID of the Storage Container. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Container + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + StorageContainerID *string `json:"storageContainerId,omitempty" tf:"storage_container_id,omitempty"` + + // Reference to a Container in storage to populate storageContainerId. + // +kubebuilder:validation:Optional + StorageContainerIDRef *v1.Reference `json:"storageContainerIdRef,omitempty" tf:"-"` + + // Selector for a Container in storage to populate storageContainerId. + // +kubebuilder:validation:Optional + StorageContainerIDSelector *v1.Selector `json:"storageContainerIdSelector,omitempty" tf:"-"` + + // The ID of the Storage Account. Changing this forces a new resource to be created. 
+ StorageResourceID *string `json:"storageResourceId,omitempty" tf:"storage_resource_id,omitempty"` +} + +type KafkaClusterStorageAccountObservation struct { + + // Is this the Default Storage Account for the HDInsight Hadoop Cluster? Changing this forces a new resource to be created. + IsDefault *bool `json:"isDefault,omitempty" tf:"is_default,omitempty"` + + // The ID of the Storage Container. Changing this forces a new resource to be created. + StorageContainerID *string `json:"storageContainerId,omitempty" tf:"storage_container_id,omitempty"` + + // The ID of the Storage Account. Changing this forces a new resource to be created. + StorageResourceID *string `json:"storageResourceId,omitempty" tf:"storage_resource_id,omitempty"` +} + +type KafkaClusterStorageAccountParameters struct { + + // Is this the Default Storage Account for the HDInsight Hadoop Cluster? Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + IsDefault *bool `json:"isDefault" tf:"is_default,omitempty"` + + // The Access Key which should be used to connect to the Storage Account. Changing this forces a new resource to be created. + // +kubebuilder:validation:Required + StorageAccountKeySecretRef v1.SecretKeySelector `json:"storageAccountKeySecretRef" tf:"-"` + + // The ID of the Storage Container. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Container + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + StorageContainerID *string `json:"storageContainerId,omitempty" tf:"storage_container_id,omitempty"` + + // Reference to a Container in storage to populate storageContainerId. 
+ // +kubebuilder:validation:Optional + StorageContainerIDRef *v1.Reference `json:"storageContainerIdRef,omitempty" tf:"-"` + + // Selector for a Container in storage to populate storageContainerId. + // +kubebuilder:validation:Optional + StorageContainerIDSelector *v1.Selector `json:"storageContainerIdSelector,omitempty" tf:"-"` + + // The ID of the Storage Account. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + StorageResourceID *string `json:"storageResourceId,omitempty" tf:"storage_resource_id,omitempty"` +} + +type KafkaManagementNodeInitParameters struct { + + // A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created. + // +listType=set + SSHKeys []*string `json:"sshKeys,omitempty" tf:"ssh_keys,omitempty"` + + // The script action which will run on the cluster. One or more script_actions blocks as defined below. + ScriptActions []KafkaManagementNodeScriptActionsInitParameters `json:"scriptActions,omitempty" tf:"script_actions,omitempty"` + + // The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // The Username of the local administrator for the Zookeeper Nodes. 
Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` + + // The Size of the Virtual Machine which should be used as the Zookeeper Nodes. Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created. + VMSize *string `json:"vmSize,omitempty" tf:"vm_size,omitempty"` + + // The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. 
+ VirtualNetworkID *string `json:"virtualNetworkId,omitempty" tf:"virtual_network_id,omitempty"` +} + +type KafkaManagementNodeObservation struct { + + // A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created. + // +listType=set + SSHKeys []*string `json:"sshKeys,omitempty" tf:"ssh_keys,omitempty"` + + // The script action which will run on the cluster. One or more script_actions blocks as defined below. + ScriptActions []KafkaManagementNodeScriptActionsObservation `json:"scriptActions,omitempty" tf:"script_actions,omitempty"` + + // The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` + + // The Size of the Virtual Machine which should be used as the Zookeeper Nodes. 
Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created. + VMSize *string `json:"vmSize,omitempty" tf:"vm_size,omitempty"` + + // The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + VirtualNetworkID *string `json:"virtualNetworkId,omitempty" tf:"virtual_network_id,omitempty"` +} + +type KafkaManagementNodeParameters struct { + + // The Password associated with the local administrator for the Zookeeper Nodes. 
Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + PasswordSecretRef *v1.SecretKeySelector `json:"passwordSecretRef,omitempty" tf:"-"` + + // A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + // +listType=set + SSHKeys []*string `json:"sshKeys,omitempty" tf:"ssh_keys,omitempty"` + + // The script action which will run on the cluster. One or more script_actions blocks as defined below. + // +kubebuilder:validation:Optional + ScriptActions []KafkaManagementNodeScriptActionsParameters `json:"scriptActions,omitempty" tf:"script_actions,omitempty"` + + // The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Username *string `json:"username" tf:"username,omitempty"` + + // The Size of the Virtual Machine which should be used as the Zookeeper Nodes. 
Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + VMSize *string `json:"vmSize" tf:"vm_size,omitempty"` + + // The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + VirtualNetworkID *string `json:"virtualNetworkId,omitempty" tf:"virtual_network_id,omitempty"` +} + +type KafkaManagementNodeScriptActionsInitParameters struct { + + // The name of the script action. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The parameters for the script provided. + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The URI to the script. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type KafkaManagementNodeScriptActionsObservation struct { + + // The name of the script action. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The parameters for the script provided. + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The URI to the script. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type KafkaManagementNodeScriptActionsParameters struct { + + // The name of the script action. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The parameters for the script provided. + // +kubebuilder:validation:Optional + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The URI to the script. + // +kubebuilder:validation:Optional + URI *string `json:"uri" tf:"uri,omitempty"` +} + +type RestProxyInitParameters struct { + + // The Azure Active Directory Security Group ID. Changing this forces a new resource to be created. + SecurityGroupID *string `json:"securityGroupId,omitempty" tf:"security_group_id,omitempty"` + + // The Azure Active Directory Security Group name. Changing this forces a new resource to be created. + SecurityGroupName *string `json:"securityGroupName,omitempty" tf:"security_group_name,omitempty"` +} + +type RestProxyObservation struct { + + // The Azure Active Directory Security Group ID. Changing this forces a new resource to be created. 
+ SecurityGroupID *string `json:"securityGroupId,omitempty" tf:"security_group_id,omitempty"` + + // The Azure Active Directory Security Group name. Changing this forces a new resource to be created. + SecurityGroupName *string `json:"securityGroupName,omitempty" tf:"security_group_name,omitempty"` +} + +type RestProxyParameters struct { + + // The Azure Active Directory Security Group ID. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + SecurityGroupID *string `json:"securityGroupId" tf:"security_group_id,omitempty"` + + // The Azure Active Directory Security Group name. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + SecurityGroupName *string `json:"securityGroupName" tf:"security_group_name,omitempty"` +} + +// KafkaClusterSpec defines the desired state of KafkaCluster +type KafkaClusterSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider KafkaClusterParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider KafkaClusterInitParameters `json:"initProvider,omitempty"` +} + +// KafkaClusterStatus defines the observed state of KafkaCluster. 
+type KafkaClusterStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider KafkaClusterObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// KafkaCluster is the Schema for the KafkaClusters API. Manages a HDInsight Kafka Cluster. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type KafkaCluster struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.clusterVersion) || (has(self.initProvider) && has(self.initProvider.clusterVersion))",message="spec.forProvider.clusterVersion is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.componentVersion) || (has(self.initProvider) && has(self.initProvider.componentVersion))",message="spec.forProvider.componentVersion is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.gateway) || (has(self.initProvider) && has(self.initProvider.gateway))",message="spec.forProvider.gateway is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in 
self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.roles) || (has(self.initProvider) && has(self.initProvider.roles))",message="spec.forProvider.roles is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.tier) || (has(self.initProvider) && has(self.initProvider.tier))",message="spec.forProvider.tier is a required parameter" + Spec KafkaClusterSpec `json:"spec"` + Status KafkaClusterStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// KafkaClusterList contains a list of KafkaClusters +type KafkaClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []KafkaCluster `json:"items"` +} + +// Repository type metadata. +var ( + KafkaCluster_Kind = "KafkaCluster" + KafkaCluster_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: KafkaCluster_Kind}.String() + KafkaCluster_KindAPIVersion = KafkaCluster_Kind + "." 
+ CRDGroupVersion.String() + KafkaCluster_GroupVersionKind = CRDGroupVersion.WithKind(KafkaCluster_Kind) +) + +func init() { + SchemeBuilder.Register(&KafkaCluster{}, &KafkaClusterList{}) +} diff --git a/apis/hdinsight/v1beta2/zz_sparkcluster_terraformed.go b/apis/hdinsight/v1beta2/zz_sparkcluster_terraformed.go new file mode 100755 index 000000000..8be1a8f08 --- /dev/null +++ b/apis/hdinsight/v1beta2/zz_sparkcluster_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this SparkCluster +func (mg *SparkCluster) GetTerraformResourceType() string { + return "azurerm_hdinsight_spark_cluster" +} + +// GetConnectionDetailsMapping for this SparkCluster +func (tr *SparkCluster) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"extension[*].primary_key": "spec.forProvider.extension[*].primaryKeySecretRef", "gateway[*].password": "spec.forProvider.gateway[*].passwordSecretRef", "metastores[*].ambari[*].password": "spec.forProvider.metastores[*].ambari[*].passwordSecretRef", "metastores[*].hive[*].password": "spec.forProvider.metastores[*].hive[*].passwordSecretRef", "metastores[*].oozie[*].password": "spec.forProvider.metastores[*].oozie[*].passwordSecretRef", "monitor[*].primary_key": "spec.forProvider.monitor[*].primaryKeySecretRef", "roles[*].head_node[*].password": "spec.forProvider.roles[*].headNode[*].passwordSecretRef", "roles[*].worker_node[*].password": "spec.forProvider.roles[*].workerNode[*].passwordSecretRef", "roles[*].zookeeper_node[*].password": "spec.forProvider.roles[*].zookeeperNode[*].passwordSecretRef", "security_profile[*].domain_user_password": 
"spec.forProvider.securityProfile[*].domainUserPasswordSecretRef", "storage_account[*].storage_account_key": "spec.forProvider.storageAccount[*].storageAccountKeySecretRef"} +} + +// GetObservation of this SparkCluster +func (tr *SparkCluster) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this SparkCluster +func (tr *SparkCluster) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this SparkCluster +func (tr *SparkCluster) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this SparkCluster +func (tr *SparkCluster) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this SparkCluster +func (tr *SparkCluster) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this SparkCluster +func (tr *SparkCluster) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this SparkCluster +func (tr *SparkCluster) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters 
for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this SparkCluster using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *SparkCluster) LateInitialize(attrs []byte) (bool, error) { + params := &SparkClusterParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *SparkCluster) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/hdinsight/v1beta2/zz_sparkcluster_types.go b/apis/hdinsight/v1beta2/zz_sparkcluster_types.go new file mode 100755 index 000000000..3b97a2a44 --- /dev/null +++ b/apis/hdinsight/v1beta2/zz_sparkcluster_types.go @@ -0,0 +1,1441 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type RolesWorkerNodeAutoscaleRecurrenceInitParameters struct { + + // A list of schedule blocks as defined below. + Schedule []WorkerNodeAutoscaleRecurrenceScheduleInitParameters `json:"schedule,omitempty" tf:"schedule,omitempty"` + + // The time zone for the autoscale schedule times. + Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` +} + +type RolesWorkerNodeAutoscaleRecurrenceObservation struct { + + // A list of schedule blocks as defined below. + Schedule []WorkerNodeAutoscaleRecurrenceScheduleObservation `json:"schedule,omitempty" tf:"schedule,omitempty"` + + // The time zone for the autoscale schedule times. + Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` +} + +type RolesWorkerNodeAutoscaleRecurrenceParameters struct { + + // A list of schedule blocks as defined below. + // +kubebuilder:validation:Optional + Schedule []WorkerNodeAutoscaleRecurrenceScheduleParameters `json:"schedule" tf:"schedule,omitempty"` + + // The time zone for the autoscale schedule times. 
+ // +kubebuilder:validation:Optional + Timezone *string `json:"timezone" tf:"timezone,omitempty"` +} + +type SparkClusterComponentVersionInitParameters struct { + + // The version of Spark which should be used for this HDInsight Spark Cluster. Changing this forces a new resource to be created. + Spark *string `json:"spark,omitempty" tf:"spark,omitempty"` +} + +type SparkClusterComponentVersionObservation struct { + + // The version of Spark which should be used for this HDInsight Spark Cluster. Changing this forces a new resource to be created. + Spark *string `json:"spark,omitempty" tf:"spark,omitempty"` +} + +type SparkClusterComponentVersionParameters struct { + + // The version of Spark which should be used for this HDInsight Spark Cluster. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Spark *string `json:"spark" tf:"spark,omitempty"` +} + +type SparkClusterComputeIsolationInitParameters struct { + + // This field indicates whether enable compute isolation or not. Possible values are true or false. + ComputeIsolationEnabled *bool `json:"computeIsolationEnabled,omitempty" tf:"compute_isolation_enabled,omitempty"` + + // The name of the host SKU. + HostSku *string `json:"hostSku,omitempty" tf:"host_sku,omitempty"` +} + +type SparkClusterComputeIsolationObservation struct { + + // This field indicates whether enable compute isolation or not. Possible values are true or false. + ComputeIsolationEnabled *bool `json:"computeIsolationEnabled,omitempty" tf:"compute_isolation_enabled,omitempty"` + + // The name of the host SKU. + HostSku *string `json:"hostSku,omitempty" tf:"host_sku,omitempty"` +} + +type SparkClusterComputeIsolationParameters struct { + + // This field indicates whether enable compute isolation or not. Possible values are true or false. 
+ // +kubebuilder:validation:Optional + ComputeIsolationEnabled *bool `json:"computeIsolationEnabled,omitempty" tf:"compute_isolation_enabled,omitempty"` + + // The name of the host SKU. + // +kubebuilder:validation:Optional + HostSku *string `json:"hostSku,omitempty" tf:"host_sku,omitempty"` +} + +type SparkClusterDiskEncryptionInitParameters struct { + + // This is an algorithm identifier for encryption. Possible values are RSA1_5, RSA-OAEP, RSA-OAEP-256. + EncryptionAlgorithm *string `json:"encryptionAlgorithm,omitempty" tf:"encryption_algorithm,omitempty"` + + // This is indicator to show whether resource disk encryption is enabled. + EncryptionAtHostEnabled *bool `json:"encryptionAtHostEnabled,omitempty" tf:"encryption_at_host_enabled,omitempty"` + + // The ID of the key vault key. + KeyVaultKeyID *string `json:"keyVaultKeyId,omitempty" tf:"key_vault_key_id,omitempty"` + + // This is the resource ID of Managed Identity used to access the key vault. + KeyVaultManagedIdentityID *string `json:"keyVaultManagedIdentityId,omitempty" tf:"key_vault_managed_identity_id,omitempty"` +} + +type SparkClusterDiskEncryptionObservation struct { + + // This is an algorithm identifier for encryption. Possible values are RSA1_5, RSA-OAEP, RSA-OAEP-256. + EncryptionAlgorithm *string `json:"encryptionAlgorithm,omitempty" tf:"encryption_algorithm,omitempty"` + + // This is indicator to show whether resource disk encryption is enabled. + EncryptionAtHostEnabled *bool `json:"encryptionAtHostEnabled,omitempty" tf:"encryption_at_host_enabled,omitempty"` + + // The ID of the key vault key. + KeyVaultKeyID *string `json:"keyVaultKeyId,omitempty" tf:"key_vault_key_id,omitempty"` + + // This is the resource ID of Managed Identity used to access the key vault. 
+ KeyVaultManagedIdentityID *string `json:"keyVaultManagedIdentityId,omitempty" tf:"key_vault_managed_identity_id,omitempty"` +} + +type SparkClusterDiskEncryptionParameters struct { + + // This is an algorithm identifier for encryption. Possible values are RSA1_5, RSA-OAEP, RSA-OAEP-256. + // +kubebuilder:validation:Optional + EncryptionAlgorithm *string `json:"encryptionAlgorithm,omitempty" tf:"encryption_algorithm,omitempty"` + + // This is indicator to show whether resource disk encryption is enabled. + // +kubebuilder:validation:Optional + EncryptionAtHostEnabled *bool `json:"encryptionAtHostEnabled,omitempty" tf:"encryption_at_host_enabled,omitempty"` + + // The ID of the key vault key. + // +kubebuilder:validation:Optional + KeyVaultKeyID *string `json:"keyVaultKeyId,omitempty" tf:"key_vault_key_id,omitempty"` + + // This is the resource ID of Managed Identity used to access the key vault. + // +kubebuilder:validation:Optional + KeyVaultManagedIdentityID *string `json:"keyVaultManagedIdentityId,omitempty" tf:"key_vault_managed_identity_id,omitempty"` +} + +type SparkClusterExtensionInitParameters struct { + + // The workspace ID of the log analytics extension. + LogAnalyticsWorkspaceID *string `json:"logAnalyticsWorkspaceId,omitempty" tf:"log_analytics_workspace_id,omitempty"` +} + +type SparkClusterExtensionObservation struct { + + // The workspace ID of the log analytics extension. + LogAnalyticsWorkspaceID *string `json:"logAnalyticsWorkspaceId,omitempty" tf:"log_analytics_workspace_id,omitempty"` +} + +type SparkClusterExtensionParameters struct { + + // The workspace ID of the log analytics extension. + // +kubebuilder:validation:Optional + LogAnalyticsWorkspaceID *string `json:"logAnalyticsWorkspaceId" tf:"log_analytics_workspace_id,omitempty"` + + // The workspace key of the log analytics extension. 
+ // +kubebuilder:validation:Required + PrimaryKeySecretRef v1.SecretKeySelector `json:"primaryKeySecretRef" tf:"-"` +} + +type SparkClusterGatewayInitParameters struct { + + // The username used for the Ambari Portal. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type SparkClusterGatewayObservation struct { + + // The username used for the Ambari Portal. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type SparkClusterGatewayParameters struct { + + // The password used for the Ambari Portal. + // +kubebuilder:validation:Required + PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` + + // The username used for the Ambari Portal. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Username *string `json:"username" tf:"username,omitempty"` +} + +type SparkClusterInitParameters struct { + + // Specifies the Version of HDInsights which should be used for this Cluster. Changing this forces a new resource to be created. + ClusterVersion *string `json:"clusterVersion,omitempty" tf:"cluster_version,omitempty"` + + // A component_version block as defined below. + ComponentVersion *SparkClusterComponentVersionInitParameters `json:"componentVersion,omitempty" tf:"component_version,omitempty"` + + // A compute_isolation block as defined below. + ComputeIsolation *SparkClusterComputeIsolationInitParameters `json:"computeIsolation,omitempty" tf:"compute_isolation,omitempty"` + + // One or more disk_encryption block as defined below. + DiskEncryption []SparkClusterDiskEncryptionInitParameters `json:"diskEncryption,omitempty" tf:"disk_encryption,omitempty"` + + // Whether encryption in transit is enabled for this Cluster. Changing this forces a new resource to be created. 
+ EncryptionInTransitEnabled *bool `json:"encryptionInTransitEnabled,omitempty" tf:"encryption_in_transit_enabled,omitempty"` + + // An extension block as defined below. + Extension *SparkClusterExtensionInitParameters `json:"extension,omitempty" tf:"extension,omitempty"` + + // A gateway block as defined below. + Gateway *SparkClusterGatewayInitParameters `json:"gateway,omitempty" tf:"gateway,omitempty"` + + // Specifies the Azure Region which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A metastores block as defined below. + Metastores *SparkClusterMetastoresInitParameters `json:"metastores,omitempty" tf:"metastores,omitempty"` + + // A monitor block as defined below. + Monitor *SparkClusterMonitorInitParameters `json:"monitor,omitempty" tf:"monitor,omitempty"` + + // A network block as defined below. + Network *SparkClusterNetworkInitParameters `json:"network,omitempty" tf:"network,omitempty"` + + // A roles block as defined below. + Roles *SparkClusterRolesInitParameters `json:"roles,omitempty" tf:"roles,omitempty"` + + // A security_profile block as defined below. Changing this forces a new resource to be created. + SecurityProfile *SparkClusterSecurityProfileInitParameters `json:"securityProfile,omitempty" tf:"security_profile,omitempty"` + + // One or more storage_account block as defined below. + StorageAccount []SparkClusterStorageAccountInitParameters `json:"storageAccount,omitempty" tf:"storage_account,omitempty"` + + // A storage_account_gen2 block as defined below. + StorageAccountGen2 *SparkClusterStorageAccountGen2InitParameters `json:"storageAccountGen2,omitempty" tf:"storage_account_gen2,omitempty"` + + // The minimal supported TLS version. Possible values are 1.0, 1.1 or 1.2. Changing this forces a new resource to be created. 
+ TLSMinVersion *string `json:"tlsMinVersion,omitempty" tf:"tls_min_version,omitempty"` + + // A map of Tags which should be assigned to this HDInsight Spark Cluster. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies the Tier which should be used for this HDInsight Spark Cluster. Possible values are Standard or Premium. Changing this forces a new resource to be created. + Tier *string `json:"tier,omitempty" tf:"tier,omitempty"` +} + +type SparkClusterMetastoresAmbariInitParameters struct { + + // The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created. + Server *string `json:"server,omitempty" tf:"server,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type SparkClusterMetastoresAmbariObservation struct { + + // The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created. + Server *string `json:"server,omitempty" tf:"server,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type SparkClusterMetastoresAmbariParameters struct { + + // The external Oozie metastore's existing SQL database. 
Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + DatabaseName *string `json:"databaseName" tf:"database_name,omitempty"` + + // The Password associated with the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Required + PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` + + // The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Server *string `json:"server" tf:"server,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Username *string `json:"username" tf:"username,omitempty"` +} + +type SparkClusterMetastoresHiveInitParameters struct { + + // The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created. + Server *string `json:"server,omitempty" tf:"server,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type SparkClusterMetastoresHiveObservation struct { + + // The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created. 
+ Server *string `json:"server,omitempty" tf:"server,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type SparkClusterMetastoresHiveParameters struct { + + // The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + DatabaseName *string `json:"databaseName" tf:"database_name,omitempty"` + + // The Password associated with the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Required + PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` + + // The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Server *string `json:"server" tf:"server,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Username *string `json:"username" tf:"username,omitempty"` +} + +type SparkClusterMetastoresInitParameters struct { + + // An ambari block as defined below. + Ambari *SparkClusterMetastoresAmbariInitParameters `json:"ambari,omitempty" tf:"ambari,omitempty"` + + // A hive block as defined below. + Hive *SparkClusterMetastoresHiveInitParameters `json:"hive,omitempty" tf:"hive,omitempty"` + + // An oozie block as defined below. + Oozie *SparkClusterMetastoresOozieInitParameters `json:"oozie,omitempty" tf:"oozie,omitempty"` +} + +type SparkClusterMetastoresObservation struct { + + // An ambari block as defined below. + Ambari *SparkClusterMetastoresAmbariObservation `json:"ambari,omitempty" tf:"ambari,omitempty"` + + // A hive block as defined below. 
+ Hive *SparkClusterMetastoresHiveObservation `json:"hive,omitempty" tf:"hive,omitempty"` + + // An oozie block as defined below. + Oozie *SparkClusterMetastoresOozieObservation `json:"oozie,omitempty" tf:"oozie,omitempty"` +} + +type SparkClusterMetastoresOozieInitParameters struct { + + // The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created. + Server *string `json:"server,omitempty" tf:"server,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type SparkClusterMetastoresOozieObservation struct { + + // The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created. + Server *string `json:"server,omitempty" tf:"server,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type SparkClusterMetastoresOozieParameters struct { + + // The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + DatabaseName *string `json:"databaseName" tf:"database_name,omitempty"` + + // The Password associated with the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Required + PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` + + // The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Server *string `json:"server" tf:"server,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Username *string `json:"username" tf:"username,omitempty"` +} + +type SparkClusterMetastoresParameters struct { + + // An ambari block as defined below. + // +kubebuilder:validation:Optional + Ambari *SparkClusterMetastoresAmbariParameters `json:"ambari,omitempty" tf:"ambari,omitempty"` + + // A hive block as defined below. + // +kubebuilder:validation:Optional + Hive *SparkClusterMetastoresHiveParameters `json:"hive,omitempty" tf:"hive,omitempty"` + + // An oozie block as defined below. + // +kubebuilder:validation:Optional + Oozie *SparkClusterMetastoresOozieParameters `json:"oozie,omitempty" tf:"oozie,omitempty"` +} + +type SparkClusterMonitorInitParameters struct { + + // The Operations Management Suite (OMS) workspace ID. + LogAnalyticsWorkspaceID *string `json:"logAnalyticsWorkspaceId,omitempty" tf:"log_analytics_workspace_id,omitempty"` +} + +type SparkClusterMonitorObservation struct { + + // The Operations Management Suite (OMS) workspace ID. + LogAnalyticsWorkspaceID *string `json:"logAnalyticsWorkspaceId,omitempty" tf:"log_analytics_workspace_id,omitempty"` +} + +type SparkClusterMonitorParameters struct { + + // The Operations Management Suite (OMS) workspace ID. + // +kubebuilder:validation:Optional + LogAnalyticsWorkspaceID *string `json:"logAnalyticsWorkspaceId" tf:"log_analytics_workspace_id,omitempty"` + + // The Operations Management Suite (OMS) workspace key. 
+ // +kubebuilder:validation:Required + PrimaryKeySecretRef v1.SecretKeySelector `json:"primaryKeySecretRef" tf:"-"` +} + +type SparkClusterNetworkInitParameters struct { + + // The direction of the resource provider connection. Possible values include Inbound or Outbound. Defaults to Inbound. Changing this forces a new resource to be created. + ConnectionDirection *string `json:"connectionDirection,omitempty" tf:"connection_direction,omitempty"` + + // Is the private link enabled? Possible values include true or false. Defaults to false. Changing this forces a new resource to be created. + PrivateLinkEnabled *bool `json:"privateLinkEnabled,omitempty" tf:"private_link_enabled,omitempty"` +} + +type SparkClusterNetworkObservation struct { + + // The direction of the resource provider connection. Possible values include Inbound or Outbound. Defaults to Inbound. Changing this forces a new resource to be created. + ConnectionDirection *string `json:"connectionDirection,omitempty" tf:"connection_direction,omitempty"` + + // Is the private link enabled? Possible values include true or false. Defaults to false. Changing this forces a new resource to be created. + PrivateLinkEnabled *bool `json:"privateLinkEnabled,omitempty" tf:"private_link_enabled,omitempty"` +} + +type SparkClusterNetworkParameters struct { + + // The direction of the resource provider connection. Possible values include Inbound or Outbound. Defaults to Inbound. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ConnectionDirection *string `json:"connectionDirection,omitempty" tf:"connection_direction,omitempty"` + + // Is the private link enabled? Possible values include true or false. Defaults to false. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + PrivateLinkEnabled *bool `json:"privateLinkEnabled,omitempty" tf:"private_link_enabled,omitempty"` +} + +type SparkClusterObservation struct { + + // Specifies the Version of HDInsights which should be used for this Cluster. Changing this forces a new resource to be created. + ClusterVersion *string `json:"clusterVersion,omitempty" tf:"cluster_version,omitempty"` + + // A component_version block as defined below. + ComponentVersion *SparkClusterComponentVersionObservation `json:"componentVersion,omitempty" tf:"component_version,omitempty"` + + // A compute_isolation block as defined below. + ComputeIsolation *SparkClusterComputeIsolationObservation `json:"computeIsolation,omitempty" tf:"compute_isolation,omitempty"` + + // One or more disk_encryption block as defined below. + DiskEncryption []SparkClusterDiskEncryptionObservation `json:"diskEncryption,omitempty" tf:"disk_encryption,omitempty"` + + // Whether encryption in transit is enabled for this Cluster. Changing this forces a new resource to be created. + EncryptionInTransitEnabled *bool `json:"encryptionInTransitEnabled,omitempty" tf:"encryption_in_transit_enabled,omitempty"` + + // An extension block as defined below. + Extension *SparkClusterExtensionObservation `json:"extension,omitempty" tf:"extension,omitempty"` + + // A gateway block as defined below. + Gateway *SparkClusterGatewayObservation `json:"gateway,omitempty" tf:"gateway,omitempty"` + + // The HTTPS Connectivity Endpoint for this HDInsight Spark Cluster. + HTTPSEndpoint *string `json:"httpsEndpoint,omitempty" tf:"https_endpoint,omitempty"` + + // The ID of the HDInsight Spark Cluster. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies the Azure Region which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A metastores block as defined below. 
+ Metastores *SparkClusterMetastoresObservation `json:"metastores,omitempty" tf:"metastores,omitempty"` + + // A monitor block as defined below. + Monitor *SparkClusterMonitorObservation `json:"monitor,omitempty" tf:"monitor,omitempty"` + + // A network block as defined below. + Network *SparkClusterNetworkObservation `json:"network,omitempty" tf:"network,omitempty"` + + // Specifies the name of the Resource Group in which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A roles block as defined below. + Roles *SparkClusterRolesObservation `json:"roles,omitempty" tf:"roles,omitempty"` + + // The SSH Connectivity Endpoint for this HDInsight Spark Cluster. + SSHEndpoint *string `json:"sshEndpoint,omitempty" tf:"ssh_endpoint,omitempty"` + + // A security_profile block as defined below. Changing this forces a new resource to be created. + SecurityProfile *SparkClusterSecurityProfileObservation `json:"securityProfile,omitempty" tf:"security_profile,omitempty"` + + // One or more storage_account block as defined below. + StorageAccount []SparkClusterStorageAccountObservation `json:"storageAccount,omitempty" tf:"storage_account,omitempty"` + + // A storage_account_gen2 block as defined below. + StorageAccountGen2 *SparkClusterStorageAccountGen2Observation `json:"storageAccountGen2,omitempty" tf:"storage_account_gen2,omitempty"` + + // The minimal supported TLS version. Possible values are 1.0, 1.1 or 1.2. Changing this forces a new resource to be created. + TLSMinVersion *string `json:"tlsMinVersion,omitempty" tf:"tls_min_version,omitempty"` + + // A map of Tags which should be assigned to this HDInsight Spark Cluster. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies the Tier which should be used for this HDInsight Spark Cluster. 
Possible values are Standard or Premium. Changing this forces a new resource to be created. + Tier *string `json:"tier,omitempty" tf:"tier,omitempty"` +} + +type SparkClusterParameters struct { + + // Specifies the Version of HDInsights which should be used for this Cluster. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ClusterVersion *string `json:"clusterVersion,omitempty" tf:"cluster_version,omitempty"` + + // A component_version block as defined below. + // +kubebuilder:validation:Optional + ComponentVersion *SparkClusterComponentVersionParameters `json:"componentVersion,omitempty" tf:"component_version,omitempty"` + + // A compute_isolation block as defined below. + // +kubebuilder:validation:Optional + ComputeIsolation *SparkClusterComputeIsolationParameters `json:"computeIsolation,omitempty" tf:"compute_isolation,omitempty"` + + // One or more disk_encryption block as defined below. + // +kubebuilder:validation:Optional + DiskEncryption []SparkClusterDiskEncryptionParameters `json:"diskEncryption,omitempty" tf:"disk_encryption,omitempty"` + + // Whether encryption in transit is enabled for this Cluster. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + EncryptionInTransitEnabled *bool `json:"encryptionInTransitEnabled,omitempty" tf:"encryption_in_transit_enabled,omitempty"` + + // An extension block as defined below. + // +kubebuilder:validation:Optional + Extension *SparkClusterExtensionParameters `json:"extension,omitempty" tf:"extension,omitempty"` + + // A gateway block as defined below. + // +kubebuilder:validation:Optional + Gateway *SparkClusterGatewayParameters `json:"gateway,omitempty" tf:"gateway,omitempty"` + + // Specifies the Azure Region which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A metastores block as defined below. + // +kubebuilder:validation:Optional + Metastores *SparkClusterMetastoresParameters `json:"metastores,omitempty" tf:"metastores,omitempty"` + + // A monitor block as defined below. + // +kubebuilder:validation:Optional + Monitor *SparkClusterMonitorParameters `json:"monitor,omitempty" tf:"monitor,omitempty"` + + // A network block as defined below. + // +kubebuilder:validation:Optional + Network *SparkClusterNetworkParameters `json:"network,omitempty" tf:"network,omitempty"` + + // Specifies the name of the Resource Group in which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A roles block as defined below. + // +kubebuilder:validation:Optional + Roles *SparkClusterRolesParameters `json:"roles,omitempty" tf:"roles,omitempty"` + + // A security_profile block as defined below. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + SecurityProfile *SparkClusterSecurityProfileParameters `json:"securityProfile,omitempty" tf:"security_profile,omitempty"` + + // One or more storage_account block as defined below. 
+ // +kubebuilder:validation:Optional + StorageAccount []SparkClusterStorageAccountParameters `json:"storageAccount,omitempty" tf:"storage_account,omitempty"` + + // A storage_account_gen2 block as defined below. + // +kubebuilder:validation:Optional + StorageAccountGen2 *SparkClusterStorageAccountGen2Parameters `json:"storageAccountGen2,omitempty" tf:"storage_account_gen2,omitempty"` + + // The minimal supported TLS version. Possible values are 1.0, 1.1 or 1.2. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + TLSMinVersion *string `json:"tlsMinVersion,omitempty" tf:"tls_min_version,omitempty"` + + // A map of Tags which should be assigned to this HDInsight Spark Cluster. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies the Tier which should be used for this HDInsight Spark Cluster. Possible values are Standard or Premium. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Tier *string `json:"tier,omitempty" tf:"tier,omitempty"` +} + +type SparkClusterRolesHeadNodeInitParameters struct { + + // A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created. + // +listType=set + SSHKeys []*string `json:"sshKeys,omitempty" tf:"ssh_keys,omitempty"` + + // The script action which will run on the cluster. One or more script_actions blocks as defined above. + ScriptActions []SparkClusterRolesHeadNodeScriptActionsInitParameters `json:"scriptActions,omitempty" tf:"script_actions,omitempty"` + + // The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` + + // The Size of the Virtual Machine which should be used as the Zookeeper Nodes. Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, 
Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created. + VMSize *string `json:"vmSize,omitempty" tf:"vm_size,omitempty"` + + // The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + VirtualNetworkID *string `json:"virtualNetworkId,omitempty" tf:"virtual_network_id,omitempty"` +} + +type SparkClusterRolesHeadNodeObservation struct { + + // A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created. + // +listType=set + SSHKeys []*string `json:"sshKeys,omitempty" tf:"ssh_keys,omitempty"` + + // The script action which will run on the cluster. One or more script_actions blocks as defined above. + ScriptActions []SparkClusterRolesHeadNodeScriptActionsObservation `json:"scriptActions,omitempty" tf:"script_actions,omitempty"` + + // The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` + + // The Size of the Virtual Machine which should be used as the Zookeeper Nodes. 
Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created. + VMSize *string `json:"vmSize,omitempty" tf:"vm_size,omitempty"` + + // The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + VirtualNetworkID *string `json:"virtualNetworkId,omitempty" tf:"virtual_network_id,omitempty"` +} + +type SparkClusterRolesHeadNodeParameters struct { + + // The Password associated with the local administrator for the Zookeeper Nodes. 
Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + PasswordSecretRef *v1.SecretKeySelector `json:"passwordSecretRef,omitempty" tf:"-"` + + // A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + // +listType=set + SSHKeys []*string `json:"sshKeys,omitempty" tf:"ssh_keys,omitempty"` + + // The script action which will run on the cluster. One or more script_actions blocks as defined above. + // +kubebuilder:validation:Optional + ScriptActions []SparkClusterRolesHeadNodeScriptActionsParameters `json:"scriptActions,omitempty" tf:"script_actions,omitempty"` + + // The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Username *string `json:"username" tf:"username,omitempty"` + + // The Size of the Virtual Machine which should be used as the Zookeeper Nodes. 
Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + VMSize *string `json:"vmSize" tf:"vm_size,omitempty"` + + // The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + VirtualNetworkID *string `json:"virtualNetworkId,omitempty" tf:"virtual_network_id,omitempty"` +} + +type SparkClusterRolesHeadNodeScriptActionsInitParameters struct { + + // The name of the script action. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The parameters for the script provided. + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The URI to the script. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type SparkClusterRolesHeadNodeScriptActionsObservation struct { + + // The name of the script action. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The parameters for the script provided. + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The URI to the script. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type SparkClusterRolesHeadNodeScriptActionsParameters struct { + + // The name of the script action. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The parameters for the script provided. + // +kubebuilder:validation:Optional + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The URI to the script. + // +kubebuilder:validation:Optional + URI *string `json:"uri" tf:"uri,omitempty"` +} + +type SparkClusterRolesInitParameters struct { + + // A head_node block as defined above. + HeadNode *SparkClusterRolesHeadNodeInitParameters `json:"headNode,omitempty" tf:"head_node,omitempty"` + + // A worker_node block as defined below. + WorkerNode *SparkClusterRolesWorkerNodeInitParameters `json:"workerNode,omitempty" tf:"worker_node,omitempty"` + + // A zookeeper_node block as defined below. + ZookeeperNode *SparkClusterRolesZookeeperNodeInitParameters `json:"zookeeperNode,omitempty" tf:"zookeeper_node,omitempty"` +} + +type SparkClusterRolesObservation struct { + + // A head_node block as defined above. 
+ HeadNode *SparkClusterRolesHeadNodeObservation `json:"headNode,omitempty" tf:"head_node,omitempty"` + + // A worker_node block as defined below. + WorkerNode *SparkClusterRolesWorkerNodeObservation `json:"workerNode,omitempty" tf:"worker_node,omitempty"` + + // A zookeeper_node block as defined below. + ZookeeperNode *SparkClusterRolesZookeeperNodeObservation `json:"zookeeperNode,omitempty" tf:"zookeeper_node,omitempty"` +} + +type SparkClusterRolesParameters struct { + + // A head_node block as defined above. + // +kubebuilder:validation:Optional + HeadNode *SparkClusterRolesHeadNodeParameters `json:"headNode" tf:"head_node,omitempty"` + + // A worker_node block as defined below. + // +kubebuilder:validation:Optional + WorkerNode *SparkClusterRolesWorkerNodeParameters `json:"workerNode" tf:"worker_node,omitempty"` + + // A zookeeper_node block as defined below. + // +kubebuilder:validation:Optional + ZookeeperNode *SparkClusterRolesZookeeperNodeParameters `json:"zookeeperNode" tf:"zookeeper_node,omitempty"` +} + +type SparkClusterRolesWorkerNodeAutoscaleInitParameters struct { + + // A capacity block as defined below. + Capacity *WorkerNodeAutoscaleCapacityInitParameters `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // A recurrence block as defined below. + Recurrence *RolesWorkerNodeAutoscaleRecurrenceInitParameters `json:"recurrence,omitempty" tf:"recurrence,omitempty"` +} + +type SparkClusterRolesWorkerNodeAutoscaleObservation struct { + + // A capacity block as defined below. + Capacity *WorkerNodeAutoscaleCapacityObservation `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // A recurrence block as defined below. + Recurrence *RolesWorkerNodeAutoscaleRecurrenceObservation `json:"recurrence,omitempty" tf:"recurrence,omitempty"` +} + +type SparkClusterRolesWorkerNodeAutoscaleParameters struct { + + // A capacity block as defined below. 
+ // +kubebuilder:validation:Optional + Capacity *WorkerNodeAutoscaleCapacityParameters `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // A recurrence block as defined below. + // +kubebuilder:validation:Optional + Recurrence *RolesWorkerNodeAutoscaleRecurrenceParameters `json:"recurrence,omitempty" tf:"recurrence,omitempty"` +} + +type SparkClusterRolesWorkerNodeInitParameters struct { + + // A autoscale block as defined below. + Autoscale *SparkClusterRolesWorkerNodeAutoscaleInitParameters `json:"autoscale,omitempty" tf:"autoscale,omitempty"` + + // A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created. + // +listType=set + SSHKeys []*string `json:"sshKeys,omitempty" tf:"ssh_keys,omitempty"` + + // The script action which will run on the cluster. One or more script_actions blocks as defined above. + ScriptActions []SparkClusterRolesWorkerNodeScriptActionsInitParameters `json:"scriptActions,omitempty" tf:"script_actions,omitempty"` + + // The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // The number of instances which should be run for the Worker Nodes. 
+ TargetInstanceCount *float64 `json:"targetInstanceCount,omitempty" tf:"target_instance_count,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` + + // The Size of the Virtual Machine which should be used as the Zookeeper Nodes. Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created. 
+ VMSize *string `json:"vmSize,omitempty" tf:"vm_size,omitempty"` + + // The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + VirtualNetworkID *string `json:"virtualNetworkId,omitempty" tf:"virtual_network_id,omitempty"` +} + +type SparkClusterRolesWorkerNodeObservation struct { + + // A autoscale block as defined below. + Autoscale *SparkClusterRolesWorkerNodeAutoscaleObservation `json:"autoscale,omitempty" tf:"autoscale,omitempty"` + + // A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created. + // +listType=set + SSHKeys []*string `json:"sshKeys,omitempty" tf:"ssh_keys,omitempty"` + + // The script action which will run on the cluster. One or more script_actions blocks as defined above. + ScriptActions []SparkClusterRolesWorkerNodeScriptActionsObservation `json:"scriptActions,omitempty" tf:"script_actions,omitempty"` + + // The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // The number of instances which should be run for the Worker Nodes. + TargetInstanceCount *float64 `json:"targetInstanceCount,omitempty" tf:"target_instance_count,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` + + // The Size of the Virtual Machine which should be used as the Zookeeper Nodes. 
Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created. + VMSize *string `json:"vmSize,omitempty" tf:"vm_size,omitempty"` + + // The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + VirtualNetworkID *string `json:"virtualNetworkId,omitempty" tf:"virtual_network_id,omitempty"` +} + +type SparkClusterRolesWorkerNodeParameters struct { + + // A autoscale block as defined below. 
+ // +kubebuilder:validation:Optional + Autoscale *SparkClusterRolesWorkerNodeAutoscaleParameters `json:"autoscale,omitempty" tf:"autoscale,omitempty"` + + // The Password associated with the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + PasswordSecretRef *v1.SecretKeySelector `json:"passwordSecretRef,omitempty" tf:"-"` + + // A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + // +listType=set + SSHKeys []*string `json:"sshKeys,omitempty" tf:"ssh_keys,omitempty"` + + // The script action which will run on the cluster. One or more script_actions blocks as defined above. + // +kubebuilder:validation:Optional + ScriptActions []SparkClusterRolesWorkerNodeScriptActionsParameters `json:"scriptActions,omitempty" tf:"script_actions,omitempty"` + + // The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // The number of instances which should be run for the Worker Nodes. 
+ // +kubebuilder:validation:Optional + TargetInstanceCount *float64 `json:"targetInstanceCount" tf:"target_instance_count,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Username *string `json:"username" tf:"username,omitempty"` + + // The Size of the Virtual Machine which should be used as the Zookeeper Nodes. Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + VMSize *string `json:"vmSize" tf:"vm_size,omitempty"` + + // The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + VirtualNetworkID *string `json:"virtualNetworkId,omitempty" tf:"virtual_network_id,omitempty"` +} + +type SparkClusterRolesWorkerNodeScriptActionsInitParameters struct { + + // The name of the script action. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The parameters for the script provided. + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The URI to the script. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type SparkClusterRolesWorkerNodeScriptActionsObservation struct { + + // The name of the script action. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The parameters for the script provided. + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The URI to the script. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type SparkClusterRolesWorkerNodeScriptActionsParameters struct { + + // The name of the script action. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The parameters for the script provided. + // +kubebuilder:validation:Optional + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The URI to the script. + // +kubebuilder:validation:Optional + URI *string `json:"uri" tf:"uri,omitempty"` +} + +type SparkClusterRolesZookeeperNodeInitParameters struct { + + // A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created. + // +listType=set + SSHKeys []*string `json:"sshKeys,omitempty" tf:"ssh_keys,omitempty"` + + // The script action which will run on the cluster. 
One or more script_actions blocks as defined above. + ScriptActions []SparkClusterRolesZookeeperNodeScriptActionsInitParameters `json:"scriptActions,omitempty" tf:"script_actions,omitempty"` + + // The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` + + // The Size of the Virtual Machine which should be used as the Zookeeper Nodes. 
Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created. + VMSize *string `json:"vmSize,omitempty" tf:"vm_size,omitempty"` + + // The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. 
+ VirtualNetworkID *string `json:"virtualNetworkId,omitempty" tf:"virtual_network_id,omitempty"` +} + +type SparkClusterRolesZookeeperNodeObservation struct { + + // A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created. + // +listType=set + SSHKeys []*string `json:"sshKeys,omitempty" tf:"ssh_keys,omitempty"` + + // The script action which will run on the cluster. One or more script_actions blocks as defined above. + ScriptActions []SparkClusterRolesZookeeperNodeScriptActionsObservation `json:"scriptActions,omitempty" tf:"script_actions,omitempty"` + + // The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + Username *string `json:"username,omitempty" tf:"username,omitempty"` + + // The Size of the Virtual Machine which should be used as the Zookeeper Nodes. 
Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created. + VMSize *string `json:"vmSize,omitempty" tf:"vm_size,omitempty"` + + // The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + VirtualNetworkID *string `json:"virtualNetworkId,omitempty" tf:"virtual_network_id,omitempty"` +} + +type SparkClusterRolesZookeeperNodeParameters struct { + + // The Password associated with the local administrator for the Zookeeper Nodes. 
Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + PasswordSecretRef *v1.SecretKeySelector `json:"passwordSecretRef,omitempty" tf:"-"` + + // A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + // +listType=set + SSHKeys []*string `json:"sshKeys,omitempty" tf:"ssh_keys,omitempty"` + + // The script action which will run on the cluster. One or more script_actions blocks as defined above. + // +kubebuilder:validation:Optional + ScriptActions []SparkClusterRolesZookeeperNodeScriptActionsParameters `json:"scriptActions,omitempty" tf:"script_actions,omitempty"` + + // The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Username *string `json:"username" tf:"username,omitempty"` + + // The Size of the Virtual Machine which should be used as the Zookeeper Nodes. 
Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + VMSize *string `json:"vmSize" tf:"vm_size,omitempty"` + + // The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + VirtualNetworkID *string `json:"virtualNetworkId,omitempty" tf:"virtual_network_id,omitempty"` +} + +type SparkClusterRolesZookeeperNodeScriptActionsInitParameters struct { + + // The name of the script action. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The parameters for the script provided. + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The URI to the script. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type SparkClusterRolesZookeeperNodeScriptActionsObservation struct { + + // The name of the script action. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The parameters for the script provided. + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The URI to the script. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` +} + +type SparkClusterRolesZookeeperNodeScriptActionsParameters struct { + + // The name of the script action. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The parameters for the script provided. + // +kubebuilder:validation:Optional + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The URI to the script. + // +kubebuilder:validation:Optional + URI *string `json:"uri" tf:"uri,omitempty"` +} + +type SparkClusterSecurityProfileInitParameters struct { + + // The resource ID of the Azure Active Directory Domain Service. Changing this forces a new resource to be created. + AaddsResourceID *string `json:"aaddsResourceId,omitempty" tf:"aadds_resource_id,omitempty"` + + // A list of the distinguished names for the cluster user groups. Changing this forces a new resource to be created. + // +listType=set + ClusterUsersGroupDNS []*string `json:"clusterUsersGroupDns,omitempty" tf:"cluster_users_group_dns,omitempty"` + + // The name of the Azure Active Directory Domain. 
Changing this forces a new resource to be created. + DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` + + // The username of the Azure Active Directory Domain. Changing this forces a new resource to be created. + DomainUsername *string `json:"domainUsername,omitempty" tf:"domain_username,omitempty"` + + // A list of the LDAPS URLs to communicate with the Azure Active Directory. Changing this forces a new resource to be created. + // +listType=set + LdapsUrls []*string `json:"ldapsUrls,omitempty" tf:"ldaps_urls,omitempty"` + + // The User Assigned Identity for the HDInsight Cluster. Changing this forces a new resource to be created. + MsiResourceID *string `json:"msiResourceId,omitempty" tf:"msi_resource_id,omitempty"` +} + +type SparkClusterSecurityProfileObservation struct { + + // The resource ID of the Azure Active Directory Domain Service. Changing this forces a new resource to be created. + AaddsResourceID *string `json:"aaddsResourceId,omitempty" tf:"aadds_resource_id,omitempty"` + + // A list of the distinguished names for the cluster user groups. Changing this forces a new resource to be created. + // +listType=set + ClusterUsersGroupDNS []*string `json:"clusterUsersGroupDns,omitempty" tf:"cluster_users_group_dns,omitempty"` + + // The name of the Azure Active Directory Domain. Changing this forces a new resource to be created. + DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` + + // The username of the Azure Active Directory Domain. Changing this forces a new resource to be created. + DomainUsername *string `json:"domainUsername,omitempty" tf:"domain_username,omitempty"` + + // A list of the LDAPS URLs to communicate with the Azure Active Directory. Changing this forces a new resource to be created. + // +listType=set + LdapsUrls []*string `json:"ldapsUrls,omitempty" tf:"ldaps_urls,omitempty"` + + // The User Assigned Identity for the HDInsight Cluster. 
Changing this forces a new resource to be created. + MsiResourceID *string `json:"msiResourceId,omitempty" tf:"msi_resource_id,omitempty"` +} + +type SparkClusterSecurityProfileParameters struct { + + // The resource ID of the Azure Active Directory Domain Service. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + AaddsResourceID *string `json:"aaddsResourceId" tf:"aadds_resource_id,omitempty"` + + // A list of the distinguished names for the cluster user groups. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + // +listType=set + ClusterUsersGroupDNS []*string `json:"clusterUsersGroupDns,omitempty" tf:"cluster_users_group_dns,omitempty"` + + // The name of the Azure Active Directory Domain. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + DomainName *string `json:"domainName" tf:"domain_name,omitempty"` + + // The user password of the Azure Active Directory Domain. Changing this forces a new resource to be created. + // +kubebuilder:validation:Required + DomainUserPasswordSecretRef v1.SecretKeySelector `json:"domainUserPasswordSecretRef" tf:"-"` + + // The username of the Azure Active Directory Domain. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + DomainUsername *string `json:"domainUsername" tf:"domain_username,omitempty"` + + // A list of the LDAPS URLs to communicate with the Azure Active Directory. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + // +listType=set + LdapsUrls []*string `json:"ldapsUrls" tf:"ldaps_urls,omitempty"` + + // The User Assigned Identity for the HDInsight Cluster. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + MsiResourceID *string `json:"msiResourceId" tf:"msi_resource_id,omitempty"` +} + +type SparkClusterStorageAccountGen2InitParameters struct { + + // The ID of the Gen2 Filesystem. 
Changing this forces a new resource to be created. + FileSystemID *string `json:"filesystemId,omitempty" tf:"filesystem_id,omitempty"` + + // Is this the Default Storage Account for the HDInsight Hadoop Cluster? Changing this forces a new resource to be created. + IsDefault *bool `json:"isDefault,omitempty" tf:"is_default,omitempty"` + + // The ID of Managed Identity to use for accessing the Gen2 filesystem. Changing this forces a new resource to be created. + ManagedIdentityResourceID *string `json:"managedIdentityResourceId,omitempty" tf:"managed_identity_resource_id,omitempty"` + + // The ID of the Storage Account. Changing this forces a new resource to be created. + StorageResourceID *string `json:"storageResourceId,omitempty" tf:"storage_resource_id,omitempty"` +} + +type SparkClusterStorageAccountGen2Observation struct { + + // The ID of the Gen2 Filesystem. Changing this forces a new resource to be created. + FileSystemID *string `json:"filesystemId,omitempty" tf:"filesystem_id,omitempty"` + + // Is this the Default Storage Account for the HDInsight Hadoop Cluster? Changing this forces a new resource to be created. + IsDefault *bool `json:"isDefault,omitempty" tf:"is_default,omitempty"` + + // The ID of Managed Identity to use for accessing the Gen2 filesystem. Changing this forces a new resource to be created. + ManagedIdentityResourceID *string `json:"managedIdentityResourceId,omitempty" tf:"managed_identity_resource_id,omitempty"` + + // The ID of the Storage Account. Changing this forces a new resource to be created. + StorageResourceID *string `json:"storageResourceId,omitempty" tf:"storage_resource_id,omitempty"` +} + +type SparkClusterStorageAccountGen2Parameters struct { + + // The ID of the Gen2 Filesystem. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + FileSystemID *string `json:"filesystemId" tf:"filesystem_id,omitempty"` + + // Is this the Default Storage Account for the HDInsight Hadoop Cluster? 
Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + IsDefault *bool `json:"isDefault" tf:"is_default,omitempty"` + + // The ID of Managed Identity to use for accessing the Gen2 filesystem. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ManagedIdentityResourceID *string `json:"managedIdentityResourceId" tf:"managed_identity_resource_id,omitempty"` + + // The ID of the Storage Account. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + StorageResourceID *string `json:"storageResourceId" tf:"storage_resource_id,omitempty"` +} + +type SparkClusterStorageAccountInitParameters struct { + + // Is this the Default Storage Account for the HDInsight Hadoop Cluster? Changing this forces a new resource to be created. + IsDefault *bool `json:"isDefault,omitempty" tf:"is_default,omitempty"` + + // The ID of the Storage Container. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Container + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + StorageContainerID *string `json:"storageContainerId,omitempty" tf:"storage_container_id,omitempty"` + + // Reference to a Container in storage to populate storageContainerId. + // +kubebuilder:validation:Optional + StorageContainerIDRef *v1.Reference `json:"storageContainerIdRef,omitempty" tf:"-"` + + // Selector for a Container in storage to populate storageContainerId. + // +kubebuilder:validation:Optional + StorageContainerIDSelector *v1.Selector `json:"storageContainerIdSelector,omitempty" tf:"-"` + + // The ID of the Storage Account. Changing this forces a new resource to be created. 
+ StorageResourceID *string `json:"storageResourceId,omitempty" tf:"storage_resource_id,omitempty"` +} + +type SparkClusterStorageAccountObservation struct { + + // Is this the Default Storage Account for the HDInsight Hadoop Cluster? Changing this forces a new resource to be created. + IsDefault *bool `json:"isDefault,omitempty" tf:"is_default,omitempty"` + + // The ID of the Storage Container. Changing this forces a new resource to be created. + StorageContainerID *string `json:"storageContainerId,omitempty" tf:"storage_container_id,omitempty"` + + // The ID of the Storage Account. Changing this forces a new resource to be created. + StorageResourceID *string `json:"storageResourceId,omitempty" tf:"storage_resource_id,omitempty"` +} + +type SparkClusterStorageAccountParameters struct { + + // Is this the Default Storage Account for the HDInsight Hadoop Cluster? Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + IsDefault *bool `json:"isDefault" tf:"is_default,omitempty"` + + // The Access Key which should be used to connect to the Storage Account. Changing this forces a new resource to be created. + // +kubebuilder:validation:Required + StorageAccountKeySecretRef v1.SecretKeySelector `json:"storageAccountKeySecretRef" tf:"-"` + + // The ID of the Storage Container. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Container + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + StorageContainerID *string `json:"storageContainerId,omitempty" tf:"storage_container_id,omitempty"` + + // Reference to a Container in storage to populate storageContainerId. 
+ // +kubebuilder:validation:Optional + StorageContainerIDRef *v1.Reference `json:"storageContainerIdRef,omitempty" tf:"-"` + + // Selector for a Container in storage to populate storageContainerId. + // +kubebuilder:validation:Optional + StorageContainerIDSelector *v1.Selector `json:"storageContainerIdSelector,omitempty" tf:"-"` + + // The ID of the Storage Account. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + StorageResourceID *string `json:"storageResourceId,omitempty" tf:"storage_resource_id,omitempty"` +} + +type WorkerNodeAutoscaleCapacityInitParameters struct { + + // The maximum number of worker nodes to autoscale to based on the cluster's activity. + MaxInstanceCount *float64 `json:"maxInstanceCount,omitempty" tf:"max_instance_count,omitempty"` + + // The minimum number of worker nodes to autoscale to based on the cluster's activity. + MinInstanceCount *float64 `json:"minInstanceCount,omitempty" tf:"min_instance_count,omitempty"` +} + +type WorkerNodeAutoscaleCapacityObservation struct { + + // The maximum number of worker nodes to autoscale to based on the cluster's activity. + MaxInstanceCount *float64 `json:"maxInstanceCount,omitempty" tf:"max_instance_count,omitempty"` + + // The minimum number of worker nodes to autoscale to based on the cluster's activity. + MinInstanceCount *float64 `json:"minInstanceCount,omitempty" tf:"min_instance_count,omitempty"` +} + +type WorkerNodeAutoscaleCapacityParameters struct { + + // The maximum number of worker nodes to autoscale to based on the cluster's activity. + // +kubebuilder:validation:Optional + MaxInstanceCount *float64 `json:"maxInstanceCount" tf:"max_instance_count,omitempty"` + + // The minimum number of worker nodes to autoscale to based on the cluster's activity. 
+ // +kubebuilder:validation:Optional + MinInstanceCount *float64 `json:"minInstanceCount" tf:"min_instance_count,omitempty"` +} + +type WorkerNodeAutoscaleRecurrenceScheduleInitParameters struct { + + // The days of the week to perform autoscale. Possible values are Monday, Tuesday, Wednesday, Thursday, Friday, Saturday and Sunday. + Days []*string `json:"days,omitempty" tf:"days,omitempty"` + + // The number of instances which should be run for the Worker Nodes. + TargetInstanceCount *float64 `json:"targetInstanceCount,omitempty" tf:"target_instance_count,omitempty"` + + // The time of day to perform the autoscale in 24hour format. + Time *string `json:"time,omitempty" tf:"time,omitempty"` +} + +type WorkerNodeAutoscaleRecurrenceScheduleObservation struct { + + // The days of the week to perform autoscale. Possible values are Monday, Tuesday, Wednesday, Thursday, Friday, Saturday and Sunday. + Days []*string `json:"days,omitempty" tf:"days,omitempty"` + + // The number of instances which should be run for the Worker Nodes. + TargetInstanceCount *float64 `json:"targetInstanceCount,omitempty" tf:"target_instance_count,omitempty"` + + // The time of day to perform the autoscale in 24hour format. + Time *string `json:"time,omitempty" tf:"time,omitempty"` +} + +type WorkerNodeAutoscaleRecurrenceScheduleParameters struct { + + // The days of the week to perform autoscale. Possible values are Monday, Tuesday, Wednesday, Thursday, Friday, Saturday and Sunday. + // +kubebuilder:validation:Optional + Days []*string `json:"days" tf:"days,omitempty"` + + // The number of instances which should be run for the Worker Nodes. + // +kubebuilder:validation:Optional + TargetInstanceCount *float64 `json:"targetInstanceCount" tf:"target_instance_count,omitempty"` + + // The time of day to perform the autoscale in 24hour format. 
+ // +kubebuilder:validation:Optional + Time *string `json:"time" tf:"time,omitempty"` +} + +// SparkClusterSpec defines the desired state of SparkCluster +type SparkClusterSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider SparkClusterParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider SparkClusterInitParameters `json:"initProvider,omitempty"` +} + +// SparkClusterStatus defines the observed state of SparkCluster. +type SparkClusterStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SparkClusterObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// SparkCluster is the Schema for the SparkClusters API. Manages a HDInsight Spark Cluster. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type SparkCluster struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.clusterVersion) || (has(self.initProvider) && has(self.initProvider.clusterVersion))",message="spec.forProvider.clusterVersion is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.componentVersion) || (has(self.initProvider) && has(self.initProvider.componentVersion))",message="spec.forProvider.componentVersion is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.gateway) || (has(self.initProvider) && has(self.initProvider.gateway))",message="spec.forProvider.gateway is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in 
self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.roles) || (has(self.initProvider) && has(self.initProvider.roles))",message="spec.forProvider.roles is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.tier) || (has(self.initProvider) && has(self.initProvider.tier))",message="spec.forProvider.tier is a required parameter" + Spec SparkClusterSpec `json:"spec"` + Status SparkClusterStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SparkClusterList contains a list of SparkClusters +type SparkClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SparkCluster `json:"items"` +} + +// Repository type metadata. +var ( + SparkCluster_Kind = "SparkCluster" + SparkCluster_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: SparkCluster_Kind}.String() + SparkCluster_KindAPIVersion = SparkCluster_Kind + "." + CRDGroupVersion.String() + SparkCluster_GroupVersionKind = CRDGroupVersion.WithKind(SparkCluster_Kind) +) + +func init() { + SchemeBuilder.Register(&SparkCluster{}, &SparkClusterList{}) +} diff --git a/apis/healthcareapis/v1beta1/zz_generated.conversion_hubs.go b/apis/healthcareapis/v1beta1/zz_generated.conversion_hubs.go index 3ebf0fdc2..1d7c11d6b 100755 --- a/apis/healthcareapis/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/healthcareapis/v1beta1/zz_generated.conversion_hubs.go @@ -6,20 +6,8 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *HealthcareDICOMService) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *HealthcareFHIRService) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *HealthcareMedtechService) Hub() {} - // Hub marks this type as a conversion hub. 
func (tr *HealthcareMedtechServiceFHIRDestination) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *HealthcareService) Hub() {} - // Hub marks this type as a conversion hub. func (tr *HealthcareWorkspace) Hub() {} diff --git a/apis/healthcareapis/v1beta1/zz_generated.conversion_spokes.go b/apis/healthcareapis/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..a1a63cd5e --- /dev/null +++ b/apis/healthcareapis/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,94 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this HealthcareDICOMService to the hub type. +func (tr *HealthcareDICOMService) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the HealthcareDICOMService type. +func (tr *HealthcareDICOMService) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this HealthcareFHIRService to the hub type. 
+func (tr *HealthcareFHIRService) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the HealthcareFHIRService type. +func (tr *HealthcareFHIRService) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this HealthcareMedtechService to the hub type. +func (tr *HealthcareMedtechService) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the HealthcareMedtechService type. 
+func (tr *HealthcareMedtechService) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this HealthcareService to the hub type. +func (tr *HealthcareService) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the HealthcareService type. +func (tr *HealthcareService) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/healthcareapis/v1beta1/zz_generated.resolvers.go b/apis/healthcareapis/v1beta1/zz_generated.resolvers.go index 8fbdd9c11..bea66483a 100644 --- a/apis/healthcareapis/v1beta1/zz_generated.resolvers.go +++ b/apis/healthcareapis/v1beta1/zz_generated.resolvers.go @@ -272,7 +272,7 @@ func (mg *HealthcareMedtechServiceFHIRDestination) ResolveReferences(ctx context var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("healthcareapis.azure.upbound.io", "v1beta1", "HealthcareFHIRService", 
"HealthcareFHIRServiceList") + m, l, err = apisresolver.GetManagedResource("healthcareapis.azure.upbound.io", "v1beta2", "HealthcareFHIRService", "HealthcareFHIRServiceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -291,7 +291,7 @@ func (mg *HealthcareMedtechServiceFHIRDestination) ResolveReferences(ctx context mg.Spec.ForProvider.DestinationFHIRServiceID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.DestinationFHIRServiceIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("healthcareapis.azure.upbound.io", "v1beta1", "HealthcareMedtechService", "HealthcareMedtechServiceList") + m, l, err = apisresolver.GetManagedResource("healthcareapis.azure.upbound.io", "v1beta2", "HealthcareMedtechService", "HealthcareMedtechServiceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -310,7 +310,7 @@ func (mg *HealthcareMedtechServiceFHIRDestination) ResolveReferences(ctx context mg.Spec.ForProvider.MedtechServiceID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.MedtechServiceIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("healthcareapis.azure.upbound.io", "v1beta1", "HealthcareFHIRService", "HealthcareFHIRServiceList") + m, l, err = apisresolver.GetManagedResource("healthcareapis.azure.upbound.io", "v1beta2", "HealthcareFHIRService", "HealthcareFHIRServiceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/healthcareapis/v1beta1/zz_healthcaremedtechservicefhirdestination_types.go b/apis/healthcareapis/v1beta1/zz_healthcaremedtechservicefhirdestination_types.go index 53183da6e..b8140ef45 100755 --- a/apis/healthcareapis/v1beta1/zz_healthcaremedtechservicefhirdestination_types.go +++ 
b/apis/healthcareapis/v1beta1/zz_healthcaremedtechservicefhirdestination_types.go @@ -19,7 +19,7 @@ type HealthcareMedtechServiceFHIRDestinationInitParameters struct { DestinationFHIRMappingJSON *string `json:"destinationFhirMappingJson,omitempty" tf:"destination_fhir_mapping_json,omitempty"` // Specifies the destination fhir service id of the Med Tech Service Fhir Destination. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/healthcareapis/v1beta1.HealthcareFHIRService + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/healthcareapis/v1beta2.HealthcareFHIRService // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() DestinationFHIRServiceID *string `json:"destinationFhirServiceId,omitempty" tf:"destination_fhir_service_id,omitempty"` @@ -66,7 +66,7 @@ type HealthcareMedtechServiceFHIRDestinationParameters struct { DestinationFHIRMappingJSON *string `json:"destinationFhirMappingJson,omitempty" tf:"destination_fhir_mapping_json,omitempty"` // Specifies the destination fhir service id of the Med Tech Service Fhir Destination. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/healthcareapis/v1beta1.HealthcareFHIRService + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/healthcareapis/v1beta2.HealthcareFHIRService // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() // +kubebuilder:validation:Optional DestinationFHIRServiceID *string `json:"destinationFhirServiceId,omitempty" tf:"destination_fhir_service_id,omitempty"` @@ -88,7 +88,7 @@ type HealthcareMedtechServiceFHIRDestinationParameters struct { Location *string `json:"location,omitempty" tf:"location,omitempty"` // Specifies the name of the Healthcare Med Tech Service where the Healthcare Med Tech Service Fhir Destination should exist. 
Changing this forces a new Healthcare Med Tech Service Fhir Destination to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/healthcareapis/v1beta1.HealthcareMedtechService + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/healthcareapis/v1beta2.HealthcareMedtechService // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() // +kubebuilder:validation:Optional MedtechServiceID *string `json:"medtechServiceId,omitempty" tf:"medtech_service_id,omitempty"` diff --git a/apis/healthcareapis/v1beta2/zz_generated.conversion_hubs.go b/apis/healthcareapis/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 000000000..5402f51b9 --- /dev/null +++ b/apis/healthcareapis/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,19 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *HealthcareDICOMService) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *HealthcareFHIRService) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *HealthcareMedtechService) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *HealthcareService) Hub() {} diff --git a/apis/healthcareapis/v1beta2/zz_generated.deepcopy.go b/apis/healthcareapis/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..a1e0921df --- /dev/null +++ b/apis/healthcareapis/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,2474 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationConfigurationInitParameters) DeepCopyInto(out *AuthenticationConfigurationInitParameters) { + *out = *in + if in.Audience != nil { + in, out := &in.Audience, &out.Audience + *out = new(string) + **out = **in + } + if in.Authority != nil { + in, out := &in.Authority, &out.Authority + *out = new(string) + **out = **in + } + if in.SmartProxyEnabled != nil { + in, out := &in.SmartProxyEnabled, &out.SmartProxyEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationConfigurationInitParameters. +func (in *AuthenticationConfigurationInitParameters) DeepCopy() *AuthenticationConfigurationInitParameters { + if in == nil { + return nil + } + out := new(AuthenticationConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationConfigurationObservation) DeepCopyInto(out *AuthenticationConfigurationObservation) { + *out = *in + if in.Audience != nil { + in, out := &in.Audience, &out.Audience + *out = new(string) + **out = **in + } + if in.Authority != nil { + in, out := &in.Authority, &out.Authority + *out = new(string) + **out = **in + } + if in.SmartProxyEnabled != nil { + in, out := &in.SmartProxyEnabled, &out.SmartProxyEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationConfigurationObservation. 
+func (in *AuthenticationConfigurationObservation) DeepCopy() *AuthenticationConfigurationObservation { + if in == nil { + return nil + } + out := new(AuthenticationConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationConfigurationParameters) DeepCopyInto(out *AuthenticationConfigurationParameters) { + *out = *in + if in.Audience != nil { + in, out := &in.Audience, &out.Audience + *out = new(string) + **out = **in + } + if in.Authority != nil { + in, out := &in.Authority, &out.Authority + *out = new(string) + **out = **in + } + if in.SmartProxyEnabled != nil { + in, out := &in.SmartProxyEnabled, &out.SmartProxyEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationConfigurationParameters. +func (in *AuthenticationConfigurationParameters) DeepCopy() *AuthenticationConfigurationParameters { + if in == nil { + return nil + } + out := new(AuthenticationConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationInitParameters) DeepCopyInto(out *AuthenticationInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationInitParameters. +func (in *AuthenticationInitParameters) DeepCopy() *AuthenticationInitParameters { + if in == nil { + return nil + } + out := new(AuthenticationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuthenticationObservation) DeepCopyInto(out *AuthenticationObservation) { + *out = *in + if in.Audience != nil { + in, out := &in.Audience, &out.Audience + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Authority != nil { + in, out := &in.Authority, &out.Authority + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationObservation. +func (in *AuthenticationObservation) DeepCopy() *AuthenticationObservation { + if in == nil { + return nil + } + out := new(AuthenticationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationParameters) DeepCopyInto(out *AuthenticationParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationParameters. +func (in *AuthenticationParameters) DeepCopy() *AuthenticationParameters { + if in == nil { + return nil + } + out := new(AuthenticationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CorsConfigurationInitParameters) DeepCopyInto(out *CorsConfigurationInitParameters) { + *out = *in + if in.AllowCredentials != nil { + in, out := &in.AllowCredentials, &out.AllowCredentials + *out = new(bool) + **out = **in + } + if in.AllowedHeaders != nil { + in, out := &in.AllowedHeaders, &out.AllowedHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedMethods != nil { + in, out := &in.AllowedMethods, &out.AllowedMethods + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedOrigins != nil { + in, out := &in.AllowedOrigins, &out.AllowedOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MaxAgeInSeconds != nil { + in, out := &in.MaxAgeInSeconds, &out.MaxAgeInSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CorsConfigurationInitParameters. +func (in *CorsConfigurationInitParameters) DeepCopy() *CorsConfigurationInitParameters { + if in == nil { + return nil + } + out := new(CorsConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CorsConfigurationObservation) DeepCopyInto(out *CorsConfigurationObservation) { + *out = *in + if in.AllowCredentials != nil { + in, out := &in.AllowCredentials, &out.AllowCredentials + *out = new(bool) + **out = **in + } + if in.AllowedHeaders != nil { + in, out := &in.AllowedHeaders, &out.AllowedHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedMethods != nil { + in, out := &in.AllowedMethods, &out.AllowedMethods + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedOrigins != nil { + in, out := &in.AllowedOrigins, &out.AllowedOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MaxAgeInSeconds != nil { + in, out := &in.MaxAgeInSeconds, &out.MaxAgeInSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CorsConfigurationObservation. +func (in *CorsConfigurationObservation) DeepCopy() *CorsConfigurationObservation { + if in == nil { + return nil + } + out := new(CorsConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CorsConfigurationParameters) DeepCopyInto(out *CorsConfigurationParameters) { + *out = *in + if in.AllowCredentials != nil { + in, out := &in.AllowCredentials, &out.AllowCredentials + *out = new(bool) + **out = **in + } + if in.AllowedHeaders != nil { + in, out := &in.AllowedHeaders, &out.AllowedHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedMethods != nil { + in, out := &in.AllowedMethods, &out.AllowedMethods + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedOrigins != nil { + in, out := &in.AllowedOrigins, &out.AllowedOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MaxAgeInSeconds != nil { + in, out := &in.MaxAgeInSeconds, &out.MaxAgeInSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CorsConfigurationParameters. +func (in *CorsConfigurationParameters) DeepCopy() *CorsConfigurationParameters { + if in == nil { + return nil + } + out := new(CorsConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CorsInitParameters) DeepCopyInto(out *CorsInitParameters) { + *out = *in + if in.AllowedHeaders != nil { + in, out := &in.AllowedHeaders, &out.AllowedHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedMethods != nil { + in, out := &in.AllowedMethods, &out.AllowedMethods + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedOrigins != nil { + in, out := &in.AllowedOrigins, &out.AllowedOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CredentialsAllowed != nil { + in, out := &in.CredentialsAllowed, &out.CredentialsAllowed + *out = new(bool) + **out = **in + } + if in.MaxAgeInSeconds != nil { + in, out := &in.MaxAgeInSeconds, &out.MaxAgeInSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CorsInitParameters. +func (in *CorsInitParameters) DeepCopy() *CorsInitParameters { + if in == nil { + return nil + } + out := new(CorsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CorsObservation) DeepCopyInto(out *CorsObservation) { + *out = *in + if in.AllowedHeaders != nil { + in, out := &in.AllowedHeaders, &out.AllowedHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedMethods != nil { + in, out := &in.AllowedMethods, &out.AllowedMethods + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedOrigins != nil { + in, out := &in.AllowedOrigins, &out.AllowedOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CredentialsAllowed != nil { + in, out := &in.CredentialsAllowed, &out.CredentialsAllowed + *out = new(bool) + **out = **in + } + if in.MaxAgeInSeconds != nil { + in, out := &in.MaxAgeInSeconds, &out.MaxAgeInSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CorsObservation. +func (in *CorsObservation) DeepCopy() *CorsObservation { + if in == nil { + return nil + } + out := new(CorsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CorsParameters) DeepCopyInto(out *CorsParameters) { + *out = *in + if in.AllowedHeaders != nil { + in, out := &in.AllowedHeaders, &out.AllowedHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedMethods != nil { + in, out := &in.AllowedMethods, &out.AllowedMethods + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedOrigins != nil { + in, out := &in.AllowedOrigins, &out.AllowedOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CredentialsAllowed != nil { + in, out := &in.CredentialsAllowed, &out.CredentialsAllowed + *out = new(bool) + **out = **in + } + if in.MaxAgeInSeconds != nil { + in, out := &in.MaxAgeInSeconds, &out.MaxAgeInSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CorsParameters. +func (in *CorsParameters) DeepCopy() *CorsParameters { + if in == nil { + return nil + } + out := new(CorsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HealthcareDICOMService) DeepCopyInto(out *HealthcareDICOMService) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthcareDICOMService. 
+func (in *HealthcareDICOMService) DeepCopy() *HealthcareDICOMService { + if in == nil { + return nil + } + out := new(HealthcareDICOMService) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HealthcareDICOMService) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HealthcareDICOMServiceInitParameters) DeepCopyInto(out *HealthcareDICOMServiceInitParameters) { + *out = *in + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthcareDICOMServiceInitParameters. +func (in *HealthcareDICOMServiceInitParameters) DeepCopy() *HealthcareDICOMServiceInitParameters { + if in == nil { + return nil + } + out := new(HealthcareDICOMServiceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HealthcareDICOMServiceList) DeepCopyInto(out *HealthcareDICOMServiceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HealthcareDICOMService, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthcareDICOMServiceList. +func (in *HealthcareDICOMServiceList) DeepCopy() *HealthcareDICOMServiceList { + if in == nil { + return nil + } + out := new(HealthcareDICOMServiceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HealthcareDICOMServiceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HealthcareDICOMServiceObservation) DeepCopyInto(out *HealthcareDICOMServiceObservation) { + *out = *in + if in.Authentication != nil { + in, out := &in.Authentication, &out.Authentication + *out = make([]AuthenticationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PrivateEndpoint != nil { + in, out := &in.PrivateEndpoint, &out.PrivateEndpoint + *out = make([]PrivateEndpointObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ServiceURL != nil { + in, out := &in.ServiceURL, &out.ServiceURL + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.WorkspaceID != nil { + in, out := &in.WorkspaceID, &out.WorkspaceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthcareDICOMServiceObservation. 
+func (in *HealthcareDICOMServiceObservation) DeepCopy() *HealthcareDICOMServiceObservation { + if in == nil { + return nil + } + out := new(HealthcareDICOMServiceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HealthcareDICOMServiceParameters) DeepCopyInto(out *HealthcareDICOMServiceParameters) { + *out = *in + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.WorkspaceID != nil { + in, out := &in.WorkspaceID, &out.WorkspaceID + *out = new(string) + **out = **in + } + if in.WorkspaceIDRef != nil { + in, out := &in.WorkspaceIDRef, &out.WorkspaceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.WorkspaceIDSelector != nil { + in, out := &in.WorkspaceIDSelector, &out.WorkspaceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthcareDICOMServiceParameters. 
+func (in *HealthcareDICOMServiceParameters) DeepCopy() *HealthcareDICOMServiceParameters { + if in == nil { + return nil + } + out := new(HealthcareDICOMServiceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HealthcareDICOMServiceSpec) DeepCopyInto(out *HealthcareDICOMServiceSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthcareDICOMServiceSpec. +func (in *HealthcareDICOMServiceSpec) DeepCopy() *HealthcareDICOMServiceSpec { + if in == nil { + return nil + } + out := new(HealthcareDICOMServiceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HealthcareDICOMServiceStatus) DeepCopyInto(out *HealthcareDICOMServiceStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthcareDICOMServiceStatus. +func (in *HealthcareDICOMServiceStatus) DeepCopy() *HealthcareDICOMServiceStatus { + if in == nil { + return nil + } + out := new(HealthcareDICOMServiceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HealthcareFHIRService) DeepCopyInto(out *HealthcareFHIRService) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthcareFHIRService. +func (in *HealthcareFHIRService) DeepCopy() *HealthcareFHIRService { + if in == nil { + return nil + } + out := new(HealthcareFHIRService) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HealthcareFHIRService) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HealthcareFHIRServiceAuthenticationInitParameters) DeepCopyInto(out *HealthcareFHIRServiceAuthenticationInitParameters) { + *out = *in + if in.Audience != nil { + in, out := &in.Audience, &out.Audience + *out = new(string) + **out = **in + } + if in.Authority != nil { + in, out := &in.Authority, &out.Authority + *out = new(string) + **out = **in + } + if in.SmartProxyEnabled != nil { + in, out := &in.SmartProxyEnabled, &out.SmartProxyEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthcareFHIRServiceAuthenticationInitParameters. +func (in *HealthcareFHIRServiceAuthenticationInitParameters) DeepCopy() *HealthcareFHIRServiceAuthenticationInitParameters { + if in == nil { + return nil + } + out := new(HealthcareFHIRServiceAuthenticationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HealthcareFHIRServiceAuthenticationObservation) DeepCopyInto(out *HealthcareFHIRServiceAuthenticationObservation) { + *out = *in + if in.Audience != nil { + in, out := &in.Audience, &out.Audience + *out = new(string) + **out = **in + } + if in.Authority != nil { + in, out := &in.Authority, &out.Authority + *out = new(string) + **out = **in + } + if in.SmartProxyEnabled != nil { + in, out := &in.SmartProxyEnabled, &out.SmartProxyEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthcareFHIRServiceAuthenticationObservation. +func (in *HealthcareFHIRServiceAuthenticationObservation) DeepCopy() *HealthcareFHIRServiceAuthenticationObservation { + if in == nil { + return nil + } + out := new(HealthcareFHIRServiceAuthenticationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HealthcareFHIRServiceAuthenticationParameters) DeepCopyInto(out *HealthcareFHIRServiceAuthenticationParameters) { + *out = *in + if in.Audience != nil { + in, out := &in.Audience, &out.Audience + *out = new(string) + **out = **in + } + if in.Authority != nil { + in, out := &in.Authority, &out.Authority + *out = new(string) + **out = **in + } + if in.SmartProxyEnabled != nil { + in, out := &in.SmartProxyEnabled, &out.SmartProxyEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthcareFHIRServiceAuthenticationParameters. +func (in *HealthcareFHIRServiceAuthenticationParameters) DeepCopy() *HealthcareFHIRServiceAuthenticationParameters { + if in == nil { + return nil + } + out := new(HealthcareFHIRServiceAuthenticationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HealthcareFHIRServiceIdentityInitParameters) DeepCopyInto(out *HealthcareFHIRServiceIdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthcareFHIRServiceIdentityInitParameters. +func (in *HealthcareFHIRServiceIdentityInitParameters) DeepCopy() *HealthcareFHIRServiceIdentityInitParameters { + if in == nil { + return nil + } + out := new(HealthcareFHIRServiceIdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HealthcareFHIRServiceIdentityObservation) DeepCopyInto(out *HealthcareFHIRServiceIdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthcareFHIRServiceIdentityObservation. 
+func (in *HealthcareFHIRServiceIdentityObservation) DeepCopy() *HealthcareFHIRServiceIdentityObservation { + if in == nil { + return nil + } + out := new(HealthcareFHIRServiceIdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HealthcareFHIRServiceIdentityParameters) DeepCopyInto(out *HealthcareFHIRServiceIdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthcareFHIRServiceIdentityParameters. +func (in *HealthcareFHIRServiceIdentityParameters) DeepCopy() *HealthcareFHIRServiceIdentityParameters { + if in == nil { + return nil + } + out := new(HealthcareFHIRServiceIdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HealthcareFHIRServiceInitParameters) DeepCopyInto(out *HealthcareFHIRServiceInitParameters) { + *out = *in + if in.AccessPolicyObjectIds != nil { + in, out := &in.AccessPolicyObjectIds, &out.AccessPolicyObjectIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Authentication != nil { + in, out := &in.Authentication, &out.Authentication + *out = new(HealthcareFHIRServiceAuthenticationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ConfigurationExportStorageAccountName != nil { + in, out := &in.ConfigurationExportStorageAccountName, &out.ConfigurationExportStorageAccountName + *out = new(string) + **out = **in + } + if in.ContainerRegistryLoginServerURL != nil { + in, out := &in.ContainerRegistryLoginServerURL, &out.ContainerRegistryLoginServerURL + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Cors != nil { + in, out := &in.Cors, &out.Cors + *out = new(CorsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(HealthcareFHIRServiceIdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.OciArtifact != nil { + in, out := &in.OciArtifact, &out.OciArtifact + *out = make([]OciArtifactInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) 
+ (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthcareFHIRServiceInitParameters. +func (in *HealthcareFHIRServiceInitParameters) DeepCopy() *HealthcareFHIRServiceInitParameters { + if in == nil { + return nil + } + out := new(HealthcareFHIRServiceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HealthcareFHIRServiceList) DeepCopyInto(out *HealthcareFHIRServiceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HealthcareFHIRService, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthcareFHIRServiceList. +func (in *HealthcareFHIRServiceList) DeepCopy() *HealthcareFHIRServiceList { + if in == nil { + return nil + } + out := new(HealthcareFHIRServiceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *HealthcareFHIRServiceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HealthcareFHIRServiceObservation) DeepCopyInto(out *HealthcareFHIRServiceObservation) { + *out = *in + if in.AccessPolicyObjectIds != nil { + in, out := &in.AccessPolicyObjectIds, &out.AccessPolicyObjectIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Authentication != nil { + in, out := &in.Authentication, &out.Authentication + *out = new(HealthcareFHIRServiceAuthenticationObservation) + (*in).DeepCopyInto(*out) + } + if in.ConfigurationExportStorageAccountName != nil { + in, out := &in.ConfigurationExportStorageAccountName, &out.ConfigurationExportStorageAccountName + *out = new(string) + **out = **in + } + if in.ContainerRegistryLoginServerURL != nil { + in, out := &in.ContainerRegistryLoginServerURL, &out.ContainerRegistryLoginServerURL + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Cors != nil { + in, out := &in.Cors, &out.Cors + *out = new(CorsObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(HealthcareFHIRServiceIdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.OciArtifact != nil { + in, out := &in.OciArtifact, &out.OciArtifact + *out = make([]OciArtifactObservation, len(*in)) + 
for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.WorkspaceID != nil { + in, out := &in.WorkspaceID, &out.WorkspaceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthcareFHIRServiceObservation. +func (in *HealthcareFHIRServiceObservation) DeepCopy() *HealthcareFHIRServiceObservation { + if in == nil { + return nil + } + out := new(HealthcareFHIRServiceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HealthcareFHIRServiceParameters) DeepCopyInto(out *HealthcareFHIRServiceParameters) { + *out = *in + if in.AccessPolicyObjectIds != nil { + in, out := &in.AccessPolicyObjectIds, &out.AccessPolicyObjectIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Authentication != nil { + in, out := &in.Authentication, &out.Authentication + *out = new(HealthcareFHIRServiceAuthenticationParameters) + (*in).DeepCopyInto(*out) + } + if in.ConfigurationExportStorageAccountName != nil { + in, out := &in.ConfigurationExportStorageAccountName, &out.ConfigurationExportStorageAccountName + *out = new(string) + **out = **in + } + if in.ContainerRegistryLoginServerURL != nil { + in, out := &in.ContainerRegistryLoginServerURL, &out.ContainerRegistryLoginServerURL + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Cors != nil { + in, out := &in.Cors, &out.Cors + *out = new(CorsParameters) + (*in).DeepCopyInto(*out) + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(HealthcareFHIRServiceIdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.OciArtifact != nil { + in, out := &in.OciArtifact, &out.OciArtifact + *out = make([]OciArtifactParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + 
(*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.WorkspaceID != nil { + in, out := &in.WorkspaceID, &out.WorkspaceID + *out = new(string) + **out = **in + } + if in.WorkspaceIDRef != nil { + in, out := &in.WorkspaceIDRef, &out.WorkspaceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.WorkspaceIDSelector != nil { + in, out := &in.WorkspaceIDSelector, &out.WorkspaceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthcareFHIRServiceParameters. +func (in *HealthcareFHIRServiceParameters) DeepCopy() *HealthcareFHIRServiceParameters { + if in == nil { + return nil + } + out := new(HealthcareFHIRServiceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HealthcareFHIRServiceSpec) DeepCopyInto(out *HealthcareFHIRServiceSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthcareFHIRServiceSpec. 
+func (in *HealthcareFHIRServiceSpec) DeepCopy() *HealthcareFHIRServiceSpec { + if in == nil { + return nil + } + out := new(HealthcareFHIRServiceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HealthcareFHIRServiceStatus) DeepCopyInto(out *HealthcareFHIRServiceStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthcareFHIRServiceStatus. +func (in *HealthcareFHIRServiceStatus) DeepCopy() *HealthcareFHIRServiceStatus { + if in == nil { + return nil + } + out := new(HealthcareFHIRServiceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HealthcareMedtechService) DeepCopyInto(out *HealthcareMedtechService) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthcareMedtechService. +func (in *HealthcareMedtechService) DeepCopy() *HealthcareMedtechService { + if in == nil { + return nil + } + out := new(HealthcareMedtechService) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HealthcareMedtechService) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HealthcareMedtechServiceIdentityInitParameters) DeepCopyInto(out *HealthcareMedtechServiceIdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthcareMedtechServiceIdentityInitParameters. +func (in *HealthcareMedtechServiceIdentityInitParameters) DeepCopy() *HealthcareMedtechServiceIdentityInitParameters { + if in == nil { + return nil + } + out := new(HealthcareMedtechServiceIdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HealthcareMedtechServiceIdentityObservation) DeepCopyInto(out *HealthcareMedtechServiceIdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthcareMedtechServiceIdentityObservation. 
+func (in *HealthcareMedtechServiceIdentityObservation) DeepCopy() *HealthcareMedtechServiceIdentityObservation { + if in == nil { + return nil + } + out := new(HealthcareMedtechServiceIdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HealthcareMedtechServiceIdentityParameters) DeepCopyInto(out *HealthcareMedtechServiceIdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthcareMedtechServiceIdentityParameters. +func (in *HealthcareMedtechServiceIdentityParameters) DeepCopy() *HealthcareMedtechServiceIdentityParameters { + if in == nil { + return nil + } + out := new(HealthcareMedtechServiceIdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HealthcareMedtechServiceInitParameters) DeepCopyInto(out *HealthcareMedtechServiceInitParameters) { + *out = *in + if in.DeviceMappingJSON != nil { + in, out := &in.DeviceMappingJSON, &out.DeviceMappingJSON + *out = new(string) + **out = **in + } + if in.EventHubConsumerGroupName != nil { + in, out := &in.EventHubConsumerGroupName, &out.EventHubConsumerGroupName + *out = new(string) + **out = **in + } + if in.EventHubConsumerGroupNameRef != nil { + in, out := &in.EventHubConsumerGroupNameRef, &out.EventHubConsumerGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.EventHubConsumerGroupNameSelector != nil { + in, out := &in.EventHubConsumerGroupNameSelector, &out.EventHubConsumerGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.EventHubName != nil { + in, out := &in.EventHubName, &out.EventHubName + *out = new(string) + **out = **in + } + if in.EventHubNameRef != nil { + in, out := &in.EventHubNameRef, &out.EventHubNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.EventHubNameSelector != nil { + in, out := &in.EventHubNameSelector, &out.EventHubNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.EventHubNamespaceName != nil { + in, out := &in.EventHubNamespaceName, &out.EventHubNamespaceName + *out = new(string) + **out = **in + } + if in.EventHubNamespaceNameRef != nil { + in, out := &in.EventHubNamespaceNameRef, &out.EventHubNamespaceNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.EventHubNamespaceNameSelector != nil { + in, out := &in.EventHubNamespaceNameSelector, &out.EventHubNamespaceNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(HealthcareMedtechServiceIdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + 
if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthcareMedtechServiceInitParameters. +func (in *HealthcareMedtechServiceInitParameters) DeepCopy() *HealthcareMedtechServiceInitParameters { + if in == nil { + return nil + } + out := new(HealthcareMedtechServiceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HealthcareMedtechServiceList) DeepCopyInto(out *HealthcareMedtechServiceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HealthcareMedtechService, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthcareMedtechServiceList. +func (in *HealthcareMedtechServiceList) DeepCopy() *HealthcareMedtechServiceList { + if in == nil { + return nil + } + out := new(HealthcareMedtechServiceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HealthcareMedtechServiceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HealthcareMedtechServiceObservation) DeepCopyInto(out *HealthcareMedtechServiceObservation) { + *out = *in + if in.DeviceMappingJSON != nil { + in, out := &in.DeviceMappingJSON, &out.DeviceMappingJSON + *out = new(string) + **out = **in + } + if in.EventHubConsumerGroupName != nil { + in, out := &in.EventHubConsumerGroupName, &out.EventHubConsumerGroupName + *out = new(string) + **out = **in + } + if in.EventHubName != nil { + in, out := &in.EventHubName, &out.EventHubName + *out = new(string) + **out = **in + } + if in.EventHubNamespaceName != nil { + in, out := &in.EventHubNamespaceName, &out.EventHubNamespaceName + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(HealthcareMedtechServiceIdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.WorkspaceID != nil { + in, out := &in.WorkspaceID, &out.WorkspaceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthcareMedtechServiceObservation. +func (in *HealthcareMedtechServiceObservation) DeepCopy() *HealthcareMedtechServiceObservation { + if in == nil { + return nil + } + out := new(HealthcareMedtechServiceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HealthcareMedtechServiceParameters) DeepCopyInto(out *HealthcareMedtechServiceParameters) { + *out = *in + if in.DeviceMappingJSON != nil { + in, out := &in.DeviceMappingJSON, &out.DeviceMappingJSON + *out = new(string) + **out = **in + } + if in.EventHubConsumerGroupName != nil { + in, out := &in.EventHubConsumerGroupName, &out.EventHubConsumerGroupName + *out = new(string) + **out = **in + } + if in.EventHubConsumerGroupNameRef != nil { + in, out := &in.EventHubConsumerGroupNameRef, &out.EventHubConsumerGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.EventHubConsumerGroupNameSelector != nil { + in, out := &in.EventHubConsumerGroupNameSelector, &out.EventHubConsumerGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.EventHubName != nil { + in, out := &in.EventHubName, &out.EventHubName + *out = new(string) + **out = **in + } + if in.EventHubNameRef != nil { + in, out := &in.EventHubNameRef, &out.EventHubNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.EventHubNameSelector != nil { + in, out := &in.EventHubNameSelector, &out.EventHubNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.EventHubNamespaceName != nil { + in, out := &in.EventHubNamespaceName, &out.EventHubNamespaceName + *out = new(string) + **out = **in + } + if in.EventHubNamespaceNameRef != nil { + in, out := &in.EventHubNamespaceNameRef, &out.EventHubNamespaceNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.EventHubNamespaceNameSelector != nil { + in, out := &in.EventHubNamespaceNameSelector, &out.EventHubNamespaceNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(HealthcareMedtechServiceIdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Tags 
!= nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.WorkspaceID != nil { + in, out := &in.WorkspaceID, &out.WorkspaceID + *out = new(string) + **out = **in + } + if in.WorkspaceIDRef != nil { + in, out := &in.WorkspaceIDRef, &out.WorkspaceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.WorkspaceIDSelector != nil { + in, out := &in.WorkspaceIDSelector, &out.WorkspaceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthcareMedtechServiceParameters. +func (in *HealthcareMedtechServiceParameters) DeepCopy() *HealthcareMedtechServiceParameters { + if in == nil { + return nil + } + out := new(HealthcareMedtechServiceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HealthcareMedtechServiceSpec) DeepCopyInto(out *HealthcareMedtechServiceSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthcareMedtechServiceSpec. +func (in *HealthcareMedtechServiceSpec) DeepCopy() *HealthcareMedtechServiceSpec { + if in == nil { + return nil + } + out := new(HealthcareMedtechServiceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HealthcareMedtechServiceStatus) DeepCopyInto(out *HealthcareMedtechServiceStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthcareMedtechServiceStatus. +func (in *HealthcareMedtechServiceStatus) DeepCopy() *HealthcareMedtechServiceStatus { + if in == nil { + return nil + } + out := new(HealthcareMedtechServiceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HealthcareService) DeepCopyInto(out *HealthcareService) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthcareService. +func (in *HealthcareService) DeepCopy() *HealthcareService { + if in == nil { + return nil + } + out := new(HealthcareService) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HealthcareService) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HealthcareServiceInitParameters) DeepCopyInto(out *HealthcareServiceInitParameters) { + *out = *in + if in.AccessPolicyObjectIds != nil { + in, out := &in.AccessPolicyObjectIds, &out.AccessPolicyObjectIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AuthenticationConfiguration != nil { + in, out := &in.AuthenticationConfiguration, &out.AuthenticationConfiguration + *out = new(AuthenticationConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CorsConfiguration != nil { + in, out := &in.CorsConfiguration, &out.CorsConfiguration + *out = new(CorsConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CosmosDBKeyVaultKeyVersionlessID != nil { + in, out := &in.CosmosDBKeyVaultKeyVersionlessID, &out.CosmosDBKeyVaultKeyVersionlessID + *out = new(string) + **out = **in + } + if in.CosmosDBThroughput != nil { + in, out := &in.CosmosDBThroughput, &out.CosmosDBThroughput + *out = new(float64) + **out = **in + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthcareServiceInitParameters. 
+func (in *HealthcareServiceInitParameters) DeepCopy() *HealthcareServiceInitParameters { + if in == nil { + return nil + } + out := new(HealthcareServiceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HealthcareServiceList) DeepCopyInto(out *HealthcareServiceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HealthcareService, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthcareServiceList. +func (in *HealthcareServiceList) DeepCopy() *HealthcareServiceList { + if in == nil { + return nil + } + out := new(HealthcareServiceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HealthcareServiceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HealthcareServiceObservation) DeepCopyInto(out *HealthcareServiceObservation) { + *out = *in + if in.AccessPolicyObjectIds != nil { + in, out := &in.AccessPolicyObjectIds, &out.AccessPolicyObjectIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AuthenticationConfiguration != nil { + in, out := &in.AuthenticationConfiguration, &out.AuthenticationConfiguration + *out = new(AuthenticationConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.CorsConfiguration != nil { + in, out := &in.CorsConfiguration, &out.CorsConfiguration + *out = new(CorsConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.CosmosDBKeyVaultKeyVersionlessID != nil { + in, out := &in.CosmosDBKeyVaultKeyVersionlessID, &out.CosmosDBKeyVaultKeyVersionlessID + *out = new(string) + **out = **in + } + if in.CosmosDBThroughput != nil { + in, out := &in.CosmosDBThroughput, &out.CosmosDBThroughput + *out = new(float64) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an 
autogenerated deepcopy function, copying the receiver, creating a new HealthcareServiceObservation. +func (in *HealthcareServiceObservation) DeepCopy() *HealthcareServiceObservation { + if in == nil { + return nil + } + out := new(HealthcareServiceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HealthcareServiceParameters) DeepCopyInto(out *HealthcareServiceParameters) { + *out = *in + if in.AccessPolicyObjectIds != nil { + in, out := &in.AccessPolicyObjectIds, &out.AccessPolicyObjectIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AuthenticationConfiguration != nil { + in, out := &in.AuthenticationConfiguration, &out.AuthenticationConfiguration + *out = new(AuthenticationConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.CorsConfiguration != nil { + in, out := &in.CorsConfiguration, &out.CorsConfiguration + *out = new(CorsConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.CosmosDBKeyVaultKeyVersionlessID != nil { + in, out := &in.CosmosDBKeyVaultKeyVersionlessID, &out.CosmosDBKeyVaultKeyVersionlessID + *out = new(string) + **out = **in + } + if in.CosmosDBThroughput != nil { + in, out := &in.CosmosDBThroughput, &out.CosmosDBThroughput + *out = new(float64) + **out = **in + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if 
in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthcareServiceParameters. +func (in *HealthcareServiceParameters) DeepCopy() *HealthcareServiceParameters { + if in == nil { + return nil + } + out := new(HealthcareServiceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HealthcareServiceSpec) DeepCopyInto(out *HealthcareServiceSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthcareServiceSpec. +func (in *HealthcareServiceSpec) DeepCopy() *HealthcareServiceSpec { + if in == nil { + return nil + } + out := new(HealthcareServiceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HealthcareServiceStatus) DeepCopyInto(out *HealthcareServiceStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthcareServiceStatus. +func (in *HealthcareServiceStatus) DeepCopy() *HealthcareServiceStatus { + if in == nil { + return nil + } + out := new(HealthcareServiceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityInitParameters) DeepCopyInto(out *IdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityInitParameters. +func (in *IdentityInitParameters) DeepCopy() *IdentityInitParameters { + if in == nil { + return nil + } + out := new(IdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentityObservation) DeepCopyInto(out *IdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityObservation. +func (in *IdentityObservation) DeepCopy() *IdentityObservation { + if in == nil { + return nil + } + out := new(IdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityParameters) DeepCopyInto(out *IdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityParameters. +func (in *IdentityParameters) DeepCopy() *IdentityParameters { + if in == nil { + return nil + } + out := new(IdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OciArtifactInitParameters) DeepCopyInto(out *OciArtifactInitParameters) { + *out = *in + if in.Digest != nil { + in, out := &in.Digest, &out.Digest + *out = new(string) + **out = **in + } + if in.ImageName != nil { + in, out := &in.ImageName, &out.ImageName + *out = new(string) + **out = **in + } + if in.LoginServer != nil { + in, out := &in.LoginServer, &out.LoginServer + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OciArtifactInitParameters. +func (in *OciArtifactInitParameters) DeepCopy() *OciArtifactInitParameters { + if in == nil { + return nil + } + out := new(OciArtifactInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OciArtifactObservation) DeepCopyInto(out *OciArtifactObservation) { + *out = *in + if in.Digest != nil { + in, out := &in.Digest, &out.Digest + *out = new(string) + **out = **in + } + if in.ImageName != nil { + in, out := &in.ImageName, &out.ImageName + *out = new(string) + **out = **in + } + if in.LoginServer != nil { + in, out := &in.LoginServer, &out.LoginServer + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OciArtifactObservation. +func (in *OciArtifactObservation) DeepCopy() *OciArtifactObservation { + if in == nil { + return nil + } + out := new(OciArtifactObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OciArtifactParameters) DeepCopyInto(out *OciArtifactParameters) { + *out = *in + if in.Digest != nil { + in, out := &in.Digest, &out.Digest + *out = new(string) + **out = **in + } + if in.ImageName != nil { + in, out := &in.ImageName, &out.ImageName + *out = new(string) + **out = **in + } + if in.LoginServer != nil { + in, out := &in.LoginServer, &out.LoginServer + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OciArtifactParameters. +func (in *OciArtifactParameters) DeepCopy() *OciArtifactParameters { + if in == nil { + return nil + } + out := new(OciArtifactParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrivateEndpointInitParameters) DeepCopyInto(out *PrivateEndpointInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateEndpointInitParameters. +func (in *PrivateEndpointInitParameters) DeepCopy() *PrivateEndpointInitParameters { + if in == nil { + return nil + } + out := new(PrivateEndpointInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrivateEndpointObservation) DeepCopyInto(out *PrivateEndpointObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateEndpointObservation. 
+func (in *PrivateEndpointObservation) DeepCopy() *PrivateEndpointObservation { + if in == nil { + return nil + } + out := new(PrivateEndpointObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrivateEndpointParameters) DeepCopyInto(out *PrivateEndpointParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateEndpointParameters. +func (in *PrivateEndpointParameters) DeepCopy() *PrivateEndpointParameters { + if in == nil { + return nil + } + out := new(PrivateEndpointParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/healthcareapis/v1beta2/zz_generated.managed.go b/apis/healthcareapis/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..6f8da532b --- /dev/null +++ b/apis/healthcareapis/v1beta2/zz_generated.managed.go @@ -0,0 +1,248 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this HealthcareDICOMService. +func (mg *HealthcareDICOMService) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this HealthcareDICOMService. +func (mg *HealthcareDICOMService) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this HealthcareDICOMService. +func (mg *HealthcareDICOMService) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this HealthcareDICOMService. 
+func (mg *HealthcareDICOMService) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this HealthcareDICOMService. +func (mg *HealthcareDICOMService) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this HealthcareDICOMService. +func (mg *HealthcareDICOMService) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this HealthcareDICOMService. +func (mg *HealthcareDICOMService) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this HealthcareDICOMService. +func (mg *HealthcareDICOMService) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this HealthcareDICOMService. +func (mg *HealthcareDICOMService) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this HealthcareDICOMService. +func (mg *HealthcareDICOMService) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this HealthcareDICOMService. +func (mg *HealthcareDICOMService) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this HealthcareDICOMService. +func (mg *HealthcareDICOMService) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this HealthcareFHIRService. +func (mg *HealthcareFHIRService) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this HealthcareFHIRService. 
+func (mg *HealthcareFHIRService) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this HealthcareFHIRService. +func (mg *HealthcareFHIRService) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this HealthcareFHIRService. +func (mg *HealthcareFHIRService) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this HealthcareFHIRService. +func (mg *HealthcareFHIRService) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this HealthcareFHIRService. +func (mg *HealthcareFHIRService) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this HealthcareFHIRService. +func (mg *HealthcareFHIRService) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this HealthcareFHIRService. +func (mg *HealthcareFHIRService) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this HealthcareFHIRService. +func (mg *HealthcareFHIRService) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this HealthcareFHIRService. +func (mg *HealthcareFHIRService) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this HealthcareFHIRService. +func (mg *HealthcareFHIRService) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this HealthcareFHIRService. 
+func (mg *HealthcareFHIRService) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this HealthcareMedtechService. +func (mg *HealthcareMedtechService) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this HealthcareMedtechService. +func (mg *HealthcareMedtechService) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this HealthcareMedtechService. +func (mg *HealthcareMedtechService) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this HealthcareMedtechService. +func (mg *HealthcareMedtechService) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this HealthcareMedtechService. +func (mg *HealthcareMedtechService) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this HealthcareMedtechService. +func (mg *HealthcareMedtechService) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this HealthcareMedtechService. +func (mg *HealthcareMedtechService) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this HealthcareMedtechService. +func (mg *HealthcareMedtechService) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this HealthcareMedtechService. +func (mg *HealthcareMedtechService) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this HealthcareMedtechService. 
+func (mg *HealthcareMedtechService) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this HealthcareMedtechService. +func (mg *HealthcareMedtechService) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this HealthcareMedtechService. +func (mg *HealthcareMedtechService) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this HealthcareService. +func (mg *HealthcareService) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this HealthcareService. +func (mg *HealthcareService) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this HealthcareService. +func (mg *HealthcareService) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this HealthcareService. +func (mg *HealthcareService) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this HealthcareService. +func (mg *HealthcareService) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this HealthcareService. +func (mg *HealthcareService) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this HealthcareService. +func (mg *HealthcareService) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this HealthcareService. 
+func (mg *HealthcareService) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this HealthcareService. +func (mg *HealthcareService) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this HealthcareService. +func (mg *HealthcareService) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this HealthcareService. +func (mg *HealthcareService) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this HealthcareService. +func (mg *HealthcareService) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/healthcareapis/v1beta2/zz_generated.managedlist.go b/apis/healthcareapis/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..8e8793da2 --- /dev/null +++ b/apis/healthcareapis/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,44 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this HealthcareDICOMServiceList. +func (l *HealthcareDICOMServiceList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this HealthcareFHIRServiceList. +func (l *HealthcareFHIRServiceList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this HealthcareMedtechServiceList. 
+func (l *HealthcareMedtechServiceList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this HealthcareServiceList. +func (l *HealthcareServiceList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/healthcareapis/v1beta2/zz_generated.resolvers.go b/apis/healthcareapis/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..f5053ab9e --- /dev/null +++ b/apis/healthcareapis/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,295 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + rconfig "github.com/upbound/provider-azure/apis/rconfig" + apisresolver "github.com/upbound/provider-azure/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *HealthcareDICOMService) ResolveReferences( // ResolveReferences of this HealthcareDICOMService. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("healthcareapis.azure.upbound.io", "v1beta1", "HealthcareWorkspace", "HealthcareWorkspaceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.WorkspaceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.WorkspaceIDRef, + Selector: mg.Spec.ForProvider.WorkspaceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.WorkspaceID") + } + mg.Spec.ForProvider.WorkspaceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.WorkspaceIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this HealthcareFHIRService. 
+func (mg *HealthcareFHIRService) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("healthcareapis.azure.upbound.io", "v1beta1", "HealthcareWorkspace", "HealthcareWorkspaceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.WorkspaceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.WorkspaceIDRef, + Selector: mg.Spec.ForProvider.WorkspaceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.WorkspaceID") + } + mg.Spec.ForProvider.WorkspaceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.WorkspaceIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", 
"v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ResourceGroupNameRef, + Selector: mg.Spec.InitProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ResourceGroupName") + } + mg.Spec.InitProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this HealthcareMedtechService. +func (mg *HealthcareMedtechService) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta1", "ConsumerGroup", "ConsumerGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.EventHubConsumerGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.EventHubConsumerGroupNameRef, + Selector: mg.Spec.ForProvider.EventHubConsumerGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.EventHubConsumerGroupName") + } + mg.Spec.ForProvider.EventHubConsumerGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.EventHubConsumerGroupNameRef = rsp.ResolvedReference + 
{ + m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta2", "EventHub", "EventHubList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.EventHubName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.EventHubNameRef, + Selector: mg.Spec.ForProvider.EventHubNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.EventHubName") + } + mg.Spec.ForProvider.EventHubName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.EventHubNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta2", "EventHubNamespace", "EventHubNamespaceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.EventHubNamespaceName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.EventHubNamespaceNameRef, + Selector: mg.Spec.ForProvider.EventHubNamespaceNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.EventHubNamespaceName") + } + mg.Spec.ForProvider.EventHubNamespaceName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.EventHubNamespaceNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("healthcareapis.azure.upbound.io", "v1beta1", "HealthcareWorkspace", "HealthcareWorkspaceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, 
reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.WorkspaceID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.WorkspaceIDRef, + Selector: mg.Spec.ForProvider.WorkspaceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.WorkspaceID") + } + mg.Spec.ForProvider.WorkspaceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.WorkspaceIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta1", "ConsumerGroup", "ConsumerGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.EventHubConsumerGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.EventHubConsumerGroupNameRef, + Selector: mg.Spec.InitProvider.EventHubConsumerGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.EventHubConsumerGroupName") + } + mg.Spec.InitProvider.EventHubConsumerGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.EventHubConsumerGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta2", "EventHub", "EventHubList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.EventHubName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.EventHubNameRef, + Selector: mg.Spec.InitProvider.EventHubNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } 
+ if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.EventHubName") + } + mg.Spec.InitProvider.EventHubName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.EventHubNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta2", "EventHubNamespace", "EventHubNamespaceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.EventHubNamespaceName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.EventHubNamespaceNameRef, + Selector: mg.Spec.InitProvider.EventHubNamespaceNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.EventHubNamespaceName") + } + mg.Spec.InitProvider.EventHubNamespaceName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.EventHubNamespaceNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this HealthcareService. 
+func (mg *HealthcareService) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/healthcareapis/v1beta2/zz_groupversion_info.go b/apis/healthcareapis/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..9fcdb38bd --- /dev/null +++ b/apis/healthcareapis/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=healthcareapis.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
+const ( + CRDGroup = "healthcareapis.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/healthcareapis/v1beta2/zz_healthcaredicomservice_terraformed.go b/apis/healthcareapis/v1beta2/zz_healthcaredicomservice_terraformed.go new file mode 100755 index 000000000..58667d9fe --- /dev/null +++ b/apis/healthcareapis/v1beta2/zz_healthcaredicomservice_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this HealthcareDICOMService +func (mg *HealthcareDICOMService) GetTerraformResourceType() string { + return "azurerm_healthcare_dicom_service" +} + +// GetConnectionDetailsMapping for this HealthcareDICOMService +func (tr *HealthcareDICOMService) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this HealthcareDICOMService +func (tr *HealthcareDICOMService) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this HealthcareDICOMService +func (tr *HealthcareDICOMService) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if 
err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this HealthcareDICOMService +func (tr *HealthcareDICOMService) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this HealthcareDICOMService +func (tr *HealthcareDICOMService) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this HealthcareDICOMService +func (tr *HealthcareDICOMService) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this HealthcareDICOMService +func (tr *HealthcareDICOMService) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this HealthcareDICOMService +func (tr *HealthcareDICOMService) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. 
As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this HealthcareDICOMService using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *HealthcareDICOMService) LateInitialize(attrs []byte) (bool, error) { + params := &HealthcareDICOMServiceParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *HealthcareDICOMService) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/healthcareapis/v1beta2/zz_healthcaredicomservice_types.go b/apis/healthcareapis/v1beta2/zz_healthcaredicomservice_types.go new file mode 100755 index 000000000..56305e5d5 --- /dev/null +++ b/apis/healthcareapis/v1beta2/zz_healthcaredicomservice_types.go @@ -0,0 +1,224 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+
+package v1beta2
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
+)
+
+type AuthenticationInitParameters struct {
+}
+
+type AuthenticationObservation struct {
+
+	// The intended audience to receive authentication tokens for the service. The default value is https://dicom.azurehealthcareapis.azure.com
+	Audience []*string `json:"audience,omitempty" tf:"audience,omitempty"`
+
+	// The Azure Active Directory (tenant) that serves as the authentication authority to access the service.
+	// Authority must be registered to Azure AD and in the following format: https://{Azure-AD-endpoint}/{tenant-id}.
+	Authority *string `json:"authority,omitempty" tf:"authority,omitempty"`
+}
+
+type AuthenticationParameters struct {
+}
+
+type HealthcareDICOMServiceInitParameters struct {
+
+	// An identity block as defined below.
+	Identity *IdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"`
+
+	// Specifies the Azure Region where the Healthcare DICOM Service should be created. Changing this forces a new Healthcare DICOM Service to be created.
+	Location *string `json:"location,omitempty" tf:"location,omitempty"`
+
+	// Whether public network access is enabled for data plane traffic while a private endpoint is enabled. Defaults to true.
+	PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"`
+
+	// A mapping of tags to assign to the Healthcare DICOM Service.
+	// +mapType=granular
+	Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"`
+}
+
+type HealthcareDICOMServiceObservation struct {
+
+	// The authentication block as defined below.
+	Authentication []AuthenticationObservation `json:"authentication,omitempty" tf:"authentication,omitempty"`
+
+	// The ID of the Healthcare DICOM Service.
+	ID *string `json:"id,omitempty" tf:"id,omitempty"`
+
+	// An identity block as defined below.
+	Identity *IdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"`
+
+	// Specifies the Azure Region where the Healthcare DICOM Service should be created. Changing this forces a new Healthcare DICOM Service to be created.
+	Location *string `json:"location,omitempty" tf:"location,omitempty"`
+
+	// The private endpoints connected to this service, as reported by the provider.
+	PrivateEndpoint []PrivateEndpointObservation `json:"privateEndpoint,omitempty" tf:"private_endpoint,omitempty"`
+
+	// Whether public network access is enabled for data plane traffic while a private endpoint is enabled. Defaults to true.
+	PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"`
+
+	// The url of the Healthcare DICOM Services.
+	ServiceURL *string `json:"serviceUrl,omitempty" tf:"service_url,omitempty"`
+
+	// A mapping of tags to assign to the Healthcare DICOM Service.
+	// +mapType=granular
+	Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"`
+
+	// Specifies the id of the Healthcare Workspace where the Healthcare DICOM Service should exist. Changing this forces a new Healthcare DICOM Service to be created.
+	WorkspaceID *string `json:"workspaceId,omitempty" tf:"workspace_id,omitempty"`
+}
+
+type HealthcareDICOMServiceParameters struct {
+
+	// An identity block as defined below.
+	// +kubebuilder:validation:Optional
+	Identity *IdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"`
+
+	// Specifies the Azure Region where the Healthcare DICOM Service should be created. Changing this forces a new Healthcare DICOM Service to be created.
+	// +kubebuilder:validation:Optional
+	Location *string `json:"location,omitempty" tf:"location,omitempty"`
+
+	// Whether public network access is enabled for data plane traffic while a private endpoint is enabled. Defaults to true.
+	// +kubebuilder:validation:Optional
+	PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"`
+
+	// A mapping of tags to assign to the Healthcare DICOM Service.
+	// +kubebuilder:validation:Optional
+	// +mapType=granular
+	Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"`
+
+	// Specifies the id of the Healthcare Workspace where the Healthcare DICOM Service should exist. Changing this forces a new Healthcare DICOM Service to be created.
+	// +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/healthcareapis/v1beta1.HealthcareWorkspace
+	// +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID()
+	// +kubebuilder:validation:Optional
+	WorkspaceID *string `json:"workspaceId,omitempty" tf:"workspace_id,omitempty"`
+
+	// Reference to a HealthcareWorkspace in healthcareapis to populate workspaceId.
+	// +kubebuilder:validation:Optional
+	WorkspaceIDRef *v1.Reference `json:"workspaceIdRef,omitempty" tf:"-"`
+
+	// Selector for a HealthcareWorkspace in healthcareapis to populate workspaceId.
+	// +kubebuilder:validation:Optional
+	WorkspaceIDSelector *v1.Selector `json:"workspaceIdSelector,omitempty" tf:"-"`
+}
+
+type IdentityInitParameters struct {
+
+	// A list of User Assigned Identity IDs which should be assigned to this Healthcare DICOM service.
+	// +listType=set
+	IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"`
+
+	// The type of identity used for the Healthcare DICOM service. Possible values are UserAssigned, SystemAssigned and SystemAssigned, UserAssigned. If UserAssigned is set, an identity_ids must be set as well.
+	Type *string `json:"type,omitempty" tf:"type,omitempty"`
+}
+
+type IdentityObservation struct {
+
+	// A list of User Assigned Identity IDs which should be assigned to this Healthcare DICOM service.
+	// +listType=set
+	IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"`
+
+	// The Principal ID associated with this identity.
+	// NOTE(review): generated doc said "The ID of the Healthcare DICOM Service", which looks copy-pasted — confirm against provider docs.
+	PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"`
+
+	// The Tenant ID associated with this identity.
+	// NOTE(review): generated doc said "The ID of the Healthcare DICOM Service", which looks copy-pasted — confirm against provider docs.
+	TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"`
+
+	// The type of identity used for the Healthcare DICOM service. Possible values are UserAssigned, SystemAssigned and SystemAssigned, UserAssigned. If UserAssigned is set, an identity_ids must be set as well.
+	Type *string `json:"type,omitempty" tf:"type,omitempty"`
+}
+
+type IdentityParameters struct {
+
+	// A list of User Assigned Identity IDs which should be assigned to this Healthcare DICOM service.
+	// +kubebuilder:validation:Optional
+	// +listType=set
+	IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"`
+
+	// The type of identity used for the Healthcare DICOM service. Possible values are UserAssigned, SystemAssigned and SystemAssigned, UserAssigned. If UserAssigned is set, an identity_ids must be set as well.
+	// +kubebuilder:validation:Optional
+	Type *string `json:"type" tf:"type,omitempty"`
+}
+
+type PrivateEndpointInitParameters struct {
+}
+
+type PrivateEndpointObservation struct {
+
+	// The ID of this private endpoint.
+	// NOTE(review): generated doc said "The ID of the Healthcare DICOM Service" — confirm against provider docs.
+	ID *string `json:"id,omitempty" tf:"id,omitempty"`
+
+	// Specifies the name of the Healthcare DICOM Service. Changing this forces a new Healthcare DICOM Service to be created.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+}
+
+type PrivateEndpointParameters struct {
+}
+
+// HealthcareDICOMServiceSpec defines the desired state of HealthcareDICOMService
+type HealthcareDICOMServiceSpec struct {
+	v1.ResourceSpec `json:",inline"`
+	ForProvider     HealthcareDICOMServiceParameters `json:"forProvider"`
+	// THIS IS A BETA FIELD. It will be honored
+	// unless the Management Policies feature flag is disabled.
+	// InitProvider holds the same fields as ForProvider, with the exception
+	// of Identifier and other resource reference fields. The fields that are
+	// in InitProvider are merged into ForProvider when the resource is created.
+	// The same fields are also added to the terraform ignore_changes hook, to
+	// avoid updating them after creation. This is useful for fields that are
+	// required on creation, but we do not desire to update them after creation,
+	// for example because an external controller is managing them, like an
+	// autoscaler.
+	InitProvider HealthcareDICOMServiceInitParameters `json:"initProvider,omitempty"`
+}
+
+// HealthcareDICOMServiceStatus defines the observed state of HealthcareDICOMService.
+type HealthcareDICOMServiceStatus struct {
+	v1.ResourceStatus `json:",inline"`
+	AtProvider        HealthcareDICOMServiceObservation `json:"atProvider,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+
+// HealthcareDICOMService is the Schema for the HealthcareDICOMServices API. Manages a Healthcare DICOM (Digital Imaging and Communications in Medicine) Service.
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
+// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name"
+// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp"
+// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure}
+type HealthcareDICOMService struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter"
+	Spec   HealthcareDICOMServiceSpec   `json:"spec"`
+	Status HealthcareDICOMServiceStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// HealthcareDICOMServiceList contains a list of HealthcareDICOMServices
+type HealthcareDICOMServiceList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []HealthcareDICOMService `json:"items"`
+}
+
+// Repository type metadata.
+var (
+	HealthcareDICOMService_Kind             = "HealthcareDICOMService"
+	HealthcareDICOMService_GroupKind        = schema.GroupKind{Group: CRDGroup, Kind: HealthcareDICOMService_Kind}.String()
+	HealthcareDICOMService_KindAPIVersion   = HealthcareDICOMService_Kind + "." + CRDGroupVersion.String()
+	HealthcareDICOMService_GroupVersionKind = CRDGroupVersion.WithKind(HealthcareDICOMService_Kind)
+)
+
+func init() {
+	SchemeBuilder.Register(&HealthcareDICOMService{}, &HealthcareDICOMServiceList{})
+}
diff --git a/apis/healthcareapis/v1beta2/zz_healthcarefhirservice_terraformed.go b/apis/healthcareapis/v1beta2/zz_healthcarefhirservice_terraformed.go
new file mode 100755
index 000000000..db21b5fa4
--- /dev/null
+++ b/apis/healthcareapis/v1beta2/zz_healthcarefhirservice_terraformed.go
@@ -0,0 +1,129 @@
+// SPDX-FileCopyrightText: 2024 The Crossplane Authors
+//
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by upjet. DO NOT EDIT.
+
+package v1beta2
+
+import (
+	"dario.cat/mergo"
+	"github.com/pkg/errors"
+
+	"github.com/crossplane/upjet/pkg/resource"
+	"github.com/crossplane/upjet/pkg/resource/json"
+)
+
+// GetTerraformResourceType returns Terraform resource type for this HealthcareFHIRService
+func (mg *HealthcareFHIRService) GetTerraformResourceType() string {
+	return "azurerm_healthcare_fhir_service"
+}
+
+// GetConnectionDetailsMapping for this HealthcareFHIRService
+func (tr *HealthcareFHIRService) GetConnectionDetailsMapping() map[string]string {
+	return nil
+}
+
+// GetObservation of this HealthcareFHIRService
+func (tr *HealthcareFHIRService) GetObservation() (map[string]any, error) {
+	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(o, &base)
+}
+
+// SetObservation for this HealthcareFHIRService
+func (tr *HealthcareFHIRService) SetObservation(obs map[string]any) error {
+	p, err := json.TFParser.Marshal(obs)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
+}
+
+// GetID returns ID of underlying Terraform resource of this HealthcareFHIRService
+func (tr *HealthcareFHIRService) GetID() string {
+	if tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this HealthcareFHIRService
+func (tr *HealthcareFHIRService) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this HealthcareFHIRService
+func (tr *HealthcareFHIRService) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this HealthcareFHIRService
+func (tr *HealthcareFHIRService) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this HealthcareFHIRService. It returns
+// spec.forProvider, merged with spec.initProvider when
+// shouldMergeInitProvider is true; forProvider values take precedence.
+// NOTE(review): the generated error messages below use '%q' inside single
+// quotes, which double-quotes the resource name (e.g. '"foo"'); this should
+// be fixed in the upjet generator, not hand-edited here.
+func (tr *HealthcareFHIRService) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this HealthcareFHIRService using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *HealthcareFHIRService) LateInitialize(attrs []byte) (bool, error) {
+	params := &HealthcareFHIRServiceParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+	return li.LateInitialize(&tr.Spec.ForProvider, params)
+}
+
+// GetTerraformSchemaVersion returns the associated Terraform schema version
+func (tr *HealthcareFHIRService) GetTerraformSchemaVersion() int {
+	return 1
+}
diff --git a/apis/healthcareapis/v1beta2/zz_healthcarefhirservice_types.go b/apis/healthcareapis/v1beta2/zz_healthcarefhirservice_types.go
new file mode 100755
index 000000000..08cb83ea1
--- /dev/null
+++ b/apis/healthcareapis/v1beta2/zz_healthcarefhirservice_types.go
@@ -0,0 +1,432 @@
+// SPDX-FileCopyrightText: 2024 The Crossplane Authors
+//
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by upjet. DO NOT EDIT.
+
+package v1beta2
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
+)
+
+type CorsInitParameters struct {
+
+	// A set of headers to be allowed via CORS.
+	// +listType=set
+	AllowedHeaders []*string `json:"allowedHeaders,omitempty" tf:"allowed_headers,omitempty"`
+
+	// The methods to be allowed via CORS. Possible values are DELETE, GET, HEAD, MERGE, POST, OPTIONS, PATCH and PUT.
+	// +listType=set
+	AllowedMethods []*string `json:"allowedMethods,omitempty" tf:"allowed_methods,omitempty"`
+
+	// A set of origins to be allowed via CORS.
+	// +listType=set
+	AllowedOrigins []*string `json:"allowedOrigins,omitempty" tf:"allowed_origins,omitempty"`
+
+	// If credentials are allowed via CORS.
+	CredentialsAllowed *bool `json:"credentialsAllowed,omitempty" tf:"credentials_allowed,omitempty"`
+
+	// The max age to be allowed via CORS.
+	MaxAgeInSeconds *float64 `json:"maxAgeInSeconds,omitempty" tf:"max_age_in_seconds,omitempty"`
+}
+
+type CorsObservation struct {
+
+	// A set of headers to be allowed via CORS.
+	// +listType=set
+	AllowedHeaders []*string `json:"allowedHeaders,omitempty" tf:"allowed_headers,omitempty"`
+
+	// The methods to be allowed via CORS. Possible values are DELETE, GET, HEAD, MERGE, POST, OPTIONS, PATCH and PUT.
+	// +listType=set
+	AllowedMethods []*string `json:"allowedMethods,omitempty" tf:"allowed_methods,omitempty"`
+
+	// A set of origins to be allowed via CORS.
+	// +listType=set
+	AllowedOrigins []*string `json:"allowedOrigins,omitempty" tf:"allowed_origins,omitempty"`
+
+	// If credentials are allowed via CORS.
+	CredentialsAllowed *bool `json:"credentialsAllowed,omitempty" tf:"credentials_allowed,omitempty"`
+
+	// The max age to be allowed via CORS.
+	MaxAgeInSeconds *float64 `json:"maxAgeInSeconds,omitempty" tf:"max_age_in_seconds,omitempty"`
+}
+
+type CorsParameters struct {
+
+	// A set of headers to be allowed via CORS.
+	// +kubebuilder:validation:Optional
+	// +listType=set
+	AllowedHeaders []*string `json:"allowedHeaders" tf:"allowed_headers,omitempty"`
+
+	// The methods to be allowed via CORS. Possible values are DELETE, GET, HEAD, MERGE, POST, OPTIONS, PATCH and PUT.
+	// +kubebuilder:validation:Optional
+	// +listType=set
+	AllowedMethods []*string `json:"allowedMethods" tf:"allowed_methods,omitempty"`
+
+	// A set of origins to be allowed via CORS.
+	// +kubebuilder:validation:Optional
+	// +listType=set
+	AllowedOrigins []*string `json:"allowedOrigins" tf:"allowed_origins,omitempty"`
+
+	// If credentials are allowed via CORS.
+	// +kubebuilder:validation:Optional
+	CredentialsAllowed *bool `json:"credentialsAllowed,omitempty" tf:"credentials_allowed,omitempty"`
+
+	// The max age to be allowed via CORS.
+	// +kubebuilder:validation:Optional
+	MaxAgeInSeconds *float64 `json:"maxAgeInSeconds,omitempty" tf:"max_age_in_seconds,omitempty"`
+}
+
+type HealthcareFHIRServiceAuthenticationInitParameters struct {
+
+	// The intended audience to receive authentication tokens for the service.
+	Audience *string `json:"audience,omitempty" tf:"audience,omitempty"`
+
+	// The Azure Active Directory (tenant) that serves as the authentication authority to access the service.
+	// Authority must be registered to Azure AD and in the following format: https://{Azure-AD-endpoint}/{tenant-id}.
+	Authority *string `json:"authority,omitempty" tf:"authority,omitempty"`
+
+	// Whether smart proxy is enabled.
+	SmartProxyEnabled *bool `json:"smartProxyEnabled,omitempty" tf:"smart_proxy_enabled,omitempty"`
+}
+
+type HealthcareFHIRServiceAuthenticationObservation struct {
+
+	// The intended audience to receive authentication tokens for the service.
+	Audience *string `json:"audience,omitempty" tf:"audience,omitempty"`
+
+	// The Azure Active Directory (tenant) that serves as the authentication authority to access the service.
+	// Authority must be registered to Azure AD and in the following format: https://{Azure-AD-endpoint}/{tenant-id}.
+	Authority *string `json:"authority,omitempty" tf:"authority,omitempty"`
+
+	// Whether smart proxy is enabled.
+	SmartProxyEnabled *bool `json:"smartProxyEnabled,omitempty" tf:"smart_proxy_enabled,omitempty"`
+}
+
+type HealthcareFHIRServiceAuthenticationParameters struct {
+
+	// The intended audience to receive authentication tokens for the service.
+	// +kubebuilder:validation:Optional
+	Audience *string `json:"audience" tf:"audience,omitempty"`
+
+	// The Azure Active Directory (tenant) that serves as the authentication authority to access the service.
+	// Authority must be registered to Azure AD and in the following format: https://{Azure-AD-endpoint}/{tenant-id}.
+	// +kubebuilder:validation:Optional
+	Authority *string `json:"authority" tf:"authority,omitempty"`
+
+	// Whether smart proxy is enabled.
+	// +kubebuilder:validation:Optional
+	SmartProxyEnabled *bool `json:"smartProxyEnabled,omitempty" tf:"smart_proxy_enabled,omitempty"`
+}
+
+type HealthcareFHIRServiceIdentityInitParameters struct {
+
+	// A list of one or more Resource IDs for User Assigned Managed identities to assign. Required when type is set to UserAssigned.
+	// +listType=set
+	IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"`
+
+	// The type of managed identity to assign. Possible values are UserAssigned and SystemAssigned.
+	Type *string `json:"type,omitempty" tf:"type,omitempty"`
+}
+
+type HealthcareFHIRServiceIdentityObservation struct {
+
+	// A list of one or more Resource IDs for User Assigned Managed identities to assign. Required when type is set to UserAssigned.
+	// +listType=set
+	IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"`
+
+	// The Principal ID associated with this identity.
+	// NOTE(review): generated doc said "The ID of the Healthcare FHIR Service", which looks copy-pasted — confirm against provider docs.
+	PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"`
+
+	// The Tenant ID associated with this identity.
+	// NOTE(review): generated doc said "The ID of the Healthcare FHIR Service", which looks copy-pasted — confirm against provider docs.
+	TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"`
+
+	// The type of managed identity to assign. Possible values are UserAssigned and SystemAssigned.
+	Type *string `json:"type,omitempty" tf:"type,omitempty"`
+}
+
+type HealthcareFHIRServiceIdentityParameters struct {
+
+	// A list of one or more Resource IDs for User Assigned Managed identities to assign. Required when type is set to UserAssigned.
+	// +kubebuilder:validation:Optional
+	// +listType=set
+	IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"`
+
+	// The type of managed identity to assign. Possible values are UserAssigned and SystemAssigned.
+	// +kubebuilder:validation:Optional
+	Type *string `json:"type" tf:"type,omitempty"`
+}
+
+type HealthcareFHIRServiceInitParameters struct {
+
+	// A list of the access policies of the service instance.
+	// +listType=set
+	AccessPolicyObjectIds []*string `json:"accessPolicyObjectIds,omitempty" tf:"access_policy_object_ids,omitempty"`
+
+	// An authentication block as defined below.
+	Authentication *HealthcareFHIRServiceAuthenticationInitParameters `json:"authentication,omitempty" tf:"authentication,omitempty"`
+
+	// Specifies the name of the storage account which the operation configuration information is exported to.
+	ConfigurationExportStorageAccountName *string `json:"configurationExportStorageAccountName,omitempty" tf:"configuration_export_storage_account_name,omitempty"`
+
+	// A list of azure container registry settings used for convert data operation of the service instance.
+	// +listType=set
+	ContainerRegistryLoginServerURL []*string `json:"containerRegistryLoginServerUrl,omitempty" tf:"container_registry_login_server_url,omitempty"`
+
+	// A cors block as defined below.
+	Cors *CorsInitParameters `json:"cors,omitempty" tf:"cors,omitempty"`
+
+	// An identity block as defined below.
+	Identity *HealthcareFHIRServiceIdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"`
+
+	// Specifies the kind of the Healthcare FHIR Service. Possible values are: fhir-Stu3 and fhir-R4. Defaults to fhir-R4. Changing this forces a new Healthcare FHIR Service to be created.
+	Kind *string `json:"kind,omitempty" tf:"kind,omitempty"`
+
+	// Specifies the Azure Region where the Healthcare FHIR Service should be created. Changing this forces a new Healthcare FHIR Service to be created.
+	Location *string `json:"location,omitempty" tf:"location,omitempty"`
+
+	// A list of oci_artifact objects as defined below to describe OCI artifacts for export.
+	OciArtifact []OciArtifactInitParameters `json:"ociArtifact,omitempty" tf:"oci_artifact,omitempty"`
+
+	// Specifies the name of the Resource Group in which to create the Healthcare FHIR Service. Changing this forces a new resource to be created.
+	// +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup
+	ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"`
+
+	// Reference to a ResourceGroup in azure to populate resourceGroupName.
+	// +kubebuilder:validation:Optional
+	ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"`
+
+	// Selector for a ResourceGroup in azure to populate resourceGroupName.
+	// +kubebuilder:validation:Optional
+	ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"`
+
+	// A mapping of tags to assign to the Healthcare FHIR Service.
+	// +mapType=granular
+	Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"`
+}
+
+type HealthcareFHIRServiceObservation struct {
+
+	// A list of the access policies of the service instance.
+	// +listType=set
+	AccessPolicyObjectIds []*string `json:"accessPolicyObjectIds,omitempty" tf:"access_policy_object_ids,omitempty"`
+
+	// An authentication block as defined below.
+	Authentication *HealthcareFHIRServiceAuthenticationObservation `json:"authentication,omitempty" tf:"authentication,omitempty"`
+
+	// Specifies the name of the storage account which the operation configuration information is exported to.
+	ConfigurationExportStorageAccountName *string `json:"configurationExportStorageAccountName,omitempty" tf:"configuration_export_storage_account_name,omitempty"`
+
+	// A list of azure container registry settings used for convert data operation of the service instance.
+	// +listType=set
+	ContainerRegistryLoginServerURL []*string `json:"containerRegistryLoginServerUrl,omitempty" tf:"container_registry_login_server_url,omitempty"`
+
+	// A cors block as defined below.
+	Cors *CorsObservation `json:"cors,omitempty" tf:"cors,omitempty"`
+
+	// The ID of the Healthcare FHIR Service.
+	ID *string `json:"id,omitempty" tf:"id,omitempty"`
+
+	// An identity block as defined below.
+	Identity *HealthcareFHIRServiceIdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"`
+
+	// Specifies the kind of the Healthcare FHIR Service. Possible values are: fhir-Stu3 and fhir-R4. Defaults to fhir-R4. Changing this forces a new Healthcare FHIR Service to be created.
+	Kind *string `json:"kind,omitempty" tf:"kind,omitempty"`
+
+	// Specifies the Azure Region where the Healthcare FHIR Service should be created. Changing this forces a new Healthcare FHIR Service to be created.
+	Location *string `json:"location,omitempty" tf:"location,omitempty"`
+
+	// A list of oci_artifact objects as defined below to describe OCI artifacts for export.
+	OciArtifact []OciArtifactObservation `json:"ociArtifact,omitempty" tf:"oci_artifact,omitempty"`
+
+	// Whether public network access is enabled.
+	PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"`
+
+	// Specifies the name of the Resource Group in which to create the Healthcare FHIR Service. Changing this forces a new resource to be created.
+	ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"`
+
+	// A mapping of tags to assign to the Healthcare FHIR Service.
+	// +mapType=granular
+	Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"`
+
+	// Specifies the id of the Healthcare Workspace where the Healthcare FHIR Service should exist. Changing this forces a new Healthcare FHIR Service to be created.
+	WorkspaceID *string `json:"workspaceId,omitempty" tf:"workspace_id,omitempty"`
+}
+
+type HealthcareFHIRServiceParameters struct {
+
+	// A list of the access policies of the service instance.
+	// +kubebuilder:validation:Optional
+	// +listType=set
+	AccessPolicyObjectIds []*string `json:"accessPolicyObjectIds,omitempty" tf:"access_policy_object_ids,omitempty"`
+
+	// An authentication block as defined below.
+	// +kubebuilder:validation:Optional
+	Authentication *HealthcareFHIRServiceAuthenticationParameters `json:"authentication,omitempty" tf:"authentication,omitempty"`
+
+	// Specifies the name of the storage account which the operation configuration information is exported to.
+	// +kubebuilder:validation:Optional
+	ConfigurationExportStorageAccountName *string `json:"configurationExportStorageAccountName,omitempty" tf:"configuration_export_storage_account_name,omitempty"`
+
+	// A list of azure container registry settings used for convert data operation of the service instance.
+	// +kubebuilder:validation:Optional
+	// +listType=set
+	ContainerRegistryLoginServerURL []*string `json:"containerRegistryLoginServerUrl,omitempty" tf:"container_registry_login_server_url,omitempty"`
+
+	// A cors block as defined below.
+	// +kubebuilder:validation:Optional
+	Cors *CorsParameters `json:"cors,omitempty" tf:"cors,omitempty"`
+
+	// An identity block as defined below.
+	// +kubebuilder:validation:Optional
+	Identity *HealthcareFHIRServiceIdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"`
+
+	// Specifies the kind of the Healthcare FHIR Service. Possible values are: fhir-Stu3 and fhir-R4. Defaults to fhir-R4. Changing this forces a new Healthcare FHIR Service to be created.
+	// +kubebuilder:validation:Optional
+	Kind *string `json:"kind,omitempty" tf:"kind,omitempty"`
+
+	// Specifies the Azure Region where the Healthcare FHIR Service should be created. Changing this forces a new Healthcare FHIR Service to be created.
+	// +kubebuilder:validation:Optional
+	Location *string `json:"location,omitempty" tf:"location,omitempty"`
+
+	// A list of oci_artifact objects as defined below to describe OCI artifacts for export.
+	// +kubebuilder:validation:Optional
+	OciArtifact []OciArtifactParameters `json:"ociArtifact,omitempty" tf:"oci_artifact,omitempty"`
+
+	// Specifies the name of the Resource Group in which to create the Healthcare FHIR Service. Changing this forces a new resource to be created.
+	// +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup
+	// +kubebuilder:validation:Optional
+	ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"`
+
+	// Reference to a ResourceGroup in azure to populate resourceGroupName.
+	// +kubebuilder:validation:Optional
+	ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"`
+
+	// Selector for a ResourceGroup in azure to populate resourceGroupName.
+	// +kubebuilder:validation:Optional
+	ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"`
+
+	// A mapping of tags to assign to the Healthcare FHIR Service.
+	// +kubebuilder:validation:Optional
+	// +mapType=granular
+	Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"`
+
+	// Specifies the id of the Healthcare Workspace where the Healthcare FHIR Service should exist. Changing this forces a new Healthcare FHIR Service to be created.
+	// +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/healthcareapis/v1beta1.HealthcareWorkspace
+	// +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID()
+	// +kubebuilder:validation:Optional
+	WorkspaceID *string `json:"workspaceId,omitempty" tf:"workspace_id,omitempty"`
+
+	// Reference to a HealthcareWorkspace in healthcareapis to populate workspaceId.
+	// +kubebuilder:validation:Optional
+	WorkspaceIDRef *v1.Reference `json:"workspaceIdRef,omitempty" tf:"-"`
+
+	// Selector for a HealthcareWorkspace in healthcareapis to populate workspaceId.
+	// +kubebuilder:validation:Optional
+	WorkspaceIDSelector *v1.Selector `json:"workspaceIdSelector,omitempty" tf:"-"`
+}
+
+type OciArtifactInitParameters struct {
+
+	// A digest of an image within Azure container registry used for export operations of the service instance to narrow the artifacts down.
+	Digest *string `json:"digest,omitempty" tf:"digest,omitempty"`
+
+	// An image within Azure container registry used for export operations of the service instance.
+	ImageName *string `json:"imageName,omitempty" tf:"image_name,omitempty"`
+
+	// An Azure container registry used for export operations of the service instance.
+	LoginServer *string `json:"loginServer,omitempty" tf:"login_server,omitempty"`
+}
+
+type OciArtifactObservation struct {
+
+	// A digest of an image within Azure container registry used for export operations of the service instance to narrow the artifacts down.
+	Digest *string `json:"digest,omitempty" tf:"digest,omitempty"`
+
+	// An image within Azure container registry used for export operations of the service instance.
+	ImageName *string `json:"imageName,omitempty" tf:"image_name,omitempty"`
+
+	// An Azure container registry used for export operations of the service instance.
+	LoginServer *string `json:"loginServer,omitempty" tf:"login_server,omitempty"`
+}
+
+type OciArtifactParameters struct {
+
+	// A digest of an image within Azure container registry used for export operations of the service instance to narrow the artifacts down.
+	// +kubebuilder:validation:Optional
+	Digest *string `json:"digest,omitempty" tf:"digest,omitempty"`
+
+	// An image within Azure container registry used for export operations of the service instance.
+	// +kubebuilder:validation:Optional
+	ImageName *string `json:"imageName,omitempty" tf:"image_name,omitempty"`
+
+	// An Azure container registry used for export operations of the service instance.
+	// +kubebuilder:validation:Optional
+	LoginServer *string `json:"loginServer" tf:"login_server,omitempty"`
+}
+
+// HealthcareFHIRServiceSpec defines the desired state of HealthcareFHIRService
+type HealthcareFHIRServiceSpec struct {
+	v1.ResourceSpec `json:",inline"`
+	ForProvider     HealthcareFHIRServiceParameters `json:"forProvider"`
+	// THIS IS A BETA FIELD. It will be honored
+	// unless the Management Policies feature flag is disabled.
+	// InitProvider holds the same fields as ForProvider, with the exception
+	// of Identifier and other resource reference fields. The fields that are
+	// in InitProvider are merged into ForProvider when the resource is created.
+	// The same fields are also added to the terraform ignore_changes hook, to
+	// avoid updating them after creation. This is useful for fields that are
+	// required on creation, but we do not desire to update them after creation,
+	// for example because an external controller is managing them, like an
+	// autoscaler.
+	InitProvider HealthcareFHIRServiceInitParameters `json:"initProvider,omitempty"`
+}
+
+// HealthcareFHIRServiceStatus defines the observed state of HealthcareFHIRService.
+type HealthcareFHIRServiceStatus struct {
+	v1.ResourceStatus `json:",inline"`
+	AtProvider        HealthcareFHIRServiceObservation `json:"atProvider,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+
+// HealthcareFHIRService is the Schema for the HealthcareFHIRServices API. Manages a Healthcare FHIR (Fast Healthcare Interoperability Resources) Service.
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
+// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name"
+// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp"
+// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure}
+type HealthcareFHIRService struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.authentication) || (has(self.initProvider) && has(self.initProvider.authentication))",message="spec.forProvider.authentication is a required parameter"
+	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter"
+	Spec   HealthcareFHIRServiceSpec   `json:"spec"`
+	Status HealthcareFHIRServiceStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// HealthcareFHIRServiceList contains a list of HealthcareFHIRServices
+type HealthcareFHIRServiceList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []HealthcareFHIRService `json:"items"`
+}
+
+// Repository type metadata.
+var (
+	HealthcareFHIRService_Kind             = "HealthcareFHIRService"
+	HealthcareFHIRService_GroupKind        = schema.GroupKind{Group: CRDGroup, Kind: HealthcareFHIRService_Kind}.String()
+	HealthcareFHIRService_KindAPIVersion   = HealthcareFHIRService_Kind + "." + CRDGroupVersion.String()
+	HealthcareFHIRService_GroupVersionKind = CRDGroupVersion.WithKind(HealthcareFHIRService_Kind)
+)
+
+func init() {
+	SchemeBuilder.Register(&HealthcareFHIRService{}, &HealthcareFHIRServiceList{})
+}
diff --git a/apis/healthcareapis/v1beta2/zz_healthcaremedtechservice_terraformed.go b/apis/healthcareapis/v1beta2/zz_healthcaremedtechservice_terraformed.go
new file mode 100755
index 000000000..bf55dbe6a
--- /dev/null
+++ b/apis/healthcareapis/v1beta2/zz_healthcaremedtechservice_terraformed.go
@@ -0,0 +1,129 @@
+// SPDX-FileCopyrightText: 2024 The Crossplane Authors
+//
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by upjet. DO NOT EDIT.
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this HealthcareMedtechService +func (mg *HealthcareMedtechService) GetTerraformResourceType() string { + return "azurerm_healthcare_medtech_service" +} + +// GetConnectionDetailsMapping for this HealthcareMedtechService +func (tr *HealthcareMedtechService) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this HealthcareMedtechService +func (tr *HealthcareMedtechService) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this HealthcareMedtechService +func (tr *HealthcareMedtechService) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this HealthcareMedtechService +func (tr *HealthcareMedtechService) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this HealthcareMedtechService +func (tr *HealthcareMedtechService) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this HealthcareMedtechService +func (tr *HealthcareMedtechService) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this 
HealthcareMedtechService
+func (tr *HealthcareMedtechService) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this HealthcareMedtechService
+func (tr *HealthcareMedtechService) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this HealthcareMedtechService using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *HealthcareMedtechService) LateInitialize(attrs []byte) (bool, error) { + params := &HealthcareMedtechServiceParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *HealthcareMedtechService) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/healthcareapis/v1beta2/zz_healthcaremedtechservice_types.go b/apis/healthcareapis/v1beta2/zz_healthcaremedtechservice_types.go new file mode 100755 index 000000000..404457bd6 --- /dev/null +++ b/apis/healthcareapis/v1beta2/zz_healthcaremedtechservice_types.go @@ -0,0 +1,270 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type HealthcareMedtechServiceIdentityInitParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Healthcare Med Tech Service. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Healthcare Med Tech Service. Possible values are SystemAssigned. 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type HealthcareMedtechServiceIdentityObservation struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Healthcare Med Tech Service. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Principal ID associated with this System Assigned Managed Service Identity. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID associated with this System Assigned Managed Service Identity. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Healthcare Med Tech Service. Possible values are SystemAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type HealthcareMedtechServiceIdentityParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Healthcare Med Tech Service. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Healthcare Med Tech Service. Possible values are SystemAssigned. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type HealthcareMedtechServiceInitParameters struct { + + // Specifies the Device Mappings of the Med Tech Service. + DeviceMappingJSON *string `json:"deviceMappingJson,omitempty" tf:"device_mapping_json,omitempty"` + + // Specifies the Consumer Group of the Event Hub to connect to. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta1.ConsumerGroup + EventHubConsumerGroupName *string `json:"eventhubConsumerGroupName,omitempty" tf:"eventhub_consumer_group_name,omitempty"` + + // Reference to a ConsumerGroup in eventhub to populate eventhubConsumerGroupName. + // +kubebuilder:validation:Optional + EventHubConsumerGroupNameRef *v1.Reference `json:"eventhubConsumerGroupNameRef,omitempty" tf:"-"` + + // Selector for a ConsumerGroup in eventhub to populate eventhubConsumerGroupName. + // +kubebuilder:validation:Optional + EventHubConsumerGroupNameSelector *v1.Selector `json:"eventhubConsumerGroupNameSelector,omitempty" tf:"-"` + + // Specifies the name of the Event Hub to connect to. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta2.EventHub + EventHubName *string `json:"eventhubName,omitempty" tf:"eventhub_name,omitempty"` + + // Reference to a EventHub in eventhub to populate eventhubName. + // +kubebuilder:validation:Optional + EventHubNameRef *v1.Reference `json:"eventhubNameRef,omitempty" tf:"-"` + + // Selector for a EventHub in eventhub to populate eventhubName. + // +kubebuilder:validation:Optional + EventHubNameSelector *v1.Selector `json:"eventhubNameSelector,omitempty" tf:"-"` + + // Specifies the namespace name of the Event Hub to connect to. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta2.EventHubNamespace + EventHubNamespaceName *string `json:"eventhubNamespaceName,omitempty" tf:"eventhub_namespace_name,omitempty"` + + // Reference to a EventHubNamespace in eventhub to populate eventhubNamespaceName. + // +kubebuilder:validation:Optional + EventHubNamespaceNameRef *v1.Reference `json:"eventhubNamespaceNameRef,omitempty" tf:"-"` + + // Selector for a EventHubNamespace in eventhub to populate eventhubNamespaceName. 
+ // +kubebuilder:validation:Optional + EventHubNamespaceNameSelector *v1.Selector `json:"eventhubNamespaceNameSelector,omitempty" tf:"-"` + + // An identity block as defined below. + Identity *HealthcareMedtechServiceIdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Specifies the Azure Region where the Healthcare Med Tech Service should be created. Changing this forces a new Healthcare Med Tech Service to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A mapping of tags to assign to the Healthcare Med Tech Service. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type HealthcareMedtechServiceObservation struct { + + // Specifies the Device Mappings of the Med Tech Service. + DeviceMappingJSON *string `json:"deviceMappingJson,omitempty" tf:"device_mapping_json,omitempty"` + + // Specifies the Consumer Group of the Event Hub to connect to. + EventHubConsumerGroupName *string `json:"eventhubConsumerGroupName,omitempty" tf:"eventhub_consumer_group_name,omitempty"` + + // Specifies the name of the Event Hub to connect to. + EventHubName *string `json:"eventhubName,omitempty" tf:"eventhub_name,omitempty"` + + // Specifies the namespace name of the Event Hub to connect to. + EventHubNamespaceName *string `json:"eventhubNamespaceName,omitempty" tf:"eventhub_namespace_name,omitempty"` + + // The ID of the Healthcare Med Tech Service. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. + Identity *HealthcareMedtechServiceIdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // Specifies the Azure Region where the Healthcare Med Tech Service should be created. Changing this forces a new Healthcare Med Tech Service to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A mapping of tags to assign to the Healthcare Med Tech Service. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies the id of the Healthcare Workspace where the Healthcare Med Tech Service should exist. Changing this forces a new Healthcare Med Tech Service to be created. + WorkspaceID *string `json:"workspaceId,omitempty" tf:"workspace_id,omitempty"` +} + +type HealthcareMedtechServiceParameters struct { + + // Specifies the Device Mappings of the Med Tech Service. + // +kubebuilder:validation:Optional + DeviceMappingJSON *string `json:"deviceMappingJson,omitempty" tf:"device_mapping_json,omitempty"` + + // Specifies the Consumer Group of the Event Hub to connect to. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta1.ConsumerGroup + // +kubebuilder:validation:Optional + EventHubConsumerGroupName *string `json:"eventhubConsumerGroupName,omitempty" tf:"eventhub_consumer_group_name,omitempty"` + + // Reference to a ConsumerGroup in eventhub to populate eventhubConsumerGroupName. + // +kubebuilder:validation:Optional + EventHubConsumerGroupNameRef *v1.Reference `json:"eventhubConsumerGroupNameRef,omitempty" tf:"-"` + + // Selector for a ConsumerGroup in eventhub to populate eventhubConsumerGroupName. + // +kubebuilder:validation:Optional + EventHubConsumerGroupNameSelector *v1.Selector `json:"eventhubConsumerGroupNameSelector,omitempty" tf:"-"` + + // Specifies the name of the Event Hub to connect to. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta2.EventHub + // +kubebuilder:validation:Optional + EventHubName *string `json:"eventhubName,omitempty" tf:"eventhub_name,omitempty"` + + // Reference to a EventHub in eventhub to populate eventhubName. + // +kubebuilder:validation:Optional + EventHubNameRef *v1.Reference `json:"eventhubNameRef,omitempty" tf:"-"` + + // Selector for a EventHub in eventhub to populate eventhubName. 
+ // +kubebuilder:validation:Optional + EventHubNameSelector *v1.Selector `json:"eventhubNameSelector,omitempty" tf:"-"` + + // Specifies the namespace name of the Event Hub to connect to. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta2.EventHubNamespace + // +kubebuilder:validation:Optional + EventHubNamespaceName *string `json:"eventhubNamespaceName,omitempty" tf:"eventhub_namespace_name,omitempty"` + + // Reference to a EventHubNamespace in eventhub to populate eventhubNamespaceName. + // +kubebuilder:validation:Optional + EventHubNamespaceNameRef *v1.Reference `json:"eventhubNamespaceNameRef,omitempty" tf:"-"` + + // Selector for a EventHubNamespace in eventhub to populate eventhubNamespaceName. + // +kubebuilder:validation:Optional + EventHubNamespaceNameSelector *v1.Selector `json:"eventhubNamespaceNameSelector,omitempty" tf:"-"` + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *HealthcareMedtechServiceIdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Specifies the Azure Region where the Healthcare Med Tech Service should be created. Changing this forces a new Healthcare Med Tech Service to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A mapping of tags to assign to the Healthcare Med Tech Service. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies the id of the Healthcare Workspace where the Healthcare Med Tech Service should exist. Changing this forces a new Healthcare Med Tech Service to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/healthcareapis/v1beta1.HealthcareWorkspace + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + WorkspaceID *string `json:"workspaceId,omitempty" tf:"workspace_id,omitempty"` + + // Reference to a HealthcareWorkspace in healthcareapis to populate workspaceId. + // +kubebuilder:validation:Optional + WorkspaceIDRef *v1.Reference `json:"workspaceIdRef,omitempty" tf:"-"` + + // Selector for a HealthcareWorkspace in healthcareapis to populate workspaceId. + // +kubebuilder:validation:Optional + WorkspaceIDSelector *v1.Selector `json:"workspaceIdSelector,omitempty" tf:"-"` +} + +// HealthcareMedtechServiceSpec defines the desired state of HealthcareMedtechService +type HealthcareMedtechServiceSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider HealthcareMedtechServiceParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider HealthcareMedtechServiceInitParameters `json:"initProvider,omitempty"` +} + +// HealthcareMedtechServiceStatus defines the observed state of HealthcareMedtechService. 
+type HealthcareMedtechServiceStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider HealthcareMedtechServiceObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// HealthcareMedtechService is the Schema for the HealthcareMedtechServices API. Manages a Healthcare MedTech (Internet of Medical Things) devices Service. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type HealthcareMedtechService struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.deviceMappingJson) || (has(self.initProvider) && has(self.initProvider.deviceMappingJson))",message="spec.forProvider.deviceMappingJson is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + Spec HealthcareMedtechServiceSpec `json:"spec"` + Status HealthcareMedtechServiceStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// HealthcareMedtechServiceList contains a list of HealthcareMedtechServices +type HealthcareMedtechServiceList struct { + 
metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []HealthcareMedtechService `json:"items"` +} + +// Repository type metadata. +var ( + HealthcareMedtechService_Kind = "HealthcareMedtechService" + HealthcareMedtechService_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: HealthcareMedtechService_Kind}.String() + HealthcareMedtechService_KindAPIVersion = HealthcareMedtechService_Kind + "." + CRDGroupVersion.String() + HealthcareMedtechService_GroupVersionKind = CRDGroupVersion.WithKind(HealthcareMedtechService_Kind) +) + +func init() { + SchemeBuilder.Register(&HealthcareMedtechService{}, &HealthcareMedtechServiceList{}) +} diff --git a/apis/healthcareapis/v1beta2/zz_healthcareservice_terraformed.go b/apis/healthcareapis/v1beta2/zz_healthcareservice_terraformed.go new file mode 100755 index 000000000..012785178 --- /dev/null +++ b/apis/healthcareapis/v1beta2/zz_healthcareservice_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this HealthcareService +func (mg *HealthcareService) GetTerraformResourceType() string { + return "azurerm_healthcare_service" +} + +// GetConnectionDetailsMapping for this HealthcareService +func (tr *HealthcareService) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this HealthcareService +func (tr *HealthcareService) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this HealthcareService +func (tr *HealthcareService) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this HealthcareService +func (tr *HealthcareService) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this HealthcareService +func (tr *HealthcareService) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this HealthcareService +func (tr *HealthcareService) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this HealthcareService +func (tr *HealthcareService) GetInitParameters() (map[string]any, error) { + p, err := 
json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this HealthcareService
+func (tr *HealthcareService) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this HealthcareService using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *HealthcareService) LateInitialize(attrs []byte) (bool, error) {
+	params := &HealthcareServiceParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *HealthcareService) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/healthcareapis/v1beta2/zz_healthcareservice_types.go b/apis/healthcareapis/v1beta2/zz_healthcareservice_types.go new file mode 100755 index 000000000..ed51596db --- /dev/null +++ b/apis/healthcareapis/v1beta2/zz_healthcareservice_types.go @@ -0,0 +1,306 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AuthenticationConfigurationInitParameters struct { + + // The intended audience to receive authentication tokens for the service. The default value is https://azurehealthcareapis.com + Audience *string `json:"audience,omitempty" tf:"audience,omitempty"` + + // The Azure Active Directory (tenant) that serves as the authentication authority to access the service. + // Authority must be registered to Azure AD and in the following format: https://{Azure-AD-endpoint}/{tenant-id}. + Authority *string `json:"authority,omitempty" tf:"authority,omitempty"` + + // (Boolean) Enables the 'SMART on FHIR' option for mobile and web implementations. + SmartProxyEnabled *bool `json:"smartProxyEnabled,omitempty" tf:"smart_proxy_enabled,omitempty"` +} + +type AuthenticationConfigurationObservation struct { + + // The intended audience to receive authentication tokens for the service. The default value is https://azurehealthcareapis.com + Audience *string `json:"audience,omitempty" tf:"audience,omitempty"` + + // The Azure Active Directory (tenant) that serves as the authentication authority to access the service. 
+ // Authority must be registered to Azure AD and in the following format: https://{Azure-AD-endpoint}/{tenant-id}. + Authority *string `json:"authority,omitempty" tf:"authority,omitempty"` + + // (Boolean) Enables the 'SMART on FHIR' option for mobile and web implementations. + SmartProxyEnabled *bool `json:"smartProxyEnabled,omitempty" tf:"smart_proxy_enabled,omitempty"` +} + +type AuthenticationConfigurationParameters struct { + + // The intended audience to receive authentication tokens for the service. The default value is https://azurehealthcareapis.com + // +kubebuilder:validation:Optional + Audience *string `json:"audience,omitempty" tf:"audience,omitempty"` + + // The Azure Active Directory (tenant) that serves as the authentication authority to access the service. + // Authority must be registered to Azure AD and in the following format: https://{Azure-AD-endpoint}/{tenant-id}. + // +kubebuilder:validation:Optional + Authority *string `json:"authority,omitempty" tf:"authority,omitempty"` + + // (Boolean) Enables the 'SMART on FHIR' option for mobile and web implementations. + // +kubebuilder:validation:Optional + SmartProxyEnabled *bool `json:"smartProxyEnabled,omitempty" tf:"smart_proxy_enabled,omitempty"` +} + +type CorsConfigurationInitParameters struct { + + // (Boolean) If credentials are allowed via CORS. + AllowCredentials *bool `json:"allowCredentials,omitempty" tf:"allow_credentials,omitempty"` + + // A set of headers to be allowed via CORS. + // +listType=set + AllowedHeaders []*string `json:"allowedHeaders,omitempty" tf:"allowed_headers,omitempty"` + + // The methods to be allowed via CORS. Possible values are DELETE, GET, HEAD, MERGE, POST, OPTIONS, PATCH and PUT. + AllowedMethods []*string `json:"allowedMethods,omitempty" tf:"allowed_methods,omitempty"` + + // A set of origins to be allowed via CORS. 
+ // +listType=set + AllowedOrigins []*string `json:"allowedOrigins,omitempty" tf:"allowed_origins,omitempty"` + + // The max age to be allowed via CORS. + MaxAgeInSeconds *float64 `json:"maxAgeInSeconds,omitempty" tf:"max_age_in_seconds,omitempty"` +} + +type CorsConfigurationObservation struct { + + // (Boolean) If credentials are allowed via CORS. + AllowCredentials *bool `json:"allowCredentials,omitempty" tf:"allow_credentials,omitempty"` + + // A set of headers to be allowed via CORS. + // +listType=set + AllowedHeaders []*string `json:"allowedHeaders,omitempty" tf:"allowed_headers,omitempty"` + + // The methods to be allowed via CORS. Possible values are DELETE, GET, HEAD, MERGE, POST, OPTIONS, PATCH and PUT. + AllowedMethods []*string `json:"allowedMethods,omitempty" tf:"allowed_methods,omitempty"` + + // A set of origins to be allowed via CORS. + // +listType=set + AllowedOrigins []*string `json:"allowedOrigins,omitempty" tf:"allowed_origins,omitempty"` + + // The max age to be allowed via CORS. + MaxAgeInSeconds *float64 `json:"maxAgeInSeconds,omitempty" tf:"max_age_in_seconds,omitempty"` +} + +type CorsConfigurationParameters struct { + + // (Boolean) If credentials are allowed via CORS. + // +kubebuilder:validation:Optional + AllowCredentials *bool `json:"allowCredentials,omitempty" tf:"allow_credentials,omitempty"` + + // A set of headers to be allowed via CORS. + // +kubebuilder:validation:Optional + // +listType=set + AllowedHeaders []*string `json:"allowedHeaders,omitempty" tf:"allowed_headers,omitempty"` + + // The methods to be allowed via CORS. Possible values are DELETE, GET, HEAD, MERGE, POST, OPTIONS, PATCH and PUT. + // +kubebuilder:validation:Optional + AllowedMethods []*string `json:"allowedMethods,omitempty" tf:"allowed_methods,omitempty"` + + // A set of origins to be allowed via CORS. 
+ // +kubebuilder:validation:Optional + // +listType=set + AllowedOrigins []*string `json:"allowedOrigins,omitempty" tf:"allowed_origins,omitempty"` + + // The max age to be allowed via CORS. + // +kubebuilder:validation:Optional + MaxAgeInSeconds *float64 `json:"maxAgeInSeconds,omitempty" tf:"max_age_in_seconds,omitempty"` +} + +type HealthcareServiceInitParameters struct { + + // A set of Azure object IDs that are allowed to access the Service. + // +listType=set + AccessPolicyObjectIds []*string `json:"accessPolicyObjectIds,omitempty" tf:"access_policy_object_ids,omitempty"` + + // An authentication_configuration block as defined below. + AuthenticationConfiguration *AuthenticationConfigurationInitParameters `json:"authenticationConfiguration,omitempty" tf:"authentication_configuration,omitempty"` + + // A cors_configuration block as defined below. + CorsConfiguration *CorsConfigurationInitParameters `json:"corsConfiguration,omitempty" tf:"cors_configuration,omitempty"` + + // A versionless Key Vault Key ID for CMK encryption of the backing database. Changing this forces a new resource to be created. + CosmosDBKeyVaultKeyVersionlessID *string `json:"cosmosdbKeyVaultKeyVersionlessId,omitempty" tf:"cosmosdb_key_vault_key_versionless_id,omitempty"` + + // The provisioned throughput for the backing database. Range of 400-100000. Defaults to 1000. + CosmosDBThroughput *float64 `json:"cosmosdbThroughput,omitempty" tf:"cosmosdb_throughput,omitempty"` + + // The type of the service. Values at time of publication are: fhir, fhir-Stu3 and fhir-R4. Default value is fhir. + Kind *string `json:"kind,omitempty" tf:"kind,omitempty"` + + // Specifies the supported Azure Region where the Service should be created. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Whether public network access is enabled or disabled for this service instance. Defaults to true. 
+ PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type HealthcareServiceObservation struct { + + // A set of Azure object IDs that are allowed to access the Service. + // +listType=set + AccessPolicyObjectIds []*string `json:"accessPolicyObjectIds,omitempty" tf:"access_policy_object_ids,omitempty"` + + // An authentication_configuration block as defined below. + AuthenticationConfiguration *AuthenticationConfigurationObservation `json:"authenticationConfiguration,omitempty" tf:"authentication_configuration,omitempty"` + + // A cors_configuration block as defined below. + CorsConfiguration *CorsConfigurationObservation `json:"corsConfiguration,omitempty" tf:"cors_configuration,omitempty"` + + // A versionless Key Vault Key ID for CMK encryption of the backing database. Changing this forces a new resource to be created. + CosmosDBKeyVaultKeyVersionlessID *string `json:"cosmosdbKeyVaultKeyVersionlessId,omitempty" tf:"cosmosdb_key_vault_key_versionless_id,omitempty"` + + // The provisioned throughput for the backing database. Range of 400-100000. Defaults to 1000. + CosmosDBThroughput *float64 `json:"cosmosdbThroughput,omitempty" tf:"cosmosdb_throughput,omitempty"` + + // The ID of the Healthcare Service. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The type of the service. Values at time of publication are: fhir, fhir-Stu3 and fhir-R4. Default value is fhir. + Kind *string `json:"kind,omitempty" tf:"kind,omitempty"` + + // Specifies the supported Azure Region where the Service should be created. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Whether public network access is enabled or disabled for this service instance. Defaults to true. 
+ PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The name of the Resource Group in which to create the Service. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type HealthcareServiceParameters struct { + + // A set of Azure object IDs that are allowed to access the Service. + // +kubebuilder:validation:Optional + // +listType=set + AccessPolicyObjectIds []*string `json:"accessPolicyObjectIds,omitempty" tf:"access_policy_object_ids,omitempty"` + + // An authentication_configuration block as defined below. + // +kubebuilder:validation:Optional + AuthenticationConfiguration *AuthenticationConfigurationParameters `json:"authenticationConfiguration,omitempty" tf:"authentication_configuration,omitempty"` + + // A cors_configuration block as defined below. + // +kubebuilder:validation:Optional + CorsConfiguration *CorsConfigurationParameters `json:"corsConfiguration,omitempty" tf:"cors_configuration,omitempty"` + + // A versionless Key Vault Key ID for CMK encryption of the backing database. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + CosmosDBKeyVaultKeyVersionlessID *string `json:"cosmosdbKeyVaultKeyVersionlessId,omitempty" tf:"cosmosdb_key_vault_key_versionless_id,omitempty"` + + // The provisioned throughput for the backing database. Range of 400-100000. Defaults to 1000. + // +kubebuilder:validation:Optional + CosmosDBThroughput *float64 `json:"cosmosdbThroughput,omitempty" tf:"cosmosdb_throughput,omitempty"` + + // The type of the service. Values at time of publication are: fhir, fhir-Stu3 and fhir-R4. Default value is fhir. 
+ // +kubebuilder:validation:Optional + Kind *string `json:"kind,omitempty" tf:"kind,omitempty"` + + // Specifies the supported Azure Region where the Service should be created. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Whether public network access is enabled or disabled for this service instance. Defaults to true. + // +kubebuilder:validation:Optional + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The name of the Resource Group in which to create the Service. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +// HealthcareServiceSpec defines the desired state of HealthcareService +type HealthcareServiceSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider HealthcareServiceParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. 
+ // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider HealthcareServiceInitParameters `json:"initProvider,omitempty"` +} + +// HealthcareServiceStatus defines the observed state of HealthcareService. +type HealthcareServiceStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider HealthcareServiceObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// HealthcareService is the Schema for the HealthcareServices API. Manages a Healthcare Service. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type HealthcareService struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + Spec HealthcareServiceSpec `json:"spec"` + Status HealthcareServiceStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// HealthcareServiceList contains a list of HealthcareServices +type HealthcareServiceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []HealthcareService `json:"items"` +} + +// Repository type metadata. +var ( + HealthcareService_Kind = "HealthcareService" + HealthcareService_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: HealthcareService_Kind}.String() + HealthcareService_KindAPIVersion = HealthcareService_Kind + "." 
+ CRDGroupVersion.String() + HealthcareService_GroupVersionKind = CRDGroupVersion.WithKind(HealthcareService_Kind) +) + +func init() { + SchemeBuilder.Register(&HealthcareService{}, &HealthcareServiceList{}) +} diff --git a/apis/insights/v1beta1/zz_applicationinsights_types.go b/apis/insights/v1beta1/zz_applicationinsights_types.go index 70d9dbf9b..3d975280e 100755 --- a/apis/insights/v1beta1/zz_applicationinsights_types.go +++ b/apis/insights/v1beta1/zz_applicationinsights_types.go @@ -53,7 +53,7 @@ type ApplicationInsightsInitParameters struct { Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` // Specifies the id of a log analytics workspace resource. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/operationalinsights/v1beta1.Workspace + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/operationalinsights/v1beta2.Workspace // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() WorkspaceID *string `json:"workspaceId,omitempty" tf:"workspace_id,omitempty"` @@ -183,7 +183,7 @@ type ApplicationInsightsParameters struct { Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` // Specifies the id of a log analytics workspace resource. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/operationalinsights/v1beta1.Workspace + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/operationalinsights/v1beta2.Workspace // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional WorkspaceID *string `json:"workspaceId,omitempty" tf:"workspace_id,omitempty"` diff --git a/apis/insights/v1beta1/zz_generated.conversion_hubs.go b/apis/insights/v1beta1/zz_generated.conversion_hubs.go index 5e6a5bcc6..b2004a6d9 100755 --- a/apis/insights/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/insights/v1beta1/zz_generated.conversion_hubs.go @@ -18,53 +18,20 @@ func (tr *ApplicationInsightsAPIKey) Hub() {} // Hub marks this type as a conversion hub. func (tr *ApplicationInsightsSmartDetectionRule) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *ApplicationInsightsStandardWebTest) Hub() {} - // Hub marks this type as a conversion hub. func (tr *ApplicationInsightsWebTest) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *ApplicationInsightsWorkbook) Hub() {} - // Hub marks this type as a conversion hub. func (tr *ApplicationInsightsWorkbookTemplate) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *MonitorActionGroup) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *MonitorActivityLogAlert) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *MonitorAutoscaleSetting) Hub() {} - // Hub marks this type as a conversion hub. func (tr *MonitorDataCollectionEndpoint) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *MonitorDataCollectionRule) Hub() {} - // Hub marks this type as a conversion hub. func (tr *MonitorDataCollectionRuleAssociation) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *MonitorDiagnosticSetting) Hub() {} - -// Hub marks this type as a conversion hub. 
-func (tr *MonitorMetricAlert) Hub() {} - // Hub marks this type as a conversion hub. func (tr *MonitorPrivateLinkScope) Hub() {} // Hub marks this type as a conversion hub. func (tr *MonitorPrivateLinkScopedService) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *MonitorScheduledQueryRulesAlert) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *MonitorScheduledQueryRulesAlertV2) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *MonitorScheduledQueryRulesLog) Hub() {} diff --git a/apis/insights/v1beta1/zz_generated.conversion_spokes.go b/apis/insights/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..6ac1f5171 --- /dev/null +++ b/apis/insights/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,234 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this ApplicationInsightsStandardWebTest to the hub type. +func (tr *ApplicationInsightsStandardWebTest) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ApplicationInsightsStandardWebTest type. 
+func (tr *ApplicationInsightsStandardWebTest) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this ApplicationInsightsWorkbook to the hub type. +func (tr *ApplicationInsightsWorkbook) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ApplicationInsightsWorkbook type. +func (tr *ApplicationInsightsWorkbook) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this MonitorActionGroup to the hub type. 
+func (tr *MonitorActionGroup) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the MonitorActionGroup type. +func (tr *MonitorActionGroup) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this MonitorActivityLogAlert to the hub type. +func (tr *MonitorActivityLogAlert) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the MonitorActivityLogAlert type. 
+func (tr *MonitorActivityLogAlert) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this MonitorAutoscaleSetting to the hub type. +func (tr *MonitorAutoscaleSetting) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the MonitorAutoscaleSetting type. +func (tr *MonitorAutoscaleSetting) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this MonitorDataCollectionRule to the hub type. 
+func (tr *MonitorDataCollectionRule) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the MonitorDataCollectionRule type. +func (tr *MonitorDataCollectionRule) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this MonitorDiagnosticSetting to the hub type. +func (tr *MonitorDiagnosticSetting) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the MonitorDiagnosticSetting type. 
+func (tr *MonitorDiagnosticSetting) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this MonitorMetricAlert to the hub type. +func (tr *MonitorMetricAlert) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the MonitorMetricAlert type. +func (tr *MonitorMetricAlert) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this MonitorScheduledQueryRulesAlert to the hub type. 
+func (tr *MonitorScheduledQueryRulesAlert) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the MonitorScheduledQueryRulesAlert type. +func (tr *MonitorScheduledQueryRulesAlert) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this MonitorScheduledQueryRulesAlertV2 to the hub type. +func (tr *MonitorScheduledQueryRulesAlertV2) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the MonitorScheduledQueryRulesAlertV2 type. 
+func (tr *MonitorScheduledQueryRulesAlertV2) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this MonitorScheduledQueryRulesLog to the hub type. +func (tr *MonitorScheduledQueryRulesLog) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the MonitorScheduledQueryRulesLog type. 
+func (tr *MonitorScheduledQueryRulesLog) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/insights/v1beta1/zz_generated.resolvers.go b/apis/insights/v1beta1/zz_generated.resolvers.go index 93c2dd74e..d9df82643 100644 --- a/apis/insights/v1beta1/zz_generated.resolvers.go +++ b/apis/insights/v1beta1/zz_generated.resolvers.go @@ -47,7 +47,7 @@ func (mg *ApplicationInsights) ResolveReferences( // ResolveReferences of this A mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("operationalinsights.azure.upbound.io", "v1beta1", "Workspace", "WorkspaceList") + m, l, err = apisresolver.GetManagedResource("operationalinsights.azure.upbound.io", "v1beta2", "Workspace", "WorkspaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -66,7 +66,7 @@ func (mg *ApplicationInsights) ResolveReferences( // ResolveReferences of this A mg.Spec.ForProvider.WorkspaceID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.WorkspaceIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("operationalinsights.azure.upbound.io", "v1beta1", "Workspace", "WorkspaceList") + m, l, err = apisresolver.GetManagedResource("operationalinsights.azure.upbound.io", "v1beta2", "Workspace", "WorkspaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1150,7 +1150,7 @@ func (mg 
*MonitorDataCollectionRuleAssociation) ResolveReferences(ctx context.Co mg.Spec.ForProvider.DataCollectionEndpointID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.DataCollectionEndpointIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("insights.azure.upbound.io", "v1beta1", "MonitorDataCollectionRule", "MonitorDataCollectionRuleList") + m, l, err = apisresolver.GetManagedResource("insights.azure.upbound.io", "v1beta2", "MonitorDataCollectionRule", "MonitorDataCollectionRuleList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1169,7 +1169,7 @@ func (mg *MonitorDataCollectionRuleAssociation) ResolveReferences(ctx context.Co mg.Spec.ForProvider.DataCollectionRuleID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.DataCollectionRuleIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("compute.azure.upbound.io", "v1beta1", "LinuxVirtualMachine", "LinuxVirtualMachineList") + m, l, err = apisresolver.GetManagedResource("compute.azure.upbound.io", "v1beta2", "LinuxVirtualMachine", "LinuxVirtualMachineList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1207,7 +1207,7 @@ func (mg *MonitorDataCollectionRuleAssociation) ResolveReferences(ctx context.Co mg.Spec.InitProvider.DataCollectionEndpointID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.DataCollectionEndpointIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("insights.azure.upbound.io", "v1beta1", "MonitorDataCollectionRule", "MonitorDataCollectionRuleList") + m, l, err = apisresolver.GetManagedResource("insights.azure.upbound.io", "v1beta2", "MonitorDataCollectionRule", "MonitorDataCollectionRuleList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for 
reference resolution") } diff --git a/apis/insights/v1beta1/zz_monitordatacollectionruleassociation_types.go b/apis/insights/v1beta1/zz_monitordatacollectionruleassociation_types.go index 13aef1e1a..2b85b3a0c 100755 --- a/apis/insights/v1beta1/zz_monitordatacollectionruleassociation_types.go +++ b/apis/insights/v1beta1/zz_monitordatacollectionruleassociation_types.go @@ -29,7 +29,7 @@ type MonitorDataCollectionRuleAssociationInitParameters struct { DataCollectionEndpointIDSelector *v1.Selector `json:"dataCollectionEndpointIdSelector,omitempty" tf:"-"` // The ID of the Data Collection Rule which will be associated to the target resource. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/insights/v1beta1.MonitorDataCollectionRule + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/insights/v1beta2.MonitorDataCollectionRule // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() DataCollectionRuleID *string `json:"dataCollectionRuleId,omitempty" tf:"data_collection_rule_id,omitempty"` @@ -80,7 +80,7 @@ type MonitorDataCollectionRuleAssociationParameters struct { DataCollectionEndpointIDSelector *v1.Selector `json:"dataCollectionEndpointIdSelector,omitempty" tf:"-"` // The ID of the Data Collection Rule which will be associated to the target resource. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/insights/v1beta1.MonitorDataCollectionRule + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/insights/v1beta2.MonitorDataCollectionRule // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional DataCollectionRuleID *string `json:"dataCollectionRuleId,omitempty" tf:"data_collection_rule_id,omitempty"` @@ -98,7 +98,7 @@ type MonitorDataCollectionRuleAssociationParameters struct { Description *string `json:"description,omitempty" tf:"description,omitempty"` // The ID of the Azure Resource which to associate to a Data Collection Rule or a Data Collection Endpoint. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/compute/v1beta1.LinuxVirtualMachine + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/compute/v1beta2.LinuxVirtualMachine // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional TargetResourceID *string `json:"targetResourceId,omitempty" tf:"target_resource_id,omitempty"` diff --git a/apis/insights/v1beta2/zz_applicationinsightsstandardwebtest_terraformed.go b/apis/insights/v1beta2/zz_applicationinsightsstandardwebtest_terraformed.go new file mode 100755 index 000000000..806190561 --- /dev/null +++ b/apis/insights/v1beta2/zz_applicationinsightsstandardwebtest_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ApplicationInsightsStandardWebTest +func (mg *ApplicationInsightsStandardWebTest) GetTerraformResourceType() string { + return "azurerm_application_insights_standard_web_test" +} + +// GetConnectionDetailsMapping for this ApplicationInsightsStandardWebTest +func (tr *ApplicationInsightsStandardWebTest) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ApplicationInsightsStandardWebTest +func (tr *ApplicationInsightsStandardWebTest) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ApplicationInsightsStandardWebTest +func (tr *ApplicationInsightsStandardWebTest) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ApplicationInsightsStandardWebTest +func (tr *ApplicationInsightsStandardWebTest) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ApplicationInsightsStandardWebTest +func (tr *ApplicationInsightsStandardWebTest) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ApplicationInsightsStandardWebTest +func (tr *ApplicationInsightsStandardWebTest) SetParameters(params map[string]any) error { + p, err := 
json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ApplicationInsightsStandardWebTest +func (tr *ApplicationInsightsStandardWebTest) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this ApplicationInsightsStandardWebTest +func (tr *ApplicationInsightsStandardWebTest) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ApplicationInsightsStandardWebTest using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *ApplicationInsightsStandardWebTest) LateInitialize(attrs []byte) (bool, error) { + params := &ApplicationInsightsStandardWebTestParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ApplicationInsightsStandardWebTest) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/insights/v1beta2/zz_applicationinsightsstandardwebtest_types.go b/apis/insights/v1beta2/zz_applicationinsightsstandardwebtest_types.go new file mode 100755 index 000000000..b2ae87871 --- /dev/null +++ b/apis/insights/v1beta2/zz_applicationinsightsstandardwebtest_types.go @@ -0,0 +1,427 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ApplicationInsightsStandardWebTestInitParameters struct { + + // The ID of the Application Insights instance on which the WebTest operates. Changing this forces a new Application Insights Standard WebTest to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/insights/v1beta1.ApplicationInsights + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + ApplicationInsightsID *string `json:"applicationInsightsId,omitempty" tf:"application_insights_id,omitempty"` + + // Reference to a ApplicationInsights in insights to populate applicationInsightsId. + // +kubebuilder:validation:Optional + ApplicationInsightsIDRef *v1.Reference `json:"applicationInsightsIdRef,omitempty" tf:"-"` + + // Selector for a ApplicationInsights in insights to populate applicationInsightsId. + // +kubebuilder:validation:Optional + ApplicationInsightsIDSelector *v1.Selector `json:"applicationInsightsIdSelector,omitempty" tf:"-"` + + // Purpose/user defined descriptive test for this WebTest. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Should the WebTest be enabled? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Interval in seconds between test runs for this WebTest. Valid options are 300, 600 and 900. Defaults to 300. + Frequency *float64 `json:"frequency,omitempty" tf:"frequency,omitempty"` + + // Specifies a list of where to physically run the tests from to give global coverage for accessibility of your application. + GeoLocations []*string `json:"geoLocations,omitempty" tf:"geo_locations,omitempty"` + + // The Azure Region where the Application Insights Standard WebTest should exist. Changing this forces a new Application Insights Standard WebTest to be created. It needs to correlate with location of the parent resource (azurerm_application_insights) + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A request block as defined below. + Request *RequestInitParameters `json:"request,omitempty" tf:"request,omitempty"` + + // Should the retry on WebTest failure be enabled? 
+ RetryEnabled *bool `json:"retryEnabled,omitempty" tf:"retry_enabled,omitempty"` + + // A mapping of tags which should be assigned to the Application Insights Standard WebTest. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Seconds until this WebTest will timeout and fail. Default is 30. + Timeout *float64 `json:"timeout,omitempty" tf:"timeout,omitempty"` + + // A validation_rules block as defined below. + ValidationRules *ValidationRulesInitParameters `json:"validationRules,omitempty" tf:"validation_rules,omitempty"` +} + +type ApplicationInsightsStandardWebTestObservation struct { + + // The ID of the Application Insights instance on which the WebTest operates. Changing this forces a new Application Insights Standard WebTest to be created. + ApplicationInsightsID *string `json:"applicationInsightsId,omitempty" tf:"application_insights_id,omitempty"` + + // Purpose/user defined descriptive test for this WebTest. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Should the WebTest be enabled? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Interval in seconds between test runs for this WebTest. Valid options are 300, 600 and 900. Defaults to 300. + Frequency *float64 `json:"frequency,omitempty" tf:"frequency,omitempty"` + + // Specifies a list of where to physically run the tests from to give global coverage for accessibility of your application. + GeoLocations []*string `json:"geoLocations,omitempty" tf:"geo_locations,omitempty"` + + // The ID of the Application Insights Standard WebTest. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The Azure Region where the Application Insights Standard WebTest should exist. Changing this forces a new Application Insights Standard WebTest to be created. 
It needs to correlate with location of the parent resource (azurerm_application_insights) + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A request block as defined below. + Request *RequestObservation `json:"request,omitempty" tf:"request,omitempty"` + + // The name of the Resource Group where the Application Insights Standard WebTest should exist. Changing this forces a new Application Insights Standard WebTest to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Should the retry on WebTest failure be enabled? + RetryEnabled *bool `json:"retryEnabled,omitempty" tf:"retry_enabled,omitempty"` + + // Unique ID of this WebTest. This is typically the same value as the Name field. + SyntheticMonitorID *string `json:"syntheticMonitorId,omitempty" tf:"synthetic_monitor_id,omitempty"` + + // A mapping of tags which should be assigned to the Application Insights Standard WebTest. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Seconds until this WebTest will timeout and fail. Default is 30. + Timeout *float64 `json:"timeout,omitempty" tf:"timeout,omitempty"` + + // A validation_rules block as defined below. + ValidationRules *ValidationRulesObservation `json:"validationRules,omitempty" tf:"validation_rules,omitempty"` +} + +type ApplicationInsightsStandardWebTestParameters struct { + + // The ID of the Application Insights instance on which the WebTest operates. Changing this forces a new Application Insights Standard WebTest to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/insights/v1beta1.ApplicationInsights + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + ApplicationInsightsID *string `json:"applicationInsightsId,omitempty" tf:"application_insights_id,omitempty"` + + // Reference to a ApplicationInsights in insights to populate applicationInsightsId. + // +kubebuilder:validation:Optional + ApplicationInsightsIDRef *v1.Reference `json:"applicationInsightsIdRef,omitempty" tf:"-"` + + // Selector for a ApplicationInsights in insights to populate applicationInsightsId. + // +kubebuilder:validation:Optional + ApplicationInsightsIDSelector *v1.Selector `json:"applicationInsightsIdSelector,omitempty" tf:"-"` + + // Purpose/user defined descriptive test for this WebTest. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Should the WebTest be enabled? + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Interval in seconds between test runs for this WebTest. Valid options are 300, 600 and 900. Defaults to 300. + // +kubebuilder:validation:Optional + Frequency *float64 `json:"frequency,omitempty" tf:"frequency,omitempty"` + + // Specifies a list of where to physically run the tests from to give global coverage for accessibility of your application. + // +kubebuilder:validation:Optional + GeoLocations []*string `json:"geoLocations,omitempty" tf:"geo_locations,omitempty"` + + // The Azure Region where the Application Insights Standard WebTest should exist. Changing this forces a new Application Insights Standard WebTest to be created. 
It needs to correlate with location of the parent resource (azurerm_application_insights) + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A request block as defined below. + // +kubebuilder:validation:Optional + Request *RequestParameters `json:"request,omitempty" tf:"request,omitempty"` + + // The name of the Resource Group where the Application Insights Standard WebTest should exist. Changing this forces a new Application Insights Standard WebTest to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // Should the retry on WebTest failure be enabled? + // +kubebuilder:validation:Optional + RetryEnabled *bool `json:"retryEnabled,omitempty" tf:"retry_enabled,omitempty"` + + // A mapping of tags which should be assigned to the Application Insights Standard WebTest. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Seconds until this WebTest will timeout and fail. Default is 30. + // +kubebuilder:validation:Optional + Timeout *float64 `json:"timeout,omitempty" tf:"timeout,omitempty"` + + // A validation_rules block as defined below. 
+ // +kubebuilder:validation:Optional + ValidationRules *ValidationRulesParameters `json:"validationRules,omitempty" tf:"validation_rules,omitempty"` +} + +type ContentInitParameters struct { + + // A string value containing the content to match on. + ContentMatch *string `json:"contentMatch,omitempty" tf:"content_match,omitempty"` + + // Ignore the casing in the content_match value. + IgnoreCase *bool `json:"ignoreCase,omitempty" tf:"ignore_case,omitempty"` + + // If the content of content_match is found, pass the test. If set to false, the WebTest is failing if the content of content_match is found. + PassIfTextFound *bool `json:"passIfTextFound,omitempty" tf:"pass_if_text_found,omitempty"` +} + +type ContentObservation struct { + + // A string value containing the content to match on. + ContentMatch *string `json:"contentMatch,omitempty" tf:"content_match,omitempty"` + + // Ignore the casing in the content_match value. + IgnoreCase *bool `json:"ignoreCase,omitempty" tf:"ignore_case,omitempty"` + + // If the content of content_match is found, pass the test. If set to false, the WebTest is failing if the content of content_match is found. + PassIfTextFound *bool `json:"passIfTextFound,omitempty" tf:"pass_if_text_found,omitempty"` +} + +type ContentParameters struct { + + // A string value containing the content to match on. + // +kubebuilder:validation:Optional + ContentMatch *string `json:"contentMatch" tf:"content_match,omitempty"` + + // Ignore the casing in the content_match value. + // +kubebuilder:validation:Optional + IgnoreCase *bool `json:"ignoreCase,omitempty" tf:"ignore_case,omitempty"` + + // If the content of content_match is found, pass the test. If set to false, the WebTest is failing if the content of content_match is found. 
+ // +kubebuilder:validation:Optional + PassIfTextFound *bool `json:"passIfTextFound,omitempty" tf:"pass_if_text_found,omitempty"` +} + +type HeaderInitParameters struct { + + // The name which should be used for this Application Insights Standard WebTest. Changing this forces a new Application Insights Standard WebTest to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The value which should be used for a header in the request. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type HeaderObservation struct { + + // The name which should be used for this Application Insights Standard WebTest. Changing this forces a new Application Insights Standard WebTest to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The value which should be used for a header in the request. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type HeaderParameters struct { + + // The name which should be used for this Application Insights Standard WebTest. Changing this forces a new Application Insights Standard WebTest to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The value which should be used for a header in the request. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type RequestInitParameters struct { + + // The WebTest request body. + Body *string `json:"body,omitempty" tf:"body,omitempty"` + + // Should the following of redirects be enabled? Defaults to true. + FollowRedirectsEnabled *bool `json:"followRedirectsEnabled,omitempty" tf:"follow_redirects_enabled,omitempty"` + + // Which HTTP verb to use for the call. Options are 'GET', 'POST', 'PUT', 'PATCH', and 'DELETE'. Defaults to GET. + HTTPVerb *string `json:"httpVerb,omitempty" tf:"http_verb,omitempty"` + + // One or more header blocks as defined above. 
+ Header []HeaderInitParameters `json:"header,omitempty" tf:"header,omitempty"` + + // Should the parsing of dependend requests be enabled? Defaults to true. + ParseDependentRequestsEnabled *bool `json:"parseDependentRequestsEnabled,omitempty" tf:"parse_dependent_requests_enabled,omitempty"` + + // The WebTest request URL. + URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +type RequestObservation struct { + + // The WebTest request body. + Body *string `json:"body,omitempty" tf:"body,omitempty"` + + // Should the following of redirects be enabled? Defaults to true. + FollowRedirectsEnabled *bool `json:"followRedirectsEnabled,omitempty" tf:"follow_redirects_enabled,omitempty"` + + // Which HTTP verb to use for the call. Options are 'GET', 'POST', 'PUT', 'PATCH', and 'DELETE'. Defaults to GET. + HTTPVerb *string `json:"httpVerb,omitempty" tf:"http_verb,omitempty"` + + // One or more header blocks as defined above. + Header []HeaderObservation `json:"header,omitempty" tf:"header,omitempty"` + + // Should the parsing of dependend requests be enabled? Defaults to true. + ParseDependentRequestsEnabled *bool `json:"parseDependentRequestsEnabled,omitempty" tf:"parse_dependent_requests_enabled,omitempty"` + + // The WebTest request URL. + URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +type RequestParameters struct { + + // The WebTest request body. + // +kubebuilder:validation:Optional + Body *string `json:"body,omitempty" tf:"body,omitempty"` + + // Should the following of redirects be enabled? Defaults to true. + // +kubebuilder:validation:Optional + FollowRedirectsEnabled *bool `json:"followRedirectsEnabled,omitempty" tf:"follow_redirects_enabled,omitempty"` + + // Which HTTP verb to use for the call. Options are 'GET', 'POST', 'PUT', 'PATCH', and 'DELETE'. Defaults to GET. + // +kubebuilder:validation:Optional + HTTPVerb *string `json:"httpVerb,omitempty" tf:"http_verb,omitempty"` + + // One or more header blocks as defined above. 
+ // +kubebuilder:validation:Optional + Header []HeaderParameters `json:"header,omitempty" tf:"header,omitempty"` + + // Should the parsing of dependend requests be enabled? Defaults to true. + // +kubebuilder:validation:Optional + ParseDependentRequestsEnabled *bool `json:"parseDependentRequestsEnabled,omitempty" tf:"parse_dependent_requests_enabled,omitempty"` + + // The WebTest request URL. + // +kubebuilder:validation:Optional + URL *string `json:"url" tf:"url,omitempty"` +} + +type ValidationRulesInitParameters struct { + + // A content block as defined above. + Content *ContentInitParameters `json:"content,omitempty" tf:"content,omitempty"` + + // The expected status code of the response. Default is '200', '0' means 'response code < 400' + ExpectedStatusCode *float64 `json:"expectedStatusCode,omitempty" tf:"expected_status_code,omitempty"` + + // The number of days of SSL certificate validity remaining for the checked endpoint. If the certificate has a shorter remaining lifetime left, the test will fail. This number should be between 1 and 365. + SSLCertRemainingLifetime *float64 `json:"sslCertRemainingLifetime,omitempty" tf:"ssl_cert_remaining_lifetime,omitempty"` + + // Should the SSL check be enabled? + SSLCheckEnabled *bool `json:"sslCheckEnabled,omitempty" tf:"ssl_check_enabled,omitempty"` +} + +type ValidationRulesObservation struct { + + // A content block as defined above. + Content *ContentObservation `json:"content,omitempty" tf:"content,omitempty"` + + // The expected status code of the response. Default is '200', '0' means 'response code < 400' + ExpectedStatusCode *float64 `json:"expectedStatusCode,omitempty" tf:"expected_status_code,omitempty"` + + // The number of days of SSL certificate validity remaining for the checked endpoint. If the certificate has a shorter remaining lifetime left, the test will fail. This number should be between 1 and 365. 
+ SSLCertRemainingLifetime *float64 `json:"sslCertRemainingLifetime,omitempty" tf:"ssl_cert_remaining_lifetime,omitempty"` + + // Should the SSL check be enabled? + SSLCheckEnabled *bool `json:"sslCheckEnabled,omitempty" tf:"ssl_check_enabled,omitempty"` +} + +type ValidationRulesParameters struct { + + // A content block as defined above. + // +kubebuilder:validation:Optional + Content *ContentParameters `json:"content,omitempty" tf:"content,omitempty"` + + // The expected status code of the response. Default is '200', '0' means 'response code < 400' + // +kubebuilder:validation:Optional + ExpectedStatusCode *float64 `json:"expectedStatusCode,omitempty" tf:"expected_status_code,omitempty"` + + // The number of days of SSL certificate validity remaining for the checked endpoint. If the certificate has a shorter remaining lifetime left, the test will fail. This number should be between 1 and 365. + // +kubebuilder:validation:Optional + SSLCertRemainingLifetime *float64 `json:"sslCertRemainingLifetime,omitempty" tf:"ssl_cert_remaining_lifetime,omitempty"` + + // Should the SSL check be enabled? + // +kubebuilder:validation:Optional + SSLCheckEnabled *bool `json:"sslCheckEnabled,omitempty" tf:"ssl_check_enabled,omitempty"` +} + +// ApplicationInsightsStandardWebTestSpec defines the desired state of ApplicationInsightsStandardWebTest +type ApplicationInsightsStandardWebTestSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ApplicationInsightsStandardWebTestParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ApplicationInsightsStandardWebTestInitParameters `json:"initProvider,omitempty"` +} + +// ApplicationInsightsStandardWebTestStatus defines the observed state of ApplicationInsightsStandardWebTest. +type ApplicationInsightsStandardWebTestStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ApplicationInsightsStandardWebTestObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ApplicationInsightsStandardWebTest is the Schema for the ApplicationInsightsStandardWebTests API. Manages a Application Insights Standard WebTest. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type ApplicationInsightsStandardWebTest struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.geoLocations) || (has(self.initProvider) && has(self.initProvider.geoLocations))",message="spec.forProvider.geoLocations is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || 
has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.request) || (has(self.initProvider) && has(self.initProvider.request))",message="spec.forProvider.request is a required parameter" + Spec ApplicationInsightsStandardWebTestSpec `json:"spec"` + Status ApplicationInsightsStandardWebTestStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ApplicationInsightsStandardWebTestList contains a list of ApplicationInsightsStandardWebTests +type ApplicationInsightsStandardWebTestList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ApplicationInsightsStandardWebTest `json:"items"` +} + +// Repository type metadata. +var ( + ApplicationInsightsStandardWebTest_Kind = "ApplicationInsightsStandardWebTest" + ApplicationInsightsStandardWebTest_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ApplicationInsightsStandardWebTest_Kind}.String() + ApplicationInsightsStandardWebTest_KindAPIVersion = ApplicationInsightsStandardWebTest_Kind + "." 
+ CRDGroupVersion.String() + ApplicationInsightsStandardWebTest_GroupVersionKind = CRDGroupVersion.WithKind(ApplicationInsightsStandardWebTest_Kind) +) + +func init() { + SchemeBuilder.Register(&ApplicationInsightsStandardWebTest{}, &ApplicationInsightsStandardWebTestList{}) +} diff --git a/apis/insights/v1beta2/zz_applicationinsightsworkbook_terraformed.go b/apis/insights/v1beta2/zz_applicationinsightsworkbook_terraformed.go new file mode 100755 index 000000000..e6108bc8d --- /dev/null +++ b/apis/insights/v1beta2/zz_applicationinsightsworkbook_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ApplicationInsightsWorkbook +func (mg *ApplicationInsightsWorkbook) GetTerraformResourceType() string { + return "azurerm_application_insights_workbook" +} + +// GetConnectionDetailsMapping for this ApplicationInsightsWorkbook +func (tr *ApplicationInsightsWorkbook) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ApplicationInsightsWorkbook +func (tr *ApplicationInsightsWorkbook) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ApplicationInsightsWorkbook +func (tr *ApplicationInsightsWorkbook) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ApplicationInsightsWorkbook +func 
(tr *ApplicationInsightsWorkbook) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ApplicationInsightsWorkbook +func (tr *ApplicationInsightsWorkbook) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ApplicationInsightsWorkbook +func (tr *ApplicationInsightsWorkbook) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ApplicationInsightsWorkbook +func (tr *ApplicationInsightsWorkbook) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this ApplicationInsightsWorkbook +func (tr *ApplicationInsightsWorkbook) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ApplicationInsightsWorkbook using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ApplicationInsightsWorkbook) LateInitialize(attrs []byte) (bool, error) { + params := &ApplicationInsightsWorkbookParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ApplicationInsightsWorkbook) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/insights/v1beta2/zz_applicationinsightsworkbook_types.go b/apis/insights/v1beta2/zz_applicationinsightsworkbook_types.go new file mode 100755 index 000000000..c7e81c3b8 --- /dev/null +++ b/apis/insights/v1beta2/zz_applicationinsightsworkbook_types.go @@ -0,0 +1,259 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ApplicationInsightsWorkbookInitParameters struct { + + // Workbook category, as defined by the user at creation time. There may be additional category types beyond the following: workbook, sentinel. 
Defaults to workbook. + Category *string `json:"category,omitempty" tf:"category,omitempty"` + + // Configuration of this particular workbook. Configuration data is a string containing valid JSON. + DataJSON *string `json:"dataJson,omitempty" tf:"data_json,omitempty"` + + // Specifies the description of the workbook. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Specifies the user-defined name (display name) of the workbook. + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // An identity block as defined below. Changing this forces a new Workbook to be created. + Identity *IdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Specifies the Azure Region where the Workbook should exist. Changing this forces a new Workbook to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the name of this Workbook as a UUID/GUID. It should not contain any uppercase letters. Changing this forces a new Workbook to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies the name of the Resource Group where the Workbook should exist. Changing this forces a new Workbook to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // Resource ID for a source resource. It should not contain any uppercase letters. Defaults to azure monitor. 
+ SourceID *string `json:"sourceId,omitempty" tf:"source_id,omitempty"` + + // Specifies the Resource Manager ID of the Storage Container when bring your own storage is used. Changing this forces a new Workbook to be created. + StorageContainerID *string `json:"storageContainerId,omitempty" tf:"storage_container_id,omitempty"` + + // A mapping of tags which should be assigned to the Workbook. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ApplicationInsightsWorkbookObservation struct { + + // Workbook category, as defined by the user at creation time. There may be additional category types beyond the following: workbook, sentinel. Defaults to workbook. + Category *string `json:"category,omitempty" tf:"category,omitempty"` + + // Configuration of this particular workbook. Configuration data is a string containing valid JSON. + DataJSON *string `json:"dataJson,omitempty" tf:"data_json,omitempty"` + + // Specifies the description of the workbook. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Specifies the user-defined name (display name) of the workbook. + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // The ID of the Workbook. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. Changing this forces a new Workbook to be created. + Identity *IdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // Specifies the Azure Region where the Workbook should exist. Changing this forces a new Workbook to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the name of this Workbook as a UUID/GUID. It should not contain any uppercase letters. Changing this forces a new Workbook to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies the name of the Resource Group where the Workbook should exist. 
Changing this forces a new Workbook to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Resource ID for a source resource. It should not contain any uppercase letters. Defaults to azure monitor. + SourceID *string `json:"sourceId,omitempty" tf:"source_id,omitempty"` + + // Specifies the Resource Manager ID of the Storage Container when bring your own storage is used. Changing this forces a new Workbook to be created. + StorageContainerID *string `json:"storageContainerId,omitempty" tf:"storage_container_id,omitempty"` + + // A mapping of tags which should be assigned to the Workbook. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ApplicationInsightsWorkbookParameters struct { + + // Workbook category, as defined by the user at creation time. There may be additional category types beyond the following: workbook, sentinel. Defaults to workbook. + // +kubebuilder:validation:Optional + Category *string `json:"category,omitempty" tf:"category,omitempty"` + + // Configuration of this particular workbook. Configuration data is a string containing valid JSON. + // +kubebuilder:validation:Optional + DataJSON *string `json:"dataJson,omitempty" tf:"data_json,omitempty"` + + // Specifies the description of the workbook. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Specifies the user-defined name (display name) of the workbook. + // +kubebuilder:validation:Optional + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // An identity block as defined below. Changing this forces a new Workbook to be created. + // +kubebuilder:validation:Optional + Identity *IdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Specifies the Azure Region where the Workbook should exist. Changing this forces a new Workbook to be created. 
+ // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the name of this Workbook as a UUID/GUID. It should not contain any uppercase letters. Changing this forces a new Workbook to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies the name of the Resource Group where the Workbook should exist. Changing this forces a new Workbook to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // Resource ID for a source resource. It should not contain any uppercase letters. Defaults to azure monitor. + // +kubebuilder:validation:Optional + SourceID *string `json:"sourceId,omitempty" tf:"source_id,omitempty"` + + // Specifies the Resource Manager ID of the Storage Container when bring your own storage is used. Changing this forces a new Workbook to be created. + // +kubebuilder:validation:Optional + StorageContainerID *string `json:"storageContainerId,omitempty" tf:"storage_container_id,omitempty"` + + // A mapping of tags which should be assigned to the Workbook. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type IdentityInitParameters struct { + + // The list of User Assigned Managed Identity IDs assigned to this Workbook. 
Changing this forces a new resource to be created. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The type of Managed Service Identity that is configured on this Workbook. Possible values are UserAssigned, SystemAssigned and SystemAssigned, UserAssigned. Changing this forces a new resource to be created. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityObservation struct { + + // The list of User Assigned Managed Identity IDs assigned to this Workbook. Changing this forces a new resource to be created. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Principal ID of the System Assigned Managed Service Identity that is configured on this Workbook. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID of the System Assigned Managed Service Identity that is configured on this Workbook. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // The type of Managed Service Identity that is configured on this Workbook. Possible values are UserAssigned, SystemAssigned and SystemAssigned, UserAssigned. Changing this forces a new resource to be created. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityParameters struct { + + // The list of User Assigned Managed Identity IDs assigned to this Workbook. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The type of Managed Service Identity that is configured on this Workbook. Possible values are UserAssigned, SystemAssigned and SystemAssigned, UserAssigned. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +// ApplicationInsightsWorkbookSpec defines the desired state of ApplicationInsightsWorkbook +type ApplicationInsightsWorkbookSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ApplicationInsightsWorkbookParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ApplicationInsightsWorkbookInitParameters `json:"initProvider,omitempty"` +} + +// ApplicationInsightsWorkbookStatus defines the observed state of ApplicationInsightsWorkbook. +type ApplicationInsightsWorkbookStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ApplicationInsightsWorkbookObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ApplicationInsightsWorkbook is the Schema for the ApplicationInsightsWorkbooks API. Manages an Azure Workbook. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type ApplicationInsightsWorkbook struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.dataJson) || (has(self.initProvider) && has(self.initProvider.dataJson))",message="spec.forProvider.dataJson is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.displayName) || (has(self.initProvider) && has(self.initProvider.displayName))",message="spec.forProvider.displayName is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec ApplicationInsightsWorkbookSpec `json:"spec"` + Status 
ApplicationInsightsWorkbookStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ApplicationInsightsWorkbookList contains a list of ApplicationInsightsWorkbooks +type ApplicationInsightsWorkbookList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ApplicationInsightsWorkbook `json:"items"` +} + +// Repository type metadata. +var ( + ApplicationInsightsWorkbook_Kind = "ApplicationInsightsWorkbook" + ApplicationInsightsWorkbook_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ApplicationInsightsWorkbook_Kind}.String() + ApplicationInsightsWorkbook_KindAPIVersion = ApplicationInsightsWorkbook_Kind + "." + CRDGroupVersion.String() + ApplicationInsightsWorkbook_GroupVersionKind = CRDGroupVersion.WithKind(ApplicationInsightsWorkbook_Kind) +) + +func init() { + SchemeBuilder.Register(&ApplicationInsightsWorkbook{}, &ApplicationInsightsWorkbookList{}) +} diff --git a/apis/insights/v1beta2/zz_generated.conversion_hubs.go b/apis/insights/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 000000000..9dd471943 --- /dev/null +++ b/apis/insights/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,40 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *ApplicationInsightsStandardWebTest) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *ApplicationInsightsWorkbook) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *MonitorActionGroup) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *MonitorActivityLogAlert) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *MonitorAutoscaleSetting) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *MonitorDataCollectionRule) Hub() {} + +// Hub marks this type as a conversion hub. 
+func (tr *MonitorDiagnosticSetting) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *MonitorMetricAlert) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *MonitorScheduledQueryRulesAlert) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *MonitorScheduledQueryRulesAlertV2) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *MonitorScheduledQueryRulesLog) Hub() {} diff --git a/apis/insights/v1beta2/zz_generated.deepcopy.go b/apis/insights/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..df566d9f0 --- /dev/null +++ b/apis/insights/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,14045 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AADAuthInitParameters) DeepCopyInto(out *AADAuthInitParameters) { + *out = *in + if in.IdentifierURI != nil { + in, out := &in.IdentifierURI, &out.IdentifierURI + *out = new(string) + **out = **in + } + if in.ObjectID != nil { + in, out := &in.ObjectID, &out.ObjectID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AADAuthInitParameters. +func (in *AADAuthInitParameters) DeepCopy() *AADAuthInitParameters { + if in == nil { + return nil + } + out := new(AADAuthInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AADAuthObservation) DeepCopyInto(out *AADAuthObservation) { + *out = *in + if in.IdentifierURI != nil { + in, out := &in.IdentifierURI, &out.IdentifierURI + *out = new(string) + **out = **in + } + if in.ObjectID != nil { + in, out := &in.ObjectID, &out.ObjectID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AADAuthObservation. +func (in *AADAuthObservation) DeepCopy() *AADAuthObservation { + if in == nil { + return nil + } + out := new(AADAuthObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AADAuthParameters) DeepCopyInto(out *AADAuthParameters) { + *out = *in + if in.IdentifierURI != nil { + in, out := &in.IdentifierURI, &out.IdentifierURI + *out = new(string) + **out = **in + } + if in.ObjectID != nil { + in, out := &in.ObjectID, &out.ObjectID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AADAuthParameters. +func (in *AADAuthParameters) DeepCopy() *AADAuthParameters { + if in == nil { + return nil + } + out := new(AADAuthParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ActionInitParameters) DeepCopyInto(out *ActionInitParameters) { + *out = *in + if in.ActionGroupID != nil { + in, out := &in.ActionGroupID, &out.ActionGroupID + *out = new(string) + **out = **in + } + if in.ActionGroupIDRef != nil { + in, out := &in.ActionGroupIDRef, &out.ActionGroupIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ActionGroupIDSelector != nil { + in, out := &in.ActionGroupIDSelector, &out.ActionGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.WebhookProperties != nil { + in, out := &in.WebhookProperties, &out.WebhookProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionInitParameters. +func (in *ActionInitParameters) DeepCopy() *ActionInitParameters { + if in == nil { + return nil + } + out := new(ActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionObservation) DeepCopyInto(out *ActionObservation) { + *out = *in + if in.ActionGroupID != nil { + in, out := &in.ActionGroupID, &out.ActionGroupID + *out = new(string) + **out = **in + } + if in.WebhookProperties != nil { + in, out := &in.WebhookProperties, &out.WebhookProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionObservation. 
+func (in *ActionObservation) DeepCopy() *ActionObservation { + if in == nil { + return nil + } + out := new(ActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionParameters) DeepCopyInto(out *ActionParameters) { + *out = *in + if in.ActionGroupID != nil { + in, out := &in.ActionGroupID, &out.ActionGroupID + *out = new(string) + **out = **in + } + if in.ActionGroupIDRef != nil { + in, out := &in.ActionGroupIDRef, &out.ActionGroupIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ActionGroupIDSelector != nil { + in, out := &in.ActionGroupIDSelector, &out.ActionGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.WebhookProperties != nil { + in, out := &in.WebhookProperties, &out.WebhookProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionParameters. +func (in *ActionParameters) DeepCopy() *ActionParameters { + if in == nil { + return nil + } + out := new(ActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationInsightsStandardWebTest) DeepCopyInto(out *ApplicationInsightsStandardWebTest) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationInsightsStandardWebTest. 
+func (in *ApplicationInsightsStandardWebTest) DeepCopy() *ApplicationInsightsStandardWebTest { + if in == nil { + return nil + } + out := new(ApplicationInsightsStandardWebTest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ApplicationInsightsStandardWebTest) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationInsightsStandardWebTestInitParameters) DeepCopyInto(out *ApplicationInsightsStandardWebTestInitParameters) { + *out = *in + if in.ApplicationInsightsID != nil { + in, out := &in.ApplicationInsightsID, &out.ApplicationInsightsID + *out = new(string) + **out = **in + } + if in.ApplicationInsightsIDRef != nil { + in, out := &in.ApplicationInsightsIDRef, &out.ApplicationInsightsIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ApplicationInsightsIDSelector != nil { + in, out := &in.ApplicationInsightsIDSelector, &out.ApplicationInsightsIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Frequency != nil { + in, out := &in.Frequency, &out.Frequency + *out = new(float64) + **out = **in + } + if in.GeoLocations != nil { + in, out := &in.GeoLocations, &out.GeoLocations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Request != nil { + in, out := &in.Request, &out.Request + *out = 
new(RequestInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RetryEnabled != nil { + in, out := &in.RetryEnabled, &out.RetryEnabled + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(float64) + **out = **in + } + if in.ValidationRules != nil { + in, out := &in.ValidationRules, &out.ValidationRules + *out = new(ValidationRulesInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationInsightsStandardWebTestInitParameters. +func (in *ApplicationInsightsStandardWebTestInitParameters) DeepCopy() *ApplicationInsightsStandardWebTestInitParameters { + if in == nil { + return nil + } + out := new(ApplicationInsightsStandardWebTestInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationInsightsStandardWebTestList) DeepCopyInto(out *ApplicationInsightsStandardWebTestList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ApplicationInsightsStandardWebTest, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationInsightsStandardWebTestList. 
+func (in *ApplicationInsightsStandardWebTestList) DeepCopy() *ApplicationInsightsStandardWebTestList { + if in == nil { + return nil + } + out := new(ApplicationInsightsStandardWebTestList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ApplicationInsightsStandardWebTestList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationInsightsStandardWebTestObservation) DeepCopyInto(out *ApplicationInsightsStandardWebTestObservation) { + *out = *in + if in.ApplicationInsightsID != nil { + in, out := &in.ApplicationInsightsID, &out.ApplicationInsightsID + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Frequency != nil { + in, out := &in.Frequency, &out.Frequency + *out = new(float64) + **out = **in + } + if in.GeoLocations != nil { + in, out := &in.GeoLocations, &out.GeoLocations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Request != nil { + in, out := &in.Request, &out.Request + *out = new(RequestObservation) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.RetryEnabled != nil { + in, out := &in.RetryEnabled, 
&out.RetryEnabled + *out = new(bool) + **out = **in + } + if in.SyntheticMonitorID != nil { + in, out := &in.SyntheticMonitorID, &out.SyntheticMonitorID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(float64) + **out = **in + } + if in.ValidationRules != nil { + in, out := &in.ValidationRules, &out.ValidationRules + *out = new(ValidationRulesObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationInsightsStandardWebTestObservation. +func (in *ApplicationInsightsStandardWebTestObservation) DeepCopy() *ApplicationInsightsStandardWebTestObservation { + if in == nil { + return nil + } + out := new(ApplicationInsightsStandardWebTestObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ApplicationInsightsStandardWebTestParameters) DeepCopyInto(out *ApplicationInsightsStandardWebTestParameters) { + *out = *in + if in.ApplicationInsightsID != nil { + in, out := &in.ApplicationInsightsID, &out.ApplicationInsightsID + *out = new(string) + **out = **in + } + if in.ApplicationInsightsIDRef != nil { + in, out := &in.ApplicationInsightsIDRef, &out.ApplicationInsightsIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ApplicationInsightsIDSelector != nil { + in, out := &in.ApplicationInsightsIDSelector, &out.ApplicationInsightsIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Frequency != nil { + in, out := &in.Frequency, &out.Frequency + *out = new(float64) + **out = **in + } + if in.GeoLocations != nil { + in, out := &in.GeoLocations, &out.GeoLocations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Request != nil { + in, out := &in.Request, &out.Request + *out = new(RequestParameters) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RetryEnabled != nil { + in, out := &in.RetryEnabled, &out.RetryEnabled + 
*out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(float64) + **out = **in + } + if in.ValidationRules != nil { + in, out := &in.ValidationRules, &out.ValidationRules + *out = new(ValidationRulesParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationInsightsStandardWebTestParameters. +func (in *ApplicationInsightsStandardWebTestParameters) DeepCopy() *ApplicationInsightsStandardWebTestParameters { + if in == nil { + return nil + } + out := new(ApplicationInsightsStandardWebTestParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationInsightsStandardWebTestSpec) DeepCopyInto(out *ApplicationInsightsStandardWebTestSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationInsightsStandardWebTestSpec. +func (in *ApplicationInsightsStandardWebTestSpec) DeepCopy() *ApplicationInsightsStandardWebTestSpec { + if in == nil { + return nil + } + out := new(ApplicationInsightsStandardWebTestSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ApplicationInsightsStandardWebTestStatus) DeepCopyInto(out *ApplicationInsightsStandardWebTestStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationInsightsStandardWebTestStatus. +func (in *ApplicationInsightsStandardWebTestStatus) DeepCopy() *ApplicationInsightsStandardWebTestStatus { + if in == nil { + return nil + } + out := new(ApplicationInsightsStandardWebTestStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationInsightsWebTestLocationAvailabilityCriteriaInitParameters) DeepCopyInto(out *ApplicationInsightsWebTestLocationAvailabilityCriteriaInitParameters) { + *out = *in + if in.ComponentID != nil { + in, out := &in.ComponentID, &out.ComponentID + *out = new(string) + **out = **in + } + if in.FailedLocationCount != nil { + in, out := &in.FailedLocationCount, &out.FailedLocationCount + *out = new(float64) + **out = **in + } + if in.WebTestID != nil { + in, out := &in.WebTestID, &out.WebTestID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationInsightsWebTestLocationAvailabilityCriteriaInitParameters. +func (in *ApplicationInsightsWebTestLocationAvailabilityCriteriaInitParameters) DeepCopy() *ApplicationInsightsWebTestLocationAvailabilityCriteriaInitParameters { + if in == nil { + return nil + } + out := new(ApplicationInsightsWebTestLocationAvailabilityCriteriaInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ApplicationInsightsWebTestLocationAvailabilityCriteriaObservation) DeepCopyInto(out *ApplicationInsightsWebTestLocationAvailabilityCriteriaObservation) { + *out = *in + if in.ComponentID != nil { + in, out := &in.ComponentID, &out.ComponentID + *out = new(string) + **out = **in + } + if in.FailedLocationCount != nil { + in, out := &in.FailedLocationCount, &out.FailedLocationCount + *out = new(float64) + **out = **in + } + if in.WebTestID != nil { + in, out := &in.WebTestID, &out.WebTestID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationInsightsWebTestLocationAvailabilityCriteriaObservation. +func (in *ApplicationInsightsWebTestLocationAvailabilityCriteriaObservation) DeepCopy() *ApplicationInsightsWebTestLocationAvailabilityCriteriaObservation { + if in == nil { + return nil + } + out := new(ApplicationInsightsWebTestLocationAvailabilityCriteriaObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationInsightsWebTestLocationAvailabilityCriteriaParameters) DeepCopyInto(out *ApplicationInsightsWebTestLocationAvailabilityCriteriaParameters) { + *out = *in + if in.ComponentID != nil { + in, out := &in.ComponentID, &out.ComponentID + *out = new(string) + **out = **in + } + if in.FailedLocationCount != nil { + in, out := &in.FailedLocationCount, &out.FailedLocationCount + *out = new(float64) + **out = **in + } + if in.WebTestID != nil { + in, out := &in.WebTestID, &out.WebTestID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationInsightsWebTestLocationAvailabilityCriteriaParameters. 
+func (in *ApplicationInsightsWebTestLocationAvailabilityCriteriaParameters) DeepCopy() *ApplicationInsightsWebTestLocationAvailabilityCriteriaParameters { + if in == nil { + return nil + } + out := new(ApplicationInsightsWebTestLocationAvailabilityCriteriaParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationInsightsWorkbook) DeepCopyInto(out *ApplicationInsightsWorkbook) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationInsightsWorkbook. +func (in *ApplicationInsightsWorkbook) DeepCopy() *ApplicationInsightsWorkbook { + if in == nil { + return nil + } + out := new(ApplicationInsightsWorkbook) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ApplicationInsightsWorkbook) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ApplicationInsightsWorkbookInitParameters) DeepCopyInto(out *ApplicationInsightsWorkbookInitParameters) { + *out = *in + if in.Category != nil { + in, out := &in.Category, &out.Category + *out = new(string) + **out = **in + } + if in.DataJSON != nil { + in, out := &in.DataJSON, &out.DataJSON + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SourceID != nil { + in, out := &in.SourceID, &out.SourceID + *out = new(string) + **out = **in + } + if in.StorageContainerID != nil { + in, out := &in.StorageContainerID, &out.StorageContainerID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy 
function, copying the receiver, creating a new ApplicationInsightsWorkbookInitParameters. +func (in *ApplicationInsightsWorkbookInitParameters) DeepCopy() *ApplicationInsightsWorkbookInitParameters { + if in == nil { + return nil + } + out := new(ApplicationInsightsWorkbookInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationInsightsWorkbookList) DeepCopyInto(out *ApplicationInsightsWorkbookList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ApplicationInsightsWorkbook, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationInsightsWorkbookList. +func (in *ApplicationInsightsWorkbookList) DeepCopy() *ApplicationInsightsWorkbookList { + if in == nil { + return nil + } + out := new(ApplicationInsightsWorkbookList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ApplicationInsightsWorkbookList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ApplicationInsightsWorkbookObservation) DeepCopyInto(out *ApplicationInsightsWorkbookObservation) { + *out = *in + if in.Category != nil { + in, out := &in.Category, &out.Category + *out = new(string) + **out = **in + } + if in.DataJSON != nil { + in, out := &in.DataJSON, &out.DataJSON + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.SourceID != nil { + in, out := &in.SourceID, &out.SourceID + *out = new(string) + **out = **in + } + if in.StorageContainerID != nil { + in, out := &in.StorageContainerID, &out.StorageContainerID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationInsightsWorkbookObservation. 
+func (in *ApplicationInsightsWorkbookObservation) DeepCopy() *ApplicationInsightsWorkbookObservation { + if in == nil { + return nil + } + out := new(ApplicationInsightsWorkbookObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationInsightsWorkbookParameters) DeepCopyInto(out *ApplicationInsightsWorkbookParameters) { + *out = *in + if in.Category != nil { + in, out := &in.Category, &out.Category + *out = new(string) + **out = **in + } + if in.DataJSON != nil { + in, out := &in.DataJSON, &out.DataJSON + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SourceID != nil { + in, out := &in.SourceID, &out.SourceID + *out = new(string) + **out = **in + } + if in.StorageContainerID != nil { + in, out := &in.StorageContainerID, &out.StorageContainerID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := 
&in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationInsightsWorkbookParameters. +func (in *ApplicationInsightsWorkbookParameters) DeepCopy() *ApplicationInsightsWorkbookParameters { + if in == nil { + return nil + } + out := new(ApplicationInsightsWorkbookParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationInsightsWorkbookSpec) DeepCopyInto(out *ApplicationInsightsWorkbookSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationInsightsWorkbookSpec. +func (in *ApplicationInsightsWorkbookSpec) DeepCopy() *ApplicationInsightsWorkbookSpec { + if in == nil { + return nil + } + out := new(ApplicationInsightsWorkbookSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationInsightsWorkbookStatus) DeepCopyInto(out *ApplicationInsightsWorkbookStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationInsightsWorkbookStatus. 
+func (in *ApplicationInsightsWorkbookStatus) DeepCopy() *ApplicationInsightsWorkbookStatus { + if in == nil { + return nil + } + out := new(ApplicationInsightsWorkbookStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ArmRoleReceiverInitParameters) DeepCopyInto(out *ArmRoleReceiverInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RoleID != nil { + in, out := &in.RoleID, &out.RoleID + *out = new(string) + **out = **in + } + if in.UseCommonAlertSchema != nil { + in, out := &in.UseCommonAlertSchema, &out.UseCommonAlertSchema + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArmRoleReceiverInitParameters. +func (in *ArmRoleReceiverInitParameters) DeepCopy() *ArmRoleReceiverInitParameters { + if in == nil { + return nil + } + out := new(ArmRoleReceiverInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ArmRoleReceiverObservation) DeepCopyInto(out *ArmRoleReceiverObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RoleID != nil { + in, out := &in.RoleID, &out.RoleID + *out = new(string) + **out = **in + } + if in.UseCommonAlertSchema != nil { + in, out := &in.UseCommonAlertSchema, &out.UseCommonAlertSchema + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArmRoleReceiverObservation. 
+func (in *ArmRoleReceiverObservation) DeepCopy() *ArmRoleReceiverObservation { + if in == nil { + return nil + } + out := new(ArmRoleReceiverObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ArmRoleReceiverParameters) DeepCopyInto(out *ArmRoleReceiverParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RoleID != nil { + in, out := &in.RoleID, &out.RoleID + *out = new(string) + **out = **in + } + if in.UseCommonAlertSchema != nil { + in, out := &in.UseCommonAlertSchema, &out.UseCommonAlertSchema + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArmRoleReceiverParameters. +func (in *ArmRoleReceiverParameters) DeepCopy() *ArmRoleReceiverParameters { + if in == nil { + return nil + } + out := new(ArmRoleReceiverParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutomationRunBookReceiverInitParameters) DeepCopyInto(out *AutomationRunBookReceiverInitParameters) { + *out = *in + if in.AutomationAccountID != nil { + in, out := &in.AutomationAccountID, &out.AutomationAccountID + *out = new(string) + **out = **in + } + if in.IsGlobalRunBook != nil { + in, out := &in.IsGlobalRunBook, &out.IsGlobalRunBook + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RunBookName != nil { + in, out := &in.RunBookName, &out.RunBookName + *out = new(string) + **out = **in + } + if in.ServiceURI != nil { + in, out := &in.ServiceURI, &out.ServiceURI + *out = new(string) + **out = **in + } + if in.UseCommonAlertSchema != nil { + in, out := &in.UseCommonAlertSchema, &out.UseCommonAlertSchema + *out = new(bool) + **out = **in + } + if in.WebhookResourceID != nil { + in, out := &in.WebhookResourceID, &out.WebhookResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutomationRunBookReceiverInitParameters. +func (in *AutomationRunBookReceiverInitParameters) DeepCopy() *AutomationRunBookReceiverInitParameters { + if in == nil { + return nil + } + out := new(AutomationRunBookReceiverInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutomationRunBookReceiverObservation) DeepCopyInto(out *AutomationRunBookReceiverObservation) { + *out = *in + if in.AutomationAccountID != nil { + in, out := &in.AutomationAccountID, &out.AutomationAccountID + *out = new(string) + **out = **in + } + if in.IsGlobalRunBook != nil { + in, out := &in.IsGlobalRunBook, &out.IsGlobalRunBook + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RunBookName != nil { + in, out := &in.RunBookName, &out.RunBookName + *out = new(string) + **out = **in + } + if in.ServiceURI != nil { + in, out := &in.ServiceURI, &out.ServiceURI + *out = new(string) + **out = **in + } + if in.UseCommonAlertSchema != nil { + in, out := &in.UseCommonAlertSchema, &out.UseCommonAlertSchema + *out = new(bool) + **out = **in + } + if in.WebhookResourceID != nil { + in, out := &in.WebhookResourceID, &out.WebhookResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutomationRunBookReceiverObservation. +func (in *AutomationRunBookReceiverObservation) DeepCopy() *AutomationRunBookReceiverObservation { + if in == nil { + return nil + } + out := new(AutomationRunBookReceiverObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutomationRunBookReceiverParameters) DeepCopyInto(out *AutomationRunBookReceiverParameters) { + *out = *in + if in.AutomationAccountID != nil { + in, out := &in.AutomationAccountID, &out.AutomationAccountID + *out = new(string) + **out = **in + } + if in.IsGlobalRunBook != nil { + in, out := &in.IsGlobalRunBook, &out.IsGlobalRunBook + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RunBookName != nil { + in, out := &in.RunBookName, &out.RunBookName + *out = new(string) + **out = **in + } + if in.ServiceURI != nil { + in, out := &in.ServiceURI, &out.ServiceURI + *out = new(string) + **out = **in + } + if in.UseCommonAlertSchema != nil { + in, out := &in.UseCommonAlertSchema, &out.UseCommonAlertSchema + *out = new(bool) + **out = **in + } + if in.WebhookResourceID != nil { + in, out := &in.WebhookResourceID, &out.WebhookResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutomationRunBookReceiverParameters. +func (in *AutomationRunBookReceiverParameters) DeepCopy() *AutomationRunBookReceiverParameters { + if in == nil { + return nil + } + out := new(AutomationRunBookReceiverParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureAppPushReceiverInitParameters) DeepCopyInto(out *AzureAppPushReceiverInitParameters) { + *out = *in + if in.EmailAddress != nil { + in, out := &in.EmailAddress, &out.EmailAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureAppPushReceiverInitParameters. 
+func (in *AzureAppPushReceiverInitParameters) DeepCopy() *AzureAppPushReceiverInitParameters { + if in == nil { + return nil + } + out := new(AzureAppPushReceiverInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureAppPushReceiverObservation) DeepCopyInto(out *AzureAppPushReceiverObservation) { + *out = *in + if in.EmailAddress != nil { + in, out := &in.EmailAddress, &out.EmailAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureAppPushReceiverObservation. +func (in *AzureAppPushReceiverObservation) DeepCopy() *AzureAppPushReceiverObservation { + if in == nil { + return nil + } + out := new(AzureAppPushReceiverObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureAppPushReceiverParameters) DeepCopyInto(out *AzureAppPushReceiverParameters) { + *out = *in + if in.EmailAddress != nil { + in, out := &in.EmailAddress, &out.EmailAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureAppPushReceiverParameters. +func (in *AzureAppPushReceiverParameters) DeepCopy() *AzureAppPushReceiverParameters { + if in == nil { + return nil + } + out := new(AzureAppPushReceiverParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AzureFunctionReceiverInitParameters) DeepCopyInto(out *AzureFunctionReceiverInitParameters) { + *out = *in + if in.FunctionAppResourceID != nil { + in, out := &in.FunctionAppResourceID, &out.FunctionAppResourceID + *out = new(string) + **out = **in + } + if in.FunctionName != nil { + in, out := &in.FunctionName, &out.FunctionName + *out = new(string) + **out = **in + } + if in.HTTPTriggerURL != nil { + in, out := &in.HTTPTriggerURL, &out.HTTPTriggerURL + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.UseCommonAlertSchema != nil { + in, out := &in.UseCommonAlertSchema, &out.UseCommonAlertSchema + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureFunctionReceiverInitParameters. +func (in *AzureFunctionReceiverInitParameters) DeepCopy() *AzureFunctionReceiverInitParameters { + if in == nil { + return nil + } + out := new(AzureFunctionReceiverInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AzureFunctionReceiverObservation) DeepCopyInto(out *AzureFunctionReceiverObservation) { + *out = *in + if in.FunctionAppResourceID != nil { + in, out := &in.FunctionAppResourceID, &out.FunctionAppResourceID + *out = new(string) + **out = **in + } + if in.FunctionName != nil { + in, out := &in.FunctionName, &out.FunctionName + *out = new(string) + **out = **in + } + if in.HTTPTriggerURL != nil { + in, out := &in.HTTPTriggerURL, &out.HTTPTriggerURL + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.UseCommonAlertSchema != nil { + in, out := &in.UseCommonAlertSchema, &out.UseCommonAlertSchema + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureFunctionReceiverObservation. +func (in *AzureFunctionReceiverObservation) DeepCopy() *AzureFunctionReceiverObservation { + if in == nil { + return nil + } + out := new(AzureFunctionReceiverObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AzureFunctionReceiverParameters) DeepCopyInto(out *AzureFunctionReceiverParameters) { + *out = *in + if in.FunctionAppResourceID != nil { + in, out := &in.FunctionAppResourceID, &out.FunctionAppResourceID + *out = new(string) + **out = **in + } + if in.FunctionName != nil { + in, out := &in.FunctionName, &out.FunctionName + *out = new(string) + **out = **in + } + if in.HTTPTriggerURL != nil { + in, out := &in.HTTPTriggerURL, &out.HTTPTriggerURL + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.UseCommonAlertSchema != nil { + in, out := &in.UseCommonAlertSchema, &out.UseCommonAlertSchema + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureFunctionReceiverParameters. +func (in *AzureFunctionReceiverParameters) DeepCopy() *AzureFunctionReceiverParameters { + if in == nil { + return nil + } + out := new(AzureFunctionReceiverParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureMonitorMetricsInitParameters) DeepCopyInto(out *AzureMonitorMetricsInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureMonitorMetricsInitParameters. +func (in *AzureMonitorMetricsInitParameters) DeepCopy() *AzureMonitorMetricsInitParameters { + if in == nil { + return nil + } + out := new(AzureMonitorMetricsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AzureMonitorMetricsObservation) DeepCopyInto(out *AzureMonitorMetricsObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureMonitorMetricsObservation. +func (in *AzureMonitorMetricsObservation) DeepCopy() *AzureMonitorMetricsObservation { + if in == nil { + return nil + } + out := new(AzureMonitorMetricsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureMonitorMetricsParameters) DeepCopyInto(out *AzureMonitorMetricsParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureMonitorMetricsParameters. +func (in *AzureMonitorMetricsParameters) DeepCopy() *AzureMonitorMetricsParameters { + if in == nil { + return nil + } + out := new(AzureMonitorMetricsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CapacityInitParameters) DeepCopyInto(out *CapacityInitParameters) { + *out = *in + if in.Default != nil { + in, out := &in.Default, &out.Default + *out = new(float64) + **out = **in + } + if in.Maximum != nil { + in, out := &in.Maximum, &out.Maximum + *out = new(float64) + **out = **in + } + if in.Minimum != nil { + in, out := &in.Minimum, &out.Minimum + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacityInitParameters. 
+func (in *CapacityInitParameters) DeepCopy() *CapacityInitParameters { + if in == nil { + return nil + } + out := new(CapacityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CapacityObservation) DeepCopyInto(out *CapacityObservation) { + *out = *in + if in.Default != nil { + in, out := &in.Default, &out.Default + *out = new(float64) + **out = **in + } + if in.Maximum != nil { + in, out := &in.Maximum, &out.Maximum + *out = new(float64) + **out = **in + } + if in.Minimum != nil { + in, out := &in.Minimum, &out.Minimum + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacityObservation. +func (in *CapacityObservation) DeepCopy() *CapacityObservation { + if in == nil { + return nil + } + out := new(CapacityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CapacityParameters) DeepCopyInto(out *CapacityParameters) { + *out = *in + if in.Default != nil { + in, out := &in.Default, &out.Default + *out = new(float64) + **out = **in + } + if in.Maximum != nil { + in, out := &in.Maximum, &out.Maximum + *out = new(float64) + **out = **in + } + if in.Minimum != nil { + in, out := &in.Minimum, &out.Minimum + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacityParameters. +func (in *CapacityParameters) DeepCopy() *CapacityParameters { + if in == nil { + return nil + } + out := new(CapacityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ColumnInitParameters) DeepCopyInto(out *ColumnInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ColumnInitParameters. +func (in *ColumnInitParameters) DeepCopy() *ColumnInitParameters { + if in == nil { + return nil + } + out := new(ColumnInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ColumnObservation) DeepCopyInto(out *ColumnObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ColumnObservation. +func (in *ColumnObservation) DeepCopy() *ColumnObservation { + if in == nil { + return nil + } + out := new(ColumnObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ColumnParameters) DeepCopyInto(out *ColumnParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ColumnParameters. 
+func (in *ColumnParameters) DeepCopy() *ColumnParameters { + if in == nil { + return nil + } + out := new(ColumnParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContentInitParameters) DeepCopyInto(out *ContentInitParameters) { + *out = *in + if in.ContentMatch != nil { + in, out := &in.ContentMatch, &out.ContentMatch + *out = new(string) + **out = **in + } + if in.IgnoreCase != nil { + in, out := &in.IgnoreCase, &out.IgnoreCase + *out = new(bool) + **out = **in + } + if in.PassIfTextFound != nil { + in, out := &in.PassIfTextFound, &out.PassIfTextFound + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContentInitParameters. +func (in *ContentInitParameters) DeepCopy() *ContentInitParameters { + if in == nil { + return nil + } + out := new(ContentInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContentObservation) DeepCopyInto(out *ContentObservation) { + *out = *in + if in.ContentMatch != nil { + in, out := &in.ContentMatch, &out.ContentMatch + *out = new(string) + **out = **in + } + if in.IgnoreCase != nil { + in, out := &in.IgnoreCase, &out.IgnoreCase + *out = new(bool) + **out = **in + } + if in.PassIfTextFound != nil { + in, out := &in.PassIfTextFound, &out.PassIfTextFound + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContentObservation. +func (in *ContentObservation) DeepCopy() *ContentObservation { + if in == nil { + return nil + } + out := new(ContentObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContentParameters) DeepCopyInto(out *ContentParameters) { + *out = *in + if in.ContentMatch != nil { + in, out := &in.ContentMatch, &out.ContentMatch + *out = new(string) + **out = **in + } + if in.IgnoreCase != nil { + in, out := &in.IgnoreCase, &out.IgnoreCase + *out = new(bool) + **out = **in + } + if in.PassIfTextFound != nil { + in, out := &in.PassIfTextFound, &out.PassIfTextFound + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContentParameters. +func (in *ContentParameters) DeepCopy() *ContentParameters { + if in == nil { + return nil + } + out := new(ContentParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CriteriaDimensionInitParameters) DeepCopyInto(out *CriteriaDimensionInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CriteriaDimensionInitParameters. +func (in *CriteriaDimensionInitParameters) DeepCopy() *CriteriaDimensionInitParameters { + if in == nil { + return nil + } + out := new(CriteriaDimensionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CriteriaDimensionObservation) DeepCopyInto(out *CriteriaDimensionObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CriteriaDimensionObservation. +func (in *CriteriaDimensionObservation) DeepCopy() *CriteriaDimensionObservation { + if in == nil { + return nil + } + out := new(CriteriaDimensionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CriteriaDimensionParameters) DeepCopyInto(out *CriteriaDimensionParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CriteriaDimensionParameters. +func (in *CriteriaDimensionParameters) DeepCopy() *CriteriaDimensionParameters { + if in == nil { + return nil + } + out := new(CriteriaDimensionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CriteriaInitParameters) DeepCopyInto(out *CriteriaInitParameters) { + *out = *in + if in.Caller != nil { + in, out := &in.Caller, &out.Caller + *out = new(string) + **out = **in + } + if in.Category != nil { + in, out := &in.Category, &out.Category + *out = new(string) + **out = **in + } + if in.Level != nil { + in, out := &in.Level, &out.Level + *out = new(string) + **out = **in + } + if in.Levels != nil { + in, out := &in.Levels, &out.Levels + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OperationName != nil { + in, out := &in.OperationName, &out.OperationName + *out = new(string) + **out = **in + } + if in.RecommendationCategory != nil { + in, out := &in.RecommendationCategory, &out.RecommendationCategory + *out = new(string) + **out = **in + } + if in.RecommendationImpact != nil { + in, out := &in.RecommendationImpact, &out.RecommendationImpact + *out = new(string) + **out = **in + } + if in.RecommendationType != nil { + in, out := &in.RecommendationType, &out.RecommendationType + *out = new(string) + **out = **in + } + if in.ResourceGroup != nil { + in, out := &in.ResourceGroup, &out.ResourceGroup + *out = new(string) + **out = **in + } + if in.ResourceGroups != nil { + in, out := &in.ResourceGroups, &out.ResourceGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ResourceHealth != nil { + in, out := &in.ResourceHealth, &out.ResourceHealth + *out = new(ResourceHealthInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ResourceID != nil { + in, out := &in.ResourceID, &out.ResourceID + *out = new(string) + **out = **in + } + if in.ResourceIDRef != nil { + in, out := &in.ResourceIDRef, &out.ResourceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceIDSelector != nil { + in, 
out := &in.ResourceIDSelector, &out.ResourceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ResourceIds != nil { + in, out := &in.ResourceIds, &out.ResourceIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ResourceProvider != nil { + in, out := &in.ResourceProvider, &out.ResourceProvider + *out = new(string) + **out = **in + } + if in.ResourceProviders != nil { + in, out := &in.ResourceProviders, &out.ResourceProviders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ResourceType != nil { + in, out := &in.ResourceType, &out.ResourceType + *out = new(string) + **out = **in + } + if in.ResourceTypes != nil { + in, out := &in.ResourceTypes, &out.ResourceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ServiceHealth != nil { + in, out := &in.ServiceHealth, &out.ServiceHealth + *out = new(ServiceHealthInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Statuses != nil { + in, out := &in.Statuses, &out.Statuses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubStatus != nil { + in, out := &in.SubStatus, &out.SubStatus + *out = new(string) + **out = **in + } + if in.SubStatuses != nil { + in, out := &in.SubStatuses, &out.SubStatuses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, 
copying the receiver, creating a new CriteriaInitParameters. +func (in *CriteriaInitParameters) DeepCopy() *CriteriaInitParameters { + if in == nil { + return nil + } + out := new(CriteriaInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CriteriaObservation) DeepCopyInto(out *CriteriaObservation) { + *out = *in + if in.Caller != nil { + in, out := &in.Caller, &out.Caller + *out = new(string) + **out = **in + } + if in.Category != nil { + in, out := &in.Category, &out.Category + *out = new(string) + **out = **in + } + if in.Level != nil { + in, out := &in.Level, &out.Level + *out = new(string) + **out = **in + } + if in.Levels != nil { + in, out := &in.Levels, &out.Levels + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OperationName != nil { + in, out := &in.OperationName, &out.OperationName + *out = new(string) + **out = **in + } + if in.RecommendationCategory != nil { + in, out := &in.RecommendationCategory, &out.RecommendationCategory + *out = new(string) + **out = **in + } + if in.RecommendationImpact != nil { + in, out := &in.RecommendationImpact, &out.RecommendationImpact + *out = new(string) + **out = **in + } + if in.RecommendationType != nil { + in, out := &in.RecommendationType, &out.RecommendationType + *out = new(string) + **out = **in + } + if in.ResourceGroup != nil { + in, out := &in.ResourceGroup, &out.ResourceGroup + *out = new(string) + **out = **in + } + if in.ResourceGroups != nil { + in, out := &in.ResourceGroups, &out.ResourceGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ResourceHealth != nil { + in, out := &in.ResourceHealth, &out.ResourceHealth + *out = 
new(ResourceHealthObservation) + (*in).DeepCopyInto(*out) + } + if in.ResourceID != nil { + in, out := &in.ResourceID, &out.ResourceID + *out = new(string) + **out = **in + } + if in.ResourceIds != nil { + in, out := &in.ResourceIds, &out.ResourceIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ResourceProvider != nil { + in, out := &in.ResourceProvider, &out.ResourceProvider + *out = new(string) + **out = **in + } + if in.ResourceProviders != nil { + in, out := &in.ResourceProviders, &out.ResourceProviders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ResourceType != nil { + in, out := &in.ResourceType, &out.ResourceType + *out = new(string) + **out = **in + } + if in.ResourceTypes != nil { + in, out := &in.ResourceTypes, &out.ResourceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ServiceHealth != nil { + in, out := &in.ServiceHealth, &out.ServiceHealth + *out = new(ServiceHealthObservation) + (*in).DeepCopyInto(*out) + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Statuses != nil { + in, out := &in.Statuses, &out.Statuses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubStatus != nil { + in, out := &in.SubStatus, &out.SubStatus + *out = new(string) + **out = **in + } + if in.SubStatuses != nil { + in, out := &in.SubStatuses, &out.SubStatuses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } 
+ } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CriteriaObservation. +func (in *CriteriaObservation) DeepCopy() *CriteriaObservation { + if in == nil { + return nil + } + out := new(CriteriaObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CriteriaParameters) DeepCopyInto(out *CriteriaParameters) { + *out = *in + if in.Caller != nil { + in, out := &in.Caller, &out.Caller + *out = new(string) + **out = **in + } + if in.Category != nil { + in, out := &in.Category, &out.Category + *out = new(string) + **out = **in + } + if in.Level != nil { + in, out := &in.Level, &out.Level + *out = new(string) + **out = **in + } + if in.Levels != nil { + in, out := &in.Levels, &out.Levels + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OperationName != nil { + in, out := &in.OperationName, &out.OperationName + *out = new(string) + **out = **in + } + if in.RecommendationCategory != nil { + in, out := &in.RecommendationCategory, &out.RecommendationCategory + *out = new(string) + **out = **in + } + if in.RecommendationImpact != nil { + in, out := &in.RecommendationImpact, &out.RecommendationImpact + *out = new(string) + **out = **in + } + if in.RecommendationType != nil { + in, out := &in.RecommendationType, &out.RecommendationType + *out = new(string) + **out = **in + } + if in.ResourceGroup != nil { + in, out := &in.ResourceGroup, &out.ResourceGroup + *out = new(string) + **out = **in + } + if in.ResourceGroups != nil { + in, out := &in.ResourceGroups, &out.ResourceGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ResourceHealth != nil { + in, out := 
&in.ResourceHealth, &out.ResourceHealth + *out = new(ResourceHealthParameters) + (*in).DeepCopyInto(*out) + } + if in.ResourceID != nil { + in, out := &in.ResourceID, &out.ResourceID + *out = new(string) + **out = **in + } + if in.ResourceIDRef != nil { + in, out := &in.ResourceIDRef, &out.ResourceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceIDSelector != nil { + in, out := &in.ResourceIDSelector, &out.ResourceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ResourceIds != nil { + in, out := &in.ResourceIds, &out.ResourceIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ResourceProvider != nil { + in, out := &in.ResourceProvider, &out.ResourceProvider + *out = new(string) + **out = **in + } + if in.ResourceProviders != nil { + in, out := &in.ResourceProviders, &out.ResourceProviders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ResourceType != nil { + in, out := &in.ResourceType, &out.ResourceType + *out = new(string) + **out = **in + } + if in.ResourceTypes != nil { + in, out := &in.ResourceTypes, &out.ResourceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ServiceHealth != nil { + in, out := &in.ServiceHealth, &out.ServiceHealth + *out = new(ServiceHealthParameters) + (*in).DeepCopyInto(*out) + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.Statuses != nil { + in, out := &in.Statuses, &out.Statuses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if 
in.SubStatus != nil { + in, out := &in.SubStatus, &out.SubStatus + *out = new(string) + **out = **in + } + if in.SubStatuses != nil { + in, out := &in.SubStatuses, &out.SubStatuses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CriteriaParameters. +func (in *CriteriaParameters) DeepCopy() *CriteriaParameters { + if in == nil { + return nil + } + out := new(CriteriaParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataFlowInitParameters) DeepCopyInto(out *DataFlowInitParameters) { + *out = *in + if in.BuiltInTransform != nil { + in, out := &in.BuiltInTransform, &out.BuiltInTransform + *out = new(string) + **out = **in + } + if in.Destinations != nil { + in, out := &in.Destinations, &out.Destinations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OutputStream != nil { + in, out := &in.OutputStream, &out.OutputStream + *out = new(string) + **out = **in + } + if in.Streams != nil { + in, out := &in.Streams, &out.Streams + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TransformKql != nil { + in, out := &in.TransformKql, &out.TransformKql + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataFlowInitParameters. 
+func (in *DataFlowInitParameters) DeepCopy() *DataFlowInitParameters { + if in == nil { + return nil + } + out := new(DataFlowInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataFlowObservation) DeepCopyInto(out *DataFlowObservation) { + *out = *in + if in.BuiltInTransform != nil { + in, out := &in.BuiltInTransform, &out.BuiltInTransform + *out = new(string) + **out = **in + } + if in.Destinations != nil { + in, out := &in.Destinations, &out.Destinations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OutputStream != nil { + in, out := &in.OutputStream, &out.OutputStream + *out = new(string) + **out = **in + } + if in.Streams != nil { + in, out := &in.Streams, &out.Streams + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TransformKql != nil { + in, out := &in.TransformKql, &out.TransformKql + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataFlowObservation. +func (in *DataFlowObservation) DeepCopy() *DataFlowObservation { + if in == nil { + return nil + } + out := new(DataFlowObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataFlowParameters) DeepCopyInto(out *DataFlowParameters) { + *out = *in + if in.BuiltInTransform != nil { + in, out := &in.BuiltInTransform, &out.BuiltInTransform + *out = new(string) + **out = **in + } + if in.Destinations != nil { + in, out := &in.Destinations, &out.Destinations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OutputStream != nil { + in, out := &in.OutputStream, &out.OutputStream + *out = new(string) + **out = **in + } + if in.Streams != nil { + in, out := &in.Streams, &out.Streams + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TransformKql != nil { + in, out := &in.TransformKql, &out.TransformKql + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataFlowParameters. +func (in *DataFlowParameters) DeepCopy() *DataFlowParameters { + if in == nil { + return nil + } + out := new(DataFlowParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataImportInitParameters) DeepCopyInto(out *DataImportInitParameters) { + *out = *in + if in.EventHubDataSource != nil { + in, out := &in.EventHubDataSource, &out.EventHubDataSource + *out = make([]EventHubDataSourceInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataImportInitParameters. 
+func (in *DataImportInitParameters) DeepCopy() *DataImportInitParameters { + if in == nil { + return nil + } + out := new(DataImportInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataImportObservation) DeepCopyInto(out *DataImportObservation) { + *out = *in + if in.EventHubDataSource != nil { + in, out := &in.EventHubDataSource, &out.EventHubDataSource + *out = make([]EventHubDataSourceObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataImportObservation. +func (in *DataImportObservation) DeepCopy() *DataImportObservation { + if in == nil { + return nil + } + out := new(DataImportObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataImportParameters) DeepCopyInto(out *DataImportParameters) { + *out = *in + if in.EventHubDataSource != nil { + in, out := &in.EventHubDataSource, &out.EventHubDataSource + *out = make([]EventHubDataSourceParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataImportParameters. +func (in *DataImportParameters) DeepCopy() *DataImportParameters { + if in == nil { + return nil + } + out := new(DataImportParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataSourcesInitParameters) DeepCopyInto(out *DataSourcesInitParameters) { + *out = *in + if in.DataImport != nil { + in, out := &in.DataImport, &out.DataImport + *out = new(DataImportInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Extension != nil { + in, out := &in.Extension, &out.Extension + *out = make([]ExtensionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IisLog != nil { + in, out := &in.IisLog, &out.IisLog + *out = make([]IisLogInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LogFile != nil { + in, out := &in.LogFile, &out.LogFile + *out = make([]LogFileInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PerformanceCounter != nil { + in, out := &in.PerformanceCounter, &out.PerformanceCounter + *out = make([]PerformanceCounterInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PlatformTelemetry != nil { + in, out := &in.PlatformTelemetry, &out.PlatformTelemetry + *out = make([]PlatformTelemetryInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PrometheusForwarder != nil { + in, out := &in.PrometheusForwarder, &out.PrometheusForwarder + *out = make([]PrometheusForwarderInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Syslog != nil { + in, out := &in.Syslog, &out.Syslog + *out = make([]SyslogInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.WindowsEventLog != nil { + in, out := &in.WindowsEventLog, &out.WindowsEventLog + *out = make([]WindowsEventLogInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.WindowsFirewallLog != nil { + in, out := &in.WindowsFirewallLog, &out.WindowsFirewallLog + *out = make([]WindowsFirewallLogInitParameters, 
len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSourcesInitParameters. +func (in *DataSourcesInitParameters) DeepCopy() *DataSourcesInitParameters { + if in == nil { + return nil + } + out := new(DataSourcesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataSourcesObservation) DeepCopyInto(out *DataSourcesObservation) { + *out = *in + if in.DataImport != nil { + in, out := &in.DataImport, &out.DataImport + *out = new(DataImportObservation) + (*in).DeepCopyInto(*out) + } + if in.Extension != nil { + in, out := &in.Extension, &out.Extension + *out = make([]ExtensionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IisLog != nil { + in, out := &in.IisLog, &out.IisLog + *out = make([]IisLogObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LogFile != nil { + in, out := &in.LogFile, &out.LogFile + *out = make([]LogFileObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PerformanceCounter != nil { + in, out := &in.PerformanceCounter, &out.PerformanceCounter + *out = make([]PerformanceCounterObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PlatformTelemetry != nil { + in, out := &in.PlatformTelemetry, &out.PlatformTelemetry + *out = make([]PlatformTelemetryObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PrometheusForwarder != nil { + in, out := &in.PrometheusForwarder, &out.PrometheusForwarder + *out = make([]PrometheusForwarderObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Syslog != nil { + in, out := &in.Syslog, &out.Syslog + 
*out = make([]SyslogObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.WindowsEventLog != nil { + in, out := &in.WindowsEventLog, &out.WindowsEventLog + *out = make([]WindowsEventLogObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.WindowsFirewallLog != nil { + in, out := &in.WindowsFirewallLog, &out.WindowsFirewallLog + *out = make([]WindowsFirewallLogObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSourcesObservation. +func (in *DataSourcesObservation) DeepCopy() *DataSourcesObservation { + if in == nil { + return nil + } + out := new(DataSourcesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataSourcesParameters) DeepCopyInto(out *DataSourcesParameters) { + *out = *in + if in.DataImport != nil { + in, out := &in.DataImport, &out.DataImport + *out = new(DataImportParameters) + (*in).DeepCopyInto(*out) + } + if in.Extension != nil { + in, out := &in.Extension, &out.Extension + *out = make([]ExtensionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IisLog != nil { + in, out := &in.IisLog, &out.IisLog + *out = make([]IisLogParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LogFile != nil { + in, out := &in.LogFile, &out.LogFile + *out = make([]LogFileParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PerformanceCounter != nil { + in, out := &in.PerformanceCounter, &out.PerformanceCounter + *out = make([]PerformanceCounterParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PlatformTelemetry != nil { + in, out := 
&in.PlatformTelemetry, &out.PlatformTelemetry + *out = make([]PlatformTelemetryParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PrometheusForwarder != nil { + in, out := &in.PrometheusForwarder, &out.PrometheusForwarder + *out = make([]PrometheusForwarderParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Syslog != nil { + in, out := &in.Syslog, &out.Syslog + *out = make([]SyslogParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.WindowsEventLog != nil { + in, out := &in.WindowsEventLog, &out.WindowsEventLog + *out = make([]WindowsEventLogParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.WindowsFirewallLog != nil { + in, out := &in.WindowsFirewallLog, &out.WindowsFirewallLog + *out = make([]WindowsFirewallLogParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSourcesParameters. +func (in *DataSourcesParameters) DeepCopy() *DataSourcesParameters { + if in == nil { + return nil + } + out := new(DataSourcesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DestinationsInitParameters) DeepCopyInto(out *DestinationsInitParameters) { + *out = *in + if in.AzureMonitorMetrics != nil { + in, out := &in.AzureMonitorMetrics, &out.AzureMonitorMetrics + *out = new(AzureMonitorMetricsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.EventHub != nil { + in, out := &in.EventHub, &out.EventHub + *out = new(EventHubInitParameters) + (*in).DeepCopyInto(*out) + } + if in.EventHubDirect != nil { + in, out := &in.EventHubDirect, &out.EventHubDirect + *out = new(EventHubDirectInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LogAnalytics != nil { + in, out := &in.LogAnalytics, &out.LogAnalytics + *out = make([]LogAnalyticsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MonitorAccount != nil { + in, out := &in.MonitorAccount, &out.MonitorAccount + *out = make([]MonitorAccountInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StorageBlob != nil { + in, out := &in.StorageBlob, &out.StorageBlob + *out = make([]StorageBlobInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StorageBlobDirect != nil { + in, out := &in.StorageBlobDirect, &out.StorageBlobDirect + *out = make([]StorageBlobDirectInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StorageTableDirect != nil { + in, out := &in.StorageTableDirect, &out.StorageTableDirect + *out = make([]StorageTableDirectInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationsInitParameters. 
+func (in *DestinationsInitParameters) DeepCopy() *DestinationsInitParameters { + if in == nil { + return nil + } + out := new(DestinationsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DestinationsObservation) DeepCopyInto(out *DestinationsObservation) { + *out = *in + if in.AzureMonitorMetrics != nil { + in, out := &in.AzureMonitorMetrics, &out.AzureMonitorMetrics + *out = new(AzureMonitorMetricsObservation) + (*in).DeepCopyInto(*out) + } + if in.EventHub != nil { + in, out := &in.EventHub, &out.EventHub + *out = new(EventHubObservation) + (*in).DeepCopyInto(*out) + } + if in.EventHubDirect != nil { + in, out := &in.EventHubDirect, &out.EventHubDirect + *out = new(EventHubDirectObservation) + (*in).DeepCopyInto(*out) + } + if in.LogAnalytics != nil { + in, out := &in.LogAnalytics, &out.LogAnalytics + *out = make([]LogAnalyticsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MonitorAccount != nil { + in, out := &in.MonitorAccount, &out.MonitorAccount + *out = make([]MonitorAccountObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StorageBlob != nil { + in, out := &in.StorageBlob, &out.StorageBlob + *out = make([]StorageBlobObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StorageBlobDirect != nil { + in, out := &in.StorageBlobDirect, &out.StorageBlobDirect + *out = make([]StorageBlobDirectObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StorageTableDirect != nil { + in, out := &in.StorageTableDirect, &out.StorageTableDirect + *out = make([]StorageTableDirectObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
DestinationsObservation. +func (in *DestinationsObservation) DeepCopy() *DestinationsObservation { + if in == nil { + return nil + } + out := new(DestinationsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DestinationsParameters) DeepCopyInto(out *DestinationsParameters) { + *out = *in + if in.AzureMonitorMetrics != nil { + in, out := &in.AzureMonitorMetrics, &out.AzureMonitorMetrics + *out = new(AzureMonitorMetricsParameters) + (*in).DeepCopyInto(*out) + } + if in.EventHub != nil { + in, out := &in.EventHub, &out.EventHub + *out = new(EventHubParameters) + (*in).DeepCopyInto(*out) + } + if in.EventHubDirect != nil { + in, out := &in.EventHubDirect, &out.EventHubDirect + *out = new(EventHubDirectParameters) + (*in).DeepCopyInto(*out) + } + if in.LogAnalytics != nil { + in, out := &in.LogAnalytics, &out.LogAnalytics + *out = make([]LogAnalyticsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MonitorAccount != nil { + in, out := &in.MonitorAccount, &out.MonitorAccount + *out = make([]MonitorAccountParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StorageBlob != nil { + in, out := &in.StorageBlob, &out.StorageBlob + *out = make([]StorageBlobParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StorageBlobDirect != nil { + in, out := &in.StorageBlobDirect, &out.StorageBlobDirect + *out = make([]StorageBlobDirectParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StorageTableDirect != nil { + in, out := &in.StorageTableDirect, &out.StorageTableDirect + *out = make([]StorageTableDirectParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a 
new DestinationsParameters. +func (in *DestinationsParameters) DeepCopy() *DestinationsParameters { + if in == nil { + return nil + } + out := new(DestinationsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DimensionInitParameters) DeepCopyInto(out *DimensionInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DimensionInitParameters. +func (in *DimensionInitParameters) DeepCopy() *DimensionInitParameters { + if in == nil { + return nil + } + out := new(DimensionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DimensionObservation) DeepCopyInto(out *DimensionObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DimensionObservation. 
+func (in *DimensionObservation) DeepCopy() *DimensionObservation { + if in == nil { + return nil + } + out := new(DimensionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DimensionParameters) DeepCopyInto(out *DimensionParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DimensionParameters. +func (in *DimensionParameters) DeepCopy() *DimensionParameters { + if in == nil { + return nil + } + out := new(DimensionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DimensionsInitParameters) DeepCopyInto(out *DimensionsInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DimensionsInitParameters. 
+func (in *DimensionsInitParameters) DeepCopy() *DimensionsInitParameters { + if in == nil { + return nil + } + out := new(DimensionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DimensionsObservation) DeepCopyInto(out *DimensionsObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DimensionsObservation. +func (in *DimensionsObservation) DeepCopy() *DimensionsObservation { + if in == nil { + return nil + } + out := new(DimensionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DimensionsParameters) DeepCopyInto(out *DimensionsParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DimensionsParameters. 
+func (in *DimensionsParameters) DeepCopy() *DimensionsParameters { + if in == nil { + return nil + } + out := new(DimensionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DynamicCriteriaDimensionInitParameters) DeepCopyInto(out *DynamicCriteriaDimensionInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DynamicCriteriaDimensionInitParameters. +func (in *DynamicCriteriaDimensionInitParameters) DeepCopy() *DynamicCriteriaDimensionInitParameters { + if in == nil { + return nil + } + out := new(DynamicCriteriaDimensionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DynamicCriteriaDimensionObservation) DeepCopyInto(out *DynamicCriteriaDimensionObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DynamicCriteriaDimensionObservation. +func (in *DynamicCriteriaDimensionObservation) DeepCopy() *DynamicCriteriaDimensionObservation { + if in == nil { + return nil + } + out := new(DynamicCriteriaDimensionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DynamicCriteriaDimensionParameters) DeepCopyInto(out *DynamicCriteriaDimensionParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DynamicCriteriaDimensionParameters. 
+func (in *DynamicCriteriaDimensionParameters) DeepCopy() *DynamicCriteriaDimensionParameters { + if in == nil { + return nil + } + out := new(DynamicCriteriaDimensionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DynamicCriteriaInitParameters) DeepCopyInto(out *DynamicCriteriaInitParameters) { + *out = *in + if in.Aggregation != nil { + in, out := &in.Aggregation, &out.Aggregation + *out = new(string) + **out = **in + } + if in.AlertSensitivity != nil { + in, out := &in.AlertSensitivity, &out.AlertSensitivity + *out = new(string) + **out = **in + } + if in.Dimension != nil { + in, out := &in.Dimension, &out.Dimension + *out = make([]DynamicCriteriaDimensionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EvaluationFailureCount != nil { + in, out := &in.EvaluationFailureCount, &out.EvaluationFailureCount + *out = new(float64) + **out = **in + } + if in.EvaluationTotalCount != nil { + in, out := &in.EvaluationTotalCount, &out.EvaluationTotalCount + *out = new(float64) + **out = **in + } + if in.IgnoreDataBefore != nil { + in, out := &in.IgnoreDataBefore, &out.IgnoreDataBefore + *out = new(string) + **out = **in + } + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName + *out = new(string) + **out = **in + } + if in.MetricNamespace != nil { + in, out := &in.MetricNamespace, &out.MetricNamespace + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.SkipMetricValidation != nil { + in, out := &in.SkipMetricValidation, &out.SkipMetricValidation + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DynamicCriteriaInitParameters. 
+func (in *DynamicCriteriaInitParameters) DeepCopy() *DynamicCriteriaInitParameters { + if in == nil { + return nil + } + out := new(DynamicCriteriaInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DynamicCriteriaObservation) DeepCopyInto(out *DynamicCriteriaObservation) { + *out = *in + if in.Aggregation != nil { + in, out := &in.Aggregation, &out.Aggregation + *out = new(string) + **out = **in + } + if in.AlertSensitivity != nil { + in, out := &in.AlertSensitivity, &out.AlertSensitivity + *out = new(string) + **out = **in + } + if in.Dimension != nil { + in, out := &in.Dimension, &out.Dimension + *out = make([]DynamicCriteriaDimensionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EvaluationFailureCount != nil { + in, out := &in.EvaluationFailureCount, &out.EvaluationFailureCount + *out = new(float64) + **out = **in + } + if in.EvaluationTotalCount != nil { + in, out := &in.EvaluationTotalCount, &out.EvaluationTotalCount + *out = new(float64) + **out = **in + } + if in.IgnoreDataBefore != nil { + in, out := &in.IgnoreDataBefore, &out.IgnoreDataBefore + *out = new(string) + **out = **in + } + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName + *out = new(string) + **out = **in + } + if in.MetricNamespace != nil { + in, out := &in.MetricNamespace, &out.MetricNamespace + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.SkipMetricValidation != nil { + in, out := &in.SkipMetricValidation, &out.SkipMetricValidation + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DynamicCriteriaObservation. 
+func (in *DynamicCriteriaObservation) DeepCopy() *DynamicCriteriaObservation { + if in == nil { + return nil + } + out := new(DynamicCriteriaObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DynamicCriteriaParameters) DeepCopyInto(out *DynamicCriteriaParameters) { + *out = *in + if in.Aggregation != nil { + in, out := &in.Aggregation, &out.Aggregation + *out = new(string) + **out = **in + } + if in.AlertSensitivity != nil { + in, out := &in.AlertSensitivity, &out.AlertSensitivity + *out = new(string) + **out = **in + } + if in.Dimension != nil { + in, out := &in.Dimension, &out.Dimension + *out = make([]DynamicCriteriaDimensionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EvaluationFailureCount != nil { + in, out := &in.EvaluationFailureCount, &out.EvaluationFailureCount + *out = new(float64) + **out = **in + } + if in.EvaluationTotalCount != nil { + in, out := &in.EvaluationTotalCount, &out.EvaluationTotalCount + *out = new(float64) + **out = **in + } + if in.IgnoreDataBefore != nil { + in, out := &in.IgnoreDataBefore, &out.IgnoreDataBefore + *out = new(string) + **out = **in + } + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName + *out = new(string) + **out = **in + } + if in.MetricNamespace != nil { + in, out := &in.MetricNamespace, &out.MetricNamespace + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.SkipMetricValidation != nil { + in, out := &in.SkipMetricValidation, &out.SkipMetricValidation + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DynamicCriteriaParameters. 
+func (in *DynamicCriteriaParameters) DeepCopy() *DynamicCriteriaParameters { + if in == nil { + return nil + } + out := new(DynamicCriteriaParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EmailInitParameters) DeepCopyInto(out *EmailInitParameters) { + *out = *in + if in.CustomEmails != nil { + in, out := &in.CustomEmails, &out.CustomEmails + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SendToSubscriptionAdministrator != nil { + in, out := &in.SendToSubscriptionAdministrator, &out.SendToSubscriptionAdministrator + *out = new(bool) + **out = **in + } + if in.SendToSubscriptionCoAdministrator != nil { + in, out := &in.SendToSubscriptionCoAdministrator, &out.SendToSubscriptionCoAdministrator + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmailInitParameters. +func (in *EmailInitParameters) DeepCopy() *EmailInitParameters { + if in == nil { + return nil + } + out := new(EmailInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EmailObservation) DeepCopyInto(out *EmailObservation) { + *out = *in + if in.CustomEmails != nil { + in, out := &in.CustomEmails, &out.CustomEmails + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SendToSubscriptionAdministrator != nil { + in, out := &in.SendToSubscriptionAdministrator, &out.SendToSubscriptionAdministrator + *out = new(bool) + **out = **in + } + if in.SendToSubscriptionCoAdministrator != nil { + in, out := &in.SendToSubscriptionCoAdministrator, &out.SendToSubscriptionCoAdministrator + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmailObservation. +func (in *EmailObservation) DeepCopy() *EmailObservation { + if in == nil { + return nil + } + out := new(EmailObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EmailParameters) DeepCopyInto(out *EmailParameters) { + *out = *in + if in.CustomEmails != nil { + in, out := &in.CustomEmails, &out.CustomEmails + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SendToSubscriptionAdministrator != nil { + in, out := &in.SendToSubscriptionAdministrator, &out.SendToSubscriptionAdministrator + *out = new(bool) + **out = **in + } + if in.SendToSubscriptionCoAdministrator != nil { + in, out := &in.SendToSubscriptionCoAdministrator, &out.SendToSubscriptionCoAdministrator + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmailParameters. 
+func (in *EmailParameters) DeepCopy() *EmailParameters { + if in == nil { + return nil + } + out := new(EmailParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EmailReceiverInitParameters) DeepCopyInto(out *EmailReceiverInitParameters) { + *out = *in + if in.EmailAddress != nil { + in, out := &in.EmailAddress, &out.EmailAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.UseCommonAlertSchema != nil { + in, out := &in.UseCommonAlertSchema, &out.UseCommonAlertSchema + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmailReceiverInitParameters. +func (in *EmailReceiverInitParameters) DeepCopy() *EmailReceiverInitParameters { + if in == nil { + return nil + } + out := new(EmailReceiverInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EmailReceiverObservation) DeepCopyInto(out *EmailReceiverObservation) { + *out = *in + if in.EmailAddress != nil { + in, out := &in.EmailAddress, &out.EmailAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.UseCommonAlertSchema != nil { + in, out := &in.UseCommonAlertSchema, &out.UseCommonAlertSchema + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmailReceiverObservation. 
+func (in *EmailReceiverObservation) DeepCopy() *EmailReceiverObservation { + if in == nil { + return nil + } + out := new(EmailReceiverObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EmailReceiverParameters) DeepCopyInto(out *EmailReceiverParameters) { + *out = *in + if in.EmailAddress != nil { + in, out := &in.EmailAddress, &out.EmailAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.UseCommonAlertSchema != nil { + in, out := &in.UseCommonAlertSchema, &out.UseCommonAlertSchema + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmailReceiverParameters. +func (in *EmailReceiverParameters) DeepCopy() *EmailReceiverParameters { + if in == nil { + return nil + } + out := new(EmailReceiverParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnabledLogInitParameters) DeepCopyInto(out *EnabledLogInitParameters) { + *out = *in + if in.Category != nil { + in, out := &in.Category, &out.Category + *out = new(string) + **out = **in + } + if in.CategoryGroup != nil { + in, out := &in.CategoryGroup, &out.CategoryGroup + *out = new(string) + **out = **in + } + if in.RetentionPolicy != nil { + in, out := &in.RetentionPolicy, &out.RetentionPolicy + *out = new(RetentionPolicyInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnabledLogInitParameters. 
+func (in *EnabledLogInitParameters) DeepCopy() *EnabledLogInitParameters { + if in == nil { + return nil + } + out := new(EnabledLogInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnabledLogObservation) DeepCopyInto(out *EnabledLogObservation) { + *out = *in + if in.Category != nil { + in, out := &in.Category, &out.Category + *out = new(string) + **out = **in + } + if in.CategoryGroup != nil { + in, out := &in.CategoryGroup, &out.CategoryGroup + *out = new(string) + **out = **in + } + if in.RetentionPolicy != nil { + in, out := &in.RetentionPolicy, &out.RetentionPolicy + *out = new(RetentionPolicyObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnabledLogObservation. +func (in *EnabledLogObservation) DeepCopy() *EnabledLogObservation { + if in == nil { + return nil + } + out := new(EnabledLogObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnabledLogParameters) DeepCopyInto(out *EnabledLogParameters) { + *out = *in + if in.Category != nil { + in, out := &in.Category, &out.Category + *out = new(string) + **out = **in + } + if in.CategoryGroup != nil { + in, out := &in.CategoryGroup, &out.CategoryGroup + *out = new(string) + **out = **in + } + if in.RetentionPolicy != nil { + in, out := &in.RetentionPolicy, &out.RetentionPolicy + *out = new(RetentionPolicyParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnabledLogParameters. 
+func (in *EnabledLogParameters) DeepCopy() *EnabledLogParameters { + if in == nil { + return nil + } + out := new(EnabledLogParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventHubDataSourceInitParameters) DeepCopyInto(out *EventHubDataSourceInitParameters) { + *out = *in + if in.ConsumerGroup != nil { + in, out := &in.ConsumerGroup, &out.ConsumerGroup + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Stream != nil { + in, out := &in.Stream, &out.Stream + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventHubDataSourceInitParameters. +func (in *EventHubDataSourceInitParameters) DeepCopy() *EventHubDataSourceInitParameters { + if in == nil { + return nil + } + out := new(EventHubDataSourceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventHubDataSourceObservation) DeepCopyInto(out *EventHubDataSourceObservation) { + *out = *in + if in.ConsumerGroup != nil { + in, out := &in.ConsumerGroup, &out.ConsumerGroup + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Stream != nil { + in, out := &in.Stream, &out.Stream + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventHubDataSourceObservation. 
+func (in *EventHubDataSourceObservation) DeepCopy() *EventHubDataSourceObservation { + if in == nil { + return nil + } + out := new(EventHubDataSourceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventHubDataSourceParameters) DeepCopyInto(out *EventHubDataSourceParameters) { + *out = *in + if in.ConsumerGroup != nil { + in, out := &in.ConsumerGroup, &out.ConsumerGroup + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Stream != nil { + in, out := &in.Stream, &out.Stream + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventHubDataSourceParameters. +func (in *EventHubDataSourceParameters) DeepCopy() *EventHubDataSourceParameters { + if in == nil { + return nil + } + out := new(EventHubDataSourceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventHubDirectInitParameters) DeepCopyInto(out *EventHubDirectInitParameters) { + *out = *in + if in.EventHubID != nil { + in, out := &in.EventHubID, &out.EventHubID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventHubDirectInitParameters. +func (in *EventHubDirectInitParameters) DeepCopy() *EventHubDirectInitParameters { + if in == nil { + return nil + } + out := new(EventHubDirectInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EventHubDirectObservation) DeepCopyInto(out *EventHubDirectObservation) { + *out = *in + if in.EventHubID != nil { + in, out := &in.EventHubID, &out.EventHubID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventHubDirectObservation. +func (in *EventHubDirectObservation) DeepCopy() *EventHubDirectObservation { + if in == nil { + return nil + } + out := new(EventHubDirectObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventHubDirectParameters) DeepCopyInto(out *EventHubDirectParameters) { + *out = *in + if in.EventHubID != nil { + in, out := &in.EventHubID, &out.EventHubID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventHubDirectParameters. +func (in *EventHubDirectParameters) DeepCopy() *EventHubDirectParameters { + if in == nil { + return nil + } + out := new(EventHubDirectParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EventHubInitParameters) DeepCopyInto(out *EventHubInitParameters) { + *out = *in + if in.EventHubID != nil { + in, out := &in.EventHubID, &out.EventHubID + *out = new(string) + **out = **in + } + if in.EventHubIDRef != nil { + in, out := &in.EventHubIDRef, &out.EventHubIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.EventHubIDSelector != nil { + in, out := &in.EventHubIDSelector, &out.EventHubIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventHubInitParameters. +func (in *EventHubInitParameters) DeepCopy() *EventHubInitParameters { + if in == nil { + return nil + } + out := new(EventHubInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventHubObservation) DeepCopyInto(out *EventHubObservation) { + *out = *in + if in.EventHubID != nil { + in, out := &in.EventHubID, &out.EventHubID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventHubObservation. +func (in *EventHubObservation) DeepCopy() *EventHubObservation { + if in == nil { + return nil + } + out := new(EventHubObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EventHubParameters) DeepCopyInto(out *EventHubParameters) { + *out = *in + if in.EventHubID != nil { + in, out := &in.EventHubID, &out.EventHubID + *out = new(string) + **out = **in + } + if in.EventHubIDRef != nil { + in, out := &in.EventHubIDRef, &out.EventHubIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.EventHubIDSelector != nil { + in, out := &in.EventHubIDSelector, &out.EventHubIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventHubParameters. +func (in *EventHubParameters) DeepCopy() *EventHubParameters { + if in == nil { + return nil + } + out := new(EventHubParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventHubReceiverInitParameters) DeepCopyInto(out *EventHubReceiverInitParameters) { + *out = *in + if in.EventHubID != nil { + in, out := &in.EventHubID, &out.EventHubID + *out = new(string) + **out = **in + } + if in.EventHubName != nil { + in, out := &in.EventHubName, &out.EventHubName + *out = new(string) + **out = **in + } + if in.EventHubNamespace != nil { + in, out := &in.EventHubNamespace, &out.EventHubNamespace + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SubscriptionID != nil { + in, out := &in.SubscriptionID, &out.SubscriptionID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.UseCommonAlertSchema != nil { + in, out := &in.UseCommonAlertSchema, &out.UseCommonAlertSchema + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the 
receiver, creating a new EventHubReceiverInitParameters. +func (in *EventHubReceiverInitParameters) DeepCopy() *EventHubReceiverInitParameters { + if in == nil { + return nil + } + out := new(EventHubReceiverInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventHubReceiverObservation) DeepCopyInto(out *EventHubReceiverObservation) { + *out = *in + if in.EventHubID != nil { + in, out := &in.EventHubID, &out.EventHubID + *out = new(string) + **out = **in + } + if in.EventHubName != nil { + in, out := &in.EventHubName, &out.EventHubName + *out = new(string) + **out = **in + } + if in.EventHubNamespace != nil { + in, out := &in.EventHubNamespace, &out.EventHubNamespace + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SubscriptionID != nil { + in, out := &in.SubscriptionID, &out.SubscriptionID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.UseCommonAlertSchema != nil { + in, out := &in.UseCommonAlertSchema, &out.UseCommonAlertSchema + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventHubReceiverObservation. +func (in *EventHubReceiverObservation) DeepCopy() *EventHubReceiverObservation { + if in == nil { + return nil + } + out := new(EventHubReceiverObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EventHubReceiverParameters) DeepCopyInto(out *EventHubReceiverParameters) { + *out = *in + if in.EventHubID != nil { + in, out := &in.EventHubID, &out.EventHubID + *out = new(string) + **out = **in + } + if in.EventHubName != nil { + in, out := &in.EventHubName, &out.EventHubName + *out = new(string) + **out = **in + } + if in.EventHubNamespace != nil { + in, out := &in.EventHubNamespace, &out.EventHubNamespace + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SubscriptionID != nil { + in, out := &in.SubscriptionID, &out.SubscriptionID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.UseCommonAlertSchema != nil { + in, out := &in.UseCommonAlertSchema, &out.UseCommonAlertSchema + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventHubReceiverParameters. +func (in *EventHubReceiverParameters) DeepCopy() *EventHubReceiverParameters { + if in == nil { + return nil + } + out := new(EventHubReceiverParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExtensionInitParameters) DeepCopyInto(out *ExtensionInitParameters) { + *out = *in + if in.ExtensionJSON != nil { + in, out := &in.ExtensionJSON, &out.ExtensionJSON + *out = new(string) + **out = **in + } + if in.ExtensionName != nil { + in, out := &in.ExtensionName, &out.ExtensionName + *out = new(string) + **out = **in + } + if in.InputDataSources != nil { + in, out := &in.InputDataSources, &out.InputDataSources + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Streams != nil { + in, out := &in.Streams, &out.Streams + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtensionInitParameters. +func (in *ExtensionInitParameters) DeepCopy() *ExtensionInitParameters { + if in == nil { + return nil + } + out := new(ExtensionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExtensionObservation) DeepCopyInto(out *ExtensionObservation) { + *out = *in + if in.ExtensionJSON != nil { + in, out := &in.ExtensionJSON, &out.ExtensionJSON + *out = new(string) + **out = **in + } + if in.ExtensionName != nil { + in, out := &in.ExtensionName, &out.ExtensionName + *out = new(string) + **out = **in + } + if in.InputDataSources != nil { + in, out := &in.InputDataSources, &out.InputDataSources + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Streams != nil { + in, out := &in.Streams, &out.Streams + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtensionObservation. +func (in *ExtensionObservation) DeepCopy() *ExtensionObservation { + if in == nil { + return nil + } + out := new(ExtensionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExtensionParameters) DeepCopyInto(out *ExtensionParameters) { + *out = *in + if in.ExtensionJSON != nil { + in, out := &in.ExtensionJSON, &out.ExtensionJSON + *out = new(string) + **out = **in + } + if in.ExtensionName != nil { + in, out := &in.ExtensionName, &out.ExtensionName + *out = new(string) + **out = **in + } + if in.InputDataSources != nil { + in, out := &in.InputDataSources, &out.InputDataSources + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Streams != nil { + in, out := &in.Streams, &out.Streams + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtensionParameters. +func (in *ExtensionParameters) DeepCopy() *ExtensionParameters { + if in == nil { + return nil + } + out := new(ExtensionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FailingPeriodsInitParameters) DeepCopyInto(out *FailingPeriodsInitParameters) { + *out = *in + if in.MinimumFailingPeriodsToTriggerAlert != nil { + in, out := &in.MinimumFailingPeriodsToTriggerAlert, &out.MinimumFailingPeriodsToTriggerAlert + *out = new(float64) + **out = **in + } + if in.NumberOfEvaluationPeriods != nil { + in, out := &in.NumberOfEvaluationPeriods, &out.NumberOfEvaluationPeriods + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailingPeriodsInitParameters. 
+func (in *FailingPeriodsInitParameters) DeepCopy() *FailingPeriodsInitParameters { + if in == nil { + return nil + } + out := new(FailingPeriodsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FailingPeriodsObservation) DeepCopyInto(out *FailingPeriodsObservation) { + *out = *in + if in.MinimumFailingPeriodsToTriggerAlert != nil { + in, out := &in.MinimumFailingPeriodsToTriggerAlert, &out.MinimumFailingPeriodsToTriggerAlert + *out = new(float64) + **out = **in + } + if in.NumberOfEvaluationPeriods != nil { + in, out := &in.NumberOfEvaluationPeriods, &out.NumberOfEvaluationPeriods + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailingPeriodsObservation. +func (in *FailingPeriodsObservation) DeepCopy() *FailingPeriodsObservation { + if in == nil { + return nil + } + out := new(FailingPeriodsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FailingPeriodsParameters) DeepCopyInto(out *FailingPeriodsParameters) { + *out = *in + if in.MinimumFailingPeriodsToTriggerAlert != nil { + in, out := &in.MinimumFailingPeriodsToTriggerAlert, &out.MinimumFailingPeriodsToTriggerAlert + *out = new(float64) + **out = **in + } + if in.NumberOfEvaluationPeriods != nil { + in, out := &in.NumberOfEvaluationPeriods, &out.NumberOfEvaluationPeriods + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailingPeriodsParameters. 
+func (in *FailingPeriodsParameters) DeepCopy() *FailingPeriodsParameters { + if in == nil { + return nil + } + out := new(FailingPeriodsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FixedDateInitParameters) DeepCopyInto(out *FixedDateInitParameters) { + *out = *in + if in.End != nil { + in, out := &in.End, &out.End + *out = new(string) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } + if in.Timezone != nil { + in, out := &in.Timezone, &out.Timezone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FixedDateInitParameters. +func (in *FixedDateInitParameters) DeepCopy() *FixedDateInitParameters { + if in == nil { + return nil + } + out := new(FixedDateInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FixedDateObservation) DeepCopyInto(out *FixedDateObservation) { + *out = *in + if in.End != nil { + in, out := &in.End, &out.End + *out = new(string) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } + if in.Timezone != nil { + in, out := &in.Timezone, &out.Timezone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FixedDateObservation. +func (in *FixedDateObservation) DeepCopy() *FixedDateObservation { + if in == nil { + return nil + } + out := new(FixedDateObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FixedDateParameters) DeepCopyInto(out *FixedDateParameters) { + *out = *in + if in.End != nil { + in, out := &in.End, &out.End + *out = new(string) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } + if in.Timezone != nil { + in, out := &in.Timezone, &out.Timezone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FixedDateParameters. +func (in *FixedDateParameters) DeepCopy() *FixedDateParameters { + if in == nil { + return nil + } + out := new(FixedDateParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HeaderInitParameters) DeepCopyInto(out *HeaderInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeaderInitParameters. +func (in *HeaderInitParameters) DeepCopy() *HeaderInitParameters { + if in == nil { + return nil + } + out := new(HeaderInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HeaderObservation) DeepCopyInto(out *HeaderObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeaderObservation. 
+func (in *HeaderObservation) DeepCopy() *HeaderObservation { + if in == nil { + return nil + } + out := new(HeaderObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HeaderParameters) DeepCopyInto(out *HeaderParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeaderParameters. +func (in *HeaderParameters) DeepCopy() *HeaderParameters { + if in == nil { + return nil + } + out := new(HeaderParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityInitParameters) DeepCopyInto(out *IdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityInitParameters. +func (in *IdentityInitParameters) DeepCopy() *IdentityInitParameters { + if in == nil { + return nil + } + out := new(IdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentityObservation) DeepCopyInto(out *IdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityObservation. +func (in *IdentityObservation) DeepCopy() *IdentityObservation { + if in == nil { + return nil + } + out := new(IdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityParameters) DeepCopyInto(out *IdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityParameters. +func (in *IdentityParameters) DeepCopy() *IdentityParameters { + if in == nil { + return nil + } + out := new(IdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IisLogInitParameters) DeepCopyInto(out *IisLogInitParameters) { + *out = *in + if in.LogDirectories != nil { + in, out := &in.LogDirectories, &out.LogDirectories + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Streams != nil { + in, out := &in.Streams, &out.Streams + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IisLogInitParameters. +func (in *IisLogInitParameters) DeepCopy() *IisLogInitParameters { + if in == nil { + return nil + } + out := new(IisLogInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IisLogObservation) DeepCopyInto(out *IisLogObservation) { + *out = *in + if in.LogDirectories != nil { + in, out := &in.LogDirectories, &out.LogDirectories + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Streams != nil { + in, out := &in.Streams, &out.Streams + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IisLogObservation. 
+func (in *IisLogObservation) DeepCopy() *IisLogObservation { + if in == nil { + return nil + } + out := new(IisLogObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IisLogParameters) DeepCopyInto(out *IisLogParameters) { + *out = *in + if in.LogDirectories != nil { + in, out := &in.LogDirectories, &out.LogDirectories + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Streams != nil { + in, out := &in.Streams, &out.Streams + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IisLogParameters. +func (in *IisLogParameters) DeepCopy() *IisLogParameters { + if in == nil { + return nil + } + out := new(IisLogParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ItsmReceiverInitParameters) DeepCopyInto(out *ItsmReceiverInitParameters) { + *out = *in + if in.ConnectionID != nil { + in, out := &in.ConnectionID, &out.ConnectionID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.TicketConfiguration != nil { + in, out := &in.TicketConfiguration, &out.TicketConfiguration + *out = new(string) + **out = **in + } + if in.WorkspaceID != nil { + in, out := &in.WorkspaceID, &out.WorkspaceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ItsmReceiverInitParameters. +func (in *ItsmReceiverInitParameters) DeepCopy() *ItsmReceiverInitParameters { + if in == nil { + return nil + } + out := new(ItsmReceiverInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ItsmReceiverObservation) DeepCopyInto(out *ItsmReceiverObservation) { + *out = *in + if in.ConnectionID != nil { + in, out := &in.ConnectionID, &out.ConnectionID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.TicketConfiguration != nil { + in, out := &in.TicketConfiguration, &out.TicketConfiguration + *out = new(string) + **out = **in + } + if in.WorkspaceID != nil { + in, out := &in.WorkspaceID, &out.WorkspaceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ItsmReceiverObservation. 
+func (in *ItsmReceiverObservation) DeepCopy() *ItsmReceiverObservation { + if in == nil { + return nil + } + out := new(ItsmReceiverObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ItsmReceiverParameters) DeepCopyInto(out *ItsmReceiverParameters) { + *out = *in + if in.ConnectionID != nil { + in, out := &in.ConnectionID, &out.ConnectionID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.TicketConfiguration != nil { + in, out := &in.TicketConfiguration, &out.TicketConfiguration + *out = new(string) + **out = **in + } + if in.WorkspaceID != nil { + in, out := &in.WorkspaceID, &out.WorkspaceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ItsmReceiverParameters. +func (in *ItsmReceiverParameters) DeepCopy() *ItsmReceiverParameters { + if in == nil { + return nil + } + out := new(ItsmReceiverParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LabelIncludeFilterInitParameters) DeepCopyInto(out *LabelIncludeFilterInitParameters) { + *out = *in + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabelIncludeFilterInitParameters. 
+func (in *LabelIncludeFilterInitParameters) DeepCopy() *LabelIncludeFilterInitParameters { + if in == nil { + return nil + } + out := new(LabelIncludeFilterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LabelIncludeFilterObservation) DeepCopyInto(out *LabelIncludeFilterObservation) { + *out = *in + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabelIncludeFilterObservation. +func (in *LabelIncludeFilterObservation) DeepCopy() *LabelIncludeFilterObservation { + if in == nil { + return nil + } + out := new(LabelIncludeFilterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LabelIncludeFilterParameters) DeepCopyInto(out *LabelIncludeFilterParameters) { + *out = *in + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabelIncludeFilterParameters. +func (in *LabelIncludeFilterParameters) DeepCopy() *LabelIncludeFilterParameters { + if in == nil { + return nil + } + out := new(LabelIncludeFilterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LogAnalyticsInitParameters) DeepCopyInto(out *LogAnalyticsInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.WorkspaceResourceID != nil { + in, out := &in.WorkspaceResourceID, &out.WorkspaceResourceID + *out = new(string) + **out = **in + } + if in.WorkspaceResourceIDRef != nil { + in, out := &in.WorkspaceResourceIDRef, &out.WorkspaceResourceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.WorkspaceResourceIDSelector != nil { + in, out := &in.WorkspaceResourceIDSelector, &out.WorkspaceResourceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogAnalyticsInitParameters. +func (in *LogAnalyticsInitParameters) DeepCopy() *LogAnalyticsInitParameters { + if in == nil { + return nil + } + out := new(LogAnalyticsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogAnalyticsObservation) DeepCopyInto(out *LogAnalyticsObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.WorkspaceResourceID != nil { + in, out := &in.WorkspaceResourceID, &out.WorkspaceResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogAnalyticsObservation. +func (in *LogAnalyticsObservation) DeepCopy() *LogAnalyticsObservation { + if in == nil { + return nil + } + out := new(LogAnalyticsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LogAnalyticsParameters) DeepCopyInto(out *LogAnalyticsParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.WorkspaceResourceID != nil { + in, out := &in.WorkspaceResourceID, &out.WorkspaceResourceID + *out = new(string) + **out = **in + } + if in.WorkspaceResourceIDRef != nil { + in, out := &in.WorkspaceResourceIDRef, &out.WorkspaceResourceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.WorkspaceResourceIDSelector != nil { + in, out := &in.WorkspaceResourceIDSelector, &out.WorkspaceResourceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogAnalyticsParameters. +func (in *LogAnalyticsParameters) DeepCopy() *LogAnalyticsParameters { + if in == nil { + return nil + } + out := new(LogAnalyticsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LogFileInitParameters) DeepCopyInto(out *LogFileInitParameters) { + *out = *in + if in.FilePatterns != nil { + in, out := &in.FilePatterns, &out.FilePatterns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Settings != nil { + in, out := &in.Settings, &out.Settings + *out = new(SettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Streams != nil { + in, out := &in.Streams, &out.Streams + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogFileInitParameters. +func (in *LogFileInitParameters) DeepCopy() *LogFileInitParameters { + if in == nil { + return nil + } + out := new(LogFileInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LogFileObservation) DeepCopyInto(out *LogFileObservation) { + *out = *in + if in.FilePatterns != nil { + in, out := &in.FilePatterns, &out.FilePatterns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Settings != nil { + in, out := &in.Settings, &out.Settings + *out = new(SettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.Streams != nil { + in, out := &in.Streams, &out.Streams + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogFileObservation. +func (in *LogFileObservation) DeepCopy() *LogFileObservation { + if in == nil { + return nil + } + out := new(LogFileObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LogFileParameters) DeepCopyInto(out *LogFileParameters) { + *out = *in + if in.FilePatterns != nil { + in, out := &in.FilePatterns, &out.FilePatterns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Settings != nil { + in, out := &in.Settings, &out.Settings + *out = new(SettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.Streams != nil { + in, out := &in.Streams, &out.Streams + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogFileParameters. +func (in *LogFileParameters) DeepCopy() *LogFileParameters { + if in == nil { + return nil + } + out := new(LogFileParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogInitParameters) DeepCopyInto(out *LogInitParameters) { + *out = *in + if in.Category != nil { + in, out := &in.Category, &out.Category + *out = new(string) + **out = **in + } + if in.CategoryGroup != nil { + in, out := &in.CategoryGroup, &out.CategoryGroup + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.RetentionPolicy != nil { + in, out := &in.RetentionPolicy, &out.RetentionPolicy + *out = new(LogRetentionPolicyInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogInitParameters. 
+func (in *LogInitParameters) DeepCopy() *LogInitParameters { + if in == nil { + return nil + } + out := new(LogInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogObservation) DeepCopyInto(out *LogObservation) { + *out = *in + if in.Category != nil { + in, out := &in.Category, &out.Category + *out = new(string) + **out = **in + } + if in.CategoryGroup != nil { + in, out := &in.CategoryGroup, &out.CategoryGroup + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.RetentionPolicy != nil { + in, out := &in.RetentionPolicy, &out.RetentionPolicy + *out = new(LogRetentionPolicyObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogObservation. +func (in *LogObservation) DeepCopy() *LogObservation { + if in == nil { + return nil + } + out := new(LogObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogParameters) DeepCopyInto(out *LogParameters) { + *out = *in + if in.Category != nil { + in, out := &in.Category, &out.Category + *out = new(string) + **out = **in + } + if in.CategoryGroup != nil { + in, out := &in.CategoryGroup, &out.CategoryGroup + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.RetentionPolicy != nil { + in, out := &in.RetentionPolicy, &out.RetentionPolicy + *out = new(LogRetentionPolicyParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogParameters. 
+func (in *LogParameters) DeepCopy() *LogParameters { + if in == nil { + return nil + } + out := new(LogParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogRetentionPolicyInitParameters) DeepCopyInto(out *LogRetentionPolicyInitParameters) { + *out = *in + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = new(float64) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogRetentionPolicyInitParameters. +func (in *LogRetentionPolicyInitParameters) DeepCopy() *LogRetentionPolicyInitParameters { + if in == nil { + return nil + } + out := new(LogRetentionPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogRetentionPolicyObservation) DeepCopyInto(out *LogRetentionPolicyObservation) { + *out = *in + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = new(float64) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogRetentionPolicyObservation. +func (in *LogRetentionPolicyObservation) DeepCopy() *LogRetentionPolicyObservation { + if in == nil { + return nil + } + out := new(LogRetentionPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LogRetentionPolicyParameters) DeepCopyInto(out *LogRetentionPolicyParameters) { + *out = *in + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = new(float64) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogRetentionPolicyParameters. +func (in *LogRetentionPolicyParameters) DeepCopy() *LogRetentionPolicyParameters { + if in == nil { + return nil + } + out := new(LogRetentionPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogicAppReceiverInitParameters) DeepCopyInto(out *LogicAppReceiverInitParameters) { + *out = *in + if in.CallbackURL != nil { + in, out := &in.CallbackURL, &out.CallbackURL + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ResourceID != nil { + in, out := &in.ResourceID, &out.ResourceID + *out = new(string) + **out = **in + } + if in.UseCommonAlertSchema != nil { + in, out := &in.UseCommonAlertSchema, &out.UseCommonAlertSchema + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogicAppReceiverInitParameters. +func (in *LogicAppReceiverInitParameters) DeepCopy() *LogicAppReceiverInitParameters { + if in == nil { + return nil + } + out := new(LogicAppReceiverInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LogicAppReceiverObservation) DeepCopyInto(out *LogicAppReceiverObservation) { + *out = *in + if in.CallbackURL != nil { + in, out := &in.CallbackURL, &out.CallbackURL + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ResourceID != nil { + in, out := &in.ResourceID, &out.ResourceID + *out = new(string) + **out = **in + } + if in.UseCommonAlertSchema != nil { + in, out := &in.UseCommonAlertSchema, &out.UseCommonAlertSchema + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogicAppReceiverObservation. +func (in *LogicAppReceiverObservation) DeepCopy() *LogicAppReceiverObservation { + if in == nil { + return nil + } + out := new(LogicAppReceiverObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogicAppReceiverParameters) DeepCopyInto(out *LogicAppReceiverParameters) { + *out = *in + if in.CallbackURL != nil { + in, out := &in.CallbackURL, &out.CallbackURL + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ResourceID != nil { + in, out := &in.ResourceID, &out.ResourceID + *out = new(string) + **out = **in + } + if in.UseCommonAlertSchema != nil { + in, out := &in.UseCommonAlertSchema, &out.UseCommonAlertSchema + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogicAppReceiverParameters. +func (in *LogicAppReceiverParameters) DeepCopy() *LogicAppReceiverParameters { + if in == nil { + return nil + } + out := new(LogicAppReceiverParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *MetricInitParameters) DeepCopyInto(out *MetricInitParameters) { + *out = *in + if in.Category != nil { + in, out := &in.Category, &out.Category + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.RetentionPolicy != nil { + in, out := &in.RetentionPolicy, &out.RetentionPolicy + *out = new(MetricRetentionPolicyInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricInitParameters. +func (in *MetricInitParameters) DeepCopy() *MetricInitParameters { + if in == nil { + return nil + } + out := new(MetricInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricObservation) DeepCopyInto(out *MetricObservation) { + *out = *in + if in.Category != nil { + in, out := &in.Category, &out.Category + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.RetentionPolicy != nil { + in, out := &in.RetentionPolicy, &out.RetentionPolicy + *out = new(MetricRetentionPolicyObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricObservation. +func (in *MetricObservation) DeepCopy() *MetricObservation { + if in == nil { + return nil + } + out := new(MetricObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetricParameters) DeepCopyInto(out *MetricParameters) { + *out = *in + if in.Category != nil { + in, out := &in.Category, &out.Category + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.RetentionPolicy != nil { + in, out := &in.RetentionPolicy, &out.RetentionPolicy + *out = new(MetricRetentionPolicyParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricParameters. +func (in *MetricParameters) DeepCopy() *MetricParameters { + if in == nil { + return nil + } + out := new(MetricParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricRetentionPolicyInitParameters) DeepCopyInto(out *MetricRetentionPolicyInitParameters) { + *out = *in + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = new(float64) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricRetentionPolicyInitParameters. +func (in *MetricRetentionPolicyInitParameters) DeepCopy() *MetricRetentionPolicyInitParameters { + if in == nil { + return nil + } + out := new(MetricRetentionPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetricRetentionPolicyObservation) DeepCopyInto(out *MetricRetentionPolicyObservation) { + *out = *in + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = new(float64) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricRetentionPolicyObservation. +func (in *MetricRetentionPolicyObservation) DeepCopy() *MetricRetentionPolicyObservation { + if in == nil { + return nil + } + out := new(MetricRetentionPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricRetentionPolicyParameters) DeepCopyInto(out *MetricRetentionPolicyParameters) { + *out = *in + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = new(float64) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricRetentionPolicyParameters. +func (in *MetricRetentionPolicyParameters) DeepCopy() *MetricRetentionPolicyParameters { + if in == nil { + return nil + } + out := new(MetricRetentionPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetricTriggerInitParameters) DeepCopyInto(out *MetricTriggerInitParameters) { + *out = *in + if in.Dimensions != nil { + in, out := &in.Dimensions, &out.Dimensions + *out = make([]DimensionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DivideByInstanceCount != nil { + in, out := &in.DivideByInstanceCount, &out.DivideByInstanceCount + *out = new(bool) + **out = **in + } + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName + *out = new(string) + **out = **in + } + if in.MetricNamespace != nil { + in, out := &in.MetricNamespace, &out.MetricNamespace + *out = new(string) + **out = **in + } + if in.MetricResourceID != nil { + in, out := &in.MetricResourceID, &out.MetricResourceID + *out = new(string) + **out = **in + } + if in.MetricResourceIDRef != nil { + in, out := &in.MetricResourceIDRef, &out.MetricResourceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.MetricResourceIDSelector != nil { + in, out := &in.MetricResourceIDSelector, &out.MetricResourceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Statistic != nil { + in, out := &in.Statistic, &out.Statistic + *out = new(string) + **out = **in + } + if in.Threshold != nil { + in, out := &in.Threshold, &out.Threshold + *out = new(float64) + **out = **in + } + if in.TimeAggregation != nil { + in, out := &in.TimeAggregation, &out.TimeAggregation + *out = new(string) + **out = **in + } + if in.TimeGrain != nil { + in, out := &in.TimeGrain, &out.TimeGrain + *out = new(string) + **out = **in + } + if in.TimeWindow != nil { + in, out := &in.TimeWindow, &out.TimeWindow + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricTriggerInitParameters. 
+func (in *MetricTriggerInitParameters) DeepCopy() *MetricTriggerInitParameters { + if in == nil { + return nil + } + out := new(MetricTriggerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricTriggerObservation) DeepCopyInto(out *MetricTriggerObservation) { + *out = *in + if in.Dimensions != nil { + in, out := &in.Dimensions, &out.Dimensions + *out = make([]DimensionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DivideByInstanceCount != nil { + in, out := &in.DivideByInstanceCount, &out.DivideByInstanceCount + *out = new(bool) + **out = **in + } + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName + *out = new(string) + **out = **in + } + if in.MetricNamespace != nil { + in, out := &in.MetricNamespace, &out.MetricNamespace + *out = new(string) + **out = **in + } + if in.MetricResourceID != nil { + in, out := &in.MetricResourceID, &out.MetricResourceID + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Statistic != nil { + in, out := &in.Statistic, &out.Statistic + *out = new(string) + **out = **in + } + if in.Threshold != nil { + in, out := &in.Threshold, &out.Threshold + *out = new(float64) + **out = **in + } + if in.TimeAggregation != nil { + in, out := &in.TimeAggregation, &out.TimeAggregation + *out = new(string) + **out = **in + } + if in.TimeGrain != nil { + in, out := &in.TimeGrain, &out.TimeGrain + *out = new(string) + **out = **in + } + if in.TimeWindow != nil { + in, out := &in.TimeWindow, &out.TimeWindow + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricTriggerObservation. 
+func (in *MetricTriggerObservation) DeepCopy() *MetricTriggerObservation { + if in == nil { + return nil + } + out := new(MetricTriggerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricTriggerParameters) DeepCopyInto(out *MetricTriggerParameters) { + *out = *in + if in.Dimensions != nil { + in, out := &in.Dimensions, &out.Dimensions + *out = make([]DimensionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DivideByInstanceCount != nil { + in, out := &in.DivideByInstanceCount, &out.DivideByInstanceCount + *out = new(bool) + **out = **in + } + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName + *out = new(string) + **out = **in + } + if in.MetricNamespace != nil { + in, out := &in.MetricNamespace, &out.MetricNamespace + *out = new(string) + **out = **in + } + if in.MetricResourceID != nil { + in, out := &in.MetricResourceID, &out.MetricResourceID + *out = new(string) + **out = **in + } + if in.MetricResourceIDRef != nil { + in, out := &in.MetricResourceIDRef, &out.MetricResourceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.MetricResourceIDSelector != nil { + in, out := &in.MetricResourceIDSelector, &out.MetricResourceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Statistic != nil { + in, out := &in.Statistic, &out.Statistic + *out = new(string) + **out = **in + } + if in.Threshold != nil { + in, out := &in.Threshold, &out.Threshold + *out = new(float64) + **out = **in + } + if in.TimeAggregation != nil { + in, out := &in.TimeAggregation, &out.TimeAggregation + *out = new(string) + **out = **in + } + if in.TimeGrain != nil { + in, out := &in.TimeGrain, &out.TimeGrain + *out = new(string) + **out = **in + } 
+ if in.TimeWindow != nil { + in, out := &in.TimeWindow, &out.TimeWindow + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricTriggerParameters. +func (in *MetricTriggerParameters) DeepCopy() *MetricTriggerParameters { + if in == nil { + return nil + } + out := new(MetricTriggerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorAccountInitParameters) DeepCopyInto(out *MonitorAccountInitParameters) { + *out = *in + if in.MonitorAccountID != nil { + in, out := &in.MonitorAccountID, &out.MonitorAccountID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAccountInitParameters. +func (in *MonitorAccountInitParameters) DeepCopy() *MonitorAccountInitParameters { + if in == nil { + return nil + } + out := new(MonitorAccountInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorAccountObservation) DeepCopyInto(out *MonitorAccountObservation) { + *out = *in + if in.MonitorAccountID != nil { + in, out := &in.MonitorAccountID, &out.MonitorAccountID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAccountObservation. 
+func (in *MonitorAccountObservation) DeepCopy() *MonitorAccountObservation { + if in == nil { + return nil + } + out := new(MonitorAccountObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorAccountParameters) DeepCopyInto(out *MonitorAccountParameters) { + *out = *in + if in.MonitorAccountID != nil { + in, out := &in.MonitorAccountID, &out.MonitorAccountID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAccountParameters. +func (in *MonitorAccountParameters) DeepCopy() *MonitorAccountParameters { + if in == nil { + return nil + } + out := new(MonitorAccountParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorActionGroup) DeepCopyInto(out *MonitorActionGroup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorActionGroup. +func (in *MonitorActionGroup) DeepCopy() *MonitorActionGroup { + if in == nil { + return nil + } + out := new(MonitorActionGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MonitorActionGroup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorActionGroupInitParameters) DeepCopyInto(out *MonitorActionGroupInitParameters) { + *out = *in + if in.ArmRoleReceiver != nil { + in, out := &in.ArmRoleReceiver, &out.ArmRoleReceiver + *out = make([]ArmRoleReceiverInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AutomationRunBookReceiver != nil { + in, out := &in.AutomationRunBookReceiver, &out.AutomationRunBookReceiver + *out = make([]AutomationRunBookReceiverInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AzureAppPushReceiver != nil { + in, out := &in.AzureAppPushReceiver, &out.AzureAppPushReceiver + *out = make([]AzureAppPushReceiverInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AzureFunctionReceiver != nil { + in, out := &in.AzureFunctionReceiver, &out.AzureFunctionReceiver + *out = make([]AzureFunctionReceiverInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EmailReceiver != nil { + in, out := &in.EmailReceiver, &out.EmailReceiver + *out = make([]EmailReceiverInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.EventHubReceiver != nil { + in, out := &in.EventHubReceiver, &out.EventHubReceiver + *out = make([]EventHubReceiverInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ItsmReceiver != nil { + in, out := &in.ItsmReceiver, &out.ItsmReceiver + *out = make([]ItsmReceiverInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.LogicAppReceiver != nil { + in, out := &in.LogicAppReceiver, &out.LogicAppReceiver + *out = 
make([]LogicAppReceiverInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SMSReceiver != nil { + in, out := &in.SMSReceiver, &out.SMSReceiver + *out = make([]SMSReceiverInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ShortName != nil { + in, out := &in.ShortName, &out.ShortName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VoiceReceiver != nil { + in, out := &in.VoiceReceiver, &out.VoiceReceiver + *out = make([]VoiceReceiverInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.WebhookReceiver != nil { + in, out := &in.WebhookReceiver, &out.WebhookReceiver + *out = make([]WebhookReceiverInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorActionGroupInitParameters. +func (in *MonitorActionGroupInitParameters) DeepCopy() *MonitorActionGroupInitParameters { + if in == nil { + return nil + } + out := new(MonitorActionGroupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorActionGroupList) DeepCopyInto(out *MonitorActionGroupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MonitorActionGroup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorActionGroupList. +func (in *MonitorActionGroupList) DeepCopy() *MonitorActionGroupList { + if in == nil { + return nil + } + out := new(MonitorActionGroupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MonitorActionGroupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorActionGroupObservation) DeepCopyInto(out *MonitorActionGroupObservation) { + *out = *in + if in.ArmRoleReceiver != nil { + in, out := &in.ArmRoleReceiver, &out.ArmRoleReceiver + *out = make([]ArmRoleReceiverObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AutomationRunBookReceiver != nil { + in, out := &in.AutomationRunBookReceiver, &out.AutomationRunBookReceiver + *out = make([]AutomationRunBookReceiverObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AzureAppPushReceiver != nil { + in, out := &in.AzureAppPushReceiver, &out.AzureAppPushReceiver + *out = make([]AzureAppPushReceiverObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AzureFunctionReceiver != nil { + in, out := &in.AzureFunctionReceiver, &out.AzureFunctionReceiver + *out = make([]AzureFunctionReceiverObservation, len(*in)) + for i := range *in { + 
(*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EmailReceiver != nil { + in, out := &in.EmailReceiver, &out.EmailReceiver + *out = make([]EmailReceiverObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.EventHubReceiver != nil { + in, out := &in.EventHubReceiver, &out.EventHubReceiver + *out = make([]EventHubReceiverObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.ItsmReceiver != nil { + in, out := &in.ItsmReceiver, &out.ItsmReceiver + *out = make([]ItsmReceiverObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.LogicAppReceiver != nil { + in, out := &in.LogicAppReceiver, &out.LogicAppReceiver + *out = make([]LogicAppReceiverObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.SMSReceiver != nil { + in, out := &in.SMSReceiver, &out.SMSReceiver + *out = make([]SMSReceiverObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ShortName != nil { + in, out := &in.ShortName, &out.ShortName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VoiceReceiver != nil { + in, out := &in.VoiceReceiver, 
&out.VoiceReceiver + *out = make([]VoiceReceiverObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.WebhookReceiver != nil { + in, out := &in.WebhookReceiver, &out.WebhookReceiver + *out = make([]WebhookReceiverObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorActionGroupObservation. +func (in *MonitorActionGroupObservation) DeepCopy() *MonitorActionGroupObservation { + if in == nil { + return nil + } + out := new(MonitorActionGroupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorActionGroupParameters) DeepCopyInto(out *MonitorActionGroupParameters) { + *out = *in + if in.ArmRoleReceiver != nil { + in, out := &in.ArmRoleReceiver, &out.ArmRoleReceiver + *out = make([]ArmRoleReceiverParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AutomationRunBookReceiver != nil { + in, out := &in.AutomationRunBookReceiver, &out.AutomationRunBookReceiver + *out = make([]AutomationRunBookReceiverParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AzureAppPushReceiver != nil { + in, out := &in.AzureAppPushReceiver, &out.AzureAppPushReceiver + *out = make([]AzureAppPushReceiverParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AzureFunctionReceiver != nil { + in, out := &in.AzureFunctionReceiver, &out.AzureFunctionReceiver + *out = make([]AzureFunctionReceiverParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EmailReceiver != nil { + in, out := &in.EmailReceiver, &out.EmailReceiver + *out = make([]EmailReceiverParameters, len(*in)) + for i := range *in { + 
(*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.EventHubReceiver != nil { + in, out := &in.EventHubReceiver, &out.EventHubReceiver + *out = make([]EventHubReceiverParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ItsmReceiver != nil { + in, out := &in.ItsmReceiver, &out.ItsmReceiver + *out = make([]ItsmReceiverParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.LogicAppReceiver != nil { + in, out := &in.LogicAppReceiver, &out.LogicAppReceiver + *out = make([]LogicAppReceiverParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SMSReceiver != nil { + in, out := &in.SMSReceiver, &out.SMSReceiver + *out = make([]SMSReceiverParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ShortName != nil { + in, out := &in.ShortName, &out.ShortName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VoiceReceiver != nil { + 
in, out := &in.VoiceReceiver, &out.VoiceReceiver + *out = make([]VoiceReceiverParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.WebhookReceiver != nil { + in, out := &in.WebhookReceiver, &out.WebhookReceiver + *out = make([]WebhookReceiverParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorActionGroupParameters. +func (in *MonitorActionGroupParameters) DeepCopy() *MonitorActionGroupParameters { + if in == nil { + return nil + } + out := new(MonitorActionGroupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorActionGroupSpec) DeepCopyInto(out *MonitorActionGroupSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorActionGroupSpec. +func (in *MonitorActionGroupSpec) DeepCopy() *MonitorActionGroupSpec { + if in == nil { + return nil + } + out := new(MonitorActionGroupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorActionGroupStatus) DeepCopyInto(out *MonitorActionGroupStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorActionGroupStatus. 
+func (in *MonitorActionGroupStatus) DeepCopy() *MonitorActionGroupStatus { + if in == nil { + return nil + } + out := new(MonitorActionGroupStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorActivityLogAlert) DeepCopyInto(out *MonitorActivityLogAlert) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorActivityLogAlert. +func (in *MonitorActivityLogAlert) DeepCopy() *MonitorActivityLogAlert { + if in == nil { + return nil + } + out := new(MonitorActivityLogAlert) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MonitorActivityLogAlert) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorActivityLogAlertInitParameters) DeepCopyInto(out *MonitorActivityLogAlertInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = make([]ActionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Criteria != nil { + in, out := &in.Criteria, &out.Criteria + *out = new(CriteriaInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScopesRefs != nil { + in, out := &in.ScopesRefs, &out.ScopesRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ScopesSelector != nil { + in, out := &in.ScopesSelector, &out.ScopesSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := 
&inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorActivityLogAlertInitParameters. +func (in *MonitorActivityLogAlertInitParameters) DeepCopy() *MonitorActivityLogAlertInitParameters { + if in == nil { + return nil + } + out := new(MonitorActivityLogAlertInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorActivityLogAlertList) DeepCopyInto(out *MonitorActivityLogAlertList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MonitorActivityLogAlert, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorActivityLogAlertList. +func (in *MonitorActivityLogAlertList) DeepCopy() *MonitorActivityLogAlertList { + if in == nil { + return nil + } + out := new(MonitorActivityLogAlertList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MonitorActivityLogAlertList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorActivityLogAlertObservation) DeepCopyInto(out *MonitorActivityLogAlertObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = make([]ActionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Criteria != nil { + in, out := &in.Criteria, &out.Criteria + *out = new(CriteriaObservation) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorActivityLogAlertObservation. +func (in *MonitorActivityLogAlertObservation) DeepCopy() *MonitorActivityLogAlertObservation { + if in == nil { + return nil + } + out := new(MonitorActivityLogAlertObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorActivityLogAlertParameters) DeepCopyInto(out *MonitorActivityLogAlertParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = make([]ActionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Criteria != nil { + in, out := &in.Criteria, &out.Criteria + *out = new(CriteriaParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScopesRefs != nil { + in, out := &in.ScopesRefs, &out.ScopesRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ScopesSelector != nil { + in, out := &in.ScopesSelector, &out.ScopesSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + 
*out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorActivityLogAlertParameters. +func (in *MonitorActivityLogAlertParameters) DeepCopy() *MonitorActivityLogAlertParameters { + if in == nil { + return nil + } + out := new(MonitorActivityLogAlertParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorActivityLogAlertSpec) DeepCopyInto(out *MonitorActivityLogAlertSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorActivityLogAlertSpec. +func (in *MonitorActivityLogAlertSpec) DeepCopy() *MonitorActivityLogAlertSpec { + if in == nil { + return nil + } + out := new(MonitorActivityLogAlertSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorActivityLogAlertStatus) DeepCopyInto(out *MonitorActivityLogAlertStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorActivityLogAlertStatus. +func (in *MonitorActivityLogAlertStatus) DeepCopy() *MonitorActivityLogAlertStatus { + if in == nil { + return nil + } + out := new(MonitorActivityLogAlertStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorAutoscaleSetting) DeepCopyInto(out *MonitorAutoscaleSetting) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAutoscaleSetting. +func (in *MonitorAutoscaleSetting) DeepCopy() *MonitorAutoscaleSetting { + if in == nil { + return nil + } + out := new(MonitorAutoscaleSetting) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MonitorAutoscaleSetting) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorAutoscaleSettingInitParameters) DeepCopyInto(out *MonitorAutoscaleSettingInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Notification != nil { + in, out := &in.Notification, &out.Notification + *out = new(NotificationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Predictive != nil { + in, out := &in.Predictive, &out.Predictive + *out = new(PredictiveInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Profile != nil { + in, out := &in.Profile, &out.Profile + *out = make([]ProfileInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := 
&in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TargetResourceID != nil { + in, out := &in.TargetResourceID, &out.TargetResourceID + *out = new(string) + **out = **in + } + if in.TargetResourceIDRef != nil { + in, out := &in.TargetResourceIDRef, &out.TargetResourceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.TargetResourceIDSelector != nil { + in, out := &in.TargetResourceIDSelector, &out.TargetResourceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAutoscaleSettingInitParameters. +func (in *MonitorAutoscaleSettingInitParameters) DeepCopy() *MonitorAutoscaleSettingInitParameters { + if in == nil { + return nil + } + out := new(MonitorAutoscaleSettingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorAutoscaleSettingList) DeepCopyInto(out *MonitorAutoscaleSettingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MonitorAutoscaleSetting, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAutoscaleSettingList. +func (in *MonitorAutoscaleSettingList) DeepCopy() *MonitorAutoscaleSettingList { + if in == nil { + return nil + } + out := new(MonitorAutoscaleSettingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MonitorAutoscaleSettingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorAutoscaleSettingObservation) DeepCopyInto(out *MonitorAutoscaleSettingObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Notification != nil { + in, out := &in.Notification, &out.Notification + *out = new(NotificationObservation) + (*in).DeepCopyInto(*out) + } + if in.Predictive != nil { + in, out := &in.Predictive, &out.Predictive + *out = new(PredictiveObservation) + (*in).DeepCopyInto(*out) + } + if in.Profile != nil { + in, out := &in.Profile, &out.Profile + *out = make([]ProfileObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TargetResourceID != nil { + in, out := &in.TargetResourceID, &out.TargetResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAutoscaleSettingObservation. 
+func (in *MonitorAutoscaleSettingObservation) DeepCopy() *MonitorAutoscaleSettingObservation { + if in == nil { + return nil + } + out := new(MonitorAutoscaleSettingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorAutoscaleSettingParameters) DeepCopyInto(out *MonitorAutoscaleSettingParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Notification != nil { + in, out := &in.Notification, &out.Notification + *out = new(NotificationParameters) + (*in).DeepCopyInto(*out) + } + if in.Predictive != nil { + in, out := &in.Predictive, &out.Predictive + *out = new(PredictiveParameters) + (*in).DeepCopyInto(*out) + } + if in.Profile != nil { + in, out := &in.Profile, &out.Profile + *out = make([]ProfileParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal 
+ } + } + if in.TargetResourceID != nil { + in, out := &in.TargetResourceID, &out.TargetResourceID + *out = new(string) + **out = **in + } + if in.TargetResourceIDRef != nil { + in, out := &in.TargetResourceIDRef, &out.TargetResourceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.TargetResourceIDSelector != nil { + in, out := &in.TargetResourceIDSelector, &out.TargetResourceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAutoscaleSettingParameters. +func (in *MonitorAutoscaleSettingParameters) DeepCopy() *MonitorAutoscaleSettingParameters { + if in == nil { + return nil + } + out := new(MonitorAutoscaleSettingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorAutoscaleSettingSpec) DeepCopyInto(out *MonitorAutoscaleSettingSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAutoscaleSettingSpec. +func (in *MonitorAutoscaleSettingSpec) DeepCopy() *MonitorAutoscaleSettingSpec { + if in == nil { + return nil + } + out := new(MonitorAutoscaleSettingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorAutoscaleSettingStatus) DeepCopyInto(out *MonitorAutoscaleSettingStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorAutoscaleSettingStatus. 
+func (in *MonitorAutoscaleSettingStatus) DeepCopy() *MonitorAutoscaleSettingStatus { + if in == nil { + return nil + } + out := new(MonitorAutoscaleSettingStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorDataCollectionRule) DeepCopyInto(out *MonitorDataCollectionRule) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorDataCollectionRule. +func (in *MonitorDataCollectionRule) DeepCopy() *MonitorDataCollectionRule { + if in == nil { + return nil + } + out := new(MonitorDataCollectionRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MonitorDataCollectionRule) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorDataCollectionRuleIdentityInitParameters) DeepCopyInto(out *MonitorDataCollectionRuleIdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorDataCollectionRuleIdentityInitParameters. 
+func (in *MonitorDataCollectionRuleIdentityInitParameters) DeepCopy() *MonitorDataCollectionRuleIdentityInitParameters { + if in == nil { + return nil + } + out := new(MonitorDataCollectionRuleIdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorDataCollectionRuleIdentityObservation) DeepCopyInto(out *MonitorDataCollectionRuleIdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorDataCollectionRuleIdentityObservation. +func (in *MonitorDataCollectionRuleIdentityObservation) DeepCopy() *MonitorDataCollectionRuleIdentityObservation { + if in == nil { + return nil + } + out := new(MonitorDataCollectionRuleIdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorDataCollectionRuleIdentityParameters) DeepCopyInto(out *MonitorDataCollectionRuleIdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorDataCollectionRuleIdentityParameters. +func (in *MonitorDataCollectionRuleIdentityParameters) DeepCopy() *MonitorDataCollectionRuleIdentityParameters { + if in == nil { + return nil + } + out := new(MonitorDataCollectionRuleIdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorDataCollectionRuleInitParameters) DeepCopyInto(out *MonitorDataCollectionRuleInitParameters) { + *out = *in + if in.DataCollectionEndpointID != nil { + in, out := &in.DataCollectionEndpointID, &out.DataCollectionEndpointID + *out = new(string) + **out = **in + } + if in.DataCollectionEndpointIDRef != nil { + in, out := &in.DataCollectionEndpointIDRef, &out.DataCollectionEndpointIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DataCollectionEndpointIDSelector != nil { + in, out := &in.DataCollectionEndpointIDSelector, &out.DataCollectionEndpointIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.DataFlow != nil { + in, out := &in.DataFlow, &out.DataFlow + *out = make([]DataFlowInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DataSources != nil { + in, out := &in.DataSources, &out.DataSources + *out = new(DataSourcesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out 
:= &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Destinations != nil { + in, out := &in.Destinations, &out.Destinations + *out = new(DestinationsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(MonitorDataCollectionRuleIdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.StreamDeclaration != nil { + in, out := &in.StreamDeclaration, &out.StreamDeclaration + *out = make([]StreamDeclarationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorDataCollectionRuleInitParameters. +func (in *MonitorDataCollectionRuleInitParameters) DeepCopy() *MonitorDataCollectionRuleInitParameters { + if in == nil { + return nil + } + out := new(MonitorDataCollectionRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorDataCollectionRuleList) DeepCopyInto(out *MonitorDataCollectionRuleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MonitorDataCollectionRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorDataCollectionRuleList. +func (in *MonitorDataCollectionRuleList) DeepCopy() *MonitorDataCollectionRuleList { + if in == nil { + return nil + } + out := new(MonitorDataCollectionRuleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MonitorDataCollectionRuleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorDataCollectionRuleObservation) DeepCopyInto(out *MonitorDataCollectionRuleObservation) { + *out = *in + if in.DataCollectionEndpointID != nil { + in, out := &in.DataCollectionEndpointID, &out.DataCollectionEndpointID + *out = new(string) + **out = **in + } + if in.DataFlow != nil { + in, out := &in.DataFlow, &out.DataFlow + *out = make([]DataFlowObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DataSources != nil { + in, out := &in.DataSources, &out.DataSources + *out = new(DataSourcesObservation) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Destinations != nil { + in, out := &in.Destinations, &out.Destinations + *out = new(DestinationsObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(MonitorDataCollectionRuleIdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.ImmutableID != nil { + in, out := &in.ImmutableID, &out.ImmutableID + *out = new(string) + **out = **in + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.StreamDeclaration != nil { + in, out := &in.StreamDeclaration, &out.StreamDeclaration + *out = make([]StreamDeclarationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] 
+ in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorDataCollectionRuleObservation. +func (in *MonitorDataCollectionRuleObservation) DeepCopy() *MonitorDataCollectionRuleObservation { + if in == nil { + return nil + } + out := new(MonitorDataCollectionRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorDataCollectionRuleParameters) DeepCopyInto(out *MonitorDataCollectionRuleParameters) { + *out = *in + if in.DataCollectionEndpointID != nil { + in, out := &in.DataCollectionEndpointID, &out.DataCollectionEndpointID + *out = new(string) + **out = **in + } + if in.DataCollectionEndpointIDRef != nil { + in, out := &in.DataCollectionEndpointIDRef, &out.DataCollectionEndpointIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DataCollectionEndpointIDSelector != nil { + in, out := &in.DataCollectionEndpointIDSelector, &out.DataCollectionEndpointIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.DataFlow != nil { + in, out := &in.DataFlow, &out.DataFlow + *out = make([]DataFlowParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DataSources != nil { + in, out := &in.DataSources, &out.DataSources + *out = new(DataSourcesParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Destinations != nil { + in, out := &in.Destinations, &out.Destinations + *out = new(DestinationsParameters) + (*in).DeepCopyInto(*out) + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(MonitorDataCollectionRuleIdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.Kind != nil { + in, out 
:= &in.Kind, &out.Kind + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StreamDeclaration != nil { + in, out := &in.StreamDeclaration, &out.StreamDeclaration + *out = make([]StreamDeclarationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorDataCollectionRuleParameters. +func (in *MonitorDataCollectionRuleParameters) DeepCopy() *MonitorDataCollectionRuleParameters { + if in == nil { + return nil + } + out := new(MonitorDataCollectionRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorDataCollectionRuleSpec) DeepCopyInto(out *MonitorDataCollectionRuleSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorDataCollectionRuleSpec. +func (in *MonitorDataCollectionRuleSpec) DeepCopy() *MonitorDataCollectionRuleSpec { + if in == nil { + return nil + } + out := new(MonitorDataCollectionRuleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorDataCollectionRuleStatus) DeepCopyInto(out *MonitorDataCollectionRuleStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorDataCollectionRuleStatus. +func (in *MonitorDataCollectionRuleStatus) DeepCopy() *MonitorDataCollectionRuleStatus { + if in == nil { + return nil + } + out := new(MonitorDataCollectionRuleStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorDiagnosticSetting) DeepCopyInto(out *MonitorDiagnosticSetting) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorDiagnosticSetting. 
+func (in *MonitorDiagnosticSetting) DeepCopy() *MonitorDiagnosticSetting { + if in == nil { + return nil + } + out := new(MonitorDiagnosticSetting) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MonitorDiagnosticSetting) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorDiagnosticSettingInitParameters) DeepCopyInto(out *MonitorDiagnosticSettingInitParameters) { + *out = *in + if in.EnabledLog != nil { + in, out := &in.EnabledLog, &out.EnabledLog + *out = make([]EnabledLogInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EventHubAuthorizationRuleID != nil { + in, out := &in.EventHubAuthorizationRuleID, &out.EventHubAuthorizationRuleID + *out = new(string) + **out = **in + } + if in.EventHubName != nil { + in, out := &in.EventHubName, &out.EventHubName + *out = new(string) + **out = **in + } + if in.Log != nil { + in, out := &in.Log, &out.Log + *out = make([]LogInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LogAnalyticsDestinationType != nil { + in, out := &in.LogAnalyticsDestinationType, &out.LogAnalyticsDestinationType + *out = new(string) + **out = **in + } + if in.LogAnalyticsWorkspaceID != nil { + in, out := &in.LogAnalyticsWorkspaceID, &out.LogAnalyticsWorkspaceID + *out = new(string) + **out = **in + } + if in.Metric != nil { + in, out := &in.Metric, &out.Metric + *out = make([]MetricInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PartnerSolutionID != nil { + in, out := &in.PartnerSolutionID, 
&out.PartnerSolutionID + *out = new(string) + **out = **in + } + if in.StorageAccountID != nil { + in, out := &in.StorageAccountID, &out.StorageAccountID + *out = new(string) + **out = **in + } + if in.StorageAccountIDRef != nil { + in, out := &in.StorageAccountIDRef, &out.StorageAccountIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StorageAccountIDSelector != nil { + in, out := &in.StorageAccountIDSelector, &out.StorageAccountIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.TargetResourceID != nil { + in, out := &in.TargetResourceID, &out.TargetResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorDiagnosticSettingInitParameters. +func (in *MonitorDiagnosticSettingInitParameters) DeepCopy() *MonitorDiagnosticSettingInitParameters { + if in == nil { + return nil + } + out := new(MonitorDiagnosticSettingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorDiagnosticSettingList) DeepCopyInto(out *MonitorDiagnosticSettingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MonitorDiagnosticSetting, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorDiagnosticSettingList. +func (in *MonitorDiagnosticSettingList) DeepCopy() *MonitorDiagnosticSettingList { + if in == nil { + return nil + } + out := new(MonitorDiagnosticSettingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *MonitorDiagnosticSettingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorDiagnosticSettingObservation) DeepCopyInto(out *MonitorDiagnosticSettingObservation) { + *out = *in + if in.EnabledLog != nil { + in, out := &in.EnabledLog, &out.EnabledLog + *out = make([]EnabledLogObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EventHubAuthorizationRuleID != nil { + in, out := &in.EventHubAuthorizationRuleID, &out.EventHubAuthorizationRuleID + *out = new(string) + **out = **in + } + if in.EventHubName != nil { + in, out := &in.EventHubName, &out.EventHubName + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Log != nil { + in, out := &in.Log, &out.Log + *out = make([]LogObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LogAnalyticsDestinationType != nil { + in, out := &in.LogAnalyticsDestinationType, &out.LogAnalyticsDestinationType + *out = new(string) + **out = **in + } + if in.LogAnalyticsWorkspaceID != nil { + in, out := &in.LogAnalyticsWorkspaceID, &out.LogAnalyticsWorkspaceID + *out = new(string) + **out = **in + } + if in.Metric != nil { + in, out := &in.Metric, &out.Metric + *out = make([]MetricObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PartnerSolutionID != nil { + in, out := &in.PartnerSolutionID, &out.PartnerSolutionID + *out = new(string) + **out = **in + } + if in.StorageAccountID != nil { + in, out := &in.StorageAccountID, &out.StorageAccountID + *out = new(string) + **out = **in + } + if in.TargetResourceID != nil { + in, out := 
&in.TargetResourceID, &out.TargetResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorDiagnosticSettingObservation. +func (in *MonitorDiagnosticSettingObservation) DeepCopy() *MonitorDiagnosticSettingObservation { + if in == nil { + return nil + } + out := new(MonitorDiagnosticSettingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorDiagnosticSettingParameters) DeepCopyInto(out *MonitorDiagnosticSettingParameters) { + *out = *in + if in.EnabledLog != nil { + in, out := &in.EnabledLog, &out.EnabledLog + *out = make([]EnabledLogParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EventHubAuthorizationRuleID != nil { + in, out := &in.EventHubAuthorizationRuleID, &out.EventHubAuthorizationRuleID + *out = new(string) + **out = **in + } + if in.EventHubName != nil { + in, out := &in.EventHubName, &out.EventHubName + *out = new(string) + **out = **in + } + if in.Log != nil { + in, out := &in.Log, &out.Log + *out = make([]LogParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LogAnalyticsDestinationType != nil { + in, out := &in.LogAnalyticsDestinationType, &out.LogAnalyticsDestinationType + *out = new(string) + **out = **in + } + if in.LogAnalyticsWorkspaceID != nil { + in, out := &in.LogAnalyticsWorkspaceID, &out.LogAnalyticsWorkspaceID + *out = new(string) + **out = **in + } + if in.Metric != nil { + in, out := &in.Metric, &out.Metric + *out = make([]MetricParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PartnerSolutionID != nil { + in, out := &in.PartnerSolutionID, &out.PartnerSolutionID + *out = new(string) 
+ **out = **in + } + if in.StorageAccountID != nil { + in, out := &in.StorageAccountID, &out.StorageAccountID + *out = new(string) + **out = **in + } + if in.StorageAccountIDRef != nil { + in, out := &in.StorageAccountIDRef, &out.StorageAccountIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StorageAccountIDSelector != nil { + in, out := &in.StorageAccountIDSelector, &out.StorageAccountIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.TargetResourceID != nil { + in, out := &in.TargetResourceID, &out.TargetResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorDiagnosticSettingParameters. +func (in *MonitorDiagnosticSettingParameters) DeepCopy() *MonitorDiagnosticSettingParameters { + if in == nil { + return nil + } + out := new(MonitorDiagnosticSettingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorDiagnosticSettingSpec) DeepCopyInto(out *MonitorDiagnosticSettingSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorDiagnosticSettingSpec. +func (in *MonitorDiagnosticSettingSpec) DeepCopy() *MonitorDiagnosticSettingSpec { + if in == nil { + return nil + } + out := new(MonitorDiagnosticSettingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorDiagnosticSettingStatus) DeepCopyInto(out *MonitorDiagnosticSettingStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorDiagnosticSettingStatus. +func (in *MonitorDiagnosticSettingStatus) DeepCopy() *MonitorDiagnosticSettingStatus { + if in == nil { + return nil + } + out := new(MonitorDiagnosticSettingStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorMetricAlert) DeepCopyInto(out *MonitorMetricAlert) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorMetricAlert. +func (in *MonitorMetricAlert) DeepCopy() *MonitorMetricAlert { + if in == nil { + return nil + } + out := new(MonitorMetricAlert) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MonitorMetricAlert) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorMetricAlertActionInitParameters) DeepCopyInto(out *MonitorMetricAlertActionInitParameters) { + *out = *in + if in.ActionGroupID != nil { + in, out := &in.ActionGroupID, &out.ActionGroupID + *out = new(string) + **out = **in + } + if in.ActionGroupIDRef != nil { + in, out := &in.ActionGroupIDRef, &out.ActionGroupIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ActionGroupIDSelector != nil { + in, out := &in.ActionGroupIDSelector, &out.ActionGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.WebhookProperties != nil { + in, out := &in.WebhookProperties, &out.WebhookProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorMetricAlertActionInitParameters. +func (in *MonitorMetricAlertActionInitParameters) DeepCopy() *MonitorMetricAlertActionInitParameters { + if in == nil { + return nil + } + out := new(MonitorMetricAlertActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorMetricAlertActionObservation) DeepCopyInto(out *MonitorMetricAlertActionObservation) { + *out = *in + if in.ActionGroupID != nil { + in, out := &in.ActionGroupID, &out.ActionGroupID + *out = new(string) + **out = **in + } + if in.WebhookProperties != nil { + in, out := &in.WebhookProperties, &out.WebhookProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorMetricAlertActionObservation. +func (in *MonitorMetricAlertActionObservation) DeepCopy() *MonitorMetricAlertActionObservation { + if in == nil { + return nil + } + out := new(MonitorMetricAlertActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorMetricAlertActionParameters) DeepCopyInto(out *MonitorMetricAlertActionParameters) { + *out = *in + if in.ActionGroupID != nil { + in, out := &in.ActionGroupID, &out.ActionGroupID + *out = new(string) + **out = **in + } + if in.ActionGroupIDRef != nil { + in, out := &in.ActionGroupIDRef, &out.ActionGroupIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ActionGroupIDSelector != nil { + in, out := &in.ActionGroupIDSelector, &out.ActionGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.WebhookProperties != nil { + in, out := &in.WebhookProperties, &out.WebhookProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorMetricAlertActionParameters. +func (in *MonitorMetricAlertActionParameters) DeepCopy() *MonitorMetricAlertActionParameters { + if in == nil { + return nil + } + out := new(MonitorMetricAlertActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorMetricAlertCriteriaInitParameters) DeepCopyInto(out *MonitorMetricAlertCriteriaInitParameters) { + *out = *in + if in.Aggregation != nil { + in, out := &in.Aggregation, &out.Aggregation + *out = new(string) + **out = **in + } + if in.Dimension != nil { + in, out := &in.Dimension, &out.Dimension + *out = make([]DimensionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName + *out = new(string) + **out = **in + } + if in.MetricNamespace != nil { + in, out := &in.MetricNamespace, &out.MetricNamespace + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.SkipMetricValidation != nil { + in, out := &in.SkipMetricValidation, &out.SkipMetricValidation + *out = new(bool) + **out = **in + } + if in.Threshold != nil { + in, out := &in.Threshold, &out.Threshold + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorMetricAlertCriteriaInitParameters. +func (in *MonitorMetricAlertCriteriaInitParameters) DeepCopy() *MonitorMetricAlertCriteriaInitParameters { + if in == nil { + return nil + } + out := new(MonitorMetricAlertCriteriaInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorMetricAlertCriteriaObservation) DeepCopyInto(out *MonitorMetricAlertCriteriaObservation) { + *out = *in + if in.Aggregation != nil { + in, out := &in.Aggregation, &out.Aggregation + *out = new(string) + **out = **in + } + if in.Dimension != nil { + in, out := &in.Dimension, &out.Dimension + *out = make([]DimensionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName + *out = new(string) + **out = **in + } + if in.MetricNamespace != nil { + in, out := &in.MetricNamespace, &out.MetricNamespace + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.SkipMetricValidation != nil { + in, out := &in.SkipMetricValidation, &out.SkipMetricValidation + *out = new(bool) + **out = **in + } + if in.Threshold != nil { + in, out := &in.Threshold, &out.Threshold + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorMetricAlertCriteriaObservation. +func (in *MonitorMetricAlertCriteriaObservation) DeepCopy() *MonitorMetricAlertCriteriaObservation { + if in == nil { + return nil + } + out := new(MonitorMetricAlertCriteriaObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorMetricAlertCriteriaParameters) DeepCopyInto(out *MonitorMetricAlertCriteriaParameters) { + *out = *in + if in.Aggregation != nil { + in, out := &in.Aggregation, &out.Aggregation + *out = new(string) + **out = **in + } + if in.Dimension != nil { + in, out := &in.Dimension, &out.Dimension + *out = make([]DimensionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName + *out = new(string) + **out = **in + } + if in.MetricNamespace != nil { + in, out := &in.MetricNamespace, &out.MetricNamespace + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.SkipMetricValidation != nil { + in, out := &in.SkipMetricValidation, &out.SkipMetricValidation + *out = new(bool) + **out = **in + } + if in.Threshold != nil { + in, out := &in.Threshold, &out.Threshold + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorMetricAlertCriteriaParameters. +func (in *MonitorMetricAlertCriteriaParameters) DeepCopy() *MonitorMetricAlertCriteriaParameters { + if in == nil { + return nil + } + out := new(MonitorMetricAlertCriteriaParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorMetricAlertInitParameters) DeepCopyInto(out *MonitorMetricAlertInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = make([]MonitorMetricAlertActionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ApplicationInsightsWebTestLocationAvailabilityCriteria != nil { + in, out := &in.ApplicationInsightsWebTestLocationAvailabilityCriteria, &out.ApplicationInsightsWebTestLocationAvailabilityCriteria + *out = new(ApplicationInsightsWebTestLocationAvailabilityCriteriaInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AutoMitigate != nil { + in, out := &in.AutoMitigate, &out.AutoMitigate + *out = new(bool) + **out = **in + } + if in.Criteria != nil { + in, out := &in.Criteria, &out.Criteria + *out = make([]MonitorMetricAlertCriteriaInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DynamicCriteria != nil { + in, out := &in.DynamicCriteria, &out.DynamicCriteria + *out = new(DynamicCriteriaInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Frequency != nil { + in, out := &in.Frequency, &out.Frequency + *out = new(string) + **out = **in + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScopesRefs != nil { + in, out := &in.ScopesRefs, &out.ScopesRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ScopesSelector != nil { + in, out := &in.ScopesSelector, &out.ScopesSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Severity 
!= nil { + in, out := &in.Severity, &out.Severity + *out = new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TargetResourceLocation != nil { + in, out := &in.TargetResourceLocation, &out.TargetResourceLocation + *out = new(string) + **out = **in + } + if in.TargetResourceType != nil { + in, out := &in.TargetResourceType, &out.TargetResourceType + *out = new(string) + **out = **in + } + if in.WindowSize != nil { + in, out := &in.WindowSize, &out.WindowSize + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorMetricAlertInitParameters. +func (in *MonitorMetricAlertInitParameters) DeepCopy() *MonitorMetricAlertInitParameters { + if in == nil { + return nil + } + out := new(MonitorMetricAlertInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorMetricAlertList) DeepCopyInto(out *MonitorMetricAlertList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MonitorMetricAlert, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorMetricAlertList. 
+func (in *MonitorMetricAlertList) DeepCopy() *MonitorMetricAlertList { + if in == nil { + return nil + } + out := new(MonitorMetricAlertList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MonitorMetricAlertList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorMetricAlertObservation) DeepCopyInto(out *MonitorMetricAlertObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = make([]MonitorMetricAlertActionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ApplicationInsightsWebTestLocationAvailabilityCriteria != nil { + in, out := &in.ApplicationInsightsWebTestLocationAvailabilityCriteria, &out.ApplicationInsightsWebTestLocationAvailabilityCriteria + *out = new(ApplicationInsightsWebTestLocationAvailabilityCriteriaObservation) + (*in).DeepCopyInto(*out) + } + if in.AutoMitigate != nil { + in, out := &in.AutoMitigate, &out.AutoMitigate + *out = new(bool) + **out = **in + } + if in.Criteria != nil { + in, out := &in.Criteria, &out.Criteria + *out = make([]MonitorMetricAlertCriteriaObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DynamicCriteria != nil { + in, out := &in.DynamicCriteria, &out.DynamicCriteria + *out = new(DynamicCriteriaObservation) + (*in).DeepCopyInto(*out) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Frequency != nil { + in, out := &in.Frequency, &out.Frequency + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := 
&in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Severity != nil { + in, out := &in.Severity, &out.Severity + *out = new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TargetResourceLocation != nil { + in, out := &in.TargetResourceLocation, &out.TargetResourceLocation + *out = new(string) + **out = **in + } + if in.TargetResourceType != nil { + in, out := &in.TargetResourceType, &out.TargetResourceType + *out = new(string) + **out = **in + } + if in.WindowSize != nil { + in, out := &in.WindowSize, &out.WindowSize + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorMetricAlertObservation. +func (in *MonitorMetricAlertObservation) DeepCopy() *MonitorMetricAlertObservation { + if in == nil { + return nil + } + out := new(MonitorMetricAlertObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorMetricAlertParameters) DeepCopyInto(out *MonitorMetricAlertParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = make([]MonitorMetricAlertActionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ApplicationInsightsWebTestLocationAvailabilityCriteria != nil { + in, out := &in.ApplicationInsightsWebTestLocationAvailabilityCriteria, &out.ApplicationInsightsWebTestLocationAvailabilityCriteria + *out = new(ApplicationInsightsWebTestLocationAvailabilityCriteriaParameters) + (*in).DeepCopyInto(*out) + } + if in.AutoMitigate != nil { + in, out := &in.AutoMitigate, &out.AutoMitigate + *out = new(bool) + **out = **in + } + if in.Criteria != nil { + in, out := &in.Criteria, &out.Criteria + *out = make([]MonitorMetricAlertCriteriaParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DynamicCriteria != nil { + in, out := &in.DynamicCriteria, &out.DynamicCriteria + *out = new(DynamicCriteriaParameters) + (*in).DeepCopyInto(*out) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Frequency != nil { + in, out := &in.Frequency, &out.Frequency + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]*string, len(*in)) + for i := range *in { 
+ if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScopesRefs != nil { + in, out := &in.ScopesRefs, &out.ScopesRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ScopesSelector != nil { + in, out := &in.ScopesSelector, &out.ScopesSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Severity != nil { + in, out := &in.Severity, &out.Severity + *out = new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TargetResourceLocation != nil { + in, out := &in.TargetResourceLocation, &out.TargetResourceLocation + *out = new(string) + **out = **in + } + if in.TargetResourceType != nil { + in, out := &in.TargetResourceType, &out.TargetResourceType + *out = new(string) + **out = **in + } + if in.WindowSize != nil { + in, out := &in.WindowSize, &out.WindowSize + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorMetricAlertParameters. +func (in *MonitorMetricAlertParameters) DeepCopy() *MonitorMetricAlertParameters { + if in == nil { + return nil + } + out := new(MonitorMetricAlertParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorMetricAlertSpec) DeepCopyInto(out *MonitorMetricAlertSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorMetricAlertSpec. +func (in *MonitorMetricAlertSpec) DeepCopy() *MonitorMetricAlertSpec { + if in == nil { + return nil + } + out := new(MonitorMetricAlertSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorMetricAlertStatus) DeepCopyInto(out *MonitorMetricAlertStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorMetricAlertStatus. +func (in *MonitorMetricAlertStatus) DeepCopy() *MonitorMetricAlertStatus { + if in == nil { + return nil + } + out := new(MonitorMetricAlertStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorScheduledQueryRulesAlert) DeepCopyInto(out *MonitorScheduledQueryRulesAlert) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorScheduledQueryRulesAlert. +func (in *MonitorScheduledQueryRulesAlert) DeepCopy() *MonitorScheduledQueryRulesAlert { + if in == nil { + return nil + } + out := new(MonitorScheduledQueryRulesAlert) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *MonitorScheduledQueryRulesAlert) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorScheduledQueryRulesAlertActionInitParameters) DeepCopyInto(out *MonitorScheduledQueryRulesAlertActionInitParameters) { + *out = *in + if in.ActionGroup != nil { + in, out := &in.ActionGroup, &out.ActionGroup + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ActionGroupRefs != nil { + in, out := &in.ActionGroupRefs, &out.ActionGroupRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ActionGroupSelector != nil { + in, out := &in.ActionGroupSelector, &out.ActionGroupSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.CustomWebhookPayload != nil { + in, out := &in.CustomWebhookPayload, &out.CustomWebhookPayload + *out = new(string) + **out = **in + } + if in.EmailSubject != nil { + in, out := &in.EmailSubject, &out.EmailSubject + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorScheduledQueryRulesAlertActionInitParameters. +func (in *MonitorScheduledQueryRulesAlertActionInitParameters) DeepCopy() *MonitorScheduledQueryRulesAlertActionInitParameters { + if in == nil { + return nil + } + out := new(MonitorScheduledQueryRulesAlertActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorScheduledQueryRulesAlertActionObservation) DeepCopyInto(out *MonitorScheduledQueryRulesAlertActionObservation) { + *out = *in + if in.ActionGroup != nil { + in, out := &in.ActionGroup, &out.ActionGroup + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomWebhookPayload != nil { + in, out := &in.CustomWebhookPayload, &out.CustomWebhookPayload + *out = new(string) + **out = **in + } + if in.EmailSubject != nil { + in, out := &in.EmailSubject, &out.EmailSubject + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorScheduledQueryRulesAlertActionObservation. +func (in *MonitorScheduledQueryRulesAlertActionObservation) DeepCopy() *MonitorScheduledQueryRulesAlertActionObservation { + if in == nil { + return nil + } + out := new(MonitorScheduledQueryRulesAlertActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorScheduledQueryRulesAlertActionParameters) DeepCopyInto(out *MonitorScheduledQueryRulesAlertActionParameters) { + *out = *in + if in.ActionGroup != nil { + in, out := &in.ActionGroup, &out.ActionGroup + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ActionGroupRefs != nil { + in, out := &in.ActionGroupRefs, &out.ActionGroupRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ActionGroupSelector != nil { + in, out := &in.ActionGroupSelector, &out.ActionGroupSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.CustomWebhookPayload != nil { + in, out := &in.CustomWebhookPayload, &out.CustomWebhookPayload + *out = new(string) + **out = **in + } + if in.EmailSubject != nil { + in, out := &in.EmailSubject, &out.EmailSubject + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorScheduledQueryRulesAlertActionParameters. +func (in *MonitorScheduledQueryRulesAlertActionParameters) DeepCopy() *MonitorScheduledQueryRulesAlertActionParameters { + if in == nil { + return nil + } + out := new(MonitorScheduledQueryRulesAlertActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorScheduledQueryRulesAlertInitParameters) DeepCopyInto(out *MonitorScheduledQueryRulesAlertInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(MonitorScheduledQueryRulesAlertActionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AuthorizedResourceIds != nil { + in, out := &in.AuthorizedResourceIds, &out.AuthorizedResourceIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AutoMitigationEnabled != nil { + in, out := &in.AutoMitigationEnabled, &out.AutoMitigationEnabled + *out = new(bool) + **out = **in + } + if in.DataSourceID != nil { + in, out := &in.DataSourceID, &out.DataSourceID + *out = new(string) + **out = **in + } + if in.DataSourceIDRef != nil { + in, out := &in.DataSourceIDRef, &out.DataSourceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DataSourceIDSelector != nil { + in, out := &in.DataSourceIDSelector, &out.DataSourceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Frequency != nil { + in, out := &in.Frequency, &out.Frequency + *out = new(float64) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Query != nil { + in, out := &in.Query, &out.Query + *out = new(string) + **out = **in + } + if in.QueryType != nil { + in, out := &in.QueryType, &out.QueryType + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = 
**in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Severity != nil { + in, out := &in.Severity, &out.Severity + *out = new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Throttling != nil { + in, out := &in.Throttling, &out.Throttling + *out = new(float64) + **out = **in + } + if in.TimeWindow != nil { + in, out := &in.TimeWindow, &out.TimeWindow + *out = new(float64) + **out = **in + } + if in.Trigger != nil { + in, out := &in.Trigger, &out.Trigger + *out = new(TriggerInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorScheduledQueryRulesAlertInitParameters. +func (in *MonitorScheduledQueryRulesAlertInitParameters) DeepCopy() *MonitorScheduledQueryRulesAlertInitParameters { + if in == nil { + return nil + } + out := new(MonitorScheduledQueryRulesAlertInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorScheduledQueryRulesAlertList) DeepCopyInto(out *MonitorScheduledQueryRulesAlertList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MonitorScheduledQueryRulesAlert, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorScheduledQueryRulesAlertList. +func (in *MonitorScheduledQueryRulesAlertList) DeepCopy() *MonitorScheduledQueryRulesAlertList { + if in == nil { + return nil + } + out := new(MonitorScheduledQueryRulesAlertList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MonitorScheduledQueryRulesAlertList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorScheduledQueryRulesAlertObservation) DeepCopyInto(out *MonitorScheduledQueryRulesAlertObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(MonitorScheduledQueryRulesAlertActionObservation) + (*in).DeepCopyInto(*out) + } + if in.AuthorizedResourceIds != nil { + in, out := &in.AuthorizedResourceIds, &out.AuthorizedResourceIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AutoMitigationEnabled != nil { + in, out := &in.AutoMitigationEnabled, &out.AutoMitigationEnabled + *out = new(bool) + **out = **in + } + if in.DataSourceID != nil { + in, out := &in.DataSourceID, &out.DataSourceID + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Frequency != nil { + in, out := &in.Frequency, &out.Frequency + *out = new(float64) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Query != nil { + in, out := &in.Query, &out.Query + *out = new(string) + **out = **in + } + if in.QueryType != nil { + in, out := &in.QueryType, &out.QueryType + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Severity != nil { + in, out := &in.Severity, &out.Severity + *out = new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, 
val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Throttling != nil { + in, out := &in.Throttling, &out.Throttling + *out = new(float64) + **out = **in + } + if in.TimeWindow != nil { + in, out := &in.TimeWindow, &out.TimeWindow + *out = new(float64) + **out = **in + } + if in.Trigger != nil { + in, out := &in.Trigger, &out.Trigger + *out = new(TriggerObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorScheduledQueryRulesAlertObservation. +func (in *MonitorScheduledQueryRulesAlertObservation) DeepCopy() *MonitorScheduledQueryRulesAlertObservation { + if in == nil { + return nil + } + out := new(MonitorScheduledQueryRulesAlertObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorScheduledQueryRulesAlertParameters) DeepCopyInto(out *MonitorScheduledQueryRulesAlertParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(MonitorScheduledQueryRulesAlertActionParameters) + (*in).DeepCopyInto(*out) + } + if in.AuthorizedResourceIds != nil { + in, out := &in.AuthorizedResourceIds, &out.AuthorizedResourceIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AutoMitigationEnabled != nil { + in, out := &in.AutoMitigationEnabled, &out.AutoMitigationEnabled + *out = new(bool) + **out = **in + } + if in.DataSourceID != nil { + in, out := &in.DataSourceID, &out.DataSourceID + *out = new(string) + **out = **in + } + if in.DataSourceIDRef != nil { + in, out := &in.DataSourceIDRef, &out.DataSourceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DataSourceIDSelector != nil { + in, out := &in.DataSourceIDSelector, &out.DataSourceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Frequency != nil { + in, out := &in.Frequency, &out.Frequency + *out = new(float64) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Query != nil { + in, out := &in.Query, &out.Query + *out = new(string) + **out = **in + } + if in.QueryType != nil { + in, out := &in.QueryType, &out.QueryType + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + 
if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Severity != nil { + in, out := &in.Severity, &out.Severity + *out = new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Throttling != nil { + in, out := &in.Throttling, &out.Throttling + *out = new(float64) + **out = **in + } + if in.TimeWindow != nil { + in, out := &in.TimeWindow, &out.TimeWindow + *out = new(float64) + **out = **in + } + if in.Trigger != nil { + in, out := &in.Trigger, &out.Trigger + *out = new(TriggerParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorScheduledQueryRulesAlertParameters. +func (in *MonitorScheduledQueryRulesAlertParameters) DeepCopy() *MonitorScheduledQueryRulesAlertParameters { + if in == nil { + return nil + } + out := new(MonitorScheduledQueryRulesAlertParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorScheduledQueryRulesAlertSpec) DeepCopyInto(out *MonitorScheduledQueryRulesAlertSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorScheduledQueryRulesAlertSpec. +func (in *MonitorScheduledQueryRulesAlertSpec) DeepCopy() *MonitorScheduledQueryRulesAlertSpec { + if in == nil { + return nil + } + out := new(MonitorScheduledQueryRulesAlertSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorScheduledQueryRulesAlertStatus) DeepCopyInto(out *MonitorScheduledQueryRulesAlertStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorScheduledQueryRulesAlertStatus. +func (in *MonitorScheduledQueryRulesAlertStatus) DeepCopy() *MonitorScheduledQueryRulesAlertStatus { + if in == nil { + return nil + } + out := new(MonitorScheduledQueryRulesAlertStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorScheduledQueryRulesAlertV2) DeepCopyInto(out *MonitorScheduledQueryRulesAlertV2) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorScheduledQueryRulesAlertV2. 
+func (in *MonitorScheduledQueryRulesAlertV2) DeepCopy() *MonitorScheduledQueryRulesAlertV2 { + if in == nil { + return nil + } + out := new(MonitorScheduledQueryRulesAlertV2) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MonitorScheduledQueryRulesAlertV2) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorScheduledQueryRulesAlertV2ActionInitParameters) DeepCopyInto(out *MonitorScheduledQueryRulesAlertV2ActionInitParameters) { + *out = *in + if in.ActionGroups != nil { + in, out := &in.ActionGroups, &out.ActionGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomProperties != nil { + in, out := &in.CustomProperties, &out.CustomProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorScheduledQueryRulesAlertV2ActionInitParameters. +func (in *MonitorScheduledQueryRulesAlertV2ActionInitParameters) DeepCopy() *MonitorScheduledQueryRulesAlertV2ActionInitParameters { + if in == nil { + return nil + } + out := new(MonitorScheduledQueryRulesAlertV2ActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorScheduledQueryRulesAlertV2ActionObservation) DeepCopyInto(out *MonitorScheduledQueryRulesAlertV2ActionObservation) { + *out = *in + if in.ActionGroups != nil { + in, out := &in.ActionGroups, &out.ActionGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomProperties != nil { + in, out := &in.CustomProperties, &out.CustomProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorScheduledQueryRulesAlertV2ActionObservation. +func (in *MonitorScheduledQueryRulesAlertV2ActionObservation) DeepCopy() *MonitorScheduledQueryRulesAlertV2ActionObservation { + if in == nil { + return nil + } + out := new(MonitorScheduledQueryRulesAlertV2ActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorScheduledQueryRulesAlertV2ActionParameters) DeepCopyInto(out *MonitorScheduledQueryRulesAlertV2ActionParameters) { + *out = *in + if in.ActionGroups != nil { + in, out := &in.ActionGroups, &out.ActionGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomProperties != nil { + in, out := &in.CustomProperties, &out.CustomProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorScheduledQueryRulesAlertV2ActionParameters. +func (in *MonitorScheduledQueryRulesAlertV2ActionParameters) DeepCopy() *MonitorScheduledQueryRulesAlertV2ActionParameters { + if in == nil { + return nil + } + out := new(MonitorScheduledQueryRulesAlertV2ActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorScheduledQueryRulesAlertV2CriteriaInitParameters) DeepCopyInto(out *MonitorScheduledQueryRulesAlertV2CriteriaInitParameters) { + *out = *in + if in.Dimension != nil { + in, out := &in.Dimension, &out.Dimension + *out = make([]CriteriaDimensionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FailingPeriods != nil { + in, out := &in.FailingPeriods, &out.FailingPeriods + *out = new(FailingPeriodsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MetricMeasureColumn != nil { + in, out := &in.MetricMeasureColumn, &out.MetricMeasureColumn + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Query != nil { + in, out := &in.Query, &out.Query + *out = new(string) + **out = **in + } + if in.ResourceIDColumn != nil { + in, out := &in.ResourceIDColumn, &out.ResourceIDColumn + *out = new(string) + **out = **in + } + if in.Threshold != nil { + in, out := &in.Threshold, &out.Threshold + *out = new(float64) + **out = **in + } + if in.TimeAggregationMethod != nil { + in, out := &in.TimeAggregationMethod, &out.TimeAggregationMethod + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorScheduledQueryRulesAlertV2CriteriaInitParameters. +func (in *MonitorScheduledQueryRulesAlertV2CriteriaInitParameters) DeepCopy() *MonitorScheduledQueryRulesAlertV2CriteriaInitParameters { + if in == nil { + return nil + } + out := new(MonitorScheduledQueryRulesAlertV2CriteriaInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorScheduledQueryRulesAlertV2CriteriaObservation) DeepCopyInto(out *MonitorScheduledQueryRulesAlertV2CriteriaObservation) { + *out = *in + if in.Dimension != nil { + in, out := &in.Dimension, &out.Dimension + *out = make([]CriteriaDimensionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FailingPeriods != nil { + in, out := &in.FailingPeriods, &out.FailingPeriods + *out = new(FailingPeriodsObservation) + (*in).DeepCopyInto(*out) + } + if in.MetricMeasureColumn != nil { + in, out := &in.MetricMeasureColumn, &out.MetricMeasureColumn + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Query != nil { + in, out := &in.Query, &out.Query + *out = new(string) + **out = **in + } + if in.ResourceIDColumn != nil { + in, out := &in.ResourceIDColumn, &out.ResourceIDColumn + *out = new(string) + **out = **in + } + if in.Threshold != nil { + in, out := &in.Threshold, &out.Threshold + *out = new(float64) + **out = **in + } + if in.TimeAggregationMethod != nil { + in, out := &in.TimeAggregationMethod, &out.TimeAggregationMethod + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorScheduledQueryRulesAlertV2CriteriaObservation. +func (in *MonitorScheduledQueryRulesAlertV2CriteriaObservation) DeepCopy() *MonitorScheduledQueryRulesAlertV2CriteriaObservation { + if in == nil { + return nil + } + out := new(MonitorScheduledQueryRulesAlertV2CriteriaObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorScheduledQueryRulesAlertV2CriteriaParameters) DeepCopyInto(out *MonitorScheduledQueryRulesAlertV2CriteriaParameters) { + *out = *in + if in.Dimension != nil { + in, out := &in.Dimension, &out.Dimension + *out = make([]CriteriaDimensionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FailingPeriods != nil { + in, out := &in.FailingPeriods, &out.FailingPeriods + *out = new(FailingPeriodsParameters) + (*in).DeepCopyInto(*out) + } + if in.MetricMeasureColumn != nil { + in, out := &in.MetricMeasureColumn, &out.MetricMeasureColumn + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Query != nil { + in, out := &in.Query, &out.Query + *out = new(string) + **out = **in + } + if in.ResourceIDColumn != nil { + in, out := &in.ResourceIDColumn, &out.ResourceIDColumn + *out = new(string) + **out = **in + } + if in.Threshold != nil { + in, out := &in.Threshold, &out.Threshold + *out = new(float64) + **out = **in + } + if in.TimeAggregationMethod != nil { + in, out := &in.TimeAggregationMethod, &out.TimeAggregationMethod + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorScheduledQueryRulesAlertV2CriteriaParameters. +func (in *MonitorScheduledQueryRulesAlertV2CriteriaParameters) DeepCopy() *MonitorScheduledQueryRulesAlertV2CriteriaParameters { + if in == nil { + return nil + } + out := new(MonitorScheduledQueryRulesAlertV2CriteriaParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorScheduledQueryRulesAlertV2InitParameters) DeepCopyInto(out *MonitorScheduledQueryRulesAlertV2InitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(MonitorScheduledQueryRulesAlertV2ActionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AutoMitigationEnabled != nil { + in, out := &in.AutoMitigationEnabled, &out.AutoMitigationEnabled + *out = new(bool) + **out = **in + } + if in.Criteria != nil { + in, out := &in.Criteria, &out.Criteria + *out = make([]MonitorScheduledQueryRulesAlertV2CriteriaInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.EvaluationFrequency != nil { + in, out := &in.EvaluationFrequency, &out.EvaluationFrequency + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MuteActionsAfterAlertDuration != nil { + in, out := &in.MuteActionsAfterAlertDuration, &out.MuteActionsAfterAlertDuration + *out = new(string) + **out = **in + } + if in.QueryTimeRangeOverride != nil { + in, out := &in.QueryTimeRangeOverride, &out.QueryTimeRangeOverride + *out = new(string) + **out = **in + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScopesRefs != nil { + in, out := &in.ScopesRefs, &out.ScopesRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ScopesSelector != nil { 
+ in, out := &in.ScopesSelector, &out.ScopesSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Severity != nil { + in, out := &in.Severity, &out.Severity + *out = new(float64) + **out = **in + } + if in.SkipQueryValidation != nil { + in, out := &in.SkipQueryValidation, &out.SkipQueryValidation + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TargetResourceTypes != nil { + in, out := &in.TargetResourceTypes, &out.TargetResourceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.WindowDuration != nil { + in, out := &in.WindowDuration, &out.WindowDuration + *out = new(string) + **out = **in + } + if in.WorkspaceAlertsStorageEnabled != nil { + in, out := &in.WorkspaceAlertsStorageEnabled, &out.WorkspaceAlertsStorageEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorScheduledQueryRulesAlertV2InitParameters. +func (in *MonitorScheduledQueryRulesAlertV2InitParameters) DeepCopy() *MonitorScheduledQueryRulesAlertV2InitParameters { + if in == nil { + return nil + } + out := new(MonitorScheduledQueryRulesAlertV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorScheduledQueryRulesAlertV2List) DeepCopyInto(out *MonitorScheduledQueryRulesAlertV2List) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MonitorScheduledQueryRulesAlertV2, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorScheduledQueryRulesAlertV2List. +func (in *MonitorScheduledQueryRulesAlertV2List) DeepCopy() *MonitorScheduledQueryRulesAlertV2List { + if in == nil { + return nil + } + out := new(MonitorScheduledQueryRulesAlertV2List) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MonitorScheduledQueryRulesAlertV2List) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorScheduledQueryRulesAlertV2Observation) DeepCopyInto(out *MonitorScheduledQueryRulesAlertV2Observation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(MonitorScheduledQueryRulesAlertV2ActionObservation) + (*in).DeepCopyInto(*out) + } + if in.AutoMitigationEnabled != nil { + in, out := &in.AutoMitigationEnabled, &out.AutoMitigationEnabled + *out = new(bool) + **out = **in + } + if in.CreatedWithAPIVersion != nil { + in, out := &in.CreatedWithAPIVersion, &out.CreatedWithAPIVersion + *out = new(string) + **out = **in + } + if in.Criteria != nil { + in, out := &in.Criteria, &out.Criteria + *out = make([]MonitorScheduledQueryRulesAlertV2CriteriaObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.EvaluationFrequency != nil { + in, out := &in.EvaluationFrequency, &out.EvaluationFrequency + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IsALegacyLogAnalyticsRule != nil { + in, out := &in.IsALegacyLogAnalyticsRule, &out.IsALegacyLogAnalyticsRule + *out = new(bool) + **out = **in + } + if in.IsWorkspaceAlertsStorageConfigured != nil { + in, out := &in.IsWorkspaceAlertsStorageConfigured, &out.IsWorkspaceAlertsStorageConfigured + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MuteActionsAfterAlertDuration != nil { + in, out := &in.MuteActionsAfterAlertDuration, &out.MuteActionsAfterAlertDuration + *out = new(string) + **out = **in + } + if 
in.QueryTimeRangeOverride != nil { + in, out := &in.QueryTimeRangeOverride, &out.QueryTimeRangeOverride + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Severity != nil { + in, out := &in.Severity, &out.Severity + *out = new(float64) + **out = **in + } + if in.SkipQueryValidation != nil { + in, out := &in.SkipQueryValidation, &out.SkipQueryValidation + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TargetResourceTypes != nil { + in, out := &in.TargetResourceTypes, &out.TargetResourceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.WindowDuration != nil { + in, out := &in.WindowDuration, &out.WindowDuration + *out = new(string) + **out = **in + } + if in.WorkspaceAlertsStorageEnabled != nil { + in, out := &in.WorkspaceAlertsStorageEnabled, &out.WorkspaceAlertsStorageEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorScheduledQueryRulesAlertV2Observation. 
+func (in *MonitorScheduledQueryRulesAlertV2Observation) DeepCopy() *MonitorScheduledQueryRulesAlertV2Observation { + if in == nil { + return nil + } + out := new(MonitorScheduledQueryRulesAlertV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorScheduledQueryRulesAlertV2Parameters) DeepCopyInto(out *MonitorScheduledQueryRulesAlertV2Parameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(MonitorScheduledQueryRulesAlertV2ActionParameters) + (*in).DeepCopyInto(*out) + } + if in.AutoMitigationEnabled != nil { + in, out := &in.AutoMitigationEnabled, &out.AutoMitigationEnabled + *out = new(bool) + **out = **in + } + if in.Criteria != nil { + in, out := &in.Criteria, &out.Criteria + *out = make([]MonitorScheduledQueryRulesAlertV2CriteriaParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.EvaluationFrequency != nil { + in, out := &in.EvaluationFrequency, &out.EvaluationFrequency + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MuteActionsAfterAlertDuration != nil { + in, out := &in.MuteActionsAfterAlertDuration, &out.MuteActionsAfterAlertDuration + *out = new(string) + **out = **in + } + if in.QueryTimeRangeOverride != nil { + in, out := &in.QueryTimeRangeOverride, &out.QueryTimeRangeOverride + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, 
&out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScopesRefs != nil { + in, out := &in.ScopesRefs, &out.ScopesRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ScopesSelector != nil { + in, out := &in.ScopesSelector, &out.ScopesSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Severity != nil { + in, out := &in.Severity, &out.Severity + *out = new(float64) + **out = **in + } + if in.SkipQueryValidation != nil { + in, out := &in.SkipQueryValidation, &out.SkipQueryValidation + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TargetResourceTypes != nil { + in, out := &in.TargetResourceTypes, &out.TargetResourceTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.WindowDuration != nil { + in, out := &in.WindowDuration, &out.WindowDuration + *out = new(string) + **out = **in + } + if in.WorkspaceAlertsStorageEnabled != nil { + in, out := &in.WorkspaceAlertsStorageEnabled, 
&out.WorkspaceAlertsStorageEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorScheduledQueryRulesAlertV2Parameters. +func (in *MonitorScheduledQueryRulesAlertV2Parameters) DeepCopy() *MonitorScheduledQueryRulesAlertV2Parameters { + if in == nil { + return nil + } + out := new(MonitorScheduledQueryRulesAlertV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorScheduledQueryRulesAlertV2Spec) DeepCopyInto(out *MonitorScheduledQueryRulesAlertV2Spec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorScheduledQueryRulesAlertV2Spec. +func (in *MonitorScheduledQueryRulesAlertV2Spec) DeepCopy() *MonitorScheduledQueryRulesAlertV2Spec { + if in == nil { + return nil + } + out := new(MonitorScheduledQueryRulesAlertV2Spec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorScheduledQueryRulesAlertV2Status) DeepCopyInto(out *MonitorScheduledQueryRulesAlertV2Status) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorScheduledQueryRulesAlertV2Status. 
+func (in *MonitorScheduledQueryRulesAlertV2Status) DeepCopy() *MonitorScheduledQueryRulesAlertV2Status { + if in == nil { + return nil + } + out := new(MonitorScheduledQueryRulesAlertV2Status) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorScheduledQueryRulesLog) DeepCopyInto(out *MonitorScheduledQueryRulesLog) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorScheduledQueryRulesLog. +func (in *MonitorScheduledQueryRulesLog) DeepCopy() *MonitorScheduledQueryRulesLog { + if in == nil { + return nil + } + out := new(MonitorScheduledQueryRulesLog) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MonitorScheduledQueryRulesLog) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorScheduledQueryRulesLogCriteriaDimensionInitParameters) DeepCopyInto(out *MonitorScheduledQueryRulesLogCriteriaDimensionInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorScheduledQueryRulesLogCriteriaDimensionInitParameters. +func (in *MonitorScheduledQueryRulesLogCriteriaDimensionInitParameters) DeepCopy() *MonitorScheduledQueryRulesLogCriteriaDimensionInitParameters { + if in == nil { + return nil + } + out := new(MonitorScheduledQueryRulesLogCriteriaDimensionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorScheduledQueryRulesLogCriteriaDimensionObservation) DeepCopyInto(out *MonitorScheduledQueryRulesLogCriteriaDimensionObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorScheduledQueryRulesLogCriteriaDimensionObservation. 
+func (in *MonitorScheduledQueryRulesLogCriteriaDimensionObservation) DeepCopy() *MonitorScheduledQueryRulesLogCriteriaDimensionObservation { + if in == nil { + return nil + } + out := new(MonitorScheduledQueryRulesLogCriteriaDimensionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorScheduledQueryRulesLogCriteriaDimensionParameters) DeepCopyInto(out *MonitorScheduledQueryRulesLogCriteriaDimensionParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorScheduledQueryRulesLogCriteriaDimensionParameters. +func (in *MonitorScheduledQueryRulesLogCriteriaDimensionParameters) DeepCopy() *MonitorScheduledQueryRulesLogCriteriaDimensionParameters { + if in == nil { + return nil + } + out := new(MonitorScheduledQueryRulesLogCriteriaDimensionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorScheduledQueryRulesLogCriteriaInitParameters) DeepCopyInto(out *MonitorScheduledQueryRulesLogCriteriaInitParameters) { + *out = *in + if in.Dimension != nil { + in, out := &in.Dimension, &out.Dimension + *out = make([]MonitorScheduledQueryRulesLogCriteriaDimensionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorScheduledQueryRulesLogCriteriaInitParameters. +func (in *MonitorScheduledQueryRulesLogCriteriaInitParameters) DeepCopy() *MonitorScheduledQueryRulesLogCriteriaInitParameters { + if in == nil { + return nil + } + out := new(MonitorScheduledQueryRulesLogCriteriaInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorScheduledQueryRulesLogCriteriaObservation) DeepCopyInto(out *MonitorScheduledQueryRulesLogCriteriaObservation) { + *out = *in + if in.Dimension != nil { + in, out := &in.Dimension, &out.Dimension + *out = make([]MonitorScheduledQueryRulesLogCriteriaDimensionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorScheduledQueryRulesLogCriteriaObservation. 
+func (in *MonitorScheduledQueryRulesLogCriteriaObservation) DeepCopy() *MonitorScheduledQueryRulesLogCriteriaObservation { + if in == nil { + return nil + } + out := new(MonitorScheduledQueryRulesLogCriteriaObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorScheduledQueryRulesLogCriteriaParameters) DeepCopyInto(out *MonitorScheduledQueryRulesLogCriteriaParameters) { + *out = *in + if in.Dimension != nil { + in, out := &in.Dimension, &out.Dimension + *out = make([]MonitorScheduledQueryRulesLogCriteriaDimensionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MetricName != nil { + in, out := &in.MetricName, &out.MetricName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorScheduledQueryRulesLogCriteriaParameters. +func (in *MonitorScheduledQueryRulesLogCriteriaParameters) DeepCopy() *MonitorScheduledQueryRulesLogCriteriaParameters { + if in == nil { + return nil + } + out := new(MonitorScheduledQueryRulesLogCriteriaParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorScheduledQueryRulesLogInitParameters) DeepCopyInto(out *MonitorScheduledQueryRulesLogInitParameters) { + *out = *in + if in.AuthorizedResourceIds != nil { + in, out := &in.AuthorizedResourceIds, &out.AuthorizedResourceIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Criteria != nil { + in, out := &in.Criteria, &out.Criteria + *out = new(MonitorScheduledQueryRulesLogCriteriaInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DataSourceID != nil { + in, out := &in.DataSourceID, &out.DataSourceID + *out = new(string) + **out = **in + } + if in.DataSourceIDRef != nil { + in, out := &in.DataSourceIDRef, &out.DataSourceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DataSourceIDSelector != nil { + in, out := &in.DataSourceIDSelector, &out.DataSourceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := 
range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorScheduledQueryRulesLogInitParameters. +func (in *MonitorScheduledQueryRulesLogInitParameters) DeepCopy() *MonitorScheduledQueryRulesLogInitParameters { + if in == nil { + return nil + } + out := new(MonitorScheduledQueryRulesLogInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorScheduledQueryRulesLogList) DeepCopyInto(out *MonitorScheduledQueryRulesLogList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MonitorScheduledQueryRulesLog, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorScheduledQueryRulesLogList. +func (in *MonitorScheduledQueryRulesLogList) DeepCopy() *MonitorScheduledQueryRulesLogList { + if in == nil { + return nil + } + out := new(MonitorScheduledQueryRulesLogList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MonitorScheduledQueryRulesLogList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorScheduledQueryRulesLogObservation) DeepCopyInto(out *MonitorScheduledQueryRulesLogObservation) { + *out = *in + if in.AuthorizedResourceIds != nil { + in, out := &in.AuthorizedResourceIds, &out.AuthorizedResourceIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Criteria != nil { + in, out := &in.Criteria, &out.Criteria + *out = new(MonitorScheduledQueryRulesLogCriteriaObservation) + (*in).DeepCopyInto(*out) + } + if in.DataSourceID != nil { + in, out := &in.DataSourceID, &out.DataSourceID + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorScheduledQueryRulesLogObservation. 
+func (in *MonitorScheduledQueryRulesLogObservation) DeepCopy() *MonitorScheduledQueryRulesLogObservation { + if in == nil { + return nil + } + out := new(MonitorScheduledQueryRulesLogObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorScheduledQueryRulesLogParameters) DeepCopyInto(out *MonitorScheduledQueryRulesLogParameters) { + *out = *in + if in.AuthorizedResourceIds != nil { + in, out := &in.AuthorizedResourceIds, &out.AuthorizedResourceIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Criteria != nil { + in, out := &in.Criteria, &out.Criteria + *out = new(MonitorScheduledQueryRulesLogCriteriaParameters) + (*in).DeepCopyInto(*out) + } + if in.DataSourceID != nil { + in, out := &in.DataSourceID, &out.DataSourceID + *out = new(string) + **out = **in + } + if in.DataSourceIDRef != nil { + in, out := &in.DataSourceIDRef, &out.DataSourceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DataSourceIDSelector != nil { + in, out := &in.DataSourceIDSelector, &out.DataSourceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out 
= new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorScheduledQueryRulesLogParameters. +func (in *MonitorScheduledQueryRulesLogParameters) DeepCopy() *MonitorScheduledQueryRulesLogParameters { + if in == nil { + return nil + } + out := new(MonitorScheduledQueryRulesLogParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorScheduledQueryRulesLogSpec) DeepCopyInto(out *MonitorScheduledQueryRulesLogSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorScheduledQueryRulesLogSpec. +func (in *MonitorScheduledQueryRulesLogSpec) DeepCopy() *MonitorScheduledQueryRulesLogSpec { + if in == nil { + return nil + } + out := new(MonitorScheduledQueryRulesLogSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorScheduledQueryRulesLogStatus) DeepCopyInto(out *MonitorScheduledQueryRulesLogStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorScheduledQueryRulesLogStatus. +func (in *MonitorScheduledQueryRulesLogStatus) DeepCopy() *MonitorScheduledQueryRulesLogStatus { + if in == nil { + return nil + } + out := new(MonitorScheduledQueryRulesLogStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NotificationInitParameters) DeepCopyInto(out *NotificationInitParameters) { + *out = *in + if in.Email != nil { + in, out := &in.Email, &out.Email + *out = new(EmailInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Webhook != nil { + in, out := &in.Webhook, &out.Webhook + *out = make([]WebhookInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotificationInitParameters. +func (in *NotificationInitParameters) DeepCopy() *NotificationInitParameters { + if in == nil { + return nil + } + out := new(NotificationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NotificationObservation) DeepCopyInto(out *NotificationObservation) { + *out = *in + if in.Email != nil { + in, out := &in.Email, &out.Email + *out = new(EmailObservation) + (*in).DeepCopyInto(*out) + } + if in.Webhook != nil { + in, out := &in.Webhook, &out.Webhook + *out = make([]WebhookObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotificationObservation. +func (in *NotificationObservation) DeepCopy() *NotificationObservation { + if in == nil { + return nil + } + out := new(NotificationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NotificationParameters) DeepCopyInto(out *NotificationParameters) { + *out = *in + if in.Email != nil { + in, out := &in.Email, &out.Email + *out = new(EmailParameters) + (*in).DeepCopyInto(*out) + } + if in.Webhook != nil { + in, out := &in.Webhook, &out.Webhook + *out = make([]WebhookParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotificationParameters. +func (in *NotificationParameters) DeepCopy() *NotificationParameters { + if in == nil { + return nil + } + out := new(NotificationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PerformanceCounterInitParameters) DeepCopyInto(out *PerformanceCounterInitParameters) { + *out = *in + if in.CounterSpecifiers != nil { + in, out := &in.CounterSpecifiers, &out.CounterSpecifiers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SamplingFrequencyInSeconds != nil { + in, out := &in.SamplingFrequencyInSeconds, &out.SamplingFrequencyInSeconds + *out = new(float64) + **out = **in + } + if in.Streams != nil { + in, out := &in.Streams, &out.Streams + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerformanceCounterInitParameters. +func (in *PerformanceCounterInitParameters) DeepCopy() *PerformanceCounterInitParameters { + if in == nil { + return nil + } + out := new(PerformanceCounterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PerformanceCounterObservation) DeepCopyInto(out *PerformanceCounterObservation) { + *out = *in + if in.CounterSpecifiers != nil { + in, out := &in.CounterSpecifiers, &out.CounterSpecifiers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SamplingFrequencyInSeconds != nil { + in, out := &in.SamplingFrequencyInSeconds, &out.SamplingFrequencyInSeconds + *out = new(float64) + **out = **in + } + if in.Streams != nil { + in, out := &in.Streams, &out.Streams + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerformanceCounterObservation. +func (in *PerformanceCounterObservation) DeepCopy() *PerformanceCounterObservation { + if in == nil { + return nil + } + out := new(PerformanceCounterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PerformanceCounterParameters) DeepCopyInto(out *PerformanceCounterParameters) { + *out = *in + if in.CounterSpecifiers != nil { + in, out := &in.CounterSpecifiers, &out.CounterSpecifiers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SamplingFrequencyInSeconds != nil { + in, out := &in.SamplingFrequencyInSeconds, &out.SamplingFrequencyInSeconds + *out = new(float64) + **out = **in + } + if in.Streams != nil { + in, out := &in.Streams, &out.Streams + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerformanceCounterParameters. +func (in *PerformanceCounterParameters) DeepCopy() *PerformanceCounterParameters { + if in == nil { + return nil + } + out := new(PerformanceCounterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlatformTelemetryInitParameters) DeepCopyInto(out *PlatformTelemetryInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Streams != nil { + in, out := &in.Streams, &out.Streams + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlatformTelemetryInitParameters. 
+func (in *PlatformTelemetryInitParameters) DeepCopy() *PlatformTelemetryInitParameters { + if in == nil { + return nil + } + out := new(PlatformTelemetryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlatformTelemetryObservation) DeepCopyInto(out *PlatformTelemetryObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Streams != nil { + in, out := &in.Streams, &out.Streams + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlatformTelemetryObservation. +func (in *PlatformTelemetryObservation) DeepCopy() *PlatformTelemetryObservation { + if in == nil { + return nil + } + out := new(PlatformTelemetryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlatformTelemetryParameters) DeepCopyInto(out *PlatformTelemetryParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Streams != nil { + in, out := &in.Streams, &out.Streams + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlatformTelemetryParameters. 
+func (in *PlatformTelemetryParameters) DeepCopy() *PlatformTelemetryParameters { + if in == nil { + return nil + } + out := new(PlatformTelemetryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PredictiveInitParameters) DeepCopyInto(out *PredictiveInitParameters) { + *out = *in + if in.LookAheadTime != nil { + in, out := &in.LookAheadTime, &out.LookAheadTime + *out = new(string) + **out = **in + } + if in.ScaleMode != nil { + in, out := &in.ScaleMode, &out.ScaleMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PredictiveInitParameters. +func (in *PredictiveInitParameters) DeepCopy() *PredictiveInitParameters { + if in == nil { + return nil + } + out := new(PredictiveInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PredictiveObservation) DeepCopyInto(out *PredictiveObservation) { + *out = *in + if in.LookAheadTime != nil { + in, out := &in.LookAheadTime, &out.LookAheadTime + *out = new(string) + **out = **in + } + if in.ScaleMode != nil { + in, out := &in.ScaleMode, &out.ScaleMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PredictiveObservation. +func (in *PredictiveObservation) DeepCopy() *PredictiveObservation { + if in == nil { + return nil + } + out := new(PredictiveObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PredictiveParameters) DeepCopyInto(out *PredictiveParameters) { + *out = *in + if in.LookAheadTime != nil { + in, out := &in.LookAheadTime, &out.LookAheadTime + *out = new(string) + **out = **in + } + if in.ScaleMode != nil { + in, out := &in.ScaleMode, &out.ScaleMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PredictiveParameters. +func (in *PredictiveParameters) DeepCopy() *PredictiveParameters { + if in == nil { + return nil + } + out := new(PredictiveParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProfileInitParameters) DeepCopyInto(out *ProfileInitParameters) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(CapacityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FixedDate != nil { + in, out := &in.FixedDate, &out.FixedDate + *out = new(FixedDateInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Recurrence != nil { + in, out := &in.Recurrence, &out.Recurrence + *out = new(RecurrenceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Rule != nil { + in, out := &in.Rule, &out.Rule + *out = make([]RuleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProfileInitParameters. +func (in *ProfileInitParameters) DeepCopy() *ProfileInitParameters { + if in == nil { + return nil + } + out := new(ProfileInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProfileObservation) DeepCopyInto(out *ProfileObservation) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(CapacityObservation) + (*in).DeepCopyInto(*out) + } + if in.FixedDate != nil { + in, out := &in.FixedDate, &out.FixedDate + *out = new(FixedDateObservation) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Recurrence != nil { + in, out := &in.Recurrence, &out.Recurrence + *out = new(RecurrenceObservation) + (*in).DeepCopyInto(*out) + } + if in.Rule != nil { + in, out := &in.Rule, &out.Rule + *out = make([]RuleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProfileObservation. +func (in *ProfileObservation) DeepCopy() *ProfileObservation { + if in == nil { + return nil + } + out := new(ProfileObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProfileParameters) DeepCopyInto(out *ProfileParameters) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(CapacityParameters) + (*in).DeepCopyInto(*out) + } + if in.FixedDate != nil { + in, out := &in.FixedDate, &out.FixedDate + *out = new(FixedDateParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Recurrence != nil { + in, out := &in.Recurrence, &out.Recurrence + *out = new(RecurrenceParameters) + (*in).DeepCopyInto(*out) + } + if in.Rule != nil { + in, out := &in.Rule, &out.Rule + *out = make([]RuleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProfileParameters. +func (in *ProfileParameters) DeepCopy() *ProfileParameters { + if in == nil { + return nil + } + out := new(ProfileParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrometheusForwarderInitParameters) DeepCopyInto(out *PrometheusForwarderInitParameters) { + *out = *in + if in.LabelIncludeFilter != nil { + in, out := &in.LabelIncludeFilter, &out.LabelIncludeFilter + *out = make([]LabelIncludeFilterInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Streams != nil { + in, out := &in.Streams, &out.Streams + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusForwarderInitParameters. 
+func (in *PrometheusForwarderInitParameters) DeepCopy() *PrometheusForwarderInitParameters { + if in == nil { + return nil + } + out := new(PrometheusForwarderInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrometheusForwarderObservation) DeepCopyInto(out *PrometheusForwarderObservation) { + *out = *in + if in.LabelIncludeFilter != nil { + in, out := &in.LabelIncludeFilter, &out.LabelIncludeFilter + *out = make([]LabelIncludeFilterObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Streams != nil { + in, out := &in.Streams, &out.Streams + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusForwarderObservation. +func (in *PrometheusForwarderObservation) DeepCopy() *PrometheusForwarderObservation { + if in == nil { + return nil + } + out := new(PrometheusForwarderObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PrometheusForwarderParameters) DeepCopyInto(out *PrometheusForwarderParameters) { + *out = *in + if in.LabelIncludeFilter != nil { + in, out := &in.LabelIncludeFilter, &out.LabelIncludeFilter + *out = make([]LabelIncludeFilterParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Streams != nil { + in, out := &in.Streams, &out.Streams + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusForwarderParameters. +func (in *PrometheusForwarderParameters) DeepCopy() *PrometheusForwarderParameters { + if in == nil { + return nil + } + out := new(PrometheusForwarderParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RecurrenceInitParameters) DeepCopyInto(out *RecurrenceInitParameters) { + *out = *in + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Hours != nil { + in, out := &in.Hours, &out.Hours + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.Minutes != nil { + in, out := &in.Minutes, &out.Minutes + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.Timezone != nil { + in, out := &in.Timezone, &out.Timezone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecurrenceInitParameters. +func (in *RecurrenceInitParameters) DeepCopy() *RecurrenceInitParameters { + if in == nil { + return nil + } + out := new(RecurrenceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RecurrenceObservation) DeepCopyInto(out *RecurrenceObservation) { + *out = *in + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Hours != nil { + in, out := &in.Hours, &out.Hours + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.Minutes != nil { + in, out := &in.Minutes, &out.Minutes + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.Timezone != nil { + in, out := &in.Timezone, &out.Timezone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecurrenceObservation. +func (in *RecurrenceObservation) DeepCopy() *RecurrenceObservation { + if in == nil { + return nil + } + out := new(RecurrenceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RecurrenceParameters) DeepCopyInto(out *RecurrenceParameters) { + *out = *in + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Hours != nil { + in, out := &in.Hours, &out.Hours + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.Minutes != nil { + in, out := &in.Minutes, &out.Minutes + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.Timezone != nil { + in, out := &in.Timezone, &out.Timezone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecurrenceParameters. +func (in *RecurrenceParameters) DeepCopy() *RecurrenceParameters { + if in == nil { + return nil + } + out := new(RecurrenceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RequestInitParameters) DeepCopyInto(out *RequestInitParameters) { + *out = *in + if in.Body != nil { + in, out := &in.Body, &out.Body + *out = new(string) + **out = **in + } + if in.FollowRedirectsEnabled != nil { + in, out := &in.FollowRedirectsEnabled, &out.FollowRedirectsEnabled + *out = new(bool) + **out = **in + } + if in.HTTPVerb != nil { + in, out := &in.HTTPVerb, &out.HTTPVerb + *out = new(string) + **out = **in + } + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = make([]HeaderInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ParseDependentRequestsEnabled != nil { + in, out := &in.ParseDependentRequestsEnabled, &out.ParseDependentRequestsEnabled + *out = new(bool) + **out = **in + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestInitParameters. +func (in *RequestInitParameters) DeepCopy() *RequestInitParameters { + if in == nil { + return nil + } + out := new(RequestInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RequestObservation) DeepCopyInto(out *RequestObservation) { + *out = *in + if in.Body != nil { + in, out := &in.Body, &out.Body + *out = new(string) + **out = **in + } + if in.FollowRedirectsEnabled != nil { + in, out := &in.FollowRedirectsEnabled, &out.FollowRedirectsEnabled + *out = new(bool) + **out = **in + } + if in.HTTPVerb != nil { + in, out := &in.HTTPVerb, &out.HTTPVerb + *out = new(string) + **out = **in + } + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = make([]HeaderObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ParseDependentRequestsEnabled != nil { + in, out := &in.ParseDependentRequestsEnabled, &out.ParseDependentRequestsEnabled + *out = new(bool) + **out = **in + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestObservation. +func (in *RequestObservation) DeepCopy() *RequestObservation { + if in == nil { + return nil + } + out := new(RequestObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RequestParameters) DeepCopyInto(out *RequestParameters) { + *out = *in + if in.Body != nil { + in, out := &in.Body, &out.Body + *out = new(string) + **out = **in + } + if in.FollowRedirectsEnabled != nil { + in, out := &in.FollowRedirectsEnabled, &out.FollowRedirectsEnabled + *out = new(bool) + **out = **in + } + if in.HTTPVerb != nil { + in, out := &in.HTTPVerb, &out.HTTPVerb + *out = new(string) + **out = **in + } + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = make([]HeaderParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ParseDependentRequestsEnabled != nil { + in, out := &in.ParseDependentRequestsEnabled, &out.ParseDependentRequestsEnabled + *out = new(bool) + **out = **in + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestParameters. +func (in *RequestParameters) DeepCopy() *RequestParameters { + if in == nil { + return nil + } + out := new(RequestParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceHealthInitParameters) DeepCopyInto(out *ResourceHealthInitParameters) { + *out = *in + if in.Current != nil { + in, out := &in.Current, &out.Current + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Previous != nil { + in, out := &in.Previous, &out.Previous + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Reason != nil { + in, out := &in.Reason, &out.Reason + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceHealthInitParameters. +func (in *ResourceHealthInitParameters) DeepCopy() *ResourceHealthInitParameters { + if in == nil { + return nil + } + out := new(ResourceHealthInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceHealthObservation) DeepCopyInto(out *ResourceHealthObservation) { + *out = *in + if in.Current != nil { + in, out := &in.Current, &out.Current + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Previous != nil { + in, out := &in.Previous, &out.Previous + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Reason != nil { + in, out := &in.Reason, &out.Reason + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceHealthObservation. +func (in *ResourceHealthObservation) DeepCopy() *ResourceHealthObservation { + if in == nil { + return nil + } + out := new(ResourceHealthObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceHealthParameters) DeepCopyInto(out *ResourceHealthParameters) { + *out = *in + if in.Current != nil { + in, out := &in.Current, &out.Current + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Previous != nil { + in, out := &in.Previous, &out.Previous + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Reason != nil { + in, out := &in.Reason, &out.Reason + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceHealthParameters. +func (in *ResourceHealthParameters) DeepCopy() *ResourceHealthParameters { + if in == nil { + return nil + } + out := new(ResourceHealthParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetentionPolicyInitParameters) DeepCopyInto(out *RetentionPolicyInitParameters) { + *out = *in + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = new(float64) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionPolicyInitParameters. +func (in *RetentionPolicyInitParameters) DeepCopy() *RetentionPolicyInitParameters { + if in == nil { + return nil + } + out := new(RetentionPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RetentionPolicyObservation) DeepCopyInto(out *RetentionPolicyObservation) { + *out = *in + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = new(float64) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionPolicyObservation. +func (in *RetentionPolicyObservation) DeepCopy() *RetentionPolicyObservation { + if in == nil { + return nil + } + out := new(RetentionPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetentionPolicyParameters) DeepCopyInto(out *RetentionPolicyParameters) { + *out = *in + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = new(float64) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionPolicyParameters. +func (in *RetentionPolicyParameters) DeepCopy() *RetentionPolicyParameters { + if in == nil { + return nil + } + out := new(RetentionPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleInitParameters) DeepCopyInto(out *RuleInitParameters) { + *out = *in + if in.MetricTrigger != nil { + in, out := &in.MetricTrigger, &out.MetricTrigger + *out = new(MetricTriggerInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ScaleAction != nil { + in, out := &in.ScaleAction, &out.ScaleAction + *out = new(ScaleActionInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleInitParameters. 
+func (in *RuleInitParameters) DeepCopy() *RuleInitParameters { + if in == nil { + return nil + } + out := new(RuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleObservation) DeepCopyInto(out *RuleObservation) { + *out = *in + if in.MetricTrigger != nil { + in, out := &in.MetricTrigger, &out.MetricTrigger + *out = new(MetricTriggerObservation) + (*in).DeepCopyInto(*out) + } + if in.ScaleAction != nil { + in, out := &in.ScaleAction, &out.ScaleAction + *out = new(ScaleActionObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleObservation. +func (in *RuleObservation) DeepCopy() *RuleObservation { + if in == nil { + return nil + } + out := new(RuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleParameters) DeepCopyInto(out *RuleParameters) { + *out = *in + if in.MetricTrigger != nil { + in, out := &in.MetricTrigger, &out.MetricTrigger + *out = new(MetricTriggerParameters) + (*in).DeepCopyInto(*out) + } + if in.ScaleAction != nil { + in, out := &in.ScaleAction, &out.ScaleAction + *out = new(ScaleActionParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleParameters. +func (in *RuleParameters) DeepCopy() *RuleParameters { + if in == nil { + return nil + } + out := new(RuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SMSReceiverInitParameters) DeepCopyInto(out *SMSReceiverInitParameters) { + *out = *in + if in.CountryCode != nil { + in, out := &in.CountryCode, &out.CountryCode + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PhoneNumber != nil { + in, out := &in.PhoneNumber, &out.PhoneNumber + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SMSReceiverInitParameters. +func (in *SMSReceiverInitParameters) DeepCopy() *SMSReceiverInitParameters { + if in == nil { + return nil + } + out := new(SMSReceiverInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SMSReceiverObservation) DeepCopyInto(out *SMSReceiverObservation) { + *out = *in + if in.CountryCode != nil { + in, out := &in.CountryCode, &out.CountryCode + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PhoneNumber != nil { + in, out := &in.PhoneNumber, &out.PhoneNumber + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SMSReceiverObservation. +func (in *SMSReceiverObservation) DeepCopy() *SMSReceiverObservation { + if in == nil { + return nil + } + out := new(SMSReceiverObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SMSReceiverParameters) DeepCopyInto(out *SMSReceiverParameters) { + *out = *in + if in.CountryCode != nil { + in, out := &in.CountryCode, &out.CountryCode + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PhoneNumber != nil { + in, out := &in.PhoneNumber, &out.PhoneNumber + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SMSReceiverParameters. +func (in *SMSReceiverParameters) DeepCopy() *SMSReceiverParameters { + if in == nil { + return nil + } + out := new(SMSReceiverParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaleActionInitParameters) DeepCopyInto(out *ScaleActionInitParameters) { + *out = *in + if in.Cooldown != nil { + in, out := &in.Cooldown, &out.Cooldown + *out = new(string) + **out = **in + } + if in.Direction != nil { + in, out := &in.Direction, &out.Direction + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleActionInitParameters. +func (in *ScaleActionInitParameters) DeepCopy() *ScaleActionInitParameters { + if in == nil { + return nil + } + out := new(ScaleActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScaleActionObservation) DeepCopyInto(out *ScaleActionObservation) { + *out = *in + if in.Cooldown != nil { + in, out := &in.Cooldown, &out.Cooldown + *out = new(string) + **out = **in + } + if in.Direction != nil { + in, out := &in.Direction, &out.Direction + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleActionObservation. +func (in *ScaleActionObservation) DeepCopy() *ScaleActionObservation { + if in == nil { + return nil + } + out := new(ScaleActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaleActionParameters) DeepCopyInto(out *ScaleActionParameters) { + *out = *in + if in.Cooldown != nil { + in, out := &in.Cooldown, &out.Cooldown + *out = new(string) + **out = **in + } + if in.Direction != nil { + in, out := &in.Direction, &out.Direction + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleActionParameters. +func (in *ScaleActionParameters) DeepCopy() *ScaleActionParameters { + if in == nil { + return nil + } + out := new(ScaleActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceHealthInitParameters) DeepCopyInto(out *ServiceHealthInitParameters) { + *out = *in + if in.Events != nil { + in, out := &in.Events, &out.Events + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Locations != nil { + in, out := &in.Locations, &out.Locations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Services != nil { + in, out := &in.Services, &out.Services + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceHealthInitParameters. +func (in *ServiceHealthInitParameters) DeepCopy() *ServiceHealthInitParameters { + if in == nil { + return nil + } + out := new(ServiceHealthInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceHealthObservation) DeepCopyInto(out *ServiceHealthObservation) { + *out = *in + if in.Events != nil { + in, out := &in.Events, &out.Events + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Locations != nil { + in, out := &in.Locations, &out.Locations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Services != nil { + in, out := &in.Services, &out.Services + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceHealthObservation. +func (in *ServiceHealthObservation) DeepCopy() *ServiceHealthObservation { + if in == nil { + return nil + } + out := new(ServiceHealthObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceHealthParameters) DeepCopyInto(out *ServiceHealthParameters) { + *out = *in + if in.Events != nil { + in, out := &in.Events, &out.Events + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Locations != nil { + in, out := &in.Locations, &out.Locations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Services != nil { + in, out := &in.Services, &out.Services + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceHealthParameters. +func (in *ServiceHealthParameters) DeepCopy() *ServiceHealthParameters { + if in == nil { + return nil + } + out := new(ServiceHealthParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SettingsInitParameters) DeepCopyInto(out *SettingsInitParameters) { + *out = *in + if in.Text != nil { + in, out := &in.Text, &out.Text + *out = new(TextInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SettingsInitParameters. +func (in *SettingsInitParameters) DeepCopy() *SettingsInitParameters { + if in == nil { + return nil + } + out := new(SettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SettingsObservation) DeepCopyInto(out *SettingsObservation) { + *out = *in + if in.Text != nil { + in, out := &in.Text, &out.Text + *out = new(TextObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SettingsObservation. +func (in *SettingsObservation) DeepCopy() *SettingsObservation { + if in == nil { + return nil + } + out := new(SettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SettingsParameters) DeepCopyInto(out *SettingsParameters) { + *out = *in + if in.Text != nil { + in, out := &in.Text, &out.Text + *out = new(TextParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SettingsParameters. +func (in *SettingsParameters) DeepCopy() *SettingsParameters { + if in == nil { + return nil + } + out := new(SettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageBlobDirectInitParameters) DeepCopyInto(out *StorageBlobDirectInitParameters) { + *out = *in + if in.ContainerName != nil { + in, out := &in.ContainerName, &out.ContainerName + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.StorageAccountID != nil { + in, out := &in.StorageAccountID, &out.StorageAccountID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageBlobDirectInitParameters. 
+func (in *StorageBlobDirectInitParameters) DeepCopy() *StorageBlobDirectInitParameters { + if in == nil { + return nil + } + out := new(StorageBlobDirectInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageBlobDirectObservation) DeepCopyInto(out *StorageBlobDirectObservation) { + *out = *in + if in.ContainerName != nil { + in, out := &in.ContainerName, &out.ContainerName + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.StorageAccountID != nil { + in, out := &in.StorageAccountID, &out.StorageAccountID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageBlobDirectObservation. +func (in *StorageBlobDirectObservation) DeepCopy() *StorageBlobDirectObservation { + if in == nil { + return nil + } + out := new(StorageBlobDirectObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageBlobDirectParameters) DeepCopyInto(out *StorageBlobDirectParameters) { + *out = *in + if in.ContainerName != nil { + in, out := &in.ContainerName, &out.ContainerName + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.StorageAccountID != nil { + in, out := &in.StorageAccountID, &out.StorageAccountID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageBlobDirectParameters. 
+func (in *StorageBlobDirectParameters) DeepCopy() *StorageBlobDirectParameters { + if in == nil { + return nil + } + out := new(StorageBlobDirectParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageBlobInitParameters) DeepCopyInto(out *StorageBlobInitParameters) { + *out = *in + if in.ContainerName != nil { + in, out := &in.ContainerName, &out.ContainerName + *out = new(string) + **out = **in + } + if in.ContainerNameRef != nil { + in, out := &in.ContainerNameRef, &out.ContainerNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ContainerNameSelector != nil { + in, out := &in.ContainerNameSelector, &out.ContainerNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.StorageAccountID != nil { + in, out := &in.StorageAccountID, &out.StorageAccountID + *out = new(string) + **out = **in + } + if in.StorageAccountIDRef != nil { + in, out := &in.StorageAccountIDRef, &out.StorageAccountIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StorageAccountIDSelector != nil { + in, out := &in.StorageAccountIDSelector, &out.StorageAccountIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageBlobInitParameters. +func (in *StorageBlobInitParameters) DeepCopy() *StorageBlobInitParameters { + if in == nil { + return nil + } + out := new(StorageBlobInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StorageBlobObservation) DeepCopyInto(out *StorageBlobObservation) { + *out = *in + if in.ContainerName != nil { + in, out := &in.ContainerName, &out.ContainerName + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.StorageAccountID != nil { + in, out := &in.StorageAccountID, &out.StorageAccountID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageBlobObservation. +func (in *StorageBlobObservation) DeepCopy() *StorageBlobObservation { + if in == nil { + return nil + } + out := new(StorageBlobObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageBlobParameters) DeepCopyInto(out *StorageBlobParameters) { + *out = *in + if in.ContainerName != nil { + in, out := &in.ContainerName, &out.ContainerName + *out = new(string) + **out = **in + } + if in.ContainerNameRef != nil { + in, out := &in.ContainerNameRef, &out.ContainerNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ContainerNameSelector != nil { + in, out := &in.ContainerNameSelector, &out.ContainerNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.StorageAccountID != nil { + in, out := &in.StorageAccountID, &out.StorageAccountID + *out = new(string) + **out = **in + } + if in.StorageAccountIDRef != nil { + in, out := &in.StorageAccountIDRef, &out.StorageAccountIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StorageAccountIDSelector != nil { + in, out := &in.StorageAccountIDSelector, &out.StorageAccountIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, 
copying the receiver, creating a new StorageBlobParameters. +func (in *StorageBlobParameters) DeepCopy() *StorageBlobParameters { + if in == nil { + return nil + } + out := new(StorageBlobParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageTableDirectInitParameters) DeepCopyInto(out *StorageTableDirectInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.StorageAccountID != nil { + in, out := &in.StorageAccountID, &out.StorageAccountID + *out = new(string) + **out = **in + } + if in.TableName != nil { + in, out := &in.TableName, &out.TableName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageTableDirectInitParameters. +func (in *StorageTableDirectInitParameters) DeepCopy() *StorageTableDirectInitParameters { + if in == nil { + return nil + } + out := new(StorageTableDirectInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageTableDirectObservation) DeepCopyInto(out *StorageTableDirectObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.StorageAccountID != nil { + in, out := &in.StorageAccountID, &out.StorageAccountID + *out = new(string) + **out = **in + } + if in.TableName != nil { + in, out := &in.TableName, &out.TableName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageTableDirectObservation. 
+func (in *StorageTableDirectObservation) DeepCopy() *StorageTableDirectObservation { + if in == nil { + return nil + } + out := new(StorageTableDirectObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageTableDirectParameters) DeepCopyInto(out *StorageTableDirectParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.StorageAccountID != nil { + in, out := &in.StorageAccountID, &out.StorageAccountID + *out = new(string) + **out = **in + } + if in.TableName != nil { + in, out := &in.TableName, &out.TableName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageTableDirectParameters. +func (in *StorageTableDirectParameters) DeepCopy() *StorageTableDirectParameters { + if in == nil { + return nil + } + out := new(StorageTableDirectParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamDeclarationInitParameters) DeepCopyInto(out *StreamDeclarationInitParameters) { + *out = *in + if in.Column != nil { + in, out := &in.Column, &out.Column + *out = make([]ColumnInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StreamName != nil { + in, out := &in.StreamName, &out.StreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamDeclarationInitParameters. 
+func (in *StreamDeclarationInitParameters) DeepCopy() *StreamDeclarationInitParameters { + if in == nil { + return nil + } + out := new(StreamDeclarationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamDeclarationObservation) DeepCopyInto(out *StreamDeclarationObservation) { + *out = *in + if in.Column != nil { + in, out := &in.Column, &out.Column + *out = make([]ColumnObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StreamName != nil { + in, out := &in.StreamName, &out.StreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamDeclarationObservation. +func (in *StreamDeclarationObservation) DeepCopy() *StreamDeclarationObservation { + if in == nil { + return nil + } + out := new(StreamDeclarationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamDeclarationParameters) DeepCopyInto(out *StreamDeclarationParameters) { + *out = *in + if in.Column != nil { + in, out := &in.Column, &out.Column + *out = make([]ColumnParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StreamName != nil { + in, out := &in.StreamName, &out.StreamName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamDeclarationParameters. +func (in *StreamDeclarationParameters) DeepCopy() *StreamDeclarationParameters { + if in == nil { + return nil + } + out := new(StreamDeclarationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SyslogInitParameters) DeepCopyInto(out *SyslogInitParameters) { + *out = *in + if in.FacilityNames != nil { + in, out := &in.FacilityNames, &out.FacilityNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LogLevels != nil { + in, out := &in.LogLevels, &out.LogLevels + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Streams != nil { + in, out := &in.Streams, &out.Streams + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyslogInitParameters. +func (in *SyslogInitParameters) DeepCopy() *SyslogInitParameters { + if in == nil { + return nil + } + out := new(SyslogInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SyslogObservation) DeepCopyInto(out *SyslogObservation) { + *out = *in + if in.FacilityNames != nil { + in, out := &in.FacilityNames, &out.FacilityNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LogLevels != nil { + in, out := &in.LogLevels, &out.LogLevels + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Streams != nil { + in, out := &in.Streams, &out.Streams + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyslogObservation. +func (in *SyslogObservation) DeepCopy() *SyslogObservation { + if in == nil { + return nil + } + out := new(SyslogObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SyslogParameters) DeepCopyInto(out *SyslogParameters) { + *out = *in + if in.FacilityNames != nil { + in, out := &in.FacilityNames, &out.FacilityNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LogLevels != nil { + in, out := &in.LogLevels, &out.LogLevels + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Streams != nil { + in, out := &in.Streams, &out.Streams + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyslogParameters. +func (in *SyslogParameters) DeepCopy() *SyslogParameters { + if in == nil { + return nil + } + out := new(SyslogParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TextInitParameters) DeepCopyInto(out *TextInitParameters) { + *out = *in + if in.RecordStartTimestampFormat != nil { + in, out := &in.RecordStartTimestampFormat, &out.RecordStartTimestampFormat + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TextInitParameters. +func (in *TextInitParameters) DeepCopy() *TextInitParameters { + if in == nil { + return nil + } + out := new(TextInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TextObservation) DeepCopyInto(out *TextObservation) { + *out = *in + if in.RecordStartTimestampFormat != nil { + in, out := &in.RecordStartTimestampFormat, &out.RecordStartTimestampFormat + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TextObservation. +func (in *TextObservation) DeepCopy() *TextObservation { + if in == nil { + return nil + } + out := new(TextObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TextParameters) DeepCopyInto(out *TextParameters) { + *out = *in + if in.RecordStartTimestampFormat != nil { + in, out := &in.RecordStartTimestampFormat, &out.RecordStartTimestampFormat + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TextParameters. +func (in *TextParameters) DeepCopy() *TextParameters { + if in == nil { + return nil + } + out := new(TextParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TriggerInitParameters) DeepCopyInto(out *TriggerInitParameters) { + *out = *in + if in.MetricTrigger != nil { + in, out := &in.MetricTrigger, &out.MetricTrigger + *out = new(TriggerMetricTriggerInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Threshold != nil { + in, out := &in.Threshold, &out.Threshold + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerInitParameters. 
+func (in *TriggerInitParameters) DeepCopy() *TriggerInitParameters { + if in == nil { + return nil + } + out := new(TriggerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TriggerMetricTriggerInitParameters) DeepCopyInto(out *TriggerMetricTriggerInitParameters) { + *out = *in + if in.MetricColumn != nil { + in, out := &in.MetricColumn, &out.MetricColumn + *out = new(string) + **out = **in + } + if in.MetricTriggerType != nil { + in, out := &in.MetricTriggerType, &out.MetricTriggerType + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Threshold != nil { + in, out := &in.Threshold, &out.Threshold + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerMetricTriggerInitParameters. +func (in *TriggerMetricTriggerInitParameters) DeepCopy() *TriggerMetricTriggerInitParameters { + if in == nil { + return nil + } + out := new(TriggerMetricTriggerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TriggerMetricTriggerObservation) DeepCopyInto(out *TriggerMetricTriggerObservation) { + *out = *in + if in.MetricColumn != nil { + in, out := &in.MetricColumn, &out.MetricColumn + *out = new(string) + **out = **in + } + if in.MetricTriggerType != nil { + in, out := &in.MetricTriggerType, &out.MetricTriggerType + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Threshold != nil { + in, out := &in.Threshold, &out.Threshold + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerMetricTriggerObservation. +func (in *TriggerMetricTriggerObservation) DeepCopy() *TriggerMetricTriggerObservation { + if in == nil { + return nil + } + out := new(TriggerMetricTriggerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TriggerMetricTriggerParameters) DeepCopyInto(out *TriggerMetricTriggerParameters) { + *out = *in + if in.MetricColumn != nil { + in, out := &in.MetricColumn, &out.MetricColumn + *out = new(string) + **out = **in + } + if in.MetricTriggerType != nil { + in, out := &in.MetricTriggerType, &out.MetricTriggerType + *out = new(string) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Threshold != nil { + in, out := &in.Threshold, &out.Threshold + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerMetricTriggerParameters. 
+func (in *TriggerMetricTriggerParameters) DeepCopy() *TriggerMetricTriggerParameters { + if in == nil { + return nil + } + out := new(TriggerMetricTriggerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TriggerObservation) DeepCopyInto(out *TriggerObservation) { + *out = *in + if in.MetricTrigger != nil { + in, out := &in.MetricTrigger, &out.MetricTrigger + *out = new(TriggerMetricTriggerObservation) + (*in).DeepCopyInto(*out) + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Threshold != nil { + in, out := &in.Threshold, &out.Threshold + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerObservation. +func (in *TriggerObservation) DeepCopy() *TriggerObservation { + if in == nil { + return nil + } + out := new(TriggerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TriggerParameters) DeepCopyInto(out *TriggerParameters) { + *out = *in + if in.MetricTrigger != nil { + in, out := &in.MetricTrigger, &out.MetricTrigger + *out = new(TriggerMetricTriggerParameters) + (*in).DeepCopyInto(*out) + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Threshold != nil { + in, out := &in.Threshold, &out.Threshold + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerParameters. 
+func (in *TriggerParameters) DeepCopy() *TriggerParameters { + if in == nil { + return nil + } + out := new(TriggerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidationRulesInitParameters) DeepCopyInto(out *ValidationRulesInitParameters) { + *out = *in + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = new(ContentInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ExpectedStatusCode != nil { + in, out := &in.ExpectedStatusCode, &out.ExpectedStatusCode + *out = new(float64) + **out = **in + } + if in.SSLCertRemainingLifetime != nil { + in, out := &in.SSLCertRemainingLifetime, &out.SSLCertRemainingLifetime + *out = new(float64) + **out = **in + } + if in.SSLCheckEnabled != nil { + in, out := &in.SSLCheckEnabled, &out.SSLCheckEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidationRulesInitParameters. +func (in *ValidationRulesInitParameters) DeepCopy() *ValidationRulesInitParameters { + if in == nil { + return nil + } + out := new(ValidationRulesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ValidationRulesObservation) DeepCopyInto(out *ValidationRulesObservation) { + *out = *in + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = new(ContentObservation) + (*in).DeepCopyInto(*out) + } + if in.ExpectedStatusCode != nil { + in, out := &in.ExpectedStatusCode, &out.ExpectedStatusCode + *out = new(float64) + **out = **in + } + if in.SSLCertRemainingLifetime != nil { + in, out := &in.SSLCertRemainingLifetime, &out.SSLCertRemainingLifetime + *out = new(float64) + **out = **in + } + if in.SSLCheckEnabled != nil { + in, out := &in.SSLCheckEnabled, &out.SSLCheckEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidationRulesObservation. +func (in *ValidationRulesObservation) DeepCopy() *ValidationRulesObservation { + if in == nil { + return nil + } + out := new(ValidationRulesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidationRulesParameters) DeepCopyInto(out *ValidationRulesParameters) { + *out = *in + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = new(ContentParameters) + (*in).DeepCopyInto(*out) + } + if in.ExpectedStatusCode != nil { + in, out := &in.ExpectedStatusCode, &out.ExpectedStatusCode + *out = new(float64) + **out = **in + } + if in.SSLCertRemainingLifetime != nil { + in, out := &in.SSLCertRemainingLifetime, &out.SSLCertRemainingLifetime + *out = new(float64) + **out = **in + } + if in.SSLCheckEnabled != nil { + in, out := &in.SSLCheckEnabled, &out.SSLCheckEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidationRulesParameters. 
+func (in *ValidationRulesParameters) DeepCopy() *ValidationRulesParameters { + if in == nil { + return nil + } + out := new(ValidationRulesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VoiceReceiverInitParameters) DeepCopyInto(out *VoiceReceiverInitParameters) { + *out = *in + if in.CountryCode != nil { + in, out := &in.CountryCode, &out.CountryCode + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PhoneNumber != nil { + in, out := &in.PhoneNumber, &out.PhoneNumber + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VoiceReceiverInitParameters. +func (in *VoiceReceiverInitParameters) DeepCopy() *VoiceReceiverInitParameters { + if in == nil { + return nil + } + out := new(VoiceReceiverInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VoiceReceiverObservation) DeepCopyInto(out *VoiceReceiverObservation) { + *out = *in + if in.CountryCode != nil { + in, out := &in.CountryCode, &out.CountryCode + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PhoneNumber != nil { + in, out := &in.PhoneNumber, &out.PhoneNumber + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VoiceReceiverObservation. 
+func (in *VoiceReceiverObservation) DeepCopy() *VoiceReceiverObservation { + if in == nil { + return nil + } + out := new(VoiceReceiverObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VoiceReceiverParameters) DeepCopyInto(out *VoiceReceiverParameters) { + *out = *in + if in.CountryCode != nil { + in, out := &in.CountryCode, &out.CountryCode + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PhoneNumber != nil { + in, out := &in.PhoneNumber, &out.PhoneNumber + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VoiceReceiverParameters. +func (in *VoiceReceiverParameters) DeepCopy() *VoiceReceiverParameters { + if in == nil { + return nil + } + out := new(VoiceReceiverParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookInitParameters) DeepCopyInto(out *WebhookInitParameters) { + *out = *in + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ServiceURI != nil { + in, out := &in.ServiceURI, &out.ServiceURI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookInitParameters. 
+func (in *WebhookInitParameters) DeepCopy() *WebhookInitParameters { + if in == nil { + return nil + } + out := new(WebhookInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookObservation) DeepCopyInto(out *WebhookObservation) { + *out = *in + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ServiceURI != nil { + in, out := &in.ServiceURI, &out.ServiceURI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookObservation. +func (in *WebhookObservation) DeepCopy() *WebhookObservation { + if in == nil { + return nil + } + out := new(WebhookObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookParameters) DeepCopyInto(out *WebhookParameters) { + *out = *in + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ServiceURI != nil { + in, out := &in.ServiceURI, &out.ServiceURI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookParameters. 
+func (in *WebhookParameters) DeepCopy() *WebhookParameters { + if in == nil { + return nil + } + out := new(WebhookParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookReceiverInitParameters) DeepCopyInto(out *WebhookReceiverInitParameters) { + *out = *in + if in.AADAuth != nil { + in, out := &in.AADAuth, &out.AADAuth + *out = new(AADAuthInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ServiceURI != nil { + in, out := &in.ServiceURI, &out.ServiceURI + *out = new(string) + **out = **in + } + if in.UseCommonAlertSchema != nil { + in, out := &in.UseCommonAlertSchema, &out.UseCommonAlertSchema + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookReceiverInitParameters. +func (in *WebhookReceiverInitParameters) DeepCopy() *WebhookReceiverInitParameters { + if in == nil { + return nil + } + out := new(WebhookReceiverInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WebhookReceiverObservation) DeepCopyInto(out *WebhookReceiverObservation) { + *out = *in + if in.AADAuth != nil { + in, out := &in.AADAuth, &out.AADAuth + *out = new(AADAuthObservation) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ServiceURI != nil { + in, out := &in.ServiceURI, &out.ServiceURI + *out = new(string) + **out = **in + } + if in.UseCommonAlertSchema != nil { + in, out := &in.UseCommonAlertSchema, &out.UseCommonAlertSchema + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookReceiverObservation. +func (in *WebhookReceiverObservation) DeepCopy() *WebhookReceiverObservation { + if in == nil { + return nil + } + out := new(WebhookReceiverObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookReceiverParameters) DeepCopyInto(out *WebhookReceiverParameters) { + *out = *in + if in.AADAuth != nil { + in, out := &in.AADAuth, &out.AADAuth + *out = new(AADAuthParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ServiceURI != nil { + in, out := &in.ServiceURI, &out.ServiceURI + *out = new(string) + **out = **in + } + if in.UseCommonAlertSchema != nil { + in, out := &in.UseCommonAlertSchema, &out.UseCommonAlertSchema + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookReceiverParameters. +func (in *WebhookReceiverParameters) DeepCopy() *WebhookReceiverParameters { + if in == nil { + return nil + } + out := new(WebhookReceiverParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *WindowsEventLogInitParameters) DeepCopyInto(out *WindowsEventLogInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Streams != nil { + in, out := &in.Streams, &out.Streams + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XPathQueries != nil { + in, out := &in.XPathQueries, &out.XPathQueries + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsEventLogInitParameters. +func (in *WindowsEventLogInitParameters) DeepCopy() *WindowsEventLogInitParameters { + if in == nil { + return nil + } + out := new(WindowsEventLogInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsEventLogObservation) DeepCopyInto(out *WindowsEventLogObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Streams != nil { + in, out := &in.Streams, &out.Streams + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XPathQueries != nil { + in, out := &in.XPathQueries, &out.XPathQueries + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsEventLogObservation. 
+func (in *WindowsEventLogObservation) DeepCopy() *WindowsEventLogObservation { + if in == nil { + return nil + } + out := new(WindowsEventLogObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsEventLogParameters) DeepCopyInto(out *WindowsEventLogParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Streams != nil { + in, out := &in.Streams, &out.Streams + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XPathQueries != nil { + in, out := &in.XPathQueries, &out.XPathQueries + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsEventLogParameters. +func (in *WindowsEventLogParameters) DeepCopy() *WindowsEventLogParameters { + if in == nil { + return nil + } + out := new(WindowsEventLogParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFirewallLogInitParameters) DeepCopyInto(out *WindowsFirewallLogInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Streams != nil { + in, out := &in.Streams, &out.Streams + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFirewallLogInitParameters. 
+func (in *WindowsFirewallLogInitParameters) DeepCopy() *WindowsFirewallLogInitParameters { + if in == nil { + return nil + } + out := new(WindowsFirewallLogInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFirewallLogObservation) DeepCopyInto(out *WindowsFirewallLogObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Streams != nil { + in, out := &in.Streams, &out.Streams + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFirewallLogObservation. +func (in *WindowsFirewallLogObservation) DeepCopy() *WindowsFirewallLogObservation { + if in == nil { + return nil + } + out := new(WindowsFirewallLogObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFirewallLogParameters) DeepCopyInto(out *WindowsFirewallLogParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Streams != nil { + in, out := &in.Streams, &out.Streams + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFirewallLogParameters. 
+func (in *WindowsFirewallLogParameters) DeepCopy() *WindowsFirewallLogParameters { + if in == nil { + return nil + } + out := new(WindowsFirewallLogParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/insights/v1beta2/zz_generated.managed.go b/apis/insights/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..b6db58c2d --- /dev/null +++ b/apis/insights/v1beta2/zz_generated.managed.go @@ -0,0 +1,668 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this ApplicationInsightsStandardWebTest. +func (mg *ApplicationInsightsStandardWebTest) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ApplicationInsightsStandardWebTest. +func (mg *ApplicationInsightsStandardWebTest) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ApplicationInsightsStandardWebTest. +func (mg *ApplicationInsightsStandardWebTest) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ApplicationInsightsStandardWebTest. +func (mg *ApplicationInsightsStandardWebTest) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ApplicationInsightsStandardWebTest. +func (mg *ApplicationInsightsStandardWebTest) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ApplicationInsightsStandardWebTest. 
+func (mg *ApplicationInsightsStandardWebTest) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ApplicationInsightsStandardWebTest. +func (mg *ApplicationInsightsStandardWebTest) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ApplicationInsightsStandardWebTest. +func (mg *ApplicationInsightsStandardWebTest) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ApplicationInsightsStandardWebTest. +func (mg *ApplicationInsightsStandardWebTest) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ApplicationInsightsStandardWebTest. +func (mg *ApplicationInsightsStandardWebTest) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ApplicationInsightsStandardWebTest. +func (mg *ApplicationInsightsStandardWebTest) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ApplicationInsightsStandardWebTest. +func (mg *ApplicationInsightsStandardWebTest) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this ApplicationInsightsWorkbook. +func (mg *ApplicationInsightsWorkbook) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ApplicationInsightsWorkbook. +func (mg *ApplicationInsightsWorkbook) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ApplicationInsightsWorkbook. 
+func (mg *ApplicationInsightsWorkbook) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ApplicationInsightsWorkbook. +func (mg *ApplicationInsightsWorkbook) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ApplicationInsightsWorkbook. +func (mg *ApplicationInsightsWorkbook) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ApplicationInsightsWorkbook. +func (mg *ApplicationInsightsWorkbook) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ApplicationInsightsWorkbook. +func (mg *ApplicationInsightsWorkbook) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ApplicationInsightsWorkbook. +func (mg *ApplicationInsightsWorkbook) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ApplicationInsightsWorkbook. +func (mg *ApplicationInsightsWorkbook) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ApplicationInsightsWorkbook. +func (mg *ApplicationInsightsWorkbook) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ApplicationInsightsWorkbook. +func (mg *ApplicationInsightsWorkbook) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ApplicationInsightsWorkbook. 
+func (mg *ApplicationInsightsWorkbook) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this MonitorActionGroup. +func (mg *MonitorActionGroup) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this MonitorActionGroup. +func (mg *MonitorActionGroup) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this MonitorActionGroup. +func (mg *MonitorActionGroup) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this MonitorActionGroup. +func (mg *MonitorActionGroup) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this MonitorActionGroup. +func (mg *MonitorActionGroup) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this MonitorActionGroup. +func (mg *MonitorActionGroup) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this MonitorActionGroup. +func (mg *MonitorActionGroup) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this MonitorActionGroup. +func (mg *MonitorActionGroup) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this MonitorActionGroup. +func (mg *MonitorActionGroup) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this MonitorActionGroup. +func (mg *MonitorActionGroup) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this MonitorActionGroup. 
+func (mg *MonitorActionGroup) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this MonitorActionGroup. +func (mg *MonitorActionGroup) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this MonitorActivityLogAlert. +func (mg *MonitorActivityLogAlert) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this MonitorActivityLogAlert. +func (mg *MonitorActivityLogAlert) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this MonitorActivityLogAlert. +func (mg *MonitorActivityLogAlert) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this MonitorActivityLogAlert. +func (mg *MonitorActivityLogAlert) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this MonitorActivityLogAlert. +func (mg *MonitorActivityLogAlert) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this MonitorActivityLogAlert. +func (mg *MonitorActivityLogAlert) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this MonitorActivityLogAlert. +func (mg *MonitorActivityLogAlert) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this MonitorActivityLogAlert. +func (mg *MonitorActivityLogAlert) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this MonitorActivityLogAlert. 
+func (mg *MonitorActivityLogAlert) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this MonitorActivityLogAlert. +func (mg *MonitorActivityLogAlert) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this MonitorActivityLogAlert. +func (mg *MonitorActivityLogAlert) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this MonitorActivityLogAlert. +func (mg *MonitorActivityLogAlert) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this MonitorAutoscaleSetting. +func (mg *MonitorAutoscaleSetting) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this MonitorAutoscaleSetting. +func (mg *MonitorAutoscaleSetting) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this MonitorAutoscaleSetting. +func (mg *MonitorAutoscaleSetting) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this MonitorAutoscaleSetting. +func (mg *MonitorAutoscaleSetting) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this MonitorAutoscaleSetting. +func (mg *MonitorAutoscaleSetting) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this MonitorAutoscaleSetting. +func (mg *MonitorAutoscaleSetting) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this MonitorAutoscaleSetting. 
+func (mg *MonitorAutoscaleSetting) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this MonitorAutoscaleSetting. +func (mg *MonitorAutoscaleSetting) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this MonitorAutoscaleSetting. +func (mg *MonitorAutoscaleSetting) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this MonitorAutoscaleSetting. +func (mg *MonitorAutoscaleSetting) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this MonitorAutoscaleSetting. +func (mg *MonitorAutoscaleSetting) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this MonitorAutoscaleSetting. +func (mg *MonitorAutoscaleSetting) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this MonitorDataCollectionRule. +func (mg *MonitorDataCollectionRule) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this MonitorDataCollectionRule. +func (mg *MonitorDataCollectionRule) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this MonitorDataCollectionRule. +func (mg *MonitorDataCollectionRule) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this MonitorDataCollectionRule. +func (mg *MonitorDataCollectionRule) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this MonitorDataCollectionRule. 
+func (mg *MonitorDataCollectionRule) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this MonitorDataCollectionRule. +func (mg *MonitorDataCollectionRule) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this MonitorDataCollectionRule. +func (mg *MonitorDataCollectionRule) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this MonitorDataCollectionRule. +func (mg *MonitorDataCollectionRule) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this MonitorDataCollectionRule. +func (mg *MonitorDataCollectionRule) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this MonitorDataCollectionRule. +func (mg *MonitorDataCollectionRule) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this MonitorDataCollectionRule. +func (mg *MonitorDataCollectionRule) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this MonitorDataCollectionRule. +func (mg *MonitorDataCollectionRule) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this MonitorDiagnosticSetting. +func (mg *MonitorDiagnosticSetting) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this MonitorDiagnosticSetting. +func (mg *MonitorDiagnosticSetting) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this MonitorDiagnosticSetting. 
+func (mg *MonitorDiagnosticSetting) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this MonitorDiagnosticSetting. +func (mg *MonitorDiagnosticSetting) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this MonitorDiagnosticSetting. +func (mg *MonitorDiagnosticSetting) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this MonitorDiagnosticSetting. +func (mg *MonitorDiagnosticSetting) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this MonitorDiagnosticSetting. +func (mg *MonitorDiagnosticSetting) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this MonitorDiagnosticSetting. +func (mg *MonitorDiagnosticSetting) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this MonitorDiagnosticSetting. +func (mg *MonitorDiagnosticSetting) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this MonitorDiagnosticSetting. +func (mg *MonitorDiagnosticSetting) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this MonitorDiagnosticSetting. +func (mg *MonitorDiagnosticSetting) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this MonitorDiagnosticSetting. +func (mg *MonitorDiagnosticSetting) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this MonitorMetricAlert. 
+func (mg *MonitorMetricAlert) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this MonitorMetricAlert. +func (mg *MonitorMetricAlert) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this MonitorMetricAlert. +func (mg *MonitorMetricAlert) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this MonitorMetricAlert. +func (mg *MonitorMetricAlert) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this MonitorMetricAlert. +func (mg *MonitorMetricAlert) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this MonitorMetricAlert. +func (mg *MonitorMetricAlert) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this MonitorMetricAlert. +func (mg *MonitorMetricAlert) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this MonitorMetricAlert. +func (mg *MonitorMetricAlert) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this MonitorMetricAlert. +func (mg *MonitorMetricAlert) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this MonitorMetricAlert. +func (mg *MonitorMetricAlert) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this MonitorMetricAlert. 
+func (mg *MonitorMetricAlert) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this MonitorMetricAlert. +func (mg *MonitorMetricAlert) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this MonitorScheduledQueryRulesAlert. +func (mg *MonitorScheduledQueryRulesAlert) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this MonitorScheduledQueryRulesAlert. +func (mg *MonitorScheduledQueryRulesAlert) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this MonitorScheduledQueryRulesAlert. +func (mg *MonitorScheduledQueryRulesAlert) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this MonitorScheduledQueryRulesAlert. +func (mg *MonitorScheduledQueryRulesAlert) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this MonitorScheduledQueryRulesAlert. +func (mg *MonitorScheduledQueryRulesAlert) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this MonitorScheduledQueryRulesAlert. +func (mg *MonitorScheduledQueryRulesAlert) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this MonitorScheduledQueryRulesAlert. +func (mg *MonitorScheduledQueryRulesAlert) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this MonitorScheduledQueryRulesAlert. 
+func (mg *MonitorScheduledQueryRulesAlert) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this MonitorScheduledQueryRulesAlert. +func (mg *MonitorScheduledQueryRulesAlert) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this MonitorScheduledQueryRulesAlert. +func (mg *MonitorScheduledQueryRulesAlert) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this MonitorScheduledQueryRulesAlert. +func (mg *MonitorScheduledQueryRulesAlert) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this MonitorScheduledQueryRulesAlert. +func (mg *MonitorScheduledQueryRulesAlert) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this MonitorScheduledQueryRulesAlertV2. +func (mg *MonitorScheduledQueryRulesAlertV2) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this MonitorScheduledQueryRulesAlertV2. +func (mg *MonitorScheduledQueryRulesAlertV2) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this MonitorScheduledQueryRulesAlertV2. +func (mg *MonitorScheduledQueryRulesAlertV2) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this MonitorScheduledQueryRulesAlertV2. +func (mg *MonitorScheduledQueryRulesAlertV2) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this MonitorScheduledQueryRulesAlertV2. 
+func (mg *MonitorScheduledQueryRulesAlertV2) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this MonitorScheduledQueryRulesAlertV2. +func (mg *MonitorScheduledQueryRulesAlertV2) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this MonitorScheduledQueryRulesAlertV2. +func (mg *MonitorScheduledQueryRulesAlertV2) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this MonitorScheduledQueryRulesAlertV2. +func (mg *MonitorScheduledQueryRulesAlertV2) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this MonitorScheduledQueryRulesAlertV2. +func (mg *MonitorScheduledQueryRulesAlertV2) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this MonitorScheduledQueryRulesAlertV2. +func (mg *MonitorScheduledQueryRulesAlertV2) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this MonitorScheduledQueryRulesAlertV2. +func (mg *MonitorScheduledQueryRulesAlertV2) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this MonitorScheduledQueryRulesAlertV2. +func (mg *MonitorScheduledQueryRulesAlertV2) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this MonitorScheduledQueryRulesLog. +func (mg *MonitorScheduledQueryRulesLog) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this MonitorScheduledQueryRulesLog. 
+func (mg *MonitorScheduledQueryRulesLog) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this MonitorScheduledQueryRulesLog. +func (mg *MonitorScheduledQueryRulesLog) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this MonitorScheduledQueryRulesLog. +func (mg *MonitorScheduledQueryRulesLog) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this MonitorScheduledQueryRulesLog. +func (mg *MonitorScheduledQueryRulesLog) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this MonitorScheduledQueryRulesLog. +func (mg *MonitorScheduledQueryRulesLog) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this MonitorScheduledQueryRulesLog. +func (mg *MonitorScheduledQueryRulesLog) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this MonitorScheduledQueryRulesLog. +func (mg *MonitorScheduledQueryRulesLog) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this MonitorScheduledQueryRulesLog. +func (mg *MonitorScheduledQueryRulesLog) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this MonitorScheduledQueryRulesLog. +func (mg *MonitorScheduledQueryRulesLog) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this MonitorScheduledQueryRulesLog. 
+func (mg *MonitorScheduledQueryRulesLog) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this MonitorScheduledQueryRulesLog. +func (mg *MonitorScheduledQueryRulesLog) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/insights/v1beta2/zz_generated.managedlist.go b/apis/insights/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..636e24978 --- /dev/null +++ b/apis/insights/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,107 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this ApplicationInsightsStandardWebTestList. +func (l *ApplicationInsightsStandardWebTestList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ApplicationInsightsWorkbookList. +func (l *ApplicationInsightsWorkbookList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this MonitorActionGroupList. +func (l *MonitorActionGroupList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this MonitorActivityLogAlertList. +func (l *MonitorActivityLogAlertList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this MonitorAutoscaleSettingList. 
+func (l *MonitorAutoscaleSettingList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this MonitorDataCollectionRuleList. +func (l *MonitorDataCollectionRuleList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this MonitorDiagnosticSettingList. +func (l *MonitorDiagnosticSettingList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this MonitorMetricAlertList. +func (l *MonitorMetricAlertList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this MonitorScheduledQueryRulesAlertList. +func (l *MonitorScheduledQueryRulesAlertList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this MonitorScheduledQueryRulesAlertV2List. +func (l *MonitorScheduledQueryRulesAlertV2List) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this MonitorScheduledQueryRulesLogList. 
+func (l *MonitorScheduledQueryRulesLogList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/insights/v1beta2/zz_generated.resolvers.go b/apis/insights/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..d1eaf8f0d --- /dev/null +++ b/apis/insights/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,1186 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + rconfig "github.com/upbound/provider-azure/apis/rconfig" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + apisresolver "github.com/upbound/provider-azure/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *ApplicationInsightsStandardWebTest) ResolveReferences( // ResolveReferences of this ApplicationInsightsStandardWebTest. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("insights.azure.upbound.io", "v1beta1", "ApplicationInsights", "ApplicationInsightsList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ApplicationInsightsID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.ApplicationInsightsIDRef, + Selector: mg.Spec.ForProvider.ApplicationInsightsIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ApplicationInsightsID") + } + mg.Spec.ForProvider.ApplicationInsightsID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ApplicationInsightsIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = 
apisresolver.GetManagedResource("insights.azure.upbound.io", "v1beta1", "ApplicationInsights", "ApplicationInsightsList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ApplicationInsightsID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.ApplicationInsightsIDRef, + Selector: mg.Spec.InitProvider.ApplicationInsightsIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ApplicationInsightsID") + } + mg.Spec.InitProvider.ApplicationInsightsID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ApplicationInsightsIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this ApplicationInsightsWorkbook. +func (mg *ApplicationInsightsWorkbook) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + 
mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ResourceGroupNameRef, + Selector: mg.Spec.InitProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ResourceGroupName") + } + mg.Spec.InitProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this MonitorActionGroup. +func (mg *MonitorActionGroup) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + 
mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this MonitorActivityLogAlert. +func (mg *MonitorActivityLogAlert) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + + for i3 := 0; i3 < len(mg.Spec.ForProvider.Action); i3++ { + { + m, l, err = apisresolver.GetManagedResource("insights.azure.upbound.io", "v1beta2", "MonitorActionGroup", "MonitorActionGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Action[i3].ActionGroupID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.Action[i3].ActionGroupIDRef, + Selector: mg.Spec.ForProvider.Action[i3].ActionGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Action[i3].ActionGroupID") + } + mg.Spec.ForProvider.Action[i3].ActionGroupID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Action[i3].ActionGroupIDRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.Criteria != nil { + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Criteria.ResourceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.Criteria.ResourceIDRef, + Selector: 
mg.Spec.ForProvider.Criteria.ResourceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Criteria.ResourceID") + } + mg.Spec.ForProvider.Criteria.ResourceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Criteria.ResourceIDRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.Scopes), + Extract: rconfig.ExtractResourceID(), + References: mg.Spec.ForProvider.ScopesRefs, + Selector: mg.Spec.ForProvider.ScopesSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Scopes") + } + mg.Spec.ForProvider.Scopes = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.ScopesRefs = mrsp.ResolvedReferences + + for i3 := 0; 
i3 < len(mg.Spec.InitProvider.Action); i3++ { + { + m, l, err = apisresolver.GetManagedResource("insights.azure.upbound.io", "v1beta2", "MonitorActionGroup", "MonitorActionGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Action[i3].ActionGroupID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.Action[i3].ActionGroupIDRef, + Selector: mg.Spec.InitProvider.Action[i3].ActionGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Action[i3].ActionGroupID") + } + mg.Spec.InitProvider.Action[i3].ActionGroupID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Action[i3].ActionGroupIDRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.Criteria != nil { + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Criteria.ResourceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.Criteria.ResourceIDRef, + Selector: mg.Spec.InitProvider.Criteria.ResourceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Criteria.ResourceID") + } + mg.Spec.InitProvider.Criteria.ResourceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Criteria.ResourceIDRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + 
return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ResourceGroupNameRef, + Selector: mg.Spec.InitProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ResourceGroupName") + } + mg.Spec.InitProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.Scopes), + Extract: rconfig.ExtractResourceID(), + References: mg.Spec.InitProvider.ScopesRefs, + Selector: mg.Spec.InitProvider.ScopesSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Scopes") + } + mg.Spec.InitProvider.Scopes = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.ScopesRefs = mrsp.ResolvedReferences + + return nil +} + +// ResolveReferences of this MonitorAutoscaleSetting. 
+func (mg *MonitorAutoscaleSetting) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + for i3 := 0; i3 < len(mg.Spec.ForProvider.Profile); i3++ { + for i4 := 0; i4 < len(mg.Spec.ForProvider.Profile[i3].Rule); i4++ { + if mg.Spec.ForProvider.Profile[i3].Rule[i4].MetricTrigger != nil { + { + m, l, err = apisresolver.GetManagedResource("compute.azure.upbound.io", "v1beta2", "LinuxVirtualMachineScaleSet", "LinuxVirtualMachineScaleSetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Profile[i3].Rule[i4].MetricTrigger.MetricResourceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.Profile[i3].Rule[i4].MetricTrigger.MetricResourceIDRef, + Selector: mg.Spec.ForProvider.Profile[i3].Rule[i4].MetricTrigger.MetricResourceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Profile[i3].Rule[i4].MetricTrigger.MetricResourceID") + } + mg.Spec.ForProvider.Profile[i3].Rule[i4].MetricTrigger.MetricResourceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Profile[i3].Rule[i4].MetricTrigger.MetricResourceIDRef = rsp.ResolvedReference + + } + } + } + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: 
mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("compute.azure.upbound.io", "v1beta2", "LinuxVirtualMachineScaleSet", "LinuxVirtualMachineScaleSetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.TargetResourceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.TargetResourceIDRef, + Selector: mg.Spec.ForProvider.TargetResourceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.TargetResourceID") + } + mg.Spec.ForProvider.TargetResourceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.TargetResourceIDRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.InitProvider.Profile); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.Profile[i3].Rule); i4++ { + if mg.Spec.InitProvider.Profile[i3].Rule[i4].MetricTrigger != nil { + { + m, l, err = apisresolver.GetManagedResource("compute.azure.upbound.io", "v1beta2", "LinuxVirtualMachineScaleSet", "LinuxVirtualMachineScaleSetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Profile[i3].Rule[i4].MetricTrigger.MetricResourceID), + Extract: resource.ExtractResourceID(), + Reference: 
mg.Spec.InitProvider.Profile[i3].Rule[i4].MetricTrigger.MetricResourceIDRef, + Selector: mg.Spec.InitProvider.Profile[i3].Rule[i4].MetricTrigger.MetricResourceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Profile[i3].Rule[i4].MetricTrigger.MetricResourceID") + } + mg.Spec.InitProvider.Profile[i3].Rule[i4].MetricTrigger.MetricResourceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Profile[i3].Rule[i4].MetricTrigger.MetricResourceIDRef = rsp.ResolvedReference + + } + } + } + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ResourceGroupNameRef, + Selector: mg.Spec.InitProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ResourceGroupName") + } + mg.Spec.InitProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("compute.azure.upbound.io", "v1beta2", "LinuxVirtualMachineScaleSet", "LinuxVirtualMachineScaleSetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.TargetResourceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.TargetResourceIDRef, + Selector: 
mg.Spec.InitProvider.TargetResourceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.TargetResourceID") + } + mg.Spec.InitProvider.TargetResourceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.TargetResourceIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this MonitorDataCollectionRule. +func (mg *MonitorDataCollectionRule) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("insights.azure.upbound.io", "v1beta1", "MonitorDataCollectionEndpoint", "MonitorDataCollectionEndpointList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DataCollectionEndpointID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.DataCollectionEndpointIDRef, + Selector: mg.Spec.ForProvider.DataCollectionEndpointIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DataCollectionEndpointID") + } + mg.Spec.ForProvider.DataCollectionEndpointID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DataCollectionEndpointIDRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.Destinations != nil { + if mg.Spec.ForProvider.Destinations.EventHub != nil { + { + m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta2", "EventHub", "EventHubList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, 
reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Destinations.EventHub.EventHubID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.Destinations.EventHub.EventHubIDRef, + Selector: mg.Spec.ForProvider.Destinations.EventHub.EventHubIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Destinations.EventHub.EventHubID") + } + mg.Spec.ForProvider.Destinations.EventHub.EventHubID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Destinations.EventHub.EventHubIDRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.Destinations != nil { + for i4 := 0; i4 < len(mg.Spec.ForProvider.Destinations.LogAnalytics); i4++ { + { + m, l, err = apisresolver.GetManagedResource("operationalinsights.azure.upbound.io", "v1beta2", "Workspace", "WorkspaceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Destinations.LogAnalytics[i4].WorkspaceResourceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.Destinations.LogAnalytics[i4].WorkspaceResourceIDRef, + Selector: mg.Spec.ForProvider.Destinations.LogAnalytics[i4].WorkspaceResourceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Destinations.LogAnalytics[i4].WorkspaceResourceID") + } + mg.Spec.ForProvider.Destinations.LogAnalytics[i4].WorkspaceResourceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Destinations.LogAnalytics[i4].WorkspaceResourceIDRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.Destinations != nil { + for i4 := 0; i4 < len(mg.Spec.ForProvider.Destinations.StorageBlob); i4++ { + { + m, l, err = 
apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Container", "ContainerList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Destinations.StorageBlob[i4].ContainerName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.Destinations.StorageBlob[i4].ContainerNameRef, + Selector: mg.Spec.ForProvider.Destinations.StorageBlob[i4].ContainerNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Destinations.StorageBlob[i4].ContainerName") + } + mg.Spec.ForProvider.Destinations.StorageBlob[i4].ContainerName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Destinations.StorageBlob[i4].ContainerNameRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.Destinations != nil { + for i4 := 0; i4 < len(mg.Spec.ForProvider.Destinations.StorageBlob); i4++ { + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Destinations.StorageBlob[i4].StorageAccountID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.Destinations.StorageBlob[i4].StorageAccountIDRef, + Selector: mg.Spec.ForProvider.Destinations.StorageBlob[i4].StorageAccountIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Destinations.StorageBlob[i4].StorageAccountID") + } + mg.Spec.ForProvider.Destinations.StorageBlob[i4].StorageAccountID = 
reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Destinations.StorageBlob[i4].StorageAccountIDRef = rsp.ResolvedReference + + } + } + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("insights.azure.upbound.io", "v1beta1", "MonitorDataCollectionEndpoint", "MonitorDataCollectionEndpointList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DataCollectionEndpointID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.DataCollectionEndpointIDRef, + Selector: mg.Spec.InitProvider.DataCollectionEndpointIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.DataCollectionEndpointID") + } + mg.Spec.InitProvider.DataCollectionEndpointID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DataCollectionEndpointIDRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.Destinations != nil { + if 
mg.Spec.InitProvider.Destinations.EventHub != nil { + { + m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta2", "EventHub", "EventHubList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Destinations.EventHub.EventHubID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.Destinations.EventHub.EventHubIDRef, + Selector: mg.Spec.InitProvider.Destinations.EventHub.EventHubIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Destinations.EventHub.EventHubID") + } + mg.Spec.InitProvider.Destinations.EventHub.EventHubID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Destinations.EventHub.EventHubIDRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.Destinations != nil { + for i4 := 0; i4 < len(mg.Spec.InitProvider.Destinations.LogAnalytics); i4++ { + { + m, l, err = apisresolver.GetManagedResource("operationalinsights.azure.upbound.io", "v1beta2", "Workspace", "WorkspaceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Destinations.LogAnalytics[i4].WorkspaceResourceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.Destinations.LogAnalytics[i4].WorkspaceResourceIDRef, + Selector: mg.Spec.InitProvider.Destinations.LogAnalytics[i4].WorkspaceResourceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Destinations.LogAnalytics[i4].WorkspaceResourceID") + } + 
mg.Spec.InitProvider.Destinations.LogAnalytics[i4].WorkspaceResourceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Destinations.LogAnalytics[i4].WorkspaceResourceIDRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.Destinations != nil { + for i4 := 0; i4 < len(mg.Spec.InitProvider.Destinations.StorageBlob); i4++ { + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Container", "ContainerList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Destinations.StorageBlob[i4].ContainerName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.Destinations.StorageBlob[i4].ContainerNameRef, + Selector: mg.Spec.InitProvider.Destinations.StorageBlob[i4].ContainerNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Destinations.StorageBlob[i4].ContainerName") + } + mg.Spec.InitProvider.Destinations.StorageBlob[i4].ContainerName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Destinations.StorageBlob[i4].ContainerNameRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.Destinations != nil { + for i4 := 0; i4 < len(mg.Spec.InitProvider.Destinations.StorageBlob); i4++ { + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Destinations.StorageBlob[i4].StorageAccountID), + Extract: resource.ExtractResourceID(), + Reference: 
mg.Spec.InitProvider.Destinations.StorageBlob[i4].StorageAccountIDRef, + Selector: mg.Spec.InitProvider.Destinations.StorageBlob[i4].StorageAccountIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Destinations.StorageBlob[i4].StorageAccountID") + } + mg.Spec.InitProvider.Destinations.StorageBlob[i4].StorageAccountID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Destinations.StorageBlob[i4].StorageAccountIDRef = rsp.ResolvedReference + + } + } + + return nil +} + +// ResolveReferences of this MonitorDiagnosticSetting. +func (mg *MonitorDiagnosticSetting) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StorageAccountID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.StorageAccountIDRef, + Selector: mg.Spec.ForProvider.StorageAccountIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StorageAccountID") + } + mg.Spec.ForProvider.StorageAccountID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StorageAccountIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, 
reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StorageAccountID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.StorageAccountIDRef, + Selector: mg.Spec.InitProvider.StorageAccountIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StorageAccountID") + } + mg.Spec.InitProvider.StorageAccountID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StorageAccountIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this MonitorMetricAlert. +func (mg *MonitorMetricAlert) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + + for i3 := 0; i3 < len(mg.Spec.ForProvider.Action); i3++ { + { + m, l, err = apisresolver.GetManagedResource("insights.azure.upbound.io", "v1beta2", "MonitorActionGroup", "MonitorActionGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Action[i3].ActionGroupID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.Action[i3].ActionGroupIDRef, + Selector: mg.Spec.ForProvider.Action[i3].ActionGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Action[i3].ActionGroupID") + } + mg.Spec.ForProvider.Action[i3].ActionGroupID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Action[i3].ActionGroupIDRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", 
"ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.Scopes), + Extract: rconfig.ExtractResourceID(), + References: mg.Spec.ForProvider.ScopesRefs, + Selector: mg.Spec.ForProvider.ScopesSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Scopes") + } + mg.Spec.ForProvider.Scopes = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.ScopesRefs = mrsp.ResolvedReferences + + for i3 := 0; i3 < len(mg.Spec.InitProvider.Action); i3++ { + { + m, l, err = apisresolver.GetManagedResource("insights.azure.upbound.io", "v1beta2", "MonitorActionGroup", "MonitorActionGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.InitProvider.Action[i3].ActionGroupID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.Action[i3].ActionGroupIDRef, + Selector: mg.Spec.InitProvider.Action[i3].ActionGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Action[i3].ActionGroupID") + } + mg.Spec.InitProvider.Action[i3].ActionGroupID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Action[i3].ActionGroupIDRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.Scopes), + Extract: rconfig.ExtractResourceID(), + References: mg.Spec.InitProvider.ScopesRefs, + Selector: mg.Spec.InitProvider.ScopesSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Scopes") + } + mg.Spec.InitProvider.Scopes = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.ScopesRefs = mrsp.ResolvedReferences + + return nil +} + +// ResolveReferences of this MonitorScheduledQueryRulesAlert. 
+func (mg *MonitorScheduledQueryRulesAlert) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + + if mg.Spec.ForProvider.Action != nil { + { + m, l, err = apisresolver.GetManagedResource("insights.azure.upbound.io", "v1beta2", "MonitorActionGroup", "MonitorActionGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.Action.ActionGroup), + Extract: rconfig.ExtractResourceID(), + References: mg.Spec.ForProvider.Action.ActionGroupRefs, + Selector: mg.Spec.ForProvider.Action.ActionGroupSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Action.ActionGroup") + } + mg.Spec.ForProvider.Action.ActionGroup = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.Action.ActionGroupRefs = mrsp.ResolvedReferences + + } + { + m, l, err = apisresolver.GetManagedResource("insights.azure.upbound.io", "v1beta1", "ApplicationInsights", "ApplicationInsightsList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DataSourceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.DataSourceIDRef, + Selector: mg.Spec.ForProvider.DataSourceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DataSourceID") + } + mg.Spec.ForProvider.DataSourceID = 
reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DataSourceIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.Action != nil { + { + m, l, err = apisresolver.GetManagedResource("insights.azure.upbound.io", "v1beta2", "MonitorActionGroup", "MonitorActionGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.Action.ActionGroup), + Extract: rconfig.ExtractResourceID(), + References: mg.Spec.InitProvider.Action.ActionGroupRefs, + Selector: mg.Spec.InitProvider.Action.ActionGroupSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Action.ActionGroup") + } + mg.Spec.InitProvider.Action.ActionGroup = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.Action.ActionGroupRefs = mrsp.ResolvedReferences + + } + { + m, l, err = apisresolver.GetManagedResource("insights.azure.upbound.io", "v1beta1", "ApplicationInsights", 
"ApplicationInsightsList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DataSourceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.DataSourceIDRef, + Selector: mg.Spec.InitProvider.DataSourceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.DataSourceID") + } + mg.Spec.InitProvider.DataSourceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DataSourceIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ResourceGroupNameRef, + Selector: mg.Spec.InitProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ResourceGroupName") + } + mg.Spec.InitProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this MonitorScheduledQueryRulesAlertV2. 
+func (mg *MonitorScheduledQueryRulesAlertV2) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("insights.azure.upbound.io", "v1beta1", "ApplicationInsights", "ApplicationInsightsList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.Scopes), + Extract: rconfig.ExtractResourceID(), + References: mg.Spec.ForProvider.ScopesRefs, + Selector: mg.Spec.ForProvider.ScopesSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Scopes") + } + mg.Spec.ForProvider.Scopes = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.ScopesRefs = mrsp.ResolvedReferences + { + m, l, err = 
apisresolver.GetManagedResource("insights.azure.upbound.io", "v1beta1", "ApplicationInsights", "ApplicationInsightsList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.Scopes), + Extract: rconfig.ExtractResourceID(), + References: mg.Spec.InitProvider.ScopesRefs, + Selector: mg.Spec.InitProvider.ScopesSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Scopes") + } + mg.Spec.InitProvider.Scopes = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.ScopesRefs = mrsp.ResolvedReferences + + return nil +} + +// ResolveReferences of this MonitorScheduledQueryRulesLog. +func (mg *MonitorScheduledQueryRulesLog) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("operationalinsights.azure.upbound.io", "v1beta2", "Workspace", "WorkspaceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DataSourceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.DataSourceIDRef, + Selector: mg.Spec.ForProvider.DataSourceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DataSourceID") + } + mg.Spec.ForProvider.DataSourceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DataSourceIDRef = rsp.ResolvedReference + { + m, l, err = 
apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("operationalinsights.azure.upbound.io", "v1beta2", "Workspace", "WorkspaceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DataSourceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.DataSourceIDRef, + Selector: mg.Spec.InitProvider.DataSourceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.DataSourceID") + } + mg.Spec.InitProvider.DataSourceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DataSourceIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.InitProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ResourceGroupNameRef, + Selector: mg.Spec.InitProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ResourceGroupName") + } + mg.Spec.InitProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/insights/v1beta2/zz_groupversion_info.go b/apis/insights/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..67b629ed2 --- /dev/null +++ b/apis/insights/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=insights.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "insights.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/insights/v1beta2/zz_monitoractiongroup_terraformed.go b/apis/insights/v1beta2/zz_monitoractiongroup_terraformed.go new file mode 100755 index 000000000..53d9decda --- /dev/null +++ b/apis/insights/v1beta2/zz_monitoractiongroup_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this MonitorActionGroup +func (mg *MonitorActionGroup) GetTerraformResourceType() string { + return "azurerm_monitor_action_group" +} + +// GetConnectionDetailsMapping for this MonitorActionGroup +func (tr *MonitorActionGroup) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this MonitorActionGroup +func (tr *MonitorActionGroup) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this MonitorActionGroup +func (tr *MonitorActionGroup) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this MonitorActionGroup +func (tr *MonitorActionGroup) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this MonitorActionGroup +func (tr *MonitorActionGroup) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + 
base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this MonitorActionGroup +func (tr *MonitorActionGroup) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this MonitorActionGroup +func (tr *MonitorActionGroup) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this MonitorActionGroup +func (tr *MonitorActionGroup) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this MonitorActionGroup using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *MonitorActionGroup) LateInitialize(attrs []byte) (bool, error) { + params := &MonitorActionGroupParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *MonitorActionGroup) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/insights/v1beta2/zz_monitoractiongroup_types.go b/apis/insights/v1beta2/zz_monitoractiongroup_types.go new file mode 100755 index 000000000..b0047738b --- /dev/null +++ b/apis/insights/v1beta2/zz_monitoractiongroup_types.go @@ -0,0 +1,854 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AADAuthInitParameters struct { + + // The identifier URI for AAD auth. + IdentifierURI *string `json:"identifierUri,omitempty" tf:"identifier_uri,omitempty"` + + // The webhook application object Id for AAD auth. + ObjectID *string `json:"objectId,omitempty" tf:"object_id,omitempty"` + + // The tenant id for AAD auth. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` +} + +type AADAuthObservation struct { + + // The identifier URI for AAD auth. + IdentifierURI *string `json:"identifierUri,omitempty" tf:"identifier_uri,omitempty"` + + // The webhook application object Id for AAD auth. 
+ ObjectID *string `json:"objectId,omitempty" tf:"object_id,omitempty"` + + // The tenant id for AAD auth. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` +} + +type AADAuthParameters struct { + + // The identifier URI for AAD auth. + // +kubebuilder:validation:Optional + IdentifierURI *string `json:"identifierUri,omitempty" tf:"identifier_uri,omitempty"` + + // The webhook application object Id for AAD auth. + // +kubebuilder:validation:Optional + ObjectID *string `json:"objectId" tf:"object_id,omitempty"` + + // The tenant id for AAD auth. + // +kubebuilder:validation:Optional + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` +} + +type ArmRoleReceiverInitParameters struct { + + // The name of the ARM role receiver. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The arm role id. + RoleID *string `json:"roleId,omitempty" tf:"role_id,omitempty"` + + // Enables or disables the common alert schema. + UseCommonAlertSchema *bool `json:"useCommonAlertSchema,omitempty" tf:"use_common_alert_schema,omitempty"` +} + +type ArmRoleReceiverObservation struct { + + // The name of the ARM role receiver. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The arm role id. + RoleID *string `json:"roleId,omitempty" tf:"role_id,omitempty"` + + // Enables or disables the common alert schema. + UseCommonAlertSchema *bool `json:"useCommonAlertSchema,omitempty" tf:"use_common_alert_schema,omitempty"` +} + +type ArmRoleReceiverParameters struct { + + // The name of the ARM role receiver. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The arm role id. + // +kubebuilder:validation:Optional + RoleID *string `json:"roleId" tf:"role_id,omitempty"` + + // Enables or disables the common alert schema. 
+ // +kubebuilder:validation:Optional + UseCommonAlertSchema *bool `json:"useCommonAlertSchema,omitempty" tf:"use_common_alert_schema,omitempty"` +} + +type AutomationRunBookReceiverInitParameters struct { + + // The automation account ID which holds this runbook and authenticates to Azure resources. + AutomationAccountID *string `json:"automationAccountId,omitempty" tf:"automation_account_id,omitempty"` + + // Indicates whether this instance is global runbook. + IsGlobalRunBook *bool `json:"isGlobalRunbook,omitempty" tf:"is_global_runbook,omitempty"` + + // The name of the automation runbook receiver. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The name for this runbook. + RunBookName *string `json:"runbookName,omitempty" tf:"runbook_name,omitempty"` + + // The URI where webhooks should be sent. + ServiceURI *string `json:"serviceUri,omitempty" tf:"service_uri,omitempty"` + + // Enables or disables the common alert schema. + UseCommonAlertSchema *bool `json:"useCommonAlertSchema,omitempty" tf:"use_common_alert_schema,omitempty"` + + // The resource id for webhook linked to this runbook. + WebhookResourceID *string `json:"webhookResourceId,omitempty" tf:"webhook_resource_id,omitempty"` +} + +type AutomationRunBookReceiverObservation struct { + + // The automation account ID which holds this runbook and authenticates to Azure resources. + AutomationAccountID *string `json:"automationAccountId,omitempty" tf:"automation_account_id,omitempty"` + + // Indicates whether this instance is global runbook. + IsGlobalRunBook *bool `json:"isGlobalRunbook,omitempty" tf:"is_global_runbook,omitempty"` + + // The name of the automation runbook receiver. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The name for this runbook. + RunBookName *string `json:"runbookName,omitempty" tf:"runbook_name,omitempty"` + + // The URI where webhooks should be sent. 
+ ServiceURI *string `json:"serviceUri,omitempty" tf:"service_uri,omitempty"` + + // Enables or disables the common alert schema. + UseCommonAlertSchema *bool `json:"useCommonAlertSchema,omitempty" tf:"use_common_alert_schema,omitempty"` + + // The resource id for webhook linked to this runbook. + WebhookResourceID *string `json:"webhookResourceId,omitempty" tf:"webhook_resource_id,omitempty"` +} + +type AutomationRunBookReceiverParameters struct { + + // The automation account ID which holds this runbook and authenticates to Azure resources. + // +kubebuilder:validation:Optional + AutomationAccountID *string `json:"automationAccountId" tf:"automation_account_id,omitempty"` + + // Indicates whether this instance is global runbook. + // +kubebuilder:validation:Optional + IsGlobalRunBook *bool `json:"isGlobalRunbook" tf:"is_global_runbook,omitempty"` + + // The name of the automation runbook receiver. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The name for this runbook. + // +kubebuilder:validation:Optional + RunBookName *string `json:"runbookName" tf:"runbook_name,omitempty"` + + // The URI where webhooks should be sent. + // +kubebuilder:validation:Optional + ServiceURI *string `json:"serviceUri" tf:"service_uri,omitempty"` + + // Enables or disables the common alert schema. + // +kubebuilder:validation:Optional + UseCommonAlertSchema *bool `json:"useCommonAlertSchema,omitempty" tf:"use_common_alert_schema,omitempty"` + + // The resource id for webhook linked to this runbook. + // +kubebuilder:validation:Optional + WebhookResourceID *string `json:"webhookResourceId" tf:"webhook_resource_id,omitempty"` +} + +type AzureAppPushReceiverInitParameters struct { + + // The email address of the user signed into the mobile app who will receive push notifications from this receiver. + EmailAddress *string `json:"emailAddress,omitempty" tf:"email_address,omitempty"` + + // The name of the Azure app push receiver. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type AzureAppPushReceiverObservation struct { + + // The email address of the user signed into the mobile app who will receive push notifications from this receiver. + EmailAddress *string `json:"emailAddress,omitempty" tf:"email_address,omitempty"` + + // The name of the Azure app push receiver. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type AzureAppPushReceiverParameters struct { + + // The email address of the user signed into the mobile app who will receive push notifications from this receiver. + // +kubebuilder:validation:Optional + EmailAddress *string `json:"emailAddress" tf:"email_address,omitempty"` + + // The name of the Azure app push receiver. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type AzureFunctionReceiverInitParameters struct { + + // The Azure resource ID of the function app. + FunctionAppResourceID *string `json:"functionAppResourceId,omitempty" tf:"function_app_resource_id,omitempty"` + + // The function name in the function app. + FunctionName *string `json:"functionName,omitempty" tf:"function_name,omitempty"` + + // The HTTP trigger url where HTTP request sent to. + HTTPTriggerURL *string `json:"httpTriggerUrl,omitempty" tf:"http_trigger_url,omitempty"` + + // The name of the Azure Function receiver. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Enables or disables the common alert schema. + UseCommonAlertSchema *bool `json:"useCommonAlertSchema,omitempty" tf:"use_common_alert_schema,omitempty"` +} + +type AzureFunctionReceiverObservation struct { + + // The Azure resource ID of the function app. + FunctionAppResourceID *string `json:"functionAppResourceId,omitempty" tf:"function_app_resource_id,omitempty"` + + // The function name in the function app. 
+ FunctionName *string `json:"functionName,omitempty" tf:"function_name,omitempty"` + + // The HTTP trigger url where HTTP request sent to. + HTTPTriggerURL *string `json:"httpTriggerUrl,omitempty" tf:"http_trigger_url,omitempty"` + + // The name of the Azure Function receiver. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Enables or disables the common alert schema. + UseCommonAlertSchema *bool `json:"useCommonAlertSchema,omitempty" tf:"use_common_alert_schema,omitempty"` +} + +type AzureFunctionReceiverParameters struct { + + // The Azure resource ID of the function app. + // +kubebuilder:validation:Optional + FunctionAppResourceID *string `json:"functionAppResourceId" tf:"function_app_resource_id,omitempty"` + + // The function name in the function app. + // +kubebuilder:validation:Optional + FunctionName *string `json:"functionName" tf:"function_name,omitempty"` + + // The HTTP trigger url where HTTP request sent to. + // +kubebuilder:validation:Optional + HTTPTriggerURL *string `json:"httpTriggerUrl" tf:"http_trigger_url,omitempty"` + + // The name of the Azure Function receiver. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Enables or disables the common alert schema. + // +kubebuilder:validation:Optional + UseCommonAlertSchema *bool `json:"useCommonAlertSchema,omitempty" tf:"use_common_alert_schema,omitempty"` +} + +type EmailReceiverInitParameters struct { + + // The email address of this receiver. + EmailAddress *string `json:"emailAddress,omitempty" tf:"email_address,omitempty"` + + // The name of the email receiver. Names must be unique (case-insensitive) across all receivers within an action group. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Enables or disables the common alert schema. 
+ UseCommonAlertSchema *bool `json:"useCommonAlertSchema,omitempty" tf:"use_common_alert_schema,omitempty"` +} + +type EmailReceiverObservation struct { + + // The email address of this receiver. + EmailAddress *string `json:"emailAddress,omitempty" tf:"email_address,omitempty"` + + // The name of the email receiver. Names must be unique (case-insensitive) across all receivers within an action group. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Enables or disables the common alert schema. + UseCommonAlertSchema *bool `json:"useCommonAlertSchema,omitempty" tf:"use_common_alert_schema,omitempty"` +} + +type EmailReceiverParameters struct { + + // The email address of this receiver. + // +kubebuilder:validation:Optional + EmailAddress *string `json:"emailAddress" tf:"email_address,omitempty"` + + // The name of the email receiver. Names must be unique (case-insensitive) across all receivers within an action group. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Enables or disables the common alert schema. + // +kubebuilder:validation:Optional + UseCommonAlertSchema *bool `json:"useCommonAlertSchema,omitempty" tf:"use_common_alert_schema,omitempty"` +} + +type EventHubReceiverInitParameters struct { + + // The resource ID of the respective Event Hub. + EventHubID *string `json:"eventHubId,omitempty" tf:"event_hub_id,omitempty"` + + // The name of the specific Event Hub queue. + EventHubName *string `json:"eventHubName,omitempty" tf:"event_hub_name,omitempty"` + + // The namespace name of the Event Hub. + EventHubNamespace *string `json:"eventHubNamespace,omitempty" tf:"event_hub_namespace,omitempty"` + + // The name of the EventHub Receiver, must be unique within action group. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The ID for the subscription containing this Event Hub. Default to the subscription ID of the Action Group. 
+ SubscriptionID *string `json:"subscriptionId,omitempty" tf:"subscription_id,omitempty"` + + // The Tenant ID for the subscription containing this Event Hub. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Indicates whether to use common alert schema. + UseCommonAlertSchema *bool `json:"useCommonAlertSchema,omitempty" tf:"use_common_alert_schema,omitempty"` +} + +type EventHubReceiverObservation struct { + + // The resource ID of the respective Event Hub. + EventHubID *string `json:"eventHubId,omitempty" tf:"event_hub_id,omitempty"` + + // The name of the specific Event Hub queue. + EventHubName *string `json:"eventHubName,omitempty" tf:"event_hub_name,omitempty"` + + // The namespace name of the Event Hub. + EventHubNamespace *string `json:"eventHubNamespace,omitempty" tf:"event_hub_namespace,omitempty"` + + // The name of the EventHub Receiver, must be unique within action group. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The ID for the subscription containing this Event Hub. Default to the subscription ID of the Action Group. + SubscriptionID *string `json:"subscriptionId,omitempty" tf:"subscription_id,omitempty"` + + // The Tenant ID for the subscription containing this Event Hub. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Indicates whether to use common alert schema. + UseCommonAlertSchema *bool `json:"useCommonAlertSchema,omitempty" tf:"use_common_alert_schema,omitempty"` +} + +type EventHubReceiverParameters struct { + + // The resource ID of the respective Event Hub. + // +kubebuilder:validation:Optional + EventHubID *string `json:"eventHubId,omitempty" tf:"event_hub_id,omitempty"` + + // The name of the specific Event Hub queue. + // +kubebuilder:validation:Optional + EventHubName *string `json:"eventHubName,omitempty" tf:"event_hub_name,omitempty"` + + // The namespace name of the Event Hub. 
+ // +kubebuilder:validation:Optional + EventHubNamespace *string `json:"eventHubNamespace,omitempty" tf:"event_hub_namespace,omitempty"` + + // The name of the EventHub Receiver, must be unique within action group. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The ID for the subscription containing this Event Hub. Default to the subscription ID of the Action Group. + // +kubebuilder:validation:Optional + SubscriptionID *string `json:"subscriptionId,omitempty" tf:"subscription_id,omitempty"` + + // The Tenant ID for the subscription containing this Event Hub. + // +kubebuilder:validation:Optional + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Indicates whether to use common alert schema. + // +kubebuilder:validation:Optional + UseCommonAlertSchema *bool `json:"useCommonAlertSchema,omitempty" tf:"use_common_alert_schema,omitempty"` +} + +type ItsmReceiverInitParameters struct { + + // The unique connection identifier of the ITSM connection. + ConnectionID *string `json:"connectionId,omitempty" tf:"connection_id,omitempty"` + + // The name of the ITSM receiver. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The region of the workspace. + Region *string `json:"region,omitempty" tf:"region,omitempty"` + + // A JSON blob for the configurations of the ITSM action. CreateMultipleWorkItems option will be part of this blob as well. + TicketConfiguration *string `json:"ticketConfiguration,omitempty" tf:"ticket_configuration,omitempty"` + + // The Azure Log Analytics workspace ID where this connection is defined. Format is |, for example 00000000-0000-0000-0000-000000000000|00000000-0000-0000-0000-000000000000. + WorkspaceID *string `json:"workspaceId,omitempty" tf:"workspace_id,omitempty"` +} + +type ItsmReceiverObservation struct { + + // The unique connection identifier of the ITSM connection. 
+ ConnectionID *string `json:"connectionId,omitempty" tf:"connection_id,omitempty"` + + // The name of the ITSM receiver. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The region of the workspace. + Region *string `json:"region,omitempty" tf:"region,omitempty"` + + // A JSON blob for the configurations of the ITSM action. CreateMultipleWorkItems option will be part of this blob as well. + TicketConfiguration *string `json:"ticketConfiguration,omitempty" tf:"ticket_configuration,omitempty"` + + // The Azure Log Analytics workspace ID where this connection is defined. Format is |, for example 00000000-0000-0000-0000-000000000000|00000000-0000-0000-0000-000000000000. + WorkspaceID *string `json:"workspaceId,omitempty" tf:"workspace_id,omitempty"` +} + +type ItsmReceiverParameters struct { + + // The unique connection identifier of the ITSM connection. + // +kubebuilder:validation:Optional + ConnectionID *string `json:"connectionId" tf:"connection_id,omitempty"` + + // The name of the ITSM receiver. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The region of the workspace. + // +kubebuilder:validation:Optional + Region *string `json:"region" tf:"region,omitempty"` + + // A JSON blob for the configurations of the ITSM action. CreateMultipleWorkItems option will be part of this blob as well. + // +kubebuilder:validation:Optional + TicketConfiguration *string `json:"ticketConfiguration" tf:"ticket_configuration,omitempty"` + + // The Azure Log Analytics workspace ID where this connection is defined. Format is |, for example 00000000-0000-0000-0000-000000000000|00000000-0000-0000-0000-000000000000. + // +kubebuilder:validation:Optional + WorkspaceID *string `json:"workspaceId" tf:"workspace_id,omitempty"` +} + +type LogicAppReceiverInitParameters struct { + + // The callback url where HTTP request sent to. 
+ CallbackURL *string `json:"callbackUrl,omitempty" tf:"callback_url,omitempty"` + + // The name of the logic app receiver. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Azure resource ID of the logic app. + ResourceID *string `json:"resourceId,omitempty" tf:"resource_id,omitempty"` + + // Enables or disables the common alert schema. + UseCommonAlertSchema *bool `json:"useCommonAlertSchema,omitempty" tf:"use_common_alert_schema,omitempty"` +} + +type LogicAppReceiverObservation struct { + + // The callback url where HTTP request sent to. + CallbackURL *string `json:"callbackUrl,omitempty" tf:"callback_url,omitempty"` + + // The name of the logic app receiver. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Azure resource ID of the logic app. + ResourceID *string `json:"resourceId,omitempty" tf:"resource_id,omitempty"` + + // Enables or disables the common alert schema. + UseCommonAlertSchema *bool `json:"useCommonAlertSchema,omitempty" tf:"use_common_alert_schema,omitempty"` +} + +type LogicAppReceiverParameters struct { + + // The callback url where HTTP request sent to. + // +kubebuilder:validation:Optional + CallbackURL *string `json:"callbackUrl" tf:"callback_url,omitempty"` + + // The name of the logic app receiver. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The Azure resource ID of the logic app. + // +kubebuilder:validation:Optional + ResourceID *string `json:"resourceId" tf:"resource_id,omitempty"` + + // Enables or disables the common alert schema. + // +kubebuilder:validation:Optional + UseCommonAlertSchema *bool `json:"useCommonAlertSchema,omitempty" tf:"use_common_alert_schema,omitempty"` +} + +type MonitorActionGroupInitParameters struct { + + // One or more arm_role_receiver blocks as defined below. 
+ ArmRoleReceiver []ArmRoleReceiverInitParameters `json:"armRoleReceiver,omitempty" tf:"arm_role_receiver,omitempty"` + + // One or more automation_runbook_receiver blocks as defined below. + AutomationRunBookReceiver []AutomationRunBookReceiverInitParameters `json:"automationRunbookReceiver,omitempty" tf:"automation_runbook_receiver,omitempty"` + + // One or more azure_app_push_receiver blocks as defined below. + AzureAppPushReceiver []AzureAppPushReceiverInitParameters `json:"azureAppPushReceiver,omitempty" tf:"azure_app_push_receiver,omitempty"` + + // One or more azure_function_receiver blocks as defined below. + AzureFunctionReceiver []AzureFunctionReceiverInitParameters `json:"azureFunctionReceiver,omitempty" tf:"azure_function_receiver,omitempty"` + + // One or more email_receiver blocks as defined below. + EmailReceiver []EmailReceiverInitParameters `json:"emailReceiver,omitempty" tf:"email_receiver,omitempty"` + + // Whether this action group is enabled. If an action group is not enabled, then none of its receivers will receive communications. Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // One or more event_hub_receiver blocks as defined below. + EventHubReceiver []EventHubReceiverInitParameters `json:"eventHubReceiver,omitempty" tf:"event_hub_receiver,omitempty"` + + // One or more itsm_receiver blocks as defined below. + ItsmReceiver []ItsmReceiverInitParameters `json:"itsmReceiver,omitempty" tf:"itsm_receiver,omitempty"` + + // The Azure Region where the Action Group should exist. Changing this forces a new Action Group to be created. Defaults to global. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // One or more logic_app_receiver blocks as defined below. + LogicAppReceiver []LogicAppReceiverInitParameters `json:"logicAppReceiver,omitempty" tf:"logic_app_receiver,omitempty"` + + // One or more sms_receiver blocks as defined below. 
+ SMSReceiver []SMSReceiverInitParameters `json:"smsReceiver,omitempty" tf:"sms_receiver,omitempty"` + + // The short name of the action group. This will be used in SMS messages. + ShortName *string `json:"shortName,omitempty" tf:"short_name,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // One or more voice_receiver blocks as defined below. + VoiceReceiver []VoiceReceiverInitParameters `json:"voiceReceiver,omitempty" tf:"voice_receiver,omitempty"` + + // One or more webhook_receiver blocks as defined below. + WebhookReceiver []WebhookReceiverInitParameters `json:"webhookReceiver,omitempty" tf:"webhook_receiver,omitempty"` +} + +type MonitorActionGroupObservation struct { + + // One or more arm_role_receiver blocks as defined below. + ArmRoleReceiver []ArmRoleReceiverObservation `json:"armRoleReceiver,omitempty" tf:"arm_role_receiver,omitempty"` + + // One or more automation_runbook_receiver blocks as defined below. + AutomationRunBookReceiver []AutomationRunBookReceiverObservation `json:"automationRunbookReceiver,omitempty" tf:"automation_runbook_receiver,omitempty"` + + // One or more azure_app_push_receiver blocks as defined below. + AzureAppPushReceiver []AzureAppPushReceiverObservation `json:"azureAppPushReceiver,omitempty" tf:"azure_app_push_receiver,omitempty"` + + // One or more azure_function_receiver blocks as defined below. + AzureFunctionReceiver []AzureFunctionReceiverObservation `json:"azureFunctionReceiver,omitempty" tf:"azure_function_receiver,omitempty"` + + // One or more email_receiver blocks as defined below. + EmailReceiver []EmailReceiverObservation `json:"emailReceiver,omitempty" tf:"email_receiver,omitempty"` + + // Whether this action group is enabled. If an action group is not enabled, then none of its receivers will receive communications. Defaults to true. 
+ Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // One or more event_hub_receiver blocks as defined below. + EventHubReceiver []EventHubReceiverObservation `json:"eventHubReceiver,omitempty" tf:"event_hub_receiver,omitempty"` + + // The ID of the Action Group. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // One or more itsm_receiver blocks as defined below. + ItsmReceiver []ItsmReceiverObservation `json:"itsmReceiver,omitempty" tf:"itsm_receiver,omitempty"` + + // The Azure Region where the Action Group should exist. Changing this forces a new Action Group to be created. Defaults to global. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // One or more logic_app_receiver blocks as defined below. + LogicAppReceiver []LogicAppReceiverObservation `json:"logicAppReceiver,omitempty" tf:"logic_app_receiver,omitempty"` + + // The name of the resource group in which to create the Action Group instance. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // One or more sms_receiver blocks as defined below. + SMSReceiver []SMSReceiverObservation `json:"smsReceiver,omitempty" tf:"sms_receiver,omitempty"` + + // The short name of the action group. This will be used in SMS messages. + ShortName *string `json:"shortName,omitempty" tf:"short_name,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // One or more voice_receiver blocks as defined below. + VoiceReceiver []VoiceReceiverObservation `json:"voiceReceiver,omitempty" tf:"voice_receiver,omitempty"` + + // One or more webhook_receiver blocks as defined below. 
+ WebhookReceiver []WebhookReceiverObservation `json:"webhookReceiver,omitempty" tf:"webhook_receiver,omitempty"` +} + +type MonitorActionGroupParameters struct { + + // One or more arm_role_receiver blocks as defined below. + // +kubebuilder:validation:Optional + ArmRoleReceiver []ArmRoleReceiverParameters `json:"armRoleReceiver,omitempty" tf:"arm_role_receiver,omitempty"` + + // One or more automation_runbook_receiver blocks as defined below. + // +kubebuilder:validation:Optional + AutomationRunBookReceiver []AutomationRunBookReceiverParameters `json:"automationRunbookReceiver,omitempty" tf:"automation_runbook_receiver,omitempty"` + + // One or more azure_app_push_receiver blocks as defined below. + // +kubebuilder:validation:Optional + AzureAppPushReceiver []AzureAppPushReceiverParameters `json:"azureAppPushReceiver,omitempty" tf:"azure_app_push_receiver,omitempty"` + + // One or more azure_function_receiver blocks as defined below. + // +kubebuilder:validation:Optional + AzureFunctionReceiver []AzureFunctionReceiverParameters `json:"azureFunctionReceiver,omitempty" tf:"azure_function_receiver,omitempty"` + + // One or more email_receiver blocks as defined below. + // +kubebuilder:validation:Optional + EmailReceiver []EmailReceiverParameters `json:"emailReceiver,omitempty" tf:"email_receiver,omitempty"` + + // Whether this action group is enabled. If an action group is not enabled, then none of its receivers will receive communications. Defaults to true. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // One or more event_hub_receiver blocks as defined below. + // +kubebuilder:validation:Optional + EventHubReceiver []EventHubReceiverParameters `json:"eventHubReceiver,omitempty" tf:"event_hub_receiver,omitempty"` + + // One or more itsm_receiver blocks as defined below. 
+ // +kubebuilder:validation:Optional + ItsmReceiver []ItsmReceiverParameters `json:"itsmReceiver,omitempty" tf:"itsm_receiver,omitempty"` + + // The Azure Region where the Action Group should exist. Changing this forces a new Action Group to be created. Defaults to global. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // One or more logic_app_receiver blocks as defined below. + // +kubebuilder:validation:Optional + LogicAppReceiver []LogicAppReceiverParameters `json:"logicAppReceiver,omitempty" tf:"logic_app_receiver,omitempty"` + + // The name of the resource group in which to create the Action Group instance. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // One or more sms_receiver blocks as defined below. + // +kubebuilder:validation:Optional + SMSReceiver []SMSReceiverParameters `json:"smsReceiver,omitempty" tf:"sms_receiver,omitempty"` + + // The short name of the action group. This will be used in SMS messages. + // +kubebuilder:validation:Optional + ShortName *string `json:"shortName,omitempty" tf:"short_name,omitempty"` + + // A mapping of tags to assign to the resource. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // One or more voice_receiver blocks as defined below. + // +kubebuilder:validation:Optional + VoiceReceiver []VoiceReceiverParameters `json:"voiceReceiver,omitempty" tf:"voice_receiver,omitempty"` + + // One or more webhook_receiver blocks as defined below. + // +kubebuilder:validation:Optional + WebhookReceiver []WebhookReceiverParameters `json:"webhookReceiver,omitempty" tf:"webhook_receiver,omitempty"` +} + +type SMSReceiverInitParameters struct { + + // The country code of the SMS receiver. + CountryCode *string `json:"countryCode,omitempty" tf:"country_code,omitempty"` + + // The name of the SMS receiver. Names must be unique (case-insensitive) across all receivers within an action group. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The phone number of the SMS receiver. + PhoneNumber *string `json:"phoneNumber,omitempty" tf:"phone_number,omitempty"` +} + +type SMSReceiverObservation struct { + + // The country code of the SMS receiver. + CountryCode *string `json:"countryCode,omitempty" tf:"country_code,omitempty"` + + // The name of the SMS receiver. Names must be unique (case-insensitive) across all receivers within an action group. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The phone number of the SMS receiver. + PhoneNumber *string `json:"phoneNumber,omitempty" tf:"phone_number,omitempty"` +} + +type SMSReceiverParameters struct { + + // The country code of the SMS receiver. + // +kubebuilder:validation:Optional + CountryCode *string `json:"countryCode" tf:"country_code,omitempty"` + + // The name of the SMS receiver. Names must be unique (case-insensitive) across all receivers within an action group. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The phone number of the SMS receiver. 
+ // +kubebuilder:validation:Optional + PhoneNumber *string `json:"phoneNumber" tf:"phone_number,omitempty"` +} + +type VoiceReceiverInitParameters struct { + + // The country code of the voice receiver. + CountryCode *string `json:"countryCode,omitempty" tf:"country_code,omitempty"` + + // The name of the voice receiver. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The phone number of the voice receiver. + PhoneNumber *string `json:"phoneNumber,omitempty" tf:"phone_number,omitempty"` +} + +type VoiceReceiverObservation struct { + + // The country code of the voice receiver. + CountryCode *string `json:"countryCode,omitempty" tf:"country_code,omitempty"` + + // The name of the voice receiver. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The phone number of the voice receiver. + PhoneNumber *string `json:"phoneNumber,omitempty" tf:"phone_number,omitempty"` +} + +type VoiceReceiverParameters struct { + + // The country code of the voice receiver. + // +kubebuilder:validation:Optional + CountryCode *string `json:"countryCode" tf:"country_code,omitempty"` + + // The name of the voice receiver. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The phone number of the voice receiver. + // +kubebuilder:validation:Optional + PhoneNumber *string `json:"phoneNumber" tf:"phone_number,omitempty"` +} + +type WebhookReceiverInitParameters struct { + + // The aad_auth block as defined below. + AADAuth *AADAuthInitParameters `json:"aadAuth,omitempty" tf:"aad_auth,omitempty"` + + // The name of the webhook receiver. Names must be unique (case-insensitive) across all receivers within an action group. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The URI where webhooks should be sent. + ServiceURI *string `json:"serviceUri,omitempty" tf:"service_uri,omitempty"` + + // Enables or disables the common alert schema. 
+ UseCommonAlertSchema *bool `json:"useCommonAlertSchema,omitempty" tf:"use_common_alert_schema,omitempty"` +} + +type WebhookReceiverObservation struct { + + // The aad_auth block as defined below. + AADAuth *AADAuthObservation `json:"aadAuth,omitempty" tf:"aad_auth,omitempty"` + + // The name of the webhook receiver. Names must be unique (case-insensitive) across all receivers within an action group. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The URI where webhooks should be sent. + ServiceURI *string `json:"serviceUri,omitempty" tf:"service_uri,omitempty"` + + // Enables or disables the common alert schema. + UseCommonAlertSchema *bool `json:"useCommonAlertSchema,omitempty" tf:"use_common_alert_schema,omitempty"` +} + +type WebhookReceiverParameters struct { + + // The aad_auth block as defined below. + // +kubebuilder:validation:Optional + AADAuth *AADAuthParameters `json:"aadAuth,omitempty" tf:"aad_auth,omitempty"` + + // The name of the webhook receiver. Names must be unique (case-insensitive) across all receivers within an action group. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The URI where webhooks should be sent. + // +kubebuilder:validation:Optional + ServiceURI *string `json:"serviceUri" tf:"service_uri,omitempty"` + + // Enables or disables the common alert schema. + // +kubebuilder:validation:Optional + UseCommonAlertSchema *bool `json:"useCommonAlertSchema,omitempty" tf:"use_common_alert_schema,omitempty"` +} + +// MonitorActionGroupSpec defines the desired state of MonitorActionGroup +type MonitorActionGroupSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider MonitorActionGroupParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. 
The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider MonitorActionGroupInitParameters `json:"initProvider,omitempty"` +} + +// MonitorActionGroupStatus defines the observed state of MonitorActionGroup. +type MonitorActionGroupStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider MonitorActionGroupObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// MonitorActionGroup is the Schema for the MonitorActionGroups API. Manages an Action Group within Azure Monitor +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type MonitorActionGroup struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.shortName) || (has(self.initProvider) && has(self.initProvider.shortName))",message="spec.forProvider.shortName is a required parameter" + Spec MonitorActionGroupSpec `json:"spec"` + Status MonitorActionGroupStatus `json:"status,omitempty"` +} 
+ +// +kubebuilder:object:root=true + +// MonitorActionGroupList contains a list of MonitorActionGroups +type MonitorActionGroupList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []MonitorActionGroup `json:"items"` +} + +// Repository type metadata. +var ( + MonitorActionGroup_Kind = "MonitorActionGroup" + MonitorActionGroup_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: MonitorActionGroup_Kind}.String() + MonitorActionGroup_KindAPIVersion = MonitorActionGroup_Kind + "." + CRDGroupVersion.String() + MonitorActionGroup_GroupVersionKind = CRDGroupVersion.WithKind(MonitorActionGroup_Kind) +) + +func init() { + SchemeBuilder.Register(&MonitorActionGroup{}, &MonitorActionGroupList{}) +} diff --git a/apis/insights/v1beta2/zz_monitoractivitylogalert_terraformed.go b/apis/insights/v1beta2/zz_monitoractivitylogalert_terraformed.go new file mode 100755 index 000000000..9da5021de --- /dev/null +++ b/apis/insights/v1beta2/zz_monitoractivitylogalert_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this MonitorActivityLogAlert +func (mg *MonitorActivityLogAlert) GetTerraformResourceType() string { + return "azurerm_monitor_activity_log_alert" +} + +// GetConnectionDetailsMapping for this MonitorActivityLogAlert +func (tr *MonitorActivityLogAlert) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this MonitorActivityLogAlert +func (tr *MonitorActivityLogAlert) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this MonitorActivityLogAlert +func (tr *MonitorActivityLogAlert) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this MonitorActivityLogAlert +func (tr *MonitorActivityLogAlert) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this MonitorActivityLogAlert +func (tr *MonitorActivityLogAlert) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this MonitorActivityLogAlert +func (tr *MonitorActivityLogAlert) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this 
MonitorActivityLogAlert
+func (tr *MonitorActivityLogAlert) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this MonitorActivityLogAlert
+func (tr *MonitorActivityLogAlert) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this MonitorActivityLogAlert using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *MonitorActivityLogAlert) LateInitialize(attrs []byte) (bool, error) { + params := &MonitorActivityLogAlertParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *MonitorActivityLogAlert) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/insights/v1beta2/zz_monitoractivitylogalert_types.go b/apis/insights/v1beta2/zz_monitoractivitylogalert_types.go new file mode 100755 index 000000000..0cdd43f5d --- /dev/null +++ b/apis/insights/v1beta2/zz_monitoractivitylogalert_types.go @@ -0,0 +1,609 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ActionInitParameters struct { + + // The ID of the Action Group can be sourced from the . + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/insights/v1beta2.MonitorActionGroup + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + ActionGroupID *string `json:"actionGroupId,omitempty" tf:"action_group_id,omitempty"` + + // Reference to a MonitorActionGroup in insights to populate actionGroupId. + // +kubebuilder:validation:Optional + ActionGroupIDRef *v1.Reference `json:"actionGroupIdRef,omitempty" tf:"-"` + + // Selector for a MonitorActionGroup in insights to populate actionGroupId. 
+ // +kubebuilder:validation:Optional + ActionGroupIDSelector *v1.Selector `json:"actionGroupIdSelector,omitempty" tf:"-"` + + // The map of custom string properties to include with the post operation. These data are appended to the webhook payload. + // +mapType=granular + WebhookProperties map[string]*string `json:"webhookProperties,omitempty" tf:"webhook_properties,omitempty"` +} + +type ActionObservation struct { + + // The ID of the Action Group can be sourced from the . + ActionGroupID *string `json:"actionGroupId,omitempty" tf:"action_group_id,omitempty"` + + // The map of custom string properties to include with the post operation. These data are appended to the webhook payload. + // +mapType=granular + WebhookProperties map[string]*string `json:"webhookProperties,omitempty" tf:"webhook_properties,omitempty"` +} + +type ActionParameters struct { + + // The ID of the Action Group can be sourced from the . + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/insights/v1beta2.MonitorActionGroup + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + ActionGroupID *string `json:"actionGroupId,omitempty" tf:"action_group_id,omitempty"` + + // Reference to a MonitorActionGroup in insights to populate actionGroupId. + // +kubebuilder:validation:Optional + ActionGroupIDRef *v1.Reference `json:"actionGroupIdRef,omitempty" tf:"-"` + + // Selector for a MonitorActionGroup in insights to populate actionGroupId. + // +kubebuilder:validation:Optional + ActionGroupIDSelector *v1.Selector `json:"actionGroupIdSelector,omitempty" tf:"-"` + + // The map of custom string properties to include with the post operation. These data are appended to the webhook payload. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + WebhookProperties map[string]*string `json:"webhookProperties,omitempty" tf:"webhook_properties,omitempty"` +} + +type CriteriaInitParameters struct { + + // The email address or Azure Active Directory identifier of the user who performed the operation. + Caller *string `json:"caller,omitempty" tf:"caller,omitempty"` + + // The category of the operation. Possible values are Administrative, Autoscale, Policy, Recommendation, ResourceHealth, Security and ServiceHealth. + Category *string `json:"category,omitempty" tf:"category,omitempty"` + + // The severity level of the event. Possible values are Verbose, Informational, Warning, Error, and Critical. + Level *string `json:"level,omitempty" tf:"level,omitempty"` + + // A list of severity level of the event. Possible values are Verbose, Informational, Warning, Error, and Critical. + Levels []*string `json:"levels,omitempty" tf:"levels,omitempty"` + + // The Resource Manager Role-Based Access Control operation name. Supported operation should be of the form: //. + OperationName *string `json:"operationName,omitempty" tf:"operation_name,omitempty"` + + // The recommendation category of the event. Possible values are Cost, Reliability, OperationalExcellence, HighAvailability and Performance. It is only allowed when category is Recommendation. + RecommendationCategory *string `json:"recommendationCategory,omitempty" tf:"recommendation_category,omitempty"` + + // The recommendation impact of the event. Possible values are High, Medium and Low. It is only allowed when category is Recommendation. + RecommendationImpact *string `json:"recommendationImpact,omitempty" tf:"recommendation_impact,omitempty"` + + // The recommendation type of the event. It is only allowed when category is Recommendation. 
+ RecommendationType *string `json:"recommendationType,omitempty" tf:"recommendation_type,omitempty"` + + // The name of resource group monitored by the activity log alert. + ResourceGroup *string `json:"resourceGroup,omitempty" tf:"resource_group,omitempty"` + + // A list of names of resource groups monitored by the activity log alert. + ResourceGroups []*string `json:"resourceGroups,omitempty" tf:"resource_groups,omitempty"` + + // A block to define fine grain resource health settings. + ResourceHealth *ResourceHealthInitParameters `json:"resourceHealth,omitempty" tf:"resource_health,omitempty"` + + // The specific resource monitored by the activity log alert. It should be within one of the scopes. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + ResourceID *string `json:"resourceId,omitempty" tf:"resource_id,omitempty"` + + // Reference to a Account in storage to populate resourceId. + // +kubebuilder:validation:Optional + ResourceIDRef *v1.Reference `json:"resourceIdRef,omitempty" tf:"-"` + + // Selector for a Account in storage to populate resourceId. + // +kubebuilder:validation:Optional + ResourceIDSelector *v1.Selector `json:"resourceIdSelector,omitempty" tf:"-"` + + // A list of specific resources monitored by the activity log alert. It should be within one of the scopes. + ResourceIds []*string `json:"resourceIds,omitempty" tf:"resource_ids,omitempty"` + + // The name of the resource provider monitored by the activity log alert. + ResourceProvider *string `json:"resourceProvider,omitempty" tf:"resource_provider,omitempty"` + + // A list of names of resource providers monitored by the activity log alert. + ResourceProviders []*string `json:"resourceProviders,omitempty" tf:"resource_providers,omitempty"` + + // The resource type monitored by the activity log alert. 
+ ResourceType *string `json:"resourceType,omitempty" tf:"resource_type,omitempty"` + + // A list of resource types monitored by the activity log alert. + ResourceTypes []*string `json:"resourceTypes,omitempty" tf:"resource_types,omitempty"` + + // A block to define fine grain service health settings. + ServiceHealth *ServiceHealthInitParameters `json:"serviceHealth,omitempty" tf:"service_health,omitempty"` + + // The status of the event. For example, Started, Failed, or Succeeded. + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // A list of status of the event. For example, Started, Failed, or Succeeded. + Statuses []*string `json:"statuses,omitempty" tf:"statuses,omitempty"` + + // The sub status of the event. + SubStatus *string `json:"subStatus,omitempty" tf:"sub_status,omitempty"` + + // A list of sub status of the event. + SubStatuses []*string `json:"subStatuses,omitempty" tf:"sub_statuses,omitempty"` +} + +type CriteriaObservation struct { + + // The email address or Azure Active Directory identifier of the user who performed the operation. + Caller *string `json:"caller,omitempty" tf:"caller,omitempty"` + + // The category of the operation. Possible values are Administrative, Autoscale, Policy, Recommendation, ResourceHealth, Security and ServiceHealth. + Category *string `json:"category,omitempty" tf:"category,omitempty"` + + // The severity level of the event. Possible values are Verbose, Informational, Warning, Error, and Critical. + Level *string `json:"level,omitempty" tf:"level,omitempty"` + + // A list of severity level of the event. Possible values are Verbose, Informational, Warning, Error, and Critical. + Levels []*string `json:"levels,omitempty" tf:"levels,omitempty"` + + // The Resource Manager Role-Based Access Control operation name. Supported operation should be of the form: //. + OperationName *string `json:"operationName,omitempty" tf:"operation_name,omitempty"` + + // The recommendation category of the event. 
Possible values are Cost, Reliability, OperationalExcellence, HighAvailability and Performance. It is only allowed when category is Recommendation. + RecommendationCategory *string `json:"recommendationCategory,omitempty" tf:"recommendation_category,omitempty"` + + // The recommendation impact of the event. Possible values are High, Medium and Low. It is only allowed when category is Recommendation. + RecommendationImpact *string `json:"recommendationImpact,omitempty" tf:"recommendation_impact,omitempty"` + + // The recommendation type of the event. It is only allowed when category is Recommendation. + RecommendationType *string `json:"recommendationType,omitempty" tf:"recommendation_type,omitempty"` + + // The name of resource group monitored by the activity log alert. + ResourceGroup *string `json:"resourceGroup,omitempty" tf:"resource_group,omitempty"` + + // A list of names of resource groups monitored by the activity log alert. + ResourceGroups []*string `json:"resourceGroups,omitempty" tf:"resource_groups,omitempty"` + + // A block to define fine grain resource health settings. + ResourceHealth *ResourceHealthObservation `json:"resourceHealth,omitempty" tf:"resource_health,omitempty"` + + // The specific resource monitored by the activity log alert. It should be within one of the scopes. + ResourceID *string `json:"resourceId,omitempty" tf:"resource_id,omitempty"` + + // A list of specific resources monitored by the activity log alert. It should be within one of the scopes. + ResourceIds []*string `json:"resourceIds,omitempty" tf:"resource_ids,omitempty"` + + // The name of the resource provider monitored by the activity log alert. + ResourceProvider *string `json:"resourceProvider,omitempty" tf:"resource_provider,omitempty"` + + // A list of names of resource providers monitored by the activity log alert. 
+ ResourceProviders []*string `json:"resourceProviders,omitempty" tf:"resource_providers,omitempty"` + + // The resource type monitored by the activity log alert. + ResourceType *string `json:"resourceType,omitempty" tf:"resource_type,omitempty"` + + // A list of resource types monitored by the activity log alert. + ResourceTypes []*string `json:"resourceTypes,omitempty" tf:"resource_types,omitempty"` + + // A block to define fine grain service health settings. + ServiceHealth *ServiceHealthObservation `json:"serviceHealth,omitempty" tf:"service_health,omitempty"` + + // The status of the event. For example, Started, Failed, or Succeeded. + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // A list of status of the event. For example, Started, Failed, or Succeeded. + Statuses []*string `json:"statuses,omitempty" tf:"statuses,omitempty"` + + // The sub status of the event. + SubStatus *string `json:"subStatus,omitempty" tf:"sub_status,omitempty"` + + // A list of sub status of the event. + SubStatuses []*string `json:"subStatuses,omitempty" tf:"sub_statuses,omitempty"` +} + +type CriteriaParameters struct { + + // The email address or Azure Active Directory identifier of the user who performed the operation. + // +kubebuilder:validation:Optional + Caller *string `json:"caller,omitempty" tf:"caller,omitempty"` + + // The category of the operation. Possible values are Administrative, Autoscale, Policy, Recommendation, ResourceHealth, Security and ServiceHealth. + // +kubebuilder:validation:Optional + Category *string `json:"category" tf:"category,omitempty"` + + // The severity level of the event. Possible values are Verbose, Informational, Warning, Error, and Critical. + // +kubebuilder:validation:Optional + Level *string `json:"level,omitempty" tf:"level,omitempty"` + + // A list of severity level of the event. Possible values are Verbose, Informational, Warning, Error, and Critical. 
+ // +kubebuilder:validation:Optional + Levels []*string `json:"levels,omitempty" tf:"levels,omitempty"` + + // The Resource Manager Role-Based Access Control operation name. Supported operation should be of the form: //. + // +kubebuilder:validation:Optional + OperationName *string `json:"operationName,omitempty" tf:"operation_name,omitempty"` + + // The recommendation category of the event. Possible values are Cost, Reliability, OperationalExcellence, HighAvailability and Performance. It is only allowed when category is Recommendation. + // +kubebuilder:validation:Optional + RecommendationCategory *string `json:"recommendationCategory,omitempty" tf:"recommendation_category,omitempty"` + + // The recommendation impact of the event. Possible values are High, Medium and Low. It is only allowed when category is Recommendation. + // +kubebuilder:validation:Optional + RecommendationImpact *string `json:"recommendationImpact,omitempty" tf:"recommendation_impact,omitempty"` + + // The recommendation type of the event. It is only allowed when category is Recommendation. + // +kubebuilder:validation:Optional + RecommendationType *string `json:"recommendationType,omitempty" tf:"recommendation_type,omitempty"` + + // The name of resource group monitored by the activity log alert. + // +kubebuilder:validation:Optional + ResourceGroup *string `json:"resourceGroup,omitempty" tf:"resource_group,omitempty"` + + // A list of names of resource groups monitored by the activity log alert. + // +kubebuilder:validation:Optional + ResourceGroups []*string `json:"resourceGroups,omitempty" tf:"resource_groups,omitempty"` + + // A block to define fine grain resource health settings. + // +kubebuilder:validation:Optional + ResourceHealth *ResourceHealthParameters `json:"resourceHealth,omitempty" tf:"resource_health,omitempty"` + + // The specific resource monitored by the activity log alert. It should be within one of the scopes. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + ResourceID *string `json:"resourceId,omitempty" tf:"resource_id,omitempty"` + + // Reference to a Account in storage to populate resourceId. + // +kubebuilder:validation:Optional + ResourceIDRef *v1.Reference `json:"resourceIdRef,omitempty" tf:"-"` + + // Selector for a Account in storage to populate resourceId. + // +kubebuilder:validation:Optional + ResourceIDSelector *v1.Selector `json:"resourceIdSelector,omitempty" tf:"-"` + + // A list of specific resources monitored by the activity log alert. It should be within one of the scopes. + // +kubebuilder:validation:Optional + ResourceIds []*string `json:"resourceIds,omitempty" tf:"resource_ids,omitempty"` + + // The name of the resource provider monitored by the activity log alert. + // +kubebuilder:validation:Optional + ResourceProvider *string `json:"resourceProvider,omitempty" tf:"resource_provider,omitempty"` + + // A list of names of resource providers monitored by the activity log alert. + // +kubebuilder:validation:Optional + ResourceProviders []*string `json:"resourceProviders,omitempty" tf:"resource_providers,omitempty"` + + // The resource type monitored by the activity log alert. + // +kubebuilder:validation:Optional + ResourceType *string `json:"resourceType,omitempty" tf:"resource_type,omitempty"` + + // A list of resource types monitored by the activity log alert. + // +kubebuilder:validation:Optional + ResourceTypes []*string `json:"resourceTypes,omitempty" tf:"resource_types,omitempty"` + + // A block to define fine grain service health settings. + // +kubebuilder:validation:Optional + ServiceHealth *ServiceHealthParameters `json:"serviceHealth,omitempty" tf:"service_health,omitempty"` + + // The status of the event. 
For example, Started, Failed, or Succeeded. + // +kubebuilder:validation:Optional + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // A list of status of the event. For example, Started, Failed, or Succeeded. + // +kubebuilder:validation:Optional + Statuses []*string `json:"statuses,omitempty" tf:"statuses,omitempty"` + + // The sub status of the event. + // +kubebuilder:validation:Optional + SubStatus *string `json:"subStatus,omitempty" tf:"sub_status,omitempty"` + + // A list of sub status of the event. + // +kubebuilder:validation:Optional + SubStatuses []*string `json:"subStatuses,omitempty" tf:"sub_statuses,omitempty"` +} + +type MonitorActivityLogAlertInitParameters struct { + + // One or more action blocks as defined below. + Action []ActionInitParameters `json:"action,omitempty" tf:"action,omitempty"` + + // A criteria block as defined below. + Criteria *CriteriaInitParameters `json:"criteria,omitempty" tf:"criteria,omitempty"` + + // The description of this activity log alert. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Should this Activity Log Alert be enabled? Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The name of the activity log alert. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The name of the resource group in which to create the activity log alert instance. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. 
+ // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // The Scope at which the Activity Log should be applied. A list of strings which could be a resource group , or a subscription, or a resource ID (such as a Storage Account). + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +listType=set + Scopes []*string `json:"scopes,omitempty" tf:"scopes,omitempty"` + + // References to ResourceGroup in azure to populate scopes. + // +kubebuilder:validation:Optional + ScopesRefs []v1.Reference `json:"scopesRefs,omitempty" tf:"-"` + + // Selector for a list of ResourceGroup in azure to populate scopes. + // +kubebuilder:validation:Optional + ScopesSelector *v1.Selector `json:"scopesSelector,omitempty" tf:"-"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type MonitorActivityLogAlertObservation struct { + + // One or more action blocks as defined below. + Action []ActionObservation `json:"action,omitempty" tf:"action,omitempty"` + + // A criteria block as defined below. + Criteria *CriteriaObservation `json:"criteria,omitempty" tf:"criteria,omitempty"` + + // The description of this activity log alert. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Should this Activity Log Alert be enabled? Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The ID of the activity log alert. 
+ ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name of the activity log alert. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The name of the resource group in which to create the activity log alert instance. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // The Scope at which the Activity Log should be applied. A list of strings which could be a resource group , or a subscription, or a resource ID (such as a Storage Account). + // +listType=set + Scopes []*string `json:"scopes,omitempty" tf:"scopes,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type MonitorActivityLogAlertParameters struct { + + // One or more action blocks as defined below. + // +kubebuilder:validation:Optional + Action []ActionParameters `json:"action,omitempty" tf:"action,omitempty"` + + // A criteria block as defined below. + // +kubebuilder:validation:Optional + Criteria *CriteriaParameters `json:"criteria,omitempty" tf:"criteria,omitempty"` + + // The description of this activity log alert. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Should this Activity Log Alert be enabled? Defaults to true. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The name of the activity log alert. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The name of the resource group in which to create the activity log alert instance. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // The Scope at which the Activity Log should be applied. A list of strings which could be a resource group , or a subscription, or a resource ID (such as a Storage Account). + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + // +listType=set + Scopes []*string `json:"scopes,omitempty" tf:"scopes,omitempty"` + + // References to ResourceGroup in azure to populate scopes. + // +kubebuilder:validation:Optional + ScopesRefs []v1.Reference `json:"scopesRefs,omitempty" tf:"-"` + + // Selector for a list of ResourceGroup in azure to populate scopes. + // +kubebuilder:validation:Optional + ScopesSelector *v1.Selector `json:"scopesSelector,omitempty" tf:"-"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ResourceHealthInitParameters struct { + + // The current resource health statuses that will log an alert. Possible values are Available, Degraded, Unavailable and Unknown. 
+ // +listType=set + Current []*string `json:"current,omitempty" tf:"current,omitempty"` + + // The previous resource health statuses that will log an alert. Possible values are Available, Degraded, Unavailable and Unknown. + // +listType=set + Previous []*string `json:"previous,omitempty" tf:"previous,omitempty"` + + // The reason that will log an alert. Possible values are PlatformInitiated (such as a problem with the resource in an affected region of an Azure incident), UserInitiated (such as a shutdown request of a VM) and Unknown. + // +listType=set + Reason []*string `json:"reason,omitempty" tf:"reason,omitempty"` +} + +type ResourceHealthObservation struct { + + // The current resource health statuses that will log an alert. Possible values are Available, Degraded, Unavailable and Unknown. + // +listType=set + Current []*string `json:"current,omitempty" tf:"current,omitempty"` + + // The previous resource health statuses that will log an alert. Possible values are Available, Degraded, Unavailable and Unknown. + // +listType=set + Previous []*string `json:"previous,omitempty" tf:"previous,omitempty"` + + // The reason that will log an alert. Possible values are PlatformInitiated (such as a problem with the resource in an affected region of an Azure incident), UserInitiated (such as a shutdown request of a VM) and Unknown. + // +listType=set + Reason []*string `json:"reason,omitempty" tf:"reason,omitempty"` +} + +type ResourceHealthParameters struct { + + // The current resource health statuses that will log an alert. Possible values are Available, Degraded, Unavailable and Unknown. + // +kubebuilder:validation:Optional + // +listType=set + Current []*string `json:"current,omitempty" tf:"current,omitempty"` + + // The previous resource health statuses that will log an alert. Possible values are Available, Degraded, Unavailable and Unknown. 
+ // +kubebuilder:validation:Optional + // +listType=set + Previous []*string `json:"previous,omitempty" tf:"previous,omitempty"` + + // The reason that will log an alert. Possible values are PlatformInitiated (such as a problem with the resource in an affected region of an Azure incident), UserInitiated (such as a shutdown request of a VM) and Unknown. + // +kubebuilder:validation:Optional + // +listType=set + Reason []*string `json:"reason,omitempty" tf:"reason,omitempty"` +} + +type ServiceHealthInitParameters struct { + + // Events this alert will monitor Possible values are Incident, Maintenance, Informational, ActionRequired and Security. + // +listType=set + Events []*string `json:"events,omitempty" tf:"events,omitempty"` + + // Locations this alert will monitor. For example, West Europe. + // +listType=set + Locations []*string `json:"locations,omitempty" tf:"locations,omitempty"` + + // Services this alert will monitor. For example, Activity Logs & Alerts, Action Groups. Defaults to all Services. + // +listType=set + Services []*string `json:"services,omitempty" tf:"services,omitempty"` +} + +type ServiceHealthObservation struct { + + // Events this alert will monitor Possible values are Incident, Maintenance, Informational, ActionRequired and Security. + // +listType=set + Events []*string `json:"events,omitempty" tf:"events,omitempty"` + + // Locations this alert will monitor. For example, West Europe. + // +listType=set + Locations []*string `json:"locations,omitempty" tf:"locations,omitempty"` + + // Services this alert will monitor. For example, Activity Logs & Alerts, Action Groups. Defaults to all Services. + // +listType=set + Services []*string `json:"services,omitempty" tf:"services,omitempty"` +} + +type ServiceHealthParameters struct { + + // Events this alert will monitor Possible values are Incident, Maintenance, Informational, ActionRequired and Security. 
+ // +kubebuilder:validation:Optional + // +listType=set + Events []*string `json:"events,omitempty" tf:"events,omitempty"` + + // Locations this alert will monitor. For example, West Europe. + // +kubebuilder:validation:Optional + // +listType=set + Locations []*string `json:"locations,omitempty" tf:"locations,omitempty"` + + // Services this alert will monitor. For example, Activity Logs & Alerts, Action Groups. Defaults to all Services. + // +kubebuilder:validation:Optional + // +listType=set + Services []*string `json:"services,omitempty" tf:"services,omitempty"` +} + +// MonitorActivityLogAlertSpec defines the desired state of MonitorActivityLogAlert +type MonitorActivityLogAlertSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider MonitorActivityLogAlertParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider MonitorActivityLogAlertInitParameters `json:"initProvider,omitempty"` +} + +// MonitorActivityLogAlertStatus defines the observed state of MonitorActivityLogAlert. +type MonitorActivityLogAlertStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider MonitorActivityLogAlertObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// MonitorActivityLogAlert is the Schema for the MonitorActivityLogAlerts API. 
Manages an Activity Log Alert within Azure Monitor +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type MonitorActivityLogAlert struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.criteria) || (has(self.initProvider) && has(self.initProvider.criteria))",message="spec.forProvider.criteria is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec MonitorActivityLogAlertSpec `json:"spec"` + Status MonitorActivityLogAlertStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// MonitorActivityLogAlertList contains a list of MonitorActivityLogAlerts +type MonitorActivityLogAlertList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []MonitorActivityLogAlert `json:"items"` +} + +// Repository type metadata. 
+var ( + MonitorActivityLogAlert_Kind = "MonitorActivityLogAlert" + MonitorActivityLogAlert_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: MonitorActivityLogAlert_Kind}.String() + MonitorActivityLogAlert_KindAPIVersion = MonitorActivityLogAlert_Kind + "." + CRDGroupVersion.String() + MonitorActivityLogAlert_GroupVersionKind = CRDGroupVersion.WithKind(MonitorActivityLogAlert_Kind) +) + +func init() { + SchemeBuilder.Register(&MonitorActivityLogAlert{}, &MonitorActivityLogAlertList{}) +} diff --git a/apis/insights/v1beta2/zz_monitorautoscalesetting_terraformed.go b/apis/insights/v1beta2/zz_monitorautoscalesetting_terraformed.go new file mode 100755 index 000000000..21067cdc8 --- /dev/null +++ b/apis/insights/v1beta2/zz_monitorautoscalesetting_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this MonitorAutoscaleSetting +func (mg *MonitorAutoscaleSetting) GetTerraformResourceType() string { + return "azurerm_monitor_autoscale_setting" +} + +// GetConnectionDetailsMapping for this MonitorAutoscaleSetting +func (tr *MonitorAutoscaleSetting) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this MonitorAutoscaleSetting +func (tr *MonitorAutoscaleSetting) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this MonitorAutoscaleSetting +func (tr *MonitorAutoscaleSetting) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + 
return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this MonitorAutoscaleSetting +func (tr *MonitorAutoscaleSetting) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this MonitorAutoscaleSetting +func (tr *MonitorAutoscaleSetting) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this MonitorAutoscaleSetting +func (tr *MonitorAutoscaleSetting) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this MonitorAutoscaleSetting +func (tr *MonitorAutoscaleSetting) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this MonitorAutoscaleSetting +func (tr *MonitorAutoscaleSetting) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. 
As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this MonitorAutoscaleSetting using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *MonitorAutoscaleSetting) LateInitialize(attrs []byte) (bool, error) { + params := &MonitorAutoscaleSettingParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *MonitorAutoscaleSetting) GetTerraformSchemaVersion() int { + return 2 +} diff --git a/apis/insights/v1beta2/zz_monitorautoscalesetting_types.go b/apis/insights/v1beta2/zz_monitorautoscalesetting_types.go new file mode 100755 index 000000000..e44a74a22 --- /dev/null +++ b/apis/insights/v1beta2/zz_monitorautoscalesetting_types.go @@ -0,0 +1,791 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CapacityInitParameters struct { + + // The number of instances that are available for scaling if metrics are not available for evaluation. The default is only used if the current instance count is lower than the default. Valid values are between 0 and 1000. + Default *float64 `json:"default,omitempty" tf:"default,omitempty"` + + // The maximum number of instances for this resource. Valid values are between 0 and 1000. + Maximum *float64 `json:"maximum,omitempty" tf:"maximum,omitempty"` + + // The minimum number of instances for this resource. Valid values are between 0 and 1000. + Minimum *float64 `json:"minimum,omitempty" tf:"minimum,omitempty"` +} + +type CapacityObservation struct { + + // The number of instances that are available for scaling if metrics are not available for evaluation. The default is only used if the current instance count is lower than the default. Valid values are between 0 and 1000. + Default *float64 `json:"default,omitempty" tf:"default,omitempty"` + + // The maximum number of instances for this resource. Valid values are between 0 and 1000. + Maximum *float64 `json:"maximum,omitempty" tf:"maximum,omitempty"` + + // The minimum number of instances for this resource. Valid values are between 0 and 1000. + Minimum *float64 `json:"minimum,omitempty" tf:"minimum,omitempty"` +} + +type CapacityParameters struct { + + // The number of instances that are available for scaling if metrics are not available for evaluation. The default is only used if the current instance count is lower than the default. Valid values are between 0 and 1000. + // +kubebuilder:validation:Optional + Default *float64 `json:"default" tf:"default,omitempty"` + + // The maximum number of instances for this resource. Valid values are between 0 and 1000. 
+ // +kubebuilder:validation:Optional + Maximum *float64 `json:"maximum" tf:"maximum,omitempty"` + + // The minimum number of instances for this resource. Valid values are between 0 and 1000. + // +kubebuilder:validation:Optional + Minimum *float64 `json:"minimum" tf:"minimum,omitempty"` +} + +type DimensionsInitParameters struct { + + // Specifies the name of the profile. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies the operator used to compare the metric data and threshold. Possible values are: Equals, NotEquals, GreaterThan, GreaterThanOrEqual, LessThan, LessThanOrEqual. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of dimension values. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type DimensionsObservation struct { + + // Specifies the name of the profile. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies the operator used to compare the metric data and threshold. Possible values are: Equals, NotEquals, GreaterThan, GreaterThanOrEqual, LessThan, LessThanOrEqual. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of dimension values. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type DimensionsParameters struct { + + // Specifies the name of the profile. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Specifies the operator used to compare the metric data and threshold. Possible values are: Equals, NotEquals, GreaterThan, GreaterThanOrEqual, LessThan, LessThanOrEqual. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // A list of dimension values. + // +kubebuilder:validation:Optional + Values []*string `json:"values" tf:"values,omitempty"` +} + +type EmailInitParameters struct { + + // Specifies a list of custom email addresses to which the email notifications will be sent. 
+ CustomEmails []*string `json:"customEmails,omitempty" tf:"custom_emails,omitempty"` + + // Should email notifications be sent to the subscription administrator? Defaults to false. + SendToSubscriptionAdministrator *bool `json:"sendToSubscriptionAdministrator,omitempty" tf:"send_to_subscription_administrator,omitempty"` + + // Should email notifications be sent to the subscription co-administrator? Defaults to false. + SendToSubscriptionCoAdministrator *bool `json:"sendToSubscriptionCoAdministrator,omitempty" tf:"send_to_subscription_co_administrator,omitempty"` +} + +type EmailObservation struct { + + // Specifies a list of custom email addresses to which the email notifications will be sent. + CustomEmails []*string `json:"customEmails,omitempty" tf:"custom_emails,omitempty"` + + // Should email notifications be sent to the subscription administrator? Defaults to false. + SendToSubscriptionAdministrator *bool `json:"sendToSubscriptionAdministrator,omitempty" tf:"send_to_subscription_administrator,omitempty"` + + // Should email notifications be sent to the subscription co-administrator? Defaults to false. + SendToSubscriptionCoAdministrator *bool `json:"sendToSubscriptionCoAdministrator,omitempty" tf:"send_to_subscription_co_administrator,omitempty"` +} + +type EmailParameters struct { + + // Specifies a list of custom email addresses to which the email notifications will be sent. + // +kubebuilder:validation:Optional + CustomEmails []*string `json:"customEmails,omitempty" tf:"custom_emails,omitempty"` + + // Should email notifications be sent to the subscription administrator? Defaults to false. + // +kubebuilder:validation:Optional + SendToSubscriptionAdministrator *bool `json:"sendToSubscriptionAdministrator,omitempty" tf:"send_to_subscription_administrator,omitempty"` + + // Should email notifications be sent to the subscription co-administrator? Defaults to false. 
+ // +kubebuilder:validation:Optional + SendToSubscriptionCoAdministrator *bool `json:"sendToSubscriptionCoAdministrator,omitempty" tf:"send_to_subscription_co_administrator,omitempty"` +} + +type FixedDateInitParameters struct { + + // Specifies the end date for the profile, formatted as an RFC3339 date string. + End *string `json:"end,omitempty" tf:"end,omitempty"` + + // Specifies the start date for the profile, formatted as an RFC3339 date string. + Start *string `json:"start,omitempty" tf:"start,omitempty"` + + // The Time Zone used for the hours field. A list of possible values can be found here. Defaults to UTC. + Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` +} + +type FixedDateObservation struct { + + // Specifies the end date for the profile, formatted as an RFC3339 date string. + End *string `json:"end,omitempty" tf:"end,omitempty"` + + // Specifies the start date for the profile, formatted as an RFC3339 date string. + Start *string `json:"start,omitempty" tf:"start,omitempty"` + + // The Time Zone used for the hours field. A list of possible values can be found here. Defaults to UTC. + Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` +} + +type FixedDateParameters struct { + + // Specifies the end date for the profile, formatted as an RFC3339 date string. + // +kubebuilder:validation:Optional + End *string `json:"end" tf:"end,omitempty"` + + // Specifies the start date for the profile, formatted as an RFC3339 date string. + // +kubebuilder:validation:Optional + Start *string `json:"start" tf:"start,omitempty"` + + // The Time Zone used for the hours field. A list of possible values can be found here. Defaults to UTC. + // +kubebuilder:validation:Optional + Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` +} + +type MetricTriggerInitParameters struct { + + // One or more dimensions block as defined below. 
+ Dimensions []DimensionsInitParameters `json:"dimensions,omitempty" tf:"dimensions,omitempty"` + + // Whether to enable metric divide by instance count. + DivideByInstanceCount *bool `json:"divideByInstanceCount,omitempty" tf:"divide_by_instance_count,omitempty"` + + // The name of the metric that defines what the rule monitors, such as Percentage CPU for Virtual Machine Scale Sets and CpuPercentage for App Service Plan. + MetricName *string `json:"metricName,omitempty" tf:"metric_name,omitempty"` + + // The namespace of the metric that defines what the rule monitors, such as microsoft.compute/virtualmachinescalesets for Virtual Machine Scale Sets. + MetricNamespace *string `json:"metricNamespace,omitempty" tf:"metric_namespace,omitempty"` + + // The ID of the Resource which the Rule monitors. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/compute/v1beta2.LinuxVirtualMachineScaleSet + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + MetricResourceID *string `json:"metricResourceId,omitempty" tf:"metric_resource_id,omitempty"` + + // Reference to a LinuxVirtualMachineScaleSet in compute to populate metricResourceId. + // +kubebuilder:validation:Optional + MetricResourceIDRef *v1.Reference `json:"metricResourceIdRef,omitempty" tf:"-"` + + // Selector for a LinuxVirtualMachineScaleSet in compute to populate metricResourceId. + // +kubebuilder:validation:Optional + MetricResourceIDSelector *v1.Selector `json:"metricResourceIdSelector,omitempty" tf:"-"` + + // Specifies the operator used to compare the metric data and threshold. Possible values are: Equals, NotEquals, GreaterThan, GreaterThanOrEqual, LessThan, LessThanOrEqual. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Specifies how the metrics from multiple instances are combined. Possible values are Average, Max, Min and Sum. 
+ Statistic *string `json:"statistic,omitempty" tf:"statistic,omitempty"` + + // Specifies the threshold of the metric that triggers the scale action. + Threshold *float64 `json:"threshold,omitempty" tf:"threshold,omitempty"` + + // Specifies how the data that's collected should be combined over time. Possible values include Average, Count, Maximum, Minimum, Last and Total. + TimeAggregation *string `json:"timeAggregation,omitempty" tf:"time_aggregation,omitempty"` + + // Specifies the granularity of metrics that the rule monitors, which must be one of the pre-defined values returned from the metric definitions for the metric. This value must be between 1 minute and 12 hours an be formatted as an ISO 8601 string. + TimeGrain *string `json:"timeGrain,omitempty" tf:"time_grain,omitempty"` + + // Specifies the time range for which data is collected, which must be greater than the delay in metric collection (which varies from resource to resource). This value must be between 5 minutes and 12 hours and be formatted as an ISO 8601 string. + TimeWindow *string `json:"timeWindow,omitempty" tf:"time_window,omitempty"` +} + +type MetricTriggerObservation struct { + + // One or more dimensions block as defined below. + Dimensions []DimensionsObservation `json:"dimensions,omitempty" tf:"dimensions,omitempty"` + + // Whether to enable metric divide by instance count. + DivideByInstanceCount *bool `json:"divideByInstanceCount,omitempty" tf:"divide_by_instance_count,omitempty"` + + // The name of the metric that defines what the rule monitors, such as Percentage CPU for Virtual Machine Scale Sets and CpuPercentage for App Service Plan. + MetricName *string `json:"metricName,omitempty" tf:"metric_name,omitempty"` + + // The namespace of the metric that defines what the rule monitors, such as microsoft.compute/virtualmachinescalesets for Virtual Machine Scale Sets. 
+ MetricNamespace *string `json:"metricNamespace,omitempty" tf:"metric_namespace,omitempty"` + + // The ID of the Resource which the Rule monitors. + MetricResourceID *string `json:"metricResourceId,omitempty" tf:"metric_resource_id,omitempty"` + + // Specifies the operator used to compare the metric data and threshold. Possible values are: Equals, NotEquals, GreaterThan, GreaterThanOrEqual, LessThan, LessThanOrEqual. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Specifies how the metrics from multiple instances are combined. Possible values are Average, Max, Min and Sum. + Statistic *string `json:"statistic,omitempty" tf:"statistic,omitempty"` + + // Specifies the threshold of the metric that triggers the scale action. + Threshold *float64 `json:"threshold,omitempty" tf:"threshold,omitempty"` + + // Specifies how the data that's collected should be combined over time. Possible values include Average, Count, Maximum, Minimum, Last and Total. + TimeAggregation *string `json:"timeAggregation,omitempty" tf:"time_aggregation,omitempty"` + + // Specifies the granularity of metrics that the rule monitors, which must be one of the pre-defined values returned from the metric definitions for the metric. This value must be between 1 minute and 12 hours an be formatted as an ISO 8601 string. + TimeGrain *string `json:"timeGrain,omitempty" tf:"time_grain,omitempty"` + + // Specifies the time range for which data is collected, which must be greater than the delay in metric collection (which varies from resource to resource). This value must be between 5 minutes and 12 hours and be formatted as an ISO 8601 string. + TimeWindow *string `json:"timeWindow,omitempty" tf:"time_window,omitempty"` +} + +type MetricTriggerParameters struct { + + // One or more dimensions block as defined below. 
+ // +kubebuilder:validation:Optional + Dimensions []DimensionsParameters `json:"dimensions,omitempty" tf:"dimensions,omitempty"` + + // Whether to enable metric divide by instance count. + // +kubebuilder:validation:Optional + DivideByInstanceCount *bool `json:"divideByInstanceCount,omitempty" tf:"divide_by_instance_count,omitempty"` + + // The name of the metric that defines what the rule monitors, such as Percentage CPU for Virtual Machine Scale Sets and CpuPercentage for App Service Plan. + // +kubebuilder:validation:Optional + MetricName *string `json:"metricName" tf:"metric_name,omitempty"` + + // The namespace of the metric that defines what the rule monitors, such as microsoft.compute/virtualmachinescalesets for Virtual Machine Scale Sets. + // +kubebuilder:validation:Optional + MetricNamespace *string `json:"metricNamespace,omitempty" tf:"metric_namespace,omitempty"` + + // The ID of the Resource which the Rule monitors. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/compute/v1beta2.LinuxVirtualMachineScaleSet + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + MetricResourceID *string `json:"metricResourceId,omitempty" tf:"metric_resource_id,omitempty"` + + // Reference to a LinuxVirtualMachineScaleSet in compute to populate metricResourceId. + // +kubebuilder:validation:Optional + MetricResourceIDRef *v1.Reference `json:"metricResourceIdRef,omitempty" tf:"-"` + + // Selector for a LinuxVirtualMachineScaleSet in compute to populate metricResourceId. + // +kubebuilder:validation:Optional + MetricResourceIDSelector *v1.Selector `json:"metricResourceIdSelector,omitempty" tf:"-"` + + // Specifies the operator used to compare the metric data and threshold. Possible values are: Equals, NotEquals, GreaterThan, GreaterThanOrEqual, LessThan, LessThanOrEqual. 
+ // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // Specifies how the metrics from multiple instances are combined. Possible values are Average, Max, Min and Sum. + // +kubebuilder:validation:Optional + Statistic *string `json:"statistic" tf:"statistic,omitempty"` + + // Specifies the threshold of the metric that triggers the scale action. + // +kubebuilder:validation:Optional + Threshold *float64 `json:"threshold" tf:"threshold,omitempty"` + + // Specifies how the data that's collected should be combined over time. Possible values include Average, Count, Maximum, Minimum, Last and Total. + // +kubebuilder:validation:Optional + TimeAggregation *string `json:"timeAggregation" tf:"time_aggregation,omitempty"` + + // Specifies the granularity of metrics that the rule monitors, which must be one of the pre-defined values returned from the metric definitions for the metric. This value must be between 1 minute and 12 hours an be formatted as an ISO 8601 string. + // +kubebuilder:validation:Optional + TimeGrain *string `json:"timeGrain" tf:"time_grain,omitempty"` + + // Specifies the time range for which data is collected, which must be greater than the delay in metric collection (which varies from resource to resource). This value must be between 5 minutes and 12 hours and be formatted as an ISO 8601 string. + // +kubebuilder:validation:Optional + TimeWindow *string `json:"timeWindow" tf:"time_window,omitempty"` +} + +type MonitorAutoscaleSettingInitParameters struct { + + // Specifies whether automatic scaling is enabled for the target resource. Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Specifies the supported Azure location where the AutoScale Setting should exist. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the AutoScale Setting. 
Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies a notification block as defined below. + Notification *NotificationInitParameters `json:"notification,omitempty" tf:"notification,omitempty"` + + // A predictive block as defined below. + Predictive *PredictiveInitParameters `json:"predictive,omitempty" tf:"predictive,omitempty"` + + // Specifies one or more (up to 20) profile blocks as defined below. + Profile []ProfileInitParameters `json:"profile,omitempty" tf:"profile,omitempty"` + + // The name of the Resource Group in the AutoScale Setting should be created. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies the resource ID of the resource that the autoscale setting should be added to. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/compute/v1beta2.LinuxVirtualMachineScaleSet + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + TargetResourceID *string `json:"targetResourceId,omitempty" tf:"target_resource_id,omitempty"` + + // Reference to a LinuxVirtualMachineScaleSet in compute to populate targetResourceId. + // +kubebuilder:validation:Optional + TargetResourceIDRef *v1.Reference `json:"targetResourceIdRef,omitempty" tf:"-"` + + // Selector for a LinuxVirtualMachineScaleSet in compute to populate targetResourceId. + // +kubebuilder:validation:Optional + TargetResourceIDSelector *v1.Selector `json:"targetResourceIdSelector,omitempty" tf:"-"` +} + +type MonitorAutoscaleSettingObservation struct { + + // Specifies whether automatic scaling is enabled for the target resource. Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The ID of the AutoScale Setting. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies the supported Azure location where the AutoScale Setting should exist. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the AutoScale Setting. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies a notification block as defined below. + Notification *NotificationObservation `json:"notification,omitempty" tf:"notification,omitempty"` + + // A predictive block as defined below. + Predictive *PredictiveObservation `json:"predictive,omitempty" tf:"predictive,omitempty"` + + // Specifies one or more (up to 20) profile blocks as defined below. + Profile []ProfileObservation `json:"profile,omitempty" tf:"profile,omitempty"` + + // The name of the Resource Group in the AutoScale Setting should be created. 
Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies the resource ID of the resource that the autoscale setting should be added to. Changing this forces a new resource to be created. + TargetResourceID *string `json:"targetResourceId,omitempty" tf:"target_resource_id,omitempty"` +} + +type MonitorAutoscaleSettingParameters struct { + + // Specifies whether automatic scaling is enabled for the target resource. Defaults to true. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Specifies the supported Azure location where the AutoScale Setting should exist. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the AutoScale Setting. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies a notification block as defined below. + // +kubebuilder:validation:Optional + Notification *NotificationParameters `json:"notification,omitempty" tf:"notification,omitempty"` + + // A predictive block as defined below. + // +kubebuilder:validation:Optional + Predictive *PredictiveParameters `json:"predictive,omitempty" tf:"predictive,omitempty"` + + // Specifies one or more (up to 20) profile blocks as defined below. + // +kubebuilder:validation:Optional + Profile []ProfileParameters `json:"profile,omitempty" tf:"profile,omitempty"` + + // The name of the Resource Group in the AutoScale Setting should be created. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies the resource ID of the resource that the autoscale setting should be added to. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/compute/v1beta2.LinuxVirtualMachineScaleSet + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + TargetResourceID *string `json:"targetResourceId,omitempty" tf:"target_resource_id,omitempty"` + + // Reference to a LinuxVirtualMachineScaleSet in compute to populate targetResourceId. + // +kubebuilder:validation:Optional + TargetResourceIDRef *v1.Reference `json:"targetResourceIdRef,omitempty" tf:"-"` + + // Selector for a LinuxVirtualMachineScaleSet in compute to populate targetResourceId. + // +kubebuilder:validation:Optional + TargetResourceIDSelector *v1.Selector `json:"targetResourceIdSelector,omitempty" tf:"-"` +} + +type NotificationInitParameters struct { + + // A email block as defined below. + Email *EmailInitParameters `json:"email,omitempty" tf:"email,omitempty"` + + // One or more webhook blocks as defined below. 
+ Webhook []WebhookInitParameters `json:"webhook,omitempty" tf:"webhook,omitempty"` +} + +type NotificationObservation struct { + + // A email block as defined below. + Email *EmailObservation `json:"email,omitempty" tf:"email,omitempty"` + + // One or more webhook blocks as defined below. + Webhook []WebhookObservation `json:"webhook,omitempty" tf:"webhook,omitempty"` +} + +type NotificationParameters struct { + + // A email block as defined below. + // +kubebuilder:validation:Optional + Email *EmailParameters `json:"email,omitempty" tf:"email,omitempty"` + + // One or more webhook blocks as defined below. + // +kubebuilder:validation:Optional + Webhook []WebhookParameters `json:"webhook,omitempty" tf:"webhook,omitempty"` +} + +type PredictiveInitParameters struct { + + // Specifies the amount of time by which instances are launched in advance. It must be between PT1M and PT1H in ISO 8601 format. + LookAheadTime *string `json:"lookAheadTime,omitempty" tf:"look_ahead_time,omitempty"` + + // Specifies the predictive scale mode. Possible values are Enabled or ForecastOnly. + ScaleMode *string `json:"scaleMode,omitempty" tf:"scale_mode,omitempty"` +} + +type PredictiveObservation struct { + + // Specifies the amount of time by which instances are launched in advance. It must be between PT1M and PT1H in ISO 8601 format. + LookAheadTime *string `json:"lookAheadTime,omitempty" tf:"look_ahead_time,omitempty"` + + // Specifies the predictive scale mode. Possible values are Enabled or ForecastOnly. + ScaleMode *string `json:"scaleMode,omitempty" tf:"scale_mode,omitempty"` +} + +type PredictiveParameters struct { + + // Specifies the amount of time by which instances are launched in advance. It must be between PT1M and PT1H in ISO 8601 format. + // +kubebuilder:validation:Optional + LookAheadTime *string `json:"lookAheadTime,omitempty" tf:"look_ahead_time,omitempty"` + + // Specifies the predictive scale mode. Possible values are Enabled or ForecastOnly. 
+ // +kubebuilder:validation:Optional + ScaleMode *string `json:"scaleMode" tf:"scale_mode,omitempty"` +} + +type ProfileInitParameters struct { + + // A capacity block as defined below. + Capacity *CapacityInitParameters `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // A fixed_date block as defined below. This cannot be specified if a recurrence block is specified. + FixedDate *FixedDateInitParameters `json:"fixedDate,omitempty" tf:"fixed_date,omitempty"` + + // Specifies the name of the profile. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A recurrence block as defined below. This cannot be specified if a fixed_date block is specified. + Recurrence *RecurrenceInitParameters `json:"recurrence,omitempty" tf:"recurrence,omitempty"` + + // One or more (up to 10) rule blocks as defined below. + Rule []RuleInitParameters `json:"rule,omitempty" tf:"rule,omitempty"` +} + +type ProfileObservation struct { + + // A capacity block as defined below. + Capacity *CapacityObservation `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // A fixed_date block as defined below. This cannot be specified if a recurrence block is specified. + FixedDate *FixedDateObservation `json:"fixedDate,omitempty" tf:"fixed_date,omitempty"` + + // Specifies the name of the profile. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A recurrence block as defined below. This cannot be specified if a fixed_date block is specified. + Recurrence *RecurrenceObservation `json:"recurrence,omitempty" tf:"recurrence,omitempty"` + + // One or more (up to 10) rule blocks as defined below. + Rule []RuleObservation `json:"rule,omitempty" tf:"rule,omitempty"` +} + +type ProfileParameters struct { + + // A capacity block as defined below. + // +kubebuilder:validation:Optional + Capacity *CapacityParameters `json:"capacity" tf:"capacity,omitempty"` + + // A fixed_date block as defined below. This cannot be specified if a recurrence block is specified. 
+ // +kubebuilder:validation:Optional + FixedDate *FixedDateParameters `json:"fixedDate,omitempty" tf:"fixed_date,omitempty"` + + // Specifies the name of the profile. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // A recurrence block as defined below. This cannot be specified if a fixed_date block is specified. + // +kubebuilder:validation:Optional + Recurrence *RecurrenceParameters `json:"recurrence,omitempty" tf:"recurrence,omitempty"` + + // One or more (up to 10) rule blocks as defined below. + // +kubebuilder:validation:Optional + Rule []RuleParameters `json:"rule,omitempty" tf:"rule,omitempty"` +} + +type RecurrenceInitParameters struct { + + // A list of days that this profile takes effect on. Possible values include Monday, Tuesday, Wednesday, Thursday, Friday, Saturday and Sunday. + Days []*string `json:"days,omitempty" tf:"days,omitempty"` + + // A list containing a single item, which specifies the Hour interval at which this recurrence should be triggered (in 24-hour time). Possible values are from 0 to 23. + Hours []*float64 `json:"hours,omitempty" tf:"hours,omitempty"` + + // A list containing a single item which specifies the Minute interval at which this recurrence should be triggered. + Minutes []*float64 `json:"minutes,omitempty" tf:"minutes,omitempty"` + + // The Time Zone used for the hours field. A list of possible values can be found here. Defaults to UTC. + Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` +} + +type RecurrenceObservation struct { + + // A list of days that this profile takes effect on. Possible values include Monday, Tuesday, Wednesday, Thursday, Friday, Saturday and Sunday. + Days []*string `json:"days,omitempty" tf:"days,omitempty"` + + // A list containing a single item, which specifies the Hour interval at which this recurrence should be triggered (in 24-hour time). Possible values are from 0 to 23. 
+ Hours []*float64 `json:"hours,omitempty" tf:"hours,omitempty"` + + // A list containing a single item which specifies the Minute interval at which this recurrence should be triggered. + Minutes []*float64 `json:"minutes,omitempty" tf:"minutes,omitempty"` + + // The Time Zone used for the hours field. A list of possible values can be found here. Defaults to UTC. + Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` +} + +type RecurrenceParameters struct { + + // A list of days that this profile takes effect on. Possible values include Monday, Tuesday, Wednesday, Thursday, Friday, Saturday and Sunday. + // +kubebuilder:validation:Optional + Days []*string `json:"days" tf:"days,omitempty"` + + // A list containing a single item, which specifies the Hour interval at which this recurrence should be triggered (in 24-hour time). Possible values are from 0 to 23. + // +kubebuilder:validation:Optional + Hours []*float64 `json:"hours" tf:"hours,omitempty"` + + // A list containing a single item which specifies the Minute interval at which this recurrence should be triggered. + // +kubebuilder:validation:Optional + Minutes []*float64 `json:"minutes" tf:"minutes,omitempty"` + + // The Time Zone used for the hours field. A list of possible values can be found here. Defaults to UTC. + // +kubebuilder:validation:Optional + Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` +} + +type RuleInitParameters struct { + + // A metric_trigger block as defined below. + MetricTrigger *MetricTriggerInitParameters `json:"metricTrigger,omitempty" tf:"metric_trigger,omitempty"` + + // A scale_action block as defined below. + ScaleAction *ScaleActionInitParameters `json:"scaleAction,omitempty" tf:"scale_action,omitempty"` +} + +type RuleObservation struct { + + // A metric_trigger block as defined below. + MetricTrigger *MetricTriggerObservation `json:"metricTrigger,omitempty" tf:"metric_trigger,omitempty"` + + // A scale_action block as defined below. 
+ ScaleAction *ScaleActionObservation `json:"scaleAction,omitempty" tf:"scale_action,omitempty"` +} + +type RuleParameters struct { + + // A metric_trigger block as defined below. + // +kubebuilder:validation:Optional + MetricTrigger *MetricTriggerParameters `json:"metricTrigger" tf:"metric_trigger,omitempty"` + + // A scale_action block as defined below. + // +kubebuilder:validation:Optional + ScaleAction *ScaleActionParameters `json:"scaleAction" tf:"scale_action,omitempty"` +} + +type ScaleActionInitParameters struct { + + // The amount of time to wait since the last scaling action before this action occurs. Must be between 1 minute and 1 week and formatted as a ISO 8601 string. + Cooldown *string `json:"cooldown,omitempty" tf:"cooldown,omitempty"` + + // The scale direction. Possible values are Increase and Decrease. + Direction *string `json:"direction,omitempty" tf:"direction,omitempty"` + + // The type of action that should occur. Possible values are ChangeCount, ExactCount, PercentChangeCount and ServiceAllowedNextValue. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // The number of instances involved in the scaling action. + Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type ScaleActionObservation struct { + + // The amount of time to wait since the last scaling action before this action occurs. Must be between 1 minute and 1 week and formatted as a ISO 8601 string. + Cooldown *string `json:"cooldown,omitempty" tf:"cooldown,omitempty"` + + // The scale direction. Possible values are Increase and Decrease. + Direction *string `json:"direction,omitempty" tf:"direction,omitempty"` + + // The type of action that should occur. Possible values are ChangeCount, ExactCount, PercentChangeCount and ServiceAllowedNextValue. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // The number of instances involved in the scaling action. 
+ Value *float64 `json:"value,omitempty" tf:"value,omitempty"` +} + +type ScaleActionParameters struct { + + // The amount of time to wait since the last scaling action before this action occurs. Must be between 1 minute and 1 week and formatted as a ISO 8601 string. + // +kubebuilder:validation:Optional + Cooldown *string `json:"cooldown" tf:"cooldown,omitempty"` + + // The scale direction. Possible values are Increase and Decrease. + // +kubebuilder:validation:Optional + Direction *string `json:"direction" tf:"direction,omitempty"` + + // The type of action that should occur. Possible values are ChangeCount, ExactCount, PercentChangeCount and ServiceAllowedNextValue. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` + + // The number of instances involved in the scaling action. + // +kubebuilder:validation:Optional + Value *float64 `json:"value" tf:"value,omitempty"` +} + +type WebhookInitParameters struct { + + // A map of settings. + // +mapType=granular + Properties map[string]*string `json:"properties,omitempty" tf:"properties,omitempty"` + + // The HTTPS URI which should receive scale notifications. + ServiceURI *string `json:"serviceUri,omitempty" tf:"service_uri,omitempty"` +} + +type WebhookObservation struct { + + // A map of settings. + // +mapType=granular + Properties map[string]*string `json:"properties,omitempty" tf:"properties,omitempty"` + + // The HTTPS URI which should receive scale notifications. + ServiceURI *string `json:"serviceUri,omitempty" tf:"service_uri,omitempty"` +} + +type WebhookParameters struct { + + // A map of settings. + // +kubebuilder:validation:Optional + // +mapType=granular + Properties map[string]*string `json:"properties,omitempty" tf:"properties,omitempty"` + + // The HTTPS URI which should receive scale notifications. 
+ // +kubebuilder:validation:Optional + ServiceURI *string `json:"serviceUri" tf:"service_uri,omitempty"` +} + +// MonitorAutoscaleSettingSpec defines the desired state of MonitorAutoscaleSetting +type MonitorAutoscaleSettingSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider MonitorAutoscaleSettingParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider MonitorAutoscaleSettingInitParameters `json:"initProvider,omitempty"` +} + +// MonitorAutoscaleSettingStatus defines the observed state of MonitorAutoscaleSetting. +type MonitorAutoscaleSettingStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider MonitorAutoscaleSettingObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// MonitorAutoscaleSetting is the Schema for the MonitorAutoscaleSettings API. Manages an AutoScale Setting which can be applied to Virtual Machine Scale Sets, App Services and other scalable resources. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type MonitorAutoscaleSetting struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.profile) || (has(self.initProvider) && has(self.initProvider.profile))",message="spec.forProvider.profile is a required parameter" + Spec MonitorAutoscaleSettingSpec `json:"spec"` + Status MonitorAutoscaleSettingStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// MonitorAutoscaleSettingList contains a list of MonitorAutoscaleSettings +type MonitorAutoscaleSettingList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []MonitorAutoscaleSetting `json:"items"` +} + +// 
Repository type metadata. +var ( + MonitorAutoscaleSetting_Kind = "MonitorAutoscaleSetting" + MonitorAutoscaleSetting_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: MonitorAutoscaleSetting_Kind}.String() + MonitorAutoscaleSetting_KindAPIVersion = MonitorAutoscaleSetting_Kind + "." + CRDGroupVersion.String() + MonitorAutoscaleSetting_GroupVersionKind = CRDGroupVersion.WithKind(MonitorAutoscaleSetting_Kind) +) + +func init() { + SchemeBuilder.Register(&MonitorAutoscaleSetting{}, &MonitorAutoscaleSettingList{}) +} diff --git a/apis/insights/v1beta2/zz_monitordatacollectionrule_terraformed.go b/apis/insights/v1beta2/zz_monitordatacollectionrule_terraformed.go new file mode 100755 index 000000000..97fa525d6 --- /dev/null +++ b/apis/insights/v1beta2/zz_monitordatacollectionrule_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this MonitorDataCollectionRule +func (mg *MonitorDataCollectionRule) GetTerraformResourceType() string { + return "azurerm_monitor_data_collection_rule" +} + +// GetConnectionDetailsMapping for this MonitorDataCollectionRule +func (tr *MonitorDataCollectionRule) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this MonitorDataCollectionRule +func (tr *MonitorDataCollectionRule) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this MonitorDataCollectionRule +func (tr *MonitorDataCollectionRule) SetObservation(obs map[string]any) error { + p, err := 
json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this MonitorDataCollectionRule +func (tr *MonitorDataCollectionRule) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this MonitorDataCollectionRule +func (tr *MonitorDataCollectionRule) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this MonitorDataCollectionRule +func (tr *MonitorDataCollectionRule) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this MonitorDataCollectionRule +func (tr *MonitorDataCollectionRule) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this MonitorDataCollectionRule +func (tr *MonitorDataCollectionRule) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. 
As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this MonitorDataCollectionRule using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *MonitorDataCollectionRule) LateInitialize(attrs []byte) (bool, error) { + params := &MonitorDataCollectionRuleParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *MonitorDataCollectionRule) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/insights/v1beta2/zz_monitordatacollectionrule_types.go b/apis/insights/v1beta2/zz_monitordatacollectionrule_types.go new file mode 100755 index 000000000..81071f66f --- /dev/null +++ b/apis/insights/v1beta2/zz_monitordatacollectionrule_types.go @@ -0,0 +1,1430 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT.
+ +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AzureMonitorMetricsInitParameters struct { + + // The name which should be used for this data source. This name should be unique across all data sources regardless of type within the Data Collection Rule. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type AzureMonitorMetricsObservation struct { + + // The name which should be used for this data source. This name should be unique across all data sources regardless of type within the Data Collection Rule. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type AzureMonitorMetricsParameters struct { + + // The name which should be used for this data source. This name should be unique across all data sources regardless of type within the Data Collection Rule. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type ColumnInitParameters struct { + + // The name which should be used for this data source. This name should be unique across all data sources regardless of type within the Data Collection Rule. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Data Collection Rule. Possible values are SystemAssigned and UserAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ColumnObservation struct { + + // The name which should be used for this data source. This name should be unique across all data sources regardless of type within the Data Collection Rule. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Data Collection Rule. Possible values are SystemAssigned and UserAssigned. 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ColumnParameters struct { + + // The name which should be used for this data source. This name should be unique across all data sources regardless of type within the Data Collection Rule. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Data Collection Rule. Possible values are SystemAssigned and UserAssigned. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type DataFlowInitParameters struct { + + // The built-in transform to transform stream data. + BuiltInTransform *string `json:"builtInTransform,omitempty" tf:"built_in_transform,omitempty"` + + // Specifies a list of destination names. A azure_monitor_metrics data source only allows for stream of kind Microsoft-InsightsMetrics. + Destinations []*string `json:"destinations,omitempty" tf:"destinations,omitempty"` + + // The output stream of the transform. Only required if the data flow changes data to a different stream. + OutputStream *string `json:"outputStream,omitempty" tf:"output_stream,omitempty"` + + // Specifies a list of streams. Possible values include but not limited to Microsoft-Event, Microsoft-InsightsMetrics, Microsoft-Perf, Microsoft-Syslog, Microsoft-WindowsEvent, and Microsoft-PrometheusMetrics. + Streams []*string `json:"streams,omitempty" tf:"streams,omitempty"` + + // The KQL query to transform stream data. + TransformKql *string `json:"transformKql,omitempty" tf:"transform_kql,omitempty"` +} + +type DataFlowObservation struct { + + // The built-in transform to transform stream data. + BuiltInTransform *string `json:"builtInTransform,omitempty" tf:"built_in_transform,omitempty"` + + // Specifies a list of destination names. A azure_monitor_metrics data source only allows for stream of kind Microsoft-InsightsMetrics. 
+ Destinations []*string `json:"destinations,omitempty" tf:"destinations,omitempty"` + + // The output stream of the transform. Only required if the data flow changes data to a different stream. + OutputStream *string `json:"outputStream,omitempty" tf:"output_stream,omitempty"` + + // Specifies a list of streams. Possible values include but not limited to Microsoft-Event, Microsoft-InsightsMetrics, Microsoft-Perf, Microsoft-Syslog, Microsoft-WindowsEvent, and Microsoft-PrometheusMetrics. + Streams []*string `json:"streams,omitempty" tf:"streams,omitempty"` + + // The KQL query to transform stream data. + TransformKql *string `json:"transformKql,omitempty" tf:"transform_kql,omitempty"` +} + +type DataFlowParameters struct { + + // The built-in transform to transform stream data. + // +kubebuilder:validation:Optional + BuiltInTransform *string `json:"builtInTransform,omitempty" tf:"built_in_transform,omitempty"` + + // Specifies a list of destination names. A azure_monitor_metrics data source only allows for stream of kind Microsoft-InsightsMetrics. + // +kubebuilder:validation:Optional + Destinations []*string `json:"destinations" tf:"destinations,omitempty"` + + // The output stream of the transform. Only required if the data flow changes data to a different stream. + // +kubebuilder:validation:Optional + OutputStream *string `json:"outputStream,omitempty" tf:"output_stream,omitempty"` + + // Specifies a list of streams. Possible values include but not limited to Microsoft-Event, Microsoft-InsightsMetrics, Microsoft-Perf, Microsoft-Syslog, Microsoft-WindowsEvent, and Microsoft-PrometheusMetrics. + // +kubebuilder:validation:Optional + Streams []*string `json:"streams" tf:"streams,omitempty"` + + // The KQL query to transform stream data. + // +kubebuilder:validation:Optional + TransformKql *string `json:"transformKql,omitempty" tf:"transform_kql,omitempty"` +} + +type DataImportInitParameters struct { + + // An event_hub_data_source block as defined below. 
+ EventHubDataSource []EventHubDataSourceInitParameters `json:"eventHubDataSource,omitempty" tf:"event_hub_data_source,omitempty"` +} + +type DataImportObservation struct { + + // An event_hub_data_source block as defined below. + EventHubDataSource []EventHubDataSourceObservation `json:"eventHubDataSource,omitempty" tf:"event_hub_data_source,omitempty"` +} + +type DataImportParameters struct { + + // An event_hub_data_source block as defined below. + // +kubebuilder:validation:Optional + EventHubDataSource []EventHubDataSourceParameters `json:"eventHubDataSource" tf:"event_hub_data_source,omitempty"` +} + +type DataSourcesInitParameters struct { + + // A data_import block as defined above. + DataImport *DataImportInitParameters `json:"dataImport,omitempty" tf:"data_import,omitempty"` + + // One or more extension blocks as defined below. + Extension []ExtensionInitParameters `json:"extension,omitempty" tf:"extension,omitempty"` + + // One or more iis_log blocks as defined below. + IisLog []IisLogInitParameters `json:"iisLog,omitempty" tf:"iis_log,omitempty"` + + // One or more log_file blocks as defined below. + LogFile []LogFileInitParameters `json:"logFile,omitempty" tf:"log_file,omitempty"` + + // One or more performance_counter blocks as defined below. + PerformanceCounter []PerformanceCounterInitParameters `json:"performanceCounter,omitempty" tf:"performance_counter,omitempty"` + + // One or more platform_telemetry blocks as defined below. + PlatformTelemetry []PlatformTelemetryInitParameters `json:"platformTelemetry,omitempty" tf:"platform_telemetry,omitempty"` + + // One or more prometheus_forwarder blocks as defined below. + PrometheusForwarder []PrometheusForwarderInitParameters `json:"prometheusForwarder,omitempty" tf:"prometheus_forwarder,omitempty"` + + // One or more syslog blocks as defined below. + Syslog []SyslogInitParameters `json:"syslog,omitempty" tf:"syslog,omitempty"` + + // One or more windows_event_log blocks as defined below. 
+ WindowsEventLog []WindowsEventLogInitParameters `json:"windowsEventLog,omitempty" tf:"windows_event_log,omitempty"` + + // One or more windows_firewall_log blocks as defined below. + WindowsFirewallLog []WindowsFirewallLogInitParameters `json:"windowsFirewallLog,omitempty" tf:"windows_firewall_log,omitempty"` +} + +type DataSourcesObservation struct { + + // A data_import block as defined above. + DataImport *DataImportObservation `json:"dataImport,omitempty" tf:"data_import,omitempty"` + + // One or more extension blocks as defined below. + Extension []ExtensionObservation `json:"extension,omitempty" tf:"extension,omitempty"` + + // One or more iis_log blocks as defined below. + IisLog []IisLogObservation `json:"iisLog,omitempty" tf:"iis_log,omitempty"` + + // One or more log_file blocks as defined below. + LogFile []LogFileObservation `json:"logFile,omitempty" tf:"log_file,omitempty"` + + // One or more performance_counter blocks as defined below. + PerformanceCounter []PerformanceCounterObservation `json:"performanceCounter,omitempty" tf:"performance_counter,omitempty"` + + // One or more platform_telemetry blocks as defined below. + PlatformTelemetry []PlatformTelemetryObservation `json:"platformTelemetry,omitempty" tf:"platform_telemetry,omitempty"` + + // One or more prometheus_forwarder blocks as defined below. + PrometheusForwarder []PrometheusForwarderObservation `json:"prometheusForwarder,omitempty" tf:"prometheus_forwarder,omitempty"` + + // One or more syslog blocks as defined below. + Syslog []SyslogObservation `json:"syslog,omitempty" tf:"syslog,omitempty"` + + // One or more windows_event_log blocks as defined below. + WindowsEventLog []WindowsEventLogObservation `json:"windowsEventLog,omitempty" tf:"windows_event_log,omitempty"` + + // One or more windows_firewall_log blocks as defined below. 
+ WindowsFirewallLog []WindowsFirewallLogObservation `json:"windowsFirewallLog,omitempty" tf:"windows_firewall_log,omitempty"` +} + +type DataSourcesParameters struct { + + // A data_import block as defined above. + // +kubebuilder:validation:Optional + DataImport *DataImportParameters `json:"dataImport,omitempty" tf:"data_import,omitempty"` + + // One or more extension blocks as defined below. + // +kubebuilder:validation:Optional + Extension []ExtensionParameters `json:"extension,omitempty" tf:"extension,omitempty"` + + // One or more iis_log blocks as defined below. + // +kubebuilder:validation:Optional + IisLog []IisLogParameters `json:"iisLog,omitempty" tf:"iis_log,omitempty"` + + // One or more log_file blocks as defined below. + // +kubebuilder:validation:Optional + LogFile []LogFileParameters `json:"logFile,omitempty" tf:"log_file,omitempty"` + + // One or more performance_counter blocks as defined below. + // +kubebuilder:validation:Optional + PerformanceCounter []PerformanceCounterParameters `json:"performanceCounter,omitempty" tf:"performance_counter,omitempty"` + + // One or more platform_telemetry blocks as defined below. + // +kubebuilder:validation:Optional + PlatformTelemetry []PlatformTelemetryParameters `json:"platformTelemetry,omitempty" tf:"platform_telemetry,omitempty"` + + // One or more prometheus_forwarder blocks as defined below. + // +kubebuilder:validation:Optional + PrometheusForwarder []PrometheusForwarderParameters `json:"prometheusForwarder,omitempty" tf:"prometheus_forwarder,omitempty"` + + // One or more syslog blocks as defined below. + // +kubebuilder:validation:Optional + Syslog []SyslogParameters `json:"syslog,omitempty" tf:"syslog,omitempty"` + + // One or more windows_event_log blocks as defined below. + // +kubebuilder:validation:Optional + WindowsEventLog []WindowsEventLogParameters `json:"windowsEventLog,omitempty" tf:"windows_event_log,omitempty"` + + // One or more windows_firewall_log blocks as defined below. 
+ // +kubebuilder:validation:Optional + WindowsFirewallLog []WindowsFirewallLogParameters `json:"windowsFirewallLog,omitempty" tf:"windows_firewall_log,omitempty"` +} + +type DestinationsInitParameters struct { + + // A azure_monitor_metrics block as defined above. + AzureMonitorMetrics *AzureMonitorMetricsInitParameters `json:"azureMonitorMetrics,omitempty" tf:"azure_monitor_metrics,omitempty"` + + // One or more event_hub blocks as defined below. + EventHub *EventHubInitParameters `json:"eventHub,omitempty" tf:"event_hub,omitempty"` + + // One or more event_hub blocks as defined below. + EventHubDirect *EventHubDirectInitParameters `json:"eventHubDirect,omitempty" tf:"event_hub_direct,omitempty"` + + // One or more log_analytics blocks as defined below. + LogAnalytics []LogAnalyticsInitParameters `json:"logAnalytics,omitempty" tf:"log_analytics,omitempty"` + + // One or more monitor_account blocks as defined below. + MonitorAccount []MonitorAccountInitParameters `json:"monitorAccount,omitempty" tf:"monitor_account,omitempty"` + + // One or more storage_blob blocks as defined below. + StorageBlob []StorageBlobInitParameters `json:"storageBlob,omitempty" tf:"storage_blob,omitempty"` + + // One or more storage_blob_direct blocks as defined below. + StorageBlobDirect []StorageBlobDirectInitParameters `json:"storageBlobDirect,omitempty" tf:"storage_blob_direct,omitempty"` + + // One or more storage_table_direct blocks as defined below. + StorageTableDirect []StorageTableDirectInitParameters `json:"storageTableDirect,omitempty" tf:"storage_table_direct,omitempty"` +} + +type DestinationsObservation struct { + + // A azure_monitor_metrics block as defined above. + AzureMonitorMetrics *AzureMonitorMetricsObservation `json:"azureMonitorMetrics,omitempty" tf:"azure_monitor_metrics,omitempty"` + + // One or more event_hub blocks as defined below. 
+ EventHub *EventHubObservation `json:"eventHub,omitempty" tf:"event_hub,omitempty"` + + // One or more event_hub blocks as defined below. + EventHubDirect *EventHubDirectObservation `json:"eventHubDirect,omitempty" tf:"event_hub_direct,omitempty"` + + // One or more log_analytics blocks as defined below. + LogAnalytics []LogAnalyticsObservation `json:"logAnalytics,omitempty" tf:"log_analytics,omitempty"` + + // One or more monitor_account blocks as defined below. + MonitorAccount []MonitorAccountObservation `json:"monitorAccount,omitempty" tf:"monitor_account,omitempty"` + + // One or more storage_blob blocks as defined below. + StorageBlob []StorageBlobObservation `json:"storageBlob,omitempty" tf:"storage_blob,omitempty"` + + // One or more storage_blob_direct blocks as defined below. + StorageBlobDirect []StorageBlobDirectObservation `json:"storageBlobDirect,omitempty" tf:"storage_blob_direct,omitempty"` + + // One or more storage_table_direct blocks as defined below. + StorageTableDirect []StorageTableDirectObservation `json:"storageTableDirect,omitempty" tf:"storage_table_direct,omitempty"` +} + +type DestinationsParameters struct { + + // A azure_monitor_metrics block as defined above. + // +kubebuilder:validation:Optional + AzureMonitorMetrics *AzureMonitorMetricsParameters `json:"azureMonitorMetrics,omitempty" tf:"azure_monitor_metrics,omitempty"` + + // One or more event_hub blocks as defined below. + // +kubebuilder:validation:Optional + EventHub *EventHubParameters `json:"eventHub,omitempty" tf:"event_hub,omitempty"` + + // One or more event_hub blocks as defined below. + // +kubebuilder:validation:Optional + EventHubDirect *EventHubDirectParameters `json:"eventHubDirect,omitempty" tf:"event_hub_direct,omitempty"` + + // One or more log_analytics blocks as defined below. 
+ // +kubebuilder:validation:Optional + LogAnalytics []LogAnalyticsParameters `json:"logAnalytics,omitempty" tf:"log_analytics,omitempty"` + + // One or more monitor_account blocks as defined below. + // +kubebuilder:validation:Optional + MonitorAccount []MonitorAccountParameters `json:"monitorAccount,omitempty" tf:"monitor_account,omitempty"` + + // One or more storage_blob blocks as defined below. + // +kubebuilder:validation:Optional + StorageBlob []StorageBlobParameters `json:"storageBlob,omitempty" tf:"storage_blob,omitempty"` + + // One or more storage_blob_direct blocks as defined below. + // +kubebuilder:validation:Optional + StorageBlobDirect []StorageBlobDirectParameters `json:"storageBlobDirect,omitempty" tf:"storage_blob_direct,omitempty"` + + // One or more storage_table_direct blocks as defined below. + // +kubebuilder:validation:Optional + StorageTableDirect []StorageTableDirectParameters `json:"storageTableDirect,omitempty" tf:"storage_table_direct,omitempty"` +} + +type EventHubDataSourceInitParameters struct { + + // The Event Hub consumer group name. + ConsumerGroup *string `json:"consumerGroup,omitempty" tf:"consumer_group,omitempty"` + + // The name which should be used for this data source. This name should be unique across all data sources regardless of type within the Data Collection Rule. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The stream to collect from Event Hub. Possible value should be a custom stream name. + Stream *string `json:"stream,omitempty" tf:"stream,omitempty"` +} + +type EventHubDataSourceObservation struct { + + // The Event Hub consumer group name. + ConsumerGroup *string `json:"consumerGroup,omitempty" tf:"consumer_group,omitempty"` + + // The name which should be used for this data source. This name should be unique across all data sources regardless of type within the Data Collection Rule. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The stream to collect from Event Hub. 
Possible value should be a custom stream name. + Stream *string `json:"stream,omitempty" tf:"stream,omitempty"` +} + +type EventHubDataSourceParameters struct { + + // The Event Hub consumer group name. + // +kubebuilder:validation:Optional + ConsumerGroup *string `json:"consumerGroup,omitempty" tf:"consumer_group,omitempty"` + + // The name which should be used for this data source. This name should be unique across all data sources regardless of type within the Data Collection Rule. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The stream to collect from Event Hub. Possible value should be a custom stream name. + // +kubebuilder:validation:Optional + Stream *string `json:"stream" tf:"stream,omitempty"` +} + +type EventHubDirectInitParameters struct { + + // The resource ID of the Event Hub. + EventHubID *string `json:"eventHubId,omitempty" tf:"event_hub_id,omitempty"` + + // The name which should be used for this data source. This name should be unique across all data sources regardless of type within the Data Collection Rule. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type EventHubDirectObservation struct { + + // The resource ID of the Event Hub. + EventHubID *string `json:"eventHubId,omitempty" tf:"event_hub_id,omitempty"` + + // The name which should be used for this data source. This name should be unique across all data sources regardless of type within the Data Collection Rule. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type EventHubDirectParameters struct { + + // The resource ID of the Event Hub. + // +kubebuilder:validation:Optional + EventHubID *string `json:"eventHubId" tf:"event_hub_id,omitempty"` + + // The name which should be used for this data source. This name should be unique across all data sources regardless of type within the Data Collection Rule. 
+ // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type EventHubInitParameters struct { + + // The resource ID of the Event Hub. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta2.EventHub + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + EventHubID *string `json:"eventHubId,omitempty" tf:"event_hub_id,omitempty"` + + // Reference to a EventHub in eventhub to populate eventHubId. + // +kubebuilder:validation:Optional + EventHubIDRef *v1.Reference `json:"eventHubIdRef,omitempty" tf:"-"` + + // Selector for a EventHub in eventhub to populate eventHubId. + // +kubebuilder:validation:Optional + EventHubIDSelector *v1.Selector `json:"eventHubIdSelector,omitempty" tf:"-"` + + // The name which should be used for this data source. This name should be unique across all data sources regardless of type within the Data Collection Rule. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type EventHubObservation struct { + + // The resource ID of the Event Hub. + EventHubID *string `json:"eventHubId,omitempty" tf:"event_hub_id,omitempty"` + + // The name which should be used for this data source. This name should be unique across all data sources regardless of type within the Data Collection Rule. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type EventHubParameters struct { + + // The resource ID of the Event Hub. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta2.EventHub + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + EventHubID *string `json:"eventHubId,omitempty" tf:"event_hub_id,omitempty"` + + // Reference to a EventHub in eventhub to populate eventHubId. 
+ // +kubebuilder:validation:Optional + EventHubIDRef *v1.Reference `json:"eventHubIdRef,omitempty" tf:"-"` + + // Selector for a EventHub in eventhub to populate eventHubId. + // +kubebuilder:validation:Optional + EventHubIDSelector *v1.Selector `json:"eventHubIdSelector,omitempty" tf:"-"` + + // The name which should be used for this data source. This name should be unique across all data sources regardless of type within the Data Collection Rule. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type ExtensionInitParameters struct { + + // A JSON String which specifies the extension setting. + ExtensionJSON *string `json:"extensionJson,omitempty" tf:"extension_json,omitempty"` + + // The name of the VM extension. + ExtensionName *string `json:"extensionName,omitempty" tf:"extension_name,omitempty"` + + // Specifies a list of data sources this extension needs data from. An item should be a name of a supported data source which produces only one stream. Supported data sources type: performance_counter, windows_event_log,and syslog. + InputDataSources []*string `json:"inputDataSources,omitempty" tf:"input_data_sources,omitempty"` + + // The name which should be used for this data source. This name should be unique across all data sources regardless of type within the Data Collection Rule. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies a list of streams that this data source will be sent to. A stream indicates what schema will be used for this data and usually what table in Log Analytics the data will be sent to. + Streams []*string `json:"streams,omitempty" tf:"streams,omitempty"` +} + +type ExtensionObservation struct { + + // A JSON String which specifies the extension setting. + ExtensionJSON *string `json:"extensionJson,omitempty" tf:"extension_json,omitempty"` + + // The name of the VM extension. 
+ ExtensionName *string `json:"extensionName,omitempty" tf:"extension_name,omitempty"` + + // Specifies a list of data sources this extension needs data from. An item should be a name of a supported data source which produces only one stream. Supported data sources type: performance_counter, windows_event_log,and syslog. + InputDataSources []*string `json:"inputDataSources,omitempty" tf:"input_data_sources,omitempty"` + + // The name which should be used for this data source. This name should be unique across all data sources regardless of type within the Data Collection Rule. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies a list of streams that this data source will be sent to. A stream indicates what schema will be used for this data and usually what table in Log Analytics the data will be sent to. + Streams []*string `json:"streams,omitempty" tf:"streams,omitempty"` +} + +type ExtensionParameters struct { + + // A JSON String which specifies the extension setting. + // +kubebuilder:validation:Optional + ExtensionJSON *string `json:"extensionJson,omitempty" tf:"extension_json,omitempty"` + + // The name of the VM extension. + // +kubebuilder:validation:Optional + ExtensionName *string `json:"extensionName" tf:"extension_name,omitempty"` + + // Specifies a list of data sources this extension needs data from. An item should be a name of a supported data source which produces only one stream. Supported data sources type: performance_counter, windows_event_log,and syslog. + // +kubebuilder:validation:Optional + InputDataSources []*string `json:"inputDataSources,omitempty" tf:"input_data_sources,omitempty"` + + // The name which should be used for this data source. This name should be unique across all data sources regardless of type within the Data Collection Rule. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Specifies a list of streams that this data source will be sent to. 
A stream indicates what schema will be used for this data and usually what table in Log Analytics the data will be sent to. + // +kubebuilder:validation:Optional + Streams []*string `json:"streams" tf:"streams,omitempty"` +} + +type IisLogInitParameters struct { + + // Specifies a list of absolute paths where the log files are located. + LogDirectories []*string `json:"logDirectories,omitempty" tf:"log_directories,omitempty"` + + // The name which should be used for this data source. This name should be unique across all data sources regardless of type within the Data Collection Rule. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies a list of streams that this data source will be sent to. A stream indicates what schema will be used for this data and usually what table in Log Analytics the data will be sent to. + Streams []*string `json:"streams,omitempty" tf:"streams,omitempty"` +} + +type IisLogObservation struct { + + // Specifies a list of absolute paths where the log files are located. + LogDirectories []*string `json:"logDirectories,omitempty" tf:"log_directories,omitempty"` + + // The name which should be used for this data source. This name should be unique across all data sources regardless of type within the Data Collection Rule. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies a list of streams that this data source will be sent to. A stream indicates what schema will be used for this data and usually what table in Log Analytics the data will be sent to. + Streams []*string `json:"streams,omitempty" tf:"streams,omitempty"` +} + +type IisLogParameters struct { + + // Specifies a list of absolute paths where the log files are located. + // +kubebuilder:validation:Optional + LogDirectories []*string `json:"logDirectories,omitempty" tf:"log_directories,omitempty"` + + // The name which should be used for this data source. 
This name should be unique across all data sources regardless of type within the Data Collection Rule. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Specifies a list of streams that this data source will be sent to. A stream indicates what schema will be used for this data and usually what table in Log Analytics the data will be sent to. + // +kubebuilder:validation:Optional + Streams []*string `json:"streams" tf:"streams,omitempty"` +} + +type LabelIncludeFilterInitParameters struct { + + // The label of the filter. This label should be unique across all label_include_fileter block. Possible value is microsoft_metrics_include_label. + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // The value of the filter. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type LabelIncludeFilterObservation struct { + + // The label of the filter. This label should be unique across all label_include_fileter block. Possible value is microsoft_metrics_include_label. + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // The value of the filter. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type LabelIncludeFilterParameters struct { + + // The label of the filter. This label should be unique across all label_include_fileter block. Possible value is microsoft_metrics_include_label. + // +kubebuilder:validation:Optional + Label *string `json:"label" tf:"label,omitempty"` + + // The value of the filter. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type LogAnalyticsInitParameters struct { + + // The name which should be used for this data source. This name should be unique across all data sources regardless of type within the Data Collection Rule. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The ID of a Log Analytic Workspace resource. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/operationalinsights/v1beta2.Workspace + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + WorkspaceResourceID *string `json:"workspaceResourceId,omitempty" tf:"workspace_resource_id,omitempty"` + + // Reference to a Workspace in operationalinsights to populate workspaceResourceId. + // +kubebuilder:validation:Optional + WorkspaceResourceIDRef *v1.Reference `json:"workspaceResourceIdRef,omitempty" tf:"-"` + + // Selector for a Workspace in operationalinsights to populate workspaceResourceId. + // +kubebuilder:validation:Optional + WorkspaceResourceIDSelector *v1.Selector `json:"workspaceResourceIdSelector,omitempty" tf:"-"` +} + +type LogAnalyticsObservation struct { + + // The name which should be used for this data source. This name should be unique across all data sources regardless of type within the Data Collection Rule. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The ID of a Log Analytic Workspace resource. + WorkspaceResourceID *string `json:"workspaceResourceId,omitempty" tf:"workspace_resource_id,omitempty"` +} + +type LogAnalyticsParameters struct { + + // The name which should be used for this data source. This name should be unique across all data sources regardless of type within the Data Collection Rule. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The ID of a Log Analytic Workspace resource. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/operationalinsights/v1beta2.Workspace + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + WorkspaceResourceID *string `json:"workspaceResourceId,omitempty" tf:"workspace_resource_id,omitempty"` + + // Reference to a Workspace in operationalinsights to populate workspaceResourceId. 
+ // +kubebuilder:validation:Optional + WorkspaceResourceIDRef *v1.Reference `json:"workspaceResourceIdRef,omitempty" tf:"-"` + + // Selector for a Workspace in operationalinsights to populate workspaceResourceId. + // +kubebuilder:validation:Optional + WorkspaceResourceIDSelector *v1.Selector `json:"workspaceResourceIdSelector,omitempty" tf:"-"` +} + +type LogFileInitParameters struct { + + // Specifies a list of file patterns where the log files are located. For example, C:\\JavaLogs\\*.log. + FilePatterns []*string `json:"filePatterns,omitempty" tf:"file_patterns,omitempty"` + + // The data format of the log files. possible value is text. + Format *string `json:"format,omitempty" tf:"format,omitempty"` + + // The name which should be used for this data source. This name should be unique across all data sources regardless of type within the Data Collection Rule. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A settings block as defined below. + Settings *SettingsInitParameters `json:"settings,omitempty" tf:"settings,omitempty"` + + // Specifies a list of streams that this data source will be sent to. A stream indicates what schema will be used for this data and usually what table in Log Analytics the data will be sent to. + Streams []*string `json:"streams,omitempty" tf:"streams,omitempty"` +} + +type LogFileObservation struct { + + // Specifies a list of file patterns where the log files are located. For example, C:\\JavaLogs\\*.log. + FilePatterns []*string `json:"filePatterns,omitempty" tf:"file_patterns,omitempty"` + + // The data format of the log files. possible value is text. + Format *string `json:"format,omitempty" tf:"format,omitempty"` + + // The name which should be used for this data source. This name should be unique across all data sources regardless of type within the Data Collection Rule. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A settings block as defined below. 
+ Settings *SettingsObservation `json:"settings,omitempty" tf:"settings,omitempty"` + + // Specifies a list of streams that this data source will be sent to. A stream indicates what schema will be used for this data and usually what table in Log Analytics the data will be sent to. + Streams []*string `json:"streams,omitempty" tf:"streams,omitempty"` +} + +type LogFileParameters struct { + + // Specifies a list of file patterns where the log files are located. For example, C:\\JavaLogs\\*.log. + // +kubebuilder:validation:Optional + FilePatterns []*string `json:"filePatterns" tf:"file_patterns,omitempty"` + + // The data format of the log files. possible value is text. + // +kubebuilder:validation:Optional + Format *string `json:"format" tf:"format,omitempty"` + + // The name which should be used for this data source. This name should be unique across all data sources regardless of type within the Data Collection Rule. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // A settings block as defined below. + // +kubebuilder:validation:Optional + Settings *SettingsParameters `json:"settings,omitempty" tf:"settings,omitempty"` + + // Specifies a list of streams that this data source will be sent to. A stream indicates what schema will be used for this data and usually what table in Log Analytics the data will be sent to. + // +kubebuilder:validation:Optional + Streams []*string `json:"streams" tf:"streams,omitempty"` +} + +type MonitorAccountInitParameters struct { + + // The resource ID of the Monitor Account. + MonitorAccountID *string `json:"monitorAccountId,omitempty" tf:"monitor_account_id,omitempty"` + + // The name which should be used for this data source. This name should be unique across all data sources regardless of type within the Data Collection Rule. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type MonitorAccountObservation struct { + + // The resource ID of the Monitor Account. 
+ MonitorAccountID *string `json:"monitorAccountId,omitempty" tf:"monitor_account_id,omitempty"` + + // The name which should be used for this data source. This name should be unique across all data sources regardless of type within the Data Collection Rule. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type MonitorAccountParameters struct { + + // The resource ID of the Monitor Account. + // +kubebuilder:validation:Optional + MonitorAccountID *string `json:"monitorAccountId" tf:"monitor_account_id,omitempty"` + + // The name which should be used for this data source. This name should be unique across all data sources regardless of type within the Data Collection Rule. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type MonitorDataCollectionRuleIdentityInitParameters struct { + + // A list of User Assigned Managed Identity IDs to be assigned to this Data Collection Rule. Currently, up to 1 identity is supported. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Data Collection Rule. Possible values are SystemAssigned and UserAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type MonitorDataCollectionRuleIdentityObservation struct { + + // A list of User Assigned Managed Identity IDs to be assigned to this Data Collection Rule. Currently, up to 1 identity is supported. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Principal ID associated with this Managed Service Identity. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID associated with this Managed Service Identity. 
+ TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Data Collection Rule. Possible values are SystemAssigned and UserAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type MonitorDataCollectionRuleIdentityParameters struct { + + // A list of User Assigned Managed Identity IDs to be assigned to this Data Collection Rule. Currently, up to 1 identity is supported. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Data Collection Rule. Possible values are SystemAssigned and UserAssigned. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type MonitorDataCollectionRuleInitParameters struct { + + // The resource ID of the Data Collection Endpoint that this rule can be used with. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/insights/v1beta1.MonitorDataCollectionEndpoint + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + DataCollectionEndpointID *string `json:"dataCollectionEndpointId,omitempty" tf:"data_collection_endpoint_id,omitempty"` + + // Reference to a MonitorDataCollectionEndpoint in insights to populate dataCollectionEndpointId. + // +kubebuilder:validation:Optional + DataCollectionEndpointIDRef *v1.Reference `json:"dataCollectionEndpointIdRef,omitempty" tf:"-"` + + // Selector for a MonitorDataCollectionEndpoint in insights to populate dataCollectionEndpointId. + // +kubebuilder:validation:Optional + DataCollectionEndpointIDSelector *v1.Selector `json:"dataCollectionEndpointIdSelector,omitempty" tf:"-"` + + // One or more data_flow blocks as defined below. 
+ DataFlow []DataFlowInitParameters `json:"dataFlow,omitempty" tf:"data_flow,omitempty"` + + // A data_sources block as defined below. This property is optional and can be omitted if the rule is meant to be used via direct calls to the provisioned endpoint. + DataSources *DataSourcesInitParameters `json:"dataSources,omitempty" tf:"data_sources,omitempty"` + + // The description of the Data Collection Rule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A destinations block as defined below. + Destinations *DestinationsInitParameters `json:"destinations,omitempty" tf:"destinations,omitempty"` + + // An identity block as defined below. + Identity *MonitorDataCollectionRuleIdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The kind of the Data Collection Rule. Possible values are Linux, Windows, AgentDirectToStore and WorkspaceTransforms. A rule of kind Linux does not allow for windows_event_log data sources. And a rule of kind Windows does not allow for syslog data sources. If kind is not specified, all kinds of data sources are allowed. + Kind *string `json:"kind,omitempty" tf:"kind,omitempty"` + + // The Azure Region where the Data Collection Rule should exist. Changing this forces a new Data Collection Rule to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A stream_declaration block as defined below. + StreamDeclaration []StreamDeclarationInitParameters `json:"streamDeclaration,omitempty" tf:"stream_declaration,omitempty"` + + // A mapping of tags which should be assigned to the Data Collection Rule. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type MonitorDataCollectionRuleObservation struct { + + // The resource ID of the Data Collection Endpoint that this rule can be used with. 
+ DataCollectionEndpointID *string `json:"dataCollectionEndpointId,omitempty" tf:"data_collection_endpoint_id,omitempty"` + + // One or more data_flow blocks as defined below. + DataFlow []DataFlowObservation `json:"dataFlow,omitempty" tf:"data_flow,omitempty"` + + // A data_sources block as defined below. This property is optional and can be omitted if the rule is meant to be used via direct calls to the provisioned endpoint. + DataSources *DataSourcesObservation `json:"dataSources,omitempty" tf:"data_sources,omitempty"` + + // The description of the Data Collection Rule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A destinations block as defined below. + Destinations *DestinationsObservation `json:"destinations,omitempty" tf:"destinations,omitempty"` + + // The ID of the Data Collection Rule. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. + Identity *MonitorDataCollectionRuleIdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // The immutable ID of the Data Collection Rule. + ImmutableID *string `json:"immutableId,omitempty" tf:"immutable_id,omitempty"` + + // The kind of the Data Collection Rule. Possible values are Linux, Windows, AgentDirectToStore and WorkspaceTransforms. A rule of kind Linux does not allow for windows_event_log data sources. And a rule of kind Windows does not allow for syslog data sources. If kind is not specified, all kinds of data sources are allowed. + Kind *string `json:"kind,omitempty" tf:"kind,omitempty"` + + // The Azure Region where the Data Collection Rule should exist. Changing this forces a new Data Collection Rule to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the Resource Group where the Data Collection Rule should exist. Changing this forces a new Data Collection Rule to be created. 
+ ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A stream_declaration block as defined below. + StreamDeclaration []StreamDeclarationObservation `json:"streamDeclaration,omitempty" tf:"stream_declaration,omitempty"` + + // A mapping of tags which should be assigned to the Data Collection Rule. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type MonitorDataCollectionRuleParameters struct { + + // The resource ID of the Data Collection Endpoint that this rule can be used with. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/insights/v1beta1.MonitorDataCollectionEndpoint + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + DataCollectionEndpointID *string `json:"dataCollectionEndpointId,omitempty" tf:"data_collection_endpoint_id,omitempty"` + + // Reference to a MonitorDataCollectionEndpoint in insights to populate dataCollectionEndpointId. + // +kubebuilder:validation:Optional + DataCollectionEndpointIDRef *v1.Reference `json:"dataCollectionEndpointIdRef,omitempty" tf:"-"` + + // Selector for a MonitorDataCollectionEndpoint in insights to populate dataCollectionEndpointId. + // +kubebuilder:validation:Optional + DataCollectionEndpointIDSelector *v1.Selector `json:"dataCollectionEndpointIdSelector,omitempty" tf:"-"` + + // One or more data_flow blocks as defined below. + // +kubebuilder:validation:Optional + DataFlow []DataFlowParameters `json:"dataFlow,omitempty" tf:"data_flow,omitempty"` + + // A data_sources block as defined below. This property is optional and can be omitted if the rule is meant to be used via direct calls to the provisioned endpoint. 
+ // +kubebuilder:validation:Optional + DataSources *DataSourcesParameters `json:"dataSources,omitempty" tf:"data_sources,omitempty"` + + // The description of the Data Collection Rule. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A destinations block as defined below. + // +kubebuilder:validation:Optional + Destinations *DestinationsParameters `json:"destinations,omitempty" tf:"destinations,omitempty"` + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *MonitorDataCollectionRuleIdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The kind of the Data Collection Rule. Possible values are Linux, Windows, AgentDirectToStore and WorkspaceTransforms. A rule of kind Linux does not allow for windows_event_log data sources. And a rule of kind Windows does not allow for syslog data sources. If kind is not specified, all kinds of data sources are allowed. + // +kubebuilder:validation:Optional + Kind *string `json:"kind,omitempty" tf:"kind,omitempty"` + + // The Azure Region where the Data Collection Rule should exist. Changing this forces a new Data Collection Rule to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the Resource Group where the Data Collection Rule should exist. Changing this forces a new Data Collection Rule to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. 
+ // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A stream_declaration block as defined below. + // +kubebuilder:validation:Optional + StreamDeclaration []StreamDeclarationParameters `json:"streamDeclaration,omitempty" tf:"stream_declaration,omitempty"` + + // A mapping of tags which should be assigned to the Data Collection Rule. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type PerformanceCounterInitParameters struct { + + // Specifies a list of specifier names of the performance counters you want to collect. To get a list of performance counters on Windows, run the command typeperf. Please see this document for more information. + CounterSpecifiers []*string `json:"counterSpecifiers,omitempty" tf:"counter_specifiers,omitempty"` + + // The name which should be used for this data source. This name should be unique across all data sources regardless of type within the Data Collection Rule. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The number of seconds between consecutive counter measurements (samples). The value should be integer between 1 and 300 inclusive. sampling_frequency_in_seconds must be equal to 60 seconds for counters collected with Microsoft-InsightsMetrics stream. + SamplingFrequencyInSeconds *float64 `json:"samplingFrequencyInSeconds,omitempty" tf:"sampling_frequency_in_seconds,omitempty"` + + // Specifies a list of streams that this data source will be sent to. A stream indicates what schema will be used for this data and usually what table in Log Analytics the data will be sent to. 
+ Streams []*string `json:"streams,omitempty" tf:"streams,omitempty"` +} + +type PerformanceCounterObservation struct { + + // Specifies a list of specifier names of the performance counters you want to collect. To get a list of performance counters on Windows, run the command typeperf. Please see this document for more information. + CounterSpecifiers []*string `json:"counterSpecifiers,omitempty" tf:"counter_specifiers,omitempty"` + + // The name which should be used for this data source. This name should be unique across all data sources regardless of type within the Data Collection Rule. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The number of seconds between consecutive counter measurements (samples). The value should be integer between 1 and 300 inclusive. sampling_frequency_in_seconds must be equal to 60 seconds for counters collected with Microsoft-InsightsMetrics stream. + SamplingFrequencyInSeconds *float64 `json:"samplingFrequencyInSeconds,omitempty" tf:"sampling_frequency_in_seconds,omitempty"` + + // Specifies a list of streams that this data source will be sent to. A stream indicates what schema will be used for this data and usually what table in Log Analytics the data will be sent to. + Streams []*string `json:"streams,omitempty" tf:"streams,omitempty"` +} + +type PerformanceCounterParameters struct { + + // Specifies a list of specifier names of the performance counters you want to collect. To get a list of performance counters on Windows, run the command typeperf. Please see this document for more information. + // +kubebuilder:validation:Optional + CounterSpecifiers []*string `json:"counterSpecifiers" tf:"counter_specifiers,omitempty"` + + // The name which should be used for this data source. This name should be unique across all data sources regardless of type within the Data Collection Rule. 
+ // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The number of seconds between consecutive counter measurements (samples). The value should be integer between 1 and 300 inclusive. sampling_frequency_in_seconds must be equal to 60 seconds for counters collected with Microsoft-InsightsMetrics stream. + // +kubebuilder:validation:Optional + SamplingFrequencyInSeconds *float64 `json:"samplingFrequencyInSeconds" tf:"sampling_frequency_in_seconds,omitempty"` + + // Specifies a list of streams that this data source will be sent to. A stream indicates what schema will be used for this data and usually what table in Log Analytics the data will be sent to. + // +kubebuilder:validation:Optional + Streams []*string `json:"streams" tf:"streams,omitempty"` +} + +type PlatformTelemetryInitParameters struct { + + // The name which should be used for this data source. This name should be unique across all data sources regardless of type within the Data Collection Rule. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies a list of streams that this data source will be sent to. A stream indicates what schema will be used for this data and usually what table in Log Analytics the data will be sent to. + Streams []*string `json:"streams,omitempty" tf:"streams,omitempty"` +} + +type PlatformTelemetryObservation struct { + + // The name which should be used for this data source. This name should be unique across all data sources regardless of type within the Data Collection Rule. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies a list of streams that this data source will be sent to. A stream indicates what schema will be used for this data and usually what table in Log Analytics the data will be sent to. + Streams []*string `json:"streams,omitempty" tf:"streams,omitempty"` +} + +type PlatformTelemetryParameters struct { + + // The name which should be used for this data source. 
This name should be unique across all data sources regardless of type within the Data Collection Rule. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Specifies a list of streams that this data source will be sent to. A stream indicates what schema will be used for this data and usually what table in Log Analytics the data will be sent to. + // +kubebuilder:validation:Optional + Streams []*string `json:"streams" tf:"streams,omitempty"` +} + +type PrometheusForwarderInitParameters struct { + + // One or more label_include_filter blocks as defined above. + LabelIncludeFilter []LabelIncludeFilterInitParameters `json:"labelIncludeFilter,omitempty" tf:"label_include_filter,omitempty"` + + // The name which should be used for this data source. This name should be unique across all data sources regardless of type within the Data Collection Rule. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies a list of streams that this data source will be sent to. A stream indicates what schema will be used for this data and usually what table in Log Analytics the data will be sent to. + Streams []*string `json:"streams,omitempty" tf:"streams,omitempty"` +} + +type PrometheusForwarderObservation struct { + + // One or more label_include_filter blocks as defined above. + LabelIncludeFilter []LabelIncludeFilterObservation `json:"labelIncludeFilter,omitempty" tf:"label_include_filter,omitempty"` + + // The name which should be used for this data source. This name should be unique across all data sources regardless of type within the Data Collection Rule. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies a list of streams that this data source will be sent to. A stream indicates what schema will be used for this data and usually what table in Log Analytics the data will be sent to. 
+ Streams []*string `json:"streams,omitempty" tf:"streams,omitempty"` +} + +type PrometheusForwarderParameters struct { + + // One or more label_include_filter blocks as defined above. + // +kubebuilder:validation:Optional + LabelIncludeFilter []LabelIncludeFilterParameters `json:"labelIncludeFilter,omitempty" tf:"label_include_filter,omitempty"` + + // The name which should be used for this data source. This name should be unique across all data sources regardless of type within the Data Collection Rule. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Specifies a list of streams that this data source will be sent to. A stream indicates what schema will be used for this data and usually what table in Log Analytics the data will be sent to. + // +kubebuilder:validation:Optional + Streams []*string `json:"streams" tf:"streams,omitempty"` +} + +type SettingsInitParameters struct { + + // A text block as defined below. + Text *TextInitParameters `json:"text,omitempty" tf:"text,omitempty"` +} + +type SettingsObservation struct { + + // A text block as defined below. + Text *TextObservation `json:"text,omitempty" tf:"text,omitempty"` +} + +type SettingsParameters struct { + + // A text block as defined below. + // +kubebuilder:validation:Optional + Text *TextParameters `json:"text" tf:"text,omitempty"` +} + +type StorageBlobDirectInitParameters struct { + + // The Storage Container name. + ContainerName *string `json:"containerName,omitempty" tf:"container_name,omitempty"` + + // The name which should be used for this data source. This name should be unique across all data sources regardless of type within the Data Collection Rule. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The resource ID of the Storage Account. + StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` +} + +type StorageBlobDirectObservation struct { + + // The Storage Container name. 
+ ContainerName *string `json:"containerName,omitempty" tf:"container_name,omitempty"` + + // The name which should be used for this data source. This name should be unique across all data sources regardless of type within the Data Collection Rule. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The resource ID of the Storage Account. + StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` +} + +type StorageBlobDirectParameters struct { + + // The Storage Container name. + // +kubebuilder:validation:Optional + ContainerName *string `json:"containerName" tf:"container_name,omitempty"` + + // The name which should be used for this data source. This name should be unique across all data sources regardless of type within the Data Collection Rule. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The resource ID of the Storage Account. + // +kubebuilder:validation:Optional + StorageAccountID *string `json:"storageAccountId" tf:"storage_account_id,omitempty"` +} + +type StorageBlobInitParameters struct { + + // The Storage Container name. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Container + ContainerName *string `json:"containerName,omitempty" tf:"container_name,omitempty"` + + // Reference to a Container in storage to populate containerName. + // +kubebuilder:validation:Optional + ContainerNameRef *v1.Reference `json:"containerNameRef,omitempty" tf:"-"` + + // Selector for a Container in storage to populate containerName. + // +kubebuilder:validation:Optional + ContainerNameSelector *v1.Selector `json:"containerNameSelector,omitempty" tf:"-"` + + // The name which should be used for this data source. This name should be unique across all data sources regardless of type within the Data Collection Rule. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The resource ID of the Storage Account. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` + + // Reference to a Account in storage to populate storageAccountId. + // +kubebuilder:validation:Optional + StorageAccountIDRef *v1.Reference `json:"storageAccountIdRef,omitempty" tf:"-"` + + // Selector for a Account in storage to populate storageAccountId. + // +kubebuilder:validation:Optional + StorageAccountIDSelector *v1.Selector `json:"storageAccountIdSelector,omitempty" tf:"-"` +} + +type StorageBlobObservation struct { + + // The Storage Container name. + ContainerName *string `json:"containerName,omitempty" tf:"container_name,omitempty"` + + // The name which should be used for this data source. This name should be unique across all data sources regardless of type within the Data Collection Rule. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The resource ID of the Storage Account. + StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` +} + +type StorageBlobParameters struct { + + // The Storage Container name. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Container + // +kubebuilder:validation:Optional + ContainerName *string `json:"containerName,omitempty" tf:"container_name,omitempty"` + + // Reference to a Container in storage to populate containerName. + // +kubebuilder:validation:Optional + ContainerNameRef *v1.Reference `json:"containerNameRef,omitempty" tf:"-"` + + // Selector for a Container in storage to populate containerName. + // +kubebuilder:validation:Optional + ContainerNameSelector *v1.Selector `json:"containerNameSelector,omitempty" tf:"-"` + + // The name which should be used for this data source. 
This name should be unique across all data sources regardless of type within the Data Collection Rule. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The resource ID of the Storage Account. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` + + // Reference to a Account in storage to populate storageAccountId. + // +kubebuilder:validation:Optional + StorageAccountIDRef *v1.Reference `json:"storageAccountIdRef,omitempty" tf:"-"` + + // Selector for a Account in storage to populate storageAccountId. + // +kubebuilder:validation:Optional + StorageAccountIDSelector *v1.Selector `json:"storageAccountIdSelector,omitempty" tf:"-"` +} + +type StorageTableDirectInitParameters struct { + + // The name which should be used for this data source. This name should be unique across all data sources regardless of type within the Data Collection Rule. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The resource ID of the Storage Account. + StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` + + // The Storage Table name. + TableName *string `json:"tableName,omitempty" tf:"table_name,omitempty"` +} + +type StorageTableDirectObservation struct { + + // The name which should be used for this data source. This name should be unique across all data sources regardless of type within the Data Collection Rule. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The resource ID of the Storage Account. + StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` + + // The Storage Table name. 
+ TableName *string `json:"tableName,omitempty" tf:"table_name,omitempty"` +} + +type StorageTableDirectParameters struct { + + // The name which should be used for this data source. This name should be unique across all data sources regardless of type within the Data Collection Rule. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The resource ID of the Storage Account. + // +kubebuilder:validation:Optional + StorageAccountID *string `json:"storageAccountId" tf:"storage_account_id,omitempty"` + + // The Storage Table name. + // +kubebuilder:validation:Optional + TableName *string `json:"tableName" tf:"table_name,omitempty"` +} + +type StreamDeclarationInitParameters struct { + + // One or more column blocks as defined above. + Column []ColumnInitParameters `json:"column,omitempty" tf:"column,omitempty"` + + // The name of the custom stream. This name should be unique across all stream_declaration blocks. + StreamName *string `json:"streamName,omitempty" tf:"stream_name,omitempty"` +} + +type StreamDeclarationObservation struct { + + // One or more column blocks as defined above. + Column []ColumnObservation `json:"column,omitempty" tf:"column,omitempty"` + + // The name of the custom stream. This name should be unique across all stream_declaration blocks. + StreamName *string `json:"streamName,omitempty" tf:"stream_name,omitempty"` +} + +type StreamDeclarationParameters struct { + + // One or more column blocks as defined above. + // +kubebuilder:validation:Optional + Column []ColumnParameters `json:"column" tf:"column,omitempty"` + + // The name of the custom stream. This name should be unique across all stream_declaration blocks. + // +kubebuilder:validation:Optional + StreamName *string `json:"streamName" tf:"stream_name,omitempty"` +} + +type SyslogInitParameters struct { + + // Specifies a list of facility names. Use a wildcard * to collect logs for all facility names. 
Possible values are alert, *, audit, auth, authpriv, clock, cron, daemon, ftp, kern, local5, local4, local1, local7, local6, local3, local2, local0, lpr, mail, mark, news, nopri, ntp, syslog, user and uucp. + FacilityNames []*string `json:"facilityNames,omitempty" tf:"facility_names,omitempty"` + + // Specifies a list of log levels. Use a wildcard * to collect logs for all log levels. Possible values are Debug, Info, Notice, Warning, Error, Critical, Alert, Emergency,and *. + LogLevels []*string `json:"logLevels,omitempty" tf:"log_levels,omitempty"` + + // The name which should be used for this data source. This name should be unique across all data sources regardless of type within the Data Collection Rule. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies a list of streams that this data source will be sent to. A stream indicates what schema will be used for this data and usually what table in Log Analytics the data will be sent to. + Streams []*string `json:"streams,omitempty" tf:"streams,omitempty"` +} + +type SyslogObservation struct { + + // Specifies a list of facility names. Use a wildcard * to collect logs for all facility names. Possible values are alert, *, audit, auth, authpriv, clock, cron, daemon, ftp, kern, local5, local4, local1, local7, local6, local3, local2, local0, lpr, mail, mark, news, nopri, ntp, syslog, user and uucp. + FacilityNames []*string `json:"facilityNames,omitempty" tf:"facility_names,omitempty"` + + // Specifies a list of log levels. Use a wildcard * to collect logs for all log levels. Possible values are Debug, Info, Notice, Warning, Error, Critical, Alert, Emergency,and *. + LogLevels []*string `json:"logLevels,omitempty" tf:"log_levels,omitempty"` + + // The name which should be used for this data source. This name should be unique across all data sources regardless of type within the Data Collection Rule. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies a list of streams that this data source will be sent to. A stream indicates what schema will be used for this data and usually what table in Log Analytics the data will be sent to. + Streams []*string `json:"streams,omitempty" tf:"streams,omitempty"` +} + +type SyslogParameters struct { + + // Specifies a list of facility names. Use a wildcard * to collect logs for all facility names. Possible values are alert, *, audit, auth, authpriv, clock, cron, daemon, ftp, kern, local5, local4, local1, local7, local6, local3, local2, local0, lpr, mail, mark, news, nopri, ntp, syslog, user and uucp. + // +kubebuilder:validation:Optional + FacilityNames []*string `json:"facilityNames" tf:"facility_names,omitempty"` + + // Specifies a list of log levels. Use a wildcard * to collect logs for all log levels. Possible values are Debug, Info, Notice, Warning, Error, Critical, Alert, Emergency,and *. + // +kubebuilder:validation:Optional + LogLevels []*string `json:"logLevels" tf:"log_levels,omitempty"` + + // The name which should be used for this data source. This name should be unique across all data sources regardless of type within the Data Collection Rule. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Specifies a list of streams that this data source will be sent to. A stream indicates what schema will be used for this data and usually what table in Log Analytics the data will be sent to. + // +kubebuilder:validation:Optional + Streams []*string `json:"streams,omitempty" tf:"streams,omitempty"` +} + +type TextInitParameters struct { + + // The timestamp format of the text log files. Possible values are ISO 8601, YYYY-MM-DD HH:MM:SS, M/D/YYYY HH:MM:SS AM/PM, Mon DD, YYYY HH:MM:SS, yyMMdd HH:mm:ss, ddMMyy HH:mm:ss, MMM d hh:mm:ss, dd/MMM/yyyy:HH:mm:ss zzz,and yyyy-MM-ddTHH:mm:ssK. 
+ RecordStartTimestampFormat *string `json:"recordStartTimestampFormat,omitempty" tf:"record_start_timestamp_format,omitempty"` +} + +type TextObservation struct { + + // The timestamp format of the text log files. Possible values are ISO 8601, YYYY-MM-DD HH:MM:SS, M/D/YYYY HH:MM:SS AM/PM, Mon DD, YYYY HH:MM:SS, yyMMdd HH:mm:ss, ddMMyy HH:mm:ss, MMM d hh:mm:ss, dd/MMM/yyyy:HH:mm:ss zzz,and yyyy-MM-ddTHH:mm:ssK. + RecordStartTimestampFormat *string `json:"recordStartTimestampFormat,omitempty" tf:"record_start_timestamp_format,omitempty"` +} + +type TextParameters struct { + + // The timestamp format of the text log files. Possible values are ISO 8601, YYYY-MM-DD HH:MM:SS, M/D/YYYY HH:MM:SS AM/PM, Mon DD, YYYY HH:MM:SS, yyMMdd HH:mm:ss, ddMMyy HH:mm:ss, MMM d hh:mm:ss, dd/MMM/yyyy:HH:mm:ss zzz,and yyyy-MM-ddTHH:mm:ssK. + // +kubebuilder:validation:Optional + RecordStartTimestampFormat *string `json:"recordStartTimestampFormat" tf:"record_start_timestamp_format,omitempty"` +} + +type WindowsEventLogInitParameters struct { + + // The name which should be used for this data source. This name should be unique across all data sources regardless of type within the Data Collection Rule. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies a list of streams that this data source will be sent to. A stream indicates what schema will be used for this data and usually what table in Log Analytics the data will be sent to. + Streams []*string `json:"streams,omitempty" tf:"streams,omitempty"` + + // Specifies a list of Windows Event Log queries in XPath expression. Please see this document for more information. + XPathQueries []*string `json:"xPathQueries,omitempty" tf:"x_path_queries,omitempty"` +} + +type WindowsEventLogObservation struct { + + // The name which should be used for this data source. This name should be unique across all data sources regardless of type within the Data Collection Rule. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies a list of streams that this data source will be sent to. A stream indicates what schema will be used for this data and usually what table in Log Analytics the data will be sent to. + Streams []*string `json:"streams,omitempty" tf:"streams,omitempty"` + + // Specifies a list of Windows Event Log queries in XPath expression. Please see this document for more information. + XPathQueries []*string `json:"xPathQueries,omitempty" tf:"x_path_queries,omitempty"` +} + +type WindowsEventLogParameters struct { + + // The name which should be used for this data source. This name should be unique across all data sources regardless of type within the Data Collection Rule. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Specifies a list of streams that this data source will be sent to. A stream indicates what schema will be used for this data and usually what table in Log Analytics the data will be sent to. + // +kubebuilder:validation:Optional + Streams []*string `json:"streams" tf:"streams,omitempty"` + + // Specifies a list of Windows Event Log queries in XPath expression. Please see this document for more information. + // +kubebuilder:validation:Optional + XPathQueries []*string `json:"xPathQueries" tf:"x_path_queries,omitempty"` +} + +type WindowsFirewallLogInitParameters struct { + + // The name which should be used for this data source. This name should be unique across all data sources regardless of type within the Data Collection Rule. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies a list of streams that this data source will be sent to. A stream indicates what schema will be used for this data and usually what table in Log Analytics the data will be sent to. 
+ Streams []*string `json:"streams,omitempty" tf:"streams,omitempty"` +} + +type WindowsFirewallLogObservation struct { + + // The name which should be used for this data source. This name should be unique across all data sources regardless of type within the Data Collection Rule. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies a list of streams that this data source will be sent to. A stream indicates what schema will be used for this data and usually what table in Log Analytics the data will be sent to. + Streams []*string `json:"streams,omitempty" tf:"streams,omitempty"` +} + +type WindowsFirewallLogParameters struct { + + // The name which should be used for this data source. This name should be unique across all data sources regardless of type within the Data Collection Rule. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Specifies a list of streams that this data source will be sent to. A stream indicates what schema will be used for this data and usually what table in Log Analytics the data will be sent to. + // +kubebuilder:validation:Optional + Streams []*string `json:"streams" tf:"streams,omitempty"` +} + +// MonitorDataCollectionRuleSpec defines the desired state of MonitorDataCollectionRule +type MonitorDataCollectionRuleSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider MonitorDataCollectionRuleParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider MonitorDataCollectionRuleInitParameters `json:"initProvider,omitempty"` +} + +// MonitorDataCollectionRuleStatus defines the observed state of MonitorDataCollectionRule. +type MonitorDataCollectionRuleStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider MonitorDataCollectionRuleObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// MonitorDataCollectionRule is the Schema for the MonitorDataCollectionRules API. Manages a Data Collection Rule. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type MonitorDataCollectionRule struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.dataFlow) || (has(self.initProvider) && has(self.initProvider.dataFlow))",message="spec.forProvider.dataFlow is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.destinations) || (has(self.initProvider) && 
has(self.initProvider.destinations))",message="spec.forProvider.destinations is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + Spec MonitorDataCollectionRuleSpec `json:"spec"` + Status MonitorDataCollectionRuleStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// MonitorDataCollectionRuleList contains a list of MonitorDataCollectionRules +type MonitorDataCollectionRuleList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []MonitorDataCollectionRule `json:"items"` +} + +// Repository type metadata. +var ( + MonitorDataCollectionRule_Kind = "MonitorDataCollectionRule" + MonitorDataCollectionRule_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: MonitorDataCollectionRule_Kind}.String() + MonitorDataCollectionRule_KindAPIVersion = MonitorDataCollectionRule_Kind + "." + CRDGroupVersion.String() + MonitorDataCollectionRule_GroupVersionKind = CRDGroupVersion.WithKind(MonitorDataCollectionRule_Kind) +) + +func init() { + SchemeBuilder.Register(&MonitorDataCollectionRule{}, &MonitorDataCollectionRuleList{}) +} diff --git a/apis/insights/v1beta2/zz_monitordiagnosticsetting_terraformed.go b/apis/insights/v1beta2/zz_monitordiagnosticsetting_terraformed.go new file mode 100755 index 000000000..564da245c --- /dev/null +++ b/apis/insights/v1beta2/zz_monitordiagnosticsetting_terraformed.go @@ -0,0 +1,131 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this MonitorDiagnosticSetting +func (mg *MonitorDiagnosticSetting) GetTerraformResourceType() string { + return "azurerm_monitor_diagnostic_setting" +} + +// GetConnectionDetailsMapping for this MonitorDiagnosticSetting +func (tr *MonitorDiagnosticSetting) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this MonitorDiagnosticSetting +func (tr *MonitorDiagnosticSetting) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this MonitorDiagnosticSetting +func (tr *MonitorDiagnosticSetting) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this MonitorDiagnosticSetting +func (tr *MonitorDiagnosticSetting) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this MonitorDiagnosticSetting +func (tr *MonitorDiagnosticSetting) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this MonitorDiagnosticSetting +func (tr *MonitorDiagnosticSetting) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this 
MonitorDiagnosticSetting +func (tr *MonitorDiagnosticSetting) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this MonitorDiagnosticSetting +func (tr *MonitorDiagnosticSetting) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this MonitorDiagnosticSetting using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *MonitorDiagnosticSetting) LateInitialize(attrs []byte) (bool, error) { + params := &MonitorDiagnosticSettingParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + opts = append(opts, resource.WithNameFilter("EnabledLog")) + opts = append(opts, resource.WithNameFilter("Log")) + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *MonitorDiagnosticSetting) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/insights/v1beta2/zz_monitordiagnosticsetting_types.go b/apis/insights/v1beta2/zz_monitordiagnosticsetting_types.go new file mode 100755 index 000000000..6988642b9 --- /dev/null +++ b/apis/insights/v1beta2/zz_monitordiagnosticsetting_types.go @@ -0,0 +1,432 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type EnabledLogInitParameters struct { + + // The name of a Diagnostic Log Category for this Resource. + Category *string `json:"category,omitempty" tf:"category,omitempty"` + + // The name of a Diagnostic Log Category Group for this Resource. + CategoryGroup *string `json:"categoryGroup,omitempty" tf:"category_group,omitempty"` + + // A retention_policy block as defined below. 
+ RetentionPolicy *RetentionPolicyInitParameters `json:"retentionPolicy,omitempty" tf:"retention_policy,omitempty"` +} + +type EnabledLogObservation struct { + + // The name of a Diagnostic Log Category for this Resource. + Category *string `json:"category,omitempty" tf:"category,omitempty"` + + // The name of a Diagnostic Log Category Group for this Resource. + CategoryGroup *string `json:"categoryGroup,omitempty" tf:"category_group,omitempty"` + + // A retention_policy block as defined below. + RetentionPolicy *RetentionPolicyObservation `json:"retentionPolicy,omitempty" tf:"retention_policy,omitempty"` +} + +type EnabledLogParameters struct { + + // The name of a Diagnostic Log Category for this Resource. + // +kubebuilder:validation:Optional + Category *string `json:"category,omitempty" tf:"category,omitempty"` + + // The name of a Diagnostic Log Category Group for this Resource. + // +kubebuilder:validation:Optional + CategoryGroup *string `json:"categoryGroup,omitempty" tf:"category_group,omitempty"` + + // A retention_policy block as defined below. + // +kubebuilder:validation:Optional + RetentionPolicy *RetentionPolicyParameters `json:"retentionPolicy,omitempty" tf:"retention_policy,omitempty"` +} + +type LogInitParameters struct { + + // The name of a Diagnostic Log Category for this Resource. + Category *string `json:"category,omitempty" tf:"category,omitempty"` + + // The name of a Diagnostic Log Category Group for this Resource. + CategoryGroup *string `json:"categoryGroup,omitempty" tf:"category_group,omitempty"` + + // Is this Diagnostic Log enabled? Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // A retention_policy block as defined below. + RetentionPolicy *LogRetentionPolicyInitParameters `json:"retentionPolicy,omitempty" tf:"retention_policy,omitempty"` +} + +type LogObservation struct { + + // The name of a Diagnostic Log Category for this Resource. 
+ Category *string `json:"category,omitempty" tf:"category,omitempty"` + + // The name of a Diagnostic Log Category Group for this Resource. + CategoryGroup *string `json:"categoryGroup,omitempty" tf:"category_group,omitempty"` + + // Is this Diagnostic Log enabled? Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // A retention_policy block as defined below. + RetentionPolicy *LogRetentionPolicyObservation `json:"retentionPolicy,omitempty" tf:"retention_policy,omitempty"` +} + +type LogParameters struct { + + // The name of a Diagnostic Log Category for this Resource. + // +kubebuilder:validation:Optional + Category *string `json:"category,omitempty" tf:"category,omitempty"` + + // The name of a Diagnostic Log Category Group for this Resource. + // +kubebuilder:validation:Optional + CategoryGroup *string `json:"categoryGroup,omitempty" tf:"category_group,omitempty"` + + // Is this Diagnostic Log enabled? Defaults to true. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // A retention_policy block as defined below. + // +kubebuilder:validation:Optional + RetentionPolicy *LogRetentionPolicyParameters `json:"retentionPolicy,omitempty" tf:"retention_policy,omitempty"` +} + +type LogRetentionPolicyInitParameters struct { + + // The number of days for which this Retention Policy should apply. + Days *float64 `json:"days,omitempty" tf:"days,omitempty"` + + // Is this Retention Policy enabled? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type LogRetentionPolicyObservation struct { + + // The number of days for which this Retention Policy should apply. + Days *float64 `json:"days,omitempty" tf:"days,omitempty"` + + // Is this Retention Policy enabled? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type LogRetentionPolicyParameters struct { + + // The number of days for which this Retention Policy should apply. 
+ // +kubebuilder:validation:Optional + Days *float64 `json:"days,omitempty" tf:"days,omitempty"` + + // Is this Retention Policy enabled? + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` +} + +type MetricInitParameters struct { + + // The name of a Diagnostic Metric Category for this Resource. + Category *string `json:"category,omitempty" tf:"category,omitempty"` + + // Is this Diagnostic Metric enabled? Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // A retention_policy block as defined below. + RetentionPolicy *MetricRetentionPolicyInitParameters `json:"retentionPolicy,omitempty" tf:"retention_policy,omitempty"` +} + +type MetricObservation struct { + + // The name of a Diagnostic Metric Category for this Resource. + Category *string `json:"category,omitempty" tf:"category,omitempty"` + + // Is this Diagnostic Metric enabled? Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // A retention_policy block as defined below. + RetentionPolicy *MetricRetentionPolicyObservation `json:"retentionPolicy,omitempty" tf:"retention_policy,omitempty"` +} + +type MetricParameters struct { + + // The name of a Diagnostic Metric Category for this Resource. + // +kubebuilder:validation:Optional + Category *string `json:"category" tf:"category,omitempty"` + + // Is this Diagnostic Metric enabled? Defaults to true. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // A retention_policy block as defined below. + // +kubebuilder:validation:Optional + RetentionPolicy *MetricRetentionPolicyParameters `json:"retentionPolicy,omitempty" tf:"retention_policy,omitempty"` +} + +type MetricRetentionPolicyInitParameters struct { + + // The number of days for which this Retention Policy should apply. + Days *float64 `json:"days,omitempty" tf:"days,omitempty"` + + // Is this Retention Policy enabled? 
+ Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type MetricRetentionPolicyObservation struct { + + // The number of days for which this Retention Policy should apply. + Days *float64 `json:"days,omitempty" tf:"days,omitempty"` + + // Is this Retention Policy enabled? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type MetricRetentionPolicyParameters struct { + + // The number of days for which this Retention Policy should apply. + // +kubebuilder:validation:Optional + Days *float64 `json:"days,omitempty" tf:"days,omitempty"` + + // Is this Retention Policy enabled? + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` +} + +type MonitorDiagnosticSettingInitParameters struct { + + // One or more enabled_log blocks as defined below. + EnabledLog []EnabledLogInitParameters `json:"enabledLog,omitempty" tf:"enabled_log,omitempty"` + + // Specifies the ID of an Event Hub Namespace Authorization Rule used to send Diagnostics Data. + EventHubAuthorizationRuleID *string `json:"eventhubAuthorizationRuleId,omitempty" tf:"eventhub_authorization_rule_id,omitempty"` + + // Specifies the name of the Event Hub where Diagnostics Data should be sent. + EventHubName *string `json:"eventhubName,omitempty" tf:"eventhub_name,omitempty"` + + // One or more log blocks as defined below. + Log []LogInitParameters `json:"log,omitempty" tf:"log,omitempty"` + + // Possible values are AzureDiagnostics and Dedicated. When set to Dedicated, logs sent to a Log Analytics workspace will go into resource specific tables, instead of the legacy AzureDiagnostics table. + LogAnalyticsDestinationType *string `json:"logAnalyticsDestinationType,omitempty" tf:"log_analytics_destination_type,omitempty"` + + // Specifies the ID of a Log Analytics Workspace where Diagnostics Data should be sent. 
+ LogAnalyticsWorkspaceID *string `json:"logAnalyticsWorkspaceId,omitempty" tf:"log_analytics_workspace_id,omitempty"` + + // One or more metric blocks as defined below. + Metric []MetricInitParameters `json:"metric,omitempty" tf:"metric,omitempty"` + + // Specifies the name of the Diagnostic Setting. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The ID of the market partner solution where Diagnostics Data should be sent. For potential partner integrations, click to learn more about partner integration. + PartnerSolutionID *string `json:"partnerSolutionId,omitempty" tf:"partner_solution_id,omitempty"` + + // The ID of the Storage Account where logs should be sent. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` + + // Reference to a Account in storage to populate storageAccountId. + // +kubebuilder:validation:Optional + StorageAccountIDRef *v1.Reference `json:"storageAccountIdRef,omitempty" tf:"-"` + + // Selector for a Account in storage to populate storageAccountId. + // +kubebuilder:validation:Optional + StorageAccountIDSelector *v1.Selector `json:"storageAccountIdSelector,omitempty" tf:"-"` + + // The ID of an existing Resource on which to configure Diagnostic Settings. Changing this forces a new resource to be created. + TargetResourceID *string `json:"targetResourceId,omitempty" tf:"target_resource_id,omitempty"` +} + +type MonitorDiagnosticSettingObservation struct { + + // One or more enabled_log blocks as defined below. + EnabledLog []EnabledLogObservation `json:"enabledLog,omitempty" tf:"enabled_log,omitempty"` + + // Specifies the ID of an Event Hub Namespace Authorization Rule used to send Diagnostics Data. 
+ EventHubAuthorizationRuleID *string `json:"eventhubAuthorizationRuleId,omitempty" tf:"eventhub_authorization_rule_id,omitempty"` + + // Specifies the name of the Event Hub where Diagnostics Data should be sent. + EventHubName *string `json:"eventhubName,omitempty" tf:"eventhub_name,omitempty"` + + // The ID of the Diagnostic Setting. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // One or more log blocks as defined below. + Log []LogObservation `json:"log,omitempty" tf:"log,omitempty"` + + // Possible values are AzureDiagnostics and Dedicated. When set to Dedicated, logs sent to a Log Analytics workspace will go into resource specific tables, instead of the legacy AzureDiagnostics table. + LogAnalyticsDestinationType *string `json:"logAnalyticsDestinationType,omitempty" tf:"log_analytics_destination_type,omitempty"` + + // Specifies the ID of a Log Analytics Workspace where Diagnostics Data should be sent. + LogAnalyticsWorkspaceID *string `json:"logAnalyticsWorkspaceId,omitempty" tf:"log_analytics_workspace_id,omitempty"` + + // One or more metric blocks as defined below. + Metric []MetricObservation `json:"metric,omitempty" tf:"metric,omitempty"` + + // Specifies the name of the Diagnostic Setting. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The ID of the market partner solution where Diagnostics Data should be sent. For potential partner integrations, click to learn more about partner integration. + PartnerSolutionID *string `json:"partnerSolutionId,omitempty" tf:"partner_solution_id,omitempty"` + + // The ID of the Storage Account where logs should be sent. + StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` + + // The ID of an existing Resource on which to configure Diagnostic Settings. Changing this forces a new resource to be created. 
+ TargetResourceID *string `json:"targetResourceId,omitempty" tf:"target_resource_id,omitempty"` +} + +type MonitorDiagnosticSettingParameters struct { + + // One or more enabled_log blocks as defined below. + // +kubebuilder:validation:Optional + EnabledLog []EnabledLogParameters `json:"enabledLog,omitempty" tf:"enabled_log,omitempty"` + + // Specifies the ID of an Event Hub Namespace Authorization Rule used to send Diagnostics Data. + // +kubebuilder:validation:Optional + EventHubAuthorizationRuleID *string `json:"eventhubAuthorizationRuleId,omitempty" tf:"eventhub_authorization_rule_id,omitempty"` + + // Specifies the name of the Event Hub where Diagnostics Data should be sent. + // +kubebuilder:validation:Optional + EventHubName *string `json:"eventhubName,omitempty" tf:"eventhub_name,omitempty"` + + // One or more log blocks as defined below. + // +kubebuilder:validation:Optional + Log []LogParameters `json:"log,omitempty" tf:"log,omitempty"` + + // Possible values are AzureDiagnostics and Dedicated. When set to Dedicated, logs sent to a Log Analytics workspace will go into resource specific tables, instead of the legacy AzureDiagnostics table. + // +kubebuilder:validation:Optional + LogAnalyticsDestinationType *string `json:"logAnalyticsDestinationType,omitempty" tf:"log_analytics_destination_type,omitempty"` + + // Specifies the ID of a Log Analytics Workspace where Diagnostics Data should be sent. + // +kubebuilder:validation:Optional + LogAnalyticsWorkspaceID *string `json:"logAnalyticsWorkspaceId,omitempty" tf:"log_analytics_workspace_id,omitempty"` + + // One or more metric blocks as defined below. + // +kubebuilder:validation:Optional + Metric []MetricParameters `json:"metric,omitempty" tf:"metric,omitempty"` + + // Specifies the name of the Diagnostic Setting. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The ID of the market partner solution where Diagnostics Data should be sent. For potential partner integrations, click to learn more about partner integration. + // +kubebuilder:validation:Optional + PartnerSolutionID *string `json:"partnerSolutionId,omitempty" tf:"partner_solution_id,omitempty"` + + // The ID of the Storage Account where logs should be sent. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` + + // Reference to a Account in storage to populate storageAccountId. + // +kubebuilder:validation:Optional + StorageAccountIDRef *v1.Reference `json:"storageAccountIdRef,omitempty" tf:"-"` + + // Selector for a Account in storage to populate storageAccountId. + // +kubebuilder:validation:Optional + StorageAccountIDSelector *v1.Selector `json:"storageAccountIdSelector,omitempty" tf:"-"` + + // The ID of an existing Resource on which to configure Diagnostic Settings. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + TargetResourceID *string `json:"targetResourceId,omitempty" tf:"target_resource_id,omitempty"` +} + +type RetentionPolicyInitParameters struct { + + // The number of days for which this Retention Policy should apply. + Days *float64 `json:"days,omitempty" tf:"days,omitempty"` + + // Is this Retention Policy enabled? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type RetentionPolicyObservation struct { + + // The number of days for which this Retention Policy should apply. + Days *float64 `json:"days,omitempty" tf:"days,omitempty"` + + // Is this Retention Policy enabled? 
+ Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type RetentionPolicyParameters struct { + + // The number of days for which this Retention Policy should apply. + // +kubebuilder:validation:Optional + Days *float64 `json:"days,omitempty" tf:"days,omitempty"` + + // Is this Retention Policy enabled? + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` +} + +// MonitorDiagnosticSettingSpec defines the desired state of MonitorDiagnosticSetting +type MonitorDiagnosticSettingSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider MonitorDiagnosticSettingParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider MonitorDiagnosticSettingInitParameters `json:"initProvider,omitempty"` +} + +// MonitorDiagnosticSettingStatus defines the observed state of MonitorDiagnosticSetting. +type MonitorDiagnosticSettingStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider MonitorDiagnosticSettingObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// MonitorDiagnosticSetting is the Schema for the MonitorDiagnosticSettings API. Manages a Diagnostic Setting for an existing Resource. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type MonitorDiagnosticSetting struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.targetResourceId) || (has(self.initProvider) && has(self.initProvider.targetResourceId))",message="spec.forProvider.targetResourceId is a required parameter" + Spec MonitorDiagnosticSettingSpec `json:"spec"` + Status MonitorDiagnosticSettingStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// MonitorDiagnosticSettingList contains a list of MonitorDiagnosticSettings +type MonitorDiagnosticSettingList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []MonitorDiagnosticSetting `json:"items"` +} + +// Repository type metadata. 
+var ( + MonitorDiagnosticSetting_Kind = "MonitorDiagnosticSetting" + MonitorDiagnosticSetting_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: MonitorDiagnosticSetting_Kind}.String() + MonitorDiagnosticSetting_KindAPIVersion = MonitorDiagnosticSetting_Kind + "." + CRDGroupVersion.String() + MonitorDiagnosticSetting_GroupVersionKind = CRDGroupVersion.WithKind(MonitorDiagnosticSetting_Kind) +) + +func init() { + SchemeBuilder.Register(&MonitorDiagnosticSetting{}, &MonitorDiagnosticSettingList{}) +} diff --git a/apis/insights/v1beta2/zz_monitormetricalert_terraformed.go b/apis/insights/v1beta2/zz_monitormetricalert_terraformed.go new file mode 100755 index 000000000..67edc1434 --- /dev/null +++ b/apis/insights/v1beta2/zz_monitormetricalert_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this MonitorMetricAlert +func (mg *MonitorMetricAlert) GetTerraformResourceType() string { + return "azurerm_monitor_metric_alert" +} + +// GetConnectionDetailsMapping for this MonitorMetricAlert +func (tr *MonitorMetricAlert) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this MonitorMetricAlert +func (tr *MonitorMetricAlert) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this MonitorMetricAlert +func (tr *MonitorMetricAlert) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, 
&tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this MonitorMetricAlert +func (tr *MonitorMetricAlert) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this MonitorMetricAlert +func (tr *MonitorMetricAlert) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this MonitorMetricAlert +func (tr *MonitorMetricAlert) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this MonitorMetricAlert +func (tr *MonitorMetricAlert) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this MonitorMetricAlert +func (tr *MonitorMetricAlert) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this MonitorMetricAlert using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *MonitorMetricAlert) LateInitialize(attrs []byte) (bool, error) { + params := &MonitorMetricAlertParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *MonitorMetricAlert) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/insights/v1beta2/zz_monitormetricalert_types.go b/apis/insights/v1beta2/zz_monitormetricalert_types.go new file mode 100755 index 000000000..dba94936c --- /dev/null +++ b/apis/insights/v1beta2/zz_monitormetricalert_types.go @@ -0,0 +1,631 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ApplicationInsightsWebTestLocationAvailabilityCriteriaInitParameters struct { + + // The ID of the Application Insights Resource. + ComponentID *string `json:"componentId,omitempty" tf:"component_id,omitempty"` + + // The number of failed locations. 
+ FailedLocationCount *float64 `json:"failedLocationCount,omitempty" tf:"failed_location_count,omitempty"` + + // The ID of the Application Insights Web Test. + WebTestID *string `json:"webTestId,omitempty" tf:"web_test_id,omitempty"` +} + +type ApplicationInsightsWebTestLocationAvailabilityCriteriaObservation struct { + + // The ID of the Application Insights Resource. + ComponentID *string `json:"componentId,omitempty" tf:"component_id,omitempty"` + + // The number of failed locations. + FailedLocationCount *float64 `json:"failedLocationCount,omitempty" tf:"failed_location_count,omitempty"` + + // The ID of the Application Insights Web Test. + WebTestID *string `json:"webTestId,omitempty" tf:"web_test_id,omitempty"` +} + +type ApplicationInsightsWebTestLocationAvailabilityCriteriaParameters struct { + + // The ID of the Application Insights Resource. + // +kubebuilder:validation:Optional + ComponentID *string `json:"componentId" tf:"component_id,omitempty"` + + // The number of failed locations. + // +kubebuilder:validation:Optional + FailedLocationCount *float64 `json:"failedLocationCount" tf:"failed_location_count,omitempty"` + + // The ID of the Application Insights Web Test. + // +kubebuilder:validation:Optional + WebTestID *string `json:"webTestId" tf:"web_test_id,omitempty"` +} + +type DimensionInitParameters struct { + + // The name of the Metric Alert. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The criteria operator. Possible values are LessThan, GreaterThan and GreaterOrLessThan. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // The list of dimension values. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type DimensionObservation struct { + + // The name of the Metric Alert. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The criteria operator. 
Possible values are LessThan, GreaterThan and GreaterOrLessThan. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // The list of dimension values. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type DimensionParameters struct { + + // The name of the Metric Alert. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The criteria operator. Possible values are LessThan, GreaterThan and GreaterOrLessThan. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // The list of dimension values. + // +kubebuilder:validation:Optional + Values []*string `json:"values" tf:"values,omitempty"` +} + +type DynamicCriteriaDimensionInitParameters struct { + + // The name of the Metric Alert. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The criteria operator. Possible values are LessThan, GreaterThan and GreaterOrLessThan. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // The list of dimension values. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type DynamicCriteriaDimensionObservation struct { + + // The name of the Metric Alert. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The criteria operator. Possible values are LessThan, GreaterThan and GreaterOrLessThan. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // The list of dimension values. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type DynamicCriteriaDimensionParameters struct { + + // The name of the Metric Alert. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The criteria operator. 
Possible values are LessThan, GreaterThan and GreaterOrLessThan. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // The list of dimension values. + // +kubebuilder:validation:Optional + Values []*string `json:"values" tf:"values,omitempty"` +} + +type DynamicCriteriaInitParameters struct { + + // The statistic that runs over the metric values. Possible values are Average, Count, Minimum, Maximum and Total. + Aggregation *string `json:"aggregation,omitempty" tf:"aggregation,omitempty"` + + // The extent of deviation required to trigger an alert. Possible values are Low, Medium and High. + AlertSensitivity *string `json:"alertSensitivity,omitempty" tf:"alert_sensitivity,omitempty"` + + // One or more dimension blocks as defined below. + Dimension []DynamicCriteriaDimensionInitParameters `json:"dimension,omitempty" tf:"dimension,omitempty"` + + // The number of violations to trigger an alert. Should be smaller or equal to evaluation_total_count. Defaults to 4. + EvaluationFailureCount *float64 `json:"evaluationFailureCount,omitempty" tf:"evaluation_failure_count,omitempty"` + + // The number of aggregated lookback points. The lookback time window is calculated based on the aggregation granularity (window_size) and the selected number of aggregated points. Defaults to 4. + EvaluationTotalCount *float64 `json:"evaluationTotalCount,omitempty" tf:"evaluation_total_count,omitempty"` + + // The ISO8601 date from which to start learning the metric historical data and calculate the dynamic thresholds. + IgnoreDataBefore *string `json:"ignoreDataBefore,omitempty" tf:"ignore_data_before,omitempty"` + + // One of the metric names to be monitored. + MetricName *string `json:"metricName,omitempty" tf:"metric_name,omitempty"` + + // One of the metric namespaces to be monitored. + MetricNamespace *string `json:"metricNamespace,omitempty" tf:"metric_namespace,omitempty"` + + // The criteria operator. 
Possible values are LessThan, GreaterThan and GreaterOrLessThan. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Skip the metric validation to allow creating an alert rule on a custom metric that isn't yet emitted? + SkipMetricValidation *bool `json:"skipMetricValidation,omitempty" tf:"skip_metric_validation,omitempty"` +} + +type DynamicCriteriaObservation struct { + + // The statistic that runs over the metric values. Possible values are Average, Count, Minimum, Maximum and Total. + Aggregation *string `json:"aggregation,omitempty" tf:"aggregation,omitempty"` + + // The extent of deviation required to trigger an alert. Possible values are Low, Medium and High. + AlertSensitivity *string `json:"alertSensitivity,omitempty" tf:"alert_sensitivity,omitempty"` + + // One or more dimension blocks as defined below. + Dimension []DynamicCriteriaDimensionObservation `json:"dimension,omitempty" tf:"dimension,omitempty"` + + // The number of violations to trigger an alert. Should be smaller or equal to evaluation_total_count. Defaults to 4. + EvaluationFailureCount *float64 `json:"evaluationFailureCount,omitempty" tf:"evaluation_failure_count,omitempty"` + + // The number of aggregated lookback points. The lookback time window is calculated based on the aggregation granularity (window_size) and the selected number of aggregated points. Defaults to 4. + EvaluationTotalCount *float64 `json:"evaluationTotalCount,omitempty" tf:"evaluation_total_count,omitempty"` + + // The ISO8601 date from which to start learning the metric historical data and calculate the dynamic thresholds. + IgnoreDataBefore *string `json:"ignoreDataBefore,omitempty" tf:"ignore_data_before,omitempty"` + + // One of the metric names to be monitored. + MetricName *string `json:"metricName,omitempty" tf:"metric_name,omitempty"` + + // One of the metric namespaces to be monitored. 
+ MetricNamespace *string `json:"metricNamespace,omitempty" tf:"metric_namespace,omitempty"` + + // The criteria operator. Possible values are LessThan, GreaterThan and GreaterOrLessThan. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Skip the metric validation to allow creating an alert rule on a custom metric that isn't yet emitted? + SkipMetricValidation *bool `json:"skipMetricValidation,omitempty" tf:"skip_metric_validation,omitempty"` +} + +type DynamicCriteriaParameters struct { + + // The statistic that runs over the metric values. Possible values are Average, Count, Minimum, Maximum and Total. + // +kubebuilder:validation:Optional + Aggregation *string `json:"aggregation" tf:"aggregation,omitempty"` + + // The extent of deviation required to trigger an alert. Possible values are Low, Medium and High. + // +kubebuilder:validation:Optional + AlertSensitivity *string `json:"alertSensitivity" tf:"alert_sensitivity,omitempty"` + + // One or more dimension blocks as defined below. + // +kubebuilder:validation:Optional + Dimension []DynamicCriteriaDimensionParameters `json:"dimension,omitempty" tf:"dimension,omitempty"` + + // The number of violations to trigger an alert. Should be smaller or equal to evaluation_total_count. Defaults to 4. + // +kubebuilder:validation:Optional + EvaluationFailureCount *float64 `json:"evaluationFailureCount,omitempty" tf:"evaluation_failure_count,omitempty"` + + // The number of aggregated lookback points. The lookback time window is calculated based on the aggregation granularity (window_size) and the selected number of aggregated points. Defaults to 4. + // +kubebuilder:validation:Optional + EvaluationTotalCount *float64 `json:"evaluationTotalCount,omitempty" tf:"evaluation_total_count,omitempty"` + + // The ISO8601 date from which to start learning the metric historical data and calculate the dynamic thresholds. 
+ // +kubebuilder:validation:Optional + IgnoreDataBefore *string `json:"ignoreDataBefore,omitempty" tf:"ignore_data_before,omitempty"` + + // One of the metric names to be monitored. + // +kubebuilder:validation:Optional + MetricName *string `json:"metricName" tf:"metric_name,omitempty"` + + // One of the metric namespaces to be monitored. + // +kubebuilder:validation:Optional + MetricNamespace *string `json:"metricNamespace" tf:"metric_namespace,omitempty"` + + // The criteria operator. Possible values are LessThan, GreaterThan and GreaterOrLessThan. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // Skip the metric validation to allow creating an alert rule on a custom metric that isn't yet emitted? + // +kubebuilder:validation:Optional + SkipMetricValidation *bool `json:"skipMetricValidation,omitempty" tf:"skip_metric_validation,omitempty"` +} + +type MonitorMetricAlertActionInitParameters struct { + + // The ID of the Action Group can be sourced from the + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/insights/v1beta2.MonitorActionGroup + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + ActionGroupID *string `json:"actionGroupId,omitempty" tf:"action_group_id,omitempty"` + + // Reference to a MonitorActionGroup in insights to populate actionGroupId. + // +kubebuilder:validation:Optional + ActionGroupIDRef *v1.Reference `json:"actionGroupIdRef,omitempty" tf:"-"` + + // Selector for a MonitorActionGroup in insights to populate actionGroupId. + // +kubebuilder:validation:Optional + ActionGroupIDSelector *v1.Selector `json:"actionGroupIdSelector,omitempty" tf:"-"` + + // The map of custom string properties to include with the post operation. These data are appended to the webhook payload. 
+ // +mapType=granular + WebhookProperties map[string]*string `json:"webhookProperties,omitempty" tf:"webhook_properties,omitempty"` +} + +type MonitorMetricAlertActionObservation struct { + + // The ID of the Action Group can be sourced from the + ActionGroupID *string `json:"actionGroupId,omitempty" tf:"action_group_id,omitempty"` + + // The map of custom string properties to include with the post operation. These data are appended to the webhook payload. + // +mapType=granular + WebhookProperties map[string]*string `json:"webhookProperties,omitempty" tf:"webhook_properties,omitempty"` +} + +type MonitorMetricAlertActionParameters struct { + + // The ID of the Action Group can be sourced from the + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/insights/v1beta2.MonitorActionGroup + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + ActionGroupID *string `json:"actionGroupId,omitempty" tf:"action_group_id,omitempty"` + + // Reference to a MonitorActionGroup in insights to populate actionGroupId. + // +kubebuilder:validation:Optional + ActionGroupIDRef *v1.Reference `json:"actionGroupIdRef,omitempty" tf:"-"` + + // Selector for a MonitorActionGroup in insights to populate actionGroupId. + // +kubebuilder:validation:Optional + ActionGroupIDSelector *v1.Selector `json:"actionGroupIdSelector,omitempty" tf:"-"` + + // The map of custom string properties to include with the post operation. These data are appended to the webhook payload. + // +kubebuilder:validation:Optional + // +mapType=granular + WebhookProperties map[string]*string `json:"webhookProperties,omitempty" tf:"webhook_properties,omitempty"` +} + +type MonitorMetricAlertCriteriaInitParameters struct { + + // The statistic that runs over the metric values. Possible values are Average, Count, Minimum, Maximum and Total. 
+ Aggregation *string `json:"aggregation,omitempty" tf:"aggregation,omitempty"` + + // One or more dimension blocks as defined below. + Dimension []DimensionInitParameters `json:"dimension,omitempty" tf:"dimension,omitempty"` + + // One of the metric names to be monitored. + MetricName *string `json:"metricName,omitempty" tf:"metric_name,omitempty"` + + // One of the metric namespaces to be monitored. + MetricNamespace *string `json:"metricNamespace,omitempty" tf:"metric_namespace,omitempty"` + + // The criteria operator. Possible values are Equals, GreaterThan, GreaterThanOrEqual, LessThan and LessThanOrEqual. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Skip the metric validation to allow creating an alert rule on a custom metric that isn't yet emitted? Defaults to false. + SkipMetricValidation *bool `json:"skipMetricValidation,omitempty" tf:"skip_metric_validation,omitempty"` + + // The criteria threshold value that activates the alert. + Threshold *float64 `json:"threshold,omitempty" tf:"threshold,omitempty"` +} + +type MonitorMetricAlertCriteriaObservation struct { + + // The statistic that runs over the metric values. Possible values are Average, Count, Minimum, Maximum and Total. + Aggregation *string `json:"aggregation,omitempty" tf:"aggregation,omitempty"` + + // One or more dimension blocks as defined below. + Dimension []DimensionObservation `json:"dimension,omitempty" tf:"dimension,omitempty"` + + // One of the metric names to be monitored. + MetricName *string `json:"metricName,omitempty" tf:"metric_name,omitempty"` + + // One of the metric namespaces to be monitored. + MetricNamespace *string `json:"metricNamespace,omitempty" tf:"metric_namespace,omitempty"` + + // The criteria operator. Possible values are Equals, GreaterThan, GreaterThanOrEqual, LessThan and LessThanOrEqual. 
+ Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Skip the metric validation to allow creating an alert rule on a custom metric that isn't yet emitted? Defaults to false. + SkipMetricValidation *bool `json:"skipMetricValidation,omitempty" tf:"skip_metric_validation,omitempty"` + + // The criteria threshold value that activates the alert. + Threshold *float64 `json:"threshold,omitempty" tf:"threshold,omitempty"` +} + +type MonitorMetricAlertCriteriaParameters struct { + + // The statistic that runs over the metric values. Possible values are Average, Count, Minimum, Maximum and Total. + // +kubebuilder:validation:Optional + Aggregation *string `json:"aggregation" tf:"aggregation,omitempty"` + + // One or more dimension blocks as defined below. + // +kubebuilder:validation:Optional + Dimension []DimensionParameters `json:"dimension,omitempty" tf:"dimension,omitempty"` + + // One of the metric names to be monitored. + // +kubebuilder:validation:Optional + MetricName *string `json:"metricName" tf:"metric_name,omitempty"` + + // One of the metric namespaces to be monitored. + // +kubebuilder:validation:Optional + MetricNamespace *string `json:"metricNamespace" tf:"metric_namespace,omitempty"` + + // The criteria operator. Possible values are Equals, GreaterThan, GreaterThanOrEqual, LessThan and LessThanOrEqual. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // Skip the metric validation to allow creating an alert rule on a custom metric that isn't yet emitted? Defaults to false. + // +kubebuilder:validation:Optional + SkipMetricValidation *bool `json:"skipMetricValidation,omitempty" tf:"skip_metric_validation,omitempty"` + + // The criteria threshold value that activates the alert. + // +kubebuilder:validation:Optional + Threshold *float64 `json:"threshold" tf:"threshold,omitempty"` +} + +type MonitorMetricAlertInitParameters struct { + + // One or more action blocks as defined below. 
+ Action []MonitorMetricAlertActionInitParameters `json:"action,omitempty" tf:"action,omitempty"` + + // A application_insights_web_test_location_availability_criteria block as defined below. + ApplicationInsightsWebTestLocationAvailabilityCriteria *ApplicationInsightsWebTestLocationAvailabilityCriteriaInitParameters `json:"applicationInsightsWebTestLocationAvailabilityCriteria,omitempty" tf:"application_insights_web_test_location_availability_criteria,omitempty"` + + // Should the alerts in this Metric Alert be auto resolved? Defaults to true. + AutoMitigate *bool `json:"autoMitigate,omitempty" tf:"auto_mitigate,omitempty"` + + // One or more (static) criteria blocks as defined below. + Criteria []MonitorMetricAlertCriteriaInitParameters `json:"criteria,omitempty" tf:"criteria,omitempty"` + + // The description of this Metric Alert. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A dynamic_criteria block as defined below. + DynamicCriteria *DynamicCriteriaInitParameters `json:"dynamicCriteria,omitempty" tf:"dynamic_criteria,omitempty"` + + // Should this Metric Alert be enabled? Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The evaluation frequency of this Metric Alert, represented in ISO 8601 duration format. Possible values are PT1M, PT5M, PT15M, PT30M and PT1H. Defaults to PT1M. + Frequency *string `json:"frequency,omitempty" tf:"frequency,omitempty"` + + // A set of strings of resource IDs at which the metric criteria should be applied. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +listType=set + Scopes []*string `json:"scopes,omitempty" tf:"scopes,omitempty"` + + // References to Account in storage to populate scopes. 
+ // +kubebuilder:validation:Optional + ScopesRefs []v1.Reference `json:"scopesRefs,omitempty" tf:"-"` + + // Selector for a list of Account in storage to populate scopes. + // +kubebuilder:validation:Optional + ScopesSelector *v1.Selector `json:"scopesSelector,omitempty" tf:"-"` + + // The severity of this Metric Alert. Possible values are 0, 1, 2, 3 and 4. Defaults to 3. + Severity *float64 `json:"severity,omitempty" tf:"severity,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The location of the target resource. + // The location of the target pluginsdk. Required when using subscription, resource group scope or multiple scopes. + TargetResourceLocation *string `json:"targetResourceLocation,omitempty" tf:"target_resource_location,omitempty"` + + // The resource type (e.g. Microsoft.Compute/virtualMachines) of the target resource. + // The resource type (e.g. Microsoft.Compute/virtualMachines) of the target pluginsdk. Required when using subscription, resource group scope or multiple scopes. + TargetResourceType *string `json:"targetResourceType,omitempty" tf:"target_resource_type,omitempty"` + + // The period of time that is used to monitor alert activity, represented in ISO 8601 duration format. This value must be greater than frequency. Possible values are PT1M, PT5M, PT15M, PT30M, PT1H, PT6H, PT12H and P1D. Defaults to PT5M. + WindowSize *string `json:"windowSize,omitempty" tf:"window_size,omitempty"` +} + +type MonitorMetricAlertObservation struct { + + // One or more action blocks as defined below. + Action []MonitorMetricAlertActionObservation `json:"action,omitempty" tf:"action,omitempty"` + + // A application_insights_web_test_location_availability_criteria block as defined below. 
+ ApplicationInsightsWebTestLocationAvailabilityCriteria *ApplicationInsightsWebTestLocationAvailabilityCriteriaObservation `json:"applicationInsightsWebTestLocationAvailabilityCriteria,omitempty" tf:"application_insights_web_test_location_availability_criteria,omitempty"` + + // Should the alerts in this Metric Alert be auto resolved? Defaults to true. + AutoMitigate *bool `json:"autoMitigate,omitempty" tf:"auto_mitigate,omitempty"` + + // One or more (static) criteria blocks as defined below. + Criteria []MonitorMetricAlertCriteriaObservation `json:"criteria,omitempty" tf:"criteria,omitempty"` + + // The description of this Metric Alert. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A dynamic_criteria block as defined below. + DynamicCriteria *DynamicCriteriaObservation `json:"dynamicCriteria,omitempty" tf:"dynamic_criteria,omitempty"` + + // Should this Metric Alert be enabled? Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The evaluation frequency of this Metric Alert, represented in ISO 8601 duration format. Possible values are PT1M, PT5M, PT15M, PT30M and PT1H. Defaults to PT1M. + Frequency *string `json:"frequency,omitempty" tf:"frequency,omitempty"` + + // The ID of the metric alert. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name of the resource group in which to create the Metric Alert instance. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A set of strings of resource IDs at which the metric criteria should be applied. + // +listType=set + Scopes []*string `json:"scopes,omitempty" tf:"scopes,omitempty"` + + // The severity of this Metric Alert. Possible values are 0, 1, 2, 3 and 4. Defaults to 3. + Severity *float64 `json:"severity,omitempty" tf:"severity,omitempty"` + + // A mapping of tags to assign to the resource. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The location of the target resource. + // The location of the target pluginsdk. Required when using subscription, resource group scope or multiple scopes. + TargetResourceLocation *string `json:"targetResourceLocation,omitempty" tf:"target_resource_location,omitempty"` + + // The resource type (e.g. Microsoft.Compute/virtualMachines) of the target resource. + // The resource type (e.g. Microsoft.Compute/virtualMachines) of the target pluginsdk. Required when using subscription, resource group scope or multiple scopes. + TargetResourceType *string `json:"targetResourceType,omitempty" tf:"target_resource_type,omitempty"` + + // The period of time that is used to monitor alert activity, represented in ISO 8601 duration format. This value must be greater than frequency. Possible values are PT1M, PT5M, PT15M, PT30M, PT1H, PT6H, PT12H and P1D. Defaults to PT5M. + WindowSize *string `json:"windowSize,omitempty" tf:"window_size,omitempty"` +} + +type MonitorMetricAlertParameters struct { + + // One or more action blocks as defined below. + // +kubebuilder:validation:Optional + Action []MonitorMetricAlertActionParameters `json:"action,omitempty" tf:"action,omitempty"` + + // A application_insights_web_test_location_availability_criteria block as defined below. + // +kubebuilder:validation:Optional + ApplicationInsightsWebTestLocationAvailabilityCriteria *ApplicationInsightsWebTestLocationAvailabilityCriteriaParameters `json:"applicationInsightsWebTestLocationAvailabilityCriteria,omitempty" tf:"application_insights_web_test_location_availability_criteria,omitempty"` + + // Should the alerts in this Metric Alert be auto resolved? Defaults to true. + // +kubebuilder:validation:Optional + AutoMitigate *bool `json:"autoMitigate,omitempty" tf:"auto_mitigate,omitempty"` + + // One or more (static) criteria blocks as defined below. 
+ // +kubebuilder:validation:Optional + Criteria []MonitorMetricAlertCriteriaParameters `json:"criteria,omitempty" tf:"criteria,omitempty"` + + // The description of this Metric Alert. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A dynamic_criteria block as defined below. + // +kubebuilder:validation:Optional + DynamicCriteria *DynamicCriteriaParameters `json:"dynamicCriteria,omitempty" tf:"dynamic_criteria,omitempty"` + + // Should this Metric Alert be enabled? Defaults to true. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The evaluation frequency of this Metric Alert, represented in ISO 8601 duration format. Possible values are PT1M, PT5M, PT15M, PT30M and PT1H. Defaults to PT1M. + // +kubebuilder:validation:Optional + Frequency *string `json:"frequency,omitempty" tf:"frequency,omitempty"` + + // The name of the resource group in which to create the Metric Alert instance. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A set of strings of resource IDs at which the metric criteria should be applied. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + // +listType=set + Scopes []*string `json:"scopes,omitempty" tf:"scopes,omitempty"` + + // References to Account in storage to populate scopes. + // +kubebuilder:validation:Optional + ScopesRefs []v1.Reference `json:"scopesRefs,omitempty" tf:"-"` + + // Selector for a list of Account in storage to populate scopes. + // +kubebuilder:validation:Optional + ScopesSelector *v1.Selector `json:"scopesSelector,omitempty" tf:"-"` + + // The severity of this Metric Alert. Possible values are 0, 1, 2, 3 and 4. Defaults to 3. + // +kubebuilder:validation:Optional + Severity *float64 `json:"severity,omitempty" tf:"severity,omitempty"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The location of the target resource. + // The location of the target pluginsdk. Required when using subscription, resource group scope or multiple scopes. + // +kubebuilder:validation:Optional + TargetResourceLocation *string `json:"targetResourceLocation,omitempty" tf:"target_resource_location,omitempty"` + + // The resource type (e.g. Microsoft.Compute/virtualMachines) of the target resource. + // The resource type (e.g. Microsoft.Compute/virtualMachines) of the target pluginsdk. Required when using subscription, resource group scope or multiple scopes. + // +kubebuilder:validation:Optional + TargetResourceType *string `json:"targetResourceType,omitempty" tf:"target_resource_type,omitempty"` + + // The period of time that is used to monitor alert activity, represented in ISO 8601 duration format. This value must be greater than frequency. 
Possible values are PT1M, PT5M, PT15M, PT30M, PT1H, PT6H, PT12H and P1D. Defaults to PT5M. + // +kubebuilder:validation:Optional + WindowSize *string `json:"windowSize,omitempty" tf:"window_size,omitempty"` +} + +// MonitorMetricAlertSpec defines the desired state of MonitorMetricAlert +type MonitorMetricAlertSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider MonitorMetricAlertParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider MonitorMetricAlertInitParameters `json:"initProvider,omitempty"` +} + +// MonitorMetricAlertStatus defines the observed state of MonitorMetricAlert. +type MonitorMetricAlertStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider MonitorMetricAlertObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// MonitorMetricAlert is the Schema for the MonitorMetricAlerts API. 
Manages a Metric Alert within Azure Monitor +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type MonitorMetricAlert struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec MonitorMetricAlertSpec `json:"spec"` + Status MonitorMetricAlertStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// MonitorMetricAlertList contains a list of MonitorMetricAlerts +type MonitorMetricAlertList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []MonitorMetricAlert `json:"items"` +} + +// Repository type metadata. +var ( + MonitorMetricAlert_Kind = "MonitorMetricAlert" + MonitorMetricAlert_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: MonitorMetricAlert_Kind}.String() + MonitorMetricAlert_KindAPIVersion = MonitorMetricAlert_Kind + "." + CRDGroupVersion.String() + MonitorMetricAlert_GroupVersionKind = CRDGroupVersion.WithKind(MonitorMetricAlert_Kind) +) + +func init() { + SchemeBuilder.Register(&MonitorMetricAlert{}, &MonitorMetricAlertList{}) +} diff --git a/apis/insights/v1beta2/zz_monitorscheduledqueryrulesalert_terraformed.go b/apis/insights/v1beta2/zz_monitorscheduledqueryrulesalert_terraformed.go new file mode 100755 index 000000000..5c4f35cbe --- /dev/null +++ b/apis/insights/v1beta2/zz_monitorscheduledqueryrulesalert_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. 
DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this MonitorScheduledQueryRulesAlert +func (mg *MonitorScheduledQueryRulesAlert) GetTerraformResourceType() string { + return "azurerm_monitor_scheduled_query_rules_alert" +} + +// GetConnectionDetailsMapping for this MonitorScheduledQueryRulesAlert +func (tr *MonitorScheduledQueryRulesAlert) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this MonitorScheduledQueryRulesAlert +func (tr *MonitorScheduledQueryRulesAlert) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this MonitorScheduledQueryRulesAlert +func (tr *MonitorScheduledQueryRulesAlert) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this MonitorScheduledQueryRulesAlert +func (tr *MonitorScheduledQueryRulesAlert) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this MonitorScheduledQueryRulesAlert +func (tr *MonitorScheduledQueryRulesAlert) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this MonitorScheduledQueryRulesAlert +func (tr *MonitorScheduledQueryRulesAlert) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != 
nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this MonitorScheduledQueryRulesAlert +func (tr *MonitorScheduledQueryRulesAlert) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this MonitorScheduledQueryRulesAlert +func (tr *MonitorScheduledQueryRulesAlert) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this MonitorScheduledQueryRulesAlert using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *MonitorScheduledQueryRulesAlert) LateInitialize(attrs []byte) (bool, error) { + params := &MonitorScheduledQueryRulesAlertParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *MonitorScheduledQueryRulesAlert) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/insights/v1beta2/zz_monitorscheduledqueryrulesalert_types.go b/apis/insights/v1beta2/zz_monitorscheduledqueryrulesalert_types.go new file mode 100755 index 000000000..f0a12c2ec --- /dev/null +++ b/apis/insights/v1beta2/zz_monitorscheduledqueryrulesalert_types.go @@ -0,0 +1,460 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type MonitorScheduledQueryRulesAlertActionInitParameters struct { + + // List of action group reference resource IDs. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/insights/v1beta2.MonitorActionGroup + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +listType=set + ActionGroup []*string `json:"actionGroup,omitempty" tf:"action_group,omitempty"` + + // References to MonitorActionGroup in insights to populate actionGroup. 
+ // +kubebuilder:validation:Optional + ActionGroupRefs []v1.Reference `json:"actionGroupRefs,omitempty" tf:"-"` + + // Selector for a list of MonitorActionGroup in insights to populate actionGroup. + // +kubebuilder:validation:Optional + ActionGroupSelector *v1.Selector `json:"actionGroupSelector,omitempty" tf:"-"` + + // Custom payload to be sent for all webhook payloads in alerting action. + CustomWebhookPayload *string `json:"customWebhookPayload,omitempty" tf:"custom_webhook_payload,omitempty"` + + // Custom subject override for all email ids in Azure action group. + EmailSubject *string `json:"emailSubject,omitempty" tf:"email_subject,omitempty"` +} + +type MonitorScheduledQueryRulesAlertActionObservation struct { + + // List of action group reference resource IDs. + // +listType=set + ActionGroup []*string `json:"actionGroup,omitempty" tf:"action_group,omitempty"` + + // Custom payload to be sent for all webhook payloads in alerting action. + CustomWebhookPayload *string `json:"customWebhookPayload,omitempty" tf:"custom_webhook_payload,omitempty"` + + // Custom subject override for all email ids in Azure action group. + EmailSubject *string `json:"emailSubject,omitempty" tf:"email_subject,omitempty"` +} + +type MonitorScheduledQueryRulesAlertActionParameters struct { + + // List of action group reference resource IDs. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/insights/v1beta2.MonitorActionGroup + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + // +listType=set + ActionGroup []*string `json:"actionGroup,omitempty" tf:"action_group,omitempty"` + + // References to MonitorActionGroup in insights to populate actionGroup. + // +kubebuilder:validation:Optional + ActionGroupRefs []v1.Reference `json:"actionGroupRefs,omitempty" tf:"-"` + + // Selector for a list of MonitorActionGroup in insights to populate actionGroup. 
+ // +kubebuilder:validation:Optional + ActionGroupSelector *v1.Selector `json:"actionGroupSelector,omitempty" tf:"-"` + + // Custom payload to be sent for all webhook payloads in alerting action. + // +kubebuilder:validation:Optional + CustomWebhookPayload *string `json:"customWebhookPayload,omitempty" tf:"custom_webhook_payload,omitempty"` + + // Custom subject override for all email ids in Azure action group. + // +kubebuilder:validation:Optional + EmailSubject *string `json:"emailSubject,omitempty" tf:"email_subject,omitempty"` +} + +type MonitorScheduledQueryRulesAlertInitParameters struct { + + // An action block as defined below. + Action *MonitorScheduledQueryRulesAlertActionInitParameters `json:"action,omitempty" tf:"action,omitempty"` + + // List of Resource IDs referred into query. + // +listType=set + AuthorizedResourceIds []*string `json:"authorizedResourceIds,omitempty" tf:"authorized_resource_ids,omitempty"` + + // Should the alerts in this Metric Alert be auto resolved? Defaults to false. + // -> NOTE auto_mitigation_enabled and throttling are mutually exclusive and cannot both be set. + AutoMitigationEnabled *bool `json:"autoMitigationEnabled,omitempty" tf:"auto_mitigation_enabled,omitempty"` + + // The resource URI over which log search query is to be run. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/insights/v1beta1.ApplicationInsights + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + DataSourceID *string `json:"dataSourceId,omitempty" tf:"data_source_id,omitempty"` + + // Reference to a ApplicationInsights in insights to populate dataSourceId. + // +kubebuilder:validation:Optional + DataSourceIDRef *v1.Reference `json:"dataSourceIdRef,omitempty" tf:"-"` + + // Selector for a ApplicationInsights in insights to populate dataSourceId. 
+ // +kubebuilder:validation:Optional + DataSourceIDSelector *v1.Selector `json:"dataSourceIdSelector,omitempty" tf:"-"` + + // The description of the scheduled query rule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Whether this scheduled query rule is enabled. Default is true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Frequency (in minutes) at which rule condition should be evaluated. Values must be between 5 and 1440 (inclusive). + Frequency *float64 `json:"frequency,omitempty" tf:"frequency,omitempty"` + + // Specifies the Azure Region where the resource should exist. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the scheduled query rule. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Log search query. + Query *string `json:"query,omitempty" tf:"query,omitempty"` + + // The type of query results. Possible values are ResultCount and Number. Default is ResultCount. If set to ResultCount, query must include an AggregatedValue column of a numeric type, for example, Heartbeat | summarize AggregatedValue = count() by bin(TimeGenerated, 5m). + QueryType *string `json:"queryType,omitempty" tf:"query_type,omitempty"` + + // The name of the resource group in which to create the scheduled query rule instance. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. 
+ // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // Severity of the alert. Possible values include: 0, 1, 2, 3, or 4. + Severity *float64 `json:"severity,omitempty" tf:"severity,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Time (in minutes) for which Alerts should be throttled or suppressed. Values must be between 0 and 10000 (inclusive). + Throttling *float64 `json:"throttling,omitempty" tf:"throttling,omitempty"` + + // Time window for which data needs to be fetched for query (must be greater than or equal to frequency). Values must be between 5 and 2880 (inclusive). + TimeWindow *float64 `json:"timeWindow,omitempty" tf:"time_window,omitempty"` + + // A trigger block as defined below. + Trigger *TriggerInitParameters `json:"trigger,omitempty" tf:"trigger,omitempty"` +} + +type MonitorScheduledQueryRulesAlertObservation struct { + + // An action block as defined below. + Action *MonitorScheduledQueryRulesAlertActionObservation `json:"action,omitempty" tf:"action,omitempty"` + + // List of Resource IDs referred into query. + // +listType=set + AuthorizedResourceIds []*string `json:"authorizedResourceIds,omitempty" tf:"authorized_resource_ids,omitempty"` + + // Should the alerts in this Metric Alert be auto resolved? Defaults to false. + // -> NOTE auto_mitigation_enabled and throttling are mutually exclusive and cannot both be set. + AutoMitigationEnabled *bool `json:"autoMitigationEnabled,omitempty" tf:"auto_mitigation_enabled,omitempty"` + + // The resource URI over which log search query is to be run. Changing this forces a new resource to be created. + DataSourceID *string `json:"dataSourceId,omitempty" tf:"data_source_id,omitempty"` + + // The description of the scheduled query rule. 
+ Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Whether this scheduled query rule is enabled. Default is true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Frequency (in minutes) at which rule condition should be evaluated. Values must be between 5 and 1440 (inclusive). + Frequency *float64 `json:"frequency,omitempty" tf:"frequency,omitempty"` + + // The ID of the scheduled query rule. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies the Azure Region where the resource should exist. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the scheduled query rule. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Log search query. + Query *string `json:"query,omitempty" tf:"query,omitempty"` + + // The type of query results. Possible values are ResultCount and Number. Default is ResultCount. If set to ResultCount, query must include an AggregatedValue column of a numeric type, for example, Heartbeat | summarize AggregatedValue = count() by bin(TimeGenerated, 5m). + QueryType *string `json:"queryType,omitempty" tf:"query_type,omitempty"` + + // The name of the resource group in which to create the scheduled query rule instance. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Severity of the alert. Possible values include: 0, 1, 2, 3, or 4. + Severity *float64 `json:"severity,omitempty" tf:"severity,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Time (in minutes) for which Alerts should be throttled or suppressed. Values must be between 0 and 10000 (inclusive). 
+ Throttling *float64 `json:"throttling,omitempty" tf:"throttling,omitempty"` + + // Time window for which data needs to be fetched for query (must be greater than or equal to frequency). Values must be between 5 and 2880 (inclusive). + TimeWindow *float64 `json:"timeWindow,omitempty" tf:"time_window,omitempty"` + + // A trigger block as defined below. + Trigger *TriggerObservation `json:"trigger,omitempty" tf:"trigger,omitempty"` +} + +type MonitorScheduledQueryRulesAlertParameters struct { + + // An action block as defined below. + // +kubebuilder:validation:Optional + Action *MonitorScheduledQueryRulesAlertActionParameters `json:"action,omitempty" tf:"action,omitempty"` + + // List of Resource IDs referred into query. + // +kubebuilder:validation:Optional + // +listType=set + AuthorizedResourceIds []*string `json:"authorizedResourceIds,omitempty" tf:"authorized_resource_ids,omitempty"` + + // Should the alerts in this Metric Alert be auto resolved? Defaults to false. + // -> NOTE auto_mitigation_enabled and throttling are mutually exclusive and cannot both be set. + // +kubebuilder:validation:Optional + AutoMitigationEnabled *bool `json:"autoMitigationEnabled,omitempty" tf:"auto_mitigation_enabled,omitempty"` + + // The resource URI over which log search query is to be run. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/insights/v1beta1.ApplicationInsights + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + DataSourceID *string `json:"dataSourceId,omitempty" tf:"data_source_id,omitempty"` + + // Reference to a ApplicationInsights in insights to populate dataSourceId. + // +kubebuilder:validation:Optional + DataSourceIDRef *v1.Reference `json:"dataSourceIdRef,omitempty" tf:"-"` + + // Selector for a ApplicationInsights in insights to populate dataSourceId. 
+ // +kubebuilder:validation:Optional + DataSourceIDSelector *v1.Selector `json:"dataSourceIdSelector,omitempty" tf:"-"` + + // The description of the scheduled query rule. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Whether this scheduled query rule is enabled. Default is true. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Frequency (in minutes) at which rule condition should be evaluated. Values must be between 5 and 1440 (inclusive). + // +kubebuilder:validation:Optional + Frequency *float64 `json:"frequency,omitempty" tf:"frequency,omitempty"` + + // Specifies the Azure Region where the resource should exist. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the scheduled query rule. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Log search query. + // +kubebuilder:validation:Optional + Query *string `json:"query,omitempty" tf:"query,omitempty"` + + // The type of query results. Possible values are ResultCount and Number. Default is ResultCount. If set to ResultCount, query must include an AggregatedValue column of a numeric type, for example, Heartbeat | summarize AggregatedValue = count() by bin(TimeGenerated, 5m). + // +kubebuilder:validation:Optional + QueryType *string `json:"queryType,omitempty" tf:"query_type,omitempty"` + + // The name of the resource group in which to create the scheduled query rule instance. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // Severity of the alert. Possible values include: 0, 1, 2, 3, or 4. + // +kubebuilder:validation:Optional + Severity *float64 `json:"severity,omitempty" tf:"severity,omitempty"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Time (in minutes) for which Alerts should be throttled or suppressed. Values must be between 0 and 10000 (inclusive). + // +kubebuilder:validation:Optional + Throttling *float64 `json:"throttling,omitempty" tf:"throttling,omitempty"` + + // Time window for which data needs to be fetched for query (must be greater than or equal to frequency). Values must be between 5 and 2880 (inclusive). + // +kubebuilder:validation:Optional + TimeWindow *float64 `json:"timeWindow,omitempty" tf:"time_window,omitempty"` + + // A trigger block as defined below. + // +kubebuilder:validation:Optional + Trigger *TriggerParameters `json:"trigger,omitempty" tf:"trigger,omitempty"` +} + +type TriggerInitParameters struct { + + // A metric_trigger block as defined above. Trigger condition for metric query rule. 
+ MetricTrigger *TriggerMetricTriggerInitParameters `json:"metricTrigger,omitempty" tf:"metric_trigger,omitempty"` + + // Evaluation operation for rule - 'GreaterThan', GreaterThanOrEqual', 'LessThan', or 'LessThanOrEqual'. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Result or count threshold based on which rule should be triggered. Values must be between 0 and 10000 inclusive. + Threshold *float64 `json:"threshold,omitempty" tf:"threshold,omitempty"` +} + +type TriggerMetricTriggerInitParameters struct { + + // Evaluation of metric on a particular column. + MetricColumn *string `json:"metricColumn,omitempty" tf:"metric_column,omitempty"` + + // Metric Trigger Type - 'Consecutive' or 'Total'. + MetricTriggerType *string `json:"metricTriggerType,omitempty" tf:"metric_trigger_type,omitempty"` + + // Evaluation operation for rule - 'GreaterThan', GreaterThanOrEqual', 'LessThan', or 'LessThanOrEqual'. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Result or count threshold based on which rule should be triggered. Values must be between 0 and 10000 inclusive. + Threshold *float64 `json:"threshold,omitempty" tf:"threshold,omitempty"` +} + +type TriggerMetricTriggerObservation struct { + + // Evaluation of metric on a particular column. + MetricColumn *string `json:"metricColumn,omitempty" tf:"metric_column,omitempty"` + + // Metric Trigger Type - 'Consecutive' or 'Total'. + MetricTriggerType *string `json:"metricTriggerType,omitempty" tf:"metric_trigger_type,omitempty"` + + // Evaluation operation for rule - 'GreaterThan', GreaterThanOrEqual', 'LessThan', or 'LessThanOrEqual'. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Result or count threshold based on which rule should be triggered. Values must be between 0 and 10000 inclusive. 
+ Threshold *float64 `json:"threshold,omitempty" tf:"threshold,omitempty"` +} + +type TriggerMetricTriggerParameters struct { + + // Evaluation of metric on a particular column. + // +kubebuilder:validation:Optional + MetricColumn *string `json:"metricColumn,omitempty" tf:"metric_column,omitempty"` + + // Metric Trigger Type - 'Consecutive' or 'Total'. + // +kubebuilder:validation:Optional + MetricTriggerType *string `json:"metricTriggerType" tf:"metric_trigger_type,omitempty"` + + // Evaluation operation for rule - 'GreaterThan', GreaterThanOrEqual', 'LessThan', or 'LessThanOrEqual'. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // Result or count threshold based on which rule should be triggered. Values must be between 0 and 10000 inclusive. + // +kubebuilder:validation:Optional + Threshold *float64 `json:"threshold" tf:"threshold,omitempty"` +} + +type TriggerObservation struct { + + // A metric_trigger block as defined above. Trigger condition for metric query rule. + MetricTrigger *TriggerMetricTriggerObservation `json:"metricTrigger,omitempty" tf:"metric_trigger,omitempty"` + + // Evaluation operation for rule - 'GreaterThan', GreaterThanOrEqual', 'LessThan', or 'LessThanOrEqual'. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // Result or count threshold based on which rule should be triggered. Values must be between 0 and 10000 inclusive. + Threshold *float64 `json:"threshold,omitempty" tf:"threshold,omitempty"` +} + +type TriggerParameters struct { + + // A metric_trigger block as defined above. Trigger condition for metric query rule. + // +kubebuilder:validation:Optional + MetricTrigger *TriggerMetricTriggerParameters `json:"metricTrigger,omitempty" tf:"metric_trigger,omitempty"` + + // Evaluation operation for rule - 'GreaterThan', GreaterThanOrEqual', 'LessThan', or 'LessThanOrEqual'. 
+ // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // Result or count threshold based on which rule should be triggered. Values must be between 0 and 10000 inclusive. + // +kubebuilder:validation:Optional + Threshold *float64 `json:"threshold" tf:"threshold,omitempty"` +} + +// MonitorScheduledQueryRulesAlertSpec defines the desired state of MonitorScheduledQueryRulesAlert +type MonitorScheduledQueryRulesAlertSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider MonitorScheduledQueryRulesAlertParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider MonitorScheduledQueryRulesAlertInitParameters `json:"initProvider,omitempty"` +} + +// MonitorScheduledQueryRulesAlertStatus defines the observed state of MonitorScheduledQueryRulesAlert. +type MonitorScheduledQueryRulesAlertStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider MonitorScheduledQueryRulesAlertObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// MonitorScheduledQueryRulesAlert is the Schema for the MonitorScheduledQueryRulesAlerts API. 
Manages an AlertingAction Scheduled Query Rules resource within Azure Monitor +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type MonitorScheduledQueryRulesAlert struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.action) || (has(self.initProvider) && has(self.initProvider.action))",message="spec.forProvider.action is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.frequency) || (has(self.initProvider) && has(self.initProvider.frequency))",message="spec.forProvider.frequency is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // 
+kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.query) || (has(self.initProvider) && has(self.initProvider.query))",message="spec.forProvider.query is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.timeWindow) || (has(self.initProvider) && has(self.initProvider.timeWindow))",message="spec.forProvider.timeWindow is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.trigger) || (has(self.initProvider) && has(self.initProvider.trigger))",message="spec.forProvider.trigger is a required parameter" + Spec MonitorScheduledQueryRulesAlertSpec `json:"spec"` + Status MonitorScheduledQueryRulesAlertStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// MonitorScheduledQueryRulesAlertList contains a list of MonitorScheduledQueryRulesAlerts +type MonitorScheduledQueryRulesAlertList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []MonitorScheduledQueryRulesAlert `json:"items"` +} + +// Repository type metadata. +var ( + MonitorScheduledQueryRulesAlert_Kind = "MonitorScheduledQueryRulesAlert" + MonitorScheduledQueryRulesAlert_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: MonitorScheduledQueryRulesAlert_Kind}.String() + MonitorScheduledQueryRulesAlert_KindAPIVersion = MonitorScheduledQueryRulesAlert_Kind + "." 
+ CRDGroupVersion.String() + MonitorScheduledQueryRulesAlert_GroupVersionKind = CRDGroupVersion.WithKind(MonitorScheduledQueryRulesAlert_Kind) +) + +func init() { + SchemeBuilder.Register(&MonitorScheduledQueryRulesAlert{}, &MonitorScheduledQueryRulesAlertList{}) +} diff --git a/apis/insights/v1beta2/zz_monitorscheduledqueryrulesalertv2_terraformed.go b/apis/insights/v1beta2/zz_monitorscheduledqueryrulesalertv2_terraformed.go new file mode 100755 index 000000000..811cf169c --- /dev/null +++ b/apis/insights/v1beta2/zz_monitorscheduledqueryrulesalertv2_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this MonitorScheduledQueryRulesAlertV2 +func (mg *MonitorScheduledQueryRulesAlertV2) GetTerraformResourceType() string { + return "azurerm_monitor_scheduled_query_rules_alert_v2" +} + +// GetConnectionDetailsMapping for this MonitorScheduledQueryRulesAlertV2 +func (tr *MonitorScheduledQueryRulesAlertV2) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this MonitorScheduledQueryRulesAlertV2 +func (tr *MonitorScheduledQueryRulesAlertV2) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this MonitorScheduledQueryRulesAlertV2 +func (tr *MonitorScheduledQueryRulesAlertV2) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of 
underlying Terraform resource of this MonitorScheduledQueryRulesAlertV2 +func (tr *MonitorScheduledQueryRulesAlertV2) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this MonitorScheduledQueryRulesAlertV2 +func (tr *MonitorScheduledQueryRulesAlertV2) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this MonitorScheduledQueryRulesAlertV2 +func (tr *MonitorScheduledQueryRulesAlertV2) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this MonitorScheduledQueryRulesAlertV2 +func (tr *MonitorScheduledQueryRulesAlertV2) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this MonitorScheduledQueryRulesAlertV2 +func (tr *MonitorScheduledQueryRulesAlertV2) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. 
As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this MonitorScheduledQueryRulesAlertV2 using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *MonitorScheduledQueryRulesAlertV2) LateInitialize(attrs []byte) (bool, error) { + params := &MonitorScheduledQueryRulesAlertV2Parameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *MonitorScheduledQueryRulesAlertV2) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/insights/v1beta2/zz_monitorscheduledqueryrulesalertv2_types.go b/apis/insights/v1beta2/zz_monitorscheduledqueryrulesalertv2_types.go new file mode 100755 index 000000000..2e6a79459 --- /dev/null +++ b/apis/insights/v1beta2/zz_monitorscheduledqueryrulesalertv2_types.go @@ -0,0 +1,497 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CriteriaDimensionInitParameters struct { + + // Specifies the name which should be used for this Monitor Scheduled Query Rule. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Operator for dimension values. Possible values are Exclude,and Include. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // List of dimension values. Use a wildcard * to collect all. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type CriteriaDimensionObservation struct { + + // Specifies the name which should be used for this Monitor Scheduled Query Rule. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Operator for dimension values. Possible values are Exclude,and Include. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // List of dimension values. Use a wildcard * to collect all. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type CriteriaDimensionParameters struct { + + // Specifies the name which should be used for this Monitor Scheduled Query Rule. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Operator for dimension values. Possible values are Exclude,and Include. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // List of dimension values. Use a wildcard * to collect all. + // +kubebuilder:validation:Optional + Values []*string `json:"values" tf:"values,omitempty"` +} + +type FailingPeriodsInitParameters struct { + + // Specifies the number of violations to trigger an alert. 
Should be smaller or equal to number_of_evaluation_periods. Possible value is integer between 1 and 6. + MinimumFailingPeriodsToTriggerAlert *float64 `json:"minimumFailingPeriodsToTriggerAlert,omitempty" tf:"minimum_failing_periods_to_trigger_alert,omitempty"` + + // Specifies the number of aggregated look-back points. The look-back time window is calculated based on the aggregation granularity window_duration and the selected number of aggregated points. Possible value is integer between 1 and 6. + NumberOfEvaluationPeriods *float64 `json:"numberOfEvaluationPeriods,omitempty" tf:"number_of_evaluation_periods,omitempty"` +} + +type FailingPeriodsObservation struct { + + // Specifies the number of violations to trigger an alert. Should be smaller or equal to number_of_evaluation_periods. Possible value is integer between 1 and 6. + MinimumFailingPeriodsToTriggerAlert *float64 `json:"minimumFailingPeriodsToTriggerAlert,omitempty" tf:"minimum_failing_periods_to_trigger_alert,omitempty"` + + // Specifies the number of aggregated look-back points. The look-back time window is calculated based on the aggregation granularity window_duration and the selected number of aggregated points. Possible value is integer between 1 and 6. + NumberOfEvaluationPeriods *float64 `json:"numberOfEvaluationPeriods,omitempty" tf:"number_of_evaluation_periods,omitempty"` +} + +type FailingPeriodsParameters struct { + + // Specifies the number of violations to trigger an alert. Should be smaller or equal to number_of_evaluation_periods. Possible value is integer between 1 and 6. + // +kubebuilder:validation:Optional + MinimumFailingPeriodsToTriggerAlert *float64 `json:"minimumFailingPeriodsToTriggerAlert" tf:"minimum_failing_periods_to_trigger_alert,omitempty"` + + // Specifies the number of aggregated look-back points. The look-back time window is calculated based on the aggregation granularity window_duration and the selected number of aggregated points. 
Possible value is integer between 1 and 6. + // +kubebuilder:validation:Optional + NumberOfEvaluationPeriods *float64 `json:"numberOfEvaluationPeriods" tf:"number_of_evaluation_periods,omitempty"` +} + +type MonitorScheduledQueryRulesAlertV2ActionInitParameters struct { + + // List of Action Group resource IDs to invoke when the alert fires. + ActionGroups []*string `json:"actionGroups,omitempty" tf:"action_groups,omitempty"` + + // Specifies the properties of an alert payload. + // +mapType=granular + CustomProperties map[string]*string `json:"customProperties,omitempty" tf:"custom_properties,omitempty"` +} + +type MonitorScheduledQueryRulesAlertV2ActionObservation struct { + + // List of Action Group resource IDs to invoke when the alert fires. + ActionGroups []*string `json:"actionGroups,omitempty" tf:"action_groups,omitempty"` + + // Specifies the properties of an alert payload. + // +mapType=granular + CustomProperties map[string]*string `json:"customProperties,omitempty" tf:"custom_properties,omitempty"` +} + +type MonitorScheduledQueryRulesAlertV2ActionParameters struct { + + // List of Action Group resource IDs to invoke when the alert fires. + // +kubebuilder:validation:Optional + ActionGroups []*string `json:"actionGroups,omitempty" tf:"action_groups,omitempty"` + + // Specifies the properties of an alert payload. + // +kubebuilder:validation:Optional + // +mapType=granular + CustomProperties map[string]*string `json:"customProperties,omitempty" tf:"custom_properties,omitempty"` +} + +type MonitorScheduledQueryRulesAlertV2CriteriaInitParameters struct { + + // A dimension block as defined below. + Dimension []CriteriaDimensionInitParameters `json:"dimension,omitempty" tf:"dimension,omitempty"` + + // A failing_periods block as defined below. + FailingPeriods *FailingPeriodsInitParameters `json:"failingPeriods,omitempty" tf:"failing_periods,omitempty"` + + // Specifies the column containing the metric measure number. 
+ MetricMeasureColumn *string `json:"metricMeasureColumn,omitempty" tf:"metric_measure_column,omitempty"` + + // Specifies the criteria operator. Possible values are Equal, GreaterThan, GreaterThanOrEqual, LessThan,and LessThanOrEqual. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // The query to run on logs. The results returned by this query are used to populate the alert. + Query *string `json:"query,omitempty" tf:"query,omitempty"` + + // Specifies the column containing the resource ID. The content of the column must be an uri formatted as resource ID. + ResourceIDColumn *string `json:"resourceIdColumn,omitempty" tf:"resource_id_column,omitempty"` + + // Specifies the criteria threshold value that activates the alert. + Threshold *float64 `json:"threshold,omitempty" tf:"threshold,omitempty"` + + // The type of aggregation to apply to the data points in aggregation granularity. Possible values are Average, Count, Maximum, Minimum,and Total. + TimeAggregationMethod *string `json:"timeAggregationMethod,omitempty" tf:"time_aggregation_method,omitempty"` +} + +type MonitorScheduledQueryRulesAlertV2CriteriaObservation struct { + + // A dimension block as defined below. + Dimension []CriteriaDimensionObservation `json:"dimension,omitempty" tf:"dimension,omitempty"` + + // A failing_periods block as defined below. + FailingPeriods *FailingPeriodsObservation `json:"failingPeriods,omitempty" tf:"failing_periods,omitempty"` + + // Specifies the column containing the metric measure number. + MetricMeasureColumn *string `json:"metricMeasureColumn,omitempty" tf:"metric_measure_column,omitempty"` + + // Specifies the criteria operator. Possible values are Equal, GreaterThan, GreaterThanOrEqual, LessThan,and LessThanOrEqual. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // The query to run on logs. The results returned by this query are used to populate the alert. 
+ Query *string `json:"query,omitempty" tf:"query,omitempty"` + + // Specifies the column containing the resource ID. The content of the column must be an uri formatted as resource ID. + ResourceIDColumn *string `json:"resourceIdColumn,omitempty" tf:"resource_id_column,omitempty"` + + // Specifies the criteria threshold value that activates the alert. + Threshold *float64 `json:"threshold,omitempty" tf:"threshold,omitempty"` + + // The type of aggregation to apply to the data points in aggregation granularity. Possible values are Average, Count, Maximum, Minimum,and Total. + TimeAggregationMethod *string `json:"timeAggregationMethod,omitempty" tf:"time_aggregation_method,omitempty"` +} + +type MonitorScheduledQueryRulesAlertV2CriteriaParameters struct { + + // A dimension block as defined below. + // +kubebuilder:validation:Optional + Dimension []CriteriaDimensionParameters `json:"dimension,omitempty" tf:"dimension,omitempty"` + + // A failing_periods block as defined below. + // +kubebuilder:validation:Optional + FailingPeriods *FailingPeriodsParameters `json:"failingPeriods,omitempty" tf:"failing_periods,omitempty"` + + // Specifies the column containing the metric measure number. + // +kubebuilder:validation:Optional + MetricMeasureColumn *string `json:"metricMeasureColumn,omitempty" tf:"metric_measure_column,omitempty"` + + // Specifies the criteria operator. Possible values are Equal, GreaterThan, GreaterThanOrEqual, LessThan,and LessThanOrEqual. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // The query to run on logs. The results returned by this query are used to populate the alert. + // +kubebuilder:validation:Optional + Query *string `json:"query" tf:"query,omitempty"` + + // Specifies the column containing the resource ID. The content of the column must be an uri formatted as resource ID. 
+ // +kubebuilder:validation:Optional + ResourceIDColumn *string `json:"resourceIdColumn,omitempty" tf:"resource_id_column,omitempty"` + + // Specifies the criteria threshold value that activates the alert. + // +kubebuilder:validation:Optional + Threshold *float64 `json:"threshold" tf:"threshold,omitempty"` + + // The type of aggregation to apply to the data points in aggregation granularity. Possible values are Average, Count, Maximum, Minimum,and Total. + // +kubebuilder:validation:Optional + TimeAggregationMethod *string `json:"timeAggregationMethod" tf:"time_aggregation_method,omitempty"` +} + +type MonitorScheduledQueryRulesAlertV2InitParameters struct { + + // An action block as defined below. + Action *MonitorScheduledQueryRulesAlertV2ActionInitParameters `json:"action,omitempty" tf:"action,omitempty"` + + // Specifies the flag that indicates whether the alert should be automatically resolved or not. Value should be true or false. The default is false. + AutoMitigationEnabled *bool `json:"autoMitigationEnabled,omitempty" tf:"auto_mitigation_enabled,omitempty"` + + // A criteria block as defined below. + Criteria []MonitorScheduledQueryRulesAlertV2CriteriaInitParameters `json:"criteria,omitempty" tf:"criteria,omitempty"` + + // Specifies the description of the scheduled query rule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Specifies the display name of the alert rule. + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // Specifies the flag which indicates whether this scheduled query rule is enabled. Value should be true or false. Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // How often the scheduled query rule is evaluated, represented in ISO 8601 duration format. Possible values are PT1M, PT5M, PT10M, PT15M, PT30M, PT45M, PT1H, PT2H, PT3H, PT4H, PT5H, PT6H, P1D. 
+ EvaluationFrequency *string `json:"evaluationFrequency,omitempty" tf:"evaluation_frequency,omitempty"` + + // Specifies the Azure Region where the Monitor Scheduled Query Rule should exist. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Mute actions for the chosen period of time in ISO 8601 duration format after the alert is fired. Possible values are PT5M, PT10M, PT15M, PT30M, PT45M, PT1H, PT2H, PT3H, PT4H, PT5H, PT6H, P1D and P2D. + MuteActionsAfterAlertDuration *string `json:"muteActionsAfterAlertDuration,omitempty" tf:"mute_actions_after_alert_duration,omitempty"` + + // Set this if the alert evaluation period is different from the query time range. If not specified, the value is window_duration*number_of_evaluation_periods. Possible values are PT5M, PT10M, PT15M, PT20M, PT30M, PT45M, PT1H, PT2H, PT3H, PT4H, PT5H, PT6H, P1D and P2D. + QueryTimeRangeOverride *string `json:"queryTimeRangeOverride,omitempty" tf:"query_time_range_override,omitempty"` + + // Specifies the list of resource IDs that this scheduled query rule is scoped to. Changing this forces a new resource to be created. Currently, the API supports exactly 1 resource ID in the scopes list. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/insights/v1beta1.ApplicationInsights + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + Scopes []*string `json:"scopes,omitempty" tf:"scopes,omitempty"` + + // References to ApplicationInsights in insights to populate scopes. + // +kubebuilder:validation:Optional + ScopesRefs []v1.Reference `json:"scopesRefs,omitempty" tf:"-"` + + // Selector for a list of ApplicationInsights in insights to populate scopes. + // +kubebuilder:validation:Optional + ScopesSelector *v1.Selector `json:"scopesSelector,omitempty" tf:"-"` + + // Severity of the alert. Should be an integer between 0 and 4. 
Value of 0 is severest. + Severity *float64 `json:"severity,omitempty" tf:"severity,omitempty"` + + // Specifies the flag which indicates whether the provided query should be validated or not. The default is false. + SkipQueryValidation *bool `json:"skipQueryValidation,omitempty" tf:"skip_query_validation,omitempty"` + + // A mapping of tags which should be assigned to the Monitor Scheduled Query Rule. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // List of resource type of the target resource(s) on which the alert is created/updated. For example if the scope is a resource group and targetResourceTypes is Microsoft.Compute/virtualMachines, then a different alert will be fired for each virtual machine in the resource group which meet the alert criteria. + TargetResourceTypes []*string `json:"targetResourceTypes,omitempty" tf:"target_resource_types,omitempty"` + + // Specifies the period of time in ISO 8601 duration format on which the Scheduled Query Rule will be executed (bin size). If evaluation_frequency is PT1M, possible values are PT1M, PT5M, PT10M, PT15M, PT30M, PT45M, PT1H, PT2H, PT3H, PT4H, PT5H, and PT6H. Otherwise, possible values are PT5M, PT10M, PT15M, PT30M, PT45M, PT1H, PT2H, PT3H, PT4H, PT5H, PT6H, P1D, and P2D. + WindowDuration *string `json:"windowDuration,omitempty" tf:"window_duration,omitempty"` + + // Specifies the flag which indicates whether this scheduled query rule check if storage is configured. Value should be true or false. The default is false. + WorkspaceAlertsStorageEnabled *bool `json:"workspaceAlertsStorageEnabled,omitempty" tf:"workspace_alerts_storage_enabled,omitempty"` +} + +type MonitorScheduledQueryRulesAlertV2Observation struct { + + // An action block as defined below. + Action *MonitorScheduledQueryRulesAlertV2ActionObservation `json:"action,omitempty" tf:"action,omitempty"` + + // Specifies the flag that indicates whether the alert should be automatically resolved or not. 
Value should be true or false. The default is false. + AutoMitigationEnabled *bool `json:"autoMitigationEnabled,omitempty" tf:"auto_mitigation_enabled,omitempty"` + + // The api-version used when creating this alert rule. + CreatedWithAPIVersion *string `json:"createdWithApiVersion,omitempty" tf:"created_with_api_version,omitempty"` + + // A criteria block as defined below. + Criteria []MonitorScheduledQueryRulesAlertV2CriteriaObservation `json:"criteria,omitempty" tf:"criteria,omitempty"` + + // Specifies the description of the scheduled query rule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Specifies the display name of the alert rule. + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // Specifies the flag which indicates whether this scheduled query rule is enabled. Value should be true or false. Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // How often the scheduled query rule is evaluated, represented in ISO 8601 duration format. Possible values are PT1M, PT5M, PT10M, PT15M, PT30M, PT45M, PT1H, PT2H, PT3H, PT4H, PT5H, PT6H, P1D. + EvaluationFrequency *string `json:"evaluationFrequency,omitempty" tf:"evaluation_frequency,omitempty"` + + // The ID of the Monitor Scheduled Query Rule. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // True if this alert rule is a legacy Log Analytic Rule. + IsALegacyLogAnalyticsRule *bool `json:"isALegacyLogAnalyticsRule,omitempty" tf:"is_a_legacy_log_analytics_rule,omitempty"` + + // The flag indicates whether this Scheduled Query Rule has been configured to be stored in the customer's storage. + IsWorkspaceAlertsStorageConfigured *bool `json:"isWorkspaceAlertsStorageConfigured,omitempty" tf:"is_workspace_alerts_storage_configured,omitempty"` + + // Specifies the Azure Region where the Monitor Scheduled Query Rule should exist. Changing this forces a new resource to be created. 
+ Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Mute actions for the chosen period of time in ISO 8601 duration format after the alert is fired. Possible values are PT5M, PT10M, PT15M, PT30M, PT45M, PT1H, PT2H, PT3H, PT4H, PT5H, PT6H, P1D and P2D. + MuteActionsAfterAlertDuration *string `json:"muteActionsAfterAlertDuration,omitempty" tf:"mute_actions_after_alert_duration,omitempty"` + + // Set this if the alert evaluation period is different from the query time range. If not specified, the value is window_duration*number_of_evaluation_periods. Possible values are PT5M, PT10M, PT15M, PT20M, PT30M, PT45M, PT1H, PT2H, PT3H, PT4H, PT5H, PT6H, P1D and P2D. + QueryTimeRangeOverride *string `json:"queryTimeRangeOverride,omitempty" tf:"query_time_range_override,omitempty"` + + // Specifies the name of the Resource Group where the Monitor Scheduled Query Rule should exist. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Specifies the list of resource IDs that this scheduled query rule is scoped to. Changing this forces a new resource to be created. Currently, the API supports exactly 1 resource ID in the scopes list. + Scopes []*string `json:"scopes,omitempty" tf:"scopes,omitempty"` + + // Severity of the alert. Should be an integer between 0 and 4. Value of 0 is severest. + Severity *float64 `json:"severity,omitempty" tf:"severity,omitempty"` + + // Specifies the flag which indicates whether the provided query should be validated or not. The default is false. + SkipQueryValidation *bool `json:"skipQueryValidation,omitempty" tf:"skip_query_validation,omitempty"` + + // A mapping of tags which should be assigned to the Monitor Scheduled Query Rule. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // List of resource type of the target resource(s) on which the alert is created/updated. 
For example if the scope is a resource group and targetResourceTypes is Microsoft.Compute/virtualMachines, then a different alert will be fired for each virtual machine in the resource group which meet the alert criteria. + TargetResourceTypes []*string `json:"targetResourceTypes,omitempty" tf:"target_resource_types,omitempty"` + + // Specifies the period of time in ISO 8601 duration format on which the Scheduled Query Rule will be executed (bin size). If evaluation_frequency is PT1M, possible values are PT1M, PT5M, PT10M, PT15M, PT30M, PT45M, PT1H, PT2H, PT3H, PT4H, PT5H, and PT6H. Otherwise, possible values are PT5M, PT10M, PT15M, PT30M, PT45M, PT1H, PT2H, PT3H, PT4H, PT5H, PT6H, P1D, and P2D. + WindowDuration *string `json:"windowDuration,omitempty" tf:"window_duration,omitempty"` + + // Specifies the flag which indicates whether this scheduled query rule check if storage is configured. Value should be true or false. The default is false. + WorkspaceAlertsStorageEnabled *bool `json:"workspaceAlertsStorageEnabled,omitempty" tf:"workspace_alerts_storage_enabled,omitempty"` +} + +type MonitorScheduledQueryRulesAlertV2Parameters struct { + + // An action block as defined below. + // +kubebuilder:validation:Optional + Action *MonitorScheduledQueryRulesAlertV2ActionParameters `json:"action,omitempty" tf:"action,omitempty"` + + // Specifies the flag that indicates whether the alert should be automatically resolved or not. Value should be true or false. The default is false. + // +kubebuilder:validation:Optional + AutoMitigationEnabled *bool `json:"autoMitigationEnabled,omitempty" tf:"auto_mitigation_enabled,omitempty"` + + // A criteria block as defined below. + // +kubebuilder:validation:Optional + Criteria []MonitorScheduledQueryRulesAlertV2CriteriaParameters `json:"criteria,omitempty" tf:"criteria,omitempty"` + + // Specifies the description of the scheduled query rule. 
+ // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Specifies the display name of the alert rule. + // +kubebuilder:validation:Optional + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // Specifies the flag which indicates whether this scheduled query rule is enabled. Value should be true or false. Defaults to true. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // How often the scheduled query rule is evaluated, represented in ISO 8601 duration format. Possible values are PT1M, PT5M, PT10M, PT15M, PT30M, PT45M, PT1H, PT2H, PT3H, PT4H, PT5H, PT6H, P1D. + // +kubebuilder:validation:Optional + EvaluationFrequency *string `json:"evaluationFrequency,omitempty" tf:"evaluation_frequency,omitempty"` + + // Specifies the Azure Region where the Monitor Scheduled Query Rule should exist. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Mute actions for the chosen period of time in ISO 8601 duration format after the alert is fired. Possible values are PT5M, PT10M, PT15M, PT30M, PT45M, PT1H, PT2H, PT3H, PT4H, PT5H, PT6H, P1D and P2D. + // +kubebuilder:validation:Optional + MuteActionsAfterAlertDuration *string `json:"muteActionsAfterAlertDuration,omitempty" tf:"mute_actions_after_alert_duration,omitempty"` + + // Set this if the alert evaluation period is different from the query time range. If not specified, the value is window_duration*number_of_evaluation_periods. Possible values are PT5M, PT10M, PT15M, PT20M, PT30M, PT45M, PT1H, PT2H, PT3H, PT4H, PT5H, PT6H, P1D and P2D. 
+ // +kubebuilder:validation:Optional + QueryTimeRangeOverride *string `json:"queryTimeRangeOverride,omitempty" tf:"query_time_range_override,omitempty"` + + // Specifies the name of the Resource Group where the Monitor Scheduled Query Rule should exist. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // Specifies the list of resource IDs that this scheduled query rule is scoped to. Changing this forces a new resource to be created. Currently, the API supports exactly 1 resource ID in the scopes list. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/insights/v1beta1.ApplicationInsights + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + Scopes []*string `json:"scopes,omitempty" tf:"scopes,omitempty"` + + // References to ApplicationInsights in insights to populate scopes. + // +kubebuilder:validation:Optional + ScopesRefs []v1.Reference `json:"scopesRefs,omitempty" tf:"-"` + + // Selector for a list of ApplicationInsights in insights to populate scopes. + // +kubebuilder:validation:Optional + ScopesSelector *v1.Selector `json:"scopesSelector,omitempty" tf:"-"` + + // Severity of the alert. Should be an integer between 0 and 4. Value of 0 is severest. 
+ // +kubebuilder:validation:Optional + Severity *float64 `json:"severity,omitempty" tf:"severity,omitempty"` + + // Specifies the flag which indicates whether the provided query should be validated or not. The default is false. + // +kubebuilder:validation:Optional + SkipQueryValidation *bool `json:"skipQueryValidation,omitempty" tf:"skip_query_validation,omitempty"` + + // A mapping of tags which should be assigned to the Monitor Scheduled Query Rule. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // List of resource type of the target resource(s) on which the alert is created/updated. For example if the scope is a resource group and targetResourceTypes is Microsoft.Compute/virtualMachines, then a different alert will be fired for each virtual machine in the resource group which meet the alert criteria. + // +kubebuilder:validation:Optional + TargetResourceTypes []*string `json:"targetResourceTypes,omitempty" tf:"target_resource_types,omitempty"` + + // Specifies the period of time in ISO 8601 duration format on which the Scheduled Query Rule will be executed (bin size). If evaluation_frequency is PT1M, possible values are PT1M, PT5M, PT10M, PT15M, PT30M, PT45M, PT1H, PT2H, PT3H, PT4H, PT5H, and PT6H. Otherwise, possible values are PT5M, PT10M, PT15M, PT30M, PT45M, PT1H, PT2H, PT3H, PT4H, PT5H, PT6H, P1D, and P2D. + // +kubebuilder:validation:Optional + WindowDuration *string `json:"windowDuration,omitempty" tf:"window_duration,omitempty"` + + // Specifies the flag which indicates whether this scheduled query rule check if storage is configured. Value should be true or false. The default is false. 
+ // +kubebuilder:validation:Optional + WorkspaceAlertsStorageEnabled *bool `json:"workspaceAlertsStorageEnabled,omitempty" tf:"workspace_alerts_storage_enabled,omitempty"` +} + +// MonitorScheduledQueryRulesAlertV2Spec defines the desired state of MonitorScheduledQueryRulesAlertV2 +type MonitorScheduledQueryRulesAlertV2Spec struct { + v1.ResourceSpec `json:",inline"` + ForProvider MonitorScheduledQueryRulesAlertV2Parameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider MonitorScheduledQueryRulesAlertV2InitParameters `json:"initProvider,omitempty"` +} + +// MonitorScheduledQueryRulesAlertV2Status defines the observed state of MonitorScheduledQueryRulesAlertV2. +type MonitorScheduledQueryRulesAlertV2Status struct { + v1.ResourceStatus `json:",inline"` + AtProvider MonitorScheduledQueryRulesAlertV2Observation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// MonitorScheduledQueryRulesAlertV2 is the Schema for the MonitorScheduledQueryRulesAlertV2s API. 
Manages an AlertingAction Scheduled Query Rules Version 2 resource within Azure Monitor +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type MonitorScheduledQueryRulesAlertV2 struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.criteria) || (has(self.initProvider) && has(self.initProvider.criteria))",message="spec.forProvider.criteria is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.severity) || (has(self.initProvider) && has(self.initProvider.severity))",message="spec.forProvider.severity is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.windowDuration) || (has(self.initProvider) && 
has(self.initProvider.windowDuration))",message="spec.forProvider.windowDuration is a required parameter" + Spec MonitorScheduledQueryRulesAlertV2Spec `json:"spec"` + Status MonitorScheduledQueryRulesAlertV2Status `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// MonitorScheduledQueryRulesAlertV2List contains a list of MonitorScheduledQueryRulesAlertV2s +type MonitorScheduledQueryRulesAlertV2List struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []MonitorScheduledQueryRulesAlertV2 `json:"items"` +} + +// Repository type metadata. +var ( + MonitorScheduledQueryRulesAlertV2_Kind = "MonitorScheduledQueryRulesAlertV2" + MonitorScheduledQueryRulesAlertV2_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: MonitorScheduledQueryRulesAlertV2_Kind}.String() + MonitorScheduledQueryRulesAlertV2_KindAPIVersion = MonitorScheduledQueryRulesAlertV2_Kind + "." + CRDGroupVersion.String() + MonitorScheduledQueryRulesAlertV2_GroupVersionKind = CRDGroupVersion.WithKind(MonitorScheduledQueryRulesAlertV2_Kind) +) + +func init() { + SchemeBuilder.Register(&MonitorScheduledQueryRulesAlertV2{}, &MonitorScheduledQueryRulesAlertV2List{}) +} diff --git a/apis/insights/v1beta2/zz_monitorscheduledqueryruleslog_terraformed.go b/apis/insights/v1beta2/zz_monitorscheduledqueryruleslog_terraformed.go new file mode 100755 index 000000000..211e46bc4 --- /dev/null +++ b/apis/insights/v1beta2/zz_monitorscheduledqueryruleslog_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this MonitorScheduledQueryRulesLog +func (mg *MonitorScheduledQueryRulesLog) GetTerraformResourceType() string { + return "azurerm_monitor_scheduled_query_rules_log" +} + +// GetConnectionDetailsMapping for this MonitorScheduledQueryRulesLog +func (tr *MonitorScheduledQueryRulesLog) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this MonitorScheduledQueryRulesLog +func (tr *MonitorScheduledQueryRulesLog) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this MonitorScheduledQueryRulesLog +func (tr *MonitorScheduledQueryRulesLog) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this MonitorScheduledQueryRulesLog +func (tr *MonitorScheduledQueryRulesLog) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this MonitorScheduledQueryRulesLog +func (tr *MonitorScheduledQueryRulesLog) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this MonitorScheduledQueryRulesLog +func (tr *MonitorScheduledQueryRulesLog) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return 
json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this MonitorScheduledQueryRulesLog +func (tr *MonitorScheduledQueryRulesLog) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this MonitorScheduledQueryRulesLog +func (tr *MonitorScheduledQueryRulesLog) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this MonitorScheduledQueryRulesLog using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *MonitorScheduledQueryRulesLog) LateInitialize(attrs []byte) (bool, error) { + params := &MonitorScheduledQueryRulesLogParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *MonitorScheduledQueryRulesLog) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/insights/v1beta2/zz_monitorscheduledqueryruleslog_types.go b/apis/insights/v1beta2/zz_monitorscheduledqueryruleslog_types.go new file mode 100755 index 000000000..08fe27b2b --- /dev/null +++ b/apis/insights/v1beta2/zz_monitorscheduledqueryruleslog_types.go @@ -0,0 +1,291 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type MonitorScheduledQueryRulesLogCriteriaDimensionInitParameters struct { + + // Name of the dimension. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Operator for dimension values, - 'Include'. Defaults to Include. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // List of dimension values. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type MonitorScheduledQueryRulesLogCriteriaDimensionObservation struct { + + // Name of the dimension. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Operator for dimension values, - 'Include'. Defaults to Include. 
+ Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // List of dimension values. + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type MonitorScheduledQueryRulesLogCriteriaDimensionParameters struct { + + // Name of the dimension. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Operator for dimension values, - 'Include'. Defaults to Include. + // +kubebuilder:validation:Optional + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // List of dimension values. + // +kubebuilder:validation:Optional + Values []*string `json:"values" tf:"values,omitempty"` +} + +type MonitorScheduledQueryRulesLogCriteriaInitParameters struct { + + // A dimension block as defined below. + Dimension []MonitorScheduledQueryRulesLogCriteriaDimensionInitParameters `json:"dimension,omitempty" tf:"dimension,omitempty"` + + // Name of the metric. Supported metrics are listed in the Azure Monitor Microsoft.OperationalInsights/workspaces metrics namespace. + MetricName *string `json:"metricName,omitempty" tf:"metric_name,omitempty"` +} + +type MonitorScheduledQueryRulesLogCriteriaObservation struct { + + // A dimension block as defined below. + Dimension []MonitorScheduledQueryRulesLogCriteriaDimensionObservation `json:"dimension,omitempty" tf:"dimension,omitempty"` + + // Name of the metric. Supported metrics are listed in the Azure Monitor Microsoft.OperationalInsights/workspaces metrics namespace. + MetricName *string `json:"metricName,omitempty" tf:"metric_name,omitempty"` +} + +type MonitorScheduledQueryRulesLogCriteriaParameters struct { + + // A dimension block as defined below. + // +kubebuilder:validation:Optional + Dimension []MonitorScheduledQueryRulesLogCriteriaDimensionParameters `json:"dimension" tf:"dimension,omitempty"` + + // Name of the metric. Supported metrics are listed in the Azure Monitor Microsoft.OperationalInsights/workspaces metrics namespace. 
+ // +kubebuilder:validation:Optional + MetricName *string `json:"metricName" tf:"metric_name,omitempty"` +} + +type MonitorScheduledQueryRulesLogInitParameters struct { + + // A list of IDs of Resources referred into query. + // +listType=set + AuthorizedResourceIds []*string `json:"authorizedResourceIds,omitempty" tf:"authorized_resource_ids,omitempty"` + + // A criteria block as defined below. + Criteria *MonitorScheduledQueryRulesLogCriteriaInitParameters `json:"criteria,omitempty" tf:"criteria,omitempty"` + + // The resource URI over which log search query is to be run. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/operationalinsights/v1beta2.Workspace + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + DataSourceID *string `json:"dataSourceId,omitempty" tf:"data_source_id,omitempty"` + + // Reference to a Workspace in operationalinsights to populate dataSourceId. + // +kubebuilder:validation:Optional + DataSourceIDRef *v1.Reference `json:"dataSourceIdRef,omitempty" tf:"-"` + + // Selector for a Workspace in operationalinsights to populate dataSourceId. + // +kubebuilder:validation:Optional + DataSourceIDSelector *v1.Selector `json:"dataSourceIdSelector,omitempty" tf:"-"` + + // The description of the scheduled query rule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Whether this scheduled query rule is enabled. Default is true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Specifies the Azure Region where the resource should exist. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the scheduled query rule. Changing this forces a new resource to be created. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The name of the resource group in which to create the scheduled query rule instance. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type MonitorScheduledQueryRulesLogObservation struct { + + // A list of IDs of Resources referred into query. + // +listType=set + AuthorizedResourceIds []*string `json:"authorizedResourceIds,omitempty" tf:"authorized_resource_ids,omitempty"` + + // A criteria block as defined below. + Criteria *MonitorScheduledQueryRulesLogCriteriaObservation `json:"criteria,omitempty" tf:"criteria,omitempty"` + + // The resource URI over which log search query is to be run. Changing this forces a new resource to be created. + DataSourceID *string `json:"dataSourceId,omitempty" tf:"data_source_id,omitempty"` + + // The description of the scheduled query rule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Whether this scheduled query rule is enabled. Default is true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The ID of the scheduled query rule. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies the Azure Region where the resource should exist. 
Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the scheduled query rule. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The name of the resource group in which to create the scheduled query rule instance. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type MonitorScheduledQueryRulesLogParameters struct { + + // A list of IDs of Resources referred into query. + // +kubebuilder:validation:Optional + // +listType=set + AuthorizedResourceIds []*string `json:"authorizedResourceIds,omitempty" tf:"authorized_resource_ids,omitempty"` + + // A criteria block as defined below. + // +kubebuilder:validation:Optional + Criteria *MonitorScheduledQueryRulesLogCriteriaParameters `json:"criteria,omitempty" tf:"criteria,omitempty"` + + // The resource URI over which log search query is to be run. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/operationalinsights/v1beta2.Workspace + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + DataSourceID *string `json:"dataSourceId,omitempty" tf:"data_source_id,omitempty"` + + // Reference to a Workspace in operationalinsights to populate dataSourceId. + // +kubebuilder:validation:Optional + DataSourceIDRef *v1.Reference `json:"dataSourceIdRef,omitempty" tf:"-"` + + // Selector for a Workspace in operationalinsights to populate dataSourceId. 
+ // +kubebuilder:validation:Optional + DataSourceIDSelector *v1.Selector `json:"dataSourceIdSelector,omitempty" tf:"-"` + + // The description of the scheduled query rule. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Whether this scheduled query rule is enabled. Default is true. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Specifies the Azure Region where the resource should exist. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the scheduled query rule. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The name of the resource group in which to create the scheduled query rule instance. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A mapping of tags to assign to the resource. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +// MonitorScheduledQueryRulesLogSpec defines the desired state of MonitorScheduledQueryRulesLog +type MonitorScheduledQueryRulesLogSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider MonitorScheduledQueryRulesLogParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider MonitorScheduledQueryRulesLogInitParameters `json:"initProvider,omitempty"` +} + +// MonitorScheduledQueryRulesLogStatus defines the observed state of MonitorScheduledQueryRulesLog. +type MonitorScheduledQueryRulesLogStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider MonitorScheduledQueryRulesLogObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// MonitorScheduledQueryRulesLog is the Schema for the MonitorScheduledQueryRulesLogs API. 
Manages a LogToMetricAction Scheduled Query Rules resources within Azure Monitor +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type MonitorScheduledQueryRulesLog struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.criteria) || (has(self.initProvider) && has(self.initProvider.criteria))",message="spec.forProvider.criteria is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec MonitorScheduledQueryRulesLogSpec `json:"spec"` + Status MonitorScheduledQueryRulesLogStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// MonitorScheduledQueryRulesLogList contains a list of MonitorScheduledQueryRulesLogs +type MonitorScheduledQueryRulesLogList struct { + metav1.TypeMeta 
`json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []MonitorScheduledQueryRulesLog `json:"items"` +} + +// Repository type metadata. +var ( + MonitorScheduledQueryRulesLog_Kind = "MonitorScheduledQueryRulesLog" + MonitorScheduledQueryRulesLog_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: MonitorScheduledQueryRulesLog_Kind}.String() + MonitorScheduledQueryRulesLog_KindAPIVersion = MonitorScheduledQueryRulesLog_Kind + "." + CRDGroupVersion.String() + MonitorScheduledQueryRulesLog_GroupVersionKind = CRDGroupVersion.WithKind(MonitorScheduledQueryRulesLog_Kind) +) + +func init() { + SchemeBuilder.Register(&MonitorScheduledQueryRulesLog{}, &MonitorScheduledQueryRulesLogList{}) +} diff --git a/apis/iotcentral/v1beta1/zz_applicationnetworkruleset_types.go b/apis/iotcentral/v1beta1/zz_applicationnetworkruleset_types.go index 1a950b917..c6f442b40 100755 --- a/apis/iotcentral/v1beta1/zz_applicationnetworkruleset_types.go +++ b/apis/iotcentral/v1beta1/zz_applicationnetworkruleset_types.go @@ -25,7 +25,7 @@ type ApplicationNetworkRuleSetInitParameters struct { IPRule []IPRuleInitParameters `json:"ipRule,omitempty" tf:"ip_rule,omitempty"` // The ID of the IoT Central Application. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/iotcentral/v1beta1.Application + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/iotcentral/v1beta2.Application // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() IotcentralApplicationID *string `json:"iotcentralApplicationId,omitempty" tf:"iotcentral_application_id,omitempty"` @@ -71,7 +71,7 @@ type ApplicationNetworkRuleSetParameters struct { IPRule []IPRuleParameters `json:"ipRule,omitempty" tf:"ip_rule,omitempty"` // The ID of the IoT Central Application. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/iotcentral/v1beta1.Application + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/iotcentral/v1beta2.Application // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional IotcentralApplicationID *string `json:"iotcentralApplicationId,omitempty" tf:"iotcentral_application_id,omitempty"` diff --git a/apis/iotcentral/v1beta1/zz_generated.conversion_hubs.go b/apis/iotcentral/v1beta1/zz_generated.conversion_hubs.go index bcd1d4fb1..3b04bc275 100755 --- a/apis/iotcentral/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/iotcentral/v1beta1/zz_generated.conversion_hubs.go @@ -6,8 +6,5 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *Application) Hub() {} - // Hub marks this type as a conversion hub. func (tr *ApplicationNetworkRuleSet) Hub() {} diff --git a/apis/iotcentral/v1beta1/zz_generated.conversion_spokes.go b/apis/iotcentral/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..ada25ff08 --- /dev/null +++ b/apis/iotcentral/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Application to the hub type. 
+func (tr *Application) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Application type. +func (tr *Application) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/iotcentral/v1beta1/zz_generated.resolvers.go b/apis/iotcentral/v1beta1/zz_generated.resolvers.go index 5b2b2fdc6..d678da63d 100644 --- a/apis/iotcentral/v1beta1/zz_generated.resolvers.go +++ b/apis/iotcentral/v1beta1/zz_generated.resolvers.go @@ -76,7 +76,7 @@ func (mg *ApplicationNetworkRuleSet) ResolveReferences(ctx context.Context, c cl var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("iotcentral.azure.upbound.io", "v1beta1", "Application", "ApplicationList") + m, l, err = apisresolver.GetManagedResource("iotcentral.azure.upbound.io", "v1beta2", "Application", "ApplicationList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -95,7 +95,7 @@ func (mg *ApplicationNetworkRuleSet) ResolveReferences(ctx context.Context, c cl mg.Spec.ForProvider.IotcentralApplicationID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.IotcentralApplicationIDRef = rsp.ResolvedReference { - m, l, err = 
apisresolver.GetManagedResource("iotcentral.azure.upbound.io", "v1beta1", "Application", "ApplicationList") + m, l, err = apisresolver.GetManagedResource("iotcentral.azure.upbound.io", "v1beta2", "Application", "ApplicationList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/iotcentral/v1beta2/zz_application_terraformed.go b/apis/iotcentral/v1beta2/zz_application_terraformed.go new file mode 100755 index 000000000..6c92d7871 --- /dev/null +++ b/apis/iotcentral/v1beta2/zz_application_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Application +func (mg *Application) GetTerraformResourceType() string { + return "azurerm_iotcentral_application" +} + +// GetConnectionDetailsMapping for this Application +func (tr *Application) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Application +func (tr *Application) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Application +func (tr *Application) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Application +func (tr *Application) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return 
*tr.Status.AtProvider.ID +} + +// GetParameters of this Application +func (tr *Application) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Application +func (tr *Application) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Application +func (tr *Application) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Application +func (tr *Application) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Application using its observed tfState. 
+// returns True if there are any spec changes for the resource. +func (tr *Application) LateInitialize(attrs []byte) (bool, error) { + params := &ApplicationParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Application) GetTerraformSchemaVersion() int { + return 2 +} diff --git a/apis/iotcentral/v1beta2/zz_application_types.go b/apis/iotcentral/v1beta2/zz_application_types.go new file mode 100755 index 000000000..011047a8f --- /dev/null +++ b/apis/iotcentral/v1beta2/zz_application_types.go @@ -0,0 +1,235 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ApplicationInitParameters struct { + + // A display_name name. Custom display name for the IoT Central application. Default is resource name. + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // An identity block as defined below. + Identity *IdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Specifies the supported Azure location where the resource has to be create. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the name of the IotHub resource. Changing this forces a new resource to be created. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Whether public network access is allowed for the IoT Central Application. Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The name of the resource group under which the IotHub resource has to be created. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A sku name. Possible values is ST0, ST1, ST2, Default value is ST1 + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // A sub_domain name. Subdomain for the IoT Central URL. Each application must have a unique subdomain. + SubDomain *string `json:"subDomain,omitempty" tf:"sub_domain,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A template name. IoT Central application template name. Default is a custom application. Changing this forces a new resource to be created. + Template *string `json:"template,omitempty" tf:"template,omitempty"` +} + +type ApplicationObservation struct { + + // A display_name name. Custom display name for the IoT Central application. Default is resource name. + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // The ID of the IoT Central Application. 
+ ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. + Identity *IdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // Specifies the supported Azure location where the resource has to be create. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the name of the IotHub resource. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Whether public network access is allowed for the IoT Central Application. Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The name of the resource group under which the IotHub resource has to be created. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A sku name. Possible values is ST0, ST1, ST2, Default value is ST1 + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // A sub_domain name. Subdomain for the IoT Central URL. Each application must have a unique subdomain. + SubDomain *string `json:"subDomain,omitempty" tf:"sub_domain,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A template name. IoT Central application template name. Default is a custom application. Changing this forces a new resource to be created. + Template *string `json:"template,omitempty" tf:"template,omitempty"` +} + +type ApplicationParameters struct { + + // A display_name name. Custom display name for the IoT Central application. Default is resource name. + // +kubebuilder:validation:Optional + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // An identity block as defined below. 
+ // +kubebuilder:validation:Optional + Identity *IdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Specifies the supported Azure location where the resource has to be create. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the name of the IotHub resource. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Whether public network access is allowed for the IoT Central Application. Defaults to true. + // +kubebuilder:validation:Optional + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The name of the resource group under which the IotHub resource has to be created. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A sku name. Possible values is ST0, ST1, ST2, Default value is ST1 + // +kubebuilder:validation:Optional + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // A sub_domain name. Subdomain for the IoT Central URL. Each application must have a unique subdomain. 
+ // +kubebuilder:validation:Optional + SubDomain *string `json:"subDomain,omitempty" tf:"sub_domain,omitempty"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A template name. IoT Central application template name. Default is a custom application. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Template *string `json:"template,omitempty" tf:"template,omitempty"` +} + +type IdentityInitParameters struct { + + // Specifies the type of Managed Service Identity that should be configured on this IoT Central Application. The only possible value is SystemAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityObservation struct { + + // The Principal ID associated with this Managed Service Identity. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID associated with this Managed Service Identity. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this IoT Central Application. The only possible value is SystemAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityParameters struct { + + // Specifies the type of Managed Service Identity that should be configured on this IoT Central Application. The only possible value is SystemAssigned. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +// ApplicationSpec defines the desired state of Application +type ApplicationSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ApplicationParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. 
+ // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ApplicationInitParameters `json:"initProvider,omitempty"` +} + +// ApplicationStatus defines the observed state of Application. +type ApplicationStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ApplicationObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Application is the Schema for the Applications API. Manages an IotCentral Application +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type Application struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // 
+kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.subDomain) || (has(self.initProvider) && has(self.initProvider.subDomain))",message="spec.forProvider.subDomain is a required parameter" + Spec ApplicationSpec `json:"spec"` + Status ApplicationStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ApplicationList contains a list of Applications +type ApplicationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Application `json:"items"` +} + +// Repository type metadata. +var ( + Application_Kind = "Application" + Application_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Application_Kind}.String() + Application_KindAPIVersion = Application_Kind + "." + CRDGroupVersion.String() + Application_GroupVersionKind = CRDGroupVersion.WithKind(Application_Kind) +) + +func init() { + SchemeBuilder.Register(&Application{}, &ApplicationList{}) +} diff --git a/apis/iotcentral/v1beta2/zz_generated.conversion_hubs.go b/apis/iotcentral/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 000000000..2d4865553 --- /dev/null +++ b/apis/iotcentral/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. 
+func (tr *Application) Hub() {} diff --git a/apis/iotcentral/v1beta2/zz_generated.deepcopy.go b/apis/iotcentral/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..4129ca7d7 --- /dev/null +++ b/apis/iotcentral/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,431 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Application) DeepCopyInto(out *Application) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Application. +func (in *Application) DeepCopy() *Application { + if in == nil { + return nil + } + out := new(Application) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Application) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ApplicationInitParameters) DeepCopyInto(out *ApplicationInitParameters) { + *out = *in + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.SubDomain != nil { + in, out := &in.SubDomain, &out.SubDomain + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Template != nil { + in, out := &in.Template, &out.Template + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationInitParameters. 
+func (in *ApplicationInitParameters) DeepCopy() *ApplicationInitParameters { + if in == nil { + return nil + } + out := new(ApplicationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationList) DeepCopyInto(out *ApplicationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Application, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationList. +func (in *ApplicationList) DeepCopy() *ApplicationList { + if in == nil { + return nil + } + out := new(ApplicationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ApplicationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ApplicationObservation) DeepCopyInto(out *ApplicationObservation) { + *out = *in + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.SubDomain != nil { + in, out := &in.SubDomain, &out.SubDomain + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Template != nil { + in, out := &in.Template, &out.Template + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationObservation. +func (in *ApplicationObservation) DeepCopy() *ApplicationObservation { + if in == nil { + return nil + } + out := new(ApplicationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ApplicationParameters) DeepCopyInto(out *ApplicationParameters) { + *out = *in + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.SubDomain != nil { + in, out := &in.SubDomain, &out.SubDomain + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Template != nil { + in, out := &in.Template, &out.Template + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationParameters. 
+func (in *ApplicationParameters) DeepCopy() *ApplicationParameters { + if in == nil { + return nil + } + out := new(ApplicationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationSpec) DeepCopyInto(out *ApplicationSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationSpec. +func (in *ApplicationSpec) DeepCopy() *ApplicationSpec { + if in == nil { + return nil + } + out := new(ApplicationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationStatus) DeepCopyInto(out *ApplicationStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationStatus. +func (in *ApplicationStatus) DeepCopy() *ApplicationStatus { + if in == nil { + return nil + } + out := new(ApplicationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityInitParameters) DeepCopyInto(out *IdentityInitParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityInitParameters. 
+func (in *IdentityInitParameters) DeepCopy() *IdentityInitParameters { + if in == nil { + return nil + } + out := new(IdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityObservation) DeepCopyInto(out *IdentityObservation) { + *out = *in + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityObservation. +func (in *IdentityObservation) DeepCopy() *IdentityObservation { + if in == nil { + return nil + } + out := new(IdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityParameters) DeepCopyInto(out *IdentityParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityParameters. +func (in *IdentityParameters) DeepCopy() *IdentityParameters { + if in == nil { + return nil + } + out := new(IdentityParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/iotcentral/v1beta2/zz_generated.managed.go b/apis/iotcentral/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..d1481109b --- /dev/null +++ b/apis/iotcentral/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. 
+ +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Application. +func (mg *Application) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Application. +func (mg *Application) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Application. +func (mg *Application) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Application. +func (mg *Application) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Application. +func (mg *Application) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Application. +func (mg *Application) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Application. +func (mg *Application) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Application. +func (mg *Application) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Application. +func (mg *Application) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Application. +func (mg *Application) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Application. +func (mg *Application) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Application. 
+func (mg *Application) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/iotcentral/v1beta2/zz_generated.managedlist.go b/apis/iotcentral/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..9c9817b1e --- /dev/null +++ b/apis/iotcentral/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this ApplicationList. +func (l *ApplicationList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/iotcentral/v1beta2/zz_generated.resolvers.go b/apis/iotcentral/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..c2984d501 --- /dev/null +++ b/apis/iotcentral/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,67 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + apisresolver "github.com/upbound/provider-azure/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *Application) ResolveReferences( // ResolveReferences of this Application. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ResourceGroupNameRef, + Selector: mg.Spec.InitProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ResourceGroupName") + } + mg.Spec.InitProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/iotcentral/v1beta2/zz_groupversion_info.go 
b/apis/iotcentral/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..7b12156d2 --- /dev/null +++ b/apis/iotcentral/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=iotcentral.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "iotcentral.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/keyvault/v1beta1/zz_accesspolicy_types.go b/apis/keyvault/v1beta1/zz_accesspolicy_types.go index e1cd9a2ac..923f25fe7 100755 --- a/apis/keyvault/v1beta1/zz_accesspolicy_types.go +++ b/apis/keyvault/v1beta1/zz_accesspolicy_types.go @@ -25,7 +25,7 @@ type AccessPolicyInitParameters_2 struct { KeyPermissions []*string `json:"keyPermissions,omitempty" tf:"key_permissions,omitempty"` // Specifies the id of the Key Vault resource. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta1.Vault + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta2.Vault // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() KeyVaultID *string `json:"keyVaultId,omitempty" tf:"key_vault_id,omitempty"` @@ -95,7 +95,7 @@ type AccessPolicyParameters_2 struct { KeyPermissions []*string `json:"keyPermissions,omitempty" tf:"key_permissions,omitempty"` // Specifies the id of the Key Vault resource. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta1.Vault + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta2.Vault // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() // +kubebuilder:validation:Optional KeyVaultID *string `json:"keyVaultId,omitempty" tf:"key_vault_id,omitempty"` diff --git a/apis/keyvault/v1beta1/zz_certificatecontacts_types.go b/apis/keyvault/v1beta1/zz_certificatecontacts_types.go index bc02d069f..ef6662d70 100755 --- a/apis/keyvault/v1beta1/zz_certificatecontacts_types.go +++ b/apis/keyvault/v1beta1/zz_certificatecontacts_types.go @@ -58,7 +58,7 @@ type CertificateContactsInitParameters struct { Contact []CertificateContactsContactInitParameters `json:"contact,omitempty" tf:"contact,omitempty"` // The ID of the Key Vault. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta1.Vault + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta2.Vault // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() KeyVaultID *string `json:"keyVaultId,omitempty" tf:"key_vault_id,omitempty"` @@ -90,7 +90,7 @@ type CertificateContactsParameters struct { Contact []CertificateContactsContactParameters `json:"contact,omitempty" tf:"contact,omitempty"` // The ID of the Key Vault. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta1.Vault + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta2.Vault // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional KeyVaultID *string `json:"keyVaultId,omitempty" tf:"key_vault_id,omitempty"` diff --git a/apis/keyvault/v1beta1/zz_certificateissuer_types.go b/apis/keyvault/v1beta1/zz_certificateissuer_types.go index 449060e8f..e1350da47 100755 --- a/apis/keyvault/v1beta1/zz_certificateissuer_types.go +++ b/apis/keyvault/v1beta1/zz_certificateissuer_types.go @@ -71,7 +71,7 @@ type CertificateIssuerInitParameters struct { Admin []AdminInitParameters `json:"admin,omitempty" tf:"admin,omitempty"` // The ID of the Key Vault in which to create the Certificate Issuer. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta1.Vault + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta2.Vault // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() KeyVaultID *string `json:"keyVaultId,omitempty" tf:"key_vault_id,omitempty"` @@ -122,7 +122,7 @@ type CertificateIssuerParameters struct { Admin []AdminParameters `json:"admin,omitempty" tf:"admin,omitempty"` // The ID of the Key Vault in which to create the Certificate Issuer. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta1.Vault + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta2.Vault // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() // +kubebuilder:validation:Optional KeyVaultID *string `json:"keyVaultId,omitempty" tf:"key_vault_id,omitempty"` diff --git a/apis/keyvault/v1beta1/zz_generated.conversion_hubs.go b/apis/keyvault/v1beta1/zz_generated.conversion_hubs.go index 9a57b7922..90d2dc243 100755 --- a/apis/keyvault/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/keyvault/v1beta1/zz_generated.conversion_hubs.go @@ -6,27 +6,15 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *Vault) Hub() {} - // Hub marks this type as a conversion hub. func (tr *AccessPolicy) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *Certificate) Hub() {} - // Hub marks this type as a conversion hub. func (tr *CertificateContacts) Hub() {} // Hub marks this type as a conversion hub. func (tr *CertificateIssuer) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *Key) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *ManagedHardwareSecurityModule) Hub() {} - // Hub marks this type as a conversion hub. 
func (tr *ManagedStorageAccount) Hub() {} diff --git a/apis/keyvault/v1beta1/zz_generated.conversion_spokes.go b/apis/keyvault/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..ec11eadc2 --- /dev/null +++ b/apis/keyvault/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,94 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Certificate to the hub type. +func (tr *Certificate) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Certificate type. +func (tr *Certificate) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Key to the hub type. 
+func (tr *Key) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Key type. +func (tr *Key) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this ManagedHardwareSecurityModule to the hub type. +func (tr *ManagedHardwareSecurityModule) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ManagedHardwareSecurityModule type. +func (tr *ManagedHardwareSecurityModule) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Vault to the hub type. 
+func (tr *Vault) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Vault type. +func (tr *Vault) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/keyvault/v1beta1/zz_generated.resolvers.go b/apis/keyvault/v1beta1/zz_generated.resolvers.go index 80e3a3d6e..634ac17e4 100644 --- a/apis/keyvault/v1beta1/zz_generated.resolvers.go +++ b/apis/keyvault/v1beta1/zz_generated.resolvers.go @@ -28,7 +28,7 @@ func (mg *AccessPolicy) ResolveReferences( // ResolveReferences of this AccessPo var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta1", "Vault", "VaultList") + m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta2", "Vault", "VaultList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -47,7 +47,7 @@ func (mg *AccessPolicy) ResolveReferences( // ResolveReferences of this AccessPo mg.Spec.ForProvider.KeyVaultID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.KeyVaultIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta1", "Vault", "VaultList") + m, l, err = 
apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta2", "Vault", "VaultList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -128,7 +128,7 @@ func (mg *CertificateContacts) ResolveReferences(ctx context.Context, c client.R var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta1", "Vault", "VaultList") + m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta2", "Vault", "VaultList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -147,7 +147,7 @@ func (mg *CertificateContacts) ResolveReferences(ctx context.Context, c client.R mg.Spec.ForProvider.KeyVaultID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.KeyVaultIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta1", "Vault", "VaultList") + m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta2", "Vault", "VaultList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -178,7 +178,7 @@ func (mg *CertificateIssuer) ResolveReferences(ctx context.Context, c client.Rea var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta1", "Vault", "VaultList") + m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta2", "Vault", "VaultList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -197,7 +197,7 @@ func (mg *CertificateIssuer) ResolveReferences(ctx context.Context, c client.Rea mg.Spec.ForProvider.KeyVaultID = reference.ToPtrValue(rsp.ResolvedValue) 
mg.Spec.ForProvider.KeyVaultIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta1", "Vault", "VaultList") + m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta2", "Vault", "VaultList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -309,7 +309,7 @@ func (mg *ManagedStorageAccount) ResolveReferences(ctx context.Context, c client var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta1", "Vault", "VaultList") + m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta2", "Vault", "VaultList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -328,7 +328,7 @@ func (mg *ManagedStorageAccount) ResolveReferences(ctx context.Context, c client mg.Spec.ForProvider.KeyVaultID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.KeyVaultIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -347,7 +347,7 @@ func (mg *ManagedStorageAccount) ResolveReferences(ctx context.Context, c client mg.Spec.ForProvider.StorageAccountID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.StorageAccountIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta1", "Vault", "VaultList") + m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta2", "Vault", "VaultList") if err != nil { return 
errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -366,7 +366,7 @@ func (mg *ManagedStorageAccount) ResolveReferences(ctx context.Context, c client mg.Spec.InitProvider.KeyVaultID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.KeyVaultIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -428,7 +428,7 @@ func (mg *Secret) ResolveReferences(ctx context.Context, c client.Reader) error var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta1", "Vault", "VaultList") + m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta2", "Vault", "VaultList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -447,7 +447,7 @@ func (mg *Secret) ResolveReferences(ctx context.Context, c client.Reader) error mg.Spec.ForProvider.KeyVaultID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.KeyVaultIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta1", "Vault", "VaultList") + m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta2", "Vault", "VaultList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/keyvault/v1beta1/zz_managedstorageaccount_types.go b/apis/keyvault/v1beta1/zz_managedstorageaccount_types.go index 06e95376b..61b916fb4 100755 --- 
a/apis/keyvault/v1beta1/zz_managedstorageaccount_types.go +++ b/apis/keyvault/v1beta1/zz_managedstorageaccount_types.go @@ -16,7 +16,7 @@ import ( type ManagedStorageAccountInitParameters struct { // The ID of the Key Vault where the Managed Storage Account should be created. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta1.Vault + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta2.Vault // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() KeyVaultID *string `json:"keyVaultId,omitempty" tf:"key_vault_id,omitempty"` @@ -35,7 +35,7 @@ type ManagedStorageAccountInitParameters struct { RegenerationPeriod *string `json:"regenerationPeriod,omitempty" tf:"regeneration_period,omitempty"` // The ID of the Storage Account. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` @@ -83,7 +83,7 @@ type ManagedStorageAccountObservation struct { type ManagedStorageAccountParameters struct { // The ID of the Key Vault where the Managed Storage Account should be created. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta1.Vault + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta2.Vault // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() // +kubebuilder:validation:Optional KeyVaultID *string `json:"keyVaultId,omitempty" tf:"key_vault_id,omitempty"` @@ -105,7 +105,7 @@ type ManagedStorageAccountParameters struct { RegenerationPeriod *string `json:"regenerationPeriod,omitempty" tf:"regeneration_period,omitempty"` // The ID of the Storage Account. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() // +kubebuilder:validation:Optional StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` diff --git a/apis/keyvault/v1beta1/zz_secret_types.go b/apis/keyvault/v1beta1/zz_secret_types.go index bf59b19ab..2b142ac01 100755 --- a/apis/keyvault/v1beta1/zz_secret_types.go +++ b/apis/keyvault/v1beta1/zz_secret_types.go @@ -22,7 +22,7 @@ type SecretInitParameters struct { ExpirationDate *string `json:"expirationDate,omitempty" tf:"expiration_date,omitempty"` // The ID of the Key Vault where the Secret should be created. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta1.Vault + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta2.Vault // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() KeyVaultID *string `json:"keyVaultId,omitempty" tf:"key_vault_id,omitempty"` @@ -93,7 +93,7 @@ type SecretParameters struct { ExpirationDate *string `json:"expirationDate,omitempty" tf:"expiration_date,omitempty"` // The ID of the Key Vault where the Secret should be created. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta1.Vault + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta2.Vault // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() // +kubebuilder:validation:Optional KeyVaultID *string `json:"keyVaultId,omitempty" tf:"key_vault_id,omitempty"` diff --git a/apis/keyvault/v1beta2/zz_certificate_terraformed.go b/apis/keyvault/v1beta2/zz_certificate_terraformed.go new file mode 100755 index 000000000..7dec14e3d --- /dev/null +++ b/apis/keyvault/v1beta2/zz_certificate_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Certificate +func (mg *Certificate) GetTerraformResourceType() string { + return "azurerm_key_vault_certificate" +} + +// GetConnectionDetailsMapping for this Certificate +func (tr *Certificate) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"certificate[*].contents": "spec.forProvider.certificate[*].contentsSecretRef", "certificate[*].password": "spec.forProvider.certificate[*].passwordSecretRef"} +} + +// GetObservation of this Certificate +func (tr *Certificate) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Certificate +func (tr *Certificate) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Certificate +func (tr *Certificate) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Certificate +func (tr *Certificate) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Certificate +func (tr *Certificate) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Certificate +func 
(tr *Certificate) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Certificate +func (tr *Certificate) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Certificate using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Certificate) LateInitialize(attrs []byte) (bool, error) { + params := &CertificateParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Certificate) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/keyvault/v1beta2/zz_certificate_types.go b/apis/keyvault/v1beta2/zz_certificate_types.go new file mode 100755 index 000000000..4a77970d8 --- /dev/null +++ b/apis/keyvault/v1beta2/zz_certificate_types.go @@ -0,0 +1,577 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ActionInitParameters struct { + + // The Type of action to be performed when the lifetime trigger is triggerec. Possible values include AutoRenew and EmailContacts. + ActionType *string `json:"actionType,omitempty" tf:"action_type,omitempty"` +} + +type ActionObservation struct { + + // The Type of action to be performed when the lifetime trigger is triggerec. Possible values include AutoRenew and EmailContacts. + ActionType *string `json:"actionType,omitempty" tf:"action_type,omitempty"` +} + +type ActionParameters struct { + + // The Type of action to be performed when the lifetime trigger is triggerec. Possible values include AutoRenew and EmailContacts. + // +kubebuilder:validation:Optional + ActionType *string `json:"actionType" tf:"action_type,omitempty"` +} + +type CertificateAttributeInitParameters struct { +} + +type CertificateAttributeObservation struct { + + // The create time of the Key Vault Certificate. + Created *string `json:"created,omitempty" tf:"created,omitempty"` + + // whether the Key Vault Certificate is enabled. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The expires time of the Key Vault Certificate. 
+ Expires *string `json:"expires,omitempty" tf:"expires,omitempty"` + + // The not before valid time of the Key Vault Certificate. + NotBefore *string `json:"notBefore,omitempty" tf:"not_before,omitempty"` + + // The deletion recovery level of the Key Vault Certificate. + RecoveryLevel *string `json:"recoveryLevel,omitempty" tf:"recovery_level,omitempty"` + + // The recent update time of the Key Vault Certificate. + Updated *string `json:"updated,omitempty" tf:"updated,omitempty"` +} + +type CertificateAttributeParameters struct { +} + +type CertificateCertificateInitParameters struct { +} + +type CertificateCertificateObservation struct { +} + +type CertificateCertificateParameters struct { + + // The base64-encoded certificate contents. + // +kubebuilder:validation:Required + ContentsSecretRef v1.SecretKeySelector `json:"contentsSecretRef" tf:"-"` + + // The password associated with the certificate. + // +kubebuilder:validation:Optional + PasswordSecretRef *v1.SecretKeySelector `json:"passwordSecretRef,omitempty" tf:"-"` +} + +type CertificateInitParameters struct { + + // A certificate block as defined below, used to Import an existing certificate. Changing this will create a new version of the Key Vault Certificate. + Certificate *CertificateCertificateInitParameters `json:"certificate,omitempty" tf:"certificate,omitempty"` + + // A certificate_policy block as defined below. Changing this (except the lifetime_action field) will create a new version of the Key Vault Certificate. + CertificatePolicy *CertificatePolicyInitParameters `json:"certificatePolicy,omitempty" tf:"certificate_policy,omitempty"` + + // The ID of the Key Vault where the Certificate should be created. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta2.Vault + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + KeyVaultID *string `json:"keyVaultId,omitempty" tf:"key_vault_id,omitempty"` + + // Reference to a Vault in keyvault to populate keyVaultId. + // +kubebuilder:validation:Optional + KeyVaultIDRef *v1.Reference `json:"keyVaultIdRef,omitempty" tf:"-"` + + // Selector for a Vault in keyvault to populate keyVaultId. + // +kubebuilder:validation:Optional + KeyVaultIDSelector *v1.Selector `json:"keyVaultIdSelector,omitempty" tf:"-"` + + // Specifies the name of the Key Vault Certificate. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type CertificateObservation struct { + + // A certificate block as defined below, used to Import an existing certificate. Changing this will create a new version of the Key Vault Certificate. + Certificate *CertificateCertificateParameters `json:"certificate,omitempty" tf:"certificate,omitempty"` + + // A certificate_attribute block as defined below. + CertificateAttribute []CertificateAttributeObservation `json:"certificateAttribute,omitempty" tf:"certificate_attribute,omitempty"` + + // The raw Key Vault Certificate data represented as a hexadecimal string. + CertificateData *string `json:"certificateData,omitempty" tf:"certificate_data,omitempty"` + + // The Base64 encoded Key Vault Certificate data. + CertificateDataBase64 *string `json:"certificateDataBase64,omitempty" tf:"certificate_data_base64,omitempty"` + + // A certificate_policy block as defined below. Changing this (except the lifetime_action field) will create a new version of the Key Vault Certificate. 
+ CertificatePolicy *CertificatePolicyObservation `json:"certificatePolicy,omitempty" tf:"certificate_policy,omitempty"` + + // The Key Vault Certificate ID. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The ID of the Key Vault where the Certificate should be created. Changing this forces a new resource to be created. + KeyVaultID *string `json:"keyVaultId,omitempty" tf:"key_vault_id,omitempty"` + + // Specifies the name of the Key Vault Certificate. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The (Versioned) ID for this Key Vault Certificate. This property points to a specific version of a Key Vault Certificate, as such using this won't auto-rotate values if used in other Azure Services. + ResourceManagerID *string `json:"resourceManagerId,omitempty" tf:"resource_manager_id,omitempty"` + + // The Versionless ID of the Key Vault Certificate. This property allows other Azure Services (that support it) to auto-rotate their value when the Key Vault Certificate is updated. + ResourceManagerVersionlessID *string `json:"resourceManagerVersionlessId,omitempty" tf:"resource_manager_versionless_id,omitempty"` + + // The ID of the associated Key Vault Secret. + SecretID *string `json:"secretId,omitempty" tf:"secret_id,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The X509 Thumbprint of the Key Vault Certificate represented as a hexadecimal string. + Thumbprint *string `json:"thumbprint,omitempty" tf:"thumbprint,omitempty"` + + // The current version of the Key Vault Certificate. + Version *string `json:"version,omitempty" tf:"version,omitempty"` + + // The Base ID of the Key Vault Certificate. + VersionlessID *string `json:"versionlessId,omitempty" tf:"versionless_id,omitempty"` + + // The Base ID of the Key Vault Secret. 
+ VersionlessSecretID *string `json:"versionlessSecretId,omitempty" tf:"versionless_secret_id,omitempty"` +} + +type CertificateParameters struct { + + // A certificate block as defined below, used to Import an existing certificate. Changing this will create a new version of the Key Vault Certificate. + // +kubebuilder:validation:Optional + Certificate *CertificateCertificateParameters `json:"certificate,omitempty" tf:"certificate,omitempty"` + + // A certificate_policy block as defined below. Changing this (except the lifetime_action field) will create a new version of the Key Vault Certificate. + // +kubebuilder:validation:Optional + CertificatePolicy *CertificatePolicyParameters `json:"certificatePolicy,omitempty" tf:"certificate_policy,omitempty"` + + // The ID of the Key Vault where the Certificate should be created. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta2.Vault + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + KeyVaultID *string `json:"keyVaultId,omitempty" tf:"key_vault_id,omitempty"` + + // Reference to a Vault in keyvault to populate keyVaultId. + // +kubebuilder:validation:Optional + KeyVaultIDRef *v1.Reference `json:"keyVaultIdRef,omitempty" tf:"-"` + + // Selector for a Vault in keyvault to populate keyVaultId. + // +kubebuilder:validation:Optional + KeyVaultIDSelector *v1.Selector `json:"keyVaultIdSelector,omitempty" tf:"-"` + + // Specifies the name of the Key Vault Certificate. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A mapping of tags to assign to the resource. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type CertificatePolicyInitParameters struct { + + // A issuer_parameters block as defined below. + IssuerParameters *IssuerParametersInitParameters `json:"issuerParameters,omitempty" tf:"issuer_parameters,omitempty"` + + // A key_properties block as defined below. + KeyProperties *KeyPropertiesInitParameters `json:"keyProperties,omitempty" tf:"key_properties,omitempty"` + + // A lifetime_action block as defined below. + LifetimeAction []LifetimeActionInitParameters `json:"lifetimeAction,omitempty" tf:"lifetime_action,omitempty"` + + // A secret_properties block as defined below. + SecretProperties *SecretPropertiesInitParameters `json:"secretProperties,omitempty" tf:"secret_properties,omitempty"` + + // A x509_certificate_properties block as defined below. Required when certificate block is not specified. + X509CertificateProperties *X509CertificatePropertiesInitParameters `json:"x509CertificateProperties,omitempty" tf:"x509_certificate_properties,omitempty"` +} + +type CertificatePolicyObservation struct { + + // A issuer_parameters block as defined below. + IssuerParameters *IssuerParametersObservation `json:"issuerParameters,omitempty" tf:"issuer_parameters,omitempty"` + + // A key_properties block as defined below. + KeyProperties *KeyPropertiesObservation `json:"keyProperties,omitempty" tf:"key_properties,omitempty"` + + // A lifetime_action block as defined below. + LifetimeAction []LifetimeActionObservation `json:"lifetimeAction,omitempty" tf:"lifetime_action,omitempty"` + + // A secret_properties block as defined below. + SecretProperties *SecretPropertiesObservation `json:"secretProperties,omitempty" tf:"secret_properties,omitempty"` + + // A x509_certificate_properties block as defined below. Required when certificate block is not specified. 
+ X509CertificateProperties *X509CertificatePropertiesObservation `json:"x509CertificateProperties,omitempty" tf:"x509_certificate_properties,omitempty"` +} + +type CertificatePolicyParameters struct { + + // A issuer_parameters block as defined below. + // +kubebuilder:validation:Optional + IssuerParameters *IssuerParametersParameters `json:"issuerParameters" tf:"issuer_parameters,omitempty"` + + // A key_properties block as defined below. + // +kubebuilder:validation:Optional + KeyProperties *KeyPropertiesParameters `json:"keyProperties" tf:"key_properties,omitempty"` + + // A lifetime_action block as defined below. + // +kubebuilder:validation:Optional + LifetimeAction []LifetimeActionParameters `json:"lifetimeAction,omitempty" tf:"lifetime_action,omitempty"` + + // A secret_properties block as defined below. + // +kubebuilder:validation:Optional + SecretProperties *SecretPropertiesParameters `json:"secretProperties" tf:"secret_properties,omitempty"` + + // A x509_certificate_properties block as defined below. Required when certificate block is not specified. + // +kubebuilder:validation:Optional + X509CertificateProperties *X509CertificatePropertiesParameters `json:"x509CertificateProperties,omitempty" tf:"x509_certificate_properties,omitempty"` +} + +type IssuerParametersInitParameters struct { + + // The name of the Certificate Issuer. Possible values include Self (for self-signed certificate), or Unknown (for a certificate issuing authority like Let's Encrypt and Azure direct supported ones). + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type IssuerParametersObservation struct { + + // The name of the Certificate Issuer. Possible values include Self (for self-signed certificate), or Unknown (for a certificate issuing authority like Let's Encrypt and Azure direct supported ones). + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type IssuerParametersParameters struct { + + // The name of the Certificate Issuer. 
Possible values include Self (for self-signed certificate), or Unknown (for a certificate issuing authority like Let's Encrypt and Azure direct supported ones). + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type KeyPropertiesInitParameters struct { + + // Specifies the curve to use when creating an EC key. Possible values are P-256, P-256K, P-384, and P-521. This field will be required in a future release if key_type is EC or EC-HSM. + Curve *string `json:"curve,omitempty" tf:"curve,omitempty"` + + // Is this certificate exportable? + Exportable *bool `json:"exportable,omitempty" tf:"exportable,omitempty"` + + // The size of the key used in the certificate. Possible values include 2048, 3072, and 4096 for RSA keys, or 256, 384, and 521 for EC keys. This property is required when using RSA keys. + KeySize *float64 `json:"keySize,omitempty" tf:"key_size,omitempty"` + + // Specifies the type of key. Possible values are EC, EC-HSM, RSA, RSA-HSM and oct. + KeyType *string `json:"keyType,omitempty" tf:"key_type,omitempty"` + + // Is the key reusable? + ReuseKey *bool `json:"reuseKey,omitempty" tf:"reuse_key,omitempty"` +} + +type KeyPropertiesObservation struct { + + // Specifies the curve to use when creating an EC key. Possible values are P-256, P-256K, P-384, and P-521. This field will be required in a future release if key_type is EC or EC-HSM. + Curve *string `json:"curve,omitempty" tf:"curve,omitempty"` + + // Is this certificate exportable? + Exportable *bool `json:"exportable,omitempty" tf:"exportable,omitempty"` + + // The size of the key used in the certificate. Possible values include 2048, 3072, and 4096 for RSA keys, or 256, 384, and 521 for EC keys. This property is required when using RSA keys. + KeySize *float64 `json:"keySize,omitempty" tf:"key_size,omitempty"` + + // Specifies the type of key. Possible values are EC, EC-HSM, RSA, RSA-HSM and oct. 
+ KeyType *string `json:"keyType,omitempty" tf:"key_type,omitempty"` + + // Is the key reusable? + ReuseKey *bool `json:"reuseKey,omitempty" tf:"reuse_key,omitempty"` +} + +type KeyPropertiesParameters struct { + + // Specifies the curve to use when creating an EC key. Possible values are P-256, P-256K, P-384, and P-521. This field will be required in a future release if key_type is EC or EC-HSM. + // +kubebuilder:validation:Optional + Curve *string `json:"curve,omitempty" tf:"curve,omitempty"` + + // Is this certificate exportable? + // +kubebuilder:validation:Optional + Exportable *bool `json:"exportable" tf:"exportable,omitempty"` + + // The size of the key used in the certificate. Possible values include 2048, 3072, and 4096 for RSA keys, or 256, 384, and 521 for EC keys. This property is required when using RSA keys. + // +kubebuilder:validation:Optional + KeySize *float64 `json:"keySize,omitempty" tf:"key_size,omitempty"` + + // Specifies the type of key. Possible values are EC, EC-HSM, RSA, RSA-HSM and oct. + // +kubebuilder:validation:Optional + KeyType *string `json:"keyType" tf:"key_type,omitempty"` + + // Is the key reusable? + // +kubebuilder:validation:Optional + ReuseKey *bool `json:"reuseKey" tf:"reuse_key,omitempty"` +} + +type LifetimeActionInitParameters struct { + + // A action block as defined below. + Action *ActionInitParameters `json:"action,omitempty" tf:"action,omitempty"` + + // A trigger block as defined below. + Trigger *TriggerInitParameters `json:"trigger,omitempty" tf:"trigger,omitempty"` +} + +type LifetimeActionObservation struct { + + // A action block as defined below. + Action *ActionObservation `json:"action,omitempty" tf:"action,omitempty"` + + // A trigger block as defined below. + Trigger *TriggerObservation `json:"trigger,omitempty" tf:"trigger,omitempty"` +} + +type LifetimeActionParameters struct { + + // A action block as defined below. 
+ // +kubebuilder:validation:Optional + Action *ActionParameters `json:"action" tf:"action,omitempty"` + + // A trigger block as defined below. + // +kubebuilder:validation:Optional + Trigger *TriggerParameters `json:"trigger" tf:"trigger,omitempty"` +} + +type SecretPropertiesInitParameters struct { + + // The Content-Type of the Certificate, such as application/x-pkcs12 for a PFX or application/x-pem-file for a PEM. + ContentType *string `json:"contentType,omitempty" tf:"content_type,omitempty"` +} + +type SecretPropertiesObservation struct { + + // The Content-Type of the Certificate, such as application/x-pkcs12 for a PFX or application/x-pem-file for a PEM. + ContentType *string `json:"contentType,omitempty" tf:"content_type,omitempty"` +} + +type SecretPropertiesParameters struct { + + // The Content-Type of the Certificate, such as application/x-pkcs12 for a PFX or application/x-pem-file for a PEM. + // +kubebuilder:validation:Optional + ContentType *string `json:"contentType" tf:"content_type,omitempty"` +} + +type SubjectAlternativeNamesInitParameters struct { + + // A list of alternative DNS names (FQDNs) identified by the Certificate. + // +listType=set + DNSNames []*string `json:"dnsNames,omitempty" tf:"dns_names,omitempty"` + + // A list of email addresses identified by this Certificate. + // +listType=set + Emails []*string `json:"emails,omitempty" tf:"emails,omitempty"` + + // A list of User Principal Names identified by the Certificate. + // +listType=set + Upns []*string `json:"upns,omitempty" tf:"upns,omitempty"` +} + +type SubjectAlternativeNamesObservation struct { + + // A list of alternative DNS names (FQDNs) identified by the Certificate. + // +listType=set + DNSNames []*string `json:"dnsNames,omitempty" tf:"dns_names,omitempty"` + + // A list of email addresses identified by this Certificate. 
+ // +listType=set + Emails []*string `json:"emails,omitempty" tf:"emails,omitempty"` + + // A list of User Principal Names identified by the Certificate. + // +listType=set + Upns []*string `json:"upns,omitempty" tf:"upns,omitempty"` +} + +type SubjectAlternativeNamesParameters struct { + + // A list of alternative DNS names (FQDNs) identified by the Certificate. + // +kubebuilder:validation:Optional + // +listType=set + DNSNames []*string `json:"dnsNames,omitempty" tf:"dns_names,omitempty"` + + // A list of email addresses identified by this Certificate. + // +kubebuilder:validation:Optional + // +listType=set + Emails []*string `json:"emails,omitempty" tf:"emails,omitempty"` + + // A list of User Principal Names identified by the Certificate. + // +kubebuilder:validation:Optional + // +listType=set + Upns []*string `json:"upns,omitempty" tf:"upns,omitempty"` +} + +type TriggerInitParameters struct { + + // The number of days before the Certificate expires that the action associated with this Trigger should run. Conflicts with lifetime_percentage. + DaysBeforeExpiry *float64 `json:"daysBeforeExpiry,omitempty" tf:"days_before_expiry,omitempty"` + + // The percentage at which during the Certificates Lifetime the action associated with this Trigger should run. Conflicts with days_before_expiry. + LifetimePercentage *float64 `json:"lifetimePercentage,omitempty" tf:"lifetime_percentage,omitempty"` +} + +type TriggerObservation struct { + + // The number of days before the Certificate expires that the action associated with this Trigger should run. Conflicts with lifetime_percentage. + DaysBeforeExpiry *float64 `json:"daysBeforeExpiry,omitempty" tf:"days_before_expiry,omitempty"` + + // The percentage at which during the Certificates Lifetime the action associated with this Trigger should run. Conflicts with days_before_expiry. 
+ LifetimePercentage *float64 `json:"lifetimePercentage,omitempty" tf:"lifetime_percentage,omitempty"` +} + +type TriggerParameters struct { + + // The number of days before the Certificate expires that the action associated with this Trigger should run. Conflicts with lifetime_percentage. + // +kubebuilder:validation:Optional + DaysBeforeExpiry *float64 `json:"daysBeforeExpiry,omitempty" tf:"days_before_expiry,omitempty"` + + // The percentage at which during the Certificates Lifetime the action associated with this Trigger should run. Conflicts with days_before_expiry. + // +kubebuilder:validation:Optional + LifetimePercentage *float64 `json:"lifetimePercentage,omitempty" tf:"lifetime_percentage,omitempty"` +} + +type X509CertificatePropertiesInitParameters struct { + + // A list of Extended/Enhanced Key Usages. + ExtendedKeyUsage []*string `json:"extendedKeyUsage,omitempty" tf:"extended_key_usage,omitempty"` + + // A list of uses associated with this Key. Possible values include cRLSign, dataEncipherment, decipherOnly, digitalSignature, encipherOnly, keyAgreement, keyCertSign, keyEncipherment and nonRepudiation and are case-sensitive. + // +listType=set + KeyUsage []*string `json:"keyUsage,omitempty" tf:"key_usage,omitempty"` + + // The Certificate's Subject. + Subject *string `json:"subject,omitempty" tf:"subject,omitempty"` + + // A subject_alternative_names block as defined below. + SubjectAlternativeNames *SubjectAlternativeNamesInitParameters `json:"subjectAlternativeNames,omitempty" tf:"subject_alternative_names,omitempty"` + + // The Certificates Validity Period in Months. + ValidityInMonths *float64 `json:"validityInMonths,omitempty" tf:"validity_in_months,omitempty"` +} + +type X509CertificatePropertiesObservation struct { + + // A list of Extended/Enhanced Key Usages. + ExtendedKeyUsage []*string `json:"extendedKeyUsage,omitempty" tf:"extended_key_usage,omitempty"` + + // A list of uses associated with this Key. 
Possible values include cRLSign, dataEncipherment, decipherOnly, digitalSignature, encipherOnly, keyAgreement, keyCertSign, keyEncipherment and nonRepudiation and are case-sensitive. + // +listType=set + KeyUsage []*string `json:"keyUsage,omitempty" tf:"key_usage,omitempty"` + + // The Certificate's Subject. + Subject *string `json:"subject,omitempty" tf:"subject,omitempty"` + + // A subject_alternative_names block as defined below. + SubjectAlternativeNames *SubjectAlternativeNamesObservation `json:"subjectAlternativeNames,omitempty" tf:"subject_alternative_names,omitempty"` + + // The Certificates Validity Period in Months. + ValidityInMonths *float64 `json:"validityInMonths,omitempty" tf:"validity_in_months,omitempty"` +} + +type X509CertificatePropertiesParameters struct { + + // A list of Extended/Enhanced Key Usages. + // +kubebuilder:validation:Optional + ExtendedKeyUsage []*string `json:"extendedKeyUsage,omitempty" tf:"extended_key_usage,omitempty"` + + // A list of uses associated with this Key. Possible values include cRLSign, dataEncipherment, decipherOnly, digitalSignature, encipherOnly, keyAgreement, keyCertSign, keyEncipherment and nonRepudiation and are case-sensitive. + // +kubebuilder:validation:Optional + // +listType=set + KeyUsage []*string `json:"keyUsage" tf:"key_usage,omitempty"` + + // The Certificate's Subject. + // +kubebuilder:validation:Optional + Subject *string `json:"subject" tf:"subject,omitempty"` + + // A subject_alternative_names block as defined below. + // +kubebuilder:validation:Optional + SubjectAlternativeNames *SubjectAlternativeNamesParameters `json:"subjectAlternativeNames,omitempty" tf:"subject_alternative_names,omitempty"` + + // The Certificates Validity Period in Months. 
+ // +kubebuilder:validation:Optional + ValidityInMonths *float64 `json:"validityInMonths" tf:"validity_in_months,omitempty"` +} + +// CertificateSpec defines the desired state of Certificate +type CertificateSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider CertificateParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider CertificateInitParameters `json:"initProvider,omitempty"` +} + +// CertificateStatus defines the observed state of Certificate. +type CertificateStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider CertificateObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Certificate is the Schema for the Certificates API. Manages a Key Vault Certificate. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type Certificate struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec CertificateSpec `json:"spec"` + Status CertificateStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// CertificateList contains a list of Certificates +type CertificateList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Certificate `json:"items"` +} + +// Repository type metadata. +var ( + Certificate_Kind = "Certificate" + Certificate_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Certificate_Kind}.String() + Certificate_KindAPIVersion = Certificate_Kind + "." 
+ CRDGroupVersion.String() + Certificate_GroupVersionKind = CRDGroupVersion.WithKind(Certificate_Kind) +) + +func init() { + SchemeBuilder.Register(&Certificate{}, &CertificateList{}) +} diff --git a/apis/keyvault/v1beta2/zz_generated.conversion_hubs.go b/apis/keyvault/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 000000000..cb8b91b3e --- /dev/null +++ b/apis/keyvault/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,19 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Certificate) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Key) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *ManagedHardwareSecurityModule) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Vault) Hub() {} diff --git a/apis/keyvault/v1beta2/zz_generated.deepcopy.go b/apis/keyvault/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..df34027c4 --- /dev/null +++ b/apis/keyvault/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,3143 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessPolicyInitParameters) DeepCopyInto(out *AccessPolicyInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessPolicyInitParameters. 
+func (in *AccessPolicyInitParameters) DeepCopy() *AccessPolicyInitParameters { + if in == nil { + return nil + } + out := new(AccessPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessPolicyObservation) DeepCopyInto(out *AccessPolicyObservation) { + *out = *in + if in.ApplicationID != nil { + in, out := &in.ApplicationID, &out.ApplicationID + *out = new(string) + **out = **in + } + if in.CertificatePermissions != nil { + in, out := &in.CertificatePermissions, &out.CertificatePermissions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.KeyPermissions != nil { + in, out := &in.KeyPermissions, &out.KeyPermissions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ObjectID != nil { + in, out := &in.ObjectID, &out.ObjectID + *out = new(string) + **out = **in + } + if in.SecretPermissions != nil { + in, out := &in.SecretPermissions, &out.SecretPermissions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.StoragePermissions != nil { + in, out := &in.StoragePermissions, &out.StoragePermissions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessPolicyObservation. 
+func (in *AccessPolicyObservation) DeepCopy() *AccessPolicyObservation { + if in == nil { + return nil + } + out := new(AccessPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessPolicyParameters) DeepCopyInto(out *AccessPolicyParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessPolicyParameters. +func (in *AccessPolicyParameters) DeepCopy() *AccessPolicyParameters { + if in == nil { + return nil + } + out := new(AccessPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionInitParameters) DeepCopyInto(out *ActionInitParameters) { + *out = *in + if in.ActionType != nil { + in, out := &in.ActionType, &out.ActionType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionInitParameters. +func (in *ActionInitParameters) DeepCopy() *ActionInitParameters { + if in == nil { + return nil + } + out := new(ActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionObservation) DeepCopyInto(out *ActionObservation) { + *out = *in + if in.ActionType != nil { + in, out := &in.ActionType, &out.ActionType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionObservation. +func (in *ActionObservation) DeepCopy() *ActionObservation { + if in == nil { + return nil + } + out := new(ActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *ActionParameters) DeepCopyInto(out *ActionParameters) { + *out = *in + if in.ActionType != nil { + in, out := &in.ActionType, &out.ActionType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionParameters. +func (in *ActionParameters) DeepCopy() *ActionParameters { + if in == nil { + return nil + } + out := new(ActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutomaticInitParameters) DeepCopyInto(out *AutomaticInitParameters) { + *out = *in + if in.TimeAfterCreation != nil { + in, out := &in.TimeAfterCreation, &out.TimeAfterCreation + *out = new(string) + **out = **in + } + if in.TimeBeforeExpiry != nil { + in, out := &in.TimeBeforeExpiry, &out.TimeBeforeExpiry + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutomaticInitParameters. +func (in *AutomaticInitParameters) DeepCopy() *AutomaticInitParameters { + if in == nil { + return nil + } + out := new(AutomaticInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutomaticObservation) DeepCopyInto(out *AutomaticObservation) { + *out = *in + if in.TimeAfterCreation != nil { + in, out := &in.TimeAfterCreation, &out.TimeAfterCreation + *out = new(string) + **out = **in + } + if in.TimeBeforeExpiry != nil { + in, out := &in.TimeBeforeExpiry, &out.TimeBeforeExpiry + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutomaticObservation. 
+func (in *AutomaticObservation) DeepCopy() *AutomaticObservation { + if in == nil { + return nil + } + out := new(AutomaticObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutomaticParameters) DeepCopyInto(out *AutomaticParameters) { + *out = *in + if in.TimeAfterCreation != nil { + in, out := &in.TimeAfterCreation, &out.TimeAfterCreation + *out = new(string) + **out = **in + } + if in.TimeBeforeExpiry != nil { + in, out := &in.TimeBeforeExpiry, &out.TimeBeforeExpiry + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutomaticParameters. +func (in *AutomaticParameters) DeepCopy() *AutomaticParameters { + if in == nil { + return nil + } + out := new(AutomaticParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Certificate) DeepCopyInto(out *Certificate) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Certificate. +func (in *Certificate) DeepCopy() *Certificate { + if in == nil { + return nil + } + out := new(Certificate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Certificate) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CertificateAttributeInitParameters) DeepCopyInto(out *CertificateAttributeInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateAttributeInitParameters. +func (in *CertificateAttributeInitParameters) DeepCopy() *CertificateAttributeInitParameters { + if in == nil { + return nil + } + out := new(CertificateAttributeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateAttributeObservation) DeepCopyInto(out *CertificateAttributeObservation) { + *out = *in + if in.Created != nil { + in, out := &in.Created, &out.Created + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Expires != nil { + in, out := &in.Expires, &out.Expires + *out = new(string) + **out = **in + } + if in.NotBefore != nil { + in, out := &in.NotBefore, &out.NotBefore + *out = new(string) + **out = **in + } + if in.RecoveryLevel != nil { + in, out := &in.RecoveryLevel, &out.RecoveryLevel + *out = new(string) + **out = **in + } + if in.Updated != nil { + in, out := &in.Updated, &out.Updated + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateAttributeObservation. +func (in *CertificateAttributeObservation) DeepCopy() *CertificateAttributeObservation { + if in == nil { + return nil + } + out := new(CertificateAttributeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CertificateAttributeParameters) DeepCopyInto(out *CertificateAttributeParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateAttributeParameters. +func (in *CertificateAttributeParameters) DeepCopy() *CertificateAttributeParameters { + if in == nil { + return nil + } + out := new(CertificateAttributeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateCertificateInitParameters) DeepCopyInto(out *CertificateCertificateInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateCertificateInitParameters. +func (in *CertificateCertificateInitParameters) DeepCopy() *CertificateCertificateInitParameters { + if in == nil { + return nil + } + out := new(CertificateCertificateInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateCertificateObservation) DeepCopyInto(out *CertificateCertificateObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateCertificateObservation. +func (in *CertificateCertificateObservation) DeepCopy() *CertificateCertificateObservation { + if in == nil { + return nil + } + out := new(CertificateCertificateObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CertificateCertificateParameters) DeepCopyInto(out *CertificateCertificateParameters) { + *out = *in + out.ContentsSecretRef = in.ContentsSecretRef + if in.PasswordSecretRef != nil { + in, out := &in.PasswordSecretRef, &out.PasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateCertificateParameters. +func (in *CertificateCertificateParameters) DeepCopy() *CertificateCertificateParameters { + if in == nil { + return nil + } + out := new(CertificateCertificateParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateInitParameters) DeepCopyInto(out *CertificateInitParameters) { + *out = *in + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = new(CertificateCertificateInitParameters) + **out = **in + } + if in.CertificatePolicy != nil { + in, out := &in.CertificatePolicy, &out.CertificatePolicy + *out = new(CertificatePolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.KeyVaultID != nil { + in, out := &in.KeyVaultID, &out.KeyVaultID + *out = new(string) + **out = **in + } + if in.KeyVaultIDRef != nil { + in, out := &in.KeyVaultIDRef, &out.KeyVaultIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KeyVaultIDSelector != nil { + in, out := &in.KeyVaultIDSelector, &out.KeyVaultIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } 
+ } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateInitParameters. +func (in *CertificateInitParameters) DeepCopy() *CertificateInitParameters { + if in == nil { + return nil + } + out := new(CertificateInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateList) DeepCopyInto(out *CertificateList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Certificate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateList. +func (in *CertificateList) DeepCopy() *CertificateList { + if in == nil { + return nil + } + out := new(CertificateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CertificateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CertificateObservation) DeepCopyInto(out *CertificateObservation) { + *out = *in + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = new(CertificateCertificateParameters) + (*in).DeepCopyInto(*out) + } + if in.CertificateAttribute != nil { + in, out := &in.CertificateAttribute, &out.CertificateAttribute + *out = make([]CertificateAttributeObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CertificateData != nil { + in, out := &in.CertificateData, &out.CertificateData + *out = new(string) + **out = **in + } + if in.CertificateDataBase64 != nil { + in, out := &in.CertificateDataBase64, &out.CertificateDataBase64 + *out = new(string) + **out = **in + } + if in.CertificatePolicy != nil { + in, out := &in.CertificatePolicy, &out.CertificatePolicy + *out = new(CertificatePolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.KeyVaultID != nil { + in, out := &in.KeyVaultID, &out.KeyVaultID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ResourceManagerID != nil { + in, out := &in.ResourceManagerID, &out.ResourceManagerID + *out = new(string) + **out = **in + } + if in.ResourceManagerVersionlessID != nil { + in, out := &in.ResourceManagerVersionlessID, &out.ResourceManagerVersionlessID + *out = new(string) + **out = **in + } + if in.SecretID != nil { + in, out := &in.SecretID, &out.SecretID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Thumbprint != nil { + in, out := &in.Thumbprint, 
&out.Thumbprint + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } + if in.VersionlessID != nil { + in, out := &in.VersionlessID, &out.VersionlessID + *out = new(string) + **out = **in + } + if in.VersionlessSecretID != nil { + in, out := &in.VersionlessSecretID, &out.VersionlessSecretID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateObservation. +func (in *CertificateObservation) DeepCopy() *CertificateObservation { + if in == nil { + return nil + } + out := new(CertificateObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateParameters) DeepCopyInto(out *CertificateParameters) { + *out = *in + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = new(CertificateCertificateParameters) + (*in).DeepCopyInto(*out) + } + if in.CertificatePolicy != nil { + in, out := &in.CertificatePolicy, &out.CertificatePolicy + *out = new(CertificatePolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.KeyVaultID != nil { + in, out := &in.KeyVaultID, &out.KeyVaultID + *out = new(string) + **out = **in + } + if in.KeyVaultIDRef != nil { + in, out := &in.KeyVaultIDRef, &out.KeyVaultIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KeyVaultIDSelector != nil { + in, out := &in.KeyVaultIDSelector, &out.KeyVaultIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, 
&outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateParameters. +func (in *CertificateParameters) DeepCopy() *CertificateParameters { + if in == nil { + return nil + } + out := new(CertificateParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificatePolicyInitParameters) DeepCopyInto(out *CertificatePolicyInitParameters) { + *out = *in + if in.IssuerParameters != nil { + in, out := &in.IssuerParameters, &out.IssuerParameters + *out = new(IssuerParametersInitParameters) + (*in).DeepCopyInto(*out) + } + if in.KeyProperties != nil { + in, out := &in.KeyProperties, &out.KeyProperties + *out = new(KeyPropertiesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LifetimeAction != nil { + in, out := &in.LifetimeAction, &out.LifetimeAction + *out = make([]LifetimeActionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecretProperties != nil { + in, out := &in.SecretProperties, &out.SecretProperties + *out = new(SecretPropertiesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.X509CertificateProperties != nil { + in, out := &in.X509CertificateProperties, &out.X509CertificateProperties + *out = new(X509CertificatePropertiesInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificatePolicyInitParameters. +func (in *CertificatePolicyInitParameters) DeepCopy() *CertificatePolicyInitParameters { + if in == nil { + return nil + } + out := new(CertificatePolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CertificatePolicyObservation) DeepCopyInto(out *CertificatePolicyObservation) { + *out = *in + if in.IssuerParameters != nil { + in, out := &in.IssuerParameters, &out.IssuerParameters + *out = new(IssuerParametersObservation) + (*in).DeepCopyInto(*out) + } + if in.KeyProperties != nil { + in, out := &in.KeyProperties, &out.KeyProperties + *out = new(KeyPropertiesObservation) + (*in).DeepCopyInto(*out) + } + if in.LifetimeAction != nil { + in, out := &in.LifetimeAction, &out.LifetimeAction + *out = make([]LifetimeActionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecretProperties != nil { + in, out := &in.SecretProperties, &out.SecretProperties + *out = new(SecretPropertiesObservation) + (*in).DeepCopyInto(*out) + } + if in.X509CertificateProperties != nil { + in, out := &in.X509CertificateProperties, &out.X509CertificateProperties + *out = new(X509CertificatePropertiesObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificatePolicyObservation. +func (in *CertificatePolicyObservation) DeepCopy() *CertificatePolicyObservation { + if in == nil { + return nil + } + out := new(CertificatePolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CertificatePolicyParameters) DeepCopyInto(out *CertificatePolicyParameters) { + *out = *in + if in.IssuerParameters != nil { + in, out := &in.IssuerParameters, &out.IssuerParameters + *out = new(IssuerParametersParameters) + (*in).DeepCopyInto(*out) + } + if in.KeyProperties != nil { + in, out := &in.KeyProperties, &out.KeyProperties + *out = new(KeyPropertiesParameters) + (*in).DeepCopyInto(*out) + } + if in.LifetimeAction != nil { + in, out := &in.LifetimeAction, &out.LifetimeAction + *out = make([]LifetimeActionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecretProperties != nil { + in, out := &in.SecretProperties, &out.SecretProperties + *out = new(SecretPropertiesParameters) + (*in).DeepCopyInto(*out) + } + if in.X509CertificateProperties != nil { + in, out := &in.X509CertificateProperties, &out.X509CertificateProperties + *out = new(X509CertificatePropertiesParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificatePolicyParameters. +func (in *CertificatePolicyParameters) DeepCopy() *CertificatePolicyParameters { + if in == nil { + return nil + } + out := new(CertificatePolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateSpec) DeepCopyInto(out *CertificateSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateSpec. 
+func (in *CertificateSpec) DeepCopy() *CertificateSpec { + if in == nil { + return nil + } + out := new(CertificateSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateStatus) DeepCopyInto(out *CertificateStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateStatus. +func (in *CertificateStatus) DeepCopy() *CertificateStatus { + if in == nil { + return nil + } + out := new(CertificateStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContactInitParameters) DeepCopyInto(out *ContactInitParameters) { + *out = *in + if in.Email != nil { + in, out := &in.Email, &out.Email + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Phone != nil { + in, out := &in.Phone, &out.Phone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContactInitParameters. +func (in *ContactInitParameters) DeepCopy() *ContactInitParameters { + if in == nil { + return nil + } + out := new(ContactInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContactObservation) DeepCopyInto(out *ContactObservation) { + *out = *in + if in.Email != nil { + in, out := &in.Email, &out.Email + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Phone != nil { + in, out := &in.Phone, &out.Phone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContactObservation. +func (in *ContactObservation) DeepCopy() *ContactObservation { + if in == nil { + return nil + } + out := new(ContactObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContactParameters) DeepCopyInto(out *ContactParameters) { + *out = *in + if in.Email != nil { + in, out := &in.Email, &out.Email + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Phone != nil { + in, out := &in.Phone, &out.Phone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContactParameters. +func (in *ContactParameters) DeepCopy() *ContactParameters { + if in == nil { + return nil + } + out := new(ContactParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IssuerParametersInitParameters) DeepCopyInto(out *IssuerParametersInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IssuerParametersInitParameters. 
+func (in *IssuerParametersInitParameters) DeepCopy() *IssuerParametersInitParameters { + if in == nil { + return nil + } + out := new(IssuerParametersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IssuerParametersObservation) DeepCopyInto(out *IssuerParametersObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IssuerParametersObservation. +func (in *IssuerParametersObservation) DeepCopy() *IssuerParametersObservation { + if in == nil { + return nil + } + out := new(IssuerParametersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IssuerParametersParameters) DeepCopyInto(out *IssuerParametersParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IssuerParametersParameters. +func (in *IssuerParametersParameters) DeepCopy() *IssuerParametersParameters { + if in == nil { + return nil + } + out := new(IssuerParametersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Key) DeepCopyInto(out *Key) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Key. 
+func (in *Key) DeepCopy() *Key { + if in == nil { + return nil + } + out := new(Key) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Key) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KeyInitParameters) DeepCopyInto(out *KeyInitParameters) { + *out = *in + if in.Curve != nil { + in, out := &in.Curve, &out.Curve + *out = new(string) + **out = **in + } + if in.ExpirationDate != nil { + in, out := &in.ExpirationDate, &out.ExpirationDate + *out = new(string) + **out = **in + } + if in.KeyOpts != nil { + in, out := &in.KeyOpts, &out.KeyOpts + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.KeySize != nil { + in, out := &in.KeySize, &out.KeySize + *out = new(float64) + **out = **in + } + if in.KeyType != nil { + in, out := &in.KeyType, &out.KeyType + *out = new(string) + **out = **in + } + if in.KeyVaultID != nil { + in, out := &in.KeyVaultID, &out.KeyVaultID + *out = new(string) + **out = **in + } + if in.KeyVaultIDRef != nil { + in, out := &in.KeyVaultIDRef, &out.KeyVaultIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KeyVaultIDSelector != nil { + in, out := &in.KeyVaultIDSelector, &out.KeyVaultIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NotBeforeDate != nil { + in, out := &in.NotBeforeDate, &out.NotBeforeDate + *out = new(string) + **out = **in + } + if in.RotationPolicy != nil { + in, out := &in.RotationPolicy, &out.RotationPolicy + *out = new(RotationPolicyInitParameters) + (*in).DeepCopyInto(*out) + } + 
if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyInitParameters. +func (in *KeyInitParameters) DeepCopy() *KeyInitParameters { + if in == nil { + return nil + } + out := new(KeyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KeyList) DeepCopyInto(out *KeyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Key, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyList. +func (in *KeyList) DeepCopy() *KeyList { + if in == nil { + return nil + } + out := new(KeyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KeyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KeyObservation) DeepCopyInto(out *KeyObservation) { + *out = *in + if in.Curve != nil { + in, out := &in.Curve, &out.Curve + *out = new(string) + **out = **in + } + if in.E != nil { + in, out := &in.E, &out.E + *out = new(string) + **out = **in + } + if in.ExpirationDate != nil { + in, out := &in.ExpirationDate, &out.ExpirationDate + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.KeyOpts != nil { + in, out := &in.KeyOpts, &out.KeyOpts + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.KeySize != nil { + in, out := &in.KeySize, &out.KeySize + *out = new(float64) + **out = **in + } + if in.KeyType != nil { + in, out := &in.KeyType, &out.KeyType + *out = new(string) + **out = **in + } + if in.KeyVaultID != nil { + in, out := &in.KeyVaultID, &out.KeyVaultID + *out = new(string) + **out = **in + } + if in.N != nil { + in, out := &in.N, &out.N + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NotBeforeDate != nil { + in, out := &in.NotBeforeDate, &out.NotBeforeDate + *out = new(string) + **out = **in + } + if in.PublicKeyOpenssh != nil { + in, out := &in.PublicKeyOpenssh, &out.PublicKeyOpenssh + *out = new(string) + **out = **in + } + if in.PublicKeyPem != nil { + in, out := &in.PublicKeyPem, &out.PublicKeyPem + *out = new(string) + **out = **in + } + if in.ResourceID != nil { + in, out := &in.ResourceID, &out.ResourceID + *out = new(string) + **out = **in + } + if in.ResourceVersionlessID != nil { + in, out := &in.ResourceVersionlessID, &out.ResourceVersionlessID + *out = new(string) + **out = **in + } + if in.RotationPolicy != nil { + in, out := &in.RotationPolicy, &out.RotationPolicy + *out = new(RotationPolicyObservation) + (*in).DeepCopyInto(*out) + } + if 
in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } + if in.VersionlessID != nil { + in, out := &in.VersionlessID, &out.VersionlessID + *out = new(string) + **out = **in + } + if in.X != nil { + in, out := &in.X, &out.X + *out = new(string) + **out = **in + } + if in.Y != nil { + in, out := &in.Y, &out.Y + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyObservation. +func (in *KeyObservation) DeepCopy() *KeyObservation { + if in == nil { + return nil + } + out := new(KeyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KeyParameters) DeepCopyInto(out *KeyParameters) { + *out = *in + if in.Curve != nil { + in, out := &in.Curve, &out.Curve + *out = new(string) + **out = **in + } + if in.ExpirationDate != nil { + in, out := &in.ExpirationDate, &out.ExpirationDate + *out = new(string) + **out = **in + } + if in.KeyOpts != nil { + in, out := &in.KeyOpts, &out.KeyOpts + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.KeySize != nil { + in, out := &in.KeySize, &out.KeySize + *out = new(float64) + **out = **in + } + if in.KeyType != nil { + in, out := &in.KeyType, &out.KeyType + *out = new(string) + **out = **in + } + if in.KeyVaultID != nil { + in, out := &in.KeyVaultID, &out.KeyVaultID + *out = new(string) + **out = **in + } + if in.KeyVaultIDRef != nil { + in, out := &in.KeyVaultIDRef, &out.KeyVaultIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KeyVaultIDSelector != nil { + in, out := &in.KeyVaultIDSelector, &out.KeyVaultIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NotBeforeDate != nil { + in, out := &in.NotBeforeDate, &out.NotBeforeDate + *out = new(string) + **out = **in + } + if in.RotationPolicy != nil { + in, out := &in.RotationPolicy, &out.RotationPolicy + *out = new(RotationPolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyParameters. 
+func (in *KeyParameters) DeepCopy() *KeyParameters { + if in == nil { + return nil + } + out := new(KeyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KeyPropertiesInitParameters) DeepCopyInto(out *KeyPropertiesInitParameters) { + *out = *in + if in.Curve != nil { + in, out := &in.Curve, &out.Curve + *out = new(string) + **out = **in + } + if in.Exportable != nil { + in, out := &in.Exportable, &out.Exportable + *out = new(bool) + **out = **in + } + if in.KeySize != nil { + in, out := &in.KeySize, &out.KeySize + *out = new(float64) + **out = **in + } + if in.KeyType != nil { + in, out := &in.KeyType, &out.KeyType + *out = new(string) + **out = **in + } + if in.ReuseKey != nil { + in, out := &in.ReuseKey, &out.ReuseKey + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyPropertiesInitParameters. +func (in *KeyPropertiesInitParameters) DeepCopy() *KeyPropertiesInitParameters { + if in == nil { + return nil + } + out := new(KeyPropertiesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KeyPropertiesObservation) DeepCopyInto(out *KeyPropertiesObservation) { + *out = *in + if in.Curve != nil { + in, out := &in.Curve, &out.Curve + *out = new(string) + **out = **in + } + if in.Exportable != nil { + in, out := &in.Exportable, &out.Exportable + *out = new(bool) + **out = **in + } + if in.KeySize != nil { + in, out := &in.KeySize, &out.KeySize + *out = new(float64) + **out = **in + } + if in.KeyType != nil { + in, out := &in.KeyType, &out.KeyType + *out = new(string) + **out = **in + } + if in.ReuseKey != nil { + in, out := &in.ReuseKey, &out.ReuseKey + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyPropertiesObservation. +func (in *KeyPropertiesObservation) DeepCopy() *KeyPropertiesObservation { + if in == nil { + return nil + } + out := new(KeyPropertiesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KeyPropertiesParameters) DeepCopyInto(out *KeyPropertiesParameters) { + *out = *in + if in.Curve != nil { + in, out := &in.Curve, &out.Curve + *out = new(string) + **out = **in + } + if in.Exportable != nil { + in, out := &in.Exportable, &out.Exportable + *out = new(bool) + **out = **in + } + if in.KeySize != nil { + in, out := &in.KeySize, &out.KeySize + *out = new(float64) + **out = **in + } + if in.KeyType != nil { + in, out := &in.KeyType, &out.KeyType + *out = new(string) + **out = **in + } + if in.ReuseKey != nil { + in, out := &in.ReuseKey, &out.ReuseKey + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyPropertiesParameters. 
+func (in *KeyPropertiesParameters) DeepCopy() *KeyPropertiesParameters { + if in == nil { + return nil + } + out := new(KeyPropertiesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KeySpec) DeepCopyInto(out *KeySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeySpec. +func (in *KeySpec) DeepCopy() *KeySpec { + if in == nil { + return nil + } + out := new(KeySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KeyStatus) DeepCopyInto(out *KeyStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyStatus. +func (in *KeyStatus) DeepCopy() *KeyStatus { + if in == nil { + return nil + } + out := new(KeyStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LifetimeActionInitParameters) DeepCopyInto(out *LifetimeActionInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(ActionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Trigger != nil { + in, out := &in.Trigger, &out.Trigger + *out = new(TriggerInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LifetimeActionInitParameters. 
+func (in *LifetimeActionInitParameters) DeepCopy() *LifetimeActionInitParameters { + if in == nil { + return nil + } + out := new(LifetimeActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LifetimeActionObservation) DeepCopyInto(out *LifetimeActionObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(ActionObservation) + (*in).DeepCopyInto(*out) + } + if in.Trigger != nil { + in, out := &in.Trigger, &out.Trigger + *out = new(TriggerObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LifetimeActionObservation. +func (in *LifetimeActionObservation) DeepCopy() *LifetimeActionObservation { + if in == nil { + return nil + } + out := new(LifetimeActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LifetimeActionParameters) DeepCopyInto(out *LifetimeActionParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(ActionParameters) + (*in).DeepCopyInto(*out) + } + if in.Trigger != nil { + in, out := &in.Trigger, &out.Trigger + *out = new(TriggerParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LifetimeActionParameters. +func (in *LifetimeActionParameters) DeepCopy() *LifetimeActionParameters { + if in == nil { + return nil + } + out := new(LifetimeActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ManagedHardwareSecurityModule) DeepCopyInto(out *ManagedHardwareSecurityModule) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedHardwareSecurityModule. +func (in *ManagedHardwareSecurityModule) DeepCopy() *ManagedHardwareSecurityModule { + if in == nil { + return nil + } + out := new(ManagedHardwareSecurityModule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ManagedHardwareSecurityModule) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedHardwareSecurityModuleInitParameters) DeepCopyInto(out *ManagedHardwareSecurityModuleInitParameters) { + *out = *in + if in.AdminObjectIds != nil { + in, out := &in.AdminObjectIds, &out.AdminObjectIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.NetworkAcls != nil { + in, out := &in.NetworkAcls, &out.NetworkAcls + *out = new(ManagedHardwareSecurityModuleNetworkAclsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.PurgeProtectionEnabled != nil { + in, out := &in.PurgeProtectionEnabled, &out.PurgeProtectionEnabled + *out = new(bool) + **out = **in + } + if in.SecurityDomainKeyVaultCertificateIds != nil { + in, out := 
&in.SecurityDomainKeyVaultCertificateIds, &out.SecurityDomainKeyVaultCertificateIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SecurityDomainQuorum != nil { + in, out := &in.SecurityDomainQuorum, &out.SecurityDomainQuorum + *out = new(float64) + **out = **in + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.SoftDeleteRetentionDays != nil { + in, out := &in.SoftDeleteRetentionDays, &out.SoftDeleteRetentionDays + *out = new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedHardwareSecurityModuleInitParameters. +func (in *ManagedHardwareSecurityModuleInitParameters) DeepCopy() *ManagedHardwareSecurityModuleInitParameters { + if in == nil { + return nil + } + out := new(ManagedHardwareSecurityModuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ManagedHardwareSecurityModuleList) DeepCopyInto(out *ManagedHardwareSecurityModuleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ManagedHardwareSecurityModule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedHardwareSecurityModuleList. +func (in *ManagedHardwareSecurityModuleList) DeepCopy() *ManagedHardwareSecurityModuleList { + if in == nil { + return nil + } + out := new(ManagedHardwareSecurityModuleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ManagedHardwareSecurityModuleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedHardwareSecurityModuleNetworkAclsInitParameters) DeepCopyInto(out *ManagedHardwareSecurityModuleNetworkAclsInitParameters) { + *out = *in + if in.Bypass != nil { + in, out := &in.Bypass, &out.Bypass + *out = new(string) + **out = **in + } + if in.DefaultAction != nil { + in, out := &in.DefaultAction, &out.DefaultAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedHardwareSecurityModuleNetworkAclsInitParameters. +func (in *ManagedHardwareSecurityModuleNetworkAclsInitParameters) DeepCopy() *ManagedHardwareSecurityModuleNetworkAclsInitParameters { + if in == nil { + return nil + } + out := new(ManagedHardwareSecurityModuleNetworkAclsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *ManagedHardwareSecurityModuleNetworkAclsObservation) DeepCopyInto(out *ManagedHardwareSecurityModuleNetworkAclsObservation) { + *out = *in + if in.Bypass != nil { + in, out := &in.Bypass, &out.Bypass + *out = new(string) + **out = **in + } + if in.DefaultAction != nil { + in, out := &in.DefaultAction, &out.DefaultAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedHardwareSecurityModuleNetworkAclsObservation. +func (in *ManagedHardwareSecurityModuleNetworkAclsObservation) DeepCopy() *ManagedHardwareSecurityModuleNetworkAclsObservation { + if in == nil { + return nil + } + out := new(ManagedHardwareSecurityModuleNetworkAclsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedHardwareSecurityModuleNetworkAclsParameters) DeepCopyInto(out *ManagedHardwareSecurityModuleNetworkAclsParameters) { + *out = *in + if in.Bypass != nil { + in, out := &in.Bypass, &out.Bypass + *out = new(string) + **out = **in + } + if in.DefaultAction != nil { + in, out := &in.DefaultAction, &out.DefaultAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedHardwareSecurityModuleNetworkAclsParameters. +func (in *ManagedHardwareSecurityModuleNetworkAclsParameters) DeepCopy() *ManagedHardwareSecurityModuleNetworkAclsParameters { + if in == nil { + return nil + } + out := new(ManagedHardwareSecurityModuleNetworkAclsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ManagedHardwareSecurityModuleObservation) DeepCopyInto(out *ManagedHardwareSecurityModuleObservation) { + *out = *in + if in.AdminObjectIds != nil { + in, out := &in.AdminObjectIds, &out.AdminObjectIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.HSMURI != nil { + in, out := &in.HSMURI, &out.HSMURI + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.NetworkAcls != nil { + in, out := &in.NetworkAcls, &out.NetworkAcls + *out = new(ManagedHardwareSecurityModuleNetworkAclsObservation) + (*in).DeepCopyInto(*out) + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.PurgeProtectionEnabled != nil { + in, out := &in.PurgeProtectionEnabled, &out.PurgeProtectionEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.SecurityDomainKeyVaultCertificateIds != nil { + in, out := &in.SecurityDomainKeyVaultCertificateIds, &out.SecurityDomainKeyVaultCertificateIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SecurityDomainQuorum != nil { + in, out := &in.SecurityDomainQuorum, &out.SecurityDomainQuorum + *out = new(float64) + **out = **in + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.SoftDeleteRetentionDays != nil { + in, out := &in.SoftDeleteRetentionDays, &out.SoftDeleteRetentionDays + *out = new(float64) + **out = **in 
+ } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedHardwareSecurityModuleObservation. +func (in *ManagedHardwareSecurityModuleObservation) DeepCopy() *ManagedHardwareSecurityModuleObservation { + if in == nil { + return nil + } + out := new(ManagedHardwareSecurityModuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedHardwareSecurityModuleParameters) DeepCopyInto(out *ManagedHardwareSecurityModuleParameters) { + *out = *in + if in.AdminObjectIds != nil { + in, out := &in.AdminObjectIds, &out.AdminObjectIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.NetworkAcls != nil { + in, out := &in.NetworkAcls, &out.NetworkAcls + *out = new(ManagedHardwareSecurityModuleNetworkAclsParameters) + (*in).DeepCopyInto(*out) + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.PurgeProtectionEnabled != nil { + in, out := &in.PurgeProtectionEnabled, &out.PurgeProtectionEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + 
**out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SecurityDomainKeyVaultCertificateIds != nil { + in, out := &in.SecurityDomainKeyVaultCertificateIds, &out.SecurityDomainKeyVaultCertificateIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SecurityDomainQuorum != nil { + in, out := &in.SecurityDomainQuorum, &out.SecurityDomainQuorum + *out = new(float64) + **out = **in + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.SoftDeleteRetentionDays != nil { + in, out := &in.SoftDeleteRetentionDays, &out.SoftDeleteRetentionDays + *out = new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedHardwareSecurityModuleParameters. +func (in *ManagedHardwareSecurityModuleParameters) DeepCopy() *ManagedHardwareSecurityModuleParameters { + if in == nil { + return nil + } + out := new(ManagedHardwareSecurityModuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ManagedHardwareSecurityModuleSpec) DeepCopyInto(out *ManagedHardwareSecurityModuleSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedHardwareSecurityModuleSpec. +func (in *ManagedHardwareSecurityModuleSpec) DeepCopy() *ManagedHardwareSecurityModuleSpec { + if in == nil { + return nil + } + out := new(ManagedHardwareSecurityModuleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedHardwareSecurityModuleStatus) DeepCopyInto(out *ManagedHardwareSecurityModuleStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedHardwareSecurityModuleStatus. +func (in *ManagedHardwareSecurityModuleStatus) DeepCopy() *ManagedHardwareSecurityModuleStatus { + if in == nil { + return nil + } + out := new(ManagedHardwareSecurityModuleStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkAclsInitParameters) DeepCopyInto(out *NetworkAclsInitParameters) { + *out = *in + if in.Bypass != nil { + in, out := &in.Bypass, &out.Bypass + *out = new(string) + **out = **in + } + if in.DefaultAction != nil { + in, out := &in.DefaultAction, &out.DefaultAction + *out = new(string) + **out = **in + } + if in.IPRules != nil { + in, out := &in.IPRules, &out.IPRules + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VirtualNetworkSubnetIds != nil { + in, out := &in.VirtualNetworkSubnetIds, &out.VirtualNetworkSubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkAclsInitParameters. +func (in *NetworkAclsInitParameters) DeepCopy() *NetworkAclsInitParameters { + if in == nil { + return nil + } + out := new(NetworkAclsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkAclsObservation) DeepCopyInto(out *NetworkAclsObservation) { + *out = *in + if in.Bypass != nil { + in, out := &in.Bypass, &out.Bypass + *out = new(string) + **out = **in + } + if in.DefaultAction != nil { + in, out := &in.DefaultAction, &out.DefaultAction + *out = new(string) + **out = **in + } + if in.IPRules != nil { + in, out := &in.IPRules, &out.IPRules + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VirtualNetworkSubnetIds != nil { + in, out := &in.VirtualNetworkSubnetIds, &out.VirtualNetworkSubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkAclsObservation. +func (in *NetworkAclsObservation) DeepCopy() *NetworkAclsObservation { + if in == nil { + return nil + } + out := new(NetworkAclsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkAclsParameters) DeepCopyInto(out *NetworkAclsParameters) { + *out = *in + if in.Bypass != nil { + in, out := &in.Bypass, &out.Bypass + *out = new(string) + **out = **in + } + if in.DefaultAction != nil { + in, out := &in.DefaultAction, &out.DefaultAction + *out = new(string) + **out = **in + } + if in.IPRules != nil { + in, out := &in.IPRules, &out.IPRules + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VirtualNetworkSubnetIds != nil { + in, out := &in.VirtualNetworkSubnetIds, &out.VirtualNetworkSubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkAclsParameters. +func (in *NetworkAclsParameters) DeepCopy() *NetworkAclsParameters { + if in == nil { + return nil + } + out := new(NetworkAclsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RotationPolicyInitParameters) DeepCopyInto(out *RotationPolicyInitParameters) { + *out = *in + if in.Automatic != nil { + in, out := &in.Automatic, &out.Automatic + *out = new(AutomaticInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ExpireAfter != nil { + in, out := &in.ExpireAfter, &out.ExpireAfter + *out = new(string) + **out = **in + } + if in.NotifyBeforeExpiry != nil { + in, out := &in.NotifyBeforeExpiry, &out.NotifyBeforeExpiry + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RotationPolicyInitParameters. 
+func (in *RotationPolicyInitParameters) DeepCopy() *RotationPolicyInitParameters { + if in == nil { + return nil + } + out := new(RotationPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RotationPolicyObservation) DeepCopyInto(out *RotationPolicyObservation) { + *out = *in + if in.Automatic != nil { + in, out := &in.Automatic, &out.Automatic + *out = new(AutomaticObservation) + (*in).DeepCopyInto(*out) + } + if in.ExpireAfter != nil { + in, out := &in.ExpireAfter, &out.ExpireAfter + *out = new(string) + **out = **in + } + if in.NotifyBeforeExpiry != nil { + in, out := &in.NotifyBeforeExpiry, &out.NotifyBeforeExpiry + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RotationPolicyObservation. +func (in *RotationPolicyObservation) DeepCopy() *RotationPolicyObservation { + if in == nil { + return nil + } + out := new(RotationPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RotationPolicyParameters) DeepCopyInto(out *RotationPolicyParameters) { + *out = *in + if in.Automatic != nil { + in, out := &in.Automatic, &out.Automatic + *out = new(AutomaticParameters) + (*in).DeepCopyInto(*out) + } + if in.ExpireAfter != nil { + in, out := &in.ExpireAfter, &out.ExpireAfter + *out = new(string) + **out = **in + } + if in.NotifyBeforeExpiry != nil { + in, out := &in.NotifyBeforeExpiry, &out.NotifyBeforeExpiry + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RotationPolicyParameters. 
+func (in *RotationPolicyParameters) DeepCopy() *RotationPolicyParameters { + if in == nil { + return nil + } + out := new(RotationPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretPropertiesInitParameters) DeepCopyInto(out *SecretPropertiesInitParameters) { + *out = *in + if in.ContentType != nil { + in, out := &in.ContentType, &out.ContentType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretPropertiesInitParameters. +func (in *SecretPropertiesInitParameters) DeepCopy() *SecretPropertiesInitParameters { + if in == nil { + return nil + } + out := new(SecretPropertiesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretPropertiesObservation) DeepCopyInto(out *SecretPropertiesObservation) { + *out = *in + if in.ContentType != nil { + in, out := &in.ContentType, &out.ContentType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretPropertiesObservation. +func (in *SecretPropertiesObservation) DeepCopy() *SecretPropertiesObservation { + if in == nil { + return nil + } + out := new(SecretPropertiesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretPropertiesParameters) DeepCopyInto(out *SecretPropertiesParameters) { + *out = *in + if in.ContentType != nil { + in, out := &in.ContentType, &out.ContentType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretPropertiesParameters. 
+func (in *SecretPropertiesParameters) DeepCopy() *SecretPropertiesParameters { + if in == nil { + return nil + } + out := new(SecretPropertiesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubjectAlternativeNamesInitParameters) DeepCopyInto(out *SubjectAlternativeNamesInitParameters) { + *out = *in + if in.DNSNames != nil { + in, out := &in.DNSNames, &out.DNSNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Emails != nil { + in, out := &in.Emails, &out.Emails + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Upns != nil { + in, out := &in.Upns, &out.Upns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectAlternativeNamesInitParameters. +func (in *SubjectAlternativeNamesInitParameters) DeepCopy() *SubjectAlternativeNamesInitParameters { + if in == nil { + return nil + } + out := new(SubjectAlternativeNamesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SubjectAlternativeNamesObservation) DeepCopyInto(out *SubjectAlternativeNamesObservation) { + *out = *in + if in.DNSNames != nil { + in, out := &in.DNSNames, &out.DNSNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Emails != nil { + in, out := &in.Emails, &out.Emails + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Upns != nil { + in, out := &in.Upns, &out.Upns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectAlternativeNamesObservation. +func (in *SubjectAlternativeNamesObservation) DeepCopy() *SubjectAlternativeNamesObservation { + if in == nil { + return nil + } + out := new(SubjectAlternativeNamesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SubjectAlternativeNamesParameters) DeepCopyInto(out *SubjectAlternativeNamesParameters) { + *out = *in + if in.DNSNames != nil { + in, out := &in.DNSNames, &out.DNSNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Emails != nil { + in, out := &in.Emails, &out.Emails + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Upns != nil { + in, out := &in.Upns, &out.Upns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectAlternativeNamesParameters. +func (in *SubjectAlternativeNamesParameters) DeepCopy() *SubjectAlternativeNamesParameters { + if in == nil { + return nil + } + out := new(SubjectAlternativeNamesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TriggerInitParameters) DeepCopyInto(out *TriggerInitParameters) { + *out = *in + if in.DaysBeforeExpiry != nil { + in, out := &in.DaysBeforeExpiry, &out.DaysBeforeExpiry + *out = new(float64) + **out = **in + } + if in.LifetimePercentage != nil { + in, out := &in.LifetimePercentage, &out.LifetimePercentage + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerInitParameters. 
+func (in *TriggerInitParameters) DeepCopy() *TriggerInitParameters { + if in == nil { + return nil + } + out := new(TriggerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TriggerObservation) DeepCopyInto(out *TriggerObservation) { + *out = *in + if in.DaysBeforeExpiry != nil { + in, out := &in.DaysBeforeExpiry, &out.DaysBeforeExpiry + *out = new(float64) + **out = **in + } + if in.LifetimePercentage != nil { + in, out := &in.LifetimePercentage, &out.LifetimePercentage + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerObservation. +func (in *TriggerObservation) DeepCopy() *TriggerObservation { + if in == nil { + return nil + } + out := new(TriggerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TriggerParameters) DeepCopyInto(out *TriggerParameters) { + *out = *in + if in.DaysBeforeExpiry != nil { + in, out := &in.DaysBeforeExpiry, &out.DaysBeforeExpiry + *out = new(float64) + **out = **in + } + if in.LifetimePercentage != nil { + in, out := &in.LifetimePercentage, &out.LifetimePercentage + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerParameters. +func (in *TriggerParameters) DeepCopy() *TriggerParameters { + if in == nil { + return nil + } + out := new(TriggerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Vault) DeepCopyInto(out *Vault) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Vault. +func (in *Vault) DeepCopy() *Vault { + if in == nil { + return nil + } + out := new(Vault) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Vault) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VaultInitParameters) DeepCopyInto(out *VaultInitParameters) { + *out = *in + if in.Contact != nil { + in, out := &in.Contact, &out.Contact + *out = make([]ContactInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EnableRbacAuthorization != nil { + in, out := &in.EnableRbacAuthorization, &out.EnableRbacAuthorization + *out = new(bool) + **out = **in + } + if in.EnabledForDeployment != nil { + in, out := &in.EnabledForDeployment, &out.EnabledForDeployment + *out = new(bool) + **out = **in + } + if in.EnabledForDiskEncryption != nil { + in, out := &in.EnabledForDiskEncryption, &out.EnabledForDiskEncryption + *out = new(bool) + **out = **in + } + if in.EnabledForTemplateDeployment != nil { + in, out := &in.EnabledForTemplateDeployment, &out.EnabledForTemplateDeployment + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.NetworkAcls != nil { + in, out := &in.NetworkAcls, &out.NetworkAcls + *out = new(NetworkAclsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PublicNetworkAccessEnabled != nil { + in, out := 
&in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.PurgeProtectionEnabled != nil { + in, out := &in.PurgeProtectionEnabled, &out.PurgeProtectionEnabled + *out = new(bool) + **out = **in + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.SoftDeleteRetentionDays != nil { + in, out := &in.SoftDeleteRetentionDays, &out.SoftDeleteRetentionDays + *out = new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VaultInitParameters. +func (in *VaultInitParameters) DeepCopy() *VaultInitParameters { + if in == nil { + return nil + } + out := new(VaultInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VaultList) DeepCopyInto(out *VaultList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Vault, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VaultList. +func (in *VaultList) DeepCopy() *VaultList { + if in == nil { + return nil + } + out := new(VaultList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *VaultList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VaultObservation) DeepCopyInto(out *VaultObservation) { + *out = *in + if in.AccessPolicy != nil { + in, out := &in.AccessPolicy, &out.AccessPolicy + *out = make([]AccessPolicyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Contact != nil { + in, out := &in.Contact, &out.Contact + *out = make([]ContactObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EnableRbacAuthorization != nil { + in, out := &in.EnableRbacAuthorization, &out.EnableRbacAuthorization + *out = new(bool) + **out = **in + } + if in.EnabledForDeployment != nil { + in, out := &in.EnabledForDeployment, &out.EnabledForDeployment + *out = new(bool) + **out = **in + } + if in.EnabledForDiskEncryption != nil { + in, out := &in.EnabledForDiskEncryption, &out.EnabledForDiskEncryption + *out = new(bool) + **out = **in + } + if in.EnabledForTemplateDeployment != nil { + in, out := &in.EnabledForTemplateDeployment, &out.EnabledForTemplateDeployment + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.NetworkAcls != nil { + in, out := &in.NetworkAcls, &out.NetworkAcls + *out = new(NetworkAclsObservation) + (*in).DeepCopyInto(*out) + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.PurgeProtectionEnabled != nil { + in, out := &in.PurgeProtectionEnabled, &out.PurgeProtectionEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := 
&in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.SoftDeleteRetentionDays != nil { + in, out := &in.SoftDeleteRetentionDays, &out.SoftDeleteRetentionDays + *out = new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.VaultURI != nil { + in, out := &in.VaultURI, &out.VaultURI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VaultObservation. +func (in *VaultObservation) DeepCopy() *VaultObservation { + if in == nil { + return nil + } + out := new(VaultObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VaultParameters) DeepCopyInto(out *VaultParameters) { + *out = *in + if in.Contact != nil { + in, out := &in.Contact, &out.Contact + *out = make([]ContactParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EnableRbacAuthorization != nil { + in, out := &in.EnableRbacAuthorization, &out.EnableRbacAuthorization + *out = new(bool) + **out = **in + } + if in.EnabledForDeployment != nil { + in, out := &in.EnabledForDeployment, &out.EnabledForDeployment + *out = new(bool) + **out = **in + } + if in.EnabledForDiskEncryption != nil { + in, out := &in.EnabledForDiskEncryption, &out.EnabledForDiskEncryption + *out = new(bool) + **out = **in + } + if in.EnabledForTemplateDeployment != nil { + in, out := &in.EnabledForTemplateDeployment, &out.EnabledForTemplateDeployment + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.NetworkAcls != nil { + in, out := &in.NetworkAcls, &out.NetworkAcls + *out = new(NetworkAclsParameters) + (*in).DeepCopyInto(*out) + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.PurgeProtectionEnabled != nil { + in, out := &in.PurgeProtectionEnabled, &out.PurgeProtectionEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if 
in.SoftDeleteRetentionDays != nil { + in, out := &in.SoftDeleteRetentionDays, &out.SoftDeleteRetentionDays + *out = new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VaultParameters. +func (in *VaultParameters) DeepCopy() *VaultParameters { + if in == nil { + return nil + } + out := new(VaultParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VaultSpec) DeepCopyInto(out *VaultSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VaultSpec. +func (in *VaultSpec) DeepCopy() *VaultSpec { + if in == nil { + return nil + } + out := new(VaultSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VaultStatus) DeepCopyInto(out *VaultStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VaultStatus. 
+func (in *VaultStatus) DeepCopy() *VaultStatus { + if in == nil { + return nil + } + out := new(VaultStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *X509CertificatePropertiesInitParameters) DeepCopyInto(out *X509CertificatePropertiesInitParameters) { + *out = *in + if in.ExtendedKeyUsage != nil { + in, out := &in.ExtendedKeyUsage, &out.ExtendedKeyUsage + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.KeyUsage != nil { + in, out := &in.KeyUsage, &out.KeyUsage + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Subject != nil { + in, out := &in.Subject, &out.Subject + *out = new(string) + **out = **in + } + if in.SubjectAlternativeNames != nil { + in, out := &in.SubjectAlternativeNames, &out.SubjectAlternativeNames + *out = new(SubjectAlternativeNamesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ValidityInMonths != nil { + in, out := &in.ValidityInMonths, &out.ValidityInMonths + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new X509CertificatePropertiesInitParameters. +func (in *X509CertificatePropertiesInitParameters) DeepCopy() *X509CertificatePropertiesInitParameters { + if in == nil { + return nil + } + out := new(X509CertificatePropertiesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *X509CertificatePropertiesObservation) DeepCopyInto(out *X509CertificatePropertiesObservation) { + *out = *in + if in.ExtendedKeyUsage != nil { + in, out := &in.ExtendedKeyUsage, &out.ExtendedKeyUsage + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.KeyUsage != nil { + in, out := &in.KeyUsage, &out.KeyUsage + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Subject != nil { + in, out := &in.Subject, &out.Subject + *out = new(string) + **out = **in + } + if in.SubjectAlternativeNames != nil { + in, out := &in.SubjectAlternativeNames, &out.SubjectAlternativeNames + *out = new(SubjectAlternativeNamesObservation) + (*in).DeepCopyInto(*out) + } + if in.ValidityInMonths != nil { + in, out := &in.ValidityInMonths, &out.ValidityInMonths + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new X509CertificatePropertiesObservation. +func (in *X509CertificatePropertiesObservation) DeepCopy() *X509CertificatePropertiesObservation { + if in == nil { + return nil + } + out := new(X509CertificatePropertiesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *X509CertificatePropertiesParameters) DeepCopyInto(out *X509CertificatePropertiesParameters) { + *out = *in + if in.ExtendedKeyUsage != nil { + in, out := &in.ExtendedKeyUsage, &out.ExtendedKeyUsage + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.KeyUsage != nil { + in, out := &in.KeyUsage, &out.KeyUsage + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Subject != nil { + in, out := &in.Subject, &out.Subject + *out = new(string) + **out = **in + } + if in.SubjectAlternativeNames != nil { + in, out := &in.SubjectAlternativeNames, &out.SubjectAlternativeNames + *out = new(SubjectAlternativeNamesParameters) + (*in).DeepCopyInto(*out) + } + if in.ValidityInMonths != nil { + in, out := &in.ValidityInMonths, &out.ValidityInMonths + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new X509CertificatePropertiesParameters. +func (in *X509CertificatePropertiesParameters) DeepCopy() *X509CertificatePropertiesParameters { + if in == nil { + return nil + } + out := new(X509CertificatePropertiesParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/keyvault/v1beta2/zz_generated.managed.go b/apis/keyvault/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..7bfc19064 --- /dev/null +++ b/apis/keyvault/v1beta2/zz_generated.managed.go @@ -0,0 +1,248 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Certificate. 
+func (mg *Certificate) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Certificate. +func (mg *Certificate) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Certificate. +func (mg *Certificate) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Certificate. +func (mg *Certificate) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Certificate. +func (mg *Certificate) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Certificate. +func (mg *Certificate) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Certificate. +func (mg *Certificate) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Certificate. +func (mg *Certificate) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Certificate. +func (mg *Certificate) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Certificate. +func (mg *Certificate) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Certificate. +func (mg *Certificate) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Certificate. 
+func (mg *Certificate) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Key. +func (mg *Key) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Key. +func (mg *Key) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Key. +func (mg *Key) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Key. +func (mg *Key) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Key. +func (mg *Key) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Key. +func (mg *Key) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Key. +func (mg *Key) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Key. +func (mg *Key) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Key. +func (mg *Key) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Key. +func (mg *Key) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Key. +func (mg *Key) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Key. 
+func (mg *Key) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this ManagedHardwareSecurityModule. +func (mg *ManagedHardwareSecurityModule) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ManagedHardwareSecurityModule. +func (mg *ManagedHardwareSecurityModule) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ManagedHardwareSecurityModule. +func (mg *ManagedHardwareSecurityModule) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ManagedHardwareSecurityModule. +func (mg *ManagedHardwareSecurityModule) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ManagedHardwareSecurityModule. +func (mg *ManagedHardwareSecurityModule) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ManagedHardwareSecurityModule. +func (mg *ManagedHardwareSecurityModule) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ManagedHardwareSecurityModule. +func (mg *ManagedHardwareSecurityModule) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ManagedHardwareSecurityModule. +func (mg *ManagedHardwareSecurityModule) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ManagedHardwareSecurityModule. +func (mg *ManagedHardwareSecurityModule) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ManagedHardwareSecurityModule. 
+func (mg *ManagedHardwareSecurityModule) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ManagedHardwareSecurityModule. +func (mg *ManagedHardwareSecurityModule) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ManagedHardwareSecurityModule. +func (mg *ManagedHardwareSecurityModule) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Vault. +func (mg *Vault) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Vault. +func (mg *Vault) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Vault. +func (mg *Vault) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Vault. +func (mg *Vault) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Vault. +func (mg *Vault) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Vault. +func (mg *Vault) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Vault. +func (mg *Vault) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Vault. +func (mg *Vault) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Vault. 
+func (mg *Vault) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Vault. +func (mg *Vault) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Vault. +func (mg *Vault) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Vault. +func (mg *Vault) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/keyvault/v1beta2/zz_generated.managedlist.go b/apis/keyvault/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..1b9619330 --- /dev/null +++ b/apis/keyvault/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,44 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this CertificateList. +func (l *CertificateList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this KeyList. +func (l *KeyList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ManagedHardwareSecurityModuleList. +func (l *ManagedHardwareSecurityModuleList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this VaultList. 
+func (l *VaultList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/keyvault/v1beta2/zz_generated.resolvers.go b/apis/keyvault/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..138e48bbe --- /dev/null +++ b/apis/keyvault/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,180 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + rconfig "github.com/upbound/provider-azure/apis/rconfig" + apisresolver "github.com/upbound/provider-azure/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *Certificate) ResolveReferences( // ResolveReferences of this Certificate. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta2", "Vault", "VaultList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.KeyVaultID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.KeyVaultIDRef, + Selector: mg.Spec.ForProvider.KeyVaultIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.KeyVaultID") + } + mg.Spec.ForProvider.KeyVaultID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.KeyVaultIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta2", "Vault", "VaultList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.KeyVaultID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.KeyVaultIDRef, + Selector: mg.Spec.InitProvider.KeyVaultIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.KeyVaultID") + } + mg.Spec.InitProvider.KeyVaultID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.KeyVaultIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Key. 
+func (mg *Key) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta2", "Vault", "VaultList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.KeyVaultID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.KeyVaultIDRef, + Selector: mg.Spec.ForProvider.KeyVaultIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.KeyVaultID") + } + mg.Spec.ForProvider.KeyVaultID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.KeyVaultIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta2", "Vault", "VaultList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.KeyVaultID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.KeyVaultIDRef, + Selector: mg.Spec.InitProvider.KeyVaultIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.KeyVaultID") + } + mg.Spec.InitProvider.KeyVaultID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.KeyVaultIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this ManagedHardwareSecurityModule. 
+func (mg *ManagedHardwareSecurityModule) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Vault. 
+func (mg *Vault) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/keyvault/v1beta2/zz_groupversion_info.go b/apis/keyvault/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..3df5cb292 --- /dev/null +++ b/apis/keyvault/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=keyvault.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
+const ( + CRDGroup = "keyvault.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/keyvault/v1beta2/zz_key_terraformed.go b/apis/keyvault/v1beta2/zz_key_terraformed.go new file mode 100755 index 000000000..afc441c12 --- /dev/null +++ b/apis/keyvault/v1beta2/zz_key_terraformed.go @@ -0,0 +1,130 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Key +func (mg *Key) GetTerraformResourceType() string { + return "azurerm_key_vault_key" +} + +// GetConnectionDetailsMapping for this Key +func (tr *Key) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Key +func (tr *Key) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Key +func (tr *Key) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Key +func (tr *Key) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + 
} + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Key +func (tr *Key) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Key +func (tr *Key) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Key +func (tr *Key) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Key +func (tr *Key) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Key using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *Key) LateInitialize(attrs []byte) (bool, error) { + params := &KeyParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + opts = append(opts, resource.WithNameFilter("RotationPolicy")) + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Key) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/keyvault/v1beta2/zz_key_types.go b/apis/keyvault/v1beta2/zz_key_types.go new file mode 100755 index 000000000..a3ed3e26b --- /dev/null +++ b/apis/keyvault/v1beta2/zz_key_types.go @@ -0,0 +1,310 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AutomaticInitParameters struct { + + // Rotate automatically at a duration after create as an ISO 8601 duration. + TimeAfterCreation *string `json:"timeAfterCreation,omitempty" tf:"time_after_creation,omitempty"` + + // Rotate automatically at a duration before expiry as an ISO 8601 duration. + TimeBeforeExpiry *string `json:"timeBeforeExpiry,omitempty" tf:"time_before_expiry,omitempty"` +} + +type AutomaticObservation struct { + + // Rotate automatically at a duration after create as an ISO 8601 duration. + TimeAfterCreation *string `json:"timeAfterCreation,omitempty" tf:"time_after_creation,omitempty"` + + // Rotate automatically at a duration before expiry as an ISO 8601 duration. 
+ TimeBeforeExpiry *string `json:"timeBeforeExpiry,omitempty" tf:"time_before_expiry,omitempty"` +} + +type AutomaticParameters struct { + + // Rotate automatically at a duration after create as an ISO 8601 duration. + // +kubebuilder:validation:Optional + TimeAfterCreation *string `json:"timeAfterCreation,omitempty" tf:"time_after_creation,omitempty"` + + // Rotate automatically at a duration before expiry as an ISO 8601 duration. + // +kubebuilder:validation:Optional + TimeBeforeExpiry *string `json:"timeBeforeExpiry,omitempty" tf:"time_before_expiry,omitempty"` +} + +type KeyInitParameters struct { + + // Specifies the curve to use when creating an EC key. Possible values are P-256, P-256K, P-384, and P-521. This field will be required in a future release if key_type is EC or EC-HSM. The API will default to P-256 if nothing is specified. Changing this forces a new resource to be created. + Curve *string `json:"curve,omitempty" tf:"curve,omitempty"` + + // Expiration UTC datetime (Y-m-d'T'H:M:S'Z'). When this parameter gets changed on reruns, if newer date is ahead of current date, an update is performed. If the newer date is before the current date, resource will be force created. + ExpirationDate *string `json:"expirationDate,omitempty" tf:"expiration_date,omitempty"` + + // A list of JSON web key operations. Possible values include: decrypt, encrypt, sign, unwrapKey, verify and wrapKey. Please note these values are case sensitive. + KeyOpts []*string `json:"keyOpts,omitempty" tf:"key_opts,omitempty"` + + // Specifies the Size of the RSA key to create in bytes. For example, 1024 or 2048. Note: This field is required if key_type is RSA or RSA-HSM. Changing this forces a new resource to be created. + KeySize *float64 `json:"keySize,omitempty" tf:"key_size,omitempty"` + + // Specifies the Key Type to use for this Key Vault Key. Possible values are EC (Elliptic Curve), EC-HSM, RSA and RSA-HSM. Changing this forces a new resource to be created. 
+ KeyType *string `json:"keyType,omitempty" tf:"key_type,omitempty"` + + // The ID of the Key Vault where the Key should be created. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta2.Vault + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + KeyVaultID *string `json:"keyVaultId,omitempty" tf:"key_vault_id,omitempty"` + + // Reference to a Vault in keyvault to populate keyVaultId. + // +kubebuilder:validation:Optional + KeyVaultIDRef *v1.Reference `json:"keyVaultIdRef,omitempty" tf:"-"` + + // Selector for a Vault in keyvault to populate keyVaultId. + // +kubebuilder:validation:Optional + KeyVaultIDSelector *v1.Selector `json:"keyVaultIdSelector,omitempty" tf:"-"` + + // Specifies the name of the Key Vault Key. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Key not usable before the provided UTC datetime (Y-m-d'T'H:M:S'Z'). + NotBeforeDate *string `json:"notBeforeDate,omitempty" tf:"not_before_date,omitempty"` + + // A rotation_policy block as defined below. + RotationPolicy *RotationPolicyInitParameters `json:"rotationPolicy,omitempty" tf:"rotation_policy,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type KeyObservation struct { + + // Specifies the curve to use when creating an EC key. Possible values are P-256, P-256K, P-384, and P-521. This field will be required in a future release if key_type is EC or EC-HSM. The API will default to P-256 if nothing is specified. Changing this forces a new resource to be created. + Curve *string `json:"curve,omitempty" tf:"curve,omitempty"` + + // The RSA public exponent of this Key Vault Key. 
+ E *string `json:"e,omitempty" tf:"e,omitempty"` + + // Expiration UTC datetime (Y-m-d'T'H:M:S'Z'). When this parameter gets changed on reruns, if newer date is ahead of current date, an update is performed. If the newer date is before the current date, resource will be force created. + ExpirationDate *string `json:"expirationDate,omitempty" tf:"expiration_date,omitempty"` + + // The Key Vault Key ID. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A list of JSON web key operations. Possible values include: decrypt, encrypt, sign, unwrapKey, verify and wrapKey. Please note these values are case sensitive. + KeyOpts []*string `json:"keyOpts,omitempty" tf:"key_opts,omitempty"` + + // Specifies the Size of the RSA key to create in bytes. For example, 1024 or 2048. Note: This field is required if key_type is RSA or RSA-HSM. Changing this forces a new resource to be created. + KeySize *float64 `json:"keySize,omitempty" tf:"key_size,omitempty"` + + // Specifies the Key Type to use for this Key Vault Key. Possible values are EC (Elliptic Curve), EC-HSM, RSA and RSA-HSM. Changing this forces a new resource to be created. + KeyType *string `json:"keyType,omitempty" tf:"key_type,omitempty"` + + // The ID of the Key Vault where the Key should be created. Changing this forces a new resource to be created. + KeyVaultID *string `json:"keyVaultId,omitempty" tf:"key_vault_id,omitempty"` + + // The RSA modulus of this Key Vault Key. + N *string `json:"n,omitempty" tf:"n,omitempty"` + + // Specifies the name of the Key Vault Key. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Key not usable before the provided UTC datetime (Y-m-d'T'H:M:S'Z'). + NotBeforeDate *string `json:"notBeforeDate,omitempty" tf:"not_before_date,omitempty"` + + // The OpenSSH encoded public key of this Key Vault Key. 
+ PublicKeyOpenssh *string `json:"publicKeyOpenssh,omitempty" tf:"public_key_openssh,omitempty"` + + // The PEM encoded public key of this Key Vault Key. + PublicKeyPem *string `json:"publicKeyPem,omitempty" tf:"public_key_pem,omitempty"` + + // The (Versioned) ID for this Key Vault Key. This property points to a specific version of a Key Vault Key, as such using this won't auto-rotate values if used in other Azure Services. + ResourceID *string `json:"resourceId,omitempty" tf:"resource_id,omitempty"` + + // The Versionless ID of the Key Vault Key. This property allows other Azure Services (that support it) to auto-rotate their value when the Key Vault Key is updated. + ResourceVersionlessID *string `json:"resourceVersionlessId,omitempty" tf:"resource_versionless_id,omitempty"` + + // A rotation_policy block as defined below. + RotationPolicy *RotationPolicyObservation `json:"rotationPolicy,omitempty" tf:"rotation_policy,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The current version of the Key Vault Key. + Version *string `json:"version,omitempty" tf:"version,omitempty"` + + // The Base ID of the Key Vault Key. + VersionlessID *string `json:"versionlessId,omitempty" tf:"versionless_id,omitempty"` + + // The EC X component of this Key Vault Key. + X *string `json:"x,omitempty" tf:"x,omitempty"` + + // The EC Y component of this Key Vault Key. + Y *string `json:"y,omitempty" tf:"y,omitempty"` +} + +type KeyParameters struct { + + // Specifies the curve to use when creating an EC key. Possible values are P-256, P-256K, P-384, and P-521. This field will be required in a future release if key_type is EC or EC-HSM. The API will default to P-256 if nothing is specified. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + Curve *string `json:"curve,omitempty" tf:"curve,omitempty"` + + // Expiration UTC datetime (Y-m-d'T'H:M:S'Z'). When this parameter gets changed on reruns, if newer date is ahead of current date, an update is performed. If the newer date is before the current date, resource will be force created. + // +kubebuilder:validation:Optional + ExpirationDate *string `json:"expirationDate,omitempty" tf:"expiration_date,omitempty"` + + // A list of JSON web key operations. Possible values include: decrypt, encrypt, sign, unwrapKey, verify and wrapKey. Please note these values are case sensitive. + // +kubebuilder:validation:Optional + KeyOpts []*string `json:"keyOpts,omitempty" tf:"key_opts,omitempty"` + + // Specifies the Size of the RSA key to create in bytes. For example, 1024 or 2048. Note: This field is required if key_type is RSA or RSA-HSM. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + KeySize *float64 `json:"keySize,omitempty" tf:"key_size,omitempty"` + + // Specifies the Key Type to use for this Key Vault Key. Possible values are EC (Elliptic Curve), EC-HSM, RSA and RSA-HSM. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + KeyType *string `json:"keyType,omitempty" tf:"key_type,omitempty"` + + // The ID of the Key Vault where the Key should be created. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta2.Vault + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + KeyVaultID *string `json:"keyVaultId,omitempty" tf:"key_vault_id,omitempty"` + + // Reference to a Vault in keyvault to populate keyVaultId. 
+ // +kubebuilder:validation:Optional + KeyVaultIDRef *v1.Reference `json:"keyVaultIdRef,omitempty" tf:"-"` + + // Selector for a Vault in keyvault to populate keyVaultId. + // +kubebuilder:validation:Optional + KeyVaultIDSelector *v1.Selector `json:"keyVaultIdSelector,omitempty" tf:"-"` + + // Specifies the name of the Key Vault Key. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Key not usable before the provided UTC datetime (Y-m-d'T'H:M:S'Z'). + // +kubebuilder:validation:Optional + NotBeforeDate *string `json:"notBeforeDate,omitempty" tf:"not_before_date,omitempty"` + + // A rotation_policy block as defined below. + // +kubebuilder:validation:Optional + RotationPolicy *RotationPolicyParameters `json:"rotationPolicy,omitempty" tf:"rotation_policy,omitempty"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type RotationPolicyInitParameters struct { + + // An automatic block as defined below. + Automatic *AutomaticInitParameters `json:"automatic,omitempty" tf:"automatic,omitempty"` + + // Expire a Key Vault Key after given duration as an ISO 8601 duration. + ExpireAfter *string `json:"expireAfter,omitempty" tf:"expire_after,omitempty"` + + // Notify at a given duration before expiry as an ISO 8601 duration. + NotifyBeforeExpiry *string `json:"notifyBeforeExpiry,omitempty" tf:"notify_before_expiry,omitempty"` +} + +type RotationPolicyObservation struct { + + // An automatic block as defined below. + Automatic *AutomaticObservation `json:"automatic,omitempty" tf:"automatic,omitempty"` + + // Expire a Key Vault Key after given duration as an ISO 8601 duration. + ExpireAfter *string `json:"expireAfter,omitempty" tf:"expire_after,omitempty"` + + // Notify at a given duration before expiry as an ISO 8601 duration. 
+ NotifyBeforeExpiry *string `json:"notifyBeforeExpiry,omitempty" tf:"notify_before_expiry,omitempty"` +} + +type RotationPolicyParameters struct { + + // An automatic block as defined below. + // +kubebuilder:validation:Optional + Automatic *AutomaticParameters `json:"automatic,omitempty" tf:"automatic,omitempty"` + + // Expire a Key Vault Key after given duration as an ISO 8601 duration. + // +kubebuilder:validation:Optional + ExpireAfter *string `json:"expireAfter,omitempty" tf:"expire_after,omitempty"` + + // Notify at a given duration before expiry as an ISO 8601 duration. + // +kubebuilder:validation:Optional + NotifyBeforeExpiry *string `json:"notifyBeforeExpiry,omitempty" tf:"notify_before_expiry,omitempty"` +} + +// KeySpec defines the desired state of Key +type KeySpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider KeyParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider KeyInitParameters `json:"initProvider,omitempty"` +} + +// KeyStatus defines the observed state of Key. +type KeyStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider KeyObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Key is the Schema for the Keys API. Manages a Key Vault Key. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type Key struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.keyOpts) || (has(self.initProvider) && has(self.initProvider.keyOpts))",message="spec.forProvider.keyOpts is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.keyType) || (has(self.initProvider) && has(self.initProvider.keyType))",message="spec.forProvider.keyType is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec KeySpec `json:"spec"` + Status KeyStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// KeyList contains a list of Keys +type KeyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Key `json:"items"` +} + +// Repository type metadata. 
+var ( + Key_Kind = "Key" + Key_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Key_Kind}.String() + Key_KindAPIVersion = Key_Kind + "." + CRDGroupVersion.String() + Key_GroupVersionKind = CRDGroupVersion.WithKind(Key_Kind) +) + +func init() { + SchemeBuilder.Register(&Key{}, &KeyList{}) +} diff --git a/apis/keyvault/v1beta2/zz_managedhardwaresecuritymodule_terraformed.go b/apis/keyvault/v1beta2/zz_managedhardwaresecuritymodule_terraformed.go new file mode 100755 index 000000000..c10e41f75 --- /dev/null +++ b/apis/keyvault/v1beta2/zz_managedhardwaresecuritymodule_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ManagedHardwareSecurityModule +func (mg *ManagedHardwareSecurityModule) GetTerraformResourceType() string { + return "azurerm_key_vault_managed_hardware_security_module" +} + +// GetConnectionDetailsMapping for this ManagedHardwareSecurityModule +func (tr *ManagedHardwareSecurityModule) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"security_domain_encrypted_data": "status.atProvider.securityDomainEncryptedData"} +} + +// GetObservation of this ManagedHardwareSecurityModule +func (tr *ManagedHardwareSecurityModule) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ManagedHardwareSecurityModule +func (tr *ManagedHardwareSecurityModule) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return 
json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ManagedHardwareSecurityModule +func (tr *ManagedHardwareSecurityModule) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ManagedHardwareSecurityModule +func (tr *ManagedHardwareSecurityModule) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ManagedHardwareSecurityModule +func (tr *ManagedHardwareSecurityModule) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ManagedHardwareSecurityModule +func (tr *ManagedHardwareSecurityModule) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this ManagedHardwareSecurityModule +func (tr *ManagedHardwareSecurityModule) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. 
As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ManagedHardwareSecurityModule using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ManagedHardwareSecurityModule) LateInitialize(attrs []byte) (bool, error) { + params := &ManagedHardwareSecurityModuleParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ManagedHardwareSecurityModule) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/keyvault/v1beta2/zz_managedhardwaresecuritymodule_types.go b/apis/keyvault/v1beta2/zz_managedhardwaresecuritymodule_types.go new file mode 100755 index 000000000..b00c0630a --- /dev/null +++ b/apis/keyvault/v1beta2/zz_managedhardwaresecuritymodule_types.go @@ -0,0 +1,254 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ManagedHardwareSecurityModuleInitParameters struct { + + // Specifies a list of administrators object IDs for the key vault Managed Hardware Security Module. Changing this forces a new resource to be created. + // +listType=set + AdminObjectIds []*string `json:"adminObjectIds,omitempty" tf:"admin_object_ids,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A network_acls block as defined below. + NetworkAcls *ManagedHardwareSecurityModuleNetworkAclsInitParameters `json:"networkAcls,omitempty" tf:"network_acls,omitempty"` + + // Whether traffic from public networks is permitted. Defaults to true. Changing this forces a new resource to be created. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // Is Purge Protection enabled for this Key Vault Managed Hardware Security Module? Changing this forces a new resource to be created. + PurgeProtectionEnabled *bool `json:"purgeProtectionEnabled,omitempty" tf:"purge_protection_enabled,omitempty"` + + // A list of KeyVault certificates resource IDs (minimum of three and up to a maximum of 10) to activate this Managed HSM. More information see activate-your-managed-hsm + SecurityDomainKeyVaultCertificateIds []*string `json:"securityDomainKeyVaultCertificateIds,omitempty" tf:"security_domain_key_vault_certificate_ids,omitempty"` + + // Specifies the minimum number of shares required to decrypt the security domain for recovery. This is required when security_domain_key_vault_certificate_ids is specified. Valid values are between 2 and 10. 
+ SecurityDomainQuorum *float64 `json:"securityDomainQuorum,omitempty" tf:"security_domain_quorum,omitempty"` + + // The Name of the SKU used for this Key Vault Managed Hardware Security Module. Possible value is Standard_B1. Changing this forces a new resource to be created. + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // The number of days that items should be retained for once soft-deleted. This value can be between 7 and 90 days. Defaults to 90. Changing this forces a new resource to be created. + SoftDeleteRetentionDays *float64 `json:"softDeleteRetentionDays,omitempty" tf:"soft_delete_retention_days,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The Azure Active Directory Tenant ID that should be used for authenticating requests to the key vault Managed Hardware Security Module. Changing this forces a new resource to be created. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` +} + +type ManagedHardwareSecurityModuleNetworkAclsInitParameters struct { + + // Specifies which traffic can bypass the network rules. Possible values are AzureServices and None. + Bypass *string `json:"bypass,omitempty" tf:"bypass,omitempty"` + + // The Default Action to use. Possible values are Allow and Deny. + DefaultAction *string `json:"defaultAction,omitempty" tf:"default_action,omitempty"` +} + +type ManagedHardwareSecurityModuleNetworkAclsObservation struct { + + // Specifies which traffic can bypass the network rules. Possible values are AzureServices and None. + Bypass *string `json:"bypass,omitempty" tf:"bypass,omitempty"` + + // The Default Action to use. Possible values are Allow and Deny. + DefaultAction *string `json:"defaultAction,omitempty" tf:"default_action,omitempty"` +} + +type ManagedHardwareSecurityModuleNetworkAclsParameters struct { + + // Specifies which traffic can bypass the network rules. 
Possible values are AzureServices and None. + // +kubebuilder:validation:Optional + Bypass *string `json:"bypass" tf:"bypass,omitempty"` + + // The Default Action to use. Possible values are Allow and Deny. + // +kubebuilder:validation:Optional + DefaultAction *string `json:"defaultAction" tf:"default_action,omitempty"` +} + +type ManagedHardwareSecurityModuleObservation struct { + + // Specifies a list of administrators object IDs for the key vault Managed Hardware Security Module. Changing this forces a new resource to be created. + // +listType=set + AdminObjectIds []*string `json:"adminObjectIds,omitempty" tf:"admin_object_ids,omitempty"` + + // The URI of the Key Vault Managed Hardware Security Module, used for performing operations on keys. + HSMURI *string `json:"hsmUri,omitempty" tf:"hsm_uri,omitempty"` + + // The Key Vault Secret Managed Hardware Security Module ID. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A network_acls block as defined below. + NetworkAcls *ManagedHardwareSecurityModuleNetworkAclsObservation `json:"networkAcls,omitempty" tf:"network_acls,omitempty"` + + // Whether traffic from public networks is permitted. Defaults to true. Changing this forces a new resource to be created. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // Is Purge Protection enabled for this Key Vault Managed Hardware Security Module? Changing this forces a new resource to be created. + PurgeProtectionEnabled *bool `json:"purgeProtectionEnabled,omitempty" tf:"purge_protection_enabled,omitempty"` + + // The name of the resource group in which to create the Key Vault Managed Hardware Security Module. Changing this forces a new resource to be created. 
+ ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A list of KeyVault certificates resource IDs (minimum of three and up to a maximum of 10) to activate this Managed HSM. More information see activate-your-managed-hsm + SecurityDomainKeyVaultCertificateIds []*string `json:"securityDomainKeyVaultCertificateIds,omitempty" tf:"security_domain_key_vault_certificate_ids,omitempty"` + + // Specifies the minimum number of shares required to decrypt the security domain for recovery. This is required when security_domain_key_vault_certificate_ids is specified. Valid values are between 2 and 10. + SecurityDomainQuorum *float64 `json:"securityDomainQuorum,omitempty" tf:"security_domain_quorum,omitempty"` + + // The Name of the SKU used for this Key Vault Managed Hardware Security Module. Possible value is Standard_B1. Changing this forces a new resource to be created. + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // The number of days that items should be retained for once soft-deleted. This value can be between 7 and 90 days. Defaults to 90. Changing this forces a new resource to be created. + SoftDeleteRetentionDays *float64 `json:"softDeleteRetentionDays,omitempty" tf:"soft_delete_retention_days,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The Azure Active Directory Tenant ID that should be used for authenticating requests to the key vault Managed Hardware Security Module. Changing this forces a new resource to be created. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` +} + +type ManagedHardwareSecurityModuleParameters struct { + + // Specifies a list of administrators object IDs for the key vault Managed Hardware Security Module. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + // +listType=set + AdminObjectIds []*string `json:"adminObjectIds,omitempty" tf:"admin_object_ids,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A network_acls block as defined below. + // +kubebuilder:validation:Optional + NetworkAcls *ManagedHardwareSecurityModuleNetworkAclsParameters `json:"networkAcls,omitempty" tf:"network_acls,omitempty"` + + // Whether traffic from public networks is permitted. Defaults to true. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // Is Purge Protection enabled for this Key Vault Managed Hardware Security Module? Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + PurgeProtectionEnabled *bool `json:"purgeProtectionEnabled,omitempty" tf:"purge_protection_enabled,omitempty"` + + // The name of the resource group in which to create the Key Vault Managed Hardware Security Module. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. 
+ // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A list of KeyVault certificates resource IDs (minimum of three and up to a maximum of 10) to activate this Managed HSM. More information see activate-your-managed-hsm + // +kubebuilder:validation:Optional + SecurityDomainKeyVaultCertificateIds []*string `json:"securityDomainKeyVaultCertificateIds,omitempty" tf:"security_domain_key_vault_certificate_ids,omitempty"` + + // Specifies the minimum number of shares required to decrypt the security domain for recovery. This is required when security_domain_key_vault_certificate_ids is specified. Valid values are between 2 and 10. + // +kubebuilder:validation:Optional + SecurityDomainQuorum *float64 `json:"securityDomainQuorum,omitempty" tf:"security_domain_quorum,omitempty"` + + // The Name of the SKU used for this Key Vault Managed Hardware Security Module. Possible value is Standard_B1. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // The number of days that items should be retained for once soft-deleted. This value can be between 7 and 90 days. Defaults to 90. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + SoftDeleteRetentionDays *float64 `json:"softDeleteRetentionDays,omitempty" tf:"soft_delete_retention_days,omitempty"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The Azure Active Directory Tenant ID that should be used for authenticating requests to the key vault Managed Hardware Security Module. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` +} + +// ManagedHardwareSecurityModuleSpec defines the desired state of ManagedHardwareSecurityModule +type ManagedHardwareSecurityModuleSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ManagedHardwareSecurityModuleParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ManagedHardwareSecurityModuleInitParameters `json:"initProvider,omitempty"` +} + +// ManagedHardwareSecurityModuleStatus defines the observed state of ManagedHardwareSecurityModule. +type ManagedHardwareSecurityModuleStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ManagedHardwareSecurityModuleObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ManagedHardwareSecurityModule is the Schema for the ManagedHardwareSecurityModules API. Manages a Key Vault Managed Hardware Security Module. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type ManagedHardwareSecurityModule struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.adminObjectIds) || (has(self.initProvider) && has(self.initProvider.adminObjectIds))",message="spec.forProvider.adminObjectIds is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.skuName) || (has(self.initProvider) && has(self.initProvider.skuName))",message="spec.forProvider.skuName is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.tenantId) || (has(self.initProvider) && has(self.initProvider.tenantId))",message="spec.forProvider.tenantId is a required parameter" + Spec ManagedHardwareSecurityModuleSpec `json:"spec"` + Status 
ManagedHardwareSecurityModuleStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ManagedHardwareSecurityModuleList contains a list of ManagedHardwareSecurityModules +type ManagedHardwareSecurityModuleList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ManagedHardwareSecurityModule `json:"items"` +} + +// Repository type metadata. +var ( + ManagedHardwareSecurityModule_Kind = "ManagedHardwareSecurityModule" + ManagedHardwareSecurityModule_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ManagedHardwareSecurityModule_Kind}.String() + ManagedHardwareSecurityModule_KindAPIVersion = ManagedHardwareSecurityModule_Kind + "." + CRDGroupVersion.String() + ManagedHardwareSecurityModule_GroupVersionKind = CRDGroupVersion.WithKind(ManagedHardwareSecurityModule_Kind) +) + +func init() { + SchemeBuilder.Register(&ManagedHardwareSecurityModule{}, &ManagedHardwareSecurityModuleList{}) +} diff --git a/apis/keyvault/v1beta2/zz_vault_terraformed.go b/apis/keyvault/v1beta2/zz_vault_terraformed.go new file mode 100755 index 000000000..7c405edc2 --- /dev/null +++ b/apis/keyvault/v1beta2/zz_vault_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Vault +func (mg *Vault) GetTerraformResourceType() string { + return "azurerm_key_vault" +} + +// GetConnectionDetailsMapping for this Vault +func (tr *Vault) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Vault +func (tr *Vault) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Vault +func (tr *Vault) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Vault +func (tr *Vault) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Vault +func (tr *Vault) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Vault +func (tr *Vault) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Vault +func (tr *Vault) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Vault +func 
(tr *Vault) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Vault using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Vault) LateInitialize(attrs []byte) (bool, error) { + params := &VaultParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Vault) GetTerraformSchemaVersion() int { + return 2 +} diff --git a/apis/keyvault/v1beta2/zz_vault_types.go b/apis/keyvault/v1beta2/zz_vault_types.go new file mode 100755 index 000000000..48b6474bc --- /dev/null +++ b/apis/keyvault/v1beta2/zz_vault_types.go @@ -0,0 +1,368 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AccessPolicyInitParameters struct { +} + +type AccessPolicyObservation struct { + + // The object ID of an Application in Azure Active Directory. + ApplicationID *string `json:"applicationId,omitempty" tf:"application_id,omitempty"` + + // List of certificate permissions, must be one or more from the following: Backup, Create, Delete, DeleteIssuers, Get, GetIssuers, Import, List, ListIssuers, ManageContacts, ManageIssuers, Purge, Recover, Restore, SetIssuers and Update. + CertificatePermissions []*string `json:"certificatePermissions,omitempty" tf:"certificate_permissions,omitempty"` + + // List of key permissions. Possible values are Backup, Create, Decrypt, Delete, Encrypt, Get, Import, List, Purge, Recover, Restore, Sign, UnwrapKey, Update, Verify, WrapKey, Release, Rotate, GetRotationPolicy and SetRotationPolicy. + KeyPermissions []*string `json:"keyPermissions,omitempty" tf:"key_permissions,omitempty"` + + // The object ID of a user, service principal or security group in the Azure Active Directory tenant for the vault. The object ID must be unique for the list of access policies. 
+ ObjectID *string `json:"objectId,omitempty" tf:"object_id,omitempty"` + + // List of secret permissions, must be one or more from the following: Backup, Delete, Get, List, Purge, Recover, Restore and Set. + SecretPermissions []*string `json:"secretPermissions,omitempty" tf:"secret_permissions,omitempty"` + + // List of storage permissions, must be one or more from the following: Backup, Delete, DeleteSAS, Get, GetSAS, List, ListSAS, Purge, Recover, RegenerateKey, Restore, Set, SetSAS and Update. + StoragePermissions []*string `json:"storagePermissions,omitempty" tf:"storage_permissions,omitempty"` + + // The Azure Active Directory tenant ID that should be used for authenticating requests to the key vault. Must match the tenant_id used above. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` +} + +type AccessPolicyParameters struct { +} + +type ContactInitParameters struct { + + // E-mail address of the contact. + Email *string `json:"email,omitempty" tf:"email,omitempty"` + + // Name of the contact. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Phone number of the contact. + Phone *string `json:"phone,omitempty" tf:"phone,omitempty"` +} + +type ContactObservation struct { + + // E-mail address of the contact. + Email *string `json:"email,omitempty" tf:"email,omitempty"` + + // Name of the contact. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Phone number of the contact. + Phone *string `json:"phone,omitempty" tf:"phone,omitempty"` +} + +type ContactParameters struct { + + // E-mail address of the contact. + // +kubebuilder:validation:Optional + Email *string `json:"email" tf:"email,omitempty"` + + // Name of the contact. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Phone number of the contact. 
+ // +kubebuilder:validation:Optional + Phone *string `json:"phone,omitempty" tf:"phone,omitempty"` +} + +type NetworkAclsInitParameters struct { + + // Specifies which traffic can bypass the network rules. Possible values are AzureServices and None. + Bypass *string `json:"bypass,omitempty" tf:"bypass,omitempty"` + + // The Default Action to use when no rules match from ip_rules / virtual_network_subnet_ids. Possible values are Allow and Deny. + DefaultAction *string `json:"defaultAction,omitempty" tf:"default_action,omitempty"` + + // One or more IP Addresses, or CIDR Blocks which should be able to access the Key Vault. + // +listType=set + IPRules []*string `json:"ipRules,omitempty" tf:"ip_rules,omitempty"` + + // One or more Subnet IDs which should be able to access this Key Vault. + // +listType=set + VirtualNetworkSubnetIds []*string `json:"virtualNetworkSubnetIds,omitempty" tf:"virtual_network_subnet_ids,omitempty"` +} + +type NetworkAclsObservation struct { + + // Specifies which traffic can bypass the network rules. Possible values are AzureServices and None. + Bypass *string `json:"bypass,omitempty" tf:"bypass,omitempty"` + + // The Default Action to use when no rules match from ip_rules / virtual_network_subnet_ids. Possible values are Allow and Deny. + DefaultAction *string `json:"defaultAction,omitempty" tf:"default_action,omitempty"` + + // One or more IP Addresses, or CIDR Blocks which should be able to access the Key Vault. + // +listType=set + IPRules []*string `json:"ipRules,omitempty" tf:"ip_rules,omitempty"` + + // One or more Subnet IDs which should be able to access this Key Vault. + // +listType=set + VirtualNetworkSubnetIds []*string `json:"virtualNetworkSubnetIds,omitempty" tf:"virtual_network_subnet_ids,omitempty"` +} + +type NetworkAclsParameters struct { + + // Specifies which traffic can bypass the network rules. Possible values are AzureServices and None. 
+ // +kubebuilder:validation:Optional + Bypass *string `json:"bypass" tf:"bypass,omitempty"` + + // The Default Action to use when no rules match from ip_rules / virtual_network_subnet_ids. Possible values are Allow and Deny. + // +kubebuilder:validation:Optional + DefaultAction *string `json:"defaultAction" tf:"default_action,omitempty"` + + // One or more IP Addresses, or CIDR Blocks which should be able to access the Key Vault. + // +kubebuilder:validation:Optional + // +listType=set + IPRules []*string `json:"ipRules,omitempty" tf:"ip_rules,omitempty"` + + // One or more Subnet IDs which should be able to access this Key Vault. + // +kubebuilder:validation:Optional + // +listType=set + VirtualNetworkSubnetIds []*string `json:"virtualNetworkSubnetIds,omitempty" tf:"virtual_network_subnet_ids,omitempty"` +} + +type VaultInitParameters struct { + + // One or more contact block as defined below. + Contact []ContactInitParameters `json:"contact,omitempty" tf:"contact,omitempty"` + + // Boolean flag to specify whether Azure Key Vault uses Role Based Access Control (RBAC) for authorization of data actions. + EnableRbacAuthorization *bool `json:"enableRbacAuthorization,omitempty" tf:"enable_rbac_authorization,omitempty"` + + // Boolean flag to specify whether Azure Virtual Machines are permitted to retrieve certificates stored as secrets from the key vault. + EnabledForDeployment *bool `json:"enabledForDeployment,omitempty" tf:"enabled_for_deployment,omitempty"` + + // Boolean flag to specify whether Azure Disk Encryption is permitted to retrieve secrets from the vault and unwrap keys. + EnabledForDiskEncryption *bool `json:"enabledForDiskEncryption,omitempty" tf:"enabled_for_disk_encryption,omitempty"` + + // Boolean flag to specify whether Azure Resource Manager is permitted to retrieve secrets from the key vault. 
+ EnabledForTemplateDeployment *bool `json:"enabledForTemplateDeployment,omitempty" tf:"enabled_for_template_deployment,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A network_acls block as defined below. + NetworkAcls *NetworkAclsInitParameters `json:"networkAcls,omitempty" tf:"network_acls,omitempty"` + + // Whether public network access is allowed for this Key Vault. Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // Is Purge Protection enabled for this Key Vault? + PurgeProtectionEnabled *bool `json:"purgeProtectionEnabled,omitempty" tf:"purge_protection_enabled,omitempty"` + + // The Name of the SKU used for this Key Vault. Possible values are standard and premium. + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // The number of days that items should be retained for once soft-deleted. This value can be between 7 and 90 (the default) days. + SoftDeleteRetentionDays *float64 `json:"softDeleteRetentionDays,omitempty" tf:"soft_delete_retention_days,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The Azure Active Directory tenant ID that should be used for authenticating requests to the key vault. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` +} + +type VaultObservation struct { + + // A list of access_policy objects (up to 1024) describing access policies, as described below. + AccessPolicy []AccessPolicyObservation `json:"accessPolicy,omitempty" tf:"access_policy,omitempty"` + + // One or more contact block as defined below. 
+ Contact []ContactObservation `json:"contact,omitempty" tf:"contact,omitempty"` + + // Boolean flag to specify whether Azure Key Vault uses Role Based Access Control (RBAC) for authorization of data actions. + EnableRbacAuthorization *bool `json:"enableRbacAuthorization,omitempty" tf:"enable_rbac_authorization,omitempty"` + + // Boolean flag to specify whether Azure Virtual Machines are permitted to retrieve certificates stored as secrets from the key vault. + EnabledForDeployment *bool `json:"enabledForDeployment,omitempty" tf:"enabled_for_deployment,omitempty"` + + // Boolean flag to specify whether Azure Disk Encryption is permitted to retrieve secrets from the vault and unwrap keys. + EnabledForDiskEncryption *bool `json:"enabledForDiskEncryption,omitempty" tf:"enabled_for_disk_encryption,omitempty"` + + // Boolean flag to specify whether Azure Resource Manager is permitted to retrieve secrets from the key vault. + EnabledForTemplateDeployment *bool `json:"enabledForTemplateDeployment,omitempty" tf:"enabled_for_template_deployment,omitempty"` + + // The ID of the Key Vault. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A network_acls block as defined below. + NetworkAcls *NetworkAclsObservation `json:"networkAcls,omitempty" tf:"network_acls,omitempty"` + + // Whether public network access is allowed for this Key Vault. Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // Is Purge Protection enabled for this Key Vault? + PurgeProtectionEnabled *bool `json:"purgeProtectionEnabled,omitempty" tf:"purge_protection_enabled,omitempty"` + + // The name of the resource group in which to create the Key Vault. Changing this forces a new resource to be created. 
+ ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // The Name of the SKU used for this Key Vault. Possible values are standard and premium. + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // The number of days that items should be retained for once soft-deleted. This value can be between 7 and 90 (the default) days. + SoftDeleteRetentionDays *float64 `json:"softDeleteRetentionDays,omitempty" tf:"soft_delete_retention_days,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The Azure Active Directory tenant ID that should be used for authenticating requests to the key vault. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // The URI of the Key Vault, used for performing operations on keys and secrets. + VaultURI *string `json:"vaultUri,omitempty" tf:"vault_uri,omitempty"` +} + +type VaultParameters struct { + + // One or more contact block as defined below. + // +kubebuilder:validation:Optional + Contact []ContactParameters `json:"contact,omitempty" tf:"contact,omitempty"` + + // Boolean flag to specify whether Azure Key Vault uses Role Based Access Control (RBAC) for authorization of data actions. + // +kubebuilder:validation:Optional + EnableRbacAuthorization *bool `json:"enableRbacAuthorization,omitempty" tf:"enable_rbac_authorization,omitempty"` + + // Boolean flag to specify whether Azure Virtual Machines are permitted to retrieve certificates stored as secrets from the key vault. + // +kubebuilder:validation:Optional + EnabledForDeployment *bool `json:"enabledForDeployment,omitempty" tf:"enabled_for_deployment,omitempty"` + + // Boolean flag to specify whether Azure Disk Encryption is permitted to retrieve secrets from the vault and unwrap keys. 
+ // +kubebuilder:validation:Optional + EnabledForDiskEncryption *bool `json:"enabledForDiskEncryption,omitempty" tf:"enabled_for_disk_encryption,omitempty"` + + // Boolean flag to specify whether Azure Resource Manager is permitted to retrieve secrets from the key vault. + // +kubebuilder:validation:Optional + EnabledForTemplateDeployment *bool `json:"enabledForTemplateDeployment,omitempty" tf:"enabled_for_template_deployment,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A network_acls block as defined below. + // +kubebuilder:validation:Optional + NetworkAcls *NetworkAclsParameters `json:"networkAcls,omitempty" tf:"network_acls,omitempty"` + + // Whether public network access is allowed for this Key Vault. Defaults to true. + // +kubebuilder:validation:Optional + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // Is Purge Protection enabled for this Key Vault? + // +kubebuilder:validation:Optional + PurgeProtectionEnabled *bool `json:"purgeProtectionEnabled,omitempty" tf:"purge_protection_enabled,omitempty"` + + // The name of the resource group in which to create the Key Vault. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. 
+ // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // The Name of the SKU used for this Key Vault. Possible values are standard and premium. + // +kubebuilder:validation:Optional + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // The number of days that items should be retained for once soft-deleted. This value can be between 7 and 90 (the default) days. + // +kubebuilder:validation:Optional + SoftDeleteRetentionDays *float64 `json:"softDeleteRetentionDays,omitempty" tf:"soft_delete_retention_days,omitempty"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The Azure Active Directory tenant ID that should be used for authenticating requests to the key vault. + // +kubebuilder:validation:Optional + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` +} + +// VaultSpec defines the desired state of Vault +type VaultSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider VaultParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider VaultInitParameters `json:"initProvider,omitempty"` +} + +// VaultStatus defines the observed state of Vault. 
+type VaultStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider VaultObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Vault is the Schema for the Vaults API. Manages a Key Vault. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type Vault struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.skuName) || (has(self.initProvider) && has(self.initProvider.skuName))",message="spec.forProvider.skuName is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.tenantId) || (has(self.initProvider) && has(self.initProvider.tenantId))",message="spec.forProvider.tenantId is a required parameter" + Spec VaultSpec `json:"spec"` + Status VaultStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// VaultList contains a list of 
Vaults +type VaultList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Vault `json:"items"` +} + +// Repository type metadata. +var ( + Vault_Kind = "Vault" + Vault_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Vault_Kind}.String() + Vault_KindAPIVersion = Vault_Kind + "." + CRDGroupVersion.String() + Vault_GroupVersionKind = CRDGroupVersion.WithKind(Vault_Kind) +) + +func init() { + SchemeBuilder.Register(&Vault{}, &VaultList{}) +} diff --git a/apis/kusto/v1beta1/zz_clustermanagedprivateendpoint_types.go b/apis/kusto/v1beta1/zz_clustermanagedprivateendpoint_types.go index 1c58967bf..b18bbcd39 100755 --- a/apis/kusto/v1beta1/zz_clustermanagedprivateendpoint_types.go +++ b/apis/kusto/v1beta1/zz_clustermanagedprivateendpoint_types.go @@ -19,7 +19,7 @@ type ClusterManagedPrivateEndpointInitParameters struct { GroupID *string `json:"groupId,omitempty" tf:"group_id,omitempty"` // The ARM resource ID of the resource for which the managed private endpoint is created. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() PrivateLinkResourceID *string `json:"privateLinkResourceId,omitempty" tf:"private_link_resource_id,omitempty"` @@ -32,7 +32,7 @@ type ClusterManagedPrivateEndpointInitParameters struct { PrivateLinkResourceIDSelector *v1.Selector `json:"privateLinkResourceIdSelector,omitempty" tf:"-"` // The region of the resource to which the managed private endpoint is created. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("location",false) PrivateLinkResourceRegion *string `json:"privateLinkResourceRegion,omitempty" tf:"private_link_resource_region,omitempty"` @@ -74,7 +74,7 @@ type ClusterManagedPrivateEndpointObservation struct { type ClusterManagedPrivateEndpointParameters struct { // The name of the Kusto Cluster. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/kusto/v1beta1.Cluster + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/kusto/v1beta2.Cluster // +kubebuilder:validation:Optional ClusterName *string `json:"clusterName,omitempty" tf:"cluster_name,omitempty"` @@ -91,7 +91,7 @@ type ClusterManagedPrivateEndpointParameters struct { GroupID *string `json:"groupId,omitempty" tf:"group_id,omitempty"` // The ARM resource ID of the resource for which the managed private endpoint is created. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional PrivateLinkResourceID *string `json:"privateLinkResourceId,omitempty" tf:"private_link_resource_id,omitempty"` @@ -105,7 +105,7 @@ type ClusterManagedPrivateEndpointParameters struct { PrivateLinkResourceIDSelector *v1.Selector `json:"privateLinkResourceIdSelector,omitempty" tf:"-"` // The region of the resource to which the managed private endpoint is created. 
Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("location",false) // +kubebuilder:validation:Optional PrivateLinkResourceRegion *string `json:"privateLinkResourceRegion,omitempty" tf:"private_link_resource_region,omitempty"` diff --git a/apis/kusto/v1beta1/zz_clusterprincipalassignment_types.go b/apis/kusto/v1beta1/zz_clusterprincipalassignment_types.go index fb7da9970..ed28957cf 100755 --- a/apis/kusto/v1beta1/zz_clusterprincipalassignment_types.go +++ b/apis/kusto/v1beta1/zz_clusterprincipalassignment_types.go @@ -61,7 +61,7 @@ type ClusterPrincipalAssignmentObservation struct { type ClusterPrincipalAssignmentParameters struct { // The name of the cluster in which to create the resource. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/kusto/v1beta1.Cluster + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/kusto/v1beta2.Cluster // +kubebuilder:validation:Optional ClusterName *string `json:"clusterName,omitempty" tf:"cluster_name,omitempty"` diff --git a/apis/kusto/v1beta1/zz_database_types.go b/apis/kusto/v1beta1/zz_database_types.go index 64bd005a8..05f533c12 100755 --- a/apis/kusto/v1beta1/zz_database_types.go +++ b/apis/kusto/v1beta1/zz_database_types.go @@ -52,7 +52,7 @@ type DatabaseObservation struct { type DatabaseParameters struct { // Specifies the name of the Kusto Cluster this database will be added to. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/kusto/v1beta1.Cluster + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/kusto/v1beta2.Cluster // +kubebuilder:validation:Optional ClusterName *string `json:"clusterName,omitempty" tf:"cluster_name,omitempty"` diff --git a/apis/kusto/v1beta1/zz_databaseprincipalassignment_types.go b/apis/kusto/v1beta1/zz_databaseprincipalassignment_types.go index 39ba850da..a909b25be 100755 --- a/apis/kusto/v1beta1/zz_databaseprincipalassignment_types.go +++ b/apis/kusto/v1beta1/zz_databaseprincipalassignment_types.go @@ -64,7 +64,7 @@ type DatabasePrincipalAssignmentObservation struct { type DatabasePrincipalAssignmentParameters struct { // The name of the cluster in which to create the resource. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/kusto/v1beta1.Cluster + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/kusto/v1beta2.Cluster // +kubebuilder:validation:Optional ClusterName *string `json:"clusterName,omitempty" tf:"cluster_name,omitempty"` diff --git a/apis/kusto/v1beta1/zz_eventgriddataconnection_types.go b/apis/kusto/v1beta1/zz_eventgriddataconnection_types.go index 7fcc1c55e..59ea13b75 100755 --- a/apis/kusto/v1beta1/zz_eventgriddataconnection_types.go +++ b/apis/kusto/v1beta1/zz_eventgriddataconnection_types.go @@ -40,7 +40,7 @@ type EventGridDataConnectionInitParameters struct { EventHubConsumerGroupNameSelector *v1.Selector `json:"eventhubConsumerGroupNameSelector,omitempty" tf:"-"` // Specifies the resource id of the Event Hub this data connection will use for ingestion. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta1.EventHub + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta2.EventHub // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() EventHubID *string `json:"eventhubId,omitempty" tf:"eventhub_id,omitempty"` @@ -65,7 +65,7 @@ type EventGridDataConnectionInitParameters struct { SkipFirstRecord *bool `json:"skipFirstRecord,omitempty" tf:"skip_first_record,omitempty"` // Specifies the resource id of the Storage Account this data connection will use for ingestion. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` @@ -139,7 +139,7 @@ type EventGridDataConnectionParameters struct { BlobStorageEventType *string `json:"blobStorageEventType,omitempty" tf:"blob_storage_event_type,omitempty"` // Specifies the name of the Kusto Cluster this data connection will be added to. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/kusto/v1beta1.Cluster + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/kusto/v1beta2.Cluster // +kubebuilder:validation:Optional ClusterName *string `json:"clusterName,omitempty" tf:"cluster_name,omitempty"` @@ -190,7 +190,7 @@ type EventGridDataConnectionParameters struct { EventHubConsumerGroupNameSelector *v1.Selector `json:"eventhubConsumerGroupNameSelector,omitempty" tf:"-"` // Specifies the resource id of the Event Hub this data connection will use for ingestion. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta1.EventHub + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta2.EventHub // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional EventHubID *string `json:"eventhubId,omitempty" tf:"eventhub_id,omitempty"` @@ -233,7 +233,7 @@ type EventGridDataConnectionParameters struct { SkipFirstRecord *bool `json:"skipFirstRecord,omitempty" tf:"skip_first_record,omitempty"` // Specifies the resource id of the Storage Account this data connection will use for ingestion. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` diff --git a/apis/kusto/v1beta1/zz_eventhubdataconnection_types.go b/apis/kusto/v1beta1/zz_eventhubdataconnection_types.go index e9b7fc769..ac6fb623f 100755 --- a/apis/kusto/v1beta1/zz_eventhubdataconnection_types.go +++ b/apis/kusto/v1beta1/zz_eventhubdataconnection_types.go @@ -37,7 +37,7 @@ type EventHubDataConnectionInitParameters struct { DatabaseRoutingType *string `json:"databaseRoutingType,omitempty" tf:"database_routing_type,omitempty"` // Specifies the resource id of the EventHub this data connection will use for ingestion. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta1.EventHub + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta2.EventHub // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() EventHubID *string `json:"eventhubId,omitempty" tf:"eventhub_id,omitempty"` @@ -113,7 +113,7 @@ type EventHubDataConnectionObservation struct { type EventHubDataConnectionParameters struct { // Specifies the name of the Kusto Cluster this data connection will be added to. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/kusto/v1beta1.Cluster + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/kusto/v1beta2.Cluster // +kubebuilder:validation:Optional ClusterName *string `json:"clusterName,omitempty" tf:"cluster_name,omitempty"` @@ -164,7 +164,7 @@ type EventHubDataConnectionParameters struct { DatabaseRoutingType *string `json:"databaseRoutingType,omitempty" tf:"database_routing_type,omitempty"` // Specifies the resource id of the EventHub this data connection will use for ingestion. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta1.EventHub + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta2.EventHub // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional EventHubID *string `json:"eventhubId,omitempty" tf:"eventhub_id,omitempty"` diff --git a/apis/kusto/v1beta1/zz_generated.conversion_hubs.go b/apis/kusto/v1beta1/zz_generated.conversion_hubs.go index 0c3113dd6..5ebccfd3c 100755 --- a/apis/kusto/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/kusto/v1beta1/zz_generated.conversion_hubs.go @@ -6,12 +6,6 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *AttachedDatabaseConfiguration) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Cluster) Hub() {} - // Hub marks this type as a conversion hub. 
func (tr *ClusterManagedPrivateEndpoint) Hub() {} diff --git a/apis/kusto/v1beta1/zz_generated.conversion_spokes.go b/apis/kusto/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..c062443f9 --- /dev/null +++ b/apis/kusto/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,54 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this AttachedDatabaseConfiguration to the hub type. +func (tr *AttachedDatabaseConfiguration) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the AttachedDatabaseConfiguration type. +func (tr *AttachedDatabaseConfiguration) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Cluster to the hub type. 
+func (tr *Cluster) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Cluster type. +func (tr *Cluster) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/kusto/v1beta1/zz_generated.resolvers.go b/apis/kusto/v1beta1/zz_generated.resolvers.go index b95c74dba..8a338df49 100644 --- a/apis/kusto/v1beta1/zz_generated.resolvers.go +++ b/apis/kusto/v1beta1/zz_generated.resolvers.go @@ -266,7 +266,7 @@ func (mg *ClusterManagedPrivateEndpoint) ResolveReferences(ctx context.Context, var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("kusto.azure.upbound.io", "v1beta1", "Cluster", "ClusterList") + m, l, err = apisresolver.GetManagedResource("kusto.azure.upbound.io", "v1beta2", "Cluster", "ClusterList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -285,7 +285,7 @@ func (mg *ClusterManagedPrivateEndpoint) ResolveReferences(ctx context.Context, mg.Spec.ForProvider.ClusterName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ClusterNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, 
err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -304,7 +304,7 @@ func (mg *ClusterManagedPrivateEndpoint) ResolveReferences(ctx context.Context, mg.Spec.ForProvider.PrivateLinkResourceID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.PrivateLinkResourceIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -342,7 +342,7 @@ func (mg *ClusterManagedPrivateEndpoint) ResolveReferences(ctx context.Context, mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -361,7 +361,7 @@ func (mg *ClusterManagedPrivateEndpoint) ResolveReferences(ctx context.Context, mg.Spec.InitProvider.PrivateLinkResourceID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.PrivateLinkResourceIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the 
reference target managed resource and its list for reference resolution") } @@ -392,7 +392,7 @@ func (mg *ClusterPrincipalAssignment) ResolveReferences(ctx context.Context, c c var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("kusto.azure.upbound.io", "v1beta1", "Cluster", "ClusterList") + m, l, err = apisresolver.GetManagedResource("kusto.azure.upbound.io", "v1beta2", "Cluster", "ClusterList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -442,7 +442,7 @@ func (mg *Database) ResolveReferences(ctx context.Context, c client.Reader) erro var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("kusto.azure.upbound.io", "v1beta1", "Cluster", "ClusterList") + m, l, err = apisresolver.GetManagedResource("kusto.azure.upbound.io", "v1beta2", "Cluster", "ClusterList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -492,7 +492,7 @@ func (mg *DatabasePrincipalAssignment) ResolveReferences(ctx context.Context, c var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("kusto.azure.upbound.io", "v1beta1", "Cluster", "ClusterList") + m, l, err = apisresolver.GetManagedResource("kusto.azure.upbound.io", "v1beta2", "Cluster", "ClusterList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -561,7 +561,7 @@ func (mg *EventGridDataConnection) ResolveReferences(ctx context.Context, c clie var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("kusto.azure.upbound.io", "v1beta1", "Cluster", "ClusterList") + m, l, err = apisresolver.GetManagedResource("kusto.azure.upbound.io", "v1beta2", "Cluster", "ClusterList") if err != nil { return 
errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -618,7 +618,7 @@ func (mg *EventGridDataConnection) ResolveReferences(ctx context.Context, c clie mg.Spec.ForProvider.EventHubConsumerGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.EventHubConsumerGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta1", "EventHub", "EventHubList") + m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta2", "EventHub", "EventHubList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -656,7 +656,7 @@ func (mg *EventGridDataConnection) ResolveReferences(ctx context.Context, c clie mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -694,7 +694,7 @@ func (mg *EventGridDataConnection) ResolveReferences(ctx context.Context, c clie mg.Spec.InitProvider.EventHubConsumerGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.EventHubConsumerGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta1", "EventHub", "EventHubList") + m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta2", "EventHub", "EventHubList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -713,7 +713,7 @@ func 
(mg *EventGridDataConnection) ResolveReferences(ctx context.Context, c clie mg.Spec.InitProvider.EventHubID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.EventHubIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -744,7 +744,7 @@ func (mg *EventHubDataConnection) ResolveReferences(ctx context.Context, c clien var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("kusto.azure.upbound.io", "v1beta1", "Cluster", "ClusterList") + m, l, err = apisresolver.GetManagedResource("kusto.azure.upbound.io", "v1beta2", "Cluster", "ClusterList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -801,7 +801,7 @@ func (mg *EventHubDataConnection) ResolveReferences(ctx context.Context, c clien mg.Spec.ForProvider.DatabaseName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.DatabaseNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta1", "EventHub", "EventHubList") + m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta2", "EventHub", "EventHubList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -858,7 +858,7 @@ func (mg *EventHubDataConnection) ResolveReferences(ctx context.Context, c clien mg.Spec.InitProvider.ConsumerGroup = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.ConsumerGroupRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", 
"v1beta1", "EventHub", "EventHubList") + m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta2", "EventHub", "EventHubList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -889,7 +889,7 @@ func (mg *IOTHubDataConnection) ResolveReferences(ctx context.Context, c client. var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("kusto.azure.upbound.io", "v1beta1", "Cluster", "ClusterList") + m, l, err = apisresolver.GetManagedResource("kusto.azure.upbound.io", "v1beta2", "Cluster", "ClusterList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -946,7 +946,7 @@ func (mg *IOTHubDataConnection) ResolveReferences(ctx context.Context, c client. mg.Spec.ForProvider.DatabaseName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.DatabaseNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("devices.azure.upbound.io", "v1beta1", "IOTHub", "IOTHubList") + m, l, err = apisresolver.GetManagedResource("devices.azure.upbound.io", "v1beta2", "IOTHub", "IOTHubList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1003,7 +1003,7 @@ func (mg *IOTHubDataConnection) ResolveReferences(ctx context.Context, c client. 
mg.Spec.ForProvider.SharedAccessPolicyName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.SharedAccessPolicyNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("kusto.azure.upbound.io", "v1beta1", "Cluster", "ClusterList") + m, l, err = apisresolver.GetManagedResource("kusto.azure.upbound.io", "v1beta2", "Cluster", "ClusterList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1060,7 +1060,7 @@ func (mg *IOTHubDataConnection) ResolveReferences(ctx context.Context, c client. mg.Spec.InitProvider.DatabaseName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.DatabaseNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("devices.azure.upbound.io", "v1beta1", "IOTHub", "IOTHubList") + m, l, err = apisresolver.GetManagedResource("devices.azure.upbound.io", "v1beta2", "IOTHub", "IOTHubList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/kusto/v1beta1/zz_iothubdataconnection_types.go b/apis/kusto/v1beta1/zz_iothubdataconnection_types.go index 305261a4f..d51d2c641 100755 --- a/apis/kusto/v1beta1/zz_iothubdataconnection_types.go +++ b/apis/kusto/v1beta1/zz_iothubdataconnection_types.go @@ -16,7 +16,7 @@ import ( type IOTHubDataConnectionInitParameters struct { // Specifies the name of the Kusto Cluster this data connection will be added to. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/kusto/v1beta1.Cluster + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/kusto/v1beta2.Cluster ClusterName *string `json:"clusterName,omitempty" tf:"cluster_name,omitempty"` // Reference to a Cluster in kusto to populate clusterName. 
@@ -62,7 +62,7 @@ type IOTHubDataConnectionInitParameters struct { EventSystemProperties []*string `json:"eventSystemProperties,omitempty" tf:"event_system_properties,omitempty"` // Specifies the resource id of the IotHub this data connection will use for ingestion. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devices/v1beta1.IOTHub + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devices/v1beta2.IOTHub // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() IOTHubID *string `json:"iothubId,omitempty" tf:"iothub_id,omitempty"` @@ -160,7 +160,7 @@ type IOTHubDataConnectionObservation struct { type IOTHubDataConnectionParameters struct { // Specifies the name of the Kusto Cluster this data connection will be added to. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/kusto/v1beta1.Cluster + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/kusto/v1beta2.Cluster // +kubebuilder:validation:Optional ClusterName *string `json:"clusterName,omitempty" tf:"cluster_name,omitempty"` @@ -212,7 +212,7 @@ type IOTHubDataConnectionParameters struct { EventSystemProperties []*string `json:"eventSystemProperties,omitempty" tf:"event_system_properties,omitempty"` // Specifies the resource id of the IotHub this data connection will use for ingestion. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devices/v1beta1.IOTHub + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devices/v1beta2.IOTHub // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional IOTHubID *string `json:"iothubId,omitempty" tf:"iothub_id,omitempty"` diff --git a/apis/kusto/v1beta2/zz_attacheddatabaseconfiguration_terraformed.go b/apis/kusto/v1beta2/zz_attacheddatabaseconfiguration_terraformed.go new file mode 100755 index 000000000..b82d8f6b3 --- /dev/null +++ b/apis/kusto/v1beta2/zz_attacheddatabaseconfiguration_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this AttachedDatabaseConfiguration +func (mg *AttachedDatabaseConfiguration) GetTerraformResourceType() string { + return "azurerm_kusto_attached_database_configuration" +} + +// GetConnectionDetailsMapping for this AttachedDatabaseConfiguration +func (tr *AttachedDatabaseConfiguration) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this AttachedDatabaseConfiguration +func (tr *AttachedDatabaseConfiguration) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this AttachedDatabaseConfiguration +func (tr *AttachedDatabaseConfiguration) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } 
+ return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this AttachedDatabaseConfiguration +func (tr *AttachedDatabaseConfiguration) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this AttachedDatabaseConfiguration +func (tr *AttachedDatabaseConfiguration) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this AttachedDatabaseConfiguration +func (tr *AttachedDatabaseConfiguration) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this AttachedDatabaseConfiguration +func (tr *AttachedDatabaseConfiguration) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this AttachedDatabaseConfiguration +func (tr *AttachedDatabaseConfiguration) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. 
As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this AttachedDatabaseConfiguration using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *AttachedDatabaseConfiguration) LateInitialize(attrs []byte) (bool, error) { + params := &AttachedDatabaseConfigurationParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *AttachedDatabaseConfiguration) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/kusto/v1beta2/zz_attacheddatabaseconfiguration_types.go b/apis/kusto/v1beta2/zz_attacheddatabaseconfiguration_types.go new file mode 100755 index 000000000..00c3f3f5e --- /dev/null +++ b/apis/kusto/v1beta2/zz_attacheddatabaseconfiguration_types.go @@ -0,0 +1,332 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AttachedDatabaseConfigurationInitParameters struct { + + // Specifies the name of the Kusto Cluster for which the configuration will be created. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/kusto/v1beta2.Cluster + ClusterName *string `json:"clusterName,omitempty" tf:"cluster_name,omitempty"` + + // Reference to a Cluster in kusto to populate clusterName. + // +kubebuilder:validation:Optional + ClusterNameRef *v1.Reference `json:"clusterNameRef,omitempty" tf:"-"` + + // Selector for a Cluster in kusto to populate clusterName. + // +kubebuilder:validation:Optional + ClusterNameSelector *v1.Selector `json:"clusterNameSelector,omitempty" tf:"-"` + + // The resource id of the cluster where the databases you would like to attach reside. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/kusto/v1beta2.Cluster + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + ClusterResourceID *string `json:"clusterResourceId,omitempty" tf:"cluster_resource_id,omitempty"` + + // Reference to a Cluster in kusto to populate clusterResourceId. + // +kubebuilder:validation:Optional + ClusterResourceIDRef *v1.Reference `json:"clusterResourceIdRef,omitempty" tf:"-"` + + // Selector for a Cluster in kusto to populate clusterResourceId. + // +kubebuilder:validation:Optional + ClusterResourceIDSelector *v1.Selector `json:"clusterResourceIdSelector,omitempty" tf:"-"` + + // The name of the database which you would like to attach, use * if you want to follow all current and future databases. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/kusto/v1beta1.Database + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // Reference to a Database in kusto to populate databaseName. + // +kubebuilder:validation:Optional + DatabaseNameRef *v1.Reference `json:"databaseNameRef,omitempty" tf:"-"` + + // Selector for a Database in kusto to populate databaseName. + // +kubebuilder:validation:Optional + DatabaseNameSelector *v1.Selector `json:"databaseNameSelector,omitempty" tf:"-"` + + // The default principals modification kind. Valid values are: None (default), Replace and Union. Defaults to None. + DefaultPrincipalModificationKind *string `json:"defaultPrincipalModificationKind,omitempty" tf:"default_principal_modification_kind,omitempty"` + + // Specifies the location of the Kusto Cluster for which the configuration will be created. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the Kusto Attached Database Configuration to create. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies the resource group of the Kusto Cluster for which the configuration will be created. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. 
+ // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A sharing block as defined below. + Sharing *SharingInitParameters `json:"sharing,omitempty" tf:"sharing,omitempty"` +} + +type AttachedDatabaseConfigurationObservation struct { + + // The list of databases from the cluster_resource_id which are currently attached to the cluster. + AttachedDatabaseNames []*string `json:"attachedDatabaseNames,omitempty" tf:"attached_database_names,omitempty"` + + // Specifies the name of the Kusto Cluster for which the configuration will be created. Changing this forces a new resource to be created. + ClusterName *string `json:"clusterName,omitempty" tf:"cluster_name,omitempty"` + + // The resource id of the cluster where the databases you would like to attach reside. Changing this forces a new resource to be created. + ClusterResourceID *string `json:"clusterResourceId,omitempty" tf:"cluster_resource_id,omitempty"` + + // The name of the database which you would like to attach, use * if you want to follow all current and future databases. Changing this forces a new resource to be created. + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // The default principals modification kind. Valid values are: None (default), Replace and Union. Defaults to None. + DefaultPrincipalModificationKind *string `json:"defaultPrincipalModificationKind,omitempty" tf:"default_principal_modification_kind,omitempty"` + + // The Kusto Attached Database Configuration ID. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies the location of the Kusto Cluster for which the configuration will be created. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the Kusto Attached Database Configuration to create. Changing this forces a new resource to be created. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies the resource group of the Kusto Cluster for which the configuration will be created. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A sharing block as defined below. + Sharing *SharingObservation `json:"sharing,omitempty" tf:"sharing,omitempty"` +} + +type AttachedDatabaseConfigurationParameters struct { + + // Specifies the name of the Kusto Cluster for which the configuration will be created. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/kusto/v1beta2.Cluster + // +kubebuilder:validation:Optional + ClusterName *string `json:"clusterName,omitempty" tf:"cluster_name,omitempty"` + + // Reference to a Cluster in kusto to populate clusterName. + // +kubebuilder:validation:Optional + ClusterNameRef *v1.Reference `json:"clusterNameRef,omitempty" tf:"-"` + + // Selector for a Cluster in kusto to populate clusterName. + // +kubebuilder:validation:Optional + ClusterNameSelector *v1.Selector `json:"clusterNameSelector,omitempty" tf:"-"` + + // The resource id of the cluster where the databases you would like to attach reside. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/kusto/v1beta2.Cluster + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + ClusterResourceID *string `json:"clusterResourceId,omitempty" tf:"cluster_resource_id,omitempty"` + + // Reference to a Cluster in kusto to populate clusterResourceId. + // +kubebuilder:validation:Optional + ClusterResourceIDRef *v1.Reference `json:"clusterResourceIdRef,omitempty" tf:"-"` + + // Selector for a Cluster in kusto to populate clusterResourceId. 
+ // +kubebuilder:validation:Optional + ClusterResourceIDSelector *v1.Selector `json:"clusterResourceIdSelector,omitempty" tf:"-"` + + // The name of the database which you would like to attach, use * if you want to follow all current and future databases. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/kusto/v1beta1.Database + // +kubebuilder:validation:Optional + DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` + + // Reference to a Database in kusto to populate databaseName. + // +kubebuilder:validation:Optional + DatabaseNameRef *v1.Reference `json:"databaseNameRef,omitempty" tf:"-"` + + // Selector for a Database in kusto to populate databaseName. + // +kubebuilder:validation:Optional + DatabaseNameSelector *v1.Selector `json:"databaseNameSelector,omitempty" tf:"-"` + + // The default principals modification kind. Valid values are: None (default), Replace and Union. Defaults to None. + // +kubebuilder:validation:Optional + DefaultPrincipalModificationKind *string `json:"defaultPrincipalModificationKind,omitempty" tf:"default_principal_modification_kind,omitempty"` + + // Specifies the location of the Kusto Cluster for which the configuration will be created. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the Kusto Attached Database Configuration to create. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies the resource group of the Kusto Cluster for which the configuration will be created. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A sharing block as defined below. + // +kubebuilder:validation:Optional + Sharing *SharingParameters `json:"sharing,omitempty" tf:"sharing,omitempty"` +} + +type SharingInitParameters struct { + + // List of external tables exclude from the follower database. + // +listType=set + ExternalTablesToExclude []*string `json:"externalTablesToExclude,omitempty" tf:"external_tables_to_exclude,omitempty"` + + // List of external tables to include in the follower database. + // +listType=set + ExternalTablesToInclude []*string `json:"externalTablesToInclude,omitempty" tf:"external_tables_to_include,omitempty"` + + // List of materialized views exclude from the follower database. + // +listType=set + MaterializedViewsToExclude []*string `json:"materializedViewsToExclude,omitempty" tf:"materialized_views_to_exclude,omitempty"` + + // List of materialized views to include in the follower database. + // +listType=set + MaterializedViewsToInclude []*string `json:"materializedViewsToInclude,omitempty" tf:"materialized_views_to_include,omitempty"` + + // List of tables to exclude from the follower database. + // +listType=set + TablesToExclude []*string `json:"tablesToExclude,omitempty" tf:"tables_to_exclude,omitempty"` + + // List of tables to include in the follower database. 
+ // +listType=set + TablesToInclude []*string `json:"tablesToInclude,omitempty" tf:"tables_to_include,omitempty"` +} + +type SharingObservation struct { + + // List of external tables exclude from the follower database. + // +listType=set + ExternalTablesToExclude []*string `json:"externalTablesToExclude,omitempty" tf:"external_tables_to_exclude,omitempty"` + + // List of external tables to include in the follower database. + // +listType=set + ExternalTablesToInclude []*string `json:"externalTablesToInclude,omitempty" tf:"external_tables_to_include,omitempty"` + + // List of materialized views exclude from the follower database. + // +listType=set + MaterializedViewsToExclude []*string `json:"materializedViewsToExclude,omitempty" tf:"materialized_views_to_exclude,omitempty"` + + // List of materialized views to include in the follower database. + // +listType=set + MaterializedViewsToInclude []*string `json:"materializedViewsToInclude,omitempty" tf:"materialized_views_to_include,omitempty"` + + // List of tables to exclude from the follower database. + // +listType=set + TablesToExclude []*string `json:"tablesToExclude,omitempty" tf:"tables_to_exclude,omitempty"` + + // List of tables to include in the follower database. + // +listType=set + TablesToInclude []*string `json:"tablesToInclude,omitempty" tf:"tables_to_include,omitempty"` +} + +type SharingParameters struct { + + // List of external tables exclude from the follower database. + // +kubebuilder:validation:Optional + // +listType=set + ExternalTablesToExclude []*string `json:"externalTablesToExclude,omitempty" tf:"external_tables_to_exclude,omitempty"` + + // List of external tables to include in the follower database. + // +kubebuilder:validation:Optional + // +listType=set + ExternalTablesToInclude []*string `json:"externalTablesToInclude,omitempty" tf:"external_tables_to_include,omitempty"` + + // List of materialized views exclude from the follower database. 
+ // +kubebuilder:validation:Optional + // +listType=set + MaterializedViewsToExclude []*string `json:"materializedViewsToExclude,omitempty" tf:"materialized_views_to_exclude,omitempty"` + + // List of materialized views to include in the follower database. + // +kubebuilder:validation:Optional + // +listType=set + MaterializedViewsToInclude []*string `json:"materializedViewsToInclude,omitempty" tf:"materialized_views_to_include,omitempty"` + + // List of tables to exclude from the follower database. + // +kubebuilder:validation:Optional + // +listType=set + TablesToExclude []*string `json:"tablesToExclude,omitempty" tf:"tables_to_exclude,omitempty"` + + // List of tables to include in the follower database. + // +kubebuilder:validation:Optional + // +listType=set + TablesToInclude []*string `json:"tablesToInclude,omitempty" tf:"tables_to_include,omitempty"` +} + +// AttachedDatabaseConfigurationSpec defines the desired state of AttachedDatabaseConfiguration +type AttachedDatabaseConfigurationSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider AttachedDatabaseConfigurationParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider AttachedDatabaseConfigurationInitParameters `json:"initProvider,omitempty"` +} + +// AttachedDatabaseConfigurationStatus defines the observed state of AttachedDatabaseConfiguration. 
+type AttachedDatabaseConfigurationStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider AttachedDatabaseConfigurationObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// AttachedDatabaseConfiguration is the Schema for the AttachedDatabaseConfigurations API. Manages Kusto / Data Explorer Attached Database Configuration +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type AttachedDatabaseConfiguration struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec AttachedDatabaseConfigurationSpec `json:"spec"` + Status AttachedDatabaseConfigurationStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// AttachedDatabaseConfigurationList contains a list of AttachedDatabaseConfigurations +type AttachedDatabaseConfigurationList struct { + metav1.TypeMeta 
`json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []AttachedDatabaseConfiguration `json:"items"` +} + +// Repository type metadata. +var ( + AttachedDatabaseConfiguration_Kind = "AttachedDatabaseConfiguration" + AttachedDatabaseConfiguration_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: AttachedDatabaseConfiguration_Kind}.String() + AttachedDatabaseConfiguration_KindAPIVersion = AttachedDatabaseConfiguration_Kind + "." + CRDGroupVersion.String() + AttachedDatabaseConfiguration_GroupVersionKind = CRDGroupVersion.WithKind(AttachedDatabaseConfiguration_Kind) +) + +func init() { + SchemeBuilder.Register(&AttachedDatabaseConfiguration{}, &AttachedDatabaseConfigurationList{}) +} diff --git a/apis/kusto/v1beta2/zz_cluster_terraformed.go b/apis/kusto/v1beta2/zz_cluster_terraformed.go new file mode 100755 index 000000000..c785847af --- /dev/null +++ b/apis/kusto/v1beta2/zz_cluster_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Cluster +func (mg *Cluster) GetTerraformResourceType() string { + return "azurerm_kusto_cluster" +} + +// GetConnectionDetailsMapping for this Cluster +func (tr *Cluster) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Cluster +func (tr *Cluster) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Cluster +func (tr *Cluster) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Cluster +func (tr *Cluster) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Cluster +func (tr *Cluster) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Cluster +func (tr *Cluster) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Cluster +func (tr *Cluster) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// 
GetInitParameters of this Cluster +func (tr *Cluster) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Cluster using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Cluster) LateInitialize(attrs []byte) (bool, error) { + params := &ClusterParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Cluster) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/kusto/v1beta2/zz_cluster_types.go b/apis/kusto/v1beta2/zz_cluster_types.go new file mode 100755 index 000000000..12e312cd6 --- /dev/null +++ b/apis/kusto/v1beta2/zz_cluster_types.go @@ -0,0 +1,471 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ClusterInitParameters struct { + + // List of allowed FQDNs(Fully Qualified Domain Name) for egress from Cluster. + AllowedFqdns []*string `json:"allowedFqdns,omitempty" tf:"allowed_fqdns,omitempty"` + + // The list of ips in the format of CIDR allowed to connect to the cluster. + AllowedIPRanges []*string `json:"allowedIpRanges,omitempty" tf:"allowed_ip_ranges,omitempty"` + + // Specifies if the cluster could be automatically stopped (due to lack of data or no activity for many days). Defaults to true. + AutoStopEnabled *bool `json:"autoStopEnabled,omitempty" tf:"auto_stop_enabled,omitempty"` + + // Specifies if the cluster's disks are encrypted. + DiskEncryptionEnabled *bool `json:"diskEncryptionEnabled,omitempty" tf:"disk_encryption_enabled,omitempty"` + + // Is the cluster's double encryption enabled? Changing this forces a new resource to be created. + DoubleEncryptionEnabled *bool `json:"doubleEncryptionEnabled,omitempty" tf:"double_encryption_enabled,omitempty"` + + Engine *string `json:"engine,omitempty" tf:"engine,omitempty"` + + // An identity block as defined below. + Identity *IdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // An list of language_extensions to enable. 
Valid values are: PYTHON, PYTHON_3.10.8 and R. PYTHON is used to specify Python 3.6.5 image and PYTHON_3.10.8 is used to specify Python 3.10.8 image. Note that PYTHON_3.10.8 is only available in skus which support nested virtualization. + // +listType=set + LanguageExtensions []*string `json:"languageExtensions,omitempty" tf:"language_extensions,omitempty"` + + // The location where the Kusto Cluster should be created. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // An optimized_auto_scale block as defined below. + OptimizedAutoScale *OptimizedAutoScaleInitParameters `json:"optimizedAutoScale,omitempty" tf:"optimized_auto_scale,omitempty"` + + // Whether to restrict outbound network access. Value is optional but if passed in, must be true or false, default is false. + OutboundNetworkAccessRestricted *bool `json:"outboundNetworkAccessRestricted,omitempty" tf:"outbound_network_access_restricted,omitempty"` + + // Indicates what public IP type to create - IPv4 (default), or DualStack (both IPv4 and IPv6). Defaults to IPv4. + PublicIPType *string `json:"publicIpType,omitempty" tf:"public_ip_type,omitempty"` + + // Is the public network access enabled? Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // Specifies if the purge operations are enabled. + PurgeEnabled *bool `json:"purgeEnabled,omitempty" tf:"purge_enabled,omitempty"` + + // A sku block as defined below. + Sku *SkuInitParameters `json:"sku,omitempty" tf:"sku,omitempty"` + + // Specifies if the streaming ingest is enabled. + StreamingIngestionEnabled *bool `json:"streamingIngestionEnabled,omitempty" tf:"streaming_ingestion_enabled,omitempty"` + + // A mapping of tags to assign to the resource. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies a list of tenant IDs that are trusted by the cluster. Default setting trusts all other tenants. Use trusted_external_tenants = ["*"] to explicitly allow all other tenants, trusted_external_tenants = ["MyTenantOnly"] for only your tenant or trusted_external_tenants = ["", ""] to allow specific other tenants. + TrustedExternalTenants []*string `json:"trustedExternalTenants,omitempty" tf:"trusted_external_tenants,omitempty"` + + // A virtual_network_configuration block as defined below. Changing this forces a new resource to be created. + VirtualNetworkConfiguration *VirtualNetworkConfigurationInitParameters `json:"virtualNetworkConfiguration,omitempty" tf:"virtual_network_configuration,omitempty"` + + // Specifies a list of Availability Zones in which this Kusto Cluster should be located. Changing this forces a new Kusto Cluster to be created. + // +listType=set + Zones []*string `json:"zones,omitempty" tf:"zones,omitempty"` +} + +type ClusterObservation struct { + + // List of allowed FQDNs(Fully Qualified Domain Name) for egress from Cluster. + AllowedFqdns []*string `json:"allowedFqdns,omitempty" tf:"allowed_fqdns,omitempty"` + + // The list of ips in the format of CIDR allowed to connect to the cluster. + AllowedIPRanges []*string `json:"allowedIpRanges,omitempty" tf:"allowed_ip_ranges,omitempty"` + + // Specifies if the cluster could be automatically stopped (due to lack of data or no activity for many days). Defaults to true. + AutoStopEnabled *bool `json:"autoStopEnabled,omitempty" tf:"auto_stop_enabled,omitempty"` + + // The Kusto Cluster URI to be used for data ingestion. + DataIngestionURI *string `json:"dataIngestionUri,omitempty" tf:"data_ingestion_uri,omitempty"` + + // Specifies if the cluster's disks are encrypted. 
+ DiskEncryptionEnabled *bool `json:"diskEncryptionEnabled,omitempty" tf:"disk_encryption_enabled,omitempty"` + + // Is the cluster's double encryption enabled? Changing this forces a new resource to be created. + DoubleEncryptionEnabled *bool `json:"doubleEncryptionEnabled,omitempty" tf:"double_encryption_enabled,omitempty"` + + Engine *string `json:"engine,omitempty" tf:"engine,omitempty"` + + // The Kusto Cluster ID. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. + Identity *IdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // An list of language_extensions to enable. Valid values are: PYTHON, PYTHON_3.10.8 and R. PYTHON is used to specify Python 3.6.5 image and PYTHON_3.10.8 is used to specify Python 3.10.8 image. Note that PYTHON_3.10.8 is only available in skus which support nested virtualization. + // +listType=set + LanguageExtensions []*string `json:"languageExtensions,omitempty" tf:"language_extensions,omitempty"` + + // The location where the Kusto Cluster should be created. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // An optimized_auto_scale block as defined below. + OptimizedAutoScale *OptimizedAutoScaleObservation `json:"optimizedAutoScale,omitempty" tf:"optimized_auto_scale,omitempty"` + + // Whether to restrict outbound network access. Value is optional but if passed in, must be true or false, default is false. + OutboundNetworkAccessRestricted *bool `json:"outboundNetworkAccessRestricted,omitempty" tf:"outbound_network_access_restricted,omitempty"` + + // Indicates what public IP type to create - IPv4 (default), or DualStack (both IPv4 and IPv6). Defaults to IPv4. + PublicIPType *string `json:"publicIpType,omitempty" tf:"public_ip_type,omitempty"` + + // Is the public network access enabled? Defaults to true. 
+ PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // Specifies if the purge operations are enabled. + PurgeEnabled *bool `json:"purgeEnabled,omitempty" tf:"purge_enabled,omitempty"` + + // Specifies the Resource Group where the Kusto Cluster should exist. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A sku block as defined below. + Sku *SkuObservation `json:"sku,omitempty" tf:"sku,omitempty"` + + // Specifies if the streaming ingest is enabled. + StreamingIngestionEnabled *bool `json:"streamingIngestionEnabled,omitempty" tf:"streaming_ingestion_enabled,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies a list of tenant IDs that are trusted by the cluster. Default setting trusts all other tenants. Use trusted_external_tenants = ["*"] to explicitly allow all other tenants, trusted_external_tenants = ["MyTenantOnly"] for only your tenant or trusted_external_tenants = ["", ""] to allow specific other tenants. + TrustedExternalTenants []*string `json:"trustedExternalTenants,omitempty" tf:"trusted_external_tenants,omitempty"` + + // The FQDN of the Azure Kusto Cluster. + URI *string `json:"uri,omitempty" tf:"uri,omitempty"` + + // A virtual_network_configuration block as defined below. Changing this forces a new resource to be created. + VirtualNetworkConfiguration *VirtualNetworkConfigurationObservation `json:"virtualNetworkConfiguration,omitempty" tf:"virtual_network_configuration,omitempty"` + + // Specifies a list of Availability Zones in which this Kusto Cluster should be located. Changing this forces a new Kusto Cluster to be created. 
+ // +listType=set + Zones []*string `json:"zones,omitempty" tf:"zones,omitempty"` +} + +type ClusterParameters struct { + + // List of allowed FQDNs(Fully Qualified Domain Name) for egress from Cluster. + // +kubebuilder:validation:Optional + AllowedFqdns []*string `json:"allowedFqdns,omitempty" tf:"allowed_fqdns,omitempty"` + + // The list of ips in the format of CIDR allowed to connect to the cluster. + // +kubebuilder:validation:Optional + AllowedIPRanges []*string `json:"allowedIpRanges,omitempty" tf:"allowed_ip_ranges,omitempty"` + + // Specifies if the cluster could be automatically stopped (due to lack of data or no activity for many days). Defaults to true. + // +kubebuilder:validation:Optional + AutoStopEnabled *bool `json:"autoStopEnabled,omitempty" tf:"auto_stop_enabled,omitempty"` + + // Specifies if the cluster's disks are encrypted. + // +kubebuilder:validation:Optional + DiskEncryptionEnabled *bool `json:"diskEncryptionEnabled,omitempty" tf:"disk_encryption_enabled,omitempty"` + + // Is the cluster's double encryption enabled? Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + DoubleEncryptionEnabled *bool `json:"doubleEncryptionEnabled,omitempty" tf:"double_encryption_enabled,omitempty"` + + // +kubebuilder:validation:Optional + Engine *string `json:"engine,omitempty" tf:"engine,omitempty"` + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *IdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // An list of language_extensions to enable. Valid values are: PYTHON, PYTHON_3.10.8 and R. PYTHON is used to specify Python 3.6.5 image and PYTHON_3.10.8 is used to specify Python 3.10.8 image. Note that PYTHON_3.10.8 is only available in skus which support nested virtualization. 
+ // +kubebuilder:validation:Optional + // +listType=set + LanguageExtensions []*string `json:"languageExtensions,omitempty" tf:"language_extensions,omitempty"` + + // The location where the Kusto Cluster should be created. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // An optimized_auto_scale block as defined below. + // +kubebuilder:validation:Optional + OptimizedAutoScale *OptimizedAutoScaleParameters `json:"optimizedAutoScale,omitempty" tf:"optimized_auto_scale,omitempty"` + + // Whether to restrict outbound network access. Value is optional but if passed in, must be true or false, default is false. + // +kubebuilder:validation:Optional + OutboundNetworkAccessRestricted *bool `json:"outboundNetworkAccessRestricted,omitempty" tf:"outbound_network_access_restricted,omitempty"` + + // Indicates what public IP type to create - IPv4 (default), or DualStack (both IPv4 and IPv6). Defaults to IPv4. + // +kubebuilder:validation:Optional + PublicIPType *string `json:"publicIpType,omitempty" tf:"public_ip_type,omitempty"` + + // Is the public network access enabled? Defaults to true. + // +kubebuilder:validation:Optional + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // Specifies if the purge operations are enabled. + // +kubebuilder:validation:Optional + PurgeEnabled *bool `json:"purgeEnabled,omitempty" tf:"purge_enabled,omitempty"` + + // Specifies the Resource Group where the Kusto Cluster should exist. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. 
+ // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A sku block as defined below. + // +kubebuilder:validation:Optional + Sku *SkuParameters `json:"sku,omitempty" tf:"sku,omitempty"` + + // Specifies if the streaming ingest is enabled. + // +kubebuilder:validation:Optional + StreamingIngestionEnabled *bool `json:"streamingIngestionEnabled,omitempty" tf:"streaming_ingestion_enabled,omitempty"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies a list of tenant IDs that are trusted by the cluster. Default setting trusts all other tenants. Use trusted_external_tenants = ["*"] to explicitly allow all other tenants, trusted_external_tenants = ["MyTenantOnly"] for only your tenant or trusted_external_tenants = ["", ""] to allow specific other tenants. + // +kubebuilder:validation:Optional + TrustedExternalTenants []*string `json:"trustedExternalTenants,omitempty" tf:"trusted_external_tenants,omitempty"` + + // A virtual_network_configuration block as defined below. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + VirtualNetworkConfiguration *VirtualNetworkConfigurationParameters `json:"virtualNetworkConfiguration,omitempty" tf:"virtual_network_configuration,omitempty"` + + // Specifies a list of Availability Zones in which this Kusto Cluster should be located. Changing this forces a new Kusto Cluster to be created. 
+ // +kubebuilder:validation:Optional + // +listType=set + Zones []*string `json:"zones,omitempty" tf:"zones,omitempty"` +} + +type IdentityInitParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Kusto Cluster. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that is configured on this Kusto Cluster. Possible values are: SystemAssigned, UserAssigned and SystemAssigned, UserAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityObservation struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Kusto Cluster. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Principal ID associated with this System Assigned Managed Service Identity. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID associated with this System Assigned Managed Service Identity. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the type of Managed Service Identity that is configured on this Kusto Cluster. Possible values are: SystemAssigned, UserAssigned and SystemAssigned, UserAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Kusto Cluster. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that is configured on this Kusto Cluster. Possible values are: SystemAssigned, UserAssigned and SystemAssigned, UserAssigned. 
+ // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type OptimizedAutoScaleInitParameters struct { + + // The maximum number of allowed instances. Must between 0 and 1000. + MaximumInstances *float64 `json:"maximumInstances,omitempty" tf:"maximum_instances,omitempty"` + + // The minimum number of allowed instances. Must between 0 and 1000. + MinimumInstances *float64 `json:"minimumInstances,omitempty" tf:"minimum_instances,omitempty"` +} + +type OptimizedAutoScaleObservation struct { + + // The maximum number of allowed instances. Must between 0 and 1000. + MaximumInstances *float64 `json:"maximumInstances,omitempty" tf:"maximum_instances,omitempty"` + + // The minimum number of allowed instances. Must between 0 and 1000. + MinimumInstances *float64 `json:"minimumInstances,omitempty" tf:"minimum_instances,omitempty"` +} + +type OptimizedAutoScaleParameters struct { + + // The maximum number of allowed instances. Must between 0 and 1000. + // +kubebuilder:validation:Optional + MaximumInstances *float64 `json:"maximumInstances" tf:"maximum_instances,omitempty"` + + // The minimum number of allowed instances. Must between 0 and 1000. + // +kubebuilder:validation:Optional + MinimumInstances *float64 `json:"minimumInstances" tf:"minimum_instances,omitempty"` +} + +type SkuInitParameters struct { + + // Specifies the node count for the cluster. Boundaries depend on the SKU name. + Capacity *float64 `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // The name of the SKU. 
Possible values are Dev(No SLA)_Standard_D11_v2, Dev(No SLA)_Standard_E2a_v4, Standard_D14_v2, Standard_D11_v2, Standard_D16d_v5, Standard_D13_v2, Standard_D12_v2, Standard_DS14_v2+4TB_PS, Standard_DS14_v2+3TB_PS, Standard_DS13_v2+1TB_PS, Standard_DS13_v2+2TB_PS, Standard_D32d_v5, Standard_D32d_v4, Standard_EC8ads_v5, Standard_EC8as_v5+1TB_PS, Standard_EC8as_v5+2TB_PS, Standard_EC16ads_v5, Standard_EC16as_v5+4TB_PS, Standard_EC16as_v5+3TB_PS, Standard_E80ids_v4, Standard_E8a_v4, Standard_E8ads_v5, Standard_E8as_v5+1TB_PS, Standard_E8as_v5+2TB_PS, Standard_E8as_v4+1TB_PS, Standard_E8as_v4+2TB_PS, Standard_E8d_v5, Standard_E8d_v4, Standard_E8s_v5+1TB_PS, Standard_E8s_v5+2TB_PS, Standard_E8s_v4+1TB_PS, Standard_E8s_v4+2TB_PS, Standard_E4a_v4, Standard_E4ads_v5, Standard_E4d_v5, Standard_E4d_v4, Standard_E16a_v4, Standard_E16ads_v5, Standard_E16as_v5+4TB_PS, Standard_E16as_v5+3TB_PS, Standard_E16as_v4+4TB_PS, Standard_E16as_v4+3TB_PS, Standard_E16d_v5, Standard_E16d_v4, Standard_E16s_v5+4TB_PS, Standard_E16s_v5+3TB_PS, Standard_E16s_v4+4TB_PS, Standard_E16s_v4+3TB_PS, Standard_E64i_v3, Standard_E2a_v4, Standard_E2ads_v5, Standard_E2d_v5, Standard_E2d_v4, Standard_L8as_v3, Standard_L8s, Standard_L8s_v3, Standard_L8s_v2, Standard_L4s, Standard_L16as_v3, Standard_L16s, Standard_L16s_v3, Standard_L16s_v2, Standard_L32as_v3 and Standard_L32s_v3. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type SkuObservation struct { + + // Specifies the node count for the cluster. Boundaries depend on the SKU name. + Capacity *float64 `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // The name of the SKU. 
Possible values are Dev(No SLA)_Standard_D11_v2, Dev(No SLA)_Standard_E2a_v4, Standard_D14_v2, Standard_D11_v2, Standard_D16d_v5, Standard_D13_v2, Standard_D12_v2, Standard_DS14_v2+4TB_PS, Standard_DS14_v2+3TB_PS, Standard_DS13_v2+1TB_PS, Standard_DS13_v2+2TB_PS, Standard_D32d_v5, Standard_D32d_v4, Standard_EC8ads_v5, Standard_EC8as_v5+1TB_PS, Standard_EC8as_v5+2TB_PS, Standard_EC16ads_v5, Standard_EC16as_v5+4TB_PS, Standard_EC16as_v5+3TB_PS, Standard_E80ids_v4, Standard_E8a_v4, Standard_E8ads_v5, Standard_E8as_v5+1TB_PS, Standard_E8as_v5+2TB_PS, Standard_E8as_v4+1TB_PS, Standard_E8as_v4+2TB_PS, Standard_E8d_v5, Standard_E8d_v4, Standard_E8s_v5+1TB_PS, Standard_E8s_v5+2TB_PS, Standard_E8s_v4+1TB_PS, Standard_E8s_v4+2TB_PS, Standard_E4a_v4, Standard_E4ads_v5, Standard_E4d_v5, Standard_E4d_v4, Standard_E16a_v4, Standard_E16ads_v5, Standard_E16as_v5+4TB_PS, Standard_E16as_v5+3TB_PS, Standard_E16as_v4+4TB_PS, Standard_E16as_v4+3TB_PS, Standard_E16d_v5, Standard_E16d_v4, Standard_E16s_v5+4TB_PS, Standard_E16s_v5+3TB_PS, Standard_E16s_v4+4TB_PS, Standard_E16s_v4+3TB_PS, Standard_E64i_v3, Standard_E2a_v4, Standard_E2ads_v5, Standard_E2d_v5, Standard_E2d_v4, Standard_L8as_v3, Standard_L8s, Standard_L8s_v3, Standard_L8s_v2, Standard_L4s, Standard_L16as_v3, Standard_L16s, Standard_L16s_v3, Standard_L16s_v2, Standard_L32as_v3 and Standard_L32s_v3. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type SkuParameters struct { + + // Specifies the node count for the cluster. Boundaries depend on the SKU name. + // +kubebuilder:validation:Optional + Capacity *float64 `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // The name of the SKU. 
Possible values are Dev(No SLA)_Standard_D11_v2, Dev(No SLA)_Standard_E2a_v4, Standard_D14_v2, Standard_D11_v2, Standard_D16d_v5, Standard_D13_v2, Standard_D12_v2, Standard_DS14_v2+4TB_PS, Standard_DS14_v2+3TB_PS, Standard_DS13_v2+1TB_PS, Standard_DS13_v2+2TB_PS, Standard_D32d_v5, Standard_D32d_v4, Standard_EC8ads_v5, Standard_EC8as_v5+1TB_PS, Standard_EC8as_v5+2TB_PS, Standard_EC16ads_v5, Standard_EC16as_v5+4TB_PS, Standard_EC16as_v5+3TB_PS, Standard_E80ids_v4, Standard_E8a_v4, Standard_E8ads_v5, Standard_E8as_v5+1TB_PS, Standard_E8as_v5+2TB_PS, Standard_E8as_v4+1TB_PS, Standard_E8as_v4+2TB_PS, Standard_E8d_v5, Standard_E8d_v4, Standard_E8s_v5+1TB_PS, Standard_E8s_v5+2TB_PS, Standard_E8s_v4+1TB_PS, Standard_E8s_v4+2TB_PS, Standard_E4a_v4, Standard_E4ads_v5, Standard_E4d_v5, Standard_E4d_v4, Standard_E16a_v4, Standard_E16ads_v5, Standard_E16as_v5+4TB_PS, Standard_E16as_v5+3TB_PS, Standard_E16as_v4+4TB_PS, Standard_E16as_v4+3TB_PS, Standard_E16d_v5, Standard_E16d_v4, Standard_E16s_v5+4TB_PS, Standard_E16s_v5+3TB_PS, Standard_E16s_v4+4TB_PS, Standard_E16s_v4+3TB_PS, Standard_E64i_v3, Standard_E2a_v4, Standard_E2ads_v5, Standard_E2d_v5, Standard_E2d_v4, Standard_L8as_v3, Standard_L8s, Standard_L8s_v3, Standard_L8s_v2, Standard_L4s, Standard_L16as_v3, Standard_L16s, Standard_L16s_v3, Standard_L16s_v2, Standard_L32as_v3 and Standard_L32s_v3. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type VirtualNetworkConfigurationInitParameters struct { + + // Data management's service public IP address resource id. + DataManagementPublicIPID *string `json:"dataManagementPublicIpId,omitempty" tf:"data_management_public_ip_id,omitempty"` + + // Engine service's public IP address resource id. + EnginePublicIPID *string `json:"enginePublicIpId,omitempty" tf:"engine_public_ip_id,omitempty"` + + // The subnet resource id. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` +} + +type VirtualNetworkConfigurationObservation struct { + + // Data management's service public IP address resource id. + DataManagementPublicIPID *string `json:"dataManagementPublicIpId,omitempty" tf:"data_management_public_ip_id,omitempty"` + + // Engine service's public IP address resource id. + EnginePublicIPID *string `json:"enginePublicIpId,omitempty" tf:"engine_public_ip_id,omitempty"` + + // The subnet resource id. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` +} + +type VirtualNetworkConfigurationParameters struct { + + // Data management's service public IP address resource id. + // +kubebuilder:validation:Optional + DataManagementPublicIPID *string `json:"dataManagementPublicIpId" tf:"data_management_public_ip_id,omitempty"` + + // Engine service's public IP address resource id. + // +kubebuilder:validation:Optional + EnginePublicIPID *string `json:"enginePublicIpId" tf:"engine_public_ip_id,omitempty"` + + // The subnet resource id. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` +} + +// ClusterSpec defines the desired state of Cluster +type ClusterSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ClusterParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ClusterInitParameters `json:"initProvider,omitempty"` +} + +// ClusterStatus defines the observed state of Cluster. +type ClusterStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ClusterObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Cluster is the Schema for the Clusters API. 
Manages Kusto (also known as Azure Data Explorer) Cluster +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type Cluster struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.sku) || (has(self.initProvider) && has(self.initProvider.sku))",message="spec.forProvider.sku is a required parameter" + Spec ClusterSpec `json:"spec"` + Status ClusterStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ClusterList contains a list of Clusters +type ClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Cluster `json:"items"` +} + +// Repository type metadata. +var ( + Cluster_Kind = "Cluster" + Cluster_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Cluster_Kind}.String() + Cluster_KindAPIVersion = Cluster_Kind + "." 
+ CRDGroupVersion.String() + Cluster_GroupVersionKind = CRDGroupVersion.WithKind(Cluster_Kind) +) + +func init() { + SchemeBuilder.Register(&Cluster{}, &ClusterList{}) +} diff --git a/apis/kusto/v1beta2/zz_generated.conversion_hubs.go b/apis/kusto/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 000000000..0b3081f4e --- /dev/null +++ b/apis/kusto/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,13 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *AttachedDatabaseConfiguration) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Cluster) Hub() {} diff --git a/apis/kusto/v1beta2/zz_generated.deepcopy.go b/apis/kusto/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..1d469a40e --- /dev/null +++ b/apis/kusto/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,1572 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AttachedDatabaseConfiguration) DeepCopyInto(out *AttachedDatabaseConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AttachedDatabaseConfiguration. 
+func (in *AttachedDatabaseConfiguration) DeepCopy() *AttachedDatabaseConfiguration { + if in == nil { + return nil + } + out := new(AttachedDatabaseConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AttachedDatabaseConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AttachedDatabaseConfigurationInitParameters) DeepCopyInto(out *AttachedDatabaseConfigurationInitParameters) { + *out = *in + if in.ClusterName != nil { + in, out := &in.ClusterName, &out.ClusterName + *out = new(string) + **out = **in + } + if in.ClusterNameRef != nil { + in, out := &in.ClusterNameRef, &out.ClusterNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ClusterNameSelector != nil { + in, out := &in.ClusterNameSelector, &out.ClusterNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ClusterResourceID != nil { + in, out := &in.ClusterResourceID, &out.ClusterResourceID + *out = new(string) + **out = **in + } + if in.ClusterResourceIDRef != nil { + in, out := &in.ClusterResourceIDRef, &out.ClusterResourceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ClusterResourceIDSelector != nil { + in, out := &in.ClusterResourceIDSelector, &out.ClusterResourceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.DatabaseNameRef != nil { + in, out := &in.DatabaseNameRef, &out.DatabaseNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DatabaseNameSelector != nil { + in, out := &in.DatabaseNameSelector, &out.DatabaseNameSelector + *out = new(v1.Selector) + 
(*in).DeepCopyInto(*out) + } + if in.DefaultPrincipalModificationKind != nil { + in, out := &in.DefaultPrincipalModificationKind, &out.DefaultPrincipalModificationKind + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Sharing != nil { + in, out := &in.Sharing, &out.Sharing + *out = new(SharingInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AttachedDatabaseConfigurationInitParameters. +func (in *AttachedDatabaseConfigurationInitParameters) DeepCopy() *AttachedDatabaseConfigurationInitParameters { + if in == nil { + return nil + } + out := new(AttachedDatabaseConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AttachedDatabaseConfigurationList) DeepCopyInto(out *AttachedDatabaseConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AttachedDatabaseConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AttachedDatabaseConfigurationList. +func (in *AttachedDatabaseConfigurationList) DeepCopy() *AttachedDatabaseConfigurationList { + if in == nil { + return nil + } + out := new(AttachedDatabaseConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AttachedDatabaseConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AttachedDatabaseConfigurationObservation) DeepCopyInto(out *AttachedDatabaseConfigurationObservation) { + *out = *in + if in.AttachedDatabaseNames != nil { + in, out := &in.AttachedDatabaseNames, &out.AttachedDatabaseNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClusterName != nil { + in, out := &in.ClusterName, &out.ClusterName + *out = new(string) + **out = **in + } + if in.ClusterResourceID != nil { + in, out := &in.ClusterResourceID, &out.ClusterResourceID + *out = new(string) + **out = **in + } + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.DefaultPrincipalModificationKind != nil { + in, out := &in.DefaultPrincipalModificationKind, &out.DefaultPrincipalModificationKind + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Sharing != nil { + in, out := &in.Sharing, &out.Sharing + *out = new(SharingObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AttachedDatabaseConfigurationObservation. +func (in *AttachedDatabaseConfigurationObservation) DeepCopy() *AttachedDatabaseConfigurationObservation { + if in == nil { + return nil + } + out := new(AttachedDatabaseConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AttachedDatabaseConfigurationParameters) DeepCopyInto(out *AttachedDatabaseConfigurationParameters) { + *out = *in + if in.ClusterName != nil { + in, out := &in.ClusterName, &out.ClusterName + *out = new(string) + **out = **in + } + if in.ClusterNameRef != nil { + in, out := &in.ClusterNameRef, &out.ClusterNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ClusterNameSelector != nil { + in, out := &in.ClusterNameSelector, &out.ClusterNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ClusterResourceID != nil { + in, out := &in.ClusterResourceID, &out.ClusterResourceID + *out = new(string) + **out = **in + } + if in.ClusterResourceIDRef != nil { + in, out := &in.ClusterResourceIDRef, &out.ClusterResourceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ClusterResourceIDSelector != nil { + in, out := &in.ClusterResourceIDSelector, &out.ClusterResourceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.DatabaseName != nil { + in, out := &in.DatabaseName, &out.DatabaseName + *out = new(string) + **out = **in + } + if in.DatabaseNameRef != nil { + in, out := &in.DatabaseNameRef, &out.DatabaseNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DatabaseNameSelector != nil { + in, out := &in.DatabaseNameSelector, &out.DatabaseNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.DefaultPrincipalModificationKind != nil { + in, out := &in.DefaultPrincipalModificationKind, &out.DefaultPrincipalModificationKind + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out 
:= &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Sharing != nil { + in, out := &in.Sharing, &out.Sharing + *out = new(SharingParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AttachedDatabaseConfigurationParameters. +func (in *AttachedDatabaseConfigurationParameters) DeepCopy() *AttachedDatabaseConfigurationParameters { + if in == nil { + return nil + } + out := new(AttachedDatabaseConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AttachedDatabaseConfigurationSpec) DeepCopyInto(out *AttachedDatabaseConfigurationSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AttachedDatabaseConfigurationSpec. +func (in *AttachedDatabaseConfigurationSpec) DeepCopy() *AttachedDatabaseConfigurationSpec { + if in == nil { + return nil + } + out := new(AttachedDatabaseConfigurationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AttachedDatabaseConfigurationStatus) DeepCopyInto(out *AttachedDatabaseConfigurationStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AttachedDatabaseConfigurationStatus. 
+func (in *AttachedDatabaseConfigurationStatus) DeepCopy() *AttachedDatabaseConfigurationStatus { + if in == nil { + return nil + } + out := new(AttachedDatabaseConfigurationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster) DeepCopyInto(out *Cluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster. +func (in *Cluster) DeepCopy() *Cluster { + if in == nil { + return nil + } + out := new(Cluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Cluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterInitParameters) DeepCopyInto(out *ClusterInitParameters) { + *out = *in + if in.AllowedFqdns != nil { + in, out := &in.AllowedFqdns, &out.AllowedFqdns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedIPRanges != nil { + in, out := &in.AllowedIPRanges, &out.AllowedIPRanges + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AutoStopEnabled != nil { + in, out := &in.AutoStopEnabled, &out.AutoStopEnabled + *out = new(bool) + **out = **in + } + if in.DiskEncryptionEnabled != nil { + in, out := &in.DiskEncryptionEnabled, &out.DiskEncryptionEnabled + *out = new(bool) + **out = **in + } + if in.DoubleEncryptionEnabled != nil { + in, out := &in.DoubleEncryptionEnabled, &out.DoubleEncryptionEnabled + *out = new(bool) + **out = **in + } + if in.Engine != nil { + in, out := &in.Engine, &out.Engine + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LanguageExtensions != nil { + in, out := &in.LanguageExtensions, &out.LanguageExtensions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.OptimizedAutoScale != nil { + in, out := &in.OptimizedAutoScale, &out.OptimizedAutoScale + *out = new(OptimizedAutoScaleInitParameters) + (*in).DeepCopyInto(*out) + } + if in.OutboundNetworkAccessRestricted != nil { + in, out := &in.OutboundNetworkAccessRestricted, &out.OutboundNetworkAccessRestricted + *out = new(bool) + **out = **in + } + if in.PublicIPType != nil { + in, out 
:= &in.PublicIPType, &out.PublicIPType + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.PurgeEnabled != nil { + in, out := &in.PurgeEnabled, &out.PurgeEnabled + *out = new(bool) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(SkuInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StreamingIngestionEnabled != nil { + in, out := &in.StreamingIngestionEnabled, &out.StreamingIngestionEnabled + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TrustedExternalTenants != nil { + in, out := &in.TrustedExternalTenants, &out.TrustedExternalTenants + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VirtualNetworkConfiguration != nil { + in, out := &in.VirtualNetworkConfiguration, &out.VirtualNetworkConfiguration + *out = new(VirtualNetworkConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterInitParameters. 
+func (in *ClusterInitParameters) DeepCopy() *ClusterInitParameters { + if in == nil { + return nil + } + out := new(ClusterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterList) DeepCopyInto(out *ClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Cluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterList. +func (in *ClusterList) DeepCopy() *ClusterList { + if in == nil { + return nil + } + out := new(ClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterObservation) DeepCopyInto(out *ClusterObservation) { + *out = *in + if in.AllowedFqdns != nil { + in, out := &in.AllowedFqdns, &out.AllowedFqdns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedIPRanges != nil { + in, out := &in.AllowedIPRanges, &out.AllowedIPRanges + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AutoStopEnabled != nil { + in, out := &in.AutoStopEnabled, &out.AutoStopEnabled + *out = new(bool) + **out = **in + } + if in.DataIngestionURI != nil { + in, out := &in.DataIngestionURI, &out.DataIngestionURI + *out = new(string) + **out = **in + } + if in.DiskEncryptionEnabled != nil { + in, out := &in.DiskEncryptionEnabled, &out.DiskEncryptionEnabled + *out = new(bool) + **out = **in + } + if in.DoubleEncryptionEnabled != nil { + in, out := &in.DoubleEncryptionEnabled, &out.DoubleEncryptionEnabled + *out = new(bool) + **out = **in + } + if in.Engine != nil { + in, out := &in.Engine, &out.Engine + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.LanguageExtensions != nil { + in, out := &in.LanguageExtensions, &out.LanguageExtensions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.OptimizedAutoScale != nil { + in, out := &in.OptimizedAutoScale, &out.OptimizedAutoScale + *out = new(OptimizedAutoScaleObservation) + (*in).DeepCopyInto(*out) + } + if 
in.OutboundNetworkAccessRestricted != nil { + in, out := &in.OutboundNetworkAccessRestricted, &out.OutboundNetworkAccessRestricted + *out = new(bool) + **out = **in + } + if in.PublicIPType != nil { + in, out := &in.PublicIPType, &out.PublicIPType + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.PurgeEnabled != nil { + in, out := &in.PurgeEnabled, &out.PurgeEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(SkuObservation) + (*in).DeepCopyInto(*out) + } + if in.StreamingIngestionEnabled != nil { + in, out := &in.StreamingIngestionEnabled, &out.StreamingIngestionEnabled + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TrustedExternalTenants != nil { + in, out := &in.TrustedExternalTenants, &out.TrustedExternalTenants + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(string) + **out = **in + } + if in.VirtualNetworkConfiguration != nil { + in, out := &in.VirtualNetworkConfiguration, &out.VirtualNetworkConfiguration + *out = new(VirtualNetworkConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { 
+ in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterObservation. +func (in *ClusterObservation) DeepCopy() *ClusterObservation { + if in == nil { + return nil + } + out := new(ClusterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterParameters) DeepCopyInto(out *ClusterParameters) { + *out = *in + if in.AllowedFqdns != nil { + in, out := &in.AllowedFqdns, &out.AllowedFqdns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedIPRanges != nil { + in, out := &in.AllowedIPRanges, &out.AllowedIPRanges + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AutoStopEnabled != nil { + in, out := &in.AutoStopEnabled, &out.AutoStopEnabled + *out = new(bool) + **out = **in + } + if in.DiskEncryptionEnabled != nil { + in, out := &in.DiskEncryptionEnabled, &out.DiskEncryptionEnabled + *out = new(bool) + **out = **in + } + if in.DoubleEncryptionEnabled != nil { + in, out := &in.DoubleEncryptionEnabled, &out.DoubleEncryptionEnabled + *out = new(bool) + **out = **in + } + if in.Engine != nil { + in, out := &in.Engine, &out.Engine + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.LanguageExtensions != nil { + in, out := &in.LanguageExtensions, &out.LanguageExtensions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Location != nil 
{ + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.OptimizedAutoScale != nil { + in, out := &in.OptimizedAutoScale, &out.OptimizedAutoScale + *out = new(OptimizedAutoScaleParameters) + (*in).DeepCopyInto(*out) + } + if in.OutboundNetworkAccessRestricted != nil { + in, out := &in.OutboundNetworkAccessRestricted, &out.OutboundNetworkAccessRestricted + *out = new(bool) + **out = **in + } + if in.PublicIPType != nil { + in, out := &in.PublicIPType, &out.PublicIPType + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.PurgeEnabled != nil { + in, out := &in.PurgeEnabled, &out.PurgeEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(SkuParameters) + (*in).DeepCopyInto(*out) + } + if in.StreamingIngestionEnabled != nil { + in, out := &in.StreamingIngestionEnabled, &out.StreamingIngestionEnabled + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TrustedExternalTenants != nil { + in, out := &in.TrustedExternalTenants, &out.TrustedExternalTenants + *out = 
make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VirtualNetworkConfiguration != nil { + in, out := &in.VirtualNetworkConfiguration, &out.VirtualNetworkConfiguration + *out = new(VirtualNetworkConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterParameters. +func (in *ClusterParameters) DeepCopy() *ClusterParameters { + if in == nil { + return nil + } + out := new(ClusterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec. +func (in *ClusterSpec) DeepCopy() *ClusterSpec { + if in == nil { + return nil + } + out := new(ClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStatus. 
+func (in *ClusterStatus) DeepCopy() *ClusterStatus { + if in == nil { + return nil + } + out := new(ClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityInitParameters) DeepCopyInto(out *IdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityInitParameters. +func (in *IdentityInitParameters) DeepCopy() *IdentityInitParameters { + if in == nil { + return nil + } + out := new(IdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityObservation) DeepCopyInto(out *IdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityObservation. 
+func (in *IdentityObservation) DeepCopy() *IdentityObservation { + if in == nil { + return nil + } + out := new(IdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityParameters) DeepCopyInto(out *IdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityParameters. +func (in *IdentityParameters) DeepCopy() *IdentityParameters { + if in == nil { + return nil + } + out := new(IdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OptimizedAutoScaleInitParameters) DeepCopyInto(out *OptimizedAutoScaleInitParameters) { + *out = *in + if in.MaximumInstances != nil { + in, out := &in.MaximumInstances, &out.MaximumInstances + *out = new(float64) + **out = **in + } + if in.MinimumInstances != nil { + in, out := &in.MinimumInstances, &out.MinimumInstances + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OptimizedAutoScaleInitParameters. +func (in *OptimizedAutoScaleInitParameters) DeepCopy() *OptimizedAutoScaleInitParameters { + if in == nil { + return nil + } + out := new(OptimizedAutoScaleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OptimizedAutoScaleObservation) DeepCopyInto(out *OptimizedAutoScaleObservation) { + *out = *in + if in.MaximumInstances != nil { + in, out := &in.MaximumInstances, &out.MaximumInstances + *out = new(float64) + **out = **in + } + if in.MinimumInstances != nil { + in, out := &in.MinimumInstances, &out.MinimumInstances + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OptimizedAutoScaleObservation. +func (in *OptimizedAutoScaleObservation) DeepCopy() *OptimizedAutoScaleObservation { + if in == nil { + return nil + } + out := new(OptimizedAutoScaleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OptimizedAutoScaleParameters) DeepCopyInto(out *OptimizedAutoScaleParameters) { + *out = *in + if in.MaximumInstances != nil { + in, out := &in.MaximumInstances, &out.MaximumInstances + *out = new(float64) + **out = **in + } + if in.MinimumInstances != nil { + in, out := &in.MinimumInstances, &out.MinimumInstances + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OptimizedAutoScaleParameters. +func (in *OptimizedAutoScaleParameters) DeepCopy() *OptimizedAutoScaleParameters { + if in == nil { + return nil + } + out := new(OptimizedAutoScaleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SharingInitParameters) DeepCopyInto(out *SharingInitParameters) { + *out = *in + if in.ExternalTablesToExclude != nil { + in, out := &in.ExternalTablesToExclude, &out.ExternalTablesToExclude + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ExternalTablesToInclude != nil { + in, out := &in.ExternalTablesToInclude, &out.ExternalTablesToInclude + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MaterializedViewsToExclude != nil { + in, out := &in.MaterializedViewsToExclude, &out.MaterializedViewsToExclude + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MaterializedViewsToInclude != nil { + in, out := &in.MaterializedViewsToInclude, &out.MaterializedViewsToInclude + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TablesToExclude != nil { + in, out := &in.TablesToExclude, &out.TablesToExclude + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TablesToInclude != nil { + in, out := &in.TablesToInclude, &out.TablesToInclude + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharingInitParameters. 
+func (in *SharingInitParameters) DeepCopy() *SharingInitParameters { + if in == nil { + return nil + } + out := new(SharingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SharingObservation) DeepCopyInto(out *SharingObservation) { + *out = *in + if in.ExternalTablesToExclude != nil { + in, out := &in.ExternalTablesToExclude, &out.ExternalTablesToExclude + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ExternalTablesToInclude != nil { + in, out := &in.ExternalTablesToInclude, &out.ExternalTablesToInclude + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MaterializedViewsToExclude != nil { + in, out := &in.MaterializedViewsToExclude, &out.MaterializedViewsToExclude + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MaterializedViewsToInclude != nil { + in, out := &in.MaterializedViewsToInclude, &out.MaterializedViewsToInclude + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TablesToExclude != nil { + in, out := &in.TablesToExclude, &out.TablesToExclude + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TablesToInclude != nil { + in, out := &in.TablesToInclude, &out.TablesToInclude + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } 
+ } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharingObservation. +func (in *SharingObservation) DeepCopy() *SharingObservation { + if in == nil { + return nil + } + out := new(SharingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SharingParameters) DeepCopyInto(out *SharingParameters) { + *out = *in + if in.ExternalTablesToExclude != nil { + in, out := &in.ExternalTablesToExclude, &out.ExternalTablesToExclude + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ExternalTablesToInclude != nil { + in, out := &in.ExternalTablesToInclude, &out.ExternalTablesToInclude + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MaterializedViewsToExclude != nil { + in, out := &in.MaterializedViewsToExclude, &out.MaterializedViewsToExclude + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MaterializedViewsToInclude != nil { + in, out := &in.MaterializedViewsToInclude, &out.MaterializedViewsToInclude + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TablesToExclude != nil { + in, out := &in.TablesToExclude, &out.TablesToExclude + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TablesToInclude != nil { + in, out := &in.TablesToInclude, &out.TablesToInclude + *out = make([]*string, len(*in)) + for i := 
range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharingParameters. +func (in *SharingParameters) DeepCopy() *SharingParameters { + if in == nil { + return nil + } + out := new(SharingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SkuInitParameters) DeepCopyInto(out *SkuInitParameters) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SkuInitParameters. +func (in *SkuInitParameters) DeepCopy() *SkuInitParameters { + if in == nil { + return nil + } + out := new(SkuInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SkuObservation) DeepCopyInto(out *SkuObservation) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SkuObservation. +func (in *SkuObservation) DeepCopy() *SkuObservation { + if in == nil { + return nil + } + out := new(SkuObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SkuParameters) DeepCopyInto(out *SkuParameters) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SkuParameters. +func (in *SkuParameters) DeepCopy() *SkuParameters { + if in == nil { + return nil + } + out := new(SkuParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualNetworkConfigurationInitParameters) DeepCopyInto(out *VirtualNetworkConfigurationInitParameters) { + *out = *in + if in.DataManagementPublicIPID != nil { + in, out := &in.DataManagementPublicIPID, &out.DataManagementPublicIPID + *out = new(string) + **out = **in + } + if in.EnginePublicIPID != nil { + in, out := &in.EnginePublicIPID, &out.EnginePublicIPID + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkConfigurationInitParameters. +func (in *VirtualNetworkConfigurationInitParameters) DeepCopy() *VirtualNetworkConfigurationInitParameters { + if in == nil { + return nil + } + out := new(VirtualNetworkConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualNetworkConfigurationObservation) DeepCopyInto(out *VirtualNetworkConfigurationObservation) { + *out = *in + if in.DataManagementPublicIPID != nil { + in, out := &in.DataManagementPublicIPID, &out.DataManagementPublicIPID + *out = new(string) + **out = **in + } + if in.EnginePublicIPID != nil { + in, out := &in.EnginePublicIPID, &out.EnginePublicIPID + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkConfigurationObservation. +func (in *VirtualNetworkConfigurationObservation) DeepCopy() *VirtualNetworkConfigurationObservation { + if in == nil { + return nil + } + out := new(VirtualNetworkConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualNetworkConfigurationParameters) DeepCopyInto(out *VirtualNetworkConfigurationParameters) { + *out = *in + if in.DataManagementPublicIPID != nil { + in, out := &in.DataManagementPublicIPID, &out.DataManagementPublicIPID + *out = new(string) + **out = **in + } + if in.EnginePublicIPID != nil { + in, out := &in.EnginePublicIPID, &out.EnginePublicIPID + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkConfigurationParameters. 
+func (in *VirtualNetworkConfigurationParameters) DeepCopy() *VirtualNetworkConfigurationParameters { + if in == nil { + return nil + } + out := new(VirtualNetworkConfigurationParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/kusto/v1beta2/zz_generated.managed.go b/apis/kusto/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..2034b6289 --- /dev/null +++ b/apis/kusto/v1beta2/zz_generated.managed.go @@ -0,0 +1,128 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this AttachedDatabaseConfiguration. +func (mg *AttachedDatabaseConfiguration) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this AttachedDatabaseConfiguration. +func (mg *AttachedDatabaseConfiguration) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this AttachedDatabaseConfiguration. +func (mg *AttachedDatabaseConfiguration) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this AttachedDatabaseConfiguration. +func (mg *AttachedDatabaseConfiguration) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this AttachedDatabaseConfiguration. +func (mg *AttachedDatabaseConfiguration) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this AttachedDatabaseConfiguration. +func (mg *AttachedDatabaseConfiguration) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this AttachedDatabaseConfiguration. 
+func (mg *AttachedDatabaseConfiguration) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this AttachedDatabaseConfiguration. +func (mg *AttachedDatabaseConfiguration) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this AttachedDatabaseConfiguration. +func (mg *AttachedDatabaseConfiguration) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this AttachedDatabaseConfiguration. +func (mg *AttachedDatabaseConfiguration) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this AttachedDatabaseConfiguration. +func (mg *AttachedDatabaseConfiguration) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this AttachedDatabaseConfiguration. +func (mg *AttachedDatabaseConfiguration) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Cluster. +func (mg *Cluster) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Cluster. +func (mg *Cluster) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Cluster. +func (mg *Cluster) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Cluster. +func (mg *Cluster) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Cluster. 
+func (mg *Cluster) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Cluster. +func (mg *Cluster) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Cluster. +func (mg *Cluster) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Cluster. +func (mg *Cluster) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Cluster. +func (mg *Cluster) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Cluster. +func (mg *Cluster) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Cluster. +func (mg *Cluster) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Cluster. +func (mg *Cluster) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/kusto/v1beta2/zz_generated.managedlist.go b/apis/kusto/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..0faa81b38 --- /dev/null +++ b/apis/kusto/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,26 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this AttachedDatabaseConfigurationList. 
+func (l *AttachedDatabaseConfigurationList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ClusterList. +func (l *ClusterList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/kusto/v1beta2/zz_generated.resolvers.go b/apis/kusto/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..b18cee939 --- /dev/null +++ b/apis/kusto/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,257 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + rconfig "github.com/upbound/provider-azure/apis/rconfig" + apisresolver "github.com/upbound/provider-azure/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *AttachedDatabaseConfiguration) ResolveReferences( // ResolveReferences of this AttachedDatabaseConfiguration. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("kusto.azure.upbound.io", "v1beta2", "Cluster", "ClusterList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ClusterName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ClusterNameRef, + Selector: mg.Spec.ForProvider.ClusterNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ClusterName") + } + mg.Spec.ForProvider.ClusterName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ClusterNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("kusto.azure.upbound.io", "v1beta2", "Cluster", "ClusterList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ClusterResourceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.ClusterResourceIDRef, + Selector: mg.Spec.ForProvider.ClusterResourceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ClusterResourceID") + } + mg.Spec.ForProvider.ClusterResourceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ClusterResourceIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("kusto.azure.upbound.io", "v1beta1", "Database", "DatabaseList") + if err != nil { + return errors.Wrap(err, "failed 
to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DatabaseName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.DatabaseNameRef, + Selector: mg.Spec.ForProvider.DatabaseNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DatabaseName") + } + mg.Spec.ForProvider.DatabaseName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DatabaseNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("kusto.azure.upbound.io", "v1beta2", "Cluster", "ClusterList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ClusterName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ClusterNameRef, + Selector: 
mg.Spec.InitProvider.ClusterNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ClusterName") + } + mg.Spec.InitProvider.ClusterName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ClusterNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("kusto.azure.upbound.io", "v1beta2", "Cluster", "ClusterList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ClusterResourceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.ClusterResourceIDRef, + Selector: mg.Spec.InitProvider.ClusterResourceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ClusterResourceID") + } + mg.Spec.InitProvider.ClusterResourceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ClusterResourceIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("kusto.azure.upbound.io", "v1beta1", "Database", "DatabaseList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DatabaseName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.DatabaseNameRef, + Selector: mg.Spec.InitProvider.DatabaseNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.DatabaseName") + } + mg.Spec.InitProvider.DatabaseName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DatabaseNameRef = rsp.ResolvedReference + { + m, l, err = 
apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ResourceGroupNameRef, + Selector: mg.Spec.InitProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ResourceGroupName") + } + mg.Spec.InitProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Cluster. +func (mg *Cluster) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + if 
mg.Spec.ForProvider.VirtualNetworkConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.VirtualNetworkConfiguration.SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.VirtualNetworkConfiguration.SubnetIDRef, + Selector: mg.Spec.ForProvider.VirtualNetworkConfiguration.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VirtualNetworkConfiguration.SubnetID") + } + mg.Spec.ForProvider.VirtualNetworkConfiguration.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.VirtualNetworkConfiguration.SubnetIDRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.VirtualNetworkConfiguration != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.VirtualNetworkConfiguration.SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.VirtualNetworkConfiguration.SubnetIDRef, + Selector: mg.Spec.InitProvider.VirtualNetworkConfiguration.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.VirtualNetworkConfiguration.SubnetID") + } + mg.Spec.InitProvider.VirtualNetworkConfiguration.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.VirtualNetworkConfiguration.SubnetIDRef 
= rsp.ResolvedReference + + } + + return nil +} diff --git a/apis/kusto/v1beta2/zz_groupversion_info.go b/apis/kusto/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..193d6b063 --- /dev/null +++ b/apis/kusto/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=kusto.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "kusto.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/labservices/v1beta1/zz_generated.conversion_spokes.go b/apis/labservices/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..ef78de851 --- /dev/null +++ b/apis/labservices/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,54 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this LabServiceLab to the hub type. 
+func (tr *LabServiceLab) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the LabServiceLab type. +func (tr *LabServiceLab) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this LabServicePlan to the hub type. +func (tr *LabServicePlan) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the LabServicePlan type. 
+func (tr *LabServicePlan) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/labservices/v1beta1/zz_generated.conversion_hubs.go b/apis/labservices/v1beta2/zz_generated.conversion_hubs.go similarity index 95% rename from apis/labservices/v1beta1/zz_generated.conversion_hubs.go rename to apis/labservices/v1beta2/zz_generated.conversion_hubs.go index c25f4ae37..f95f88efb 100755 --- a/apis/labservices/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/labservices/v1beta2/zz_generated.conversion_hubs.go @@ -4,7 +4,7 @@ // Code generated by upjet. DO NOT EDIT. -package v1beta1 +package v1beta2 // Hub marks this type as a conversion hub. func (tr *LabServiceLab) Hub() {} diff --git a/apis/labservices/v1beta2/zz_generated.deepcopy.go b/apis/labservices/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..ff60f289f --- /dev/null +++ b/apis/labservices/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,1983 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AdminUserInitParameters) DeepCopyInto(out *AdminUserInitParameters) { + *out = *in + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdminUserInitParameters. +func (in *AdminUserInitParameters) DeepCopy() *AdminUserInitParameters { + if in == nil { + return nil + } + out := new(AdminUserInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdminUserObservation) DeepCopyInto(out *AdminUserObservation) { + *out = *in + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdminUserObservation. +func (in *AdminUserObservation) DeepCopy() *AdminUserObservation { + if in == nil { + return nil + } + out := new(AdminUserObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdminUserParameters) DeepCopyInto(out *AdminUserParameters) { + *out = *in + out.PasswordSecretRef = in.PasswordSecretRef + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdminUserParameters. +func (in *AdminUserParameters) DeepCopy() *AdminUserParameters { + if in == nil { + return nil + } + out := new(AdminUserParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutoShutdownInitParameters) DeepCopyInto(out *AutoShutdownInitParameters) { + *out = *in + if in.DisconnectDelay != nil { + in, out := &in.DisconnectDelay, &out.DisconnectDelay + *out = new(string) + **out = **in + } + if in.IdleDelay != nil { + in, out := &in.IdleDelay, &out.IdleDelay + *out = new(string) + **out = **in + } + if in.NoConnectDelay != nil { + in, out := &in.NoConnectDelay, &out.NoConnectDelay + *out = new(string) + **out = **in + } + if in.ShutdownOnIdle != nil { + in, out := &in.ShutdownOnIdle, &out.ShutdownOnIdle + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoShutdownInitParameters. +func (in *AutoShutdownInitParameters) DeepCopy() *AutoShutdownInitParameters { + if in == nil { + return nil + } + out := new(AutoShutdownInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoShutdownObservation) DeepCopyInto(out *AutoShutdownObservation) { + *out = *in + if in.DisconnectDelay != nil { + in, out := &in.DisconnectDelay, &out.DisconnectDelay + *out = new(string) + **out = **in + } + if in.IdleDelay != nil { + in, out := &in.IdleDelay, &out.IdleDelay + *out = new(string) + **out = **in + } + if in.NoConnectDelay != nil { + in, out := &in.NoConnectDelay, &out.NoConnectDelay + *out = new(string) + **out = **in + } + if in.ShutdownOnIdle != nil { + in, out := &in.ShutdownOnIdle, &out.ShutdownOnIdle + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoShutdownObservation. 
+func (in *AutoShutdownObservation) DeepCopy() *AutoShutdownObservation { + if in == nil { + return nil + } + out := new(AutoShutdownObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoShutdownParameters) DeepCopyInto(out *AutoShutdownParameters) { + *out = *in + if in.DisconnectDelay != nil { + in, out := &in.DisconnectDelay, &out.DisconnectDelay + *out = new(string) + **out = **in + } + if in.IdleDelay != nil { + in, out := &in.IdleDelay, &out.IdleDelay + *out = new(string) + **out = **in + } + if in.NoConnectDelay != nil { + in, out := &in.NoConnectDelay, &out.NoConnectDelay + *out = new(string) + **out = **in + } + if in.ShutdownOnIdle != nil { + in, out := &in.ShutdownOnIdle, &out.ShutdownOnIdle + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoShutdownParameters. +func (in *AutoShutdownParameters) DeepCopy() *AutoShutdownParameters { + if in == nil { + return nil + } + out := new(AutoShutdownParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionSettingInitParameters) DeepCopyInto(out *ConnectionSettingInitParameters) { + *out = *in + if in.ClientRdpAccess != nil { + in, out := &in.ClientRdpAccess, &out.ClientRdpAccess + *out = new(string) + **out = **in + } + if in.ClientSSHAccess != nil { + in, out := &in.ClientSSHAccess, &out.ClientSSHAccess + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionSettingInitParameters. 
+func (in *ConnectionSettingInitParameters) DeepCopy() *ConnectionSettingInitParameters { + if in == nil { + return nil + } + out := new(ConnectionSettingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionSettingObservation) DeepCopyInto(out *ConnectionSettingObservation) { + *out = *in + if in.ClientRdpAccess != nil { + in, out := &in.ClientRdpAccess, &out.ClientRdpAccess + *out = new(string) + **out = **in + } + if in.ClientSSHAccess != nil { + in, out := &in.ClientSSHAccess, &out.ClientSSHAccess + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionSettingObservation. +func (in *ConnectionSettingObservation) DeepCopy() *ConnectionSettingObservation { + if in == nil { + return nil + } + out := new(ConnectionSettingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionSettingParameters) DeepCopyInto(out *ConnectionSettingParameters) { + *out = *in + if in.ClientRdpAccess != nil { + in, out := &in.ClientRdpAccess, &out.ClientRdpAccess + *out = new(string) + **out = **in + } + if in.ClientSSHAccess != nil { + in, out := &in.ClientSSHAccess, &out.ClientSSHAccess + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionSettingParameters. +func (in *ConnectionSettingParameters) DeepCopy() *ConnectionSettingParameters { + if in == nil { + return nil + } + out := new(ConnectionSettingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DefaultAutoShutdownInitParameters) DeepCopyInto(out *DefaultAutoShutdownInitParameters) { + *out = *in + if in.DisconnectDelay != nil { + in, out := &in.DisconnectDelay, &out.DisconnectDelay + *out = new(string) + **out = **in + } + if in.IdleDelay != nil { + in, out := &in.IdleDelay, &out.IdleDelay + *out = new(string) + **out = **in + } + if in.NoConnectDelay != nil { + in, out := &in.NoConnectDelay, &out.NoConnectDelay + *out = new(string) + **out = **in + } + if in.ShutdownOnIdle != nil { + in, out := &in.ShutdownOnIdle, &out.ShutdownOnIdle + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultAutoShutdownInitParameters. +func (in *DefaultAutoShutdownInitParameters) DeepCopy() *DefaultAutoShutdownInitParameters { + if in == nil { + return nil + } + out := new(DefaultAutoShutdownInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DefaultAutoShutdownObservation) DeepCopyInto(out *DefaultAutoShutdownObservation) { + *out = *in + if in.DisconnectDelay != nil { + in, out := &in.DisconnectDelay, &out.DisconnectDelay + *out = new(string) + **out = **in + } + if in.IdleDelay != nil { + in, out := &in.IdleDelay, &out.IdleDelay + *out = new(string) + **out = **in + } + if in.NoConnectDelay != nil { + in, out := &in.NoConnectDelay, &out.NoConnectDelay + *out = new(string) + **out = **in + } + if in.ShutdownOnIdle != nil { + in, out := &in.ShutdownOnIdle, &out.ShutdownOnIdle + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultAutoShutdownObservation. 
+func (in *DefaultAutoShutdownObservation) DeepCopy() *DefaultAutoShutdownObservation { + if in == nil { + return nil + } + out := new(DefaultAutoShutdownObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DefaultAutoShutdownParameters) DeepCopyInto(out *DefaultAutoShutdownParameters) { + *out = *in + if in.DisconnectDelay != nil { + in, out := &in.DisconnectDelay, &out.DisconnectDelay + *out = new(string) + **out = **in + } + if in.IdleDelay != nil { + in, out := &in.IdleDelay, &out.IdleDelay + *out = new(string) + **out = **in + } + if in.NoConnectDelay != nil { + in, out := &in.NoConnectDelay, &out.NoConnectDelay + *out = new(string) + **out = **in + } + if in.ShutdownOnIdle != nil { + in, out := &in.ShutdownOnIdle, &out.ShutdownOnIdle + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultAutoShutdownParameters. +func (in *DefaultAutoShutdownParameters) DeepCopy() *DefaultAutoShutdownParameters { + if in == nil { + return nil + } + out := new(DefaultAutoShutdownParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DefaultConnectionInitParameters) DeepCopyInto(out *DefaultConnectionInitParameters) { + *out = *in + if in.ClientRdpAccess != nil { + in, out := &in.ClientRdpAccess, &out.ClientRdpAccess + *out = new(string) + **out = **in + } + if in.ClientSSHAccess != nil { + in, out := &in.ClientSSHAccess, &out.ClientSSHAccess + *out = new(string) + **out = **in + } + if in.WebRdpAccess != nil { + in, out := &in.WebRdpAccess, &out.WebRdpAccess + *out = new(string) + **out = **in + } + if in.WebSSHAccess != nil { + in, out := &in.WebSSHAccess, &out.WebSSHAccess + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultConnectionInitParameters. +func (in *DefaultConnectionInitParameters) DeepCopy() *DefaultConnectionInitParameters { + if in == nil { + return nil + } + out := new(DefaultConnectionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DefaultConnectionObservation) DeepCopyInto(out *DefaultConnectionObservation) { + *out = *in + if in.ClientRdpAccess != nil { + in, out := &in.ClientRdpAccess, &out.ClientRdpAccess + *out = new(string) + **out = **in + } + if in.ClientSSHAccess != nil { + in, out := &in.ClientSSHAccess, &out.ClientSSHAccess + *out = new(string) + **out = **in + } + if in.WebRdpAccess != nil { + in, out := &in.WebRdpAccess, &out.WebRdpAccess + *out = new(string) + **out = **in + } + if in.WebSSHAccess != nil { + in, out := &in.WebSSHAccess, &out.WebSSHAccess + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultConnectionObservation. 
+func (in *DefaultConnectionObservation) DeepCopy() *DefaultConnectionObservation { + if in == nil { + return nil + } + out := new(DefaultConnectionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DefaultConnectionParameters) DeepCopyInto(out *DefaultConnectionParameters) { + *out = *in + if in.ClientRdpAccess != nil { + in, out := &in.ClientRdpAccess, &out.ClientRdpAccess + *out = new(string) + **out = **in + } + if in.ClientSSHAccess != nil { + in, out := &in.ClientSSHAccess, &out.ClientSSHAccess + *out = new(string) + **out = **in + } + if in.WebRdpAccess != nil { + in, out := &in.WebRdpAccess, &out.WebRdpAccess + *out = new(string) + **out = **in + } + if in.WebSSHAccess != nil { + in, out := &in.WebSSHAccess, &out.WebSSHAccess + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultConnectionParameters. +func (in *DefaultConnectionParameters) DeepCopy() *DefaultConnectionParameters { + if in == nil { + return nil + } + out := new(DefaultConnectionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImageReferenceInitParameters) DeepCopyInto(out *ImageReferenceInitParameters) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Offer != nil { + in, out := &in.Offer, &out.Offer + *out = new(string) + **out = **in + } + if in.Publisher != nil { + in, out := &in.Publisher, &out.Publisher + *out = new(string) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageReferenceInitParameters. +func (in *ImageReferenceInitParameters) DeepCopy() *ImageReferenceInitParameters { + if in == nil { + return nil + } + out := new(ImageReferenceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageReferenceObservation) DeepCopyInto(out *ImageReferenceObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Offer != nil { + in, out := &in.Offer, &out.Offer + *out = new(string) + **out = **in + } + if in.Publisher != nil { + in, out := &in.Publisher, &out.Publisher + *out = new(string) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageReferenceObservation. 
+func (in *ImageReferenceObservation) DeepCopy() *ImageReferenceObservation { + if in == nil { + return nil + } + out := new(ImageReferenceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageReferenceParameters) DeepCopyInto(out *ImageReferenceParameters) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Offer != nil { + in, out := &in.Offer, &out.Offer + *out = new(string) + **out = **in + } + if in.Publisher != nil { + in, out := &in.Publisher, &out.Publisher + *out = new(string) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageReferenceParameters. +func (in *ImageReferenceParameters) DeepCopy() *ImageReferenceParameters { + if in == nil { + return nil + } + out := new(ImageReferenceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LabServiceLab) DeepCopyInto(out *LabServiceLab) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabServiceLab. +func (in *LabServiceLab) DeepCopy() *LabServiceLab { + if in == nil { + return nil + } + out := new(LabServiceLab) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *LabServiceLab) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LabServiceLabInitParameters) DeepCopyInto(out *LabServiceLabInitParameters) { + *out = *in + if in.AutoShutdown != nil { + in, out := &in.AutoShutdown, &out.AutoShutdown + *out = new(AutoShutdownInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ConnectionSetting != nil { + in, out := &in.ConnectionSetting, &out.ConnectionSetting + *out = new(ConnectionSettingInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.LabPlanID != nil { + in, out := &in.LabPlanID, &out.LabPlanID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Network != nil { + in, out := &in.Network, &out.Network + *out = new(NetworkInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Roster != nil { + in, out := &in.Roster, &out.Roster + *out = new(RosterInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Security != nil { + in, out := &in.Security, &out.Security + *out = new(SecurityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } + if in.VirtualMachine != nil { + in, out := &in.VirtualMachine, &out.VirtualMachine + *out = new(VirtualMachineInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an 
autogenerated deepcopy function, copying the receiver, creating a new LabServiceLabInitParameters. +func (in *LabServiceLabInitParameters) DeepCopy() *LabServiceLabInitParameters { + if in == nil { + return nil + } + out := new(LabServiceLabInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LabServiceLabList) DeepCopyInto(out *LabServiceLabList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LabServiceLab, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabServiceLabList. +func (in *LabServiceLabList) DeepCopy() *LabServiceLabList { + if in == nil { + return nil + } + out := new(LabServiceLabList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LabServiceLabList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LabServiceLabObservation) DeepCopyInto(out *LabServiceLabObservation) { + *out = *in + if in.AutoShutdown != nil { + in, out := &in.AutoShutdown, &out.AutoShutdown + *out = new(AutoShutdownObservation) + (*in).DeepCopyInto(*out) + } + if in.ConnectionSetting != nil { + in, out := &in.ConnectionSetting, &out.ConnectionSetting + *out = new(ConnectionSettingObservation) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LabPlanID != nil { + in, out := &in.LabPlanID, &out.LabPlanID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Network != nil { + in, out := &in.Network, &out.Network + *out = new(NetworkObservation) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Roster != nil { + in, out := &in.Roster, &out.Roster + *out = new(RosterObservation) + (*in).DeepCopyInto(*out) + } + if in.Security != nil { + in, out := &in.Security, &out.Security + *out = new(SecurityObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } + if in.VirtualMachine != nil { + in, out := &in.VirtualMachine, &out.VirtualMachine + *out = new(VirtualMachineObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the 
receiver, creating a new LabServiceLabObservation. +func (in *LabServiceLabObservation) DeepCopy() *LabServiceLabObservation { + if in == nil { + return nil + } + out := new(LabServiceLabObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LabServiceLabParameters) DeepCopyInto(out *LabServiceLabParameters) { + *out = *in + if in.AutoShutdown != nil { + in, out := &in.AutoShutdown, &out.AutoShutdown + *out = new(AutoShutdownParameters) + (*in).DeepCopyInto(*out) + } + if in.ConnectionSetting != nil { + in, out := &in.ConnectionSetting, &out.ConnectionSetting + *out = new(ConnectionSettingParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.LabPlanID != nil { + in, out := &in.LabPlanID, &out.LabPlanID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Network != nil { + in, out := &in.Network, &out.Network + *out = new(NetworkParameters) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Roster != nil { + in, out := &in.Roster, &out.Roster + *out = new(RosterParameters) + (*in).DeepCopyInto(*out) + } + if in.Security != nil { + in, out := &in.Security, &out.Security + *out = new(SecurityParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, 
&out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } + if in.VirtualMachine != nil { + in, out := &in.VirtualMachine, &out.VirtualMachine + *out = new(VirtualMachineParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabServiceLabParameters. +func (in *LabServiceLabParameters) DeepCopy() *LabServiceLabParameters { + if in == nil { + return nil + } + out := new(LabServiceLabParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LabServiceLabSpec) DeepCopyInto(out *LabServiceLabSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabServiceLabSpec. +func (in *LabServiceLabSpec) DeepCopy() *LabServiceLabSpec { + if in == nil { + return nil + } + out := new(LabServiceLabSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LabServiceLabStatus) DeepCopyInto(out *LabServiceLabStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabServiceLabStatus. 
+func (in *LabServiceLabStatus) DeepCopy() *LabServiceLabStatus { + if in == nil { + return nil + } + out := new(LabServiceLabStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LabServicePlan) DeepCopyInto(out *LabServicePlan) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabServicePlan. +func (in *LabServicePlan) DeepCopy() *LabServicePlan { + if in == nil { + return nil + } + out := new(LabServicePlan) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LabServicePlan) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LabServicePlanInitParameters) DeepCopyInto(out *LabServicePlanInitParameters) { + *out = *in + if in.AllowedRegions != nil { + in, out := &in.AllowedRegions, &out.AllowedRegions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DefaultAutoShutdown != nil { + in, out := &in.DefaultAutoShutdown, &out.DefaultAutoShutdown + *out = new(DefaultAutoShutdownInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DefaultConnection != nil { + in, out := &in.DefaultConnection, &out.DefaultConnection + *out = new(DefaultConnectionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DefaultNetworkSubnetID != nil { + in, out := &in.DefaultNetworkSubnetID, &out.DefaultNetworkSubnetID + *out = new(string) + **out = **in + } + if in.DefaultNetworkSubnetIDRef != nil { + in, out := &in.DefaultNetworkSubnetIDRef, &out.DefaultNetworkSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DefaultNetworkSubnetIDSelector != nil { + in, out := &in.DefaultNetworkSubnetIDSelector, &out.DefaultNetworkSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.SharedGalleryID != nil { + in, out := &in.SharedGalleryID, &out.SharedGalleryID + *out = new(string) + **out = **in + } + if in.Support != nil { + in, out := &in.Support, &out.Support + *out = new(SupportInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
LabServicePlanInitParameters. +func (in *LabServicePlanInitParameters) DeepCopy() *LabServicePlanInitParameters { + if in == nil { + return nil + } + out := new(LabServicePlanInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LabServicePlanList) DeepCopyInto(out *LabServicePlanList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LabServicePlan, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabServicePlanList. +func (in *LabServicePlanList) DeepCopy() *LabServicePlanList { + if in == nil { + return nil + } + out := new(LabServicePlanList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LabServicePlanList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LabServicePlanObservation) DeepCopyInto(out *LabServicePlanObservation) { + *out = *in + if in.AllowedRegions != nil { + in, out := &in.AllowedRegions, &out.AllowedRegions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DefaultAutoShutdown != nil { + in, out := &in.DefaultAutoShutdown, &out.DefaultAutoShutdown + *out = new(DefaultAutoShutdownObservation) + (*in).DeepCopyInto(*out) + } + if in.DefaultConnection != nil { + in, out := &in.DefaultConnection, &out.DefaultConnection + *out = new(DefaultConnectionObservation) + (*in).DeepCopyInto(*out) + } + if in.DefaultNetworkSubnetID != nil { + in, out := &in.DefaultNetworkSubnetID, &out.DefaultNetworkSubnetID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.SharedGalleryID != nil { + in, out := &in.SharedGalleryID, &out.SharedGalleryID + *out = new(string) + **out = **in + } + if in.Support != nil { + in, out := &in.Support, &out.Support + *out = new(SupportObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabServicePlanObservation. 
+func (in *LabServicePlanObservation) DeepCopy() *LabServicePlanObservation { + if in == nil { + return nil + } + out := new(LabServicePlanObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LabServicePlanParameters) DeepCopyInto(out *LabServicePlanParameters) { + *out = *in + if in.AllowedRegions != nil { + in, out := &in.AllowedRegions, &out.AllowedRegions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DefaultAutoShutdown != nil { + in, out := &in.DefaultAutoShutdown, &out.DefaultAutoShutdown + *out = new(DefaultAutoShutdownParameters) + (*in).DeepCopyInto(*out) + } + if in.DefaultConnection != nil { + in, out := &in.DefaultConnection, &out.DefaultConnection + *out = new(DefaultConnectionParameters) + (*in).DeepCopyInto(*out) + } + if in.DefaultNetworkSubnetID != nil { + in, out := &in.DefaultNetworkSubnetID, &out.DefaultNetworkSubnetID + *out = new(string) + **out = **in + } + if in.DefaultNetworkSubnetIDRef != nil { + in, out := &in.DefaultNetworkSubnetIDRef, &out.DefaultNetworkSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DefaultNetworkSubnetIDSelector != nil { + in, out := &in.DefaultNetworkSubnetIDSelector, &out.DefaultNetworkSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, 
&out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SharedGalleryID != nil { + in, out := &in.SharedGalleryID, &out.SharedGalleryID + *out = new(string) + **out = **in + } + if in.Support != nil { + in, out := &in.Support, &out.Support + *out = new(SupportParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabServicePlanParameters. +func (in *LabServicePlanParameters) DeepCopy() *LabServicePlanParameters { + if in == nil { + return nil + } + out := new(LabServicePlanParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LabServicePlanSpec) DeepCopyInto(out *LabServicePlanSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabServicePlanSpec. +func (in *LabServicePlanSpec) DeepCopy() *LabServicePlanSpec { + if in == nil { + return nil + } + out := new(LabServicePlanSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LabServicePlanStatus) DeepCopyInto(out *LabServicePlanStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabServicePlanStatus. +func (in *LabServicePlanStatus) DeepCopy() *LabServicePlanStatus { + if in == nil { + return nil + } + out := new(LabServicePlanStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkInitParameters) DeepCopyInto(out *NetworkInitParameters) { + *out = *in + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInitParameters. +func (in *NetworkInitParameters) DeepCopy() *NetworkInitParameters { + if in == nil { + return nil + } + out := new(NetworkInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkObservation) DeepCopyInto(out *NetworkObservation) { + *out = *in + if in.LoadBalancerID != nil { + in, out := &in.LoadBalancerID, &out.LoadBalancerID + *out = new(string) + **out = **in + } + if in.PublicIPID != nil { + in, out := &in.PublicIPID, &out.PublicIPID + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkObservation. +func (in *NetworkObservation) DeepCopy() *NetworkObservation { + if in == nil { + return nil + } + out := new(NetworkObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkParameters) DeepCopyInto(out *NetworkParameters) { + *out = *in + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkParameters. +func (in *NetworkParameters) DeepCopy() *NetworkParameters { + if in == nil { + return nil + } + out := new(NetworkParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NonAdminUserInitParameters) DeepCopyInto(out *NonAdminUserInitParameters) { + *out = *in + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NonAdminUserInitParameters. +func (in *NonAdminUserInitParameters) DeepCopy() *NonAdminUserInitParameters { + if in == nil { + return nil + } + out := new(NonAdminUserInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NonAdminUserObservation) DeepCopyInto(out *NonAdminUserObservation) { + *out = *in + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NonAdminUserObservation. +func (in *NonAdminUserObservation) DeepCopy() *NonAdminUserObservation { + if in == nil { + return nil + } + out := new(NonAdminUserObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NonAdminUserParameters) DeepCopyInto(out *NonAdminUserParameters) { + *out = *in + out.PasswordSecretRef = in.PasswordSecretRef + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NonAdminUserParameters. +func (in *NonAdminUserParameters) DeepCopy() *NonAdminUserParameters { + if in == nil { + return nil + } + out := new(NonAdminUserParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RosterInitParameters) DeepCopyInto(out *RosterInitParameters) { + *out = *in + if in.ActiveDirectoryGroupID != nil { + in, out := &in.ActiveDirectoryGroupID, &out.ActiveDirectoryGroupID + *out = new(string) + **out = **in + } + if in.LmsInstance != nil { + in, out := &in.LmsInstance, &out.LmsInstance + *out = new(string) + **out = **in + } + if in.LtiClientID != nil { + in, out := &in.LtiClientID, &out.LtiClientID + *out = new(string) + **out = **in + } + if in.LtiContextID != nil { + in, out := &in.LtiContextID, &out.LtiContextID + *out = new(string) + **out = **in + } + if in.LtiRosterEndpoint != nil { + in, out := &in.LtiRosterEndpoint, &out.LtiRosterEndpoint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RosterInitParameters. +func (in *RosterInitParameters) DeepCopy() *RosterInitParameters { + if in == nil { + return nil + } + out := new(RosterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RosterObservation) DeepCopyInto(out *RosterObservation) { + *out = *in + if in.ActiveDirectoryGroupID != nil { + in, out := &in.ActiveDirectoryGroupID, &out.ActiveDirectoryGroupID + *out = new(string) + **out = **in + } + if in.LmsInstance != nil { + in, out := &in.LmsInstance, &out.LmsInstance + *out = new(string) + **out = **in + } + if in.LtiClientID != nil { + in, out := &in.LtiClientID, &out.LtiClientID + *out = new(string) + **out = **in + } + if in.LtiContextID != nil { + in, out := &in.LtiContextID, &out.LtiContextID + *out = new(string) + **out = **in + } + if in.LtiRosterEndpoint != nil { + in, out := &in.LtiRosterEndpoint, &out.LtiRosterEndpoint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RosterObservation. 
+func (in *RosterObservation) DeepCopy() *RosterObservation { + if in == nil { + return nil + } + out := new(RosterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RosterParameters) DeepCopyInto(out *RosterParameters) { + *out = *in + if in.ActiveDirectoryGroupID != nil { + in, out := &in.ActiveDirectoryGroupID, &out.ActiveDirectoryGroupID + *out = new(string) + **out = **in + } + if in.LmsInstance != nil { + in, out := &in.LmsInstance, &out.LmsInstance + *out = new(string) + **out = **in + } + if in.LtiClientID != nil { + in, out := &in.LtiClientID, &out.LtiClientID + *out = new(string) + **out = **in + } + if in.LtiContextID != nil { + in, out := &in.LtiContextID, &out.LtiContextID + *out = new(string) + **out = **in + } + if in.LtiRosterEndpoint != nil { + in, out := &in.LtiRosterEndpoint, &out.LtiRosterEndpoint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RosterParameters. +func (in *RosterParameters) DeepCopy() *RosterParameters { + if in == nil { + return nil + } + out := new(RosterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecurityInitParameters) DeepCopyInto(out *SecurityInitParameters) { + *out = *in + if in.OpenAccessEnabled != nil { + in, out := &in.OpenAccessEnabled, &out.OpenAccessEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityInitParameters. 
+func (in *SecurityInitParameters) DeepCopy() *SecurityInitParameters { + if in == nil { + return nil + } + out := new(SecurityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecurityObservation) DeepCopyInto(out *SecurityObservation) { + *out = *in + if in.OpenAccessEnabled != nil { + in, out := &in.OpenAccessEnabled, &out.OpenAccessEnabled + *out = new(bool) + **out = **in + } + if in.RegistrationCode != nil { + in, out := &in.RegistrationCode, &out.RegistrationCode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityObservation. +func (in *SecurityObservation) DeepCopy() *SecurityObservation { + if in == nil { + return nil + } + out := new(SecurityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecurityParameters) DeepCopyInto(out *SecurityParameters) { + *out = *in + if in.OpenAccessEnabled != nil { + in, out := &in.OpenAccessEnabled, &out.OpenAccessEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityParameters. +func (in *SecurityParameters) DeepCopy() *SecurityParameters { + if in == nil { + return nil + } + out := new(SecurityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SkuInitParameters) DeepCopyInto(out *SkuInitParameters) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SkuInitParameters. +func (in *SkuInitParameters) DeepCopy() *SkuInitParameters { + if in == nil { + return nil + } + out := new(SkuInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SkuObservation) DeepCopyInto(out *SkuObservation) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SkuObservation. +func (in *SkuObservation) DeepCopy() *SkuObservation { + if in == nil { + return nil + } + out := new(SkuObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SkuParameters) DeepCopyInto(out *SkuParameters) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SkuParameters. +func (in *SkuParameters) DeepCopy() *SkuParameters { + if in == nil { + return nil + } + out := new(SkuParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *SupportInitParameters) DeepCopyInto(out *SupportInitParameters) { + *out = *in + if in.Email != nil { + in, out := &in.Email, &out.Email + *out = new(string) + **out = **in + } + if in.Instructions != nil { + in, out := &in.Instructions, &out.Instructions + *out = new(string) + **out = **in + } + if in.Phone != nil { + in, out := &in.Phone, &out.Phone + *out = new(string) + **out = **in + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SupportInitParameters. +func (in *SupportInitParameters) DeepCopy() *SupportInitParameters { + if in == nil { + return nil + } + out := new(SupportInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SupportObservation) DeepCopyInto(out *SupportObservation) { + *out = *in + if in.Email != nil { + in, out := &in.Email, &out.Email + *out = new(string) + **out = **in + } + if in.Instructions != nil { + in, out := &in.Instructions, &out.Instructions + *out = new(string) + **out = **in + } + if in.Phone != nil { + in, out := &in.Phone, &out.Phone + *out = new(string) + **out = **in + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SupportObservation. +func (in *SupportObservation) DeepCopy() *SupportObservation { + if in == nil { + return nil + } + out := new(SupportObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SupportParameters) DeepCopyInto(out *SupportParameters) { + *out = *in + if in.Email != nil { + in, out := &in.Email, &out.Email + *out = new(string) + **out = **in + } + if in.Instructions != nil { + in, out := &in.Instructions, &out.Instructions + *out = new(string) + **out = **in + } + if in.Phone != nil { + in, out := &in.Phone, &out.Phone + *out = new(string) + **out = **in + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SupportParameters. +func (in *SupportParameters) DeepCopy() *SupportParameters { + if in == nil { + return nil + } + out := new(SupportParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualMachineInitParameters) DeepCopyInto(out *VirtualMachineInitParameters) { + *out = *in + if in.AdditionalCapabilityGpuDriversInstalled != nil { + in, out := &in.AdditionalCapabilityGpuDriversInstalled, &out.AdditionalCapabilityGpuDriversInstalled + *out = new(bool) + **out = **in + } + if in.AdminUser != nil { + in, out := &in.AdminUser, &out.AdminUser + *out = new(AdminUserInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CreateOption != nil { + in, out := &in.CreateOption, &out.CreateOption + *out = new(string) + **out = **in + } + if in.ImageReference != nil { + in, out := &in.ImageReference, &out.ImageReference + *out = new(ImageReferenceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.NonAdminUser != nil { + in, out := &in.NonAdminUser, &out.NonAdminUser + *out = new(NonAdminUserInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SharedPasswordEnabled != nil { + in, out := &in.SharedPasswordEnabled, &out.SharedPasswordEnabled + *out = new(bool) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(SkuInitParameters) + 
(*in).DeepCopyInto(*out) + } + if in.UsageQuota != nil { + in, out := &in.UsageQuota, &out.UsageQuota + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineInitParameters. +func (in *VirtualMachineInitParameters) DeepCopy() *VirtualMachineInitParameters { + if in == nil { + return nil + } + out := new(VirtualMachineInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualMachineObservation) DeepCopyInto(out *VirtualMachineObservation) { + *out = *in + if in.AdditionalCapabilityGpuDriversInstalled != nil { + in, out := &in.AdditionalCapabilityGpuDriversInstalled, &out.AdditionalCapabilityGpuDriversInstalled + *out = new(bool) + **out = **in + } + if in.AdminUser != nil { + in, out := &in.AdminUser, &out.AdminUser + *out = new(AdminUserObservation) + (*in).DeepCopyInto(*out) + } + if in.CreateOption != nil { + in, out := &in.CreateOption, &out.CreateOption + *out = new(string) + **out = **in + } + if in.ImageReference != nil { + in, out := &in.ImageReference, &out.ImageReference + *out = new(ImageReferenceObservation) + (*in).DeepCopyInto(*out) + } + if in.NonAdminUser != nil { + in, out := &in.NonAdminUser, &out.NonAdminUser + *out = new(NonAdminUserObservation) + (*in).DeepCopyInto(*out) + } + if in.SharedPasswordEnabled != nil { + in, out := &in.SharedPasswordEnabled, &out.SharedPasswordEnabled + *out = new(bool) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(SkuObservation) + (*in).DeepCopyInto(*out) + } + if in.UsageQuota != nil { + in, out := &in.UsageQuota, &out.UsageQuota + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineObservation. 
+func (in *VirtualMachineObservation) DeepCopy() *VirtualMachineObservation { + if in == nil { + return nil + } + out := new(VirtualMachineObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualMachineParameters) DeepCopyInto(out *VirtualMachineParameters) { + *out = *in + if in.AdditionalCapabilityGpuDriversInstalled != nil { + in, out := &in.AdditionalCapabilityGpuDriversInstalled, &out.AdditionalCapabilityGpuDriversInstalled + *out = new(bool) + **out = **in + } + if in.AdminUser != nil { + in, out := &in.AdminUser, &out.AdminUser + *out = new(AdminUserParameters) + (*in).DeepCopyInto(*out) + } + if in.CreateOption != nil { + in, out := &in.CreateOption, &out.CreateOption + *out = new(string) + **out = **in + } + if in.ImageReference != nil { + in, out := &in.ImageReference, &out.ImageReference + *out = new(ImageReferenceParameters) + (*in).DeepCopyInto(*out) + } + if in.NonAdminUser != nil { + in, out := &in.NonAdminUser, &out.NonAdminUser + *out = new(NonAdminUserParameters) + (*in).DeepCopyInto(*out) + } + if in.SharedPasswordEnabled != nil { + in, out := &in.SharedPasswordEnabled, &out.SharedPasswordEnabled + *out = new(bool) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(SkuParameters) + (*in).DeepCopyInto(*out) + } + if in.UsageQuota != nil { + in, out := &in.UsageQuota, &out.UsageQuota + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineParameters. 
+func (in *VirtualMachineParameters) DeepCopy() *VirtualMachineParameters { + if in == nil { + return nil + } + out := new(VirtualMachineParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/labservices/v1beta2/zz_generated.managed.go b/apis/labservices/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..9d09ec184 --- /dev/null +++ b/apis/labservices/v1beta2/zz_generated.managed.go @@ -0,0 +1,128 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this LabServiceLab. +func (mg *LabServiceLab) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this LabServiceLab. +func (mg *LabServiceLab) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this LabServiceLab. +func (mg *LabServiceLab) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this LabServiceLab. +func (mg *LabServiceLab) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this LabServiceLab. +func (mg *LabServiceLab) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this LabServiceLab. +func (mg *LabServiceLab) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this LabServiceLab. +func (mg *LabServiceLab) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this LabServiceLab. 
+func (mg *LabServiceLab) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this LabServiceLab. +func (mg *LabServiceLab) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this LabServiceLab. +func (mg *LabServiceLab) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this LabServiceLab. +func (mg *LabServiceLab) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this LabServiceLab. +func (mg *LabServiceLab) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this LabServicePlan. +func (mg *LabServicePlan) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this LabServicePlan. +func (mg *LabServicePlan) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this LabServicePlan. +func (mg *LabServicePlan) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this LabServicePlan. +func (mg *LabServicePlan) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this LabServicePlan. +func (mg *LabServicePlan) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this LabServicePlan. +func (mg *LabServicePlan) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this LabServicePlan. 
+func (mg *LabServicePlan) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this LabServicePlan. +func (mg *LabServicePlan) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this LabServicePlan. +func (mg *LabServicePlan) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this LabServicePlan. +func (mg *LabServicePlan) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this LabServicePlan. +func (mg *LabServicePlan) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this LabServicePlan. +func (mg *LabServicePlan) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/labservices/v1beta2/zz_generated.managedlist.go b/apis/labservices/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..eebe9b1ba --- /dev/null +++ b/apis/labservices/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,26 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this LabServiceLabList. +func (l *LabServiceLabList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this LabServicePlanList. 
+func (l *LabServicePlanList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/labservices/v1beta2/zz_generated.resolvers.go b/apis/labservices/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..c31217a72 --- /dev/null +++ b/apis/labservices/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,161 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + rconfig "github.com/upbound/provider-azure/apis/rconfig" + apisresolver "github.com/upbound/provider-azure/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *LabServiceLab) ResolveReferences( // ResolveReferences of this LabServiceLab. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + if mg.Spec.ForProvider.Network != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Network.SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.Network.SubnetIDRef, + Selector: mg.Spec.ForProvider.Network.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Network.SubnetID") + } + mg.Spec.ForProvider.Network.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Network.SubnetIDRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.Network != nil { + { + m, l, err = 
apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Network.SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.Network.SubnetIDRef, + Selector: mg.Spec.InitProvider.Network.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Network.SubnetID") + } + mg.Spec.InitProvider.Network.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Network.SubnetIDRef = rsp.ResolvedReference + + } + + return nil +} + +// ResolveReferences of this LabServicePlan. +func (mg *LabServicePlan) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DefaultNetworkSubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.DefaultNetworkSubnetIDRef, + Selector: mg.Spec.ForProvider.DefaultNetworkSubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DefaultNetworkSubnetID") + } + mg.Spec.ForProvider.DefaultNetworkSubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DefaultNetworkSubnetIDRef = rsp.ResolvedReference + { 
+ m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DefaultNetworkSubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.DefaultNetworkSubnetIDRef, + Selector: mg.Spec.InitProvider.DefaultNetworkSubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.DefaultNetworkSubnetID") + } + mg.Spec.InitProvider.DefaultNetworkSubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DefaultNetworkSubnetIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/labservices/v1beta2/zz_groupversion_info.go b/apis/labservices/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..d85854e40 --- /dev/null +++ b/apis/labservices/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 
2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=labservices.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "labservices.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/labservices/v1beta2/zz_labservicelab_terraformed.go b/apis/labservices/v1beta2/zz_labservicelab_terraformed.go new file mode 100755 index 000000000..31b439bab --- /dev/null +++ b/apis/labservices/v1beta2/zz_labservicelab_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this LabServiceLab +func (mg *LabServiceLab) GetTerraformResourceType() string { + return "azurerm_lab_service_lab" +} + +// GetConnectionDetailsMapping for this LabServiceLab +func (tr *LabServiceLab) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"virtual_machine[*].admin_user[*].password": "spec.forProvider.virtualMachine[*].adminUser[*].passwordSecretRef", "virtual_machine[*].non_admin_user[*].password": "spec.forProvider.virtualMachine[*].nonAdminUser[*].passwordSecretRef"} +} + +// GetObservation of this LabServiceLab +func (tr *LabServiceLab) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this LabServiceLab +func (tr *LabServiceLab) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this LabServiceLab +func (tr *LabServiceLab) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this LabServiceLab +func (tr *LabServiceLab) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this LabServiceLab +func (tr *LabServiceLab) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return 
json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this LabServiceLab +func (tr *LabServiceLab) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this LabServiceLab +func (tr *LabServiceLab) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this LabServiceLab using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *LabServiceLab) LateInitialize(attrs []byte) (bool, error) { + params := &LabServiceLabParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *LabServiceLab) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/labservices/v1beta2/zz_labservicelab_types.go b/apis/labservices/v1beta2/zz_labservicelab_types.go new file mode 100755 index 000000000..2889c8064 --- /dev/null +++ b/apis/labservices/v1beta2/zz_labservicelab_types.go @@ -0,0 +1,647 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AdminUserInitParameters struct { + + // The username to use when signing in to Lab Service Lab VMs. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type AdminUserObservation struct { + + // The username to use when signing in to Lab Service Lab VMs. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type AdminUserParameters struct { + + // The password for the user. + // +kubebuilder:validation:Required + PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` + + // The username to use when signing in to Lab Service Lab VMs. 
+ // +kubebuilder:validation:Optional + Username *string `json:"username" tf:"username,omitempty"` +} + +type AutoShutdownInitParameters struct { + + // The amount of time a VM will stay running after a user disconnects if this behavior is enabled. This value must be formatted as an ISO 8601 string. + DisconnectDelay *string `json:"disconnectDelay,omitempty" tf:"disconnect_delay,omitempty"` + + // The amount of time a VM will idle before it is shutdown if this behavior is enabled. This value must be formatted as an ISO 8601 string. + IdleDelay *string `json:"idleDelay,omitempty" tf:"idle_delay,omitempty"` + + // The amount of time a VM will stay running before it is shutdown if no connection is made and this behavior is enabled. This value must be formatted as an ISO 8601 string. + NoConnectDelay *string `json:"noConnectDelay,omitempty" tf:"no_connect_delay,omitempty"` + + // A VM will get shutdown when it has idled for a period of time. Possible values are LowUsage and UserAbsence. + ShutdownOnIdle *string `json:"shutdownOnIdle,omitempty" tf:"shutdown_on_idle,omitempty"` +} + +type AutoShutdownObservation struct { + + // The amount of time a VM will stay running after a user disconnects if this behavior is enabled. This value must be formatted as an ISO 8601 string. + DisconnectDelay *string `json:"disconnectDelay,omitempty" tf:"disconnect_delay,omitempty"` + + // The amount of time a VM will idle before it is shutdown if this behavior is enabled. This value must be formatted as an ISO 8601 string. + IdleDelay *string `json:"idleDelay,omitempty" tf:"idle_delay,omitempty"` + + // The amount of time a VM will stay running before it is shutdown if no connection is made and this behavior is enabled. This value must be formatted as an ISO 8601 string. + NoConnectDelay *string `json:"noConnectDelay,omitempty" tf:"no_connect_delay,omitempty"` + + // A VM will get shutdown when it has idled for a period of time. Possible values are LowUsage and UserAbsence. 
+ ShutdownOnIdle *string `json:"shutdownOnIdle,omitempty" tf:"shutdown_on_idle,omitempty"` +} + +type AutoShutdownParameters struct { + + // The amount of time a VM will stay running after a user disconnects if this behavior is enabled. This value must be formatted as an ISO 8601 string. + // +kubebuilder:validation:Optional + DisconnectDelay *string `json:"disconnectDelay,omitempty" tf:"disconnect_delay,omitempty"` + + // The amount of time a VM will idle before it is shutdown if this behavior is enabled. This value must be formatted as an ISO 8601 string. + // +kubebuilder:validation:Optional + IdleDelay *string `json:"idleDelay,omitempty" tf:"idle_delay,omitempty"` + + // The amount of time a VM will stay running before it is shutdown if no connection is made and this behavior is enabled. This value must be formatted as an ISO 8601 string. + // +kubebuilder:validation:Optional + NoConnectDelay *string `json:"noConnectDelay,omitempty" tf:"no_connect_delay,omitempty"` + + // A VM will get shutdown when it has idled for a period of time. Possible values are LowUsage and UserAbsence. + // +kubebuilder:validation:Optional + ShutdownOnIdle *string `json:"shutdownOnIdle,omitempty" tf:"shutdown_on_idle,omitempty"` +} + +type ConnectionSettingInitParameters struct { + + // The enabled access level for Client Access over RDP. Possible value is Public. + ClientRdpAccess *string `json:"clientRdpAccess,omitempty" tf:"client_rdp_access,omitempty"` + + // The enabled access level for Client Access over SSH. Possible value is Public. + ClientSSHAccess *string `json:"clientSshAccess,omitempty" tf:"client_ssh_access,omitempty"` +} + +type ConnectionSettingObservation struct { + + // The enabled access level for Client Access over RDP. Possible value is Public. + ClientRdpAccess *string `json:"clientRdpAccess,omitempty" tf:"client_rdp_access,omitempty"` + + // The enabled access level for Client Access over SSH. Possible value is Public. 
+ ClientSSHAccess *string `json:"clientSshAccess,omitempty" tf:"client_ssh_access,omitempty"` +} + +type ConnectionSettingParameters struct { + + // The enabled access level for Client Access over RDP. Possible value is Public. + // +kubebuilder:validation:Optional + ClientRdpAccess *string `json:"clientRdpAccess,omitempty" tf:"client_rdp_access,omitempty"` + + // The enabled access level for Client Access over SSH. Possible value is Public. + // +kubebuilder:validation:Optional + ClientSSHAccess *string `json:"clientSshAccess,omitempty" tf:"client_ssh_access,omitempty"` +} + +type ImageReferenceInitParameters struct { + + // The resource ID of the image. Changing this forces a new resource to be created. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The image offer if applicable. Changing this forces a new resource to be created. + Offer *string `json:"offer,omitempty" tf:"offer,omitempty"` + + // The image publisher. Changing this forces a new resource to be created. + Publisher *string `json:"publisher,omitempty" tf:"publisher,omitempty"` + + // A sku block as defined below. + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // The image version specified on creation. Changing this forces a new resource to be created. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type ImageReferenceObservation struct { + + // The resource ID of the image. Changing this forces a new resource to be created. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The image offer if applicable. Changing this forces a new resource to be created. + Offer *string `json:"offer,omitempty" tf:"offer,omitempty"` + + // The image publisher. Changing this forces a new resource to be created. + Publisher *string `json:"publisher,omitempty" tf:"publisher,omitempty"` + + // A sku block as defined below. + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // The image version specified on creation. 
Changing this forces a new resource to be created. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type ImageReferenceParameters struct { + + // The resource ID of the image. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The image offer if applicable. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Offer *string `json:"offer,omitempty" tf:"offer,omitempty"` + + // The image publisher. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Publisher *string `json:"publisher,omitempty" tf:"publisher,omitempty"` + + // A sku block as defined below. + // +kubebuilder:validation:Optional + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // The image version specified on creation. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type LabServiceLabInitParameters struct { + + // An auto_shutdown block as defined below. + AutoShutdown *AutoShutdownInitParameters `json:"autoShutdown,omitempty" tf:"auto_shutdown,omitempty"` + + // A connection_setting block as defined below. + ConnectionSetting *ConnectionSettingInitParameters `json:"connectionSetting,omitempty" tf:"connection_setting,omitempty"` + + // The description of the Lab Service Lab. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The resource ID of the Lab Plan that is used during resource creation to provide defaults and acts as a permission container when creating a Lab Service Lab via labs.azure.com. + LabPlanID *string `json:"labPlanId,omitempty" tf:"lab_plan_id,omitempty"` + + // The Azure Region where the Lab Service Lab should exist. Changing this forces a new resource to be created. 
+ Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A network block as defined below. + Network *NetworkInitParameters `json:"network,omitempty" tf:"network,omitempty"` + + // A roster block as defined below. + Roster *RosterInitParameters `json:"roster,omitempty" tf:"roster,omitempty"` + + // A security block as defined below. + Security *SecurityInitParameters `json:"security,omitempty" tf:"security,omitempty"` + + // A mapping of tags which should be assigned to the Lab Service Lab. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The title of the Lab Service Lab. + Title *string `json:"title,omitempty" tf:"title,omitempty"` + + // A virtual_machine block as defined below. + VirtualMachine *VirtualMachineInitParameters `json:"virtualMachine,omitempty" tf:"virtual_machine,omitempty"` +} + +type LabServiceLabObservation struct { + + // An auto_shutdown block as defined below. + AutoShutdown *AutoShutdownObservation `json:"autoShutdown,omitempty" tf:"auto_shutdown,omitempty"` + + // A connection_setting block as defined below. + ConnectionSetting *ConnectionSettingObservation `json:"connectionSetting,omitempty" tf:"connection_setting,omitempty"` + + // The description of the Lab Service Lab. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The ID of the Lab Service Lab. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The resource ID of the Lab Plan that is used during resource creation to provide defaults and acts as a permission container when creating a Lab Service Lab via labs.azure.com. + LabPlanID *string `json:"labPlanId,omitempty" tf:"lab_plan_id,omitempty"` + + // The Azure Region where the Lab Service Lab should exist. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A network block as defined below. 
+ Network *NetworkObservation `json:"network,omitempty" tf:"network,omitempty"` + + // The name of the Resource Group where the Lab Service Lab should exist. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A roster block as defined below. + Roster *RosterObservation `json:"roster,omitempty" tf:"roster,omitempty"` + + // A security block as defined below. + Security *SecurityObservation `json:"security,omitempty" tf:"security,omitempty"` + + // A mapping of tags which should be assigned to the Lab Service Lab. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The title of the Lab Service Lab. + Title *string `json:"title,omitempty" tf:"title,omitempty"` + + // A virtual_machine block as defined below. + VirtualMachine *VirtualMachineObservation `json:"virtualMachine,omitempty" tf:"virtual_machine,omitempty"` +} + +type LabServiceLabParameters struct { + + // An auto_shutdown block as defined below. + // +kubebuilder:validation:Optional + AutoShutdown *AutoShutdownParameters `json:"autoShutdown,omitempty" tf:"auto_shutdown,omitempty"` + + // A connection_setting block as defined below. + // +kubebuilder:validation:Optional + ConnectionSetting *ConnectionSettingParameters `json:"connectionSetting,omitempty" tf:"connection_setting,omitempty"` + + // The description of the Lab Service Lab. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The resource ID of the Lab Plan that is used during resource creation to provide defaults and acts as a permission container when creating a Lab Service Lab via labs.azure.com. + // +kubebuilder:validation:Optional + LabPlanID *string `json:"labPlanId,omitempty" tf:"lab_plan_id,omitempty"` + + // The Azure Region where the Lab Service Lab should exist. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A network block as defined below. + // +kubebuilder:validation:Optional + Network *NetworkParameters `json:"network,omitempty" tf:"network,omitempty"` + + // The name of the Resource Group where the Lab Service Lab should exist. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A roster block as defined below. + // +kubebuilder:validation:Optional + Roster *RosterParameters `json:"roster,omitempty" tf:"roster,omitempty"` + + // A security block as defined below. + // +kubebuilder:validation:Optional + Security *SecurityParameters `json:"security,omitempty" tf:"security,omitempty"` + + // A mapping of tags which should be assigned to the Lab Service Lab. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The title of the Lab Service Lab. + // +kubebuilder:validation:Optional + Title *string `json:"title,omitempty" tf:"title,omitempty"` + + // A virtual_machine block as defined below. 
+ // +kubebuilder:validation:Optional + VirtualMachine *VirtualMachineParameters `json:"virtualMachine,omitempty" tf:"virtual_machine,omitempty"` +} + +type NetworkInitParameters struct { + + // The resource ID of the Subnet for the network profile of the Lab Service Lab. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` +} + +type NetworkObservation struct { + + // The resource ID of the Load Balancer for the network profile of the Lab Service Lab. + LoadBalancerID *string `json:"loadBalancerId,omitempty" tf:"load_balancer_id,omitempty"` + + // The resource ID of the Public IP for the network profile of the Lab Service Lab. + PublicIPID *string `json:"publicIpId,omitempty" tf:"public_ip_id,omitempty"` + + // The resource ID of the Subnet for the network profile of the Lab Service Lab. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` +} + +type NetworkParameters struct { + + // The resource ID of the Subnet for the network profile of the Lab Service Lab. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. 
+ // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` +} + +type NonAdminUserInitParameters struct { + + // The username to use when signing in to Lab Service Lab VMs. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type NonAdminUserObservation struct { + + // The username to use when signing in to Lab Service Lab VMs. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type NonAdminUserParameters struct { + + // The password for the user. + // +kubebuilder:validation:Required + PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` + + // The username to use when signing in to Lab Service Lab VMs. + // +kubebuilder:validation:Optional + Username *string `json:"username" tf:"username,omitempty"` +} + +type RosterInitParameters struct { + + // The AAD group ID which this Lab Service Lab roster is populated from. + ActiveDirectoryGroupID *string `json:"activeDirectoryGroupId,omitempty" tf:"active_directory_group_id,omitempty"` + + // The base URI identifying the lms instance. + LmsInstance *string `json:"lmsInstance,omitempty" tf:"lms_instance,omitempty"` + + // The unique id of the Azure Lab Service tool in the lms. + LtiClientID *string `json:"ltiClientId,omitempty" tf:"lti_client_id,omitempty"` + + // The unique context identifier for the Lab Service Lab in the lms. + LtiContextID *string `json:"ltiContextId,omitempty" tf:"lti_context_id,omitempty"` + + // The URI of the names and roles service endpoint on the lms for the class attached to this Lab Service Lab. + LtiRosterEndpoint *string `json:"ltiRosterEndpoint,omitempty" tf:"lti_roster_endpoint,omitempty"` +} + +type RosterObservation struct { + + // The AAD group ID which this Lab Service Lab roster is populated from. 
+ ActiveDirectoryGroupID *string `json:"activeDirectoryGroupId,omitempty" tf:"active_directory_group_id,omitempty"` + + // The base URI identifying the lms instance. + LmsInstance *string `json:"lmsInstance,omitempty" tf:"lms_instance,omitempty"` + + // The unique id of the Azure Lab Service tool in the lms. + LtiClientID *string `json:"ltiClientId,omitempty" tf:"lti_client_id,omitempty"` + + // The unique context identifier for the Lab Service Lab in the lms. + LtiContextID *string `json:"ltiContextId,omitempty" tf:"lti_context_id,omitempty"` + + // The URI of the names and roles service endpoint on the lms for the class attached to this Lab Service Lab. + LtiRosterEndpoint *string `json:"ltiRosterEndpoint,omitempty" tf:"lti_roster_endpoint,omitempty"` +} + +type RosterParameters struct { + + // The AAD group ID which this Lab Service Lab roster is populated from. + // +kubebuilder:validation:Optional + ActiveDirectoryGroupID *string `json:"activeDirectoryGroupId,omitempty" tf:"active_directory_group_id,omitempty"` + + // The base URI identifying the lms instance. + // +kubebuilder:validation:Optional + LmsInstance *string `json:"lmsInstance,omitempty" tf:"lms_instance,omitempty"` + + // The unique id of the Azure Lab Service tool in the lms. + // +kubebuilder:validation:Optional + LtiClientID *string `json:"ltiClientId,omitempty" tf:"lti_client_id,omitempty"` + + // The unique context identifier for the Lab Service Lab in the lms. + // +kubebuilder:validation:Optional + LtiContextID *string `json:"ltiContextId,omitempty" tf:"lti_context_id,omitempty"` + + // The URI of the names and roles service endpoint on the lms for the class attached to this Lab Service Lab. + // +kubebuilder:validation:Optional + LtiRosterEndpoint *string `json:"ltiRosterEndpoint,omitempty" tf:"lti_roster_endpoint,omitempty"` +} + +type SecurityInitParameters struct { + + // Is open access enabled to allow any user or only specified users to register to a Lab Service Lab? 
+ OpenAccessEnabled *bool `json:"openAccessEnabled,omitempty" tf:"open_access_enabled,omitempty"` +} + +type SecurityObservation struct { + + // Is open access enabled to allow any user or only specified users to register to a Lab Service Lab? + OpenAccessEnabled *bool `json:"openAccessEnabled,omitempty" tf:"open_access_enabled,omitempty"` + + // The registration code for the Lab Service Lab. + RegistrationCode *string `json:"registrationCode,omitempty" tf:"registration_code,omitempty"` +} + +type SecurityParameters struct { + + // Is open access enabled to allow any user or only specified users to register to a Lab Service Lab? + // +kubebuilder:validation:Optional + OpenAccessEnabled *bool `json:"openAccessEnabled" tf:"open_access_enabled,omitempty"` +} + +type SkuInitParameters struct { + + // The capacity for the SKU. Possible values are between 0 and 400. + Capacity *float64 `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // The name of the SKU. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type SkuObservation struct { + + // The capacity for the SKU. Possible values are between 0 and 400. + Capacity *float64 `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // The name of the SKU. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type SkuParameters struct { + + // The capacity for the SKU. Possible values are between 0 and 400. + // +kubebuilder:validation:Optional + Capacity *float64 `json:"capacity" tf:"capacity,omitempty"` + + // The name of the SKU. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type VirtualMachineInitParameters struct { + + // Is flagged to pre-install dedicated GPU drivers? Defaults to false. Changing this forces a new resource to be created. 
+ AdditionalCapabilityGpuDriversInstalled *bool `json:"additionalCapabilityGpuDriversInstalled,omitempty" tf:"additional_capability_gpu_drivers_installed,omitempty"` + + // An admin_user block as defined below. + AdminUser *AdminUserInitParameters `json:"adminUser,omitempty" tf:"admin_user,omitempty"` + + // The create option to indicate what Lab Service Lab VMs are created from. Possible values are Image and TemplateVM. Defaults to Image. Changing this forces a new resource to be created. + CreateOption *string `json:"createOption,omitempty" tf:"create_option,omitempty"` + + // An image_reference block as defined below. + ImageReference *ImageReferenceInitParameters `json:"imageReference,omitempty" tf:"image_reference,omitempty"` + + // A non_admin_user block as defined below. + NonAdminUser *NonAdminUserInitParameters `json:"nonAdminUser,omitempty" tf:"non_admin_user,omitempty"` + + // Is the shared password enabled with the same password for all user VMs? Defaults to false. Changing this forces a new resource to be created. + SharedPasswordEnabled *bool `json:"sharedPasswordEnabled,omitempty" tf:"shared_password_enabled,omitempty"` + + // A sku block as defined below. + Sku *SkuInitParameters `json:"sku,omitempty" tf:"sku,omitempty"` + + // The initial quota allocated to each Lab Service Lab user. Defaults to PT0S. This value must be formatted as an ISO 8601 string. + UsageQuota *string `json:"usageQuota,omitempty" tf:"usage_quota,omitempty"` +} + +type VirtualMachineObservation struct { + + // Is flagged to pre-install dedicated GPU drivers? Defaults to false. Changing this forces a new resource to be created. + AdditionalCapabilityGpuDriversInstalled *bool `json:"additionalCapabilityGpuDriversInstalled,omitempty" tf:"additional_capability_gpu_drivers_installed,omitempty"` + + // An admin_user block as defined below. 
+ AdminUser *AdminUserObservation `json:"adminUser,omitempty" tf:"admin_user,omitempty"` + + // The create option to indicate what Lab Service Lab VMs are created from. Possible values are Image and TemplateVM. Defaults to Image. Changing this forces a new resource to be created. + CreateOption *string `json:"createOption,omitempty" tf:"create_option,omitempty"` + + // An image_reference block as defined below. + ImageReference *ImageReferenceObservation `json:"imageReference,omitempty" tf:"image_reference,omitempty"` + + // A non_admin_user block as defined below. + NonAdminUser *NonAdminUserObservation `json:"nonAdminUser,omitempty" tf:"non_admin_user,omitempty"` + + // Is the shared password enabled with the same password for all user VMs? Defaults to false. Changing this forces a new resource to be created. + SharedPasswordEnabled *bool `json:"sharedPasswordEnabled,omitempty" tf:"shared_password_enabled,omitempty"` + + // A sku block as defined below. + Sku *SkuObservation `json:"sku,omitempty" tf:"sku,omitempty"` + + // The initial quota allocated to each Lab Service Lab user. Defaults to PT0S. This value must be formatted as an ISO 8601 string. + UsageQuota *string `json:"usageQuota,omitempty" tf:"usage_quota,omitempty"` +} + +type VirtualMachineParameters struct { + + // Is flagged to pre-install dedicated GPU drivers? Defaults to false. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + AdditionalCapabilityGpuDriversInstalled *bool `json:"additionalCapabilityGpuDriversInstalled,omitempty" tf:"additional_capability_gpu_drivers_installed,omitempty"` + + // An admin_user block as defined below. + // +kubebuilder:validation:Optional + AdminUser *AdminUserParameters `json:"adminUser" tf:"admin_user,omitempty"` + + // The create option to indicate what Lab Service Lab VMs are created from. Possible values are Image and TemplateVM. Defaults to Image. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + CreateOption *string `json:"createOption,omitempty" tf:"create_option,omitempty"` + + // An image_reference block as defined below. + // +kubebuilder:validation:Optional + ImageReference *ImageReferenceParameters `json:"imageReference" tf:"image_reference,omitempty"` + + // A non_admin_user block as defined below. + // +kubebuilder:validation:Optional + NonAdminUser *NonAdminUserParameters `json:"nonAdminUser,omitempty" tf:"non_admin_user,omitempty"` + + // Is the shared password enabled with the same password for all user VMs? Defaults to false. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + SharedPasswordEnabled *bool `json:"sharedPasswordEnabled,omitempty" tf:"shared_password_enabled,omitempty"` + + // A sku block as defined below. + // +kubebuilder:validation:Optional + Sku *SkuParameters `json:"sku" tf:"sku,omitempty"` + + // The initial quota allocated to each Lab Service Lab user. Defaults to PT0S. This value must be formatted as an ISO 8601 string. + // +kubebuilder:validation:Optional + UsageQuota *string `json:"usageQuota,omitempty" tf:"usage_quota,omitempty"` +} + +// LabServiceLabSpec defines the desired state of LabServiceLab +type LabServiceLabSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider LabServiceLabParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider LabServiceLabInitParameters `json:"initProvider,omitempty"` +} + +// LabServiceLabStatus defines the observed state of LabServiceLab. +type LabServiceLabStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider LabServiceLabObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// LabServiceLab is the Schema for the LabServiceLabs API. Manages a Lab Service Lab. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type LabServiceLab struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.connectionSetting) || (has(self.initProvider) && has(self.initProvider.connectionSetting))",message="spec.forProvider.connectionSetting is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // 
+kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.security) || (has(self.initProvider) && has(self.initProvider.security))",message="spec.forProvider.security is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.title) || (has(self.initProvider) && has(self.initProvider.title))",message="spec.forProvider.title is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.virtualMachine) || (has(self.initProvider) && has(self.initProvider.virtualMachine))",message="spec.forProvider.virtualMachine is a required parameter" + Spec LabServiceLabSpec `json:"spec"` + Status LabServiceLabStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// LabServiceLabList contains a list of LabServiceLabs +type LabServiceLabList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []LabServiceLab `json:"items"` +} + +// Repository type metadata. +var ( + LabServiceLab_Kind = "LabServiceLab" + LabServiceLab_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: LabServiceLab_Kind}.String() + LabServiceLab_KindAPIVersion = LabServiceLab_Kind + "." 
+ CRDGroupVersion.String() + LabServiceLab_GroupVersionKind = CRDGroupVersion.WithKind(LabServiceLab_Kind) +) + +func init() { + SchemeBuilder.Register(&LabServiceLab{}, &LabServiceLabList{}) +} diff --git a/apis/labservices/v1beta2/zz_labserviceplan_terraformed.go b/apis/labservices/v1beta2/zz_labserviceplan_terraformed.go new file mode 100755 index 000000000..106e4d7bb --- /dev/null +++ b/apis/labservices/v1beta2/zz_labserviceplan_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this LabServicePlan +func (mg *LabServicePlan) GetTerraformResourceType() string { + return "azurerm_lab_service_plan" +} + +// GetConnectionDetailsMapping for this LabServicePlan +func (tr *LabServicePlan) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this LabServicePlan +func (tr *LabServicePlan) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this LabServicePlan +func (tr *LabServicePlan) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this LabServicePlan +func (tr *LabServicePlan) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this LabServicePlan +func (tr *LabServicePlan) GetParameters() (map[string]any, error) { + 
p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this LabServicePlan +func (tr *LabServicePlan) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this LabServicePlan +func (tr *LabServicePlan) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this LabServicePlan +func (tr *LabServicePlan) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this LabServicePlan using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *LabServicePlan) LateInitialize(attrs []byte) (bool, error) { + params := &LabServicePlanParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *LabServicePlan) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/labservices/v1beta2/zz_labserviceplan_types.go b/apis/labservices/v1beta2/zz_labserviceplan_types.go new file mode 100755 index 000000000..6f76a665a --- /dev/null +++ b/apis/labservices/v1beta2/zz_labserviceplan_types.go @@ -0,0 +1,354 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DefaultAutoShutdownInitParameters struct { + + // The amount of time a VM will stay running after a user disconnects if this behavior is enabled. This value must be formatted as an ISO 8601 string. + DisconnectDelay *string `json:"disconnectDelay,omitempty" tf:"disconnect_delay,omitempty"` + + // The amount of time a VM will idle before it is shutdown if this behavior is enabled. This value must be formatted as an ISO 8601 string. + IdleDelay *string `json:"idleDelay,omitempty" tf:"idle_delay,omitempty"` + + // The amount of time a VM will stay running before it is shutdown if no connection is made and this behavior is enabled. This value must be formatted as an ISO 8601 string. 
+ NoConnectDelay *string `json:"noConnectDelay,omitempty" tf:"no_connect_delay,omitempty"` + + // Will a VM get shutdown when it has idled for a period of time? Possible values are LowUsage and UserAbsence. + ShutdownOnIdle *string `json:"shutdownOnIdle,omitempty" tf:"shutdown_on_idle,omitempty"` +} + +type DefaultAutoShutdownObservation struct { + + // The amount of time a VM will stay running after a user disconnects if this behavior is enabled. This value must be formatted as an ISO 8601 string. + DisconnectDelay *string `json:"disconnectDelay,omitempty" tf:"disconnect_delay,omitempty"` + + // The amount of time a VM will idle before it is shutdown if this behavior is enabled. This value must be formatted as an ISO 8601 string. + IdleDelay *string `json:"idleDelay,omitempty" tf:"idle_delay,omitempty"` + + // The amount of time a VM will stay running before it is shutdown if no connection is made and this behavior is enabled. This value must be formatted as an ISO 8601 string. + NoConnectDelay *string `json:"noConnectDelay,omitempty" tf:"no_connect_delay,omitempty"` + + // Will a VM get shutdown when it has idled for a period of time? Possible values are LowUsage and UserAbsence. + ShutdownOnIdle *string `json:"shutdownOnIdle,omitempty" tf:"shutdown_on_idle,omitempty"` +} + +type DefaultAutoShutdownParameters struct { + + // The amount of time a VM will stay running after a user disconnects if this behavior is enabled. This value must be formatted as an ISO 8601 string. + // +kubebuilder:validation:Optional + DisconnectDelay *string `json:"disconnectDelay,omitempty" tf:"disconnect_delay,omitempty"` + + // The amount of time a VM will idle before it is shutdown if this behavior is enabled. This value must be formatted as an ISO 8601 string. 
+ // +kubebuilder:validation:Optional + IdleDelay *string `json:"idleDelay,omitempty" tf:"idle_delay,omitempty"` + + // The amount of time a VM will stay running before it is shutdown if no connection is made and this behavior is enabled. This value must be formatted as an ISO 8601 string. + // +kubebuilder:validation:Optional + NoConnectDelay *string `json:"noConnectDelay,omitempty" tf:"no_connect_delay,omitempty"` + + // Will a VM get shutdown when it has idled for a period of time? Possible values are LowUsage and UserAbsence. + // +kubebuilder:validation:Optional + ShutdownOnIdle *string `json:"shutdownOnIdle,omitempty" tf:"shutdown_on_idle,omitempty"` +} + +type DefaultConnectionInitParameters struct { + + // The enabled access level for Client Access over RDP. Possible values are Private and Public. + ClientRdpAccess *string `json:"clientRdpAccess,omitempty" tf:"client_rdp_access,omitempty"` + + // The enabled access level for Client Access over SSH. Possible values are Private and Public. + ClientSSHAccess *string `json:"clientSshAccess,omitempty" tf:"client_ssh_access,omitempty"` + + // The enabled access level for Web Access over RDP. Possible values are Private and Public. + WebRdpAccess *string `json:"webRdpAccess,omitempty" tf:"web_rdp_access,omitempty"` + + // The enabled access level for Web Access over SSH. Possible values are Private and Public. + WebSSHAccess *string `json:"webSshAccess,omitempty" tf:"web_ssh_access,omitempty"` +} + +type DefaultConnectionObservation struct { + + // The enabled access level for Client Access over RDP. Possible values are Private and Public. + ClientRdpAccess *string `json:"clientRdpAccess,omitempty" tf:"client_rdp_access,omitempty"` + + // The enabled access level for Client Access over SSH. Possible values are Private and Public. + ClientSSHAccess *string `json:"clientSshAccess,omitempty" tf:"client_ssh_access,omitempty"` + + // The enabled access level for Web Access over RDP. 
Possible values are Private and Public. + WebRdpAccess *string `json:"webRdpAccess,omitempty" tf:"web_rdp_access,omitempty"` + + // The enabled access level for Web Access over SSH. Possible values are Private and Public. + WebSSHAccess *string `json:"webSshAccess,omitempty" tf:"web_ssh_access,omitempty"` +} + +type DefaultConnectionParameters struct { + + // The enabled access level for Client Access over RDP. Possible values are Private and Public. + // +kubebuilder:validation:Optional + ClientRdpAccess *string `json:"clientRdpAccess,omitempty" tf:"client_rdp_access,omitempty"` + + // The enabled access level for Client Access over SSH. Possible values are Private and Public. + // +kubebuilder:validation:Optional + ClientSSHAccess *string `json:"clientSshAccess,omitempty" tf:"client_ssh_access,omitempty"` + + // The enabled access level for Web Access over RDP. Possible values are Private and Public. + // +kubebuilder:validation:Optional + WebRdpAccess *string `json:"webRdpAccess,omitempty" tf:"web_rdp_access,omitempty"` + + // The enabled access level for Web Access over SSH. Possible values are Private and Public. + // +kubebuilder:validation:Optional + WebSSHAccess *string `json:"webSshAccess,omitempty" tf:"web_ssh_access,omitempty"` +} + +type LabServicePlanInitParameters struct { + + // The allowed regions for the lab creator to use when creating labs using this Lab Service Plan. The allowed region's count must be between 1 and 28. + AllowedRegions []*string `json:"allowedRegions,omitempty" tf:"allowed_regions,omitempty"` + + // A default_auto_shutdown block as defined below. + DefaultAutoShutdown *DefaultAutoShutdownInitParameters `json:"defaultAutoShutdown,omitempty" tf:"default_auto_shutdown,omitempty"` + + // A default_connection block as defined below. + DefaultConnection *DefaultConnectionInitParameters `json:"defaultConnection,omitempty" tf:"default_connection,omitempty"` + + // The resource ID of the Subnet for the Lab Service Plan network profile. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + DefaultNetworkSubnetID *string `json:"defaultNetworkSubnetId,omitempty" tf:"default_network_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate defaultNetworkSubnetId. + // +kubebuilder:validation:Optional + DefaultNetworkSubnetIDRef *v1.Reference `json:"defaultNetworkSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate defaultNetworkSubnetId. + // +kubebuilder:validation:Optional + DefaultNetworkSubnetIDSelector *v1.Selector `json:"defaultNetworkSubnetIdSelector,omitempty" tf:"-"` + + // The Azure Region where the Lab Service Plan should exist. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The resource ID of the Shared Image Gallery attached to this Lab Service Plan. When saving a lab template virtual machine image it will be persisted in this gallery. The shared images from the gallery can be made available to use when creating new labs. + SharedGalleryID *string `json:"sharedGalleryId,omitempty" tf:"shared_gallery_id,omitempty"` + + // A support block as defined below. + Support *SupportInitParameters `json:"support,omitempty" tf:"support,omitempty"` + + // A mapping of tags which should be assigned to the Lab Service Plan. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type LabServicePlanObservation struct { + + // The allowed regions for the lab creator to use when creating labs using this Lab Service Plan. The allowed region's count must be between 1 and 28. + AllowedRegions []*string `json:"allowedRegions,omitempty" tf:"allowed_regions,omitempty"` + + // A default_auto_shutdown block as defined below. 
+ DefaultAutoShutdown *DefaultAutoShutdownObservation `json:"defaultAutoShutdown,omitempty" tf:"default_auto_shutdown,omitempty"` + + // A default_connection block as defined below. + DefaultConnection *DefaultConnectionObservation `json:"defaultConnection,omitempty" tf:"default_connection,omitempty"` + + // The resource ID of the Subnet for the Lab Service Plan network profile. + DefaultNetworkSubnetID *string `json:"defaultNetworkSubnetId,omitempty" tf:"default_network_subnet_id,omitempty"` + + // The ID of the Lab Service Plan. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The Azure Region where the Lab Service Plan should exist. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the Resource Group where the Lab Service Plan should exist. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // The resource ID of the Shared Image Gallery attached to this Lab Service Plan. When saving a lab template virtual machine image it will be persisted in this gallery. The shared images from the gallery can be made available to use when creating new labs. + SharedGalleryID *string `json:"sharedGalleryId,omitempty" tf:"shared_gallery_id,omitempty"` + + // A support block as defined below. + Support *SupportObservation `json:"support,omitempty" tf:"support,omitempty"` + + // A mapping of tags which should be assigned to the Lab Service Plan. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type LabServicePlanParameters struct { + + // The allowed regions for the lab creator to use when creating labs using this Lab Service Plan. The allowed region's count must be between 1 and 28. 
+ // +kubebuilder:validation:Optional + AllowedRegions []*string `json:"allowedRegions,omitempty" tf:"allowed_regions,omitempty"` + + // A default_auto_shutdown block as defined below. + // +kubebuilder:validation:Optional + DefaultAutoShutdown *DefaultAutoShutdownParameters `json:"defaultAutoShutdown,omitempty" tf:"default_auto_shutdown,omitempty"` + + // A default_connection block as defined below. + // +kubebuilder:validation:Optional + DefaultConnection *DefaultConnectionParameters `json:"defaultConnection,omitempty" tf:"default_connection,omitempty"` + + // The resource ID of the Subnet for the Lab Service Plan network profile. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + DefaultNetworkSubnetID *string `json:"defaultNetworkSubnetId,omitempty" tf:"default_network_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate defaultNetworkSubnetId. + // +kubebuilder:validation:Optional + DefaultNetworkSubnetIDRef *v1.Reference `json:"defaultNetworkSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate defaultNetworkSubnetId. + // +kubebuilder:validation:Optional + DefaultNetworkSubnetIDSelector *v1.Selector `json:"defaultNetworkSubnetIdSelector,omitempty" tf:"-"` + + // The Azure Region where the Lab Service Plan should exist. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the Resource Group where the Lab Service Plan should exist. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // The resource ID of the Shared Image Gallery attached to this Lab Service Plan. When saving a lab template virtual machine image it will be persisted in this gallery. The shared images from the gallery can be made available to use when creating new labs. + // +kubebuilder:validation:Optional + SharedGalleryID *string `json:"sharedGalleryId,omitempty" tf:"shared_gallery_id,omitempty"` + + // A support block as defined below. + // +kubebuilder:validation:Optional + Support *SupportParameters `json:"support,omitempty" tf:"support,omitempty"` + + // A mapping of tags which should be assigned to the Lab Service Plan. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type SupportInitParameters struct { + + // The email address for the support contact. + Email *string `json:"email,omitempty" tf:"email,omitempty"` + + // The instructions for users of the Lab Service Plan. + Instructions *string `json:"instructions,omitempty" tf:"instructions,omitempty"` + + // The phone number for the support contact. + Phone *string `json:"phone,omitempty" tf:"phone,omitempty"` + + // The web address for users of the Lab Service Plan. 
+ URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +type SupportObservation struct { + + // The email address for the support contact. + Email *string `json:"email,omitempty" tf:"email,omitempty"` + + // The instructions for users of the Lab Service Plan. + Instructions *string `json:"instructions,omitempty" tf:"instructions,omitempty"` + + // The phone number for the support contact. + Phone *string `json:"phone,omitempty" tf:"phone,omitempty"` + + // The web address for users of the Lab Service Plan. + URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +type SupportParameters struct { + + // The email address for the support contact. + // +kubebuilder:validation:Optional + Email *string `json:"email,omitempty" tf:"email,omitempty"` + + // The instructions for users of the Lab Service Plan. + // +kubebuilder:validation:Optional + Instructions *string `json:"instructions,omitempty" tf:"instructions,omitempty"` + + // The phone number for the support contact. + // +kubebuilder:validation:Optional + Phone *string `json:"phone,omitempty" tf:"phone,omitempty"` + + // The web address for users of the Lab Service Plan. + // +kubebuilder:validation:Optional + URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +// LabServicePlanSpec defines the desired state of LabServicePlan +type LabServicePlanSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider LabServicePlanParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider LabServicePlanInitParameters `json:"initProvider,omitempty"` +} + +// LabServicePlanStatus defines the observed state of LabServicePlan. +type LabServicePlanStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider LabServicePlanObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// LabServicePlan is the Schema for the LabServicePlans API. Manages a Lab Service Plan. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type LabServicePlan struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.allowedRegions) || (has(self.initProvider) && has(self.initProvider.allowedRegions))",message="spec.forProvider.allowedRegions is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + Spec LabServicePlanSpec `json:"spec"` + 
Status LabServicePlanStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// LabServicePlanList contains a list of LabServicePlans +type LabServicePlanList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []LabServicePlan `json:"items"` +} + +// Repository type metadata. +var ( + LabServicePlan_Kind = "LabServicePlan" + LabServicePlan_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: LabServicePlan_Kind}.String() + LabServicePlan_KindAPIVersion = LabServicePlan_Kind + "." + CRDGroupVersion.String() + LabServicePlan_GroupVersionKind = CRDGroupVersion.WithKind(LabServicePlan_Kind) +) + +func init() { + SchemeBuilder.Register(&LabServicePlan{}, &LabServicePlanList{}) +} diff --git a/apis/loadtestservice/v1beta1/zz_generated.conversion_spokes.go b/apis/loadtestservice/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..fdc091eef --- /dev/null +++ b/apis/loadtestservice/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this LoadTest to the hub type. +func (tr *LoadTest) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the LoadTest type. 
+func (tr *LoadTest) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/loadtestservice/v1beta1/zz_generated.conversion_hubs.go b/apis/loadtestservice/v1beta2/zz_generated.conversion_hubs.go similarity index 93% rename from apis/loadtestservice/v1beta1/zz_generated.conversion_hubs.go rename to apis/loadtestservice/v1beta2/zz_generated.conversion_hubs.go index df41f84c9..2ccb8388a 100755 --- a/apis/loadtestservice/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/loadtestservice/v1beta2/zz_generated.conversion_hubs.go @@ -4,7 +4,7 @@ // Code generated by upjet. DO NOT EDIT. -package v1beta1 +package v1beta2 // Hub marks this type as a conversion hub. func (tr *LoadTest) Hub() {} diff --git a/apis/loadtestservice/v1beta2/zz_generated.deepcopy.go b/apis/loadtestservice/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..3a4e99b27 --- /dev/null +++ b/apis/loadtestservice/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,379 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentityInitParameters) DeepCopyInto(out *IdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityInitParameters. +func (in *IdentityInitParameters) DeepCopy() *IdentityInitParameters { + if in == nil { + return nil + } + out := new(IdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityObservation) DeepCopyInto(out *IdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityObservation. +func (in *IdentityObservation) DeepCopy() *IdentityObservation { + if in == nil { + return nil + } + out := new(IdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentityParameters) DeepCopyInto(out *IdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityParameters. +func (in *IdentityParameters) DeepCopy() *IdentityParameters { + if in == nil { + return nil + } + out := new(IdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoadTest) DeepCopyInto(out *LoadTest) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadTest. +func (in *LoadTest) DeepCopy() *LoadTest { + if in == nil { + return nil + } + out := new(LoadTest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LoadTest) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LoadTestInitParameters) DeepCopyInto(out *LoadTestInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadTestInitParameters. +func (in *LoadTestInitParameters) DeepCopy() *LoadTestInitParameters { + if in == nil { + return nil + } + out := new(LoadTestInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoadTestList) DeepCopyInto(out *LoadTestList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LoadTest, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadTestList. +func (in *LoadTestList) DeepCopy() *LoadTestList { + if in == nil { + return nil + } + out := new(LoadTestList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *LoadTestList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoadTestObservation) DeepCopyInto(out *LoadTestObservation) { + *out = *in + if in.DataPlaneURI != nil { + in, out := &in.DataPlaneURI, &out.DataPlaneURI + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadTestObservation. +func (in *LoadTestObservation) DeepCopy() *LoadTestObservation { + if in == nil { + return nil + } + out := new(LoadTestObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LoadTestParameters) DeepCopyInto(out *LoadTestParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadTestParameters. +func (in *LoadTestParameters) DeepCopy() *LoadTestParameters { + if in == nil { + return nil + } + out := new(LoadTestParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoadTestSpec) DeepCopyInto(out *LoadTestSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadTestSpec. 
+func (in *LoadTestSpec) DeepCopy() *LoadTestSpec { + if in == nil { + return nil + } + out := new(LoadTestSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoadTestStatus) DeepCopyInto(out *LoadTestStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadTestStatus. +func (in *LoadTestStatus) DeepCopy() *LoadTestStatus { + if in == nil { + return nil + } + out := new(LoadTestStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/loadtestservice/v1beta2/zz_generated.managed.go b/apis/loadtestservice/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..7d9b2739b --- /dev/null +++ b/apis/loadtestservice/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this LoadTest. +func (mg *LoadTest) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this LoadTest. +func (mg *LoadTest) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this LoadTest. +func (mg *LoadTest) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this LoadTest. +func (mg *LoadTest) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this LoadTest. 
+func (mg *LoadTest) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this LoadTest. +func (mg *LoadTest) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this LoadTest. +func (mg *LoadTest) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this LoadTest. +func (mg *LoadTest) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this LoadTest. +func (mg *LoadTest) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this LoadTest. +func (mg *LoadTest) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this LoadTest. +func (mg *LoadTest) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this LoadTest. +func (mg *LoadTest) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/loadtestservice/v1beta2/zz_generated.managedlist.go b/apis/loadtestservice/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..2e3fb7cfa --- /dev/null +++ b/apis/loadtestservice/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this LoadTestList. 
+func (l *LoadTestList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/loadtestservice/v1beta2/zz_generated.resolvers.go b/apis/loadtestservice/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..659c775c9 --- /dev/null +++ b/apis/loadtestservice/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,49 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + client "sigs.k8s.io/controller-runtime/pkg/client" + + // ResolveReferences of this LoadTest. + apisresolver "github.com/upbound/provider-azure/internal/apis" +) + +func (mg *LoadTest) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = 
reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/loadtestservice/v1beta2/zz_groupversion_info.go b/apis/loadtestservice/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..1c24978fe --- /dev/null +++ b/apis/loadtestservice/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=loadtestservice.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "loadtestservice.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/loadtestservice/v1beta2/zz_loadtest_terraformed.go b/apis/loadtestservice/v1beta2/zz_loadtest_terraformed.go new file mode 100755 index 000000000..182e8a4b4 --- /dev/null +++ b/apis/loadtestservice/v1beta2/zz_loadtest_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this LoadTest +func (mg *LoadTest) GetTerraformResourceType() string { + return "azurerm_load_test" +} + +// GetConnectionDetailsMapping for this LoadTest +func (tr *LoadTest) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this LoadTest +func (tr *LoadTest) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this LoadTest +func (tr *LoadTest) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this LoadTest +func (tr *LoadTest) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this LoadTest +func (tr *LoadTest) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this LoadTest +func (tr *LoadTest) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this LoadTest +func (tr *LoadTest) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) 
+} + +// GetInitParameters of this LoadTest +func (tr *LoadTest) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this LoadTest using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *LoadTest) LateInitialize(attrs []byte) (bool, error) { + params := &LoadTestParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *LoadTest) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/loadtestservice/v1beta2/zz_loadtest_types.go b/apis/loadtestservice/v1beta2/zz_loadtest_types.go new file mode 100755 index 000000000..ccf07c3f5 --- /dev/null +++ b/apis/loadtestservice/v1beta2/zz_loadtest_types.go @@ -0,0 +1,187 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type IdentityInitParameters struct { + + // A list of the User Assigned Identity IDs that should be assigned to this Load Test. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Identity that should be assigned to this Load Test. Possible values are SystemAssigned, SystemAssigned, UserAssigned and UserAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityObservation struct { + + // A list of the User Assigned Identity IDs that should be assigned to this Load Test. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Principal ID for the System-Assigned Managed Identity assigned to this Load Test. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID for the System-Assigned Managed Identity assigned to this Load Test. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the type of Managed Identity that should be assigned to this Load Test. Possible values are SystemAssigned, SystemAssigned, UserAssigned and UserAssigned. 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityParameters struct { + + // A list of the User Assigned Identity IDs that should be assigned to this Load Test. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Identity that should be assigned to this Load Test. Possible values are SystemAssigned, SystemAssigned, UserAssigned and UserAssigned. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type LoadTestInitParameters struct { + + // Description of the resource. Changing this forces a new Load Test to be created. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // An identity block as defined below. Specifies the Managed Identity which should be assigned to this Load Test. + Identity *IdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The Azure Region where the Load Test should exist. Changing this forces a new Load Test to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A mapping of tags which should be assigned to the Load Test. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type LoadTestObservation struct { + + // Resource data plane URI. + DataPlaneURI *string `json:"dataPlaneUri,omitempty" tf:"data_plane_uri,omitempty"` + + // Description of the resource. Changing this forces a new Load Test to be created. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The ID of the Load Test. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. Specifies the Managed Identity which should be assigned to this Load Test. 
+ Identity *IdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // The Azure Region where the Load Test should exist. Changing this forces a new Load Test to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the name of the Resource Group within which this Load Test should exist. Changing this forces a new Load Test to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A mapping of tags which should be assigned to the Load Test. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type LoadTestParameters struct { + + // Description of the resource. Changing this forces a new Load Test to be created. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // An identity block as defined below. Specifies the Managed Identity which should be assigned to this Load Test. + // +kubebuilder:validation:Optional + Identity *IdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The Azure Region where the Load Test should exist. Changing this forces a new Load Test to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the name of the Resource Group within which this Load Test should exist. Changing this forces a new Load Test to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. 
+ // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A mapping of tags which should be assigned to the Load Test. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +// LoadTestSpec defines the desired state of LoadTest +type LoadTestSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider LoadTestParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider LoadTestInitParameters `json:"initProvider,omitempty"` +} + +// LoadTestStatus defines the observed state of LoadTest. +type LoadTestStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider LoadTestObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// LoadTest is the Schema for the LoadTests API. Manages a Load Test. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type LoadTest struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + Spec LoadTestSpec `json:"spec"` + Status LoadTestStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// LoadTestList contains a list of LoadTests +type LoadTestList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []LoadTest `json:"items"` +} + +// Repository type metadata. +var ( + LoadTest_Kind = "LoadTest" + LoadTest_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: LoadTest_Kind}.String() + LoadTest_KindAPIVersion = LoadTest_Kind + "." 
+ CRDGroupVersion.String() + LoadTest_GroupVersionKind = CRDGroupVersion.WithKind(LoadTest_Kind) +) + +func init() { + SchemeBuilder.Register(&LoadTest{}, &LoadTestList{}) +} diff --git a/apis/logic/v1beta1/zz_appactioncustom_types.go b/apis/logic/v1beta1/zz_appactioncustom_types.go index 723b48187..551675d35 100755 --- a/apis/logic/v1beta1/zz_appactioncustom_types.go +++ b/apis/logic/v1beta1/zz_appactioncustom_types.go @@ -38,7 +38,7 @@ type AppActionCustomParameters struct { Body *string `json:"body,omitempty" tf:"body,omitempty"` // Specifies the ID of the Logic App Workflow. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/logic/v1beta1.AppWorkflow + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/logic/v1beta2.AppWorkflow // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional LogicAppID *string `json:"logicAppId,omitempty" tf:"logic_app_id,omitempty"` diff --git a/apis/logic/v1beta1/zz_appactionhttp_types.go b/apis/logic/v1beta1/zz_appactionhttp_types.go index 44d3c35f5..c33484720 100755 --- a/apis/logic/v1beta1/zz_appactionhttp_types.go +++ b/apis/logic/v1beta1/zz_appactionhttp_types.go @@ -77,7 +77,7 @@ type AppActionHTTPParameters struct { Headers map[string]*string `json:"headers,omitempty" tf:"headers,omitempty"` // Specifies the ID of the Logic App Workflow. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/logic/v1beta1.AppWorkflow + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/logic/v1beta2.AppWorkflow // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional LogicAppID *string `json:"logicAppId,omitempty" tf:"logic_app_id,omitempty"` diff --git a/apis/logic/v1beta1/zz_apptriggercustom_types.go b/apis/logic/v1beta1/zz_apptriggercustom_types.go index 7c4b05ad0..5a9bc41ea 100755 --- a/apis/logic/v1beta1/zz_apptriggercustom_types.go +++ b/apis/logic/v1beta1/zz_apptriggercustom_types.go @@ -38,7 +38,7 @@ type AppTriggerCustomParameters struct { Body *string `json:"body,omitempty" tf:"body,omitempty"` // Specifies the ID of the Logic App Workflow. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/logic/v1beta1.AppWorkflow + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/logic/v1beta2.AppWorkflow // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional LogicAppID *string `json:"logicAppId,omitempty" tf:"logic_app_id,omitempty"` diff --git a/apis/logic/v1beta1/zz_apptriggerhttprequest_types.go b/apis/logic/v1beta1/zz_apptriggerhttprequest_types.go index 9eb40446e..c3812c092 100755 --- a/apis/logic/v1beta1/zz_apptriggerhttprequest_types.go +++ b/apis/logic/v1beta1/zz_apptriggerhttprequest_types.go @@ -49,7 +49,7 @@ type AppTriggerHTTPRequestObservation struct { type AppTriggerHTTPRequestParameters struct { // Specifies the ID of the Logic App Workflow. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/logic/v1beta1.AppWorkflow + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/logic/v1beta2.AppWorkflow // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional LogicAppID *string `json:"logicAppId,omitempty" tf:"logic_app_id,omitempty"` diff --git a/apis/logic/v1beta1/zz_generated.conversion_hubs.go b/apis/logic/v1beta1/zz_generated.conversion_hubs.go index 9bd472e62..d762a8f46 100755 --- a/apis/logic/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/logic/v1beta1/zz_generated.conversion_hubs.go @@ -6,9 +6,6 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *IntegrationServiceEnvironment) Hub() {} - // Hub marks this type as a conversion hub. func (tr *AppActionCustom) Hub() {} @@ -18,9 +15,6 @@ func (tr *AppActionHTTP) Hub() {} // Hub marks this type as a conversion hub. func (tr *AppIntegrationAccount) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *AppIntegrationAccountBatchConfiguration) Hub() {} - // Hub marks this type as a conversion hub. func (tr *AppIntegrationAccountPartner) Hub() {} @@ -37,7 +31,4 @@ func (tr *AppTriggerCustom) Hub() {} func (tr *AppTriggerHTTPRequest) Hub() {} // Hub marks this type as a conversion hub. -func (tr *AppTriggerRecurrence) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *AppWorkflow) Hub() {} +func (tr *IntegrationServiceEnvironment) Hub() {} diff --git a/apis/logic/v1beta1/zz_generated.conversion_spokes.go b/apis/logic/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..860e2e3c9 --- /dev/null +++ b/apis/logic/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,74 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this AppIntegrationAccountBatchConfiguration to the hub type. +func (tr *AppIntegrationAccountBatchConfiguration) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the AppIntegrationAccountBatchConfiguration type. +func (tr *AppIntegrationAccountBatchConfiguration) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this AppTriggerRecurrence to the hub type. +func (tr *AppTriggerRecurrence) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the AppTriggerRecurrence type. 
+func (tr *AppTriggerRecurrence) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this AppWorkflow to the hub type. +func (tr *AppWorkflow) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the AppWorkflow type. +func (tr *AppWorkflow) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/logic/v1beta1/zz_generated.resolvers.go b/apis/logic/v1beta1/zz_generated.resolvers.go index 0026a9aa2..a9974509d 100644 --- a/apis/logic/v1beta1/zz_generated.resolvers.go +++ b/apis/logic/v1beta1/zz_generated.resolvers.go @@ -26,7 +26,7 @@ func (mg *AppActionCustom) ResolveReferences( // ResolveReferences of this AppAc var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("logic.azure.upbound.io", "v1beta1", "AppWorkflow", "AppWorkflowList") + m, l, err = apisresolver.GetManagedResource("logic.azure.upbound.io", "v1beta2", 
"AppWorkflow", "AppWorkflowList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -57,7 +57,7 @@ func (mg *AppActionHTTP) ResolveReferences(ctx context.Context, c client.Reader) var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("logic.azure.upbound.io", "v1beta1", "AppWorkflow", "AppWorkflowList") + m, l, err = apisresolver.GetManagedResource("logic.azure.upbound.io", "v1beta2", "AppWorkflow", "AppWorkflowList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -376,7 +376,7 @@ func (mg *AppTriggerCustom) ResolveReferences(ctx context.Context, c client.Read var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("logic.azure.upbound.io", "v1beta1", "AppWorkflow", "AppWorkflowList") + m, l, err = apisresolver.GetManagedResource("logic.azure.upbound.io", "v1beta2", "AppWorkflow", "AppWorkflowList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -407,7 +407,7 @@ func (mg *AppTriggerHTTPRequest) ResolveReferences(ctx context.Context, c client var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("logic.azure.upbound.io", "v1beta1", "AppWorkflow", "AppWorkflowList") + m, l, err = apisresolver.GetManagedResource("logic.azure.upbound.io", "v1beta2", "AppWorkflow", "AppWorkflowList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -520,7 +520,7 @@ func (mg *IntegrationServiceEnvironment) ResolveReferences(ctx context.Context, mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = 
apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "Subnet", "SubnetList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -539,7 +539,7 @@ func (mg *IntegrationServiceEnvironment) ResolveReferences(ctx context.Context, mg.Spec.ForProvider.VirtualNetworkSubnetIds = reference.ToPtrValues(mrsp.ResolvedValues) mg.Spec.ForProvider.VirtualNetworkSubnetIdsRefs = mrsp.ResolvedReferences { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "Subnet", "SubnetList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/logic/v1beta1/zz_integrationserviceenvironment_types.go b/apis/logic/v1beta1/zz_integrationserviceenvironment_types.go index 2fbf6ccbb..fcfb706e7 100755 --- a/apis/logic/v1beta1/zz_integrationserviceenvironment_types.go +++ b/apis/logic/v1beta1/zz_integrationserviceenvironment_types.go @@ -29,7 +29,7 @@ type IntegrationServiceEnvironmentInitParameters struct { Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` // A list of virtual network subnet ids to be used by Integration Service Environment. Exactly four distinct ids to /27 subnets must be provided. Changing this forces a new Integration Service Environment to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.Subnet + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("id",true) // +listType=set VirtualNetworkSubnetIds []*string `json:"virtualNetworkSubnetIds,omitempty" tf:"virtual_network_subnet_ids,omitempty"` @@ -114,7 +114,7 @@ type IntegrationServiceEnvironmentParameters struct { Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` // A list of virtual network subnet ids to be used by Integration Service Environment. Exactly four distinct ids to /27 subnets must be provided. Changing this forces a new Integration Service Environment to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.Subnet + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("id",true) // +kubebuilder:validation:Optional // +listType=set diff --git a/apis/logic/v1beta2/zz_appintegrationaccountbatchconfiguration_terraformed.go b/apis/logic/v1beta2/zz_appintegrationaccountbatchconfiguration_terraformed.go new file mode 100755 index 000000000..1d47f92fb --- /dev/null +++ b/apis/logic/v1beta2/zz_appintegrationaccountbatchconfiguration_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this AppIntegrationAccountBatchConfiguration +func (mg *AppIntegrationAccountBatchConfiguration) GetTerraformResourceType() string { + return "azurerm_logic_app_integration_account_batch_configuration" +} + +// GetConnectionDetailsMapping for this AppIntegrationAccountBatchConfiguration +func (tr *AppIntegrationAccountBatchConfiguration) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this AppIntegrationAccountBatchConfiguration +func (tr *AppIntegrationAccountBatchConfiguration) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this AppIntegrationAccountBatchConfiguration +func (tr *AppIntegrationAccountBatchConfiguration) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this AppIntegrationAccountBatchConfiguration +func (tr *AppIntegrationAccountBatchConfiguration) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this AppIntegrationAccountBatchConfiguration +func (tr *AppIntegrationAccountBatchConfiguration) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this AppIntegrationAccountBatchConfiguration +func (tr 
*AppIntegrationAccountBatchConfiguration) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this AppIntegrationAccountBatchConfiguration +func (tr *AppIntegrationAccountBatchConfiguration) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this AppIntegrationAccountBatchConfiguration +func (tr *AppIntegrationAccountBatchConfiguration) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this AppIntegrationAccountBatchConfiguration using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *AppIntegrationAccountBatchConfiguration) LateInitialize(attrs []byte) (bool, error) { + params := &AppIntegrationAccountBatchConfigurationParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *AppIntegrationAccountBatchConfiguration) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/logic/v1beta2/zz_appintegrationaccountbatchconfiguration_types.go b/apis/logic/v1beta2/zz_appintegrationaccountbatchconfiguration_types.go new file mode 100755 index 000000000..152da08b5 --- /dev/null +++ b/apis/logic/v1beta2/zz_appintegrationaccountbatchconfiguration_types.go @@ -0,0 +1,398 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AppIntegrationAccountBatchConfigurationInitParameters struct { + + // The batch group name of the Logic App Integration Batch Configuration. Changing this forces a new resource to be created. + BatchGroupName *string `json:"batchGroupName,omitempty" tf:"batch_group_name,omitempty"` + + // The name of the Logic App Integration Account. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/logic/v1beta1.AppIntegrationAccount + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("name",false) + IntegrationAccountName *string `json:"integrationAccountName,omitempty" tf:"integration_account_name,omitempty"` + + // Reference to a AppIntegrationAccount in logic to populate integrationAccountName. + // +kubebuilder:validation:Optional + IntegrationAccountNameRef *v1.Reference `json:"integrationAccountNameRef,omitempty" tf:"-"` + + // Selector for a AppIntegrationAccount in logic to populate integrationAccountName. + // +kubebuilder:validation:Optional + IntegrationAccountNameSelector *v1.Selector `json:"integrationAccountNameSelector,omitempty" tf:"-"` + + // A JSON mapping of any Metadata for this Logic App Integration Account Batch Configuration. + // +mapType=granular + Metadata map[string]*string `json:"metadata,omitempty" tf:"metadata,omitempty"` + + // The name which should be used for this Logic App Integration Account Batch Configuration. Only Alphanumeric characters allowed. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A release_criteria block as documented below, which is used to select the criteria to meet before processing each batch. + ReleaseCriteria *ReleaseCriteriaInitParameters `json:"releaseCriteria,omitempty" tf:"release_criteria,omitempty"` + + // The name of the Resource Group where the Logic App Integration Account Batch Configuration should exist. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. 
+ // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` +} + +type AppIntegrationAccountBatchConfigurationObservation struct { + + // The batch group name of the Logic App Integration Batch Configuration. Changing this forces a new resource to be created. + BatchGroupName *string `json:"batchGroupName,omitempty" tf:"batch_group_name,omitempty"` + + // The ID of the Logic App Integration Account Batch Configuration. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name of the Logic App Integration Account. Changing this forces a new resource to be created. + IntegrationAccountName *string `json:"integrationAccountName,omitempty" tf:"integration_account_name,omitempty"` + + // A JSON mapping of any Metadata for this Logic App Integration Account Batch Configuration. + // +mapType=granular + Metadata map[string]*string `json:"metadata,omitempty" tf:"metadata,omitempty"` + + // The name which should be used for this Logic App Integration Account Batch Configuration. Only Alphanumeric characters allowed. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A release_criteria block as documented below, which is used to select the criteria to meet before processing each batch. + ReleaseCriteria *ReleaseCriteriaObservation `json:"releaseCriteria,omitempty" tf:"release_criteria,omitempty"` + + // The name of the Resource Group where the Logic App Integration Account Batch Configuration should exist. Changing this forces a new resource to be created. 
+ ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` +} + +type AppIntegrationAccountBatchConfigurationParameters struct { + + // The batch group name of the Logic App Integration Batch Configuration. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + BatchGroupName *string `json:"batchGroupName,omitempty" tf:"batch_group_name,omitempty"` + + // The name of the Logic App Integration Account. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/logic/v1beta1.AppIntegrationAccount + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("name",false) + // +kubebuilder:validation:Optional + IntegrationAccountName *string `json:"integrationAccountName,omitempty" tf:"integration_account_name,omitempty"` + + // Reference to a AppIntegrationAccount in logic to populate integrationAccountName. + // +kubebuilder:validation:Optional + IntegrationAccountNameRef *v1.Reference `json:"integrationAccountNameRef,omitempty" tf:"-"` + + // Selector for a AppIntegrationAccount in logic to populate integrationAccountName. + // +kubebuilder:validation:Optional + IntegrationAccountNameSelector *v1.Selector `json:"integrationAccountNameSelector,omitempty" tf:"-"` + + // A JSON mapping of any Metadata for this Logic App Integration Account Batch Configuration. + // +kubebuilder:validation:Optional + // +mapType=granular + Metadata map[string]*string `json:"metadata,omitempty" tf:"metadata,omitempty"` + + // The name which should be used for this Logic App Integration Account Batch Configuration. Only Alphanumeric characters allowed. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A release_criteria block as documented below, which is used to select the criteria to meet before processing each batch. + // +kubebuilder:validation:Optional + ReleaseCriteria *ReleaseCriteriaParameters `json:"releaseCriteria,omitempty" tf:"release_criteria,omitempty"` + + // The name of the Resource Group where the Logic App Integration Account Batch Configuration should exist. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` +} + +type MonthlyInitParameters struct { + + // The occurrence of the week within the month. + Week *float64 `json:"week,omitempty" tf:"week,omitempty"` + + // The day of the occurrence. Possible values are Sunday, Monday, Tuesday, Wednesday, Thursday, Friday and Saturday. + Weekday *string `json:"weekday,omitempty" tf:"weekday,omitempty"` +} + +type MonthlyObservation struct { + + // The occurrence of the week within the month. + Week *float64 `json:"week,omitempty" tf:"week,omitempty"` + + // The day of the occurrence. Possible values are Sunday, Monday, Tuesday, Wednesday, Thursday, Friday and Saturday. + Weekday *string `json:"weekday,omitempty" tf:"weekday,omitempty"` +} + +type MonthlyParameters struct { + + // The occurrence of the week within the month. 
+ // +kubebuilder:validation:Optional + Week *float64 `json:"week" tf:"week,omitempty"` + + // The day of the occurrence. Possible values are Sunday, Monday, Tuesday, Wednesday, Thursday, Friday and Saturday. + // +kubebuilder:validation:Optional + Weekday *string `json:"weekday" tf:"weekday,omitempty"` +} + +type RecurrenceInitParameters struct { + + // The end time of the schedule, formatted as an RFC3339 string. + EndTime *string `json:"endTime,omitempty" tf:"end_time,omitempty"` + + // The frequency of the schedule. Possible values are Day, Hour, Minute, Month, NotSpecified, Second, Week and Year. + Frequency *string `json:"frequency,omitempty" tf:"frequency,omitempty"` + + // The number of frequencys between runs. + Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` + + // A schedule block as documented below. + Schedule *ScheduleInitParameters `json:"schedule,omitempty" tf:"schedule,omitempty"` + + // The start time of the schedule, formatted as an RFC3339 string. + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` + + // The timezone of the start/end time. + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +type RecurrenceObservation struct { + + // The end time of the schedule, formatted as an RFC3339 string. + EndTime *string `json:"endTime,omitempty" tf:"end_time,omitempty"` + + // The frequency of the schedule. Possible values are Day, Hour, Minute, Month, NotSpecified, Second, Week and Year. + Frequency *string `json:"frequency,omitempty" tf:"frequency,omitempty"` + + // The number of frequencys between runs. + Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` + + // A schedule block as documented below. + Schedule *ScheduleObservation `json:"schedule,omitempty" tf:"schedule,omitempty"` + + // The start time of the schedule, formatted as an RFC3339 string. 
+ StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` + + // The timezone of the start/end time. + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +type RecurrenceParameters struct { + + // The end time of the schedule, formatted as an RFC3339 string. + // +kubebuilder:validation:Optional + EndTime *string `json:"endTime,omitempty" tf:"end_time,omitempty"` + + // The frequency of the schedule. Possible values are Day, Hour, Minute, Month, NotSpecified, Second, Week and Year. + // +kubebuilder:validation:Optional + Frequency *string `json:"frequency" tf:"frequency,omitempty"` + + // The number of frequencys between runs. + // +kubebuilder:validation:Optional + Interval *float64 `json:"interval" tf:"interval,omitempty"` + + // A schedule block as documented below. + // +kubebuilder:validation:Optional + Schedule *ScheduleParameters `json:"schedule,omitempty" tf:"schedule,omitempty"` + + // The start time of the schedule, formatted as an RFC3339 string. + // +kubebuilder:validation:Optional + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` + + // The timezone of the start/end time. + // +kubebuilder:validation:Optional + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +type ReleaseCriteriaInitParameters struct { + + // The batch size in bytes for the Logic App Integration Batch Configuration. + BatchSize *float64 `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + // The message count for the Logic App Integration Batch Configuration. + MessageCount *float64 `json:"messageCount,omitempty" tf:"message_count,omitempty"` + + // A recurrence block as documented below. + Recurrence *RecurrenceInitParameters `json:"recurrence,omitempty" tf:"recurrence,omitempty"` +} + +type ReleaseCriteriaObservation struct { + + // The batch size in bytes for the Logic App Integration Batch Configuration. 
+ BatchSize *float64 `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + // The message count for the Logic App Integration Batch Configuration. + MessageCount *float64 `json:"messageCount,omitempty" tf:"message_count,omitempty"` + + // A recurrence block as documented below. + Recurrence *RecurrenceObservation `json:"recurrence,omitempty" tf:"recurrence,omitempty"` +} + +type ReleaseCriteriaParameters struct { + + // The batch size in bytes for the Logic App Integration Batch Configuration. + // +kubebuilder:validation:Optional + BatchSize *float64 `json:"batchSize,omitempty" tf:"batch_size,omitempty"` + + // The message count for the Logic App Integration Batch Configuration. + // +kubebuilder:validation:Optional + MessageCount *float64 `json:"messageCount,omitempty" tf:"message_count,omitempty"` + + // A recurrence block as documented below. + // +kubebuilder:validation:Optional + Recurrence *RecurrenceParameters `json:"recurrence,omitempty" tf:"recurrence,omitempty"` +} + +type ScheduleInitParameters struct { + + // A list containing a single item, which specifies the Hour interval at which this recurrence should be triggered. + // +listType=set + Hours []*float64 `json:"hours,omitempty" tf:"hours,omitempty"` + + // A list containing a single item which specifies the Minute interval at which this recurrence should be triggered. + // +listType=set + Minutes []*float64 `json:"minutes,omitempty" tf:"minutes,omitempty"` + + // A list of days of the month that the job should execute on. + // +listType=set + MonthDays []*float64 `json:"monthDays,omitempty" tf:"month_days,omitempty"` + + // A monthly block as documented below. + Monthly []MonthlyInitParameters `json:"monthly,omitempty" tf:"monthly,omitempty"` + + // A list of days of the week that the job should execute on. Possible values are Sunday, Monday, Tuesday, Wednesday, Thursday, Friday and Saturday. 
+ // +listType=set + WeekDays []*string `json:"weekDays,omitempty" tf:"week_days,omitempty"` +} + +type ScheduleObservation struct { + + // A list containing a single item, which specifies the Hour interval at which this recurrence should be triggered. + // +listType=set + Hours []*float64 `json:"hours,omitempty" tf:"hours,omitempty"` + + // A list containing a single item which specifies the Minute interval at which this recurrence should be triggered. + // +listType=set + Minutes []*float64 `json:"minutes,omitempty" tf:"minutes,omitempty"` + + // A list of days of the month that the job should execute on. + // +listType=set + MonthDays []*float64 `json:"monthDays,omitempty" tf:"month_days,omitempty"` + + // A monthly block as documented below. + Monthly []MonthlyObservation `json:"monthly,omitempty" tf:"monthly,omitempty"` + + // A list of days of the week that the job should execute on. Possible values are Sunday, Monday, Tuesday, Wednesday, Thursday, Friday and Saturday. + // +listType=set + WeekDays []*string `json:"weekDays,omitempty" tf:"week_days,omitempty"` +} + +type ScheduleParameters struct { + + // A list containing a single item, which specifies the Hour interval at which this recurrence should be triggered. + // +kubebuilder:validation:Optional + // +listType=set + Hours []*float64 `json:"hours,omitempty" tf:"hours,omitempty"` + + // A list containing a single item which specifies the Minute interval at which this recurrence should be triggered. + // +kubebuilder:validation:Optional + // +listType=set + Minutes []*float64 `json:"minutes,omitempty" tf:"minutes,omitempty"` + + // A list of days of the month that the job should execute on. + // +kubebuilder:validation:Optional + // +listType=set + MonthDays []*float64 `json:"monthDays,omitempty" tf:"month_days,omitempty"` + + // A monthly block as documented below. 
+ // +kubebuilder:validation:Optional + Monthly []MonthlyParameters `json:"monthly,omitempty" tf:"monthly,omitempty"` + + // A list of days of the week that the job should execute on. Possible values are Sunday, Monday, Tuesday, Wednesday, Thursday, Friday and Saturday. + // +kubebuilder:validation:Optional + // +listType=set + WeekDays []*string `json:"weekDays,omitempty" tf:"week_days,omitempty"` +} + +// AppIntegrationAccountBatchConfigurationSpec defines the desired state of AppIntegrationAccountBatchConfiguration +type AppIntegrationAccountBatchConfigurationSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider AppIntegrationAccountBatchConfigurationParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider AppIntegrationAccountBatchConfigurationInitParameters `json:"initProvider,omitempty"` +} + +// AppIntegrationAccountBatchConfigurationStatus defines the observed state of AppIntegrationAccountBatchConfiguration. +type AppIntegrationAccountBatchConfigurationStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider AppIntegrationAccountBatchConfigurationObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// AppIntegrationAccountBatchConfiguration is the Schema for the AppIntegrationAccountBatchConfigurations API. 
Manages a Logic App Integration Account Batch Configuration. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type AppIntegrationAccountBatchConfiguration struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.batchGroupName) || (has(self.initProvider) && has(self.initProvider.batchGroupName))",message="spec.forProvider.batchGroupName is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.releaseCriteria) || (has(self.initProvider) && has(self.initProvider.releaseCriteria))",message="spec.forProvider.releaseCriteria is a required parameter" + Spec AppIntegrationAccountBatchConfigurationSpec `json:"spec"` + Status AppIntegrationAccountBatchConfigurationStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// AppIntegrationAccountBatchConfigurationList contains a list of AppIntegrationAccountBatchConfigurations 
+type AppIntegrationAccountBatchConfigurationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []AppIntegrationAccountBatchConfiguration `json:"items"` +} + +// Repository type metadata. +var ( + AppIntegrationAccountBatchConfiguration_Kind = "AppIntegrationAccountBatchConfiguration" + AppIntegrationAccountBatchConfiguration_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: AppIntegrationAccountBatchConfiguration_Kind}.String() + AppIntegrationAccountBatchConfiguration_KindAPIVersion = AppIntegrationAccountBatchConfiguration_Kind + "." + CRDGroupVersion.String() + AppIntegrationAccountBatchConfiguration_GroupVersionKind = CRDGroupVersion.WithKind(AppIntegrationAccountBatchConfiguration_Kind) +) + +func init() { + SchemeBuilder.Register(&AppIntegrationAccountBatchConfiguration{}, &AppIntegrationAccountBatchConfigurationList{}) +} diff --git a/apis/logic/v1beta2/zz_apptriggerrecurrence_terraformed.go b/apis/logic/v1beta2/zz_apptriggerrecurrence_terraformed.go new file mode 100755 index 000000000..7679a8b37 --- /dev/null +++ b/apis/logic/v1beta2/zz_apptriggerrecurrence_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this AppTriggerRecurrence +func (mg *AppTriggerRecurrence) GetTerraformResourceType() string { + return "azurerm_logic_app_trigger_recurrence" +} + +// GetConnectionDetailsMapping for this AppTriggerRecurrence +func (tr *AppTriggerRecurrence) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this AppTriggerRecurrence +func (tr *AppTriggerRecurrence) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this AppTriggerRecurrence +func (tr *AppTriggerRecurrence) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this AppTriggerRecurrence +func (tr *AppTriggerRecurrence) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this AppTriggerRecurrence +func (tr *AppTriggerRecurrence) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this AppTriggerRecurrence +func (tr *AppTriggerRecurrence) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this AppTriggerRecurrence +func (tr *AppTriggerRecurrence) 
GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this AppTriggerRecurrence +func (tr *AppTriggerRecurrence) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this AppTriggerRecurrence using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *AppTriggerRecurrence) LateInitialize(attrs []byte) (bool, error) { + params := &AppTriggerRecurrenceParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *AppTriggerRecurrence) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/logic/v1beta2/zz_apptriggerrecurrence_types.go b/apis/logic/v1beta2/zz_apptriggerrecurrence_types.go new file mode 100755 index 000000000..891cfc2dd --- /dev/null +++ b/apis/logic/v1beta2/zz_apptriggerrecurrence_types.go @@ -0,0 +1,203 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AppTriggerRecurrenceInitParameters struct { + + // Specifies the Frequency at which this Trigger should be run. Possible values include Month, Week, Day, Hour, Minute and Second. + Frequency *string `json:"frequency,omitempty" tf:"frequency,omitempty"` + + // Specifies interval used for the Frequency, for example a value of 4 for interval and hour for frequency would run the Trigger every 4 hours. + Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` + + // A schedule block as specified below. + Schedule *AppTriggerRecurrenceScheduleInitParameters `json:"schedule,omitempty" tf:"schedule,omitempty"` + + // Specifies the start date and time for this trigger in RFC3339 format: 2000-01-02T03:04:05Z. + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` + + // Specifies the time zone for this trigger. Supported time zone options are listed here + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +type AppTriggerRecurrenceObservation struct { + + // Specifies the Frequency at which this Trigger should be run. Possible values include Month, Week, Day, Hour, Minute and Second. 
+ Frequency *string `json:"frequency,omitempty" tf:"frequency,omitempty"` + + // The ID of the Recurrence Trigger within the Logic App Workflow. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies interval used for the Frequency, for example a value of 4 for interval and hour for frequency would run the Trigger every 4 hours. + Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` + + // Specifies the ID of the Logic App Workflow. Changing this forces a new resource to be created. + LogicAppID *string `json:"logicAppId,omitempty" tf:"logic_app_id,omitempty"` + + // A schedule block as specified below. + Schedule *AppTriggerRecurrenceScheduleObservation `json:"schedule,omitempty" tf:"schedule,omitempty"` + + // Specifies the start date and time for this trigger in RFC3339 format: 2000-01-02T03:04:05Z. + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` + + // Specifies the time zone for this trigger. Supported time zone options are listed here + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +type AppTriggerRecurrenceParameters struct { + + // Specifies the Frequency at which this Trigger should be run. Possible values include Month, Week, Day, Hour, Minute and Second. + // +kubebuilder:validation:Optional + Frequency *string `json:"frequency,omitempty" tf:"frequency,omitempty"` + + // Specifies interval used for the Frequency, for example a value of 4 for interval and hour for frequency would run the Trigger every 4 hours. + // +kubebuilder:validation:Optional + Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` + + // Specifies the ID of the Logic App Workflow. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/logic/v1beta2.AppWorkflow + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + LogicAppID *string `json:"logicAppId,omitempty" tf:"logic_app_id,omitempty"` + + // Reference to a AppWorkflow in logic to populate logicAppId. + // +kubebuilder:validation:Optional + LogicAppIDRef *v1.Reference `json:"logicAppIdRef,omitempty" tf:"-"` + + // Selector for a AppWorkflow in logic to populate logicAppId. + // +kubebuilder:validation:Optional + LogicAppIDSelector *v1.Selector `json:"logicAppIdSelector,omitempty" tf:"-"` + + // A schedule block as specified below. + // +kubebuilder:validation:Optional + Schedule *AppTriggerRecurrenceScheduleParameters `json:"schedule,omitempty" tf:"schedule,omitempty"` + + // Specifies the start date and time for this trigger in RFC3339 format: 2000-01-02T03:04:05Z. + // +kubebuilder:validation:Optional + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` + + // Specifies the time zone for this trigger. Supported time zone options are listed here + // +kubebuilder:validation:Optional + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +type AppTriggerRecurrenceScheduleInitParameters struct { + + // Specifies a list of hours when the trigger should run. Valid values are between 0 and 23. + // +listType=set + AtTheseHours []*float64 `json:"atTheseHours,omitempty" tf:"at_these_hours,omitempty"` + + // Specifies a list of minutes when the trigger should run. Valid values are between 0 and 59. + // +listType=set + AtTheseMinutes []*float64 `json:"atTheseMinutes,omitempty" tf:"at_these_minutes,omitempty"` + + // Specifies a list of days when the trigger should run. Valid values include Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, and Sunday. 
+ // +listType=set + OnTheseDays []*string `json:"onTheseDays,omitempty" tf:"on_these_days,omitempty"` +} + +type AppTriggerRecurrenceScheduleObservation struct { + + // Specifies a list of hours when the trigger should run. Valid values are between 0 and 23. + // +listType=set + AtTheseHours []*float64 `json:"atTheseHours,omitempty" tf:"at_these_hours,omitempty"` + + // Specifies a list of minutes when the trigger should run. Valid values are between 0 and 59. + // +listType=set + AtTheseMinutes []*float64 `json:"atTheseMinutes,omitempty" tf:"at_these_minutes,omitempty"` + + // Specifies a list of days when the trigger should run. Valid values include Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, and Sunday. + // +listType=set + OnTheseDays []*string `json:"onTheseDays,omitempty" tf:"on_these_days,omitempty"` +} + +type AppTriggerRecurrenceScheduleParameters struct { + + // Specifies a list of hours when the trigger should run. Valid values are between 0 and 23. + // +kubebuilder:validation:Optional + // +listType=set + AtTheseHours []*float64 `json:"atTheseHours,omitempty" tf:"at_these_hours,omitempty"` + + // Specifies a list of minutes when the trigger should run. Valid values are between 0 and 59. + // +kubebuilder:validation:Optional + // +listType=set + AtTheseMinutes []*float64 `json:"atTheseMinutes,omitempty" tf:"at_these_minutes,omitempty"` + + // Specifies a list of days when the trigger should run. Valid values include Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, and Sunday. + // +kubebuilder:validation:Optional + // +listType=set + OnTheseDays []*string `json:"onTheseDays,omitempty" tf:"on_these_days,omitempty"` +} + +// AppTriggerRecurrenceSpec defines the desired state of AppTriggerRecurrence +type AppTriggerRecurrenceSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider AppTriggerRecurrenceParameters `json:"forProvider"` + // THIS IS A BETA FIELD. 
It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider AppTriggerRecurrenceInitParameters `json:"initProvider,omitempty"` +} + +// AppTriggerRecurrenceStatus defines the observed state of AppTriggerRecurrence. +type AppTriggerRecurrenceStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider AppTriggerRecurrenceObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// AppTriggerRecurrence is the Schema for the AppTriggerRecurrences API. 
Manages a Recurrence Trigger within a Logic App Workflow +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type AppTriggerRecurrence struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.frequency) || (has(self.initProvider) && has(self.initProvider.frequency))",message="spec.forProvider.frequency is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.interval) || (has(self.initProvider) && has(self.initProvider.interval))",message="spec.forProvider.interval is a required parameter" + Spec AppTriggerRecurrenceSpec `json:"spec"` + Status AppTriggerRecurrenceStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// AppTriggerRecurrenceList contains a list of AppTriggerRecurrences +type AppTriggerRecurrenceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []AppTriggerRecurrence `json:"items"` +} + +// Repository type metadata. 
+var ( + AppTriggerRecurrence_Kind = "AppTriggerRecurrence" + AppTriggerRecurrence_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: AppTriggerRecurrence_Kind}.String() + AppTriggerRecurrence_KindAPIVersion = AppTriggerRecurrence_Kind + "." + CRDGroupVersion.String() + AppTriggerRecurrence_GroupVersionKind = CRDGroupVersion.WithKind(AppTriggerRecurrence_Kind) +) + +func init() { + SchemeBuilder.Register(&AppTriggerRecurrence{}, &AppTriggerRecurrenceList{}) +} diff --git a/apis/logic/v1beta2/zz_appworkflow_terraformed.go b/apis/logic/v1beta2/zz_appworkflow_terraformed.go new file mode 100755 index 000000000..3a3d27122 --- /dev/null +++ b/apis/logic/v1beta2/zz_appworkflow_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this AppWorkflow +func (mg *AppWorkflow) GetTerraformResourceType() string { + return "azurerm_logic_app_workflow" +} + +// GetConnectionDetailsMapping for this AppWorkflow +func (tr *AppWorkflow) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this AppWorkflow +func (tr *AppWorkflow) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this AppWorkflow +func (tr *AppWorkflow) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this AppWorkflow +func (tr *AppWorkflow) 
GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this AppWorkflow +func (tr *AppWorkflow) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this AppWorkflow +func (tr *AppWorkflow) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this AppWorkflow +func (tr *AppWorkflow) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this AppWorkflow +func (tr *AppWorkflow) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this AppWorkflow using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *AppWorkflow) LateInitialize(attrs []byte) (bool, error) { + params := &AppWorkflowParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *AppWorkflow) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/logic/v1beta2/zz_appworkflow_types.go b/apis/logic/v1beta2/zz_appworkflow_types.go new file mode 100755 index 000000000..41e250583 --- /dev/null +++ b/apis/logic/v1beta2/zz_appworkflow_types.go @@ -0,0 +1,480 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AccessControlInitParameters struct { + + // A action block as defined below. + Action *ActionInitParameters `json:"action,omitempty" tf:"action,omitempty"` + + // A content block as defined below. + Content *ContentInitParameters `json:"content,omitempty" tf:"content,omitempty"` + + // A trigger block as defined below. 
+ Trigger *TriggerInitParameters `json:"trigger,omitempty" tf:"trigger,omitempty"` + + // A workflow_management block as defined below. + WorkflowManagement *WorkflowManagementInitParameters `json:"workflowManagement,omitempty" tf:"workflow_management,omitempty"` +} + +type AccessControlObservation struct { + + // A action block as defined below. + Action *ActionObservation `json:"action,omitempty" tf:"action,omitempty"` + + // A content block as defined below. + Content *ContentObservation `json:"content,omitempty" tf:"content,omitempty"` + + // A trigger block as defined below. + Trigger *TriggerObservation `json:"trigger,omitempty" tf:"trigger,omitempty"` + + // A workflow_management block as defined below. + WorkflowManagement *WorkflowManagementObservation `json:"workflowManagement,omitempty" tf:"workflow_management,omitempty"` +} + +type AccessControlParameters struct { + + // A action block as defined below. + // +kubebuilder:validation:Optional + Action *ActionParameters `json:"action,omitempty" tf:"action,omitempty"` + + // A content block as defined below. + // +kubebuilder:validation:Optional + Content *ContentParameters `json:"content,omitempty" tf:"content,omitempty"` + + // A trigger block as defined below. + // +kubebuilder:validation:Optional + Trigger *TriggerParameters `json:"trigger,omitempty" tf:"trigger,omitempty"` + + // A workflow_management block as defined below. + // +kubebuilder:validation:Optional + WorkflowManagement *WorkflowManagementParameters `json:"workflowManagement,omitempty" tf:"workflow_management,omitempty"` +} + +type ActionInitParameters struct { + + // A list of the allowed caller IP address ranges. + // +listType=set + AllowedCallerIPAddressRange []*string `json:"allowedCallerIpAddressRange,omitempty" tf:"allowed_caller_ip_address_range,omitempty"` +} + +type ActionObservation struct { + + // A list of the allowed caller IP address ranges. 
+ // +listType=set + AllowedCallerIPAddressRange []*string `json:"allowedCallerIpAddressRange,omitempty" tf:"allowed_caller_ip_address_range,omitempty"` +} + +type ActionParameters struct { + + // A list of the allowed caller IP address ranges. + // +kubebuilder:validation:Optional + // +listType=set + AllowedCallerIPAddressRange []*string `json:"allowedCallerIpAddressRange" tf:"allowed_caller_ip_address_range,omitempty"` +} + +type AppWorkflowInitParameters struct { + + // A access_control block as defined below. + AccessControl *AccessControlInitParameters `json:"accessControl,omitempty" tf:"access_control,omitempty"` + + // Is the Logic App Workflow enabled? Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // An identity block as defined below. + Identity *IdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The ID of the Integration Service Environment to which this Logic App Workflow belongs. Changing this forces a new Logic App Workflow to be created. + IntegrationServiceEnvironmentID *string `json:"integrationServiceEnvironmentId,omitempty" tf:"integration_service_environment_id,omitempty"` + + // Specifies the supported Azure location where the Logic App Workflow exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The ID of the integration account linked by this Logic App Workflow. + LogicAppIntegrationAccountID *string `json:"logicAppIntegrationAccountId,omitempty" tf:"logic_app_integration_account_id,omitempty"` + + // A map of Key-Value pairs. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies a map of Key-Value pairs of the Parameter Definitions to use for this Logic App Workflow. 
The key is the parameter name, and the value is a JSON encoded string of the parameter definition (see: https://docs.microsoft.com/azure/logic-apps/logic-apps-workflow-definition-language#parameters). + // +mapType=granular + WorkflowParameters map[string]*string `json:"workflowParameters,omitempty" tf:"workflow_parameters,omitempty"` + + // Specifies the Schema to use for this Logic App Workflow. Defaults to https://schema.management.azure.com/providers/Microsoft.Logic/schemas/2016-06-01/workflowdefinition.json#. Changing this forces a new resource to be created. + WorkflowSchema *string `json:"workflowSchema,omitempty" tf:"workflow_schema,omitempty"` + + // Specifies the version of the Schema used for this Logic App Workflow. Defaults to 1.0.0.0. Changing this forces a new resource to be created. + WorkflowVersion *string `json:"workflowVersion,omitempty" tf:"workflow_version,omitempty"` +} + +type AppWorkflowObservation struct { + + // A access_control block as defined below. + AccessControl *AccessControlObservation `json:"accessControl,omitempty" tf:"access_control,omitempty"` + + // The Access Endpoint for the Logic App Workflow. + AccessEndpoint *string `json:"accessEndpoint,omitempty" tf:"access_endpoint,omitempty"` + + // The list of access endpoint IP addresses of connector. + ConnectorEndpointIPAddresses []*string `json:"connectorEndpointIpAddresses,omitempty" tf:"connector_endpoint_ip_addresses,omitempty"` + + // The list of outgoing IP addresses of connector. + ConnectorOutboundIPAddresses []*string `json:"connectorOutboundIpAddresses,omitempty" tf:"connector_outbound_ip_addresses,omitempty"` + + // Is the Logic App Workflow enabled? Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The Logic App Workflow ID. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. 
+ Identity *IdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // The ID of the Integration Service Environment to which this Logic App Workflow belongs. Changing this forces a new Logic App Workflow to be created. + IntegrationServiceEnvironmentID *string `json:"integrationServiceEnvironmentId,omitempty" tf:"integration_service_environment_id,omitempty"` + + // Specifies the supported Azure location where the Logic App Workflow exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The ID of the integration account linked by this Logic App Workflow. + LogicAppIntegrationAccountID *string `json:"logicAppIntegrationAccountId,omitempty" tf:"logic_app_integration_account_id,omitempty"` + + // A map of Key-Value pairs. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The name of the Resource Group in which the Logic App Workflow should be created. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The list of access endpoint IP addresses of workflow. + WorkflowEndpointIPAddresses []*string `json:"workflowEndpointIpAddresses,omitempty" tf:"workflow_endpoint_ip_addresses,omitempty"` + + // The list of outgoing IP addresses of workflow. + WorkflowOutboundIPAddresses []*string `json:"workflowOutboundIpAddresses,omitempty" tf:"workflow_outbound_ip_addresses,omitempty"` + + // Specifies a map of Key-Value pairs of the Parameter Definitions to use for this Logic App Workflow. 
The key is the parameter name, and the value is a JSON encoded string of the parameter definition (see: https://docs.microsoft.com/azure/logic-apps/logic-apps-workflow-definition-language#parameters). + // +mapType=granular + WorkflowParameters map[string]*string `json:"workflowParameters,omitempty" tf:"workflow_parameters,omitempty"` + + // Specifies the Schema to use for this Logic App Workflow. Defaults to https://schema.management.azure.com/providers/Microsoft.Logic/schemas/2016-06-01/workflowdefinition.json#. Changing this forces a new resource to be created. + WorkflowSchema *string `json:"workflowSchema,omitempty" tf:"workflow_schema,omitempty"` + + // Specifies the version of the Schema used for this Logic App Workflow. Defaults to 1.0.0.0. Changing this forces a new resource to be created. + WorkflowVersion *string `json:"workflowVersion,omitempty" tf:"workflow_version,omitempty"` +} + +type AppWorkflowParameters struct { + + // A access_control block as defined below. + // +kubebuilder:validation:Optional + AccessControl *AccessControlParameters `json:"accessControl,omitempty" tf:"access_control,omitempty"` + + // Is the Logic App Workflow enabled? Defaults to true. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *IdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The ID of the Integration Service Environment to which this Logic App Workflow belongs. Changing this forces a new Logic App Workflow to be created. + // +kubebuilder:validation:Optional + IntegrationServiceEnvironmentID *string `json:"integrationServiceEnvironmentId,omitempty" tf:"integration_service_environment_id,omitempty"` + + // Specifies the supported Azure location where the Logic App Workflow exists. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The ID of the integration account linked by this Logic App Workflow. + // +kubebuilder:validation:Optional + LogicAppIntegrationAccountID *string `json:"logicAppIntegrationAccountId,omitempty" tf:"logic_app_integration_account_id,omitempty"` + + // A map of Key-Value pairs. + // +kubebuilder:validation:Optional + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The name of the Resource Group in which the Logic App Workflow should be created. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies a map of Key-Value pairs of the Parameter Definitions to use for this Logic App Workflow. The key is the parameter name, and the value is a JSON encoded string of the parameter definition (see: https://docs.microsoft.com/azure/logic-apps/logic-apps-workflow-definition-language#parameters). 
+ // +kubebuilder:validation:Optional + // +mapType=granular + WorkflowParameters map[string]*string `json:"workflowParameters,omitempty" tf:"workflow_parameters,omitempty"` + + // Specifies the Schema to use for this Logic App Workflow. Defaults to https://schema.management.azure.com/providers/Microsoft.Logic/schemas/2016-06-01/workflowdefinition.json#. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + WorkflowSchema *string `json:"workflowSchema,omitempty" tf:"workflow_schema,omitempty"` + + // Specifies the version of the Schema used for this Logic App Workflow. Defaults to 1.0.0.0. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + WorkflowVersion *string `json:"workflowVersion,omitempty" tf:"workflow_version,omitempty"` +} + +type ClaimInitParameters struct { + + // The OAuth policy name for the Logic App Workflow. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The value of the OAuth policy claim for the Logic App Workflow. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ClaimObservation struct { + + // The OAuth policy name for the Logic App Workflow. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The value of the OAuth policy claim for the Logic App Workflow. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ClaimParameters struct { + + // The OAuth policy name for the Logic App Workflow. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The value of the OAuth policy claim for the Logic App Workflow. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type ContentInitParameters struct { + + // A list of the allowed caller IP address ranges. 
+ // +listType=set + AllowedCallerIPAddressRange []*string `json:"allowedCallerIpAddressRange,omitempty" tf:"allowed_caller_ip_address_range,omitempty"` +} + +type ContentObservation struct { + + // A list of the allowed caller IP address ranges. + // +listType=set + AllowedCallerIPAddressRange []*string `json:"allowedCallerIpAddressRange,omitempty" tf:"allowed_caller_ip_address_range,omitempty"` +} + +type ContentParameters struct { + + // A list of the allowed caller IP address ranges. + // +kubebuilder:validation:Optional + // +listType=set + AllowedCallerIPAddressRange []*string `json:"allowedCallerIpAddressRange" tf:"allowed_caller_ip_address_range,omitempty"` +} + +type IdentityInitParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Logic App Workflow. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Logic App Workflow. Possible values are SystemAssigned, UserAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityObservation struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Logic App Workflow. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Principal ID for the Service Principal associated with the Managed Service Identity of this Logic App Workflow. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID for the Service Principal associated with the Managed Service Identity of this Logic App Workflow. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Logic App Workflow. Possible values are SystemAssigned, UserAssigned. 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Logic App Workflow. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Logic App Workflow. Possible values are SystemAssigned, UserAssigned. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type OpenAuthenticationPolicyInitParameters struct { + + // A claim block as defined below. + Claim []ClaimInitParameters `json:"claim,omitempty" tf:"claim,omitempty"` + + // The OAuth policy name for the Logic App Workflow. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type OpenAuthenticationPolicyObservation struct { + + // A claim block as defined below. + Claim []ClaimObservation `json:"claim,omitempty" tf:"claim,omitempty"` + + // The OAuth policy name for the Logic App Workflow. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type OpenAuthenticationPolicyParameters struct { + + // A claim block as defined below. + // +kubebuilder:validation:Optional + Claim []ClaimParameters `json:"claim" tf:"claim,omitempty"` + + // The OAuth policy name for the Logic App Workflow. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type TriggerInitParameters struct { + + // A list of the allowed caller IP address ranges. + // +listType=set + AllowedCallerIPAddressRange []*string `json:"allowedCallerIpAddressRange,omitempty" tf:"allowed_caller_ip_address_range,omitempty"` + + // A open_authentication_policy block as defined below. 
+ OpenAuthenticationPolicy []OpenAuthenticationPolicyInitParameters `json:"openAuthenticationPolicy,omitempty" tf:"open_authentication_policy,omitempty"` +} + +type TriggerObservation struct { + + // A list of the allowed caller IP address ranges. + // +listType=set + AllowedCallerIPAddressRange []*string `json:"allowedCallerIpAddressRange,omitempty" tf:"allowed_caller_ip_address_range,omitempty"` + + // A open_authentication_policy block as defined below. + OpenAuthenticationPolicy []OpenAuthenticationPolicyObservation `json:"openAuthenticationPolicy,omitempty" tf:"open_authentication_policy,omitempty"` +} + +type TriggerParameters struct { + + // A list of the allowed caller IP address ranges. + // +kubebuilder:validation:Optional + // +listType=set + AllowedCallerIPAddressRange []*string `json:"allowedCallerIpAddressRange" tf:"allowed_caller_ip_address_range,omitempty"` + + // A open_authentication_policy block as defined below. + // +kubebuilder:validation:Optional + OpenAuthenticationPolicy []OpenAuthenticationPolicyParameters `json:"openAuthenticationPolicy,omitempty" tf:"open_authentication_policy,omitempty"` +} + +type WorkflowManagementInitParameters struct { + + // A list of the allowed caller IP address ranges. + // +listType=set + AllowedCallerIPAddressRange []*string `json:"allowedCallerIpAddressRange,omitempty" tf:"allowed_caller_ip_address_range,omitempty"` +} + +type WorkflowManagementObservation struct { + + // A list of the allowed caller IP address ranges. + // +listType=set + AllowedCallerIPAddressRange []*string `json:"allowedCallerIpAddressRange,omitempty" tf:"allowed_caller_ip_address_range,omitempty"` +} + +type WorkflowManagementParameters struct { + + // A list of the allowed caller IP address ranges. 
+ // +kubebuilder:validation:Optional + // +listType=set + AllowedCallerIPAddressRange []*string `json:"allowedCallerIpAddressRange" tf:"allowed_caller_ip_address_range,omitempty"` +} + +// AppWorkflowSpec defines the desired state of AppWorkflow +type AppWorkflowSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider AppWorkflowParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider AppWorkflowInitParameters `json:"initProvider,omitempty"` +} + +// AppWorkflowStatus defines the observed state of AppWorkflow. +type AppWorkflowStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider AppWorkflowObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// AppWorkflow is the Schema for the AppWorkflows API. Manages a Logic App Workflow. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type AppWorkflow struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + Spec AppWorkflowSpec `json:"spec"` + Status AppWorkflowStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// AppWorkflowList contains a list of AppWorkflows +type AppWorkflowList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []AppWorkflow `json:"items"` +} + +// Repository type metadata. +var ( + AppWorkflow_Kind = "AppWorkflow" + AppWorkflow_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: AppWorkflow_Kind}.String() + AppWorkflow_KindAPIVersion = AppWorkflow_Kind + "." 
+ CRDGroupVersion.String() + AppWorkflow_GroupVersionKind = CRDGroupVersion.WithKind(AppWorkflow_Kind) +) + +func init() { + SchemeBuilder.Register(&AppWorkflow{}, &AppWorkflowList{}) +} diff --git a/apis/logic/v1beta2/zz_generated.conversion_hubs.go b/apis/logic/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 000000000..9b475e89d --- /dev/null +++ b/apis/logic/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,16 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *AppIntegrationAccountBatchConfiguration) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *AppTriggerRecurrence) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *AppWorkflow) Hub() {} diff --git a/apis/logic/v1beta2/zz_generated.deepcopy.go b/apis/logic/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..de5cf7f3e --- /dev/null +++ b/apis/logic/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,2376 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AccessControlInitParameters) DeepCopyInto(out *AccessControlInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(ActionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = new(ContentInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Trigger != nil { + in, out := &in.Trigger, &out.Trigger + *out = new(TriggerInitParameters) + (*in).DeepCopyInto(*out) + } + if in.WorkflowManagement != nil { + in, out := &in.WorkflowManagement, &out.WorkflowManagement + *out = new(WorkflowManagementInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessControlInitParameters. +func (in *AccessControlInitParameters) DeepCopy() *AccessControlInitParameters { + if in == nil { + return nil + } + out := new(AccessControlInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessControlObservation) DeepCopyInto(out *AccessControlObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(ActionObservation) + (*in).DeepCopyInto(*out) + } + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = new(ContentObservation) + (*in).DeepCopyInto(*out) + } + if in.Trigger != nil { + in, out := &in.Trigger, &out.Trigger + *out = new(TriggerObservation) + (*in).DeepCopyInto(*out) + } + if in.WorkflowManagement != nil { + in, out := &in.WorkflowManagement, &out.WorkflowManagement + *out = new(WorkflowManagementObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessControlObservation. 
+func (in *AccessControlObservation) DeepCopy() *AccessControlObservation { + if in == nil { + return nil + } + out := new(AccessControlObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessControlParameters) DeepCopyInto(out *AccessControlParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(ActionParameters) + (*in).DeepCopyInto(*out) + } + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = new(ContentParameters) + (*in).DeepCopyInto(*out) + } + if in.Trigger != nil { + in, out := &in.Trigger, &out.Trigger + *out = new(TriggerParameters) + (*in).DeepCopyInto(*out) + } + if in.WorkflowManagement != nil { + in, out := &in.WorkflowManagement, &out.WorkflowManagement + *out = new(WorkflowManagementParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessControlParameters. +func (in *AccessControlParameters) DeepCopy() *AccessControlParameters { + if in == nil { + return nil + } + out := new(AccessControlParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionInitParameters) DeepCopyInto(out *ActionInitParameters) { + *out = *in + if in.AllowedCallerIPAddressRange != nil { + in, out := &in.AllowedCallerIPAddressRange, &out.AllowedCallerIPAddressRange + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionInitParameters. 
+func (in *ActionInitParameters) DeepCopy() *ActionInitParameters { + if in == nil { + return nil + } + out := new(ActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionObservation) DeepCopyInto(out *ActionObservation) { + *out = *in + if in.AllowedCallerIPAddressRange != nil { + in, out := &in.AllowedCallerIPAddressRange, &out.AllowedCallerIPAddressRange + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionObservation. +func (in *ActionObservation) DeepCopy() *ActionObservation { + if in == nil { + return nil + } + out := new(ActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionParameters) DeepCopyInto(out *ActionParameters) { + *out = *in + if in.AllowedCallerIPAddressRange != nil { + in, out := &in.AllowedCallerIPAddressRange, &out.AllowedCallerIPAddressRange + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionParameters. +func (in *ActionParameters) DeepCopy() *ActionParameters { + if in == nil { + return nil + } + out := new(ActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AppIntegrationAccountBatchConfiguration) DeepCopyInto(out *AppIntegrationAccountBatchConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppIntegrationAccountBatchConfiguration. +func (in *AppIntegrationAccountBatchConfiguration) DeepCopy() *AppIntegrationAccountBatchConfiguration { + if in == nil { + return nil + } + out := new(AppIntegrationAccountBatchConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AppIntegrationAccountBatchConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AppIntegrationAccountBatchConfigurationInitParameters) DeepCopyInto(out *AppIntegrationAccountBatchConfigurationInitParameters) { + *out = *in + if in.BatchGroupName != nil { + in, out := &in.BatchGroupName, &out.BatchGroupName + *out = new(string) + **out = **in + } + if in.IntegrationAccountName != nil { + in, out := &in.IntegrationAccountName, &out.IntegrationAccountName + *out = new(string) + **out = **in + } + if in.IntegrationAccountNameRef != nil { + in, out := &in.IntegrationAccountNameRef, &out.IntegrationAccountNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IntegrationAccountNameSelector != nil { + in, out := &in.IntegrationAccountNameSelector, &out.IntegrationAccountNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ReleaseCriteria != nil { + in, out := &in.ReleaseCriteria, &out.ReleaseCriteria + *out = new(ReleaseCriteriaInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
AppIntegrationAccountBatchConfigurationInitParameters. +func (in *AppIntegrationAccountBatchConfigurationInitParameters) DeepCopy() *AppIntegrationAccountBatchConfigurationInitParameters { + if in == nil { + return nil + } + out := new(AppIntegrationAccountBatchConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppIntegrationAccountBatchConfigurationList) DeepCopyInto(out *AppIntegrationAccountBatchConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AppIntegrationAccountBatchConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppIntegrationAccountBatchConfigurationList. +func (in *AppIntegrationAccountBatchConfigurationList) DeepCopy() *AppIntegrationAccountBatchConfigurationList { + if in == nil { + return nil + } + out := new(AppIntegrationAccountBatchConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AppIntegrationAccountBatchConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AppIntegrationAccountBatchConfigurationObservation) DeepCopyInto(out *AppIntegrationAccountBatchConfigurationObservation) { + *out = *in + if in.BatchGroupName != nil { + in, out := &in.BatchGroupName, &out.BatchGroupName + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IntegrationAccountName != nil { + in, out := &in.IntegrationAccountName, &out.IntegrationAccountName + *out = new(string) + **out = **in + } + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ReleaseCriteria != nil { + in, out := &in.ReleaseCriteria, &out.ReleaseCriteria + *out = new(ReleaseCriteriaObservation) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppIntegrationAccountBatchConfigurationObservation. +func (in *AppIntegrationAccountBatchConfigurationObservation) DeepCopy() *AppIntegrationAccountBatchConfigurationObservation { + if in == nil { + return nil + } + out := new(AppIntegrationAccountBatchConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AppIntegrationAccountBatchConfigurationParameters) DeepCopyInto(out *AppIntegrationAccountBatchConfigurationParameters) { + *out = *in + if in.BatchGroupName != nil { + in, out := &in.BatchGroupName, &out.BatchGroupName + *out = new(string) + **out = **in + } + if in.IntegrationAccountName != nil { + in, out := &in.IntegrationAccountName, &out.IntegrationAccountName + *out = new(string) + **out = **in + } + if in.IntegrationAccountNameRef != nil { + in, out := &in.IntegrationAccountNameRef, &out.IntegrationAccountNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IntegrationAccountNameSelector != nil { + in, out := &in.IntegrationAccountNameSelector, &out.IntegrationAccountNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ReleaseCriteria != nil { + in, out := &in.ReleaseCriteria, &out.ReleaseCriteria + *out = new(ReleaseCriteriaParameters) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppIntegrationAccountBatchConfigurationParameters. 
+func (in *AppIntegrationAccountBatchConfigurationParameters) DeepCopy() *AppIntegrationAccountBatchConfigurationParameters { + if in == nil { + return nil + } + out := new(AppIntegrationAccountBatchConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppIntegrationAccountBatchConfigurationSpec) DeepCopyInto(out *AppIntegrationAccountBatchConfigurationSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppIntegrationAccountBatchConfigurationSpec. +func (in *AppIntegrationAccountBatchConfigurationSpec) DeepCopy() *AppIntegrationAccountBatchConfigurationSpec { + if in == nil { + return nil + } + out := new(AppIntegrationAccountBatchConfigurationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppIntegrationAccountBatchConfigurationStatus) DeepCopyInto(out *AppIntegrationAccountBatchConfigurationStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppIntegrationAccountBatchConfigurationStatus. +func (in *AppIntegrationAccountBatchConfigurationStatus) DeepCopy() *AppIntegrationAccountBatchConfigurationStatus { + if in == nil { + return nil + } + out := new(AppIntegrationAccountBatchConfigurationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AppTriggerRecurrence) DeepCopyInto(out *AppTriggerRecurrence) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppTriggerRecurrence. +func (in *AppTriggerRecurrence) DeepCopy() *AppTriggerRecurrence { + if in == nil { + return nil + } + out := new(AppTriggerRecurrence) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AppTriggerRecurrence) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppTriggerRecurrenceInitParameters) DeepCopyInto(out *AppTriggerRecurrenceInitParameters) { + *out = *in + if in.Frequency != nil { + in, out := &in.Frequency, &out.Frequency + *out = new(string) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(AppTriggerRecurrenceScheduleInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppTriggerRecurrenceInitParameters. 
+func (in *AppTriggerRecurrenceInitParameters) DeepCopy() *AppTriggerRecurrenceInitParameters { + if in == nil { + return nil + } + out := new(AppTriggerRecurrenceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppTriggerRecurrenceList) DeepCopyInto(out *AppTriggerRecurrenceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AppTriggerRecurrence, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppTriggerRecurrenceList. +func (in *AppTriggerRecurrenceList) DeepCopy() *AppTriggerRecurrenceList { + if in == nil { + return nil + } + out := new(AppTriggerRecurrenceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AppTriggerRecurrenceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AppTriggerRecurrenceObservation) DeepCopyInto(out *AppTriggerRecurrenceObservation) { + *out = *in + if in.Frequency != nil { + in, out := &in.Frequency, &out.Frequency + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.LogicAppID != nil { + in, out := &in.LogicAppID, &out.LogicAppID + *out = new(string) + **out = **in + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(AppTriggerRecurrenceScheduleObservation) + (*in).DeepCopyInto(*out) + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppTriggerRecurrenceObservation. +func (in *AppTriggerRecurrenceObservation) DeepCopy() *AppTriggerRecurrenceObservation { + if in == nil { + return nil + } + out := new(AppTriggerRecurrenceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AppTriggerRecurrenceParameters) DeepCopyInto(out *AppTriggerRecurrenceParameters) { + *out = *in + if in.Frequency != nil { + in, out := &in.Frequency, &out.Frequency + *out = new(string) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.LogicAppID != nil { + in, out := &in.LogicAppID, &out.LogicAppID + *out = new(string) + **out = **in + } + if in.LogicAppIDRef != nil { + in, out := &in.LogicAppIDRef, &out.LogicAppIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LogicAppIDSelector != nil { + in, out := &in.LogicAppIDSelector, &out.LogicAppIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(AppTriggerRecurrenceScheduleParameters) + (*in).DeepCopyInto(*out) + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppTriggerRecurrenceParameters. +func (in *AppTriggerRecurrenceParameters) DeepCopy() *AppTriggerRecurrenceParameters { + if in == nil { + return nil + } + out := new(AppTriggerRecurrenceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AppTriggerRecurrenceScheduleInitParameters) DeepCopyInto(out *AppTriggerRecurrenceScheduleInitParameters) { + *out = *in + if in.AtTheseHours != nil { + in, out := &in.AtTheseHours, &out.AtTheseHours + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.AtTheseMinutes != nil { + in, out := &in.AtTheseMinutes, &out.AtTheseMinutes + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.OnTheseDays != nil { + in, out := &in.OnTheseDays, &out.OnTheseDays + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppTriggerRecurrenceScheduleInitParameters. +func (in *AppTriggerRecurrenceScheduleInitParameters) DeepCopy() *AppTriggerRecurrenceScheduleInitParameters { + if in == nil { + return nil + } + out := new(AppTriggerRecurrenceScheduleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AppTriggerRecurrenceScheduleObservation) DeepCopyInto(out *AppTriggerRecurrenceScheduleObservation) { + *out = *in + if in.AtTheseHours != nil { + in, out := &in.AtTheseHours, &out.AtTheseHours + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.AtTheseMinutes != nil { + in, out := &in.AtTheseMinutes, &out.AtTheseMinutes + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.OnTheseDays != nil { + in, out := &in.OnTheseDays, &out.OnTheseDays + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppTriggerRecurrenceScheduleObservation. +func (in *AppTriggerRecurrenceScheduleObservation) DeepCopy() *AppTriggerRecurrenceScheduleObservation { + if in == nil { + return nil + } + out := new(AppTriggerRecurrenceScheduleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AppTriggerRecurrenceScheduleParameters) DeepCopyInto(out *AppTriggerRecurrenceScheduleParameters) { + *out = *in + if in.AtTheseHours != nil { + in, out := &in.AtTheseHours, &out.AtTheseHours + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.AtTheseMinutes != nil { + in, out := &in.AtTheseMinutes, &out.AtTheseMinutes + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.OnTheseDays != nil { + in, out := &in.OnTheseDays, &out.OnTheseDays + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppTriggerRecurrenceScheduleParameters. +func (in *AppTriggerRecurrenceScheduleParameters) DeepCopy() *AppTriggerRecurrenceScheduleParameters { + if in == nil { + return nil + } + out := new(AppTriggerRecurrenceScheduleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppTriggerRecurrenceSpec) DeepCopyInto(out *AppTriggerRecurrenceSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppTriggerRecurrenceSpec. +func (in *AppTriggerRecurrenceSpec) DeepCopy() *AppTriggerRecurrenceSpec { + if in == nil { + return nil + } + out := new(AppTriggerRecurrenceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *AppTriggerRecurrenceStatus) DeepCopyInto(out *AppTriggerRecurrenceStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppTriggerRecurrenceStatus. +func (in *AppTriggerRecurrenceStatus) DeepCopy() *AppTriggerRecurrenceStatus { + if in == nil { + return nil + } + out := new(AppTriggerRecurrenceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppWorkflow) DeepCopyInto(out *AppWorkflow) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppWorkflow. +func (in *AppWorkflow) DeepCopy() *AppWorkflow { + if in == nil { + return nil + } + out := new(AppWorkflow) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AppWorkflow) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AppWorkflowInitParameters) DeepCopyInto(out *AppWorkflowInitParameters) { + *out = *in + if in.AccessControl != nil { + in, out := &in.AccessControl, &out.AccessControl + *out = new(AccessControlInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.IntegrationServiceEnvironmentID != nil { + in, out := &in.IntegrationServiceEnvironmentID, &out.IntegrationServiceEnvironmentID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.LogicAppIntegrationAccountID != nil { + in, out := &in.LogicAppIntegrationAccountID, &out.LogicAppIntegrationAccountID + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.WorkflowParameters != nil { + in, out := &in.WorkflowParameters, &out.WorkflowParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.WorkflowSchema != nil { + 
in, out := &in.WorkflowSchema, &out.WorkflowSchema + *out = new(string) + **out = **in + } + if in.WorkflowVersion != nil { + in, out := &in.WorkflowVersion, &out.WorkflowVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppWorkflowInitParameters. +func (in *AppWorkflowInitParameters) DeepCopy() *AppWorkflowInitParameters { + if in == nil { + return nil + } + out := new(AppWorkflowInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppWorkflowList) DeepCopyInto(out *AppWorkflowList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AppWorkflow, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppWorkflowList. +func (in *AppWorkflowList) DeepCopy() *AppWorkflowList { + if in == nil { + return nil + } + out := new(AppWorkflowList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AppWorkflowList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AppWorkflowObservation) DeepCopyInto(out *AppWorkflowObservation) { + *out = *in + if in.AccessControl != nil { + in, out := &in.AccessControl, &out.AccessControl + *out = new(AccessControlObservation) + (*in).DeepCopyInto(*out) + } + if in.AccessEndpoint != nil { + in, out := &in.AccessEndpoint, &out.AccessEndpoint + *out = new(string) + **out = **in + } + if in.ConnectorEndpointIPAddresses != nil { + in, out := &in.ConnectorEndpointIPAddresses, &out.ConnectorEndpointIPAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ConnectorOutboundIPAddresses != nil { + in, out := &in.ConnectorOutboundIPAddresses, &out.ConnectorOutboundIPAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.IntegrationServiceEnvironmentID != nil { + in, out := &in.IntegrationServiceEnvironmentID, &out.IntegrationServiceEnvironmentID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.LogicAppIntegrationAccountID != nil { + in, out := &in.LogicAppIntegrationAccountID, &out.LogicAppIntegrationAccountID + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out 
= new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.WorkflowEndpointIPAddresses != nil { + in, out := &in.WorkflowEndpointIPAddresses, &out.WorkflowEndpointIPAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.WorkflowOutboundIPAddresses != nil { + in, out := &in.WorkflowOutboundIPAddresses, &out.WorkflowOutboundIPAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.WorkflowParameters != nil { + in, out := &in.WorkflowParameters, &out.WorkflowParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.WorkflowSchema != nil { + in, out := &in.WorkflowSchema, &out.WorkflowSchema + *out = new(string) + **out = **in + } + if in.WorkflowVersion != nil { + in, out := &in.WorkflowVersion, &out.WorkflowVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppWorkflowObservation. 
+func (in *AppWorkflowObservation) DeepCopy() *AppWorkflowObservation { + if in == nil { + return nil + } + out := new(AppWorkflowObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppWorkflowParameters) DeepCopyInto(out *AppWorkflowParameters) { + *out = *in + if in.AccessControl != nil { + in, out := &in.AccessControl, &out.AccessControl + *out = new(AccessControlParameters) + (*in).DeepCopyInto(*out) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.IntegrationServiceEnvironmentID != nil { + in, out := &in.IntegrationServiceEnvironmentID, &out.IntegrationServiceEnvironmentID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.LogicAppIntegrationAccountID != nil { + in, out := &in.LogicAppIntegrationAccountID, &out.LogicAppIntegrationAccountID + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = 
new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.WorkflowParameters != nil { + in, out := &in.WorkflowParameters, &out.WorkflowParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.WorkflowSchema != nil { + in, out := &in.WorkflowSchema, &out.WorkflowSchema + *out = new(string) + **out = **in + } + if in.WorkflowVersion != nil { + in, out := &in.WorkflowVersion, &out.WorkflowVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppWorkflowParameters. +func (in *AppWorkflowParameters) DeepCopy() *AppWorkflowParameters { + if in == nil { + return nil + } + out := new(AppWorkflowParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppWorkflowSpec) DeepCopyInto(out *AppWorkflowSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppWorkflowSpec. +func (in *AppWorkflowSpec) DeepCopy() *AppWorkflowSpec { + if in == nil { + return nil + } + out := new(AppWorkflowSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *AppWorkflowStatus) DeepCopyInto(out *AppWorkflowStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppWorkflowStatus. +func (in *AppWorkflowStatus) DeepCopy() *AppWorkflowStatus { + if in == nil { + return nil + } + out := new(AppWorkflowStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClaimInitParameters) DeepCopyInto(out *ClaimInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClaimInitParameters. +func (in *ClaimInitParameters) DeepCopy() *ClaimInitParameters { + if in == nil { + return nil + } + out := new(ClaimInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClaimObservation) DeepCopyInto(out *ClaimObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClaimObservation. +func (in *ClaimObservation) DeepCopy() *ClaimObservation { + if in == nil { + return nil + } + out := new(ClaimObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClaimParameters) DeepCopyInto(out *ClaimParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClaimParameters. +func (in *ClaimParameters) DeepCopy() *ClaimParameters { + if in == nil { + return nil + } + out := new(ClaimParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContentInitParameters) DeepCopyInto(out *ContentInitParameters) { + *out = *in + if in.AllowedCallerIPAddressRange != nil { + in, out := &in.AllowedCallerIPAddressRange, &out.AllowedCallerIPAddressRange + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContentInitParameters. +func (in *ContentInitParameters) DeepCopy() *ContentInitParameters { + if in == nil { + return nil + } + out := new(ContentInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContentObservation) DeepCopyInto(out *ContentObservation) { + *out = *in + if in.AllowedCallerIPAddressRange != nil { + in, out := &in.AllowedCallerIPAddressRange, &out.AllowedCallerIPAddressRange + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContentObservation. 
+func (in *ContentObservation) DeepCopy() *ContentObservation { + if in == nil { + return nil + } + out := new(ContentObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContentParameters) DeepCopyInto(out *ContentParameters) { + *out = *in + if in.AllowedCallerIPAddressRange != nil { + in, out := &in.AllowedCallerIPAddressRange, &out.AllowedCallerIPAddressRange + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContentParameters. +func (in *ContentParameters) DeepCopy() *ContentParameters { + if in == nil { + return nil + } + out := new(ContentParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityInitParameters) DeepCopyInto(out *IdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityInitParameters. +func (in *IdentityInitParameters) DeepCopy() *IdentityInitParameters { + if in == nil { + return nil + } + out := new(IdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentityObservation) DeepCopyInto(out *IdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityObservation. +func (in *IdentityObservation) DeepCopy() *IdentityObservation { + if in == nil { + return nil + } + out := new(IdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityParameters) DeepCopyInto(out *IdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityParameters. +func (in *IdentityParameters) DeepCopy() *IdentityParameters { + if in == nil { + return nil + } + out := new(IdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonthlyInitParameters) DeepCopyInto(out *MonthlyInitParameters) { + *out = *in + if in.Week != nil { + in, out := &in.Week, &out.Week + *out = new(float64) + **out = **in + } + if in.Weekday != nil { + in, out := &in.Weekday, &out.Weekday + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonthlyInitParameters. +func (in *MonthlyInitParameters) DeepCopy() *MonthlyInitParameters { + if in == nil { + return nil + } + out := new(MonthlyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonthlyObservation) DeepCopyInto(out *MonthlyObservation) { + *out = *in + if in.Week != nil { + in, out := &in.Week, &out.Week + *out = new(float64) + **out = **in + } + if in.Weekday != nil { + in, out := &in.Weekday, &out.Weekday + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonthlyObservation. +func (in *MonthlyObservation) DeepCopy() *MonthlyObservation { + if in == nil { + return nil + } + out := new(MonthlyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonthlyParameters) DeepCopyInto(out *MonthlyParameters) { + *out = *in + if in.Week != nil { + in, out := &in.Week, &out.Week + *out = new(float64) + **out = **in + } + if in.Weekday != nil { + in, out := &in.Weekday, &out.Weekday + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonthlyParameters. 
+func (in *MonthlyParameters) DeepCopy() *MonthlyParameters { + if in == nil { + return nil + } + out := new(MonthlyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenAuthenticationPolicyInitParameters) DeepCopyInto(out *OpenAuthenticationPolicyInitParameters) { + *out = *in + if in.Claim != nil { + in, out := &in.Claim, &out.Claim + *out = make([]ClaimInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenAuthenticationPolicyInitParameters. +func (in *OpenAuthenticationPolicyInitParameters) DeepCopy() *OpenAuthenticationPolicyInitParameters { + if in == nil { + return nil + } + out := new(OpenAuthenticationPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenAuthenticationPolicyObservation) DeepCopyInto(out *OpenAuthenticationPolicyObservation) { + *out = *in + if in.Claim != nil { + in, out := &in.Claim, &out.Claim + *out = make([]ClaimObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenAuthenticationPolicyObservation. +func (in *OpenAuthenticationPolicyObservation) DeepCopy() *OpenAuthenticationPolicyObservation { + if in == nil { + return nil + } + out := new(OpenAuthenticationPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *OpenAuthenticationPolicyParameters) DeepCopyInto(out *OpenAuthenticationPolicyParameters) { + *out = *in + if in.Claim != nil { + in, out := &in.Claim, &out.Claim + *out = make([]ClaimParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenAuthenticationPolicyParameters. +func (in *OpenAuthenticationPolicyParameters) DeepCopy() *OpenAuthenticationPolicyParameters { + if in == nil { + return nil + } + out := new(OpenAuthenticationPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecurrenceInitParameters) DeepCopyInto(out *RecurrenceInitParameters) { + *out = *in + if in.EndTime != nil { + in, out := &in.EndTime, &out.EndTime + *out = new(string) + **out = **in + } + if in.Frequency != nil { + in, out := &in.Frequency, &out.Frequency + *out = new(string) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(ScheduleInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecurrenceInitParameters. 
+func (in *RecurrenceInitParameters) DeepCopy() *RecurrenceInitParameters { + if in == nil { + return nil + } + out := new(RecurrenceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecurrenceObservation) DeepCopyInto(out *RecurrenceObservation) { + *out = *in + if in.EndTime != nil { + in, out := &in.EndTime, &out.EndTime + *out = new(string) + **out = **in + } + if in.Frequency != nil { + in, out := &in.Frequency, &out.Frequency + *out = new(string) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(ScheduleObservation) + (*in).DeepCopyInto(*out) + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecurrenceObservation. +func (in *RecurrenceObservation) DeepCopy() *RecurrenceObservation { + if in == nil { + return nil + } + out := new(RecurrenceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RecurrenceParameters) DeepCopyInto(out *RecurrenceParameters) { + *out = *in + if in.EndTime != nil { + in, out := &in.EndTime, &out.EndTime + *out = new(string) + **out = **in + } + if in.Frequency != nil { + in, out := &in.Frequency, &out.Frequency + *out = new(string) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(ScheduleParameters) + (*in).DeepCopyInto(*out) + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecurrenceParameters. +func (in *RecurrenceParameters) DeepCopy() *RecurrenceParameters { + if in == nil { + return nil + } + out := new(RecurrenceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReleaseCriteriaInitParameters) DeepCopyInto(out *ReleaseCriteriaInitParameters) { + *out = *in + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(float64) + **out = **in + } + if in.MessageCount != nil { + in, out := &in.MessageCount, &out.MessageCount + *out = new(float64) + **out = **in + } + if in.Recurrence != nil { + in, out := &in.Recurrence, &out.Recurrence + *out = new(RecurrenceInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReleaseCriteriaInitParameters. 
+func (in *ReleaseCriteriaInitParameters) DeepCopy() *ReleaseCriteriaInitParameters { + if in == nil { + return nil + } + out := new(ReleaseCriteriaInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReleaseCriteriaObservation) DeepCopyInto(out *ReleaseCriteriaObservation) { + *out = *in + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(float64) + **out = **in + } + if in.MessageCount != nil { + in, out := &in.MessageCount, &out.MessageCount + *out = new(float64) + **out = **in + } + if in.Recurrence != nil { + in, out := &in.Recurrence, &out.Recurrence + *out = new(RecurrenceObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReleaseCriteriaObservation. +func (in *ReleaseCriteriaObservation) DeepCopy() *ReleaseCriteriaObservation { + if in == nil { + return nil + } + out := new(ReleaseCriteriaObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReleaseCriteriaParameters) DeepCopyInto(out *ReleaseCriteriaParameters) { + *out = *in + if in.BatchSize != nil { + in, out := &in.BatchSize, &out.BatchSize + *out = new(float64) + **out = **in + } + if in.MessageCount != nil { + in, out := &in.MessageCount, &out.MessageCount + *out = new(float64) + **out = **in + } + if in.Recurrence != nil { + in, out := &in.Recurrence, &out.Recurrence + *out = new(RecurrenceParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReleaseCriteriaParameters. 
+func (in *ReleaseCriteriaParameters) DeepCopy() *ReleaseCriteriaParameters { + if in == nil { + return nil + } + out := new(ReleaseCriteriaParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScheduleInitParameters) DeepCopyInto(out *ScheduleInitParameters) { + *out = *in + if in.Hours != nil { + in, out := &in.Hours, &out.Hours + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.Minutes != nil { + in, out := &in.Minutes, &out.Minutes + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.MonthDays != nil { + in, out := &in.MonthDays, &out.MonthDays + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.Monthly != nil { + in, out := &in.Monthly, &out.Monthly + *out = make([]MonthlyInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.WeekDays != nil { + in, out := &in.WeekDays, &out.WeekDays + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleInitParameters. +func (in *ScheduleInitParameters) DeepCopy() *ScheduleInitParameters { + if in == nil { + return nil + } + out := new(ScheduleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScheduleObservation) DeepCopyInto(out *ScheduleObservation) { + *out = *in + if in.Hours != nil { + in, out := &in.Hours, &out.Hours + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.Minutes != nil { + in, out := &in.Minutes, &out.Minutes + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.MonthDays != nil { + in, out := &in.MonthDays, &out.MonthDays + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.Monthly != nil { + in, out := &in.Monthly, &out.Monthly + *out = make([]MonthlyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.WeekDays != nil { + in, out := &in.WeekDays, &out.WeekDays + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleObservation. +func (in *ScheduleObservation) DeepCopy() *ScheduleObservation { + if in == nil { + return nil + } + out := new(ScheduleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScheduleParameters) DeepCopyInto(out *ScheduleParameters) { + *out = *in + if in.Hours != nil { + in, out := &in.Hours, &out.Hours + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.Minutes != nil { + in, out := &in.Minutes, &out.Minutes + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.MonthDays != nil { + in, out := &in.MonthDays, &out.MonthDays + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.Monthly != nil { + in, out := &in.Monthly, &out.Monthly + *out = make([]MonthlyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.WeekDays != nil { + in, out := &in.WeekDays, &out.WeekDays + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleParameters. +func (in *ScheduleParameters) DeepCopy() *ScheduleParameters { + if in == nil { + return nil + } + out := new(ScheduleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TriggerInitParameters) DeepCopyInto(out *TriggerInitParameters) { + *out = *in + if in.AllowedCallerIPAddressRange != nil { + in, out := &in.AllowedCallerIPAddressRange, &out.AllowedCallerIPAddressRange + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OpenAuthenticationPolicy != nil { + in, out := &in.OpenAuthenticationPolicy, &out.OpenAuthenticationPolicy + *out = make([]OpenAuthenticationPolicyInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerInitParameters. +func (in *TriggerInitParameters) DeepCopy() *TriggerInitParameters { + if in == nil { + return nil + } + out := new(TriggerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TriggerObservation) DeepCopyInto(out *TriggerObservation) { + *out = *in + if in.AllowedCallerIPAddressRange != nil { + in, out := &in.AllowedCallerIPAddressRange, &out.AllowedCallerIPAddressRange + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OpenAuthenticationPolicy != nil { + in, out := &in.OpenAuthenticationPolicy, &out.OpenAuthenticationPolicy + *out = make([]OpenAuthenticationPolicyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerObservation. 
+func (in *TriggerObservation) DeepCopy() *TriggerObservation { + if in == nil { + return nil + } + out := new(TriggerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TriggerParameters) DeepCopyInto(out *TriggerParameters) { + *out = *in + if in.AllowedCallerIPAddressRange != nil { + in, out := &in.AllowedCallerIPAddressRange, &out.AllowedCallerIPAddressRange + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OpenAuthenticationPolicy != nil { + in, out := &in.OpenAuthenticationPolicy, &out.OpenAuthenticationPolicy + *out = make([]OpenAuthenticationPolicyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerParameters. +func (in *TriggerParameters) DeepCopy() *TriggerParameters { + if in == nil { + return nil + } + out := new(TriggerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkflowManagementInitParameters) DeepCopyInto(out *WorkflowManagementInitParameters) { + *out = *in + if in.AllowedCallerIPAddressRange != nil { + in, out := &in.AllowedCallerIPAddressRange, &out.AllowedCallerIPAddressRange + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowManagementInitParameters. 
+func (in *WorkflowManagementInitParameters) DeepCopy() *WorkflowManagementInitParameters { + if in == nil { + return nil + } + out := new(WorkflowManagementInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkflowManagementObservation) DeepCopyInto(out *WorkflowManagementObservation) { + *out = *in + if in.AllowedCallerIPAddressRange != nil { + in, out := &in.AllowedCallerIPAddressRange, &out.AllowedCallerIPAddressRange + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowManagementObservation. +func (in *WorkflowManagementObservation) DeepCopy() *WorkflowManagementObservation { + if in == nil { + return nil + } + out := new(WorkflowManagementObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkflowManagementParameters) DeepCopyInto(out *WorkflowManagementParameters) { + *out = *in + if in.AllowedCallerIPAddressRange != nil { + in, out := &in.AllowedCallerIPAddressRange, &out.AllowedCallerIPAddressRange + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowManagementParameters. 
+func (in *WorkflowManagementParameters) DeepCopy() *WorkflowManagementParameters { + if in == nil { + return nil + } + out := new(WorkflowManagementParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/logic/v1beta2/zz_generated.managed.go b/apis/logic/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..c76e15b59 --- /dev/null +++ b/apis/logic/v1beta2/zz_generated.managed.go @@ -0,0 +1,188 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this AppIntegrationAccountBatchConfiguration. +func (mg *AppIntegrationAccountBatchConfiguration) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this AppIntegrationAccountBatchConfiguration. +func (mg *AppIntegrationAccountBatchConfiguration) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this AppIntegrationAccountBatchConfiguration. +func (mg *AppIntegrationAccountBatchConfiguration) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this AppIntegrationAccountBatchConfiguration. +func (mg *AppIntegrationAccountBatchConfiguration) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this AppIntegrationAccountBatchConfiguration. +func (mg *AppIntegrationAccountBatchConfiguration) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this AppIntegrationAccountBatchConfiguration. 
+func (mg *AppIntegrationAccountBatchConfiguration) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this AppIntegrationAccountBatchConfiguration. +func (mg *AppIntegrationAccountBatchConfiguration) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this AppIntegrationAccountBatchConfiguration. +func (mg *AppIntegrationAccountBatchConfiguration) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this AppIntegrationAccountBatchConfiguration. +func (mg *AppIntegrationAccountBatchConfiguration) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this AppIntegrationAccountBatchConfiguration. +func (mg *AppIntegrationAccountBatchConfiguration) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this AppIntegrationAccountBatchConfiguration. +func (mg *AppIntegrationAccountBatchConfiguration) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this AppIntegrationAccountBatchConfiguration. +func (mg *AppIntegrationAccountBatchConfiguration) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this AppTriggerRecurrence. +func (mg *AppTriggerRecurrence) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this AppTriggerRecurrence. +func (mg *AppTriggerRecurrence) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this AppTriggerRecurrence. 
+func (mg *AppTriggerRecurrence) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this AppTriggerRecurrence. +func (mg *AppTriggerRecurrence) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this AppTriggerRecurrence. +func (mg *AppTriggerRecurrence) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this AppTriggerRecurrence. +func (mg *AppTriggerRecurrence) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this AppTriggerRecurrence. +func (mg *AppTriggerRecurrence) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this AppTriggerRecurrence. +func (mg *AppTriggerRecurrence) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this AppTriggerRecurrence. +func (mg *AppTriggerRecurrence) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this AppTriggerRecurrence. +func (mg *AppTriggerRecurrence) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this AppTriggerRecurrence. +func (mg *AppTriggerRecurrence) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this AppTriggerRecurrence. +func (mg *AppTriggerRecurrence) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this AppWorkflow. 
+func (mg *AppWorkflow) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this AppWorkflow. +func (mg *AppWorkflow) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this AppWorkflow. +func (mg *AppWorkflow) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this AppWorkflow. +func (mg *AppWorkflow) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this AppWorkflow. +func (mg *AppWorkflow) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this AppWorkflow. +func (mg *AppWorkflow) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this AppWorkflow. +func (mg *AppWorkflow) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this AppWorkflow. +func (mg *AppWorkflow) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this AppWorkflow. +func (mg *AppWorkflow) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this AppWorkflow. +func (mg *AppWorkflow) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this AppWorkflow. +func (mg *AppWorkflow) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this AppWorkflow. 
+func (mg *AppWorkflow) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/logic/v1beta2/zz_generated.managedlist.go b/apis/logic/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..a1c2af88f --- /dev/null +++ b/apis/logic/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,35 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this AppIntegrationAccountBatchConfigurationList. +func (l *AppIntegrationAccountBatchConfigurationList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this AppTriggerRecurrenceList. +func (l *AppTriggerRecurrenceList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this AppWorkflowList. +func (l *AppWorkflowList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/logic/v1beta2/zz_generated.resolvers.go b/apis/logic/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..ff67f92cf --- /dev/null +++ b/apis/logic/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,168 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + apisresolver "github.com/upbound/provider-azure/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *AppIntegrationAccountBatchConfiguration) ResolveReferences( // ResolveReferences of this AppIntegrationAccountBatchConfiguration. + ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("logic.azure.upbound.io", "v1beta1", "AppIntegrationAccount", "AppIntegrationAccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.IntegrationAccountName), + Extract: resource.ExtractParamPath("name", false), + Reference: mg.Spec.ForProvider.IntegrationAccountNameRef, + Selector: mg.Spec.ForProvider.IntegrationAccountNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.IntegrationAccountName") + } + mg.Spec.ForProvider.IntegrationAccountName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.IntegrationAccountNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("logic.azure.upbound.io", "v1beta1", "AppIntegrationAccount", "AppIntegrationAccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.IntegrationAccountName), + Extract: resource.ExtractParamPath("name", false), + Reference: mg.Spec.InitProvider.IntegrationAccountNameRef, + Selector: mg.Spec.InitProvider.IntegrationAccountNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.IntegrationAccountName") + } + mg.Spec.InitProvider.IntegrationAccountName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.IntegrationAccountNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ResourceGroupNameRef, + Selector: mg.Spec.InitProvider.ResourceGroupNameSelector, + To: reference.To{List: l, 
Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ResourceGroupName") + } + mg.Spec.InitProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this AppTriggerRecurrence. +func (mg *AppTriggerRecurrence) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("logic.azure.upbound.io", "v1beta2", "AppWorkflow", "AppWorkflowList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.LogicAppID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.LogicAppIDRef, + Selector: mg.Spec.ForProvider.LogicAppIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.LogicAppID") + } + mg.Spec.ForProvider.LogicAppID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.LogicAppIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this AppWorkflow. 
+func (mg *AppWorkflow) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/logic/v1beta2/zz_groupversion_info.go b/apis/logic/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..3d62a2fa6 --- /dev/null +++ b/apis/logic/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=logic.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
+const ( + CRDGroup = "logic.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/logz/v1beta1/zz_generated.conversion_hubs.go b/apis/logz/v1beta1/zz_generated.conversion_hubs.go index 3d19e8060..ce95aca2d 100755 --- a/apis/logz/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/logz/v1beta1/zz_generated.conversion_hubs.go @@ -6,12 +6,6 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *Monitor) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *SubAccount) Hub() {} - // Hub marks this type as a conversion hub. func (tr *SubAccountTagRule) Hub() {} diff --git a/apis/logz/v1beta1/zz_generated.conversion_spokes.go b/apis/logz/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..1bdb45eac --- /dev/null +++ b/apis/logz/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,54 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Monitor to the hub type. 
+func (tr *Monitor) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Monitor type. +func (tr *Monitor) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this SubAccount to the hub type. +func (tr *SubAccount) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the SubAccount type. 
+func (tr *SubAccount) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/logz/v1beta1/zz_generated.resolvers.go b/apis/logz/v1beta1/zz_generated.resolvers.go index 1c1a915dc..bcefead5f 100644 --- a/apis/logz/v1beta1/zz_generated.resolvers.go +++ b/apis/logz/v1beta1/zz_generated.resolvers.go @@ -89,7 +89,7 @@ func (mg *SubAccountTagRule) ResolveReferences(ctx context.Context, c client.Rea var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("logz.azure.upbound.io", "v1beta1", "SubAccount", "SubAccountList") + m, l, err = apisresolver.GetManagedResource("logz.azure.upbound.io", "v1beta2", "SubAccount", "SubAccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -108,7 +108,7 @@ func (mg *SubAccountTagRule) ResolveReferences(ctx context.Context, c client.Rea mg.Spec.ForProvider.LogzSubAccountID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.LogzSubAccountIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("logz.azure.upbound.io", "v1beta1", "SubAccount", "SubAccountList") + m, l, err = apisresolver.GetManagedResource("logz.azure.upbound.io", "v1beta2", "SubAccount", "SubAccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -139,7 +139,7 @@ func (mg *TagRule) ResolveReferences(ctx context.Context, c client.Reader) error var rsp reference.ResolutionResponse var err error { - m, l, err = 
apisresolver.GetManagedResource("logz.azure.upbound.io", "v1beta1", "Monitor", "MonitorList") + m, l, err = apisresolver.GetManagedResource("logz.azure.upbound.io", "v1beta2", "Monitor", "MonitorList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/logz/v1beta1/zz_subaccounttagrule_types.go b/apis/logz/v1beta1/zz_subaccounttagrule_types.go index f1a60270d..209061da7 100755 --- a/apis/logz/v1beta1/zz_subaccounttagrule_types.go +++ b/apis/logz/v1beta1/zz_subaccounttagrule_types.go @@ -16,7 +16,7 @@ import ( type SubAccountTagRuleInitParameters struct { // The ID of the Logz Sub Account. Changing this forces a new Logz Sub Account Tag Rule to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/logz/v1beta1.SubAccount + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/logz/v1beta2.SubAccount // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() LogzSubAccountID *string `json:"logzSubAccountId,omitempty" tf:"logz_sub_account_id,omitempty"` @@ -65,7 +65,7 @@ type SubAccountTagRuleObservation struct { type SubAccountTagRuleParameters struct { // The ID of the Logz Sub Account. Changing this forces a new Logz Sub Account Tag Rule to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/logz/v1beta1.SubAccount + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/logz/v1beta2.SubAccount // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional LogzSubAccountID *string `json:"logzSubAccountId,omitempty" tf:"logz_sub_account_id,omitempty"` diff --git a/apis/logz/v1beta1/zz_tagrule_types.go b/apis/logz/v1beta1/zz_tagrule_types.go index 1bf78860e..0407e1ec3 100755 --- a/apis/logz/v1beta1/zz_tagrule_types.go +++ b/apis/logz/v1beta1/zz_tagrule_types.go @@ -52,7 +52,7 @@ type TagRuleObservation struct { type TagRuleParameters struct { // The ID of the Logz Monitor. Changing this forces a new logz Tag Rule to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/logz/v1beta1.Monitor + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/logz/v1beta2.Monitor // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional LogzMonitorID *string `json:"logzMonitorId,omitempty" tf:"logz_monitor_id,omitempty"` diff --git a/apis/logz/v1beta2/zz_generated.conversion_hubs.go b/apis/logz/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 000000000..f65ecece6 --- /dev/null +++ b/apis/logz/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,13 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Monitor) Hub() {} + +// Hub marks this type as a conversion hub. 
+func (tr *SubAccount) Hub() {} diff --git a/apis/logz/v1beta2/zz_generated.deepcopy.go b/apis/logz/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..6d436cc1a --- /dev/null +++ b/apis/logz/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,883 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Monitor) DeepCopyInto(out *Monitor) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Monitor. +func (in *Monitor) DeepCopy() *Monitor { + if in == nil { + return nil + } + out := new(Monitor) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Monitor) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorInitParameters) DeepCopyInto(out *MonitorInitParameters) { + *out = *in + if in.CompanyName != nil { + in, out := &in.CompanyName, &out.CompanyName + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.EnterpriseAppID != nil { + in, out := &in.EnterpriseAppID, &out.EnterpriseAppID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Plan != nil { + in, out := &in.Plan, &out.Plan + *out = new(PlanInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.User != nil { + in, out := &in.User, &out.User + *out = new(UserInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorInitParameters. +func (in *MonitorInitParameters) DeepCopy() *MonitorInitParameters { + if in == nil { + return nil + } + out := new(MonitorInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorList) DeepCopyInto(out *MonitorList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Monitor, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorList. 
+func (in *MonitorList) DeepCopy() *MonitorList { + if in == nil { + return nil + } + out := new(MonitorList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MonitorList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorObservation) DeepCopyInto(out *MonitorObservation) { + *out = *in + if in.CompanyName != nil { + in, out := &in.CompanyName, &out.CompanyName + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.EnterpriseAppID != nil { + in, out := &in.EnterpriseAppID, &out.EnterpriseAppID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.LogzOrganizationID != nil { + in, out := &in.LogzOrganizationID, &out.LogzOrganizationID + *out = new(string) + **out = **in + } + if in.Plan != nil { + in, out := &in.Plan, &out.Plan + *out = new(PlanObservation) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.SingleSignOnURL != nil { + in, out := &in.SingleSignOnURL, &out.SingleSignOnURL + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.User != nil { + in, out 
:= &in.User, &out.User + *out = new(UserObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorObservation. +func (in *MonitorObservation) DeepCopy() *MonitorObservation { + if in == nil { + return nil + } + out := new(MonitorObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorParameters) DeepCopyInto(out *MonitorParameters) { + *out = *in + if in.CompanyName != nil { + in, out := &in.CompanyName, &out.CompanyName + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.EnterpriseAppID != nil { + in, out := &in.EnterpriseAppID, &out.EnterpriseAppID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Plan != nil { + in, out := &in.Plan, &out.Plan + *out = new(PlanParameters) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.User != nil { + in, out := &in.User, &out.User + *out = 
new(UserParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorParameters. +func (in *MonitorParameters) DeepCopy() *MonitorParameters { + if in == nil { + return nil + } + out := new(MonitorParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorSpec) DeepCopyInto(out *MonitorSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorSpec. +func (in *MonitorSpec) DeepCopy() *MonitorSpec { + if in == nil { + return nil + } + out := new(MonitorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitorStatus) DeepCopyInto(out *MonitorStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorStatus. +func (in *MonitorStatus) DeepCopy() *MonitorStatus { + if in == nil { + return nil + } + out := new(MonitorStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PlanInitParameters) DeepCopyInto(out *PlanInitParameters) { + *out = *in + if in.BillingCycle != nil { + in, out := &in.BillingCycle, &out.BillingCycle + *out = new(string) + **out = **in + } + if in.EffectiveDate != nil { + in, out := &in.EffectiveDate, &out.EffectiveDate + *out = new(string) + **out = **in + } + if in.PlanID != nil { + in, out := &in.PlanID, &out.PlanID + *out = new(string) + **out = **in + } + if in.UsageType != nil { + in, out := &in.UsageType, &out.UsageType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlanInitParameters. +func (in *PlanInitParameters) DeepCopy() *PlanInitParameters { + if in == nil { + return nil + } + out := new(PlanInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlanObservation) DeepCopyInto(out *PlanObservation) { + *out = *in + if in.BillingCycle != nil { + in, out := &in.BillingCycle, &out.BillingCycle + *out = new(string) + **out = **in + } + if in.EffectiveDate != nil { + in, out := &in.EffectiveDate, &out.EffectiveDate + *out = new(string) + **out = **in + } + if in.PlanID != nil { + in, out := &in.PlanID, &out.PlanID + *out = new(string) + **out = **in + } + if in.UsageType != nil { + in, out := &in.UsageType, &out.UsageType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlanObservation. +func (in *PlanObservation) DeepCopy() *PlanObservation { + if in == nil { + return nil + } + out := new(PlanObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PlanParameters) DeepCopyInto(out *PlanParameters) { + *out = *in + if in.BillingCycle != nil { + in, out := &in.BillingCycle, &out.BillingCycle + *out = new(string) + **out = **in + } + if in.EffectiveDate != nil { + in, out := &in.EffectiveDate, &out.EffectiveDate + *out = new(string) + **out = **in + } + if in.PlanID != nil { + in, out := &in.PlanID, &out.PlanID + *out = new(string) + **out = **in + } + if in.UsageType != nil { + in, out := &in.UsageType, &out.UsageType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlanParameters. +func (in *PlanParameters) DeepCopy() *PlanParameters { + if in == nil { + return nil + } + out := new(PlanParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubAccount) DeepCopyInto(out *SubAccount) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubAccount. +func (in *SubAccount) DeepCopy() *SubAccount { + if in == nil { + return nil + } + out := new(SubAccount) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SubAccount) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SubAccountInitParameters) DeepCopyInto(out *SubAccountInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.User != nil { + in, out := &in.User, &out.User + *out = new(SubAccountUserInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubAccountInitParameters. +func (in *SubAccountInitParameters) DeepCopy() *SubAccountInitParameters { + if in == nil { + return nil + } + out := new(SubAccountInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubAccountList) DeepCopyInto(out *SubAccountList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SubAccount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubAccountList. +func (in *SubAccountList) DeepCopy() *SubAccountList { + if in == nil { + return nil + } + out := new(SubAccountList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SubAccountList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *SubAccountObservation) DeepCopyInto(out *SubAccountObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LogzMonitorID != nil { + in, out := &in.LogzMonitorID, &out.LogzMonitorID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.User != nil { + in, out := &in.User, &out.User + *out = new(SubAccountUserObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubAccountObservation. +func (in *SubAccountObservation) DeepCopy() *SubAccountObservation { + if in == nil { + return nil + } + out := new(SubAccountObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SubAccountParameters) DeepCopyInto(out *SubAccountParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogzMonitorID != nil { + in, out := &in.LogzMonitorID, &out.LogzMonitorID + *out = new(string) + **out = **in + } + if in.LogzMonitorIDRef != nil { + in, out := &in.LogzMonitorIDRef, &out.LogzMonitorIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LogzMonitorIDSelector != nil { + in, out := &in.LogzMonitorIDSelector, &out.LogzMonitorIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.User != nil { + in, out := &in.User, &out.User + *out = new(SubAccountUserParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubAccountParameters. +func (in *SubAccountParameters) DeepCopy() *SubAccountParameters { + if in == nil { + return nil + } + out := new(SubAccountParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubAccountSpec) DeepCopyInto(out *SubAccountSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubAccountSpec. 
+func (in *SubAccountSpec) DeepCopy() *SubAccountSpec { + if in == nil { + return nil + } + out := new(SubAccountSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubAccountStatus) DeepCopyInto(out *SubAccountStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubAccountStatus. +func (in *SubAccountStatus) DeepCopy() *SubAccountStatus { + if in == nil { + return nil + } + out := new(SubAccountStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubAccountUserInitParameters) DeepCopyInto(out *SubAccountUserInitParameters) { + *out = *in + if in.Email != nil { + in, out := &in.Email, &out.Email + *out = new(string) + **out = **in + } + if in.FirstName != nil { + in, out := &in.FirstName, &out.FirstName + *out = new(string) + **out = **in + } + if in.LastName != nil { + in, out := &in.LastName, &out.LastName + *out = new(string) + **out = **in + } + if in.PhoneNumber != nil { + in, out := &in.PhoneNumber, &out.PhoneNumber + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubAccountUserInitParameters. +func (in *SubAccountUserInitParameters) DeepCopy() *SubAccountUserInitParameters { + if in == nil { + return nil + } + out := new(SubAccountUserInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SubAccountUserObservation) DeepCopyInto(out *SubAccountUserObservation) { + *out = *in + if in.Email != nil { + in, out := &in.Email, &out.Email + *out = new(string) + **out = **in + } + if in.FirstName != nil { + in, out := &in.FirstName, &out.FirstName + *out = new(string) + **out = **in + } + if in.LastName != nil { + in, out := &in.LastName, &out.LastName + *out = new(string) + **out = **in + } + if in.PhoneNumber != nil { + in, out := &in.PhoneNumber, &out.PhoneNumber + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubAccountUserObservation. +func (in *SubAccountUserObservation) DeepCopy() *SubAccountUserObservation { + if in == nil { + return nil + } + out := new(SubAccountUserObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubAccountUserParameters) DeepCopyInto(out *SubAccountUserParameters) { + *out = *in + if in.Email != nil { + in, out := &in.Email, &out.Email + *out = new(string) + **out = **in + } + if in.FirstName != nil { + in, out := &in.FirstName, &out.FirstName + *out = new(string) + **out = **in + } + if in.LastName != nil { + in, out := &in.LastName, &out.LastName + *out = new(string) + **out = **in + } + if in.PhoneNumber != nil { + in, out := &in.PhoneNumber, &out.PhoneNumber + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubAccountUserParameters. +func (in *SubAccountUserParameters) DeepCopy() *SubAccountUserParameters { + if in == nil { + return nil + } + out := new(SubAccountUserParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserInitParameters) DeepCopyInto(out *UserInitParameters) { + *out = *in + if in.Email != nil { + in, out := &in.Email, &out.Email + *out = new(string) + **out = **in + } + if in.FirstName != nil { + in, out := &in.FirstName, &out.FirstName + *out = new(string) + **out = **in + } + if in.LastName != nil { + in, out := &in.LastName, &out.LastName + *out = new(string) + **out = **in + } + if in.PhoneNumber != nil { + in, out := &in.PhoneNumber, &out.PhoneNumber + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserInitParameters. +func (in *UserInitParameters) DeepCopy() *UserInitParameters { + if in == nil { + return nil + } + out := new(UserInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserObservation) DeepCopyInto(out *UserObservation) { + *out = *in + if in.Email != nil { + in, out := &in.Email, &out.Email + *out = new(string) + **out = **in + } + if in.FirstName != nil { + in, out := &in.FirstName, &out.FirstName + *out = new(string) + **out = **in + } + if in.LastName != nil { + in, out := &in.LastName, &out.LastName + *out = new(string) + **out = **in + } + if in.PhoneNumber != nil { + in, out := &in.PhoneNumber, &out.PhoneNumber + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserObservation. +func (in *UserObservation) DeepCopy() *UserObservation { + if in == nil { + return nil + } + out := new(UserObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserParameters) DeepCopyInto(out *UserParameters) { + *out = *in + if in.Email != nil { + in, out := &in.Email, &out.Email + *out = new(string) + **out = **in + } + if in.FirstName != nil { + in, out := &in.FirstName, &out.FirstName + *out = new(string) + **out = **in + } + if in.LastName != nil { + in, out := &in.LastName, &out.LastName + *out = new(string) + **out = **in + } + if in.PhoneNumber != nil { + in, out := &in.PhoneNumber, &out.PhoneNumber + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserParameters. +func (in *UserParameters) DeepCopy() *UserParameters { + if in == nil { + return nil + } + out := new(UserParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/logz/v1beta2/zz_generated.managed.go b/apis/logz/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..0ffe3b897 --- /dev/null +++ b/apis/logz/v1beta2/zz_generated.managed.go @@ -0,0 +1,128 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Monitor. +func (mg *Monitor) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Monitor. +func (mg *Monitor) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Monitor. +func (mg *Monitor) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Monitor. +func (mg *Monitor) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Monitor. 
+func (mg *Monitor) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Monitor. +func (mg *Monitor) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Monitor. +func (mg *Monitor) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Monitor. +func (mg *Monitor) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Monitor. +func (mg *Monitor) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Monitor. +func (mg *Monitor) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Monitor. +func (mg *Monitor) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Monitor. +func (mg *Monitor) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this SubAccount. +func (mg *SubAccount) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this SubAccount. +func (mg *SubAccount) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this SubAccount. +func (mg *SubAccount) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this SubAccount. +func (mg *SubAccount) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this SubAccount. 
+func (mg *SubAccount) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this SubAccount. +func (mg *SubAccount) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this SubAccount. +func (mg *SubAccount) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this SubAccount. +func (mg *SubAccount) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this SubAccount. +func (mg *SubAccount) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this SubAccount. +func (mg *SubAccount) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this SubAccount. +func (mg *SubAccount) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this SubAccount. +func (mg *SubAccount) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/logz/v1beta2/zz_generated.managedlist.go b/apis/logz/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..9f52b2a48 --- /dev/null +++ b/apis/logz/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,26 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this MonitorList. 
+func (l *MonitorList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this SubAccountList. +func (l *SubAccountList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/logz/v1beta2/zz_generated.resolvers.go b/apis/logz/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..908943b7d --- /dev/null +++ b/apis/logz/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,81 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + client "sigs.k8s.io/controller-runtime/pkg/client" + + // ResolveReferences of this Monitor. 
+ apisresolver "github.com/upbound/provider-azure/internal/apis" +) + +func (mg *Monitor) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this SubAccount. 
+func (mg *SubAccount) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("logz.azure.upbound.io", "v1beta2", "Monitor", "MonitorList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.LogzMonitorID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.LogzMonitorIDRef, + Selector: mg.Spec.ForProvider.LogzMonitorIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.LogzMonitorID") + } + mg.Spec.ForProvider.LogzMonitorID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.LogzMonitorIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/logz/v1beta2/zz_groupversion_info.go b/apis/logz/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..1a51a897a --- /dev/null +++ b/apis/logz/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=logz.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
+const ( + CRDGroup = "logz.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/logz/v1beta2/zz_monitor_terraformed.go b/apis/logz/v1beta2/zz_monitor_terraformed.go new file mode 100755 index 000000000..8d474d24d --- /dev/null +++ b/apis/logz/v1beta2/zz_monitor_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Monitor +func (mg *Monitor) GetTerraformResourceType() string { + return "azurerm_logz_monitor" +} + +// GetConnectionDetailsMapping for this Monitor +func (tr *Monitor) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Monitor +func (tr *Monitor) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Monitor +func (tr *Monitor) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Monitor +func (tr *Monitor) GetID() string { + if 
tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Monitor +func (tr *Monitor) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Monitor +func (tr *Monitor) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Monitor +func (tr *Monitor) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Monitor +func (tr *Monitor) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Monitor using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Monitor) LateInitialize(attrs []byte) (bool, error) { + params := &MonitorParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Monitor) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/logz/v1beta2/zz_monitor_types.go b/apis/logz/v1beta2/zz_monitor_types.go new file mode 100755 index 000000000..7cf2f895b --- /dev/null +++ b/apis/logz/v1beta2/zz_monitor_types.go @@ -0,0 +1,282 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type MonitorInitParameters struct { + + // Name of the Logz organization. Changing this forces a new logz Monitor to be created. + CompanyName *string `json:"companyName,omitempty" tf:"company_name,omitempty"` + + // Whether the resource monitoring is enabled? Defaults to true. 
+ Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The ID of the Enterprise App. Changing this forces a new logz Monitor to be created. + EnterpriseAppID *string `json:"enterpriseAppId,omitempty" tf:"enterprise_app_id,omitempty"` + + // The Azure Region where the logz Monitor should exist. Changing this forces a new logz Monitor to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A plan block as defined below. Changing this forces a new resource to be created. + Plan *PlanInitParameters `json:"plan,omitempty" tf:"plan,omitempty"` + + // A mapping of tags which should be assigned to the logz Monitor. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A user block as defined below. Changing this forces a new resource to be created. + User *UserInitParameters `json:"user,omitempty" tf:"user,omitempty"` +} + +type MonitorObservation struct { + + // Name of the Logz organization. Changing this forces a new logz Monitor to be created. + CompanyName *string `json:"companyName,omitempty" tf:"company_name,omitempty"` + + // Whether the resource monitoring is enabled? Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The ID of the Enterprise App. Changing this forces a new logz Monitor to be created. + EnterpriseAppID *string `json:"enterpriseAppId,omitempty" tf:"enterprise_app_id,omitempty"` + + // The ID of the logz Monitor. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The Azure Region where the logz Monitor should exist. Changing this forces a new logz Monitor to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The ID associated with the logz organization of this logz Monitor. + LogzOrganizationID *string `json:"logzOrganizationId,omitempty" tf:"logz_organization_id,omitempty"` + + // A plan block as defined below. Changing this forces a new resource to be created. 
+ Plan *PlanObservation `json:"plan,omitempty" tf:"plan,omitempty"` + + // The name of the Resource Group where the logz Monitor should exist. Changing this forces a new logz Monitor to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // The single sign on url associated with the logz organization of this logz Monitor. + SingleSignOnURL *string `json:"singleSignOnUrl,omitempty" tf:"single_sign_on_url,omitempty"` + + // A mapping of tags which should be assigned to the logz Monitor. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A user block as defined below. Changing this forces a new resource to be created. + User *UserObservation `json:"user,omitempty" tf:"user,omitempty"` +} + +type MonitorParameters struct { + + // Name of the Logz organization. Changing this forces a new logz Monitor to be created. + // +kubebuilder:validation:Optional + CompanyName *string `json:"companyName,omitempty" tf:"company_name,omitempty"` + + // Whether the resource monitoring is enabled? Defaults to true. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The ID of the Enterprise App. Changing this forces a new logz Monitor to be created. + // +kubebuilder:validation:Optional + EnterpriseAppID *string `json:"enterpriseAppId,omitempty" tf:"enterprise_app_id,omitempty"` + + // The Azure Region where the logz Monitor should exist. Changing this forces a new logz Monitor to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A plan block as defined below. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Plan *PlanParameters `json:"plan,omitempty" tf:"plan,omitempty"` + + // The name of the Resource Group where the logz Monitor should exist. Changing this forces a new logz Monitor to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A mapping of tags which should be assigned to the logz Monitor. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A user block as defined below. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + User *UserParameters `json:"user,omitempty" tf:"user,omitempty"` +} + +type PlanInitParameters struct { + + // Different billing cycles. Possible values are MONTHLY or WEEKLY. Changing this forces a new logz Monitor to be created. + BillingCycle *string `json:"billingCycle,omitempty" tf:"billing_cycle,omitempty"` + + // Date when plan was applied. Changing this forces a new logz Monitor to be created. + EffectiveDate *string `json:"effectiveDate,omitempty" tf:"effective_date,omitempty"` + + // Plan id as published by Logz. The only possible value is 100gb14days. Defaults to 100gb14days. Changing this forces a new logz Monitor to be created. + PlanID *string `json:"planId,omitempty" tf:"plan_id,omitempty"` + + // Different usage types. Possible values are PAYG or COMMITTED. Changing this forces a new logz Monitor to be created. + UsageType *string `json:"usageType,omitempty" tf:"usage_type,omitempty"` +} + +type PlanObservation struct { + + // Different billing cycles. 
Possible values are MONTHLY or WEEKLY. Changing this forces a new logz Monitor to be created. + BillingCycle *string `json:"billingCycle,omitempty" tf:"billing_cycle,omitempty"` + + // Date when plan was applied. Changing this forces a new logz Monitor to be created. + EffectiveDate *string `json:"effectiveDate,omitempty" tf:"effective_date,omitempty"` + + // Plan id as published by Logz. The only possible value is 100gb14days. Defaults to 100gb14days. Changing this forces a new logz Monitor to be created. + PlanID *string `json:"planId,omitempty" tf:"plan_id,omitempty"` + + // Different usage types. Possible values are PAYG or COMMITTED. Changing this forces a new logz Monitor to be created. + UsageType *string `json:"usageType,omitempty" tf:"usage_type,omitempty"` +} + +type PlanParameters struct { + + // Different billing cycles. Possible values are MONTHLY or WEEKLY. Changing this forces a new logz Monitor to be created. + // +kubebuilder:validation:Optional + BillingCycle *string `json:"billingCycle" tf:"billing_cycle,omitempty"` + + // Date when plan was applied. Changing this forces a new logz Monitor to be created. + // +kubebuilder:validation:Optional + EffectiveDate *string `json:"effectiveDate" tf:"effective_date,omitempty"` + + // Plan id as published by Logz. The only possible value is 100gb14days. Defaults to 100gb14days. Changing this forces a new logz Monitor to be created. + // +kubebuilder:validation:Optional + PlanID *string `json:"planId,omitempty" tf:"plan_id,omitempty"` + + // Different usage types. Possible values are PAYG or COMMITTED. Changing this forces a new logz Monitor to be created. + // +kubebuilder:validation:Optional + UsageType *string `json:"usageType" tf:"usage_type,omitempty"` +} + +type UserInitParameters struct { + + // Email of the user used by Logz for contacting them if needed. Changing this forces a new logz Monitor to be created. + Email *string `json:"email,omitempty" tf:"email,omitempty"` + + // First Name of the user. 
Changing this forces a new logz Monitor to be created. + FirstName *string `json:"firstName,omitempty" tf:"first_name,omitempty"` + + // Last Name of the user. Changing this forces a new logz Monitor to be created. + LastName *string `json:"lastName,omitempty" tf:"last_name,omitempty"` + + // Phone number of the user used by Logz for contacting them if needed. Changing this forces a new logz Monitor to be created. + PhoneNumber *string `json:"phoneNumber,omitempty" tf:"phone_number,omitempty"` +} + +type UserObservation struct { + + // Email of the user used by Logz for contacting them if needed. Changing this forces a new logz Monitor to be created. + Email *string `json:"email,omitempty" tf:"email,omitempty"` + + // First Name of the user. Changing this forces a new logz Monitor to be created. + FirstName *string `json:"firstName,omitempty" tf:"first_name,omitempty"` + + // Last Name of the user. Changing this forces a new logz Monitor to be created. + LastName *string `json:"lastName,omitempty" tf:"last_name,omitempty"` + + // Phone number of the user used by Logz for contacting them if needed. Changing this forces a new logz Monitor to be created. + PhoneNumber *string `json:"phoneNumber,omitempty" tf:"phone_number,omitempty"` +} + +type UserParameters struct { + + // Email of the user used by Logz for contacting them if needed. Changing this forces a new logz Monitor to be created. + // +kubebuilder:validation:Optional + Email *string `json:"email" tf:"email,omitempty"` + + // First Name of the user. Changing this forces a new logz Monitor to be created. + // +kubebuilder:validation:Optional + FirstName *string `json:"firstName" tf:"first_name,omitempty"` + + // Last Name of the user. Changing this forces a new logz Monitor to be created. + // +kubebuilder:validation:Optional + LastName *string `json:"lastName" tf:"last_name,omitempty"` + + // Phone number of the user used by Logz for contacting them if needed. 
Changing this forces a new logz Monitor to be created. + // +kubebuilder:validation:Optional + PhoneNumber *string `json:"phoneNumber" tf:"phone_number,omitempty"` +} + +// MonitorSpec defines the desired state of Monitor +type MonitorSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider MonitorParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider MonitorInitParameters `json:"initProvider,omitempty"` +} + +// MonitorStatus defines the observed state of Monitor. +type MonitorStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider MonitorObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Monitor is the Schema for the Monitors API. Manages a logz Monitor. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type Monitor struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.plan) || (has(self.initProvider) && has(self.initProvider.plan))",message="spec.forProvider.plan is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.user) || (has(self.initProvider) && has(self.initProvider.user))",message="spec.forProvider.user is a required parameter" + Spec MonitorSpec `json:"spec"` + Status MonitorStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// MonitorList contains a list of Monitors +type MonitorList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Monitor `json:"items"` +} + +// Repository type metadata. 
+var ( + Monitor_Kind = "Monitor" + Monitor_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Monitor_Kind}.String() + Monitor_KindAPIVersion = Monitor_Kind + "." + CRDGroupVersion.String() + Monitor_GroupVersionKind = CRDGroupVersion.WithKind(Monitor_Kind) +) + +func init() { + SchemeBuilder.Register(&Monitor{}, &MonitorList{}) +} diff --git a/apis/logz/v1beta2/zz_subaccount_terraformed.go b/apis/logz/v1beta2/zz_subaccount_terraformed.go new file mode 100755 index 000000000..88880b274 --- /dev/null +++ b/apis/logz/v1beta2/zz_subaccount_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this SubAccount +func (mg *SubAccount) GetTerraformResourceType() string { + return "azurerm_logz_sub_account" +} + +// GetConnectionDetailsMapping for this SubAccount +func (tr *SubAccount) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this SubAccount +func (tr *SubAccount) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this SubAccount +func (tr *SubAccount) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this SubAccount +func (tr *SubAccount) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this SubAccount +func 
(tr *SubAccount) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this SubAccount +func (tr *SubAccount) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this SubAccount +func (tr *SubAccount) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this SubAccount +func (tr *SubAccount) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this SubAccount using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *SubAccount) LateInitialize(attrs []byte) (bool, error) { + params := &SubAccountParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *SubAccount) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/logz/v1beta2/zz_subaccount_types.go b/apis/logz/v1beta2/zz_subaccount_types.go new file mode 100755 index 000000000..eb3140b66 --- /dev/null +++ b/apis/logz/v1beta2/zz_subaccount_types.go @@ -0,0 +1,186 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type SubAccountInitParameters struct { + + // Whether the resource monitoring is enabled? Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // A mapping of tags which should be assigned to the logz Sub Account. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A user block as defined below. Changing this forces a new resource to be created. + User *SubAccountUserInitParameters `json:"user,omitempty" tf:"user,omitempty"` +} + +type SubAccountObservation struct { + + // Whether the resource monitoring is enabled? Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The ID of the logz Sub Account. 
+ ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The ID of the Logz Monitor. Changing this forces a new logz Sub Account to be created. + LogzMonitorID *string `json:"logzMonitorId,omitempty" tf:"logz_monitor_id,omitempty"` + + // A mapping of tags which should be assigned to the logz Sub Account. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A user block as defined below. Changing this forces a new resource to be created. + User *SubAccountUserObservation `json:"user,omitempty" tf:"user,omitempty"` +} + +type SubAccountParameters struct { + + // Whether the resource monitoring is enabled? Defaults to true. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The ID of the Logz Monitor. Changing this forces a new logz Sub Account to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/logz/v1beta2.Monitor + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + LogzMonitorID *string `json:"logzMonitorId,omitempty" tf:"logz_monitor_id,omitempty"` + + // Reference to a Monitor in logz to populate logzMonitorId. + // +kubebuilder:validation:Optional + LogzMonitorIDRef *v1.Reference `json:"logzMonitorIdRef,omitempty" tf:"-"` + + // Selector for a Monitor in logz to populate logzMonitorId. + // +kubebuilder:validation:Optional + LogzMonitorIDSelector *v1.Selector `json:"logzMonitorIdSelector,omitempty" tf:"-"` + + // A mapping of tags which should be assigned to the logz Sub Account. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A user block as defined below. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + User *SubAccountUserParameters `json:"user,omitempty" tf:"user,omitempty"` +} + +type SubAccountUserInitParameters struct { + + // Email of the user used by Logz for contacting them if needed. A valid email address consists of an email prefix and an email domain. The prefix and domain may contain only letters, numbers, underscores, periods and dashes. Changing this forces a new logz Sub Account to be created. + Email *string `json:"email,omitempty" tf:"email,omitempty"` + + // First Name of the user. Possible values must be between 1 and 50 characters in length. Changing this forces a new logz Sub Account to be created. + FirstName *string `json:"firstName,omitempty" tf:"first_name,omitempty"` + + // Last Name of the user. Possible values must be between 1 and 50 characters in length. Changing this forces a new logz Sub Account to be created. + LastName *string `json:"lastName,omitempty" tf:"last_name,omitempty"` + + // Phone number of the user used by Logz for contacting them if needed. Possible values must be between 1 and 40 characters in length. Changing this forces a new logz Sub Account to be created. + PhoneNumber *string `json:"phoneNumber,omitempty" tf:"phone_number,omitempty"` +} + +type SubAccountUserObservation struct { + + // Email of the user used by Logz for contacting them if needed. A valid email address consists of an email prefix and an email domain. The prefix and domain may contain only letters, numbers, underscores, periods and dashes. Changing this forces a new logz Sub Account to be created. + Email *string `json:"email,omitempty" tf:"email,omitempty"` + + // First Name of the user. Possible values must be between 1 and 50 characters in length. Changing this forces a new logz Sub Account to be created. + FirstName *string `json:"firstName,omitempty" tf:"first_name,omitempty"` + + // Last Name of the user. Possible values must be between 1 and 50 characters in length. 
Changing this forces a new logz Sub Account to be created. + LastName *string `json:"lastName,omitempty" tf:"last_name,omitempty"` + + // Phone number of the user used by Logz for contacting them if needed. Possible values must be between 1 and 40 characters in length. Changing this forces a new logz Sub Account to be created. + PhoneNumber *string `json:"phoneNumber,omitempty" tf:"phone_number,omitempty"` +} + +type SubAccountUserParameters struct { + + // Email of the user used by Logz for contacting them if needed. A valid email address consists of an email prefix and an email domain. The prefix and domain may contain only letters, numbers, underscores, periods and dashes. Changing this forces a new logz Sub Account to be created. + // +kubebuilder:validation:Optional + Email *string `json:"email" tf:"email,omitempty"` + + // First Name of the user. Possible values must be between 1 and 50 characters in length. Changing this forces a new logz Sub Account to be created. + // +kubebuilder:validation:Optional + FirstName *string `json:"firstName" tf:"first_name,omitempty"` + + // Last Name of the user. Possible values must be between 1 and 50 characters in length. Changing this forces a new logz Sub Account to be created. + // +kubebuilder:validation:Optional + LastName *string `json:"lastName" tf:"last_name,omitempty"` + + // Phone number of the user used by Logz for contacting them if needed. Possible values must be between 1 and 40 characters in length. Changing this forces a new logz Sub Account to be created. + // +kubebuilder:validation:Optional + PhoneNumber *string `json:"phoneNumber" tf:"phone_number,omitempty"` +} + +// SubAccountSpec defines the desired state of SubAccount +type SubAccountSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider SubAccountParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. 
+ // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider SubAccountInitParameters `json:"initProvider,omitempty"` +} + +// SubAccountStatus defines the observed state of SubAccount. +type SubAccountStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SubAccountObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// SubAccount is the Schema for the SubAccounts API. Manages a logz Sub Account. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type SubAccount struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.user) || (has(self.initProvider) && has(self.initProvider.user))",message="spec.forProvider.user is a required parameter" + Spec SubAccountSpec `json:"spec"` + Status SubAccountStatus 
`json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SubAccountList contains a list of SubAccounts +type SubAccountList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SubAccount `json:"items"` +} + +// Repository type metadata. +var ( + SubAccount_Kind = "SubAccount" + SubAccount_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: SubAccount_Kind}.String() + SubAccount_KindAPIVersion = SubAccount_Kind + "." + CRDGroupVersion.String() + SubAccount_GroupVersionKind = CRDGroupVersion.WithKind(SubAccount_Kind) +) + +func init() { + SchemeBuilder.Register(&SubAccount{}, &SubAccountList{}) +} diff --git a/apis/machinelearningservices/v1beta1/zz_generated.conversion_spokes.go b/apis/machinelearningservices/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..49911e574 --- /dev/null +++ b/apis/machinelearningservices/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,94 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this ComputeCluster to the hub type. +func (tr *ComputeCluster) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ComputeCluster type. 
+func (tr *ComputeCluster) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this ComputeInstance to the hub type. +func (tr *ComputeInstance) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ComputeInstance type. +func (tr *ComputeInstance) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this SynapseSpark to the hub type. +func (tr *SynapseSpark) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the SynapseSpark type. 
+func (tr *SynapseSpark) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Workspace to the hub type. +func (tr *Workspace) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Workspace type. +func (tr *Workspace) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/machinelearningservices/v1beta2/zz_computecluster_terraformed.go b/apis/machinelearningservices/v1beta2/zz_computecluster_terraformed.go new file mode 100755 index 000000000..54417d300 --- /dev/null +++ b/apis/machinelearningservices/v1beta2/zz_computecluster_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ComputeCluster +func (mg *ComputeCluster) GetTerraformResourceType() string { + return "azurerm_machine_learning_compute_cluster" +} + +// GetConnectionDetailsMapping for this ComputeCluster +func (tr *ComputeCluster) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ComputeCluster +func (tr *ComputeCluster) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ComputeCluster +func (tr *ComputeCluster) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ComputeCluster +func (tr *ComputeCluster) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ComputeCluster +func (tr *ComputeCluster) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ComputeCluster +func (tr *ComputeCluster) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ComputeCluster +func (tr *ComputeCluster) GetInitParameters() (map[string]any, error) { + p, err := 
json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this ComputeCluster +func (tr *ComputeCluster) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ComputeCluster using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ComputeCluster) LateInitialize(attrs []byte) (bool, error) { + params := &ComputeClusterParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ComputeCluster) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/machinelearningservices/v1beta2/zz_computecluster_types.go b/apis/machinelearningservices/v1beta2/zz_computecluster_types.go new file mode 100755 index 000000000..f2879bf3e --- /dev/null +++ b/apis/machinelearningservices/v1beta2/zz_computecluster_types.go @@ -0,0 +1,390 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ComputeClusterInitParameters struct { + + // The description of the Machine Learning compute. Changing this forces a new Machine Learning Compute Cluster to be created. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // An identity block as defined below. Changing this forces a new Machine Learning Compute Cluster to be created. + Identity *IdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Whether local authentication methods is enabled. Defaults to true. Changing this forces a new Machine Learning Compute Cluster to be created. + LocalAuthEnabled *bool `json:"localAuthEnabled,omitempty" tf:"local_auth_enabled,omitempty"` + + // The Azure Region where the Machine Learning Compute Cluster should exist. Changing this forces a new Machine Learning Compute Cluster to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The ID of the Machine Learning Workspace. Changing this forces a new Machine Learning Compute Cluster to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/machinelearningservices/v1beta2.Workspace + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + MachineLearningWorkspaceID *string `json:"machineLearningWorkspaceId,omitempty" tf:"machine_learning_workspace_id,omitempty"` + + // Reference to a Workspace in machinelearningservices to populate machineLearningWorkspaceId. + // +kubebuilder:validation:Optional + MachineLearningWorkspaceIDRef *v1.Reference `json:"machineLearningWorkspaceIdRef,omitempty" tf:"-"` + + // Selector for a Workspace in machinelearningservices to populate machineLearningWorkspaceId. + // +kubebuilder:validation:Optional + MachineLearningWorkspaceIDSelector *v1.Selector `json:"machineLearningWorkspaceIdSelector,omitempty" tf:"-"` + + // The name which should be used for this Machine Learning Compute Cluster. Changing this forces a new Machine Learning Compute Cluster to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Whether the compute cluster will have a public ip. To set this to false a subnet_resource_id needs to be set. Defaults to true. Changing this forces a new Machine Learning Compute Cluster to be created. + NodePublicIPEnabled *bool `json:"nodePublicIpEnabled,omitempty" tf:"node_public_ip_enabled,omitempty"` + + // Credentials for an administrator user account that will be created on each compute node. A ssh block as defined below. Changing this forces a new Machine Learning Compute Cluster to be created. + SSH *SSHInitParameters `json:"ssh,omitempty" tf:"ssh,omitempty"` + + // A boolean value indicating whether enable the public SSH port. Changing this forces a new Machine Learning Compute Cluster to be created. + SSHPublicAccessEnabled *bool `json:"sshPublicAccessEnabled,omitempty" tf:"ssh_public_access_enabled,omitempty"` + + // A scale_settings block as defined below. 
Changing this forces a new Machine Learning Compute Cluster to be created. + ScaleSettings *ScaleSettingsInitParameters `json:"scaleSettings,omitempty" tf:"scale_settings,omitempty"` + + // The ID of the Subnet that the Compute Cluster should reside in. Changing this forces a new Machine Learning Compute Cluster to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + SubnetResourceID *string `json:"subnetResourceId,omitempty" tf:"subnet_resource_id,omitempty"` + + // Reference to a Subnet in network to populate subnetResourceId. + // +kubebuilder:validation:Optional + SubnetResourceIDRef *v1.Reference `json:"subnetResourceIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetResourceId. + // +kubebuilder:validation:Optional + SubnetResourceIDSelector *v1.Selector `json:"subnetResourceIdSelector,omitempty" tf:"-"` + + // A mapping of tags which should be assigned to the Machine Learning Compute Cluster. Changing this forces a new Machine Learning Compute Cluster to be created. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The priority of the VM. Changing this forces a new Machine Learning Compute Cluster to be created. Accepted values are Dedicated and LowPriority. + VMPriority *string `json:"vmPriority,omitempty" tf:"vm_priority,omitempty"` + + // The size of the VM. Changing this forces a new Machine Learning Compute Cluster to be created. + VMSize *string `json:"vmSize,omitempty" tf:"vm_size,omitempty"` +} + +type ComputeClusterObservation struct { + + // The description of the Machine Learning compute. Changing this forces a new Machine Learning Compute Cluster to be created. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The ID of the Machine Learning Compute Cluster. 
+ ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. Changing this forces a new Machine Learning Compute Cluster to be created. + Identity *IdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // Whether local authentication methods is enabled. Defaults to true. Changing this forces a new Machine Learning Compute Cluster to be created. + LocalAuthEnabled *bool `json:"localAuthEnabled,omitempty" tf:"local_auth_enabled,omitempty"` + + // The Azure Region where the Machine Learning Compute Cluster should exist. Changing this forces a new Machine Learning Compute Cluster to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The ID of the Machine Learning Workspace. Changing this forces a new Machine Learning Compute Cluster to be created. + MachineLearningWorkspaceID *string `json:"machineLearningWorkspaceId,omitempty" tf:"machine_learning_workspace_id,omitempty"` + + // The name which should be used for this Machine Learning Compute Cluster. Changing this forces a new Machine Learning Compute Cluster to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Whether the compute cluster will have a public ip. To set this to false a subnet_resource_id needs to be set. Defaults to true. Changing this forces a new Machine Learning Compute Cluster to be created. + NodePublicIPEnabled *bool `json:"nodePublicIpEnabled,omitempty" tf:"node_public_ip_enabled,omitempty"` + + // Credentials for an administrator user account that will be created on each compute node. A ssh block as defined below. Changing this forces a new Machine Learning Compute Cluster to be created. + SSH *SSHObservation `json:"ssh,omitempty" tf:"ssh,omitempty"` + + // A boolean value indicating whether enable the public SSH port. Changing this forces a new Machine Learning Compute Cluster to be created. 
+ SSHPublicAccessEnabled *bool `json:"sshPublicAccessEnabled,omitempty" tf:"ssh_public_access_enabled,omitempty"` + + // A scale_settings block as defined below. Changing this forces a new Machine Learning Compute Cluster to be created. + ScaleSettings *ScaleSettingsObservation `json:"scaleSettings,omitempty" tf:"scale_settings,omitempty"` + + // The ID of the Subnet that the Compute Cluster should reside in. Changing this forces a new Machine Learning Compute Cluster to be created. + SubnetResourceID *string `json:"subnetResourceId,omitempty" tf:"subnet_resource_id,omitempty"` + + // A mapping of tags which should be assigned to the Machine Learning Compute Cluster. Changing this forces a new Machine Learning Compute Cluster to be created. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The priority of the VM. Changing this forces a new Machine Learning Compute Cluster to be created. Accepted values are Dedicated and LowPriority. + VMPriority *string `json:"vmPriority,omitempty" tf:"vm_priority,omitempty"` + + // The size of the VM. Changing this forces a new Machine Learning Compute Cluster to be created. + VMSize *string `json:"vmSize,omitempty" tf:"vm_size,omitempty"` +} + +type ComputeClusterParameters struct { + + // The description of the Machine Learning compute. Changing this forces a new Machine Learning Compute Cluster to be created. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // An identity block as defined below. Changing this forces a new Machine Learning Compute Cluster to be created. + // +kubebuilder:validation:Optional + Identity *IdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Whether local authentication methods is enabled. Defaults to true. Changing this forces a new Machine Learning Compute Cluster to be created. 
+ // +kubebuilder:validation:Optional + LocalAuthEnabled *bool `json:"localAuthEnabled,omitempty" tf:"local_auth_enabled,omitempty"` + + // The Azure Region where the Machine Learning Compute Cluster should exist. Changing this forces a new Machine Learning Compute Cluster to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The ID of the Machine Learning Workspace. Changing this forces a new Machine Learning Compute Cluster to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/machinelearningservices/v1beta2.Workspace + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + MachineLearningWorkspaceID *string `json:"machineLearningWorkspaceId,omitempty" tf:"machine_learning_workspace_id,omitempty"` + + // Reference to a Workspace in machinelearningservices to populate machineLearningWorkspaceId. + // +kubebuilder:validation:Optional + MachineLearningWorkspaceIDRef *v1.Reference `json:"machineLearningWorkspaceIdRef,omitempty" tf:"-"` + + // Selector for a Workspace in machinelearningservices to populate machineLearningWorkspaceId. + // +kubebuilder:validation:Optional + MachineLearningWorkspaceIDSelector *v1.Selector `json:"machineLearningWorkspaceIdSelector,omitempty" tf:"-"` + + // The name which should be used for this Machine Learning Compute Cluster. Changing this forces a new Machine Learning Compute Cluster to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Whether the compute cluster will have a public ip. To set this to false a subnet_resource_id needs to be set. Defaults to true. Changing this forces a new Machine Learning Compute Cluster to be created. 
+ // +kubebuilder:validation:Optional + NodePublicIPEnabled *bool `json:"nodePublicIpEnabled,omitempty" tf:"node_public_ip_enabled,omitempty"` + + // Credentials for an administrator user account that will be created on each compute node. A ssh block as defined below. Changing this forces a new Machine Learning Compute Cluster to be created. + // +kubebuilder:validation:Optional + SSH *SSHParameters `json:"ssh,omitempty" tf:"ssh,omitempty"` + + // A boolean value indicating whether enable the public SSH port. Changing this forces a new Machine Learning Compute Cluster to be created. + // +kubebuilder:validation:Optional + SSHPublicAccessEnabled *bool `json:"sshPublicAccessEnabled,omitempty" tf:"ssh_public_access_enabled,omitempty"` + + // A scale_settings block as defined below. Changing this forces a new Machine Learning Compute Cluster to be created. + // +kubebuilder:validation:Optional + ScaleSettings *ScaleSettingsParameters `json:"scaleSettings,omitempty" tf:"scale_settings,omitempty"` + + // The ID of the Subnet that the Compute Cluster should reside in. Changing this forces a new Machine Learning Compute Cluster to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + SubnetResourceID *string `json:"subnetResourceId,omitempty" tf:"subnet_resource_id,omitempty"` + + // Reference to a Subnet in network to populate subnetResourceId. + // +kubebuilder:validation:Optional + SubnetResourceIDRef *v1.Reference `json:"subnetResourceIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetResourceId. + // +kubebuilder:validation:Optional + SubnetResourceIDSelector *v1.Selector `json:"subnetResourceIdSelector,omitempty" tf:"-"` + + // A mapping of tags which should be assigned to the Machine Learning Compute Cluster. 
Changing this forces a new Machine Learning Compute Cluster to be created. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The priority of the VM. Changing this forces a new Machine Learning Compute Cluster to be created. Accepted values are Dedicated and LowPriority. + // +kubebuilder:validation:Optional + VMPriority *string `json:"vmPriority,omitempty" tf:"vm_priority,omitempty"` + + // The size of the VM. Changing this forces a new Machine Learning Compute Cluster to be created. + // +kubebuilder:validation:Optional + VMSize *string `json:"vmSize,omitempty" tf:"vm_size,omitempty"` +} + +type IdentityInitParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Machine Learning Compute Cluster. Changing this forces a new resource to be created. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Machine Learning Compute Cluster. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). Changing this forces a new resource to be created. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityObservation struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Machine Learning Compute Cluster. Changing this forces a new resource to be created. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Principal ID for the Service Principal associated with the Managed Service Identity of this Machine Learning Compute Cluster. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID for the Service Principal associated with the Managed Service Identity of this Machine Learning Compute Cluster. 
+ TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Machine Learning Compute Cluster. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). Changing this forces a new resource to be created. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Machine Learning Compute Cluster. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Machine Learning Compute Cluster. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type SSHInitParameters struct { + + // Password of the administrator user account. Changing this forces a new Machine Learning Compute Cluster to be created. + AdminPassword *string `json:"adminPassword,omitempty" tf:"admin_password,omitempty"` + + // Name of the administrator user account which can be used to SSH to nodes. Changing this forces a new Machine Learning Compute Cluster to be created. + AdminUsername *string `json:"adminUsername,omitempty" tf:"admin_username,omitempty"` + + // SSH public key of the administrator user account. Changing this forces a new Machine Learning Compute Cluster to be created. + KeyValue *string `json:"keyValue,omitempty" tf:"key_value,omitempty"` +} + +type SSHObservation struct { + + // Password of the administrator user account. Changing this forces a new Machine Learning Compute Cluster to be created. 
+ AdminPassword *string `json:"adminPassword,omitempty" tf:"admin_password,omitempty"` + + // Name of the administrator user account which can be used to SSH to nodes. Changing this forces a new Machine Learning Compute Cluster to be created. + AdminUsername *string `json:"adminUsername,omitempty" tf:"admin_username,omitempty"` + + // SSH public key of the administrator user account. Changing this forces a new Machine Learning Compute Cluster to be created. + KeyValue *string `json:"keyValue,omitempty" tf:"key_value,omitempty"` +} + +type SSHParameters struct { + + // Password of the administrator user account. Changing this forces a new Machine Learning Compute Cluster to be created. + // +kubebuilder:validation:Optional + AdminPassword *string `json:"adminPassword,omitempty" tf:"admin_password,omitempty"` + + // Name of the administrator user account which can be used to SSH to nodes. Changing this forces a new Machine Learning Compute Cluster to be created. + // +kubebuilder:validation:Optional + AdminUsername *string `json:"adminUsername" tf:"admin_username,omitempty"` + + // SSH public key of the administrator user account. Changing this forces a new Machine Learning Compute Cluster to be created. + // +kubebuilder:validation:Optional + KeyValue *string `json:"keyValue,omitempty" tf:"key_value,omitempty"` +} + +type ScaleSettingsInitParameters struct { + + // Maximum node count. Changing this forces a new Machine Learning Compute Cluster to be created. + MaxNodeCount *float64 `json:"maxNodeCount,omitempty" tf:"max_node_count,omitempty"` + + // Minimal node count. Changing this forces a new Machine Learning Compute Cluster to be created. + MinNodeCount *float64 `json:"minNodeCount,omitempty" tf:"min_node_count,omitempty"` + + // Node Idle Time Before Scale Down: defines the time until the compute is shutdown when it has gone into Idle state. Is defined according to W3C XML schema standard for duration. 
Changing this forces a new Machine Learning Compute Cluster to be created. + ScaleDownNodesAfterIdleDuration *string `json:"scaleDownNodesAfterIdleDuration,omitempty" tf:"scale_down_nodes_after_idle_duration,omitempty"` +} + +type ScaleSettingsObservation struct { + + // Maximum node count. Changing this forces a new Machine Learning Compute Cluster to be created. + MaxNodeCount *float64 `json:"maxNodeCount,omitempty" tf:"max_node_count,omitempty"` + + // Minimal node count. Changing this forces a new Machine Learning Compute Cluster to be created. + MinNodeCount *float64 `json:"minNodeCount,omitempty" tf:"min_node_count,omitempty"` + + // Node Idle Time Before Scale Down: defines the time until the compute is shutdown when it has gone into Idle state. Is defined according to W3C XML schema standard for duration. Changing this forces a new Machine Learning Compute Cluster to be created. + ScaleDownNodesAfterIdleDuration *string `json:"scaleDownNodesAfterIdleDuration,omitempty" tf:"scale_down_nodes_after_idle_duration,omitempty"` +} + +type ScaleSettingsParameters struct { + + // Maximum node count. Changing this forces a new Machine Learning Compute Cluster to be created. + // +kubebuilder:validation:Optional + MaxNodeCount *float64 `json:"maxNodeCount" tf:"max_node_count,omitempty"` + + // Minimal node count. Changing this forces a new Machine Learning Compute Cluster to be created. + // +kubebuilder:validation:Optional + MinNodeCount *float64 `json:"minNodeCount" tf:"min_node_count,omitempty"` + + // Node Idle Time Before Scale Down: defines the time until the compute is shutdown when it has gone into Idle state. Is defined according to W3C XML schema standard for duration. Changing this forces a new Machine Learning Compute Cluster to be created. 
+ // +kubebuilder:validation:Optional + ScaleDownNodesAfterIdleDuration *string `json:"scaleDownNodesAfterIdleDuration" tf:"scale_down_nodes_after_idle_duration,omitempty"` +} + +// ComputeClusterSpec defines the desired state of ComputeCluster +type ComputeClusterSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ComputeClusterParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ComputeClusterInitParameters `json:"initProvider,omitempty"` +} + +// ComputeClusterStatus defines the observed state of ComputeCluster. +type ComputeClusterStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ComputeClusterObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ComputeCluster is the Schema for the ComputeClusters API. Manages a Machine Learning Compute Cluster. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type ComputeCluster struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.scaleSettings) || (has(self.initProvider) && has(self.initProvider.scaleSettings))",message="spec.forProvider.scaleSettings is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.vmPriority) || (has(self.initProvider) && has(self.initProvider.vmPriority))",message="spec.forProvider.vmPriority is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 
'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.vmSize) || (has(self.initProvider) && has(self.initProvider.vmSize))",message="spec.forProvider.vmSize is a required parameter" + Spec ComputeClusterSpec `json:"spec"` + Status ComputeClusterStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ComputeClusterList contains a list of ComputeClusters +type ComputeClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ComputeCluster `json:"items"` +} + +// Repository type metadata. +var ( + ComputeCluster_Kind = "ComputeCluster" + ComputeCluster_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ComputeCluster_Kind}.String() + ComputeCluster_KindAPIVersion = ComputeCluster_Kind + "." + CRDGroupVersion.String() + ComputeCluster_GroupVersionKind = CRDGroupVersion.WithKind(ComputeCluster_Kind) +) + +func init() { + SchemeBuilder.Register(&ComputeCluster{}, &ComputeClusterList{}) +} diff --git a/apis/machinelearningservices/v1beta2/zz_computeinstance_terraformed.go b/apis/machinelearningservices/v1beta2/zz_computeinstance_terraformed.go new file mode 100755 index 000000000..292b53ee6 --- /dev/null +++ b/apis/machinelearningservices/v1beta2/zz_computeinstance_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ComputeInstance +func (mg *ComputeInstance) GetTerraformResourceType() string { + return "azurerm_machine_learning_compute_instance" +} + +// GetConnectionDetailsMapping for this ComputeInstance +func (tr *ComputeInstance) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ComputeInstance +func (tr *ComputeInstance) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ComputeInstance +func (tr *ComputeInstance) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ComputeInstance +func (tr *ComputeInstance) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ComputeInstance +func (tr *ComputeInstance) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ComputeInstance +func (tr *ComputeInstance) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ComputeInstance +func (tr *ComputeInstance) GetInitParameters() (map[string]any, error) { + p, err := 
json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this ComputeInstance +func (tr *ComputeInstance) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ComputeInstance using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ComputeInstance) LateInitialize(attrs []byte) (bool, error) { + params := &ComputeInstanceParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ComputeInstance) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/machinelearningservices/v1beta2/zz_computeinstance_types.go b/apis/machinelearningservices/v1beta2/zz_computeinstance_types.go new file mode 100755 index 000000000..78bda2b8c --- /dev/null +++ b/apis/machinelearningservices/v1beta2/zz_computeinstance_types.go @@ -0,0 +1,330 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AssignToUserInitParameters struct { + + // User’s AAD Object Id. + ObjectID *string `json:"objectId,omitempty" tf:"object_id,omitempty"` + + // User’s AAD Tenant Id. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` +} + +type AssignToUserObservation struct { + + // User’s AAD Object Id. + ObjectID *string `json:"objectId,omitempty" tf:"object_id,omitempty"` + + // User’s AAD Tenant Id. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` +} + +type AssignToUserParameters struct { + + // User’s AAD Object Id. + // +kubebuilder:validation:Optional + ObjectID *string `json:"objectId,omitempty" tf:"object_id,omitempty"` + + // User’s AAD Tenant Id. + // +kubebuilder:validation:Optional + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` +} + +type ComputeInstanceIdentityInitParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Machine Learning Compute Instance. Changing this forces a new resource to be created. 
+ // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Machine Learning Compute Instance. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). Changing this forces a new resource to be created. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ComputeInstanceIdentityObservation struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Machine Learning Compute Instance. Changing this forces a new resource to be created. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Principal ID for the Service Principal associated with the Managed Service Identity of this Machine Learning Compute Instance. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID for the Service Principal associated with the Managed Service Identity of this Machine Learning Compute Instance. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Machine Learning Compute Instance. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). Changing this forces a new resource to be created. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ComputeInstanceIdentityParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Machine Learning Compute Instance. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Machine Learning Compute Instance. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type ComputeInstanceInitParameters struct { + + // A assign_to_user block as defined below. A user explicitly assigned to a personal compute instance. Changing this forces a new Machine Learning Compute Instance to be created. + AssignToUser *AssignToUserInitParameters `json:"assignToUser,omitempty" tf:"assign_to_user,omitempty"` + + // The Compute Instance Authorization type. Possible values include: personal. Changing this forces a new Machine Learning Compute Instance to be created. + AuthorizationType *string `json:"authorizationType,omitempty" tf:"authorization_type,omitempty"` + + // The description of the Machine Learning Compute Instance. Changing this forces a new Machine Learning Compute Instance to be created. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // An identity block as defined below. Changing this forces a new Machine Learning Compute Instance to be created. + Identity *ComputeInstanceIdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Whether local authentication methods is enabled. Defaults to true. Changing this forces a new Machine Learning Compute Instance to be created. + LocalAuthEnabled *bool `json:"localAuthEnabled,omitempty" tf:"local_auth_enabled,omitempty"` + + // The Azure Region where the Machine Learning Compute Instance should exist. Changing this forces a new Machine Learning Compute Instance to be created. 
+ Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Whether the compute instance will have a public ip. To set this to false a subnet_resource_id needs to be set. Defaults to true. Changing this forces a new Machine Learning Compute Cluster to be created. + NodePublicIPEnabled *bool `json:"nodePublicIpEnabled,omitempty" tf:"node_public_ip_enabled,omitempty"` + + // A ssh block as defined below. Specifies policy and settings for SSH access. Changing this forces a new Machine Learning Compute Instance to be created. + SSH *ComputeInstanceSSHInitParameters `json:"ssh,omitempty" tf:"ssh,omitempty"` + + // Virtual network subnet resource ID the compute nodes belong to. Changing this forces a new Machine Learning Compute Instance to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + SubnetResourceID *string `json:"subnetResourceId,omitempty" tf:"subnet_resource_id,omitempty"` + + // Reference to a Subnet in network to populate subnetResourceId. + // +kubebuilder:validation:Optional + SubnetResourceIDRef *v1.Reference `json:"subnetResourceIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetResourceId. + // +kubebuilder:validation:Optional + SubnetResourceIDSelector *v1.Selector `json:"subnetResourceIdSelector,omitempty" tf:"-"` + + // A mapping of tags which should be assigned to the Machine Learning Compute Instance. Changing this forces a new Machine Learning Compute Instance to be created. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The Virtual Machine Size. Changing this forces a new Machine Learning Compute Instance to be created. 
+ VirtualMachineSize *string `json:"virtualMachineSize,omitempty" tf:"virtual_machine_size,omitempty"` +} + +type ComputeInstanceObservation struct { + + // A assign_to_user block as defined below. A user explicitly assigned to a personal compute instance. Changing this forces a new Machine Learning Compute Instance to be created. + AssignToUser *AssignToUserObservation `json:"assignToUser,omitempty" tf:"assign_to_user,omitempty"` + + // The Compute Instance Authorization type. Possible values include: personal. Changing this forces a new Machine Learning Compute Instance to be created. + AuthorizationType *string `json:"authorizationType,omitempty" tf:"authorization_type,omitempty"` + + // The description of the Machine Learning Compute Instance. Changing this forces a new Machine Learning Compute Instance to be created. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The ID of the Machine Learning Compute Instance. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. Changing this forces a new Machine Learning Compute Instance to be created. + Identity *ComputeInstanceIdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // Whether local authentication methods is enabled. Defaults to true. Changing this forces a new Machine Learning Compute Instance to be created. + LocalAuthEnabled *bool `json:"localAuthEnabled,omitempty" tf:"local_auth_enabled,omitempty"` + + // The Azure Region where the Machine Learning Compute Instance should exist. Changing this forces a new Machine Learning Compute Instance to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The ID of the Machine Learning Workspace. Changing this forces a new Machine Learning Compute Instance to be created. 
+ MachineLearningWorkspaceID *string `json:"machineLearningWorkspaceId,omitempty" tf:"machine_learning_workspace_id,omitempty"` + + // Whether the compute instance will have a public ip. To set this to false a subnet_resource_id needs to be set. Defaults to true. Changing this forces a new Machine Learning Compute Cluster to be created. + NodePublicIPEnabled *bool `json:"nodePublicIpEnabled,omitempty" tf:"node_public_ip_enabled,omitempty"` + + // A ssh block as defined below. Specifies policy and settings for SSH access. Changing this forces a new Machine Learning Compute Instance to be created. + SSH *ComputeInstanceSSHObservation `json:"ssh,omitempty" tf:"ssh,omitempty"` + + // Virtual network subnet resource ID the compute nodes belong to. Changing this forces a new Machine Learning Compute Instance to be created. + SubnetResourceID *string `json:"subnetResourceId,omitempty" tf:"subnet_resource_id,omitempty"` + + // A mapping of tags which should be assigned to the Machine Learning Compute Instance. Changing this forces a new Machine Learning Compute Instance to be created. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The Virtual Machine Size. Changing this forces a new Machine Learning Compute Instance to be created. + VirtualMachineSize *string `json:"virtualMachineSize,omitempty" tf:"virtual_machine_size,omitempty"` +} + +type ComputeInstanceParameters struct { + + // A assign_to_user block as defined below. A user explicitly assigned to a personal compute instance. Changing this forces a new Machine Learning Compute Instance to be created. + // +kubebuilder:validation:Optional + AssignToUser *AssignToUserParameters `json:"assignToUser,omitempty" tf:"assign_to_user,omitempty"` + + // The Compute Instance Authorization type. Possible values include: personal. Changing this forces a new Machine Learning Compute Instance to be created. 
+ // +kubebuilder:validation:Optional + AuthorizationType *string `json:"authorizationType,omitempty" tf:"authorization_type,omitempty"` + + // The description of the Machine Learning Compute Instance. Changing this forces a new Machine Learning Compute Instance to be created. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // An identity block as defined below. Changing this forces a new Machine Learning Compute Instance to be created. + // +kubebuilder:validation:Optional + Identity *ComputeInstanceIdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Whether local authentication methods is enabled. Defaults to true. Changing this forces a new Machine Learning Compute Instance to be created. + // +kubebuilder:validation:Optional + LocalAuthEnabled *bool `json:"localAuthEnabled,omitempty" tf:"local_auth_enabled,omitempty"` + + // The Azure Region where the Machine Learning Compute Instance should exist. Changing this forces a new Machine Learning Compute Instance to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The ID of the Machine Learning Workspace. Changing this forces a new Machine Learning Compute Instance to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/machinelearningservices/v1beta2.Workspace + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + MachineLearningWorkspaceID *string `json:"machineLearningWorkspaceId,omitempty" tf:"machine_learning_workspace_id,omitempty"` + + // Reference to a Workspace in machinelearningservices to populate machineLearningWorkspaceId. 
+ // +kubebuilder:validation:Optional + MachineLearningWorkspaceIDRef *v1.Reference `json:"machineLearningWorkspaceIdRef,omitempty" tf:"-"` + + // Selector for a Workspace in machinelearningservices to populate machineLearningWorkspaceId. + // +kubebuilder:validation:Optional + MachineLearningWorkspaceIDSelector *v1.Selector `json:"machineLearningWorkspaceIdSelector,omitempty" tf:"-"` + + // Whether the compute instance will have a public ip. To set this to false a subnet_resource_id needs to be set. Defaults to true. Changing this forces a new Machine Learning Compute Cluster to be created. + // +kubebuilder:validation:Optional + NodePublicIPEnabled *bool `json:"nodePublicIpEnabled,omitempty" tf:"node_public_ip_enabled,omitempty"` + + // A ssh block as defined below. Specifies policy and settings for SSH access. Changing this forces a new Machine Learning Compute Instance to be created. + // +kubebuilder:validation:Optional + SSH *ComputeInstanceSSHParameters `json:"ssh,omitempty" tf:"ssh,omitempty"` + + // Virtual network subnet resource ID the compute nodes belong to. Changing this forces a new Machine Learning Compute Instance to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + SubnetResourceID *string `json:"subnetResourceId,omitempty" tf:"subnet_resource_id,omitempty"` + + // Reference to a Subnet in network to populate subnetResourceId. + // +kubebuilder:validation:Optional + SubnetResourceIDRef *v1.Reference `json:"subnetResourceIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetResourceId. + // +kubebuilder:validation:Optional + SubnetResourceIDSelector *v1.Selector `json:"subnetResourceIdSelector,omitempty" tf:"-"` + + // A mapping of tags which should be assigned to the Machine Learning Compute Instance. 
Changing this forces a new Machine Learning Compute Instance to be created. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The Virtual Machine Size. Changing this forces a new Machine Learning Compute Instance to be created. + // +kubebuilder:validation:Optional + VirtualMachineSize *string `json:"virtualMachineSize,omitempty" tf:"virtual_machine_size,omitempty"` +} + +type ComputeInstanceSSHInitParameters struct { + + // Specifies the SSH rsa public key file as a string. Use "ssh-keygen -t rsa -b 2048" to generate your SSH key pairs. + PublicKey *string `json:"publicKey,omitempty" tf:"public_key,omitempty"` +} + +type ComputeInstanceSSHObservation struct { + + // Describes the port for connecting through SSH. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Specifies the SSH rsa public key file as a string. Use "ssh-keygen -t rsa -b 2048" to generate your SSH key pairs. + PublicKey *string `json:"publicKey,omitempty" tf:"public_key,omitempty"` + + // The admin username of this Machine Learning Compute Instance. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type ComputeInstanceSSHParameters struct { + + // Specifies the SSH rsa public key file as a string. Use "ssh-keygen -t rsa -b 2048" to generate your SSH key pairs. + // +kubebuilder:validation:Optional + PublicKey *string `json:"publicKey" tf:"public_key,omitempty"` +} + +// ComputeInstanceSpec defines the desired state of ComputeInstance +type ComputeInstanceSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ComputeInstanceParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. 
The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ComputeInstanceInitParameters `json:"initProvider,omitempty"` +} + +// ComputeInstanceStatus defines the observed state of ComputeInstance. +type ComputeInstanceStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ComputeInstanceObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ComputeInstance is the Schema for the ComputeInstances API. Manages a Machine Learning Compute Instance. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type ComputeInstance struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in 
self.managementPolicies) || has(self.forProvider.virtualMachineSize) || (has(self.initProvider) && has(self.initProvider.virtualMachineSize))",message="spec.forProvider.virtualMachineSize is a required parameter" + Spec ComputeInstanceSpec `json:"spec"` + Status ComputeInstanceStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ComputeInstanceList contains a list of ComputeInstances +type ComputeInstanceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ComputeInstance `json:"items"` +} + +// Repository type metadata. +var ( + ComputeInstance_Kind = "ComputeInstance" + ComputeInstance_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ComputeInstance_Kind}.String() + ComputeInstance_KindAPIVersion = ComputeInstance_Kind + "." + CRDGroupVersion.String() + ComputeInstance_GroupVersionKind = CRDGroupVersion.WithKind(ComputeInstance_Kind) +) + +func init() { + SchemeBuilder.Register(&ComputeInstance{}, &ComputeInstanceList{}) +} diff --git a/apis/machinelearningservices/v1beta1/zz_generated.conversion_hubs.go b/apis/machinelearningservices/v1beta2/zz_generated.conversion_hubs.go similarity index 96% rename from apis/machinelearningservices/v1beta1/zz_generated.conversion_hubs.go rename to apis/machinelearningservices/v1beta2/zz_generated.conversion_hubs.go index 414ac20b3..2902b48b9 100755 --- a/apis/machinelearningservices/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/machinelearningservices/v1beta2/zz_generated.conversion_hubs.go @@ -4,7 +4,7 @@ // Code generated by upjet. DO NOT EDIT. -package v1beta1 +package v1beta2 // Hub marks this type as a conversion hub. 
func (tr *ComputeCluster) Hub() {} diff --git a/apis/machinelearningservices/v1beta2/zz_generated.deepcopy.go b/apis/machinelearningservices/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..9e4acc916 --- /dev/null +++ b/apis/machinelearningservices/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,2754 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AssignToUserInitParameters) DeepCopyInto(out *AssignToUserInitParameters) { + *out = *in + if in.ObjectID != nil { + in, out := &in.ObjectID, &out.ObjectID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AssignToUserInitParameters. +func (in *AssignToUserInitParameters) DeepCopy() *AssignToUserInitParameters { + if in == nil { + return nil + } + out := new(AssignToUserInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AssignToUserObservation) DeepCopyInto(out *AssignToUserObservation) { + *out = *in + if in.ObjectID != nil { + in, out := &in.ObjectID, &out.ObjectID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AssignToUserObservation. 
+func (in *AssignToUserObservation) DeepCopy() *AssignToUserObservation { + if in == nil { + return nil + } + out := new(AssignToUserObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AssignToUserParameters) DeepCopyInto(out *AssignToUserParameters) { + *out = *in + if in.ObjectID != nil { + in, out := &in.ObjectID, &out.ObjectID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AssignToUserParameters. +func (in *AssignToUserParameters) DeepCopy() *AssignToUserParameters { + if in == nil { + return nil + } + out := new(AssignToUserParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComputeCluster) DeepCopyInto(out *ComputeCluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputeCluster. +func (in *ComputeCluster) DeepCopy() *ComputeCluster { + if in == nil { + return nil + } + out := new(ComputeCluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ComputeCluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ComputeClusterInitParameters) DeepCopyInto(out *ComputeClusterInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LocalAuthEnabled != nil { + in, out := &in.LocalAuthEnabled, &out.LocalAuthEnabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MachineLearningWorkspaceID != nil { + in, out := &in.MachineLearningWorkspaceID, &out.MachineLearningWorkspaceID + *out = new(string) + **out = **in + } + if in.MachineLearningWorkspaceIDRef != nil { + in, out := &in.MachineLearningWorkspaceIDRef, &out.MachineLearningWorkspaceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.MachineLearningWorkspaceIDSelector != nil { + in, out := &in.MachineLearningWorkspaceIDSelector, &out.MachineLearningWorkspaceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NodePublicIPEnabled != nil { + in, out := &in.NodePublicIPEnabled, &out.NodePublicIPEnabled + *out = new(bool) + **out = **in + } + if in.SSH != nil { + in, out := &in.SSH, &out.SSH + *out = new(SSHInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SSHPublicAccessEnabled != nil { + in, out := &in.SSHPublicAccessEnabled, &out.SSHPublicAccessEnabled + *out = new(bool) + **out = **in + } + if in.ScaleSettings != nil { + in, out := &in.ScaleSettings, &out.ScaleSettings + *out = new(ScaleSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SubnetResourceID != nil { + in, out := &in.SubnetResourceID, &out.SubnetResourceID + *out = new(string) + **out = **in + } + if in.SubnetResourceIDRef != nil { + in, out := &in.SubnetResourceIDRef, 
&out.SubnetResourceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetResourceIDSelector != nil { + in, out := &in.SubnetResourceIDSelector, &out.SubnetResourceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VMPriority != nil { + in, out := &in.VMPriority, &out.VMPriority + *out = new(string) + **out = **in + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputeClusterInitParameters. +func (in *ComputeClusterInitParameters) DeepCopy() *ComputeClusterInitParameters { + if in == nil { + return nil + } + out := new(ComputeClusterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComputeClusterList) DeepCopyInto(out *ComputeClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ComputeCluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputeClusterList. +func (in *ComputeClusterList) DeepCopy() *ComputeClusterList { + if in == nil { + return nil + } + out := new(ComputeClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ComputeClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComputeClusterObservation) DeepCopyInto(out *ComputeClusterObservation) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.LocalAuthEnabled != nil { + in, out := &in.LocalAuthEnabled, &out.LocalAuthEnabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MachineLearningWorkspaceID != nil { + in, out := &in.MachineLearningWorkspaceID, &out.MachineLearningWorkspaceID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NodePublicIPEnabled != nil { + in, out := &in.NodePublicIPEnabled, &out.NodePublicIPEnabled + *out = new(bool) + **out = **in + } + if in.SSH != nil { + in, out := &in.SSH, &out.SSH + *out = new(SSHObservation) + (*in).DeepCopyInto(*out) + } + if in.SSHPublicAccessEnabled != nil { + in, out := &in.SSHPublicAccessEnabled, &out.SSHPublicAccessEnabled + *out = new(bool) + **out = **in + } + if in.ScaleSettings != nil { + in, out := &in.ScaleSettings, &out.ScaleSettings + *out = new(ScaleSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.SubnetResourceID != nil { + in, out := &in.SubnetResourceID, &out.SubnetResourceID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal 
*string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VMPriority != nil { + in, out := &in.VMPriority, &out.VMPriority + *out = new(string) + **out = **in + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputeClusterObservation. +func (in *ComputeClusterObservation) DeepCopy() *ComputeClusterObservation { + if in == nil { + return nil + } + out := new(ComputeClusterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComputeClusterParameters) DeepCopyInto(out *ComputeClusterParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.LocalAuthEnabled != nil { + in, out := &in.LocalAuthEnabled, &out.LocalAuthEnabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MachineLearningWorkspaceID != nil { + in, out := &in.MachineLearningWorkspaceID, &out.MachineLearningWorkspaceID + *out = new(string) + **out = **in + } + if in.MachineLearningWorkspaceIDRef != nil { + in, out := &in.MachineLearningWorkspaceIDRef, &out.MachineLearningWorkspaceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.MachineLearningWorkspaceIDSelector != nil { + in, out := &in.MachineLearningWorkspaceIDSelector, &out.MachineLearningWorkspaceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, 
&out.Name + *out = new(string) + **out = **in + } + if in.NodePublicIPEnabled != nil { + in, out := &in.NodePublicIPEnabled, &out.NodePublicIPEnabled + *out = new(bool) + **out = **in + } + if in.SSH != nil { + in, out := &in.SSH, &out.SSH + *out = new(SSHParameters) + (*in).DeepCopyInto(*out) + } + if in.SSHPublicAccessEnabled != nil { + in, out := &in.SSHPublicAccessEnabled, &out.SSHPublicAccessEnabled + *out = new(bool) + **out = **in + } + if in.ScaleSettings != nil { + in, out := &in.ScaleSettings, &out.ScaleSettings + *out = new(ScaleSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.SubnetResourceID != nil { + in, out := &in.SubnetResourceID, &out.SubnetResourceID + *out = new(string) + **out = **in + } + if in.SubnetResourceIDRef != nil { + in, out := &in.SubnetResourceIDRef, &out.SubnetResourceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetResourceIDSelector != nil { + in, out := &in.SubnetResourceIDSelector, &out.SubnetResourceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VMPriority != nil { + in, out := &in.VMPriority, &out.VMPriority + *out = new(string) + **out = **in + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputeClusterParameters. +func (in *ComputeClusterParameters) DeepCopy() *ComputeClusterParameters { + if in == nil { + return nil + } + out := new(ComputeClusterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *ComputeClusterSpec) DeepCopyInto(out *ComputeClusterSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputeClusterSpec. +func (in *ComputeClusterSpec) DeepCopy() *ComputeClusterSpec { + if in == nil { + return nil + } + out := new(ComputeClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComputeClusterStatus) DeepCopyInto(out *ComputeClusterStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputeClusterStatus. +func (in *ComputeClusterStatus) DeepCopy() *ComputeClusterStatus { + if in == nil { + return nil + } + out := new(ComputeClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComputeInstance) DeepCopyInto(out *ComputeInstance) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputeInstance. +func (in *ComputeInstance) DeepCopy() *ComputeInstance { + if in == nil { + return nil + } + out := new(ComputeInstance) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ComputeInstance) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComputeInstanceIdentityInitParameters) DeepCopyInto(out *ComputeInstanceIdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputeInstanceIdentityInitParameters. +func (in *ComputeInstanceIdentityInitParameters) DeepCopy() *ComputeInstanceIdentityInitParameters { + if in == nil { + return nil + } + out := new(ComputeInstanceIdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComputeInstanceIdentityObservation) DeepCopyInto(out *ComputeInstanceIdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputeInstanceIdentityObservation. 
+func (in *ComputeInstanceIdentityObservation) DeepCopy() *ComputeInstanceIdentityObservation { + if in == nil { + return nil + } + out := new(ComputeInstanceIdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComputeInstanceIdentityParameters) DeepCopyInto(out *ComputeInstanceIdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputeInstanceIdentityParameters. +func (in *ComputeInstanceIdentityParameters) DeepCopy() *ComputeInstanceIdentityParameters { + if in == nil { + return nil + } + out := new(ComputeInstanceIdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ComputeInstanceInitParameters) DeepCopyInto(out *ComputeInstanceInitParameters) { + *out = *in + if in.AssignToUser != nil { + in, out := &in.AssignToUser, &out.AssignToUser + *out = new(AssignToUserInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AuthorizationType != nil { + in, out := &in.AuthorizationType, &out.AuthorizationType + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(ComputeInstanceIdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LocalAuthEnabled != nil { + in, out := &in.LocalAuthEnabled, &out.LocalAuthEnabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.NodePublicIPEnabled != nil { + in, out := &in.NodePublicIPEnabled, &out.NodePublicIPEnabled + *out = new(bool) + **out = **in + } + if in.SSH != nil { + in, out := &in.SSH, &out.SSH + *out = new(ComputeInstanceSSHInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SubnetResourceID != nil { + in, out := &in.SubnetResourceID, &out.SubnetResourceID + *out = new(string) + **out = **in + } + if in.SubnetResourceIDRef != nil { + in, out := &in.SubnetResourceIDRef, &out.SubnetResourceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetResourceIDSelector != nil { + in, out := &in.SubnetResourceIDSelector, &out.SubnetResourceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VirtualMachineSize != nil { + in, out := 
&in.VirtualMachineSize, &out.VirtualMachineSize + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputeInstanceInitParameters. +func (in *ComputeInstanceInitParameters) DeepCopy() *ComputeInstanceInitParameters { + if in == nil { + return nil + } + out := new(ComputeInstanceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComputeInstanceList) DeepCopyInto(out *ComputeInstanceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ComputeInstance, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputeInstanceList. +func (in *ComputeInstanceList) DeepCopy() *ComputeInstanceList { + if in == nil { + return nil + } + out := new(ComputeInstanceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ComputeInstanceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ComputeInstanceObservation) DeepCopyInto(out *ComputeInstanceObservation) { + *out = *in + if in.AssignToUser != nil { + in, out := &in.AssignToUser, &out.AssignToUser + *out = new(AssignToUserObservation) + (*in).DeepCopyInto(*out) + } + if in.AuthorizationType != nil { + in, out := &in.AuthorizationType, &out.AuthorizationType + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(ComputeInstanceIdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.LocalAuthEnabled != nil { + in, out := &in.LocalAuthEnabled, &out.LocalAuthEnabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MachineLearningWorkspaceID != nil { + in, out := &in.MachineLearningWorkspaceID, &out.MachineLearningWorkspaceID + *out = new(string) + **out = **in + } + if in.NodePublicIPEnabled != nil { + in, out := &in.NodePublicIPEnabled, &out.NodePublicIPEnabled + *out = new(bool) + **out = **in + } + if in.SSH != nil { + in, out := &in.SSH, &out.SSH + *out = new(ComputeInstanceSSHObservation) + (*in).DeepCopyInto(*out) + } + if in.SubnetResourceID != nil { + in, out := &in.SubnetResourceID, &out.SubnetResourceID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VirtualMachineSize != nil { + in, out := &in.VirtualMachineSize, &out.VirtualMachineSize + *out = new(string) + **out = **in + } +} + +// 
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputeInstanceObservation. +func (in *ComputeInstanceObservation) DeepCopy() *ComputeInstanceObservation { + if in == nil { + return nil + } + out := new(ComputeInstanceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComputeInstanceParameters) DeepCopyInto(out *ComputeInstanceParameters) { + *out = *in + if in.AssignToUser != nil { + in, out := &in.AssignToUser, &out.AssignToUser + *out = new(AssignToUserParameters) + (*in).DeepCopyInto(*out) + } + if in.AuthorizationType != nil { + in, out := &in.AuthorizationType, &out.AuthorizationType + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(ComputeInstanceIdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.LocalAuthEnabled != nil { + in, out := &in.LocalAuthEnabled, &out.LocalAuthEnabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MachineLearningWorkspaceID != nil { + in, out := &in.MachineLearningWorkspaceID, &out.MachineLearningWorkspaceID + *out = new(string) + **out = **in + } + if in.MachineLearningWorkspaceIDRef != nil { + in, out := &in.MachineLearningWorkspaceIDRef, &out.MachineLearningWorkspaceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.MachineLearningWorkspaceIDSelector != nil { + in, out := &in.MachineLearningWorkspaceIDSelector, &out.MachineLearningWorkspaceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.NodePublicIPEnabled != nil { + in, out := &in.NodePublicIPEnabled, &out.NodePublicIPEnabled + *out = new(bool) + **out = **in + } + if 
in.SSH != nil { + in, out := &in.SSH, &out.SSH + *out = new(ComputeInstanceSSHParameters) + (*in).DeepCopyInto(*out) + } + if in.SubnetResourceID != nil { + in, out := &in.SubnetResourceID, &out.SubnetResourceID + *out = new(string) + **out = **in + } + if in.SubnetResourceIDRef != nil { + in, out := &in.SubnetResourceIDRef, &out.SubnetResourceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetResourceIDSelector != nil { + in, out := &in.SubnetResourceIDSelector, &out.SubnetResourceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VirtualMachineSize != nil { + in, out := &in.VirtualMachineSize, &out.VirtualMachineSize + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputeInstanceParameters. +func (in *ComputeInstanceParameters) DeepCopy() *ComputeInstanceParameters { + if in == nil { + return nil + } + out := new(ComputeInstanceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComputeInstanceSSHInitParameters) DeepCopyInto(out *ComputeInstanceSSHInitParameters) { + *out = *in + if in.PublicKey != nil { + in, out := &in.PublicKey, &out.PublicKey + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputeInstanceSSHInitParameters. 
+func (in *ComputeInstanceSSHInitParameters) DeepCopy() *ComputeInstanceSSHInitParameters { + if in == nil { + return nil + } + out := new(ComputeInstanceSSHInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComputeInstanceSSHObservation) DeepCopyInto(out *ComputeInstanceSSHObservation) { + *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.PublicKey != nil { + in, out := &in.PublicKey, &out.PublicKey + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputeInstanceSSHObservation. +func (in *ComputeInstanceSSHObservation) DeepCopy() *ComputeInstanceSSHObservation { + if in == nil { + return nil + } + out := new(ComputeInstanceSSHObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComputeInstanceSSHParameters) DeepCopyInto(out *ComputeInstanceSSHParameters) { + *out = *in + if in.PublicKey != nil { + in, out := &in.PublicKey, &out.PublicKey + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputeInstanceSSHParameters. +func (in *ComputeInstanceSSHParameters) DeepCopy() *ComputeInstanceSSHParameters { + if in == nil { + return nil + } + out := new(ComputeInstanceSSHParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ComputeInstanceSpec) DeepCopyInto(out *ComputeInstanceSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputeInstanceSpec. +func (in *ComputeInstanceSpec) DeepCopy() *ComputeInstanceSpec { + if in == nil { + return nil + } + out := new(ComputeInstanceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComputeInstanceStatus) DeepCopyInto(out *ComputeInstanceStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputeInstanceStatus. +func (in *ComputeInstanceStatus) DeepCopy() *ComputeInstanceStatus { + if in == nil { + return nil + } + out := new(ComputeInstanceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EncryptionInitParameters) DeepCopyInto(out *EncryptionInitParameters) { + *out = *in + if in.KeyID != nil { + in, out := &in.KeyID, &out.KeyID + *out = new(string) + **out = **in + } + if in.KeyIDRef != nil { + in, out := &in.KeyIDRef, &out.KeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KeyIDSelector != nil { + in, out := &in.KeyIDSelector, &out.KeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.KeyVaultID != nil { + in, out := &in.KeyVaultID, &out.KeyVaultID + *out = new(string) + **out = **in + } + if in.KeyVaultIDRef != nil { + in, out := &in.KeyVaultIDRef, &out.KeyVaultIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KeyVaultIDSelector != nil { + in, out := &in.KeyVaultIDSelector, &out.KeyVaultIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.UserAssignedIdentityID != nil { + in, out := &in.UserAssignedIdentityID, &out.UserAssignedIdentityID + *out = new(string) + **out = **in + } + if in.UserAssignedIdentityIDRef != nil { + in, out := &in.UserAssignedIdentityIDRef, &out.UserAssignedIdentityIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.UserAssignedIdentityIDSelector != nil { + in, out := &in.UserAssignedIdentityIDSelector, &out.UserAssignedIdentityIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionInitParameters. +func (in *EncryptionInitParameters) DeepCopy() *EncryptionInitParameters { + if in == nil { + return nil + } + out := new(EncryptionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EncryptionObservation) DeepCopyInto(out *EncryptionObservation) { + *out = *in + if in.KeyID != nil { + in, out := &in.KeyID, &out.KeyID + *out = new(string) + **out = **in + } + if in.KeyVaultID != nil { + in, out := &in.KeyVaultID, &out.KeyVaultID + *out = new(string) + **out = **in + } + if in.UserAssignedIdentityID != nil { + in, out := &in.UserAssignedIdentityID, &out.UserAssignedIdentityID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionObservation. +func (in *EncryptionObservation) DeepCopy() *EncryptionObservation { + if in == nil { + return nil + } + out := new(EncryptionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionParameters) DeepCopyInto(out *EncryptionParameters) { + *out = *in + if in.KeyID != nil { + in, out := &in.KeyID, &out.KeyID + *out = new(string) + **out = **in + } + if in.KeyIDRef != nil { + in, out := &in.KeyIDRef, &out.KeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KeyIDSelector != nil { + in, out := &in.KeyIDSelector, &out.KeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.KeyVaultID != nil { + in, out := &in.KeyVaultID, &out.KeyVaultID + *out = new(string) + **out = **in + } + if in.KeyVaultIDRef != nil { + in, out := &in.KeyVaultIDRef, &out.KeyVaultIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KeyVaultIDSelector != nil { + in, out := &in.KeyVaultIDSelector, &out.KeyVaultIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.UserAssignedIdentityID != nil { + in, out := &in.UserAssignedIdentityID, &out.UserAssignedIdentityID + *out = new(string) + **out = **in + } + if in.UserAssignedIdentityIDRef != nil { + in, out := &in.UserAssignedIdentityIDRef, &out.UserAssignedIdentityIDRef + *out = 
new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.UserAssignedIdentityIDSelector != nil { + in, out := &in.UserAssignedIdentityIDSelector, &out.UserAssignedIdentityIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionParameters. +func (in *EncryptionParameters) DeepCopy() *EncryptionParameters { + if in == nil { + return nil + } + out := new(EncryptionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeatureStoreInitParameters) DeepCopyInto(out *FeatureStoreInitParameters) { + *out = *in + if in.ComputerSparkRuntimeVersion != nil { + in, out := &in.ComputerSparkRuntimeVersion, &out.ComputerSparkRuntimeVersion + *out = new(string) + **out = **in + } + if in.OfflineConnectionName != nil { + in, out := &in.OfflineConnectionName, &out.OfflineConnectionName + *out = new(string) + **out = **in + } + if in.OnlineConnectionName != nil { + in, out := &in.OnlineConnectionName, &out.OnlineConnectionName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureStoreInitParameters. +func (in *FeatureStoreInitParameters) DeepCopy() *FeatureStoreInitParameters { + if in == nil { + return nil + } + out := new(FeatureStoreInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FeatureStoreObservation) DeepCopyInto(out *FeatureStoreObservation) { + *out = *in + if in.ComputerSparkRuntimeVersion != nil { + in, out := &in.ComputerSparkRuntimeVersion, &out.ComputerSparkRuntimeVersion + *out = new(string) + **out = **in + } + if in.OfflineConnectionName != nil { + in, out := &in.OfflineConnectionName, &out.OfflineConnectionName + *out = new(string) + **out = **in + } + if in.OnlineConnectionName != nil { + in, out := &in.OnlineConnectionName, &out.OnlineConnectionName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureStoreObservation. +func (in *FeatureStoreObservation) DeepCopy() *FeatureStoreObservation { + if in == nil { + return nil + } + out := new(FeatureStoreObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeatureStoreParameters) DeepCopyInto(out *FeatureStoreParameters) { + *out = *in + if in.ComputerSparkRuntimeVersion != nil { + in, out := &in.ComputerSparkRuntimeVersion, &out.ComputerSparkRuntimeVersion + *out = new(string) + **out = **in + } + if in.OfflineConnectionName != nil { + in, out := &in.OfflineConnectionName, &out.OfflineConnectionName + *out = new(string) + **out = **in + } + if in.OnlineConnectionName != nil { + in, out := &in.OnlineConnectionName, &out.OnlineConnectionName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureStoreParameters. +func (in *FeatureStoreParameters) DeepCopy() *FeatureStoreParameters { + if in == nil { + return nil + } + out := new(FeatureStoreParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentityInitParameters) DeepCopyInto(out *IdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityInitParameters. +func (in *IdentityInitParameters) DeepCopy() *IdentityInitParameters { + if in == nil { + return nil + } + out := new(IdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityObservation) DeepCopyInto(out *IdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityObservation. +func (in *IdentityObservation) DeepCopy() *IdentityObservation { + if in == nil { + return nil + } + out := new(IdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentityParameters) DeepCopyInto(out *IdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityParameters. +func (in *IdentityParameters) DeepCopy() *IdentityParameters { + if in == nil { + return nil + } + out := new(IdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedNetworkInitParameters) DeepCopyInto(out *ManagedNetworkInitParameters) { + *out = *in + if in.IsolationMode != nil { + in, out := &in.IsolationMode, &out.IsolationMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedNetworkInitParameters. +func (in *ManagedNetworkInitParameters) DeepCopy() *ManagedNetworkInitParameters { + if in == nil { + return nil + } + out := new(ManagedNetworkInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedNetworkObservation) DeepCopyInto(out *ManagedNetworkObservation) { + *out = *in + if in.IsolationMode != nil { + in, out := &in.IsolationMode, &out.IsolationMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedNetworkObservation. 
+func (in *ManagedNetworkObservation) DeepCopy() *ManagedNetworkObservation { + if in == nil { + return nil + } + out := new(ManagedNetworkObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedNetworkParameters) DeepCopyInto(out *ManagedNetworkParameters) { + *out = *in + if in.IsolationMode != nil { + in, out := &in.IsolationMode, &out.IsolationMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedNetworkParameters. +func (in *ManagedNetworkParameters) DeepCopy() *ManagedNetworkParameters { + if in == nil { + return nil + } + out := new(ManagedNetworkParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SSHInitParameters) DeepCopyInto(out *SSHInitParameters) { + *out = *in + if in.AdminPassword != nil { + in, out := &in.AdminPassword, &out.AdminPassword + *out = new(string) + **out = **in + } + if in.AdminUsername != nil { + in, out := &in.AdminUsername, &out.AdminUsername + *out = new(string) + **out = **in + } + if in.KeyValue != nil { + in, out := &in.KeyValue, &out.KeyValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SSHInitParameters. +func (in *SSHInitParameters) DeepCopy() *SSHInitParameters { + if in == nil { + return nil + } + out := new(SSHInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SSHObservation) DeepCopyInto(out *SSHObservation) { + *out = *in + if in.AdminPassword != nil { + in, out := &in.AdminPassword, &out.AdminPassword + *out = new(string) + **out = **in + } + if in.AdminUsername != nil { + in, out := &in.AdminUsername, &out.AdminUsername + *out = new(string) + **out = **in + } + if in.KeyValue != nil { + in, out := &in.KeyValue, &out.KeyValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SSHObservation. +func (in *SSHObservation) DeepCopy() *SSHObservation { + if in == nil { + return nil + } + out := new(SSHObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SSHParameters) DeepCopyInto(out *SSHParameters) { + *out = *in + if in.AdminPassword != nil { + in, out := &in.AdminPassword, &out.AdminPassword + *out = new(string) + **out = **in + } + if in.AdminUsername != nil { + in, out := &in.AdminUsername, &out.AdminUsername + *out = new(string) + **out = **in + } + if in.KeyValue != nil { + in, out := &in.KeyValue, &out.KeyValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SSHParameters. +func (in *SSHParameters) DeepCopy() *SSHParameters { + if in == nil { + return nil + } + out := new(SSHParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScaleSettingsInitParameters) DeepCopyInto(out *ScaleSettingsInitParameters) { + *out = *in + if in.MaxNodeCount != nil { + in, out := &in.MaxNodeCount, &out.MaxNodeCount + *out = new(float64) + **out = **in + } + if in.MinNodeCount != nil { + in, out := &in.MinNodeCount, &out.MinNodeCount + *out = new(float64) + **out = **in + } + if in.ScaleDownNodesAfterIdleDuration != nil { + in, out := &in.ScaleDownNodesAfterIdleDuration, &out.ScaleDownNodesAfterIdleDuration + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleSettingsInitParameters. +func (in *ScaleSettingsInitParameters) DeepCopy() *ScaleSettingsInitParameters { + if in == nil { + return nil + } + out := new(ScaleSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaleSettingsObservation) DeepCopyInto(out *ScaleSettingsObservation) { + *out = *in + if in.MaxNodeCount != nil { + in, out := &in.MaxNodeCount, &out.MaxNodeCount + *out = new(float64) + **out = **in + } + if in.MinNodeCount != nil { + in, out := &in.MinNodeCount, &out.MinNodeCount + *out = new(float64) + **out = **in + } + if in.ScaleDownNodesAfterIdleDuration != nil { + in, out := &in.ScaleDownNodesAfterIdleDuration, &out.ScaleDownNodesAfterIdleDuration + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleSettingsObservation. +func (in *ScaleSettingsObservation) DeepCopy() *ScaleSettingsObservation { + if in == nil { + return nil + } + out := new(ScaleSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScaleSettingsParameters) DeepCopyInto(out *ScaleSettingsParameters) { + *out = *in + if in.MaxNodeCount != nil { + in, out := &in.MaxNodeCount, &out.MaxNodeCount + *out = new(float64) + **out = **in + } + if in.MinNodeCount != nil { + in, out := &in.MinNodeCount, &out.MinNodeCount + *out = new(float64) + **out = **in + } + if in.ScaleDownNodesAfterIdleDuration != nil { + in, out := &in.ScaleDownNodesAfterIdleDuration, &out.ScaleDownNodesAfterIdleDuration + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleSettingsParameters. +func (in *ScaleSettingsParameters) DeepCopy() *ScaleSettingsParameters { + if in == nil { + return nil + } + out := new(ScaleSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SynapseSpark) DeepCopyInto(out *SynapseSpark) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SynapseSpark. +func (in *SynapseSpark) DeepCopy() *SynapseSpark { + if in == nil { + return nil + } + out := new(SynapseSpark) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SynapseSpark) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SynapseSparkIdentityInitParameters) DeepCopyInto(out *SynapseSparkIdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SynapseSparkIdentityInitParameters. +func (in *SynapseSparkIdentityInitParameters) DeepCopy() *SynapseSparkIdentityInitParameters { + if in == nil { + return nil + } + out := new(SynapseSparkIdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SynapseSparkIdentityObservation) DeepCopyInto(out *SynapseSparkIdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SynapseSparkIdentityObservation. 
+func (in *SynapseSparkIdentityObservation) DeepCopy() *SynapseSparkIdentityObservation { + if in == nil { + return nil + } + out := new(SynapseSparkIdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SynapseSparkIdentityParameters) DeepCopyInto(out *SynapseSparkIdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SynapseSparkIdentityParameters. +func (in *SynapseSparkIdentityParameters) DeepCopy() *SynapseSparkIdentityParameters { + if in == nil { + return nil + } + out := new(SynapseSparkIdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SynapseSparkInitParameters) DeepCopyInto(out *SynapseSparkInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(SynapseSparkIdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LocalAuthEnabled != nil { + in, out := &in.LocalAuthEnabled, &out.LocalAuthEnabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.SynapseSparkPoolID != nil { + in, out := &in.SynapseSparkPoolID, &out.SynapseSparkPoolID + *out = new(string) + **out = **in + } + if in.SynapseSparkPoolIDRef != nil { + in, out := &in.SynapseSparkPoolIDRef, &out.SynapseSparkPoolIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SynapseSparkPoolIDSelector != nil { + in, out := &in.SynapseSparkPoolIDSelector, &out.SynapseSparkPoolIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SynapseSparkInitParameters. +func (in *SynapseSparkInitParameters) DeepCopy() *SynapseSparkInitParameters { + if in == nil { + return nil + } + out := new(SynapseSparkInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SynapseSparkList) DeepCopyInto(out *SynapseSparkList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SynapseSpark, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SynapseSparkList. +func (in *SynapseSparkList) DeepCopy() *SynapseSparkList { + if in == nil { + return nil + } + out := new(SynapseSparkList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SynapseSparkList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SynapseSparkObservation) DeepCopyInto(out *SynapseSparkObservation) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(SynapseSparkIdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.LocalAuthEnabled != nil { + in, out := &in.LocalAuthEnabled, &out.LocalAuthEnabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MachineLearningWorkspaceID != nil { + in, out := &in.MachineLearningWorkspaceID, &out.MachineLearningWorkspaceID + *out = new(string) + **out = **in + } + if in.SynapseSparkPoolID != nil { + in, out := &in.SynapseSparkPoolID, &out.SynapseSparkPoolID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = 
make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SynapseSparkObservation. +func (in *SynapseSparkObservation) DeepCopy() *SynapseSparkObservation { + if in == nil { + return nil + } + out := new(SynapseSparkObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SynapseSparkParameters) DeepCopyInto(out *SynapseSparkParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(SynapseSparkIdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.LocalAuthEnabled != nil { + in, out := &in.LocalAuthEnabled, &out.LocalAuthEnabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MachineLearningWorkspaceID != nil { + in, out := &in.MachineLearningWorkspaceID, &out.MachineLearningWorkspaceID + *out = new(string) + **out = **in + } + if in.MachineLearningWorkspaceIDRef != nil { + in, out := &in.MachineLearningWorkspaceIDRef, &out.MachineLearningWorkspaceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.MachineLearningWorkspaceIDSelector != nil { + in, out := &in.MachineLearningWorkspaceIDSelector, &out.MachineLearningWorkspaceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SynapseSparkPoolID != nil { + in, out := &in.SynapseSparkPoolID, &out.SynapseSparkPoolID + *out = new(string) + **out = **in + } + if in.SynapseSparkPoolIDRef != nil { + in, 
out := &in.SynapseSparkPoolIDRef, &out.SynapseSparkPoolIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SynapseSparkPoolIDSelector != nil { + in, out := &in.SynapseSparkPoolIDSelector, &out.SynapseSparkPoolIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SynapseSparkParameters. +func (in *SynapseSparkParameters) DeepCopy() *SynapseSparkParameters { + if in == nil { + return nil + } + out := new(SynapseSparkParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SynapseSparkSpec) DeepCopyInto(out *SynapseSparkSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SynapseSparkSpec. +func (in *SynapseSparkSpec) DeepCopy() *SynapseSparkSpec { + if in == nil { + return nil + } + out := new(SynapseSparkSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SynapseSparkStatus) DeepCopyInto(out *SynapseSparkStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SynapseSparkStatus. 
+func (in *SynapseSparkStatus) DeepCopy() *SynapseSparkStatus { + if in == nil { + return nil + } + out := new(SynapseSparkStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Workspace) DeepCopyInto(out *Workspace) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Workspace. +func (in *Workspace) DeepCopy() *Workspace { + if in == nil { + return nil + } + out := new(Workspace) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Workspace) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkspaceIdentityInitParameters) DeepCopyInto(out *WorkspaceIdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceIdentityInitParameters. +func (in *WorkspaceIdentityInitParameters) DeepCopy() *WorkspaceIdentityInitParameters { + if in == nil { + return nil + } + out := new(WorkspaceIdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *WorkspaceIdentityObservation) DeepCopyInto(out *WorkspaceIdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceIdentityObservation. +func (in *WorkspaceIdentityObservation) DeepCopy() *WorkspaceIdentityObservation { + if in == nil { + return nil + } + out := new(WorkspaceIdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkspaceIdentityParameters) DeepCopyInto(out *WorkspaceIdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceIdentityParameters. +func (in *WorkspaceIdentityParameters) DeepCopy() *WorkspaceIdentityParameters { + if in == nil { + return nil + } + out := new(WorkspaceIdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WorkspaceInitParameters) DeepCopyInto(out *WorkspaceInitParameters) { + *out = *in + if in.ApplicationInsightsID != nil { + in, out := &in.ApplicationInsightsID, &out.ApplicationInsightsID + *out = new(string) + **out = **in + } + if in.ApplicationInsightsIDRef != nil { + in, out := &in.ApplicationInsightsIDRef, &out.ApplicationInsightsIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ApplicationInsightsIDSelector != nil { + in, out := &in.ApplicationInsightsIDSelector, &out.ApplicationInsightsIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ContainerRegistryID != nil { + in, out := &in.ContainerRegistryID, &out.ContainerRegistryID + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Encryption != nil { + in, out := &in.Encryption, &out.Encryption + *out = new(EncryptionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FeatureStore != nil { + in, out := &in.FeatureStore, &out.FeatureStore + *out = new(FeatureStoreInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FriendlyName != nil { + in, out := &in.FriendlyName, &out.FriendlyName + *out = new(string) + **out = **in + } + if in.HighBusinessImpact != nil { + in, out := &in.HighBusinessImpact, &out.HighBusinessImpact + *out = new(bool) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(WorkspaceIdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ImageBuildComputeName != nil { + in, out := &in.ImageBuildComputeName, &out.ImageBuildComputeName + *out = new(string) + **out = **in + } + if in.KeyVaultID != nil { + in, out := &in.KeyVaultID, &out.KeyVaultID + *out = new(string) + **out = **in + } + if in.KeyVaultIDRef != nil { + in, out := &in.KeyVaultIDRef, &out.KeyVaultIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KeyVaultIDSelector != nil { + in, out := 
&in.KeyVaultIDSelector, &out.KeyVaultIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ManagedNetwork != nil { + in, out := &in.ManagedNetwork, &out.ManagedNetwork + *out = new(ManagedNetworkInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PrimaryUserAssignedIdentity != nil { + in, out := &in.PrimaryUserAssignedIdentity, &out.PrimaryUserAssignedIdentity + *out = new(string) + **out = **in + } + if in.PrimaryUserAssignedIdentityRef != nil { + in, out := &in.PrimaryUserAssignedIdentityRef, &out.PrimaryUserAssignedIdentityRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PrimaryUserAssignedIdentitySelector != nil { + in, out := &in.PrimaryUserAssignedIdentitySelector, &out.PrimaryUserAssignedIdentitySelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PublicAccessBehindVirtualNetworkEnabled != nil { + in, out := &in.PublicAccessBehindVirtualNetworkEnabled, &out.PublicAccessBehindVirtualNetworkEnabled + *out = new(bool) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.StorageAccountID != nil { + in, out := &in.StorageAccountID, &out.StorageAccountID + *out = new(string) + **out = **in + } + if in.StorageAccountIDRef != nil { + in, out := &in.StorageAccountIDRef, &out.StorageAccountIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StorageAccountIDSelector != nil { + in, out := &in.StorageAccountIDSelector, &out.StorageAccountIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, 
&out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.V1LegacyModeEnabled != nil { + in, out := &in.V1LegacyModeEnabled, &out.V1LegacyModeEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceInitParameters. +func (in *WorkspaceInitParameters) DeepCopy() *WorkspaceInitParameters { + if in == nil { + return nil + } + out := new(WorkspaceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkspaceList) DeepCopyInto(out *WorkspaceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Workspace, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceList. +func (in *WorkspaceList) DeepCopy() *WorkspaceList { + if in == nil { + return nil + } + out := new(WorkspaceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WorkspaceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WorkspaceObservation) DeepCopyInto(out *WorkspaceObservation) { + *out = *in + if in.ApplicationInsightsID != nil { + in, out := &in.ApplicationInsightsID, &out.ApplicationInsightsID + *out = new(string) + **out = **in + } + if in.ContainerRegistryID != nil { + in, out := &in.ContainerRegistryID, &out.ContainerRegistryID + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.DiscoveryURL != nil { + in, out := &in.DiscoveryURL, &out.DiscoveryURL + *out = new(string) + **out = **in + } + if in.Encryption != nil { + in, out := &in.Encryption, &out.Encryption + *out = new(EncryptionObservation) + (*in).DeepCopyInto(*out) + } + if in.FeatureStore != nil { + in, out := &in.FeatureStore, &out.FeatureStore + *out = new(FeatureStoreObservation) + (*in).DeepCopyInto(*out) + } + if in.FriendlyName != nil { + in, out := &in.FriendlyName, &out.FriendlyName + *out = new(string) + **out = **in + } + if in.HighBusinessImpact != nil { + in, out := &in.HighBusinessImpact, &out.HighBusinessImpact + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(WorkspaceIdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.ImageBuildComputeName != nil { + in, out := &in.ImageBuildComputeName, &out.ImageBuildComputeName + *out = new(string) + **out = **in + } + if in.KeyVaultID != nil { + in, out := &in.KeyVaultID, &out.KeyVaultID + *out = new(string) + **out = **in + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ManagedNetwork != nil { + in, out := &in.ManagedNetwork, &out.ManagedNetwork + *out = new(ManagedNetworkObservation) + (*in).DeepCopyInto(*out) + } + 
if in.PrimaryUserAssignedIdentity != nil { + in, out := &in.PrimaryUserAssignedIdentity, &out.PrimaryUserAssignedIdentity + *out = new(string) + **out = **in + } + if in.PublicAccessBehindVirtualNetworkEnabled != nil { + in, out := &in.PublicAccessBehindVirtualNetworkEnabled, &out.PublicAccessBehindVirtualNetworkEnabled + *out = new(bool) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.StorageAccountID != nil { + in, out := &in.StorageAccountID, &out.StorageAccountID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.V1LegacyModeEnabled != nil { + in, out := &in.V1LegacyModeEnabled, &out.V1LegacyModeEnabled + *out = new(bool) + **out = **in + } + if in.WorkspaceID != nil { + in, out := &in.WorkspaceID, &out.WorkspaceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceObservation. +func (in *WorkspaceObservation) DeepCopy() *WorkspaceObservation { + if in == nil { + return nil + } + out := new(WorkspaceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WorkspaceParameters) DeepCopyInto(out *WorkspaceParameters) { + *out = *in + if in.ApplicationInsightsID != nil { + in, out := &in.ApplicationInsightsID, &out.ApplicationInsightsID + *out = new(string) + **out = **in + } + if in.ApplicationInsightsIDRef != nil { + in, out := &in.ApplicationInsightsIDRef, &out.ApplicationInsightsIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ApplicationInsightsIDSelector != nil { + in, out := &in.ApplicationInsightsIDSelector, &out.ApplicationInsightsIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ContainerRegistryID != nil { + in, out := &in.ContainerRegistryID, &out.ContainerRegistryID + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Encryption != nil { + in, out := &in.Encryption, &out.Encryption + *out = new(EncryptionParameters) + (*in).DeepCopyInto(*out) + } + if in.FeatureStore != nil { + in, out := &in.FeatureStore, &out.FeatureStore + *out = new(FeatureStoreParameters) + (*in).DeepCopyInto(*out) + } + if in.FriendlyName != nil { + in, out := &in.FriendlyName, &out.FriendlyName + *out = new(string) + **out = **in + } + if in.HighBusinessImpact != nil { + in, out := &in.HighBusinessImpact, &out.HighBusinessImpact + *out = new(bool) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(WorkspaceIdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.ImageBuildComputeName != nil { + in, out := &in.ImageBuildComputeName, &out.ImageBuildComputeName + *out = new(string) + **out = **in + } + if in.KeyVaultID != nil { + in, out := &in.KeyVaultID, &out.KeyVaultID + *out = new(string) + **out = **in + } + if in.KeyVaultIDRef != nil { + in, out := &in.KeyVaultIDRef, &out.KeyVaultIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KeyVaultIDSelector != nil { + in, out := &in.KeyVaultIDSelector, 
&out.KeyVaultIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ManagedNetwork != nil { + in, out := &in.ManagedNetwork, &out.ManagedNetwork + *out = new(ManagedNetworkParameters) + (*in).DeepCopyInto(*out) + } + if in.PrimaryUserAssignedIdentity != nil { + in, out := &in.PrimaryUserAssignedIdentity, &out.PrimaryUserAssignedIdentity + *out = new(string) + **out = **in + } + if in.PrimaryUserAssignedIdentityRef != nil { + in, out := &in.PrimaryUserAssignedIdentityRef, &out.PrimaryUserAssignedIdentityRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PrimaryUserAssignedIdentitySelector != nil { + in, out := &in.PrimaryUserAssignedIdentitySelector, &out.PrimaryUserAssignedIdentitySelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PublicAccessBehindVirtualNetworkEnabled != nil { + in, out := &in.PublicAccessBehindVirtualNetworkEnabled, &out.PublicAccessBehindVirtualNetworkEnabled + *out = new(bool) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.StorageAccountID != nil { + in, out := &in.StorageAccountID, 
&out.StorageAccountID + *out = new(string) + **out = **in + } + if in.StorageAccountIDRef != nil { + in, out := &in.StorageAccountIDRef, &out.StorageAccountIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StorageAccountIDSelector != nil { + in, out := &in.StorageAccountIDSelector, &out.StorageAccountIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.V1LegacyModeEnabled != nil { + in, out := &in.V1LegacyModeEnabled, &out.V1LegacyModeEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceParameters. +func (in *WorkspaceParameters) DeepCopy() *WorkspaceParameters { + if in == nil { + return nil + } + out := new(WorkspaceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkspaceSpec) DeepCopyInto(out *WorkspaceSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceSpec. +func (in *WorkspaceSpec) DeepCopy() *WorkspaceSpec { + if in == nil { + return nil + } + out := new(WorkspaceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WorkspaceStatus) DeepCopyInto(out *WorkspaceStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceStatus. +func (in *WorkspaceStatus) DeepCopy() *WorkspaceStatus { + if in == nil { + return nil + } + out := new(WorkspaceStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/machinelearningservices/v1beta2/zz_generated.managed.go b/apis/machinelearningservices/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..23d11af11 --- /dev/null +++ b/apis/machinelearningservices/v1beta2/zz_generated.managed.go @@ -0,0 +1,248 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this ComputeCluster. +func (mg *ComputeCluster) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ComputeCluster. +func (mg *ComputeCluster) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ComputeCluster. +func (mg *ComputeCluster) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ComputeCluster. +func (mg *ComputeCluster) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ComputeCluster. +func (mg *ComputeCluster) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ComputeCluster. 
+func (mg *ComputeCluster) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ComputeCluster. +func (mg *ComputeCluster) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ComputeCluster. +func (mg *ComputeCluster) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ComputeCluster. +func (mg *ComputeCluster) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ComputeCluster. +func (mg *ComputeCluster) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ComputeCluster. +func (mg *ComputeCluster) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ComputeCluster. +func (mg *ComputeCluster) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this ComputeInstance. +func (mg *ComputeInstance) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ComputeInstance. +func (mg *ComputeInstance) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ComputeInstance. +func (mg *ComputeInstance) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ComputeInstance. +func (mg *ComputeInstance) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ComputeInstance. 
+func (mg *ComputeInstance) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ComputeInstance. +func (mg *ComputeInstance) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ComputeInstance. +func (mg *ComputeInstance) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ComputeInstance. +func (mg *ComputeInstance) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ComputeInstance. +func (mg *ComputeInstance) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ComputeInstance. +func (mg *ComputeInstance) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ComputeInstance. +func (mg *ComputeInstance) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ComputeInstance. +func (mg *ComputeInstance) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this SynapseSpark. +func (mg *SynapseSpark) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this SynapseSpark. +func (mg *SynapseSpark) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this SynapseSpark. +func (mg *SynapseSpark) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this SynapseSpark. 
+func (mg *SynapseSpark) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this SynapseSpark. +func (mg *SynapseSpark) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this SynapseSpark. +func (mg *SynapseSpark) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this SynapseSpark. +func (mg *SynapseSpark) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this SynapseSpark. +func (mg *SynapseSpark) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this SynapseSpark. +func (mg *SynapseSpark) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this SynapseSpark. +func (mg *SynapseSpark) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this SynapseSpark. +func (mg *SynapseSpark) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this SynapseSpark. +func (mg *SynapseSpark) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Workspace. +func (mg *Workspace) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Workspace. +func (mg *Workspace) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Workspace. 
+func (mg *Workspace) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Workspace. +func (mg *Workspace) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Workspace. +func (mg *Workspace) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Workspace. +func (mg *Workspace) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Workspace. +func (mg *Workspace) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Workspace. +func (mg *Workspace) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Workspace. +func (mg *Workspace) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Workspace. +func (mg *Workspace) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Workspace. +func (mg *Workspace) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Workspace. 
+func (mg *Workspace) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/machinelearningservices/v1beta2/zz_generated.managedlist.go b/apis/machinelearningservices/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..1a5d555a7 --- /dev/null +++ b/apis/machinelearningservices/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,44 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this ComputeClusterList. +func (l *ComputeClusterList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ComputeInstanceList. +func (l *ComputeInstanceList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this SynapseSparkList. +func (l *SynapseSparkList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this WorkspaceList. +func (l *WorkspaceList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/machinelearningservices/v1beta2/zz_generated.resolvers.go b/apis/machinelearningservices/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..3760a044d --- /dev/null +++ b/apis/machinelearningservices/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,553 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. 
DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + apisresolver "github.com/upbound/provider-azure/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *ComputeCluster) ResolveReferences( // ResolveReferences of this ComputeCluster. + ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("machinelearningservices.azure.upbound.io", "v1beta2", "Workspace", "WorkspaceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.MachineLearningWorkspaceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.MachineLearningWorkspaceIDRef, + Selector: mg.Spec.ForProvider.MachineLearningWorkspaceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.MachineLearningWorkspaceID") + } + mg.Spec.ForProvider.MachineLearningWorkspaceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.MachineLearningWorkspaceIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.ForProvider.SubnetResourceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.SubnetResourceIDRef, + Selector: mg.Spec.ForProvider.SubnetResourceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SubnetResourceID") + } + mg.Spec.ForProvider.SubnetResourceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SubnetResourceIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("machinelearningservices.azure.upbound.io", "v1beta2", "Workspace", "WorkspaceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.MachineLearningWorkspaceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.MachineLearningWorkspaceIDRef, + Selector: mg.Spec.InitProvider.MachineLearningWorkspaceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.MachineLearningWorkspaceID") + } + mg.Spec.InitProvider.MachineLearningWorkspaceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.MachineLearningWorkspaceIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SubnetResourceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.SubnetResourceIDRef, + Selector: mg.Spec.InitProvider.SubnetResourceIDSelector, + To: reference.To{List: l, 
Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SubnetResourceID") + } + mg.Spec.InitProvider.SubnetResourceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SubnetResourceIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this ComputeInstance. +func (mg *ComputeInstance) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("machinelearningservices.azure.upbound.io", "v1beta2", "Workspace", "WorkspaceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.MachineLearningWorkspaceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.MachineLearningWorkspaceIDRef, + Selector: mg.Spec.ForProvider.MachineLearningWorkspaceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.MachineLearningWorkspaceID") + } + mg.Spec.ForProvider.MachineLearningWorkspaceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.MachineLearningWorkspaceIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SubnetResourceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.SubnetResourceIDRef, + Selector: 
mg.Spec.ForProvider.SubnetResourceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SubnetResourceID") + } + mg.Spec.ForProvider.SubnetResourceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SubnetResourceIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SubnetResourceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.SubnetResourceIDRef, + Selector: mg.Spec.InitProvider.SubnetResourceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SubnetResourceID") + } + mg.Spec.InitProvider.SubnetResourceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SubnetResourceIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this SynapseSpark. 
+func (mg *SynapseSpark) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("machinelearningservices.azure.upbound.io", "v1beta2", "Workspace", "WorkspaceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.MachineLearningWorkspaceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.MachineLearningWorkspaceIDRef, + Selector: mg.Spec.ForProvider.MachineLearningWorkspaceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.MachineLearningWorkspaceID") + } + mg.Spec.ForProvider.MachineLearningWorkspaceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.MachineLearningWorkspaceIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("synapse.azure.upbound.io", "v1beta2", "SparkPool", "SparkPoolList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SynapseSparkPoolID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.SynapseSparkPoolIDRef, + Selector: mg.Spec.ForProvider.SynapseSparkPoolIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SynapseSparkPoolID") + } + mg.Spec.ForProvider.SynapseSparkPoolID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SynapseSparkPoolIDRef = 
rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("synapse.azure.upbound.io", "v1beta2", "SparkPool", "SparkPoolList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SynapseSparkPoolID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.SynapseSparkPoolIDRef, + Selector: mg.Spec.InitProvider.SynapseSparkPoolIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SynapseSparkPoolID") + } + mg.Spec.InitProvider.SynapseSparkPoolID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SynapseSparkPoolIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Workspace. +func (mg *Workspace) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("insights.azure.upbound.io", "v1beta1", "ApplicationInsights", "ApplicationInsightsList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ApplicationInsightsID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.ApplicationInsightsIDRef, + Selector: mg.Spec.ForProvider.ApplicationInsightsIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ApplicationInsightsID") + } + mg.Spec.ForProvider.ApplicationInsightsID = reference.ToPtrValue(rsp.ResolvedValue) + 
mg.Spec.ForProvider.ApplicationInsightsIDRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.Encryption != nil { + { + m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta2", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Encryption.KeyID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.Encryption.KeyIDRef, + Selector: mg.Spec.ForProvider.Encryption.KeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Encryption.KeyID") + } + mg.Spec.ForProvider.Encryption.KeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Encryption.KeyIDRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.Encryption != nil { + { + m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta2", "Vault", "VaultList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Encryption.KeyVaultID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.Encryption.KeyVaultIDRef, + Selector: mg.Spec.ForProvider.Encryption.KeyVaultIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Encryption.KeyVaultID") + } + mg.Spec.ForProvider.Encryption.KeyVaultID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Encryption.KeyVaultIDRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.Encryption != nil { + { + m, l, err = apisresolver.GetManagedResource("managedidentity.azure.upbound.io", "v1beta1", 
"UserAssignedIdentity", "UserAssignedIdentityList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Encryption.UserAssignedIdentityID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.Encryption.UserAssignedIdentityIDRef, + Selector: mg.Spec.ForProvider.Encryption.UserAssignedIdentityIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Encryption.UserAssignedIdentityID") + } + mg.Spec.ForProvider.Encryption.UserAssignedIdentityID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Encryption.UserAssignedIdentityIDRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta2", "Vault", "VaultList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.KeyVaultID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.KeyVaultIDRef, + Selector: mg.Spec.ForProvider.KeyVaultIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.KeyVaultID") + } + mg.Spec.ForProvider.KeyVaultID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.KeyVaultIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("managedidentity.azure.upbound.io", "v1beta1", "UserAssignedIdentity", "UserAssignedIdentityList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, 
reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.PrimaryUserAssignedIdentity), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.PrimaryUserAssignedIdentityRef, + Selector: mg.Spec.ForProvider.PrimaryUserAssignedIdentitySelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.PrimaryUserAssignedIdentity") + } + mg.Spec.ForProvider.PrimaryUserAssignedIdentity = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.PrimaryUserAssignedIdentityRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StorageAccountID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.StorageAccountIDRef, + Selector: mg.Spec.ForProvider.StorageAccountIDSelector, + To: 
reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StorageAccountID") + } + mg.Spec.ForProvider.StorageAccountID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StorageAccountIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("insights.azure.upbound.io", "v1beta1", "ApplicationInsights", "ApplicationInsightsList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ApplicationInsightsID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.ApplicationInsightsIDRef, + Selector: mg.Spec.InitProvider.ApplicationInsightsIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ApplicationInsightsID") + } + mg.Spec.InitProvider.ApplicationInsightsID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ApplicationInsightsIDRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.Encryption != nil { + { + m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta2", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Encryption.KeyID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.Encryption.KeyIDRef, + Selector: mg.Spec.InitProvider.Encryption.KeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Encryption.KeyID") + } + mg.Spec.InitProvider.Encryption.KeyID = reference.ToPtrValue(rsp.ResolvedValue) + 
mg.Spec.InitProvider.Encryption.KeyIDRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.Encryption != nil { + { + m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta2", "Vault", "VaultList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Encryption.KeyVaultID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.Encryption.KeyVaultIDRef, + Selector: mg.Spec.InitProvider.Encryption.KeyVaultIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Encryption.KeyVaultID") + } + mg.Spec.InitProvider.Encryption.KeyVaultID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Encryption.KeyVaultIDRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.Encryption != nil { + { + m, l, err = apisresolver.GetManagedResource("managedidentity.azure.upbound.io", "v1beta1", "UserAssignedIdentity", "UserAssignedIdentityList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Encryption.UserAssignedIdentityID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.Encryption.UserAssignedIdentityIDRef, + Selector: mg.Spec.InitProvider.Encryption.UserAssignedIdentityIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Encryption.UserAssignedIdentityID") + } + mg.Spec.InitProvider.Encryption.UserAssignedIdentityID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Encryption.UserAssignedIdentityIDRef = 
rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta2", "Vault", "VaultList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.KeyVaultID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.KeyVaultIDRef, + Selector: mg.Spec.InitProvider.KeyVaultIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.KeyVaultID") + } + mg.Spec.InitProvider.KeyVaultID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.KeyVaultIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("managedidentity.azure.upbound.io", "v1beta1", "UserAssignedIdentity", "UserAssignedIdentityList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.PrimaryUserAssignedIdentity), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.PrimaryUserAssignedIdentityRef, + Selector: mg.Spec.InitProvider.PrimaryUserAssignedIdentitySelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.PrimaryUserAssignedIdentity") + } + mg.Spec.InitProvider.PrimaryUserAssignedIdentity = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.PrimaryUserAssignedIdentityRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference 
resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StorageAccountID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.StorageAccountIDRef, + Selector: mg.Spec.InitProvider.StorageAccountIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StorageAccountID") + } + mg.Spec.InitProvider.StorageAccountID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StorageAccountIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/machinelearningservices/v1beta2/zz_groupversion_info.go b/apis/machinelearningservices/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..10de63933 --- /dev/null +++ b/apis/machinelearningservices/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=machinelearningservices.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "machinelearningservices.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/machinelearningservices/v1beta2/zz_synapsespark_terraformed.go b/apis/machinelearningservices/v1beta2/zz_synapsespark_terraformed.go new file mode 100755 index 000000000..79c32bb1f --- /dev/null +++ b/apis/machinelearningservices/v1beta2/zz_synapsespark_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this SynapseSpark +func (mg *SynapseSpark) GetTerraformResourceType() string { + return "azurerm_machine_learning_synapse_spark" +} + +// GetConnectionDetailsMapping for this SynapseSpark +func (tr *SynapseSpark) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this SynapseSpark +func (tr *SynapseSpark) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this SynapseSpark +func (tr *SynapseSpark) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this SynapseSpark +func (tr *SynapseSpark) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this SynapseSpark +func (tr *SynapseSpark) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return 
base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this SynapseSpark +func (tr *SynapseSpark) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this SynapseSpark +func (tr *SynapseSpark) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this SynapseSpark +func (tr *SynapseSpark) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this SynapseSpark using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *SynapseSpark) LateInitialize(attrs []byte) (bool, error) { + params := &SynapseSparkParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *SynapseSpark) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/machinelearningservices/v1beta2/zz_synapsespark_types.go b/apis/machinelearningservices/v1beta2/zz_synapsespark_types.go new file mode 100755 index 000000000..03b055206 --- /dev/null +++ b/apis/machinelearningservices/v1beta2/zz_synapsespark_types.go @@ -0,0 +1,225 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type SynapseSparkIdentityInitParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Machine Learning Synapse Spark. Changing this forces a new resource to be created. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Machine Learning Synapse Spark. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). Changing this forces a new resource to be created. 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type SynapseSparkIdentityObservation struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Machine Learning Synapse Spark. Changing this forces a new resource to be created. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Principal ID for the Service Principal associated with the Managed Service Identity of this Machine Learning Synapse Spark. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID for the Service Principal associated with the Managed Service Identity of this Machine Learning Synapse Spark. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Machine Learning Synapse Spark. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). Changing this forces a new resource to be created. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type SynapseSparkIdentityParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Machine Learning Synapse Spark. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Machine Learning Synapse Spark. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type SynapseSparkInitParameters struct { + + // The description of the Machine Learning Synapse Spark. 
Changing this forces a new Machine Learning Synapse Spark to be created. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // An identity block as defined below. Changing this forces a new Machine Learning Synapse Spark to be created. + Identity *SynapseSparkIdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Whether local authentication methods is enabled. Defaults to true. Changing this forces a new Machine Learning Synapse Spark to be created. + LocalAuthEnabled *bool `json:"localAuthEnabled,omitempty" tf:"local_auth_enabled,omitempty"` + + // The Azure Region where the Machine Learning Synapse Spark should exist. Changing this forces a new Machine Learning Synapse Spark to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The ID of the linked Synapse Spark Pool. Changing this forces a new Machine Learning Synapse Spark to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/synapse/v1beta2.SparkPool + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + SynapseSparkPoolID *string `json:"synapseSparkPoolId,omitempty" tf:"synapse_spark_pool_id,omitempty"` + + // Reference to a SparkPool in synapse to populate synapseSparkPoolId. + // +kubebuilder:validation:Optional + SynapseSparkPoolIDRef *v1.Reference `json:"synapseSparkPoolIdRef,omitempty" tf:"-"` + + // Selector for a SparkPool in synapse to populate synapseSparkPoolId. + // +kubebuilder:validation:Optional + SynapseSparkPoolIDSelector *v1.Selector `json:"synapseSparkPoolIdSelector,omitempty" tf:"-"` + + // A mapping of tags which should be assigned to the Machine Learning Synapse Spark. Changing this forces a new Machine Learning Synapse Spark to be created. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type SynapseSparkObservation struct { + + // The description of the Machine Learning Synapse Spark. Changing this forces a new Machine Learning Synapse Spark to be created. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The ID of the Machine Learning Synapse Spark. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. Changing this forces a new Machine Learning Synapse Spark to be created. + Identity *SynapseSparkIdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // Whether local authentication methods is enabled. Defaults to true. Changing this forces a new Machine Learning Synapse Spark to be created. + LocalAuthEnabled *bool `json:"localAuthEnabled,omitempty" tf:"local_auth_enabled,omitempty"` + + // The Azure Region where the Machine Learning Synapse Spark should exist. Changing this forces a new Machine Learning Synapse Spark to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The ID of the Machine Learning Workspace. Changing this forces a new Machine Learning Synapse Spark to be created. + MachineLearningWorkspaceID *string `json:"machineLearningWorkspaceId,omitempty" tf:"machine_learning_workspace_id,omitempty"` + + // The ID of the linked Synapse Spark Pool. Changing this forces a new Machine Learning Synapse Spark to be created. + SynapseSparkPoolID *string `json:"synapseSparkPoolId,omitempty" tf:"synapse_spark_pool_id,omitempty"` + + // A mapping of tags which should be assigned to the Machine Learning Synapse Spark. Changing this forces a new Machine Learning Synapse Spark to be created. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type SynapseSparkParameters struct { + + // The description of the Machine Learning Synapse Spark. 
Changing this forces a new Machine Learning Synapse Spark to be created. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // An identity block as defined below. Changing this forces a new Machine Learning Synapse Spark to be created. + // +kubebuilder:validation:Optional + Identity *SynapseSparkIdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Whether local authentication methods is enabled. Defaults to true. Changing this forces a new Machine Learning Synapse Spark to be created. + // +kubebuilder:validation:Optional + LocalAuthEnabled *bool `json:"localAuthEnabled,omitempty" tf:"local_auth_enabled,omitempty"` + + // The Azure Region where the Machine Learning Synapse Spark should exist. Changing this forces a new Machine Learning Synapse Spark to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The ID of the Machine Learning Workspace. Changing this forces a new Machine Learning Synapse Spark to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/machinelearningservices/v1beta2.Workspace + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + MachineLearningWorkspaceID *string `json:"machineLearningWorkspaceId,omitempty" tf:"machine_learning_workspace_id,omitempty"` + + // Reference to a Workspace in machinelearningservices to populate machineLearningWorkspaceId. + // +kubebuilder:validation:Optional + MachineLearningWorkspaceIDRef *v1.Reference `json:"machineLearningWorkspaceIdRef,omitempty" tf:"-"` + + // Selector for a Workspace in machinelearningservices to populate machineLearningWorkspaceId. 
+ // +kubebuilder:validation:Optional + MachineLearningWorkspaceIDSelector *v1.Selector `json:"machineLearningWorkspaceIdSelector,omitempty" tf:"-"` + + // The ID of the linked Synapse Spark Pool. Changing this forces a new Machine Learning Synapse Spark to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/synapse/v1beta2.SparkPool + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + SynapseSparkPoolID *string `json:"synapseSparkPoolId,omitempty" tf:"synapse_spark_pool_id,omitempty"` + + // Reference to a SparkPool in synapse to populate synapseSparkPoolId. + // +kubebuilder:validation:Optional + SynapseSparkPoolIDRef *v1.Reference `json:"synapseSparkPoolIdRef,omitempty" tf:"-"` + + // Selector for a SparkPool in synapse to populate synapseSparkPoolId. + // +kubebuilder:validation:Optional + SynapseSparkPoolIDSelector *v1.Selector `json:"synapseSparkPoolIdSelector,omitempty" tf:"-"` + + // A mapping of tags which should be assigned to the Machine Learning Synapse Spark. Changing this forces a new Machine Learning Synapse Spark to be created. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +// SynapseSparkSpec defines the desired state of SynapseSpark +type SynapseSparkSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider SynapseSparkParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider SynapseSparkInitParameters `json:"initProvider,omitempty"` +} + +// SynapseSparkStatus defines the observed state of SynapseSpark. +type SynapseSparkStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SynapseSparkObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// SynapseSpark is the Schema for the SynapseSparks API. Manages the linked service to link an Azure Machine learning workspace to an Azure Synapse workspace. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type SynapseSpark struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + Spec SynapseSparkSpec `json:"spec"` + Status SynapseSparkStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SynapseSparkList contains a list of SynapseSparks +type SynapseSparkList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items 
[]SynapseSpark `json:"items"` +} + +// Repository type metadata. +var ( + SynapseSpark_Kind = "SynapseSpark" + SynapseSpark_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: SynapseSpark_Kind}.String() + SynapseSpark_KindAPIVersion = SynapseSpark_Kind + "." + CRDGroupVersion.String() + SynapseSpark_GroupVersionKind = CRDGroupVersion.WithKind(SynapseSpark_Kind) +) + +func init() { + SchemeBuilder.Register(&SynapseSpark{}, &SynapseSparkList{}) +} diff --git a/apis/machinelearningservices/v1beta2/zz_workspace_terraformed.go b/apis/machinelearningservices/v1beta2/zz_workspace_terraformed.go new file mode 100755 index 000000000..a187e1f19 --- /dev/null +++ b/apis/machinelearningservices/v1beta2/zz_workspace_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Workspace +func (mg *Workspace) GetTerraformResourceType() string { + return "azurerm_machine_learning_workspace" +} + +// GetConnectionDetailsMapping for this Workspace +func (tr *Workspace) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Workspace +func (tr *Workspace) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Workspace +func (tr *Workspace) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Workspace +func 
(tr *Workspace) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Workspace +func (tr *Workspace) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Workspace +func (tr *Workspace) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Workspace +func (tr *Workspace) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Workspace +func (tr *Workspace) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Workspace using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Workspace) LateInitialize(attrs []byte) (bool, error) { + params := &WorkspaceParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Workspace) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/machinelearningservices/v1beta2/zz_workspace_types.go b/apis/machinelearningservices/v1beta2/zz_workspace_types.go new file mode 100755 index 000000000..7b9d16123 --- /dev/null +++ b/apis/machinelearningservices/v1beta2/zz_workspace_types.go @@ -0,0 +1,588 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type EncryptionInitParameters struct { + + // The Key Vault URI to access the encryption key. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta2.Key + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + KeyID *string `json:"keyId,omitempty" tf:"key_id,omitempty"` + + // Reference to a Key in keyvault to populate keyId. + // +kubebuilder:validation:Optional + KeyIDRef *v1.Reference `json:"keyIdRef,omitempty" tf:"-"` + + // Selector for a Key in keyvault to populate keyId. + // +kubebuilder:validation:Optional + KeyIDSelector *v1.Selector `json:"keyIdSelector,omitempty" tf:"-"` + + // The ID of the keyVault where the customer owned encryption key is present. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta2.Vault + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + KeyVaultID *string `json:"keyVaultId,omitempty" tf:"key_vault_id,omitempty"` + + // Reference to a Vault in keyvault to populate keyVaultId. + // +kubebuilder:validation:Optional + KeyVaultIDRef *v1.Reference `json:"keyVaultIdRef,omitempty" tf:"-"` + + // Selector for a Vault in keyvault to populate keyVaultId. + // +kubebuilder:validation:Optional + KeyVaultIDSelector *v1.Selector `json:"keyVaultIdSelector,omitempty" tf:"-"` + + // The Key Vault URI to access the encryption key. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/managedidentity/v1beta1.UserAssignedIdentity + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + UserAssignedIdentityID *string `json:"userAssignedIdentityId,omitempty" tf:"user_assigned_identity_id,omitempty"` + + // Reference to a UserAssignedIdentity in managedidentity to populate userAssignedIdentityId. 
+ // +kubebuilder:validation:Optional + UserAssignedIdentityIDRef *v1.Reference `json:"userAssignedIdentityIdRef,omitempty" tf:"-"` + + // Selector for a UserAssignedIdentity in managedidentity to populate userAssignedIdentityId. + // +kubebuilder:validation:Optional + UserAssignedIdentityIDSelector *v1.Selector `json:"userAssignedIdentityIdSelector,omitempty" tf:"-"` +} + +type EncryptionObservation struct { + + // The Key Vault URI to access the encryption key. + KeyID *string `json:"keyId,omitempty" tf:"key_id,omitempty"` + + // The ID of the keyVault where the customer owned encryption key is present. + KeyVaultID *string `json:"keyVaultId,omitempty" tf:"key_vault_id,omitempty"` + + // The Key Vault URI to access the encryption key. + UserAssignedIdentityID *string `json:"userAssignedIdentityId,omitempty" tf:"user_assigned_identity_id,omitempty"` +} + +type EncryptionParameters struct { + + // The Key Vault URI to access the encryption key. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta2.Key + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + KeyID *string `json:"keyId,omitempty" tf:"key_id,omitempty"` + + // Reference to a Key in keyvault to populate keyId. + // +kubebuilder:validation:Optional + KeyIDRef *v1.Reference `json:"keyIdRef,omitempty" tf:"-"` + + // Selector for a Key in keyvault to populate keyId. + // +kubebuilder:validation:Optional + KeyIDSelector *v1.Selector `json:"keyIdSelector,omitempty" tf:"-"` + + // The ID of the keyVault where the customer owned encryption key is present. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta2.Vault + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + KeyVaultID *string `json:"keyVaultId,omitempty" tf:"key_vault_id,omitempty"` + + // Reference to a Vault in keyvault to populate keyVaultId. + // +kubebuilder:validation:Optional + KeyVaultIDRef *v1.Reference `json:"keyVaultIdRef,omitempty" tf:"-"` + + // Selector for a Vault in keyvault to populate keyVaultId. + // +kubebuilder:validation:Optional + KeyVaultIDSelector *v1.Selector `json:"keyVaultIdSelector,omitempty" tf:"-"` + + // The Key Vault URI to access the encryption key. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/managedidentity/v1beta1.UserAssignedIdentity + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + UserAssignedIdentityID *string `json:"userAssignedIdentityId,omitempty" tf:"user_assigned_identity_id,omitempty"` + + // Reference to a UserAssignedIdentity in managedidentity to populate userAssignedIdentityId. + // +kubebuilder:validation:Optional + UserAssignedIdentityIDRef *v1.Reference `json:"userAssignedIdentityIdRef,omitempty" tf:"-"` + + // Selector for a UserAssignedIdentity in managedidentity to populate userAssignedIdentityId. + // +kubebuilder:validation:Optional + UserAssignedIdentityIDSelector *v1.Selector `json:"userAssignedIdentityIdSelector,omitempty" tf:"-"` +} + +type FeatureStoreInitParameters struct { + + // The version of Spark runtime. + ComputerSparkRuntimeVersion *string `json:"computerSparkRuntimeVersion,omitempty" tf:"computer_spark_runtime_version,omitempty"` + + // The name of offline store connection. 
+ OfflineConnectionName *string `json:"offlineConnectionName,omitempty" tf:"offline_connection_name,omitempty"` + + // The name of online store connection. + OnlineConnectionName *string `json:"onlineConnectionName,omitempty" tf:"online_connection_name,omitempty"` +} + +type FeatureStoreObservation struct { + + // The version of Spark runtime. + ComputerSparkRuntimeVersion *string `json:"computerSparkRuntimeVersion,omitempty" tf:"computer_spark_runtime_version,omitempty"` + + // The name of offline store connection. + OfflineConnectionName *string `json:"offlineConnectionName,omitempty" tf:"offline_connection_name,omitempty"` + + // The name of online store connection. + OnlineConnectionName *string `json:"onlineConnectionName,omitempty" tf:"online_connection_name,omitempty"` +} + +type FeatureStoreParameters struct { + + // The version of Spark runtime. + // +kubebuilder:validation:Optional + ComputerSparkRuntimeVersion *string `json:"computerSparkRuntimeVersion,omitempty" tf:"computer_spark_runtime_version,omitempty"` + + // The name of offline store connection. + // +kubebuilder:validation:Optional + OfflineConnectionName *string `json:"offlineConnectionName,omitempty" tf:"offline_connection_name,omitempty"` + + // The name of online store connection. + // +kubebuilder:validation:Optional + OnlineConnectionName *string `json:"onlineConnectionName,omitempty" tf:"online_connection_name,omitempty"` +} + +type ManagedNetworkInitParameters struct { + + // The isolation mode of the Machine Learning Workspace. Possible values are Disabled, AllowOnlyApprovedOutbound, and AllowInternetOutbound + IsolationMode *string `json:"isolationMode,omitempty" tf:"isolation_mode,omitempty"` +} + +type ManagedNetworkObservation struct { + + // The isolation mode of the Machine Learning Workspace. 
Possible values are Disabled, AllowOnlyApprovedOutbound, and AllowInternetOutbound + IsolationMode *string `json:"isolationMode,omitempty" tf:"isolation_mode,omitempty"` +} + +type ManagedNetworkParameters struct { + + // The isolation mode of the Machine Learning Workspace. Possible values are Disabled, AllowOnlyApprovedOutbound, and AllowInternetOutbound + // +kubebuilder:validation:Optional + IsolationMode *string `json:"isolationMode,omitempty" tf:"isolation_mode,omitempty"` +} + +type WorkspaceIdentityInitParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Machine Learning Workspace. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Machine Learning Workspace. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type WorkspaceIdentityObservation struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Machine Learning Workspace. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Principal ID associated with this Managed Service Identity. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID associated with this Managed Service Identity. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Machine Learning Workspace. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type WorkspaceIdentityParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Machine Learning Workspace. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Machine Learning Workspace. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type WorkspaceInitParameters struct { + + // The ID of the Application Insights associated with this Machine Learning Workspace. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/insights/v1beta1.ApplicationInsights + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + ApplicationInsightsID *string `json:"applicationInsightsId,omitempty" tf:"application_insights_id,omitempty"` + + // Reference to a ApplicationInsights in insights to populate applicationInsightsId. + // +kubebuilder:validation:Optional + ApplicationInsightsIDRef *v1.Reference `json:"applicationInsightsIdRef,omitempty" tf:"-"` + + // Selector for a ApplicationInsights in insights to populate applicationInsightsId. + // +kubebuilder:validation:Optional + ApplicationInsightsIDSelector *v1.Selector `json:"applicationInsightsIdSelector,omitempty" tf:"-"` + + // The ID of the container registry associated with this Machine Learning Workspace. Changing this forces a new resource to be created. + ContainerRegistryID *string `json:"containerRegistryId,omitempty" tf:"container_registry_id,omitempty"` + + // The description of this Machine Learning Workspace. 
+ Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // An encryption block as defined below. Changing this forces a new resource to be created. + Encryption *EncryptionInitParameters `json:"encryption,omitempty" tf:"encryption,omitempty"` + + // A feature_store block as defined below. + FeatureStore *FeatureStoreInitParameters `json:"featureStore,omitempty" tf:"feature_store,omitempty"` + + // Display name for this Machine Learning Workspace. + FriendlyName *string `json:"friendlyName,omitempty" tf:"friendly_name,omitempty"` + + // Flag to signal High Business Impact (HBI) data in the workspace and reduce diagnostic data collected by the service. Changing this forces a new resource to be created. + HighBusinessImpact *bool `json:"highBusinessImpact,omitempty" tf:"high_business_impact,omitempty"` + + // An identity block as defined below. + Identity *WorkspaceIdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The compute name for image build of the Machine Learning Workspace. + ImageBuildComputeName *string `json:"imageBuildComputeName,omitempty" tf:"image_build_compute_name,omitempty"` + + // The ID of key vault associated with this Machine Learning Workspace. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta2.Vault + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + KeyVaultID *string `json:"keyVaultId,omitempty" tf:"key_vault_id,omitempty"` + + // Reference to a Vault in keyvault to populate keyVaultId. + // +kubebuilder:validation:Optional + KeyVaultIDRef *v1.Reference `json:"keyVaultIdRef,omitempty" tf:"-"` + + // Selector for a Vault in keyvault to populate keyVaultId. + // +kubebuilder:validation:Optional + KeyVaultIDSelector *v1.Selector `json:"keyVaultIdSelector,omitempty" tf:"-"` + + // The type of the Workspace. 
Possible values are Default, FeatureStore. Defaults to Default + Kind *string `json:"kind,omitempty" tf:"kind,omitempty"` + + // Specifies the supported Azure location where the Machine Learning Workspace should exist. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A managed_network block as defined below. + ManagedNetwork *ManagedNetworkInitParameters `json:"managedNetwork,omitempty" tf:"managed_network,omitempty"` + + // The user assigned identity id that represents the workspace identity. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/managedidentity/v1beta1.UserAssignedIdentity + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + PrimaryUserAssignedIdentity *string `json:"primaryUserAssignedIdentity,omitempty" tf:"primary_user_assigned_identity,omitempty"` + + // Reference to a UserAssignedIdentity in managedidentity to populate primaryUserAssignedIdentity. + // +kubebuilder:validation:Optional + PrimaryUserAssignedIdentityRef *v1.Reference `json:"primaryUserAssignedIdentityRef,omitempty" tf:"-"` + + // Selector for a UserAssignedIdentity in managedidentity to populate primaryUserAssignedIdentity. + // +kubebuilder:validation:Optional + PrimaryUserAssignedIdentitySelector *v1.Selector `json:"primaryUserAssignedIdentitySelector,omitempty" tf:"-"` + + // Enable public access when this Machine Learning Workspace is behind a VNet. Changing this forces a new resource to be created. + PublicAccessBehindVirtualNetworkEnabled *bool `json:"publicAccessBehindVirtualNetworkEnabled,omitempty" tf:"public_access_behind_virtual_network_enabled,omitempty"` + + // Enable public access when this Machine Learning Workspace is behind VNet. 
+ PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // SKU/edition of the Machine Learning Workspace, possible values are Free, Basic, Standard and Premium. Defaults to Basic. + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // The ID of the Storage Account associated with this Machine Learning Workspace. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` + + // Reference to a Account in storage to populate storageAccountId. + // +kubebuilder:validation:Optional + StorageAccountIDRef *v1.Reference `json:"storageAccountIdRef,omitempty" tf:"-"` + + // Selector for a Account in storage to populate storageAccountId. + // +kubebuilder:validation:Optional + StorageAccountIDSelector *v1.Selector `json:"storageAccountIdSelector,omitempty" tf:"-"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Enable V1 API features, enabling v1_legacy_mode may prevent you from using features provided by the v2 API. Defaults to false. + V1LegacyModeEnabled *bool `json:"v1LegacyModeEnabled,omitempty" tf:"v1_legacy_mode_enabled,omitempty"` +} + +type WorkspaceObservation struct { + + // The ID of the Application Insights associated with this Machine Learning Workspace. Changing this forces a new resource to be created. + ApplicationInsightsID *string `json:"applicationInsightsId,omitempty" tf:"application_insights_id,omitempty"` + + // The ID of the container registry associated with this Machine Learning Workspace. Changing this forces a new resource to be created. 
+ ContainerRegistryID *string `json:"containerRegistryId,omitempty" tf:"container_registry_id,omitempty"` + + // The description of this Machine Learning Workspace. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The url for the discovery service to identify regional endpoints for machine learning experimentation services. + DiscoveryURL *string `json:"discoveryUrl,omitempty" tf:"discovery_url,omitempty"` + + // An encryption block as defined below. Changing this forces a new resource to be created. + Encryption *EncryptionObservation `json:"encryption,omitempty" tf:"encryption,omitempty"` + + // A feature_store block as defined below. + FeatureStore *FeatureStoreObservation `json:"featureStore,omitempty" tf:"feature_store,omitempty"` + + // Display name for this Machine Learning Workspace. + FriendlyName *string `json:"friendlyName,omitempty" tf:"friendly_name,omitempty"` + + // Flag to signal High Business Impact (HBI) data in the workspace and reduce diagnostic data collected by the service. Changing this forces a new resource to be created. + HighBusinessImpact *bool `json:"highBusinessImpact,omitempty" tf:"high_business_impact,omitempty"` + + // The ID of the Machine Learning Workspace. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. + Identity *WorkspaceIdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // The compute name for image build of the Machine Learning Workspace. + ImageBuildComputeName *string `json:"imageBuildComputeName,omitempty" tf:"image_build_compute_name,omitempty"` + + // The ID of key vault associated with this Machine Learning Workspace. Changing this forces a new resource to be created. + KeyVaultID *string `json:"keyVaultId,omitempty" tf:"key_vault_id,omitempty"` + + // The type of the Workspace. Possible values are Default, FeatureStore. 
Defaults to Default + Kind *string `json:"kind,omitempty" tf:"kind,omitempty"` + + // Specifies the supported Azure location where the Machine Learning Workspace should exist. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A managed_network block as defined below. + ManagedNetwork *ManagedNetworkObservation `json:"managedNetwork,omitempty" tf:"managed_network,omitempty"` + + // The user assigned identity id that represents the workspace identity. + PrimaryUserAssignedIdentity *string `json:"primaryUserAssignedIdentity,omitempty" tf:"primary_user_assigned_identity,omitempty"` + + // Enable public access when this Machine Learning Workspace is behind a VNet. Changing this forces a new resource to be created. + PublicAccessBehindVirtualNetworkEnabled *bool `json:"publicAccessBehindVirtualNetworkEnabled,omitempty" tf:"public_access_behind_virtual_network_enabled,omitempty"` + + // Enable public access when this Machine Learning Workspace is behind VNet. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // Specifies the name of the Resource Group in which the Machine Learning Workspace should exist. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // SKU/edition of the Machine Learning Workspace, possible values are Free, Basic, Standard and Premium. Defaults to Basic. + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // The ID of the Storage Account associated with this Machine Learning Workspace. Changing this forces a new resource to be created. + StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` + + // A mapping of tags to assign to the resource. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Enable V1 API features, enabling v1_legacy_mode may prevent you from using features provided by the v2 API. Defaults to false. + V1LegacyModeEnabled *bool `json:"v1LegacyModeEnabled,omitempty" tf:"v1_legacy_mode_enabled,omitempty"` + + // The immutable id associated with this workspace. + WorkspaceID *string `json:"workspaceId,omitempty" tf:"workspace_id,omitempty"` +} + +type WorkspaceParameters struct { + + // The ID of the Application Insights associated with this Machine Learning Workspace. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/insights/v1beta1.ApplicationInsights + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + ApplicationInsightsID *string `json:"applicationInsightsId,omitempty" tf:"application_insights_id,omitempty"` + + // Reference to a ApplicationInsights in insights to populate applicationInsightsId. + // +kubebuilder:validation:Optional + ApplicationInsightsIDRef *v1.Reference `json:"applicationInsightsIdRef,omitempty" tf:"-"` + + // Selector for a ApplicationInsights in insights to populate applicationInsightsId. + // +kubebuilder:validation:Optional + ApplicationInsightsIDSelector *v1.Selector `json:"applicationInsightsIdSelector,omitempty" tf:"-"` + + // The ID of the container registry associated with this Machine Learning Workspace. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ContainerRegistryID *string `json:"containerRegistryId,omitempty" tf:"container_registry_id,omitempty"` + + // The description of this Machine Learning Workspace. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // An encryption block as defined below. 
Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Encryption *EncryptionParameters `json:"encryption,omitempty" tf:"encryption,omitempty"` + + // A feature_store block as defined below. + // +kubebuilder:validation:Optional + FeatureStore *FeatureStoreParameters `json:"featureStore,omitempty" tf:"feature_store,omitempty"` + + // Display name for this Machine Learning Workspace. + // +kubebuilder:validation:Optional + FriendlyName *string `json:"friendlyName,omitempty" tf:"friendly_name,omitempty"` + + // Flag to signal High Business Impact (HBI) data in the workspace and reduce diagnostic data collected by the service. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + HighBusinessImpact *bool `json:"highBusinessImpact,omitempty" tf:"high_business_impact,omitempty"` + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *WorkspaceIdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The compute name for image build of the Machine Learning Workspace. + // +kubebuilder:validation:Optional + ImageBuildComputeName *string `json:"imageBuildComputeName,omitempty" tf:"image_build_compute_name,omitempty"` + + // The ID of key vault associated with this Machine Learning Workspace. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta2.Vault + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + KeyVaultID *string `json:"keyVaultId,omitempty" tf:"key_vault_id,omitempty"` + + // Reference to a Vault in keyvault to populate keyVaultId. + // +kubebuilder:validation:Optional + KeyVaultIDRef *v1.Reference `json:"keyVaultIdRef,omitempty" tf:"-"` + + // Selector for a Vault in keyvault to populate keyVaultId. 
+ // +kubebuilder:validation:Optional + KeyVaultIDSelector *v1.Selector `json:"keyVaultIdSelector,omitempty" tf:"-"` + + // The type of the Workspace. Possible values are Default, FeatureStore. Defaults to Default + // +kubebuilder:validation:Optional + Kind *string `json:"kind,omitempty" tf:"kind,omitempty"` + + // Specifies the supported Azure location where the Machine Learning Workspace should exist. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A managed_network block as defined below. + // +kubebuilder:validation:Optional + ManagedNetwork *ManagedNetworkParameters `json:"managedNetwork,omitempty" tf:"managed_network,omitempty"` + + // The user assigned identity id that represents the workspace identity. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/managedidentity/v1beta1.UserAssignedIdentity + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + PrimaryUserAssignedIdentity *string `json:"primaryUserAssignedIdentity,omitempty" tf:"primary_user_assigned_identity,omitempty"` + + // Reference to a UserAssignedIdentity in managedidentity to populate primaryUserAssignedIdentity. + // +kubebuilder:validation:Optional + PrimaryUserAssignedIdentityRef *v1.Reference `json:"primaryUserAssignedIdentityRef,omitempty" tf:"-"` + + // Selector for a UserAssignedIdentity in managedidentity to populate primaryUserAssignedIdentity. + // +kubebuilder:validation:Optional + PrimaryUserAssignedIdentitySelector *v1.Selector `json:"primaryUserAssignedIdentitySelector,omitempty" tf:"-"` + + // Enable public access when this Machine Learning Workspace is behind a VNet. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + PublicAccessBehindVirtualNetworkEnabled *bool `json:"publicAccessBehindVirtualNetworkEnabled,omitempty" tf:"public_access_behind_virtual_network_enabled,omitempty"` + + // Enable public access when this Machine Learning Workspace is behind VNet. + // +kubebuilder:validation:Optional + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // Specifies the name of the Resource Group in which the Machine Learning Workspace should exist. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // SKU/edition of the Machine Learning Workspace, possible values are Free, Basic, Standard and Premium. Defaults to Basic. + // +kubebuilder:validation:Optional + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // The ID of the Storage Account associated with this Machine Learning Workspace. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` + + // Reference to a Account in storage to populate storageAccountId. + // +kubebuilder:validation:Optional + StorageAccountIDRef *v1.Reference `json:"storageAccountIdRef,omitempty" tf:"-"` + + // Selector for a Account in storage to populate storageAccountId. + // +kubebuilder:validation:Optional + StorageAccountIDSelector *v1.Selector `json:"storageAccountIdSelector,omitempty" tf:"-"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Enable V1 API features, enabling v1_legacy_mode may prevent you from using features provided by the v2 API. Defaults to false. + // +kubebuilder:validation:Optional + V1LegacyModeEnabled *bool `json:"v1LegacyModeEnabled,omitempty" tf:"v1_legacy_mode_enabled,omitempty"` +} + +// WorkspaceSpec defines the desired state of Workspace +type WorkspaceSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider WorkspaceParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider WorkspaceInitParameters `json:"initProvider,omitempty"` +} + +// WorkspaceStatus defines the observed state of Workspace. +type WorkspaceStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider WorkspaceObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Workspace is the Schema for the Workspaces API. Manages a Azure Machine Learning Workspace. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type Workspace struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.identity) || (has(self.initProvider) && has(self.initProvider.identity))",message="spec.forProvider.identity is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + Spec WorkspaceSpec `json:"spec"` + Status WorkspaceStatus 
`json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// WorkspaceList contains a list of Workspaces +type WorkspaceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Workspace `json:"items"` +} + +// Repository type metadata. +var ( + Workspace_Kind = "Workspace" + Workspace_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Workspace_Kind}.String() + Workspace_KindAPIVersion = Workspace_Kind + "." + CRDGroupVersion.String() + Workspace_GroupVersionKind = CRDGroupVersion.WithKind(Workspace_Kind) +) + +func init() { + SchemeBuilder.Register(&Workspace{}, &WorkspaceList{}) +} diff --git a/apis/maintenance/v1beta1/zz_generated.conversion_hubs.go b/apis/maintenance/v1beta1/zz_generated.conversion_hubs.go index ba69ee8d2..440df069d 100755 --- a/apis/maintenance/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/maintenance/v1beta1/zz_generated.conversion_hubs.go @@ -11,6 +11,3 @@ func (tr *MaintenanceAssignmentDedicatedHost) Hub() {} // Hub marks this type as a conversion hub. func (tr *MaintenanceAssignmentVirtualMachine) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *MaintenanceConfiguration) Hub() {} diff --git a/apis/maintenance/v1beta1/zz_generated.conversion_spokes.go b/apis/maintenance/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..4a2328229 --- /dev/null +++ b/apis/maintenance/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this MaintenanceConfiguration to the hub type. 
+func (tr *MaintenanceConfiguration) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the MaintenanceConfiguration type. +func (tr *MaintenanceConfiguration) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/maintenance/v1beta1/zz_generated.resolvers.go b/apis/maintenance/v1beta1/zz_generated.resolvers.go index 253eb3dc2..dd9a28aa9 100644 --- a/apis/maintenance/v1beta1/zz_generated.resolvers.go +++ b/apis/maintenance/v1beta1/zz_generated.resolvers.go @@ -45,7 +45,7 @@ func (mg *MaintenanceAssignmentDedicatedHost) ResolveReferences( // ResolveRefer mg.Spec.ForProvider.DedicatedHostID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.DedicatedHostIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("maintenance.azure.upbound.io", "v1beta1", "MaintenanceConfiguration", "MaintenanceConfigurationList") + m, l, err = apisresolver.GetManagedResource("maintenance.azure.upbound.io", "v1beta2", "MaintenanceConfiguration", "MaintenanceConfigurationList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -83,7 +83,7 @@ func (mg *MaintenanceAssignmentDedicatedHost) ResolveReferences( // ResolveRefer 
mg.Spec.InitProvider.DedicatedHostID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.DedicatedHostIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("maintenance.azure.upbound.io", "v1beta1", "MaintenanceConfiguration", "MaintenanceConfigurationList") + m, l, err = apisresolver.GetManagedResource("maintenance.azure.upbound.io", "v1beta2", "MaintenanceConfiguration", "MaintenanceConfigurationList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -114,7 +114,7 @@ func (mg *MaintenanceAssignmentVirtualMachine) ResolveReferences(ctx context.Con var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("maintenance.azure.upbound.io", "v1beta1", "MaintenanceConfiguration", "MaintenanceConfigurationList") + m, l, err = apisresolver.GetManagedResource("maintenance.azure.upbound.io", "v1beta2", "MaintenanceConfiguration", "MaintenanceConfigurationList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -133,7 +133,7 @@ func (mg *MaintenanceAssignmentVirtualMachine) ResolveReferences(ctx context.Con mg.Spec.ForProvider.MaintenanceConfigurationID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.MaintenanceConfigurationIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("compute.azure.upbound.io", "v1beta1", "LinuxVirtualMachine", "LinuxVirtualMachineList") + m, l, err = apisresolver.GetManagedResource("compute.azure.upbound.io", "v1beta2", "LinuxVirtualMachine", "LinuxVirtualMachineList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -152,7 +152,7 @@ func (mg *MaintenanceAssignmentVirtualMachine) ResolveReferences(ctx context.Con mg.Spec.ForProvider.VirtualMachineID = 
reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.VirtualMachineIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("maintenance.azure.upbound.io", "v1beta1", "MaintenanceConfiguration", "MaintenanceConfigurationList") + m, l, err = apisresolver.GetManagedResource("maintenance.azure.upbound.io", "v1beta2", "MaintenanceConfiguration", "MaintenanceConfigurationList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/maintenance/v1beta1/zz_maintenanceassignmentdedicatedhost_types.go b/apis/maintenance/v1beta1/zz_maintenanceassignmentdedicatedhost_types.go index 3aeaf9c85..051ca4e13 100755 --- a/apis/maintenance/v1beta1/zz_maintenanceassignmentdedicatedhost_types.go +++ b/apis/maintenance/v1beta1/zz_maintenanceassignmentdedicatedhost_types.go @@ -32,7 +32,7 @@ type MaintenanceAssignmentDedicatedHostInitParameters struct { Location *string `json:"location,omitempty" tf:"location,omitempty"` // Specifies the ID of the Maintenance Configuration Resource. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/maintenance/v1beta1.MaintenanceConfiguration + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/maintenance/v1beta2.MaintenanceConfiguration // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() MaintenanceConfigurationID *string `json:"maintenanceConfigurationId,omitempty" tf:"maintenance_configuration_id,omitempty"` @@ -81,7 +81,7 @@ type MaintenanceAssignmentDedicatedHostParameters struct { Location *string `json:"location,omitempty" tf:"location,omitempty"` // Specifies the ID of the Maintenance Configuration Resource. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/maintenance/v1beta1.MaintenanceConfiguration + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/maintenance/v1beta2.MaintenanceConfiguration // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional MaintenanceConfigurationID *string `json:"maintenanceConfigurationId,omitempty" tf:"maintenance_configuration_id,omitempty"` diff --git a/apis/maintenance/v1beta1/zz_maintenanceassignmentvirtualmachine_types.go b/apis/maintenance/v1beta1/zz_maintenanceassignmentvirtualmachine_types.go index 096cd26d4..51bded77a 100755 --- a/apis/maintenance/v1beta1/zz_maintenanceassignmentvirtualmachine_types.go +++ b/apis/maintenance/v1beta1/zz_maintenanceassignmentvirtualmachine_types.go @@ -19,7 +19,7 @@ type MaintenanceAssignmentVirtualMachineInitParameters struct { Location *string `json:"location,omitempty" tf:"location,omitempty"` // Specifies the ID of the Maintenance Configuration Resource. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/maintenance/v1beta1.MaintenanceConfiguration + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/maintenance/v1beta2.MaintenanceConfiguration // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() MaintenanceConfigurationID *string `json:"maintenanceConfigurationId,omitempty" tf:"maintenance_configuration_id,omitempty"` @@ -54,7 +54,7 @@ type MaintenanceAssignmentVirtualMachineParameters struct { Location *string `json:"location,omitempty" tf:"location,omitempty"` // Specifies the ID of the Maintenance Configuration Resource. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/maintenance/v1beta1.MaintenanceConfiguration + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/maintenance/v1beta2.MaintenanceConfiguration // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional MaintenanceConfigurationID *string `json:"maintenanceConfigurationId,omitempty" tf:"maintenance_configuration_id,omitempty"` @@ -68,7 +68,7 @@ type MaintenanceAssignmentVirtualMachineParameters struct { MaintenanceConfigurationIDSelector *v1.Selector `json:"maintenanceConfigurationIdSelector,omitempty" tf:"-"` // Specifies the Virtual Machine ID to which the Maintenance Configuration will be assigned. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/compute/v1beta1.LinuxVirtualMachine + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/compute/v1beta2.LinuxVirtualMachine // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional VirtualMachineID *string `json:"virtualMachineId,omitempty" tf:"virtual_machine_id,omitempty"` diff --git a/apis/maintenance/v1beta2/zz_generated.conversion_hubs.go b/apis/maintenance/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 000000000..ae05fecde --- /dev/null +++ b/apis/maintenance/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. 
+func (tr *MaintenanceConfiguration) Hub() {} diff --git a/apis/maintenance/v1beta2/zz_generated.deepcopy.go b/apis/maintenance/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..587794523 --- /dev/null +++ b/apis/maintenance/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,874 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstallPatchesInitParameters) DeepCopyInto(out *InstallPatchesInitParameters) { + *out = *in + if in.Linux != nil { + in, out := &in.Linux, &out.Linux + *out = make([]LinuxInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Reboot != nil { + in, out := &in.Reboot, &out.Reboot + *out = new(string) + **out = **in + } + if in.Windows != nil { + in, out := &in.Windows, &out.Windows + *out = make([]WindowsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallPatchesInitParameters. +func (in *InstallPatchesInitParameters) DeepCopy() *InstallPatchesInitParameters { + if in == nil { + return nil + } + out := new(InstallPatchesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InstallPatchesObservation) DeepCopyInto(out *InstallPatchesObservation) { + *out = *in + if in.Linux != nil { + in, out := &in.Linux, &out.Linux + *out = make([]LinuxObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Reboot != nil { + in, out := &in.Reboot, &out.Reboot + *out = new(string) + **out = **in + } + if in.Windows != nil { + in, out := &in.Windows, &out.Windows + *out = make([]WindowsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallPatchesObservation. +func (in *InstallPatchesObservation) DeepCopy() *InstallPatchesObservation { + if in == nil { + return nil + } + out := new(InstallPatchesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstallPatchesParameters) DeepCopyInto(out *InstallPatchesParameters) { + *out = *in + if in.Linux != nil { + in, out := &in.Linux, &out.Linux + *out = make([]LinuxParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Reboot != nil { + in, out := &in.Reboot, &out.Reboot + *out = new(string) + **out = **in + } + if in.Windows != nil { + in, out := &in.Windows, &out.Windows + *out = make([]WindowsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallPatchesParameters. +func (in *InstallPatchesParameters) DeepCopy() *InstallPatchesParameters { + if in == nil { + return nil + } + out := new(InstallPatchesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxInitParameters) DeepCopyInto(out *LinuxInitParameters) { + *out = *in + if in.ClassificationsToInclude != nil { + in, out := &in.ClassificationsToInclude, &out.ClassificationsToInclude + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PackageNamesMaskToExclude != nil { + in, out := &in.PackageNamesMaskToExclude, &out.PackageNamesMaskToExclude + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PackageNamesMaskToInclude != nil { + in, out := &in.PackageNamesMaskToInclude, &out.PackageNamesMaskToInclude + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxInitParameters. +func (in *LinuxInitParameters) DeepCopy() *LinuxInitParameters { + if in == nil { + return nil + } + out := new(LinuxInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxObservation) DeepCopyInto(out *LinuxObservation) { + *out = *in + if in.ClassificationsToInclude != nil { + in, out := &in.ClassificationsToInclude, &out.ClassificationsToInclude + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PackageNamesMaskToExclude != nil { + in, out := &in.PackageNamesMaskToExclude, &out.PackageNamesMaskToExclude + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PackageNamesMaskToInclude != nil { + in, out := &in.PackageNamesMaskToInclude, &out.PackageNamesMaskToInclude + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxObservation. +func (in *LinuxObservation) DeepCopy() *LinuxObservation { + if in == nil { + return nil + } + out := new(LinuxObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxParameters) DeepCopyInto(out *LinuxParameters) { + *out = *in + if in.ClassificationsToInclude != nil { + in, out := &in.ClassificationsToInclude, &out.ClassificationsToInclude + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PackageNamesMaskToExclude != nil { + in, out := &in.PackageNamesMaskToExclude, &out.PackageNamesMaskToExclude + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PackageNamesMaskToInclude != nil { + in, out := &in.PackageNamesMaskToInclude, &out.PackageNamesMaskToInclude + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxParameters. +func (in *LinuxParameters) DeepCopy() *LinuxParameters { + if in == nil { + return nil + } + out := new(LinuxParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaintenanceConfiguration) DeepCopyInto(out *MaintenanceConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceConfiguration. +func (in *MaintenanceConfiguration) DeepCopy() *MaintenanceConfiguration { + if in == nil { + return nil + } + out := new(MaintenanceConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *MaintenanceConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaintenanceConfigurationInitParameters) DeepCopyInto(out *MaintenanceConfigurationInitParameters) { + *out = *in + if in.InGuestUserPatchMode != nil { + in, out := &in.InGuestUserPatchMode, &out.InGuestUserPatchMode + *out = new(string) + **out = **in + } + if in.InstallPatches != nil { + in, out := &in.InstallPatches, &out.InstallPatches + *out = new(InstallPatchesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Visibility != nil { + in, out := &in.Visibility, &out.Visibility + *out = new(string) + **out = **in + } + if in.Window != nil { + in, out := &in.Window, &out.Window + *out = new(WindowInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceConfigurationInitParameters. 
+func (in *MaintenanceConfigurationInitParameters) DeepCopy() *MaintenanceConfigurationInitParameters { + if in == nil { + return nil + } + out := new(MaintenanceConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaintenanceConfigurationList) DeepCopyInto(out *MaintenanceConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MaintenanceConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceConfigurationList. +func (in *MaintenanceConfigurationList) DeepCopy() *MaintenanceConfigurationList { + if in == nil { + return nil + } + out := new(MaintenanceConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MaintenanceConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MaintenanceConfigurationObservation) DeepCopyInto(out *MaintenanceConfigurationObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InGuestUserPatchMode != nil { + in, out := &in.InGuestUserPatchMode, &out.InGuestUserPatchMode + *out = new(string) + **out = **in + } + if in.InstallPatches != nil { + in, out := &in.InstallPatches, &out.InstallPatches + *out = new(InstallPatchesObservation) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Visibility != nil { + in, out := &in.Visibility, &out.Visibility + *out = new(string) + **out = **in + } + if in.Window != nil { + in, out := &in.Window, &out.Window + *out = new(WindowObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceConfigurationObservation. 
+func (in *MaintenanceConfigurationObservation) DeepCopy() *MaintenanceConfigurationObservation { + if in == nil { + return nil + } + out := new(MaintenanceConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaintenanceConfigurationParameters) DeepCopyInto(out *MaintenanceConfigurationParameters) { + *out = *in + if in.InGuestUserPatchMode != nil { + in, out := &in.InGuestUserPatchMode, &out.InGuestUserPatchMode + *out = new(string) + **out = **in + } + if in.InstallPatches != nil { + in, out := &in.InstallPatches, &out.InstallPatches + *out = new(InstallPatchesParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out 
:= &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Visibility != nil { + in, out := &in.Visibility, &out.Visibility + *out = new(string) + **out = **in + } + if in.Window != nil { + in, out := &in.Window, &out.Window + *out = new(WindowParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceConfigurationParameters. +func (in *MaintenanceConfigurationParameters) DeepCopy() *MaintenanceConfigurationParameters { + if in == nil { + return nil + } + out := new(MaintenanceConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaintenanceConfigurationSpec) DeepCopyInto(out *MaintenanceConfigurationSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceConfigurationSpec. +func (in *MaintenanceConfigurationSpec) DeepCopy() *MaintenanceConfigurationSpec { + if in == nil { + return nil + } + out := new(MaintenanceConfigurationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaintenanceConfigurationStatus) DeepCopyInto(out *MaintenanceConfigurationStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceConfigurationStatus. 
+func (in *MaintenanceConfigurationStatus) DeepCopy() *MaintenanceConfigurationStatus { + if in == nil { + return nil + } + out := new(MaintenanceConfigurationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowInitParameters) DeepCopyInto(out *WindowInitParameters) { + *out = *in + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(string) + **out = **in + } + if in.ExpirationDateTime != nil { + in, out := &in.ExpirationDateTime, &out.ExpirationDateTime + *out = new(string) + **out = **in + } + if in.RecurEvery != nil { + in, out := &in.RecurEvery, &out.RecurEvery + *out = new(string) + **out = **in + } + if in.StartDateTime != nil { + in, out := &in.StartDateTime, &out.StartDateTime + *out = new(string) + **out = **in + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowInitParameters. +func (in *WindowInitParameters) DeepCopy() *WindowInitParameters { + if in == nil { + return nil + } + out := new(WindowInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowObservation) DeepCopyInto(out *WindowObservation) { + *out = *in + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(string) + **out = **in + } + if in.ExpirationDateTime != nil { + in, out := &in.ExpirationDateTime, &out.ExpirationDateTime + *out = new(string) + **out = **in + } + if in.RecurEvery != nil { + in, out := &in.RecurEvery, &out.RecurEvery + *out = new(string) + **out = **in + } + if in.StartDateTime != nil { + in, out := &in.StartDateTime, &out.StartDateTime + *out = new(string) + **out = **in + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowObservation. +func (in *WindowObservation) DeepCopy() *WindowObservation { + if in == nil { + return nil + } + out := new(WindowObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowParameters) DeepCopyInto(out *WindowParameters) { + *out = *in + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(string) + **out = **in + } + if in.ExpirationDateTime != nil { + in, out := &in.ExpirationDateTime, &out.ExpirationDateTime + *out = new(string) + **out = **in + } + if in.RecurEvery != nil { + in, out := &in.RecurEvery, &out.RecurEvery + *out = new(string) + **out = **in + } + if in.StartDateTime != nil { + in, out := &in.StartDateTime, &out.StartDateTime + *out = new(string) + **out = **in + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowParameters. 
+func (in *WindowParameters) DeepCopy() *WindowParameters { + if in == nil { + return nil + } + out := new(WindowParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsInitParameters) DeepCopyInto(out *WindowsInitParameters) { + *out = *in + if in.ClassificationsToInclude != nil { + in, out := &in.ClassificationsToInclude, &out.ClassificationsToInclude + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.KbNumbersToExclude != nil { + in, out := &in.KbNumbersToExclude, &out.KbNumbersToExclude + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.KbNumbersToInclude != nil { + in, out := &in.KbNumbersToInclude, &out.KbNumbersToInclude + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsInitParameters. +func (in *WindowsInitParameters) DeepCopy() *WindowsInitParameters { + if in == nil { + return nil + } + out := new(WindowsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsObservation) DeepCopyInto(out *WindowsObservation) { + *out = *in + if in.ClassificationsToInclude != nil { + in, out := &in.ClassificationsToInclude, &out.ClassificationsToInclude + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.KbNumbersToExclude != nil { + in, out := &in.KbNumbersToExclude, &out.KbNumbersToExclude + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.KbNumbersToInclude != nil { + in, out := &in.KbNumbersToInclude, &out.KbNumbersToInclude + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsObservation. +func (in *WindowsObservation) DeepCopy() *WindowsObservation { + if in == nil { + return nil + } + out := new(WindowsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsParameters) DeepCopyInto(out *WindowsParameters) { + *out = *in + if in.ClassificationsToInclude != nil { + in, out := &in.ClassificationsToInclude, &out.ClassificationsToInclude + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.KbNumbersToExclude != nil { + in, out := &in.KbNumbersToExclude, &out.KbNumbersToExclude + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.KbNumbersToInclude != nil { + in, out := &in.KbNumbersToInclude, &out.KbNumbersToInclude + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsParameters. +func (in *WindowsParameters) DeepCopy() *WindowsParameters { + if in == nil { + return nil + } + out := new(WindowsParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/maintenance/v1beta2/zz_generated.managed.go b/apis/maintenance/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..62791e3d2 --- /dev/null +++ b/apis/maintenance/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this MaintenanceConfiguration. +func (mg *MaintenanceConfiguration) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this MaintenanceConfiguration. 
+func (mg *MaintenanceConfiguration) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this MaintenanceConfiguration. +func (mg *MaintenanceConfiguration) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this MaintenanceConfiguration. +func (mg *MaintenanceConfiguration) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this MaintenanceConfiguration. +func (mg *MaintenanceConfiguration) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this MaintenanceConfiguration. +func (mg *MaintenanceConfiguration) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this MaintenanceConfiguration. +func (mg *MaintenanceConfiguration) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this MaintenanceConfiguration. +func (mg *MaintenanceConfiguration) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this MaintenanceConfiguration. +func (mg *MaintenanceConfiguration) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this MaintenanceConfiguration. +func (mg *MaintenanceConfiguration) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this MaintenanceConfiguration. +func (mg *MaintenanceConfiguration) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this MaintenanceConfiguration. 
+func (mg *MaintenanceConfiguration) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/maintenance/v1beta2/zz_generated.managedlist.go b/apis/maintenance/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..4fff21255 --- /dev/null +++ b/apis/maintenance/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this MaintenanceConfigurationList. +func (l *MaintenanceConfigurationList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/maintenance/v1beta2/zz_generated.resolvers.go b/apis/maintenance/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..dbea1e021 --- /dev/null +++ b/apis/maintenance/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,48 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + apisresolver "github.com/upbound/provider-azure/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *MaintenanceConfiguration) ResolveReferences( // ResolveReferences of this MaintenanceConfiguration. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/maintenance/v1beta2/zz_groupversion_info.go b/apis/maintenance/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..33ef9aa6e --- /dev/null +++ b/apis/maintenance/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=maintenance.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
+const ( + CRDGroup = "maintenance.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/maintenance/v1beta2/zz_maintenanceconfiguration_terraformed.go b/apis/maintenance/v1beta2/zz_maintenanceconfiguration_terraformed.go new file mode 100755 index 000000000..d3866dccb --- /dev/null +++ b/apis/maintenance/v1beta2/zz_maintenanceconfiguration_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this MaintenanceConfiguration +func (mg *MaintenanceConfiguration) GetTerraformResourceType() string { + return "azurerm_maintenance_configuration" +} + +// GetConnectionDetailsMapping for this MaintenanceConfiguration +func (tr *MaintenanceConfiguration) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this MaintenanceConfiguration +func (tr *MaintenanceConfiguration) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this MaintenanceConfiguration +func (tr *MaintenanceConfiguration) SetObservation(obs map[string]any) error { + p, err := 
json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this MaintenanceConfiguration +func (tr *MaintenanceConfiguration) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this MaintenanceConfiguration +func (tr *MaintenanceConfiguration) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this MaintenanceConfiguration +func (tr *MaintenanceConfiguration) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this MaintenanceConfiguration +func (tr *MaintenanceConfiguration) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this MaintenanceConfiguration +func (tr *MaintenanceConfiguration) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. 
As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this MaintenanceConfiguration using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *MaintenanceConfiguration) LateInitialize(attrs []byte) (bool, error) { + params := &MaintenanceConfigurationParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *MaintenanceConfiguration) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/maintenance/v1beta2/zz_maintenanceconfiguration_types.go b/apis/maintenance/v1beta2/zz_maintenanceconfiguration_types.go new file mode 100755 index 000000000..084ffdbd7 --- /dev/null +++ b/apis/maintenance/v1beta2/zz_maintenanceconfiguration_types.go @@ -0,0 +1,366 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type InstallPatchesInitParameters struct { + + // A linux block as defined above. This property only applies when scope is set to InGuestPatch + Linux []LinuxInitParameters `json:"linux,omitempty" tf:"linux,omitempty"` + + // Possible reboot preference as defined by the user based on which it would be decided to reboot the machine or not after the patch operation is completed. Possible values are Always, IfRequired and Never. This property only applies when scope is set to InGuestPatch. + Reboot *string `json:"reboot,omitempty" tf:"reboot,omitempty"` + + // A windows block as defined above. This property only applies when scope is set to InGuestPatch + Windows []WindowsInitParameters `json:"windows,omitempty" tf:"windows,omitempty"` +} + +type InstallPatchesObservation struct { + + // A linux block as defined above. This property only applies when scope is set to InGuestPatch + Linux []LinuxObservation `json:"linux,omitempty" tf:"linux,omitempty"` + + // Possible reboot preference as defined by the user based on which it would be decided to reboot the machine or not after the patch operation is completed. Possible values are Always, IfRequired and Never. This property only applies when scope is set to InGuestPatch. + Reboot *string `json:"reboot,omitempty" tf:"reboot,omitempty"` + + // A windows block as defined above. This property only applies when scope is set to InGuestPatch + Windows []WindowsObservation `json:"windows,omitempty" tf:"windows,omitempty"` +} + +type InstallPatchesParameters struct { + + // A linux block as defined above. 
This property only applies when scope is set to InGuestPatch + // +kubebuilder:validation:Optional + Linux []LinuxParameters `json:"linux,omitempty" tf:"linux,omitempty"` + + // Possible reboot preference as defined by the user based on which it would be decided to reboot the machine or not after the patch operation is completed. Possible values are Always, IfRequired and Never. This property only applies when scope is set to InGuestPatch. + // +kubebuilder:validation:Optional + Reboot *string `json:"reboot,omitempty" tf:"reboot,omitempty"` + + // A windows block as defined above. This property only applies when scope is set to InGuestPatch + // +kubebuilder:validation:Optional + Windows []WindowsParameters `json:"windows,omitempty" tf:"windows,omitempty"` +} + +type LinuxInitParameters struct { + + // List of Classification category of patches to be patched. Possible values are Critical, Security, UpdateRollup, FeaturePack, ServicePack, Definition, Tools and Updates. + ClassificationsToInclude []*string `json:"classificationsToInclude,omitempty" tf:"classifications_to_include,omitempty"` + + // List of package names to be excluded from patching. + PackageNamesMaskToExclude []*string `json:"packageNamesMaskToExclude,omitempty" tf:"package_names_mask_to_exclude,omitempty"` + + // List of package names to be included for patching. + PackageNamesMaskToInclude []*string `json:"packageNamesMaskToInclude,omitempty" tf:"package_names_mask_to_include,omitempty"` +} + +type LinuxObservation struct { + + // List of Classification category of patches to be patched. Possible values are Critical, Security, UpdateRollup, FeaturePack, ServicePack, Definition, Tools and Updates. + ClassificationsToInclude []*string `json:"classificationsToInclude,omitempty" tf:"classifications_to_include,omitempty"` + + // List of package names to be excluded from patching. 
+ PackageNamesMaskToExclude []*string `json:"packageNamesMaskToExclude,omitempty" tf:"package_names_mask_to_exclude,omitempty"` + + // List of package names to be included for patching. + PackageNamesMaskToInclude []*string `json:"packageNamesMaskToInclude,omitempty" tf:"package_names_mask_to_include,omitempty"` +} + +type LinuxParameters struct { + + // List of Classification category of patches to be patched. Possible values are Critical, Security, UpdateRollup, FeaturePack, ServicePack, Definition, Tools and Updates. + // +kubebuilder:validation:Optional + ClassificationsToInclude []*string `json:"classificationsToInclude,omitempty" tf:"classifications_to_include,omitempty"` + + // List of package names to be excluded from patching. + // +kubebuilder:validation:Optional + PackageNamesMaskToExclude []*string `json:"packageNamesMaskToExclude,omitempty" tf:"package_names_mask_to_exclude,omitempty"` + + // List of package names to be included for patching. + // +kubebuilder:validation:Optional + PackageNamesMaskToInclude []*string `json:"packageNamesMaskToInclude,omitempty" tf:"package_names_mask_to_include,omitempty"` +} + +type MaintenanceConfigurationInitParameters struct { + + // The in guest user patch mode. Possible values are Platform or User. Must be specified when scope is InGuestPatch. + InGuestUserPatchMode *string `json:"inGuestUserPatchMode,omitempty" tf:"in_guest_user_patch_mode,omitempty"` + + // An install_patches block as defined below. + InstallPatches *InstallPatchesInitParameters `json:"installPatches,omitempty" tf:"install_patches,omitempty"` + + // Specified the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A mapping of properties to assign to the resource. 
+ // +mapType=granular + Properties map[string]*string `json:"properties,omitempty" tf:"properties,omitempty"` + + // The scope of the Maintenance Configuration. Possible values are Extension, Host, InGuestPatch, OSImage, SQLDB or SQLManagedInstance. + Scope *string `json:"scope,omitempty" tf:"scope,omitempty"` + + // A mapping of tags to assign to the resource. The key could not contain upper case letter. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The visibility of the Maintenance Configuration. The only allowable value is Custom. Defaults to Custom. + Visibility *string `json:"visibility,omitempty" tf:"visibility,omitempty"` + + // A window block as defined below. + Window *WindowInitParameters `json:"window,omitempty" tf:"window,omitempty"` +} + +type MaintenanceConfigurationObservation struct { + + // The ID of the Maintenance Configuration. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The in guest user patch mode. Possible values are Platform or User. Must be specified when scope is InGuestPatch. + InGuestUserPatchMode *string `json:"inGuestUserPatchMode,omitempty" tf:"in_guest_user_patch_mode,omitempty"` + + // An install_patches block as defined below. + InstallPatches *InstallPatchesObservation `json:"installPatches,omitempty" tf:"install_patches,omitempty"` + + // Specified the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A mapping of properties to assign to the resource. + // +mapType=granular + Properties map[string]*string `json:"properties,omitempty" tf:"properties,omitempty"` + + // The name of the Resource Group where the Maintenance Configuration should exist. Changing this forces a new resource to be created. 
+ ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // The scope of the Maintenance Configuration. Possible values are Extension, Host, InGuestPatch, OSImage, SQLDB or SQLManagedInstance. + Scope *string `json:"scope,omitempty" tf:"scope,omitempty"` + + // A mapping of tags to assign to the resource. The key could not contain upper case letter. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The visibility of the Maintenance Configuration. The only allowable value is Custom. Defaults to Custom. + Visibility *string `json:"visibility,omitempty" tf:"visibility,omitempty"` + + // A window block as defined below. + Window *WindowObservation `json:"window,omitempty" tf:"window,omitempty"` +} + +type MaintenanceConfigurationParameters struct { + + // The in guest user patch mode. Possible values are Platform or User. Must be specified when scope is InGuestPatch. + // +kubebuilder:validation:Optional + InGuestUserPatchMode *string `json:"inGuestUserPatchMode,omitempty" tf:"in_guest_user_patch_mode,omitempty"` + + // An install_patches block as defined below. + // +kubebuilder:validation:Optional + InstallPatches *InstallPatchesParameters `json:"installPatches,omitempty" tf:"install_patches,omitempty"` + + // Specified the supported Azure location where the resource exists. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A mapping of properties to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Properties map[string]*string `json:"properties,omitempty" tf:"properties,omitempty"` + + // The name of the Resource Group where the Maintenance Configuration should exist. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // The scope of the Maintenance Configuration. Possible values are Extension, Host, InGuestPatch, OSImage, SQLDB or SQLManagedInstance. + // +kubebuilder:validation:Optional + Scope *string `json:"scope,omitempty" tf:"scope,omitempty"` + + // A mapping of tags to assign to the resource. The key could not contain upper case letter. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The visibility of the Maintenance Configuration. The only allowable value is Custom. Defaults to Custom. + // +kubebuilder:validation:Optional + Visibility *string `json:"visibility,omitempty" tf:"visibility,omitempty"` + + // A window block as defined below. + // +kubebuilder:validation:Optional + Window *WindowParameters `json:"window,omitempty" tf:"window,omitempty"` +} + +type WindowInitParameters struct { + + // The duration of the maintenance window in HH:mm format. + Duration *string `json:"duration,omitempty" tf:"duration,omitempty"` + + // Effective expiration date of the maintenance window in YYYY-MM-DD hh:mm format. + ExpirationDateTime *string `json:"expirationDateTime,omitempty" tf:"expiration_date_time,omitempty"` + + // The rate at which a maintenance window is expected to recur. The rate can be expressed as daily, weekly, or monthly schedules. 
+ RecurEvery *string `json:"recurEvery,omitempty" tf:"recur_every,omitempty"` + + // Effective start date of the maintenance window in YYYY-MM-DD hh:mm format. + StartDateTime *string `json:"startDateTime,omitempty" tf:"start_date_time,omitempty"` + + // The time zone for the maintenance window. A list of timezones can be obtained by executing [System.TimeZoneInfo]::GetSystemTimeZones() in PowerShell. + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +type WindowObservation struct { + + // The duration of the maintenance window in HH:mm format. + Duration *string `json:"duration,omitempty" tf:"duration,omitempty"` + + // Effective expiration date of the maintenance window in YYYY-MM-DD hh:mm format. + ExpirationDateTime *string `json:"expirationDateTime,omitempty" tf:"expiration_date_time,omitempty"` + + // The rate at which a maintenance window is expected to recur. The rate can be expressed as daily, weekly, or monthly schedules. + RecurEvery *string `json:"recurEvery,omitempty" tf:"recur_every,omitempty"` + + // Effective start date of the maintenance window in YYYY-MM-DD hh:mm format. + StartDateTime *string `json:"startDateTime,omitempty" tf:"start_date_time,omitempty"` + + // The time zone for the maintenance window. A list of timezones can be obtained by executing [System.TimeZoneInfo]::GetSystemTimeZones() in PowerShell. + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +type WindowParameters struct { + + // The duration of the maintenance window in HH:mm format. + // +kubebuilder:validation:Optional + Duration *string `json:"duration,omitempty" tf:"duration,omitempty"` + + // Effective expiration date of the maintenance window in YYYY-MM-DD hh:mm format. + // +kubebuilder:validation:Optional + ExpirationDateTime *string `json:"expirationDateTime,omitempty" tf:"expiration_date_time,omitempty"` + + // The rate at which a maintenance window is expected to recur. 
The rate can be expressed as daily, weekly, or monthly schedules. + // +kubebuilder:validation:Optional + RecurEvery *string `json:"recurEvery,omitempty" tf:"recur_every,omitempty"` + + // Effective start date of the maintenance window in YYYY-MM-DD hh:mm format. + // +kubebuilder:validation:Optional + StartDateTime *string `json:"startDateTime" tf:"start_date_time,omitempty"` + + // The time zone for the maintenance window. A list of timezones can be obtained by executing [System.TimeZoneInfo]::GetSystemTimeZones() in PowerShell. + // +kubebuilder:validation:Optional + TimeZone *string `json:"timeZone" tf:"time_zone,omitempty"` +} + +type WindowsInitParameters struct { + + // List of Classification category of patches to be patched. Possible values are Critical, Security, UpdateRollup, FeaturePack, ServicePack, Definition, Tools and Updates. + ClassificationsToInclude []*string `json:"classificationsToInclude,omitempty" tf:"classifications_to_include,omitempty"` + + // List of KB numbers to be excluded from patching. + KbNumbersToExclude []*string `json:"kbNumbersToExclude,omitempty" tf:"kb_numbers_to_exclude,omitempty"` + + // List of KB numbers to be included for patching. + KbNumbersToInclude []*string `json:"kbNumbersToInclude,omitempty" tf:"kb_numbers_to_include,omitempty"` +} + +type WindowsObservation struct { + + // List of Classification category of patches to be patched. Possible values are Critical, Security, UpdateRollup, FeaturePack, ServicePack, Definition, Tools and Updates. + ClassificationsToInclude []*string `json:"classificationsToInclude,omitempty" tf:"classifications_to_include,omitempty"` + + // List of KB numbers to be excluded from patching. + KbNumbersToExclude []*string `json:"kbNumbersToExclude,omitempty" tf:"kb_numbers_to_exclude,omitempty"` + + // List of KB numbers to be included for patching. 
+ KbNumbersToInclude []*string `json:"kbNumbersToInclude,omitempty" tf:"kb_numbers_to_include,omitempty"` +} + +type WindowsParameters struct { + + // List of Classification category of patches to be patched. Possible values are Critical, Security, UpdateRollup, FeaturePack, ServicePack, Definition, Tools and Updates. + // +kubebuilder:validation:Optional + ClassificationsToInclude []*string `json:"classificationsToInclude,omitempty" tf:"classifications_to_include,omitempty"` + + // List of KB numbers to be excluded from patching. + // +kubebuilder:validation:Optional + KbNumbersToExclude []*string `json:"kbNumbersToExclude,omitempty" tf:"kb_numbers_to_exclude,omitempty"` + + // List of KB numbers to be included for patching. + // +kubebuilder:validation:Optional + KbNumbersToInclude []*string `json:"kbNumbersToInclude,omitempty" tf:"kb_numbers_to_include,omitempty"` +} + +// MaintenanceConfigurationSpec defines the desired state of MaintenanceConfiguration +type MaintenanceConfigurationSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider MaintenanceConfigurationParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider MaintenanceConfigurationInitParameters `json:"initProvider,omitempty"` +} + +// MaintenanceConfigurationStatus defines the observed state of MaintenanceConfiguration. 
+type MaintenanceConfigurationStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider MaintenanceConfigurationObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// MaintenanceConfiguration is the Schema for the MaintenanceConfigurations API. Manages a Maintenance Configuration. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type MaintenanceConfiguration struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.scope) || (has(self.initProvider) && has(self.initProvider.scope))",message="spec.forProvider.scope is a required parameter" + Spec MaintenanceConfigurationSpec `json:"spec"` + Status MaintenanceConfigurationStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// MaintenanceConfigurationList contains a list of MaintenanceConfigurations +type MaintenanceConfigurationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items 
[]MaintenanceConfiguration `json:"items"` +} + +// Repository type metadata. +var ( + MaintenanceConfiguration_Kind = "MaintenanceConfiguration" + MaintenanceConfiguration_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: MaintenanceConfiguration_Kind}.String() + MaintenanceConfiguration_KindAPIVersion = MaintenanceConfiguration_Kind + "." + CRDGroupVersion.String() + MaintenanceConfiguration_GroupVersionKind = CRDGroupVersion.WithKind(MaintenanceConfiguration_Kind) +) + +func init() { + SchemeBuilder.Register(&MaintenanceConfiguration{}, &MaintenanceConfigurationList{}) +} diff --git a/apis/media/v1beta1/zz_asset_types.go b/apis/media/v1beta1/zz_asset_types.go index 44526ca9e..64982ed03 100755 --- a/apis/media/v1beta1/zz_asset_types.go +++ b/apis/media/v1beta1/zz_asset_types.go @@ -67,7 +67,7 @@ type AssetParameters struct { Description *string `json:"description,omitempty" tf:"description,omitempty"` // Specifies the name of the Media Services Account. Changing this forces a new Media Asset to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/media/v1beta1.ServicesAccount + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/media/v1beta2.ServicesAccount // +kubebuilder:validation:Optional MediaServicesAccountName *string `json:"mediaServicesAccountName,omitempty" tf:"media_services_account_name,omitempty"` diff --git a/apis/media/v1beta1/zz_generated.conversion_hubs.go b/apis/media/v1beta1/zz_generated.conversion_hubs.go index f73693c71..15064a7ed 100755 --- a/apis/media/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/media/v1beta1/zz_generated.conversion_hubs.go @@ -9,35 +9,8 @@ package v1beta1 // Hub marks this type as a conversion hub. func (tr *Asset) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *AssetFilter) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *ContentKeyPolicy) Hub() {} - -// Hub marks this type as a conversion hub. 
-func (tr *Job) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *LiveEvent) Hub() {} - // Hub marks this type as a conversion hub. func (tr *LiveEventOutput) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *ServicesAccount) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *ServicesAccountFilter) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *StreamingEndpoint) Hub() {} - // Hub marks this type as a conversion hub. func (tr *StreamingLocator) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *StreamingPolicy) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Transform) Hub() {} diff --git a/apis/media/v1beta1/zz_generated.conversion_spokes.go b/apis/media/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..8fdaf63e6 --- /dev/null +++ b/apis/media/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,194 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this AssetFilter to the hub type. +func (tr *AssetFilter) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the AssetFilter type. 
+func (tr *AssetFilter) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this ContentKeyPolicy to the hub type. +func (tr *ContentKeyPolicy) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ContentKeyPolicy type. +func (tr *ContentKeyPolicy) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Job to the hub type. +func (tr *Job) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Job type. 
+func (tr *Job) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this LiveEvent to the hub type. +func (tr *LiveEvent) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the LiveEvent type. +func (tr *LiveEvent) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this ServicesAccount to the hub type. +func (tr *ServicesAccount) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ServicesAccount type. 
+func (tr *ServicesAccount) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this ServicesAccountFilter to the hub type. +func (tr *ServicesAccountFilter) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ServicesAccountFilter type. +func (tr *ServicesAccountFilter) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this StreamingEndpoint to the hub type. +func (tr *StreamingEndpoint) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the StreamingEndpoint type. 
+func (tr *StreamingEndpoint) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this StreamingPolicy to the hub type. +func (tr *StreamingPolicy) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the StreamingPolicy type. +func (tr *StreamingPolicy) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Transform to the hub type. +func (tr *Transform) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Transform type. 
+func (tr *Transform) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/media/v1beta1/zz_generated.resolvers.go b/apis/media/v1beta1/zz_generated.resolvers.go index 4ddae4382..038a4b972 100644 --- a/apis/media/v1beta1/zz_generated.resolvers.go +++ b/apis/media/v1beta1/zz_generated.resolvers.go @@ -29,7 +29,7 @@ func (mg *Asset) ResolveReferences(ctx context.Context, c client.Reader) error { var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("media.azure.upbound.io", "v1beta1", "ServicesAccount", "ServicesAccountList") + m, l, err = apisresolver.GetManagedResource("media.azure.upbound.io", "v1beta2", "ServicesAccount", "ServicesAccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -383,7 +383,7 @@ func (mg *LiveEventOutput) ResolveReferences(ctx context.Context, c client.Reade mg.Spec.ForProvider.AssetName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.AssetNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("media.azure.upbound.io", "v1beta1", "LiveEvent", "LiveEventList") + m, l, err = apisresolver.GetManagedResource("media.azure.upbound.io", "v1beta2", "LiveEvent", "LiveEventList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -626,7 +626,7 @@ func (mg *StreamingLocator) ResolveReferences(ctx context.Context, c client.Read mg.Spec.ForProvider.AssetName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.AssetNameRef = 
rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("media.azure.upbound.io", "v1beta1", "ServicesAccount", "ServicesAccountList") + m, l, err = apisresolver.GetManagedResource("media.azure.upbound.io", "v1beta2", "ServicesAccount", "ServicesAccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/media/v1beta1/zz_liveeventoutput_types.go b/apis/media/v1beta1/zz_liveeventoutput_types.go index 72c9ff956..7396b551c 100755 --- a/apis/media/v1beta1/zz_liveeventoutput_types.go +++ b/apis/media/v1beta1/zz_liveeventoutput_types.go @@ -104,7 +104,7 @@ type LiveEventOutputParameters struct { HlsFragmentsPerTSSegment *float64 `json:"hlsFragmentsPerTsSegment,omitempty" tf:"hls_fragments_per_ts_segment,omitempty"` // The id of the live event. Changing this forces a new Live Output to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/media/v1beta1.LiveEvent + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/media/v1beta2.LiveEvent // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() // +kubebuilder:validation:Optional LiveEventID *string `json:"liveEventId,omitempty" tf:"live_event_id,omitempty"` diff --git a/apis/media/v1beta1/zz_streaminglocator_types.go b/apis/media/v1beta1/zz_streaminglocator_types.go index 803d62667..e4a3d6923 100755 --- a/apis/media/v1beta1/zz_streaminglocator_types.go +++ b/apis/media/v1beta1/zz_streaminglocator_types.go @@ -186,7 +186,7 @@ type StreamingLocatorParameters struct { FilterNames []*string `json:"filterNames,omitempty" tf:"filter_names,omitempty"` // The Media Services account name. Changing this forces a new Streaming Locator to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/media/v1beta1.ServicesAccount + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/media/v1beta2.ServicesAccount // +kubebuilder:validation:Optional MediaServicesAccountName *string `json:"mediaServicesAccountName,omitempty" tf:"media_services_account_name,omitempty"` diff --git a/apis/media/v1beta2/zz_assetfilter_terraformed.go b/apis/media/v1beta2/zz_assetfilter_terraformed.go new file mode 100755 index 000000000..31c2fe9cc --- /dev/null +++ b/apis/media/v1beta2/zz_assetfilter_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this AssetFilter +func (mg *AssetFilter) GetTerraformResourceType() string { + return "azurerm_media_asset_filter" +} + +// GetConnectionDetailsMapping for this AssetFilter +func (tr *AssetFilter) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this AssetFilter +func (tr *AssetFilter) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this AssetFilter +func (tr *AssetFilter) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this AssetFilter +func (tr *AssetFilter) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return 
*tr.Status.AtProvider.ID +} + +// GetParameters of this AssetFilter +func (tr *AssetFilter) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this AssetFilter +func (tr *AssetFilter) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this AssetFilter +func (tr *AssetFilter) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this AssetFilter +func (tr *AssetFilter) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this AssetFilter using its observed tfState. 
+// returns True if there are any spec changes for the resource. +func (tr *AssetFilter) LateInitialize(attrs []byte) (bool, error) { + params := &AssetFilterParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *AssetFilter) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/media/v1beta2/zz_assetfilter_types.go b/apis/media/v1beta2/zz_assetfilter_types.go new file mode 100755 index 000000000..e709a7226 --- /dev/null +++ b/apis/media/v1beta2/zz_assetfilter_types.go @@ -0,0 +1,266 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AssetFilterInitParameters struct { + + // The first quality bitrate. Sets the first video track to appear in the Live Streaming playlist to allow HLS native players to start downloading from this quality level at the beginning. + FirstQualityBitrate *float64 `json:"firstQualityBitrate,omitempty" tf:"first_quality_bitrate,omitempty"` + + // A presentation_time_range block as defined below. + PresentationTimeRange *PresentationTimeRangeInitParameters `json:"presentationTimeRange,omitempty" tf:"presentation_time_range,omitempty"` + + // One or more track_selection blocks as defined below. 
+ TrackSelection []TrackSelectionInitParameters `json:"trackSelection,omitempty" tf:"track_selection,omitempty"` +} + +type AssetFilterObservation struct { + + // The Asset ID for which the Asset Filter should be created. Changing this forces a new Asset Filter to be created. + AssetID *string `json:"assetId,omitempty" tf:"asset_id,omitempty"` + + // The first quality bitrate. Sets the first video track to appear in the Live Streaming playlist to allow HLS native players to start downloading from this quality level at the beginning. + FirstQualityBitrate *float64 `json:"firstQualityBitrate,omitempty" tf:"first_quality_bitrate,omitempty"` + + // The ID of the Asset Filter. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A presentation_time_range block as defined below. + PresentationTimeRange *PresentationTimeRangeObservation `json:"presentationTimeRange,omitempty" tf:"presentation_time_range,omitempty"` + + // One or more track_selection blocks as defined below. + TrackSelection []TrackSelectionObservation `json:"trackSelection,omitempty" tf:"track_selection,omitempty"` +} + +type AssetFilterParameters struct { + + // The Asset ID for which the Asset Filter should be created. Changing this forces a new Asset Filter to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/media/v1beta1.Asset + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + AssetID *string `json:"assetId,omitempty" tf:"asset_id,omitempty"` + + // Reference to a Asset in media to populate assetId. + // +kubebuilder:validation:Optional + AssetIDRef *v1.Reference `json:"assetIdRef,omitempty" tf:"-"` + + // Selector for a Asset in media to populate assetId. + // +kubebuilder:validation:Optional + AssetIDSelector *v1.Selector `json:"assetIdSelector,omitempty" tf:"-"` + + // The first quality bitrate. 
Sets the first video track to appear in the Live Streaming playlist to allow HLS native players to start downloading from this quality level at the beginning. + // +kubebuilder:validation:Optional + FirstQualityBitrate *float64 `json:"firstQualityBitrate,omitempty" tf:"first_quality_bitrate,omitempty"` + + // A presentation_time_range block as defined below. + // +kubebuilder:validation:Optional + PresentationTimeRange *PresentationTimeRangeParameters `json:"presentationTimeRange,omitempty" tf:"presentation_time_range,omitempty"` + + // One or more track_selection blocks as defined below. + // +kubebuilder:validation:Optional + TrackSelection []TrackSelectionParameters `json:"trackSelection,omitempty" tf:"track_selection,omitempty"` +} + +type ConditionInitParameters struct { + + // The condition operation to test a track property against. Supported values are Equal and NotEqual. + Operation *string `json:"operation,omitempty" tf:"operation,omitempty"` + + // The track property to compare. Supported values are Bitrate, FourCC, Language, Name and Type. Check documentation for more details. + Property *string `json:"property,omitempty" tf:"property,omitempty"` + + // The track property value to match or not match. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ConditionObservation struct { + + // The condition operation to test a track property against. Supported values are Equal and NotEqual. + Operation *string `json:"operation,omitempty" tf:"operation,omitempty"` + + // The track property to compare. Supported values are Bitrate, FourCC, Language, Name and Type. Check documentation for more details. + Property *string `json:"property,omitempty" tf:"property,omitempty"` + + // The track property value to match or not match. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ConditionParameters struct { + + // The condition operation to test a track property against. Supported values are Equal and NotEqual. 
+ // +kubebuilder:validation:Optional + Operation *string `json:"operation,omitempty" tf:"operation,omitempty"` + + // The track property to compare. Supported values are Bitrate, FourCC, Language, Name and Type. Check documentation for more details. + // +kubebuilder:validation:Optional + Property *string `json:"property,omitempty" tf:"property,omitempty"` + + // The track property value to match or not match. + // +kubebuilder:validation:Optional + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type PresentationTimeRangeInitParameters struct { + + // The absolute end time boundary. Applies to Video on Demand (VoD). + // For the Live Streaming presentation, it is silently ignored and applied when the presentation ends and the stream becomes VoD. This is a long value that represents an absolute end point of the presentation, rounded to the closest next GOP start. The unit is defined by unit_timescale_in_miliseconds, so an end_in_units of 180 would be for 3 minutes. Use start_in_units and end_in_units to trim the fragments that will be in the playlist (manifest). For example, start_in_units set to 20 and end_in_units set to 60 using unit_timescale_in_miliseconds in 1000 will generate a playlist that contains fragments from between 20 seconds and 60 seconds of the VoD presentation. If a fragment straddles the boundary, the entire fragment will be included in the manifest. + EndInUnits *float64 `json:"endInUnits,omitempty" tf:"end_in_units,omitempty"` + + // Indicates whether the end_in_units property must be present. If true, end_in_units must be specified or a bad request code is returned. Applies to Live Streaming only. Allowed values: false, true. + ForceEnd *bool `json:"forceEnd,omitempty" tf:"force_end,omitempty"` + + // The relative to end right edge. Applies to Live Streaming only. + // This value defines the latest live position that a client can seek to. 
Using this property, you can delay live playback position and create a server-side buffer for players. The unit is defined by unit_timescale_in_miliseconds. The maximum live back off duration is 300 seconds. For example, a value of 20 means that the latest available content is 20 seconds delayed from the real live edge. + LiveBackoffInUnits *float64 `json:"liveBackoffInUnits,omitempty" tf:"live_backoff_in_units,omitempty"` + + // The relative to end sliding window. Applies to Live Streaming only. Use presentation_window_in_units to apply a sliding window of fragments to include in a playlist. The unit is defined by unit_timescale_in_miliseconds. For example, set presentation_window_in_units to 120 to apply a two-minute sliding window. Media within 2 minutes of the live edge will be included in the playlist. If a fragment straddles the boundary, the entire fragment will be included in the playlist. The minimum presentation window duration is 60 seconds. + PresentationWindowInUnits *float64 `json:"presentationWindowInUnits,omitempty" tf:"presentation_window_in_units,omitempty"` + + // The absolute start time boundary. Applies to Video on Demand (VoD) or Live Streaming. This is a long value that represents an absolute start point of the stream. The value gets rounded to the closest next GOP start. The unit is defined by unit_timescale_in_miliseconds, so a start_in_units of 15 would be for 15 seconds. Use start_in_units and end_in_units to trim the fragments that will be in the playlist (manifest). For example, start_in_units set to 20 and end_in_units set to 60 using unit_timescale_in_miliseconds in 1000 will generate a playlist that contains fragments from between 20 seconds and 60 seconds of the VoD presentation. If a fragment straddles the boundary, the entire fragment will be included in the manifest. + StartInUnits *float64 `json:"startInUnits,omitempty" tf:"start_in_units,omitempty"` + + // Specified as the number of miliseconds in one unit timescale. 
For example, if you want to set a start_in_units at 30 seconds, you would use a value of 30 when using the unit_timescale_in_miliseconds in 1000. Or if you want to set start_in_units in 30 miliseconds, you would use a value of 30 when using the unit_timescale_in_miliseconds in 1. Applies timescale to start_in_units, start_timescale and presentation_window_in_timescale and live_backoff_in_timescale. + UnitTimescaleInMiliseconds *float64 `json:"unitTimescaleInMiliseconds,omitempty" tf:"unit_timescale_in_miliseconds,omitempty"` +} + +type PresentationTimeRangeObservation struct { + + // The absolute end time boundary. Applies to Video on Demand (VoD). + // For the Live Streaming presentation, it is silently ignored and applied when the presentation ends and the stream becomes VoD. This is a long value that represents an absolute end point of the presentation, rounded to the closest next GOP start. The unit is defined by unit_timescale_in_miliseconds, so an end_in_units of 180 would be for 3 minutes. Use start_in_units and end_in_units to trim the fragments that will be in the playlist (manifest). For example, start_in_units set to 20 and end_in_units set to 60 using unit_timescale_in_miliseconds in 1000 will generate a playlist that contains fragments from between 20 seconds and 60 seconds of the VoD presentation. If a fragment straddles the boundary, the entire fragment will be included in the manifest. + EndInUnits *float64 `json:"endInUnits,omitempty" tf:"end_in_units,omitempty"` + + // Indicates whether the end_in_units property must be present. If true, end_in_units must be specified or a bad request code is returned. Applies to Live Streaming only. Allowed values: false, true. + ForceEnd *bool `json:"forceEnd,omitempty" tf:"force_end,omitempty"` + + // The relative to end right edge. Applies to Live Streaming only. + // This value defines the latest live position that a client can seek to. 
Using this property, you can delay live playback position and create a server-side buffer for players. The unit is defined by unit_timescale_in_miliseconds. The maximum live back off duration is 300 seconds. For example, a value of 20 means that the latest available content is 20 seconds delayed from the real live edge. + LiveBackoffInUnits *float64 `json:"liveBackoffInUnits,omitempty" tf:"live_backoff_in_units,omitempty"` + + // The relative to end sliding window. Applies to Live Streaming only. Use presentation_window_in_units to apply a sliding window of fragments to include in a playlist. The unit is defined by unit_timescale_in_miliseconds. For example, set presentation_window_in_units to 120 to apply a two-minute sliding window. Media within 2 minutes of the live edge will be included in the playlist. If a fragment straddles the boundary, the entire fragment will be included in the playlist. The minimum presentation window duration is 60 seconds. + PresentationWindowInUnits *float64 `json:"presentationWindowInUnits,omitempty" tf:"presentation_window_in_units,omitempty"` + + // The absolute start time boundary. Applies to Video on Demand (VoD) or Live Streaming. This is a long value that represents an absolute start point of the stream. The value gets rounded to the closest next GOP start. The unit is defined by unit_timescale_in_miliseconds, so a start_in_units of 15 would be for 15 seconds. Use start_in_units and end_in_units to trim the fragments that will be in the playlist (manifest). For example, start_in_units set to 20 and end_in_units set to 60 using unit_timescale_in_miliseconds in 1000 will generate a playlist that contains fragments from between 20 seconds and 60 seconds of the VoD presentation. If a fragment straddles the boundary, the entire fragment will be included in the manifest. + StartInUnits *float64 `json:"startInUnits,omitempty" tf:"start_in_units,omitempty"` + + // Specified as the number of miliseconds in one unit timescale. 
For example, if you want to set a start_in_units at 30 seconds, you would use a value of 30 when using the unit_timescale_in_miliseconds in 1000. Or if you want to set start_in_units in 30 miliseconds, you would use a value of 30 when using the unit_timescale_in_miliseconds in 1. Applies timescale to start_in_units, start_timescale and presentation_window_in_timescale and live_backoff_in_timescale. + UnitTimescaleInMiliseconds *float64 `json:"unitTimescaleInMiliseconds,omitempty" tf:"unit_timescale_in_miliseconds,omitempty"` +} + +type PresentationTimeRangeParameters struct { + + // The absolute end time boundary. Applies to Video on Demand (VoD). + // For the Live Streaming presentation, it is silently ignored and applied when the presentation ends and the stream becomes VoD. This is a long value that represents an absolute end point of the presentation, rounded to the closest next GOP start. The unit is defined by unit_timescale_in_miliseconds, so an end_in_units of 180 would be for 3 minutes. Use start_in_units and end_in_units to trim the fragments that will be in the playlist (manifest). For example, start_in_units set to 20 and end_in_units set to 60 using unit_timescale_in_miliseconds in 1000 will generate a playlist that contains fragments from between 20 seconds and 60 seconds of the VoD presentation. If a fragment straddles the boundary, the entire fragment will be included in the manifest. + // +kubebuilder:validation:Optional + EndInUnits *float64 `json:"endInUnits,omitempty" tf:"end_in_units,omitempty"` + + // Indicates whether the end_in_units property must be present. If true, end_in_units must be specified or a bad request code is returned. Applies to Live Streaming only. Allowed values: false, true. + // +kubebuilder:validation:Optional + ForceEnd *bool `json:"forceEnd,omitempty" tf:"force_end,omitempty"` + + // The relative to end right edge. Applies to Live Streaming only. 
+ // This value defines the latest live position that a client can seek to. Using this property, you can delay live playback position and create a server-side buffer for players. The unit is defined by unit_timescale_in_miliseconds. The maximum live back off duration is 300 seconds. For example, a value of 20 means that the latest available content is 20 seconds delayed from the real live edge. + // +kubebuilder:validation:Optional + LiveBackoffInUnits *float64 `json:"liveBackoffInUnits,omitempty" tf:"live_backoff_in_units,omitempty"` + + // The relative to end sliding window. Applies to Live Streaming only. Use presentation_window_in_units to apply a sliding window of fragments to include in a playlist. The unit is defined by unit_timescale_in_miliseconds. For example, set presentation_window_in_units to 120 to apply a two-minute sliding window. Media within 2 minutes of the live edge will be included in the playlist. If a fragment straddles the boundary, the entire fragment will be included in the playlist. The minimum presentation window duration is 60 seconds. + // +kubebuilder:validation:Optional + PresentationWindowInUnits *float64 `json:"presentationWindowInUnits,omitempty" tf:"presentation_window_in_units,omitempty"` + + // The absolute start time boundary. Applies to Video on Demand (VoD) or Live Streaming. This is a long value that represents an absolute start point of the stream. The value gets rounded to the closest next GOP start. The unit is defined by unit_timescale_in_miliseconds, so a start_in_units of 15 would be for 15 seconds. Use start_in_units and end_in_units to trim the fragments that will be in the playlist (manifest). For example, start_in_units set to 20 and end_in_units set to 60 using unit_timescale_in_miliseconds in 1000 will generate a playlist that contains fragments from between 20 seconds and 60 seconds of the VoD presentation. If a fragment straddles the boundary, the entire fragment will be included in the manifest. 
+ // +kubebuilder:validation:Optional + StartInUnits *float64 `json:"startInUnits,omitempty" tf:"start_in_units,omitempty"` + + // Specified as the number of miliseconds in one unit timescale. For example, if you want to set a start_in_units at 30 seconds, you would use a value of 30 when using the unit_timescale_in_miliseconds in 1000. Or if you want to set start_in_units in 30 miliseconds, you would use a value of 30 when using the unit_timescale_in_miliseconds in 1. Applies timescale to start_in_units, start_timescale and presentation_window_in_timescale and live_backoff_in_timescale. + // +kubebuilder:validation:Optional + UnitTimescaleInMiliseconds *float64 `json:"unitTimescaleInMiliseconds,omitempty" tf:"unit_timescale_in_miliseconds,omitempty"` +} + +type TrackSelectionInitParameters struct { + + // One or more condition blocks as defined above. + Condition []ConditionInitParameters `json:"condition,omitempty" tf:"condition,omitempty"` +} + +type TrackSelectionObservation struct { + + // One or more condition blocks as defined above. + Condition []ConditionObservation `json:"condition,omitempty" tf:"condition,omitempty"` +} + +type TrackSelectionParameters struct { + + // One or more condition blocks as defined above. + // +kubebuilder:validation:Optional + Condition []ConditionParameters `json:"condition" tf:"condition,omitempty"` +} + +// AssetFilterSpec defines the desired state of AssetFilter +type AssetFilterSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider AssetFilterParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. 
+ // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider AssetFilterInitParameters `json:"initProvider,omitempty"` +} + +// AssetFilterStatus defines the observed state of AssetFilter. +type AssetFilterStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider AssetFilterObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// AssetFilter is the Schema for the AssetFilters API. Manages an Azure Media Asset Filter. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type AssetFilter struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec AssetFilterSpec `json:"spec"` + Status AssetFilterStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// AssetFilterList contains a list of AssetFilters +type AssetFilterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []AssetFilter `json:"items"` +} + +// Repository type metadata. +var ( + AssetFilter_Kind = "AssetFilter" + AssetFilter_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: AssetFilter_Kind}.String() + AssetFilter_KindAPIVersion = AssetFilter_Kind + "." 
+ CRDGroupVersion.String() + AssetFilter_GroupVersionKind = CRDGroupVersion.WithKind(AssetFilter_Kind) +) + +func init() { + SchemeBuilder.Register(&AssetFilter{}, &AssetFilterList{}) +} diff --git a/apis/media/v1beta2/zz_contentkeypolicy_terraformed.go b/apis/media/v1beta2/zz_contentkeypolicy_terraformed.go new file mode 100755 index 000000000..12fb92a70 --- /dev/null +++ b/apis/media/v1beta2/zz_contentkeypolicy_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ContentKeyPolicy +func (mg *ContentKeyPolicy) GetTerraformResourceType() string { + return "azurerm_media_content_key_policy" +} + +// GetConnectionDetailsMapping for this ContentKeyPolicy +func (tr *ContentKeyPolicy) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"policy_option[*].fairplay_configuration[*].ask": "spec.forProvider.policyOption[*].fairplayConfiguration[*].askSecretRef", "policy_option[*].fairplay_configuration[*].pfx": "spec.forProvider.policyOption[*].fairplayConfiguration[*].pfxSecretRef", "policy_option[*].fairplay_configuration[*].pfx_password": "spec.forProvider.policyOption[*].fairplayConfiguration[*].pfxPasswordSecretRef", "policy_option[*].playready_configuration_license[*].grace_period": "spec.forProvider.policyOption[*].playreadyConfigurationLicense[*].gracePeriodSecretRef", "policy_option[*].token_restriction[*].alternate_key[*].rsa_token_key_exponent": "spec.forProvider.policyOption[*].tokenRestriction[*].alternateKey[*].rsaTokenKeyExponentSecretRef", "policy_option[*].token_restriction[*].alternate_key[*].rsa_token_key_modulus": 
"spec.forProvider.policyOption[*].tokenRestriction[*].alternateKey[*].rsaTokenKeyModulusSecretRef", "policy_option[*].token_restriction[*].alternate_key[*].symmetric_token_key": "spec.forProvider.policyOption[*].tokenRestriction[*].alternateKey[*].symmetricTokenKeySecretRef", "policy_option[*].token_restriction[*].alternate_key[*].x509_token_key_raw": "spec.forProvider.policyOption[*].tokenRestriction[*].alternateKey[*].x509TokenKeyRawSecretRef", "policy_option[*].token_restriction[*].primary_rsa_token_key_exponent": "spec.forProvider.policyOption[*].tokenRestriction[*].primaryRsaTokenKeyExponentSecretRef", "policy_option[*].token_restriction[*].primary_rsa_token_key_modulus": "spec.forProvider.policyOption[*].tokenRestriction[*].primaryRsaTokenKeyModulusSecretRef", "policy_option[*].token_restriction[*].primary_symmetric_token_key": "spec.forProvider.policyOption[*].tokenRestriction[*].primarySymmetricTokenKeySecretRef", "policy_option[*].token_restriction[*].primary_x509_token_key_raw": "spec.forProvider.policyOption[*].tokenRestriction[*].primaryX509TokenKeyRawSecretRef"} +} + +// GetObservation of this ContentKeyPolicy +func (tr *ContentKeyPolicy) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ContentKeyPolicy +func (tr *ContentKeyPolicy) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ContentKeyPolicy +func (tr *ContentKeyPolicy) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ContentKeyPolicy +func (tr *ContentKeyPolicy) GetParameters() (map[string]any, error) { + p, err := 
json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ContentKeyPolicy +func (tr *ContentKeyPolicy) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ContentKeyPolicy +func (tr *ContentKeyPolicy) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this ContentKeyPolicy +func (tr *ContentKeyPolicy) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ContentKeyPolicy using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *ContentKeyPolicy) LateInitialize(attrs []byte) (bool, error) { + params := &ContentKeyPolicyParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ContentKeyPolicy) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/media/v1beta2/zz_contentkeypolicy_types.go b/apis/media/v1beta2/zz_contentkeypolicy_types.go new file mode 100755 index 000000000..dfd723d62 --- /dev/null +++ b/apis/media/v1beta2/zz_contentkeypolicy_types.go @@ -0,0 +1,738 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AlternateKeyInitParameters struct { +} + +type AlternateKeyObservation struct { +} + +type AlternateKeyParameters struct { + + // The RSA parameter exponent. + // +kubebuilder:validation:Optional + RsaTokenKeyExponentSecretRef *v1.SecretKeySelector `json:"rsaTokenKeyExponentSecretRef,omitempty" tf:"-"` + + // The RSA parameter modulus. + // +kubebuilder:validation:Optional + RsaTokenKeyModulusSecretRef *v1.SecretKeySelector `json:"rsaTokenKeyModulusSecretRef,omitempty" tf:"-"` + + // The key value of the key. Specifies a symmetric key for token validation. 
+ // +kubebuilder:validation:Optional + SymmetricTokenKeySecretRef *v1.SecretKeySelector `json:"symmetricTokenKeySecretRef,omitempty" tf:"-"` + + // The raw data field of a certificate in PKCS 12 format (X509Certificate2 in .NET). Specifies a certificate for token validation. + // +kubebuilder:validation:Optional + X509TokenKeyRawSecretRef *v1.SecretKeySelector `json:"x509TokenKeyRawSecretRef,omitempty" tf:"-"` +} + +type ContentKeyPolicyInitParameters struct { + + // A description for the Policy. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // One or more policy_option blocks as defined below. + PolicyOption []PolicyOptionInitParameters `json:"policyOption,omitempty" tf:"policy_option,omitempty"` +} + +type ContentKeyPolicyObservation struct { + + // A description for the Policy. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The ID of the Content Key Policy. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The Media Services account name. Changing this forces a new Content Key Policy to be created. + MediaServicesAccountName *string `json:"mediaServicesAccountName,omitempty" tf:"media_services_account_name,omitempty"` + + // One or more policy_option blocks as defined below. + PolicyOption []PolicyOptionObservation `json:"policyOption,omitempty" tf:"policy_option,omitempty"` + + // The name of the Resource Group where the Content Key Policy should exist. Changing this forces a new Content Key Policy to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` +} + +type ContentKeyPolicyParameters struct { + + // A description for the Policy. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The Media Services account name. Changing this forces a new Content Key Policy to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/media/v1beta2.ServicesAccount + // +kubebuilder:validation:Optional + MediaServicesAccountName *string `json:"mediaServicesAccountName,omitempty" tf:"media_services_account_name,omitempty"` + + // Reference to a ServicesAccount in media to populate mediaServicesAccountName. + // +kubebuilder:validation:Optional + MediaServicesAccountNameRef *v1.Reference `json:"mediaServicesAccountNameRef,omitempty" tf:"-"` + + // Selector for a ServicesAccount in media to populate mediaServicesAccountName. + // +kubebuilder:validation:Optional + MediaServicesAccountNameSelector *v1.Selector `json:"mediaServicesAccountNameSelector,omitempty" tf:"-"` + + // One or more policy_option blocks as defined below. + // +kubebuilder:validation:Optional + PolicyOption []PolicyOptionParameters `json:"policyOption,omitempty" tf:"policy_option,omitempty"` + + // The name of the Resource Group where the Content Key Policy should exist. Changing this forces a new Content Key Policy to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` +} + +type ExplicitAnalogTelevisionOutputRestrictionInitParameters struct { + + // Indicates whether this restriction is enforced on a best effort basis. Possible values are true or false. Defaults to false. 
+ BestEffortEnforced *bool `json:"bestEffortEnforced,omitempty" tf:"best_effort_enforced,omitempty"` + + // The restriction control bits. Possible value is integer between 0 and 3 inclusive. + ControlBits *float64 `json:"controlBits,omitempty" tf:"control_bits,omitempty"` +} + +type ExplicitAnalogTelevisionOutputRestrictionObservation struct { + + // Indicates whether this restriction is enforced on a best effort basis. Possible values are true or false. Defaults to false. + BestEffortEnforced *bool `json:"bestEffortEnforced,omitempty" tf:"best_effort_enforced,omitempty"` + + // The restriction control bits. Possible value is integer between 0 and 3 inclusive. + ControlBits *float64 `json:"controlBits,omitempty" tf:"control_bits,omitempty"` +} + +type ExplicitAnalogTelevisionOutputRestrictionParameters struct { + + // Indicates whether this restriction is enforced on a best effort basis. Possible values are true or false. Defaults to false. + // +kubebuilder:validation:Optional + BestEffortEnforced *bool `json:"bestEffortEnforced,omitempty" tf:"best_effort_enforced,omitempty"` + + // The restriction control bits. Possible value is integer between 0 and 3 inclusive. + // +kubebuilder:validation:Optional + ControlBits *float64 `json:"controlBits" tf:"control_bits,omitempty"` +} + +type FairplayConfigurationInitParameters struct { + + // A offline_rental_configuration block as defined below. + OfflineRentalConfiguration *OfflineRentalConfigurationInitParameters `json:"offlineRentalConfiguration,omitempty" tf:"offline_rental_configuration,omitempty"` + + // The rental and lease key type. Supported values are DualExpiry, PersistentLimited, PersistentUnlimited or Undefined. + RentalAndLeaseKeyType *string `json:"rentalAndLeaseKeyType,omitempty" tf:"rental_and_lease_key_type,omitempty"` + + // The rental duration. Must be greater than 0. 
+ RentalDurationSeconds *float64 `json:"rentalDurationSeconds,omitempty" tf:"rental_duration_seconds,omitempty"` +} + +type FairplayConfigurationObservation struct { + + // A offline_rental_configuration block as defined below. + OfflineRentalConfiguration *OfflineRentalConfigurationObservation `json:"offlineRentalConfiguration,omitempty" tf:"offline_rental_configuration,omitempty"` + + // The rental and lease key type. Supported values are DualExpiry, PersistentLimited, PersistentUnlimited or Undefined. + RentalAndLeaseKeyType *string `json:"rentalAndLeaseKeyType,omitempty" tf:"rental_and_lease_key_type,omitempty"` + + // The rental duration. Must be greater than 0. + RentalDurationSeconds *float64 `json:"rentalDurationSeconds,omitempty" tf:"rental_duration_seconds,omitempty"` +} + +type FairplayConfigurationParameters struct { + + // The key that must be used as FairPlay Application Secret key. + // +kubebuilder:validation:Optional + AskSecretRef *v1.SecretKeySelector `json:"askSecretRef,omitempty" tf:"-"` + + // A offline_rental_configuration block as defined below. + // +kubebuilder:validation:Optional + OfflineRentalConfiguration *OfflineRentalConfigurationParameters `json:"offlineRentalConfiguration,omitempty" tf:"offline_rental_configuration,omitempty"` + + // The password encrypting FairPlay certificate in PKCS 12 (pfx) format. + // +kubebuilder:validation:Optional + PfxPasswordSecretRef *v1.SecretKeySelector `json:"pfxPasswordSecretRef,omitempty" tf:"-"` + + // The Base64 representation of FairPlay certificate in PKCS 12 (pfx) format (including private key). + // +kubebuilder:validation:Optional + PfxSecretRef *v1.SecretKeySelector `json:"pfxSecretRef,omitempty" tf:"-"` + + // The rental and lease key type. Supported values are DualExpiry, PersistentLimited, PersistentUnlimited or Undefined. 
+ // +kubebuilder:validation:Optional + RentalAndLeaseKeyType *string `json:"rentalAndLeaseKeyType,omitempty" tf:"rental_and_lease_key_type,omitempty"` + + // The rental duration. Must be greater than 0. + // +kubebuilder:validation:Optional + RentalDurationSeconds *float64 `json:"rentalDurationSeconds,omitempty" tf:"rental_duration_seconds,omitempty"` +} + +type OfflineRentalConfigurationInitParameters struct { + + // Playback duration. + PlaybackDurationSeconds *float64 `json:"playbackDurationSeconds,omitempty" tf:"playback_duration_seconds,omitempty"` + + // Storage duration. + StorageDurationSeconds *float64 `json:"storageDurationSeconds,omitempty" tf:"storage_duration_seconds,omitempty"` +} + +type OfflineRentalConfigurationObservation struct { + + // Playback duration. + PlaybackDurationSeconds *float64 `json:"playbackDurationSeconds,omitempty" tf:"playback_duration_seconds,omitempty"` + + // Storage duration. + StorageDurationSeconds *float64 `json:"storageDurationSeconds,omitempty" tf:"storage_duration_seconds,omitempty"` +} + +type OfflineRentalConfigurationParameters struct { + + // Playback duration. + // +kubebuilder:validation:Optional + PlaybackDurationSeconds *float64 `json:"playbackDurationSeconds,omitempty" tf:"playback_duration_seconds,omitempty"` + + // Storage duration. + // +kubebuilder:validation:Optional + StorageDurationSeconds *float64 `json:"storageDurationSeconds,omitempty" tf:"storage_duration_seconds,omitempty"` +} + +type PlayRightInitParameters struct { + + // Configures Automatic Gain Control (AGC) and Color Stripe in the license. Must be between 0 and 3 inclusive. + AgcAndColorStripeRestriction *float64 `json:"agcAndColorStripeRestriction,omitempty" tf:"agc_and_color_stripe_restriction,omitempty"` + + // Configures Unknown output handling settings of the license. Supported values are Allowed, AllowedWithVideoConstriction or NotAllowed. 
+ AllowPassingVideoContentToUnknownOutput *string `json:"allowPassingVideoContentToUnknownOutput,omitempty" tf:"allow_passing_video_content_to_unknown_output,omitempty"` + + // Specifies the output protection level for compressed digital audio. Supported values are 100, 150 or 200. + AnalogVideoOpl *float64 `json:"analogVideoOpl,omitempty" tf:"analog_video_opl,omitempty"` + + // Specifies the output protection level for compressed digital audio.Supported values are 100, 150, 200, 250 or 300. + CompressedDigitalAudioOpl *float64 `json:"compressedDigitalAudioOpl,omitempty" tf:"compressed_digital_audio_opl,omitempty"` + + // Specifies the output protection level for compressed digital video. Supported values are 400 or 500. + CompressedDigitalVideoOpl *float64 `json:"compressedDigitalVideoOpl,omitempty" tf:"compressed_digital_video_opl,omitempty"` + + // Enables the Image Constraint For Analog Component Video Restriction in the license. + DigitalVideoOnlyContentRestriction *bool `json:"digitalVideoOnlyContentRestriction,omitempty" tf:"digital_video_only_content_restriction,omitempty"` + + // An explicit_analog_television_output_restriction block as defined above. + ExplicitAnalogTelevisionOutputRestriction *ExplicitAnalogTelevisionOutputRestrictionInitParameters `json:"explicitAnalogTelevisionOutputRestriction,omitempty" tf:"explicit_analog_television_output_restriction,omitempty"` + + // The amount of time that the license is valid after the license is first used to play content. + FirstPlayExpiration *string `json:"firstPlayExpiration,omitempty" tf:"first_play_expiration,omitempty"` + + // Enables the Image Constraint For Analog Component Video Restriction in the license. + ImageConstraintForAnalogComponentVideoRestriction *bool `json:"imageConstraintForAnalogComponentVideoRestriction,omitempty" tf:"image_constraint_for_analog_component_video_restriction,omitempty"` + + // Enables the Image Constraint For Analog Component Video Restriction in the license. 
+ ImageConstraintForAnalogComputerMonitorRestriction *bool `json:"imageConstraintForAnalogComputerMonitorRestriction,omitempty" tf:"image_constraint_for_analog_computer_monitor_restriction,omitempty"` + + // Configures the Serial Copy Management System (SCMS) in the license. Must be between 0 and 3 inclusive. + ScmsRestriction *float64 `json:"scmsRestriction,omitempty" tf:"scms_restriction,omitempty"` + + // Specifies the output protection level for uncompressed digital audio. Supported values are 100, 150, 200, 250 or 300. + UncompressedDigitalAudioOpl *float64 `json:"uncompressedDigitalAudioOpl,omitempty" tf:"uncompressed_digital_audio_opl,omitempty"` + + // Specifies the output protection level for uncompressed digital video. Supported values are 100, 250, 270 or 300. + UncompressedDigitalVideoOpl *float64 `json:"uncompressedDigitalVideoOpl,omitempty" tf:"uncompressed_digital_video_opl,omitempty"` +} + +type PlayRightObservation struct { + + // Configures Automatic Gain Control (AGC) and Color Stripe in the license. Must be between 0 and 3 inclusive. + AgcAndColorStripeRestriction *float64 `json:"agcAndColorStripeRestriction,omitempty" tf:"agc_and_color_stripe_restriction,omitempty"` + + // Configures Unknown output handling settings of the license. Supported values are Allowed, AllowedWithVideoConstriction or NotAllowed. + AllowPassingVideoContentToUnknownOutput *string `json:"allowPassingVideoContentToUnknownOutput,omitempty" tf:"allow_passing_video_content_to_unknown_output,omitempty"` + + // Specifies the output protection level for compressed digital audio. Supported values are 100, 150 or 200. + AnalogVideoOpl *float64 `json:"analogVideoOpl,omitempty" tf:"analog_video_opl,omitempty"` + + // Specifies the output protection level for compressed digital audio.Supported values are 100, 150, 200, 250 or 300. 
+ CompressedDigitalAudioOpl *float64 `json:"compressedDigitalAudioOpl,omitempty" tf:"compressed_digital_audio_opl,omitempty"` + + // Specifies the output protection level for compressed digital video. Supported values are 400 or 500. + CompressedDigitalVideoOpl *float64 `json:"compressedDigitalVideoOpl,omitempty" tf:"compressed_digital_video_opl,omitempty"` + + // Enables the Image Constraint For Analog Component Video Restriction in the license. + DigitalVideoOnlyContentRestriction *bool `json:"digitalVideoOnlyContentRestriction,omitempty" tf:"digital_video_only_content_restriction,omitempty"` + + // An explicit_analog_television_output_restriction block as defined above. + ExplicitAnalogTelevisionOutputRestriction *ExplicitAnalogTelevisionOutputRestrictionObservation `json:"explicitAnalogTelevisionOutputRestriction,omitempty" tf:"explicit_analog_television_output_restriction,omitempty"` + + // The amount of time that the license is valid after the license is first used to play content. + FirstPlayExpiration *string `json:"firstPlayExpiration,omitempty" tf:"first_play_expiration,omitempty"` + + // Enables the Image Constraint For Analog Component Video Restriction in the license. + ImageConstraintForAnalogComponentVideoRestriction *bool `json:"imageConstraintForAnalogComponentVideoRestriction,omitempty" tf:"image_constraint_for_analog_component_video_restriction,omitempty"` + + // Enables the Image Constraint For Analog Component Video Restriction in the license. + ImageConstraintForAnalogComputerMonitorRestriction *bool `json:"imageConstraintForAnalogComputerMonitorRestriction,omitempty" tf:"image_constraint_for_analog_computer_monitor_restriction,omitempty"` + + // Configures the Serial Copy Management System (SCMS) in the license. Must be between 0 and 3 inclusive. + ScmsRestriction *float64 `json:"scmsRestriction,omitempty" tf:"scms_restriction,omitempty"` + + // Specifies the output protection level for uncompressed digital audio. 
Supported values are 100, 150, 200, 250 or 300. + UncompressedDigitalAudioOpl *float64 `json:"uncompressedDigitalAudioOpl,omitempty" tf:"uncompressed_digital_audio_opl,omitempty"` + + // Specifies the output protection level for uncompressed digital video. Supported values are 100, 250, 270 or 300. + UncompressedDigitalVideoOpl *float64 `json:"uncompressedDigitalVideoOpl,omitempty" tf:"uncompressed_digital_video_opl,omitempty"` +} + +type PlayRightParameters struct { + + // Configures Automatic Gain Control (AGC) and Color Stripe in the license. Must be between 0 and 3 inclusive. + // +kubebuilder:validation:Optional + AgcAndColorStripeRestriction *float64 `json:"agcAndColorStripeRestriction,omitempty" tf:"agc_and_color_stripe_restriction,omitempty"` + + // Configures Unknown output handling settings of the license. Supported values are Allowed, AllowedWithVideoConstriction or NotAllowed. + // +kubebuilder:validation:Optional + AllowPassingVideoContentToUnknownOutput *string `json:"allowPassingVideoContentToUnknownOutput,omitempty" tf:"allow_passing_video_content_to_unknown_output,omitempty"` + + // Specifies the output protection level for compressed digital audio. Supported values are 100, 150 or 200. + // +kubebuilder:validation:Optional + AnalogVideoOpl *float64 `json:"analogVideoOpl,omitempty" tf:"analog_video_opl,omitempty"` + + // Specifies the output protection level for compressed digital audio.Supported values are 100, 150, 200, 250 or 300. + // +kubebuilder:validation:Optional + CompressedDigitalAudioOpl *float64 `json:"compressedDigitalAudioOpl,omitempty" tf:"compressed_digital_audio_opl,omitempty"` + + // Specifies the output protection level for compressed digital video. Supported values are 400 or 500. 
+ // +kubebuilder:validation:Optional + CompressedDigitalVideoOpl *float64 `json:"compressedDigitalVideoOpl,omitempty" tf:"compressed_digital_video_opl,omitempty"` + + // Enables the Image Constraint For Analog Component Video Restriction in the license. + // +kubebuilder:validation:Optional + DigitalVideoOnlyContentRestriction *bool `json:"digitalVideoOnlyContentRestriction,omitempty" tf:"digital_video_only_content_restriction,omitempty"` + + // An explicit_analog_television_output_restriction block as defined above. + // +kubebuilder:validation:Optional + ExplicitAnalogTelevisionOutputRestriction *ExplicitAnalogTelevisionOutputRestrictionParameters `json:"explicitAnalogTelevisionOutputRestriction,omitempty" tf:"explicit_analog_television_output_restriction,omitempty"` + + // The amount of time that the license is valid after the license is first used to play content. + // +kubebuilder:validation:Optional + FirstPlayExpiration *string `json:"firstPlayExpiration,omitempty" tf:"first_play_expiration,omitempty"` + + // Enables the Image Constraint For Analog Component Video Restriction in the license. + // +kubebuilder:validation:Optional + ImageConstraintForAnalogComponentVideoRestriction *bool `json:"imageConstraintForAnalogComponentVideoRestriction,omitempty" tf:"image_constraint_for_analog_component_video_restriction,omitempty"` + + // Enables the Image Constraint For Analog Component Video Restriction in the license. + // +kubebuilder:validation:Optional + ImageConstraintForAnalogComputerMonitorRestriction *bool `json:"imageConstraintForAnalogComputerMonitorRestriction,omitempty" tf:"image_constraint_for_analog_computer_monitor_restriction,omitempty"` + + // Configures the Serial Copy Management System (SCMS) in the license. Must be between 0 and 3 inclusive. 
+ // +kubebuilder:validation:Optional + ScmsRestriction *float64 `json:"scmsRestriction,omitempty" tf:"scms_restriction,omitempty"` + + // Specifies the output protection level for uncompressed digital audio. Supported values are 100, 150, 200, 250 or 300. + // +kubebuilder:validation:Optional + UncompressedDigitalAudioOpl *float64 `json:"uncompressedDigitalAudioOpl,omitempty" tf:"uncompressed_digital_audio_opl,omitempty"` + + // Specifies the output protection level for uncompressed digital video. Supported values are 100, 250, 270 or 300. + // +kubebuilder:validation:Optional + UncompressedDigitalVideoOpl *float64 `json:"uncompressedDigitalVideoOpl,omitempty" tf:"uncompressed_digital_video_opl,omitempty"` +} + +type PlayreadyConfigurationLicenseInitParameters struct { + + // A flag indicating whether test devices can use the license. + AllowTestDevices *bool `json:"allowTestDevices,omitempty" tf:"allow_test_devices,omitempty"` + + // The begin date of license. + BeginDate *string `json:"beginDate,omitempty" tf:"begin_date,omitempty"` + + // Specifies that the content key ID is in the PlayReady header. + ContentKeyLocationFromHeaderEnabled *bool `json:"contentKeyLocationFromHeaderEnabled,omitempty" tf:"content_key_location_from_header_enabled,omitempty"` + + // The content key ID. Specifies that the content key ID is specified in the PlayReady configuration. + ContentKeyLocationFromKeyID *string `json:"contentKeyLocationFromKeyId,omitempty" tf:"content_key_location_from_key_id,omitempty"` + + // The PlayReady content type. Supported values are UltraVioletDownload, UltraVioletStreaming or Unspecified. + ContentType *string `json:"contentType,omitempty" tf:"content_type,omitempty"` + + // The expiration date of license. + ExpirationDate *string `json:"expirationDate,omitempty" tf:"expiration_date,omitempty"` + + // The license type. Supported values are NonPersistent or Persistent. 
+ LicenseType *string `json:"licenseType,omitempty" tf:"license_type,omitempty"` + + // A play_right block as defined above. + PlayRight *PlayRightInitParameters `json:"playRight,omitempty" tf:"play_right,omitempty"` + + // The relative begin date of license. + RelativeBeginDate *string `json:"relativeBeginDate,omitempty" tf:"relative_begin_date,omitempty"` + + // The relative expiration date of license. + RelativeExpirationDate *string `json:"relativeExpirationDate,omitempty" tf:"relative_expiration_date,omitempty"` + + // The security level of the PlayReady license. Possible values are SL150, SL2000 and SL3000. Please see this document for more information about security level. See this document for more information about SL3000 support. + SecurityLevel *string `json:"securityLevel,omitempty" tf:"security_level,omitempty"` +} + +type PlayreadyConfigurationLicenseObservation struct { + + // A flag indicating whether test devices can use the license. + AllowTestDevices *bool `json:"allowTestDevices,omitempty" tf:"allow_test_devices,omitempty"` + + // The begin date of license. + BeginDate *string `json:"beginDate,omitempty" tf:"begin_date,omitempty"` + + // Specifies that the content key ID is in the PlayReady header. + ContentKeyLocationFromHeaderEnabled *bool `json:"contentKeyLocationFromHeaderEnabled,omitempty" tf:"content_key_location_from_header_enabled,omitempty"` + + // The content key ID. Specifies that the content key ID is specified in the PlayReady configuration. + ContentKeyLocationFromKeyID *string `json:"contentKeyLocationFromKeyId,omitempty" tf:"content_key_location_from_key_id,omitempty"` + + // The PlayReady content type. Supported values are UltraVioletDownload, UltraVioletStreaming or Unspecified. + ContentType *string `json:"contentType,omitempty" tf:"content_type,omitempty"` + + // The expiration date of license. + ExpirationDate *string `json:"expirationDate,omitempty" tf:"expiration_date,omitempty"` + + // The license type. 
Supported values are NonPersistent or Persistent. + LicenseType *string `json:"licenseType,omitempty" tf:"license_type,omitempty"` + + // A play_right block as defined above. + PlayRight *PlayRightObservation `json:"playRight,omitempty" tf:"play_right,omitempty"` + + // The relative begin date of license. + RelativeBeginDate *string `json:"relativeBeginDate,omitempty" tf:"relative_begin_date,omitempty"` + + // The relative expiration date of license. + RelativeExpirationDate *string `json:"relativeExpirationDate,omitempty" tf:"relative_expiration_date,omitempty"` + + // The security level of the PlayReady license. Possible values are SL150, SL2000 and SL3000. Please see this document for more information about security level. See this document for more information about SL3000 support. + SecurityLevel *string `json:"securityLevel,omitempty" tf:"security_level,omitempty"` +} + +type PlayreadyConfigurationLicenseParameters struct { + + // A flag indicating whether test devices can use the license. + // +kubebuilder:validation:Optional + AllowTestDevices *bool `json:"allowTestDevices,omitempty" tf:"allow_test_devices,omitempty"` + + // The begin date of license. + // +kubebuilder:validation:Optional + BeginDate *string `json:"beginDate,omitempty" tf:"begin_date,omitempty"` + + // Specifies that the content key ID is in the PlayReady header. + // +kubebuilder:validation:Optional + ContentKeyLocationFromHeaderEnabled *bool `json:"contentKeyLocationFromHeaderEnabled,omitempty" tf:"content_key_location_from_header_enabled,omitempty"` + + // The content key ID. Specifies that the content key ID is specified in the PlayReady configuration. + // +kubebuilder:validation:Optional + ContentKeyLocationFromKeyID *string `json:"contentKeyLocationFromKeyId,omitempty" tf:"content_key_location_from_key_id,omitempty"` + + // The PlayReady content type. Supported values are UltraVioletDownload, UltraVioletStreaming or Unspecified. 
+ // +kubebuilder:validation:Optional + ContentType *string `json:"contentType,omitempty" tf:"content_type,omitempty"` + + // The expiration date of license. + // +kubebuilder:validation:Optional + ExpirationDate *string `json:"expirationDate,omitempty" tf:"expiration_date,omitempty"` + + // The grace period of license. + // +kubebuilder:validation:Optional + GracePeriodSecretRef *v1.SecretKeySelector `json:"gracePeriodSecretRef,omitempty" tf:"-"` + + // The license type. Supported values are NonPersistent or Persistent. + // +kubebuilder:validation:Optional + LicenseType *string `json:"licenseType,omitempty" tf:"license_type,omitempty"` + + // A play_right block as defined above. + // +kubebuilder:validation:Optional + PlayRight *PlayRightParameters `json:"playRight,omitempty" tf:"play_right,omitempty"` + + // The relative begin date of license. + // +kubebuilder:validation:Optional + RelativeBeginDate *string `json:"relativeBeginDate,omitempty" tf:"relative_begin_date,omitempty"` + + // The relative expiration date of license. + // +kubebuilder:validation:Optional + RelativeExpirationDate *string `json:"relativeExpirationDate,omitempty" tf:"relative_expiration_date,omitempty"` + + // The security level of the PlayReady license. Possible values are SL150, SL2000 and SL3000. Please see this document for more information about security level. See this document for more information about SL3000 support. + // +kubebuilder:validation:Optional + SecurityLevel *string `json:"securityLevel,omitempty" tf:"security_level,omitempty"` +} + +type PolicyOptionInitParameters struct { + + // Enable a configuration for non-DRM keys. + ClearKeyConfigurationEnabled *bool `json:"clearKeyConfigurationEnabled,omitempty" tf:"clear_key_configuration_enabled,omitempty"` + + // A fairplay_configuration block as defined above. Check license requirements here https://docs.microsoft.com/azure/media-services/latest/fairplay-license-overview. 
+ FairplayConfiguration *FairplayConfigurationInitParameters `json:"fairplayConfiguration,omitempty" tf:"fairplay_configuration,omitempty"` + + // The name which should be used for this Policy Option. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Enable an open restriction. License or key will be delivered on every request. + OpenRestrictionEnabled *bool `json:"openRestrictionEnabled,omitempty" tf:"open_restriction_enabled,omitempty"` + + // One or more playready_configuration_license blocks as defined above. + PlayreadyConfigurationLicense []PlayreadyConfigurationLicenseInitParameters `json:"playreadyConfigurationLicense,omitempty" tf:"playready_configuration_license,omitempty"` + + // The custom response data of the PlayReady configuration. This only applies when playready_configuration_license is specified. + PlayreadyResponseCustomData *string `json:"playreadyResponseCustomData,omitempty" tf:"playready_response_custom_data,omitempty"` + + // A token_restriction block as defined below. + TokenRestriction *TokenRestrictionInitParameters `json:"tokenRestriction,omitempty" tf:"token_restriction,omitempty"` + + // The Widevine template. + WidevineConfigurationTemplate *string `json:"widevineConfigurationTemplate,omitempty" tf:"widevine_configuration_template,omitempty"` +} + +type PolicyOptionObservation struct { + + // Enable a configuration for non-DRM keys. + ClearKeyConfigurationEnabled *bool `json:"clearKeyConfigurationEnabled,omitempty" tf:"clear_key_configuration_enabled,omitempty"` + + // A fairplay_configuration block as defined above. Check license requirements here https://docs.microsoft.com/azure/media-services/latest/fairplay-license-overview. + FairplayConfiguration *FairplayConfigurationObservation `json:"fairplayConfiguration,omitempty" tf:"fairplay_configuration,omitempty"` + + // The name which should be used for this Policy Option. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Enable an open restriction. 
License or key will be delivered on every request. + OpenRestrictionEnabled *bool `json:"openRestrictionEnabled,omitempty" tf:"open_restriction_enabled,omitempty"` + + // One or more playready_configuration_license blocks as defined above. + PlayreadyConfigurationLicense []PlayreadyConfigurationLicenseObservation `json:"playreadyConfigurationLicense,omitempty" tf:"playready_configuration_license,omitempty"` + + // The custom response data of the PlayReady configuration. This only applies when playready_configuration_license is specified. + PlayreadyResponseCustomData *string `json:"playreadyResponseCustomData,omitempty" tf:"playready_response_custom_data,omitempty"` + + // A token_restriction block as defined below. + TokenRestriction *TokenRestrictionObservation `json:"tokenRestriction,omitempty" tf:"token_restriction,omitempty"` + + // The Widevine template. + WidevineConfigurationTemplate *string `json:"widevineConfigurationTemplate,omitempty" tf:"widevine_configuration_template,omitempty"` +} + +type PolicyOptionParameters struct { + + // Enable a configuration for non-DRM keys. + // +kubebuilder:validation:Optional + ClearKeyConfigurationEnabled *bool `json:"clearKeyConfigurationEnabled,omitempty" tf:"clear_key_configuration_enabled,omitempty"` + + // A fairplay_configuration block as defined above. Check license requirements here https://docs.microsoft.com/azure/media-services/latest/fairplay-license-overview. + // +kubebuilder:validation:Optional + FairplayConfiguration *FairplayConfigurationParameters `json:"fairplayConfiguration,omitempty" tf:"fairplay_configuration,omitempty"` + + // The name which should be used for this Policy Option. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Enable an open restriction. License or key will be delivered on every request. 
+ // +kubebuilder:validation:Optional + OpenRestrictionEnabled *bool `json:"openRestrictionEnabled,omitempty" tf:"open_restriction_enabled,omitempty"` + + // One or more playready_configuration_license blocks as defined above. + // +kubebuilder:validation:Optional + PlayreadyConfigurationLicense []PlayreadyConfigurationLicenseParameters `json:"playreadyConfigurationLicense,omitempty" tf:"playready_configuration_license,omitempty"` + + // The custom response data of the PlayReady configuration. This only applies when playready_configuration_license is specified. + // +kubebuilder:validation:Optional + PlayreadyResponseCustomData *string `json:"playreadyResponseCustomData,omitempty" tf:"playready_response_custom_data,omitempty"` + + // A token_restriction block as defined below. + // +kubebuilder:validation:Optional + TokenRestriction *TokenRestrictionParameters `json:"tokenRestriction,omitempty" tf:"token_restriction,omitempty"` + + // The Widevine template. + // +kubebuilder:validation:Optional + WidevineConfigurationTemplate *string `json:"widevineConfigurationTemplate,omitempty" tf:"widevine_configuration_template,omitempty"` +} + +type RequiredClaimInitParameters struct { + + // Token claim type. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // Token claim value. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type RequiredClaimObservation struct { + + // Token claim type. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // Token claim value. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type RequiredClaimParameters struct { + + // Token claim type. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // Token claim value. + // +kubebuilder:validation:Optional + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type TokenRestrictionInitParameters struct { + + // One or more alternate_key block as defined above. 
+ AlternateKey []AlternateKeyInitParameters `json:"alternateKey,omitempty" tf:"alternate_key,omitempty"` + + // The audience for the token. + Audience *string `json:"audience,omitempty" tf:"audience,omitempty"` + + // The token issuer. + Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"` + + // The OpenID connect discovery document. + OpenIDConnectDiscoveryDocument *string `json:"openIdConnectDiscoveryDocument,omitempty" tf:"open_id_connect_discovery_document,omitempty"` + + // One or more required_claim blocks as defined above. + RequiredClaim []RequiredClaimInitParameters `json:"requiredClaim,omitempty" tf:"required_claim,omitempty"` + + // The type of token. Supported values are Jwt or Swt. + TokenType *string `json:"tokenType,omitempty" tf:"token_type,omitempty"` +} + +type TokenRestrictionObservation struct { + + // One or more alternate_key block as defined above. + AlternateKey []AlternateKeyParameters `json:"alternateKey,omitempty" tf:"alternate_key,omitempty"` + + // The audience for the token. + Audience *string `json:"audience,omitempty" tf:"audience,omitempty"` + + // The token issuer. + Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"` + + // The OpenID connect discovery document. + OpenIDConnectDiscoveryDocument *string `json:"openIdConnectDiscoveryDocument,omitempty" tf:"open_id_connect_discovery_document,omitempty"` + + // One or more required_claim blocks as defined above. + RequiredClaim []RequiredClaimObservation `json:"requiredClaim,omitempty" tf:"required_claim,omitempty"` + + // The type of token. Supported values are Jwt or Swt. + TokenType *string `json:"tokenType,omitempty" tf:"token_type,omitempty"` +} + +type TokenRestrictionParameters struct { + + // One or more alternate_key block as defined above. + // +kubebuilder:validation:Optional + AlternateKey []AlternateKeyParameters `json:"alternateKey,omitempty" tf:"alternate_key,omitempty"` + + // The audience for the token. 
+ // +kubebuilder:validation:Optional + Audience *string `json:"audience,omitempty" tf:"audience,omitempty"` + + // The token issuer. + // +kubebuilder:validation:Optional + Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"` + + // The OpenID connect discovery document. + // +kubebuilder:validation:Optional + OpenIDConnectDiscoveryDocument *string `json:"openIdConnectDiscoveryDocument,omitempty" tf:"open_id_connect_discovery_document,omitempty"` + + // The RSA parameter exponent. + // +kubebuilder:validation:Optional + PrimaryRsaTokenKeyExponentSecretRef *v1.SecretKeySelector `json:"primaryRsaTokenKeyExponentSecretRef,omitempty" tf:"-"` + + // The RSA parameter modulus. + // +kubebuilder:validation:Optional + PrimaryRsaTokenKeyModulusSecretRef *v1.SecretKeySelector `json:"primaryRsaTokenKeyModulusSecretRef,omitempty" tf:"-"` + + // The key value of the key. Specifies a symmetric key for token validation. + // +kubebuilder:validation:Optional + PrimarySymmetricTokenKeySecretRef *v1.SecretKeySelector `json:"primarySymmetricTokenKeySecretRef,omitempty" tf:"-"` + + // The raw data field of a certificate in PKCS 12 format (X509Certificate2 in .NET). Specifies a certificate for token validation. + // +kubebuilder:validation:Optional + PrimaryX509TokenKeyRawSecretRef *v1.SecretKeySelector `json:"primaryX509TokenKeyRawSecretRef,omitempty" tf:"-"` + + // One or more required_claim blocks as defined above. + // +kubebuilder:validation:Optional + RequiredClaim []RequiredClaimParameters `json:"requiredClaim,omitempty" tf:"required_claim,omitempty"` + + // The type of token. Supported values are Jwt or Swt. + // +kubebuilder:validation:Optional + TokenType *string `json:"tokenType,omitempty" tf:"token_type,omitempty"` +} + +// ContentKeyPolicySpec defines the desired state of ContentKeyPolicy +type ContentKeyPolicySpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ContentKeyPolicyParameters `json:"forProvider"` + // THIS IS A BETA FIELD. 
It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ContentKeyPolicyInitParameters `json:"initProvider,omitempty"` +} + +// ContentKeyPolicyStatus defines the observed state of ContentKeyPolicy. +type ContentKeyPolicyStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ContentKeyPolicyObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ContentKeyPolicy is the Schema for the ContentKeyPolicys API. Manages a Content Key Policy. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type ContentKeyPolicy struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.policyOption) || (has(self.initProvider) && has(self.initProvider.policyOption))",message="spec.forProvider.policyOption is a required parameter" + Spec ContentKeyPolicySpec `json:"spec"` + Status ContentKeyPolicyStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ContentKeyPolicyList contains a list of ContentKeyPolicys +type ContentKeyPolicyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ContentKeyPolicy `json:"items"` +} + +// Repository type metadata. +var ( + ContentKeyPolicy_Kind = "ContentKeyPolicy" + ContentKeyPolicy_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ContentKeyPolicy_Kind}.String() + ContentKeyPolicy_KindAPIVersion = ContentKeyPolicy_Kind + "." 
+ CRDGroupVersion.String() + ContentKeyPolicy_GroupVersionKind = CRDGroupVersion.WithKind(ContentKeyPolicy_Kind) +) + +func init() { + SchemeBuilder.Register(&ContentKeyPolicy{}, &ContentKeyPolicyList{}) +} diff --git a/apis/media/v1beta2/zz_generated.conversion_hubs.go b/apis/media/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 000000000..72b11c454 --- /dev/null +++ b/apis/media/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *AssetFilter) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *ContentKeyPolicy) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Job) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *LiveEvent) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *ServicesAccount) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *ServicesAccountFilter) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *StreamingEndpoint) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *StreamingPolicy) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Transform) Hub() {} diff --git a/apis/media/v1beta2/zz_generated.deepcopy.go b/apis/media/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..90327453c --- /dev/null +++ b/apis/media/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,12425 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AacAudioInitParameters) DeepCopyInto(out *AacAudioInitParameters) { + *out = *in + if in.Bitrate != nil { + in, out := &in.Bitrate, &out.Bitrate + *out = new(float64) + **out = **in + } + if in.Channels != nil { + in, out := &in.Channels, &out.Channels + *out = new(float64) + **out = **in + } + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.Profile != nil { + in, out := &in.Profile, &out.Profile + *out = new(string) + **out = **in + } + if in.SamplingRate != nil { + in, out := &in.SamplingRate, &out.SamplingRate + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AacAudioInitParameters. +func (in *AacAudioInitParameters) DeepCopy() *AacAudioInitParameters { + if in == nil { + return nil + } + out := new(AacAudioInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AacAudioObservation) DeepCopyInto(out *AacAudioObservation) { + *out = *in + if in.Bitrate != nil { + in, out := &in.Bitrate, &out.Bitrate + *out = new(float64) + **out = **in + } + if in.Channels != nil { + in, out := &in.Channels, &out.Channels + *out = new(float64) + **out = **in + } + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.Profile != nil { + in, out := &in.Profile, &out.Profile + *out = new(string) + **out = **in + } + if in.SamplingRate != nil { + in, out := &in.SamplingRate, &out.SamplingRate + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AacAudioObservation. +func (in *AacAudioObservation) DeepCopy() *AacAudioObservation { + if in == nil { + return nil + } + out := new(AacAudioObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AacAudioParameters) DeepCopyInto(out *AacAudioParameters) { + *out = *in + if in.Bitrate != nil { + in, out := &in.Bitrate, &out.Bitrate + *out = new(float64) + **out = **in + } + if in.Channels != nil { + in, out := &in.Channels, &out.Channels + *out = new(float64) + **out = **in + } + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.Profile != nil { + in, out := &in.Profile, &out.Profile + *out = new(string) + **out = **in + } + if in.SamplingRate != nil { + in, out := &in.SamplingRate, &out.SamplingRate + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AacAudioParameters. 
+func (in *AacAudioParameters) DeepCopy() *AacAudioParameters { + if in == nil { + return nil + } + out := new(AacAudioParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessControlInitParameters) DeepCopyInto(out *AccessControlInitParameters) { + *out = *in + if in.AkamaiSignatureHeaderAuthenticationKey != nil { + in, out := &in.AkamaiSignatureHeaderAuthenticationKey, &out.AkamaiSignatureHeaderAuthenticationKey + *out = make([]AkamaiSignatureHeaderAuthenticationKeyInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAllow != nil { + in, out := &in.IPAllow, &out.IPAllow + *out = make([]IPAllowInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessControlInitParameters. +func (in *AccessControlInitParameters) DeepCopy() *AccessControlInitParameters { + if in == nil { + return nil + } + out := new(AccessControlInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AccessControlObservation) DeepCopyInto(out *AccessControlObservation) { + *out = *in + if in.AkamaiSignatureHeaderAuthenticationKey != nil { + in, out := &in.AkamaiSignatureHeaderAuthenticationKey, &out.AkamaiSignatureHeaderAuthenticationKey + *out = make([]AkamaiSignatureHeaderAuthenticationKeyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAllow != nil { + in, out := &in.IPAllow, &out.IPAllow + *out = make([]IPAllowObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessControlObservation. +func (in *AccessControlObservation) DeepCopy() *AccessControlObservation { + if in == nil { + return nil + } + out := new(AccessControlObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessControlParameters) DeepCopyInto(out *AccessControlParameters) { + *out = *in + if in.AkamaiSignatureHeaderAuthenticationKey != nil { + in, out := &in.AkamaiSignatureHeaderAuthenticationKey, &out.AkamaiSignatureHeaderAuthenticationKey + *out = make([]AkamaiSignatureHeaderAuthenticationKeyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAllow != nil { + in, out := &in.IPAllow, &out.IPAllow + *out = make([]IPAllowParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessControlParameters. +func (in *AccessControlParameters) DeepCopy() *AccessControlParameters { + if in == nil { + return nil + } + out := new(AccessControlParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AkamaiSignatureHeaderAuthenticationKeyInitParameters) DeepCopyInto(out *AkamaiSignatureHeaderAuthenticationKeyInitParameters) { + *out = *in + if in.Base64Key != nil { + in, out := &in.Base64Key, &out.Base64Key + *out = new(string) + **out = **in + } + if in.Expiration != nil { + in, out := &in.Expiration, &out.Expiration + *out = new(string) + **out = **in + } + if in.Identifier != nil { + in, out := &in.Identifier, &out.Identifier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AkamaiSignatureHeaderAuthenticationKeyInitParameters. +func (in *AkamaiSignatureHeaderAuthenticationKeyInitParameters) DeepCopy() *AkamaiSignatureHeaderAuthenticationKeyInitParameters { + if in == nil { + return nil + } + out := new(AkamaiSignatureHeaderAuthenticationKeyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AkamaiSignatureHeaderAuthenticationKeyObservation) DeepCopyInto(out *AkamaiSignatureHeaderAuthenticationKeyObservation) { + *out = *in + if in.Base64Key != nil { + in, out := &in.Base64Key, &out.Base64Key + *out = new(string) + **out = **in + } + if in.Expiration != nil { + in, out := &in.Expiration, &out.Expiration + *out = new(string) + **out = **in + } + if in.Identifier != nil { + in, out := &in.Identifier, &out.Identifier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AkamaiSignatureHeaderAuthenticationKeyObservation. 
+func (in *AkamaiSignatureHeaderAuthenticationKeyObservation) DeepCopy() *AkamaiSignatureHeaderAuthenticationKeyObservation { + if in == nil { + return nil + } + out := new(AkamaiSignatureHeaderAuthenticationKeyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AkamaiSignatureHeaderAuthenticationKeyParameters) DeepCopyInto(out *AkamaiSignatureHeaderAuthenticationKeyParameters) { + *out = *in + if in.Base64Key != nil { + in, out := &in.Base64Key, &out.Base64Key + *out = new(string) + **out = **in + } + if in.Expiration != nil { + in, out := &in.Expiration, &out.Expiration + *out = new(string) + **out = **in + } + if in.Identifier != nil { + in, out := &in.Identifier, &out.Identifier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AkamaiSignatureHeaderAuthenticationKeyParameters. +func (in *AkamaiSignatureHeaderAuthenticationKeyParameters) DeepCopy() *AkamaiSignatureHeaderAuthenticationKeyParameters { + if in == nil { + return nil + } + out := new(AkamaiSignatureHeaderAuthenticationKeyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlternateKeyInitParameters) DeepCopyInto(out *AlternateKeyInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlternateKeyInitParameters. +func (in *AlternateKeyInitParameters) DeepCopy() *AlternateKeyInitParameters { + if in == nil { + return nil + } + out := new(AlternateKeyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AlternateKeyObservation) DeepCopyInto(out *AlternateKeyObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlternateKeyObservation. +func (in *AlternateKeyObservation) DeepCopy() *AlternateKeyObservation { + if in == nil { + return nil + } + out := new(AlternateKeyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlternateKeyParameters) DeepCopyInto(out *AlternateKeyParameters) { + *out = *in + if in.RsaTokenKeyExponentSecretRef != nil { + in, out := &in.RsaTokenKeyExponentSecretRef, &out.RsaTokenKeyExponentSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.RsaTokenKeyModulusSecretRef != nil { + in, out := &in.RsaTokenKeyModulusSecretRef, &out.RsaTokenKeyModulusSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.SymmetricTokenKeySecretRef != nil { + in, out := &in.SymmetricTokenKeySecretRef, &out.SymmetricTokenKeySecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.X509TokenKeyRawSecretRef != nil { + in, out := &in.X509TokenKeyRawSecretRef, &out.X509TokenKeyRawSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlternateKeyParameters. +func (in *AlternateKeyParameters) DeepCopy() *AlternateKeyParameters { + if in == nil { + return nil + } + out := new(AlternateKeyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AssetFilter) DeepCopyInto(out *AssetFilter) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AssetFilter. +func (in *AssetFilter) DeepCopy() *AssetFilter { + if in == nil { + return nil + } + out := new(AssetFilter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AssetFilter) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AssetFilterInitParameters) DeepCopyInto(out *AssetFilterInitParameters) { + *out = *in + if in.FirstQualityBitrate != nil { + in, out := &in.FirstQualityBitrate, &out.FirstQualityBitrate + *out = new(float64) + **out = **in + } + if in.PresentationTimeRange != nil { + in, out := &in.PresentationTimeRange, &out.PresentationTimeRange + *out = new(PresentationTimeRangeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TrackSelection != nil { + in, out := &in.TrackSelection, &out.TrackSelection + *out = make([]TrackSelectionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AssetFilterInitParameters. +func (in *AssetFilterInitParameters) DeepCopy() *AssetFilterInitParameters { + if in == nil { + return nil + } + out := new(AssetFilterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AssetFilterList) DeepCopyInto(out *AssetFilterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AssetFilter, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AssetFilterList. +func (in *AssetFilterList) DeepCopy() *AssetFilterList { + if in == nil { + return nil + } + out := new(AssetFilterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AssetFilterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AssetFilterObservation) DeepCopyInto(out *AssetFilterObservation) { + *out = *in + if in.AssetID != nil { + in, out := &in.AssetID, &out.AssetID + *out = new(string) + **out = **in + } + if in.FirstQualityBitrate != nil { + in, out := &in.FirstQualityBitrate, &out.FirstQualityBitrate + *out = new(float64) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.PresentationTimeRange != nil { + in, out := &in.PresentationTimeRange, &out.PresentationTimeRange + *out = new(PresentationTimeRangeObservation) + (*in).DeepCopyInto(*out) + } + if in.TrackSelection != nil { + in, out := &in.TrackSelection, &out.TrackSelection + *out = make([]TrackSelectionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AssetFilterObservation. 
+func (in *AssetFilterObservation) DeepCopy() *AssetFilterObservation { + if in == nil { + return nil + } + out := new(AssetFilterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AssetFilterParameters) DeepCopyInto(out *AssetFilterParameters) { + *out = *in + if in.AssetID != nil { + in, out := &in.AssetID, &out.AssetID + *out = new(string) + **out = **in + } + if in.AssetIDRef != nil { + in, out := &in.AssetIDRef, &out.AssetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AssetIDSelector != nil { + in, out := &in.AssetIDSelector, &out.AssetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.FirstQualityBitrate != nil { + in, out := &in.FirstQualityBitrate, &out.FirstQualityBitrate + *out = new(float64) + **out = **in + } + if in.PresentationTimeRange != nil { + in, out := &in.PresentationTimeRange, &out.PresentationTimeRange + *out = new(PresentationTimeRangeParameters) + (*in).DeepCopyInto(*out) + } + if in.TrackSelection != nil { + in, out := &in.TrackSelection, &out.TrackSelection + *out = make([]TrackSelectionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AssetFilterParameters. +func (in *AssetFilterParameters) DeepCopy() *AssetFilterParameters { + if in == nil { + return nil + } + out := new(AssetFilterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AssetFilterSpec) DeepCopyInto(out *AssetFilterSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AssetFilterSpec. +func (in *AssetFilterSpec) DeepCopy() *AssetFilterSpec { + if in == nil { + return nil + } + out := new(AssetFilterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AssetFilterStatus) DeepCopyInto(out *AssetFilterStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AssetFilterStatus. +func (in *AssetFilterStatus) DeepCopy() *AssetFilterStatus { + if in == nil { + return nil + } + out := new(AssetFilterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AudioAnalyzerPresetInitParameters) DeepCopyInto(out *AudioAnalyzerPresetInitParameters) { + *out = *in + if in.AudioAnalysisMode != nil { + in, out := &in.AudioAnalysisMode, &out.AudioAnalysisMode + *out = new(string) + **out = **in + } + if in.AudioLanguage != nil { + in, out := &in.AudioLanguage, &out.AudioLanguage + *out = new(string) + **out = **in + } + if in.ExperimentalOptions != nil { + in, out := &in.ExperimentalOptions, &out.ExperimentalOptions + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AudioAnalyzerPresetInitParameters. +func (in *AudioAnalyzerPresetInitParameters) DeepCopy() *AudioAnalyzerPresetInitParameters { + if in == nil { + return nil + } + out := new(AudioAnalyzerPresetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AudioAnalyzerPresetObservation) DeepCopyInto(out *AudioAnalyzerPresetObservation) { + *out = *in + if in.AudioAnalysisMode != nil { + in, out := &in.AudioAnalysisMode, &out.AudioAnalysisMode + *out = new(string) + **out = **in + } + if in.AudioLanguage != nil { + in, out := &in.AudioLanguage, &out.AudioLanguage + *out = new(string) + **out = **in + } + if in.ExperimentalOptions != nil { + in, out := &in.ExperimentalOptions, &out.ExperimentalOptions + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AudioAnalyzerPresetObservation. +func (in *AudioAnalyzerPresetObservation) DeepCopy() *AudioAnalyzerPresetObservation { + if in == nil { + return nil + } + out := new(AudioAnalyzerPresetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AudioAnalyzerPresetParameters) DeepCopyInto(out *AudioAnalyzerPresetParameters) { + *out = *in + if in.AudioAnalysisMode != nil { + in, out := &in.AudioAnalysisMode, &out.AudioAnalysisMode + *out = new(string) + **out = **in + } + if in.AudioLanguage != nil { + in, out := &in.AudioLanguage, &out.AudioLanguage + *out = new(string) + **out = **in + } + if in.ExperimentalOptions != nil { + in, out := &in.ExperimentalOptions, &out.ExperimentalOptions + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AudioAnalyzerPresetParameters. +func (in *AudioAnalyzerPresetParameters) DeepCopy() *AudioAnalyzerPresetParameters { + if in == nil { + return nil + } + out := new(AudioAnalyzerPresetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AudioInitParameters) DeepCopyInto(out *AudioInitParameters) { + *out = *in + if in.AudioGainLevel != nil { + in, out := &in.AudioGainLevel, &out.AudioGainLevel + *out = new(float64) + **out = **in + } + if in.End != nil { + in, out := &in.End, &out.End + *out = new(string) + **out = **in + } + if in.FadeInDuration != nil { + in, out := &in.FadeInDuration, &out.FadeInDuration + *out = new(string) + **out = **in + } + if in.FadeOutDuration != nil { + in, out := &in.FadeOutDuration, &out.FadeOutDuration + *out = new(string) + **out = **in + } + if in.InputLabel != nil { + in, out := &in.InputLabel, &out.InputLabel + *out = new(string) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AudioInitParameters. +func (in *AudioInitParameters) DeepCopy() *AudioInitParameters { + if in == nil { + return nil + } + out := new(AudioInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AudioObservation) DeepCopyInto(out *AudioObservation) { + *out = *in + if in.AudioGainLevel != nil { + in, out := &in.AudioGainLevel, &out.AudioGainLevel + *out = new(float64) + **out = **in + } + if in.End != nil { + in, out := &in.End, &out.End + *out = new(string) + **out = **in + } + if in.FadeInDuration != nil { + in, out := &in.FadeInDuration, &out.FadeInDuration + *out = new(string) + **out = **in + } + if in.FadeOutDuration != nil { + in, out := &in.FadeOutDuration, &out.FadeOutDuration + *out = new(string) + **out = **in + } + if in.InputLabel != nil { + in, out := &in.InputLabel, &out.InputLabel + *out = new(string) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AudioObservation. +func (in *AudioObservation) DeepCopy() *AudioObservation { + if in == nil { + return nil + } + out := new(AudioObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AudioParameters) DeepCopyInto(out *AudioParameters) { + *out = *in + if in.AudioGainLevel != nil { + in, out := &in.AudioGainLevel, &out.AudioGainLevel + *out = new(float64) + **out = **in + } + if in.End != nil { + in, out := &in.End, &out.End + *out = new(string) + **out = **in + } + if in.FadeInDuration != nil { + in, out := &in.FadeInDuration, &out.FadeInDuration + *out = new(string) + **out = **in + } + if in.FadeOutDuration != nil { + in, out := &in.FadeOutDuration, &out.FadeOutDuration + *out = new(string) + **out = **in + } + if in.InputLabel != nil { + in, out := &in.InputLabel, &out.InputLabel + *out = new(string) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AudioParameters. +func (in *AudioParameters) DeepCopy() *AudioParameters { + if in == nil { + return nil + } + out := new(AudioParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuiltinPresetInitParameters) DeepCopyInto(out *BuiltinPresetInitParameters) { + *out = *in + if in.PresetConfiguration != nil { + in, out := &in.PresetConfiguration, &out.PresetConfiguration + *out = new(PresetConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PresetName != nil { + in, out := &in.PresetName, &out.PresetName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuiltinPresetInitParameters. +func (in *BuiltinPresetInitParameters) DeepCopy() *BuiltinPresetInitParameters { + if in == nil { + return nil + } + out := new(BuiltinPresetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BuiltinPresetObservation) DeepCopyInto(out *BuiltinPresetObservation) { + *out = *in + if in.PresetConfiguration != nil { + in, out := &in.PresetConfiguration, &out.PresetConfiguration + *out = new(PresetConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.PresetName != nil { + in, out := &in.PresetName, &out.PresetName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuiltinPresetObservation. +func (in *BuiltinPresetObservation) DeepCopy() *BuiltinPresetObservation { + if in == nil { + return nil + } + out := new(BuiltinPresetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuiltinPresetParameters) DeepCopyInto(out *BuiltinPresetParameters) { + *out = *in + if in.PresetConfiguration != nil { + in, out := &in.PresetConfiguration, &out.PresetConfiguration + *out = new(PresetConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.PresetName != nil { + in, out := &in.PresetName, &out.PresetName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuiltinPresetParameters. +func (in *BuiltinPresetParameters) DeepCopy() *BuiltinPresetParameters { + if in == nil { + return nil + } + out := new(BuiltinPresetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClearKeyEncryptionInitParameters) DeepCopyInto(out *ClearKeyEncryptionInitParameters) { + *out = *in + if in.CustomKeysAcquisitionURLTemplate != nil { + in, out := &in.CustomKeysAcquisitionURLTemplate, &out.CustomKeysAcquisitionURLTemplate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClearKeyEncryptionInitParameters. +func (in *ClearKeyEncryptionInitParameters) DeepCopy() *ClearKeyEncryptionInitParameters { + if in == nil { + return nil + } + out := new(ClearKeyEncryptionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClearKeyEncryptionObservation) DeepCopyInto(out *ClearKeyEncryptionObservation) { + *out = *in + if in.CustomKeysAcquisitionURLTemplate != nil { + in, out := &in.CustomKeysAcquisitionURLTemplate, &out.CustomKeysAcquisitionURLTemplate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClearKeyEncryptionObservation. +func (in *ClearKeyEncryptionObservation) DeepCopy() *ClearKeyEncryptionObservation { + if in == nil { + return nil + } + out := new(ClearKeyEncryptionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClearKeyEncryptionParameters) DeepCopyInto(out *ClearKeyEncryptionParameters) { + *out = *in + if in.CustomKeysAcquisitionURLTemplate != nil { + in, out := &in.CustomKeysAcquisitionURLTemplate, &out.CustomKeysAcquisitionURLTemplate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClearKeyEncryptionParameters. 
+func (in *ClearKeyEncryptionParameters) DeepCopy() *ClearKeyEncryptionParameters { + if in == nil { + return nil + } + out := new(ClearKeyEncryptionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClearTrackConditionInitParameters) DeepCopyInto(out *ClearTrackConditionInitParameters) { + *out = *in + if in.Operation != nil { + in, out := &in.Operation, &out.Operation + *out = new(string) + **out = **in + } + if in.Property != nil { + in, out := &in.Property, &out.Property + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClearTrackConditionInitParameters. +func (in *ClearTrackConditionInitParameters) DeepCopy() *ClearTrackConditionInitParameters { + if in == nil { + return nil + } + out := new(ClearTrackConditionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClearTrackConditionObservation) DeepCopyInto(out *ClearTrackConditionObservation) { + *out = *in + if in.Operation != nil { + in, out := &in.Operation, &out.Operation + *out = new(string) + **out = **in + } + if in.Property != nil { + in, out := &in.Property, &out.Property + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClearTrackConditionObservation. 
+func (in *ClearTrackConditionObservation) DeepCopy() *ClearTrackConditionObservation { + if in == nil { + return nil + } + out := new(ClearTrackConditionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClearTrackConditionParameters) DeepCopyInto(out *ClearTrackConditionParameters) { + *out = *in + if in.Operation != nil { + in, out := &in.Operation, &out.Operation + *out = new(string) + **out = **in + } + if in.Property != nil { + in, out := &in.Property, &out.Property + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClearTrackConditionParameters. +func (in *ClearTrackConditionParameters) DeepCopy() *ClearTrackConditionParameters { + if in == nil { + return nil + } + out := new(ClearTrackConditionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClearTrackInitParameters) DeepCopyInto(out *ClearTrackInitParameters) { + *out = *in + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = make([]ClearTrackConditionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClearTrackInitParameters. +func (in *ClearTrackInitParameters) DeepCopy() *ClearTrackInitParameters { + if in == nil { + return nil + } + out := new(ClearTrackInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClearTrackObservation) DeepCopyInto(out *ClearTrackObservation) { + *out = *in + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = make([]ClearTrackConditionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClearTrackObservation. +func (in *ClearTrackObservation) DeepCopy() *ClearTrackObservation { + if in == nil { + return nil + } + out := new(ClearTrackObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClearTrackParameters) DeepCopyInto(out *ClearTrackParameters) { + *out = *in + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = make([]ClearTrackConditionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClearTrackParameters. +func (in *ClearTrackParameters) DeepCopy() *ClearTrackParameters { + if in == nil { + return nil + } + out := new(ClearTrackParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CodecInitParameters) DeepCopyInto(out *CodecInitParameters) { + *out = *in + if in.AacAudio != nil { + in, out := &in.AacAudio, &out.AacAudio + *out = new(AacAudioInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CopyAudio != nil { + in, out := &in.CopyAudio, &out.CopyAudio + *out = new(CopyAudioInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CopyVideo != nil { + in, out := &in.CopyVideo, &out.CopyVideo + *out = new(CopyVideoInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DdAudio != nil { + in, out := &in.DdAudio, &out.DdAudio + *out = new(DdAudioInitParameters) + (*in).DeepCopyInto(*out) + } + if in.H264Video != nil { + in, out := &in.H264Video, &out.H264Video + *out = new(H264VideoInitParameters) + (*in).DeepCopyInto(*out) + } + if in.H265Video != nil { + in, out := &in.H265Video, &out.H265Video + *out = new(H265VideoInitParameters) + (*in).DeepCopyInto(*out) + } + if in.JpgImage != nil { + in, out := &in.JpgImage, &out.JpgImage + *out = new(JpgImageInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PngImage != nil { + in, out := &in.PngImage, &out.PngImage + *out = new(PngImageInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodecInitParameters. +func (in *CodecInitParameters) DeepCopy() *CodecInitParameters { + if in == nil { + return nil + } + out := new(CodecInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CodecObservation) DeepCopyInto(out *CodecObservation) { + *out = *in + if in.AacAudio != nil { + in, out := &in.AacAudio, &out.AacAudio + *out = new(AacAudioObservation) + (*in).DeepCopyInto(*out) + } + if in.CopyAudio != nil { + in, out := &in.CopyAudio, &out.CopyAudio + *out = new(CopyAudioObservation) + (*in).DeepCopyInto(*out) + } + if in.CopyVideo != nil { + in, out := &in.CopyVideo, &out.CopyVideo + *out = new(CopyVideoObservation) + (*in).DeepCopyInto(*out) + } + if in.DdAudio != nil { + in, out := &in.DdAudio, &out.DdAudio + *out = new(DdAudioObservation) + (*in).DeepCopyInto(*out) + } + if in.H264Video != nil { + in, out := &in.H264Video, &out.H264Video + *out = new(H264VideoObservation) + (*in).DeepCopyInto(*out) + } + if in.H265Video != nil { + in, out := &in.H265Video, &out.H265Video + *out = new(H265VideoObservation) + (*in).DeepCopyInto(*out) + } + if in.JpgImage != nil { + in, out := &in.JpgImage, &out.JpgImage + *out = new(JpgImageObservation) + (*in).DeepCopyInto(*out) + } + if in.PngImage != nil { + in, out := &in.PngImage, &out.PngImage + *out = new(PngImageObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodecObservation. +func (in *CodecObservation) DeepCopy() *CodecObservation { + if in == nil { + return nil + } + out := new(CodecObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CodecParameters) DeepCopyInto(out *CodecParameters) { + *out = *in + if in.AacAudio != nil { + in, out := &in.AacAudio, &out.AacAudio + *out = new(AacAudioParameters) + (*in).DeepCopyInto(*out) + } + if in.CopyAudio != nil { + in, out := &in.CopyAudio, &out.CopyAudio + *out = new(CopyAudioParameters) + (*in).DeepCopyInto(*out) + } + if in.CopyVideo != nil { + in, out := &in.CopyVideo, &out.CopyVideo + *out = new(CopyVideoParameters) + (*in).DeepCopyInto(*out) + } + if in.DdAudio != nil { + in, out := &in.DdAudio, &out.DdAudio + *out = new(DdAudioParameters) + (*in).DeepCopyInto(*out) + } + if in.H264Video != nil { + in, out := &in.H264Video, &out.H264Video + *out = new(H264VideoParameters) + (*in).DeepCopyInto(*out) + } + if in.H265Video != nil { + in, out := &in.H265Video, &out.H265Video + *out = new(H265VideoParameters) + (*in).DeepCopyInto(*out) + } + if in.JpgImage != nil { + in, out := &in.JpgImage, &out.JpgImage + *out = new(JpgImageParameters) + (*in).DeepCopyInto(*out) + } + if in.PngImage != nil { + in, out := &in.PngImage, &out.PngImage + *out = new(PngImageParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodecParameters. +func (in *CodecParameters) DeepCopy() *CodecParameters { + if in == nil { + return nil + } + out := new(CodecParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CommonEncryptionCbcsInitParameters) DeepCopyInto(out *CommonEncryptionCbcsInitParameters) { + *out = *in + if in.ClearKeyEncryption != nil { + in, out := &in.ClearKeyEncryption, &out.ClearKeyEncryption + *out = new(ClearKeyEncryptionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DefaultContentKey != nil { + in, out := &in.DefaultContentKey, &out.DefaultContentKey + *out = new(DefaultContentKeyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DrmFairplay != nil { + in, out := &in.DrmFairplay, &out.DrmFairplay + *out = new(DrmFairplayInitParameters) + (*in).DeepCopyInto(*out) + } + if in.EnabledProtocols != nil { + in, out := &in.EnabledProtocols, &out.EnabledProtocols + *out = new(EnabledProtocolsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonEncryptionCbcsInitParameters. +func (in *CommonEncryptionCbcsInitParameters) DeepCopy() *CommonEncryptionCbcsInitParameters { + if in == nil { + return nil + } + out := new(CommonEncryptionCbcsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CommonEncryptionCbcsObservation) DeepCopyInto(out *CommonEncryptionCbcsObservation) { + *out = *in + if in.ClearKeyEncryption != nil { + in, out := &in.ClearKeyEncryption, &out.ClearKeyEncryption + *out = new(ClearKeyEncryptionObservation) + (*in).DeepCopyInto(*out) + } + if in.DefaultContentKey != nil { + in, out := &in.DefaultContentKey, &out.DefaultContentKey + *out = new(DefaultContentKeyObservation) + (*in).DeepCopyInto(*out) + } + if in.DrmFairplay != nil { + in, out := &in.DrmFairplay, &out.DrmFairplay + *out = new(DrmFairplayObservation) + (*in).DeepCopyInto(*out) + } + if in.EnabledProtocols != nil { + in, out := &in.EnabledProtocols, &out.EnabledProtocols + *out = new(EnabledProtocolsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonEncryptionCbcsObservation. +func (in *CommonEncryptionCbcsObservation) DeepCopy() *CommonEncryptionCbcsObservation { + if in == nil { + return nil + } + out := new(CommonEncryptionCbcsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CommonEncryptionCbcsParameters) DeepCopyInto(out *CommonEncryptionCbcsParameters) { + *out = *in + if in.ClearKeyEncryption != nil { + in, out := &in.ClearKeyEncryption, &out.ClearKeyEncryption + *out = new(ClearKeyEncryptionParameters) + (*in).DeepCopyInto(*out) + } + if in.DefaultContentKey != nil { + in, out := &in.DefaultContentKey, &out.DefaultContentKey + *out = new(DefaultContentKeyParameters) + (*in).DeepCopyInto(*out) + } + if in.DrmFairplay != nil { + in, out := &in.DrmFairplay, &out.DrmFairplay + *out = new(DrmFairplayParameters) + (*in).DeepCopyInto(*out) + } + if in.EnabledProtocols != nil { + in, out := &in.EnabledProtocols, &out.EnabledProtocols + *out = new(EnabledProtocolsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonEncryptionCbcsParameters. +func (in *CommonEncryptionCbcsParameters) DeepCopy() *CommonEncryptionCbcsParameters { + if in == nil { + return nil + } + out := new(CommonEncryptionCbcsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CommonEncryptionCencClearKeyEncryptionInitParameters) DeepCopyInto(out *CommonEncryptionCencClearKeyEncryptionInitParameters) { + *out = *in + if in.CustomKeysAcquisitionURLTemplate != nil { + in, out := &in.CustomKeysAcquisitionURLTemplate, &out.CustomKeysAcquisitionURLTemplate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonEncryptionCencClearKeyEncryptionInitParameters. 
+func (in *CommonEncryptionCencClearKeyEncryptionInitParameters) DeepCopy() *CommonEncryptionCencClearKeyEncryptionInitParameters { + if in == nil { + return nil + } + out := new(CommonEncryptionCencClearKeyEncryptionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CommonEncryptionCencClearKeyEncryptionObservation) DeepCopyInto(out *CommonEncryptionCencClearKeyEncryptionObservation) { + *out = *in + if in.CustomKeysAcquisitionURLTemplate != nil { + in, out := &in.CustomKeysAcquisitionURLTemplate, &out.CustomKeysAcquisitionURLTemplate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonEncryptionCencClearKeyEncryptionObservation. +func (in *CommonEncryptionCencClearKeyEncryptionObservation) DeepCopy() *CommonEncryptionCencClearKeyEncryptionObservation { + if in == nil { + return nil + } + out := new(CommonEncryptionCencClearKeyEncryptionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CommonEncryptionCencClearKeyEncryptionParameters) DeepCopyInto(out *CommonEncryptionCencClearKeyEncryptionParameters) { + *out = *in + if in.CustomKeysAcquisitionURLTemplate != nil { + in, out := &in.CustomKeysAcquisitionURLTemplate, &out.CustomKeysAcquisitionURLTemplate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonEncryptionCencClearKeyEncryptionParameters. 
+func (in *CommonEncryptionCencClearKeyEncryptionParameters) DeepCopy() *CommonEncryptionCencClearKeyEncryptionParameters { + if in == nil { + return nil + } + out := new(CommonEncryptionCencClearKeyEncryptionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CommonEncryptionCencDefaultContentKeyInitParameters) DeepCopyInto(out *CommonEncryptionCencDefaultContentKeyInitParameters) { + *out = *in + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.PolicyName != nil { + in, out := &in.PolicyName, &out.PolicyName + *out = new(string) + **out = **in + } + if in.PolicyNameRef != nil { + in, out := &in.PolicyNameRef, &out.PolicyNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PolicyNameSelector != nil { + in, out := &in.PolicyNameSelector, &out.PolicyNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonEncryptionCencDefaultContentKeyInitParameters. +func (in *CommonEncryptionCencDefaultContentKeyInitParameters) DeepCopy() *CommonEncryptionCencDefaultContentKeyInitParameters { + if in == nil { + return nil + } + out := new(CommonEncryptionCencDefaultContentKeyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CommonEncryptionCencDefaultContentKeyObservation) DeepCopyInto(out *CommonEncryptionCencDefaultContentKeyObservation) { + *out = *in + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.PolicyName != nil { + in, out := &in.PolicyName, &out.PolicyName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonEncryptionCencDefaultContentKeyObservation. +func (in *CommonEncryptionCencDefaultContentKeyObservation) DeepCopy() *CommonEncryptionCencDefaultContentKeyObservation { + if in == nil { + return nil + } + out := new(CommonEncryptionCencDefaultContentKeyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CommonEncryptionCencDefaultContentKeyParameters) DeepCopyInto(out *CommonEncryptionCencDefaultContentKeyParameters) { + *out = *in + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.PolicyName != nil { + in, out := &in.PolicyName, &out.PolicyName + *out = new(string) + **out = **in + } + if in.PolicyNameRef != nil { + in, out := &in.PolicyNameRef, &out.PolicyNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PolicyNameSelector != nil { + in, out := &in.PolicyNameSelector, &out.PolicyNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonEncryptionCencDefaultContentKeyParameters. 
+func (in *CommonEncryptionCencDefaultContentKeyParameters) DeepCopy() *CommonEncryptionCencDefaultContentKeyParameters { + if in == nil { + return nil + } + out := new(CommonEncryptionCencDefaultContentKeyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CommonEncryptionCencEnabledProtocolsInitParameters) DeepCopyInto(out *CommonEncryptionCencEnabledProtocolsInitParameters) { + *out = *in + if in.Dash != nil { + in, out := &in.Dash, &out.Dash + *out = new(bool) + **out = **in + } + if in.Download != nil { + in, out := &in.Download, &out.Download + *out = new(bool) + **out = **in + } + if in.Hls != nil { + in, out := &in.Hls, &out.Hls + *out = new(bool) + **out = **in + } + if in.SmoothStreaming != nil { + in, out := &in.SmoothStreaming, &out.SmoothStreaming + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonEncryptionCencEnabledProtocolsInitParameters. +func (in *CommonEncryptionCencEnabledProtocolsInitParameters) DeepCopy() *CommonEncryptionCencEnabledProtocolsInitParameters { + if in == nil { + return nil + } + out := new(CommonEncryptionCencEnabledProtocolsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CommonEncryptionCencEnabledProtocolsObservation) DeepCopyInto(out *CommonEncryptionCencEnabledProtocolsObservation) { + *out = *in + if in.Dash != nil { + in, out := &in.Dash, &out.Dash + *out = new(bool) + **out = **in + } + if in.Download != nil { + in, out := &in.Download, &out.Download + *out = new(bool) + **out = **in + } + if in.Hls != nil { + in, out := &in.Hls, &out.Hls + *out = new(bool) + **out = **in + } + if in.SmoothStreaming != nil { + in, out := &in.SmoothStreaming, &out.SmoothStreaming + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonEncryptionCencEnabledProtocolsObservation. +func (in *CommonEncryptionCencEnabledProtocolsObservation) DeepCopy() *CommonEncryptionCencEnabledProtocolsObservation { + if in == nil { + return nil + } + out := new(CommonEncryptionCencEnabledProtocolsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CommonEncryptionCencEnabledProtocolsParameters) DeepCopyInto(out *CommonEncryptionCencEnabledProtocolsParameters) { + *out = *in + if in.Dash != nil { + in, out := &in.Dash, &out.Dash + *out = new(bool) + **out = **in + } + if in.Download != nil { + in, out := &in.Download, &out.Download + *out = new(bool) + **out = **in + } + if in.Hls != nil { + in, out := &in.Hls, &out.Hls + *out = new(bool) + **out = **in + } + if in.SmoothStreaming != nil { + in, out := &in.SmoothStreaming, &out.SmoothStreaming + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonEncryptionCencEnabledProtocolsParameters. 
+func (in *CommonEncryptionCencEnabledProtocolsParameters) DeepCopy() *CommonEncryptionCencEnabledProtocolsParameters { + if in == nil { + return nil + } + out := new(CommonEncryptionCencEnabledProtocolsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CommonEncryptionCencInitParameters) DeepCopyInto(out *CommonEncryptionCencInitParameters) { + *out = *in + if in.ClearKeyEncryption != nil { + in, out := &in.ClearKeyEncryption, &out.ClearKeyEncryption + *out = new(CommonEncryptionCencClearKeyEncryptionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ClearTrack != nil { + in, out := &in.ClearTrack, &out.ClearTrack + *out = make([]ClearTrackInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ContentKeyToTrackMapping != nil { + in, out := &in.ContentKeyToTrackMapping, &out.ContentKeyToTrackMapping + *out = make([]ContentKeyToTrackMappingInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultContentKey != nil { + in, out := &in.DefaultContentKey, &out.DefaultContentKey + *out = new(CommonEncryptionCencDefaultContentKeyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DrmPlayready != nil { + in, out := &in.DrmPlayready, &out.DrmPlayready + *out = new(DrmPlayreadyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DrmWidevineCustomLicenseAcquisitionURLTemplate != nil { + in, out := &in.DrmWidevineCustomLicenseAcquisitionURLTemplate, &out.DrmWidevineCustomLicenseAcquisitionURLTemplate + *out = new(string) + **out = **in + } + if in.EnabledProtocols != nil { + in, out := &in.EnabledProtocols, &out.EnabledProtocols + *out = new(CommonEncryptionCencEnabledProtocolsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
CommonEncryptionCencInitParameters. +func (in *CommonEncryptionCencInitParameters) DeepCopy() *CommonEncryptionCencInitParameters { + if in == nil { + return nil + } + out := new(CommonEncryptionCencInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CommonEncryptionCencObservation) DeepCopyInto(out *CommonEncryptionCencObservation) { + *out = *in + if in.ClearKeyEncryption != nil { + in, out := &in.ClearKeyEncryption, &out.ClearKeyEncryption + *out = new(CommonEncryptionCencClearKeyEncryptionObservation) + (*in).DeepCopyInto(*out) + } + if in.ClearTrack != nil { + in, out := &in.ClearTrack, &out.ClearTrack + *out = make([]ClearTrackObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ContentKeyToTrackMapping != nil { + in, out := &in.ContentKeyToTrackMapping, &out.ContentKeyToTrackMapping + *out = make([]ContentKeyToTrackMappingObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultContentKey != nil { + in, out := &in.DefaultContentKey, &out.DefaultContentKey + *out = new(CommonEncryptionCencDefaultContentKeyObservation) + (*in).DeepCopyInto(*out) + } + if in.DrmPlayready != nil { + in, out := &in.DrmPlayready, &out.DrmPlayready + *out = new(DrmPlayreadyObservation) + (*in).DeepCopyInto(*out) + } + if in.DrmWidevineCustomLicenseAcquisitionURLTemplate != nil { + in, out := &in.DrmWidevineCustomLicenseAcquisitionURLTemplate, &out.DrmWidevineCustomLicenseAcquisitionURLTemplate + *out = new(string) + **out = **in + } + if in.EnabledProtocols != nil { + in, out := &in.EnabledProtocols, &out.EnabledProtocols + *out = new(CommonEncryptionCencEnabledProtocolsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonEncryptionCencObservation. 
+func (in *CommonEncryptionCencObservation) DeepCopy() *CommonEncryptionCencObservation { + if in == nil { + return nil + } + out := new(CommonEncryptionCencObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CommonEncryptionCencParameters) DeepCopyInto(out *CommonEncryptionCencParameters) { + *out = *in + if in.ClearKeyEncryption != nil { + in, out := &in.ClearKeyEncryption, &out.ClearKeyEncryption + *out = new(CommonEncryptionCencClearKeyEncryptionParameters) + (*in).DeepCopyInto(*out) + } + if in.ClearTrack != nil { + in, out := &in.ClearTrack, &out.ClearTrack + *out = make([]ClearTrackParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ContentKeyToTrackMapping != nil { + in, out := &in.ContentKeyToTrackMapping, &out.ContentKeyToTrackMapping + *out = make([]ContentKeyToTrackMappingParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultContentKey != nil { + in, out := &in.DefaultContentKey, &out.DefaultContentKey + *out = new(CommonEncryptionCencDefaultContentKeyParameters) + (*in).DeepCopyInto(*out) + } + if in.DrmPlayready != nil { + in, out := &in.DrmPlayready, &out.DrmPlayready + *out = new(DrmPlayreadyParameters) + (*in).DeepCopyInto(*out) + } + if in.DrmWidevineCustomLicenseAcquisitionURLTemplate != nil { + in, out := &in.DrmWidevineCustomLicenseAcquisitionURLTemplate, &out.DrmWidevineCustomLicenseAcquisitionURLTemplate + *out = new(string) + **out = **in + } + if in.EnabledProtocols != nil { + in, out := &in.EnabledProtocols, &out.EnabledProtocols + *out = new(CommonEncryptionCencEnabledProtocolsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonEncryptionCencParameters. 
+func (in *CommonEncryptionCencParameters) DeepCopy() *CommonEncryptionCencParameters { + if in == nil { + return nil + } + out := new(CommonEncryptionCencParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionInitParameters) DeepCopyInto(out *ConditionInitParameters) { + *out = *in + if in.Operation != nil { + in, out := &in.Operation, &out.Operation + *out = new(string) + **out = **in + } + if in.Property != nil { + in, out := &in.Property, &out.Property + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionInitParameters. +func (in *ConditionInitParameters) DeepCopy() *ConditionInitParameters { + if in == nil { + return nil + } + out := new(ConditionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionObservation) DeepCopyInto(out *ConditionObservation) { + *out = *in + if in.Operation != nil { + in, out := &in.Operation, &out.Operation + *out = new(string) + **out = **in + } + if in.Property != nil { + in, out := &in.Property, &out.Property + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionObservation. +func (in *ConditionObservation) DeepCopy() *ConditionObservation { + if in == nil { + return nil + } + out := new(ConditionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConditionParameters) DeepCopyInto(out *ConditionParameters) { + *out = *in + if in.Operation != nil { + in, out := &in.Operation, &out.Operation + *out = new(string) + **out = **in + } + if in.Property != nil { + in, out := &in.Property, &out.Property + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionParameters. +func (in *ConditionParameters) DeepCopy() *ConditionParameters { + if in == nil { + return nil + } + out := new(ConditionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContentKeyPolicy) DeepCopyInto(out *ContentKeyPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContentKeyPolicy. +func (in *ContentKeyPolicy) DeepCopy() *ContentKeyPolicy { + if in == nil { + return nil + } + out := new(ContentKeyPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ContentKeyPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContentKeyPolicyInitParameters) DeepCopyInto(out *ContentKeyPolicyInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.PolicyOption != nil { + in, out := &in.PolicyOption, &out.PolicyOption + *out = make([]PolicyOptionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContentKeyPolicyInitParameters. +func (in *ContentKeyPolicyInitParameters) DeepCopy() *ContentKeyPolicyInitParameters { + if in == nil { + return nil + } + out := new(ContentKeyPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContentKeyPolicyList) DeepCopyInto(out *ContentKeyPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ContentKeyPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContentKeyPolicyList. +func (in *ContentKeyPolicyList) DeepCopy() *ContentKeyPolicyList { + if in == nil { + return nil + } + out := new(ContentKeyPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ContentKeyPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContentKeyPolicyObservation) DeepCopyInto(out *ContentKeyPolicyObservation) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.MediaServicesAccountName != nil { + in, out := &in.MediaServicesAccountName, &out.MediaServicesAccountName + *out = new(string) + **out = **in + } + if in.PolicyOption != nil { + in, out := &in.PolicyOption, &out.PolicyOption + *out = make([]PolicyOptionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContentKeyPolicyObservation. +func (in *ContentKeyPolicyObservation) DeepCopy() *ContentKeyPolicyObservation { + if in == nil { + return nil + } + out := new(ContentKeyPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContentKeyPolicyParameters) DeepCopyInto(out *ContentKeyPolicyParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.MediaServicesAccountName != nil { + in, out := &in.MediaServicesAccountName, &out.MediaServicesAccountName + *out = new(string) + **out = **in + } + if in.MediaServicesAccountNameRef != nil { + in, out := &in.MediaServicesAccountNameRef, &out.MediaServicesAccountNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.MediaServicesAccountNameSelector != nil { + in, out := &in.MediaServicesAccountNameSelector, &out.MediaServicesAccountNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PolicyOption != nil { + in, out := &in.PolicyOption, &out.PolicyOption + *out = make([]PolicyOptionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContentKeyPolicyParameters. +func (in *ContentKeyPolicyParameters) DeepCopy() *ContentKeyPolicyParameters { + if in == nil { + return nil + } + out := new(ContentKeyPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContentKeyPolicySpec) DeepCopyInto(out *ContentKeyPolicySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContentKeyPolicySpec. +func (in *ContentKeyPolicySpec) DeepCopy() *ContentKeyPolicySpec { + if in == nil { + return nil + } + out := new(ContentKeyPolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContentKeyPolicyStatus) DeepCopyInto(out *ContentKeyPolicyStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContentKeyPolicyStatus. +func (in *ContentKeyPolicyStatus) DeepCopy() *ContentKeyPolicyStatus { + if in == nil { + return nil + } + out := new(ContentKeyPolicyStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContentKeyToTrackMappingInitParameters) DeepCopyInto(out *ContentKeyToTrackMappingInitParameters) { + *out = *in + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.PolicyName != nil { + in, out := &in.PolicyName, &out.PolicyName + *out = new(string) + **out = **in + } + if in.Track != nil { + in, out := &in.Track, &out.Track + *out = make([]TrackInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContentKeyToTrackMappingInitParameters. 
+func (in *ContentKeyToTrackMappingInitParameters) DeepCopy() *ContentKeyToTrackMappingInitParameters { + if in == nil { + return nil + } + out := new(ContentKeyToTrackMappingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContentKeyToTrackMappingObservation) DeepCopyInto(out *ContentKeyToTrackMappingObservation) { + *out = *in + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.PolicyName != nil { + in, out := &in.PolicyName, &out.PolicyName + *out = new(string) + **out = **in + } + if in.Track != nil { + in, out := &in.Track, &out.Track + *out = make([]TrackObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContentKeyToTrackMappingObservation. +func (in *ContentKeyToTrackMappingObservation) DeepCopy() *ContentKeyToTrackMappingObservation { + if in == nil { + return nil + } + out := new(ContentKeyToTrackMappingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContentKeyToTrackMappingParameters) DeepCopyInto(out *ContentKeyToTrackMappingParameters) { + *out = *in + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.PolicyName != nil { + in, out := &in.PolicyName, &out.PolicyName + *out = new(string) + **out = **in + } + if in.Track != nil { + in, out := &in.Track, &out.Track + *out = make([]TrackParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContentKeyToTrackMappingParameters. 
+func (in *ContentKeyToTrackMappingParameters) DeepCopy() *ContentKeyToTrackMappingParameters { + if in == nil { + return nil + } + out := new(ContentKeyToTrackMappingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CopyAudioInitParameters) DeepCopyInto(out *CopyAudioInitParameters) { + *out = *in + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CopyAudioInitParameters. +func (in *CopyAudioInitParameters) DeepCopy() *CopyAudioInitParameters { + if in == nil { + return nil + } + out := new(CopyAudioInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CopyAudioObservation) DeepCopyInto(out *CopyAudioObservation) { + *out = *in + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CopyAudioObservation. +func (in *CopyAudioObservation) DeepCopy() *CopyAudioObservation { + if in == nil { + return nil + } + out := new(CopyAudioObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CopyAudioParameters) DeepCopyInto(out *CopyAudioParameters) { + *out = *in + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CopyAudioParameters. 
+func (in *CopyAudioParameters) DeepCopy() *CopyAudioParameters { + if in == nil { + return nil + } + out := new(CopyAudioParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CopyVideoInitParameters) DeepCopyInto(out *CopyVideoInitParameters) { + *out = *in + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CopyVideoInitParameters. +func (in *CopyVideoInitParameters) DeepCopy() *CopyVideoInitParameters { + if in == nil { + return nil + } + out := new(CopyVideoInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CopyVideoObservation) DeepCopyInto(out *CopyVideoObservation) { + *out = *in + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CopyVideoObservation. +func (in *CopyVideoObservation) DeepCopy() *CopyVideoObservation { + if in == nil { + return nil + } + out := new(CopyVideoObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CopyVideoParameters) DeepCopyInto(out *CopyVideoParameters) { + *out = *in + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CopyVideoParameters. 
+func (in *CopyVideoParameters) DeepCopy() *CopyVideoParameters { + if in == nil { + return nil + } + out := new(CopyVideoParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CropRectangleInitParameters) DeepCopyInto(out *CropRectangleInitParameters) { + *out = *in + if in.Height != nil { + in, out := &in.Height, &out.Height + *out = new(string) + **out = **in + } + if in.Left != nil { + in, out := &in.Left, &out.Left + *out = new(string) + **out = **in + } + if in.Top != nil { + in, out := &in.Top, &out.Top + *out = new(string) + **out = **in + } + if in.Width != nil { + in, out := &in.Width, &out.Width + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CropRectangleInitParameters. +func (in *CropRectangleInitParameters) DeepCopy() *CropRectangleInitParameters { + if in == nil { + return nil + } + out := new(CropRectangleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CropRectangleObservation) DeepCopyInto(out *CropRectangleObservation) { + *out = *in + if in.Height != nil { + in, out := &in.Height, &out.Height + *out = new(string) + **out = **in + } + if in.Left != nil { + in, out := &in.Left, &out.Left + *out = new(string) + **out = **in + } + if in.Top != nil { + in, out := &in.Top, &out.Top + *out = new(string) + **out = **in + } + if in.Width != nil { + in, out := &in.Width, &out.Width + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CropRectangleObservation. 
+func (in *CropRectangleObservation) DeepCopy() *CropRectangleObservation { + if in == nil { + return nil + } + out := new(CropRectangleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CropRectangleParameters) DeepCopyInto(out *CropRectangleParameters) { + *out = *in + if in.Height != nil { + in, out := &in.Height, &out.Height + *out = new(string) + **out = **in + } + if in.Left != nil { + in, out := &in.Left, &out.Left + *out = new(string) + **out = **in + } + if in.Top != nil { + in, out := &in.Top, &out.Top + *out = new(string) + **out = **in + } + if in.Width != nil { + in, out := &in.Width, &out.Width + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CropRectangleParameters. +func (in *CropRectangleParameters) DeepCopy() *CropRectangleParameters { + if in == nil { + return nil + } + out := new(CropRectangleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CrossSiteAccessPolicyInitParameters) DeepCopyInto(out *CrossSiteAccessPolicyInitParameters) { + *out = *in + if in.ClientAccessPolicy != nil { + in, out := &in.ClientAccessPolicy, &out.ClientAccessPolicy + *out = new(string) + **out = **in + } + if in.CrossDomainPolicy != nil { + in, out := &in.CrossDomainPolicy, &out.CrossDomainPolicy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrossSiteAccessPolicyInitParameters. 
+func (in *CrossSiteAccessPolicyInitParameters) DeepCopy() *CrossSiteAccessPolicyInitParameters { + if in == nil { + return nil + } + out := new(CrossSiteAccessPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CrossSiteAccessPolicyObservation) DeepCopyInto(out *CrossSiteAccessPolicyObservation) { + *out = *in + if in.ClientAccessPolicy != nil { + in, out := &in.ClientAccessPolicy, &out.ClientAccessPolicy + *out = new(string) + **out = **in + } + if in.CrossDomainPolicy != nil { + in, out := &in.CrossDomainPolicy, &out.CrossDomainPolicy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrossSiteAccessPolicyObservation. +func (in *CrossSiteAccessPolicyObservation) DeepCopy() *CrossSiteAccessPolicyObservation { + if in == nil { + return nil + } + out := new(CrossSiteAccessPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CrossSiteAccessPolicyParameters) DeepCopyInto(out *CrossSiteAccessPolicyParameters) { + *out = *in + if in.ClientAccessPolicy != nil { + in, out := &in.ClientAccessPolicy, &out.ClientAccessPolicy + *out = new(string) + **out = **in + } + if in.CrossDomainPolicy != nil { + in, out := &in.CrossDomainPolicy, &out.CrossDomainPolicy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrossSiteAccessPolicyParameters. +func (in *CrossSiteAccessPolicyParameters) DeepCopy() *CrossSiteAccessPolicyParameters { + if in == nil { + return nil + } + out := new(CrossSiteAccessPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *CustomPresetInitParameters) DeepCopyInto(out *CustomPresetInitParameters) { + *out = *in + if in.Codec != nil { + in, out := &in.Codec, &out.Codec + *out = make([]CodecInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ExperimentalOptions != nil { + in, out := &in.ExperimentalOptions, &out.ExperimentalOptions + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(FilterInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = make([]FormatInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomPresetInitParameters. +func (in *CustomPresetInitParameters) DeepCopy() *CustomPresetInitParameters { + if in == nil { + return nil + } + out := new(CustomPresetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomPresetObservation) DeepCopyInto(out *CustomPresetObservation) { + *out = *in + if in.Codec != nil { + in, out := &in.Codec, &out.Codec + *out = make([]CodecObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ExperimentalOptions != nil { + in, out := &in.ExperimentalOptions, &out.ExperimentalOptions + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(FilterObservation) + (*in).DeepCopyInto(*out) + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = make([]FormatObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomPresetObservation. +func (in *CustomPresetObservation) DeepCopy() *CustomPresetObservation { + if in == nil { + return nil + } + out := new(CustomPresetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomPresetParameters) DeepCopyInto(out *CustomPresetParameters) { + *out = *in + if in.Codec != nil { + in, out := &in.Codec, &out.Codec + *out = make([]CodecParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ExperimentalOptions != nil { + in, out := &in.ExperimentalOptions, &out.ExperimentalOptions + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(FilterParameters) + (*in).DeepCopyInto(*out) + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = make([]FormatParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomPresetParameters. +func (in *CustomPresetParameters) DeepCopy() *CustomPresetParameters { + if in == nil { + return nil + } + out := new(CustomPresetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DdAudioInitParameters) DeepCopyInto(out *DdAudioInitParameters) { + *out = *in + if in.Bitrate != nil { + in, out := &in.Bitrate, &out.Bitrate + *out = new(float64) + **out = **in + } + if in.Channels != nil { + in, out := &in.Channels, &out.Channels + *out = new(float64) + **out = **in + } + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.SamplingRate != nil { + in, out := &in.SamplingRate, &out.SamplingRate + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DdAudioInitParameters. 
+func (in *DdAudioInitParameters) DeepCopy() *DdAudioInitParameters { + if in == nil { + return nil + } + out := new(DdAudioInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DdAudioObservation) DeepCopyInto(out *DdAudioObservation) { + *out = *in + if in.Bitrate != nil { + in, out := &in.Bitrate, &out.Bitrate + *out = new(float64) + **out = **in + } + if in.Channels != nil { + in, out := &in.Channels, &out.Channels + *out = new(float64) + **out = **in + } + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.SamplingRate != nil { + in, out := &in.SamplingRate, &out.SamplingRate + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DdAudioObservation. +func (in *DdAudioObservation) DeepCopy() *DdAudioObservation { + if in == nil { + return nil + } + out := new(DdAudioObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DdAudioParameters) DeepCopyInto(out *DdAudioParameters) { + *out = *in + if in.Bitrate != nil { + in, out := &in.Bitrate, &out.Bitrate + *out = new(float64) + **out = **in + } + if in.Channels != nil { + in, out := &in.Channels, &out.Channels + *out = new(float64) + **out = **in + } + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.SamplingRate != nil { + in, out := &in.SamplingRate, &out.SamplingRate + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DdAudioParameters. 
+func (in *DdAudioParameters) DeepCopy() *DdAudioParameters { + if in == nil { + return nil + } + out := new(DdAudioParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DefaultContentKeyInitParameters) DeepCopyInto(out *DefaultContentKeyInitParameters) { + *out = *in + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.PolicyName != nil { + in, out := &in.PolicyName, &out.PolicyName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultContentKeyInitParameters. +func (in *DefaultContentKeyInitParameters) DeepCopy() *DefaultContentKeyInitParameters { + if in == nil { + return nil + } + out := new(DefaultContentKeyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DefaultContentKeyObservation) DeepCopyInto(out *DefaultContentKeyObservation) { + *out = *in + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.PolicyName != nil { + in, out := &in.PolicyName, &out.PolicyName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultContentKeyObservation. +func (in *DefaultContentKeyObservation) DeepCopy() *DefaultContentKeyObservation { + if in == nil { + return nil + } + out := new(DefaultContentKeyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DefaultContentKeyParameters) DeepCopyInto(out *DefaultContentKeyParameters) { + *out = *in + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.PolicyName != nil { + in, out := &in.PolicyName, &out.PolicyName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultContentKeyParameters. +func (in *DefaultContentKeyParameters) DeepCopy() *DefaultContentKeyParameters { + if in == nil { + return nil + } + out := new(DefaultContentKeyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeinterlaceInitParameters) DeepCopyInto(out *DeinterlaceInitParameters) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Parity != nil { + in, out := &in.Parity, &out.Parity + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeinterlaceInitParameters. +func (in *DeinterlaceInitParameters) DeepCopy() *DeinterlaceInitParameters { + if in == nil { + return nil + } + out := new(DeinterlaceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeinterlaceObservation) DeepCopyInto(out *DeinterlaceObservation) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Parity != nil { + in, out := &in.Parity, &out.Parity + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeinterlaceObservation. 
+func (in *DeinterlaceObservation) DeepCopy() *DeinterlaceObservation { + if in == nil { + return nil + } + out := new(DeinterlaceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeinterlaceParameters) DeepCopyInto(out *DeinterlaceParameters) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Parity != nil { + in, out := &in.Parity, &out.Parity + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeinterlaceParameters. +func (in *DeinterlaceParameters) DeepCopy() *DeinterlaceParameters { + if in == nil { + return nil + } + out := new(DeinterlaceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DrmFairplayInitParameters) DeepCopyInto(out *DrmFairplayInitParameters) { + *out = *in + if in.AllowPersistentLicense != nil { + in, out := &in.AllowPersistentLicense, &out.AllowPersistentLicense + *out = new(bool) + **out = **in + } + if in.CustomLicenseAcquisitionURLTemplate != nil { + in, out := &in.CustomLicenseAcquisitionURLTemplate, &out.CustomLicenseAcquisitionURLTemplate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DrmFairplayInitParameters. +func (in *DrmFairplayInitParameters) DeepCopy() *DrmFairplayInitParameters { + if in == nil { + return nil + } + out := new(DrmFairplayInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DrmFairplayObservation) DeepCopyInto(out *DrmFairplayObservation) { + *out = *in + if in.AllowPersistentLicense != nil { + in, out := &in.AllowPersistentLicense, &out.AllowPersistentLicense + *out = new(bool) + **out = **in + } + if in.CustomLicenseAcquisitionURLTemplate != nil { + in, out := &in.CustomLicenseAcquisitionURLTemplate, &out.CustomLicenseAcquisitionURLTemplate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DrmFairplayObservation. +func (in *DrmFairplayObservation) DeepCopy() *DrmFairplayObservation { + if in == nil { + return nil + } + out := new(DrmFairplayObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DrmFairplayParameters) DeepCopyInto(out *DrmFairplayParameters) { + *out = *in + if in.AllowPersistentLicense != nil { + in, out := &in.AllowPersistentLicense, &out.AllowPersistentLicense + *out = new(bool) + **out = **in + } + if in.CustomLicenseAcquisitionURLTemplate != nil { + in, out := &in.CustomLicenseAcquisitionURLTemplate, &out.CustomLicenseAcquisitionURLTemplate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DrmFairplayParameters. +func (in *DrmFairplayParameters) DeepCopy() *DrmFairplayParameters { + if in == nil { + return nil + } + out := new(DrmFairplayParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DrmPlayreadyInitParameters) DeepCopyInto(out *DrmPlayreadyInitParameters) { + *out = *in + if in.CustomAttributes != nil { + in, out := &in.CustomAttributes, &out.CustomAttributes + *out = new(string) + **out = **in + } + if in.CustomLicenseAcquisitionURLTemplate != nil { + in, out := &in.CustomLicenseAcquisitionURLTemplate, &out.CustomLicenseAcquisitionURLTemplate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DrmPlayreadyInitParameters. +func (in *DrmPlayreadyInitParameters) DeepCopy() *DrmPlayreadyInitParameters { + if in == nil { + return nil + } + out := new(DrmPlayreadyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DrmPlayreadyObservation) DeepCopyInto(out *DrmPlayreadyObservation) { + *out = *in + if in.CustomAttributes != nil { + in, out := &in.CustomAttributes, &out.CustomAttributes + *out = new(string) + **out = **in + } + if in.CustomLicenseAcquisitionURLTemplate != nil { + in, out := &in.CustomLicenseAcquisitionURLTemplate, &out.CustomLicenseAcquisitionURLTemplate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DrmPlayreadyObservation. +func (in *DrmPlayreadyObservation) DeepCopy() *DrmPlayreadyObservation { + if in == nil { + return nil + } + out := new(DrmPlayreadyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DrmPlayreadyParameters) DeepCopyInto(out *DrmPlayreadyParameters) { + *out = *in + if in.CustomAttributes != nil { + in, out := &in.CustomAttributes, &out.CustomAttributes + *out = new(string) + **out = **in + } + if in.CustomLicenseAcquisitionURLTemplate != nil { + in, out := &in.CustomLicenseAcquisitionURLTemplate, &out.CustomLicenseAcquisitionURLTemplate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DrmPlayreadyParameters. +func (in *DrmPlayreadyParameters) DeepCopy() *DrmPlayreadyParameters { + if in == nil { + return nil + } + out := new(DrmPlayreadyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnabledProtocolsInitParameters) DeepCopyInto(out *EnabledProtocolsInitParameters) { + *out = *in + if in.Dash != nil { + in, out := &in.Dash, &out.Dash + *out = new(bool) + **out = **in + } + if in.Download != nil { + in, out := &in.Download, &out.Download + *out = new(bool) + **out = **in + } + if in.Hls != nil { + in, out := &in.Hls, &out.Hls + *out = new(bool) + **out = **in + } + if in.SmoothStreaming != nil { + in, out := &in.SmoothStreaming, &out.SmoothStreaming + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnabledProtocolsInitParameters. +func (in *EnabledProtocolsInitParameters) DeepCopy() *EnabledProtocolsInitParameters { + if in == nil { + return nil + } + out := new(EnabledProtocolsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EnabledProtocolsObservation) DeepCopyInto(out *EnabledProtocolsObservation) { + *out = *in + if in.Dash != nil { + in, out := &in.Dash, &out.Dash + *out = new(bool) + **out = **in + } + if in.Download != nil { + in, out := &in.Download, &out.Download + *out = new(bool) + **out = **in + } + if in.Hls != nil { + in, out := &in.Hls, &out.Hls + *out = new(bool) + **out = **in + } + if in.SmoothStreaming != nil { + in, out := &in.SmoothStreaming, &out.SmoothStreaming + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnabledProtocolsObservation. +func (in *EnabledProtocolsObservation) DeepCopy() *EnabledProtocolsObservation { + if in == nil { + return nil + } + out := new(EnabledProtocolsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnabledProtocolsParameters) DeepCopyInto(out *EnabledProtocolsParameters) { + *out = *in + if in.Dash != nil { + in, out := &in.Dash, &out.Dash + *out = new(bool) + **out = **in + } + if in.Download != nil { + in, out := &in.Download, &out.Download + *out = new(bool) + **out = **in + } + if in.Hls != nil { + in, out := &in.Hls, &out.Hls + *out = new(bool) + **out = **in + } + if in.SmoothStreaming != nil { + in, out := &in.SmoothStreaming, &out.SmoothStreaming + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnabledProtocolsParameters. +func (in *EnabledProtocolsParameters) DeepCopy() *EnabledProtocolsParameters { + if in == nil { + return nil + } + out := new(EnabledProtocolsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EncodingInitParameters) DeepCopyInto(out *EncodingInitParameters) { + *out = *in + if in.KeyFrameInterval != nil { + in, out := &in.KeyFrameInterval, &out.KeyFrameInterval + *out = new(string) + **out = **in + } + if in.PresetName != nil { + in, out := &in.PresetName, &out.PresetName + *out = new(string) + **out = **in + } + if in.StretchMode != nil { + in, out := &in.StretchMode, &out.StretchMode + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncodingInitParameters. +func (in *EncodingInitParameters) DeepCopy() *EncodingInitParameters { + if in == nil { + return nil + } + out := new(EncodingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncodingObservation) DeepCopyInto(out *EncodingObservation) { + *out = *in + if in.KeyFrameInterval != nil { + in, out := &in.KeyFrameInterval, &out.KeyFrameInterval + *out = new(string) + **out = **in + } + if in.PresetName != nil { + in, out := &in.PresetName, &out.PresetName + *out = new(string) + **out = **in + } + if in.StretchMode != nil { + in, out := &in.StretchMode, &out.StretchMode + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncodingObservation. +func (in *EncodingObservation) DeepCopy() *EncodingObservation { + if in == nil { + return nil + } + out := new(EncodingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EncodingParameters) DeepCopyInto(out *EncodingParameters) { + *out = *in + if in.KeyFrameInterval != nil { + in, out := &in.KeyFrameInterval, &out.KeyFrameInterval + *out = new(string) + **out = **in + } + if in.PresetName != nil { + in, out := &in.PresetName, &out.PresetName + *out = new(string) + **out = **in + } + if in.StretchMode != nil { + in, out := &in.StretchMode, &out.StretchMode + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncodingParameters. +func (in *EncodingParameters) DeepCopy() *EncodingParameters { + if in == nil { + return nil + } + out := new(EncodingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionInitParameters) DeepCopyInto(out *EncryptionInitParameters) { + *out = *in + if in.KeyVaultKeyIdentifier != nil { + in, out := &in.KeyVaultKeyIdentifier, &out.KeyVaultKeyIdentifier + *out = new(string) + **out = **in + } + if in.ManagedIdentity != nil { + in, out := &in.ManagedIdentity, &out.ManagedIdentity + *out = new(ManagedIdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionInitParameters. +func (in *EncryptionInitParameters) DeepCopy() *EncryptionInitParameters { + if in == nil { + return nil + } + out := new(EncryptionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EncryptionObservation) DeepCopyInto(out *EncryptionObservation) { + *out = *in + if in.CurrentKeyIdentifier != nil { + in, out := &in.CurrentKeyIdentifier, &out.CurrentKeyIdentifier + *out = new(string) + **out = **in + } + if in.KeyVaultKeyIdentifier != nil { + in, out := &in.KeyVaultKeyIdentifier, &out.KeyVaultKeyIdentifier + *out = new(string) + **out = **in + } + if in.ManagedIdentity != nil { + in, out := &in.ManagedIdentity, &out.ManagedIdentity + *out = new(ManagedIdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionObservation. +func (in *EncryptionObservation) DeepCopy() *EncryptionObservation { + if in == nil { + return nil + } + out := new(EncryptionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionParameters) DeepCopyInto(out *EncryptionParameters) { + *out = *in + if in.KeyVaultKeyIdentifier != nil { + in, out := &in.KeyVaultKeyIdentifier, &out.KeyVaultKeyIdentifier + *out = new(string) + **out = **in + } + if in.ManagedIdentity != nil { + in, out := &in.ManagedIdentity, &out.ManagedIdentity + *out = new(ManagedIdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionParameters. +func (in *EncryptionParameters) DeepCopy() *EncryptionParameters { + if in == nil { + return nil + } + out := new(EncryptionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EndpointInitParameters) DeepCopyInto(out *EndpointInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointInitParameters. +func (in *EndpointInitParameters) DeepCopy() *EndpointInitParameters { + if in == nil { + return nil + } + out := new(EndpointInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointObservation) DeepCopyInto(out *EndpointObservation) { + *out = *in + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointObservation. +func (in *EndpointObservation) DeepCopy() *EndpointObservation { + if in == nil { + return nil + } + out := new(EndpointObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointParameters) DeepCopyInto(out *EndpointParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointParameters. +func (in *EndpointParameters) DeepCopy() *EndpointParameters { + if in == nil { + return nil + } + out := new(EndpointParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EnvelopeEncryptionDefaultContentKeyInitParameters) DeepCopyInto(out *EnvelopeEncryptionDefaultContentKeyInitParameters) { + *out = *in + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.PolicyName != nil { + in, out := &in.PolicyName, &out.PolicyName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvelopeEncryptionDefaultContentKeyInitParameters. +func (in *EnvelopeEncryptionDefaultContentKeyInitParameters) DeepCopy() *EnvelopeEncryptionDefaultContentKeyInitParameters { + if in == nil { + return nil + } + out := new(EnvelopeEncryptionDefaultContentKeyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvelopeEncryptionDefaultContentKeyObservation) DeepCopyInto(out *EnvelopeEncryptionDefaultContentKeyObservation) { + *out = *in + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.PolicyName != nil { + in, out := &in.PolicyName, &out.PolicyName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvelopeEncryptionDefaultContentKeyObservation. +func (in *EnvelopeEncryptionDefaultContentKeyObservation) DeepCopy() *EnvelopeEncryptionDefaultContentKeyObservation { + if in == nil { + return nil + } + out := new(EnvelopeEncryptionDefaultContentKeyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EnvelopeEncryptionDefaultContentKeyParameters) DeepCopyInto(out *EnvelopeEncryptionDefaultContentKeyParameters) { + *out = *in + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.PolicyName != nil { + in, out := &in.PolicyName, &out.PolicyName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvelopeEncryptionDefaultContentKeyParameters. +func (in *EnvelopeEncryptionDefaultContentKeyParameters) DeepCopy() *EnvelopeEncryptionDefaultContentKeyParameters { + if in == nil { + return nil + } + out := new(EnvelopeEncryptionDefaultContentKeyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvelopeEncryptionEnabledProtocolsInitParameters) DeepCopyInto(out *EnvelopeEncryptionEnabledProtocolsInitParameters) { + *out = *in + if in.Dash != nil { + in, out := &in.Dash, &out.Dash + *out = new(bool) + **out = **in + } + if in.Download != nil { + in, out := &in.Download, &out.Download + *out = new(bool) + **out = **in + } + if in.Hls != nil { + in, out := &in.Hls, &out.Hls + *out = new(bool) + **out = **in + } + if in.SmoothStreaming != nil { + in, out := &in.SmoothStreaming, &out.SmoothStreaming + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvelopeEncryptionEnabledProtocolsInitParameters. +func (in *EnvelopeEncryptionEnabledProtocolsInitParameters) DeepCopy() *EnvelopeEncryptionEnabledProtocolsInitParameters { + if in == nil { + return nil + } + out := new(EnvelopeEncryptionEnabledProtocolsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EnvelopeEncryptionEnabledProtocolsObservation) DeepCopyInto(out *EnvelopeEncryptionEnabledProtocolsObservation) { + *out = *in + if in.Dash != nil { + in, out := &in.Dash, &out.Dash + *out = new(bool) + **out = **in + } + if in.Download != nil { + in, out := &in.Download, &out.Download + *out = new(bool) + **out = **in + } + if in.Hls != nil { + in, out := &in.Hls, &out.Hls + *out = new(bool) + **out = **in + } + if in.SmoothStreaming != nil { + in, out := &in.SmoothStreaming, &out.SmoothStreaming + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvelopeEncryptionEnabledProtocolsObservation. +func (in *EnvelopeEncryptionEnabledProtocolsObservation) DeepCopy() *EnvelopeEncryptionEnabledProtocolsObservation { + if in == nil { + return nil + } + out := new(EnvelopeEncryptionEnabledProtocolsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvelopeEncryptionEnabledProtocolsParameters) DeepCopyInto(out *EnvelopeEncryptionEnabledProtocolsParameters) { + *out = *in + if in.Dash != nil { + in, out := &in.Dash, &out.Dash + *out = new(bool) + **out = **in + } + if in.Download != nil { + in, out := &in.Download, &out.Download + *out = new(bool) + **out = **in + } + if in.Hls != nil { + in, out := &in.Hls, &out.Hls + *out = new(bool) + **out = **in + } + if in.SmoothStreaming != nil { + in, out := &in.SmoothStreaming, &out.SmoothStreaming + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvelopeEncryptionEnabledProtocolsParameters. 
+func (in *EnvelopeEncryptionEnabledProtocolsParameters) DeepCopy() *EnvelopeEncryptionEnabledProtocolsParameters { + if in == nil { + return nil + } + out := new(EnvelopeEncryptionEnabledProtocolsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvelopeEncryptionInitParameters) DeepCopyInto(out *EnvelopeEncryptionInitParameters) { + *out = *in + if in.CustomKeysAcquisitionURLTemplate != nil { + in, out := &in.CustomKeysAcquisitionURLTemplate, &out.CustomKeysAcquisitionURLTemplate + *out = new(string) + **out = **in + } + if in.DefaultContentKey != nil { + in, out := &in.DefaultContentKey, &out.DefaultContentKey + *out = new(EnvelopeEncryptionDefaultContentKeyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.EnabledProtocols != nil { + in, out := &in.EnabledProtocols, &out.EnabledProtocols + *out = new(EnvelopeEncryptionEnabledProtocolsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvelopeEncryptionInitParameters. +func (in *EnvelopeEncryptionInitParameters) DeepCopy() *EnvelopeEncryptionInitParameters { + if in == nil { + return nil + } + out := new(EnvelopeEncryptionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EnvelopeEncryptionObservation) DeepCopyInto(out *EnvelopeEncryptionObservation) { + *out = *in + if in.CustomKeysAcquisitionURLTemplate != nil { + in, out := &in.CustomKeysAcquisitionURLTemplate, &out.CustomKeysAcquisitionURLTemplate + *out = new(string) + **out = **in + } + if in.DefaultContentKey != nil { + in, out := &in.DefaultContentKey, &out.DefaultContentKey + *out = new(EnvelopeEncryptionDefaultContentKeyObservation) + (*in).DeepCopyInto(*out) + } + if in.EnabledProtocols != nil { + in, out := &in.EnabledProtocols, &out.EnabledProtocols + *out = new(EnvelopeEncryptionEnabledProtocolsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvelopeEncryptionObservation. +func (in *EnvelopeEncryptionObservation) DeepCopy() *EnvelopeEncryptionObservation { + if in == nil { + return nil + } + out := new(EnvelopeEncryptionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvelopeEncryptionParameters) DeepCopyInto(out *EnvelopeEncryptionParameters) { + *out = *in + if in.CustomKeysAcquisitionURLTemplate != nil { + in, out := &in.CustomKeysAcquisitionURLTemplate, &out.CustomKeysAcquisitionURLTemplate + *out = new(string) + **out = **in + } + if in.DefaultContentKey != nil { + in, out := &in.DefaultContentKey, &out.DefaultContentKey + *out = new(EnvelopeEncryptionDefaultContentKeyParameters) + (*in).DeepCopyInto(*out) + } + if in.EnabledProtocols != nil { + in, out := &in.EnabledProtocols, &out.EnabledProtocols + *out = new(EnvelopeEncryptionEnabledProtocolsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvelopeEncryptionParameters. 
+func (in *EnvelopeEncryptionParameters) DeepCopy() *EnvelopeEncryptionParameters { + if in == nil { + return nil + } + out := new(EnvelopeEncryptionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExplicitAnalogTelevisionOutputRestrictionInitParameters) DeepCopyInto(out *ExplicitAnalogTelevisionOutputRestrictionInitParameters) { + *out = *in + if in.BestEffortEnforced != nil { + in, out := &in.BestEffortEnforced, &out.BestEffortEnforced + *out = new(bool) + **out = **in + } + if in.ControlBits != nil { + in, out := &in.ControlBits, &out.ControlBits + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExplicitAnalogTelevisionOutputRestrictionInitParameters. +func (in *ExplicitAnalogTelevisionOutputRestrictionInitParameters) DeepCopy() *ExplicitAnalogTelevisionOutputRestrictionInitParameters { + if in == nil { + return nil + } + out := new(ExplicitAnalogTelevisionOutputRestrictionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExplicitAnalogTelevisionOutputRestrictionObservation) DeepCopyInto(out *ExplicitAnalogTelevisionOutputRestrictionObservation) { + *out = *in + if in.BestEffortEnforced != nil { + in, out := &in.BestEffortEnforced, &out.BestEffortEnforced + *out = new(bool) + **out = **in + } + if in.ControlBits != nil { + in, out := &in.ControlBits, &out.ControlBits + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExplicitAnalogTelevisionOutputRestrictionObservation. 
+func (in *ExplicitAnalogTelevisionOutputRestrictionObservation) DeepCopy() *ExplicitAnalogTelevisionOutputRestrictionObservation { + if in == nil { + return nil + } + out := new(ExplicitAnalogTelevisionOutputRestrictionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExplicitAnalogTelevisionOutputRestrictionParameters) DeepCopyInto(out *ExplicitAnalogTelevisionOutputRestrictionParameters) { + *out = *in + if in.BestEffortEnforced != nil { + in, out := &in.BestEffortEnforced, &out.BestEffortEnforced + *out = new(bool) + **out = **in + } + if in.ControlBits != nil { + in, out := &in.ControlBits, &out.ControlBits + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExplicitAnalogTelevisionOutputRestrictionParameters. +func (in *ExplicitAnalogTelevisionOutputRestrictionParameters) DeepCopy() *ExplicitAnalogTelevisionOutputRestrictionParameters { + if in == nil { + return nil + } + out := new(ExplicitAnalogTelevisionOutputRestrictionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FaceDetectorPresetInitParameters) DeepCopyInto(out *FaceDetectorPresetInitParameters) { + *out = *in + if in.AnalysisResolution != nil { + in, out := &in.AnalysisResolution, &out.AnalysisResolution + *out = new(string) + **out = **in + } + if in.BlurType != nil { + in, out := &in.BlurType, &out.BlurType + *out = new(string) + **out = **in + } + if in.ExperimentalOptions != nil { + in, out := &in.ExperimentalOptions, &out.ExperimentalOptions + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.FaceRedactorMode != nil { + in, out := &in.FaceRedactorMode, &out.FaceRedactorMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FaceDetectorPresetInitParameters. +func (in *FaceDetectorPresetInitParameters) DeepCopy() *FaceDetectorPresetInitParameters { + if in == nil { + return nil + } + out := new(FaceDetectorPresetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FaceDetectorPresetObservation) DeepCopyInto(out *FaceDetectorPresetObservation) { + *out = *in + if in.AnalysisResolution != nil { + in, out := &in.AnalysisResolution, &out.AnalysisResolution + *out = new(string) + **out = **in + } + if in.BlurType != nil { + in, out := &in.BlurType, &out.BlurType + *out = new(string) + **out = **in + } + if in.ExperimentalOptions != nil { + in, out := &in.ExperimentalOptions, &out.ExperimentalOptions + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.FaceRedactorMode != nil { + in, out := &in.FaceRedactorMode, &out.FaceRedactorMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FaceDetectorPresetObservation. +func (in *FaceDetectorPresetObservation) DeepCopy() *FaceDetectorPresetObservation { + if in == nil { + return nil + } + out := new(FaceDetectorPresetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FaceDetectorPresetParameters) DeepCopyInto(out *FaceDetectorPresetParameters) { + *out = *in + if in.AnalysisResolution != nil { + in, out := &in.AnalysisResolution, &out.AnalysisResolution + *out = new(string) + **out = **in + } + if in.BlurType != nil { + in, out := &in.BlurType, &out.BlurType + *out = new(string) + **out = **in + } + if in.ExperimentalOptions != nil { + in, out := &in.ExperimentalOptions, &out.ExperimentalOptions + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.FaceRedactorMode != nil { + in, out := &in.FaceRedactorMode, &out.FaceRedactorMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FaceDetectorPresetParameters. +func (in *FaceDetectorPresetParameters) DeepCopy() *FaceDetectorPresetParameters { + if in == nil { + return nil + } + out := new(FaceDetectorPresetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FadeInInitParameters) DeepCopyInto(out *FadeInInitParameters) { + *out = *in + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(string) + **out = **in + } + if in.FadeColor != nil { + in, out := &in.FadeColor, &out.FadeColor + *out = new(string) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FadeInInitParameters. 
+func (in *FadeInInitParameters) DeepCopy() *FadeInInitParameters { + if in == nil { + return nil + } + out := new(FadeInInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FadeInObservation) DeepCopyInto(out *FadeInObservation) { + *out = *in + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(string) + **out = **in + } + if in.FadeColor != nil { + in, out := &in.FadeColor, &out.FadeColor + *out = new(string) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FadeInObservation. +func (in *FadeInObservation) DeepCopy() *FadeInObservation { + if in == nil { + return nil + } + out := new(FadeInObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FadeInParameters) DeepCopyInto(out *FadeInParameters) { + *out = *in + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(string) + **out = **in + } + if in.FadeColor != nil { + in, out := &in.FadeColor, &out.FadeColor + *out = new(string) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FadeInParameters. +func (in *FadeInParameters) DeepCopy() *FadeInParameters { + if in == nil { + return nil + } + out := new(FadeInParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FadeOutInitParameters) DeepCopyInto(out *FadeOutInitParameters) { + *out = *in + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(string) + **out = **in + } + if in.FadeColor != nil { + in, out := &in.FadeColor, &out.FadeColor + *out = new(string) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FadeOutInitParameters. +func (in *FadeOutInitParameters) DeepCopy() *FadeOutInitParameters { + if in == nil { + return nil + } + out := new(FadeOutInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FadeOutObservation) DeepCopyInto(out *FadeOutObservation) { + *out = *in + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(string) + **out = **in + } + if in.FadeColor != nil { + in, out := &in.FadeColor, &out.FadeColor + *out = new(string) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FadeOutObservation. +func (in *FadeOutObservation) DeepCopy() *FadeOutObservation { + if in == nil { + return nil + } + out := new(FadeOutObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FadeOutParameters) DeepCopyInto(out *FadeOutParameters) { + *out = *in + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(string) + **out = **in + } + if in.FadeColor != nil { + in, out := &in.FadeColor, &out.FadeColor + *out = new(string) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FadeOutParameters. +func (in *FadeOutParameters) DeepCopy() *FadeOutParameters { + if in == nil { + return nil + } + out := new(FadeOutParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FairplayConfigurationInitParameters) DeepCopyInto(out *FairplayConfigurationInitParameters) { + *out = *in + if in.OfflineRentalConfiguration != nil { + in, out := &in.OfflineRentalConfiguration, &out.OfflineRentalConfiguration + *out = new(OfflineRentalConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RentalAndLeaseKeyType != nil { + in, out := &in.RentalAndLeaseKeyType, &out.RentalAndLeaseKeyType + *out = new(string) + **out = **in + } + if in.RentalDurationSeconds != nil { + in, out := &in.RentalDurationSeconds, &out.RentalDurationSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FairplayConfigurationInitParameters. +func (in *FairplayConfigurationInitParameters) DeepCopy() *FairplayConfigurationInitParameters { + if in == nil { + return nil + } + out := new(FairplayConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FairplayConfigurationObservation) DeepCopyInto(out *FairplayConfigurationObservation) { + *out = *in + if in.OfflineRentalConfiguration != nil { + in, out := &in.OfflineRentalConfiguration, &out.OfflineRentalConfiguration + *out = new(OfflineRentalConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.RentalAndLeaseKeyType != nil { + in, out := &in.RentalAndLeaseKeyType, &out.RentalAndLeaseKeyType + *out = new(string) + **out = **in + } + if in.RentalDurationSeconds != nil { + in, out := &in.RentalDurationSeconds, &out.RentalDurationSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FairplayConfigurationObservation. +func (in *FairplayConfigurationObservation) DeepCopy() *FairplayConfigurationObservation { + if in == nil { + return nil + } + out := new(FairplayConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FairplayConfigurationParameters) DeepCopyInto(out *FairplayConfigurationParameters) { + *out = *in + if in.AskSecretRef != nil { + in, out := &in.AskSecretRef, &out.AskSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.OfflineRentalConfiguration != nil { + in, out := &in.OfflineRentalConfiguration, &out.OfflineRentalConfiguration + *out = new(OfflineRentalConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.PfxPasswordSecretRef != nil { + in, out := &in.PfxPasswordSecretRef, &out.PfxPasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.PfxSecretRef != nil { + in, out := &in.PfxSecretRef, &out.PfxSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.RentalAndLeaseKeyType != nil { + in, out := &in.RentalAndLeaseKeyType, &out.RentalAndLeaseKeyType + *out = new(string) + **out = **in + } + if in.RentalDurationSeconds != nil { + in, out := &in.RentalDurationSeconds, &out.RentalDurationSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FairplayConfigurationParameters. +func (in *FairplayConfigurationParameters) DeepCopy() *FairplayConfigurationParameters { + if in == nil { + return nil + } + out := new(FairplayConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FilterInitParameters) DeepCopyInto(out *FilterInitParameters) { + *out = *in + if in.CropRectangle != nil { + in, out := &in.CropRectangle, &out.CropRectangle + *out = new(CropRectangleInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Deinterlace != nil { + in, out := &in.Deinterlace, &out.Deinterlace + *out = new(DeinterlaceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FadeIn != nil { + in, out := &in.FadeIn, &out.FadeIn + *out = new(FadeInInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FadeOut != nil { + in, out := &in.FadeOut, &out.FadeOut + *out = new(FadeOutInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Overlay != nil { + in, out := &in.Overlay, &out.Overlay + *out = make([]OverlayInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Rotation != nil { + in, out := &in.Rotation, &out.Rotation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterInitParameters. +func (in *FilterInitParameters) DeepCopy() *FilterInitParameters { + if in == nil { + return nil + } + out := new(FilterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FilterObservation) DeepCopyInto(out *FilterObservation) { + *out = *in + if in.CropRectangle != nil { + in, out := &in.CropRectangle, &out.CropRectangle + *out = new(CropRectangleObservation) + (*in).DeepCopyInto(*out) + } + if in.Deinterlace != nil { + in, out := &in.Deinterlace, &out.Deinterlace + *out = new(DeinterlaceObservation) + (*in).DeepCopyInto(*out) + } + if in.FadeIn != nil { + in, out := &in.FadeIn, &out.FadeIn + *out = new(FadeInObservation) + (*in).DeepCopyInto(*out) + } + if in.FadeOut != nil { + in, out := &in.FadeOut, &out.FadeOut + *out = new(FadeOutObservation) + (*in).DeepCopyInto(*out) + } + if in.Overlay != nil { + in, out := &in.Overlay, &out.Overlay + *out = make([]OverlayObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Rotation != nil { + in, out := &in.Rotation, &out.Rotation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterObservation. +func (in *FilterObservation) DeepCopy() *FilterObservation { + if in == nil { + return nil + } + out := new(FilterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FilterParameters) DeepCopyInto(out *FilterParameters) { + *out = *in + if in.CropRectangle != nil { + in, out := &in.CropRectangle, &out.CropRectangle + *out = new(CropRectangleParameters) + (*in).DeepCopyInto(*out) + } + if in.Deinterlace != nil { + in, out := &in.Deinterlace, &out.Deinterlace + *out = new(DeinterlaceParameters) + (*in).DeepCopyInto(*out) + } + if in.FadeIn != nil { + in, out := &in.FadeIn, &out.FadeIn + *out = new(FadeInParameters) + (*in).DeepCopyInto(*out) + } + if in.FadeOut != nil { + in, out := &in.FadeOut, &out.FadeOut + *out = new(FadeOutParameters) + (*in).DeepCopyInto(*out) + } + if in.Overlay != nil { + in, out := &in.Overlay, &out.Overlay + *out = make([]OverlayParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Rotation != nil { + in, out := &in.Rotation, &out.Rotation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterParameters. +func (in *FilterParameters) DeepCopy() *FilterParameters { + if in == nil { + return nil + } + out := new(FilterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FormatInitParameters) DeepCopyInto(out *FormatInitParameters) { + *out = *in + if in.Jpg != nil { + in, out := &in.Jpg, &out.Jpg + *out = new(JpgInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Mp4 != nil { + in, out := &in.Mp4, &out.Mp4 + *out = new(Mp4InitParameters) + (*in).DeepCopyInto(*out) + } + if in.Png != nil { + in, out := &in.Png, &out.Png + *out = new(PngInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TransportStream != nil { + in, out := &in.TransportStream, &out.TransportStream + *out = new(TransportStreamInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FormatInitParameters. +func (in *FormatInitParameters) DeepCopy() *FormatInitParameters { + if in == nil { + return nil + } + out := new(FormatInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FormatObservation) DeepCopyInto(out *FormatObservation) { + *out = *in + if in.Jpg != nil { + in, out := &in.Jpg, &out.Jpg + *out = new(JpgObservation) + (*in).DeepCopyInto(*out) + } + if in.Mp4 != nil { + in, out := &in.Mp4, &out.Mp4 + *out = new(Mp4Observation) + (*in).DeepCopyInto(*out) + } + if in.Png != nil { + in, out := &in.Png, &out.Png + *out = new(PngObservation) + (*in).DeepCopyInto(*out) + } + if in.TransportStream != nil { + in, out := &in.TransportStream, &out.TransportStream + *out = new(TransportStreamObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FormatObservation. +func (in *FormatObservation) DeepCopy() *FormatObservation { + if in == nil { + return nil + } + out := new(FormatObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FormatParameters) DeepCopyInto(out *FormatParameters) { + *out = *in + if in.Jpg != nil { + in, out := &in.Jpg, &out.Jpg + *out = new(JpgParameters) + (*in).DeepCopyInto(*out) + } + if in.Mp4 != nil { + in, out := &in.Mp4, &out.Mp4 + *out = new(Mp4Parameters) + (*in).DeepCopyInto(*out) + } + if in.Png != nil { + in, out := &in.Png, &out.Png + *out = new(PngParameters) + (*in).DeepCopyInto(*out) + } + if in.TransportStream != nil { + in, out := &in.TransportStream, &out.TransportStream + *out = new(TransportStreamParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FormatParameters. +func (in *FormatParameters) DeepCopy() *FormatParameters { + if in == nil { + return nil + } + out := new(FormatParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *H264VideoInitParameters) DeepCopyInto(out *H264VideoInitParameters) { + *out = *in + if in.Complexity != nil { + in, out := &in.Complexity, &out.Complexity + *out = new(string) + **out = **in + } + if in.KeyFrameInterval != nil { + in, out := &in.KeyFrameInterval, &out.KeyFrameInterval + *out = new(string) + **out = **in + } + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.Layer != nil { + in, out := &in.Layer, &out.Layer + *out = make([]LayerInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RateControlMode != nil { + in, out := &in.RateControlMode, &out.RateControlMode + *out = new(string) + **out = **in + } + if in.SceneChangeDetectionEnabled != nil { + in, out := &in.SceneChangeDetectionEnabled, &out.SceneChangeDetectionEnabled + *out = new(bool) + **out = **in + } + if in.StretchMode != nil { + in, out := &in.StretchMode, &out.StretchMode + *out = new(string) + **out = **in + } + if in.SyncMode != 
nil { + in, out := &in.SyncMode, &out.SyncMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new H264VideoInitParameters. +func (in *H264VideoInitParameters) DeepCopy() *H264VideoInitParameters { + if in == nil { + return nil + } + out := new(H264VideoInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *H264VideoObservation) DeepCopyInto(out *H264VideoObservation) { + *out = *in + if in.Complexity != nil { + in, out := &in.Complexity, &out.Complexity + *out = new(string) + **out = **in + } + if in.KeyFrameInterval != nil { + in, out := &in.KeyFrameInterval, &out.KeyFrameInterval + *out = new(string) + **out = **in + } + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.Layer != nil { + in, out := &in.Layer, &out.Layer + *out = make([]LayerObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RateControlMode != nil { + in, out := &in.RateControlMode, &out.RateControlMode + *out = new(string) + **out = **in + } + if in.SceneChangeDetectionEnabled != nil { + in, out := &in.SceneChangeDetectionEnabled, &out.SceneChangeDetectionEnabled + *out = new(bool) + **out = **in + } + if in.StretchMode != nil { + in, out := &in.StretchMode, &out.StretchMode + *out = new(string) + **out = **in + } + if in.SyncMode != nil { + in, out := &in.SyncMode, &out.SyncMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new H264VideoObservation. 
+func (in *H264VideoObservation) DeepCopy() *H264VideoObservation { + if in == nil { + return nil + } + out := new(H264VideoObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *H264VideoParameters) DeepCopyInto(out *H264VideoParameters) { + *out = *in + if in.Complexity != nil { + in, out := &in.Complexity, &out.Complexity + *out = new(string) + **out = **in + } + if in.KeyFrameInterval != nil { + in, out := &in.KeyFrameInterval, &out.KeyFrameInterval + *out = new(string) + **out = **in + } + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.Layer != nil { + in, out := &in.Layer, &out.Layer + *out = make([]LayerParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RateControlMode != nil { + in, out := &in.RateControlMode, &out.RateControlMode + *out = new(string) + **out = **in + } + if in.SceneChangeDetectionEnabled != nil { + in, out := &in.SceneChangeDetectionEnabled, &out.SceneChangeDetectionEnabled + *out = new(bool) + **out = **in + } + if in.StretchMode != nil { + in, out := &in.StretchMode, &out.StretchMode + *out = new(string) + **out = **in + } + if in.SyncMode != nil { + in, out := &in.SyncMode, &out.SyncMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new H264VideoParameters. +func (in *H264VideoParameters) DeepCopy() *H264VideoParameters { + if in == nil { + return nil + } + out := new(H264VideoParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *H265VideoInitParameters) DeepCopyInto(out *H265VideoInitParameters) { + *out = *in + if in.Complexity != nil { + in, out := &in.Complexity, &out.Complexity + *out = new(string) + **out = **in + } + if in.KeyFrameInterval != nil { + in, out := &in.KeyFrameInterval, &out.KeyFrameInterval + *out = new(string) + **out = **in + } + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.Layer != nil { + in, out := &in.Layer, &out.Layer + *out = make([]H265VideoLayerInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SceneChangeDetectionEnabled != nil { + in, out := &in.SceneChangeDetectionEnabled, &out.SceneChangeDetectionEnabled + *out = new(bool) + **out = **in + } + if in.StretchMode != nil { + in, out := &in.StretchMode, &out.StretchMode + *out = new(string) + **out = **in + } + if in.SyncMode != nil { + in, out := &in.SyncMode, &out.SyncMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new H265VideoInitParameters. +func (in *H265VideoInitParameters) DeepCopy() *H265VideoInitParameters { + if in == nil { + return nil + } + out := new(H265VideoInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *H265VideoLayerInitParameters) DeepCopyInto(out *H265VideoLayerInitParameters) { + *out = *in + if in.AdaptiveBFrameEnabled != nil { + in, out := &in.AdaptiveBFrameEnabled, &out.AdaptiveBFrameEnabled + *out = new(bool) + **out = **in + } + if in.BFrames != nil { + in, out := &in.BFrames, &out.BFrames + *out = new(float64) + **out = **in + } + if in.Bitrate != nil { + in, out := &in.Bitrate, &out.Bitrate + *out = new(float64) + **out = **in + } + if in.BufferWindow != nil { + in, out := &in.BufferWindow, &out.BufferWindow + *out = new(string) + **out = **in + } + if in.Crf != nil { + in, out := &in.Crf, &out.Crf + *out = new(float64) + **out = **in + } + if in.FrameRate != nil { + in, out := &in.FrameRate, &out.FrameRate + *out = new(string) + **out = **in + } + if in.Height != nil { + in, out := &in.Height, &out.Height + *out = new(string) + **out = **in + } + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.Level != nil { + in, out := &in.Level, &out.Level + *out = new(string) + **out = **in + } + if in.MaxBitrate != nil { + in, out := &in.MaxBitrate, &out.MaxBitrate + *out = new(float64) + **out = **in + } + if in.Profile != nil { + in, out := &in.Profile, &out.Profile + *out = new(string) + **out = **in + } + if in.ReferenceFrames != nil { + in, out := &in.ReferenceFrames, &out.ReferenceFrames + *out = new(float64) + **out = **in + } + if in.Slices != nil { + in, out := &in.Slices, &out.Slices + *out = new(float64) + **out = **in + } + if in.Width != nil { + in, out := &in.Width, &out.Width + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new H265VideoLayerInitParameters. 
+func (in *H265VideoLayerInitParameters) DeepCopy() *H265VideoLayerInitParameters { + if in == nil { + return nil + } + out := new(H265VideoLayerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *H265VideoLayerObservation) DeepCopyInto(out *H265VideoLayerObservation) { + *out = *in + if in.AdaptiveBFrameEnabled != nil { + in, out := &in.AdaptiveBFrameEnabled, &out.AdaptiveBFrameEnabled + *out = new(bool) + **out = **in + } + if in.BFrames != nil { + in, out := &in.BFrames, &out.BFrames + *out = new(float64) + **out = **in + } + if in.Bitrate != nil { + in, out := &in.Bitrate, &out.Bitrate + *out = new(float64) + **out = **in + } + if in.BufferWindow != nil { + in, out := &in.BufferWindow, &out.BufferWindow + *out = new(string) + **out = **in + } + if in.Crf != nil { + in, out := &in.Crf, &out.Crf + *out = new(float64) + **out = **in + } + if in.FrameRate != nil { + in, out := &in.FrameRate, &out.FrameRate + *out = new(string) + **out = **in + } + if in.Height != nil { + in, out := &in.Height, &out.Height + *out = new(string) + **out = **in + } + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.Level != nil { + in, out := &in.Level, &out.Level + *out = new(string) + **out = **in + } + if in.MaxBitrate != nil { + in, out := &in.MaxBitrate, &out.MaxBitrate + *out = new(float64) + **out = **in + } + if in.Profile != nil { + in, out := &in.Profile, &out.Profile + *out = new(string) + **out = **in + } + if in.ReferenceFrames != nil { + in, out := &in.ReferenceFrames, &out.ReferenceFrames + *out = new(float64) + **out = **in + } + if in.Slices != nil { + in, out := &in.Slices, &out.Slices + *out = new(float64) + **out = **in + } + if in.Width != nil { + in, out := &in.Width, &out.Width + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, 
copying the receiver, creating a new H265VideoLayerObservation. +func (in *H265VideoLayerObservation) DeepCopy() *H265VideoLayerObservation { + if in == nil { + return nil + } + out := new(H265VideoLayerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *H265VideoLayerParameters) DeepCopyInto(out *H265VideoLayerParameters) { + *out = *in + if in.AdaptiveBFrameEnabled != nil { + in, out := &in.AdaptiveBFrameEnabled, &out.AdaptiveBFrameEnabled + *out = new(bool) + **out = **in + } + if in.BFrames != nil { + in, out := &in.BFrames, &out.BFrames + *out = new(float64) + **out = **in + } + if in.Bitrate != nil { + in, out := &in.Bitrate, &out.Bitrate + *out = new(float64) + **out = **in + } + if in.BufferWindow != nil { + in, out := &in.BufferWindow, &out.BufferWindow + *out = new(string) + **out = **in + } + if in.Crf != nil { + in, out := &in.Crf, &out.Crf + *out = new(float64) + **out = **in + } + if in.FrameRate != nil { + in, out := &in.FrameRate, &out.FrameRate + *out = new(string) + **out = **in + } + if in.Height != nil { + in, out := &in.Height, &out.Height + *out = new(string) + **out = **in + } + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.Level != nil { + in, out := &in.Level, &out.Level + *out = new(string) + **out = **in + } + if in.MaxBitrate != nil { + in, out := &in.MaxBitrate, &out.MaxBitrate + *out = new(float64) + **out = **in + } + if in.Profile != nil { + in, out := &in.Profile, &out.Profile + *out = new(string) + **out = **in + } + if in.ReferenceFrames != nil { + in, out := &in.ReferenceFrames, &out.ReferenceFrames + *out = new(float64) + **out = **in + } + if in.Slices != nil { + in, out := &in.Slices, &out.Slices + *out = new(float64) + **out = **in + } + if in.Width != nil { + in, out := &in.Width, &out.Width + *out = new(string) + **out = **in + } +} + +// 
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new H265VideoLayerParameters. +func (in *H265VideoLayerParameters) DeepCopy() *H265VideoLayerParameters { + if in == nil { + return nil + } + out := new(H265VideoLayerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *H265VideoObservation) DeepCopyInto(out *H265VideoObservation) { + *out = *in + if in.Complexity != nil { + in, out := &in.Complexity, &out.Complexity + *out = new(string) + **out = **in + } + if in.KeyFrameInterval != nil { + in, out := &in.KeyFrameInterval, &out.KeyFrameInterval + *out = new(string) + **out = **in + } + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.Layer != nil { + in, out := &in.Layer, &out.Layer + *out = make([]H265VideoLayerObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SceneChangeDetectionEnabled != nil { + in, out := &in.SceneChangeDetectionEnabled, &out.SceneChangeDetectionEnabled + *out = new(bool) + **out = **in + } + if in.StretchMode != nil { + in, out := &in.StretchMode, &out.StretchMode + *out = new(string) + **out = **in + } + if in.SyncMode != nil { + in, out := &in.SyncMode, &out.SyncMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new H265VideoObservation. +func (in *H265VideoObservation) DeepCopy() *H265VideoObservation { + if in == nil { + return nil + } + out := new(H265VideoObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *H265VideoParameters) DeepCopyInto(out *H265VideoParameters) { + *out = *in + if in.Complexity != nil { + in, out := &in.Complexity, &out.Complexity + *out = new(string) + **out = **in + } + if in.KeyFrameInterval != nil { + in, out := &in.KeyFrameInterval, &out.KeyFrameInterval + *out = new(string) + **out = **in + } + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.Layer != nil { + in, out := &in.Layer, &out.Layer + *out = make([]H265VideoLayerParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SceneChangeDetectionEnabled != nil { + in, out := &in.SceneChangeDetectionEnabled, &out.SceneChangeDetectionEnabled + *out = new(bool) + **out = **in + } + if in.StretchMode != nil { + in, out := &in.StretchMode, &out.StretchMode + *out = new(string) + **out = **in + } + if in.SyncMode != nil { + in, out := &in.SyncMode, &out.SyncMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new H265VideoParameters. +func (in *H265VideoParameters) DeepCopy() *H265VideoParameters { + if in == nil { + return nil + } + out := new(H265VideoParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPAccessControlAllowInitParameters) DeepCopyInto(out *IPAccessControlAllowInitParameters) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SubnetPrefixLength != nil { + in, out := &in.SubnetPrefixLength, &out.SubnetPrefixLength + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAccessControlAllowInitParameters. 
+func (in *IPAccessControlAllowInitParameters) DeepCopy() *IPAccessControlAllowInitParameters { + if in == nil { + return nil + } + out := new(IPAccessControlAllowInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPAccessControlAllowObservation) DeepCopyInto(out *IPAccessControlAllowObservation) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SubnetPrefixLength != nil { + in, out := &in.SubnetPrefixLength, &out.SubnetPrefixLength + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAccessControlAllowObservation. +func (in *IPAccessControlAllowObservation) DeepCopy() *IPAccessControlAllowObservation { + if in == nil { + return nil + } + out := new(IPAccessControlAllowObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPAccessControlAllowParameters) DeepCopyInto(out *IPAccessControlAllowParameters) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SubnetPrefixLength != nil { + in, out := &in.SubnetPrefixLength, &out.SubnetPrefixLength + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAccessControlAllowParameters. 
+func (in *IPAccessControlAllowParameters) DeepCopy() *IPAccessControlAllowParameters { + if in == nil { + return nil + } + out := new(IPAccessControlAllowParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPAllowInitParameters) DeepCopyInto(out *IPAllowInitParameters) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SubnetPrefixLength != nil { + in, out := &in.SubnetPrefixLength, &out.SubnetPrefixLength + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAllowInitParameters. +func (in *IPAllowInitParameters) DeepCopy() *IPAllowInitParameters { + if in == nil { + return nil + } + out := new(IPAllowInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPAllowObservation) DeepCopyInto(out *IPAllowObservation) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SubnetPrefixLength != nil { + in, out := &in.SubnetPrefixLength, &out.SubnetPrefixLength + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAllowObservation. +func (in *IPAllowObservation) DeepCopy() *IPAllowObservation { + if in == nil { + return nil + } + out := new(IPAllowObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IPAllowParameters) DeepCopyInto(out *IPAllowParameters) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SubnetPrefixLength != nil { + in, out := &in.SubnetPrefixLength, &out.SubnetPrefixLength + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAllowParameters. +func (in *IPAllowParameters) DeepCopy() *IPAllowParameters { + if in == nil { + return nil + } + out := new(IPAllowParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityInitParameters) DeepCopyInto(out *IdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityInitParameters. +func (in *IdentityInitParameters) DeepCopy() *IdentityInitParameters { + if in == nil { + return nil + } + out := new(IdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentityObservation) DeepCopyInto(out *IdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityObservation. +func (in *IdentityObservation) DeepCopy() *IdentityObservation { + if in == nil { + return nil + } + out := new(IdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityParameters) DeepCopyInto(out *IdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityParameters. +func (in *IdentityParameters) DeepCopy() *IdentityParameters { + if in == nil { + return nil + } + out := new(IdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InputAssetInitParameters) DeepCopyInto(out *InputAssetInitParameters) { + *out = *in + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NameRef != nil { + in, out := &in.NameRef, &out.NameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NameSelector != nil { + in, out := &in.NameSelector, &out.NameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputAssetInitParameters. +func (in *InputAssetInitParameters) DeepCopy() *InputAssetInitParameters { + if in == nil { + return nil + } + out := new(InputAssetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InputAssetObservation) DeepCopyInto(out *InputAssetObservation) { + *out = *in + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputAssetObservation. +func (in *InputAssetObservation) DeepCopy() *InputAssetObservation { + if in == nil { + return nil + } + out := new(InputAssetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InputAssetParameters) DeepCopyInto(out *InputAssetParameters) { + *out = *in + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NameRef != nil { + in, out := &in.NameRef, &out.NameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NameSelector != nil { + in, out := &in.NameSelector, &out.NameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputAssetParameters. +func (in *InputAssetParameters) DeepCopy() *InputAssetParameters { + if in == nil { + return nil + } + out := new(InputAssetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InputInitParameters) DeepCopyInto(out *InputInitParameters) { + *out = *in + if in.AccessToken != nil { + in, out := &in.AccessToken, &out.AccessToken + *out = new(string) + **out = **in + } + if in.IPAccessControlAllow != nil { + in, out := &in.IPAccessControlAllow, &out.IPAccessControlAllow + *out = make([]IPAccessControlAllowInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.KeyFrameIntervalDuration != nil { + in, out := &in.KeyFrameIntervalDuration, &out.KeyFrameIntervalDuration + *out = new(string) + **out = **in + } + if in.StreamingProtocol != nil { + in, out := &in.StreamingProtocol, &out.StreamingProtocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputInitParameters. 
+func (in *InputInitParameters) DeepCopy() *InputInitParameters { + if in == nil { + return nil + } + out := new(InputInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InputObservation) DeepCopyInto(out *InputObservation) { + *out = *in + if in.AccessToken != nil { + in, out := &in.AccessToken, &out.AccessToken + *out = new(string) + **out = **in + } + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = make([]EndpointObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAccessControlAllow != nil { + in, out := &in.IPAccessControlAllow, &out.IPAccessControlAllow + *out = make([]IPAccessControlAllowObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.KeyFrameIntervalDuration != nil { + in, out := &in.KeyFrameIntervalDuration, &out.KeyFrameIntervalDuration + *out = new(string) + **out = **in + } + if in.StreamingProtocol != nil { + in, out := &in.StreamingProtocol, &out.StreamingProtocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputObservation. +func (in *InputObservation) DeepCopy() *InputObservation { + if in == nil { + return nil + } + out := new(InputObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InputParameters) DeepCopyInto(out *InputParameters) { + *out = *in + if in.AccessToken != nil { + in, out := &in.AccessToken, &out.AccessToken + *out = new(string) + **out = **in + } + if in.IPAccessControlAllow != nil { + in, out := &in.IPAccessControlAllow, &out.IPAccessControlAllow + *out = make([]IPAccessControlAllowParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.KeyFrameIntervalDuration != nil { + in, out := &in.KeyFrameIntervalDuration, &out.KeyFrameIntervalDuration + *out = new(string) + **out = **in + } + if in.StreamingProtocol != nil { + in, out := &in.StreamingProtocol, &out.StreamingProtocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputParameters. +func (in *InputParameters) DeepCopy() *InputParameters { + if in == nil { + return nil + } + out := new(InputParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Job) DeepCopyInto(out *Job) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Job. +func (in *Job) DeepCopy() *Job { + if in == nil { + return nil + } + out := new(Job) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Job) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JobInitParameters) DeepCopyInto(out *JobInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.InputAsset != nil { + in, out := &in.InputAsset, &out.InputAsset + *out = new(InputAssetInitParameters) + (*in).DeepCopyInto(*out) + } + if in.OutputAsset != nil { + in, out := &in.OutputAsset, &out.OutputAsset + *out = make([]OutputAssetInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobInitParameters. +func (in *JobInitParameters) DeepCopy() *JobInitParameters { + if in == nil { + return nil + } + out := new(JobInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JobList) DeepCopyInto(out *JobList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Job, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobList. +func (in *JobList) DeepCopy() *JobList { + if in == nil { + return nil + } + out := new(JobList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *JobList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JobObservation) DeepCopyInto(out *JobObservation) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InputAsset != nil { + in, out := &in.InputAsset, &out.InputAsset + *out = new(InputAssetObservation) + (*in).DeepCopyInto(*out) + } + if in.MediaServicesAccountName != nil { + in, out := &in.MediaServicesAccountName, &out.MediaServicesAccountName + *out = new(string) + **out = **in + } + if in.OutputAsset != nil { + in, out := &in.OutputAsset, &out.OutputAsset + *out = make([]OutputAssetObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.TransformName != nil { + in, out := &in.TransformName, &out.TransformName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobObservation. +func (in *JobObservation) DeepCopy() *JobObservation { + if in == nil { + return nil + } + out := new(JobObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JobParameters) DeepCopyInto(out *JobParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.InputAsset != nil { + in, out := &in.InputAsset, &out.InputAsset + *out = new(InputAssetParameters) + (*in).DeepCopyInto(*out) + } + if in.MediaServicesAccountName != nil { + in, out := &in.MediaServicesAccountName, &out.MediaServicesAccountName + *out = new(string) + **out = **in + } + if in.MediaServicesAccountNameRef != nil { + in, out := &in.MediaServicesAccountNameRef, &out.MediaServicesAccountNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.MediaServicesAccountNameSelector != nil { + in, out := &in.MediaServicesAccountNameSelector, &out.MediaServicesAccountNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.OutputAsset != nil { + in, out := &in.OutputAsset, &out.OutputAsset + *out = make([]OutputAssetParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.TransformName != nil { + in, out := &in.TransformName, &out.TransformName + *out = new(string) + **out = **in + } + if in.TransformNameRef != nil { + in, out := &in.TransformNameRef, &out.TransformNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.TransformNameSelector != nil { + in, out := &in.TransformNameSelector, 
&out.TransformNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobParameters. +func (in *JobParameters) DeepCopy() *JobParameters { + if in == nil { + return nil + } + out := new(JobParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JobSpec) DeepCopyInto(out *JobSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobSpec. +func (in *JobSpec) DeepCopy() *JobSpec { + if in == nil { + return nil + } + out := new(JobSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JobStatus) DeepCopyInto(out *JobStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobStatus. +func (in *JobStatus) DeepCopy() *JobStatus { + if in == nil { + return nil + } + out := new(JobStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JpgImageInitParameters) DeepCopyInto(out *JpgImageInitParameters) { + *out = *in + if in.KeyFrameInterval != nil { + in, out := &in.KeyFrameInterval, &out.KeyFrameInterval + *out = new(string) + **out = **in + } + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.Layer != nil { + in, out := &in.Layer, &out.Layer + *out = make([]JpgImageLayerInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Range != nil { + in, out := &in.Range, &out.Range + *out = new(string) + **out = **in + } + if in.SpriteColumn != nil { + in, out := &in.SpriteColumn, &out.SpriteColumn + *out = new(float64) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } + if in.Step != nil { + in, out := &in.Step, &out.Step + *out = new(string) + **out = **in + } + if in.StretchMode != nil { + in, out := &in.StretchMode, &out.StretchMode + *out = new(string) + **out = **in + } + if in.SyncMode != nil { + in, out := &in.SyncMode, &out.SyncMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JpgImageInitParameters. +func (in *JpgImageInitParameters) DeepCopy() *JpgImageInitParameters { + if in == nil { + return nil + } + out := new(JpgImageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JpgImageLayerInitParameters) DeepCopyInto(out *JpgImageLayerInitParameters) { + *out = *in + if in.Height != nil { + in, out := &in.Height, &out.Height + *out = new(string) + **out = **in + } + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.Quality != nil { + in, out := &in.Quality, &out.Quality + *out = new(float64) + **out = **in + } + if in.Width != nil { + in, out := &in.Width, &out.Width + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JpgImageLayerInitParameters. +func (in *JpgImageLayerInitParameters) DeepCopy() *JpgImageLayerInitParameters { + if in == nil { + return nil + } + out := new(JpgImageLayerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JpgImageLayerObservation) DeepCopyInto(out *JpgImageLayerObservation) { + *out = *in + if in.Height != nil { + in, out := &in.Height, &out.Height + *out = new(string) + **out = **in + } + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.Quality != nil { + in, out := &in.Quality, &out.Quality + *out = new(float64) + **out = **in + } + if in.Width != nil { + in, out := &in.Width, &out.Width + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JpgImageLayerObservation. +func (in *JpgImageLayerObservation) DeepCopy() *JpgImageLayerObservation { + if in == nil { + return nil + } + out := new(JpgImageLayerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JpgImageLayerParameters) DeepCopyInto(out *JpgImageLayerParameters) { + *out = *in + if in.Height != nil { + in, out := &in.Height, &out.Height + *out = new(string) + **out = **in + } + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.Quality != nil { + in, out := &in.Quality, &out.Quality + *out = new(float64) + **out = **in + } + if in.Width != nil { + in, out := &in.Width, &out.Width + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JpgImageLayerParameters. +func (in *JpgImageLayerParameters) DeepCopy() *JpgImageLayerParameters { + if in == nil { + return nil + } + out := new(JpgImageLayerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JpgImageObservation) DeepCopyInto(out *JpgImageObservation) { + *out = *in + if in.KeyFrameInterval != nil { + in, out := &in.KeyFrameInterval, &out.KeyFrameInterval + *out = new(string) + **out = **in + } + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.Layer != nil { + in, out := &in.Layer, &out.Layer + *out = make([]JpgImageLayerObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Range != nil { + in, out := &in.Range, &out.Range + *out = new(string) + **out = **in + } + if in.SpriteColumn != nil { + in, out := &in.SpriteColumn, &out.SpriteColumn + *out = new(float64) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } + if in.Step != nil { + in, out := &in.Step, &out.Step + *out = new(string) + **out = **in + } + if in.StretchMode != nil { + in, out := &in.StretchMode, &out.StretchMode + *out = new(string) + **out = **in + } + if in.SyncMode != nil { + in, out := &in.SyncMode, 
&out.SyncMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JpgImageObservation. +func (in *JpgImageObservation) DeepCopy() *JpgImageObservation { + if in == nil { + return nil + } + out := new(JpgImageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JpgImageParameters) DeepCopyInto(out *JpgImageParameters) { + *out = *in + if in.KeyFrameInterval != nil { + in, out := &in.KeyFrameInterval, &out.KeyFrameInterval + *out = new(string) + **out = **in + } + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.Layer != nil { + in, out := &in.Layer, &out.Layer + *out = make([]JpgImageLayerParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Range != nil { + in, out := &in.Range, &out.Range + *out = new(string) + **out = **in + } + if in.SpriteColumn != nil { + in, out := &in.SpriteColumn, &out.SpriteColumn + *out = new(float64) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } + if in.Step != nil { + in, out := &in.Step, &out.Step + *out = new(string) + **out = **in + } + if in.StretchMode != nil { + in, out := &in.StretchMode, &out.StretchMode + *out = new(string) + **out = **in + } + if in.SyncMode != nil { + in, out := &in.SyncMode, &out.SyncMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JpgImageParameters. +func (in *JpgImageParameters) DeepCopy() *JpgImageParameters { + if in == nil { + return nil + } + out := new(JpgImageParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JpgInitParameters) DeepCopyInto(out *JpgInitParameters) { + *out = *in + if in.FilenamePattern != nil { + in, out := &in.FilenamePattern, &out.FilenamePattern + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JpgInitParameters. +func (in *JpgInitParameters) DeepCopy() *JpgInitParameters { + if in == nil { + return nil + } + out := new(JpgInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JpgObservation) DeepCopyInto(out *JpgObservation) { + *out = *in + if in.FilenamePattern != nil { + in, out := &in.FilenamePattern, &out.FilenamePattern + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JpgObservation. +func (in *JpgObservation) DeepCopy() *JpgObservation { + if in == nil { + return nil + } + out := new(JpgObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JpgParameters) DeepCopyInto(out *JpgParameters) { + *out = *in + if in.FilenamePattern != nil { + in, out := &in.FilenamePattern, &out.FilenamePattern + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JpgParameters. +func (in *JpgParameters) DeepCopy() *JpgParameters { + if in == nil { + return nil + } + out := new(JpgParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KeyDeliveryAccessControlInitParameters) DeepCopyInto(out *KeyDeliveryAccessControlInitParameters) { + *out = *in + if in.DefaultAction != nil { + in, out := &in.DefaultAction, &out.DefaultAction + *out = new(string) + **out = **in + } + if in.IPAllowList != nil { + in, out := &in.IPAllowList, &out.IPAllowList + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyDeliveryAccessControlInitParameters. +func (in *KeyDeliveryAccessControlInitParameters) DeepCopy() *KeyDeliveryAccessControlInitParameters { + if in == nil { + return nil + } + out := new(KeyDeliveryAccessControlInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KeyDeliveryAccessControlObservation) DeepCopyInto(out *KeyDeliveryAccessControlObservation) { + *out = *in + if in.DefaultAction != nil { + in, out := &in.DefaultAction, &out.DefaultAction + *out = new(string) + **out = **in + } + if in.IPAllowList != nil { + in, out := &in.IPAllowList, &out.IPAllowList + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyDeliveryAccessControlObservation. +func (in *KeyDeliveryAccessControlObservation) DeepCopy() *KeyDeliveryAccessControlObservation { + if in == nil { + return nil + } + out := new(KeyDeliveryAccessControlObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KeyDeliveryAccessControlParameters) DeepCopyInto(out *KeyDeliveryAccessControlParameters) { + *out = *in + if in.DefaultAction != nil { + in, out := &in.DefaultAction, &out.DefaultAction + *out = new(string) + **out = **in + } + if in.IPAllowList != nil { + in, out := &in.IPAllowList, &out.IPAllowList + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyDeliveryAccessControlParameters. +func (in *KeyDeliveryAccessControlParameters) DeepCopy() *KeyDeliveryAccessControlParameters { + if in == nil { + return nil + } + out := new(KeyDeliveryAccessControlParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LayerInitParameters) DeepCopyInto(out *LayerInitParameters) { + *out = *in + if in.AdaptiveBFrameEnabled != nil { + in, out := &in.AdaptiveBFrameEnabled, &out.AdaptiveBFrameEnabled + *out = new(bool) + **out = **in + } + if in.BFrames != nil { + in, out := &in.BFrames, &out.BFrames + *out = new(float64) + **out = **in + } + if in.Bitrate != nil { + in, out := &in.Bitrate, &out.Bitrate + *out = new(float64) + **out = **in + } + if in.BufferWindow != nil { + in, out := &in.BufferWindow, &out.BufferWindow + *out = new(string) + **out = **in + } + if in.Crf != nil { + in, out := &in.Crf, &out.Crf + *out = new(float64) + **out = **in + } + if in.EntropyMode != nil { + in, out := &in.EntropyMode, &out.EntropyMode + *out = new(string) + **out = **in + } + if in.FrameRate != nil { + in, out := &in.FrameRate, &out.FrameRate + *out = new(string) + **out = **in + } + if in.Height != nil { + in, out := &in.Height, &out.Height + *out = new(string) + **out = **in + } + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = 
new(string) + **out = **in + } + if in.Level != nil { + in, out := &in.Level, &out.Level + *out = new(string) + **out = **in + } + if in.MaxBitrate != nil { + in, out := &in.MaxBitrate, &out.MaxBitrate + *out = new(float64) + **out = **in + } + if in.Profile != nil { + in, out := &in.Profile, &out.Profile + *out = new(string) + **out = **in + } + if in.ReferenceFrames != nil { + in, out := &in.ReferenceFrames, &out.ReferenceFrames + *out = new(float64) + **out = **in + } + if in.Slices != nil { + in, out := &in.Slices, &out.Slices + *out = new(float64) + **out = **in + } + if in.Width != nil { + in, out := &in.Width, &out.Width + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LayerInitParameters. +func (in *LayerInitParameters) DeepCopy() *LayerInitParameters { + if in == nil { + return nil + } + out := new(LayerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LayerObservation) DeepCopyInto(out *LayerObservation) { + *out = *in + if in.AdaptiveBFrameEnabled != nil { + in, out := &in.AdaptiveBFrameEnabled, &out.AdaptiveBFrameEnabled + *out = new(bool) + **out = **in + } + if in.BFrames != nil { + in, out := &in.BFrames, &out.BFrames + *out = new(float64) + **out = **in + } + if in.Bitrate != nil { + in, out := &in.Bitrate, &out.Bitrate + *out = new(float64) + **out = **in + } + if in.BufferWindow != nil { + in, out := &in.BufferWindow, &out.BufferWindow + *out = new(string) + **out = **in + } + if in.Crf != nil { + in, out := &in.Crf, &out.Crf + *out = new(float64) + **out = **in + } + if in.EntropyMode != nil { + in, out := &in.EntropyMode, &out.EntropyMode + *out = new(string) + **out = **in + } + if in.FrameRate != nil { + in, out := &in.FrameRate, &out.FrameRate + *out = new(string) + **out = **in + } + if in.Height != nil { + in, out := &in.Height, &out.Height + *out = new(string) + **out = **in + } + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.Level != nil { + in, out := &in.Level, &out.Level + *out = new(string) + **out = **in + } + if in.MaxBitrate != nil { + in, out := &in.MaxBitrate, &out.MaxBitrate + *out = new(float64) + **out = **in + } + if in.Profile != nil { + in, out := &in.Profile, &out.Profile + *out = new(string) + **out = **in + } + if in.ReferenceFrames != nil { + in, out := &in.ReferenceFrames, &out.ReferenceFrames + *out = new(float64) + **out = **in + } + if in.Slices != nil { + in, out := &in.Slices, &out.Slices + *out = new(float64) + **out = **in + } + if in.Width != nil { + in, out := &in.Width, &out.Width + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LayerObservation. 
+func (in *LayerObservation) DeepCopy() *LayerObservation { + if in == nil { + return nil + } + out := new(LayerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LayerParameters) DeepCopyInto(out *LayerParameters) { + *out = *in + if in.AdaptiveBFrameEnabled != nil { + in, out := &in.AdaptiveBFrameEnabled, &out.AdaptiveBFrameEnabled + *out = new(bool) + **out = **in + } + if in.BFrames != nil { + in, out := &in.BFrames, &out.BFrames + *out = new(float64) + **out = **in + } + if in.Bitrate != nil { + in, out := &in.Bitrate, &out.Bitrate + *out = new(float64) + **out = **in + } + if in.BufferWindow != nil { + in, out := &in.BufferWindow, &out.BufferWindow + *out = new(string) + **out = **in + } + if in.Crf != nil { + in, out := &in.Crf, &out.Crf + *out = new(float64) + **out = **in + } + if in.EntropyMode != nil { + in, out := &in.EntropyMode, &out.EntropyMode + *out = new(string) + **out = **in + } + if in.FrameRate != nil { + in, out := &in.FrameRate, &out.FrameRate + *out = new(string) + **out = **in + } + if in.Height != nil { + in, out := &in.Height, &out.Height + *out = new(string) + **out = **in + } + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.Level != nil { + in, out := &in.Level, &out.Level + *out = new(string) + **out = **in + } + if in.MaxBitrate != nil { + in, out := &in.MaxBitrate, &out.MaxBitrate + *out = new(float64) + **out = **in + } + if in.Profile != nil { + in, out := &in.Profile, &out.Profile + *out = new(string) + **out = **in + } + if in.ReferenceFrames != nil { + in, out := &in.ReferenceFrames, &out.ReferenceFrames + *out = new(float64) + **out = **in + } + if in.Slices != nil { + in, out := &in.Slices, &out.Slices + *out = new(float64) + **out = **in + } + if in.Width != nil { + in, out := &in.Width, &out.Width + *out = new(string) + **out = **in + } +} 
+ +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LayerParameters. +func (in *LayerParameters) DeepCopy() *LayerParameters { + if in == nil { + return nil + } + out := new(LayerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LiveEvent) DeepCopyInto(out *LiveEvent) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LiveEvent. +func (in *LiveEvent) DeepCopy() *LiveEvent { + if in == nil { + return nil + } + out := new(LiveEvent) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LiveEvent) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LiveEventInitParameters) DeepCopyInto(out *LiveEventInitParameters) { + *out = *in + if in.AutoStartEnabled != nil { + in, out := &in.AutoStartEnabled, &out.AutoStartEnabled + *out = new(bool) + **out = **in + } + if in.CrossSiteAccessPolicy != nil { + in, out := &in.CrossSiteAccessPolicy, &out.CrossSiteAccessPolicy + *out = new(CrossSiteAccessPolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(EncodingInitParameters) + (*in).DeepCopyInto(*out) + } + if in.HostNamePrefix != nil { + in, out := &in.HostNamePrefix, &out.HostNamePrefix + *out = new(string) + **out = **in + } + if in.Input != nil { + in, out := &in.Input, &out.Input + *out = new(InputInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Preview != nil { + in, out := &in.Preview, &out.Preview + *out = new(PreviewInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StreamOptions != nil { + in, out := &in.StreamOptions, &out.StreamOptions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TranscriptionLanguages != nil { + in, out := &in.TranscriptionLanguages, &out.TranscriptionLanguages + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if 
in.UseStaticHostName != nil { + in, out := &in.UseStaticHostName, &out.UseStaticHostName + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LiveEventInitParameters. +func (in *LiveEventInitParameters) DeepCopy() *LiveEventInitParameters { + if in == nil { + return nil + } + out := new(LiveEventInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LiveEventList) DeepCopyInto(out *LiveEventList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LiveEvent, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LiveEventList. +func (in *LiveEventList) DeepCopy() *LiveEventList { + if in == nil { + return nil + } + out := new(LiveEventList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LiveEventList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LiveEventObservation) DeepCopyInto(out *LiveEventObservation) { + *out = *in + if in.AutoStartEnabled != nil { + in, out := &in.AutoStartEnabled, &out.AutoStartEnabled + *out = new(bool) + **out = **in + } + if in.CrossSiteAccessPolicy != nil { + in, out := &in.CrossSiteAccessPolicy, &out.CrossSiteAccessPolicy + *out = new(CrossSiteAccessPolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(EncodingObservation) + (*in).DeepCopyInto(*out) + } + if in.HostNamePrefix != nil { + in, out := &in.HostNamePrefix, &out.HostNamePrefix + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Input != nil { + in, out := &in.Input, &out.Input + *out = new(InputObservation) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MediaServicesAccountName != nil { + in, out := &in.MediaServicesAccountName, &out.MediaServicesAccountName + *out = new(string) + **out = **in + } + if in.Preview != nil { + in, out := &in.Preview, &out.Preview + *out = new(PreviewObservation) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.StreamOptions != nil { + in, out := &in.StreamOptions, &out.StreamOptions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, 
&outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TranscriptionLanguages != nil { + in, out := &in.TranscriptionLanguages, &out.TranscriptionLanguages + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.UseStaticHostName != nil { + in, out := &in.UseStaticHostName, &out.UseStaticHostName + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LiveEventObservation. +func (in *LiveEventObservation) DeepCopy() *LiveEventObservation { + if in == nil { + return nil + } + out := new(LiveEventObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LiveEventParameters) DeepCopyInto(out *LiveEventParameters) { + *out = *in + if in.AutoStartEnabled != nil { + in, out := &in.AutoStartEnabled, &out.AutoStartEnabled + *out = new(bool) + **out = **in + } + if in.CrossSiteAccessPolicy != nil { + in, out := &in.CrossSiteAccessPolicy, &out.CrossSiteAccessPolicy + *out = new(CrossSiteAccessPolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(EncodingParameters) + (*in).DeepCopyInto(*out) + } + if in.HostNamePrefix != nil { + in, out := &in.HostNamePrefix, &out.HostNamePrefix + *out = new(string) + **out = **in + } + if in.Input != nil { + in, out := &in.Input, &out.Input + *out = new(InputParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MediaServicesAccountName != nil { + in, out := &in.MediaServicesAccountName, 
&out.MediaServicesAccountName + *out = new(string) + **out = **in + } + if in.MediaServicesAccountNameRef != nil { + in, out := &in.MediaServicesAccountNameRef, &out.MediaServicesAccountNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.MediaServicesAccountNameSelector != nil { + in, out := &in.MediaServicesAccountNameSelector, &out.MediaServicesAccountNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Preview != nil { + in, out := &in.Preview, &out.Preview + *out = new(PreviewParameters) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StreamOptions != nil { + in, out := &in.StreamOptions, &out.StreamOptions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TranscriptionLanguages != nil { + in, out := &in.TranscriptionLanguages, &out.TranscriptionLanguages + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.UseStaticHostName != nil { + in, out := &in.UseStaticHostName, &out.UseStaticHostName + *out = new(bool) + 
**out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LiveEventParameters. +func (in *LiveEventParameters) DeepCopy() *LiveEventParameters { + if in == nil { + return nil + } + out := new(LiveEventParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LiveEventSpec) DeepCopyInto(out *LiveEventSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LiveEventSpec. +func (in *LiveEventSpec) DeepCopy() *LiveEventSpec { + if in == nil { + return nil + } + out := new(LiveEventSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LiveEventStatus) DeepCopyInto(out *LiveEventStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LiveEventStatus. +func (in *LiveEventStatus) DeepCopy() *LiveEventStatus { + if in == nil { + return nil + } + out := new(LiveEventStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ManagedIdentityInitParameters) DeepCopyInto(out *ManagedIdentityInitParameters) { + *out = *in + if in.UseSystemAssignedIdentity != nil { + in, out := &in.UseSystemAssignedIdentity, &out.UseSystemAssignedIdentity + *out = new(bool) + **out = **in + } + if in.UserAssignedIdentityID != nil { + in, out := &in.UserAssignedIdentityID, &out.UserAssignedIdentityID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedIdentityInitParameters. +func (in *ManagedIdentityInitParameters) DeepCopy() *ManagedIdentityInitParameters { + if in == nil { + return nil + } + out := new(ManagedIdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedIdentityObservation) DeepCopyInto(out *ManagedIdentityObservation) { + *out = *in + if in.UseSystemAssignedIdentity != nil { + in, out := &in.UseSystemAssignedIdentity, &out.UseSystemAssignedIdentity + *out = new(bool) + **out = **in + } + if in.UserAssignedIdentityID != nil { + in, out := &in.UserAssignedIdentityID, &out.UserAssignedIdentityID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedIdentityObservation. +func (in *ManagedIdentityObservation) DeepCopy() *ManagedIdentityObservation { + if in == nil { + return nil + } + out := new(ManagedIdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ManagedIdentityParameters) DeepCopyInto(out *ManagedIdentityParameters) { + *out = *in + if in.UseSystemAssignedIdentity != nil { + in, out := &in.UseSystemAssignedIdentity, &out.UseSystemAssignedIdentity + *out = new(bool) + **out = **in + } + if in.UserAssignedIdentityID != nil { + in, out := &in.UserAssignedIdentityID, &out.UserAssignedIdentityID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedIdentityParameters. +func (in *ManagedIdentityParameters) DeepCopy() *ManagedIdentityParameters { + if in == nil { + return nil + } + out := new(ManagedIdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Mp4InitParameters) DeepCopyInto(out *Mp4InitParameters) { + *out = *in + if in.FilenamePattern != nil { + in, out := &in.FilenamePattern, &out.FilenamePattern + *out = new(string) + **out = **in + } + if in.OutputFile != nil { + in, out := &in.OutputFile, &out.OutputFile + *out = make([]OutputFileInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Mp4InitParameters. +func (in *Mp4InitParameters) DeepCopy() *Mp4InitParameters { + if in == nil { + return nil + } + out := new(Mp4InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Mp4Observation) DeepCopyInto(out *Mp4Observation) { + *out = *in + if in.FilenamePattern != nil { + in, out := &in.FilenamePattern, &out.FilenamePattern + *out = new(string) + **out = **in + } + if in.OutputFile != nil { + in, out := &in.OutputFile, &out.OutputFile + *out = make([]OutputFileObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Mp4Observation. +func (in *Mp4Observation) DeepCopy() *Mp4Observation { + if in == nil { + return nil + } + out := new(Mp4Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Mp4Parameters) DeepCopyInto(out *Mp4Parameters) { + *out = *in + if in.FilenamePattern != nil { + in, out := &in.FilenamePattern, &out.FilenamePattern + *out = new(string) + **out = **in + } + if in.OutputFile != nil { + in, out := &in.OutputFile, &out.OutputFile + *out = make([]OutputFileParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Mp4Parameters. +func (in *Mp4Parameters) DeepCopy() *Mp4Parameters { + if in == nil { + return nil + } + out := new(Mp4Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NoEncryptionEnabledProtocolsInitParameters) DeepCopyInto(out *NoEncryptionEnabledProtocolsInitParameters) { + *out = *in + if in.Dash != nil { + in, out := &in.Dash, &out.Dash + *out = new(bool) + **out = **in + } + if in.Download != nil { + in, out := &in.Download, &out.Download + *out = new(bool) + **out = **in + } + if in.Hls != nil { + in, out := &in.Hls, &out.Hls + *out = new(bool) + **out = **in + } + if in.SmoothStreaming != nil { + in, out := &in.SmoothStreaming, &out.SmoothStreaming + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NoEncryptionEnabledProtocolsInitParameters. +func (in *NoEncryptionEnabledProtocolsInitParameters) DeepCopy() *NoEncryptionEnabledProtocolsInitParameters { + if in == nil { + return nil + } + out := new(NoEncryptionEnabledProtocolsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NoEncryptionEnabledProtocolsObservation) DeepCopyInto(out *NoEncryptionEnabledProtocolsObservation) { + *out = *in + if in.Dash != nil { + in, out := &in.Dash, &out.Dash + *out = new(bool) + **out = **in + } + if in.Download != nil { + in, out := &in.Download, &out.Download + *out = new(bool) + **out = **in + } + if in.Hls != nil { + in, out := &in.Hls, &out.Hls + *out = new(bool) + **out = **in + } + if in.SmoothStreaming != nil { + in, out := &in.SmoothStreaming, &out.SmoothStreaming + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NoEncryptionEnabledProtocolsObservation. 
+func (in *NoEncryptionEnabledProtocolsObservation) DeepCopy() *NoEncryptionEnabledProtocolsObservation { + if in == nil { + return nil + } + out := new(NoEncryptionEnabledProtocolsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NoEncryptionEnabledProtocolsParameters) DeepCopyInto(out *NoEncryptionEnabledProtocolsParameters) { + *out = *in + if in.Dash != nil { + in, out := &in.Dash, &out.Dash + *out = new(bool) + **out = **in + } + if in.Download != nil { + in, out := &in.Download, &out.Download + *out = new(bool) + **out = **in + } + if in.Hls != nil { + in, out := &in.Hls, &out.Hls + *out = new(bool) + **out = **in + } + if in.SmoothStreaming != nil { + in, out := &in.SmoothStreaming, &out.SmoothStreaming + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NoEncryptionEnabledProtocolsParameters. +func (in *NoEncryptionEnabledProtocolsParameters) DeepCopy() *NoEncryptionEnabledProtocolsParameters { + if in == nil { + return nil + } + out := new(NoEncryptionEnabledProtocolsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OfflineRentalConfigurationInitParameters) DeepCopyInto(out *OfflineRentalConfigurationInitParameters) { + *out = *in + if in.PlaybackDurationSeconds != nil { + in, out := &in.PlaybackDurationSeconds, &out.PlaybackDurationSeconds + *out = new(float64) + **out = **in + } + if in.StorageDurationSeconds != nil { + in, out := &in.StorageDurationSeconds, &out.StorageDurationSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OfflineRentalConfigurationInitParameters. 
+func (in *OfflineRentalConfigurationInitParameters) DeepCopy() *OfflineRentalConfigurationInitParameters { + if in == nil { + return nil + } + out := new(OfflineRentalConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OfflineRentalConfigurationObservation) DeepCopyInto(out *OfflineRentalConfigurationObservation) { + *out = *in + if in.PlaybackDurationSeconds != nil { + in, out := &in.PlaybackDurationSeconds, &out.PlaybackDurationSeconds + *out = new(float64) + **out = **in + } + if in.StorageDurationSeconds != nil { + in, out := &in.StorageDurationSeconds, &out.StorageDurationSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OfflineRentalConfigurationObservation. +func (in *OfflineRentalConfigurationObservation) DeepCopy() *OfflineRentalConfigurationObservation { + if in == nil { + return nil + } + out := new(OfflineRentalConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OfflineRentalConfigurationParameters) DeepCopyInto(out *OfflineRentalConfigurationParameters) { + *out = *in + if in.PlaybackDurationSeconds != nil { + in, out := &in.PlaybackDurationSeconds, &out.PlaybackDurationSeconds + *out = new(float64) + **out = **in + } + if in.StorageDurationSeconds != nil { + in, out := &in.StorageDurationSeconds, &out.StorageDurationSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OfflineRentalConfigurationParameters. 
+func (in *OfflineRentalConfigurationParameters) DeepCopy() *OfflineRentalConfigurationParameters { + if in == nil { + return nil + } + out := new(OfflineRentalConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputAssetInitParameters) DeepCopyInto(out *OutputAssetInitParameters) { + *out = *in + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NameRef != nil { + in, out := &in.NameRef, &out.NameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NameSelector != nil { + in, out := &in.NameSelector, &out.NameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputAssetInitParameters. +func (in *OutputAssetInitParameters) DeepCopy() *OutputAssetInitParameters { + if in == nil { + return nil + } + out := new(OutputAssetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputAssetObservation) DeepCopyInto(out *OutputAssetObservation) { + *out = *in + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputAssetObservation. 
+func (in *OutputAssetObservation) DeepCopy() *OutputAssetObservation { + if in == nil { + return nil + } + out := new(OutputAssetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputAssetParameters) DeepCopyInto(out *OutputAssetParameters) { + *out = *in + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NameRef != nil { + in, out := &in.NameRef, &out.NameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NameSelector != nil { + in, out := &in.NameSelector, &out.NameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputAssetParameters. +func (in *OutputAssetParameters) DeepCopy() *OutputAssetParameters { + if in == nil { + return nil + } + out := new(OutputAssetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputFileInitParameters) DeepCopyInto(out *OutputFileInitParameters) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputFileInitParameters. +func (in *OutputFileInitParameters) DeepCopy() *OutputFileInitParameters { + if in == nil { + return nil + } + out := new(OutputFileInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *OutputFileObservation) DeepCopyInto(out *OutputFileObservation) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputFileObservation. +func (in *OutputFileObservation) DeepCopy() *OutputFileObservation { + if in == nil { + return nil + } + out := new(OutputFileObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputFileParameters) DeepCopyInto(out *OutputFileParameters) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputFileParameters. +func (in *OutputFileParameters) DeepCopy() *OutputFileParameters { + if in == nil { + return nil + } + out := new(OutputFileParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OutputInitParameters) DeepCopyInto(out *OutputInitParameters) { + *out = *in + if in.AudioAnalyzerPreset != nil { + in, out := &in.AudioAnalyzerPreset, &out.AudioAnalyzerPreset + *out = new(AudioAnalyzerPresetInitParameters) + (*in).DeepCopyInto(*out) + } + if in.BuiltinPreset != nil { + in, out := &in.BuiltinPreset, &out.BuiltinPreset + *out = new(BuiltinPresetInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomPreset != nil { + in, out := &in.CustomPreset, &out.CustomPreset + *out = new(CustomPresetInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FaceDetectorPreset != nil { + in, out := &in.FaceDetectorPreset, &out.FaceDetectorPreset + *out = new(FaceDetectorPresetInitParameters) + (*in).DeepCopyInto(*out) + } + if in.OnErrorAction != nil { + in, out := &in.OnErrorAction, &out.OnErrorAction + *out = new(string) + **out = **in + } + if in.RelativePriority != nil { + in, out := &in.RelativePriority, &out.RelativePriority + *out = new(string) + **out = **in + } + if in.VideoAnalyzerPreset != nil { + in, out := &in.VideoAnalyzerPreset, &out.VideoAnalyzerPreset + *out = new(VideoAnalyzerPresetInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputInitParameters. +func (in *OutputInitParameters) DeepCopy() *OutputInitParameters { + if in == nil { + return nil + } + out := new(OutputInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OutputObservation) DeepCopyInto(out *OutputObservation) { + *out = *in + if in.AudioAnalyzerPreset != nil { + in, out := &in.AudioAnalyzerPreset, &out.AudioAnalyzerPreset + *out = new(AudioAnalyzerPresetObservation) + (*in).DeepCopyInto(*out) + } + if in.BuiltinPreset != nil { + in, out := &in.BuiltinPreset, &out.BuiltinPreset + *out = new(BuiltinPresetObservation) + (*in).DeepCopyInto(*out) + } + if in.CustomPreset != nil { + in, out := &in.CustomPreset, &out.CustomPreset + *out = new(CustomPresetObservation) + (*in).DeepCopyInto(*out) + } + if in.FaceDetectorPreset != nil { + in, out := &in.FaceDetectorPreset, &out.FaceDetectorPreset + *out = new(FaceDetectorPresetObservation) + (*in).DeepCopyInto(*out) + } + if in.OnErrorAction != nil { + in, out := &in.OnErrorAction, &out.OnErrorAction + *out = new(string) + **out = **in + } + if in.RelativePriority != nil { + in, out := &in.RelativePriority, &out.RelativePriority + *out = new(string) + **out = **in + } + if in.VideoAnalyzerPreset != nil { + in, out := &in.VideoAnalyzerPreset, &out.VideoAnalyzerPreset + *out = new(VideoAnalyzerPresetObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputObservation. +func (in *OutputObservation) DeepCopy() *OutputObservation { + if in == nil { + return nil + } + out := new(OutputObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OutputParameters) DeepCopyInto(out *OutputParameters) { + *out = *in + if in.AudioAnalyzerPreset != nil { + in, out := &in.AudioAnalyzerPreset, &out.AudioAnalyzerPreset + *out = new(AudioAnalyzerPresetParameters) + (*in).DeepCopyInto(*out) + } + if in.BuiltinPreset != nil { + in, out := &in.BuiltinPreset, &out.BuiltinPreset + *out = new(BuiltinPresetParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomPreset != nil { + in, out := &in.CustomPreset, &out.CustomPreset + *out = new(CustomPresetParameters) + (*in).DeepCopyInto(*out) + } + if in.FaceDetectorPreset != nil { + in, out := &in.FaceDetectorPreset, &out.FaceDetectorPreset + *out = new(FaceDetectorPresetParameters) + (*in).DeepCopyInto(*out) + } + if in.OnErrorAction != nil { + in, out := &in.OnErrorAction, &out.OnErrorAction + *out = new(string) + **out = **in + } + if in.RelativePriority != nil { + in, out := &in.RelativePriority, &out.RelativePriority + *out = new(string) + **out = **in + } + if in.VideoAnalyzerPreset != nil { + in, out := &in.VideoAnalyzerPreset, &out.VideoAnalyzerPreset + *out = new(VideoAnalyzerPresetParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputParameters. +func (in *OutputParameters) DeepCopy() *OutputParameters { + if in == nil { + return nil + } + out := new(OutputParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OverlayInitParameters) DeepCopyInto(out *OverlayInitParameters) { + *out = *in + if in.Audio != nil { + in, out := &in.Audio, &out.Audio + *out = new(AudioInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Video != nil { + in, out := &in.Video, &out.Video + *out = new(VideoInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverlayInitParameters. 
+func (in *OverlayInitParameters) DeepCopy() *OverlayInitParameters { + if in == nil { + return nil + } + out := new(OverlayInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OverlayObservation) DeepCopyInto(out *OverlayObservation) { + *out = *in + if in.Audio != nil { + in, out := &in.Audio, &out.Audio + *out = new(AudioObservation) + (*in).DeepCopyInto(*out) + } + if in.Video != nil { + in, out := &in.Video, &out.Video + *out = new(VideoObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverlayObservation. +func (in *OverlayObservation) DeepCopy() *OverlayObservation { + if in == nil { + return nil + } + out := new(OverlayObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OverlayParameters) DeepCopyInto(out *OverlayParameters) { + *out = *in + if in.Audio != nil { + in, out := &in.Audio, &out.Audio + *out = new(AudioParameters) + (*in).DeepCopyInto(*out) + } + if in.Video != nil { + in, out := &in.Video, &out.Video + *out = new(VideoParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverlayParameters. +func (in *OverlayParameters) DeepCopy() *OverlayParameters { + if in == nil { + return nil + } + out := new(OverlayParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PlayRightInitParameters) DeepCopyInto(out *PlayRightInitParameters) { + *out = *in + if in.AgcAndColorStripeRestriction != nil { + in, out := &in.AgcAndColorStripeRestriction, &out.AgcAndColorStripeRestriction + *out = new(float64) + **out = **in + } + if in.AllowPassingVideoContentToUnknownOutput != nil { + in, out := &in.AllowPassingVideoContentToUnknownOutput, &out.AllowPassingVideoContentToUnknownOutput + *out = new(string) + **out = **in + } + if in.AnalogVideoOpl != nil { + in, out := &in.AnalogVideoOpl, &out.AnalogVideoOpl + *out = new(float64) + **out = **in + } + if in.CompressedDigitalAudioOpl != nil { + in, out := &in.CompressedDigitalAudioOpl, &out.CompressedDigitalAudioOpl + *out = new(float64) + **out = **in + } + if in.CompressedDigitalVideoOpl != nil { + in, out := &in.CompressedDigitalVideoOpl, &out.CompressedDigitalVideoOpl + *out = new(float64) + **out = **in + } + if in.DigitalVideoOnlyContentRestriction != nil { + in, out := &in.DigitalVideoOnlyContentRestriction, &out.DigitalVideoOnlyContentRestriction + *out = new(bool) + **out = **in + } + if in.ExplicitAnalogTelevisionOutputRestriction != nil { + in, out := &in.ExplicitAnalogTelevisionOutputRestriction, &out.ExplicitAnalogTelevisionOutputRestriction + *out = new(ExplicitAnalogTelevisionOutputRestrictionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FirstPlayExpiration != nil { + in, out := &in.FirstPlayExpiration, &out.FirstPlayExpiration + *out = new(string) + **out = **in + } + if in.ImageConstraintForAnalogComponentVideoRestriction != nil { + in, out := &in.ImageConstraintForAnalogComponentVideoRestriction, &out.ImageConstraintForAnalogComponentVideoRestriction + *out = new(bool) + **out = **in + } + if in.ImageConstraintForAnalogComputerMonitorRestriction != nil { + in, out := &in.ImageConstraintForAnalogComputerMonitorRestriction, &out.ImageConstraintForAnalogComputerMonitorRestriction + *out = new(bool) + **out = **in + } + if in.ScmsRestriction != nil { + in, out 
:= &in.ScmsRestriction, &out.ScmsRestriction + *out = new(float64) + **out = **in + } + if in.UncompressedDigitalAudioOpl != nil { + in, out := &in.UncompressedDigitalAudioOpl, &out.UncompressedDigitalAudioOpl + *out = new(float64) + **out = **in + } + if in.UncompressedDigitalVideoOpl != nil { + in, out := &in.UncompressedDigitalVideoOpl, &out.UncompressedDigitalVideoOpl + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlayRightInitParameters. +func (in *PlayRightInitParameters) DeepCopy() *PlayRightInitParameters { + if in == nil { + return nil + } + out := new(PlayRightInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlayRightObservation) DeepCopyInto(out *PlayRightObservation) { + *out = *in + if in.AgcAndColorStripeRestriction != nil { + in, out := &in.AgcAndColorStripeRestriction, &out.AgcAndColorStripeRestriction + *out = new(float64) + **out = **in + } + if in.AllowPassingVideoContentToUnknownOutput != nil { + in, out := &in.AllowPassingVideoContentToUnknownOutput, &out.AllowPassingVideoContentToUnknownOutput + *out = new(string) + **out = **in + } + if in.AnalogVideoOpl != nil { + in, out := &in.AnalogVideoOpl, &out.AnalogVideoOpl + *out = new(float64) + **out = **in + } + if in.CompressedDigitalAudioOpl != nil { + in, out := &in.CompressedDigitalAudioOpl, &out.CompressedDigitalAudioOpl + *out = new(float64) + **out = **in + } + if in.CompressedDigitalVideoOpl != nil { + in, out := &in.CompressedDigitalVideoOpl, &out.CompressedDigitalVideoOpl + *out = new(float64) + **out = **in + } + if in.DigitalVideoOnlyContentRestriction != nil { + in, out := &in.DigitalVideoOnlyContentRestriction, &out.DigitalVideoOnlyContentRestriction + *out = new(bool) + **out = **in + } + if in.ExplicitAnalogTelevisionOutputRestriction != nil { + in, out := 
&in.ExplicitAnalogTelevisionOutputRestriction, &out.ExplicitAnalogTelevisionOutputRestriction + *out = new(ExplicitAnalogTelevisionOutputRestrictionObservation) + (*in).DeepCopyInto(*out) + } + if in.FirstPlayExpiration != nil { + in, out := &in.FirstPlayExpiration, &out.FirstPlayExpiration + *out = new(string) + **out = **in + } + if in.ImageConstraintForAnalogComponentVideoRestriction != nil { + in, out := &in.ImageConstraintForAnalogComponentVideoRestriction, &out.ImageConstraintForAnalogComponentVideoRestriction + *out = new(bool) + **out = **in + } + if in.ImageConstraintForAnalogComputerMonitorRestriction != nil { + in, out := &in.ImageConstraintForAnalogComputerMonitorRestriction, &out.ImageConstraintForAnalogComputerMonitorRestriction + *out = new(bool) + **out = **in + } + if in.ScmsRestriction != nil { + in, out := &in.ScmsRestriction, &out.ScmsRestriction + *out = new(float64) + **out = **in + } + if in.UncompressedDigitalAudioOpl != nil { + in, out := &in.UncompressedDigitalAudioOpl, &out.UncompressedDigitalAudioOpl + *out = new(float64) + **out = **in + } + if in.UncompressedDigitalVideoOpl != nil { + in, out := &in.UncompressedDigitalVideoOpl, &out.UncompressedDigitalVideoOpl + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlayRightObservation. +func (in *PlayRightObservation) DeepCopy() *PlayRightObservation { + if in == nil { + return nil + } + out := new(PlayRightObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PlayRightParameters) DeepCopyInto(out *PlayRightParameters) { + *out = *in + if in.AgcAndColorStripeRestriction != nil { + in, out := &in.AgcAndColorStripeRestriction, &out.AgcAndColorStripeRestriction + *out = new(float64) + **out = **in + } + if in.AllowPassingVideoContentToUnknownOutput != nil { + in, out := &in.AllowPassingVideoContentToUnknownOutput, &out.AllowPassingVideoContentToUnknownOutput + *out = new(string) + **out = **in + } + if in.AnalogVideoOpl != nil { + in, out := &in.AnalogVideoOpl, &out.AnalogVideoOpl + *out = new(float64) + **out = **in + } + if in.CompressedDigitalAudioOpl != nil { + in, out := &in.CompressedDigitalAudioOpl, &out.CompressedDigitalAudioOpl + *out = new(float64) + **out = **in + } + if in.CompressedDigitalVideoOpl != nil { + in, out := &in.CompressedDigitalVideoOpl, &out.CompressedDigitalVideoOpl + *out = new(float64) + **out = **in + } + if in.DigitalVideoOnlyContentRestriction != nil { + in, out := &in.DigitalVideoOnlyContentRestriction, &out.DigitalVideoOnlyContentRestriction + *out = new(bool) + **out = **in + } + if in.ExplicitAnalogTelevisionOutputRestriction != nil { + in, out := &in.ExplicitAnalogTelevisionOutputRestriction, &out.ExplicitAnalogTelevisionOutputRestriction + *out = new(ExplicitAnalogTelevisionOutputRestrictionParameters) + (*in).DeepCopyInto(*out) + } + if in.FirstPlayExpiration != nil { + in, out := &in.FirstPlayExpiration, &out.FirstPlayExpiration + *out = new(string) + **out = **in + } + if in.ImageConstraintForAnalogComponentVideoRestriction != nil { + in, out := &in.ImageConstraintForAnalogComponentVideoRestriction, &out.ImageConstraintForAnalogComponentVideoRestriction + *out = new(bool) + **out = **in + } + if in.ImageConstraintForAnalogComputerMonitorRestriction != nil { + in, out := &in.ImageConstraintForAnalogComputerMonitorRestriction, &out.ImageConstraintForAnalogComputerMonitorRestriction + *out = new(bool) + **out = **in + } + if in.ScmsRestriction != nil { + in, out := 
&in.ScmsRestriction, &out.ScmsRestriction + *out = new(float64) + **out = **in + } + if in.UncompressedDigitalAudioOpl != nil { + in, out := &in.UncompressedDigitalAudioOpl, &out.UncompressedDigitalAudioOpl + *out = new(float64) + **out = **in + } + if in.UncompressedDigitalVideoOpl != nil { + in, out := &in.UncompressedDigitalVideoOpl, &out.UncompressedDigitalVideoOpl + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlayRightParameters. +func (in *PlayRightParameters) DeepCopy() *PlayRightParameters { + if in == nil { + return nil + } + out := new(PlayRightParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlayreadyConfigurationLicenseInitParameters) DeepCopyInto(out *PlayreadyConfigurationLicenseInitParameters) { + *out = *in + if in.AllowTestDevices != nil { + in, out := &in.AllowTestDevices, &out.AllowTestDevices + *out = new(bool) + **out = **in + } + if in.BeginDate != nil { + in, out := &in.BeginDate, &out.BeginDate + *out = new(string) + **out = **in + } + if in.ContentKeyLocationFromHeaderEnabled != nil { + in, out := &in.ContentKeyLocationFromHeaderEnabled, &out.ContentKeyLocationFromHeaderEnabled + *out = new(bool) + **out = **in + } + if in.ContentKeyLocationFromKeyID != nil { + in, out := &in.ContentKeyLocationFromKeyID, &out.ContentKeyLocationFromKeyID + *out = new(string) + **out = **in + } + if in.ContentType != nil { + in, out := &in.ContentType, &out.ContentType + *out = new(string) + **out = **in + } + if in.ExpirationDate != nil { + in, out := &in.ExpirationDate, &out.ExpirationDate + *out = new(string) + **out = **in + } + if in.LicenseType != nil { + in, out := &in.LicenseType, &out.LicenseType + *out = new(string) + **out = **in + } + if in.PlayRight != nil { + in, out := &in.PlayRight, &out.PlayRight + *out = 
new(PlayRightInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RelativeBeginDate != nil { + in, out := &in.RelativeBeginDate, &out.RelativeBeginDate + *out = new(string) + **out = **in + } + if in.RelativeExpirationDate != nil { + in, out := &in.RelativeExpirationDate, &out.RelativeExpirationDate + *out = new(string) + **out = **in + } + if in.SecurityLevel != nil { + in, out := &in.SecurityLevel, &out.SecurityLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlayreadyConfigurationLicenseInitParameters. +func (in *PlayreadyConfigurationLicenseInitParameters) DeepCopy() *PlayreadyConfigurationLicenseInitParameters { + if in == nil { + return nil + } + out := new(PlayreadyConfigurationLicenseInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlayreadyConfigurationLicenseObservation) DeepCopyInto(out *PlayreadyConfigurationLicenseObservation) { + *out = *in + if in.AllowTestDevices != nil { + in, out := &in.AllowTestDevices, &out.AllowTestDevices + *out = new(bool) + **out = **in + } + if in.BeginDate != nil { + in, out := &in.BeginDate, &out.BeginDate + *out = new(string) + **out = **in + } + if in.ContentKeyLocationFromHeaderEnabled != nil { + in, out := &in.ContentKeyLocationFromHeaderEnabled, &out.ContentKeyLocationFromHeaderEnabled + *out = new(bool) + **out = **in + } + if in.ContentKeyLocationFromKeyID != nil { + in, out := &in.ContentKeyLocationFromKeyID, &out.ContentKeyLocationFromKeyID + *out = new(string) + **out = **in + } + if in.ContentType != nil { + in, out := &in.ContentType, &out.ContentType + *out = new(string) + **out = **in + } + if in.ExpirationDate != nil { + in, out := &in.ExpirationDate, &out.ExpirationDate + *out = new(string) + **out = **in + } + if in.LicenseType != nil { + in, out := &in.LicenseType, 
&out.LicenseType + *out = new(string) + **out = **in + } + if in.PlayRight != nil { + in, out := &in.PlayRight, &out.PlayRight + *out = new(PlayRightObservation) + (*in).DeepCopyInto(*out) + } + if in.RelativeBeginDate != nil { + in, out := &in.RelativeBeginDate, &out.RelativeBeginDate + *out = new(string) + **out = **in + } + if in.RelativeExpirationDate != nil { + in, out := &in.RelativeExpirationDate, &out.RelativeExpirationDate + *out = new(string) + **out = **in + } + if in.SecurityLevel != nil { + in, out := &in.SecurityLevel, &out.SecurityLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlayreadyConfigurationLicenseObservation. +func (in *PlayreadyConfigurationLicenseObservation) DeepCopy() *PlayreadyConfigurationLicenseObservation { + if in == nil { + return nil + } + out := new(PlayreadyConfigurationLicenseObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PlayreadyConfigurationLicenseParameters) DeepCopyInto(out *PlayreadyConfigurationLicenseParameters) { + *out = *in + if in.AllowTestDevices != nil { + in, out := &in.AllowTestDevices, &out.AllowTestDevices + *out = new(bool) + **out = **in + } + if in.BeginDate != nil { + in, out := &in.BeginDate, &out.BeginDate + *out = new(string) + **out = **in + } + if in.ContentKeyLocationFromHeaderEnabled != nil { + in, out := &in.ContentKeyLocationFromHeaderEnabled, &out.ContentKeyLocationFromHeaderEnabled + *out = new(bool) + **out = **in + } + if in.ContentKeyLocationFromKeyID != nil { + in, out := &in.ContentKeyLocationFromKeyID, &out.ContentKeyLocationFromKeyID + *out = new(string) + **out = **in + } + if in.ContentType != nil { + in, out := &in.ContentType, &out.ContentType + *out = new(string) + **out = **in + } + if in.ExpirationDate != nil { + in, out := &in.ExpirationDate, &out.ExpirationDate + *out = new(string) + **out = **in + } + if in.GracePeriodSecretRef != nil { + in, out := &in.GracePeriodSecretRef, &out.GracePeriodSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.LicenseType != nil { + in, out := &in.LicenseType, &out.LicenseType + *out = new(string) + **out = **in + } + if in.PlayRight != nil { + in, out := &in.PlayRight, &out.PlayRight + *out = new(PlayRightParameters) + (*in).DeepCopyInto(*out) + } + if in.RelativeBeginDate != nil { + in, out := &in.RelativeBeginDate, &out.RelativeBeginDate + *out = new(string) + **out = **in + } + if in.RelativeExpirationDate != nil { + in, out := &in.RelativeExpirationDate, &out.RelativeExpirationDate + *out = new(string) + **out = **in + } + if in.SecurityLevel != nil { + in, out := &in.SecurityLevel, &out.SecurityLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlayreadyConfigurationLicenseParameters. 
+func (in *PlayreadyConfigurationLicenseParameters) DeepCopy() *PlayreadyConfigurationLicenseParameters { + if in == nil { + return nil + } + out := new(PlayreadyConfigurationLicenseParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PngImageInitParameters) DeepCopyInto(out *PngImageInitParameters) { + *out = *in + if in.KeyFrameInterval != nil { + in, out := &in.KeyFrameInterval, &out.KeyFrameInterval + *out = new(string) + **out = **in + } + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.Layer != nil { + in, out := &in.Layer, &out.Layer + *out = make([]PngImageLayerInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Range != nil { + in, out := &in.Range, &out.Range + *out = new(string) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } + if in.Step != nil { + in, out := &in.Step, &out.Step + *out = new(string) + **out = **in + } + if in.StretchMode != nil { + in, out := &in.StretchMode, &out.StretchMode + *out = new(string) + **out = **in + } + if in.SyncMode != nil { + in, out := &in.SyncMode, &out.SyncMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PngImageInitParameters. +func (in *PngImageInitParameters) DeepCopy() *PngImageInitParameters { + if in == nil { + return nil + } + out := new(PngImageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PngImageLayerInitParameters) DeepCopyInto(out *PngImageLayerInitParameters) { + *out = *in + if in.Height != nil { + in, out := &in.Height, &out.Height + *out = new(string) + **out = **in + } + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.Width != nil { + in, out := &in.Width, &out.Width + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PngImageLayerInitParameters. +func (in *PngImageLayerInitParameters) DeepCopy() *PngImageLayerInitParameters { + if in == nil { + return nil + } + out := new(PngImageLayerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PngImageLayerObservation) DeepCopyInto(out *PngImageLayerObservation) { + *out = *in + if in.Height != nil { + in, out := &in.Height, &out.Height + *out = new(string) + **out = **in + } + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.Width != nil { + in, out := &in.Width, &out.Width + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PngImageLayerObservation. +func (in *PngImageLayerObservation) DeepCopy() *PngImageLayerObservation { + if in == nil { + return nil + } + out := new(PngImageLayerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PngImageLayerParameters) DeepCopyInto(out *PngImageLayerParameters) { + *out = *in + if in.Height != nil { + in, out := &in.Height, &out.Height + *out = new(string) + **out = **in + } + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.Width != nil { + in, out := &in.Width, &out.Width + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PngImageLayerParameters. +func (in *PngImageLayerParameters) DeepCopy() *PngImageLayerParameters { + if in == nil { + return nil + } + out := new(PngImageLayerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PngImageObservation) DeepCopyInto(out *PngImageObservation) { + *out = *in + if in.KeyFrameInterval != nil { + in, out := &in.KeyFrameInterval, &out.KeyFrameInterval + *out = new(string) + **out = **in + } + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.Layer != nil { + in, out := &in.Layer, &out.Layer + *out = make([]PngImageLayerObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Range != nil { + in, out := &in.Range, &out.Range + *out = new(string) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } + if in.Step != nil { + in, out := &in.Step, &out.Step + *out = new(string) + **out = **in + } + if in.StretchMode != nil { + in, out := &in.StretchMode, &out.StretchMode + *out = new(string) + **out = **in + } + if in.SyncMode != nil { + in, out := &in.SyncMode, &out.SyncMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PngImageObservation. 
+func (in *PngImageObservation) DeepCopy() *PngImageObservation { + if in == nil { + return nil + } + out := new(PngImageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PngImageParameters) DeepCopyInto(out *PngImageParameters) { + *out = *in + if in.KeyFrameInterval != nil { + in, out := &in.KeyFrameInterval, &out.KeyFrameInterval + *out = new(string) + **out = **in + } + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.Layer != nil { + in, out := &in.Layer, &out.Layer + *out = make([]PngImageLayerParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Range != nil { + in, out := &in.Range, &out.Range + *out = new(string) + **out = **in + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } + if in.Step != nil { + in, out := &in.Step, &out.Step + *out = new(string) + **out = **in + } + if in.StretchMode != nil { + in, out := &in.StretchMode, &out.StretchMode + *out = new(string) + **out = **in + } + if in.SyncMode != nil { + in, out := &in.SyncMode, &out.SyncMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PngImageParameters. +func (in *PngImageParameters) DeepCopy() *PngImageParameters { + if in == nil { + return nil + } + out := new(PngImageParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PngInitParameters) DeepCopyInto(out *PngInitParameters) { + *out = *in + if in.FilenamePattern != nil { + in, out := &in.FilenamePattern, &out.FilenamePattern + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PngInitParameters. +func (in *PngInitParameters) DeepCopy() *PngInitParameters { + if in == nil { + return nil + } + out := new(PngInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PngObservation) DeepCopyInto(out *PngObservation) { + *out = *in + if in.FilenamePattern != nil { + in, out := &in.FilenamePattern, &out.FilenamePattern + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PngObservation. +func (in *PngObservation) DeepCopy() *PngObservation { + if in == nil { + return nil + } + out := new(PngObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PngParameters) DeepCopyInto(out *PngParameters) { + *out = *in + if in.FilenamePattern != nil { + in, out := &in.FilenamePattern, &out.FilenamePattern + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PngParameters. +func (in *PngParameters) DeepCopy() *PngParameters { + if in == nil { + return nil + } + out := new(PngParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PolicyOptionInitParameters) DeepCopyInto(out *PolicyOptionInitParameters) { + *out = *in + if in.ClearKeyConfigurationEnabled != nil { + in, out := &in.ClearKeyConfigurationEnabled, &out.ClearKeyConfigurationEnabled + *out = new(bool) + **out = **in + } + if in.FairplayConfiguration != nil { + in, out := &in.FairplayConfiguration, &out.FairplayConfiguration + *out = new(FairplayConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OpenRestrictionEnabled != nil { + in, out := &in.OpenRestrictionEnabled, &out.OpenRestrictionEnabled + *out = new(bool) + **out = **in + } + if in.PlayreadyConfigurationLicense != nil { + in, out := &in.PlayreadyConfigurationLicense, &out.PlayreadyConfigurationLicense + *out = make([]PlayreadyConfigurationLicenseInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PlayreadyResponseCustomData != nil { + in, out := &in.PlayreadyResponseCustomData, &out.PlayreadyResponseCustomData + *out = new(string) + **out = **in + } + if in.TokenRestriction != nil { + in, out := &in.TokenRestriction, &out.TokenRestriction + *out = new(TokenRestrictionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.WidevineConfigurationTemplate != nil { + in, out := &in.WidevineConfigurationTemplate, &out.WidevineConfigurationTemplate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyOptionInitParameters. +func (in *PolicyOptionInitParameters) DeepCopy() *PolicyOptionInitParameters { + if in == nil { + return nil + } + out := new(PolicyOptionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PolicyOptionObservation) DeepCopyInto(out *PolicyOptionObservation) { + *out = *in + if in.ClearKeyConfigurationEnabled != nil { + in, out := &in.ClearKeyConfigurationEnabled, &out.ClearKeyConfigurationEnabled + *out = new(bool) + **out = **in + } + if in.FairplayConfiguration != nil { + in, out := &in.FairplayConfiguration, &out.FairplayConfiguration + *out = new(FairplayConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OpenRestrictionEnabled != nil { + in, out := &in.OpenRestrictionEnabled, &out.OpenRestrictionEnabled + *out = new(bool) + **out = **in + } + if in.PlayreadyConfigurationLicense != nil { + in, out := &in.PlayreadyConfigurationLicense, &out.PlayreadyConfigurationLicense + *out = make([]PlayreadyConfigurationLicenseObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PlayreadyResponseCustomData != nil { + in, out := &in.PlayreadyResponseCustomData, &out.PlayreadyResponseCustomData + *out = new(string) + **out = **in + } + if in.TokenRestriction != nil { + in, out := &in.TokenRestriction, &out.TokenRestriction + *out = new(TokenRestrictionObservation) + (*in).DeepCopyInto(*out) + } + if in.WidevineConfigurationTemplate != nil { + in, out := &in.WidevineConfigurationTemplate, &out.WidevineConfigurationTemplate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyOptionObservation. +func (in *PolicyOptionObservation) DeepCopy() *PolicyOptionObservation { + if in == nil { + return nil + } + out := new(PolicyOptionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PolicyOptionParameters) DeepCopyInto(out *PolicyOptionParameters) { + *out = *in + if in.ClearKeyConfigurationEnabled != nil { + in, out := &in.ClearKeyConfigurationEnabled, &out.ClearKeyConfigurationEnabled + *out = new(bool) + **out = **in + } + if in.FairplayConfiguration != nil { + in, out := &in.FairplayConfiguration, &out.FairplayConfiguration + *out = new(FairplayConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OpenRestrictionEnabled != nil { + in, out := &in.OpenRestrictionEnabled, &out.OpenRestrictionEnabled + *out = new(bool) + **out = **in + } + if in.PlayreadyConfigurationLicense != nil { + in, out := &in.PlayreadyConfigurationLicense, &out.PlayreadyConfigurationLicense + *out = make([]PlayreadyConfigurationLicenseParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PlayreadyResponseCustomData != nil { + in, out := &in.PlayreadyResponseCustomData, &out.PlayreadyResponseCustomData + *out = new(string) + **out = **in + } + if in.TokenRestriction != nil { + in, out := &in.TokenRestriction, &out.TokenRestriction + *out = new(TokenRestrictionParameters) + (*in).DeepCopyInto(*out) + } + if in.WidevineConfigurationTemplate != nil { + in, out := &in.WidevineConfigurationTemplate, &out.WidevineConfigurationTemplate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyOptionParameters. +func (in *PolicyOptionParameters) DeepCopy() *PolicyOptionParameters { + if in == nil { + return nil + } + out := new(PolicyOptionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PositionInitParameters) DeepCopyInto(out *PositionInitParameters) { + *out = *in + if in.Height != nil { + in, out := &in.Height, &out.Height + *out = new(string) + **out = **in + } + if in.Left != nil { + in, out := &in.Left, &out.Left + *out = new(string) + **out = **in + } + if in.Top != nil { + in, out := &in.Top, &out.Top + *out = new(string) + **out = **in + } + if in.Width != nil { + in, out := &in.Width, &out.Width + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PositionInitParameters. +func (in *PositionInitParameters) DeepCopy() *PositionInitParameters { + if in == nil { + return nil + } + out := new(PositionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PositionObservation) DeepCopyInto(out *PositionObservation) { + *out = *in + if in.Height != nil { + in, out := &in.Height, &out.Height + *out = new(string) + **out = **in + } + if in.Left != nil { + in, out := &in.Left, &out.Left + *out = new(string) + **out = **in + } + if in.Top != nil { + in, out := &in.Top, &out.Top + *out = new(string) + **out = **in + } + if in.Width != nil { + in, out := &in.Width, &out.Width + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PositionObservation. +func (in *PositionObservation) DeepCopy() *PositionObservation { + if in == nil { + return nil + } + out := new(PositionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PositionParameters) DeepCopyInto(out *PositionParameters) { + *out = *in + if in.Height != nil { + in, out := &in.Height, &out.Height + *out = new(string) + **out = **in + } + if in.Left != nil { + in, out := &in.Left, &out.Left + *out = new(string) + **out = **in + } + if in.Top != nil { + in, out := &in.Top, &out.Top + *out = new(string) + **out = **in + } + if in.Width != nil { + in, out := &in.Width, &out.Width + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PositionParameters. +func (in *PositionParameters) DeepCopy() *PositionParameters { + if in == nil { + return nil + } + out := new(PositionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PresentationTimeRangeInitParameters) DeepCopyInto(out *PresentationTimeRangeInitParameters) { + *out = *in + if in.EndInUnits != nil { + in, out := &in.EndInUnits, &out.EndInUnits + *out = new(float64) + **out = **in + } + if in.ForceEnd != nil { + in, out := &in.ForceEnd, &out.ForceEnd + *out = new(bool) + **out = **in + } + if in.LiveBackoffInUnits != nil { + in, out := &in.LiveBackoffInUnits, &out.LiveBackoffInUnits + *out = new(float64) + **out = **in + } + if in.PresentationWindowInUnits != nil { + in, out := &in.PresentationWindowInUnits, &out.PresentationWindowInUnits + *out = new(float64) + **out = **in + } + if in.StartInUnits != nil { + in, out := &in.StartInUnits, &out.StartInUnits + *out = new(float64) + **out = **in + } + if in.UnitTimescaleInMiliseconds != nil { + in, out := &in.UnitTimescaleInMiliseconds, &out.UnitTimescaleInMiliseconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PresentationTimeRangeInitParameters. 
+func (in *PresentationTimeRangeInitParameters) DeepCopy() *PresentationTimeRangeInitParameters { + if in == nil { + return nil + } + out := new(PresentationTimeRangeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PresentationTimeRangeObservation) DeepCopyInto(out *PresentationTimeRangeObservation) { + *out = *in + if in.EndInUnits != nil { + in, out := &in.EndInUnits, &out.EndInUnits + *out = new(float64) + **out = **in + } + if in.ForceEnd != nil { + in, out := &in.ForceEnd, &out.ForceEnd + *out = new(bool) + **out = **in + } + if in.LiveBackoffInUnits != nil { + in, out := &in.LiveBackoffInUnits, &out.LiveBackoffInUnits + *out = new(float64) + **out = **in + } + if in.PresentationWindowInUnits != nil { + in, out := &in.PresentationWindowInUnits, &out.PresentationWindowInUnits + *out = new(float64) + **out = **in + } + if in.StartInUnits != nil { + in, out := &in.StartInUnits, &out.StartInUnits + *out = new(float64) + **out = **in + } + if in.UnitTimescaleInMiliseconds != nil { + in, out := &in.UnitTimescaleInMiliseconds, &out.UnitTimescaleInMiliseconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PresentationTimeRangeObservation. +func (in *PresentationTimeRangeObservation) DeepCopy() *PresentationTimeRangeObservation { + if in == nil { + return nil + } + out := new(PresentationTimeRangeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PresentationTimeRangeParameters) DeepCopyInto(out *PresentationTimeRangeParameters) { + *out = *in + if in.EndInUnits != nil { + in, out := &in.EndInUnits, &out.EndInUnits + *out = new(float64) + **out = **in + } + if in.ForceEnd != nil { + in, out := &in.ForceEnd, &out.ForceEnd + *out = new(bool) + **out = **in + } + if in.LiveBackoffInUnits != nil { + in, out := &in.LiveBackoffInUnits, &out.LiveBackoffInUnits + *out = new(float64) + **out = **in + } + if in.PresentationWindowInUnits != nil { + in, out := &in.PresentationWindowInUnits, &out.PresentationWindowInUnits + *out = new(float64) + **out = **in + } + if in.StartInUnits != nil { + in, out := &in.StartInUnits, &out.StartInUnits + *out = new(float64) + **out = **in + } + if in.UnitTimescaleInMiliseconds != nil { + in, out := &in.UnitTimescaleInMiliseconds, &out.UnitTimescaleInMiliseconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PresentationTimeRangeParameters. +func (in *PresentationTimeRangeParameters) DeepCopy() *PresentationTimeRangeParameters { + if in == nil { + return nil + } + out := new(PresentationTimeRangeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PresetConfigurationInitParameters) DeepCopyInto(out *PresetConfigurationInitParameters) { + *out = *in + if in.Complexity != nil { + in, out := &in.Complexity, &out.Complexity + *out = new(string) + **out = **in + } + if in.InterleaveOutput != nil { + in, out := &in.InterleaveOutput, &out.InterleaveOutput + *out = new(string) + **out = **in + } + if in.KeyFrameIntervalInSeconds != nil { + in, out := &in.KeyFrameIntervalInSeconds, &out.KeyFrameIntervalInSeconds + *out = new(float64) + **out = **in + } + if in.MaxBitrateBps != nil { + in, out := &in.MaxBitrateBps, &out.MaxBitrateBps + *out = new(float64) + **out = **in + } + if in.MaxHeight != nil { + in, out := &in.MaxHeight, &out.MaxHeight + *out = new(float64) + **out = **in + } + if in.MaxLayers != nil { + in, out := &in.MaxLayers, &out.MaxLayers + *out = new(float64) + **out = **in + } + if in.MinBitrateBps != nil { + in, out := &in.MinBitrateBps, &out.MinBitrateBps + *out = new(float64) + **out = **in + } + if in.MinHeight != nil { + in, out := &in.MinHeight, &out.MinHeight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PresetConfigurationInitParameters. +func (in *PresetConfigurationInitParameters) DeepCopy() *PresetConfigurationInitParameters { + if in == nil { + return nil + } + out := new(PresetConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PresetConfigurationObservation) DeepCopyInto(out *PresetConfigurationObservation) { + *out = *in + if in.Complexity != nil { + in, out := &in.Complexity, &out.Complexity + *out = new(string) + **out = **in + } + if in.InterleaveOutput != nil { + in, out := &in.InterleaveOutput, &out.InterleaveOutput + *out = new(string) + **out = **in + } + if in.KeyFrameIntervalInSeconds != nil { + in, out := &in.KeyFrameIntervalInSeconds, &out.KeyFrameIntervalInSeconds + *out = new(float64) + **out = **in + } + if in.MaxBitrateBps != nil { + in, out := &in.MaxBitrateBps, &out.MaxBitrateBps + *out = new(float64) + **out = **in + } + if in.MaxHeight != nil { + in, out := &in.MaxHeight, &out.MaxHeight + *out = new(float64) + **out = **in + } + if in.MaxLayers != nil { + in, out := &in.MaxLayers, &out.MaxLayers + *out = new(float64) + **out = **in + } + if in.MinBitrateBps != nil { + in, out := &in.MinBitrateBps, &out.MinBitrateBps + *out = new(float64) + **out = **in + } + if in.MinHeight != nil { + in, out := &in.MinHeight, &out.MinHeight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PresetConfigurationObservation. +func (in *PresetConfigurationObservation) DeepCopy() *PresetConfigurationObservation { + if in == nil { + return nil + } + out := new(PresetConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PresetConfigurationParameters) DeepCopyInto(out *PresetConfigurationParameters) { + *out = *in + if in.Complexity != nil { + in, out := &in.Complexity, &out.Complexity + *out = new(string) + **out = **in + } + if in.InterleaveOutput != nil { + in, out := &in.InterleaveOutput, &out.InterleaveOutput + *out = new(string) + **out = **in + } + if in.KeyFrameIntervalInSeconds != nil { + in, out := &in.KeyFrameIntervalInSeconds, &out.KeyFrameIntervalInSeconds + *out = new(float64) + **out = **in + } + if in.MaxBitrateBps != nil { + in, out := &in.MaxBitrateBps, &out.MaxBitrateBps + *out = new(float64) + **out = **in + } + if in.MaxHeight != nil { + in, out := &in.MaxHeight, &out.MaxHeight + *out = new(float64) + **out = **in + } + if in.MaxLayers != nil { + in, out := &in.MaxLayers, &out.MaxLayers + *out = new(float64) + **out = **in + } + if in.MinBitrateBps != nil { + in, out := &in.MinBitrateBps, &out.MinBitrateBps + *out = new(float64) + **out = **in + } + if in.MinHeight != nil { + in, out := &in.MinHeight, &out.MinHeight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PresetConfigurationParameters. +func (in *PresetConfigurationParameters) DeepCopy() *PresetConfigurationParameters { + if in == nil { + return nil + } + out := new(PresetConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PreviewEndpointInitParameters) DeepCopyInto(out *PreviewEndpointInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreviewEndpointInitParameters. 
+func (in *PreviewEndpointInitParameters) DeepCopy() *PreviewEndpointInitParameters { + if in == nil { + return nil + } + out := new(PreviewEndpointInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PreviewEndpointObservation) DeepCopyInto(out *PreviewEndpointObservation) { + *out = *in + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreviewEndpointObservation. +func (in *PreviewEndpointObservation) DeepCopy() *PreviewEndpointObservation { + if in == nil { + return nil + } + out := new(PreviewEndpointObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PreviewEndpointParameters) DeepCopyInto(out *PreviewEndpointParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreviewEndpointParameters. +func (in *PreviewEndpointParameters) DeepCopy() *PreviewEndpointParameters { + if in == nil { + return nil + } + out := new(PreviewEndpointParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PreviewIPAccessControlAllowInitParameters) DeepCopyInto(out *PreviewIPAccessControlAllowInitParameters) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SubnetPrefixLength != nil { + in, out := &in.SubnetPrefixLength, &out.SubnetPrefixLength + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreviewIPAccessControlAllowInitParameters. +func (in *PreviewIPAccessControlAllowInitParameters) DeepCopy() *PreviewIPAccessControlAllowInitParameters { + if in == nil { + return nil + } + out := new(PreviewIPAccessControlAllowInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PreviewIPAccessControlAllowObservation) DeepCopyInto(out *PreviewIPAccessControlAllowObservation) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SubnetPrefixLength != nil { + in, out := &in.SubnetPrefixLength, &out.SubnetPrefixLength + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreviewIPAccessControlAllowObservation. +func (in *PreviewIPAccessControlAllowObservation) DeepCopy() *PreviewIPAccessControlAllowObservation { + if in == nil { + return nil + } + out := new(PreviewIPAccessControlAllowObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PreviewIPAccessControlAllowParameters) DeepCopyInto(out *PreviewIPAccessControlAllowParameters) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SubnetPrefixLength != nil { + in, out := &in.SubnetPrefixLength, &out.SubnetPrefixLength + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreviewIPAccessControlAllowParameters. +func (in *PreviewIPAccessControlAllowParameters) DeepCopy() *PreviewIPAccessControlAllowParameters { + if in == nil { + return nil + } + out := new(PreviewIPAccessControlAllowParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PreviewInitParameters) DeepCopyInto(out *PreviewInitParameters) { + *out = *in + if in.AlternativeMediaID != nil { + in, out := &in.AlternativeMediaID, &out.AlternativeMediaID + *out = new(string) + **out = **in + } + if in.IPAccessControlAllow != nil { + in, out := &in.IPAccessControlAllow, &out.IPAccessControlAllow + *out = make([]PreviewIPAccessControlAllowInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PreviewLocator != nil { + in, out := &in.PreviewLocator, &out.PreviewLocator + *out = new(string) + **out = **in + } + if in.StreamingPolicyName != nil { + in, out := &in.StreamingPolicyName, &out.StreamingPolicyName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreviewInitParameters. 
+func (in *PreviewInitParameters) DeepCopy() *PreviewInitParameters { + if in == nil { + return nil + } + out := new(PreviewInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PreviewObservation) DeepCopyInto(out *PreviewObservation) { + *out = *in + if in.AlternativeMediaID != nil { + in, out := &in.AlternativeMediaID, &out.AlternativeMediaID + *out = new(string) + **out = **in + } + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = make([]PreviewEndpointObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAccessControlAllow != nil { + in, out := &in.IPAccessControlAllow, &out.IPAccessControlAllow + *out = make([]PreviewIPAccessControlAllowObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PreviewLocator != nil { + in, out := &in.PreviewLocator, &out.PreviewLocator + *out = new(string) + **out = **in + } + if in.StreamingPolicyName != nil { + in, out := &in.StreamingPolicyName, &out.StreamingPolicyName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreviewObservation. +func (in *PreviewObservation) DeepCopy() *PreviewObservation { + if in == nil { + return nil + } + out := new(PreviewObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PreviewParameters) DeepCopyInto(out *PreviewParameters) { + *out = *in + if in.AlternativeMediaID != nil { + in, out := &in.AlternativeMediaID, &out.AlternativeMediaID + *out = new(string) + **out = **in + } + if in.IPAccessControlAllow != nil { + in, out := &in.IPAccessControlAllow, &out.IPAccessControlAllow + *out = make([]PreviewIPAccessControlAllowParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PreviewLocator != nil { + in, out := &in.PreviewLocator, &out.PreviewLocator + *out = new(string) + **out = **in + } + if in.StreamingPolicyName != nil { + in, out := &in.StreamingPolicyName, &out.StreamingPolicyName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreviewParameters. +func (in *PreviewParameters) DeepCopy() *PreviewParameters { + if in == nil { + return nil + } + out := new(PreviewParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RequiredClaimInitParameters) DeepCopyInto(out *RequiredClaimInitParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequiredClaimInitParameters. +func (in *RequiredClaimInitParameters) DeepCopy() *RequiredClaimInitParameters { + if in == nil { + return nil + } + out := new(RequiredClaimInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RequiredClaimObservation) DeepCopyInto(out *RequiredClaimObservation) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequiredClaimObservation. +func (in *RequiredClaimObservation) DeepCopy() *RequiredClaimObservation { + if in == nil { + return nil + } + out := new(RequiredClaimObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RequiredClaimParameters) DeepCopyInto(out *RequiredClaimParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequiredClaimParameters. +func (in *RequiredClaimParameters) DeepCopy() *RequiredClaimParameters { + if in == nil { + return nil + } + out := new(RequiredClaimParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServicesAccount) DeepCopyInto(out *ServicesAccount) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicesAccount. 
+func (in *ServicesAccount) DeepCopy() *ServicesAccount { + if in == nil { + return nil + } + out := new(ServicesAccount) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServicesAccount) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServicesAccountFilter) DeepCopyInto(out *ServicesAccountFilter) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicesAccountFilter. +func (in *ServicesAccountFilter) DeepCopy() *ServicesAccountFilter { + if in == nil { + return nil + } + out := new(ServicesAccountFilter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServicesAccountFilter) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServicesAccountFilterInitParameters) DeepCopyInto(out *ServicesAccountFilterInitParameters) { + *out = *in + if in.FirstQualityBitrate != nil { + in, out := &in.FirstQualityBitrate, &out.FirstQualityBitrate + *out = new(float64) + **out = **in + } + if in.PresentationTimeRange != nil { + in, out := &in.PresentationTimeRange, &out.PresentationTimeRange + *out = new(ServicesAccountFilterPresentationTimeRangeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TrackSelection != nil { + in, out := &in.TrackSelection, &out.TrackSelection + *out = make([]ServicesAccountFilterTrackSelectionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicesAccountFilterInitParameters. +func (in *ServicesAccountFilterInitParameters) DeepCopy() *ServicesAccountFilterInitParameters { + if in == nil { + return nil + } + out := new(ServicesAccountFilterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServicesAccountFilterList) DeepCopyInto(out *ServicesAccountFilterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ServicesAccountFilter, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicesAccountFilterList. +func (in *ServicesAccountFilterList) DeepCopy() *ServicesAccountFilterList { + if in == nil { + return nil + } + out := new(ServicesAccountFilterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ServicesAccountFilterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServicesAccountFilterObservation) DeepCopyInto(out *ServicesAccountFilterObservation) { + *out = *in + if in.FirstQualityBitrate != nil { + in, out := &in.FirstQualityBitrate, &out.FirstQualityBitrate + *out = new(float64) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.MediaServicesAccountName != nil { + in, out := &in.MediaServicesAccountName, &out.MediaServicesAccountName + *out = new(string) + **out = **in + } + if in.PresentationTimeRange != nil { + in, out := &in.PresentationTimeRange, &out.PresentationTimeRange + *out = new(ServicesAccountFilterPresentationTimeRangeObservation) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.TrackSelection != nil { + in, out := &in.TrackSelection, &out.TrackSelection + *out = make([]ServicesAccountFilterTrackSelectionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicesAccountFilterObservation. +func (in *ServicesAccountFilterObservation) DeepCopy() *ServicesAccountFilterObservation { + if in == nil { + return nil + } + out := new(ServicesAccountFilterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServicesAccountFilterParameters) DeepCopyInto(out *ServicesAccountFilterParameters) { + *out = *in + if in.FirstQualityBitrate != nil { + in, out := &in.FirstQualityBitrate, &out.FirstQualityBitrate + *out = new(float64) + **out = **in + } + if in.MediaServicesAccountName != nil { + in, out := &in.MediaServicesAccountName, &out.MediaServicesAccountName + *out = new(string) + **out = **in + } + if in.MediaServicesAccountNameRef != nil { + in, out := &in.MediaServicesAccountNameRef, &out.MediaServicesAccountNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.MediaServicesAccountNameSelector != nil { + in, out := &in.MediaServicesAccountNameSelector, &out.MediaServicesAccountNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PresentationTimeRange != nil { + in, out := &in.PresentationTimeRange, &out.PresentationTimeRange + *out = new(ServicesAccountFilterPresentationTimeRangeParameters) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.TrackSelection != nil { + in, out := &in.TrackSelection, &out.TrackSelection + *out = make([]ServicesAccountFilterTrackSelectionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicesAccountFilterParameters. 
+func (in *ServicesAccountFilterParameters) DeepCopy() *ServicesAccountFilterParameters { + if in == nil { + return nil + } + out := new(ServicesAccountFilterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServicesAccountFilterPresentationTimeRangeInitParameters) DeepCopyInto(out *ServicesAccountFilterPresentationTimeRangeInitParameters) { + *out = *in + if in.EndInUnits != nil { + in, out := &in.EndInUnits, &out.EndInUnits + *out = new(float64) + **out = **in + } + if in.ForceEnd != nil { + in, out := &in.ForceEnd, &out.ForceEnd + *out = new(bool) + **out = **in + } + if in.LiveBackoffInUnits != nil { + in, out := &in.LiveBackoffInUnits, &out.LiveBackoffInUnits + *out = new(float64) + **out = **in + } + if in.PresentationWindowInUnits != nil { + in, out := &in.PresentationWindowInUnits, &out.PresentationWindowInUnits + *out = new(float64) + **out = **in + } + if in.StartInUnits != nil { + in, out := &in.StartInUnits, &out.StartInUnits + *out = new(float64) + **out = **in + } + if in.UnitTimescaleInMilliseconds != nil { + in, out := &in.UnitTimescaleInMilliseconds, &out.UnitTimescaleInMilliseconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicesAccountFilterPresentationTimeRangeInitParameters. +func (in *ServicesAccountFilterPresentationTimeRangeInitParameters) DeepCopy() *ServicesAccountFilterPresentationTimeRangeInitParameters { + if in == nil { + return nil + } + out := new(ServicesAccountFilterPresentationTimeRangeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServicesAccountFilterPresentationTimeRangeObservation) DeepCopyInto(out *ServicesAccountFilterPresentationTimeRangeObservation) { + *out = *in + if in.EndInUnits != nil { + in, out := &in.EndInUnits, &out.EndInUnits + *out = new(float64) + **out = **in + } + if in.ForceEnd != nil { + in, out := &in.ForceEnd, &out.ForceEnd + *out = new(bool) + **out = **in + } + if in.LiveBackoffInUnits != nil { + in, out := &in.LiveBackoffInUnits, &out.LiveBackoffInUnits + *out = new(float64) + **out = **in + } + if in.PresentationWindowInUnits != nil { + in, out := &in.PresentationWindowInUnits, &out.PresentationWindowInUnits + *out = new(float64) + **out = **in + } + if in.StartInUnits != nil { + in, out := &in.StartInUnits, &out.StartInUnits + *out = new(float64) + **out = **in + } + if in.UnitTimescaleInMilliseconds != nil { + in, out := &in.UnitTimescaleInMilliseconds, &out.UnitTimescaleInMilliseconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicesAccountFilterPresentationTimeRangeObservation. +func (in *ServicesAccountFilterPresentationTimeRangeObservation) DeepCopy() *ServicesAccountFilterPresentationTimeRangeObservation { + if in == nil { + return nil + } + out := new(ServicesAccountFilterPresentationTimeRangeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServicesAccountFilterPresentationTimeRangeParameters) DeepCopyInto(out *ServicesAccountFilterPresentationTimeRangeParameters) { + *out = *in + if in.EndInUnits != nil { + in, out := &in.EndInUnits, &out.EndInUnits + *out = new(float64) + **out = **in + } + if in.ForceEnd != nil { + in, out := &in.ForceEnd, &out.ForceEnd + *out = new(bool) + **out = **in + } + if in.LiveBackoffInUnits != nil { + in, out := &in.LiveBackoffInUnits, &out.LiveBackoffInUnits + *out = new(float64) + **out = **in + } + if in.PresentationWindowInUnits != nil { + in, out := &in.PresentationWindowInUnits, &out.PresentationWindowInUnits + *out = new(float64) + **out = **in + } + if in.StartInUnits != nil { + in, out := &in.StartInUnits, &out.StartInUnits + *out = new(float64) + **out = **in + } + if in.UnitTimescaleInMilliseconds != nil { + in, out := &in.UnitTimescaleInMilliseconds, &out.UnitTimescaleInMilliseconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicesAccountFilterPresentationTimeRangeParameters. +func (in *ServicesAccountFilterPresentationTimeRangeParameters) DeepCopy() *ServicesAccountFilterPresentationTimeRangeParameters { + if in == nil { + return nil + } + out := new(ServicesAccountFilterPresentationTimeRangeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServicesAccountFilterSpec) DeepCopyInto(out *ServicesAccountFilterSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicesAccountFilterSpec. 
+func (in *ServicesAccountFilterSpec) DeepCopy() *ServicesAccountFilterSpec { + if in == nil { + return nil + } + out := new(ServicesAccountFilterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServicesAccountFilterStatus) DeepCopyInto(out *ServicesAccountFilterStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicesAccountFilterStatus. +func (in *ServicesAccountFilterStatus) DeepCopy() *ServicesAccountFilterStatus { + if in == nil { + return nil + } + out := new(ServicesAccountFilterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServicesAccountFilterTrackSelectionInitParameters) DeepCopyInto(out *ServicesAccountFilterTrackSelectionInitParameters) { + *out = *in + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = make([]TrackSelectionConditionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicesAccountFilterTrackSelectionInitParameters. +func (in *ServicesAccountFilterTrackSelectionInitParameters) DeepCopy() *ServicesAccountFilterTrackSelectionInitParameters { + if in == nil { + return nil + } + out := new(ServicesAccountFilterTrackSelectionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServicesAccountFilterTrackSelectionObservation) DeepCopyInto(out *ServicesAccountFilterTrackSelectionObservation) { + *out = *in + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = make([]TrackSelectionConditionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicesAccountFilterTrackSelectionObservation. +func (in *ServicesAccountFilterTrackSelectionObservation) DeepCopy() *ServicesAccountFilterTrackSelectionObservation { + if in == nil { + return nil + } + out := new(ServicesAccountFilterTrackSelectionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServicesAccountFilterTrackSelectionParameters) DeepCopyInto(out *ServicesAccountFilterTrackSelectionParameters) { + *out = *in + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = make([]TrackSelectionConditionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicesAccountFilterTrackSelectionParameters. +func (in *ServicesAccountFilterTrackSelectionParameters) DeepCopy() *ServicesAccountFilterTrackSelectionParameters { + if in == nil { + return nil + } + out := new(ServicesAccountFilterTrackSelectionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServicesAccountInitParameters) DeepCopyInto(out *ServicesAccountInitParameters) { + *out = *in + if in.Encryption != nil { + in, out := &in.Encryption, &out.Encryption + *out = new(EncryptionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.KeyDeliveryAccessControl != nil { + in, out := &in.KeyDeliveryAccessControl, &out.KeyDeliveryAccessControl + *out = new(KeyDeliveryAccessControlInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.StorageAccount != nil { + in, out := &in.StorageAccount, &out.StorageAccount + *out = make([]StorageAccountInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StorageAuthenticationType != nil { + in, out := &in.StorageAuthenticationType, &out.StorageAuthenticationType + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicesAccountInitParameters. +func (in *ServicesAccountInitParameters) DeepCopy() *ServicesAccountInitParameters { + if in == nil { + return nil + } + out := new(ServicesAccountInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServicesAccountList) DeepCopyInto(out *ServicesAccountList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ServicesAccount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicesAccountList. +func (in *ServicesAccountList) DeepCopy() *ServicesAccountList { + if in == nil { + return nil + } + out := new(ServicesAccountList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServicesAccountList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServicesAccountObservation) DeepCopyInto(out *ServicesAccountObservation) { + *out = *in + if in.Encryption != nil { + in, out := &in.Encryption, &out.Encryption + *out = new(EncryptionObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.KeyDeliveryAccessControl != nil { + in, out := &in.KeyDeliveryAccessControl, &out.KeyDeliveryAccessControl + *out = new(KeyDeliveryAccessControlObservation) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + 
*out = new(string) + **out = **in + } + if in.StorageAccount != nil { + in, out := &in.StorageAccount, &out.StorageAccount + *out = make([]StorageAccountObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StorageAuthenticationType != nil { + in, out := &in.StorageAuthenticationType, &out.StorageAuthenticationType + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicesAccountObservation. +func (in *ServicesAccountObservation) DeepCopy() *ServicesAccountObservation { + if in == nil { + return nil + } + out := new(ServicesAccountObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServicesAccountParameters) DeepCopyInto(out *ServicesAccountParameters) { + *out = *in + if in.Encryption != nil { + in, out := &in.Encryption, &out.Encryption + *out = new(EncryptionParameters) + (*in).DeepCopyInto(*out) + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.KeyDeliveryAccessControl != nil { + in, out := &in.KeyDeliveryAccessControl, &out.KeyDeliveryAccessControl + *out = new(KeyDeliveryAccessControlParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StorageAccount != nil { + in, out := &in.StorageAccount, &out.StorageAccount + *out = make([]StorageAccountParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StorageAuthenticationType != nil { + in, out := &in.StorageAuthenticationType, &out.StorageAuthenticationType + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy 
is an autogenerated deepcopy function, copying the receiver, creating a new ServicesAccountParameters. +func (in *ServicesAccountParameters) DeepCopy() *ServicesAccountParameters { + if in == nil { + return nil + } + out := new(ServicesAccountParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServicesAccountSpec) DeepCopyInto(out *ServicesAccountSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicesAccountSpec. +func (in *ServicesAccountSpec) DeepCopy() *ServicesAccountSpec { + if in == nil { + return nil + } + out := new(ServicesAccountSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServicesAccountStatus) DeepCopyInto(out *ServicesAccountStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicesAccountStatus. +func (in *ServicesAccountStatus) DeepCopy() *ServicesAccountStatus { + if in == nil { + return nil + } + out := new(ServicesAccountStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SkuInitParameters) DeepCopyInto(out *SkuInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SkuInitParameters. 
+func (in *SkuInitParameters) DeepCopy() *SkuInitParameters { + if in == nil { + return nil + } + out := new(SkuInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SkuObservation) DeepCopyInto(out *SkuObservation) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SkuObservation. +func (in *SkuObservation) DeepCopy() *SkuObservation { + if in == nil { + return nil + } + out := new(SkuObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SkuParameters) DeepCopyInto(out *SkuParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SkuParameters. +func (in *SkuParameters) DeepCopy() *SkuParameters { + if in == nil { + return nil + } + out := new(SkuParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StorageAccountInitParameters) DeepCopyInto(out *StorageAccountInitParameters) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IDRef != nil { + in, out := &in.IDRef, &out.IDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IDSelector != nil { + in, out := &in.IDSelector, &out.IDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.IsPrimary != nil { + in, out := &in.IsPrimary, &out.IsPrimary + *out = new(bool) + **out = **in + } + if in.ManagedIdentity != nil { + in, out := &in.ManagedIdentity, &out.ManagedIdentity + *out = new(StorageAccountManagedIdentityInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageAccountInitParameters. +func (in *StorageAccountInitParameters) DeepCopy() *StorageAccountInitParameters { + if in == nil { + return nil + } + out := new(StorageAccountInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageAccountManagedIdentityInitParameters) DeepCopyInto(out *StorageAccountManagedIdentityInitParameters) { + *out = *in + if in.UseSystemAssignedIdentity != nil { + in, out := &in.UseSystemAssignedIdentity, &out.UseSystemAssignedIdentity + *out = new(bool) + **out = **in + } + if in.UserAssignedIdentityID != nil { + in, out := &in.UserAssignedIdentityID, &out.UserAssignedIdentityID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageAccountManagedIdentityInitParameters. 
+func (in *StorageAccountManagedIdentityInitParameters) DeepCopy() *StorageAccountManagedIdentityInitParameters { + if in == nil { + return nil + } + out := new(StorageAccountManagedIdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageAccountManagedIdentityObservation) DeepCopyInto(out *StorageAccountManagedIdentityObservation) { + *out = *in + if in.UseSystemAssignedIdentity != nil { + in, out := &in.UseSystemAssignedIdentity, &out.UseSystemAssignedIdentity + *out = new(bool) + **out = **in + } + if in.UserAssignedIdentityID != nil { + in, out := &in.UserAssignedIdentityID, &out.UserAssignedIdentityID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageAccountManagedIdentityObservation. +func (in *StorageAccountManagedIdentityObservation) DeepCopy() *StorageAccountManagedIdentityObservation { + if in == nil { + return nil + } + out := new(StorageAccountManagedIdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageAccountManagedIdentityParameters) DeepCopyInto(out *StorageAccountManagedIdentityParameters) { + *out = *in + if in.UseSystemAssignedIdentity != nil { + in, out := &in.UseSystemAssignedIdentity, &out.UseSystemAssignedIdentity + *out = new(bool) + **out = **in + } + if in.UserAssignedIdentityID != nil { + in, out := &in.UserAssignedIdentityID, &out.UserAssignedIdentityID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageAccountManagedIdentityParameters. 
+func (in *StorageAccountManagedIdentityParameters) DeepCopy() *StorageAccountManagedIdentityParameters { + if in == nil { + return nil + } + out := new(StorageAccountManagedIdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageAccountObservation) DeepCopyInto(out *StorageAccountObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IsPrimary != nil { + in, out := &in.IsPrimary, &out.IsPrimary + *out = new(bool) + **out = **in + } + if in.ManagedIdentity != nil { + in, out := &in.ManagedIdentity, &out.ManagedIdentity + *out = new(StorageAccountManagedIdentityObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageAccountObservation. +func (in *StorageAccountObservation) DeepCopy() *StorageAccountObservation { + if in == nil { + return nil + } + out := new(StorageAccountObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StorageAccountParameters) DeepCopyInto(out *StorageAccountParameters) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IDRef != nil { + in, out := &in.IDRef, &out.IDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IDSelector != nil { + in, out := &in.IDSelector, &out.IDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.IsPrimary != nil { + in, out := &in.IsPrimary, &out.IsPrimary + *out = new(bool) + **out = **in + } + if in.ManagedIdentity != nil { + in, out := &in.ManagedIdentity, &out.ManagedIdentity + *out = new(StorageAccountManagedIdentityParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageAccountParameters. +func (in *StorageAccountParameters) DeepCopy() *StorageAccountParameters { + if in == nil { + return nil + } + out := new(StorageAccountParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamingEndpoint) DeepCopyInto(out *StreamingEndpoint) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamingEndpoint. +func (in *StreamingEndpoint) DeepCopy() *StreamingEndpoint { + if in == nil { + return nil + } + out := new(StreamingEndpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StreamingEndpoint) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *StreamingEndpointCrossSiteAccessPolicyInitParameters) DeepCopyInto(out *StreamingEndpointCrossSiteAccessPolicyInitParameters) { + *out = *in + if in.ClientAccessPolicy != nil { + in, out := &in.ClientAccessPolicy, &out.ClientAccessPolicy + *out = new(string) + **out = **in + } + if in.CrossDomainPolicy != nil { + in, out := &in.CrossDomainPolicy, &out.CrossDomainPolicy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamingEndpointCrossSiteAccessPolicyInitParameters. +func (in *StreamingEndpointCrossSiteAccessPolicyInitParameters) DeepCopy() *StreamingEndpointCrossSiteAccessPolicyInitParameters { + if in == nil { + return nil + } + out := new(StreamingEndpointCrossSiteAccessPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamingEndpointCrossSiteAccessPolicyObservation) DeepCopyInto(out *StreamingEndpointCrossSiteAccessPolicyObservation) { + *out = *in + if in.ClientAccessPolicy != nil { + in, out := &in.ClientAccessPolicy, &out.ClientAccessPolicy + *out = new(string) + **out = **in + } + if in.CrossDomainPolicy != nil { + in, out := &in.CrossDomainPolicy, &out.CrossDomainPolicy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamingEndpointCrossSiteAccessPolicyObservation. +func (in *StreamingEndpointCrossSiteAccessPolicyObservation) DeepCopy() *StreamingEndpointCrossSiteAccessPolicyObservation { + if in == nil { + return nil + } + out := new(StreamingEndpointCrossSiteAccessPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StreamingEndpointCrossSiteAccessPolicyParameters) DeepCopyInto(out *StreamingEndpointCrossSiteAccessPolicyParameters) { + *out = *in + if in.ClientAccessPolicy != nil { + in, out := &in.ClientAccessPolicy, &out.ClientAccessPolicy + *out = new(string) + **out = **in + } + if in.CrossDomainPolicy != nil { + in, out := &in.CrossDomainPolicy, &out.CrossDomainPolicy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamingEndpointCrossSiteAccessPolicyParameters. +func (in *StreamingEndpointCrossSiteAccessPolicyParameters) DeepCopy() *StreamingEndpointCrossSiteAccessPolicyParameters { + if in == nil { + return nil + } + out := new(StreamingEndpointCrossSiteAccessPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamingEndpointInitParameters) DeepCopyInto(out *StreamingEndpointInitParameters) { + *out = *in + if in.AccessControl != nil { + in, out := &in.AccessControl, &out.AccessControl + *out = new(AccessControlInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AutoStartEnabled != nil { + in, out := &in.AutoStartEnabled, &out.AutoStartEnabled + *out = new(bool) + **out = **in + } + if in.CdnEnabled != nil { + in, out := &in.CdnEnabled, &out.CdnEnabled + *out = new(bool) + **out = **in + } + if in.CdnProfile != nil { + in, out := &in.CdnProfile, &out.CdnProfile + *out = new(string) + **out = **in + } + if in.CdnProvider != nil { + in, out := &in.CdnProvider, &out.CdnProvider + *out = new(string) + **out = **in + } + if in.CrossSiteAccessPolicy != nil { + in, out := &in.CrossSiteAccessPolicy, &out.CrossSiteAccessPolicy + *out = new(StreamingEndpointCrossSiteAccessPolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomHostNames != nil { + in, out := &in.CustomHostNames, &out.CustomHostNames + *out = make([]*string, len(*in)) + 
for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MaxCacheAgeSeconds != nil { + in, out := &in.MaxCacheAgeSeconds, &out.MaxCacheAgeSeconds + *out = new(float64) + **out = **in + } + if in.ScaleUnits != nil { + in, out := &in.ScaleUnits, &out.ScaleUnits + *out = new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamingEndpointInitParameters. +func (in *StreamingEndpointInitParameters) DeepCopy() *StreamingEndpointInitParameters { + if in == nil { + return nil + } + out := new(StreamingEndpointInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamingEndpointList) DeepCopyInto(out *StreamingEndpointList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StreamingEndpoint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamingEndpointList. 
+func (in *StreamingEndpointList) DeepCopy() *StreamingEndpointList { + if in == nil { + return nil + } + out := new(StreamingEndpointList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StreamingEndpointList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamingEndpointObservation) DeepCopyInto(out *StreamingEndpointObservation) { + *out = *in + if in.AccessControl != nil { + in, out := &in.AccessControl, &out.AccessControl + *out = new(AccessControlObservation) + (*in).DeepCopyInto(*out) + } + if in.AutoStartEnabled != nil { + in, out := &in.AutoStartEnabled, &out.AutoStartEnabled + *out = new(bool) + **out = **in + } + if in.CdnEnabled != nil { + in, out := &in.CdnEnabled, &out.CdnEnabled + *out = new(bool) + **out = **in + } + if in.CdnProfile != nil { + in, out := &in.CdnProfile, &out.CdnProfile + *out = new(string) + **out = **in + } + if in.CdnProvider != nil { + in, out := &in.CdnProvider, &out.CdnProvider + *out = new(string) + **out = **in + } + if in.CrossSiteAccessPolicy != nil { + in, out := &in.CrossSiteAccessPolicy, &out.CrossSiteAccessPolicy + *out = new(StreamingEndpointCrossSiteAccessPolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.CustomHostNames != nil { + in, out := &in.CustomHostNames, &out.CustomHostNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.HostName != nil { + in, out := &in.HostName, &out.HostName + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + 
*out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MaxCacheAgeSeconds != nil { + in, out := &in.MaxCacheAgeSeconds, &out.MaxCacheAgeSeconds + *out = new(float64) + **out = **in + } + if in.MediaServicesAccountName != nil { + in, out := &in.MediaServicesAccountName, &out.MediaServicesAccountName + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ScaleUnits != nil { + in, out := &in.ScaleUnits, &out.ScaleUnits + *out = new(float64) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = make([]SkuObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamingEndpointObservation. +func (in *StreamingEndpointObservation) DeepCopy() *StreamingEndpointObservation { + if in == nil { + return nil + } + out := new(StreamingEndpointObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StreamingEndpointParameters) DeepCopyInto(out *StreamingEndpointParameters) { + *out = *in + if in.AccessControl != nil { + in, out := &in.AccessControl, &out.AccessControl + *out = new(AccessControlParameters) + (*in).DeepCopyInto(*out) + } + if in.AutoStartEnabled != nil { + in, out := &in.AutoStartEnabled, &out.AutoStartEnabled + *out = new(bool) + **out = **in + } + if in.CdnEnabled != nil { + in, out := &in.CdnEnabled, &out.CdnEnabled + *out = new(bool) + **out = **in + } + if in.CdnProfile != nil { + in, out := &in.CdnProfile, &out.CdnProfile + *out = new(string) + **out = **in + } + if in.CdnProvider != nil { + in, out := &in.CdnProvider, &out.CdnProvider + *out = new(string) + **out = **in + } + if in.CrossSiteAccessPolicy != nil { + in, out := &in.CrossSiteAccessPolicy, &out.CrossSiteAccessPolicy + *out = new(StreamingEndpointCrossSiteAccessPolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomHostNames != nil { + in, out := &in.CustomHostNames, &out.CustomHostNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MaxCacheAgeSeconds != nil { + in, out := &in.MaxCacheAgeSeconds, &out.MaxCacheAgeSeconds + *out = new(float64) + **out = **in + } + if in.MediaServicesAccountName != nil { + in, out := &in.MediaServicesAccountName, &out.MediaServicesAccountName + *out = new(string) + **out = **in + } + if in.MediaServicesAccountNameRef != nil { + in, out := &in.MediaServicesAccountNameRef, &out.MediaServicesAccountNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.MediaServicesAccountNameSelector != nil { + in, out := &in.MediaServicesAccountNameSelector, 
&out.MediaServicesAccountNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ScaleUnits != nil { + in, out := &in.ScaleUnits, &out.ScaleUnits + *out = new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamingEndpointParameters. +func (in *StreamingEndpointParameters) DeepCopy() *StreamingEndpointParameters { + if in == nil { + return nil + } + out := new(StreamingEndpointParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamingEndpointSpec) DeepCopyInto(out *StreamingEndpointSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamingEndpointSpec. 
+func (in *StreamingEndpointSpec) DeepCopy() *StreamingEndpointSpec { + if in == nil { + return nil + } + out := new(StreamingEndpointSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamingEndpointStatus) DeepCopyInto(out *StreamingEndpointStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamingEndpointStatus. +func (in *StreamingEndpointStatus) DeepCopy() *StreamingEndpointStatus { + if in == nil { + return nil + } + out := new(StreamingEndpointStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamingPolicy) DeepCopyInto(out *StreamingPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamingPolicy. +func (in *StreamingPolicy) DeepCopy() *StreamingPolicy { + if in == nil { + return nil + } + out := new(StreamingPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StreamingPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StreamingPolicyInitParameters) DeepCopyInto(out *StreamingPolicyInitParameters) { + *out = *in + if in.CommonEncryptionCbcs != nil { + in, out := &in.CommonEncryptionCbcs, &out.CommonEncryptionCbcs + *out = new(CommonEncryptionCbcsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CommonEncryptionCenc != nil { + in, out := &in.CommonEncryptionCenc, &out.CommonEncryptionCenc + *out = new(CommonEncryptionCencInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DefaultContentKeyPolicyName != nil { + in, out := &in.DefaultContentKeyPolicyName, &out.DefaultContentKeyPolicyName + *out = new(string) + **out = **in + } + if in.EnvelopeEncryption != nil { + in, out := &in.EnvelopeEncryption, &out.EnvelopeEncryption + *out = new(EnvelopeEncryptionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.NoEncryptionEnabledProtocols != nil { + in, out := &in.NoEncryptionEnabledProtocols, &out.NoEncryptionEnabledProtocols + *out = new(NoEncryptionEnabledProtocolsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamingPolicyInitParameters. +func (in *StreamingPolicyInitParameters) DeepCopy() *StreamingPolicyInitParameters { + if in == nil { + return nil + } + out := new(StreamingPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamingPolicyList) DeepCopyInto(out *StreamingPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StreamingPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamingPolicyList. 
+func (in *StreamingPolicyList) DeepCopy() *StreamingPolicyList { + if in == nil { + return nil + } + out := new(StreamingPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StreamingPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamingPolicyObservation) DeepCopyInto(out *StreamingPolicyObservation) { + *out = *in + if in.CommonEncryptionCbcs != nil { + in, out := &in.CommonEncryptionCbcs, &out.CommonEncryptionCbcs + *out = new(CommonEncryptionCbcsObservation) + (*in).DeepCopyInto(*out) + } + if in.CommonEncryptionCenc != nil { + in, out := &in.CommonEncryptionCenc, &out.CommonEncryptionCenc + *out = new(CommonEncryptionCencObservation) + (*in).DeepCopyInto(*out) + } + if in.DefaultContentKeyPolicyName != nil { + in, out := &in.DefaultContentKeyPolicyName, &out.DefaultContentKeyPolicyName + *out = new(string) + **out = **in + } + if in.EnvelopeEncryption != nil { + in, out := &in.EnvelopeEncryption, &out.EnvelopeEncryption + *out = new(EnvelopeEncryptionObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.MediaServicesAccountName != nil { + in, out := &in.MediaServicesAccountName, &out.MediaServicesAccountName + *out = new(string) + **out = **in + } + if in.NoEncryptionEnabledProtocols != nil { + in, out := &in.NoEncryptionEnabledProtocols, &out.NoEncryptionEnabledProtocols + *out = new(NoEncryptionEnabledProtocolsObservation) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the 
receiver, creating a new StreamingPolicyObservation. +func (in *StreamingPolicyObservation) DeepCopy() *StreamingPolicyObservation { + if in == nil { + return nil + } + out := new(StreamingPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamingPolicyParameters) DeepCopyInto(out *StreamingPolicyParameters) { + *out = *in + if in.CommonEncryptionCbcs != nil { + in, out := &in.CommonEncryptionCbcs, &out.CommonEncryptionCbcs + *out = new(CommonEncryptionCbcsParameters) + (*in).DeepCopyInto(*out) + } + if in.CommonEncryptionCenc != nil { + in, out := &in.CommonEncryptionCenc, &out.CommonEncryptionCenc + *out = new(CommonEncryptionCencParameters) + (*in).DeepCopyInto(*out) + } + if in.DefaultContentKeyPolicyName != nil { + in, out := &in.DefaultContentKeyPolicyName, &out.DefaultContentKeyPolicyName + *out = new(string) + **out = **in + } + if in.EnvelopeEncryption != nil { + in, out := &in.EnvelopeEncryption, &out.EnvelopeEncryption + *out = new(EnvelopeEncryptionParameters) + (*in).DeepCopyInto(*out) + } + if in.MediaServicesAccountName != nil { + in, out := &in.MediaServicesAccountName, &out.MediaServicesAccountName + *out = new(string) + **out = **in + } + if in.MediaServicesAccountNameRef != nil { + in, out := &in.MediaServicesAccountNameRef, &out.MediaServicesAccountNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.MediaServicesAccountNameSelector != nil { + in, out := &in.MediaServicesAccountNameSelector, &out.MediaServicesAccountNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.NoEncryptionEnabledProtocols != nil { + in, out := &in.NoEncryptionEnabledProtocols, &out.NoEncryptionEnabledProtocols + *out = new(NoEncryptionEnabledProtocolsParameters) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + 
*out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamingPolicyParameters. +func (in *StreamingPolicyParameters) DeepCopy() *StreamingPolicyParameters { + if in == nil { + return nil + } + out := new(StreamingPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamingPolicySpec) DeepCopyInto(out *StreamingPolicySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamingPolicySpec. +func (in *StreamingPolicySpec) DeepCopy() *StreamingPolicySpec { + if in == nil { + return nil + } + out := new(StreamingPolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamingPolicyStatus) DeepCopyInto(out *StreamingPolicyStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamingPolicyStatus. 
+func (in *StreamingPolicyStatus) DeepCopy() *StreamingPolicyStatus { + if in == nil { + return nil + } + out := new(StreamingPolicyStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenRestrictionInitParameters) DeepCopyInto(out *TokenRestrictionInitParameters) { + *out = *in + if in.AlternateKey != nil { + in, out := &in.AlternateKey, &out.AlternateKey + *out = make([]AlternateKeyInitParameters, len(*in)) + copy(*out, *in) + } + if in.Audience != nil { + in, out := &in.Audience, &out.Audience + *out = new(string) + **out = **in + } + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } + if in.OpenIDConnectDiscoveryDocument != nil { + in, out := &in.OpenIDConnectDiscoveryDocument, &out.OpenIDConnectDiscoveryDocument + *out = new(string) + **out = **in + } + if in.RequiredClaim != nil { + in, out := &in.RequiredClaim, &out.RequiredClaim + *out = make([]RequiredClaimInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TokenType != nil { + in, out := &in.TokenType, &out.TokenType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenRestrictionInitParameters. +func (in *TokenRestrictionInitParameters) DeepCopy() *TokenRestrictionInitParameters { + if in == nil { + return nil + } + out := new(TokenRestrictionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TokenRestrictionObservation) DeepCopyInto(out *TokenRestrictionObservation) { + *out = *in + if in.AlternateKey != nil { + in, out := &in.AlternateKey, &out.AlternateKey + *out = make([]AlternateKeyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Audience != nil { + in, out := &in.Audience, &out.Audience + *out = new(string) + **out = **in + } + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } + if in.OpenIDConnectDiscoveryDocument != nil { + in, out := &in.OpenIDConnectDiscoveryDocument, &out.OpenIDConnectDiscoveryDocument + *out = new(string) + **out = **in + } + if in.RequiredClaim != nil { + in, out := &in.RequiredClaim, &out.RequiredClaim + *out = make([]RequiredClaimObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TokenType != nil { + in, out := &in.TokenType, &out.TokenType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenRestrictionObservation. +func (in *TokenRestrictionObservation) DeepCopy() *TokenRestrictionObservation { + if in == nil { + return nil + } + out := new(TokenRestrictionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TokenRestrictionParameters) DeepCopyInto(out *TokenRestrictionParameters) { + *out = *in + if in.AlternateKey != nil { + in, out := &in.AlternateKey, &out.AlternateKey + *out = make([]AlternateKeyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Audience != nil { + in, out := &in.Audience, &out.Audience + *out = new(string) + **out = **in + } + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } + if in.OpenIDConnectDiscoveryDocument != nil { + in, out := &in.OpenIDConnectDiscoveryDocument, &out.OpenIDConnectDiscoveryDocument + *out = new(string) + **out = **in + } + if in.PrimaryRsaTokenKeyExponentSecretRef != nil { + in, out := &in.PrimaryRsaTokenKeyExponentSecretRef, &out.PrimaryRsaTokenKeyExponentSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.PrimaryRsaTokenKeyModulusSecretRef != nil { + in, out := &in.PrimaryRsaTokenKeyModulusSecretRef, &out.PrimaryRsaTokenKeyModulusSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.PrimarySymmetricTokenKeySecretRef != nil { + in, out := &in.PrimarySymmetricTokenKeySecretRef, &out.PrimarySymmetricTokenKeySecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.PrimaryX509TokenKeyRawSecretRef != nil { + in, out := &in.PrimaryX509TokenKeyRawSecretRef, &out.PrimaryX509TokenKeyRawSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.RequiredClaim != nil { + in, out := &in.RequiredClaim, &out.RequiredClaim + *out = make([]RequiredClaimParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TokenType != nil { + in, out := &in.TokenType, &out.TokenType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenRestrictionParameters. 
+func (in *TokenRestrictionParameters) DeepCopy() *TokenRestrictionParameters { + if in == nil { + return nil + } + out := new(TokenRestrictionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrackConditionInitParameters) DeepCopyInto(out *TrackConditionInitParameters) { + *out = *in + if in.Operation != nil { + in, out := &in.Operation, &out.Operation + *out = new(string) + **out = **in + } + if in.Property != nil { + in, out := &in.Property, &out.Property + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrackConditionInitParameters. +func (in *TrackConditionInitParameters) DeepCopy() *TrackConditionInitParameters { + if in == nil { + return nil + } + out := new(TrackConditionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrackConditionObservation) DeepCopyInto(out *TrackConditionObservation) { + *out = *in + if in.Operation != nil { + in, out := &in.Operation, &out.Operation + *out = new(string) + **out = **in + } + if in.Property != nil { + in, out := &in.Property, &out.Property + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrackConditionObservation. +func (in *TrackConditionObservation) DeepCopy() *TrackConditionObservation { + if in == nil { + return nil + } + out := new(TrackConditionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *TrackConditionParameters) DeepCopyInto(out *TrackConditionParameters) { + *out = *in + if in.Operation != nil { + in, out := &in.Operation, &out.Operation + *out = new(string) + **out = **in + } + if in.Property != nil { + in, out := &in.Property, &out.Property + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrackConditionParameters. +func (in *TrackConditionParameters) DeepCopy() *TrackConditionParameters { + if in == nil { + return nil + } + out := new(TrackConditionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrackInitParameters) DeepCopyInto(out *TrackInitParameters) { + *out = *in + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = make([]TrackConditionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrackInitParameters. +func (in *TrackInitParameters) DeepCopy() *TrackInitParameters { + if in == nil { + return nil + } + out := new(TrackInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrackObservation) DeepCopyInto(out *TrackObservation) { + *out = *in + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = make([]TrackConditionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrackObservation. 
+func (in *TrackObservation) DeepCopy() *TrackObservation { + if in == nil { + return nil + } + out := new(TrackObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrackParameters) DeepCopyInto(out *TrackParameters) { + *out = *in + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = make([]TrackConditionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrackParameters. +func (in *TrackParameters) DeepCopy() *TrackParameters { + if in == nil { + return nil + } + out := new(TrackParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrackSelectionConditionInitParameters) DeepCopyInto(out *TrackSelectionConditionInitParameters) { + *out = *in + if in.Operation != nil { + in, out := &in.Operation, &out.Operation + *out = new(string) + **out = **in + } + if in.Property != nil { + in, out := &in.Property, &out.Property + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrackSelectionConditionInitParameters. +func (in *TrackSelectionConditionInitParameters) DeepCopy() *TrackSelectionConditionInitParameters { + if in == nil { + return nil + } + out := new(TrackSelectionConditionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TrackSelectionConditionObservation) DeepCopyInto(out *TrackSelectionConditionObservation) { + *out = *in + if in.Operation != nil { + in, out := &in.Operation, &out.Operation + *out = new(string) + **out = **in + } + if in.Property != nil { + in, out := &in.Property, &out.Property + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrackSelectionConditionObservation. +func (in *TrackSelectionConditionObservation) DeepCopy() *TrackSelectionConditionObservation { + if in == nil { + return nil + } + out := new(TrackSelectionConditionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrackSelectionConditionParameters) DeepCopyInto(out *TrackSelectionConditionParameters) { + *out = *in + if in.Operation != nil { + in, out := &in.Operation, &out.Operation + *out = new(string) + **out = **in + } + if in.Property != nil { + in, out := &in.Property, &out.Property + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrackSelectionConditionParameters. +func (in *TrackSelectionConditionParameters) DeepCopy() *TrackSelectionConditionParameters { + if in == nil { + return nil + } + out := new(TrackSelectionConditionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TrackSelectionInitParameters) DeepCopyInto(out *TrackSelectionInitParameters) { + *out = *in + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = make([]ConditionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrackSelectionInitParameters. +func (in *TrackSelectionInitParameters) DeepCopy() *TrackSelectionInitParameters { + if in == nil { + return nil + } + out := new(TrackSelectionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrackSelectionObservation) DeepCopyInto(out *TrackSelectionObservation) { + *out = *in + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = make([]ConditionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrackSelectionObservation. +func (in *TrackSelectionObservation) DeepCopy() *TrackSelectionObservation { + if in == nil { + return nil + } + out := new(TrackSelectionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrackSelectionParameters) DeepCopyInto(out *TrackSelectionParameters) { + *out = *in + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = make([]ConditionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrackSelectionParameters. 
+func (in *TrackSelectionParameters) DeepCopy() *TrackSelectionParameters { + if in == nil { + return nil + } + out := new(TrackSelectionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Transform) DeepCopyInto(out *Transform) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Transform. +func (in *Transform) DeepCopy() *Transform { + if in == nil { + return nil + } + out := new(Transform) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Transform) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TransformInitParameters) DeepCopyInto(out *TransformInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Output != nil { + in, out := &in.Output, &out.Output + *out = make([]OutputInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransformInitParameters. +func (in *TransformInitParameters) DeepCopy() *TransformInitParameters { + if in == nil { + return nil + } + out := new(TransformInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TransformList) DeepCopyInto(out *TransformList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Transform, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransformList. +func (in *TransformList) DeepCopy() *TransformList { + if in == nil { + return nil + } + out := new(TransformList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TransformList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TransformObservation) DeepCopyInto(out *TransformObservation) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.MediaServicesAccountName != nil { + in, out := &in.MediaServicesAccountName, &out.MediaServicesAccountName + *out = new(string) + **out = **in + } + if in.Output != nil { + in, out := &in.Output, &out.Output + *out = make([]OutputObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransformObservation. 
+func (in *TransformObservation) DeepCopy() *TransformObservation { + if in == nil { + return nil + } + out := new(TransformObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TransformParameters) DeepCopyInto(out *TransformParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.MediaServicesAccountName != nil { + in, out := &in.MediaServicesAccountName, &out.MediaServicesAccountName + *out = new(string) + **out = **in + } + if in.MediaServicesAccountNameRef != nil { + in, out := &in.MediaServicesAccountNameRef, &out.MediaServicesAccountNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.MediaServicesAccountNameSelector != nil { + in, out := &in.MediaServicesAccountNameSelector, &out.MediaServicesAccountNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Output != nil { + in, out := &in.Output, &out.Output + *out = make([]OutputParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransformParameters. 
+func (in *TransformParameters) DeepCopy() *TransformParameters { + if in == nil { + return nil + } + out := new(TransformParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TransformSpec) DeepCopyInto(out *TransformSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransformSpec. +func (in *TransformSpec) DeepCopy() *TransformSpec { + if in == nil { + return nil + } + out := new(TransformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TransformStatus) DeepCopyInto(out *TransformStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransformStatus. +func (in *TransformStatus) DeepCopy() *TransformStatus { + if in == nil { + return nil + } + out := new(TransformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TransportStreamInitParameters) DeepCopyInto(out *TransportStreamInitParameters) { + *out = *in + if in.FilenamePattern != nil { + in, out := &in.FilenamePattern, &out.FilenamePattern + *out = new(string) + **out = **in + } + if in.OutputFile != nil { + in, out := &in.OutputFile, &out.OutputFile + *out = make([]TransportStreamOutputFileInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransportStreamInitParameters. +func (in *TransportStreamInitParameters) DeepCopy() *TransportStreamInitParameters { + if in == nil { + return nil + } + out := new(TransportStreamInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TransportStreamObservation) DeepCopyInto(out *TransportStreamObservation) { + *out = *in + if in.FilenamePattern != nil { + in, out := &in.FilenamePattern, &out.FilenamePattern + *out = new(string) + **out = **in + } + if in.OutputFile != nil { + in, out := &in.OutputFile, &out.OutputFile + *out = make([]TransportStreamOutputFileObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransportStreamObservation. +func (in *TransportStreamObservation) DeepCopy() *TransportStreamObservation { + if in == nil { + return nil + } + out := new(TransportStreamObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TransportStreamOutputFileInitParameters) DeepCopyInto(out *TransportStreamOutputFileInitParameters) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransportStreamOutputFileInitParameters. +func (in *TransportStreamOutputFileInitParameters) DeepCopy() *TransportStreamOutputFileInitParameters { + if in == nil { + return nil + } + out := new(TransportStreamOutputFileInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TransportStreamOutputFileObservation) DeepCopyInto(out *TransportStreamOutputFileObservation) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransportStreamOutputFileObservation. +func (in *TransportStreamOutputFileObservation) DeepCopy() *TransportStreamOutputFileObservation { + if in == nil { + return nil + } + out := new(TransportStreamOutputFileObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TransportStreamOutputFileParameters) DeepCopyInto(out *TransportStreamOutputFileParameters) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransportStreamOutputFileParameters. +func (in *TransportStreamOutputFileParameters) DeepCopy() *TransportStreamOutputFileParameters { + if in == nil { + return nil + } + out := new(TransportStreamOutputFileParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TransportStreamParameters) DeepCopyInto(out *TransportStreamParameters) { + *out = *in + if in.FilenamePattern != nil { + in, out := &in.FilenamePattern, &out.FilenamePattern + *out = new(string) + **out = **in + } + if in.OutputFile != nil { + in, out := &in.OutputFile, &out.OutputFile + *out = make([]TransportStreamOutputFileParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransportStreamParameters. +func (in *TransportStreamParameters) DeepCopy() *TransportStreamParameters { + if in == nil { + return nil + } + out := new(TransportStreamParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VideoAnalyzerPresetInitParameters) DeepCopyInto(out *VideoAnalyzerPresetInitParameters) { + *out = *in + if in.AudioAnalysisMode != nil { + in, out := &in.AudioAnalysisMode, &out.AudioAnalysisMode + *out = new(string) + **out = **in + } + if in.AudioLanguage != nil { + in, out := &in.AudioLanguage, &out.AudioLanguage + *out = new(string) + **out = **in + } + if in.ExperimentalOptions != nil { + in, out := &in.ExperimentalOptions, &out.ExperimentalOptions + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.InsightsType != nil { + in, out := &in.InsightsType, &out.InsightsType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VideoAnalyzerPresetInitParameters. +func (in *VideoAnalyzerPresetInitParameters) DeepCopy() *VideoAnalyzerPresetInitParameters { + if in == nil { + return nil + } + out := new(VideoAnalyzerPresetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VideoAnalyzerPresetObservation) DeepCopyInto(out *VideoAnalyzerPresetObservation) { + *out = *in + if in.AudioAnalysisMode != nil { + in, out := &in.AudioAnalysisMode, &out.AudioAnalysisMode + *out = new(string) + **out = **in + } + if in.AudioLanguage != nil { + in, out := &in.AudioLanguage, &out.AudioLanguage + *out = new(string) + **out = **in + } + if in.ExperimentalOptions != nil { + in, out := &in.ExperimentalOptions, &out.ExperimentalOptions + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.InsightsType != nil { + in, out := &in.InsightsType, &out.InsightsType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VideoAnalyzerPresetObservation. +func (in *VideoAnalyzerPresetObservation) DeepCopy() *VideoAnalyzerPresetObservation { + if in == nil { + return nil + } + out := new(VideoAnalyzerPresetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VideoAnalyzerPresetParameters) DeepCopyInto(out *VideoAnalyzerPresetParameters) { + *out = *in + if in.AudioAnalysisMode != nil { + in, out := &in.AudioAnalysisMode, &out.AudioAnalysisMode + *out = new(string) + **out = **in + } + if in.AudioLanguage != nil { + in, out := &in.AudioLanguage, &out.AudioLanguage + *out = new(string) + **out = **in + } + if in.ExperimentalOptions != nil { + in, out := &in.ExperimentalOptions, &out.ExperimentalOptions + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.InsightsType != nil { + in, out := &in.InsightsType, &out.InsightsType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VideoAnalyzerPresetParameters. +func (in *VideoAnalyzerPresetParameters) DeepCopy() *VideoAnalyzerPresetParameters { + if in == nil { + return nil + } + out := new(VideoAnalyzerPresetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VideoCropRectangleInitParameters) DeepCopyInto(out *VideoCropRectangleInitParameters) { + *out = *in + if in.Height != nil { + in, out := &in.Height, &out.Height + *out = new(string) + **out = **in + } + if in.Left != nil { + in, out := &in.Left, &out.Left + *out = new(string) + **out = **in + } + if in.Top != nil { + in, out := &in.Top, &out.Top + *out = new(string) + **out = **in + } + if in.Width != nil { + in, out := &in.Width, &out.Width + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VideoCropRectangleInitParameters. 
+func (in *VideoCropRectangleInitParameters) DeepCopy() *VideoCropRectangleInitParameters { + if in == nil { + return nil + } + out := new(VideoCropRectangleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VideoCropRectangleObservation) DeepCopyInto(out *VideoCropRectangleObservation) { + *out = *in + if in.Height != nil { + in, out := &in.Height, &out.Height + *out = new(string) + **out = **in + } + if in.Left != nil { + in, out := &in.Left, &out.Left + *out = new(string) + **out = **in + } + if in.Top != nil { + in, out := &in.Top, &out.Top + *out = new(string) + **out = **in + } + if in.Width != nil { + in, out := &in.Width, &out.Width + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VideoCropRectangleObservation. +func (in *VideoCropRectangleObservation) DeepCopy() *VideoCropRectangleObservation { + if in == nil { + return nil + } + out := new(VideoCropRectangleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VideoCropRectangleParameters) DeepCopyInto(out *VideoCropRectangleParameters) { + *out = *in + if in.Height != nil { + in, out := &in.Height, &out.Height + *out = new(string) + **out = **in + } + if in.Left != nil { + in, out := &in.Left, &out.Left + *out = new(string) + **out = **in + } + if in.Top != nil { + in, out := &in.Top, &out.Top + *out = new(string) + **out = **in + } + if in.Width != nil { + in, out := &in.Width, &out.Width + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VideoCropRectangleParameters. 
+func (in *VideoCropRectangleParameters) DeepCopy() *VideoCropRectangleParameters { + if in == nil { + return nil + } + out := new(VideoCropRectangleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VideoInitParameters) DeepCopyInto(out *VideoInitParameters) { + *out = *in + if in.AudioGainLevel != nil { + in, out := &in.AudioGainLevel, &out.AudioGainLevel + *out = new(float64) + **out = **in + } + if in.CropRectangle != nil { + in, out := &in.CropRectangle, &out.CropRectangle + *out = new(VideoCropRectangleInitParameters) + (*in).DeepCopyInto(*out) + } + if in.End != nil { + in, out := &in.End, &out.End + *out = new(string) + **out = **in + } + if in.FadeInDuration != nil { + in, out := &in.FadeInDuration, &out.FadeInDuration + *out = new(string) + **out = **in + } + if in.FadeOutDuration != nil { + in, out := &in.FadeOutDuration, &out.FadeOutDuration + *out = new(string) + **out = **in + } + if in.InputLabel != nil { + in, out := &in.InputLabel, &out.InputLabel + *out = new(string) + **out = **in + } + if in.Opacity != nil { + in, out := &in.Opacity, &out.Opacity + *out = new(float64) + **out = **in + } + if in.Position != nil { + in, out := &in.Position, &out.Position + *out = new(PositionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VideoInitParameters. +func (in *VideoInitParameters) DeepCopy() *VideoInitParameters { + if in == nil { + return nil + } + out := new(VideoInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VideoObservation) DeepCopyInto(out *VideoObservation) { + *out = *in + if in.AudioGainLevel != nil { + in, out := &in.AudioGainLevel, &out.AudioGainLevel + *out = new(float64) + **out = **in + } + if in.CropRectangle != nil { + in, out := &in.CropRectangle, &out.CropRectangle + *out = new(VideoCropRectangleObservation) + (*in).DeepCopyInto(*out) + } + if in.End != nil { + in, out := &in.End, &out.End + *out = new(string) + **out = **in + } + if in.FadeInDuration != nil { + in, out := &in.FadeInDuration, &out.FadeInDuration + *out = new(string) + **out = **in + } + if in.FadeOutDuration != nil { + in, out := &in.FadeOutDuration, &out.FadeOutDuration + *out = new(string) + **out = **in + } + if in.InputLabel != nil { + in, out := &in.InputLabel, &out.InputLabel + *out = new(string) + **out = **in + } + if in.Opacity != nil { + in, out := &in.Opacity, &out.Opacity + *out = new(float64) + **out = **in + } + if in.Position != nil { + in, out := &in.Position, &out.Position + *out = new(PositionObservation) + (*in).DeepCopyInto(*out) + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VideoObservation. +func (in *VideoObservation) DeepCopy() *VideoObservation { + if in == nil { + return nil + } + out := new(VideoObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VideoParameters) DeepCopyInto(out *VideoParameters) { + *out = *in + if in.AudioGainLevel != nil { + in, out := &in.AudioGainLevel, &out.AudioGainLevel + *out = new(float64) + **out = **in + } + if in.CropRectangle != nil { + in, out := &in.CropRectangle, &out.CropRectangle + *out = new(VideoCropRectangleParameters) + (*in).DeepCopyInto(*out) + } + if in.End != nil { + in, out := &in.End, &out.End + *out = new(string) + **out = **in + } + if in.FadeInDuration != nil { + in, out := &in.FadeInDuration, &out.FadeInDuration + *out = new(string) + **out = **in + } + if in.FadeOutDuration != nil { + in, out := &in.FadeOutDuration, &out.FadeOutDuration + *out = new(string) + **out = **in + } + if in.InputLabel != nil { + in, out := &in.InputLabel, &out.InputLabel + *out = new(string) + **out = **in + } + if in.Opacity != nil { + in, out := &in.Opacity, &out.Opacity + *out = new(float64) + **out = **in + } + if in.Position != nil { + in, out := &in.Position, &out.Position + *out = new(PositionParameters) + (*in).DeepCopyInto(*out) + } + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VideoParameters. +func (in *VideoParameters) DeepCopy() *VideoParameters { + if in == nil { + return nil + } + out := new(VideoParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/media/v1beta2/zz_generated.managed.go b/apis/media/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..7b19e01be --- /dev/null +++ b/apis/media/v1beta2/zz_generated.managed.go @@ -0,0 +1,548 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this AssetFilter. 
+func (mg *AssetFilter) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this AssetFilter. +func (mg *AssetFilter) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this AssetFilter. +func (mg *AssetFilter) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this AssetFilter. +func (mg *AssetFilter) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this AssetFilter. +func (mg *AssetFilter) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this AssetFilter. +func (mg *AssetFilter) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this AssetFilter. +func (mg *AssetFilter) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this AssetFilter. +func (mg *AssetFilter) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this AssetFilter. +func (mg *AssetFilter) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this AssetFilter. +func (mg *AssetFilter) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this AssetFilter. +func (mg *AssetFilter) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this AssetFilter. 
+func (mg *AssetFilter) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this ContentKeyPolicy. +func (mg *ContentKeyPolicy) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ContentKeyPolicy. +func (mg *ContentKeyPolicy) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ContentKeyPolicy. +func (mg *ContentKeyPolicy) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ContentKeyPolicy. +func (mg *ContentKeyPolicy) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ContentKeyPolicy. +func (mg *ContentKeyPolicy) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ContentKeyPolicy. +func (mg *ContentKeyPolicy) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ContentKeyPolicy. +func (mg *ContentKeyPolicy) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ContentKeyPolicy. +func (mg *ContentKeyPolicy) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ContentKeyPolicy. +func (mg *ContentKeyPolicy) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ContentKeyPolicy. +func (mg *ContentKeyPolicy) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ContentKeyPolicy. 
+func (mg *ContentKeyPolicy) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ContentKeyPolicy. +func (mg *ContentKeyPolicy) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Job. +func (mg *Job) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Job. +func (mg *Job) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Job. +func (mg *Job) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Job. +func (mg *Job) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Job. +func (mg *Job) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Job. +func (mg *Job) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Job. +func (mg *Job) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Job. +func (mg *Job) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Job. +func (mg *Job) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Job. +func (mg *Job) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Job. 
+func (mg *Job) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Job. +func (mg *Job) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this LiveEvent. +func (mg *LiveEvent) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this LiveEvent. +func (mg *LiveEvent) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this LiveEvent. +func (mg *LiveEvent) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this LiveEvent. +func (mg *LiveEvent) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this LiveEvent. +func (mg *LiveEvent) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this LiveEvent. +func (mg *LiveEvent) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this LiveEvent. +func (mg *LiveEvent) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this LiveEvent. +func (mg *LiveEvent) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this LiveEvent. +func (mg *LiveEvent) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this LiveEvent. +func (mg *LiveEvent) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this LiveEvent. 
+func (mg *LiveEvent) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this LiveEvent. +func (mg *LiveEvent) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this ServicesAccount. +func (mg *ServicesAccount) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ServicesAccount. +func (mg *ServicesAccount) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ServicesAccount. +func (mg *ServicesAccount) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ServicesAccount. +func (mg *ServicesAccount) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ServicesAccount. +func (mg *ServicesAccount) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ServicesAccount. +func (mg *ServicesAccount) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ServicesAccount. +func (mg *ServicesAccount) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ServicesAccount. +func (mg *ServicesAccount) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ServicesAccount. +func (mg *ServicesAccount) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ServicesAccount. 
+func (mg *ServicesAccount) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ServicesAccount. +func (mg *ServicesAccount) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ServicesAccount. +func (mg *ServicesAccount) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this ServicesAccountFilter. +func (mg *ServicesAccountFilter) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ServicesAccountFilter. +func (mg *ServicesAccountFilter) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ServicesAccountFilter. +func (mg *ServicesAccountFilter) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ServicesAccountFilter. +func (mg *ServicesAccountFilter) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ServicesAccountFilter. +func (mg *ServicesAccountFilter) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ServicesAccountFilter. +func (mg *ServicesAccountFilter) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ServicesAccountFilter. +func (mg *ServicesAccountFilter) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ServicesAccountFilter. 
+func (mg *ServicesAccountFilter) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ServicesAccountFilter. +func (mg *ServicesAccountFilter) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ServicesAccountFilter. +func (mg *ServicesAccountFilter) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ServicesAccountFilter. +func (mg *ServicesAccountFilter) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ServicesAccountFilter. +func (mg *ServicesAccountFilter) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this StreamingEndpoint. +func (mg *StreamingEndpoint) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this StreamingEndpoint. +func (mg *StreamingEndpoint) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this StreamingEndpoint. +func (mg *StreamingEndpoint) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this StreamingEndpoint. +func (mg *StreamingEndpoint) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this StreamingEndpoint. +func (mg *StreamingEndpoint) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this StreamingEndpoint. 
+func (mg *StreamingEndpoint) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this StreamingEndpoint. +func (mg *StreamingEndpoint) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this StreamingEndpoint. +func (mg *StreamingEndpoint) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this StreamingEndpoint. +func (mg *StreamingEndpoint) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this StreamingEndpoint. +func (mg *StreamingEndpoint) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this StreamingEndpoint. +func (mg *StreamingEndpoint) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this StreamingEndpoint. +func (mg *StreamingEndpoint) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this StreamingPolicy. +func (mg *StreamingPolicy) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this StreamingPolicy. +func (mg *StreamingPolicy) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this StreamingPolicy. +func (mg *StreamingPolicy) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this StreamingPolicy. +func (mg *StreamingPolicy) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this StreamingPolicy. 
+func (mg *StreamingPolicy) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this StreamingPolicy. +func (mg *StreamingPolicy) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this StreamingPolicy. +func (mg *StreamingPolicy) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this StreamingPolicy. +func (mg *StreamingPolicy) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this StreamingPolicy. +func (mg *StreamingPolicy) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this StreamingPolicy. +func (mg *StreamingPolicy) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this StreamingPolicy. +func (mg *StreamingPolicy) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this StreamingPolicy. +func (mg *StreamingPolicy) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Transform. +func (mg *Transform) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Transform. +func (mg *Transform) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Transform. +func (mg *Transform) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Transform. 
+func (mg *Transform) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Transform. +func (mg *Transform) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Transform. +func (mg *Transform) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Transform. +func (mg *Transform) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Transform. +func (mg *Transform) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Transform. +func (mg *Transform) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Transform. +func (mg *Transform) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Transform. +func (mg *Transform) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Transform. +func (mg *Transform) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/media/v1beta2/zz_generated.managedlist.go b/apis/media/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..1a0bb1e26 --- /dev/null +++ b/apis/media/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,89 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this AssetFilterList. 
+func (l *AssetFilterList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ContentKeyPolicyList. +func (l *ContentKeyPolicyList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this JobList. +func (l *JobList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this LiveEventList. +func (l *LiveEventList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ServicesAccountFilterList. +func (l *ServicesAccountFilterList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ServicesAccountList. +func (l *ServicesAccountList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this StreamingEndpointList. +func (l *StreamingEndpointList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this StreamingPolicyList. +func (l *StreamingPolicyList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this TransformList. 
+func (l *TransformList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/media/v1beta2/zz_generated.resolvers.go b/apis/media/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..174bddd52 --- /dev/null +++ b/apis/media/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,626 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + rconfig "github.com/upbound/provider-azure/apis/rconfig" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + apisresolver "github.com/upbound/provider-azure/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *AssetFilter) ResolveReferences( // ResolveReferences of this AssetFilter. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("media.azure.upbound.io", "v1beta1", "Asset", "AssetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.AssetID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.AssetIDRef, + Selector: mg.Spec.ForProvider.AssetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.AssetID") + } + mg.Spec.ForProvider.AssetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.AssetIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this ContentKeyPolicy. 
+func (mg *ContentKeyPolicy) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("media.azure.upbound.io", "v1beta2", "ServicesAccount", "ServicesAccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.MediaServicesAccountName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.MediaServicesAccountNameRef, + Selector: mg.Spec.ForProvider.MediaServicesAccountNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.MediaServicesAccountName") + } + mg.Spec.ForProvider.MediaServicesAccountName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.MediaServicesAccountNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} + +// 
ResolveReferences of this Job. +func (mg *Job) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + if mg.Spec.ForProvider.InputAsset != nil { + { + m, l, err = apisresolver.GetManagedResource("media.azure.upbound.io", "v1beta1", "Asset", "AssetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.InputAsset.Name), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.InputAsset.NameRef, + Selector: mg.Spec.ForProvider.InputAsset.NameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.InputAsset.Name") + } + mg.Spec.ForProvider.InputAsset.Name = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.InputAsset.NameRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("media.azure.upbound.io", "v1beta2", "ServicesAccount", "ServicesAccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.MediaServicesAccountName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.MediaServicesAccountNameRef, + Selector: mg.Spec.ForProvider.MediaServicesAccountNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.MediaServicesAccountName") + } + mg.Spec.ForProvider.MediaServicesAccountName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.MediaServicesAccountNameRef = 
rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.ForProvider.OutputAsset); i3++ { + { + m, l, err = apisresolver.GetManagedResource("media.azure.upbound.io", "v1beta1", "Asset", "AssetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.OutputAsset[i3].Name), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.OutputAsset[i3].NameRef, + Selector: mg.Spec.ForProvider.OutputAsset[i3].NameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.OutputAsset[i3].Name") + } + mg.Spec.ForProvider.OutputAsset[i3].Name = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.OutputAsset[i3].NameRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("media.azure.upbound.io", "v1beta2", "Transform", "TransformList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list 
for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.TransformName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.TransformNameRef, + Selector: mg.Spec.ForProvider.TransformNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.TransformName") + } + mg.Spec.ForProvider.TransformName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.TransformNameRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.InputAsset != nil { + { + m, l, err = apisresolver.GetManagedResource("media.azure.upbound.io", "v1beta1", "Asset", "AssetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.InputAsset.Name), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.InputAsset.NameRef, + Selector: mg.Spec.InitProvider.InputAsset.NameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.InputAsset.Name") + } + mg.Spec.InitProvider.InputAsset.Name = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.InputAsset.NameRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.OutputAsset); i3++ { + { + m, l, err = apisresolver.GetManagedResource("media.azure.upbound.io", "v1beta1", "Asset", "AssetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.OutputAsset[i3].Name), + Extract: reference.ExternalName(), + Reference: 
mg.Spec.InitProvider.OutputAsset[i3].NameRef, + Selector: mg.Spec.InitProvider.OutputAsset[i3].NameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.OutputAsset[i3].Name") + } + mg.Spec.InitProvider.OutputAsset[i3].Name = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.OutputAsset[i3].NameRef = rsp.ResolvedReference + + } + + return nil +} + +// ResolveReferences of this LiveEvent. +func (mg *LiveEvent) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("media.azure.upbound.io", "v1beta2", "ServicesAccount", "ServicesAccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.MediaServicesAccountName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.MediaServicesAccountNameRef, + Selector: mg.Spec.ForProvider.MediaServicesAccountNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.MediaServicesAccountName") + } + mg.Spec.ForProvider.MediaServicesAccountName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.MediaServicesAccountNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this ServicesAccount. +func (mg *ServicesAccount) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.ForProvider.StorageAccount); i3++ { + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = 
r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StorageAccount[i3].ID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.StorageAccount[i3].IDRef, + Selector: mg.Spec.ForProvider.StorageAccount[i3].IDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StorageAccount[i3].ID") + } + mg.Spec.ForProvider.StorageAccount[i3].ID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StorageAccount[i3].IDRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.StorageAccount); i3++ { + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StorageAccount[i3].ID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.StorageAccount[i3].IDRef, + Selector: mg.Spec.InitProvider.StorageAccount[i3].IDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StorageAccount[i3].ID") + } + mg.Spec.InitProvider.StorageAccount[i3].ID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StorageAccount[i3].IDRef = rsp.ResolvedReference + + } + + return nil +} + +// ResolveReferences of this ServicesAccountFilter. 
+func (mg *ServicesAccountFilter) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("media.azure.upbound.io", "v1beta2", "ServicesAccount", "ServicesAccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.MediaServicesAccountName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.MediaServicesAccountNameRef, + Selector: mg.Spec.ForProvider.MediaServicesAccountNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.MediaServicesAccountName") + } + mg.Spec.ForProvider.MediaServicesAccountName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.MediaServicesAccountNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} + 
+// ResolveReferences of this StreamingEndpoint. +func (mg *StreamingEndpoint) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("media.azure.upbound.io", "v1beta2", "ServicesAccount", "ServicesAccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.MediaServicesAccountName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.MediaServicesAccountNameRef, + Selector: mg.Spec.ForProvider.MediaServicesAccountNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.MediaServicesAccountName") + } + mg.Spec.ForProvider.MediaServicesAccountName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.MediaServicesAccountNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef 
= rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this StreamingPolicy. +func (mg *StreamingPolicy) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + if mg.Spec.ForProvider.CommonEncryptionCenc != nil { + if mg.Spec.ForProvider.CommonEncryptionCenc.DefaultContentKey != nil { + { + m, l, err = apisresolver.GetManagedResource("media.azure.upbound.io", "v1beta2", "ContentKeyPolicy", "ContentKeyPolicyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.CommonEncryptionCenc.DefaultContentKey.PolicyName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.CommonEncryptionCenc.DefaultContentKey.PolicyNameRef, + Selector: mg.Spec.ForProvider.CommonEncryptionCenc.DefaultContentKey.PolicyNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.CommonEncryptionCenc.DefaultContentKey.PolicyName") + } + mg.Spec.ForProvider.CommonEncryptionCenc.DefaultContentKey.PolicyName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.CommonEncryptionCenc.DefaultContentKey.PolicyNameRef = rsp.ResolvedReference + + } + } + { + m, l, err = apisresolver.GetManagedResource("media.azure.upbound.io", "v1beta2", "ServicesAccount", "ServicesAccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.MediaServicesAccountName), + Extract: reference.ExternalName(), + Reference: 
mg.Spec.ForProvider.MediaServicesAccountNameRef, + Selector: mg.Spec.ForProvider.MediaServicesAccountNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.MediaServicesAccountName") + } + mg.Spec.ForProvider.MediaServicesAccountName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.MediaServicesAccountNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.CommonEncryptionCenc != nil { + if mg.Spec.InitProvider.CommonEncryptionCenc.DefaultContentKey != nil { + { + m, l, err = apisresolver.GetManagedResource("media.azure.upbound.io", "v1beta2", "ContentKeyPolicy", "ContentKeyPolicyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.CommonEncryptionCenc.DefaultContentKey.PolicyName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.CommonEncryptionCenc.DefaultContentKey.PolicyNameRef, + Selector: 
mg.Spec.InitProvider.CommonEncryptionCenc.DefaultContentKey.PolicyNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.CommonEncryptionCenc.DefaultContentKey.PolicyName") + } + mg.Spec.InitProvider.CommonEncryptionCenc.DefaultContentKey.PolicyName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.CommonEncryptionCenc.DefaultContentKey.PolicyNameRef = rsp.ResolvedReference + + } + } + + return nil +} + +// ResolveReferences of this Transform. +func (mg *Transform) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("media.azure.upbound.io", "v1beta2", "ServicesAccount", "ServicesAccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.MediaServicesAccountName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.MediaServicesAccountNameRef, + Selector: mg.Spec.ForProvider.MediaServicesAccountNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.MediaServicesAccountName") + } + mg.Spec.ForProvider.MediaServicesAccountName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.MediaServicesAccountNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + 
CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/media/v1beta2/zz_groupversion_info.go b/apis/media/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..7256f7ce3 --- /dev/null +++ b/apis/media/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=media.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "media.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/media/v1beta2/zz_job_terraformed.go b/apis/media/v1beta2/zz_job_terraformed.go new file mode 100755 index 000000000..d0b11f69e --- /dev/null +++ b/apis/media/v1beta2/zz_job_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Job +func (mg *Job) GetTerraformResourceType() string { + return "azurerm_media_job" +} + +// GetConnectionDetailsMapping for this Job +func (tr *Job) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Job +func (tr *Job) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Job +func (tr *Job) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Job +func (tr *Job) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Job +func (tr *Job) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Job +func (tr *Job) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + 
} + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Job +func (tr *Job) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Job +func (tr *Job) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Job using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Job) LateInitialize(attrs []byte) (bool, error) { + params := &JobParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Job) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/media/v1beta2/zz_job_types.go b/apis/media/v1beta2/zz_job_types.go new file mode 100755 index 000000000..cccd9eb71 --- /dev/null +++ b/apis/media/v1beta2/zz_job_types.go @@ -0,0 +1,270 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type InputAssetInitParameters struct { + + // A label that is assigned to a JobInputClip, that is used to satisfy a reference used in the Transform. For example, a Transform can be authored so as to take an image file with the label 'xyz' and apply it as an overlay onto the input video before it is encoded. When submitting a Job, exactly one of the JobInputs should be the image file, and it should have the label 'xyz'. Changing this forces a new resource to be created. + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // The name of the input Asset. Changing this forces a new Media Job to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/media/v1beta1.Asset + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Reference to a Asset in media to populate name. + // +kubebuilder:validation:Optional + NameRef *v1.Reference `json:"nameRef,omitempty" tf:"-"` + + // Selector for a Asset in media to populate name. + // +kubebuilder:validation:Optional + NameSelector *v1.Selector `json:"nameSelector,omitempty" tf:"-"` +} + +type InputAssetObservation struct { + + // A label that is assigned to a JobInputClip, that is used to satisfy a reference used in the Transform. 
For example, a Transform can be authored so as to take an image file with the label 'xyz' and apply it as an overlay onto the input video before it is encoded. When submitting a Job, exactly one of the JobInputs should be the image file, and it should have the label 'xyz'. Changing this forces a new resource to be created. + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // The name of the input Asset. Changing this forces a new Media Job to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type InputAssetParameters struct { + + // A label that is assigned to a JobInputClip, that is used to satisfy a reference used in the Transform. For example, a Transform can be authored so as to take an image file with the label 'xyz' and apply it as an overlay onto the input video before it is encoded. When submitting a Job, exactly one of the JobInputs should be the image file, and it should have the label 'xyz'. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // The name of the input Asset. Changing this forces a new Media Job to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/media/v1beta1.Asset + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Reference to a Asset in media to populate name. + // +kubebuilder:validation:Optional + NameRef *v1.Reference `json:"nameRef,omitempty" tf:"-"` + + // Selector for a Asset in media to populate name. + // +kubebuilder:validation:Optional + NameSelector *v1.Selector `json:"nameSelector,omitempty" tf:"-"` +} + +type JobInitParameters struct { + + // Optional customer supplied description of the Job. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A input_asset block as defined below. Changing this forces a new Media Job to be created. 
+ InputAsset *InputAssetInitParameters `json:"inputAsset,omitempty" tf:"input_asset,omitempty"` + + // One or more output_asset blocks as defined below. Changing this forces a new Media Job to be created. + OutputAsset []OutputAssetInitParameters `json:"outputAsset,omitempty" tf:"output_asset,omitempty"` + + // Priority with which the job should be processed. Higher priority jobs are processed before lower priority jobs. Changing this forces a new Media Job to be created. Possible values are High, Normal and Low. Defaults to Normal. + Priority *string `json:"priority,omitempty" tf:"priority,omitempty"` +} + +type JobObservation struct { + + // Optional customer supplied description of the Job. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The ID of the Media Job. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A input_asset block as defined below. Changing this forces a new Media Job to be created. + InputAsset *InputAssetObservation `json:"inputAsset,omitempty" tf:"input_asset,omitempty"` + + // The Media Services account name. Changing this forces a new Transform to be created. + MediaServicesAccountName *string `json:"mediaServicesAccountName,omitempty" tf:"media_services_account_name,omitempty"` + + // One or more output_asset blocks as defined below. Changing this forces a new Media Job to be created. + OutputAsset []OutputAssetObservation `json:"outputAsset,omitempty" tf:"output_asset,omitempty"` + + // Priority with which the job should be processed. Higher priority jobs are processed before lower priority jobs. Changing this forces a new Media Job to be created. Possible values are High, Normal and Low. Defaults to Normal. + Priority *string `json:"priority,omitempty" tf:"priority,omitempty"` + + // The name of the Resource Group where the Media Job should exist. Changing this forces a new Media Job to be created. 
+ ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // The Transform name. Changing this forces a new Media Job to be created. + TransformName *string `json:"transformName,omitempty" tf:"transform_name,omitempty"` +} + +type JobParameters struct { + + // Optional customer supplied description of the Job. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A input_asset block as defined below. Changing this forces a new Media Job to be created. + // +kubebuilder:validation:Optional + InputAsset *InputAssetParameters `json:"inputAsset,omitempty" tf:"input_asset,omitempty"` + + // The Media Services account name. Changing this forces a new Transform to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/media/v1beta2.ServicesAccount + // +kubebuilder:validation:Optional + MediaServicesAccountName *string `json:"mediaServicesAccountName,omitempty" tf:"media_services_account_name,omitempty"` + + // Reference to a ServicesAccount in media to populate mediaServicesAccountName. + // +kubebuilder:validation:Optional + MediaServicesAccountNameRef *v1.Reference `json:"mediaServicesAccountNameRef,omitempty" tf:"-"` + + // Selector for a ServicesAccount in media to populate mediaServicesAccountName. + // +kubebuilder:validation:Optional + MediaServicesAccountNameSelector *v1.Selector `json:"mediaServicesAccountNameSelector,omitempty" tf:"-"` + + // One or more output_asset blocks as defined below. Changing this forces a new Media Job to be created. + // +kubebuilder:validation:Optional + OutputAsset []OutputAssetParameters `json:"outputAsset,omitempty" tf:"output_asset,omitempty"` + + // Priority with which the job should be processed. Higher priority jobs are processed before lower priority jobs. Changing this forces a new Media Job to be created. Possible values are High, Normal and Low. Defaults to Normal. 
+ // +kubebuilder:validation:Optional + Priority *string `json:"priority,omitempty" tf:"priority,omitempty"` + + // The name of the Resource Group where the Media Job should exist. Changing this forces a new Media Job to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // The Transform name. Changing this forces a new Media Job to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/media/v1beta2.Transform + // +kubebuilder:validation:Optional + TransformName *string `json:"transformName,omitempty" tf:"transform_name,omitempty"` + + // Reference to a Transform in media to populate transformName. + // +kubebuilder:validation:Optional + TransformNameRef *v1.Reference `json:"transformNameRef,omitempty" tf:"-"` + + // Selector for a Transform in media to populate transformName. + // +kubebuilder:validation:Optional + TransformNameSelector *v1.Selector `json:"transformNameSelector,omitempty" tf:"-"` +} + +type OutputAssetInitParameters struct { + + // A label that is assigned to a JobOutput in order to help uniquely identify it. This is useful when your Transform has more than one TransformOutput, whereby your Job has more than one JobOutput. In such cases, when you submit the Job, you will add two or more JobOutputs, in the same order as TransformOutputs in the Transform. 
Subsequently, when you retrieve the Job, either through events or on a GET request, you can use the label to easily identify the JobOutput. If a label is not provided, a default value of '{presetName}_{outputIndex}' will be used, where the preset name is the name of the preset in the corresponding TransformOutput and the output index is the relative index of the this JobOutput within the Job. Note that this index is the same as the relative index of the corresponding TransformOutput within its Transform. Changing this forces a new resource to be created. + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // The name of the output Asset. Changing this forces a new Media Job to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/media/v1beta1.Asset + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Reference to a Asset in media to populate name. + // +kubebuilder:validation:Optional + NameRef *v1.Reference `json:"nameRef,omitempty" tf:"-"` + + // Selector for a Asset in media to populate name. + // +kubebuilder:validation:Optional + NameSelector *v1.Selector `json:"nameSelector,omitempty" tf:"-"` +} + +type OutputAssetObservation struct { + + // A label that is assigned to a JobOutput in order to help uniquely identify it. This is useful when your Transform has more than one TransformOutput, whereby your Job has more than one JobOutput. In such cases, when you submit the Job, you will add two or more JobOutputs, in the same order as TransformOutputs in the Transform. Subsequently, when you retrieve the Job, either through events or on a GET request, you can use the label to easily identify the JobOutput. If a label is not provided, a default value of '{presetName}_{outputIndex}' will be used, where the preset name is the name of the preset in the corresponding TransformOutput and the output index is the relative index of the this JobOutput within the Job. 
Note that this index is the same as the relative index of the corresponding TransformOutput within its Transform. Changing this forces a new resource to be created. + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // The name of the output Asset. Changing this forces a new Media Job to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type OutputAssetParameters struct { + + // A label that is assigned to a JobOutput in order to help uniquely identify it. This is useful when your Transform has more than one TransformOutput, whereby your Job has more than one JobOutput. In such cases, when you submit the Job, you will add two or more JobOutputs, in the same order as TransformOutputs in the Transform. Subsequently, when you retrieve the Job, either through events or on a GET request, you can use the label to easily identify the JobOutput. If a label is not provided, a default value of '{presetName}_{outputIndex}' will be used, where the preset name is the name of the preset in the corresponding TransformOutput and the output index is the relative index of the this JobOutput within the Job. Note that this index is the same as the relative index of the corresponding TransformOutput within its Transform. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // The name of the output Asset. Changing this forces a new Media Job to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/media/v1beta1.Asset + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Reference to a Asset in media to populate name. + // +kubebuilder:validation:Optional + NameRef *v1.Reference `json:"nameRef,omitempty" tf:"-"` + + // Selector for a Asset in media to populate name. 
+ // +kubebuilder:validation:Optional + NameSelector *v1.Selector `json:"nameSelector,omitempty" tf:"-"` +} + +// JobSpec defines the desired state of Job +type JobSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider JobParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider JobInitParameters `json:"initProvider,omitempty"` +} + +// JobStatus defines the observed state of Job. +type JobStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider JobObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Job is the Schema for the Jobs API. Manages a Media Job. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type Job struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.inputAsset) || (has(self.initProvider) && has(self.initProvider.inputAsset))",message="spec.forProvider.inputAsset is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.outputAsset) || (has(self.initProvider) && has(self.initProvider.outputAsset))",message="spec.forProvider.outputAsset is a required parameter" + Spec JobSpec `json:"spec"` + Status JobStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// JobList contains a list of Jobs +type JobList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Job `json:"items"` +} + +// Repository type metadata. +var ( + Job_Kind = "Job" + Job_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Job_Kind}.String() + Job_KindAPIVersion = Job_Kind + "." 
+ CRDGroupVersion.String() + Job_GroupVersionKind = CRDGroupVersion.WithKind(Job_Kind) +) + +func init() { + SchemeBuilder.Register(&Job{}, &JobList{}) +} diff --git a/apis/media/v1beta2/zz_liveevent_terraformed.go b/apis/media/v1beta2/zz_liveevent_terraformed.go new file mode 100755 index 000000000..d58abbe4e --- /dev/null +++ b/apis/media/v1beta2/zz_liveevent_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this LiveEvent +func (mg *LiveEvent) GetTerraformResourceType() string { + return "azurerm_media_live_event" +} + +// GetConnectionDetailsMapping for this LiveEvent +func (tr *LiveEvent) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this LiveEvent +func (tr *LiveEvent) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this LiveEvent +func (tr *LiveEvent) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this LiveEvent +func (tr *LiveEvent) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this LiveEvent +func (tr *LiveEvent) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return 
base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this LiveEvent +func (tr *LiveEvent) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this LiveEvent +func (tr *LiveEvent) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this LiveEvent +func (tr *LiveEvent) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this LiveEvent using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *LiveEvent) LateInitialize(attrs []byte) (bool, error) { + params := &LiveEventParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *LiveEvent) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/media/v1beta2/zz_liveevent_types.go b/apis/media/v1beta2/zz_liveevent_types.go new file mode 100755 index 000000000..1c84fdfb0 --- /dev/null +++ b/apis/media/v1beta2/zz_liveevent_types.go @@ -0,0 +1,525 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CrossSiteAccessPolicyInitParameters struct { + + // The content of clientaccesspolicy.xml used by Silverlight. + ClientAccessPolicy *string `json:"clientAccessPolicy,omitempty" tf:"client_access_policy,omitempty"` + + // The content of the Cross Domain Policy (crossdomain.xml). + CrossDomainPolicy *string `json:"crossDomainPolicy,omitempty" tf:"cross_domain_policy,omitempty"` +} + +type CrossSiteAccessPolicyObservation struct { + + // The content of clientaccesspolicy.xml used by Silverlight. + ClientAccessPolicy *string `json:"clientAccessPolicy,omitempty" tf:"client_access_policy,omitempty"` + + // The content of the Cross Domain Policy (crossdomain.xml). 
+ CrossDomainPolicy *string `json:"crossDomainPolicy,omitempty" tf:"cross_domain_policy,omitempty"` +} + +type CrossSiteAccessPolicyParameters struct { + + // The content of clientaccesspolicy.xml used by Silverlight. + // +kubebuilder:validation:Optional + ClientAccessPolicy *string `json:"clientAccessPolicy,omitempty" tf:"client_access_policy,omitempty"` + + // The content of the Cross Domain Policy (crossdomain.xml). + // +kubebuilder:validation:Optional + CrossDomainPolicy *string `json:"crossDomainPolicy,omitempty" tf:"cross_domain_policy,omitempty"` +} + +type EncodingInitParameters struct { + + // Use an ISO 8601 time value between 0.5 to 20 seconds to specify the output fragment length for the video and audio tracks of an encoding live event. For example, use PT2S to indicate 2 seconds. For the video track it also defines the key frame interval, or the length of a GoP (group of pictures). The value cannot be set for pass-through live events. Defaults to PT2S. + KeyFrameInterval *string `json:"keyFrameInterval,omitempty" tf:"key_frame_interval,omitempty"` + + // The optional encoding preset name, used when type is not None. If the type is set to Standard, then the default preset name is Default720p. Else if the type is set to Premium1080p, Changing this forces a new resource to be created. + PresetName *string `json:"presetName,omitempty" tf:"preset_name,omitempty"` + + // Specifies how the input video will be resized to fit the desired output resolution(s). Allowed values are None, AutoFit or AutoSize. Default is None. + StretchMode *string `json:"stretchMode,omitempty" tf:"stretch_mode,omitempty"` + + // Live event type. Possible values are None, Premium1080p, PassthroughBasic, PassthroughStandard and Standard. When set to None, the service simply passes through the incoming video and audio layer(s) to the output. When type is set to Standard or Premium1080p, a live encoder transcodes the incoming stream into multiple bitrates or layers. Defaults to None. 
Changing this forces a new resource to be created. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type EncodingObservation struct { + + // Use an ISO 8601 time value between 0.5 to 20 seconds to specify the output fragment length for the video and audio tracks of an encoding live event. For example, use PT2S to indicate 2 seconds. For the video track it also defines the key frame interval, or the length of a GoP (group of pictures). The value cannot be set for pass-through live events. Defaults to PT2S. + KeyFrameInterval *string `json:"keyFrameInterval,omitempty" tf:"key_frame_interval,omitempty"` + + // The optional encoding preset name, used when type is not None. If the type is set to Standard, then the default preset name is Default720p. Else if the type is set to Premium1080p, Changing this forces a new resource to be created. + PresetName *string `json:"presetName,omitempty" tf:"preset_name,omitempty"` + + // Specifies how the input video will be resized to fit the desired output resolution(s). Allowed values are None, AutoFit or AutoSize. Default is None. + StretchMode *string `json:"stretchMode,omitempty" tf:"stretch_mode,omitempty"` + + // Live event type. Possible values are None, Premium1080p, PassthroughBasic, PassthroughStandard and Standard. When set to None, the service simply passes through the incoming video and audio layer(s) to the output. When type is set to Standard or Premium1080p, a live encoder transcodes the incoming stream into multiple bitrates or layers. Defaults to None. Changing this forces a new resource to be created. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type EncodingParameters struct { + + // Use an ISO 8601 time value between 0.5 to 20 seconds to specify the output fragment length for the video and audio tracks of an encoding live event. For example, use PT2S to indicate 2 seconds. For the video track it also defines the key frame interval, or the length of a GoP (group of pictures). 
The value cannot be set for pass-through live events. Defaults to PT2S. + // +kubebuilder:validation:Optional + KeyFrameInterval *string `json:"keyFrameInterval,omitempty" tf:"key_frame_interval,omitempty"` + + // The optional encoding preset name, used when type is not None. If the type is set to Standard, then the default preset name is Default720p. Else if the type is set to Premium1080p, Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + PresetName *string `json:"presetName,omitempty" tf:"preset_name,omitempty"` + + // Specifies how the input video will be resized to fit the desired output resolution(s). Allowed values are None, AutoFit or AutoSize. Default is None. + // +kubebuilder:validation:Optional + StretchMode *string `json:"stretchMode,omitempty" tf:"stretch_mode,omitempty"` + + // Live event type. Possible values are None, Premium1080p, PassthroughBasic, PassthroughStandard and Standard. When set to None, the service simply passes through the incoming video and audio layer(s) to the output. When type is set to Standard or Premium1080p, a live encoder transcodes the incoming stream into multiple bitrates or layers. Defaults to None. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type EndpointInitParameters struct { +} + +type EndpointObservation struct { + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +type EndpointParameters struct { +} + +type IPAccessControlAllowInitParameters struct { + + // The IP address or CIDR range. + Address *string `json:"address,omitempty" tf:"address,omitempty"` + + // The name which should be used for this Live Event. Changing this forces a new Live Event to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The subnet mask prefix length (see CIDR notation). 
+ SubnetPrefixLength *float64 `json:"subnetPrefixLength,omitempty" tf:"subnet_prefix_length,omitempty"` +} + +type IPAccessControlAllowObservation struct { + + // The IP address or CIDR range. + Address *string `json:"address,omitempty" tf:"address,omitempty"` + + // The name which should be used for this Live Event. Changing this forces a new Live Event to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The subnet mask prefix length (see CIDR notation). + SubnetPrefixLength *float64 `json:"subnetPrefixLength,omitempty" tf:"subnet_prefix_length,omitempty"` +} + +type IPAccessControlAllowParameters struct { + + // The IP address or CIDR range. + // +kubebuilder:validation:Optional + Address *string `json:"address,omitempty" tf:"address,omitempty"` + + // The name which should be used for this Live Event. Changing this forces a new Live Event to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The subnet mask prefix length (see CIDR notation). + // +kubebuilder:validation:Optional + SubnetPrefixLength *float64 `json:"subnetPrefixLength,omitempty" tf:"subnet_prefix_length,omitempty"` +} + +type InputInitParameters struct { + + // A UUID in string form to uniquely identify the stream. If omitted, the service will generate a unique value. Changing this forces a new value to be created. + AccessToken *string `json:"accessToken,omitempty" tf:"access_token,omitempty"` + + // One or more ip_access_control_allow blocks as defined below. + IPAccessControlAllow []IPAccessControlAllowInitParameters `json:"ipAccessControlAllow,omitempty" tf:"ip_access_control_allow,omitempty"` + + // ISO 8601 time duration of the key frame interval duration of the input. This value sets the EXT-X-TARGETDURATION property in the HLS output. For example, use PT2S to indicate 2 seconds. This field cannot be set when type is set to Encoding. 
+ KeyFrameIntervalDuration *string `json:"keyFrameIntervalDuration,omitempty" tf:"key_frame_interval_duration,omitempty"` + + // The input protocol for the live event. Allowed values are FragmentedMP4 and RTMP. Changing this forces a new resource to be created. + StreamingProtocol *string `json:"streamingProtocol,omitempty" tf:"streaming_protocol,omitempty"` +} + +type InputObservation struct { + + // A UUID in string form to uniquely identify the stream. If omitted, the service will generate a unique value. Changing this forces a new value to be created. + AccessToken *string `json:"accessToken,omitempty" tf:"access_token,omitempty"` + + Endpoint []EndpointObservation `json:"endpoint,omitempty" tf:"endpoint,omitempty"` + + // One or more ip_access_control_allow blocks as defined below. + IPAccessControlAllow []IPAccessControlAllowObservation `json:"ipAccessControlAllow,omitempty" tf:"ip_access_control_allow,omitempty"` + + // ISO 8601 time duration of the key frame interval duration of the input. This value sets the EXT-X-TARGETDURATION property in the HLS output. For example, use PT2S to indicate 2 seconds. This field cannot be set when type is set to Encoding. + KeyFrameIntervalDuration *string `json:"keyFrameIntervalDuration,omitempty" tf:"key_frame_interval_duration,omitempty"` + + // The input protocol for the live event. Allowed values are FragmentedMP4 and RTMP. Changing this forces a new resource to be created. + StreamingProtocol *string `json:"streamingProtocol,omitempty" tf:"streaming_protocol,omitempty"` +} + +type InputParameters struct { + + // A UUID in string form to uniquely identify the stream. If omitted, the service will generate a unique value. Changing this forces a new value to be created. + // +kubebuilder:validation:Optional + AccessToken *string `json:"accessToken,omitempty" tf:"access_token,omitempty"` + + // One or more ip_access_control_allow blocks as defined below. 
+ // +kubebuilder:validation:Optional + IPAccessControlAllow []IPAccessControlAllowParameters `json:"ipAccessControlAllow,omitempty" tf:"ip_access_control_allow,omitempty"` + + // ISO 8601 time duration of the key frame interval duration of the input. This value sets the EXT-X-TARGETDURATION property in the HLS output. For example, use PT2S to indicate 2 seconds. This field cannot be set when type is set to Encoding. + // +kubebuilder:validation:Optional + KeyFrameIntervalDuration *string `json:"keyFrameIntervalDuration,omitempty" tf:"key_frame_interval_duration,omitempty"` + + // The input protocol for the live event. Allowed values are FragmentedMP4 and RTMP. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + StreamingProtocol *string `json:"streamingProtocol,omitempty" tf:"streaming_protocol,omitempty"` +} + +type LiveEventInitParameters struct { + + // The flag indicates if the resource should be automatically started on creation. Changing this forces a new resource to be created. + AutoStartEnabled *bool `json:"autoStartEnabled,omitempty" tf:"auto_start_enabled,omitempty"` + + // A cross_site_access_policy block as defined below. + CrossSiteAccessPolicy *CrossSiteAccessPolicyInitParameters `json:"crossSiteAccessPolicy,omitempty" tf:"cross_site_access_policy,omitempty"` + + // A description for the live event. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A encoding block as defined below. + Encoding *EncodingInitParameters `json:"encoding,omitempty" tf:"encoding,omitempty"` + + // When use_static_hostname is set to true, the hostname_prefix specifies the first part of the hostname assigned to the live event preview and ingest endpoints. The final hostname would be a combination of this prefix, the media service account name and a short code for the Azure Media Services data center. 
+ HostNamePrefix *string `json:"hostnamePrefix,omitempty" tf:"hostname_prefix,omitempty"` + + // A input block as defined below. + Input *InputInitParameters `json:"input,omitempty" tf:"input,omitempty"` + + // The Azure Region where the Live Event should exist. Changing this forces a new Live Event to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A preview block as defined below. + Preview *PreviewInitParameters `json:"preview,omitempty" tf:"preview,omitempty"` + + // A list of options to use for the LiveEvent. Possible values are Default, LowLatency, LowLatencyV2. Please see more at this document. Changing this forces a new resource to be created. + StreamOptions []*string `json:"streamOptions,omitempty" tf:"stream_options,omitempty"` + + // A mapping of tags which should be assigned to the Live Event. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies a list of languages (locale) to be used for speech-to-text transcription – it should match the spoken language in the audio track. The value should be in BCP-47 format (e.g: en-US). See the Microsoft Documentation for more information about the live transcription feature and the list of supported languages. + TranscriptionLanguages []*string `json:"transcriptionLanguages,omitempty" tf:"transcription_languages,omitempty"` + + // Specifies whether a static hostname would be assigned to the live event preview and ingest endpoints. Changing this forces a new Live Event to be created. + UseStaticHostName *bool `json:"useStaticHostname,omitempty" tf:"use_static_hostname,omitempty"` +} + +type LiveEventObservation struct { + + // The flag indicates if the resource should be automatically started on creation. Changing this forces a new resource to be created. + AutoStartEnabled *bool `json:"autoStartEnabled,omitempty" tf:"auto_start_enabled,omitempty"` + + // A cross_site_access_policy block as defined below. 
+ CrossSiteAccessPolicy *CrossSiteAccessPolicyObservation `json:"crossSiteAccessPolicy,omitempty" tf:"cross_site_access_policy,omitempty"` + + // A description for the live event. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A encoding block as defined below. + Encoding *EncodingObservation `json:"encoding,omitempty" tf:"encoding,omitempty"` + + // When use_static_hostname is set to true, the hostname_prefix specifies the first part of the hostname assigned to the live event preview and ingest endpoints. The final hostname would be a combination of this prefix, the media service account name and a short code for the Azure Media Services data center. + HostNamePrefix *string `json:"hostnamePrefix,omitempty" tf:"hostname_prefix,omitempty"` + + // The ID of the Live Event. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A input block as defined below. + Input *InputObservation `json:"input,omitempty" tf:"input,omitempty"` + + // The Azure Region where the Live Event should exist. Changing this forces a new Live Event to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The Media Services account name. Changing this forces a new Live Event to be created. + MediaServicesAccountName *string `json:"mediaServicesAccountName,omitempty" tf:"media_services_account_name,omitempty"` + + // A preview block as defined below. + Preview *PreviewObservation `json:"preview,omitempty" tf:"preview,omitempty"` + + // The name of the Resource Group where the Live Event should exist. Changing this forces a new Live Event to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A list of options to use for the LiveEvent. Possible values are Default, LowLatency, LowLatencyV2. Please see more at this document. Changing this forces a new resource to be created. 
+ StreamOptions []*string `json:"streamOptions,omitempty" tf:"stream_options,omitempty"` + + // A mapping of tags which should be assigned to the Live Event. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies a list of languages (locale) to be used for speech-to-text transcription – it should match the spoken language in the audio track. The value should be in BCP-47 format (e.g: en-US). See the Microsoft Documentation for more information about the live transcription feature and the list of supported languages. + TranscriptionLanguages []*string `json:"transcriptionLanguages,omitempty" tf:"transcription_languages,omitempty"` + + // Specifies whether a static hostname would be assigned to the live event preview and ingest endpoints. Changing this forces a new Live Event to be created. + UseStaticHostName *bool `json:"useStaticHostname,omitempty" tf:"use_static_hostname,omitempty"` +} + +type LiveEventParameters struct { + + // The flag indicates if the resource should be automatically started on creation. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + AutoStartEnabled *bool `json:"autoStartEnabled,omitempty" tf:"auto_start_enabled,omitempty"` + + // A cross_site_access_policy block as defined below. + // +kubebuilder:validation:Optional + CrossSiteAccessPolicy *CrossSiteAccessPolicyParameters `json:"crossSiteAccessPolicy,omitempty" tf:"cross_site_access_policy,omitempty"` + + // A description for the live event. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A encoding block as defined below. + // +kubebuilder:validation:Optional + Encoding *EncodingParameters `json:"encoding,omitempty" tf:"encoding,omitempty"` + + // When use_static_hostname is set to true, the hostname_prefix specifies the first part of the hostname assigned to the live event preview and ingest endpoints. 
The final hostname would be a combination of this prefix, the media service account name and a short code for the Azure Media Services data center. + // +kubebuilder:validation:Optional + HostNamePrefix *string `json:"hostnamePrefix,omitempty" tf:"hostname_prefix,omitempty"` + + // A input block as defined below. + // +kubebuilder:validation:Optional + Input *InputParameters `json:"input,omitempty" tf:"input,omitempty"` + + // The Azure Region where the Live Event should exist. Changing this forces a new Live Event to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The Media Services account name. Changing this forces a new Live Event to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/media/v1beta2.ServicesAccount + // +kubebuilder:validation:Optional + MediaServicesAccountName *string `json:"mediaServicesAccountName,omitempty" tf:"media_services_account_name,omitempty"` + + // Reference to a ServicesAccount in media to populate mediaServicesAccountName. + // +kubebuilder:validation:Optional + MediaServicesAccountNameRef *v1.Reference `json:"mediaServicesAccountNameRef,omitempty" tf:"-"` + + // Selector for a ServicesAccount in media to populate mediaServicesAccountName. + // +kubebuilder:validation:Optional + MediaServicesAccountNameSelector *v1.Selector `json:"mediaServicesAccountNameSelector,omitempty" tf:"-"` + + // A preview block as defined below. + // +kubebuilder:validation:Optional + Preview *PreviewParameters `json:"preview,omitempty" tf:"preview,omitempty"` + + // The name of the Resource Group where the Live Event should exist. Changing this forces a new Live Event to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A list of options to use for the LiveEvent. Possible values are Default, LowLatency, LowLatencyV2. Please see more at this document. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + StreamOptions []*string `json:"streamOptions,omitempty" tf:"stream_options,omitempty"` + + // A mapping of tags which should be assigned to the Live Event. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies a list of languages (locale) to be used for speech-to-text transcription – it should match the spoken language in the audio track. The value should be in BCP-47 format (e.g: en-US). See the Microsoft Documentation for more information about the live transcription feature and the list of supported languages. + // +kubebuilder:validation:Optional + TranscriptionLanguages []*string `json:"transcriptionLanguages,omitempty" tf:"transcription_languages,omitempty"` + + // Specifies whether a static hostname would be assigned to the live event preview and ingest endpoints. Changing this forces a new Live Event to be created. 
+ // +kubebuilder:validation:Optional + UseStaticHostName *bool `json:"useStaticHostname,omitempty" tf:"use_static_hostname,omitempty"` +} + +type PreviewEndpointInitParameters struct { +} + +type PreviewEndpointObservation struct { + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +type PreviewEndpointParameters struct { +} + +type PreviewIPAccessControlAllowInitParameters struct { + + // The IP address or CIDR range. + Address *string `json:"address,omitempty" tf:"address,omitempty"` + + // The name which should be used for this Live Event. Changing this forces a new Live Event to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The subnet mask prefix length (see CIDR notation). + SubnetPrefixLength *float64 `json:"subnetPrefixLength,omitempty" tf:"subnet_prefix_length,omitempty"` +} + +type PreviewIPAccessControlAllowObservation struct { + + // The IP address or CIDR range. + Address *string `json:"address,omitempty" tf:"address,omitempty"` + + // The name which should be used for this Live Event. Changing this forces a new Live Event to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The subnet mask prefix length (see CIDR notation). + SubnetPrefixLength *float64 `json:"subnetPrefixLength,omitempty" tf:"subnet_prefix_length,omitempty"` +} + +type PreviewIPAccessControlAllowParameters struct { + + // The IP address or CIDR range. + // +kubebuilder:validation:Optional + Address *string `json:"address,omitempty" tf:"address,omitempty"` + + // The name which should be used for this Live Event. Changing this forces a new Live Event to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The subnet mask prefix length (see CIDR notation). 
+ // +kubebuilder:validation:Optional + SubnetPrefixLength *float64 `json:"subnetPrefixLength,omitempty" tf:"subnet_prefix_length,omitempty"` +} + +type PreviewInitParameters struct { + + // An alternative media identifier associated with the streaming locator created for the preview. The identifier can be used in the CustomLicenseAcquisitionUrlTemplate or the CustomKeyAcquisitionUrlTemplate of the Streaming Policy specified in the streaming_policy_name field. + AlternativeMediaID *string `json:"alternativeMediaId,omitempty" tf:"alternative_media_id,omitempty"` + + // One or more ip_access_control_allow blocks as defined above. + IPAccessControlAllow []PreviewIPAccessControlAllowInitParameters `json:"ipAccessControlAllow,omitempty" tf:"ip_access_control_allow,omitempty"` + + // The identifier of the preview locator in GUID format. Specifying this at creation time allows the caller to know the preview locator url before the event is created. If omitted, the service will generate a random identifier. Changing this forces a new resource to be created. + PreviewLocator *string `json:"previewLocator,omitempty" tf:"preview_locator,omitempty"` + + // The name of streaming policy used for the live event preview. Changing this forces a new resource to be created. + StreamingPolicyName *string `json:"streamingPolicyName,omitempty" tf:"streaming_policy_name,omitempty"` +} + +type PreviewObservation struct { + + // An alternative media identifier associated with the streaming locator created for the preview. The identifier can be used in the CustomLicenseAcquisitionUrlTemplate or the CustomKeyAcquisitionUrlTemplate of the Streaming Policy specified in the streaming_policy_name field. + AlternativeMediaID *string `json:"alternativeMediaId,omitempty" tf:"alternative_media_id,omitempty"` + + Endpoint []PreviewEndpointObservation `json:"endpoint,omitempty" tf:"endpoint,omitempty"` + + // One or more ip_access_control_allow blocks as defined above. 
+ IPAccessControlAllow []PreviewIPAccessControlAllowObservation `json:"ipAccessControlAllow,omitempty" tf:"ip_access_control_allow,omitempty"` + + // The identifier of the preview locator in GUID format. Specifying this at creation time allows the caller to know the preview locator url before the event is created. If omitted, the service will generate a random identifier. Changing this forces a new resource to be created. + PreviewLocator *string `json:"previewLocator,omitempty" tf:"preview_locator,omitempty"` + + // The name of streaming policy used for the live event preview. Changing this forces a new resource to be created. + StreamingPolicyName *string `json:"streamingPolicyName,omitempty" tf:"streaming_policy_name,omitempty"` +} + +type PreviewParameters struct { + + // An alternative media identifier associated with the streaming locator created for the preview. The identifier can be used in the CustomLicenseAcquisitionUrlTemplate or the CustomKeyAcquisitionUrlTemplate of the Streaming Policy specified in the streaming_policy_name field. + // +kubebuilder:validation:Optional + AlternativeMediaID *string `json:"alternativeMediaId,omitempty" tf:"alternative_media_id,omitempty"` + + // One or more ip_access_control_allow blocks as defined above. + // +kubebuilder:validation:Optional + IPAccessControlAllow []PreviewIPAccessControlAllowParameters `json:"ipAccessControlAllow,omitempty" tf:"ip_access_control_allow,omitempty"` + + // The identifier of the preview locator in GUID format. Specifying this at creation time allows the caller to know the preview locator url before the event is created. If omitted, the service will generate a random identifier. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + PreviewLocator *string `json:"previewLocator,omitempty" tf:"preview_locator,omitempty"` + + // The name of streaming policy used for the live event preview. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + StreamingPolicyName *string `json:"streamingPolicyName,omitempty" tf:"streaming_policy_name,omitempty"` +} + +// LiveEventSpec defines the desired state of LiveEvent +type LiveEventSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider LiveEventParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider LiveEventInitParameters `json:"initProvider,omitempty"` +} + +// LiveEventStatus defines the observed state of LiveEvent. +type LiveEventStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider LiveEventObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// LiveEvent is the Schema for the LiveEvents API. Manages a Live Event. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type LiveEvent struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.input) || (has(self.initProvider) && has(self.initProvider.input))",message="spec.forProvider.input is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + Spec LiveEventSpec `json:"spec"` + Status LiveEventStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// LiveEventList contains a list of LiveEvents +type LiveEventList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []LiveEvent `json:"items"` +} + +// Repository type metadata. +var ( + LiveEvent_Kind = "LiveEvent" + LiveEvent_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: LiveEvent_Kind}.String() + LiveEvent_KindAPIVersion = LiveEvent_Kind + "." 
+ CRDGroupVersion.String() + LiveEvent_GroupVersionKind = CRDGroupVersion.WithKind(LiveEvent_Kind) +) + +func init() { + SchemeBuilder.Register(&LiveEvent{}, &LiveEventList{}) +} diff --git a/apis/media/v1beta2/zz_servicesaccount_terraformed.go b/apis/media/v1beta2/zz_servicesaccount_terraformed.go new file mode 100755 index 000000000..88c0e55c6 --- /dev/null +++ b/apis/media/v1beta2/zz_servicesaccount_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ServicesAccount +func (mg *ServicesAccount) GetTerraformResourceType() string { + return "azurerm_media_services_account" +} + +// GetConnectionDetailsMapping for this ServicesAccount +func (tr *ServicesAccount) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ServicesAccount +func (tr *ServicesAccount) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ServicesAccount +func (tr *ServicesAccount) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ServicesAccount +func (tr *ServicesAccount) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ServicesAccount +func (tr *ServicesAccount) GetParameters() (map[string]any, error) { + p, err := 
json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ServicesAccount +func (tr *ServicesAccount) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ServicesAccount +func (tr *ServicesAccount) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this ServicesAccount +func (tr *ServicesAccount) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ServicesAccount using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *ServicesAccount) LateInitialize(attrs []byte) (bool, error) { + params := &ServicesAccountParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ServicesAccount) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/media/v1beta2/zz_servicesaccount_types.go b/apis/media/v1beta2/zz_servicesaccount_types.go new file mode 100755 index 000000000..169682e4a --- /dev/null +++ b/apis/media/v1beta2/zz_servicesaccount_types.go @@ -0,0 +1,416 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type EncryptionInitParameters struct { + + // Specifies the URI of the Key Vault Key used to encrypt data. The key may either be versioned (for example https://vault/keys/mykey/version1) or reference a key without a version (for example https://vault/keys/mykey). + KeyVaultKeyIdentifier *string `json:"keyVaultKeyIdentifier,omitempty" tf:"key_vault_key_identifier,omitempty"` + + // A managed_identity block as defined below. + ManagedIdentity *ManagedIdentityInitParameters `json:"managedIdentity,omitempty" tf:"managed_identity,omitempty"` + + // Specifies the type of key used to encrypt the account data. Possible values are SystemKey and CustomerKey. Defaults to SystemKey. 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type EncryptionObservation struct { + + // The current key used to encrypt the Media Services Account, including the key version. + CurrentKeyIdentifier *string `json:"currentKeyIdentifier,omitempty" tf:"current_key_identifier,omitempty"` + + // Specifies the URI of the Key Vault Key used to encrypt data. The key may either be versioned (for example https://vault/keys/mykey/version1) or reference a key without a version (for example https://vault/keys/mykey). + KeyVaultKeyIdentifier *string `json:"keyVaultKeyIdentifier,omitempty" tf:"key_vault_key_identifier,omitempty"` + + // A managed_identity block as defined below. + ManagedIdentity *ManagedIdentityObservation `json:"managedIdentity,omitempty" tf:"managed_identity,omitempty"` + + // Specifies the type of key used to encrypt the account data. Possible values are SystemKey and CustomerKey. Defaults to SystemKey. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type EncryptionParameters struct { + + // Specifies the URI of the Key Vault Key used to encrypt data. The key may either be versioned (for example https://vault/keys/mykey/version1) or reference a key without a version (for example https://vault/keys/mykey). + // +kubebuilder:validation:Optional + KeyVaultKeyIdentifier *string `json:"keyVaultKeyIdentifier,omitempty" tf:"key_vault_key_identifier,omitempty"` + + // A managed_identity block as defined below. + // +kubebuilder:validation:Optional + ManagedIdentity *ManagedIdentityParameters `json:"managedIdentity,omitempty" tf:"managed_identity,omitempty"` + + // Specifies the type of key used to encrypt the account data. Possible values are SystemKey and CustomerKey. Defaults to SystemKey. 
+ // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityInitParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Media Services Account. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Media Services Account. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityObservation struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Media Services Account. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Principal ID associated with this Managed Service Identity. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID associated with this Managed Service Identity. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Media Services Account. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Media Services Account. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Media Services Account. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). 
+ // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type KeyDeliveryAccessControlInitParameters struct { + + // The Default Action to use when no rules match from ip_allow_list. Possible values are Allow and Deny. + DefaultAction *string `json:"defaultAction,omitempty" tf:"default_action,omitempty"` + + // One or more IP Addresses, or CIDR Blocks which should be able to access the Key Delivery. + // +listType=set + IPAllowList []*string `json:"ipAllowList,omitempty" tf:"ip_allow_list,omitempty"` +} + +type KeyDeliveryAccessControlObservation struct { + + // The Default Action to use when no rules match from ip_allow_list. Possible values are Allow and Deny. + DefaultAction *string `json:"defaultAction,omitempty" tf:"default_action,omitempty"` + + // One or more IP Addresses, or CIDR Blocks which should be able to access the Key Delivery. + // +listType=set + IPAllowList []*string `json:"ipAllowList,omitempty" tf:"ip_allow_list,omitempty"` +} + +type KeyDeliveryAccessControlParameters struct { + + // The Default Action to use when no rules match from ip_allow_list. Possible values are Allow and Deny. + // +kubebuilder:validation:Optional + DefaultAction *string `json:"defaultAction,omitempty" tf:"default_action,omitempty"` + + // One or more IP Addresses, or CIDR Blocks which should be able to access the Key Delivery. + // +kubebuilder:validation:Optional + // +listType=set + IPAllowList []*string `json:"ipAllowList,omitempty" tf:"ip_allow_list,omitempty"` +} + +type ManagedIdentityInitParameters struct { + + // Whether to use System Assigned Identity. Possible Values are true and false. + UseSystemAssignedIdentity *bool `json:"useSystemAssignedIdentity,omitempty" tf:"use_system_assigned_identity,omitempty"` + + // The ID of the User Assigned Identity. 
This value can only be set when use_system_assigned_identity is false + UserAssignedIdentityID *string `json:"userAssignedIdentityId,omitempty" tf:"user_assigned_identity_id,omitempty"` +} + +type ManagedIdentityObservation struct { + + // Whether to use System Assigned Identity. Possible Values are true and false. + UseSystemAssignedIdentity *bool `json:"useSystemAssignedIdentity,omitempty" tf:"use_system_assigned_identity,omitempty"` + + // The ID of the User Assigned Identity. This value can only be set when use_system_assigned_identity is false + UserAssignedIdentityID *string `json:"userAssignedIdentityId,omitempty" tf:"user_assigned_identity_id,omitempty"` +} + +type ManagedIdentityParameters struct { + + // Whether to use System Assigned Identity. Possible Values are true and false. + // +kubebuilder:validation:Optional + UseSystemAssignedIdentity *bool `json:"useSystemAssignedIdentity,omitempty" tf:"use_system_assigned_identity,omitempty"` + + // The ID of the User Assigned Identity. This value can only be set when use_system_assigned_identity is false + // +kubebuilder:validation:Optional + UserAssignedIdentityID *string `json:"userAssignedIdentityId,omitempty" tf:"user_assigned_identity_id,omitempty"` +} + +type ServicesAccountInitParameters struct { + + // An encryption block as defined below. + Encryption *EncryptionInitParameters `json:"encryption,omitempty" tf:"encryption,omitempty"` + + // An identity block as defined below. + Identity *IdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // A key_delivery_access_control block as defined below. + KeyDeliveryAccessControl *KeyDeliveryAccessControlInitParameters `json:"keyDeliveryAccessControl,omitempty" tf:"key_delivery_access_control,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. 
+ Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Whether public network access is allowed for this server. Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // One or more storage_account blocks as defined below. + StorageAccount []StorageAccountInitParameters `json:"storageAccount,omitempty" tf:"storage_account,omitempty"` + + // Specifies the storage authentication type. Possible value is ManagedIdentity or System. + StorageAuthenticationType *string `json:"storageAuthenticationType,omitempty" tf:"storage_authentication_type,omitempty"` + + // A mapping of tags assigned to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ServicesAccountObservation struct { + + // An encryption block as defined below. + Encryption *EncryptionObservation `json:"encryption,omitempty" tf:"encryption,omitempty"` + + // The ID of the Media Services Account. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. + Identity *IdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // A key_delivery_access_control block as defined below. + KeyDeliveryAccessControl *KeyDeliveryAccessControlObservation `json:"keyDeliveryAccessControl,omitempty" tf:"key_delivery_access_control,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Whether public network access is allowed for this server. Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The name of the resource group in which to create the Media Services Account. Changing this forces a new resource to be created. 
+ ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // One or more storage_account blocks as defined below. + StorageAccount []StorageAccountObservation `json:"storageAccount,omitempty" tf:"storage_account,omitempty"` + + // Specifies the storage authentication type. Possible value is ManagedIdentity or System. + StorageAuthenticationType *string `json:"storageAuthenticationType,omitempty" tf:"storage_authentication_type,omitempty"` + + // A mapping of tags assigned to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ServicesAccountParameters struct { + + // An encryption block as defined below. + // +kubebuilder:validation:Optional + Encryption *EncryptionParameters `json:"encryption,omitempty" tf:"encryption,omitempty"` + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *IdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // A key_delivery_access_control block as defined below. + // +kubebuilder:validation:Optional + KeyDeliveryAccessControl *KeyDeliveryAccessControlParameters `json:"keyDeliveryAccessControl,omitempty" tf:"key_delivery_access_control,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Whether public network access is allowed for this server. Defaults to true. + // +kubebuilder:validation:Optional + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The name of the resource group in which to create the Media Services Account. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // One or more storage_account blocks as defined below. + // +kubebuilder:validation:Optional + StorageAccount []StorageAccountParameters `json:"storageAccount,omitempty" tf:"storage_account,omitempty"` + + // Specifies the storage authentication type. Possible value is ManagedIdentity or System. + // +kubebuilder:validation:Optional + StorageAuthenticationType *string `json:"storageAuthenticationType,omitempty" tf:"storage_authentication_type,omitempty"` + + // A mapping of tags assigned to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type StorageAccountInitParameters struct { + + // Specifies the ID of the Storage Account that will be associated with the Media Services instance. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Reference to a Account in storage to populate id. + // +kubebuilder:validation:Optional + IDRef *v1.Reference `json:"idRef,omitempty" tf:"-"` + + // Selector for a Account in storage to populate id. 
+ // +kubebuilder:validation:Optional + IDSelector *v1.Selector `json:"idSelector,omitempty" tf:"-"` + + // Specifies whether the storage account should be the primary account or not. Defaults to false. + IsPrimary *bool `json:"isPrimary,omitempty" tf:"is_primary,omitempty"` + + // A managed_identity block as defined below. + ManagedIdentity *StorageAccountManagedIdentityInitParameters `json:"managedIdentity,omitempty" tf:"managed_identity,omitempty"` +} + +type StorageAccountManagedIdentityInitParameters struct { + + // Whether to use System Assigned Identity. Possible Values are true and false. + UseSystemAssignedIdentity *bool `json:"useSystemAssignedIdentity,omitempty" tf:"use_system_assigned_identity,omitempty"` + + // The ID of the User Assigned Identity. This value can only be set when use_system_assigned_identity is false + UserAssignedIdentityID *string `json:"userAssignedIdentityId,omitempty" tf:"user_assigned_identity_id,omitempty"` +} + +type StorageAccountManagedIdentityObservation struct { + + // Whether to use System Assigned Identity. Possible Values are true and false. + UseSystemAssignedIdentity *bool `json:"useSystemAssignedIdentity,omitempty" tf:"use_system_assigned_identity,omitempty"` + + // The ID of the User Assigned Identity. This value can only be set when use_system_assigned_identity is false + UserAssignedIdentityID *string `json:"userAssignedIdentityId,omitempty" tf:"user_assigned_identity_id,omitempty"` +} + +type StorageAccountManagedIdentityParameters struct { + + // Whether to use System Assigned Identity. Possible Values are true and false. + // +kubebuilder:validation:Optional + UseSystemAssignedIdentity *bool `json:"useSystemAssignedIdentity,omitempty" tf:"use_system_assigned_identity,omitempty"` + + // The ID of the User Assigned Identity. 
This value can only be set when use_system_assigned_identity is false + // +kubebuilder:validation:Optional + UserAssignedIdentityID *string `json:"userAssignedIdentityId,omitempty" tf:"user_assigned_identity_id,omitempty"` +} + +type StorageAccountObservation struct { + + // Specifies the ID of the Storage Account that will be associated with the Media Services instance. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies whether the storage account should be the primary account or not. Defaults to false. + IsPrimary *bool `json:"isPrimary,omitempty" tf:"is_primary,omitempty"` + + // A managed_identity block as defined below. + ManagedIdentity *StorageAccountManagedIdentityObservation `json:"managedIdentity,omitempty" tf:"managed_identity,omitempty"` +} + +type StorageAccountParameters struct { + + // Specifies the ID of the Storage Account that will be associated with the Media Services instance. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Reference to a Account in storage to populate id. + // +kubebuilder:validation:Optional + IDRef *v1.Reference `json:"idRef,omitempty" tf:"-"` + + // Selector for a Account in storage to populate id. + // +kubebuilder:validation:Optional + IDSelector *v1.Selector `json:"idSelector,omitempty" tf:"-"` + + // Specifies whether the storage account should be the primary account or not. Defaults to false. + // +kubebuilder:validation:Optional + IsPrimary *bool `json:"isPrimary,omitempty" tf:"is_primary,omitempty"` + + // A managed_identity block as defined below. 
+ // +kubebuilder:validation:Optional + ManagedIdentity *StorageAccountManagedIdentityParameters `json:"managedIdentity,omitempty" tf:"managed_identity,omitempty"` +} + +// ServicesAccountSpec defines the desired state of ServicesAccount +type ServicesAccountSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ServicesAccountParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ServicesAccountInitParameters `json:"initProvider,omitempty"` +} + +// ServicesAccountStatus defines the observed state of ServicesAccount. +type ServicesAccountStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ServicesAccountObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ServicesAccount is the Schema for the ServicesAccounts API. Manages a Media Services Account. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type ServicesAccount struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.storageAccount) || (has(self.initProvider) && has(self.initProvider.storageAccount))",message="spec.forProvider.storageAccount is a required parameter" + Spec ServicesAccountSpec `json:"spec"` + Status ServicesAccountStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ServicesAccountList contains a list of ServicesAccounts +type ServicesAccountList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ServicesAccount `json:"items"` +} + +// Repository type metadata. +var ( + ServicesAccount_Kind = "ServicesAccount" + ServicesAccount_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ServicesAccount_Kind}.String() + ServicesAccount_KindAPIVersion = ServicesAccount_Kind + "." 
+ CRDGroupVersion.String() + ServicesAccount_GroupVersionKind = CRDGroupVersion.WithKind(ServicesAccount_Kind) +) + +func init() { + SchemeBuilder.Register(&ServicesAccount{}, &ServicesAccountList{}) +} diff --git a/apis/media/v1beta2/zz_servicesaccountfilter_terraformed.go b/apis/media/v1beta2/zz_servicesaccountfilter_terraformed.go new file mode 100755 index 000000000..400c45441 --- /dev/null +++ b/apis/media/v1beta2/zz_servicesaccountfilter_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ServicesAccountFilter +func (mg *ServicesAccountFilter) GetTerraformResourceType() string { + return "azurerm_media_services_account_filter" +} + +// GetConnectionDetailsMapping for this ServicesAccountFilter +func (tr *ServicesAccountFilter) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ServicesAccountFilter +func (tr *ServicesAccountFilter) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ServicesAccountFilter +func (tr *ServicesAccountFilter) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ServicesAccountFilter +func (tr *ServicesAccountFilter) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters 
of this ServicesAccountFilter +func (tr *ServicesAccountFilter) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ServicesAccountFilter +func (tr *ServicesAccountFilter) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ServicesAccountFilter +func (tr *ServicesAccountFilter) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this ServicesAccountFilter +func (tr *ServicesAccountFilter) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ServicesAccountFilter using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ServicesAccountFilter) LateInitialize(attrs []byte) (bool, error) { + params := &ServicesAccountFilterParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ServicesAccountFilter) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/media/v1beta2/zz_servicesaccountfilter_types.go b/apis/media/v1beta2/zz_servicesaccountfilter_types.go new file mode 100755 index 000000000..4a9cd5533 --- /dev/null +++ b/apis/media/v1beta2/zz_servicesaccountfilter_types.go @@ -0,0 +1,281 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ServicesAccountFilterInitParameters struct { + + // The first quality bitrate. Sets the first video track to appear in the Live Streaming playlist to allow HLS native players to start downloading from this quality level at the beginning. 
+ FirstQualityBitrate *float64 `json:"firstQualityBitrate,omitempty" tf:"first_quality_bitrate,omitempty"` + + // A presentation_time_range block as defined below. + PresentationTimeRange *ServicesAccountFilterPresentationTimeRangeInitParameters `json:"presentationTimeRange,omitempty" tf:"presentation_time_range,omitempty"` + + // One or more track_selection blocks as defined below. + TrackSelection []ServicesAccountFilterTrackSelectionInitParameters `json:"trackSelection,omitempty" tf:"track_selection,omitempty"` +} + +type ServicesAccountFilterObservation struct { + + // The first quality bitrate. Sets the first video track to appear in the Live Streaming playlist to allow HLS native players to start downloading from this quality level at the beginning. + FirstQualityBitrate *float64 `json:"firstQualityBitrate,omitempty" tf:"first_quality_bitrate,omitempty"` + + // The ID of the Account Filter. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The Media Services account name. Changing this forces a new Account Filter to be created. + MediaServicesAccountName *string `json:"mediaServicesAccountName,omitempty" tf:"media_services_account_name,omitempty"` + + // A presentation_time_range block as defined below. + PresentationTimeRange *ServicesAccountFilterPresentationTimeRangeObservation `json:"presentationTimeRange,omitempty" tf:"presentation_time_range,omitempty"` + + // The name of the Resource Group where the Account Filter should exist. Changing this forces a new Account Filter to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // One or more track_selection blocks as defined below. + TrackSelection []ServicesAccountFilterTrackSelectionObservation `json:"trackSelection,omitempty" tf:"track_selection,omitempty"` +} + +type ServicesAccountFilterParameters struct { + + // The first quality bitrate. 
Sets the first video track to appear in the Live Streaming playlist to allow HLS native players to start downloading from this quality level at the beginning. + // +kubebuilder:validation:Optional + FirstQualityBitrate *float64 `json:"firstQualityBitrate,omitempty" tf:"first_quality_bitrate,omitempty"` + + // The Media Services account name. Changing this forces a new Account Filter to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/media/v1beta2.ServicesAccount + // +kubebuilder:validation:Optional + MediaServicesAccountName *string `json:"mediaServicesAccountName,omitempty" tf:"media_services_account_name,omitempty"` + + // Reference to a ServicesAccount in media to populate mediaServicesAccountName. + // +kubebuilder:validation:Optional + MediaServicesAccountNameRef *v1.Reference `json:"mediaServicesAccountNameRef,omitempty" tf:"-"` + + // Selector for a ServicesAccount in media to populate mediaServicesAccountName. + // +kubebuilder:validation:Optional + MediaServicesAccountNameSelector *v1.Selector `json:"mediaServicesAccountNameSelector,omitempty" tf:"-"` + + // A presentation_time_range block as defined below. + // +kubebuilder:validation:Optional + PresentationTimeRange *ServicesAccountFilterPresentationTimeRangeParameters `json:"presentationTimeRange,omitempty" tf:"presentation_time_range,omitempty"` + + // The name of the Resource Group where the Account Filter should exist. Changing this forces a new Account Filter to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. 
+ // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // One or more track_selection blocks as defined below. + // +kubebuilder:validation:Optional + TrackSelection []ServicesAccountFilterTrackSelectionParameters `json:"trackSelection,omitempty" tf:"track_selection,omitempty"` +} + +type ServicesAccountFilterPresentationTimeRangeInitParameters struct { + + // The absolute end time boundary. Applies to Video on Demand (VoD). + // For the Live Streaming presentation, it is silently ignored and applied when the presentation ends and the stream becomes VoD. This is a long value that represents an absolute end point of the presentation, rounded to the closest next GOP start. The unit is defined by unit_timescale_in_milliseconds, so an end_in_units of 180 would be for 3 minutes. Use start_in_units and end_in_units to trim the fragments that will be in the playlist (manifest). For example, start_in_units set to 20 and end_in_units set to 60 using unit_timescale_in_milliseconds in 1000 will generate a playlist that contains fragments from between 20 seconds and 60 seconds of the VoD presentation. If a fragment straddles the boundary, the entire fragment will be included in the manifest. + EndInUnits *float64 `json:"endInUnits,omitempty" tf:"end_in_units,omitempty"` + + // Indicates whether the end_in_units property must be present. If true, end_in_units must be specified or a bad request code is returned. Applies to Live Streaming only. Allowed values: false, true. + ForceEnd *bool `json:"forceEnd,omitempty" tf:"force_end,omitempty"` + + // The relative to end right edge. Applies to Live Streaming only. + // This value defines the latest live position that a client can seek to. 
Using this property, you can delay live playback position and create a server-side buffer for players. The unit is defined by unit_timescale_in_milliseconds. The maximum live back off duration is 300 seconds. For example, a value of 20 means that the latest available content is 20 seconds delayed from the real live edge. + LiveBackoffInUnits *float64 `json:"liveBackoffInUnits,omitempty" tf:"live_backoff_in_units,omitempty"` + + // The relative to end sliding window. Applies to Live Streaming only. Use presentation_window_in_units to apply a sliding window of fragments to include in a playlist. The unit is defined by unit_timescale_in_milliseconds. For example, set presentation_window_in_units to 120 to apply a two-minute sliding window. Media within 2 minutes of the live edge will be included in the playlist. If a fragment straddles the boundary, the entire fragment will be included in the playlist. The minimum presentation window duration is 60 seconds. + PresentationWindowInUnits *float64 `json:"presentationWindowInUnits,omitempty" tf:"presentation_window_in_units,omitempty"` + + // The absolute start time boundary. Applies to Video on Demand (VoD) or Live Streaming. This is a long value that represents an absolute start point of the stream. The value gets rounded to the closest next GOP start. The unit is defined by unit_timescale_in_milliseconds, so a start_in_units of 15 would be for 15 seconds. Use start_in_units and end_in_units to trim the fragments that will be in the playlist (manifest). For example, start_in_units set to 20 and end_in_units set to 60 using unit_timescale_in_milliseconds in 1000 will generate a playlist that contains fragments from between 20 seconds and 60 seconds of the VoD presentation. If a fragment straddles the boundary, the entire fragment will be included in the manifest. + StartInUnits *float64 `json:"startInUnits,omitempty" tf:"start_in_units,omitempty"` + + // Specified as the number of milliseconds in one unit timescale. 
For example, if you want to set a start_in_units at 30 seconds, you would use a value of 30 when using the unit_timescale_in_milliseconds in 1000. Or if you want to set start_in_units in 30 milliseconds, you would use a value of 30 when using the unit_timescale_in_milliseconds in 1. Applies timescale to start_in_units, start_timescale and presentation_window_in_timescale and live_backoff_in_timescale. + UnitTimescaleInMilliseconds *float64 `json:"unitTimescaleInMilliseconds,omitempty" tf:"unit_timescale_in_milliseconds,omitempty"` +} + +type ServicesAccountFilterPresentationTimeRangeObservation struct { + + // The absolute end time boundary. Applies to Video on Demand (VoD). + // For the Live Streaming presentation, it is silently ignored and applied when the presentation ends and the stream becomes VoD. This is a long value that represents an absolute end point of the presentation, rounded to the closest next GOP start. The unit is defined by unit_timescale_in_milliseconds, so an end_in_units of 180 would be for 3 minutes. Use start_in_units and end_in_units to trim the fragments that will be in the playlist (manifest). For example, start_in_units set to 20 and end_in_units set to 60 using unit_timescale_in_milliseconds in 1000 will generate a playlist that contains fragments from between 20 seconds and 60 seconds of the VoD presentation. If a fragment straddles the boundary, the entire fragment will be included in the manifest. + EndInUnits *float64 `json:"endInUnits,omitempty" tf:"end_in_units,omitempty"` + + // Indicates whether the end_in_units property must be present. If true, end_in_units must be specified or a bad request code is returned. Applies to Live Streaming only. Allowed values: false, true. + ForceEnd *bool `json:"forceEnd,omitempty" tf:"force_end,omitempty"` + + // The relative to end right edge. Applies to Live Streaming only. + // This value defines the latest live position that a client can seek to. 
Using this property, you can delay live playback position and create a server-side buffer for players. The unit is defined by unit_timescale_in_milliseconds. The maximum live back off duration is 300 seconds. For example, a value of 20 means that the latest available content is 20 seconds delayed from the real live edge. + LiveBackoffInUnits *float64 `json:"liveBackoffInUnits,omitempty" tf:"live_backoff_in_units,omitempty"` + + // The relative to end sliding window. Applies to Live Streaming only. Use presentation_window_in_units to apply a sliding window of fragments to include in a playlist. The unit is defined by unit_timescale_in_milliseconds. For example, set presentation_window_in_units to 120 to apply a two-minute sliding window. Media within 2 minutes of the live edge will be included in the playlist. If a fragment straddles the boundary, the entire fragment will be included in the playlist. The minimum presentation window duration is 60 seconds. + PresentationWindowInUnits *float64 `json:"presentationWindowInUnits,omitempty" tf:"presentation_window_in_units,omitempty"` + + // The absolute start time boundary. Applies to Video on Demand (VoD) or Live Streaming. This is a long value that represents an absolute start point of the stream. The value gets rounded to the closest next GOP start. The unit is defined by unit_timescale_in_milliseconds, so a start_in_units of 15 would be for 15 seconds. Use start_in_units and end_in_units to trim the fragments that will be in the playlist (manifest). For example, start_in_units set to 20 and end_in_units set to 60 using unit_timescale_in_milliseconds in 1000 will generate a playlist that contains fragments from between 20 seconds and 60 seconds of the VoD presentation. If a fragment straddles the boundary, the entire fragment will be included in the manifest. + StartInUnits *float64 `json:"startInUnits,omitempty" tf:"start_in_units,omitempty"` + + // Specified as the number of milliseconds in one unit timescale. 
For example, if you want to set a start_in_units at 30 seconds, you would use a value of 30 when using the unit_timescale_in_milliseconds in 1000. Or if you want to set start_in_units in 30 milliseconds, you would use a value of 30 when using the unit_timescale_in_milliseconds in 1. Applies timescale to start_in_units, start_timescale and presentation_window_in_timescale and live_backoff_in_timescale. + UnitTimescaleInMilliseconds *float64 `json:"unitTimescaleInMilliseconds,omitempty" tf:"unit_timescale_in_milliseconds,omitempty"` +} + +type ServicesAccountFilterPresentationTimeRangeParameters struct { + + // The absolute end time boundary. Applies to Video on Demand (VoD). + // For the Live Streaming presentation, it is silently ignored and applied when the presentation ends and the stream becomes VoD. This is a long value that represents an absolute end point of the presentation, rounded to the closest next GOP start. The unit is defined by unit_timescale_in_milliseconds, so an end_in_units of 180 would be for 3 minutes. Use start_in_units and end_in_units to trim the fragments that will be in the playlist (manifest). For example, start_in_units set to 20 and end_in_units set to 60 using unit_timescale_in_milliseconds in 1000 will generate a playlist that contains fragments from between 20 seconds and 60 seconds of the VoD presentation. If a fragment straddles the boundary, the entire fragment will be included in the manifest. + // +kubebuilder:validation:Optional + EndInUnits *float64 `json:"endInUnits,omitempty" tf:"end_in_units,omitempty"` + + // Indicates whether the end_in_units property must be present. If true, end_in_units must be specified or a bad request code is returned. Applies to Live Streaming only. Allowed values: false, true. + // +kubebuilder:validation:Optional + ForceEnd *bool `json:"forceEnd,omitempty" tf:"force_end,omitempty"` + + // The relative to end right edge. Applies to Live Streaming only. 
+ // This value defines the latest live position that a client can seek to. Using this property, you can delay live playback position and create a server-side buffer for players. The unit is defined by unit_timescale_in_milliseconds. The maximum live back off duration is 300 seconds. For example, a value of 20 means that the latest available content is 20 seconds delayed from the real live edge. + // +kubebuilder:validation:Optional + LiveBackoffInUnits *float64 `json:"liveBackoffInUnits,omitempty" tf:"live_backoff_in_units,omitempty"` + + // The relative to end sliding window. Applies to Live Streaming only. Use presentation_window_in_units to apply a sliding window of fragments to include in a playlist. The unit is defined by unit_timescale_in_milliseconds. For example, set presentation_window_in_units to 120 to apply a two-minute sliding window. Media within 2 minutes of the live edge will be included in the playlist. If a fragment straddles the boundary, the entire fragment will be included in the playlist. The minimum presentation window duration is 60 seconds. + // +kubebuilder:validation:Optional + PresentationWindowInUnits *float64 `json:"presentationWindowInUnits,omitempty" tf:"presentation_window_in_units,omitempty"` + + // The absolute start time boundary. Applies to Video on Demand (VoD) or Live Streaming. This is a long value that represents an absolute start point of the stream. The value gets rounded to the closest next GOP start. The unit is defined by unit_timescale_in_milliseconds, so a start_in_units of 15 would be for 15 seconds. Use start_in_units and end_in_units to trim the fragments that will be in the playlist (manifest). For example, start_in_units set to 20 and end_in_units set to 60 using unit_timescale_in_milliseconds in 1000 will generate a playlist that contains fragments from between 20 seconds and 60 seconds of the VoD presentation. If a fragment straddles the boundary, the entire fragment will be included in the manifest. 
+ // +kubebuilder:validation:Optional + StartInUnits *float64 `json:"startInUnits,omitempty" tf:"start_in_units,omitempty"` + + // Specified as the number of milliseconds in one unit timescale. For example, if you want to set a start_in_units at 30 seconds, you would use a value of 30 when using the unit_timescale_in_milliseconds in 1000. Or if you want to set start_in_units in 30 milliseconds, you would use a value of 30 when using the unit_timescale_in_milliseconds in 1. Applies timescale to start_in_units, start_timescale and presentation_window_in_timescale and live_backoff_in_timescale. + // +kubebuilder:validation:Optional + UnitTimescaleInMilliseconds *float64 `json:"unitTimescaleInMilliseconds" tf:"unit_timescale_in_milliseconds,omitempty"` +} + +type ServicesAccountFilterTrackSelectionInitParameters struct { + + // One or more selection blocks as defined above. + Condition []TrackSelectionConditionInitParameters `json:"condition,omitempty" tf:"condition,omitempty"` +} + +type ServicesAccountFilterTrackSelectionObservation struct { + + // One or more selection blocks as defined above. + Condition []TrackSelectionConditionObservation `json:"condition,omitempty" tf:"condition,omitempty"` +} + +type ServicesAccountFilterTrackSelectionParameters struct { + + // One or more selection blocks as defined above. + // +kubebuilder:validation:Optional + Condition []TrackSelectionConditionParameters `json:"condition" tf:"condition,omitempty"` +} + +type TrackSelectionConditionInitParameters struct { + + // The condition operation to test a track property against. Supported values are Equal and NotEqual. + Operation *string `json:"operation,omitempty" tf:"operation,omitempty"` + + // The track property to compare. Supported values are Bitrate, FourCC, Language, Name and Type. Check documentation for more details. + Property *string `json:"property,omitempty" tf:"property,omitempty"` + + // The track property value to match or not match. 
+ Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type TrackSelectionConditionObservation struct { + + // The condition operation to test a track property against. Supported values are Equal and NotEqual. + Operation *string `json:"operation,omitempty" tf:"operation,omitempty"` + + // The track property to compare. Supported values are Bitrate, FourCC, Language, Name and Type. Check documentation for more details. + Property *string `json:"property,omitempty" tf:"property,omitempty"` + + // The track property value to match or not match. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type TrackSelectionConditionParameters struct { + + // The condition operation to test a track property against. Supported values are Equal and NotEqual. + // +kubebuilder:validation:Optional + Operation *string `json:"operation" tf:"operation,omitempty"` + + // The track property to compare. Supported values are Bitrate, FourCC, Language, Name and Type. Check documentation for more details. + // +kubebuilder:validation:Optional + Property *string `json:"property" tf:"property,omitempty"` + + // The track property value to match or not match. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +// ServicesAccountFilterSpec defines the desired state of ServicesAccountFilter +type ServicesAccountFilterSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ServicesAccountFilterParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ServicesAccountFilterInitParameters `json:"initProvider,omitempty"` +} + +// ServicesAccountFilterStatus defines the observed state of ServicesAccountFilter. +type ServicesAccountFilterStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ServicesAccountFilterObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ServicesAccountFilter is the Schema for the ServicesAccountFilters API. Manages a Media Services Account Filter. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type ServicesAccountFilter struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec ServicesAccountFilterSpec `json:"spec"` + Status ServicesAccountFilterStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ServicesAccountFilterList contains a list of ServicesAccountFilters +type ServicesAccountFilterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ServicesAccountFilter `json:"items"` +} + +// Repository type metadata. 
+var ( + ServicesAccountFilter_Kind = "ServicesAccountFilter" + ServicesAccountFilter_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ServicesAccountFilter_Kind}.String() + ServicesAccountFilter_KindAPIVersion = ServicesAccountFilter_Kind + "." + CRDGroupVersion.String() + ServicesAccountFilter_GroupVersionKind = CRDGroupVersion.WithKind(ServicesAccountFilter_Kind) +) + +func init() { + SchemeBuilder.Register(&ServicesAccountFilter{}, &ServicesAccountFilterList{}) +} diff --git a/apis/media/v1beta2/zz_streamingendpoint_terraformed.go b/apis/media/v1beta2/zz_streamingendpoint_terraformed.go new file mode 100755 index 000000000..ef16d1b90 --- /dev/null +++ b/apis/media/v1beta2/zz_streamingendpoint_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this StreamingEndpoint +func (mg *StreamingEndpoint) GetTerraformResourceType() string { + return "azurerm_media_streaming_endpoint" +} + +// GetConnectionDetailsMapping for this StreamingEndpoint +func (tr *StreamingEndpoint) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this StreamingEndpoint +func (tr *StreamingEndpoint) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this StreamingEndpoint +func (tr *StreamingEndpoint) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID 
of underlying Terraform resource of this StreamingEndpoint +func (tr *StreamingEndpoint) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this StreamingEndpoint +func (tr *StreamingEndpoint) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this StreamingEndpoint +func (tr *StreamingEndpoint) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this StreamingEndpoint +func (tr *StreamingEndpoint) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this StreamingEndpoint +func (tr *StreamingEndpoint) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this StreamingEndpoint using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *StreamingEndpoint) LateInitialize(attrs []byte) (bool, error) { + params := &StreamingEndpointParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *StreamingEndpoint) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/media/v1beta2/zz_streamingendpoint_types.go b/apis/media/v1beta2/zz_streamingendpoint_types.go new file mode 100755 index 000000000..919c2e698 --- /dev/null +++ b/apis/media/v1beta2/zz_streamingendpoint_types.go @@ -0,0 +1,403 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AccessControlInitParameters struct { + + // One or more akamai_signature_header_authentication_key blocks as defined below. 
+ AkamaiSignatureHeaderAuthenticationKey []AkamaiSignatureHeaderAuthenticationKeyInitParameters `json:"akamaiSignatureHeaderAuthenticationKey,omitempty" tf:"akamai_signature_header_authentication_key,omitempty"` + + // A ip_allow block as defined below. + IPAllow []IPAllowInitParameters `json:"ipAllow,omitempty" tf:"ip_allow,omitempty"` +} + +type AccessControlObservation struct { + + // One or more akamai_signature_header_authentication_key blocks as defined below. + AkamaiSignatureHeaderAuthenticationKey []AkamaiSignatureHeaderAuthenticationKeyObservation `json:"akamaiSignatureHeaderAuthenticationKey,omitempty" tf:"akamai_signature_header_authentication_key,omitempty"` + + // A ip_allow block as defined below. + IPAllow []IPAllowObservation `json:"ipAllow,omitempty" tf:"ip_allow,omitempty"` +} + +type AccessControlParameters struct { + + // One or more akamai_signature_header_authentication_key blocks as defined below. + // +kubebuilder:validation:Optional + AkamaiSignatureHeaderAuthenticationKey []AkamaiSignatureHeaderAuthenticationKeyParameters `json:"akamaiSignatureHeaderAuthenticationKey,omitempty" tf:"akamai_signature_header_authentication_key,omitempty"` + + // A ip_allow block as defined below. + // +kubebuilder:validation:Optional + IPAllow []IPAllowParameters `json:"ipAllow,omitempty" tf:"ip_allow,omitempty"` +} + +type AkamaiSignatureHeaderAuthenticationKeyInitParameters struct { + + // Authentication key. + Base64Key *string `json:"base64Key,omitempty" tf:"base64_key,omitempty"` + + // The expiration time of the authentication key. + Expiration *string `json:"expiration,omitempty" tf:"expiration,omitempty"` + + // Identifier of the key. + Identifier *string `json:"identifier,omitempty" tf:"identifier,omitempty"` +} + +type AkamaiSignatureHeaderAuthenticationKeyObservation struct { + + // Authentication key. + Base64Key *string `json:"base64Key,omitempty" tf:"base64_key,omitempty"` + + // The expiration time of the authentication key. 
+ Expiration *string `json:"expiration,omitempty" tf:"expiration,omitempty"` + + // Identifier of the key. + Identifier *string `json:"identifier,omitempty" tf:"identifier,omitempty"` +} + +type AkamaiSignatureHeaderAuthenticationKeyParameters struct { + + // Authentication key. + // +kubebuilder:validation:Optional + Base64Key *string `json:"base64Key,omitempty" tf:"base64_key,omitempty"` + + // The expiration time of the authentication key. + // +kubebuilder:validation:Optional + Expiration *string `json:"expiration,omitempty" tf:"expiration,omitempty"` + + // Identifier of the key. + // +kubebuilder:validation:Optional + Identifier *string `json:"identifier,omitempty" tf:"identifier,omitempty"` +} + +type IPAllowInitParameters struct { + + // The IP address to allow. + Address *string `json:"address,omitempty" tf:"address,omitempty"` + + // The sku name of Streaming Endpoint. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The subnet mask prefix length (see CIDR notation). + SubnetPrefixLength *float64 `json:"subnetPrefixLength,omitempty" tf:"subnet_prefix_length,omitempty"` +} + +type IPAllowObservation struct { + + // The IP address to allow. + Address *string `json:"address,omitempty" tf:"address,omitempty"` + + // The sku name of Streaming Endpoint. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The subnet mask prefix length (see CIDR notation). + SubnetPrefixLength *float64 `json:"subnetPrefixLength,omitempty" tf:"subnet_prefix_length,omitempty"` +} + +type IPAllowParameters struct { + + // The IP address to allow. + // +kubebuilder:validation:Optional + Address *string `json:"address,omitempty" tf:"address,omitempty"` + + // The sku name of Streaming Endpoint. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The subnet mask prefix length (see CIDR notation). 
+ // +kubebuilder:validation:Optional + SubnetPrefixLength *float64 `json:"subnetPrefixLength,omitempty" tf:"subnet_prefix_length,omitempty"` +} + +type SkuInitParameters struct { +} + +type SkuObservation struct { + + // The sku capacity of Streaming Endpoint. + Capacity *float64 `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // The sku name of Streaming Endpoint. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type SkuParameters struct { +} + +type StreamingEndpointCrossSiteAccessPolicyInitParameters struct { + + // The content of clientaccesspolicy.xml used by Silverlight. + ClientAccessPolicy *string `json:"clientAccessPolicy,omitempty" tf:"client_access_policy,omitempty"` + + // The content of crossdomain.xml used by Silverlight. + CrossDomainPolicy *string `json:"crossDomainPolicy,omitempty" tf:"cross_domain_policy,omitempty"` +} + +type StreamingEndpointCrossSiteAccessPolicyObservation struct { + + // The content of clientaccesspolicy.xml used by Silverlight. + ClientAccessPolicy *string `json:"clientAccessPolicy,omitempty" tf:"client_access_policy,omitempty"` + + // The content of crossdomain.xml used by Silverlight. + CrossDomainPolicy *string `json:"crossDomainPolicy,omitempty" tf:"cross_domain_policy,omitempty"` +} + +type StreamingEndpointCrossSiteAccessPolicyParameters struct { + + // The content of clientaccesspolicy.xml used by Silverlight. + // +kubebuilder:validation:Optional + ClientAccessPolicy *string `json:"clientAccessPolicy,omitempty" tf:"client_access_policy,omitempty"` + + // The content of crossdomain.xml used by Silverlight. + // +kubebuilder:validation:Optional + CrossDomainPolicy *string `json:"crossDomainPolicy,omitempty" tf:"cross_domain_policy,omitempty"` +} + +type StreamingEndpointInitParameters struct { + + // A access_control block as defined below. 
+ AccessControl *AccessControlInitParameters `json:"accessControl,omitempty" tf:"access_control,omitempty"` + + // The flag indicates if the resource should be automatically started on creation. + AutoStartEnabled *bool `json:"autoStartEnabled,omitempty" tf:"auto_start_enabled,omitempty"` + + // The CDN enabled flag. + CdnEnabled *bool `json:"cdnEnabled,omitempty" tf:"cdn_enabled,omitempty"` + + // The CDN profile name. + CdnProfile *string `json:"cdnProfile,omitempty" tf:"cdn_profile,omitempty"` + + // The CDN provider name. Supported value are StandardVerizon,PremiumVerizon and StandardAkamai + CdnProvider *string `json:"cdnProvider,omitempty" tf:"cdn_provider,omitempty"` + + // A cross_site_access_policy block as defined below. + CrossSiteAccessPolicy *StreamingEndpointCrossSiteAccessPolicyInitParameters `json:"crossSiteAccessPolicy,omitempty" tf:"cross_site_access_policy,omitempty"` + + // The custom host names of the streaming endpoint. + // +listType=set + CustomHostNames []*string `json:"customHostNames,omitempty" tf:"custom_host_names,omitempty"` + + // The streaming endpoint description. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The Azure Region where the Streaming Endpoint should exist. Changing this forces a new Streaming Endpoint to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Max cache age in seconds. + MaxCacheAgeSeconds *float64 `json:"maxCacheAgeSeconds,omitempty" tf:"max_cache_age_seconds,omitempty"` + + // The number of scale units. To create a Standard Streaming Endpoint set 0. For Premium Streaming Endpoint valid values are between 1 and 10. + ScaleUnits *float64 `json:"scaleUnits,omitempty" tf:"scale_units,omitempty"` + + // A mapping of tags which should be assigned to the Streaming Endpoint. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type StreamingEndpointObservation struct { + + // A access_control block as defined below. + AccessControl *AccessControlObservation `json:"accessControl,omitempty" tf:"access_control,omitempty"` + + // The flag indicates if the resource should be automatically started on creation. + AutoStartEnabled *bool `json:"autoStartEnabled,omitempty" tf:"auto_start_enabled,omitempty"` + + // The CDN enabled flag. + CdnEnabled *bool `json:"cdnEnabled,omitempty" tf:"cdn_enabled,omitempty"` + + // The CDN profile name. + CdnProfile *string `json:"cdnProfile,omitempty" tf:"cdn_profile,omitempty"` + + // The CDN provider name. Supported value are StandardVerizon,PremiumVerizon and StandardAkamai + CdnProvider *string `json:"cdnProvider,omitempty" tf:"cdn_provider,omitempty"` + + // A cross_site_access_policy block as defined below. + CrossSiteAccessPolicy *StreamingEndpointCrossSiteAccessPolicyObservation `json:"crossSiteAccessPolicy,omitempty" tf:"cross_site_access_policy,omitempty"` + + // The custom host names of the streaming endpoint. + // +listType=set + CustomHostNames []*string `json:"customHostNames,omitempty" tf:"custom_host_names,omitempty"` + + // The streaming endpoint description. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The host name of the Streaming Endpoint. + HostName *string `json:"hostName,omitempty" tf:"host_name,omitempty"` + + // The ID of the Streaming Endpoint. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The Azure Region where the Streaming Endpoint should exist. Changing this forces a new Streaming Endpoint to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Max cache age in seconds. + MaxCacheAgeSeconds *float64 `json:"maxCacheAgeSeconds,omitempty" tf:"max_cache_age_seconds,omitempty"` + + // The Media Services account name. 
Changing this forces a new Streaming Endpoint to be created. + MediaServicesAccountName *string `json:"mediaServicesAccountName,omitempty" tf:"media_services_account_name,omitempty"` + + // The name of the Resource Group where the Streaming Endpoint should exist. Changing this forces a new Streaming Endpoint to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // The number of scale units. To create a Standard Streaming Endpoint set 0. For Premium Streaming Endpoint valid values are between 1 and 10. + ScaleUnits *float64 `json:"scaleUnits,omitempty" tf:"scale_units,omitempty"` + + // A sku block defined as below. + Sku []SkuObservation `json:"sku,omitempty" tf:"sku,omitempty"` + + // A mapping of tags which should be assigned to the Streaming Endpoint. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type StreamingEndpointParameters struct { + + // A access_control block as defined below. + // +kubebuilder:validation:Optional + AccessControl *AccessControlParameters `json:"accessControl,omitempty" tf:"access_control,omitempty"` + + // The flag indicates if the resource should be automatically started on creation. + // +kubebuilder:validation:Optional + AutoStartEnabled *bool `json:"autoStartEnabled,omitempty" tf:"auto_start_enabled,omitempty"` + + // The CDN enabled flag. + // +kubebuilder:validation:Optional + CdnEnabled *bool `json:"cdnEnabled,omitempty" tf:"cdn_enabled,omitempty"` + + // The CDN profile name. + // +kubebuilder:validation:Optional + CdnProfile *string `json:"cdnProfile,omitempty" tf:"cdn_profile,omitempty"` + + // The CDN provider name. Supported value are StandardVerizon,PremiumVerizon and StandardAkamai + // +kubebuilder:validation:Optional + CdnProvider *string `json:"cdnProvider,omitempty" tf:"cdn_provider,omitempty"` + + // A cross_site_access_policy block as defined below. 
+ // +kubebuilder:validation:Optional + CrossSiteAccessPolicy *StreamingEndpointCrossSiteAccessPolicyParameters `json:"crossSiteAccessPolicy,omitempty" tf:"cross_site_access_policy,omitempty"` + + // The custom host names of the streaming endpoint. + // +kubebuilder:validation:Optional + // +listType=set + CustomHostNames []*string `json:"customHostNames,omitempty" tf:"custom_host_names,omitempty"` + + // The streaming endpoint description. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The Azure Region where the Streaming Endpoint should exist. Changing this forces a new Streaming Endpoint to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Max cache age in seconds. + // +kubebuilder:validation:Optional + MaxCacheAgeSeconds *float64 `json:"maxCacheAgeSeconds,omitempty" tf:"max_cache_age_seconds,omitempty"` + + // The Media Services account name. Changing this forces a new Streaming Endpoint to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/media/v1beta2.ServicesAccount + // +kubebuilder:validation:Optional + MediaServicesAccountName *string `json:"mediaServicesAccountName,omitempty" tf:"media_services_account_name,omitempty"` + + // Reference to a ServicesAccount in media to populate mediaServicesAccountName. + // +kubebuilder:validation:Optional + MediaServicesAccountNameRef *v1.Reference `json:"mediaServicesAccountNameRef,omitempty" tf:"-"` + + // Selector for a ServicesAccount in media to populate mediaServicesAccountName. + // +kubebuilder:validation:Optional + MediaServicesAccountNameSelector *v1.Selector `json:"mediaServicesAccountNameSelector,omitempty" tf:"-"` + + // The name of the Resource Group where the Streaming Endpoint should exist. Changing this forces a new Streaming Endpoint to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // The number of scale units. To create a Standard Streaming Endpoint set 0. For Premium Streaming Endpoint valid values are between 1 and 10. + // +kubebuilder:validation:Optional + ScaleUnits *float64 `json:"scaleUnits,omitempty" tf:"scale_units,omitempty"` + + // A mapping of tags which should be assigned to the Streaming Endpoint. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +// StreamingEndpointSpec defines the desired state of StreamingEndpoint +type StreamingEndpointSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider StreamingEndpointParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider StreamingEndpointInitParameters `json:"initProvider,omitempty"` +} + +// StreamingEndpointStatus defines the observed state of StreamingEndpoint. +type StreamingEndpointStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider StreamingEndpointObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// StreamingEndpoint is the Schema for the StreamingEndpoints API. Manages a Streaming Endpoint. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type StreamingEndpoint struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.scaleUnits) || (has(self.initProvider) && has(self.initProvider.scaleUnits))",message="spec.forProvider.scaleUnits is a required parameter" + Spec StreamingEndpointSpec `json:"spec"` + Status StreamingEndpointStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// StreamingEndpointList contains a list of StreamingEndpoints +type StreamingEndpointList 
struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []StreamingEndpoint `json:"items"` +} + +// Repository type metadata. +var ( + StreamingEndpoint_Kind = "StreamingEndpoint" + StreamingEndpoint_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: StreamingEndpoint_Kind}.String() + StreamingEndpoint_KindAPIVersion = StreamingEndpoint_Kind + "." + CRDGroupVersion.String() + StreamingEndpoint_GroupVersionKind = CRDGroupVersion.WithKind(StreamingEndpoint_Kind) +) + +func init() { + SchemeBuilder.Register(&StreamingEndpoint{}, &StreamingEndpointList{}) +} diff --git a/apis/media/v1beta2/zz_streamingpolicy_terraformed.go b/apis/media/v1beta2/zz_streamingpolicy_terraformed.go new file mode 100755 index 000000000..8d2a25408 --- /dev/null +++ b/apis/media/v1beta2/zz_streamingpolicy_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this StreamingPolicy +func (mg *StreamingPolicy) GetTerraformResourceType() string { + return "azurerm_media_streaming_policy" +} + +// GetConnectionDetailsMapping for this StreamingPolicy +func (tr *StreamingPolicy) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this StreamingPolicy +func (tr *StreamingPolicy) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this StreamingPolicy +func (tr *StreamingPolicy) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this StreamingPolicy +func (tr *StreamingPolicy) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this StreamingPolicy +func (tr *StreamingPolicy) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this StreamingPolicy +func (tr *StreamingPolicy) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this StreamingPolicy +func (tr *StreamingPolicy) GetInitParameters() (map[string]any, error) { + p, err := 
json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this StreamingPolicy +func (tr *StreamingPolicy) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this StreamingPolicy using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *StreamingPolicy) LateInitialize(attrs []byte) (bool, error) { + params := &StreamingPolicyParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *StreamingPolicy) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/media/v1beta2/zz_streamingpolicy_types.go b/apis/media/v1beta2/zz_streamingpolicy_types.go new file mode 100755 index 000000000..da07affe8 --- /dev/null +++ b/apis/media/v1beta2/zz_streamingpolicy_types.go @@ -0,0 +1,887 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ClearKeyEncryptionInitParameters struct { + + // The URL template for the custom service that delivers content keys to the end user. This is not required when using Azure Media Services for issuing keys. Changing this forces a new Streaming Policy to be created. + CustomKeysAcquisitionURLTemplate *string `json:"customKeysAcquisitionUrlTemplate,omitempty" tf:"custom_keys_acquisition_url_template,omitempty"` +} + +type ClearKeyEncryptionObservation struct { + + // The URL template for the custom service that delivers content keys to the end user. This is not required when using Azure Media Services for issuing keys. Changing this forces a new Streaming Policy to be created. + CustomKeysAcquisitionURLTemplate *string `json:"customKeysAcquisitionUrlTemplate,omitempty" tf:"custom_keys_acquisition_url_template,omitempty"` +} + +type ClearKeyEncryptionParameters struct { + + // The URL template for the custom service that delivers content keys to the end user. This is not required when using Azure Media Services for issuing keys. Changing this forces a new Streaming Policy to be created. 
+ // +kubebuilder:validation:Optional + CustomKeysAcquisitionURLTemplate *string `json:"customKeysAcquisitionUrlTemplate" tf:"custom_keys_acquisition_url_template,omitempty"` +} + +type ClearTrackConditionInitParameters struct { + + // The track property condition operation. Possible value is Equal. Changing this forces a new Streaming Policy to be created. + Operation *string `json:"operation,omitempty" tf:"operation,omitempty"` + + // The track property type. Possible value is FourCC. Changing this forces a new Streaming Policy to be created. + Property *string `json:"property,omitempty" tf:"property,omitempty"` + + // The track property value. Changing this forces a new Streaming Policy to be created. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ClearTrackConditionObservation struct { + + // The track property condition operation. Possible value is Equal. Changing this forces a new Streaming Policy to be created. + Operation *string `json:"operation,omitempty" tf:"operation,omitempty"` + + // The track property type. Possible value is FourCC. Changing this forces a new Streaming Policy to be created. + Property *string `json:"property,omitempty" tf:"property,omitempty"` + + // The track property value. Changing this forces a new Streaming Policy to be created. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ClearTrackConditionParameters struct { + + // The track property condition operation. Possible value is Equal. Changing this forces a new Streaming Policy to be created. + // +kubebuilder:validation:Optional + Operation *string `json:"operation" tf:"operation,omitempty"` + + // The track property type. Possible value is FourCC. Changing this forces a new Streaming Policy to be created. + // +kubebuilder:validation:Optional + Property *string `json:"property" tf:"property,omitempty"` + + // The track property value. Changing this forces a new Streaming Policy to be created. 
+ // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type ClearTrackInitParameters struct { + + // One or more condition blocks as defined below. Changing this forces a new Streaming Policy to be created. + Condition []ClearTrackConditionInitParameters `json:"condition,omitempty" tf:"condition,omitempty"` +} + +type ClearTrackObservation struct { + + // One or more condition blocks as defined below. Changing this forces a new Streaming Policy to be created. + Condition []ClearTrackConditionObservation `json:"condition,omitempty" tf:"condition,omitempty"` +} + +type ClearTrackParameters struct { + + // One or more condition blocks as defined below. Changing this forces a new Streaming Policy to be created. + // +kubebuilder:validation:Optional + Condition []ClearTrackConditionParameters `json:"condition" tf:"condition,omitempty"` +} + +type CommonEncryptionCbcsInitParameters struct { + + // A clear_key_encryption block as defined below. Changing this forces a new Streaming Policy to be created. + ClearKeyEncryption *ClearKeyEncryptionInitParameters `json:"clearKeyEncryption,omitempty" tf:"clear_key_encryption,omitempty"` + + // A default_content_key block as defined below. Changing this forces a new Streaming Policy to be created. + DefaultContentKey *DefaultContentKeyInitParameters `json:"defaultContentKey,omitempty" tf:"default_content_key,omitempty"` + + // A drm_fairplay block as defined below. Changing this forces a new Streaming Policy to be created. + DrmFairplay *DrmFairplayInitParameters `json:"drmFairplay,omitempty" tf:"drm_fairplay,omitempty"` + + // A enabled_protocols block as defined below. Changing this forces a new Streaming Policy to be created. + EnabledProtocols *EnabledProtocolsInitParameters `json:"enabledProtocols,omitempty" tf:"enabled_protocols,omitempty"` +} + +type CommonEncryptionCbcsObservation struct { + + // A clear_key_encryption block as defined below. 
Changing this forces a new Streaming Policy to be created. + ClearKeyEncryption *ClearKeyEncryptionObservation `json:"clearKeyEncryption,omitempty" tf:"clear_key_encryption,omitempty"` + + // A default_content_key block as defined below. Changing this forces a new Streaming Policy to be created. + DefaultContentKey *DefaultContentKeyObservation `json:"defaultContentKey,omitempty" tf:"default_content_key,omitempty"` + + // A drm_fairplay block as defined below. Changing this forces a new Streaming Policy to be created. + DrmFairplay *DrmFairplayObservation `json:"drmFairplay,omitempty" tf:"drm_fairplay,omitempty"` + + // A enabled_protocols block as defined below. Changing this forces a new Streaming Policy to be created. + EnabledProtocols *EnabledProtocolsObservation `json:"enabledProtocols,omitempty" tf:"enabled_protocols,omitempty"` +} + +type CommonEncryptionCbcsParameters struct { + + // A clear_key_encryption block as defined below. Changing this forces a new Streaming Policy to be created. + // +kubebuilder:validation:Optional + ClearKeyEncryption *ClearKeyEncryptionParameters `json:"clearKeyEncryption,omitempty" tf:"clear_key_encryption,omitempty"` + + // A default_content_key block as defined below. Changing this forces a new Streaming Policy to be created. + // +kubebuilder:validation:Optional + DefaultContentKey *DefaultContentKeyParameters `json:"defaultContentKey,omitempty" tf:"default_content_key,omitempty"` + + // A drm_fairplay block as defined below. Changing this forces a new Streaming Policy to be created. + // +kubebuilder:validation:Optional + DrmFairplay *DrmFairplayParameters `json:"drmFairplay,omitempty" tf:"drm_fairplay,omitempty"` + + // A enabled_protocols block as defined below. Changing this forces a new Streaming Policy to be created. 
+ // +kubebuilder:validation:Optional + EnabledProtocols *EnabledProtocolsParameters `json:"enabledProtocols,omitempty" tf:"enabled_protocols,omitempty"` +} + +type CommonEncryptionCencClearKeyEncryptionInitParameters struct { + + // The URL template for the custom service that delivers content keys to the end user. This is not required when using Azure Media Services for issuing keys. Changing this forces a new Streaming Policy to be created. + CustomKeysAcquisitionURLTemplate *string `json:"customKeysAcquisitionUrlTemplate,omitempty" tf:"custom_keys_acquisition_url_template,omitempty"` +} + +type CommonEncryptionCencClearKeyEncryptionObservation struct { + + // The URL template for the custom service that delivers content keys to the end user. This is not required when using Azure Media Services for issuing keys. Changing this forces a new Streaming Policy to be created. + CustomKeysAcquisitionURLTemplate *string `json:"customKeysAcquisitionUrlTemplate,omitempty" tf:"custom_keys_acquisition_url_template,omitempty"` +} + +type CommonEncryptionCencClearKeyEncryptionParameters struct { + + // The URL template for the custom service that delivers content keys to the end user. This is not required when using Azure Media Services for issuing keys. Changing this forces a new Streaming Policy to be created. + // +kubebuilder:validation:Optional + CustomKeysAcquisitionURLTemplate *string `json:"customKeysAcquisitionUrlTemplate" tf:"custom_keys_acquisition_url_template,omitempty"` +} + +type CommonEncryptionCencDefaultContentKeyInitParameters struct { + + // Label can be used to specify Content Key when creating a Streaming Locator. Changing this forces a new Streaming Policy to be created. + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // Policy used by Default Key. Changing this forces a new Streaming Policy to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/media/v1beta2.ContentKeyPolicy + PolicyName *string `json:"policyName,omitempty" tf:"policy_name,omitempty"` + + // Reference to a ContentKeyPolicy in media to populate policyName. + // +kubebuilder:validation:Optional + PolicyNameRef *v1.Reference `json:"policyNameRef,omitempty" tf:"-"` + + // Selector for a ContentKeyPolicy in media to populate policyName. + // +kubebuilder:validation:Optional + PolicyNameSelector *v1.Selector `json:"policyNameSelector,omitempty" tf:"-"` +} + +type CommonEncryptionCencDefaultContentKeyObservation struct { + + // Label can be used to specify Content Key when creating a Streaming Locator. Changing this forces a new Streaming Policy to be created. + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // Policy used by Default Key. Changing this forces a new Streaming Policy to be created. + PolicyName *string `json:"policyName,omitempty" tf:"policy_name,omitempty"` +} + +type CommonEncryptionCencDefaultContentKeyParameters struct { + + // Label can be used to specify Content Key when creating a Streaming Locator. Changing this forces a new Streaming Policy to be created. + // +kubebuilder:validation:Optional + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // Policy used by Default Key. Changing this forces a new Streaming Policy to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/media/v1beta2.ContentKeyPolicy + // +kubebuilder:validation:Optional + PolicyName *string `json:"policyName,omitempty" tf:"policy_name,omitempty"` + + // Reference to a ContentKeyPolicy in media to populate policyName. + // +kubebuilder:validation:Optional + PolicyNameRef *v1.Reference `json:"policyNameRef,omitempty" tf:"-"` + + // Selector for a ContentKeyPolicy in media to populate policyName. 
+ // +kubebuilder:validation:Optional + PolicyNameSelector *v1.Selector `json:"policyNameSelector,omitempty" tf:"-"` +} + +type CommonEncryptionCencEnabledProtocolsInitParameters struct { + + // Enable DASH protocol or not. Changing this forces a new Streaming Policy to be created. + Dash *bool `json:"dash,omitempty" tf:"dash,omitempty"` + + // Enable Download protocol or not. Changing this forces a new Streaming Policy to be created. + Download *bool `json:"download,omitempty" tf:"download,omitempty"` + + // Enable HLS protocol or not. Changing this forces a new Streaming Policy to be created. + Hls *bool `json:"hls,omitempty" tf:"hls,omitempty"` + + // Enable SmoothStreaming protocol or not. Changing this forces a new Streaming Policy to be created. + SmoothStreaming *bool `json:"smoothStreaming,omitempty" tf:"smooth_streaming,omitempty"` +} + +type CommonEncryptionCencEnabledProtocolsObservation struct { + + // Enable DASH protocol or not. Changing this forces a new Streaming Policy to be created. + Dash *bool `json:"dash,omitempty" tf:"dash,omitempty"` + + // Enable Download protocol or not. Changing this forces a new Streaming Policy to be created. + Download *bool `json:"download,omitempty" tf:"download,omitempty"` + + // Enable HLS protocol or not. Changing this forces a new Streaming Policy to be created. + Hls *bool `json:"hls,omitempty" tf:"hls,omitempty"` + + // Enable SmoothStreaming protocol or not. Changing this forces a new Streaming Policy to be created. + SmoothStreaming *bool `json:"smoothStreaming,omitempty" tf:"smooth_streaming,omitempty"` +} + +type CommonEncryptionCencEnabledProtocolsParameters struct { + + // Enable DASH protocol or not. Changing this forces a new Streaming Policy to be created. + // +kubebuilder:validation:Optional + Dash *bool `json:"dash,omitempty" tf:"dash,omitempty"` + + // Enable Download protocol or not. Changing this forces a new Streaming Policy to be created. 
+ // +kubebuilder:validation:Optional + Download *bool `json:"download,omitempty" tf:"download,omitempty"` + + // Enable HLS protocol or not. Changing this forces a new Streaming Policy to be created. + // +kubebuilder:validation:Optional + Hls *bool `json:"hls,omitempty" tf:"hls,omitempty"` + + // Enable SmoothStreaming protocol or not. Changing this forces a new Streaming Policy to be created. + // +kubebuilder:validation:Optional + SmoothStreaming *bool `json:"smoothStreaming,omitempty" tf:"smooth_streaming,omitempty"` +} + +type CommonEncryptionCencInitParameters struct { + + // A clear_key_encryption block as defined below. Changing this forces a new Streaming Policy to be created. + ClearKeyEncryption *CommonEncryptionCencClearKeyEncryptionInitParameters `json:"clearKeyEncryption,omitempty" tf:"clear_key_encryption,omitempty"` + + // One or more clear_track blocks as defined below. Changing this forces a new Streaming Policy to be created. + ClearTrack []ClearTrackInitParameters `json:"clearTrack,omitempty" tf:"clear_track,omitempty"` + + // One or more content_key_to_track_mapping blocks as defined below. Changing this forces a new Streaming Policy to be created. + ContentKeyToTrackMapping []ContentKeyToTrackMappingInitParameters `json:"contentKeyToTrackMapping,omitempty" tf:"content_key_to_track_mapping,omitempty"` + + // A default_content_key block as defined below. Changing this forces a new Streaming Policy to be created. + DefaultContentKey *CommonEncryptionCencDefaultContentKeyInitParameters `json:"defaultContentKey,omitempty" tf:"default_content_key,omitempty"` + + // A drm_playready block as defined below. Changing this forces a new Streaming Policy to be created. + DrmPlayready *DrmPlayreadyInitParameters `json:"drmPlayready,omitempty" tf:"drm_playready,omitempty"` + + // The URL template for the custom service that delivers licenses to the end user. This is not required when using Azure Media Services for issuing licenses. 
Changing this forces a new Streaming Policy to be created. + DrmWidevineCustomLicenseAcquisitionURLTemplate *string `json:"drmWidevineCustomLicenseAcquisitionUrlTemplate,omitempty" tf:"drm_widevine_custom_license_acquisition_url_template,omitempty"` + + // A enabled_protocols block as defined below. Changing this forces a new Streaming Policy to be created. + EnabledProtocols *CommonEncryptionCencEnabledProtocolsInitParameters `json:"enabledProtocols,omitempty" tf:"enabled_protocols,omitempty"` +} + +type CommonEncryptionCencObservation struct { + + // A clear_key_encryption block as defined below. Changing this forces a new Streaming Policy to be created. + ClearKeyEncryption *CommonEncryptionCencClearKeyEncryptionObservation `json:"clearKeyEncryption,omitempty" tf:"clear_key_encryption,omitempty"` + + // One or more clear_track blocks as defined below. Changing this forces a new Streaming Policy to be created. + ClearTrack []ClearTrackObservation `json:"clearTrack,omitempty" tf:"clear_track,omitempty"` + + // One or more content_key_to_track_mapping blocks as defined below. Changing this forces a new Streaming Policy to be created. + ContentKeyToTrackMapping []ContentKeyToTrackMappingObservation `json:"contentKeyToTrackMapping,omitempty" tf:"content_key_to_track_mapping,omitempty"` + + // A default_content_key block as defined below. Changing this forces a new Streaming Policy to be created. + DefaultContentKey *CommonEncryptionCencDefaultContentKeyObservation `json:"defaultContentKey,omitempty" tf:"default_content_key,omitempty"` + + // A drm_playready block as defined below. Changing this forces a new Streaming Policy to be created. + DrmPlayready *DrmPlayreadyObservation `json:"drmPlayready,omitempty" tf:"drm_playready,omitempty"` + + // The URL template for the custom service that delivers licenses to the end user. This is not required when using Azure Media Services for issuing licenses. Changing this forces a new Streaming Policy to be created. 
+ DrmWidevineCustomLicenseAcquisitionURLTemplate *string `json:"drmWidevineCustomLicenseAcquisitionUrlTemplate,omitempty" tf:"drm_widevine_custom_license_acquisition_url_template,omitempty"` + + // A enabled_protocols block as defined below. Changing this forces a new Streaming Policy to be created. + EnabledProtocols *CommonEncryptionCencEnabledProtocolsObservation `json:"enabledProtocols,omitempty" tf:"enabled_protocols,omitempty"` +} + +type CommonEncryptionCencParameters struct { + + // A clear_key_encryption block as defined below. Changing this forces a new Streaming Policy to be created. + // +kubebuilder:validation:Optional + ClearKeyEncryption *CommonEncryptionCencClearKeyEncryptionParameters `json:"clearKeyEncryption,omitempty" tf:"clear_key_encryption,omitempty"` + + // One or more clear_track blocks as defined below. Changing this forces a new Streaming Policy to be created. + // +kubebuilder:validation:Optional + ClearTrack []ClearTrackParameters `json:"clearTrack,omitempty" tf:"clear_track,omitempty"` + + // One or more content_key_to_track_mapping blocks as defined below. Changing this forces a new Streaming Policy to be created. + // +kubebuilder:validation:Optional + ContentKeyToTrackMapping []ContentKeyToTrackMappingParameters `json:"contentKeyToTrackMapping,omitempty" tf:"content_key_to_track_mapping,omitempty"` + + // A default_content_key block as defined below. Changing this forces a new Streaming Policy to be created. + // +kubebuilder:validation:Optional + DefaultContentKey *CommonEncryptionCencDefaultContentKeyParameters `json:"defaultContentKey,omitempty" tf:"default_content_key,omitempty"` + + // A drm_playready block as defined below. Changing this forces a new Streaming Policy to be created. + // +kubebuilder:validation:Optional + DrmPlayready *DrmPlayreadyParameters `json:"drmPlayready,omitempty" tf:"drm_playready,omitempty"` + + // The URL template for the custom service that delivers licenses to the end user. 
This is not required when using Azure Media Services for issuing licenses. Changing this forces a new Streaming Policy to be created. + // +kubebuilder:validation:Optional + DrmWidevineCustomLicenseAcquisitionURLTemplate *string `json:"drmWidevineCustomLicenseAcquisitionUrlTemplate,omitempty" tf:"drm_widevine_custom_license_acquisition_url_template,omitempty"` + + // A enabled_protocols block as defined below. Changing this forces a new Streaming Policy to be created. + // +kubebuilder:validation:Optional + EnabledProtocols *CommonEncryptionCencEnabledProtocolsParameters `json:"enabledProtocols,omitempty" tf:"enabled_protocols,omitempty"` +} + +type ContentKeyToTrackMappingInitParameters struct { + + // Label can be used to specify Content Key when creating a Streaming Locator. Changing this forces a new Streaming Policy to be created. + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // Policy used by Default Key. Changing this forces a new Streaming Policy to be created. + PolicyName *string `json:"policyName,omitempty" tf:"policy_name,omitempty"` + + // One or more track blocks as defined below. Changing this forces a new Streaming Policy to be created. + Track []TrackInitParameters `json:"track,omitempty" tf:"track,omitempty"` +} + +type ContentKeyToTrackMappingObservation struct { + + // Label can be used to specify Content Key when creating a Streaming Locator. Changing this forces a new Streaming Policy to be created. + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // Policy used by Default Key. Changing this forces a new Streaming Policy to be created. + PolicyName *string `json:"policyName,omitempty" tf:"policy_name,omitempty"` + + // One or more track blocks as defined below. Changing this forces a new Streaming Policy to be created. 
+ Track []TrackObservation `json:"track,omitempty" tf:"track,omitempty"` +} + +type ContentKeyToTrackMappingParameters struct { + + // Label can be used to specify Content Key when creating a Streaming Locator. Changing this forces a new Streaming Policy to be created. + // +kubebuilder:validation:Optional + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // Policy used by Default Key. Changing this forces a new Streaming Policy to be created. + // +kubebuilder:validation:Optional + PolicyName *string `json:"policyName,omitempty" tf:"policy_name,omitempty"` + + // One or more track blocks as defined below. Changing this forces a new Streaming Policy to be created. + // +kubebuilder:validation:Optional + Track []TrackParameters `json:"track" tf:"track,omitempty"` +} + +type DefaultContentKeyInitParameters struct { + + // Label can be used to specify Content Key when creating a Streaming Locator. Changing this forces a new Streaming Policy to be created. + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // Policy used by Default Key. Changing this forces a new Streaming Policy to be created. + PolicyName *string `json:"policyName,omitempty" tf:"policy_name,omitempty"` +} + +type DefaultContentKeyObservation struct { + + // Label can be used to specify Content Key when creating a Streaming Locator. Changing this forces a new Streaming Policy to be created. + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // Policy used by Default Key. Changing this forces a new Streaming Policy to be created. + PolicyName *string `json:"policyName,omitempty" tf:"policy_name,omitempty"` +} + +type DefaultContentKeyParameters struct { + + // Label can be used to specify Content Key when creating a Streaming Locator. Changing this forces a new Streaming Policy to be created. + // +kubebuilder:validation:Optional + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // Policy used by Default Key. 
Changing this forces a new Streaming Policy to be created. + // +kubebuilder:validation:Optional + PolicyName *string `json:"policyName,omitempty" tf:"policy_name,omitempty"` +} + +type DrmFairplayInitParameters struct { + + // All license to be persistent or not. Changing this forces a new Streaming Policy to be created. + AllowPersistentLicense *bool `json:"allowPersistentLicense,omitempty" tf:"allow_persistent_license,omitempty"` + + // The URL template for the custom service that delivers licenses to the end user. This is not required when using Azure Media Services for issuing licenses. Changing this forces a new Streaming Policy to be created. + CustomLicenseAcquisitionURLTemplate *string `json:"customLicenseAcquisitionUrlTemplate,omitempty" tf:"custom_license_acquisition_url_template,omitempty"` +} + +type DrmFairplayObservation struct { + + // All license to be persistent or not. Changing this forces a new Streaming Policy to be created. + AllowPersistentLicense *bool `json:"allowPersistentLicense,omitempty" tf:"allow_persistent_license,omitempty"` + + // The URL template for the custom service that delivers licenses to the end user. This is not required when using Azure Media Services for issuing licenses. Changing this forces a new Streaming Policy to be created. + CustomLicenseAcquisitionURLTemplate *string `json:"customLicenseAcquisitionUrlTemplate,omitempty" tf:"custom_license_acquisition_url_template,omitempty"` +} + +type DrmFairplayParameters struct { + + // All license to be persistent or not. Changing this forces a new Streaming Policy to be created. + // +kubebuilder:validation:Optional + AllowPersistentLicense *bool `json:"allowPersistentLicense,omitempty" tf:"allow_persistent_license,omitempty"` + + // The URL template for the custom service that delivers licenses to the end user. This is not required when using Azure Media Services for issuing licenses. Changing this forces a new Streaming Policy to be created. 
+ // +kubebuilder:validation:Optional + CustomLicenseAcquisitionURLTemplate *string `json:"customLicenseAcquisitionUrlTemplate,omitempty" tf:"custom_license_acquisition_url_template,omitempty"` +} + +type DrmPlayreadyInitParameters struct { + + // Custom attributes for PlayReady. Changing this forces a new Streaming Policy to be created. + CustomAttributes *string `json:"customAttributes,omitempty" tf:"custom_attributes,omitempty"` + + // The URL template for the custom service that delivers licenses to the end user. This is not required when using Azure Media Services for issuing licenses. Changing this forces a new Streaming Policy to be created. + CustomLicenseAcquisitionURLTemplate *string `json:"customLicenseAcquisitionUrlTemplate,omitempty" tf:"custom_license_acquisition_url_template,omitempty"` +} + +type DrmPlayreadyObservation struct { + + // Custom attributes for PlayReady. Changing this forces a new Streaming Policy to be created. + CustomAttributes *string `json:"customAttributes,omitempty" tf:"custom_attributes,omitempty"` + + // The URL template for the custom service that delivers licenses to the end user. This is not required when using Azure Media Services for issuing licenses. Changing this forces a new Streaming Policy to be created. + CustomLicenseAcquisitionURLTemplate *string `json:"customLicenseAcquisitionUrlTemplate,omitempty" tf:"custom_license_acquisition_url_template,omitempty"` +} + +type DrmPlayreadyParameters struct { + + // Custom attributes for PlayReady. Changing this forces a new Streaming Policy to be created. + // +kubebuilder:validation:Optional + CustomAttributes *string `json:"customAttributes,omitempty" tf:"custom_attributes,omitempty"` + + // The URL template for the custom service that delivers licenses to the end user. This is not required when using Azure Media Services for issuing licenses. Changing this forces a new Streaming Policy to be created. 
+ // +kubebuilder:validation:Optional + CustomLicenseAcquisitionURLTemplate *string `json:"customLicenseAcquisitionUrlTemplate,omitempty" tf:"custom_license_acquisition_url_template,omitempty"` +} + +type EnabledProtocolsInitParameters struct { + + // Enable DASH protocol or not. Changing this forces a new Streaming Policy to be created. + Dash *bool `json:"dash,omitempty" tf:"dash,omitempty"` + + // Enable Download protocol or not. Changing this forces a new Streaming Policy to be created. + Download *bool `json:"download,omitempty" tf:"download,omitempty"` + + // Enable HLS protocol or not. Changing this forces a new Streaming Policy to be created. + Hls *bool `json:"hls,omitempty" tf:"hls,omitempty"` + + // Enable SmoothStreaming protocol or not. Changing this forces a new Streaming Policy to be created. + SmoothStreaming *bool `json:"smoothStreaming,omitempty" tf:"smooth_streaming,omitempty"` +} + +type EnabledProtocolsObservation struct { + + // Enable DASH protocol or not. Changing this forces a new Streaming Policy to be created. + Dash *bool `json:"dash,omitempty" tf:"dash,omitempty"` + + // Enable Download protocol or not. Changing this forces a new Streaming Policy to be created. + Download *bool `json:"download,omitempty" tf:"download,omitempty"` + + // Enable HLS protocol or not. Changing this forces a new Streaming Policy to be created. + Hls *bool `json:"hls,omitempty" tf:"hls,omitempty"` + + // Enable SmoothStreaming protocol or not. Changing this forces a new Streaming Policy to be created. + SmoothStreaming *bool `json:"smoothStreaming,omitempty" tf:"smooth_streaming,omitempty"` +} + +type EnabledProtocolsParameters struct { + + // Enable DASH protocol or not. Changing this forces a new Streaming Policy to be created. + // +kubebuilder:validation:Optional + Dash *bool `json:"dash,omitempty" tf:"dash,omitempty"` + + // Enable Download protocol or not. Changing this forces a new Streaming Policy to be created. 
+ // +kubebuilder:validation:Optional + Download *bool `json:"download,omitempty" tf:"download,omitempty"` + + // Enable HLS protocol or not. Changing this forces a new Streaming Policy to be created. + // +kubebuilder:validation:Optional + Hls *bool `json:"hls,omitempty" tf:"hls,omitempty"` + + // Enable SmoothStreaming protocol or not. Changing this forces a new Streaming Policy to be created. + // +kubebuilder:validation:Optional + SmoothStreaming *bool `json:"smoothStreaming,omitempty" tf:"smooth_streaming,omitempty"` +} + +type EnvelopeEncryptionDefaultContentKeyInitParameters struct { + + // Label can be used to specify Content Key when creating a Streaming Locator. Changing this forces a new Streaming Policy to be created. + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // Policy used by Default Key. Changing this forces a new Streaming Policy to be created. + PolicyName *string `json:"policyName,omitempty" tf:"policy_name,omitempty"` +} + +type EnvelopeEncryptionDefaultContentKeyObservation struct { + + // Label can be used to specify Content Key when creating a Streaming Locator. Changing this forces a new Streaming Policy to be created. + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // Policy used by Default Key. Changing this forces a new Streaming Policy to be created. + PolicyName *string `json:"policyName,omitempty" tf:"policy_name,omitempty"` +} + +type EnvelopeEncryptionDefaultContentKeyParameters struct { + + // Label can be used to specify Content Key when creating a Streaming Locator. Changing this forces a new Streaming Policy to be created. + // +kubebuilder:validation:Optional + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // Policy used by Default Key. Changing this forces a new Streaming Policy to be created. 
+ // +kubebuilder:validation:Optional + PolicyName *string `json:"policyName,omitempty" tf:"policy_name,omitempty"` +} + +type EnvelopeEncryptionEnabledProtocolsInitParameters struct { + + // Enable DASH protocol or not. Changing this forces a new Streaming Policy to be created. + Dash *bool `json:"dash,omitempty" tf:"dash,omitempty"` + + // Enable Download protocol or not. Changing this forces a new Streaming Policy to be created. + Download *bool `json:"download,omitempty" tf:"download,omitempty"` + + // Enable HLS protocol or not. Changing this forces a new Streaming Policy to be created. + Hls *bool `json:"hls,omitempty" tf:"hls,omitempty"` + + // Enable SmoothStreaming protocol or not. Changing this forces a new Streaming Policy to be created. + SmoothStreaming *bool `json:"smoothStreaming,omitempty" tf:"smooth_streaming,omitempty"` +} + +type EnvelopeEncryptionEnabledProtocolsObservation struct { + + // Enable DASH protocol or not. Changing this forces a new Streaming Policy to be created. + Dash *bool `json:"dash,omitempty" tf:"dash,omitempty"` + + // Enable Download protocol or not. Changing this forces a new Streaming Policy to be created. + Download *bool `json:"download,omitempty" tf:"download,omitempty"` + + // Enable HLS protocol or not. Changing this forces a new Streaming Policy to be created. + Hls *bool `json:"hls,omitempty" tf:"hls,omitempty"` + + // Enable SmoothStreaming protocol or not. Changing this forces a new Streaming Policy to be created. + SmoothStreaming *bool `json:"smoothStreaming,omitempty" tf:"smooth_streaming,omitempty"` +} + +type EnvelopeEncryptionEnabledProtocolsParameters struct { + + // Enable DASH protocol or not. Changing this forces a new Streaming Policy to be created. + // +kubebuilder:validation:Optional + Dash *bool `json:"dash,omitempty" tf:"dash,omitempty"` + + // Enable Download protocol or not. Changing this forces a new Streaming Policy to be created. 
+ // +kubebuilder:validation:Optional + Download *bool `json:"download,omitempty" tf:"download,omitempty"` + + // Enable HLS protocol or not. Changing this forces a new Streaming Policy to be created. + // +kubebuilder:validation:Optional + Hls *bool `json:"hls,omitempty" tf:"hls,omitempty"` + + // Enable SmoothStreaming protocol or not. Changing this forces a new Streaming Policy to be created. + // +kubebuilder:validation:Optional + SmoothStreaming *bool `json:"smoothStreaming,omitempty" tf:"smooth_streaming,omitempty"` +} + +type EnvelopeEncryptionInitParameters struct { + + // The URL template for the custom service that delivers content keys to the end user. This is not required when using Azure Media Services for issuing keys. Changing this forces a new Streaming Policy to be created. + CustomKeysAcquisitionURLTemplate *string `json:"customKeysAcquisitionUrlTemplate,omitempty" tf:"custom_keys_acquisition_url_template,omitempty"` + + // A default_content_key block as defined above. Changing this forces a new Streaming Policy to be created. + DefaultContentKey *EnvelopeEncryptionDefaultContentKeyInitParameters `json:"defaultContentKey,omitempty" tf:"default_content_key,omitempty"` + + // A enabled_protocols block as defined above. Changing this forces a new Streaming Policy to be created. + EnabledProtocols *EnvelopeEncryptionEnabledProtocolsInitParameters `json:"enabledProtocols,omitempty" tf:"enabled_protocols,omitempty"` +} + +type EnvelopeEncryptionObservation struct { + + // The URL template for the custom service that delivers content keys to the end user. This is not required when using Azure Media Services for issuing keys. Changing this forces a new Streaming Policy to be created. + CustomKeysAcquisitionURLTemplate *string `json:"customKeysAcquisitionUrlTemplate,omitempty" tf:"custom_keys_acquisition_url_template,omitempty"` + + // A default_content_key block as defined above. Changing this forces a new Streaming Policy to be created. 
+ DefaultContentKey *EnvelopeEncryptionDefaultContentKeyObservation `json:"defaultContentKey,omitempty" tf:"default_content_key,omitempty"` + + // A enabled_protocols block as defined above. Changing this forces a new Streaming Policy to be created. + EnabledProtocols *EnvelopeEncryptionEnabledProtocolsObservation `json:"enabledProtocols,omitempty" tf:"enabled_protocols,omitempty"` +} + +type EnvelopeEncryptionParameters struct { + + // The URL template for the custom service that delivers content keys to the end user. This is not required when using Azure Media Services for issuing keys. Changing this forces a new Streaming Policy to be created. + // +kubebuilder:validation:Optional + CustomKeysAcquisitionURLTemplate *string `json:"customKeysAcquisitionUrlTemplate,omitempty" tf:"custom_keys_acquisition_url_template,omitempty"` + + // A default_content_key block as defined above. Changing this forces a new Streaming Policy to be created. + // +kubebuilder:validation:Optional + DefaultContentKey *EnvelopeEncryptionDefaultContentKeyParameters `json:"defaultContentKey,omitempty" tf:"default_content_key,omitempty"` + + // A enabled_protocols block as defined above. Changing this forces a new Streaming Policy to be created. + // +kubebuilder:validation:Optional + EnabledProtocols *EnvelopeEncryptionEnabledProtocolsParameters `json:"enabledProtocols,omitempty" tf:"enabled_protocols,omitempty"` +} + +type NoEncryptionEnabledProtocolsInitParameters struct { + + // Enable DASH protocol or not. Changing this forces a new Streaming Policy to be created. + Dash *bool `json:"dash,omitempty" tf:"dash,omitempty"` + + // Enable Download protocol or not. Changing this forces a new Streaming Policy to be created. + Download *bool `json:"download,omitempty" tf:"download,omitempty"` + + // Enable HLS protocol or not. Changing this forces a new Streaming Policy to be created. + Hls *bool `json:"hls,omitempty" tf:"hls,omitempty"` + + // Enable SmoothStreaming protocol or not. 
Changing this forces a new Streaming Policy to be created. + SmoothStreaming *bool `json:"smoothStreaming,omitempty" tf:"smooth_streaming,omitempty"` +} + +type NoEncryptionEnabledProtocolsObservation struct { + + // Enable DASH protocol or not. Changing this forces a new Streaming Policy to be created. + Dash *bool `json:"dash,omitempty" tf:"dash,omitempty"` + + // Enable Download protocol or not. Changing this forces a new Streaming Policy to be created. + Download *bool `json:"download,omitempty" tf:"download,omitempty"` + + // Enable HLS protocol or not. Changing this forces a new Streaming Policy to be created. + Hls *bool `json:"hls,omitempty" tf:"hls,omitempty"` + + // Enable SmoothStreaming protocol or not. Changing this forces a new Streaming Policy to be created. + SmoothStreaming *bool `json:"smoothStreaming,omitempty" tf:"smooth_streaming,omitempty"` +} + +type NoEncryptionEnabledProtocolsParameters struct { + + // Enable DASH protocol or not. Changing this forces a new Streaming Policy to be created. + // +kubebuilder:validation:Optional + Dash *bool `json:"dash,omitempty" tf:"dash,omitempty"` + + // Enable Download protocol or not. Changing this forces a new Streaming Policy to be created. + // +kubebuilder:validation:Optional + Download *bool `json:"download,omitempty" tf:"download,omitempty"` + + // Enable HLS protocol or not. Changing this forces a new Streaming Policy to be created. + // +kubebuilder:validation:Optional + Hls *bool `json:"hls,omitempty" tf:"hls,omitempty"` + + // Enable SmoothStreaming protocol or not. Changing this forces a new Streaming Policy to be created. + // +kubebuilder:validation:Optional + SmoothStreaming *bool `json:"smoothStreaming,omitempty" tf:"smooth_streaming,omitempty"` +} + +type StreamingPolicyInitParameters struct { + + // A common_encryption_cbcs block as defined below. Changing this forces a new Streaming Policy to be created. 
+ CommonEncryptionCbcs *CommonEncryptionCbcsInitParameters `json:"commonEncryptionCbcs,omitempty" tf:"common_encryption_cbcs,omitempty"` + + // A common_encryption_cenc block as defined below. Changing this forces a new Streaming Policy to be created. + CommonEncryptionCenc *CommonEncryptionCencInitParameters `json:"commonEncryptionCenc,omitempty" tf:"common_encryption_cenc,omitempty"` + + // Default Content Key used by current Streaming Policy. Changing this forces a new Streaming Policy to be created. + DefaultContentKeyPolicyName *string `json:"defaultContentKeyPolicyName,omitempty" tf:"default_content_key_policy_name,omitempty"` + + // A envelope_encryption block as defined below. Changing this forces a new Streaming Policy to be created. + EnvelopeEncryption *EnvelopeEncryptionInitParameters `json:"envelopeEncryption,omitempty" tf:"envelope_encryption,omitempty"` + + // A no_encryption_enabled_protocols block as defined below. Changing this forces a new Streaming Policy to be created. + NoEncryptionEnabledProtocols *NoEncryptionEnabledProtocolsInitParameters `json:"noEncryptionEnabledProtocols,omitempty" tf:"no_encryption_enabled_protocols,omitempty"` +} + +type StreamingPolicyObservation struct { + + // A common_encryption_cbcs block as defined below. Changing this forces a new Streaming Policy to be created. + CommonEncryptionCbcs *CommonEncryptionCbcsObservation `json:"commonEncryptionCbcs,omitempty" tf:"common_encryption_cbcs,omitempty"` + + // A common_encryption_cenc block as defined below. Changing this forces a new Streaming Policy to be created. + CommonEncryptionCenc *CommonEncryptionCencObservation `json:"commonEncryptionCenc,omitempty" tf:"common_encryption_cenc,omitempty"` + + // Default Content Key used by current Streaming Policy. Changing this forces a new Streaming Policy to be created. 
+ DefaultContentKeyPolicyName *string `json:"defaultContentKeyPolicyName,omitempty" tf:"default_content_key_policy_name,omitempty"` + + // A envelope_encryption block as defined below. Changing this forces a new Streaming Policy to be created. + EnvelopeEncryption *EnvelopeEncryptionObservation `json:"envelopeEncryption,omitempty" tf:"envelope_encryption,omitempty"` + + // The ID of the Streaming Policy. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The Media Services account name. Changing this forces a new Streaming Policy to be created. + MediaServicesAccountName *string `json:"mediaServicesAccountName,omitempty" tf:"media_services_account_name,omitempty"` + + // A no_encryption_enabled_protocols block as defined below. Changing this forces a new Streaming Policy to be created. + NoEncryptionEnabledProtocols *NoEncryptionEnabledProtocolsObservation `json:"noEncryptionEnabledProtocols,omitempty" tf:"no_encryption_enabled_protocols,omitempty"` + + // The name of the Resource Group where the Streaming Policy should exist. Changing this forces a new Streaming Policy to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` +} + +type StreamingPolicyParameters struct { + + // A common_encryption_cbcs block as defined below. Changing this forces a new Streaming Policy to be created. + // +kubebuilder:validation:Optional + CommonEncryptionCbcs *CommonEncryptionCbcsParameters `json:"commonEncryptionCbcs,omitempty" tf:"common_encryption_cbcs,omitempty"` + + // A common_encryption_cenc block as defined below. Changing this forces a new Streaming Policy to be created. + // +kubebuilder:validation:Optional + CommonEncryptionCenc *CommonEncryptionCencParameters `json:"commonEncryptionCenc,omitempty" tf:"common_encryption_cenc,omitempty"` + + // Default Content Key used by current Streaming Policy. Changing this forces a new Streaming Policy to be created. 
+ // +kubebuilder:validation:Optional + DefaultContentKeyPolicyName *string `json:"defaultContentKeyPolicyName,omitempty" tf:"default_content_key_policy_name,omitempty"` + + // A envelope_encryption block as defined below. Changing this forces a new Streaming Policy to be created. + // +kubebuilder:validation:Optional + EnvelopeEncryption *EnvelopeEncryptionParameters `json:"envelopeEncryption,omitempty" tf:"envelope_encryption,omitempty"` + + // The Media Services account name. Changing this forces a new Streaming Policy to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/media/v1beta2.ServicesAccount + // +kubebuilder:validation:Optional + MediaServicesAccountName *string `json:"mediaServicesAccountName,omitempty" tf:"media_services_account_name,omitempty"` + + // Reference to a ServicesAccount in media to populate mediaServicesAccountName. + // +kubebuilder:validation:Optional + MediaServicesAccountNameRef *v1.Reference `json:"mediaServicesAccountNameRef,omitempty" tf:"-"` + + // Selector for a ServicesAccount in media to populate mediaServicesAccountName. + // +kubebuilder:validation:Optional + MediaServicesAccountNameSelector *v1.Selector `json:"mediaServicesAccountNameSelector,omitempty" tf:"-"` + + // A no_encryption_enabled_protocols block as defined below. Changing this forces a new Streaming Policy to be created. + // +kubebuilder:validation:Optional + NoEncryptionEnabledProtocols *NoEncryptionEnabledProtocolsParameters `json:"noEncryptionEnabledProtocols,omitempty" tf:"no_encryption_enabled_protocols,omitempty"` + + // The name of the Resource Group where the Streaming Policy should exist. Changing this forces a new Streaming Policy to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` +} + +type TrackConditionInitParameters struct { + + // The track property condition operation. Possible value is Equal. Changing this forces a new Streaming Policy to be created. + Operation *string `json:"operation,omitempty" tf:"operation,omitempty"` + + // The track property type. Possible value is FourCC. Changing this forces a new Streaming Policy to be created. + Property *string `json:"property,omitempty" tf:"property,omitempty"` + + // The track property value. Changing this forces a new Streaming Policy to be created. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type TrackConditionObservation struct { + + // The track property condition operation. Possible value is Equal. Changing this forces a new Streaming Policy to be created. + Operation *string `json:"operation,omitempty" tf:"operation,omitempty"` + + // The track property type. Possible value is FourCC. Changing this forces a new Streaming Policy to be created. + Property *string `json:"property,omitempty" tf:"property,omitempty"` + + // The track property value. Changing this forces a new Streaming Policy to be created. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type TrackConditionParameters struct { + + // The track property condition operation. Possible value is Equal. 
Changing this forces a new Streaming Policy to be created. + // +kubebuilder:validation:Optional + Operation *string `json:"operation" tf:"operation,omitempty"` + + // The track property type. Possible value is FourCC. Changing this forces a new Streaming Policy to be created. + // +kubebuilder:validation:Optional + Property *string `json:"property" tf:"property,omitempty"` + + // The track property value. Changing this forces a new Streaming Policy to be created. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type TrackInitParameters struct { + + // One or more condition blocks as defined below. Changing this forces a new Streaming Policy to be created. + Condition []TrackConditionInitParameters `json:"condition,omitempty" tf:"condition,omitempty"` +} + +type TrackObservation struct { + + // One or more condition blocks as defined below. Changing this forces a new Streaming Policy to be created. + Condition []TrackConditionObservation `json:"condition,omitempty" tf:"condition,omitempty"` +} + +type TrackParameters struct { + + // One or more condition blocks as defined below. Changing this forces a new Streaming Policy to be created. + // +kubebuilder:validation:Optional + Condition []TrackConditionParameters `json:"condition" tf:"condition,omitempty"` +} + +// StreamingPolicySpec defines the desired state of StreamingPolicy +type StreamingPolicySpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider StreamingPolicyParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider StreamingPolicyInitParameters `json:"initProvider,omitempty"` +} + +// StreamingPolicyStatus defines the observed state of StreamingPolicy. +type StreamingPolicyStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider StreamingPolicyObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// StreamingPolicy is the Schema for the StreamingPolicys API. Manages a Streaming Policy. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type StreamingPolicy struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec StreamingPolicySpec `json:"spec"` + Status StreamingPolicyStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// StreamingPolicyList contains a list of StreamingPolicys +type StreamingPolicyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []StreamingPolicy `json:"items"` +} + +// Repository type metadata. +var ( + StreamingPolicy_Kind = "StreamingPolicy" + StreamingPolicy_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: StreamingPolicy_Kind}.String() + StreamingPolicy_KindAPIVersion = StreamingPolicy_Kind + "." 
+ CRDGroupVersion.String()
	StreamingPolicy_GroupVersionKind = CRDGroupVersion.WithKind(StreamingPolicy_Kind)
)

func init() {
	// Register the StreamingPolicy CRD types with the scheme builder so
	// clients can map their GroupVersionKinds to these Go types.
	SchemeBuilder.Register(&StreamingPolicy{}, &StreamingPolicyList{})
}
diff --git a/apis/media/v1beta2/zz_transform_terraformed.go b/apis/media/v1beta2/zz_transform_terraformed.go
new file mode 100755
index 000000000..7329b946d
--- /dev/null
+++ b/apis/media/v1beta2/zz_transform_terraformed.go
@@ -0,0 +1,129 @@
// SPDX-FileCopyrightText: 2024 The Crossplane Authors
//
// SPDX-License-Identifier: Apache-2.0

// Code generated by upjet. DO NOT EDIT.

package v1beta2

import (
	"dario.cat/mergo"
	"github.com/pkg/errors"

	"github.com/crossplane/upjet/pkg/resource"
	"github.com/crossplane/upjet/pkg/resource/json"
)

// GetTerraformResourceType returns Terraform resource type for this Transform
func (mg *Transform) GetTerraformResourceType() string {
	return "azurerm_media_transform"
}

// GetConnectionDetailsMapping for this Transform.
// Returns nil: a Transform exposes no sensitive connection-detail fields.
func (tr *Transform) GetConnectionDetailsMapping() map[string]string {
	return nil
}

// GetObservation of this Transform.
// Round-trips status.atProvider through the Terraform JSON parser so the
// returned map is keyed by Terraform attribute names (per the tf struct tags).
func (tr *Transform) GetObservation() (map[string]any, error) {
	o, err := json.TFParser.Marshal(tr.Status.AtProvider)
	if err != nil {
		return nil, err
	}
	base := map[string]any{}
	return base, json.TFParser.Unmarshal(o, &base)
}

// SetObservation for this Transform.
// Inverse of GetObservation: writes a Terraform attribute map back into
// status.atProvider.
func (tr *Transform) SetObservation(obs map[string]any) error {
	p, err := json.TFParser.Marshal(obs)
	if err != nil {
		return err
	}
	return json.TFParser.Unmarshal(p, &tr.Status.AtProvider)
}

// GetID returns ID of underlying Terraform resource of this Transform.
// Empty string when the resource has not been observed yet (nil ID).
func (tr *Transform) GetID() string {
	if tr.Status.AtProvider.ID == nil {
		return ""
	}
	return *tr.Status.AtProvider.ID
}

// GetParameters of this Transform.
// Converts spec.forProvider into a Terraform attribute map.
func (tr *Transform) GetParameters() (map[string]any, error) {
	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
	if err != nil {
		return nil,
err
	}
	base := map[string]any{}
	return base, json.TFParser.Unmarshal(p, &base)
}

// SetParameters for this Transform.
// Inverse of GetParameters: writes a Terraform attribute map back into
// spec.forProvider.
func (tr *Transform) SetParameters(params map[string]any) error {
	p, err := json.TFParser.Marshal(params)
	if err != nil {
		return err
	}
	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
}

// GetInitParameters of this Transform.
// Converts spec.initProvider into a Terraform attribute map.
func (tr *Transform) GetInitParameters() (map[string]any, error) {
	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
	if err != nil {
		return nil, err
	}
	base := map[string]any{}
	return base, json.TFParser.Unmarshal(p, &base)
}

// GetMergedParameters of this Transform.
// Returns the spec.forProvider parameters, merged with spec.initProvider
// parameters when shouldMergeInitProvider is true. forProvider values win
// on conflict (Overwrite is forced back to false below).
func (tr *Transform) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
	params, err := tr.GetParameters()
	if err != nil {
		// %q already wraps its argument in quotes; surrounding it with
		// single quotes would render '"name"'.
		return nil, errors.Wrapf(err, "cannot get parameters for resource %q", tr.GetName())
	}
	if !shouldMergeInitProvider {
		return params, nil
	}

	initParams, err := tr.GetInitParameters()
	if err != nil {
		return nil, errors.Wrapf(err, "cannot get init parameters for resource %q", tr.GetName())
	}

	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
	// slices from the initProvider to forProvider. As it also sets
	// overwrite to true, we need to set it back to false, we don't
	// want to overwrite the forProvider fields with the initProvider
	// fields.
	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
		c.Overwrite = false
	})
	if err != nil {
		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource %q", tr.GetName())
	}

	return params, nil
}

// LateInitialize this Transform using its observed tfState.
// returns True if there are any spec changes for the resource.
// LateInitialize fills unset spec.forProvider fields from the observed
// Terraform state in attrs and reports whether anything changed.
func (tr *Transform) LateInitialize(attrs []byte) (bool, error) {
	params := &TransformParameters{}
	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
	}
	// NOTE(review): CNameWildcard presumably applies the zero-value/omitempty
	// filter to all fields, so empty observed values do not late-initialize
	// unset spec fields — confirm against upjet's resource package docs.
	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}

	li := resource.NewGenericLateInitializer(opts...)
	return li.LateInitialize(&tr.Spec.ForProvider, params)
}

// GetTerraformSchemaVersion returns the associated Terraform schema version
func (tr *Transform) GetTerraformSchemaVersion() int {
	return 1
}
diff --git a/apis/media/v1beta2/zz_transform_types.go b/apis/media/v1beta2/zz_transform_types.go
new file mode 100755
index 000000000..36cc4baee
--- /dev/null
+++ b/apis/media/v1beta2/zz_transform_types.go
@@ -0,0 +1,2223 @@
// SPDX-FileCopyrightText: 2024 The Crossplane Authors
//
// SPDX-License-Identifier: Apache-2.0

// Code generated by upjet. DO NOT EDIT.

package v1beta2

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"

	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
)

type AacAudioInitParameters struct {

	// The average bitrate in bits per second at which to encode the input video when generating this layer.
	Bitrate *float64 `json:"bitrate,omitempty" tf:"bitrate,omitempty"`

	// The number of audio channels. Default to 2.
	Channels *float64 `json:"channels,omitempty" tf:"channels,omitempty"`

	// Specifies the label for the codec. The label can be used to control muxing behavior.
	Label *string `json:"label,omitempty" tf:"label,omitempty"`

	// The H.264 profile. Possible values are Auto, Baseline, High, High422, High444,or Main. Default to Auto.
	Profile *string `json:"profile,omitempty" tf:"profile,omitempty"`

	// The sampling rate to use for encoding in Hertz. Default to 48000.
+ SamplingRate *float64 `json:"samplingRate,omitempty" tf:"sampling_rate,omitempty"` +} + +type AacAudioObservation struct { + + // The average bitrate in bits per second at which to encode the input video when generating this layer. + Bitrate *float64 `json:"bitrate,omitempty" tf:"bitrate,omitempty"` + + // The number of audio channels. Default to 2. + Channels *float64 `json:"channels,omitempty" tf:"channels,omitempty"` + + // Specifies the label for the codec. The label can be used to control muxing behavior. + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // The H.264 profile. Possible values are Auto, Baseline, High, High422, High444,or Main. Default to Auto. + Profile *string `json:"profile,omitempty" tf:"profile,omitempty"` + + // The sampling rate to use for encoding in Hertz. Default to 48000. + SamplingRate *float64 `json:"samplingRate,omitempty" tf:"sampling_rate,omitempty"` +} + +type AacAudioParameters struct { + + // The average bitrate in bits per second at which to encode the input video when generating this layer. + // +kubebuilder:validation:Optional + Bitrate *float64 `json:"bitrate,omitempty" tf:"bitrate,omitempty"` + + // The number of audio channels. Default to 2. + // +kubebuilder:validation:Optional + Channels *float64 `json:"channels,omitempty" tf:"channels,omitempty"` + + // Specifies the label for the codec. The label can be used to control muxing behavior. + // +kubebuilder:validation:Optional + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // The H.264 profile. Possible values are Auto, Baseline, High, High422, High444,or Main. Default to Auto. + // +kubebuilder:validation:Optional + Profile *string `json:"profile,omitempty" tf:"profile,omitempty"` + + // The sampling rate to use for encoding in Hertz. Default to 48000. 
+ // +kubebuilder:validation:Optional + SamplingRate *float64 `json:"samplingRate,omitempty" tf:"sampling_rate,omitempty"` +} + +type AudioAnalyzerPresetInitParameters struct { + + // Possible values are Basic or Standard. Determines the set of audio analysis operations to be performed. Default to Standard. + AudioAnalysisMode *string `json:"audioAnalysisMode,omitempty" tf:"audio_analysis_mode,omitempty"` + + // The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode:Basic, since automatic language detection is not included in basic mode. If the language isn't specified, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fall back to en-US. The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463. + AudioLanguage *string `json:"audioLanguage,omitempty" tf:"audio_language,omitempty"` + + // Dictionary containing key value pairs for parameters not exposed in the preset itself. + // +mapType=granular + ExperimentalOptions map[string]*string `json:"experimentalOptions,omitempty" tf:"experimental_options,omitempty"` +} + +type AudioAnalyzerPresetObservation struct { + + // Possible values are Basic or Standard. Determines the set of audio analysis operations to be performed. Default to Standard. 
+ AudioAnalysisMode *string `json:"audioAnalysisMode,omitempty" tf:"audio_analysis_mode,omitempty"` + + // The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode:Basic, since automatic language detection is not included in basic mode. If the language isn't specified, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fall back to en-US. The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463. + AudioLanguage *string `json:"audioLanguage,omitempty" tf:"audio_language,omitempty"` + + // Dictionary containing key value pairs for parameters not exposed in the preset itself. + // +mapType=granular + ExperimentalOptions map[string]*string `json:"experimentalOptions,omitempty" tf:"experimental_options,omitempty"` +} + +type AudioAnalyzerPresetParameters struct { + + // Possible values are Basic or Standard. Determines the set of audio analysis operations to be performed. Default to Standard. + // +kubebuilder:validation:Optional + AudioAnalysisMode *string `json:"audioAnalysisMode,omitempty" tf:"audio_analysis_mode,omitempty"` + + // The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode:Basic, since automatic language detection is not included in basic mode. 
If the language isn't specified, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fall back to en-US. The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463. + // +kubebuilder:validation:Optional + AudioLanguage *string `json:"audioLanguage,omitempty" tf:"audio_language,omitempty"` + + // Dictionary containing key value pairs for parameters not exposed in the preset itself. + // +kubebuilder:validation:Optional + // +mapType=granular + ExperimentalOptions map[string]*string `json:"experimentalOptions,omitempty" tf:"experimental_options,omitempty"` +} + +type AudioInitParameters struct { + + // The gain level of audio in the overlay. The value should be in range between 0 to 1.0. The default is 1.0. + AudioGainLevel *float64 `json:"audioGainLevel,omitempty" tf:"audio_gain_level,omitempty"` + + // The end position, with reference to the input video, at which the overlay ends. The value should be in ISO 8601 format. For example, PT30S to end the overlay at 30 seconds into the input video. If not specified or the value is greater than the input video duration, the overlay will be applied until the end of the input video if the overlay media duration is greater than the input video duration, else the overlay will last as long as the overlay media duration. + End *string `json:"end,omitempty" tf:"end,omitempty"` + + // The duration over which the overlay fades in onto the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade in (same as PT0S). 
+ FadeInDuration *string `json:"fadeInDuration,omitempty" tf:"fade_in_duration,omitempty"` + + // The duration over which the overlay fades out of the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade out (same as PT0S). + FadeOutDuration *string `json:"fadeOutDuration,omitempty" tf:"fade_out_duration,omitempty"` + + // The label of the job input which is to be used as an overlay. The input must specify exact one file. You can specify an image file in JPG, PNG, GIF or BMP format, or an audio file (such as a WAV, MP3, WMA or M4A file), or a video file. + InputLabel *string `json:"inputLabel,omitempty" tf:"input_label,omitempty"` + + // The start position, with reference to the input video, at which the overlay starts. The value should be in ISO 8601 format. For example, PT05S to start the overlay at 5 seconds into the input video. If not specified the overlay starts from the beginning of the input video. + Start *string `json:"start,omitempty" tf:"start,omitempty"` +} + +type AudioObservation struct { + + // The gain level of audio in the overlay. The value should be in range between 0 to 1.0. The default is 1.0. + AudioGainLevel *float64 `json:"audioGainLevel,omitempty" tf:"audio_gain_level,omitempty"` + + // The end position, with reference to the input video, at which the overlay ends. The value should be in ISO 8601 format. For example, PT30S to end the overlay at 30 seconds into the input video. If not specified or the value is greater than the input video duration, the overlay will be applied until the end of the input video if the overlay media duration is greater than the input video duration, else the overlay will last as long as the overlay media duration. + End *string `json:"end,omitempty" tf:"end,omitempty"` + + // The duration over which the overlay fades in onto the input video. The value should be in ISO 8601 duration format. 
If not specified the default behavior is to have no fade in (same as PT0S). + FadeInDuration *string `json:"fadeInDuration,omitempty" tf:"fade_in_duration,omitempty"` + + // The duration over which the overlay fades out of the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade out (same as PT0S). + FadeOutDuration *string `json:"fadeOutDuration,omitempty" tf:"fade_out_duration,omitempty"` + + // The label of the job input which is to be used as an overlay. The input must specify exact one file. You can specify an image file in JPG, PNG, GIF or BMP format, or an audio file (such as a WAV, MP3, WMA or M4A file), or a video file. + InputLabel *string `json:"inputLabel,omitempty" tf:"input_label,omitempty"` + + // The start position, with reference to the input video, at which the overlay starts. The value should be in ISO 8601 format. For example, PT05S to start the overlay at 5 seconds into the input video. If not specified the overlay starts from the beginning of the input video. + Start *string `json:"start,omitempty" tf:"start,omitempty"` +} + +type AudioParameters struct { + + // The gain level of audio in the overlay. The value should be in range between 0 to 1.0. The default is 1.0. + // +kubebuilder:validation:Optional + AudioGainLevel *float64 `json:"audioGainLevel,omitempty" tf:"audio_gain_level,omitempty"` + + // The end position, with reference to the input video, at which the overlay ends. The value should be in ISO 8601 format. For example, PT30S to end the overlay at 30 seconds into the input video. If not specified or the value is greater than the input video duration, the overlay will be applied until the end of the input video if the overlay media duration is greater than the input video duration, else the overlay will last as long as the overlay media duration. 
+ // +kubebuilder:validation:Optional + End *string `json:"end,omitempty" tf:"end,omitempty"` + + // The duration over which the overlay fades in onto the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade in (same as PT0S). + // +kubebuilder:validation:Optional + FadeInDuration *string `json:"fadeInDuration,omitempty" tf:"fade_in_duration,omitempty"` + + // The duration over which the overlay fades out of the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade out (same as PT0S). + // +kubebuilder:validation:Optional + FadeOutDuration *string `json:"fadeOutDuration,omitempty" tf:"fade_out_duration,omitempty"` + + // The label of the job input which is to be used as an overlay. The input must specify exact one file. You can specify an image file in JPG, PNG, GIF or BMP format, or an audio file (such as a WAV, MP3, WMA or M4A file), or a video file. + // +kubebuilder:validation:Optional + InputLabel *string `json:"inputLabel" tf:"input_label,omitempty"` + + // The start position, with reference to the input video, at which the overlay starts. The value should be in ISO 8601 format. For example, PT05S to start the overlay at 5 seconds into the input video. If not specified the overlay starts from the beginning of the input video. + // +kubebuilder:validation:Optional + Start *string `json:"start,omitempty" tf:"start,omitempty"` +} + +type BuiltinPresetInitParameters struct { + + // A preset_configuration block as defined below. + PresetConfiguration *PresetConfigurationInitParameters `json:"presetConfiguration,omitempty" tf:"preset_configuration,omitempty"` + + // The built-in preset to be used for encoding videos. 
The Possible values are AACGoodQualityAudio, AdaptiveStreaming, ContentAwareEncoding, ContentAwareEncodingExperimental, CopyAllBitrateNonInterleaved, DDGoodQualityAudio, H265AdaptiveStreaming, H265ContentAwareEncoding, H265SingleBitrate4K, H265SingleBitrate1080p, H265SingleBitrate720p, H264MultipleBitrate1080p, H264MultipleBitrateSD, H264MultipleBitrate720p, H264SingleBitrate1080p, H264SingleBitrateSD and H264SingleBitrate720p. + PresetName *string `json:"presetName,omitempty" tf:"preset_name,omitempty"` +} + +type BuiltinPresetObservation struct { + + // A preset_configuration block as defined below. + PresetConfiguration *PresetConfigurationObservation `json:"presetConfiguration,omitempty" tf:"preset_configuration,omitempty"` + + // The built-in preset to be used for encoding videos. The Possible values are AACGoodQualityAudio, AdaptiveStreaming, ContentAwareEncoding, ContentAwareEncodingExperimental, CopyAllBitrateNonInterleaved, DDGoodQualityAudio, H265AdaptiveStreaming, H265ContentAwareEncoding, H265SingleBitrate4K, H265SingleBitrate1080p, H265SingleBitrate720p, H264MultipleBitrate1080p, H264MultipleBitrateSD, H264MultipleBitrate720p, H264SingleBitrate1080p, H264SingleBitrateSD and H264SingleBitrate720p. + PresetName *string `json:"presetName,omitempty" tf:"preset_name,omitempty"` +} + +type BuiltinPresetParameters struct { + + // A preset_configuration block as defined below. + // +kubebuilder:validation:Optional + PresetConfiguration *PresetConfigurationParameters `json:"presetConfiguration,omitempty" tf:"preset_configuration,omitempty"` + + // The built-in preset to be used for encoding videos. 
The Possible values are AACGoodQualityAudio, AdaptiveStreaming, ContentAwareEncoding, ContentAwareEncodingExperimental, CopyAllBitrateNonInterleaved, DDGoodQualityAudio, H265AdaptiveStreaming, H265ContentAwareEncoding, H265SingleBitrate4K, H265SingleBitrate1080p, H265SingleBitrate720p, H264MultipleBitrate1080p, H264MultipleBitrateSD, H264MultipleBitrate720p, H264SingleBitrate1080p, H264SingleBitrateSD and H264SingleBitrate720p. + // +kubebuilder:validation:Optional + PresetName *string `json:"presetName" tf:"preset_name,omitempty"` +} + +type CodecInitParameters struct { + + // A aac_audio block as defined above. + AacAudio *AacAudioInitParameters `json:"aacAudio,omitempty" tf:"aac_audio,omitempty"` + + // A copy_audio block as defined below. + CopyAudio *CopyAudioInitParameters `json:"copyAudio,omitempty" tf:"copy_audio,omitempty"` + + // A copy_video block as defined below. + CopyVideo *CopyVideoInitParameters `json:"copyVideo,omitempty" tf:"copy_video,omitempty"` + + // A dd_audio block as defined below. + DdAudio *DdAudioInitParameters `json:"ddAudio,omitempty" tf:"dd_audio,omitempty"` + + // A h264_video block as defined below. + H264Video *H264VideoInitParameters `json:"h264Video,omitempty" tf:"h264_video,omitempty"` + + // A h265_video block as defined below. + H265Video *H265VideoInitParameters `json:"h265Video,omitempty" tf:"h265_video,omitempty"` + + // A jpg_image block as defined below. + JpgImage *JpgImageInitParameters `json:"jpgImage,omitempty" tf:"jpg_image,omitempty"` + + // A png_image block as defined below. + PngImage *PngImageInitParameters `json:"pngImage,omitempty" tf:"png_image,omitempty"` +} + +type CodecObservation struct { + + // A aac_audio block as defined above. + AacAudio *AacAudioObservation `json:"aacAudio,omitempty" tf:"aac_audio,omitempty"` + + // A copy_audio block as defined below. + CopyAudio *CopyAudioObservation `json:"copyAudio,omitempty" tf:"copy_audio,omitempty"` + + // A copy_video block as defined below. 
+ CopyVideo *CopyVideoObservation `json:"copyVideo,omitempty" tf:"copy_video,omitempty"` + + // A dd_audio block as defined below. + DdAudio *DdAudioObservation `json:"ddAudio,omitempty" tf:"dd_audio,omitempty"` + + // A h264_video block as defined below. + H264Video *H264VideoObservation `json:"h264Video,omitempty" tf:"h264_video,omitempty"` + + // A h265_video block as defined below. + H265Video *H265VideoObservation `json:"h265Video,omitempty" tf:"h265_video,omitempty"` + + // A jpg_image block as defined below. + JpgImage *JpgImageObservation `json:"jpgImage,omitempty" tf:"jpg_image,omitempty"` + + // A png_image block as defined below. + PngImage *PngImageObservation `json:"pngImage,omitempty" tf:"png_image,omitempty"` +} + +type CodecParameters struct { + + // A aac_audio block as defined above. + // +kubebuilder:validation:Optional + AacAudio *AacAudioParameters `json:"aacAudio,omitempty" tf:"aac_audio,omitempty"` + + // A copy_audio block as defined below. + // +kubebuilder:validation:Optional + CopyAudio *CopyAudioParameters `json:"copyAudio,omitempty" tf:"copy_audio,omitempty"` + + // A copy_video block as defined below. + // +kubebuilder:validation:Optional + CopyVideo *CopyVideoParameters `json:"copyVideo,omitempty" tf:"copy_video,omitempty"` + + // A dd_audio block as defined below. + // +kubebuilder:validation:Optional + DdAudio *DdAudioParameters `json:"ddAudio,omitempty" tf:"dd_audio,omitempty"` + + // A h264_video block as defined below. + // +kubebuilder:validation:Optional + H264Video *H264VideoParameters `json:"h264Video,omitempty" tf:"h264_video,omitempty"` + + // A h265_video block as defined below. + // +kubebuilder:validation:Optional + H265Video *H265VideoParameters `json:"h265Video,omitempty" tf:"h265_video,omitempty"` + + // A jpg_image block as defined below. + // +kubebuilder:validation:Optional + JpgImage *JpgImageParameters `json:"jpgImage,omitempty" tf:"jpg_image,omitempty"` + + // A png_image block as defined below. 
+ // +kubebuilder:validation:Optional + PngImage *PngImageParameters `json:"pngImage,omitempty" tf:"png_image,omitempty"` +} + +type CopyAudioInitParameters struct { + + // Specifies the label for the codec. The label can be used to control muxing behavior. + Label *string `json:"label,omitempty" tf:"label,omitempty"` +} + +type CopyAudioObservation struct { + + // Specifies the label for the codec. The label can be used to control muxing behavior. + Label *string `json:"label,omitempty" tf:"label,omitempty"` +} + +type CopyAudioParameters struct { + + // Specifies the label for the codec. The label can be used to control muxing behavior. + // +kubebuilder:validation:Optional + Label *string `json:"label,omitempty" tf:"label,omitempty"` +} + +type CopyVideoInitParameters struct { + + // Specifies the label for the codec. The label can be used to control muxing behavior. + Label *string `json:"label,omitempty" tf:"label,omitempty"` +} + +type CopyVideoObservation struct { + + // Specifies the label for the codec. The label can be used to control muxing behavior. + Label *string `json:"label,omitempty" tf:"label,omitempty"` +} + +type CopyVideoParameters struct { + + // Specifies the label for the codec. The label can be used to control muxing behavior. + // +kubebuilder:validation:Optional + Label *string `json:"label,omitempty" tf:"label,omitempty"` +} + +type CropRectangleInitParameters struct { + + // The height of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%). + Height *string `json:"height,omitempty" tf:"height,omitempty"` + + // The number of pixels from the left-margin. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%). + Left *string `json:"left,omitempty" tf:"left,omitempty"` + + // The number of pixels from the top-margin. 
This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%). + Top *string `json:"top,omitempty" tf:"top,omitempty"` + + // The width of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%). + Width *string `json:"width,omitempty" tf:"width,omitempty"` +} + +type CropRectangleObservation struct { + + // The height of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%). + Height *string `json:"height,omitempty" tf:"height,omitempty"` + + // The number of pixels from the left-margin. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%). + Left *string `json:"left,omitempty" tf:"left,omitempty"` + + // The number of pixels from the top-margin. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%). + Top *string `json:"top,omitempty" tf:"top,omitempty"` + + // The width of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%). + Width *string `json:"width,omitempty" tf:"width,omitempty"` +} + +type CropRectangleParameters struct { + + // The height of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%). + // +kubebuilder:validation:Optional + Height *string `json:"height,omitempty" tf:"height,omitempty"` + + // The number of pixels from the left-margin. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%). + // +kubebuilder:validation:Optional + Left *string `json:"left,omitempty" tf:"left,omitempty"` + + // The number of pixels from the top-margin. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%). 
+ // +kubebuilder:validation:Optional + Top *string `json:"top,omitempty" tf:"top,omitempty"` + + // The width of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%). + // +kubebuilder:validation:Optional + Width *string `json:"width,omitempty" tf:"width,omitempty"` +} + +type CustomPresetInitParameters struct { + + // One or more codec blocks as defined above. + Codec []CodecInitParameters `json:"codec,omitempty" tf:"codec,omitempty"` + + // Dictionary containing key value pairs for parameters not exposed in the preset itself. + // +mapType=granular + ExperimentalOptions map[string]*string `json:"experimentalOptions,omitempty" tf:"experimental_options,omitempty"` + + // A filter block as defined below. + Filter *FilterInitParameters `json:"filter,omitempty" tf:"filter,omitempty"` + + // One or more format blocks as defined below. + Format []FormatInitParameters `json:"format,omitempty" tf:"format,omitempty"` +} + +type CustomPresetObservation struct { + + // One or more codec blocks as defined above. + Codec []CodecObservation `json:"codec,omitempty" tf:"codec,omitempty"` + + // Dictionary containing key value pairs for parameters not exposed in the preset itself. + // +mapType=granular + ExperimentalOptions map[string]*string `json:"experimentalOptions,omitempty" tf:"experimental_options,omitempty"` + + // A filter block as defined below. + Filter *FilterObservation `json:"filter,omitempty" tf:"filter,omitempty"` + + // One or more format blocks as defined below. + Format []FormatObservation `json:"format,omitempty" tf:"format,omitempty"` +} + +type CustomPresetParameters struct { + + // One or more codec blocks as defined above. + // +kubebuilder:validation:Optional + Codec []CodecParameters `json:"codec" tf:"codec,omitempty"` + + // Dictionary containing key value pairs for parameters not exposed in the preset itself. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + ExperimentalOptions map[string]*string `json:"experimentalOptions,omitempty" tf:"experimental_options,omitempty"` + + // A filter block as defined below. + // +kubebuilder:validation:Optional + Filter *FilterParameters `json:"filter,omitempty" tf:"filter,omitempty"` + + // One or more format blocks as defined below. + // +kubebuilder:validation:Optional + Format []FormatParameters `json:"format" tf:"format,omitempty"` +} + +type DdAudioInitParameters struct { + + // The average bitrate in bits per second at which to encode the input video when generating this layer. + Bitrate *float64 `json:"bitrate,omitempty" tf:"bitrate,omitempty"` + + // The number of audio channels. Default to 2. + Channels *float64 `json:"channels,omitempty" tf:"channels,omitempty"` + + // Specifies the label for the codec. The label can be used to control muxing behavior. + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // The sampling rate to use for encoding in Hertz. Default to 48000. + SamplingRate *float64 `json:"samplingRate,omitempty" tf:"sampling_rate,omitempty"` +} + +type DdAudioObservation struct { + + // The average bitrate in bits per second at which to encode the input video when generating this layer. + Bitrate *float64 `json:"bitrate,omitempty" tf:"bitrate,omitempty"` + + // The number of audio channels. Default to 2. + Channels *float64 `json:"channels,omitempty" tf:"channels,omitempty"` + + // Specifies the label for the codec. The label can be used to control muxing behavior. + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // The sampling rate to use for encoding in Hertz. Default to 48000. + SamplingRate *float64 `json:"samplingRate,omitempty" tf:"sampling_rate,omitempty"` +} + +type DdAudioParameters struct { + + // The average bitrate in bits per second at which to encode the input video when generating this layer. 
+ // +kubebuilder:validation:Optional + Bitrate *float64 `json:"bitrate,omitempty" tf:"bitrate,omitempty"` + + // The number of audio channels. Default to 2. + // +kubebuilder:validation:Optional + Channels *float64 `json:"channels,omitempty" tf:"channels,omitempty"` + + // Specifies the label for the codec. The label can be used to control muxing behavior. + // +kubebuilder:validation:Optional + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // The sampling rate to use for encoding in Hertz. Default to 48000. + // +kubebuilder:validation:Optional + SamplingRate *float64 `json:"samplingRate,omitempty" tf:"sampling_rate,omitempty"` +} + +type DeinterlaceInitParameters struct { + + // The deinterlacing mode. Possible values are AutoPixelAdaptive or Off. Default to AutoPixelAdaptive. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // The field parity to use for deinterlacing. Possible values are Auto, TopFieldFirst or BottomFieldFirst. Default to Auto. + Parity *string `json:"parity,omitempty" tf:"parity,omitempty"` +} + +type DeinterlaceObservation struct { + + // The deinterlacing mode. Possible values are AutoPixelAdaptive or Off. Default to AutoPixelAdaptive. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // The field parity to use for deinterlacing. Possible values are Auto, TopFieldFirst or BottomFieldFirst. Default to Auto. + Parity *string `json:"parity,omitempty" tf:"parity,omitempty"` +} + +type DeinterlaceParameters struct { + + // The deinterlacing mode. Possible values are AutoPixelAdaptive or Off. Default to AutoPixelAdaptive. + // +kubebuilder:validation:Optional + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // The field parity to use for deinterlacing. Possible values are Auto, TopFieldFirst or BottomFieldFirst. Default to Auto. 
+ // +kubebuilder:validation:Optional + Parity *string `json:"parity,omitempty" tf:"parity,omitempty"` +} + +type FaceDetectorPresetInitParameters struct { + + // Possible values are SourceResolution or StandardDefinition. Specifies the maximum resolution at which your video is analyzed. which will keep the input video at its original resolution when analyzed. Using StandardDefinition will resize input videos to standard definition while preserving the appropriate aspect ratio. It will only resize if the video is of higher resolution. For example, a 1920x1080 input would be scaled to 640x360 before processing. Switching to StandardDefinition will reduce the time it takes to process high resolution video. It may also reduce the cost of using this component (see https://azure.microsoft.com/en-us/pricing/details/media-services/#analytics for details). However, faces that end up being too small in the resized video may not be detected. Default to SourceResolution. + AnalysisResolution *string `json:"analysisResolution,omitempty" tf:"analysis_resolution,omitempty"` + + // Specifies the type of blur to apply to faces in the output video. Possible values are Black, Box, High, Low,and Med. + BlurType *string `json:"blurType,omitempty" tf:"blur_type,omitempty"` + + // Dictionary containing key value pairs for parameters not exposed in the preset itself. + // +mapType=granular + ExperimentalOptions map[string]*string `json:"experimentalOptions,omitempty" tf:"experimental_options,omitempty"` + + // This mode provides the ability to choose between the following settings: 1) Analyze - For detection only. This mode generates a metadata JSON file marking appearances of faces throughout the video. Where possible, appearances of the same person are assigned the same ID. 2) Combined - Additionally redacts(blurs) detected faces. 3) Redact - This enables a 2-pass process, allowing for selective redaction of a subset of detected faces. 
It takes in the metadata file from a prior analyze pass, along with the source video, and a user-selected subset of IDs that require redaction. Default to Analyze. + FaceRedactorMode *string `json:"faceRedactorMode,omitempty" tf:"face_redactor_mode,omitempty"` +} + +type FaceDetectorPresetObservation struct { + + // Possible values are SourceResolution or StandardDefinition. Specifies the maximum resolution at which your video is analyzed. which will keep the input video at its original resolution when analyzed. Using StandardDefinition will resize input videos to standard definition while preserving the appropriate aspect ratio. It will only resize if the video is of higher resolution. For example, a 1920x1080 input would be scaled to 640x360 before processing. Switching to StandardDefinition will reduce the time it takes to process high resolution video. It may also reduce the cost of using this component (see https://azure.microsoft.com/en-us/pricing/details/media-services/#analytics for details). However, faces that end up being too small in the resized video may not be detected. Default to SourceResolution. + AnalysisResolution *string `json:"analysisResolution,omitempty" tf:"analysis_resolution,omitempty"` + + // Specifies the type of blur to apply to faces in the output video. Possible values are Black, Box, High, Low,and Med. + BlurType *string `json:"blurType,omitempty" tf:"blur_type,omitempty"` + + // Dictionary containing key value pairs for parameters not exposed in the preset itself. + // +mapType=granular + ExperimentalOptions map[string]*string `json:"experimentalOptions,omitempty" tf:"experimental_options,omitempty"` + + // This mode provides the ability to choose between the following settings: 1) Analyze - For detection only. This mode generates a metadata JSON file marking appearances of faces throughout the video. Where possible, appearances of the same person are assigned the same ID. 2) Combined - Additionally redacts(blurs) detected faces. 
3) Redact - This enables a 2-pass process, allowing for selective redaction of a subset of detected faces. It takes in the metadata file from a prior analyze pass, along with the source video, and a user-selected subset of IDs that require redaction. Default to Analyze. + FaceRedactorMode *string `json:"faceRedactorMode,omitempty" tf:"face_redactor_mode,omitempty"` +} + +type FaceDetectorPresetParameters struct { + + // Possible values are SourceResolution or StandardDefinition. Specifies the maximum resolution at which your video is analyzed. which will keep the input video at its original resolution when analyzed. Using StandardDefinition will resize input videos to standard definition while preserving the appropriate aspect ratio. It will only resize if the video is of higher resolution. For example, a 1920x1080 input would be scaled to 640x360 before processing. Switching to StandardDefinition will reduce the time it takes to process high resolution video. It may also reduce the cost of using this component (see https://azure.microsoft.com/en-us/pricing/details/media-services/#analytics for details). However, faces that end up being too small in the resized video may not be detected. Default to SourceResolution. + // +kubebuilder:validation:Optional + AnalysisResolution *string `json:"analysisResolution,omitempty" tf:"analysis_resolution,omitempty"` + + // Specifies the type of blur to apply to faces in the output video. Possible values are Black, Box, High, Low,and Med. + // +kubebuilder:validation:Optional + BlurType *string `json:"blurType,omitempty" tf:"blur_type,omitempty"` + + // Dictionary containing key value pairs for parameters not exposed in the preset itself. + // +kubebuilder:validation:Optional + // +mapType=granular + ExperimentalOptions map[string]*string `json:"experimentalOptions,omitempty" tf:"experimental_options,omitempty"` + + // This mode provides the ability to choose between the following settings: 1) Analyze - For detection only. 
This mode generates a metadata JSON file marking appearances of faces throughout the video. Where possible, appearances of the same person are assigned the same ID. 2) Combined - Additionally redacts(blurs) detected faces. 3) Redact - This enables a 2-pass process, allowing for selective redaction of a subset of detected faces. It takes in the metadata file from a prior analyze pass, along with the source video, and a user-selected subset of IDs that require redaction. Default to Analyze. + // +kubebuilder:validation:Optional + FaceRedactorMode *string `json:"faceRedactorMode,omitempty" tf:"face_redactor_mode,omitempty"` +} + +type FadeInInitParameters struct { + + // The duration of the fade effect in the video. The value can be in ISO 8601 format (For example, PT05S to fade In/Out a color during 5 seconds), or a frame count (For example, 10 to fade 10 frames from the start time), or a relative value to stream duration (For example, 10% to fade 10% of stream duration). + Duration *string `json:"duration,omitempty" tf:"duration,omitempty"` + + // The color for the fade in/out. It can be on the CSS Level1 colors or an RGB/hex value: e.g: rgb(255,0,0), 0xFF0000 or #FF0000. + FadeColor *string `json:"fadeColor,omitempty" tf:"fade_color,omitempty"` + + // The start position, with reference to the input video, at which the overlay starts. The value should be in ISO 8601 format. For example, PT05S to start the overlay at 5 seconds into the input video. If not specified the overlay starts from the beginning of the input video. + Start *string `json:"start,omitempty" tf:"start,omitempty"` +} + +type FadeInObservation struct { + + // The duration of the fade effect in the video. The value can be in ISO 8601 format (For example, PT05S to fade In/Out a color during 5 seconds), or a frame count (For example, 10 to fade 10 frames from the start time), or a relative value to stream duration (For example, 10% to fade 10% of stream duration). 
+ Duration *string `json:"duration,omitempty" tf:"duration,omitempty"` + + // The color for the fade in/out. It can be on the CSS Level1 colors or an RGB/hex value: e.g: rgb(255,0,0), 0xFF0000 or #FF0000. + FadeColor *string `json:"fadeColor,omitempty" tf:"fade_color,omitempty"` + + // The start position, with reference to the input video, at which the overlay starts. The value should be in ISO 8601 format. For example, PT05S to start the overlay at 5 seconds into the input video. If not specified the overlay starts from the beginning of the input video. + Start *string `json:"start,omitempty" tf:"start,omitempty"` +} + +type FadeInParameters struct { + + // The duration of the fade effect in the video. The value can be in ISO 8601 format (For example, PT05S to fade In/Out a color during 5 seconds), or a frame count (For example, 10 to fade 10 frames from the start time), or a relative value to stream duration (For example, 10% to fade 10% of stream duration). + // +kubebuilder:validation:Optional + Duration *string `json:"duration" tf:"duration,omitempty"` + + // The color for the fade in/out. It can be on the CSS Level1 colors or an RGB/hex value: e.g: rgb(255,0,0), 0xFF0000 or #FF0000. + // +kubebuilder:validation:Optional + FadeColor *string `json:"fadeColor" tf:"fade_color,omitempty"` + + // The start position, with reference to the input video, at which the overlay starts. The value should be in ISO 8601 format. For example, PT05S to start the overlay at 5 seconds into the input video. If not specified the overlay starts from the beginning of the input video. + // +kubebuilder:validation:Optional + Start *string `json:"start,omitempty" tf:"start,omitempty"` +} + +type FadeOutInitParameters struct { + + // The duration of the fade effect in the video. 
The value can be in ISO 8601 format (For example, PT05S to fade In/Out a color during 5 seconds), or a frame count (For example, 10 to fade 10 frames from the start time), or a relative value to stream duration (For example, 10% to fade 10% of stream duration). + Duration *string `json:"duration,omitempty" tf:"duration,omitempty"` + + // The color for the fade in/out. It can be on the CSS Level1 colors or an RGB/hex value: e.g: rgb(255,0,0), 0xFF0000 or #FF0000. + FadeColor *string `json:"fadeColor,omitempty" tf:"fade_color,omitempty"` + + // The start position, with reference to the input video, at which the overlay starts. The value should be in ISO 8601 format. For example, PT05S to start the overlay at 5 seconds into the input video. If not specified the overlay starts from the beginning of the input video. + Start *string `json:"start,omitempty" tf:"start,omitempty"` +} + +type FadeOutObservation struct { + + // The duration of the fade effect in the video. The value can be in ISO 8601 format (For example, PT05S to fade In/Out a color during 5 seconds), or a frame count (For example, 10 to fade 10 frames from the start time), or a relative value to stream duration (For example, 10% to fade 10% of stream duration). + Duration *string `json:"duration,omitempty" tf:"duration,omitempty"` + + // The color for the fade in/out. It can be on the CSS Level1 colors or an RGB/hex value: e.g: rgb(255,0,0), 0xFF0000 or #FF0000. + FadeColor *string `json:"fadeColor,omitempty" tf:"fade_color,omitempty"` + + // The start position, with reference to the input video, at which the overlay starts. The value should be in ISO 8601 format. For example, PT05S to start the overlay at 5 seconds into the input video. If not specified the overlay starts from the beginning of the input video. + Start *string `json:"start,omitempty" tf:"start,omitempty"` +} + +type FadeOutParameters struct { + + // The duration of the fade effect in the video. 
The value can be in ISO 8601 format (For example, PT05S to fade In/Out a color during 5 seconds), or a frame count (For example, 10 to fade 10 frames from the start time), or a relative value to stream duration (For example, 10% to fade 10% of stream duration). + // +kubebuilder:validation:Optional + Duration *string `json:"duration" tf:"duration,omitempty"` + + // The color for the fade in/out. It can be on the CSS Level1 colors or an RGB/hex value: e.g: rgb(255,0,0), 0xFF0000 or #FF0000. + // +kubebuilder:validation:Optional + FadeColor *string `json:"fadeColor" tf:"fade_color,omitempty"` + + // The start position, with reference to the input video, at which the overlay starts. The value should be in ISO 8601 format. For example, PT05S to start the overlay at 5 seconds into the input video. If not specified the overlay starts from the beginning of the input video. + // +kubebuilder:validation:Optional + Start *string `json:"start,omitempty" tf:"start,omitempty"` +} + +type FilterInitParameters struct { + + // A crop_rectangle block as defined above. + CropRectangle *CropRectangleInitParameters `json:"cropRectangle,omitempty" tf:"crop_rectangle,omitempty"` + + // A deinterlace block as defined below. + Deinterlace *DeinterlaceInitParameters `json:"deinterlace,omitempty" tf:"deinterlace,omitempty"` + + // A fade_in block as defined above. + FadeIn *FadeInInitParameters `json:"fadeIn,omitempty" tf:"fade_in,omitempty"` + + // A fade_out block as defined above. + FadeOut *FadeOutInitParameters `json:"fadeOut,omitempty" tf:"fade_out,omitempty"` + + // One or more overlay blocks as defined below. + Overlay []OverlayInitParameters `json:"overlay,omitempty" tf:"overlay,omitempty"` + + // The rotation to be applied to the input video before it is encoded. Possible values are Auto, None, Rotate90, Rotate180, Rotate270,or Rotate0. Default to Auto. 
+ Rotation *string `json:"rotation,omitempty" tf:"rotation,omitempty"` +} + +type FilterObservation struct { + + // A crop_rectangle block as defined above. + CropRectangle *CropRectangleObservation `json:"cropRectangle,omitempty" tf:"crop_rectangle,omitempty"` + + // A deinterlace block as defined below. + Deinterlace *DeinterlaceObservation `json:"deinterlace,omitempty" tf:"deinterlace,omitempty"` + + // A fade_in block as defined above. + FadeIn *FadeInObservation `json:"fadeIn,omitempty" tf:"fade_in,omitempty"` + + // A fade_out block as defined above. + FadeOut *FadeOutObservation `json:"fadeOut,omitempty" tf:"fade_out,omitempty"` + + // One or more overlay blocks as defined below. + Overlay []OverlayObservation `json:"overlay,omitempty" tf:"overlay,omitempty"` + + // The rotation to be applied to the input video before it is encoded. Possible values are Auto, None, Rotate90, Rotate180, Rotate270,or Rotate0. Default to Auto. + Rotation *string `json:"rotation,omitempty" tf:"rotation,omitempty"` +} + +type FilterParameters struct { + + // A crop_rectangle block as defined above. + // +kubebuilder:validation:Optional + CropRectangle *CropRectangleParameters `json:"cropRectangle,omitempty" tf:"crop_rectangle,omitempty"` + + // A deinterlace block as defined below. + // +kubebuilder:validation:Optional + Deinterlace *DeinterlaceParameters `json:"deinterlace,omitempty" tf:"deinterlace,omitempty"` + + // A fade_in block as defined above. + // +kubebuilder:validation:Optional + FadeIn *FadeInParameters `json:"fadeIn,omitempty" tf:"fade_in,omitempty"` + + // A fade_out block as defined above. + // +kubebuilder:validation:Optional + FadeOut *FadeOutParameters `json:"fadeOut,omitempty" tf:"fade_out,omitempty"` + + // One or more overlay blocks as defined below. + // +kubebuilder:validation:Optional + Overlay []OverlayParameters `json:"overlay,omitempty" tf:"overlay,omitempty"` + + // The rotation to be applied to the input video before it is encoded. 
Possible values are Auto, None, Rotate90, Rotate180, Rotate270,or Rotate0. Default to Auto. + // +kubebuilder:validation:Optional + Rotation *string `json:"rotation,omitempty" tf:"rotation,omitempty"` +} + +type FormatInitParameters struct { + + // A jpg block as defined below. + Jpg *JpgInitParameters `json:"jpg,omitempty" tf:"jpg,omitempty"` + + // A mp4 block as defined below. + Mp4 *Mp4InitParameters `json:"mp4,omitempty" tf:"mp4,omitempty"` + + // A png block as defined below. + Png *PngInitParameters `json:"png,omitempty" tf:"png,omitempty"` + + // A transport_stream block as defined below. + TransportStream *TransportStreamInitParameters `json:"transportStream,omitempty" tf:"transport_stream,omitempty"` +} + +type FormatObservation struct { + + // A jpg block as defined below. + Jpg *JpgObservation `json:"jpg,omitempty" tf:"jpg,omitempty"` + + // A mp4 block as defined below. + Mp4 *Mp4Observation `json:"mp4,omitempty" tf:"mp4,omitempty"` + + // A png block as defined below. + Png *PngObservation `json:"png,omitempty" tf:"png,omitempty"` + + // A transport_stream block as defined below. + TransportStream *TransportStreamObservation `json:"transportStream,omitempty" tf:"transport_stream,omitempty"` +} + +type FormatParameters struct { + + // A jpg block as defined below. + // +kubebuilder:validation:Optional + Jpg *JpgParameters `json:"jpg,omitempty" tf:"jpg,omitempty"` + + // A mp4 block as defined below. + // +kubebuilder:validation:Optional + Mp4 *Mp4Parameters `json:"mp4,omitempty" tf:"mp4,omitempty"` + + // A png block as defined below. + // +kubebuilder:validation:Optional + Png *PngParameters `json:"png,omitempty" tf:"png,omitempty"` + + // A transport_stream block as defined below. + // +kubebuilder:validation:Optional + TransportStream *TransportStreamParameters `json:"transportStream,omitempty" tf:"transport_stream,omitempty"` +} + +type H264VideoInitParameters struct { + + // The complexity of the encoding. 
Possible values are Balanced, Speed or Quality. + Complexity *string `json:"complexity,omitempty" tf:"complexity,omitempty"` + + // The distance between two key frames. The value should be non-zero in the range 0.5 to 20 seconds, specified in ISO 8601 format. Note that this setting is ignored if sync_mode is set to Passthrough, where the KeyFrameInterval value will follow the input source setting. Defaults to PT2S. + KeyFrameInterval *string `json:"keyFrameInterval,omitempty" tf:"key_frame_interval,omitempty"` + + // Specifies the label for the codec. The label can be used to control muxing behavior. + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // One or more layer blocks as defined below. + Layer []LayerInitParameters `json:"layer,omitempty" tf:"layer,omitempty"` + + // The rate control mode. Possible values are ABR, CBR or CRF. Default to ABR. + RateControlMode *string `json:"rateControlMode,omitempty" tf:"rate_control_mode,omitempty"` + + // Whether the encoder should insert key frames at scene changes. This flag should be set to true only when the encoder is being configured to produce a single output video. Default to false. + SceneChangeDetectionEnabled *bool `json:"sceneChangeDetectionEnabled,omitempty" tf:"scene_change_detection_enabled,omitempty"` + + // The resizing mode, which indicates how the input video will be resized to fit the desired output resolution(s). Possible values are AutoFit, AutoSize or None. Default to AutoSize. + StretchMode *string `json:"stretchMode,omitempty" tf:"stretch_mode,omitempty"` + + // Specifies the synchronization mode for the video. Possible values are Auto, Cfr, Passthrough or Vfr. Default to Auto. + SyncMode *string `json:"syncMode,omitempty" tf:"sync_mode,omitempty"` +} + +type H264VideoObservation struct { + + // The complexity of the encoding. Possible values are Balanced, Speed or Quality. 
+ Complexity *string `json:"complexity,omitempty" tf:"complexity,omitempty"` + + // The distance between two key frames. The value should be non-zero in the range 0.5 to 20 seconds, specified in ISO 8601 format. Note that this setting is ignored if sync_mode is set to Passthrough, where the KeyFrameInterval value will follow the input source setting. Defaults to PT2S. + KeyFrameInterval *string `json:"keyFrameInterval,omitempty" tf:"key_frame_interval,omitempty"` + + // Specifies the label for the codec. The label can be used to control muxing behavior. + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // One or more layer blocks as defined below. + Layer []LayerObservation `json:"layer,omitempty" tf:"layer,omitempty"` + + // The rate control mode. Possible values are ABR, CBR or CRF. Default to ABR. + RateControlMode *string `json:"rateControlMode,omitempty" tf:"rate_control_mode,omitempty"` + + // Whether the encoder should insert key frames at scene changes. This flag should be set to true only when the encoder is being configured to produce a single output video. Default to false. + SceneChangeDetectionEnabled *bool `json:"sceneChangeDetectionEnabled,omitempty" tf:"scene_change_detection_enabled,omitempty"` + + // The resizing mode, which indicates how the input video will be resized to fit the desired output resolution(s). Possible values are AutoFit, AutoSize or None. Default to AutoSize. + StretchMode *string `json:"stretchMode,omitempty" tf:"stretch_mode,omitempty"` + + // Specifies the synchronization mode for the video. Possible values are Auto, Cfr, Passthrough or Vfr. Default to Auto. + SyncMode *string `json:"syncMode,omitempty" tf:"sync_mode,omitempty"` +} + +type H264VideoParameters struct { + + // The complexity of the encoding. Possible values are Balanced, Speed or Quality. + // +kubebuilder:validation:Optional + Complexity *string `json:"complexity,omitempty" tf:"complexity,omitempty"` + + // The distance between two key frames. 
The value should be non-zero in the range 0.5 to 20 seconds, specified in ISO 8601 format. Note that this setting is ignored if sync_mode is set to Passthrough, where the KeyFrameInterval value will follow the input source setting. Defaults to PT2S. + // +kubebuilder:validation:Optional + KeyFrameInterval *string `json:"keyFrameInterval,omitempty" tf:"key_frame_interval,omitempty"` + + // Specifies the label for the codec. The label can be used to control muxing behavior. + // +kubebuilder:validation:Optional + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // One or more layer blocks as defined below. + // +kubebuilder:validation:Optional + Layer []LayerParameters `json:"layer,omitempty" tf:"layer,omitempty"` + + // The rate control mode. Possible values are ABR, CBR or CRF. Default to ABR. + // +kubebuilder:validation:Optional + RateControlMode *string `json:"rateControlMode,omitempty" tf:"rate_control_mode,omitempty"` + + // Whether the encoder should insert key frames at scene changes. This flag should be set to true only when the encoder is being configured to produce a single output video. Default to false. + // +kubebuilder:validation:Optional + SceneChangeDetectionEnabled *bool `json:"sceneChangeDetectionEnabled,omitempty" tf:"scene_change_detection_enabled,omitempty"` + + // The resizing mode, which indicates how the input video will be resized to fit the desired output resolution(s). Possible values are AutoFit, AutoSize or None. Default to AutoSize. + // +kubebuilder:validation:Optional + StretchMode *string `json:"stretchMode,omitempty" tf:"stretch_mode,omitempty"` + + // Specifies the synchronization mode for the video. Possible values are Auto, Cfr, Passthrough or Vfr. Default to Auto. + // +kubebuilder:validation:Optional + SyncMode *string `json:"syncMode,omitempty" tf:"sync_mode,omitempty"` +} + +type H265VideoInitParameters struct { + + // The complexity of the encoding. Possible values are Balanced, Speed or Quality. 
+ Complexity *string `json:"complexity,omitempty" tf:"complexity,omitempty"` + + // The distance between two key frames. The value should be non-zero in the range 0.5 to 20 seconds, specified in ISO 8601 format. Note that this setting is ignored if sync_mode is set to Passthrough, where the KeyFrameInterval value will follow the input source setting. Defaults to PT2S. + KeyFrameInterval *string `json:"keyFrameInterval,omitempty" tf:"key_frame_interval,omitempty"` + + // Specifies the label for the codec. The label can be used to control muxing behavior. + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // One or more layer blocks as defined below. + Layer []H265VideoLayerInitParameters `json:"layer,omitempty" tf:"layer,omitempty"` + + // Whether the encoder should insert key frames at scene changes. This flag should be set to true only when the encoder is being configured to produce a single output video. Default to false. + SceneChangeDetectionEnabled *bool `json:"sceneChangeDetectionEnabled,omitempty" tf:"scene_change_detection_enabled,omitempty"` + + // The resizing mode, which indicates how the input video will be resized to fit the desired output resolution(s). Possible values are AutoFit, AutoSize or None. Default to AutoSize. + StretchMode *string `json:"stretchMode,omitempty" tf:"stretch_mode,omitempty"` + + // Specifies the synchronization mode for the video. Possible values are Auto, Cfr, Passthrough or Vfr. Default to Auto. + SyncMode *string `json:"syncMode,omitempty" tf:"sync_mode,omitempty"` +} + +type H265VideoLayerInitParameters struct { + + // Whether adaptive B-frames are used when encoding this layer. If not specified, the encoder will turn it on whenever the video profile permits its use. Default to true. + AdaptiveBFrameEnabled *bool `json:"adaptiveBFrameEnabled,omitempty" tf:"adaptive_b_frame_enabled,omitempty"` + + // The number of B-frames to use when encoding this layer. 
If not specified, the encoder chooses an appropriate number based on the video profile and level. + BFrames *float64 `json:"bFrames,omitempty" tf:"b_frames,omitempty"` + + // The average bitrate in bits per second at which to encode the input video when generating this layer. + Bitrate *float64 `json:"bitrate,omitempty" tf:"bitrate,omitempty"` + + // Specifies the maximum amount of time that the encoder should buffer frames before encoding. The value should be in ISO 8601 format. The value should be in the range 0.1 to 100 seconds. Defaults to PT5S. + BufferWindow *string `json:"bufferWindow,omitempty" tf:"buffer_window,omitempty"` + + // The value of CRF to be used when encoding this layer. This setting takes effect when rate_control_mode is set CRF. The range of CRF value is between 0 and 51, where lower values would result in better quality, at the expense of higher file sizes. Higher values mean more compression, but at some point quality degradation will be noticed. Default to 23. + Crf *float64 `json:"crf,omitempty" tf:"crf,omitempty"` + + // The frame rate (in frames per second) at which to encode this layer. The value can be in the form of M/N where M and N are integers (For example, 30000/1001), or in the form of a number (For example, 30, or 29.97). The encoder enforces constraints on allowed frame rates based on the profile and level. If it is not specified, the encoder will use the same frame rate as the input video. + FrameRate *string `json:"frameRate,omitempty" tf:"frame_rate,omitempty"` + + // The height of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%). + Height *string `json:"height,omitempty" tf:"height,omitempty"` + + // Specifies the label for the codec. The label can be used to control muxing behavior. + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // The H.264 levels. Currently, the resource support Level up to 6.2. 
The value can be auto, or a number that matches the H.264 profile. If not specified, the default is auto, which lets the encoder choose the Level that is appropriate for this layer. + Level *string `json:"level,omitempty" tf:"level,omitempty"` + + // The maximum bitrate (in bits per second), at which the VBV buffer should be assumed to refill. If not specified, defaults to the same value as bitrate. + MaxBitrate *float64 `json:"maxBitrate,omitempty" tf:"max_bitrate,omitempty"` + + // The H.264 profile. Possible values are Auto, Baseline, High, High422, High444,or Main. Default to Auto. + Profile *string `json:"profile,omitempty" tf:"profile,omitempty"` + + // The number of reference frames to be used when encoding this layer. If not specified, the encoder determines an appropriate number based on the encoder complexity setting. + ReferenceFrames *float64 `json:"referenceFrames,omitempty" tf:"reference_frames,omitempty"` + + // The number of slices to be used when encoding this layer. If not specified, default is 1, which means that encoder will use a single slice for each frame. + Slices *float64 `json:"slices,omitempty" tf:"slices,omitempty"` + + // The width of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%). + Width *string `json:"width,omitempty" tf:"width,omitempty"` +} + +type H265VideoLayerObservation struct { + + // Whether adaptive B-frames are used when encoding this layer. If not specified, the encoder will turn it on whenever the video profile permits its use. Default to true. + AdaptiveBFrameEnabled *bool `json:"adaptiveBFrameEnabled,omitempty" tf:"adaptive_b_frame_enabled,omitempty"` + + // The number of B-frames to use when encoding this layer. If not specified, the encoder chooses an appropriate number based on the video profile and level. 
+ BFrames *float64 `json:"bFrames,omitempty" tf:"b_frames,omitempty"` + + // The average bitrate in bits per second at which to encode the input video when generating this layer. + Bitrate *float64 `json:"bitrate,omitempty" tf:"bitrate,omitempty"` + + // Specifies the maximum amount of time that the encoder should buffer frames before encoding. The value should be in ISO 8601 format. The value should be in the range 0.1 to 100 seconds. Defaults to PT5S. + BufferWindow *string `json:"bufferWindow,omitempty" tf:"buffer_window,omitempty"` + + // The value of CRF to be used when encoding this layer. This setting takes effect when rate_control_mode is set CRF. The range of CRF value is between 0 and 51, where lower values would result in better quality, at the expense of higher file sizes. Higher values mean more compression, but at some point quality degradation will be noticed. Default to 23. + Crf *float64 `json:"crf,omitempty" tf:"crf,omitempty"` + + // The frame rate (in frames per second) at which to encode this layer. The value can be in the form of M/N where M and N are integers (For example, 30000/1001), or in the form of a number (For example, 30, or 29.97). The encoder enforces constraints on allowed frame rates based on the profile and level. If it is not specified, the encoder will use the same frame rate as the input video. + FrameRate *string `json:"frameRate,omitempty" tf:"frame_rate,omitempty"` + + // The height of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%). + Height *string `json:"height,omitempty" tf:"height,omitempty"` + + // Specifies the label for the codec. The label can be used to control muxing behavior. + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // The H.264 levels. Currently, the resource support Level up to 6.2. The value can be auto, or a number that matches the H.264 profile. 
If not specified, the default is auto, which lets the encoder choose the Level that is appropriate for this layer. + Level *string `json:"level,omitempty" tf:"level,omitempty"` + + // The maximum bitrate (in bits per second), at which the VBV buffer should be assumed to refill. If not specified, defaults to the same value as bitrate. + MaxBitrate *float64 `json:"maxBitrate,omitempty" tf:"max_bitrate,omitempty"` + + // The H.264 profile. Possible values are Auto, Baseline, High, High422, High444,or Main. Default to Auto. + Profile *string `json:"profile,omitempty" tf:"profile,omitempty"` + + // The number of reference frames to be used when encoding this layer. If not specified, the encoder determines an appropriate number based on the encoder complexity setting. + ReferenceFrames *float64 `json:"referenceFrames,omitempty" tf:"reference_frames,omitempty"` + + // The number of slices to be used when encoding this layer. If not specified, default is 1, which means that encoder will use a single slice for each frame. + Slices *float64 `json:"slices,omitempty" tf:"slices,omitempty"` + + // The width of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%). + Width *string `json:"width,omitempty" tf:"width,omitempty"` +} + +type H265VideoLayerParameters struct { + + // Whether adaptive B-frames are used when encoding this layer. If not specified, the encoder will turn it on whenever the video profile permits its use. Default to true. + // +kubebuilder:validation:Optional + AdaptiveBFrameEnabled *bool `json:"adaptiveBFrameEnabled,omitempty" tf:"adaptive_b_frame_enabled,omitempty"` + + // The number of B-frames to use when encoding this layer. If not specified, the encoder chooses an appropriate number based on the video profile and level. 
+ // +kubebuilder:validation:Optional + BFrames *float64 `json:"bFrames,omitempty" tf:"b_frames,omitempty"` + + // The average bitrate in bits per second at which to encode the input video when generating this layer. + // +kubebuilder:validation:Optional + Bitrate *float64 `json:"bitrate" tf:"bitrate,omitempty"` + + // Specifies the maximum amount of time that the encoder should buffer frames before encoding. The value should be in ISO 8601 format. The value should be in the range 0.1 to 100 seconds. Defaults to PT5S. + // +kubebuilder:validation:Optional + BufferWindow *string `json:"bufferWindow,omitempty" tf:"buffer_window,omitempty"` + + // The value of CRF to be used when encoding this layer. This setting takes effect when rate_control_mode is set CRF. The range of CRF value is between 0 and 51, where lower values would result in better quality, at the expense of higher file sizes. Higher values mean more compression, but at some point quality degradation will be noticed. Default to 23. + // +kubebuilder:validation:Optional + Crf *float64 `json:"crf,omitempty" tf:"crf,omitempty"` + + // The frame rate (in frames per second) at which to encode this layer. The value can be in the form of M/N where M and N are integers (For example, 30000/1001), or in the form of a number (For example, 30, or 29.97). The encoder enforces constraints on allowed frame rates based on the profile and level. If it is not specified, the encoder will use the same frame rate as the input video. + // +kubebuilder:validation:Optional + FrameRate *string `json:"frameRate,omitempty" tf:"frame_rate,omitempty"` + + // The height of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%). + // +kubebuilder:validation:Optional + Height *string `json:"height,omitempty" tf:"height,omitempty"` + + // Specifies the label for the codec. The label can be used to control muxing behavior. 
+ // +kubebuilder:validation:Optional + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // The H.264 levels. Currently, the resource support Level up to 6.2. The value can be auto, or a number that matches the H.264 profile. If not specified, the default is auto, which lets the encoder choose the Level that is appropriate for this layer. + // +kubebuilder:validation:Optional + Level *string `json:"level,omitempty" tf:"level,omitempty"` + + // The maximum bitrate (in bits per second), at which the VBV buffer should be assumed to refill. If not specified, defaults to the same value as bitrate. + // +kubebuilder:validation:Optional + MaxBitrate *float64 `json:"maxBitrate,omitempty" tf:"max_bitrate,omitempty"` + + // The H.264 profile. Possible values are Auto, Baseline, High, High422, High444,or Main. Default to Auto. + // +kubebuilder:validation:Optional + Profile *string `json:"profile,omitempty" tf:"profile,omitempty"` + + // The number of reference frames to be used when encoding this layer. If not specified, the encoder determines an appropriate number based on the encoder complexity setting. + // +kubebuilder:validation:Optional + ReferenceFrames *float64 `json:"referenceFrames,omitempty" tf:"reference_frames,omitempty"` + + // The number of slices to be used when encoding this layer. If not specified, default is 1, which means that encoder will use a single slice for each frame. + // +kubebuilder:validation:Optional + Slices *float64 `json:"slices,omitempty" tf:"slices,omitempty"` + + // The width of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%). + // +kubebuilder:validation:Optional + Width *string `json:"width,omitempty" tf:"width,omitempty"` +} + +type H265VideoObservation struct { + + // The complexity of the encoding. Possible values are Balanced, Speed or Quality. 
+ Complexity *string `json:"complexity,omitempty" tf:"complexity,omitempty"` + + // The distance between two key frames. The value should be non-zero in the range 0.5 to 20 seconds, specified in ISO 8601 format. Note that this setting is ignored if sync_mode is set to Passthrough, where the KeyFrameInterval value will follow the input source setting. Defaults to PT2S. + KeyFrameInterval *string `json:"keyFrameInterval,omitempty" tf:"key_frame_interval,omitempty"` + + // Specifies the label for the codec. The label can be used to control muxing behavior. + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // One or more layer blocks as defined below. + Layer []H265VideoLayerObservation `json:"layer,omitempty" tf:"layer,omitempty"` + + // Whether the encoder should insert key frames at scene changes. This flag should be set to true only when the encoder is being configured to produce a single output video. Default to false. + SceneChangeDetectionEnabled *bool `json:"sceneChangeDetectionEnabled,omitempty" tf:"scene_change_detection_enabled,omitempty"` + + // The resizing mode, which indicates how the input video will be resized to fit the desired output resolution(s). Possible values are AutoFit, AutoSize or None. Default to AutoSize. + StretchMode *string `json:"stretchMode,omitempty" tf:"stretch_mode,omitempty"` + + // Specifies the synchronization mode for the video. Possible values are Auto, Cfr, Passthrough or Vfr. Default to Auto. + SyncMode *string `json:"syncMode,omitempty" tf:"sync_mode,omitempty"` +} + +type H265VideoParameters struct { + + // The complexity of the encoding. Possible values are Balanced, Speed or Quality. + // +kubebuilder:validation:Optional + Complexity *string `json:"complexity,omitempty" tf:"complexity,omitempty"` + + // The distance between two key frames. The value should be non-zero in the range 0.5 to 20 seconds, specified in ISO 8601 format. 
Note that this setting is ignored if sync_mode is set to Passthrough, where the KeyFrameInterval value will follow the input source setting. Defaults to PT2S. + // +kubebuilder:validation:Optional + KeyFrameInterval *string `json:"keyFrameInterval,omitempty" tf:"key_frame_interval,omitempty"` + + // Specifies the label for the codec. The label can be used to control muxing behavior. + // +kubebuilder:validation:Optional + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // One or more layer blocks as defined below. + // +kubebuilder:validation:Optional + Layer []H265VideoLayerParameters `json:"layer,omitempty" tf:"layer,omitempty"` + + // Whether the encoder should insert key frames at scene changes. This flag should be set to true only when the encoder is being configured to produce a single output video. Default to false. + // +kubebuilder:validation:Optional + SceneChangeDetectionEnabled *bool `json:"sceneChangeDetectionEnabled,omitempty" tf:"scene_change_detection_enabled,omitempty"` + + // The resizing mode, which indicates how the input video will be resized to fit the desired output resolution(s). Possible values are AutoFit, AutoSize or None. Default to AutoSize. + // +kubebuilder:validation:Optional + StretchMode *string `json:"stretchMode,omitempty" tf:"stretch_mode,omitempty"` + + // Specifies the synchronization mode for the video. Possible values are Auto, Cfr, Passthrough or Vfr. Default to Auto. + // +kubebuilder:validation:Optional + SyncMode *string `json:"syncMode,omitempty" tf:"sync_mode,omitempty"` +} + +type JpgImageInitParameters struct { + + // The distance between two key frames. The value should be non-zero in the range 0.5 to 20 seconds, specified in ISO 8601 format. Note that this setting is ignored if sync_mode is set to Passthrough, where the KeyFrameInterval value will follow the input source setting. Defaults to PT2S. 
+ KeyFrameInterval *string `json:"keyFrameInterval,omitempty" tf:"key_frame_interval,omitempty"` + + // Specifies the label for the codec. The label can be used to control muxing behavior. + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // One or more layer blocks as defined below. + Layer []JpgImageLayerInitParameters `json:"layer,omitempty" tf:"layer,omitempty"` + + // The position relative to transform preset start time in the input video at which to stop generating thumbnails. The value can be in ISO 8601 format (For example, PT5M30S to stop at 5 minutes and 30 seconds from start time), or a frame count (For example, 300 to stop at the 300th frame from the frame at start time. If this value is 1, it means only producing one thumbnail at start time), or a relative value to the stream duration (For example, 50% to stop at half of stream duration from start time). The default value is 100%, which means to stop at the end of the stream. + Range *string `json:"range,omitempty" tf:"range,omitempty"` + + // Sets the number of columns used in thumbnail sprite image. The number of rows are automatically calculated and a VTT file is generated with the coordinate mappings for each thumbnail in the sprite. Note: this value should be a positive integer and a proper value is recommended so that the output image resolution will not go beyond JPEG maximum pixel resolution limit 65535x65535. + SpriteColumn *float64 `json:"spriteColumn,omitempty" tf:"sprite_column,omitempty"` + + // The start position, with reference to the input video, at which the overlay starts. The value should be in ISO 8601 format. For example, PT05S to start the overlay at 5 seconds into the input video. If not specified the overlay starts from the beginning of the input video. + Start *string `json:"start,omitempty" tf:"start,omitempty"` + + // The intervals at which thumbnails are generated. 
The value can be in ISO 8601 format (For example, PT05S for one image every 5 seconds), or a frame count (For example, 30 for one image every 30 frames), or a relative value to stream duration (For example, 10% for one image every 10% of stream duration). Note: Step value will affect the first generated thumbnail, which may not be exactly the one specified at transform preset start time. This is due to the encoder, which tries to select the best thumbnail between start time and Step position from start time as the first output. As the default value is 10%, it means if stream has long duration, the first generated thumbnail might be far away from the one specified at start time. Try to select reasonable value for Step if the first thumbnail is expected close to start time, or set Range value at 1 if only one thumbnail is needed at start time. + Step *string `json:"step,omitempty" tf:"step,omitempty"` + + // The resizing mode, which indicates how the input video will be resized to fit the desired output resolution(s). Possible values are AutoFit, AutoSize or None. Default to AutoSize. + StretchMode *string `json:"stretchMode,omitempty" tf:"stretch_mode,omitempty"` + + // Specifies the synchronization mode for the video. Possible values are Auto, Cfr, Passthrough or Vfr. Default to Auto. + SyncMode *string `json:"syncMode,omitempty" tf:"sync_mode,omitempty"` +} + +type JpgImageLayerInitParameters struct { + + // The height of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%). + Height *string `json:"height,omitempty" tf:"height,omitempty"` + + // Specifies the label for the codec. The label can be used to control muxing behavior. + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // The compression quality of the JPEG output. Range is from 0 to 100 and the default is 70. 
+ Quality *float64 `json:"quality,omitempty" tf:"quality,omitempty"` + + // The width of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%). + Width *string `json:"width,omitempty" tf:"width,omitempty"` +} + +type JpgImageLayerObservation struct { + + // The height of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%). + Height *string `json:"height,omitempty" tf:"height,omitempty"` + + // Specifies the label for the codec. The label can be used to control muxing behavior. + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // The compression quality of the JPEG output. Range is from 0 to 100 and the default is 70. + Quality *float64 `json:"quality,omitempty" tf:"quality,omitempty"` + + // The width of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%). + Width *string `json:"width,omitempty" tf:"width,omitempty"` +} + +type JpgImageLayerParameters struct { + + // The height of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%). + // +kubebuilder:validation:Optional + Height *string `json:"height,omitempty" tf:"height,omitempty"` + + // Specifies the label for the codec. The label can be used to control muxing behavior. + // +kubebuilder:validation:Optional + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // The compression quality of the JPEG output. Range is from 0 to 100 and the default is 70. + // +kubebuilder:validation:Optional + Quality *float64 `json:"quality,omitempty" tf:"quality,omitempty"` + + // The width of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%). 
+ // +kubebuilder:validation:Optional + Width *string `json:"width,omitempty" tf:"width,omitempty"` +} + +type JpgImageObservation struct { + + // The distance between two key frames. The value should be non-zero in the range 0.5 to 20 seconds, specified in ISO 8601 format. Note that this setting is ignored if sync_mode is set to Passthrough, where the KeyFrameInterval value will follow the input source setting. Defaults to PT2S. + KeyFrameInterval *string `json:"keyFrameInterval,omitempty" tf:"key_frame_interval,omitempty"` + + // Specifies the label for the codec. The label can be used to control muxing behavior. + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // One or more layer blocks as defined below. + Layer []JpgImageLayerObservation `json:"layer,omitempty" tf:"layer,omitempty"` + + // The position relative to transform preset start time in the input video at which to stop generating thumbnails. The value can be in ISO 8601 format (For example, PT5M30S to stop at 5 minutes and 30 seconds from start time), or a frame count (For example, 300 to stop at the 300th frame from the frame at start time. If this value is 1, it means only producing one thumbnail at start time), or a relative value to the stream duration (For example, 50% to stop at half of stream duration from start time). The default value is 100%, which means to stop at the end of the stream. + Range *string `json:"range,omitempty" tf:"range,omitempty"` + + // Sets the number of columns used in thumbnail sprite image. The number of rows are automatically calculated and a VTT file is generated with the coordinate mappings for each thumbnail in the sprite. Note: this value should be a positive integer and a proper value is recommended so that the output image resolution will not go beyond JPEG maximum pixel resolution limit 65535x65535. 
+ SpriteColumn *float64 `json:"spriteColumn,omitempty" tf:"sprite_column,omitempty"` + + // The start position, with reference to the input video, at which the overlay starts. The value should be in ISO 8601 format. For example, PT05S to start the overlay at 5 seconds into the input video. If not specified the overlay starts from the beginning of the input video. + Start *string `json:"start,omitempty" tf:"start,omitempty"` + + // The intervals at which thumbnails are generated. The value can be in ISO 8601 format (For example, PT05S for one image every 5 seconds), or a frame count (For example, 30 for one image every 30 frames), or a relative value to stream duration (For example, 10% for one image every 10% of stream duration). Note: Step value will affect the first generated thumbnail, which may not be exactly the one specified at transform preset start time. This is due to the encoder, which tries to select the best thumbnail between start time and Step position from start time as the first output. As the default value is 10%, it means if stream has long duration, the first generated thumbnail might be far away from the one specified at start time. Try to select reasonable value for Step if the first thumbnail is expected close to start time, or set Range value at 1 if only one thumbnail is needed at start time. + Step *string `json:"step,omitempty" tf:"step,omitempty"` + + // The resizing mode, which indicates how the input video will be resized to fit the desired output resolution(s). Possible values are AutoFit, AutoSize or None. Default to AutoSize. + StretchMode *string `json:"stretchMode,omitempty" tf:"stretch_mode,omitempty"` + + // Specifies the synchronization mode for the video. Possible values are Auto, Cfr, Passthrough or Vfr. Default to Auto. + SyncMode *string `json:"syncMode,omitempty" tf:"sync_mode,omitempty"` +} + +type JpgImageParameters struct { + + // The distance between two key frames. 
The value should be non-zero in the range 0.5 to 20 seconds, specified in ISO 8601 format. Note that this setting is ignored if sync_mode is set to Passthrough, where the KeyFrameInterval value will follow the input source setting. Defaults to PT2S. + // +kubebuilder:validation:Optional + KeyFrameInterval *string `json:"keyFrameInterval,omitempty" tf:"key_frame_interval,omitempty"` + + // Specifies the label for the codec. The label can be used to control muxing behavior. + // +kubebuilder:validation:Optional + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // One or more layer blocks as defined below. + // +kubebuilder:validation:Optional + Layer []JpgImageLayerParameters `json:"layer,omitempty" tf:"layer,omitempty"` + + // The position relative to transform preset start time in the input video at which to stop generating thumbnails. The value can be in ISO 8601 format (For example, PT5M30S to stop at 5 minutes and 30 seconds from start time), or a frame count (For example, 300 to stop at the 300th frame from the frame at start time. If this value is 1, it means only producing one thumbnail at start time), or a relative value to the stream duration (For example, 50% to stop at half of stream duration from start time). The default value is 100%, which means to stop at the end of the stream. + // +kubebuilder:validation:Optional + Range *string `json:"range,omitempty" tf:"range,omitempty"` + + // Sets the number of columns used in thumbnail sprite image. The number of rows are automatically calculated and a VTT file is generated with the coordinate mappings for each thumbnail in the sprite. Note: this value should be a positive integer and a proper value is recommended so that the output image resolution will not go beyond JPEG maximum pixel resolution limit 65535x65535. 
+ // +kubebuilder:validation:Optional + SpriteColumn *float64 `json:"spriteColumn,omitempty" tf:"sprite_column,omitempty"` + + // The start position, with reference to the input video, at which the overlay starts. The value should be in ISO 8601 format. For example, PT05S to start the overlay at 5 seconds into the input video. If not specified the overlay starts from the beginning of the input video. + // +kubebuilder:validation:Optional + Start *string `json:"start" tf:"start,omitempty"` + + // The intervals at which thumbnails are generated. The value can be in ISO 8601 format (For example, PT05S for one image every 5 seconds), or a frame count (For example, 30 for one image every 30 frames), or a relative value to stream duration (For example, 10% for one image every 10% of stream duration). Note: Step value will affect the first generated thumbnail, which may not be exactly the one specified at transform preset start time. This is due to the encoder, which tries to select the best thumbnail between start time and Step position from start time as the first output. As the default value is 10%, it means if stream has long duration, the first generated thumbnail might be far away from the one specified at start time. Try to select reasonable value for Step if the first thumbnail is expected close to start time, or set Range value at 1 if only one thumbnail is needed at start time. + // +kubebuilder:validation:Optional + Step *string `json:"step,omitempty" tf:"step,omitempty"` + + // The resizing mode, which indicates how the input video will be resized to fit the desired output resolution(s). Possible values are AutoFit, AutoSize or None. Default to AutoSize. + // +kubebuilder:validation:Optional + StretchMode *string `json:"stretchMode,omitempty" tf:"stretch_mode,omitempty"` + + // Specifies the synchronization mode for the video. Possible values are Auto, Cfr, Passthrough or Vfr. Default to Auto. 
+	// +kubebuilder:validation:Optional
+	SyncMode *string `json:"syncMode,omitempty" tf:"sync_mode,omitempty"`
+}
+
+type JpgInitParameters struct {
+
+	// The file naming pattern used for the creation of output files. The following macros are supported in the file name: {Basename} - An expansion macro that will use the name of the input video file. If the base name(the file suffix is not included) of the input video file is less than 32 characters long, the base name of input video files will be used. If the length of base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length. {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {AudioStream} - string "Audio" plus audio stream number(start from 1). {Bitrate} - The audio/video bitrate in kbps. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. {Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed from the filename.
+	FilenamePattern *string `json:"filenamePattern,omitempty" tf:"filename_pattern,omitempty"`
+}
+
+type JpgObservation struct {
+
+	// The file naming pattern used for the creation of output files. The following macros are supported in the file name: {Basename} - An expansion macro that will use the name of the input video file. If the base name(the file suffix is not included) of the input video file is less than 32 characters long, the base name of input video files will be used. If the length of base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length. {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {AudioStream} - string "Audio" plus audio stream number(start from 1). {Bitrate} - The audio/video bitrate in kbps. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. {Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed from the filename.
+	FilenamePattern *string `json:"filenamePattern,omitempty" tf:"filename_pattern,omitempty"`
+}
+
+type JpgParameters struct {
+
+	// The file naming pattern used for the creation of output files. The following macros are supported in the file name: {Basename} - An expansion macro that will use the name of the input video file. If the base name(the file suffix is not included) of the input video file is less than 32 characters long, the base name of input video files will be used. If the length of base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length. {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {AudioStream} - string "Audio" plus audio stream number(start from 1). {Bitrate} - The audio/video bitrate in kbps. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. {Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed from the filename.
+	// +kubebuilder:validation:Optional
+	FilenamePattern *string `json:"filenamePattern" tf:"filename_pattern,omitempty"`
+}
+
+type LayerInitParameters struct {
+
+	// Whether adaptive B-frames are used when encoding this layer. If not specified, the encoder will turn it on whenever the video profile permits its use. Default to true.
+	AdaptiveBFrameEnabled *bool `json:"adaptiveBFrameEnabled,omitempty" tf:"adaptive_b_frame_enabled,omitempty"`
+
+	// The number of B-frames to use when encoding this layer. If not specified, the encoder chooses an appropriate number based on the video profile and level.
+	BFrames *float64 `json:"bFrames,omitempty" tf:"b_frames,omitempty"`
+
+	// The average bitrate in bits per second at which to encode the input video when generating this layer.
+	Bitrate *float64 `json:"bitrate,omitempty" tf:"bitrate,omitempty"`
+
+	// Specifies the maximum amount of time that the encoder should buffer frames before encoding. The value should be in ISO 8601 format. The value should be in the range 0.1 to 100 seconds. Defaults to PT5S.
+	BufferWindow *string `json:"bufferWindow,omitempty" tf:"buffer_window,omitempty"`
+
+	// The value of CRF to be used when encoding this layer. This setting takes effect when rate_control_mode is set CRF. The range of CRF value is between 0 and 51, where lower values would result in better quality, at the expense of higher file sizes. Higher values mean more compression, but at some point quality degradation will be noticed. Default to 23.
+	Crf *float64 `json:"crf,omitempty" tf:"crf,omitempty"`
+
+	// The entropy mode to be used for this layer. Possible values are Cabac or Cavlc. If not specified, the encoder chooses the mode that is appropriate for the profile and level.
+	EntropyMode *string `json:"entropyMode,omitempty" tf:"entropy_mode,omitempty"`
+
+	// The frame rate (in frames per second) at which to encode this layer. The value can be in the form of M/N where M and N are integers (For example, 30000/1001), or in the form of a number (For example, 30, or 29.97). The encoder enforces constraints on allowed frame rates based on the profile and level. If it is not specified, the encoder will use the same frame rate as the input video.
+	FrameRate *string `json:"frameRate,omitempty" tf:"frame_rate,omitempty"`
+
+	// The height of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
+	Height *string `json:"height,omitempty" tf:"height,omitempty"`
+
+	// Specifies the label for the codec. The label can be used to control muxing behavior.
+	Label *string `json:"label,omitempty" tf:"label,omitempty"`
+
+	// The H.264 levels. Currently, the resource supports Level up to 6.2. The value can be auto, or a number that matches the H.264 profile. If not specified, the default is auto, which lets the encoder choose the Level that is appropriate for this layer.
+	Level *string `json:"level,omitempty" tf:"level,omitempty"`
+
+	// The maximum bitrate (in bits per second), at which the VBV buffer should be assumed to refill. If not specified, defaults to the same value as bitrate.
+	MaxBitrate *float64 `json:"maxBitrate,omitempty" tf:"max_bitrate,omitempty"`
+
+	// The H.264 profile. Possible values are Auto, Baseline, High, High422, High444, or Main. Default to Auto.
+	Profile *string `json:"profile,omitempty" tf:"profile,omitempty"`
+
+	// The number of reference frames to be used when encoding this layer. If not specified, the encoder determines an appropriate number based on the encoder complexity setting.
+	ReferenceFrames *float64 `json:"referenceFrames,omitempty" tf:"reference_frames,omitempty"`
+
+	// The number of slices to be used when encoding this layer. If not specified, default is 1, which means that encoder will use a single slice for each frame.
+	Slices *float64 `json:"slices,omitempty" tf:"slices,omitempty"`
+
+	// The width of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
+	Width *string `json:"width,omitempty" tf:"width,omitempty"`
+}
+
+type LayerObservation struct {
+
+	// Whether adaptive B-frames are used when encoding this layer. If not specified, the encoder will turn it on whenever the video profile permits its use. Default to true.
+	AdaptiveBFrameEnabled *bool `json:"adaptiveBFrameEnabled,omitempty" tf:"adaptive_b_frame_enabled,omitempty"`
+
+	// The number of B-frames to use when encoding this layer. If not specified, the encoder chooses an appropriate number based on the video profile and level.
+	BFrames *float64 `json:"bFrames,omitempty" tf:"b_frames,omitempty"`
+
+	// The average bitrate in bits per second at which to encode the input video when generating this layer.
+	Bitrate *float64 `json:"bitrate,omitempty" tf:"bitrate,omitempty"`
+
+	// Specifies the maximum amount of time that the encoder should buffer frames before encoding. The value should be in ISO 8601 format. The value should be in the range 0.1 to 100 seconds. Defaults to PT5S.
+	BufferWindow *string `json:"bufferWindow,omitempty" tf:"buffer_window,omitempty"`
+
+	// The value of CRF to be used when encoding this layer. This setting takes effect when rate_control_mode is set CRF. The range of CRF value is between 0 and 51, where lower values would result in better quality, at the expense of higher file sizes. Higher values mean more compression, but at some point quality degradation will be noticed. Default to 23.
+	Crf *float64 `json:"crf,omitempty" tf:"crf,omitempty"`
+
+	// The entropy mode to be used for this layer. Possible values are Cabac or Cavlc. If not specified, the encoder chooses the mode that is appropriate for the profile and level.
+	EntropyMode *string `json:"entropyMode,omitempty" tf:"entropy_mode,omitempty"`
+
+	// The frame rate (in frames per second) at which to encode this layer. The value can be in the form of M/N where M and N are integers (For example, 30000/1001), or in the form of a number (For example, 30, or 29.97). The encoder enforces constraints on allowed frame rates based on the profile and level. If it is not specified, the encoder will use the same frame rate as the input video.
+	FrameRate *string `json:"frameRate,omitempty" tf:"frame_rate,omitempty"`
+
+	// The height of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
+	Height *string `json:"height,omitempty" tf:"height,omitempty"`
+
+	// Specifies the label for the codec. The label can be used to control muxing behavior.
+	Label *string `json:"label,omitempty" tf:"label,omitempty"`
+
+	// The H.264 levels. Currently, the resource supports Level up to 6.2. The value can be auto, or a number that matches the H.264 profile. If not specified, the default is auto, which lets the encoder choose the Level that is appropriate for this layer.
+	Level *string `json:"level,omitempty" tf:"level,omitempty"`
+
+	// The maximum bitrate (in bits per second), at which the VBV buffer should be assumed to refill. If not specified, defaults to the same value as bitrate.
+	MaxBitrate *float64 `json:"maxBitrate,omitempty" tf:"max_bitrate,omitempty"`
+
+	// The H.264 profile. Possible values are Auto, Baseline, High, High422, High444, or Main. Default to Auto.
+	Profile *string `json:"profile,omitempty" tf:"profile,omitempty"`
+
+	// The number of reference frames to be used when encoding this layer. If not specified, the encoder determines an appropriate number based on the encoder complexity setting.
+	ReferenceFrames *float64 `json:"referenceFrames,omitempty" tf:"reference_frames,omitempty"`
+
+	// The number of slices to be used when encoding this layer. If not specified, default is 1, which means that encoder will use a single slice for each frame.
+	Slices *float64 `json:"slices,omitempty" tf:"slices,omitempty"`
+
+	// The width of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
+	Width *string `json:"width,omitempty" tf:"width,omitempty"`
+}
+
+type LayerParameters struct {
+
+	// Whether adaptive B-frames are used when encoding this layer. If not specified, the encoder will turn it on whenever the video profile permits its use. Default to true.
+	// +kubebuilder:validation:Optional
+	AdaptiveBFrameEnabled *bool `json:"adaptiveBFrameEnabled,omitempty" tf:"adaptive_b_frame_enabled,omitempty"`
+
+	// The number of B-frames to use when encoding this layer. If not specified, the encoder chooses an appropriate number based on the video profile and level.
+	// +kubebuilder:validation:Optional
+	BFrames *float64 `json:"bFrames,omitempty" tf:"b_frames,omitempty"`
+
+	// The average bitrate in bits per second at which to encode the input video when generating this layer.
+	// +kubebuilder:validation:Optional
+	Bitrate *float64 `json:"bitrate" tf:"bitrate,omitempty"`
+
+	// Specifies the maximum amount of time that the encoder should buffer frames before encoding. The value should be in ISO 8601 format. The value should be in the range 0.1 to 100 seconds. Defaults to PT5S.
+	// +kubebuilder:validation:Optional
+	BufferWindow *string `json:"bufferWindow,omitempty" tf:"buffer_window,omitempty"`
+
+	// The value of CRF to be used when encoding this layer. This setting takes effect when rate_control_mode is set CRF. The range of CRF value is between 0 and 51, where lower values would result in better quality, at the expense of higher file sizes. Higher values mean more compression, but at some point quality degradation will be noticed. Default to 23.
+	// +kubebuilder:validation:Optional
+	Crf *float64 `json:"crf,omitempty" tf:"crf,omitempty"`
+
+	// The entropy mode to be used for this layer. Possible values are Cabac or Cavlc. If not specified, the encoder chooses the mode that is appropriate for the profile and level.
+	// +kubebuilder:validation:Optional
+	EntropyMode *string `json:"entropyMode,omitempty" tf:"entropy_mode,omitempty"`
+
+	// The frame rate (in frames per second) at which to encode this layer. The value can be in the form of M/N where M and N are integers (For example, 30000/1001), or in the form of a number (For example, 30, or 29.97). The encoder enforces constraints on allowed frame rates based on the profile and level. If it is not specified, the encoder will use the same frame rate as the input video.
+	// +kubebuilder:validation:Optional
+	FrameRate *string `json:"frameRate,omitempty" tf:"frame_rate,omitempty"`
+
+	// The height of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
+	// +kubebuilder:validation:Optional
+	Height *string `json:"height,omitempty" tf:"height,omitempty"`
+
+	// Specifies the label for the codec. The label can be used to control muxing behavior.
+	// +kubebuilder:validation:Optional
+	Label *string `json:"label,omitempty" tf:"label,omitempty"`
+
+	// The H.264 levels. Currently, the resource supports Level up to 6.2. The value can be auto, or a number that matches the H.264 profile. If not specified, the default is auto, which lets the encoder choose the Level that is appropriate for this layer.
+	// +kubebuilder:validation:Optional
+	Level *string `json:"level,omitempty" tf:"level,omitempty"`
+
+	// The maximum bitrate (in bits per second), at which the VBV buffer should be assumed to refill. If not specified, defaults to the same value as bitrate.
+	// +kubebuilder:validation:Optional
+	MaxBitrate *float64 `json:"maxBitrate,omitempty" tf:"max_bitrate,omitempty"`
+
+	// The H.264 profile. Possible values are Auto, Baseline, High, High422, High444, or Main. Default to Auto.
+	// +kubebuilder:validation:Optional
+	Profile *string `json:"profile,omitempty" tf:"profile,omitempty"`
+
+	// The number of reference frames to be used when encoding this layer. If not specified, the encoder determines an appropriate number based on the encoder complexity setting.
+	// +kubebuilder:validation:Optional
+	ReferenceFrames *float64 `json:"referenceFrames,omitempty" tf:"reference_frames,omitempty"`
+
+	// The number of slices to be used when encoding this layer. If not specified, default is 1, which means that encoder will use a single slice for each frame.
+	// +kubebuilder:validation:Optional
+	Slices *float64 `json:"slices,omitempty" tf:"slices,omitempty"`
+
+	// The width of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
+	// +kubebuilder:validation:Optional
+	Width *string `json:"width,omitempty" tf:"width,omitempty"`
+}
+
+type Mp4InitParameters struct {
+
+	// The file naming pattern used for the creation of output files. The following macros are supported in the file name: {Basename} - An expansion macro that will use the name of the input video file. If the base name(the file suffix is not included) of the input video file is less than 32 characters long, the base name of input video files will be used. If the length of base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length. {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {AudioStream} - string "Audio" plus audio stream number(start from 1). {Bitrate} - The audio/video bitrate in kbps. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. {Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed from the filename.
+	FilenamePattern *string `json:"filenamePattern,omitempty" tf:"filename_pattern,omitempty"`
+
+	// One or more output_file blocks as defined above.
+	OutputFile []OutputFileInitParameters `json:"outputFile,omitempty" tf:"output_file,omitempty"`
+}
+
+type Mp4Observation struct {
+
+	// The file naming pattern used for the creation of output files. The following macros are supported in the file name: {Basename} - An expansion macro that will use the name of the input video file. If the base name(the file suffix is not included) of the input video file is less than 32 characters long, the base name of input video files will be used. If the length of base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length. {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {AudioStream} - string "Audio" plus audio stream number(start from 1). {Bitrate} - The audio/video bitrate in kbps. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. {Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed from the filename.
+	FilenamePattern *string `json:"filenamePattern,omitempty" tf:"filename_pattern,omitempty"`
+
+	// One or more output_file blocks as defined above.
+	OutputFile []OutputFileObservation `json:"outputFile,omitempty" tf:"output_file,omitempty"`
+}
+
+type Mp4Parameters struct {
+
+	// The file naming pattern used for the creation of output files. The following macros are supported in the file name: {Basename} - An expansion macro that will use the name of the input video file. If the base name(the file suffix is not included) of the input video file is less than 32 characters long, the base name of input video files will be used. If the length of base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length. {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {AudioStream} - string "Audio" plus audio stream number(start from 1). {Bitrate} - The audio/video bitrate in kbps. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. {Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed from the filename.
+	// +kubebuilder:validation:Optional
+	FilenamePattern *string `json:"filenamePattern" tf:"filename_pattern,omitempty"`
+
+	// One or more output_file blocks as defined above.
+	// +kubebuilder:validation:Optional
+	OutputFile []OutputFileParameters `json:"outputFile,omitempty" tf:"output_file,omitempty"`
+}
+
+type OutputFileInitParameters struct {
+
+	// The list of labels that describe how the encoder should multiplex video and audio into an output file. For example, if the encoder is producing two video layers with labels v1 and v2, and one audio layer with label a1, then an array like ["v1", "a1"] tells the encoder to produce an output file with the video track represented by v1 and the audio track represented by a1.
+	Labels []*string `json:"labels,omitempty" tf:"labels,omitempty"`
+}
+
+type OutputFileObservation struct {
+
+	// The list of labels that describe how the encoder should multiplex video and audio into an output file. For example, if the encoder is producing two video layers with labels v1 and v2, and one audio layer with label a1, then an array like ["v1", "a1"] tells the encoder to produce an output file with the video track represented by v1 and the audio track represented by a1.
+	Labels []*string `json:"labels,omitempty" tf:"labels,omitempty"`
+}
+
+type OutputFileParameters struct {
+
+	// The list of labels that describe how the encoder should multiplex video and audio into an output file. For example, if the encoder is producing two video layers with labels v1 and v2, and one audio layer with label a1, then an array like ["v1", "a1"] tells the encoder to produce an output file with the video track represented by v1 and the audio track represented by a1.
+	// +kubebuilder:validation:Optional
+	Labels []*string `json:"labels" tf:"labels,omitempty"`
+}
+
+type OutputInitParameters struct {
+
+	// An audio_analyzer_preset block as defined above.
+	AudioAnalyzerPreset *AudioAnalyzerPresetInitParameters `json:"audioAnalyzerPreset,omitempty" tf:"audio_analyzer_preset,omitempty"`
+
+	// A builtin_preset block as defined above.
+	BuiltinPreset *BuiltinPresetInitParameters `json:"builtinPreset,omitempty" tf:"builtin_preset,omitempty"`
+
+	// A custom_preset block as defined above.
+	CustomPreset *CustomPresetInitParameters `json:"customPreset,omitempty" tf:"custom_preset,omitempty"`
+
+	// A face_detector_preset block as defined above.
+	FaceDetectorPreset *FaceDetectorPresetInitParameters `json:"faceDetectorPreset,omitempty" tf:"face_detector_preset,omitempty"`
+
+	// A Transform can define more than one output. This property defines what the service should do when one output fails - either continue to produce other outputs, or, stop the other outputs. The overall Job state will not reflect failures of outputs that are specified with ContinueJob. Possible values are StopProcessingJob or ContinueJob. Defaults to StopProcessingJob.
+	OnErrorAction *string `json:"onErrorAction,omitempty" tf:"on_error_action,omitempty"`
+
+	// Sets the relative priority of the TransformOutputs within a Transform. This sets the priority that the service uses for processing Transform Outputs. Possible values are High, Normal or Low. Defaults to Normal.
+	RelativePriority *string `json:"relativePriority,omitempty" tf:"relative_priority,omitempty"`
+
+	// A video_analyzer_preset block as defined below.
+	VideoAnalyzerPreset *VideoAnalyzerPresetInitParameters `json:"videoAnalyzerPreset,omitempty" tf:"video_analyzer_preset,omitempty"`
+}
+
+type OutputObservation struct {
+
+	// An audio_analyzer_preset block as defined above.
+	AudioAnalyzerPreset *AudioAnalyzerPresetObservation `json:"audioAnalyzerPreset,omitempty" tf:"audio_analyzer_preset,omitempty"`
+
+	// A builtin_preset block as defined above.
+	BuiltinPreset *BuiltinPresetObservation `json:"builtinPreset,omitempty" tf:"builtin_preset,omitempty"`
+
+	// A custom_preset block as defined above.
+	CustomPreset *CustomPresetObservation `json:"customPreset,omitempty" tf:"custom_preset,omitempty"`
+
+	// A face_detector_preset block as defined above.
+	FaceDetectorPreset *FaceDetectorPresetObservation `json:"faceDetectorPreset,omitempty" tf:"face_detector_preset,omitempty"`
+
+	// A Transform can define more than one output. This property defines what the service should do when one output fails - either continue to produce other outputs, or, stop the other outputs. The overall Job state will not reflect failures of outputs that are specified with ContinueJob. Possible values are StopProcessingJob or ContinueJob. Defaults to StopProcessingJob.
+	OnErrorAction *string `json:"onErrorAction,omitempty" tf:"on_error_action,omitempty"`
+
+	// Sets the relative priority of the TransformOutputs within a Transform. This sets the priority that the service uses for processing Transform Outputs. Possible values are High, Normal or Low. Defaults to Normal.
+	RelativePriority *string `json:"relativePriority,omitempty" tf:"relative_priority,omitempty"`
+
+	// A video_analyzer_preset block as defined below.
+	VideoAnalyzerPreset *VideoAnalyzerPresetObservation `json:"videoAnalyzerPreset,omitempty" tf:"video_analyzer_preset,omitempty"`
+}
+
+type OutputParameters struct {
+
+	// An audio_analyzer_preset block as defined above.
+	// +kubebuilder:validation:Optional
+	AudioAnalyzerPreset *AudioAnalyzerPresetParameters `json:"audioAnalyzerPreset,omitempty" tf:"audio_analyzer_preset,omitempty"`
+
+	// A builtin_preset block as defined above.
+	// +kubebuilder:validation:Optional
+	BuiltinPreset *BuiltinPresetParameters `json:"builtinPreset,omitempty" tf:"builtin_preset,omitempty"`
+
+	// A custom_preset block as defined above.
+	// +kubebuilder:validation:Optional
+	CustomPreset *CustomPresetParameters `json:"customPreset,omitempty" tf:"custom_preset,omitempty"`
+
+	// A face_detector_preset block as defined above.
+	// +kubebuilder:validation:Optional
+	FaceDetectorPreset *FaceDetectorPresetParameters `json:"faceDetectorPreset,omitempty" tf:"face_detector_preset,omitempty"`
+
+	// A Transform can define more than one output. This property defines what the service should do when one output fails - either continue to produce other outputs, or, stop the other outputs. The overall Job state will not reflect failures of outputs that are specified with ContinueJob. Possible values are StopProcessingJob or ContinueJob. Defaults to StopProcessingJob.
+	// +kubebuilder:validation:Optional
+	OnErrorAction *string `json:"onErrorAction,omitempty" tf:"on_error_action,omitempty"`
+
+	// Sets the relative priority of the TransformOutputs within a Transform. This sets the priority that the service uses for processing Transform Outputs. Possible values are High, Normal or Low. Defaults to Normal.
+	// +kubebuilder:validation:Optional
+	RelativePriority *string `json:"relativePriority,omitempty" tf:"relative_priority,omitempty"`
+
+	// A video_analyzer_preset block as defined below.
+	// +kubebuilder:validation:Optional
+	VideoAnalyzerPreset *VideoAnalyzerPresetParameters `json:"videoAnalyzerPreset,omitempty" tf:"video_analyzer_preset,omitempty"`
+}
+
+type OverlayInitParameters struct {
+
+	// An audio block as defined above.
+	Audio *AudioInitParameters `json:"audio,omitempty" tf:"audio,omitempty"`
+
+	// A video block as defined below.
+	Video *VideoInitParameters `json:"video,omitempty" tf:"video,omitempty"`
+}
+
+type OverlayObservation struct {
+
+	// An audio block as defined above.
+	Audio *AudioObservation `json:"audio,omitempty" tf:"audio,omitempty"`
+
+	// A video block as defined below.
+	Video *VideoObservation `json:"video,omitempty" tf:"video,omitempty"`
+}
+
+type OverlayParameters struct {
+
+	// An audio block as defined above.
+	// +kubebuilder:validation:Optional
+	Audio *AudioParameters `json:"audio,omitempty" tf:"audio,omitempty"`
+
+	// A video block as defined below.
+	// +kubebuilder:validation:Optional
+	Video *VideoParameters `json:"video,omitempty" tf:"video,omitempty"`
+}
+
+type PngImageInitParameters struct {
+
+	// The distance between two key frames. The value should be non-zero in the range 0.5 to 20 seconds, specified in ISO 8601 format. Note that this setting is ignored if sync_mode is set to Passthrough, where the KeyFrameInterval value will follow the input source setting. Defaults to PT2S.
+	KeyFrameInterval *string `json:"keyFrameInterval,omitempty" tf:"key_frame_interval,omitempty"`
+
+	// Specifies the label for the codec. The label can be used to control muxing behavior.
+	Label *string `json:"label,omitempty" tf:"label,omitempty"`
+
+	// One or more layer blocks as defined below.
+	Layer []PngImageLayerInitParameters `json:"layer,omitempty" tf:"layer,omitempty"`
+
+	// The position relative to transform preset start time in the input video at which to stop generating thumbnails. The value can be in ISO 8601 format (For example, PT5M30S to stop at 5 minutes and 30 seconds from start time), or a frame count (For example, 300 to stop at the 300th frame from the frame at start time. If this value is 1, it means only producing one thumbnail at start time), or a relative value to the stream duration (For example, 50% to stop at half of stream duration from start time). The default value is 100%, which means to stop at the end of the stream.
+	Range *string `json:"range,omitempty" tf:"range,omitempty"`
+
+	// The start position, with reference to the input video, at which the overlay starts. The value should be in ISO 8601 format. For example, PT05S to start the overlay at 5 seconds into the input video. If not specified the overlay starts from the beginning of the input video.
+	Start *string `json:"start,omitempty" tf:"start,omitempty"`
+
+	// The intervals at which thumbnails are generated. The value can be in ISO 8601 format (For example, PT05S for one image every 5 seconds), or a frame count (For example, 30 for one image every 30 frames), or a relative value to stream duration (For example, 10% for one image every 10% of stream duration). Note: Step value will affect the first generated thumbnail, which may not be exactly the one specified at transform preset start time. This is due to the encoder, which tries to select the best thumbnail between start time and Step position from start time as the first output. As the default value is 10%, it means if stream has long duration, the first generated thumbnail might be far away from the one specified at start time. Try to select reasonable value for Step if the first thumbnail is expected close to start time, or set Range value at 1 if only one thumbnail is needed at start time.
+	Step *string `json:"step,omitempty" tf:"step,omitempty"`
+
+	// The resizing mode, which indicates how the input video will be resized to fit the desired output resolution(s). Possible values are AutoFit, AutoSize or None. Default to AutoSize.
+	StretchMode *string `json:"stretchMode,omitempty" tf:"stretch_mode,omitempty"`
+
+	// Specifies the synchronization mode for the video. Possible values are Auto, Cfr, Passthrough or Vfr. Default to Auto.
+	SyncMode *string `json:"syncMode,omitempty" tf:"sync_mode,omitempty"`
+}
+
+type PngImageLayerInitParameters struct {
+
+	// The height of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
+	Height *string `json:"height,omitempty" tf:"height,omitempty"`
+
+	// Specifies the label for the codec. The label can be used to control muxing behavior.
+	Label *string `json:"label,omitempty" tf:"label,omitempty"`
+
+	// The width of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
+	Width *string `json:"width,omitempty" tf:"width,omitempty"`
+}
+
+type PngImageLayerObservation struct {
+
+	// The height of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
+	Height *string `json:"height,omitempty" tf:"height,omitempty"`
+
+	// Specifies the label for the codec. The label can be used to control muxing behavior.
+	Label *string `json:"label,omitempty" tf:"label,omitempty"`
+
+	// The width of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
+	Width *string `json:"width,omitempty" tf:"width,omitempty"`
+}
+
+type PngImageLayerParameters struct {
+
+	// The height of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
+	// +kubebuilder:validation:Optional
+	Height *string `json:"height,omitempty" tf:"height,omitempty"`
+
+	// Specifies the label for the codec. The label can be used to control muxing behavior.
+	// +kubebuilder:validation:Optional
+	Label *string `json:"label,omitempty" tf:"label,omitempty"`
+
+	// The width of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
+	// +kubebuilder:validation:Optional
+	Width *string `json:"width,omitempty" tf:"width,omitempty"`
+}
+
+type PngImageObservation struct {
+
+	// The distance between two key frames. The value should be non-zero in the range 0.5 to 20 seconds, specified in ISO 8601 format. Note that this setting is ignored if sync_mode is set to Passthrough, where the KeyFrameInterval value will follow the input source setting. Defaults to PT2S.
+	KeyFrameInterval *string `json:"keyFrameInterval,omitempty" tf:"key_frame_interval,omitempty"`
+
+	// Specifies the label for the codec. The label can be used to control muxing behavior.
+	Label *string `json:"label,omitempty" tf:"label,omitempty"`
+
+	// One or more layer blocks as defined below.
+	Layer []PngImageLayerObservation `json:"layer,omitempty" tf:"layer,omitempty"`
+
+	// The position relative to transform preset start time in the input video at which to stop generating thumbnails. The value can be in ISO 8601 format (For example, PT5M30S to stop at 5 minutes and 30 seconds from start time), or a frame count (For example, 300 to stop at the 300th frame from the frame at start time. If this value is 1, it means only producing one thumbnail at start time), or a relative value to the stream duration (For example, 50% to stop at half of stream duration from start time). The default value is 100%, which means to stop at the end of the stream.
+	Range *string `json:"range,omitempty" tf:"range,omitempty"`
+
+	// The start position, with reference to the input video, at which the overlay starts. The value should be in ISO 8601 format. For example, PT05S to start the overlay at 5 seconds into the input video. If not specified the overlay starts from the beginning of the input video.
+	Start *string `json:"start,omitempty" tf:"start,omitempty"`
+
+	// The intervals at which thumbnails are generated. The value can be in ISO 8601 format (For example, PT05S for one image every 5 seconds), or a frame count (For example, 30 for one image every 30 frames), or a relative value to stream duration (For example, 10% for one image every 10% of stream duration). Note: Step value will affect the first generated thumbnail, which may not be exactly the one specified at transform preset start time. This is due to the encoder, which tries to select the best thumbnail between start time and Step position from start time as the first output. As the default value is 10%, it means if stream has long duration, the first generated thumbnail might be far away from the one specified at start time. Try to select reasonable value for Step if the first thumbnail is expected close to start time, or set Range value at 1 if only one thumbnail is needed at start time.
+	Step *string `json:"step,omitempty" tf:"step,omitempty"`
+
+	// The resizing mode, which indicates how the input video will be resized to fit the desired output resolution(s). Possible values are AutoFit, AutoSize or None. Default to AutoSize.
+	StretchMode *string `json:"stretchMode,omitempty" tf:"stretch_mode,omitempty"`
+
+	// Specifies the synchronization mode for the video. Possible values are Auto, Cfr, Passthrough or Vfr. Default to Auto.
+	SyncMode *string `json:"syncMode,omitempty" tf:"sync_mode,omitempty"`
+}
+
+type PngImageParameters struct {
+
+	// The distance between two key frames. The value should be non-zero in the range 0.5 to 20 seconds, specified in ISO 8601 format. Note that this setting is ignored if sync_mode is set to Passthrough, where the KeyFrameInterval value will follow the input source setting. Defaults to PT2S.
+ // +kubebuilder:validation:Optional + KeyFrameInterval *string `json:"keyFrameInterval,omitempty" tf:"key_frame_interval,omitempty"` + + // Specifies the label for the codec. The label can be used to control muxing behavior. + // +kubebuilder:validation:Optional + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // One or more layer blocks as defined below. + // +kubebuilder:validation:Optional + Layer []PngImageLayerParameters `json:"layer,omitempty" tf:"layer,omitempty"` + + // The position relative to transform preset start time in the input video at which to stop generating thumbnails. The value can be in ISO 8601 format (For example, PT5M30S to stop at 5 minutes and 30 seconds from start time), or a frame count (For example, 300 to stop at the 300th frame from the frame at start time. If this value is 1, it means only producing one thumbnail at start time), or a relative value to the stream duration (For example, 50% to stop at half of stream duration from start time). The default value is 100%, which means to stop at the end of the stream. + // +kubebuilder:validation:Optional + Range *string `json:"range,omitempty" tf:"range,omitempty"` + + // The start position, with reference to the input video, at which the overlay starts. The value should be in ISO 8601 format. For example, PT05S to start the overlay at 5 seconds into the input video. If not specified the overlay starts from the beginning of the input video. + // +kubebuilder:validation:Optional + Start *string `json:"start" tf:"start,omitempty"` + + // The intervals at which thumbnails are generated. The value can be in ISO 8601 format (For example, PT05S for one image every 5 seconds), or a frame count (For example, 30 for one image every 30 frames), or a relative value to stream duration (For example, 10% for one image every 10% of stream duration). Note: Step value will affect the first generated thumbnail, which may not be exactly the one specified at transform preset start time. 
This is due to the encoder, which tries to select the best thumbnail between start time and Step position from start time as the first output. As the default value is 10%, it means if stream has long duration, the first generated thumbnail might be far away from the one specified at start time. Try to select reasonable value for Step if the first thumbnail is expected close to start time, or set Range value at 1 if only one thumbnail is needed at start time. + // +kubebuilder:validation:Optional + Step *string `json:"step,omitempty" tf:"step,omitempty"` + + // The resizing mode, which indicates how the input video will be resized to fit the desired output resolution(s). Possible values are AutoFit, AutoSize or None. Default to AutoSize. + // +kubebuilder:validation:Optional + StretchMode *string `json:"stretchMode,omitempty" tf:"stretch_mode,omitempty"` + + // Specifies the synchronization mode for the video. Possible values are Auto, Cfr, Passthrough or Vfr. Default to Auto. + // +kubebuilder:validation:Optional + SyncMode *string `json:"syncMode,omitempty" tf:"sync_mode,omitempty"` +} + +type PngInitParameters struct { + + // The file naming pattern used for the creation of output files. The following macros are supported in the file name: {Basename} - An expansion macro that will use the name of the input video file. If the base name(the file suffix is not included) of the input video file is less than 32 characters long, the base name of input video files will be used. If the length of base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length. {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {AudioStream} - string "Audio" plus audio stream number(start from 1). {Bitrate} - The audio/video bitrate in kbps. Not applicable to thumbnails. 
// {Codec} - The type of the audio/video codec. {Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed from the filename.
	FilenamePattern *string `json:"filenamePattern,omitempty" tf:"filename_pattern,omitempty"`
}

type PngObservation struct {

	// The file naming pattern used for the creation of output files. The following macros are supported in the file name: {Basename} - An expansion macro that will use the name of the input video file. If the base name(the file suffix is not included) of the input video file is less than 32 characters long, the base name of input video files will be used. If the length of base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length. {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {AudioStream} - string "Audio" plus audio stream number(start from 1). {Bitrate} - The audio/video bitrate in kbps. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. {Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed from the filename.
	FilenamePattern *string `json:"filenamePattern,omitempty" tf:"filename_pattern,omitempty"`
}

type PngParameters struct {

	// The file naming pattern used for the creation of output files. The following macros are supported in the file name: {Basename} - An expansion macro that will use the name of the input video file. If the base name(the file suffix is not included) of the input video file is less than 32 characters long, the base name of input video files will be used. If the length of base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length. {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {AudioStream} - string "Audio" plus audio stream number(start from 1). {Bitrate} - The audio/video bitrate in kbps. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. {Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed from the filename.
	// +kubebuilder:validation:Optional
	FilenamePattern *string `json:"filenamePattern" tf:"filename_pattern,omitempty"`
}

type PositionInitParameters struct {

	// The height of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
	Height *string `json:"height,omitempty" tf:"height,omitempty"`

	// The number of pixels from the left-margin. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
	Left *string `json:"left,omitempty" tf:"left,omitempty"`

	// The number of pixels from the top-margin. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
	Top *string `json:"top,omitempty" tf:"top,omitempty"`

	// The width of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
	Width *string `json:"width,omitempty" tf:"width,omitempty"`
}

type PositionObservation struct {

	// The height of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
	Height *string `json:"height,omitempty" tf:"height,omitempty"`

	// The number of pixels from the left-margin. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
	Left *string `json:"left,omitempty" tf:"left,omitempty"`

	// The number of pixels from the top-margin. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
	Top *string `json:"top,omitempty" tf:"top,omitempty"`

	// The width of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
	Width *string `json:"width,omitempty" tf:"width,omitempty"`
}

type PositionParameters struct {

	// The height of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
	// +kubebuilder:validation:Optional
	Height *string `json:"height,omitempty" tf:"height,omitempty"`

	// The number of pixels from the left-margin. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
	// +kubebuilder:validation:Optional
	Left *string `json:"left,omitempty" tf:"left,omitempty"`

	// The number of pixels from the top-margin. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
	// +kubebuilder:validation:Optional
	Top *string `json:"top,omitempty" tf:"top,omitempty"`

	// The width of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
	// +kubebuilder:validation:Optional
	Width *string `json:"width,omitempty" tf:"width,omitempty"`
}

type PresetConfigurationInitParameters struct {

	// The complexity of the encoding. Possible values are Balanced, Speed or Quality.
	Complexity *string `json:"complexity,omitempty" tf:"complexity,omitempty"`

	// Specifies the interleave mode of the output to control how audio are stored in the container format. Possible values are InterleavedOutput and NonInterleavedOutput.
	InterleaveOutput *string `json:"interleaveOutput,omitempty" tf:"interleave_output,omitempty"`

	// The key frame interval in seconds. Possible value is a positive float.
// For example, set as 2.0 to reduce the playback buffering for some players.
	KeyFrameIntervalInSeconds *float64 `json:"keyFrameIntervalInSeconds,omitempty" tf:"key_frame_interval_in_seconds,omitempty"`

	// The maximum bitrate in bits per second (threshold for the top video layer). For example, set as 6000000 to avoid producing very high bitrate outputs for contents with high complexity.
	MaxBitrateBps *float64 `json:"maxBitrateBps,omitempty" tf:"max_bitrate_bps,omitempty"`

	// The maximum height of output video layers. For example, set as 720 to produce output layers up to 720P even if the input is 4K.
	MaxHeight *float64 `json:"maxHeight,omitempty" tf:"max_height,omitempty"`

	// The maximum number of output video layers. For example, set as 4 to make sure at most 4 output layers are produced to control the overall cost of the encoding job.
	MaxLayers *float64 `json:"maxLayers,omitempty" tf:"max_layers,omitempty"`

	// The minimum bitrate in bits per second (threshold for the bottom video layer). For example, set as 200000 to have a bottom layer that covers users with low network bandwidth.
	MinBitrateBps *float64 `json:"minBitrateBps,omitempty" tf:"min_bitrate_bps,omitempty"`

	// The minimum height of output video layers. For example, set as 360 to avoid output layers of smaller resolutions like 180P.
	MinHeight *float64 `json:"minHeight,omitempty" tf:"min_height,omitempty"`
}

type PresetConfigurationObservation struct {

	// The complexity of the encoding. Possible values are Balanced, Speed or Quality.
	Complexity *string `json:"complexity,omitempty" tf:"complexity,omitempty"`

	// Specifies the interleave mode of the output to control how audio are stored in the container format. Possible values are InterleavedOutput and NonInterleavedOutput.
	InterleaveOutput *string `json:"interleaveOutput,omitempty" tf:"interleave_output,omitempty"`

	// The key frame interval in seconds. Possible value is a positive float. For example, set as 2.0 to reduce the playback buffering for some players.
	KeyFrameIntervalInSeconds *float64 `json:"keyFrameIntervalInSeconds,omitempty" tf:"key_frame_interval_in_seconds,omitempty"`

	// The maximum bitrate in bits per second (threshold for the top video layer). For example, set as 6000000 to avoid producing very high bitrate outputs for contents with high complexity.
	MaxBitrateBps *float64 `json:"maxBitrateBps,omitempty" tf:"max_bitrate_bps,omitempty"`

	// The maximum height of output video layers. For example, set as 720 to produce output layers up to 720P even if the input is 4K.
	MaxHeight *float64 `json:"maxHeight,omitempty" tf:"max_height,omitempty"`

	// The maximum number of output video layers. For example, set as 4 to make sure at most 4 output layers are produced to control the overall cost of the encoding job.
	MaxLayers *float64 `json:"maxLayers,omitempty" tf:"max_layers,omitempty"`

	// The minimum bitrate in bits per second (threshold for the bottom video layer). For example, set as 200000 to have a bottom layer that covers users with low network bandwidth.
	MinBitrateBps *float64 `json:"minBitrateBps,omitempty" tf:"min_bitrate_bps,omitempty"`

	// The minimum height of output video layers. For example, set as 360 to avoid output layers of smaller resolutions like 180P.
	MinHeight *float64 `json:"minHeight,omitempty" tf:"min_height,omitempty"`
}

type PresetConfigurationParameters struct {

	// The complexity of the encoding. Possible values are Balanced, Speed or Quality.
	// +kubebuilder:validation:Optional
	Complexity *string `json:"complexity,omitempty" tf:"complexity,omitempty"`

	// Specifies the interleave mode of the output to control how audio are stored in the container format. Possible values are InterleavedOutput and NonInterleavedOutput.
	// +kubebuilder:validation:Optional
	InterleaveOutput *string `json:"interleaveOutput,omitempty" tf:"interleave_output,omitempty"`

	// The key frame interval in seconds. Possible value is a positive float. For example, set as 2.0 to reduce the playback buffering for some players.
	// +kubebuilder:validation:Optional
	KeyFrameIntervalInSeconds *float64 `json:"keyFrameIntervalInSeconds,omitempty" tf:"key_frame_interval_in_seconds,omitempty"`

	// The maximum bitrate in bits per second (threshold for the top video layer). For example, set as 6000000 to avoid producing very high bitrate outputs for contents with high complexity.
	// +kubebuilder:validation:Optional
	MaxBitrateBps *float64 `json:"maxBitrateBps,omitempty" tf:"max_bitrate_bps,omitempty"`

	// The maximum height of output video layers. For example, set as 720 to produce output layers up to 720P even if the input is 4K.
	// +kubebuilder:validation:Optional
	MaxHeight *float64 `json:"maxHeight,omitempty" tf:"max_height,omitempty"`

	// The maximum number of output video layers. For example, set as 4 to make sure at most 4 output layers are produced to control the overall cost of the encoding job.
	// +kubebuilder:validation:Optional
	MaxLayers *float64 `json:"maxLayers,omitempty" tf:"max_layers,omitempty"`

	// The minimum bitrate in bits per second (threshold for the bottom video layer). For example, set as 200000 to have a bottom layer that covers users with low network bandwidth.
	// +kubebuilder:validation:Optional
	MinBitrateBps *float64 `json:"minBitrateBps,omitempty" tf:"min_bitrate_bps,omitempty"`

	// The minimum height of output video layers. For example, set as 360 to avoid output layers of smaller resolutions like 180P.
	// +kubebuilder:validation:Optional
	MinHeight *float64 `json:"minHeight,omitempty" tf:"min_height,omitempty"`
}

type TransformInitParameters struct {

	// An optional verbose description of the Transform.
Description *string `json:"description,omitempty" tf:"description,omitempty"`

	// One or more output blocks as defined below. At least one output must be defined.
	Output []OutputInitParameters `json:"output,omitempty" tf:"output,omitempty"`
}

type TransformObservation struct {

	// An optional verbose description of the Transform.
	Description *string `json:"description,omitempty" tf:"description,omitempty"`

	// The ID of the Transform.
	ID *string `json:"id,omitempty" tf:"id,omitempty"`

	// The Media Services account name. Changing this forces a new Transform to be created.
	MediaServicesAccountName *string `json:"mediaServicesAccountName,omitempty" tf:"media_services_account_name,omitempty"`

	// One or more output blocks as defined below. At least one output must be defined.
	Output []OutputObservation `json:"output,omitempty" tf:"output,omitempty"`

	// The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
	ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"`
}

type TransformParameters struct {

	// An optional verbose description of the Transform.
	// +kubebuilder:validation:Optional
	Description *string `json:"description,omitempty" tf:"description,omitempty"`

	// The Media Services account name. Changing this forces a new Transform to be created.
	// +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/media/v1beta2.ServicesAccount
	// +kubebuilder:validation:Optional
	MediaServicesAccountName *string `json:"mediaServicesAccountName,omitempty" tf:"media_services_account_name,omitempty"`

	// Reference to a ServicesAccount in media to populate mediaServicesAccountName.
	// +kubebuilder:validation:Optional
	MediaServicesAccountNameRef *v1.Reference `json:"mediaServicesAccountNameRef,omitempty" tf:"-"`

	// Selector for a ServicesAccount in media to populate mediaServicesAccountName.
	// +kubebuilder:validation:Optional
	MediaServicesAccountNameSelector *v1.Selector `json:"mediaServicesAccountNameSelector,omitempty" tf:"-"`

	// One or more output blocks as defined below. At least one output must be defined.
	// +kubebuilder:validation:Optional
	Output []OutputParameters `json:"output,omitempty" tf:"output,omitempty"`

	// The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
	// +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup
	// +kubebuilder:validation:Optional
	ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"`

	// Reference to a ResourceGroup in azure to populate resourceGroupName.
	// +kubebuilder:validation:Optional
	ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"`

	// Selector for a ResourceGroup in azure to populate resourceGroupName.
	// +kubebuilder:validation:Optional
	ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"`
}

type TransportStreamInitParameters struct {

	// The file naming pattern used for the creation of output files. The following macros are supported in the file name: {Basename} - An expansion macro that will use the name of the input video file. If the base name(the file suffix is not included) of the input video file is less than 32 characters long, the base name of input video files will be used. If the length of base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length. {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {AudioStream} - string "Audio" plus audio stream number(start from 1). {Bitrate} - The audio/video bitrate in kbps. Not applicable to thumbnails.
// {Codec} - The type of the audio/video codec. {Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed from the filename.
	FilenamePattern *string `json:"filenamePattern,omitempty" tf:"filename_pattern,omitempty"`

	// One or more output_file blocks as defined above.
	OutputFile []TransportStreamOutputFileInitParameters `json:"outputFile,omitempty" tf:"output_file,omitempty"`
}

type TransportStreamObservation struct {

	// The file naming pattern used for the creation of output files. The following macros are supported in the file name: {Basename} - An expansion macro that will use the name of the input video file. If the base name(the file suffix is not included) of the input video file is less than 32 characters long, the base name of input video files will be used. If the length of base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length. {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {AudioStream} - string "Audio" plus audio stream number(start from 1). {Bitrate} - The audio/video bitrate in kbps. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. {Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed from the filename.
	FilenamePattern *string `json:"filenamePattern,omitempty" tf:"filename_pattern,omitempty"`

	// One or more output_file blocks as defined above.
	OutputFile []TransportStreamOutputFileObservation `json:"outputFile,omitempty" tf:"output_file,omitempty"`
}

type TransportStreamOutputFileInitParameters struct {

	// The list of labels that describe how the encoder should multiplex video and audio into an output file. For example, if the encoder is producing two video layers with labels v1 and v2, and one audio layer with label a1, then an array like ["v1", "a1"] tells the encoder to produce an output file with the video track represented by v1 and the audio track represented by a1.
	Labels []*string `json:"labels,omitempty" tf:"labels,omitempty"`
}

type TransportStreamOutputFileObservation struct {

	// The list of labels that describe how the encoder should multiplex video and audio into an output file. For example, if the encoder is producing two video layers with labels v1 and v2, and one audio layer with label a1, then an array like ["v1", "a1"] tells the encoder to produce an output file with the video track represented by v1 and the audio track represented by a1.
	Labels []*string `json:"labels,omitempty" tf:"labels,omitempty"`
}

type TransportStreamOutputFileParameters struct {

	// The list of labels that describe how the encoder should multiplex video and audio into an output file. For example, if the encoder is producing two video layers with labels v1 and v2, and one audio layer with label a1, then an array like ["v1", "a1"] tells the encoder to produce an output file with the video track represented by v1 and the audio track represented by a1.
	// +kubebuilder:validation:Optional
	Labels []*string `json:"labels" tf:"labels,omitempty"`
}

type TransportStreamParameters struct {

	// The file naming pattern used for the creation of output files. The following macros are supported in the file name: {Basename} - An expansion macro that will use the name of the input video file. If the base name(the file suffix is not included) of the input video file is less than 32 characters long, the base name of input video files will be used. If the length of base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length. {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {AudioStream} - string "Audio" plus audio stream number(start from 1). {Bitrate} - The audio/video bitrate in kbps. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. {Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed from the filename.
	// +kubebuilder:validation:Optional
	FilenamePattern *string `json:"filenamePattern" tf:"filename_pattern,omitempty"`

	// One or more output_file blocks as defined above.
	// +kubebuilder:validation:Optional
	OutputFile []TransportStreamOutputFileParameters `json:"outputFile,omitempty" tf:"output_file,omitempty"`
}

type VideoAnalyzerPresetInitParameters struct {

	// Possible values are Basic or Standard. Determines the set of audio analysis operations to be performed. Default to Standard.
	AudioAnalysisMode *string `json:"audioAnalysisMode,omitempty" tf:"audio_analysis_mode,omitempty"`

	// The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode:Basic, since automatic language detection is not included in basic mode. If the language isn't specified, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fall back to en-US. The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463.
AudioLanguage *string `json:"audioLanguage,omitempty" tf:"audio_language,omitempty"`

	// Dictionary containing key value pairs for parameters not exposed in the preset itself.
	// +mapType=granular
	ExperimentalOptions map[string]*string `json:"experimentalOptions,omitempty" tf:"experimental_options,omitempty"`

	// Defines the type of insights that you want the service to generate. The allowed values are AudioInsightsOnly, VideoInsightsOnly, and AllInsights. If you set this to AllInsights and the input is audio only, then only audio insights are generated. Similarly, if the input is video only, then only video insights are generated. It is recommended that you not use AudioInsightsOnly if you expect some of your inputs to be video only; or use VideoInsightsOnly if you expect some of your inputs to be audio only. Your Jobs in such conditions would error out. Default to AllInsights.
	InsightsType *string `json:"insightsType,omitempty" tf:"insights_type,omitempty"`
}

type VideoAnalyzerPresetObservation struct {

	// Possible values are Basic or Standard. Determines the set of audio analysis operations to be performed. Default to Standard.
	AudioAnalysisMode *string `json:"audioAnalysisMode,omitempty" tf:"audio_analysis_mode,omitempty"`

	// The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode:Basic, since automatic language detection is not included in basic mode. If the language isn't specified, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fall back to en-US. The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463.
	AudioLanguage *string `json:"audioLanguage,omitempty" tf:"audio_language,omitempty"`

	// Dictionary containing key value pairs for parameters not exposed in the preset itself.
	// +mapType=granular
	ExperimentalOptions map[string]*string `json:"experimentalOptions,omitempty" tf:"experimental_options,omitempty"`

	// Defines the type of insights that you want the service to generate. The allowed values are AudioInsightsOnly, VideoInsightsOnly, and AllInsights. If you set this to AllInsights and the input is audio only, then only audio insights are generated. Similarly, if the input is video only, then only video insights are generated. It is recommended that you not use AudioInsightsOnly if you expect some of your inputs to be video only; or use VideoInsightsOnly if you expect some of your inputs to be audio only. Your Jobs in such conditions would error out. Default to AllInsights.
	InsightsType *string `json:"insightsType,omitempty" tf:"insights_type,omitempty"`
}

type VideoAnalyzerPresetParameters struct {

	// Possible values are Basic or Standard. Determines the set of audio analysis operations to be performed. Default to Standard.
	// +kubebuilder:validation:Optional
	AudioAnalysisMode *string `json:"audioAnalysisMode,omitempty" tf:"audio_analysis_mode,omitempty"`

	// The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode:Basic, since automatic language detection is not included in basic mode. If the language isn't specified, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fall back to en-US. The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463.
	// +kubebuilder:validation:Optional
	AudioLanguage *string `json:"audioLanguage,omitempty" tf:"audio_language,omitempty"`

	// Dictionary containing key value pairs for parameters not exposed in the preset itself.
	// +kubebuilder:validation:Optional
	// +mapType=granular
	ExperimentalOptions map[string]*string `json:"experimentalOptions,omitempty" tf:"experimental_options,omitempty"`

	// Defines the type of insights that you want the service to generate. The allowed values are AudioInsightsOnly, VideoInsightsOnly, and AllInsights. If you set this to AllInsights and the input is audio only, then only audio insights are generated. Similarly, if the input is video only, then only video insights are generated. It is recommended that you not use AudioInsightsOnly if you expect some of your inputs to be video only; or use VideoInsightsOnly if you expect some of your inputs to be audio only. Your Jobs in such conditions would error out. Default to AllInsights.
	// +kubebuilder:validation:Optional
	InsightsType *string `json:"insightsType,omitempty" tf:"insights_type,omitempty"`
}

type VideoCropRectangleInitParameters struct {

	// The height of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%).
	Height *string `json:"height,omitempty" tf:"height,omitempty"`

	// The number of pixels from the left-margin.
This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%). + Left *string `json:"left,omitempty" tf:"left,omitempty"` + + // The number of pixels from the top-margin. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%). + Top *string `json:"top,omitempty" tf:"top,omitempty"` + + // The width of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%). + Width *string `json:"width,omitempty" tf:"width,omitempty"` +} + +type VideoCropRectangleObservation struct { + + // The height of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%). + Height *string `json:"height,omitempty" tf:"height,omitempty"` + + // The number of pixels from the left-margin. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%). + Left *string `json:"left,omitempty" tf:"left,omitempty"` + + // The number of pixels from the top-margin. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%). + Top *string `json:"top,omitempty" tf:"top,omitempty"` + + // The width of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%). + Width *string `json:"width,omitempty" tf:"width,omitempty"` +} + +type VideoCropRectangleParameters struct { + + // The height of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%). + // +kubebuilder:validation:Optional + Height *string `json:"height,omitempty" tf:"height,omitempty"` + + // The number of pixels from the left-margin. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%). 
+ // +kubebuilder:validation:Optional + Left *string `json:"left,omitempty" tf:"left,omitempty"` + + // The number of pixels from the top-margin. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%). + // +kubebuilder:validation:Optional + Top *string `json:"top,omitempty" tf:"top,omitempty"` + + // The width of the rectangular region in pixels. This can be absolute pixel value (e.g 100), or relative to the size of the video (For example, 50%). + // +kubebuilder:validation:Optional + Width *string `json:"width,omitempty" tf:"width,omitempty"` +} + +type VideoInitParameters struct { + + // The gain level of audio in the overlay. The value should be in range between 0 to 1.0. The default is 1.0. + AudioGainLevel *float64 `json:"audioGainLevel,omitempty" tf:"audio_gain_level,omitempty"` + + // A crop_rectangle block as defined above. + CropRectangle *VideoCropRectangleInitParameters `json:"cropRectangle,omitempty" tf:"crop_rectangle,omitempty"` + + // The end position, with reference to the input video, at which the overlay ends. The value should be in ISO 8601 format. For example, PT30S to end the overlay at 30 seconds into the input video. If not specified or the value is greater than the input video duration, the overlay will be applied until the end of the input video if the overlay media duration is greater than the input video duration, else the overlay will last as long as the overlay media duration. + End *string `json:"end,omitempty" tf:"end,omitempty"` + + // The duration over which the overlay fades in onto the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade in (same as PT0S). + FadeInDuration *string `json:"fadeInDuration,omitempty" tf:"fade_in_duration,omitempty"` + + // The duration over which the overlay fades out of the input video. The value should be in ISO 8601 duration format. 
If not specified the default behavior is to have no fade out (same as PT0S). + FadeOutDuration *string `json:"fadeOutDuration,omitempty" tf:"fade_out_duration,omitempty"` + + // The label of the job input which is to be used as an overlay. The input must specify exact one file. You can specify an image file in JPG, PNG, GIF or BMP format, or an audio file (such as a WAV, MP3, WMA or M4A file), or a video file. + InputLabel *string `json:"inputLabel,omitempty" tf:"input_label,omitempty"` + + // The opacity of the overlay. The value should be in the range between 0 to 1.0. Default to 1.0, which means the overlay is opaque. + Opacity *float64 `json:"opacity,omitempty" tf:"opacity,omitempty"` + + // A position block as defined above. + Position *PositionInitParameters `json:"position,omitempty" tf:"position,omitempty"` + + // The start position, with reference to the input video, at which the overlay starts. The value should be in ISO 8601 format. For example, PT05S to start the overlay at 5 seconds into the input video. If not specified the overlay starts from the beginning of the input video. + Start *string `json:"start,omitempty" tf:"start,omitempty"` +} + +type VideoObservation struct { + + // The gain level of audio in the overlay. The value should be in range between 0 to 1.0. The default is 1.0. + AudioGainLevel *float64 `json:"audioGainLevel,omitempty" tf:"audio_gain_level,omitempty"` + + // A crop_rectangle block as defined above. + CropRectangle *VideoCropRectangleObservation `json:"cropRectangle,omitempty" tf:"crop_rectangle,omitempty"` + + // The end position, with reference to the input video, at which the overlay ends. The value should be in ISO 8601 format. For example, PT30S to end the overlay at 30 seconds into the input video. 
If not specified or the value is greater than the input video duration, the overlay will be applied until the end of the input video if the overlay media duration is greater than the input video duration, else the overlay will last as long as the overlay media duration. + End *string `json:"end,omitempty" tf:"end,omitempty"` + + // The duration over which the overlay fades in onto the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade in (same as PT0S). + FadeInDuration *string `json:"fadeInDuration,omitempty" tf:"fade_in_duration,omitempty"` + + // The duration over which the overlay fades out of the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade out (same as PT0S). + FadeOutDuration *string `json:"fadeOutDuration,omitempty" tf:"fade_out_duration,omitempty"` + + // The label of the job input which is to be used as an overlay. The input must specify exact one file. You can specify an image file in JPG, PNG, GIF or BMP format, or an audio file (such as a WAV, MP3, WMA or M4A file), or a video file. + InputLabel *string `json:"inputLabel,omitempty" tf:"input_label,omitempty"` + + // The opacity of the overlay. The value should be in the range between 0 to 1.0. Default to 1.0, which means the overlay is opaque. + Opacity *float64 `json:"opacity,omitempty" tf:"opacity,omitempty"` + + // A position block as defined above. + Position *PositionObservation `json:"position,omitempty" tf:"position,omitempty"` + + // The start position, with reference to the input video, at which the overlay starts. The value should be in ISO 8601 format. For example, PT05S to start the overlay at 5 seconds into the input video. If not specified the overlay starts from the beginning of the input video. + Start *string `json:"start,omitempty" tf:"start,omitempty"` +} + +type VideoParameters struct { + + // The gain level of audio in the overlay. 
The value should be in range between 0 to 1.0. The default is 1.0. + // +kubebuilder:validation:Optional + AudioGainLevel *float64 `json:"audioGainLevel,omitempty" tf:"audio_gain_level,omitempty"` + + // A crop_rectangle block as defined above. + // +kubebuilder:validation:Optional + CropRectangle *VideoCropRectangleParameters `json:"cropRectangle,omitempty" tf:"crop_rectangle,omitempty"` + + // The end position, with reference to the input video, at which the overlay ends. The value should be in ISO 8601 format. For example, PT30S to end the overlay at 30 seconds into the input video. If not specified or the value is greater than the input video duration, the overlay will be applied until the end of the input video if the overlay media duration is greater than the input video duration, else the overlay will last as long as the overlay media duration. + // +kubebuilder:validation:Optional + End *string `json:"end,omitempty" tf:"end,omitempty"` + + // The duration over which the overlay fades in onto the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade in (same as PT0S). + // +kubebuilder:validation:Optional + FadeInDuration *string `json:"fadeInDuration,omitempty" tf:"fade_in_duration,omitempty"` + + // The duration over which the overlay fades out of the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade out (same as PT0S). + // +kubebuilder:validation:Optional + FadeOutDuration *string `json:"fadeOutDuration,omitempty" tf:"fade_out_duration,omitempty"` + + // The label of the job input which is to be used as an overlay. The input must specify exact one file. You can specify an image file in JPG, PNG, GIF or BMP format, or an audio file (such as a WAV, MP3, WMA or M4A file), or a video file. 
+ // +kubebuilder:validation:Optional + InputLabel *string `json:"inputLabel" tf:"input_label,omitempty"` + + // The opacity of the overlay. The value should be in the range between 0 to 1.0. Default to 1.0, which means the overlay is opaque. + // +kubebuilder:validation:Optional + Opacity *float64 `json:"opacity,omitempty" tf:"opacity,omitempty"` + + // A position block as defined above. + // +kubebuilder:validation:Optional + Position *PositionParameters `json:"position,omitempty" tf:"position,omitempty"` + + // The start position, with reference to the input video, at which the overlay starts. The value should be in ISO 8601 format. For example, PT05S to start the overlay at 5 seconds into the input video. If not specified the overlay starts from the beginning of the input video. + // +kubebuilder:validation:Optional + Start *string `json:"start,omitempty" tf:"start,omitempty"` +} + +// TransformSpec defines the desired state of Transform +type TransformSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider TransformParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider TransformInitParameters `json:"initProvider,omitempty"` +} + +// TransformStatus defines the observed state of Transform. 
+type TransformStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider TransformObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Transform is the Schema for the Transforms API. Manages a Transform. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type Transform struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec TransformSpec `json:"spec"` + Status TransformStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// TransformList contains a list of Transforms +type TransformList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Transform `json:"items"` +} + +// Repository type metadata. +var ( + Transform_Kind = "Transform" + Transform_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Transform_Kind}.String() + Transform_KindAPIVersion = Transform_Kind + "." + CRDGroupVersion.String() + Transform_GroupVersionKind = CRDGroupVersion.WithKind(Transform_Kind) +) + +func init() { + SchemeBuilder.Register(&Transform{}, &TransformList{}) +} diff --git a/apis/netapp/v1beta1/zz_generated.conversion_hubs.go b/apis/netapp/v1beta1/zz_generated.conversion_hubs.go index 3d6461aa1..76eab5a29 100755 --- a/apis/netapp/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/netapp/v1beta1/zz_generated.conversion_hubs.go @@ -6,17 +6,8 @@ package v1beta1 -// Hub marks this type as a conversion hub. 
-func (tr *Account) Hub() {} - // Hub marks this type as a conversion hub. func (tr *Pool) Hub() {} // Hub marks this type as a conversion hub. func (tr *Snapshot) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *SnapshotPolicy) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Volume) Hub() {} diff --git a/apis/netapp/v1beta1/zz_generated.conversion_spokes.go b/apis/netapp/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..afb1eb66c --- /dev/null +++ b/apis/netapp/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,74 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Account to the hub type. +func (tr *Account) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Account type. +func (tr *Account) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this SnapshotPolicy to the hub type. 
+func (tr *SnapshotPolicy) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the SnapshotPolicy type. +func (tr *SnapshotPolicy) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Volume to the hub type. +func (tr *Volume) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Volume type. 
+func (tr *Volume) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/netapp/v1beta1/zz_generated.resolvers.go b/apis/netapp/v1beta1/zz_generated.resolvers.go index f3e0a44c2..20ae5b648 100644 --- a/apis/netapp/v1beta1/zz_generated.resolvers.go +++ b/apis/netapp/v1beta1/zz_generated.resolvers.go @@ -59,7 +59,7 @@ func (mg *Pool) ResolveReferences(ctx context.Context, c client.Reader) error { var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("netapp.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("netapp.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -109,7 +109,7 @@ func (mg *Snapshot) ResolveReferences(ctx context.Context, c client.Reader) erro var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("netapp.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("netapp.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -166,7 +166,7 @@ func (mg *Snapshot) ResolveReferences(ctx context.Context, c client.Reader) erro mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("netapp.azure.upbound.io", 
"v1beta1", "Volume", "VolumeList") + m, l, err = apisresolver.GetManagedResource("netapp.azure.upbound.io", "v1beta2", "Volume", "VolumeList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/netapp/v1beta1/zz_pool_types.go b/apis/netapp/v1beta1/zz_pool_types.go index cf268d9fb..6f26b3fd3 100755 --- a/apis/netapp/v1beta1/zz_pool_types.go +++ b/apis/netapp/v1beta1/zz_pool_types.go @@ -69,7 +69,7 @@ type PoolObservation struct { type PoolParameters struct { // The name of the NetApp account in which the NetApp Pool should be created. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/netapp/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/netapp/v1beta2.Account // +kubebuilder:validation:Optional AccountName *string `json:"accountName,omitempty" tf:"account_name,omitempty"` diff --git a/apis/netapp/v1beta1/zz_snapshot_types.go b/apis/netapp/v1beta1/zz_snapshot_types.go index 0f6bd024e..3440b65fb 100755 --- a/apis/netapp/v1beta1/zz_snapshot_types.go +++ b/apis/netapp/v1beta1/zz_snapshot_types.go @@ -43,7 +43,7 @@ type SnapshotObservation struct { type SnapshotParameters struct { // The name of the NetApp account in which the NetApp Pool should be created. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/netapp/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/netapp/v1beta2.Account // +kubebuilder:validation:Optional AccountName *string `json:"accountName,omitempty" tf:"account_name,omitempty"` @@ -86,7 +86,7 @@ type SnapshotParameters struct { ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` // The name of the NetApp volume in which the NetApp Snapshot should be created. 
Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/netapp/v1beta1.Volume + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/netapp/v1beta2.Volume // +kubebuilder:validation:Optional VolumeName *string `json:"volumeName,omitempty" tf:"volume_name,omitempty"` diff --git a/apis/netapp/v1beta2/zz_account_terraformed.go b/apis/netapp/v1beta2/zz_account_terraformed.go new file mode 100755 index 000000000..0be7cb676 --- /dev/null +++ b/apis/netapp/v1beta2/zz_account_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Account +func (mg *Account) GetTerraformResourceType() string { + return "azurerm_netapp_account" +} + +// GetConnectionDetailsMapping for this Account +func (tr *Account) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"active_directory[*].password": "spec.forProvider.activeDirectory[*].passwordSecretRef"} +} + +// GetObservation of this Account +func (tr *Account) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Account +func (tr *Account) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Account +func (tr *Account) GetID() string { + if tr.Status.AtProvider.ID == nil { 
+ return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Account +func (tr *Account) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Account +func (tr *Account) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Account +func (tr *Account) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Account +func (tr *Account) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Account using its observed tfState. 
+// returns True if there are any spec changes for the resource. +func (tr *Account) LateInitialize(attrs []byte) (bool, error) { + params := &AccountParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Account) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/netapp/v1beta2/zz_account_types.go b/apis/netapp/v1beta2/zz_account_types.go new file mode 100755 index 000000000..8e1037c74 --- /dev/null +++ b/apis/netapp/v1beta2/zz_account_types.go @@ -0,0 +1,247 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AccountInitParameters struct { + + // A active_directory block as defined below. + ActiveDirectory *ActiveDirectoryInitParameters `json:"activeDirectory,omitempty" tf:"active_directory,omitempty"` + + // The identity block where it is used when customer managed keys based encryption will be enabled as defined below. + Identity *IdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A mapping of tags to assign to the resource. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type AccountObservation struct { + + // A active_directory block as defined below. + ActiveDirectory *ActiveDirectoryObservation `json:"activeDirectory,omitempty" tf:"active_directory,omitempty"` + + // The ID of the NetApp Account. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The identity block where it is used when customer managed keys based encryption will be enabled as defined below. + Identity *IdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the resource group where the NetApp Account should be created. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type AccountParameters struct { + + // A active_directory block as defined below. + // +kubebuilder:validation:Optional + ActiveDirectory *ActiveDirectoryParameters `json:"activeDirectory,omitempty" tf:"active_directory,omitempty"` + + // The identity block where it is used when customer managed keys based encryption will be enabled as defined below. + // +kubebuilder:validation:Optional + Identity *IdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the resource group where the NetApp Account should be created. 
Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ActiveDirectoryInitParameters struct { + + // A list of DNS server IP addresses for the Active Directory domain. Only allows IPv4 address. + DNSServers []*string `json:"dnsServers,omitempty" tf:"dns_servers,omitempty"` + + // The name of the Active Directory domain. + Domain *string `json:"domain,omitempty" tf:"domain,omitempty"` + + // The Organizational Unit (OU) within the Active Directory Domain. + OrganizationalUnit *string `json:"organizationalUnit,omitempty" tf:"organizational_unit,omitempty"` + + // The NetBIOS name which should be used for the NetApp SMB Server, which will be registered as a computer account in the AD and used to mount volumes. + SMBServerName *string `json:"smbServerName,omitempty" tf:"smb_server_name,omitempty"` + + // The Username of Active Directory Domain Administrator. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type ActiveDirectoryObservation struct { + + // A list of DNS server IP addresses for the Active Directory domain. Only allows IPv4 address. 
+ DNSServers []*string `json:"dnsServers,omitempty" tf:"dns_servers,omitempty"` + + // The name of the Active Directory domain. + Domain *string `json:"domain,omitempty" tf:"domain,omitempty"` + + // The Organizational Unit (OU) within the Active Directory Domain. + OrganizationalUnit *string `json:"organizationalUnit,omitempty" tf:"organizational_unit,omitempty"` + + // The NetBIOS name which should be used for the NetApp SMB Server, which will be registered as a computer account in the AD and used to mount volumes. + SMBServerName *string `json:"smbServerName,omitempty" tf:"smb_server_name,omitempty"` + + // The Username of Active Directory Domain Administrator. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type ActiveDirectoryParameters struct { + + // A list of DNS server IP addresses for the Active Directory domain. Only allows IPv4 address. + // +kubebuilder:validation:Optional + DNSServers []*string `json:"dnsServers" tf:"dns_servers,omitempty"` + + // The name of the Active Directory domain. + // +kubebuilder:validation:Optional + Domain *string `json:"domain" tf:"domain,omitempty"` + + // The Organizational Unit (OU) within the Active Directory Domain. + // +kubebuilder:validation:Optional + OrganizationalUnit *string `json:"organizationalUnit,omitempty" tf:"organizational_unit,omitempty"` + + // The password associated with the username. + // +kubebuilder:validation:Required + PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` + + // The NetBIOS name which should be used for the NetApp SMB Server, which will be registered as a computer account in the AD and used to mount volumes. + // +kubebuilder:validation:Optional + SMBServerName *string `json:"smbServerName" tf:"smb_server_name,omitempty"` + + // The Username of Active Directory Domain Administrator. 
+ // +kubebuilder:validation:Optional + Username *string `json:"username" tf:"username,omitempty"` +} + +type IdentityInitParameters struct { + + // The identity id of the user assigned identity to use when type is UserAssigned + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The identity type, which can be SystemAssigned or UserAssigned. Only one type at a time is supported by Azure NetApp Files. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityObservation struct { + + // The identity id of the user assigned identity to use when type is UserAssigned + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The ID of the NetApp Account. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The ID of the NetApp Account. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // The identity type, which can be SystemAssigned or UserAssigned. Only one type at a time is supported by Azure NetApp Files. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityParameters struct { + + // The identity id of the user assigned identity to use when type is UserAssigned + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The identity type, which can be SystemAssigned or UserAssigned. Only one type at a time is supported by Azure NetApp Files. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +// AccountSpec defines the desired state of Account +type AccountSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider AccountParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. 
+ // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider AccountInitParameters `json:"initProvider,omitempty"` +} + +// AccountStatus defines the observed state of Account. +type AccountStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider AccountObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Account is the Schema for the Accounts API. Manages a NetApp Account. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type Account struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + Spec AccountSpec `json:"spec"` + Status AccountStatus `json:"status,omitempty"` +} + 
+// +kubebuilder:object:root=true + +// AccountList contains a list of Accounts +type AccountList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Account `json:"items"` +} + +// Repository type metadata. +var ( + Account_Kind = "Account" + Account_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Account_Kind}.String() + Account_KindAPIVersion = Account_Kind + "." + CRDGroupVersion.String() + Account_GroupVersionKind = CRDGroupVersion.WithKind(Account_Kind) +) + +func init() { + SchemeBuilder.Register(&Account{}, &AccountList{}) +} diff --git a/apis/netapp/v1beta2/zz_generated.conversion_hubs.go b/apis/netapp/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 000000000..6ae96f929 --- /dev/null +++ b/apis/netapp/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,16 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Account) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *SnapshotPolicy) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Volume) Hub() {} diff --git a/apis/netapp/v1beta2/zz_generated.deepcopy.go b/apis/netapp/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..70b215e83 --- /dev/null +++ b/apis/netapp/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,2249 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Account) DeepCopyInto(out *Account) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Account. +func (in *Account) DeepCopy() *Account { + if in == nil { + return nil + } + out := new(Account) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Account) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccountInitParameters) DeepCopyInto(out *AccountInitParameters) { + *out = *in + if in.ActiveDirectory != nil { + in, out := &in.ActiveDirectory, &out.ActiveDirectory + *out = new(ActiveDirectoryInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountInitParameters. 
+func (in *AccountInitParameters) DeepCopy() *AccountInitParameters { + if in == nil { + return nil + } + out := new(AccountInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccountList) DeepCopyInto(out *AccountList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Account, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountList. +func (in *AccountList) DeepCopy() *AccountList { + if in == nil { + return nil + } + out := new(AccountList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AccountList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AccountObservation) DeepCopyInto(out *AccountObservation) { + *out = *in + if in.ActiveDirectory != nil { + in, out := &in.ActiveDirectory, &out.ActiveDirectory + *out = new(ActiveDirectoryObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountObservation. +func (in *AccountObservation) DeepCopy() *AccountObservation { + if in == nil { + return nil + } + out := new(AccountObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AccountParameters) DeepCopyInto(out *AccountParameters) { + *out = *in + if in.ActiveDirectory != nil { + in, out := &in.ActiveDirectory, &out.ActiveDirectory + *out = new(ActiveDirectoryParameters) + (*in).DeepCopyInto(*out) + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountParameters. +func (in *AccountParameters) DeepCopy() *AccountParameters { + if in == nil { + return nil + } + out := new(AccountParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccountSpec) DeepCopyInto(out *AccountSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountSpec. 
+func (in *AccountSpec) DeepCopy() *AccountSpec { + if in == nil { + return nil + } + out := new(AccountSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccountStatus) DeepCopyInto(out *AccountStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountStatus. +func (in *AccountStatus) DeepCopy() *AccountStatus { + if in == nil { + return nil + } + out := new(AccountStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActiveDirectoryInitParameters) DeepCopyInto(out *ActiveDirectoryInitParameters) { + *out = *in + if in.DNSServers != nil { + in, out := &in.DNSServers, &out.DNSServers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Domain != nil { + in, out := &in.Domain, &out.Domain + *out = new(string) + **out = **in + } + if in.OrganizationalUnit != nil { + in, out := &in.OrganizationalUnit, &out.OrganizationalUnit + *out = new(string) + **out = **in + } + if in.SMBServerName != nil { + in, out := &in.SMBServerName, &out.SMBServerName + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActiveDirectoryInitParameters. 
+func (in *ActiveDirectoryInitParameters) DeepCopy() *ActiveDirectoryInitParameters { + if in == nil { + return nil + } + out := new(ActiveDirectoryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActiveDirectoryObservation) DeepCopyInto(out *ActiveDirectoryObservation) { + *out = *in + if in.DNSServers != nil { + in, out := &in.DNSServers, &out.DNSServers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Domain != nil { + in, out := &in.Domain, &out.Domain + *out = new(string) + **out = **in + } + if in.OrganizationalUnit != nil { + in, out := &in.OrganizationalUnit, &out.OrganizationalUnit + *out = new(string) + **out = **in + } + if in.SMBServerName != nil { + in, out := &in.SMBServerName, &out.SMBServerName + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActiveDirectoryObservation. +func (in *ActiveDirectoryObservation) DeepCopy() *ActiveDirectoryObservation { + if in == nil { + return nil + } + out := new(ActiveDirectoryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ActiveDirectoryParameters) DeepCopyInto(out *ActiveDirectoryParameters) { + *out = *in + if in.DNSServers != nil { + in, out := &in.DNSServers, &out.DNSServers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Domain != nil { + in, out := &in.Domain, &out.Domain + *out = new(string) + **out = **in + } + if in.OrganizationalUnit != nil { + in, out := &in.OrganizationalUnit, &out.OrganizationalUnit + *out = new(string) + **out = **in + } + out.PasswordSecretRef = in.PasswordSecretRef + if in.SMBServerName != nil { + in, out := &in.SMBServerName, &out.SMBServerName + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActiveDirectoryParameters. +func (in *ActiveDirectoryParameters) DeepCopy() *ActiveDirectoryParameters { + if in == nil { + return nil + } + out := new(ActiveDirectoryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DailyScheduleInitParameters) DeepCopyInto(out *DailyScheduleInitParameters) { + *out = *in + if in.Hour != nil { + in, out := &in.Hour, &out.Hour + *out = new(float64) + **out = **in + } + if in.Minute != nil { + in, out := &in.Minute, &out.Minute + *out = new(float64) + **out = **in + } + if in.SnapshotsToKeep != nil { + in, out := &in.SnapshotsToKeep, &out.SnapshotsToKeep + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DailyScheduleInitParameters. 
+func (in *DailyScheduleInitParameters) DeepCopy() *DailyScheduleInitParameters { + if in == nil { + return nil + } + out := new(DailyScheduleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DailyScheduleObservation) DeepCopyInto(out *DailyScheduleObservation) { + *out = *in + if in.Hour != nil { + in, out := &in.Hour, &out.Hour + *out = new(float64) + **out = **in + } + if in.Minute != nil { + in, out := &in.Minute, &out.Minute + *out = new(float64) + **out = **in + } + if in.SnapshotsToKeep != nil { + in, out := &in.SnapshotsToKeep, &out.SnapshotsToKeep + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DailyScheduleObservation. +func (in *DailyScheduleObservation) DeepCopy() *DailyScheduleObservation { + if in == nil { + return nil + } + out := new(DailyScheduleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DailyScheduleParameters) DeepCopyInto(out *DailyScheduleParameters) { + *out = *in + if in.Hour != nil { + in, out := &in.Hour, &out.Hour + *out = new(float64) + **out = **in + } + if in.Minute != nil { + in, out := &in.Minute, &out.Minute + *out = new(float64) + **out = **in + } + if in.SnapshotsToKeep != nil { + in, out := &in.SnapshotsToKeep, &out.SnapshotsToKeep + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DailyScheduleParameters. +func (in *DailyScheduleParameters) DeepCopy() *DailyScheduleParameters { + if in == nil { + return nil + } + out := new(DailyScheduleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *DataProtectionReplicationInitParameters) DeepCopyInto(out *DataProtectionReplicationInitParameters) { + *out = *in + if in.EndpointType != nil { + in, out := &in.EndpointType, &out.EndpointType + *out = new(string) + **out = **in + } + if in.RemoteVolumeLocation != nil { + in, out := &in.RemoteVolumeLocation, &out.RemoteVolumeLocation + *out = new(string) + **out = **in + } + if in.RemoteVolumeResourceID != nil { + in, out := &in.RemoteVolumeResourceID, &out.RemoteVolumeResourceID + *out = new(string) + **out = **in + } + if in.RemoteVolumeResourceIDRef != nil { + in, out := &in.RemoteVolumeResourceIDRef, &out.RemoteVolumeResourceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RemoteVolumeResourceIDSelector != nil { + in, out := &in.RemoteVolumeResourceIDSelector, &out.RemoteVolumeResourceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ReplicationFrequency != nil { + in, out := &in.ReplicationFrequency, &out.ReplicationFrequency + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataProtectionReplicationInitParameters. +func (in *DataProtectionReplicationInitParameters) DeepCopy() *DataProtectionReplicationInitParameters { + if in == nil { + return nil + } + out := new(DataProtectionReplicationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataProtectionReplicationObservation) DeepCopyInto(out *DataProtectionReplicationObservation) { + *out = *in + if in.EndpointType != nil { + in, out := &in.EndpointType, &out.EndpointType + *out = new(string) + **out = **in + } + if in.RemoteVolumeLocation != nil { + in, out := &in.RemoteVolumeLocation, &out.RemoteVolumeLocation + *out = new(string) + **out = **in + } + if in.RemoteVolumeResourceID != nil { + in, out := &in.RemoteVolumeResourceID, &out.RemoteVolumeResourceID + *out = new(string) + **out = **in + } + if in.ReplicationFrequency != nil { + in, out := &in.ReplicationFrequency, &out.ReplicationFrequency + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataProtectionReplicationObservation. +func (in *DataProtectionReplicationObservation) DeepCopy() *DataProtectionReplicationObservation { + if in == nil { + return nil + } + out := new(DataProtectionReplicationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataProtectionReplicationParameters) DeepCopyInto(out *DataProtectionReplicationParameters) { + *out = *in + if in.EndpointType != nil { + in, out := &in.EndpointType, &out.EndpointType + *out = new(string) + **out = **in + } + if in.RemoteVolumeLocation != nil { + in, out := &in.RemoteVolumeLocation, &out.RemoteVolumeLocation + *out = new(string) + **out = **in + } + if in.RemoteVolumeResourceID != nil { + in, out := &in.RemoteVolumeResourceID, &out.RemoteVolumeResourceID + *out = new(string) + **out = **in + } + if in.RemoteVolumeResourceIDRef != nil { + in, out := &in.RemoteVolumeResourceIDRef, &out.RemoteVolumeResourceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RemoteVolumeResourceIDSelector != nil { + in, out := &in.RemoteVolumeResourceIDSelector, &out.RemoteVolumeResourceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ReplicationFrequency != nil { + in, out := &in.ReplicationFrequency, &out.ReplicationFrequency + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataProtectionReplicationParameters. +func (in *DataProtectionReplicationParameters) DeepCopy() *DataProtectionReplicationParameters { + if in == nil { + return nil + } + out := new(DataProtectionReplicationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataProtectionSnapshotPolicyInitParameters) DeepCopyInto(out *DataProtectionSnapshotPolicyInitParameters) { + *out = *in + if in.SnapshotPolicyID != nil { + in, out := &in.SnapshotPolicyID, &out.SnapshotPolicyID + *out = new(string) + **out = **in + } + if in.SnapshotPolicyIDRef != nil { + in, out := &in.SnapshotPolicyIDRef, &out.SnapshotPolicyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SnapshotPolicyIDSelector != nil { + in, out := &in.SnapshotPolicyIDSelector, &out.SnapshotPolicyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataProtectionSnapshotPolicyInitParameters. +func (in *DataProtectionSnapshotPolicyInitParameters) DeepCopy() *DataProtectionSnapshotPolicyInitParameters { + if in == nil { + return nil + } + out := new(DataProtectionSnapshotPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataProtectionSnapshotPolicyObservation) DeepCopyInto(out *DataProtectionSnapshotPolicyObservation) { + *out = *in + if in.SnapshotPolicyID != nil { + in, out := &in.SnapshotPolicyID, &out.SnapshotPolicyID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataProtectionSnapshotPolicyObservation. +func (in *DataProtectionSnapshotPolicyObservation) DeepCopy() *DataProtectionSnapshotPolicyObservation { + if in == nil { + return nil + } + out := new(DataProtectionSnapshotPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataProtectionSnapshotPolicyParameters) DeepCopyInto(out *DataProtectionSnapshotPolicyParameters) { + *out = *in + if in.SnapshotPolicyID != nil { + in, out := &in.SnapshotPolicyID, &out.SnapshotPolicyID + *out = new(string) + **out = **in + } + if in.SnapshotPolicyIDRef != nil { + in, out := &in.SnapshotPolicyIDRef, &out.SnapshotPolicyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SnapshotPolicyIDSelector != nil { + in, out := &in.SnapshotPolicyIDSelector, &out.SnapshotPolicyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataProtectionSnapshotPolicyParameters. +func (in *DataProtectionSnapshotPolicyParameters) DeepCopy() *DataProtectionSnapshotPolicyParameters { + if in == nil { + return nil + } + out := new(DataProtectionSnapshotPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExportPolicyRuleInitParameters) DeepCopyInto(out *ExportPolicyRuleInitParameters) { + *out = *in + if in.AllowedClients != nil { + in, out := &in.AllowedClients, &out.AllowedClients + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ProtocolsEnabled != nil { + in, out := &in.ProtocolsEnabled, &out.ProtocolsEnabled + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RootAccessEnabled != nil { + in, out := &in.RootAccessEnabled, &out.RootAccessEnabled + *out = new(bool) + **out = **in + } + if in.RuleIndex != nil { + in, out := &in.RuleIndex, &out.RuleIndex + *out = new(float64) + **out = **in + } + if in.UnixReadOnly != nil { + in, out := &in.UnixReadOnly, &out.UnixReadOnly + *out = new(bool) + **out = **in + } + if in.UnixReadWrite != nil { + in, out := &in.UnixReadWrite, &out.UnixReadWrite + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExportPolicyRuleInitParameters. +func (in *ExportPolicyRuleInitParameters) DeepCopy() *ExportPolicyRuleInitParameters { + if in == nil { + return nil + } + out := new(ExportPolicyRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExportPolicyRuleObservation) DeepCopyInto(out *ExportPolicyRuleObservation) { + *out = *in + if in.AllowedClients != nil { + in, out := &in.AllowedClients, &out.AllowedClients + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ProtocolsEnabled != nil { + in, out := &in.ProtocolsEnabled, &out.ProtocolsEnabled + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RootAccessEnabled != nil { + in, out := &in.RootAccessEnabled, &out.RootAccessEnabled + *out = new(bool) + **out = **in + } + if in.RuleIndex != nil { + in, out := &in.RuleIndex, &out.RuleIndex + *out = new(float64) + **out = **in + } + if in.UnixReadOnly != nil { + in, out := &in.UnixReadOnly, &out.UnixReadOnly + *out = new(bool) + **out = **in + } + if in.UnixReadWrite != nil { + in, out := &in.UnixReadWrite, &out.UnixReadWrite + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExportPolicyRuleObservation. +func (in *ExportPolicyRuleObservation) DeepCopy() *ExportPolicyRuleObservation { + if in == nil { + return nil + } + out := new(ExportPolicyRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExportPolicyRuleParameters) DeepCopyInto(out *ExportPolicyRuleParameters) { + *out = *in + if in.AllowedClients != nil { + in, out := &in.AllowedClients, &out.AllowedClients + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ProtocolsEnabled != nil { + in, out := &in.ProtocolsEnabled, &out.ProtocolsEnabled + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RootAccessEnabled != nil { + in, out := &in.RootAccessEnabled, &out.RootAccessEnabled + *out = new(bool) + **out = **in + } + if in.RuleIndex != nil { + in, out := &in.RuleIndex, &out.RuleIndex + *out = new(float64) + **out = **in + } + if in.UnixReadOnly != nil { + in, out := &in.UnixReadOnly, &out.UnixReadOnly + *out = new(bool) + **out = **in + } + if in.UnixReadWrite != nil { + in, out := &in.UnixReadWrite, &out.UnixReadWrite + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExportPolicyRuleParameters. +func (in *ExportPolicyRuleParameters) DeepCopy() *ExportPolicyRuleParameters { + if in == nil { + return nil + } + out := new(ExportPolicyRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HourlyScheduleInitParameters) DeepCopyInto(out *HourlyScheduleInitParameters) { + *out = *in + if in.Minute != nil { + in, out := &in.Minute, &out.Minute + *out = new(float64) + **out = **in + } + if in.SnapshotsToKeep != nil { + in, out := &in.SnapshotsToKeep, &out.SnapshotsToKeep + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HourlyScheduleInitParameters. 
+func (in *HourlyScheduleInitParameters) DeepCopy() *HourlyScheduleInitParameters { + if in == nil { + return nil + } + out := new(HourlyScheduleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HourlyScheduleObservation) DeepCopyInto(out *HourlyScheduleObservation) { + *out = *in + if in.Minute != nil { + in, out := &in.Minute, &out.Minute + *out = new(float64) + **out = **in + } + if in.SnapshotsToKeep != nil { + in, out := &in.SnapshotsToKeep, &out.SnapshotsToKeep + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HourlyScheduleObservation. +func (in *HourlyScheduleObservation) DeepCopy() *HourlyScheduleObservation { + if in == nil { + return nil + } + out := new(HourlyScheduleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HourlyScheduleParameters) DeepCopyInto(out *HourlyScheduleParameters) { + *out = *in + if in.Minute != nil { + in, out := &in.Minute, &out.Minute + *out = new(float64) + **out = **in + } + if in.SnapshotsToKeep != nil { + in, out := &in.SnapshotsToKeep, &out.SnapshotsToKeep + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HourlyScheduleParameters. +func (in *HourlyScheduleParameters) DeepCopy() *HourlyScheduleParameters { + if in == nil { + return nil + } + out := new(HourlyScheduleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentityInitParameters) DeepCopyInto(out *IdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityInitParameters. +func (in *IdentityInitParameters) DeepCopy() *IdentityInitParameters { + if in == nil { + return nil + } + out := new(IdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityObservation) DeepCopyInto(out *IdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityObservation. +func (in *IdentityObservation) DeepCopy() *IdentityObservation { + if in == nil { + return nil + } + out := new(IdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentityParameters) DeepCopyInto(out *IdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityParameters. +func (in *IdentityParameters) DeepCopy() *IdentityParameters { + if in == nil { + return nil + } + out := new(IdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonthlyScheduleInitParameters) DeepCopyInto(out *MonthlyScheduleInitParameters) { + *out = *in + if in.DaysOfMonth != nil { + in, out := &in.DaysOfMonth, &out.DaysOfMonth + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.Hour != nil { + in, out := &in.Hour, &out.Hour + *out = new(float64) + **out = **in + } + if in.Minute != nil { + in, out := &in.Minute, &out.Minute + *out = new(float64) + **out = **in + } + if in.SnapshotsToKeep != nil { + in, out := &in.SnapshotsToKeep, &out.SnapshotsToKeep + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonthlyScheduleInitParameters. +func (in *MonthlyScheduleInitParameters) DeepCopy() *MonthlyScheduleInitParameters { + if in == nil { + return nil + } + out := new(MonthlyScheduleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonthlyScheduleObservation) DeepCopyInto(out *MonthlyScheduleObservation) { + *out = *in + if in.DaysOfMonth != nil { + in, out := &in.DaysOfMonth, &out.DaysOfMonth + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.Hour != nil { + in, out := &in.Hour, &out.Hour + *out = new(float64) + **out = **in + } + if in.Minute != nil { + in, out := &in.Minute, &out.Minute + *out = new(float64) + **out = **in + } + if in.SnapshotsToKeep != nil { + in, out := &in.SnapshotsToKeep, &out.SnapshotsToKeep + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonthlyScheduleObservation. +func (in *MonthlyScheduleObservation) DeepCopy() *MonthlyScheduleObservation { + if in == nil { + return nil + } + out := new(MonthlyScheduleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonthlyScheduleParameters) DeepCopyInto(out *MonthlyScheduleParameters) { + *out = *in + if in.DaysOfMonth != nil { + in, out := &in.DaysOfMonth, &out.DaysOfMonth + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.Hour != nil { + in, out := &in.Hour, &out.Hour + *out = new(float64) + **out = **in + } + if in.Minute != nil { + in, out := &in.Minute, &out.Minute + *out = new(float64) + **out = **in + } + if in.SnapshotsToKeep != nil { + in, out := &in.SnapshotsToKeep, &out.SnapshotsToKeep + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonthlyScheduleParameters. 
+func (in *MonthlyScheduleParameters) DeepCopy() *MonthlyScheduleParameters { + if in == nil { + return nil + } + out := new(MonthlyScheduleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotPolicy) DeepCopyInto(out *SnapshotPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotPolicy. +func (in *SnapshotPolicy) DeepCopy() *SnapshotPolicy { + if in == nil { + return nil + } + out := new(SnapshotPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SnapshotPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SnapshotPolicyInitParameters) DeepCopyInto(out *SnapshotPolicyInitParameters) { + *out = *in + if in.DailySchedule != nil { + in, out := &in.DailySchedule, &out.DailySchedule + *out = new(DailyScheduleInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.HourlySchedule != nil { + in, out := &in.HourlySchedule, &out.HourlySchedule + *out = new(HourlyScheduleInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MonthlySchedule != nil { + in, out := &in.MonthlySchedule, &out.MonthlySchedule + *out = new(MonthlyScheduleInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.WeeklySchedule != nil { + in, out := &in.WeeklySchedule, &out.WeeklySchedule + *out = new(WeeklyScheduleInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotPolicyInitParameters. +func (in *SnapshotPolicyInitParameters) DeepCopy() *SnapshotPolicyInitParameters { + if in == nil { + return nil + } + out := new(SnapshotPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SnapshotPolicyList) DeepCopyInto(out *SnapshotPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SnapshotPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotPolicyList. +func (in *SnapshotPolicyList) DeepCopy() *SnapshotPolicyList { + if in == nil { + return nil + } + out := new(SnapshotPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SnapshotPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotPolicyObservation) DeepCopyInto(out *SnapshotPolicyObservation) { + *out = *in + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } + if in.DailySchedule != nil { + in, out := &in.DailySchedule, &out.DailySchedule + *out = new(DailyScheduleObservation) + (*in).DeepCopyInto(*out) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.HourlySchedule != nil { + in, out := &in.HourlySchedule, &out.HourlySchedule + *out = new(HourlyScheduleObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MonthlySchedule != nil { + in, out := &in.MonthlySchedule, &out.MonthlySchedule + *out = new(MonthlyScheduleObservation) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := 
&in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.WeeklySchedule != nil { + in, out := &in.WeeklySchedule, &out.WeeklySchedule + *out = new(WeeklyScheduleObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotPolicyObservation. +func (in *SnapshotPolicyObservation) DeepCopy() *SnapshotPolicyObservation { + if in == nil { + return nil + } + out := new(SnapshotPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotPolicyParameters) DeepCopyInto(out *SnapshotPolicyParameters) { + *out = *in + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } + if in.AccountNameRef != nil { + in, out := &in.AccountNameRef, &out.AccountNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AccountNameSelector != nil { + in, out := &in.AccountNameSelector, &out.AccountNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.DailySchedule != nil { + in, out := &in.DailySchedule, &out.DailySchedule + *out = new(DailyScheduleParameters) + (*in).DeepCopyInto(*out) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.HourlySchedule != nil { + in, out := &in.HourlySchedule, &out.HourlySchedule + *out = new(HourlyScheduleParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = 
new(string) + **out = **in + } + if in.MonthlySchedule != nil { + in, out := &in.MonthlySchedule, &out.MonthlySchedule + *out = new(MonthlyScheduleParameters) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.WeeklySchedule != nil { + in, out := &in.WeeklySchedule, &out.WeeklySchedule + *out = new(WeeklyScheduleParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotPolicyParameters. +func (in *SnapshotPolicyParameters) DeepCopy() *SnapshotPolicyParameters { + if in == nil { + return nil + } + out := new(SnapshotPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotPolicySpec) DeepCopyInto(out *SnapshotPolicySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotPolicySpec. 
+func (in *SnapshotPolicySpec) DeepCopy() *SnapshotPolicySpec { + if in == nil { + return nil + } + out := new(SnapshotPolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotPolicyStatus) DeepCopyInto(out *SnapshotPolicyStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotPolicyStatus. +func (in *SnapshotPolicyStatus) DeepCopy() *SnapshotPolicyStatus { + if in == nil { + return nil + } + out := new(SnapshotPolicyStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Volume) DeepCopyInto(out *Volume) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Volume. +func (in *Volume) DeepCopy() *Volume { + if in == nil { + return nil + } + out := new(Volume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Volume) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VolumeInitParameters) DeepCopyInto(out *VolumeInitParameters) { + *out = *in + if in.AzureVMwareDataStoreEnabled != nil { + in, out := &in.AzureVMwareDataStoreEnabled, &out.AzureVMwareDataStoreEnabled + *out = new(bool) + **out = **in + } + if in.CreateFromSnapshotResourceID != nil { + in, out := &in.CreateFromSnapshotResourceID, &out.CreateFromSnapshotResourceID + *out = new(string) + **out = **in + } + if in.CreateFromSnapshotResourceIDRef != nil { + in, out := &in.CreateFromSnapshotResourceIDRef, &out.CreateFromSnapshotResourceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CreateFromSnapshotResourceIDSelector != nil { + in, out := &in.CreateFromSnapshotResourceIDSelector, &out.CreateFromSnapshotResourceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.DataProtectionReplication != nil { + in, out := &in.DataProtectionReplication, &out.DataProtectionReplication + *out = new(DataProtectionReplicationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DataProtectionSnapshotPolicy != nil { + in, out := &in.DataProtectionSnapshotPolicy, &out.DataProtectionSnapshotPolicy + *out = new(DataProtectionSnapshotPolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.EncryptionKeySource != nil { + in, out := &in.EncryptionKeySource, &out.EncryptionKeySource + *out = new(string) + **out = **in + } + if in.ExportPolicyRule != nil { + in, out := &in.ExportPolicyRule, &out.ExportPolicyRule + *out = make([]ExportPolicyRuleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.KeyVaultPrivateEndpointID != nil { + in, out := &in.KeyVaultPrivateEndpointID, &out.KeyVaultPrivateEndpointID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.NetworkFeatures != nil { + in, out := &in.NetworkFeatures, &out.NetworkFeatures + *out = new(string) + **out = **in + } + if 
in.Protocols != nil { + in, out := &in.Protocols, &out.Protocols + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SMBAccessBasedEnumerationEnabled != nil { + in, out := &in.SMBAccessBasedEnumerationEnabled, &out.SMBAccessBasedEnumerationEnabled + *out = new(bool) + **out = **in + } + if in.SMBNonBrowsableEnabled != nil { + in, out := &in.SMBNonBrowsableEnabled, &out.SMBNonBrowsableEnabled + *out = new(bool) + **out = **in + } + if in.SecurityStyle != nil { + in, out := &in.SecurityStyle, &out.SecurityStyle + *out = new(string) + **out = **in + } + if in.ServiceLevel != nil { + in, out := &in.ServiceLevel, &out.ServiceLevel + *out = new(string) + **out = **in + } + if in.SnapshotDirectoryVisible != nil { + in, out := &in.SnapshotDirectoryVisible, &out.SnapshotDirectoryVisible + *out = new(bool) + **out = **in + } + if in.StorageQuotaInGb != nil { + in, out := &in.StorageQuotaInGb, &out.StorageQuotaInGb + *out = new(float64) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ThroughputInMibps != nil { + in, out := &in.ThroughputInMibps, &out.ThroughputInMibps + *out = new(float64) + **out = **in + } + if in.VolumePath != nil { + in, out := &in.VolumePath, &out.VolumePath + 
*out = new(string) + **out = **in + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeInitParameters. +func (in *VolumeInitParameters) DeepCopy() *VolumeInitParameters { + if in == nil { + return nil + } + out := new(VolumeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeList) DeepCopyInto(out *VolumeList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Volume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeList. +func (in *VolumeList) DeepCopy() *VolumeList { + if in == nil { + return nil + } + out := new(VolumeList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VolumeList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VolumeObservation) DeepCopyInto(out *VolumeObservation) { + *out = *in + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } + if in.AzureVMwareDataStoreEnabled != nil { + in, out := &in.AzureVMwareDataStoreEnabled, &out.AzureVMwareDataStoreEnabled + *out = new(bool) + **out = **in + } + if in.CreateFromSnapshotResourceID != nil { + in, out := &in.CreateFromSnapshotResourceID, &out.CreateFromSnapshotResourceID + *out = new(string) + **out = **in + } + if in.DataProtectionReplication != nil { + in, out := &in.DataProtectionReplication, &out.DataProtectionReplication + *out = new(DataProtectionReplicationObservation) + (*in).DeepCopyInto(*out) + } + if in.DataProtectionSnapshotPolicy != nil { + in, out := &in.DataProtectionSnapshotPolicy, &out.DataProtectionSnapshotPolicy + *out = new(DataProtectionSnapshotPolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.EncryptionKeySource != nil { + in, out := &in.EncryptionKeySource, &out.EncryptionKeySource + *out = new(string) + **out = **in + } + if in.ExportPolicyRule != nil { + in, out := &in.ExportPolicyRule, &out.ExportPolicyRule + *out = make([]ExportPolicyRuleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.KeyVaultPrivateEndpointID != nil { + in, out := &in.KeyVaultPrivateEndpointID, &out.KeyVaultPrivateEndpointID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MountIPAddresses != nil { + in, out := &in.MountIPAddresses, &out.MountIPAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NetworkFeatures != nil { + in, out := &in.NetworkFeatures, &out.NetworkFeatures + *out = 
new(string) + **out = **in + } + if in.PoolName != nil { + in, out := &in.PoolName, &out.PoolName + *out = new(string) + **out = **in + } + if in.Protocols != nil { + in, out := &in.Protocols, &out.Protocols + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.SMBAccessBasedEnumerationEnabled != nil { + in, out := &in.SMBAccessBasedEnumerationEnabled, &out.SMBAccessBasedEnumerationEnabled + *out = new(bool) + **out = **in + } + if in.SMBNonBrowsableEnabled != nil { + in, out := &in.SMBNonBrowsableEnabled, &out.SMBNonBrowsableEnabled + *out = new(bool) + **out = **in + } + if in.SecurityStyle != nil { + in, out := &in.SecurityStyle, &out.SecurityStyle + *out = new(string) + **out = **in + } + if in.ServiceLevel != nil { + in, out := &in.ServiceLevel, &out.ServiceLevel + *out = new(string) + **out = **in + } + if in.SnapshotDirectoryVisible != nil { + in, out := &in.SnapshotDirectoryVisible, &out.SnapshotDirectoryVisible + *out = new(bool) + **out = **in + } + if in.StorageQuotaInGb != nil { + in, out := &in.StorageQuotaInGb, &out.StorageQuotaInGb + *out = new(float64) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ThroughputInMibps != nil { + in, out := &in.ThroughputInMibps, &out.ThroughputInMibps + *out = new(float64) + **out = **in + } + if in.VolumePath != nil { + in, out := &in.VolumePath, &out.VolumePath + *out 
= new(string) + **out = **in + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeObservation. +func (in *VolumeObservation) DeepCopy() *VolumeObservation { + if in == nil { + return nil + } + out := new(VolumeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeParameters) DeepCopyInto(out *VolumeParameters) { + *out = *in + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } + if in.AccountNameRef != nil { + in, out := &in.AccountNameRef, &out.AccountNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AccountNameSelector != nil { + in, out := &in.AccountNameSelector, &out.AccountNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.AzureVMwareDataStoreEnabled != nil { + in, out := &in.AzureVMwareDataStoreEnabled, &out.AzureVMwareDataStoreEnabled + *out = new(bool) + **out = **in + } + if in.CreateFromSnapshotResourceID != nil { + in, out := &in.CreateFromSnapshotResourceID, &out.CreateFromSnapshotResourceID + *out = new(string) + **out = **in + } + if in.CreateFromSnapshotResourceIDRef != nil { + in, out := &in.CreateFromSnapshotResourceIDRef, &out.CreateFromSnapshotResourceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.CreateFromSnapshotResourceIDSelector != nil { + in, out := &in.CreateFromSnapshotResourceIDSelector, &out.CreateFromSnapshotResourceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.DataProtectionReplication != nil { + in, out := &in.DataProtectionReplication, &out.DataProtectionReplication + *out = new(DataProtectionReplicationParameters) + (*in).DeepCopyInto(*out) + } + if in.DataProtectionSnapshotPolicy != nil { + 
in, out := &in.DataProtectionSnapshotPolicy, &out.DataProtectionSnapshotPolicy + *out = new(DataProtectionSnapshotPolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.EncryptionKeySource != nil { + in, out := &in.EncryptionKeySource, &out.EncryptionKeySource + *out = new(string) + **out = **in + } + if in.ExportPolicyRule != nil { + in, out := &in.ExportPolicyRule, &out.ExportPolicyRule + *out = make([]ExportPolicyRuleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.KeyVaultPrivateEndpointID != nil { + in, out := &in.KeyVaultPrivateEndpointID, &out.KeyVaultPrivateEndpointID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.NetworkFeatures != nil { + in, out := &in.NetworkFeatures, &out.NetworkFeatures + *out = new(string) + **out = **in + } + if in.PoolName != nil { + in, out := &in.PoolName, &out.PoolName + *out = new(string) + **out = **in + } + if in.PoolNameRef != nil { + in, out := &in.PoolNameRef, &out.PoolNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PoolNameSelector != nil { + in, out := &in.PoolNameSelector, &out.PoolNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Protocols != nil { + in, out := &in.Protocols, &out.Protocols + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + 
} + if in.SMBAccessBasedEnumerationEnabled != nil { + in, out := &in.SMBAccessBasedEnumerationEnabled, &out.SMBAccessBasedEnumerationEnabled + *out = new(bool) + **out = **in + } + if in.SMBNonBrowsableEnabled != nil { + in, out := &in.SMBNonBrowsableEnabled, &out.SMBNonBrowsableEnabled + *out = new(bool) + **out = **in + } + if in.SecurityStyle != nil { + in, out := &in.SecurityStyle, &out.SecurityStyle + *out = new(string) + **out = **in + } + if in.ServiceLevel != nil { + in, out := &in.ServiceLevel, &out.ServiceLevel + *out = new(string) + **out = **in + } + if in.SnapshotDirectoryVisible != nil { + in, out := &in.SnapshotDirectoryVisible, &out.SnapshotDirectoryVisible + *out = new(bool) + **out = **in + } + if in.StorageQuotaInGb != nil { + in, out := &in.StorageQuotaInGb, &out.StorageQuotaInGb + *out = new(float64) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ThroughputInMibps != nil { + in, out := &in.ThroughputInMibps, &out.ThroughputInMibps + *out = new(float64) + **out = **in + } + if in.VolumePath != nil { + in, out := &in.VolumePath, &out.VolumePath + *out = new(string) + **out = **in + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
VolumeParameters. +func (in *VolumeParameters) DeepCopy() *VolumeParameters { + if in == nil { + return nil + } + out := new(VolumeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeSpec) DeepCopyInto(out *VolumeSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSpec. +func (in *VolumeSpec) DeepCopy() *VolumeSpec { + if in == nil { + return nil + } + out := new(VolumeSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeStatus) DeepCopyInto(out *VolumeStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeStatus. +func (in *VolumeStatus) DeepCopy() *VolumeStatus { + if in == nil { + return nil + } + out := new(VolumeStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WeeklyScheduleInitParameters) DeepCopyInto(out *WeeklyScheduleInitParameters) { + *out = *in + if in.DaysOfWeek != nil { + in, out := &in.DaysOfWeek, &out.DaysOfWeek + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Hour != nil { + in, out := &in.Hour, &out.Hour + *out = new(float64) + **out = **in + } + if in.Minute != nil { + in, out := &in.Minute, &out.Minute + *out = new(float64) + **out = **in + } + if in.SnapshotsToKeep != nil { + in, out := &in.SnapshotsToKeep, &out.SnapshotsToKeep + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WeeklyScheduleInitParameters. +func (in *WeeklyScheduleInitParameters) DeepCopy() *WeeklyScheduleInitParameters { + if in == nil { + return nil + } + out := new(WeeklyScheduleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WeeklyScheduleObservation) DeepCopyInto(out *WeeklyScheduleObservation) { + *out = *in + if in.DaysOfWeek != nil { + in, out := &in.DaysOfWeek, &out.DaysOfWeek + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Hour != nil { + in, out := &in.Hour, &out.Hour + *out = new(float64) + **out = **in + } + if in.Minute != nil { + in, out := &in.Minute, &out.Minute + *out = new(float64) + **out = **in + } + if in.SnapshotsToKeep != nil { + in, out := &in.SnapshotsToKeep, &out.SnapshotsToKeep + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WeeklyScheduleObservation. 
+func (in *WeeklyScheduleObservation) DeepCopy() *WeeklyScheduleObservation { + if in == nil { + return nil + } + out := new(WeeklyScheduleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WeeklyScheduleParameters) DeepCopyInto(out *WeeklyScheduleParameters) { + *out = *in + if in.DaysOfWeek != nil { + in, out := &in.DaysOfWeek, &out.DaysOfWeek + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Hour != nil { + in, out := &in.Hour, &out.Hour + *out = new(float64) + **out = **in + } + if in.Minute != nil { + in, out := &in.Minute, &out.Minute + *out = new(float64) + **out = **in + } + if in.SnapshotsToKeep != nil { + in, out := &in.SnapshotsToKeep, &out.SnapshotsToKeep + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WeeklyScheduleParameters. +func (in *WeeklyScheduleParameters) DeepCopy() *WeeklyScheduleParameters { + if in == nil { + return nil + } + out := new(WeeklyScheduleParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/netapp/v1beta2/zz_generated.managed.go b/apis/netapp/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..22ea13528 --- /dev/null +++ b/apis/netapp/v1beta2/zz_generated.managed.go @@ -0,0 +1,188 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Account. +func (mg *Account) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Account. 
+func (mg *Account) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Account. +func (mg *Account) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Account. +func (mg *Account) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Account. +func (mg *Account) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Account. +func (mg *Account) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Account. +func (mg *Account) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Account. +func (mg *Account) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Account. +func (mg *Account) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Account. +func (mg *Account) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Account. +func (mg *Account) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Account. +func (mg *Account) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this SnapshotPolicy. +func (mg *SnapshotPolicy) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this SnapshotPolicy. 
+func (mg *SnapshotPolicy) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this SnapshotPolicy. +func (mg *SnapshotPolicy) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this SnapshotPolicy. +func (mg *SnapshotPolicy) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this SnapshotPolicy. +func (mg *SnapshotPolicy) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this SnapshotPolicy. +func (mg *SnapshotPolicy) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this SnapshotPolicy. +func (mg *SnapshotPolicy) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this SnapshotPolicy. +func (mg *SnapshotPolicy) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this SnapshotPolicy. +func (mg *SnapshotPolicy) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this SnapshotPolicy. +func (mg *SnapshotPolicy) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this SnapshotPolicy. +func (mg *SnapshotPolicy) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this SnapshotPolicy. +func (mg *SnapshotPolicy) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Volume. 
+func (mg *Volume) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Volume. +func (mg *Volume) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Volume. +func (mg *Volume) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Volume. +func (mg *Volume) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Volume. +func (mg *Volume) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Volume. +func (mg *Volume) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Volume. +func (mg *Volume) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Volume. +func (mg *Volume) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Volume. +func (mg *Volume) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Volume. +func (mg *Volume) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Volume. +func (mg *Volume) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Volume. 
+func (mg *Volume) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/netapp/v1beta2/zz_generated.managedlist.go b/apis/netapp/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..a8cfb20b1 --- /dev/null +++ b/apis/netapp/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,35 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this AccountList. +func (l *AccountList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this SnapshotPolicyList. +func (l *SnapshotPolicyList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this VolumeList. +func (l *VolumeList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/netapp/v1beta2/zz_generated.resolvers.go b/apis/netapp/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..b135b5f64 --- /dev/null +++ b/apis/netapp/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,330 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + rconfig "github.com/upbound/provider-azure/apis/rconfig" + client "sigs.k8s.io/controller-runtime/pkg/client" + + // ResolveReferences of this Account. + apisresolver "github.com/upbound/provider-azure/internal/apis" +) + +func (mg *Account) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this SnapshotPolicy. 
+func (mg *SnapshotPolicy) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("netapp.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.AccountName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.AccountNameRef, + Selector: mg.Spec.ForProvider.AccountNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.AccountName") + } + mg.Spec.ForProvider.AccountName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.AccountNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Volume. 
+func (mg *Volume) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("netapp.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.AccountName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.AccountNameRef, + Selector: mg.Spec.ForProvider.AccountNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.AccountName") + } + mg.Spec.ForProvider.AccountName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.AccountNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("netapp.azure.upbound.io", "v1beta1", "Snapshot", "SnapshotList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.CreateFromSnapshotResourceID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.CreateFromSnapshotResourceIDRef, + Selector: mg.Spec.ForProvider.CreateFromSnapshotResourceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.CreateFromSnapshotResourceID") + } + mg.Spec.ForProvider.CreateFromSnapshotResourceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.CreateFromSnapshotResourceIDRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.DataProtectionReplication != nil { + 
{ + m, l, err = apisresolver.GetManagedResource("netapp.azure.upbound.io", "v1beta2", "Volume", "VolumeList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DataProtectionReplication.RemoteVolumeResourceID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.DataProtectionReplication.RemoteVolumeResourceIDRef, + Selector: mg.Spec.ForProvider.DataProtectionReplication.RemoteVolumeResourceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DataProtectionReplication.RemoteVolumeResourceID") + } + mg.Spec.ForProvider.DataProtectionReplication.RemoteVolumeResourceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DataProtectionReplication.RemoteVolumeResourceIDRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.DataProtectionSnapshotPolicy != nil { + { + m, l, err = apisresolver.GetManagedResource("netapp.azure.upbound.io", "v1beta2", "SnapshotPolicy", "SnapshotPolicyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DataProtectionSnapshotPolicy.SnapshotPolicyID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.DataProtectionSnapshotPolicy.SnapshotPolicyIDRef, + Selector: mg.Spec.ForProvider.DataProtectionSnapshotPolicy.SnapshotPolicyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DataProtectionSnapshotPolicy.SnapshotPolicyID") + } + mg.Spec.ForProvider.DataProtectionSnapshotPolicy.SnapshotPolicyID = 
reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DataProtectionSnapshotPolicy.SnapshotPolicyIDRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("netapp.azure.upbound.io", "v1beta1", "Pool", "PoolList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.PoolName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.PoolNameRef, + Selector: mg.Spec.ForProvider.PoolNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.PoolName") + } + mg.Spec.ForProvider.PoolName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.PoolNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + 
} + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SubnetID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.SubnetIDRef, + Selector: mg.Spec.ForProvider.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SubnetID") + } + mg.Spec.ForProvider.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SubnetIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("netapp.azure.upbound.io", "v1beta1", "Snapshot", "SnapshotList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.CreateFromSnapshotResourceID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.CreateFromSnapshotResourceIDRef, + Selector: mg.Spec.InitProvider.CreateFromSnapshotResourceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.CreateFromSnapshotResourceID") + } + mg.Spec.InitProvider.CreateFromSnapshotResourceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.CreateFromSnapshotResourceIDRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.DataProtectionReplication != nil { + { + m, l, err = apisresolver.GetManagedResource("netapp.azure.upbound.io", "v1beta2", "Volume", "VolumeList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DataProtectionReplication.RemoteVolumeResourceID), + Extract: rconfig.ExtractResourceID(), + Reference: 
mg.Spec.InitProvider.DataProtectionReplication.RemoteVolumeResourceIDRef, + Selector: mg.Spec.InitProvider.DataProtectionReplication.RemoteVolumeResourceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.DataProtectionReplication.RemoteVolumeResourceID") + } + mg.Spec.InitProvider.DataProtectionReplication.RemoteVolumeResourceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DataProtectionReplication.RemoteVolumeResourceIDRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.DataProtectionSnapshotPolicy != nil { + { + m, l, err = apisresolver.GetManagedResource("netapp.azure.upbound.io", "v1beta2", "SnapshotPolicy", "SnapshotPolicyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DataProtectionSnapshotPolicy.SnapshotPolicyID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.DataProtectionSnapshotPolicy.SnapshotPolicyIDRef, + Selector: mg.Spec.InitProvider.DataProtectionSnapshotPolicy.SnapshotPolicyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.DataProtectionSnapshotPolicy.SnapshotPolicyID") + } + mg.Spec.InitProvider.DataProtectionSnapshotPolicy.SnapshotPolicyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DataProtectionSnapshotPolicy.SnapshotPolicyIDRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.InitProvider.SubnetID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.SubnetIDRef, + Selector: mg.Spec.InitProvider.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SubnetID") + } + mg.Spec.InitProvider.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SubnetIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/netapp/v1beta2/zz_groupversion_info.go b/apis/netapp/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..fdcc59d08 --- /dev/null +++ b/apis/netapp/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=netapp.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "netapp.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/netapp/v1beta2/zz_snapshotpolicy_terraformed.go b/apis/netapp/v1beta2/zz_snapshotpolicy_terraformed.go new file mode 100755 index 000000000..aa4539960 --- /dev/null +++ b/apis/netapp/v1beta2/zz_snapshotpolicy_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. 
DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this SnapshotPolicy +func (mg *SnapshotPolicy) GetTerraformResourceType() string { + return "azurerm_netapp_snapshot_policy" +} + +// GetConnectionDetailsMapping for this SnapshotPolicy +func (tr *SnapshotPolicy) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this SnapshotPolicy +func (tr *SnapshotPolicy) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this SnapshotPolicy +func (tr *SnapshotPolicy) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this SnapshotPolicy +func (tr *SnapshotPolicy) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this SnapshotPolicy +func (tr *SnapshotPolicy) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this SnapshotPolicy +func (tr *SnapshotPolicy) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this SnapshotPolicy +func (tr *SnapshotPolicy) GetInitParameters() (map[string]any, error) { + p, err := 
json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this SnapshotPolicy +func (tr *SnapshotPolicy) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this SnapshotPolicy using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *SnapshotPolicy) LateInitialize(attrs []byte) (bool, error) { + params := &SnapshotPolicyParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *SnapshotPolicy) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/netapp/v1beta2/zz_snapshotpolicy_types.go b/apis/netapp/v1beta2/zz_snapshotpolicy_types.go new file mode 100755 index 000000000..f7c50893c --- /dev/null +++ b/apis/netapp/v1beta2/zz_snapshotpolicy_types.go @@ -0,0 +1,365 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DailyScheduleInitParameters struct { + + // Hour of the day that the snapshots will be created, valid range is from 0 to 23. + Hour *float64 `json:"hour,omitempty" tf:"hour,omitempty"` + + // Minute of the hour that the snapshots will be created, valid range is from 0 to 59. + Minute *float64 `json:"minute,omitempty" tf:"minute,omitempty"` + + // How many hourly snapshots to keep, valid range is from 0 to 255. + SnapshotsToKeep *float64 `json:"snapshotsToKeep,omitempty" tf:"snapshots_to_keep,omitempty"` +} + +type DailyScheduleObservation struct { + + // Hour of the day that the snapshots will be created, valid range is from 0 to 23. + Hour *float64 `json:"hour,omitempty" tf:"hour,omitempty"` + + // Minute of the hour that the snapshots will be created, valid range is from 0 to 59. + Minute *float64 `json:"minute,omitempty" tf:"minute,omitempty"` + + // How many hourly snapshots to keep, valid range is from 0 to 255. + SnapshotsToKeep *float64 `json:"snapshotsToKeep,omitempty" tf:"snapshots_to_keep,omitempty"` +} + +type DailyScheduleParameters struct { + + // Hour of the day that the snapshots will be created, valid range is from 0 to 23. 
+ // +kubebuilder:validation:Optional + Hour *float64 `json:"hour" tf:"hour,omitempty"` + + // Minute of the hour that the snapshots will be created, valid range is from 0 to 59. + // +kubebuilder:validation:Optional + Minute *float64 `json:"minute" tf:"minute,omitempty"` + + // How many hourly snapshots to keep, valid range is from 0 to 255. + // +kubebuilder:validation:Optional + SnapshotsToKeep *float64 `json:"snapshotsToKeep" tf:"snapshots_to_keep,omitempty"` +} + +type HourlyScheduleInitParameters struct { + + // Minute of the hour that the snapshots will be created, valid range is from 0 to 59. + Minute *float64 `json:"minute,omitempty" tf:"minute,omitempty"` + + // How many hourly snapshots to keep, valid range is from 0 to 255. + SnapshotsToKeep *float64 `json:"snapshotsToKeep,omitempty" tf:"snapshots_to_keep,omitempty"` +} + +type HourlyScheduleObservation struct { + + // Minute of the hour that the snapshots will be created, valid range is from 0 to 59. + Minute *float64 `json:"minute,omitempty" tf:"minute,omitempty"` + + // How many hourly snapshots to keep, valid range is from 0 to 255. + SnapshotsToKeep *float64 `json:"snapshotsToKeep,omitempty" tf:"snapshots_to_keep,omitempty"` +} + +type HourlyScheduleParameters struct { + + // Minute of the hour that the snapshots will be created, valid range is from 0 to 59. + // +kubebuilder:validation:Optional + Minute *float64 `json:"minute" tf:"minute,omitempty"` + + // How many hourly snapshots to keep, valid range is from 0 to 255. + // +kubebuilder:validation:Optional + SnapshotsToKeep *float64 `json:"snapshotsToKeep" tf:"snapshots_to_keep,omitempty"` +} + +type MonthlyScheduleInitParameters struct { + + // List of the days of the month when the snapshots will be created, valid range is from 1 to 30. + // +listType=set + DaysOfMonth []*float64 `json:"daysOfMonth,omitempty" tf:"days_of_month,omitempty"` + + // Hour of the day that the snapshots will be created, valid range is from 0 to 23. 
+ Hour *float64 `json:"hour,omitempty" tf:"hour,omitempty"` + + // Minute of the hour that the snapshots will be created, valid range is from 0 to 59. + Minute *float64 `json:"minute,omitempty" tf:"minute,omitempty"` + + // How many hourly snapshots to keep, valid range is from 0 to 255. + SnapshotsToKeep *float64 `json:"snapshotsToKeep,omitempty" tf:"snapshots_to_keep,omitempty"` +} + +type MonthlyScheduleObservation struct { + + // List of the days of the month when the snapshots will be created, valid range is from 1 to 30. + // +listType=set + DaysOfMonth []*float64 `json:"daysOfMonth,omitempty" tf:"days_of_month,omitempty"` + + // Hour of the day that the snapshots will be created, valid range is from 0 to 23. + Hour *float64 `json:"hour,omitempty" tf:"hour,omitempty"` + + // Minute of the hour that the snapshots will be created, valid range is from 0 to 59. + Minute *float64 `json:"minute,omitempty" tf:"minute,omitempty"` + + // How many hourly snapshots to keep, valid range is from 0 to 255. + SnapshotsToKeep *float64 `json:"snapshotsToKeep,omitempty" tf:"snapshots_to_keep,omitempty"` +} + +type MonthlyScheduleParameters struct { + + // List of the days of the month when the snapshots will be created, valid range is from 1 to 30. + // +kubebuilder:validation:Optional + // +listType=set + DaysOfMonth []*float64 `json:"daysOfMonth" tf:"days_of_month,omitempty"` + + // Hour of the day that the snapshots will be created, valid range is from 0 to 23. + // +kubebuilder:validation:Optional + Hour *float64 `json:"hour" tf:"hour,omitempty"` + + // Minute of the hour that the snapshots will be created, valid range is from 0 to 59. + // +kubebuilder:validation:Optional + Minute *float64 `json:"minute" tf:"minute,omitempty"` + + // How many hourly snapshots to keep, valid range is from 0 to 255. 
+ // +kubebuilder:validation:Optional + SnapshotsToKeep *float64 `json:"snapshotsToKeep" tf:"snapshots_to_keep,omitempty"` +} + +type SnapshotPolicyInitParameters struct { + + // Sets a daily snapshot schedule. A daily_schedule block as defined below. + DailySchedule *DailyScheduleInitParameters `json:"dailySchedule,omitempty" tf:"daily_schedule,omitempty"` + + // Defines that the NetApp Snapshot Policy is enabled or not. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Sets an hourly snapshot schedule. A hourly_schedule block as defined below. + HourlySchedule *HourlyScheduleInitParameters `json:"hourlySchedule,omitempty" tf:"hourly_schedule,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Sets a monthly snapshot schedule. A monthly_schedule block as defined below. + MonthlySchedule *MonthlyScheduleInitParameters `json:"monthlySchedule,omitempty" tf:"monthly_schedule,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Sets a weekly snapshot schedule. A weekly_schedule block as defined below. + WeeklySchedule *WeeklyScheduleInitParameters `json:"weeklySchedule,omitempty" tf:"weekly_schedule,omitempty"` +} + +type SnapshotPolicyObservation struct { + + // The name of the NetApp Account in which the NetApp Snapshot Policy should be created. Changing this forces a new resource to be created. + AccountName *string `json:"accountName,omitempty" tf:"account_name,omitempty"` + + // Sets a daily snapshot schedule. A daily_schedule block as defined below. + DailySchedule *DailyScheduleObservation `json:"dailySchedule,omitempty" tf:"daily_schedule,omitempty"` + + // Defines that the NetApp Snapshot Policy is enabled or not. 
+ Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Sets an hourly snapshot schedule. A hourly_schedule block as defined below. + HourlySchedule *HourlyScheduleObservation `json:"hourlySchedule,omitempty" tf:"hourly_schedule,omitempty"` + + // The ID of the NetApp Snapshot. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Sets a monthly snapshot schedule. A monthly_schedule block as defined below. + MonthlySchedule *MonthlyScheduleObservation `json:"monthlySchedule,omitempty" tf:"monthly_schedule,omitempty"` + + // The name of the resource group where the NetApp Snapshot Policy should be created. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Sets a weekly snapshot schedule. A weekly_schedule block as defined below. + WeeklySchedule *WeeklyScheduleObservation `json:"weeklySchedule,omitempty" tf:"weekly_schedule,omitempty"` +} + +type SnapshotPolicyParameters struct { + + // The name of the NetApp Account in which the NetApp Snapshot Policy should be created. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/netapp/v1beta2.Account + // +kubebuilder:validation:Optional + AccountName *string `json:"accountName,omitempty" tf:"account_name,omitempty"` + + // Reference to a Account in netapp to populate accountName. + // +kubebuilder:validation:Optional + AccountNameRef *v1.Reference `json:"accountNameRef,omitempty" tf:"-"` + + // Selector for a Account in netapp to populate accountName. 
+ // +kubebuilder:validation:Optional + AccountNameSelector *v1.Selector `json:"accountNameSelector,omitempty" tf:"-"` + + // Sets a daily snapshot schedule. A daily_schedule block as defined below. + // +kubebuilder:validation:Optional + DailySchedule *DailyScheduleParameters `json:"dailySchedule,omitempty" tf:"daily_schedule,omitempty"` + + // Defines that the NetApp Snapshot Policy is enabled or not. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Sets an hourly snapshot schedule. A hourly_schedule block as defined below. + // +kubebuilder:validation:Optional + HourlySchedule *HourlyScheduleParameters `json:"hourlySchedule,omitempty" tf:"hourly_schedule,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Sets a monthly snapshot schedule. A monthly_schedule block as defined below. + // +kubebuilder:validation:Optional + MonthlySchedule *MonthlyScheduleParameters `json:"monthlySchedule,omitempty" tf:"monthly_schedule,omitempty"` + + // The name of the resource group where the NetApp Snapshot Policy should be created. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. 
+ // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Sets a weekly snapshot schedule. A weekly_schedule block as defined below. + // +kubebuilder:validation:Optional + WeeklySchedule *WeeklyScheduleParameters `json:"weeklySchedule,omitempty" tf:"weekly_schedule,omitempty"` +} + +type WeeklyScheduleInitParameters struct { + + // List of the week days using English names when the snapshots will be created. + // +listType=set + DaysOfWeek []*string `json:"daysOfWeek,omitempty" tf:"days_of_week,omitempty"` + + // Hour of the day that the snapshots will be created, valid range is from 0 to 23. + Hour *float64 `json:"hour,omitempty" tf:"hour,omitempty"` + + // Minute of the hour that the snapshots will be created, valid range is from 0 to 59. + Minute *float64 `json:"minute,omitempty" tf:"minute,omitempty"` + + // How many hourly snapshots to keep, valid range is from 0 to 255. + SnapshotsToKeep *float64 `json:"snapshotsToKeep,omitempty" tf:"snapshots_to_keep,omitempty"` +} + +type WeeklyScheduleObservation struct { + + // List of the week days using English names when the snapshots will be created. + // +listType=set + DaysOfWeek []*string `json:"daysOfWeek,omitempty" tf:"days_of_week,omitempty"` + + // Hour of the day that the snapshots will be created, valid range is from 0 to 23. + Hour *float64 `json:"hour,omitempty" tf:"hour,omitempty"` + + // Minute of the hour that the snapshots will be created, valid range is from 0 to 59. + Minute *float64 `json:"minute,omitempty" tf:"minute,omitempty"` + + // How many hourly snapshots to keep, valid range is from 0 to 255. 
+ SnapshotsToKeep *float64 `json:"snapshotsToKeep,omitempty" tf:"snapshots_to_keep,omitempty"` +} + +type WeeklyScheduleParameters struct { + + // List of the week days using English names when the snapshots will be created. + // +kubebuilder:validation:Optional + // +listType=set + DaysOfWeek []*string `json:"daysOfWeek" tf:"days_of_week,omitempty"` + + // Hour of the day that the snapshots will be created, valid range is from 0 to 23. + // +kubebuilder:validation:Optional + Hour *float64 `json:"hour" tf:"hour,omitempty"` + + // Minute of the hour that the snapshots will be created, valid range is from 0 to 59. + // +kubebuilder:validation:Optional + Minute *float64 `json:"minute" tf:"minute,omitempty"` + + // How many hourly snapshots to keep, valid range is from 0 to 255. + // +kubebuilder:validation:Optional + SnapshotsToKeep *float64 `json:"snapshotsToKeep" tf:"snapshots_to_keep,omitempty"` +} + +// SnapshotPolicySpec defines the desired state of SnapshotPolicy +type SnapshotPolicySpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider SnapshotPolicyParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider SnapshotPolicyInitParameters `json:"initProvider,omitempty"` +} + +// SnapshotPolicyStatus defines the observed state of SnapshotPolicy. 
+type SnapshotPolicyStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SnapshotPolicyObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// SnapshotPolicy is the Schema for the SnapshotPolicys API. Manages a NetApp Snapshot Policy. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type SnapshotPolicy struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.enabled) || (has(self.initProvider) && has(self.initProvider.enabled))",message="spec.forProvider.enabled is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + Spec SnapshotPolicySpec `json:"spec"` + Status SnapshotPolicyStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SnapshotPolicyList contains a list of SnapshotPolicys +type SnapshotPolicyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SnapshotPolicy `json:"items"` +} + +// Repository type metadata. 
+var ( + SnapshotPolicy_Kind = "SnapshotPolicy" + SnapshotPolicy_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: SnapshotPolicy_Kind}.String() + SnapshotPolicy_KindAPIVersion = SnapshotPolicy_Kind + "." + CRDGroupVersion.String() + SnapshotPolicy_GroupVersionKind = CRDGroupVersion.WithKind(SnapshotPolicy_Kind) +) + +func init() { + SchemeBuilder.Register(&SnapshotPolicy{}, &SnapshotPolicyList{}) +} diff --git a/apis/netapp/v1beta2/zz_volume_terraformed.go b/apis/netapp/v1beta2/zz_volume_terraformed.go new file mode 100755 index 000000000..77b5ec978 --- /dev/null +++ b/apis/netapp/v1beta2/zz_volume_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Volume +func (mg *Volume) GetTerraformResourceType() string { + return "azurerm_netapp_volume" +} + +// GetConnectionDetailsMapping for this Volume +func (tr *Volume) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Volume +func (tr *Volume) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Volume +func (tr *Volume) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Volume +func (tr *Volume) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of 
this Volume +func (tr *Volume) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Volume +func (tr *Volume) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Volume +func (tr *Volume) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Volume +func (tr *Volume) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Volume using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *Volume) LateInitialize(attrs []byte) (bool, error) { + params := &VolumeParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Volume) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/netapp/v1beta2/zz_volume_types.go b/apis/netapp/v1beta2/zz_volume_types.go new file mode 100755 index 000000000..a6b125d0c --- /dev/null +++ b/apis/netapp/v1beta2/zz_volume_types.go @@ -0,0 +1,577 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DataProtectionReplicationInitParameters struct { + + // The endpoint type, default value is dst for destination. + EndpointType *string `json:"endpointType,omitempty" tf:"endpoint_type,omitempty"` + + // Location of the primary volume. Changing this forces a new resource to be created. + RemoteVolumeLocation *string `json:"remoteVolumeLocation,omitempty" tf:"remote_volume_location,omitempty"` + + // Resource ID of the primary volume. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/netapp/v1beta2.Volume + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + RemoteVolumeResourceID *string `json:"remoteVolumeResourceId,omitempty" tf:"remote_volume_resource_id,omitempty"` + + // Reference to a Volume in netapp to populate remoteVolumeResourceId. + // +kubebuilder:validation:Optional + RemoteVolumeResourceIDRef *v1.Reference `json:"remoteVolumeResourceIdRef,omitempty" tf:"-"` + + // Selector for a Volume in netapp to populate remoteVolumeResourceId. + // +kubebuilder:validation:Optional + RemoteVolumeResourceIDSelector *v1.Selector `json:"remoteVolumeResourceIdSelector,omitempty" tf:"-"` + + // Replication frequency, supported values are '10minutes', 'hourly', 'daily', values are case sensitive. + ReplicationFrequency *string `json:"replicationFrequency,omitempty" tf:"replication_frequency,omitempty"` +} + +type DataProtectionReplicationObservation struct { + + // The endpoint type, default value is dst for destination. + EndpointType *string `json:"endpointType,omitempty" tf:"endpoint_type,omitempty"` + + // Location of the primary volume. Changing this forces a new resource to be created. + RemoteVolumeLocation *string `json:"remoteVolumeLocation,omitempty" tf:"remote_volume_location,omitempty"` + + // Resource ID of the primary volume. + RemoteVolumeResourceID *string `json:"remoteVolumeResourceId,omitempty" tf:"remote_volume_resource_id,omitempty"` + + // Replication frequency, supported values are '10minutes', 'hourly', 'daily', values are case sensitive. + ReplicationFrequency *string `json:"replicationFrequency,omitempty" tf:"replication_frequency,omitempty"` +} + +type DataProtectionReplicationParameters struct { + + // The endpoint type, default value is dst for destination. 
+ // +kubebuilder:validation:Optional + EndpointType *string `json:"endpointType,omitempty" tf:"endpoint_type,omitempty"` + + // Location of the primary volume. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + RemoteVolumeLocation *string `json:"remoteVolumeLocation" tf:"remote_volume_location,omitempty"` + + // Resource ID of the primary volume. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/netapp/v1beta2.Volume + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + RemoteVolumeResourceID *string `json:"remoteVolumeResourceId,omitempty" tf:"remote_volume_resource_id,omitempty"` + + // Reference to a Volume in netapp to populate remoteVolumeResourceId. + // +kubebuilder:validation:Optional + RemoteVolumeResourceIDRef *v1.Reference `json:"remoteVolumeResourceIdRef,omitempty" tf:"-"` + + // Selector for a Volume in netapp to populate remoteVolumeResourceId. + // +kubebuilder:validation:Optional + RemoteVolumeResourceIDSelector *v1.Selector `json:"remoteVolumeResourceIdSelector,omitempty" tf:"-"` + + // Replication frequency, supported values are '10minutes', 'hourly', 'daily', values are case sensitive. + // +kubebuilder:validation:Optional + ReplicationFrequency *string `json:"replicationFrequency" tf:"replication_frequency,omitempty"` +} + +type DataProtectionSnapshotPolicyInitParameters struct { + + // Resource ID of the snapshot policy to apply to the volume. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/netapp/v1beta2.SnapshotPolicy + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + SnapshotPolicyID *string `json:"snapshotPolicyId,omitempty" tf:"snapshot_policy_id,omitempty"` + + // Reference to a SnapshotPolicy in netapp to populate snapshotPolicyId. 
+ // +kubebuilder:validation:Optional + SnapshotPolicyIDRef *v1.Reference `json:"snapshotPolicyIdRef,omitempty" tf:"-"` + + // Selector for a SnapshotPolicy in netapp to populate snapshotPolicyId. + // +kubebuilder:validation:Optional + SnapshotPolicyIDSelector *v1.Selector `json:"snapshotPolicyIdSelector,omitempty" tf:"-"` +} + +type DataProtectionSnapshotPolicyObservation struct { + + // Resource ID of the snapshot policy to apply to the volume. + SnapshotPolicyID *string `json:"snapshotPolicyId,omitempty" tf:"snapshot_policy_id,omitempty"` +} + +type DataProtectionSnapshotPolicyParameters struct { + + // Resource ID of the snapshot policy to apply to the volume. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/netapp/v1beta2.SnapshotPolicy + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + SnapshotPolicyID *string `json:"snapshotPolicyId,omitempty" tf:"snapshot_policy_id,omitempty"` + + // Reference to a SnapshotPolicy in netapp to populate snapshotPolicyId. + // +kubebuilder:validation:Optional + SnapshotPolicyIDRef *v1.Reference `json:"snapshotPolicyIdRef,omitempty" tf:"-"` + + // Selector for a SnapshotPolicy in netapp to populate snapshotPolicyId. + // +kubebuilder:validation:Optional + SnapshotPolicyIDSelector *v1.Selector `json:"snapshotPolicyIdSelector,omitempty" tf:"-"` +} + +type ExportPolicyRuleInitParameters struct { + + // A list of allowed clients IPv4 addresses. + // +listType=set + AllowedClients []*string `json:"allowedClients,omitempty" tf:"allowed_clients,omitempty"` + + // A list of allowed protocols. Valid values include CIFS, NFSv3, or NFSv4.1. Only one value is supported at this time. This replaces the previous arguments: cifs_enabled, nfsv3_enabled and nfsv4_enabled. 
+ ProtocolsEnabled []*string `json:"protocolsEnabled,omitempty" tf:"protocols_enabled,omitempty"` + + // Is root access permitted to this volume? + RootAccessEnabled *bool `json:"rootAccessEnabled,omitempty" tf:"root_access_enabled,omitempty"` + + // The index number of the rule. + RuleIndex *float64 `json:"ruleIndex,omitempty" tf:"rule_index,omitempty"` + + // Is the file system on unix read only? + UnixReadOnly *bool `json:"unixReadOnly,omitempty" tf:"unix_read_only,omitempty"` + + // Is the file system on unix read and write? + UnixReadWrite *bool `json:"unixReadWrite,omitempty" tf:"unix_read_write,omitempty"` +} + +type ExportPolicyRuleObservation struct { + + // A list of allowed clients IPv4 addresses. + // +listType=set + AllowedClients []*string `json:"allowedClients,omitempty" tf:"allowed_clients,omitempty"` + + // A list of allowed protocols. Valid values include CIFS, NFSv3, or NFSv4.1. Only one value is supported at this time. This replaces the previous arguments: cifs_enabled, nfsv3_enabled and nfsv4_enabled. + ProtocolsEnabled []*string `json:"protocolsEnabled,omitempty" tf:"protocols_enabled,omitempty"` + + // Is root access permitted to this volume? + RootAccessEnabled *bool `json:"rootAccessEnabled,omitempty" tf:"root_access_enabled,omitempty"` + + // The index number of the rule. + RuleIndex *float64 `json:"ruleIndex,omitempty" tf:"rule_index,omitempty"` + + // Is the file system on unix read only? + UnixReadOnly *bool `json:"unixReadOnly,omitempty" tf:"unix_read_only,omitempty"` + + // Is the file system on unix read and write? + UnixReadWrite *bool `json:"unixReadWrite,omitempty" tf:"unix_read_write,omitempty"` +} + +type ExportPolicyRuleParameters struct { + + // A list of allowed clients IPv4 addresses. + // +kubebuilder:validation:Optional + // +listType=set + AllowedClients []*string `json:"allowedClients" tf:"allowed_clients,omitempty"` + + // A list of allowed protocols. Valid values include CIFS, NFSv3, or NFSv4.1. 
Only one value is supported at this time. This replaces the previous arguments: cifs_enabled, nfsv3_enabled and nfsv4_enabled. + // +kubebuilder:validation:Optional + ProtocolsEnabled []*string `json:"protocolsEnabled,omitempty" tf:"protocols_enabled,omitempty"` + + // Is root access permitted to this volume? + // +kubebuilder:validation:Optional + RootAccessEnabled *bool `json:"rootAccessEnabled,omitempty" tf:"root_access_enabled,omitempty"` + + // The index number of the rule. + // +kubebuilder:validation:Optional + RuleIndex *float64 `json:"ruleIndex" tf:"rule_index,omitempty"` + + // Is the file system on unix read only? + // +kubebuilder:validation:Optional + UnixReadOnly *bool `json:"unixReadOnly,omitempty" tf:"unix_read_only,omitempty"` + + // Is the file system on unix read and write? + // +kubebuilder:validation:Optional + UnixReadWrite *bool `json:"unixReadWrite,omitempty" tf:"unix_read_write,omitempty"` +} + +type VolumeInitParameters struct { + + // Is the NetApp Volume enabled for Azure VMware Solution (AVS) datastore purpose. Defaults to false. Changing this forces a new resource to be created. + AzureVMwareDataStoreEnabled *bool `json:"azureVmwareDataStoreEnabled,omitempty" tf:"azure_vmware_data_store_enabled,omitempty"` + + // Creates volume from snapshot. Following properties must be the same as the original volume where the snapshot was taken from: protocols, subnet_id, location, service_level, resource_group_name, account_name and pool_name. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/netapp/v1beta1.Snapshot + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + CreateFromSnapshotResourceID *string `json:"createFromSnapshotResourceId,omitempty" tf:"create_from_snapshot_resource_id,omitempty"` + + // Reference to a Snapshot in netapp to populate createFromSnapshotResourceId. 
+ // +kubebuilder:validation:Optional + CreateFromSnapshotResourceIDRef *v1.Reference `json:"createFromSnapshotResourceIdRef,omitempty" tf:"-"` + + // Selector for a Snapshot in netapp to populate createFromSnapshotResourceId. + // +kubebuilder:validation:Optional + CreateFromSnapshotResourceIDSelector *v1.Selector `json:"createFromSnapshotResourceIdSelector,omitempty" tf:"-"` + + // A data_protection_replication block as defined below. Changing this forces a new resource to be created. + DataProtectionReplication *DataProtectionReplicationInitParameters `json:"dataProtectionReplication,omitempty" tf:"data_protection_replication,omitempty"` + + // A data_protection_snapshot_policy block as defined below. + DataProtectionSnapshotPolicy *DataProtectionSnapshotPolicyInitParameters `json:"dataProtectionSnapshotPolicy,omitempty" tf:"data_protection_snapshot_policy,omitempty"` + + // The encryption key source, it can be Microsoft.NetApp for platform managed keys or Microsoft.KeyVault for customer-managed keys. This is required with key_vault_private_endpoint_id. Changing this forces a new resource to be created. + EncryptionKeySource *string `json:"encryptionKeySource,omitempty" tf:"encryption_key_source,omitempty"` + + // One or more export_policy_rule block defined below. + ExportPolicyRule []ExportPolicyRuleInitParameters `json:"exportPolicyRule,omitempty" tf:"export_policy_rule,omitempty"` + + // The Private Endpoint ID for Key Vault, which is required when using customer-managed keys. This is required with encryption_key_source. Changing this forces a new resource to be created. + KeyVaultPrivateEndpointID *string `json:"keyVaultPrivateEndpointId,omitempty" tf:"key_vault_private_endpoint_id,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. 
+ Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Indicates which network feature to use, accepted values are Basic or Standard, it defaults to Basic if not defined. This is a feature in public preview and for more information about it and how to register, please refer to Configure network features for an Azure NetApp Files volume. + NetworkFeatures *string `json:"networkFeatures,omitempty" tf:"network_features,omitempty"` + + // The target volume protocol expressed as a list. Supported single value include CIFS, NFSv3, or NFSv4.1. If argument is not defined it will default to NFSv3. Changing this forces a new resource to be created and data will be lost. Dual protocol scenario is supported for CIFS and NFSv3, for more information, please refer to Create a dual-protocol volume for Azure NetApp Files document. + // +listType=set + Protocols []*string `json:"protocols,omitempty" tf:"protocols,omitempty"` + + // Limits enumeration of files and folders (that is, listing the contents) in SMB only to users with allowed access on the share. For instance, if a user doesn't have access to read a file or folder in a share with access-based enumeration enabled, then the file or folder doesn't show up in directory listings. Defaults to false. For more information, please refer to Understand NAS share permissions in Azure NetApp Files + SMBAccessBasedEnumerationEnabled *bool `json:"smbAccessBasedEnumerationEnabled,omitempty" tf:"smb_access_based_enumeration_enabled,omitempty"` + + // Limits clients from browsing for an SMB share by hiding the share from view in Windows Explorer or when listing shares in "net view." Only end users that know the absolute paths to the share are able to find the share. Defaults to false. 
For more information, please refer to Understand NAS share permissions in Azure NetApp Files + SMBNonBrowsableEnabled *bool `json:"smbNonBrowsableEnabled,omitempty" tf:"smb_non_browsable_enabled,omitempty"` + + // Volume security style, accepted values are unix or ntfs. If not provided, single-protocol volume is created defaulting to unix if it is NFSv3 or NFSv4.1 volume, if CIFS, it will default to ntfs. In a dual-protocol volume, if not provided, its value will be ntfs. Changing this forces a new resource to be created. + SecurityStyle *string `json:"securityStyle,omitempty" tf:"security_style,omitempty"` + + // The target performance of the file system. Valid values include Premium, Standard, or Ultra. Changing this forces a new resource to be created. + ServiceLevel *string `json:"serviceLevel,omitempty" tf:"service_level,omitempty"` + + // Specifies whether the .snapshot (NFS clients) or ~snapshot (SMB clients) path of a volume is visible, default value is true. + SnapshotDirectoryVisible *bool `json:"snapshotDirectoryVisible,omitempty" tf:"snapshot_directory_visible,omitempty"` + + // The maximum Storage Quota allowed for a file system in Gigabytes. + StorageQuotaInGb *float64 `json:"storageQuotaInGb,omitempty" tf:"storage_quota_in_gb,omitempty"` + + // The ID of the Subnet the NetApp Volume resides in, which must have the Microsoft.NetApp/volumes delegation. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. 
+ // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Throughput of this volume in Mibps. + ThroughputInMibps *float64 `json:"throughputInMibps,omitempty" tf:"throughput_in_mibps,omitempty"` + + // A unique file path for the volume. Used when creating mount targets. Changing this forces a new resource to be created. + VolumePath *string `json:"volumePath,omitempty" tf:"volume_path,omitempty"` + + // Specifies the Availability Zone in which the Volume should be located. Possible values are 1, 2 and 3. Changing this forces a new resource to be created. This feature is currently in preview, for more information on how to enable it, please refer to Manage availability zone volume placement for Azure NetApp Files. + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` +} + +type VolumeObservation struct { + + // The name of the NetApp account in which the NetApp Pool should be created. Changing this forces a new resource to be created. + AccountName *string `json:"accountName,omitempty" tf:"account_name,omitempty"` + + // Is the NetApp Volume enabled for Azure VMware Solution (AVS) datastore purpose. Defaults to false. Changing this forces a new resource to be created. + AzureVMwareDataStoreEnabled *bool `json:"azureVmwareDataStoreEnabled,omitempty" tf:"azure_vmware_data_store_enabled,omitempty"` + + // Creates volume from snapshot. Following properties must be the same as the original volume where the snapshot was taken from: protocols, subnet_id, location, service_level, resource_group_name, account_name and pool_name. Changing this forces a new resource to be created. 
+ CreateFromSnapshotResourceID *string `json:"createFromSnapshotResourceId,omitempty" tf:"create_from_snapshot_resource_id,omitempty"` + + // A data_protection_replication block as defined below. Changing this forces a new resource to be created. + DataProtectionReplication *DataProtectionReplicationObservation `json:"dataProtectionReplication,omitempty" tf:"data_protection_replication,omitempty"` + + // A data_protection_snapshot_policy block as defined below. + DataProtectionSnapshotPolicy *DataProtectionSnapshotPolicyObservation `json:"dataProtectionSnapshotPolicy,omitempty" tf:"data_protection_snapshot_policy,omitempty"` + + // The encryption key source, it can be Microsoft.NetApp for platform managed keys or Microsoft.KeyVault for customer-managed keys. This is required with key_vault_private_endpoint_id. Changing this forces a new resource to be created. + EncryptionKeySource *string `json:"encryptionKeySource,omitempty" tf:"encryption_key_source,omitempty"` + + // One or more export_policy_rule block defined below. + ExportPolicyRule []ExportPolicyRuleObservation `json:"exportPolicyRule,omitempty" tf:"export_policy_rule,omitempty"` + + // The ID of the NetApp Volume. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The Private Endpoint ID for Key Vault, which is required when using customer-managed keys. This is required with encryption_key_source. Changing this forces a new resource to be created. + KeyVaultPrivateEndpointID *string `json:"keyVaultPrivateEndpointId,omitempty" tf:"key_vault_private_endpoint_id,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A list of IPv4 Addresses which should be used to mount the volume. 
+ MountIPAddresses []*string `json:"mountIpAddresses,omitempty" tf:"mount_ip_addresses,omitempty"` + + // Indicates which network feature to use, accepted values are Basic or Standard, it defaults to Basic if not defined. This is a feature in public preview and for more information about it and how to register, please refer to Configure network features for an Azure NetApp Files volume. + NetworkFeatures *string `json:"networkFeatures,omitempty" tf:"network_features,omitempty"` + + // The name of the NetApp pool in which the NetApp Volume should be created. Changing this forces a new resource to be created. + PoolName *string `json:"poolName,omitempty" tf:"pool_name,omitempty"` + + // The target volume protocol expressed as a list. Supported single value include CIFS, NFSv3, or NFSv4.1. If argument is not defined it will default to NFSv3. Changing this forces a new resource to be created and data will be lost. Dual protocol scenario is supported for CIFS and NFSv3, for more information, please refer to Create a dual-protocol volume for Azure NetApp Files document. + // +listType=set + Protocols []*string `json:"protocols,omitempty" tf:"protocols,omitempty"` + + // The name of the resource group where the NetApp Volume should be created. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Limits enumeration of files and folders (that is, listing the contents) in SMB only to users with allowed access on the share. For instance, if a user doesn't have access to read a file or folder in a share with access-based enumeration enabled, then the file or folder doesn't show up in directory listings. Defaults to false. 
For more information, please refer to Understand NAS share permissions in Azure NetApp Files + SMBAccessBasedEnumerationEnabled *bool `json:"smbAccessBasedEnumerationEnabled,omitempty" tf:"smb_access_based_enumeration_enabled,omitempty"` + + // Limits clients from browsing for an SMB share by hiding the share from view in Windows Explorer or when listing shares in "net view." Only end users that know the absolute paths to the share are able to find the share. Defaults to false. For more information, please refer to Understand NAS share permissions in Azure NetApp Files + SMBNonBrowsableEnabled *bool `json:"smbNonBrowsableEnabled,omitempty" tf:"smb_non_browsable_enabled,omitempty"` + + // Volume security style, accepted values are unix or ntfs. If not provided, single-protocol volume is created defaulting to unix if it is NFSv3 or NFSv4.1 volume, if CIFS, it will default to ntfs. In a dual-protocol volume, if not provided, its value will be ntfs. Changing this forces a new resource to be created. + SecurityStyle *string `json:"securityStyle,omitempty" tf:"security_style,omitempty"` + + // The target performance of the file system. Valid values include Premium, Standard, or Ultra. Changing this forces a new resource to be created. + ServiceLevel *string `json:"serviceLevel,omitempty" tf:"service_level,omitempty"` + + // Specifies whether the .snapshot (NFS clients) or ~snapshot (SMB clients) path of a volume is visible, default value is true. + SnapshotDirectoryVisible *bool `json:"snapshotDirectoryVisible,omitempty" tf:"snapshot_directory_visible,omitempty"` + + // The maximum Storage Quota allowed for a file system in Gigabytes. + StorageQuotaInGb *float64 `json:"storageQuotaInGb,omitempty" tf:"storage_quota_in_gb,omitempty"` + + // The ID of the Subnet the NetApp Volume resides in, which must have the Microsoft.NetApp/volumes delegation. Changing this forces a new resource to be created. 
+ SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Throughput of this volume in Mibps. + ThroughputInMibps *float64 `json:"throughputInMibps,omitempty" tf:"throughput_in_mibps,omitempty"` + + // A unique file path for the volume. Used when creating mount targets. Changing this forces a new resource to be created. + VolumePath *string `json:"volumePath,omitempty" tf:"volume_path,omitempty"` + + // Specifies the Availability Zone in which the Volume should be located. Possible values are 1, 2 and 3. Changing this forces a new resource to be created. This feature is currently in preview, for more information on how to enable it, please refer to Manage availability zone volume placement for Azure NetApp Files. + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` +} + +type VolumeParameters struct { + + // The name of the NetApp account in which the NetApp Pool should be created. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/netapp/v1beta2.Account + // +kubebuilder:validation:Optional + AccountName *string `json:"accountName,omitempty" tf:"account_name,omitempty"` + + // Reference to a Account in netapp to populate accountName. + // +kubebuilder:validation:Optional + AccountNameRef *v1.Reference `json:"accountNameRef,omitempty" tf:"-"` + + // Selector for a Account in netapp to populate accountName. + // +kubebuilder:validation:Optional + AccountNameSelector *v1.Selector `json:"accountNameSelector,omitempty" tf:"-"` + + // Is the NetApp Volume enabled for Azure VMware Solution (AVS) datastore purpose. Defaults to false. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + AzureVMwareDataStoreEnabled *bool `json:"azureVmwareDataStoreEnabled,omitempty" tf:"azure_vmware_data_store_enabled,omitempty"` + + // Creates volume from snapshot. Following properties must be the same as the original volume where the snapshot was taken from: protocols, subnet_id, location, service_level, resource_group_name, account_name and pool_name. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/netapp/v1beta1.Snapshot + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + CreateFromSnapshotResourceID *string `json:"createFromSnapshotResourceId,omitempty" tf:"create_from_snapshot_resource_id,omitempty"` + + // Reference to a Snapshot in netapp to populate createFromSnapshotResourceId. + // +kubebuilder:validation:Optional + CreateFromSnapshotResourceIDRef *v1.Reference `json:"createFromSnapshotResourceIdRef,omitempty" tf:"-"` + + // Selector for a Snapshot in netapp to populate createFromSnapshotResourceId. + // +kubebuilder:validation:Optional + CreateFromSnapshotResourceIDSelector *v1.Selector `json:"createFromSnapshotResourceIdSelector,omitempty" tf:"-"` + + // A data_protection_replication block as defined below. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + DataProtectionReplication *DataProtectionReplicationParameters `json:"dataProtectionReplication,omitempty" tf:"data_protection_replication,omitempty"` + + // A data_protection_snapshot_policy block as defined below. 
+ // +kubebuilder:validation:Optional + DataProtectionSnapshotPolicy *DataProtectionSnapshotPolicyParameters `json:"dataProtectionSnapshotPolicy,omitempty" tf:"data_protection_snapshot_policy,omitempty"` + + // The encryption key source, it can be Microsoft.NetApp for platform managed keys or Microsoft.KeyVault for customer-managed keys. This is required with key_vault_private_endpoint_id. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + EncryptionKeySource *string `json:"encryptionKeySource,omitempty" tf:"encryption_key_source,omitempty"` + + // One or more export_policy_rule block defined below. + // +kubebuilder:validation:Optional + ExportPolicyRule []ExportPolicyRuleParameters `json:"exportPolicyRule,omitempty" tf:"export_policy_rule,omitempty"` + + // The Private Endpoint ID for Key Vault, which is required when using customer-managed keys. This is required with encryption_key_source. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + KeyVaultPrivateEndpointID *string `json:"keyVaultPrivateEndpointId,omitempty" tf:"key_vault_private_endpoint_id,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Indicates which network feature to use, accepted values are Basic or Standard, it defaults to Basic if not defined. This is a feature in public preview and for more information about it and how to register, please refer to Configure network features for an Azure NetApp Files volume. + // +kubebuilder:validation:Optional + NetworkFeatures *string `json:"networkFeatures,omitempty" tf:"network_features,omitempty"` + + // The name of the NetApp pool in which the NetApp Volume should be created. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/netapp/v1beta1.Pool + // +kubebuilder:validation:Optional + PoolName *string `json:"poolName,omitempty" tf:"pool_name,omitempty"` + + // Reference to a Pool in netapp to populate poolName. + // +kubebuilder:validation:Optional + PoolNameRef *v1.Reference `json:"poolNameRef,omitempty" tf:"-"` + + // Selector for a Pool in netapp to populate poolName. + // +kubebuilder:validation:Optional + PoolNameSelector *v1.Selector `json:"poolNameSelector,omitempty" tf:"-"` + + // The target volume protocol expressed as a list. Supported single value include CIFS, NFSv3, or NFSv4.1. If argument is not defined it will default to NFSv3. Changing this forces a new resource to be created and data will be lost. Dual protocol scenario is supported for CIFS and NFSv3, for more information, please refer to Create a dual-protocol volume for Azure NetApp Files document. + // +kubebuilder:validation:Optional + // +listType=set + Protocols []*string `json:"protocols,omitempty" tf:"protocols,omitempty"` + + // The name of the resource group where the NetApp Volume should be created. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // Limits enumeration of files and folders (that is, listing the contents) in SMB only to users with allowed access on the share. 
For instance, if a user doesn't have access to read a file or folder in a share with access-based enumeration enabled, then the file or folder doesn't show up in directory listings. Defaults to false. For more information, please refer to Understand NAS share permissions in Azure NetApp Files + // +kubebuilder:validation:Optional + SMBAccessBasedEnumerationEnabled *bool `json:"smbAccessBasedEnumerationEnabled,omitempty" tf:"smb_access_based_enumeration_enabled,omitempty"` + + // Limits clients from browsing for an SMB share by hiding the share from view in Windows Explorer or when listing shares in "net view." Only end users that know the absolute paths to the share are able to find the share. Defaults to false. For more information, please refer to Understand NAS share permissions in Azure NetApp Files + // +kubebuilder:validation:Optional + SMBNonBrowsableEnabled *bool `json:"smbNonBrowsableEnabled,omitempty" tf:"smb_non_browsable_enabled,omitempty"` + + // Volume security style, accepted values are unix or ntfs. If not provided, single-protocol volume is created defaulting to unix if it is NFSv3 or NFSv4.1 volume, if CIFS, it will default to ntfs. In a dual-protocol volume, if not provided, its value will be ntfs. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + SecurityStyle *string `json:"securityStyle,omitempty" tf:"security_style,omitempty"` + + // The target performance of the file system. Valid values include Premium, Standard, or Ultra. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ServiceLevel *string `json:"serviceLevel,omitempty" tf:"service_level,omitempty"` + + // Specifies whether the .snapshot (NFS clients) or ~snapshot (SMB clients) path of a volume is visible, default value is true. 
+ // +kubebuilder:validation:Optional + SnapshotDirectoryVisible *bool `json:"snapshotDirectoryVisible,omitempty" tf:"snapshot_directory_visible,omitempty"` + + // The maximum Storage Quota allowed for a file system in Gigabytes. + // +kubebuilder:validation:Optional + StorageQuotaInGb *float64 `json:"storageQuotaInGb,omitempty" tf:"storage_quota_in_gb,omitempty"` + + // The ID of the Subnet the NetApp Volume resides in, which must have the Microsoft.NetApp/volumes delegation. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Throughput of this volume in Mibps. + // +kubebuilder:validation:Optional + ThroughputInMibps *float64 `json:"throughputInMibps,omitempty" tf:"throughput_in_mibps,omitempty"` + + // A unique file path for the volume. Used when creating mount targets. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + VolumePath *string `json:"volumePath,omitempty" tf:"volume_path,omitempty"` + + // Specifies the Availability Zone in which the Volume should be located. Possible values are 1, 2 and 3. Changing this forces a new resource to be created. 
This feature is currently in preview, for more information on how to enable it, please refer to Manage availability zone volume placement for Azure NetApp Files. + // +kubebuilder:validation:Optional + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` +} + +// VolumeSpec defines the desired state of Volume +type VolumeSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider VolumeParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider VolumeInitParameters `json:"initProvider,omitempty"` +} + +// VolumeStatus defines the observed state of Volume. +type VolumeStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider VolumeObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Volume is the Schema for the Volumes API. Manages a NetApp Volume. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type Volume struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.serviceLevel) || (has(self.initProvider) && has(self.initProvider.serviceLevel))",message="spec.forProvider.serviceLevel is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.storageQuotaInGb) || (has(self.initProvider) && has(self.initProvider.storageQuotaInGb))",message="spec.forProvider.storageQuotaInGb is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.volumePath) || (has(self.initProvider) && has(self.initProvider.volumePath))",message="spec.forProvider.volumePath is a required parameter" + Spec VolumeSpec `json:"spec"` + Status VolumeStatus 
`json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// VolumeList contains a list of Volumes +type VolumeList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Volume `json:"items"` +} + +// Repository type metadata. +var ( + Volume_Kind = "Volume" + Volume_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Volume_Kind}.String() + Volume_KindAPIVersion = Volume_Kind + "." + CRDGroupVersion.String() + Volume_GroupVersionKind = CRDGroupVersion.WithKind(Volume_Kind) +) + +func init() { + SchemeBuilder.Register(&Volume{}, &VolumeList{}) +} diff --git a/apis/network/v1beta1/zz_dnsaaaarecord_types.go b/apis/network/v1beta1/zz_dnsaaaarecord_types.go index 5768b34ec..d60468d46 100755 --- a/apis/network/v1beta1/zz_dnsaaaarecord_types.go +++ b/apis/network/v1beta1/zz_dnsaaaarecord_types.go @@ -113,7 +113,7 @@ type DNSAAAARecordParameters struct { TargetResourceIDSelector *v1.Selector `json:"targetResourceIdSelector,omitempty" tf:"-"` // Specifies the DNS Zone where the resource exists. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.DNSZone + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.DNSZone // +kubebuilder:validation:Optional ZoneName *string `json:"zoneName,omitempty" tf:"zone_name,omitempty"` diff --git a/apis/network/v1beta1/zz_dnsarecord_types.go b/apis/network/v1beta1/zz_dnsarecord_types.go index a4a3bf560..bb9c6af4e 100755 --- a/apis/network/v1beta1/zz_dnsarecord_types.go +++ b/apis/network/v1beta1/zz_dnsarecord_types.go @@ -113,7 +113,7 @@ type DNSARecordParameters struct { TargetResourceIDSelector *v1.Selector `json:"targetResourceIdSelector,omitempty" tf:"-"` // Specifies the DNS Zone where the resource exists. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.DNSZone + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.DNSZone // +kubebuilder:validation:Optional ZoneName *string `json:"zoneName,omitempty" tf:"zone_name,omitempty"` diff --git a/apis/network/v1beta1/zz_dnscaarecord_types.go b/apis/network/v1beta1/zz_dnscaarecord_types.go index 3aac3e604..fe96bf01e 100755 --- a/apis/network/v1beta1/zz_dnscaarecord_types.go +++ b/apis/network/v1beta1/zz_dnscaarecord_types.go @@ -80,7 +80,7 @@ type DNSCAARecordParameters struct { Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` // Specifies the DNS Zone where the resource exists. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.DNSZone + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.DNSZone // +kubebuilder:validation:Optional ZoneName *string `json:"zoneName,omitempty" tf:"zone_name,omitempty"` diff --git a/apis/network/v1beta1/zz_dnscnamerecord_types.go b/apis/network/v1beta1/zz_dnscnamerecord_types.go index bf6e5c657..d6d7fbeaf 100755 --- a/apis/network/v1beta1/zz_dnscnamerecord_types.go +++ b/apis/network/v1beta1/zz_dnscnamerecord_types.go @@ -110,7 +110,7 @@ type DNSCNAMERecordParameters struct { TargetResourceIDSelector *v1.Selector `json:"targetResourceIdSelector,omitempty" tf:"-"` // Specifies the DNS Zone where the resource exists. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.DNSZone + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.DNSZone // +kubebuilder:validation:Optional ZoneName *string `json:"zoneName,omitempty" tf:"zone_name,omitempty"` diff --git a/apis/network/v1beta1/zz_dnsmxrecord_types.go b/apis/network/v1beta1/zz_dnsmxrecord_types.go index 60cabb3c5..3e5600130 100755 --- a/apis/network/v1beta1/zz_dnsmxrecord_types.go +++ b/apis/network/v1beta1/zz_dnsmxrecord_types.go @@ -80,7 +80,7 @@ type DNSMXRecordParameters struct { Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` // Specifies the DNS Zone where the resource exists. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.DNSZone + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.DNSZone // +kubebuilder:validation:Optional ZoneName *string `json:"zoneName,omitempty" tf:"zone_name,omitempty"` diff --git a/apis/network/v1beta1/zz_dnsnsrecord_types.go b/apis/network/v1beta1/zz_dnsnsrecord_types.go index eccafd342..8a6484409 100755 --- a/apis/network/v1beta1/zz_dnsnsrecord_types.go +++ b/apis/network/v1beta1/zz_dnsnsrecord_types.go @@ -80,7 +80,7 @@ type DNSNSRecordParameters struct { Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` // Specifies the DNS Zone where the resource exists. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.DNSZone + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.DNSZone // +kubebuilder:validation:Optional ZoneName *string `json:"zoneName,omitempty" tf:"zone_name,omitempty"` diff --git a/apis/network/v1beta1/zz_dnsptrrecord_types.go b/apis/network/v1beta1/zz_dnsptrrecord_types.go index 5bcdc46da..5b45bb8e6 100755 --- a/apis/network/v1beta1/zz_dnsptrrecord_types.go +++ b/apis/network/v1beta1/zz_dnsptrrecord_types.go @@ -83,7 +83,7 @@ type DNSPTRRecordParameters struct { Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` // Specifies the DNS Zone where the resource exists. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.DNSZone + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.DNSZone // +kubebuilder:validation:Optional ZoneName *string `json:"zoneName,omitempty" tf:"zone_name,omitempty"` diff --git a/apis/network/v1beta1/zz_dnssrvrecord_types.go b/apis/network/v1beta1/zz_dnssrvrecord_types.go index ccca04ce8..4bb98f4b0 100755 --- a/apis/network/v1beta1/zz_dnssrvrecord_types.go +++ b/apis/network/v1beta1/zz_dnssrvrecord_types.go @@ -80,7 +80,7 @@ type DNSSRVRecordParameters struct { Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` // Specifies the DNS Zone where the resource exists. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.DNSZone + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.DNSZone // +kubebuilder:validation:Optional ZoneName *string `json:"zoneName,omitempty" tf:"zone_name,omitempty"` diff --git a/apis/network/v1beta1/zz_dnstxtrecord_types.go b/apis/network/v1beta1/zz_dnstxtrecord_types.go index 1a9d1e49f..4798fe657 100755 --- a/apis/network/v1beta1/zz_dnstxtrecord_types.go +++ b/apis/network/v1beta1/zz_dnstxtrecord_types.go @@ -80,7 +80,7 @@ type DNSTXTRecordParameters struct { Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` // Specifies the DNS Zone where the resource exists. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.DNSZone + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.DNSZone // +kubebuilder:validation:Optional ZoneName *string `json:"zoneName,omitempty" tf:"zone_name,omitempty"` diff --git a/apis/network/v1beta1/zz_expressroutecircuitauthorization_types.go b/apis/network/v1beta1/zz_expressroutecircuitauthorization_types.go index e3cd8d0ef..afe088b34 100755 --- a/apis/network/v1beta1/zz_expressroutecircuitauthorization_types.go +++ b/apis/network/v1beta1/zz_expressroutecircuitauthorization_types.go @@ -34,7 +34,7 @@ type ExpressRouteCircuitAuthorizationObservation struct { type ExpressRouteCircuitAuthorizationParameters struct { // The name of the Express Route Circuit in which to create the Authorization. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.ExpressRouteCircuit + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.ExpressRouteCircuit // +kubebuilder:validation:Optional ExpressRouteCircuitName *string `json:"expressRouteCircuitName,omitempty" tf:"express_route_circuit_name,omitempty"` diff --git a/apis/network/v1beta1/zz_expressroutecircuitconnection_types.go b/apis/network/v1beta1/zz_expressroutecircuitconnection_types.go index a7621a435..c5baa30c9 100755 --- a/apis/network/v1beta1/zz_expressroutecircuitconnection_types.go +++ b/apis/network/v1beta1/zz_expressroutecircuitconnection_types.go @@ -22,7 +22,7 @@ type ExpressRouteCircuitConnectionInitParameters struct { AddressPrefixIPv6 *string `json:"addressPrefixIpv6,omitempty" tf:"address_prefix_ipv6,omitempty"` // The ID of the peered Express Route Circuit Private Peering. Changing this forces a new Express Route Circuit Connection to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.ExpressRouteCircuitPeering + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.ExpressRouteCircuitPeering // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() PeerPeeringID *string `json:"peerPeeringId,omitempty" tf:"peer_peering_id,omitempty"` @@ -68,7 +68,7 @@ type ExpressRouteCircuitConnectionParameters struct { AuthorizationKeySecretRef *v1.SecretKeySelector `json:"authorizationKeySecretRef,omitempty" tf:"-"` // The ID of the peered Express Route Circuit Private Peering. Changing this forces a new Express Route Circuit Connection to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.ExpressRouteCircuitPeering + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.ExpressRouteCircuitPeering // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() // +kubebuilder:validation:Optional PeerPeeringID *string `json:"peerPeeringId,omitempty" tf:"peer_peering_id,omitempty"` @@ -82,7 +82,7 @@ type ExpressRouteCircuitConnectionParameters struct { PeerPeeringIDSelector *v1.Selector `json:"peerPeeringIdSelector,omitempty" tf:"-"` // The ID of the Express Route Circuit Private Peering that this Express Route Circuit Connection connects with. Changing this forces a new Express Route Circuit Connection to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.ExpressRouteCircuitPeering + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.ExpressRouteCircuitPeering // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() // +kubebuilder:validation:Optional PeeringID *string `json:"peeringId,omitempty" tf:"peering_id,omitempty"` diff --git a/apis/network/v1beta1/zz_firewallapplicationrulecollection_types.go b/apis/network/v1beta1/zz_firewallapplicationrulecollection_types.go index 3e862eedb..73e92c2cd 100755 --- a/apis/network/v1beta1/zz_firewallapplicationrulecollection_types.go +++ b/apis/network/v1beta1/zz_firewallapplicationrulecollection_types.go @@ -52,7 +52,7 @@ type FirewallApplicationRuleCollectionParameters struct { Action *string `json:"action,omitempty" tf:"action,omitempty"` // Specifies the name of the Firewall in which the Application Rule Collection should be created. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.Firewall + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Firewall // +kubebuilder:validation:Optional AzureFirewallName *string `json:"azureFirewallName,omitempty" tf:"azure_firewall_name,omitempty"` diff --git a/apis/network/v1beta1/zz_firewallnatrulecollection_types.go b/apis/network/v1beta1/zz_firewallnatrulecollection_types.go index 9dca75f76..7026d7b68 100755 --- a/apis/network/v1beta1/zz_firewallnatrulecollection_types.go +++ b/apis/network/v1beta1/zz_firewallnatrulecollection_types.go @@ -52,7 +52,7 @@ type FirewallNATRuleCollectionParameters struct { Action *string `json:"action,omitempty" tf:"action,omitempty"` // Specifies the name of the Firewall in which the NAT Rule Collection should be created. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.Firewall + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Firewall // +kubebuilder:validation:Optional AzureFirewallName *string `json:"azureFirewallName,omitempty" tf:"azure_firewall_name,omitempty"` diff --git a/apis/network/v1beta1/zz_firewallnetworkrulecollection_types.go b/apis/network/v1beta1/zz_firewallnetworkrulecollection_types.go index 29b3a83d4..ffdc4c8df 100755 --- a/apis/network/v1beta1/zz_firewallnetworkrulecollection_types.go +++ b/apis/network/v1beta1/zz_firewallnetworkrulecollection_types.go @@ -52,7 +52,7 @@ type FirewallNetworkRuleCollectionParameters struct { Action *string `json:"action,omitempty" tf:"action,omitempty"` // Specifies the name of the Firewall in which the Network Rule Collection should be created. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.Firewall + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Firewall // +kubebuilder:validation:Optional AzureFirewallName *string `json:"azureFirewallName,omitempty" tf:"azure_firewall_name,omitempty"` diff --git a/apis/network/v1beta1/zz_firewallpolicyrulecollectiongroup_types.go b/apis/network/v1beta1/zz_firewallpolicyrulecollectiongroup_types.go index 21d45c068..54f206299 100755 --- a/apis/network/v1beta1/zz_firewallpolicyrulecollectiongroup_types.go +++ b/apis/network/v1beta1/zz_firewallpolicyrulecollectiongroup_types.go @@ -234,7 +234,7 @@ type FirewallPolicyRuleCollectionGroupParameters struct { ApplicationRuleCollection []ApplicationRuleCollectionParameters `json:"applicationRuleCollection,omitempty" tf:"application_rule_collection,omitempty"` // The ID of the Firewall Policy where the Firewall Policy Rule Collection Group should exist. Changing this forces a new Firewall Policy Rule Collection Group to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.FirewallPolicy + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.FirewallPolicy // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() // +kubebuilder:validation:Optional FirewallPolicyID *string `json:"firewallPolicyId,omitempty" tf:"firewall_policy_id,omitempty"` diff --git a/apis/network/v1beta1/zz_generated.conversion_hubs.go b/apis/network/v1beta1/zz_generated.conversion_hubs.go index aa9042bd7..9212d6856 100755 --- a/apis/network/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/network/v1beta1/zz_generated.conversion_hubs.go @@ -6,18 +6,18 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *ApplicationGateway) Hub() {} - // Hub marks this type as a conversion hub. 
func (tr *ApplicationSecurityGroup) Hub() {} // Hub marks this type as a conversion hub. -func (tr *DNSARecord) Hub() {} +func (tr *DDoSProtectionPlan) Hub() {} // Hub marks this type as a conversion hub. func (tr *DNSAAAARecord) Hub() {} +// Hub marks this type as a conversion hub. +func (tr *DNSARecord) Hub() {} + // Hub marks this type as a conversion hub. func (tr *DNSCAARecord) Hub() {} @@ -39,33 +39,15 @@ func (tr *DNSSRVRecord) Hub() {} // Hub marks this type as a conversion hub. func (tr *DNSTXTRecord) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *DNSZone) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *ExpressRouteCircuit) Hub() {} - // Hub marks this type as a conversion hub. func (tr *ExpressRouteCircuitAuthorization) Hub() {} // Hub marks this type as a conversion hub. func (tr *ExpressRouteCircuitConnection) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *ExpressRouteCircuitPeering) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *ExpressRouteConnection) Hub() {} - // Hub marks this type as a conversion hub. func (tr *ExpressRouteGateway) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *ExpressRoutePort) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Firewall) Hub() {} - // Hub marks this type as a conversion hub. func (tr *FirewallApplicationRuleCollection) Hub() {} @@ -75,24 +57,12 @@ func (tr *FirewallNATRuleCollection) Hub() {} // Hub marks this type as a conversion hub. func (tr *FirewallNetworkRuleCollection) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *FirewallPolicy) Hub() {} - // Hub marks this type as a conversion hub. func (tr *FirewallPolicyRuleCollectionGroup) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *FrontDoor) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *FrontdoorCustomHTTPSConfiguration) Hub() {} - // Hub marks this type as a conversion hub. 
func (tr *FrontdoorFirewallPolicy) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *FrontdoorRulesEngine) Hub() {} - // Hub marks this type as a conversion hub. func (tr *IPGroup) Hub() {} @@ -121,22 +91,25 @@ func (tr *LoadBalancerProbe) Hub() {} func (tr *LoadBalancerRule) Hub() {} // Hub marks this type as a conversion hub. -func (tr *LocalNetworkGateway) Hub() {} +func (tr *ManagerManagementGroupConnection) Hub() {} // Hub marks this type as a conversion hub. -func (tr *NATGateway) Hub() {} +func (tr *ManagerNetworkGroup) Hub() {} // Hub marks this type as a conversion hub. -func (tr *NATGatewayPublicIPAssociation) Hub() {} +func (tr *ManagerStaticMember) Hub() {} // Hub marks this type as a conversion hub. -func (tr *NATGatewayPublicIPPrefixAssociation) Hub() {} +func (tr *ManagerSubscriptionConnection) Hub() {} // Hub marks this type as a conversion hub. -func (tr *ConnectionMonitor) Hub() {} +func (tr *NATGateway) Hub() {} // Hub marks this type as a conversion hub. -func (tr *DDoSProtectionPlan) Hub() {} +func (tr *NATGatewayPublicIPAssociation) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *NATGatewayPublicIPPrefixAssociation) Hub() {} // Hub marks this type as a conversion hub. func (tr *NetworkInterface) Hub() {} @@ -154,47 +127,11 @@ func (tr *NetworkInterfaceNatRuleAssociation) Hub() {} func (tr *NetworkInterfaceSecurityGroupAssociation) Hub() {} // Hub marks this type as a conversion hub. -func (tr *Manager) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *ManagerManagementGroupConnection) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *ManagerNetworkGroup) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *ManagerStaticMember) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *ManagerSubscriptionConnection) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *PacketCapture) Hub() {} - -// Hub marks this type as a conversion hub. 
-func (tr *Profile) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *SecurityGroup) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *SecurityRule) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Watcher) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *WatcherFlowLog) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *PointToSiteVPNGateway) Hub() {} +func (tr *PrivateDNSAAAARecord) Hub() {} // Hub marks this type as a conversion hub. func (tr *PrivateDNSARecord) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *PrivateDNSAAAARecord) Hub() {} - // Hub marks this type as a conversion hub. func (tr *PrivateDNSCNAMERecord) Hub() {} @@ -213,15 +150,9 @@ func (tr *PrivateDNSSRVRecord) Hub() {} // Hub marks this type as a conversion hub. func (tr *PrivateDNSTXTRecord) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *PrivateDNSZone) Hub() {} - // Hub marks this type as a conversion hub. func (tr *PrivateDNSZoneVirtualNetworkLink) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *PrivateEndpoint) Hub() {} - // Hub marks this type as a conversion hub. func (tr *PrivateEndpointApplicationSecurityGroupAssociation) Hub() {} @@ -253,7 +184,10 @@ func (tr *RouteServerBGPConnection) Hub() {} func (tr *RouteTable) Hub() {} // Hub marks this type as a conversion hub. -func (tr *Subnet) Hub() {} +func (tr *SecurityGroup) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *SecurityRule) Hub() {} // Hub marks this type as a conversion hub. func (tr *SubnetNATGatewayAssociation) Hub() {} @@ -276,15 +210,9 @@ func (tr *TrafficManagerExternalEndpoint) Hub() {} // Hub marks this type as a conversion hub. func (tr *TrafficManagerNestedEndpoint) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *TrafficManagerProfile) Hub() {} - // Hub marks this type as a conversion hub. 
func (tr *VirtualHub) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *VirtualHubConnection) Hub() {} - // Hub marks this type as a conversion hub. func (tr *VirtualHubIP) Hub() {} @@ -297,35 +225,14 @@ func (tr *VirtualHubRouteTableRoute) Hub() {} // Hub marks this type as a conversion hub. func (tr *VirtualHubSecurityPartnerProvider) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *VirtualNetwork) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *VirtualNetworkGateway) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *VirtualNetworkGatewayConnection) Hub() {} - // Hub marks this type as a conversion hub. func (tr *VirtualNetworkPeering) Hub() {} // Hub marks this type as a conversion hub. func (tr *VirtualWAN) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *VPNGateway) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *VPNGatewayConnection) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *VPNServerConfiguration) Hub() {} - // Hub marks this type as a conversion hub. func (tr *VPNServerConfigurationPolicyGroup) Hub() {} // Hub marks this type as a conversion hub. -func (tr *VPNSite) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *WebApplicationFirewallPolicy) Hub() {} +func (tr *Watcher) Hub() {} diff --git a/apis/network/v1beta1/zz_generated.conversion_spokes.go b/apis/network/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..c602ae4ee --- /dev/null +++ b/apis/network/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,634 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this ApplicationGateway to the hub type. +func (tr *ApplicationGateway) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ApplicationGateway type. +func (tr *ApplicationGateway) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this ConnectionMonitor to the hub type. +func (tr *ConnectionMonitor) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ConnectionMonitor type. 
+func (tr *ConnectionMonitor) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this DNSZone to the hub type. +func (tr *DNSZone) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the DNSZone type. +func (tr *DNSZone) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this ExpressRouteCircuit to the hub type. +func (tr *ExpressRouteCircuit) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ExpressRouteCircuit type. 
+func (tr *ExpressRouteCircuit) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this ExpressRouteCircuitPeering to the hub type. +func (tr *ExpressRouteCircuitPeering) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ExpressRouteCircuitPeering type. +func (tr *ExpressRouteCircuitPeering) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this ExpressRouteConnection to the hub type. 
+func (tr *ExpressRouteConnection) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ExpressRouteConnection type. +func (tr *ExpressRouteConnection) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this ExpressRoutePort to the hub type. +func (tr *ExpressRoutePort) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ExpressRoutePort type. +func (tr *ExpressRoutePort) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Firewall to the hub type. 
+func (tr *Firewall) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Firewall type. +func (tr *Firewall) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this FirewallPolicy to the hub type. +func (tr *FirewallPolicy) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the FirewallPolicy type. +func (tr *FirewallPolicy) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this FrontDoor to the hub type. 
+func (tr *FrontDoor) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the FrontDoor type. +func (tr *FrontDoor) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this FrontdoorCustomHTTPSConfiguration to the hub type. +func (tr *FrontdoorCustomHTTPSConfiguration) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the FrontdoorCustomHTTPSConfiguration type. 
+func (tr *FrontdoorCustomHTTPSConfiguration) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this FrontdoorRulesEngine to the hub type. +func (tr *FrontdoorRulesEngine) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the FrontdoorRulesEngine type. +func (tr *FrontdoorRulesEngine) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this LocalNetworkGateway to the hub type. 
+func (tr *LocalNetworkGateway) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the LocalNetworkGateway type. +func (tr *LocalNetworkGateway) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Manager to the hub type. +func (tr *Manager) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Manager type. +func (tr *Manager) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this PacketCapture to the hub type. 
+func (tr *PacketCapture) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the PacketCapture type. +func (tr *PacketCapture) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this PointToSiteVPNGateway to the hub type. +func (tr *PointToSiteVPNGateway) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the PointToSiteVPNGateway type. +func (tr *PointToSiteVPNGateway) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this PrivateDNSZone to the hub type. 
+func (tr *PrivateDNSZone) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the PrivateDNSZone type. +func (tr *PrivateDNSZone) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this PrivateEndpoint to the hub type. +func (tr *PrivateEndpoint) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the PrivateEndpoint type. +func (tr *PrivateEndpoint) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Profile to the hub type. 
+func (tr *Profile) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Profile type. +func (tr *Profile) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Subnet to the hub type. +func (tr *Subnet) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Subnet type. +func (tr *Subnet) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this TrafficManagerProfile to the hub type. 
+func (tr *TrafficManagerProfile) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the TrafficManagerProfile type. +func (tr *TrafficManagerProfile) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this VirtualHubConnection to the hub type. +func (tr *VirtualHubConnection) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the VirtualHubConnection type. 
+func (tr *VirtualHubConnection) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this VirtualNetwork to the hub type. +func (tr *VirtualNetwork) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the VirtualNetwork type. +func (tr *VirtualNetwork) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this VirtualNetworkGateway to the hub type. +func (tr *VirtualNetworkGateway) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the VirtualNetworkGateway type. 
+func (tr *VirtualNetworkGateway) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this VirtualNetworkGatewayConnection to the hub type. +func (tr *VirtualNetworkGatewayConnection) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the VirtualNetworkGatewayConnection type. +func (tr *VirtualNetworkGatewayConnection) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this VPNGateway to the hub type. 
+func (tr *VPNGateway) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the VPNGateway type. +func (tr *VPNGateway) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this VPNGatewayConnection to the hub type. +func (tr *VPNGatewayConnection) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the VPNGatewayConnection type. +func (tr *VPNGatewayConnection) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this VPNServerConfiguration to the hub type. 
+func (tr *VPNServerConfiguration) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the VPNServerConfiguration type. +func (tr *VPNServerConfiguration) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this VPNSite to the hub type. +func (tr *VPNSite) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the VPNSite type. +func (tr *VPNSite) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this WatcherFlowLog to the hub type. 
+func (tr *WatcherFlowLog) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the WatcherFlowLog type. +func (tr *WatcherFlowLog) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this WebApplicationFirewallPolicy to the hub type. +func (tr *WebApplicationFirewallPolicy) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the WebApplicationFirewallPolicy type. 
+func (tr *WebApplicationFirewallPolicy) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/network/v1beta1/zz_generated.resolvers.go b/apis/network/v1beta1/zz_generated.resolvers.go index a00a29551..f8e00f34a 100644 --- a/apis/network/v1beta1/zz_generated.resolvers.go +++ b/apis/network/v1beta1/zz_generated.resolvers.go @@ -363,7 +363,7 @@ func (mg *DNSAAAARecord) ResolveReferences(ctx context.Context, c client.Reader) mg.Spec.ForProvider.TargetResourceID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.TargetResourceIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "DNSZone", "DNSZoneList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "DNSZone", "DNSZoneList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -451,7 +451,7 @@ func (mg *DNSARecord) ResolveReferences(ctx context.Context, c client.Reader) er mg.Spec.ForProvider.TargetResourceID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.TargetResourceIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "DNSZone", "DNSZoneList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "DNSZone", "DNSZoneList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -520,7 +520,7 @@ func (mg *DNSCAARecord) ResolveReferences(ctx context.Context, c client.Reader) 
mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "DNSZone", "DNSZoneList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "DNSZone", "DNSZoneList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -589,7 +589,7 @@ func (mg *DNSCNAMERecord) ResolveReferences(ctx context.Context, c client.Reader mg.Spec.ForProvider.TargetResourceID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.TargetResourceIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "DNSZone", "DNSZoneList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "DNSZone", "DNSZoneList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -658,7 +658,7 @@ func (mg *DNSMXRecord) ResolveReferences(ctx context.Context, c client.Reader) e mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "DNSZone", "DNSZoneList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "DNSZone", "DNSZoneList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -708,7 +708,7 @@ func (mg *DNSNSRecord) ResolveReferences(ctx context.Context, c client.Reader) e mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = 
apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "DNSZone", "DNSZoneList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "DNSZone", "DNSZoneList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -758,7 +758,7 @@ func (mg *DNSPTRRecord) ResolveReferences(ctx context.Context, c client.Reader) mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "DNSZone", "DNSZoneList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "DNSZone", "DNSZoneList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -808,7 +808,7 @@ func (mg *DNSSRVRecord) ResolveReferences(ctx context.Context, c client.Reader) mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "DNSZone", "DNSZoneList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "DNSZone", "DNSZoneList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -858,7 +858,7 @@ func (mg *DNSTXTRecord) ResolveReferences(ctx context.Context, c client.Reader) mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "DNSZone", "DNSZoneList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", 
"DNSZone", "DNSZoneList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -951,7 +951,7 @@ func (mg *ExpressRouteCircuitAuthorization) ResolveReferences(ctx context.Contex var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "ExpressRouteCircuit", "ExpressRouteCircuitList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "ExpressRouteCircuit", "ExpressRouteCircuitList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1001,7 +1001,7 @@ func (mg *ExpressRouteCircuitConnection) ResolveReferences(ctx context.Context, var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "ExpressRouteCircuitPeering", "ExpressRouteCircuitPeeringList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "ExpressRouteCircuitPeering", "ExpressRouteCircuitPeeringList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1020,7 +1020,7 @@ func (mg *ExpressRouteCircuitConnection) ResolveReferences(ctx context.Context, mg.Spec.ForProvider.PeerPeeringID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.PeerPeeringIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "ExpressRouteCircuitPeering", "ExpressRouteCircuitPeeringList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "ExpressRouteCircuitPeering", "ExpressRouteCircuitPeeringList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1039,7 
+1039,7 @@ func (mg *ExpressRouteCircuitConnection) ResolveReferences(ctx context.Context, mg.Spec.ForProvider.PeeringID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.PeeringIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "ExpressRouteCircuitPeering", "ExpressRouteCircuitPeeringList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "ExpressRouteCircuitPeering", "ExpressRouteCircuitPeeringList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1447,7 +1447,7 @@ func (mg *FirewallApplicationRuleCollection) ResolveReferences(ctx context.Conte var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "Firewall", "FirewallList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Firewall", "FirewallList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1497,7 +1497,7 @@ func (mg *FirewallNATRuleCollection) ResolveReferences(ctx context.Context, c cl var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "Firewall", "FirewallList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Firewall", "FirewallList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1547,7 +1547,7 @@ func (mg *FirewallNetworkRuleCollection) ResolveReferences(ctx context.Context, var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "Firewall", "FirewallList") + m, l, err = 
apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Firewall", "FirewallList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1628,7 +1628,7 @@ func (mg *FirewallPolicyRuleCollectionGroup) ResolveReferences(ctx context.Conte var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "FirewallPolicy", "FirewallPolicyList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "FirewallPolicy", "FirewallPolicyList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1880,7 +1880,7 @@ func (mg *LoadBalancer) ResolveReferences(ctx context.Context, c client.Reader) } for i3 := 0; i3 < len(mg.Spec.ForProvider.FrontendIPConfiguration); i3++ { { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "Subnet", "SubnetList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1941,7 +1941,7 @@ func (mg *LoadBalancer) ResolveReferences(ctx context.Context, c client.Reader) } for i3 := 0; i3 < len(mg.Spec.InitProvider.FrontendIPConfiguration); i3++ { { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "Subnet", "SubnetList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -2023,7 +2023,7 @@ func (mg *LoadBalancerBackendAddressPoolAddress) ResolveReferences(ctx context.C mg.Spec.ForProvider.BackendAddressPoolID = 
reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.BackendAddressPoolIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "VirtualNetwork", "VirtualNetworkList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "VirtualNetwork", "VirtualNetworkList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -2042,7 +2042,7 @@ func (mg *LoadBalancerBackendAddressPoolAddress) ResolveReferences(ctx context.C mg.Spec.ForProvider.VirtualNetworkID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.VirtualNetworkIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "VirtualNetwork", "VirtualNetworkList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "VirtualNetwork", "VirtualNetworkList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -2461,7 +2461,7 @@ func (mg *ManagerManagementGroupConnection) ResolveReferences(ctx context.Contex mg.Spec.ForProvider.ManagementGroupID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ManagementGroupIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "Manager", "ManagerList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Manager", "ManagerList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -2480,7 +2480,7 @@ func (mg *ManagerManagementGroupConnection) ResolveReferences(ctx context.Contex mg.Spec.ForProvider.NetworkManagerID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.NetworkManagerIDRef = rsp.ResolvedReference { - m, l, err = 
apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "Manager", "ManagerList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Manager", "ManagerList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -2511,7 +2511,7 @@ func (mg *ManagerNetworkGroup) ResolveReferences(ctx context.Context, c client.R var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "Manager", "ManagerList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Manager", "ManagerList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -2561,7 +2561,7 @@ func (mg *ManagerStaticMember) ResolveReferences(ctx context.Context, c client.R mg.Spec.ForProvider.NetworkGroupID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.NetworkGroupIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "VirtualNetwork", "VirtualNetworkList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "VirtualNetwork", "VirtualNetworkList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -2580,7 +2580,7 @@ func (mg *ManagerStaticMember) ResolveReferences(ctx context.Context, c client.R mg.Spec.ForProvider.TargetVirtualNetworkID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.TargetVirtualNetworkIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "VirtualNetwork", "VirtualNetworkList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "VirtualNetwork", "VirtualNetworkList") if 
err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -2611,7 +2611,7 @@ func (mg *ManagerSubscriptionConnection) ResolveReferences(ctx context.Context, var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "Manager", "ManagerList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Manager", "ManagerList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -2630,7 +2630,7 @@ func (mg *ManagerSubscriptionConnection) ResolveReferences(ctx context.Context, mg.Spec.ForProvider.NetworkManagerID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.NetworkManagerIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "Manager", "ManagerList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Manager", "ManagerList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -2891,7 +2891,7 @@ func (mg *NetworkInterface) ResolveReferences(ctx context.Context, c client.Read } for i3 := 0; i3 < len(mg.Spec.ForProvider.IPConfiguration); i3++ { { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "Subnet", "SubnetList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -2952,7 +2952,7 @@ func (mg *NetworkInterface) ResolveReferences(ctx context.Context, c client.Read } for i3 := 0; i3 < len(mg.Spec.InitProvider.IPConfiguration); i3++ { { - m, l, err = 
apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "Subnet", "SubnetList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -3555,7 +3555,7 @@ func (mg *PrivateDNSAAAARecord) ResolveReferences(ctx context.Context, c client. mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "PrivateDNSZone", "PrivateDNSZoneList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "PrivateDNSZone", "PrivateDNSZoneList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -3605,7 +3605,7 @@ func (mg *PrivateDNSARecord) ResolveReferences(ctx context.Context, c client.Rea mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "PrivateDNSZone", "PrivateDNSZoneList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "PrivateDNSZone", "PrivateDNSZoneList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -3655,7 +3655,7 @@ func (mg *PrivateDNSCNAMERecord) ResolveReferences(ctx context.Context, c client mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "PrivateDNSZone", "PrivateDNSZoneList") + m, l, err = 
apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "PrivateDNSZone", "PrivateDNSZoneList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -3705,7 +3705,7 @@ func (mg *PrivateDNSMXRecord) ResolveReferences(ctx context.Context, c client.Re mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "PrivateDNSZone", "PrivateDNSZoneList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "PrivateDNSZone", "PrivateDNSZoneList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -3755,7 +3755,7 @@ func (mg *PrivateDNSPTRRecord) ResolveReferences(ctx context.Context, c client.R mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "PrivateDNSZone", "PrivateDNSZoneList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "PrivateDNSZone", "PrivateDNSZoneList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -3805,7 +3805,7 @@ func (mg *PrivateDNSResolver) ResolveReferences(ctx context.Context, c client.Re mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "VirtualNetwork", "VirtualNetworkList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "VirtualNetwork", 
"VirtualNetworkList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -3824,7 +3824,7 @@ func (mg *PrivateDNSResolver) ResolveReferences(ctx context.Context, c client.Re mg.Spec.ForProvider.VirtualNetworkID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.VirtualNetworkIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "VirtualNetwork", "VirtualNetworkList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "VirtualNetwork", "VirtualNetworkList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -3874,7 +3874,7 @@ func (mg *PrivateDNSSRVRecord) ResolveReferences(ctx context.Context, c client.R mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "PrivateDNSZone", "PrivateDNSZoneList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "PrivateDNSZone", "PrivateDNSZoneList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -3924,7 +3924,7 @@ func (mg *PrivateDNSTXTRecord) ResolveReferences(ctx context.Context, c client.R mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "PrivateDNSZone", "PrivateDNSZoneList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "PrivateDNSZone", "PrivateDNSZoneList") if err != nil { return errors.Wrap(err, "failed to get the reference 
target managed resource and its list for reference resolution") } @@ -3986,7 +3986,7 @@ func (mg *PrivateDNSZoneVirtualNetworkLink) ResolveReferences(ctx context.Contex var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "PrivateDNSZone", "PrivateDNSZoneList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "PrivateDNSZone", "PrivateDNSZoneList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -4024,7 +4024,7 @@ func (mg *PrivateDNSZoneVirtualNetworkLink) ResolveReferences(ctx context.Contex mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "VirtualNetwork", "VirtualNetworkList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "VirtualNetwork", "VirtualNetworkList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -4043,7 +4043,7 @@ func (mg *PrivateDNSZoneVirtualNetworkLink) ResolveReferences(ctx context.Contex mg.Spec.ForProvider.VirtualNetworkID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.VirtualNetworkIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "VirtualNetwork", "VirtualNetworkList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "VirtualNetwork", "VirtualNetworkList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -4205,7 +4205,7 @@ func (mg *PrivateEndpointApplicationSecurityGroupAssociation) ResolveReferences( 
mg.Spec.ForProvider.ApplicationSecurityGroupID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ApplicationSecurityGroupIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "PrivateEndpoint", "PrivateEndpointList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "PrivateEndpoint", "PrivateEndpointList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -4243,7 +4243,7 @@ func (mg *PrivateEndpointApplicationSecurityGroupAssociation) ResolveReferences( mg.Spec.InitProvider.ApplicationSecurityGroupID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.ApplicationSecurityGroupIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "PrivateEndpoint", "PrivateEndpointList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "PrivateEndpoint", "PrivateEndpointList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -4276,7 +4276,7 @@ func (mg *PrivateLinkService) ResolveReferences(ctx context.Context, c client.Re for i3 := 0; i3 < len(mg.Spec.ForProvider.NATIPConfiguration); i3++ { { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "Subnet", "SubnetList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -4316,7 +4316,7 @@ func (mg *PrivateLinkService) ResolveReferences(ctx context.Context, c client.Re for i3 := 0; i3 < len(mg.Spec.InitProvider.NATIPConfiguration); i3++ { { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", 
"v1beta1", "Subnet", "SubnetList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -4638,7 +4638,7 @@ func (mg *RouteServer) ResolveReferences(ctx context.Context, c client.Reader) e mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "Subnet", "SubnetList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -4676,7 +4676,7 @@ func (mg *RouteServer) ResolveReferences(ctx context.Context, c client.Reader) e mg.Spec.InitProvider.PublicIPAddressID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.PublicIPAddressIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "Subnet", "SubnetList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -4919,7 +4919,7 @@ func (mg *SubnetNATGatewayAssociation) ResolveReferences(ctx context.Context, c mg.Spec.ForProvider.NATGatewayID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.NATGatewayIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "Subnet", "SubnetList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") if err != nil { return errors.Wrap(err, "failed to get 
the reference target managed resource and its list for reference resolution") } @@ -4957,7 +4957,7 @@ func (mg *SubnetNATGatewayAssociation) ResolveReferences(ctx context.Context, c mg.Spec.InitProvider.NATGatewayID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.NATGatewayIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "Subnet", "SubnetList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -5007,7 +5007,7 @@ func (mg *SubnetNetworkSecurityGroupAssociation) ResolveReferences(ctx context.C mg.Spec.ForProvider.NetworkSecurityGroupID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.NetworkSecurityGroupIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "Subnet", "SubnetList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -5045,7 +5045,7 @@ func (mg *SubnetNetworkSecurityGroupAssociation) ResolveReferences(ctx context.C mg.Spec.InitProvider.NetworkSecurityGroupID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.NetworkSecurityGroupIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "Subnet", "SubnetList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -5095,7 +5095,7 @@ func (mg *SubnetRouteTableAssociation) ResolveReferences(ctx 
context.Context, c mg.Spec.ForProvider.RouteTableID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.RouteTableIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "Subnet", "SubnetList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -5133,7 +5133,7 @@ func (mg *SubnetRouteTableAssociation) ResolveReferences(ctx context.Context, c mg.Spec.InitProvider.RouteTableID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.RouteTableIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "Subnet", "SubnetList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -5195,7 +5195,7 @@ func (mg *TrafficManagerAzureEndpoint) ResolveReferences(ctx context.Context, c var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "TrafficManagerProfile", "TrafficManagerProfileList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "TrafficManagerProfile", "TrafficManagerProfileList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -5264,7 +5264,7 @@ func (mg *TrafficManagerExternalEndpoint) ResolveReferences(ctx context.Context, var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "TrafficManagerProfile", "TrafficManagerProfileList") + m, l, err = 
apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "TrafficManagerProfile", "TrafficManagerProfileList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -5295,7 +5295,7 @@ func (mg *TrafficManagerNestedEndpoint) ResolveReferences(ctx context.Context, c var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "TrafficManagerProfile", "TrafficManagerProfileList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "TrafficManagerProfile", "TrafficManagerProfileList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -5314,7 +5314,7 @@ func (mg *TrafficManagerNestedEndpoint) ResolveReferences(ctx context.Context, c mg.Spec.ForProvider.ProfileID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ProfileIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "TrafficManagerProfile", "TrafficManagerProfileList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "TrafficManagerProfile", "TrafficManagerProfileList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -5333,7 +5333,7 @@ func (mg *TrafficManagerNestedEndpoint) ResolveReferences(ctx context.Context, c mg.Spec.ForProvider.TargetResourceID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.TargetResourceIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "TrafficManagerProfile", "TrafficManagerProfileList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "TrafficManagerProfile", 
"TrafficManagerProfileList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -5564,7 +5564,7 @@ func (mg *VPNServerConfigurationPolicyGroup) ResolveReferences(ctx context.Conte var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "VPNServerConfiguration", "VPNServerConfigurationList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "VPNServerConfiguration", "VPNServerConfigurationList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -5864,7 +5864,7 @@ func (mg *VirtualHubIP) ResolveReferences(ctx context.Context, c client.Reader) mg.Spec.ForProvider.PublicIPAddressID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.PublicIPAddressIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "Subnet", "SubnetList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -5921,7 +5921,7 @@ func (mg *VirtualHubIP) ResolveReferences(ctx context.Context, c client.Reader) mg.Spec.InitProvider.PublicIPAddressID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.PublicIPAddressIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "Subnet", "SubnetList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -5954,7 +5954,7 @@ func (mg *VirtualHubRouteTable) 
ResolveReferences(ctx context.Context, c client. for i3 := 0; i3 < len(mg.Spec.ForProvider.Route); i3++ { { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "VirtualHubConnection", "VirtualHubConnectionList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "VirtualHubConnection", "VirtualHubConnectionList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -5994,7 +5994,7 @@ func (mg *VirtualHubRouteTable) ResolveReferences(ctx context.Context, c client. for i3 := 0; i3 < len(mg.Spec.InitProvider.Route); i3++ { { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "VirtualHubConnection", "VirtualHubConnectionList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "VirtualHubConnection", "VirtualHubConnectionList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -6026,7 +6026,7 @@ func (mg *VirtualHubRouteTableRoute) ResolveReferences(ctx context.Context, c cl var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "VirtualHubConnection", "VirtualHubConnectionList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "VirtualHubConnection", "VirtualHubConnectionList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -6064,7 +6064,7 @@ func (mg *VirtualHubRouteTableRoute) ResolveReferences(ctx context.Context, c cl mg.Spec.ForProvider.RouteTableID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.RouteTableIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", 
"VirtualHubConnection", "VirtualHubConnectionList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "VirtualHubConnection", "VirtualHubConnectionList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -6456,7 +6456,7 @@ func (mg *VirtualNetworkPeering) ResolveReferences(ctx context.Context, c client var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "VirtualNetwork", "VirtualNetworkList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "VirtualNetwork", "VirtualNetworkList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -6494,7 +6494,7 @@ func (mg *VirtualNetworkPeering) ResolveReferences(ctx context.Context, c client mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "VirtualNetwork", "VirtualNetworkList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "VirtualNetwork", "VirtualNetworkList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -6513,7 +6513,7 @@ func (mg *VirtualNetworkPeering) ResolveReferences(ctx context.Context, c client mg.Spec.ForProvider.VirtualNetworkName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.VirtualNetworkNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "VirtualNetwork", "VirtualNetworkList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "VirtualNetwork", 
"VirtualNetworkList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/network/v1beta1/zz_loadbalancer_types.go b/apis/network/v1beta1/zz_loadbalancer_types.go index fce00a36e..11ff7ccae 100755 --- a/apis/network/v1beta1/zz_loadbalancer_types.go +++ b/apis/network/v1beta1/zz_loadbalancer_types.go @@ -47,7 +47,7 @@ type LoadBalancerFrontendIPConfigurationInitParameters struct { PublicIPPrefixID *string `json:"publicIpPrefixId,omitempty" tf:"public_ip_prefix_id,omitempty"` // The ID of the Subnet which should be associated with the IP Configuration. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.Subnet + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` @@ -151,7 +151,7 @@ type LoadBalancerFrontendIPConfigurationParameters struct { PublicIPPrefixID *string `json:"publicIpPrefixId,omitempty" tf:"public_ip_prefix_id,omitempty"` // The ID of the Subnet which should be associated with the IP Configuration. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.Subnet + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() // +kubebuilder:validation:Optional SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` diff --git a/apis/network/v1beta1/zz_loadbalancerbackendaddresspooladdress_types.go b/apis/network/v1beta1/zz_loadbalancerbackendaddresspooladdress_types.go index c5d44b0d8..2f0d7a1bc 100755 --- a/apis/network/v1beta1/zz_loadbalancerbackendaddresspooladdress_types.go +++ b/apis/network/v1beta1/zz_loadbalancerbackendaddresspooladdress_types.go @@ -42,7 +42,7 @@ type LoadBalancerBackendAddressPoolAddressInitParameters struct { // The ID of the Virtual Network within which the Backend Address Pool should exist. // For regional load balancer, user needs to specify `virtual_network_id` and `ip_address` - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.VirtualNetwork + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.VirtualNetwork // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() VirtualNetworkID *string `json:"virtualNetworkId,omitempty" tf:"virtual_network_id,omitempty"` @@ -105,7 +105,7 @@ type LoadBalancerBackendAddressPoolAddressParameters struct { // The ID of the Virtual Network within which the Backend Address Pool should exist. 
// For regional load balancer, user needs to specify `virtual_network_id` and `ip_address` - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.VirtualNetwork + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.VirtualNetwork // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() // +kubebuilder:validation:Optional VirtualNetworkID *string `json:"virtualNetworkId,omitempty" tf:"virtual_network_id,omitempty"` diff --git a/apis/network/v1beta1/zz_managermanagementgroupconnection_types.go b/apis/network/v1beta1/zz_managermanagementgroupconnection_types.go index 12f7c73d4..1c11b3f87 100755 --- a/apis/network/v1beta1/zz_managermanagementgroupconnection_types.go +++ b/apis/network/v1beta1/zz_managermanagementgroupconnection_types.go @@ -19,7 +19,7 @@ type ManagerManagementGroupConnectionInitParameters struct { Description *string `json:"description,omitempty" tf:"description,omitempty"` // Specifies the ID of the Network Manager which the Management Group is connected to. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.Manager + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Manager // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() NetworkManagerID *string `json:"networkManagerId,omitempty" tf:"network_manager_id,omitempty"` @@ -71,7 +71,7 @@ type ManagerManagementGroupConnectionParameters struct { ManagementGroupIDSelector *v1.Selector `json:"managementGroupIdSelector,omitempty" tf:"-"` // Specifies the ID of the Network Manager which the Management Group is connected to. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.Manager + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Manager // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional NetworkManagerID *string `json:"networkManagerId,omitempty" tf:"network_manager_id,omitempty"` diff --git a/apis/network/v1beta1/zz_managernetworkgroup_types.go b/apis/network/v1beta1/zz_managernetworkgroup_types.go index fa659fc52..51f5a67e4 100755 --- a/apis/network/v1beta1/zz_managernetworkgroup_types.go +++ b/apis/network/v1beta1/zz_managernetworkgroup_types.go @@ -38,7 +38,7 @@ type ManagerNetworkGroupParameters struct { Description *string `json:"description,omitempty" tf:"description,omitempty"` // Specifies the ID of the Network Manager. Changing this forces a new Network Manager Network Group to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.Manager + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Manager // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional NetworkManagerID *string `json:"networkManagerId,omitempty" tf:"network_manager_id,omitempty"` diff --git a/apis/network/v1beta1/zz_managerstaticmember_types.go b/apis/network/v1beta1/zz_managerstaticmember_types.go index 51bdf4a86..4f2cd0484 100755 --- a/apis/network/v1beta1/zz_managerstaticmember_types.go +++ b/apis/network/v1beta1/zz_managerstaticmember_types.go @@ -16,7 +16,7 @@ import ( type ManagerStaticMemberInitParameters struct { // Specifies the Resource ID of the Virtual Network using as the Static Member. Changing this forces a new Network Manager Static Member to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.VirtualNetwork + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.VirtualNetwork // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() TargetVirtualNetworkID *string `json:"targetVirtualNetworkId,omitempty" tf:"target_virtual_network_id,omitempty"` @@ -61,7 +61,7 @@ type ManagerStaticMemberParameters struct { NetworkGroupIDSelector *v1.Selector `json:"networkGroupIdSelector,omitempty" tf:"-"` // Specifies the Resource ID of the Virtual Network using as the Static Member. Changing this forces a new Network Manager Static Member to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.VirtualNetwork + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.VirtualNetwork // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() // +kubebuilder:validation:Optional TargetVirtualNetworkID *string `json:"targetVirtualNetworkId,omitempty" tf:"target_virtual_network_id,omitempty"` diff --git a/apis/network/v1beta1/zz_managersubscriptionconnection_types.go b/apis/network/v1beta1/zz_managersubscriptionconnection_types.go index 41a9918e5..c6c13d18a 100755 --- a/apis/network/v1beta1/zz_managersubscriptionconnection_types.go +++ b/apis/network/v1beta1/zz_managersubscriptionconnection_types.go @@ -19,7 +19,7 @@ type ManagerSubscriptionConnectionInitParameters struct { Description *string `json:"description,omitempty" tf:"description,omitempty"` // Specifies the ID of the Network Manager which the Subscription is connected to. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.Manager + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Manager // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() NetworkManagerID *string `json:"networkManagerId,omitempty" tf:"network_manager_id,omitempty"` @@ -60,7 +60,7 @@ type ManagerSubscriptionConnectionParameters struct { Description *string `json:"description,omitempty" tf:"description,omitempty"` // Specifies the ID of the Network Manager which the Subscription is connected to. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.Manager + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Manager // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional NetworkManagerID *string `json:"networkManagerId,omitempty" tf:"network_manager_id,omitempty"` diff --git a/apis/network/v1beta1/zz_networkinterface_types.go b/apis/network/v1beta1/zz_networkinterface_types.go index 5e86b1070..b70a469d6 100755 --- a/apis/network/v1beta1/zz_networkinterface_types.go +++ b/apis/network/v1beta1/zz_networkinterface_types.go @@ -47,7 +47,7 @@ type NetworkInterfaceIPConfigurationInitParameters struct { PublicIPAddressIDSelector *v1.Selector `json:"publicIpAddressIdSelector,omitempty" tf:"-"` // The ID of the Subnet where this Network Interface should be located in. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.Subnet + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` @@ -128,7 +128,7 @@ type NetworkInterfaceIPConfigurationParameters struct { PublicIPAddressIDSelector *v1.Selector `json:"publicIpAddressIdSelector,omitempty" tf:"-"` // The ID of the Subnet where this Network Interface should be located in. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.Subnet + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` diff --git a/apis/network/v1beta1/zz_privatednsaaaarecord_types.go b/apis/network/v1beta1/zz_privatednsaaaarecord_types.go index 6feece3f9..ae51323e9 100755 --- a/apis/network/v1beta1/zz_privatednsaaaarecord_types.go +++ b/apis/network/v1beta1/zz_privatednsaaaarecord_types.go @@ -83,7 +83,7 @@ type PrivateDNSAAAARecordParameters struct { Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` // Specifies the Private DNS Zone where the resource exists. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.PrivateDNSZone + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.PrivateDNSZone // +kubebuilder:validation:Optional ZoneName *string `json:"zoneName,omitempty" tf:"zone_name,omitempty"` diff --git a/apis/network/v1beta1/zz_privatednsarecord_types.go b/apis/network/v1beta1/zz_privatednsarecord_types.go index 590a832ba..860b32017 100755 --- a/apis/network/v1beta1/zz_privatednsarecord_types.go +++ b/apis/network/v1beta1/zz_privatednsarecord_types.go @@ -83,7 +83,7 @@ type PrivateDNSARecordParameters struct { Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` // Specifies the Private DNS Zone where the resource exists. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.PrivateDNSZone + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.PrivateDNSZone // +kubebuilder:validation:Optional ZoneName *string `json:"zoneName,omitempty" tf:"zone_name,omitempty"` diff --git a/apis/network/v1beta1/zz_privatednscnamerecord_types.go b/apis/network/v1beta1/zz_privatednscnamerecord_types.go index 86e1ddec7..d63af55c6 100755 --- a/apis/network/v1beta1/zz_privatednscnamerecord_types.go +++ b/apis/network/v1beta1/zz_privatednscnamerecord_types.go @@ -80,7 +80,7 @@ type PrivateDNSCNAMERecordParameters struct { Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` // Specifies the Private DNS Zone where the resource exists. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.PrivateDNSZone + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.PrivateDNSZone // +kubebuilder:validation:Optional ZoneName *string `json:"zoneName,omitempty" tf:"zone_name,omitempty"` diff --git a/apis/network/v1beta1/zz_privatednsmxrecord_types.go b/apis/network/v1beta1/zz_privatednsmxrecord_types.go index ed2613d7a..eed0780fd 100755 --- a/apis/network/v1beta1/zz_privatednsmxrecord_types.go +++ b/apis/network/v1beta1/zz_privatednsmxrecord_types.go @@ -80,7 +80,7 @@ type PrivateDNSMXRecordParameters struct { Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` // Specifies the Private DNS Zone where the resource exists. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.PrivateDNSZone + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.PrivateDNSZone // +kubebuilder:validation:Optional ZoneName *string `json:"zoneName,omitempty" tf:"zone_name,omitempty"` diff --git a/apis/network/v1beta1/zz_privatednsptrrecord_types.go b/apis/network/v1beta1/zz_privatednsptrrecord_types.go index 056825d07..d66106f61 100755 --- a/apis/network/v1beta1/zz_privatednsptrrecord_types.go +++ b/apis/network/v1beta1/zz_privatednsptrrecord_types.go @@ -83,7 +83,7 @@ type PrivateDNSPTRRecordParameters struct { Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` // Specifies the Private DNS Zone where the resource exists. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.PrivateDNSZone + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.PrivateDNSZone // +kubebuilder:validation:Optional ZoneName *string `json:"zoneName,omitempty" tf:"zone_name,omitempty"` diff --git a/apis/network/v1beta1/zz_privatednsresolver_types.go b/apis/network/v1beta1/zz_privatednsresolver_types.go index c5ad4fc28..ac7f95d10 100755 --- a/apis/network/v1beta1/zz_privatednsresolver_types.go +++ b/apis/network/v1beta1/zz_privatednsresolver_types.go @@ -23,7 +23,7 @@ type PrivateDNSResolverInitParameters struct { Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` // The ID of the Virtual Network that is linked to the Private DNS Resolver. Changing this forces a new Private DNS Resolver to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.VirtualNetwork + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.VirtualNetwork // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() VirtualNetworkID *string `json:"virtualNetworkId,omitempty" tf:"virtual_network_id,omitempty"` @@ -80,7 +80,7 @@ type PrivateDNSResolverParameters struct { Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` // The ID of the Virtual Network that is linked to the Private DNS Resolver. Changing this forces a new Private DNS Resolver to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.VirtualNetwork + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.VirtualNetwork // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional VirtualNetworkID *string `json:"virtualNetworkId,omitempty" tf:"virtual_network_id,omitempty"` diff --git a/apis/network/v1beta1/zz_privatednssrvrecord_types.go b/apis/network/v1beta1/zz_privatednssrvrecord_types.go index 62f820238..7d3621759 100755 --- a/apis/network/v1beta1/zz_privatednssrvrecord_types.go +++ b/apis/network/v1beta1/zz_privatednssrvrecord_types.go @@ -80,7 +80,7 @@ type PrivateDNSSRVRecordParameters struct { Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` // Specifies the Private DNS Zone where the resource exists. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.PrivateDNSZone + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.PrivateDNSZone // +kubebuilder:validation:Optional ZoneName *string `json:"zoneName,omitempty" tf:"zone_name,omitempty"` diff --git a/apis/network/v1beta1/zz_privatednstxtrecord_types.go b/apis/network/v1beta1/zz_privatednstxtrecord_types.go index 8fab409c3..d74cf9a61 100755 --- a/apis/network/v1beta1/zz_privatednstxtrecord_types.go +++ b/apis/network/v1beta1/zz_privatednstxtrecord_types.go @@ -80,7 +80,7 @@ type PrivateDNSTXTRecordParameters struct { Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` // Specifies the Private DNS Zone where the resource exists. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.PrivateDNSZone + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.PrivateDNSZone // +kubebuilder:validation:Optional ZoneName *string `json:"zoneName,omitempty" tf:"zone_name,omitempty"` diff --git a/apis/network/v1beta1/zz_privatednszonevirtualnetworklink_types.go b/apis/network/v1beta1/zz_privatednszonevirtualnetworklink_types.go index be4781fb5..faa637333 100755 --- a/apis/network/v1beta1/zz_privatednszonevirtualnetworklink_types.go +++ b/apis/network/v1beta1/zz_privatednszonevirtualnetworklink_types.go @@ -23,7 +23,7 @@ type PrivateDNSZoneVirtualNetworkLinkInitParameters struct { Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` // The ID of the Virtual Network that should be linked to the DNS Zone. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.VirtualNetwork + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.VirtualNetwork // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() VirtualNetworkID *string `json:"virtualNetworkId,omitempty" tf:"virtual_network_id,omitempty"` @@ -61,7 +61,7 @@ type PrivateDNSZoneVirtualNetworkLinkObservation struct { type PrivateDNSZoneVirtualNetworkLinkParameters struct { // The name of the Private DNS zone (without a terminating dot). Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.PrivateDNSZone + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.PrivateDNSZone // +kubebuilder:validation:Optional PrivateDNSZoneName *string `json:"privateDnsZoneName,omitempty" tf:"private_dns_zone_name,omitempty"` @@ -96,7 +96,7 @@ type PrivateDNSZoneVirtualNetworkLinkParameters struct { Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` // The ID of the Virtual Network that should be linked to the DNS Zone. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.VirtualNetwork + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.VirtualNetwork // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() // +kubebuilder:validation:Optional VirtualNetworkID *string `json:"virtualNetworkId,omitempty" tf:"virtual_network_id,omitempty"` diff --git a/apis/network/v1beta1/zz_privateendpointapplicationsecuritygroupassociation_types.go b/apis/network/v1beta1/zz_privateendpointapplicationsecuritygroupassociation_types.go index 75a4f65a6..3e8a90a63 100755 --- a/apis/network/v1beta1/zz_privateendpointapplicationsecuritygroupassociation_types.go +++ b/apis/network/v1beta1/zz_privateendpointapplicationsecuritygroupassociation_types.go @@ -29,7 +29,7 @@ type PrivateEndpointApplicationSecurityGroupAssociationInitParameters struct { ApplicationSecurityGroupIDSelector *v1.Selector `json:"applicationSecurityGroupIdSelector,omitempty" tf:"-"` // The id of private endpoint to associate. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.PrivateEndpoint + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.PrivateEndpoint // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() PrivateEndpointID *string `json:"privateEndpointId,omitempty" tf:"private_endpoint_id,omitempty"` @@ -70,7 +70,7 @@ type PrivateEndpointApplicationSecurityGroupAssociationParameters struct { ApplicationSecurityGroupIDSelector *v1.Selector `json:"applicationSecurityGroupIdSelector,omitempty" tf:"-"` // The id of private endpoint to associate. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.PrivateEndpoint + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.PrivateEndpoint // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional PrivateEndpointID *string `json:"privateEndpointId,omitempty" tf:"private_endpoint_id,omitempty"` diff --git a/apis/network/v1beta1/zz_privatelinkservice_types.go b/apis/network/v1beta1/zz_privatelinkservice_types.go index 807fd245c..ff2671ee9 100755 --- a/apis/network/v1beta1/zz_privatelinkservice_types.go +++ b/apis/network/v1beta1/zz_privatelinkservice_types.go @@ -28,7 +28,7 @@ type NATIPConfigurationInitParameters struct { PrivateIPAddressVersion *string `json:"privateIpAddressVersion,omitempty" tf:"private_ip_address_version,omitempty"` // Specifies the ID of the Subnet which should be used for the Private Link Service. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.Subnet + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` @@ -78,7 +78,7 @@ type NATIPConfigurationParameters struct { PrivateIPAddressVersion *string `json:"privateIpAddressVersion,omitempty" tf:"private_ip_address_version,omitempty"` // Specifies the ID of the Subnet which should be used for the Private Link Service. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.Subnet + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() // +kubebuilder:validation:Optional SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` diff --git a/apis/network/v1beta1/zz_routeserver_types.go b/apis/network/v1beta1/zz_routeserver_types.go index 2779c1075..f80432256 100755 --- a/apis/network/v1beta1/zz_routeserver_types.go +++ b/apis/network/v1beta1/zz_routeserver_types.go @@ -38,7 +38,7 @@ type RouteServerInitParameters struct { Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` // The ID of the Subnet that the Route Server will reside. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.Subnet + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` @@ -132,7 +132,7 @@ type RouteServerParameters struct { Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` // The ID of the Subnet that the Route Server will reside. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.Subnet + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` diff --git a/apis/network/v1beta1/zz_subnetnatgatewayassociation_types.go b/apis/network/v1beta1/zz_subnetnatgatewayassociation_types.go index fb06dd96f..515757bbe 100755 --- a/apis/network/v1beta1/zz_subnetnatgatewayassociation_types.go +++ b/apis/network/v1beta1/zz_subnetnatgatewayassociation_types.go @@ -29,7 +29,7 @@ type SubnetNATGatewayAssociationInitParameters struct { NATGatewayIDSelector *v1.Selector `json:"natGatewayIdSelector,omitempty" tf:"-"` // The ID of the Subnet. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.Subnet + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` @@ -71,7 +71,7 @@ type SubnetNATGatewayAssociationParameters struct { NATGatewayIDSelector *v1.Selector `json:"natGatewayIdSelector,omitempty" tf:"-"` // The ID of the Subnet. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.Subnet + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() // +kubebuilder:validation:Optional SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` diff --git a/apis/network/v1beta1/zz_subnetnetworksecuritygroupassociation_types.go b/apis/network/v1beta1/zz_subnetnetworksecuritygroupassociation_types.go index 4ecb15660..aad9a8d8b 100755 --- a/apis/network/v1beta1/zz_subnetnetworksecuritygroupassociation_types.go +++ b/apis/network/v1beta1/zz_subnetnetworksecuritygroupassociation_types.go @@ -29,7 +29,7 @@ type SubnetNetworkSecurityGroupAssociationInitParameters struct { NetworkSecurityGroupIDSelector *v1.Selector `json:"networkSecurityGroupIdSelector,omitempty" tf:"-"` // The ID of the Subnet. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.Subnet + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` @@ -71,7 +71,7 @@ type SubnetNetworkSecurityGroupAssociationParameters struct { NetworkSecurityGroupIDSelector *v1.Selector `json:"networkSecurityGroupIdSelector,omitempty" tf:"-"` // The ID of the Subnet. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.Subnet + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() // +kubebuilder:validation:Optional SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` diff --git a/apis/network/v1beta1/zz_subnetroutetableassociation_types.go b/apis/network/v1beta1/zz_subnetroutetableassociation_types.go index d8f58e24f..31f64aa17 100755 --- a/apis/network/v1beta1/zz_subnetroutetableassociation_types.go +++ b/apis/network/v1beta1/zz_subnetroutetableassociation_types.go @@ -29,7 +29,7 @@ type SubnetRouteTableAssociationInitParameters struct { RouteTableIDSelector *v1.Selector `json:"routeTableIdSelector,omitempty" tf:"-"` // The ID of the Subnet. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.Subnet + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` @@ -71,7 +71,7 @@ type SubnetRouteTableAssociationParameters struct { RouteTableIDSelector *v1.Selector `json:"routeTableIdSelector,omitempty" tf:"-"` // The ID of the Subnet. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.Subnet + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() // +kubebuilder:validation:Optional SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` diff --git a/apis/network/v1beta1/zz_trafficmanagerazureendpoint_types.go b/apis/network/v1beta1/zz_trafficmanagerazureendpoint_types.go index 5cc181c6a..c53c844b0 100755 --- a/apis/network/v1beta1/zz_trafficmanagerazureendpoint_types.go +++ b/apis/network/v1beta1/zz_trafficmanagerazureendpoint_types.go @@ -135,7 +135,7 @@ type TrafficManagerAzureEndpointParameters struct { Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` // The ID of the Traffic Manager Profile that this Azure Endpoint should be created within. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.TrafficManagerProfile + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.TrafficManagerProfile // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional ProfileID *string `json:"profileId,omitempty" tf:"profile_id,omitempty"` diff --git a/apis/network/v1beta1/zz_trafficmanagerexternalendpoint_types.go b/apis/network/v1beta1/zz_trafficmanagerexternalendpoint_types.go index 0dde2af4a..43d8dde1f 100755 --- a/apis/network/v1beta1/zz_trafficmanagerexternalendpoint_types.go +++ b/apis/network/v1beta1/zz_trafficmanagerexternalendpoint_types.go @@ -135,7 +135,7 @@ type TrafficManagerExternalEndpointParameters struct { Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` // The ID of the Traffic Manager Profile that this External Endpoint should be created within. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.TrafficManagerProfile + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.TrafficManagerProfile // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional ProfileID *string `json:"profileId,omitempty" tf:"profile_id,omitempty"` diff --git a/apis/network/v1beta1/zz_trafficmanagernestedendpoint_types.go b/apis/network/v1beta1/zz_trafficmanagernestedendpoint_types.go index ab40288ed..c37bcc910 100755 --- a/apis/network/v1beta1/zz_trafficmanagernestedendpoint_types.go +++ b/apis/network/v1beta1/zz_trafficmanagernestedendpoint_types.go @@ -72,7 +72,7 @@ type TrafficManagerNestedEndpointInitParameters struct { Subnet []TrafficManagerNestedEndpointSubnetInitParameters `json:"subnet,omitempty" tf:"subnet,omitempty"` // The resource id of an Azure resource to target. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.TrafficManagerProfile + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.TrafficManagerProfile // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() TargetResourceID *string `json:"targetResourceId,omitempty" tf:"target_resource_id,omitempty"` @@ -165,7 +165,7 @@ type TrafficManagerNestedEndpointParameters struct { Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` // The ID of the Traffic Manager Profile that this External Endpoint should be created within. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.TrafficManagerProfile + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.TrafficManagerProfile // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional ProfileID *string `json:"profileId,omitempty" tf:"profile_id,omitempty"` @@ -183,7 +183,7 @@ type TrafficManagerNestedEndpointParameters struct { Subnet []TrafficManagerNestedEndpointSubnetParameters `json:"subnet,omitempty" tf:"subnet,omitempty"` // The resource id of an Azure resource to target. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.TrafficManagerProfile + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.TrafficManagerProfile // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional TargetResourceID *string `json:"targetResourceId,omitempty" tf:"target_resource_id,omitempty"` diff --git a/apis/network/v1beta1/zz_virtualhubip_types.go b/apis/network/v1beta1/zz_virtualhubip_types.go index c220478d5..419740452 100755 --- a/apis/network/v1beta1/zz_virtualhubip_types.go +++ b/apis/network/v1beta1/zz_virtualhubip_types.go @@ -35,7 +35,7 @@ type VirtualHubIPInitParameters struct { PublicIPAddressIDSelector *v1.Selector `json:"publicIpAddressIdSelector,omitempty" tf:"-"` // The ID of the Subnet that the IP will reside. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.Subnet + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` @@ -94,7 +94,7 @@ type VirtualHubIPParameters struct { PublicIPAddressIDSelector *v1.Selector `json:"publicIpAddressIdSelector,omitempty" tf:"-"` // The ID of the Subnet that the IP will reside. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.Subnet + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` diff --git a/apis/network/v1beta1/zz_virtualhubroutetable_types.go b/apis/network/v1beta1/zz_virtualhubroutetable_types.go index 7630be03f..c00967dd4 100755 --- a/apis/network/v1beta1/zz_virtualhubroutetable_types.go +++ b/apis/network/v1beta1/zz_virtualhubroutetable_types.go @@ -78,7 +78,7 @@ type VirtualHubRouteTableRouteInitParameters struct { Name *string `json:"name,omitempty" tf:"name,omitempty"` // The next hop's resource ID. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.VirtualHubConnection + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.VirtualHubConnection // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() NextHop *string `json:"nextHop,omitempty" tf:"next_hop,omitempty"` @@ -129,7 +129,7 @@ type VirtualHubRouteTableRouteParameters struct { Name *string `json:"name" tf:"name,omitempty"` // The next hop's resource ID. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.VirtualHubConnection + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.VirtualHubConnection // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional NextHop *string `json:"nextHop,omitempty" tf:"next_hop,omitempty"` diff --git a/apis/network/v1beta1/zz_virtualhubroutetableroute_types.go b/apis/network/v1beta1/zz_virtualhubroutetableroute_types.go index a9cbe15f4..5d2670456 100755 --- a/apis/network/v1beta1/zz_virtualhubroutetableroute_types.go +++ b/apis/network/v1beta1/zz_virtualhubroutetableroute_types.go @@ -23,7 +23,7 @@ type VirtualHubRouteTableRouteInitParameters_2 struct { DestinationsType *string `json:"destinationsType,omitempty" tf:"destinations_type,omitempty"` // The next hop's resource ID. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.VirtualHubConnection + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.VirtualHubConnection // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() NextHop *string `json:"nextHop,omitempty" tf:"next_hop,omitempty"` @@ -73,7 +73,7 @@ type VirtualHubRouteTableRouteParameters_2 struct { DestinationsType *string `json:"destinationsType,omitempty" tf:"destinations_type,omitempty"` // The next hop's resource ID. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.VirtualHubConnection + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.VirtualHubConnection // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional NextHop *string `json:"nextHop,omitempty" tf:"next_hop,omitempty"` diff --git a/apis/network/v1beta1/zz_virtualnetworkpeering_types.go b/apis/network/v1beta1/zz_virtualnetworkpeering_types.go index ab33fbe00..87ce1f48c 100755 --- a/apis/network/v1beta1/zz_virtualnetworkpeering_types.go +++ b/apis/network/v1beta1/zz_virtualnetworkpeering_types.go @@ -25,7 +25,7 @@ type VirtualNetworkPeeringInitParameters struct { AllowVirtualNetworkAccess *bool `json:"allowVirtualNetworkAccess,omitempty" tf:"allow_virtual_network_access,omitempty"` // The full Azure resource ID of the remote virtual network. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.VirtualNetwork + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.VirtualNetwork // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() RemoteVirtualNetworkID *string `json:"remoteVirtualNetworkId,omitempty" tf:"remote_virtual_network_id,omitempty"` @@ -91,7 +91,7 @@ type VirtualNetworkPeeringParameters struct { AllowVirtualNetworkAccess *bool `json:"allowVirtualNetworkAccess,omitempty" tf:"allow_virtual_network_access,omitempty"` // The full Azure resource ID of the remote virtual network. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.VirtualNetwork + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.VirtualNetwork // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() // +kubebuilder:validation:Optional RemoteVirtualNetworkID *string `json:"remoteVirtualNetworkId,omitempty" tf:"remote_virtual_network_id,omitempty"` @@ -127,7 +127,7 @@ type VirtualNetworkPeeringParameters struct { UseRemoteGateways *bool `json:"useRemoteGateways,omitempty" tf:"use_remote_gateways,omitempty"` // The name of the virtual network. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.VirtualNetwork + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.VirtualNetwork // +kubebuilder:validation:Optional VirtualNetworkName *string `json:"virtualNetworkName,omitempty" tf:"virtual_network_name,omitempty"` diff --git a/apis/network/v1beta1/zz_vpnserverconfigurationpolicygroup_types.go b/apis/network/v1beta1/zz_vpnserverconfigurationpolicygroup_types.go index 3a3060529..de1f41345 100755 --- a/apis/network/v1beta1/zz_vpnserverconfigurationpolicygroup_types.go +++ b/apis/network/v1beta1/zz_vpnserverconfigurationpolicygroup_types.go @@ -97,7 +97,7 @@ type VPNServerConfigurationPolicyGroupParameters struct { Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` // The ID of the VPN Server Configuration that the VPN Server Configuration Policy Group belongs to. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.VPNServerConfiguration + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.VPNServerConfiguration // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional VPNServerConfigurationID *string `json:"vpnServerConfigurationId,omitempty" tf:"vpn_server_configuration_id,omitempty"` diff --git a/apis/network/v1beta2/zz_applicationgateway_terraformed.go b/apis/network/v1beta2/zz_applicationgateway_terraformed.go new file mode 100755 index 000000000..3f844206a --- /dev/null +++ b/apis/network/v1beta2/zz_applicationgateway_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ApplicationGateway +func (mg *ApplicationGateway) GetTerraformResourceType() string { + return "azurerm_application_gateway" +} + +// GetConnectionDetailsMapping for this ApplicationGateway +func (tr *ApplicationGateway) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"authentication_certificate[*].data": "spec.forProvider.authenticationCertificate[*].dataSecretRef", "ssl_certificate[*].data": "spec.forProvider.sslCertificate[*].dataSecretRef", "ssl_certificate[*].password": "spec.forProvider.sslCertificate[*].passwordSecretRef", "trusted_client_certificate[*].data": "spec.forProvider.trustedClientCertificate[*].dataSecretRef", "trusted_root_certificate[*].data": "spec.forProvider.trustedRootCertificate[*].dataSecretRef"} +} + +// GetObservation of this ApplicationGateway +func (tr *ApplicationGateway) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ApplicationGateway +func (tr *ApplicationGateway) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ApplicationGateway +func (tr *ApplicationGateway) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ApplicationGateway +func (tr *ApplicationGateway) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, 
err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ApplicationGateway +func (tr *ApplicationGateway) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ApplicationGateway +func (tr *ApplicationGateway) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this ApplicationGateway +func (tr *ApplicationGateway) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ApplicationGateway using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *ApplicationGateway) LateInitialize(attrs []byte) (bool, error) { + params := &ApplicationGatewayParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ApplicationGateway) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/network/v1beta2/zz_applicationgateway_types.go b/apis/network/v1beta2/zz_applicationgateway_types.go new file mode 100755 index 000000000..90f335102 --- /dev/null +++ b/apis/network/v1beta2/zz_applicationgateway_types.go @@ -0,0 +1,2588 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ApplicationGatewayInitParameters struct { + + // One or more authentication_certificate blocks as defined below. + AuthenticationCertificate []AuthenticationCertificateInitParameters `json:"authenticationCertificate,omitempty" tf:"authentication_certificate,omitempty"` + + // An autoscale_configuration block as defined below. + AutoscaleConfiguration *AutoscaleConfigurationInitParameters `json:"autoscaleConfiguration,omitempty" tf:"autoscale_configuration,omitempty"` + + // One or more backend_address_pool blocks as defined below. 
+ BackendAddressPool []BackendAddressPoolInitParameters `json:"backendAddressPool,omitempty" tf:"backend_address_pool,omitempty"` + + // One or more backend_http_settings blocks as defined below. + BackendHTTPSettings []BackendHTTPSettingsInitParameters `json:"backendHttpSettings,omitempty" tf:"backend_http_settings,omitempty"` + + // One or more custom_error_configuration blocks as defined below. + CustomErrorConfiguration []CustomErrorConfigurationInitParameters `json:"customErrorConfiguration,omitempty" tf:"custom_error_configuration,omitempty"` + + // Is HTTP2 enabled on the application gateway resource? Defaults to false. + EnableHttp2 *bool `json:"enableHttp2,omitempty" tf:"enable_http2,omitempty"` + + // Is FIPS enabled on the Application Gateway? + FipsEnabled *bool `json:"fipsEnabled,omitempty" tf:"fips_enabled,omitempty"` + + // The ID of the Web Application Firewall Policy. + FirewallPolicyID *string `json:"firewallPolicyId,omitempty" tf:"firewall_policy_id,omitempty"` + + // Is the Firewall Policy associated with the Application Gateway? + ForceFirewallPolicyAssociation *bool `json:"forceFirewallPolicyAssociation,omitempty" tf:"force_firewall_policy_association,omitempty"` + + // One or more frontend_ip_configuration blocks as defined below. + FrontendIPConfiguration []FrontendIPConfigurationInitParameters `json:"frontendIpConfiguration,omitempty" tf:"frontend_ip_configuration,omitempty"` + + // One or more frontend_port blocks as defined below. + FrontendPort []FrontendPortInitParameters `json:"frontendPort,omitempty" tf:"frontend_port,omitempty"` + + // One or more gateway_ip_configuration blocks as defined below. + GatewayIPConfiguration []GatewayIPConfigurationInitParameters `json:"gatewayIpConfiguration,omitempty" tf:"gateway_ip_configuration,omitempty"` + + // A global block as defined below. + Global *GlobalInitParameters `json:"global,omitempty" tf:"global,omitempty"` + + // One or more http_listener blocks as defined below. 
+ HTTPListener []HTTPListenerInitParameters `json:"httpListener,omitempty" tf:"http_listener,omitempty"` + + // An identity block as defined below. + Identity *IdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The Azure region where the Application Gateway should exist. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // One or more private_link_configuration blocks as defined below. + PrivateLinkConfiguration []PrivateLinkConfigurationInitParameters `json:"privateLinkConfiguration,omitempty" tf:"private_link_configuration,omitempty"` + + // One or more probe blocks as defined below. + Probe []ProbeInitParameters `json:"probe,omitempty" tf:"probe,omitempty"` + + // One or more redirect_configuration blocks as defined below. + RedirectConfiguration []RedirectConfigurationInitParameters `json:"redirectConfiguration,omitempty" tf:"redirect_configuration,omitempty"` + + // One or more request_routing_rule blocks as defined below. + RequestRoutingRule []RequestRoutingRuleInitParameters `json:"requestRoutingRule,omitempty" tf:"request_routing_rule,omitempty"` + + // One or more rewrite_rule_set blocks as defined below. Only valid for v2 SKUs. + RewriteRuleSet []RewriteRuleSetInitParameters `json:"rewriteRuleSet,omitempty" tf:"rewrite_rule_set,omitempty"` + + // One or more ssl_certificate blocks as defined below. + SSLCertificate []SSLCertificateInitParameters `json:"sslCertificate,omitempty" tf:"ssl_certificate,omitempty"` + + // a ssl_policy block as defined below. + SSLPolicy *SSLPolicyInitParameters `json:"sslPolicy,omitempty" tf:"ssl_policy,omitempty"` + + // One or more ssl_profile blocks as defined below. + SSLProfile []SSLProfileInitParameters `json:"sslProfile,omitempty" tf:"ssl_profile,omitempty"` + + // A sku block as defined below. + Sku *SkuInitParameters `json:"sku,omitempty" tf:"sku,omitempty"` + + // A mapping of tags to assign to the resource. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // One or more trusted_client_certificate blocks as defined below. + TrustedClientCertificate []TrustedClientCertificateInitParameters `json:"trustedClientCertificate,omitempty" tf:"trusted_client_certificate,omitempty"` + + // One or more trusted_root_certificate blocks as defined below. + TrustedRootCertificate []TrustedRootCertificateInitParameters `json:"trustedRootCertificate,omitempty" tf:"trusted_root_certificate,omitempty"` + + // One or more url_path_map blocks as defined below. + URLPathMap []URLPathMapInitParameters `json:"urlPathMap,omitempty" tf:"url_path_map,omitempty"` + + // A waf_configuration block as defined below. + WafConfiguration *WafConfigurationInitParameters `json:"wafConfiguration,omitempty" tf:"waf_configuration,omitempty"` + + // Specifies a list of Availability Zones in which this Application Gateway should be located. Changing this forces a new Application Gateway to be created. + // +listType=set + Zones []*string `json:"zones,omitempty" tf:"zones,omitempty"` +} + +type ApplicationGatewayObservation struct { + + // One or more authentication_certificate blocks as defined below. + AuthenticationCertificate []AuthenticationCertificateObservation `json:"authenticationCertificate,omitempty" tf:"authentication_certificate,omitempty"` + + // An autoscale_configuration block as defined below. + AutoscaleConfiguration *AutoscaleConfigurationObservation `json:"autoscaleConfiguration,omitempty" tf:"autoscale_configuration,omitempty"` + + // One or more backend_address_pool blocks as defined below. + BackendAddressPool []BackendAddressPoolObservation `json:"backendAddressPool,omitempty" tf:"backend_address_pool,omitempty"` + + // One or more backend_http_settings blocks as defined below. 
+ BackendHTTPSettings []BackendHTTPSettingsObservation `json:"backendHttpSettings,omitempty" tf:"backend_http_settings,omitempty"` + + // One or more custom_error_configuration blocks as defined below. + CustomErrorConfiguration []CustomErrorConfigurationObservation `json:"customErrorConfiguration,omitempty" tf:"custom_error_configuration,omitempty"` + + // Is HTTP2 enabled on the application gateway resource? Defaults to false. + EnableHttp2 *bool `json:"enableHttp2,omitempty" tf:"enable_http2,omitempty"` + + // Is FIPS enabled on the Application Gateway? + FipsEnabled *bool `json:"fipsEnabled,omitempty" tf:"fips_enabled,omitempty"` + + // The ID of the Web Application Firewall Policy. + FirewallPolicyID *string `json:"firewallPolicyId,omitempty" tf:"firewall_policy_id,omitempty"` + + // Is the Firewall Policy associated with the Application Gateway? + ForceFirewallPolicyAssociation *bool `json:"forceFirewallPolicyAssociation,omitempty" tf:"force_firewall_policy_association,omitempty"` + + // One or more frontend_ip_configuration blocks as defined below. + FrontendIPConfiguration []FrontendIPConfigurationObservation `json:"frontendIpConfiguration,omitempty" tf:"frontend_ip_configuration,omitempty"` + + // One or more frontend_port blocks as defined below. + FrontendPort []FrontendPortObservation `json:"frontendPort,omitempty" tf:"frontend_port,omitempty"` + + // One or more gateway_ip_configuration blocks as defined below. + GatewayIPConfiguration []GatewayIPConfigurationObservation `json:"gatewayIpConfiguration,omitempty" tf:"gateway_ip_configuration,omitempty"` + + // A global block as defined below. + Global *GlobalObservation `json:"global,omitempty" tf:"global,omitempty"` + + // One or more http_listener blocks as defined below. + HTTPListener []HTTPListenerObservation `json:"httpListener,omitempty" tf:"http_listener,omitempty"` + + // The ID of the Application Gateway. 
+ ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. + Identity *IdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // The Azure region where the Application Gateway should exist. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A list of private_endpoint_connection blocks as defined below. + PrivateEndpointConnection []PrivateEndpointConnectionObservation `json:"privateEndpointConnection,omitempty" tf:"private_endpoint_connection,omitempty"` + + // One or more private_link_configuration blocks as defined below. + PrivateLinkConfiguration []PrivateLinkConfigurationObservation `json:"privateLinkConfiguration,omitempty" tf:"private_link_configuration,omitempty"` + + // One or more probe blocks as defined below. + Probe []ProbeObservation `json:"probe,omitempty" tf:"probe,omitempty"` + + // One or more redirect_configuration blocks as defined below. + RedirectConfiguration []RedirectConfigurationObservation `json:"redirectConfiguration,omitempty" tf:"redirect_configuration,omitempty"` + + // One or more request_routing_rule blocks as defined below. + RequestRoutingRule []RequestRoutingRuleObservation `json:"requestRoutingRule,omitempty" tf:"request_routing_rule,omitempty"` + + // The name of the resource group in which to the Application Gateway should exist. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // One or more rewrite_rule_set blocks as defined below. Only valid for v2 SKUs. + RewriteRuleSet []RewriteRuleSetObservation `json:"rewriteRuleSet,omitempty" tf:"rewrite_rule_set,omitempty"` + + // One or more ssl_certificate blocks as defined below. + SSLCertificate []SSLCertificateObservation `json:"sslCertificate,omitempty" tf:"ssl_certificate,omitempty"` + + // a ssl_policy block as defined below. 
+ SSLPolicy *SSLPolicyObservation `json:"sslPolicy,omitempty" tf:"ssl_policy,omitempty"` + + // One or more ssl_profile blocks as defined below. + SSLProfile []SSLProfileObservation `json:"sslProfile,omitempty" tf:"ssl_profile,omitempty"` + + // A sku block as defined below. + Sku *SkuObservation `json:"sku,omitempty" tf:"sku,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // One or more trusted_client_certificate blocks as defined below. + TrustedClientCertificate []TrustedClientCertificateObservation `json:"trustedClientCertificate,omitempty" tf:"trusted_client_certificate,omitempty"` + + // One or more trusted_root_certificate blocks as defined below. + TrustedRootCertificate []TrustedRootCertificateObservation `json:"trustedRootCertificate,omitempty" tf:"trusted_root_certificate,omitempty"` + + // One or more url_path_map blocks as defined below. + URLPathMap []URLPathMapObservation `json:"urlPathMap,omitempty" tf:"url_path_map,omitempty"` + + // A waf_configuration block as defined below. + WafConfiguration *WafConfigurationObservation `json:"wafConfiguration,omitempty" tf:"waf_configuration,omitempty"` + + // Specifies a list of Availability Zones in which this Application Gateway should be located. Changing this forces a new Application Gateway to be created. + // +listType=set + Zones []*string `json:"zones,omitempty" tf:"zones,omitempty"` +} + +type ApplicationGatewayParameters struct { + + // One or more authentication_certificate blocks as defined below. + // +kubebuilder:validation:Optional + AuthenticationCertificate []AuthenticationCertificateParameters `json:"authenticationCertificate,omitempty" tf:"authentication_certificate,omitempty"` + + // An autoscale_configuration block as defined below. 
+ // +kubebuilder:validation:Optional + AutoscaleConfiguration *AutoscaleConfigurationParameters `json:"autoscaleConfiguration,omitempty" tf:"autoscale_configuration,omitempty"` + + // One or more backend_address_pool blocks as defined below. + // +kubebuilder:validation:Optional + BackendAddressPool []BackendAddressPoolParameters `json:"backendAddressPool,omitempty" tf:"backend_address_pool,omitempty"` + + // One or more backend_http_settings blocks as defined below. + // +kubebuilder:validation:Optional + BackendHTTPSettings []BackendHTTPSettingsParameters `json:"backendHttpSettings,omitempty" tf:"backend_http_settings,omitempty"` + + // One or more custom_error_configuration blocks as defined below. + // +kubebuilder:validation:Optional + CustomErrorConfiguration []CustomErrorConfigurationParameters `json:"customErrorConfiguration,omitempty" tf:"custom_error_configuration,omitempty"` + + // Is HTTP2 enabled on the application gateway resource? Defaults to false. + // +kubebuilder:validation:Optional + EnableHttp2 *bool `json:"enableHttp2,omitempty" tf:"enable_http2,omitempty"` + + // Is FIPS enabled on the Application Gateway? + // +kubebuilder:validation:Optional + FipsEnabled *bool `json:"fipsEnabled,omitempty" tf:"fips_enabled,omitempty"` + + // The ID of the Web Application Firewall Policy. + // +kubebuilder:validation:Optional + FirewallPolicyID *string `json:"firewallPolicyId,omitempty" tf:"firewall_policy_id,omitempty"` + + // Is the Firewall Policy associated with the Application Gateway? + // +kubebuilder:validation:Optional + ForceFirewallPolicyAssociation *bool `json:"forceFirewallPolicyAssociation,omitempty" tf:"force_firewall_policy_association,omitempty"` + + // One or more frontend_ip_configuration blocks as defined below. 
+ // +kubebuilder:validation:Optional + FrontendIPConfiguration []FrontendIPConfigurationParameters `json:"frontendIpConfiguration,omitempty" tf:"frontend_ip_configuration,omitempty"` + + // One or more frontend_port blocks as defined below. + // +kubebuilder:validation:Optional + FrontendPort []FrontendPortParameters `json:"frontendPort,omitempty" tf:"frontend_port,omitempty"` + + // One or more gateway_ip_configuration blocks as defined below. + // +kubebuilder:validation:Optional + GatewayIPConfiguration []GatewayIPConfigurationParameters `json:"gatewayIpConfiguration,omitempty" tf:"gateway_ip_configuration,omitempty"` + + // A global block as defined below. + // +kubebuilder:validation:Optional + Global *GlobalParameters `json:"global,omitempty" tf:"global,omitempty"` + + // One or more http_listener blocks as defined below. + // +kubebuilder:validation:Optional + HTTPListener []HTTPListenerParameters `json:"httpListener,omitempty" tf:"http_listener,omitempty"` + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *IdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The Azure region where the Application Gateway should exist. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // One or more private_link_configuration blocks as defined below. + // +kubebuilder:validation:Optional + PrivateLinkConfiguration []PrivateLinkConfigurationParameters `json:"privateLinkConfiguration,omitempty" tf:"private_link_configuration,omitempty"` + + // One or more probe blocks as defined below. + // +kubebuilder:validation:Optional + Probe []ProbeParameters `json:"probe,omitempty" tf:"probe,omitempty"` + + // One or more redirect_configuration blocks as defined below. 
+ // +kubebuilder:validation:Optional + RedirectConfiguration []RedirectConfigurationParameters `json:"redirectConfiguration,omitempty" tf:"redirect_configuration,omitempty"` + + // One or more request_routing_rule blocks as defined below. + // +kubebuilder:validation:Optional + RequestRoutingRule []RequestRoutingRuleParameters `json:"requestRoutingRule,omitempty" tf:"request_routing_rule,omitempty"` + + // The name of the resource group in which to the Application Gateway should exist. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // One or more rewrite_rule_set blocks as defined below. Only valid for v2 SKUs. + // +kubebuilder:validation:Optional + RewriteRuleSet []RewriteRuleSetParameters `json:"rewriteRuleSet,omitempty" tf:"rewrite_rule_set,omitempty"` + + // One or more ssl_certificate blocks as defined below. + // +kubebuilder:validation:Optional + SSLCertificate []SSLCertificateParameters `json:"sslCertificate,omitempty" tf:"ssl_certificate,omitempty"` + + // a ssl_policy block as defined below. + // +kubebuilder:validation:Optional + SSLPolicy *SSLPolicyParameters `json:"sslPolicy,omitempty" tf:"ssl_policy,omitempty"` + + // One or more ssl_profile blocks as defined below. 
+ // +kubebuilder:validation:Optional + SSLProfile []SSLProfileParameters `json:"sslProfile,omitempty" tf:"ssl_profile,omitempty"` + + // A sku block as defined below. + // +kubebuilder:validation:Optional + Sku *SkuParameters `json:"sku,omitempty" tf:"sku,omitempty"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // One or more trusted_client_certificate blocks as defined below. + // +kubebuilder:validation:Optional + TrustedClientCertificate []TrustedClientCertificateParameters `json:"trustedClientCertificate,omitempty" tf:"trusted_client_certificate,omitempty"` + + // One or more trusted_root_certificate blocks as defined below. + // +kubebuilder:validation:Optional + TrustedRootCertificate []TrustedRootCertificateParameters `json:"trustedRootCertificate,omitempty" tf:"trusted_root_certificate,omitempty"` + + // One or more url_path_map blocks as defined below. + // +kubebuilder:validation:Optional + URLPathMap []URLPathMapParameters `json:"urlPathMap,omitempty" tf:"url_path_map,omitempty"` + + // A waf_configuration block as defined below. + // +kubebuilder:validation:Optional + WafConfiguration *WafConfigurationParameters `json:"wafConfiguration,omitempty" tf:"waf_configuration,omitempty"` + + // Specifies a list of Availability Zones in which this Application Gateway should be located. Changing this forces a new Application Gateway to be created. + // +kubebuilder:validation:Optional + // +listType=set + Zones []*string `json:"zones,omitempty" tf:"zones,omitempty"` +} + +type AuthenticationCertificateInitParameters struct { + + // The Name of the Authentication Certificate to use. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type AuthenticationCertificateObservation struct { + + // The ID of the Authentication Certificate. 
+ ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The Name of the Authentication Certificate to use. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type AuthenticationCertificateParameters struct { + + // The contents of the Authentication Certificate which should be used. + // +kubebuilder:validation:Required + DataSecretRef v1.SecretKeySelector `json:"dataSecretRef" tf:"-"` + + // The Name of the Authentication Certificate to use. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type AutoscaleConfigurationInitParameters struct { + + // Maximum capacity for autoscaling. Accepted values are in the range 2 to 125. + MaxCapacity *float64 `json:"maxCapacity,omitempty" tf:"max_capacity,omitempty"` + + // Minimum capacity for autoscaling. Accepted values are in the range 0 to 100. + MinCapacity *float64 `json:"minCapacity,omitempty" tf:"min_capacity,omitempty"` +} + +type AutoscaleConfigurationObservation struct { + + // Maximum capacity for autoscaling. Accepted values are in the range 2 to 125. + MaxCapacity *float64 `json:"maxCapacity,omitempty" tf:"max_capacity,omitempty"` + + // Minimum capacity for autoscaling. Accepted values are in the range 0 to 100. + MinCapacity *float64 `json:"minCapacity,omitempty" tf:"min_capacity,omitempty"` +} + +type AutoscaleConfigurationParameters struct { + + // Maximum capacity for autoscaling. Accepted values are in the range 2 to 125. + // +kubebuilder:validation:Optional + MaxCapacity *float64 `json:"maxCapacity,omitempty" tf:"max_capacity,omitempty"` + + // Minimum capacity for autoscaling. Accepted values are in the range 0 to 100. + // +kubebuilder:validation:Optional + MinCapacity *float64 `json:"minCapacity" tf:"min_capacity,omitempty"` +} + +type BackendAddressPoolInitParameters struct { + + // A list of FQDN's which should be part of the Backend Address Pool. 
+ // +listType=set + Fqdns []*string `json:"fqdns,omitempty" tf:"fqdns,omitempty"` + + // A list of IP Addresses which should be part of the Backend Address Pool. + // +listType=set + IPAddresses []*string `json:"ipAddresses,omitempty" tf:"ip_addresses,omitempty"` + + // The name of the Backend Address Pool. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type BackendAddressPoolObservation struct { + + // A list of FQDN's which should be part of the Backend Address Pool. + // +listType=set + Fqdns []*string `json:"fqdns,omitempty" tf:"fqdns,omitempty"` + + // The ID of the Backend Address Pool. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A list of IP Addresses which should be part of the Backend Address Pool. + // +listType=set + IPAddresses []*string `json:"ipAddresses,omitempty" tf:"ip_addresses,omitempty"` + + // The name of the Backend Address Pool. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type BackendAddressPoolParameters struct { + + // A list of FQDN's which should be part of the Backend Address Pool. + // +kubebuilder:validation:Optional + // +listType=set + Fqdns []*string `json:"fqdns,omitempty" tf:"fqdns,omitempty"` + + // A list of IP Addresses which should be part of the Backend Address Pool. + // +kubebuilder:validation:Optional + // +listType=set + IPAddresses []*string `json:"ipAddresses,omitempty" tf:"ip_addresses,omitempty"` + + // The name of the Backend Address Pool. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type BackendHTTPSettingsAuthenticationCertificateInitParameters struct { + + // The Name of the URL Path Map. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type BackendHTTPSettingsAuthenticationCertificateObservation struct { + + // The ID of the URL Path Map. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The Name of the URL Path Map. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type BackendHTTPSettingsAuthenticationCertificateParameters struct { + + // The Name of the URL Path Map. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type BackendHTTPSettingsInitParameters struct { + + // The name of the affinity cookie. + AffinityCookieName *string `json:"affinityCookieName,omitempty" tf:"affinity_cookie_name,omitempty"` + + // One or more authentication_certificate_backend blocks as defined below. + AuthenticationCertificate []BackendHTTPSettingsAuthenticationCertificateInitParameters `json:"authenticationCertificate,omitempty" tf:"authentication_certificate,omitempty"` + + // A connection_draining block as defined below. + ConnectionDraining *ConnectionDrainingInitParameters `json:"connectionDraining,omitempty" tf:"connection_draining,omitempty"` + + // Is Cookie-Based Affinity enabled? Possible values are Enabled and Disabled. + CookieBasedAffinity *string `json:"cookieBasedAffinity,omitempty" tf:"cookie_based_affinity,omitempty"` + + // Host header to be sent to the backend servers. Cannot be set if pick_host_name_from_backend_address is set to true. + HostName *string `json:"hostName,omitempty" tf:"host_name,omitempty"` + + // The name of the Backend HTTP Settings Collection. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Path which should be used as a prefix for all HTTP requests. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // Whether host header should be picked from the host name of the backend server. Defaults to false. + PickHostNameFromBackendAddress *bool `json:"pickHostNameFromBackendAddress,omitempty" tf:"pick_host_name_from_backend_address,omitempty"` + + // The port which should be used for this Backend HTTP Settings Collection. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // The name of an associated HTTP Probe. 
+ ProbeName *string `json:"probeName,omitempty" tf:"probe_name,omitempty"` + + // The Protocol which should be used. Possible values are Http and Https. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // The request timeout in seconds, which must be between 1 and 86400 seconds. Defaults to 30. + RequestTimeout *float64 `json:"requestTimeout,omitempty" tf:"request_timeout,omitempty"` + + // A list of trusted_root_certificate names. + TrustedRootCertificateNames []*string `json:"trustedRootCertificateNames,omitempty" tf:"trusted_root_certificate_names,omitempty"` +} + +type BackendHTTPSettingsObservation struct { + + // The name of the affinity cookie. + AffinityCookieName *string `json:"affinityCookieName,omitempty" tf:"affinity_cookie_name,omitempty"` + + // One or more authentication_certificate_backend blocks as defined below. + AuthenticationCertificate []BackendHTTPSettingsAuthenticationCertificateObservation `json:"authenticationCertificate,omitempty" tf:"authentication_certificate,omitempty"` + + // A connection_draining block as defined below. + ConnectionDraining *ConnectionDrainingObservation `json:"connectionDraining,omitempty" tf:"connection_draining,omitempty"` + + // Is Cookie-Based Affinity enabled? Possible values are Enabled and Disabled. + CookieBasedAffinity *string `json:"cookieBasedAffinity,omitempty" tf:"cookie_based_affinity,omitempty"` + + // Host header to be sent to the backend servers. Cannot be set if pick_host_name_from_backend_address is set to true. + HostName *string `json:"hostName,omitempty" tf:"host_name,omitempty"` + + // The ID of the Backend HTTP Settings Configuration. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name of the Backend HTTP Settings Collection. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Path which should be used as a prefix for all HTTP requests. 
+ Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // Whether host header should be picked from the host name of the backend server. Defaults to false. + PickHostNameFromBackendAddress *bool `json:"pickHostNameFromBackendAddress,omitempty" tf:"pick_host_name_from_backend_address,omitempty"` + + // The port which should be used for this Backend HTTP Settings Collection. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // The ID of the associated Probe. + ProbeID *string `json:"probeId,omitempty" tf:"probe_id,omitempty"` + + // The name of an associated HTTP Probe. + ProbeName *string `json:"probeName,omitempty" tf:"probe_name,omitempty"` + + // The Protocol which should be used. Possible values are Http and Https. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // The request timeout in seconds, which must be between 1 and 86400 seconds. Defaults to 30. + RequestTimeout *float64 `json:"requestTimeout,omitempty" tf:"request_timeout,omitempty"` + + // A list of trusted_root_certificate names. + TrustedRootCertificateNames []*string `json:"trustedRootCertificateNames,omitempty" tf:"trusted_root_certificate_names,omitempty"` +} + +type BackendHTTPSettingsParameters struct { + + // The name of the affinity cookie. + // +kubebuilder:validation:Optional + AffinityCookieName *string `json:"affinityCookieName,omitempty" tf:"affinity_cookie_name,omitempty"` + + // One or more authentication_certificate_backend blocks as defined below. + // +kubebuilder:validation:Optional + AuthenticationCertificate []BackendHTTPSettingsAuthenticationCertificateParameters `json:"authenticationCertificate,omitempty" tf:"authentication_certificate,omitempty"` + + // A connection_draining block as defined below. + // +kubebuilder:validation:Optional + ConnectionDraining *ConnectionDrainingParameters `json:"connectionDraining,omitempty" tf:"connection_draining,omitempty"` + + // Is Cookie-Based Affinity enabled? 
Possible values are Enabled and Disabled. + // +kubebuilder:validation:Optional + CookieBasedAffinity *string `json:"cookieBasedAffinity" tf:"cookie_based_affinity,omitempty"` + + // Host header to be sent to the backend servers. Cannot be set if pick_host_name_from_backend_address is set to true. + // +kubebuilder:validation:Optional + HostName *string `json:"hostName,omitempty" tf:"host_name,omitempty"` + + // The name of the Backend HTTP Settings Collection. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The Path which should be used as a prefix for all HTTP requests. + // +kubebuilder:validation:Optional + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // Whether host header should be picked from the host name of the backend server. Defaults to false. + // +kubebuilder:validation:Optional + PickHostNameFromBackendAddress *bool `json:"pickHostNameFromBackendAddress,omitempty" tf:"pick_host_name_from_backend_address,omitempty"` + + // The port which should be used for this Backend HTTP Settings Collection. + // +kubebuilder:validation:Optional + Port *float64 `json:"port" tf:"port,omitempty"` + + // The name of an associated HTTP Probe. + // +kubebuilder:validation:Optional + ProbeName *string `json:"probeName,omitempty" tf:"probe_name,omitempty"` + + // The Protocol which should be used. Possible values are Http and Https. + // +kubebuilder:validation:Optional + Protocol *string `json:"protocol" tf:"protocol,omitempty"` + + // The request timeout in seconds, which must be between 1 and 86400 seconds. Defaults to 30. + // +kubebuilder:validation:Optional + RequestTimeout *float64 `json:"requestTimeout,omitempty" tf:"request_timeout,omitempty"` + + // A list of trusted_root_certificate names. 
+ // +kubebuilder:validation:Optional + TrustedRootCertificateNames []*string `json:"trustedRootCertificateNames,omitempty" tf:"trusted_root_certificate_names,omitempty"` +} + +type ConditionInitParameters struct { + + // Perform a case in-sensitive comparison. Defaults to false + IgnoreCase *bool `json:"ignoreCase,omitempty" tf:"ignore_case,omitempty"` + + // Negate the result of the condition evaluation. Defaults to false + Negate *bool `json:"negate,omitempty" tf:"negate,omitempty"` + + // The pattern, either fixed string or regular expression, that evaluates the truthfulness of the condition. + Pattern *string `json:"pattern,omitempty" tf:"pattern,omitempty"` + + // The variable of the condition. + Variable *string `json:"variable,omitempty" tf:"variable,omitempty"` +} + +type ConditionObservation struct { + + // Perform a case in-sensitive comparison. Defaults to false + IgnoreCase *bool `json:"ignoreCase,omitempty" tf:"ignore_case,omitempty"` + + // Negate the result of the condition evaluation. Defaults to false + Negate *bool `json:"negate,omitempty" tf:"negate,omitempty"` + + // The pattern, either fixed string or regular expression, that evaluates the truthfulness of the condition. + Pattern *string `json:"pattern,omitempty" tf:"pattern,omitempty"` + + // The variable of the condition. + Variable *string `json:"variable,omitempty" tf:"variable,omitempty"` +} + +type ConditionParameters struct { + + // Perform a case in-sensitive comparison. Defaults to false + // +kubebuilder:validation:Optional + IgnoreCase *bool `json:"ignoreCase,omitempty" tf:"ignore_case,omitempty"` + + // Negate the result of the condition evaluation. Defaults to false + // +kubebuilder:validation:Optional + Negate *bool `json:"negate,omitempty" tf:"negate,omitempty"` + + // The pattern, either fixed string or regular expression, that evaluates the truthfulness of the condition. 
+ // +kubebuilder:validation:Optional + Pattern *string `json:"pattern" tf:"pattern,omitempty"` + + // The variable of the condition. + // +kubebuilder:validation:Optional + Variable *string `json:"variable" tf:"variable,omitempty"` +} + +type ConnectionDrainingInitParameters struct { + + // The number of seconds connection draining is active. Acceptable values are from 1 second to 3600 seconds. + DrainTimeoutSec *float64 `json:"drainTimeoutSec,omitempty" tf:"drain_timeout_sec,omitempty"` + + // Is the Web Application Firewall enabled? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type ConnectionDrainingObservation struct { + + // The number of seconds connection draining is active. Acceptable values are from 1 second to 3600 seconds. + DrainTimeoutSec *float64 `json:"drainTimeoutSec,omitempty" tf:"drain_timeout_sec,omitempty"` + + // Is the Web Application Firewall enabled? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type ConnectionDrainingParameters struct { + + // The number of seconds connection draining is active. Acceptable values are from 1 second to 3600 seconds. + // +kubebuilder:validation:Optional + DrainTimeoutSec *float64 `json:"drainTimeoutSec" tf:"drain_timeout_sec,omitempty"` + + // Is the Web Application Firewall enabled? + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` +} + +type CustomErrorConfigurationInitParameters struct { + + // Error page URL of the application gateway customer error. + CustomErrorPageURL *string `json:"customErrorPageUrl,omitempty" tf:"custom_error_page_url,omitempty"` + + // Status code of the application gateway customer error. Possible values are HttpStatus403 and HttpStatus502 + StatusCode *string `json:"statusCode,omitempty" tf:"status_code,omitempty"` +} + +type CustomErrorConfigurationObservation struct { + + // Error page URL of the application gateway customer error. 
+ CustomErrorPageURL *string `json:"customErrorPageUrl,omitempty" tf:"custom_error_page_url,omitempty"` + + // The ID of the Custom Error Configuration. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Status code of the application gateway customer error. Possible values are HttpStatus403 and HttpStatus502 + StatusCode *string `json:"statusCode,omitempty" tf:"status_code,omitempty"` +} + +type CustomErrorConfigurationParameters struct { + + // Error page URL of the application gateway customer error. + // +kubebuilder:validation:Optional + CustomErrorPageURL *string `json:"customErrorPageUrl" tf:"custom_error_page_url,omitempty"` + + // Status code of the application gateway customer error. Possible values are HttpStatus403 and HttpStatus502 + // +kubebuilder:validation:Optional + StatusCode *string `json:"statusCode" tf:"status_code,omitempty"` +} + +type DisabledRuleGroupInitParameters struct { + + // The rule group where specific rules should be disabled. Possible values are BadBots, crs_20_protocol_violations, crs_21_protocol_anomalies, crs_23_request_limits, crs_30_http_policy, crs_35_bad_robots, crs_40_generic_attacks, crs_41_sql_injection_attacks, crs_41_xss_attacks, crs_42_tight_security, crs_45_trojans, crs_49_inbound_blocking, General, GoodBots, KnownBadBots, Known-CVEs, REQUEST-911-METHOD-ENFORCEMENT, REQUEST-913-SCANNER-DETECTION, REQUEST-920-PROTOCOL-ENFORCEMENT, REQUEST-921-PROTOCOL-ATTACK, REQUEST-930-APPLICATION-ATTACK-LFI, REQUEST-931-APPLICATION-ATTACK-RFI, REQUEST-932-APPLICATION-ATTACK-RCE, REQUEST-933-APPLICATION-ATTACK-PHP, REQUEST-941-APPLICATION-ATTACK-XSS, REQUEST-942-APPLICATION-ATTACK-SQLI, REQUEST-943-APPLICATION-ATTACK-SESSION-FIXATION, REQUEST-944-APPLICATION-ATTACK-JAVA, UnknownBots, METHOD-ENFORCEMENT, PROTOCOL-ENFORCEMENT, PROTOCOL-ATTACK, LFI, RFI, RCE, PHP, NODEJS, XSS, SQLI, FIX, JAVA, MS-ThreatIntel-WebShells, MS-ThreatIntel-AppSec, MS-ThreatIntel-SQLI and MS-ThreatIntel-CVEs. 
+ RuleGroupName *string `json:"ruleGroupName,omitempty" tf:"rule_group_name,omitempty"` + + // A list of rules which should be disabled in that group. Disables all rules in the specified group if rules is not specified. + Rules []*float64 `json:"rules,omitempty" tf:"rules,omitempty"` +} + +type DisabledRuleGroupObservation struct { + + // The rule group where specific rules should be disabled. Possible values are BadBots, crs_20_protocol_violations, crs_21_protocol_anomalies, crs_23_request_limits, crs_30_http_policy, crs_35_bad_robots, crs_40_generic_attacks, crs_41_sql_injection_attacks, crs_41_xss_attacks, crs_42_tight_security, crs_45_trojans, crs_49_inbound_blocking, General, GoodBots, KnownBadBots, Known-CVEs, REQUEST-911-METHOD-ENFORCEMENT, REQUEST-913-SCANNER-DETECTION, REQUEST-920-PROTOCOL-ENFORCEMENT, REQUEST-921-PROTOCOL-ATTACK, REQUEST-930-APPLICATION-ATTACK-LFI, REQUEST-931-APPLICATION-ATTACK-RFI, REQUEST-932-APPLICATION-ATTACK-RCE, REQUEST-933-APPLICATION-ATTACK-PHP, REQUEST-941-APPLICATION-ATTACK-XSS, REQUEST-942-APPLICATION-ATTACK-SQLI, REQUEST-943-APPLICATION-ATTACK-SESSION-FIXATION, REQUEST-944-APPLICATION-ATTACK-JAVA, UnknownBots, METHOD-ENFORCEMENT, PROTOCOL-ENFORCEMENT, PROTOCOL-ATTACK, LFI, RFI, RCE, PHP, NODEJS, XSS, SQLI, FIX, JAVA, MS-ThreatIntel-WebShells, MS-ThreatIntel-AppSec, MS-ThreatIntel-SQLI and MS-ThreatIntel-CVEs. + RuleGroupName *string `json:"ruleGroupName,omitempty" tf:"rule_group_name,omitempty"` + + // A list of rules which should be disabled in that group. Disables all rules in the specified group if rules is not specified. + Rules []*float64 `json:"rules,omitempty" tf:"rules,omitempty"` +} + +type DisabledRuleGroupParameters struct { + + // The rule group where specific rules should be disabled. 
Possible values are BadBots, crs_20_protocol_violations, crs_21_protocol_anomalies, crs_23_request_limits, crs_30_http_policy, crs_35_bad_robots, crs_40_generic_attacks, crs_41_sql_injection_attacks, crs_41_xss_attacks, crs_42_tight_security, crs_45_trojans, crs_49_inbound_blocking, General, GoodBots, KnownBadBots, Known-CVEs, REQUEST-911-METHOD-ENFORCEMENT, REQUEST-913-SCANNER-DETECTION, REQUEST-920-PROTOCOL-ENFORCEMENT, REQUEST-921-PROTOCOL-ATTACK, REQUEST-930-APPLICATION-ATTACK-LFI, REQUEST-931-APPLICATION-ATTACK-RFI, REQUEST-932-APPLICATION-ATTACK-RCE, REQUEST-933-APPLICATION-ATTACK-PHP, REQUEST-941-APPLICATION-ATTACK-XSS, REQUEST-942-APPLICATION-ATTACK-SQLI, REQUEST-943-APPLICATION-ATTACK-SESSION-FIXATION, REQUEST-944-APPLICATION-ATTACK-JAVA, UnknownBots, METHOD-ENFORCEMENT, PROTOCOL-ENFORCEMENT, PROTOCOL-ATTACK, LFI, RFI, RCE, PHP, NODEJS, XSS, SQLI, FIX, JAVA, MS-ThreatIntel-WebShells, MS-ThreatIntel-AppSec, MS-ThreatIntel-SQLI and MS-ThreatIntel-CVEs. + // +kubebuilder:validation:Optional + RuleGroupName *string `json:"ruleGroupName" tf:"rule_group_name,omitempty"` + + // A list of rules which should be disabled in that group. Disables all rules in the specified group if rules is not specified. + // +kubebuilder:validation:Optional + Rules []*float64 `json:"rules,omitempty" tf:"rules,omitempty"` +} + +type ExclusionInitParameters struct { + + // Match variable of the exclusion rule to exclude header, cookie or GET arguments. Possible values are RequestArgKeys, RequestArgNames, RequestArgValues, RequestCookieKeys, RequestCookieNames, RequestCookieValues, RequestHeaderKeys, RequestHeaderNames and RequestHeaderValues + MatchVariable *string `json:"matchVariable,omitempty" tf:"match_variable,omitempty"` + + // String value which will be used for the filter operation. If empty will exclude all traffic on this match_variable + Selector *string `json:"selector,omitempty" tf:"selector,omitempty"` + + // Operator which will be used to search in the variable content. 
Possible values are Contains, EndsWith, Equals, EqualsAny and StartsWith. If empty will exclude all traffic on this match_variable + SelectorMatchOperator *string `json:"selectorMatchOperator,omitempty" tf:"selector_match_operator,omitempty"` +} + +type ExclusionObservation struct { + + // Match variable of the exclusion rule to exclude header, cookie or GET arguments. Possible values are RequestArgKeys, RequestArgNames, RequestArgValues, RequestCookieKeys, RequestCookieNames, RequestCookieValues, RequestHeaderKeys, RequestHeaderNames and RequestHeaderValues + MatchVariable *string `json:"matchVariable,omitempty" tf:"match_variable,omitempty"` + + // String value which will be used for the filter operation. If empty will exclude all traffic on this match_variable + Selector *string `json:"selector,omitempty" tf:"selector,omitempty"` + + // Operator which will be used to search in the variable content. Possible values are Contains, EndsWith, Equals, EqualsAny and StartsWith. If empty will exclude all traffic on this match_variable + SelectorMatchOperator *string `json:"selectorMatchOperator,omitempty" tf:"selector_match_operator,omitempty"` +} + +type ExclusionParameters struct { + + // Match variable of the exclusion rule to exclude header, cookie or GET arguments. Possible values are RequestArgKeys, RequestArgNames, RequestArgValues, RequestCookieKeys, RequestCookieNames, RequestCookieValues, RequestHeaderKeys, RequestHeaderNames and RequestHeaderValues + // +kubebuilder:validation:Optional + MatchVariable *string `json:"matchVariable" tf:"match_variable,omitempty"` + + // String value which will be used for the filter operation. If empty will exclude all traffic on this match_variable + // +kubebuilder:validation:Optional + Selector *string `json:"selector,omitempty" tf:"selector,omitempty"` + + // Operator which will be used to search in the variable content. Possible values are Contains, EndsWith, Equals, EqualsAny and StartsWith. 
If empty will exclude all traffic on this match_variable + // +kubebuilder:validation:Optional + SelectorMatchOperator *string `json:"selectorMatchOperator,omitempty" tf:"selector_match_operator,omitempty"` +} + +type FrontendIPConfigurationInitParameters struct { + + // The name of the Frontend IP Configuration. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Private IP Address to use for the Application Gateway. + PrivateIPAddress *string `json:"privateIpAddress,omitempty" tf:"private_ip_address,omitempty"` + + // The Allocation Method for the Private IP Address. Possible values are Dynamic and Static. Defaults to Dynamic. + PrivateIPAddressAllocation *string `json:"privateIpAddressAllocation,omitempty" tf:"private_ip_address_allocation,omitempty"` + + // The name of the private link configuration to use for this frontend IP configuration. + PrivateLinkConfigurationName *string `json:"privateLinkConfigurationName,omitempty" tf:"private_link_configuration_name,omitempty"` + + // The ID of a Public IP Address which the Application Gateway should use. The allocation method for the Public IP Address depends on the sku of this Application Gateway. Please refer to the Azure documentation for public IP addresses for details. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.PublicIP + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + PublicIPAddressID *string `json:"publicIpAddressId,omitempty" tf:"public_ip_address_id,omitempty"` + + // Reference to a PublicIP in network to populate publicIpAddressId. + // +kubebuilder:validation:Optional + PublicIPAddressIDRef *v1.Reference `json:"publicIpAddressIdRef,omitempty" tf:"-"` + + // Selector for a PublicIP in network to populate publicIpAddressId. 
+ // +kubebuilder:validation:Optional + PublicIPAddressIDSelector *v1.Selector `json:"publicIpAddressIdSelector,omitempty" tf:"-"` + + // The ID of the Subnet. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` +} + +type FrontendIPConfigurationObservation struct { + + // The ID of the Frontend IP Configuration. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name of the Frontend IP Configuration. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Private IP Address to use for the Application Gateway. + PrivateIPAddress *string `json:"privateIpAddress,omitempty" tf:"private_ip_address,omitempty"` + + // The Allocation Method for the Private IP Address. Possible values are Dynamic and Static. Defaults to Dynamic. + PrivateIPAddressAllocation *string `json:"privateIpAddressAllocation,omitempty" tf:"private_ip_address_allocation,omitempty"` + + // The ID of the associated private link configuration. + PrivateLinkConfigurationID *string `json:"privateLinkConfigurationId,omitempty" tf:"private_link_configuration_id,omitempty"` + + // The name of the private link configuration to use for this frontend IP configuration. + PrivateLinkConfigurationName *string `json:"privateLinkConfigurationName,omitempty" tf:"private_link_configuration_name,omitempty"` + + // The ID of a Public IP Address which the Application Gateway should use. 
The allocation method for the Public IP Address depends on the sku of this Application Gateway. Please refer to the Azure documentation for public IP addresses for details. + PublicIPAddressID *string `json:"publicIpAddressId,omitempty" tf:"public_ip_address_id,omitempty"` + + // The ID of the Subnet. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` +} + +type FrontendIPConfigurationParameters struct { + + // The name of the Frontend IP Configuration. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The Private IP Address to use for the Application Gateway. + // +kubebuilder:validation:Optional + PrivateIPAddress *string `json:"privateIpAddress,omitempty" tf:"private_ip_address,omitempty"` + + // The Allocation Method for the Private IP Address. Possible values are Dynamic and Static. Defaults to Dynamic. + // +kubebuilder:validation:Optional + PrivateIPAddressAllocation *string `json:"privateIpAddressAllocation,omitempty" tf:"private_ip_address_allocation,omitempty"` + + // The name of the private link configuration to use for this frontend IP configuration. + // +kubebuilder:validation:Optional + PrivateLinkConfigurationName *string `json:"privateLinkConfigurationName,omitempty" tf:"private_link_configuration_name,omitempty"` + + // The ID of a Public IP Address which the Application Gateway should use. The allocation method for the Public IP Address depends on the sku of this Application Gateway. Please refer to the Azure documentation for public IP addresses for details. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.PublicIP + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + PublicIPAddressID *string `json:"publicIpAddressId,omitempty" tf:"public_ip_address_id,omitempty"` + + // Reference to a PublicIP in network to populate publicIpAddressId. 
+ // +kubebuilder:validation:Optional + PublicIPAddressIDRef *v1.Reference `json:"publicIpAddressIdRef,omitempty" tf:"-"` + + // Selector for a PublicIP in network to populate publicIpAddressId. + // +kubebuilder:validation:Optional + PublicIPAddressIDSelector *v1.Selector `json:"publicIpAddressIdSelector,omitempty" tf:"-"` + + // The ID of the Subnet. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` +} + +type FrontendPortInitParameters struct { + + // The name of the Frontend Port. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The port used for this Frontend Port. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` +} + +type FrontendPortObservation struct { + + // The ID of the Frontend Port. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name of the Frontend Port. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The port used for this Frontend Port. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` +} + +type FrontendPortParameters struct { + + // The name of the Frontend Port. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The port used for this Frontend Port. 
+ // +kubebuilder:validation:Optional + Port *float64 `json:"port" tf:"port,omitempty"` +} + +type GatewayIPConfigurationInitParameters struct { + + // The Name of this Gateway IP Configuration. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The ID of the Subnet which the Application Gateway should be connected to. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` +} + +type GatewayIPConfigurationObservation struct { + + // The ID of the Gateway IP Configuration. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The Name of this Gateway IP Configuration. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The ID of the Subnet which the Application Gateway should be connected to. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` +} + +type GatewayIPConfigurationParameters struct { + + // The Name of this Gateway IP Configuration. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The ID of the Subnet which the Application Gateway should be connected to. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` +} + +type GlobalInitParameters struct { + + // Whether Application Gateway's Request buffer is enabled. + RequestBufferingEnabled *bool `json:"requestBufferingEnabled,omitempty" tf:"request_buffering_enabled,omitempty"` + + // Whether Application Gateway's Response buffer is enabled. + ResponseBufferingEnabled *bool `json:"responseBufferingEnabled,omitempty" tf:"response_buffering_enabled,omitempty"` +} + +type GlobalObservation struct { + + // Whether Application Gateway's Request buffer is enabled. + RequestBufferingEnabled *bool `json:"requestBufferingEnabled,omitempty" tf:"request_buffering_enabled,omitempty"` + + // Whether Application Gateway's Response buffer is enabled. + ResponseBufferingEnabled *bool `json:"responseBufferingEnabled,omitempty" tf:"response_buffering_enabled,omitempty"` +} + +type GlobalParameters struct { + + // Whether Application Gateway's Request buffer is enabled. + // +kubebuilder:validation:Optional + RequestBufferingEnabled *bool `json:"requestBufferingEnabled" tf:"request_buffering_enabled,omitempty"` + + // Whether Application Gateway's Response buffer is enabled. 
+ // +kubebuilder:validation:Optional + ResponseBufferingEnabled *bool `json:"responseBufferingEnabled" tf:"response_buffering_enabled,omitempty"` +} + +type HTTPListenerCustomErrorConfigurationInitParameters struct { + + // Error page URL of the application gateway customer error. + CustomErrorPageURL *string `json:"customErrorPageUrl,omitempty" tf:"custom_error_page_url,omitempty"` + + // A list of allowed status codes for this Health Probe. + StatusCode *string `json:"statusCode,omitempty" tf:"status_code,omitempty"` +} + +type HTTPListenerCustomErrorConfigurationObservation struct { + + // Error page URL of the application gateway customer error. + CustomErrorPageURL *string `json:"customErrorPageUrl,omitempty" tf:"custom_error_page_url,omitempty"` + + // The ID of the URL Path Map. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A list of allowed status codes for this Health Probe. + StatusCode *string `json:"statusCode,omitempty" tf:"status_code,omitempty"` +} + +type HTTPListenerCustomErrorConfigurationParameters struct { + + // Error page URL of the application gateway customer error. + // +kubebuilder:validation:Optional + CustomErrorPageURL *string `json:"customErrorPageUrl" tf:"custom_error_page_url,omitempty"` + + // A list of allowed status codes for this Health Probe. + // +kubebuilder:validation:Optional + StatusCode *string `json:"statusCode" tf:"status_code,omitempty"` +} + +type HTTPListenerInitParameters struct { + + // One or more custom_error_configuration blocks as defined below. + CustomErrorConfiguration []HTTPListenerCustomErrorConfigurationInitParameters `json:"customErrorConfiguration,omitempty" tf:"custom_error_configuration,omitempty"` + + // The ID of the Web Application Firewall Policy which should be used for this HTTP Listener. + FirewallPolicyID *string `json:"firewallPolicyId,omitempty" tf:"firewall_policy_id,omitempty"` + + // The Name of the Frontend IP Configuration used for this HTTP Listener. 
+ FrontendIPConfigurationName *string `json:"frontendIpConfigurationName,omitempty" tf:"frontend_ip_configuration_name,omitempty"` + + // The Name of the Frontend Port use for this HTTP Listener. + FrontendPortName *string `json:"frontendPortName,omitempty" tf:"frontend_port_name,omitempty"` + + // The Hostname which should be used for this HTTP Listener. Setting this value changes Listener Type to 'Multi site'. + HostName *string `json:"hostName,omitempty" tf:"host_name,omitempty"` + + // A list of Hostname(s) should be used for this HTTP Listener. It allows special wildcard characters. + // +listType=set + HostNames []*string `json:"hostNames,omitempty" tf:"host_names,omitempty"` + + // The Name of the HTTP Listener. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Protocol to use for this HTTP Listener. Possible values are Http and Https. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // Should Server Name Indication be Required? Defaults to false. + RequireSni *bool `json:"requireSni,omitempty" tf:"require_sni,omitempty"` + + // The name of the associated SSL Certificate which should be used for this HTTP Listener. + SSLCertificateName *string `json:"sslCertificateName,omitempty" tf:"ssl_certificate_name,omitempty"` + + // The name of the associated SSL Profile which should be used for this HTTP Listener. + SSLProfileName *string `json:"sslProfileName,omitempty" tf:"ssl_profile_name,omitempty"` +} + +type HTTPListenerObservation struct { + + // One or more custom_error_configuration blocks as defined below. + CustomErrorConfiguration []HTTPListenerCustomErrorConfigurationObservation `json:"customErrorConfiguration,omitempty" tf:"custom_error_configuration,omitempty"` + + // The ID of the Web Application Firewall Policy which should be used for this HTTP Listener. + FirewallPolicyID *string `json:"firewallPolicyId,omitempty" tf:"firewall_policy_id,omitempty"` + + // The ID of the associated Frontend Configuration. 
+ FrontendIPConfigurationID *string `json:"frontendIpConfigurationId,omitempty" tf:"frontend_ip_configuration_id,omitempty"` + + // The Name of the Frontend IP Configuration used for this HTTP Listener. + FrontendIPConfigurationName *string `json:"frontendIpConfigurationName,omitempty" tf:"frontend_ip_configuration_name,omitempty"` + + // The ID of the associated Frontend Port. + FrontendPortID *string `json:"frontendPortId,omitempty" tf:"frontend_port_id,omitempty"` + + // The Name of the Frontend Port use for this HTTP Listener. + FrontendPortName *string `json:"frontendPortName,omitempty" tf:"frontend_port_name,omitempty"` + + // The Hostname which should be used for this HTTP Listener. Setting this value changes Listener Type to 'Multi site'. + HostName *string `json:"hostName,omitempty" tf:"host_name,omitempty"` + + // A list of Hostname(s) should be used for this HTTP Listener. It allows special wildcard characters. + // +listType=set + HostNames []*string `json:"hostNames,omitempty" tf:"host_names,omitempty"` + + // The ID of the HTTP Listener. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The Name of the HTTP Listener. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Protocol to use for this HTTP Listener. Possible values are Http and Https. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // Should Server Name Indication be Required? Defaults to false. + RequireSni *bool `json:"requireSni,omitempty" tf:"require_sni,omitempty"` + + // The ID of the associated SSL Certificate. + SSLCertificateID *string `json:"sslCertificateId,omitempty" tf:"ssl_certificate_id,omitempty"` + + // The name of the associated SSL Certificate which should be used for this HTTP Listener. + SSLCertificateName *string `json:"sslCertificateName,omitempty" tf:"ssl_certificate_name,omitempty"` + + // The ID of the associated SSL Profile. 
+ SSLProfileID *string `json:"sslProfileId,omitempty" tf:"ssl_profile_id,omitempty"` + + // The name of the associated SSL Profile which should be used for this HTTP Listener. + SSLProfileName *string `json:"sslProfileName,omitempty" tf:"ssl_profile_name,omitempty"` +} + +type HTTPListenerParameters struct { + + // One or more custom_error_configuration blocks as defined below. + // +kubebuilder:validation:Optional + CustomErrorConfiguration []HTTPListenerCustomErrorConfigurationParameters `json:"customErrorConfiguration,omitempty" tf:"custom_error_configuration,omitempty"` + + // The ID of the Web Application Firewall Policy which should be used for this HTTP Listener. + // +kubebuilder:validation:Optional + FirewallPolicyID *string `json:"firewallPolicyId,omitempty" tf:"firewall_policy_id,omitempty"` + + // The Name of the Frontend IP Configuration used for this HTTP Listener. + // +kubebuilder:validation:Optional + FrontendIPConfigurationName *string `json:"frontendIpConfigurationName" tf:"frontend_ip_configuration_name,omitempty"` + + // The Name of the Frontend Port use for this HTTP Listener. + // +kubebuilder:validation:Optional + FrontendPortName *string `json:"frontendPortName" tf:"frontend_port_name,omitempty"` + + // The Hostname which should be used for this HTTP Listener. Setting this value changes Listener Type to 'Multi site'. + // +kubebuilder:validation:Optional + HostName *string `json:"hostName,omitempty" tf:"host_name,omitempty"` + + // A list of Hostname(s) should be used for this HTTP Listener. It allows special wildcard characters. + // +kubebuilder:validation:Optional + // +listType=set + HostNames []*string `json:"hostNames,omitempty" tf:"host_names,omitempty"` + + // The Name of the HTTP Listener. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The Protocol to use for this HTTP Listener. Possible values are Http and Https. 
+ // +kubebuilder:validation:Optional + Protocol *string `json:"protocol" tf:"protocol,omitempty"` + + // Should Server Name Indication be Required? Defaults to false. + // +kubebuilder:validation:Optional + RequireSni *bool `json:"requireSni,omitempty" tf:"require_sni,omitempty"` + + // The name of the associated SSL Certificate which should be used for this HTTP Listener. + // +kubebuilder:validation:Optional + SSLCertificateName *string `json:"sslCertificateName,omitempty" tf:"ssl_certificate_name,omitempty"` + + // The name of the associated SSL Profile which should be used for this HTTP Listener. + // +kubebuilder:validation:Optional + SSLProfileName *string `json:"sslProfileName,omitempty" tf:"ssl_profile_name,omitempty"` +} + +type IPConfigurationInitParameters struct { + + // The Name of the URL Path Map. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Is this the Primary IP Configuration? + Primary *bool `json:"primary,omitempty" tf:"primary,omitempty"` + + // The Static IP Address which should be used. + PrivateIPAddress *string `json:"privateIpAddress,omitempty" tf:"private_ip_address,omitempty"` + + // The allocation method used for the Private IP Address. Possible values are Dynamic and Static. + PrivateIPAddressAllocation *string `json:"privateIpAddressAllocation,omitempty" tf:"private_ip_address_allocation,omitempty"` + + // The ID of the subnet the private link configuration should connect to. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. 
+ // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` +} + +type IPConfigurationObservation struct { + + // The Name of the URL Path Map. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Is this the Primary IP Configuration? + Primary *bool `json:"primary,omitempty" tf:"primary,omitempty"` + + // The Static IP Address which should be used. + PrivateIPAddress *string `json:"privateIpAddress,omitempty" tf:"private_ip_address,omitempty"` + + // The allocation method used for the Private IP Address. Possible values are Dynamic and Static. + PrivateIPAddressAllocation *string `json:"privateIpAddressAllocation,omitempty" tf:"private_ip_address_allocation,omitempty"` + + // The ID of the subnet the private link configuration should connect to. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` +} + +type IPConfigurationParameters struct { + + // The Name of the URL Path Map. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Is this the Primary IP Configuration? + // +kubebuilder:validation:Optional + Primary *bool `json:"primary" tf:"primary,omitempty"` + + // The Static IP Address which should be used. + // +kubebuilder:validation:Optional + PrivateIPAddress *string `json:"privateIpAddress,omitempty" tf:"private_ip_address,omitempty"` + + // The allocation method used for the Private IP Address. Possible values are Dynamic and Static. + // +kubebuilder:validation:Optional + PrivateIPAddressAllocation *string `json:"privateIpAddressAllocation" tf:"private_ip_address_allocation,omitempty"` + + // The ID of the subnet the private link configuration should connect to. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` +} + +type IdentityInitParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Application Gateway. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Application Gateway. Only possible value is UserAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityObservation struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Application Gateway. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Application Gateway. Only possible value is UserAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Application Gateway. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Application Gateway. Only possible value is UserAssigned. 
+ // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type MatchInitParameters struct { + + // A snippet from the Response Body which must be present in the Response. + Body *string `json:"body,omitempty" tf:"body,omitempty"` + + // A list of allowed status codes for this Health Probe. + StatusCode []*string `json:"statusCode,omitempty" tf:"status_code,omitempty"` +} + +type MatchObservation struct { + + // A snippet from the Response Body which must be present in the Response. + Body *string `json:"body,omitempty" tf:"body,omitempty"` + + // A list of allowed status codes for this Health Probe. + StatusCode []*string `json:"statusCode,omitempty" tf:"status_code,omitempty"` +} + +type MatchParameters struct { + + // A snippet from the Response Body which must be present in the Response. + // +kubebuilder:validation:Optional + Body *string `json:"body,omitempty" tf:"body,omitempty"` + + // A list of allowed status codes for this Health Probe. + // +kubebuilder:validation:Optional + StatusCode []*string `json:"statusCode" tf:"status_code,omitempty"` +} + +type PathRuleInitParameters struct { + + // The Name of the Backend Address Pool which should be used for this Routing Rule. Cannot be set if redirect_configuration_name is set. + BackendAddressPoolName *string `json:"backendAddressPoolName,omitempty" tf:"backend_address_pool_name,omitempty"` + + // The Name of the Backend HTTP Settings Collection which should be used for this Routing Rule. Cannot be set if redirect_configuration_name is set. + BackendHTTPSettingsName *string `json:"backendHttpSettingsName,omitempty" tf:"backend_http_settings_name,omitempty"` + + // The ID of the Web Application Firewall Policy which should be used as an HTTP Listener. + FirewallPolicyID *string `json:"firewallPolicyId,omitempty" tf:"firewall_policy_id,omitempty"` + + // The Name of the URL Path Map. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A list of Paths used in this Path Rule. + Paths []*string `json:"paths,omitempty" tf:"paths,omitempty"` + + // The Name of the Redirect Configuration which should be used for this Routing Rule. Cannot be set if either backend_address_pool_name or backend_http_settings_name is set. + RedirectConfigurationName *string `json:"redirectConfigurationName,omitempty" tf:"redirect_configuration_name,omitempty"` + + // The Name of the Rewrite Rule Set which should be used for this Routing Rule. Only valid for v2 SKUs. + RewriteRuleSetName *string `json:"rewriteRuleSetName,omitempty" tf:"rewrite_rule_set_name,omitempty"` +} + +type PathRuleObservation struct { + + // The ID of the associated Backend Address Pool. + BackendAddressPoolID *string `json:"backendAddressPoolId,omitempty" tf:"backend_address_pool_id,omitempty"` + + // The Name of the Backend Address Pool which should be used for this Routing Rule. Cannot be set if redirect_configuration_name is set. + BackendAddressPoolName *string `json:"backendAddressPoolName,omitempty" tf:"backend_address_pool_name,omitempty"` + + // The ID of the associated Backend HTTP Settings Configuration. + BackendHTTPSettingsID *string `json:"backendHttpSettingsId,omitempty" tf:"backend_http_settings_id,omitempty"` + + // The Name of the Backend HTTP Settings Collection which should be used for this Routing Rule. Cannot be set if redirect_configuration_name is set. + BackendHTTPSettingsName *string `json:"backendHttpSettingsName,omitempty" tf:"backend_http_settings_name,omitempty"` + + // The ID of the Web Application Firewall Policy which should be used as an HTTP Listener. + FirewallPolicyID *string `json:"firewallPolicyId,omitempty" tf:"firewall_policy_id,omitempty"` + + // The ID of the URL Path Map. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The Name of the URL Path Map. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A list of Paths used in this Path Rule. + Paths []*string `json:"paths,omitempty" tf:"paths,omitempty"` + + // The ID of the associated Redirect Configuration. + RedirectConfigurationID *string `json:"redirectConfigurationId,omitempty" tf:"redirect_configuration_id,omitempty"` + + // The Name of the Redirect Configuration which should be used for this Routing Rule. Cannot be set if either backend_address_pool_name or backend_http_settings_name is set. + RedirectConfigurationName *string `json:"redirectConfigurationName,omitempty" tf:"redirect_configuration_name,omitempty"` + + // The ID of the associated Rewrite Rule Set. + RewriteRuleSetID *string `json:"rewriteRuleSetId,omitempty" tf:"rewrite_rule_set_id,omitempty"` + + // The Name of the Rewrite Rule Set which should be used for this Routing Rule. Only valid for v2 SKUs. + RewriteRuleSetName *string `json:"rewriteRuleSetName,omitempty" tf:"rewrite_rule_set_name,omitempty"` +} + +type PathRuleParameters struct { + + // The Name of the Backend Address Pool which should be used for this Routing Rule. Cannot be set if redirect_configuration_name is set. + // +kubebuilder:validation:Optional + BackendAddressPoolName *string `json:"backendAddressPoolName,omitempty" tf:"backend_address_pool_name,omitempty"` + + // The Name of the Backend HTTP Settings Collection which should be used for this Routing Rule. Cannot be set if redirect_configuration_name is set. + // +kubebuilder:validation:Optional + BackendHTTPSettingsName *string `json:"backendHttpSettingsName,omitempty" tf:"backend_http_settings_name,omitempty"` + + // The ID of the Web Application Firewall Policy which should be used as an HTTP Listener. + // +kubebuilder:validation:Optional + FirewallPolicyID *string `json:"firewallPolicyId,omitempty" tf:"firewall_policy_id,omitempty"` + + // The Name of the URL Path Map. 
+ // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // A list of Paths used in this Path Rule. + // +kubebuilder:validation:Optional + Paths []*string `json:"paths" tf:"paths,omitempty"` + + // The Name of the Redirect Configuration which should be used for this Routing Rule. Cannot be set if either backend_address_pool_name or backend_http_settings_name is set. + // +kubebuilder:validation:Optional + RedirectConfigurationName *string `json:"redirectConfigurationName,omitempty" tf:"redirect_configuration_name,omitempty"` + + // The Name of the Rewrite Rule Set which should be used for this Routing Rule. Only valid for v2 SKUs. + // +kubebuilder:validation:Optional + RewriteRuleSetName *string `json:"rewriteRuleSetName,omitempty" tf:"rewrite_rule_set_name,omitempty"` +} + +type PrivateEndpointConnectionInitParameters struct { +} + +type PrivateEndpointConnectionObservation struct { + + // The ID of the private endpoint connection. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name of the private endpoint connection. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type PrivateEndpointConnectionParameters struct { +} + +type PrivateLinkConfigurationInitParameters struct { + + // One or more ip_configuration blocks as defined below. + IPConfiguration []IPConfigurationInitParameters `json:"ipConfiguration,omitempty" tf:"ip_configuration,omitempty"` + + // The name of the private link configuration. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type PrivateLinkConfigurationObservation struct { + + // The ID of the private link configuration. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // One or more ip_configuration blocks as defined below. + IPConfiguration []IPConfigurationObservation `json:"ipConfiguration,omitempty" tf:"ip_configuration,omitempty"` + + // The name of the private link configuration. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type PrivateLinkConfigurationParameters struct { + + // One or more ip_configuration blocks as defined below. + // +kubebuilder:validation:Optional + IPConfiguration []IPConfigurationParameters `json:"ipConfiguration" tf:"ip_configuration,omitempty"` + + // The name of the private link configuration. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type ProbeInitParameters struct { + + // The Hostname used for this Probe. If the Application Gateway is configured for a single site, by default the Host name should be specified as 127.0.0.1, unless otherwise configured in custom probe. Cannot be set if pick_host_name_from_backend_http_settings is set to true. + Host *string `json:"host,omitempty" tf:"host,omitempty"` + + // The Interval between two consecutive probes in seconds. Possible values range from 1 second to a maximum of 86,400 seconds. + Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` + + // A match block as defined above. + Match *MatchInitParameters `json:"match,omitempty" tf:"match,omitempty"` + + // The minimum number of servers that are always marked as healthy. Defaults to 0. + MinimumServers *float64 `json:"minimumServers,omitempty" tf:"minimum_servers,omitempty"` + + // The Name of the Probe. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Path used for this Probe. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // Whether the host header should be picked from the backend HTTP settings. Defaults to false. + PickHostNameFromBackendHTTPSettings *bool `json:"pickHostNameFromBackendHttpSettings,omitempty" tf:"pick_host_name_from_backend_http_settings,omitempty"` + + // Custom port which will be used for probing the backend servers. The valid value ranges from 1 to 65535. In case not set, port from HTTP settings will be used. This property is valid for Standard_v2 and WAF_v2 only. 
+ Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // The Protocol used for this Probe. Possible values are Http and Https. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // The Timeout used for this Probe, which indicates when a probe becomes unhealthy. Possible values range from 1 second to a maximum of 86,400 seconds. + Timeout *float64 `json:"timeout,omitempty" tf:"timeout,omitempty"` + + // The Unhealthy Threshold for this Probe, which indicates the amount of retries which should be attempted before a node is deemed unhealthy. Possible values are from 1 to 20. + UnhealthyThreshold *float64 `json:"unhealthyThreshold,omitempty" tf:"unhealthy_threshold,omitempty"` +} + +type ProbeObservation struct { + + // The Hostname used for this Probe. If the Application Gateway is configured for a single site, by default the Host name should be specified as 127.0.0.1, unless otherwise configured in custom probe. Cannot be set if pick_host_name_from_backend_http_settings is set to true. + Host *string `json:"host,omitempty" tf:"host,omitempty"` + + // The ID of the Probe. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The Interval between two consecutive probes in seconds. Possible values range from 1 second to a maximum of 86,400 seconds. + Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` + + // A match block as defined above. + Match *MatchObservation `json:"match,omitempty" tf:"match,omitempty"` + + // The minimum number of servers that are always marked as healthy. Defaults to 0. + MinimumServers *float64 `json:"minimumServers,omitempty" tf:"minimum_servers,omitempty"` + + // The Name of the Probe. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Path used for this Probe. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // Whether the host header should be picked from the backend HTTP settings. Defaults to false. 
+ PickHostNameFromBackendHTTPSettings *bool `json:"pickHostNameFromBackendHttpSettings,omitempty" tf:"pick_host_name_from_backend_http_settings,omitempty"` + + // Custom port which will be used for probing the backend servers. The valid value ranges from 1 to 65535. In case not set, port from HTTP settings will be used. This property is valid for Standard_v2 and WAF_v2 only. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // The Protocol used for this Probe. Possible values are Http and Https. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // The Timeout used for this Probe, which indicates when a probe becomes unhealthy. Possible values range from 1 second to a maximum of 86,400 seconds. + Timeout *float64 `json:"timeout,omitempty" tf:"timeout,omitempty"` + + // The Unhealthy Threshold for this Probe, which indicates the amount of retries which should be attempted before a node is deemed unhealthy. Possible values are from 1 to 20. + UnhealthyThreshold *float64 `json:"unhealthyThreshold,omitempty" tf:"unhealthy_threshold,omitempty"` +} + +type ProbeParameters struct { + + // The Hostname used for this Probe. If the Application Gateway is configured for a single site, by default the Host name should be specified as 127.0.0.1, unless otherwise configured in custom probe. Cannot be set if pick_host_name_from_backend_http_settings is set to true. + // +kubebuilder:validation:Optional + Host *string `json:"host,omitempty" tf:"host,omitempty"` + + // The Interval between two consecutive probes in seconds. Possible values range from 1 second to a maximum of 86,400 seconds. + // +kubebuilder:validation:Optional + Interval *float64 `json:"interval" tf:"interval,omitempty"` + + // A match block as defined above. + // +kubebuilder:validation:Optional + Match *MatchParameters `json:"match,omitempty" tf:"match,omitempty"` + + // The minimum number of servers that are always marked as healthy. Defaults to 0. 
+ // +kubebuilder:validation:Optional + MinimumServers *float64 `json:"minimumServers,omitempty" tf:"minimum_servers,omitempty"` + + // The Name of the Probe. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The Path used for this Probe. + // +kubebuilder:validation:Optional + Path *string `json:"path" tf:"path,omitempty"` + + // Whether the host header should be picked from the backend HTTP settings. Defaults to false. + // +kubebuilder:validation:Optional + PickHostNameFromBackendHTTPSettings *bool `json:"pickHostNameFromBackendHttpSettings,omitempty" tf:"pick_host_name_from_backend_http_settings,omitempty"` + + // Custom port which will be used for probing the backend servers. The valid value ranges from 1 to 65535. In case not set, port from HTTP settings will be used. This property is valid for Standard_v2 and WAF_v2 only. + // +kubebuilder:validation:Optional + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // The Protocol used for this Probe. Possible values are Http and Https. + // +kubebuilder:validation:Optional + Protocol *string `json:"protocol" tf:"protocol,omitempty"` + + // The Timeout used for this Probe, which indicates when a probe becomes unhealthy. Possible values range from 1 second to a maximum of 86,400 seconds. + // +kubebuilder:validation:Optional + Timeout *float64 `json:"timeout" tf:"timeout,omitempty"` + + // The Unhealthy Threshold for this Probe, which indicates the amount of retries which should be attempted before a node is deemed unhealthy. Possible values are from 1 to 20. + // +kubebuilder:validation:Optional + UnhealthyThreshold *float64 `json:"unhealthyThreshold" tf:"unhealthy_threshold,omitempty"` +} + +type RedirectConfigurationInitParameters struct { + + // Whether to include the path in the redirected URL. Defaults to false + IncludePath *bool `json:"includePath,omitempty" tf:"include_path,omitempty"` + + // Whether to include the query string in the redirected URL. 
Default to false + IncludeQueryString *bool `json:"includeQueryString,omitempty" tf:"include_query_string,omitempty"` + + // Unique name of the redirect configuration block + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The type of redirect. Possible values are Permanent, Temporary, Found and SeeOther + RedirectType *string `json:"redirectType,omitempty" tf:"redirect_type,omitempty"` + + // The name of the listener to redirect to. Cannot be set if target_url is set. + TargetListenerName *string `json:"targetListenerName,omitempty" tf:"target_listener_name,omitempty"` + + // The URL to redirect the request to. Cannot be set if target_listener_name is set. + TargetURL *string `json:"targetUrl,omitempty" tf:"target_url,omitempty"` +} + +type RedirectConfigurationObservation struct { + + // The ID of the Redirect Configuration. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Whether to include the path in the redirected URL. Defaults to false + IncludePath *bool `json:"includePath,omitempty" tf:"include_path,omitempty"` + + // Whether to include the query string in the redirected URL. Default to false + IncludeQueryString *bool `json:"includeQueryString,omitempty" tf:"include_query_string,omitempty"` + + // Unique name of the redirect configuration block + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The type of redirect. Possible values are Permanent, Temporary, Found and SeeOther + RedirectType *string `json:"redirectType,omitempty" tf:"redirect_type,omitempty"` + + // The ID of the Application Gateway. + TargetListenerID *string `json:"targetListenerId,omitempty" tf:"target_listener_id,omitempty"` + + // The name of the listener to redirect to. Cannot be set if target_url is set. + TargetListenerName *string `json:"targetListenerName,omitempty" tf:"target_listener_name,omitempty"` + + // The URL to redirect the request to. Cannot be set if target_listener_name is set. 
+ TargetURL *string `json:"targetUrl,omitempty" tf:"target_url,omitempty"` +} + +type RedirectConfigurationParameters struct { + + // Whether to include the path in the redirected URL. Defaults to false + // +kubebuilder:validation:Optional + IncludePath *bool `json:"includePath,omitempty" tf:"include_path,omitempty"` + + // Whether to include the query string in the redirected URL. Default to false + // +kubebuilder:validation:Optional + IncludeQueryString *bool `json:"includeQueryString,omitempty" tf:"include_query_string,omitempty"` + + // Unique name of the redirect configuration block + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The type of redirect. Possible values are Permanent, Temporary, Found and SeeOther + // +kubebuilder:validation:Optional + RedirectType *string `json:"redirectType" tf:"redirect_type,omitempty"` + + // The name of the listener to redirect to. Cannot be set if target_url is set. + // +kubebuilder:validation:Optional + TargetListenerName *string `json:"targetListenerName,omitempty" tf:"target_listener_name,omitempty"` + + // The URL to redirect the request to. Cannot be set if target_listener_name is set. + // +kubebuilder:validation:Optional + TargetURL *string `json:"targetUrl,omitempty" tf:"target_url,omitempty"` +} + +type RequestHeaderConfigurationInitParameters struct { + + // Header name of the header configuration. + HeaderName *string `json:"headerName,omitempty" tf:"header_name,omitempty"` + + // Header value of the header configuration. To delete a response header set this property to an empty string. + HeaderValue *string `json:"headerValue,omitempty" tf:"header_value,omitempty"` +} + +type RequestHeaderConfigurationObservation struct { + + // Header name of the header configuration. + HeaderName *string `json:"headerName,omitempty" tf:"header_name,omitempty"` + + // Header value of the header configuration. To delete a response header set this property to an empty string. 
+ HeaderValue *string `json:"headerValue,omitempty" tf:"header_value,omitempty"` +} + +type RequestHeaderConfigurationParameters struct { + + // Header name of the header configuration. + // +kubebuilder:validation:Optional + HeaderName *string `json:"headerName" tf:"header_name,omitempty"` + + // Header value of the header configuration. To delete a response header set this property to an empty string. + // +kubebuilder:validation:Optional + HeaderValue *string `json:"headerValue" tf:"header_value,omitempty"` +} + +type RequestRoutingRuleInitParameters struct { + + // The Name of the Backend Address Pool which should be used for this Routing Rule. Cannot be set if redirect_configuration_name is set. + BackendAddressPoolName *string `json:"backendAddressPoolName,omitempty" tf:"backend_address_pool_name,omitempty"` + + // The Name of the Backend HTTP Settings Collection which should be used for this Routing Rule. Cannot be set if redirect_configuration_name is set. + BackendHTTPSettingsName *string `json:"backendHttpSettingsName,omitempty" tf:"backend_http_settings_name,omitempty"` + + // The Name of the HTTP Listener which should be used for this Routing Rule. + HTTPListenerName *string `json:"httpListenerName,omitempty" tf:"http_listener_name,omitempty"` + + // The Name of this Request Routing Rule. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Rule evaluation order can be dictated by specifying an integer value from 1 to 20000 with 1 being the highest priority and 20000 being the lowest priority. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Name of the Redirect Configuration which should be used for this Routing Rule. Cannot be set if either backend_address_pool_name or backend_http_settings_name is set. 
+ RedirectConfigurationName *string `json:"redirectConfigurationName,omitempty" tf:"redirect_configuration_name,omitempty"` + + // The Name of the Rewrite Rule Set which should be used for this Routing Rule. Only valid for v2 SKUs. + RewriteRuleSetName *string `json:"rewriteRuleSetName,omitempty" tf:"rewrite_rule_set_name,omitempty"` + + // The Type of Routing that should be used for this Rule. Possible values are Basic and PathBasedRouting. + RuleType *string `json:"ruleType,omitempty" tf:"rule_type,omitempty"` + + // The Name of the URL Path Map which should be associated with this Routing Rule. + URLPathMapName *string `json:"urlPathMapName,omitempty" tf:"url_path_map_name,omitempty"` +} + +type RequestRoutingRuleObservation struct { + + // The ID of the associated Backend Address Pool. + BackendAddressPoolID *string `json:"backendAddressPoolId,omitempty" tf:"backend_address_pool_id,omitempty"` + + // The Name of the Backend Address Pool which should be used for this Routing Rule. Cannot be set if redirect_configuration_name is set. + BackendAddressPoolName *string `json:"backendAddressPoolName,omitempty" tf:"backend_address_pool_name,omitempty"` + + // The ID of the associated Backend HTTP Settings Configuration. + BackendHTTPSettingsID *string `json:"backendHttpSettingsId,omitempty" tf:"backend_http_settings_id,omitempty"` + + // The Name of the Backend HTTP Settings Collection which should be used for this Routing Rule. Cannot be set if redirect_configuration_name is set. + BackendHTTPSettingsName *string `json:"backendHttpSettingsName,omitempty" tf:"backend_http_settings_name,omitempty"` + + // The ID of the associated HTTP Listener. + HTTPListenerID *string `json:"httpListenerId,omitempty" tf:"http_listener_id,omitempty"` + + // The Name of the HTTP Listener which should be used for this Routing Rule. + HTTPListenerName *string `json:"httpListenerName,omitempty" tf:"http_listener_name,omitempty"` + + // The ID of the Request Routing Rule. 
+ ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The Name of this Request Routing Rule. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Rule evaluation order can be dictated by specifying an integer value from 1 to 20000 with 1 being the highest priority and 20000 being the lowest priority. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The ID of the associated Redirect Configuration. + RedirectConfigurationID *string `json:"redirectConfigurationId,omitempty" tf:"redirect_configuration_id,omitempty"` + + // The Name of the Redirect Configuration which should be used for this Routing Rule. Cannot be set if either backend_address_pool_name or backend_http_settings_name is set. + RedirectConfigurationName *string `json:"redirectConfigurationName,omitempty" tf:"redirect_configuration_name,omitempty"` + + // The ID of the associated Rewrite Rule Set. + RewriteRuleSetID *string `json:"rewriteRuleSetId,omitempty" tf:"rewrite_rule_set_id,omitempty"` + + // The Name of the Rewrite Rule Set which should be used for this Routing Rule. Only valid for v2 SKUs. + RewriteRuleSetName *string `json:"rewriteRuleSetName,omitempty" tf:"rewrite_rule_set_name,omitempty"` + + // The Type of Routing that should be used for this Rule. Possible values are Basic and PathBasedRouting. + RuleType *string `json:"ruleType,omitempty" tf:"rule_type,omitempty"` + + // The ID of the associated URL Path Map. + URLPathMapID *string `json:"urlPathMapId,omitempty" tf:"url_path_map_id,omitempty"` + + // The Name of the URL Path Map which should be associated with this Routing Rule. + URLPathMapName *string `json:"urlPathMapName,omitempty" tf:"url_path_map_name,omitempty"` +} + +type RequestRoutingRuleParameters struct { + + // The Name of the Backend Address Pool which should be used for this Routing Rule. Cannot be set if redirect_configuration_name is set. 
+ // +kubebuilder:validation:Optional + BackendAddressPoolName *string `json:"backendAddressPoolName,omitempty" tf:"backend_address_pool_name,omitempty"` + + // The Name of the Backend HTTP Settings Collection which should be used for this Routing Rule. Cannot be set if redirect_configuration_name is set. + // +kubebuilder:validation:Optional + BackendHTTPSettingsName *string `json:"backendHttpSettingsName,omitempty" tf:"backend_http_settings_name,omitempty"` + + // The Name of the HTTP Listener which should be used for this Routing Rule. + // +kubebuilder:validation:Optional + HTTPListenerName *string `json:"httpListenerName" tf:"http_listener_name,omitempty"` + + // The Name of this Request Routing Rule. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Rule evaluation order can be dictated by specifying an integer value from 1 to 20000 with 1 being the highest priority and 20000 being the lowest priority. + // +kubebuilder:validation:Optional + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Name of the Redirect Configuration which should be used for this Routing Rule. Cannot be set if either backend_address_pool_name or backend_http_settings_name is set. + // +kubebuilder:validation:Optional + RedirectConfigurationName *string `json:"redirectConfigurationName,omitempty" tf:"redirect_configuration_name,omitempty"` + + // The Name of the Rewrite Rule Set which should be used for this Routing Rule. Only valid for v2 SKUs. + // +kubebuilder:validation:Optional + RewriteRuleSetName *string `json:"rewriteRuleSetName,omitempty" tf:"rewrite_rule_set_name,omitempty"` + + // The Type of Routing that should be used for this Rule. Possible values are Basic and PathBasedRouting. + // +kubebuilder:validation:Optional + RuleType *string `json:"ruleType" tf:"rule_type,omitempty"` + + // The Name of the URL Path Map which should be associated with this Routing Rule. 
+ // +kubebuilder:validation:Optional + URLPathMapName *string `json:"urlPathMapName,omitempty" tf:"url_path_map_name,omitempty"` +} + +type ResponseHeaderConfigurationInitParameters struct { + + // Header name of the header configuration. + HeaderName *string `json:"headerName,omitempty" tf:"header_name,omitempty"` + + // Header value of the header configuration. To delete a response header set this property to an empty string. + HeaderValue *string `json:"headerValue,omitempty" tf:"header_value,omitempty"` +} + +type ResponseHeaderConfigurationObservation struct { + + // Header name of the header configuration. + HeaderName *string `json:"headerName,omitempty" tf:"header_name,omitempty"` + + // Header value of the header configuration. To delete a response header set this property to an empty string. + HeaderValue *string `json:"headerValue,omitempty" tf:"header_value,omitempty"` +} + +type ResponseHeaderConfigurationParameters struct { + + // Header name of the header configuration. + // +kubebuilder:validation:Optional + HeaderName *string `json:"headerName" tf:"header_name,omitempty"` + + // Header value of the header configuration. To delete a response header set this property to an empty string. + // +kubebuilder:validation:Optional + HeaderValue *string `json:"headerValue" tf:"header_value,omitempty"` +} + +type RewriteRuleInitParameters struct { + + // One or more condition blocks as defined above. + Condition []ConditionInitParameters `json:"condition,omitempty" tf:"condition,omitempty"` + + // The Name of the URL Path Map. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // One or more request_header_configuration blocks as defined above. + RequestHeaderConfiguration []RequestHeaderConfigurationInitParameters `json:"requestHeaderConfiguration,omitempty" tf:"request_header_configuration,omitempty"` + + // One or more response_header_configuration blocks as defined above. 
+ ResponseHeaderConfiguration []ResponseHeaderConfigurationInitParameters `json:"responseHeaderConfiguration,omitempty" tf:"response_header_configuration,omitempty"` + + // Rule sequence of the rewrite rule that determines the order of execution in a set. + RuleSequence *float64 `json:"ruleSequence,omitempty" tf:"rule_sequence,omitempty"` + + // One url block as defined below + URL *URLInitParameters `json:"url,omitempty" tf:"url,omitempty"` +} + +type RewriteRuleObservation struct { + + // One or more condition blocks as defined above. + Condition []ConditionObservation `json:"condition,omitempty" tf:"condition,omitempty"` + + // The Name of the URL Path Map. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // One or more request_header_configuration blocks as defined above. + RequestHeaderConfiguration []RequestHeaderConfigurationObservation `json:"requestHeaderConfiguration,omitempty" tf:"request_header_configuration,omitempty"` + + // One or more response_header_configuration blocks as defined above. + ResponseHeaderConfiguration []ResponseHeaderConfigurationObservation `json:"responseHeaderConfiguration,omitempty" tf:"response_header_configuration,omitempty"` + + // Rule sequence of the rewrite rule that determines the order of execution in a set. + RuleSequence *float64 `json:"ruleSequence,omitempty" tf:"rule_sequence,omitempty"` + + // One url block as defined below + URL *URLObservation `json:"url,omitempty" tf:"url,omitempty"` +} + +type RewriteRuleParameters struct { + + // One or more condition blocks as defined above. + // +kubebuilder:validation:Optional + Condition []ConditionParameters `json:"condition,omitempty" tf:"condition,omitempty"` + + // The Name of the URL Path Map. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // One or more request_header_configuration blocks as defined above. 
+ // +kubebuilder:validation:Optional + RequestHeaderConfiguration []RequestHeaderConfigurationParameters `json:"requestHeaderConfiguration,omitempty" tf:"request_header_configuration,omitempty"` + + // One or more response_header_configuration blocks as defined above. + // +kubebuilder:validation:Optional + ResponseHeaderConfiguration []ResponseHeaderConfigurationParameters `json:"responseHeaderConfiguration,omitempty" tf:"response_header_configuration,omitempty"` + + // Rule sequence of the rewrite rule that determines the order of execution in a set. + // +kubebuilder:validation:Optional + RuleSequence *float64 `json:"ruleSequence" tf:"rule_sequence,omitempty"` + + // One url block as defined below + // +kubebuilder:validation:Optional + URL *URLParameters `json:"url,omitempty" tf:"url,omitempty"` +} + +type RewriteRuleSetInitParameters struct { + + // Unique name of the rewrite rule set block + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // One or more rewrite_rule blocks as defined below. + RewriteRule []RewriteRuleInitParameters `json:"rewriteRule,omitempty" tf:"rewrite_rule,omitempty"` +} + +type RewriteRuleSetObservation struct { + + // The ID of the Rewrite Rule Set + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Unique name of the rewrite rule set block + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // One or more rewrite_rule blocks as defined below. + RewriteRule []RewriteRuleObservation `json:"rewriteRule,omitempty" tf:"rewrite_rule,omitempty"` +} + +type RewriteRuleSetParameters struct { + + // Unique name of the rewrite rule set block + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // One or more rewrite_rule blocks as defined below. 
+ // +kubebuilder:validation:Optional + RewriteRule []RewriteRuleParameters `json:"rewriteRule,omitempty" tf:"rewrite_rule,omitempty"` +} + +type SSLCertificateInitParameters struct { + + // The Secret ID of (base-64 encoded unencrypted pfx) the Secret or Certificate object stored in Azure KeyVault. You need to enable soft delete for Key Vault to use this feature. Required if data is not set. + KeyVaultSecretID *string `json:"keyVaultSecretId,omitempty" tf:"key_vault_secret_id,omitempty"` + + // The Name of the SSL certificate that is unique within this Application Gateway + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type SSLCertificateObservation struct { + + // The ID of the SSL Certificate. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The Secret ID of (base-64 encoded unencrypted pfx) the Secret or Certificate object stored in Azure KeyVault. You need to enable soft delete for Key Vault to use this feature. Required if data is not set. + KeyVaultSecretID *string `json:"keyVaultSecretId,omitempty" tf:"key_vault_secret_id,omitempty"` + + // The Name of the SSL certificate that is unique within this Application Gateway + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Public Certificate Data associated with the SSL Certificate. + PublicCertData *string `json:"publicCertData,omitempty" tf:"public_cert_data,omitempty"` +} + +type SSLCertificateParameters struct { + + // The base64-encoded PFX certificate data. Required if key_vault_secret_id is not set. + // +kubebuilder:validation:Optional + DataSecretRef *v1.SecretKeySelector `json:"dataSecretRef,omitempty" tf:"-"` + + // The Secret ID of (base-64 encoded unencrypted pfx) the Secret or Certificate object stored in Azure KeyVault. You need to enable soft delete for Key Vault to use this feature. Required if data is not set. 
+ // +kubebuilder:validation:Optional + KeyVaultSecretID *string `json:"keyVaultSecretId,omitempty" tf:"key_vault_secret_id,omitempty"` + + // The Name of the SSL certificate that is unique within this Application Gateway + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Password for the pfx file specified in data. Required if data is set. + // +kubebuilder:validation:Optional + PasswordSecretRef *v1.SecretKeySelector `json:"passwordSecretRef,omitempty" tf:"-"` +} + +type SSLPolicyInitParameters struct { + + // A List of accepted cipher suites. Possible values are: TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA, TLS_DHE_DSS_WITH_AES_128_CBC_SHA, TLS_DHE_DSS_WITH_AES_128_CBC_SHA256, TLS_DHE_DSS_WITH_AES_256_CBC_SHA, TLS_DHE_DSS_WITH_AES_256_CBC_SHA256, TLS_DHE_RSA_WITH_AES_128_CBC_SHA, TLS_DHE_RSA_WITH_AES_128_GCM_SHA256, TLS_DHE_RSA_WITH_AES_256_CBC_SHA, TLS_DHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA256, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_CBC_SHA256 and TLS_RSA_WITH_AES_256_GCM_SHA384. + CipherSuites []*string `json:"cipherSuites,omitempty" tf:"cipher_suites,omitempty"` + + // A list of SSL Protocols which should be disabled on this Application Gateway. Possible values are TLSv1_0, TLSv1_1, TLSv1_2 and TLSv1_3. + DisabledProtocols []*string `json:"disabledProtocols,omitempty" tf:"disabled_protocols,omitempty"` + + // The minimal TLS version. 
Possible values are TLSv1_0, TLSv1_1, TLSv1_2 and TLSv1_3. + MinProtocolVersion *string `json:"minProtocolVersion,omitempty" tf:"min_protocol_version,omitempty"` + + // The Name of the Policy e.g. AppGwSslPolicy20170401S. Required if policy_type is set to Predefined. Possible values can change over time and are published here https://docs.microsoft.com/azure/application-gateway/application-gateway-ssl-policy-overview. Not compatible with disabled_protocols. + PolicyName *string `json:"policyName,omitempty" tf:"policy_name,omitempty"` + + // The Type of the Policy. Possible values are Predefined, Custom and CustomV2. + PolicyType *string `json:"policyType,omitempty" tf:"policy_type,omitempty"` +} + +type SSLPolicyObservation struct { + + // A List of accepted cipher suites. Possible values are: TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA, TLS_DHE_DSS_WITH_AES_128_CBC_SHA, TLS_DHE_DSS_WITH_AES_128_CBC_SHA256, TLS_DHE_DSS_WITH_AES_256_CBC_SHA, TLS_DHE_DSS_WITH_AES_256_CBC_SHA256, TLS_DHE_RSA_WITH_AES_128_CBC_SHA, TLS_DHE_RSA_WITH_AES_128_GCM_SHA256, TLS_DHE_RSA_WITH_AES_256_CBC_SHA, TLS_DHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA256, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_CBC_SHA256 and TLS_RSA_WITH_AES_256_GCM_SHA384. + CipherSuites []*string `json:"cipherSuites,omitempty" tf:"cipher_suites,omitempty"` + + // A list of SSL Protocols which should be disabled on this Application Gateway. 
Possible values are TLSv1_0, TLSv1_1, TLSv1_2 and TLSv1_3. + DisabledProtocols []*string `json:"disabledProtocols,omitempty" tf:"disabled_protocols,omitempty"` + + // The minimal TLS version. Possible values are TLSv1_0, TLSv1_1, TLSv1_2 and TLSv1_3. + MinProtocolVersion *string `json:"minProtocolVersion,omitempty" tf:"min_protocol_version,omitempty"` + + // The Name of the Policy e.g. AppGwSslPolicy20170401S. Required if policy_type is set to Predefined. Possible values can change over time and are published here https://docs.microsoft.com/azure/application-gateway/application-gateway-ssl-policy-overview. Not compatible with disabled_protocols. + PolicyName *string `json:"policyName,omitempty" tf:"policy_name,omitempty"` + + // The Type of the Policy. Possible values are Predefined, Custom and CustomV2. + PolicyType *string `json:"policyType,omitempty" tf:"policy_type,omitempty"` +} + +type SSLPolicyParameters struct { + + // A List of accepted cipher suites. Possible values are: TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA, TLS_DHE_DSS_WITH_AES_128_CBC_SHA, TLS_DHE_DSS_WITH_AES_128_CBC_SHA256, TLS_DHE_DSS_WITH_AES_256_CBC_SHA, TLS_DHE_DSS_WITH_AES_256_CBC_SHA256, TLS_DHE_RSA_WITH_AES_128_CBC_SHA, TLS_DHE_RSA_WITH_AES_128_GCM_SHA256, TLS_DHE_RSA_WITH_AES_256_CBC_SHA, TLS_DHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA256, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_CBC_SHA256 and 
TLS_RSA_WITH_AES_256_GCM_SHA384. + // +kubebuilder:validation:Optional + CipherSuites []*string `json:"cipherSuites,omitempty" tf:"cipher_suites,omitempty"` + + // A list of SSL Protocols which should be disabled on this Application Gateway. Possible values are TLSv1_0, TLSv1_1, TLSv1_2 and TLSv1_3. + // +kubebuilder:validation:Optional + DisabledProtocols []*string `json:"disabledProtocols,omitempty" tf:"disabled_protocols,omitempty"` + + // The minimal TLS version. Possible values are TLSv1_0, TLSv1_1, TLSv1_2 and TLSv1_3. + // +kubebuilder:validation:Optional + MinProtocolVersion *string `json:"minProtocolVersion,omitempty" tf:"min_protocol_version,omitempty"` + + // The Name of the Policy e.g. AppGwSslPolicy20170401S. Required if policy_type is set to Predefined. Possible values can change over time and are published here https://docs.microsoft.com/azure/application-gateway/application-gateway-ssl-policy-overview. Not compatible with disabled_protocols. + // +kubebuilder:validation:Optional + PolicyName *string `json:"policyName,omitempty" tf:"policy_name,omitempty"` + + // The Type of the Policy. Possible values are Predefined, Custom and CustomV2. + // +kubebuilder:validation:Optional + PolicyType *string `json:"policyType,omitempty" tf:"policy_type,omitempty"` +} + +type SSLProfileInitParameters struct { + + // The name of the SSL Profile that is unique within this Application Gateway. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // a ssl_policy block as defined below. + SSLPolicy *SSLProfileSSLPolicyInitParameters `json:"sslPolicy,omitempty" tf:"ssl_policy,omitempty"` + + // The name of the Trusted Client Certificate that will be used to authenticate requests from clients. + TrustedClientCertificateNames []*string `json:"trustedClientCertificateNames,omitempty" tf:"trusted_client_certificate_names,omitempty"` + + // Should client certificate issuer DN be verified? Defaults to false. 
+ VerifyClientCertIssuerDn *bool `json:"verifyClientCertIssuerDn,omitempty" tf:"verify_client_cert_issuer_dn,omitempty"` + + // Specify the method to check client certificate revocation status. Possible value is OCSP. + VerifyClientCertificateRevocation *string `json:"verifyClientCertificateRevocation,omitempty" tf:"verify_client_certificate_revocation,omitempty"` +} + +type SSLProfileObservation struct { + + // The ID of the URL Path Map. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name of the SSL Profile that is unique within this Application Gateway. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // a ssl_policy block as defined below. + SSLPolicy *SSLProfileSSLPolicyObservation `json:"sslPolicy,omitempty" tf:"ssl_policy,omitempty"` + + // The name of the Trusted Client Certificate that will be used to authenticate requests from clients. + TrustedClientCertificateNames []*string `json:"trustedClientCertificateNames,omitempty" tf:"trusted_client_certificate_names,omitempty"` + + // Should client certificate issuer DN be verified? Defaults to false. + VerifyClientCertIssuerDn *bool `json:"verifyClientCertIssuerDn,omitempty" tf:"verify_client_cert_issuer_dn,omitempty"` + + // Specify the method to check client certificate revocation status. Possible value is OCSP. + VerifyClientCertificateRevocation *string `json:"verifyClientCertificateRevocation,omitempty" tf:"verify_client_certificate_revocation,omitempty"` +} + +type SSLProfileParameters struct { + + // The name of the SSL Profile that is unique within this Application Gateway. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // a ssl_policy block as defined below. + // +kubebuilder:validation:Optional + SSLPolicy *SSLProfileSSLPolicyParameters `json:"sslPolicy,omitempty" tf:"ssl_policy,omitempty"` + + // The name of the Trusted Client Certificate that will be used to authenticate requests from clients. 
+ // +kubebuilder:validation:Optional + TrustedClientCertificateNames []*string `json:"trustedClientCertificateNames,omitempty" tf:"trusted_client_certificate_names,omitempty"` + + // Should client certificate issuer DN be verified? Defaults to false. + // +kubebuilder:validation:Optional + VerifyClientCertIssuerDn *bool `json:"verifyClientCertIssuerDn,omitempty" tf:"verify_client_cert_issuer_dn,omitempty"` + + // Specify the method to check client certificate revocation status. Possible value is OCSP. + // +kubebuilder:validation:Optional + VerifyClientCertificateRevocation *string `json:"verifyClientCertificateRevocation,omitempty" tf:"verify_client_certificate_revocation,omitempty"` +} + +type SSLProfileSSLPolicyInitParameters struct { + + // A List of accepted cipher suites. Possible values are: TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA, TLS_DHE_DSS_WITH_AES_128_CBC_SHA, TLS_DHE_DSS_WITH_AES_128_CBC_SHA256, TLS_DHE_DSS_WITH_AES_256_CBC_SHA, TLS_DHE_DSS_WITH_AES_256_CBC_SHA256, TLS_DHE_RSA_WITH_AES_128_CBC_SHA, TLS_DHE_RSA_WITH_AES_128_GCM_SHA256, TLS_DHE_RSA_WITH_AES_256_CBC_SHA, TLS_DHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA256, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_CBC_SHA256 and TLS_RSA_WITH_AES_256_GCM_SHA384. + CipherSuites []*string `json:"cipherSuites,omitempty" tf:"cipher_suites,omitempty"` + + // A list of SSL Protocols which should be disabled on this Application Gateway. 
Possible values are TLSv1_0, TLSv1_1, TLSv1_2 and TLSv1_3. + DisabledProtocols []*string `json:"disabledProtocols,omitempty" tf:"disabled_protocols,omitempty"` + + // The minimal TLS version. Possible values are TLSv1_0, TLSv1_1, TLSv1_2 and TLSv1_3. + MinProtocolVersion *string `json:"minProtocolVersion,omitempty" tf:"min_protocol_version,omitempty"` + + // The Name of the Policy e.g. AppGwSslPolicy20170401S. Required if policy_type is set to Predefined. Possible values can change over time and are published here https://docs.microsoft.com/azure/application-gateway/application-gateway-ssl-policy-overview. Not compatible with disabled_protocols. + PolicyName *string `json:"policyName,omitempty" tf:"policy_name,omitempty"` + + // The Type of the Policy. Possible values are Predefined, Custom and CustomV2. + PolicyType *string `json:"policyType,omitempty" tf:"policy_type,omitempty"` +} + +type SSLProfileSSLPolicyObservation struct { + + // A List of accepted cipher suites. Possible values are: TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA, TLS_DHE_DSS_WITH_AES_128_CBC_SHA, TLS_DHE_DSS_WITH_AES_128_CBC_SHA256, TLS_DHE_DSS_WITH_AES_256_CBC_SHA, TLS_DHE_DSS_WITH_AES_256_CBC_SHA256, TLS_DHE_RSA_WITH_AES_128_CBC_SHA, TLS_DHE_RSA_WITH_AES_128_GCM_SHA256, TLS_DHE_RSA_WITH_AES_256_CBC_SHA, TLS_DHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA256, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_CBC_SHA256 and 
TLS_RSA_WITH_AES_256_GCM_SHA384. + CipherSuites []*string `json:"cipherSuites,omitempty" tf:"cipher_suites,omitempty"` + + // A list of SSL Protocols which should be disabled on this Application Gateway. Possible values are TLSv1_0, TLSv1_1, TLSv1_2 and TLSv1_3. + DisabledProtocols []*string `json:"disabledProtocols,omitempty" tf:"disabled_protocols,omitempty"` + + // The minimal TLS version. Possible values are TLSv1_0, TLSv1_1, TLSv1_2 and TLSv1_3. + MinProtocolVersion *string `json:"minProtocolVersion,omitempty" tf:"min_protocol_version,omitempty"` + + // The Name of the Policy e.g. AppGwSslPolicy20170401S. Required if policy_type is set to Predefined. Possible values can change over time and are published here https://docs.microsoft.com/azure/application-gateway/application-gateway-ssl-policy-overview. Not compatible with disabled_protocols. + PolicyName *string `json:"policyName,omitempty" tf:"policy_name,omitempty"` + + // The Type of the Policy. Possible values are Predefined, Custom and CustomV2. + PolicyType *string `json:"policyType,omitempty" tf:"policy_type,omitempty"` +} + +type SSLProfileSSLPolicyParameters struct { + + // A List of accepted cipher suites. 
Possible values are: TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA, TLS_DHE_DSS_WITH_AES_128_CBC_SHA, TLS_DHE_DSS_WITH_AES_128_CBC_SHA256, TLS_DHE_DSS_WITH_AES_256_CBC_SHA, TLS_DHE_DSS_WITH_AES_256_CBC_SHA256, TLS_DHE_RSA_WITH_AES_128_CBC_SHA, TLS_DHE_RSA_WITH_AES_128_GCM_SHA256, TLS_DHE_RSA_WITH_AES_256_CBC_SHA, TLS_DHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA256, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_CBC_SHA256 and TLS_RSA_WITH_AES_256_GCM_SHA384. + // +kubebuilder:validation:Optional + CipherSuites []*string `json:"cipherSuites,omitempty" tf:"cipher_suites,omitempty"` + + // A list of SSL Protocols which should be disabled on this Application Gateway. Possible values are TLSv1_0, TLSv1_1, TLSv1_2 and TLSv1_3. + // +kubebuilder:validation:Optional + DisabledProtocols []*string `json:"disabledProtocols,omitempty" tf:"disabled_protocols,omitempty"` + + // The minimal TLS version. Possible values are TLSv1_0, TLSv1_1, TLSv1_2 and TLSv1_3. + // +kubebuilder:validation:Optional + MinProtocolVersion *string `json:"minProtocolVersion,omitempty" tf:"min_protocol_version,omitempty"` + + // The Name of the Policy e.g. AppGwSslPolicy20170401S. Required if policy_type is set to Predefined. Possible values can change over time and are published here https://docs.microsoft.com/azure/application-gateway/application-gateway-ssl-policy-overview. Not compatible with disabled_protocols. 
+ // +kubebuilder:validation:Optional + PolicyName *string `json:"policyName,omitempty" tf:"policy_name,omitempty"` + + // The Type of the Policy. Possible values are Predefined, Custom and CustomV2. + // +kubebuilder:validation:Optional + PolicyType *string `json:"policyType,omitempty" tf:"policy_type,omitempty"` +} + +type SkuInitParameters struct { + + // The Capacity of the SKU to use for this Application Gateway. When using a V1 SKU this value must be between 1 and 32, and 1 to 125 for a V2 SKU. This property is optional if autoscale_configuration is set. + Capacity *float64 `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // The Name of the SKU to use for this Application Gateway. Possible values are Standard_Small, Standard_Medium, Standard_Large, Standard_v2, WAF_Medium, WAF_Large, and WAF_v2. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Tier of the SKU to use for this Application Gateway. Possible values are Standard, Standard_v2, WAF and WAF_v2. + Tier *string `json:"tier,omitempty" tf:"tier,omitempty"` +} + +type SkuObservation struct { + + // The Capacity of the SKU to use for this Application Gateway. When using a V1 SKU this value must be between 1 and 32, and 1 to 125 for a V2 SKU. This property is optional if autoscale_configuration is set. + Capacity *float64 `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // The Name of the SKU to use for this Application Gateway. Possible values are Standard_Small, Standard_Medium, Standard_Large, Standard_v2, WAF_Medium, WAF_Large, and WAF_v2. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Tier of the SKU to use for this Application Gateway. Possible values are Standard, Standard_v2, WAF and WAF_v2. + Tier *string `json:"tier,omitempty" tf:"tier,omitempty"` +} + +type SkuParameters struct { + + // The Capacity of the SKU to use for this Application Gateway. When using a V1 SKU this value must be between 1 and 32, and 1 to 125 for a V2 SKU. 
This property is optional if autoscale_configuration is set. + // +kubebuilder:validation:Optional + Capacity *float64 `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // The Name of the SKU to use for this Application Gateway. Possible values are Standard_Small, Standard_Medium, Standard_Large, Standard_v2, WAF_Medium, WAF_Large, and WAF_v2. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The Tier of the SKU to use for this Application Gateway. Possible values are Standard, Standard_v2, WAF and WAF_v2. + // +kubebuilder:validation:Optional + Tier *string `json:"tier" tf:"tier,omitempty"` +} + +type TrustedClientCertificateInitParameters struct { + + // The name of the Trusted Client Certificate that is unique within this Application Gateway. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type TrustedClientCertificateObservation struct { + + // The ID of the URL Path Map. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name of the Trusted Client Certificate that is unique within this Application Gateway. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type TrustedClientCertificateParameters struct { + + // The base-64 encoded certificate. + // +kubebuilder:validation:Required + DataSecretRef v1.SecretKeySelector `json:"dataSecretRef" tf:"-"` + + // The name of the Trusted Client Certificate that is unique within this Application Gateway. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type TrustedRootCertificateInitParameters struct { + + // The Secret ID of (base-64 encoded unencrypted pfx) Secret or Certificate object stored in Azure KeyVault. You need to enable soft delete for the Key Vault to use this feature. Required if data is not set. + KeyVaultSecretID *string `json:"keyVaultSecretId,omitempty" tf:"key_vault_secret_id,omitempty"` + + // The Name of the Trusted Root Certificate to use. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type TrustedRootCertificateObservation struct { + + // The ID of the URL Path Map. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The Secret ID of (base-64 encoded unencrypted pfx) Secret or Certificate object stored in Azure KeyVault. You need to enable soft delete for the Key Vault to use this feature. Required if data is not set. + KeyVaultSecretID *string `json:"keyVaultSecretId,omitempty" tf:"key_vault_secret_id,omitempty"` + + // The Name of the Trusted Root Certificate to use. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type TrustedRootCertificateParameters struct { + + // The contents of the Trusted Root Certificate which should be used. Required if key_vault_secret_id is not set. + // +kubebuilder:validation:Optional + DataSecretRef *v1.SecretKeySelector `json:"dataSecretRef,omitempty" tf:"-"` + + // The Secret ID of (base-64 encoded unencrypted pfx) Secret or Certificate object stored in Azure KeyVault. You need to enable soft delete for the Key Vault to use this feature. Required if data is not set. + // +kubebuilder:validation:Optional + KeyVaultSecretID *string `json:"keyVaultSecretId,omitempty" tf:"key_vault_secret_id,omitempty"` + + // The Name of the Trusted Root Certificate to use. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type URLInitParameters struct { + + // The components used to rewrite the URL. Possible values are path_only and query_string_only to limit the rewrite to the URL Path or URL Query String only. + Components *string `json:"components,omitempty" tf:"components,omitempty"` + + // The URL path to rewrite. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // The query string to rewrite. + QueryString *string `json:"queryString,omitempty" tf:"query_string,omitempty"` + + // Whether the URL path map should be reevaluated after this rewrite has been applied. 
More info on rewrite configuration + Reroute *bool `json:"reroute,omitempty" tf:"reroute,omitempty"` +} + +type URLObservation struct { + + // The components used to rewrite the URL. Possible values are path_only and query_string_only to limit the rewrite to the URL Path or URL Query String only. + Components *string `json:"components,omitempty" tf:"components,omitempty"` + + // The URL path to rewrite. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // The query string to rewrite. + QueryString *string `json:"queryString,omitempty" tf:"query_string,omitempty"` + + // Whether the URL path map should be reevaluated after this rewrite has been applied. More info on rewrite configuration + Reroute *bool `json:"reroute,omitempty" tf:"reroute,omitempty"` +} + +type URLParameters struct { + + // The components used to rewrite the URL. Possible values are path_only and query_string_only to limit the rewrite to the URL Path or URL Query String only. + // +kubebuilder:validation:Optional + Components *string `json:"components,omitempty" tf:"components,omitempty"` + + // The URL path to rewrite. + // +kubebuilder:validation:Optional + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // The query string to rewrite. + // +kubebuilder:validation:Optional + QueryString *string `json:"queryString,omitempty" tf:"query_string,omitempty"` + + // Whether the URL path map should be reevaluated after this rewrite has been applied. More info on rewrite configuration + // +kubebuilder:validation:Optional + Reroute *bool `json:"reroute,omitempty" tf:"reroute,omitempty"` +} + +type URLPathMapInitParameters struct { + + // The Name of the Default Backend Address Pool which should be used for this URL Path Map. Cannot be set if default_redirect_configuration_name is set. 
+ DefaultBackendAddressPoolName *string `json:"defaultBackendAddressPoolName,omitempty" tf:"default_backend_address_pool_name,omitempty"` + + // The Name of the Default Backend HTTP Settings Collection which should be used for this URL Path Map. Cannot be set if default_redirect_configuration_name is set. + DefaultBackendHTTPSettingsName *string `json:"defaultBackendHttpSettingsName,omitempty" tf:"default_backend_http_settings_name,omitempty"` + + // The Name of the Default Redirect Configuration which should be used for this URL Path Map. Cannot be set if either default_backend_address_pool_name or default_backend_http_settings_name is set. + DefaultRedirectConfigurationName *string `json:"defaultRedirectConfigurationName,omitempty" tf:"default_redirect_configuration_name,omitempty"` + + // The Name of the Default Rewrite Rule Set which should be used for this URL Path Map. Only valid for v2 SKUs. + DefaultRewriteRuleSetName *string `json:"defaultRewriteRuleSetName,omitempty" tf:"default_rewrite_rule_set_name,omitempty"` + + // The Name of the URL Path Map. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // One or more path_rule blocks as defined above. + PathRule []PathRuleInitParameters `json:"pathRule,omitempty" tf:"path_rule,omitempty"` +} + +type URLPathMapObservation struct { + + // The ID of the Default Backend Address Pool. + DefaultBackendAddressPoolID *string `json:"defaultBackendAddressPoolId,omitempty" tf:"default_backend_address_pool_id,omitempty"` + + // The Name of the Default Backend Address Pool which should be used for this URL Path Map. Cannot be set if default_redirect_configuration_name is set. + DefaultBackendAddressPoolName *string `json:"defaultBackendAddressPoolName,omitempty" tf:"default_backend_address_pool_name,omitempty"` + + // The ID of the Default Backend HTTP Settings Collection. 
+ DefaultBackendHTTPSettingsID *string `json:"defaultBackendHttpSettingsId,omitempty" tf:"default_backend_http_settings_id,omitempty"` + + // The Name of the Default Backend HTTP Settings Collection which should be used for this URL Path Map. Cannot be set if default_redirect_configuration_name is set. + DefaultBackendHTTPSettingsName *string `json:"defaultBackendHttpSettingsName,omitempty" tf:"default_backend_http_settings_name,omitempty"` + + // The ID of the Default Redirect Configuration. + DefaultRedirectConfigurationID *string `json:"defaultRedirectConfigurationId,omitempty" tf:"default_redirect_configuration_id,omitempty"` + + // The Name of the Default Redirect Configuration which should be used for this URL Path Map. Cannot be set if either default_backend_address_pool_name or default_backend_http_settings_name is set. + DefaultRedirectConfigurationName *string `json:"defaultRedirectConfigurationName,omitempty" tf:"default_redirect_configuration_name,omitempty"` + + // The ID of the Application Gateway. + DefaultRewriteRuleSetID *string `json:"defaultRewriteRuleSetId,omitempty" tf:"default_rewrite_rule_set_id,omitempty"` + + // The Name of the Default Rewrite Rule Set which should be used for this URL Path Map. Only valid for v2 SKUs. + DefaultRewriteRuleSetName *string `json:"defaultRewriteRuleSetName,omitempty" tf:"default_rewrite_rule_set_name,omitempty"` + + // The ID of the URL Path Map. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The Name of the URL Path Map. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // One or more path_rule blocks as defined above. + PathRule []PathRuleObservation `json:"pathRule,omitempty" tf:"path_rule,omitempty"` +} + +type URLPathMapParameters struct { + + // The Name of the Default Backend Address Pool which should be used for this URL Path Map. Cannot be set if default_redirect_configuration_name is set. 
+ // +kubebuilder:validation:Optional + DefaultBackendAddressPoolName *string `json:"defaultBackendAddressPoolName,omitempty" tf:"default_backend_address_pool_name,omitempty"` + + // The Name of the Default Backend HTTP Settings Collection which should be used for this URL Path Map. Cannot be set if default_redirect_configuration_name is set. + // +kubebuilder:validation:Optional + DefaultBackendHTTPSettingsName *string `json:"defaultBackendHttpSettingsName,omitempty" tf:"default_backend_http_settings_name,omitempty"` + + // The Name of the Default Redirect Configuration which should be used for this URL Path Map. Cannot be set if either default_backend_address_pool_name or default_backend_http_settings_name is set. + // +kubebuilder:validation:Optional + DefaultRedirectConfigurationName *string `json:"defaultRedirectConfigurationName,omitempty" tf:"default_redirect_configuration_name,omitempty"` + + // The Name of the Default Rewrite Rule Set which should be used for this URL Path Map. Only valid for v2 SKUs. + // +kubebuilder:validation:Optional + DefaultRewriteRuleSetName *string `json:"defaultRewriteRuleSetName,omitempty" tf:"default_rewrite_rule_set_name,omitempty"` + + // The Name of the URL Path Map. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // One or more path_rule blocks as defined above. + // +kubebuilder:validation:Optional + PathRule []PathRuleParameters `json:"pathRule" tf:"path_rule,omitempty"` +} + +type WafConfigurationInitParameters struct { + + // One or more disabled_rule_group blocks as defined below. + DisabledRuleGroup []DisabledRuleGroupInitParameters `json:"disabledRuleGroup,omitempty" tf:"disabled_rule_group,omitempty"` + + // Is the Web Application Firewall enabled? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // One or more exclusion blocks as defined below. 
+ Exclusion []ExclusionInitParameters `json:"exclusion,omitempty" tf:"exclusion,omitempty"` + + // The File Upload Limit in MB. Accepted values are in the range 1MB to 750MB for the WAF_v2 SKU, and 1MB to 500MB for all other SKUs. Defaults to 100MB. + FileUploadLimitMb *float64 `json:"fileUploadLimitMb,omitempty" tf:"file_upload_limit_mb,omitempty"` + + // The Web Application Firewall Mode. Possible values are Detection and Prevention. + FirewallMode *string `json:"firewallMode,omitempty" tf:"firewall_mode,omitempty"` + + // The Maximum Request Body Size in KB. Accepted values are in the range 1KB to 128KB. Defaults to 128KB. + MaxRequestBodySizeKb *float64 `json:"maxRequestBodySizeKb,omitempty" tf:"max_request_body_size_kb,omitempty"` + + // Is Request Body Inspection enabled? Defaults to true. + RequestBodyCheck *bool `json:"requestBodyCheck,omitempty" tf:"request_body_check,omitempty"` + + // The Type of the Rule Set used for this Web Application Firewall. Possible values are OWASP, Microsoft_BotManagerRuleSet and Microsoft_DefaultRuleSet. Defaults to OWASP. + RuleSetType *string `json:"ruleSetType,omitempty" tf:"rule_set_type,omitempty"` + + // The Version of the Rule Set used for this Web Application Firewall. Possible values are 0.1, 1.0, 2.1, 2.2.9, 3.0, 3.1 and 3.2. + RuleSetVersion *string `json:"ruleSetVersion,omitempty" tf:"rule_set_version,omitempty"` +} + +type WafConfigurationObservation struct { + + // One or more disabled_rule_group blocks as defined below. + DisabledRuleGroup []DisabledRuleGroupObservation `json:"disabledRuleGroup,omitempty" tf:"disabled_rule_group,omitempty"` + + // Is the Web Application Firewall enabled? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // One or more exclusion blocks as defined below. + Exclusion []ExclusionObservation `json:"exclusion,omitempty" tf:"exclusion,omitempty"` + + // The File Upload Limit in MB. 
Accepted values are in the range 1MB to 750MB for the WAF_v2 SKU, and 1MB to 500MB for all other SKUs. Defaults to 100MB. + FileUploadLimitMb *float64 `json:"fileUploadLimitMb,omitempty" tf:"file_upload_limit_mb,omitempty"` + + // The Web Application Firewall Mode. Possible values are Detection and Prevention. + FirewallMode *string `json:"firewallMode,omitempty" tf:"firewall_mode,omitempty"` + + // The Maximum Request Body Size in KB. Accepted values are in the range 1KB to 128KB. Defaults to 128KB. + MaxRequestBodySizeKb *float64 `json:"maxRequestBodySizeKb,omitempty" tf:"max_request_body_size_kb,omitempty"` + + // Is Request Body Inspection enabled? Defaults to true. + RequestBodyCheck *bool `json:"requestBodyCheck,omitempty" tf:"request_body_check,omitempty"` + + // The Type of the Rule Set used for this Web Application Firewall. Possible values are OWASP, Microsoft_BotManagerRuleSet and Microsoft_DefaultRuleSet. Defaults to OWASP. + RuleSetType *string `json:"ruleSetType,omitempty" tf:"rule_set_type,omitempty"` + + // The Version of the Rule Set used for this Web Application Firewall. Possible values are 0.1, 1.0, 2.1, 2.2.9, 3.0, 3.1 and 3.2. + RuleSetVersion *string `json:"ruleSetVersion,omitempty" tf:"rule_set_version,omitempty"` +} + +type WafConfigurationParameters struct { + + // One or more disabled_rule_group blocks as defined below. + // +kubebuilder:validation:Optional + DisabledRuleGroup []DisabledRuleGroupParameters `json:"disabledRuleGroup,omitempty" tf:"disabled_rule_group,omitempty"` + + // Is the Web Application Firewall enabled? + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` + + // One or more exclusion blocks as defined below. + // +kubebuilder:validation:Optional + Exclusion []ExclusionParameters `json:"exclusion,omitempty" tf:"exclusion,omitempty"` + + // The File Upload Limit in MB. Accepted values are in the range 1MB to 750MB for the WAF_v2 SKU, and 1MB to 500MB for all other SKUs. 
Defaults to 100MB. + // +kubebuilder:validation:Optional + FileUploadLimitMb *float64 `json:"fileUploadLimitMb,omitempty" tf:"file_upload_limit_mb,omitempty"` + + // The Web Application Firewall Mode. Possible values are Detection and Prevention. + // +kubebuilder:validation:Optional + FirewallMode *string `json:"firewallMode" tf:"firewall_mode,omitempty"` + + // The Maximum Request Body Size in KB. Accepted values are in the range 1KB to 128KB. Defaults to 128KB. + // +kubebuilder:validation:Optional + MaxRequestBodySizeKb *float64 `json:"maxRequestBodySizeKb,omitempty" tf:"max_request_body_size_kb,omitempty"` + + // Is Request Body Inspection enabled? Defaults to true. + // +kubebuilder:validation:Optional + RequestBodyCheck *bool `json:"requestBodyCheck,omitempty" tf:"request_body_check,omitempty"` + + // The Type of the Rule Set used for this Web Application Firewall. Possible values are OWASP, Microsoft_BotManagerRuleSet and Microsoft_DefaultRuleSet. Defaults to OWASP. + // +kubebuilder:validation:Optional + RuleSetType *string `json:"ruleSetType,omitempty" tf:"rule_set_type,omitempty"` + + // The Version of the Rule Set used for this Web Application Firewall. Possible values are 0.1, 1.0, 2.1, 2.2.9, 3.0, 3.1 and 3.2. + // +kubebuilder:validation:Optional + RuleSetVersion *string `json:"ruleSetVersion" tf:"rule_set_version,omitempty"` +} + +// ApplicationGatewaySpec defines the desired state of ApplicationGateway +type ApplicationGatewaySpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ApplicationGatewayParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. 
+ // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ApplicationGatewayInitParameters `json:"initProvider,omitempty"` +} + +// ApplicationGatewayStatus defines the observed state of ApplicationGateway. +type ApplicationGatewayStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ApplicationGatewayObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ApplicationGateway is the Schema for the ApplicationGateways API. Manages an Application Gateway. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type ApplicationGateway struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.backendAddressPool) || (has(self.initProvider) && has(self.initProvider.backendAddressPool))",message="spec.forProvider.backendAddressPool is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || 
has(self.forProvider.backendHttpSettings) || (has(self.initProvider) && has(self.initProvider.backendHttpSettings))",message="spec.forProvider.backendHttpSettings is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.frontendIpConfiguration) || (has(self.initProvider) && has(self.initProvider.frontendIpConfiguration))",message="spec.forProvider.frontendIpConfiguration is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.frontendPort) || (has(self.initProvider) && has(self.initProvider.frontendPort))",message="spec.forProvider.frontendPort is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.gatewayIpConfiguration) || (has(self.initProvider) && has(self.initProvider.gatewayIpConfiguration))",message="spec.forProvider.gatewayIpConfiguration is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.httpListener) || (has(self.initProvider) && has(self.initProvider.httpListener))",message="spec.forProvider.httpListener is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in 
self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.requestRoutingRule) || (has(self.initProvider) && has(self.initProvider.requestRoutingRule))",message="spec.forProvider.requestRoutingRule is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.sku) || (has(self.initProvider) && has(self.initProvider.sku))",message="spec.forProvider.sku is a required parameter" + Spec ApplicationGatewaySpec `json:"spec"` + Status ApplicationGatewayStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ApplicationGatewayList contains a list of ApplicationGateways +type ApplicationGatewayList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ApplicationGateway `json:"items"` +} + +// Repository type metadata. +var ( + ApplicationGateway_Kind = "ApplicationGateway" + ApplicationGateway_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ApplicationGateway_Kind}.String() + ApplicationGateway_KindAPIVersion = ApplicationGateway_Kind + "." + CRDGroupVersion.String() + ApplicationGateway_GroupVersionKind = CRDGroupVersion.WithKind(ApplicationGateway_Kind) +) + +func init() { + SchemeBuilder.Register(&ApplicationGateway{}, &ApplicationGatewayList{}) +} diff --git a/apis/network/v1beta2/zz_connectionmonitor_terraformed.go b/apis/network/v1beta2/zz_connectionmonitor_terraformed.go new file mode 100755 index 000000000..61be0e789 --- /dev/null +++ b/apis/network/v1beta2/zz_connectionmonitor_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ConnectionMonitor +func (mg *ConnectionMonitor) GetTerraformResourceType() string { + return "azurerm_network_connection_monitor" +} + +// GetConnectionDetailsMapping for this ConnectionMonitor +func (tr *ConnectionMonitor) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ConnectionMonitor +func (tr *ConnectionMonitor) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ConnectionMonitor +func (tr *ConnectionMonitor) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ConnectionMonitor +func (tr *ConnectionMonitor) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ConnectionMonitor +func (tr *ConnectionMonitor) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ConnectionMonitor +func (tr *ConnectionMonitor) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ConnectionMonitor +func (tr *ConnectionMonitor) GetInitParameters() (map[string]any, error) { + p, err 
:= json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this ConnectionMonitor +func (tr *ConnectionMonitor) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ConnectionMonitor using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ConnectionMonitor) LateInitialize(attrs []byte) (bool, error) { + params := &ConnectionMonitorParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ConnectionMonitor) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/network/v1beta2/zz_connectionmonitor_types.go b/apis/network/v1beta2/zz_connectionmonitor_types.go new file mode 100755 index 000000000..9c27dd9bf --- /dev/null +++ b/apis/network/v1beta2/zz_connectionmonitor_types.go @@ -0,0 +1,681 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ConnectionMonitorInitParameters struct { + + // A endpoint block as defined below. + Endpoint []EndpointInitParameters `json:"endpoint,omitempty" tf:"endpoint,omitempty"` + + // The Azure Region where the Network Connection Monitor should exist. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The description of the Network Connection Monitor. + Notes *string `json:"notes,omitempty" tf:"notes,omitempty"` + + // A list of IDs of the Log Analytics Workspace which will accept the output from the Network Connection Monitor. + // +listType=set + OutputWorkspaceResourceIds []*string `json:"outputWorkspaceResourceIds,omitempty" tf:"output_workspace_resource_ids,omitempty"` + + // A mapping of tags which should be assigned to the Network Connection Monitor. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A test_configuration block as defined below. + TestConfiguration []TestConfigurationInitParameters `json:"testConfiguration,omitempty" tf:"test_configuration,omitempty"` + + // A test_group block as defined below. 
+ TestGroup []TestGroupInitParameters `json:"testGroup,omitempty" tf:"test_group,omitempty"` +} + +type ConnectionMonitorObservation struct { + + // A endpoint block as defined below. + Endpoint []EndpointObservation `json:"endpoint,omitempty" tf:"endpoint,omitempty"` + + // The ID of the Network Connection Monitor. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The Azure Region where the Network Connection Monitor should exist. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The ID of the Network Watcher. Changing this forces a new resource to be created. + NetworkWatcherID *string `json:"networkWatcherId,omitempty" tf:"network_watcher_id,omitempty"` + + // The description of the Network Connection Monitor. + Notes *string `json:"notes,omitempty" tf:"notes,omitempty"` + + // A list of IDs of the Log Analytics Workspace which will accept the output from the Network Connection Monitor. + // +listType=set + OutputWorkspaceResourceIds []*string `json:"outputWorkspaceResourceIds,omitempty" tf:"output_workspace_resource_ids,omitempty"` + + // A mapping of tags which should be assigned to the Network Connection Monitor. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A test_configuration block as defined below. + TestConfiguration []TestConfigurationObservation `json:"testConfiguration,omitempty" tf:"test_configuration,omitempty"` + + // A test_group block as defined below. + TestGroup []TestGroupObservation `json:"testGroup,omitempty" tf:"test_group,omitempty"` +} + +type ConnectionMonitorParameters struct { + + // A endpoint block as defined below. + // +kubebuilder:validation:Optional + Endpoint []EndpointParameters `json:"endpoint,omitempty" tf:"endpoint,omitempty"` + + // The Azure Region where the Network Connection Monitor should exist. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The ID of the Network Watcher. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.Watcher + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + NetworkWatcherID *string `json:"networkWatcherId,omitempty" tf:"network_watcher_id,omitempty"` + + // Reference to a Watcher in network to populate networkWatcherId. + // +kubebuilder:validation:Optional + NetworkWatcherIDRef *v1.Reference `json:"networkWatcherIdRef,omitempty" tf:"-"` + + // Selector for a Watcher in network to populate networkWatcherId. + // +kubebuilder:validation:Optional + NetworkWatcherIDSelector *v1.Selector `json:"networkWatcherIdSelector,omitempty" tf:"-"` + + // The description of the Network Connection Monitor. + // +kubebuilder:validation:Optional + Notes *string `json:"notes,omitempty" tf:"notes,omitempty"` + + // A list of IDs of the Log Analytics Workspace which will accept the output from the Network Connection Monitor. + // +kubebuilder:validation:Optional + // +listType=set + OutputWorkspaceResourceIds []*string `json:"outputWorkspaceResourceIds,omitempty" tf:"output_workspace_resource_ids,omitempty"` + + // A mapping of tags which should be assigned to the Network Connection Monitor. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A test_configuration block as defined below. + // +kubebuilder:validation:Optional + TestConfiguration []TestConfigurationParameters `json:"testConfiguration,omitempty" tf:"test_configuration,omitempty"` + + // A test_group block as defined below. 
+ // +kubebuilder:validation:Optional + TestGroup []TestGroupParameters `json:"testGroup,omitempty" tf:"test_group,omitempty"` +} + +type EndpointInitParameters struct { + + // The IP address or domain name of the Network Connection Monitor endpoint. + Address *string `json:"address,omitempty" tf:"address,omitempty"` + + // The test coverage for the Network Connection Monitor endpoint. Possible values are AboveAverage, Average, BelowAverage, Default, Full and Low. + CoverageLevel *string `json:"coverageLevel,omitempty" tf:"coverage_level,omitempty"` + + // A list of IPv4/IPv6 subnet masks or IPv4/IPv6 IP addresses to be excluded to the Network Connection Monitor endpoint. + // +listType=set + ExcludedIPAddresses []*string `json:"excludedIpAddresses,omitempty" tf:"excluded_ip_addresses,omitempty"` + + // A filter block as defined below. + Filter *FilterInitParameters `json:"filter,omitempty" tf:"filter,omitempty"` + + // A list of IPv4/IPv6 subnet masks or IPv4/IPv6 IP addresses to be included to the Network Connection Monitor endpoint. + // +listType=set + IncludedIPAddresses []*string `json:"includedIpAddresses,omitempty" tf:"included_ip_addresses,omitempty"` + + // The name of the endpoint for the Network Connection Monitor . + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The resource ID which is used as the endpoint by the Network Connection Monitor. + TargetResourceID *string `json:"targetResourceId,omitempty" tf:"target_resource_id,omitempty"` + + // The endpoint type of the Network Connection Monitor. Possible values are AzureSubnet, AzureVM, AzureVNet, ExternalAddress, MMAWorkspaceMachine and MMAWorkspaceNetwork. + TargetResourceType *string `json:"targetResourceType,omitempty" tf:"target_resource_type,omitempty"` +} + +type EndpointObservation struct { + + // The IP address or domain name of the Network Connection Monitor endpoint. 
+ Address *string `json:"address,omitempty" tf:"address,omitempty"` + + // The test coverage for the Network Connection Monitor endpoint. Possible values are AboveAverage, Average, BelowAverage, Default, Full and Low. + CoverageLevel *string `json:"coverageLevel,omitempty" tf:"coverage_level,omitempty"` + + // A list of IPv4/IPv6 subnet masks or IPv4/IPv6 IP addresses to be excluded to the Network Connection Monitor endpoint. + // +listType=set + ExcludedIPAddresses []*string `json:"excludedIpAddresses,omitempty" tf:"excluded_ip_addresses,omitempty"` + + // A filter block as defined below. + Filter *FilterObservation `json:"filter,omitempty" tf:"filter,omitempty"` + + // A list of IPv4/IPv6 subnet masks or IPv4/IPv6 IP addresses to be included to the Network Connection Monitor endpoint. + // +listType=set + IncludedIPAddresses []*string `json:"includedIpAddresses,omitempty" tf:"included_ip_addresses,omitempty"` + + // The name of the endpoint for the Network Connection Monitor . + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The resource ID which is used as the endpoint by the Network Connection Monitor. + TargetResourceID *string `json:"targetResourceId,omitempty" tf:"target_resource_id,omitempty"` + + // The endpoint type of the Network Connection Monitor. Possible values are AzureSubnet, AzureVM, AzureVNet, ExternalAddress, MMAWorkspaceMachine and MMAWorkspaceNetwork. + TargetResourceType *string `json:"targetResourceType,omitempty" tf:"target_resource_type,omitempty"` +} + +type EndpointParameters struct { + + // The IP address or domain name of the Network Connection Monitor endpoint. + // +kubebuilder:validation:Optional + Address *string `json:"address,omitempty" tf:"address,omitempty"` + + // The test coverage for the Network Connection Monitor endpoint. Possible values are AboveAverage, Average, BelowAverage, Default, Full and Low. 
+ // +kubebuilder:validation:Optional + CoverageLevel *string `json:"coverageLevel,omitempty" tf:"coverage_level,omitempty"` + + // A list of IPv4/IPv6 subnet masks or IPv4/IPv6 IP addresses to be excluded to the Network Connection Monitor endpoint. + // +kubebuilder:validation:Optional + // +listType=set + ExcludedIPAddresses []*string `json:"excludedIpAddresses,omitempty" tf:"excluded_ip_addresses,omitempty"` + + // A filter block as defined below. + // +kubebuilder:validation:Optional + Filter *FilterParameters `json:"filter,omitempty" tf:"filter,omitempty"` + + // A list of IPv4/IPv6 subnet masks or IPv4/IPv6 IP addresses to be included to the Network Connection Monitor endpoint. + // +kubebuilder:validation:Optional + // +listType=set + IncludedIPAddresses []*string `json:"includedIpAddresses,omitempty" tf:"included_ip_addresses,omitempty"` + + // The name of the endpoint for the Network Connection Monitor . + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The resource ID which is used as the endpoint by the Network Connection Monitor. + // +kubebuilder:validation:Optional + TargetResourceID *string `json:"targetResourceId,omitempty" tf:"target_resource_id,omitempty"` + + // The endpoint type of the Network Connection Monitor. Possible values are AzureSubnet, AzureVM, AzureVNet, ExternalAddress, MMAWorkspaceMachine and MMAWorkspaceNetwork. + // +kubebuilder:validation:Optional + TargetResourceType *string `json:"targetResourceType,omitempty" tf:"target_resource_type,omitempty"` +} + +type FilterInitParameters struct { + + // A item block as defined below. + Item []ItemInitParameters `json:"item,omitempty" tf:"item,omitempty"` + + // The type of items included in the filter. Possible values are AgentAddress. Defaults to AgentAddress. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type FilterObservation struct { + + // A item block as defined below. 
+ Item []ItemObservation `json:"item,omitempty" tf:"item,omitempty"` + + // The type of items included in the filter. Possible values are AgentAddress. Defaults to AgentAddress. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type FilterParameters struct { + + // A item block as defined below. + // +kubebuilder:validation:Optional + Item []ItemParameters `json:"item,omitempty" tf:"item,omitempty"` + + // The type of items included in the filter. Possible values are AgentAddress. Defaults to AgentAddress. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type HTTPConfigurationInitParameters struct { + + // The HTTP method for the HTTP request. Possible values are Get and Post. Defaults to Get. + Method *string `json:"method,omitempty" tf:"method,omitempty"` + + // The path component of the URI. It only accepts the absolute path. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // The port for the TCP connection. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Should HTTPS be preferred over HTTP in cases where the choice is not explicit? Defaults to false. + PreferHTTPS *bool `json:"preferHttps,omitempty" tf:"prefer_https,omitempty"` + + // A request_header block as defined below. + RequestHeader []HTTPConfigurationRequestHeaderInitParameters `json:"requestHeader,omitempty" tf:"request_header,omitempty"` + + // The HTTP status codes to consider successful. For instance, 2xx, 301-304 and 418. + // +listType=set + ValidStatusCodeRanges []*string `json:"validStatusCodeRanges,omitempty" tf:"valid_status_code_ranges,omitempty"` +} + +type HTTPConfigurationObservation struct { + + // The HTTP method for the HTTP request. Possible values are Get and Post. Defaults to Get. + Method *string `json:"method,omitempty" tf:"method,omitempty"` + + // The path component of the URI. It only accepts the absolute path. 
+ Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // The port for the TCP connection. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Should HTTPS be preferred over HTTP in cases where the choice is not explicit? Defaults to false. + PreferHTTPS *bool `json:"preferHttps,omitempty" tf:"prefer_https,omitempty"` + + // A request_header block as defined below. + RequestHeader []HTTPConfigurationRequestHeaderObservation `json:"requestHeader,omitempty" tf:"request_header,omitempty"` + + // The HTTP status codes to consider successful. For instance, 2xx, 301-304 and 418. + // +listType=set + ValidStatusCodeRanges []*string `json:"validStatusCodeRanges,omitempty" tf:"valid_status_code_ranges,omitempty"` +} + +type HTTPConfigurationParameters struct { + + // The HTTP method for the HTTP request. Possible values are Get and Post. Defaults to Get. + // +kubebuilder:validation:Optional + Method *string `json:"method,omitempty" tf:"method,omitempty"` + + // The path component of the URI. It only accepts the absolute path. + // +kubebuilder:validation:Optional + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // The port for the TCP connection. + // +kubebuilder:validation:Optional + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Should HTTPS be preferred over HTTP in cases where the choice is not explicit? Defaults to false. + // +kubebuilder:validation:Optional + PreferHTTPS *bool `json:"preferHttps,omitempty" tf:"prefer_https,omitempty"` + + // A request_header block as defined below. + // +kubebuilder:validation:Optional + RequestHeader []HTTPConfigurationRequestHeaderParameters `json:"requestHeader,omitempty" tf:"request_header,omitempty"` + + // The HTTP status codes to consider successful. For instance, 2xx, 301-304 and 418. 
+ // +kubebuilder:validation:Optional + // +listType=set + ValidStatusCodeRanges []*string `json:"validStatusCodeRanges,omitempty" tf:"valid_status_code_ranges,omitempty"` +} + +type HTTPConfigurationRequestHeaderInitParameters struct { + + // The name of the test group for the Network Connection Monitor. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The value of the HTTP header. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type HTTPConfigurationRequestHeaderObservation struct { + + // The name of the test group for the Network Connection Monitor. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The value of the HTTP header. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type HTTPConfigurationRequestHeaderParameters struct { + + // The name of the test group for the Network Connection Monitor. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The value of the HTTP header. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type IcmpConfigurationInitParameters struct { + + // Should path evaluation with trace route be enabled? Defaults to true. + TraceRouteEnabled *bool `json:"traceRouteEnabled,omitempty" tf:"trace_route_enabled,omitempty"` +} + +type IcmpConfigurationObservation struct { + + // Should path evaluation with trace route be enabled? Defaults to true. + TraceRouteEnabled *bool `json:"traceRouteEnabled,omitempty" tf:"trace_route_enabled,omitempty"` +} + +type IcmpConfigurationParameters struct { + + // Should path evaluation with trace route be enabled? Defaults to true. + // +kubebuilder:validation:Optional + TraceRouteEnabled *bool `json:"traceRouteEnabled,omitempty" tf:"trace_route_enabled,omitempty"` +} + +type ItemInitParameters struct { + + // The address of the filter item. 
+ Address *string `json:"address,omitempty" tf:"address,omitempty"` + + // The type of items included in the filter. Possible values are AgentAddress. Defaults to AgentAddress. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ItemObservation struct { + + // The address of the filter item. + Address *string `json:"address,omitempty" tf:"address,omitempty"` + + // The type of items included in the filter. Possible values are AgentAddress. Defaults to AgentAddress. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ItemParameters struct { + + // The address of the filter item. + // +kubebuilder:validation:Optional + Address *string `json:"address,omitempty" tf:"address,omitempty"` + + // The type of items included in the filter. Possible values are AgentAddress. Defaults to AgentAddress. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type SuccessThresholdInitParameters struct { + + // The maximum percentage of failed checks permitted for a test to be successful. + ChecksFailedPercent *float64 `json:"checksFailedPercent,omitempty" tf:"checks_failed_percent,omitempty"` + + // The maximum round-trip time in milliseconds permitted for a test to be successful. + RoundTripTimeMS *float64 `json:"roundTripTimeMs,omitempty" tf:"round_trip_time_ms,omitempty"` +} + +type SuccessThresholdObservation struct { + + // The maximum percentage of failed checks permitted for a test to be successful. + ChecksFailedPercent *float64 `json:"checksFailedPercent,omitempty" tf:"checks_failed_percent,omitempty"` + + // The maximum round-trip time in milliseconds permitted for a test to be successful. + RoundTripTimeMS *float64 `json:"roundTripTimeMs,omitempty" tf:"round_trip_time_ms,omitempty"` +} + +type SuccessThresholdParameters struct { + + // The maximum percentage of failed checks permitted for a test to be successful. 
+ // +kubebuilder:validation:Optional + ChecksFailedPercent *float64 `json:"checksFailedPercent,omitempty" tf:"checks_failed_percent,omitempty"` + + // The maximum round-trip time in milliseconds permitted for a test to be successful. + // +kubebuilder:validation:Optional + RoundTripTimeMS *float64 `json:"roundTripTimeMs,omitempty" tf:"round_trip_time_ms,omitempty"` +} + +type TCPConfigurationInitParameters struct { + + // The destination port behavior for the TCP connection. Possible values are None and ListenIfAvailable. + DestinationPortBehavior *string `json:"destinationPortBehavior,omitempty" tf:"destination_port_behavior,omitempty"` + + // The port for the TCP connection. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Should path evaluation with trace route be enabled? Defaults to true. + TraceRouteEnabled *bool `json:"traceRouteEnabled,omitempty" tf:"trace_route_enabled,omitempty"` +} + +type TCPConfigurationObservation struct { + + // The destination port behavior for the TCP connection. Possible values are None and ListenIfAvailable. + DestinationPortBehavior *string `json:"destinationPortBehavior,omitempty" tf:"destination_port_behavior,omitempty"` + + // The port for the TCP connection. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // Should path evaluation with trace route be enabled? Defaults to true. + TraceRouteEnabled *bool `json:"traceRouteEnabled,omitempty" tf:"trace_route_enabled,omitempty"` +} + +type TCPConfigurationParameters struct { + + // The destination port behavior for the TCP connection. Possible values are None and ListenIfAvailable. + // +kubebuilder:validation:Optional + DestinationPortBehavior *string `json:"destinationPortBehavior,omitempty" tf:"destination_port_behavior,omitempty"` + + // The port for the TCP connection. + // +kubebuilder:validation:Optional + Port *float64 `json:"port" tf:"port,omitempty"` + + // Should path evaluation with trace route be enabled? Defaults to true. 
+ // +kubebuilder:validation:Optional + TraceRouteEnabled *bool `json:"traceRouteEnabled,omitempty" tf:"trace_route_enabled,omitempty"` +} + +type TestConfigurationInitParameters struct { + + // A http_configuration block as defined below. + HTTPConfiguration *HTTPConfigurationInitParameters `json:"httpConfiguration,omitempty" tf:"http_configuration,omitempty"` + + // A icmp_configuration block as defined below. + IcmpConfiguration *IcmpConfigurationInitParameters `json:"icmpConfiguration,omitempty" tf:"icmp_configuration,omitempty"` + + // The name of test configuration for the Network Connection Monitor. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The preferred IP version which is used in the test evaluation. Possible values are IPv4 and IPv6. + PreferredIPVersion *string `json:"preferredIpVersion,omitempty" tf:"preferred_ip_version,omitempty"` + + // The protocol used to evaluate tests. Possible values are Tcp, Http and Icmp. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // A success_threshold block as defined below. + SuccessThreshold *SuccessThresholdInitParameters `json:"successThreshold,omitempty" tf:"success_threshold,omitempty"` + + // A tcp_configuration block as defined below. + TCPConfiguration *TCPConfigurationInitParameters `json:"tcpConfiguration,omitempty" tf:"tcp_configuration,omitempty"` + + // The time interval in seconds at which the test evaluation will happen. Defaults to 60. + TestFrequencyInSeconds *float64 `json:"testFrequencyInSeconds,omitempty" tf:"test_frequency_in_seconds,omitempty"` +} + +type TestConfigurationObservation struct { + + // A http_configuration block as defined below. + HTTPConfiguration *HTTPConfigurationObservation `json:"httpConfiguration,omitempty" tf:"http_configuration,omitempty"` + + // A icmp_configuration block as defined below. 
+ IcmpConfiguration *IcmpConfigurationObservation `json:"icmpConfiguration,omitempty" tf:"icmp_configuration,omitempty"` + + // The name of test configuration for the Network Connection Monitor. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The preferred IP version which is used in the test evaluation. Possible values are IPv4 and IPv6. + PreferredIPVersion *string `json:"preferredIpVersion,omitempty" tf:"preferred_ip_version,omitempty"` + + // The protocol used to evaluate tests. Possible values are Tcp, Http and Icmp. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // A success_threshold block as defined below. + SuccessThreshold *SuccessThresholdObservation `json:"successThreshold,omitempty" tf:"success_threshold,omitempty"` + + // A tcp_configuration block as defined below. + TCPConfiguration *TCPConfigurationObservation `json:"tcpConfiguration,omitempty" tf:"tcp_configuration,omitempty"` + + // The time interval in seconds at which the test evaluation will happen. Defaults to 60. + TestFrequencyInSeconds *float64 `json:"testFrequencyInSeconds,omitempty" tf:"test_frequency_in_seconds,omitempty"` +} + +type TestConfigurationParameters struct { + + // A http_configuration block as defined below. + // +kubebuilder:validation:Optional + HTTPConfiguration *HTTPConfigurationParameters `json:"httpConfiguration,omitempty" tf:"http_configuration,omitempty"` + + // A icmp_configuration block as defined below. + // +kubebuilder:validation:Optional + IcmpConfiguration *IcmpConfigurationParameters `json:"icmpConfiguration,omitempty" tf:"icmp_configuration,omitempty"` + + // The name of test configuration for the Network Connection Monitor. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The preferred IP version which is used in the test evaluation. Possible values are IPv4 and IPv6. 
+ // +kubebuilder:validation:Optional + PreferredIPVersion *string `json:"preferredIpVersion,omitempty" tf:"preferred_ip_version,omitempty"` + + // The protocol used to evaluate tests. Possible values are Tcp, Http and Icmp. + // +kubebuilder:validation:Optional + Protocol *string `json:"protocol" tf:"protocol,omitempty"` + + // A success_threshold block as defined below. + // +kubebuilder:validation:Optional + SuccessThreshold *SuccessThresholdParameters `json:"successThreshold,omitempty" tf:"success_threshold,omitempty"` + + // A tcp_configuration block as defined below. + // +kubebuilder:validation:Optional + TCPConfiguration *TCPConfigurationParameters `json:"tcpConfiguration,omitempty" tf:"tcp_configuration,omitempty"` + + // The time interval in seconds at which the test evaluation will happen. Defaults to 60. + // +kubebuilder:validation:Optional + TestFrequencyInSeconds *float64 `json:"testFrequencyInSeconds,omitempty" tf:"test_frequency_in_seconds,omitempty"` +} + +type TestGroupInitParameters struct { + + // A list of destination endpoint names. + // +listType=set + DestinationEndpoints []*string `json:"destinationEndpoints,omitempty" tf:"destination_endpoints,omitempty"` + + // Should the test group be enabled? Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The name of the test group for the Network Connection Monitor. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A list of source endpoint names. + // +listType=set + SourceEndpoints []*string `json:"sourceEndpoints,omitempty" tf:"source_endpoints,omitempty"` + + // A list of test configuration names. + // +listType=set + TestConfigurationNames []*string `json:"testConfigurationNames,omitempty" tf:"test_configuration_names,omitempty"` +} + +type TestGroupObservation struct { + + // A list of destination endpoint names. 
+ // +listType=set + DestinationEndpoints []*string `json:"destinationEndpoints,omitempty" tf:"destination_endpoints,omitempty"` + + // Should the test group be enabled? Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The name of the test group for the Network Connection Monitor. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A list of source endpoint names. + // +listType=set + SourceEndpoints []*string `json:"sourceEndpoints,omitempty" tf:"source_endpoints,omitempty"` + + // A list of test configuration names. + // +listType=set + TestConfigurationNames []*string `json:"testConfigurationNames,omitempty" tf:"test_configuration_names,omitempty"` +} + +type TestGroupParameters struct { + + // A list of destination endpoint names. + // +kubebuilder:validation:Optional + // +listType=set + DestinationEndpoints []*string `json:"destinationEndpoints" tf:"destination_endpoints,omitempty"` + + // Should the test group be enabled? Defaults to true. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The name of the test group for the Network Connection Monitor. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // A list of source endpoint names. + // +kubebuilder:validation:Optional + // +listType=set + SourceEndpoints []*string `json:"sourceEndpoints" tf:"source_endpoints,omitempty"` + + // A list of test configuration names. + // +kubebuilder:validation:Optional + // +listType=set + TestConfigurationNames []*string `json:"testConfigurationNames" tf:"test_configuration_names,omitempty"` +} + +// ConnectionMonitorSpec defines the desired state of ConnectionMonitor +type ConnectionMonitorSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ConnectionMonitorParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. 
+ // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ConnectionMonitorInitParameters `json:"initProvider,omitempty"` +} + +// ConnectionMonitorStatus defines the observed state of ConnectionMonitor. +type ConnectionMonitorStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ConnectionMonitorObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ConnectionMonitor is the Schema for the ConnectionMonitors API. Manages a Network Connection Monitor. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type ConnectionMonitor struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.endpoint) || (has(self.initProvider) && has(self.initProvider.endpoint))",message="spec.forProvider.endpoint is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.testConfiguration) || (has(self.initProvider) && has(self.initProvider.testConfiguration))",message="spec.forProvider.testConfiguration is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.testGroup) || (has(self.initProvider) && has(self.initProvider.testGroup))",message="spec.forProvider.testGroup is a required parameter" + Spec ConnectionMonitorSpec `json:"spec"` + Status 
ConnectionMonitorStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ConnectionMonitorList contains a list of ConnectionMonitors +type ConnectionMonitorList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ConnectionMonitor `json:"items"` +} + +// Repository type metadata. +var ( + ConnectionMonitor_Kind = "ConnectionMonitor" + ConnectionMonitor_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ConnectionMonitor_Kind}.String() + ConnectionMonitor_KindAPIVersion = ConnectionMonitor_Kind + "." + CRDGroupVersion.String() + ConnectionMonitor_GroupVersionKind = CRDGroupVersion.WithKind(ConnectionMonitor_Kind) +) + +func init() { + SchemeBuilder.Register(&ConnectionMonitor{}, &ConnectionMonitorList{}) +} diff --git a/apis/network/v1beta2/zz_dnszone_terraformed.go b/apis/network/v1beta2/zz_dnszone_terraformed.go new file mode 100755 index 000000000..81fee7194 --- /dev/null +++ b/apis/network/v1beta2/zz_dnszone_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this DNSZone +func (mg *DNSZone) GetTerraformResourceType() string { + return "azurerm_dns_zone" +} + +// GetConnectionDetailsMapping for this DNSZone +func (tr *DNSZone) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this DNSZone +func (tr *DNSZone) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this DNSZone +func (tr *DNSZone) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this DNSZone +func (tr *DNSZone) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this DNSZone +func (tr *DNSZone) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this DNSZone +func (tr *DNSZone) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this DNSZone +func (tr *DNSZone) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// 
GetInitParameters of this DNSZone +func (tr *DNSZone) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this DNSZone using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *DNSZone) LateInitialize(attrs []byte) (bool, error) { + params := &DNSZoneParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *DNSZone) GetTerraformSchemaVersion() int { + return 2 +} diff --git a/apis/network/v1beta2/zz_dnszone_types.go b/apis/network/v1beta2/zz_dnszone_types.go new file mode 100755 index 000000000..47ce29afd --- /dev/null +++ b/apis/network/v1beta2/zz_dnszone_types.go @@ -0,0 +1,239 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DNSZoneInitParameters struct { + + // An soa_record block as defined below. + SoaRecord *SoaRecordInitParameters `json:"soaRecord,omitempty" tf:"soa_record,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type DNSZoneObservation struct { + + // The DNS Zone ID. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Maximum number of Records in the zone. Defaults to 1000. + MaxNumberOfRecordSets *float64 `json:"maxNumberOfRecordSets,omitempty" tf:"max_number_of_record_sets,omitempty"` + + // A list of values that make up the NS record for the zone. + // +listType=set + NameServers []*string `json:"nameServers,omitempty" tf:"name_servers,omitempty"` + + // The number of records already in the zone. + NumberOfRecordSets *float64 `json:"numberOfRecordSets,omitempty" tf:"number_of_record_sets,omitempty"` + + // Specifies the resource group where the resource exists. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // An soa_record block as defined below. 
+ SoaRecord *SoaRecordObservation `json:"soaRecord,omitempty" tf:"soa_record,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type DNSZoneParameters struct { + + // Specifies the resource group where the resource exists. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // An soa_record block as defined below. + // +kubebuilder:validation:Optional + SoaRecord *SoaRecordParameters `json:"soaRecord,omitempty" tf:"soa_record,omitempty"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type SoaRecordInitParameters struct { + + // The email contact for the SOA record. + Email *string `json:"email,omitempty" tf:"email,omitempty"` + + // The expire time for the SOA record. Defaults to 2419200. + ExpireTime *float64 `json:"expireTime,omitempty" tf:"expire_time,omitempty"` + + // The domain name of the authoritative name server for the SOA record. If not set, computed value from Azure will be used. + HostName *string `json:"hostName,omitempty" tf:"host_name,omitempty"` + + // The minimum Time To Live for the SOA record. 
By convention, it is used to determine the negative caching duration. Defaults to 300. + MinimumTTL *float64 `json:"minimumTtl,omitempty" tf:"minimum_ttl,omitempty"` + + // The refresh time for the SOA record. Defaults to 3600. + RefreshTime *float64 `json:"refreshTime,omitempty" tf:"refresh_time,omitempty"` + + // The retry time for the SOA record. Defaults to 300. + RetryTime *float64 `json:"retryTime,omitempty" tf:"retry_time,omitempty"` + + // The serial number for the SOA record. Defaults to 1. + SerialNumber *float64 `json:"serialNumber,omitempty" tf:"serial_number,omitempty"` + + // The Time To Live of the SOA Record in seconds. Defaults to 3600. + TTL *float64 `json:"ttl,omitempty" tf:"ttl,omitempty"` + + // A mapping of tags to assign to the Record Set. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type SoaRecordObservation struct { + + // The email contact for the SOA record. + Email *string `json:"email,omitempty" tf:"email,omitempty"` + + // The expire time for the SOA record. Defaults to 2419200. + ExpireTime *float64 `json:"expireTime,omitempty" tf:"expire_time,omitempty"` + + Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"` + + // The domain name of the authoritative name server for the SOA record. If not set, computed value from Azure will be used. + HostName *string `json:"hostName,omitempty" tf:"host_name,omitempty"` + + // The minimum Time To Live for the SOA record. By convention, it is used to determine the negative caching duration. Defaults to 300. + MinimumTTL *float64 `json:"minimumTtl,omitempty" tf:"minimum_ttl,omitempty"` + + // The refresh time for the SOA record. Defaults to 3600. + RefreshTime *float64 `json:"refreshTime,omitempty" tf:"refresh_time,omitempty"` + + // The retry time for the SOA record. Defaults to 300. + RetryTime *float64 `json:"retryTime,omitempty" tf:"retry_time,omitempty"` + + // The serial number for the SOA record. Defaults to 1. 
+ SerialNumber *float64 `json:"serialNumber,omitempty" tf:"serial_number,omitempty"` + + // The Time To Live of the SOA Record in seconds. Defaults to 3600. + TTL *float64 `json:"ttl,omitempty" tf:"ttl,omitempty"` + + // A mapping of tags to assign to the Record Set. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type SoaRecordParameters struct { + + // The email contact for the SOA record. + // +kubebuilder:validation:Optional + Email *string `json:"email" tf:"email,omitempty"` + + // The expire time for the SOA record. Defaults to 2419200. + // +kubebuilder:validation:Optional + ExpireTime *float64 `json:"expireTime,omitempty" tf:"expire_time,omitempty"` + + // The domain name of the authoritative name server for the SOA record. If not set, computed value from Azure will be used. + // +kubebuilder:validation:Optional + HostName *string `json:"hostName,omitempty" tf:"host_name,omitempty"` + + // The minimum Time To Live for the SOA record. By convention, it is used to determine the negative caching duration. Defaults to 300. + // +kubebuilder:validation:Optional + MinimumTTL *float64 `json:"minimumTtl,omitempty" tf:"minimum_ttl,omitempty"` + + // The refresh time for the SOA record. Defaults to 3600. + // +kubebuilder:validation:Optional + RefreshTime *float64 `json:"refreshTime,omitempty" tf:"refresh_time,omitempty"` + + // The retry time for the SOA record. Defaults to 300. + // +kubebuilder:validation:Optional + RetryTime *float64 `json:"retryTime,omitempty" tf:"retry_time,omitempty"` + + // The serial number for the SOA record. Defaults to 1. + // +kubebuilder:validation:Optional + SerialNumber *float64 `json:"serialNumber,omitempty" tf:"serial_number,omitempty"` + + // The Time To Live of the SOA Record in seconds. Defaults to 3600. + // +kubebuilder:validation:Optional + TTL *float64 `json:"ttl,omitempty" tf:"ttl,omitempty"` + + // A mapping of tags to assign to the Record Set. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +// DNSZoneSpec defines the desired state of DNSZone +type DNSZoneSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider DNSZoneParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider DNSZoneInitParameters `json:"initProvider,omitempty"` +} + +// DNSZoneStatus defines the observed state of DNSZone. +type DNSZoneStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider DNSZoneObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// DNSZone is the Schema for the DNSZones API. Manages a DNS Zone. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type DNSZone struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec DNSZoneSpec `json:"spec"` + Status DNSZoneStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// DNSZoneList contains a list of DNSZones +type DNSZoneList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DNSZone `json:"items"` +} + +// Repository type metadata. +var ( + DNSZone_Kind = "DNSZone" + DNSZone_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: DNSZone_Kind}.String() + DNSZone_KindAPIVersion = DNSZone_Kind + "." + CRDGroupVersion.String() + DNSZone_GroupVersionKind = CRDGroupVersion.WithKind(DNSZone_Kind) +) + +func init() { + SchemeBuilder.Register(&DNSZone{}, &DNSZoneList{}) +} diff --git a/apis/network/v1beta2/zz_expressroutecircuit_terraformed.go b/apis/network/v1beta2/zz_expressroutecircuit_terraformed.go new file mode 100755 index 000000000..04456f13f --- /dev/null +++ b/apis/network/v1beta2/zz_expressroutecircuit_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ExpressRouteCircuit +func (mg *ExpressRouteCircuit) GetTerraformResourceType() string { + return "azurerm_express_route_circuit" +} + +// GetConnectionDetailsMapping for this ExpressRouteCircuit +func (tr *ExpressRouteCircuit) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"authorization_key": "spec.forProvider.authorizationKeySecretRef", "service_key": "status.atProvider.serviceKey"} +} + +// GetObservation of this ExpressRouteCircuit +func (tr *ExpressRouteCircuit) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ExpressRouteCircuit +func (tr *ExpressRouteCircuit) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ExpressRouteCircuit +func (tr *ExpressRouteCircuit) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ExpressRouteCircuit +func (tr *ExpressRouteCircuit) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ExpressRouteCircuit +func (tr *ExpressRouteCircuit) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, 
&tr.Spec.ForProvider) +} + +// GetInitParameters of this ExpressRouteCircuit +func (tr *ExpressRouteCircuit) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this ExpressRouteCircuit +func (tr *ExpressRouteCircuit) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ExpressRouteCircuit using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *ExpressRouteCircuit) LateInitialize(attrs []byte) (bool, error) { + params := &ExpressRouteCircuitParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ExpressRouteCircuit) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/network/v1beta2/zz_expressroutecircuit_types.go b/apis/network/v1beta2/zz_expressroutecircuit_types.go new file mode 100755 index 000000000..b736ac84b --- /dev/null +++ b/apis/network/v1beta2/zz_expressroutecircuit_types.go @@ -0,0 +1,233 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ExpressRouteCircuitInitParameters struct { + + // Allow the circuit to interact with classic (RDFE) resources. Defaults to false. + AllowClassicOperations *bool `json:"allowClassicOperations,omitempty" tf:"allow_classic_operations,omitempty"` + + // The bandwidth in Gbps of the circuit being created on the Express Route Port. + BandwidthInGbps *float64 `json:"bandwidthInGbps,omitempty" tf:"bandwidth_in_gbps,omitempty"` + + // The bandwidth in Mbps of the circuit being created on the Service Provider. + BandwidthInMbps *float64 `json:"bandwidthInMbps,omitempty" tf:"bandwidth_in_mbps,omitempty"` + + // The ID of the Express Route Port this Express Route Circuit is based on. 
Changing this forces a new resource to be created. + ExpressRoutePortID *string `json:"expressRoutePortId,omitempty" tf:"express_route_port_id,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the peering location and not the Azure resource location. Changing this forces a new resource to be created. + PeeringLocation *string `json:"peeringLocation,omitempty" tf:"peering_location,omitempty"` + + // The name of the ExpressRoute Service Provider. Changing this forces a new resource to be created. + ServiceProviderName *string `json:"serviceProviderName,omitempty" tf:"service_provider_name,omitempty"` + + // A sku block for the ExpressRoute circuit as documented below. + Sku *ExpressRouteCircuitSkuInitParameters `json:"sku,omitempty" tf:"sku,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ExpressRouteCircuitObservation struct { + + // Allow the circuit to interact with classic (RDFE) resources. Defaults to false. + AllowClassicOperations *bool `json:"allowClassicOperations,omitempty" tf:"allow_classic_operations,omitempty"` + + // The bandwidth in Gbps of the circuit being created on the Express Route Port. + BandwidthInGbps *float64 `json:"bandwidthInGbps,omitempty" tf:"bandwidth_in_gbps,omitempty"` + + // The bandwidth in Mbps of the circuit being created on the Service Provider. + BandwidthInMbps *float64 `json:"bandwidthInMbps,omitempty" tf:"bandwidth_in_mbps,omitempty"` + + // The ID of the Express Route Port this Express Route Circuit is based on. Changing this forces a new resource to be created. + ExpressRoutePortID *string `json:"expressRoutePortId,omitempty" tf:"express_route_port_id,omitempty"` + + // The ID of the ExpressRoute circuit. 
+ ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the peering location and not the Azure resource location. Changing this forces a new resource to be created. + PeeringLocation *string `json:"peeringLocation,omitempty" tf:"peering_location,omitempty"` + + // The name of the resource group in which to create the ExpressRoute circuit. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // The name of the ExpressRoute Service Provider. Changing this forces a new resource to be created. + ServiceProviderName *string `json:"serviceProviderName,omitempty" tf:"service_provider_name,omitempty"` + + // The ExpressRoute circuit provisioning state from your chosen service provider. Possible values are NotProvisioned, Provisioning, Provisioned, and Deprovisioning. + ServiceProviderProvisioningState *string `json:"serviceProviderProvisioningState,omitempty" tf:"service_provider_provisioning_state,omitempty"` + + // A sku block for the ExpressRoute circuit as documented below. + Sku *ExpressRouteCircuitSkuObservation `json:"sku,omitempty" tf:"sku,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ExpressRouteCircuitParameters struct { + + // Allow the circuit to interact with classic (RDFE) resources. Defaults to false. + // +kubebuilder:validation:Optional + AllowClassicOperations *bool `json:"allowClassicOperations,omitempty" tf:"allow_classic_operations,omitempty"` + + // The authorization key. This can be used to set up an ExpressRoute Circuit with an ExpressRoute Port from another subscription. 
+ // +kubebuilder:validation:Optional + AuthorizationKeySecretRef *v1.SecretKeySelector `json:"authorizationKeySecretRef,omitempty" tf:"-"` + + // The bandwidth in Gbps of the circuit being created on the Express Route Port. + // +kubebuilder:validation:Optional + BandwidthInGbps *float64 `json:"bandwidthInGbps,omitempty" tf:"bandwidth_in_gbps,omitempty"` + + // The bandwidth in Mbps of the circuit being created on the Service Provider. + // +kubebuilder:validation:Optional + BandwidthInMbps *float64 `json:"bandwidthInMbps,omitempty" tf:"bandwidth_in_mbps,omitempty"` + + // The ID of the Express Route Port this Express Route Circuit is based on. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ExpressRoutePortID *string `json:"expressRoutePortId,omitempty" tf:"express_route_port_id,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the peering location and not the Azure resource location. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + PeeringLocation *string `json:"peeringLocation,omitempty" tf:"peering_location,omitempty"` + + // The name of the resource group in which to create the ExpressRoute circuit. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. 
+ // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // The name of the ExpressRoute Service Provider. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ServiceProviderName *string `json:"serviceProviderName,omitempty" tf:"service_provider_name,omitempty"` + + // A sku block for the ExpressRoute circuit as documented below. + // +kubebuilder:validation:Optional + Sku *ExpressRouteCircuitSkuParameters `json:"sku,omitempty" tf:"sku,omitempty"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ExpressRouteCircuitSkuInitParameters struct { + + // The billing mode for bandwidth. Possible values are MeteredData or UnlimitedData. + Family *string `json:"family,omitempty" tf:"family,omitempty"` + + // The service tier. Possible values are Basic, Local, Standard or Premium. + Tier *string `json:"tier,omitempty" tf:"tier,omitempty"` +} + +type ExpressRouteCircuitSkuObservation struct { + + // The billing mode for bandwidth. Possible values are MeteredData or UnlimitedData. + Family *string `json:"family,omitempty" tf:"family,omitempty"` + + // The service tier. Possible values are Basic, Local, Standard or Premium. + Tier *string `json:"tier,omitempty" tf:"tier,omitempty"` +} + +type ExpressRouteCircuitSkuParameters struct { + + // The billing mode for bandwidth. Possible values are MeteredData or UnlimitedData. + // +kubebuilder:validation:Optional + Family *string `json:"family" tf:"family,omitempty"` + + // The service tier. Possible values are Basic, Local, Standard or Premium. 
+ // +kubebuilder:validation:Optional + Tier *string `json:"tier" tf:"tier,omitempty"` +} + +// ExpressRouteCircuitSpec defines the desired state of ExpressRouteCircuit +type ExpressRouteCircuitSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ExpressRouteCircuitParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ExpressRouteCircuitInitParameters `json:"initProvider,omitempty"` +} + +// ExpressRouteCircuitStatus defines the observed state of ExpressRouteCircuit. +type ExpressRouteCircuitStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ExpressRouteCircuitObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ExpressRouteCircuit is the Schema for the ExpressRouteCircuits API. Manages an ExpressRoute circuit. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type ExpressRouteCircuit struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.sku) || (has(self.initProvider) && has(self.initProvider.sku))",message="spec.forProvider.sku is a required parameter" + Spec ExpressRouteCircuitSpec `json:"spec"` + Status ExpressRouteCircuitStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ExpressRouteCircuitList contains a list of ExpressRouteCircuits +type ExpressRouteCircuitList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ExpressRouteCircuit `json:"items"` +} + +// Repository type metadata. +var ( + ExpressRouteCircuit_Kind = "ExpressRouteCircuit" + ExpressRouteCircuit_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ExpressRouteCircuit_Kind}.String() + ExpressRouteCircuit_KindAPIVersion = ExpressRouteCircuit_Kind + "." 
+ CRDGroupVersion.String() + ExpressRouteCircuit_GroupVersionKind = CRDGroupVersion.WithKind(ExpressRouteCircuit_Kind) +) + +func init() { + SchemeBuilder.Register(&ExpressRouteCircuit{}, &ExpressRouteCircuitList{}) +} diff --git a/apis/network/v1beta2/zz_expressroutecircuitpeering_terraformed.go b/apis/network/v1beta2/zz_expressroutecircuitpeering_terraformed.go new file mode 100755 index 000000000..0723095fb --- /dev/null +++ b/apis/network/v1beta2/zz_expressroutecircuitpeering_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ExpressRouteCircuitPeering +func (mg *ExpressRouteCircuitPeering) GetTerraformResourceType() string { + return "azurerm_express_route_circuit_peering" +} + +// GetConnectionDetailsMapping for this ExpressRouteCircuitPeering +func (tr *ExpressRouteCircuitPeering) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"shared_key": "spec.forProvider.sharedKeySecretRef"} +} + +// GetObservation of this ExpressRouteCircuitPeering +func (tr *ExpressRouteCircuitPeering) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ExpressRouteCircuitPeering +func (tr *ExpressRouteCircuitPeering) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ExpressRouteCircuitPeering +func (tr 
*ExpressRouteCircuitPeering) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ExpressRouteCircuitPeering +func (tr *ExpressRouteCircuitPeering) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ExpressRouteCircuitPeering +func (tr *ExpressRouteCircuitPeering) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ExpressRouteCircuitPeering +func (tr *ExpressRouteCircuitPeering) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this ExpressRouteCircuitPeering +func (tr *ExpressRouteCircuitPeering) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ExpressRouteCircuitPeering using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ExpressRouteCircuitPeering) LateInitialize(attrs []byte) (bool, error) { + params := &ExpressRouteCircuitPeeringParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ExpressRouteCircuitPeering) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/network/v1beta2/zz_expressroutecircuitpeering_types.go b/apis/network/v1beta2/zz_expressroutecircuitpeering_types.go new file mode 100755 index 000000000..9781223c4 --- /dev/null +++ b/apis/network/v1beta2/zz_expressroutecircuitpeering_types.go @@ -0,0 +1,371 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ExpressRouteCircuitPeeringInitParameters struct { + + // A boolean value indicating whether the IPv4 peering is enabled. Defaults to true. 
+ IPv4Enabled *bool `json:"ipv4Enabled,omitempty" tf:"ipv4_enabled,omitempty"` + + // A ipv6 block as defined below. + IPv6 *IPv6InitParameters `json:"ipv6,omitempty" tf:"ipv6,omitempty"` + + // A microsoft_peering_config block as defined below. Required when peering_type is set to MicrosoftPeering and config for IPv4. + MicrosoftPeeringConfig *MicrosoftPeeringConfigInitParameters `json:"microsoftPeeringConfig,omitempty" tf:"microsoft_peering_config,omitempty"` + + // The Either a 16-bit or a 32-bit ASN. Can either be public or private. + PeerAsn *float64 `json:"peerAsn,omitempty" tf:"peer_asn,omitempty"` + + // A /30 subnet for the primary link. Required when config for IPv4. + PrimaryPeerAddressPrefix *string `json:"primaryPeerAddressPrefix,omitempty" tf:"primary_peer_address_prefix,omitempty"` + + // The ID of the Route Filter. Only available when peering_type is set to MicrosoftPeering. + RouteFilterID *string `json:"routeFilterId,omitempty" tf:"route_filter_id,omitempty"` + + // A /30 subnet for the secondary link. Required when config for IPv4. + SecondaryPeerAddressPrefix *string `json:"secondaryPeerAddressPrefix,omitempty" tf:"secondary_peer_address_prefix,omitempty"` + + // A valid VLAN ID to establish this peering on. + VlanID *float64 `json:"vlanId,omitempty" tf:"vlan_id,omitempty"` +} + +type ExpressRouteCircuitPeeringObservation struct { + + // The ASN used by Azure. + AzureAsn *float64 `json:"azureAsn,omitempty" tf:"azure_asn,omitempty"` + + // The name of the ExpressRoute Circuit in which to create the Peering. Changing this forces a new resource to be created. + ExpressRouteCircuitName *string `json:"expressRouteCircuitName,omitempty" tf:"express_route_circuit_name,omitempty"` + + GatewayManagerEtag *string `json:"gatewayManagerEtag,omitempty" tf:"gateway_manager_etag,omitempty"` + + // The ID of the ExpressRoute Circuit Peering. 
+ ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A boolean value indicating whether the IPv4 peering is enabled. Defaults to true. + IPv4Enabled *bool `json:"ipv4Enabled,omitempty" tf:"ipv4_enabled,omitempty"` + + // A ipv6 block as defined below. + IPv6 *IPv6Observation `json:"ipv6,omitempty" tf:"ipv6,omitempty"` + + // A microsoft_peering_config block as defined below. Required when peering_type is set to MicrosoftPeering and config for IPv4. + MicrosoftPeeringConfig *MicrosoftPeeringConfigObservation `json:"microsoftPeeringConfig,omitempty" tf:"microsoft_peering_config,omitempty"` + + // The Either a 16-bit or a 32-bit ASN. Can either be public or private. + PeerAsn *float64 `json:"peerAsn,omitempty" tf:"peer_asn,omitempty"` + + // The Primary Port used by Azure for this Peering. + PrimaryAzurePort *string `json:"primaryAzurePort,omitempty" tf:"primary_azure_port,omitempty"` + + // A /30 subnet for the primary link. Required when config for IPv4. + PrimaryPeerAddressPrefix *string `json:"primaryPeerAddressPrefix,omitempty" tf:"primary_peer_address_prefix,omitempty"` + + // The name of the resource group in which to create the Express Route Circuit Peering. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // The ID of the Route Filter. Only available when peering_type is set to MicrosoftPeering. + RouteFilterID *string `json:"routeFilterId,omitempty" tf:"route_filter_id,omitempty"` + + // The Secondary Port used by Azure for this Peering. + SecondaryAzurePort *string `json:"secondaryAzurePort,omitempty" tf:"secondary_azure_port,omitempty"` + + // A /30 subnet for the secondary link. Required when config for IPv4. + SecondaryPeerAddressPrefix *string `json:"secondaryPeerAddressPrefix,omitempty" tf:"secondary_peer_address_prefix,omitempty"` + + // A valid VLAN ID to establish this peering on. 
+ VlanID *float64 `json:"vlanId,omitempty" tf:"vlan_id,omitempty"` +} + +type ExpressRouteCircuitPeeringParameters struct { + + // The name of the ExpressRoute Circuit in which to create the Peering. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.ExpressRouteCircuit + // +kubebuilder:validation:Optional + ExpressRouteCircuitName *string `json:"expressRouteCircuitName,omitempty" tf:"express_route_circuit_name,omitempty"` + + // Reference to a ExpressRouteCircuit in network to populate expressRouteCircuitName. + // +kubebuilder:validation:Optional + ExpressRouteCircuitNameRef *v1.Reference `json:"expressRouteCircuitNameRef,omitempty" tf:"-"` + + // Selector for a ExpressRouteCircuit in network to populate expressRouteCircuitName. + // +kubebuilder:validation:Optional + ExpressRouteCircuitNameSelector *v1.Selector `json:"expressRouteCircuitNameSelector,omitempty" tf:"-"` + + // A boolean value indicating whether the IPv4 peering is enabled. Defaults to true. + // +kubebuilder:validation:Optional + IPv4Enabled *bool `json:"ipv4Enabled,omitempty" tf:"ipv4_enabled,omitempty"` + + // A ipv6 block as defined below. + // +kubebuilder:validation:Optional + IPv6 *IPv6Parameters `json:"ipv6,omitempty" tf:"ipv6,omitempty"` + + // A microsoft_peering_config block as defined below. Required when peering_type is set to MicrosoftPeering and config for IPv4. + // +kubebuilder:validation:Optional + MicrosoftPeeringConfig *MicrosoftPeeringConfigParameters `json:"microsoftPeeringConfig,omitempty" tf:"microsoft_peering_config,omitempty"` + + // The Either a 16-bit or a 32-bit ASN. Can either be public or private. + // +kubebuilder:validation:Optional + PeerAsn *float64 `json:"peerAsn,omitempty" tf:"peer_asn,omitempty"` + + // A /30 subnet for the primary link. Required when config for IPv4. 
+ // +kubebuilder:validation:Optional + PrimaryPeerAddressPrefix *string `json:"primaryPeerAddressPrefix,omitempty" tf:"primary_peer_address_prefix,omitempty"` + + // The name of the resource group in which to create the Express Route Circuit Peering. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // The ID of the Route Filter. Only available when peering_type is set to MicrosoftPeering. + // +kubebuilder:validation:Optional + RouteFilterID *string `json:"routeFilterId,omitempty" tf:"route_filter_id,omitempty"` + + // A /30 subnet for the secondary link. Required when config for IPv4. + // +kubebuilder:validation:Optional + SecondaryPeerAddressPrefix *string `json:"secondaryPeerAddressPrefix,omitempty" tf:"secondary_peer_address_prefix,omitempty"` + + // The shared key. Can be a maximum of 25 characters. + // +kubebuilder:validation:Optional + SharedKeySecretRef *v1.SecretKeySelector `json:"sharedKeySecretRef,omitempty" tf:"-"` + + // A valid VLAN ID to establish this peering on. + // +kubebuilder:validation:Optional + VlanID *float64 `json:"vlanId,omitempty" tf:"vlan_id,omitempty"` +} + +type IPv6InitParameters struct { + + // A boolean value indicating whether the IPv6 peering is enabled. Defaults to true. 
+ Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // A microsoft_peering block as defined below. + MicrosoftPeering *MicrosoftPeeringInitParameters `json:"microsoftPeering,omitempty" tf:"microsoft_peering,omitempty"` + + // A subnet for the primary link. + PrimaryPeerAddressPrefix *string `json:"primaryPeerAddressPrefix,omitempty" tf:"primary_peer_address_prefix,omitempty"` + + // The ID of the Route Filter. Only available when peering_type is set to MicrosoftPeering. + RouteFilterID *string `json:"routeFilterId,omitempty" tf:"route_filter_id,omitempty"` + + // A subnet for the secondary link. + SecondaryPeerAddressPrefix *string `json:"secondaryPeerAddressPrefix,omitempty" tf:"secondary_peer_address_prefix,omitempty"` +} + +type IPv6Observation struct { + + // A boolean value indicating whether the IPv6 peering is enabled. Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // A microsoft_peering block as defined below. + MicrosoftPeering *MicrosoftPeeringObservation `json:"microsoftPeering,omitempty" tf:"microsoft_peering,omitempty"` + + // A subnet for the primary link. + PrimaryPeerAddressPrefix *string `json:"primaryPeerAddressPrefix,omitempty" tf:"primary_peer_address_prefix,omitempty"` + + // The ID of the Route Filter. Only available when peering_type is set to MicrosoftPeering. + RouteFilterID *string `json:"routeFilterId,omitempty" tf:"route_filter_id,omitempty"` + + // A subnet for the secondary link. + SecondaryPeerAddressPrefix *string `json:"secondaryPeerAddressPrefix,omitempty" tf:"secondary_peer_address_prefix,omitempty"` +} + +type IPv6Parameters struct { + + // A boolean value indicating whether the IPv6 peering is enabled. Defaults to true. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // A microsoft_peering block as defined below. 
+ // +kubebuilder:validation:Optional + MicrosoftPeering *MicrosoftPeeringParameters `json:"microsoftPeering,omitempty" tf:"microsoft_peering,omitempty"` + + // A subnet for the primary link. + // +kubebuilder:validation:Optional + PrimaryPeerAddressPrefix *string `json:"primaryPeerAddressPrefix" tf:"primary_peer_address_prefix,omitempty"` + + // The ID of the Route Filter. Only available when peering_type is set to MicrosoftPeering. + // +kubebuilder:validation:Optional + RouteFilterID *string `json:"routeFilterId,omitempty" tf:"route_filter_id,omitempty"` + + // A subnet for the secondary link. + // +kubebuilder:validation:Optional + SecondaryPeerAddressPrefix *string `json:"secondaryPeerAddressPrefix" tf:"secondary_peer_address_prefix,omitempty"` +} + +type MicrosoftPeeringConfigInitParameters struct { + + // The communities of Bgp Peering specified for microsoft peering. + AdvertisedCommunities []*string `json:"advertisedCommunities,omitempty" tf:"advertised_communities,omitempty"` + + // A list of Advertised Public Prefixes. + AdvertisedPublicPrefixes []*string `json:"advertisedPublicPrefixes,omitempty" tf:"advertised_public_prefixes,omitempty"` + + // The CustomerASN of the peering. Defaults to 0. + CustomerAsn *float64 `json:"customerAsn,omitempty" tf:"customer_asn,omitempty"` + + // The Routing Registry against which the AS number and prefixes are registered. For example: ARIN, RIPE, AFRINIC etc. Defaults to NONE. + RoutingRegistryName *string `json:"routingRegistryName,omitempty" tf:"routing_registry_name,omitempty"` +} + +type MicrosoftPeeringConfigObservation struct { + + // The communities of Bgp Peering specified for microsoft peering. + AdvertisedCommunities []*string `json:"advertisedCommunities,omitempty" tf:"advertised_communities,omitempty"` + + // A list of Advertised Public Prefixes. + AdvertisedPublicPrefixes []*string `json:"advertisedPublicPrefixes,omitempty" tf:"advertised_public_prefixes,omitempty"` + + // The CustomerASN of the peering. 
Defaults to 0. + CustomerAsn *float64 `json:"customerAsn,omitempty" tf:"customer_asn,omitempty"` + + // The Routing Registry against which the AS number and prefixes are registered. For example: ARIN, RIPE, AFRINIC etc. Defaults to NONE. + RoutingRegistryName *string `json:"routingRegistryName,omitempty" tf:"routing_registry_name,omitempty"` +} + +type MicrosoftPeeringConfigParameters struct { + + // The communities of Bgp Peering specified for microsoft peering. + // +kubebuilder:validation:Optional + AdvertisedCommunities []*string `json:"advertisedCommunities,omitempty" tf:"advertised_communities,omitempty"` + + // A list of Advertised Public Prefixes. + // +kubebuilder:validation:Optional + AdvertisedPublicPrefixes []*string `json:"advertisedPublicPrefixes" tf:"advertised_public_prefixes,omitempty"` + + // The CustomerASN of the peering. Defaults to 0. + // +kubebuilder:validation:Optional + CustomerAsn *float64 `json:"customerAsn,omitempty" tf:"customer_asn,omitempty"` + + // The Routing Registry against which the AS number and prefixes are registered. For example: ARIN, RIPE, AFRINIC etc. Defaults to NONE. + // +kubebuilder:validation:Optional + RoutingRegistryName *string `json:"routingRegistryName,omitempty" tf:"routing_registry_name,omitempty"` +} + +type MicrosoftPeeringInitParameters struct { + + // The communities of Bgp Peering specified for microsoft peering. + AdvertisedCommunities []*string `json:"advertisedCommunities,omitempty" tf:"advertised_communities,omitempty"` + + // A list of Advertised Public Prefixes. + AdvertisedPublicPrefixes []*string `json:"advertisedPublicPrefixes,omitempty" tf:"advertised_public_prefixes,omitempty"` + + // The CustomerASN of the peering. Defaults to 0. + CustomerAsn *float64 `json:"customerAsn,omitempty" tf:"customer_asn,omitempty"` + + // The Routing Registry against which the AS number and prefixes are registered. For example: ARIN, RIPE, AFRINIC etc. Defaults to NONE. 
+ RoutingRegistryName *string `json:"routingRegistryName,omitempty" tf:"routing_registry_name,omitempty"` +} + +type MicrosoftPeeringObservation struct { + + // The communities of Bgp Peering specified for microsoft peering. + AdvertisedCommunities []*string `json:"advertisedCommunities,omitempty" tf:"advertised_communities,omitempty"` + + // A list of Advertised Public Prefixes. + AdvertisedPublicPrefixes []*string `json:"advertisedPublicPrefixes,omitempty" tf:"advertised_public_prefixes,omitempty"` + + // The CustomerASN of the peering. Defaults to 0. + CustomerAsn *float64 `json:"customerAsn,omitempty" tf:"customer_asn,omitempty"` + + // The Routing Registry against which the AS number and prefixes are registered. For example: ARIN, RIPE, AFRINIC etc. Defaults to NONE. + RoutingRegistryName *string `json:"routingRegistryName,omitempty" tf:"routing_registry_name,omitempty"` +} + +type MicrosoftPeeringParameters struct { + + // The communities of Bgp Peering specified for microsoft peering. + // +kubebuilder:validation:Optional + AdvertisedCommunities []*string `json:"advertisedCommunities,omitempty" tf:"advertised_communities,omitempty"` + + // A list of Advertised Public Prefixes. + // +kubebuilder:validation:Optional + AdvertisedPublicPrefixes []*string `json:"advertisedPublicPrefixes,omitempty" tf:"advertised_public_prefixes,omitempty"` + + // The CustomerASN of the peering. Defaults to 0. + // +kubebuilder:validation:Optional + CustomerAsn *float64 `json:"customerAsn,omitempty" tf:"customer_asn,omitempty"` + + // The Routing Registry against which the AS number and prefixes are registered. For example: ARIN, RIPE, AFRINIC etc. Defaults to NONE. 
+ // +kubebuilder:validation:Optional + RoutingRegistryName *string `json:"routingRegistryName,omitempty" tf:"routing_registry_name,omitempty"` +} + +// ExpressRouteCircuitPeeringSpec defines the desired state of ExpressRouteCircuitPeering +type ExpressRouteCircuitPeeringSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ExpressRouteCircuitPeeringParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ExpressRouteCircuitPeeringInitParameters `json:"initProvider,omitempty"` +} + +// ExpressRouteCircuitPeeringStatus defines the observed state of ExpressRouteCircuitPeering. +type ExpressRouteCircuitPeeringStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ExpressRouteCircuitPeeringObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ExpressRouteCircuitPeering is the Schema for the ExpressRouteCircuitPeerings API. Manages an ExpressRoute Circuit Peering. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type ExpressRouteCircuitPeering struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.vlanId) || (has(self.initProvider) && has(self.initProvider.vlanId))",message="spec.forProvider.vlanId is a required parameter" + Spec ExpressRouteCircuitPeeringSpec `json:"spec"` + Status ExpressRouteCircuitPeeringStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ExpressRouteCircuitPeeringList contains a list of ExpressRouteCircuitPeerings +type ExpressRouteCircuitPeeringList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ExpressRouteCircuitPeering `json:"items"` +} + +// Repository type metadata. +var ( + ExpressRouteCircuitPeering_Kind = "ExpressRouteCircuitPeering" + ExpressRouteCircuitPeering_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ExpressRouteCircuitPeering_Kind}.String() + ExpressRouteCircuitPeering_KindAPIVersion = ExpressRouteCircuitPeering_Kind + "." 
+ CRDGroupVersion.String() + ExpressRouteCircuitPeering_GroupVersionKind = CRDGroupVersion.WithKind(ExpressRouteCircuitPeering_Kind) +) + +func init() { + SchemeBuilder.Register(&ExpressRouteCircuitPeering{}, &ExpressRouteCircuitPeeringList{}) +} diff --git a/apis/network/v1beta2/zz_expressrouteconnection_terraformed.go b/apis/network/v1beta2/zz_expressrouteconnection_terraformed.go new file mode 100755 index 000000000..cf5d26e6f --- /dev/null +++ b/apis/network/v1beta2/zz_expressrouteconnection_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ExpressRouteConnection +func (mg *ExpressRouteConnection) GetTerraformResourceType() string { + return "azurerm_express_route_connection" +} + +// GetConnectionDetailsMapping for this ExpressRouteConnection +func (tr *ExpressRouteConnection) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ExpressRouteConnection +func (tr *ExpressRouteConnection) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ExpressRouteConnection +func (tr *ExpressRouteConnection) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ExpressRouteConnection +func (tr *ExpressRouteConnection) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } 
+ return *tr.Status.AtProvider.ID +} + +// GetParameters of this ExpressRouteConnection +func (tr *ExpressRouteConnection) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ExpressRouteConnection +func (tr *ExpressRouteConnection) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ExpressRouteConnection +func (tr *ExpressRouteConnection) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this ExpressRouteConnection +func (tr *ExpressRouteConnection) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ExpressRouteConnection using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ExpressRouteConnection) LateInitialize(attrs []byte) (bool, error) { + params := &ExpressRouteConnectionParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ExpressRouteConnection) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/network/v1beta2/zz_expressrouteconnection_types.go b/apis/network/v1beta2/zz_expressrouteconnection_types.go new file mode 100755 index 000000000..0d869cafa --- /dev/null +++ b/apis/network/v1beta2/zz_expressrouteconnection_types.go @@ -0,0 +1,264 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ExpressRouteConnectionInitParameters struct { + + // The authorization key to establish the Express Route Connection. 
+ AuthorizationKey *string `json:"authorizationKey,omitempty" tf:"authorization_key,omitempty"` + + // Is Internet security enabled for this Express Route Connection? + EnableInternetSecurity *bool `json:"enableInternetSecurity,omitempty" tf:"enable_internet_security,omitempty"` + + // The ID of the Express Route Circuit Peering that this Express Route Connection connects with. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.ExpressRouteCircuitPeering + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + ExpressRouteCircuitPeeringID *string `json:"expressRouteCircuitPeeringId,omitempty" tf:"express_route_circuit_peering_id,omitempty"` + + // Reference to a ExpressRouteCircuitPeering in network to populate expressRouteCircuitPeeringId. + // +kubebuilder:validation:Optional + ExpressRouteCircuitPeeringIDRef *v1.Reference `json:"expressRouteCircuitPeeringIdRef,omitempty" tf:"-"` + + // Selector for a ExpressRouteCircuitPeering in network to populate expressRouteCircuitPeeringId. + // +kubebuilder:validation:Optional + ExpressRouteCircuitPeeringIDSelector *v1.Selector `json:"expressRouteCircuitPeeringIdSelector,omitempty" tf:"-"` + + // Specified whether Fast Path is enabled for Virtual Wan Firewall Hub. Defaults to false. + ExpressRouteGatewayBypassEnabled *bool `json:"expressRouteGatewayBypassEnabled,omitempty" tf:"express_route_gateway_bypass_enabled,omitempty"` + + // A routing block as defined below. + Routing *RoutingInitParameters `json:"routing,omitempty" tf:"routing,omitempty"` + + // The routing weight associated to the Express Route Connection. Possible value is between 0 and 32000. Defaults to 0. 
+ RoutingWeight *float64 `json:"routingWeight,omitempty" tf:"routing_weight,omitempty"` +} + +type ExpressRouteConnectionObservation struct { + + // The authorization key to establish the Express Route Connection. + AuthorizationKey *string `json:"authorizationKey,omitempty" tf:"authorization_key,omitempty"` + + // Is Internet security enabled for this Express Route Connection? + EnableInternetSecurity *bool `json:"enableInternetSecurity,omitempty" tf:"enable_internet_security,omitempty"` + + // The ID of the Express Route Circuit Peering that this Express Route Connection connects with. Changing this forces a new resource to be created. + ExpressRouteCircuitPeeringID *string `json:"expressRouteCircuitPeeringId,omitempty" tf:"express_route_circuit_peering_id,omitempty"` + + // Specified whether Fast Path is enabled for Virtual Wan Firewall Hub. Defaults to false. + ExpressRouteGatewayBypassEnabled *bool `json:"expressRouteGatewayBypassEnabled,omitempty" tf:"express_route_gateway_bypass_enabled,omitempty"` + + // The ID of the Express Route Gateway that this Express Route Connection connects with. Changing this forces a new resource to be created. + ExpressRouteGatewayID *string `json:"expressRouteGatewayId,omitempty" tf:"express_route_gateway_id,omitempty"` + + // The ID of the Express Route Connection. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A routing block as defined below. + Routing *RoutingObservation `json:"routing,omitempty" tf:"routing,omitempty"` + + // The routing weight associated to the Express Route Connection. Possible value is between 0 and 32000. Defaults to 0. + RoutingWeight *float64 `json:"routingWeight,omitempty" tf:"routing_weight,omitempty"` +} + +type ExpressRouteConnectionParameters struct { + + // The authorization key to establish the Express Route Connection. 
+ // +kubebuilder:validation:Optional + AuthorizationKey *string `json:"authorizationKey,omitempty" tf:"authorization_key,omitempty"` + + // Is Internet security enabled for this Express Route Connection? + // +kubebuilder:validation:Optional + EnableInternetSecurity *bool `json:"enableInternetSecurity,omitempty" tf:"enable_internet_security,omitempty"` + + // The ID of the Express Route Circuit Peering that this Express Route Connection connects with. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.ExpressRouteCircuitPeering + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + ExpressRouteCircuitPeeringID *string `json:"expressRouteCircuitPeeringId,omitempty" tf:"express_route_circuit_peering_id,omitempty"` + + // Reference to a ExpressRouteCircuitPeering in network to populate expressRouteCircuitPeeringId. + // +kubebuilder:validation:Optional + ExpressRouteCircuitPeeringIDRef *v1.Reference `json:"expressRouteCircuitPeeringIdRef,omitempty" tf:"-"` + + // Selector for a ExpressRouteCircuitPeering in network to populate expressRouteCircuitPeeringId. + // +kubebuilder:validation:Optional + ExpressRouteCircuitPeeringIDSelector *v1.Selector `json:"expressRouteCircuitPeeringIdSelector,omitempty" tf:"-"` + + // Specified whether Fast Path is enabled for Virtual Wan Firewall Hub. Defaults to false. + // +kubebuilder:validation:Optional + ExpressRouteGatewayBypassEnabled *bool `json:"expressRouteGatewayBypassEnabled,omitempty" tf:"express_route_gateway_bypass_enabled,omitempty"` + + // The ID of the Express Route Gateway that this Express Route Connection connects with. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.ExpressRouteGateway + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + ExpressRouteGatewayID *string `json:"expressRouteGatewayId,omitempty" tf:"express_route_gateway_id,omitempty"` + + // Reference to a ExpressRouteGateway in network to populate expressRouteGatewayId. + // +kubebuilder:validation:Optional + ExpressRouteGatewayIDRef *v1.Reference `json:"expressRouteGatewayIdRef,omitempty" tf:"-"` + + // Selector for a ExpressRouteGateway in network to populate expressRouteGatewayId. + // +kubebuilder:validation:Optional + ExpressRouteGatewayIDSelector *v1.Selector `json:"expressRouteGatewayIdSelector,omitempty" tf:"-"` + + // A routing block as defined below. + // +kubebuilder:validation:Optional + Routing *RoutingParameters `json:"routing,omitempty" tf:"routing,omitempty"` + + // The routing weight associated to the Express Route Connection. Possible value is between 0 and 32000. Defaults to 0. + // +kubebuilder:validation:Optional + RoutingWeight *float64 `json:"routingWeight,omitempty" tf:"routing_weight,omitempty"` +} + +type PropagatedRouteTableInitParameters struct { + + // The list of labels to logically group route tables. + // +listType=set + Labels []*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // A list of IDs of the Virtual Hub Route Table to propagate routes from Express Route Connection to the route table. + RouteTableIds []*string `json:"routeTableIds,omitempty" tf:"route_table_ids,omitempty"` +} + +type PropagatedRouteTableObservation struct { + + // The list of labels to logically group route tables. + // +listType=set + Labels []*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // A list of IDs of the Virtual Hub Route Table to propagate routes from Express Route Connection to the route table. 
+ RouteTableIds []*string `json:"routeTableIds,omitempty" tf:"route_table_ids,omitempty"` +} + +type PropagatedRouteTableParameters struct { + + // The list of labels to logically group route tables. + // +kubebuilder:validation:Optional + // +listType=set + Labels []*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // A list of IDs of the Virtual Hub Route Table to propagate routes from Express Route Connection to the route table. + // +kubebuilder:validation:Optional + RouteTableIds []*string `json:"routeTableIds,omitempty" tf:"route_table_ids,omitempty"` +} + +type RoutingInitParameters struct { + + // The ID of the Virtual Hub Route Table associated with this Express Route Connection. + AssociatedRouteTableID *string `json:"associatedRouteTableId,omitempty" tf:"associated_route_table_id,omitempty"` + + // The ID of the Route Map associated with this Express Route Connection for inbound routes. + InboundRouteMapID *string `json:"inboundRouteMapId,omitempty" tf:"inbound_route_map_id,omitempty"` + + // The ID of the Route Map associated with this Express Route Connection for outbound routes. + OutboundRouteMapID *string `json:"outboundRouteMapId,omitempty" tf:"outbound_route_map_id,omitempty"` + + // A propagated_route_table block as defined below. + PropagatedRouteTable *PropagatedRouteTableInitParameters `json:"propagatedRouteTable,omitempty" tf:"propagated_route_table,omitempty"` +} + +type RoutingObservation struct { + + // The ID of the Virtual Hub Route Table associated with this Express Route Connection. + AssociatedRouteTableID *string `json:"associatedRouteTableId,omitempty" tf:"associated_route_table_id,omitempty"` + + // The ID of the Route Map associated with this Express Route Connection for inbound routes. + InboundRouteMapID *string `json:"inboundRouteMapId,omitempty" tf:"inbound_route_map_id,omitempty"` + + // The ID of the Route Map associated with this Express Route Connection for outbound routes. 
+ OutboundRouteMapID *string `json:"outboundRouteMapId,omitempty" tf:"outbound_route_map_id,omitempty"` + + // A propagated_route_table block as defined below. + PropagatedRouteTable *PropagatedRouteTableObservation `json:"propagatedRouteTable,omitempty" tf:"propagated_route_table,omitempty"` +} + +type RoutingParameters struct { + + // The ID of the Virtual Hub Route Table associated with this Express Route Connection. + // +kubebuilder:validation:Optional + AssociatedRouteTableID *string `json:"associatedRouteTableId,omitempty" tf:"associated_route_table_id,omitempty"` + + // The ID of the Route Map associated with this Express Route Connection for inbound routes. + // +kubebuilder:validation:Optional + InboundRouteMapID *string `json:"inboundRouteMapId,omitempty" tf:"inbound_route_map_id,omitempty"` + + // The ID of the Route Map associated with this Express Route Connection for outbound routes. + // +kubebuilder:validation:Optional + OutboundRouteMapID *string `json:"outboundRouteMapId,omitempty" tf:"outbound_route_map_id,omitempty"` + + // A propagated_route_table block as defined below. + // +kubebuilder:validation:Optional + PropagatedRouteTable *PropagatedRouteTableParameters `json:"propagatedRouteTable,omitempty" tf:"propagated_route_table,omitempty"` +} + +// ExpressRouteConnectionSpec defines the desired state of ExpressRouteConnection +type ExpressRouteConnectionSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ExpressRouteConnectionParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ExpressRouteConnectionInitParameters `json:"initProvider,omitempty"` +} + +// ExpressRouteConnectionStatus defines the observed state of ExpressRouteConnection. +type ExpressRouteConnectionStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ExpressRouteConnectionObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ExpressRouteConnection is the Schema for the ExpressRouteConnections API. Manages an Express Route Connection. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type ExpressRouteConnection struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec ExpressRouteConnectionSpec `json:"spec"` + Status ExpressRouteConnectionStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ExpressRouteConnectionList contains a list of ExpressRouteConnections +type ExpressRouteConnectionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ExpressRouteConnection `json:"items"` +} + +// Repository type metadata. 
+var ( + ExpressRouteConnection_Kind = "ExpressRouteConnection" + ExpressRouteConnection_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ExpressRouteConnection_Kind}.String() + ExpressRouteConnection_KindAPIVersion = ExpressRouteConnection_Kind + "." + CRDGroupVersion.String() + ExpressRouteConnection_GroupVersionKind = CRDGroupVersion.WithKind(ExpressRouteConnection_Kind) +) + +func init() { + SchemeBuilder.Register(&ExpressRouteConnection{}, &ExpressRouteConnectionList{}) +} diff --git a/apis/network/v1beta2/zz_expressrouteport_terraformed.go b/apis/network/v1beta2/zz_expressrouteport_terraformed.go new file mode 100755 index 000000000..4af6793ab --- /dev/null +++ b/apis/network/v1beta2/zz_expressrouteport_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ExpressRoutePort +func (mg *ExpressRoutePort) GetTerraformResourceType() string { + return "azurerm_express_route_port" +} + +// GetConnectionDetailsMapping for this ExpressRoutePort +func (tr *ExpressRoutePort) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ExpressRoutePort +func (tr *ExpressRoutePort) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ExpressRoutePort +func (tr *ExpressRoutePort) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of 
+ underlying Terraform resource of this ExpressRoutePort +func (tr *ExpressRoutePort) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ExpressRoutePort +func (tr *ExpressRoutePort) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ExpressRoutePort +func (tr *ExpressRoutePort) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ExpressRoutePort +func (tr *ExpressRoutePort) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this ExpressRoutePort +func (tr *ExpressRoutePort) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ExpressRoutePort using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ExpressRoutePort) LateInitialize(attrs []byte) (bool, error) { + params := &ExpressRoutePortParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ExpressRoutePort) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/network/v1beta2/zz_expressrouteport_types.go b/apis/network/v1beta2/zz_expressrouteport_types.go new file mode 100755 index 000000000..925348f71 --- /dev/null +++ b/apis/network/v1beta2/zz_expressrouteport_types.go @@ -0,0 +1,394 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ExpressRoutePortIdentityInitParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Express Route Port. 
+ // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Express Route Port. Only possible value is UserAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ExpressRoutePortIdentityObservation struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Express Route Port. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Express Route Port. Only possible value is UserAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ExpressRoutePortIdentityParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Express Route Port. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Express Route Port. Only possible value is UserAssigned. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type ExpressRoutePortInitParameters struct { + + // Bandwidth of the Express Route Port in Gbps. Changing this forces a new Express Route Port to be created. + BandwidthInGbps *float64 `json:"bandwidthInGbps,omitempty" tf:"bandwidth_in_gbps,omitempty"` + + // The billing type of the Express Route Port. Possible values are MeteredData and UnlimitedData. + BillingType *string `json:"billingType,omitempty" tf:"billing_type,omitempty"` + + // The encapsulation method used for the Express Route Port. Changing this forces a new Express Route Port to be created. Possible values are: Dot1Q, QinQ. 
+ Encapsulation *string `json:"encapsulation,omitempty" tf:"encapsulation,omitempty"` + + // An identity block as defined below. + Identity *ExpressRoutePortIdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // A list of link blocks as defined below. + Link1 *Link1InitParameters `json:"link1,omitempty" tf:"link1,omitempty"` + + // A list of link blocks as defined below. + Link2 *Link2InitParameters `json:"link2,omitempty" tf:"link2,omitempty"` + + // The Azure Region where the Express Route Port should exist. Changing this forces a new Express Route Port to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the peering location that this Express Route Port is physically mapped to. Changing this forces a new Express Route Port to be created. + PeeringLocation *string `json:"peeringLocation,omitempty" tf:"peering_location,omitempty"` + + // A mapping of tags which should be assigned to the Express Route Port. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ExpressRoutePortObservation struct { + + // Bandwidth of the Express Route Port in Gbps. Changing this forces a new Express Route Port to be created. + BandwidthInGbps *float64 `json:"bandwidthInGbps,omitempty" tf:"bandwidth_in_gbps,omitempty"` + + // The billing type of the Express Route Port. Possible values are MeteredData and UnlimitedData. + BillingType *string `json:"billingType,omitempty" tf:"billing_type,omitempty"` + + // The encapsulation method used for the Express Route Port. Changing this forces a new Express Route Port to be created. Possible values are: Dot1Q, QinQ. + Encapsulation *string `json:"encapsulation,omitempty" tf:"encapsulation,omitempty"` + + // The EtherType of the Express Route Port. + Ethertype *string `json:"ethertype,omitempty" tf:"ethertype,omitempty"` + + // The resource GUID of the Express Route Port. 
+ GUID *string `json:"guid,omitempty" tf:"guid,omitempty"` + + // The ID of the Express Route Port. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. + Identity *ExpressRoutePortIdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // A list of link blocks as defined below. + Link1 *Link1Observation `json:"link1,omitempty" tf:"link1,omitempty"` + + // A list of link blocks as defined below. + Link2 *Link2Observation `json:"link2,omitempty" tf:"link2,omitempty"` + + // The Azure Region where the Express Route Port should exist. Changing this forces a new Express Route Port to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The maximum transmission unit of the Express Route Port. + Mtu *string `json:"mtu,omitempty" tf:"mtu,omitempty"` + + // The name of the peering location that this Express Route Port is physically mapped to. Changing this forces a new Express Route Port to be created. + PeeringLocation *string `json:"peeringLocation,omitempty" tf:"peering_location,omitempty"` + + // The name of the Resource Group where the Express Route Port should exist. Changing this forces a new Express Route Port to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A mapping of tags which should be assigned to the Express Route Port. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ExpressRoutePortParameters struct { + + // Bandwidth of the Express Route Port in Gbps. Changing this forces a new Express Route Port to be created. + // +kubebuilder:validation:Optional + BandwidthInGbps *float64 `json:"bandwidthInGbps,omitempty" tf:"bandwidth_in_gbps,omitempty"` + + // The billing type of the Express Route Port. Possible values are MeteredData and UnlimitedData. 
+ // +kubebuilder:validation:Optional + BillingType *string `json:"billingType,omitempty" tf:"billing_type,omitempty"` + + // The encapsulation method used for the Express Route Port. Changing this forces a new Express Route Port to be created. Possible values are: Dot1Q, QinQ. + // +kubebuilder:validation:Optional + Encapsulation *string `json:"encapsulation,omitempty" tf:"encapsulation,omitempty"` + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *ExpressRoutePortIdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // A list of link blocks as defined below. + // +kubebuilder:validation:Optional + Link1 *Link1Parameters `json:"link1,omitempty" tf:"link1,omitempty"` + + // A list of link blocks as defined below. + // +kubebuilder:validation:Optional + Link2 *Link2Parameters `json:"link2,omitempty" tf:"link2,omitempty"` + + // The Azure Region where the Express Route Port should exist. Changing this forces a new Express Route Port to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the peering location that this Express Route Port is physically mapped to. Changing this forces a new Express Route Port to be created. + // +kubebuilder:validation:Optional + PeeringLocation *string `json:"peeringLocation,omitempty" tf:"peering_location,omitempty"` + + // The name of the Resource Group where the Express Route Port should exist. Changing this forces a new Express Route Port to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. 
+ // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A mapping of tags which should be assigned to the Express Route Port. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type Link1InitParameters struct { + + // Whether enable administration state on the Express Route Port Link? Defaults to false. + AdminEnabled *bool `json:"adminEnabled,omitempty" tf:"admin_enabled,omitempty"` + + // The ID of the Key Vault Secret that contains the Mac security CAK key for this Express Route Port Link. + MacsecCakKeyvaultSecretID *string `json:"macsecCakKeyvaultSecretId,omitempty" tf:"macsec_cak_keyvault_secret_id,omitempty"` + + // The MACSec cipher used for this Express Route Port Link. Possible values are GcmAes128 and GcmAes256. Defaults to GcmAes128. + MacsecCipher *string `json:"macsecCipher,omitempty" tf:"macsec_cipher,omitempty"` + + // The ID of the Key Vault Secret that contains the MACSec CKN key for this Express Route Port Link. + MacsecCknKeyvaultSecretID *string `json:"macsecCknKeyvaultSecretId,omitempty" tf:"macsec_ckn_keyvault_secret_id,omitempty"` + + // Should Secure Channel Identifier on the Express Route Port Link be enabled? Defaults to false. + MacsecSciEnabled *bool `json:"macsecSciEnabled,omitempty" tf:"macsec_sci_enabled,omitempty"` +} + +type Link1Observation struct { + + // Whether enable administration state on the Express Route Port Link? Defaults to false. + AdminEnabled *bool `json:"adminEnabled,omitempty" tf:"admin_enabled,omitempty"` + + // The connector type of the Express Route Port Link. 
+ ConnectorType *string `json:"connectorType,omitempty" tf:"connector_type,omitempty"` + + // The ID of this Express Route Port Link. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The interface name of the Azure router associated with the Express Route Port Link. + InterfaceName *string `json:"interfaceName,omitempty" tf:"interface_name,omitempty"` + + // The ID of the Key Vault Secret that contains the Mac security CAK key for this Express Route Port Link. + MacsecCakKeyvaultSecretID *string `json:"macsecCakKeyvaultSecretId,omitempty" tf:"macsec_cak_keyvault_secret_id,omitempty"` + + // The MACSec cipher used for this Express Route Port Link. Possible values are GcmAes128 and GcmAes256. Defaults to GcmAes128. + MacsecCipher *string `json:"macsecCipher,omitempty" tf:"macsec_cipher,omitempty"` + + // The ID of the Key Vault Secret that contains the MACSec CKN key for this Express Route Port Link. + MacsecCknKeyvaultSecretID *string `json:"macsecCknKeyvaultSecretId,omitempty" tf:"macsec_ckn_keyvault_secret_id,omitempty"` + + // Should Secure Channel Identifier on the Express Route Port Link be enabled? Defaults to false. + MacsecSciEnabled *bool `json:"macsecSciEnabled,omitempty" tf:"macsec_sci_enabled,omitempty"` + + // The ID that maps from the Express Route Port Link to the patch panel port. + PatchPanelID *string `json:"patchPanelId,omitempty" tf:"patch_panel_id,omitempty"` + + // The ID that maps from the patch panel port to the rack. + RackID *string `json:"rackId,omitempty" tf:"rack_id,omitempty"` + + // The name of the Azure router associated with the Express Route Port Link. + RouterName *string `json:"routerName,omitempty" tf:"router_name,omitempty"` +} + +type Link1Parameters struct { + + // Whether enable administration state on the Express Route Port Link? Defaults to false. 
+ // +kubebuilder:validation:Optional + AdminEnabled *bool `json:"adminEnabled,omitempty" tf:"admin_enabled,omitempty"` + + // The ID of the Key Vault Secret that contains the Mac security CAK key for this Express Route Port Link. + // +kubebuilder:validation:Optional + MacsecCakKeyvaultSecretID *string `json:"macsecCakKeyvaultSecretId,omitempty" tf:"macsec_cak_keyvault_secret_id,omitempty"` + + // The MACSec cipher used for this Express Route Port Link. Possible values are GcmAes128 and GcmAes256. Defaults to GcmAes128. + // +kubebuilder:validation:Optional + MacsecCipher *string `json:"macsecCipher,omitempty" tf:"macsec_cipher,omitempty"` + + // The ID of the Key Vault Secret that contains the MACSec CKN key for this Express Route Port Link. + // +kubebuilder:validation:Optional + MacsecCknKeyvaultSecretID *string `json:"macsecCknKeyvaultSecretId,omitempty" tf:"macsec_ckn_keyvault_secret_id,omitempty"` + + // Should Secure Channel Identifier on the Express Route Port Link be enabled? Defaults to false. + // +kubebuilder:validation:Optional + MacsecSciEnabled *bool `json:"macsecSciEnabled,omitempty" tf:"macsec_sci_enabled,omitempty"` +} + +type Link2InitParameters struct { + + // Whether enable administration state on the Express Route Port Link? Defaults to false. + AdminEnabled *bool `json:"adminEnabled,omitempty" tf:"admin_enabled,omitempty"` + + // The ID of the Key Vault Secret that contains the Mac security CAK key for this Express Route Port Link. + MacsecCakKeyvaultSecretID *string `json:"macsecCakKeyvaultSecretId,omitempty" tf:"macsec_cak_keyvault_secret_id,omitempty"` + + // The MACSec cipher used for this Express Route Port Link. Possible values are GcmAes128 and GcmAes256. Defaults to GcmAes128. + MacsecCipher *string `json:"macsecCipher,omitempty" tf:"macsec_cipher,omitempty"` + + // The ID of the Key Vault Secret that contains the MACSec CKN key for this Express Route Port Link. 
+ MacsecCknKeyvaultSecretID *string `json:"macsecCknKeyvaultSecretId,omitempty" tf:"macsec_ckn_keyvault_secret_id,omitempty"` + + // Should Secure Channel Identifier on the Express Route Port Link be enabled? Defaults to false. + MacsecSciEnabled *bool `json:"macsecSciEnabled,omitempty" tf:"macsec_sci_enabled,omitempty"` +} + +type Link2Observation struct { + + // Whether enable administration state on the Express Route Port Link? Defaults to false. + AdminEnabled *bool `json:"adminEnabled,omitempty" tf:"admin_enabled,omitempty"` + + // The connector type of the Express Route Port Link. + ConnectorType *string `json:"connectorType,omitempty" tf:"connector_type,omitempty"` + + // The ID of this Express Route Port Link. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The interface name of the Azure router associated with the Express Route Port Link. + InterfaceName *string `json:"interfaceName,omitempty" tf:"interface_name,omitempty"` + + // The ID of the Key Vault Secret that contains the Mac security CAK key for this Express Route Port Link. + MacsecCakKeyvaultSecretID *string `json:"macsecCakKeyvaultSecretId,omitempty" tf:"macsec_cak_keyvault_secret_id,omitempty"` + + // The MACSec cipher used for this Express Route Port Link. Possible values are GcmAes128 and GcmAes256. Defaults to GcmAes128. + MacsecCipher *string `json:"macsecCipher,omitempty" tf:"macsec_cipher,omitempty"` + + // The ID of the Key Vault Secret that contains the MACSec CKN key for this Express Route Port Link. + MacsecCknKeyvaultSecretID *string `json:"macsecCknKeyvaultSecretId,omitempty" tf:"macsec_ckn_keyvault_secret_id,omitempty"` + + // Should Secure Channel Identifier on the Express Route Port Link be enabled? Defaults to false. + MacsecSciEnabled *bool `json:"macsecSciEnabled,omitempty" tf:"macsec_sci_enabled,omitempty"` + + // The ID that maps from the Express Route Port Link to the patch panel port. 
+ PatchPanelID *string `json:"patchPanelId,omitempty" tf:"patch_panel_id,omitempty"` + + // The ID that maps from the patch panel port to the rack. + RackID *string `json:"rackId,omitempty" tf:"rack_id,omitempty"` + + // The name of the Azure router associated with the Express Route Port Link. + RouterName *string `json:"routerName,omitempty" tf:"router_name,omitempty"` +} + +type Link2Parameters struct { + + // Whether enable administration state on the Express Route Port Link? Defaults to false. + // +kubebuilder:validation:Optional + AdminEnabled *bool `json:"adminEnabled,omitempty" tf:"admin_enabled,omitempty"` + + // The ID of the Key Vault Secret that contains the Mac security CAK key for this Express Route Port Link. + // +kubebuilder:validation:Optional + MacsecCakKeyvaultSecretID *string `json:"macsecCakKeyvaultSecretId,omitempty" tf:"macsec_cak_keyvault_secret_id,omitempty"` + + // The MACSec cipher used for this Express Route Port Link. Possible values are GcmAes128 and GcmAes256. Defaults to GcmAes128. + // +kubebuilder:validation:Optional + MacsecCipher *string `json:"macsecCipher,omitempty" tf:"macsec_cipher,omitempty"` + + // The ID of the Key Vault Secret that contains the MACSec CKN key for this Express Route Port Link. + // +kubebuilder:validation:Optional + MacsecCknKeyvaultSecretID *string `json:"macsecCknKeyvaultSecretId,omitempty" tf:"macsec_ckn_keyvault_secret_id,omitempty"` + + // Should Secure Channel Identifier on the Express Route Port Link be enabled? Defaults to false. + // +kubebuilder:validation:Optional + MacsecSciEnabled *bool `json:"macsecSciEnabled,omitempty" tf:"macsec_sci_enabled,omitempty"` +} + +// ExpressRoutePortSpec defines the desired state of ExpressRoutePort +type ExpressRoutePortSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ExpressRoutePortParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. 
+ // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ExpressRoutePortInitParameters `json:"initProvider,omitempty"` +} + +// ExpressRoutePortStatus defines the observed state of ExpressRoutePort. +type ExpressRoutePortStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ExpressRoutePortObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ExpressRoutePort is the Schema for the ExpressRoutePorts API. Manages a Express Route Port. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type ExpressRoutePort struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.bandwidthInGbps) || (has(self.initProvider) && has(self.initProvider.bandwidthInGbps))",message="spec.forProvider.bandwidthInGbps is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.encapsulation) || (has(self.initProvider) && has(self.initProvider.encapsulation))",message="spec.forProvider.encapsulation is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.peeringLocation) || (has(self.initProvider) && has(self.initProvider.peeringLocation))",message="spec.forProvider.peeringLocation is a required parameter" + Spec ExpressRoutePortSpec 
`json:"spec"` + Status ExpressRoutePortStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ExpressRoutePortList contains a list of ExpressRoutePorts +type ExpressRoutePortList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ExpressRoutePort `json:"items"` +} + +// Repository type metadata. +var ( + ExpressRoutePort_Kind = "ExpressRoutePort" + ExpressRoutePort_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ExpressRoutePort_Kind}.String() + ExpressRoutePort_KindAPIVersion = ExpressRoutePort_Kind + "." + CRDGroupVersion.String() + ExpressRoutePort_GroupVersionKind = CRDGroupVersion.WithKind(ExpressRoutePort_Kind) +) + +func init() { + SchemeBuilder.Register(&ExpressRoutePort{}, &ExpressRoutePortList{}) +} diff --git a/apis/network/v1beta2/zz_firewall_terraformed.go b/apis/network/v1beta2/zz_firewall_terraformed.go new file mode 100755 index 000000000..d326bdaec --- /dev/null +++ b/apis/network/v1beta2/zz_firewall_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Firewall +func (mg *Firewall) GetTerraformResourceType() string { + return "azurerm_firewall" +} + +// GetConnectionDetailsMapping for this Firewall +func (tr *Firewall) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Firewall +func (tr *Firewall) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Firewall +func (tr *Firewall) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Firewall +func (tr *Firewall) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Firewall +func (tr *Firewall) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Firewall +func (tr *Firewall) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Firewall +func (tr *Firewall) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) 
+} + +// GetInitParameters of this Firewall +func (tr *Firewall) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Firewall using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Firewall) LateInitialize(attrs []byte) (bool, error) { + params := &FirewallParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Firewall) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/network/v1beta2/zz_firewall_types.go b/apis/network/v1beta2/zz_firewall_types.go new file mode 100755 index 000000000..807b73266 --- /dev/null +++ b/apis/network/v1beta2/zz_firewall_types.go @@ -0,0 +1,423 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type FirewallIPConfigurationInitParameters struct { + + // Specifies the name of the IP Configuration. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The ID of the Public IP Address associated with the firewall. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.PublicIP + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + PublicIPAddressID *string `json:"publicIpAddressId,omitempty" tf:"public_ip_address_id,omitempty"` + + // Reference to a PublicIP in network to populate publicIpAddressId. + // +kubebuilder:validation:Optional + PublicIPAddressIDRef *v1.Reference `json:"publicIpAddressIdRef,omitempty" tf:"-"` + + // Selector for a PublicIP in network to populate publicIpAddressId. + // +kubebuilder:validation:Optional + PublicIPAddressIDSelector *v1.Selector `json:"publicIpAddressIdSelector,omitempty" tf:"-"` + + // Reference to the subnet associated with the IP Configuration. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` +} + +type FirewallIPConfigurationObservation struct { + + // Specifies the name of the IP Configuration. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Private IP address of the Azure Firewall. + PrivateIPAddress *string `json:"privateIpAddress,omitempty" tf:"private_ip_address,omitempty"` + + // The ID of the Public IP Address associated with the firewall. + PublicIPAddressID *string `json:"publicIpAddressId,omitempty" tf:"public_ip_address_id,omitempty"` + + // Reference to the subnet associated with the IP Configuration. Changing this forces a new resource to be created. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` +} + +type FirewallIPConfigurationParameters struct { + + // Specifies the name of the IP Configuration. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The ID of the Public IP Address associated with the firewall. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.PublicIP + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + PublicIPAddressID *string `json:"publicIpAddressId,omitempty" tf:"public_ip_address_id,omitempty"` + + // Reference to a PublicIP in network to populate publicIpAddressId. 
+ // +kubebuilder:validation:Optional + PublicIPAddressIDRef *v1.Reference `json:"publicIpAddressIdRef,omitempty" tf:"-"` + + // Selector for a PublicIP in network to populate publicIpAddressId. + // +kubebuilder:validation:Optional + PublicIPAddressIDSelector *v1.Selector `json:"publicIpAddressIdSelector,omitempty" tf:"-"` + + // Reference to the subnet associated with the IP Configuration. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` +} + +type FirewallInitParameters struct { + + // Whether DNS proxy is enabled. It will forward DNS requests to the DNS servers when set to true. It will be set to true if dns_servers provided with a not empty list. + DNSProxyEnabled *bool `json:"dnsProxyEnabled,omitempty" tf:"dns_proxy_enabled,omitempty"` + + // A list of DNS servers that the Azure Firewall will direct DNS traffic to the for name resolution. + DNSServers []*string `json:"dnsServers,omitempty" tf:"dns_servers,omitempty"` + + // The ID of the Firewall Policy applied to this Firewall. + FirewallPolicyID *string `json:"firewallPolicyId,omitempty" tf:"firewall_policy_id,omitempty"` + + // An ip_configuration block as documented below. 
+ IPConfiguration []FirewallIPConfigurationInitParameters `json:"ipConfiguration,omitempty" tf:"ip_configuration,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A management_ip_configuration block as documented below, which allows force-tunnelling of traffic to be performed by the firewall. Adding or removing this block or changing the subnet_id in an existing block forces a new resource to be created. Changing this forces a new resource to be created. + ManagementIPConfiguration *ManagementIPConfigurationInitParameters `json:"managementIpConfiguration,omitempty" tf:"management_ip_configuration,omitempty"` + + // A list of SNAT private CIDR IP ranges, or the special string IANAPrivateRanges, which indicates Azure Firewall does not SNAT when the destination IP address is a private range per IANA RFC 1918. + // +listType=set + PrivateIPRanges []*string `json:"privateIpRanges,omitempty" tf:"private_ip_ranges,omitempty"` + + // SKU name of the Firewall. Possible values are AZFW_Hub and AZFW_VNet. Changing this forces a new resource to be created. + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // SKU tier of the Firewall. Possible values are Premium, Standard and Basic. + SkuTier *string `json:"skuTier,omitempty" tf:"sku_tier,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The operation mode for threat intelligence-based filtering. Possible values are: Off, Alert and Deny. Defaults to Alert. + ThreatIntelMode *string `json:"threatIntelMode,omitempty" tf:"threat_intel_mode,omitempty"` + + // A virtual_hub block as documented below. 
+ VirtualHub *VirtualHubInitParameters `json:"virtualHub,omitempty" tf:"virtual_hub,omitempty"` + + // Specifies a list of Availability Zones in which this Azure Firewall should be located. Changing this forces a new Azure Firewall to be created. + // +listType=set + Zones []*string `json:"zones,omitempty" tf:"zones,omitempty"` +} + +type FirewallObservation struct { + + // Whether DNS proxy is enabled. It will forward DNS requests to the DNS servers when set to true. It will be set to true if dns_servers provided with a not empty list. + DNSProxyEnabled *bool `json:"dnsProxyEnabled,omitempty" tf:"dns_proxy_enabled,omitempty"` + + // A list of DNS servers that the Azure Firewall will direct DNS traffic to the for name resolution. + DNSServers []*string `json:"dnsServers,omitempty" tf:"dns_servers,omitempty"` + + // The ID of the Firewall Policy applied to this Firewall. + FirewallPolicyID *string `json:"firewallPolicyId,omitempty" tf:"firewall_policy_id,omitempty"` + + // The ID of the Azure Firewall. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An ip_configuration block as documented below. + IPConfiguration []FirewallIPConfigurationObservation `json:"ipConfiguration,omitempty" tf:"ip_configuration,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A management_ip_configuration block as documented below, which allows force-tunnelling of traffic to be performed by the firewall. Adding or removing this block or changing the subnet_id in an existing block forces a new resource to be created. Changing this forces a new resource to be created. 
+ ManagementIPConfiguration *ManagementIPConfigurationObservation `json:"managementIpConfiguration,omitempty" tf:"management_ip_configuration,omitempty"` + + // A list of SNAT private CIDR IP ranges, or the special string IANAPrivateRanges, which indicates Azure Firewall does not SNAT when the destination IP address is a private range per IANA RFC 1918. + // +listType=set + PrivateIPRanges []*string `json:"privateIpRanges,omitempty" tf:"private_ip_ranges,omitempty"` + + // The name of the resource group in which to create the resource. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // SKU name of the Firewall. Possible values are AZFW_Hub and AZFW_VNet. Changing this forces a new resource to be created. + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // SKU tier of the Firewall. Possible values are Premium, Standard and Basic. + SkuTier *string `json:"skuTier,omitempty" tf:"sku_tier,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The operation mode for threat intelligence-based filtering. Possible values are: Off, Alert and Deny. Defaults to Alert. + ThreatIntelMode *string `json:"threatIntelMode,omitempty" tf:"threat_intel_mode,omitempty"` + + // A virtual_hub block as documented below. + VirtualHub *VirtualHubObservation `json:"virtualHub,omitempty" tf:"virtual_hub,omitempty"` + + // Specifies a list of Availability Zones in which this Azure Firewall should be located. Changing this forces a new Azure Firewall to be created. + // +listType=set + Zones []*string `json:"zones,omitempty" tf:"zones,omitempty"` +} + +type FirewallParameters struct { + + // Whether DNS proxy is enabled. It will forward DNS requests to the DNS servers when set to true. It will be set to true if dns_servers provided with a not empty list. 
+ // +kubebuilder:validation:Optional + DNSProxyEnabled *bool `json:"dnsProxyEnabled,omitempty" tf:"dns_proxy_enabled,omitempty"` + + // A list of DNS servers that the Azure Firewall will direct DNS traffic to the for name resolution. + // +kubebuilder:validation:Optional + DNSServers []*string `json:"dnsServers,omitempty" tf:"dns_servers,omitempty"` + + // The ID of the Firewall Policy applied to this Firewall. + // +kubebuilder:validation:Optional + FirewallPolicyID *string `json:"firewallPolicyId,omitempty" tf:"firewall_policy_id,omitempty"` + + // An ip_configuration block as documented below. + // +kubebuilder:validation:Optional + IPConfiguration []FirewallIPConfigurationParameters `json:"ipConfiguration,omitempty" tf:"ip_configuration,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A management_ip_configuration block as documented below, which allows force-tunnelling of traffic to be performed by the firewall. Adding or removing this block or changing the subnet_id in an existing block forces a new resource to be created. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ManagementIPConfiguration *ManagementIPConfigurationParameters `json:"managementIpConfiguration,omitempty" tf:"management_ip_configuration,omitempty"` + + // A list of SNAT private CIDR IP ranges, or the special string IANAPrivateRanges, which indicates Azure Firewall does not SNAT when the destination IP address is a private range per IANA RFC 1918. + // +kubebuilder:validation:Optional + // +listType=set + PrivateIPRanges []*string `json:"privateIpRanges,omitempty" tf:"private_ip_ranges,omitempty"` + + // The name of the resource group in which to create the resource. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // SKU name of the Firewall. Possible values are AZFW_Hub and AZFW_VNet. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // SKU tier of the Firewall. Possible values are Premium, Standard and Basic. + // +kubebuilder:validation:Optional + SkuTier *string `json:"skuTier,omitempty" tf:"sku_tier,omitempty"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The operation mode for threat intelligence-based filtering. Possible values are: Off, Alert and Deny. Defaults to Alert. + // +kubebuilder:validation:Optional + ThreatIntelMode *string `json:"threatIntelMode,omitempty" tf:"threat_intel_mode,omitempty"` + + // A virtual_hub block as documented below. + // +kubebuilder:validation:Optional + VirtualHub *VirtualHubParameters `json:"virtualHub,omitempty" tf:"virtual_hub,omitempty"` + + // Specifies a list of Availability Zones in which this Azure Firewall should be located. Changing this forces a new Azure Firewall to be created. 
+ // +kubebuilder:validation:Optional + // +listType=set + Zones []*string `json:"zones,omitempty" tf:"zones,omitempty"` +} + +type ManagementIPConfigurationInitParameters struct { + + // Specifies the name of the IP Configuration. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The ID of the Public IP Address associated with the firewall. + PublicIPAddressID *string `json:"publicIpAddressId,omitempty" tf:"public_ip_address_id,omitempty"` + + // Reference to the subnet associated with the IP Configuration. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` +} + +type ManagementIPConfigurationObservation struct { + + // Specifies the name of the IP Configuration. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The private IP address associated with the Firewall. + PrivateIPAddress *string `json:"privateIpAddress,omitempty" tf:"private_ip_address,omitempty"` + + // The ID of the Public IP Address associated with the firewall. + PublicIPAddressID *string `json:"publicIpAddressId,omitempty" tf:"public_ip_address_id,omitempty"` + + // Reference to the subnet associated with the IP Configuration. Changing this forces a new resource to be created. 
+ SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` +} + +type ManagementIPConfigurationParameters struct { + + // Specifies the name of the IP Configuration. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The ID of the Public IP Address associated with the firewall. + // +kubebuilder:validation:Optional + PublicIPAddressID *string `json:"publicIpAddressId" tf:"public_ip_address_id,omitempty"` + + // Reference to the subnet associated with the IP Configuration. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` +} + +type VirtualHubInitParameters struct { + + // Specifies the number of public IPs to assign to the Firewall. Defaults to 1. + PublicIPCount *float64 `json:"publicIpCount,omitempty" tf:"public_ip_count,omitempty"` + + // Specifies the ID of the Virtual Hub where the Firewall resides in. + VirtualHubID *string `json:"virtualHubId,omitempty" tf:"virtual_hub_id,omitempty"` +} + +type VirtualHubObservation struct { + + // The private IP address associated with the Firewall. + PrivateIPAddress *string `json:"privateIpAddress,omitempty" tf:"private_ip_address,omitempty"` + + // The list of public IP addresses associated with the Firewall. 
+ PublicIPAddresses []*string `json:"publicIpAddresses,omitempty" tf:"public_ip_addresses,omitempty"` + + // Specifies the number of public IPs to assign to the Firewall. Defaults to 1. + PublicIPCount *float64 `json:"publicIpCount,omitempty" tf:"public_ip_count,omitempty"` + + // Specifies the ID of the Virtual Hub where the Firewall resides in. + VirtualHubID *string `json:"virtualHubId,omitempty" tf:"virtual_hub_id,omitempty"` +} + +type VirtualHubParameters struct { + + // Specifies the number of public IPs to assign to the Firewall. Defaults to 1. + // +kubebuilder:validation:Optional + PublicIPCount *float64 `json:"publicIpCount,omitempty" tf:"public_ip_count,omitempty"` + + // Specifies the ID of the Virtual Hub where the Firewall resides in. + // +kubebuilder:validation:Optional + VirtualHubID *string `json:"virtualHubId" tf:"virtual_hub_id,omitempty"` +} + +// FirewallSpec defines the desired state of Firewall +type FirewallSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider FirewallParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider FirewallInitParameters `json:"initProvider,omitempty"` +} + +// FirewallStatus defines the observed state of Firewall. 
+type FirewallStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider FirewallObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Firewall is the Schema for the Firewalls API. Manages an Azure Firewall. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type Firewall struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.skuName) || (has(self.initProvider) && has(self.initProvider.skuName))",message="spec.forProvider.skuName is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.skuTier) || (has(self.initProvider) && has(self.initProvider.skuTier))",message="spec.forProvider.skuTier is a required parameter" + Spec FirewallSpec `json:"spec"` + Status FirewallStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// 
FirewallList contains a list of Firewalls +type FirewallList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Firewall `json:"items"` +} + +// Repository type metadata. +var ( + Firewall_Kind = "Firewall" + Firewall_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Firewall_Kind}.String() + Firewall_KindAPIVersion = Firewall_Kind + "." + CRDGroupVersion.String() + Firewall_GroupVersionKind = CRDGroupVersion.WithKind(Firewall_Kind) +) + +func init() { + SchemeBuilder.Register(&Firewall{}, &FirewallList{}) +} diff --git a/apis/network/v1beta2/zz_firewallpolicy_terraformed.go b/apis/network/v1beta2/zz_firewallpolicy_terraformed.go new file mode 100755 index 000000000..0ce60fc39 --- /dev/null +++ b/apis/network/v1beta2/zz_firewallpolicy_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this FirewallPolicy +func (mg *FirewallPolicy) GetTerraformResourceType() string { + return "azurerm_firewall_policy" +} + +// GetConnectionDetailsMapping for this FirewallPolicy +func (tr *FirewallPolicy) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this FirewallPolicy +func (tr *FirewallPolicy) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this FirewallPolicy +func (tr *FirewallPolicy) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return 
json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this FirewallPolicy +func (tr *FirewallPolicy) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this FirewallPolicy +func (tr *FirewallPolicy) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this FirewallPolicy +func (tr *FirewallPolicy) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this FirewallPolicy +func (tr *FirewallPolicy) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this FirewallPolicy +func (tr *FirewallPolicy) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this FirewallPolicy using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *FirewallPolicy) LateInitialize(attrs []byte) (bool, error) { + params := &FirewallPolicyParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *FirewallPolicy) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/network/v1beta2/zz_firewallpolicy_types.go b/apis/network/v1beta2/zz_firewallpolicy_types.go new file mode 100755 index 000000000..8e72c9a77 --- /dev/null +++ b/apis/network/v1beta2/zz_firewallpolicy_types.go @@ -0,0 +1,725 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DNSInitParameters struct { + + // Whether to enable DNS proxy on Firewalls attached to this Firewall Policy? Defaults to false. + ProxyEnabled *bool `json:"proxyEnabled,omitempty" tf:"proxy_enabled,omitempty"` + + // A list of custom DNS servers' IP addresses. 
+ Servers []*string `json:"servers,omitempty" tf:"servers,omitempty"` +} + +type DNSObservation struct { + + // Whether to enable DNS proxy on Firewalls attached to this Firewall Policy? Defaults to false. + ProxyEnabled *bool `json:"proxyEnabled,omitempty" tf:"proxy_enabled,omitempty"` + + // A list of custom DNS servers' IP addresses. + Servers []*string `json:"servers,omitempty" tf:"servers,omitempty"` +} + +type DNSParameters struct { + + // Whether to enable DNS proxy on Firewalls attached to this Firewall Policy? Defaults to false. + // +kubebuilder:validation:Optional + ProxyEnabled *bool `json:"proxyEnabled,omitempty" tf:"proxy_enabled,omitempty"` + + // A list of custom DNS servers' IP addresses. + // +kubebuilder:validation:Optional + Servers []*string `json:"servers,omitempty" tf:"servers,omitempty"` +} + +type ExplicitProxyInitParameters struct { + + // Whether the pac file port and url need to be provided. + EnablePacFile *bool `json:"enablePacFile,omitempty" tf:"enable_pac_file,omitempty"` + + // Whether the explicit proxy is enabled for this Firewall Policy. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The port number for explicit http protocol. + HTTPPort *float64 `json:"httpPort,omitempty" tf:"http_port,omitempty"` + + // The port number for explicit proxy https protocol. + HTTPSPort *float64 `json:"httpsPort,omitempty" tf:"https_port,omitempty"` + + // Specifies a SAS URL for PAC file. + PacFile *string `json:"pacFile,omitempty" tf:"pac_file,omitempty"` + + // Specifies a port number for firewall to serve PAC file. + PacFilePort *float64 `json:"pacFilePort,omitempty" tf:"pac_file_port,omitempty"` +} + +type ExplicitProxyObservation struct { + + // Whether the pac file port and url need to be provided. + EnablePacFile *bool `json:"enablePacFile,omitempty" tf:"enable_pac_file,omitempty"` + + // Whether the explicit proxy is enabled for this Firewall Policy. 
+ Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The port number for explicit http protocol. + HTTPPort *float64 `json:"httpPort,omitempty" tf:"http_port,omitempty"` + + // The port number for explicit proxy https protocol. + HTTPSPort *float64 `json:"httpsPort,omitempty" tf:"https_port,omitempty"` + + // Specifies a SAS URL for PAC file. + PacFile *string `json:"pacFile,omitempty" tf:"pac_file,omitempty"` + + // Specifies a port number for firewall to serve PAC file. + PacFilePort *float64 `json:"pacFilePort,omitempty" tf:"pac_file_port,omitempty"` +} + +type ExplicitProxyParameters struct { + + // Whether the pac file port and url need to be provided. + // +kubebuilder:validation:Optional + EnablePacFile *bool `json:"enablePacFile,omitempty" tf:"enable_pac_file,omitempty"` + + // Whether the explicit proxy is enabled for this Firewall Policy. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The port number for explicit http protocol. + // +kubebuilder:validation:Optional + HTTPPort *float64 `json:"httpPort,omitempty" tf:"http_port,omitempty"` + + // The port number for explicit proxy https protocol. + // +kubebuilder:validation:Optional + HTTPSPort *float64 `json:"httpsPort,omitempty" tf:"https_port,omitempty"` + + // Specifies a SAS URL for PAC file. + // +kubebuilder:validation:Optional + PacFile *string `json:"pacFile,omitempty" tf:"pac_file,omitempty"` + + // Specifies a port number for firewall to serve PAC file. + // +kubebuilder:validation:Optional + PacFilePort *float64 `json:"pacFilePort,omitempty" tf:"pac_file_port,omitempty"` +} + +type FirewallPolicyIdentityInitParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Firewall Policy. 
+ // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Firewall Policy. Only possible value is UserAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type FirewallPolicyIdentityObservation struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Firewall Policy. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The ID of the Firewall Policy. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The ID of the Firewall Policy. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Firewall Policy. Only possible value is UserAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type FirewallPolicyIdentityParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Firewall Policy. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Firewall Policy. Only possible value is UserAssigned. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type FirewallPolicyInitParameters struct { + + // Whether enable auto learn private ip range. + AutoLearnPrivateRangesEnabled *bool `json:"autoLearnPrivateRangesEnabled,omitempty" tf:"auto_learn_private_ranges_enabled,omitempty"` + + // The ID of the base Firewall Policy. + BasePolicyID *string `json:"basePolicyId,omitempty" tf:"base_policy_id,omitempty"` + + // A dns block as defined below. 
+ DNS *DNSInitParameters `json:"dns,omitempty" tf:"dns,omitempty"` + + // A explicit_proxy block as defined below. + ExplicitProxy *ExplicitProxyInitParameters `json:"explicitProxy,omitempty" tf:"explicit_proxy,omitempty"` + + // An identity block as defined below. + Identity *FirewallPolicyIdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // An insights block as defined below. + Insights *InsightsInitParameters `json:"insights,omitempty" tf:"insights,omitempty"` + + // A intrusion_detection block as defined below. + IntrusionDetection *IntrusionDetectionInitParameters `json:"intrusionDetection,omitempty" tf:"intrusion_detection,omitempty"` + + // The Azure Region where the Firewall Policy should exist. Changing this forces a new Firewall Policy to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A list of private IP ranges to which traffic will not be SNAT. + PrivateIPRanges []*string `json:"privateIpRanges,omitempty" tf:"private_ip_ranges,omitempty"` + + // Whether SQL Redirect traffic filtering is allowed. Enabling this flag requires no rule using ports between 11000-11999. + SQLRedirectAllowed *bool `json:"sqlRedirectAllowed,omitempty" tf:"sql_redirect_allowed,omitempty"` + + // The SKU Tier of the Firewall Policy. Possible values are Standard, Premium and Basic. Changing this forces a new Firewall Policy to be created. + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // A tls_certificate block as defined below. + TLSCertificate *TLSCertificateInitParameters `json:"tlsCertificate,omitempty" tf:"tls_certificate,omitempty"` + + // A mapping of tags which should be assigned to the Firewall Policy. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A threat_intelligence_allowlist block as defined below. 
+ ThreatIntelligenceAllowlist *ThreatIntelligenceAllowlistInitParameters `json:"threatIntelligenceAllowlist,omitempty" tf:"threat_intelligence_allowlist,omitempty"` + + // The operation mode for Threat Intelligence. Possible values are Alert, Deny and Off. Defaults to Alert. + ThreatIntelligenceMode *string `json:"threatIntelligenceMode,omitempty" tf:"threat_intelligence_mode,omitempty"` +} + +type FirewallPolicyObservation struct { + + // Whether enable auto learn private ip range. + AutoLearnPrivateRangesEnabled *bool `json:"autoLearnPrivateRangesEnabled,omitempty" tf:"auto_learn_private_ranges_enabled,omitempty"` + + // The ID of the base Firewall Policy. + BasePolicyID *string `json:"basePolicyId,omitempty" tf:"base_policy_id,omitempty"` + + // A list of reference to child Firewall Policies of this Firewall Policy. + ChildPolicies []*string `json:"childPolicies,omitempty" tf:"child_policies,omitempty"` + + // A dns block as defined below. + DNS *DNSObservation `json:"dns,omitempty" tf:"dns,omitempty"` + + // A explicit_proxy block as defined below. + ExplicitProxy *ExplicitProxyObservation `json:"explicitProxy,omitempty" tf:"explicit_proxy,omitempty"` + + // A list of references to Azure Firewalls that this Firewall Policy is associated with. + Firewalls []*string `json:"firewalls,omitempty" tf:"firewalls,omitempty"` + + // The ID of the Firewall Policy. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. + Identity *FirewallPolicyIdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // An insights block as defined below. + Insights *InsightsObservation `json:"insights,omitempty" tf:"insights,omitempty"` + + // A intrusion_detection block as defined below. + IntrusionDetection *IntrusionDetectionObservation `json:"intrusionDetection,omitempty" tf:"intrusion_detection,omitempty"` + + // The Azure Region where the Firewall Policy should exist. 
Changing this forces a new Firewall Policy to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A list of private IP ranges to which traffic will not be SNAT. + PrivateIPRanges []*string `json:"privateIpRanges,omitempty" tf:"private_ip_ranges,omitempty"` + + // The name of the Resource Group where the Firewall Policy should exist. Changing this forces a new Firewall Policy to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A list of references to Firewall Policy Rule Collection Groups that belongs to this Firewall Policy. + RuleCollectionGroups []*string `json:"ruleCollectionGroups,omitempty" tf:"rule_collection_groups,omitempty"` + + // Whether SQL Redirect traffic filtering is allowed. Enabling this flag requires no rule using ports between 11000-11999. + SQLRedirectAllowed *bool `json:"sqlRedirectAllowed,omitempty" tf:"sql_redirect_allowed,omitempty"` + + // The SKU Tier of the Firewall Policy. Possible values are Standard, Premium and Basic. Changing this forces a new Firewall Policy to be created. + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // A tls_certificate block as defined below. + TLSCertificate *TLSCertificateObservation `json:"tlsCertificate,omitempty" tf:"tls_certificate,omitempty"` + + // A mapping of tags which should be assigned to the Firewall Policy. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A threat_intelligence_allowlist block as defined below. + ThreatIntelligenceAllowlist *ThreatIntelligenceAllowlistObservation `json:"threatIntelligenceAllowlist,omitempty" tf:"threat_intelligence_allowlist,omitempty"` + + // The operation mode for Threat Intelligence. Possible values are Alert, Deny and Off. Defaults to Alert. 
+ ThreatIntelligenceMode *string `json:"threatIntelligenceMode,omitempty" tf:"threat_intelligence_mode,omitempty"` +} + +type FirewallPolicyParameters struct { + + // Whether enable auto learn private ip range. + // +kubebuilder:validation:Optional + AutoLearnPrivateRangesEnabled *bool `json:"autoLearnPrivateRangesEnabled,omitempty" tf:"auto_learn_private_ranges_enabled,omitempty"` + + // The ID of the base Firewall Policy. + // +kubebuilder:validation:Optional + BasePolicyID *string `json:"basePolicyId,omitempty" tf:"base_policy_id,omitempty"` + + // A dns block as defined below. + // +kubebuilder:validation:Optional + DNS *DNSParameters `json:"dns,omitempty" tf:"dns,omitempty"` + + // A explicit_proxy block as defined below. + // +kubebuilder:validation:Optional + ExplicitProxy *ExplicitProxyParameters `json:"explicitProxy,omitempty" tf:"explicit_proxy,omitempty"` + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *FirewallPolicyIdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // An insights block as defined below. + // +kubebuilder:validation:Optional + Insights *InsightsParameters `json:"insights,omitempty" tf:"insights,omitempty"` + + // A intrusion_detection block as defined below. + // +kubebuilder:validation:Optional + IntrusionDetection *IntrusionDetectionParameters `json:"intrusionDetection,omitempty" tf:"intrusion_detection,omitempty"` + + // The Azure Region where the Firewall Policy should exist. Changing this forces a new Firewall Policy to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A list of private IP ranges to which traffic will not be SNAT. + // +kubebuilder:validation:Optional + PrivateIPRanges []*string `json:"privateIpRanges,omitempty" tf:"private_ip_ranges,omitempty"` + + // The name of the Resource Group where the Firewall Policy should exist. 
Changing this forces a new Firewall Policy to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // Whether SQL Redirect traffic filtering is allowed. Enabling this flag requires no rule using ports between 11000-11999. + // +kubebuilder:validation:Optional + SQLRedirectAllowed *bool `json:"sqlRedirectAllowed,omitempty" tf:"sql_redirect_allowed,omitempty"` + + // The SKU Tier of the Firewall Policy. Possible values are Standard, Premium and Basic. Changing this forces a new Firewall Policy to be created. + // +kubebuilder:validation:Optional + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // A tls_certificate block as defined below. + // +kubebuilder:validation:Optional + TLSCertificate *TLSCertificateParameters `json:"tlsCertificate,omitempty" tf:"tls_certificate,omitempty"` + + // A mapping of tags which should be assigned to the Firewall Policy. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A threat_intelligence_allowlist block as defined below. + // +kubebuilder:validation:Optional + ThreatIntelligenceAllowlist *ThreatIntelligenceAllowlistParameters `json:"threatIntelligenceAllowlist,omitempty" tf:"threat_intelligence_allowlist,omitempty"` + + // The operation mode for Threat Intelligence. Possible values are Alert, Deny and Off. Defaults to Alert. 
+ // +kubebuilder:validation:Optional + ThreatIntelligenceMode *string `json:"threatIntelligenceMode,omitempty" tf:"threat_intelligence_mode,omitempty"` +} + +type InsightsInitParameters struct { + + // The ID of the default Log Analytics Workspace that the Firewalls associated with this Firewall Policy will send their logs to, when there is no location matches in the log_analytics_workspace. + DefaultLogAnalyticsWorkspaceID *string `json:"defaultLogAnalyticsWorkspaceId,omitempty" tf:"default_log_analytics_workspace_id,omitempty"` + + // Whether the insights functionality is enabled for this Firewall Policy. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // A list of log_analytics_workspace block as defined below. + LogAnalyticsWorkspace []LogAnalyticsWorkspaceInitParameters `json:"logAnalyticsWorkspace,omitempty" tf:"log_analytics_workspace,omitempty"` + + // The log retention period in days. + RetentionInDays *float64 `json:"retentionInDays,omitempty" tf:"retention_in_days,omitempty"` +} + +type InsightsObservation struct { + + // The ID of the default Log Analytics Workspace that the Firewalls associated with this Firewall Policy will send their logs to, when there is no location matches in the log_analytics_workspace. + DefaultLogAnalyticsWorkspaceID *string `json:"defaultLogAnalyticsWorkspaceId,omitempty" tf:"default_log_analytics_workspace_id,omitempty"` + + // Whether the insights functionality is enabled for this Firewall Policy. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // A list of log_analytics_workspace block as defined below. + LogAnalyticsWorkspace []LogAnalyticsWorkspaceObservation `json:"logAnalyticsWorkspace,omitempty" tf:"log_analytics_workspace,omitempty"` + + // The log retention period in days. 
+ RetentionInDays *float64 `json:"retentionInDays,omitempty" tf:"retention_in_days,omitempty"` +} + +type InsightsParameters struct { + + // The ID of the default Log Analytics Workspace that the Firewalls associated with this Firewall Policy will send their logs to, when there is no location matches in the log_analytics_workspace. + // +kubebuilder:validation:Optional + DefaultLogAnalyticsWorkspaceID *string `json:"defaultLogAnalyticsWorkspaceId" tf:"default_log_analytics_workspace_id,omitempty"` + + // Whether the insights functionality is enabled for this Firewall Policy. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` + + // A list of log_analytics_workspace block as defined below. + // +kubebuilder:validation:Optional + LogAnalyticsWorkspace []LogAnalyticsWorkspaceParameters `json:"logAnalyticsWorkspace,omitempty" tf:"log_analytics_workspace,omitempty"` + + // The log retention period in days. + // +kubebuilder:validation:Optional + RetentionInDays *float64 `json:"retentionInDays,omitempty" tf:"retention_in_days,omitempty"` +} + +type IntrusionDetectionInitParameters struct { + + // In which mode you want to run intrusion detection: Off, Alert or Deny. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // A list of Private IP address ranges to identify traffic direction. By default, only ranges defined by IANA RFC 1918 are considered private IP addresses. + PrivateRanges []*string `json:"privateRanges,omitempty" tf:"private_ranges,omitempty"` + + // One or more signature_overrides blocks as defined below. + SignatureOverrides []SignatureOverridesInitParameters `json:"signatureOverrides,omitempty" tf:"signature_overrides,omitempty"` + + // One or more traffic_bypass blocks as defined below. 
+ TrafficBypass []TrafficBypassInitParameters `json:"trafficBypass,omitempty" tf:"traffic_bypass,omitempty"` +} + +type IntrusionDetectionObservation struct { + + // In which mode you want to run intrusion detection: Off, Alert or Deny. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // A list of Private IP address ranges to identify traffic direction. By default, only ranges defined by IANA RFC 1918 are considered private IP addresses. + PrivateRanges []*string `json:"privateRanges,omitempty" tf:"private_ranges,omitempty"` + + // One or more signature_overrides blocks as defined below. + SignatureOverrides []SignatureOverridesObservation `json:"signatureOverrides,omitempty" tf:"signature_overrides,omitempty"` + + // One or more traffic_bypass blocks as defined below. + TrafficBypass []TrafficBypassObservation `json:"trafficBypass,omitempty" tf:"traffic_bypass,omitempty"` +} + +type IntrusionDetectionParameters struct { + + // In which mode you want to run intrusion detection: Off, Alert or Deny. + // +kubebuilder:validation:Optional + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // A list of Private IP address ranges to identify traffic direction. By default, only ranges defined by IANA RFC 1918 are considered private IP addresses. + // +kubebuilder:validation:Optional + PrivateRanges []*string `json:"privateRanges,omitempty" tf:"private_ranges,omitempty"` + + // One or more signature_overrides blocks as defined below. + // +kubebuilder:validation:Optional + SignatureOverrides []SignatureOverridesParameters `json:"signatureOverrides,omitempty" tf:"signature_overrides,omitempty"` + + // One or more traffic_bypass blocks as defined below. 
+ // +kubebuilder:validation:Optional + TrafficBypass []TrafficBypassParameters `json:"trafficBypass,omitempty" tf:"traffic_bypass,omitempty"` +} + +type LogAnalyticsWorkspaceInitParameters struct { + + // The location of the Firewalls, that when matches this Log Analytics Workspace will be used to consume their logs. + FirewallLocation *string `json:"firewallLocation,omitempty" tf:"firewall_location,omitempty"` + + // 12-digit number (id) which identifies your signature. + ID *string `json:"id,omitempty" tf:"id,omitempty"` +} + +type LogAnalyticsWorkspaceObservation struct { + + // The location of the Firewalls, that when matches this Log Analytics Workspace will be used to consume their logs. + FirewallLocation *string `json:"firewallLocation,omitempty" tf:"firewall_location,omitempty"` + + // 12-digit number (id) which identifies your signature. + ID *string `json:"id,omitempty" tf:"id,omitempty"` +} + +type LogAnalyticsWorkspaceParameters struct { + + // The location of the Firewalls, that when matches this Log Analytics Workspace will be used to consume their logs. + // +kubebuilder:validation:Optional + FirewallLocation *string `json:"firewallLocation" tf:"firewall_location,omitempty"` + + // 12-digit number (id) which identifies your signature. + // +kubebuilder:validation:Optional + ID *string `json:"id" tf:"id,omitempty"` +} + +type SignatureOverridesInitParameters struct { + + // 12-digit number (id) which identifies your signature. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // state can be any of Off, Alert or Deny. + State *string `json:"state,omitempty" tf:"state,omitempty"` +} + +type SignatureOverridesObservation struct { + + // 12-digit number (id) which identifies your signature. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // state can be any of Off, Alert or Deny. 
+ State *string `json:"state,omitempty" tf:"state,omitempty"` +} + +type SignatureOverridesParameters struct { + + // 12-digit number (id) which identifies your signature. + // +kubebuilder:validation:Optional + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // state can be any of Off, Alert or Deny. + // +kubebuilder:validation:Optional + State *string `json:"state,omitempty" tf:"state,omitempty"` +} + +type TLSCertificateInitParameters struct { + + // The ID of the Key Vault, where the secret or certificate is stored. + KeyVaultSecretID *string `json:"keyVaultSecretId,omitempty" tf:"key_vault_secret_id,omitempty"` + + // The name of the certificate. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type TLSCertificateObservation struct { + + // The ID of the Key Vault, where the secret or certificate is stored. + KeyVaultSecretID *string `json:"keyVaultSecretId,omitempty" tf:"key_vault_secret_id,omitempty"` + + // The name of the certificate. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type TLSCertificateParameters struct { + + // The ID of the Key Vault, where the secret or certificate is stored. + // +kubebuilder:validation:Optional + KeyVaultSecretID *string `json:"keyVaultSecretId" tf:"key_vault_secret_id,omitempty"` + + // The name of the certificate. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type ThreatIntelligenceAllowlistInitParameters struct { + + // A list of FQDNs that will be skipped for threat detection. + // +listType=set + Fqdns []*string `json:"fqdns,omitempty" tf:"fqdns,omitempty"` + + // A list of IP addresses or CIDR ranges that will be skipped for threat detection. + // +listType=set + IPAddresses []*string `json:"ipAddresses,omitempty" tf:"ip_addresses,omitempty"` +} + +type ThreatIntelligenceAllowlistObservation struct { + + // A list of FQDNs that will be skipped for threat detection. 
+ // +listType=set + Fqdns []*string `json:"fqdns,omitempty" tf:"fqdns,omitempty"` + + // A list of IP addresses or CIDR ranges that will be skipped for threat detection. + // +listType=set + IPAddresses []*string `json:"ipAddresses,omitempty" tf:"ip_addresses,omitempty"` +} + +type ThreatIntelligenceAllowlistParameters struct { + + // A list of FQDNs that will be skipped for threat detection. + // +kubebuilder:validation:Optional + // +listType=set + Fqdns []*string `json:"fqdns,omitempty" tf:"fqdns,omitempty"` + + // A list of IP addresses or CIDR ranges that will be skipped for threat detection. + // +kubebuilder:validation:Optional + // +listType=set + IPAddresses []*string `json:"ipAddresses,omitempty" tf:"ip_addresses,omitempty"` +} + +type TrafficBypassInitParameters struct { + + // The description for this bypass traffic setting. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Specifies a list of destination IP addresses that shall be bypassed by intrusion detection. + // +listType=set + DestinationAddresses []*string `json:"destinationAddresses,omitempty" tf:"destination_addresses,omitempty"` + + // Specifies a list of destination IP groups that shall be bypassed by intrusion detection. + // +listType=set + DestinationIPGroups []*string `json:"destinationIpGroups,omitempty" tf:"destination_ip_groups,omitempty"` + + // Specifies a list of destination IP ports that shall be bypassed by intrusion detection. + // +listType=set + DestinationPorts []*string `json:"destinationPorts,omitempty" tf:"destination_ports,omitempty"` + + // The name which should be used for this bypass traffic setting. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The protocols any of ANY, TCP, ICMP, UDP that shall be bypassed by intrusion detection. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // Specifies a list of source addresses that shall be bypassed by intrusion detection. 
+ // +listType=set + SourceAddresses []*string `json:"sourceAddresses,omitempty" tf:"source_addresses,omitempty"` + + // Specifies a list of source IP groups that shall be bypassed by intrusion detection. + // +listType=set + SourceIPGroups []*string `json:"sourceIpGroups,omitempty" tf:"source_ip_groups,omitempty"` +} + +type TrafficBypassObservation struct { + + // The description for this bypass traffic setting. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Specifies a list of destination IP addresses that shall be bypassed by intrusion detection. + // +listType=set + DestinationAddresses []*string `json:"destinationAddresses,omitempty" tf:"destination_addresses,omitempty"` + + // Specifies a list of destination IP groups that shall be bypassed by intrusion detection. + // +listType=set + DestinationIPGroups []*string `json:"destinationIpGroups,omitempty" tf:"destination_ip_groups,omitempty"` + + // Specifies a list of destination IP ports that shall be bypassed by intrusion detection. + // +listType=set + DestinationPorts []*string `json:"destinationPorts,omitempty" tf:"destination_ports,omitempty"` + + // The name which should be used for this bypass traffic setting. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The protocols any of ANY, TCP, ICMP, UDP that shall be bypassed by intrusion detection. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // Specifies a list of source addresses that shall be bypassed by intrusion detection. + // +listType=set + SourceAddresses []*string `json:"sourceAddresses,omitempty" tf:"source_addresses,omitempty"` + + // Specifies a list of source IP groups that shall be bypassed by intrusion detection. + // +listType=set + SourceIPGroups []*string `json:"sourceIpGroups,omitempty" tf:"source_ip_groups,omitempty"` +} + +type TrafficBypassParameters struct { + + // The description for this bypass traffic setting. 
+ // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Specifies a list of destination IP addresses that shall be bypassed by intrusion detection. + // +kubebuilder:validation:Optional + // +listType=set + DestinationAddresses []*string `json:"destinationAddresses,omitempty" tf:"destination_addresses,omitempty"` + + // Specifies a list of destination IP groups that shall be bypassed by intrusion detection. + // +kubebuilder:validation:Optional + // +listType=set + DestinationIPGroups []*string `json:"destinationIpGroups,omitempty" tf:"destination_ip_groups,omitempty"` + + // Specifies a list of destination IP ports that shall be bypassed by intrusion detection. + // +kubebuilder:validation:Optional + // +listType=set + DestinationPorts []*string `json:"destinationPorts,omitempty" tf:"destination_ports,omitempty"` + + // The name which should be used for this bypass traffic setting. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The protocols any of ANY, TCP, ICMP, UDP that shall be bypassed by intrusion detection. + // +kubebuilder:validation:Optional + Protocol *string `json:"protocol" tf:"protocol,omitempty"` + + // Specifies a list of source addresses that shall be bypassed by intrusion detection. + // +kubebuilder:validation:Optional + // +listType=set + SourceAddresses []*string `json:"sourceAddresses,omitempty" tf:"source_addresses,omitempty"` + + // Specifies a list of source IP groups that shall be bypassed by intrusion detection. + // +kubebuilder:validation:Optional + // +listType=set + SourceIPGroups []*string `json:"sourceIpGroups,omitempty" tf:"source_ip_groups,omitempty"` +} + +// FirewallPolicySpec defines the desired state of FirewallPolicy +type FirewallPolicySpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider FirewallPolicyParameters `json:"forProvider"` + // THIS IS A BETA FIELD. 
It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider FirewallPolicyInitParameters `json:"initProvider,omitempty"` +} + +// FirewallPolicyStatus defines the observed state of FirewallPolicy. +type FirewallPolicyStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider FirewallPolicyObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// FirewallPolicy is the Schema for the FirewallPolicys API. Manages a Firewall Policy. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type FirewallPolicy struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + Spec FirewallPolicySpec `json:"spec"` + Status FirewallPolicyStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// FirewallPolicyList contains a list of FirewallPolicys +type FirewallPolicyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []FirewallPolicy `json:"items"` +} + +// Repository type metadata. +var ( + FirewallPolicy_Kind = "FirewallPolicy" + FirewallPolicy_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: FirewallPolicy_Kind}.String() + FirewallPolicy_KindAPIVersion = FirewallPolicy_Kind + "." 
+ CRDGroupVersion.String() + FirewallPolicy_GroupVersionKind = CRDGroupVersion.WithKind(FirewallPolicy_Kind) +) + +func init() { + SchemeBuilder.Register(&FirewallPolicy{}, &FirewallPolicyList{}) +} diff --git a/apis/network/v1beta2/zz_frontdoor_terraformed.go b/apis/network/v1beta2/zz_frontdoor_terraformed.go new file mode 100755 index 000000000..f3b97e3de --- /dev/null +++ b/apis/network/v1beta2/zz_frontdoor_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this FrontDoor +func (mg *FrontDoor) GetTerraformResourceType() string { + return "azurerm_frontdoor" +} + +// GetConnectionDetailsMapping for this FrontDoor +func (tr *FrontDoor) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this FrontDoor +func (tr *FrontDoor) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this FrontDoor +func (tr *FrontDoor) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this FrontDoor +func (tr *FrontDoor) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this FrontDoor +func (tr *FrontDoor) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + 
} + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this FrontDoor +func (tr *FrontDoor) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this FrontDoor +func (tr *FrontDoor) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this FrontDoor +func (tr *FrontDoor) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this FrontDoor using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *FrontDoor) LateInitialize(attrs []byte) (bool, error) { + params := &FrontDoorParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *FrontDoor) GetTerraformSchemaVersion() int { + return 2 +} diff --git a/apis/network/v1beta2/zz_frontdoor_types.go b/apis/network/v1beta2/zz_frontdoor_types.go new file mode 100755 index 000000000..9f1712ffe --- /dev/null +++ b/apis/network/v1beta2/zz_frontdoor_types.go @@ -0,0 +1,832 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type BackendInitParameters struct { + + // Location of the backend (IP address or FQDN) + Address *string `json:"address,omitempty" tf:"address,omitempty"` + + // Enable or Disable use of this Backend Routing Rule. Permitted values are true or false. Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The HTTP TCP port number. Possible values are between 1 - 65535. + HTTPPort *float64 `json:"httpPort,omitempty" tf:"http_port,omitempty"` + + // The HTTPS TCP port number. Possible values are between 1 - 65535. + HTTPSPort *float64 `json:"httpsPort,omitempty" tf:"https_port,omitempty"` + + // The value to use as the host header sent to the backend. 
+ HostHeader *string `json:"hostHeader,omitempty" tf:"host_header,omitempty"` + + // Priority to use for load balancing. Higher priorities will not be used for load balancing if any lower priority backend is healthy. Defaults to 1. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // Weight of this endpoint for load balancing purposes. Defaults to 50. + Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` +} + +type BackendObservation struct { + + // Location of the backend (IP address or FQDN) + Address *string `json:"address,omitempty" tf:"address,omitempty"` + + // Enable or Disable use of this Backend Routing Rule. Permitted values are true or false. Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The HTTP TCP port number. Possible values are between 1 - 65535. + HTTPPort *float64 `json:"httpPort,omitempty" tf:"http_port,omitempty"` + + // The HTTPS TCP port number. Possible values are between 1 - 65535. + HTTPSPort *float64 `json:"httpsPort,omitempty" tf:"https_port,omitempty"` + + // The value to use as the host header sent to the backend. + HostHeader *string `json:"hostHeader,omitempty" tf:"host_header,omitempty"` + + // Priority to use for load balancing. Higher priorities will not be used for load balancing if any lower priority backend is healthy. Defaults to 1. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // Weight of this endpoint for load balancing purposes. Defaults to 50. + Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` +} + +type BackendParameters struct { + + // Location of the backend (IP address or FQDN) + // +kubebuilder:validation:Optional + Address *string `json:"address" tf:"address,omitempty"` + + // Enable or Disable use of this Backend Routing Rule. Permitted values are true or false. Defaults to true. 
+ // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The HTTP TCP port number. Possible values are between 1 - 65535. + // +kubebuilder:validation:Optional + HTTPPort *float64 `json:"httpPort" tf:"http_port,omitempty"` + + // The HTTPS TCP port number. Possible values are between 1 - 65535. + // +kubebuilder:validation:Optional + HTTPSPort *float64 `json:"httpsPort" tf:"https_port,omitempty"` + + // The value to use as the host header sent to the backend. + // +kubebuilder:validation:Optional + HostHeader *string `json:"hostHeader" tf:"host_header,omitempty"` + + // Priority to use for load balancing. Higher priorities will not be used for load balancing if any lower priority backend is healthy. Defaults to 1. + // +kubebuilder:validation:Optional + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // Weight of this endpoint for load balancing purposes. Defaults to 50. + // +kubebuilder:validation:Optional + Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` +} + +type BackendPoolHealthProbeInitParameters struct { + + // Is this health probe enabled? Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The number of seconds between each Health Probe. Defaults to 120. + IntervalInSeconds *float64 `json:"intervalInSeconds,omitempty" tf:"interval_in_seconds,omitempty"` + + // Specifies the name of the Health Probe. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The path to use for the Health Probe. Default is /. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // Specifies HTTP method the health probe uses when querying the backend pool instances. Possible values include: GET and HEAD. Defaults to GET. + ProbeMethod *string `json:"probeMethod,omitempty" tf:"probe_method,omitempty"` + + // Protocol scheme to use for the Health Probe. Possible values are Http and Https. Defaults to Http. 
+ Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` +} + +type BackendPoolHealthProbeObservation struct { + + // Is this health probe enabled? Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The ID of the Azure Front Door Backend. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The number of seconds between each Health Probe. Defaults to 120. + IntervalInSeconds *float64 `json:"intervalInSeconds,omitempty" tf:"interval_in_seconds,omitempty"` + + // Specifies the name of the Health Probe. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The path to use for the Health Probe. Default is /. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // Specifies HTTP method the health probe uses when querying the backend pool instances. Possible values include: GET and HEAD. Defaults to GET. + ProbeMethod *string `json:"probeMethod,omitempty" tf:"probe_method,omitempty"` + + // Protocol scheme to use for the Health Probe. Possible values are Http and Https. Defaults to Http. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` +} + +type BackendPoolHealthProbeParameters struct { + + // Is this health probe enabled? Defaults to true. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The number of seconds between each Health Probe. Defaults to 120. + // +kubebuilder:validation:Optional + IntervalInSeconds *float64 `json:"intervalInSeconds,omitempty" tf:"interval_in_seconds,omitempty"` + + // Specifies the name of the Health Probe. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The path to use for the Health Probe. Default is /. + // +kubebuilder:validation:Optional + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // Specifies HTTP method the health probe uses when querying the backend pool instances. Possible values include: GET and HEAD. 
Defaults to GET. + // +kubebuilder:validation:Optional + ProbeMethod *string `json:"probeMethod,omitempty" tf:"probe_method,omitempty"` + + // Protocol scheme to use for the Health Probe. Possible values are Http and Https. Defaults to Http. + // +kubebuilder:validation:Optional + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` +} + +type BackendPoolInitParameters struct { + + // A backend block as defined below. + Backend []BackendInitParameters `json:"backend,omitempty" tf:"backend,omitempty"` + + // Specifies the name of the backend_pool_health_probe block within this resource to use for this Backend Pool. + HealthProbeName *string `json:"healthProbeName,omitempty" tf:"health_probe_name,omitempty"` + + // Specifies the name of the backend_pool_load_balancing block within this resource to use for this Backend Pool. + LoadBalancingName *string `json:"loadBalancingName,omitempty" tf:"load_balancing_name,omitempty"` + + // Specifies the name of the Backend Pool. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type BackendPoolLoadBalancingInitParameters struct { + + // The additional latency in milliseconds for probes to fall into the lowest latency bucket. Defaults to 0. + AdditionalLatencyMilliseconds *float64 `json:"additionalLatencyMilliseconds,omitempty" tf:"additional_latency_milliseconds,omitempty"` + + // Specifies the name of the Load Balancer. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The number of samples to consider for load balancing decisions. Defaults to 4. + SampleSize *float64 `json:"sampleSize,omitempty" tf:"sample_size,omitempty"` + + // The number of samples within the sample period that must succeed. Defaults to 2. 
+ SuccessfulSamplesRequired *float64 `json:"successfulSamplesRequired,omitempty" tf:"successful_samples_required,omitempty"` +} + +type BackendPoolLoadBalancingObservation struct { + + // The additional latency in milliseconds for probes to fall into the lowest latency bucket. Defaults to 0. + AdditionalLatencyMilliseconds *float64 `json:"additionalLatencyMilliseconds,omitempty" tf:"additional_latency_milliseconds,omitempty"` + + // The ID of the Azure Front Door Backend. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies the name of the Load Balancer. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The number of samples to consider for load balancing decisions. Defaults to 4. + SampleSize *float64 `json:"sampleSize,omitempty" tf:"sample_size,omitempty"` + + // The number of samples within the sample period that must succeed. Defaults to 2. + SuccessfulSamplesRequired *float64 `json:"successfulSamplesRequired,omitempty" tf:"successful_samples_required,omitempty"` +} + +type BackendPoolLoadBalancingParameters struct { + + // The additional latency in milliseconds for probes to fall into the lowest latency bucket. Defaults to 0. + // +kubebuilder:validation:Optional + AdditionalLatencyMilliseconds *float64 `json:"additionalLatencyMilliseconds,omitempty" tf:"additional_latency_milliseconds,omitempty"` + + // Specifies the name of the Load Balancer. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The number of samples to consider for load balancing decisions. Defaults to 4. + // +kubebuilder:validation:Optional + SampleSize *float64 `json:"sampleSize,omitempty" tf:"sample_size,omitempty"` + + // The number of samples within the sample period that must succeed. Defaults to 2. 
+ // +kubebuilder:validation:Optional + SuccessfulSamplesRequired *float64 `json:"successfulSamplesRequired,omitempty" tf:"successful_samples_required,omitempty"` +} + +type BackendPoolObservation struct { + + // A backend block as defined below. + Backend []BackendObservation `json:"backend,omitempty" tf:"backend,omitempty"` + + // Specifies the name of the backend_pool_health_probe block within this resource to use for this Backend Pool. + HealthProbeName *string `json:"healthProbeName,omitempty" tf:"health_probe_name,omitempty"` + + // The ID of the Azure Front Door Backend. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies the name of the backend_pool_load_balancing block within this resource to use for this Backend Pool. + LoadBalancingName *string `json:"loadBalancingName,omitempty" tf:"load_balancing_name,omitempty"` + + // Specifies the name of the Backend Pool. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type BackendPoolParameters struct { + + // A backend block as defined below. + // +kubebuilder:validation:Optional + Backend []BackendParameters `json:"backend" tf:"backend,omitempty"` + + // Specifies the name of the backend_pool_health_probe block within this resource to use for this Backend Pool. + // +kubebuilder:validation:Optional + HealthProbeName *string `json:"healthProbeName" tf:"health_probe_name,omitempty"` + + // Specifies the name of the backend_pool_load_balancing block within this resource to use for this Backend Pool. + // +kubebuilder:validation:Optional + LoadBalancingName *string `json:"loadBalancingName" tf:"load_balancing_name,omitempty"` + + // Specifies the name of the Backend Pool. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type BackendPoolSettingsInitParameters struct { + + // Specifies the send and receive timeout on forwarding request to the backend. When the timeout is reached, the request fails and returns. 
Possible values are between 0 - 240. Defaults to 60. + BackendPoolsSendReceiveTimeoutSeconds *float64 `json:"backendPoolsSendReceiveTimeoutSeconds,omitempty" tf:"backend_pools_send_receive_timeout_seconds,omitempty"` + + // Enforce certificate name check on HTTPS requests to all backend pools, this setting will have no effect on HTTP requests. Permitted values are true or false. + EnforceBackendPoolsCertificateNameCheck *bool `json:"enforceBackendPoolsCertificateNameCheck,omitempty" tf:"enforce_backend_pools_certificate_name_check,omitempty"` +} + +type BackendPoolSettingsObservation struct { + + // Specifies the send and receive timeout on forwarding request to the backend. When the timeout is reached, the request fails and returns. Possible values are between 0 - 240. Defaults to 60. + BackendPoolsSendReceiveTimeoutSeconds *float64 `json:"backendPoolsSendReceiveTimeoutSeconds,omitempty" tf:"backend_pools_send_receive_timeout_seconds,omitempty"` + + // Enforce certificate name check on HTTPS requests to all backend pools, this setting will have no effect on HTTP requests. Permitted values are true or false. + EnforceBackendPoolsCertificateNameCheck *bool `json:"enforceBackendPoolsCertificateNameCheck,omitempty" tf:"enforce_backend_pools_certificate_name_check,omitempty"` +} + +type BackendPoolSettingsParameters struct { + + // Specifies the send and receive timeout on forwarding request to the backend. When the timeout is reached, the request fails and returns. Possible values are between 0 - 240. Defaults to 60. + // +kubebuilder:validation:Optional + BackendPoolsSendReceiveTimeoutSeconds *float64 `json:"backendPoolsSendReceiveTimeoutSeconds,omitempty" tf:"backend_pools_send_receive_timeout_seconds,omitempty"` + + // Enforce certificate name check on HTTPS requests to all backend pools, this setting will have no effect on HTTP requests. Permitted values are true or false. 
+ // +kubebuilder:validation:Optional + EnforceBackendPoolsCertificateNameCheck *bool `json:"enforceBackendPoolsCertificateNameCheck" tf:"enforce_backend_pools_certificate_name_check,omitempty"` +} + +type ExplicitResourceOrderInitParameters struct { +} + +type ExplicitResourceOrderObservation struct { + BackendPoolHealthProbeIds []*string `json:"backendPoolHealthProbeIds,omitempty" tf:"backend_pool_health_probe_ids,omitempty"` + + BackendPoolIds []*string `json:"backendPoolIds,omitempty" tf:"backend_pool_ids,omitempty"` + + BackendPoolLoadBalancingIds []*string `json:"backendPoolLoadBalancingIds,omitempty" tf:"backend_pool_load_balancing_ids,omitempty"` + + FrontendEndpointIds []*string `json:"frontendEndpointIds,omitempty" tf:"frontend_endpoint_ids,omitempty"` + + RoutingRuleIds []*string `json:"routingRuleIds,omitempty" tf:"routing_rule_ids,omitempty"` +} + +type ExplicitResourceOrderParameters struct { +} + +type ForwardingConfigurationInitParameters struct { + + // Specifies the name of the Backend Pool to forward the incoming traffic to. + BackendPoolName *string `json:"backendPoolName,omitempty" tf:"backend_pool_name,omitempty"` + + // Specify the minimum caching duration (in ISO8601 notation e.g. P1DT2H for 1 day and 2 hours). Needs to be greater than 0 and smaller than 365 days. cache_duration works only in combination with cache_enabled set to true. + CacheDuration *string `json:"cacheDuration,omitempty" tf:"cache_duration,omitempty"` + + // Specifies whether to Enable caching or not. Valid options are true or false. Defaults to false. + CacheEnabled *bool `json:"cacheEnabled,omitempty" tf:"cache_enabled,omitempty"` + + // Defines cache behaviour in relation to query string parameters. Valid options are StripAll, StripAllExcept, StripOnly or StripNone. Defaults to StripAll. 
+ CacheQueryParameterStripDirective *string `json:"cacheQueryParameterStripDirective,omitempty" tf:"cache_query_parameter_strip_directive,omitempty"` + + // Specify query parameters (array). Works only in combination with cache_query_parameter_strip_directive set to StripAllExcept or StripOnly. + CacheQueryParameters []*string `json:"cacheQueryParameters,omitempty" tf:"cache_query_parameters,omitempty"` + + // Whether to use dynamic compression when caching. Valid options are true or false. Defaults to false. + CacheUseDynamicCompression *bool `json:"cacheUseDynamicCompression,omitempty" tf:"cache_use_dynamic_compression,omitempty"` + + // Path to use when constructing the request to forward to the backend. This functions as a URL Rewrite. Default behaviour preserves the URL path. + CustomForwardingPath *string `json:"customForwardingPath,omitempty" tf:"custom_forwarding_path,omitempty"` + + // Protocol to use when redirecting. Valid options are HttpOnly, HttpsOnly, or MatchRequest. Defaults to HttpsOnly. + ForwardingProtocol *string `json:"forwardingProtocol,omitempty" tf:"forwarding_protocol,omitempty"` +} + +type ForwardingConfigurationObservation struct { + + // Specifies the name of the Backend Pool to forward the incoming traffic to. + BackendPoolName *string `json:"backendPoolName,omitempty" tf:"backend_pool_name,omitempty"` + + // Specify the minimum caching duration (in ISO8601 notation e.g. P1DT2H for 1 day and 2 hours). Needs to be greater than 0 and smaller than 365 days. cache_duration works only in combination with cache_enabled set to true. + CacheDuration *string `json:"cacheDuration,omitempty" tf:"cache_duration,omitempty"` + + // Specifies whether to Enable caching or not. Valid options are true or false. Defaults to false. + CacheEnabled *bool `json:"cacheEnabled,omitempty" tf:"cache_enabled,omitempty"` + + // Defines cache behaviour in relation to query string parameters. Valid options are StripAll, StripAllExcept, StripOnly or StripNone. 
Defaults to StripAll. + CacheQueryParameterStripDirective *string `json:"cacheQueryParameterStripDirective,omitempty" tf:"cache_query_parameter_strip_directive,omitempty"` + + // Specify query parameters (array). Works only in combination with cache_query_parameter_strip_directive set to StripAllExcept or StripOnly. + CacheQueryParameters []*string `json:"cacheQueryParameters,omitempty" tf:"cache_query_parameters,omitempty"` + + // Whether to use dynamic compression when caching. Valid options are true or false. Defaults to false. + CacheUseDynamicCompression *bool `json:"cacheUseDynamicCompression,omitempty" tf:"cache_use_dynamic_compression,omitempty"` + + // Path to use when constructing the request to forward to the backend. This functions as a URL Rewrite. Default behaviour preserves the URL path. + CustomForwardingPath *string `json:"customForwardingPath,omitempty" tf:"custom_forwarding_path,omitempty"` + + // Protocol to use when redirecting. Valid options are HttpOnly, HttpsOnly, or MatchRequest. Defaults to HttpsOnly. + ForwardingProtocol *string `json:"forwardingProtocol,omitempty" tf:"forwarding_protocol,omitempty"` +} + +type ForwardingConfigurationParameters struct { + + // Specifies the name of the Backend Pool to forward the incoming traffic to. + // +kubebuilder:validation:Optional + BackendPoolName *string `json:"backendPoolName" tf:"backend_pool_name,omitempty"` + + // Specify the minimum caching duration (in ISO8601 notation e.g. P1DT2H for 1 day and 2 hours). Needs to be greater than 0 and smaller than 365 days. cache_duration works only in combination with cache_enabled set to true. + // +kubebuilder:validation:Optional + CacheDuration *string `json:"cacheDuration,omitempty" tf:"cache_duration,omitempty"` + + // Specifies whether to Enable caching or not. Valid options are true or false. Defaults to false. 
+ // +kubebuilder:validation:Optional + CacheEnabled *bool `json:"cacheEnabled,omitempty" tf:"cache_enabled,omitempty"` + + // Defines cache behaviour in relation to query string parameters. Valid options are StripAll, StripAllExcept, StripOnly or StripNone. Defaults to StripAll. + // +kubebuilder:validation:Optional + CacheQueryParameterStripDirective *string `json:"cacheQueryParameterStripDirective,omitempty" tf:"cache_query_parameter_strip_directive,omitempty"` + + // Specify query parameters (array). Works only in combination with cache_query_parameter_strip_directive set to StripAllExcept or StripOnly. + // +kubebuilder:validation:Optional + CacheQueryParameters []*string `json:"cacheQueryParameters,omitempty" tf:"cache_query_parameters,omitempty"` + + // Whether to use dynamic compression when caching. Valid options are true or false. Defaults to false. + // +kubebuilder:validation:Optional + CacheUseDynamicCompression *bool `json:"cacheUseDynamicCompression,omitempty" tf:"cache_use_dynamic_compression,omitempty"` + + // Path to use when constructing the request to forward to the backend. This functions as a URL Rewrite. Default behaviour preserves the URL path. + // +kubebuilder:validation:Optional + CustomForwardingPath *string `json:"customForwardingPath,omitempty" tf:"custom_forwarding_path,omitempty"` + + // Protocol to use when redirecting. Valid options are HttpOnly, HttpsOnly, or MatchRequest. Defaults to HttpsOnly. + // +kubebuilder:validation:Optional + ForwardingProtocol *string `json:"forwardingProtocol,omitempty" tf:"forwarding_protocol,omitempty"` +} + +type FrontDoorInitParameters struct { + + // A backend_pool block as defined below. + BackendPool []BackendPoolInitParameters `json:"backendPool,omitempty" tf:"backend_pool,omitempty"` + + // A backend_pool_health_probe block as defined below. 
+ BackendPoolHealthProbe []BackendPoolHealthProbeInitParameters `json:"backendPoolHealthProbe,omitempty" tf:"backend_pool_health_probe,omitempty"` + + // A backend_pool_load_balancing block as defined below. + BackendPoolLoadBalancing []BackendPoolLoadBalancingInitParameters `json:"backendPoolLoadBalancing,omitempty" tf:"backend_pool_load_balancing,omitempty"` + + // A backend_pool_settings block as defined below. + BackendPoolSettings []BackendPoolSettingsInitParameters `json:"backendPoolSettings,omitempty" tf:"backend_pool_settings,omitempty"` + + // A friendly name for the Front Door service. + FriendlyName *string `json:"friendlyName,omitempty" tf:"friendly_name,omitempty"` + + // A frontend_endpoint block as defined below. + FrontendEndpoint []FrontendEndpointInitParameters `json:"frontendEndpoint,omitempty" tf:"frontend_endpoint,omitempty"` + + // Should the Front Door Load Balancer be Enabled? Defaults to true. + LoadBalancerEnabled *bool `json:"loadBalancerEnabled,omitempty" tf:"load_balancer_enabled,omitempty"` + + // A routing_rule block as defined below. + RoutingRule []RoutingRuleInitParameters `json:"routingRule,omitempty" tf:"routing_rule,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type FrontDoorObservation struct { + + // A backend_pool block as defined below. + BackendPool []BackendPoolObservation `json:"backendPool,omitempty" tf:"backend_pool,omitempty"` + + // A backend_pool_health_probe block as defined below. 
+ BackendPoolHealthProbe []BackendPoolHealthProbeObservation `json:"backendPoolHealthProbe,omitempty" tf:"backend_pool_health_probe,omitempty"` + + // A map/dictionary of Backend Pool Health Probe Names (key) to the Backend Pool Health Probe ID (value) + // +mapType=granular + BackendPoolHealthProbes map[string]*string `json:"backendPoolHealthProbes,omitempty" tf:"backend_pool_health_probes,omitempty"` + + // A backend_pool_load_balancing block as defined below. + BackendPoolLoadBalancing []BackendPoolLoadBalancingObservation `json:"backendPoolLoadBalancing,omitempty" tf:"backend_pool_load_balancing,omitempty"` + + // A map/dictionary of Backend Pool Load Balancing Setting Names (key) to the Backend Pool Load Balancing Setting ID (value) + // +mapType=granular + BackendPoolLoadBalancingSettings map[string]*string `json:"backendPoolLoadBalancingSettings,omitempty" tf:"backend_pool_load_balancing_settings,omitempty"` + + // A backend_pool_settings block as defined below. + BackendPoolSettings []BackendPoolSettingsObservation `json:"backendPoolSettings,omitempty" tf:"backend_pool_settings,omitempty"` + + // A map/dictionary of Backend Pool Names (key) to the Backend Pool ID (value) + // +mapType=granular + BackendPools map[string]*string `json:"backendPools,omitempty" tf:"backend_pools,omitempty"` + + // The host that each frontendEndpoint must CNAME to. + CNAME *string `json:"cname,omitempty" tf:"cname,omitempty"` + + ExplicitResourceOrder []ExplicitResourceOrderObservation `json:"explicitResourceOrder,omitempty" tf:"explicit_resource_order,omitempty"` + + // A friendly name for the Front Door service. + FriendlyName *string `json:"friendlyName,omitempty" tf:"friendly_name,omitempty"` + + // A frontend_endpoint block as defined below. 
+ FrontendEndpoint []FrontendEndpointObservation `json:"frontendEndpoint,omitempty" tf:"frontend_endpoint,omitempty"` + + // A map/dictionary of Frontend Endpoint Names (key) to the Frontend Endpoint ID (value) + // +mapType=granular + FrontendEndpoints map[string]*string `json:"frontendEndpoints,omitempty" tf:"frontend_endpoints,omitempty"` + + // The unique ID of the Front Door which is embedded into the incoming headers X-Azure-FDID attribute and maybe used to filter traffic sent by the Front Door to your backend. + HeaderFrontdoorID *string `json:"headerFrontdoorId,omitempty" tf:"header_frontdoor_id,omitempty"` + + // The ID of the Azure Front Door Backend. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Should the Front Door Load Balancer be Enabled? Defaults to true. + LoadBalancerEnabled *bool `json:"loadBalancerEnabled,omitempty" tf:"load_balancer_enabled,omitempty"` + + // Specifies the name of the Resource Group in which the Front Door service should exist. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A routing_rule block as defined below. + RoutingRule []RoutingRuleObservation `json:"routingRule,omitempty" tf:"routing_rule,omitempty"` + + // A map/dictionary of Routing Rule Names (key) to the Routing Rule ID (value) + // +mapType=granular + RoutingRules map[string]*string `json:"routingRules,omitempty" tf:"routing_rules,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type FrontDoorParameters struct { + + // A backend_pool block as defined below. + // +kubebuilder:validation:Optional + BackendPool []BackendPoolParameters `json:"backendPool,omitempty" tf:"backend_pool,omitempty"` + + // A backend_pool_health_probe block as defined below. 
+ // +kubebuilder:validation:Optional + BackendPoolHealthProbe []BackendPoolHealthProbeParameters `json:"backendPoolHealthProbe,omitempty" tf:"backend_pool_health_probe,omitempty"` + + // A backend_pool_load_balancing block as defined below. + // +kubebuilder:validation:Optional + BackendPoolLoadBalancing []BackendPoolLoadBalancingParameters `json:"backendPoolLoadBalancing,omitempty" tf:"backend_pool_load_balancing,omitempty"` + + // A backend_pool_settings block as defined below. + // +kubebuilder:validation:Optional + BackendPoolSettings []BackendPoolSettingsParameters `json:"backendPoolSettings,omitempty" tf:"backend_pool_settings,omitempty"` + + // A friendly name for the Front Door service. + // +kubebuilder:validation:Optional + FriendlyName *string `json:"friendlyName,omitempty" tf:"friendly_name,omitempty"` + + // A frontend_endpoint block as defined below. + // +kubebuilder:validation:Optional + FrontendEndpoint []FrontendEndpointParameters `json:"frontendEndpoint,omitempty" tf:"frontend_endpoint,omitempty"` + + // Should the Front Door Load Balancer be Enabled? Defaults to true. + // +kubebuilder:validation:Optional + LoadBalancerEnabled *bool `json:"loadBalancerEnabled,omitempty" tf:"load_balancer_enabled,omitempty"` + + // Specifies the name of the Resource Group in which the Front Door service should exist. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. 
+ // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A routing_rule block as defined below. + // +kubebuilder:validation:Optional + RoutingRule []RoutingRuleParameters `json:"routingRule,omitempty" tf:"routing_rule,omitempty"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type FrontendEndpointInitParameters struct { + + // Specifies the host name of the frontend_endpoint. Must be a domain name. In order to use a name.azurefd.net domain, the name value must match the Front Door name. + HostName *string `json:"hostName,omitempty" tf:"host_name,omitempty"` + + // Specifies the name of the frontend_endpoint. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Whether to allow session affinity on this host. Valid options are true or false Defaults to false. + SessionAffinityEnabled *bool `json:"sessionAffinityEnabled,omitempty" tf:"session_affinity_enabled,omitempty"` + + // The TTL to use in seconds for session affinity, if applicable. Defaults to 0. + SessionAffinityTTLSeconds *float64 `json:"sessionAffinityTtlSeconds,omitempty" tf:"session_affinity_ttl_seconds,omitempty"` + + // Defines the Web Application Firewall policy ID for each host. + WebApplicationFirewallPolicyLinkID *string `json:"webApplicationFirewallPolicyLinkId,omitempty" tf:"web_application_firewall_policy_link_id,omitempty"` +} + +type FrontendEndpointObservation struct { + + // Specifies the host name of the frontend_endpoint. Must be a domain name. In order to use a name.azurefd.net domain, the name value must match the Front Door name. + HostName *string `json:"hostName,omitempty" tf:"host_name,omitempty"` + + // The ID of the Azure Front Door Backend. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies the name of the frontend_endpoint. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Whether to allow session affinity on this host. Valid options are true or false Defaults to false. + SessionAffinityEnabled *bool `json:"sessionAffinityEnabled,omitempty" tf:"session_affinity_enabled,omitempty"` + + // The TTL to use in seconds for session affinity, if applicable. Defaults to 0. + SessionAffinityTTLSeconds *float64 `json:"sessionAffinityTtlSeconds,omitempty" tf:"session_affinity_ttl_seconds,omitempty"` + + // Defines the Web Application Firewall policy ID for each host. + WebApplicationFirewallPolicyLinkID *string `json:"webApplicationFirewallPolicyLinkId,omitempty" tf:"web_application_firewall_policy_link_id,omitempty"` +} + +type FrontendEndpointParameters struct { + + // Specifies the host name of the frontend_endpoint. Must be a domain name. In order to use a name.azurefd.net domain, the name value must match the Front Door name. + // +kubebuilder:validation:Optional + HostName *string `json:"hostName" tf:"host_name,omitempty"` + + // Specifies the name of the frontend_endpoint. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Whether to allow session affinity on this host. Valid options are true or false Defaults to false. + // +kubebuilder:validation:Optional + SessionAffinityEnabled *bool `json:"sessionAffinityEnabled,omitempty" tf:"session_affinity_enabled,omitempty"` + + // The TTL to use in seconds for session affinity, if applicable. Defaults to 0. + // +kubebuilder:validation:Optional + SessionAffinityTTLSeconds *float64 `json:"sessionAffinityTtlSeconds,omitempty" tf:"session_affinity_ttl_seconds,omitempty"` + + // Defines the Web Application Firewall policy ID for each host. 
+ // +kubebuilder:validation:Optional + WebApplicationFirewallPolicyLinkID *string `json:"webApplicationFirewallPolicyLinkId,omitempty" tf:"web_application_firewall_policy_link_id,omitempty"` +} + +type RoutingRuleInitParameters struct { + + // Protocol schemes to match for the Backend Routing Rule. Possible values are Http and Https. + AcceptedProtocols []*string `json:"acceptedProtocols,omitempty" tf:"accepted_protocols,omitempty"` + + // Enable or Disable use of this Backend Routing Rule. Permitted values are true or false. Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // A forwarding_configuration block as defined below. + ForwardingConfiguration *ForwardingConfigurationInitParameters `json:"forwardingConfiguration,omitempty" tf:"forwarding_configuration,omitempty"` + + // The names of the frontend_endpoint blocks within this resource to associate with this routing_rule. + FrontendEndpoints []*string `json:"frontendEndpoints,omitempty" tf:"frontend_endpoints,omitempty"` + + // Specifies the name of the Routing Rule. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The route patterns for the Backend Routing Rule. + PatternsToMatch []*string `json:"patternsToMatch,omitempty" tf:"patterns_to_match,omitempty"` + + // A redirect_configuration block as defined below. + RedirectConfiguration *RoutingRuleRedirectConfigurationInitParameters `json:"redirectConfiguration,omitempty" tf:"redirect_configuration,omitempty"` +} + +type RoutingRuleObservation struct { + + // Protocol schemes to match for the Backend Routing Rule. Possible values are Http and Https. + AcceptedProtocols []*string `json:"acceptedProtocols,omitempty" tf:"accepted_protocols,omitempty"` + + // Enable or Disable use of this Backend Routing Rule. Permitted values are true or false. Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // A forwarding_configuration block as defined below. 
+ ForwardingConfiguration *ForwardingConfigurationObservation `json:"forwardingConfiguration,omitempty" tf:"forwarding_configuration,omitempty"` + + // The names of the frontend_endpoint blocks within this resource to associate with this routing_rule. + FrontendEndpoints []*string `json:"frontendEndpoints,omitempty" tf:"frontend_endpoints,omitempty"` + + // The ID of the Azure Front Door Backend. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies the name of the Routing Rule. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The route patterns for the Backend Routing Rule. + PatternsToMatch []*string `json:"patternsToMatch,omitempty" tf:"patterns_to_match,omitempty"` + + // A redirect_configuration block as defined below. + RedirectConfiguration *RoutingRuleRedirectConfigurationObservation `json:"redirectConfiguration,omitempty" tf:"redirect_configuration,omitempty"` +} + +type RoutingRuleParameters struct { + + // Protocol schemes to match for the Backend Routing Rule. Possible values are Http and Https. + // +kubebuilder:validation:Optional + AcceptedProtocols []*string `json:"acceptedProtocols" tf:"accepted_protocols,omitempty"` + + // Enable or Disable use of this Backend Routing Rule. Permitted values are true or false. Defaults to true. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // A forwarding_configuration block as defined below. + // +kubebuilder:validation:Optional + ForwardingConfiguration *ForwardingConfigurationParameters `json:"forwardingConfiguration,omitempty" tf:"forwarding_configuration,omitempty"` + + // The names of the frontend_endpoint blocks within this resource to associate with this routing_rule. + // +kubebuilder:validation:Optional + FrontendEndpoints []*string `json:"frontendEndpoints" tf:"frontend_endpoints,omitempty"` + + // Specifies the name of the Routing Rule. 
+ // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The route patterns for the Backend Routing Rule. + // +kubebuilder:validation:Optional + PatternsToMatch []*string `json:"patternsToMatch" tf:"patterns_to_match,omitempty"` + + // A redirect_configuration block as defined below. + // +kubebuilder:validation:Optional + RedirectConfiguration *RoutingRuleRedirectConfigurationParameters `json:"redirectConfiguration,omitempty" tf:"redirect_configuration,omitempty"` +} + +type RoutingRuleRedirectConfigurationInitParameters struct { + + // The destination fragment in the portion of URL after '#'. Set this to add a fragment to the redirect URL. + CustomFragment *string `json:"customFragment,omitempty" tf:"custom_fragment,omitempty"` + + // Set this to change the URL for the redirection. + CustomHost *string `json:"customHost,omitempty" tf:"custom_host,omitempty"` + + // The path to retain as per the incoming request, or update in the URL for the redirection. + CustomPath *string `json:"customPath,omitempty" tf:"custom_path,omitempty"` + + // Replace any existing query string from the incoming request URL. + CustomQueryString *string `json:"customQueryString,omitempty" tf:"custom_query_string,omitempty"` + + // Protocol to use when redirecting. Valid options are HttpOnly, HttpsOnly, or MatchRequest. + RedirectProtocol *string `json:"redirectProtocol,omitempty" tf:"redirect_protocol,omitempty"` + + // Status code for the redirect. Valid options are Moved, Found, TemporaryRedirect, PermanentRedirect. + RedirectType *string `json:"redirectType,omitempty" tf:"redirect_type,omitempty"` +} + +type RoutingRuleRedirectConfigurationObservation struct { + + // The destination fragment in the portion of URL after '#'. Set this to add a fragment to the redirect URL. + CustomFragment *string `json:"customFragment,omitempty" tf:"custom_fragment,omitempty"` + + // Set this to change the URL for the redirection. 
+ CustomHost *string `json:"customHost,omitempty" tf:"custom_host,omitempty"` + + // The path to retain as per the incoming request, or update in the URL for the redirection. + CustomPath *string `json:"customPath,omitempty" tf:"custom_path,omitempty"` + + // Replace any existing query string from the incoming request URL. + CustomQueryString *string `json:"customQueryString,omitempty" tf:"custom_query_string,omitempty"` + + // Protocol to use when redirecting. Valid options are HttpOnly, HttpsOnly, or MatchRequest. + RedirectProtocol *string `json:"redirectProtocol,omitempty" tf:"redirect_protocol,omitempty"` + + // Status code for the redirect. Valid options are Moved, Found, TemporaryRedirect, PermanentRedirect. + RedirectType *string `json:"redirectType,omitempty" tf:"redirect_type,omitempty"` +} + +type RoutingRuleRedirectConfigurationParameters struct { + + // The destination fragment in the portion of URL after '#'. Set this to add a fragment to the redirect URL. + // +kubebuilder:validation:Optional + CustomFragment *string `json:"customFragment,omitempty" tf:"custom_fragment,omitempty"` + + // Set this to change the URL for the redirection. + // +kubebuilder:validation:Optional + CustomHost *string `json:"customHost,omitempty" tf:"custom_host,omitempty"` + + // The path to retain as per the incoming request, or update in the URL for the redirection. + // +kubebuilder:validation:Optional + CustomPath *string `json:"customPath,omitempty" tf:"custom_path,omitempty"` + + // Replace any existing query string from the incoming request URL. + // +kubebuilder:validation:Optional + CustomQueryString *string `json:"customQueryString,omitempty" tf:"custom_query_string,omitempty"` + + // Protocol to use when redirecting. Valid options are HttpOnly, HttpsOnly, or MatchRequest. + // +kubebuilder:validation:Optional + RedirectProtocol *string `json:"redirectProtocol" tf:"redirect_protocol,omitempty"` + + // Status code for the redirect. 
Valid options are Moved, Found, TemporaryRedirect, PermanentRedirect. + // +kubebuilder:validation:Optional + RedirectType *string `json:"redirectType" tf:"redirect_type,omitempty"` +} + +// FrontDoorSpec defines the desired state of FrontDoor +type FrontDoorSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider FrontDoorParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider FrontDoorInitParameters `json:"initProvider,omitempty"` +} + +// FrontDoorStatus defines the observed state of FrontDoor. +type FrontDoorStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider FrontDoorObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// FrontDoor is the Schema for the FrontDoors API. Manages an Azure Front Door (classic) instance. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type FrontDoor struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.backendPool) || (has(self.initProvider) && has(self.initProvider.backendPool))",message="spec.forProvider.backendPool is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.backendPoolHealthProbe) || (has(self.initProvider) && has(self.initProvider.backendPoolHealthProbe))",message="spec.forProvider.backendPoolHealthProbe is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.backendPoolLoadBalancing) || (has(self.initProvider) && has(self.initProvider.backendPoolLoadBalancing))",message="spec.forProvider.backendPoolLoadBalancing is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.frontendEndpoint) || (has(self.initProvider) && has(self.initProvider.frontendEndpoint))",message="spec.forProvider.frontendEndpoint 
is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.routingRule) || (has(self.initProvider) && has(self.initProvider.routingRule))",message="spec.forProvider.routingRule is a required parameter" + Spec FrontDoorSpec `json:"spec"` + Status FrontDoorStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// FrontDoorList contains a list of FrontDoors +type FrontDoorList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []FrontDoor `json:"items"` +} + +// Repository type metadata. +var ( + FrontDoor_Kind = "FrontDoor" + FrontDoor_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: FrontDoor_Kind}.String() + FrontDoor_KindAPIVersion = FrontDoor_Kind + "." + CRDGroupVersion.String() + FrontDoor_GroupVersionKind = CRDGroupVersion.WithKind(FrontDoor_Kind) +) + +func init() { + SchemeBuilder.Register(&FrontDoor{}, &FrontDoorList{}) +} diff --git a/apis/network/v1beta2/zz_frontdoorcustomhttpsconfiguration_terraformed.go b/apis/network/v1beta2/zz_frontdoorcustomhttpsconfiguration_terraformed.go new file mode 100755 index 000000000..3da105a37 --- /dev/null +++ b/apis/network/v1beta2/zz_frontdoorcustomhttpsconfiguration_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this FrontdoorCustomHTTPSConfiguration +func (mg *FrontdoorCustomHTTPSConfiguration) GetTerraformResourceType() string { + return "azurerm_frontdoor_custom_https_configuration" +} + +// GetConnectionDetailsMapping for this FrontdoorCustomHTTPSConfiguration +func (tr *FrontdoorCustomHTTPSConfiguration) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this FrontdoorCustomHTTPSConfiguration +func (tr *FrontdoorCustomHTTPSConfiguration) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this FrontdoorCustomHTTPSConfiguration +func (tr *FrontdoorCustomHTTPSConfiguration) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this FrontdoorCustomHTTPSConfiguration +func (tr *FrontdoorCustomHTTPSConfiguration) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this FrontdoorCustomHTTPSConfiguration +func (tr *FrontdoorCustomHTTPSConfiguration) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this FrontdoorCustomHTTPSConfiguration +func (tr *FrontdoorCustomHTTPSConfiguration) SetParameters(params map[string]any) error { + p, err := 
json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this FrontdoorCustomHTTPSConfiguration +func (tr *FrontdoorCustomHTTPSConfiguration) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this FrontdoorCustomHTTPSConfiguration +func (tr *FrontdoorCustomHTTPSConfiguration) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this FrontdoorCustomHTTPSConfiguration using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *FrontdoorCustomHTTPSConfiguration) LateInitialize(attrs []byte) (bool, error) { + params := &FrontdoorCustomHTTPSConfigurationParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *FrontdoorCustomHTTPSConfiguration) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/network/v1beta2/zz_frontdoorcustomhttpsconfiguration_types.go b/apis/network/v1beta2/zz_frontdoorcustomhttpsconfiguration_types.go new file mode 100755 index 000000000..94a944f29 --- /dev/null +++ b/apis/network/v1beta2/zz_frontdoorcustomhttpsconfiguration_types.go @@ -0,0 +1,194 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CustomHTTPSConfigurationInitParameters struct { + + // The name of the Key Vault secret representing the full certificate PFX. + AzureKeyVaultCertificateSecretName *string `json:"azureKeyVaultCertificateSecretName,omitempty" tf:"azure_key_vault_certificate_secret_name,omitempty"` + + // The version of the Key Vault secret representing the full certificate PFX. + AzureKeyVaultCertificateSecretVersion *string `json:"azureKeyVaultCertificateSecretVersion,omitempty" tf:"azure_key_vault_certificate_secret_version,omitempty"` + + // The ID of the Key Vault containing the SSL certificate. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta2.Key + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + AzureKeyVaultCertificateVaultID *string `json:"azureKeyVaultCertificateVaultId,omitempty" tf:"azure_key_vault_certificate_vault_id,omitempty"` + + // Reference to a Key in keyvault to populate azureKeyVaultCertificateVaultId. + // +kubebuilder:validation:Optional + AzureKeyVaultCertificateVaultIDRef *v1.Reference `json:"azureKeyVaultCertificateVaultIdRef,omitempty" tf:"-"` + + // Selector for a Key in keyvault to populate azureKeyVaultCertificateVaultId. + // +kubebuilder:validation:Optional + AzureKeyVaultCertificateVaultIDSelector *v1.Selector `json:"azureKeyVaultCertificateVaultIdSelector,omitempty" tf:"-"` + + // Certificate source to encrypted HTTPS traffic with. Allowed values are FrontDoor or AzureKeyVault. Defaults to FrontDoor. + CertificateSource *string `json:"certificateSource,omitempty" tf:"certificate_source,omitempty"` +} + +type CustomHTTPSConfigurationObservation struct { + + // The name of the Key Vault secret representing the full certificate PFX. + AzureKeyVaultCertificateSecretName *string `json:"azureKeyVaultCertificateSecretName,omitempty" tf:"azure_key_vault_certificate_secret_name,omitempty"` + + // The version of the Key Vault secret representing the full certificate PFX. + AzureKeyVaultCertificateSecretVersion *string `json:"azureKeyVaultCertificateSecretVersion,omitempty" tf:"azure_key_vault_certificate_secret_version,omitempty"` + + // The ID of the Key Vault containing the SSL certificate. + AzureKeyVaultCertificateVaultID *string `json:"azureKeyVaultCertificateVaultId,omitempty" tf:"azure_key_vault_certificate_vault_id,omitempty"` + + // Certificate source to encrypted HTTPS traffic with. Allowed values are FrontDoor or AzureKeyVault. Defaults to FrontDoor. 
+ CertificateSource *string `json:"certificateSource,omitempty" tf:"certificate_source,omitempty"` + + // Minimum client TLS version supported. + MinimumTLSVersion *string `json:"minimumTlsVersion,omitempty" tf:"minimum_tls_version,omitempty"` + + ProvisioningState *string `json:"provisioningState,omitempty" tf:"provisioning_state,omitempty"` + + ProvisioningSubstate *string `json:"provisioningSubstate,omitempty" tf:"provisioning_substate,omitempty"` +} + +type CustomHTTPSConfigurationParameters struct { + + // The name of the Key Vault secret representing the full certificate PFX. + // +kubebuilder:validation:Optional + AzureKeyVaultCertificateSecretName *string `json:"azureKeyVaultCertificateSecretName,omitempty" tf:"azure_key_vault_certificate_secret_name,omitempty"` + + // The version of the Key Vault secret representing the full certificate PFX. + // +kubebuilder:validation:Optional + AzureKeyVaultCertificateSecretVersion *string `json:"azureKeyVaultCertificateSecretVersion,omitempty" tf:"azure_key_vault_certificate_secret_version,omitempty"` + + // The ID of the Key Vault containing the SSL certificate. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta2.Key + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + AzureKeyVaultCertificateVaultID *string `json:"azureKeyVaultCertificateVaultId,omitempty" tf:"azure_key_vault_certificate_vault_id,omitempty"` + + // Reference to a Key in keyvault to populate azureKeyVaultCertificateVaultId. + // +kubebuilder:validation:Optional + AzureKeyVaultCertificateVaultIDRef *v1.Reference `json:"azureKeyVaultCertificateVaultIdRef,omitempty" tf:"-"` + + // Selector for a Key in keyvault to populate azureKeyVaultCertificateVaultId. 
+ // +kubebuilder:validation:Optional + AzureKeyVaultCertificateVaultIDSelector *v1.Selector `json:"azureKeyVaultCertificateVaultIdSelector,omitempty" tf:"-"` + + // Certificate source to encrypted HTTPS traffic with. Allowed values are FrontDoor or AzureKeyVault. Defaults to FrontDoor. + // +kubebuilder:validation:Optional + CertificateSource *string `json:"certificateSource,omitempty" tf:"certificate_source,omitempty"` +} + +type FrontdoorCustomHTTPSConfigurationInitParameters struct { + + // A custom_https_configuration block as defined above. + CustomHTTPSConfiguration *CustomHTTPSConfigurationInitParameters `json:"customHttpsConfiguration,omitempty" tf:"custom_https_configuration,omitempty"` + + // Should the HTTPS protocol be enabled for this custom domain associated with the Front Door? + CustomHTTPSProvisioningEnabled *bool `json:"customHttpsProvisioningEnabled,omitempty" tf:"custom_https_provisioning_enabled,omitempty"` + + // The ID of the Front Door Frontend Endpoint which this configuration refers to. Changing this forces a new resource to be created. + FrontendEndpointID *string `json:"frontendEndpointId,omitempty" tf:"frontend_endpoint_id,omitempty"` +} + +type FrontdoorCustomHTTPSConfigurationObservation struct { + + // A custom_https_configuration block as defined above. + CustomHTTPSConfiguration *CustomHTTPSConfigurationObservation `json:"customHttpsConfiguration,omitempty" tf:"custom_https_configuration,omitempty"` + + // Should the HTTPS protocol be enabled for this custom domain associated with the Front Door? + CustomHTTPSProvisioningEnabled *bool `json:"customHttpsProvisioningEnabled,omitempty" tf:"custom_https_provisioning_enabled,omitempty"` + + // The ID of the Front Door Frontend Endpoint which this configuration refers to. Changing this forces a new resource to be created. 
+ FrontendEndpointID *string `json:"frontendEndpointId,omitempty" tf:"frontend_endpoint_id,omitempty"` + + // The ID of the Azure Front Door Custom HTTPS Configuration. + ID *string `json:"id,omitempty" tf:"id,omitempty"` +} + +type FrontdoorCustomHTTPSConfigurationParameters struct { + + // A custom_https_configuration block as defined above. + // +kubebuilder:validation:Optional + CustomHTTPSConfiguration *CustomHTTPSConfigurationParameters `json:"customHttpsConfiguration,omitempty" tf:"custom_https_configuration,omitempty"` + + // Should the HTTPS protocol be enabled for this custom domain associated with the Front Door? + // +kubebuilder:validation:Optional + CustomHTTPSProvisioningEnabled *bool `json:"customHttpsProvisioningEnabled,omitempty" tf:"custom_https_provisioning_enabled,omitempty"` + + // The ID of the Front Door Frontend Endpoint which this configuration refers to. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + FrontendEndpointID *string `json:"frontendEndpointId,omitempty" tf:"frontend_endpoint_id,omitempty"` +} + +// FrontdoorCustomHTTPSConfigurationSpec defines the desired state of FrontdoorCustomHTTPSConfiguration +type FrontdoorCustomHTTPSConfigurationSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider FrontdoorCustomHTTPSConfigurationParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider FrontdoorCustomHTTPSConfigurationInitParameters `json:"initProvider,omitempty"` +} + +// FrontdoorCustomHTTPSConfigurationStatus defines the observed state of FrontdoorCustomHTTPSConfiguration. +type FrontdoorCustomHTTPSConfigurationStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider FrontdoorCustomHTTPSConfigurationObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// FrontdoorCustomHTTPSConfiguration is the Schema for the FrontdoorCustomHTTPSConfigurations API. Manages the Custom Https Configuration for an Azure Front Door (classic) Frontend Endpoint. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type FrontdoorCustomHTTPSConfiguration struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.customHttpsProvisioningEnabled) || (has(self.initProvider) && has(self.initProvider.customHttpsProvisioningEnabled))",message="spec.forProvider.customHttpsProvisioningEnabled is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in 
self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.frontendEndpointId) || (has(self.initProvider) && has(self.initProvider.frontendEndpointId))",message="spec.forProvider.frontendEndpointId is a required parameter" + Spec FrontdoorCustomHTTPSConfigurationSpec `json:"spec"` + Status FrontdoorCustomHTTPSConfigurationStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// FrontdoorCustomHTTPSConfigurationList contains a list of FrontdoorCustomHTTPSConfigurations +type FrontdoorCustomHTTPSConfigurationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []FrontdoorCustomHTTPSConfiguration `json:"items"` +} + +// Repository type metadata. +var ( + FrontdoorCustomHTTPSConfiguration_Kind = "FrontdoorCustomHTTPSConfiguration" + FrontdoorCustomHTTPSConfiguration_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: FrontdoorCustomHTTPSConfiguration_Kind}.String() + FrontdoorCustomHTTPSConfiguration_KindAPIVersion = FrontdoorCustomHTTPSConfiguration_Kind + "." + CRDGroupVersion.String() + FrontdoorCustomHTTPSConfiguration_GroupVersionKind = CRDGroupVersion.WithKind(FrontdoorCustomHTTPSConfiguration_Kind) +) + +func init() { + SchemeBuilder.Register(&FrontdoorCustomHTTPSConfiguration{}, &FrontdoorCustomHTTPSConfigurationList{}) +} diff --git a/apis/network/v1beta2/zz_frontdoorrulesengine_terraformed.go b/apis/network/v1beta2/zz_frontdoorrulesengine_terraformed.go new file mode 100755 index 000000000..16490a18e --- /dev/null +++ b/apis/network/v1beta2/zz_frontdoorrulesengine_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this FrontdoorRulesEngine +func (mg *FrontdoorRulesEngine) GetTerraformResourceType() string { + return "azurerm_frontdoor_rules_engine" +} + +// GetConnectionDetailsMapping for this FrontdoorRulesEngine +func (tr *FrontdoorRulesEngine) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this FrontdoorRulesEngine +func (tr *FrontdoorRulesEngine) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this FrontdoorRulesEngine +func (tr *FrontdoorRulesEngine) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this FrontdoorRulesEngine +func (tr *FrontdoorRulesEngine) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this FrontdoorRulesEngine +func (tr *FrontdoorRulesEngine) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this FrontdoorRulesEngine +func (tr *FrontdoorRulesEngine) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this FrontdoorRulesEngine +func (tr *FrontdoorRulesEngine) 
GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this FrontdoorRulesEngine +func (tr *FrontdoorRulesEngine) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this FrontdoorRulesEngine using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *FrontdoorRulesEngine) LateInitialize(attrs []byte) (bool, error) { + params := &FrontdoorRulesEngineParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *FrontdoorRulesEngine) GetTerraformSchemaVersion() int { + return 2 +} diff --git a/apis/network/v1beta2/zz_frontdoorrulesengine_types.go b/apis/network/v1beta2/zz_frontdoorrulesengine_types.go new file mode 100755 index 000000000..6ba575e38 --- /dev/null +++ b/apis/network/v1beta2/zz_frontdoorrulesengine_types.go @@ -0,0 +1,364 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ActionInitParameters struct { + + // A request_header block as defined below. + RequestHeader []RequestHeaderInitParameters `json:"requestHeader,omitempty" tf:"request_header,omitempty"` + + // A response_header block as defined below. + ResponseHeader []ResponseHeaderInitParameters `json:"responseHeader,omitempty" tf:"response_header,omitempty"` +} + +type ActionObservation struct { + + // A request_header block as defined below. + RequestHeader []RequestHeaderObservation `json:"requestHeader,omitempty" tf:"request_header,omitempty"` + + // A response_header block as defined below. + ResponseHeader []ResponseHeaderObservation `json:"responseHeader,omitempty" tf:"response_header,omitempty"` +} + +type ActionParameters struct { + + // A request_header block as defined below. + // +kubebuilder:validation:Optional + RequestHeader []RequestHeaderParameters `json:"requestHeader,omitempty" tf:"request_header,omitempty"` + + // A response_header block as defined below. 
+ // +kubebuilder:validation:Optional + ResponseHeader []ResponseHeaderParameters `json:"responseHeader,omitempty" tf:"response_header,omitempty"` +} + +type FrontdoorRulesEngineInitParameters struct { + + // Whether this Rules engine configuration is enabled? Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // A rule block as defined below. + Rule []RuleInitParameters `json:"rule,omitempty" tf:"rule,omitempty"` +} + +type FrontdoorRulesEngineObservation struct { + + // Whether this Rules engine configuration is enabled? Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The name of the Front Door instance. Changing this forces a new resource to be created. + FrontdoorName *string `json:"frontdoorName,omitempty" tf:"frontdoor_name,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the resource group. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A rule block as defined below. + Rule []RuleObservation `json:"rule,omitempty" tf:"rule,omitempty"` +} + +type FrontdoorRulesEngineParameters struct { + + // Whether this Rules engine configuration is enabled? Defaults to true. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The name of the Front Door instance. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.FrontDoor + // +kubebuilder:validation:Optional + FrontdoorName *string `json:"frontdoorName,omitempty" tf:"frontdoor_name,omitempty"` + + // Reference to a FrontDoor in network to populate frontdoorName. 
+ // +kubebuilder:validation:Optional + FrontdoorNameRef *v1.Reference `json:"frontdoorNameRef,omitempty" tf:"-"` + + // Selector for a FrontDoor in network to populate frontdoorName. + // +kubebuilder:validation:Optional + FrontdoorNameSelector *v1.Selector `json:"frontdoorNameSelector,omitempty" tf:"-"` + + // The name of the resource group. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A rule block as defined below. + // +kubebuilder:validation:Optional + Rule []RuleParameters `json:"rule,omitempty" tf:"rule,omitempty"` +} + +type MatchConditionInitParameters struct { + + // can be set to true or false to negate the given condition. Defaults to false. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // can be set to Any, IPMatch, GeoMatch, Equal, Contains, LessThan, GreaterThan, LessThanOrEqual, GreaterThanOrEqual, BeginsWith or EndsWith + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // match against a specific key when variable is set to PostArgs or RequestHeader. It cannot be used with QueryString and RequestMethod. 
+ Selector *string `json:"selector,omitempty" tf:"selector,omitempty"` + + // can be set to one or more values out of Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode and UrlEncode + Transform []*string `json:"transform,omitempty" tf:"transform,omitempty"` + + // value name (string). + Value []*string `json:"value,omitempty" tf:"value,omitempty"` + + // can be set to IsMobile, RemoteAddr, RequestMethod, QueryString, PostArgs, RequestURI, RequestPath, RequestFilename, RequestFilenameExtension,RequestHeader,RequestBody or RequestScheme. + Variable *string `json:"variable,omitempty" tf:"variable,omitempty"` +} + +type MatchConditionObservation struct { + + // can be set to true or false to negate the given condition. Defaults to false. + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // can be set to Any, IPMatch, GeoMatch, Equal, Contains, LessThan, GreaterThan, LessThanOrEqual, GreaterThanOrEqual, BeginsWith or EndsWith + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // match against a specific key when variable is set to PostArgs or RequestHeader. It cannot be used with QueryString and RequestMethod. + Selector *string `json:"selector,omitempty" tf:"selector,omitempty"` + + // can be set to one or more values out of Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode and UrlEncode + Transform []*string `json:"transform,omitempty" tf:"transform,omitempty"` + + // value name (string). + Value []*string `json:"value,omitempty" tf:"value,omitempty"` + + // can be set to IsMobile, RemoteAddr, RequestMethod, QueryString, PostArgs, RequestURI, RequestPath, RequestFilename, RequestFilenameExtension,RequestHeader,RequestBody or RequestScheme. + Variable *string `json:"variable,omitempty" tf:"variable,omitempty"` +} + +type MatchConditionParameters struct { + + // can be set to true or false to negate the given condition. Defaults to false. 
+ // +kubebuilder:validation:Optional + NegateCondition *bool `json:"negateCondition,omitempty" tf:"negate_condition,omitempty"` + + // can be set to Any, IPMatch, GeoMatch, Equal, Contains, LessThan, GreaterThan, LessThanOrEqual, GreaterThanOrEqual, BeginsWith or EndsWith + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // match against a specific key when variable is set to PostArgs or RequestHeader. It cannot be used with QueryString and RequestMethod. + // +kubebuilder:validation:Optional + Selector *string `json:"selector,omitempty" tf:"selector,omitempty"` + + // can be set to one or more values out of Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode and UrlEncode + // +kubebuilder:validation:Optional + Transform []*string `json:"transform,omitempty" tf:"transform,omitempty"` + + // value name (string). + // +kubebuilder:validation:Optional + Value []*string `json:"value,omitempty" tf:"value,omitempty"` + + // can be set to IsMobile, RemoteAddr, RequestMethod, QueryString, PostArgs, RequestURI, RequestPath, RequestFilename, RequestFilenameExtension,RequestHeader,RequestBody or RequestScheme. + // +kubebuilder:validation:Optional + Variable *string `json:"variable,omitempty" tf:"variable,omitempty"` +} + +type RequestHeaderInitParameters struct { + + // can be set to Overwrite, Append or Delete. + HeaderActionType *string `json:"headerActionType,omitempty" tf:"header_action_type,omitempty"` + + // header name (string). + HeaderName *string `json:"headerName,omitempty" tf:"header_name,omitempty"` + + // value name (string). + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type RequestHeaderObservation struct { + + // can be set to Overwrite, Append or Delete. + HeaderActionType *string `json:"headerActionType,omitempty" tf:"header_action_type,omitempty"` + + // header name (string). + HeaderName *string `json:"headerName,omitempty" tf:"header_name,omitempty"` + + // value name (string). 
+ Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type RequestHeaderParameters struct { + + // can be set to Overwrite, Append or Delete. + // +kubebuilder:validation:Optional + HeaderActionType *string `json:"headerActionType,omitempty" tf:"header_action_type,omitempty"` + + // header name (string). + // +kubebuilder:validation:Optional + HeaderName *string `json:"headerName,omitempty" tf:"header_name,omitempty"` + + // value name (string). + // +kubebuilder:validation:Optional + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResponseHeaderInitParameters struct { + + // can be set to Overwrite, Append or Delete. + HeaderActionType *string `json:"headerActionType,omitempty" tf:"header_action_type,omitempty"` + + // header name (string). + HeaderName *string `json:"headerName,omitempty" tf:"header_name,omitempty"` + + // value name (string). + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResponseHeaderObservation struct { + + // can be set to Overwrite, Append or Delete. + HeaderActionType *string `json:"headerActionType,omitempty" tf:"header_action_type,omitempty"` + + // header name (string). + HeaderName *string `json:"headerName,omitempty" tf:"header_name,omitempty"` + + // value name (string). + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResponseHeaderParameters struct { + + // can be set to Overwrite, Append or Delete. + // +kubebuilder:validation:Optional + HeaderActionType *string `json:"headerActionType,omitempty" tf:"header_action_type,omitempty"` + + // header name (string). + // +kubebuilder:validation:Optional + HeaderName *string `json:"headerName,omitempty" tf:"header_name,omitempty"` + + // value name (string). + // +kubebuilder:validation:Optional + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type RuleInitParameters struct { + + // An action block as defined below. 
+ Action *ActionInitParameters `json:"action,omitempty" tf:"action,omitempty"` + + // One or more match_condition block as defined below. + MatchCondition []MatchConditionInitParameters `json:"matchCondition,omitempty" tf:"match_condition,omitempty"` + + // The name of the rule. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Priority of the rule, must be unique per rules engine definition. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` +} + +type RuleObservation struct { + + // An action block as defined below. + Action *ActionObservation `json:"action,omitempty" tf:"action,omitempty"` + + // One or more match_condition block as defined below. + MatchCondition []MatchConditionObservation `json:"matchCondition,omitempty" tf:"match_condition,omitempty"` + + // The name of the rule. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Priority of the rule, must be unique per rules engine definition. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` +} + +type RuleParameters struct { + + // An action block as defined below. + // +kubebuilder:validation:Optional + Action *ActionParameters `json:"action,omitempty" tf:"action,omitempty"` + + // One or more match_condition block as defined below. + // +kubebuilder:validation:Optional + MatchCondition []MatchConditionParameters `json:"matchCondition,omitempty" tf:"match_condition,omitempty"` + + // The name of the rule. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Priority of the rule, must be unique per rules engine definition. + // +kubebuilder:validation:Optional + Priority *float64 `json:"priority" tf:"priority,omitempty"` +} + +// FrontdoorRulesEngineSpec defines the desired state of FrontdoorRulesEngine +type FrontdoorRulesEngineSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider FrontdoorRulesEngineParameters `json:"forProvider"` + // THIS IS A BETA FIELD. 
It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider FrontdoorRulesEngineInitParameters `json:"initProvider,omitempty"` +} + +// FrontdoorRulesEngineStatus defines the observed state of FrontdoorRulesEngine. +type FrontdoorRulesEngineStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider FrontdoorRulesEngineObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// FrontdoorRulesEngine is the Schema for the FrontdoorRulesEngines API. Manages an Azure Front Door (classic) Rules Engine configuration and rules. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type FrontdoorRulesEngine struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec FrontdoorRulesEngineSpec `json:"spec"` + Status FrontdoorRulesEngineStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// FrontdoorRulesEngineList contains a list of FrontdoorRulesEngines +type FrontdoorRulesEngineList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []FrontdoorRulesEngine `json:"items"` +} + +// Repository type metadata. +var ( + FrontdoorRulesEngine_Kind = "FrontdoorRulesEngine" + FrontdoorRulesEngine_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: FrontdoorRulesEngine_Kind}.String() + FrontdoorRulesEngine_KindAPIVersion = FrontdoorRulesEngine_Kind + "." + CRDGroupVersion.String() + FrontdoorRulesEngine_GroupVersionKind = CRDGroupVersion.WithKind(FrontdoorRulesEngine_Kind) +) + +func init() { + SchemeBuilder.Register(&FrontdoorRulesEngine{}, &FrontdoorRulesEngineList{}) +} diff --git a/apis/network/v1beta2/zz_generated.conversion_hubs.go b/apis/network/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 000000000..f95a6fa1b --- /dev/null +++ b/apis/network/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,100 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. 
+func (tr *ApplicationGateway) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *ConnectionMonitor) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *DNSZone) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *ExpressRouteCircuit) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *ExpressRouteCircuitPeering) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *ExpressRouteConnection) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *ExpressRoutePort) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Firewall) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *FirewallPolicy) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *FrontDoor) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *FrontdoorCustomHTTPSConfiguration) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *FrontdoorRulesEngine) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *LocalNetworkGateway) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Manager) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *PacketCapture) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *PointToSiteVPNGateway) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *PrivateDNSZone) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *PrivateEndpoint) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Profile) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Subnet) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *TrafficManagerProfile) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *VirtualHubConnection) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *VirtualNetwork) Hub() {} + +// Hub marks this type as a conversion hub. 
+func (tr *VirtualNetworkGateway) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *VirtualNetworkGatewayConnection) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *VPNGateway) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *VPNGatewayConnection) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *VPNServerConfiguration) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *VPNSite) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *WatcherFlowLog) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *WebApplicationFirewallPolicy) Hub() {} diff --git a/apis/network/v1beta2/zz_generated.deepcopy.go b/apis/network/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..6d08934f3 --- /dev/null +++ b/apis/network/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,30546 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionInitParameters) DeepCopyInto(out *ActionInitParameters) { + *out = *in + if in.RequestHeader != nil { + in, out := &in.RequestHeader, &out.RequestHeader + *out = make([]RequestHeaderInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResponseHeader != nil { + in, out := &in.ResponseHeader, &out.ResponseHeader + *out = make([]ResponseHeaderInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionInitParameters. 
+func (in *ActionInitParameters) DeepCopy() *ActionInitParameters { + if in == nil { + return nil + } + out := new(ActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionObservation) DeepCopyInto(out *ActionObservation) { + *out = *in + if in.RequestHeader != nil { + in, out := &in.RequestHeader, &out.RequestHeader + *out = make([]RequestHeaderObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResponseHeader != nil { + in, out := &in.ResponseHeader, &out.ResponseHeader + *out = make([]ResponseHeaderObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionObservation. +func (in *ActionObservation) DeepCopy() *ActionObservation { + if in == nil { + return nil + } + out := new(ActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionParameters) DeepCopyInto(out *ActionParameters) { + *out = *in + if in.RequestHeader != nil { + in, out := &in.RequestHeader, &out.RequestHeader + *out = make([]RequestHeaderParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResponseHeader != nil { + in, out := &in.ResponseHeader, &out.ResponseHeader + *out = make([]ResponseHeaderParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionParameters. 
+func (in *ActionParameters) DeepCopy() *ActionParameters { + if in == nil { + return nil + } + out := new(ActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationGateway) DeepCopyInto(out *ApplicationGateway) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationGateway. +func (in *ApplicationGateway) DeepCopy() *ApplicationGateway { + if in == nil { + return nil + } + out := new(ApplicationGateway) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ApplicationGateway) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ApplicationGatewayInitParameters) DeepCopyInto(out *ApplicationGatewayInitParameters) { + *out = *in + if in.AuthenticationCertificate != nil { + in, out := &in.AuthenticationCertificate, &out.AuthenticationCertificate + *out = make([]AuthenticationCertificateInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AutoscaleConfiguration != nil { + in, out := &in.AutoscaleConfiguration, &out.AutoscaleConfiguration + *out = new(AutoscaleConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.BackendAddressPool != nil { + in, out := &in.BackendAddressPool, &out.BackendAddressPool + *out = make([]BackendAddressPoolInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.BackendHTTPSettings != nil { + in, out := &in.BackendHTTPSettings, &out.BackendHTTPSettings + *out = make([]BackendHTTPSettingsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CustomErrorConfiguration != nil { + in, out := &in.CustomErrorConfiguration, &out.CustomErrorConfiguration + *out = make([]CustomErrorConfigurationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EnableHttp2 != nil { + in, out := &in.EnableHttp2, &out.EnableHttp2 + *out = new(bool) + **out = **in + } + if in.FipsEnabled != nil { + in, out := &in.FipsEnabled, &out.FipsEnabled + *out = new(bool) + **out = **in + } + if in.FirewallPolicyID != nil { + in, out := &in.FirewallPolicyID, &out.FirewallPolicyID + *out = new(string) + **out = **in + } + if in.ForceFirewallPolicyAssociation != nil { + in, out := &in.ForceFirewallPolicyAssociation, &out.ForceFirewallPolicyAssociation + *out = new(bool) + **out = **in + } + if in.FrontendIPConfiguration != nil { + in, out := &in.FrontendIPConfiguration, &out.FrontendIPConfiguration + *out = make([]FrontendIPConfigurationInitParameters, len(*in)) + for i := range *in { + 
(*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FrontendPort != nil { + in, out := &in.FrontendPort, &out.FrontendPort + *out = make([]FrontendPortInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.GatewayIPConfiguration != nil { + in, out := &in.GatewayIPConfiguration, &out.GatewayIPConfiguration + *out = make([]GatewayIPConfigurationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Global != nil { + in, out := &in.Global, &out.Global + *out = new(GlobalInitParameters) + (*in).DeepCopyInto(*out) + } + if in.HTTPListener != nil { + in, out := &in.HTTPListener, &out.HTTPListener + *out = make([]HTTPListenerInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PrivateLinkConfiguration != nil { + in, out := &in.PrivateLinkConfiguration, &out.PrivateLinkConfiguration + *out = make([]PrivateLinkConfigurationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Probe != nil { + in, out := &in.Probe, &out.Probe + *out = make([]ProbeInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RedirectConfiguration != nil { + in, out := &in.RedirectConfiguration, &out.RedirectConfiguration + *out = make([]RedirectConfigurationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RequestRoutingRule != nil { + in, out := &in.RequestRoutingRule, &out.RequestRoutingRule + *out = make([]RequestRoutingRuleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RewriteRuleSet != nil { + in, out := &in.RewriteRuleSet, 
&out.RewriteRuleSet + *out = make([]RewriteRuleSetInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SSLCertificate != nil { + in, out := &in.SSLCertificate, &out.SSLCertificate + *out = make([]SSLCertificateInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SSLPolicy != nil { + in, out := &in.SSLPolicy, &out.SSLPolicy + *out = new(SSLPolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SSLProfile != nil { + in, out := &in.SSLProfile, &out.SSLProfile + *out = make([]SSLProfileInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(SkuInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TrustedClientCertificate != nil { + in, out := &in.TrustedClientCertificate, &out.TrustedClientCertificate + *out = make([]TrustedClientCertificateInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TrustedRootCertificate != nil { + in, out := &in.TrustedRootCertificate, &out.TrustedRootCertificate + *out = make([]TrustedRootCertificateInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.URLPathMap != nil { + in, out := &in.URLPathMap, &out.URLPathMap + *out = make([]URLPathMapInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.WafConfiguration != nil { + in, out := &in.WafConfiguration, &out.WafConfiguration + *out = new(WafConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Zones != nil { + in, out 
:= &in.Zones, &out.Zones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationGatewayInitParameters. +func (in *ApplicationGatewayInitParameters) DeepCopy() *ApplicationGatewayInitParameters { + if in == nil { + return nil + } + out := new(ApplicationGatewayInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationGatewayList) DeepCopyInto(out *ApplicationGatewayList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ApplicationGateway, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationGatewayList. +func (in *ApplicationGatewayList) DeepCopy() *ApplicationGatewayList { + if in == nil { + return nil + } + out := new(ApplicationGatewayList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ApplicationGatewayList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ApplicationGatewayObservation) DeepCopyInto(out *ApplicationGatewayObservation) { + *out = *in + if in.AuthenticationCertificate != nil { + in, out := &in.AuthenticationCertificate, &out.AuthenticationCertificate + *out = make([]AuthenticationCertificateObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AutoscaleConfiguration != nil { + in, out := &in.AutoscaleConfiguration, &out.AutoscaleConfiguration + *out = new(AutoscaleConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.BackendAddressPool != nil { + in, out := &in.BackendAddressPool, &out.BackendAddressPool + *out = make([]BackendAddressPoolObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.BackendHTTPSettings != nil { + in, out := &in.BackendHTTPSettings, &out.BackendHTTPSettings + *out = make([]BackendHTTPSettingsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CustomErrorConfiguration != nil { + in, out := &in.CustomErrorConfiguration, &out.CustomErrorConfiguration + *out = make([]CustomErrorConfigurationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EnableHttp2 != nil { + in, out := &in.EnableHttp2, &out.EnableHttp2 + *out = new(bool) + **out = **in + } + if in.FipsEnabled != nil { + in, out := &in.FipsEnabled, &out.FipsEnabled + *out = new(bool) + **out = **in + } + if in.FirewallPolicyID != nil { + in, out := &in.FirewallPolicyID, &out.FirewallPolicyID + *out = new(string) + **out = **in + } + if in.ForceFirewallPolicyAssociation != nil { + in, out := &in.ForceFirewallPolicyAssociation, &out.ForceFirewallPolicyAssociation + *out = new(bool) + **out = **in + } + if in.FrontendIPConfiguration != nil { + in, out := &in.FrontendIPConfiguration, &out.FrontendIPConfiguration + *out = make([]FrontendIPConfigurationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + 
} + } + if in.FrontendPort != nil { + in, out := &in.FrontendPort, &out.FrontendPort + *out = make([]FrontendPortObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.GatewayIPConfiguration != nil { + in, out := &in.GatewayIPConfiguration, &out.GatewayIPConfiguration + *out = make([]GatewayIPConfigurationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Global != nil { + in, out := &in.Global, &out.Global + *out = new(GlobalObservation) + (*in).DeepCopyInto(*out) + } + if in.HTTPListener != nil { + in, out := &in.HTTPListener, &out.HTTPListener + *out = make([]HTTPListenerObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PrivateEndpointConnection != nil { + in, out := &in.PrivateEndpointConnection, &out.PrivateEndpointConnection + *out = make([]PrivateEndpointConnectionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PrivateLinkConfiguration != nil { + in, out := &in.PrivateLinkConfiguration, &out.PrivateLinkConfiguration + *out = make([]PrivateLinkConfigurationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Probe != nil { + in, out := &in.Probe, &out.Probe + *out = make([]ProbeObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RedirectConfiguration != nil { + in, out := &in.RedirectConfiguration, &out.RedirectConfiguration + *out = make([]RedirectConfigurationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if 
in.RequestRoutingRule != nil { + in, out := &in.RequestRoutingRule, &out.RequestRoutingRule + *out = make([]RequestRoutingRuleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.RewriteRuleSet != nil { + in, out := &in.RewriteRuleSet, &out.RewriteRuleSet + *out = make([]RewriteRuleSetObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SSLCertificate != nil { + in, out := &in.SSLCertificate, &out.SSLCertificate + *out = make([]SSLCertificateObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SSLPolicy != nil { + in, out := &in.SSLPolicy, &out.SSLPolicy + *out = new(SSLPolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.SSLProfile != nil { + in, out := &in.SSLProfile, &out.SSLProfile + *out = make([]SSLProfileObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(SkuObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TrustedClientCertificate != nil { + in, out := &in.TrustedClientCertificate, &out.TrustedClientCertificate + *out = make([]TrustedClientCertificateObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TrustedRootCertificate != nil { + in, out := &in.TrustedRootCertificate, &out.TrustedRootCertificate + *out = make([]TrustedRootCertificateObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } 
+ if in.URLPathMap != nil { + in, out := &in.URLPathMap, &out.URLPathMap + *out = make([]URLPathMapObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.WafConfiguration != nil { + in, out := &in.WafConfiguration, &out.WafConfiguration + *out = new(WafConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationGatewayObservation. +func (in *ApplicationGatewayObservation) DeepCopy() *ApplicationGatewayObservation { + if in == nil { + return nil + } + out := new(ApplicationGatewayObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ApplicationGatewayParameters) DeepCopyInto(out *ApplicationGatewayParameters) { + *out = *in + if in.AuthenticationCertificate != nil { + in, out := &in.AuthenticationCertificate, &out.AuthenticationCertificate + *out = make([]AuthenticationCertificateParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AutoscaleConfiguration != nil { + in, out := &in.AutoscaleConfiguration, &out.AutoscaleConfiguration + *out = new(AutoscaleConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.BackendAddressPool != nil { + in, out := &in.BackendAddressPool, &out.BackendAddressPool + *out = make([]BackendAddressPoolParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.BackendHTTPSettings != nil { + in, out := &in.BackendHTTPSettings, &out.BackendHTTPSettings + *out = make([]BackendHTTPSettingsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CustomErrorConfiguration != nil { + in, out := &in.CustomErrorConfiguration, &out.CustomErrorConfiguration + *out = make([]CustomErrorConfigurationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EnableHttp2 != nil { + in, out := &in.EnableHttp2, &out.EnableHttp2 + *out = new(bool) + **out = **in + } + if in.FipsEnabled != nil { + in, out := &in.FipsEnabled, &out.FipsEnabled + *out = new(bool) + **out = **in + } + if in.FirewallPolicyID != nil { + in, out := &in.FirewallPolicyID, &out.FirewallPolicyID + *out = new(string) + **out = **in + } + if in.ForceFirewallPolicyAssociation != nil { + in, out := &in.ForceFirewallPolicyAssociation, &out.ForceFirewallPolicyAssociation + *out = new(bool) + **out = **in + } + if in.FrontendIPConfiguration != nil { + in, out := &in.FrontendIPConfiguration, &out.FrontendIPConfiguration + *out = make([]FrontendIPConfigurationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + 
if in.FrontendPort != nil { + in, out := &in.FrontendPort, &out.FrontendPort + *out = make([]FrontendPortParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.GatewayIPConfiguration != nil { + in, out := &in.GatewayIPConfiguration, &out.GatewayIPConfiguration + *out = make([]GatewayIPConfigurationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Global != nil { + in, out := &in.Global, &out.Global + *out = new(GlobalParameters) + (*in).DeepCopyInto(*out) + } + if in.HTTPListener != nil { + in, out := &in.HTTPListener, &out.HTTPListener + *out = make([]HTTPListenerParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PrivateLinkConfiguration != nil { + in, out := &in.PrivateLinkConfiguration, &out.PrivateLinkConfiguration + *out = make([]PrivateLinkConfigurationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Probe != nil { + in, out := &in.Probe, &out.Probe + *out = make([]ProbeParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RedirectConfiguration != nil { + in, out := &in.RedirectConfiguration, &out.RedirectConfiguration + *out = make([]RedirectConfigurationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RequestRoutingRule != nil { + in, out := &in.RequestRoutingRule, &out.RequestRoutingRule + *out = make([]RequestRoutingRuleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if 
in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RewriteRuleSet != nil { + in, out := &in.RewriteRuleSet, &out.RewriteRuleSet + *out = make([]RewriteRuleSetParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SSLCertificate != nil { + in, out := &in.SSLCertificate, &out.SSLCertificate + *out = make([]SSLCertificateParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SSLPolicy != nil { + in, out := &in.SSLPolicy, &out.SSLPolicy + *out = new(SSLPolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.SSLProfile != nil { + in, out := &in.SSLProfile, &out.SSLProfile + *out = make([]SSLProfileParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(SkuParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TrustedClientCertificate != nil { + in, out := &in.TrustedClientCertificate, &out.TrustedClientCertificate + *out = make([]TrustedClientCertificateParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TrustedRootCertificate != nil { + in, out := &in.TrustedRootCertificate, &out.TrustedRootCertificate + *out = make([]TrustedRootCertificateParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.URLPathMap != nil 
{ + in, out := &in.URLPathMap, &out.URLPathMap + *out = make([]URLPathMapParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.WafConfiguration != nil { + in, out := &in.WafConfiguration, &out.WafConfiguration + *out = new(WafConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationGatewayParameters. +func (in *ApplicationGatewayParameters) DeepCopy() *ApplicationGatewayParameters { + if in == nil { + return nil + } + out := new(ApplicationGatewayParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationGatewaySpec) DeepCopyInto(out *ApplicationGatewaySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationGatewaySpec. +func (in *ApplicationGatewaySpec) DeepCopy() *ApplicationGatewaySpec { + if in == nil { + return nil + } + out := new(ApplicationGatewaySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationGatewayStatus) DeepCopyInto(out *ApplicationGatewayStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationGatewayStatus. 
+func (in *ApplicationGatewayStatus) DeepCopy() *ApplicationGatewayStatus { + if in == nil { + return nil + } + out := new(ApplicationGatewayStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationCertificateInitParameters) DeepCopyInto(out *AuthenticationCertificateInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationCertificateInitParameters. +func (in *AuthenticationCertificateInitParameters) DeepCopy() *AuthenticationCertificateInitParameters { + if in == nil { + return nil + } + out := new(AuthenticationCertificateInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationCertificateObservation) DeepCopyInto(out *AuthenticationCertificateObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationCertificateObservation. +func (in *AuthenticationCertificateObservation) DeepCopy() *AuthenticationCertificateObservation { + if in == nil { + return nil + } + out := new(AuthenticationCertificateObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuthenticationCertificateParameters) DeepCopyInto(out *AuthenticationCertificateParameters) { + *out = *in + out.DataSecretRef = in.DataSecretRef + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationCertificateParameters. +func (in *AuthenticationCertificateParameters) DeepCopy() *AuthenticationCertificateParameters { + if in == nil { + return nil + } + out := new(AuthenticationCertificateParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoscaleConfigurationInitParameters) DeepCopyInto(out *AutoscaleConfigurationInitParameters) { + *out = *in + if in.MaxCapacity != nil { + in, out := &in.MaxCapacity, &out.MaxCapacity + *out = new(float64) + **out = **in + } + if in.MinCapacity != nil { + in, out := &in.MinCapacity, &out.MinCapacity + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscaleConfigurationInitParameters. +func (in *AutoscaleConfigurationInitParameters) DeepCopy() *AutoscaleConfigurationInitParameters { + if in == nil { + return nil + } + out := new(AutoscaleConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutoscaleConfigurationObservation) DeepCopyInto(out *AutoscaleConfigurationObservation) { + *out = *in + if in.MaxCapacity != nil { + in, out := &in.MaxCapacity, &out.MaxCapacity + *out = new(float64) + **out = **in + } + if in.MinCapacity != nil { + in, out := &in.MinCapacity, &out.MinCapacity + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscaleConfigurationObservation. +func (in *AutoscaleConfigurationObservation) DeepCopy() *AutoscaleConfigurationObservation { + if in == nil { + return nil + } + out := new(AutoscaleConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoscaleConfigurationParameters) DeepCopyInto(out *AutoscaleConfigurationParameters) { + *out = *in + if in.MaxCapacity != nil { + in, out := &in.MaxCapacity, &out.MaxCapacity + *out = new(float64) + **out = **in + } + if in.MinCapacity != nil { + in, out := &in.MinCapacity, &out.MinCapacity + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscaleConfigurationParameters. +func (in *AutoscaleConfigurationParameters) DeepCopy() *AutoscaleConfigurationParameters { + if in == nil { + return nil + } + out := new(AutoscaleConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AzureActiveDirectoryAuthenticationInitParameters) DeepCopyInto(out *AzureActiveDirectoryAuthenticationInitParameters) { + *out = *in + if in.Audience != nil { + in, out := &in.Audience, &out.Audience + *out = new(string) + **out = **in + } + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } + if in.Tenant != nil { + in, out := &in.Tenant, &out.Tenant + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureActiveDirectoryAuthenticationInitParameters. +func (in *AzureActiveDirectoryAuthenticationInitParameters) DeepCopy() *AzureActiveDirectoryAuthenticationInitParameters { + if in == nil { + return nil + } + out := new(AzureActiveDirectoryAuthenticationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureActiveDirectoryAuthenticationObservation) DeepCopyInto(out *AzureActiveDirectoryAuthenticationObservation) { + *out = *in + if in.Audience != nil { + in, out := &in.Audience, &out.Audience + *out = new(string) + **out = **in + } + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } + if in.Tenant != nil { + in, out := &in.Tenant, &out.Tenant + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureActiveDirectoryAuthenticationObservation. +func (in *AzureActiveDirectoryAuthenticationObservation) DeepCopy() *AzureActiveDirectoryAuthenticationObservation { + if in == nil { + return nil + } + out := new(AzureActiveDirectoryAuthenticationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AzureActiveDirectoryAuthenticationParameters) DeepCopyInto(out *AzureActiveDirectoryAuthenticationParameters) { + *out = *in + if in.Audience != nil { + in, out := &in.Audience, &out.Audience + *out = new(string) + **out = **in + } + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } + if in.Tenant != nil { + in, out := &in.Tenant, &out.Tenant + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureActiveDirectoryAuthenticationParameters. +func (in *AzureActiveDirectoryAuthenticationParameters) DeepCopy() *AzureActiveDirectoryAuthenticationParameters { + if in == nil { + return nil + } + out := new(AzureActiveDirectoryAuthenticationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BGPInitParameters) DeepCopyInto(out *BGPInitParameters) { + *out = *in + if in.Asn != nil { + in, out := &in.Asn, &out.Asn + *out = new(float64) + **out = **in + } + if in.PeeringAddress != nil { + in, out := &in.PeeringAddress, &out.PeeringAddress + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BGPInitParameters. +func (in *BGPInitParameters) DeepCopy() *BGPInitParameters { + if in == nil { + return nil + } + out := new(BGPInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BGPObservation) DeepCopyInto(out *BGPObservation) { + *out = *in + if in.Asn != nil { + in, out := &in.Asn, &out.Asn + *out = new(float64) + **out = **in + } + if in.PeeringAddress != nil { + in, out := &in.PeeringAddress, &out.PeeringAddress + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BGPObservation. +func (in *BGPObservation) DeepCopy() *BGPObservation { + if in == nil { + return nil + } + out := new(BGPObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BGPParameters) DeepCopyInto(out *BGPParameters) { + *out = *in + if in.Asn != nil { + in, out := &in.Asn, &out.Asn + *out = new(float64) + **out = **in + } + if in.PeeringAddress != nil { + in, out := &in.PeeringAddress, &out.PeeringAddress + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BGPParameters. +func (in *BGPParameters) DeepCopy() *BGPParameters { + if in == nil { + return nil + } + out := new(BGPParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BGPSettingsInitParameters) DeepCopyInto(out *BGPSettingsInitParameters) { + *out = *in + if in.Asn != nil { + in, out := &in.Asn, &out.Asn + *out = new(float64) + **out = **in + } + if in.BGPPeeringAddress != nil { + in, out := &in.BGPPeeringAddress, &out.BGPPeeringAddress + *out = new(string) + **out = **in + } + if in.PeerWeight != nil { + in, out := &in.PeerWeight, &out.PeerWeight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BGPSettingsInitParameters. 
+func (in *BGPSettingsInitParameters) DeepCopy() *BGPSettingsInitParameters { + if in == nil { + return nil + } + out := new(BGPSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BGPSettingsObservation) DeepCopyInto(out *BGPSettingsObservation) { + *out = *in + if in.Asn != nil { + in, out := &in.Asn, &out.Asn + *out = new(float64) + **out = **in + } + if in.BGPPeeringAddress != nil { + in, out := &in.BGPPeeringAddress, &out.BGPPeeringAddress + *out = new(string) + **out = **in + } + if in.PeerWeight != nil { + in, out := &in.PeerWeight, &out.PeerWeight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BGPSettingsObservation. +func (in *BGPSettingsObservation) DeepCopy() *BGPSettingsObservation { + if in == nil { + return nil + } + out := new(BGPSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BGPSettingsParameters) DeepCopyInto(out *BGPSettingsParameters) { + *out = *in + if in.Asn != nil { + in, out := &in.Asn, &out.Asn + *out = new(float64) + **out = **in + } + if in.BGPPeeringAddress != nil { + in, out := &in.BGPPeeringAddress, &out.BGPPeeringAddress + *out = new(string) + **out = **in + } + if in.PeerWeight != nil { + in, out := &in.PeerWeight, &out.PeerWeight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BGPSettingsParameters. +func (in *BGPSettingsParameters) DeepCopy() *BGPSettingsParameters { + if in == nil { + return nil + } + out := new(BGPSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *BackendAddressPoolInitParameters) DeepCopyInto(out *BackendAddressPoolInitParameters) { + *out = *in + if in.Fqdns != nil { + in, out := &in.Fqdns, &out.Fqdns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IPAddresses != nil { + in, out := &in.IPAddresses, &out.IPAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendAddressPoolInitParameters. +func (in *BackendAddressPoolInitParameters) DeepCopy() *BackendAddressPoolInitParameters { + if in == nil { + return nil + } + out := new(BackendAddressPoolInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackendAddressPoolObservation) DeepCopyInto(out *BackendAddressPoolObservation) { + *out = *in + if in.Fqdns != nil { + in, out := &in.Fqdns, &out.Fqdns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IPAddresses != nil { + in, out := &in.IPAddresses, &out.IPAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendAddressPoolObservation. +func (in *BackendAddressPoolObservation) DeepCopy() *BackendAddressPoolObservation { + if in == nil { + return nil + } + out := new(BackendAddressPoolObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendAddressPoolParameters) DeepCopyInto(out *BackendAddressPoolParameters) { + *out = *in + if in.Fqdns != nil { + in, out := &in.Fqdns, &out.Fqdns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IPAddresses != nil { + in, out := &in.IPAddresses, &out.IPAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendAddressPoolParameters. 
+func (in *BackendAddressPoolParameters) DeepCopy() *BackendAddressPoolParameters { + if in == nil { + return nil + } + out := new(BackendAddressPoolParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendHTTPSettingsAuthenticationCertificateInitParameters) DeepCopyInto(out *BackendHTTPSettingsAuthenticationCertificateInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendHTTPSettingsAuthenticationCertificateInitParameters. +func (in *BackendHTTPSettingsAuthenticationCertificateInitParameters) DeepCopy() *BackendHTTPSettingsAuthenticationCertificateInitParameters { + if in == nil { + return nil + } + out := new(BackendHTTPSettingsAuthenticationCertificateInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendHTTPSettingsAuthenticationCertificateObservation) DeepCopyInto(out *BackendHTTPSettingsAuthenticationCertificateObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendHTTPSettingsAuthenticationCertificateObservation. 
+func (in *BackendHTTPSettingsAuthenticationCertificateObservation) DeepCopy() *BackendHTTPSettingsAuthenticationCertificateObservation { + if in == nil { + return nil + } + out := new(BackendHTTPSettingsAuthenticationCertificateObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendHTTPSettingsAuthenticationCertificateParameters) DeepCopyInto(out *BackendHTTPSettingsAuthenticationCertificateParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendHTTPSettingsAuthenticationCertificateParameters. +func (in *BackendHTTPSettingsAuthenticationCertificateParameters) DeepCopy() *BackendHTTPSettingsAuthenticationCertificateParameters { + if in == nil { + return nil + } + out := new(BackendHTTPSettingsAuthenticationCertificateParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackendHTTPSettingsInitParameters) DeepCopyInto(out *BackendHTTPSettingsInitParameters) { + *out = *in + if in.AffinityCookieName != nil { + in, out := &in.AffinityCookieName, &out.AffinityCookieName + *out = new(string) + **out = **in + } + if in.AuthenticationCertificate != nil { + in, out := &in.AuthenticationCertificate, &out.AuthenticationCertificate + *out = make([]BackendHTTPSettingsAuthenticationCertificateInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ConnectionDraining != nil { + in, out := &in.ConnectionDraining, &out.ConnectionDraining + *out = new(ConnectionDrainingInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CookieBasedAffinity != nil { + in, out := &in.CookieBasedAffinity, &out.CookieBasedAffinity + *out = new(string) + **out = **in + } + if in.HostName != nil { + in, out := &in.HostName, &out.HostName + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.PickHostNameFromBackendAddress != nil { + in, out := &in.PickHostNameFromBackendAddress, &out.PickHostNameFromBackendAddress + *out = new(bool) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.ProbeName != nil { + in, out := &in.ProbeName, &out.ProbeName + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.RequestTimeout != nil { + in, out := &in.RequestTimeout, &out.RequestTimeout + *out = new(float64) + **out = **in + } + if in.TrustedRootCertificateNames != nil { + in, out := &in.TrustedRootCertificateNames, &out.TrustedRootCertificateNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = 
new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendHTTPSettingsInitParameters. +func (in *BackendHTTPSettingsInitParameters) DeepCopy() *BackendHTTPSettingsInitParameters { + if in == nil { + return nil + } + out := new(BackendHTTPSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendHTTPSettingsObservation) DeepCopyInto(out *BackendHTTPSettingsObservation) { + *out = *in + if in.AffinityCookieName != nil { + in, out := &in.AffinityCookieName, &out.AffinityCookieName + *out = new(string) + **out = **in + } + if in.AuthenticationCertificate != nil { + in, out := &in.AuthenticationCertificate, &out.AuthenticationCertificate + *out = make([]BackendHTTPSettingsAuthenticationCertificateObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ConnectionDraining != nil { + in, out := &in.ConnectionDraining, &out.ConnectionDraining + *out = new(ConnectionDrainingObservation) + (*in).DeepCopyInto(*out) + } + if in.CookieBasedAffinity != nil { + in, out := &in.CookieBasedAffinity, &out.CookieBasedAffinity + *out = new(string) + **out = **in + } + if in.HostName != nil { + in, out := &in.HostName, &out.HostName + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.PickHostNameFromBackendAddress != nil { + in, out := &in.PickHostNameFromBackendAddress, &out.PickHostNameFromBackendAddress + *out = new(bool) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.ProbeID != 
nil { + in, out := &in.ProbeID, &out.ProbeID + *out = new(string) + **out = **in + } + if in.ProbeName != nil { + in, out := &in.ProbeName, &out.ProbeName + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.RequestTimeout != nil { + in, out := &in.RequestTimeout, &out.RequestTimeout + *out = new(float64) + **out = **in + } + if in.TrustedRootCertificateNames != nil { + in, out := &in.TrustedRootCertificateNames, &out.TrustedRootCertificateNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendHTTPSettingsObservation. +func (in *BackendHTTPSettingsObservation) DeepCopy() *BackendHTTPSettingsObservation { + if in == nil { + return nil + } + out := new(BackendHTTPSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackendHTTPSettingsParameters) DeepCopyInto(out *BackendHTTPSettingsParameters) { + *out = *in + if in.AffinityCookieName != nil { + in, out := &in.AffinityCookieName, &out.AffinityCookieName + *out = new(string) + **out = **in + } + if in.AuthenticationCertificate != nil { + in, out := &in.AuthenticationCertificate, &out.AuthenticationCertificate + *out = make([]BackendHTTPSettingsAuthenticationCertificateParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ConnectionDraining != nil { + in, out := &in.ConnectionDraining, &out.ConnectionDraining + *out = new(ConnectionDrainingParameters) + (*in).DeepCopyInto(*out) + } + if in.CookieBasedAffinity != nil { + in, out := &in.CookieBasedAffinity, &out.CookieBasedAffinity + *out = new(string) + **out = **in + } + if in.HostName != nil { + in, out := &in.HostName, &out.HostName + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.PickHostNameFromBackendAddress != nil { + in, out := &in.PickHostNameFromBackendAddress, &out.PickHostNameFromBackendAddress + *out = new(bool) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.ProbeName != nil { + in, out := &in.ProbeName, &out.ProbeName + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.RequestTimeout != nil { + in, out := &in.RequestTimeout, &out.RequestTimeout + *out = new(float64) + **out = **in + } + if in.TrustedRootCertificateNames != nil { + in, out := &in.TrustedRootCertificateNames, &out.TrustedRootCertificateNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = 
**in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendHTTPSettingsParameters. +func (in *BackendHTTPSettingsParameters) DeepCopy() *BackendHTTPSettingsParameters { + if in == nil { + return nil + } + out := new(BackendHTTPSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendInitParameters) DeepCopyInto(out *BackendInitParameters) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.HTTPPort != nil { + in, out := &in.HTTPPort, &out.HTTPPort + *out = new(float64) + **out = **in + } + if in.HTTPSPort != nil { + in, out := &in.HTTPSPort, &out.HTTPSPort + *out = new(float64) + **out = **in + } + if in.HostHeader != nil { + in, out := &in.HostHeader, &out.HostHeader + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendInitParameters. +func (in *BackendInitParameters) DeepCopy() *BackendInitParameters { + if in == nil { + return nil + } + out := new(BackendInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackendObservation) DeepCopyInto(out *BackendObservation) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.HTTPPort != nil { + in, out := &in.HTTPPort, &out.HTTPPort + *out = new(float64) + **out = **in + } + if in.HTTPSPort != nil { + in, out := &in.HTTPSPort, &out.HTTPSPort + *out = new(float64) + **out = **in + } + if in.HostHeader != nil { + in, out := &in.HostHeader, &out.HostHeader + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendObservation. +func (in *BackendObservation) DeepCopy() *BackendObservation { + if in == nil { + return nil + } + out := new(BackendObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackendParameters) DeepCopyInto(out *BackendParameters) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.HTTPPort != nil { + in, out := &in.HTTPPort, &out.HTTPPort + *out = new(float64) + **out = **in + } + if in.HTTPSPort != nil { + in, out := &in.HTTPSPort, &out.HTTPSPort + *out = new(float64) + **out = **in + } + if in.HostHeader != nil { + in, out := &in.HostHeader, &out.HostHeader + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendParameters. +func (in *BackendParameters) DeepCopy() *BackendParameters { + if in == nil { + return nil + } + out := new(BackendParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackendPoolHealthProbeInitParameters) DeepCopyInto(out *BackendPoolHealthProbeInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.IntervalInSeconds != nil { + in, out := &in.IntervalInSeconds, &out.IntervalInSeconds + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.ProbeMethod != nil { + in, out := &in.ProbeMethod, &out.ProbeMethod + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendPoolHealthProbeInitParameters. +func (in *BackendPoolHealthProbeInitParameters) DeepCopy() *BackendPoolHealthProbeInitParameters { + if in == nil { + return nil + } + out := new(BackendPoolHealthProbeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackendPoolHealthProbeObservation) DeepCopyInto(out *BackendPoolHealthProbeObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IntervalInSeconds != nil { + in, out := &in.IntervalInSeconds, &out.IntervalInSeconds + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.ProbeMethod != nil { + in, out := &in.ProbeMethod, &out.ProbeMethod + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendPoolHealthProbeObservation. +func (in *BackendPoolHealthProbeObservation) DeepCopy() *BackendPoolHealthProbeObservation { + if in == nil { + return nil + } + out := new(BackendPoolHealthProbeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackendPoolHealthProbeParameters) DeepCopyInto(out *BackendPoolHealthProbeParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.IntervalInSeconds != nil { + in, out := &in.IntervalInSeconds, &out.IntervalInSeconds + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.ProbeMethod != nil { + in, out := &in.ProbeMethod, &out.ProbeMethod + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendPoolHealthProbeParameters. +func (in *BackendPoolHealthProbeParameters) DeepCopy() *BackendPoolHealthProbeParameters { + if in == nil { + return nil + } + out := new(BackendPoolHealthProbeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendPoolInitParameters) DeepCopyInto(out *BackendPoolInitParameters) { + *out = *in + if in.Backend != nil { + in, out := &in.Backend, &out.Backend + *out = make([]BackendInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HealthProbeName != nil { + in, out := &in.HealthProbeName, &out.HealthProbeName + *out = new(string) + **out = **in + } + if in.LoadBalancingName != nil { + in, out := &in.LoadBalancingName, &out.LoadBalancingName + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendPoolInitParameters. 
+func (in *BackendPoolInitParameters) DeepCopy() *BackendPoolInitParameters { + if in == nil { + return nil + } + out := new(BackendPoolInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendPoolLoadBalancingInitParameters) DeepCopyInto(out *BackendPoolLoadBalancingInitParameters) { + *out = *in + if in.AdditionalLatencyMilliseconds != nil { + in, out := &in.AdditionalLatencyMilliseconds, &out.AdditionalLatencyMilliseconds + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SampleSize != nil { + in, out := &in.SampleSize, &out.SampleSize + *out = new(float64) + **out = **in + } + if in.SuccessfulSamplesRequired != nil { + in, out := &in.SuccessfulSamplesRequired, &out.SuccessfulSamplesRequired + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendPoolLoadBalancingInitParameters. +func (in *BackendPoolLoadBalancingInitParameters) DeepCopy() *BackendPoolLoadBalancingInitParameters { + if in == nil { + return nil + } + out := new(BackendPoolLoadBalancingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackendPoolLoadBalancingObservation) DeepCopyInto(out *BackendPoolLoadBalancingObservation) { + *out = *in + if in.AdditionalLatencyMilliseconds != nil { + in, out := &in.AdditionalLatencyMilliseconds, &out.AdditionalLatencyMilliseconds + *out = new(float64) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SampleSize != nil { + in, out := &in.SampleSize, &out.SampleSize + *out = new(float64) + **out = **in + } + if in.SuccessfulSamplesRequired != nil { + in, out := &in.SuccessfulSamplesRequired, &out.SuccessfulSamplesRequired + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendPoolLoadBalancingObservation. +func (in *BackendPoolLoadBalancingObservation) DeepCopy() *BackendPoolLoadBalancingObservation { + if in == nil { + return nil + } + out := new(BackendPoolLoadBalancingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackendPoolLoadBalancingParameters) DeepCopyInto(out *BackendPoolLoadBalancingParameters) { + *out = *in + if in.AdditionalLatencyMilliseconds != nil { + in, out := &in.AdditionalLatencyMilliseconds, &out.AdditionalLatencyMilliseconds + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SampleSize != nil { + in, out := &in.SampleSize, &out.SampleSize + *out = new(float64) + **out = **in + } + if in.SuccessfulSamplesRequired != nil { + in, out := &in.SuccessfulSamplesRequired, &out.SuccessfulSamplesRequired + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendPoolLoadBalancingParameters. +func (in *BackendPoolLoadBalancingParameters) DeepCopy() *BackendPoolLoadBalancingParameters { + if in == nil { + return nil + } + out := new(BackendPoolLoadBalancingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendPoolObservation) DeepCopyInto(out *BackendPoolObservation) { + *out = *in + if in.Backend != nil { + in, out := &in.Backend, &out.Backend + *out = make([]BackendObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HealthProbeName != nil { + in, out := &in.HealthProbeName, &out.HealthProbeName + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LoadBalancingName != nil { + in, out := &in.LoadBalancingName, &out.LoadBalancingName + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendPoolObservation. 
+func (in *BackendPoolObservation) DeepCopy() *BackendPoolObservation { + if in == nil { + return nil + } + out := new(BackendPoolObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendPoolParameters) DeepCopyInto(out *BackendPoolParameters) { + *out = *in + if in.Backend != nil { + in, out := &in.Backend, &out.Backend + *out = make([]BackendParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HealthProbeName != nil { + in, out := &in.HealthProbeName, &out.HealthProbeName + *out = new(string) + **out = **in + } + if in.LoadBalancingName != nil { + in, out := &in.LoadBalancingName, &out.LoadBalancingName + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendPoolParameters. +func (in *BackendPoolParameters) DeepCopy() *BackendPoolParameters { + if in == nil { + return nil + } + out := new(BackendPoolParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendPoolSettingsInitParameters) DeepCopyInto(out *BackendPoolSettingsInitParameters) { + *out = *in + if in.BackendPoolsSendReceiveTimeoutSeconds != nil { + in, out := &in.BackendPoolsSendReceiveTimeoutSeconds, &out.BackendPoolsSendReceiveTimeoutSeconds + *out = new(float64) + **out = **in + } + if in.EnforceBackendPoolsCertificateNameCheck != nil { + in, out := &in.EnforceBackendPoolsCertificateNameCheck, &out.EnforceBackendPoolsCertificateNameCheck + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendPoolSettingsInitParameters. 
+func (in *BackendPoolSettingsInitParameters) DeepCopy() *BackendPoolSettingsInitParameters { + if in == nil { + return nil + } + out := new(BackendPoolSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendPoolSettingsObservation) DeepCopyInto(out *BackendPoolSettingsObservation) { + *out = *in + if in.BackendPoolsSendReceiveTimeoutSeconds != nil { + in, out := &in.BackendPoolsSendReceiveTimeoutSeconds, &out.BackendPoolsSendReceiveTimeoutSeconds + *out = new(float64) + **out = **in + } + if in.EnforceBackendPoolsCertificateNameCheck != nil { + in, out := &in.EnforceBackendPoolsCertificateNameCheck, &out.EnforceBackendPoolsCertificateNameCheck + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendPoolSettingsObservation. +func (in *BackendPoolSettingsObservation) DeepCopy() *BackendPoolSettingsObservation { + if in == nil { + return nil + } + out := new(BackendPoolSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendPoolSettingsParameters) DeepCopyInto(out *BackendPoolSettingsParameters) { + *out = *in + if in.BackendPoolsSendReceiveTimeoutSeconds != nil { + in, out := &in.BackendPoolsSendReceiveTimeoutSeconds, &out.BackendPoolsSendReceiveTimeoutSeconds + *out = new(float64) + **out = **in + } + if in.EnforceBackendPoolsCertificateNameCheck != nil { + in, out := &in.EnforceBackendPoolsCertificateNameCheck, &out.EnforceBackendPoolsCertificateNameCheck + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendPoolSettingsParameters. 
+func (in *BackendPoolSettingsParameters) DeepCopy() *BackendPoolSettingsParameters { + if in == nil { + return nil + } + out := new(BackendPoolSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientRevokedCertificateInitParameters) DeepCopyInto(out *ClientRevokedCertificateInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Thumbprint != nil { + in, out := &in.Thumbprint, &out.Thumbprint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientRevokedCertificateInitParameters. +func (in *ClientRevokedCertificateInitParameters) DeepCopy() *ClientRevokedCertificateInitParameters { + if in == nil { + return nil + } + out := new(ClientRevokedCertificateInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientRevokedCertificateObservation) DeepCopyInto(out *ClientRevokedCertificateObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Thumbprint != nil { + in, out := &in.Thumbprint, &out.Thumbprint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientRevokedCertificateObservation. +func (in *ClientRevokedCertificateObservation) DeepCopy() *ClientRevokedCertificateObservation { + if in == nil { + return nil + } + out := new(ClientRevokedCertificateObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClientRevokedCertificateParameters) DeepCopyInto(out *ClientRevokedCertificateParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Thumbprint != nil { + in, out := &in.Thumbprint, &out.Thumbprint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientRevokedCertificateParameters. +func (in *ClientRevokedCertificateParameters) DeepCopy() *ClientRevokedCertificateParameters { + if in == nil { + return nil + } + out := new(ClientRevokedCertificateParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientRootCertificateInitParameters) DeepCopyInto(out *ClientRootCertificateInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PublicCertData != nil { + in, out := &in.PublicCertData, &out.PublicCertData + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientRootCertificateInitParameters. +func (in *ClientRootCertificateInitParameters) DeepCopy() *ClientRootCertificateInitParameters { + if in == nil { + return nil + } + out := new(ClientRootCertificateInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClientRootCertificateObservation) DeepCopyInto(out *ClientRootCertificateObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PublicCertData != nil { + in, out := &in.PublicCertData, &out.PublicCertData + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientRootCertificateObservation. +func (in *ClientRootCertificateObservation) DeepCopy() *ClientRootCertificateObservation { + if in == nil { + return nil + } + out := new(ClientRootCertificateObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientRootCertificateParameters) DeepCopyInto(out *ClientRootCertificateParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PublicCertData != nil { + in, out := &in.PublicCertData, &out.PublicCertData + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientRootCertificateParameters. +func (in *ClientRootCertificateParameters) DeepCopy() *ClientRootCertificateParameters { + if in == nil { + return nil + } + out := new(ClientRootCertificateParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConditionInitParameters) DeepCopyInto(out *ConditionInitParameters) { + *out = *in + if in.IgnoreCase != nil { + in, out := &in.IgnoreCase, &out.IgnoreCase + *out = new(bool) + **out = **in + } + if in.Negate != nil { + in, out := &in.Negate, &out.Negate + *out = new(bool) + **out = **in + } + if in.Pattern != nil { + in, out := &in.Pattern, &out.Pattern + *out = new(string) + **out = **in + } + if in.Variable != nil { + in, out := &in.Variable, &out.Variable + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionInitParameters. +func (in *ConditionInitParameters) DeepCopy() *ConditionInitParameters { + if in == nil { + return nil + } + out := new(ConditionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionObservation) DeepCopyInto(out *ConditionObservation) { + *out = *in + if in.IgnoreCase != nil { + in, out := &in.IgnoreCase, &out.IgnoreCase + *out = new(bool) + **out = **in + } + if in.Negate != nil { + in, out := &in.Negate, &out.Negate + *out = new(bool) + **out = **in + } + if in.Pattern != nil { + in, out := &in.Pattern, &out.Pattern + *out = new(string) + **out = **in + } + if in.Variable != nil { + in, out := &in.Variable, &out.Variable + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionObservation. +func (in *ConditionObservation) DeepCopy() *ConditionObservation { + if in == nil { + return nil + } + out := new(ConditionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConditionParameters) DeepCopyInto(out *ConditionParameters) { + *out = *in + if in.IgnoreCase != nil { + in, out := &in.IgnoreCase, &out.IgnoreCase + *out = new(bool) + **out = **in + } + if in.Negate != nil { + in, out := &in.Negate, &out.Negate + *out = new(bool) + **out = **in + } + if in.Pattern != nil { + in, out := &in.Pattern, &out.Pattern + *out = new(string) + **out = **in + } + if in.Variable != nil { + in, out := &in.Variable, &out.Variable + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionParameters. +func (in *ConditionParameters) DeepCopy() *ConditionParameters { + if in == nil { + return nil + } + out := new(ConditionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionConfigurationInitParameters) DeepCopyInto(out *ConnectionConfigurationInitParameters) { + *out = *in + if in.InternetSecurityEnabled != nil { + in, out := &in.InternetSecurityEnabled, &out.InternetSecurityEnabled + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Route != nil { + in, out := &in.Route, &out.Route + *out = new(RouteInitParameters) + (*in).DeepCopyInto(*out) + } + if in.VPNClientAddressPool != nil { + in, out := &in.VPNClientAddressPool, &out.VPNClientAddressPool + *out = new(VPNClientAddressPoolInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionConfigurationInitParameters. 
+func (in *ConnectionConfigurationInitParameters) DeepCopy() *ConnectionConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ConnectionConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionConfigurationObservation) DeepCopyInto(out *ConnectionConfigurationObservation) { + *out = *in + if in.InternetSecurityEnabled != nil { + in, out := &in.InternetSecurityEnabled, &out.InternetSecurityEnabled + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Route != nil { + in, out := &in.Route, &out.Route + *out = new(RouteObservation) + (*in).DeepCopyInto(*out) + } + if in.VPNClientAddressPool != nil { + in, out := &in.VPNClientAddressPool, &out.VPNClientAddressPool + *out = new(VPNClientAddressPoolObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionConfigurationObservation. +func (in *ConnectionConfigurationObservation) DeepCopy() *ConnectionConfigurationObservation { + if in == nil { + return nil + } + out := new(ConnectionConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConnectionConfigurationParameters) DeepCopyInto(out *ConnectionConfigurationParameters) { + *out = *in + if in.InternetSecurityEnabled != nil { + in, out := &in.InternetSecurityEnabled, &out.InternetSecurityEnabled + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Route != nil { + in, out := &in.Route, &out.Route + *out = new(RouteParameters) + (*in).DeepCopyInto(*out) + } + if in.VPNClientAddressPool != nil { + in, out := &in.VPNClientAddressPool, &out.VPNClientAddressPool + *out = new(VPNClientAddressPoolParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionConfigurationParameters. +func (in *ConnectionConfigurationParameters) DeepCopy() *ConnectionConfigurationParameters { + if in == nil { + return nil + } + out := new(ConnectionConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionDrainingInitParameters) DeepCopyInto(out *ConnectionDrainingInitParameters) { + *out = *in + if in.DrainTimeoutSec != nil { + in, out := &in.DrainTimeoutSec, &out.DrainTimeoutSec + *out = new(float64) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionDrainingInitParameters. +func (in *ConnectionDrainingInitParameters) DeepCopy() *ConnectionDrainingInitParameters { + if in == nil { + return nil + } + out := new(ConnectionDrainingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConnectionDrainingObservation) DeepCopyInto(out *ConnectionDrainingObservation) { + *out = *in + if in.DrainTimeoutSec != nil { + in, out := &in.DrainTimeoutSec, &out.DrainTimeoutSec + *out = new(float64) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionDrainingObservation. +func (in *ConnectionDrainingObservation) DeepCopy() *ConnectionDrainingObservation { + if in == nil { + return nil + } + out := new(ConnectionDrainingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionDrainingParameters) DeepCopyInto(out *ConnectionDrainingParameters) { + *out = *in + if in.DrainTimeoutSec != nil { + in, out := &in.DrainTimeoutSec, &out.DrainTimeoutSec + *out = new(float64) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionDrainingParameters. +func (in *ConnectionDrainingParameters) DeepCopy() *ConnectionDrainingParameters { + if in == nil { + return nil + } + out := new(ConnectionDrainingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionMonitor) DeepCopyInto(out *ConnectionMonitor) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionMonitor. 
+func (in *ConnectionMonitor) DeepCopy() *ConnectionMonitor { + if in == nil { + return nil + } + out := new(ConnectionMonitor) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConnectionMonitor) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionMonitorInitParameters) DeepCopyInto(out *ConnectionMonitorInitParameters) { + *out = *in + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = make([]EndpointInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Notes != nil { + in, out := &in.Notes, &out.Notes + *out = new(string) + **out = **in + } + if in.OutputWorkspaceResourceIds != nil { + in, out := &in.OutputWorkspaceResourceIds, &out.OutputWorkspaceResourceIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TestConfiguration != nil { + in, out := &in.TestConfiguration, &out.TestConfiguration + *out = make([]TestConfigurationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TestGroup != nil { + in, out := &in.TestGroup, &out.TestGroup + *out = make([]TestGroupInitParameters, len(*in)) + for i := range *in { + 
(*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionMonitorInitParameters. +func (in *ConnectionMonitorInitParameters) DeepCopy() *ConnectionMonitorInitParameters { + if in == nil { + return nil + } + out := new(ConnectionMonitorInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionMonitorList) DeepCopyInto(out *ConnectionMonitorList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ConnectionMonitor, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionMonitorList. +func (in *ConnectionMonitorList) DeepCopy() *ConnectionMonitorList { + if in == nil { + return nil + } + out := new(ConnectionMonitorList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConnectionMonitorList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConnectionMonitorObservation) DeepCopyInto(out *ConnectionMonitorObservation) { + *out = *in + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = make([]EndpointObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.NetworkWatcherID != nil { + in, out := &in.NetworkWatcherID, &out.NetworkWatcherID + *out = new(string) + **out = **in + } + if in.Notes != nil { + in, out := &in.Notes, &out.Notes + *out = new(string) + **out = **in + } + if in.OutputWorkspaceResourceIds != nil { + in, out := &in.OutputWorkspaceResourceIds, &out.OutputWorkspaceResourceIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TestConfiguration != nil { + in, out := &in.TestConfiguration, &out.TestConfiguration + *out = make([]TestConfigurationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TestGroup != nil { + in, out := &in.TestGroup, &out.TestGroup + *out = make([]TestGroupObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionMonitorObservation. 
+func (in *ConnectionMonitorObservation) DeepCopy() *ConnectionMonitorObservation { + if in == nil { + return nil + } + out := new(ConnectionMonitorObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionMonitorParameters) DeepCopyInto(out *ConnectionMonitorParameters) { + *out = *in + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = make([]EndpointParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.NetworkWatcherID != nil { + in, out := &in.NetworkWatcherID, &out.NetworkWatcherID + *out = new(string) + **out = **in + } + if in.NetworkWatcherIDRef != nil { + in, out := &in.NetworkWatcherIDRef, &out.NetworkWatcherIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NetworkWatcherIDSelector != nil { + in, out := &in.NetworkWatcherIDSelector, &out.NetworkWatcherIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Notes != nil { + in, out := &in.Notes, &out.Notes + *out = new(string) + **out = **in + } + if in.OutputWorkspaceResourceIds != nil { + in, out := &in.OutputWorkspaceResourceIds, &out.OutputWorkspaceResourceIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TestConfiguration != nil { + in, out := &in.TestConfiguration, &out.TestConfiguration + *out = 
make([]TestConfigurationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TestGroup != nil { + in, out := &in.TestGroup, &out.TestGroup + *out = make([]TestGroupParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionMonitorParameters. +func (in *ConnectionMonitorParameters) DeepCopy() *ConnectionMonitorParameters { + if in == nil { + return nil + } + out := new(ConnectionMonitorParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionMonitorSpec) DeepCopyInto(out *ConnectionMonitorSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionMonitorSpec. +func (in *ConnectionMonitorSpec) DeepCopy() *ConnectionMonitorSpec { + if in == nil { + return nil + } + out := new(ConnectionMonitorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionMonitorStatus) DeepCopyInto(out *ConnectionMonitorStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionMonitorStatus. +func (in *ConnectionMonitorStatus) DeepCopy() *ConnectionMonitorStatus { + if in == nil { + return nil + } + out := new(ConnectionMonitorStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *ContainerNetworkInterfaceIPConfigurationInitParameters) DeepCopyInto(out *ContainerNetworkInterfaceIPConfigurationInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerNetworkInterfaceIPConfigurationInitParameters. +func (in *ContainerNetworkInterfaceIPConfigurationInitParameters) DeepCopy() *ContainerNetworkInterfaceIPConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ContainerNetworkInterfaceIPConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerNetworkInterfaceIPConfigurationObservation) DeepCopyInto(out *ContainerNetworkInterfaceIPConfigurationObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerNetworkInterfaceIPConfigurationObservation. 
+func (in *ContainerNetworkInterfaceIPConfigurationObservation) DeepCopy() *ContainerNetworkInterfaceIPConfigurationObservation { + if in == nil { + return nil + } + out := new(ContainerNetworkInterfaceIPConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerNetworkInterfaceIPConfigurationParameters) DeepCopyInto(out *ContainerNetworkInterfaceIPConfigurationParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerNetworkInterfaceIPConfigurationParameters. +func (in *ContainerNetworkInterfaceIPConfigurationParameters) DeepCopy() *ContainerNetworkInterfaceIPConfigurationParameters { + if in == nil { + return nil + } + out := new(ContainerNetworkInterfaceIPConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContainerNetworkInterfaceInitParameters) DeepCopyInto(out *ContainerNetworkInterfaceInitParameters) { + *out = *in + if in.IPConfiguration != nil { + in, out := &in.IPConfiguration, &out.IPConfiguration + *out = make([]ContainerNetworkInterfaceIPConfigurationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerNetworkInterfaceInitParameters. +func (in *ContainerNetworkInterfaceInitParameters) DeepCopy() *ContainerNetworkInterfaceInitParameters { + if in == nil { + return nil + } + out := new(ContainerNetworkInterfaceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerNetworkInterfaceObservation) DeepCopyInto(out *ContainerNetworkInterfaceObservation) { + *out = *in + if in.IPConfiguration != nil { + in, out := &in.IPConfiguration, &out.IPConfiguration + *out = make([]ContainerNetworkInterfaceIPConfigurationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerNetworkInterfaceObservation. +func (in *ContainerNetworkInterfaceObservation) DeepCopy() *ContainerNetworkInterfaceObservation { + if in == nil { + return nil + } + out := new(ContainerNetworkInterfaceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContainerNetworkInterfaceParameters) DeepCopyInto(out *ContainerNetworkInterfaceParameters) { + *out = *in + if in.IPConfiguration != nil { + in, out := &in.IPConfiguration, &out.IPConfiguration + *out = make([]ContainerNetworkInterfaceIPConfigurationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerNetworkInterfaceParameters. +func (in *ContainerNetworkInterfaceParameters) DeepCopy() *ContainerNetworkInterfaceParameters { + if in == nil { + return nil + } + out := new(ContainerNetworkInterfaceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CrossTenantScopesInitParameters) DeepCopyInto(out *CrossTenantScopesInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrossTenantScopesInitParameters. +func (in *CrossTenantScopesInitParameters) DeepCopy() *CrossTenantScopesInitParameters { + if in == nil { + return nil + } + out := new(CrossTenantScopesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CrossTenantScopesObservation) DeepCopyInto(out *CrossTenantScopesObservation) { + *out = *in + if in.ManagementGroups != nil { + in, out := &in.ManagementGroups, &out.ManagementGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Subscriptions != nil { + in, out := &in.Subscriptions, &out.Subscriptions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrossTenantScopesObservation. +func (in *CrossTenantScopesObservation) DeepCopy() *CrossTenantScopesObservation { + if in == nil { + return nil + } + out := new(CrossTenantScopesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CrossTenantScopesParameters) DeepCopyInto(out *CrossTenantScopesParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrossTenantScopesParameters. +func (in *CrossTenantScopesParameters) DeepCopy() *CrossTenantScopesParameters { + if in == nil { + return nil + } + out := new(CrossTenantScopesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomBGPAddressInitParameters) DeepCopyInto(out *CustomBGPAddressInitParameters) { + *out = *in + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.IPConfigurationID != nil { + in, out := &in.IPConfigurationID, &out.IPConfigurationID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomBGPAddressInitParameters. +func (in *CustomBGPAddressInitParameters) DeepCopy() *CustomBGPAddressInitParameters { + if in == nil { + return nil + } + out := new(CustomBGPAddressInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomBGPAddressObservation) DeepCopyInto(out *CustomBGPAddressObservation) { + *out = *in + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.IPConfigurationID != nil { + in, out := &in.IPConfigurationID, &out.IPConfigurationID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomBGPAddressObservation. +func (in *CustomBGPAddressObservation) DeepCopy() *CustomBGPAddressObservation { + if in == nil { + return nil + } + out := new(CustomBGPAddressObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomBGPAddressParameters) DeepCopyInto(out *CustomBGPAddressParameters) { + *out = *in + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.IPConfigurationID != nil { + in, out := &in.IPConfigurationID, &out.IPConfigurationID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomBGPAddressParameters. +func (in *CustomBGPAddressParameters) DeepCopy() *CustomBGPAddressParameters { + if in == nil { + return nil + } + out := new(CustomBGPAddressParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomBGPAddressesInitParameters) DeepCopyInto(out *CustomBGPAddressesInitParameters) { + *out = *in + if in.Primary != nil { + in, out := &in.Primary, &out.Primary + *out = new(string) + **out = **in + } + if in.Secondary != nil { + in, out := &in.Secondary, &out.Secondary + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomBGPAddressesInitParameters. +func (in *CustomBGPAddressesInitParameters) DeepCopy() *CustomBGPAddressesInitParameters { + if in == nil { + return nil + } + out := new(CustomBGPAddressesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomBGPAddressesObservation) DeepCopyInto(out *CustomBGPAddressesObservation) { + *out = *in + if in.Primary != nil { + in, out := &in.Primary, &out.Primary + *out = new(string) + **out = **in + } + if in.Secondary != nil { + in, out := &in.Secondary, &out.Secondary + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomBGPAddressesObservation. +func (in *CustomBGPAddressesObservation) DeepCopy() *CustomBGPAddressesObservation { + if in == nil { + return nil + } + out := new(CustomBGPAddressesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomBGPAddressesParameters) DeepCopyInto(out *CustomBGPAddressesParameters) { + *out = *in + if in.Primary != nil { + in, out := &in.Primary, &out.Primary + *out = new(string) + **out = **in + } + if in.Secondary != nil { + in, out := &in.Secondary, &out.Secondary + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomBGPAddressesParameters. +func (in *CustomBGPAddressesParameters) DeepCopy() *CustomBGPAddressesParameters { + if in == nil { + return nil + } + out := new(CustomBGPAddressesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomDNSConfigsInitParameters) DeepCopyInto(out *CustomDNSConfigsInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomDNSConfigsInitParameters. 
+func (in *CustomDNSConfigsInitParameters) DeepCopy() *CustomDNSConfigsInitParameters { + if in == nil { + return nil + } + out := new(CustomDNSConfigsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomDNSConfigsObservation) DeepCopyInto(out *CustomDNSConfigsObservation) { + *out = *in + if in.Fqdn != nil { + in, out := &in.Fqdn, &out.Fqdn + *out = new(string) + **out = **in + } + if in.IPAddresses != nil { + in, out := &in.IPAddresses, &out.IPAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomDNSConfigsObservation. +func (in *CustomDNSConfigsObservation) DeepCopy() *CustomDNSConfigsObservation { + if in == nil { + return nil + } + out := new(CustomDNSConfigsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomDNSConfigsParameters) DeepCopyInto(out *CustomDNSConfigsParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomDNSConfigsParameters. +func (in *CustomDNSConfigsParameters) DeepCopy() *CustomDNSConfigsParameters { + if in == nil { + return nil + } + out := new(CustomDNSConfigsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomErrorConfigurationInitParameters) DeepCopyInto(out *CustomErrorConfigurationInitParameters) { + *out = *in + if in.CustomErrorPageURL != nil { + in, out := &in.CustomErrorPageURL, &out.CustomErrorPageURL + *out = new(string) + **out = **in + } + if in.StatusCode != nil { + in, out := &in.StatusCode, &out.StatusCode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomErrorConfigurationInitParameters. +func (in *CustomErrorConfigurationInitParameters) DeepCopy() *CustomErrorConfigurationInitParameters { + if in == nil { + return nil + } + out := new(CustomErrorConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomErrorConfigurationObservation) DeepCopyInto(out *CustomErrorConfigurationObservation) { + *out = *in + if in.CustomErrorPageURL != nil { + in, out := &in.CustomErrorPageURL, &out.CustomErrorPageURL + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.StatusCode != nil { + in, out := &in.StatusCode, &out.StatusCode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomErrorConfigurationObservation. +func (in *CustomErrorConfigurationObservation) DeepCopy() *CustomErrorConfigurationObservation { + if in == nil { + return nil + } + out := new(CustomErrorConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomErrorConfigurationParameters) DeepCopyInto(out *CustomErrorConfigurationParameters) { + *out = *in + if in.CustomErrorPageURL != nil { + in, out := &in.CustomErrorPageURL, &out.CustomErrorPageURL + *out = new(string) + **out = **in + } + if in.StatusCode != nil { + in, out := &in.StatusCode, &out.StatusCode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomErrorConfigurationParameters. +func (in *CustomErrorConfigurationParameters) DeepCopy() *CustomErrorConfigurationParameters { + if in == nil { + return nil + } + out := new(CustomErrorConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomHTTPSConfigurationInitParameters) DeepCopyInto(out *CustomHTTPSConfigurationInitParameters) { + *out = *in + if in.AzureKeyVaultCertificateSecretName != nil { + in, out := &in.AzureKeyVaultCertificateSecretName, &out.AzureKeyVaultCertificateSecretName + *out = new(string) + **out = **in + } + if in.AzureKeyVaultCertificateSecretVersion != nil { + in, out := &in.AzureKeyVaultCertificateSecretVersion, &out.AzureKeyVaultCertificateSecretVersion + *out = new(string) + **out = **in + } + if in.AzureKeyVaultCertificateVaultID != nil { + in, out := &in.AzureKeyVaultCertificateVaultID, &out.AzureKeyVaultCertificateVaultID + *out = new(string) + **out = **in + } + if in.AzureKeyVaultCertificateVaultIDRef != nil { + in, out := &in.AzureKeyVaultCertificateVaultIDRef, &out.AzureKeyVaultCertificateVaultIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AzureKeyVaultCertificateVaultIDSelector != nil { + in, out := &in.AzureKeyVaultCertificateVaultIDSelector, &out.AzureKeyVaultCertificateVaultIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.CertificateSource != nil { + in, out := 
&in.CertificateSource, &out.CertificateSource + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomHTTPSConfigurationInitParameters. +func (in *CustomHTTPSConfigurationInitParameters) DeepCopy() *CustomHTTPSConfigurationInitParameters { + if in == nil { + return nil + } + out := new(CustomHTTPSConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomHTTPSConfigurationObservation) DeepCopyInto(out *CustomHTTPSConfigurationObservation) { + *out = *in + if in.AzureKeyVaultCertificateSecretName != nil { + in, out := &in.AzureKeyVaultCertificateSecretName, &out.AzureKeyVaultCertificateSecretName + *out = new(string) + **out = **in + } + if in.AzureKeyVaultCertificateSecretVersion != nil { + in, out := &in.AzureKeyVaultCertificateSecretVersion, &out.AzureKeyVaultCertificateSecretVersion + *out = new(string) + **out = **in + } + if in.AzureKeyVaultCertificateVaultID != nil { + in, out := &in.AzureKeyVaultCertificateVaultID, &out.AzureKeyVaultCertificateVaultID + *out = new(string) + **out = **in + } + if in.CertificateSource != nil { + in, out := &in.CertificateSource, &out.CertificateSource + *out = new(string) + **out = **in + } + if in.MinimumTLSVersion != nil { + in, out := &in.MinimumTLSVersion, &out.MinimumTLSVersion + *out = new(string) + **out = **in + } + if in.ProvisioningState != nil { + in, out := &in.ProvisioningState, &out.ProvisioningState + *out = new(string) + **out = **in + } + if in.ProvisioningSubstate != nil { + in, out := &in.ProvisioningSubstate, &out.ProvisioningSubstate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomHTTPSConfigurationObservation. 
+func (in *CustomHTTPSConfigurationObservation) DeepCopy() *CustomHTTPSConfigurationObservation { + if in == nil { + return nil + } + out := new(CustomHTTPSConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomHTTPSConfigurationParameters) DeepCopyInto(out *CustomHTTPSConfigurationParameters) { + *out = *in + if in.AzureKeyVaultCertificateSecretName != nil { + in, out := &in.AzureKeyVaultCertificateSecretName, &out.AzureKeyVaultCertificateSecretName + *out = new(string) + **out = **in + } + if in.AzureKeyVaultCertificateSecretVersion != nil { + in, out := &in.AzureKeyVaultCertificateSecretVersion, &out.AzureKeyVaultCertificateSecretVersion + *out = new(string) + **out = **in + } + if in.AzureKeyVaultCertificateVaultID != nil { + in, out := &in.AzureKeyVaultCertificateVaultID, &out.AzureKeyVaultCertificateVaultID + *out = new(string) + **out = **in + } + if in.AzureKeyVaultCertificateVaultIDRef != nil { + in, out := &in.AzureKeyVaultCertificateVaultIDRef, &out.AzureKeyVaultCertificateVaultIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AzureKeyVaultCertificateVaultIDSelector != nil { + in, out := &in.AzureKeyVaultCertificateVaultIDSelector, &out.AzureKeyVaultCertificateVaultIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.CertificateSource != nil { + in, out := &in.CertificateSource, &out.CertificateSource + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomHTTPSConfigurationParameters. 
+func (in *CustomHTTPSConfigurationParameters) DeepCopy() *CustomHTTPSConfigurationParameters { + if in == nil { + return nil + } + out := new(CustomHTTPSConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomHeaderInitParameters) DeepCopyInto(out *CustomHeaderInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomHeaderInitParameters. +func (in *CustomHeaderInitParameters) DeepCopy() *CustomHeaderInitParameters { + if in == nil { + return nil + } + out := new(CustomHeaderInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomHeaderObservation) DeepCopyInto(out *CustomHeaderObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomHeaderObservation. +func (in *CustomHeaderObservation) DeepCopy() *CustomHeaderObservation { + if in == nil { + return nil + } + out := new(CustomHeaderObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomHeaderParameters) DeepCopyInto(out *CustomHeaderParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomHeaderParameters. +func (in *CustomHeaderParameters) DeepCopy() *CustomHeaderParameters { + if in == nil { + return nil + } + out := new(CustomHeaderParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomRouteInitParameters) DeepCopyInto(out *CustomRouteInitParameters) { + *out = *in + if in.AddressPrefixes != nil { + in, out := &in.AddressPrefixes, &out.AddressPrefixes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomRouteInitParameters. +func (in *CustomRouteInitParameters) DeepCopy() *CustomRouteInitParameters { + if in == nil { + return nil + } + out := new(CustomRouteInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomRouteObservation) DeepCopyInto(out *CustomRouteObservation) { + *out = *in + if in.AddressPrefixes != nil { + in, out := &in.AddressPrefixes, &out.AddressPrefixes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomRouteObservation. 
+func (in *CustomRouteObservation) DeepCopy() *CustomRouteObservation { + if in == nil { + return nil + } + out := new(CustomRouteObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomRouteParameters) DeepCopyInto(out *CustomRouteParameters) { + *out = *in + if in.AddressPrefixes != nil { + in, out := &in.AddressPrefixes, &out.AddressPrefixes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomRouteParameters. +func (in *CustomRouteParameters) DeepCopy() *CustomRouteParameters { + if in == nil { + return nil + } + out := new(CustomRouteParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomRulesInitParameters) DeepCopyInto(out *CustomRulesInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.GroupRateLimitBy != nil { + in, out := &in.GroupRateLimitBy, &out.GroupRateLimitBy + *out = new(string) + **out = **in + } + if in.MatchConditions != nil { + in, out := &in.MatchConditions, &out.MatchConditions + *out = make([]MatchConditionsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.RateLimitDuration != nil { + in, out := &in.RateLimitDuration, &out.RateLimitDuration + *out = new(string) + **out = **in + } + if in.RateLimitThreshold != nil { + in, out := &in.RateLimitThreshold, &out.RateLimitThreshold + *out = new(float64) + **out = **in + } + if in.RuleType != nil { + in, out := &in.RuleType, &out.RuleType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomRulesInitParameters. +func (in *CustomRulesInitParameters) DeepCopy() *CustomRulesInitParameters { + if in == nil { + return nil + } + out := new(CustomRulesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomRulesObservation) DeepCopyInto(out *CustomRulesObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.GroupRateLimitBy != nil { + in, out := &in.GroupRateLimitBy, &out.GroupRateLimitBy + *out = new(string) + **out = **in + } + if in.MatchConditions != nil { + in, out := &in.MatchConditions, &out.MatchConditions + *out = make([]MatchConditionsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.RateLimitDuration != nil { + in, out := &in.RateLimitDuration, &out.RateLimitDuration + *out = new(string) + **out = **in + } + if in.RateLimitThreshold != nil { + in, out := &in.RateLimitThreshold, &out.RateLimitThreshold + *out = new(float64) + **out = **in + } + if in.RuleType != nil { + in, out := &in.RuleType, &out.RuleType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomRulesObservation. +func (in *CustomRulesObservation) DeepCopy() *CustomRulesObservation { + if in == nil { + return nil + } + out := new(CustomRulesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomRulesParameters) DeepCopyInto(out *CustomRulesParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.GroupRateLimitBy != nil { + in, out := &in.GroupRateLimitBy, &out.GroupRateLimitBy + *out = new(string) + **out = **in + } + if in.MatchConditions != nil { + in, out := &in.MatchConditions, &out.MatchConditions + *out = make([]MatchConditionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.RateLimitDuration != nil { + in, out := &in.RateLimitDuration, &out.RateLimitDuration + *out = new(string) + **out = **in + } + if in.RateLimitThreshold != nil { + in, out := &in.RateLimitThreshold, &out.RateLimitThreshold + *out = new(float64) + **out = **in + } + if in.RuleType != nil { + in, out := &in.RuleType, &out.RuleType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomRulesParameters. +func (in *CustomRulesParameters) DeepCopy() *CustomRulesParameters { + if in == nil { + return nil + } + out := new(CustomRulesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DDOSProtectionPlanInitParameters) DeepCopyInto(out *DDOSProtectionPlanInitParameters) { + *out = *in + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DDOSProtectionPlanInitParameters. +func (in *DDOSProtectionPlanInitParameters) DeepCopy() *DDOSProtectionPlanInitParameters { + if in == nil { + return nil + } + out := new(DDOSProtectionPlanInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DDOSProtectionPlanObservation) DeepCopyInto(out *DDOSProtectionPlanObservation) { + *out = *in + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DDOSProtectionPlanObservation. +func (in *DDOSProtectionPlanObservation) DeepCopy() *DDOSProtectionPlanObservation { + if in == nil { + return nil + } + out := new(DDOSProtectionPlanObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DDOSProtectionPlanParameters) DeepCopyInto(out *DDOSProtectionPlanParameters) { + *out = *in + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DDOSProtectionPlanParameters. 
+func (in *DDOSProtectionPlanParameters) DeepCopy() *DDOSProtectionPlanParameters { + if in == nil { + return nil + } + out := new(DDOSProtectionPlanParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSConfigInitParameters) DeepCopyInto(out *DNSConfigInitParameters) { + *out = *in + if in.RelativeName != nil { + in, out := &in.RelativeName, &out.RelativeName + *out = new(string) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSConfigInitParameters. +func (in *DNSConfigInitParameters) DeepCopy() *DNSConfigInitParameters { + if in == nil { + return nil + } + out := new(DNSConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSConfigObservation) DeepCopyInto(out *DNSConfigObservation) { + *out = *in + if in.RelativeName != nil { + in, out := &in.RelativeName, &out.RelativeName + *out = new(string) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSConfigObservation. +func (in *DNSConfigObservation) DeepCopy() *DNSConfigObservation { + if in == nil { + return nil + } + out := new(DNSConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DNSConfigParameters) DeepCopyInto(out *DNSConfigParameters) { + *out = *in + if in.RelativeName != nil { + in, out := &in.RelativeName, &out.RelativeName + *out = new(string) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSConfigParameters. +func (in *DNSConfigParameters) DeepCopy() *DNSConfigParameters { + if in == nil { + return nil + } + out := new(DNSConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSInitParameters) DeepCopyInto(out *DNSInitParameters) { + *out = *in + if in.ProxyEnabled != nil { + in, out := &in.ProxyEnabled, &out.ProxyEnabled + *out = new(bool) + **out = **in + } + if in.Servers != nil { + in, out := &in.Servers, &out.Servers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSInitParameters. +func (in *DNSInitParameters) DeepCopy() *DNSInitParameters { + if in == nil { + return nil + } + out := new(DNSInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DNSObservation) DeepCopyInto(out *DNSObservation) { + *out = *in + if in.ProxyEnabled != nil { + in, out := &in.ProxyEnabled, &out.ProxyEnabled + *out = new(bool) + **out = **in + } + if in.Servers != nil { + in, out := &in.Servers, &out.Servers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSObservation. +func (in *DNSObservation) DeepCopy() *DNSObservation { + if in == nil { + return nil + } + out := new(DNSObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSParameters) DeepCopyInto(out *DNSParameters) { + *out = *in + if in.ProxyEnabled != nil { + in, out := &in.ProxyEnabled, &out.ProxyEnabled + *out = new(bool) + **out = **in + } + if in.Servers != nil { + in, out := &in.Servers, &out.Servers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSParameters. +func (in *DNSParameters) DeepCopy() *DNSParameters { + if in == nil { + return nil + } + out := new(DNSParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSZone) DeepCopyInto(out *DNSZone) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSZone. 
+func (in *DNSZone) DeepCopy() *DNSZone { + if in == nil { + return nil + } + out := new(DNSZone) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DNSZone) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSZoneInitParameters) DeepCopyInto(out *DNSZoneInitParameters) { + *out = *in + if in.SoaRecord != nil { + in, out := &in.SoaRecord, &out.SoaRecord + *out = new(SoaRecordInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSZoneInitParameters. +func (in *DNSZoneInitParameters) DeepCopy() *DNSZoneInitParameters { + if in == nil { + return nil + } + out := new(DNSZoneInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSZoneList) DeepCopyInto(out *DNSZoneList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DNSZone, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSZoneList. 
+func (in *DNSZoneList) DeepCopy() *DNSZoneList { + if in == nil { + return nil + } + out := new(DNSZoneList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DNSZoneList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSZoneObservation) DeepCopyInto(out *DNSZoneObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.MaxNumberOfRecordSets != nil { + in, out := &in.MaxNumberOfRecordSets, &out.MaxNumberOfRecordSets + *out = new(float64) + **out = **in + } + if in.NameServers != nil { + in, out := &in.NameServers, &out.NameServers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NumberOfRecordSets != nil { + in, out := &in.NumberOfRecordSets, &out.NumberOfRecordSets + *out = new(float64) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.SoaRecord != nil { + in, out := &in.SoaRecord, &out.SoaRecord + *out = new(SoaRecordObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSZoneObservation. 
+func (in *DNSZoneObservation) DeepCopy() *DNSZoneObservation { + if in == nil { + return nil + } + out := new(DNSZoneObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSZoneParameters) DeepCopyInto(out *DNSZoneParameters) { + *out = *in + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SoaRecord != nil { + in, out := &in.SoaRecord, &out.SoaRecord + *out = new(SoaRecordParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSZoneParameters. +func (in *DNSZoneParameters) DeepCopy() *DNSZoneParameters { + if in == nil { + return nil + } + out := new(DNSZoneParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DNSZoneSpec) DeepCopyInto(out *DNSZoneSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSZoneSpec. +func (in *DNSZoneSpec) DeepCopy() *DNSZoneSpec { + if in == nil { + return nil + } + out := new(DNSZoneSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSZoneStatus) DeepCopyInto(out *DNSZoneStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSZoneStatus. +func (in *DNSZoneStatus) DeepCopy() *DNSZoneStatus { + if in == nil { + return nil + } + out := new(DNSZoneStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DelegationInitParameters) DeepCopyInto(out *DelegationInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ServiceDelegation != nil { + in, out := &in.ServiceDelegation, &out.ServiceDelegation + *out = new(ServiceDelegationInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DelegationInitParameters. +func (in *DelegationInitParameters) DeepCopy() *DelegationInitParameters { + if in == nil { + return nil + } + out := new(DelegationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DelegationObservation) DeepCopyInto(out *DelegationObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ServiceDelegation != nil { + in, out := &in.ServiceDelegation, &out.ServiceDelegation + *out = new(ServiceDelegationObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DelegationObservation. +func (in *DelegationObservation) DeepCopy() *DelegationObservation { + if in == nil { + return nil + } + out := new(DelegationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DelegationParameters) DeepCopyInto(out *DelegationParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ServiceDelegation != nil { + in, out := &in.ServiceDelegation, &out.ServiceDelegation + *out = new(ServiceDelegationParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DelegationParameters. +func (in *DelegationParameters) DeepCopy() *DelegationParameters { + if in == nil { + return nil + } + out := new(DelegationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DisabledRuleGroupInitParameters) DeepCopyInto(out *DisabledRuleGroupInitParameters) { + *out = *in + if in.RuleGroupName != nil { + in, out := &in.RuleGroupName, &out.RuleGroupName + *out = new(string) + **out = **in + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DisabledRuleGroupInitParameters. +func (in *DisabledRuleGroupInitParameters) DeepCopy() *DisabledRuleGroupInitParameters { + if in == nil { + return nil + } + out := new(DisabledRuleGroupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DisabledRuleGroupObservation) DeepCopyInto(out *DisabledRuleGroupObservation) { + *out = *in + if in.RuleGroupName != nil { + in, out := &in.RuleGroupName, &out.RuleGroupName + *out = new(string) + **out = **in + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DisabledRuleGroupObservation. +func (in *DisabledRuleGroupObservation) DeepCopy() *DisabledRuleGroupObservation { + if in == nil { + return nil + } + out := new(DisabledRuleGroupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DisabledRuleGroupParameters) DeepCopyInto(out *DisabledRuleGroupParameters) { + *out = *in + if in.RuleGroupName != nil { + in, out := &in.RuleGroupName, &out.RuleGroupName + *out = new(string) + **out = **in + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DisabledRuleGroupParameters. +func (in *DisabledRuleGroupParameters) DeepCopy() *DisabledRuleGroupParameters { + if in == nil { + return nil + } + out := new(DisabledRuleGroupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionInitParameters) DeepCopyInto(out *EncryptionInitParameters) { + *out = *in + if in.Enforcement != nil { + in, out := &in.Enforcement, &out.Enforcement + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionInitParameters. +func (in *EncryptionInitParameters) DeepCopy() *EncryptionInitParameters { + if in == nil { + return nil + } + out := new(EncryptionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionObservation) DeepCopyInto(out *EncryptionObservation) { + *out = *in + if in.Enforcement != nil { + in, out := &in.Enforcement, &out.Enforcement + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionObservation. 
+func (in *EncryptionObservation) DeepCopy() *EncryptionObservation { + if in == nil { + return nil + } + out := new(EncryptionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionParameters) DeepCopyInto(out *EncryptionParameters) { + *out = *in + if in.Enforcement != nil { + in, out := &in.Enforcement, &out.Enforcement + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionParameters. +func (in *EncryptionParameters) DeepCopy() *EncryptionParameters { + if in == nil { + return nil + } + out := new(EncryptionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointInitParameters) DeepCopyInto(out *EndpointInitParameters) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } + if in.CoverageLevel != nil { + in, out := &in.CoverageLevel, &out.CoverageLevel + *out = new(string) + **out = **in + } + if in.ExcludedIPAddresses != nil { + in, out := &in.ExcludedIPAddresses, &out.ExcludedIPAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(FilterInitParameters) + (*in).DeepCopyInto(*out) + } + if in.IncludedIPAddresses != nil { + in, out := &in.IncludedIPAddresses, &out.IncludedIPAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if 
in.TargetResourceID != nil { + in, out := &in.TargetResourceID, &out.TargetResourceID + *out = new(string) + **out = **in + } + if in.TargetResourceType != nil { + in, out := &in.TargetResourceType, &out.TargetResourceType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointInitParameters. +func (in *EndpointInitParameters) DeepCopy() *EndpointInitParameters { + if in == nil { + return nil + } + out := new(EndpointInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointObservation) DeepCopyInto(out *EndpointObservation) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } + if in.CoverageLevel != nil { + in, out := &in.CoverageLevel, &out.CoverageLevel + *out = new(string) + **out = **in + } + if in.ExcludedIPAddresses != nil { + in, out := &in.ExcludedIPAddresses, &out.ExcludedIPAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(FilterObservation) + (*in).DeepCopyInto(*out) + } + if in.IncludedIPAddresses != nil { + in, out := &in.IncludedIPAddresses, &out.IncludedIPAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.TargetResourceID != nil { + in, out := &in.TargetResourceID, &out.TargetResourceID + *out = new(string) + **out = **in + } + if in.TargetResourceType != nil { + in, out := &in.TargetResourceType, &out.TargetResourceType + *out = new(string) + **out = 
**in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointObservation. +func (in *EndpointObservation) DeepCopy() *EndpointObservation { + if in == nil { + return nil + } + out := new(EndpointObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointParameters) DeepCopyInto(out *EndpointParameters) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } + if in.CoverageLevel != nil { + in, out := &in.CoverageLevel, &out.CoverageLevel + *out = new(string) + **out = **in + } + if in.ExcludedIPAddresses != nil { + in, out := &in.ExcludedIPAddresses, &out.ExcludedIPAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(FilterParameters) + (*in).DeepCopyInto(*out) + } + if in.IncludedIPAddresses != nil { + in, out := &in.IncludedIPAddresses, &out.IncludedIPAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.TargetResourceID != nil { + in, out := &in.TargetResourceID, &out.TargetResourceID + *out = new(string) + **out = **in + } + if in.TargetResourceType != nil { + in, out := &in.TargetResourceType, &out.TargetResourceType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointParameters. 
+func (in *EndpointParameters) DeepCopy() *EndpointParameters { + if in == nil { + return nil + } + out := new(EndpointParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExcludedRuleSetInitParameters) DeepCopyInto(out *ExcludedRuleSetInitParameters) { + *out = *in + if in.RuleGroup != nil { + in, out := &in.RuleGroup, &out.RuleGroup + *out = make([]RuleGroupInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExcludedRuleSetInitParameters. +func (in *ExcludedRuleSetInitParameters) DeepCopy() *ExcludedRuleSetInitParameters { + if in == nil { + return nil + } + out := new(ExcludedRuleSetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExcludedRuleSetObservation) DeepCopyInto(out *ExcludedRuleSetObservation) { + *out = *in + if in.RuleGroup != nil { + in, out := &in.RuleGroup, &out.RuleGroup + *out = make([]RuleGroupObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExcludedRuleSetObservation. 
+func (in *ExcludedRuleSetObservation) DeepCopy() *ExcludedRuleSetObservation { + if in == nil { + return nil + } + out := new(ExcludedRuleSetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExcludedRuleSetParameters) DeepCopyInto(out *ExcludedRuleSetParameters) { + *out = *in + if in.RuleGroup != nil { + in, out := &in.RuleGroup, &out.RuleGroup + *out = make([]RuleGroupParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExcludedRuleSetParameters. +func (in *ExcludedRuleSetParameters) DeepCopy() *ExcludedRuleSetParameters { + if in == nil { + return nil + } + out := new(ExcludedRuleSetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExclusionInitParameters) DeepCopyInto(out *ExclusionInitParameters) { + *out = *in + if in.MatchVariable != nil { + in, out := &in.MatchVariable, &out.MatchVariable + *out = new(string) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(string) + **out = **in + } + if in.SelectorMatchOperator != nil { + in, out := &in.SelectorMatchOperator, &out.SelectorMatchOperator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExclusionInitParameters. 
+func (in *ExclusionInitParameters) DeepCopy() *ExclusionInitParameters { + if in == nil { + return nil + } + out := new(ExclusionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExclusionObservation) DeepCopyInto(out *ExclusionObservation) { + *out = *in + if in.MatchVariable != nil { + in, out := &in.MatchVariable, &out.MatchVariable + *out = new(string) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(string) + **out = **in + } + if in.SelectorMatchOperator != nil { + in, out := &in.SelectorMatchOperator, &out.SelectorMatchOperator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExclusionObservation. +func (in *ExclusionObservation) DeepCopy() *ExclusionObservation { + if in == nil { + return nil + } + out := new(ExclusionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExclusionParameters) DeepCopyInto(out *ExclusionParameters) { + *out = *in + if in.MatchVariable != nil { + in, out := &in.MatchVariable, &out.MatchVariable + *out = new(string) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(string) + **out = **in + } + if in.SelectorMatchOperator != nil { + in, out := &in.SelectorMatchOperator, &out.SelectorMatchOperator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExclusionParameters. 
+func (in *ExclusionParameters) DeepCopy() *ExclusionParameters { + if in == nil { + return nil + } + out := new(ExclusionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExplicitProxyInitParameters) DeepCopyInto(out *ExplicitProxyInitParameters) { + *out = *in + if in.EnablePacFile != nil { + in, out := &in.EnablePacFile, &out.EnablePacFile + *out = new(bool) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.HTTPPort != nil { + in, out := &in.HTTPPort, &out.HTTPPort + *out = new(float64) + **out = **in + } + if in.HTTPSPort != nil { + in, out := &in.HTTPSPort, &out.HTTPSPort + *out = new(float64) + **out = **in + } + if in.PacFile != nil { + in, out := &in.PacFile, &out.PacFile + *out = new(string) + **out = **in + } + if in.PacFilePort != nil { + in, out := &in.PacFilePort, &out.PacFilePort + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExplicitProxyInitParameters. +func (in *ExplicitProxyInitParameters) DeepCopy() *ExplicitProxyInitParameters { + if in == nil { + return nil + } + out := new(ExplicitProxyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExplicitProxyObservation) DeepCopyInto(out *ExplicitProxyObservation) { + *out = *in + if in.EnablePacFile != nil { + in, out := &in.EnablePacFile, &out.EnablePacFile + *out = new(bool) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.HTTPPort != nil { + in, out := &in.HTTPPort, &out.HTTPPort + *out = new(float64) + **out = **in + } + if in.HTTPSPort != nil { + in, out := &in.HTTPSPort, &out.HTTPSPort + *out = new(float64) + **out = **in + } + if in.PacFile != nil { + in, out := &in.PacFile, &out.PacFile + *out = new(string) + **out = **in + } + if in.PacFilePort != nil { + in, out := &in.PacFilePort, &out.PacFilePort + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExplicitProxyObservation. +func (in *ExplicitProxyObservation) DeepCopy() *ExplicitProxyObservation { + if in == nil { + return nil + } + out := new(ExplicitProxyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExplicitProxyParameters) DeepCopyInto(out *ExplicitProxyParameters) { + *out = *in + if in.EnablePacFile != nil { + in, out := &in.EnablePacFile, &out.EnablePacFile + *out = new(bool) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.HTTPPort != nil { + in, out := &in.HTTPPort, &out.HTTPPort + *out = new(float64) + **out = **in + } + if in.HTTPSPort != nil { + in, out := &in.HTTPSPort, &out.HTTPSPort + *out = new(float64) + **out = **in + } + if in.PacFile != nil { + in, out := &in.PacFile, &out.PacFile + *out = new(string) + **out = **in + } + if in.PacFilePort != nil { + in, out := &in.PacFilePort, &out.PacFilePort + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExplicitProxyParameters. +func (in *ExplicitProxyParameters) DeepCopy() *ExplicitProxyParameters { + if in == nil { + return nil + } + out := new(ExplicitProxyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExplicitResourceOrderInitParameters) DeepCopyInto(out *ExplicitResourceOrderInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExplicitResourceOrderInitParameters. +func (in *ExplicitResourceOrderInitParameters) DeepCopy() *ExplicitResourceOrderInitParameters { + if in == nil { + return nil + } + out := new(ExplicitResourceOrderInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExplicitResourceOrderObservation) DeepCopyInto(out *ExplicitResourceOrderObservation) { + *out = *in + if in.BackendPoolHealthProbeIds != nil { + in, out := &in.BackendPoolHealthProbeIds, &out.BackendPoolHealthProbeIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.BackendPoolIds != nil { + in, out := &in.BackendPoolIds, &out.BackendPoolIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.BackendPoolLoadBalancingIds != nil { + in, out := &in.BackendPoolLoadBalancingIds, &out.BackendPoolLoadBalancingIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FrontendEndpointIds != nil { + in, out := &in.FrontendEndpointIds, &out.FrontendEndpointIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RoutingRuleIds != nil { + in, out := &in.RoutingRuleIds, &out.RoutingRuleIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExplicitResourceOrderObservation. +func (in *ExplicitResourceOrderObservation) DeepCopy() *ExplicitResourceOrderObservation { + if in == nil { + return nil + } + out := new(ExplicitResourceOrderObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExplicitResourceOrderParameters) DeepCopyInto(out *ExplicitResourceOrderParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExplicitResourceOrderParameters. +func (in *ExplicitResourceOrderParameters) DeepCopy() *ExplicitResourceOrderParameters { + if in == nil { + return nil + } + out := new(ExplicitResourceOrderParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExpressRouteCircuit) DeepCopyInto(out *ExpressRouteCircuit) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpressRouteCircuit. +func (in *ExpressRouteCircuit) DeepCopy() *ExpressRouteCircuit { + if in == nil { + return nil + } + out := new(ExpressRouteCircuit) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ExpressRouteCircuit) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExpressRouteCircuitInitParameters) DeepCopyInto(out *ExpressRouteCircuitInitParameters) { + *out = *in + if in.AllowClassicOperations != nil { + in, out := &in.AllowClassicOperations, &out.AllowClassicOperations + *out = new(bool) + **out = **in + } + if in.BandwidthInGbps != nil { + in, out := &in.BandwidthInGbps, &out.BandwidthInGbps + *out = new(float64) + **out = **in + } + if in.BandwidthInMbps != nil { + in, out := &in.BandwidthInMbps, &out.BandwidthInMbps + *out = new(float64) + **out = **in + } + if in.ExpressRoutePortID != nil { + in, out := &in.ExpressRoutePortID, &out.ExpressRoutePortID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PeeringLocation != nil { + in, out := &in.PeeringLocation, &out.PeeringLocation + *out = new(string) + **out = **in + } + if in.ServiceProviderName != nil { + in, out := &in.ServiceProviderName, &out.ServiceProviderName + *out = new(string) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(ExpressRouteCircuitSkuInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpressRouteCircuitInitParameters. +func (in *ExpressRouteCircuitInitParameters) DeepCopy() *ExpressRouteCircuitInitParameters { + if in == nil { + return nil + } + out := new(ExpressRouteCircuitInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExpressRouteCircuitList) DeepCopyInto(out *ExpressRouteCircuitList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ExpressRouteCircuit, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpressRouteCircuitList. +func (in *ExpressRouteCircuitList) DeepCopy() *ExpressRouteCircuitList { + if in == nil { + return nil + } + out := new(ExpressRouteCircuitList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ExpressRouteCircuitList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExpressRouteCircuitObservation) DeepCopyInto(out *ExpressRouteCircuitObservation) { + *out = *in + if in.AllowClassicOperations != nil { + in, out := &in.AllowClassicOperations, &out.AllowClassicOperations + *out = new(bool) + **out = **in + } + if in.BandwidthInGbps != nil { + in, out := &in.BandwidthInGbps, &out.BandwidthInGbps + *out = new(float64) + **out = **in + } + if in.BandwidthInMbps != nil { + in, out := &in.BandwidthInMbps, &out.BandwidthInMbps + *out = new(float64) + **out = **in + } + if in.ExpressRoutePortID != nil { + in, out := &in.ExpressRoutePortID, &out.ExpressRoutePortID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PeeringLocation != nil { + in, out := &in.PeeringLocation, &out.PeeringLocation + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ServiceProviderName != nil { + in, out := &in.ServiceProviderName, &out.ServiceProviderName + *out = new(string) + **out = **in + } + if in.ServiceProviderProvisioningState != nil { + in, out := &in.ServiceProviderProvisioningState, &out.ServiceProviderProvisioningState + *out = new(string) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(ExpressRouteCircuitSkuObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
ExpressRouteCircuitObservation. +func (in *ExpressRouteCircuitObservation) DeepCopy() *ExpressRouteCircuitObservation { + if in == nil { + return nil + } + out := new(ExpressRouteCircuitObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExpressRouteCircuitParameters) DeepCopyInto(out *ExpressRouteCircuitParameters) { + *out = *in + if in.AllowClassicOperations != nil { + in, out := &in.AllowClassicOperations, &out.AllowClassicOperations + *out = new(bool) + **out = **in + } + if in.AuthorizationKeySecretRef != nil { + in, out := &in.AuthorizationKeySecretRef, &out.AuthorizationKeySecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.BandwidthInGbps != nil { + in, out := &in.BandwidthInGbps, &out.BandwidthInGbps + *out = new(float64) + **out = **in + } + if in.BandwidthInMbps != nil { + in, out := &in.BandwidthInMbps, &out.BandwidthInMbps + *out = new(float64) + **out = **in + } + if in.ExpressRoutePortID != nil { + in, out := &in.ExpressRoutePortID, &out.ExpressRoutePortID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PeeringLocation != nil { + in, out := &in.PeeringLocation, &out.PeeringLocation + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ServiceProviderName != nil { + in, out := &in.ServiceProviderName, &out.ServiceProviderName + *out = 
new(string) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(ExpressRouteCircuitSkuParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpressRouteCircuitParameters. +func (in *ExpressRouteCircuitParameters) DeepCopy() *ExpressRouteCircuitParameters { + if in == nil { + return nil + } + out := new(ExpressRouteCircuitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExpressRouteCircuitPeering) DeepCopyInto(out *ExpressRouteCircuitPeering) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpressRouteCircuitPeering. +func (in *ExpressRouteCircuitPeering) DeepCopy() *ExpressRouteCircuitPeering { + if in == nil { + return nil + } + out := new(ExpressRouteCircuitPeering) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ExpressRouteCircuitPeering) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExpressRouteCircuitPeeringInitParameters) DeepCopyInto(out *ExpressRouteCircuitPeeringInitParameters) { + *out = *in + if in.IPv4Enabled != nil { + in, out := &in.IPv4Enabled, &out.IPv4Enabled + *out = new(bool) + **out = **in + } + if in.IPv6 != nil { + in, out := &in.IPv6, &out.IPv6 + *out = new(IPv6InitParameters) + (*in).DeepCopyInto(*out) + } + if in.MicrosoftPeeringConfig != nil { + in, out := &in.MicrosoftPeeringConfig, &out.MicrosoftPeeringConfig + *out = new(MicrosoftPeeringConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PeerAsn != nil { + in, out := &in.PeerAsn, &out.PeerAsn + *out = new(float64) + **out = **in + } + if in.PrimaryPeerAddressPrefix != nil { + in, out := &in.PrimaryPeerAddressPrefix, &out.PrimaryPeerAddressPrefix + *out = new(string) + **out = **in + } + if in.RouteFilterID != nil { + in, out := &in.RouteFilterID, &out.RouteFilterID + *out = new(string) + **out = **in + } + if in.SecondaryPeerAddressPrefix != nil { + in, out := &in.SecondaryPeerAddressPrefix, &out.SecondaryPeerAddressPrefix + *out = new(string) + **out = **in + } + if in.VlanID != nil { + in, out := &in.VlanID, &out.VlanID + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpressRouteCircuitPeeringInitParameters. +func (in *ExpressRouteCircuitPeeringInitParameters) DeepCopy() *ExpressRouteCircuitPeeringInitParameters { + if in == nil { + return nil + } + out := new(ExpressRouteCircuitPeeringInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExpressRouteCircuitPeeringList) DeepCopyInto(out *ExpressRouteCircuitPeeringList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ExpressRouteCircuitPeering, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpressRouteCircuitPeeringList. +func (in *ExpressRouteCircuitPeeringList) DeepCopy() *ExpressRouteCircuitPeeringList { + if in == nil { + return nil + } + out := new(ExpressRouteCircuitPeeringList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ExpressRouteCircuitPeeringList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExpressRouteCircuitPeeringObservation) DeepCopyInto(out *ExpressRouteCircuitPeeringObservation) { + *out = *in + if in.AzureAsn != nil { + in, out := &in.AzureAsn, &out.AzureAsn + *out = new(float64) + **out = **in + } + if in.ExpressRouteCircuitName != nil { + in, out := &in.ExpressRouteCircuitName, &out.ExpressRouteCircuitName + *out = new(string) + **out = **in + } + if in.GatewayManagerEtag != nil { + in, out := &in.GatewayManagerEtag, &out.GatewayManagerEtag + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IPv4Enabled != nil { + in, out := &in.IPv4Enabled, &out.IPv4Enabled + *out = new(bool) + **out = **in + } + if in.IPv6 != nil { + in, out := &in.IPv6, &out.IPv6 + *out = new(IPv6Observation) + (*in).DeepCopyInto(*out) + } + if in.MicrosoftPeeringConfig != nil { + in, out := &in.MicrosoftPeeringConfig, &out.MicrosoftPeeringConfig + *out = new(MicrosoftPeeringConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.PeerAsn != nil { + in, out := &in.PeerAsn, &out.PeerAsn + *out = new(float64) + **out = **in + } + if in.PrimaryAzurePort != nil { + in, out := &in.PrimaryAzurePort, &out.PrimaryAzurePort + *out = new(string) + **out = **in + } + if in.PrimaryPeerAddressPrefix != nil { + in, out := &in.PrimaryPeerAddressPrefix, &out.PrimaryPeerAddressPrefix + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.RouteFilterID != nil { + in, out := &in.RouteFilterID, &out.RouteFilterID + *out = new(string) + **out = **in + } + if in.SecondaryAzurePort != nil { + in, out := &in.SecondaryAzurePort, &out.SecondaryAzurePort + *out = new(string) + **out = **in + } + if in.SecondaryPeerAddressPrefix != nil { + in, out := &in.SecondaryPeerAddressPrefix, &out.SecondaryPeerAddressPrefix + *out = new(string) + **out = **in + } + if in.VlanID != nil { + 
in, out := &in.VlanID, &out.VlanID + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpressRouteCircuitPeeringObservation. +func (in *ExpressRouteCircuitPeeringObservation) DeepCopy() *ExpressRouteCircuitPeeringObservation { + if in == nil { + return nil + } + out := new(ExpressRouteCircuitPeeringObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExpressRouteCircuitPeeringParameters) DeepCopyInto(out *ExpressRouteCircuitPeeringParameters) { + *out = *in + if in.ExpressRouteCircuitName != nil { + in, out := &in.ExpressRouteCircuitName, &out.ExpressRouteCircuitName + *out = new(string) + **out = **in + } + if in.ExpressRouteCircuitNameRef != nil { + in, out := &in.ExpressRouteCircuitNameRef, &out.ExpressRouteCircuitNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ExpressRouteCircuitNameSelector != nil { + in, out := &in.ExpressRouteCircuitNameSelector, &out.ExpressRouteCircuitNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.IPv4Enabled != nil { + in, out := &in.IPv4Enabled, &out.IPv4Enabled + *out = new(bool) + **out = **in + } + if in.IPv6 != nil { + in, out := &in.IPv6, &out.IPv6 + *out = new(IPv6Parameters) + (*in).DeepCopyInto(*out) + } + if in.MicrosoftPeeringConfig != nil { + in, out := &in.MicrosoftPeeringConfig, &out.MicrosoftPeeringConfig + *out = new(MicrosoftPeeringConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.PeerAsn != nil { + in, out := &in.PeerAsn, &out.PeerAsn + *out = new(float64) + **out = **in + } + if in.PrimaryPeerAddressPrefix != nil { + in, out := &in.PrimaryPeerAddressPrefix, &out.PrimaryPeerAddressPrefix + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = 
**in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RouteFilterID != nil { + in, out := &in.RouteFilterID, &out.RouteFilterID + *out = new(string) + **out = **in + } + if in.SecondaryPeerAddressPrefix != nil { + in, out := &in.SecondaryPeerAddressPrefix, &out.SecondaryPeerAddressPrefix + *out = new(string) + **out = **in + } + if in.SharedKeySecretRef != nil { + in, out := &in.SharedKeySecretRef, &out.SharedKeySecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.VlanID != nil { + in, out := &in.VlanID, &out.VlanID + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpressRouteCircuitPeeringParameters. +func (in *ExpressRouteCircuitPeeringParameters) DeepCopy() *ExpressRouteCircuitPeeringParameters { + if in == nil { + return nil + } + out := new(ExpressRouteCircuitPeeringParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExpressRouteCircuitPeeringSpec) DeepCopyInto(out *ExpressRouteCircuitPeeringSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpressRouteCircuitPeeringSpec. 
+func (in *ExpressRouteCircuitPeeringSpec) DeepCopy() *ExpressRouteCircuitPeeringSpec { + if in == nil { + return nil + } + out := new(ExpressRouteCircuitPeeringSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExpressRouteCircuitPeeringStatus) DeepCopyInto(out *ExpressRouteCircuitPeeringStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpressRouteCircuitPeeringStatus. +func (in *ExpressRouteCircuitPeeringStatus) DeepCopy() *ExpressRouteCircuitPeeringStatus { + if in == nil { + return nil + } + out := new(ExpressRouteCircuitPeeringStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExpressRouteCircuitSkuInitParameters) DeepCopyInto(out *ExpressRouteCircuitSkuInitParameters) { + *out = *in + if in.Family != nil { + in, out := &in.Family, &out.Family + *out = new(string) + **out = **in + } + if in.Tier != nil { + in, out := &in.Tier, &out.Tier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpressRouteCircuitSkuInitParameters. +func (in *ExpressRouteCircuitSkuInitParameters) DeepCopy() *ExpressRouteCircuitSkuInitParameters { + if in == nil { + return nil + } + out := new(ExpressRouteCircuitSkuInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExpressRouteCircuitSkuObservation) DeepCopyInto(out *ExpressRouteCircuitSkuObservation) { + *out = *in + if in.Family != nil { + in, out := &in.Family, &out.Family + *out = new(string) + **out = **in + } + if in.Tier != nil { + in, out := &in.Tier, &out.Tier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpressRouteCircuitSkuObservation. +func (in *ExpressRouteCircuitSkuObservation) DeepCopy() *ExpressRouteCircuitSkuObservation { + if in == nil { + return nil + } + out := new(ExpressRouteCircuitSkuObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExpressRouteCircuitSkuParameters) DeepCopyInto(out *ExpressRouteCircuitSkuParameters) { + *out = *in + if in.Family != nil { + in, out := &in.Family, &out.Family + *out = new(string) + **out = **in + } + if in.Tier != nil { + in, out := &in.Tier, &out.Tier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpressRouteCircuitSkuParameters. +func (in *ExpressRouteCircuitSkuParameters) DeepCopy() *ExpressRouteCircuitSkuParameters { + if in == nil { + return nil + } + out := new(ExpressRouteCircuitSkuParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExpressRouteCircuitSpec) DeepCopyInto(out *ExpressRouteCircuitSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpressRouteCircuitSpec. 
+func (in *ExpressRouteCircuitSpec) DeepCopy() *ExpressRouteCircuitSpec { + if in == nil { + return nil + } + out := new(ExpressRouteCircuitSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExpressRouteCircuitStatus) DeepCopyInto(out *ExpressRouteCircuitStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpressRouteCircuitStatus. +func (in *ExpressRouteCircuitStatus) DeepCopy() *ExpressRouteCircuitStatus { + if in == nil { + return nil + } + out := new(ExpressRouteCircuitStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExpressRouteConnection) DeepCopyInto(out *ExpressRouteConnection) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpressRouteConnection. +func (in *ExpressRouteConnection) DeepCopy() *ExpressRouteConnection { + if in == nil { + return nil + } + out := new(ExpressRouteConnection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ExpressRouteConnection) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExpressRouteConnectionInitParameters) DeepCopyInto(out *ExpressRouteConnectionInitParameters) { + *out = *in + if in.AuthorizationKey != nil { + in, out := &in.AuthorizationKey, &out.AuthorizationKey + *out = new(string) + **out = **in + } + if in.EnableInternetSecurity != nil { + in, out := &in.EnableInternetSecurity, &out.EnableInternetSecurity + *out = new(bool) + **out = **in + } + if in.ExpressRouteCircuitPeeringID != nil { + in, out := &in.ExpressRouteCircuitPeeringID, &out.ExpressRouteCircuitPeeringID + *out = new(string) + **out = **in + } + if in.ExpressRouteCircuitPeeringIDRef != nil { + in, out := &in.ExpressRouteCircuitPeeringIDRef, &out.ExpressRouteCircuitPeeringIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ExpressRouteCircuitPeeringIDSelector != nil { + in, out := &in.ExpressRouteCircuitPeeringIDSelector, &out.ExpressRouteCircuitPeeringIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ExpressRouteGatewayBypassEnabled != nil { + in, out := &in.ExpressRouteGatewayBypassEnabled, &out.ExpressRouteGatewayBypassEnabled + *out = new(bool) + **out = **in + } + if in.Routing != nil { + in, out := &in.Routing, &out.Routing + *out = new(RoutingInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RoutingWeight != nil { + in, out := &in.RoutingWeight, &out.RoutingWeight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpressRouteConnectionInitParameters. +func (in *ExpressRouteConnectionInitParameters) DeepCopy() *ExpressRouteConnectionInitParameters { + if in == nil { + return nil + } + out := new(ExpressRouteConnectionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExpressRouteConnectionList) DeepCopyInto(out *ExpressRouteConnectionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ExpressRouteConnection, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpressRouteConnectionList. +func (in *ExpressRouteConnectionList) DeepCopy() *ExpressRouteConnectionList { + if in == nil { + return nil + } + out := new(ExpressRouteConnectionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ExpressRouteConnectionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExpressRouteConnectionObservation) DeepCopyInto(out *ExpressRouteConnectionObservation) { + *out = *in + if in.AuthorizationKey != nil { + in, out := &in.AuthorizationKey, &out.AuthorizationKey + *out = new(string) + **out = **in + } + if in.EnableInternetSecurity != nil { + in, out := &in.EnableInternetSecurity, &out.EnableInternetSecurity + *out = new(bool) + **out = **in + } + if in.ExpressRouteCircuitPeeringID != nil { + in, out := &in.ExpressRouteCircuitPeeringID, &out.ExpressRouteCircuitPeeringID + *out = new(string) + **out = **in + } + if in.ExpressRouteGatewayBypassEnabled != nil { + in, out := &in.ExpressRouteGatewayBypassEnabled, &out.ExpressRouteGatewayBypassEnabled + *out = new(bool) + **out = **in + } + if in.ExpressRouteGatewayID != nil { + in, out := &in.ExpressRouteGatewayID, &out.ExpressRouteGatewayID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Routing != nil { + in, out := &in.Routing, &out.Routing + *out = new(RoutingObservation) + (*in).DeepCopyInto(*out) + } + if in.RoutingWeight != nil { + in, out := &in.RoutingWeight, &out.RoutingWeight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpressRouteConnectionObservation. +func (in *ExpressRouteConnectionObservation) DeepCopy() *ExpressRouteConnectionObservation { + if in == nil { + return nil + } + out := new(ExpressRouteConnectionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExpressRouteConnectionParameters) DeepCopyInto(out *ExpressRouteConnectionParameters) { + *out = *in + if in.AuthorizationKey != nil { + in, out := &in.AuthorizationKey, &out.AuthorizationKey + *out = new(string) + **out = **in + } + if in.EnableInternetSecurity != nil { + in, out := &in.EnableInternetSecurity, &out.EnableInternetSecurity + *out = new(bool) + **out = **in + } + if in.ExpressRouteCircuitPeeringID != nil { + in, out := &in.ExpressRouteCircuitPeeringID, &out.ExpressRouteCircuitPeeringID + *out = new(string) + **out = **in + } + if in.ExpressRouteCircuitPeeringIDRef != nil { + in, out := &in.ExpressRouteCircuitPeeringIDRef, &out.ExpressRouteCircuitPeeringIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ExpressRouteCircuitPeeringIDSelector != nil { + in, out := &in.ExpressRouteCircuitPeeringIDSelector, &out.ExpressRouteCircuitPeeringIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ExpressRouteGatewayBypassEnabled != nil { + in, out := &in.ExpressRouteGatewayBypassEnabled, &out.ExpressRouteGatewayBypassEnabled + *out = new(bool) + **out = **in + } + if in.ExpressRouteGatewayID != nil { + in, out := &in.ExpressRouteGatewayID, &out.ExpressRouteGatewayID + *out = new(string) + **out = **in + } + if in.ExpressRouteGatewayIDRef != nil { + in, out := &in.ExpressRouteGatewayIDRef, &out.ExpressRouteGatewayIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ExpressRouteGatewayIDSelector != nil { + in, out := &in.ExpressRouteGatewayIDSelector, &out.ExpressRouteGatewayIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Routing != nil { + in, out := &in.Routing, &out.Routing + *out = new(RoutingParameters) + (*in).DeepCopyInto(*out) + } + if in.RoutingWeight != nil { + in, out := &in.RoutingWeight, &out.RoutingWeight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
ExpressRouteConnectionParameters. +func (in *ExpressRouteConnectionParameters) DeepCopy() *ExpressRouteConnectionParameters { + if in == nil { + return nil + } + out := new(ExpressRouteConnectionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExpressRouteConnectionSpec) DeepCopyInto(out *ExpressRouteConnectionSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpressRouteConnectionSpec. +func (in *ExpressRouteConnectionSpec) DeepCopy() *ExpressRouteConnectionSpec { + if in == nil { + return nil + } + out := new(ExpressRouteConnectionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExpressRouteConnectionStatus) DeepCopyInto(out *ExpressRouteConnectionStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpressRouteConnectionStatus. +func (in *ExpressRouteConnectionStatus) DeepCopy() *ExpressRouteConnectionStatus { + if in == nil { + return nil + } + out := new(ExpressRouteConnectionStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExpressRoutePort) DeepCopyInto(out *ExpressRoutePort) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpressRoutePort. +func (in *ExpressRoutePort) DeepCopy() *ExpressRoutePort { + if in == nil { + return nil + } + out := new(ExpressRoutePort) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ExpressRoutePort) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExpressRoutePortIdentityInitParameters) DeepCopyInto(out *ExpressRoutePortIdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpressRoutePortIdentityInitParameters. +func (in *ExpressRoutePortIdentityInitParameters) DeepCopy() *ExpressRoutePortIdentityInitParameters { + if in == nil { + return nil + } + out := new(ExpressRoutePortIdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExpressRoutePortIdentityObservation) DeepCopyInto(out *ExpressRoutePortIdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpressRoutePortIdentityObservation. +func (in *ExpressRoutePortIdentityObservation) DeepCopy() *ExpressRoutePortIdentityObservation { + if in == nil { + return nil + } + out := new(ExpressRoutePortIdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExpressRoutePortIdentityParameters) DeepCopyInto(out *ExpressRoutePortIdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpressRoutePortIdentityParameters. +func (in *ExpressRoutePortIdentityParameters) DeepCopy() *ExpressRoutePortIdentityParameters { + if in == nil { + return nil + } + out := new(ExpressRoutePortIdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExpressRoutePortInitParameters) DeepCopyInto(out *ExpressRoutePortInitParameters) { + *out = *in + if in.BandwidthInGbps != nil { + in, out := &in.BandwidthInGbps, &out.BandwidthInGbps + *out = new(float64) + **out = **in + } + if in.BillingType != nil { + in, out := &in.BillingType, &out.BillingType + *out = new(string) + **out = **in + } + if in.Encapsulation != nil { + in, out := &in.Encapsulation, &out.Encapsulation + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(ExpressRoutePortIdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Link1 != nil { + in, out := &in.Link1, &out.Link1 + *out = new(Link1InitParameters) + (*in).DeepCopyInto(*out) + } + if in.Link2 != nil { + in, out := &in.Link2, &out.Link2 + *out = new(Link2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PeeringLocation != nil { + in, out := &in.PeeringLocation, &out.PeeringLocation + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpressRoutePortInitParameters. +func (in *ExpressRoutePortInitParameters) DeepCopy() *ExpressRoutePortInitParameters { + if in == nil { + return nil + } + out := new(ExpressRoutePortInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExpressRoutePortList) DeepCopyInto(out *ExpressRoutePortList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ExpressRoutePort, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpressRoutePortList. +func (in *ExpressRoutePortList) DeepCopy() *ExpressRoutePortList { + if in == nil { + return nil + } + out := new(ExpressRoutePortList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ExpressRoutePortList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExpressRoutePortObservation) DeepCopyInto(out *ExpressRoutePortObservation) { + *out = *in + if in.BandwidthInGbps != nil { + in, out := &in.BandwidthInGbps, &out.BandwidthInGbps + *out = new(float64) + **out = **in + } + if in.BillingType != nil { + in, out := &in.BillingType, &out.BillingType + *out = new(string) + **out = **in + } + if in.Encapsulation != nil { + in, out := &in.Encapsulation, &out.Encapsulation + *out = new(string) + **out = **in + } + if in.Ethertype != nil { + in, out := &in.Ethertype, &out.Ethertype + *out = new(string) + **out = **in + } + if in.GUID != nil { + in, out := &in.GUID, &out.GUID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(ExpressRoutePortIdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.Link1 != nil { + in, out := &in.Link1, &out.Link1 + *out = new(Link1Observation) + 
(*in).DeepCopyInto(*out) + } + if in.Link2 != nil { + in, out := &in.Link2, &out.Link2 + *out = new(Link2Observation) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Mtu != nil { + in, out := &in.Mtu, &out.Mtu + *out = new(string) + **out = **in + } + if in.PeeringLocation != nil { + in, out := &in.PeeringLocation, &out.PeeringLocation + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpressRoutePortObservation. +func (in *ExpressRoutePortObservation) DeepCopy() *ExpressRoutePortObservation { + if in == nil { + return nil + } + out := new(ExpressRoutePortObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExpressRoutePortParameters) DeepCopyInto(out *ExpressRoutePortParameters) { + *out = *in + if in.BandwidthInGbps != nil { + in, out := &in.BandwidthInGbps, &out.BandwidthInGbps + *out = new(float64) + **out = **in + } + if in.BillingType != nil { + in, out := &in.BillingType, &out.BillingType + *out = new(string) + **out = **in + } + if in.Encapsulation != nil { + in, out := &in.Encapsulation, &out.Encapsulation + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(ExpressRoutePortIdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.Link1 != nil { + in, out := &in.Link1, &out.Link1 + *out = new(Link1Parameters) + (*in).DeepCopyInto(*out) + } + if in.Link2 != nil { + in, out := &in.Link2, &out.Link2 + *out = new(Link2Parameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PeeringLocation != nil { + in, out := &in.PeeringLocation, &out.PeeringLocation + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpressRoutePortParameters. 
+func (in *ExpressRoutePortParameters) DeepCopy() *ExpressRoutePortParameters { + if in == nil { + return nil + } + out := new(ExpressRoutePortParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExpressRoutePortSpec) DeepCopyInto(out *ExpressRoutePortSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpressRoutePortSpec. +func (in *ExpressRoutePortSpec) DeepCopy() *ExpressRoutePortSpec { + if in == nil { + return nil + } + out := new(ExpressRoutePortSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExpressRoutePortStatus) DeepCopyInto(out *ExpressRoutePortStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpressRoutePortStatus. +func (in *ExpressRoutePortStatus) DeepCopy() *ExpressRoutePortStatus { + if in == nil { + return nil + } + out := new(ExpressRoutePortStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FilterInitParameters) DeepCopyInto(out *FilterInitParameters) { + *out = *in + if in.Item != nil { + in, out := &in.Item, &out.Item + *out = make([]ItemInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterInitParameters. +func (in *FilterInitParameters) DeepCopy() *FilterInitParameters { + if in == nil { + return nil + } + out := new(FilterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterObservation) DeepCopyInto(out *FilterObservation) { + *out = *in + if in.Item != nil { + in, out := &in.Item, &out.Item + *out = make([]ItemObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterObservation. +func (in *FilterObservation) DeepCopy() *FilterObservation { + if in == nil { + return nil + } + out := new(FilterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterParameters) DeepCopyInto(out *FilterParameters) { + *out = *in + if in.Item != nil { + in, out := &in.Item, &out.Item + *out = make([]ItemParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterParameters. 
+func (in *FilterParameters) DeepCopy() *FilterParameters { + if in == nil { + return nil + } + out := new(FilterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Firewall) DeepCopyInto(out *Firewall) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Firewall. +func (in *Firewall) DeepCopy() *Firewall { + if in == nil { + return nil + } + out := new(Firewall) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Firewall) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FirewallIPConfigurationInitParameters) DeepCopyInto(out *FirewallIPConfigurationInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PublicIPAddressID != nil { + in, out := &in.PublicIPAddressID, &out.PublicIPAddressID + *out = new(string) + **out = **in + } + if in.PublicIPAddressIDRef != nil { + in, out := &in.PublicIPAddressIDRef, &out.PublicIPAddressIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PublicIPAddressIDSelector != nil { + in, out := &in.PublicIPAddressIDSelector, &out.PublicIPAddressIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirewallIPConfigurationInitParameters. +func (in *FirewallIPConfigurationInitParameters) DeepCopy() *FirewallIPConfigurationInitParameters { + if in == nil { + return nil + } + out := new(FirewallIPConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FirewallIPConfigurationObservation) DeepCopyInto(out *FirewallIPConfigurationObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PrivateIPAddress != nil { + in, out := &in.PrivateIPAddress, &out.PrivateIPAddress + *out = new(string) + **out = **in + } + if in.PublicIPAddressID != nil { + in, out := &in.PublicIPAddressID, &out.PublicIPAddressID + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirewallIPConfigurationObservation. +func (in *FirewallIPConfigurationObservation) DeepCopy() *FirewallIPConfigurationObservation { + if in == nil { + return nil + } + out := new(FirewallIPConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FirewallIPConfigurationParameters) DeepCopyInto(out *FirewallIPConfigurationParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PublicIPAddressID != nil { + in, out := &in.PublicIPAddressID, &out.PublicIPAddressID + *out = new(string) + **out = **in + } + if in.PublicIPAddressIDRef != nil { + in, out := &in.PublicIPAddressIDRef, &out.PublicIPAddressIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PublicIPAddressIDSelector != nil { + in, out := &in.PublicIPAddressIDSelector, &out.PublicIPAddressIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirewallIPConfigurationParameters. +func (in *FirewallIPConfigurationParameters) DeepCopy() *FirewallIPConfigurationParameters { + if in == nil { + return nil + } + out := new(FirewallIPConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FirewallInitParameters) DeepCopyInto(out *FirewallInitParameters) { + *out = *in + if in.DNSProxyEnabled != nil { + in, out := &in.DNSProxyEnabled, &out.DNSProxyEnabled + *out = new(bool) + **out = **in + } + if in.DNSServers != nil { + in, out := &in.DNSServers, &out.DNSServers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FirewallPolicyID != nil { + in, out := &in.FirewallPolicyID, &out.FirewallPolicyID + *out = new(string) + **out = **in + } + if in.IPConfiguration != nil { + in, out := &in.IPConfiguration, &out.IPConfiguration + *out = make([]FirewallIPConfigurationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ManagementIPConfiguration != nil { + in, out := &in.ManagementIPConfiguration, &out.ManagementIPConfiguration + *out = new(ManagementIPConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PrivateIPRanges != nil { + in, out := &in.PrivateIPRanges, &out.PrivateIPRanges + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.SkuTier != nil { + in, out := &in.SkuTier, &out.SkuTier + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ThreatIntelMode != nil { + in, out := &in.ThreatIntelMode, &out.ThreatIntelMode + *out = 
new(string) + **out = **in + } + if in.VirtualHub != nil { + in, out := &in.VirtualHub, &out.VirtualHub + *out = new(VirtualHubInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirewallInitParameters. +func (in *FirewallInitParameters) DeepCopy() *FirewallInitParameters { + if in == nil { + return nil + } + out := new(FirewallInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FirewallList) DeepCopyInto(out *FirewallList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Firewall, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirewallList. +func (in *FirewallList) DeepCopy() *FirewallList { + if in == nil { + return nil + } + out := new(FirewallList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FirewallList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FirewallObservation) DeepCopyInto(out *FirewallObservation) { + *out = *in + if in.DNSProxyEnabled != nil { + in, out := &in.DNSProxyEnabled, &out.DNSProxyEnabled + *out = new(bool) + **out = **in + } + if in.DNSServers != nil { + in, out := &in.DNSServers, &out.DNSServers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FirewallPolicyID != nil { + in, out := &in.FirewallPolicyID, &out.FirewallPolicyID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IPConfiguration != nil { + in, out := &in.IPConfiguration, &out.IPConfiguration + *out = make([]FirewallIPConfigurationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ManagementIPConfiguration != nil { + in, out := &in.ManagementIPConfiguration, &out.ManagementIPConfiguration + *out = new(ManagementIPConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.PrivateIPRanges != nil { + in, out := &in.PrivateIPRanges, &out.PrivateIPRanges + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.SkuTier != nil { + in, out := &in.SkuTier, &out.SkuTier + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := 
(*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ThreatIntelMode != nil { + in, out := &in.ThreatIntelMode, &out.ThreatIntelMode + *out = new(string) + **out = **in + } + if in.VirtualHub != nil { + in, out := &in.VirtualHub, &out.VirtualHub + *out = new(VirtualHubObservation) + (*in).DeepCopyInto(*out) + } + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirewallObservation. +func (in *FirewallObservation) DeepCopy() *FirewallObservation { + if in == nil { + return nil + } + out := new(FirewallObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FirewallParameters) DeepCopyInto(out *FirewallParameters) { + *out = *in + if in.DNSProxyEnabled != nil { + in, out := &in.DNSProxyEnabled, &out.DNSProxyEnabled + *out = new(bool) + **out = **in + } + if in.DNSServers != nil { + in, out := &in.DNSServers, &out.DNSServers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FirewallPolicyID != nil { + in, out := &in.FirewallPolicyID, &out.FirewallPolicyID + *out = new(string) + **out = **in + } + if in.IPConfiguration != nil { + in, out := &in.IPConfiguration, &out.IPConfiguration + *out = make([]FirewallIPConfigurationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ManagementIPConfiguration != nil { + in, out := &in.ManagementIPConfiguration, &out.ManagementIPConfiguration + *out = new(ManagementIPConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.PrivateIPRanges != nil { + in, out := &in.PrivateIPRanges, &out.PrivateIPRanges + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.SkuTier != nil { + in, out := &in.SkuTier, &out.SkuTier + *out = 
new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ThreatIntelMode != nil { + in, out := &in.ThreatIntelMode, &out.ThreatIntelMode + *out = new(string) + **out = **in + } + if in.VirtualHub != nil { + in, out := &in.VirtualHub, &out.VirtualHub + *out = new(VirtualHubParameters) + (*in).DeepCopyInto(*out) + } + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirewallParameters. +func (in *FirewallParameters) DeepCopy() *FirewallParameters { + if in == nil { + return nil + } + out := new(FirewallParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FirewallPolicy) DeepCopyInto(out *FirewallPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirewallPolicy. +func (in *FirewallPolicy) DeepCopy() *FirewallPolicy { + if in == nil { + return nil + } + out := new(FirewallPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *FirewallPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FirewallPolicyIdentityInitParameters) DeepCopyInto(out *FirewallPolicyIdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirewallPolicyIdentityInitParameters. +func (in *FirewallPolicyIdentityInitParameters) DeepCopy() *FirewallPolicyIdentityInitParameters { + if in == nil { + return nil + } + out := new(FirewallPolicyIdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FirewallPolicyIdentityObservation) DeepCopyInto(out *FirewallPolicyIdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirewallPolicyIdentityObservation. 
+func (in *FirewallPolicyIdentityObservation) DeepCopy() *FirewallPolicyIdentityObservation { + if in == nil { + return nil + } + out := new(FirewallPolicyIdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FirewallPolicyIdentityParameters) DeepCopyInto(out *FirewallPolicyIdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirewallPolicyIdentityParameters. +func (in *FirewallPolicyIdentityParameters) DeepCopy() *FirewallPolicyIdentityParameters { + if in == nil { + return nil + } + out := new(FirewallPolicyIdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FirewallPolicyInitParameters) DeepCopyInto(out *FirewallPolicyInitParameters) { + *out = *in + if in.AutoLearnPrivateRangesEnabled != nil { + in, out := &in.AutoLearnPrivateRangesEnabled, &out.AutoLearnPrivateRangesEnabled + *out = new(bool) + **out = **in + } + if in.BasePolicyID != nil { + in, out := &in.BasePolicyID, &out.BasePolicyID + *out = new(string) + **out = **in + } + if in.DNS != nil { + in, out := &in.DNS, &out.DNS + *out = new(DNSInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ExplicitProxy != nil { + in, out := &in.ExplicitProxy, &out.ExplicitProxy + *out = new(ExplicitProxyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(FirewallPolicyIdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Insights != nil { + in, out := &in.Insights, &out.Insights + *out = new(InsightsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.IntrusionDetection != nil { + in, out := &in.IntrusionDetection, &out.IntrusionDetection + *out = new(IntrusionDetectionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PrivateIPRanges != nil { + in, out := &in.PrivateIPRanges, &out.PrivateIPRanges + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SQLRedirectAllowed != nil { + in, out := &in.SQLRedirectAllowed, &out.SQLRedirectAllowed + *out = new(bool) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.TLSCertificate != nil { + in, out := &in.TLSCertificate, &out.TLSCertificate + *out = new(TLSCertificateInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var 
outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ThreatIntelligenceAllowlist != nil { + in, out := &in.ThreatIntelligenceAllowlist, &out.ThreatIntelligenceAllowlist + *out = new(ThreatIntelligenceAllowlistInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ThreatIntelligenceMode != nil { + in, out := &in.ThreatIntelligenceMode, &out.ThreatIntelligenceMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirewallPolicyInitParameters. +func (in *FirewallPolicyInitParameters) DeepCopy() *FirewallPolicyInitParameters { + if in == nil { + return nil + } + out := new(FirewallPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FirewallPolicyList) DeepCopyInto(out *FirewallPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]FirewallPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirewallPolicyList. +func (in *FirewallPolicyList) DeepCopy() *FirewallPolicyList { + if in == nil { + return nil + } + out := new(FirewallPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FirewallPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FirewallPolicyObservation) DeepCopyInto(out *FirewallPolicyObservation) { + *out = *in + if in.AutoLearnPrivateRangesEnabled != nil { + in, out := &in.AutoLearnPrivateRangesEnabled, &out.AutoLearnPrivateRangesEnabled + *out = new(bool) + **out = **in + } + if in.BasePolicyID != nil { + in, out := &in.BasePolicyID, &out.BasePolicyID + *out = new(string) + **out = **in + } + if in.ChildPolicies != nil { + in, out := &in.ChildPolicies, &out.ChildPolicies + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DNS != nil { + in, out := &in.DNS, &out.DNS + *out = new(DNSObservation) + (*in).DeepCopyInto(*out) + } + if in.ExplicitProxy != nil { + in, out := &in.ExplicitProxy, &out.ExplicitProxy + *out = new(ExplicitProxyObservation) + (*in).DeepCopyInto(*out) + } + if in.Firewalls != nil { + in, out := &in.Firewalls, &out.Firewalls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(FirewallPolicyIdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.Insights != nil { + in, out := &in.Insights, &out.Insights + *out = new(InsightsObservation) + (*in).DeepCopyInto(*out) + } + if in.IntrusionDetection != nil { + in, out := &in.IntrusionDetection, &out.IntrusionDetection + *out = new(IntrusionDetectionObservation) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PrivateIPRanges != nil { + in, out := &in.PrivateIPRanges, &out.PrivateIPRanges + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) 
+ **out = **in + } + } + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.RuleCollectionGroups != nil { + in, out := &in.RuleCollectionGroups, &out.RuleCollectionGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SQLRedirectAllowed != nil { + in, out := &in.SQLRedirectAllowed, &out.SQLRedirectAllowed + *out = new(bool) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.TLSCertificate != nil { + in, out := &in.TLSCertificate, &out.TLSCertificate + *out = new(TLSCertificateObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ThreatIntelligenceAllowlist != nil { + in, out := &in.ThreatIntelligenceAllowlist, &out.ThreatIntelligenceAllowlist + *out = new(ThreatIntelligenceAllowlistObservation) + (*in).DeepCopyInto(*out) + } + if in.ThreatIntelligenceMode != nil { + in, out := &in.ThreatIntelligenceMode, &out.ThreatIntelligenceMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirewallPolicyObservation. +func (in *FirewallPolicyObservation) DeepCopy() *FirewallPolicyObservation { + if in == nil { + return nil + } + out := new(FirewallPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FirewallPolicyParameters) DeepCopyInto(out *FirewallPolicyParameters) { + *out = *in + if in.AutoLearnPrivateRangesEnabled != nil { + in, out := &in.AutoLearnPrivateRangesEnabled, &out.AutoLearnPrivateRangesEnabled + *out = new(bool) + **out = **in + } + if in.BasePolicyID != nil { + in, out := &in.BasePolicyID, &out.BasePolicyID + *out = new(string) + **out = **in + } + if in.DNS != nil { + in, out := &in.DNS, &out.DNS + *out = new(DNSParameters) + (*in).DeepCopyInto(*out) + } + if in.ExplicitProxy != nil { + in, out := &in.ExplicitProxy, &out.ExplicitProxy + *out = new(ExplicitProxyParameters) + (*in).DeepCopyInto(*out) + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(FirewallPolicyIdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.Insights != nil { + in, out := &in.Insights, &out.Insights + *out = new(InsightsParameters) + (*in).DeepCopyInto(*out) + } + if in.IntrusionDetection != nil { + in, out := &in.IntrusionDetection, &out.IntrusionDetection + *out = new(IntrusionDetectionParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PrivateIPRanges != nil { + in, out := &in.PrivateIPRanges, &out.PrivateIPRanges + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SQLRedirectAllowed != nil { + in, out := &in.SQLRedirectAllowed, 
&out.SQLRedirectAllowed + *out = new(bool) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.TLSCertificate != nil { + in, out := &in.TLSCertificate, &out.TLSCertificate + *out = new(TLSCertificateParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ThreatIntelligenceAllowlist != nil { + in, out := &in.ThreatIntelligenceAllowlist, &out.ThreatIntelligenceAllowlist + *out = new(ThreatIntelligenceAllowlistParameters) + (*in).DeepCopyInto(*out) + } + if in.ThreatIntelligenceMode != nil { + in, out := &in.ThreatIntelligenceMode, &out.ThreatIntelligenceMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirewallPolicyParameters. +func (in *FirewallPolicyParameters) DeepCopy() *FirewallPolicyParameters { + if in == nil { + return nil + } + out := new(FirewallPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FirewallPolicySpec) DeepCopyInto(out *FirewallPolicySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirewallPolicySpec. 
+func (in *FirewallPolicySpec) DeepCopy() *FirewallPolicySpec { + if in == nil { + return nil + } + out := new(FirewallPolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FirewallPolicyStatus) DeepCopyInto(out *FirewallPolicyStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirewallPolicyStatus. +func (in *FirewallPolicyStatus) DeepCopy() *FirewallPolicyStatus { + if in == nil { + return nil + } + out := new(FirewallPolicyStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FirewallSpec) DeepCopyInto(out *FirewallSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirewallSpec. +func (in *FirewallSpec) DeepCopy() *FirewallSpec { + if in == nil { + return nil + } + out := new(FirewallSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FirewallStatus) DeepCopyInto(out *FirewallStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirewallStatus. 
+func (in *FirewallStatus) DeepCopy() *FirewallStatus { + if in == nil { + return nil + } + out := new(FirewallStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ForwardingConfigurationInitParameters) DeepCopyInto(out *ForwardingConfigurationInitParameters) { + *out = *in + if in.BackendPoolName != nil { + in, out := &in.BackendPoolName, &out.BackendPoolName + *out = new(string) + **out = **in + } + if in.CacheDuration != nil { + in, out := &in.CacheDuration, &out.CacheDuration + *out = new(string) + **out = **in + } + if in.CacheEnabled != nil { + in, out := &in.CacheEnabled, &out.CacheEnabled + *out = new(bool) + **out = **in + } + if in.CacheQueryParameterStripDirective != nil { + in, out := &in.CacheQueryParameterStripDirective, &out.CacheQueryParameterStripDirective + *out = new(string) + **out = **in + } + if in.CacheQueryParameters != nil { + in, out := &in.CacheQueryParameters, &out.CacheQueryParameters + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CacheUseDynamicCompression != nil { + in, out := &in.CacheUseDynamicCompression, &out.CacheUseDynamicCompression + *out = new(bool) + **out = **in + } + if in.CustomForwardingPath != nil { + in, out := &in.CustomForwardingPath, &out.CustomForwardingPath + *out = new(string) + **out = **in + } + if in.ForwardingProtocol != nil { + in, out := &in.ForwardingProtocol, &out.ForwardingProtocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ForwardingConfigurationInitParameters. 
+func (in *ForwardingConfigurationInitParameters) DeepCopy() *ForwardingConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ForwardingConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ForwardingConfigurationObservation) DeepCopyInto(out *ForwardingConfigurationObservation) { + *out = *in + if in.BackendPoolName != nil { + in, out := &in.BackendPoolName, &out.BackendPoolName + *out = new(string) + **out = **in + } + if in.CacheDuration != nil { + in, out := &in.CacheDuration, &out.CacheDuration + *out = new(string) + **out = **in + } + if in.CacheEnabled != nil { + in, out := &in.CacheEnabled, &out.CacheEnabled + *out = new(bool) + **out = **in + } + if in.CacheQueryParameterStripDirective != nil { + in, out := &in.CacheQueryParameterStripDirective, &out.CacheQueryParameterStripDirective + *out = new(string) + **out = **in + } + if in.CacheQueryParameters != nil { + in, out := &in.CacheQueryParameters, &out.CacheQueryParameters + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CacheUseDynamicCompression != nil { + in, out := &in.CacheUseDynamicCompression, &out.CacheUseDynamicCompression + *out = new(bool) + **out = **in + } + if in.CustomForwardingPath != nil { + in, out := &in.CustomForwardingPath, &out.CustomForwardingPath + *out = new(string) + **out = **in + } + if in.ForwardingProtocol != nil { + in, out := &in.ForwardingProtocol, &out.ForwardingProtocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ForwardingConfigurationObservation. 
+func (in *ForwardingConfigurationObservation) DeepCopy() *ForwardingConfigurationObservation { + if in == nil { + return nil + } + out := new(ForwardingConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ForwardingConfigurationParameters) DeepCopyInto(out *ForwardingConfigurationParameters) { + *out = *in + if in.BackendPoolName != nil { + in, out := &in.BackendPoolName, &out.BackendPoolName + *out = new(string) + **out = **in + } + if in.CacheDuration != nil { + in, out := &in.CacheDuration, &out.CacheDuration + *out = new(string) + **out = **in + } + if in.CacheEnabled != nil { + in, out := &in.CacheEnabled, &out.CacheEnabled + *out = new(bool) + **out = **in + } + if in.CacheQueryParameterStripDirective != nil { + in, out := &in.CacheQueryParameterStripDirective, &out.CacheQueryParameterStripDirective + *out = new(string) + **out = **in + } + if in.CacheQueryParameters != nil { + in, out := &in.CacheQueryParameters, &out.CacheQueryParameters + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CacheUseDynamicCompression != nil { + in, out := &in.CacheUseDynamicCompression, &out.CacheUseDynamicCompression + *out = new(bool) + **out = **in + } + if in.CustomForwardingPath != nil { + in, out := &in.CustomForwardingPath, &out.CustomForwardingPath + *out = new(string) + **out = **in + } + if in.ForwardingProtocol != nil { + in, out := &in.ForwardingProtocol, &out.ForwardingProtocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ForwardingConfigurationParameters. 
+func (in *ForwardingConfigurationParameters) DeepCopy() *ForwardingConfigurationParameters { + if in == nil { + return nil + } + out := new(ForwardingConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontDoor) DeepCopyInto(out *FrontDoor) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontDoor. +func (in *FrontDoor) DeepCopy() *FrontDoor { + if in == nil { + return nil + } + out := new(FrontDoor) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FrontDoor) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FrontDoorInitParameters) DeepCopyInto(out *FrontDoorInitParameters) { + *out = *in + if in.BackendPool != nil { + in, out := &in.BackendPool, &out.BackendPool + *out = make([]BackendPoolInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.BackendPoolHealthProbe != nil { + in, out := &in.BackendPoolHealthProbe, &out.BackendPoolHealthProbe + *out = make([]BackendPoolHealthProbeInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.BackendPoolLoadBalancing != nil { + in, out := &in.BackendPoolLoadBalancing, &out.BackendPoolLoadBalancing + *out = make([]BackendPoolLoadBalancingInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.BackendPoolSettings != nil { + in, out := &in.BackendPoolSettings, &out.BackendPoolSettings + *out = make([]BackendPoolSettingsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FriendlyName != nil { + in, out := &in.FriendlyName, &out.FriendlyName + *out = new(string) + **out = **in + } + if in.FrontendEndpoint != nil { + in, out := &in.FrontendEndpoint, &out.FrontendEndpoint + *out = make([]FrontendEndpointInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LoadBalancerEnabled != nil { + in, out := &in.LoadBalancerEnabled, &out.LoadBalancerEnabled + *out = new(bool) + **out = **in + } + if in.RoutingRule != nil { + in, out := &in.RoutingRule, &out.RoutingRule + *out = make([]RoutingRuleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} 
+ +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontDoorInitParameters. +func (in *FrontDoorInitParameters) DeepCopy() *FrontDoorInitParameters { + if in == nil { + return nil + } + out := new(FrontDoorInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontDoorList) DeepCopyInto(out *FrontDoorList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]FrontDoor, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontDoorList. +func (in *FrontDoorList) DeepCopy() *FrontDoorList { + if in == nil { + return nil + } + out := new(FrontDoorList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FrontDoorList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FrontDoorObservation) DeepCopyInto(out *FrontDoorObservation) { + *out = *in + if in.BackendPool != nil { + in, out := &in.BackendPool, &out.BackendPool + *out = make([]BackendPoolObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.BackendPoolHealthProbe != nil { + in, out := &in.BackendPoolHealthProbe, &out.BackendPoolHealthProbe + *out = make([]BackendPoolHealthProbeObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.BackendPoolHealthProbes != nil { + in, out := &in.BackendPoolHealthProbes, &out.BackendPoolHealthProbes + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.BackendPoolLoadBalancing != nil { + in, out := &in.BackendPoolLoadBalancing, &out.BackendPoolLoadBalancing + *out = make([]BackendPoolLoadBalancingObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.BackendPoolLoadBalancingSettings != nil { + in, out := &in.BackendPoolLoadBalancingSettings, &out.BackendPoolLoadBalancingSettings + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.BackendPoolSettings != nil { + in, out := &in.BackendPoolSettings, &out.BackendPoolSettings + *out = make([]BackendPoolSettingsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.BackendPools != nil { + in, out := &in.BackendPools, &out.BackendPools + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + 
inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.CNAME != nil { + in, out := &in.CNAME, &out.CNAME + *out = new(string) + **out = **in + } + if in.ExplicitResourceOrder != nil { + in, out := &in.ExplicitResourceOrder, &out.ExplicitResourceOrder + *out = make([]ExplicitResourceOrderObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FriendlyName != nil { + in, out := &in.FriendlyName, &out.FriendlyName + *out = new(string) + **out = **in + } + if in.FrontendEndpoint != nil { + in, out := &in.FrontendEndpoint, &out.FrontendEndpoint + *out = make([]FrontendEndpointObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FrontendEndpoints != nil { + in, out := &in.FrontendEndpoints, &out.FrontendEndpoints + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.HeaderFrontdoorID != nil { + in, out := &in.HeaderFrontdoorID, &out.HeaderFrontdoorID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LoadBalancerEnabled != nil { + in, out := &in.LoadBalancerEnabled, &out.LoadBalancerEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.RoutingRule != nil { + in, out := &in.RoutingRule, &out.RoutingRule + *out = make([]RoutingRuleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RoutingRules != nil { + in, out := &in.RoutingRules, &out.RoutingRules + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal 
*string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontDoorObservation. +func (in *FrontDoorObservation) DeepCopy() *FrontDoorObservation { + if in == nil { + return nil + } + out := new(FrontDoorObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontDoorParameters) DeepCopyInto(out *FrontDoorParameters) { + *out = *in + if in.BackendPool != nil { + in, out := &in.BackendPool, &out.BackendPool + *out = make([]BackendPoolParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.BackendPoolHealthProbe != nil { + in, out := &in.BackendPoolHealthProbe, &out.BackendPoolHealthProbe + *out = make([]BackendPoolHealthProbeParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.BackendPoolLoadBalancing != nil { + in, out := &in.BackendPoolLoadBalancing, &out.BackendPoolLoadBalancing + *out = make([]BackendPoolLoadBalancingParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.BackendPoolSettings != nil { + in, out := &in.BackendPoolSettings, &out.BackendPoolSettings + *out = make([]BackendPoolSettingsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FriendlyName != nil { + in, out := &in.FriendlyName, &out.FriendlyName + *out = new(string) + **out 
= **in + } + if in.FrontendEndpoint != nil { + in, out := &in.FrontendEndpoint, &out.FrontendEndpoint + *out = make([]FrontendEndpointParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LoadBalancerEnabled != nil { + in, out := &in.LoadBalancerEnabled, &out.LoadBalancerEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RoutingRule != nil { + in, out := &in.RoutingRule, &out.RoutingRule + *out = make([]RoutingRuleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontDoorParameters. +func (in *FrontDoorParameters) DeepCopy() *FrontDoorParameters { + if in == nil { + return nil + } + out := new(FrontDoorParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FrontDoorSpec) DeepCopyInto(out *FrontDoorSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontDoorSpec. +func (in *FrontDoorSpec) DeepCopy() *FrontDoorSpec { + if in == nil { + return nil + } + out := new(FrontDoorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontDoorStatus) DeepCopyInto(out *FrontDoorStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontDoorStatus. +func (in *FrontDoorStatus) DeepCopy() *FrontDoorStatus { + if in == nil { + return nil + } + out := new(FrontDoorStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontdoorCustomHTTPSConfiguration) DeepCopyInto(out *FrontdoorCustomHTTPSConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontdoorCustomHTTPSConfiguration. +func (in *FrontdoorCustomHTTPSConfiguration) DeepCopy() *FrontdoorCustomHTTPSConfiguration { + if in == nil { + return nil + } + out := new(FrontdoorCustomHTTPSConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *FrontdoorCustomHTTPSConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontdoorCustomHTTPSConfigurationInitParameters) DeepCopyInto(out *FrontdoorCustomHTTPSConfigurationInitParameters) { + *out = *in + if in.CustomHTTPSConfiguration != nil { + in, out := &in.CustomHTTPSConfiguration, &out.CustomHTTPSConfiguration + *out = new(CustomHTTPSConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomHTTPSProvisioningEnabled != nil { + in, out := &in.CustomHTTPSProvisioningEnabled, &out.CustomHTTPSProvisioningEnabled + *out = new(bool) + **out = **in + } + if in.FrontendEndpointID != nil { + in, out := &in.FrontendEndpointID, &out.FrontendEndpointID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontdoorCustomHTTPSConfigurationInitParameters. +func (in *FrontdoorCustomHTTPSConfigurationInitParameters) DeepCopy() *FrontdoorCustomHTTPSConfigurationInitParameters { + if in == nil { + return nil + } + out := new(FrontdoorCustomHTTPSConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontdoorCustomHTTPSConfigurationList) DeepCopyInto(out *FrontdoorCustomHTTPSConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]FrontdoorCustomHTTPSConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontdoorCustomHTTPSConfigurationList. 
+func (in *FrontdoorCustomHTTPSConfigurationList) DeepCopy() *FrontdoorCustomHTTPSConfigurationList { + if in == nil { + return nil + } + out := new(FrontdoorCustomHTTPSConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FrontdoorCustomHTTPSConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontdoorCustomHTTPSConfigurationObservation) DeepCopyInto(out *FrontdoorCustomHTTPSConfigurationObservation) { + *out = *in + if in.CustomHTTPSConfiguration != nil { + in, out := &in.CustomHTTPSConfiguration, &out.CustomHTTPSConfiguration + *out = new(CustomHTTPSConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.CustomHTTPSProvisioningEnabled != nil { + in, out := &in.CustomHTTPSProvisioningEnabled, &out.CustomHTTPSProvisioningEnabled + *out = new(bool) + **out = **in + } + if in.FrontendEndpointID != nil { + in, out := &in.FrontendEndpointID, &out.FrontendEndpointID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontdoorCustomHTTPSConfigurationObservation. +func (in *FrontdoorCustomHTTPSConfigurationObservation) DeepCopy() *FrontdoorCustomHTTPSConfigurationObservation { + if in == nil { + return nil + } + out := new(FrontdoorCustomHTTPSConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FrontdoorCustomHTTPSConfigurationParameters) DeepCopyInto(out *FrontdoorCustomHTTPSConfigurationParameters) { + *out = *in + if in.CustomHTTPSConfiguration != nil { + in, out := &in.CustomHTTPSConfiguration, &out.CustomHTTPSConfiguration + *out = new(CustomHTTPSConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomHTTPSProvisioningEnabled != nil { + in, out := &in.CustomHTTPSProvisioningEnabled, &out.CustomHTTPSProvisioningEnabled + *out = new(bool) + **out = **in + } + if in.FrontendEndpointID != nil { + in, out := &in.FrontendEndpointID, &out.FrontendEndpointID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontdoorCustomHTTPSConfigurationParameters. +func (in *FrontdoorCustomHTTPSConfigurationParameters) DeepCopy() *FrontdoorCustomHTTPSConfigurationParameters { + if in == nil { + return nil + } + out := new(FrontdoorCustomHTTPSConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontdoorCustomHTTPSConfigurationSpec) DeepCopyInto(out *FrontdoorCustomHTTPSConfigurationSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontdoorCustomHTTPSConfigurationSpec. +func (in *FrontdoorCustomHTTPSConfigurationSpec) DeepCopy() *FrontdoorCustomHTTPSConfigurationSpec { + if in == nil { + return nil + } + out := new(FrontdoorCustomHTTPSConfigurationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FrontdoorCustomHTTPSConfigurationStatus) DeepCopyInto(out *FrontdoorCustomHTTPSConfigurationStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontdoorCustomHTTPSConfigurationStatus. +func (in *FrontdoorCustomHTTPSConfigurationStatus) DeepCopy() *FrontdoorCustomHTTPSConfigurationStatus { + if in == nil { + return nil + } + out := new(FrontdoorCustomHTTPSConfigurationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontdoorRulesEngine) DeepCopyInto(out *FrontdoorRulesEngine) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontdoorRulesEngine. +func (in *FrontdoorRulesEngine) DeepCopy() *FrontdoorRulesEngine { + if in == nil { + return nil + } + out := new(FrontdoorRulesEngine) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FrontdoorRulesEngine) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FrontdoorRulesEngineInitParameters) DeepCopyInto(out *FrontdoorRulesEngineInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Rule != nil { + in, out := &in.Rule, &out.Rule + *out = make([]RuleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontdoorRulesEngineInitParameters. +func (in *FrontdoorRulesEngineInitParameters) DeepCopy() *FrontdoorRulesEngineInitParameters { + if in == nil { + return nil + } + out := new(FrontdoorRulesEngineInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontdoorRulesEngineList) DeepCopyInto(out *FrontdoorRulesEngineList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]FrontdoorRulesEngine, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontdoorRulesEngineList. +func (in *FrontdoorRulesEngineList) DeepCopy() *FrontdoorRulesEngineList { + if in == nil { + return nil + } + out := new(FrontdoorRulesEngineList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FrontdoorRulesEngineList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FrontdoorRulesEngineObservation) DeepCopyInto(out *FrontdoorRulesEngineObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.FrontdoorName != nil { + in, out := &in.FrontdoorName, &out.FrontdoorName + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Rule != nil { + in, out := &in.Rule, &out.Rule + *out = make([]RuleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontdoorRulesEngineObservation. +func (in *FrontdoorRulesEngineObservation) DeepCopy() *FrontdoorRulesEngineObservation { + if in == nil { + return nil + } + out := new(FrontdoorRulesEngineObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FrontdoorRulesEngineParameters) DeepCopyInto(out *FrontdoorRulesEngineParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.FrontdoorName != nil { + in, out := &in.FrontdoorName, &out.FrontdoorName + *out = new(string) + **out = **in + } + if in.FrontdoorNameRef != nil { + in, out := &in.FrontdoorNameRef, &out.FrontdoorNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FrontdoorNameSelector != nil { + in, out := &in.FrontdoorNameSelector, &out.FrontdoorNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Rule != nil { + in, out := &in.Rule, &out.Rule + *out = make([]RuleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontdoorRulesEngineParameters. +func (in *FrontdoorRulesEngineParameters) DeepCopy() *FrontdoorRulesEngineParameters { + if in == nil { + return nil + } + out := new(FrontdoorRulesEngineParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FrontdoorRulesEngineSpec) DeepCopyInto(out *FrontdoorRulesEngineSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontdoorRulesEngineSpec. +func (in *FrontdoorRulesEngineSpec) DeepCopy() *FrontdoorRulesEngineSpec { + if in == nil { + return nil + } + out := new(FrontdoorRulesEngineSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontdoorRulesEngineStatus) DeepCopyInto(out *FrontdoorRulesEngineStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontdoorRulesEngineStatus. +func (in *FrontdoorRulesEngineStatus) DeepCopy() *FrontdoorRulesEngineStatus { + if in == nil { + return nil + } + out := new(FrontdoorRulesEngineStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FrontendEndpointInitParameters) DeepCopyInto(out *FrontendEndpointInitParameters) { + *out = *in + if in.HostName != nil { + in, out := &in.HostName, &out.HostName + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SessionAffinityEnabled != nil { + in, out := &in.SessionAffinityEnabled, &out.SessionAffinityEnabled + *out = new(bool) + **out = **in + } + if in.SessionAffinityTTLSeconds != nil { + in, out := &in.SessionAffinityTTLSeconds, &out.SessionAffinityTTLSeconds + *out = new(float64) + **out = **in + } + if in.WebApplicationFirewallPolicyLinkID != nil { + in, out := &in.WebApplicationFirewallPolicyLinkID, &out.WebApplicationFirewallPolicyLinkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontendEndpointInitParameters. +func (in *FrontendEndpointInitParameters) DeepCopy() *FrontendEndpointInitParameters { + if in == nil { + return nil + } + out := new(FrontendEndpointInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FrontendEndpointObservation) DeepCopyInto(out *FrontendEndpointObservation) { + *out = *in + if in.HostName != nil { + in, out := &in.HostName, &out.HostName + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SessionAffinityEnabled != nil { + in, out := &in.SessionAffinityEnabled, &out.SessionAffinityEnabled + *out = new(bool) + **out = **in + } + if in.SessionAffinityTTLSeconds != nil { + in, out := &in.SessionAffinityTTLSeconds, &out.SessionAffinityTTLSeconds + *out = new(float64) + **out = **in + } + if in.WebApplicationFirewallPolicyLinkID != nil { + in, out := &in.WebApplicationFirewallPolicyLinkID, &out.WebApplicationFirewallPolicyLinkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontendEndpointObservation. +func (in *FrontendEndpointObservation) DeepCopy() *FrontendEndpointObservation { + if in == nil { + return nil + } + out := new(FrontendEndpointObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FrontendEndpointParameters) DeepCopyInto(out *FrontendEndpointParameters) { + *out = *in + if in.HostName != nil { + in, out := &in.HostName, &out.HostName + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SessionAffinityEnabled != nil { + in, out := &in.SessionAffinityEnabled, &out.SessionAffinityEnabled + *out = new(bool) + **out = **in + } + if in.SessionAffinityTTLSeconds != nil { + in, out := &in.SessionAffinityTTLSeconds, &out.SessionAffinityTTLSeconds + *out = new(float64) + **out = **in + } + if in.WebApplicationFirewallPolicyLinkID != nil { + in, out := &in.WebApplicationFirewallPolicyLinkID, &out.WebApplicationFirewallPolicyLinkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontendEndpointParameters. +func (in *FrontendEndpointParameters) DeepCopy() *FrontendEndpointParameters { + if in == nil { + return nil + } + out := new(FrontendEndpointParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FrontendIPConfigurationInitParameters) DeepCopyInto(out *FrontendIPConfigurationInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PrivateIPAddress != nil { + in, out := &in.PrivateIPAddress, &out.PrivateIPAddress + *out = new(string) + **out = **in + } + if in.PrivateIPAddressAllocation != nil { + in, out := &in.PrivateIPAddressAllocation, &out.PrivateIPAddressAllocation + *out = new(string) + **out = **in + } + if in.PrivateLinkConfigurationName != nil { + in, out := &in.PrivateLinkConfigurationName, &out.PrivateLinkConfigurationName + *out = new(string) + **out = **in + } + if in.PublicIPAddressID != nil { + in, out := &in.PublicIPAddressID, &out.PublicIPAddressID + *out = new(string) + **out = **in + } + if in.PublicIPAddressIDRef != nil { + in, out := &in.PublicIPAddressIDRef, &out.PublicIPAddressIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PublicIPAddressIDSelector != nil { + in, out := &in.PublicIPAddressIDSelector, &out.PublicIPAddressIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontendIPConfigurationInitParameters. +func (in *FrontendIPConfigurationInitParameters) DeepCopy() *FrontendIPConfigurationInitParameters { + if in == nil { + return nil + } + out := new(FrontendIPConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *FrontendIPConfigurationObservation) DeepCopyInto(out *FrontendIPConfigurationObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PrivateIPAddress != nil { + in, out := &in.PrivateIPAddress, &out.PrivateIPAddress + *out = new(string) + **out = **in + } + if in.PrivateIPAddressAllocation != nil { + in, out := &in.PrivateIPAddressAllocation, &out.PrivateIPAddressAllocation + *out = new(string) + **out = **in + } + if in.PrivateLinkConfigurationID != nil { + in, out := &in.PrivateLinkConfigurationID, &out.PrivateLinkConfigurationID + *out = new(string) + **out = **in + } + if in.PrivateLinkConfigurationName != nil { + in, out := &in.PrivateLinkConfigurationName, &out.PrivateLinkConfigurationName + *out = new(string) + **out = **in + } + if in.PublicIPAddressID != nil { + in, out := &in.PublicIPAddressID, &out.PublicIPAddressID + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontendIPConfigurationObservation. +func (in *FrontendIPConfigurationObservation) DeepCopy() *FrontendIPConfigurationObservation { + if in == nil { + return nil + } + out := new(FrontendIPConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FrontendIPConfigurationParameters) DeepCopyInto(out *FrontendIPConfigurationParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PrivateIPAddress != nil { + in, out := &in.PrivateIPAddress, &out.PrivateIPAddress + *out = new(string) + **out = **in + } + if in.PrivateIPAddressAllocation != nil { + in, out := &in.PrivateIPAddressAllocation, &out.PrivateIPAddressAllocation + *out = new(string) + **out = **in + } + if in.PrivateLinkConfigurationName != nil { + in, out := &in.PrivateLinkConfigurationName, &out.PrivateLinkConfigurationName + *out = new(string) + **out = **in + } + if in.PublicIPAddressID != nil { + in, out := &in.PublicIPAddressID, &out.PublicIPAddressID + *out = new(string) + **out = **in + } + if in.PublicIPAddressIDRef != nil { + in, out := &in.PublicIPAddressIDRef, &out.PublicIPAddressIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PublicIPAddressIDSelector != nil { + in, out := &in.PublicIPAddressIDSelector, &out.PublicIPAddressIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontendIPConfigurationParameters. +func (in *FrontendIPConfigurationParameters) DeepCopy() *FrontendIPConfigurationParameters { + if in == nil { + return nil + } + out := new(FrontendIPConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FrontendPortInitParameters) DeepCopyInto(out *FrontendPortInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontendPortInitParameters. +func (in *FrontendPortInitParameters) DeepCopy() *FrontendPortInitParameters { + if in == nil { + return nil + } + out := new(FrontendPortInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontendPortObservation) DeepCopyInto(out *FrontendPortObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontendPortObservation. +func (in *FrontendPortObservation) DeepCopy() *FrontendPortObservation { + if in == nil { + return nil + } + out := new(FrontendPortObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontendPortParameters) DeepCopyInto(out *FrontendPortParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontendPortParameters. 
+func (in *FrontendPortParameters) DeepCopy() *FrontendPortParameters { + if in == nil { + return nil + } + out := new(FrontendPortParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatewayIPConfigurationInitParameters) DeepCopyInto(out *GatewayIPConfigurationInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayIPConfigurationInitParameters. +func (in *GatewayIPConfigurationInitParameters) DeepCopy() *GatewayIPConfigurationInitParameters { + if in == nil { + return nil + } + out := new(GatewayIPConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatewayIPConfigurationObservation) DeepCopyInto(out *GatewayIPConfigurationObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayIPConfigurationObservation. 
+func (in *GatewayIPConfigurationObservation) DeepCopy() *GatewayIPConfigurationObservation { + if in == nil { + return nil + } + out := new(GatewayIPConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatewayIPConfigurationParameters) DeepCopyInto(out *GatewayIPConfigurationParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayIPConfigurationParameters. +func (in *GatewayIPConfigurationParameters) DeepCopy() *GatewayIPConfigurationParameters { + if in == nil { + return nil + } + out := new(GatewayIPConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GlobalInitParameters) DeepCopyInto(out *GlobalInitParameters) { + *out = *in + if in.RequestBufferingEnabled != nil { + in, out := &in.RequestBufferingEnabled, &out.RequestBufferingEnabled + *out = new(bool) + **out = **in + } + if in.ResponseBufferingEnabled != nil { + in, out := &in.ResponseBufferingEnabled, &out.ResponseBufferingEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalInitParameters. 
+func (in *GlobalInitParameters) DeepCopy() *GlobalInitParameters { + if in == nil { + return nil + } + out := new(GlobalInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GlobalObservation) DeepCopyInto(out *GlobalObservation) { + *out = *in + if in.RequestBufferingEnabled != nil { + in, out := &in.RequestBufferingEnabled, &out.RequestBufferingEnabled + *out = new(bool) + **out = **in + } + if in.ResponseBufferingEnabled != nil { + in, out := &in.ResponseBufferingEnabled, &out.ResponseBufferingEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalObservation. +func (in *GlobalObservation) DeepCopy() *GlobalObservation { + if in == nil { + return nil + } + out := new(GlobalObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GlobalParameters) DeepCopyInto(out *GlobalParameters) { + *out = *in + if in.RequestBufferingEnabled != nil { + in, out := &in.RequestBufferingEnabled, &out.RequestBufferingEnabled + *out = new(bool) + **out = **in + } + if in.ResponseBufferingEnabled != nil { + in, out := &in.ResponseBufferingEnabled, &out.ResponseBufferingEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalParameters. +func (in *GlobalParameters) DeepCopy() *GlobalParameters { + if in == nil { + return nil + } + out := new(GlobalParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPConfigurationInitParameters) DeepCopyInto(out *HTTPConfigurationInitParameters) { + *out = *in + if in.Method != nil { + in, out := &in.Method, &out.Method + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.PreferHTTPS != nil { + in, out := &in.PreferHTTPS, &out.PreferHTTPS + *out = new(bool) + **out = **in + } + if in.RequestHeader != nil { + in, out := &in.RequestHeader, &out.RequestHeader + *out = make([]HTTPConfigurationRequestHeaderInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ValidStatusCodeRanges != nil { + in, out := &in.ValidStatusCodeRanges, &out.ValidStatusCodeRanges + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPConfigurationInitParameters. +func (in *HTTPConfigurationInitParameters) DeepCopy() *HTTPConfigurationInitParameters { + if in == nil { + return nil + } + out := new(HTTPConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPConfigurationObservation) DeepCopyInto(out *HTTPConfigurationObservation) { + *out = *in + if in.Method != nil { + in, out := &in.Method, &out.Method + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.PreferHTTPS != nil { + in, out := &in.PreferHTTPS, &out.PreferHTTPS + *out = new(bool) + **out = **in + } + if in.RequestHeader != nil { + in, out := &in.RequestHeader, &out.RequestHeader + *out = make([]HTTPConfigurationRequestHeaderObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ValidStatusCodeRanges != nil { + in, out := &in.ValidStatusCodeRanges, &out.ValidStatusCodeRanges + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPConfigurationObservation. +func (in *HTTPConfigurationObservation) DeepCopy() *HTTPConfigurationObservation { + if in == nil { + return nil + } + out := new(HTTPConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPConfigurationParameters) DeepCopyInto(out *HTTPConfigurationParameters) { + *out = *in + if in.Method != nil { + in, out := &in.Method, &out.Method + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.PreferHTTPS != nil { + in, out := &in.PreferHTTPS, &out.PreferHTTPS + *out = new(bool) + **out = **in + } + if in.RequestHeader != nil { + in, out := &in.RequestHeader, &out.RequestHeader + *out = make([]HTTPConfigurationRequestHeaderParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ValidStatusCodeRanges != nil { + in, out := &in.ValidStatusCodeRanges, &out.ValidStatusCodeRanges + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPConfigurationParameters. +func (in *HTTPConfigurationParameters) DeepCopy() *HTTPConfigurationParameters { + if in == nil { + return nil + } + out := new(HTTPConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPConfigurationRequestHeaderInitParameters) DeepCopyInto(out *HTTPConfigurationRequestHeaderInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPConfigurationRequestHeaderInitParameters. 
+func (in *HTTPConfigurationRequestHeaderInitParameters) DeepCopy() *HTTPConfigurationRequestHeaderInitParameters { + if in == nil { + return nil + } + out := new(HTTPConfigurationRequestHeaderInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPConfigurationRequestHeaderObservation) DeepCopyInto(out *HTTPConfigurationRequestHeaderObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPConfigurationRequestHeaderObservation. +func (in *HTTPConfigurationRequestHeaderObservation) DeepCopy() *HTTPConfigurationRequestHeaderObservation { + if in == nil { + return nil + } + out := new(HTTPConfigurationRequestHeaderObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPConfigurationRequestHeaderParameters) DeepCopyInto(out *HTTPConfigurationRequestHeaderParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPConfigurationRequestHeaderParameters. +func (in *HTTPConfigurationRequestHeaderParameters) DeepCopy() *HTTPConfigurationRequestHeaderParameters { + if in == nil { + return nil + } + out := new(HTTPConfigurationRequestHeaderParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPListenerCustomErrorConfigurationInitParameters) DeepCopyInto(out *HTTPListenerCustomErrorConfigurationInitParameters) { + *out = *in + if in.CustomErrorPageURL != nil { + in, out := &in.CustomErrorPageURL, &out.CustomErrorPageURL + *out = new(string) + **out = **in + } + if in.StatusCode != nil { + in, out := &in.StatusCode, &out.StatusCode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPListenerCustomErrorConfigurationInitParameters. +func (in *HTTPListenerCustomErrorConfigurationInitParameters) DeepCopy() *HTTPListenerCustomErrorConfigurationInitParameters { + if in == nil { + return nil + } + out := new(HTTPListenerCustomErrorConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPListenerCustomErrorConfigurationObservation) DeepCopyInto(out *HTTPListenerCustomErrorConfigurationObservation) { + *out = *in + if in.CustomErrorPageURL != nil { + in, out := &in.CustomErrorPageURL, &out.CustomErrorPageURL + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.StatusCode != nil { + in, out := &in.StatusCode, &out.StatusCode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPListenerCustomErrorConfigurationObservation. +func (in *HTTPListenerCustomErrorConfigurationObservation) DeepCopy() *HTTPListenerCustomErrorConfigurationObservation { + if in == nil { + return nil + } + out := new(HTTPListenerCustomErrorConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPListenerCustomErrorConfigurationParameters) DeepCopyInto(out *HTTPListenerCustomErrorConfigurationParameters) { + *out = *in + if in.CustomErrorPageURL != nil { + in, out := &in.CustomErrorPageURL, &out.CustomErrorPageURL + *out = new(string) + **out = **in + } + if in.StatusCode != nil { + in, out := &in.StatusCode, &out.StatusCode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPListenerCustomErrorConfigurationParameters. +func (in *HTTPListenerCustomErrorConfigurationParameters) DeepCopy() *HTTPListenerCustomErrorConfigurationParameters { + if in == nil { + return nil + } + out := new(HTTPListenerCustomErrorConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPListenerInitParameters) DeepCopyInto(out *HTTPListenerInitParameters) { + *out = *in + if in.CustomErrorConfiguration != nil { + in, out := &in.CustomErrorConfiguration, &out.CustomErrorConfiguration + *out = make([]HTTPListenerCustomErrorConfigurationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FirewallPolicyID != nil { + in, out := &in.FirewallPolicyID, &out.FirewallPolicyID + *out = new(string) + **out = **in + } + if in.FrontendIPConfigurationName != nil { + in, out := &in.FrontendIPConfigurationName, &out.FrontendIPConfigurationName + *out = new(string) + **out = **in + } + if in.FrontendPortName != nil { + in, out := &in.FrontendPortName, &out.FrontendPortName + *out = new(string) + **out = **in + } + if in.HostName != nil { + in, out := &in.HostName, &out.HostName + *out = new(string) + **out = **in + } + if in.HostNames != nil { + in, out := &in.HostNames, &out.HostNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = 
new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.RequireSni != nil { + in, out := &in.RequireSni, &out.RequireSni + *out = new(bool) + **out = **in + } + if in.SSLCertificateName != nil { + in, out := &in.SSLCertificateName, &out.SSLCertificateName + *out = new(string) + **out = **in + } + if in.SSLProfileName != nil { + in, out := &in.SSLProfileName, &out.SSLProfileName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPListenerInitParameters. +func (in *HTTPListenerInitParameters) DeepCopy() *HTTPListenerInitParameters { + if in == nil { + return nil + } + out := new(HTTPListenerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPListenerObservation) DeepCopyInto(out *HTTPListenerObservation) { + *out = *in + if in.CustomErrorConfiguration != nil { + in, out := &in.CustomErrorConfiguration, &out.CustomErrorConfiguration + *out = make([]HTTPListenerCustomErrorConfigurationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FirewallPolicyID != nil { + in, out := &in.FirewallPolicyID, &out.FirewallPolicyID + *out = new(string) + **out = **in + } + if in.FrontendIPConfigurationID != nil { + in, out := &in.FrontendIPConfigurationID, &out.FrontendIPConfigurationID + *out = new(string) + **out = **in + } + if in.FrontendIPConfigurationName != nil { + in, out := &in.FrontendIPConfigurationName, &out.FrontendIPConfigurationName + *out = new(string) + **out = **in + } + if in.FrontendPortID != nil { + in, out := &in.FrontendPortID, &out.FrontendPortID + *out = new(string) + **out = **in + } + if in.FrontendPortName != nil { + in, out := &in.FrontendPortName, &out.FrontendPortName + *out = new(string) + **out = **in + } + if in.HostName != nil { + in, out := &in.HostName, &out.HostName + *out = new(string) + **out = **in + } + if in.HostNames != nil { + in, out := &in.HostNames, &out.HostNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.RequireSni != nil { + in, out := &in.RequireSni, &out.RequireSni + *out = new(bool) + **out = **in + } + if in.SSLCertificateID != nil { + in, out := &in.SSLCertificateID, &out.SSLCertificateID + *out = new(string) + **out = **in + } + if in.SSLCertificateName != nil { + in, out := 
&in.SSLCertificateName, &out.SSLCertificateName + *out = new(string) + **out = **in + } + if in.SSLProfileID != nil { + in, out := &in.SSLProfileID, &out.SSLProfileID + *out = new(string) + **out = **in + } + if in.SSLProfileName != nil { + in, out := &in.SSLProfileName, &out.SSLProfileName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPListenerObservation. +func (in *HTTPListenerObservation) DeepCopy() *HTTPListenerObservation { + if in == nil { + return nil + } + out := new(HTTPListenerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPListenerParameters) DeepCopyInto(out *HTTPListenerParameters) { + *out = *in + if in.CustomErrorConfiguration != nil { + in, out := &in.CustomErrorConfiguration, &out.CustomErrorConfiguration + *out = make([]HTTPListenerCustomErrorConfigurationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FirewallPolicyID != nil { + in, out := &in.FirewallPolicyID, &out.FirewallPolicyID + *out = new(string) + **out = **in + } + if in.FrontendIPConfigurationName != nil { + in, out := &in.FrontendIPConfigurationName, &out.FrontendIPConfigurationName + *out = new(string) + **out = **in + } + if in.FrontendPortName != nil { + in, out := &in.FrontendPortName, &out.FrontendPortName + *out = new(string) + **out = **in + } + if in.HostName != nil { + in, out := &in.HostName, &out.HostName + *out = new(string) + **out = **in + } + if in.HostNames != nil { + in, out := &in.HostNames, &out.HostNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out 
:= &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.RequireSni != nil { + in, out := &in.RequireSni, &out.RequireSni + *out = new(bool) + **out = **in + } + if in.SSLCertificateName != nil { + in, out := &in.SSLCertificateName, &out.SSLCertificateName + *out = new(string) + **out = **in + } + if in.SSLProfileName != nil { + in, out := &in.SSLProfileName, &out.SSLProfileName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPListenerParameters. +func (in *HTTPListenerParameters) DeepCopy() *HTTPListenerParameters { + if in == nil { + return nil + } + out := new(HTTPListenerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPConfigurationInitParameters) DeepCopyInto(out *IPConfigurationInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Primary != nil { + in, out := &in.Primary, &out.Primary + *out = new(bool) + **out = **in + } + if in.PrivateIPAddress != nil { + in, out := &in.PrivateIPAddress, &out.PrivateIPAddress + *out = new(string) + **out = **in + } + if in.PrivateIPAddressAllocation != nil { + in, out := &in.PrivateIPAddressAllocation, &out.PrivateIPAddressAllocation + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPConfigurationInitParameters. 
+func (in *IPConfigurationInitParameters) DeepCopy() *IPConfigurationInitParameters { + if in == nil { + return nil + } + out := new(IPConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPConfigurationObservation) DeepCopyInto(out *IPConfigurationObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Primary != nil { + in, out := &in.Primary, &out.Primary + *out = new(bool) + **out = **in + } + if in.PrivateIPAddress != nil { + in, out := &in.PrivateIPAddress, &out.PrivateIPAddress + *out = new(string) + **out = **in + } + if in.PrivateIPAddressAllocation != nil { + in, out := &in.PrivateIPAddressAllocation, &out.PrivateIPAddressAllocation + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPConfigurationObservation. +func (in *IPConfigurationObservation) DeepCopy() *IPConfigurationObservation { + if in == nil { + return nil + } + out := new(IPConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IPConfigurationParameters) DeepCopyInto(out *IPConfigurationParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Primary != nil { + in, out := &in.Primary, &out.Primary + *out = new(bool) + **out = **in + } + if in.PrivateIPAddress != nil { + in, out := &in.PrivateIPAddress, &out.PrivateIPAddress + *out = new(string) + **out = **in + } + if in.PrivateIPAddressAllocation != nil { + in, out := &in.PrivateIPAddressAllocation, &out.PrivateIPAddressAllocation + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPConfigurationParameters. +func (in *IPConfigurationParameters) DeepCopy() *IPConfigurationParameters { + if in == nil { + return nil + } + out := new(IPConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IPv6InitParameters) DeepCopyInto(out *IPv6InitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.MicrosoftPeering != nil { + in, out := &in.MicrosoftPeering, &out.MicrosoftPeering + *out = new(MicrosoftPeeringInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PrimaryPeerAddressPrefix != nil { + in, out := &in.PrimaryPeerAddressPrefix, &out.PrimaryPeerAddressPrefix + *out = new(string) + **out = **in + } + if in.RouteFilterID != nil { + in, out := &in.RouteFilterID, &out.RouteFilterID + *out = new(string) + **out = **in + } + if in.SecondaryPeerAddressPrefix != nil { + in, out := &in.SecondaryPeerAddressPrefix, &out.SecondaryPeerAddressPrefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPv6InitParameters. +func (in *IPv6InitParameters) DeepCopy() *IPv6InitParameters { + if in == nil { + return nil + } + out := new(IPv6InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IPv6Observation) DeepCopyInto(out *IPv6Observation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.MicrosoftPeering != nil { + in, out := &in.MicrosoftPeering, &out.MicrosoftPeering + *out = new(MicrosoftPeeringObservation) + (*in).DeepCopyInto(*out) + } + if in.PrimaryPeerAddressPrefix != nil { + in, out := &in.PrimaryPeerAddressPrefix, &out.PrimaryPeerAddressPrefix + *out = new(string) + **out = **in + } + if in.RouteFilterID != nil { + in, out := &in.RouteFilterID, &out.RouteFilterID + *out = new(string) + **out = **in + } + if in.SecondaryPeerAddressPrefix != nil { + in, out := &in.SecondaryPeerAddressPrefix, &out.SecondaryPeerAddressPrefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPv6Observation. +func (in *IPv6Observation) DeepCopy() *IPv6Observation { + if in == nil { + return nil + } + out := new(IPv6Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IPv6Parameters) DeepCopyInto(out *IPv6Parameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.MicrosoftPeering != nil { + in, out := &in.MicrosoftPeering, &out.MicrosoftPeering + *out = new(MicrosoftPeeringParameters) + (*in).DeepCopyInto(*out) + } + if in.PrimaryPeerAddressPrefix != nil { + in, out := &in.PrimaryPeerAddressPrefix, &out.PrimaryPeerAddressPrefix + *out = new(string) + **out = **in + } + if in.RouteFilterID != nil { + in, out := &in.RouteFilterID, &out.RouteFilterID + *out = new(string) + **out = **in + } + if in.SecondaryPeerAddressPrefix != nil { + in, out := &in.SecondaryPeerAddressPrefix, &out.SecondaryPeerAddressPrefix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPv6Parameters. +func (in *IPv6Parameters) DeepCopy() *IPv6Parameters { + if in == nil { + return nil + } + out := new(IPv6Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IcmpConfigurationInitParameters) DeepCopyInto(out *IcmpConfigurationInitParameters) { + *out = *in + if in.TraceRouteEnabled != nil { + in, out := &in.TraceRouteEnabled, &out.TraceRouteEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IcmpConfigurationInitParameters. +func (in *IcmpConfigurationInitParameters) DeepCopy() *IcmpConfigurationInitParameters { + if in == nil { + return nil + } + out := new(IcmpConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IcmpConfigurationObservation) DeepCopyInto(out *IcmpConfigurationObservation) { + *out = *in + if in.TraceRouteEnabled != nil { + in, out := &in.TraceRouteEnabled, &out.TraceRouteEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IcmpConfigurationObservation. +func (in *IcmpConfigurationObservation) DeepCopy() *IcmpConfigurationObservation { + if in == nil { + return nil + } + out := new(IcmpConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IcmpConfigurationParameters) DeepCopyInto(out *IcmpConfigurationParameters) { + *out = *in + if in.TraceRouteEnabled != nil { + in, out := &in.TraceRouteEnabled, &out.TraceRouteEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IcmpConfigurationParameters. +func (in *IcmpConfigurationParameters) DeepCopy() *IcmpConfigurationParameters { + if in == nil { + return nil + } + out := new(IcmpConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityInitParameters) DeepCopyInto(out *IdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityInitParameters. 
+func (in *IdentityInitParameters) DeepCopy() *IdentityInitParameters { + if in == nil { + return nil + } + out := new(IdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityObservation) DeepCopyInto(out *IdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityObservation. +func (in *IdentityObservation) DeepCopy() *IdentityObservation { + if in == nil { + return nil + } + out := new(IdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityParameters) DeepCopyInto(out *IdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityParameters. +func (in *IdentityParameters) DeepCopy() *IdentityParameters { + if in == nil { + return nil + } + out := new(IdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InsightsInitParameters) DeepCopyInto(out *InsightsInitParameters) { + *out = *in + if in.DefaultLogAnalyticsWorkspaceID != nil { + in, out := &in.DefaultLogAnalyticsWorkspaceID, &out.DefaultLogAnalyticsWorkspaceID + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogAnalyticsWorkspace != nil { + in, out := &in.LogAnalyticsWorkspace, &out.LogAnalyticsWorkspace + *out = make([]LogAnalyticsWorkspaceInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RetentionInDays != nil { + in, out := &in.RetentionInDays, &out.RetentionInDays + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InsightsInitParameters. +func (in *InsightsInitParameters) DeepCopy() *InsightsInitParameters { + if in == nil { + return nil + } + out := new(InsightsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InsightsObservation) DeepCopyInto(out *InsightsObservation) { + *out = *in + if in.DefaultLogAnalyticsWorkspaceID != nil { + in, out := &in.DefaultLogAnalyticsWorkspaceID, &out.DefaultLogAnalyticsWorkspaceID + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogAnalyticsWorkspace != nil { + in, out := &in.LogAnalyticsWorkspace, &out.LogAnalyticsWorkspace + *out = make([]LogAnalyticsWorkspaceObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RetentionInDays != nil { + in, out := &in.RetentionInDays, &out.RetentionInDays + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InsightsObservation. 
+func (in *InsightsObservation) DeepCopy() *InsightsObservation { + if in == nil { + return nil + } + out := new(InsightsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InsightsParameters) DeepCopyInto(out *InsightsParameters) { + *out = *in + if in.DefaultLogAnalyticsWorkspaceID != nil { + in, out := &in.DefaultLogAnalyticsWorkspaceID, &out.DefaultLogAnalyticsWorkspaceID + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.LogAnalyticsWorkspace != nil { + in, out := &in.LogAnalyticsWorkspace, &out.LogAnalyticsWorkspace + *out = make([]LogAnalyticsWorkspaceParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RetentionInDays != nil { + in, out := &in.RetentionInDays, &out.RetentionInDays + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InsightsParameters. +func (in *InsightsParameters) DeepCopy() *InsightsParameters { + if in == nil { + return nil + } + out := new(InsightsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Instance0BGPPeeringAddressInitParameters) DeepCopyInto(out *Instance0BGPPeeringAddressInitParameters) { + *out = *in + if in.CustomIps != nil { + in, out := &in.CustomIps, &out.CustomIps + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Instance0BGPPeeringAddressInitParameters. 
+func (in *Instance0BGPPeeringAddressInitParameters) DeepCopy() *Instance0BGPPeeringAddressInitParameters { + if in == nil { + return nil + } + out := new(Instance0BGPPeeringAddressInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Instance0BGPPeeringAddressObservation) DeepCopyInto(out *Instance0BGPPeeringAddressObservation) { + *out = *in + if in.CustomIps != nil { + in, out := &in.CustomIps, &out.CustomIps + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DefaultIps != nil { + in, out := &in.DefaultIps, &out.DefaultIps + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IPConfigurationID != nil { + in, out := &in.IPConfigurationID, &out.IPConfigurationID + *out = new(string) + **out = **in + } + if in.TunnelIps != nil { + in, out := &in.TunnelIps, &out.TunnelIps + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Instance0BGPPeeringAddressObservation. +func (in *Instance0BGPPeeringAddressObservation) DeepCopy() *Instance0BGPPeeringAddressObservation { + if in == nil { + return nil + } + out := new(Instance0BGPPeeringAddressObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Instance0BGPPeeringAddressParameters) DeepCopyInto(out *Instance0BGPPeeringAddressParameters) { + *out = *in + if in.CustomIps != nil { + in, out := &in.CustomIps, &out.CustomIps + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Instance0BGPPeeringAddressParameters. +func (in *Instance0BGPPeeringAddressParameters) DeepCopy() *Instance0BGPPeeringAddressParameters { + if in == nil { + return nil + } + out := new(Instance0BGPPeeringAddressParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Instance1BGPPeeringAddressInitParameters) DeepCopyInto(out *Instance1BGPPeeringAddressInitParameters) { + *out = *in + if in.CustomIps != nil { + in, out := &in.CustomIps, &out.CustomIps + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Instance1BGPPeeringAddressInitParameters. +func (in *Instance1BGPPeeringAddressInitParameters) DeepCopy() *Instance1BGPPeeringAddressInitParameters { + if in == nil { + return nil + } + out := new(Instance1BGPPeeringAddressInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Instance1BGPPeeringAddressObservation) DeepCopyInto(out *Instance1BGPPeeringAddressObservation) { + *out = *in + if in.CustomIps != nil { + in, out := &in.CustomIps, &out.CustomIps + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DefaultIps != nil { + in, out := &in.DefaultIps, &out.DefaultIps + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IPConfigurationID != nil { + in, out := &in.IPConfigurationID, &out.IPConfigurationID + *out = new(string) + **out = **in + } + if in.TunnelIps != nil { + in, out := &in.TunnelIps, &out.TunnelIps + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Instance1BGPPeeringAddressObservation. +func (in *Instance1BGPPeeringAddressObservation) DeepCopy() *Instance1BGPPeeringAddressObservation { + if in == nil { + return nil + } + out := new(Instance1BGPPeeringAddressObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Instance1BGPPeeringAddressParameters) DeepCopyInto(out *Instance1BGPPeeringAddressParameters) { + *out = *in + if in.CustomIps != nil { + in, out := &in.CustomIps, &out.CustomIps + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Instance1BGPPeeringAddressParameters. 
+func (in *Instance1BGPPeeringAddressParameters) DeepCopy() *Instance1BGPPeeringAddressParameters { + if in == nil { + return nil + } + out := new(Instance1BGPPeeringAddressParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IntrusionDetectionInitParameters) DeepCopyInto(out *IntrusionDetectionInitParameters) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.PrivateRanges != nil { + in, out := &in.PrivateRanges, &out.PrivateRanges + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SignatureOverrides != nil { + in, out := &in.SignatureOverrides, &out.SignatureOverrides + *out = make([]SignatureOverridesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TrafficBypass != nil { + in, out := &in.TrafficBypass, &out.TrafficBypass + *out = make([]TrafficBypassInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntrusionDetectionInitParameters. +func (in *IntrusionDetectionInitParameters) DeepCopy() *IntrusionDetectionInitParameters { + if in == nil { + return nil + } + out := new(IntrusionDetectionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IntrusionDetectionObservation) DeepCopyInto(out *IntrusionDetectionObservation) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.PrivateRanges != nil { + in, out := &in.PrivateRanges, &out.PrivateRanges + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SignatureOverrides != nil { + in, out := &in.SignatureOverrides, &out.SignatureOverrides + *out = make([]SignatureOverridesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TrafficBypass != nil { + in, out := &in.TrafficBypass, &out.TrafficBypass + *out = make([]TrafficBypassObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntrusionDetectionObservation. +func (in *IntrusionDetectionObservation) DeepCopy() *IntrusionDetectionObservation { + if in == nil { + return nil + } + out := new(IntrusionDetectionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IntrusionDetectionParameters) DeepCopyInto(out *IntrusionDetectionParameters) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.PrivateRanges != nil { + in, out := &in.PrivateRanges, &out.PrivateRanges + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SignatureOverrides != nil { + in, out := &in.SignatureOverrides, &out.SignatureOverrides + *out = make([]SignatureOverridesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TrafficBypass != nil { + in, out := &in.TrafficBypass, &out.TrafficBypass + *out = make([]TrafficBypassParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntrusionDetectionParameters. +func (in *IntrusionDetectionParameters) DeepCopy() *IntrusionDetectionParameters { + if in == nil { + return nil + } + out := new(IntrusionDetectionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IpsecPolicyInitParameters) DeepCopyInto(out *IpsecPolicyInitParameters) { + *out = *in + if in.DhGroup != nil { + in, out := &in.DhGroup, &out.DhGroup + *out = new(string) + **out = **in + } + if in.IkeEncryption != nil { + in, out := &in.IkeEncryption, &out.IkeEncryption + *out = new(string) + **out = **in + } + if in.IkeIntegrity != nil { + in, out := &in.IkeIntegrity, &out.IkeIntegrity + *out = new(string) + **out = **in + } + if in.IpsecEncryption != nil { + in, out := &in.IpsecEncryption, &out.IpsecEncryption + *out = new(string) + **out = **in + } + if in.IpsecIntegrity != nil { + in, out := &in.IpsecIntegrity, &out.IpsecIntegrity + *out = new(string) + **out = **in + } + if in.PfsGroup != nil { + in, out := &in.PfsGroup, &out.PfsGroup + *out = new(string) + **out = **in + } + if in.SaDataSizeInKilobytes != nil { + in, out := &in.SaDataSizeInKilobytes, &out.SaDataSizeInKilobytes + *out = new(float64) + **out = **in + } + if in.SaLifetimeInSeconds != nil { + in, out := &in.SaLifetimeInSeconds, &out.SaLifetimeInSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IpsecPolicyInitParameters. +func (in *IpsecPolicyInitParameters) DeepCopy() *IpsecPolicyInitParameters { + if in == nil { + return nil + } + out := new(IpsecPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IpsecPolicyObservation) DeepCopyInto(out *IpsecPolicyObservation) { + *out = *in + if in.DhGroup != nil { + in, out := &in.DhGroup, &out.DhGroup + *out = new(string) + **out = **in + } + if in.IkeEncryption != nil { + in, out := &in.IkeEncryption, &out.IkeEncryption + *out = new(string) + **out = **in + } + if in.IkeIntegrity != nil { + in, out := &in.IkeIntegrity, &out.IkeIntegrity + *out = new(string) + **out = **in + } + if in.IpsecEncryption != nil { + in, out := &in.IpsecEncryption, &out.IpsecEncryption + *out = new(string) + **out = **in + } + if in.IpsecIntegrity != nil { + in, out := &in.IpsecIntegrity, &out.IpsecIntegrity + *out = new(string) + **out = **in + } + if in.PfsGroup != nil { + in, out := &in.PfsGroup, &out.PfsGroup + *out = new(string) + **out = **in + } + if in.SaDataSizeInKilobytes != nil { + in, out := &in.SaDataSizeInKilobytes, &out.SaDataSizeInKilobytes + *out = new(float64) + **out = **in + } + if in.SaLifetimeInSeconds != nil { + in, out := &in.SaLifetimeInSeconds, &out.SaLifetimeInSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IpsecPolicyObservation. +func (in *IpsecPolicyObservation) DeepCopy() *IpsecPolicyObservation { + if in == nil { + return nil + } + out := new(IpsecPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IpsecPolicyParameters) DeepCopyInto(out *IpsecPolicyParameters) { + *out = *in + if in.DhGroup != nil { + in, out := &in.DhGroup, &out.DhGroup + *out = new(string) + **out = **in + } + if in.IkeEncryption != nil { + in, out := &in.IkeEncryption, &out.IkeEncryption + *out = new(string) + **out = **in + } + if in.IkeIntegrity != nil { + in, out := &in.IkeIntegrity, &out.IkeIntegrity + *out = new(string) + **out = **in + } + if in.IpsecEncryption != nil { + in, out := &in.IpsecEncryption, &out.IpsecEncryption + *out = new(string) + **out = **in + } + if in.IpsecIntegrity != nil { + in, out := &in.IpsecIntegrity, &out.IpsecIntegrity + *out = new(string) + **out = **in + } + if in.PfsGroup != nil { + in, out := &in.PfsGroup, &out.PfsGroup + *out = new(string) + **out = **in + } + if in.SaDataSizeInKilobytes != nil { + in, out := &in.SaDataSizeInKilobytes, &out.SaDataSizeInKilobytes + *out = new(float64) + **out = **in + } + if in.SaLifetimeInSeconds != nil { + in, out := &in.SaLifetimeInSeconds, &out.SaLifetimeInSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IpsecPolicyParameters. +func (in *IpsecPolicyParameters) DeepCopy() *IpsecPolicyParameters { + if in == nil { + return nil + } + out := new(IpsecPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ItemInitParameters) DeepCopyInto(out *ItemInitParameters) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ItemInitParameters. 
+func (in *ItemInitParameters) DeepCopy() *ItemInitParameters { + if in == nil { + return nil + } + out := new(ItemInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ItemObservation) DeepCopyInto(out *ItemObservation) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ItemObservation. +func (in *ItemObservation) DeepCopy() *ItemObservation { + if in == nil { + return nil + } + out := new(ItemObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ItemParameters) DeepCopyInto(out *ItemParameters) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ItemParameters. +func (in *ItemParameters) DeepCopy() *ItemParameters { + if in == nil { + return nil + } + out := new(ItemParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Link1InitParameters) DeepCopyInto(out *Link1InitParameters) { + *out = *in + if in.AdminEnabled != nil { + in, out := &in.AdminEnabled, &out.AdminEnabled + *out = new(bool) + **out = **in + } + if in.MacsecCakKeyvaultSecretID != nil { + in, out := &in.MacsecCakKeyvaultSecretID, &out.MacsecCakKeyvaultSecretID + *out = new(string) + **out = **in + } + if in.MacsecCipher != nil { + in, out := &in.MacsecCipher, &out.MacsecCipher + *out = new(string) + **out = **in + } + if in.MacsecCknKeyvaultSecretID != nil { + in, out := &in.MacsecCknKeyvaultSecretID, &out.MacsecCknKeyvaultSecretID + *out = new(string) + **out = **in + } + if in.MacsecSciEnabled != nil { + in, out := &in.MacsecSciEnabled, &out.MacsecSciEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Link1InitParameters. +func (in *Link1InitParameters) DeepCopy() *Link1InitParameters { + if in == nil { + return nil + } + out := new(Link1InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Link1Observation) DeepCopyInto(out *Link1Observation) { + *out = *in + if in.AdminEnabled != nil { + in, out := &in.AdminEnabled, &out.AdminEnabled + *out = new(bool) + **out = **in + } + if in.ConnectorType != nil { + in, out := &in.ConnectorType, &out.ConnectorType + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InterfaceName != nil { + in, out := &in.InterfaceName, &out.InterfaceName + *out = new(string) + **out = **in + } + if in.MacsecCakKeyvaultSecretID != nil { + in, out := &in.MacsecCakKeyvaultSecretID, &out.MacsecCakKeyvaultSecretID + *out = new(string) + **out = **in + } + if in.MacsecCipher != nil { + in, out := &in.MacsecCipher, &out.MacsecCipher + *out = new(string) + **out = **in + } + if in.MacsecCknKeyvaultSecretID != nil { + in, out := &in.MacsecCknKeyvaultSecretID, &out.MacsecCknKeyvaultSecretID + *out = new(string) + **out = **in + } + if in.MacsecSciEnabled != nil { + in, out := &in.MacsecSciEnabled, &out.MacsecSciEnabled + *out = new(bool) + **out = **in + } + if in.PatchPanelID != nil { + in, out := &in.PatchPanelID, &out.PatchPanelID + *out = new(string) + **out = **in + } + if in.RackID != nil { + in, out := &in.RackID, &out.RackID + *out = new(string) + **out = **in + } + if in.RouterName != nil { + in, out := &in.RouterName, &out.RouterName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Link1Observation. +func (in *Link1Observation) DeepCopy() *Link1Observation { + if in == nil { + return nil + } + out := new(Link1Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Link1Parameters) DeepCopyInto(out *Link1Parameters) { + *out = *in + if in.AdminEnabled != nil { + in, out := &in.AdminEnabled, &out.AdminEnabled + *out = new(bool) + **out = **in + } + if in.MacsecCakKeyvaultSecretID != nil { + in, out := &in.MacsecCakKeyvaultSecretID, &out.MacsecCakKeyvaultSecretID + *out = new(string) + **out = **in + } + if in.MacsecCipher != nil { + in, out := &in.MacsecCipher, &out.MacsecCipher + *out = new(string) + **out = **in + } + if in.MacsecCknKeyvaultSecretID != nil { + in, out := &in.MacsecCknKeyvaultSecretID, &out.MacsecCknKeyvaultSecretID + *out = new(string) + **out = **in + } + if in.MacsecSciEnabled != nil { + in, out := &in.MacsecSciEnabled, &out.MacsecSciEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Link1Parameters. +func (in *Link1Parameters) DeepCopy() *Link1Parameters { + if in == nil { + return nil + } + out := new(Link1Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Link2InitParameters) DeepCopyInto(out *Link2InitParameters) { + *out = *in + if in.AdminEnabled != nil { + in, out := &in.AdminEnabled, &out.AdminEnabled + *out = new(bool) + **out = **in + } + if in.MacsecCakKeyvaultSecretID != nil { + in, out := &in.MacsecCakKeyvaultSecretID, &out.MacsecCakKeyvaultSecretID + *out = new(string) + **out = **in + } + if in.MacsecCipher != nil { + in, out := &in.MacsecCipher, &out.MacsecCipher + *out = new(string) + **out = **in + } + if in.MacsecCknKeyvaultSecretID != nil { + in, out := &in.MacsecCknKeyvaultSecretID, &out.MacsecCknKeyvaultSecretID + *out = new(string) + **out = **in + } + if in.MacsecSciEnabled != nil { + in, out := &in.MacsecSciEnabled, &out.MacsecSciEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Link2InitParameters. +func (in *Link2InitParameters) DeepCopy() *Link2InitParameters { + if in == nil { + return nil + } + out := new(Link2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Link2Observation) DeepCopyInto(out *Link2Observation) { + *out = *in + if in.AdminEnabled != nil { + in, out := &in.AdminEnabled, &out.AdminEnabled + *out = new(bool) + **out = **in + } + if in.ConnectorType != nil { + in, out := &in.ConnectorType, &out.ConnectorType + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InterfaceName != nil { + in, out := &in.InterfaceName, &out.InterfaceName + *out = new(string) + **out = **in + } + if in.MacsecCakKeyvaultSecretID != nil { + in, out := &in.MacsecCakKeyvaultSecretID, &out.MacsecCakKeyvaultSecretID + *out = new(string) + **out = **in + } + if in.MacsecCipher != nil { + in, out := &in.MacsecCipher, &out.MacsecCipher + *out = new(string) + **out = **in + } + if in.MacsecCknKeyvaultSecretID != nil { + in, out := &in.MacsecCknKeyvaultSecretID, &out.MacsecCknKeyvaultSecretID + *out = new(string) + **out = **in + } + if in.MacsecSciEnabled != nil { + in, out := &in.MacsecSciEnabled, &out.MacsecSciEnabled + *out = new(bool) + **out = **in + } + if in.PatchPanelID != nil { + in, out := &in.PatchPanelID, &out.PatchPanelID + *out = new(string) + **out = **in + } + if in.RackID != nil { + in, out := &in.RackID, &out.RackID + *out = new(string) + **out = **in + } + if in.RouterName != nil { + in, out := &in.RouterName, &out.RouterName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Link2Observation. +func (in *Link2Observation) DeepCopy() *Link2Observation { + if in == nil { + return nil + } + out := new(Link2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Link2Parameters) DeepCopyInto(out *Link2Parameters) { + *out = *in + if in.AdminEnabled != nil { + in, out := &in.AdminEnabled, &out.AdminEnabled + *out = new(bool) + **out = **in + } + if in.MacsecCakKeyvaultSecretID != nil { + in, out := &in.MacsecCakKeyvaultSecretID, &out.MacsecCakKeyvaultSecretID + *out = new(string) + **out = **in + } + if in.MacsecCipher != nil { + in, out := &in.MacsecCipher, &out.MacsecCipher + *out = new(string) + **out = **in + } + if in.MacsecCknKeyvaultSecretID != nil { + in, out := &in.MacsecCknKeyvaultSecretID, &out.MacsecCknKeyvaultSecretID + *out = new(string) + **out = **in + } + if in.MacsecSciEnabled != nil { + in, out := &in.MacsecSciEnabled, &out.MacsecSciEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Link2Parameters. +func (in *Link2Parameters) DeepCopy() *Link2Parameters { + if in == nil { + return nil + } + out := new(Link2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkInitParameters) DeepCopyInto(out *LinkInitParameters) { + *out = *in + if in.BGP != nil { + in, out := &in.BGP, &out.BGP + *out = new(BGPInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Fqdn != nil { + in, out := &in.Fqdn, &out.Fqdn + *out = new(string) + **out = **in + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ProviderName != nil { + in, out := &in.ProviderName, &out.ProviderName + *out = new(string) + **out = **in + } + if in.SpeedInMbps != nil { + in, out := &in.SpeedInMbps, &out.SpeedInMbps + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkInitParameters. 
+func (in *LinkInitParameters) DeepCopy() *LinkInitParameters { + if in == nil { + return nil + } + out := new(LinkInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkObservation) DeepCopyInto(out *LinkObservation) { + *out = *in + if in.BGP != nil { + in, out := &in.BGP, &out.BGP + *out = new(BGPObservation) + (*in).DeepCopyInto(*out) + } + if in.Fqdn != nil { + in, out := &in.Fqdn, &out.Fqdn + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ProviderName != nil { + in, out := &in.ProviderName, &out.ProviderName + *out = new(string) + **out = **in + } + if in.SpeedInMbps != nil { + in, out := &in.SpeedInMbps, &out.SpeedInMbps + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkObservation. +func (in *LinkObservation) DeepCopy() *LinkObservation { + if in == nil { + return nil + } + out := new(LinkObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinkParameters) DeepCopyInto(out *LinkParameters) { + *out = *in + if in.BGP != nil { + in, out := &in.BGP, &out.BGP + *out = new(BGPParameters) + (*in).DeepCopyInto(*out) + } + if in.Fqdn != nil { + in, out := &in.Fqdn, &out.Fqdn + *out = new(string) + **out = **in + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ProviderName != nil { + in, out := &in.ProviderName, &out.ProviderName + *out = new(string) + **out = **in + } + if in.SpeedInMbps != nil { + in, out := &in.SpeedInMbps, &out.SpeedInMbps + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkParameters. +func (in *LinkParameters) DeepCopy() *LinkParameters { + if in == nil { + return nil + } + out := new(LinkParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalNetworkGateway) DeepCopyInto(out *LocalNetworkGateway) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalNetworkGateway. +func (in *LocalNetworkGateway) DeepCopy() *LocalNetworkGateway { + if in == nil { + return nil + } + out := new(LocalNetworkGateway) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LocalNetworkGateway) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LocalNetworkGatewayInitParameters) DeepCopyInto(out *LocalNetworkGatewayInitParameters) { + *out = *in + if in.AddressSpace != nil { + in, out := &in.AddressSpace, &out.AddressSpace + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.BGPSettings != nil { + in, out := &in.BGPSettings, &out.BGPSettings + *out = new(BGPSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.GatewayAddress != nil { + in, out := &in.GatewayAddress, &out.GatewayAddress + *out = new(string) + **out = **in + } + if in.GatewayFqdn != nil { + in, out := &in.GatewayFqdn, &out.GatewayFqdn + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalNetworkGatewayInitParameters. +func (in *LocalNetworkGatewayInitParameters) DeepCopy() *LocalNetworkGatewayInitParameters { + if in == nil { + return nil + } + out := new(LocalNetworkGatewayInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LocalNetworkGatewayList) DeepCopyInto(out *LocalNetworkGatewayList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LocalNetworkGateway, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalNetworkGatewayList. +func (in *LocalNetworkGatewayList) DeepCopy() *LocalNetworkGatewayList { + if in == nil { + return nil + } + out := new(LocalNetworkGatewayList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LocalNetworkGatewayList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LocalNetworkGatewayObservation) DeepCopyInto(out *LocalNetworkGatewayObservation) { + *out = *in + if in.AddressSpace != nil { + in, out := &in.AddressSpace, &out.AddressSpace + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.BGPSettings != nil { + in, out := &in.BGPSettings, &out.BGPSettings + *out = new(BGPSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.GatewayAddress != nil { + in, out := &in.GatewayAddress, &out.GatewayAddress + *out = new(string) + **out = **in + } + if in.GatewayFqdn != nil { + in, out := &in.GatewayFqdn, &out.GatewayFqdn + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalNetworkGatewayObservation. +func (in *LocalNetworkGatewayObservation) DeepCopy() *LocalNetworkGatewayObservation { + if in == nil { + return nil + } + out := new(LocalNetworkGatewayObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LocalNetworkGatewayParameters) DeepCopyInto(out *LocalNetworkGatewayParameters) { + *out = *in + if in.AddressSpace != nil { + in, out := &in.AddressSpace, &out.AddressSpace + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.BGPSettings != nil { + in, out := &in.BGPSettings, &out.BGPSettings + *out = new(BGPSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.GatewayAddress != nil { + in, out := &in.GatewayAddress, &out.GatewayAddress + *out = new(string) + **out = **in + } + if in.GatewayFqdn != nil { + in, out := &in.GatewayFqdn, &out.GatewayFqdn + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalNetworkGatewayParameters. 
+func (in *LocalNetworkGatewayParameters) DeepCopy() *LocalNetworkGatewayParameters { + if in == nil { + return nil + } + out := new(LocalNetworkGatewayParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalNetworkGatewaySpec) DeepCopyInto(out *LocalNetworkGatewaySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalNetworkGatewaySpec. +func (in *LocalNetworkGatewaySpec) DeepCopy() *LocalNetworkGatewaySpec { + if in == nil { + return nil + } + out := new(LocalNetworkGatewaySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalNetworkGatewayStatus) DeepCopyInto(out *LocalNetworkGatewayStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalNetworkGatewayStatus. +func (in *LocalNetworkGatewayStatus) DeepCopy() *LocalNetworkGatewayStatus { + if in == nil { + return nil + } + out := new(LocalNetworkGatewayStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LogAnalyticsWorkspaceInitParameters) DeepCopyInto(out *LogAnalyticsWorkspaceInitParameters) { + *out = *in + if in.FirewallLocation != nil { + in, out := &in.FirewallLocation, &out.FirewallLocation + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogAnalyticsWorkspaceInitParameters. +func (in *LogAnalyticsWorkspaceInitParameters) DeepCopy() *LogAnalyticsWorkspaceInitParameters { + if in == nil { + return nil + } + out := new(LogAnalyticsWorkspaceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogAnalyticsWorkspaceObservation) DeepCopyInto(out *LogAnalyticsWorkspaceObservation) { + *out = *in + if in.FirewallLocation != nil { + in, out := &in.FirewallLocation, &out.FirewallLocation + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogAnalyticsWorkspaceObservation. +func (in *LogAnalyticsWorkspaceObservation) DeepCopy() *LogAnalyticsWorkspaceObservation { + if in == nil { + return nil + } + out := new(LogAnalyticsWorkspaceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LogAnalyticsWorkspaceParameters) DeepCopyInto(out *LogAnalyticsWorkspaceParameters) { + *out = *in + if in.FirewallLocation != nil { + in, out := &in.FirewallLocation, &out.FirewallLocation + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogAnalyticsWorkspaceParameters. +func (in *LogAnalyticsWorkspaceParameters) DeepCopy() *LogAnalyticsWorkspaceParameters { + if in == nil { + return nil + } + out := new(LogAnalyticsWorkspaceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogScrubbingInitParameters) DeepCopyInto(out *LogScrubbingInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Rule != nil { + in, out := &in.Rule, &out.Rule + *out = make([]LogScrubbingRuleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogScrubbingInitParameters. +func (in *LogScrubbingInitParameters) DeepCopy() *LogScrubbingInitParameters { + if in == nil { + return nil + } + out := new(LogScrubbingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LogScrubbingObservation) DeepCopyInto(out *LogScrubbingObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Rule != nil { + in, out := &in.Rule, &out.Rule + *out = make([]LogScrubbingRuleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogScrubbingObservation. +func (in *LogScrubbingObservation) DeepCopy() *LogScrubbingObservation { + if in == nil { + return nil + } + out := new(LogScrubbingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogScrubbingParameters) DeepCopyInto(out *LogScrubbingParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Rule != nil { + in, out := &in.Rule, &out.Rule + *out = make([]LogScrubbingRuleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogScrubbingParameters. +func (in *LogScrubbingParameters) DeepCopy() *LogScrubbingParameters { + if in == nil { + return nil + } + out := new(LogScrubbingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LogScrubbingRuleInitParameters) DeepCopyInto(out *LogScrubbingRuleInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.MatchVariable != nil { + in, out := &in.MatchVariable, &out.MatchVariable + *out = new(string) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(string) + **out = **in + } + if in.SelectorMatchOperator != nil { + in, out := &in.SelectorMatchOperator, &out.SelectorMatchOperator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogScrubbingRuleInitParameters. +func (in *LogScrubbingRuleInitParameters) DeepCopy() *LogScrubbingRuleInitParameters { + if in == nil { + return nil + } + out := new(LogScrubbingRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogScrubbingRuleObservation) DeepCopyInto(out *LogScrubbingRuleObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.MatchVariable != nil { + in, out := &in.MatchVariable, &out.MatchVariable + *out = new(string) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(string) + **out = **in + } + if in.SelectorMatchOperator != nil { + in, out := &in.SelectorMatchOperator, &out.SelectorMatchOperator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogScrubbingRuleObservation. 
+func (in *LogScrubbingRuleObservation) DeepCopy() *LogScrubbingRuleObservation { + if in == nil { + return nil + } + out := new(LogScrubbingRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogScrubbingRuleParameters) DeepCopyInto(out *LogScrubbingRuleParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.MatchVariable != nil { + in, out := &in.MatchVariable, &out.MatchVariable + *out = new(string) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(string) + **out = **in + } + if in.SelectorMatchOperator != nil { + in, out := &in.SelectorMatchOperator, &out.SelectorMatchOperator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogScrubbingRuleParameters. +func (in *LogScrubbingRuleParameters) DeepCopy() *LogScrubbingRuleParameters { + if in == nil { + return nil + } + out := new(LogScrubbingRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedRuleSetInitParameters) DeepCopyInto(out *ManagedRuleSetInitParameters) { + *out = *in + if in.RuleGroupOverride != nil { + in, out := &in.RuleGroupOverride, &out.RuleGroupOverride + *out = make([]RuleGroupOverrideInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedRuleSetInitParameters. 
+func (in *ManagedRuleSetInitParameters) DeepCopy() *ManagedRuleSetInitParameters { + if in == nil { + return nil + } + out := new(ManagedRuleSetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedRuleSetObservation) DeepCopyInto(out *ManagedRuleSetObservation) { + *out = *in + if in.RuleGroupOverride != nil { + in, out := &in.RuleGroupOverride, &out.RuleGroupOverride + *out = make([]RuleGroupOverrideObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedRuleSetObservation. +func (in *ManagedRuleSetObservation) DeepCopy() *ManagedRuleSetObservation { + if in == nil { + return nil + } + out := new(ManagedRuleSetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedRuleSetParameters) DeepCopyInto(out *ManagedRuleSetParameters) { + *out = *in + if in.RuleGroupOverride != nil { + in, out := &in.RuleGroupOverride, &out.RuleGroupOverride + *out = make([]RuleGroupOverrideParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedRuleSetParameters. 
+func (in *ManagedRuleSetParameters) DeepCopy() *ManagedRuleSetParameters { + if in == nil { + return nil + } + out := new(ManagedRuleSetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedRulesExclusionInitParameters) DeepCopyInto(out *ManagedRulesExclusionInitParameters) { + *out = *in + if in.ExcludedRuleSet != nil { + in, out := &in.ExcludedRuleSet, &out.ExcludedRuleSet + *out = new(ExcludedRuleSetInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MatchVariable != nil { + in, out := &in.MatchVariable, &out.MatchVariable + *out = new(string) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(string) + **out = **in + } + if in.SelectorMatchOperator != nil { + in, out := &in.SelectorMatchOperator, &out.SelectorMatchOperator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedRulesExclusionInitParameters. +func (in *ManagedRulesExclusionInitParameters) DeepCopy() *ManagedRulesExclusionInitParameters { + if in == nil { + return nil + } + out := new(ManagedRulesExclusionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ManagedRulesExclusionObservation) DeepCopyInto(out *ManagedRulesExclusionObservation) { + *out = *in + if in.ExcludedRuleSet != nil { + in, out := &in.ExcludedRuleSet, &out.ExcludedRuleSet + *out = new(ExcludedRuleSetObservation) + (*in).DeepCopyInto(*out) + } + if in.MatchVariable != nil { + in, out := &in.MatchVariable, &out.MatchVariable + *out = new(string) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(string) + **out = **in + } + if in.SelectorMatchOperator != nil { + in, out := &in.SelectorMatchOperator, &out.SelectorMatchOperator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedRulesExclusionObservation. +func (in *ManagedRulesExclusionObservation) DeepCopy() *ManagedRulesExclusionObservation { + if in == nil { + return nil + } + out := new(ManagedRulesExclusionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedRulesExclusionParameters) DeepCopyInto(out *ManagedRulesExclusionParameters) { + *out = *in + if in.ExcludedRuleSet != nil { + in, out := &in.ExcludedRuleSet, &out.ExcludedRuleSet + *out = new(ExcludedRuleSetParameters) + (*in).DeepCopyInto(*out) + } + if in.MatchVariable != nil { + in, out := &in.MatchVariable, &out.MatchVariable + *out = new(string) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(string) + **out = **in + } + if in.SelectorMatchOperator != nil { + in, out := &in.SelectorMatchOperator, &out.SelectorMatchOperator + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedRulesExclusionParameters. 
+func (in *ManagedRulesExclusionParameters) DeepCopy() *ManagedRulesExclusionParameters { + if in == nil { + return nil + } + out := new(ManagedRulesExclusionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedRulesInitParameters) DeepCopyInto(out *ManagedRulesInitParameters) { + *out = *in + if in.Exclusion != nil { + in, out := &in.Exclusion, &out.Exclusion + *out = make([]ManagedRulesExclusionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ManagedRuleSet != nil { + in, out := &in.ManagedRuleSet, &out.ManagedRuleSet + *out = make([]ManagedRuleSetInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedRulesInitParameters. +func (in *ManagedRulesInitParameters) DeepCopy() *ManagedRulesInitParameters { + if in == nil { + return nil + } + out := new(ManagedRulesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedRulesObservation) DeepCopyInto(out *ManagedRulesObservation) { + *out = *in + if in.Exclusion != nil { + in, out := &in.Exclusion, &out.Exclusion + *out = make([]ManagedRulesExclusionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ManagedRuleSet != nil { + in, out := &in.ManagedRuleSet, &out.ManagedRuleSet + *out = make([]ManagedRuleSetObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedRulesObservation. 
+func (in *ManagedRulesObservation) DeepCopy() *ManagedRulesObservation { + if in == nil { + return nil + } + out := new(ManagedRulesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedRulesParameters) DeepCopyInto(out *ManagedRulesParameters) { + *out = *in + if in.Exclusion != nil { + in, out := &in.Exclusion, &out.Exclusion + *out = make([]ManagedRulesExclusionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ManagedRuleSet != nil { + in, out := &in.ManagedRuleSet, &out.ManagedRuleSet + *out = make([]ManagedRuleSetParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedRulesParameters. +func (in *ManagedRulesParameters) DeepCopy() *ManagedRulesParameters { + if in == nil { + return nil + } + out := new(ManagedRulesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ManagementIPConfigurationInitParameters) DeepCopyInto(out *ManagementIPConfigurationInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PublicIPAddressID != nil { + in, out := &in.PublicIPAddressID, &out.PublicIPAddressID + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagementIPConfigurationInitParameters. +func (in *ManagementIPConfigurationInitParameters) DeepCopy() *ManagementIPConfigurationInitParameters { + if in == nil { + return nil + } + out := new(ManagementIPConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagementIPConfigurationObservation) DeepCopyInto(out *ManagementIPConfigurationObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PrivateIPAddress != nil { + in, out := &in.PrivateIPAddress, &out.PrivateIPAddress + *out = new(string) + **out = **in + } + if in.PublicIPAddressID != nil { + in, out := &in.PublicIPAddressID, &out.PublicIPAddressID + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagementIPConfigurationObservation. 
+func (in *ManagementIPConfigurationObservation) DeepCopy() *ManagementIPConfigurationObservation { + if in == nil { + return nil + } + out := new(ManagementIPConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagementIPConfigurationParameters) DeepCopyInto(out *ManagementIPConfigurationParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PublicIPAddressID != nil { + in, out := &in.PublicIPAddressID, &out.PublicIPAddressID + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagementIPConfigurationParameters. +func (in *ManagementIPConfigurationParameters) DeepCopy() *ManagementIPConfigurationParameters { + if in == nil { + return nil + } + out := new(ManagementIPConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Manager) DeepCopyInto(out *Manager) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Manager. 
+func (in *Manager) DeepCopy() *Manager { + if in == nil { + return nil + } + out := new(Manager) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Manager) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagerInitParameters) DeepCopyInto(out *ManagerInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = new(ScopeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ScopeAccesses != nil { + in, out := &in.ScopeAccesses, &out.ScopeAccesses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagerInitParameters. +func (in *ManagerInitParameters) DeepCopy() *ManagerInitParameters { + if in == nil { + return nil + } + out := new(ManagerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ManagerList) DeepCopyInto(out *ManagerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Manager, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagerList. +func (in *ManagerList) DeepCopy() *ManagerList { + if in == nil { + return nil + } + out := new(ManagerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ManagerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagerObservation) DeepCopyInto(out *ManagerObservation) { + *out = *in + if in.CrossTenantScopes != nil { + in, out := &in.CrossTenantScopes, &out.CrossTenantScopes + *out = make([]CrossTenantScopesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = new(ScopeObservation) + (*in).DeepCopyInto(*out) + } + if in.ScopeAccesses != nil { + in, out := &in.ScopeAccesses, &out.ScopeAccesses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) 
+ **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagerObservation. +func (in *ManagerObservation) DeepCopy() *ManagerObservation { + if in == nil { + return nil + } + out := new(ManagerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagerParameters) DeepCopyInto(out *ManagerParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = new(ScopeParameters) + (*in).DeepCopyInto(*out) + } + if in.ScopeAccesses != nil { + in, out := &in.ScopeAccesses, &out.ScopeAccesses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, 
val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagerParameters. +func (in *ManagerParameters) DeepCopy() *ManagerParameters { + if in == nil { + return nil + } + out := new(ManagerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagerSpec) DeepCopyInto(out *ManagerSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagerSpec. +func (in *ManagerSpec) DeepCopy() *ManagerSpec { + if in == nil { + return nil + } + out := new(ManagerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagerStatus) DeepCopyInto(out *ManagerStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagerStatus. +func (in *ManagerStatus) DeepCopy() *ManagerStatus { + if in == nil { + return nil + } + out := new(ManagerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MatchConditionInitParameters) DeepCopyInto(out *MatchConditionInitParameters) { + *out = *in + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(string) + **out = **in + } + if in.Transform != nil { + in, out := &in.Transform, &out.Transform + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Variable != nil { + in, out := &in.Variable, &out.Variable + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchConditionInitParameters. +func (in *MatchConditionInitParameters) DeepCopy() *MatchConditionInitParameters { + if in == nil { + return nil + } + out := new(MatchConditionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MatchConditionObservation) DeepCopyInto(out *MatchConditionObservation) { + *out = *in + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(string) + **out = **in + } + if in.Transform != nil { + in, out := &in.Transform, &out.Transform + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Variable != nil { + in, out := &in.Variable, &out.Variable + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchConditionObservation. +func (in *MatchConditionObservation) DeepCopy() *MatchConditionObservation { + if in == nil { + return nil + } + out := new(MatchConditionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MatchConditionParameters) DeepCopyInto(out *MatchConditionParameters) { + *out = *in + if in.NegateCondition != nil { + in, out := &in.NegateCondition, &out.NegateCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(string) + **out = **in + } + if in.Transform != nil { + in, out := &in.Transform, &out.Transform + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Variable != nil { + in, out := &in.Variable, &out.Variable + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchConditionParameters. +func (in *MatchConditionParameters) DeepCopy() *MatchConditionParameters { + if in == nil { + return nil + } + out := new(MatchConditionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MatchConditionsInitParameters) DeepCopyInto(out *MatchConditionsInitParameters) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MatchVariables != nil { + in, out := &in.MatchVariables, &out.MatchVariables + *out = make([]MatchVariablesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NegationCondition != nil { + in, out := &in.NegationCondition, &out.NegationCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Transforms != nil { + in, out := &in.Transforms, &out.Transforms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchConditionsInitParameters. +func (in *MatchConditionsInitParameters) DeepCopy() *MatchConditionsInitParameters { + if in == nil { + return nil + } + out := new(MatchConditionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MatchConditionsObservation) DeepCopyInto(out *MatchConditionsObservation) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MatchVariables != nil { + in, out := &in.MatchVariables, &out.MatchVariables + *out = make([]MatchVariablesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NegationCondition != nil { + in, out := &in.NegationCondition, &out.NegationCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Transforms != nil { + in, out := &in.Transforms, &out.Transforms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchConditionsObservation. +func (in *MatchConditionsObservation) DeepCopy() *MatchConditionsObservation { + if in == nil { + return nil + } + out := new(MatchConditionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MatchConditionsParameters) DeepCopyInto(out *MatchConditionsParameters) { + *out = *in + if in.MatchValues != nil { + in, out := &in.MatchValues, &out.MatchValues + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MatchVariables != nil { + in, out := &in.MatchVariables, &out.MatchVariables + *out = make([]MatchVariablesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NegationCondition != nil { + in, out := &in.NegationCondition, &out.NegationCondition + *out = new(bool) + **out = **in + } + if in.Operator != nil { + in, out := &in.Operator, &out.Operator + *out = new(string) + **out = **in + } + if in.Transforms != nil { + in, out := &in.Transforms, &out.Transforms + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchConditionsParameters. +func (in *MatchConditionsParameters) DeepCopy() *MatchConditionsParameters { + if in == nil { + return nil + } + out := new(MatchConditionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MatchInitParameters) DeepCopyInto(out *MatchInitParameters) { + *out = *in + if in.Body != nil { + in, out := &in.Body, &out.Body + *out = new(string) + **out = **in + } + if in.StatusCode != nil { + in, out := &in.StatusCode, &out.StatusCode + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchInitParameters. 
+func (in *MatchInitParameters) DeepCopy() *MatchInitParameters { + if in == nil { + return nil + } + out := new(MatchInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MatchObservation) DeepCopyInto(out *MatchObservation) { + *out = *in + if in.Body != nil { + in, out := &in.Body, &out.Body + *out = new(string) + **out = **in + } + if in.StatusCode != nil { + in, out := &in.StatusCode, &out.StatusCode + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchObservation. +func (in *MatchObservation) DeepCopy() *MatchObservation { + if in == nil { + return nil + } + out := new(MatchObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MatchParameters) DeepCopyInto(out *MatchParameters) { + *out = *in + if in.Body != nil { + in, out := &in.Body, &out.Body + *out = new(string) + **out = **in + } + if in.StatusCode != nil { + in, out := &in.StatusCode, &out.StatusCode + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchParameters. +func (in *MatchParameters) DeepCopy() *MatchParameters { + if in == nil { + return nil + } + out := new(MatchParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MatchVariablesInitParameters) DeepCopyInto(out *MatchVariablesInitParameters) { + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(string) + **out = **in + } + if in.VariableName != nil { + in, out := &in.VariableName, &out.VariableName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchVariablesInitParameters. +func (in *MatchVariablesInitParameters) DeepCopy() *MatchVariablesInitParameters { + if in == nil { + return nil + } + out := new(MatchVariablesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MatchVariablesObservation) DeepCopyInto(out *MatchVariablesObservation) { + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(string) + **out = **in + } + if in.VariableName != nil { + in, out := &in.VariableName, &out.VariableName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchVariablesObservation. +func (in *MatchVariablesObservation) DeepCopy() *MatchVariablesObservation { + if in == nil { + return nil + } + out := new(MatchVariablesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MatchVariablesParameters) DeepCopyInto(out *MatchVariablesParameters) { + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(string) + **out = **in + } + if in.VariableName != nil { + in, out := &in.VariableName, &out.VariableName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchVariablesParameters. 
+func (in *MatchVariablesParameters) DeepCopy() *MatchVariablesParameters { + if in == nil { + return nil + } + out := new(MatchVariablesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MicrosoftPeeringConfigInitParameters) DeepCopyInto(out *MicrosoftPeeringConfigInitParameters) { + *out = *in + if in.AdvertisedCommunities != nil { + in, out := &in.AdvertisedCommunities, &out.AdvertisedCommunities + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AdvertisedPublicPrefixes != nil { + in, out := &in.AdvertisedPublicPrefixes, &out.AdvertisedPublicPrefixes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomerAsn != nil { + in, out := &in.CustomerAsn, &out.CustomerAsn + *out = new(float64) + **out = **in + } + if in.RoutingRegistryName != nil { + in, out := &in.RoutingRegistryName, &out.RoutingRegistryName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MicrosoftPeeringConfigInitParameters. +func (in *MicrosoftPeeringConfigInitParameters) DeepCopy() *MicrosoftPeeringConfigInitParameters { + if in == nil { + return nil + } + out := new(MicrosoftPeeringConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MicrosoftPeeringConfigObservation) DeepCopyInto(out *MicrosoftPeeringConfigObservation) { + *out = *in + if in.AdvertisedCommunities != nil { + in, out := &in.AdvertisedCommunities, &out.AdvertisedCommunities + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AdvertisedPublicPrefixes != nil { + in, out := &in.AdvertisedPublicPrefixes, &out.AdvertisedPublicPrefixes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomerAsn != nil { + in, out := &in.CustomerAsn, &out.CustomerAsn + *out = new(float64) + **out = **in + } + if in.RoutingRegistryName != nil { + in, out := &in.RoutingRegistryName, &out.RoutingRegistryName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MicrosoftPeeringConfigObservation. +func (in *MicrosoftPeeringConfigObservation) DeepCopy() *MicrosoftPeeringConfigObservation { + if in == nil { + return nil + } + out := new(MicrosoftPeeringConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MicrosoftPeeringConfigParameters) DeepCopyInto(out *MicrosoftPeeringConfigParameters) { + *out = *in + if in.AdvertisedCommunities != nil { + in, out := &in.AdvertisedCommunities, &out.AdvertisedCommunities + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AdvertisedPublicPrefixes != nil { + in, out := &in.AdvertisedPublicPrefixes, &out.AdvertisedPublicPrefixes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomerAsn != nil { + in, out := &in.CustomerAsn, &out.CustomerAsn + *out = new(float64) + **out = **in + } + if in.RoutingRegistryName != nil { + in, out := &in.RoutingRegistryName, &out.RoutingRegistryName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MicrosoftPeeringConfigParameters. +func (in *MicrosoftPeeringConfigParameters) DeepCopy() *MicrosoftPeeringConfigParameters { + if in == nil { + return nil + } + out := new(MicrosoftPeeringConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MicrosoftPeeringInitParameters) DeepCopyInto(out *MicrosoftPeeringInitParameters) { + *out = *in + if in.AdvertisedCommunities != nil { + in, out := &in.AdvertisedCommunities, &out.AdvertisedCommunities + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AdvertisedPublicPrefixes != nil { + in, out := &in.AdvertisedPublicPrefixes, &out.AdvertisedPublicPrefixes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomerAsn != nil { + in, out := &in.CustomerAsn, &out.CustomerAsn + *out = new(float64) + **out = **in + } + if in.RoutingRegistryName != nil { + in, out := &in.RoutingRegistryName, &out.RoutingRegistryName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MicrosoftPeeringInitParameters. +func (in *MicrosoftPeeringInitParameters) DeepCopy() *MicrosoftPeeringInitParameters { + if in == nil { + return nil + } + out := new(MicrosoftPeeringInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MicrosoftPeeringObservation) DeepCopyInto(out *MicrosoftPeeringObservation) { + *out = *in + if in.AdvertisedCommunities != nil { + in, out := &in.AdvertisedCommunities, &out.AdvertisedCommunities + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AdvertisedPublicPrefixes != nil { + in, out := &in.AdvertisedPublicPrefixes, &out.AdvertisedPublicPrefixes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomerAsn != nil { + in, out := &in.CustomerAsn, &out.CustomerAsn + *out = new(float64) + **out = **in + } + if in.RoutingRegistryName != nil { + in, out := &in.RoutingRegistryName, &out.RoutingRegistryName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MicrosoftPeeringObservation. +func (in *MicrosoftPeeringObservation) DeepCopy() *MicrosoftPeeringObservation { + if in == nil { + return nil + } + out := new(MicrosoftPeeringObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MicrosoftPeeringParameters) DeepCopyInto(out *MicrosoftPeeringParameters) { + *out = *in + if in.AdvertisedCommunities != nil { + in, out := &in.AdvertisedCommunities, &out.AdvertisedCommunities + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AdvertisedPublicPrefixes != nil { + in, out := &in.AdvertisedPublicPrefixes, &out.AdvertisedPublicPrefixes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CustomerAsn != nil { + in, out := &in.CustomerAsn, &out.CustomerAsn + *out = new(float64) + **out = **in + } + if in.RoutingRegistryName != nil { + in, out := &in.RoutingRegistryName, &out.RoutingRegistryName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MicrosoftPeeringParameters. +func (in *MicrosoftPeeringParameters) DeepCopy() *MicrosoftPeeringParameters { + if in == nil { + return nil + } + out := new(MicrosoftPeeringParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorConfigInitParameters) DeepCopyInto(out *MonitorConfigInitParameters) { + *out = *in + if in.CustomHeader != nil { + in, out := &in.CustomHeader, &out.CustomHeader + *out = make([]CustomHeaderInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ExpectedStatusCodeRanges != nil { + in, out := &in.ExpectedStatusCodeRanges, &out.ExpectedStatusCodeRanges + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IntervalInSeconds != nil { + in, out := &in.IntervalInSeconds, &out.IntervalInSeconds + *out = new(float64) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.TimeoutInSeconds != nil { + in, out := &in.TimeoutInSeconds, &out.TimeoutInSeconds + *out = new(float64) + **out = **in + } + if in.ToleratedNumberOfFailures != nil { + in, out := &in.ToleratedNumberOfFailures, &out.ToleratedNumberOfFailures + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorConfigInitParameters. +func (in *MonitorConfigInitParameters) DeepCopy() *MonitorConfigInitParameters { + if in == nil { + return nil + } + out := new(MonitorConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorConfigObservation) DeepCopyInto(out *MonitorConfigObservation) { + *out = *in + if in.CustomHeader != nil { + in, out := &in.CustomHeader, &out.CustomHeader + *out = make([]CustomHeaderObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ExpectedStatusCodeRanges != nil { + in, out := &in.ExpectedStatusCodeRanges, &out.ExpectedStatusCodeRanges + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IntervalInSeconds != nil { + in, out := &in.IntervalInSeconds, &out.IntervalInSeconds + *out = new(float64) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.TimeoutInSeconds != nil { + in, out := &in.TimeoutInSeconds, &out.TimeoutInSeconds + *out = new(float64) + **out = **in + } + if in.ToleratedNumberOfFailures != nil { + in, out := &in.ToleratedNumberOfFailures, &out.ToleratedNumberOfFailures + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorConfigObservation. +func (in *MonitorConfigObservation) DeepCopy() *MonitorConfigObservation { + if in == nil { + return nil + } + out := new(MonitorConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitorConfigParameters) DeepCopyInto(out *MonitorConfigParameters) { + *out = *in + if in.CustomHeader != nil { + in, out := &in.CustomHeader, &out.CustomHeader + *out = make([]CustomHeaderParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ExpectedStatusCodeRanges != nil { + in, out := &in.ExpectedStatusCodeRanges, &out.ExpectedStatusCodeRanges + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IntervalInSeconds != nil { + in, out := &in.IntervalInSeconds, &out.IntervalInSeconds + *out = new(float64) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.TimeoutInSeconds != nil { + in, out := &in.TimeoutInSeconds, &out.TimeoutInSeconds + *out = new(float64) + **out = **in + } + if in.ToleratedNumberOfFailures != nil { + in, out := &in.ToleratedNumberOfFailures, &out.ToleratedNumberOfFailures + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitorConfigParameters. +func (in *MonitorConfigParameters) DeepCopy() *MonitorConfigParameters { + if in == nil { + return nil + } + out := new(MonitorConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkInterfaceInitParameters) DeepCopyInto(out *NetworkInterfaceInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInterfaceInitParameters. 
+func (in *NetworkInterfaceInitParameters) DeepCopy() *NetworkInterfaceInitParameters { + if in == nil { + return nil + } + out := new(NetworkInterfaceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkInterfaceObservation) DeepCopyInto(out *NetworkInterfaceObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInterfaceObservation. +func (in *NetworkInterfaceObservation) DeepCopy() *NetworkInterfaceObservation { + if in == nil { + return nil + } + out := new(NetworkInterfaceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkInterfaceParameters) DeepCopyInto(out *NetworkInterfaceParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInterfaceParameters. +func (in *NetworkInterfaceParameters) DeepCopy() *NetworkInterfaceParameters { + if in == nil { + return nil + } + out := new(NetworkInterfaceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *O365PolicyInitParameters) DeepCopyInto(out *O365PolicyInitParameters) { + *out = *in + if in.TrafficCategory != nil { + in, out := &in.TrafficCategory, &out.TrafficCategory + *out = new(TrafficCategoryInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new O365PolicyInitParameters. 
+func (in *O365PolicyInitParameters) DeepCopy() *O365PolicyInitParameters { + if in == nil { + return nil + } + out := new(O365PolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *O365PolicyObservation) DeepCopyInto(out *O365PolicyObservation) { + *out = *in + if in.TrafficCategory != nil { + in, out := &in.TrafficCategory, &out.TrafficCategory + *out = new(TrafficCategoryObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new O365PolicyObservation. +func (in *O365PolicyObservation) DeepCopy() *O365PolicyObservation { + if in == nil { + return nil + } + out := new(O365PolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *O365PolicyParameters) DeepCopyInto(out *O365PolicyParameters) { + *out = *in + if in.TrafficCategory != nil { + in, out := &in.TrafficCategory, &out.TrafficCategory + *out = new(TrafficCategoryParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new O365PolicyParameters. +func (in *O365PolicyParameters) DeepCopy() *O365PolicyParameters { + if in == nil { + return nil + } + out := new(O365PolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PacketCapture) DeepCopyInto(out *PacketCapture) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PacketCapture. 
+func (in *PacketCapture) DeepCopy() *PacketCapture { + if in == nil { + return nil + } + out := new(PacketCapture) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PacketCapture) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PacketCaptureFilterInitParameters) DeepCopyInto(out *PacketCaptureFilterInitParameters) { + *out = *in + if in.LocalIPAddress != nil { + in, out := &in.LocalIPAddress, &out.LocalIPAddress + *out = new(string) + **out = **in + } + if in.LocalPort != nil { + in, out := &in.LocalPort, &out.LocalPort + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.RemoteIPAddress != nil { + in, out := &in.RemoteIPAddress, &out.RemoteIPAddress + *out = new(string) + **out = **in + } + if in.RemotePort != nil { + in, out := &in.RemotePort, &out.RemotePort + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PacketCaptureFilterInitParameters. +func (in *PacketCaptureFilterInitParameters) DeepCopy() *PacketCaptureFilterInitParameters { + if in == nil { + return nil + } + out := new(PacketCaptureFilterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PacketCaptureFilterObservation) DeepCopyInto(out *PacketCaptureFilterObservation) { + *out = *in + if in.LocalIPAddress != nil { + in, out := &in.LocalIPAddress, &out.LocalIPAddress + *out = new(string) + **out = **in + } + if in.LocalPort != nil { + in, out := &in.LocalPort, &out.LocalPort + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.RemoteIPAddress != nil { + in, out := &in.RemoteIPAddress, &out.RemoteIPAddress + *out = new(string) + **out = **in + } + if in.RemotePort != nil { + in, out := &in.RemotePort, &out.RemotePort + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PacketCaptureFilterObservation. +func (in *PacketCaptureFilterObservation) DeepCopy() *PacketCaptureFilterObservation { + if in == nil { + return nil + } + out := new(PacketCaptureFilterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PacketCaptureFilterParameters) DeepCopyInto(out *PacketCaptureFilterParameters) { + *out = *in + if in.LocalIPAddress != nil { + in, out := &in.LocalIPAddress, &out.LocalIPAddress + *out = new(string) + **out = **in + } + if in.LocalPort != nil { + in, out := &in.LocalPort, &out.LocalPort + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.RemoteIPAddress != nil { + in, out := &in.RemoteIPAddress, &out.RemoteIPAddress + *out = new(string) + **out = **in + } + if in.RemotePort != nil { + in, out := &in.RemotePort, &out.RemotePort + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PacketCaptureFilterParameters. 
+func (in *PacketCaptureFilterParameters) DeepCopy() *PacketCaptureFilterParameters { + if in == nil { + return nil + } + out := new(PacketCaptureFilterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PacketCaptureInitParameters) DeepCopyInto(out *PacketCaptureInitParameters) { + *out = *in + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = make([]PacketCaptureFilterInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MaximumBytesPerPacket != nil { + in, out := &in.MaximumBytesPerPacket, &out.MaximumBytesPerPacket + *out = new(float64) + **out = **in + } + if in.MaximumBytesPerSession != nil { + in, out := &in.MaximumBytesPerSession, &out.MaximumBytesPerSession + *out = new(float64) + **out = **in + } + if in.MaximumCaptureDuration != nil { + in, out := &in.MaximumCaptureDuration, &out.MaximumCaptureDuration + *out = new(float64) + **out = **in + } + if in.StorageLocation != nil { + in, out := &in.StorageLocation, &out.StorageLocation + *out = new(StorageLocationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TargetResourceID != nil { + in, out := &in.TargetResourceID, &out.TargetResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PacketCaptureInitParameters. +func (in *PacketCaptureInitParameters) DeepCopy() *PacketCaptureInitParameters { + if in == nil { + return nil + } + out := new(PacketCaptureInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PacketCaptureList) DeepCopyInto(out *PacketCaptureList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PacketCapture, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PacketCaptureList. +func (in *PacketCaptureList) DeepCopy() *PacketCaptureList { + if in == nil { + return nil + } + out := new(PacketCaptureList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PacketCaptureList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PacketCaptureObservation) DeepCopyInto(out *PacketCaptureObservation) { + *out = *in + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = make([]PacketCaptureFilterObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.MaximumBytesPerPacket != nil { + in, out := &in.MaximumBytesPerPacket, &out.MaximumBytesPerPacket + *out = new(float64) + **out = **in + } + if in.MaximumBytesPerSession != nil { + in, out := &in.MaximumBytesPerSession, &out.MaximumBytesPerSession + *out = new(float64) + **out = **in + } + if in.MaximumCaptureDuration != nil { + in, out := &in.MaximumCaptureDuration, &out.MaximumCaptureDuration + *out = new(float64) + **out = **in + } + if in.NetworkWatcherName != nil { + in, out := &in.NetworkWatcherName, &out.NetworkWatcherName + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, 
&out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.StorageLocation != nil { + in, out := &in.StorageLocation, &out.StorageLocation + *out = new(StorageLocationObservation) + (*in).DeepCopyInto(*out) + } + if in.TargetResourceID != nil { + in, out := &in.TargetResourceID, &out.TargetResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PacketCaptureObservation. +func (in *PacketCaptureObservation) DeepCopy() *PacketCaptureObservation { + if in == nil { + return nil + } + out := new(PacketCaptureObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PacketCaptureParameters) DeepCopyInto(out *PacketCaptureParameters) { + *out = *in + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = make([]PacketCaptureFilterParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MaximumBytesPerPacket != nil { + in, out := &in.MaximumBytesPerPacket, &out.MaximumBytesPerPacket + *out = new(float64) + **out = **in + } + if in.MaximumBytesPerSession != nil { + in, out := &in.MaximumBytesPerSession, &out.MaximumBytesPerSession + *out = new(float64) + **out = **in + } + if in.MaximumCaptureDuration != nil { + in, out := &in.MaximumCaptureDuration, &out.MaximumCaptureDuration + *out = new(float64) + **out = **in + } + if in.NetworkWatcherName != nil { + in, out := &in.NetworkWatcherName, &out.NetworkWatcherName + *out = new(string) + **out = **in + } + if in.NetworkWatcherNameRef != nil { + in, out := &in.NetworkWatcherNameRef, &out.NetworkWatcherNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NetworkWatcherNameSelector != nil { + in, out := &in.NetworkWatcherNameSelector, &out.NetworkWatcherNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if 
in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StorageLocation != nil { + in, out := &in.StorageLocation, &out.StorageLocation + *out = new(StorageLocationParameters) + (*in).DeepCopyInto(*out) + } + if in.TargetResourceID != nil { + in, out := &in.TargetResourceID, &out.TargetResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PacketCaptureParameters. +func (in *PacketCaptureParameters) DeepCopy() *PacketCaptureParameters { + if in == nil { + return nil + } + out := new(PacketCaptureParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PacketCaptureSpec) DeepCopyInto(out *PacketCaptureSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PacketCaptureSpec. +func (in *PacketCaptureSpec) DeepCopy() *PacketCaptureSpec { + if in == nil { + return nil + } + out := new(PacketCaptureSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PacketCaptureStatus) DeepCopyInto(out *PacketCaptureStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PacketCaptureStatus. +func (in *PacketCaptureStatus) DeepCopy() *PacketCaptureStatus { + if in == nil { + return nil + } + out := new(PacketCaptureStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PathRuleInitParameters) DeepCopyInto(out *PathRuleInitParameters) { + *out = *in + if in.BackendAddressPoolName != nil { + in, out := &in.BackendAddressPoolName, &out.BackendAddressPoolName + *out = new(string) + **out = **in + } + if in.BackendHTTPSettingsName != nil { + in, out := &in.BackendHTTPSettingsName, &out.BackendHTTPSettingsName + *out = new(string) + **out = **in + } + if in.FirewallPolicyID != nil { + in, out := &in.FirewallPolicyID, &out.FirewallPolicyID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Paths != nil { + in, out := &in.Paths, &out.Paths + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RedirectConfigurationName != nil { + in, out := &in.RedirectConfigurationName, &out.RedirectConfigurationName + *out = new(string) + **out = **in + } + if in.RewriteRuleSetName != nil { + in, out := &in.RewriteRuleSetName, &out.RewriteRuleSetName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PathRuleInitParameters. 
+func (in *PathRuleInitParameters) DeepCopy() *PathRuleInitParameters { + if in == nil { + return nil + } + out := new(PathRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PathRuleObservation) DeepCopyInto(out *PathRuleObservation) { + *out = *in + if in.BackendAddressPoolID != nil { + in, out := &in.BackendAddressPoolID, &out.BackendAddressPoolID + *out = new(string) + **out = **in + } + if in.BackendAddressPoolName != nil { + in, out := &in.BackendAddressPoolName, &out.BackendAddressPoolName + *out = new(string) + **out = **in + } + if in.BackendHTTPSettingsID != nil { + in, out := &in.BackendHTTPSettingsID, &out.BackendHTTPSettingsID + *out = new(string) + **out = **in + } + if in.BackendHTTPSettingsName != nil { + in, out := &in.BackendHTTPSettingsName, &out.BackendHTTPSettingsName + *out = new(string) + **out = **in + } + if in.FirewallPolicyID != nil { + in, out := &in.FirewallPolicyID, &out.FirewallPolicyID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Paths != nil { + in, out := &in.Paths, &out.Paths + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RedirectConfigurationID != nil { + in, out := &in.RedirectConfigurationID, &out.RedirectConfigurationID + *out = new(string) + **out = **in + } + if in.RedirectConfigurationName != nil { + in, out := &in.RedirectConfigurationName, &out.RedirectConfigurationName + *out = new(string) + **out = **in + } + if in.RewriteRuleSetID != nil { + in, out := &in.RewriteRuleSetID, &out.RewriteRuleSetID + *out = new(string) + **out = **in + } + if in.RewriteRuleSetName != nil { + in, out 
:= &in.RewriteRuleSetName, &out.RewriteRuleSetName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PathRuleObservation. +func (in *PathRuleObservation) DeepCopy() *PathRuleObservation { + if in == nil { + return nil + } + out := new(PathRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PathRuleParameters) DeepCopyInto(out *PathRuleParameters) { + *out = *in + if in.BackendAddressPoolName != nil { + in, out := &in.BackendAddressPoolName, &out.BackendAddressPoolName + *out = new(string) + **out = **in + } + if in.BackendHTTPSettingsName != nil { + in, out := &in.BackendHTTPSettingsName, &out.BackendHTTPSettingsName + *out = new(string) + **out = **in + } + if in.FirewallPolicyID != nil { + in, out := &in.FirewallPolicyID, &out.FirewallPolicyID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Paths != nil { + in, out := &in.Paths, &out.Paths + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RedirectConfigurationName != nil { + in, out := &in.RedirectConfigurationName, &out.RedirectConfigurationName + *out = new(string) + **out = **in + } + if in.RewriteRuleSetName != nil { + in, out := &in.RewriteRuleSetName, &out.RewriteRuleSetName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PathRuleParameters. 
+func (in *PathRuleParameters) DeepCopy() *PathRuleParameters { + if in == nil { + return nil + } + out := new(PathRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PeeringAddressesInitParameters) DeepCopyInto(out *PeeringAddressesInitParameters) { + *out = *in + if in.ApipaAddresses != nil { + in, out := &in.ApipaAddresses, &out.ApipaAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IPConfigurationName != nil { + in, out := &in.IPConfigurationName, &out.IPConfigurationName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PeeringAddressesInitParameters. +func (in *PeeringAddressesInitParameters) DeepCopy() *PeeringAddressesInitParameters { + if in == nil { + return nil + } + out := new(PeeringAddressesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PeeringAddressesObservation) DeepCopyInto(out *PeeringAddressesObservation) { + *out = *in + if in.ApipaAddresses != nil { + in, out := &in.ApipaAddresses, &out.ApipaAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DefaultAddresses != nil { + in, out := &in.DefaultAddresses, &out.DefaultAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IPConfigurationName != nil { + in, out := &in.IPConfigurationName, &out.IPConfigurationName + *out = new(string) + **out = **in + } + if in.TunnelIPAddresses != nil { + in, out := &in.TunnelIPAddresses, &out.TunnelIPAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PeeringAddressesObservation. +func (in *PeeringAddressesObservation) DeepCopy() *PeeringAddressesObservation { + if in == nil { + return nil + } + out := new(PeeringAddressesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PeeringAddressesParameters) DeepCopyInto(out *PeeringAddressesParameters) { + *out = *in + if in.ApipaAddresses != nil { + in, out := &in.ApipaAddresses, &out.ApipaAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IPConfigurationName != nil { + in, out := &in.IPConfigurationName, &out.IPConfigurationName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PeeringAddressesParameters. +func (in *PeeringAddressesParameters) DeepCopy() *PeeringAddressesParameters { + if in == nil { + return nil + } + out := new(PeeringAddressesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PointToSiteVPNGateway) DeepCopyInto(out *PointToSiteVPNGateway) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PointToSiteVPNGateway. +func (in *PointToSiteVPNGateway) DeepCopy() *PointToSiteVPNGateway { + if in == nil { + return nil + } + out := new(PointToSiteVPNGateway) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PointToSiteVPNGateway) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PointToSiteVPNGatewayInitParameters) DeepCopyInto(out *PointToSiteVPNGatewayInitParameters) { + *out = *in + if in.ConnectionConfiguration != nil { + in, out := &in.ConnectionConfiguration, &out.ConnectionConfiguration + *out = make([]ConnectionConfigurationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DNSServers != nil { + in, out := &in.DNSServers, &out.DNSServers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.RoutingPreferenceInternetEnabled != nil { + in, out := &in.RoutingPreferenceInternetEnabled, &out.RoutingPreferenceInternetEnabled + *out = new(bool) + **out = **in + } + if in.ScaleUnit != nil { + in, out := &in.ScaleUnit, &out.ScaleUnit + *out = new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPNServerConfigurationID != nil { + in, out := &in.VPNServerConfigurationID, &out.VPNServerConfigurationID + *out = new(string) + **out = **in + } + if in.VPNServerConfigurationIDRef != nil { + in, out := &in.VPNServerConfigurationIDRef, &out.VPNServerConfigurationIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VPNServerConfigurationIDSelector != nil { + in, out := &in.VPNServerConfigurationIDSelector, &out.VPNServerConfigurationIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.VirtualHubID != nil { + in, out := &in.VirtualHubID, &out.VirtualHubID + *out = new(string) + **out = **in + } + if in.VirtualHubIDRef != nil { 
+ in, out := &in.VirtualHubIDRef, &out.VirtualHubIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualHubIDSelector != nil { + in, out := &in.VirtualHubIDSelector, &out.VirtualHubIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PointToSiteVPNGatewayInitParameters. +func (in *PointToSiteVPNGatewayInitParameters) DeepCopy() *PointToSiteVPNGatewayInitParameters { + if in == nil { + return nil + } + out := new(PointToSiteVPNGatewayInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PointToSiteVPNGatewayList) DeepCopyInto(out *PointToSiteVPNGatewayList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PointToSiteVPNGateway, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PointToSiteVPNGatewayList. +func (in *PointToSiteVPNGatewayList) DeepCopy() *PointToSiteVPNGatewayList { + if in == nil { + return nil + } + out := new(PointToSiteVPNGatewayList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PointToSiteVPNGatewayList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PointToSiteVPNGatewayObservation) DeepCopyInto(out *PointToSiteVPNGatewayObservation) { + *out = *in + if in.ConnectionConfiguration != nil { + in, out := &in.ConnectionConfiguration, &out.ConnectionConfiguration + *out = make([]ConnectionConfigurationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DNSServers != nil { + in, out := &in.DNSServers, &out.DNSServers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.RoutingPreferenceInternetEnabled != nil { + in, out := &in.RoutingPreferenceInternetEnabled, &out.RoutingPreferenceInternetEnabled + *out = new(bool) + **out = **in + } + if in.ScaleUnit != nil { + in, out := &in.ScaleUnit, &out.ScaleUnit + *out = new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPNServerConfigurationID != nil { + in, out := &in.VPNServerConfigurationID, &out.VPNServerConfigurationID + *out = new(string) + **out = **in + } + if in.VirtualHubID != nil { + in, out := &in.VirtualHubID, &out.VirtualHubID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PointToSiteVPNGatewayObservation. 
+func (in *PointToSiteVPNGatewayObservation) DeepCopy() *PointToSiteVPNGatewayObservation { + if in == nil { + return nil + } + out := new(PointToSiteVPNGatewayObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PointToSiteVPNGatewayParameters) DeepCopyInto(out *PointToSiteVPNGatewayParameters) { + *out = *in + if in.ConnectionConfiguration != nil { + in, out := &in.ConnectionConfiguration, &out.ConnectionConfiguration + *out = make([]ConnectionConfigurationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DNSServers != nil { + in, out := &in.DNSServers, &out.DNSServers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RoutingPreferenceInternetEnabled != nil { + in, out := &in.RoutingPreferenceInternetEnabled, &out.RoutingPreferenceInternetEnabled + *out = new(bool) + **out = **in + } + if in.ScaleUnit != nil { + in, out := &in.ScaleUnit, &out.ScaleUnit + *out = new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := 
(*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPNServerConfigurationID != nil { + in, out := &in.VPNServerConfigurationID, &out.VPNServerConfigurationID + *out = new(string) + **out = **in + } + if in.VPNServerConfigurationIDRef != nil { + in, out := &in.VPNServerConfigurationIDRef, &out.VPNServerConfigurationIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VPNServerConfigurationIDSelector != nil { + in, out := &in.VPNServerConfigurationIDSelector, &out.VPNServerConfigurationIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.VirtualHubID != nil { + in, out := &in.VirtualHubID, &out.VirtualHubID + *out = new(string) + **out = **in + } + if in.VirtualHubIDRef != nil { + in, out := &in.VirtualHubIDRef, &out.VirtualHubIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualHubIDSelector != nil { + in, out := &in.VirtualHubIDSelector, &out.VirtualHubIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PointToSiteVPNGatewayParameters. +func (in *PointToSiteVPNGatewayParameters) DeepCopy() *PointToSiteVPNGatewayParameters { + if in == nil { + return nil + } + out := new(PointToSiteVPNGatewayParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PointToSiteVPNGatewaySpec) DeepCopyInto(out *PointToSiteVPNGatewaySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PointToSiteVPNGatewaySpec. 
+func (in *PointToSiteVPNGatewaySpec) DeepCopy() *PointToSiteVPNGatewaySpec { + if in == nil { + return nil + } + out := new(PointToSiteVPNGatewaySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PointToSiteVPNGatewayStatus) DeepCopyInto(out *PointToSiteVPNGatewayStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PointToSiteVPNGatewayStatus. +func (in *PointToSiteVPNGatewayStatus) DeepCopy() *PointToSiteVPNGatewayStatus { + if in == nil { + return nil + } + out := new(PointToSiteVPNGatewayStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyGroupInitParameters) DeepCopyInto(out *PolicyGroupInitParameters) { + *out = *in + if in.IsDefault != nil { + in, out := &in.IsDefault, &out.IsDefault + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PolicyMember != nil { + in, out := &in.PolicyMember, &out.PolicyMember + *out = make([]PolicyMemberInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyGroupInitParameters. +func (in *PolicyGroupInitParameters) DeepCopy() *PolicyGroupInitParameters { + if in == nil { + return nil + } + out := new(PolicyGroupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PolicyGroupObservation) DeepCopyInto(out *PolicyGroupObservation) { + *out = *in + if in.IsDefault != nil { + in, out := &in.IsDefault, &out.IsDefault + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PolicyMember != nil { + in, out := &in.PolicyMember, &out.PolicyMember + *out = make([]PolicyMemberObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyGroupObservation. +func (in *PolicyGroupObservation) DeepCopy() *PolicyGroupObservation { + if in == nil { + return nil + } + out := new(PolicyGroupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyGroupParameters) DeepCopyInto(out *PolicyGroupParameters) { + *out = *in + if in.IsDefault != nil { + in, out := &in.IsDefault, &out.IsDefault + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PolicyMember != nil { + in, out := &in.PolicyMember, &out.PolicyMember + *out = make([]PolicyMemberParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyGroupParameters. 
+func (in *PolicyGroupParameters) DeepCopy() *PolicyGroupParameters { + if in == nil { + return nil + } + out := new(PolicyGroupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyMemberInitParameters) DeepCopyInto(out *PolicyMemberInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyMemberInitParameters. +func (in *PolicyMemberInitParameters) DeepCopy() *PolicyMemberInitParameters { + if in == nil { + return nil + } + out := new(PolicyMemberInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyMemberObservation) DeepCopyInto(out *PolicyMemberObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyMemberObservation. +func (in *PolicyMemberObservation) DeepCopy() *PolicyMemberObservation { + if in == nil { + return nil + } + out := new(PolicyMemberObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PolicyMemberParameters) DeepCopyInto(out *PolicyMemberParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyMemberParameters. +func (in *PolicyMemberParameters) DeepCopy() *PolicyMemberParameters { + if in == nil { + return nil + } + out := new(PolicyMemberParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicySettingsInitParameters) DeepCopyInto(out *PolicySettingsInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.FileUploadLimitInMb != nil { + in, out := &in.FileUploadLimitInMb, &out.FileUploadLimitInMb + *out = new(float64) + **out = **in + } + if in.LogScrubbing != nil { + in, out := &in.LogScrubbing, &out.LogScrubbing + *out = new(LogScrubbingInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MaxRequestBodySizeInKb != nil { + in, out := &in.MaxRequestBodySizeInKb, &out.MaxRequestBodySizeInKb + *out = new(float64) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.RequestBodyCheck != nil { + in, out := &in.RequestBodyCheck, &out.RequestBodyCheck + *out = new(bool) + **out = **in + } + if in.RequestBodyInspectLimitInKb != nil { + in, out := &in.RequestBodyInspectLimitInKb, &out.RequestBodyInspectLimitInKb + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicySettingsInitParameters. 
+func (in *PolicySettingsInitParameters) DeepCopy() *PolicySettingsInitParameters { + if in == nil { + return nil + } + out := new(PolicySettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicySettingsObservation) DeepCopyInto(out *PolicySettingsObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.FileUploadLimitInMb != nil { + in, out := &in.FileUploadLimitInMb, &out.FileUploadLimitInMb + *out = new(float64) + **out = **in + } + if in.LogScrubbing != nil { + in, out := &in.LogScrubbing, &out.LogScrubbing + *out = new(LogScrubbingObservation) + (*in).DeepCopyInto(*out) + } + if in.MaxRequestBodySizeInKb != nil { + in, out := &in.MaxRequestBodySizeInKb, &out.MaxRequestBodySizeInKb + *out = new(float64) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.RequestBodyCheck != nil { + in, out := &in.RequestBodyCheck, &out.RequestBodyCheck + *out = new(bool) + **out = **in + } + if in.RequestBodyInspectLimitInKb != nil { + in, out := &in.RequestBodyInspectLimitInKb, &out.RequestBodyInspectLimitInKb + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicySettingsObservation. +func (in *PolicySettingsObservation) DeepCopy() *PolicySettingsObservation { + if in == nil { + return nil + } + out := new(PolicySettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PolicySettingsParameters) DeepCopyInto(out *PolicySettingsParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.FileUploadLimitInMb != nil { + in, out := &in.FileUploadLimitInMb, &out.FileUploadLimitInMb + *out = new(float64) + **out = **in + } + if in.LogScrubbing != nil { + in, out := &in.LogScrubbing, &out.LogScrubbing + *out = new(LogScrubbingParameters) + (*in).DeepCopyInto(*out) + } + if in.MaxRequestBodySizeInKb != nil { + in, out := &in.MaxRequestBodySizeInKb, &out.MaxRequestBodySizeInKb + *out = new(float64) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.RequestBodyCheck != nil { + in, out := &in.RequestBodyCheck, &out.RequestBodyCheck + *out = new(bool) + **out = **in + } + if in.RequestBodyInspectLimitInKb != nil { + in, out := &in.RequestBodyInspectLimitInKb, &out.RequestBodyInspectLimitInKb + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicySettingsParameters. +func (in *PolicySettingsParameters) DeepCopy() *PolicySettingsParameters { + if in == nil { + return nil + } + out := new(PolicySettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrivateDNSZone) DeepCopyInto(out *PrivateDNSZone) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateDNSZone. 
+func (in *PrivateDNSZone) DeepCopy() *PrivateDNSZone { + if in == nil { + return nil + } + out := new(PrivateDNSZone) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PrivateDNSZone) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrivateDNSZoneConfigsInitParameters) DeepCopyInto(out *PrivateDNSZoneConfigsInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateDNSZoneConfigsInitParameters. +func (in *PrivateDNSZoneConfigsInitParameters) DeepCopy() *PrivateDNSZoneConfigsInitParameters { + if in == nil { + return nil + } + out := new(PrivateDNSZoneConfigsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrivateDNSZoneConfigsObservation) DeepCopyInto(out *PrivateDNSZoneConfigsObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PrivateDNSZoneID != nil { + in, out := &in.PrivateDNSZoneID, &out.PrivateDNSZoneID + *out = new(string) + **out = **in + } + if in.RecordSets != nil { + in, out := &in.RecordSets, &out.RecordSets + *out = make([]RecordSetsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateDNSZoneConfigsObservation. 
+func (in *PrivateDNSZoneConfigsObservation) DeepCopy() *PrivateDNSZoneConfigsObservation { + if in == nil { + return nil + } + out := new(PrivateDNSZoneConfigsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrivateDNSZoneConfigsParameters) DeepCopyInto(out *PrivateDNSZoneConfigsParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateDNSZoneConfigsParameters. +func (in *PrivateDNSZoneConfigsParameters) DeepCopy() *PrivateDNSZoneConfigsParameters { + if in == nil { + return nil + } + out := new(PrivateDNSZoneConfigsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrivateDNSZoneGroupInitParameters) DeepCopyInto(out *PrivateDNSZoneGroupInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PrivateDNSZoneIds != nil { + in, out := &in.PrivateDNSZoneIds, &out.PrivateDNSZoneIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrivateDNSZoneIdsRefs != nil { + in, out := &in.PrivateDNSZoneIdsRefs, &out.PrivateDNSZoneIdsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PrivateDNSZoneIdsSelector != nil { + in, out := &in.PrivateDNSZoneIdsSelector, &out.PrivateDNSZoneIdsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateDNSZoneGroupInitParameters. 
+func (in *PrivateDNSZoneGroupInitParameters) DeepCopy() *PrivateDNSZoneGroupInitParameters { + if in == nil { + return nil + } + out := new(PrivateDNSZoneGroupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrivateDNSZoneGroupObservation) DeepCopyInto(out *PrivateDNSZoneGroupObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PrivateDNSZoneIds != nil { + in, out := &in.PrivateDNSZoneIds, &out.PrivateDNSZoneIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateDNSZoneGroupObservation. +func (in *PrivateDNSZoneGroupObservation) DeepCopy() *PrivateDNSZoneGroupObservation { + if in == nil { + return nil + } + out := new(PrivateDNSZoneGroupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PrivateDNSZoneGroupParameters) DeepCopyInto(out *PrivateDNSZoneGroupParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PrivateDNSZoneIds != nil { + in, out := &in.PrivateDNSZoneIds, &out.PrivateDNSZoneIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrivateDNSZoneIdsRefs != nil { + in, out := &in.PrivateDNSZoneIdsRefs, &out.PrivateDNSZoneIdsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PrivateDNSZoneIdsSelector != nil { + in, out := &in.PrivateDNSZoneIdsSelector, &out.PrivateDNSZoneIdsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateDNSZoneGroupParameters. +func (in *PrivateDNSZoneGroupParameters) DeepCopy() *PrivateDNSZoneGroupParameters { + if in == nil { + return nil + } + out := new(PrivateDNSZoneGroupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrivateDNSZoneInitParameters) DeepCopyInto(out *PrivateDNSZoneInitParameters) { + *out = *in + if in.SoaRecord != nil { + in, out := &in.SoaRecord, &out.SoaRecord + *out = new(PrivateDNSZoneSoaRecordInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateDNSZoneInitParameters. 
+func (in *PrivateDNSZoneInitParameters) DeepCopy() *PrivateDNSZoneInitParameters { + if in == nil { + return nil + } + out := new(PrivateDNSZoneInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrivateDNSZoneList) DeepCopyInto(out *PrivateDNSZoneList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PrivateDNSZone, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateDNSZoneList. +func (in *PrivateDNSZoneList) DeepCopy() *PrivateDNSZoneList { + if in == nil { + return nil + } + out := new(PrivateDNSZoneList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PrivateDNSZoneList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PrivateDNSZoneObservation) DeepCopyInto(out *PrivateDNSZoneObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.MaxNumberOfRecordSets != nil { + in, out := &in.MaxNumberOfRecordSets, &out.MaxNumberOfRecordSets + *out = new(float64) + **out = **in + } + if in.MaxNumberOfVirtualNetworkLinks != nil { + in, out := &in.MaxNumberOfVirtualNetworkLinks, &out.MaxNumberOfVirtualNetworkLinks + *out = new(float64) + **out = **in + } + if in.MaxNumberOfVirtualNetworkLinksWithRegistration != nil { + in, out := &in.MaxNumberOfVirtualNetworkLinksWithRegistration, &out.MaxNumberOfVirtualNetworkLinksWithRegistration + *out = new(float64) + **out = **in + } + if in.NumberOfRecordSets != nil { + in, out := &in.NumberOfRecordSets, &out.NumberOfRecordSets + *out = new(float64) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.SoaRecord != nil { + in, out := &in.SoaRecord, &out.SoaRecord + *out = new(PrivateDNSZoneSoaRecordObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateDNSZoneObservation. +func (in *PrivateDNSZoneObservation) DeepCopy() *PrivateDNSZoneObservation { + if in == nil { + return nil + } + out := new(PrivateDNSZoneObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PrivateDNSZoneParameters) DeepCopyInto(out *PrivateDNSZoneParameters) { + *out = *in + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SoaRecord != nil { + in, out := &in.SoaRecord, &out.SoaRecord + *out = new(PrivateDNSZoneSoaRecordParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateDNSZoneParameters. +func (in *PrivateDNSZoneParameters) DeepCopy() *PrivateDNSZoneParameters { + if in == nil { + return nil + } + out := new(PrivateDNSZoneParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PrivateDNSZoneSoaRecordInitParameters) DeepCopyInto(out *PrivateDNSZoneSoaRecordInitParameters) { + *out = *in + if in.Email != nil { + in, out := &in.Email, &out.Email + *out = new(string) + **out = **in + } + if in.ExpireTime != nil { + in, out := &in.ExpireTime, &out.ExpireTime + *out = new(float64) + **out = **in + } + if in.MinimumTTL != nil { + in, out := &in.MinimumTTL, &out.MinimumTTL + *out = new(float64) + **out = **in + } + if in.RefreshTime != nil { + in, out := &in.RefreshTime, &out.RefreshTime + *out = new(float64) + **out = **in + } + if in.RetryTime != nil { + in, out := &in.RetryTime, &out.RetryTime + *out = new(float64) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateDNSZoneSoaRecordInitParameters. +func (in *PrivateDNSZoneSoaRecordInitParameters) DeepCopy() *PrivateDNSZoneSoaRecordInitParameters { + if in == nil { + return nil + } + out := new(PrivateDNSZoneSoaRecordInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PrivateDNSZoneSoaRecordObservation) DeepCopyInto(out *PrivateDNSZoneSoaRecordObservation) { + *out = *in + if in.Email != nil { + in, out := &in.Email, &out.Email + *out = new(string) + **out = **in + } + if in.ExpireTime != nil { + in, out := &in.ExpireTime, &out.ExpireTime + *out = new(float64) + **out = **in + } + if in.Fqdn != nil { + in, out := &in.Fqdn, &out.Fqdn + *out = new(string) + **out = **in + } + if in.HostName != nil { + in, out := &in.HostName, &out.HostName + *out = new(string) + **out = **in + } + if in.MinimumTTL != nil { + in, out := &in.MinimumTTL, &out.MinimumTTL + *out = new(float64) + **out = **in + } + if in.RefreshTime != nil { + in, out := &in.RefreshTime, &out.RefreshTime + *out = new(float64) + **out = **in + } + if in.RetryTime != nil { + in, out := &in.RetryTime, &out.RetryTime + *out = new(float64) + **out = **in + } + if in.SerialNumber != nil { + in, out := &in.SerialNumber, &out.SerialNumber + *out = new(float64) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateDNSZoneSoaRecordObservation. +func (in *PrivateDNSZoneSoaRecordObservation) DeepCopy() *PrivateDNSZoneSoaRecordObservation { + if in == nil { + return nil + } + out := new(PrivateDNSZoneSoaRecordObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PrivateDNSZoneSoaRecordParameters) DeepCopyInto(out *PrivateDNSZoneSoaRecordParameters) { + *out = *in + if in.Email != nil { + in, out := &in.Email, &out.Email + *out = new(string) + **out = **in + } + if in.ExpireTime != nil { + in, out := &in.ExpireTime, &out.ExpireTime + *out = new(float64) + **out = **in + } + if in.MinimumTTL != nil { + in, out := &in.MinimumTTL, &out.MinimumTTL + *out = new(float64) + **out = **in + } + if in.RefreshTime != nil { + in, out := &in.RefreshTime, &out.RefreshTime + *out = new(float64) + **out = **in + } + if in.RetryTime != nil { + in, out := &in.RetryTime, &out.RetryTime + *out = new(float64) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateDNSZoneSoaRecordParameters. +func (in *PrivateDNSZoneSoaRecordParameters) DeepCopy() *PrivateDNSZoneSoaRecordParameters { + if in == nil { + return nil + } + out := new(PrivateDNSZoneSoaRecordParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrivateDNSZoneSpec) DeepCopyInto(out *PrivateDNSZoneSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateDNSZoneSpec. 
+func (in *PrivateDNSZoneSpec) DeepCopy() *PrivateDNSZoneSpec { + if in == nil { + return nil + } + out := new(PrivateDNSZoneSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrivateDNSZoneStatus) DeepCopyInto(out *PrivateDNSZoneStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateDNSZoneStatus. +func (in *PrivateDNSZoneStatus) DeepCopy() *PrivateDNSZoneStatus { + if in == nil { + return nil + } + out := new(PrivateDNSZoneStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrivateEndpoint) DeepCopyInto(out *PrivateEndpoint) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateEndpoint. +func (in *PrivateEndpoint) DeepCopy() *PrivateEndpoint { + if in == nil { + return nil + } + out := new(PrivateEndpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PrivateEndpoint) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PrivateEndpointConnectionInitParameters) DeepCopyInto(out *PrivateEndpointConnectionInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateEndpointConnectionInitParameters. +func (in *PrivateEndpointConnectionInitParameters) DeepCopy() *PrivateEndpointConnectionInitParameters { + if in == nil { + return nil + } + out := new(PrivateEndpointConnectionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrivateEndpointConnectionObservation) DeepCopyInto(out *PrivateEndpointConnectionObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateEndpointConnectionObservation. +func (in *PrivateEndpointConnectionObservation) DeepCopy() *PrivateEndpointConnectionObservation { + if in == nil { + return nil + } + out := new(PrivateEndpointConnectionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrivateEndpointConnectionParameters) DeepCopyInto(out *PrivateEndpointConnectionParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateEndpointConnectionParameters. +func (in *PrivateEndpointConnectionParameters) DeepCopy() *PrivateEndpointConnectionParameters { + if in == nil { + return nil + } + out := new(PrivateEndpointConnectionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PrivateEndpointIPConfigurationInitParameters) DeepCopyInto(out *PrivateEndpointIPConfigurationInitParameters) { + *out = *in + if in.MemberName != nil { + in, out := &in.MemberName, &out.MemberName + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PrivateIPAddress != nil { + in, out := &in.PrivateIPAddress, &out.PrivateIPAddress + *out = new(string) + **out = **in + } + if in.SubresourceName != nil { + in, out := &in.SubresourceName, &out.SubresourceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateEndpointIPConfigurationInitParameters. +func (in *PrivateEndpointIPConfigurationInitParameters) DeepCopy() *PrivateEndpointIPConfigurationInitParameters { + if in == nil { + return nil + } + out := new(PrivateEndpointIPConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrivateEndpointIPConfigurationObservation) DeepCopyInto(out *PrivateEndpointIPConfigurationObservation) { + *out = *in + if in.MemberName != nil { + in, out := &in.MemberName, &out.MemberName + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PrivateIPAddress != nil { + in, out := &in.PrivateIPAddress, &out.PrivateIPAddress + *out = new(string) + **out = **in + } + if in.SubresourceName != nil { + in, out := &in.SubresourceName, &out.SubresourceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateEndpointIPConfigurationObservation. 
+func (in *PrivateEndpointIPConfigurationObservation) DeepCopy() *PrivateEndpointIPConfigurationObservation { + if in == nil { + return nil + } + out := new(PrivateEndpointIPConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrivateEndpointIPConfigurationParameters) DeepCopyInto(out *PrivateEndpointIPConfigurationParameters) { + *out = *in + if in.MemberName != nil { + in, out := &in.MemberName, &out.MemberName + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PrivateIPAddress != nil { + in, out := &in.PrivateIPAddress, &out.PrivateIPAddress + *out = new(string) + **out = **in + } + if in.SubresourceName != nil { + in, out := &in.SubresourceName, &out.SubresourceName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateEndpointIPConfigurationParameters. +func (in *PrivateEndpointIPConfigurationParameters) DeepCopy() *PrivateEndpointIPConfigurationParameters { + if in == nil { + return nil + } + out := new(PrivateEndpointIPConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PrivateEndpointInitParameters) DeepCopyInto(out *PrivateEndpointInitParameters) { + *out = *in + if in.CustomNetworkInterfaceName != nil { + in, out := &in.CustomNetworkInterfaceName, &out.CustomNetworkInterfaceName + *out = new(string) + **out = **in + } + if in.IPConfiguration != nil { + in, out := &in.IPConfiguration, &out.IPConfiguration + *out = make([]PrivateEndpointIPConfigurationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PrivateDNSZoneGroup != nil { + in, out := &in.PrivateDNSZoneGroup, &out.PrivateDNSZoneGroup + *out = new(PrivateDNSZoneGroupInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PrivateServiceConnection != nil { + in, out := &in.PrivateServiceConnection, &out.PrivateServiceConnection + *out = new(PrivateServiceConnectionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateEndpointInitParameters. 
+func (in *PrivateEndpointInitParameters) DeepCopy() *PrivateEndpointInitParameters { + if in == nil { + return nil + } + out := new(PrivateEndpointInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrivateEndpointList) DeepCopyInto(out *PrivateEndpointList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PrivateEndpoint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateEndpointList. +func (in *PrivateEndpointList) DeepCopy() *PrivateEndpointList { + if in == nil { + return nil + } + out := new(PrivateEndpointList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PrivateEndpointList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PrivateEndpointObservation) DeepCopyInto(out *PrivateEndpointObservation) { + *out = *in + if in.CustomDNSConfigs != nil { + in, out := &in.CustomDNSConfigs, &out.CustomDNSConfigs + *out = make([]CustomDNSConfigsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CustomNetworkInterfaceName != nil { + in, out := &in.CustomNetworkInterfaceName, &out.CustomNetworkInterfaceName + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IPConfiguration != nil { + in, out := &in.IPConfiguration, &out.IPConfiguration + *out = make([]PrivateEndpointIPConfigurationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.NetworkInterface != nil { + in, out := &in.NetworkInterface, &out.NetworkInterface + *out = make([]NetworkInterfaceObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PrivateDNSZoneConfigs != nil { + in, out := &in.PrivateDNSZoneConfigs, &out.PrivateDNSZoneConfigs + *out = make([]PrivateDNSZoneConfigsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PrivateDNSZoneGroup != nil { + in, out := &in.PrivateDNSZoneGroup, &out.PrivateDNSZoneGroup + *out = new(PrivateDNSZoneGroupObservation) + (*in).DeepCopyInto(*out) + } + if in.PrivateServiceConnection != nil { + in, out := &in.PrivateServiceConnection, &out.PrivateServiceConnection + *out = new(PrivateServiceConnectionObservation) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, 
&out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateEndpointObservation. +func (in *PrivateEndpointObservation) DeepCopy() *PrivateEndpointObservation { + if in == nil { + return nil + } + out := new(PrivateEndpointObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrivateEndpointParameters) DeepCopyInto(out *PrivateEndpointParameters) { + *out = *in + if in.CustomNetworkInterfaceName != nil { + in, out := &in.CustomNetworkInterfaceName, &out.CustomNetworkInterfaceName + *out = new(string) + **out = **in + } + if in.IPConfiguration != nil { + in, out := &in.IPConfiguration, &out.IPConfiguration + *out = make([]PrivateEndpointIPConfigurationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PrivateDNSZoneGroup != nil { + in, out := &in.PrivateDNSZoneGroup, &out.PrivateDNSZoneGroup + *out = new(PrivateDNSZoneGroupParameters) + (*in).DeepCopyInto(*out) + } + if in.PrivateServiceConnection != nil { + in, out := &in.PrivateServiceConnection, &out.PrivateServiceConnection + *out = new(PrivateServiceConnectionParameters) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector 
!= nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateEndpointParameters. +func (in *PrivateEndpointParameters) DeepCopy() *PrivateEndpointParameters { + if in == nil { + return nil + } + out := new(PrivateEndpointParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrivateEndpointSpec) DeepCopyInto(out *PrivateEndpointSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateEndpointSpec. +func (in *PrivateEndpointSpec) DeepCopy() *PrivateEndpointSpec { + if in == nil { + return nil + } + out := new(PrivateEndpointSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PrivateEndpointStatus) DeepCopyInto(out *PrivateEndpointStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateEndpointStatus. +func (in *PrivateEndpointStatus) DeepCopy() *PrivateEndpointStatus { + if in == nil { + return nil + } + out := new(PrivateEndpointStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrivateLinkConfigurationInitParameters) DeepCopyInto(out *PrivateLinkConfigurationInitParameters) { + *out = *in + if in.IPConfiguration != nil { + in, out := &in.IPConfiguration, &out.IPConfiguration + *out = make([]IPConfigurationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateLinkConfigurationInitParameters. +func (in *PrivateLinkConfigurationInitParameters) DeepCopy() *PrivateLinkConfigurationInitParameters { + if in == nil { + return nil + } + out := new(PrivateLinkConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PrivateLinkConfigurationObservation) DeepCopyInto(out *PrivateLinkConfigurationObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IPConfiguration != nil { + in, out := &in.IPConfiguration, &out.IPConfiguration + *out = make([]IPConfigurationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateLinkConfigurationObservation. +func (in *PrivateLinkConfigurationObservation) DeepCopy() *PrivateLinkConfigurationObservation { + if in == nil { + return nil + } + out := new(PrivateLinkConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrivateLinkConfigurationParameters) DeepCopyInto(out *PrivateLinkConfigurationParameters) { + *out = *in + if in.IPConfiguration != nil { + in, out := &in.IPConfiguration, &out.IPConfiguration + *out = make([]IPConfigurationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateLinkConfigurationParameters. +func (in *PrivateLinkConfigurationParameters) DeepCopy() *PrivateLinkConfigurationParameters { + if in == nil { + return nil + } + out := new(PrivateLinkConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PrivateServiceConnectionInitParameters) DeepCopyInto(out *PrivateServiceConnectionInitParameters) { + *out = *in + if in.IsManualConnection != nil { + in, out := &in.IsManualConnection, &out.IsManualConnection + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PrivateConnectionResourceAlias != nil { + in, out := &in.PrivateConnectionResourceAlias, &out.PrivateConnectionResourceAlias + *out = new(string) + **out = **in + } + if in.PrivateConnectionResourceID != nil { + in, out := &in.PrivateConnectionResourceID, &out.PrivateConnectionResourceID + *out = new(string) + **out = **in + } + if in.RequestMessage != nil { + in, out := &in.RequestMessage, &out.RequestMessage + *out = new(string) + **out = **in + } + if in.SubresourceNames != nil { + in, out := &in.SubresourceNames, &out.SubresourceNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateServiceConnectionInitParameters. +func (in *PrivateServiceConnectionInitParameters) DeepCopy() *PrivateServiceConnectionInitParameters { + if in == nil { + return nil + } + out := new(PrivateServiceConnectionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PrivateServiceConnectionObservation) DeepCopyInto(out *PrivateServiceConnectionObservation) { + *out = *in + if in.IsManualConnection != nil { + in, out := &in.IsManualConnection, &out.IsManualConnection + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PrivateConnectionResourceAlias != nil { + in, out := &in.PrivateConnectionResourceAlias, &out.PrivateConnectionResourceAlias + *out = new(string) + **out = **in + } + if in.PrivateConnectionResourceID != nil { + in, out := &in.PrivateConnectionResourceID, &out.PrivateConnectionResourceID + *out = new(string) + **out = **in + } + if in.PrivateIPAddress != nil { + in, out := &in.PrivateIPAddress, &out.PrivateIPAddress + *out = new(string) + **out = **in + } + if in.RequestMessage != nil { + in, out := &in.RequestMessage, &out.RequestMessage + *out = new(string) + **out = **in + } + if in.SubresourceNames != nil { + in, out := &in.SubresourceNames, &out.SubresourceNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateServiceConnectionObservation. +func (in *PrivateServiceConnectionObservation) DeepCopy() *PrivateServiceConnectionObservation { + if in == nil { + return nil + } + out := new(PrivateServiceConnectionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PrivateServiceConnectionParameters) DeepCopyInto(out *PrivateServiceConnectionParameters) { + *out = *in + if in.IsManualConnection != nil { + in, out := &in.IsManualConnection, &out.IsManualConnection + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PrivateConnectionResourceAlias != nil { + in, out := &in.PrivateConnectionResourceAlias, &out.PrivateConnectionResourceAlias + *out = new(string) + **out = **in + } + if in.PrivateConnectionResourceID != nil { + in, out := &in.PrivateConnectionResourceID, &out.PrivateConnectionResourceID + *out = new(string) + **out = **in + } + if in.RequestMessage != nil { + in, out := &in.RequestMessage, &out.RequestMessage + *out = new(string) + **out = **in + } + if in.SubresourceNames != nil { + in, out := &in.SubresourceNames, &out.SubresourceNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateServiceConnectionParameters. +func (in *PrivateServiceConnectionParameters) DeepCopy() *PrivateServiceConnectionParameters { + if in == nil { + return nil + } + out := new(PrivateServiceConnectionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProbeInitParameters) DeepCopyInto(out *ProbeInitParameters) { + *out = *in + if in.Host != nil { + in, out := &in.Host, &out.Host + *out = new(string) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(MatchInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MinimumServers != nil { + in, out := &in.MinimumServers, &out.MinimumServers + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.PickHostNameFromBackendHTTPSettings != nil { + in, out := &in.PickHostNameFromBackendHTTPSettings, &out.PickHostNameFromBackendHTTPSettings + *out = new(bool) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(float64) + **out = **in + } + if in.UnhealthyThreshold != nil { + in, out := &in.UnhealthyThreshold, &out.UnhealthyThreshold + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProbeInitParameters. +func (in *ProbeInitParameters) DeepCopy() *ProbeInitParameters { + if in == nil { + return nil + } + out := new(ProbeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProbeObservation) DeepCopyInto(out *ProbeObservation) { + *out = *in + if in.Host != nil { + in, out := &in.Host, &out.Host + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(MatchObservation) + (*in).DeepCopyInto(*out) + } + if in.MinimumServers != nil { + in, out := &in.MinimumServers, &out.MinimumServers + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.PickHostNameFromBackendHTTPSettings != nil { + in, out := &in.PickHostNameFromBackendHTTPSettings, &out.PickHostNameFromBackendHTTPSettings + *out = new(bool) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(float64) + **out = **in + } + if in.UnhealthyThreshold != nil { + in, out := &in.UnhealthyThreshold, &out.UnhealthyThreshold + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProbeObservation. +func (in *ProbeObservation) DeepCopy() *ProbeObservation { + if in == nil { + return nil + } + out := new(ProbeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProbeParameters) DeepCopyInto(out *ProbeParameters) { + *out = *in + if in.Host != nil { + in, out := &in.Host, &out.Host + *out = new(string) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = new(MatchParameters) + (*in).DeepCopyInto(*out) + } + if in.MinimumServers != nil { + in, out := &in.MinimumServers, &out.MinimumServers + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.PickHostNameFromBackendHTTPSettings != nil { + in, out := &in.PickHostNameFromBackendHTTPSettings, &out.PickHostNameFromBackendHTTPSettings + *out = new(bool) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(float64) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(float64) + **out = **in + } + if in.UnhealthyThreshold != nil { + in, out := &in.UnhealthyThreshold, &out.UnhealthyThreshold + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProbeParameters. +func (in *ProbeParameters) DeepCopy() *ProbeParameters { + if in == nil { + return nil + } + out := new(ProbeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Profile) DeepCopyInto(out *Profile) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Profile. +func (in *Profile) DeepCopy() *Profile { + if in == nil { + return nil + } + out := new(Profile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Profile) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProfileInitParameters) DeepCopyInto(out *ProfileInitParameters) { + *out = *in + if in.ContainerNetworkInterface != nil { + in, out := &in.ContainerNetworkInterface, &out.ContainerNetworkInterface + *out = new(ContainerNetworkInterfaceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProfileInitParameters. +func (in *ProfileInitParameters) DeepCopy() *ProfileInitParameters { + if in == nil { + return nil + } + out := new(ProfileInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProfileList) DeepCopyInto(out *ProfileList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Profile, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProfileList. +func (in *ProfileList) DeepCopy() *ProfileList { + if in == nil { + return nil + } + out := new(ProfileList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProfileList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProfileObservation) DeepCopyInto(out *ProfileObservation) { + *out = *in + if in.ContainerNetworkInterface != nil { + in, out := &in.ContainerNetworkInterface, &out.ContainerNetworkInterface + *out = new(ContainerNetworkInterfaceObservation) + (*in).DeepCopyInto(*out) + } + if in.ContainerNetworkInterfaceIds != nil { + in, out := &in.ContainerNetworkInterfaceIds, &out.ContainerNetworkInterfaceIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] 
= nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProfileObservation. +func (in *ProfileObservation) DeepCopy() *ProfileObservation { + if in == nil { + return nil + } + out := new(ProfileObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProfileParameters) DeepCopyInto(out *ProfileParameters) { + *out = *in + if in.ContainerNetworkInterface != nil { + in, out := &in.ContainerNetworkInterface, &out.ContainerNetworkInterface + *out = new(ContainerNetworkInterfaceParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProfileParameters. 
+func (in *ProfileParameters) DeepCopy() *ProfileParameters { + if in == nil { + return nil + } + out := new(ProfileParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProfileSpec) DeepCopyInto(out *ProfileSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProfileSpec. +func (in *ProfileSpec) DeepCopy() *ProfileSpec { + if in == nil { + return nil + } + out := new(ProfileSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProfileStatus) DeepCopyInto(out *ProfileStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProfileStatus. +func (in *ProfileStatus) DeepCopy() *ProfileStatus { + if in == nil { + return nil + } + out := new(ProfileStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PropagatedRouteTableInitParameters) DeepCopyInto(out *PropagatedRouteTableInitParameters) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RouteTableIds != nil { + in, out := &in.RouteTableIds, &out.RouteTableIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PropagatedRouteTableInitParameters. +func (in *PropagatedRouteTableInitParameters) DeepCopy() *PropagatedRouteTableInitParameters { + if in == nil { + return nil + } + out := new(PropagatedRouteTableInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PropagatedRouteTableObservation) DeepCopyInto(out *PropagatedRouteTableObservation) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RouteTableIds != nil { + in, out := &in.RouteTableIds, &out.RouteTableIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PropagatedRouteTableObservation. 
+func (in *PropagatedRouteTableObservation) DeepCopy() *PropagatedRouteTableObservation { + if in == nil { + return nil + } + out := new(PropagatedRouteTableObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PropagatedRouteTableParameters) DeepCopyInto(out *PropagatedRouteTableParameters) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RouteTableIds != nil { + in, out := &in.RouteTableIds, &out.RouteTableIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PropagatedRouteTableParameters. +func (in *PropagatedRouteTableParameters) DeepCopy() *PropagatedRouteTableParameters { + if in == nil { + return nil + } + out := new(PropagatedRouteTableParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RadiusClientRootCertificateInitParameters) DeepCopyInto(out *RadiusClientRootCertificateInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Thumbprint != nil { + in, out := &in.Thumbprint, &out.Thumbprint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RadiusClientRootCertificateInitParameters. 
+func (in *RadiusClientRootCertificateInitParameters) DeepCopy() *RadiusClientRootCertificateInitParameters { + if in == nil { + return nil + } + out := new(RadiusClientRootCertificateInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RadiusClientRootCertificateObservation) DeepCopyInto(out *RadiusClientRootCertificateObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Thumbprint != nil { + in, out := &in.Thumbprint, &out.Thumbprint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RadiusClientRootCertificateObservation. +func (in *RadiusClientRootCertificateObservation) DeepCopy() *RadiusClientRootCertificateObservation { + if in == nil { + return nil + } + out := new(RadiusClientRootCertificateObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RadiusClientRootCertificateParameters) DeepCopyInto(out *RadiusClientRootCertificateParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Thumbprint != nil { + in, out := &in.Thumbprint, &out.Thumbprint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RadiusClientRootCertificateParameters. +func (in *RadiusClientRootCertificateParameters) DeepCopy() *RadiusClientRootCertificateParameters { + if in == nil { + return nil + } + out := new(RadiusClientRootCertificateParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RadiusInitParameters) DeepCopyInto(out *RadiusInitParameters) { + *out = *in + if in.ClientRootCertificate != nil { + in, out := &in.ClientRootCertificate, &out.ClientRootCertificate + *out = make([]RadiusClientRootCertificateInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = make([]ServerInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServerRootCertificate != nil { + in, out := &in.ServerRootCertificate, &out.ServerRootCertificate + *out = make([]ServerRootCertificateInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RadiusInitParameters. +func (in *RadiusInitParameters) DeepCopy() *RadiusInitParameters { + if in == nil { + return nil + } + out := new(RadiusInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RadiusObservation) DeepCopyInto(out *RadiusObservation) { + *out = *in + if in.ClientRootCertificate != nil { + in, out := &in.ClientRootCertificate, &out.ClientRootCertificate + *out = make([]RadiusClientRootCertificateObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = make([]ServerObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServerRootCertificate != nil { + in, out := &in.ServerRootCertificate, &out.ServerRootCertificate + *out = make([]ServerRootCertificateObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RadiusObservation. 
+func (in *RadiusObservation) DeepCopy() *RadiusObservation { + if in == nil { + return nil + } + out := new(RadiusObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RadiusParameters) DeepCopyInto(out *RadiusParameters) { + *out = *in + if in.ClientRootCertificate != nil { + in, out := &in.ClientRootCertificate, &out.ClientRootCertificate + *out = make([]RadiusClientRootCertificateParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = make([]ServerParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServerRootCertificate != nil { + in, out := &in.ServerRootCertificate, &out.ServerRootCertificate + *out = make([]ServerRootCertificateParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RadiusParameters. +func (in *RadiusParameters) DeepCopy() *RadiusParameters { + if in == nil { + return nil + } + out := new(RadiusParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RadiusServerInitParameters) DeepCopyInto(out *RadiusServerInitParameters) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } + if in.Score != nil { + in, out := &in.Score, &out.Score + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RadiusServerInitParameters. 
+func (in *RadiusServerInitParameters) DeepCopy() *RadiusServerInitParameters { + if in == nil { + return nil + } + out := new(RadiusServerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RadiusServerObservation) DeepCopyInto(out *RadiusServerObservation) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } + if in.Score != nil { + in, out := &in.Score, &out.Score + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RadiusServerObservation. +func (in *RadiusServerObservation) DeepCopy() *RadiusServerObservation { + if in == nil { + return nil + } + out := new(RadiusServerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RadiusServerParameters) DeepCopyInto(out *RadiusServerParameters) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } + if in.Score != nil { + in, out := &in.Score, &out.Score + *out = new(float64) + **out = **in + } + out.SecretSecretRef = in.SecretSecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RadiusServerParameters. +func (in *RadiusServerParameters) DeepCopy() *RadiusServerParameters { + if in == nil { + return nil + } + out := new(RadiusServerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RecordSetsInitParameters) DeepCopyInto(out *RecordSetsInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordSetsInitParameters. +func (in *RecordSetsInitParameters) DeepCopy() *RecordSetsInitParameters { + if in == nil { + return nil + } + out := new(RecordSetsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecordSetsObservation) DeepCopyInto(out *RecordSetsObservation) { + *out = *in + if in.Fqdn != nil { + in, out := &in.Fqdn, &out.Fqdn + *out = new(string) + **out = **in + } + if in.IPAddresses != nil { + in, out := &in.IPAddresses, &out.IPAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordSetsObservation. +func (in *RecordSetsObservation) DeepCopy() *RecordSetsObservation { + if in == nil { + return nil + } + out := new(RecordSetsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecordSetsParameters) DeepCopyInto(out *RecordSetsParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecordSetsParameters. 
+func (in *RecordSetsParameters) DeepCopy() *RecordSetsParameters { + if in == nil { + return nil + } + out := new(RecordSetsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedirectConfigurationInitParameters) DeepCopyInto(out *RedirectConfigurationInitParameters) { + *out = *in + if in.IncludePath != nil { + in, out := &in.IncludePath, &out.IncludePath + *out = new(bool) + **out = **in + } + if in.IncludeQueryString != nil { + in, out := &in.IncludeQueryString, &out.IncludeQueryString + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RedirectType != nil { + in, out := &in.RedirectType, &out.RedirectType + *out = new(string) + **out = **in + } + if in.TargetListenerName != nil { + in, out := &in.TargetListenerName, &out.TargetListenerName + *out = new(string) + **out = **in + } + if in.TargetURL != nil { + in, out := &in.TargetURL, &out.TargetURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedirectConfigurationInitParameters. +func (in *RedirectConfigurationInitParameters) DeepCopy() *RedirectConfigurationInitParameters { + if in == nil { + return nil + } + out := new(RedirectConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RedirectConfigurationObservation) DeepCopyInto(out *RedirectConfigurationObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IncludePath != nil { + in, out := &in.IncludePath, &out.IncludePath + *out = new(bool) + **out = **in + } + if in.IncludeQueryString != nil { + in, out := &in.IncludeQueryString, &out.IncludeQueryString + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RedirectType != nil { + in, out := &in.RedirectType, &out.RedirectType + *out = new(string) + **out = **in + } + if in.TargetListenerID != nil { + in, out := &in.TargetListenerID, &out.TargetListenerID + *out = new(string) + **out = **in + } + if in.TargetListenerName != nil { + in, out := &in.TargetListenerName, &out.TargetListenerName + *out = new(string) + **out = **in + } + if in.TargetURL != nil { + in, out := &in.TargetURL, &out.TargetURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedirectConfigurationObservation. +func (in *RedirectConfigurationObservation) DeepCopy() *RedirectConfigurationObservation { + if in == nil { + return nil + } + out := new(RedirectConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RedirectConfigurationParameters) DeepCopyInto(out *RedirectConfigurationParameters) { + *out = *in + if in.IncludePath != nil { + in, out := &in.IncludePath, &out.IncludePath + *out = new(bool) + **out = **in + } + if in.IncludeQueryString != nil { + in, out := &in.IncludeQueryString, &out.IncludeQueryString + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RedirectType != nil { + in, out := &in.RedirectType, &out.RedirectType + *out = new(string) + **out = **in + } + if in.TargetListenerName != nil { + in, out := &in.TargetListenerName, &out.TargetListenerName + *out = new(string) + **out = **in + } + if in.TargetURL != nil { + in, out := &in.TargetURL, &out.TargetURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedirectConfigurationParameters. +func (in *RedirectConfigurationParameters) DeepCopy() *RedirectConfigurationParameters { + if in == nil { + return nil + } + out := new(RedirectConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RequestHeaderConfigurationInitParameters) DeepCopyInto(out *RequestHeaderConfigurationInitParameters) { + *out = *in + if in.HeaderName != nil { + in, out := &in.HeaderName, &out.HeaderName + *out = new(string) + **out = **in + } + if in.HeaderValue != nil { + in, out := &in.HeaderValue, &out.HeaderValue + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestHeaderConfigurationInitParameters. 
func (in *RequestHeaderConfigurationInitParameters) DeepCopy() *RequestHeaderConfigurationInitParameters {
	// NOTE(review): upjet-generated code ("DO NOT EDIT" header) — formatting/comments only; hand edits are overwritten on regeneration.
	if in == nil {
		return nil
	}
	out := new(RequestHeaderConfigurationInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RequestHeaderConfigurationObservation) DeepCopyInto(out *RequestHeaderConfigurationObservation) {
	*out = *in
	if in.HeaderName != nil {
		in, out := &in.HeaderName, &out.HeaderName
		*out = new(string)
		**out = **in
	}
	if in.HeaderValue != nil {
		in, out := &in.HeaderValue, &out.HeaderValue
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestHeaderConfigurationObservation.
func (in *RequestHeaderConfigurationObservation) DeepCopy() *RequestHeaderConfigurationObservation {
	if in == nil {
		return nil
	}
	out := new(RequestHeaderConfigurationObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RequestHeaderConfigurationParameters) DeepCopyInto(out *RequestHeaderConfigurationParameters) {
	*out = *in
	if in.HeaderName != nil {
		in, out := &in.HeaderName, &out.HeaderName
		*out = new(string)
		**out = **in
	}
	if in.HeaderValue != nil {
		in, out := &in.HeaderValue, &out.HeaderValue
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestHeaderConfigurationParameters.
func (in *RequestHeaderConfigurationParameters) DeepCopy() *RequestHeaderConfigurationParameters {
	if in == nil {
		return nil
	}
	out := new(RequestHeaderConfigurationParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RequestHeaderInitParameters) DeepCopyInto(out *RequestHeaderInitParameters) {
	*out = *in
	if in.HeaderActionType != nil {
		in, out := &in.HeaderActionType, &out.HeaderActionType
		*out = new(string)
		**out = **in
	}
	if in.HeaderName != nil {
		in, out := &in.HeaderName, &out.HeaderName
		*out = new(string)
		**out = **in
	}
	if in.Value != nil {
		in, out := &in.Value, &out.Value
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestHeaderInitParameters.
func (in *RequestHeaderInitParameters) DeepCopy() *RequestHeaderInitParameters {
	if in == nil {
		return nil
	}
	out := new(RequestHeaderInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RequestHeaderObservation) DeepCopyInto(out *RequestHeaderObservation) {
	*out = *in
	if in.HeaderActionType != nil {
		in, out := &in.HeaderActionType, &out.HeaderActionType
		*out = new(string)
		**out = **in
	}
	if in.HeaderName != nil {
		in, out := &in.HeaderName, &out.HeaderName
		*out = new(string)
		**out = **in
	}
	if in.Value != nil {
		in, out := &in.Value, &out.Value
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestHeaderObservation.
func (in *RequestHeaderObservation) DeepCopy() *RequestHeaderObservation {
	if in == nil {
		return nil
	}
	out := new(RequestHeaderObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RequestHeaderParameters) DeepCopyInto(out *RequestHeaderParameters) {
	// NOTE(review): upjet-generated code ("DO NOT EDIT" header) — formatting/comments only; hand edits are overwritten on regeneration.
	*out = *in
	if in.HeaderActionType != nil {
		in, out := &in.HeaderActionType, &out.HeaderActionType
		*out = new(string)
		**out = **in
	}
	if in.HeaderName != nil {
		in, out := &in.HeaderName, &out.HeaderName
		*out = new(string)
		**out = **in
	}
	if in.Value != nil {
		in, out := &in.Value, &out.Value
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestHeaderParameters.
func (in *RequestHeaderParameters) DeepCopy() *RequestHeaderParameters {
	if in == nil {
		return nil
	}
	out := new(RequestHeaderParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RequestRoutingRuleInitParameters) DeepCopyInto(out *RequestRoutingRuleInitParameters) {
	*out = *in
	if in.BackendAddressPoolName != nil {
		in, out := &in.BackendAddressPoolName, &out.BackendAddressPoolName
		*out = new(string)
		**out = **in
	}
	if in.BackendHTTPSettingsName != nil {
		in, out := &in.BackendHTTPSettingsName, &out.BackendHTTPSettingsName
		*out = new(string)
		**out = **in
	}
	if in.HTTPListenerName != nil {
		in, out := &in.HTTPListenerName, &out.HTTPListenerName
		*out = new(string)
		**out = **in
	}
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
	if in.Priority != nil {
		in, out := &in.Priority, &out.Priority
		*out = new(float64)
		**out = **in
	}
	if in.RedirectConfigurationName != nil {
		in, out := &in.RedirectConfigurationName, &out.RedirectConfigurationName
		*out = new(string)
		**out = **in
	}
	if in.RewriteRuleSetName != nil {
		in, out := &in.RewriteRuleSetName, &out.RewriteRuleSetName
		*out = new(string)
		**out = **in
	}
	if in.RuleType != nil {
		in, out := &in.RuleType, &out.RuleType
		*out = new(string)
		**out = **in
	}
	if in.URLPathMapName != nil {
		in, out := &in.URLPathMapName, &out.URLPathMapName
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestRoutingRuleInitParameters.
func (in *RequestRoutingRuleInitParameters) DeepCopy() *RequestRoutingRuleInitParameters {
	if in == nil {
		return nil
	}
	out := new(RequestRoutingRuleInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RequestRoutingRuleObservation) DeepCopyInto(out *RequestRoutingRuleObservation) {
	*out = *in
	if in.BackendAddressPoolID != nil {
		in, out := &in.BackendAddressPoolID, &out.BackendAddressPoolID
		*out = new(string)
		**out = **in
	}
	if in.BackendAddressPoolName != nil {
		in, out := &in.BackendAddressPoolName, &out.BackendAddressPoolName
		*out = new(string)
		**out = **in
	}
	if in.BackendHTTPSettingsID != nil {
		in, out := &in.BackendHTTPSettingsID, &out.BackendHTTPSettingsID
		*out = new(string)
		**out = **in
	}
	if in.BackendHTTPSettingsName != nil {
		in, out := &in.BackendHTTPSettingsName, &out.BackendHTTPSettingsName
		*out = new(string)
		**out = **in
	}
	if in.HTTPListenerID != nil {
		in, out := &in.HTTPListenerID, &out.HTTPListenerID
		*out = new(string)
		**out = **in
	}
	if in.HTTPListenerName != nil {
		in, out := &in.HTTPListenerName, &out.HTTPListenerName
		*out = new(string)
		**out = **in
	}
	if in.ID != nil {
		in, out := &in.ID, &out.ID
		*out = new(string)
		**out = **in
	}
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
	if in.Priority != nil {
		in, out := &in.Priority, &out.Priority
		*out = new(float64)
		**out = **in
	}
	if in.RedirectConfigurationID != nil {
		in, out := &in.RedirectConfigurationID, &out.RedirectConfigurationID
		*out = new(string)
		**out = **in
	}
	if in.RedirectConfigurationName != nil {
		in, out := &in.RedirectConfigurationName, &out.RedirectConfigurationName
		*out = new(string)
		**out = **in
	}
	if in.RewriteRuleSetID != nil {
		in, out := &in.RewriteRuleSetID, &out.RewriteRuleSetID
		*out = new(string)
		**out = **in
	}
	if in.RewriteRuleSetName != nil {
		in, out := &in.RewriteRuleSetName, &out.RewriteRuleSetName
		*out = new(string)
		**out = **in
	}
	if in.RuleType != nil {
		in, out := &in.RuleType, &out.RuleType
		*out = new(string)
		**out = **in
	}
	if in.URLPathMapID != nil {
		in, out := &in.URLPathMapID, &out.URLPathMapID
		*out = new(string)
		**out = **in
	}
	if in.URLPathMapName != nil {
		in, out := &in.URLPathMapName, &out.URLPathMapName
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestRoutingRuleObservation.
func (in *RequestRoutingRuleObservation) DeepCopy() *RequestRoutingRuleObservation {
	if in == nil {
		return nil
	}
	out := new(RequestRoutingRuleObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RequestRoutingRuleParameters) DeepCopyInto(out *RequestRoutingRuleParameters) {
	*out = *in
	if in.BackendAddressPoolName != nil {
		in, out := &in.BackendAddressPoolName, &out.BackendAddressPoolName
		*out = new(string)
		**out = **in
	}
	if in.BackendHTTPSettingsName != nil {
		in, out := &in.BackendHTTPSettingsName, &out.BackendHTTPSettingsName
		*out = new(string)
		**out = **in
	}
	if in.HTTPListenerName != nil {
		in, out := &in.HTTPListenerName, &out.HTTPListenerName
		*out = new(string)
		**out = **in
	}
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
	if in.Priority != nil {
		in, out := &in.Priority, &out.Priority
		*out = new(float64)
		**out = **in
	}
	if in.RedirectConfigurationName != nil {
		in, out := &in.RedirectConfigurationName, &out.RedirectConfigurationName
		*out = new(string)
		**out = **in
	}
	if in.RewriteRuleSetName != nil {
		in, out := &in.RewriteRuleSetName, &out.RewriteRuleSetName
		*out = new(string)
		**out = **in
	}
	if in.RuleType != nil {
		in, out := &in.RuleType, &out.RuleType
		*out = new(string)
		**out = **in
	}
	if in.URLPathMapName != nil {
		in, out := &in.URLPathMapName, &out.URLPathMapName
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestRoutingRuleParameters.
func (in *RequestRoutingRuleParameters) DeepCopy() *RequestRoutingRuleParameters {
	if in == nil {
		return nil
	}
	out := new(RequestRoutingRuleParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResponseHeaderConfigurationInitParameters) DeepCopyInto(out *ResponseHeaderConfigurationInitParameters) {
	// NOTE(review): upjet-generated code ("DO NOT EDIT" header) — formatting/comments only; hand edits are overwritten on regeneration.
	*out = *in
	if in.HeaderName != nil {
		in, out := &in.HeaderName, &out.HeaderName
		*out = new(string)
		**out = **in
	}
	if in.HeaderValue != nil {
		in, out := &in.HeaderValue, &out.HeaderValue
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResponseHeaderConfigurationInitParameters.
func (in *ResponseHeaderConfigurationInitParameters) DeepCopy() *ResponseHeaderConfigurationInitParameters {
	if in == nil {
		return nil
	}
	out := new(ResponseHeaderConfigurationInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResponseHeaderConfigurationObservation) DeepCopyInto(out *ResponseHeaderConfigurationObservation) {
	*out = *in
	if in.HeaderName != nil {
		in, out := &in.HeaderName, &out.HeaderName
		*out = new(string)
		**out = **in
	}
	if in.HeaderValue != nil {
		in, out := &in.HeaderValue, &out.HeaderValue
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResponseHeaderConfigurationObservation.
func (in *ResponseHeaderConfigurationObservation) DeepCopy() *ResponseHeaderConfigurationObservation {
	if in == nil {
		return nil
	}
	out := new(ResponseHeaderConfigurationObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResponseHeaderConfigurationParameters) DeepCopyInto(out *ResponseHeaderConfigurationParameters) {
	*out = *in
	if in.HeaderName != nil {
		in, out := &in.HeaderName, &out.HeaderName
		*out = new(string)
		**out = **in
	}
	if in.HeaderValue != nil {
		in, out := &in.HeaderValue, &out.HeaderValue
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResponseHeaderConfigurationParameters.
func (in *ResponseHeaderConfigurationParameters) DeepCopy() *ResponseHeaderConfigurationParameters {
	if in == nil {
		return nil
	}
	out := new(ResponseHeaderConfigurationParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResponseHeaderInitParameters) DeepCopyInto(out *ResponseHeaderInitParameters) {
	*out = *in
	if in.HeaderActionType != nil {
		in, out := &in.HeaderActionType, &out.HeaderActionType
		*out = new(string)
		**out = **in
	}
	if in.HeaderName != nil {
		in, out := &in.HeaderName, &out.HeaderName
		*out = new(string)
		**out = **in
	}
	if in.Value != nil {
		in, out := &in.Value, &out.Value
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResponseHeaderInitParameters.
func (in *ResponseHeaderInitParameters) DeepCopy() *ResponseHeaderInitParameters {
	if in == nil {
		return nil
	}
	out := new(ResponseHeaderInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResponseHeaderObservation) DeepCopyInto(out *ResponseHeaderObservation) {
	*out = *in
	if in.HeaderActionType != nil {
		in, out := &in.HeaderActionType, &out.HeaderActionType
		*out = new(string)
		**out = **in
	}
	if in.HeaderName != nil {
		in, out := &in.HeaderName, &out.HeaderName
		*out = new(string)
		**out = **in
	}
	if in.Value != nil {
		in, out := &in.Value, &out.Value
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResponseHeaderObservation.
func (in *ResponseHeaderObservation) DeepCopy() *ResponseHeaderObservation {
	if in == nil {
		return nil
	}
	out := new(ResponseHeaderObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResponseHeaderParameters) DeepCopyInto(out *ResponseHeaderParameters) {
	*out = *in
	if in.HeaderActionType != nil {
		in, out := &in.HeaderActionType, &out.HeaderActionType
		*out = new(string)
		**out = **in
	}
	if in.HeaderName != nil {
		in, out := &in.HeaderName, &out.HeaderName
		*out = new(string)
		**out = **in
	}
	if in.Value != nil {
		in, out := &in.Value, &out.Value
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResponseHeaderParameters.
func (in *ResponseHeaderParameters) DeepCopy() *ResponseHeaderParameters {
	if in == nil {
		return nil
	}
	out := new(ResponseHeaderParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RetentionPolicyInitParameters) DeepCopyInto(out *RetentionPolicyInitParameters) {
	// NOTE(review): upjet-generated code ("DO NOT EDIT" header) — formatting/comments only; hand edits are overwritten on regeneration.
	*out = *in
	if in.Days != nil {
		in, out := &in.Days, &out.Days
		*out = new(float64)
		**out = **in
	}
	if in.Enabled != nil {
		in, out := &in.Enabled, &out.Enabled
		*out = new(bool)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionPolicyInitParameters.
func (in *RetentionPolicyInitParameters) DeepCopy() *RetentionPolicyInitParameters {
	if in == nil {
		return nil
	}
	out := new(RetentionPolicyInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RetentionPolicyObservation) DeepCopyInto(out *RetentionPolicyObservation) {
	*out = *in
	if in.Days != nil {
		in, out := &in.Days, &out.Days
		*out = new(float64)
		**out = **in
	}
	if in.Enabled != nil {
		in, out := &in.Enabled, &out.Enabled
		*out = new(bool)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionPolicyObservation.
func (in *RetentionPolicyObservation) DeepCopy() *RetentionPolicyObservation {
	if in == nil {
		return nil
	}
	out := new(RetentionPolicyObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RetentionPolicyParameters) DeepCopyInto(out *RetentionPolicyParameters) {
	*out = *in
	if in.Days != nil {
		in, out := &in.Days, &out.Days
		*out = new(float64)
		**out = **in
	}
	if in.Enabled != nil {
		in, out := &in.Enabled, &out.Enabled
		*out = new(bool)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionPolicyParameters.
func (in *RetentionPolicyParameters) DeepCopy() *RetentionPolicyParameters {
	if in == nil {
		return nil
	}
	out := new(RetentionPolicyParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RevokedCertificateInitParameters) DeepCopyInto(out *RevokedCertificateInitParameters) {
	*out = *in
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
	if in.Thumbprint != nil {
		in, out := &in.Thumbprint, &out.Thumbprint
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RevokedCertificateInitParameters.
func (in *RevokedCertificateInitParameters) DeepCopy() *RevokedCertificateInitParameters {
	if in == nil {
		return nil
	}
	out := new(RevokedCertificateInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RevokedCertificateObservation) DeepCopyInto(out *RevokedCertificateObservation) {
	*out = *in
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
	if in.Thumbprint != nil {
		in, out := &in.Thumbprint, &out.Thumbprint
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RevokedCertificateObservation.
func (in *RevokedCertificateObservation) DeepCopy() *RevokedCertificateObservation {
	if in == nil {
		return nil
	}
	out := new(RevokedCertificateObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RevokedCertificateParameters) DeepCopyInto(out *RevokedCertificateParameters) {
	// NOTE(review): upjet-generated code ("DO NOT EDIT" header) — formatting/comments only; hand edits are overwritten on regeneration.
	*out = *in
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
	if in.Thumbprint != nil {
		in, out := &in.Thumbprint, &out.Thumbprint
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RevokedCertificateParameters.
func (in *RevokedCertificateParameters) DeepCopy() *RevokedCertificateParameters {
	if in == nil {
		return nil
	}
	out := new(RevokedCertificateParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RewriteRuleInitParameters) DeepCopyInto(out *RewriteRuleInitParameters) {
	*out = *in
	if in.Condition != nil {
		in, out := &in.Condition, &out.Condition
		*out = make([]ConditionInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
	if in.RequestHeaderConfiguration != nil {
		in, out := &in.RequestHeaderConfiguration, &out.RequestHeaderConfiguration
		*out = make([]RequestHeaderConfigurationInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ResponseHeaderConfiguration != nil {
		in, out := &in.ResponseHeaderConfiguration, &out.ResponseHeaderConfiguration
		*out = make([]ResponseHeaderConfigurationInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.RuleSequence != nil {
		in, out := &in.RuleSequence, &out.RuleSequence
		*out = new(float64)
		**out = **in
	}
	if in.URL != nil {
		in, out := &in.URL, &out.URL
		*out = new(URLInitParameters)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RewriteRuleInitParameters.
func (in *RewriteRuleInitParameters) DeepCopy() *RewriteRuleInitParameters {
	if in == nil {
		return nil
	}
	out := new(RewriteRuleInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RewriteRuleObservation) DeepCopyInto(out *RewriteRuleObservation) {
	*out = *in
	if in.Condition != nil {
		in, out := &in.Condition, &out.Condition
		*out = make([]ConditionObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
	if in.RequestHeaderConfiguration != nil {
		in, out := &in.RequestHeaderConfiguration, &out.RequestHeaderConfiguration
		*out = make([]RequestHeaderConfigurationObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ResponseHeaderConfiguration != nil {
		in, out := &in.ResponseHeaderConfiguration, &out.ResponseHeaderConfiguration
		*out = make([]ResponseHeaderConfigurationObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.RuleSequence != nil {
		in, out := &in.RuleSequence, &out.RuleSequence
		*out = new(float64)
		**out = **in
	}
	if in.URL != nil {
		in, out := &in.URL, &out.URL
		*out = new(URLObservation)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RewriteRuleObservation.
func (in *RewriteRuleObservation) DeepCopy() *RewriteRuleObservation {
	if in == nil {
		return nil
	}
	out := new(RewriteRuleObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RewriteRuleParameters) DeepCopyInto(out *RewriteRuleParameters) {
	*out = *in
	if in.Condition != nil {
		in, out := &in.Condition, &out.Condition
		*out = make([]ConditionParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
	if in.RequestHeaderConfiguration != nil {
		in, out := &in.RequestHeaderConfiguration, &out.RequestHeaderConfiguration
		*out = make([]RequestHeaderConfigurationParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.ResponseHeaderConfiguration != nil {
		in, out := &in.ResponseHeaderConfiguration, &out.ResponseHeaderConfiguration
		*out = make([]ResponseHeaderConfigurationParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.RuleSequence != nil {
		in, out := &in.RuleSequence, &out.RuleSequence
		*out = new(float64)
		**out = **in
	}
	if in.URL != nil {
		in, out := &in.URL, &out.URL
		*out = new(URLParameters)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RewriteRuleParameters.
func (in *RewriteRuleParameters) DeepCopy() *RewriteRuleParameters {
	if in == nil {
		return nil
	}
	out := new(RewriteRuleParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RewriteRuleSetInitParameters) DeepCopyInto(out *RewriteRuleSetInitParameters) {
	// NOTE(review): upjet-generated code ("DO NOT EDIT" header) — formatting/comments only; hand edits are overwritten on regeneration.
	*out = *in
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
	if in.RewriteRule != nil {
		in, out := &in.RewriteRule, &out.RewriteRule
		*out = make([]RewriteRuleInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RewriteRuleSetInitParameters.
func (in *RewriteRuleSetInitParameters) DeepCopy() *RewriteRuleSetInitParameters {
	if in == nil {
		return nil
	}
	out := new(RewriteRuleSetInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RewriteRuleSetObservation) DeepCopyInto(out *RewriteRuleSetObservation) {
	*out = *in
	if in.ID != nil {
		in, out := &in.ID, &out.ID
		*out = new(string)
		**out = **in
	}
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
	if in.RewriteRule != nil {
		in, out := &in.RewriteRule, &out.RewriteRule
		*out = make([]RewriteRuleObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RewriteRuleSetObservation.
func (in *RewriteRuleSetObservation) DeepCopy() *RewriteRuleSetObservation {
	if in == nil {
		return nil
	}
	out := new(RewriteRuleSetObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RewriteRuleSetParameters) DeepCopyInto(out *RewriteRuleSetParameters) {
	*out = *in
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
	if in.RewriteRule != nil {
		in, out := &in.RewriteRule, &out.RewriteRule
		*out = make([]RewriteRuleParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RewriteRuleSetParameters.
func (in *RewriteRuleSetParameters) DeepCopy() *RewriteRuleSetParameters {
	if in == nil {
		return nil
	}
	out := new(RewriteRuleSetParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RootCertificateInitParameters) DeepCopyInto(out *RootCertificateInitParameters) {
	*out = *in
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
	if in.PublicCertData != nil {
		in, out := &in.PublicCertData, &out.PublicCertData
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RootCertificateInitParameters.
func (in *RootCertificateInitParameters) DeepCopy() *RootCertificateInitParameters {
	if in == nil {
		return nil
	}
	out := new(RootCertificateInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RootCertificateObservation) DeepCopyInto(out *RootCertificateObservation) {
	*out = *in
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
	if in.PublicCertData != nil {
		in, out := &in.PublicCertData, &out.PublicCertData
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RootCertificateObservation.
func (in *RootCertificateObservation) DeepCopy() *RootCertificateObservation {
	if in == nil {
		return nil
	}
	out := new(RootCertificateObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RootCertificateParameters) DeepCopyInto(out *RootCertificateParameters) {
	*out = *in
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
	if in.PublicCertData != nil {
		in, out := &in.PublicCertData, &out.PublicCertData
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RootCertificateParameters.
func (in *RootCertificateParameters) DeepCopy() *RootCertificateParameters {
	if in == nil {
		return nil
	}
	out := new(RootCertificateParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RouteInitParameters) DeepCopyInto(out *RouteInitParameters) {
	// NOTE(review): upjet-generated code ("DO NOT EDIT" header) — formatting/comments only; hand edits are overwritten on regeneration.
	*out = *in
	if in.AssociatedRouteTableID != nil {
		in, out := &in.AssociatedRouteTableID, &out.AssociatedRouteTableID
		*out = new(string)
		**out = **in
	}
	if in.InboundRouteMapID != nil {
		in, out := &in.InboundRouteMapID, &out.InboundRouteMapID
		*out = new(string)
		**out = **in
	}
	if in.OutboundRouteMapID != nil {
		in, out := &in.OutboundRouteMapID, &out.OutboundRouteMapID
		*out = new(string)
		**out = **in
	}
	if in.PropagatedRouteTable != nil {
		in, out := &in.PropagatedRouteTable, &out.PropagatedRouteTable
		*out = new(RoutePropagatedRouteTableInitParameters)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteInitParameters.
func (in *RouteInitParameters) DeepCopy() *RouteInitParameters {
	if in == nil {
		return nil
	}
	out := new(RouteInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RouteObservation) DeepCopyInto(out *RouteObservation) {
	*out = *in
	if in.AssociatedRouteTableID != nil {
		in, out := &in.AssociatedRouteTableID, &out.AssociatedRouteTableID
		*out = new(string)
		**out = **in
	}
	if in.InboundRouteMapID != nil {
		in, out := &in.InboundRouteMapID, &out.InboundRouteMapID
		*out = new(string)
		**out = **in
	}
	if in.OutboundRouteMapID != nil {
		in, out := &in.OutboundRouteMapID, &out.OutboundRouteMapID
		*out = new(string)
		**out = **in
	}
	if in.PropagatedRouteTable != nil {
		in, out := &in.PropagatedRouteTable, &out.PropagatedRouteTable
		*out = new(RoutePropagatedRouteTableObservation)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteObservation.
func (in *RouteObservation) DeepCopy() *RouteObservation {
	if in == nil {
		return nil
	}
	out := new(RouteObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RouteParameters) DeepCopyInto(out *RouteParameters) {
	*out = *in
	if in.AssociatedRouteTableID != nil {
		in, out := &in.AssociatedRouteTableID, &out.AssociatedRouteTableID
		*out = new(string)
		**out = **in
	}
	if in.InboundRouteMapID != nil {
		in, out := &in.InboundRouteMapID, &out.InboundRouteMapID
		*out = new(string)
		**out = **in
	}
	if in.OutboundRouteMapID != nil {
		in, out := &in.OutboundRouteMapID, &out.OutboundRouteMapID
		*out = new(string)
		**out = **in
	}
	if in.PropagatedRouteTable != nil {
		in, out := &in.PropagatedRouteTable, &out.PropagatedRouteTable
		*out = new(RoutePropagatedRouteTableParameters)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteParameters.
func (in *RouteParameters) DeepCopy() *RouteParameters {
	if in == nil {
		return nil
	}
	out := new(RouteParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RoutePropagatedRouteTableInitParameters) DeepCopyInto(out *RoutePropagatedRouteTableInitParameters) {
	// NOTE(review): upjet-generated code ("DO NOT EDIT" header) — formatting/comments only; hand edits are overwritten on regeneration.
	*out = *in
	if in.Ids != nil {
		in, out := &in.Ids, &out.Ids
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.Labels != nil {
		in, out := &in.Labels, &out.Labels
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutePropagatedRouteTableInitParameters.
func (in *RoutePropagatedRouteTableInitParameters) DeepCopy() *RoutePropagatedRouteTableInitParameters {
	if in == nil {
		return nil
	}
	out := new(RoutePropagatedRouteTableInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RoutePropagatedRouteTableObservation) DeepCopyInto(out *RoutePropagatedRouteTableObservation) {
	*out = *in
	if in.Ids != nil {
		in, out := &in.Ids, &out.Ids
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.Labels != nil {
		in, out := &in.Labels, &out.Labels
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutePropagatedRouteTableObservation.
func (in *RoutePropagatedRouteTableObservation) DeepCopy() *RoutePropagatedRouteTableObservation {
	if in == nil {
		return nil
	}
	out := new(RoutePropagatedRouteTableObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RoutePropagatedRouteTableParameters) DeepCopyInto(out *RoutePropagatedRouteTableParameters) {
	*out = *in
	if in.Ids != nil {
		in, out := &in.Ids, &out.Ids
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.Labels != nil {
		in, out := &in.Labels, &out.Labels
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutePropagatedRouteTableParameters.
func (in *RoutePropagatedRouteTableParameters) DeepCopy() *RoutePropagatedRouteTableParameters {
	if in == nil {
		return nil
	}
	out := new(RoutePropagatedRouteTableParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RoutingInitParameters) DeepCopyInto(out *RoutingInitParameters) { + *out = *in + if in.AssociatedRouteTableID != nil { + in, out := &in.AssociatedRouteTableID, &out.AssociatedRouteTableID + *out = new(string) + **out = **in + } + if in.InboundRouteMapID != nil { + in, out := &in.InboundRouteMapID, &out.InboundRouteMapID + *out = new(string) + **out = **in + } + if in.OutboundRouteMapID != nil { + in, out := &in.OutboundRouteMapID, &out.OutboundRouteMapID + *out = new(string) + **out = **in + } + if in.PropagatedRouteTable != nil { + in, out := &in.PropagatedRouteTable, &out.PropagatedRouteTable + *out = new(PropagatedRouteTableInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutingInitParameters. +func (in *RoutingInitParameters) DeepCopy() *RoutingInitParameters { + if in == nil { + return nil + } + out := new(RoutingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoutingObservation) DeepCopyInto(out *RoutingObservation) { + *out = *in + if in.AssociatedRouteTableID != nil { + in, out := &in.AssociatedRouteTableID, &out.AssociatedRouteTableID + *out = new(string) + **out = **in + } + if in.InboundRouteMapID != nil { + in, out := &in.InboundRouteMapID, &out.InboundRouteMapID + *out = new(string) + **out = **in + } + if in.OutboundRouteMapID != nil { + in, out := &in.OutboundRouteMapID, &out.OutboundRouteMapID + *out = new(string) + **out = **in + } + if in.PropagatedRouteTable != nil { + in, out := &in.PropagatedRouteTable, &out.PropagatedRouteTable + *out = new(PropagatedRouteTableObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutingObservation. 
+func (in *RoutingObservation) DeepCopy() *RoutingObservation { + if in == nil { + return nil + } + out := new(RoutingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoutingParameters) DeepCopyInto(out *RoutingParameters) { + *out = *in + if in.AssociatedRouteTableID != nil { + in, out := &in.AssociatedRouteTableID, &out.AssociatedRouteTableID + *out = new(string) + **out = **in + } + if in.InboundRouteMapID != nil { + in, out := &in.InboundRouteMapID, &out.InboundRouteMapID + *out = new(string) + **out = **in + } + if in.OutboundRouteMapID != nil { + in, out := &in.OutboundRouteMapID, &out.OutboundRouteMapID + *out = new(string) + **out = **in + } + if in.PropagatedRouteTable != nil { + in, out := &in.PropagatedRouteTable, &out.PropagatedRouteTable + *out = new(PropagatedRouteTableParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutingParameters. +func (in *RoutingParameters) DeepCopy() *RoutingParameters { + if in == nil { + return nil + } + out := new(RoutingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RoutingPropagatedRouteTableInitParameters) DeepCopyInto(out *RoutingPropagatedRouteTableInitParameters) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RouteTableIds != nil { + in, out := &in.RouteTableIds, &out.RouteTableIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutingPropagatedRouteTableInitParameters. +func (in *RoutingPropagatedRouteTableInitParameters) DeepCopy() *RoutingPropagatedRouteTableInitParameters { + if in == nil { + return nil + } + out := new(RoutingPropagatedRouteTableInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoutingPropagatedRouteTableObservation) DeepCopyInto(out *RoutingPropagatedRouteTableObservation) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RouteTableIds != nil { + in, out := &in.RouteTableIds, &out.RouteTableIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutingPropagatedRouteTableObservation. 
+func (in *RoutingPropagatedRouteTableObservation) DeepCopy() *RoutingPropagatedRouteTableObservation { + if in == nil { + return nil + } + out := new(RoutingPropagatedRouteTableObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoutingPropagatedRouteTableParameters) DeepCopyInto(out *RoutingPropagatedRouteTableParameters) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RouteTableIds != nil { + in, out := &in.RouteTableIds, &out.RouteTableIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutingPropagatedRouteTableParameters. +func (in *RoutingPropagatedRouteTableParameters) DeepCopy() *RoutingPropagatedRouteTableParameters { + if in == nil { + return nil + } + out := new(RoutingPropagatedRouteTableParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RoutingRuleInitParameters) DeepCopyInto(out *RoutingRuleInitParameters) { + *out = *in + if in.AcceptedProtocols != nil { + in, out := &in.AcceptedProtocols, &out.AcceptedProtocols + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.ForwardingConfiguration != nil { + in, out := &in.ForwardingConfiguration, &out.ForwardingConfiguration + *out = new(ForwardingConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FrontendEndpoints != nil { + in, out := &in.FrontendEndpoints, &out.FrontendEndpoints + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PatternsToMatch != nil { + in, out := &in.PatternsToMatch, &out.PatternsToMatch + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RedirectConfiguration != nil { + in, out := &in.RedirectConfiguration, &out.RedirectConfiguration + *out = new(RoutingRuleRedirectConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutingRuleInitParameters. +func (in *RoutingRuleInitParameters) DeepCopy() *RoutingRuleInitParameters { + if in == nil { + return nil + } + out := new(RoutingRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RoutingRuleObservation) DeepCopyInto(out *RoutingRuleObservation) { + *out = *in + if in.AcceptedProtocols != nil { + in, out := &in.AcceptedProtocols, &out.AcceptedProtocols + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.ForwardingConfiguration != nil { + in, out := &in.ForwardingConfiguration, &out.ForwardingConfiguration + *out = new(ForwardingConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.FrontendEndpoints != nil { + in, out := &in.FrontendEndpoints, &out.FrontendEndpoints + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PatternsToMatch != nil { + in, out := &in.PatternsToMatch, &out.PatternsToMatch + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RedirectConfiguration != nil { + in, out := &in.RedirectConfiguration, &out.RedirectConfiguration + *out = new(RoutingRuleRedirectConfigurationObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutingRuleObservation. +func (in *RoutingRuleObservation) DeepCopy() *RoutingRuleObservation { + if in == nil { + return nil + } + out := new(RoutingRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RoutingRuleParameters) DeepCopyInto(out *RoutingRuleParameters) { + *out = *in + if in.AcceptedProtocols != nil { + in, out := &in.AcceptedProtocols, &out.AcceptedProtocols + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.ForwardingConfiguration != nil { + in, out := &in.ForwardingConfiguration, &out.ForwardingConfiguration + *out = new(ForwardingConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.FrontendEndpoints != nil { + in, out := &in.FrontendEndpoints, &out.FrontendEndpoints + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PatternsToMatch != nil { + in, out := &in.PatternsToMatch, &out.PatternsToMatch + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RedirectConfiguration != nil { + in, out := &in.RedirectConfiguration, &out.RedirectConfiguration + *out = new(RoutingRuleRedirectConfigurationParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutingRuleParameters. +func (in *RoutingRuleParameters) DeepCopy() *RoutingRuleParameters { + if in == nil { + return nil + } + out := new(RoutingRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RoutingRuleRedirectConfigurationInitParameters) DeepCopyInto(out *RoutingRuleRedirectConfigurationInitParameters) { + *out = *in + if in.CustomFragment != nil { + in, out := &in.CustomFragment, &out.CustomFragment + *out = new(string) + **out = **in + } + if in.CustomHost != nil { + in, out := &in.CustomHost, &out.CustomHost + *out = new(string) + **out = **in + } + if in.CustomPath != nil { + in, out := &in.CustomPath, &out.CustomPath + *out = new(string) + **out = **in + } + if in.CustomQueryString != nil { + in, out := &in.CustomQueryString, &out.CustomQueryString + *out = new(string) + **out = **in + } + if in.RedirectProtocol != nil { + in, out := &in.RedirectProtocol, &out.RedirectProtocol + *out = new(string) + **out = **in + } + if in.RedirectType != nil { + in, out := &in.RedirectType, &out.RedirectType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutingRuleRedirectConfigurationInitParameters. +func (in *RoutingRuleRedirectConfigurationInitParameters) DeepCopy() *RoutingRuleRedirectConfigurationInitParameters { + if in == nil { + return nil + } + out := new(RoutingRuleRedirectConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RoutingRuleRedirectConfigurationObservation) DeepCopyInto(out *RoutingRuleRedirectConfigurationObservation) { + *out = *in + if in.CustomFragment != nil { + in, out := &in.CustomFragment, &out.CustomFragment + *out = new(string) + **out = **in + } + if in.CustomHost != nil { + in, out := &in.CustomHost, &out.CustomHost + *out = new(string) + **out = **in + } + if in.CustomPath != nil { + in, out := &in.CustomPath, &out.CustomPath + *out = new(string) + **out = **in + } + if in.CustomQueryString != nil { + in, out := &in.CustomQueryString, &out.CustomQueryString + *out = new(string) + **out = **in + } + if in.RedirectProtocol != nil { + in, out := &in.RedirectProtocol, &out.RedirectProtocol + *out = new(string) + **out = **in + } + if in.RedirectType != nil { + in, out := &in.RedirectType, &out.RedirectType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutingRuleRedirectConfigurationObservation. +func (in *RoutingRuleRedirectConfigurationObservation) DeepCopy() *RoutingRuleRedirectConfigurationObservation { + if in == nil { + return nil + } + out := new(RoutingRuleRedirectConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RoutingRuleRedirectConfigurationParameters) DeepCopyInto(out *RoutingRuleRedirectConfigurationParameters) { + *out = *in + if in.CustomFragment != nil { + in, out := &in.CustomFragment, &out.CustomFragment + *out = new(string) + **out = **in + } + if in.CustomHost != nil { + in, out := &in.CustomHost, &out.CustomHost + *out = new(string) + **out = **in + } + if in.CustomPath != nil { + in, out := &in.CustomPath, &out.CustomPath + *out = new(string) + **out = **in + } + if in.CustomQueryString != nil { + in, out := &in.CustomQueryString, &out.CustomQueryString + *out = new(string) + **out = **in + } + if in.RedirectProtocol != nil { + in, out := &in.RedirectProtocol, &out.RedirectProtocol + *out = new(string) + **out = **in + } + if in.RedirectType != nil { + in, out := &in.RedirectType, &out.RedirectType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutingRuleRedirectConfigurationParameters. +func (in *RoutingRuleRedirectConfigurationParameters) DeepCopy() *RoutingRuleRedirectConfigurationParameters { + if in == nil { + return nil + } + out := new(RoutingRuleRedirectConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleGroupInitParameters) DeepCopyInto(out *RuleGroupInitParameters) { + *out = *in + if in.ExcludedRules != nil { + in, out := &in.ExcludedRules, &out.ExcludedRules + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RuleGroupName != nil { + in, out := &in.RuleGroupName, &out.RuleGroupName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleGroupInitParameters. 
+func (in *RuleGroupInitParameters) DeepCopy() *RuleGroupInitParameters { + if in == nil { + return nil + } + out := new(RuleGroupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleGroupObservation) DeepCopyInto(out *RuleGroupObservation) { + *out = *in + if in.ExcludedRules != nil { + in, out := &in.ExcludedRules, &out.ExcludedRules + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RuleGroupName != nil { + in, out := &in.RuleGroupName, &out.RuleGroupName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleGroupObservation. +func (in *RuleGroupObservation) DeepCopy() *RuleGroupObservation { + if in == nil { + return nil + } + out := new(RuleGroupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleGroupOverrideInitParameters) DeepCopyInto(out *RuleGroupOverrideInitParameters) { + *out = *in + if in.DisabledRules != nil { + in, out := &in.DisabledRules, &out.DisabledRules + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Rule != nil { + in, out := &in.Rule, &out.Rule + *out = make([]RuleGroupOverrideRuleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RuleGroupName != nil { + in, out := &in.RuleGroupName, &out.RuleGroupName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleGroupOverrideInitParameters. 
+func (in *RuleGroupOverrideInitParameters) DeepCopy() *RuleGroupOverrideInitParameters { + if in == nil { + return nil + } + out := new(RuleGroupOverrideInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleGroupOverrideObservation) DeepCopyInto(out *RuleGroupOverrideObservation) { + *out = *in + if in.DisabledRules != nil { + in, out := &in.DisabledRules, &out.DisabledRules + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Rule != nil { + in, out := &in.Rule, &out.Rule + *out = make([]RuleGroupOverrideRuleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RuleGroupName != nil { + in, out := &in.RuleGroupName, &out.RuleGroupName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleGroupOverrideObservation. +func (in *RuleGroupOverrideObservation) DeepCopy() *RuleGroupOverrideObservation { + if in == nil { + return nil + } + out := new(RuleGroupOverrideObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RuleGroupOverrideParameters) DeepCopyInto(out *RuleGroupOverrideParameters) { + *out = *in + if in.DisabledRules != nil { + in, out := &in.DisabledRules, &out.DisabledRules + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Rule != nil { + in, out := &in.Rule, &out.Rule + *out = make([]RuleGroupOverrideRuleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RuleGroupName != nil { + in, out := &in.RuleGroupName, &out.RuleGroupName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleGroupOverrideParameters. +func (in *RuleGroupOverrideParameters) DeepCopy() *RuleGroupOverrideParameters { + if in == nil { + return nil + } + out := new(RuleGroupOverrideParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleGroupOverrideRuleInitParameters) DeepCopyInto(out *RuleGroupOverrideRuleInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleGroupOverrideRuleInitParameters. +func (in *RuleGroupOverrideRuleInitParameters) DeepCopy() *RuleGroupOverrideRuleInitParameters { + if in == nil { + return nil + } + out := new(RuleGroupOverrideRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RuleGroupOverrideRuleObservation) DeepCopyInto(out *RuleGroupOverrideRuleObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleGroupOverrideRuleObservation. +func (in *RuleGroupOverrideRuleObservation) DeepCopy() *RuleGroupOverrideRuleObservation { + if in == nil { + return nil + } + out := new(RuleGroupOverrideRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleGroupOverrideRuleParameters) DeepCopyInto(out *RuleGroupOverrideRuleParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleGroupOverrideRuleParameters. +func (in *RuleGroupOverrideRuleParameters) DeepCopy() *RuleGroupOverrideRuleParameters { + if in == nil { + return nil + } + out := new(RuleGroupOverrideRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RuleGroupParameters) DeepCopyInto(out *RuleGroupParameters) { + *out = *in + if in.ExcludedRules != nil { + in, out := &in.ExcludedRules, &out.ExcludedRules + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RuleGroupName != nil { + in, out := &in.RuleGroupName, &out.RuleGroupName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleGroupParameters. +func (in *RuleGroupParameters) DeepCopy() *RuleGroupParameters { + if in == nil { + return nil + } + out := new(RuleGroupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleInitParameters) DeepCopyInto(out *RuleInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(ActionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MatchCondition != nil { + in, out := &in.MatchCondition, &out.MatchCondition + *out = make([]MatchConditionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleInitParameters. +func (in *RuleInitParameters) DeepCopy() *RuleInitParameters { + if in == nil { + return nil + } + out := new(RuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RuleObservation) DeepCopyInto(out *RuleObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(ActionObservation) + (*in).DeepCopyInto(*out) + } + if in.MatchCondition != nil { + in, out := &in.MatchCondition, &out.MatchCondition + *out = make([]MatchConditionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleObservation. +func (in *RuleObservation) DeepCopy() *RuleObservation { + if in == nil { + return nil + } + out := new(RuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleParameters) DeepCopyInto(out *RuleParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(ActionParameters) + (*in).DeepCopyInto(*out) + } + if in.MatchCondition != nil { + in, out := &in.MatchCondition, &out.MatchCondition + *out = make([]MatchConditionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleParameters. +func (in *RuleParameters) DeepCopy() *RuleParameters { + if in == nil { + return nil + } + out := new(RuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SSLCertificateInitParameters) DeepCopyInto(out *SSLCertificateInitParameters) { + *out = *in + if in.KeyVaultSecretID != nil { + in, out := &in.KeyVaultSecretID, &out.KeyVaultSecretID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SSLCertificateInitParameters. +func (in *SSLCertificateInitParameters) DeepCopy() *SSLCertificateInitParameters { + if in == nil { + return nil + } + out := new(SSLCertificateInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SSLCertificateObservation) DeepCopyInto(out *SSLCertificateObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.KeyVaultSecretID != nil { + in, out := &in.KeyVaultSecretID, &out.KeyVaultSecretID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PublicCertData != nil { + in, out := &in.PublicCertData, &out.PublicCertData + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SSLCertificateObservation. +func (in *SSLCertificateObservation) DeepCopy() *SSLCertificateObservation { + if in == nil { + return nil + } + out := new(SSLCertificateObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SSLCertificateParameters) DeepCopyInto(out *SSLCertificateParameters) { + *out = *in + if in.DataSecretRef != nil { + in, out := &in.DataSecretRef, &out.DataSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.KeyVaultSecretID != nil { + in, out := &in.KeyVaultSecretID, &out.KeyVaultSecretID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PasswordSecretRef != nil { + in, out := &in.PasswordSecretRef, &out.PasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SSLCertificateParameters. +func (in *SSLCertificateParameters) DeepCopy() *SSLCertificateParameters { + if in == nil { + return nil + } + out := new(SSLCertificateParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SSLPolicyInitParameters) DeepCopyInto(out *SSLPolicyInitParameters) { + *out = *in + if in.CipherSuites != nil { + in, out := &in.CipherSuites, &out.CipherSuites + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DisabledProtocols != nil { + in, out := &in.DisabledProtocols, &out.DisabledProtocols + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MinProtocolVersion != nil { + in, out := &in.MinProtocolVersion, &out.MinProtocolVersion + *out = new(string) + **out = **in + } + if in.PolicyName != nil { + in, out := &in.PolicyName, &out.PolicyName + *out = new(string) + **out = **in + } + if in.PolicyType != nil { + in, out := &in.PolicyType, &out.PolicyType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SSLPolicyInitParameters. +func (in *SSLPolicyInitParameters) DeepCopy() *SSLPolicyInitParameters { + if in == nil { + return nil + } + out := new(SSLPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SSLPolicyObservation) DeepCopyInto(out *SSLPolicyObservation) { + *out = *in + if in.CipherSuites != nil { + in, out := &in.CipherSuites, &out.CipherSuites + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DisabledProtocols != nil { + in, out := &in.DisabledProtocols, &out.DisabledProtocols + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MinProtocolVersion != nil { + in, out := &in.MinProtocolVersion, &out.MinProtocolVersion + *out = new(string) + **out = **in + } + if in.PolicyName != nil { + in, out := &in.PolicyName, &out.PolicyName + *out = new(string) + **out = **in + } + if in.PolicyType != nil { + in, out := &in.PolicyType, &out.PolicyType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SSLPolicyObservation. +func (in *SSLPolicyObservation) DeepCopy() *SSLPolicyObservation { + if in == nil { + return nil + } + out := new(SSLPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SSLPolicyParameters) DeepCopyInto(out *SSLPolicyParameters) { + *out = *in + if in.CipherSuites != nil { + in, out := &in.CipherSuites, &out.CipherSuites + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DisabledProtocols != nil { + in, out := &in.DisabledProtocols, &out.DisabledProtocols + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MinProtocolVersion != nil { + in, out := &in.MinProtocolVersion, &out.MinProtocolVersion + *out = new(string) + **out = **in + } + if in.PolicyName != nil { + in, out := &in.PolicyName, &out.PolicyName + *out = new(string) + **out = **in + } + if in.PolicyType != nil { + in, out := &in.PolicyType, &out.PolicyType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SSLPolicyParameters. +func (in *SSLPolicyParameters) DeepCopy() *SSLPolicyParameters { + if in == nil { + return nil + } + out := new(SSLPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SSLProfileInitParameters) DeepCopyInto(out *SSLProfileInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SSLPolicy != nil { + in, out := &in.SSLPolicy, &out.SSLPolicy + *out = new(SSLProfileSSLPolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TrustedClientCertificateNames != nil { + in, out := &in.TrustedClientCertificateNames, &out.TrustedClientCertificateNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VerifyClientCertIssuerDn != nil { + in, out := &in.VerifyClientCertIssuerDn, &out.VerifyClientCertIssuerDn + *out = new(bool) + **out = **in + } + if in.VerifyClientCertificateRevocation != nil { + in, out := &in.VerifyClientCertificateRevocation, &out.VerifyClientCertificateRevocation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SSLProfileInitParameters. +func (in *SSLProfileInitParameters) DeepCopy() *SSLProfileInitParameters { + if in == nil { + return nil + } + out := new(SSLProfileInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SSLProfileObservation) DeepCopyInto(out *SSLProfileObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SSLPolicy != nil { + in, out := &in.SSLPolicy, &out.SSLPolicy + *out = new(SSLProfileSSLPolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.TrustedClientCertificateNames != nil { + in, out := &in.TrustedClientCertificateNames, &out.TrustedClientCertificateNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VerifyClientCertIssuerDn != nil { + in, out := &in.VerifyClientCertIssuerDn, &out.VerifyClientCertIssuerDn + *out = new(bool) + **out = **in + } + if in.VerifyClientCertificateRevocation != nil { + in, out := &in.VerifyClientCertificateRevocation, &out.VerifyClientCertificateRevocation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SSLProfileObservation. +func (in *SSLProfileObservation) DeepCopy() *SSLProfileObservation { + if in == nil { + return nil + } + out := new(SSLProfileObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SSLProfileParameters) DeepCopyInto(out *SSLProfileParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SSLPolicy != nil { + in, out := &in.SSLPolicy, &out.SSLPolicy + *out = new(SSLProfileSSLPolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.TrustedClientCertificateNames != nil { + in, out := &in.TrustedClientCertificateNames, &out.TrustedClientCertificateNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VerifyClientCertIssuerDn != nil { + in, out := &in.VerifyClientCertIssuerDn, &out.VerifyClientCertIssuerDn + *out = new(bool) + **out = **in + } + if in.VerifyClientCertificateRevocation != nil { + in, out := &in.VerifyClientCertificateRevocation, &out.VerifyClientCertificateRevocation + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SSLProfileParameters. +func (in *SSLProfileParameters) DeepCopy() *SSLProfileParameters { + if in == nil { + return nil + } + out := new(SSLProfileParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SSLProfileSSLPolicyInitParameters) DeepCopyInto(out *SSLProfileSSLPolicyInitParameters) { + *out = *in + if in.CipherSuites != nil { + in, out := &in.CipherSuites, &out.CipherSuites + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DisabledProtocols != nil { + in, out := &in.DisabledProtocols, &out.DisabledProtocols + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MinProtocolVersion != nil { + in, out := &in.MinProtocolVersion, &out.MinProtocolVersion + *out = new(string) + **out = **in + } + if in.PolicyName != nil { + in, out := &in.PolicyName, &out.PolicyName + *out = new(string) + **out = **in + } + if in.PolicyType != nil { + in, out := &in.PolicyType, &out.PolicyType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SSLProfileSSLPolicyInitParameters. +func (in *SSLProfileSSLPolicyInitParameters) DeepCopy() *SSLProfileSSLPolicyInitParameters { + if in == nil { + return nil + } + out := new(SSLProfileSSLPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SSLProfileSSLPolicyObservation) DeepCopyInto(out *SSLProfileSSLPolicyObservation) { + *out = *in + if in.CipherSuites != nil { + in, out := &in.CipherSuites, &out.CipherSuites + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DisabledProtocols != nil { + in, out := &in.DisabledProtocols, &out.DisabledProtocols + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MinProtocolVersion != nil { + in, out := &in.MinProtocolVersion, &out.MinProtocolVersion + *out = new(string) + **out = **in + } + if in.PolicyName != nil { + in, out := &in.PolicyName, &out.PolicyName + *out = new(string) + **out = **in + } + if in.PolicyType != nil { + in, out := &in.PolicyType, &out.PolicyType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SSLProfileSSLPolicyObservation. +func (in *SSLProfileSSLPolicyObservation) DeepCopy() *SSLProfileSSLPolicyObservation { + if in == nil { + return nil + } + out := new(SSLProfileSSLPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SSLProfileSSLPolicyParameters) DeepCopyInto(out *SSLProfileSSLPolicyParameters) { + *out = *in + if in.CipherSuites != nil { + in, out := &in.CipherSuites, &out.CipherSuites + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DisabledProtocols != nil { + in, out := &in.DisabledProtocols, &out.DisabledProtocols + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MinProtocolVersion != nil { + in, out := &in.MinProtocolVersion, &out.MinProtocolVersion + *out = new(string) + **out = **in + } + if in.PolicyName != nil { + in, out := &in.PolicyName, &out.PolicyName + *out = new(string) + **out = **in + } + if in.PolicyType != nil { + in, out := &in.PolicyType, &out.PolicyType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SSLProfileSSLPolicyParameters. +func (in *SSLProfileSSLPolicyParameters) DeepCopy() *SSLProfileSSLPolicyParameters { + if in == nil { + return nil + } + out := new(SSLProfileSSLPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScopeInitParameters) DeepCopyInto(out *ScopeInitParameters) { + *out = *in + if in.ManagementGroupIds != nil { + in, out := &in.ManagementGroupIds, &out.ManagementGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubscriptionIds != nil { + in, out := &in.SubscriptionIds, &out.SubscriptionIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeInitParameters. +func (in *ScopeInitParameters) DeepCopy() *ScopeInitParameters { + if in == nil { + return nil + } + out := new(ScopeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScopeObservation) DeepCopyInto(out *ScopeObservation) { + *out = *in + if in.ManagementGroupIds != nil { + in, out := &in.ManagementGroupIds, &out.ManagementGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubscriptionIds != nil { + in, out := &in.SubscriptionIds, &out.SubscriptionIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeObservation. +func (in *ScopeObservation) DeepCopy() *ScopeObservation { + if in == nil { + return nil + } + out := new(ScopeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScopeParameters) DeepCopyInto(out *ScopeParameters) { + *out = *in + if in.ManagementGroupIds != nil { + in, out := &in.ManagementGroupIds, &out.ManagementGroupIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SubscriptionIds != nil { + in, out := &in.SubscriptionIds, &out.SubscriptionIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeParameters. +func (in *ScopeParameters) DeepCopy() *ScopeParameters { + if in == nil { + return nil + } + out := new(ScopeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerInitParameters) DeepCopyInto(out *ServerInitParameters) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } + if in.Score != nil { + in, out := &in.Score, &out.Score + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerInitParameters. +func (in *ServerInitParameters) DeepCopy() *ServerInitParameters { + if in == nil { + return nil + } + out := new(ServerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServerObservation) DeepCopyInto(out *ServerObservation) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } + if in.Score != nil { + in, out := &in.Score, &out.Score + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerObservation. +func (in *ServerObservation) DeepCopy() *ServerObservation { + if in == nil { + return nil + } + out := new(ServerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerParameters) DeepCopyInto(out *ServerParameters) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } + if in.Score != nil { + in, out := &in.Score, &out.Score + *out = new(float64) + **out = **in + } + out.SecretSecretRef = in.SecretSecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerParameters. +func (in *ServerParameters) DeepCopy() *ServerParameters { + if in == nil { + return nil + } + out := new(ServerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerRootCertificateInitParameters) DeepCopyInto(out *ServerRootCertificateInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PublicCertData != nil { + in, out := &in.PublicCertData, &out.PublicCertData + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerRootCertificateInitParameters. 
+func (in *ServerRootCertificateInitParameters) DeepCopy() *ServerRootCertificateInitParameters { + if in == nil { + return nil + } + out := new(ServerRootCertificateInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerRootCertificateObservation) DeepCopyInto(out *ServerRootCertificateObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PublicCertData != nil { + in, out := &in.PublicCertData, &out.PublicCertData + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerRootCertificateObservation. +func (in *ServerRootCertificateObservation) DeepCopy() *ServerRootCertificateObservation { + if in == nil { + return nil + } + out := new(ServerRootCertificateObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerRootCertificateParameters) DeepCopyInto(out *ServerRootCertificateParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PublicCertData != nil { + in, out := &in.PublicCertData, &out.PublicCertData + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerRootCertificateParameters. +func (in *ServerRootCertificateParameters) DeepCopy() *ServerRootCertificateParameters { + if in == nil { + return nil + } + out := new(ServerRootCertificateParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceDelegationInitParameters) DeepCopyInto(out *ServiceDelegationInitParameters) { + *out = *in + if in.Actions != nil { + in, out := &in.Actions, &out.Actions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceDelegationInitParameters. +func (in *ServiceDelegationInitParameters) DeepCopy() *ServiceDelegationInitParameters { + if in == nil { + return nil + } + out := new(ServiceDelegationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceDelegationObservation) DeepCopyInto(out *ServiceDelegationObservation) { + *out = *in + if in.Actions != nil { + in, out := &in.Actions, &out.Actions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceDelegationObservation. +func (in *ServiceDelegationObservation) DeepCopy() *ServiceDelegationObservation { + if in == nil { + return nil + } + out := new(ServiceDelegationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceDelegationParameters) DeepCopyInto(out *ServiceDelegationParameters) { + *out = *in + if in.Actions != nil { + in, out := &in.Actions, &out.Actions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceDelegationParameters. +func (in *ServiceDelegationParameters) DeepCopy() *ServiceDelegationParameters { + if in == nil { + return nil + } + out := new(ServiceDelegationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SignatureOverridesInitParameters) DeepCopyInto(out *SignatureOverridesInitParameters) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.State != nil { + in, out := &in.State, &out.State + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignatureOverridesInitParameters. +func (in *SignatureOverridesInitParameters) DeepCopy() *SignatureOverridesInitParameters { + if in == nil { + return nil + } + out := new(SignatureOverridesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SignatureOverridesObservation) DeepCopyInto(out *SignatureOverridesObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.State != nil { + in, out := &in.State, &out.State + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignatureOverridesObservation. +func (in *SignatureOverridesObservation) DeepCopy() *SignatureOverridesObservation { + if in == nil { + return nil + } + out := new(SignatureOverridesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SignatureOverridesParameters) DeepCopyInto(out *SignatureOverridesParameters) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.State != nil { + in, out := &in.State, &out.State + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignatureOverridesParameters. +func (in *SignatureOverridesParameters) DeepCopy() *SignatureOverridesParameters { + if in == nil { + return nil + } + out := new(SignatureOverridesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SkuInitParameters) DeepCopyInto(out *SkuInitParameters) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Tier != nil { + in, out := &in.Tier, &out.Tier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SkuInitParameters. 
+func (in *SkuInitParameters) DeepCopy() *SkuInitParameters { + if in == nil { + return nil + } + out := new(SkuInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SkuObservation) DeepCopyInto(out *SkuObservation) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Tier != nil { + in, out := &in.Tier, &out.Tier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SkuObservation. +func (in *SkuObservation) DeepCopy() *SkuObservation { + if in == nil { + return nil + } + out := new(SkuObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SkuParameters) DeepCopyInto(out *SkuParameters) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Tier != nil { + in, out := &in.Tier, &out.Tier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SkuParameters. +func (in *SkuParameters) DeepCopy() *SkuParameters { + if in == nil { + return nil + } + out := new(SkuParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SoaRecordInitParameters) DeepCopyInto(out *SoaRecordInitParameters) { + *out = *in + if in.Email != nil { + in, out := &in.Email, &out.Email + *out = new(string) + **out = **in + } + if in.ExpireTime != nil { + in, out := &in.ExpireTime, &out.ExpireTime + *out = new(float64) + **out = **in + } + if in.HostName != nil { + in, out := &in.HostName, &out.HostName + *out = new(string) + **out = **in + } + if in.MinimumTTL != nil { + in, out := &in.MinimumTTL, &out.MinimumTTL + *out = new(float64) + **out = **in + } + if in.RefreshTime != nil { + in, out := &in.RefreshTime, &out.RefreshTime + *out = new(float64) + **out = **in + } + if in.RetryTime != nil { + in, out := &in.RetryTime, &out.RetryTime + *out = new(float64) + **out = **in + } + if in.SerialNumber != nil { + in, out := &in.SerialNumber, &out.SerialNumber + *out = new(float64) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SoaRecordInitParameters. +func (in *SoaRecordInitParameters) DeepCopy() *SoaRecordInitParameters { + if in == nil { + return nil + } + out := new(SoaRecordInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SoaRecordObservation) DeepCopyInto(out *SoaRecordObservation) { + *out = *in + if in.Email != nil { + in, out := &in.Email, &out.Email + *out = new(string) + **out = **in + } + if in.ExpireTime != nil { + in, out := &in.ExpireTime, &out.ExpireTime + *out = new(float64) + **out = **in + } + if in.Fqdn != nil { + in, out := &in.Fqdn, &out.Fqdn + *out = new(string) + **out = **in + } + if in.HostName != nil { + in, out := &in.HostName, &out.HostName + *out = new(string) + **out = **in + } + if in.MinimumTTL != nil { + in, out := &in.MinimumTTL, &out.MinimumTTL + *out = new(float64) + **out = **in + } + if in.RefreshTime != nil { + in, out := &in.RefreshTime, &out.RefreshTime + *out = new(float64) + **out = **in + } + if in.RetryTime != nil { + in, out := &in.RetryTime, &out.RetryTime + *out = new(float64) + **out = **in + } + if in.SerialNumber != nil { + in, out := &in.SerialNumber, &out.SerialNumber + *out = new(float64) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SoaRecordObservation. +func (in *SoaRecordObservation) DeepCopy() *SoaRecordObservation { + if in == nil { + return nil + } + out := new(SoaRecordObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SoaRecordParameters) DeepCopyInto(out *SoaRecordParameters) { + *out = *in + if in.Email != nil { + in, out := &in.Email, &out.Email + *out = new(string) + **out = **in + } + if in.ExpireTime != nil { + in, out := &in.ExpireTime, &out.ExpireTime + *out = new(float64) + **out = **in + } + if in.HostName != nil { + in, out := &in.HostName, &out.HostName + *out = new(string) + **out = **in + } + if in.MinimumTTL != nil { + in, out := &in.MinimumTTL, &out.MinimumTTL + *out = new(float64) + **out = **in + } + if in.RefreshTime != nil { + in, out := &in.RefreshTime, &out.RefreshTime + *out = new(float64) + **out = **in + } + if in.RetryTime != nil { + in, out := &in.RetryTime, &out.RetryTime + *out = new(float64) + **out = **in + } + if in.SerialNumber != nil { + in, out := &in.SerialNumber, &out.SerialNumber + *out = new(float64) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SoaRecordParameters. +func (in *SoaRecordParameters) DeepCopy() *SoaRecordParameters { + if in == nil { + return nil + } + out := new(SoaRecordParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *StaticVnetRouteInitParameters) DeepCopyInto(out *StaticVnetRouteInitParameters) {
	*out = *in
	// Slice of string pointers: allocate a new backing array and a new
	// string for every non-nil element.
	if in.AddressPrefixes != nil {
		in, out := &in.AddressPrefixes, &out.AddressPrefixes
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
	if in.NextHopIPAddress != nil {
		in, out := &in.NextHopIPAddress, &out.NextHopIPAddress
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticVnetRouteInitParameters.
func (in *StaticVnetRouteInitParameters) DeepCopy() *StaticVnetRouteInitParameters {
	if in == nil {
		return nil
	}
	out := new(StaticVnetRouteInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StaticVnetRouteObservation) DeepCopyInto(out *StaticVnetRouteObservation) {
	*out = *in
	if in.AddressPrefixes != nil {
		in, out := &in.AddressPrefixes, &out.AddressPrefixes
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
	if in.NextHopIPAddress != nil {
		in, out := &in.NextHopIPAddress, &out.NextHopIPAddress
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticVnetRouteObservation.
func (in *StaticVnetRouteObservation) DeepCopy() *StaticVnetRouteObservation {
	if in == nil {
		return nil
	}
	out := new(StaticVnetRouteObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StaticVnetRouteParameters) DeepCopyInto(out *StaticVnetRouteParameters) {
	*out = *in
	if in.AddressPrefixes != nil {
		in, out := &in.AddressPrefixes, &out.AddressPrefixes
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
	if in.NextHopIPAddress != nil {
		in, out := &in.NextHopIPAddress, &out.NextHopIPAddress
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticVnetRouteParameters.
func (in *StaticVnetRouteParameters) DeepCopy() *StaticVnetRouteParameters {
	if in == nil {
		return nil
	}
	out := new(StaticVnetRouteParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StorageLocationInitParameters) DeepCopyInto(out *StorageLocationInitParameters) {
	*out = *in
	if in.FilePath != nil {
		in, out := &in.FilePath, &out.FilePath
		*out = new(string)
		**out = **in
	}
	if in.StorageAccountID != nil {
		in, out := &in.StorageAccountID, &out.StorageAccountID
		*out = new(string)
		**out = **in
	}
	// Cross-resource reference/selector fields delegate to their own
	// generated DeepCopyInto implementations.
	if in.StorageAccountIDRef != nil {
		in, out := &in.StorageAccountIDRef, &out.StorageAccountIDRef
		*out = new(v1.Reference)
		(*in).DeepCopyInto(*out)
	}
	if in.StorageAccountIDSelector != nil {
		in, out := &in.StorageAccountIDSelector, &out.StorageAccountIDSelector
		*out = new(v1.Selector)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageLocationInitParameters.
func (in *StorageLocationInitParameters) DeepCopy() *StorageLocationInitParameters {
	if in == nil {
		return nil
	}
	out := new(StorageLocationInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StorageLocationObservation) DeepCopyInto(out *StorageLocationObservation) {
	*out = *in
	if in.FilePath != nil {
		in, out := &in.FilePath, &out.FilePath
		*out = new(string)
		**out = **in
	}
	if in.StorageAccountID != nil {
		in, out := &in.StorageAccountID, &out.StorageAccountID
		*out = new(string)
		**out = **in
	}
	if in.StoragePath != nil {
		in, out := &in.StoragePath, &out.StoragePath
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageLocationObservation.
func (in *StorageLocationObservation) DeepCopy() *StorageLocationObservation {
	if in == nil {
		return nil
	}
	out := new(StorageLocationObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StorageLocationParameters) DeepCopyInto(out *StorageLocationParameters) {
	*out = *in
	if in.FilePath != nil {
		in, out := &in.FilePath, &out.FilePath
		*out = new(string)
		**out = **in
	}
	if in.StorageAccountID != nil {
		in, out := &in.StorageAccountID, &out.StorageAccountID
		*out = new(string)
		**out = **in
	}
	if in.StorageAccountIDRef != nil {
		in, out := &in.StorageAccountIDRef, &out.StorageAccountIDRef
		*out = new(v1.Reference)
		(*in).DeepCopyInto(*out)
	}
	if in.StorageAccountIDSelector != nil {
		in, out := &in.StorageAccountIDSelector, &out.StorageAccountIDSelector
		*out = new(v1.Selector)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageLocationParameters.
func (in *StorageLocationParameters) DeepCopy() *StorageLocationParameters {
	if in == nil {
		return nil
	}
	out := new(StorageLocationParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Subnet) DeepCopyInto(out *Subnet) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subnet.
func (in *Subnet) DeepCopy() *Subnet {
	if in == nil {
		return nil
	}
	out := new(Subnet)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
// This satisfies the runtime.Object interface required by the Kubernetes API machinery.
func (in *Subnet) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SubnetInitParameters) DeepCopyInto(out *SubnetInitParameters) {
	*out = *in
	if in.AddressPrefixes != nil {
		in, out := &in.AddressPrefixes, &out.AddressPrefixes
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	// Struct slice: element-wise DeepCopyInto into a fresh slice.
	if in.Delegation != nil {
		in, out := &in.Delegation, &out.Delegation
		*out = make([]DelegationInitParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.EnforcePrivateLinkEndpointNetworkPolicies != nil {
		in, out := &in.EnforcePrivateLinkEndpointNetworkPolicies, &out.EnforcePrivateLinkEndpointNetworkPolicies
		*out = new(bool)
		**out = **in
	}
	if in.EnforcePrivateLinkServiceNetworkPolicies != nil {
		in, out := &in.EnforcePrivateLinkServiceNetworkPolicies, &out.EnforcePrivateLinkServiceNetworkPolicies
		*out = new(bool)
		**out = **in
	}
	if in.PrivateEndpointNetworkPoliciesEnabled != nil {
		in, out := &in.PrivateEndpointNetworkPoliciesEnabled, &out.PrivateEndpointNetworkPoliciesEnabled
		*out = new(bool)
		**out = **in
	}
	if in.PrivateLinkServiceNetworkPoliciesEnabled != nil {
		in, out := &in.PrivateLinkServiceNetworkPoliciesEnabled, &out.PrivateLinkServiceNetworkPoliciesEnabled
		*out = new(bool)
		**out = **in
	}
	if in.ServiceEndpointPolicyIds != nil {
		in, out := &in.ServiceEndpointPolicyIds, &out.ServiceEndpointPolicyIds
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.ServiceEndpoints != nil {
		in, out := &in.ServiceEndpoints, &out.ServiceEndpoints
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubnetInitParameters.
func (in *SubnetInitParameters) DeepCopy() *SubnetInitParameters {
	if in == nil {
		return nil
	}
	out := new(SubnetInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SubnetList) DeepCopyInto(out *SubnetList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]Subnet, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubnetList.
func (in *SubnetList) DeepCopy() *SubnetList {
	if in == nil {
		return nil
	}
	out := new(SubnetList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
// This satisfies the runtime.Object interface required by the Kubernetes API machinery.
func (in *SubnetList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SubnetObservation) DeepCopyInto(out *SubnetObservation) {
	*out = *in
	if in.AddressPrefixes != nil {
		in, out := &in.AddressPrefixes, &out.AddressPrefixes
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	// Struct slice: element-wise DeepCopyInto into a fresh slice.
	if in.Delegation != nil {
		in, out := &in.Delegation, &out.Delegation
		*out = make([]DelegationObservation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.EnforcePrivateLinkEndpointNetworkPolicies != nil {
		in, out := &in.EnforcePrivateLinkEndpointNetworkPolicies, &out.EnforcePrivateLinkEndpointNetworkPolicies
		*out = new(bool)
		**out = **in
	}
	if in.EnforcePrivateLinkServiceNetworkPolicies != nil {
		in, out := &in.EnforcePrivateLinkServiceNetworkPolicies, &out.EnforcePrivateLinkServiceNetworkPolicies
		*out = new(bool)
		**out = **in
	}
	if in.ID != nil {
		in, out := &in.ID, &out.ID
		*out = new(string)
		**out = **in
	}
	if in.PrivateEndpointNetworkPoliciesEnabled != nil {
		in, out := &in.PrivateEndpointNetworkPoliciesEnabled, &out.PrivateEndpointNetworkPoliciesEnabled
		*out = new(bool)
		**out = **in
	}
	if in.PrivateLinkServiceNetworkPoliciesEnabled != nil {
		in, out := &in.PrivateLinkServiceNetworkPoliciesEnabled, &out.PrivateLinkServiceNetworkPoliciesEnabled
		*out = new(bool)
		**out = **in
	}
	if in.ResourceGroupName != nil {
		in, out := &in.ResourceGroupName, &out.ResourceGroupName
		*out = new(string)
		**out = **in
	}
	if in.ServiceEndpointPolicyIds != nil {
		in, out := &in.ServiceEndpointPolicyIds, &out.ServiceEndpointPolicyIds
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.ServiceEndpoints != nil {
		in, out := &in.ServiceEndpoints, &out.ServiceEndpoints
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.VirtualNetworkName != nil {
		in, out := &in.VirtualNetworkName, &out.VirtualNetworkName
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubnetObservation.
func (in *SubnetObservation) DeepCopy() *SubnetObservation {
	if in == nil {
		return nil
	}
	out := new(SubnetObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SubnetParameters) DeepCopyInto(out *SubnetParameters) {
	*out = *in
	if in.AddressPrefixes != nil {
		in, out := &in.AddressPrefixes, &out.AddressPrefixes
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.Delegation != nil {
		in, out := &in.Delegation, &out.Delegation
		*out = make([]DelegationParameters, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.EnforcePrivateLinkEndpointNetworkPolicies != nil {
		in, out := &in.EnforcePrivateLinkEndpointNetworkPolicies, &out.EnforcePrivateLinkEndpointNetworkPolicies
		*out = new(bool)
		**out = **in
	}
	if in.EnforcePrivateLinkServiceNetworkPolicies != nil {
		in, out := &in.EnforcePrivateLinkServiceNetworkPolicies, &out.EnforcePrivateLinkServiceNetworkPolicies
		*out = new(bool)
		**out = **in
	}
	if in.PrivateEndpointNetworkPoliciesEnabled != nil {
		in, out := &in.PrivateEndpointNetworkPoliciesEnabled, &out.PrivateEndpointNetworkPoliciesEnabled
		*out = new(bool)
		**out = **in
	}
	if in.PrivateLinkServiceNetworkPoliciesEnabled != nil {
		in, out := &in.PrivateLinkServiceNetworkPoliciesEnabled, &out.PrivateLinkServiceNetworkPoliciesEnabled
		*out = new(bool)
		**out = **in
	}
	if in.ResourceGroupName != nil {
		in, out := &in.ResourceGroupName, &out.ResourceGroupName
		*out = new(string)
		**out = **in
	}
	// Cross-resource reference/selector fields delegate to their own
	// generated DeepCopyInto implementations.
	if in.ResourceGroupNameRef != nil {
		in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef
		*out = new(v1.Reference)
		(*in).DeepCopyInto(*out)
	}
	if in.ResourceGroupNameSelector != nil {
		in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector
		*out = new(v1.Selector)
		(*in).DeepCopyInto(*out)
	}
	if in.ServiceEndpointPolicyIds != nil {
		in, out := &in.ServiceEndpointPolicyIds, &out.ServiceEndpointPolicyIds
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.ServiceEndpoints != nil {
		in, out := &in.ServiceEndpoints, &out.ServiceEndpoints
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.VirtualNetworkName != nil {
		in, out := &in.VirtualNetworkName, &out.VirtualNetworkName
		*out = new(string)
		**out = **in
	}
	if in.VirtualNetworkNameRef != nil {
		in, out := &in.VirtualNetworkNameRef, &out.VirtualNetworkNameRef
		*out = new(v1.Reference)
		(*in).DeepCopyInto(*out)
	}
	if in.VirtualNetworkNameSelector != nil {
		in, out := &in.VirtualNetworkNameSelector, &out.VirtualNetworkNameSelector
		*out = new(v1.Selector)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubnetParameters.
func (in *SubnetParameters) DeepCopy() *SubnetParameters {
	if in == nil {
		return nil
	}
	out := new(SubnetParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SubnetSpec) DeepCopyInto(out *SubnetSpec) {
	// Embedded Crossplane ResourceSpec plus the provider-specific
	// ForProvider/InitProvider blocks each know how to deep-copy themselves.
	*out = *in
	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
	in.ForProvider.DeepCopyInto(&out.ForProvider)
	in.InitProvider.DeepCopyInto(&out.InitProvider)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubnetSpec.
func (in *SubnetSpec) DeepCopy() *SubnetSpec {
	if in == nil {
		return nil
	}
	out := new(SubnetSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SubnetStatus) DeepCopyInto(out *SubnetStatus) {
	*out = *in
	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
	in.AtProvider.DeepCopyInto(&out.AtProvider)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubnetStatus.
func (in *SubnetStatus) DeepCopy() *SubnetStatus {
	if in == nil {
		return nil
	}
	out := new(SubnetStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SuccessThresholdInitParameters) DeepCopyInto(out *SuccessThresholdInitParameters) {
	*out = *in
	if in.ChecksFailedPercent != nil {
		in, out := &in.ChecksFailedPercent, &out.ChecksFailedPercent
		*out = new(float64)
		**out = **in
	}
	if in.RoundTripTimeMS != nil {
		in, out := &in.RoundTripTimeMS, &out.RoundTripTimeMS
		*out = new(float64)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SuccessThresholdInitParameters.
func (in *SuccessThresholdInitParameters) DeepCopy() *SuccessThresholdInitParameters {
	if in == nil {
		return nil
	}
	out := new(SuccessThresholdInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SuccessThresholdObservation) DeepCopyInto(out *SuccessThresholdObservation) {
	// Re-allocate both float pointer fields so the copy is independent.
	*out = *in
	if in.ChecksFailedPercent != nil {
		in, out := &in.ChecksFailedPercent, &out.ChecksFailedPercent
		*out = new(float64)
		**out = **in
	}
	if in.RoundTripTimeMS != nil {
		in, out := &in.RoundTripTimeMS, &out.RoundTripTimeMS
		*out = new(float64)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SuccessThresholdObservation.
func (in *SuccessThresholdObservation) DeepCopy() *SuccessThresholdObservation {
	if in == nil {
		return nil
	}
	out := new(SuccessThresholdObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SuccessThresholdParameters) DeepCopyInto(out *SuccessThresholdParameters) {
	*out = *in
	if in.ChecksFailedPercent != nil {
		in, out := &in.ChecksFailedPercent, &out.ChecksFailedPercent
		*out = new(float64)
		**out = **in
	}
	if in.RoundTripTimeMS != nil {
		in, out := &in.RoundTripTimeMS, &out.RoundTripTimeMS
		*out = new(float64)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SuccessThresholdParameters.
func (in *SuccessThresholdParameters) DeepCopy() *SuccessThresholdParameters {
	if in == nil {
		return nil
	}
	out := new(SuccessThresholdParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TCPConfigurationInitParameters) DeepCopyInto(out *TCPConfigurationInitParameters) {
	// Re-allocate each pointer field so the copy owns its own memory.
	*out = *in
	if in.DestinationPortBehavior != nil {
		in, out := &in.DestinationPortBehavior, &out.DestinationPortBehavior
		*out = new(string)
		**out = **in
	}
	if in.Port != nil {
		in, out := &in.Port, &out.Port
		*out = new(float64)
		**out = **in
	}
	if in.TraceRouteEnabled != nil {
		in, out := &in.TraceRouteEnabled, &out.TraceRouteEnabled
		*out = new(bool)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPConfigurationInitParameters.
func (in *TCPConfigurationInitParameters) DeepCopy() *TCPConfigurationInitParameters {
	if in == nil {
		return nil
	}
	out := new(TCPConfigurationInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TCPConfigurationObservation) DeepCopyInto(out *TCPConfigurationObservation) {
	*out = *in
	if in.DestinationPortBehavior != nil {
		in, out := &in.DestinationPortBehavior, &out.DestinationPortBehavior
		*out = new(string)
		**out = **in
	}
	if in.Port != nil {
		in, out := &in.Port, &out.Port
		*out = new(float64)
		**out = **in
	}
	if in.TraceRouteEnabled != nil {
		in, out := &in.TraceRouteEnabled, &out.TraceRouteEnabled
		*out = new(bool)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPConfigurationObservation.
func (in *TCPConfigurationObservation) DeepCopy() *TCPConfigurationObservation {
	if in == nil {
		return nil
	}
	out := new(TCPConfigurationObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TCPConfigurationParameters) DeepCopyInto(out *TCPConfigurationParameters) {
	// Re-allocate each pointer field so the copy owns its own memory.
	*out = *in
	if in.DestinationPortBehavior != nil {
		in, out := &in.DestinationPortBehavior, &out.DestinationPortBehavior
		*out = new(string)
		**out = **in
	}
	if in.Port != nil {
		in, out := &in.Port, &out.Port
		*out = new(float64)
		**out = **in
	}
	if in.TraceRouteEnabled != nil {
		in, out := &in.TraceRouteEnabled, &out.TraceRouteEnabled
		*out = new(bool)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPConfigurationParameters.
func (in *TCPConfigurationParameters) DeepCopy() *TCPConfigurationParameters {
	if in == nil {
		return nil
	}
	out := new(TCPConfigurationParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TLSCertificateInitParameters) DeepCopyInto(out *TLSCertificateInitParameters) {
	*out = *in
	if in.KeyVaultSecretID != nil {
		in, out := &in.KeyVaultSecretID, &out.KeyVaultSecretID
		*out = new(string)
		**out = **in
	}
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSCertificateInitParameters.
func (in *TLSCertificateInitParameters) DeepCopy() *TLSCertificateInitParameters {
	if in == nil {
		return nil
	}
	out := new(TLSCertificateInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TLSCertificateObservation) DeepCopyInto(out *TLSCertificateObservation) {
	// Re-allocate both string pointer fields so the copy is independent.
	*out = *in
	if in.KeyVaultSecretID != nil {
		in, out := &in.KeyVaultSecretID, &out.KeyVaultSecretID
		*out = new(string)
		**out = **in
	}
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSCertificateObservation.
func (in *TLSCertificateObservation) DeepCopy() *TLSCertificateObservation {
	if in == nil {
		return nil
	}
	out := new(TLSCertificateObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TLSCertificateParameters) DeepCopyInto(out *TLSCertificateParameters) {
	*out = *in
	if in.KeyVaultSecretID != nil {
		in, out := &in.KeyVaultSecretID, &out.KeyVaultSecretID
		*out = new(string)
		**out = **in
	}
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSCertificateParameters.
func (in *TLSCertificateParameters) DeepCopy() *TLSCertificateParameters {
	if in == nil {
		return nil
	}
	out := new(TLSCertificateParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TestConfigurationInitParameters) DeepCopyInto(out *TestConfigurationInitParameters) {
	*out = *in
	// Nested configuration structs delegate to their own generated
	// DeepCopyInto; scalar pointers are re-allocated inline.
	if in.HTTPConfiguration != nil {
		in, out := &in.HTTPConfiguration, &out.HTTPConfiguration
		*out = new(HTTPConfigurationInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.IcmpConfiguration != nil {
		in, out := &in.IcmpConfiguration, &out.IcmpConfiguration
		*out = new(IcmpConfigurationInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
	if in.PreferredIPVersion != nil {
		in, out := &in.PreferredIPVersion, &out.PreferredIPVersion
		*out = new(string)
		**out = **in
	}
	if in.Protocol != nil {
		in, out := &in.Protocol, &out.Protocol
		*out = new(string)
		**out = **in
	}
	if in.SuccessThreshold != nil {
		in, out := &in.SuccessThreshold, &out.SuccessThreshold
		*out = new(SuccessThresholdInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.TCPConfiguration != nil {
		in, out := &in.TCPConfiguration, &out.TCPConfiguration
		*out = new(TCPConfigurationInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.TestFrequencyInSeconds != nil {
		in, out := &in.TestFrequencyInSeconds, &out.TestFrequencyInSeconds
		*out = new(float64)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestConfigurationInitParameters.
func (in *TestConfigurationInitParameters) DeepCopy() *TestConfigurationInitParameters {
	if in == nil {
		return nil
	}
	out := new(TestConfigurationInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TestConfigurationObservation) DeepCopyInto(out *TestConfigurationObservation) {
	*out = *in
	// Nested configuration structs delegate to their own generated
	// DeepCopyInto; scalar pointers are re-allocated inline.
	if in.HTTPConfiguration != nil {
		in, out := &in.HTTPConfiguration, &out.HTTPConfiguration
		*out = new(HTTPConfigurationObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.IcmpConfiguration != nil {
		in, out := &in.IcmpConfiguration, &out.IcmpConfiguration
		*out = new(IcmpConfigurationObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
	if in.PreferredIPVersion != nil {
		in, out := &in.PreferredIPVersion, &out.PreferredIPVersion
		*out = new(string)
		**out = **in
	}
	if in.Protocol != nil {
		in, out := &in.Protocol, &out.Protocol
		*out = new(string)
		**out = **in
	}
	if in.SuccessThreshold != nil {
		in, out := &in.SuccessThreshold, &out.SuccessThreshold
		*out = new(SuccessThresholdObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.TCPConfiguration != nil {
		in, out := &in.TCPConfiguration, &out.TCPConfiguration
		*out = new(TCPConfigurationObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.TestFrequencyInSeconds != nil {
		in, out := &in.TestFrequencyInSeconds, &out.TestFrequencyInSeconds
		*out = new(float64)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestConfigurationObservation.
func (in *TestConfigurationObservation) DeepCopy() *TestConfigurationObservation {
	if in == nil {
		return nil
	}
	out := new(TestConfigurationObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TestConfigurationParameters) DeepCopyInto(out *TestConfigurationParameters) {
	*out = *in
	// Nested configuration structs delegate to their own generated
	// DeepCopyInto; scalar pointers are re-allocated inline.
	if in.HTTPConfiguration != nil {
		in, out := &in.HTTPConfiguration, &out.HTTPConfiguration
		*out = new(HTTPConfigurationParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.IcmpConfiguration != nil {
		in, out := &in.IcmpConfiguration, &out.IcmpConfiguration
		*out = new(IcmpConfigurationParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
	if in.PreferredIPVersion != nil {
		in, out := &in.PreferredIPVersion, &out.PreferredIPVersion
		*out = new(string)
		**out = **in
	}
	if in.Protocol != nil {
		in, out := &in.Protocol, &out.Protocol
		*out = new(string)
		**out = **in
	}
	if in.SuccessThreshold != nil {
		in, out := &in.SuccessThreshold, &out.SuccessThreshold
		*out = new(SuccessThresholdParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.TCPConfiguration != nil {
		in, out := &in.TCPConfiguration, &out.TCPConfiguration
		*out = new(TCPConfigurationParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.TestFrequencyInSeconds != nil {
		in, out := &in.TestFrequencyInSeconds, &out.TestFrequencyInSeconds
		*out = new(float64)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestConfigurationParameters.
func (in *TestConfigurationParameters) DeepCopy() *TestConfigurationParameters {
	if in == nil {
		return nil
	}
	out := new(TestConfigurationParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TestGroupInitParameters) DeepCopyInto(out *TestGroupInitParameters) {
	*out = *in
	// Each []*string field gets a fresh backing array with per-element
	// string allocations; nil elements are preserved as nil.
	if in.DestinationEndpoints != nil {
		in, out := &in.DestinationEndpoints, &out.DestinationEndpoints
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.Enabled != nil {
		in, out := &in.Enabled, &out.Enabled
		*out = new(bool)
		**out = **in
	}
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
	if in.SourceEndpoints != nil {
		in, out := &in.SourceEndpoints, &out.SourceEndpoints
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.TestConfigurationNames != nil {
		in, out := &in.TestConfigurationNames, &out.TestConfigurationNames
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestGroupInitParameters.
func (in *TestGroupInitParameters) DeepCopy() *TestGroupInitParameters {
	if in == nil {
		return nil
	}
	out := new(TestGroupInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TestGroupObservation) DeepCopyInto(out *TestGroupObservation) {
	*out = *in
	// Each []*string field gets a fresh backing array with per-element
	// string allocations; nil elements are preserved as nil.
	if in.DestinationEndpoints != nil {
		in, out := &in.DestinationEndpoints, &out.DestinationEndpoints
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.Enabled != nil {
		in, out := &in.Enabled, &out.Enabled
		*out = new(bool)
		**out = **in
	}
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
	if in.SourceEndpoints != nil {
		in, out := &in.SourceEndpoints, &out.SourceEndpoints
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.TestConfigurationNames != nil {
		in, out := &in.TestConfigurationNames, &out.TestConfigurationNames
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestGroupObservation.
func (in *TestGroupObservation) DeepCopy() *TestGroupObservation {
	if in == nil {
		return nil
	}
	out := new(TestGroupObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TestGroupParameters) DeepCopyInto(out *TestGroupParameters) {
	*out = *in
	if in.DestinationEndpoints != nil {
		in, out := &in.DestinationEndpoints, &out.DestinationEndpoints
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.Enabled != nil {
		in, out := &in.Enabled, &out.Enabled
		*out = new(bool)
		**out = **in
	}
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
	if in.SourceEndpoints != nil {
		in, out := &in.SourceEndpoints, &out.SourceEndpoints
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.TestConfigurationNames != nil {
		in, out := &in.TestConfigurationNames, &out.TestConfigurationNames
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestGroupParameters.
func (in *TestGroupParameters) DeepCopy() *TestGroupParameters {
	if in == nil {
		return nil
	}
	out := new(TestGroupParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ThreatIntelligenceAllowlistInitParameters) DeepCopyInto(out *ThreatIntelligenceAllowlistInitParameters) {
	*out = *in
	if in.Fqdns != nil {
		in, out := &in.Fqdns, &out.Fqdns
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.IPAddresses != nil {
		in, out := &in.IPAddresses, &out.IPAddresses
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThreatIntelligenceAllowlistInitParameters.
func (in *ThreatIntelligenceAllowlistInitParameters) DeepCopy() *ThreatIntelligenceAllowlistInitParameters {
	if in == nil {
		return nil
	}
	out := new(ThreatIntelligenceAllowlistInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ThreatIntelligenceAllowlistObservation) DeepCopyInto(out *ThreatIntelligenceAllowlistObservation) {
	*out = *in
	if in.Fqdns != nil {
		in, out := &in.Fqdns, &out.Fqdns
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.IPAddresses != nil {
		in, out := &in.IPAddresses, &out.IPAddresses
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThreatIntelligenceAllowlistObservation.
func (in *ThreatIntelligenceAllowlistObservation) DeepCopy() *ThreatIntelligenceAllowlistObservation {
	if in == nil {
		return nil
	}
	out := new(ThreatIntelligenceAllowlistObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ThreatIntelligenceAllowlistParameters) DeepCopyInto(out *ThreatIntelligenceAllowlistParameters) {
	*out = *in
	if in.Fqdns != nil {
		in, out := &in.Fqdns, &out.Fqdns
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.IPAddresses != nil {
		in, out := &in.IPAddresses, &out.IPAddresses
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThreatIntelligenceAllowlistParameters.
func (in *ThreatIntelligenceAllowlistParameters) DeepCopy() *ThreatIntelligenceAllowlistParameters {
	if in == nil {
		return nil
	}
	out := new(ThreatIntelligenceAllowlistParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TrafficAnalyticsInitParameters) DeepCopyInto(out *TrafficAnalyticsInitParameters) {
	*out = *in
	if in.Enabled != nil {
		in, out := &in.Enabled, &out.Enabled
		*out = new(bool)
		**out = **in
	}
	if in.IntervalInMinutes != nil {
		in, out := &in.IntervalInMinutes, &out.IntervalInMinutes
		*out = new(float64)
		**out = **in
	}
	if in.WorkspaceID != nil {
		in, out := &in.WorkspaceID, &out.WorkspaceID
		*out = new(string)
		**out = **in
	}
	if in.WorkspaceIDRef != nil {
		in, out := &in.WorkspaceIDRef, &out.WorkspaceIDRef
		*out = new(v1.Reference)
		(*in).DeepCopyInto(*out)
	}
	if in.WorkspaceIDSelector != nil {
		in, out := &in.WorkspaceIDSelector, &out.WorkspaceIDSelector
		*out = new(v1.Selector)
		(*in).DeepCopyInto(*out)
	}
	if in.WorkspaceRegion != nil {
		in, out := &in.WorkspaceRegion, &out.WorkspaceRegion
		*out = new(string)
		**out = **in
	}
	if in.WorkspaceResourceID != nil {
		in, out := &in.WorkspaceResourceID, &out.WorkspaceResourceID
		*out = new(string)
		**out = **in
	}
	if in.WorkspaceResourceIDRef != nil {
		in, out := &in.WorkspaceResourceIDRef, &out.WorkspaceResourceIDRef
		*out = new(v1.Reference)
		(*in).DeepCopyInto(*out)
	}
	if in.WorkspaceResourceIDSelector != nil {
		in, out := &in.WorkspaceResourceIDSelector, &out.WorkspaceResourceIDSelector
		*out = new(v1.Selector)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficAnalyticsInitParameters.
func (in *TrafficAnalyticsInitParameters) DeepCopy() *TrafficAnalyticsInitParameters {
	if in == nil {
		return nil
	}
	out := new(TrafficAnalyticsInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TrafficAnalyticsObservation) DeepCopyInto(out *TrafficAnalyticsObservation) {
	*out = *in
	if in.Enabled != nil {
		in, out := &in.Enabled, &out.Enabled
		*out = new(bool)
		**out = **in
	}
	if in.IntervalInMinutes != nil {
		in, out := &in.IntervalInMinutes, &out.IntervalInMinutes
		*out = new(float64)
		**out = **in
	}
	if in.WorkspaceID != nil {
		in, out := &in.WorkspaceID, &out.WorkspaceID
		*out = new(string)
		**out = **in
	}
	if in.WorkspaceRegion != nil {
		in, out := &in.WorkspaceRegion, &out.WorkspaceRegion
		*out = new(string)
		**out = **in
	}
	if in.WorkspaceResourceID != nil {
		in, out := &in.WorkspaceResourceID, &out.WorkspaceResourceID
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficAnalyticsObservation.
func (in *TrafficAnalyticsObservation) DeepCopy() *TrafficAnalyticsObservation {
	if in == nil {
		return nil
	}
	out := new(TrafficAnalyticsObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TrafficAnalyticsParameters) DeepCopyInto(out *TrafficAnalyticsParameters) {
	*out = *in
	if in.Enabled != nil {
		in, out := &in.Enabled, &out.Enabled
		*out = new(bool)
		**out = **in
	}
	if in.IntervalInMinutes != nil {
		in, out := &in.IntervalInMinutes, &out.IntervalInMinutes
		*out = new(float64)
		**out = **in
	}
	if in.WorkspaceID != nil {
		in, out := &in.WorkspaceID, &out.WorkspaceID
		*out = new(string)
		**out = **in
	}
	if in.WorkspaceIDRef != nil {
		in, out := &in.WorkspaceIDRef, &out.WorkspaceIDRef
		*out = new(v1.Reference)
		(*in).DeepCopyInto(*out)
	}
	if in.WorkspaceIDSelector != nil {
		in, out := &in.WorkspaceIDSelector, &out.WorkspaceIDSelector
		*out = new(v1.Selector)
		(*in).DeepCopyInto(*out)
	}
	if in.WorkspaceRegion != nil {
		in, out := &in.WorkspaceRegion, &out.WorkspaceRegion
		*out = new(string)
		**out = **in
	}
	if in.WorkspaceResourceID != nil {
		in, out := &in.WorkspaceResourceID, &out.WorkspaceResourceID
		*out = new(string)
		**out = **in
	}
	if in.WorkspaceResourceIDRef != nil {
		in, out := &in.WorkspaceResourceIDRef, &out.WorkspaceResourceIDRef
		*out = new(v1.Reference)
		(*in).DeepCopyInto(*out)
	}
	if in.WorkspaceResourceIDSelector != nil {
		in, out := &in.WorkspaceResourceIDSelector, &out.WorkspaceResourceIDSelector
		*out = new(v1.Selector)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficAnalyticsParameters.
func (in *TrafficAnalyticsParameters) DeepCopy() *TrafficAnalyticsParameters {
	if in == nil {
		return nil
	}
	out := new(TrafficAnalyticsParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TrafficBypassInitParameters) DeepCopyInto(out *TrafficBypassInitParameters) {
	*out = *in
	if in.Description != nil {
		in, out := &in.Description, &out.Description
		*out = new(string)
		**out = **in
	}
	if in.DestinationAddresses != nil {
		in, out := &in.DestinationAddresses, &out.DestinationAddresses
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.DestinationIPGroups != nil {
		in, out := &in.DestinationIPGroups, &out.DestinationIPGroups
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.DestinationPorts != nil {
		in, out := &in.DestinationPorts, &out.DestinationPorts
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
	if in.Protocol != nil {
		in, out := &in.Protocol, &out.Protocol
		*out = new(string)
		**out = **in
	}
	if in.SourceAddresses != nil {
		in, out := &in.SourceAddresses, &out.SourceAddresses
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.SourceIPGroups != nil {
		in, out := &in.SourceIPGroups, &out.SourceIPGroups
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficBypassInitParameters.
func (in *TrafficBypassInitParameters) DeepCopy() *TrafficBypassInitParameters {
	if in == nil {
		return nil
	}
	out := new(TrafficBypassInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TrafficBypassObservation) DeepCopyInto(out *TrafficBypassObservation) {
	*out = *in
	if in.Description != nil {
		in, out := &in.Description, &out.Description
		*out = new(string)
		**out = **in
	}
	if in.DestinationAddresses != nil {
		in, out := &in.DestinationAddresses, &out.DestinationAddresses
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.DestinationIPGroups != nil {
		in, out := &in.DestinationIPGroups, &out.DestinationIPGroups
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.DestinationPorts != nil {
		in, out := &in.DestinationPorts, &out.DestinationPorts
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
	if in.Protocol != nil {
		in, out := &in.Protocol, &out.Protocol
		*out = new(string)
		**out = **in
	}
	if in.SourceAddresses != nil {
		in, out := &in.SourceAddresses, &out.SourceAddresses
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.SourceIPGroups != nil {
		in, out := &in.SourceIPGroups, &out.SourceIPGroups
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficBypassObservation.
func (in *TrafficBypassObservation) DeepCopy() *TrafficBypassObservation {
	if in == nil {
		return nil
	}
	out := new(TrafficBypassObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TrafficBypassParameters) DeepCopyInto(out *TrafficBypassParameters) {
	*out = *in
	if in.Description != nil {
		in, out := &in.Description, &out.Description
		*out = new(string)
		**out = **in
	}
	if in.DestinationAddresses != nil {
		in, out := &in.DestinationAddresses, &out.DestinationAddresses
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.DestinationIPGroups != nil {
		in, out := &in.DestinationIPGroups, &out.DestinationIPGroups
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.DestinationPorts != nil {
		in, out := &in.DestinationPorts, &out.DestinationPorts
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
	if in.Protocol != nil {
		in, out := &in.Protocol, &out.Protocol
		*out = new(string)
		**out = **in
	}
	if in.SourceAddresses != nil {
		in, out := &in.SourceAddresses, &out.SourceAddresses
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.SourceIPGroups != nil {
		in, out := &in.SourceIPGroups, &out.SourceIPGroups
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficBypassParameters.
func (in *TrafficBypassParameters) DeepCopy() *TrafficBypassParameters {
	if in == nil {
		return nil
	}
	out := new(TrafficBypassParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TrafficCategoryInitParameters) DeepCopyInto(out *TrafficCategoryInitParameters) {
	*out = *in
	if in.AllowEndpointEnabled != nil {
		in, out := &in.AllowEndpointEnabled, &out.AllowEndpointEnabled
		*out = new(bool)
		**out = **in
	}
	if in.DefaultEndpointEnabled != nil {
		in, out := &in.DefaultEndpointEnabled, &out.DefaultEndpointEnabled
		*out = new(bool)
		**out = **in
	}
	if in.OptimizeEndpointEnabled != nil {
		in, out := &in.OptimizeEndpointEnabled, &out.OptimizeEndpointEnabled
		*out = new(bool)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficCategoryInitParameters.
func (in *TrafficCategoryInitParameters) DeepCopy() *TrafficCategoryInitParameters {
	if in == nil {
		return nil
	}
	out := new(TrafficCategoryInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TrafficCategoryObservation) DeepCopyInto(out *TrafficCategoryObservation) {
	*out = *in
	if in.AllowEndpointEnabled != nil {
		in, out := &in.AllowEndpointEnabled, &out.AllowEndpointEnabled
		*out = new(bool)
		**out = **in
	}
	if in.DefaultEndpointEnabled != nil {
		in, out := &in.DefaultEndpointEnabled, &out.DefaultEndpointEnabled
		*out = new(bool)
		**out = **in
	}
	if in.OptimizeEndpointEnabled != nil {
		in, out := &in.OptimizeEndpointEnabled, &out.OptimizeEndpointEnabled
		*out = new(bool)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficCategoryObservation.
func (in *TrafficCategoryObservation) DeepCopy() *TrafficCategoryObservation {
	if in == nil {
		return nil
	}
	out := new(TrafficCategoryObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TrafficCategoryParameters) DeepCopyInto(out *TrafficCategoryParameters) {
	*out = *in
	if in.AllowEndpointEnabled != nil {
		in, out := &in.AllowEndpointEnabled, &out.AllowEndpointEnabled
		*out = new(bool)
		**out = **in
	}
	if in.DefaultEndpointEnabled != nil {
		in, out := &in.DefaultEndpointEnabled, &out.DefaultEndpointEnabled
		*out = new(bool)
		**out = **in
	}
	if in.OptimizeEndpointEnabled != nil {
		in, out := &in.OptimizeEndpointEnabled, &out.OptimizeEndpointEnabled
		*out = new(bool)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficCategoryParameters.
func (in *TrafficCategoryParameters) DeepCopy() *TrafficCategoryParameters {
	if in == nil {
		return nil
	}
	out := new(TrafficCategoryParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TrafficManagerProfile) DeepCopyInto(out *TrafficManagerProfile) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficManagerProfile.
func (in *TrafficManagerProfile) DeepCopy() *TrafficManagerProfile {
	if in == nil {
		return nil
	}
	out := new(TrafficManagerProfile)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *TrafficManagerProfile) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TrafficManagerProfileInitParameters) DeepCopyInto(out *TrafficManagerProfileInitParameters) {
	*out = *in
	if in.DNSConfig != nil {
		in, out := &in.DNSConfig, &out.DNSConfig
		*out = new(DNSConfigInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.MaxReturn != nil {
		in, out := &in.MaxReturn, &out.MaxReturn
		*out = new(float64)
		**out = **in
	}
	if in.MonitorConfig != nil {
		in, out := &in.MonitorConfig, &out.MonitorConfig
		*out = new(MonitorConfigInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.ProfileStatus != nil {
		in, out := &in.ProfileStatus, &out.ProfileStatus
		*out = new(string)
		**out = **in
	}
	if in.Tags != nil {
		in, out := &in.Tags, &out.Tags
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
	if in.TrafficRoutingMethod != nil {
		in, out := &in.TrafficRoutingMethod, &out.TrafficRoutingMethod
		*out = new(string)
		**out = **in
	}
	if in.TrafficViewEnabled != nil {
		in, out := &in.TrafficViewEnabled, &out.TrafficViewEnabled
		*out = new(bool)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficManagerProfileInitParameters.
func (in *TrafficManagerProfileInitParameters) DeepCopy() *TrafficManagerProfileInitParameters {
	if in == nil {
		return nil
	}
	out := new(TrafficManagerProfileInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TrafficManagerProfileList) DeepCopyInto(out *TrafficManagerProfileList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]TrafficManagerProfile, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficManagerProfileList.
func (in *TrafficManagerProfileList) DeepCopy() *TrafficManagerProfileList {
	if in == nil {
		return nil
	}
	out := new(TrafficManagerProfileList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *TrafficManagerProfileList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TrafficManagerProfileObservation) DeepCopyInto(out *TrafficManagerProfileObservation) {
	*out = *in
	if in.DNSConfig != nil {
		in, out := &in.DNSConfig, &out.DNSConfig
		*out = new(DNSConfigObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.Fqdn != nil {
		in, out := &in.Fqdn, &out.Fqdn
		*out = new(string)
		**out = **in
	}
	if in.ID != nil {
		in, out := &in.ID, &out.ID
		*out = new(string)
		**out = **in
	}
	if in.MaxReturn != nil {
		in, out := &in.MaxReturn, &out.MaxReturn
		*out = new(float64)
		**out = **in
	}
	if in.MonitorConfig != nil {
		in, out := &in.MonitorConfig, &out.MonitorConfig
		*out = new(MonitorConfigObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.ProfileStatus != nil {
		in, out := &in.ProfileStatus, &out.ProfileStatus
		*out = new(string)
		**out = **in
	}
	if in.ResourceGroupName != nil {
		in, out := &in.ResourceGroupName, &out.ResourceGroupName
		*out = new(string)
		**out = **in
	}
	if in.Tags != nil {
		in, out := &in.Tags, &out.Tags
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
	if in.TrafficRoutingMethod != nil {
		in, out := &in.TrafficRoutingMethod, &out.TrafficRoutingMethod
		*out = new(string)
		**out = **in
	}
	if in.TrafficViewEnabled != nil {
		in, out := &in.TrafficViewEnabled, &out.TrafficViewEnabled
		*out = new(bool)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficManagerProfileObservation.
func (in *TrafficManagerProfileObservation) DeepCopy() *TrafficManagerProfileObservation {
	if in == nil {
		return nil
	}
	out := new(TrafficManagerProfileObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TrafficManagerProfileParameters) DeepCopyInto(out *TrafficManagerProfileParameters) {
	*out = *in
	if in.DNSConfig != nil {
		in, out := &in.DNSConfig, &out.DNSConfig
		*out = new(DNSConfigParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.MaxReturn != nil {
		in, out := &in.MaxReturn, &out.MaxReturn
		*out = new(float64)
		**out = **in
	}
	if in.MonitorConfig != nil {
		in, out := &in.MonitorConfig, &out.MonitorConfig
		*out = new(MonitorConfigParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.ProfileStatus != nil {
		in, out := &in.ProfileStatus, &out.ProfileStatus
		*out = new(string)
		**out = **in
	}
	if in.ResourceGroupName != nil {
		in, out := &in.ResourceGroupName, &out.ResourceGroupName
		*out = new(string)
		**out = **in
	}
	if in.ResourceGroupNameRef != nil {
		in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef
		*out = new(v1.Reference)
		(*in).DeepCopyInto(*out)
	}
	if in.ResourceGroupNameSelector != nil {
		in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector
		*out = new(v1.Selector)
		(*in).DeepCopyInto(*out)
	}
	if in.Tags != nil {
		in, out := &in.Tags, &out.Tags
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
	if in.TrafficRoutingMethod != nil {
		in, out := &in.TrafficRoutingMethod, &out.TrafficRoutingMethod
		*out = new(string)
		**out = **in
	}
	if in.TrafficViewEnabled != nil {
		in, out := &in.TrafficViewEnabled, &out.TrafficViewEnabled
		*out = new(bool)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficManagerProfileParameters.
func (in *TrafficManagerProfileParameters) DeepCopy() *TrafficManagerProfileParameters {
	if in == nil {
		return nil
	}
	out := new(TrafficManagerProfileParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TrafficManagerProfileSpec) DeepCopyInto(out *TrafficManagerProfileSpec) {
	*out = *in
	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
	in.ForProvider.DeepCopyInto(&out.ForProvider)
	in.InitProvider.DeepCopyInto(&out.InitProvider)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficManagerProfileSpec.
func (in *TrafficManagerProfileSpec) DeepCopy() *TrafficManagerProfileSpec {
	if in == nil {
		return nil
	}
	out := new(TrafficManagerProfileSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TrafficManagerProfileStatus) DeepCopyInto(out *TrafficManagerProfileStatus) {
	*out = *in
	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
	in.AtProvider.DeepCopyInto(&out.AtProvider)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficManagerProfileStatus.
func (in *TrafficManagerProfileStatus) DeepCopy() *TrafficManagerProfileStatus {
	if in == nil {
		return nil
	}
	out := new(TrafficManagerProfileStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TrafficSelectorPolicyInitParameters) DeepCopyInto(out *TrafficSelectorPolicyInitParameters) {
	*out = *in
	if in.LocalAddressCidrs != nil {
		in, out := &in.LocalAddressCidrs, &out.LocalAddressCidrs
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.RemoteAddressCidrs != nil {
		in, out := &in.RemoteAddressCidrs, &out.RemoteAddressCidrs
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficSelectorPolicyInitParameters.
func (in *TrafficSelectorPolicyInitParameters) DeepCopy() *TrafficSelectorPolicyInitParameters {
	if in == nil {
		return nil
	}
	out := new(TrafficSelectorPolicyInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TrafficSelectorPolicyObservation) DeepCopyInto(out *TrafficSelectorPolicyObservation) {
	*out = *in
	if in.LocalAddressCidrs != nil {
		in, out := &in.LocalAddressCidrs, &out.LocalAddressCidrs
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.RemoteAddressCidrs != nil {
		in, out := &in.RemoteAddressCidrs, &out.RemoteAddressCidrs
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficSelectorPolicyObservation.
func (in *TrafficSelectorPolicyObservation) DeepCopy() *TrafficSelectorPolicyObservation {
	if in == nil {
		return nil
	}
	out := new(TrafficSelectorPolicyObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TrafficSelectorPolicyParameters) DeepCopyInto(out *TrafficSelectorPolicyParameters) {
	*out = *in
	if in.LocalAddressCidrs != nil {
		in, out := &in.LocalAddressCidrs, &out.LocalAddressCidrs
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.RemoteAddressCidrs != nil {
		in, out := &in.RemoteAddressCidrs, &out.RemoteAddressCidrs
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficSelectorPolicyParameters.
func (in *TrafficSelectorPolicyParameters) DeepCopy() *TrafficSelectorPolicyParameters {
	if in == nil {
		return nil
	}
	out := new(TrafficSelectorPolicyParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TrustedClientCertificateInitParameters) DeepCopyInto(out *TrustedClientCertificateInitParameters) {
	*out = *in
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrustedClientCertificateInitParameters.
func (in *TrustedClientCertificateInitParameters) DeepCopy() *TrustedClientCertificateInitParameters {
	if in == nil {
		return nil
	}
	out := new(TrustedClientCertificateInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TrustedClientCertificateObservation) DeepCopyInto(out *TrustedClientCertificateObservation) {
	*out = *in
	if in.ID != nil {
		in, out := &in.ID, &out.ID
		*out = new(string)
		**out = **in
	}
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrustedClientCertificateObservation.
func (in *TrustedClientCertificateObservation) DeepCopy() *TrustedClientCertificateObservation {
	if in == nil {
		return nil
	}
	out := new(TrustedClientCertificateObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TrustedClientCertificateParameters) DeepCopyInto(out *TrustedClientCertificateParameters) {
	*out = *in
	out.DataSecretRef = in.DataSecretRef
	if in.Name != nil {
		in, out := &in.Name, &out.Name
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrustedClientCertificateParameters.
func (in *TrustedClientCertificateParameters) DeepCopy() *TrustedClientCertificateParameters {
	if in == nil {
		return nil
	}
	out := new(TrustedClientCertificateParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TrustedRootCertificateInitParameters) DeepCopyInto(out *TrustedRootCertificateInitParameters) { + *out = *in + if in.KeyVaultSecretID != nil { + in, out := &in.KeyVaultSecretID, &out.KeyVaultSecretID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrustedRootCertificateInitParameters. +func (in *TrustedRootCertificateInitParameters) DeepCopy() *TrustedRootCertificateInitParameters { + if in == nil { + return nil + } + out := new(TrustedRootCertificateInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrustedRootCertificateObservation) DeepCopyInto(out *TrustedRootCertificateObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.KeyVaultSecretID != nil { + in, out := &in.KeyVaultSecretID, &out.KeyVaultSecretID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrustedRootCertificateObservation. +func (in *TrustedRootCertificateObservation) DeepCopy() *TrustedRootCertificateObservation { + if in == nil { + return nil + } + out := new(TrustedRootCertificateObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TrustedRootCertificateParameters) DeepCopyInto(out *TrustedRootCertificateParameters) { + *out = *in + if in.DataSecretRef != nil { + in, out := &in.DataSecretRef, &out.DataSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.KeyVaultSecretID != nil { + in, out := &in.KeyVaultSecretID, &out.KeyVaultSecretID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrustedRootCertificateParameters. +func (in *TrustedRootCertificateParameters) DeepCopy() *TrustedRootCertificateParameters { + if in == nil { + return nil + } + out := new(TrustedRootCertificateParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *URLInitParameters) DeepCopyInto(out *URLInitParameters) { + *out = *in + if in.Components != nil { + in, out := &in.Components, &out.Components + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.QueryString != nil { + in, out := &in.QueryString, &out.QueryString + *out = new(string) + **out = **in + } + if in.Reroute != nil { + in, out := &in.Reroute, &out.Reroute + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new URLInitParameters. +func (in *URLInitParameters) DeepCopy() *URLInitParameters { + if in == nil { + return nil + } + out := new(URLInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *URLObservation) DeepCopyInto(out *URLObservation) { + *out = *in + if in.Components != nil { + in, out := &in.Components, &out.Components + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.QueryString != nil { + in, out := &in.QueryString, &out.QueryString + *out = new(string) + **out = **in + } + if in.Reroute != nil { + in, out := &in.Reroute, &out.Reroute + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new URLObservation. +func (in *URLObservation) DeepCopy() *URLObservation { + if in == nil { + return nil + } + out := new(URLObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *URLParameters) DeepCopyInto(out *URLParameters) { + *out = *in + if in.Components != nil { + in, out := &in.Components, &out.Components + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.QueryString != nil { + in, out := &in.QueryString, &out.QueryString + *out = new(string) + **out = **in + } + if in.Reroute != nil { + in, out := &in.Reroute, &out.Reroute + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new URLParameters. +func (in *URLParameters) DeepCopy() *URLParameters { + if in == nil { + return nil + } + out := new(URLParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *URLPathMapInitParameters) DeepCopyInto(out *URLPathMapInitParameters) { + *out = *in + if in.DefaultBackendAddressPoolName != nil { + in, out := &in.DefaultBackendAddressPoolName, &out.DefaultBackendAddressPoolName + *out = new(string) + **out = **in + } + if in.DefaultBackendHTTPSettingsName != nil { + in, out := &in.DefaultBackendHTTPSettingsName, &out.DefaultBackendHTTPSettingsName + *out = new(string) + **out = **in + } + if in.DefaultRedirectConfigurationName != nil { + in, out := &in.DefaultRedirectConfigurationName, &out.DefaultRedirectConfigurationName + *out = new(string) + **out = **in + } + if in.DefaultRewriteRuleSetName != nil { + in, out := &in.DefaultRewriteRuleSetName, &out.DefaultRewriteRuleSetName + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PathRule != nil { + in, out := &in.PathRule, &out.PathRule + *out = make([]PathRuleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new URLPathMapInitParameters. +func (in *URLPathMapInitParameters) DeepCopy() *URLPathMapInitParameters { + if in == nil { + return nil + } + out := new(URLPathMapInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *URLPathMapObservation) DeepCopyInto(out *URLPathMapObservation) { + *out = *in + if in.DefaultBackendAddressPoolID != nil { + in, out := &in.DefaultBackendAddressPoolID, &out.DefaultBackendAddressPoolID + *out = new(string) + **out = **in + } + if in.DefaultBackendAddressPoolName != nil { + in, out := &in.DefaultBackendAddressPoolName, &out.DefaultBackendAddressPoolName + *out = new(string) + **out = **in + } + if in.DefaultBackendHTTPSettingsID != nil { + in, out := &in.DefaultBackendHTTPSettingsID, &out.DefaultBackendHTTPSettingsID + *out = new(string) + **out = **in + } + if in.DefaultBackendHTTPSettingsName != nil { + in, out := &in.DefaultBackendHTTPSettingsName, &out.DefaultBackendHTTPSettingsName + *out = new(string) + **out = **in + } + if in.DefaultRedirectConfigurationID != nil { + in, out := &in.DefaultRedirectConfigurationID, &out.DefaultRedirectConfigurationID + *out = new(string) + **out = **in + } + if in.DefaultRedirectConfigurationName != nil { + in, out := &in.DefaultRedirectConfigurationName, &out.DefaultRedirectConfigurationName + *out = new(string) + **out = **in + } + if in.DefaultRewriteRuleSetID != nil { + in, out := &in.DefaultRewriteRuleSetID, &out.DefaultRewriteRuleSetID + *out = new(string) + **out = **in + } + if in.DefaultRewriteRuleSetName != nil { + in, out := &in.DefaultRewriteRuleSetName, &out.DefaultRewriteRuleSetName + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PathRule != nil { + in, out := &in.PathRule, &out.PathRule + *out = make([]PathRuleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new URLPathMapObservation. 
+func (in *URLPathMapObservation) DeepCopy() *URLPathMapObservation { + if in == nil { + return nil + } + out := new(URLPathMapObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *URLPathMapParameters) DeepCopyInto(out *URLPathMapParameters) { + *out = *in + if in.DefaultBackendAddressPoolName != nil { + in, out := &in.DefaultBackendAddressPoolName, &out.DefaultBackendAddressPoolName + *out = new(string) + **out = **in + } + if in.DefaultBackendHTTPSettingsName != nil { + in, out := &in.DefaultBackendHTTPSettingsName, &out.DefaultBackendHTTPSettingsName + *out = new(string) + **out = **in + } + if in.DefaultRedirectConfigurationName != nil { + in, out := &in.DefaultRedirectConfigurationName, &out.DefaultRedirectConfigurationName + *out = new(string) + **out = **in + } + if in.DefaultRewriteRuleSetName != nil { + in, out := &in.DefaultRewriteRuleSetName, &out.DefaultRewriteRuleSetName + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PathRule != nil { + in, out := &in.PathRule, &out.PathRule + *out = make([]PathRuleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new URLPathMapParameters. +func (in *URLPathMapParameters) DeepCopy() *URLPathMapParameters { + if in == nil { + return nil + } + out := new(URLPathMapParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPNClientAddressPoolInitParameters) DeepCopyInto(out *VPNClientAddressPoolInitParameters) { + *out = *in + if in.AddressPrefixes != nil { + in, out := &in.AddressPrefixes, &out.AddressPrefixes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNClientAddressPoolInitParameters. +func (in *VPNClientAddressPoolInitParameters) DeepCopy() *VPNClientAddressPoolInitParameters { + if in == nil { + return nil + } + out := new(VPNClientAddressPoolInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPNClientAddressPoolObservation) DeepCopyInto(out *VPNClientAddressPoolObservation) { + *out = *in + if in.AddressPrefixes != nil { + in, out := &in.AddressPrefixes, &out.AddressPrefixes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNClientAddressPoolObservation. +func (in *VPNClientAddressPoolObservation) DeepCopy() *VPNClientAddressPoolObservation { + if in == nil { + return nil + } + out := new(VPNClientAddressPoolObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPNClientAddressPoolParameters) DeepCopyInto(out *VPNClientAddressPoolParameters) { + *out = *in + if in.AddressPrefixes != nil { + in, out := &in.AddressPrefixes, &out.AddressPrefixes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNClientAddressPoolParameters. +func (in *VPNClientAddressPoolParameters) DeepCopy() *VPNClientAddressPoolParameters { + if in == nil { + return nil + } + out := new(VPNClientAddressPoolParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPNClientConfigurationInitParameters) DeepCopyInto(out *VPNClientConfigurationInitParameters) { + *out = *in + if in.AADAudience != nil { + in, out := &in.AADAudience, &out.AADAudience + *out = new(string) + **out = **in + } + if in.AADIssuer != nil { + in, out := &in.AADIssuer, &out.AADIssuer + *out = new(string) + **out = **in + } + if in.AADTenant != nil { + in, out := &in.AADTenant, &out.AADTenant + *out = new(string) + **out = **in + } + if in.AddressSpace != nil { + in, out := &in.AddressSpace, &out.AddressSpace + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IpsecPolicy != nil { + in, out := &in.IpsecPolicy, &out.IpsecPolicy + *out = new(IpsecPolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RadiusServer != nil { + in, out := &in.RadiusServer, &out.RadiusServer + *out = make([]RadiusServerInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RadiusServerAddress != nil { + in, out := &in.RadiusServerAddress, &out.RadiusServerAddress + *out = new(string) + **out = **in + } + 
if in.RadiusServerSecret != nil { + in, out := &in.RadiusServerSecret, &out.RadiusServerSecret + *out = new(string) + **out = **in + } + if in.RevokedCertificate != nil { + in, out := &in.RevokedCertificate, &out.RevokedCertificate + *out = make([]RevokedCertificateInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RootCertificate != nil { + in, out := &in.RootCertificate, &out.RootCertificate + *out = make([]RootCertificateInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VPNAuthTypes != nil { + in, out := &in.VPNAuthTypes, &out.VPNAuthTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VPNClientProtocols != nil { + in, out := &in.VPNClientProtocols, &out.VPNClientProtocols + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VirtualNetworkGatewayClientConnection != nil { + in, out := &in.VirtualNetworkGatewayClientConnection, &out.VirtualNetworkGatewayClientConnection + *out = make([]VirtualNetworkGatewayClientConnectionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNClientConfigurationInitParameters. +func (in *VPNClientConfigurationInitParameters) DeepCopy() *VPNClientConfigurationInitParameters { + if in == nil { + return nil + } + out := new(VPNClientConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPNClientConfigurationObservation) DeepCopyInto(out *VPNClientConfigurationObservation) { + *out = *in + if in.AADAudience != nil { + in, out := &in.AADAudience, &out.AADAudience + *out = new(string) + **out = **in + } + if in.AADIssuer != nil { + in, out := &in.AADIssuer, &out.AADIssuer + *out = new(string) + **out = **in + } + if in.AADTenant != nil { + in, out := &in.AADTenant, &out.AADTenant + *out = new(string) + **out = **in + } + if in.AddressSpace != nil { + in, out := &in.AddressSpace, &out.AddressSpace + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IpsecPolicy != nil { + in, out := &in.IpsecPolicy, &out.IpsecPolicy + *out = new(IpsecPolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.RadiusServer != nil { + in, out := &in.RadiusServer, &out.RadiusServer + *out = make([]RadiusServerObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RadiusServerAddress != nil { + in, out := &in.RadiusServerAddress, &out.RadiusServerAddress + *out = new(string) + **out = **in + } + if in.RadiusServerSecret != nil { + in, out := &in.RadiusServerSecret, &out.RadiusServerSecret + *out = new(string) + **out = **in + } + if in.RevokedCertificate != nil { + in, out := &in.RevokedCertificate, &out.RevokedCertificate + *out = make([]RevokedCertificateObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RootCertificate != nil { + in, out := &in.RootCertificate, &out.RootCertificate + *out = make([]RootCertificateObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VPNAuthTypes != nil { + in, out := &in.VPNAuthTypes, &out.VPNAuthTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if 
in.VPNClientProtocols != nil { + in, out := &in.VPNClientProtocols, &out.VPNClientProtocols + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VirtualNetworkGatewayClientConnection != nil { + in, out := &in.VirtualNetworkGatewayClientConnection, &out.VirtualNetworkGatewayClientConnection + *out = make([]VirtualNetworkGatewayClientConnectionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNClientConfigurationObservation. +func (in *VPNClientConfigurationObservation) DeepCopy() *VPNClientConfigurationObservation { + if in == nil { + return nil + } + out := new(VPNClientConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPNClientConfigurationParameters) DeepCopyInto(out *VPNClientConfigurationParameters) { + *out = *in + if in.AADAudience != nil { + in, out := &in.AADAudience, &out.AADAudience + *out = new(string) + **out = **in + } + if in.AADIssuer != nil { + in, out := &in.AADIssuer, &out.AADIssuer + *out = new(string) + **out = **in + } + if in.AADTenant != nil { + in, out := &in.AADTenant, &out.AADTenant + *out = new(string) + **out = **in + } + if in.AddressSpace != nil { + in, out := &in.AddressSpace, &out.AddressSpace + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IpsecPolicy != nil { + in, out := &in.IpsecPolicy, &out.IpsecPolicy + *out = new(IpsecPolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.RadiusServer != nil { + in, out := &in.RadiusServer, &out.RadiusServer + *out = make([]RadiusServerParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RadiusServerAddress != nil { + in, out := &in.RadiusServerAddress, &out.RadiusServerAddress + *out = new(string) + **out = **in + } + if in.RadiusServerSecret != nil { + in, out := &in.RadiusServerSecret, &out.RadiusServerSecret + *out = new(string) + **out = **in + } + if in.RevokedCertificate != nil { + in, out := &in.RevokedCertificate, &out.RevokedCertificate + *out = make([]RevokedCertificateParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RootCertificate != nil { + in, out := &in.RootCertificate, &out.RootCertificate + *out = make([]RootCertificateParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VPNAuthTypes != nil { + in, out := &in.VPNAuthTypes, &out.VPNAuthTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if 
in.VPNClientProtocols != nil { + in, out := &in.VPNClientProtocols, &out.VPNClientProtocols + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VirtualNetworkGatewayClientConnection != nil { + in, out := &in.VirtualNetworkGatewayClientConnection, &out.VirtualNetworkGatewayClientConnection + *out = make([]VirtualNetworkGatewayClientConnectionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNClientConfigurationParameters. +func (in *VPNClientConfigurationParameters) DeepCopy() *VPNClientConfigurationParameters { + if in == nil { + return nil + } + out := new(VPNClientConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPNGateway) DeepCopyInto(out *VPNGateway) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNGateway. +func (in *VPNGateway) DeepCopy() *VPNGateway { + if in == nil { + return nil + } + out := new(VPNGateway) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VPNGateway) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPNGatewayBGPSettingsInitParameters) DeepCopyInto(out *VPNGatewayBGPSettingsInitParameters) { + *out = *in + if in.Asn != nil { + in, out := &in.Asn, &out.Asn + *out = new(float64) + **out = **in + } + if in.Instance0BGPPeeringAddress != nil { + in, out := &in.Instance0BGPPeeringAddress, &out.Instance0BGPPeeringAddress + *out = new(Instance0BGPPeeringAddressInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Instance1BGPPeeringAddress != nil { + in, out := &in.Instance1BGPPeeringAddress, &out.Instance1BGPPeeringAddress + *out = new(Instance1BGPPeeringAddressInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PeerWeight != nil { + in, out := &in.PeerWeight, &out.PeerWeight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNGatewayBGPSettingsInitParameters. +func (in *VPNGatewayBGPSettingsInitParameters) DeepCopy() *VPNGatewayBGPSettingsInitParameters { + if in == nil { + return nil + } + out := new(VPNGatewayBGPSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPNGatewayBGPSettingsObservation) DeepCopyInto(out *VPNGatewayBGPSettingsObservation) { + *out = *in + if in.Asn != nil { + in, out := &in.Asn, &out.Asn + *out = new(float64) + **out = **in + } + if in.BGPPeeringAddress != nil { + in, out := &in.BGPPeeringAddress, &out.BGPPeeringAddress + *out = new(string) + **out = **in + } + if in.Instance0BGPPeeringAddress != nil { + in, out := &in.Instance0BGPPeeringAddress, &out.Instance0BGPPeeringAddress + *out = new(Instance0BGPPeeringAddressObservation) + (*in).DeepCopyInto(*out) + } + if in.Instance1BGPPeeringAddress != nil { + in, out := &in.Instance1BGPPeeringAddress, &out.Instance1BGPPeeringAddress + *out = new(Instance1BGPPeeringAddressObservation) + (*in).DeepCopyInto(*out) + } + if in.PeerWeight != nil { + in, out := &in.PeerWeight, &out.PeerWeight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNGatewayBGPSettingsObservation. +func (in *VPNGatewayBGPSettingsObservation) DeepCopy() *VPNGatewayBGPSettingsObservation { + if in == nil { + return nil + } + out := new(VPNGatewayBGPSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPNGatewayBGPSettingsParameters) DeepCopyInto(out *VPNGatewayBGPSettingsParameters) { + *out = *in + if in.Asn != nil { + in, out := &in.Asn, &out.Asn + *out = new(float64) + **out = **in + } + if in.Instance0BGPPeeringAddress != nil { + in, out := &in.Instance0BGPPeeringAddress, &out.Instance0BGPPeeringAddress + *out = new(Instance0BGPPeeringAddressParameters) + (*in).DeepCopyInto(*out) + } + if in.Instance1BGPPeeringAddress != nil { + in, out := &in.Instance1BGPPeeringAddress, &out.Instance1BGPPeeringAddress + *out = new(Instance1BGPPeeringAddressParameters) + (*in).DeepCopyInto(*out) + } + if in.PeerWeight != nil { + in, out := &in.PeerWeight, &out.PeerWeight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNGatewayBGPSettingsParameters. +func (in *VPNGatewayBGPSettingsParameters) DeepCopy() *VPNGatewayBGPSettingsParameters { + if in == nil { + return nil + } + out := new(VPNGatewayBGPSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPNGatewayConnection) DeepCopyInto(out *VPNGatewayConnection) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNGatewayConnection. +func (in *VPNGatewayConnection) DeepCopy() *VPNGatewayConnection { + if in == nil { + return nil + } + out := new(VPNGatewayConnection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *VPNGatewayConnection) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPNGatewayConnectionInitParameters) DeepCopyInto(out *VPNGatewayConnectionInitParameters) { + *out = *in + if in.InternetSecurityEnabled != nil { + in, out := &in.InternetSecurityEnabled, &out.InternetSecurityEnabled + *out = new(bool) + **out = **in + } + if in.RemoteVPNSiteID != nil { + in, out := &in.RemoteVPNSiteID, &out.RemoteVPNSiteID + *out = new(string) + **out = **in + } + if in.RemoteVPNSiteIDRef != nil { + in, out := &in.RemoteVPNSiteIDRef, &out.RemoteVPNSiteIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RemoteVPNSiteIDSelector != nil { + in, out := &in.RemoteVPNSiteIDSelector, &out.RemoteVPNSiteIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Routing != nil { + in, out := &in.Routing, &out.Routing + *out = new(VPNGatewayConnectionRoutingInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TrafficSelectorPolicy != nil { + in, out := &in.TrafficSelectorPolicy, &out.TrafficSelectorPolicy + *out = make([]VPNGatewayConnectionTrafficSelectorPolicyInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VPNLink != nil { + in, out := &in.VPNLink, &out.VPNLink + *out = make([]VPNLinkInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNGatewayConnectionInitParameters. 
+func (in *VPNGatewayConnectionInitParameters) DeepCopy() *VPNGatewayConnectionInitParameters { + if in == nil { + return nil + } + out := new(VPNGatewayConnectionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPNGatewayConnectionList) DeepCopyInto(out *VPNGatewayConnectionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VPNGatewayConnection, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNGatewayConnectionList. +func (in *VPNGatewayConnectionList) DeepCopy() *VPNGatewayConnectionList { + if in == nil { + return nil + } + out := new(VPNGatewayConnectionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VPNGatewayConnectionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPNGatewayConnectionObservation) DeepCopyInto(out *VPNGatewayConnectionObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InternetSecurityEnabled != nil { + in, out := &in.InternetSecurityEnabled, &out.InternetSecurityEnabled + *out = new(bool) + **out = **in + } + if in.RemoteVPNSiteID != nil { + in, out := &in.RemoteVPNSiteID, &out.RemoteVPNSiteID + *out = new(string) + **out = **in + } + if in.Routing != nil { + in, out := &in.Routing, &out.Routing + *out = new(VPNGatewayConnectionRoutingObservation) + (*in).DeepCopyInto(*out) + } + if in.TrafficSelectorPolicy != nil { + in, out := &in.TrafficSelectorPolicy, &out.TrafficSelectorPolicy + *out = make([]VPNGatewayConnectionTrafficSelectorPolicyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VPNGatewayID != nil { + in, out := &in.VPNGatewayID, &out.VPNGatewayID + *out = new(string) + **out = **in + } + if in.VPNLink != nil { + in, out := &in.VPNLink, &out.VPNLink + *out = make([]VPNLinkObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNGatewayConnectionObservation. +func (in *VPNGatewayConnectionObservation) DeepCopy() *VPNGatewayConnectionObservation { + if in == nil { + return nil + } + out := new(VPNGatewayConnectionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPNGatewayConnectionParameters) DeepCopyInto(out *VPNGatewayConnectionParameters) { + *out = *in + if in.InternetSecurityEnabled != nil { + in, out := &in.InternetSecurityEnabled, &out.InternetSecurityEnabled + *out = new(bool) + **out = **in + } + if in.RemoteVPNSiteID != nil { + in, out := &in.RemoteVPNSiteID, &out.RemoteVPNSiteID + *out = new(string) + **out = **in + } + if in.RemoteVPNSiteIDRef != nil { + in, out := &in.RemoteVPNSiteIDRef, &out.RemoteVPNSiteIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RemoteVPNSiteIDSelector != nil { + in, out := &in.RemoteVPNSiteIDSelector, &out.RemoteVPNSiteIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Routing != nil { + in, out := &in.Routing, &out.Routing + *out = new(VPNGatewayConnectionRoutingParameters) + (*in).DeepCopyInto(*out) + } + if in.TrafficSelectorPolicy != nil { + in, out := &in.TrafficSelectorPolicy, &out.TrafficSelectorPolicy + *out = make([]VPNGatewayConnectionTrafficSelectorPolicyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VPNGatewayID != nil { + in, out := &in.VPNGatewayID, &out.VPNGatewayID + *out = new(string) + **out = **in + } + if in.VPNGatewayIDRef != nil { + in, out := &in.VPNGatewayIDRef, &out.VPNGatewayIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VPNGatewayIDSelector != nil { + in, out := &in.VPNGatewayIDSelector, &out.VPNGatewayIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.VPNLink != nil { + in, out := &in.VPNLink, &out.VPNLink + *out = make([]VPNLinkParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNGatewayConnectionParameters. 
+func (in *VPNGatewayConnectionParameters) DeepCopy() *VPNGatewayConnectionParameters { + if in == nil { + return nil + } + out := new(VPNGatewayConnectionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPNGatewayConnectionRoutingInitParameters) DeepCopyInto(out *VPNGatewayConnectionRoutingInitParameters) { + *out = *in + if in.AssociatedRouteTable != nil { + in, out := &in.AssociatedRouteTable, &out.AssociatedRouteTable + *out = new(string) + **out = **in + } + if in.InboundRouteMapID != nil { + in, out := &in.InboundRouteMapID, &out.InboundRouteMapID + *out = new(string) + **out = **in + } + if in.OutboundRouteMapID != nil { + in, out := &in.OutboundRouteMapID, &out.OutboundRouteMapID + *out = new(string) + **out = **in + } + if in.PropagatedRouteTable != nil { + in, out := &in.PropagatedRouteTable, &out.PropagatedRouteTable + *out = new(VPNGatewayConnectionRoutingPropagatedRouteTableInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNGatewayConnectionRoutingInitParameters. +func (in *VPNGatewayConnectionRoutingInitParameters) DeepCopy() *VPNGatewayConnectionRoutingInitParameters { + if in == nil { + return nil + } + out := new(VPNGatewayConnectionRoutingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPNGatewayConnectionRoutingObservation) DeepCopyInto(out *VPNGatewayConnectionRoutingObservation) { + *out = *in + if in.AssociatedRouteTable != nil { + in, out := &in.AssociatedRouteTable, &out.AssociatedRouteTable + *out = new(string) + **out = **in + } + if in.InboundRouteMapID != nil { + in, out := &in.InboundRouteMapID, &out.InboundRouteMapID + *out = new(string) + **out = **in + } + if in.OutboundRouteMapID != nil { + in, out := &in.OutboundRouteMapID, &out.OutboundRouteMapID + *out = new(string) + **out = **in + } + if in.PropagatedRouteTable != nil { + in, out := &in.PropagatedRouteTable, &out.PropagatedRouteTable + *out = new(VPNGatewayConnectionRoutingPropagatedRouteTableObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNGatewayConnectionRoutingObservation. +func (in *VPNGatewayConnectionRoutingObservation) DeepCopy() *VPNGatewayConnectionRoutingObservation { + if in == nil { + return nil + } + out := new(VPNGatewayConnectionRoutingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPNGatewayConnectionRoutingParameters) DeepCopyInto(out *VPNGatewayConnectionRoutingParameters) { + *out = *in + if in.AssociatedRouteTable != nil { + in, out := &in.AssociatedRouteTable, &out.AssociatedRouteTable + *out = new(string) + **out = **in + } + if in.InboundRouteMapID != nil { + in, out := &in.InboundRouteMapID, &out.InboundRouteMapID + *out = new(string) + **out = **in + } + if in.OutboundRouteMapID != nil { + in, out := &in.OutboundRouteMapID, &out.OutboundRouteMapID + *out = new(string) + **out = **in + } + if in.PropagatedRouteTable != nil { + in, out := &in.PropagatedRouteTable, &out.PropagatedRouteTable + *out = new(VPNGatewayConnectionRoutingPropagatedRouteTableParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNGatewayConnectionRoutingParameters. +func (in *VPNGatewayConnectionRoutingParameters) DeepCopy() *VPNGatewayConnectionRoutingParameters { + if in == nil { + return nil + } + out := new(VPNGatewayConnectionRoutingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPNGatewayConnectionRoutingPropagatedRouteTableInitParameters) DeepCopyInto(out *VPNGatewayConnectionRoutingPropagatedRouteTableInitParameters) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RouteTableIds != nil { + in, out := &in.RouteTableIds, &out.RouteTableIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNGatewayConnectionRoutingPropagatedRouteTableInitParameters. +func (in *VPNGatewayConnectionRoutingPropagatedRouteTableInitParameters) DeepCopy() *VPNGatewayConnectionRoutingPropagatedRouteTableInitParameters { + if in == nil { + return nil + } + out := new(VPNGatewayConnectionRoutingPropagatedRouteTableInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPNGatewayConnectionRoutingPropagatedRouteTableObservation) DeepCopyInto(out *VPNGatewayConnectionRoutingPropagatedRouteTableObservation) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RouteTableIds != nil { + in, out := &in.RouteTableIds, &out.RouteTableIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNGatewayConnectionRoutingPropagatedRouteTableObservation. +func (in *VPNGatewayConnectionRoutingPropagatedRouteTableObservation) DeepCopy() *VPNGatewayConnectionRoutingPropagatedRouteTableObservation { + if in == nil { + return nil + } + out := new(VPNGatewayConnectionRoutingPropagatedRouteTableObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPNGatewayConnectionRoutingPropagatedRouteTableParameters) DeepCopyInto(out *VPNGatewayConnectionRoutingPropagatedRouteTableParameters) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RouteTableIds != nil { + in, out := &in.RouteTableIds, &out.RouteTableIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNGatewayConnectionRoutingPropagatedRouteTableParameters. 
+func (in *VPNGatewayConnectionRoutingPropagatedRouteTableParameters) DeepCopy() *VPNGatewayConnectionRoutingPropagatedRouteTableParameters { + if in == nil { + return nil + } + out := new(VPNGatewayConnectionRoutingPropagatedRouteTableParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPNGatewayConnectionSpec) DeepCopyInto(out *VPNGatewayConnectionSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNGatewayConnectionSpec. +func (in *VPNGatewayConnectionSpec) DeepCopy() *VPNGatewayConnectionSpec { + if in == nil { + return nil + } + out := new(VPNGatewayConnectionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPNGatewayConnectionStatus) DeepCopyInto(out *VPNGatewayConnectionStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNGatewayConnectionStatus. +func (in *VPNGatewayConnectionStatus) DeepCopy() *VPNGatewayConnectionStatus { + if in == nil { + return nil + } + out := new(VPNGatewayConnectionStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPNGatewayConnectionTrafficSelectorPolicyInitParameters) DeepCopyInto(out *VPNGatewayConnectionTrafficSelectorPolicyInitParameters) { + *out = *in + if in.LocalAddressRanges != nil { + in, out := &in.LocalAddressRanges, &out.LocalAddressRanges + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RemoteAddressRanges != nil { + in, out := &in.RemoteAddressRanges, &out.RemoteAddressRanges + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNGatewayConnectionTrafficSelectorPolicyInitParameters. +func (in *VPNGatewayConnectionTrafficSelectorPolicyInitParameters) DeepCopy() *VPNGatewayConnectionTrafficSelectorPolicyInitParameters { + if in == nil { + return nil + } + out := new(VPNGatewayConnectionTrafficSelectorPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPNGatewayConnectionTrafficSelectorPolicyObservation) DeepCopyInto(out *VPNGatewayConnectionTrafficSelectorPolicyObservation) { + *out = *in + if in.LocalAddressRanges != nil { + in, out := &in.LocalAddressRanges, &out.LocalAddressRanges + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RemoteAddressRanges != nil { + in, out := &in.RemoteAddressRanges, &out.RemoteAddressRanges + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNGatewayConnectionTrafficSelectorPolicyObservation. +func (in *VPNGatewayConnectionTrafficSelectorPolicyObservation) DeepCopy() *VPNGatewayConnectionTrafficSelectorPolicyObservation { + if in == nil { + return nil + } + out := new(VPNGatewayConnectionTrafficSelectorPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPNGatewayConnectionTrafficSelectorPolicyParameters) DeepCopyInto(out *VPNGatewayConnectionTrafficSelectorPolicyParameters) { + *out = *in + if in.LocalAddressRanges != nil { + in, out := &in.LocalAddressRanges, &out.LocalAddressRanges + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RemoteAddressRanges != nil { + in, out := &in.RemoteAddressRanges, &out.RemoteAddressRanges + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNGatewayConnectionTrafficSelectorPolicyParameters. +func (in *VPNGatewayConnectionTrafficSelectorPolicyParameters) DeepCopy() *VPNGatewayConnectionTrafficSelectorPolicyParameters { + if in == nil { + return nil + } + out := new(VPNGatewayConnectionTrafficSelectorPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPNGatewayInitParameters) DeepCopyInto(out *VPNGatewayInitParameters) { + *out = *in + if in.BGPRouteTranslationForNATEnabled != nil { + in, out := &in.BGPRouteTranslationForNATEnabled, &out.BGPRouteTranslationForNATEnabled + *out = new(bool) + **out = **in + } + if in.BGPSettings != nil { + in, out := &in.BGPSettings, &out.BGPSettings + *out = new(VPNGatewayBGPSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.RoutingPreference != nil { + in, out := &in.RoutingPreference, &out.RoutingPreference + *out = new(string) + **out = **in + } + if in.ScaleUnit != nil { + in, out := &in.ScaleUnit, &out.ScaleUnit + *out = new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VirtualHubID != nil { + in, out := &in.VirtualHubID, &out.VirtualHubID + *out = new(string) + **out = **in + } + if in.VirtualHubIDRef != nil { + in, out := &in.VirtualHubIDRef, &out.VirtualHubIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualHubIDSelector != nil { + in, out := &in.VirtualHubIDSelector, &out.VirtualHubIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNGatewayInitParameters. +func (in *VPNGatewayInitParameters) DeepCopy() *VPNGatewayInitParameters { + if in == nil { + return nil + } + out := new(VPNGatewayInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPNGatewayList) DeepCopyInto(out *VPNGatewayList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VPNGateway, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNGatewayList. +func (in *VPNGatewayList) DeepCopy() *VPNGatewayList { + if in == nil { + return nil + } + out := new(VPNGatewayList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VPNGatewayList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPNGatewayObservation) DeepCopyInto(out *VPNGatewayObservation) { + *out = *in + if in.BGPRouteTranslationForNATEnabled != nil { + in, out := &in.BGPRouteTranslationForNATEnabled, &out.BGPRouteTranslationForNATEnabled + *out = new(bool) + **out = **in + } + if in.BGPSettings != nil { + in, out := &in.BGPSettings, &out.BGPSettings + *out = new(VPNGatewayBGPSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.RoutingPreference != nil { + in, out := &in.RoutingPreference, &out.RoutingPreference + *out = new(string) + **out = **in + } + if in.ScaleUnit != nil { + in, out := &in.ScaleUnit, &out.ScaleUnit + *out = new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = 
make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VirtualHubID != nil { + in, out := &in.VirtualHubID, &out.VirtualHubID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNGatewayObservation. +func (in *VPNGatewayObservation) DeepCopy() *VPNGatewayObservation { + if in == nil { + return nil + } + out := new(VPNGatewayObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPNGatewayParameters) DeepCopyInto(out *VPNGatewayParameters) { + *out = *in + if in.BGPRouteTranslationForNATEnabled != nil { + in, out := &in.BGPRouteTranslationForNATEnabled, &out.BGPRouteTranslationForNATEnabled + *out = new(bool) + **out = **in + } + if in.BGPSettings != nil { + in, out := &in.BGPSettings, &out.BGPSettings + *out = new(VPNGatewayBGPSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RoutingPreference != nil { + in, out := &in.RoutingPreference, &out.RoutingPreference + *out = new(string) + **out = **in + } + if in.ScaleUnit != nil { + in, out := &in.ScaleUnit, &out.ScaleUnit + *out = 
new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VirtualHubID != nil { + in, out := &in.VirtualHubID, &out.VirtualHubID + *out = new(string) + **out = **in + } + if in.VirtualHubIDRef != nil { + in, out := &in.VirtualHubIDRef, &out.VirtualHubIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualHubIDSelector != nil { + in, out := &in.VirtualHubIDSelector, &out.VirtualHubIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNGatewayParameters. +func (in *VPNGatewayParameters) DeepCopy() *VPNGatewayParameters { + if in == nil { + return nil + } + out := new(VPNGatewayParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPNGatewaySpec) DeepCopyInto(out *VPNGatewaySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNGatewaySpec. +func (in *VPNGatewaySpec) DeepCopy() *VPNGatewaySpec { + if in == nil { + return nil + } + out := new(VPNGatewaySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPNGatewayStatus) DeepCopyInto(out *VPNGatewayStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNGatewayStatus. +func (in *VPNGatewayStatus) DeepCopy() *VPNGatewayStatus { + if in == nil { + return nil + } + out := new(VPNGatewayStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPNLinkInitParameters) DeepCopyInto(out *VPNLinkInitParameters) { + *out = *in + if in.BGPEnabled != nil { + in, out := &in.BGPEnabled, &out.BGPEnabled + *out = new(bool) + **out = **in + } + if in.BandwidthMbps != nil { + in, out := &in.BandwidthMbps, &out.BandwidthMbps + *out = new(float64) + **out = **in + } + if in.ConnectionMode != nil { + in, out := &in.ConnectionMode, &out.ConnectionMode + *out = new(string) + **out = **in + } + if in.CustomBGPAddress != nil { + in, out := &in.CustomBGPAddress, &out.CustomBGPAddress + *out = make([]CustomBGPAddressInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EgressNATRuleIds != nil { + in, out := &in.EgressNATRuleIds, &out.EgressNATRuleIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IngressNATRuleIds != nil { + in, out := &in.IngressNATRuleIds, &out.IngressNATRuleIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IpsecPolicy != nil { + in, out := &in.IpsecPolicy, &out.IpsecPolicy + *out = make([]VPNLinkIpsecPolicyInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LocalAzureIPAddressEnabled != nil { 
+ in, out := &in.LocalAzureIPAddressEnabled, &out.LocalAzureIPAddressEnabled + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PolicyBasedTrafficSelectorEnabled != nil { + in, out := &in.PolicyBasedTrafficSelectorEnabled, &out.PolicyBasedTrafficSelectorEnabled + *out = new(bool) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.RatelimitEnabled != nil { + in, out := &in.RatelimitEnabled, &out.RatelimitEnabled + *out = new(bool) + **out = **in + } + if in.RouteWeight != nil { + in, out := &in.RouteWeight, &out.RouteWeight + *out = new(float64) + **out = **in + } + if in.SharedKey != nil { + in, out := &in.SharedKey, &out.SharedKey + *out = new(string) + **out = **in + } + if in.VPNSiteLinkID != nil { + in, out := &in.VPNSiteLinkID, &out.VPNSiteLinkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNLinkInitParameters. +func (in *VPNLinkInitParameters) DeepCopy() *VPNLinkInitParameters { + if in == nil { + return nil + } + out := new(VPNLinkInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPNLinkIpsecPolicyInitParameters) DeepCopyInto(out *VPNLinkIpsecPolicyInitParameters) { + *out = *in + if in.DhGroup != nil { + in, out := &in.DhGroup, &out.DhGroup + *out = new(string) + **out = **in + } + if in.EncryptionAlgorithm != nil { + in, out := &in.EncryptionAlgorithm, &out.EncryptionAlgorithm + *out = new(string) + **out = **in + } + if in.IkeEncryptionAlgorithm != nil { + in, out := &in.IkeEncryptionAlgorithm, &out.IkeEncryptionAlgorithm + *out = new(string) + **out = **in + } + if in.IkeIntegrityAlgorithm != nil { + in, out := &in.IkeIntegrityAlgorithm, &out.IkeIntegrityAlgorithm + *out = new(string) + **out = **in + } + if in.IntegrityAlgorithm != nil { + in, out := &in.IntegrityAlgorithm, &out.IntegrityAlgorithm + *out = new(string) + **out = **in + } + if in.PfsGroup != nil { + in, out := &in.PfsGroup, &out.PfsGroup + *out = new(string) + **out = **in + } + if in.SaDataSizeKb != nil { + in, out := &in.SaDataSizeKb, &out.SaDataSizeKb + *out = new(float64) + **out = **in + } + if in.SaLifetimeSec != nil { + in, out := &in.SaLifetimeSec, &out.SaLifetimeSec + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNLinkIpsecPolicyInitParameters. +func (in *VPNLinkIpsecPolicyInitParameters) DeepCopy() *VPNLinkIpsecPolicyInitParameters { + if in == nil { + return nil + } + out := new(VPNLinkIpsecPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPNLinkIpsecPolicyObservation) DeepCopyInto(out *VPNLinkIpsecPolicyObservation) { + *out = *in + if in.DhGroup != nil { + in, out := &in.DhGroup, &out.DhGroup + *out = new(string) + **out = **in + } + if in.EncryptionAlgorithm != nil { + in, out := &in.EncryptionAlgorithm, &out.EncryptionAlgorithm + *out = new(string) + **out = **in + } + if in.IkeEncryptionAlgorithm != nil { + in, out := &in.IkeEncryptionAlgorithm, &out.IkeEncryptionAlgorithm + *out = new(string) + **out = **in + } + if in.IkeIntegrityAlgorithm != nil { + in, out := &in.IkeIntegrityAlgorithm, &out.IkeIntegrityAlgorithm + *out = new(string) + **out = **in + } + if in.IntegrityAlgorithm != nil { + in, out := &in.IntegrityAlgorithm, &out.IntegrityAlgorithm + *out = new(string) + **out = **in + } + if in.PfsGroup != nil { + in, out := &in.PfsGroup, &out.PfsGroup + *out = new(string) + **out = **in + } + if in.SaDataSizeKb != nil { + in, out := &in.SaDataSizeKb, &out.SaDataSizeKb + *out = new(float64) + **out = **in + } + if in.SaLifetimeSec != nil { + in, out := &in.SaLifetimeSec, &out.SaLifetimeSec + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNLinkIpsecPolicyObservation. +func (in *VPNLinkIpsecPolicyObservation) DeepCopy() *VPNLinkIpsecPolicyObservation { + if in == nil { + return nil + } + out := new(VPNLinkIpsecPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPNLinkIpsecPolicyParameters) DeepCopyInto(out *VPNLinkIpsecPolicyParameters) { + *out = *in + if in.DhGroup != nil { + in, out := &in.DhGroup, &out.DhGroup + *out = new(string) + **out = **in + } + if in.EncryptionAlgorithm != nil { + in, out := &in.EncryptionAlgorithm, &out.EncryptionAlgorithm + *out = new(string) + **out = **in + } + if in.IkeEncryptionAlgorithm != nil { + in, out := &in.IkeEncryptionAlgorithm, &out.IkeEncryptionAlgorithm + *out = new(string) + **out = **in + } + if in.IkeIntegrityAlgorithm != nil { + in, out := &in.IkeIntegrityAlgorithm, &out.IkeIntegrityAlgorithm + *out = new(string) + **out = **in + } + if in.IntegrityAlgorithm != nil { + in, out := &in.IntegrityAlgorithm, &out.IntegrityAlgorithm + *out = new(string) + **out = **in + } + if in.PfsGroup != nil { + in, out := &in.PfsGroup, &out.PfsGroup + *out = new(string) + **out = **in + } + if in.SaDataSizeKb != nil { + in, out := &in.SaDataSizeKb, &out.SaDataSizeKb + *out = new(float64) + **out = **in + } + if in.SaLifetimeSec != nil { + in, out := &in.SaLifetimeSec, &out.SaLifetimeSec + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNLinkIpsecPolicyParameters. +func (in *VPNLinkIpsecPolicyParameters) DeepCopy() *VPNLinkIpsecPolicyParameters { + if in == nil { + return nil + } + out := new(VPNLinkIpsecPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPNLinkObservation) DeepCopyInto(out *VPNLinkObservation) { + *out = *in + if in.BGPEnabled != nil { + in, out := &in.BGPEnabled, &out.BGPEnabled + *out = new(bool) + **out = **in + } + if in.BandwidthMbps != nil { + in, out := &in.BandwidthMbps, &out.BandwidthMbps + *out = new(float64) + **out = **in + } + if in.ConnectionMode != nil { + in, out := &in.ConnectionMode, &out.ConnectionMode + *out = new(string) + **out = **in + } + if in.CustomBGPAddress != nil { + in, out := &in.CustomBGPAddress, &out.CustomBGPAddress + *out = make([]CustomBGPAddressObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EgressNATRuleIds != nil { + in, out := &in.EgressNATRuleIds, &out.EgressNATRuleIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IngressNATRuleIds != nil { + in, out := &in.IngressNATRuleIds, &out.IngressNATRuleIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IpsecPolicy != nil { + in, out := &in.IpsecPolicy, &out.IpsecPolicy + *out = make([]VPNLinkIpsecPolicyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LocalAzureIPAddressEnabled != nil { + in, out := &in.LocalAzureIPAddressEnabled, &out.LocalAzureIPAddressEnabled + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PolicyBasedTrafficSelectorEnabled != nil { + in, out := &in.PolicyBasedTrafficSelectorEnabled, &out.PolicyBasedTrafficSelectorEnabled + *out = new(bool) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.RatelimitEnabled != nil { + in, out := &in.RatelimitEnabled, 
&out.RatelimitEnabled + *out = new(bool) + **out = **in + } + if in.RouteWeight != nil { + in, out := &in.RouteWeight, &out.RouteWeight + *out = new(float64) + **out = **in + } + if in.SharedKey != nil { + in, out := &in.SharedKey, &out.SharedKey + *out = new(string) + **out = **in + } + if in.VPNSiteLinkID != nil { + in, out := &in.VPNSiteLinkID, &out.VPNSiteLinkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNLinkObservation. +func (in *VPNLinkObservation) DeepCopy() *VPNLinkObservation { + if in == nil { + return nil + } + out := new(VPNLinkObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPNLinkParameters) DeepCopyInto(out *VPNLinkParameters) { + *out = *in + if in.BGPEnabled != nil { + in, out := &in.BGPEnabled, &out.BGPEnabled + *out = new(bool) + **out = **in + } + if in.BandwidthMbps != nil { + in, out := &in.BandwidthMbps, &out.BandwidthMbps + *out = new(float64) + **out = **in + } + if in.ConnectionMode != nil { + in, out := &in.ConnectionMode, &out.ConnectionMode + *out = new(string) + **out = **in + } + if in.CustomBGPAddress != nil { + in, out := &in.CustomBGPAddress, &out.CustomBGPAddress + *out = make([]CustomBGPAddressParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EgressNATRuleIds != nil { + in, out := &in.EgressNATRuleIds, &out.EgressNATRuleIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IngressNATRuleIds != nil { + in, out := &in.IngressNATRuleIds, &out.IngressNATRuleIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IpsecPolicy != 
nil { + in, out := &in.IpsecPolicy, &out.IpsecPolicy + *out = make([]VPNLinkIpsecPolicyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LocalAzureIPAddressEnabled != nil { + in, out := &in.LocalAzureIPAddressEnabled, &out.LocalAzureIPAddressEnabled + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PolicyBasedTrafficSelectorEnabled != nil { + in, out := &in.PolicyBasedTrafficSelectorEnabled, &out.PolicyBasedTrafficSelectorEnabled + *out = new(bool) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.RatelimitEnabled != nil { + in, out := &in.RatelimitEnabled, &out.RatelimitEnabled + *out = new(bool) + **out = **in + } + if in.RouteWeight != nil { + in, out := &in.RouteWeight, &out.RouteWeight + *out = new(float64) + **out = **in + } + if in.SharedKey != nil { + in, out := &in.SharedKey, &out.SharedKey + *out = new(string) + **out = **in + } + if in.VPNSiteLinkID != nil { + in, out := &in.VPNSiteLinkID, &out.VPNSiteLinkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNLinkParameters. +func (in *VPNLinkParameters) DeepCopy() *VPNLinkParameters { + if in == nil { + return nil + } + out := new(VPNLinkParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPNServerConfiguration) DeepCopyInto(out *VPNServerConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNServerConfiguration. 
+func (in *VPNServerConfiguration) DeepCopy() *VPNServerConfiguration { + if in == nil { + return nil + } + out := new(VPNServerConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VPNServerConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPNServerConfigurationInitParameters) DeepCopyInto(out *VPNServerConfigurationInitParameters) { + *out = *in + if in.AzureActiveDirectoryAuthentication != nil { + in, out := &in.AzureActiveDirectoryAuthentication, &out.AzureActiveDirectoryAuthentication + *out = make([]AzureActiveDirectoryAuthenticationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ClientRevokedCertificate != nil { + in, out := &in.ClientRevokedCertificate, &out.ClientRevokedCertificate + *out = make([]ClientRevokedCertificateInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ClientRootCertificate != nil { + in, out := &in.ClientRootCertificate, &out.ClientRootCertificate + *out = make([]ClientRootCertificateInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IpsecPolicy != nil { + in, out := &in.IpsecPolicy, &out.IpsecPolicy + *out = new(VPNServerConfigurationIpsecPolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Radius != nil { + in, out := &in.Radius, &out.Radius + *out = new(RadiusInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if 
val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPNAuthenticationTypes != nil { + in, out := &in.VPNAuthenticationTypes, &out.VPNAuthenticationTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VPNProtocols != nil { + in, out := &in.VPNProtocols, &out.VPNProtocols + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNServerConfigurationInitParameters. +func (in *VPNServerConfigurationInitParameters) DeepCopy() *VPNServerConfigurationInitParameters { + if in == nil { + return nil + } + out := new(VPNServerConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPNServerConfigurationIpsecPolicyInitParameters) DeepCopyInto(out *VPNServerConfigurationIpsecPolicyInitParameters) { + *out = *in + if in.DhGroup != nil { + in, out := &in.DhGroup, &out.DhGroup + *out = new(string) + **out = **in + } + if in.IkeEncryption != nil { + in, out := &in.IkeEncryption, &out.IkeEncryption + *out = new(string) + **out = **in + } + if in.IkeIntegrity != nil { + in, out := &in.IkeIntegrity, &out.IkeIntegrity + *out = new(string) + **out = **in + } + if in.IpsecEncryption != nil { + in, out := &in.IpsecEncryption, &out.IpsecEncryption + *out = new(string) + **out = **in + } + if in.IpsecIntegrity != nil { + in, out := &in.IpsecIntegrity, &out.IpsecIntegrity + *out = new(string) + **out = **in + } + if in.PfsGroup != nil { + in, out := &in.PfsGroup, &out.PfsGroup + *out = new(string) + **out = **in + } + if in.SaDataSizeKilobytes != nil { + in, out := &in.SaDataSizeKilobytes, &out.SaDataSizeKilobytes + *out = new(float64) + **out = **in + } + if in.SaLifetimeSeconds != nil { + in, out := &in.SaLifetimeSeconds, &out.SaLifetimeSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNServerConfigurationIpsecPolicyInitParameters. +func (in *VPNServerConfigurationIpsecPolicyInitParameters) DeepCopy() *VPNServerConfigurationIpsecPolicyInitParameters { + if in == nil { + return nil + } + out := new(VPNServerConfigurationIpsecPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPNServerConfigurationIpsecPolicyObservation) DeepCopyInto(out *VPNServerConfigurationIpsecPolicyObservation) { + *out = *in + if in.DhGroup != nil { + in, out := &in.DhGroup, &out.DhGroup + *out = new(string) + **out = **in + } + if in.IkeEncryption != nil { + in, out := &in.IkeEncryption, &out.IkeEncryption + *out = new(string) + **out = **in + } + if in.IkeIntegrity != nil { + in, out := &in.IkeIntegrity, &out.IkeIntegrity + *out = new(string) + **out = **in + } + if in.IpsecEncryption != nil { + in, out := &in.IpsecEncryption, &out.IpsecEncryption + *out = new(string) + **out = **in + } + if in.IpsecIntegrity != nil { + in, out := &in.IpsecIntegrity, &out.IpsecIntegrity + *out = new(string) + **out = **in + } + if in.PfsGroup != nil { + in, out := &in.PfsGroup, &out.PfsGroup + *out = new(string) + **out = **in + } + if in.SaDataSizeKilobytes != nil { + in, out := &in.SaDataSizeKilobytes, &out.SaDataSizeKilobytes + *out = new(float64) + **out = **in + } + if in.SaLifetimeSeconds != nil { + in, out := &in.SaLifetimeSeconds, &out.SaLifetimeSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNServerConfigurationIpsecPolicyObservation. +func (in *VPNServerConfigurationIpsecPolicyObservation) DeepCopy() *VPNServerConfigurationIpsecPolicyObservation { + if in == nil { + return nil + } + out := new(VPNServerConfigurationIpsecPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPNServerConfigurationIpsecPolicyParameters) DeepCopyInto(out *VPNServerConfigurationIpsecPolicyParameters) { + *out = *in + if in.DhGroup != nil { + in, out := &in.DhGroup, &out.DhGroup + *out = new(string) + **out = **in + } + if in.IkeEncryption != nil { + in, out := &in.IkeEncryption, &out.IkeEncryption + *out = new(string) + **out = **in + } + if in.IkeIntegrity != nil { + in, out := &in.IkeIntegrity, &out.IkeIntegrity + *out = new(string) + **out = **in + } + if in.IpsecEncryption != nil { + in, out := &in.IpsecEncryption, &out.IpsecEncryption + *out = new(string) + **out = **in + } + if in.IpsecIntegrity != nil { + in, out := &in.IpsecIntegrity, &out.IpsecIntegrity + *out = new(string) + **out = **in + } + if in.PfsGroup != nil { + in, out := &in.PfsGroup, &out.PfsGroup + *out = new(string) + **out = **in + } + if in.SaDataSizeKilobytes != nil { + in, out := &in.SaDataSizeKilobytes, &out.SaDataSizeKilobytes + *out = new(float64) + **out = **in + } + if in.SaLifetimeSeconds != nil { + in, out := &in.SaLifetimeSeconds, &out.SaLifetimeSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNServerConfigurationIpsecPolicyParameters. +func (in *VPNServerConfigurationIpsecPolicyParameters) DeepCopy() *VPNServerConfigurationIpsecPolicyParameters { + if in == nil { + return nil + } + out := new(VPNServerConfigurationIpsecPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPNServerConfigurationList) DeepCopyInto(out *VPNServerConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VPNServerConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNServerConfigurationList. +func (in *VPNServerConfigurationList) DeepCopy() *VPNServerConfigurationList { + if in == nil { + return nil + } + out := new(VPNServerConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VPNServerConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPNServerConfigurationObservation) DeepCopyInto(out *VPNServerConfigurationObservation) { + *out = *in + if in.AzureActiveDirectoryAuthentication != nil { + in, out := &in.AzureActiveDirectoryAuthentication, &out.AzureActiveDirectoryAuthentication + *out = make([]AzureActiveDirectoryAuthenticationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ClientRevokedCertificate != nil { + in, out := &in.ClientRevokedCertificate, &out.ClientRevokedCertificate + *out = make([]ClientRevokedCertificateObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ClientRootCertificate != nil { + in, out := &in.ClientRootCertificate, &out.ClientRootCertificate + *out = make([]ClientRootCertificateObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IpsecPolicy != nil { + in, out := &in.IpsecPolicy, &out.IpsecPolicy + *out = new(VPNServerConfigurationIpsecPolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Radius != nil { + in, out := &in.Radius, &out.Radius + *out = new(RadiusObservation) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPNAuthenticationTypes != nil { + in, out := &in.VPNAuthenticationTypes, &out.VPNAuthenticationTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] 
!= nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VPNProtocols != nil { + in, out := &in.VPNProtocols, &out.VPNProtocols + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNServerConfigurationObservation. +func (in *VPNServerConfigurationObservation) DeepCopy() *VPNServerConfigurationObservation { + if in == nil { + return nil + } + out := new(VPNServerConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPNServerConfigurationParameters) DeepCopyInto(out *VPNServerConfigurationParameters) { + *out = *in + if in.AzureActiveDirectoryAuthentication != nil { + in, out := &in.AzureActiveDirectoryAuthentication, &out.AzureActiveDirectoryAuthentication + *out = make([]AzureActiveDirectoryAuthenticationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ClientRevokedCertificate != nil { + in, out := &in.ClientRevokedCertificate, &out.ClientRevokedCertificate + *out = make([]ClientRevokedCertificateParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ClientRootCertificate != nil { + in, out := &in.ClientRootCertificate, &out.ClientRootCertificate + *out = make([]ClientRootCertificateParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IpsecPolicy != nil { + in, out := &in.IpsecPolicy, &out.IpsecPolicy + *out = new(VPNServerConfigurationIpsecPolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Radius != nil { + in, out := &in.Radius, 
&out.Radius + *out = new(RadiusParameters) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VPNAuthenticationTypes != nil { + in, out := &in.VPNAuthenticationTypes, &out.VPNAuthenticationTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.VPNProtocols != nil { + in, out := &in.VPNProtocols, &out.VPNProtocols + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNServerConfigurationParameters. +func (in *VPNServerConfigurationParameters) DeepCopy() *VPNServerConfigurationParameters { + if in == nil { + return nil + } + out := new(VPNServerConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPNServerConfigurationSpec) DeepCopyInto(out *VPNServerConfigurationSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNServerConfigurationSpec. +func (in *VPNServerConfigurationSpec) DeepCopy() *VPNServerConfigurationSpec { + if in == nil { + return nil + } + out := new(VPNServerConfigurationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPNServerConfigurationStatus) DeepCopyInto(out *VPNServerConfigurationStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNServerConfigurationStatus. +func (in *VPNServerConfigurationStatus) DeepCopy() *VPNServerConfigurationStatus { + if in == nil { + return nil + } + out := new(VPNServerConfigurationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPNSite) DeepCopyInto(out *VPNSite) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNSite. +func (in *VPNSite) DeepCopy() *VPNSite { + if in == nil { + return nil + } + out := new(VPNSite) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *VPNSite) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPNSiteInitParameters) DeepCopyInto(out *VPNSiteInitParameters) { + *out = *in + if in.AddressCidrs != nil { + in, out := &in.AddressCidrs, &out.AddressCidrs + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DeviceModel != nil { + in, out := &in.DeviceModel, &out.DeviceModel + *out = new(string) + **out = **in + } + if in.DeviceVendor != nil { + in, out := &in.DeviceVendor, &out.DeviceVendor + *out = new(string) + **out = **in + } + if in.Link != nil { + in, out := &in.Link, &out.Link + *out = make([]LinkInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.O365Policy != nil { + in, out := &in.O365Policy, &out.O365Policy + *out = new(O365PolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VirtualWanID != nil { + in, out := &in.VirtualWanID, &out.VirtualWanID + *out = new(string) + **out = **in + } + if in.VirtualWanIDRef != nil { + in, out := &in.VirtualWanIDRef, &out.VirtualWanIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualWanIDSelector != nil { + in, out := &in.VirtualWanIDSelector, &out.VirtualWanIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an 
autogenerated deepcopy function, copying the receiver, creating a new VPNSiteInitParameters. +func (in *VPNSiteInitParameters) DeepCopy() *VPNSiteInitParameters { + if in == nil { + return nil + } + out := new(VPNSiteInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPNSiteList) DeepCopyInto(out *VPNSiteList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VPNSite, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNSiteList. +func (in *VPNSiteList) DeepCopy() *VPNSiteList { + if in == nil { + return nil + } + out := new(VPNSiteList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VPNSiteList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VPNSiteObservation) DeepCopyInto(out *VPNSiteObservation) { + *out = *in + if in.AddressCidrs != nil { + in, out := &in.AddressCidrs, &out.AddressCidrs + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DeviceModel != nil { + in, out := &in.DeviceModel, &out.DeviceModel + *out = new(string) + **out = **in + } + if in.DeviceVendor != nil { + in, out := &in.DeviceVendor, &out.DeviceVendor + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Link != nil { + in, out := &in.Link, &out.Link + *out = make([]LinkObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.O365Policy != nil { + in, out := &in.O365Policy, &out.O365Policy + *out = new(O365PolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VirtualWanID != nil { + in, out := &in.VirtualWanID, &out.VirtualWanID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNSiteObservation. 
+func (in *VPNSiteObservation) DeepCopy() *VPNSiteObservation { + if in == nil { + return nil + } + out := new(VPNSiteObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPNSiteParameters) DeepCopyInto(out *VPNSiteParameters) { + *out = *in + if in.AddressCidrs != nil { + in, out := &in.AddressCidrs, &out.AddressCidrs + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DeviceModel != nil { + in, out := &in.DeviceModel, &out.DeviceModel + *out = new(string) + **out = **in + } + if in.DeviceVendor != nil { + in, out := &in.DeviceVendor, &out.DeviceVendor + *out = new(string) + **out = **in + } + if in.Link != nil { + in, out := &in.Link, &out.Link + *out = make([]LinkParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.O365Policy != nil { + in, out := &in.O365Policy, &out.O365Policy + *out = new(O365PolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) 
+ **out = **in + } + (*out)[key] = outVal + } + } + if in.VirtualWanID != nil { + in, out := &in.VirtualWanID, &out.VirtualWanID + *out = new(string) + **out = **in + } + if in.VirtualWanIDRef != nil { + in, out := &in.VirtualWanIDRef, &out.VirtualWanIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualWanIDSelector != nil { + in, out := &in.VirtualWanIDSelector, &out.VirtualWanIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNSiteParameters. +func (in *VPNSiteParameters) DeepCopy() *VPNSiteParameters { + if in == nil { + return nil + } + out := new(VPNSiteParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPNSiteSpec) DeepCopyInto(out *VPNSiteSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNSiteSpec. +func (in *VPNSiteSpec) DeepCopy() *VPNSiteSpec { + if in == nil { + return nil + } + out := new(VPNSiteSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPNSiteStatus) DeepCopyInto(out *VPNSiteStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPNSiteStatus. 
+func (in *VPNSiteStatus) DeepCopy() *VPNSiteStatus { + if in == nil { + return nil + } + out := new(VPNSiteStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualHubConnection) DeepCopyInto(out *VirtualHubConnection) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualHubConnection. +func (in *VirtualHubConnection) DeepCopy() *VirtualHubConnection { + if in == nil { + return nil + } + out := new(VirtualHubConnection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VirtualHubConnection) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualHubConnectionInitParameters) DeepCopyInto(out *VirtualHubConnectionInitParameters) { + *out = *in + if in.InternetSecurityEnabled != nil { + in, out := &in.InternetSecurityEnabled, &out.InternetSecurityEnabled + *out = new(bool) + **out = **in + } + if in.RemoteVirtualNetworkID != nil { + in, out := &in.RemoteVirtualNetworkID, &out.RemoteVirtualNetworkID + *out = new(string) + **out = **in + } + if in.RemoteVirtualNetworkIDRef != nil { + in, out := &in.RemoteVirtualNetworkIDRef, &out.RemoteVirtualNetworkIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RemoteVirtualNetworkIDSelector != nil { + in, out := &in.RemoteVirtualNetworkIDSelector, &out.RemoteVirtualNetworkIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Routing != nil { + in, out := &in.Routing, &out.Routing + *out = new(VirtualHubConnectionRoutingInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualHubConnectionInitParameters. +func (in *VirtualHubConnectionInitParameters) DeepCopy() *VirtualHubConnectionInitParameters { + if in == nil { + return nil + } + out := new(VirtualHubConnectionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualHubConnectionList) DeepCopyInto(out *VirtualHubConnectionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VirtualHubConnection, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualHubConnectionList. 
+func (in *VirtualHubConnectionList) DeepCopy() *VirtualHubConnectionList { + if in == nil { + return nil + } + out := new(VirtualHubConnectionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VirtualHubConnectionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualHubConnectionObservation) DeepCopyInto(out *VirtualHubConnectionObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InternetSecurityEnabled != nil { + in, out := &in.InternetSecurityEnabled, &out.InternetSecurityEnabled + *out = new(bool) + **out = **in + } + if in.RemoteVirtualNetworkID != nil { + in, out := &in.RemoteVirtualNetworkID, &out.RemoteVirtualNetworkID + *out = new(string) + **out = **in + } + if in.Routing != nil { + in, out := &in.Routing, &out.Routing + *out = new(VirtualHubConnectionRoutingObservation) + (*in).DeepCopyInto(*out) + } + if in.VirtualHubID != nil { + in, out := &in.VirtualHubID, &out.VirtualHubID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualHubConnectionObservation. +func (in *VirtualHubConnectionObservation) DeepCopy() *VirtualHubConnectionObservation { + if in == nil { + return nil + } + out := new(VirtualHubConnectionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualHubConnectionParameters) DeepCopyInto(out *VirtualHubConnectionParameters) { + *out = *in + if in.InternetSecurityEnabled != nil { + in, out := &in.InternetSecurityEnabled, &out.InternetSecurityEnabled + *out = new(bool) + **out = **in + } + if in.RemoteVirtualNetworkID != nil { + in, out := &in.RemoteVirtualNetworkID, &out.RemoteVirtualNetworkID + *out = new(string) + **out = **in + } + if in.RemoteVirtualNetworkIDRef != nil { + in, out := &in.RemoteVirtualNetworkIDRef, &out.RemoteVirtualNetworkIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RemoteVirtualNetworkIDSelector != nil { + in, out := &in.RemoteVirtualNetworkIDSelector, &out.RemoteVirtualNetworkIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Routing != nil { + in, out := &in.Routing, &out.Routing + *out = new(VirtualHubConnectionRoutingParameters) + (*in).DeepCopyInto(*out) + } + if in.VirtualHubID != nil { + in, out := &in.VirtualHubID, &out.VirtualHubID + *out = new(string) + **out = **in + } + if in.VirtualHubIDRef != nil { + in, out := &in.VirtualHubIDRef, &out.VirtualHubIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualHubIDSelector != nil { + in, out := &in.VirtualHubIDSelector, &out.VirtualHubIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualHubConnectionParameters. +func (in *VirtualHubConnectionParameters) DeepCopy() *VirtualHubConnectionParameters { + if in == nil { + return nil + } + out := new(VirtualHubConnectionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualHubConnectionRoutingInitParameters) DeepCopyInto(out *VirtualHubConnectionRoutingInitParameters) { + *out = *in + if in.AssociatedRouteTableID != nil { + in, out := &in.AssociatedRouteTableID, &out.AssociatedRouteTableID + *out = new(string) + **out = **in + } + if in.AssociatedRouteTableIDRef != nil { + in, out := &in.AssociatedRouteTableIDRef, &out.AssociatedRouteTableIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AssociatedRouteTableIDSelector != nil { + in, out := &in.AssociatedRouteTableIDSelector, &out.AssociatedRouteTableIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.InboundRouteMapID != nil { + in, out := &in.InboundRouteMapID, &out.InboundRouteMapID + *out = new(string) + **out = **in + } + if in.OutboundRouteMapID != nil { + in, out := &in.OutboundRouteMapID, &out.OutboundRouteMapID + *out = new(string) + **out = **in + } + if in.PropagatedRouteTable != nil { + in, out := &in.PropagatedRouteTable, &out.PropagatedRouteTable + *out = new(RoutingPropagatedRouteTableInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StaticVnetLocalRouteOverrideCriteria != nil { + in, out := &in.StaticVnetLocalRouteOverrideCriteria, &out.StaticVnetLocalRouteOverrideCriteria + *out = new(string) + **out = **in + } + if in.StaticVnetRoute != nil { + in, out := &in.StaticVnetRoute, &out.StaticVnetRoute + *out = make([]StaticVnetRouteInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualHubConnectionRoutingInitParameters. +func (in *VirtualHubConnectionRoutingInitParameters) DeepCopy() *VirtualHubConnectionRoutingInitParameters { + if in == nil { + return nil + } + out := new(VirtualHubConnectionRoutingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *VirtualHubConnectionRoutingObservation) DeepCopyInto(out *VirtualHubConnectionRoutingObservation) { + *out = *in + if in.AssociatedRouteTableID != nil { + in, out := &in.AssociatedRouteTableID, &out.AssociatedRouteTableID + *out = new(string) + **out = **in + } + if in.InboundRouteMapID != nil { + in, out := &in.InboundRouteMapID, &out.InboundRouteMapID + *out = new(string) + **out = **in + } + if in.OutboundRouteMapID != nil { + in, out := &in.OutboundRouteMapID, &out.OutboundRouteMapID + *out = new(string) + **out = **in + } + if in.PropagatedRouteTable != nil { + in, out := &in.PropagatedRouteTable, &out.PropagatedRouteTable + *out = new(RoutingPropagatedRouteTableObservation) + (*in).DeepCopyInto(*out) + } + if in.StaticVnetLocalRouteOverrideCriteria != nil { + in, out := &in.StaticVnetLocalRouteOverrideCriteria, &out.StaticVnetLocalRouteOverrideCriteria + *out = new(string) + **out = **in + } + if in.StaticVnetRoute != nil { + in, out := &in.StaticVnetRoute, &out.StaticVnetRoute + *out = make([]StaticVnetRouteObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualHubConnectionRoutingObservation. +func (in *VirtualHubConnectionRoutingObservation) DeepCopy() *VirtualHubConnectionRoutingObservation { + if in == nil { + return nil + } + out := new(VirtualHubConnectionRoutingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualHubConnectionRoutingParameters) DeepCopyInto(out *VirtualHubConnectionRoutingParameters) { + *out = *in + if in.AssociatedRouteTableID != nil { + in, out := &in.AssociatedRouteTableID, &out.AssociatedRouteTableID + *out = new(string) + **out = **in + } + if in.AssociatedRouteTableIDRef != nil { + in, out := &in.AssociatedRouteTableIDRef, &out.AssociatedRouteTableIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AssociatedRouteTableIDSelector != nil { + in, out := &in.AssociatedRouteTableIDSelector, &out.AssociatedRouteTableIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.InboundRouteMapID != nil { + in, out := &in.InboundRouteMapID, &out.InboundRouteMapID + *out = new(string) + **out = **in + } + if in.OutboundRouteMapID != nil { + in, out := &in.OutboundRouteMapID, &out.OutboundRouteMapID + *out = new(string) + **out = **in + } + if in.PropagatedRouteTable != nil { + in, out := &in.PropagatedRouteTable, &out.PropagatedRouteTable + *out = new(RoutingPropagatedRouteTableParameters) + (*in).DeepCopyInto(*out) + } + if in.StaticVnetLocalRouteOverrideCriteria != nil { + in, out := &in.StaticVnetLocalRouteOverrideCriteria, &out.StaticVnetLocalRouteOverrideCriteria + *out = new(string) + **out = **in + } + if in.StaticVnetRoute != nil { + in, out := &in.StaticVnetRoute, &out.StaticVnetRoute + *out = make([]StaticVnetRouteParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualHubConnectionRoutingParameters. +func (in *VirtualHubConnectionRoutingParameters) DeepCopy() *VirtualHubConnectionRoutingParameters { + if in == nil { + return nil + } + out := new(VirtualHubConnectionRoutingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualHubConnectionSpec) DeepCopyInto(out *VirtualHubConnectionSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualHubConnectionSpec. +func (in *VirtualHubConnectionSpec) DeepCopy() *VirtualHubConnectionSpec { + if in == nil { + return nil + } + out := new(VirtualHubConnectionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualHubConnectionStatus) DeepCopyInto(out *VirtualHubConnectionStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualHubConnectionStatus. +func (in *VirtualHubConnectionStatus) DeepCopy() *VirtualHubConnectionStatus { + if in == nil { + return nil + } + out := new(VirtualHubConnectionStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualHubInitParameters) DeepCopyInto(out *VirtualHubInitParameters) { + *out = *in + if in.PublicIPCount != nil { + in, out := &in.PublicIPCount, &out.PublicIPCount + *out = new(float64) + **out = **in + } + if in.VirtualHubID != nil { + in, out := &in.VirtualHubID, &out.VirtualHubID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualHubInitParameters. 
+func (in *VirtualHubInitParameters) DeepCopy() *VirtualHubInitParameters { + if in == nil { + return nil + } + out := new(VirtualHubInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualHubObservation) DeepCopyInto(out *VirtualHubObservation) { + *out = *in + if in.PrivateIPAddress != nil { + in, out := &in.PrivateIPAddress, &out.PrivateIPAddress + *out = new(string) + **out = **in + } + if in.PublicIPAddresses != nil { + in, out := &in.PublicIPAddresses, &out.PublicIPAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PublicIPCount != nil { + in, out := &in.PublicIPCount, &out.PublicIPCount + *out = new(float64) + **out = **in + } + if in.VirtualHubID != nil { + in, out := &in.VirtualHubID, &out.VirtualHubID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualHubObservation. +func (in *VirtualHubObservation) DeepCopy() *VirtualHubObservation { + if in == nil { + return nil + } + out := new(VirtualHubObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualHubParameters) DeepCopyInto(out *VirtualHubParameters) { + *out = *in + if in.PublicIPCount != nil { + in, out := &in.PublicIPCount, &out.PublicIPCount + *out = new(float64) + **out = **in + } + if in.VirtualHubID != nil { + in, out := &in.VirtualHubID, &out.VirtualHubID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualHubParameters. 
+func (in *VirtualHubParameters) DeepCopy() *VirtualHubParameters { + if in == nil { + return nil + } + out := new(VirtualHubParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualNetwork) DeepCopyInto(out *VirtualNetwork) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetwork. +func (in *VirtualNetwork) DeepCopy() *VirtualNetwork { + if in == nil { + return nil + } + out := new(VirtualNetwork) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VirtualNetwork) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualNetworkGateway) DeepCopyInto(out *VirtualNetworkGateway) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkGateway. +func (in *VirtualNetworkGateway) DeepCopy() *VirtualNetworkGateway { + if in == nil { + return nil + } + out := new(VirtualNetworkGateway) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *VirtualNetworkGateway) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualNetworkGatewayBGPSettingsInitParameters) DeepCopyInto(out *VirtualNetworkGatewayBGPSettingsInitParameters) { + *out = *in + if in.Asn != nil { + in, out := &in.Asn, &out.Asn + *out = new(float64) + **out = **in + } + if in.PeerWeight != nil { + in, out := &in.PeerWeight, &out.PeerWeight + *out = new(float64) + **out = **in + } + if in.PeeringAddresses != nil { + in, out := &in.PeeringAddresses, &out.PeeringAddresses + *out = make([]PeeringAddressesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkGatewayBGPSettingsInitParameters. +func (in *VirtualNetworkGatewayBGPSettingsInitParameters) DeepCopy() *VirtualNetworkGatewayBGPSettingsInitParameters { + if in == nil { + return nil + } + out := new(VirtualNetworkGatewayBGPSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualNetworkGatewayBGPSettingsObservation) DeepCopyInto(out *VirtualNetworkGatewayBGPSettingsObservation) { + *out = *in + if in.Asn != nil { + in, out := &in.Asn, &out.Asn + *out = new(float64) + **out = **in + } + if in.PeerWeight != nil { + in, out := &in.PeerWeight, &out.PeerWeight + *out = new(float64) + **out = **in + } + if in.PeeringAddresses != nil { + in, out := &in.PeeringAddresses, &out.PeeringAddresses + *out = make([]PeeringAddressesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkGatewayBGPSettingsObservation. +func (in *VirtualNetworkGatewayBGPSettingsObservation) DeepCopy() *VirtualNetworkGatewayBGPSettingsObservation { + if in == nil { + return nil + } + out := new(VirtualNetworkGatewayBGPSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualNetworkGatewayBGPSettingsParameters) DeepCopyInto(out *VirtualNetworkGatewayBGPSettingsParameters) { + *out = *in + if in.Asn != nil { + in, out := &in.Asn, &out.Asn + *out = new(float64) + **out = **in + } + if in.PeerWeight != nil { + in, out := &in.PeerWeight, &out.PeerWeight + *out = new(float64) + **out = **in + } + if in.PeeringAddresses != nil { + in, out := &in.PeeringAddresses, &out.PeeringAddresses + *out = make([]PeeringAddressesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkGatewayBGPSettingsParameters. 
+func (in *VirtualNetworkGatewayBGPSettingsParameters) DeepCopy() *VirtualNetworkGatewayBGPSettingsParameters { + if in == nil { + return nil + } + out := new(VirtualNetworkGatewayBGPSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualNetworkGatewayClientConnectionInitParameters) DeepCopyInto(out *VirtualNetworkGatewayClientConnectionInitParameters) { + *out = *in + if in.AddressPrefixes != nil { + in, out := &in.AddressPrefixes, &out.AddressPrefixes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PolicyGroupNames != nil { + in, out := &in.PolicyGroupNames, &out.PolicyGroupNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkGatewayClientConnectionInitParameters. +func (in *VirtualNetworkGatewayClientConnectionInitParameters) DeepCopy() *VirtualNetworkGatewayClientConnectionInitParameters { + if in == nil { + return nil + } + out := new(VirtualNetworkGatewayClientConnectionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualNetworkGatewayClientConnectionObservation) DeepCopyInto(out *VirtualNetworkGatewayClientConnectionObservation) { + *out = *in + if in.AddressPrefixes != nil { + in, out := &in.AddressPrefixes, &out.AddressPrefixes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PolicyGroupNames != nil { + in, out := &in.PolicyGroupNames, &out.PolicyGroupNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkGatewayClientConnectionObservation. +func (in *VirtualNetworkGatewayClientConnectionObservation) DeepCopy() *VirtualNetworkGatewayClientConnectionObservation { + if in == nil { + return nil + } + out := new(VirtualNetworkGatewayClientConnectionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualNetworkGatewayClientConnectionParameters) DeepCopyInto(out *VirtualNetworkGatewayClientConnectionParameters) { + *out = *in + if in.AddressPrefixes != nil { + in, out := &in.AddressPrefixes, &out.AddressPrefixes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PolicyGroupNames != nil { + in, out := &in.PolicyGroupNames, &out.PolicyGroupNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkGatewayClientConnectionParameters. +func (in *VirtualNetworkGatewayClientConnectionParameters) DeepCopy() *VirtualNetworkGatewayClientConnectionParameters { + if in == nil { + return nil + } + out := new(VirtualNetworkGatewayClientConnectionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualNetworkGatewayConnection) DeepCopyInto(out *VirtualNetworkGatewayConnection) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkGatewayConnection. +func (in *VirtualNetworkGatewayConnection) DeepCopy() *VirtualNetworkGatewayConnection { + if in == nil { + return nil + } + out := new(VirtualNetworkGatewayConnection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *VirtualNetworkGatewayConnection) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualNetworkGatewayConnectionInitParameters) DeepCopyInto(out *VirtualNetworkGatewayConnectionInitParameters) { + *out = *in + if in.ConnectionMode != nil { + in, out := &in.ConnectionMode, &out.ConnectionMode + *out = new(string) + **out = **in + } + if in.ConnectionProtocol != nil { + in, out := &in.ConnectionProtocol, &out.ConnectionProtocol + *out = new(string) + **out = **in + } + if in.CustomBGPAddresses != nil { + in, out := &in.CustomBGPAddresses, &out.CustomBGPAddresses + *out = new(CustomBGPAddressesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DpdTimeoutSeconds != nil { + in, out := &in.DpdTimeoutSeconds, &out.DpdTimeoutSeconds + *out = new(float64) + **out = **in + } + if in.EgressNATRuleIds != nil { + in, out := &in.EgressNATRuleIds, &out.EgressNATRuleIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.EnableBGP != nil { + in, out := &in.EnableBGP, &out.EnableBGP + *out = new(bool) + **out = **in + } + if in.ExpressRouteCircuitID != nil { + in, out := &in.ExpressRouteCircuitID, &out.ExpressRouteCircuitID + *out = new(string) + **out = **in + } + if in.ExpressRouteGatewayBypass != nil { + in, out := &in.ExpressRouteGatewayBypass, &out.ExpressRouteGatewayBypass + *out = new(bool) + **out = **in + } + if in.IngressNATRuleIds != nil { + in, out := &in.IngressNATRuleIds, &out.IngressNATRuleIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IpsecPolicy != nil { + in, out := &in.IpsecPolicy, &out.IpsecPolicy + *out = 
new(VirtualNetworkGatewayConnectionIpsecPolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LocalAzureIPAddressEnabled != nil { + in, out := &in.LocalAzureIPAddressEnabled, &out.LocalAzureIPAddressEnabled + *out = new(bool) + **out = **in + } + if in.LocalNetworkGatewayID != nil { + in, out := &in.LocalNetworkGatewayID, &out.LocalNetworkGatewayID + *out = new(string) + **out = **in + } + if in.LocalNetworkGatewayIDRef != nil { + in, out := &in.LocalNetworkGatewayIDRef, &out.LocalNetworkGatewayIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LocalNetworkGatewayIDSelector != nil { + in, out := &in.LocalNetworkGatewayIDSelector, &out.LocalNetworkGatewayIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PeerVirtualNetworkGatewayID != nil { + in, out := &in.PeerVirtualNetworkGatewayID, &out.PeerVirtualNetworkGatewayID + *out = new(string) + **out = **in + } + if in.PeerVirtualNetworkGatewayIDRef != nil { + in, out := &in.PeerVirtualNetworkGatewayIDRef, &out.PeerVirtualNetworkGatewayIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PeerVirtualNetworkGatewayIDSelector != nil { + in, out := &in.PeerVirtualNetworkGatewayIDSelector, &out.PeerVirtualNetworkGatewayIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RoutingWeight != nil { + in, out := &in.RoutingWeight, &out.RoutingWeight + *out = new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TrafficSelectorPolicy != nil { + in, out := &in.TrafficSelectorPolicy, &out.TrafficSelectorPolicy + *out = 
make([]TrafficSelectorPolicyInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.UsePolicyBasedTrafficSelectors != nil { + in, out := &in.UsePolicyBasedTrafficSelectors, &out.UsePolicyBasedTrafficSelectors + *out = new(bool) + **out = **in + } + if in.VirtualNetworkGatewayID != nil { + in, out := &in.VirtualNetworkGatewayID, &out.VirtualNetworkGatewayID + *out = new(string) + **out = **in + } + if in.VirtualNetworkGatewayIDRef != nil { + in, out := &in.VirtualNetworkGatewayIDRef, &out.VirtualNetworkGatewayIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkGatewayIDSelector != nil { + in, out := &in.VirtualNetworkGatewayIDSelector, &out.VirtualNetworkGatewayIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkGatewayConnectionInitParameters. +func (in *VirtualNetworkGatewayConnectionInitParameters) DeepCopy() *VirtualNetworkGatewayConnectionInitParameters { + if in == nil { + return nil + } + out := new(VirtualNetworkGatewayConnectionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualNetworkGatewayConnectionIpsecPolicyInitParameters) DeepCopyInto(out *VirtualNetworkGatewayConnectionIpsecPolicyInitParameters) { + *out = *in + if in.DhGroup != nil { + in, out := &in.DhGroup, &out.DhGroup + *out = new(string) + **out = **in + } + if in.IkeEncryption != nil { + in, out := &in.IkeEncryption, &out.IkeEncryption + *out = new(string) + **out = **in + } + if in.IkeIntegrity != nil { + in, out := &in.IkeIntegrity, &out.IkeIntegrity + *out = new(string) + **out = **in + } + if in.IpsecEncryption != nil { + in, out := &in.IpsecEncryption, &out.IpsecEncryption + *out = new(string) + **out = **in + } + if in.IpsecIntegrity != nil { + in, out := &in.IpsecIntegrity, &out.IpsecIntegrity + *out = new(string) + **out = **in + } + if in.PfsGroup != nil { + in, out := &in.PfsGroup, &out.PfsGroup + *out = new(string) + **out = **in + } + if in.SaDatasize != nil { + in, out := &in.SaDatasize, &out.SaDatasize + *out = new(float64) + **out = **in + } + if in.SaLifetime != nil { + in, out := &in.SaLifetime, &out.SaLifetime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkGatewayConnectionIpsecPolicyInitParameters. +func (in *VirtualNetworkGatewayConnectionIpsecPolicyInitParameters) DeepCopy() *VirtualNetworkGatewayConnectionIpsecPolicyInitParameters { + if in == nil { + return nil + } + out := new(VirtualNetworkGatewayConnectionIpsecPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualNetworkGatewayConnectionIpsecPolicyObservation) DeepCopyInto(out *VirtualNetworkGatewayConnectionIpsecPolicyObservation) { + *out = *in + if in.DhGroup != nil { + in, out := &in.DhGroup, &out.DhGroup + *out = new(string) + **out = **in + } + if in.IkeEncryption != nil { + in, out := &in.IkeEncryption, &out.IkeEncryption + *out = new(string) + **out = **in + } + if in.IkeIntegrity != nil { + in, out := &in.IkeIntegrity, &out.IkeIntegrity + *out = new(string) + **out = **in + } + if in.IpsecEncryption != nil { + in, out := &in.IpsecEncryption, &out.IpsecEncryption + *out = new(string) + **out = **in + } + if in.IpsecIntegrity != nil { + in, out := &in.IpsecIntegrity, &out.IpsecIntegrity + *out = new(string) + **out = **in + } + if in.PfsGroup != nil { + in, out := &in.PfsGroup, &out.PfsGroup + *out = new(string) + **out = **in + } + if in.SaDatasize != nil { + in, out := &in.SaDatasize, &out.SaDatasize + *out = new(float64) + **out = **in + } + if in.SaLifetime != nil { + in, out := &in.SaLifetime, &out.SaLifetime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkGatewayConnectionIpsecPolicyObservation. +func (in *VirtualNetworkGatewayConnectionIpsecPolicyObservation) DeepCopy() *VirtualNetworkGatewayConnectionIpsecPolicyObservation { + if in == nil { + return nil + } + out := new(VirtualNetworkGatewayConnectionIpsecPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualNetworkGatewayConnectionIpsecPolicyParameters) DeepCopyInto(out *VirtualNetworkGatewayConnectionIpsecPolicyParameters) { + *out = *in + if in.DhGroup != nil { + in, out := &in.DhGroup, &out.DhGroup + *out = new(string) + **out = **in + } + if in.IkeEncryption != nil { + in, out := &in.IkeEncryption, &out.IkeEncryption + *out = new(string) + **out = **in + } + if in.IkeIntegrity != nil { + in, out := &in.IkeIntegrity, &out.IkeIntegrity + *out = new(string) + **out = **in + } + if in.IpsecEncryption != nil { + in, out := &in.IpsecEncryption, &out.IpsecEncryption + *out = new(string) + **out = **in + } + if in.IpsecIntegrity != nil { + in, out := &in.IpsecIntegrity, &out.IpsecIntegrity + *out = new(string) + **out = **in + } + if in.PfsGroup != nil { + in, out := &in.PfsGroup, &out.PfsGroup + *out = new(string) + **out = **in + } + if in.SaDatasize != nil { + in, out := &in.SaDatasize, &out.SaDatasize + *out = new(float64) + **out = **in + } + if in.SaLifetime != nil { + in, out := &in.SaLifetime, &out.SaLifetime + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkGatewayConnectionIpsecPolicyParameters. +func (in *VirtualNetworkGatewayConnectionIpsecPolicyParameters) DeepCopy() *VirtualNetworkGatewayConnectionIpsecPolicyParameters { + if in == nil { + return nil + } + out := new(VirtualNetworkGatewayConnectionIpsecPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualNetworkGatewayConnectionList) DeepCopyInto(out *VirtualNetworkGatewayConnectionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VirtualNetworkGatewayConnection, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkGatewayConnectionList. +func (in *VirtualNetworkGatewayConnectionList) DeepCopy() *VirtualNetworkGatewayConnectionList { + if in == nil { + return nil + } + out := new(VirtualNetworkGatewayConnectionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VirtualNetworkGatewayConnectionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualNetworkGatewayConnectionObservation) DeepCopyInto(out *VirtualNetworkGatewayConnectionObservation) { + *out = *in + if in.ConnectionMode != nil { + in, out := &in.ConnectionMode, &out.ConnectionMode + *out = new(string) + **out = **in + } + if in.ConnectionProtocol != nil { + in, out := &in.ConnectionProtocol, &out.ConnectionProtocol + *out = new(string) + **out = **in + } + if in.CustomBGPAddresses != nil { + in, out := &in.CustomBGPAddresses, &out.CustomBGPAddresses + *out = new(CustomBGPAddressesObservation) + (*in).DeepCopyInto(*out) + } + if in.DpdTimeoutSeconds != nil { + in, out := &in.DpdTimeoutSeconds, &out.DpdTimeoutSeconds + *out = new(float64) + **out = **in + } + if in.EgressNATRuleIds != nil { + in, out := &in.EgressNATRuleIds, &out.EgressNATRuleIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.EnableBGP != nil { + in, out := &in.EnableBGP, &out.EnableBGP + *out = new(bool) + **out = **in + } + if in.ExpressRouteCircuitID != nil { + in, out := &in.ExpressRouteCircuitID, &out.ExpressRouteCircuitID + *out = new(string) + **out = **in + } + if in.ExpressRouteGatewayBypass != nil { + in, out := &in.ExpressRouteGatewayBypass, &out.ExpressRouteGatewayBypass + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IngressNATRuleIds != nil { + in, out := &in.IngressNATRuleIds, &out.IngressNATRuleIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IpsecPolicy != nil { + in, out := &in.IpsecPolicy, &out.IpsecPolicy + *out = new(VirtualNetworkGatewayConnectionIpsecPolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.LocalAzureIPAddressEnabled != nil { + in, out := &in.LocalAzureIPAddressEnabled, 
&out.LocalAzureIPAddressEnabled + *out = new(bool) + **out = **in + } + if in.LocalNetworkGatewayID != nil { + in, out := &in.LocalNetworkGatewayID, &out.LocalNetworkGatewayID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PeerVirtualNetworkGatewayID != nil { + in, out := &in.PeerVirtualNetworkGatewayID, &out.PeerVirtualNetworkGatewayID + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.RoutingWeight != nil { + in, out := &in.RoutingWeight, &out.RoutingWeight + *out = new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TrafficSelectorPolicy != nil { + in, out := &in.TrafficSelectorPolicy, &out.TrafficSelectorPolicy + *out = make([]TrafficSelectorPolicyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.UsePolicyBasedTrafficSelectors != nil { + in, out := &in.UsePolicyBasedTrafficSelectors, &out.UsePolicyBasedTrafficSelectors + *out = new(bool) + **out = **in + } + if in.VirtualNetworkGatewayID != nil { + in, out := &in.VirtualNetworkGatewayID, &out.VirtualNetworkGatewayID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkGatewayConnectionObservation. 
+func (in *VirtualNetworkGatewayConnectionObservation) DeepCopy() *VirtualNetworkGatewayConnectionObservation { + if in == nil { + return nil + } + out := new(VirtualNetworkGatewayConnectionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualNetworkGatewayConnectionParameters) DeepCopyInto(out *VirtualNetworkGatewayConnectionParameters) { + *out = *in + if in.AuthorizationKeySecretRef != nil { + in, out := &in.AuthorizationKeySecretRef, &out.AuthorizationKeySecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ConnectionMode != nil { + in, out := &in.ConnectionMode, &out.ConnectionMode + *out = new(string) + **out = **in + } + if in.ConnectionProtocol != nil { + in, out := &in.ConnectionProtocol, &out.ConnectionProtocol + *out = new(string) + **out = **in + } + if in.CustomBGPAddresses != nil { + in, out := &in.CustomBGPAddresses, &out.CustomBGPAddresses + *out = new(CustomBGPAddressesParameters) + (*in).DeepCopyInto(*out) + } + if in.DpdTimeoutSeconds != nil { + in, out := &in.DpdTimeoutSeconds, &out.DpdTimeoutSeconds + *out = new(float64) + **out = **in + } + if in.EgressNATRuleIds != nil { + in, out := &in.EgressNATRuleIds, &out.EgressNATRuleIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.EnableBGP != nil { + in, out := &in.EnableBGP, &out.EnableBGP + *out = new(bool) + **out = **in + } + if in.ExpressRouteCircuitID != nil { + in, out := &in.ExpressRouteCircuitID, &out.ExpressRouteCircuitID + *out = new(string) + **out = **in + } + if in.ExpressRouteGatewayBypass != nil { + in, out := &in.ExpressRouteGatewayBypass, &out.ExpressRouteGatewayBypass + *out = new(bool) + **out = **in + } + if in.IngressNATRuleIds != nil { + in, out := &in.IngressNATRuleIds, &out.IngressNATRuleIds + *out = 
make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IpsecPolicy != nil { + in, out := &in.IpsecPolicy, &out.IpsecPolicy + *out = new(VirtualNetworkGatewayConnectionIpsecPolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.LocalAzureIPAddressEnabled != nil { + in, out := &in.LocalAzureIPAddressEnabled, &out.LocalAzureIPAddressEnabled + *out = new(bool) + **out = **in + } + if in.LocalNetworkGatewayID != nil { + in, out := &in.LocalNetworkGatewayID, &out.LocalNetworkGatewayID + *out = new(string) + **out = **in + } + if in.LocalNetworkGatewayIDRef != nil { + in, out := &in.LocalNetworkGatewayIDRef, &out.LocalNetworkGatewayIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LocalNetworkGatewayIDSelector != nil { + in, out := &in.LocalNetworkGatewayIDSelector, &out.LocalNetworkGatewayIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PeerVirtualNetworkGatewayID != nil { + in, out := &in.PeerVirtualNetworkGatewayID, &out.PeerVirtualNetworkGatewayID + *out = new(string) + **out = **in + } + if in.PeerVirtualNetworkGatewayIDRef != nil { + in, out := &in.PeerVirtualNetworkGatewayIDRef, &out.PeerVirtualNetworkGatewayIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PeerVirtualNetworkGatewayIDSelector != nil { + in, out := &in.PeerVirtualNetworkGatewayIDSelector, &out.PeerVirtualNetworkGatewayIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { 
+ in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RoutingWeight != nil { + in, out := &in.RoutingWeight, &out.RoutingWeight + *out = new(float64) + **out = **in + } + if in.SharedKeySecretRef != nil { + in, out := &in.SharedKeySecretRef, &out.SharedKeySecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TrafficSelectorPolicy != nil { + in, out := &in.TrafficSelectorPolicy, &out.TrafficSelectorPolicy + *out = make([]TrafficSelectorPolicyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.UsePolicyBasedTrafficSelectors != nil { + in, out := &in.UsePolicyBasedTrafficSelectors, &out.UsePolicyBasedTrafficSelectors + *out = new(bool) + **out = **in + } + if in.VirtualNetworkGatewayID != nil { + in, out := &in.VirtualNetworkGatewayID, &out.VirtualNetworkGatewayID + *out = new(string) + **out = **in + } + if in.VirtualNetworkGatewayIDRef != nil { + in, out := &in.VirtualNetworkGatewayIDRef, &out.VirtualNetworkGatewayIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkGatewayIDSelector != nil { + in, out := &in.VirtualNetworkGatewayIDSelector, &out.VirtualNetworkGatewayIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkGatewayConnectionParameters. 
+func (in *VirtualNetworkGatewayConnectionParameters) DeepCopy() *VirtualNetworkGatewayConnectionParameters { + if in == nil { + return nil + } + out := new(VirtualNetworkGatewayConnectionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualNetworkGatewayConnectionSpec) DeepCopyInto(out *VirtualNetworkGatewayConnectionSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkGatewayConnectionSpec. +func (in *VirtualNetworkGatewayConnectionSpec) DeepCopy() *VirtualNetworkGatewayConnectionSpec { + if in == nil { + return nil + } + out := new(VirtualNetworkGatewayConnectionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualNetworkGatewayConnectionStatus) DeepCopyInto(out *VirtualNetworkGatewayConnectionStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkGatewayConnectionStatus. +func (in *VirtualNetworkGatewayConnectionStatus) DeepCopy() *VirtualNetworkGatewayConnectionStatus { + if in == nil { + return nil + } + out := new(VirtualNetworkGatewayConnectionStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualNetworkGatewayIPConfigurationInitParameters) DeepCopyInto(out *VirtualNetworkGatewayIPConfigurationInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PrivateIPAddressAllocation != nil { + in, out := &in.PrivateIPAddressAllocation, &out.PrivateIPAddressAllocation + *out = new(string) + **out = **in + } + if in.PublicIPAddressID != nil { + in, out := &in.PublicIPAddressID, &out.PublicIPAddressID + *out = new(string) + **out = **in + } + if in.PublicIPAddressIDRef != nil { + in, out := &in.PublicIPAddressIDRef, &out.PublicIPAddressIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PublicIPAddressIDSelector != nil { + in, out := &in.PublicIPAddressIDSelector, &out.PublicIPAddressIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkGatewayIPConfigurationInitParameters. +func (in *VirtualNetworkGatewayIPConfigurationInitParameters) DeepCopy() *VirtualNetworkGatewayIPConfigurationInitParameters { + if in == nil { + return nil + } + out := new(VirtualNetworkGatewayIPConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualNetworkGatewayIPConfigurationObservation) DeepCopyInto(out *VirtualNetworkGatewayIPConfigurationObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PrivateIPAddressAllocation != nil { + in, out := &in.PrivateIPAddressAllocation, &out.PrivateIPAddressAllocation + *out = new(string) + **out = **in + } + if in.PublicIPAddressID != nil { + in, out := &in.PublicIPAddressID, &out.PublicIPAddressID + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkGatewayIPConfigurationObservation. +func (in *VirtualNetworkGatewayIPConfigurationObservation) DeepCopy() *VirtualNetworkGatewayIPConfigurationObservation { + if in == nil { + return nil + } + out := new(VirtualNetworkGatewayIPConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualNetworkGatewayIPConfigurationParameters) DeepCopyInto(out *VirtualNetworkGatewayIPConfigurationParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PrivateIPAddressAllocation != nil { + in, out := &in.PrivateIPAddressAllocation, &out.PrivateIPAddressAllocation + *out = new(string) + **out = **in + } + if in.PublicIPAddressID != nil { + in, out := &in.PublicIPAddressID, &out.PublicIPAddressID + *out = new(string) + **out = **in + } + if in.PublicIPAddressIDRef != nil { + in, out := &in.PublicIPAddressIDRef, &out.PublicIPAddressIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PublicIPAddressIDSelector != nil { + in, out := &in.PublicIPAddressIDSelector, &out.PublicIPAddressIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkGatewayIPConfigurationParameters. +func (in *VirtualNetworkGatewayIPConfigurationParameters) DeepCopy() *VirtualNetworkGatewayIPConfigurationParameters { + if in == nil { + return nil + } + out := new(VirtualNetworkGatewayIPConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualNetworkGatewayInitParameters) DeepCopyInto(out *VirtualNetworkGatewayInitParameters) { + *out = *in + if in.ActiveActive != nil { + in, out := &in.ActiveActive, &out.ActiveActive + *out = new(bool) + **out = **in + } + if in.BGPRouteTranslationForNATEnabled != nil { + in, out := &in.BGPRouteTranslationForNATEnabled, &out.BGPRouteTranslationForNATEnabled + *out = new(bool) + **out = **in + } + if in.BGPSettings != nil { + in, out := &in.BGPSettings, &out.BGPSettings + *out = new(VirtualNetworkGatewayBGPSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomRoute != nil { + in, out := &in.CustomRoute, &out.CustomRoute + *out = new(CustomRouteInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DNSForwardingEnabled != nil { + in, out := &in.DNSForwardingEnabled, &out.DNSForwardingEnabled + *out = new(bool) + **out = **in + } + if in.DefaultLocalNetworkGatewayID != nil { + in, out := &in.DefaultLocalNetworkGatewayID, &out.DefaultLocalNetworkGatewayID + *out = new(string) + **out = **in + } + if in.EdgeZone != nil { + in, out := &in.EdgeZone, &out.EdgeZone + *out = new(string) + **out = **in + } + if in.EnableBGP != nil { + in, out := &in.EnableBGP, &out.EnableBGP + *out = new(bool) + **out = **in + } + if in.Generation != nil { + in, out := &in.Generation, &out.Generation + *out = new(string) + **out = **in + } + if in.IPConfiguration != nil { + in, out := &in.IPConfiguration, &out.IPConfiguration + *out = make([]VirtualNetworkGatewayIPConfigurationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPSecReplayProtectionEnabled != nil { + in, out := &in.IPSecReplayProtectionEnabled, &out.IPSecReplayProtectionEnabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PolicyGroup != nil { + in, out := &in.PolicyGroup, &out.PolicyGroup + *out = make([]PolicyGroupInitParameters, len(*in)) + 
for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PrivateIPAddressEnabled != nil { + in, out := &in.PrivateIPAddressEnabled, &out.PrivateIPAddressEnabled + *out = new(bool) + **out = **in + } + if in.RemoteVnetTrafficEnabled != nil { + in, out := &in.RemoteVnetTrafficEnabled, &out.RemoteVnetTrafficEnabled + *out = new(bool) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.VPNClientConfiguration != nil { + in, out := &in.VPNClientConfiguration, &out.VPNClientConfiguration + *out = new(VPNClientConfigurationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.VPNType != nil { + in, out := &in.VPNType, &out.VPNType + *out = new(string) + **out = **in + } + if in.VirtualWanTrafficEnabled != nil { + in, out := &in.VirtualWanTrafficEnabled, &out.VirtualWanTrafficEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkGatewayInitParameters. +func (in *VirtualNetworkGatewayInitParameters) DeepCopy() *VirtualNetworkGatewayInitParameters { + if in == nil { + return nil + } + out := new(VirtualNetworkGatewayInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualNetworkGatewayList) DeepCopyInto(out *VirtualNetworkGatewayList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VirtualNetworkGateway, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkGatewayList. +func (in *VirtualNetworkGatewayList) DeepCopy() *VirtualNetworkGatewayList { + if in == nil { + return nil + } + out := new(VirtualNetworkGatewayList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VirtualNetworkGatewayList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualNetworkGatewayObservation) DeepCopyInto(out *VirtualNetworkGatewayObservation) { + *out = *in + if in.ActiveActive != nil { + in, out := &in.ActiveActive, &out.ActiveActive + *out = new(bool) + **out = **in + } + if in.BGPRouteTranslationForNATEnabled != nil { + in, out := &in.BGPRouteTranslationForNATEnabled, &out.BGPRouteTranslationForNATEnabled + *out = new(bool) + **out = **in + } + if in.BGPSettings != nil { + in, out := &in.BGPSettings, &out.BGPSettings + *out = new(VirtualNetworkGatewayBGPSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.CustomRoute != nil { + in, out := &in.CustomRoute, &out.CustomRoute + *out = new(CustomRouteObservation) + (*in).DeepCopyInto(*out) + } + if in.DNSForwardingEnabled != nil { + in, out := &in.DNSForwardingEnabled, &out.DNSForwardingEnabled + *out = new(bool) + **out = **in + } + if in.DefaultLocalNetworkGatewayID != nil { + in, out := &in.DefaultLocalNetworkGatewayID, &out.DefaultLocalNetworkGatewayID + *out = new(string) + **out = **in + } + if in.EdgeZone != nil { + in, out := &in.EdgeZone, &out.EdgeZone + *out = new(string) + **out = **in + } + if in.EnableBGP != nil { + in, out := &in.EnableBGP, &out.EnableBGP + *out = new(bool) + **out = **in + } + if in.Generation != nil { + in, out := &in.Generation, &out.Generation + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IPConfiguration != nil { + in, out := &in.IPConfiguration, &out.IPConfiguration + *out = make([]VirtualNetworkGatewayIPConfigurationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPSecReplayProtectionEnabled != nil { + in, out := &in.IPSecReplayProtectionEnabled, &out.IPSecReplayProtectionEnabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PolicyGroup != nil { + in, out := 
&in.PolicyGroup, &out.PolicyGroup + *out = make([]PolicyGroupObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PrivateIPAddressEnabled != nil { + in, out := &in.PrivateIPAddressEnabled, &out.PrivateIPAddressEnabled + *out = new(bool) + **out = **in + } + if in.RemoteVnetTrafficEnabled != nil { + in, out := &in.RemoteVnetTrafficEnabled, &out.RemoteVnetTrafficEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.VPNClientConfiguration != nil { + in, out := &in.VPNClientConfiguration, &out.VPNClientConfiguration + *out = new(VPNClientConfigurationObservation) + (*in).DeepCopyInto(*out) + } + if in.VPNType != nil { + in, out := &in.VPNType, &out.VPNType + *out = new(string) + **out = **in + } + if in.VirtualWanTrafficEnabled != nil { + in, out := &in.VirtualWanTrafficEnabled, &out.VirtualWanTrafficEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkGatewayObservation. +func (in *VirtualNetworkGatewayObservation) DeepCopy() *VirtualNetworkGatewayObservation { + if in == nil { + return nil + } + out := new(VirtualNetworkGatewayObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualNetworkGatewayParameters) DeepCopyInto(out *VirtualNetworkGatewayParameters) { + *out = *in + if in.ActiveActive != nil { + in, out := &in.ActiveActive, &out.ActiveActive + *out = new(bool) + **out = **in + } + if in.BGPRouteTranslationForNATEnabled != nil { + in, out := &in.BGPRouteTranslationForNATEnabled, &out.BGPRouteTranslationForNATEnabled + *out = new(bool) + **out = **in + } + if in.BGPSettings != nil { + in, out := &in.BGPSettings, &out.BGPSettings + *out = new(VirtualNetworkGatewayBGPSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomRoute != nil { + in, out := &in.CustomRoute, &out.CustomRoute + *out = new(CustomRouteParameters) + (*in).DeepCopyInto(*out) + } + if in.DNSForwardingEnabled != nil { + in, out := &in.DNSForwardingEnabled, &out.DNSForwardingEnabled + *out = new(bool) + **out = **in + } + if in.DefaultLocalNetworkGatewayID != nil { + in, out := &in.DefaultLocalNetworkGatewayID, &out.DefaultLocalNetworkGatewayID + *out = new(string) + **out = **in + } + if in.EdgeZone != nil { + in, out := &in.EdgeZone, &out.EdgeZone + *out = new(string) + **out = **in + } + if in.EnableBGP != nil { + in, out := &in.EnableBGP, &out.EnableBGP + *out = new(bool) + **out = **in + } + if in.Generation != nil { + in, out := &in.Generation, &out.Generation + *out = new(string) + **out = **in + } + if in.IPConfiguration != nil { + in, out := &in.IPConfiguration, &out.IPConfiguration + *out = make([]VirtualNetworkGatewayIPConfigurationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPSecReplayProtectionEnabled != nil { + in, out := &in.IPSecReplayProtectionEnabled, &out.IPSecReplayProtectionEnabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PolicyGroup != nil { + in, out := &in.PolicyGroup, &out.PolicyGroup + *out = make([]PolicyGroupParameters, len(*in)) + for i := range *in { + 
(*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PrivateIPAddressEnabled != nil { + in, out := &in.PrivateIPAddressEnabled, &out.PrivateIPAddressEnabled + *out = new(bool) + **out = **in + } + if in.RemoteVnetTrafficEnabled != nil { + in, out := &in.RemoteVnetTrafficEnabled, &out.RemoteVnetTrafficEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.VPNClientConfiguration != nil { + in, out := &in.VPNClientConfiguration, &out.VPNClientConfiguration + *out = new(VPNClientConfigurationParameters) + (*in).DeepCopyInto(*out) + } + if in.VPNType != nil { + in, out := &in.VPNType, &out.VPNType + *out = new(string) + **out = **in + } + if in.VirtualWanTrafficEnabled != nil { + in, out := &in.VirtualWanTrafficEnabled, &out.VirtualWanTrafficEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkGatewayParameters. 
+func (in *VirtualNetworkGatewayParameters) DeepCopy() *VirtualNetworkGatewayParameters { + if in == nil { + return nil + } + out := new(VirtualNetworkGatewayParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualNetworkGatewaySpec) DeepCopyInto(out *VirtualNetworkGatewaySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkGatewaySpec. +func (in *VirtualNetworkGatewaySpec) DeepCopy() *VirtualNetworkGatewaySpec { + if in == nil { + return nil + } + out := new(VirtualNetworkGatewaySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualNetworkGatewayStatus) DeepCopyInto(out *VirtualNetworkGatewayStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkGatewayStatus. +func (in *VirtualNetworkGatewayStatus) DeepCopy() *VirtualNetworkGatewayStatus { + if in == nil { + return nil + } + out := new(VirtualNetworkGatewayStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualNetworkInitParameters) DeepCopyInto(out *VirtualNetworkInitParameters) { + *out = *in + if in.AddressSpace != nil { + in, out := &in.AddressSpace, &out.AddressSpace + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.BGPCommunity != nil { + in, out := &in.BGPCommunity, &out.BGPCommunity + *out = new(string) + **out = **in + } + if in.DDOSProtectionPlan != nil { + in, out := &in.DDOSProtectionPlan, &out.DDOSProtectionPlan + *out = new(DDOSProtectionPlanInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DNSServers != nil { + in, out := &in.DNSServers, &out.DNSServers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.EdgeZone != nil { + in, out := &in.EdgeZone, &out.EdgeZone + *out = new(string) + **out = **in + } + if in.Encryption != nil { + in, out := &in.Encryption, &out.Encryption + *out = new(EncryptionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FlowTimeoutInMinutes != nil { + in, out := &in.FlowTimeoutInMinutes, &out.FlowTimeoutInMinutes + *out = new(float64) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkInitParameters. 
+func (in *VirtualNetworkInitParameters) DeepCopy() *VirtualNetworkInitParameters { + if in == nil { + return nil + } + out := new(VirtualNetworkInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualNetworkList) DeepCopyInto(out *VirtualNetworkList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VirtualNetwork, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkList. +func (in *VirtualNetworkList) DeepCopy() *VirtualNetworkList { + if in == nil { + return nil + } + out := new(VirtualNetworkList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VirtualNetworkList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualNetworkObservation) DeepCopyInto(out *VirtualNetworkObservation) { + *out = *in + if in.AddressSpace != nil { + in, out := &in.AddressSpace, &out.AddressSpace + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.BGPCommunity != nil { + in, out := &in.BGPCommunity, &out.BGPCommunity + *out = new(string) + **out = **in + } + if in.DDOSProtectionPlan != nil { + in, out := &in.DDOSProtectionPlan, &out.DDOSProtectionPlan + *out = new(DDOSProtectionPlanObservation) + (*in).DeepCopyInto(*out) + } + if in.DNSServers != nil { + in, out := &in.DNSServers, &out.DNSServers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.EdgeZone != nil { + in, out := &in.EdgeZone, &out.EdgeZone + *out = new(string) + **out = **in + } + if in.Encryption != nil { + in, out := &in.Encryption, &out.Encryption + *out = new(EncryptionObservation) + (*in).DeepCopyInto(*out) + } + if in.FlowTimeoutInMinutes != nil { + in, out := &in.FlowTimeoutInMinutes, &out.FlowTimeoutInMinutes + *out = new(float64) + **out = **in + } + if in.GUID != nil { + in, out := &in.GUID, &out.GUID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Subnet != nil { + in, out := &in.Subnet, &out.Subnet + *out = make([]VirtualNetworkSubnetObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in 
{ + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkObservation. +func (in *VirtualNetworkObservation) DeepCopy() *VirtualNetworkObservation { + if in == nil { + return nil + } + out := new(VirtualNetworkObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualNetworkParameters) DeepCopyInto(out *VirtualNetworkParameters) { + *out = *in + if in.AddressSpace != nil { + in, out := &in.AddressSpace, &out.AddressSpace + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.BGPCommunity != nil { + in, out := &in.BGPCommunity, &out.BGPCommunity + *out = new(string) + **out = **in + } + if in.DDOSProtectionPlan != nil { + in, out := &in.DDOSProtectionPlan, &out.DDOSProtectionPlan + *out = new(DDOSProtectionPlanParameters) + (*in).DeepCopyInto(*out) + } + if in.DNSServers != nil { + in, out := &in.DNSServers, &out.DNSServers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.EdgeZone != nil { + in, out := &in.EdgeZone, &out.EdgeZone + *out = new(string) + **out = **in + } + if in.Encryption != nil { + in, out := &in.Encryption, &out.Encryption + *out = new(EncryptionParameters) + (*in).DeepCopyInto(*out) + } + if in.FlowTimeoutInMinutes != nil { + in, out := &in.FlowTimeoutInMinutes, &out.FlowTimeoutInMinutes + *out = new(float64) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if 
in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkParameters. +func (in *VirtualNetworkParameters) DeepCopy() *VirtualNetworkParameters { + if in == nil { + return nil + } + out := new(VirtualNetworkParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualNetworkSpec) DeepCopyInto(out *VirtualNetworkSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkSpec. +func (in *VirtualNetworkSpec) DeepCopy() *VirtualNetworkSpec { + if in == nil { + return nil + } + out := new(VirtualNetworkSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualNetworkStatus) DeepCopyInto(out *VirtualNetworkStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkStatus. +func (in *VirtualNetworkStatus) DeepCopy() *VirtualNetworkStatus { + if in == nil { + return nil + } + out := new(VirtualNetworkStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualNetworkSubnetInitParameters) DeepCopyInto(out *VirtualNetworkSubnetInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkSubnetInitParameters. +func (in *VirtualNetworkSubnetInitParameters) DeepCopy() *VirtualNetworkSubnetInitParameters { + if in == nil { + return nil + } + out := new(VirtualNetworkSubnetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualNetworkSubnetObservation) DeepCopyInto(out *VirtualNetworkSubnetObservation) { + *out = *in + if in.AddressPrefix != nil { + in, out := &in.AddressPrefix, &out.AddressPrefix + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SecurityGroup != nil { + in, out := &in.SecurityGroup, &out.SecurityGroup + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkSubnetObservation. 
+func (in *VirtualNetworkSubnetObservation) DeepCopy() *VirtualNetworkSubnetObservation { + if in == nil { + return nil + } + out := new(VirtualNetworkSubnetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualNetworkSubnetParameters) DeepCopyInto(out *VirtualNetworkSubnetParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkSubnetParameters. +func (in *VirtualNetworkSubnetParameters) DeepCopy() *VirtualNetworkSubnetParameters { + if in == nil { + return nil + } + out := new(VirtualNetworkSubnetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WafConfigurationInitParameters) DeepCopyInto(out *WafConfigurationInitParameters) { + *out = *in + if in.DisabledRuleGroup != nil { + in, out := &in.DisabledRuleGroup, &out.DisabledRuleGroup + *out = make([]DisabledRuleGroupInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Exclusion != nil { + in, out := &in.Exclusion, &out.Exclusion + *out = make([]ExclusionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FileUploadLimitMb != nil { + in, out := &in.FileUploadLimitMb, &out.FileUploadLimitMb + *out = new(float64) + **out = **in + } + if in.FirewallMode != nil { + in, out := &in.FirewallMode, &out.FirewallMode + *out = new(string) + **out = **in + } + if in.MaxRequestBodySizeKb != nil { + in, out := &in.MaxRequestBodySizeKb, &out.MaxRequestBodySizeKb + *out = new(float64) + **out = **in + } + if in.RequestBodyCheck != nil { + in, out := &in.RequestBodyCheck, &out.RequestBodyCheck + *out 
= new(bool) + **out = **in + } + if in.RuleSetType != nil { + in, out := &in.RuleSetType, &out.RuleSetType + *out = new(string) + **out = **in + } + if in.RuleSetVersion != nil { + in, out := &in.RuleSetVersion, &out.RuleSetVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WafConfigurationInitParameters. +func (in *WafConfigurationInitParameters) DeepCopy() *WafConfigurationInitParameters { + if in == nil { + return nil + } + out := new(WafConfigurationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WafConfigurationObservation) DeepCopyInto(out *WafConfigurationObservation) { + *out = *in + if in.DisabledRuleGroup != nil { + in, out := &in.DisabledRuleGroup, &out.DisabledRuleGroup + *out = make([]DisabledRuleGroupObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Exclusion != nil { + in, out := &in.Exclusion, &out.Exclusion + *out = make([]ExclusionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FileUploadLimitMb != nil { + in, out := &in.FileUploadLimitMb, &out.FileUploadLimitMb + *out = new(float64) + **out = **in + } + if in.FirewallMode != nil { + in, out := &in.FirewallMode, &out.FirewallMode + *out = new(string) + **out = **in + } + if in.MaxRequestBodySizeKb != nil { + in, out := &in.MaxRequestBodySizeKb, &out.MaxRequestBodySizeKb + *out = new(float64) + **out = **in + } + if in.RequestBodyCheck != nil { + in, out := &in.RequestBodyCheck, &out.RequestBodyCheck + *out = new(bool) + **out = **in + } + if in.RuleSetType != nil { + in, out := &in.RuleSetType, &out.RuleSetType + *out = new(string) + **out = **in + } + if in.RuleSetVersion != 
nil { + in, out := &in.RuleSetVersion, &out.RuleSetVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WafConfigurationObservation. +func (in *WafConfigurationObservation) DeepCopy() *WafConfigurationObservation { + if in == nil { + return nil + } + out := new(WafConfigurationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WafConfigurationParameters) DeepCopyInto(out *WafConfigurationParameters) { + *out = *in + if in.DisabledRuleGroup != nil { + in, out := &in.DisabledRuleGroup, &out.DisabledRuleGroup + *out = make([]DisabledRuleGroupParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Exclusion != nil { + in, out := &in.Exclusion, &out.Exclusion + *out = make([]ExclusionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FileUploadLimitMb != nil { + in, out := &in.FileUploadLimitMb, &out.FileUploadLimitMb + *out = new(float64) + **out = **in + } + if in.FirewallMode != nil { + in, out := &in.FirewallMode, &out.FirewallMode + *out = new(string) + **out = **in + } + if in.MaxRequestBodySizeKb != nil { + in, out := &in.MaxRequestBodySizeKb, &out.MaxRequestBodySizeKb + *out = new(float64) + **out = **in + } + if in.RequestBodyCheck != nil { + in, out := &in.RequestBodyCheck, &out.RequestBodyCheck + *out = new(bool) + **out = **in + } + if in.RuleSetType != nil { + in, out := &in.RuleSetType, &out.RuleSetType + *out = new(string) + **out = **in + } + if in.RuleSetVersion != nil { + in, out := &in.RuleSetVersion, &out.RuleSetVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a 
new WafConfigurationParameters. +func (in *WafConfigurationParameters) DeepCopy() *WafConfigurationParameters { + if in == nil { + return nil + } + out := new(WafConfigurationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WatcherFlowLog) DeepCopyInto(out *WatcherFlowLog) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WatcherFlowLog. +func (in *WatcherFlowLog) DeepCopy() *WatcherFlowLog { + if in == nil { + return nil + } + out := new(WatcherFlowLog) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WatcherFlowLog) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WatcherFlowLogInitParameters) DeepCopyInto(out *WatcherFlowLogInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.NetworkSecurityGroupID != nil { + in, out := &in.NetworkSecurityGroupID, &out.NetworkSecurityGroupID + *out = new(string) + **out = **in + } + if in.NetworkSecurityGroupIDRef != nil { + in, out := &in.NetworkSecurityGroupIDRef, &out.NetworkSecurityGroupIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NetworkSecurityGroupIDSelector != nil { + in, out := &in.NetworkSecurityGroupIDSelector, &out.NetworkSecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RetentionPolicy != nil { + in, out := &in.RetentionPolicy, &out.RetentionPolicy + *out = new(RetentionPolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StorageAccountID != nil { + in, out := &in.StorageAccountID, &out.StorageAccountID + *out = new(string) + **out = **in + } + if in.StorageAccountIDRef != nil { + in, out := &in.StorageAccountIDRef, &out.StorageAccountIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StorageAccountIDSelector != nil { + in, out := &in.StorageAccountIDSelector, &out.StorageAccountIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TrafficAnalytics != nil { + in, out := &in.TrafficAnalytics, &out.TrafficAnalytics + *out = new(TrafficAnalyticsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Version != nil { + in, out := &in.Version, 
&out.Version + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WatcherFlowLogInitParameters. +func (in *WatcherFlowLogInitParameters) DeepCopy() *WatcherFlowLogInitParameters { + if in == nil { + return nil + } + out := new(WatcherFlowLogInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WatcherFlowLogList) DeepCopyInto(out *WatcherFlowLogList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]WatcherFlowLog, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WatcherFlowLogList. +func (in *WatcherFlowLogList) DeepCopy() *WatcherFlowLogList { + if in == nil { + return nil + } + out := new(WatcherFlowLogList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WatcherFlowLogList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WatcherFlowLogObservation) DeepCopyInto(out *WatcherFlowLogObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.NetworkSecurityGroupID != nil { + in, out := &in.NetworkSecurityGroupID, &out.NetworkSecurityGroupID + *out = new(string) + **out = **in + } + if in.NetworkWatcherName != nil { + in, out := &in.NetworkWatcherName, &out.NetworkWatcherName + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.RetentionPolicy != nil { + in, out := &in.RetentionPolicy, &out.RetentionPolicy + *out = new(RetentionPolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.StorageAccountID != nil { + in, out := &in.StorageAccountID, &out.StorageAccountID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TrafficAnalytics != nil { + in, out := &in.TrafficAnalytics, &out.TrafficAnalytics + *out = new(TrafficAnalyticsObservation) + (*in).DeepCopyInto(*out) + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WatcherFlowLogObservation. 
+func (in *WatcherFlowLogObservation) DeepCopy() *WatcherFlowLogObservation { + if in == nil { + return nil + } + out := new(WatcherFlowLogObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WatcherFlowLogParameters) DeepCopyInto(out *WatcherFlowLogParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.NetworkSecurityGroupID != nil { + in, out := &in.NetworkSecurityGroupID, &out.NetworkSecurityGroupID + *out = new(string) + **out = **in + } + if in.NetworkSecurityGroupIDRef != nil { + in, out := &in.NetworkSecurityGroupIDRef, &out.NetworkSecurityGroupIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NetworkSecurityGroupIDSelector != nil { + in, out := &in.NetworkSecurityGroupIDSelector, &out.NetworkSecurityGroupIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.NetworkWatcherName != nil { + in, out := &in.NetworkWatcherName, &out.NetworkWatcherName + *out = new(string) + **out = **in + } + if in.NetworkWatcherNameRef != nil { + in, out := &in.NetworkWatcherNameRef, &out.NetworkWatcherNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NetworkWatcherNameSelector != nil { + in, out := &in.NetworkWatcherNameSelector, &out.NetworkWatcherNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, 
&out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RetentionPolicy != nil { + in, out := &in.RetentionPolicy, &out.RetentionPolicy + *out = new(RetentionPolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.StorageAccountID != nil { + in, out := &in.StorageAccountID, &out.StorageAccountID + *out = new(string) + **out = **in + } + if in.StorageAccountIDRef != nil { + in, out := &in.StorageAccountIDRef, &out.StorageAccountIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StorageAccountIDSelector != nil { + in, out := &in.StorageAccountIDSelector, &out.StorageAccountIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TrafficAnalytics != nil { + in, out := &in.TrafficAnalytics, &out.TrafficAnalytics + *out = new(TrafficAnalyticsParameters) + (*in).DeepCopyInto(*out) + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WatcherFlowLogParameters. +func (in *WatcherFlowLogParameters) DeepCopy() *WatcherFlowLogParameters { + if in == nil { + return nil + } + out := new(WatcherFlowLogParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WatcherFlowLogSpec) DeepCopyInto(out *WatcherFlowLogSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WatcherFlowLogSpec. +func (in *WatcherFlowLogSpec) DeepCopy() *WatcherFlowLogSpec { + if in == nil { + return nil + } + out := new(WatcherFlowLogSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WatcherFlowLogStatus) DeepCopyInto(out *WatcherFlowLogStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WatcherFlowLogStatus. +func (in *WatcherFlowLogStatus) DeepCopy() *WatcherFlowLogStatus { + if in == nil { + return nil + } + out := new(WatcherFlowLogStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebApplicationFirewallPolicy) DeepCopyInto(out *WebApplicationFirewallPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebApplicationFirewallPolicy. +func (in *WebApplicationFirewallPolicy) DeepCopy() *WebApplicationFirewallPolicy { + if in == nil { + return nil + } + out := new(WebApplicationFirewallPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *WebApplicationFirewallPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebApplicationFirewallPolicyInitParameters) DeepCopyInto(out *WebApplicationFirewallPolicyInitParameters) { + *out = *in + if in.CustomRules != nil { + in, out := &in.CustomRules, &out.CustomRules + *out = make([]CustomRulesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ManagedRules != nil { + in, out := &in.ManagedRules, &out.ManagedRules + *out = new(ManagedRulesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PolicySettings != nil { + in, out := &in.PolicySettings, &out.PolicySettings + *out = new(PolicySettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebApplicationFirewallPolicyInitParameters. +func (in *WebApplicationFirewallPolicyInitParameters) DeepCopy() *WebApplicationFirewallPolicyInitParameters { + if in == nil { + return nil + } + out := new(WebApplicationFirewallPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WebApplicationFirewallPolicyList) DeepCopyInto(out *WebApplicationFirewallPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]WebApplicationFirewallPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebApplicationFirewallPolicyList. +func (in *WebApplicationFirewallPolicyList) DeepCopy() *WebApplicationFirewallPolicyList { + if in == nil { + return nil + } + out := new(WebApplicationFirewallPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WebApplicationFirewallPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WebApplicationFirewallPolicyObservation) DeepCopyInto(out *WebApplicationFirewallPolicyObservation) { + *out = *in + if in.CustomRules != nil { + in, out := &in.CustomRules, &out.CustomRules + *out = make([]CustomRulesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HTTPListenerIds != nil { + in, out := &in.HTTPListenerIds, &out.HTTPListenerIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ManagedRules != nil { + in, out := &in.ManagedRules, &out.ManagedRules + *out = new(ManagedRulesObservation) + (*in).DeepCopyInto(*out) + } + if in.PathBasedRuleIds != nil { + in, out := &in.PathBasedRuleIds, &out.PathBasedRuleIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PolicySettings != nil { + in, out := &in.PolicySettings, &out.PolicySettings + *out = new(PolicySettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebApplicationFirewallPolicyObservation. 
+func (in *WebApplicationFirewallPolicyObservation) DeepCopy() *WebApplicationFirewallPolicyObservation { + if in == nil { + return nil + } + out := new(WebApplicationFirewallPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebApplicationFirewallPolicyParameters) DeepCopyInto(out *WebApplicationFirewallPolicyParameters) { + *out = *in + if in.CustomRules != nil { + in, out := &in.CustomRules, &out.CustomRules + *out = make([]CustomRulesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ManagedRules != nil { + in, out := &in.ManagedRules, &out.ManagedRules + *out = new(ManagedRulesParameters) + (*in).DeepCopyInto(*out) + } + if in.PolicySettings != nil { + in, out := &in.PolicySettings, &out.PolicySettings + *out = new(PolicySettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
WebApplicationFirewallPolicyParameters. +func (in *WebApplicationFirewallPolicyParameters) DeepCopy() *WebApplicationFirewallPolicyParameters { + if in == nil { + return nil + } + out := new(WebApplicationFirewallPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebApplicationFirewallPolicySpec) DeepCopyInto(out *WebApplicationFirewallPolicySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebApplicationFirewallPolicySpec. +func (in *WebApplicationFirewallPolicySpec) DeepCopy() *WebApplicationFirewallPolicySpec { + if in == nil { + return nil + } + out := new(WebApplicationFirewallPolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebApplicationFirewallPolicyStatus) DeepCopyInto(out *WebApplicationFirewallPolicyStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebApplicationFirewallPolicyStatus. 
+func (in *WebApplicationFirewallPolicyStatus) DeepCopy() *WebApplicationFirewallPolicyStatus { + if in == nil { + return nil + } + out := new(WebApplicationFirewallPolicyStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/network/v1beta2/zz_generated.managed.go b/apis/network/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..4c994aa1e --- /dev/null +++ b/apis/network/v1beta2/zz_generated.managed.go @@ -0,0 +1,1868 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this ApplicationGateway. +func (mg *ApplicationGateway) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ApplicationGateway. +func (mg *ApplicationGateway) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ApplicationGateway. +func (mg *ApplicationGateway) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ApplicationGateway. +func (mg *ApplicationGateway) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ApplicationGateway. +func (mg *ApplicationGateway) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ApplicationGateway. +func (mg *ApplicationGateway) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ApplicationGateway. +func (mg *ApplicationGateway) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) 
+} + +// SetDeletionPolicy of this ApplicationGateway. +func (mg *ApplicationGateway) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ApplicationGateway. +func (mg *ApplicationGateway) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ApplicationGateway. +func (mg *ApplicationGateway) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ApplicationGateway. +func (mg *ApplicationGateway) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ApplicationGateway. +func (mg *ApplicationGateway) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this ConnectionMonitor. +func (mg *ConnectionMonitor) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ConnectionMonitor. +func (mg *ConnectionMonitor) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ConnectionMonitor. +func (mg *ConnectionMonitor) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ConnectionMonitor. +func (mg *ConnectionMonitor) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ConnectionMonitor. +func (mg *ConnectionMonitor) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ConnectionMonitor. 
+func (mg *ConnectionMonitor) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ConnectionMonitor. +func (mg *ConnectionMonitor) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ConnectionMonitor. +func (mg *ConnectionMonitor) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ConnectionMonitor. +func (mg *ConnectionMonitor) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ConnectionMonitor. +func (mg *ConnectionMonitor) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ConnectionMonitor. +func (mg *ConnectionMonitor) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ConnectionMonitor. +func (mg *ConnectionMonitor) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this DNSZone. +func (mg *DNSZone) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this DNSZone. +func (mg *DNSZone) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this DNSZone. +func (mg *DNSZone) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this DNSZone. +func (mg *DNSZone) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this DNSZone. 
+func (mg *DNSZone) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this DNSZone. +func (mg *DNSZone) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this DNSZone. +func (mg *DNSZone) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this DNSZone. +func (mg *DNSZone) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this DNSZone. +func (mg *DNSZone) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this DNSZone. +func (mg *DNSZone) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this DNSZone. +func (mg *DNSZone) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this DNSZone. +func (mg *DNSZone) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this ExpressRouteCircuit. +func (mg *ExpressRouteCircuit) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ExpressRouteCircuit. +func (mg *ExpressRouteCircuit) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ExpressRouteCircuit. +func (mg *ExpressRouteCircuit) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ExpressRouteCircuit. 
+func (mg *ExpressRouteCircuit) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ExpressRouteCircuit. +func (mg *ExpressRouteCircuit) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ExpressRouteCircuit. +func (mg *ExpressRouteCircuit) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ExpressRouteCircuit. +func (mg *ExpressRouteCircuit) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ExpressRouteCircuit. +func (mg *ExpressRouteCircuit) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ExpressRouteCircuit. +func (mg *ExpressRouteCircuit) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ExpressRouteCircuit. +func (mg *ExpressRouteCircuit) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ExpressRouteCircuit. +func (mg *ExpressRouteCircuit) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ExpressRouteCircuit. +func (mg *ExpressRouteCircuit) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this ExpressRouteCircuitPeering. +func (mg *ExpressRouteCircuitPeering) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ExpressRouteCircuitPeering. 
+func (mg *ExpressRouteCircuitPeering) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ExpressRouteCircuitPeering. +func (mg *ExpressRouteCircuitPeering) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ExpressRouteCircuitPeering. +func (mg *ExpressRouteCircuitPeering) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ExpressRouteCircuitPeering. +func (mg *ExpressRouteCircuitPeering) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ExpressRouteCircuitPeering. +func (mg *ExpressRouteCircuitPeering) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ExpressRouteCircuitPeering. +func (mg *ExpressRouteCircuitPeering) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ExpressRouteCircuitPeering. +func (mg *ExpressRouteCircuitPeering) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ExpressRouteCircuitPeering. +func (mg *ExpressRouteCircuitPeering) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ExpressRouteCircuitPeering. +func (mg *ExpressRouteCircuitPeering) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ExpressRouteCircuitPeering. +func (mg *ExpressRouteCircuitPeering) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ExpressRouteCircuitPeering. 
+func (mg *ExpressRouteCircuitPeering) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this ExpressRouteConnection. +func (mg *ExpressRouteConnection) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ExpressRouteConnection. +func (mg *ExpressRouteConnection) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ExpressRouteConnection. +func (mg *ExpressRouteConnection) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ExpressRouteConnection. +func (mg *ExpressRouteConnection) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ExpressRouteConnection. +func (mg *ExpressRouteConnection) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ExpressRouteConnection. +func (mg *ExpressRouteConnection) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ExpressRouteConnection. +func (mg *ExpressRouteConnection) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ExpressRouteConnection. +func (mg *ExpressRouteConnection) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ExpressRouteConnection. +func (mg *ExpressRouteConnection) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ExpressRouteConnection. 
+func (mg *ExpressRouteConnection) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ExpressRouteConnection. +func (mg *ExpressRouteConnection) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ExpressRouteConnection. +func (mg *ExpressRouteConnection) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this ExpressRoutePort. +func (mg *ExpressRoutePort) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ExpressRoutePort. +func (mg *ExpressRoutePort) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ExpressRoutePort. +func (mg *ExpressRoutePort) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ExpressRoutePort. +func (mg *ExpressRoutePort) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ExpressRoutePort. +func (mg *ExpressRoutePort) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ExpressRoutePort. +func (mg *ExpressRoutePort) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ExpressRoutePort. +func (mg *ExpressRoutePort) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ExpressRoutePort. 
+func (mg *ExpressRoutePort) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ExpressRoutePort. +func (mg *ExpressRoutePort) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ExpressRoutePort. +func (mg *ExpressRoutePort) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ExpressRoutePort. +func (mg *ExpressRoutePort) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ExpressRoutePort. +func (mg *ExpressRoutePort) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Firewall. +func (mg *Firewall) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Firewall. +func (mg *Firewall) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Firewall. +func (mg *Firewall) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Firewall. +func (mg *Firewall) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Firewall. +func (mg *Firewall) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Firewall. +func (mg *Firewall) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Firewall. +func (mg *Firewall) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) 
+} + +// SetDeletionPolicy of this Firewall. +func (mg *Firewall) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Firewall. +func (mg *Firewall) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Firewall. +func (mg *Firewall) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Firewall. +func (mg *Firewall) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Firewall. +func (mg *Firewall) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this FirewallPolicy. +func (mg *FirewallPolicy) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this FirewallPolicy. +func (mg *FirewallPolicy) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this FirewallPolicy. +func (mg *FirewallPolicy) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this FirewallPolicy. +func (mg *FirewallPolicy) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this FirewallPolicy. +func (mg *FirewallPolicy) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this FirewallPolicy. +func (mg *FirewallPolicy) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this FirewallPolicy. 
+func (mg *FirewallPolicy) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this FirewallPolicy. +func (mg *FirewallPolicy) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this FirewallPolicy. +func (mg *FirewallPolicy) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this FirewallPolicy. +func (mg *FirewallPolicy) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this FirewallPolicy. +func (mg *FirewallPolicy) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this FirewallPolicy. +func (mg *FirewallPolicy) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this FrontDoor. +func (mg *FrontDoor) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this FrontDoor. +func (mg *FrontDoor) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this FrontDoor. +func (mg *FrontDoor) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this FrontDoor. +func (mg *FrontDoor) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this FrontDoor. +func (mg *FrontDoor) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this FrontDoor. 
+func (mg *FrontDoor) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this FrontDoor. +func (mg *FrontDoor) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this FrontDoor. +func (mg *FrontDoor) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this FrontDoor. +func (mg *FrontDoor) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this FrontDoor. +func (mg *FrontDoor) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this FrontDoor. +func (mg *FrontDoor) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this FrontDoor. +func (mg *FrontDoor) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this FrontdoorCustomHTTPSConfiguration. +func (mg *FrontdoorCustomHTTPSConfiguration) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this FrontdoorCustomHTTPSConfiguration. +func (mg *FrontdoorCustomHTTPSConfiguration) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this FrontdoorCustomHTTPSConfiguration. +func (mg *FrontdoorCustomHTTPSConfiguration) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this FrontdoorCustomHTTPSConfiguration. 
+func (mg *FrontdoorCustomHTTPSConfiguration) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this FrontdoorCustomHTTPSConfiguration. +func (mg *FrontdoorCustomHTTPSConfiguration) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this FrontdoorCustomHTTPSConfiguration. +func (mg *FrontdoorCustomHTTPSConfiguration) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this FrontdoorCustomHTTPSConfiguration. +func (mg *FrontdoorCustomHTTPSConfiguration) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this FrontdoorCustomHTTPSConfiguration. +func (mg *FrontdoorCustomHTTPSConfiguration) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this FrontdoorCustomHTTPSConfiguration. +func (mg *FrontdoorCustomHTTPSConfiguration) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this FrontdoorCustomHTTPSConfiguration. +func (mg *FrontdoorCustomHTTPSConfiguration) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this FrontdoorCustomHTTPSConfiguration. +func (mg *FrontdoorCustomHTTPSConfiguration) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this FrontdoorCustomHTTPSConfiguration. +func (mg *FrontdoorCustomHTTPSConfiguration) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this FrontdoorRulesEngine. 
+func (mg *FrontdoorRulesEngine) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this FrontdoorRulesEngine. +func (mg *FrontdoorRulesEngine) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this FrontdoorRulesEngine. +func (mg *FrontdoorRulesEngine) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this FrontdoorRulesEngine. +func (mg *FrontdoorRulesEngine) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this FrontdoorRulesEngine. +func (mg *FrontdoorRulesEngine) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this FrontdoorRulesEngine. +func (mg *FrontdoorRulesEngine) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this FrontdoorRulesEngine. +func (mg *FrontdoorRulesEngine) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this FrontdoorRulesEngine. +func (mg *FrontdoorRulesEngine) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this FrontdoorRulesEngine. +func (mg *FrontdoorRulesEngine) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this FrontdoorRulesEngine. +func (mg *FrontdoorRulesEngine) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this FrontdoorRulesEngine. 
+func (mg *FrontdoorRulesEngine) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this FrontdoorRulesEngine. +func (mg *FrontdoorRulesEngine) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this LocalNetworkGateway. +func (mg *LocalNetworkGateway) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this LocalNetworkGateway. +func (mg *LocalNetworkGateway) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this LocalNetworkGateway. +func (mg *LocalNetworkGateway) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this LocalNetworkGateway. +func (mg *LocalNetworkGateway) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this LocalNetworkGateway. +func (mg *LocalNetworkGateway) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this LocalNetworkGateway. +func (mg *LocalNetworkGateway) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this LocalNetworkGateway. +func (mg *LocalNetworkGateway) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this LocalNetworkGateway. +func (mg *LocalNetworkGateway) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this LocalNetworkGateway. 
+func (mg *LocalNetworkGateway) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this LocalNetworkGateway. +func (mg *LocalNetworkGateway) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this LocalNetworkGateway. +func (mg *LocalNetworkGateway) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this LocalNetworkGateway. +func (mg *LocalNetworkGateway) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Manager. +func (mg *Manager) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Manager. +func (mg *Manager) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Manager. +func (mg *Manager) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Manager. +func (mg *Manager) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Manager. +func (mg *Manager) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Manager. +func (mg *Manager) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Manager. +func (mg *Manager) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Manager. 
+func (mg *Manager) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Manager. +func (mg *Manager) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Manager. +func (mg *Manager) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Manager. +func (mg *Manager) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Manager. +func (mg *Manager) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this PacketCapture. +func (mg *PacketCapture) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this PacketCapture. +func (mg *PacketCapture) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this PacketCapture. +func (mg *PacketCapture) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this PacketCapture. +func (mg *PacketCapture) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this PacketCapture. +func (mg *PacketCapture) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this PacketCapture. +func (mg *PacketCapture) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this PacketCapture. +func (mg *PacketCapture) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) 
+} + +// SetDeletionPolicy of this PacketCapture. +func (mg *PacketCapture) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this PacketCapture. +func (mg *PacketCapture) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this PacketCapture. +func (mg *PacketCapture) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this PacketCapture. +func (mg *PacketCapture) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this PacketCapture. +func (mg *PacketCapture) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this PointToSiteVPNGateway. +func (mg *PointToSiteVPNGateway) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this PointToSiteVPNGateway. +func (mg *PointToSiteVPNGateway) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this PointToSiteVPNGateway. +func (mg *PointToSiteVPNGateway) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this PointToSiteVPNGateway. +func (mg *PointToSiteVPNGateway) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this PointToSiteVPNGateway. +func (mg *PointToSiteVPNGateway) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this PointToSiteVPNGateway. 
+func (mg *PointToSiteVPNGateway) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this PointToSiteVPNGateway. +func (mg *PointToSiteVPNGateway) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this PointToSiteVPNGateway. +func (mg *PointToSiteVPNGateway) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this PointToSiteVPNGateway. +func (mg *PointToSiteVPNGateway) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this PointToSiteVPNGateway. +func (mg *PointToSiteVPNGateway) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this PointToSiteVPNGateway. +func (mg *PointToSiteVPNGateway) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this PointToSiteVPNGateway. +func (mg *PointToSiteVPNGateway) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this PrivateDNSZone. +func (mg *PrivateDNSZone) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this PrivateDNSZone. +func (mg *PrivateDNSZone) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this PrivateDNSZone. +func (mg *PrivateDNSZone) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this PrivateDNSZone. +func (mg *PrivateDNSZone) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this PrivateDNSZone. 
+func (mg *PrivateDNSZone) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this PrivateDNSZone. +func (mg *PrivateDNSZone) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this PrivateDNSZone. +func (mg *PrivateDNSZone) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this PrivateDNSZone. +func (mg *PrivateDNSZone) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this PrivateDNSZone. +func (mg *PrivateDNSZone) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this PrivateDNSZone. +func (mg *PrivateDNSZone) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this PrivateDNSZone. +func (mg *PrivateDNSZone) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this PrivateDNSZone. +func (mg *PrivateDNSZone) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this PrivateEndpoint. +func (mg *PrivateEndpoint) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this PrivateEndpoint. +func (mg *PrivateEndpoint) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this PrivateEndpoint. +func (mg *PrivateEndpoint) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this PrivateEndpoint. 
+func (mg *PrivateEndpoint) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this PrivateEndpoint. +func (mg *PrivateEndpoint) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this PrivateEndpoint. +func (mg *PrivateEndpoint) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this PrivateEndpoint. +func (mg *PrivateEndpoint) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this PrivateEndpoint. +func (mg *PrivateEndpoint) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this PrivateEndpoint. +func (mg *PrivateEndpoint) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this PrivateEndpoint. +func (mg *PrivateEndpoint) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this PrivateEndpoint. +func (mg *PrivateEndpoint) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this PrivateEndpoint. +func (mg *PrivateEndpoint) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Profile. +func (mg *Profile) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Profile. +func (mg *Profile) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Profile. 
+func (mg *Profile) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Profile. +func (mg *Profile) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Profile. +func (mg *Profile) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Profile. +func (mg *Profile) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Profile. +func (mg *Profile) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Profile. +func (mg *Profile) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Profile. +func (mg *Profile) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Profile. +func (mg *Profile) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Profile. +func (mg *Profile) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Profile. +func (mg *Profile) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Subnet. +func (mg *Subnet) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Subnet. +func (mg *Subnet) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Subnet. 
+func (mg *Subnet) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Subnet. +func (mg *Subnet) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Subnet. +func (mg *Subnet) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Subnet. +func (mg *Subnet) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Subnet. +func (mg *Subnet) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Subnet. +func (mg *Subnet) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Subnet. +func (mg *Subnet) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Subnet. +func (mg *Subnet) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Subnet. +func (mg *Subnet) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Subnet. +func (mg *Subnet) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this TrafficManagerProfile. +func (mg *TrafficManagerProfile) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this TrafficManagerProfile. +func (mg *TrafficManagerProfile) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this TrafficManagerProfile. 
+func (mg *TrafficManagerProfile) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this TrafficManagerProfile. +func (mg *TrafficManagerProfile) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this TrafficManagerProfile. +func (mg *TrafficManagerProfile) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this TrafficManagerProfile. +func (mg *TrafficManagerProfile) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this TrafficManagerProfile. +func (mg *TrafficManagerProfile) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this TrafficManagerProfile. +func (mg *TrafficManagerProfile) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this TrafficManagerProfile. +func (mg *TrafficManagerProfile) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this TrafficManagerProfile. +func (mg *TrafficManagerProfile) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this TrafficManagerProfile. +func (mg *TrafficManagerProfile) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this TrafficManagerProfile. +func (mg *TrafficManagerProfile) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this VPNGateway. 
+func (mg *VPNGateway) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this VPNGateway. +func (mg *VPNGateway) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this VPNGateway. +func (mg *VPNGateway) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this VPNGateway. +func (mg *VPNGateway) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this VPNGateway. +func (mg *VPNGateway) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this VPNGateway. +func (mg *VPNGateway) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this VPNGateway. +func (mg *VPNGateway) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this VPNGateway. +func (mg *VPNGateway) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this VPNGateway. +func (mg *VPNGateway) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this VPNGateway. +func (mg *VPNGateway) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this VPNGateway. +func (mg *VPNGateway) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this VPNGateway. 
+func (mg *VPNGateway) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this VPNGatewayConnection. +func (mg *VPNGatewayConnection) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this VPNGatewayConnection. +func (mg *VPNGatewayConnection) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this VPNGatewayConnection. +func (mg *VPNGatewayConnection) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this VPNGatewayConnection. +func (mg *VPNGatewayConnection) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this VPNGatewayConnection. +func (mg *VPNGatewayConnection) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this VPNGatewayConnection. +func (mg *VPNGatewayConnection) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this VPNGatewayConnection. +func (mg *VPNGatewayConnection) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this VPNGatewayConnection. +func (mg *VPNGatewayConnection) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this VPNGatewayConnection. +func (mg *VPNGatewayConnection) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this VPNGatewayConnection. 
+func (mg *VPNGatewayConnection) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this VPNGatewayConnection. +func (mg *VPNGatewayConnection) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this VPNGatewayConnection. +func (mg *VPNGatewayConnection) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this VPNServerConfiguration. +func (mg *VPNServerConfiguration) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this VPNServerConfiguration. +func (mg *VPNServerConfiguration) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this VPNServerConfiguration. +func (mg *VPNServerConfiguration) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this VPNServerConfiguration. +func (mg *VPNServerConfiguration) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this VPNServerConfiguration. +func (mg *VPNServerConfiguration) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this VPNServerConfiguration. +func (mg *VPNServerConfiguration) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this VPNServerConfiguration. +func (mg *VPNServerConfiguration) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this VPNServerConfiguration. 
+func (mg *VPNServerConfiguration) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this VPNServerConfiguration. +func (mg *VPNServerConfiguration) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this VPNServerConfiguration. +func (mg *VPNServerConfiguration) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this VPNServerConfiguration. +func (mg *VPNServerConfiguration) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this VPNServerConfiguration. +func (mg *VPNServerConfiguration) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this VPNSite. +func (mg *VPNSite) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this VPNSite. +func (mg *VPNSite) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this VPNSite. +func (mg *VPNSite) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this VPNSite. +func (mg *VPNSite) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this VPNSite. +func (mg *VPNSite) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this VPNSite. +func (mg *VPNSite) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this VPNSite. 
+func (mg *VPNSite) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this VPNSite. +func (mg *VPNSite) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this VPNSite. +func (mg *VPNSite) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this VPNSite. +func (mg *VPNSite) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this VPNSite. +func (mg *VPNSite) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this VPNSite. +func (mg *VPNSite) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this VirtualHubConnection. +func (mg *VirtualHubConnection) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this VirtualHubConnection. +func (mg *VirtualHubConnection) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this VirtualHubConnection. +func (mg *VirtualHubConnection) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this VirtualHubConnection. +func (mg *VirtualHubConnection) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this VirtualHubConnection. +func (mg *VirtualHubConnection) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this VirtualHubConnection. 
+func (mg *VirtualHubConnection) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this VirtualHubConnection. +func (mg *VirtualHubConnection) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this VirtualHubConnection. +func (mg *VirtualHubConnection) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this VirtualHubConnection. +func (mg *VirtualHubConnection) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this VirtualHubConnection. +func (mg *VirtualHubConnection) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this VirtualHubConnection. +func (mg *VirtualHubConnection) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this VirtualHubConnection. +func (mg *VirtualHubConnection) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this VirtualNetwork. +func (mg *VirtualNetwork) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this VirtualNetwork. +func (mg *VirtualNetwork) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this VirtualNetwork. +func (mg *VirtualNetwork) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this VirtualNetwork. +func (mg *VirtualNetwork) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this VirtualNetwork. 
+func (mg *VirtualNetwork) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this VirtualNetwork. +func (mg *VirtualNetwork) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this VirtualNetwork. +func (mg *VirtualNetwork) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this VirtualNetwork. +func (mg *VirtualNetwork) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this VirtualNetwork. +func (mg *VirtualNetwork) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this VirtualNetwork. +func (mg *VirtualNetwork) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this VirtualNetwork. +func (mg *VirtualNetwork) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this VirtualNetwork. +func (mg *VirtualNetwork) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this VirtualNetworkGateway. +func (mg *VirtualNetworkGateway) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this VirtualNetworkGateway. +func (mg *VirtualNetworkGateway) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this VirtualNetworkGateway. +func (mg *VirtualNetworkGateway) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this VirtualNetworkGateway. 
+func (mg *VirtualNetworkGateway) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this VirtualNetworkGateway. +func (mg *VirtualNetworkGateway) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this VirtualNetworkGateway. +func (mg *VirtualNetworkGateway) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this VirtualNetworkGateway. +func (mg *VirtualNetworkGateway) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this VirtualNetworkGateway. +func (mg *VirtualNetworkGateway) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this VirtualNetworkGateway. +func (mg *VirtualNetworkGateway) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this VirtualNetworkGateway. +func (mg *VirtualNetworkGateway) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this VirtualNetworkGateway. +func (mg *VirtualNetworkGateway) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this VirtualNetworkGateway. +func (mg *VirtualNetworkGateway) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this VirtualNetworkGatewayConnection. +func (mg *VirtualNetworkGatewayConnection) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this VirtualNetworkGatewayConnection. 
+func (mg *VirtualNetworkGatewayConnection) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this VirtualNetworkGatewayConnection. +func (mg *VirtualNetworkGatewayConnection) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this VirtualNetworkGatewayConnection. +func (mg *VirtualNetworkGatewayConnection) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this VirtualNetworkGatewayConnection. +func (mg *VirtualNetworkGatewayConnection) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this VirtualNetworkGatewayConnection. +func (mg *VirtualNetworkGatewayConnection) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this VirtualNetworkGatewayConnection. +func (mg *VirtualNetworkGatewayConnection) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this VirtualNetworkGatewayConnection. +func (mg *VirtualNetworkGatewayConnection) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this VirtualNetworkGatewayConnection. +func (mg *VirtualNetworkGatewayConnection) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this VirtualNetworkGatewayConnection. +func (mg *VirtualNetworkGatewayConnection) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this VirtualNetworkGatewayConnection. 
+func (mg *VirtualNetworkGatewayConnection) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this VirtualNetworkGatewayConnection. +func (mg *VirtualNetworkGatewayConnection) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this WatcherFlowLog. +func (mg *WatcherFlowLog) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this WatcherFlowLog. +func (mg *WatcherFlowLog) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this WatcherFlowLog. +func (mg *WatcherFlowLog) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this WatcherFlowLog. +func (mg *WatcherFlowLog) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this WatcherFlowLog. +func (mg *WatcherFlowLog) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this WatcherFlowLog. +func (mg *WatcherFlowLog) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this WatcherFlowLog. +func (mg *WatcherFlowLog) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this WatcherFlowLog. +func (mg *WatcherFlowLog) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this WatcherFlowLog. +func (mg *WatcherFlowLog) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this WatcherFlowLog. 
+func (mg *WatcherFlowLog) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this WatcherFlowLog. +func (mg *WatcherFlowLog) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this WatcherFlowLog. +func (mg *WatcherFlowLog) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this WebApplicationFirewallPolicy. +func (mg *WebApplicationFirewallPolicy) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this WebApplicationFirewallPolicy. +func (mg *WebApplicationFirewallPolicy) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this WebApplicationFirewallPolicy. +func (mg *WebApplicationFirewallPolicy) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this WebApplicationFirewallPolicy. +func (mg *WebApplicationFirewallPolicy) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this WebApplicationFirewallPolicy. +func (mg *WebApplicationFirewallPolicy) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this WebApplicationFirewallPolicy. +func (mg *WebApplicationFirewallPolicy) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this WebApplicationFirewallPolicy. +func (mg *WebApplicationFirewallPolicy) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this WebApplicationFirewallPolicy. 
+func (mg *WebApplicationFirewallPolicy) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this WebApplicationFirewallPolicy. +func (mg *WebApplicationFirewallPolicy) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this WebApplicationFirewallPolicy. +func (mg *WebApplicationFirewallPolicy) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this WebApplicationFirewallPolicy. +func (mg *WebApplicationFirewallPolicy) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this WebApplicationFirewallPolicy. +func (mg *WebApplicationFirewallPolicy) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/network/v1beta2/zz_generated.managedlist.go b/apis/network/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..2f4ff75dd --- /dev/null +++ b/apis/network/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,287 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this ApplicationGatewayList. +func (l *ApplicationGatewayList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ConnectionMonitorList. +func (l *ConnectionMonitorList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this DNSZoneList. 
+func (l *DNSZoneList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ExpressRouteCircuitList. +func (l *ExpressRouteCircuitList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ExpressRouteCircuitPeeringList. +func (l *ExpressRouteCircuitPeeringList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ExpressRouteConnectionList. +func (l *ExpressRouteConnectionList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ExpressRoutePortList. +func (l *ExpressRoutePortList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this FirewallList. +func (l *FirewallList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this FirewallPolicyList. +func (l *FirewallPolicyList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this FrontDoorList. +func (l *FrontDoorList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this FrontdoorCustomHTTPSConfigurationList. 
+func (l *FrontdoorCustomHTTPSConfigurationList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this FrontdoorRulesEngineList. +func (l *FrontdoorRulesEngineList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this LocalNetworkGatewayList. +func (l *LocalNetworkGatewayList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ManagerList. +func (l *ManagerList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this PacketCaptureList. +func (l *PacketCaptureList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this PointToSiteVPNGatewayList. +func (l *PointToSiteVPNGatewayList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this PrivateDNSZoneList. +func (l *PrivateDNSZoneList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this PrivateEndpointList. +func (l *PrivateEndpointList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ProfileList. 
+func (l *ProfileList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this SubnetList. +func (l *SubnetList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this TrafficManagerProfileList. +func (l *TrafficManagerProfileList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this VPNGatewayConnectionList. +func (l *VPNGatewayConnectionList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this VPNGatewayList. +func (l *VPNGatewayList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this VPNServerConfigurationList. +func (l *VPNServerConfigurationList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this VPNSiteList. +func (l *VPNSiteList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this VirtualHubConnectionList. +func (l *VirtualHubConnectionList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this VirtualNetworkGatewayConnectionList. 
+func (l *VirtualNetworkGatewayConnectionList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this VirtualNetworkGatewayList. +func (l *VirtualNetworkGatewayList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this VirtualNetworkList. +func (l *VirtualNetworkList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this WatcherFlowLogList. +func (l *WatcherFlowLogList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this WebApplicationFirewallPolicyList. +func (l *WebApplicationFirewallPolicyList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/network/v1beta2/zz_generated.resolvers.go b/apis/network/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..9f8a0d4b7 --- /dev/null +++ b/apis/network/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,2240 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + rconfig "github.com/upbound/provider-azure/apis/rconfig" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + apisresolver "github.com/upbound/provider-azure/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *ApplicationGateway) ResolveReferences( // ResolveReferences of this ApplicationGateway. + ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + for i3 := 0; i3 < len(mg.Spec.ForProvider.FrontendIPConfiguration); i3++ { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "PublicIP", "PublicIPList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FrontendIPConfiguration[i3].PublicIPAddressID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.FrontendIPConfiguration[i3].PublicIPAddressIDRef, + Selector: mg.Spec.ForProvider.FrontendIPConfiguration[i3].PublicIPAddressIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FrontendIPConfiguration[i3].PublicIPAddressID") + } + mg.Spec.ForProvider.FrontendIPConfiguration[i3].PublicIPAddressID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FrontendIPConfiguration[i3].PublicIPAddressIDRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.FrontendIPConfiguration); i3++ { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", 
"v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FrontendIPConfiguration[i3].SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.FrontendIPConfiguration[i3].SubnetIDRef, + Selector: mg.Spec.ForProvider.FrontendIPConfiguration[i3].SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FrontendIPConfiguration[i3].SubnetID") + } + mg.Spec.ForProvider.FrontendIPConfiguration[i3].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FrontendIPConfiguration[i3].SubnetIDRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.GatewayIPConfiguration); i3++ { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.GatewayIPConfiguration[i3].SubnetID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.GatewayIPConfiguration[i3].SubnetIDRef, + Selector: mg.Spec.ForProvider.GatewayIPConfiguration[i3].SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.GatewayIPConfiguration[i3].SubnetID") + } + mg.Spec.ForProvider.GatewayIPConfiguration[i3].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.GatewayIPConfiguration[i3].SubnetIDRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.PrivateLinkConfiguration); i3++ { + for i4 := 0; i4 < 
len(mg.Spec.ForProvider.PrivateLinkConfiguration[i3].IPConfiguration); i4++ { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.PrivateLinkConfiguration[i3].IPConfiguration[i4].SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.PrivateLinkConfiguration[i3].IPConfiguration[i4].SubnetIDRef, + Selector: mg.Spec.ForProvider.PrivateLinkConfiguration[i3].IPConfiguration[i4].SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.PrivateLinkConfiguration[i3].IPConfiguration[i4].SubnetID") + } + mg.Spec.ForProvider.PrivateLinkConfiguration[i3].IPConfiguration[i4].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.PrivateLinkConfiguration[i3].IPConfiguration[i4].SubnetIDRef = rsp.ResolvedReference + + } + } + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + 
for i3 := 0; i3 < len(mg.Spec.InitProvider.FrontendIPConfiguration); i3++ { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "PublicIP", "PublicIPList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FrontendIPConfiguration[i3].PublicIPAddressID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.FrontendIPConfiguration[i3].PublicIPAddressIDRef, + Selector: mg.Spec.InitProvider.FrontendIPConfiguration[i3].PublicIPAddressIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FrontendIPConfiguration[i3].PublicIPAddressID") + } + mg.Spec.InitProvider.FrontendIPConfiguration[i3].PublicIPAddressID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FrontendIPConfiguration[i3].PublicIPAddressIDRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.FrontendIPConfiguration); i3++ { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.FrontendIPConfiguration[i3].SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.FrontendIPConfiguration[i3].SubnetIDRef, + Selector: mg.Spec.InitProvider.FrontendIPConfiguration[i3].SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.FrontendIPConfiguration[i3].SubnetID") + } + mg.Spec.InitProvider.FrontendIPConfiguration[i3].SubnetID = 
reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.FrontendIPConfiguration[i3].SubnetIDRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.GatewayIPConfiguration); i3++ { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.GatewayIPConfiguration[i3].SubnetID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.GatewayIPConfiguration[i3].SubnetIDRef, + Selector: mg.Spec.InitProvider.GatewayIPConfiguration[i3].SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.GatewayIPConfiguration[i3].SubnetID") + } + mg.Spec.InitProvider.GatewayIPConfiguration[i3].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.GatewayIPConfiguration[i3].SubnetIDRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.PrivateLinkConfiguration); i3++ { + for i4 := 0; i4 < len(mg.Spec.InitProvider.PrivateLinkConfiguration[i3].IPConfiguration); i4++ { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.PrivateLinkConfiguration[i3].IPConfiguration[i4].SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.PrivateLinkConfiguration[i3].IPConfiguration[i4].SubnetIDRef, + Selector: 
mg.Spec.InitProvider.PrivateLinkConfiguration[i3].IPConfiguration[i4].SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.PrivateLinkConfiguration[i3].IPConfiguration[i4].SubnetID") + } + mg.Spec.InitProvider.PrivateLinkConfiguration[i3].IPConfiguration[i4].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.PrivateLinkConfiguration[i3].IPConfiguration[i4].SubnetIDRef = rsp.ResolvedReference + + } + } + + return nil +} + +// ResolveReferences of this ConnectionMonitor. +func (mg *ConnectionMonitor) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "Watcher", "WatcherList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.NetworkWatcherID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.NetworkWatcherIDRef, + Selector: mg.Spec.ForProvider.NetworkWatcherIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.NetworkWatcherID") + } + mg.Spec.ForProvider.NetworkWatcherID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.NetworkWatcherIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this DNSZone. 
+func (mg *DNSZone) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this ExpressRouteCircuit. 
+func (mg *ExpressRouteCircuit) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this ExpressRouteCircuitPeering. 
+func (mg *ExpressRouteCircuitPeering) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "ExpressRouteCircuit", "ExpressRouteCircuitList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ExpressRouteCircuitName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ExpressRouteCircuitNameRef, + Selector: mg.Spec.ForProvider.ExpressRouteCircuitNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ExpressRouteCircuitName") + } + mg.Spec.ForProvider.ExpressRouteCircuitName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ExpressRouteCircuitNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return 
nil +} + +// ResolveReferences of this ExpressRouteConnection. +func (mg *ExpressRouteConnection) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "ExpressRouteCircuitPeering", "ExpressRouteCircuitPeeringList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ExpressRouteCircuitPeeringID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.ExpressRouteCircuitPeeringIDRef, + Selector: mg.Spec.ForProvider.ExpressRouteCircuitPeeringIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ExpressRouteCircuitPeeringID") + } + mg.Spec.ForProvider.ExpressRouteCircuitPeeringID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ExpressRouteCircuitPeeringIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "ExpressRouteGateway", "ExpressRouteGatewayList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ExpressRouteGatewayID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.ExpressRouteGatewayIDRef, + Selector: mg.Spec.ForProvider.ExpressRouteGatewayIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ExpressRouteGatewayID") + } + 
mg.Spec.ForProvider.ExpressRouteGatewayID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ExpressRouteGatewayIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "ExpressRouteCircuitPeering", "ExpressRouteCircuitPeeringList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ExpressRouteCircuitPeeringID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.ExpressRouteCircuitPeeringIDRef, + Selector: mg.Spec.InitProvider.ExpressRouteCircuitPeeringIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ExpressRouteCircuitPeeringID") + } + mg.Spec.InitProvider.ExpressRouteCircuitPeeringID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ExpressRouteCircuitPeeringIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this ExpressRoutePort. 
+func (mg *ExpressRoutePort) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Firewall. 
func (mg *Firewall) ResolveReferences(ctx context.Context, c client.Reader) error {
	var m xpresource.Managed
	var l xpresource.ManagedList
	r := reference.NewAPIResolver(c, mg)

	var rsp reference.ResolutionResponse
	var err error

	// Resolve PublicIPAddressID for each forProvider ipConfiguration entry (by Azure resource ID).
	for i3 := 0; i3 < len(mg.Spec.ForProvider.IPConfiguration); i3++ {
		{
			m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "PublicIP", "PublicIPList")
			if err != nil {
				return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
			}
			rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
				CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.IPConfiguration[i3].PublicIPAddressID),
				Extract:      rconfig.ExtractResourceID(),
				Reference:    mg.Spec.ForProvider.IPConfiguration[i3].PublicIPAddressIDRef,
				Selector:     mg.Spec.ForProvider.IPConfiguration[i3].PublicIPAddressIDSelector,
				To:           reference.To{List: l, Managed: m},
			})
		}
		if err != nil {
			return errors.Wrap(err, "mg.Spec.ForProvider.IPConfiguration[i3].PublicIPAddressID")
		}
		mg.Spec.ForProvider.IPConfiguration[i3].PublicIPAddressID = reference.ToPtrValue(rsp.ResolvedValue)
		mg.Spec.ForProvider.IPConfiguration[i3].PublicIPAddressIDRef = rsp.ResolvedReference

	}
	// Resolve SubnetID for each forProvider ipConfiguration entry.
	for i3 := 0; i3 < len(mg.Spec.ForProvider.IPConfiguration); i3++ {
		{
			m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList")
			if err != nil {
				return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
			}
			rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
				CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.IPConfiguration[i3].SubnetID),
				Extract:      rconfig.ExtractResourceID(),
				Reference:    mg.Spec.ForProvider.IPConfiguration[i3].SubnetIDRef,
				Selector:     mg.Spec.ForProvider.IPConfiguration[i3].SubnetIDSelector,
				To:           reference.To{List: l, Managed: m},
			})
		}
		if err != nil {
			return errors.Wrap(err, "mg.Spec.ForProvider.IPConfiguration[i3].SubnetID")
		}
		mg.Spec.ForProvider.IPConfiguration[i3].SubnetID = reference.ToPtrValue(rsp.ResolvedValue)
		mg.Spec.ForProvider.IPConfiguration[i3].SubnetIDRef = rsp.ResolvedReference

	}
	// Optional management IP configuration: resolve its SubnetID only when the block is set.
	if mg.Spec.ForProvider.ManagementIPConfiguration != nil {
		{
			m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList")
			if err != nil {
				return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
			}
			rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
				CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ManagementIPConfiguration.SubnetID),
				Extract:      rconfig.ExtractResourceID(),
				Reference:    mg.Spec.ForProvider.ManagementIPConfiguration.SubnetIDRef,
				Selector:     mg.Spec.ForProvider.ManagementIPConfiguration.SubnetIDSelector,
				To:           reference.To{List: l, Managed: m},
			})
		}
		if err != nil {
			return errors.Wrap(err, "mg.Spec.ForProvider.ManagementIPConfiguration.SubnetID")
		}
		mg.Spec.ForProvider.ManagementIPConfiguration.SubnetID = reference.ToPtrValue(rsp.ResolvedValue)
		mg.Spec.ForProvider.ManagementIPConfiguration.SubnetIDRef = rsp.ResolvedReference

	}
	// Resource group is resolved by external name, not resource ID.
	{
		m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList")
		if err != nil {
			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
		}
		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
			CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName),
			Extract:      reference.ExternalName(),
			Reference:    mg.Spec.ForProvider.ResourceGroupNameRef,
			Selector:     mg.Spec.ForProvider.ResourceGroupNameSelector,
			To:           reference.To{List: l, Managed: m},
		})
	}
	if err != nil {
		return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName")
	}
	mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue)
	mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference

	// Mirror of the forProvider resolutions for the initProvider spec.
	for i3 := 0; i3 < len(mg.Spec.InitProvider.IPConfiguration); i3++ {
		{
			m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "PublicIP", "PublicIPList")
			if err != nil {
				return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
			}
			rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
				CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.IPConfiguration[i3].PublicIPAddressID),
				Extract:      rconfig.ExtractResourceID(),
				Reference:    mg.Spec.InitProvider.IPConfiguration[i3].PublicIPAddressIDRef,
				Selector:     mg.Spec.InitProvider.IPConfiguration[i3].PublicIPAddressIDSelector,
				To:           reference.To{List: l, Managed: m},
			})
		}
		if err != nil {
			return errors.Wrap(err, "mg.Spec.InitProvider.IPConfiguration[i3].PublicIPAddressID")
		}
		mg.Spec.InitProvider.IPConfiguration[i3].PublicIPAddressID = reference.ToPtrValue(rsp.ResolvedValue)
		mg.Spec.InitProvider.IPConfiguration[i3].PublicIPAddressIDRef = rsp.ResolvedReference

	}
	for i3 := 0; i3 < len(mg.Spec.InitProvider.IPConfiguration); i3++ {
		{
			m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList")
			if err != nil {
				return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
			}
			rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
				CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.IPConfiguration[i3].SubnetID),
				Extract:      rconfig.ExtractResourceID(),
				Reference:    mg.Spec.InitProvider.IPConfiguration[i3].SubnetIDRef,
				Selector:     mg.Spec.InitProvider.IPConfiguration[i3].SubnetIDSelector,
				To:           reference.To{List: l, Managed: m},
			})
		}
		if err != nil {
			return errors.Wrap(err, "mg.Spec.InitProvider.IPConfiguration[i3].SubnetID")
		}
		mg.Spec.InitProvider.IPConfiguration[i3].SubnetID = reference.ToPtrValue(rsp.ResolvedValue)
		mg.Spec.InitProvider.IPConfiguration[i3].SubnetIDRef = rsp.ResolvedReference

	}
	if mg.Spec.InitProvider.ManagementIPConfiguration != nil {
		{
			m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList")
			if err != nil {
				return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
			}
			rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
				CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ManagementIPConfiguration.SubnetID),
				Extract:      rconfig.ExtractResourceID(),
				Reference:    mg.Spec.InitProvider.ManagementIPConfiguration.SubnetIDRef,
				Selector:     mg.Spec.InitProvider.ManagementIPConfiguration.SubnetIDSelector,
				To:           reference.To{List: l, Managed: m},
			})
		}
		if err != nil {
			return errors.Wrap(err, "mg.Spec.InitProvider.ManagementIPConfiguration.SubnetID")
		}
		mg.Spec.InitProvider.ManagementIPConfiguration.SubnetID = reference.ToPtrValue(rsp.ResolvedValue)
		mg.Spec.InitProvider.ManagementIPConfiguration.SubnetIDRef = rsp.ResolvedReference

	}

	return nil
}

// ResolveReferences of this FirewallPolicy.
func (mg *FirewallPolicy) ResolveReferences(ctx context.Context, c client.Reader) error {
	var m xpresource.Managed
	var l xpresource.ManagedList
	r := reference.NewAPIResolver(c, mg)

	var rsp reference.ResolutionResponse
	var err error
	{
		// Only spec.forProvider.resourceGroupName carries a reference on this type.
		m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList")
		if err != nil {
			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
		}

		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
			CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName),
			Extract:      reference.ExternalName(),
			Reference:    mg.Spec.ForProvider.ResourceGroupNameRef,
			Selector:     mg.Spec.ForProvider.ResourceGroupNameSelector,
			To:           reference.To{List: l, Managed: m},
		})
	}
	if err != nil {
		return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName")
	}
	mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue)
	mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference

	return nil
}

// ResolveReferences of this FrontDoor.
func (mg *FrontDoor) ResolveReferences(ctx context.Context, c client.Reader) error {
	var m xpresource.Managed
	var l xpresource.ManagedList
	r := reference.NewAPIResolver(c, mg)

	var rsp reference.ResolutionResponse
	var err error
	{
		// Only spec.forProvider.resourceGroupName carries a reference on this type.
		m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList")
		if err != nil {
			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
		}

		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
			CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName),
			Extract:      reference.ExternalName(),
			Reference:    mg.Spec.ForProvider.ResourceGroupNameRef,
			Selector:     mg.Spec.ForProvider.ResourceGroupNameSelector,
			To:           reference.To{List: l, Managed: m},
		})
	}
	if err != nil {
		return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName")
	}
	mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue)
	mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference

	return nil
}

// ResolveReferences of this FrontdoorCustomHTTPSConfiguration.
func (mg *FrontdoorCustomHTTPSConfiguration) ResolveReferences(ctx context.Context, c client.Reader) error {
	var m xpresource.Managed
	var l xpresource.ManagedList
	r := reference.NewAPIResolver(c, mg)

	var rsp reference.ResolutionResponse
	var err error

	// Key Vault key reference is only resolved when the optional block is present.
	if mg.Spec.ForProvider.CustomHTTPSConfiguration != nil {
		{
			m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta2", "Key", "KeyList")
			if err != nil {
				return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
			}
			rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
				CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.CustomHTTPSConfiguration.AzureKeyVaultCertificateVaultID),
				Extract:      rconfig.ExtractResourceID(),
				Reference:    mg.Spec.ForProvider.CustomHTTPSConfiguration.AzureKeyVaultCertificateVaultIDRef,
				Selector:     mg.Spec.ForProvider.CustomHTTPSConfiguration.AzureKeyVaultCertificateVaultIDSelector,
				To:           reference.To{List: l, Managed: m},
			})
		}
		if err != nil {
			return errors.Wrap(err, "mg.Spec.ForProvider.CustomHTTPSConfiguration.AzureKeyVaultCertificateVaultID")
		}
		mg.Spec.ForProvider.CustomHTTPSConfiguration.AzureKeyVaultCertificateVaultID = reference.ToPtrValue(rsp.ResolvedValue)
		mg.Spec.ForProvider.CustomHTTPSConfiguration.AzureKeyVaultCertificateVaultIDRef = rsp.ResolvedReference

	}
	// Same resolution mirrored for initProvider.
	if mg.Spec.InitProvider.CustomHTTPSConfiguration != nil {
		{
			m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta2", "Key", "KeyList")
			if err != nil {
				return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
			}
			rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
				CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.CustomHTTPSConfiguration.AzureKeyVaultCertificateVaultID),
				Extract:      rconfig.ExtractResourceID(),
				Reference:    mg.Spec.InitProvider.CustomHTTPSConfiguration.AzureKeyVaultCertificateVaultIDRef,
				Selector:     mg.Spec.InitProvider.CustomHTTPSConfiguration.AzureKeyVaultCertificateVaultIDSelector,
				To:           reference.To{List: l, Managed: m},
			})
		}
		if err != nil {
			return errors.Wrap(err, "mg.Spec.InitProvider.CustomHTTPSConfiguration.AzureKeyVaultCertificateVaultID")
		}
		mg.Spec.InitProvider.CustomHTTPSConfiguration.AzureKeyVaultCertificateVaultID = reference.ToPtrValue(rsp.ResolvedValue)
		mg.Spec.InitProvider.CustomHTTPSConfiguration.AzureKeyVaultCertificateVaultIDRef = rsp.ResolvedReference

	}

	return nil
}

// ResolveReferences of this FrontdoorRulesEngine.
func (mg *FrontdoorRulesEngine) ResolveReferences(ctx context.Context, c client.Reader) error {
	var m xpresource.Managed
	var l xpresource.ManagedList
	r := reference.NewAPIResolver(c, mg)

	var rsp reference.ResolutionResponse
	var err error
	{
		// Parent FrontDoor is referenced by external name.
		m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "FrontDoor", "FrontDoorList")
		if err != nil {
			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
		}

		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
			CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FrontdoorName),
			Extract:      reference.ExternalName(),
			Reference:    mg.Spec.ForProvider.FrontdoorNameRef,
			Selector:     mg.Spec.ForProvider.FrontdoorNameSelector,
			To:           reference.To{List: l, Managed: m},
		})
	}
	if err != nil {
		return errors.Wrap(err, "mg.Spec.ForProvider.FrontdoorName")
	}
	mg.Spec.ForProvider.FrontdoorName = reference.ToPtrValue(rsp.ResolvedValue)
	mg.Spec.ForProvider.FrontdoorNameRef = rsp.ResolvedReference
	{
		m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList")
		if err != nil {
			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
		}

		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
			CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName),
			Extract:      reference.ExternalName(),
			Reference:    mg.Spec.ForProvider.ResourceGroupNameRef,
			Selector:     mg.Spec.ForProvider.ResourceGroupNameSelector,
			To:           reference.To{List: l, Managed: m},
		})
	}
	if err != nil {
		return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName")
	}
	mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue)
	mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference

	return nil
}

// ResolveReferences of this LocalNetworkGateway.
func (mg *LocalNetworkGateway) ResolveReferences(ctx context.Context, c client.Reader) error {
	var m xpresource.Managed
	var l xpresource.ManagedList
	r := reference.NewAPIResolver(c, mg)

	var rsp reference.ResolutionResponse
	var err error
	{
		// Only spec.forProvider.resourceGroupName carries a reference on this type.
		m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList")
		if err != nil {
			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
		}

		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
			CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName),
			Extract:      reference.ExternalName(),
			Reference:    mg.Spec.ForProvider.ResourceGroupNameRef,
			Selector:     mg.Spec.ForProvider.ResourceGroupNameSelector,
			To:           reference.To{List: l, Managed: m},
		})
	}
	if err != nil {
		return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName")
	}
	mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue)
	mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference

	return nil
}

// ResolveReferences of this Manager.
func (mg *Manager) ResolveReferences(ctx context.Context, c client.Reader) error {
	var m xpresource.Managed
	var l xpresource.ManagedList
	r := reference.NewAPIResolver(c, mg)

	var rsp reference.ResolutionResponse
	var err error
	{
		// Only spec.forProvider.resourceGroupName carries a reference on this type.
		m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList")
		if err != nil {
			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
		}

		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
			CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName),
			Extract:      reference.ExternalName(),
			Reference:    mg.Spec.ForProvider.ResourceGroupNameRef,
			Selector:     mg.Spec.ForProvider.ResourceGroupNameSelector,
			To:           reference.To{List: l, Managed: m},
		})
	}
	if err != nil {
		return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName")
	}
	mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue)
	mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference

	return nil
}

// ResolveReferences of this PacketCapture.
func (mg *PacketCapture) ResolveReferences(ctx context.Context, c client.Reader) error {
	var m xpresource.Managed
	var l xpresource.ManagedList
	r := reference.NewAPIResolver(c, mg)

	var rsp reference.ResolutionResponse
	var err error
	{
		// Parent network Watcher is referenced by external name.
		m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "Watcher", "WatcherList")
		if err != nil {
			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
		}

		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
			CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.NetworkWatcherName),
			Extract:      reference.ExternalName(),
			Reference:    mg.Spec.ForProvider.NetworkWatcherNameRef,
			Selector:     mg.Spec.ForProvider.NetworkWatcherNameSelector,
			To:           reference.To{List: l, Managed: m},
		})
	}
	if err != nil {
		return errors.Wrap(err, "mg.Spec.ForProvider.NetworkWatcherName")
	}
	mg.Spec.ForProvider.NetworkWatcherName = reference.ToPtrValue(rsp.ResolvedValue)
	mg.Spec.ForProvider.NetworkWatcherNameRef = rsp.ResolvedReference
	{
		m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList")
		if err != nil {
			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
		}

		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
			CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName),
			Extract:      reference.ExternalName(),
			Reference:    mg.Spec.ForProvider.ResourceGroupNameRef,
			Selector:     mg.Spec.ForProvider.ResourceGroupNameSelector,
			To:           reference.To{List: l, Managed: m},
		})
	}
	if err != nil {
		return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName")
	}
	mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue)
	mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference

	// Optional storage location: resolve the storage Account ID only when the block is set.
	if mg.Spec.ForProvider.StorageLocation != nil {
		{
			m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList")
			if err != nil {
				return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
			}
			rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
				CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StorageLocation.StorageAccountID),
				Extract:      rconfig.ExtractResourceID(),
				Reference:    mg.Spec.ForProvider.StorageLocation.StorageAccountIDRef,
				Selector:     mg.Spec.ForProvider.StorageLocation.StorageAccountIDSelector,
				To:           reference.To{List: l, Managed: m},
			})
		}
		if err != nil {
			return errors.Wrap(err, "mg.Spec.ForProvider.StorageLocation.StorageAccountID")
		}
		mg.Spec.ForProvider.StorageLocation.StorageAccountID = reference.ToPtrValue(rsp.ResolvedValue)
		mg.Spec.ForProvider.StorageLocation.StorageAccountIDRef = rsp.ResolvedReference

	}
	if mg.Spec.InitProvider.StorageLocation != nil {
		{
			m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList")
			if err != nil {
				return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
			}
			rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
				CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StorageLocation.StorageAccountID),
				Extract:      rconfig.ExtractResourceID(),
				Reference:    mg.Spec.InitProvider.StorageLocation.StorageAccountIDRef,
				Selector:     mg.Spec.InitProvider.StorageLocation.StorageAccountIDSelector,
				To:           reference.To{List: l, Managed: m},
			})
		}
		if err != nil {
			return errors.Wrap(err, "mg.Spec.InitProvider.StorageLocation.StorageAccountID")
		}
		mg.Spec.InitProvider.StorageLocation.StorageAccountID = reference.ToPtrValue(rsp.ResolvedValue)
		mg.Spec.InitProvider.StorageLocation.StorageAccountIDRef = rsp.ResolvedReference

	}

	return nil
}

// ResolveReferences of this PointToSiteVPNGateway.
func (mg *PointToSiteVPNGateway) ResolveReferences(ctx context.Context, c client.Reader) error {
	var m xpresource.Managed
	var l xpresource.ManagedList
	r := reference.NewAPIResolver(c, mg)

	var rsp reference.ResolutionResponse
	var err error
	{
		m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList")
		if err != nil {
			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
		}

		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
			CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName),
			Extract:      reference.ExternalName(),
			Reference:    mg.Spec.ForProvider.ResourceGroupNameRef,
			Selector:     mg.Spec.ForProvider.ResourceGroupNameSelector,
			To:           reference.To{List: l, Managed: m},
		})
	}
	if err != nil {
		return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName")
	}
	mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue)
	mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference
	{
		// VPN server configuration is referenced by Azure resource ID.
		m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "VPNServerConfiguration", "VPNServerConfigurationList")
		if err != nil {
			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
		}

		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
			CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.VPNServerConfigurationID),
			Extract:      rconfig.ExtractResourceID(),
			Reference:    mg.Spec.ForProvider.VPNServerConfigurationIDRef,
			Selector:     mg.Spec.ForProvider.VPNServerConfigurationIDSelector,
			To:           reference.To{List: l, Managed: m},
		})
	}
	if err != nil {
		return errors.Wrap(err, "mg.Spec.ForProvider.VPNServerConfigurationID")
	}
	mg.Spec.ForProvider.VPNServerConfigurationID = reference.ToPtrValue(rsp.ResolvedValue)
	mg.Spec.ForProvider.VPNServerConfigurationIDRef = rsp.ResolvedReference

	{
		m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "VirtualHub", "VirtualHubList")
		if err != nil {
			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
		}

		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
			CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.VirtualHubID),
			Extract:      rconfig.ExtractResourceID(),
			Reference:    mg.Spec.ForProvider.VirtualHubIDRef,
			Selector:     mg.Spec.ForProvider.VirtualHubIDSelector,
			To:           reference.To{List: l, Managed: m},
		})
	}
	if err != nil {
		return errors.Wrap(err, "mg.Spec.ForProvider.VirtualHubID")
	}
	mg.Spec.ForProvider.VirtualHubID = reference.ToPtrValue(rsp.ResolvedValue)
	mg.Spec.ForProvider.VirtualHubIDRef = rsp.ResolvedReference
	// initProvider mirrors of the two ID resolutions above.
	{
		m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "VPNServerConfiguration", "VPNServerConfigurationList")
		if err != nil {
			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
		}

		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
			CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.VPNServerConfigurationID),
			Extract:      rconfig.ExtractResourceID(),
			Reference:    mg.Spec.InitProvider.VPNServerConfigurationIDRef,
			Selector:     mg.Spec.InitProvider.VPNServerConfigurationIDSelector,
			To:           reference.To{List: l, Managed: m},
		})
	}
	if err != nil {
		return errors.Wrap(err, "mg.Spec.InitProvider.VPNServerConfigurationID")
	}
	mg.Spec.InitProvider.VPNServerConfigurationID = reference.ToPtrValue(rsp.ResolvedValue)
	mg.Spec.InitProvider.VPNServerConfigurationIDRef = rsp.ResolvedReference
	{
		m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "VirtualHub", "VirtualHubList")
		if err != nil {
			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
		}

		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
			CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.VirtualHubID),
			Extract:      rconfig.ExtractResourceID(),
			Reference:    mg.Spec.InitProvider.VirtualHubIDRef,
			Selector:     mg.Spec.InitProvider.VirtualHubIDSelector,
			To:           reference.To{List: l, Managed: m},
		})
	}
	if err != nil {
		return errors.Wrap(err, "mg.Spec.InitProvider.VirtualHubID")
	}
	mg.Spec.InitProvider.VirtualHubID = reference.ToPtrValue(rsp.ResolvedValue)
	mg.Spec.InitProvider.VirtualHubIDRef = rsp.ResolvedReference

	return nil
}

// ResolveReferences of this PrivateDNSZone.
func (mg *PrivateDNSZone) ResolveReferences(ctx context.Context, c client.Reader) error {
	var m xpresource.Managed
	var l xpresource.ManagedList
	r := reference.NewAPIResolver(c, mg)

	var rsp reference.ResolutionResponse
	var err error
	{
		// Only spec.forProvider.resourceGroupName carries a reference on this type.
		m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList")
		if err != nil {
			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
		}

		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
			CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName),
			Extract:      reference.ExternalName(),
			Reference:    mg.Spec.ForProvider.ResourceGroupNameRef,
			Selector:     mg.Spec.ForProvider.ResourceGroupNameSelector,
			To:           reference.To{List: l, Managed: m},
		})
	}
	if err != nil {
		return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName")
	}
	mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue)
	mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference

	return nil
}

// ResolveReferences of this PrivateEndpoint.
func (mg *PrivateEndpoint) ResolveReferences(ctx context.Context, c client.Reader) error {
	var m xpresource.Managed
	var l xpresource.ManagedList
	r := reference.NewAPIResolver(c, mg)

	var rsp reference.ResolutionResponse
	// mrsp holds results for list-valued (multi) reference fields.
	var mrsp reference.MultiResolutionResponse
	var err error

	if mg.Spec.ForProvider.PrivateDNSZoneGroup != nil {
		{
			m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "PrivateDNSZone", "PrivateDNSZoneList")
			if err != nil {
				return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
			}
			// Multi-resolution: privateDnsZoneIds is a list of resource IDs.
			mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{
				CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.PrivateDNSZoneGroup.PrivateDNSZoneIds),
				Extract:       rconfig.ExtractResourceID(),
				References:    mg.Spec.ForProvider.PrivateDNSZoneGroup.PrivateDNSZoneIdsRefs,
				Selector:      mg.Spec.ForProvider.PrivateDNSZoneGroup.PrivateDNSZoneIdsSelector,
				To:            reference.To{List: l, Managed: m},
			})
		}
		if err != nil {
			return errors.Wrap(err, "mg.Spec.ForProvider.PrivateDNSZoneGroup.PrivateDNSZoneIds")
		}
		mg.Spec.ForProvider.PrivateDNSZoneGroup.PrivateDNSZoneIds = reference.ToPtrValues(mrsp.ResolvedValues)
		mg.Spec.ForProvider.PrivateDNSZoneGroup.PrivateDNSZoneIdsRefs = mrsp.ResolvedReferences

	}
	{
		m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList")
		if err != nil {
			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
		}
		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
			CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName),
			Extract:      reference.ExternalName(),
			Reference:    mg.Spec.ForProvider.ResourceGroupNameRef,
			Selector:     mg.Spec.ForProvider.ResourceGroupNameSelector,
			To:           reference.To{List: l, Managed: m},
		})
	}
	if err != nil {
		return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName")
	}
	mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue)
	mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference
	{
		m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList")
		if err != nil {
			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
		}

		// NOTE(review): this function uses resource.ExtractResourceID() while sibling
		// functions in this file use rconfig.ExtractResourceID() — presumably equivalent
		// generator output paths; confirm against the upjet config if it matters.
		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
			CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SubnetID),
			Extract:      resource.ExtractResourceID(),
			Reference:    mg.Spec.ForProvider.SubnetIDRef,
			Selector:     mg.Spec.ForProvider.SubnetIDSelector,
			To:           reference.To{List: l, Managed: m},
		})
	}
	if err != nil {
		return errors.Wrap(err, "mg.Spec.ForProvider.SubnetID")
	}
	mg.Spec.ForProvider.SubnetID = reference.ToPtrValue(rsp.ResolvedValue)
	mg.Spec.ForProvider.SubnetIDRef = rsp.ResolvedReference

	if mg.Spec.InitProvider.PrivateDNSZoneGroup != nil {
		{
			m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "PrivateDNSZone", "PrivateDNSZoneList")
			if err != nil {
				return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
			}
			mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{
				CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.PrivateDNSZoneGroup.PrivateDNSZoneIds),
				Extract:       rconfig.ExtractResourceID(),
				References:    mg.Spec.InitProvider.PrivateDNSZoneGroup.PrivateDNSZoneIdsRefs,
				Selector:      mg.Spec.InitProvider.PrivateDNSZoneGroup.PrivateDNSZoneIdsSelector,
				To:            reference.To{List: l, Managed: m},
			})
		}
		if err != nil {
			return errors.Wrap(err, "mg.Spec.InitProvider.PrivateDNSZoneGroup.PrivateDNSZoneIds")
		}
		mg.Spec.InitProvider.PrivateDNSZoneGroup.PrivateDNSZoneIds = reference.ToPtrValues(mrsp.ResolvedValues)
		mg.Spec.InitProvider.PrivateDNSZoneGroup.PrivateDNSZoneIdsRefs = mrsp.ResolvedReferences

	}
	{
		m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList")
		if err != nil {
			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
		}
		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
			CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SubnetID),
			Extract:      resource.ExtractResourceID(),
			Reference:    mg.Spec.InitProvider.SubnetIDRef,
			Selector:     mg.Spec.InitProvider.SubnetIDSelector,
			To:           reference.To{List: l, Managed: m},
		})
	}
	if err != nil {
		return errors.Wrap(err, "mg.Spec.InitProvider.SubnetID")
	}
	mg.Spec.InitProvider.SubnetID = reference.ToPtrValue(rsp.ResolvedValue)
	mg.Spec.InitProvider.SubnetIDRef = rsp.ResolvedReference

	return nil
}

// ResolveReferences of this Profile.
func (mg *Profile) ResolveReferences(ctx context.Context, c client.Reader) error {
	var m xpresource.Managed
	var l xpresource.ManagedList
	r := reference.NewAPIResolver(c, mg)

	var rsp reference.ResolutionResponse
	var err error

	// Resolve SubnetID for each ipConfiguration nested under the optional
	// containerNetworkInterface block.
	if mg.Spec.ForProvider.ContainerNetworkInterface != nil {
		for i4 := 0; i4 < len(mg.Spec.ForProvider.ContainerNetworkInterface.IPConfiguration); i4++ {
			{
				m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList")
				if err != nil {
					return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
				}
				rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
					CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ContainerNetworkInterface.IPConfiguration[i4].SubnetID),
					Extract:      rconfig.ExtractResourceID(),
					Reference:    mg.Spec.ForProvider.ContainerNetworkInterface.IPConfiguration[i4].SubnetIDRef,
					Selector:     mg.Spec.ForProvider.ContainerNetworkInterface.IPConfiguration[i4].SubnetIDSelector,
					To:           reference.To{List: l, Managed: m},
				})
			}
			if err != nil {
				return errors.Wrap(err, "mg.Spec.ForProvider.ContainerNetworkInterface.IPConfiguration[i4].SubnetID")
			}
			mg.Spec.ForProvider.ContainerNetworkInterface.IPConfiguration[i4].SubnetID = reference.ToPtrValue(rsp.ResolvedValue)
			mg.Spec.ForProvider.ContainerNetworkInterface.IPConfiguration[i4].SubnetIDRef = rsp.ResolvedReference

		}
	}
	{
		m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList")
		if err != nil {
			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
		}
		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
			CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName),
			Extract:      reference.ExternalName(),
			Reference:    mg.Spec.ForProvider.ResourceGroupNameRef,
			Selector:     mg.Spec.ForProvider.ResourceGroupNameSelector,
			To:           reference.To{List: l, Managed: m},
		})
	}
	if err != nil {
		return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName")
	}
	mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue)
	mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference

	if mg.Spec.InitProvider.ContainerNetworkInterface != nil {
		for i4 := 0; i4 < len(mg.Spec.InitProvider.ContainerNetworkInterface.IPConfiguration); i4++ {
			{
				m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList")
				if err != nil {
					return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
				}
				rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
					CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ContainerNetworkInterface.IPConfiguration[i4].SubnetID),
					Extract:      rconfig.ExtractResourceID(),
					Reference:    mg.Spec.InitProvider.ContainerNetworkInterface.IPConfiguration[i4].SubnetIDRef,
					Selector:     mg.Spec.InitProvider.ContainerNetworkInterface.IPConfiguration[i4].SubnetIDSelector,
					To:           reference.To{List: l, Managed: m},
				})
			}
			if err != nil {
				return errors.Wrap(err, "mg.Spec.InitProvider.ContainerNetworkInterface.IPConfiguration[i4].SubnetID")
			}
			mg.Spec.InitProvider.ContainerNetworkInterface.IPConfiguration[i4].SubnetID = reference.ToPtrValue(rsp.ResolvedValue)
			mg.Spec.InitProvider.ContainerNetworkInterface.IPConfiguration[i4].SubnetIDRef = rsp.ResolvedReference

		}
	}

	return nil
}

// ResolveReferences of this Subnet.
func (mg *Subnet) ResolveReferences(ctx context.Context, c client.Reader) error {
	var m xpresource.Managed
	var l xpresource.ManagedList
	r := reference.NewAPIResolver(c, mg)

	var rsp reference.ResolutionResponse
	var err error
	{
		m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList")
		if err != nil {
			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
		}

		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
			CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName),
			Extract:      reference.ExternalName(),
			Reference:    mg.Spec.ForProvider.ResourceGroupNameRef,
			Selector:     mg.Spec.ForProvider.ResourceGroupNameSelector,
			To:           reference.To{List: l, Managed: m},
		})
	}
	if err != nil {
		return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName")
	}
	mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue)
	mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference
	{
		// Parent virtual network is referenced by external name.
		m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "VirtualNetwork", "VirtualNetworkList")
		if err != nil {
			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
		}

		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
			CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.VirtualNetworkName),
			Extract:      reference.ExternalName(),
			Reference:    mg.Spec.ForProvider.VirtualNetworkNameRef,
			Selector:     mg.Spec.ForProvider.VirtualNetworkNameSelector,
			To:           reference.To{List: l, Managed: m},
		})
	}
	if err != nil {
		return errors.Wrap(err, "mg.Spec.ForProvider.VirtualNetworkName")
	}
	mg.Spec.ForProvider.VirtualNetworkName = reference.ToPtrValue(rsp.ResolvedValue)
	mg.Spec.ForProvider.VirtualNetworkNameRef = rsp.ResolvedReference

	return nil
}

// ResolveReferences of this TrafficManagerProfile.
func (mg *TrafficManagerProfile) ResolveReferences(ctx context.Context, c client.Reader) error {
	var m xpresource.Managed
	var l xpresource.ManagedList
	r := reference.NewAPIResolver(c, mg)

	var rsp reference.ResolutionResponse
	var err error
	{
		// Only spec.forProvider.resourceGroupName carries a reference on this type.
		m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList")
		if err != nil {
			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
		}

		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
			CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName),
			Extract:      reference.ExternalName(),
			Reference:    mg.Spec.ForProvider.ResourceGroupNameRef,
			Selector:     mg.Spec.ForProvider.ResourceGroupNameSelector,
			To:           reference.To{List: l, Managed: m},
		})
	}
	if err != nil {
		return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName")
	}
	mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue)
	mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference

	return nil
}

// ResolveReferences of this VPNGateway.
+func (mg *VPNGateway) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "VirtualHub", "VirtualHubList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.VirtualHubID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.VirtualHubIDRef, + Selector: mg.Spec.ForProvider.VirtualHubIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VirtualHubID") + } + mg.Spec.ForProvider.VirtualHubID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.VirtualHubIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "VirtualHub", 
"VirtualHubList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.VirtualHubID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.VirtualHubIDRef, + Selector: mg.Spec.InitProvider.VirtualHubIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.VirtualHubID") + } + mg.Spec.InitProvider.VirtualHubID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.VirtualHubIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this VPNGatewayConnection. +func (mg *VPNGatewayConnection) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "VPNSite", "VPNSiteList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.RemoteVPNSiteID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.RemoteVPNSiteIDRef, + Selector: mg.Spec.ForProvider.RemoteVPNSiteIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.RemoteVPNSiteID") + } + mg.Spec.ForProvider.RemoteVPNSiteID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RemoteVPNSiteIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "VPNGateway", "VPNGatewayList") + if err != 
nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.VPNGatewayID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.VPNGatewayIDRef, + Selector: mg.Spec.ForProvider.VPNGatewayIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VPNGatewayID") + } + mg.Spec.ForProvider.VPNGatewayID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.VPNGatewayIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "VPNSite", "VPNSiteList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.RemoteVPNSiteID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.RemoteVPNSiteIDRef, + Selector: mg.Spec.InitProvider.RemoteVPNSiteIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.RemoteVPNSiteID") + } + mg.Spec.InitProvider.RemoteVPNSiteID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.RemoteVPNSiteIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this VPNServerConfiguration. 
+func (mg *VPNServerConfiguration) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this VPNSite. 
+func (mg *VPNSite) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "VirtualWAN", "VirtualWANList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.VirtualWanID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.VirtualWanIDRef, + Selector: mg.Spec.ForProvider.VirtualWanIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VirtualWanID") + } + mg.Spec.ForProvider.VirtualWanID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.VirtualWanIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "VirtualWAN", 
"VirtualWANList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.VirtualWanID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.VirtualWanIDRef, + Selector: mg.Spec.InitProvider.VirtualWanIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.VirtualWanID") + } + mg.Spec.InitProvider.VirtualWanID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.VirtualWanIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this VirtualHubConnection. +func (mg *VirtualHubConnection) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "VirtualNetwork", "VirtualNetworkList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.RemoteVirtualNetworkID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.RemoteVirtualNetworkIDRef, + Selector: mg.Spec.ForProvider.RemoteVirtualNetworkIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.RemoteVirtualNetworkID") + } + mg.Spec.ForProvider.RemoteVirtualNetworkID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RemoteVirtualNetworkIDRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.Routing != nil { + { + m, l, err = 
apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "VirtualHubRouteTable", "VirtualHubRouteTableList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Routing.AssociatedRouteTableID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.Routing.AssociatedRouteTableIDRef, + Selector: mg.Spec.ForProvider.Routing.AssociatedRouteTableIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Routing.AssociatedRouteTableID") + } + mg.Spec.ForProvider.Routing.AssociatedRouteTableID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Routing.AssociatedRouteTableIDRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "VirtualHub", "VirtualHubList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.VirtualHubID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.VirtualHubIDRef, + Selector: mg.Spec.ForProvider.VirtualHubIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VirtualHubID") + } + mg.Spec.ForProvider.VirtualHubID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.VirtualHubIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "VirtualNetwork", "VirtualNetworkList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference 
resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.RemoteVirtualNetworkID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.RemoteVirtualNetworkIDRef, + Selector: mg.Spec.InitProvider.RemoteVirtualNetworkIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.RemoteVirtualNetworkID") + } + mg.Spec.InitProvider.RemoteVirtualNetworkID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.RemoteVirtualNetworkIDRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.Routing != nil { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "VirtualHubRouteTable", "VirtualHubRouteTableList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Routing.AssociatedRouteTableID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.Routing.AssociatedRouteTableIDRef, + Selector: mg.Spec.InitProvider.Routing.AssociatedRouteTableIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Routing.AssociatedRouteTableID") + } + mg.Spec.InitProvider.Routing.AssociatedRouteTableID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Routing.AssociatedRouteTableIDRef = rsp.ResolvedReference + + } + + return nil +} + +// ResolveReferences of this VirtualNetwork. 
+func (mg *VirtualNetwork) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this VirtualNetworkGateway. 
+func (mg *VirtualNetworkGateway) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + for i3 := 0; i3 < len(mg.Spec.ForProvider.IPConfiguration); i3++ { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "PublicIP", "PublicIPList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.IPConfiguration[i3].PublicIPAddressID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.IPConfiguration[i3].PublicIPAddressIDRef, + Selector: mg.Spec.ForProvider.IPConfiguration[i3].PublicIPAddressIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.IPConfiguration[i3].PublicIPAddressID") + } + mg.Spec.ForProvider.IPConfiguration[i3].PublicIPAddressID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.IPConfiguration[i3].PublicIPAddressIDRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.IPConfiguration); i3++ { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.IPConfiguration[i3].SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.IPConfiguration[i3].SubnetIDRef, + Selector: mg.Spec.ForProvider.IPConfiguration[i3].SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { 
+ return errors.Wrap(err, "mg.Spec.ForProvider.IPConfiguration[i3].SubnetID") + } + mg.Spec.ForProvider.IPConfiguration[i3].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.IPConfiguration[i3].SubnetIDRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.InitProvider.IPConfiguration); i3++ { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "PublicIP", "PublicIPList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.IPConfiguration[i3].PublicIPAddressID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.IPConfiguration[i3].PublicIPAddressIDRef, + Selector: mg.Spec.InitProvider.IPConfiguration[i3].PublicIPAddressIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.IPConfiguration[i3].PublicIPAddressID") + } + mg.Spec.InitProvider.IPConfiguration[i3].PublicIPAddressID 
= reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.IPConfiguration[i3].PublicIPAddressIDRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.IPConfiguration); i3++ { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.IPConfiguration[i3].SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.IPConfiguration[i3].SubnetIDRef, + Selector: mg.Spec.InitProvider.IPConfiguration[i3].SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.IPConfiguration[i3].SubnetID") + } + mg.Spec.InitProvider.IPConfiguration[i3].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.IPConfiguration[i3].SubnetIDRef = rsp.ResolvedReference + + } + + return nil +} + +// ResolveReferences of this VirtualNetworkGatewayConnection. 
+func (mg *VirtualNetworkGatewayConnection) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "LocalNetworkGateway", "LocalNetworkGatewayList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.LocalNetworkGatewayID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.LocalNetworkGatewayIDRef, + Selector: mg.Spec.ForProvider.LocalNetworkGatewayIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.LocalNetworkGatewayID") + } + mg.Spec.ForProvider.LocalNetworkGatewayID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.LocalNetworkGatewayIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "VirtualNetworkGateway", "VirtualNetworkGatewayList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.PeerVirtualNetworkGatewayID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.PeerVirtualNetworkGatewayIDRef, + Selector: mg.Spec.ForProvider.PeerVirtualNetworkGatewayIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.PeerVirtualNetworkGatewayID") + } + mg.Spec.ForProvider.PeerVirtualNetworkGatewayID = reference.ToPtrValue(rsp.ResolvedValue) + 
mg.Spec.ForProvider.PeerVirtualNetworkGatewayIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "VirtualNetworkGateway", "VirtualNetworkGatewayList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.VirtualNetworkGatewayID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.VirtualNetworkGatewayIDRef, + Selector: mg.Spec.ForProvider.VirtualNetworkGatewayIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VirtualNetworkGatewayID") + } + mg.Spec.ForProvider.VirtualNetworkGatewayID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.VirtualNetworkGatewayIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "LocalNetworkGateway", "LocalNetworkGatewayList") + if err != nil { + return errors.Wrap(err, 
"failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.LocalNetworkGatewayID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.LocalNetworkGatewayIDRef, + Selector: mg.Spec.InitProvider.LocalNetworkGatewayIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.LocalNetworkGatewayID") + } + mg.Spec.InitProvider.LocalNetworkGatewayID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.LocalNetworkGatewayIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "VirtualNetworkGateway", "VirtualNetworkGatewayList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.PeerVirtualNetworkGatewayID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.PeerVirtualNetworkGatewayIDRef, + Selector: mg.Spec.InitProvider.PeerVirtualNetworkGatewayIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.PeerVirtualNetworkGatewayID") + } + mg.Spec.InitProvider.PeerVirtualNetworkGatewayID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.PeerVirtualNetworkGatewayIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "VirtualNetworkGateway", "VirtualNetworkGatewayList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.InitProvider.VirtualNetworkGatewayID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.VirtualNetworkGatewayIDRef, + Selector: mg.Spec.InitProvider.VirtualNetworkGatewayIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.VirtualNetworkGatewayID") + } + mg.Spec.InitProvider.VirtualNetworkGatewayID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.VirtualNetworkGatewayIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this WatcherFlowLog. +func (mg *WatcherFlowLog) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.NetworkSecurityGroupID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.NetworkSecurityGroupIDRef, + Selector: mg.Spec.ForProvider.NetworkSecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.NetworkSecurityGroupID") + } + mg.Spec.ForProvider.NetworkSecurityGroupID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.NetworkSecurityGroupIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "Watcher", "WatcherList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + 
rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.NetworkWatcherName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.NetworkWatcherNameRef, + Selector: mg.Spec.ForProvider.NetworkWatcherNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.NetworkWatcherName") + } + mg.Spec.ForProvider.NetworkWatcherName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.NetworkWatcherNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StorageAccountID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.StorageAccountIDRef, + Selector: mg.Spec.ForProvider.StorageAccountIDSelector, + To: reference.To{List: l, Managed: 
m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StorageAccountID") + } + mg.Spec.ForProvider.StorageAccountID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StorageAccountIDRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.TrafficAnalytics != nil { + { + m, l, err = apisresolver.GetManagedResource("operationalinsights.azure.upbound.io", "v1beta2", "Workspace", "WorkspaceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.TrafficAnalytics.WorkspaceID), + Extract: resource.ExtractParamPath("workspace_id", true), + Reference: mg.Spec.ForProvider.TrafficAnalytics.WorkspaceIDRef, + Selector: mg.Spec.ForProvider.TrafficAnalytics.WorkspaceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.TrafficAnalytics.WorkspaceID") + } + mg.Spec.ForProvider.TrafficAnalytics.WorkspaceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.TrafficAnalytics.WorkspaceIDRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.TrafficAnalytics != nil { + { + m, l, err = apisresolver.GetManagedResource("operationalinsights.azure.upbound.io", "v1beta2", "Workspace", "WorkspaceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.TrafficAnalytics.WorkspaceResourceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.TrafficAnalytics.WorkspaceResourceIDRef, + Selector: mg.Spec.ForProvider.TrafficAnalytics.WorkspaceResourceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return 
errors.Wrap(err, "mg.Spec.ForProvider.TrafficAnalytics.WorkspaceResourceID") + } + mg.Spec.ForProvider.TrafficAnalytics.WorkspaceResourceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.TrafficAnalytics.WorkspaceResourceIDRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "SecurityGroup", "SecurityGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.NetworkSecurityGroupID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.NetworkSecurityGroupIDRef, + Selector: mg.Spec.InitProvider.NetworkSecurityGroupIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.NetworkSecurityGroupID") + } + mg.Spec.InitProvider.NetworkSecurityGroupID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.NetworkSecurityGroupIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StorageAccountID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.StorageAccountIDRef, + Selector: mg.Spec.InitProvider.StorageAccountIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StorageAccountID") + } + mg.Spec.InitProvider.StorageAccountID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StorageAccountIDRef = rsp.ResolvedReference + + if 
mg.Spec.InitProvider.TrafficAnalytics != nil { + { + m, l, err = apisresolver.GetManagedResource("operationalinsights.azure.upbound.io", "v1beta2", "Workspace", "WorkspaceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.TrafficAnalytics.WorkspaceID), + Extract: resource.ExtractParamPath("workspace_id", true), + Reference: mg.Spec.InitProvider.TrafficAnalytics.WorkspaceIDRef, + Selector: mg.Spec.InitProvider.TrafficAnalytics.WorkspaceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.TrafficAnalytics.WorkspaceID") + } + mg.Spec.InitProvider.TrafficAnalytics.WorkspaceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.TrafficAnalytics.WorkspaceIDRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.TrafficAnalytics != nil { + { + m, l, err = apisresolver.GetManagedResource("operationalinsights.azure.upbound.io", "v1beta2", "Workspace", "WorkspaceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.TrafficAnalytics.WorkspaceResourceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.TrafficAnalytics.WorkspaceResourceIDRef, + Selector: mg.Spec.InitProvider.TrafficAnalytics.WorkspaceResourceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.TrafficAnalytics.WorkspaceResourceID") + } + mg.Spec.InitProvider.TrafficAnalytics.WorkspaceResourceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.TrafficAnalytics.WorkspaceResourceIDRef = 
rsp.ResolvedReference + + } + + return nil +} + +// ResolveReferences of this WebApplicationFirewallPolicy. +func (mg *WebApplicationFirewallPolicy) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/network/v1beta2/zz_groupversion_info.go b/apis/network/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..2124fa96b --- /dev/null +++ b/apis/network/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=network.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
+const ( + CRDGroup = "network.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/network/v1beta2/zz_localnetworkgateway_terraformed.go b/apis/network/v1beta2/zz_localnetworkgateway_terraformed.go new file mode 100755 index 000000000..520113b28 --- /dev/null +++ b/apis/network/v1beta2/zz_localnetworkgateway_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this LocalNetworkGateway +func (mg *LocalNetworkGateway) GetTerraformResourceType() string { + return "azurerm_local_network_gateway" +} + +// GetConnectionDetailsMapping for this LocalNetworkGateway +func (tr *LocalNetworkGateway) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this LocalNetworkGateway +func (tr *LocalNetworkGateway) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this LocalNetworkGateway +func (tr *LocalNetworkGateway) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, 
&tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this LocalNetworkGateway +func (tr *LocalNetworkGateway) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this LocalNetworkGateway +func (tr *LocalNetworkGateway) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this LocalNetworkGateway +func (tr *LocalNetworkGateway) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this LocalNetworkGateway +func (tr *LocalNetworkGateway) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this LocalNetworkGateway +func (tr *LocalNetworkGateway) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this LocalNetworkGateway using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *LocalNetworkGateway) LateInitialize(attrs []byte) (bool, error) { + params := &LocalNetworkGatewayParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *LocalNetworkGateway) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/network/v1beta2/zz_localnetworkgateway_types.go b/apis/network/v1beta2/zz_localnetworkgateway_types.go new file mode 100755 index 000000000..2185794c8 --- /dev/null +++ b/apis/network/v1beta2/zz_localnetworkgateway_types.go @@ -0,0 +1,205 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type BGPSettingsInitParameters struct { + + // The BGP speaker's ASN. + Asn *float64 `json:"asn,omitempty" tf:"asn,omitempty"` + + // The BGP peering address and BGP identifier of this BGP speaker. 
+ BGPPeeringAddress *string `json:"bgpPeeringAddress,omitempty" tf:"bgp_peering_address,omitempty"` + + // The weight added to routes learned from this BGP speaker. + PeerWeight *float64 `json:"peerWeight,omitempty" tf:"peer_weight,omitempty"` +} + +type BGPSettingsObservation struct { + + // The BGP speaker's ASN. + Asn *float64 `json:"asn,omitempty" tf:"asn,omitempty"` + + // The BGP peering address and BGP identifier of this BGP speaker. + BGPPeeringAddress *string `json:"bgpPeeringAddress,omitempty" tf:"bgp_peering_address,omitempty"` + + // The weight added to routes learned from this BGP speaker. + PeerWeight *float64 `json:"peerWeight,omitempty" tf:"peer_weight,omitempty"` +} + +type BGPSettingsParameters struct { + + // The BGP speaker's ASN. + // +kubebuilder:validation:Optional + Asn *float64 `json:"asn" tf:"asn,omitempty"` + + // The BGP peering address and BGP identifier of this BGP speaker. + // +kubebuilder:validation:Optional + BGPPeeringAddress *string `json:"bgpPeeringAddress" tf:"bgp_peering_address,omitempty"` + + // The weight added to routes learned from this BGP speaker. + // +kubebuilder:validation:Optional + PeerWeight *float64 `json:"peerWeight,omitempty" tf:"peer_weight,omitempty"` +} + +type LocalNetworkGatewayInitParameters struct { + + // The list of string CIDRs representing the address spaces the gateway exposes. + AddressSpace []*string `json:"addressSpace,omitempty" tf:"address_space,omitempty"` + + // A bgp_settings block as defined below containing the Local Network Gateway's BGP speaker settings. + BGPSettings *BGPSettingsInitParameters `json:"bgpSettings,omitempty" tf:"bgp_settings,omitempty"` + + // The gateway IP address to connect with. + GatewayAddress *string `json:"gatewayAddress,omitempty" tf:"gateway_address,omitempty"` + + // The gateway FQDN to connect with. + GatewayFqdn *string `json:"gatewayFqdn,omitempty" tf:"gateway_fqdn,omitempty"` + + // The location/region where the local network gateway is created. 
Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type LocalNetworkGatewayObservation struct { + + // The list of string CIDRs representing the address spaces the gateway exposes. + AddressSpace []*string `json:"addressSpace,omitempty" tf:"address_space,omitempty"` + + // A bgp_settings block as defined below containing the Local Network Gateway's BGP speaker settings. + BGPSettings *BGPSettingsObservation `json:"bgpSettings,omitempty" tf:"bgp_settings,omitempty"` + + // The gateway IP address to connect with. + GatewayAddress *string `json:"gatewayAddress,omitempty" tf:"gateway_address,omitempty"` + + // The gateway FQDN to connect with. + GatewayFqdn *string `json:"gatewayFqdn,omitempty" tf:"gateway_fqdn,omitempty"` + + // The ID of the Local Network Gateway. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The location/region where the local network gateway is created. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the resource group in which to create the local network gateway. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type LocalNetworkGatewayParameters struct { + + // The list of string CIDRs representing the address spaces the gateway exposes. + // +kubebuilder:validation:Optional + AddressSpace []*string `json:"addressSpace,omitempty" tf:"address_space,omitempty"` + + // A bgp_settings block as defined below containing the Local Network Gateway's BGP speaker settings. 
+ // +kubebuilder:validation:Optional + BGPSettings *BGPSettingsParameters `json:"bgpSettings,omitempty" tf:"bgp_settings,omitempty"` + + // The gateway IP address to connect with. + // +kubebuilder:validation:Optional + GatewayAddress *string `json:"gatewayAddress,omitempty" tf:"gateway_address,omitempty"` + + // The gateway FQDN to connect with. + // +kubebuilder:validation:Optional + GatewayFqdn *string `json:"gatewayFqdn,omitempty" tf:"gateway_fqdn,omitempty"` + + // The location/region where the local network gateway is created. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the resource group in which to create the local network gateway. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +// LocalNetworkGatewaySpec defines the desired state of LocalNetworkGateway +type LocalNetworkGatewaySpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider LocalNetworkGatewayParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. 
+ // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider LocalNetworkGatewayInitParameters `json:"initProvider,omitempty"` +} + +// LocalNetworkGatewayStatus defines the observed state of LocalNetworkGateway. +type LocalNetworkGatewayStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider LocalNetworkGatewayObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// LocalNetworkGateway is the Schema for the LocalNetworkGateways API. Manages a local network gateway connection over which specific connections can be configured. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type LocalNetworkGateway struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + Spec LocalNetworkGatewaySpec `json:"spec"` + Status LocalNetworkGatewayStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// LocalNetworkGatewayList contains a list of LocalNetworkGateways +type LocalNetworkGatewayList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []LocalNetworkGateway `json:"items"` +} + +// Repository type metadata. +var ( + LocalNetworkGateway_Kind = "LocalNetworkGateway" + LocalNetworkGateway_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: LocalNetworkGateway_Kind}.String() + LocalNetworkGateway_KindAPIVersion = LocalNetworkGateway_Kind + "." 
+ CRDGroupVersion.String() + LocalNetworkGateway_GroupVersionKind = CRDGroupVersion.WithKind(LocalNetworkGateway_Kind) +) + +func init() { + SchemeBuilder.Register(&LocalNetworkGateway{}, &LocalNetworkGatewayList{}) +} diff --git a/apis/network/v1beta2/zz_manager_terraformed.go b/apis/network/v1beta2/zz_manager_terraformed.go new file mode 100755 index 000000000..c529d9b97 --- /dev/null +++ b/apis/network/v1beta2/zz_manager_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Manager +func (mg *Manager) GetTerraformResourceType() string { + return "azurerm_network_manager" +} + +// GetConnectionDetailsMapping for this Manager +func (tr *Manager) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Manager +func (tr *Manager) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Manager +func (tr *Manager) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Manager +func (tr *Manager) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Manager +func (tr *Manager) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + 
base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Manager +func (tr *Manager) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Manager +func (tr *Manager) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Manager +func (tr *Manager) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Manager using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *Manager) LateInitialize(attrs []byte) (bool, error) { + params := &ManagerParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Manager) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/network/v1beta2/zz_manager_types.go b/apis/network/v1beta2/zz_manager_types.go new file mode 100755 index 000000000..104387934 --- /dev/null +++ b/apis/network/v1beta2/zz_manager_types.go @@ -0,0 +1,208 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CrossTenantScopesInitParameters struct { +} + +type CrossTenantScopesObservation struct { + + // List of management groups. + ManagementGroups []*string `json:"managementGroups,omitempty" tf:"management_groups,omitempty"` + + // List of subscriptions. + Subscriptions []*string `json:"subscriptions,omitempty" tf:"subscriptions,omitempty"` + + // Tenant ID. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` +} + +type CrossTenantScopesParameters struct { +} + +type ManagerInitParameters struct { + + // A description of the network manager. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Specifies the Azure Region where the Network Managers should exist. Changing this forces a new resource to be created. 
+ Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A scope block as defined below. + Scope *ScopeInitParameters `json:"scope,omitempty" tf:"scope,omitempty"` + + // A list of configuration deployment type. Possible values are Connectivity and SecurityAdmin, corresponds to if Connectivity Configuration and Security Admin Configuration is allowed for the Network Manager. + ScopeAccesses []*string `json:"scopeAccesses,omitempty" tf:"scope_accesses,omitempty"` + + // A mapping of tags which should be assigned to the Network Managers. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ManagerObservation struct { + + // One or more cross_tenant_scopes blocks as defined below. + CrossTenantScopes []CrossTenantScopesObservation `json:"crossTenantScopes,omitempty" tf:"cross_tenant_scopes,omitempty"` + + // A description of the network manager. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The ID of the Network Managers. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies the Azure Region where the Network Managers should exist. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the name of the Resource Group where the Network Managers should exist. Changing this forces a new Network Managers to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A scope block as defined below. + Scope *ScopeObservation `json:"scope,omitempty" tf:"scope,omitempty"` + + // A list of configuration deployment type. Possible values are Connectivity and SecurityAdmin, corresponds to if Connectivity Configuration and Security Admin Configuration is allowed for the Network Manager. 
+ ScopeAccesses []*string `json:"scopeAccesses,omitempty" tf:"scope_accesses,omitempty"` + + // A mapping of tags which should be assigned to the Network Managers. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ManagerParameters struct { + + // A description of the network manager. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Specifies the Azure Region where the Network Managers should exist. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the name of the Resource Group where the Network Managers should exist. Changing this forces a new Network Managers to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A scope block as defined below. + // +kubebuilder:validation:Optional + Scope *ScopeParameters `json:"scope,omitempty" tf:"scope,omitempty"` + + // A list of configuration deployment type. Possible values are Connectivity and SecurityAdmin, corresponds to if Connectivity Configuration and Security Admin Configuration is allowed for the Network Manager. 
+ // +kubebuilder:validation:Optional + ScopeAccesses []*string `json:"scopeAccesses,omitempty" tf:"scope_accesses,omitempty"` + + // A mapping of tags which should be assigned to the Network Managers. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ScopeInitParameters struct { + + // A list of management group IDs. + ManagementGroupIds []*string `json:"managementGroupIds,omitempty" tf:"management_group_ids,omitempty"` + + // A list of subscription IDs. + SubscriptionIds []*string `json:"subscriptionIds,omitempty" tf:"subscription_ids,omitempty"` +} + +type ScopeObservation struct { + + // A list of management group IDs. + ManagementGroupIds []*string `json:"managementGroupIds,omitempty" tf:"management_group_ids,omitempty"` + + // A list of subscription IDs. + SubscriptionIds []*string `json:"subscriptionIds,omitempty" tf:"subscription_ids,omitempty"` +} + +type ScopeParameters struct { + + // A list of management group IDs. + // +kubebuilder:validation:Optional + ManagementGroupIds []*string `json:"managementGroupIds,omitempty" tf:"management_group_ids,omitempty"` + + // A list of subscription IDs. + // +kubebuilder:validation:Optional + SubscriptionIds []*string `json:"subscriptionIds,omitempty" tf:"subscription_ids,omitempty"` +} + +// ManagerSpec defines the desired state of Manager +type ManagerSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ManagerParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ManagerInitParameters `json:"initProvider,omitempty"` +} + +// ManagerStatus defines the observed state of Manager. +type ManagerStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ManagerObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Manager is the Schema for the Managers API. Manages a Network Managers. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type Manager struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.scope) || (has(self.initProvider) && has(self.initProvider.scope))",message="spec.forProvider.scope is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 
'Update' in self.managementPolicies) || has(self.forProvider.scopeAccesses) || (has(self.initProvider) && has(self.initProvider.scopeAccesses))",message="spec.forProvider.scopeAccesses is a required parameter" + Spec ManagerSpec `json:"spec"` + Status ManagerStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ManagerList contains a list of Managers +type ManagerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Manager `json:"items"` +} + +// Repository type metadata. +var ( + Manager_Kind = "Manager" + Manager_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Manager_Kind}.String() + Manager_KindAPIVersion = Manager_Kind + "." + CRDGroupVersion.String() + Manager_GroupVersionKind = CRDGroupVersion.WithKind(Manager_Kind) +) + +func init() { + SchemeBuilder.Register(&Manager{}, &ManagerList{}) +} diff --git a/apis/network/v1beta2/zz_packetcapture_terraformed.go b/apis/network/v1beta2/zz_packetcapture_terraformed.go new file mode 100755 index 000000000..c61b6e9ee --- /dev/null +++ b/apis/network/v1beta2/zz_packetcapture_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this PacketCapture +func (mg *PacketCapture) GetTerraformResourceType() string { + return "azurerm_network_packet_capture" +} + +// GetConnectionDetailsMapping for this PacketCapture +func (tr *PacketCapture) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this PacketCapture +func (tr *PacketCapture) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this PacketCapture +func (tr *PacketCapture) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this PacketCapture +func (tr *PacketCapture) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this PacketCapture +func (tr *PacketCapture) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this PacketCapture +func (tr *PacketCapture) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this PacketCapture +func (tr *PacketCapture) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + 
		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetInitParameters of this PacketCapture
+func (tr *PacketCapture) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this PacketCapture using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *PacketCapture) LateInitialize(attrs []byte) (bool, error) {
+	params := &PacketCaptureParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *PacketCapture) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/network/v1beta2/zz_packetcapture_types.go b/apis/network/v1beta2/zz_packetcapture_types.go new file mode 100755 index 000000000..e80acc056 --- /dev/null +++ b/apis/network/v1beta2/zz_packetcapture_types.go @@ -0,0 +1,291 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type PacketCaptureFilterInitParameters struct { + + // The local IP Address to be filtered on. Notation: "127.0.0.1" for single address entry. "127.0.0.1-127.0.0.255" for range. "127.0.0.1;127.0.0.5" for multiple entries. Multiple ranges not currently supported. Mixing ranges with multiple entries not currently supported. Changing this forces a new resource to be created. + LocalIPAddress *string `json:"localIpAddress,omitempty" tf:"local_ip_address,omitempty"` + + // The local port to be filtered on. Notation: "80" for single port entry."80-85" for range. "80;443;" for multiple entries. Multiple ranges not currently supported. Mixing ranges with multiple entries not currently supported. Changing this forces a new resource to be created. + LocalPort *string `json:"localPort,omitempty" tf:"local_port,omitempty"` + + // The Protocol to be filtered on. Possible values include Any, TCP and UDP. Changing this forces a new resource to be created. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // The remote IP Address to be filtered on. Notation: "127.0.0.1" for single address entry. "127.0.0.1-127.0.0.255" for range. "127.0.0.1;127.0.0.5;" for multiple entries. 
Multiple ranges not currently supported. Mixing ranges with multiple entries not currently supported.. Changing this forces a new resource to be created. + RemoteIPAddress *string `json:"remoteIpAddress,omitempty" tf:"remote_ip_address,omitempty"` + + // The remote port to be filtered on. Notation: "80" for single port entry."80-85" for range. "80;443;" for multiple entries. Multiple ranges not currently supported. Mixing ranges with multiple entries not currently supported. Changing this forces a new resource to be created. + RemotePort *string `json:"remotePort,omitempty" tf:"remote_port,omitempty"` +} + +type PacketCaptureFilterObservation struct { + + // The local IP Address to be filtered on. Notation: "127.0.0.1" for single address entry. "127.0.0.1-127.0.0.255" for range. "127.0.0.1;127.0.0.5" for multiple entries. Multiple ranges not currently supported. Mixing ranges with multiple entries not currently supported. Changing this forces a new resource to be created. + LocalIPAddress *string `json:"localIpAddress,omitempty" tf:"local_ip_address,omitempty"` + + // The local port to be filtered on. Notation: "80" for single port entry."80-85" for range. "80;443;" for multiple entries. Multiple ranges not currently supported. Mixing ranges with multiple entries not currently supported. Changing this forces a new resource to be created. + LocalPort *string `json:"localPort,omitempty" tf:"local_port,omitempty"` + + // The Protocol to be filtered on. Possible values include Any, TCP and UDP. Changing this forces a new resource to be created. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // The remote IP Address to be filtered on. Notation: "127.0.0.1" for single address entry. "127.0.0.1-127.0.0.255" for range. "127.0.0.1;127.0.0.5;" for multiple entries. Multiple ranges not currently supported. Mixing ranges with multiple entries not currently supported.. Changing this forces a new resource to be created. 
+ RemoteIPAddress *string `json:"remoteIpAddress,omitempty" tf:"remote_ip_address,omitempty"` + + // The remote port to be filtered on. Notation: "80" for single port entry."80-85" for range. "80;443;" for multiple entries. Multiple ranges not currently supported. Mixing ranges with multiple entries not currently supported. Changing this forces a new resource to be created. + RemotePort *string `json:"remotePort,omitempty" tf:"remote_port,omitempty"` +} + +type PacketCaptureFilterParameters struct { + + // The local IP Address to be filtered on. Notation: "127.0.0.1" for single address entry. "127.0.0.1-127.0.0.255" for range. "127.0.0.1;127.0.0.5" for multiple entries. Multiple ranges not currently supported. Mixing ranges with multiple entries not currently supported. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + LocalIPAddress *string `json:"localIpAddress,omitempty" tf:"local_ip_address,omitempty"` + + // The local port to be filtered on. Notation: "80" for single port entry."80-85" for range. "80;443;" for multiple entries. Multiple ranges not currently supported. Mixing ranges with multiple entries not currently supported. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + LocalPort *string `json:"localPort,omitempty" tf:"local_port,omitempty"` + + // The Protocol to be filtered on. Possible values include Any, TCP and UDP. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Protocol *string `json:"protocol" tf:"protocol,omitempty"` + + // The remote IP Address to be filtered on. Notation: "127.0.0.1" for single address entry. "127.0.0.1-127.0.0.255" for range. "127.0.0.1;127.0.0.5;" for multiple entries. Multiple ranges not currently supported. Mixing ranges with multiple entries not currently supported.. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + RemoteIPAddress *string `json:"remoteIpAddress,omitempty" tf:"remote_ip_address,omitempty"` + + // The remote port to be filtered on. Notation: "80" for single port entry."80-85" for range. "80;443;" for multiple entries. Multiple ranges not currently supported. Mixing ranges with multiple entries not currently supported. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + RemotePort *string `json:"remotePort,omitempty" tf:"remote_port,omitempty"` +} + +type PacketCaptureInitParameters struct { + + // One or more filter blocks as defined below. Changing this forces a new resource to be created. + Filter []PacketCaptureFilterInitParameters `json:"filter,omitempty" tf:"filter,omitempty"` + + // The number of bytes captured per packet. The remaining bytes are truncated. Defaults to 0 (Entire Packet Captured). Changing this forces a new resource to be created. + MaximumBytesPerPacket *float64 `json:"maximumBytesPerPacket,omitempty" tf:"maximum_bytes_per_packet,omitempty"` + + // Maximum size of the capture in Bytes. Defaults to 1073741824 (1GB). Changing this forces a new resource to be created. + MaximumBytesPerSession *float64 `json:"maximumBytesPerSession,omitempty" tf:"maximum_bytes_per_session,omitempty"` + + // The maximum duration of the capture session in seconds. Defaults to 18000 (5 hours). Changing this forces a new resource to be created. + MaximumCaptureDuration *float64 `json:"maximumCaptureDuration,omitempty" tf:"maximum_capture_duration,omitempty"` + + // A storage_location block as defined below. Changing this forces a new resource to be created. + StorageLocation *StorageLocationInitParameters `json:"storageLocation,omitempty" tf:"storage_location,omitempty"` + + // The ID of the Resource to capture packets from. Changing this forces a new resource to be created. 
+ TargetResourceID *string `json:"targetResourceId,omitempty" tf:"target_resource_id,omitempty"` +} + +type PacketCaptureObservation struct { + + // One or more filter blocks as defined below. Changing this forces a new resource to be created. + Filter []PacketCaptureFilterObservation `json:"filter,omitempty" tf:"filter,omitempty"` + + // The Packet Capture ID. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The number of bytes captured per packet. The remaining bytes are truncated. Defaults to 0 (Entire Packet Captured). Changing this forces a new resource to be created. + MaximumBytesPerPacket *float64 `json:"maximumBytesPerPacket,omitempty" tf:"maximum_bytes_per_packet,omitempty"` + + // Maximum size of the capture in Bytes. Defaults to 1073741824 (1GB). Changing this forces a new resource to be created. + MaximumBytesPerSession *float64 `json:"maximumBytesPerSession,omitempty" tf:"maximum_bytes_per_session,omitempty"` + + // The maximum duration of the capture session in seconds. Defaults to 18000 (5 hours). Changing this forces a new resource to be created. + MaximumCaptureDuration *float64 `json:"maximumCaptureDuration,omitempty" tf:"maximum_capture_duration,omitempty"` + + // The name of the Network Watcher. Changing this forces a new resource to be created. + NetworkWatcherName *string `json:"networkWatcherName,omitempty" tf:"network_watcher_name,omitempty"` + + // The name of the resource group in which the Network Watcher exists. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A storage_location block as defined below. Changing this forces a new resource to be created. + StorageLocation *StorageLocationObservation `json:"storageLocation,omitempty" tf:"storage_location,omitempty"` + + // The ID of the Resource to capture packets from. Changing this forces a new resource to be created. 
+ TargetResourceID *string `json:"targetResourceId,omitempty" tf:"target_resource_id,omitempty"` +} + +type PacketCaptureParameters struct { + + // One or more filter blocks as defined below. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Filter []PacketCaptureFilterParameters `json:"filter,omitempty" tf:"filter,omitempty"` + + // The number of bytes captured per packet. The remaining bytes are truncated. Defaults to 0 (Entire Packet Captured). Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + MaximumBytesPerPacket *float64 `json:"maximumBytesPerPacket,omitempty" tf:"maximum_bytes_per_packet,omitempty"` + + // Maximum size of the capture in Bytes. Defaults to 1073741824 (1GB). Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + MaximumBytesPerSession *float64 `json:"maximumBytesPerSession,omitempty" tf:"maximum_bytes_per_session,omitempty"` + + // The maximum duration of the capture session in seconds. Defaults to 18000 (5 hours). Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + MaximumCaptureDuration *float64 `json:"maximumCaptureDuration,omitempty" tf:"maximum_capture_duration,omitempty"` + + // The name of the Network Watcher. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.Watcher + // +kubebuilder:validation:Optional + NetworkWatcherName *string `json:"networkWatcherName,omitempty" tf:"network_watcher_name,omitempty"` + + // Reference to a Watcher in network to populate networkWatcherName. + // +kubebuilder:validation:Optional + NetworkWatcherNameRef *v1.Reference `json:"networkWatcherNameRef,omitempty" tf:"-"` + + // Selector for a Watcher in network to populate networkWatcherName. 
+ // +kubebuilder:validation:Optional + NetworkWatcherNameSelector *v1.Selector `json:"networkWatcherNameSelector,omitempty" tf:"-"` + + // The name of the resource group in which the Network Watcher exists. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A storage_location block as defined below. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + StorageLocation *StorageLocationParameters `json:"storageLocation,omitempty" tf:"storage_location,omitempty"` + + // The ID of the Resource to capture packets from. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + TargetResourceID *string `json:"targetResourceId,omitempty" tf:"target_resource_id,omitempty"` +} + +type StorageLocationInitParameters struct { + + // A valid local path on the targeting VM. Must include the name of the capture file (*.cap). For Linux virtual machine it must start with /var/captures. 
+ FilePath *string `json:"filePath,omitempty" tf:"file_path,omitempty"` + + // The ID of the storage account to save the packet capture session + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` + + // Reference to a Account in storage to populate storageAccountId. + // +kubebuilder:validation:Optional + StorageAccountIDRef *v1.Reference `json:"storageAccountIdRef,omitempty" tf:"-"` + + // Selector for a Account in storage to populate storageAccountId. + // +kubebuilder:validation:Optional + StorageAccountIDSelector *v1.Selector `json:"storageAccountIdSelector,omitempty" tf:"-"` +} + +type StorageLocationObservation struct { + + // A valid local path on the targeting VM. Must include the name of the capture file (*.cap). For Linux virtual machine it must start with /var/captures. + FilePath *string `json:"filePath,omitempty" tf:"file_path,omitempty"` + + // The ID of the storage account to save the packet capture session + StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` + + // The URI of the storage path to save the packet capture. + StoragePath *string `json:"storagePath,omitempty" tf:"storage_path,omitempty"` +} + +type StorageLocationParameters struct { + + // A valid local path on the targeting VM. Must include the name of the capture file (*.cap). For Linux virtual machine it must start with /var/captures. 
+ // +kubebuilder:validation:Optional + FilePath *string `json:"filePath,omitempty" tf:"file_path,omitempty"` + + // The ID of the storage account to save the packet capture session + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` + + // Reference to a Account in storage to populate storageAccountId. + // +kubebuilder:validation:Optional + StorageAccountIDRef *v1.Reference `json:"storageAccountIdRef,omitempty" tf:"-"` + + // Selector for a Account in storage to populate storageAccountId. + // +kubebuilder:validation:Optional + StorageAccountIDSelector *v1.Selector `json:"storageAccountIdSelector,omitempty" tf:"-"` +} + +// PacketCaptureSpec defines the desired state of PacketCapture +type PacketCaptureSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider PacketCaptureParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider PacketCaptureInitParameters `json:"initProvider,omitempty"` +} + +// PacketCaptureStatus defines the observed state of PacketCapture. 
+type PacketCaptureStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider PacketCaptureObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// PacketCapture is the Schema for the PacketCaptures API. Configures Packet Capturing against a Virtual Machine using a Network Watcher. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type PacketCapture struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.storageLocation) || (has(self.initProvider) && has(self.initProvider.storageLocation))",message="spec.forProvider.storageLocation is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.targetResourceId) || (has(self.initProvider) && has(self.initProvider.targetResourceId))",message="spec.forProvider.targetResourceId is a required parameter" + Spec PacketCaptureSpec `json:"spec"` + Status PacketCaptureStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// PacketCaptureList contains a list of PacketCaptures +type PacketCaptureList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []PacketCapture 
`json:"items"` +} + +// Repository type metadata. +var ( + PacketCapture_Kind = "PacketCapture" + PacketCapture_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: PacketCapture_Kind}.String() + PacketCapture_KindAPIVersion = PacketCapture_Kind + "." + CRDGroupVersion.String() + PacketCapture_GroupVersionKind = CRDGroupVersion.WithKind(PacketCapture_Kind) +) + +func init() { + SchemeBuilder.Register(&PacketCapture{}, &PacketCaptureList{}) +} diff --git a/apis/network/v1beta2/zz_pointtositevpngateway_terraformed.go b/apis/network/v1beta2/zz_pointtositevpngateway_terraformed.go new file mode 100755 index 000000000..c55798ac7 --- /dev/null +++ b/apis/network/v1beta2/zz_pointtositevpngateway_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this PointToSiteVPNGateway +func (mg *PointToSiteVPNGateway) GetTerraformResourceType() string { + return "azurerm_point_to_site_vpn_gateway" +} + +// GetConnectionDetailsMapping for this PointToSiteVPNGateway +func (tr *PointToSiteVPNGateway) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this PointToSiteVPNGateway +func (tr *PointToSiteVPNGateway) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this PointToSiteVPNGateway +func (tr *PointToSiteVPNGateway) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + 
+// GetID returns ID of underlying Terraform resource of this PointToSiteVPNGateway +func (tr *PointToSiteVPNGateway) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this PointToSiteVPNGateway +func (tr *PointToSiteVPNGateway) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this PointToSiteVPNGateway +func (tr *PointToSiteVPNGateway) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this PointToSiteVPNGateway +func (tr *PointToSiteVPNGateway) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this PointToSiteVPNGateway +func (tr *PointToSiteVPNGateway) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this PointToSiteVPNGateway using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *PointToSiteVPNGateway) LateInitialize(attrs []byte) (bool, error) {
+	params := &PointToSiteVPNGatewayParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+	return li.LateInitialize(&tr.Spec.ForProvider, params)
+}
+
+// GetTerraformSchemaVersion returns the associated Terraform schema version
+func (tr *PointToSiteVPNGateway) GetTerraformSchemaVersion() int {
+	return 0
+}
diff --git a/apis/network/v1beta2/zz_pointtositevpngateway_types.go b/apis/network/v1beta2/zz_pointtositevpngateway_types.go
new file mode 100755
index 000000000..a45d6a98a
--- /dev/null
+++ b/apis/network/v1beta2/zz_pointtositevpngateway_types.go
@@ -0,0 +1,380 @@
+// SPDX-FileCopyrightText: 2024 The Crossplane Authors
+//
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by upjet. DO NOT EDIT.
+
+package v1beta2
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
+)
+
+type ConnectionConfigurationInitParameters struct {
+
+	// Should Internet Security be enabled to secure internet traffic? Changing this forces a new resource to be created. Defaults to false.
+ InternetSecurityEnabled *bool `json:"internetSecurityEnabled,omitempty" tf:"internet_security_enabled,omitempty"` + + // The Name which should be used for this Connection Configuration. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A route block as defined below. + Route *RouteInitParameters `json:"route,omitempty" tf:"route,omitempty"` + + // A vpn_client_address_pool block as defined below. + VPNClientAddressPool *VPNClientAddressPoolInitParameters `json:"vpnClientAddressPool,omitempty" tf:"vpn_client_address_pool,omitempty"` +} + +type ConnectionConfigurationObservation struct { + + // Should Internet Security be enabled to secure internet traffic? Changing this forces a new resource to be created. Defaults to false. + InternetSecurityEnabled *bool `json:"internetSecurityEnabled,omitempty" tf:"internet_security_enabled,omitempty"` + + // The Name which should be used for this Connection Configuration. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A route block as defined below. + Route *RouteObservation `json:"route,omitempty" tf:"route,omitempty"` + + // A vpn_client_address_pool block as defined below. + VPNClientAddressPool *VPNClientAddressPoolObservation `json:"vpnClientAddressPool,omitempty" tf:"vpn_client_address_pool,omitempty"` +} + +type ConnectionConfigurationParameters struct { + + // Should Internet Security be enabled to secure internet traffic? Changing this forces a new resource to be created. Defaults to false. + // +kubebuilder:validation:Optional + InternetSecurityEnabled *bool `json:"internetSecurityEnabled,omitempty" tf:"internet_security_enabled,omitempty"` + + // The Name which should be used for this Connection Configuration. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // A route block as defined below. 
+ // +kubebuilder:validation:Optional + Route *RouteParameters `json:"route,omitempty" tf:"route,omitempty"` + + // A vpn_client_address_pool block as defined below. + // +kubebuilder:validation:Optional + VPNClientAddressPool *VPNClientAddressPoolParameters `json:"vpnClientAddressPool" tf:"vpn_client_address_pool,omitempty"` +} + +type PointToSiteVPNGatewayInitParameters struct { + + // A connection_configuration block as defined below. + ConnectionConfiguration []ConnectionConfigurationInitParameters `json:"connectionConfiguration,omitempty" tf:"connection_configuration,omitempty"` + + // A list of IP Addresses of DNS Servers for the Point-to-Site VPN Gateway. + DNSServers []*string `json:"dnsServers,omitempty" tf:"dns_servers,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Is the Routing Preference for the Public IP Interface of the VPN Gateway enabled? Defaults to false. Changing this forces a new resource to be created. + RoutingPreferenceInternetEnabled *bool `json:"routingPreferenceInternetEnabled,omitempty" tf:"routing_preference_internet_enabled,omitempty"` + + // The Scale Unit for this Point-to-Site VPN Gateway. + ScaleUnit *float64 `json:"scaleUnit,omitempty" tf:"scale_unit,omitempty"` + + // A mapping of tags to assign to the Point-to-Site VPN Gateway. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The ID of the VPN Server Configuration which this Point-to-Site VPN Gateway should use. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.VPNServerConfiguration + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + VPNServerConfigurationID *string `json:"vpnServerConfigurationId,omitempty" tf:"vpn_server_configuration_id,omitempty"` + + // Reference to a VPNServerConfiguration in network to populate vpnServerConfigurationId. + // +kubebuilder:validation:Optional + VPNServerConfigurationIDRef *v1.Reference `json:"vpnServerConfigurationIdRef,omitempty" tf:"-"` + + // Selector for a VPNServerConfiguration in network to populate vpnServerConfigurationId. + // +kubebuilder:validation:Optional + VPNServerConfigurationIDSelector *v1.Selector `json:"vpnServerConfigurationIdSelector,omitempty" tf:"-"` + + // The ID of the Virtual Hub where this Point-to-Site VPN Gateway should exist. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.VirtualHub + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + VirtualHubID *string `json:"virtualHubId,omitempty" tf:"virtual_hub_id,omitempty"` + + // Reference to a VirtualHub in network to populate virtualHubId. + // +kubebuilder:validation:Optional + VirtualHubIDRef *v1.Reference `json:"virtualHubIdRef,omitempty" tf:"-"` + + // Selector for a VirtualHub in network to populate virtualHubId. + // +kubebuilder:validation:Optional + VirtualHubIDSelector *v1.Selector `json:"virtualHubIdSelector,omitempty" tf:"-"` +} + +type PointToSiteVPNGatewayObservation struct { + + // A connection_configuration block as defined below. + ConnectionConfiguration []ConnectionConfigurationObservation `json:"connectionConfiguration,omitempty" tf:"connection_configuration,omitempty"` + + // A list of IP Addresses of DNS Servers for the Point-to-Site VPN Gateway. 
+ DNSServers []*string `json:"dnsServers,omitempty" tf:"dns_servers,omitempty"` + + // The ID of the Point-to-Site VPN Gateway. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the resource group in which to create the Point-to-Site VPN Gateway. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Is the Routing Preference for the Public IP Interface of the VPN Gateway enabled? Defaults to false. Changing this forces a new resource to be created. + RoutingPreferenceInternetEnabled *bool `json:"routingPreferenceInternetEnabled,omitempty" tf:"routing_preference_internet_enabled,omitempty"` + + // The Scale Unit for this Point-to-Site VPN Gateway. + ScaleUnit *float64 `json:"scaleUnit,omitempty" tf:"scale_unit,omitempty"` + + // A mapping of tags to assign to the Point-to-Site VPN Gateway. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The ID of the VPN Server Configuration which this Point-to-Site VPN Gateway should use. Changing this forces a new resource to be created. + VPNServerConfigurationID *string `json:"vpnServerConfigurationId,omitempty" tf:"vpn_server_configuration_id,omitempty"` + + // The ID of the Virtual Hub where this Point-to-Site VPN Gateway should exist. Changing this forces a new resource to be created. + VirtualHubID *string `json:"virtualHubId,omitempty" tf:"virtual_hub_id,omitempty"` +} + +type PointToSiteVPNGatewayParameters struct { + + // A connection_configuration block as defined below. 
+ // +kubebuilder:validation:Optional + ConnectionConfiguration []ConnectionConfigurationParameters `json:"connectionConfiguration,omitempty" tf:"connection_configuration,omitempty"` + + // A list of IP Addresses of DNS Servers for the Point-to-Site VPN Gateway. + // +kubebuilder:validation:Optional + DNSServers []*string `json:"dnsServers,omitempty" tf:"dns_servers,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the resource group in which to create the Point-to-Site VPN Gateway. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // Is the Routing Preference for the Public IP Interface of the VPN Gateway enabled? Defaults to false. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + RoutingPreferenceInternetEnabled *bool `json:"routingPreferenceInternetEnabled,omitempty" tf:"routing_preference_internet_enabled,omitempty"` + + // The Scale Unit for this Point-to-Site VPN Gateway. + // +kubebuilder:validation:Optional + ScaleUnit *float64 `json:"scaleUnit,omitempty" tf:"scale_unit,omitempty"` + + // A mapping of tags to assign to the Point-to-Site VPN Gateway. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The ID of the VPN Server Configuration which this Point-to-Site VPN Gateway should use. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.VPNServerConfiguration + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + VPNServerConfigurationID *string `json:"vpnServerConfigurationId,omitempty" tf:"vpn_server_configuration_id,omitempty"` + + // Reference to a VPNServerConfiguration in network to populate vpnServerConfigurationId. + // +kubebuilder:validation:Optional + VPNServerConfigurationIDRef *v1.Reference `json:"vpnServerConfigurationIdRef,omitempty" tf:"-"` + + // Selector for a VPNServerConfiguration in network to populate vpnServerConfigurationId. + // +kubebuilder:validation:Optional + VPNServerConfigurationIDSelector *v1.Selector `json:"vpnServerConfigurationIdSelector,omitempty" tf:"-"` + + // The ID of the Virtual Hub where this Point-to-Site VPN Gateway should exist. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.VirtualHub + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + VirtualHubID *string `json:"virtualHubId,omitempty" tf:"virtual_hub_id,omitempty"` + + // Reference to a VirtualHub in network to populate virtualHubId. + // +kubebuilder:validation:Optional + VirtualHubIDRef *v1.Reference `json:"virtualHubIdRef,omitempty" tf:"-"` + + // Selector for a VirtualHub in network to populate virtualHubId. 
+ // +kubebuilder:validation:Optional + VirtualHubIDSelector *v1.Selector `json:"virtualHubIdSelector,omitempty" tf:"-"` +} + +type RouteInitParameters struct { + + // The Virtual Hub Route Table resource id associated with this Routing Configuration. + AssociatedRouteTableID *string `json:"associatedRouteTableId,omitempty" tf:"associated_route_table_id,omitempty"` + + // The resource ID of the Route Map associated with this Routing Configuration for inbound learned routes. + InboundRouteMapID *string `json:"inboundRouteMapId,omitempty" tf:"inbound_route_map_id,omitempty"` + + // The resource ID of the Route Map associated with this Routing Configuration for outbound advertised routes. + OutboundRouteMapID *string `json:"outboundRouteMapId,omitempty" tf:"outbound_route_map_id,omitempty"` + + // A propagated_route_table block as defined below. + PropagatedRouteTable *RoutePropagatedRouteTableInitParameters `json:"propagatedRouteTable,omitempty" tf:"propagated_route_table,omitempty"` +} + +type RouteObservation struct { + + // The Virtual Hub Route Table resource id associated with this Routing Configuration. + AssociatedRouteTableID *string `json:"associatedRouteTableId,omitempty" tf:"associated_route_table_id,omitempty"` + + // The resource ID of the Route Map associated with this Routing Configuration for inbound learned routes. + InboundRouteMapID *string `json:"inboundRouteMapId,omitempty" tf:"inbound_route_map_id,omitempty"` + + // The resource ID of the Route Map associated with this Routing Configuration for outbound advertised routes. + OutboundRouteMapID *string `json:"outboundRouteMapId,omitempty" tf:"outbound_route_map_id,omitempty"` + + // A propagated_route_table block as defined below. + PropagatedRouteTable *RoutePropagatedRouteTableObservation `json:"propagatedRouteTable,omitempty" tf:"propagated_route_table,omitempty"` +} + +type RouteParameters struct { + + // The Virtual Hub Route Table resource id associated with this Routing Configuration. 
+ // +kubebuilder:validation:Optional + AssociatedRouteTableID *string `json:"associatedRouteTableId" tf:"associated_route_table_id,omitempty"` + + // The resource ID of the Route Map associated with this Routing Configuration for inbound learned routes. + // +kubebuilder:validation:Optional + InboundRouteMapID *string `json:"inboundRouteMapId,omitempty" tf:"inbound_route_map_id,omitempty"` + + // The resource ID of the Route Map associated with this Routing Configuration for outbound advertised routes. + // +kubebuilder:validation:Optional + OutboundRouteMapID *string `json:"outboundRouteMapId,omitempty" tf:"outbound_route_map_id,omitempty"` + + // A propagated_route_table block as defined below. + // +kubebuilder:validation:Optional + PropagatedRouteTable *RoutePropagatedRouteTableParameters `json:"propagatedRouteTable,omitempty" tf:"propagated_route_table,omitempty"` +} + +type RoutePropagatedRouteTableInitParameters struct { + + // The list of Virtual Hub Route Table resource id which the routes will be propagated to. + Ids []*string `json:"ids,omitempty" tf:"ids,omitempty"` + + // The list of labels to logically group Virtual Hub Route Tables which the routes will be propagated to. + // +listType=set + Labels []*string `json:"labels,omitempty" tf:"labels,omitempty"` +} + +type RoutePropagatedRouteTableObservation struct { + + // The list of Virtual Hub Route Table resource id which the routes will be propagated to. + Ids []*string `json:"ids,omitempty" tf:"ids,omitempty"` + + // The list of labels to logically group Virtual Hub Route Tables which the routes will be propagated to. + // +listType=set + Labels []*string `json:"labels,omitempty" tf:"labels,omitempty"` +} + +type RoutePropagatedRouteTableParameters struct { + + // The list of Virtual Hub Route Table resource id which the routes will be propagated to. 
+ // +kubebuilder:validation:Optional + Ids []*string `json:"ids" tf:"ids,omitempty"` + + // The list of labels to logically group Virtual Hub Route Tables which the routes will be propagated to. + // +kubebuilder:validation:Optional + // +listType=set + Labels []*string `json:"labels,omitempty" tf:"labels,omitempty"` +} + +type VPNClientAddressPoolInitParameters struct { + + // A list of CIDR Ranges which should be used as Address Prefixes. + // +listType=set + AddressPrefixes []*string `json:"addressPrefixes,omitempty" tf:"address_prefixes,omitempty"` +} + +type VPNClientAddressPoolObservation struct { + + // A list of CIDR Ranges which should be used as Address Prefixes. + // +listType=set + AddressPrefixes []*string `json:"addressPrefixes,omitempty" tf:"address_prefixes,omitempty"` +} + +type VPNClientAddressPoolParameters struct { + + // A list of CIDR Ranges which should be used as Address Prefixes. + // +kubebuilder:validation:Optional + // +listType=set + AddressPrefixes []*string `json:"addressPrefixes" tf:"address_prefixes,omitempty"` +} + +// PointToSiteVPNGatewaySpec defines the desired state of PointToSiteVPNGateway +type PointToSiteVPNGatewaySpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider PointToSiteVPNGatewayParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider PointToSiteVPNGatewayInitParameters `json:"initProvider,omitempty"` +} + +// PointToSiteVPNGatewayStatus defines the observed state of PointToSiteVPNGateway. +type PointToSiteVPNGatewayStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider PointToSiteVPNGatewayObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// PointToSiteVPNGateway is the Schema for the PointToSiteVPNGateways API. Manages a Point-to-Site VPN Gateway. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type PointToSiteVPNGateway struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.connectionConfiguration) || (has(self.initProvider) && has(self.initProvider.connectionConfiguration))",message="spec.forProvider.connectionConfiguration is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in 
self.managementPolicies) || has(self.forProvider.scaleUnit) || (has(self.initProvider) && has(self.initProvider.scaleUnit))",message="spec.forProvider.scaleUnit is a required parameter" + Spec PointToSiteVPNGatewaySpec `json:"spec"` + Status PointToSiteVPNGatewayStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// PointToSiteVPNGatewayList contains a list of PointToSiteVPNGateways +type PointToSiteVPNGatewayList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []PointToSiteVPNGateway `json:"items"` +} + +// Repository type metadata. +var ( + PointToSiteVPNGateway_Kind = "PointToSiteVPNGateway" + PointToSiteVPNGateway_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: PointToSiteVPNGateway_Kind}.String() + PointToSiteVPNGateway_KindAPIVersion = PointToSiteVPNGateway_Kind + "." + CRDGroupVersion.String() + PointToSiteVPNGateway_GroupVersionKind = CRDGroupVersion.WithKind(PointToSiteVPNGateway_Kind) +) + +func init() { + SchemeBuilder.Register(&PointToSiteVPNGateway{}, &PointToSiteVPNGatewayList{}) +} diff --git a/apis/network/v1beta2/zz_privatednszone_terraformed.go b/apis/network/v1beta2/zz_privatednszone_terraformed.go new file mode 100755 index 000000000..8063bd478 --- /dev/null +++ b/apis/network/v1beta2/zz_privatednszone_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this PrivateDNSZone +func (mg *PrivateDNSZone) GetTerraformResourceType() string { + return "azurerm_private_dns_zone" +} + +// GetConnectionDetailsMapping for this PrivateDNSZone +func (tr *PrivateDNSZone) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this PrivateDNSZone +func (tr *PrivateDNSZone) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this PrivateDNSZone +func (tr *PrivateDNSZone) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this PrivateDNSZone +func (tr *PrivateDNSZone) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this PrivateDNSZone +func (tr *PrivateDNSZone) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this PrivateDNSZone +func (tr *PrivateDNSZone) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this PrivateDNSZone +func (tr *PrivateDNSZone) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != 
nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this PrivateDNSZone +func (tr *PrivateDNSZone) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this PrivateDNSZone using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *PrivateDNSZone) LateInitialize(attrs []byte) (bool, error) { + params := &PrivateDNSZoneParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *PrivateDNSZone) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/network/v1beta2/zz_privatednszone_types.go b/apis/network/v1beta2/zz_privatednszone_types.go new file mode 100755 index 000000000..d8c3c27fc --- /dev/null +++ b/apis/network/v1beta2/zz_privatednszone_types.go @@ -0,0 +1,228 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type PrivateDNSZoneInitParameters struct { + + // An soa_record block as defined below. Changing this forces a new resource to be created. + SoaRecord *PrivateDNSZoneSoaRecordInitParameters `json:"soaRecord,omitempty" tf:"soa_record,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type PrivateDNSZoneObservation struct { + + // The Private DNS Zone ID. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The maximum number of record sets that can be created in this Private DNS zone. + MaxNumberOfRecordSets *float64 `json:"maxNumberOfRecordSets,omitempty" tf:"max_number_of_record_sets,omitempty"` + + // The maximum number of virtual networks that can be linked to this Private DNS zone. + MaxNumberOfVirtualNetworkLinks *float64 `json:"maxNumberOfVirtualNetworkLinks,omitempty" tf:"max_number_of_virtual_network_links,omitempty"` + + // The maximum number of virtual networks that can be linked to this Private DNS zone with registration enabled. 
+ MaxNumberOfVirtualNetworkLinksWithRegistration *float64 `json:"maxNumberOfVirtualNetworkLinksWithRegistration,omitempty" tf:"max_number_of_virtual_network_links_with_registration,omitempty"` + + // The current number of record sets in this Private DNS zone. + NumberOfRecordSets *float64 `json:"numberOfRecordSets,omitempty" tf:"number_of_record_sets,omitempty"` + + // Specifies the resource group where the resource exists. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // An soa_record block as defined below. Changing this forces a new resource to be created. + SoaRecord *PrivateDNSZoneSoaRecordObservation `json:"soaRecord,omitempty" tf:"soa_record,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type PrivateDNSZoneParameters struct { + + // Specifies the resource group where the resource exists. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // An soa_record block as defined below. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + SoaRecord *PrivateDNSZoneSoaRecordParameters `json:"soaRecord,omitempty" tf:"soa_record,omitempty"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type PrivateDNSZoneSoaRecordInitParameters struct { + + // The email contact for the SOA record. + Email *string `json:"email,omitempty" tf:"email,omitempty"` + + // The expire time for the SOA record. Defaults to 2419200. + ExpireTime *float64 `json:"expireTime,omitempty" tf:"expire_time,omitempty"` + + // The minimum Time To Live for the SOA record. By convention, it is used to determine the negative caching duration. Defaults to 10. + MinimumTTL *float64 `json:"minimumTtl,omitempty" tf:"minimum_ttl,omitempty"` + + // The refresh time for the SOA record. Defaults to 3600. + RefreshTime *float64 `json:"refreshTime,omitempty" tf:"refresh_time,omitempty"` + + // The retry time for the SOA record. Defaults to 300. + RetryTime *float64 `json:"retryTime,omitempty" tf:"retry_time,omitempty"` + + // The Time To Live of the SOA Record in seconds. Defaults to 3600. + TTL *float64 `json:"ttl,omitempty" tf:"ttl,omitempty"` + + // A mapping of tags to assign to the Record Set. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type PrivateDNSZoneSoaRecordObservation struct { + + // The email contact for the SOA record. + Email *string `json:"email,omitempty" tf:"email,omitempty"` + + // The expire time for the SOA record. Defaults to 2419200. + ExpireTime *float64 `json:"expireTime,omitempty" tf:"expire_time,omitempty"` + + // The fully qualified domain name of the Record Set. + Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"` + + // The domain name of the authoritative name server for the SOA record. 
+ HostName *string `json:"hostName,omitempty" tf:"host_name,omitempty"` + + // The minimum Time To Live for the SOA record. By convention, it is used to determine the negative caching duration. Defaults to 10. + MinimumTTL *float64 `json:"minimumTtl,omitempty" tf:"minimum_ttl,omitempty"` + + // The refresh time for the SOA record. Defaults to 3600. + RefreshTime *float64 `json:"refreshTime,omitempty" tf:"refresh_time,omitempty"` + + // The retry time for the SOA record. Defaults to 300. + RetryTime *float64 `json:"retryTime,omitempty" tf:"retry_time,omitempty"` + + // The serial number for the SOA record. + SerialNumber *float64 `json:"serialNumber,omitempty" tf:"serial_number,omitempty"` + + // The Time To Live of the SOA Record in seconds. Defaults to 3600. + TTL *float64 `json:"ttl,omitempty" tf:"ttl,omitempty"` + + // A mapping of tags to assign to the Record Set. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type PrivateDNSZoneSoaRecordParameters struct { + + // The email contact for the SOA record. + // +kubebuilder:validation:Optional + Email *string `json:"email" tf:"email,omitempty"` + + // The expire time for the SOA record. Defaults to 2419200. + // +kubebuilder:validation:Optional + ExpireTime *float64 `json:"expireTime,omitempty" tf:"expire_time,omitempty"` + + // The minimum Time To Live for the SOA record. By convention, it is used to determine the negative caching duration. Defaults to 10. + // +kubebuilder:validation:Optional + MinimumTTL *float64 `json:"minimumTtl,omitempty" tf:"minimum_ttl,omitempty"` + + // The refresh time for the SOA record. Defaults to 3600. + // +kubebuilder:validation:Optional + RefreshTime *float64 `json:"refreshTime,omitempty" tf:"refresh_time,omitempty"` + + // The retry time for the SOA record. Defaults to 300. 
+ // +kubebuilder:validation:Optional + RetryTime *float64 `json:"retryTime,omitempty" tf:"retry_time,omitempty"` + + // The Time To Live of the SOA Record in seconds. Defaults to 3600. + // +kubebuilder:validation:Optional + TTL *float64 `json:"ttl,omitempty" tf:"ttl,omitempty"` + + // A mapping of tags to assign to the Record Set. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +// PrivateDNSZoneSpec defines the desired state of PrivateDNSZone +type PrivateDNSZoneSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider PrivateDNSZoneParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider PrivateDNSZoneInitParameters `json:"initProvider,omitempty"` +} + +// PrivateDNSZoneStatus defines the observed state of PrivateDNSZone. +type PrivateDNSZoneStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider PrivateDNSZoneObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// PrivateDNSZone is the Schema for the PrivateDNSZones API. Manages a Private DNS Zone. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type PrivateDNSZone struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec PrivateDNSZoneSpec `json:"spec"` + Status PrivateDNSZoneStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// PrivateDNSZoneList contains a list of PrivateDNSZones +type PrivateDNSZoneList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []PrivateDNSZone `json:"items"` +} + +// Repository type metadata. +var ( + PrivateDNSZone_Kind = "PrivateDNSZone" + PrivateDNSZone_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: PrivateDNSZone_Kind}.String() + PrivateDNSZone_KindAPIVersion = PrivateDNSZone_Kind + "." + CRDGroupVersion.String() + PrivateDNSZone_GroupVersionKind = CRDGroupVersion.WithKind(PrivateDNSZone_Kind) +) + +func init() { + SchemeBuilder.Register(&PrivateDNSZone{}, &PrivateDNSZoneList{}) +} diff --git a/apis/network/v1beta2/zz_privateendpoint_terraformed.go b/apis/network/v1beta2/zz_privateendpoint_terraformed.go new file mode 100755 index 000000000..0e7a9b799 --- /dev/null +++ b/apis/network/v1beta2/zz_privateendpoint_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this PrivateEndpoint +func (mg *PrivateEndpoint) GetTerraformResourceType() string { + return "azurerm_private_endpoint" +} + +// GetConnectionDetailsMapping for this PrivateEndpoint +func (tr *PrivateEndpoint) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this PrivateEndpoint +func (tr *PrivateEndpoint) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this PrivateEndpoint +func (tr *PrivateEndpoint) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this PrivateEndpoint +func (tr *PrivateEndpoint) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this PrivateEndpoint +func (tr *PrivateEndpoint) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this PrivateEndpoint +func (tr *PrivateEndpoint) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this PrivateEndpoint +func (tr *PrivateEndpoint) GetInitParameters() (map[string]any, error) { + p, err := 
json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this PrivateEndpoint
+func (tr *PrivateEndpoint) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this PrivateEndpoint using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *PrivateEndpoint) LateInitialize(attrs []byte) (bool, error) {
+	params := &PrivateEndpointParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *PrivateEndpoint) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/network/v1beta2/zz_privateendpoint_types.go b/apis/network/v1beta2/zz_privateendpoint_types.go new file mode 100755 index 000000000..85fb5b100 --- /dev/null +++ b/apis/network/v1beta2/zz_privateendpoint_types.go @@ -0,0 +1,454 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CustomDNSConfigsInitParameters struct { +} + +type CustomDNSConfigsObservation struct { + + // The fully qualified domain name to the private_endpoint. + Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"` + + // A list of all IP Addresses that map to the private_endpoint fqdn. + IPAddresses []*string `json:"ipAddresses,omitempty" tf:"ip_addresses,omitempty"` +} + +type CustomDNSConfigsParameters struct { +} + +type NetworkInterfaceInitParameters struct { +} + +type NetworkInterfaceObservation struct { + + // The ID of the network interface associated with the private_endpoint. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name of the network interface associated with the private_endpoint. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type NetworkInterfaceParameters struct { +} + +type PrivateDNSZoneConfigsInitParameters struct { +} + +type PrivateDNSZoneConfigsObservation struct { + + // The ID of the Private DNS Zone Config. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name of the Private DNS Zone that the config belongs to. 
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// The ID of the Private DNS Zone that the config belongs to.
+	PrivateDNSZoneID *string `json:"privateDnsZoneId,omitempty" tf:"private_dns_zone_id,omitempty"`
+
+	// A record_sets block as defined below.
+	RecordSets []RecordSetsObservation `json:"recordSets,omitempty" tf:"record_sets,omitempty"`
+}
+
+type PrivateDNSZoneConfigsParameters struct {
+}
+
+type PrivateDNSZoneGroupInitParameters struct {
+
+	// Specifies the Name of the Private DNS Zone Group.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// Specifies the list of Private DNS Zones to include within the private_dns_zone_group.
+	// +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.PrivateDNSZone
+	// +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID()
+	PrivateDNSZoneIds []*string `json:"privateDnsZoneIds,omitempty" tf:"private_dns_zone_ids,omitempty"`
+
+	// References to PrivateDNSZone in network to populate privateDnsZoneIds.
+	// +kubebuilder:validation:Optional
+	PrivateDNSZoneIdsRefs []v1.Reference `json:"privateDnsZoneIdsRefs,omitempty" tf:"-"`
+
+	// Selector for a list of PrivateDNSZone in network to populate privateDnsZoneIds.
+	// +kubebuilder:validation:Optional
+	PrivateDNSZoneIdsSelector *v1.Selector `json:"privateDnsZoneIdsSelector,omitempty" tf:"-"`
+}
+
+type PrivateDNSZoneGroupObservation struct {
+
+	// The ID of the Private DNS Zone Group.
+	ID *string `json:"id,omitempty" tf:"id,omitempty"`
+
+	// Specifies the Name of the Private DNS Zone Group.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// Specifies the list of Private DNS Zones to include within the private_dns_zone_group.
+	PrivateDNSZoneIds []*string `json:"privateDnsZoneIds,omitempty" tf:"private_dns_zone_ids,omitempty"`
+}
+
+type PrivateDNSZoneGroupParameters struct {
+
+	// Specifies the Name of the Private DNS Zone Group.
+ // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Specifies the list of Private DNS Zones to include within the private_dns_zone_group. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.PrivateDNSZone + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + PrivateDNSZoneIds []*string `json:"privateDnsZoneIds,omitempty" tf:"private_dns_zone_ids,omitempty"` + + // References to PrivateDNSZone in network to populate privateDnsZoneIds. + // +kubebuilder:validation:Optional + PrivateDNSZoneIdsRefs []v1.Reference `json:"privateDnsZoneIdsRefs,omitempty" tf:"-"` + + // Selector for a list of PrivateDNSZone in network to populate privateDnsZoneIds. + // +kubebuilder:validation:Optional + PrivateDNSZoneIdsSelector *v1.Selector `json:"privateDnsZoneIdsSelector,omitempty" tf:"-"` +} + +type PrivateEndpointIPConfigurationInitParameters struct { + + // Specifies the member name this IP address applies to. If it is not specified, it will use the value of subresource_name. Changing this forces a new resource to be created. + MemberName *string `json:"memberName,omitempty" tf:"member_name,omitempty"` + + // Specifies the Name of the IP Configuration. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies the static IP address within the private endpoint's subnet to be used. Changing this forces a new resource to be created. + PrivateIPAddress *string `json:"privateIpAddress,omitempty" tf:"private_ip_address,omitempty"` + + // Specifies the subresource this IP address applies to. subresource_names corresponds to group_id. Changing this forces a new resource to be created. 
+ SubresourceName *string `json:"subresourceName,omitempty" tf:"subresource_name,omitempty"` +} + +type PrivateEndpointIPConfigurationObservation struct { + + // Specifies the member name this IP address applies to. If it is not specified, it will use the value of subresource_name. Changing this forces a new resource to be created. + MemberName *string `json:"memberName,omitempty" tf:"member_name,omitempty"` + + // Specifies the Name of the IP Configuration. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies the static IP address within the private endpoint's subnet to be used. Changing this forces a new resource to be created. + PrivateIPAddress *string `json:"privateIpAddress,omitempty" tf:"private_ip_address,omitempty"` + + // Specifies the subresource this IP address applies to. subresource_names corresponds to group_id. Changing this forces a new resource to be created. + SubresourceName *string `json:"subresourceName,omitempty" tf:"subresource_name,omitempty"` +} + +type PrivateEndpointIPConfigurationParameters struct { + + // Specifies the member name this IP address applies to. If it is not specified, it will use the value of subresource_name. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + MemberName *string `json:"memberName,omitempty" tf:"member_name,omitempty"` + + // Specifies the Name of the IP Configuration. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Specifies the static IP address within the private endpoint's subnet to be used. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + PrivateIPAddress *string `json:"privateIpAddress" tf:"private_ip_address,omitempty"` + + // Specifies the subresource this IP address applies to. subresource_names corresponds to group_id. 
Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + SubresourceName *string `json:"subresourceName,omitempty" tf:"subresource_name,omitempty"` +} + +type PrivateEndpointInitParameters struct { + + // The custom name of the network interface attached to the private endpoint. Changing this forces a new resource to be created. + CustomNetworkInterfaceName *string `json:"customNetworkInterfaceName,omitempty" tf:"custom_network_interface_name,omitempty"` + + // One or more ip_configuration blocks as defined below. This allows a static IP address to be set for this Private Endpoint, otherwise an address is dynamically allocated from the Subnet. + IPConfiguration []PrivateEndpointIPConfigurationInitParameters `json:"ipConfiguration,omitempty" tf:"ip_configuration,omitempty"` + + // The supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A private_dns_zone_group block as defined below. + PrivateDNSZoneGroup *PrivateDNSZoneGroupInitParameters `json:"privateDnsZoneGroup,omitempty" tf:"private_dns_zone_group,omitempty"` + + // A private_service_connection block as defined below. + PrivateServiceConnection *PrivateServiceConnectionInitParameters `json:"privateServiceConnection,omitempty" tf:"private_service_connection,omitempty"` + + // The ID of the Subnet from which Private IP Addresses will be allocated for this Private Endpoint. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. 
+ // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type PrivateEndpointObservation struct { + + // A custom_dns_configs block as defined below. + CustomDNSConfigs []CustomDNSConfigsObservation `json:"customDnsConfigs,omitempty" tf:"custom_dns_configs,omitempty"` + + // The custom name of the network interface attached to the private endpoint. Changing this forces a new resource to be created. + CustomNetworkInterfaceName *string `json:"customNetworkInterfaceName,omitempty" tf:"custom_network_interface_name,omitempty"` + + // The ID of the Private Endpoint. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // One or more ip_configuration blocks as defined below. This allows a static IP address to be set for this Private Endpoint, otherwise an address is dynamically allocated from the Subnet. + IPConfiguration []PrivateEndpointIPConfigurationObservation `json:"ipConfiguration,omitempty" tf:"ip_configuration,omitempty"` + + // The supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A network_interface block as defined below. + NetworkInterface []NetworkInterfaceObservation `json:"networkInterface,omitempty" tf:"network_interface,omitempty"` + + // A private_dns_zone_configs block as defined below. + PrivateDNSZoneConfigs []PrivateDNSZoneConfigsObservation `json:"privateDnsZoneConfigs,omitempty" tf:"private_dns_zone_configs,omitempty"` + + // A private_dns_zone_group block as defined below. 
+ PrivateDNSZoneGroup *PrivateDNSZoneGroupObservation `json:"privateDnsZoneGroup,omitempty" tf:"private_dns_zone_group,omitempty"` + + // A private_service_connection block as defined below. + PrivateServiceConnection *PrivateServiceConnectionObservation `json:"privateServiceConnection,omitempty" tf:"private_service_connection,omitempty"` + + // Specifies the Name of the Resource Group within which the Private Endpoint should exist. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // The ID of the Subnet from which Private IP Addresses will be allocated for this Private Endpoint. Changing this forces a new resource to be created. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type PrivateEndpointParameters struct { + + // The custom name of the network interface attached to the private endpoint. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + CustomNetworkInterfaceName *string `json:"customNetworkInterfaceName,omitempty" tf:"custom_network_interface_name,omitempty"` + + // One or more ip_configuration blocks as defined below. This allows a static IP address to be set for this Private Endpoint, otherwise an address is dynamically allocated from the Subnet. + // +kubebuilder:validation:Optional + IPConfiguration []PrivateEndpointIPConfigurationParameters `json:"ipConfiguration,omitempty" tf:"ip_configuration,omitempty"` + + // The supported Azure location where the resource exists. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A private_dns_zone_group block as defined below. 
+ // +kubebuilder:validation:Optional + PrivateDNSZoneGroup *PrivateDNSZoneGroupParameters `json:"privateDnsZoneGroup,omitempty" tf:"private_dns_zone_group,omitempty"` + + // A private_service_connection block as defined below. + // +kubebuilder:validation:Optional + PrivateServiceConnection *PrivateServiceConnectionParameters `json:"privateServiceConnection,omitempty" tf:"private_service_connection,omitempty"` + + // Specifies the Name of the Resource Group within which the Private Endpoint should exist. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // The ID of the Subnet from which Private IP Addresses will be allocated for this Private Endpoint. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. 
+ // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type PrivateServiceConnectionInitParameters struct { + + // Does the Private Endpoint require Manual Approval from the remote resource owner? Changing this forces a new resource to be created. + IsManualConnection *bool `json:"isManualConnection,omitempty" tf:"is_manual_connection,omitempty"` + + // Specifies the Name of the Private Service Connection. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Service Alias of the Private Link Enabled Remote Resource which this Private Endpoint should be connected to. One of private_connection_resource_id or private_connection_resource_alias must be specified. Changing this forces a new resource to be created. + PrivateConnectionResourceAlias *string `json:"privateConnectionResourceAlias,omitempty" tf:"private_connection_resource_alias,omitempty"` + + // The ID of the Private Link Enabled Remote Resource which this Private Endpoint should be connected to. One of private_connection_resource_id or private_connection_resource_alias must be specified. Changing this forces a new resource to be created. For a web app or function app slot, the parent web app should be used in this field instead of a reference to the slot itself. + PrivateConnectionResourceID *string `json:"privateConnectionResourceId,omitempty" tf:"private_connection_resource_id,omitempty"` + + // A message passed to the owner of the remote resource when the private endpoint attempts to establish the connection to the remote resource. The request message can be a maximum of 140 characters in length. Only valid if is_manual_connection is set to true. 
+ RequestMessage *string `json:"requestMessage,omitempty" tf:"request_message,omitempty"` + + // A list of subresource names which the Private Endpoint is able to connect to. subresource_names corresponds to group_id. Possible values are detailed in the product documentation in the Subresources column. Changing this forces a new resource to be created. + SubresourceNames []*string `json:"subresourceNames,omitempty" tf:"subresource_names,omitempty"` +} + +type PrivateServiceConnectionObservation struct { + + // Does the Private Endpoint require Manual Approval from the remote resource owner? Changing this forces a new resource to be created. + IsManualConnection *bool `json:"isManualConnection,omitempty" tf:"is_manual_connection,omitempty"` + + // Specifies the Name of the Private Service Connection. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Service Alias of the Private Link Enabled Remote Resource which this Private Endpoint should be connected to. One of private_connection_resource_id or private_connection_resource_alias must be specified. Changing this forces a new resource to be created. + PrivateConnectionResourceAlias *string `json:"privateConnectionResourceAlias,omitempty" tf:"private_connection_resource_alias,omitempty"` + + // The ID of the Private Link Enabled Remote Resource which this Private Endpoint should be connected to. One of private_connection_resource_id or private_connection_resource_alias must be specified. Changing this forces a new resource to be created. For a web app or function app slot, the parent web app should be used in this field instead of a reference to the slot itself. 
+ PrivateConnectionResourceID *string `json:"privateConnectionResourceId,omitempty" tf:"private_connection_resource_id,omitempty"` + + // (Computed) The private IP address associated with the private endpoint, note that you will have a private IP address assigned to the private endpoint even if the connection request was Rejected. + PrivateIPAddress *string `json:"privateIpAddress,omitempty" tf:"private_ip_address,omitempty"` + + // A message passed to the owner of the remote resource when the private endpoint attempts to establish the connection to the remote resource. The request message can be a maximum of 140 characters in length. Only valid if is_manual_connection is set to true. + RequestMessage *string `json:"requestMessage,omitempty" tf:"request_message,omitempty"` + + // A list of subresource names which the Private Endpoint is able to connect to. subresource_names corresponds to group_id. Possible values are detailed in the product documentation in the Subresources column. Changing this forces a new resource to be created. + SubresourceNames []*string `json:"subresourceNames,omitempty" tf:"subresource_names,omitempty"` +} + +type PrivateServiceConnectionParameters struct { + + // Does the Private Endpoint require Manual Approval from the remote resource owner? Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + IsManualConnection *bool `json:"isManualConnection" tf:"is_manual_connection,omitempty"` + + // Specifies the Name of the Private Service Connection. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The Service Alias of the Private Link Enabled Remote Resource which this Private Endpoint should be connected to. One of private_connection_resource_id or private_connection_resource_alias must be specified. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + PrivateConnectionResourceAlias *string `json:"privateConnectionResourceAlias,omitempty" tf:"private_connection_resource_alias,omitempty"` + + // The ID of the Private Link Enabled Remote Resource which this Private Endpoint should be connected to. One of private_connection_resource_id or private_connection_resource_alias must be specified. Changing this forces a new resource to be created. For a web app or function app slot, the parent web app should be used in this field instead of a reference to the slot itself. + // +kubebuilder:validation:Optional + PrivateConnectionResourceID *string `json:"privateConnectionResourceId,omitempty" tf:"private_connection_resource_id,omitempty"` + + // A message passed to the owner of the remote resource when the private endpoint attempts to establish the connection to the remote resource. The request message can be a maximum of 140 characters in length. Only valid if is_manual_connection is set to true. + // +kubebuilder:validation:Optional + RequestMessage *string `json:"requestMessage,omitempty" tf:"request_message,omitempty"` + + // A list of subresource names which the Private Endpoint is able to connect to. subresource_names corresponds to group_id. Possible values are detailed in the product documentation in the Subresources column. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + SubresourceNames []*string `json:"subresourceNames,omitempty" tf:"subresource_names,omitempty"` +} + +type RecordSetsInitParameters struct { +} + +type RecordSetsObservation struct { + + // The fully qualified domain name to the private_dns_zone. + Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"` + + // A list of all IP Addresses that map to the private_dns_zone fqdn. + IPAddresses []*string `json:"ipAddresses,omitempty" tf:"ip_addresses,omitempty"` + + // The name of the Private DNS Zone that the config belongs to. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The time to live for each connection to the private_dns_zone. + TTL *float64 `json:"ttl,omitempty" tf:"ttl,omitempty"` + + // The type of DNS record. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type RecordSetsParameters struct { +} + +// PrivateEndpointSpec defines the desired state of PrivateEndpoint +type PrivateEndpointSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider PrivateEndpointParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider PrivateEndpointInitParameters `json:"initProvider,omitempty"` +} + +// PrivateEndpointStatus defines the observed state of PrivateEndpoint. +type PrivateEndpointStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider PrivateEndpointObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// PrivateEndpoint is the Schema for the PrivateEndpoints API. Manages a Private Endpoint. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type PrivateEndpoint struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.privateServiceConnection) || (has(self.initProvider) && has(self.initProvider.privateServiceConnection))",message="spec.forProvider.privateServiceConnection is a required parameter" + Spec PrivateEndpointSpec `json:"spec"` + Status PrivateEndpointStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// PrivateEndpointList contains a list of PrivateEndpoints +type PrivateEndpointList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []PrivateEndpoint `json:"items"` +} + +// Repository type metadata. +var ( + PrivateEndpoint_Kind = "PrivateEndpoint" + PrivateEndpoint_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: PrivateEndpoint_Kind}.String() + PrivateEndpoint_KindAPIVersion = PrivateEndpoint_Kind + "." 
+ CRDGroupVersion.String() + PrivateEndpoint_GroupVersionKind = CRDGroupVersion.WithKind(PrivateEndpoint_Kind) +) + +func init() { + SchemeBuilder.Register(&PrivateEndpoint{}, &PrivateEndpointList{}) +} diff --git a/apis/network/v1beta2/zz_profile_terraformed.go b/apis/network/v1beta2/zz_profile_terraformed.go new file mode 100755 index 000000000..e224b2a65 --- /dev/null +++ b/apis/network/v1beta2/zz_profile_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Profile +func (mg *Profile) GetTerraformResourceType() string { + return "azurerm_network_profile" +} + +// GetConnectionDetailsMapping for this Profile +func (tr *Profile) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Profile +func (tr *Profile) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Profile +func (tr *Profile) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Profile +func (tr *Profile) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Profile +func (tr *Profile) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := 
map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Profile +func (tr *Profile) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Profile +func (tr *Profile) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Profile +func (tr *Profile) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Profile using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *Profile) LateInitialize(attrs []byte) (bool, error) { + params := &ProfileParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Profile) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/network/v1beta2/zz_profile_types.go b/apis/network/v1beta2/zz_profile_types.go new file mode 100755 index 000000000..6b99aa387 --- /dev/null +++ b/apis/network/v1beta2/zz_profile_types.go @@ -0,0 +1,218 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ContainerNetworkInterfaceIPConfigurationInitParameters struct { + + // Specifies the name of the Network Profile. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Reference to the subnet associated with the IP Configuration. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. 
+ // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` +} + +type ContainerNetworkInterfaceIPConfigurationObservation struct { + + // Specifies the name of the Network Profile. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Reference to the subnet associated with the IP Configuration. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` +} + +type ContainerNetworkInterfaceIPConfigurationParameters struct { + + // Specifies the name of the Network Profile. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Reference to the subnet associated with the IP Configuration. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` +} + +type ContainerNetworkInterfaceInitParameters struct { + + // One or more ip_configuration blocks as documented below. + IPConfiguration []ContainerNetworkInterfaceIPConfigurationInitParameters `json:"ipConfiguration,omitempty" tf:"ip_configuration,omitempty"` + + // Specifies the name of the IP Configuration. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type ContainerNetworkInterfaceObservation struct { + + // One or more ip_configuration blocks as documented below. + IPConfiguration []ContainerNetworkInterfaceIPConfigurationObservation `json:"ipConfiguration,omitempty" tf:"ip_configuration,omitempty"` + + // Specifies the name of the IP Configuration. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type ContainerNetworkInterfaceParameters struct { + + // One or more ip_configuration blocks as documented below. + // +kubebuilder:validation:Optional + IPConfiguration []ContainerNetworkInterfaceIPConfigurationParameters `json:"ipConfiguration" tf:"ip_configuration,omitempty"` + + // Specifies the name of the IP Configuration. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type ProfileInitParameters struct { + + // A container_network_interface block as documented below. + ContainerNetworkInterface *ContainerNetworkInterfaceInitParameters `json:"containerNetworkInterface,omitempty" tf:"container_network_interface,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ProfileObservation struct { + + // A container_network_interface block as documented below. + ContainerNetworkInterface *ContainerNetworkInterfaceObservation `json:"containerNetworkInterface,omitempty" tf:"container_network_interface,omitempty"` + + // A list of Container Network Interface IDs. + ContainerNetworkInterfaceIds []*string `json:"containerNetworkInterfaceIds,omitempty" tf:"container_network_interface_ids,omitempty"` + + // The ID of the Network Profile. 
+ ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the resource group in which to create the resource. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ProfileParameters struct { + + // A container_network_interface block as documented below. + // +kubebuilder:validation:Optional + ContainerNetworkInterface *ContainerNetworkInterfaceParameters `json:"containerNetworkInterface,omitempty" tf:"container_network_interface,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the resource group in which to create the resource. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A mapping of tags to assign to the resource. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +// ProfileSpec defines the desired state of Profile +type ProfileSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ProfileParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ProfileInitParameters `json:"initProvider,omitempty"` +} + +// ProfileStatus defines the observed state of Profile. +type ProfileStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ProfileObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Profile is the Schema for the Profiles API. Manages a Network Profile. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type Profile struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.containerNetworkInterface) || (has(self.initProvider) && has(self.initProvider.containerNetworkInterface))",message="spec.forProvider.containerNetworkInterface is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + Spec ProfileSpec `json:"spec"` + Status ProfileStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ProfileList contains a list of Profiles +type ProfileList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Profile `json:"items"` +} + +// Repository type metadata. +var ( + Profile_Kind = "Profile" + Profile_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Profile_Kind}.String() + Profile_KindAPIVersion = Profile_Kind + "." 
+ CRDGroupVersion.String() + Profile_GroupVersionKind = CRDGroupVersion.WithKind(Profile_Kind) +) + +func init() { + SchemeBuilder.Register(&Profile{}, &ProfileList{}) +} diff --git a/apis/network/v1beta2/zz_subnet_terraformed.go b/apis/network/v1beta2/zz_subnet_terraformed.go new file mode 100755 index 000000000..a4072e0a3 --- /dev/null +++ b/apis/network/v1beta2/zz_subnet_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Subnet +func (mg *Subnet) GetTerraformResourceType() string { + return "azurerm_subnet" +} + +// GetConnectionDetailsMapping for this Subnet +func (tr *Subnet) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Subnet +func (tr *Subnet) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Subnet +func (tr *Subnet) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Subnet +func (tr *Subnet) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Subnet +func (tr *Subnet) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, 
&base) +} + +// SetParameters for this Subnet +func (tr *Subnet) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Subnet +func (tr *Subnet) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Subnet +func (tr *Subnet) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Subnet using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *Subnet) LateInitialize(attrs []byte) (bool, error) { + params := &SubnetParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Subnet) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/network/v1beta2/zz_subnet_types.go b/apis/network/v1beta2/zz_subnet_types.go new file mode 100755 index 000000000..8b26104ea --- /dev/null +++ b/apis/network/v1beta2/zz_subnet_types.go @@ -0,0 +1,257 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DelegationInitParameters struct { + + // A name for this delegation. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A service_delegation block as defined below. + ServiceDelegation *ServiceDelegationInitParameters `json:"serviceDelegation,omitempty" tf:"service_delegation,omitempty"` +} + +type DelegationObservation struct { + + // A name for this delegation. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A service_delegation block as defined below. + ServiceDelegation *ServiceDelegationObservation `json:"serviceDelegation,omitempty" tf:"service_delegation,omitempty"` +} + +type DelegationParameters struct { + + // A name for this delegation. 
+ // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // A service_delegation block as defined below. + // +kubebuilder:validation:Optional + ServiceDelegation *ServiceDelegationParameters `json:"serviceDelegation" tf:"service_delegation,omitempty"` +} + +type ServiceDelegationInitParameters struct { + + // A list of Actions which should be delegated. This list is specific to the service to delegate to. Possible values are Microsoft.Network/networkinterfaces/*, Microsoft.Network/publicIPAddresses/join/action, Microsoft.Network/publicIPAddresses/read, Microsoft.Network/virtualNetworks/read, Microsoft.Network/virtualNetworks/subnets/action, Microsoft.Network/virtualNetworks/subnets/join/action, Microsoft.Network/virtualNetworks/subnets/prepareNetworkPolicies/action, and Microsoft.Network/virtualNetworks/subnets/unprepareNetworkPolicies/action. + Actions []*string `json:"actions,omitempty" tf:"actions,omitempty"` + + // The name of service to delegate to. 
Possible values are GitHub.Network/networkSettings, Microsoft.ApiManagement/service, Microsoft.Apollo/npu, Microsoft.App/environments, Microsoft.App/testClients, Microsoft.AVS/PrivateClouds, Microsoft.AzureCosmosDB/clusters, Microsoft.BareMetal/AzureHostedService, Microsoft.BareMetal/AzureHPC, Microsoft.BareMetal/AzurePaymentHSM, Microsoft.BareMetal/AzureVMware, Microsoft.BareMetal/CrayServers, Microsoft.BareMetal/MonitoringServers, Microsoft.Batch/batchAccounts, Microsoft.CloudTest/hostedpools, Microsoft.CloudTest/images, Microsoft.CloudTest/pools, Microsoft.Codespaces/plans, Microsoft.ContainerInstance/containerGroups, Microsoft.ContainerService/managedClusters, Microsoft.ContainerService/TestClients, Microsoft.Databricks/workspaces, Microsoft.DBforMySQL/flexibleServers, Microsoft.DBforMySQL/servers, Microsoft.DBforMySQL/serversv2, Microsoft.DBforPostgreSQL/flexibleServers, Microsoft.DBforPostgreSQL/serversv2, Microsoft.DBforPostgreSQL/singleServers, Microsoft.DelegatedNetwork/controller, Microsoft.DevCenter/networkConnection, Microsoft.DocumentDB/cassandraClusters, Microsoft.Fidalgo/networkSettings, Microsoft.HardwareSecurityModules/dedicatedHSMs, Microsoft.Kusto/clusters, Microsoft.LabServices/labplans, Microsoft.Logic/integrationServiceEnvironments, Microsoft.MachineLearningServices/workspaces, Microsoft.Netapp/volumes, Microsoft.Network/dnsResolvers, Microsoft.Network/managedResolvers, Microsoft.Network/fpgaNetworkInterfaces, Microsoft.Network/networkWatchers., Microsoft.Network/virtualNetworkGateways, Microsoft.Orbital/orbitalGateways, Microsoft.PowerPlatform/enterprisePolicies, Microsoft.PowerPlatform/vnetaccesslinks, Microsoft.ServiceFabricMesh/networks, Microsoft.ServiceNetworking/trafficControllers, Microsoft.Singularity/accounts/networks, Microsoft.Singularity/accounts/npu, Microsoft.Sql/managedInstances, Microsoft.Sql/managedInstancesOnebox, Microsoft.Sql/managedInstancesStage, Microsoft.Sql/managedInstancesTest, Microsoft.Sql/servers, 
Microsoft.StoragePool/diskPools, Microsoft.StreamAnalytics/streamingJobs, Microsoft.Synapse/workspaces, Microsoft.Web/hostingEnvironments, Microsoft.Web/serverFarms, NGINX.NGINXPLUS/nginxDeployments, PaloAltoNetworks.Cloudngfw/firewalls and Qumulo.Storage/fileSystems. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type ServiceDelegationObservation struct { + + // A list of Actions which should be delegated. This list is specific to the service to delegate to. Possible values are Microsoft.Network/networkinterfaces/*, Microsoft.Network/publicIPAddresses/join/action, Microsoft.Network/publicIPAddresses/read, Microsoft.Network/virtualNetworks/read, Microsoft.Network/virtualNetworks/subnets/action, Microsoft.Network/virtualNetworks/subnets/join/action, Microsoft.Network/virtualNetworks/subnets/prepareNetworkPolicies/action, and Microsoft.Network/virtualNetworks/subnets/unprepareNetworkPolicies/action. + Actions []*string `json:"actions,omitempty" tf:"actions,omitempty"` + + // The name of service to delegate to. 
Possible values are GitHub.Network/networkSettings, Microsoft.ApiManagement/service, Microsoft.Apollo/npu, Microsoft.App/environments, Microsoft.App/testClients, Microsoft.AVS/PrivateClouds, Microsoft.AzureCosmosDB/clusters, Microsoft.BareMetal/AzureHostedService, Microsoft.BareMetal/AzureHPC, Microsoft.BareMetal/AzurePaymentHSM, Microsoft.BareMetal/AzureVMware, Microsoft.BareMetal/CrayServers, Microsoft.BareMetal/MonitoringServers, Microsoft.Batch/batchAccounts, Microsoft.CloudTest/hostedpools, Microsoft.CloudTest/images, Microsoft.CloudTest/pools, Microsoft.Codespaces/plans, Microsoft.ContainerInstance/containerGroups, Microsoft.ContainerService/managedClusters, Microsoft.ContainerService/TestClients, Microsoft.Databricks/workspaces, Microsoft.DBforMySQL/flexibleServers, Microsoft.DBforMySQL/servers, Microsoft.DBforMySQL/serversv2, Microsoft.DBforPostgreSQL/flexibleServers, Microsoft.DBforPostgreSQL/serversv2, Microsoft.DBforPostgreSQL/singleServers, Microsoft.DelegatedNetwork/controller, Microsoft.DevCenter/networkConnection, Microsoft.DocumentDB/cassandraClusters, Microsoft.Fidalgo/networkSettings, Microsoft.HardwareSecurityModules/dedicatedHSMs, Microsoft.Kusto/clusters, Microsoft.LabServices/labplans, Microsoft.Logic/integrationServiceEnvironments, Microsoft.MachineLearningServices/workspaces, Microsoft.Netapp/volumes, Microsoft.Network/dnsResolvers, Microsoft.Network/managedResolvers, Microsoft.Network/fpgaNetworkInterfaces, Microsoft.Network/networkWatchers., Microsoft.Network/virtualNetworkGateways, Microsoft.Orbital/orbitalGateways, Microsoft.PowerPlatform/enterprisePolicies, Microsoft.PowerPlatform/vnetaccesslinks, Microsoft.ServiceFabricMesh/networks, Microsoft.ServiceNetworking/trafficControllers, Microsoft.Singularity/accounts/networks, Microsoft.Singularity/accounts/npu, Microsoft.Sql/managedInstances, Microsoft.Sql/managedInstancesOnebox, Microsoft.Sql/managedInstancesStage, Microsoft.Sql/managedInstancesTest, Microsoft.Sql/servers, 
Microsoft.StoragePool/diskPools, Microsoft.StreamAnalytics/streamingJobs, Microsoft.Synapse/workspaces, Microsoft.Web/hostingEnvironments, Microsoft.Web/serverFarms, NGINX.NGINXPLUS/nginxDeployments, PaloAltoNetworks.Cloudngfw/firewalls and Qumulo.Storage/fileSystems. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type ServiceDelegationParameters struct { + + // A list of Actions which should be delegated. This list is specific to the service to delegate to. Possible values are Microsoft.Network/networkinterfaces/*, Microsoft.Network/publicIPAddresses/join/action, Microsoft.Network/publicIPAddresses/read, Microsoft.Network/virtualNetworks/read, Microsoft.Network/virtualNetworks/subnets/action, Microsoft.Network/virtualNetworks/subnets/join/action, Microsoft.Network/virtualNetworks/subnets/prepareNetworkPolicies/action, and Microsoft.Network/virtualNetworks/subnets/unprepareNetworkPolicies/action. + // +kubebuilder:validation:Optional + Actions []*string `json:"actions,omitempty" tf:"actions,omitempty"` + + // The name of service to delegate to. 
Possible values are GitHub.Network/networkSettings, Microsoft.ApiManagement/service, Microsoft.Apollo/npu, Microsoft.App/environments, Microsoft.App/testClients, Microsoft.AVS/PrivateClouds, Microsoft.AzureCosmosDB/clusters, Microsoft.BareMetal/AzureHostedService, Microsoft.BareMetal/AzureHPC, Microsoft.BareMetal/AzurePaymentHSM, Microsoft.BareMetal/AzureVMware, Microsoft.BareMetal/CrayServers, Microsoft.BareMetal/MonitoringServers, Microsoft.Batch/batchAccounts, Microsoft.CloudTest/hostedpools, Microsoft.CloudTest/images, Microsoft.CloudTest/pools, Microsoft.Codespaces/plans, Microsoft.ContainerInstance/containerGroups, Microsoft.ContainerService/managedClusters, Microsoft.ContainerService/TestClients, Microsoft.Databricks/workspaces, Microsoft.DBforMySQL/flexibleServers, Microsoft.DBforMySQL/servers, Microsoft.DBforMySQL/serversv2, Microsoft.DBforPostgreSQL/flexibleServers, Microsoft.DBforPostgreSQL/serversv2, Microsoft.DBforPostgreSQL/singleServers, Microsoft.DelegatedNetwork/controller, Microsoft.DevCenter/networkConnection, Microsoft.DocumentDB/cassandraClusters, Microsoft.Fidalgo/networkSettings, Microsoft.HardwareSecurityModules/dedicatedHSMs, Microsoft.Kusto/clusters, Microsoft.LabServices/labplans, Microsoft.Logic/integrationServiceEnvironments, Microsoft.MachineLearningServices/workspaces, Microsoft.Netapp/volumes, Microsoft.Network/dnsResolvers, Microsoft.Network/managedResolvers, Microsoft.Network/fpgaNetworkInterfaces, Microsoft.Network/networkWatchers., Microsoft.Network/virtualNetworkGateways, Microsoft.Orbital/orbitalGateways, Microsoft.PowerPlatform/enterprisePolicies, Microsoft.PowerPlatform/vnetaccesslinks, Microsoft.ServiceFabricMesh/networks, Microsoft.ServiceNetworking/trafficControllers, Microsoft.Singularity/accounts/networks, Microsoft.Singularity/accounts/npu, Microsoft.Sql/managedInstances, Microsoft.Sql/managedInstancesOnebox, Microsoft.Sql/managedInstancesStage, Microsoft.Sql/managedInstancesTest, Microsoft.Sql/servers, 
Microsoft.StoragePool/diskPools, Microsoft.StreamAnalytics/streamingJobs, Microsoft.Synapse/workspaces, Microsoft.Web/hostingEnvironments, Microsoft.Web/serverFarms, NGINX.NGINXPLUS/nginxDeployments, PaloAltoNetworks.Cloudngfw/firewalls and Qumulo.Storage/fileSystems. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type SubnetInitParameters struct { + + // The address prefixes to use for the subnet. + AddressPrefixes []*string `json:"addressPrefixes,omitempty" tf:"address_prefixes,omitempty"` + + // One or more delegation blocks as defined below. + Delegation []DelegationInitParameters `json:"delegation,omitempty" tf:"delegation,omitempty"` + + EnforcePrivateLinkEndpointNetworkPolicies *bool `json:"enforcePrivateLinkEndpointNetworkPolicies,omitempty" tf:"enforce_private_link_endpoint_network_policies,omitempty"` + + EnforcePrivateLinkServiceNetworkPolicies *bool `json:"enforcePrivateLinkServiceNetworkPolicies,omitempty" tf:"enforce_private_link_service_network_policies,omitempty"` + + // Enable or Disable network policies for the private endpoint on the subnet. Setting this to true will Enable the policy and setting this to false will Disable the policy. Defaults to true. + PrivateEndpointNetworkPoliciesEnabled *bool `json:"privateEndpointNetworkPoliciesEnabled,omitempty" tf:"private_endpoint_network_policies_enabled,omitempty"` + + // Enable or Disable network policies for the private link service on the subnet. Setting this to true will Enable the policy and setting this to false will Disable the policy. Defaults to true. + PrivateLinkServiceNetworkPoliciesEnabled *bool `json:"privateLinkServiceNetworkPoliciesEnabled,omitempty" tf:"private_link_service_network_policies_enabled,omitempty"` + + // The list of IDs of Service Endpoint Policies to associate with the subnet. 
+ // +listType=set + ServiceEndpointPolicyIds []*string `json:"serviceEndpointPolicyIds,omitempty" tf:"service_endpoint_policy_ids,omitempty"` + + // The list of Service endpoints to associate with the subnet. Possible values include: Microsoft.AzureActiveDirectory, Microsoft.AzureCosmosDB, Microsoft.ContainerRegistry, Microsoft.EventHub, Microsoft.KeyVault, Microsoft.ServiceBus, Microsoft.Sql, Microsoft.Storage, Microsoft.Storage.Global and Microsoft.Web. + // +listType=set + ServiceEndpoints []*string `json:"serviceEndpoints,omitempty" tf:"service_endpoints,omitempty"` +} + +type SubnetObservation struct { + + // The address prefixes to use for the subnet. + AddressPrefixes []*string `json:"addressPrefixes,omitempty" tf:"address_prefixes,omitempty"` + + // One or more delegation blocks as defined below. + Delegation []DelegationObservation `json:"delegation,omitempty" tf:"delegation,omitempty"` + + EnforcePrivateLinkEndpointNetworkPolicies *bool `json:"enforcePrivateLinkEndpointNetworkPolicies,omitempty" tf:"enforce_private_link_endpoint_network_policies,omitempty"` + + EnforcePrivateLinkServiceNetworkPolicies *bool `json:"enforcePrivateLinkServiceNetworkPolicies,omitempty" tf:"enforce_private_link_service_network_policies,omitempty"` + + // The subnet ID. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Enable or Disable network policies for the private endpoint on the subnet. Setting this to true will Enable the policy and setting this to false will Disable the policy. Defaults to true. + PrivateEndpointNetworkPoliciesEnabled *bool `json:"privateEndpointNetworkPoliciesEnabled,omitempty" tf:"private_endpoint_network_policies_enabled,omitempty"` + + // Enable or Disable network policies for the private link service on the subnet. Setting this to true will Enable the policy and setting this to false will Disable the policy. Defaults to true. 
+ PrivateLinkServiceNetworkPoliciesEnabled *bool `json:"privateLinkServiceNetworkPoliciesEnabled,omitempty" tf:"private_link_service_network_policies_enabled,omitempty"` + + // The name of the resource group in which to create the subnet. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // The list of IDs of Service Endpoint Policies to associate with the subnet. + // +listType=set + ServiceEndpointPolicyIds []*string `json:"serviceEndpointPolicyIds,omitempty" tf:"service_endpoint_policy_ids,omitempty"` + + // The list of Service endpoints to associate with the subnet. Possible values include: Microsoft.AzureActiveDirectory, Microsoft.AzureCosmosDB, Microsoft.ContainerRegistry, Microsoft.EventHub, Microsoft.KeyVault, Microsoft.ServiceBus, Microsoft.Sql, Microsoft.Storage, Microsoft.Storage.Global and Microsoft.Web. + // +listType=set + ServiceEndpoints []*string `json:"serviceEndpoints,omitempty" tf:"service_endpoints,omitempty"` + + // The name of the virtual network to which to attach the subnet. Changing this forces a new resource to be created. + VirtualNetworkName *string `json:"virtualNetworkName,omitempty" tf:"virtual_network_name,omitempty"` +} + +type SubnetParameters struct { + + // The address prefixes to use for the subnet. + // +kubebuilder:validation:Optional + AddressPrefixes []*string `json:"addressPrefixes,omitempty" tf:"address_prefixes,omitempty"` + + // One or more delegation blocks as defined below. 
+ // +kubebuilder:validation:Optional + Delegation []DelegationParameters `json:"delegation,omitempty" tf:"delegation,omitempty"` + + // +kubebuilder:validation:Optional + EnforcePrivateLinkEndpointNetworkPolicies *bool `json:"enforcePrivateLinkEndpointNetworkPolicies,omitempty" tf:"enforce_private_link_endpoint_network_policies,omitempty"` + + // +kubebuilder:validation:Optional + EnforcePrivateLinkServiceNetworkPolicies *bool `json:"enforcePrivateLinkServiceNetworkPolicies,omitempty" tf:"enforce_private_link_service_network_policies,omitempty"` + + // Enable or Disable network policies for the private endpoint on the subnet. Setting this to true will Enable the policy and setting this to false will Disable the policy. Defaults to true. + // +kubebuilder:validation:Optional + PrivateEndpointNetworkPoliciesEnabled *bool `json:"privateEndpointNetworkPoliciesEnabled,omitempty" tf:"private_endpoint_network_policies_enabled,omitempty"` + + // Enable or Disable network policies for the private link service on the subnet. Setting this to true will Enable the policy and setting this to false will Disable the policy. Defaults to true. + // +kubebuilder:validation:Optional + PrivateLinkServiceNetworkPoliciesEnabled *bool `json:"privateLinkServiceNetworkPoliciesEnabled,omitempty" tf:"private_link_service_network_policies_enabled,omitempty"` + + // The name of the resource group in which to create the subnet. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. 
+ // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // The list of IDs of Service Endpoint Policies to associate with the subnet. + // +kubebuilder:validation:Optional + // +listType=set + ServiceEndpointPolicyIds []*string `json:"serviceEndpointPolicyIds,omitempty" tf:"service_endpoint_policy_ids,omitempty"` + + // The list of Service endpoints to associate with the subnet. Possible values include: Microsoft.AzureActiveDirectory, Microsoft.AzureCosmosDB, Microsoft.ContainerRegistry, Microsoft.EventHub, Microsoft.KeyVault, Microsoft.ServiceBus, Microsoft.Sql, Microsoft.Storage, Microsoft.Storage.Global and Microsoft.Web. + // +kubebuilder:validation:Optional + // +listType=set + ServiceEndpoints []*string `json:"serviceEndpoints,omitempty" tf:"service_endpoints,omitempty"` + + // The name of the virtual network to which to attach the subnet. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.VirtualNetwork + // +kubebuilder:validation:Optional + VirtualNetworkName *string `json:"virtualNetworkName,omitempty" tf:"virtual_network_name,omitempty"` + + // Reference to a VirtualNetwork in network to populate virtualNetworkName. + // +kubebuilder:validation:Optional + VirtualNetworkNameRef *v1.Reference `json:"virtualNetworkNameRef,omitempty" tf:"-"` + + // Selector for a VirtualNetwork in network to populate virtualNetworkName. + // +kubebuilder:validation:Optional + VirtualNetworkNameSelector *v1.Selector `json:"virtualNetworkNameSelector,omitempty" tf:"-"` +} + +// SubnetSpec defines the desired state of Subnet +type SubnetSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider SubnetParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. 
+ // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider SubnetInitParameters `json:"initProvider,omitempty"` +} + +// SubnetStatus defines the observed state of Subnet. +type SubnetStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SubnetObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Subnet is the Schema for the Subnets API. Manages a subnet. Subnets represent network segments within the IP space defined by the virtual network. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type Subnet struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.addressPrefixes) || (has(self.initProvider) && has(self.initProvider.addressPrefixes))",message="spec.forProvider.addressPrefixes is a required parameter" + Spec SubnetSpec `json:"spec"` + Status SubnetStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SubnetList contains a list of Subnets +type SubnetList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Subnet `json:"items"` +} + +// Repository type metadata. +var ( + Subnet_Kind = "Subnet" + Subnet_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Subnet_Kind}.String() + Subnet_KindAPIVersion = Subnet_Kind + "." 
+ CRDGroupVersion.String() + Subnet_GroupVersionKind = CRDGroupVersion.WithKind(Subnet_Kind) +) + +func init() { + SchemeBuilder.Register(&Subnet{}, &SubnetList{}) +} diff --git a/apis/network/v1beta2/zz_trafficmanagerprofile_terraformed.go b/apis/network/v1beta2/zz_trafficmanagerprofile_terraformed.go new file mode 100755 index 000000000..89feae4b1 --- /dev/null +++ b/apis/network/v1beta2/zz_trafficmanagerprofile_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this TrafficManagerProfile +func (mg *TrafficManagerProfile) GetTerraformResourceType() string { + return "azurerm_traffic_manager_profile" +} + +// GetConnectionDetailsMapping for this TrafficManagerProfile +func (tr *TrafficManagerProfile) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this TrafficManagerProfile +func (tr *TrafficManagerProfile) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this TrafficManagerProfile +func (tr *TrafficManagerProfile) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this TrafficManagerProfile +func (tr *TrafficManagerProfile) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this TrafficManagerProfile +func 
(tr *TrafficManagerProfile) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this TrafficManagerProfile +func (tr *TrafficManagerProfile) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this TrafficManagerProfile +func (tr *TrafficManagerProfile) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this TrafficManagerProfile +func (tr *TrafficManagerProfile) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this TrafficManagerProfile using its observed tfState. 
+// returns True if there are any spec changes for the resource. +func (tr *TrafficManagerProfile) LateInitialize(attrs []byte) (bool, error) { + params := &TrafficManagerProfileParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *TrafficManagerProfile) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/network/v1beta2/zz_trafficmanagerprofile_types.go b/apis/network/v1beta2/zz_trafficmanagerprofile_types.go new file mode 100755 index 000000000..112d9819e --- /dev/null +++ b/apis/network/v1beta2/zz_trafficmanagerprofile_types.go @@ -0,0 +1,328 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CustomHeaderInitParameters struct { + + // The name of the Traffic Manager profile. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The value of custom header. Applicable for HTTP and HTTPS protocol. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type CustomHeaderObservation struct { + + // The name of the Traffic Manager profile. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The value of custom header. Applicable for HTTP and HTTPS protocol. 
+ Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type CustomHeaderParameters struct { + + // The name of the Traffic Manager profile. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The value of custom header. Applicable for HTTP and HTTPS protocol. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type DNSConfigInitParameters struct { + + // The relative domain name, this is combined with the domain name used by Traffic Manager to form the FQDN which is exported as documented below. Changing this forces a new resource to be created. + RelativeName *string `json:"relativeName,omitempty" tf:"relative_name,omitempty"` + + // The TTL value of the Profile used by Local DNS resolvers and clients. + TTL *float64 `json:"ttl,omitempty" tf:"ttl,omitempty"` +} + +type DNSConfigObservation struct { + + // The relative domain name, this is combined with the domain name used by Traffic Manager to form the FQDN which is exported as documented below. Changing this forces a new resource to be created. + RelativeName *string `json:"relativeName,omitempty" tf:"relative_name,omitempty"` + + // The TTL value of the Profile used by Local DNS resolvers and clients. + TTL *float64 `json:"ttl,omitempty" tf:"ttl,omitempty"` +} + +type DNSConfigParameters struct { + + // The relative domain name, this is combined with the domain name used by Traffic Manager to form the FQDN which is exported as documented below. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + RelativeName *string `json:"relativeName" tf:"relative_name,omitempty"` + + // The TTL value of the Profile used by Local DNS resolvers and clients. 
+ // +kubebuilder:validation:Optional + TTL *float64 `json:"ttl" tf:"ttl,omitempty"` +} + +type MonitorConfigInitParameters struct { + + // One or more custom_header blocks as defined below. + CustomHeader []CustomHeaderInitParameters `json:"customHeader,omitempty" tf:"custom_header,omitempty"` + + // A list of status code ranges in the format of 100-101. + ExpectedStatusCodeRanges []*string `json:"expectedStatusCodeRanges,omitempty" tf:"expected_status_code_ranges,omitempty"` + + // The interval used to check the endpoint health from a Traffic Manager probing agent. You can specify two values here: 30 (normal probing) and 10 (fast probing). The default value is 30. + IntervalInSeconds *float64 `json:"intervalInSeconds,omitempty" tf:"interval_in_seconds,omitempty"` + + // The path used by the monitoring checks. Required when protocol is set to HTTP or HTTPS - cannot be set when protocol is set to TCP. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // The port number used by the monitoring checks. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // The protocol used by the monitoring checks, supported values are HTTP, HTTPS and TCP. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // The amount of time the Traffic Manager probing agent should wait before considering that check a failure when a health check probe is sent to the endpoint. If interval_in_seconds is set to 30, then timeout_in_seconds can be between 5 and 10. The default value is 10. If interval_in_seconds is set to 10, then valid values are between 5 and 9 and timeout_in_seconds is required. + TimeoutInSeconds *float64 `json:"timeoutInSeconds,omitempty" tf:"timeout_in_seconds,omitempty"` + + // The number of failures a Traffic Manager probing agent tolerates before marking that endpoint as unhealthy. Valid values are between 0 and 9. 
The default value is 3 + ToleratedNumberOfFailures *float64 `json:"toleratedNumberOfFailures,omitempty" tf:"tolerated_number_of_failures,omitempty"` +} + +type MonitorConfigObservation struct { + + // One or more custom_header blocks as defined below. + CustomHeader []CustomHeaderObservation `json:"customHeader,omitempty" tf:"custom_header,omitempty"` + + // A list of status code ranges in the format of 100-101. + ExpectedStatusCodeRanges []*string `json:"expectedStatusCodeRanges,omitempty" tf:"expected_status_code_ranges,omitempty"` + + // The interval used to check the endpoint health from a Traffic Manager probing agent. You can specify two values here: 30 (normal probing) and 10 (fast probing). The default value is 30. + IntervalInSeconds *float64 `json:"intervalInSeconds,omitempty" tf:"interval_in_seconds,omitempty"` + + // The path used by the monitoring checks. Required when protocol is set to HTTP or HTTPS - cannot be set when protocol is set to TCP. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // The port number used by the monitoring checks. + Port *float64 `json:"port,omitempty" tf:"port,omitempty"` + + // The protocol used by the monitoring checks, supported values are HTTP, HTTPS and TCP. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // The amount of time the Traffic Manager probing agent should wait before considering that check a failure when a health check probe is sent to the endpoint. If interval_in_seconds is set to 30, then timeout_in_seconds can be between 5 and 10. The default value is 10. If interval_in_seconds is set to 10, then valid values are between 5 and 9 and timeout_in_seconds is required. + TimeoutInSeconds *float64 `json:"timeoutInSeconds,omitempty" tf:"timeout_in_seconds,omitempty"` + + // The number of failures a Traffic Manager probing agent tolerates before marking that endpoint as unhealthy. Valid values are between 0 and 9. 
The default value is 3 + ToleratedNumberOfFailures *float64 `json:"toleratedNumberOfFailures,omitempty" tf:"tolerated_number_of_failures,omitempty"` +} + +type MonitorConfigParameters struct { + + // One or more custom_header blocks as defined below. + // +kubebuilder:validation:Optional + CustomHeader []CustomHeaderParameters `json:"customHeader,omitempty" tf:"custom_header,omitempty"` + + // A list of status code ranges in the format of 100-101. + // +kubebuilder:validation:Optional + ExpectedStatusCodeRanges []*string `json:"expectedStatusCodeRanges,omitempty" tf:"expected_status_code_ranges,omitempty"` + + // The interval used to check the endpoint health from a Traffic Manager probing agent. You can specify two values here: 30 (normal probing) and 10 (fast probing). The default value is 30. + // +kubebuilder:validation:Optional + IntervalInSeconds *float64 `json:"intervalInSeconds,omitempty" tf:"interval_in_seconds,omitempty"` + + // The path used by the monitoring checks. Required when protocol is set to HTTP or HTTPS - cannot be set when protocol is set to TCP. + // +kubebuilder:validation:Optional + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // The port number used by the monitoring checks. + // +kubebuilder:validation:Optional + Port *float64 `json:"port" tf:"port,omitempty"` + + // The protocol used by the monitoring checks, supported values are HTTP, HTTPS and TCP. + // +kubebuilder:validation:Optional + Protocol *string `json:"protocol" tf:"protocol,omitempty"` + + // The amount of time the Traffic Manager probing agent should wait before considering that check a failure when a health check probe is sent to the endpoint. If interval_in_seconds is set to 30, then timeout_in_seconds can be between 5 and 10. The default value is 10. If interval_in_seconds is set to 10, then valid values are between 5 and 9 and timeout_in_seconds is required. 
+ // +kubebuilder:validation:Optional + TimeoutInSeconds *float64 `json:"timeoutInSeconds,omitempty" tf:"timeout_in_seconds,omitempty"` + + // The number of failures a Traffic Manager probing agent tolerates before marking that endpoint as unhealthy. Valid values are between 0 and 9. The default value is 3 + // +kubebuilder:validation:Optional + ToleratedNumberOfFailures *float64 `json:"toleratedNumberOfFailures,omitempty" tf:"tolerated_number_of_failures,omitempty"` +} + +type TrafficManagerProfileInitParameters struct { + + // This block specifies the DNS configuration of the Profile. One dns_config block as defined below. + DNSConfig *DNSConfigInitParameters `json:"dnsConfig,omitempty" tf:"dns_config,omitempty"` + + // The amount of endpoints to return for DNS queries to this Profile. Possible values range from 1 to 8. + MaxReturn *float64 `json:"maxReturn,omitempty" tf:"max_return,omitempty"` + + // This block specifies the Endpoint monitoring configuration for the Profile. One monitor_config block as defined below. + MonitorConfig *MonitorConfigInitParameters `json:"monitorConfig,omitempty" tf:"monitor_config,omitempty"` + + // The status of the profile, can be set to either Enabled or Disabled. Defaults to Enabled. + ProfileStatus *string `json:"profileStatus,omitempty" tf:"profile_status,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies the algorithm used to route traffic. Possible values are Geographic, Weighted, Performance, Priority, Subnet and MultiValue. + TrafficRoutingMethod *string `json:"trafficRoutingMethod,omitempty" tf:"traffic_routing_method,omitempty"` + + // Indicates whether Traffic View is enabled for the Traffic Manager profile. 
+ TrafficViewEnabled *bool `json:"trafficViewEnabled,omitempty" tf:"traffic_view_enabled,omitempty"` +} + +type TrafficManagerProfileObservation struct { + + // This block specifies the DNS configuration of the Profile. One dns_config block as defined below. + DNSConfig *DNSConfigObservation `json:"dnsConfig,omitempty" tf:"dns_config,omitempty"` + + // The FQDN of the created Profile. + Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"` + + // The ID of the Traffic Manager Profile. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The amount of endpoints to return for DNS queries to this Profile. Possible values range from 1 to 8. + MaxReturn *float64 `json:"maxReturn,omitempty" tf:"max_return,omitempty"` + + // This block specifies the Endpoint monitoring configuration for the Profile. One monitor_config block as defined below. + MonitorConfig *MonitorConfigObservation `json:"monitorConfig,omitempty" tf:"monitor_config,omitempty"` + + // The status of the profile, can be set to either Enabled or Disabled. Defaults to Enabled. + ProfileStatus *string `json:"profileStatus,omitempty" tf:"profile_status,omitempty"` + + // The name of the resource group in which to create the Traffic Manager profile. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies the algorithm used to route traffic. Possible values are Geographic, Weighted, Performance, Priority, Subnet and MultiValue. + TrafficRoutingMethod *string `json:"trafficRoutingMethod,omitempty" tf:"traffic_routing_method,omitempty"` + + // Indicates whether Traffic View is enabled for the Traffic Manager profile. 
+ TrafficViewEnabled *bool `json:"trafficViewEnabled,omitempty" tf:"traffic_view_enabled,omitempty"` +} + +type TrafficManagerProfileParameters struct { + + // This block specifies the DNS configuration of the Profile. One dns_config block as defined below. + // +kubebuilder:validation:Optional + DNSConfig *DNSConfigParameters `json:"dnsConfig,omitempty" tf:"dns_config,omitempty"` + + // The amount of endpoints to return for DNS queries to this Profile. Possible values range from 1 to 8. + // +kubebuilder:validation:Optional + MaxReturn *float64 `json:"maxReturn,omitempty" tf:"max_return,omitempty"` + + // This block specifies the Endpoint monitoring configuration for the Profile. One monitor_config block as defined below. + // +kubebuilder:validation:Optional + MonitorConfig *MonitorConfigParameters `json:"monitorConfig,omitempty" tf:"monitor_config,omitempty"` + + // The status of the profile, can be set to either Enabled or Disabled. Defaults to Enabled. + // +kubebuilder:validation:Optional + ProfileStatus *string `json:"profileStatus,omitempty" tf:"profile_status,omitempty"` + + // The name of the resource group in which to create the Traffic Manager profile. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A mapping of tags to assign to the resource. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies the algorithm used to route traffic. Possible values are Geographic, Weighted, Performance, Priority, Subnet and MultiValue. + // +kubebuilder:validation:Optional + TrafficRoutingMethod *string `json:"trafficRoutingMethod,omitempty" tf:"traffic_routing_method,omitempty"` + + // Indicates whether Traffic View is enabled for the Traffic Manager profile. + // +kubebuilder:validation:Optional + TrafficViewEnabled *bool `json:"trafficViewEnabled,omitempty" tf:"traffic_view_enabled,omitempty"` +} + +// TrafficManagerProfileSpec defines the desired state of TrafficManagerProfile +type TrafficManagerProfileSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider TrafficManagerProfileParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider TrafficManagerProfileInitParameters `json:"initProvider,omitempty"` +} + +// TrafficManagerProfileStatus defines the observed state of TrafficManagerProfile. 
+type TrafficManagerProfileStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider TrafficManagerProfileObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// TrafficManagerProfile is the Schema for the TrafficManagerProfiles API. Manages a Traffic Manager Profile. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type TrafficManagerProfile struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.dnsConfig) || (has(self.initProvider) && has(self.initProvider.dnsConfig))",message="spec.forProvider.dnsConfig is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.monitorConfig) || (has(self.initProvider) && has(self.initProvider.monitorConfig))",message="spec.forProvider.monitorConfig is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.trafficRoutingMethod) || (has(self.initProvider) && has(self.initProvider.trafficRoutingMethod))",message="spec.forProvider.trafficRoutingMethod is a required parameter" + Spec 
TrafficManagerProfileSpec `json:"spec"` + Status TrafficManagerProfileStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// TrafficManagerProfileList contains a list of TrafficManagerProfiles +type TrafficManagerProfileList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []TrafficManagerProfile `json:"items"` +} + +// Repository type metadata. +var ( + TrafficManagerProfile_Kind = "TrafficManagerProfile" + TrafficManagerProfile_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: TrafficManagerProfile_Kind}.String() + TrafficManagerProfile_KindAPIVersion = TrafficManagerProfile_Kind + "." + CRDGroupVersion.String() + TrafficManagerProfile_GroupVersionKind = CRDGroupVersion.WithKind(TrafficManagerProfile_Kind) +) + +func init() { + SchemeBuilder.Register(&TrafficManagerProfile{}, &TrafficManagerProfileList{}) +} diff --git a/apis/network/v1beta2/zz_virtualhubconnection_terraformed.go b/apis/network/v1beta2/zz_virtualhubconnection_terraformed.go new file mode 100755 index 000000000..f74d85bf1 --- /dev/null +++ b/apis/network/v1beta2/zz_virtualhubconnection_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this VirtualHubConnection +func (mg *VirtualHubConnection) GetTerraformResourceType() string { + return "azurerm_virtual_hub_connection" +} + +// GetConnectionDetailsMapping for this VirtualHubConnection +func (tr *VirtualHubConnection) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this VirtualHubConnection +func (tr *VirtualHubConnection) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this VirtualHubConnection +func (tr *VirtualHubConnection) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this VirtualHubConnection +func (tr *VirtualHubConnection) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this VirtualHubConnection +func (tr *VirtualHubConnection) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this VirtualHubConnection +func (tr *VirtualHubConnection) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this VirtualHubConnection +func (tr *VirtualHubConnection) 
GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this VirtualHubConnection +func (tr *VirtualHubConnection) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this VirtualHubConnection using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *VirtualHubConnection) LateInitialize(attrs []byte) (bool, error) { + params := &VirtualHubConnectionParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *VirtualHubConnection) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/network/v1beta2/zz_virtualhubconnection_types.go b/apis/network/v1beta2/zz_virtualhubconnection_types.go new file mode 100755 index 000000000..ed0aafdc3 --- /dev/null +++ b/apis/network/v1beta2/zz_virtualhubconnection_types.go @@ -0,0 +1,316 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type RoutingPropagatedRouteTableInitParameters struct { + + // The list of labels to assign to this route table. + // +listType=set + Labels []*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // A list of Route Table IDs to associated with this Virtual Hub Connection. + RouteTableIds []*string `json:"routeTableIds,omitempty" tf:"route_table_ids,omitempty"` +} + +type RoutingPropagatedRouteTableObservation struct { + + // The list of labels to assign to this route table. + // +listType=set + Labels []*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // A list of Route Table IDs to associated with this Virtual Hub Connection. + RouteTableIds []*string `json:"routeTableIds,omitempty" tf:"route_table_ids,omitempty"` +} + +type RoutingPropagatedRouteTableParameters struct { + + // The list of labels to assign to this route table. + // +kubebuilder:validation:Optional + // +listType=set + Labels []*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // A list of Route Table IDs to associated with this Virtual Hub Connection. 
+ // +kubebuilder:validation:Optional + RouteTableIds []*string `json:"routeTableIds,omitempty" tf:"route_table_ids,omitempty"` +} + +type StaticVnetRouteInitParameters struct { + + // A list of CIDR Ranges which should be used as Address Prefixes. + // +listType=set + AddressPrefixes []*string `json:"addressPrefixes,omitempty" tf:"address_prefixes,omitempty"` + + // The name which should be used for this Static Route. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The IP Address which should be used for the Next Hop. + NextHopIPAddress *string `json:"nextHopIpAddress,omitempty" tf:"next_hop_ip_address,omitempty"` +} + +type StaticVnetRouteObservation struct { + + // A list of CIDR Ranges which should be used as Address Prefixes. + // +listType=set + AddressPrefixes []*string `json:"addressPrefixes,omitempty" tf:"address_prefixes,omitempty"` + + // The name which should be used for this Static Route. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The IP Address which should be used for the Next Hop. + NextHopIPAddress *string `json:"nextHopIpAddress,omitempty" tf:"next_hop_ip_address,omitempty"` +} + +type StaticVnetRouteParameters struct { + + // A list of CIDR Ranges which should be used as Address Prefixes. + // +kubebuilder:validation:Optional + // +listType=set + AddressPrefixes []*string `json:"addressPrefixes,omitempty" tf:"address_prefixes,omitempty"` + + // The name which should be used for this Static Route. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The IP Address which should be used for the Next Hop. + // +kubebuilder:validation:Optional + NextHopIPAddress *string `json:"nextHopIpAddress,omitempty" tf:"next_hop_ip_address,omitempty"` +} + +type VirtualHubConnectionInitParameters struct { + + // Should Internet Security be enabled to secure internet traffic? Defaults to false. 
+ InternetSecurityEnabled *bool `json:"internetSecurityEnabled,omitempty" tf:"internet_security_enabled,omitempty"` + + // The ID of the Virtual Network which the Virtual Hub should be connected to. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.VirtualNetwork + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + RemoteVirtualNetworkID *string `json:"remoteVirtualNetworkId,omitempty" tf:"remote_virtual_network_id,omitempty"` + + // Reference to a VirtualNetwork in network to populate remoteVirtualNetworkId. + // +kubebuilder:validation:Optional + RemoteVirtualNetworkIDRef *v1.Reference `json:"remoteVirtualNetworkIdRef,omitempty" tf:"-"` + + // Selector for a VirtualNetwork in network to populate remoteVirtualNetworkId. + // +kubebuilder:validation:Optional + RemoteVirtualNetworkIDSelector *v1.Selector `json:"remoteVirtualNetworkIdSelector,omitempty" tf:"-"` + + // A routing block as defined below. + Routing *VirtualHubConnectionRoutingInitParameters `json:"routing,omitempty" tf:"routing,omitempty"` +} + +type VirtualHubConnectionObservation struct { + + // The ID of the Virtual Hub Connection. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Should Internet Security be enabled to secure internet traffic? Defaults to false. + InternetSecurityEnabled *bool `json:"internetSecurityEnabled,omitempty" tf:"internet_security_enabled,omitempty"` + + // The ID of the Virtual Network which the Virtual Hub should be connected to. Changing this forces a new resource to be created. + RemoteVirtualNetworkID *string `json:"remoteVirtualNetworkId,omitempty" tf:"remote_virtual_network_id,omitempty"` + + // A routing block as defined below. + Routing *VirtualHubConnectionRoutingObservation `json:"routing,omitempty" tf:"routing,omitempty"` + + // The ID of the Virtual Hub within which this connection should be created. 
Changing this forces a new resource to be created. + VirtualHubID *string `json:"virtualHubId,omitempty" tf:"virtual_hub_id,omitempty"` +} + +type VirtualHubConnectionParameters struct { + + // Should Internet Security be enabled to secure internet traffic? Defaults to false. + // +kubebuilder:validation:Optional + InternetSecurityEnabled *bool `json:"internetSecurityEnabled,omitempty" tf:"internet_security_enabled,omitempty"` + + // The ID of the Virtual Network which the Virtual Hub should be connected to. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.VirtualNetwork + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + RemoteVirtualNetworkID *string `json:"remoteVirtualNetworkId,omitempty" tf:"remote_virtual_network_id,omitempty"` + + // Reference to a VirtualNetwork in network to populate remoteVirtualNetworkId. + // +kubebuilder:validation:Optional + RemoteVirtualNetworkIDRef *v1.Reference `json:"remoteVirtualNetworkIdRef,omitempty" tf:"-"` + + // Selector for a VirtualNetwork in network to populate remoteVirtualNetworkId. + // +kubebuilder:validation:Optional + RemoteVirtualNetworkIDSelector *v1.Selector `json:"remoteVirtualNetworkIdSelector,omitempty" tf:"-"` + + // A routing block as defined below. + // +kubebuilder:validation:Optional + Routing *VirtualHubConnectionRoutingParameters `json:"routing,omitempty" tf:"routing,omitempty"` + + // The ID of the Virtual Hub within which this connection should be created. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.VirtualHub + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + VirtualHubID *string `json:"virtualHubId,omitempty" tf:"virtual_hub_id,omitempty"` + + // Reference to a VirtualHub in network to populate virtualHubId. + // +kubebuilder:validation:Optional + VirtualHubIDRef *v1.Reference `json:"virtualHubIdRef,omitempty" tf:"-"` + + // Selector for a VirtualHub in network to populate virtualHubId. + // +kubebuilder:validation:Optional + VirtualHubIDSelector *v1.Selector `json:"virtualHubIdSelector,omitempty" tf:"-"` +} + +type VirtualHubConnectionRoutingInitParameters struct { + + // The ID of the route table associated with this Virtual Hub connection. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.VirtualHubRouteTable + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + AssociatedRouteTableID *string `json:"associatedRouteTableId,omitempty" tf:"associated_route_table_id,omitempty"` + + // Reference to a VirtualHubRouteTable in network to populate associatedRouteTableId. + // +kubebuilder:validation:Optional + AssociatedRouteTableIDRef *v1.Reference `json:"associatedRouteTableIdRef,omitempty" tf:"-"` + + // Selector for a VirtualHubRouteTable in network to populate associatedRouteTableId. + // +kubebuilder:validation:Optional + AssociatedRouteTableIDSelector *v1.Selector `json:"associatedRouteTableIdSelector,omitempty" tf:"-"` + + // The resource ID of the Route Map associated with this Routing Configuration for inbound learned routes. + InboundRouteMapID *string `json:"inboundRouteMapId,omitempty" tf:"inbound_route_map_id,omitempty"` + + // The resource ID of the Route Map associated with this Routing Configuration for outbound advertised routes. 
+ OutboundRouteMapID *string `json:"outboundRouteMapId,omitempty" tf:"outbound_route_map_id,omitempty"` + + // A propagated_route_table block as defined below. + PropagatedRouteTable *RoutingPropagatedRouteTableInitParameters `json:"propagatedRouteTable,omitempty" tf:"propagated_route_table,omitempty"` + + // The static VNet local route override criteria that is used to determine whether NVA in spoke VNet is bypassed for traffic with destination in spoke VNet. Possible values are Contains and Equal. Defaults to Contains. Changing this forces a new resource to be created. + StaticVnetLocalRouteOverrideCriteria *string `json:"staticVnetLocalRouteOverrideCriteria,omitempty" tf:"static_vnet_local_route_override_criteria,omitempty"` + + // A static_vnet_route block as defined below. + StaticVnetRoute []StaticVnetRouteInitParameters `json:"staticVnetRoute,omitempty" tf:"static_vnet_route,omitempty"` +} + +type VirtualHubConnectionRoutingObservation struct { + + // The ID of the route table associated with this Virtual Hub connection. + AssociatedRouteTableID *string `json:"associatedRouteTableId,omitempty" tf:"associated_route_table_id,omitempty"` + + // The resource ID of the Route Map associated with this Routing Configuration for inbound learned routes. + InboundRouteMapID *string `json:"inboundRouteMapId,omitempty" tf:"inbound_route_map_id,omitempty"` + + // The resource ID of the Route Map associated with this Routing Configuration for outbound advertised routes. + OutboundRouteMapID *string `json:"outboundRouteMapId,omitempty" tf:"outbound_route_map_id,omitempty"` + + // A propagated_route_table block as defined below. + PropagatedRouteTable *RoutingPropagatedRouteTableObservation `json:"propagatedRouteTable,omitempty" tf:"propagated_route_table,omitempty"` + + // The static VNet local route override criteria that is used to determine whether NVA in spoke VNet is bypassed for traffic with destination in spoke VNet. Possible values are Contains and Equal. 
Defaults to Contains. Changing this forces a new resource to be created. + StaticVnetLocalRouteOverrideCriteria *string `json:"staticVnetLocalRouteOverrideCriteria,omitempty" tf:"static_vnet_local_route_override_criteria,omitempty"` + + // A static_vnet_route block as defined below. + StaticVnetRoute []StaticVnetRouteObservation `json:"staticVnetRoute,omitempty" tf:"static_vnet_route,omitempty"` +} + +type VirtualHubConnectionRoutingParameters struct { + + // The ID of the route table associated with this Virtual Hub connection. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.VirtualHubRouteTable + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + AssociatedRouteTableID *string `json:"associatedRouteTableId,omitempty" tf:"associated_route_table_id,omitempty"` + + // Reference to a VirtualHubRouteTable in network to populate associatedRouteTableId. + // +kubebuilder:validation:Optional + AssociatedRouteTableIDRef *v1.Reference `json:"associatedRouteTableIdRef,omitempty" tf:"-"` + + // Selector for a VirtualHubRouteTable in network to populate associatedRouteTableId. + // +kubebuilder:validation:Optional + AssociatedRouteTableIDSelector *v1.Selector `json:"associatedRouteTableIdSelector,omitempty" tf:"-"` + + // The resource ID of the Route Map associated with this Routing Configuration for inbound learned routes. + // +kubebuilder:validation:Optional + InboundRouteMapID *string `json:"inboundRouteMapId,omitempty" tf:"inbound_route_map_id,omitempty"` + + // The resource ID of the Route Map associated with this Routing Configuration for outbound advertised routes. + // +kubebuilder:validation:Optional + OutboundRouteMapID *string `json:"outboundRouteMapId,omitempty" tf:"outbound_route_map_id,omitempty"` + + // A propagated_route_table block as defined below. 
+ // +kubebuilder:validation:Optional + PropagatedRouteTable *RoutingPropagatedRouteTableParameters `json:"propagatedRouteTable,omitempty" tf:"propagated_route_table,omitempty"` + + // The static VNet local route override criteria that is used to determine whether NVA in spoke VNet is bypassed for traffic with destination in spoke VNet. Possible values are Contains and Equal. Defaults to Contains. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + StaticVnetLocalRouteOverrideCriteria *string `json:"staticVnetLocalRouteOverrideCriteria,omitempty" tf:"static_vnet_local_route_override_criteria,omitempty"` + + // A static_vnet_route block as defined below. + // +kubebuilder:validation:Optional + StaticVnetRoute []StaticVnetRouteParameters `json:"staticVnetRoute,omitempty" tf:"static_vnet_route,omitempty"` +} + +// VirtualHubConnectionSpec defines the desired state of VirtualHubConnection +type VirtualHubConnectionSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider VirtualHubConnectionParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider VirtualHubConnectionInitParameters `json:"initProvider,omitempty"` +} + +// VirtualHubConnectionStatus defines the observed state of VirtualHubConnection. 
+type VirtualHubConnectionStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider VirtualHubConnectionObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// VirtualHubConnection is the Schema for the VirtualHubConnections API. Manages a Connection for a Virtual Hub. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type VirtualHubConnection struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec VirtualHubConnectionSpec `json:"spec"` + Status VirtualHubConnectionStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// VirtualHubConnectionList contains a list of VirtualHubConnections +type VirtualHubConnectionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []VirtualHubConnection `json:"items"` +} + +// Repository type metadata. +var ( + VirtualHubConnection_Kind = "VirtualHubConnection" + VirtualHubConnection_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: VirtualHubConnection_Kind}.String() + VirtualHubConnection_KindAPIVersion = VirtualHubConnection_Kind + "." 
+ CRDGroupVersion.String() + VirtualHubConnection_GroupVersionKind = CRDGroupVersion.WithKind(VirtualHubConnection_Kind) +) + +func init() { + SchemeBuilder.Register(&VirtualHubConnection{}, &VirtualHubConnectionList{}) +} diff --git a/apis/network/v1beta2/zz_virtualnetwork_terraformed.go b/apis/network/v1beta2/zz_virtualnetwork_terraformed.go new file mode 100755 index 000000000..fb701d9a1 --- /dev/null +++ b/apis/network/v1beta2/zz_virtualnetwork_terraformed.go @@ -0,0 +1,130 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this VirtualNetwork +func (mg *VirtualNetwork) GetTerraformResourceType() string { + return "azurerm_virtual_network" +} + +// GetConnectionDetailsMapping for this VirtualNetwork +func (tr *VirtualNetwork) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this VirtualNetwork +func (tr *VirtualNetwork) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this VirtualNetwork +func (tr *VirtualNetwork) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this VirtualNetwork +func (tr *VirtualNetwork) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this VirtualNetwork +func (tr *VirtualNetwork) GetParameters() 
(map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this VirtualNetwork +func (tr *VirtualNetwork) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this VirtualNetwork +func (tr *VirtualNetwork) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this VirtualNetwork +func (tr *VirtualNetwork) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this VirtualNetwork using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *VirtualNetwork) LateInitialize(attrs []byte) (bool, error) { + params := &VirtualNetworkParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + opts = append(opts, resource.WithNameFilter("Subnet")) + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *VirtualNetwork) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/network/v1beta2/zz_virtualnetwork_types.go b/apis/network/v1beta2/zz_virtualnetwork_types.go new file mode 100755 index 000000000..f48118efc --- /dev/null +++ b/apis/network/v1beta2/zz_virtualnetwork_types.go @@ -0,0 +1,272 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type DDOSProtectionPlanInitParameters struct { + + // Enable/disable DDoS Protection Plan on Virtual Network. + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + + // The ID of DDoS Protection Plan. + ID *string `json:"id,omitempty" tf:"id,omitempty"` +} + +type DDOSProtectionPlanObservation struct { + + // Enable/disable DDoS Protection Plan on Virtual Network. + Enable *bool `json:"enable,omitempty" tf:"enable,omitempty"` + + // The ID of DDoS Protection Plan. + ID *string `json:"id,omitempty" tf:"id,omitempty"` +} + +type DDOSProtectionPlanParameters struct { + + // Enable/disable DDoS Protection Plan on Virtual Network. 
+ // +kubebuilder:validation:Optional + Enable *bool `json:"enable" tf:"enable,omitempty"` + + // The ID of DDoS Protection Plan. + // +kubebuilder:validation:Optional + ID *string `json:"id" tf:"id,omitempty"` +} + +type EncryptionInitParameters struct { + + // Specifies if the encrypted Virtual Network allows VM that does not support encryption. Possible values are DropUnencrypted and AllowUnencrypted. + Enforcement *string `json:"enforcement,omitempty" tf:"enforcement,omitempty"` +} + +type EncryptionObservation struct { + + // Specifies if the encrypted Virtual Network allows VM that does not support encryption. Possible values are DropUnencrypted and AllowUnencrypted. + Enforcement *string `json:"enforcement,omitempty" tf:"enforcement,omitempty"` +} + +type EncryptionParameters struct { + + // Specifies if the encrypted Virtual Network allows VM that does not support encryption. Possible values are DropUnencrypted and AllowUnencrypted. + // +kubebuilder:validation:Optional + Enforcement *string `json:"enforcement" tf:"enforcement,omitempty"` +} + +type VirtualNetworkInitParameters struct { + + // The address space that is used the virtual network. You can supply more than one address space. + AddressSpace []*string `json:"addressSpace,omitempty" tf:"address_space,omitempty"` + + // The BGP community attribute in format :. + BGPCommunity *string `json:"bgpCommunity,omitempty" tf:"bgp_community,omitempty"` + + // A ddos_protection_plan block as documented below. + DDOSProtectionPlan *DDOSProtectionPlanInitParameters `json:"ddosProtectionPlan,omitempty" tf:"ddos_protection_plan,omitempty"` + + // List of IP addresses of DNS servers + DNSServers []*string `json:"dnsServers,omitempty" tf:"dns_servers,omitempty"` + + // Specifies the Edge Zone within the Azure Region where this Virtual Network should exist. Changing this forces a new Virtual Network to be created. 
+ EdgeZone *string `json:"edgeZone,omitempty" tf:"edge_zone,omitempty"` + + // A encryption block as defined below. + Encryption *EncryptionInitParameters `json:"encryption,omitempty" tf:"encryption,omitempty"` + + // The flow timeout in minutes for the Virtual Network, which is used to enable connection tracking for intra-VM flows. Possible values are between 4 and 30 minutes. + FlowTimeoutInMinutes *float64 `json:"flowTimeoutInMinutes,omitempty" tf:"flow_timeout_in_minutes,omitempty"` + + // The location/region where the virtual network is created. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type VirtualNetworkObservation struct { + + // The address space that is used the virtual network. You can supply more than one address space. + AddressSpace []*string `json:"addressSpace,omitempty" tf:"address_space,omitempty"` + + // The BGP community attribute in format :. + BGPCommunity *string `json:"bgpCommunity,omitempty" tf:"bgp_community,omitempty"` + + // A ddos_protection_plan block as documented below. + DDOSProtectionPlan *DDOSProtectionPlanObservation `json:"ddosProtectionPlan,omitempty" tf:"ddos_protection_plan,omitempty"` + + // List of IP addresses of DNS servers + DNSServers []*string `json:"dnsServers,omitempty" tf:"dns_servers,omitempty"` + + // Specifies the Edge Zone within the Azure Region where this Virtual Network should exist. Changing this forces a new Virtual Network to be created. + EdgeZone *string `json:"edgeZone,omitempty" tf:"edge_zone,omitempty"` + + // A encryption block as defined below. + Encryption *EncryptionObservation `json:"encryption,omitempty" tf:"encryption,omitempty"` + + // The flow timeout in minutes for the Virtual Network, which is used to enable connection tracking for intra-VM flows. 
Possible values are between 4 and 30 minutes. + FlowTimeoutInMinutes *float64 `json:"flowTimeoutInMinutes,omitempty" tf:"flow_timeout_in_minutes,omitempty"` + + // The GUID of the virtual network. + GUID *string `json:"guid,omitempty" tf:"guid,omitempty"` + + // The virtual NetworkConfiguration ID. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The location/region where the virtual network is created. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the resource group in which to create the virtual network. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Can be specified multiple times to define multiple subnets. Each subnet block supports fields documented below. + Subnet []VirtualNetworkSubnetObservation `json:"subnet,omitempty" tf:"subnet,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type VirtualNetworkParameters struct { + + // The address space that is used the virtual network. You can supply more than one address space. + // +kubebuilder:validation:Optional + AddressSpace []*string `json:"addressSpace,omitempty" tf:"address_space,omitempty"` + + // The BGP community attribute in format :. + // +kubebuilder:validation:Optional + BGPCommunity *string `json:"bgpCommunity,omitempty" tf:"bgp_community,omitempty"` + + // A ddos_protection_plan block as documented below. 
+ // +kubebuilder:validation:Optional + DDOSProtectionPlan *DDOSProtectionPlanParameters `json:"ddosProtectionPlan,omitempty" tf:"ddos_protection_plan,omitempty"` + + // List of IP addresses of DNS servers + // +kubebuilder:validation:Optional + DNSServers []*string `json:"dnsServers,omitempty" tf:"dns_servers,omitempty"` + + // Specifies the Edge Zone within the Azure Region where this Virtual Network should exist. Changing this forces a new Virtual Network to be created. + // +kubebuilder:validation:Optional + EdgeZone *string `json:"edgeZone,omitempty" tf:"edge_zone,omitempty"` + + // A encryption block as defined below. + // +kubebuilder:validation:Optional + Encryption *EncryptionParameters `json:"encryption,omitempty" tf:"encryption,omitempty"` + + // The flow timeout in minutes for the Virtual Network, which is used to enable connection tracking for intra-VM flows. Possible values are between 4 and 30 minutes. + // +kubebuilder:validation:Optional + FlowTimeoutInMinutes *float64 `json:"flowTimeoutInMinutes,omitempty" tf:"flow_timeout_in_minutes,omitempty"` + + // The location/region where the virtual network is created. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the resource group in which to create the virtual network. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. 
+ // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type VirtualNetworkSubnetInitParameters struct { +} + +type VirtualNetworkSubnetObservation struct { + + // The address prefix to use for the subnet. + AddressPrefix *string `json:"addressPrefix,omitempty" tf:"address_prefix,omitempty"` + + // The ID of this subnet. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name of the subnet. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Network Security Group to associate with the subnet. (Referenced by id, ie. azurerm_network_security_group.example.id) + SecurityGroup *string `json:"securityGroup,omitempty" tf:"security_group,omitempty"` +} + +type VirtualNetworkSubnetParameters struct { +} + +// VirtualNetworkSpec defines the desired state of VirtualNetwork +type VirtualNetworkSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider VirtualNetworkParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider VirtualNetworkInitParameters `json:"initProvider,omitempty"` +} + +// VirtualNetworkStatus defines the observed state of VirtualNetwork. +type VirtualNetworkStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider VirtualNetworkObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// VirtualNetwork is the Schema for the VirtualNetworks API. Manages a virtual network including any configured subnets. Each subnet can optionally be configured with a security group to be associated with the subnet. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type VirtualNetwork struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.addressSpace) || (has(self.initProvider) && has(self.initProvider.addressSpace))",message="spec.forProvider.addressSpace is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + Spec VirtualNetworkSpec `json:"spec"` + Status VirtualNetworkStatus `json:"status,omitempty"` +} + +// 
+kubebuilder:object:root=true + +// VirtualNetworkList contains a list of VirtualNetworks +type VirtualNetworkList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []VirtualNetwork `json:"items"` +} + +// Repository type metadata. +var ( + VirtualNetwork_Kind = "VirtualNetwork" + VirtualNetwork_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: VirtualNetwork_Kind}.String() + VirtualNetwork_KindAPIVersion = VirtualNetwork_Kind + "." + CRDGroupVersion.String() + VirtualNetwork_GroupVersionKind = CRDGroupVersion.WithKind(VirtualNetwork_Kind) +) + +func init() { + SchemeBuilder.Register(&VirtualNetwork{}, &VirtualNetworkList{}) +} diff --git a/apis/network/v1beta2/zz_virtualnetworkgateway_terraformed.go b/apis/network/v1beta2/zz_virtualnetworkgateway_terraformed.go new file mode 100755 index 000000000..36f59b926 --- /dev/null +++ b/apis/network/v1beta2/zz_virtualnetworkgateway_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this VirtualNetworkGateway +func (mg *VirtualNetworkGateway) GetTerraformResourceType() string { + return "azurerm_virtual_network_gateway" +} + +// GetConnectionDetailsMapping for this VirtualNetworkGateway +func (tr *VirtualNetworkGateway) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"vpn_client_configuration[*].radius_server[*].secret": "spec.forProvider.vpnClientConfiguration[*].radiusServer[*].secretSecretRef"} +} + +// GetObservation of this VirtualNetworkGateway +func (tr *VirtualNetworkGateway) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this VirtualNetworkGateway +func (tr *VirtualNetworkGateway) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this VirtualNetworkGateway +func (tr *VirtualNetworkGateway) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this VirtualNetworkGateway +func (tr *VirtualNetworkGateway) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this VirtualNetworkGateway +func (tr *VirtualNetworkGateway) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + 
} + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this VirtualNetworkGateway +func (tr *VirtualNetworkGateway) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this VirtualNetworkGateway +func (tr *VirtualNetworkGateway) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this VirtualNetworkGateway using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *VirtualNetworkGateway) LateInitialize(attrs []byte) (bool, error) { + params := &VirtualNetworkGatewayParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *VirtualNetworkGateway) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/network/v1beta2/zz_virtualnetworkgateway_types.go b/apis/network/v1beta2/zz_virtualnetworkgateway_types.go new file mode 100755 index 000000000..34a153ffb --- /dev/null +++ b/apis/network/v1beta2/zz_virtualnetworkgateway_types.go @@ -0,0 +1,980 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CustomRouteInitParameters struct { + + // A list of address blocks reserved for this virtual network in CIDR notation. + // +listType=set + AddressPrefixes []*string `json:"addressPrefixes,omitempty" tf:"address_prefixes,omitempty"` +} + +type CustomRouteObservation struct { + + // A list of address blocks reserved for this virtual network in CIDR notation. + // +listType=set + AddressPrefixes []*string `json:"addressPrefixes,omitempty" tf:"address_prefixes,omitempty"` +} + +type CustomRouteParameters struct { + + // A list of address blocks reserved for this virtual network in CIDR notation. 
+ // +kubebuilder:validation:Optional + // +listType=set + AddressPrefixes []*string `json:"addressPrefixes,omitempty" tf:"address_prefixes,omitempty"` +} + +type IpsecPolicyInitParameters struct { + + // The DH Group, used in IKE Phase 1. Possible values are DHGroup1, DHGroup2, DHGroup14, DHGroup24, DHGroup2048, ECP256, ECP384 and None. + DhGroup *string `json:"dhGroup,omitempty" tf:"dh_group,omitempty"` + + // The IKE encryption algorithm, used for IKE Phase 2. Possible values are AES128, AES192, AES256, DES, DES3, GCMAES128 and GCMAES256. + IkeEncryption *string `json:"ikeEncryption,omitempty" tf:"ike_encryption,omitempty"` + + // The IKE encryption integrity algorithm, used for IKE Phase 2. Possible values are GCMAES128, GCMAES256, MD5, SHA1, SHA256 and SHA384. + IkeIntegrity *string `json:"ikeIntegrity,omitempty" tf:"ike_integrity,omitempty"` + + // The IPSec encryption algorithm, used for IKE phase 1. Possible values are AES128, AES192, AES256, DES, DES3, GCMAES128, GCMAES192, GCMAES256 and None. + IpsecEncryption *string `json:"ipsecEncryption,omitempty" tf:"ipsec_encryption,omitempty"` + + // The IPSec integrity algorithm, used for IKE phase 1. Possible values are GCMAES128, GCMAES192, GCMAES256, MD5, SHA1 and SHA256. + IpsecIntegrity *string `json:"ipsecIntegrity,omitempty" tf:"ipsec_integrity,omitempty"` + + // The Pfs Group, used in IKE Phase 2. Possible values are ECP256, ECP384, PFS1, PFS2, PFS14, PFS24, PFS2048, PFSMM and None. + PfsGroup *string `json:"pfsGroup,omitempty" tf:"pfs_group,omitempty"` + + // The IPSec Security Association payload size in KB for a Site-to-Site VPN tunnel. Possible values are between 1024 and 2147483647. + SaDataSizeInKilobytes *float64 `json:"saDataSizeInKilobytes,omitempty" tf:"sa_data_size_in_kilobytes,omitempty"` + + // The IPSec Security Association lifetime in seconds for a Site-to-Site VPN tunnel. Possible values are between 300 and 172799. 
+ SaLifetimeInSeconds *float64 `json:"saLifetimeInSeconds,omitempty" tf:"sa_lifetime_in_seconds,omitempty"` +} + +type IpsecPolicyObservation struct { + + // The DH Group, used in IKE Phase 1. Possible values are DHGroup1, DHGroup2, DHGroup14, DHGroup24, DHGroup2048, ECP256, ECP384 and None. + DhGroup *string `json:"dhGroup,omitempty" tf:"dh_group,omitempty"` + + // The IKE encryption algorithm, used for IKE Phase 2. Possible values are AES128, AES192, AES256, DES, DES3, GCMAES128 and GCMAES256. + IkeEncryption *string `json:"ikeEncryption,omitempty" tf:"ike_encryption,omitempty"` + + // The IKE encryption integrity algorithm, used for IKE Phase 2. Possible values are GCMAES128, GCMAES256, MD5, SHA1, SHA256 and SHA384. + IkeIntegrity *string `json:"ikeIntegrity,omitempty" tf:"ike_integrity,omitempty"` + + // The IPSec encryption algorithm, used for IKE phase 1. Possible values are AES128, AES192, AES256, DES, DES3, GCMAES128, GCMAES192, GCMAES256 and None. + IpsecEncryption *string `json:"ipsecEncryption,omitempty" tf:"ipsec_encryption,omitempty"` + + // The IPSec integrity algorithm, used for IKE phase 1. Possible values are GCMAES128, GCMAES192, GCMAES256, MD5, SHA1 and SHA256. + IpsecIntegrity *string `json:"ipsecIntegrity,omitempty" tf:"ipsec_integrity,omitempty"` + + // The Pfs Group, used in IKE Phase 2. Possible values are ECP256, ECP384, PFS1, PFS2, PFS14, PFS24, PFS2048, PFSMM and None. + PfsGroup *string `json:"pfsGroup,omitempty" tf:"pfs_group,omitempty"` + + // The IPSec Security Association payload size in KB for a Site-to-Site VPN tunnel. Possible values are between 1024 and 2147483647. + SaDataSizeInKilobytes *float64 `json:"saDataSizeInKilobytes,omitempty" tf:"sa_data_size_in_kilobytes,omitempty"` + + // The IPSec Security Association lifetime in seconds for a Site-to-Site VPN tunnel. Possible values are between 300 and 172799. 
+ SaLifetimeInSeconds *float64 `json:"saLifetimeInSeconds,omitempty" tf:"sa_lifetime_in_seconds,omitempty"` +} + +type IpsecPolicyParameters struct { + + // The DH Group, used in IKE Phase 1. Possible values are DHGroup1, DHGroup2, DHGroup14, DHGroup24, DHGroup2048, ECP256, ECP384 and None. + // +kubebuilder:validation:Optional + DhGroup *string `json:"dhGroup" tf:"dh_group,omitempty"` + + // The IKE encryption algorithm, used for IKE Phase 2. Possible values are AES128, AES192, AES256, DES, DES3, GCMAES128 and GCMAES256. + // +kubebuilder:validation:Optional + IkeEncryption *string `json:"ikeEncryption" tf:"ike_encryption,omitempty"` + + // The IKE encryption integrity algorithm, used for IKE Phase 2. Possible values are GCMAES128, GCMAES256, MD5, SHA1, SHA256 and SHA384. + // +kubebuilder:validation:Optional + IkeIntegrity *string `json:"ikeIntegrity" tf:"ike_integrity,omitempty"` + + // The IPSec encryption algorithm, used for IKE phase 1. Possible values are AES128, AES192, AES256, DES, DES3, GCMAES128, GCMAES192, GCMAES256 and None. + // +kubebuilder:validation:Optional + IpsecEncryption *string `json:"ipsecEncryption" tf:"ipsec_encryption,omitempty"` + + // The IPSec integrity algorithm, used for IKE phase 1. Possible values are GCMAES128, GCMAES192, GCMAES256, MD5, SHA1 and SHA256. + // +kubebuilder:validation:Optional + IpsecIntegrity *string `json:"ipsecIntegrity" tf:"ipsec_integrity,omitempty"` + + // The Pfs Group, used in IKE Phase 2. Possible values are ECP256, ECP384, PFS1, PFS2, PFS14, PFS24, PFS2048, PFSMM and None. + // +kubebuilder:validation:Optional + PfsGroup *string `json:"pfsGroup" tf:"pfs_group,omitempty"` + + // The IPSec Security Association payload size in KB for a Site-to-Site VPN tunnel. Possible values are between 1024 and 2147483647. 
+ // +kubebuilder:validation:Optional + SaDataSizeInKilobytes *float64 `json:"saDataSizeInKilobytes" tf:"sa_data_size_in_kilobytes,omitempty"` + + // The IPSec Security Association lifetime in seconds for a Site-to-Site VPN tunnel. Possible values are between 300 and 172799. + // +kubebuilder:validation:Optional + SaLifetimeInSeconds *float64 `json:"saLifetimeInSeconds" tf:"sa_lifetime_in_seconds,omitempty"` +} + +type PeeringAddressesInitParameters struct { + + // A list of Azure custom APIPA addresses assigned to the BGP peer of the Virtual Network Gateway. + ApipaAddresses []*string `json:"apipaAddresses,omitempty" tf:"apipa_addresses,omitempty"` + + // The name of the IP configuration of this Virtual Network Gateway. In case there are multiple ip_configuration blocks defined, this property is required to specify. + IPConfigurationName *string `json:"ipConfigurationName,omitempty" tf:"ip_configuration_name,omitempty"` +} + +type PeeringAddressesObservation struct { + + // A list of Azure custom APIPA addresses assigned to the BGP peer of the Virtual Network Gateway. + ApipaAddresses []*string `json:"apipaAddresses,omitempty" tf:"apipa_addresses,omitempty"` + + // A list of peering address assigned to the BGP peer of the Virtual Network Gateway. + DefaultAddresses []*string `json:"defaultAddresses,omitempty" tf:"default_addresses,omitempty"` + + // The name of the IP configuration of this Virtual Network Gateway. In case there are multiple ip_configuration blocks defined, this property is required to specify. + IPConfigurationName *string `json:"ipConfigurationName,omitempty" tf:"ip_configuration_name,omitempty"` + + // A list of tunnel IP addresses assigned to the BGP peer of the Virtual Network Gateway. + TunnelIPAddresses []*string `json:"tunnelIpAddresses,omitempty" tf:"tunnel_ip_addresses,omitempty"` +} + +type PeeringAddressesParameters struct { + + // A list of Azure custom APIPA addresses assigned to the BGP peer of the Virtual Network Gateway. 
+ // +kubebuilder:validation:Optional + ApipaAddresses []*string `json:"apipaAddresses,omitempty" tf:"apipa_addresses,omitempty"` + + // The name of the IP configuration of this Virtual Network Gateway. In case there are multiple ip_configuration blocks defined, this property is required to specify. + // +kubebuilder:validation:Optional + IPConfigurationName *string `json:"ipConfigurationName,omitempty" tf:"ip_configuration_name,omitempty"` +} + +type PolicyGroupInitParameters struct { + + // Is this a Default Virtual Network Gateway Policy Group? Defaults to false. + IsDefault *bool `json:"isDefault,omitempty" tf:"is_default,omitempty"` + + // The name of the Virtual Network Gateway Policy Group. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // One or more policy_member blocks as defined below. + PolicyMember []PolicyMemberInitParameters `json:"policyMember,omitempty" tf:"policy_member,omitempty"` + + // The priority for the Virtual Network Gateway Policy Group. Defaults to 0. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` +} + +type PolicyGroupObservation struct { + + // Is this a Default Virtual Network Gateway Policy Group? Defaults to false. + IsDefault *bool `json:"isDefault,omitempty" tf:"is_default,omitempty"` + + // The name of the Virtual Network Gateway Policy Group. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // One or more policy_member blocks as defined below. + PolicyMember []PolicyMemberObservation `json:"policyMember,omitempty" tf:"policy_member,omitempty"` + + // The priority for the Virtual Network Gateway Policy Group. Defaults to 0. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` +} + +type PolicyGroupParameters struct { + + // Is this a Default Virtual Network Gateway Policy Group? Defaults to false. 
+ // +kubebuilder:validation:Optional + IsDefault *bool `json:"isDefault,omitempty" tf:"is_default,omitempty"` + + // The name of the Virtual Network Gateway Policy Group. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // One or more policy_member blocks as defined below. + // +kubebuilder:validation:Optional + PolicyMember []PolicyMemberParameters `json:"policyMember" tf:"policy_member,omitempty"` + + // The priority for the Virtual Network Gateway Policy Group. Defaults to 0. + // +kubebuilder:validation:Optional + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` +} + +type PolicyMemberInitParameters struct { + + // The name of the Virtual Network Gateway Client Connection. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The type of the Virtual Network Gateway. Valid options are Vpn or ExpressRoute. Changing the type forces a new resource to be created. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // The value of attribute that is used for this Virtual Network Gateway Policy Group Member. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type PolicyMemberObservation struct { + + // The name of the Virtual Network Gateway Client Connection. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The type of the Virtual Network Gateway. Valid options are Vpn or ExpressRoute. Changing the type forces a new resource to be created. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // The value of attribute that is used for this Virtual Network Gateway Policy Group Member. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type PolicyMemberParameters struct { + + // The name of the Virtual Network Gateway Client Connection. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The type of the Virtual Network Gateway. Valid options are Vpn or ExpressRoute. 
Changing the type forces a new resource to be created. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` + + // The value of attribute that is used for this Virtual Network Gateway Policy Group Member. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type RadiusServerInitParameters struct { + + // The address of the Radius Server. + Address *string `json:"address,omitempty" tf:"address,omitempty"` + + // The score of the Radius Server determines the priority of the server. Possible values are between 1 and 30. + Score *float64 `json:"score,omitempty" tf:"score,omitempty"` +} + +type RadiusServerObservation struct { + + // The address of the Radius Server. + Address *string `json:"address,omitempty" tf:"address,omitempty"` + + // The score of the Radius Server determines the priority of the server. Possible values are between 1 and 30. + Score *float64 `json:"score,omitempty" tf:"score,omitempty"` +} + +type RadiusServerParameters struct { + + // The address of the Radius Server. + // +kubebuilder:validation:Optional + Address *string `json:"address" tf:"address,omitempty"` + + // The score of the Radius Server determines the priority of the server. Possible values are between 1 and 30. + // +kubebuilder:validation:Optional + Score *float64 `json:"score" tf:"score,omitempty"` + + // The secret that is used to communicate with the Radius Server. + // +kubebuilder:validation:Required + SecretSecretRef v1.SecretKeySelector `json:"secretSecretRef" tf:"-"` +} + +type RevokedCertificateInitParameters struct { + + // The name of the Virtual Network Gateway Client Connection. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies the public data of the certificate. + Thumbprint *string `json:"thumbprint,omitempty" tf:"thumbprint,omitempty"` +} + +type RevokedCertificateObservation struct { + + // The name of the Virtual Network Gateway Client Connection. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies the public data of the certificate. + Thumbprint *string `json:"thumbprint,omitempty" tf:"thumbprint,omitempty"` +} + +type RevokedCertificateParameters struct { + + // The name of the Virtual Network Gateway Client Connection. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Specifies the public data of the certificate. + // +kubebuilder:validation:Optional + Thumbprint *string `json:"thumbprint" tf:"thumbprint,omitempty"` +} + +type RootCertificateInitParameters struct { + + // The name of the Virtual Network Gateway Client Connection. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The public certificate of the root certificate authority. The certificate must be provided in Base-64 encoded X.509 format (PEM). In particular, this argument must not include the -----BEGIN CERTIFICATE----- or -----END CERTIFICATE----- markers, nor any newlines. + PublicCertData *string `json:"publicCertData,omitempty" tf:"public_cert_data,omitempty"` +} + +type RootCertificateObservation struct { + + // The name of the Virtual Network Gateway Client Connection. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The public certificate of the root certificate authority. The certificate must be provided in Base-64 encoded X.509 format (PEM). In particular, this argument must not include the -----BEGIN CERTIFICATE----- or -----END CERTIFICATE----- markers, nor any newlines. + PublicCertData *string `json:"publicCertData,omitempty" tf:"public_cert_data,omitempty"` +} + +type RootCertificateParameters struct { + + // The name of the Virtual Network Gateway Client Connection. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The public certificate of the root certificate authority. The certificate must be provided in Base-64 encoded X.509 format (PEM). 
In particular, this argument must not include the -----BEGIN CERTIFICATE----- or -----END CERTIFICATE----- markers, nor any newlines. + // +kubebuilder:validation:Optional + PublicCertData *string `json:"publicCertData" tf:"public_cert_data,omitempty"` +} + +type VPNClientConfigurationInitParameters struct { + + // The client id of the Azure VPN application. + // See Create an Active Directory (AD) tenant for P2S OpenVPN protocol connections for values + AADAudience *string `json:"aadAudience,omitempty" tf:"aad_audience,omitempty"` + + // The STS url for your tenant + AADIssuer *string `json:"aadIssuer,omitempty" tf:"aad_issuer,omitempty"` + + // AzureAD Tenant URL + AADTenant *string `json:"aadTenant,omitempty" tf:"aad_tenant,omitempty"` + + // The address space out of which IP addresses for vpn clients will be taken. You can provide more than one address space, e.g. in CIDR notation. + AddressSpace []*string `json:"addressSpace,omitempty" tf:"address_space,omitempty"` + + // An ipsec_policy block as defined below. + IpsecPolicy *IpsecPolicyInitParameters `json:"ipsecPolicy,omitempty" tf:"ipsec_policy,omitempty"` + + // One or more radius_server blocks as defined below. + RadiusServer []RadiusServerInitParameters `json:"radiusServer,omitempty" tf:"radius_server,omitempty"` + + // The address of the Radius server. + RadiusServerAddress *string `json:"radiusServerAddress,omitempty" tf:"radius_server_address,omitempty"` + + // The secret used by the Radius server. + RadiusServerSecret *string `json:"radiusServerSecret,omitempty" tf:"radius_server_secret,omitempty"` + + // One or more revoked_certificate blocks which are defined below. + RevokedCertificate []RevokedCertificateInitParameters `json:"revokedCertificate,omitempty" tf:"revoked_certificate,omitempty"` + + // One or more root_certificate blocks which are defined below. These root certificates are used to sign the client certificate used by the VPN clients to connect to the gateway. 
+ RootCertificate []RootCertificateInitParameters `json:"rootCertificate,omitempty" tf:"root_certificate,omitempty"` + + // List of the vpn authentication types for the virtual network gateway. + // The supported values are AAD, Radius and Certificate. + // +listType=set + VPNAuthTypes []*string `json:"vpnAuthTypes,omitempty" tf:"vpn_auth_types,omitempty"` + + // List of the protocols supported by the vpn client. + // The supported values are SSTP, IkeV2 and OpenVPN. + // Values SSTP and IkeV2 are incompatible with the use of + // aad_tenant, aad_audience and aad_issuer. + // +listType=set + VPNClientProtocols []*string `json:"vpnClientProtocols,omitempty" tf:"vpn_client_protocols,omitempty"` + + // One or more virtual_network_gateway_client_connection blocks as defined below. + VirtualNetworkGatewayClientConnection []VirtualNetworkGatewayClientConnectionInitParameters `json:"virtualNetworkGatewayClientConnection,omitempty" tf:"virtual_network_gateway_client_connection,omitempty"` +} + +type VPNClientConfigurationObservation struct { + + // The client id of the Azure VPN application. + // See Create an Active Directory (AD) tenant for P2S OpenVPN protocol connections for values + AADAudience *string `json:"aadAudience,omitempty" tf:"aad_audience,omitempty"` + + // The STS url for your tenant + AADIssuer *string `json:"aadIssuer,omitempty" tf:"aad_issuer,omitempty"` + + // AzureAD Tenant URL + AADTenant *string `json:"aadTenant,omitempty" tf:"aad_tenant,omitempty"` + + // The address space out of which IP addresses for vpn clients will be taken. You can provide more than one address space, e.g. in CIDR notation. + AddressSpace []*string `json:"addressSpace,omitempty" tf:"address_space,omitempty"` + + // An ipsec_policy block as defined below. + IpsecPolicy *IpsecPolicyObservation `json:"ipsecPolicy,omitempty" tf:"ipsec_policy,omitempty"` + + // One or more radius_server blocks as defined below. 
+ RadiusServer []RadiusServerObservation `json:"radiusServer,omitempty" tf:"radius_server,omitempty"` + + // The address of the Radius server. + RadiusServerAddress *string `json:"radiusServerAddress,omitempty" tf:"radius_server_address,omitempty"` + + // The secret used by the Radius server. + RadiusServerSecret *string `json:"radiusServerSecret,omitempty" tf:"radius_server_secret,omitempty"` + + // One or more revoked_certificate blocks which are defined below. + RevokedCertificate []RevokedCertificateObservation `json:"revokedCertificate,omitempty" tf:"revoked_certificate,omitempty"` + + // One or more root_certificate blocks which are defined below. These root certificates are used to sign the client certificate used by the VPN clients to connect to the gateway. + RootCertificate []RootCertificateObservation `json:"rootCertificate,omitempty" tf:"root_certificate,omitempty"` + + // List of the vpn authentication types for the virtual network gateway. + // The supported values are AAD, Radius and Certificate. + // +listType=set + VPNAuthTypes []*string `json:"vpnAuthTypes,omitempty" tf:"vpn_auth_types,omitempty"` + + // List of the protocols supported by the vpn client. + // The supported values are SSTP, IkeV2 and OpenVPN. + // Values SSTP and IkeV2 are incompatible with the use of + // aad_tenant, aad_audience and aad_issuer. + // +listType=set + VPNClientProtocols []*string `json:"vpnClientProtocols,omitempty" tf:"vpn_client_protocols,omitempty"` + + // One or more virtual_network_gateway_client_connection blocks as defined below. + VirtualNetworkGatewayClientConnection []VirtualNetworkGatewayClientConnectionObservation `json:"virtualNetworkGatewayClientConnection,omitempty" tf:"virtual_network_gateway_client_connection,omitempty"` +} + +type VPNClientConfigurationParameters struct { + + // The client id of the Azure VPN application. 
+ // See Create an Active Directory (AD) tenant for P2S OpenVPN protocol connections for values + // +kubebuilder:validation:Optional + AADAudience *string `json:"aadAudience,omitempty" tf:"aad_audience,omitempty"` + + // The STS url for your tenant + // +kubebuilder:validation:Optional + AADIssuer *string `json:"aadIssuer,omitempty" tf:"aad_issuer,omitempty"` + + // AzureAD Tenant URL + // +kubebuilder:validation:Optional + AADTenant *string `json:"aadTenant,omitempty" tf:"aad_tenant,omitempty"` + + // The address space out of which IP addresses for vpn clients will be taken. You can provide more than one address space, e.g. in CIDR notation. + // +kubebuilder:validation:Optional + AddressSpace []*string `json:"addressSpace" tf:"address_space,omitempty"` + + // An ipsec_policy block as defined below. + // +kubebuilder:validation:Optional + IpsecPolicy *IpsecPolicyParameters `json:"ipsecPolicy,omitempty" tf:"ipsec_policy,omitempty"` + + // One or more radius_server blocks as defined below. + // +kubebuilder:validation:Optional + RadiusServer []RadiusServerParameters `json:"radiusServer,omitempty" tf:"radius_server,omitempty"` + + // The address of the Radius server. + // +kubebuilder:validation:Optional + RadiusServerAddress *string `json:"radiusServerAddress,omitempty" tf:"radius_server_address,omitempty"` + + // The secret used by the Radius server. + // +kubebuilder:validation:Optional + RadiusServerSecret *string `json:"radiusServerSecret,omitempty" tf:"radius_server_secret,omitempty"` + + // One or more revoked_certificate blocks which are defined below. + // +kubebuilder:validation:Optional + RevokedCertificate []RevokedCertificateParameters `json:"revokedCertificate,omitempty" tf:"revoked_certificate,omitempty"` + + // One or more root_certificate blocks which are defined below. These root certificates are used to sign the client certificate used by the VPN clients to connect to the gateway. 
+ // +kubebuilder:validation:Optional + RootCertificate []RootCertificateParameters `json:"rootCertificate,omitempty" tf:"root_certificate,omitempty"` + + // List of the vpn authentication types for the virtual network gateway. + // The supported values are AAD, Radius and Certificate. + // +kubebuilder:validation:Optional + // +listType=set + VPNAuthTypes []*string `json:"vpnAuthTypes,omitempty" tf:"vpn_auth_types,omitempty"` + + // List of the protocols supported by the vpn client. + // The supported values are SSTP, IkeV2 and OpenVPN. + // Values SSTP and IkeV2 are incompatible with the use of + // aad_tenant, aad_audience and aad_issuer. + // +kubebuilder:validation:Optional + // +listType=set + VPNClientProtocols []*string `json:"vpnClientProtocols,omitempty" tf:"vpn_client_protocols,omitempty"` + + // One or more virtual_network_gateway_client_connection blocks as defined below. + // +kubebuilder:validation:Optional + VirtualNetworkGatewayClientConnection []VirtualNetworkGatewayClientConnectionParameters `json:"virtualNetworkGatewayClientConnection,omitempty" tf:"virtual_network_gateway_client_connection,omitempty"` +} + +type VirtualNetworkGatewayBGPSettingsInitParameters struct { + + // The Autonomous System Number (ASN) to use as part of the BGP. + Asn *float64 `json:"asn,omitempty" tf:"asn,omitempty"` + + // The weight added to routes which have been learned through BGP peering. Valid values can be between 0 and 100. + PeerWeight *float64 `json:"peerWeight,omitempty" tf:"peer_weight,omitempty"` + + // A list of peering_addresses blocks as defined below. Only one peering_addresses block can be specified except when active_active of this Virtual Network Gateway is true. + PeeringAddresses []PeeringAddressesInitParameters `json:"peeringAddresses,omitempty" tf:"peering_addresses,omitempty"` +} + +type VirtualNetworkGatewayBGPSettingsObservation struct { + + // The Autonomous System Number (ASN) to use as part of the BGP. 
+ Asn *float64 `json:"asn,omitempty" tf:"asn,omitempty"` + + // The weight added to routes which have been learned through BGP peering. Valid values can be between 0 and 100. + PeerWeight *float64 `json:"peerWeight,omitempty" tf:"peer_weight,omitempty"` + + // A list of peering_addresses blocks as defined below. Only one peering_addresses block can be specified except when active_active of this Virtual Network Gateway is true. + PeeringAddresses []PeeringAddressesObservation `json:"peeringAddresses,omitempty" tf:"peering_addresses,omitempty"` +} + +type VirtualNetworkGatewayBGPSettingsParameters struct { + + // The Autonomous System Number (ASN) to use as part of the BGP. + // +kubebuilder:validation:Optional + Asn *float64 `json:"asn,omitempty" tf:"asn,omitempty"` + + // The weight added to routes which have been learned through BGP peering. Valid values can be between 0 and 100. + // +kubebuilder:validation:Optional + PeerWeight *float64 `json:"peerWeight,omitempty" tf:"peer_weight,omitempty"` + + // A list of peering_addresses blocks as defined below. Only one peering_addresses block can be specified except when active_active of this Virtual Network Gateway is true. + // +kubebuilder:validation:Optional + PeeringAddresses []PeeringAddressesParameters `json:"peeringAddresses,omitempty" tf:"peering_addresses,omitempty"` +} + +type VirtualNetworkGatewayClientConnectionInitParameters struct { + + // A list of address prefixes for P2S VPN Client. + AddressPrefixes []*string `json:"addressPrefixes,omitempty" tf:"address_prefixes,omitempty"` + + // The name of the Virtual Network Gateway Client Connection. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A list of names of Virtual Network Gateway Policy Groups. + PolicyGroupNames []*string `json:"policyGroupNames,omitempty" tf:"policy_group_names,omitempty"` +} + +type VirtualNetworkGatewayClientConnectionObservation struct { + + // A list of address prefixes for P2S VPN Client. 
+ AddressPrefixes []*string `json:"addressPrefixes,omitempty" tf:"address_prefixes,omitempty"` + + // The name of the Virtual Network Gateway Client Connection. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A list of names of Virtual Network Gateway Policy Groups. + PolicyGroupNames []*string `json:"policyGroupNames,omitempty" tf:"policy_group_names,omitempty"` +} + +type VirtualNetworkGatewayClientConnectionParameters struct { + + // A list of address prefixes for P2S VPN Client. + // +kubebuilder:validation:Optional + AddressPrefixes []*string `json:"addressPrefixes" tf:"address_prefixes,omitempty"` + + // The name of the Virtual Network Gateway Client Connection. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // A list of names of Virtual Network Gateway Policy Groups. + // +kubebuilder:validation:Optional + PolicyGroupNames []*string `json:"policyGroupNames" tf:"policy_group_names,omitempty"` +} + +type VirtualNetworkGatewayIPConfigurationInitParameters struct { + + // A user-defined name of the IP configuration. Defaults to vnetGatewayConfig. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Defines how the private IP address of the gateways virtual interface is assigned. The only valid value is Dynamic for Virtual Network Gateway (Static is not supported by the service yet). Defaults to Dynamic. + PrivateIPAddressAllocation *string `json:"privateIpAddressAllocation,omitempty" tf:"private_ip_address_allocation,omitempty"` + + // The ID of the public IP address to associate with the Virtual Network Gateway. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.PublicIP + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + PublicIPAddressID *string `json:"publicIpAddressId,omitempty" tf:"public_ip_address_id,omitempty"` + + // Reference to a PublicIP in network to populate publicIpAddressId. + // +kubebuilder:validation:Optional + PublicIPAddressIDRef *v1.Reference `json:"publicIpAddressIdRef,omitempty" tf:"-"` + + // Selector for a PublicIP in network to populate publicIpAddressId. + // +kubebuilder:validation:Optional + PublicIPAddressIDSelector *v1.Selector `json:"publicIpAddressIdSelector,omitempty" tf:"-"` + + // The ID of the gateway subnet of a virtual network in which the virtual network gateway will be created. It is mandatory that the associated subnet is named GatewaySubnet. Therefore, each virtual network can contain at most a single Virtual Network Gateway. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` +} + +type VirtualNetworkGatewayIPConfigurationObservation struct { + + // A user-defined name of the IP configuration. Defaults to vnetGatewayConfig. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Defines how the private IP address of the gateways virtual interface is assigned. 
The only valid value is Dynamic for Virtual Network Gateway (Static is not supported by the service yet). Defaults to Dynamic. + PrivateIPAddressAllocation *string `json:"privateIpAddressAllocation,omitempty" tf:"private_ip_address_allocation,omitempty"` + + // The ID of the public IP address to associate with the Virtual Network Gateway. + PublicIPAddressID *string `json:"publicIpAddressId,omitempty" tf:"public_ip_address_id,omitempty"` + + // The ID of the gateway subnet of a virtual network in which the virtual network gateway will be created. It is mandatory that the associated subnet is named GatewaySubnet. Therefore, each virtual network can contain at most a single Virtual Network Gateway. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` +} + +type VirtualNetworkGatewayIPConfigurationParameters struct { + + // A user-defined name of the IP configuration. Defaults to vnetGatewayConfig. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Defines how the private IP address of the gateways virtual interface is assigned. The only valid value is Dynamic for Virtual Network Gateway (Static is not supported by the service yet). Defaults to Dynamic. + // +kubebuilder:validation:Optional + PrivateIPAddressAllocation *string `json:"privateIpAddressAllocation,omitempty" tf:"private_ip_address_allocation,omitempty"` + + // The ID of the public IP address to associate with the Virtual Network Gateway. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.PublicIP + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + PublicIPAddressID *string `json:"publicIpAddressId,omitempty" tf:"public_ip_address_id,omitempty"` + + // Reference to a PublicIP in network to populate publicIpAddressId. 
+ // +kubebuilder:validation:Optional + PublicIPAddressIDRef *v1.Reference `json:"publicIpAddressIdRef,omitempty" tf:"-"` + + // Selector for a PublicIP in network to populate publicIpAddressId. + // +kubebuilder:validation:Optional + PublicIPAddressIDSelector *v1.Selector `json:"publicIpAddressIdSelector,omitempty" tf:"-"` + + // The ID of the gateway subnet of a virtual network in which the virtual network gateway will be created. It is mandatory that the associated subnet is named GatewaySubnet. Therefore, each virtual network can contain at most a single Virtual Network Gateway. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` +} + +type VirtualNetworkGatewayInitParameters struct { + + // If true, an active-active Virtual Network Gateway will be created. An active-active gateway requires a HighPerformance or an UltraPerformance SKU. If false, an active-standby gateway will be created. Defaults to false. + ActiveActive *bool `json:"activeActive,omitempty" tf:"active_active,omitempty"` + + // Is BGP Route Translation for NAT enabled? Defaults to false. + BGPRouteTranslationForNATEnabled *bool `json:"bgpRouteTranslationForNatEnabled,omitempty" tf:"bgp_route_translation_for_nat_enabled,omitempty"` + + // A bgp_settings block which is documented below. In this block the BGP specific settings can be defined. 
+ BGPSettings *VirtualNetworkGatewayBGPSettingsInitParameters `json:"bgpSettings,omitempty" tf:"bgp_settings,omitempty"` + + // A custom_route block as defined below. Specifies a custom routes address space for a virtual network gateway and a VpnClient. + CustomRoute *CustomRouteInitParameters `json:"customRoute,omitempty" tf:"custom_route,omitempty"` + + // Is DNS forwarding enabled? + DNSForwardingEnabled *bool `json:"dnsForwardingEnabled,omitempty" tf:"dns_forwarding_enabled,omitempty"` + + // The ID of the local network gateway through which outbound Internet traffic from the virtual network in which the gateway is created will be routed (forced tunnelling). Refer to the Azure documentation on forced tunnelling. If not specified, forced tunnelling is disabled. + DefaultLocalNetworkGatewayID *string `json:"defaultLocalNetworkGatewayId,omitempty" tf:"default_local_network_gateway_id,omitempty"` + + // Specifies the Edge Zone within the Azure Region where this Virtual Network Gateway should exist. Changing this forces a new Virtual Network Gateway to be created. + EdgeZone *string `json:"edgeZone,omitempty" tf:"edge_zone,omitempty"` + + // If true, BGP (Border Gateway Protocol) will be enabled for this Virtual Network Gateway. Defaults to false. + EnableBGP *bool `json:"enableBgp,omitempty" tf:"enable_bgp,omitempty"` + + // The Generation of the Virtual Network gateway. Possible values include Generation1, Generation2 or None. Changing this forces a new resource to be created. + Generation *string `json:"generation,omitempty" tf:"generation,omitempty"` + + // One or more (up to 3) ip_configuration blocks documented below. + // An active-standby gateway requires exactly one ip_configuration block, + // an active-active gateway requires exactly two ip_configuration blocks whereas + // an active-active zone redundant gateway with P2S configuration requires exactly three ip_configuration blocks. 
+ IPConfiguration []VirtualNetworkGatewayIPConfigurationInitParameters `json:"ipConfiguration,omitempty" tf:"ip_configuration,omitempty"` + + // Is IP Sec Replay Protection enabled? Defaults to true. + IPSecReplayProtectionEnabled *bool `json:"ipSecReplayProtectionEnabled,omitempty" tf:"ip_sec_replay_protection_enabled,omitempty"` + + // The location/region where the Virtual Network Gateway is located. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // One or more policy_group blocks as defined below. + PolicyGroup []PolicyGroupInitParameters `json:"policyGroup,omitempty" tf:"policy_group,omitempty"` + + // Should private IP be enabled on this gateway for connections? Changing this forces a new resource to be created. + PrivateIPAddressEnabled *bool `json:"privateIpAddressEnabled,omitempty" tf:"private_ip_address_enabled,omitempty"` + + // Is remote vnet traffic that is used to configure this gateway to accept traffic from other Azure Virtual Networks enabled? Defaults to false. + RemoteVnetTrafficEnabled *bool `json:"remoteVnetTrafficEnabled,omitempty" tf:"remote_vnet_traffic_enabled,omitempty"` + + // Configuration of the size and capacity of the virtual network gateway. Valid options are Basic, Standard, HighPerformance, UltraPerformance, ErGw1AZ, ErGw2AZ, ErGw3AZ, VpnGw1, VpnGw2, VpnGw3, VpnGw4,VpnGw5, VpnGw1AZ, VpnGw2AZ, VpnGw3AZ,VpnGw4AZ and VpnGw5AZ and depend on the type, vpn_type and generation arguments. A PolicyBased gateway only supports the Basic SKU. Further, the UltraPerformance SKU is only supported by an ExpressRoute gateway. + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The type of the Virtual Network Gateway. Valid options are Vpn or ExpressRoute. Changing the type forces a new resource to be created. 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // A vpn_client_configuration block which is documented below. In this block the Virtual Network Gateway can be configured to accept IPSec point-to-site connections. + VPNClientConfiguration *VPNClientConfigurationInitParameters `json:"vpnClientConfiguration,omitempty" tf:"vpn_client_configuration,omitempty"` + + // The routing type of the Virtual Network Gateway. Valid options are RouteBased or PolicyBased. Defaults to RouteBased. Changing this forces a new resource to be created. + VPNType *string `json:"vpnType,omitempty" tf:"vpn_type,omitempty"` + + // Is remote vnet traffic that is used to configure this gateway to accept traffic from remote Virtual WAN networks enabled? Defaults to false. + VirtualWanTrafficEnabled *bool `json:"virtualWanTrafficEnabled,omitempty" tf:"virtual_wan_traffic_enabled,omitempty"` +} + +type VirtualNetworkGatewayObservation struct { + + // If true, an active-active Virtual Network Gateway will be created. An active-active gateway requires a HighPerformance or an UltraPerformance SKU. If false, an active-standby gateway will be created. Defaults to false. + ActiveActive *bool `json:"activeActive,omitempty" tf:"active_active,omitempty"` + + // Is BGP Route Translation for NAT enabled? Defaults to false. + BGPRouteTranslationForNATEnabled *bool `json:"bgpRouteTranslationForNatEnabled,omitempty" tf:"bgp_route_translation_for_nat_enabled,omitempty"` + + // A bgp_settings block which is documented below. In this block the BGP specific settings can be defined. + BGPSettings *VirtualNetworkGatewayBGPSettingsObservation `json:"bgpSettings,omitempty" tf:"bgp_settings,omitempty"` + + // A custom_route block as defined below. Specifies a custom routes address space for a virtual network gateway and a VpnClient. + CustomRoute *CustomRouteObservation `json:"customRoute,omitempty" tf:"custom_route,omitempty"` + + // Is DNS forwarding enabled? 
+ DNSForwardingEnabled *bool `json:"dnsForwardingEnabled,omitempty" tf:"dns_forwarding_enabled,omitempty"` + + // The ID of the local network gateway through which outbound Internet traffic from the virtual network in which the gateway is created will be routed (forced tunnelling). Refer to the Azure documentation on forced tunnelling. If not specified, forced tunnelling is disabled. + DefaultLocalNetworkGatewayID *string `json:"defaultLocalNetworkGatewayId,omitempty" tf:"default_local_network_gateway_id,omitempty"` + + // Specifies the Edge Zone within the Azure Region where this Virtual Network Gateway should exist. Changing this forces a new Virtual Network Gateway to be created. + EdgeZone *string `json:"edgeZone,omitempty" tf:"edge_zone,omitempty"` + + // If true, BGP (Border Gateway Protocol) will be enabled for this Virtual Network Gateway. Defaults to false. + EnableBGP *bool `json:"enableBgp,omitempty" tf:"enable_bgp,omitempty"` + + // The Generation of the Virtual Network gateway. Possible values include Generation1, Generation2 or None. Changing this forces a new resource to be created. + Generation *string `json:"generation,omitempty" tf:"generation,omitempty"` + + // The ID of the Virtual Network Gateway. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // One or more (up to 3) ip_configuration blocks documented below. + // An active-standby gateway requires exactly one ip_configuration block, + // an active-active gateway requires exactly two ip_configuration blocks whereas + // an active-active zone redundant gateway with P2S configuration requires exactly three ip_configuration blocks. + IPConfiguration []VirtualNetworkGatewayIPConfigurationObservation `json:"ipConfiguration,omitempty" tf:"ip_configuration,omitempty"` + + // Is IP Sec Replay Protection enabled? Defaults to true. 
+ IPSecReplayProtectionEnabled *bool `json:"ipSecReplayProtectionEnabled,omitempty" tf:"ip_sec_replay_protection_enabled,omitempty"` + + // The location/region where the Virtual Network Gateway is located. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // One or more policy_group blocks as defined below. + PolicyGroup []PolicyGroupObservation `json:"policyGroup,omitempty" tf:"policy_group,omitempty"` + + // Should private IP be enabled on this gateway for connections? Changing this forces a new resource to be created. + PrivateIPAddressEnabled *bool `json:"privateIpAddressEnabled,omitempty" tf:"private_ip_address_enabled,omitempty"` + + // Is remote vnet traffic that is used to configure this gateway to accept traffic from other Azure Virtual Networks enabled? Defaults to false. + RemoteVnetTrafficEnabled *bool `json:"remoteVnetTrafficEnabled,omitempty" tf:"remote_vnet_traffic_enabled,omitempty"` + + // The name of the resource group in which to create the Virtual Network Gateway. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Configuration of the size and capacity of the virtual network gateway. Valid options are Basic, Standard, HighPerformance, UltraPerformance, ErGw1AZ, ErGw2AZ, ErGw3AZ, VpnGw1, VpnGw2, VpnGw3, VpnGw4,VpnGw5, VpnGw1AZ, VpnGw2AZ, VpnGw3AZ,VpnGw4AZ and VpnGw5AZ and depend on the type, vpn_type and generation arguments. A PolicyBased gateway only supports the Basic SKU. Further, the UltraPerformance SKU is only supported by an ExpressRoute gateway. + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The type of the Virtual Network Gateway. Valid options are Vpn or ExpressRoute. 
Changing the type forces a new resource to be created. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // A vpn_client_configuration block which is documented below. In this block the Virtual Network Gateway can be configured to accept IPSec point-to-site connections. + VPNClientConfiguration *VPNClientConfigurationObservation `json:"vpnClientConfiguration,omitempty" tf:"vpn_client_configuration,omitempty"` + + // The routing type of the Virtual Network Gateway. Valid options are RouteBased or PolicyBased. Defaults to RouteBased. Changing this forces a new resource to be created. + VPNType *string `json:"vpnType,omitempty" tf:"vpn_type,omitempty"` + + // Is remote vnet traffic that is used to configure this gateway to accept traffic from remote Virtual WAN networks enabled? Defaults to false. + VirtualWanTrafficEnabled *bool `json:"virtualWanTrafficEnabled,omitempty" tf:"virtual_wan_traffic_enabled,omitempty"` +} + +type VirtualNetworkGatewayParameters struct { + + // If true, an active-active Virtual Network Gateway will be created. An active-active gateway requires a HighPerformance or an UltraPerformance SKU. If false, an active-standby gateway will be created. Defaults to false. + // +kubebuilder:validation:Optional + ActiveActive *bool `json:"activeActive,omitempty" tf:"active_active,omitempty"` + + // Is BGP Route Translation for NAT enabled? Defaults to false. + // +kubebuilder:validation:Optional + BGPRouteTranslationForNATEnabled *bool `json:"bgpRouteTranslationForNatEnabled,omitempty" tf:"bgp_route_translation_for_nat_enabled,omitempty"` + + // A bgp_settings block which is documented below. In this block the BGP specific settings can be defined. + // +kubebuilder:validation:Optional + BGPSettings *VirtualNetworkGatewayBGPSettingsParameters `json:"bgpSettings,omitempty" tf:"bgp_settings,omitempty"` + + // A custom_route block as defined below. Specifies a custom routes address space for a virtual network gateway and a VpnClient. 
+ // +kubebuilder:validation:Optional + CustomRoute *CustomRouteParameters `json:"customRoute,omitempty" tf:"custom_route,omitempty"` + + // Is DNS forwarding enabled? + // +kubebuilder:validation:Optional + DNSForwardingEnabled *bool `json:"dnsForwardingEnabled,omitempty" tf:"dns_forwarding_enabled,omitempty"` + + // The ID of the local network gateway through which outbound Internet traffic from the virtual network in which the gateway is created will be routed (forced tunnelling). Refer to the Azure documentation on forced tunnelling. If not specified, forced tunnelling is disabled. + // +kubebuilder:validation:Optional + DefaultLocalNetworkGatewayID *string `json:"defaultLocalNetworkGatewayId,omitempty" tf:"default_local_network_gateway_id,omitempty"` + + // Specifies the Edge Zone within the Azure Region where this Virtual Network Gateway should exist. Changing this forces a new Virtual Network Gateway to be created. + // +kubebuilder:validation:Optional + EdgeZone *string `json:"edgeZone,omitempty" tf:"edge_zone,omitempty"` + + // If true, BGP (Border Gateway Protocol) will be enabled for this Virtual Network Gateway. Defaults to false. + // +kubebuilder:validation:Optional + EnableBGP *bool `json:"enableBgp,omitempty" tf:"enable_bgp,omitempty"` + + // The Generation of the Virtual Network gateway. Possible values include Generation1, Generation2 or None. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Generation *string `json:"generation,omitempty" tf:"generation,omitempty"` + + // One or more (up to 3) ip_configuration blocks documented below. + // An active-standby gateway requires exactly one ip_configuration block, + // an active-active gateway requires exactly two ip_configuration blocks whereas + // an active-active zone redundant gateway with P2S configuration requires exactly three ip_configuration blocks. 
+ // +kubebuilder:validation:Optional + IPConfiguration []VirtualNetworkGatewayIPConfigurationParameters `json:"ipConfiguration,omitempty" tf:"ip_configuration,omitempty"` + + // Is IP Sec Replay Protection enabled? Defaults to true. + // +kubebuilder:validation:Optional + IPSecReplayProtectionEnabled *bool `json:"ipSecReplayProtectionEnabled,omitempty" tf:"ip_sec_replay_protection_enabled,omitempty"` + + // The location/region where the Virtual Network Gateway is located. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // One or more policy_group blocks as defined below. + // +kubebuilder:validation:Optional + PolicyGroup []PolicyGroupParameters `json:"policyGroup,omitempty" tf:"policy_group,omitempty"` + + // Should private IP be enabled on this gateway for connections? Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + PrivateIPAddressEnabled *bool `json:"privateIpAddressEnabled,omitempty" tf:"private_ip_address_enabled,omitempty"` + + // Is remote vnet traffic that is used to configure this gateway to accept traffic from other Azure Virtual Networks enabled? Defaults to false. + // +kubebuilder:validation:Optional + RemoteVnetTrafficEnabled *bool `json:"remoteVnetTrafficEnabled,omitempty" tf:"remote_vnet_traffic_enabled,omitempty"` + + // The name of the resource group in which to create the Virtual Network Gateway. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. 
+ // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // Configuration of the size and capacity of the virtual network gateway. Valid options are Basic, Standard, HighPerformance, UltraPerformance, ErGw1AZ, ErGw2AZ, ErGw3AZ, VpnGw1, VpnGw2, VpnGw3, VpnGw4,VpnGw5, VpnGw1AZ, VpnGw2AZ, VpnGw3AZ,VpnGw4AZ and VpnGw5AZ and depend on the type, vpn_type and generation arguments. A PolicyBased gateway only supports the Basic SKU. Further, the UltraPerformance SKU is only supported by an ExpressRoute gateway. + // +kubebuilder:validation:Optional + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The type of the Virtual Network Gateway. Valid options are Vpn or ExpressRoute. Changing the type forces a new resource to be created. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // A vpn_client_configuration block which is documented below. In this block the Virtual Network Gateway can be configured to accept IPSec point-to-site connections. + // +kubebuilder:validation:Optional + VPNClientConfiguration *VPNClientConfigurationParameters `json:"vpnClientConfiguration,omitempty" tf:"vpn_client_configuration,omitempty"` + + // The routing type of the Virtual Network Gateway. Valid options are RouteBased or PolicyBased. Defaults to RouteBased. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + VPNType *string `json:"vpnType,omitempty" tf:"vpn_type,omitempty"` + + // Is remote vnet traffic that is used to configure this gateway to accept traffic from remote Virtual WAN networks enabled? Defaults to false. + // +kubebuilder:validation:Optional + VirtualWanTrafficEnabled *bool `json:"virtualWanTrafficEnabled,omitempty" tf:"virtual_wan_traffic_enabled,omitempty"` +} + +// VirtualNetworkGatewaySpec defines the desired state of VirtualNetworkGateway +type VirtualNetworkGatewaySpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider VirtualNetworkGatewayParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider VirtualNetworkGatewayInitParameters `json:"initProvider,omitempty"` +} + +// VirtualNetworkGatewayStatus defines the observed state of VirtualNetworkGateway. +type VirtualNetworkGatewayStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider VirtualNetworkGatewayObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// VirtualNetworkGateway is the Schema for the VirtualNetworkGateways API. Manages a virtual network gateway to establish secure, cross-premises connectivity. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type VirtualNetworkGateway struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.ipConfiguration) || (has(self.initProvider) && has(self.initProvider.ipConfiguration))",message="spec.forProvider.ipConfiguration is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.sku) || (has(self.initProvider) && has(self.initProvider.sku))",message="spec.forProvider.sku is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.type) || (has(self.initProvider) && has(self.initProvider.type))",message="spec.forProvider.type is a required parameter" + Spec VirtualNetworkGatewaySpec `json:"spec"` + Status VirtualNetworkGatewayStatus 
`json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// VirtualNetworkGatewayList contains a list of VirtualNetworkGateways +type VirtualNetworkGatewayList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []VirtualNetworkGateway `json:"items"` +} + +// Repository type metadata. +var ( + VirtualNetworkGateway_Kind = "VirtualNetworkGateway" + VirtualNetworkGateway_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: VirtualNetworkGateway_Kind}.String() + VirtualNetworkGateway_KindAPIVersion = VirtualNetworkGateway_Kind + "." + CRDGroupVersion.String() + VirtualNetworkGateway_GroupVersionKind = CRDGroupVersion.WithKind(VirtualNetworkGateway_Kind) +) + +func init() { + SchemeBuilder.Register(&VirtualNetworkGateway{}, &VirtualNetworkGatewayList{}) +} diff --git a/apis/network/v1beta2/zz_virtualnetworkgatewayconnection_terraformed.go b/apis/network/v1beta2/zz_virtualnetworkgatewayconnection_terraformed.go new file mode 100755 index 000000000..7e3324553 --- /dev/null +++ b/apis/network/v1beta2/zz_virtualnetworkgatewayconnection_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this VirtualNetworkGatewayConnection +func (mg *VirtualNetworkGatewayConnection) GetTerraformResourceType() string { + return "azurerm_virtual_network_gateway_connection" +} + +// GetConnectionDetailsMapping for this VirtualNetworkGatewayConnection +func (tr *VirtualNetworkGatewayConnection) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"authorization_key": "spec.forProvider.authorizationKeySecretRef", "shared_key": "spec.forProvider.sharedKeySecretRef"} +} + +// GetObservation of this VirtualNetworkGatewayConnection +func (tr *VirtualNetworkGatewayConnection) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this VirtualNetworkGatewayConnection +func (tr *VirtualNetworkGatewayConnection) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this VirtualNetworkGatewayConnection +func (tr *VirtualNetworkGatewayConnection) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this VirtualNetworkGatewayConnection +func (tr *VirtualNetworkGatewayConnection) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this VirtualNetworkGatewayConnection +func (tr 
*VirtualNetworkGatewayConnection) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this VirtualNetworkGatewayConnection
+func (tr *VirtualNetworkGatewayConnection) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this VirtualNetworkGatewayConnection
+func (tr *VirtualNetworkGatewayConnection) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource %q", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource %q", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource %q", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this VirtualNetworkGatewayConnection using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *VirtualNetworkGatewayConnection) LateInitialize(attrs []byte) (bool, error) { + params := &VirtualNetworkGatewayConnectionParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *VirtualNetworkGatewayConnection) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/network/v1beta2/zz_virtualnetworkgatewayconnection_types.go b/apis/network/v1beta2/zz_virtualnetworkgatewayconnection_types.go new file mode 100755 index 000000000..299b20669 --- /dev/null +++ b/apis/network/v1beta2/zz_virtualnetworkgatewayconnection_types.go @@ -0,0 +1,555 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CustomBGPAddressesInitParameters struct { + + // single IP address that is part of the azurerm_virtual_network_gateway ip_configuration (first one) + Primary *string `json:"primary,omitempty" tf:"primary,omitempty"` + + // single IP address that is part of the azurerm_virtual_network_gateway ip_configuration (second one) + Secondary *string `json:"secondary,omitempty" tf:"secondary,omitempty"` +} + +type CustomBGPAddressesObservation struct { + + // single IP address that is part of the azurerm_virtual_network_gateway ip_configuration (first one) + Primary *string `json:"primary,omitempty" tf:"primary,omitempty"` + + // single IP address that is part of the azurerm_virtual_network_gateway ip_configuration (second one) + Secondary *string `json:"secondary,omitempty" tf:"secondary,omitempty"` +} + +type CustomBGPAddressesParameters struct { + + // single IP address that is part of the azurerm_virtual_network_gateway ip_configuration (first one) + // +kubebuilder:validation:Optional + Primary *string `json:"primary" tf:"primary,omitempty"` + + // single IP address that is part of the azurerm_virtual_network_gateway ip_configuration (second one) + // +kubebuilder:validation:Optional + Secondary *string `json:"secondary,omitempty" tf:"secondary,omitempty"` +} + +type TrafficSelectorPolicyInitParameters struct { + + // List of local CIDRs. + LocalAddressCidrs []*string `json:"localAddressCidrs,omitempty" tf:"local_address_cidrs,omitempty"` + + // List of remote CIDRs. + RemoteAddressCidrs []*string `json:"remoteAddressCidrs,omitempty" tf:"remote_address_cidrs,omitempty"` +} + +type TrafficSelectorPolicyObservation struct { + + // List of local CIDRs. 
+ LocalAddressCidrs []*string `json:"localAddressCidrs,omitempty" tf:"local_address_cidrs,omitempty"` + + // List of remote CIDRs. + RemoteAddressCidrs []*string `json:"remoteAddressCidrs,omitempty" tf:"remote_address_cidrs,omitempty"` +} + +type TrafficSelectorPolicyParameters struct { + + // List of local CIDRs. + // +kubebuilder:validation:Optional + LocalAddressCidrs []*string `json:"localAddressCidrs" tf:"local_address_cidrs,omitempty"` + + // List of remote CIDRs. + // +kubebuilder:validation:Optional + RemoteAddressCidrs []*string `json:"remoteAddressCidrs" tf:"remote_address_cidrs,omitempty"` +} + +type VirtualNetworkGatewayConnectionInitParameters struct { + + // Connection mode to use. Possible values are Default, InitiatorOnly and ResponderOnly. Defaults to Default. Changing this value will force a resource to be created. + ConnectionMode *string `json:"connectionMode,omitempty" tf:"connection_mode,omitempty"` + + // The IKE protocol version to use. Possible values are IKEv1 and IKEv2, values are IKEv1 and IKEv2. Defaults to IKEv2. Changing this forces a new resource to be created. + // -> Note: Only valid for IPSec connections on virtual network gateways with SKU VpnGw1, VpnGw2, VpnGw3, VpnGw1AZ, VpnGw2AZ or VpnGw3AZ. + ConnectionProtocol *string `json:"connectionProtocol,omitempty" tf:"connection_protocol,omitempty"` + + // A custom_bgp_addresses block which is documented below. + // The block can only be used on IPSec / activeactive connections, + // For details about see the relevant section in the Azure documentation. + CustomBGPAddresses *CustomBGPAddressesInitParameters `json:"customBgpAddresses,omitempty" tf:"custom_bgp_addresses,omitempty"` + + // The dead peer detection timeout of this connection in seconds. Changing this forces a new resource to be created. + DpdTimeoutSeconds *float64 `json:"dpdTimeoutSeconds,omitempty" tf:"dpd_timeout_seconds,omitempty"` + + // A list of the egress NAT Rule Ids. 
+ // +listType=set + EgressNATRuleIds []*string `json:"egressNatRuleIds,omitempty" tf:"egress_nat_rule_ids,omitempty"` + + // If true, BGP (Border Gateway Protocol) is enabled for this connection. Defaults to false. + EnableBGP *bool `json:"enableBgp,omitempty" tf:"enable_bgp,omitempty"` + + // The ID of the Express Route Circuit when creating an ExpressRoute connection (i.e. when type is ExpressRoute). The Express Route Circuit can be in the same or in a different subscription. Changing this forces a new resource to be created. + ExpressRouteCircuitID *string `json:"expressRouteCircuitId,omitempty" tf:"express_route_circuit_id,omitempty"` + + // If true, data packets will bypass ExpressRoute Gateway for data forwarding This is only valid for ExpressRoute connections. + ExpressRouteGatewayBypass *bool `json:"expressRouteGatewayBypass,omitempty" tf:"express_route_gateway_bypass,omitempty"` + + // A list of the ingress NAT Rule Ids. + // +listType=set + IngressNATRuleIds []*string `json:"ingressNatRuleIds,omitempty" tf:"ingress_nat_rule_ids,omitempty"` + + // A ipsec_policy block which is documented below. + // Only a single policy can be defined for a connection. For details on + // custom policies refer to the relevant section in the Azure documentation. + IpsecPolicy *VirtualNetworkGatewayConnectionIpsecPolicyInitParameters `json:"ipsecPolicy,omitempty" tf:"ipsec_policy,omitempty"` + + // Use private local Azure IP for the connection. Changing this forces a new resource to be created. + LocalAzureIPAddressEnabled *bool `json:"localAzureIpAddressEnabled,omitempty" tf:"local_azure_ip_address_enabled,omitempty"` + + // The ID of the local network gateway when creating Site-to-Site connection (i.e. when type is IPsec). 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.LocalNetworkGateway + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + LocalNetworkGatewayID *string `json:"localNetworkGatewayId,omitempty" tf:"local_network_gateway_id,omitempty"` + + // Reference to a LocalNetworkGateway in network to populate localNetworkGatewayId. + // +kubebuilder:validation:Optional + LocalNetworkGatewayIDRef *v1.Reference `json:"localNetworkGatewayIdRef,omitempty" tf:"-"` + + // Selector for a LocalNetworkGateway in network to populate localNetworkGatewayId. + // +kubebuilder:validation:Optional + LocalNetworkGatewayIDSelector *v1.Selector `json:"localNetworkGatewayIdSelector,omitempty" tf:"-"` + + // The location/region where the connection is located. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The ID of the peer virtual network gateway when creating a VNet-to-VNet connection (i.e. when type is Vnet2Vnet). The peer Virtual Network Gateway can be in the same or in a different subscription. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.VirtualNetworkGateway + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + PeerVirtualNetworkGatewayID *string `json:"peerVirtualNetworkGatewayId,omitempty" tf:"peer_virtual_network_gateway_id,omitempty"` + + // Reference to a VirtualNetworkGateway in network to populate peerVirtualNetworkGatewayId. + // +kubebuilder:validation:Optional + PeerVirtualNetworkGatewayIDRef *v1.Reference `json:"peerVirtualNetworkGatewayIdRef,omitempty" tf:"-"` + + // Selector for a VirtualNetworkGateway in network to populate peerVirtualNetworkGatewayId. 
+ // +kubebuilder:validation:Optional + PeerVirtualNetworkGatewayIDSelector *v1.Selector `json:"peerVirtualNetworkGatewayIdSelector,omitempty" tf:"-"` + + // The routing weight. Defaults to 10. + RoutingWeight *float64 `json:"routingWeight,omitempty" tf:"routing_weight,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // One or more traffic_selector_policy blocks which are documented below. + // A traffic_selector_policy allows to specify a traffic selector policy proposal to be used in a virtual network gateway connection. + // For details about traffic selectors refer to the relevant section in the Azure documentation. + TrafficSelectorPolicy []TrafficSelectorPolicyInitParameters `json:"trafficSelectorPolicy,omitempty" tf:"traffic_selector_policy,omitempty"` + + // The type of connection. Valid options are IPsec (Site-to-Site), ExpressRoute (ExpressRoute), and Vnet2Vnet (VNet-to-VNet). Each connection type requires different mandatory arguments (refer to the examples above). Changing this forces a new resource to be created. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // If true, policy-based traffic selectors are enabled for this connection. Enabling policy-based traffic selectors requires an ipsec_policy block. Defaults to false. + UsePolicyBasedTrafficSelectors *bool `json:"usePolicyBasedTrafficSelectors,omitempty" tf:"use_policy_based_traffic_selectors,omitempty"` + + // The ID of the Virtual Network Gateway in which the connection will be created. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.VirtualNetworkGateway + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + VirtualNetworkGatewayID *string `json:"virtualNetworkGatewayId,omitempty" tf:"virtual_network_gateway_id,omitempty"` + + // Reference to a VirtualNetworkGateway in network to populate virtualNetworkGatewayId. + // +kubebuilder:validation:Optional + VirtualNetworkGatewayIDRef *v1.Reference `json:"virtualNetworkGatewayIdRef,omitempty" tf:"-"` + + // Selector for a VirtualNetworkGateway in network to populate virtualNetworkGatewayId. + // +kubebuilder:validation:Optional + VirtualNetworkGatewayIDSelector *v1.Selector `json:"virtualNetworkGatewayIdSelector,omitempty" tf:"-"` +} + +type VirtualNetworkGatewayConnectionIpsecPolicyInitParameters struct { + + // The DH group used in IKE phase 1 for initial SA. Valid options are DHGroup1, DHGroup14, DHGroup2, DHGroup2048, DHGroup24, ECP256, ECP384, or None. + DhGroup *string `json:"dhGroup,omitempty" tf:"dh_group,omitempty"` + + // The IKE encryption algorithm. Valid options are AES128, AES192, AES256, DES, DES3, GCMAES128, or GCMAES256. + IkeEncryption *string `json:"ikeEncryption,omitempty" tf:"ike_encryption,omitempty"` + + // The IKE integrity algorithm. Valid options are GCMAES128, GCMAES256, MD5, SHA1, SHA256, or SHA384. + IkeIntegrity *string `json:"ikeIntegrity,omitempty" tf:"ike_integrity,omitempty"` + + // The IPSec encryption algorithm. Valid options are AES128, AES192, AES256, DES, DES3, GCMAES128, GCMAES192, GCMAES256, or None. + IpsecEncryption *string `json:"ipsecEncryption,omitempty" tf:"ipsec_encryption,omitempty"` + + // The IPSec integrity algorithm. Valid options are GCMAES128, GCMAES192, GCMAES256, MD5, SHA1, or SHA256. + IpsecIntegrity *string `json:"ipsecIntegrity,omitempty" tf:"ipsec_integrity,omitempty"` + + // The DH group used in IKE phase 2 for new child SA. 
+ // Valid options are ECP256, ECP384, PFS1, PFS14, PFS2, PFS2048, PFS24, PFSMM, + // or None. + PfsGroup *string `json:"pfsGroup,omitempty" tf:"pfs_group,omitempty"` + + // The IPSec SA payload size in KB. Must be at least 1024 KB. Defaults to 102400000 KB. + SaDatasize *float64 `json:"saDatasize,omitempty" tf:"sa_datasize,omitempty"` + + // The IPSec SA lifetime in seconds. Must be at least 300 seconds. Defaults to 27000 seconds. + SaLifetime *float64 `json:"saLifetime,omitempty" tf:"sa_lifetime,omitempty"` +} + +type VirtualNetworkGatewayConnectionIpsecPolicyObservation struct { + + // The DH group used in IKE phase 1 for initial SA. Valid options are DHGroup1, DHGroup14, DHGroup2, DHGroup2048, DHGroup24, ECP256, ECP384, or None. + DhGroup *string `json:"dhGroup,omitempty" tf:"dh_group,omitempty"` + + // The IKE encryption algorithm. Valid options are AES128, AES192, AES256, DES, DES3, GCMAES128, or GCMAES256. + IkeEncryption *string `json:"ikeEncryption,omitempty" tf:"ike_encryption,omitempty"` + + // The IKE integrity algorithm. Valid options are GCMAES128, GCMAES256, MD5, SHA1, SHA256, or SHA384. + IkeIntegrity *string `json:"ikeIntegrity,omitempty" tf:"ike_integrity,omitempty"` + + // The IPSec encryption algorithm. Valid options are AES128, AES192, AES256, DES, DES3, GCMAES128, GCMAES192, GCMAES256, or None. + IpsecEncryption *string `json:"ipsecEncryption,omitempty" tf:"ipsec_encryption,omitempty"` + + // The IPSec integrity algorithm. Valid options are GCMAES128, GCMAES192, GCMAES256, MD5, SHA1, or SHA256. + IpsecIntegrity *string `json:"ipsecIntegrity,omitempty" tf:"ipsec_integrity,omitempty"` + + // The DH group used in IKE phase 2 for new child SA. + // Valid options are ECP256, ECP384, PFS1, PFS14, PFS2, PFS2048, PFS24, PFSMM, + // or None. + PfsGroup *string `json:"pfsGroup,omitempty" tf:"pfs_group,omitempty"` + + // The IPSec SA payload size in KB. Must be at least 1024 KB. Defaults to 102400000 KB. 
+ SaDatasize *float64 `json:"saDatasize,omitempty" tf:"sa_datasize,omitempty"` + + // The IPSec SA lifetime in seconds. Must be at least 300 seconds. Defaults to 27000 seconds. + SaLifetime *float64 `json:"saLifetime,omitempty" tf:"sa_lifetime,omitempty"` +} + +type VirtualNetworkGatewayConnectionIpsecPolicyParameters struct { + + // The DH group used in IKE phase 1 for initial SA. Valid options are DHGroup1, DHGroup14, DHGroup2, DHGroup2048, DHGroup24, ECP256, ECP384, or None. + // +kubebuilder:validation:Optional + DhGroup *string `json:"dhGroup" tf:"dh_group,omitempty"` + + // The IKE encryption algorithm. Valid options are AES128, AES192, AES256, DES, DES3, GCMAES128, or GCMAES256. + // +kubebuilder:validation:Optional + IkeEncryption *string `json:"ikeEncryption" tf:"ike_encryption,omitempty"` + + // The IKE integrity algorithm. Valid options are GCMAES128, GCMAES256, MD5, SHA1, SHA256, or SHA384. + // +kubebuilder:validation:Optional + IkeIntegrity *string `json:"ikeIntegrity" tf:"ike_integrity,omitempty"` + + // The IPSec encryption algorithm. Valid options are AES128, AES192, AES256, DES, DES3, GCMAES128, GCMAES192, GCMAES256, or None. + // +kubebuilder:validation:Optional + IpsecEncryption *string `json:"ipsecEncryption" tf:"ipsec_encryption,omitempty"` + + // The IPSec integrity algorithm. Valid options are GCMAES128, GCMAES192, GCMAES256, MD5, SHA1, or SHA256. + // +kubebuilder:validation:Optional + IpsecIntegrity *string `json:"ipsecIntegrity" tf:"ipsec_integrity,omitempty"` + + // The DH group used in IKE phase 2 for new child SA. + // Valid options are ECP256, ECP384, PFS1, PFS14, PFS2, PFS2048, PFS24, PFSMM, + // or None. + // +kubebuilder:validation:Optional + PfsGroup *string `json:"pfsGroup" tf:"pfs_group,omitempty"` + + // The IPSec SA payload size in KB. Must be at least 1024 KB. Defaults to 102400000 KB. 
+ // +kubebuilder:validation:Optional + SaDatasize *float64 `json:"saDatasize,omitempty" tf:"sa_datasize,omitempty"` + + // The IPSec SA lifetime in seconds. Must be at least 300 seconds. Defaults to 27000 seconds. + // +kubebuilder:validation:Optional + SaLifetime *float64 `json:"saLifetime,omitempty" tf:"sa_lifetime,omitempty"` +} + +type VirtualNetworkGatewayConnectionObservation struct { + + // Connection mode to use. Possible values are Default, InitiatorOnly and ResponderOnly. Defaults to Default. Changing this value will force a resource to be created. + ConnectionMode *string `json:"connectionMode,omitempty" tf:"connection_mode,omitempty"` + + // The IKE protocol version to use. Possible values are IKEv1 and IKEv2, values are IKEv1 and IKEv2. Defaults to IKEv2. Changing this forces a new resource to be created. + // -> Note: Only valid for IPSec connections on virtual network gateways with SKU VpnGw1, VpnGw2, VpnGw3, VpnGw1AZ, VpnGw2AZ or VpnGw3AZ. + ConnectionProtocol *string `json:"connectionProtocol,omitempty" tf:"connection_protocol,omitempty"` + + // A custom_bgp_addresses block which is documented below. + // The block can only be used on IPSec / activeactive connections, + // For details about see the relevant section in the Azure documentation. + CustomBGPAddresses *CustomBGPAddressesObservation `json:"customBgpAddresses,omitempty" tf:"custom_bgp_addresses,omitempty"` + + // The dead peer detection timeout of this connection in seconds. Changing this forces a new resource to be created. + DpdTimeoutSeconds *float64 `json:"dpdTimeoutSeconds,omitempty" tf:"dpd_timeout_seconds,omitempty"` + + // A list of the egress NAT Rule Ids. + // +listType=set + EgressNATRuleIds []*string `json:"egressNatRuleIds,omitempty" tf:"egress_nat_rule_ids,omitempty"` + + // If true, BGP (Border Gateway Protocol) is enabled for this connection. Defaults to false. 
+ EnableBGP *bool `json:"enableBgp,omitempty" tf:"enable_bgp,omitempty"` + + // The ID of the Express Route Circuit when creating an ExpressRoute connection (i.e. when type is ExpressRoute). The Express Route Circuit can be in the same or in a different subscription. Changing this forces a new resource to be created. + ExpressRouteCircuitID *string `json:"expressRouteCircuitId,omitempty" tf:"express_route_circuit_id,omitempty"` + + // If true, data packets will bypass ExpressRoute Gateway for data forwarding This is only valid for ExpressRoute connections. + ExpressRouteGatewayBypass *bool `json:"expressRouteGatewayBypass,omitempty" tf:"express_route_gateway_bypass,omitempty"` + + // The ID of the Virtual Network Gateway Connection. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A list of the ingress NAT Rule Ids. + // +listType=set + IngressNATRuleIds []*string `json:"ingressNatRuleIds,omitempty" tf:"ingress_nat_rule_ids,omitempty"` + + // A ipsec_policy block which is documented below. + // Only a single policy can be defined for a connection. For details on + // custom policies refer to the relevant section in the Azure documentation. + IpsecPolicy *VirtualNetworkGatewayConnectionIpsecPolicyObservation `json:"ipsecPolicy,omitempty" tf:"ipsec_policy,omitempty"` + + // Use private local Azure IP for the connection. Changing this forces a new resource to be created. + LocalAzureIPAddressEnabled *bool `json:"localAzureIpAddressEnabled,omitempty" tf:"local_azure_ip_address_enabled,omitempty"` + + // The ID of the local network gateway when creating Site-to-Site connection (i.e. when type is IPsec). + LocalNetworkGatewayID *string `json:"localNetworkGatewayId,omitempty" tf:"local_network_gateway_id,omitempty"` + + // The location/region where the connection is located. Changing this forces a new resource to be created. 
+ Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The ID of the peer virtual network gateway when creating a VNet-to-VNet connection (i.e. when type is Vnet2Vnet). The peer Virtual Network Gateway can be in the same or in a different subscription. Changing this forces a new resource to be created. + PeerVirtualNetworkGatewayID *string `json:"peerVirtualNetworkGatewayId,omitempty" tf:"peer_virtual_network_gateway_id,omitempty"` + + // The name of the resource group in which to create the connection Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // The routing weight. Defaults to 10. + RoutingWeight *float64 `json:"routingWeight,omitempty" tf:"routing_weight,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // One or more traffic_selector_policy blocks which are documented below. + // A traffic_selector_policy allows to specify a traffic selector policy proposal to be used in a virtual network gateway connection. + // For details about traffic selectors refer to the relevant section in the Azure documentation. + TrafficSelectorPolicy []TrafficSelectorPolicyObservation `json:"trafficSelectorPolicy,omitempty" tf:"traffic_selector_policy,omitempty"` + + // The type of connection. Valid options are IPsec (Site-to-Site), ExpressRoute (ExpressRoute), and Vnet2Vnet (VNet-to-VNet). Each connection type requires different mandatory arguments (refer to the examples above). Changing this forces a new resource to be created. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // If true, policy-based traffic selectors are enabled for this connection. Enabling policy-based traffic selectors requires an ipsec_policy block. Defaults to false. 
+ UsePolicyBasedTrafficSelectors *bool `json:"usePolicyBasedTrafficSelectors,omitempty" tf:"use_policy_based_traffic_selectors,omitempty"` + + // The ID of the Virtual Network Gateway in which the connection will be created. Changing this forces a new resource to be created. + VirtualNetworkGatewayID *string `json:"virtualNetworkGatewayId,omitempty" tf:"virtual_network_gateway_id,omitempty"` +} + +type VirtualNetworkGatewayConnectionParameters struct { + + // The authorization key associated with the Express Route Circuit. This field is required only if the type is an ExpressRoute connection. + // +kubebuilder:validation:Optional + AuthorizationKeySecretRef *v1.SecretKeySelector `json:"authorizationKeySecretRef,omitempty" tf:"-"` + + // Connection mode to use. Possible values are Default, InitiatorOnly and ResponderOnly. Defaults to Default. Changing this value will force a resource to be created. + // +kubebuilder:validation:Optional + ConnectionMode *string `json:"connectionMode,omitempty" tf:"connection_mode,omitempty"` + + // The IKE protocol version to use. Possible values are IKEv1 and IKEv2, values are IKEv1 and IKEv2. Defaults to IKEv2. Changing this forces a new resource to be created. + // -> Note: Only valid for IPSec connections on virtual network gateways with SKU VpnGw1, VpnGw2, VpnGw3, VpnGw1AZ, VpnGw2AZ or VpnGw3AZ. + // +kubebuilder:validation:Optional + ConnectionProtocol *string `json:"connectionProtocol,omitempty" tf:"connection_protocol,omitempty"` + + // A custom_bgp_addresses block which is documented below. + // The block can only be used on IPSec / activeactive connections, + // For details about see the relevant section in the Azure documentation. + // +kubebuilder:validation:Optional + CustomBGPAddresses *CustomBGPAddressesParameters `json:"customBgpAddresses,omitempty" tf:"custom_bgp_addresses,omitempty"` + + // The dead peer detection timeout of this connection in seconds. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + DpdTimeoutSeconds *float64 `json:"dpdTimeoutSeconds,omitempty" tf:"dpd_timeout_seconds,omitempty"` + + // A list of the egress NAT Rule Ids. + // +kubebuilder:validation:Optional + // +listType=set + EgressNATRuleIds []*string `json:"egressNatRuleIds,omitempty" tf:"egress_nat_rule_ids,omitempty"` + + // If true, BGP (Border Gateway Protocol) is enabled for this connection. Defaults to false. + // +kubebuilder:validation:Optional + EnableBGP *bool `json:"enableBgp,omitempty" tf:"enable_bgp,omitempty"` + + // The ID of the Express Route Circuit when creating an ExpressRoute connection (i.e. when type is ExpressRoute). The Express Route Circuit can be in the same or in a different subscription. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ExpressRouteCircuitID *string `json:"expressRouteCircuitId,omitempty" tf:"express_route_circuit_id,omitempty"` + + // If true, data packets will bypass ExpressRoute Gateway for data forwarding This is only valid for ExpressRoute connections. + // +kubebuilder:validation:Optional + ExpressRouteGatewayBypass *bool `json:"expressRouteGatewayBypass,omitempty" tf:"express_route_gateway_bypass,omitempty"` + + // A list of the ingress NAT Rule Ids. + // +kubebuilder:validation:Optional + // +listType=set + IngressNATRuleIds []*string `json:"ingressNatRuleIds,omitempty" tf:"ingress_nat_rule_ids,omitempty"` + + // A ipsec_policy block which is documented below. + // Only a single policy can be defined for a connection. For details on + // custom policies refer to the relevant section in the Azure documentation. + // +kubebuilder:validation:Optional + IpsecPolicy *VirtualNetworkGatewayConnectionIpsecPolicyParameters `json:"ipsecPolicy,omitempty" tf:"ipsec_policy,omitempty"` + + // Use private local Azure IP for the connection. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + LocalAzureIPAddressEnabled *bool `json:"localAzureIpAddressEnabled,omitempty" tf:"local_azure_ip_address_enabled,omitempty"` + + // The ID of the local network gateway when creating Site-to-Site connection (i.e. when type is IPsec). + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.LocalNetworkGateway + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + LocalNetworkGatewayID *string `json:"localNetworkGatewayId,omitempty" tf:"local_network_gateway_id,omitempty"` + + // Reference to a LocalNetworkGateway in network to populate localNetworkGatewayId. + // +kubebuilder:validation:Optional + LocalNetworkGatewayIDRef *v1.Reference `json:"localNetworkGatewayIdRef,omitempty" tf:"-"` + + // Selector for a LocalNetworkGateway in network to populate localNetworkGatewayId. + // +kubebuilder:validation:Optional + LocalNetworkGatewayIDSelector *v1.Selector `json:"localNetworkGatewayIdSelector,omitempty" tf:"-"` + + // The location/region where the connection is located. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The ID of the peer virtual network gateway when creating a VNet-to-VNet connection (i.e. when type is Vnet2Vnet). The peer Virtual Network Gateway can be in the same or in a different subscription. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.VirtualNetworkGateway + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + PeerVirtualNetworkGatewayID *string `json:"peerVirtualNetworkGatewayId,omitempty" tf:"peer_virtual_network_gateway_id,omitempty"` + + // Reference to a VirtualNetworkGateway in network to populate peerVirtualNetworkGatewayId. + // +kubebuilder:validation:Optional + PeerVirtualNetworkGatewayIDRef *v1.Reference `json:"peerVirtualNetworkGatewayIdRef,omitempty" tf:"-"` + + // Selector for a VirtualNetworkGateway in network to populate peerVirtualNetworkGatewayId. + // +kubebuilder:validation:Optional + PeerVirtualNetworkGatewayIDSelector *v1.Selector `json:"peerVirtualNetworkGatewayIdSelector,omitempty" tf:"-"` + + // The name of the resource group in which to create the connection Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // The routing weight. Defaults to 10. + // +kubebuilder:validation:Optional + RoutingWeight *float64 `json:"routingWeight,omitempty" tf:"routing_weight,omitempty"` + + // The shared IPSec key. A key could be provided if a Site-to-Site, VNet-to-VNet or ExpressRoute connection is created. 
+ // +kubebuilder:validation:Optional + SharedKeySecretRef *v1.SecretKeySelector `json:"sharedKeySecretRef,omitempty" tf:"-"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // One or more traffic_selector_policy blocks which are documented below. + // A traffic_selector_policy allows to specify a traffic selector policy proposal to be used in a virtual network gateway connection. + // For details about traffic selectors refer to the relevant section in the Azure documentation. + // +kubebuilder:validation:Optional + TrafficSelectorPolicy []TrafficSelectorPolicyParameters `json:"trafficSelectorPolicy,omitempty" tf:"traffic_selector_policy,omitempty"` + + // The type of connection. Valid options are IPsec (Site-to-Site), ExpressRoute (ExpressRoute), and Vnet2Vnet (VNet-to-VNet). Each connection type requires different mandatory arguments (refer to the examples above). Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // If true, policy-based traffic selectors are enabled for this connection. Enabling policy-based traffic selectors requires an ipsec_policy block. Defaults to false. + // +kubebuilder:validation:Optional + UsePolicyBasedTrafficSelectors *bool `json:"usePolicyBasedTrafficSelectors,omitempty" tf:"use_policy_based_traffic_selectors,omitempty"` + + // The ID of the Virtual Network Gateway in which the connection will be created. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.VirtualNetworkGateway + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + VirtualNetworkGatewayID *string `json:"virtualNetworkGatewayId,omitempty" tf:"virtual_network_gateway_id,omitempty"` + + // Reference to a VirtualNetworkGateway in network to populate virtualNetworkGatewayId. + // +kubebuilder:validation:Optional + VirtualNetworkGatewayIDRef *v1.Reference `json:"virtualNetworkGatewayIdRef,omitempty" tf:"-"` + + // Selector for a VirtualNetworkGateway in network to populate virtualNetworkGatewayId. + // +kubebuilder:validation:Optional + VirtualNetworkGatewayIDSelector *v1.Selector `json:"virtualNetworkGatewayIdSelector,omitempty" tf:"-"` +} + +// VirtualNetworkGatewayConnectionSpec defines the desired state of VirtualNetworkGatewayConnection +type VirtualNetworkGatewayConnectionSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider VirtualNetworkGatewayConnectionParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider VirtualNetworkGatewayConnectionInitParameters `json:"initProvider,omitempty"` +} + +// VirtualNetworkGatewayConnectionStatus defines the observed state of VirtualNetworkGatewayConnection. 
+type VirtualNetworkGatewayConnectionStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider VirtualNetworkGatewayConnectionObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// VirtualNetworkGatewayConnection is the Schema for the VirtualNetworkGatewayConnections API. Manages a connection in an existing Virtual Network Gateway. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type VirtualNetworkGatewayConnection struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.type) || (has(self.initProvider) && has(self.initProvider.type))",message="spec.forProvider.type is a required parameter" + Spec VirtualNetworkGatewayConnectionSpec `json:"spec"` + Status VirtualNetworkGatewayConnectionStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// VirtualNetworkGatewayConnectionList contains a list of VirtualNetworkGatewayConnections +type VirtualNetworkGatewayConnectionList struct { 
+ metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []VirtualNetworkGatewayConnection `json:"items"` +} + +// Repository type metadata. +var ( + VirtualNetworkGatewayConnection_Kind = "VirtualNetworkGatewayConnection" + VirtualNetworkGatewayConnection_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: VirtualNetworkGatewayConnection_Kind}.String() + VirtualNetworkGatewayConnection_KindAPIVersion = VirtualNetworkGatewayConnection_Kind + "." + CRDGroupVersion.String() + VirtualNetworkGatewayConnection_GroupVersionKind = CRDGroupVersion.WithKind(VirtualNetworkGatewayConnection_Kind) +) + +func init() { + SchemeBuilder.Register(&VirtualNetworkGatewayConnection{}, &VirtualNetworkGatewayConnectionList{}) +} diff --git a/apis/network/v1beta2/zz_vpngateway_terraformed.go b/apis/network/v1beta2/zz_vpngateway_terraformed.go new file mode 100755 index 000000000..3d7235b69 --- /dev/null +++ b/apis/network/v1beta2/zz_vpngateway_terraformed.go @@ -0,0 +1,130 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this VPNGateway +func (mg *VPNGateway) GetTerraformResourceType() string { + return "azurerm_vpn_gateway" +} + +// GetConnectionDetailsMapping for this VPNGateway +func (tr *VPNGateway) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this VPNGateway +func (tr *VPNGateway) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this VPNGateway +func (tr *VPNGateway) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this VPNGateway +func (tr *VPNGateway) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this VPNGateway +func (tr *VPNGateway) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this VPNGateway +func (tr *VPNGateway) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this VPNGateway +func (tr *VPNGateway) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, 
json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this VPNGateway +func (tr *VPNGateway) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this VPNGateway using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *VPNGateway) LateInitialize(attrs []byte) (bool, error) { + params := &VPNGatewayParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + opts = append(opts, resource.WithNameFilter("BGPSettings")) + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *VPNGateway) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/network/v1beta2/zz_vpngateway_types.go b/apis/network/v1beta2/zz_vpngateway_types.go new file mode 100755 index 000000000..77080a8f3 --- /dev/null +++ b/apis/network/v1beta2/zz_vpngateway_types.go @@ -0,0 +1,314 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type Instance0BGPPeeringAddressInitParameters struct { + + // A list of custom BGP peering addresses to assign to this instance. + // +listType=set + CustomIps []*string `json:"customIps,omitempty" tf:"custom_ips,omitempty"` +} + +type Instance0BGPPeeringAddressObservation struct { + + // A list of custom BGP peering addresses to assign to this instance. + // +listType=set + CustomIps []*string `json:"customIps,omitempty" tf:"custom_ips,omitempty"` + + // The list of default BGP peering addresses which belong to the pre-defined VPN Gateway IP configuration. + // +listType=set + DefaultIps []*string `json:"defaultIps,omitempty" tf:"default_ips,omitempty"` + + // The pre-defined id of VPN Gateway IP Configuration. + IPConfigurationID *string `json:"ipConfigurationId,omitempty" tf:"ip_configuration_id,omitempty"` + + // The list of tunnel public IP addresses which belong to the pre-defined VPN Gateway IP configuration. + // +listType=set + TunnelIps []*string `json:"tunnelIps,omitempty" tf:"tunnel_ips,omitempty"` +} + +type Instance0BGPPeeringAddressParameters struct { + + // A list of custom BGP peering addresses to assign to this instance. 
+ // +kubebuilder:validation:Optional + // +listType=set + CustomIps []*string `json:"customIps" tf:"custom_ips,omitempty"` +} + +type Instance1BGPPeeringAddressInitParameters struct { + + // A list of custom BGP peering addresses to assign to this instance. + // +listType=set + CustomIps []*string `json:"customIps,omitempty" tf:"custom_ips,omitempty"` +} + +type Instance1BGPPeeringAddressObservation struct { + + // A list of custom BGP peering addresses to assign to this instance. + // +listType=set + CustomIps []*string `json:"customIps,omitempty" tf:"custom_ips,omitempty"` + + // The list of default BGP peering addresses which belong to the pre-defined VPN Gateway IP configuration. + // +listType=set + DefaultIps []*string `json:"defaultIps,omitempty" tf:"default_ips,omitempty"` + + // The pre-defined id of VPN Gateway IP Configuration. + IPConfigurationID *string `json:"ipConfigurationId,omitempty" tf:"ip_configuration_id,omitempty"` + + // The list of tunnel public IP addresses which belong to the pre-defined VPN Gateway IP configuration. + // +listType=set + TunnelIps []*string `json:"tunnelIps,omitempty" tf:"tunnel_ips,omitempty"` +} + +type Instance1BGPPeeringAddressParameters struct { + + // A list of custom BGP peering addresses to assign to this instance. + // +kubebuilder:validation:Optional + // +listType=set + CustomIps []*string `json:"customIps" tf:"custom_ips,omitempty"` +} + +type VPNGatewayBGPSettingsInitParameters struct { + + // The ASN of the BGP Speaker. Changing this forces a new resource to be created. + Asn *float64 `json:"asn,omitempty" tf:"asn,omitempty"` + + // An instance_bgp_peering_address block as defined below. + Instance0BGPPeeringAddress *Instance0BGPPeeringAddressInitParameters `json:"instance0BgpPeeringAddress,omitempty" tf:"instance_0_bgp_peering_address,omitempty"` + + // An instance_bgp_peering_address block as defined below. 
+ Instance1BGPPeeringAddress *Instance1BGPPeeringAddressInitParameters `json:"instance1BgpPeeringAddress,omitempty" tf:"instance_1_bgp_peering_address,omitempty"` + + // The weight added to Routes learned from this BGP Speaker. Changing this forces a new resource to be created. + PeerWeight *float64 `json:"peerWeight,omitempty" tf:"peer_weight,omitempty"` +} + +type VPNGatewayBGPSettingsObservation struct { + + // The ASN of the BGP Speaker. Changing this forces a new resource to be created. + Asn *float64 `json:"asn,omitempty" tf:"asn,omitempty"` + + // The Address which should be used for the BGP Peering. + BGPPeeringAddress *string `json:"bgpPeeringAddress,omitempty" tf:"bgp_peering_address,omitempty"` + + // An instance_bgp_peering_address block as defined below. + Instance0BGPPeeringAddress *Instance0BGPPeeringAddressObservation `json:"instance0BgpPeeringAddress,omitempty" tf:"instance_0_bgp_peering_address,omitempty"` + + // An instance_bgp_peering_address block as defined below. + Instance1BGPPeeringAddress *Instance1BGPPeeringAddressObservation `json:"instance1BgpPeeringAddress,omitempty" tf:"instance_1_bgp_peering_address,omitempty"` + + // The weight added to Routes learned from this BGP Speaker. Changing this forces a new resource to be created. + PeerWeight *float64 `json:"peerWeight,omitempty" tf:"peer_weight,omitempty"` +} + +type VPNGatewayBGPSettingsParameters struct { + + // The ASN of the BGP Speaker. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Asn *float64 `json:"asn" tf:"asn,omitempty"` + + // An instance_bgp_peering_address block as defined below. + // +kubebuilder:validation:Optional + Instance0BGPPeeringAddress *Instance0BGPPeeringAddressParameters `json:"instance0BgpPeeringAddress,omitempty" tf:"instance_0_bgp_peering_address,omitempty"` + + // An instance_bgp_peering_address block as defined below. 
+ // +kubebuilder:validation:Optional + Instance1BGPPeeringAddress *Instance1BGPPeeringAddressParameters `json:"instance1BgpPeeringAddress,omitempty" tf:"instance_1_bgp_peering_address,omitempty"` + + // The weight added to Routes learned from this BGP Speaker. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + PeerWeight *float64 `json:"peerWeight" tf:"peer_weight,omitempty"` +} + +type VPNGatewayInitParameters struct { + + // Is BGP route translation for NAT on this VPN Gateway enabled? Defaults to false. + BGPRouteTranslationForNATEnabled *bool `json:"bgpRouteTranslationForNatEnabled,omitempty" tf:"bgp_route_translation_for_nat_enabled,omitempty"` + + // A bgp_settings block as defined below. + BGPSettings *VPNGatewayBGPSettingsInitParameters `json:"bgpSettings,omitempty" tf:"bgp_settings,omitempty"` + + // The Azure location where this VPN Gateway should be created. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Azure routing preference lets you to choose how your traffic routes between Azure and the internet. You can choose to route traffic either via the Microsoft network (default value, Microsoft Network), or via the ISP network (public internet, set to Internet). More context of the configuration can be found in the Microsoft Docs to create a VPN Gateway. Changing this forces a new resource to be created. + RoutingPreference *string `json:"routingPreference,omitempty" tf:"routing_preference,omitempty"` + + // The Scale Unit for this VPN Gateway. Defaults to 1. + ScaleUnit *float64 `json:"scaleUnit,omitempty" tf:"scale_unit,omitempty"` + + // A mapping of tags to assign to the VPN Gateway. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The ID of the Virtual Hub within which this VPN Gateway should be created. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.VirtualHub + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + VirtualHubID *string `json:"virtualHubId,omitempty" tf:"virtual_hub_id,omitempty"` + + // Reference to a VirtualHub in network to populate virtualHubId. + // +kubebuilder:validation:Optional + VirtualHubIDRef *v1.Reference `json:"virtualHubIdRef,omitempty" tf:"-"` + + // Selector for a VirtualHub in network to populate virtualHubId. + // +kubebuilder:validation:Optional + VirtualHubIDSelector *v1.Selector `json:"virtualHubIdSelector,omitempty" tf:"-"` +} + +type VPNGatewayObservation struct { + + // Is BGP route translation for NAT on this VPN Gateway enabled? Defaults to false. + BGPRouteTranslationForNATEnabled *bool `json:"bgpRouteTranslationForNatEnabled,omitempty" tf:"bgp_route_translation_for_nat_enabled,omitempty"` + + // A bgp_settings block as defined below. + BGPSettings *VPNGatewayBGPSettingsObservation `json:"bgpSettings,omitempty" tf:"bgp_settings,omitempty"` + + // The ID of the VPN Gateway. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The Azure location where this VPN Gateway should be created. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The Name of the Resource Group in which this VPN Gateway should be created. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Azure routing preference lets you to choose how your traffic routes between Azure and the internet. You can choose to route traffic either via the Microsoft network (default value, Microsoft Network), or via the ISP network (public internet, set to Internet). More context of the configuration can be found in the Microsoft Docs to create a VPN Gateway. 
Changing this forces a new resource to be created. + RoutingPreference *string `json:"routingPreference,omitempty" tf:"routing_preference,omitempty"` + + // The Scale Unit for this VPN Gateway. Defaults to 1. + ScaleUnit *float64 `json:"scaleUnit,omitempty" tf:"scale_unit,omitempty"` + + // A mapping of tags to assign to the VPN Gateway. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The ID of the Virtual Hub within which this VPN Gateway should be created. Changing this forces a new resource to be created. + VirtualHubID *string `json:"virtualHubId,omitempty" tf:"virtual_hub_id,omitempty"` +} + +type VPNGatewayParameters struct { + + // Is BGP route translation for NAT on this VPN Gateway enabled? Defaults to false. + // +kubebuilder:validation:Optional + BGPRouteTranslationForNATEnabled *bool `json:"bgpRouteTranslationForNatEnabled,omitempty" tf:"bgp_route_translation_for_nat_enabled,omitempty"` + + // A bgp_settings block as defined below. + // +kubebuilder:validation:Optional + BGPSettings *VPNGatewayBGPSettingsParameters `json:"bgpSettings,omitempty" tf:"bgp_settings,omitempty"` + + // The Azure location where this VPN Gateway should be created. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The Name of the Resource Group in which this VPN Gateway should be created. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. 
+ // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // Azure routing preference lets you to choose how your traffic routes between Azure and the internet. You can choose to route traffic either via the Microsoft network (default value, Microsoft Network), or via the ISP network (public internet, set to Internet). More context of the configuration can be found in the Microsoft Docs to create a VPN Gateway. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + RoutingPreference *string `json:"routingPreference,omitempty" tf:"routing_preference,omitempty"` + + // The Scale Unit for this VPN Gateway. Defaults to 1. + // +kubebuilder:validation:Optional + ScaleUnit *float64 `json:"scaleUnit,omitempty" tf:"scale_unit,omitempty"` + + // A mapping of tags to assign to the VPN Gateway. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The ID of the Virtual Hub within which this VPN Gateway should be created. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.VirtualHub + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + VirtualHubID *string `json:"virtualHubId,omitempty" tf:"virtual_hub_id,omitempty"` + + // Reference to a VirtualHub in network to populate virtualHubId. + // +kubebuilder:validation:Optional + VirtualHubIDRef *v1.Reference `json:"virtualHubIdRef,omitempty" tf:"-"` + + // Selector for a VirtualHub in network to populate virtualHubId. 
+ // +kubebuilder:validation:Optional + VirtualHubIDSelector *v1.Selector `json:"virtualHubIdSelector,omitempty" tf:"-"` +} + +// VPNGatewaySpec defines the desired state of VPNGateway +type VPNGatewaySpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider VPNGatewayParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider VPNGatewayInitParameters `json:"initProvider,omitempty"` +} + +// VPNGatewayStatus defines the observed state of VPNGateway. +type VPNGatewayStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider VPNGatewayObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// VPNGateway is the Schema for the VPNGateways API. Manages a VPN Gateway within a Virtual Hub. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type VPNGateway struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + Spec VPNGatewaySpec `json:"spec"` + Status VPNGatewayStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// VPNGatewayList contains a list of VPNGateways +type VPNGatewayList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []VPNGateway `json:"items"` +} + +// Repository type metadata. +var ( + VPNGateway_Kind = "VPNGateway" + VPNGateway_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: VPNGateway_Kind}.String() + VPNGateway_KindAPIVersion = VPNGateway_Kind + "." 
+ CRDGroupVersion.String() + VPNGateway_GroupVersionKind = CRDGroupVersion.WithKind(VPNGateway_Kind) +) + +func init() { + SchemeBuilder.Register(&VPNGateway{}, &VPNGatewayList{}) +} diff --git a/apis/network/v1beta2/zz_vpngatewayconnection_terraformed.go b/apis/network/v1beta2/zz_vpngatewayconnection_terraformed.go new file mode 100755 index 000000000..4dd4481c1 --- /dev/null +++ b/apis/network/v1beta2/zz_vpngatewayconnection_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this VPNGatewayConnection +func (mg *VPNGatewayConnection) GetTerraformResourceType() string { + return "azurerm_vpn_gateway_connection" +} + +// GetConnectionDetailsMapping for this VPNGatewayConnection +func (tr *VPNGatewayConnection) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this VPNGatewayConnection +func (tr *VPNGatewayConnection) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this VPNGatewayConnection +func (tr *VPNGatewayConnection) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this VPNGatewayConnection +func (tr *VPNGatewayConnection) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this VPNGatewayConnection +func 
(tr *VPNGatewayConnection) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this VPNGatewayConnection +func (tr *VPNGatewayConnection) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this VPNGatewayConnection +func (tr *VPNGatewayConnection) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this VPNGatewayConnection +func (tr *VPNGatewayConnection) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this VPNGatewayConnection using its observed tfState. 
+// returns True if there are any spec changes for the resource. +func (tr *VPNGatewayConnection) LateInitialize(attrs []byte) (bool, error) { + params := &VPNGatewayConnectionParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *VPNGatewayConnection) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/network/v1beta2/zz_vpngatewayconnection_types.go b/apis/network/v1beta2/zz_vpngatewayconnection_types.go new file mode 100755 index 000000000..60fcfbb42 --- /dev/null +++ b/apis/network/v1beta2/zz_vpngatewayconnection_types.go @@ -0,0 +1,573 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CustomBGPAddressInitParameters struct { + + // The custom bgp ip address which belongs to the IP Configuration. + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // The ID of the IP Configuration which belongs to the VPN Gateway. + IPConfigurationID *string `json:"ipConfigurationId,omitempty" tf:"ip_configuration_id,omitempty"` +} + +type CustomBGPAddressObservation struct { + + // The custom bgp ip address which belongs to the IP Configuration. + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // The ID of the IP Configuration which belongs to the VPN Gateway. 
+ IPConfigurationID *string `json:"ipConfigurationId,omitempty" tf:"ip_configuration_id,omitempty"` +} + +type CustomBGPAddressParameters struct { + + // The custom bgp ip address which belongs to the IP Configuration. + // +kubebuilder:validation:Optional + IPAddress *string `json:"ipAddress" tf:"ip_address,omitempty"` + + // The ID of the IP Configuration which belongs to the VPN Gateway. + // +kubebuilder:validation:Optional + IPConfigurationID *string `json:"ipConfigurationId" tf:"ip_configuration_id,omitempty"` +} + +type VPNGatewayConnectionInitParameters struct { + + // Whether Internet Security is enabled for this VPN Connection. Defaults to false. + InternetSecurityEnabled *bool `json:"internetSecurityEnabled,omitempty" tf:"internet_security_enabled,omitempty"` + + // The ID of the remote VPN Site, which will connect to the VPN Gateway. Changing this forces a new VPN Gateway Connection to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.VPNSite + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + RemoteVPNSiteID *string `json:"remoteVpnSiteId,omitempty" tf:"remote_vpn_site_id,omitempty"` + + // Reference to a VPNSite in network to populate remoteVpnSiteId. + // +kubebuilder:validation:Optional + RemoteVPNSiteIDRef *v1.Reference `json:"remoteVpnSiteIdRef,omitempty" tf:"-"` + + // Selector for a VPNSite in network to populate remoteVpnSiteId. + // +kubebuilder:validation:Optional + RemoteVPNSiteIDSelector *v1.Selector `json:"remoteVpnSiteIdSelector,omitempty" tf:"-"` + + // A routing block as defined below. If this is not specified, there will be a default route table created implicitly. + Routing *VPNGatewayConnectionRoutingInitParameters `json:"routing,omitempty" tf:"routing,omitempty"` + + // One or more traffic_selector_policy blocks as defined below. 
+ TrafficSelectorPolicy []VPNGatewayConnectionTrafficSelectorPolicyInitParameters `json:"trafficSelectorPolicy,omitempty" tf:"traffic_selector_policy,omitempty"` + + // One or more vpn_link blocks as defined below. + VPNLink []VPNLinkInitParameters `json:"vpnLink,omitempty" tf:"vpn_link,omitempty"` +} + +type VPNGatewayConnectionObservation struct { + + // The ID of the VPN Gateway Connection. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Whether Internet Security is enabled for this VPN Connection. Defaults to false. + InternetSecurityEnabled *bool `json:"internetSecurityEnabled,omitempty" tf:"internet_security_enabled,omitempty"` + + // The ID of the remote VPN Site, which will connect to the VPN Gateway. Changing this forces a new VPN Gateway Connection to be created. + RemoteVPNSiteID *string `json:"remoteVpnSiteId,omitempty" tf:"remote_vpn_site_id,omitempty"` + + // A routing block as defined below. If this is not specified, there will be a default route table created implicitly. + Routing *VPNGatewayConnectionRoutingObservation `json:"routing,omitempty" tf:"routing,omitempty"` + + // One or more traffic_selector_policy blocks as defined below. + TrafficSelectorPolicy []VPNGatewayConnectionTrafficSelectorPolicyObservation `json:"trafficSelectorPolicy,omitempty" tf:"traffic_selector_policy,omitempty"` + + // The ID of the VPN Gateway that this VPN Gateway Connection belongs to. Changing this forces a new VPN Gateway Connection to be created. + VPNGatewayID *string `json:"vpnGatewayId,omitempty" tf:"vpn_gateway_id,omitempty"` + + // One or more vpn_link blocks as defined below. + VPNLink []VPNLinkObservation `json:"vpnLink,omitempty" tf:"vpn_link,omitempty"` +} + +type VPNGatewayConnectionParameters struct { + + // Whether Internet Security is enabled for this VPN Connection. Defaults to false. 
+ // +kubebuilder:validation:Optional + InternetSecurityEnabled *bool `json:"internetSecurityEnabled,omitempty" tf:"internet_security_enabled,omitempty"` + + // The ID of the remote VPN Site, which will connect to the VPN Gateway. Changing this forces a new VPN Gateway Connection to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.VPNSite + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + RemoteVPNSiteID *string `json:"remoteVpnSiteId,omitempty" tf:"remote_vpn_site_id,omitempty"` + + // Reference to a VPNSite in network to populate remoteVpnSiteId. + // +kubebuilder:validation:Optional + RemoteVPNSiteIDRef *v1.Reference `json:"remoteVpnSiteIdRef,omitempty" tf:"-"` + + // Selector for a VPNSite in network to populate remoteVpnSiteId. + // +kubebuilder:validation:Optional + RemoteVPNSiteIDSelector *v1.Selector `json:"remoteVpnSiteIdSelector,omitempty" tf:"-"` + + // A routing block as defined below. If this is not specified, there will be a default route table created implicitly. + // +kubebuilder:validation:Optional + Routing *VPNGatewayConnectionRoutingParameters `json:"routing,omitempty" tf:"routing,omitempty"` + + // One or more traffic_selector_policy blocks as defined below. + // +kubebuilder:validation:Optional + TrafficSelectorPolicy []VPNGatewayConnectionTrafficSelectorPolicyParameters `json:"trafficSelectorPolicy,omitempty" tf:"traffic_selector_policy,omitempty"` + + // The ID of the VPN Gateway that this VPN Gateway Connection belongs to. Changing this forces a new VPN Gateway Connection to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.VPNGateway + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + VPNGatewayID *string `json:"vpnGatewayId,omitempty" tf:"vpn_gateway_id,omitempty"` + + // Reference to a VPNGateway in network to populate vpnGatewayId. + // +kubebuilder:validation:Optional + VPNGatewayIDRef *v1.Reference `json:"vpnGatewayIdRef,omitempty" tf:"-"` + + // Selector for a VPNGateway in network to populate vpnGatewayId. + // +kubebuilder:validation:Optional + VPNGatewayIDSelector *v1.Selector `json:"vpnGatewayIdSelector,omitempty" tf:"-"` + + // One or more vpn_link blocks as defined below. + // +kubebuilder:validation:Optional + VPNLink []VPNLinkParameters `json:"vpnLink,omitempty" tf:"vpn_link,omitempty"` +} + +type VPNGatewayConnectionRoutingInitParameters struct { + + // The ID of the Route Table associated with this VPN Connection. + AssociatedRouteTable *string `json:"associatedRouteTable,omitempty" tf:"associated_route_table,omitempty"` + + // The resource ID of the Route Map associated with this Routing Configuration for inbound learned routes. + InboundRouteMapID *string `json:"inboundRouteMapId,omitempty" tf:"inbound_route_map_id,omitempty"` + + // The resource ID of the Route Map associated with this Routing Configuration for outbound advertised routes. + OutboundRouteMapID *string `json:"outboundRouteMapId,omitempty" tf:"outbound_route_map_id,omitempty"` + + // A propagated_route_table block as defined below. + PropagatedRouteTable *VPNGatewayConnectionRoutingPropagatedRouteTableInitParameters `json:"propagatedRouteTable,omitempty" tf:"propagated_route_table,omitempty"` +} + +type VPNGatewayConnectionRoutingObservation struct { + + // The ID of the Route Table associated with this VPN Connection. 
+ AssociatedRouteTable *string `json:"associatedRouteTable,omitempty" tf:"associated_route_table,omitempty"` + + // The resource ID of the Route Map associated with this Routing Configuration for inbound learned routes. + InboundRouteMapID *string `json:"inboundRouteMapId,omitempty" tf:"inbound_route_map_id,omitempty"` + + // The resource ID of the Route Map associated with this Routing Configuration for outbound advertised routes. + OutboundRouteMapID *string `json:"outboundRouteMapId,omitempty" tf:"outbound_route_map_id,omitempty"` + + // A propagated_route_table block as defined below. + PropagatedRouteTable *VPNGatewayConnectionRoutingPropagatedRouteTableObservation `json:"propagatedRouteTable,omitempty" tf:"propagated_route_table,omitempty"` +} + +type VPNGatewayConnectionRoutingParameters struct { + + // The ID of the Route Table associated with this VPN Connection. + // +kubebuilder:validation:Optional + AssociatedRouteTable *string `json:"associatedRouteTable" tf:"associated_route_table,omitempty"` + + // The resource ID of the Route Map associated with this Routing Configuration for inbound learned routes. + // +kubebuilder:validation:Optional + InboundRouteMapID *string `json:"inboundRouteMapId,omitempty" tf:"inbound_route_map_id,omitempty"` + + // The resource ID of the Route Map associated with this Routing Configuration for outbound advertised routes. + // +kubebuilder:validation:Optional + OutboundRouteMapID *string `json:"outboundRouteMapId,omitempty" tf:"outbound_route_map_id,omitempty"` + + // A propagated_route_table block as defined below. + // +kubebuilder:validation:Optional + PropagatedRouteTable *VPNGatewayConnectionRoutingPropagatedRouteTableParameters `json:"propagatedRouteTable,omitempty" tf:"propagated_route_table,omitempty"` +} + +type VPNGatewayConnectionRoutingPropagatedRouteTableInitParameters struct { + + // A list of labels to assign to this route table. 
+ // +listType=set + Labels []*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // A list of Route Table IDs to associated with this VPN Gateway Connection. + RouteTableIds []*string `json:"routeTableIds,omitempty" tf:"route_table_ids,omitempty"` +} + +type VPNGatewayConnectionRoutingPropagatedRouteTableObservation struct { + + // A list of labels to assign to this route table. + // +listType=set + Labels []*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // A list of Route Table IDs to associated with this VPN Gateway Connection. + RouteTableIds []*string `json:"routeTableIds,omitempty" tf:"route_table_ids,omitempty"` +} + +type VPNGatewayConnectionRoutingPropagatedRouteTableParameters struct { + + // A list of labels to assign to this route table. + // +kubebuilder:validation:Optional + // +listType=set + Labels []*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // A list of Route Table IDs to associated with this VPN Gateway Connection. + // +kubebuilder:validation:Optional + RouteTableIds []*string `json:"routeTableIds" tf:"route_table_ids,omitempty"` +} + +type VPNGatewayConnectionTrafficSelectorPolicyInitParameters struct { + + // A list of local address spaces in CIDR format for this VPN Gateway Connection. + // +listType=set + LocalAddressRanges []*string `json:"localAddressRanges,omitempty" tf:"local_address_ranges,omitempty"` + + // A list of remote address spaces in CIDR format for this VPN Gateway Connection. + // +listType=set + RemoteAddressRanges []*string `json:"remoteAddressRanges,omitempty" tf:"remote_address_ranges,omitempty"` +} + +type VPNGatewayConnectionTrafficSelectorPolicyObservation struct { + + // A list of local address spaces in CIDR format for this VPN Gateway Connection. + // +listType=set + LocalAddressRanges []*string `json:"localAddressRanges,omitempty" tf:"local_address_ranges,omitempty"` + + // A list of remote address spaces in CIDR format for this VPN Gateway Connection. 
+ // +listType=set + RemoteAddressRanges []*string `json:"remoteAddressRanges,omitempty" tf:"remote_address_ranges,omitempty"` +} + +type VPNGatewayConnectionTrafficSelectorPolicyParameters struct { + + // A list of local address spaces in CIDR format for this VPN Gateway Connection. + // +kubebuilder:validation:Optional + // +listType=set + LocalAddressRanges []*string `json:"localAddressRanges" tf:"local_address_ranges,omitempty"` + + // A list of remote address spaces in CIDR format for this VPN Gateway Connection. + // +kubebuilder:validation:Optional + // +listType=set + RemoteAddressRanges []*string `json:"remoteAddressRanges" tf:"remote_address_ranges,omitempty"` +} + +type VPNLinkInitParameters struct { + + // Should the BGP be enabled? Defaults to false. Changing this forces a new VPN Gateway Connection to be created. + BGPEnabled *bool `json:"bgpEnabled,omitempty" tf:"bgp_enabled,omitempty"` + + // The expected connection bandwidth in MBPS. Defaults to 10. + BandwidthMbps *float64 `json:"bandwidthMbps,omitempty" tf:"bandwidth_mbps,omitempty"` + + // The connection mode of this VPN Link. Possible values are Default, InitiatorOnly and ResponderOnly. Defaults to Default. + ConnectionMode *string `json:"connectionMode,omitempty" tf:"connection_mode,omitempty"` + + // One or more custom_bgp_address blocks as defined below. + CustomBGPAddress []CustomBGPAddressInitParameters `json:"customBgpAddress,omitempty" tf:"custom_bgp_address,omitempty"` + + // A list of the egress NAT Rule Ids. + // +listType=set + EgressNATRuleIds []*string `json:"egressNatRuleIds,omitempty" tf:"egress_nat_rule_ids,omitempty"` + + // A list of the ingress NAT Rule Ids. + // +listType=set + IngressNATRuleIds []*string `json:"ingressNatRuleIds,omitempty" tf:"ingress_nat_rule_ids,omitempty"` + + // One or more ipsec_policy blocks as defined above. 
+ IpsecPolicy []VPNLinkIpsecPolicyInitParameters `json:"ipsecPolicy,omitempty" tf:"ipsec_policy,omitempty"` + + // Whether to use local Azure IP to initiate connection? Defaults to false. + LocalAzureIPAddressEnabled *bool `json:"localAzureIpAddressEnabled,omitempty" tf:"local_azure_ip_address_enabled,omitempty"` + + // The name which should be used for this VPN Link Connection. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Whether to enable policy-based traffic selectors? Defaults to false. + PolicyBasedTrafficSelectorEnabled *bool `json:"policyBasedTrafficSelectorEnabled,omitempty" tf:"policy_based_traffic_selector_enabled,omitempty"` + + // The protocol used for this VPN Link Connection. Possible values are IKEv1 and IKEv2. Defaults to IKEv2. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // Should the rate limit be enabled? Defaults to false. + RatelimitEnabled *bool `json:"ratelimitEnabled,omitempty" tf:"ratelimit_enabled,omitempty"` + + // Routing weight for this VPN Link Connection. Defaults to 0. + RouteWeight *float64 `json:"routeWeight,omitempty" tf:"route_weight,omitempty"` + + // SharedKey for this VPN Link Connection. + SharedKey *string `json:"sharedKey,omitempty" tf:"shared_key,omitempty"` + + // The ID of the connected VPN Site Link. Changing this forces a new VPN Gateway Connection to be created. + VPNSiteLinkID *string `json:"vpnSiteLinkId,omitempty" tf:"vpn_site_link_id,omitempty"` +} + +type VPNLinkIpsecPolicyInitParameters struct { + + // The DH Group used in IKE Phase 1 for initial SA. Possible values are None, DHGroup1, DHGroup2, DHGroup14, DHGroup24, DHGroup2048, ECP256, ECP384. + DhGroup *string `json:"dhGroup,omitempty" tf:"dh_group,omitempty"` + + // The IPSec encryption algorithm (IKE phase 1). Possible values are AES128, AES192, AES256, DES, DES3, GCMAES128, GCMAES192, GCMAES256, None. 
+ EncryptionAlgorithm *string `json:"encryptionAlgorithm,omitempty" tf:"encryption_algorithm,omitempty"` + + // The IKE encryption algorithm (IKE phase 2). Possible values are DES, DES3, AES128, AES192, AES256, GCMAES128, GCMAES256. + IkeEncryptionAlgorithm *string `json:"ikeEncryptionAlgorithm,omitempty" tf:"ike_encryption_algorithm,omitempty"` + + // The IKE integrity algorithm (IKE phase 2). Possible values are MD5, SHA1, SHA256, SHA384, GCMAES128, GCMAES256. + IkeIntegrityAlgorithm *string `json:"ikeIntegrityAlgorithm,omitempty" tf:"ike_integrity_algorithm,omitempty"` + + // The IPSec integrity algorithm (IKE phase 1). Possible values are MD5, SHA1, SHA256, GCMAES128, GCMAES192, GCMAES256. + IntegrityAlgorithm *string `json:"integrityAlgorithm,omitempty" tf:"integrity_algorithm,omitempty"` + + // The Pfs Group used in IKE Phase 2 for the new child SA. Possible values are None, PFS1, PFS2, PFS14, PFS24, PFS2048, PFSMM, ECP256, ECP384. + PfsGroup *string `json:"pfsGroup,omitempty" tf:"pfs_group,omitempty"` + + // The IPSec Security Association (also called Quick Mode or Phase 2 SA) payload size in KB for the site to site VPN tunnel. + SaDataSizeKb *float64 `json:"saDataSizeKb,omitempty" tf:"sa_data_size_kb,omitempty"` + + // The IPSec Security Association (also called Quick Mode or Phase 2 SA) lifetime in seconds for the site to site VPN tunnel. + SaLifetimeSec *float64 `json:"saLifetimeSec,omitempty" tf:"sa_lifetime_sec,omitempty"` +} + +type VPNLinkIpsecPolicyObservation struct { + + // The DH Group used in IKE Phase 1 for initial SA. Possible values are None, DHGroup1, DHGroup2, DHGroup14, DHGroup24, DHGroup2048, ECP256, ECP384. + DhGroup *string `json:"dhGroup,omitempty" tf:"dh_group,omitempty"` + + // The IPSec encryption algorithm (IKE phase 1). Possible values are AES128, AES192, AES256, DES, DES3, GCMAES128, GCMAES192, GCMAES256, None. 
+ EncryptionAlgorithm *string `json:"encryptionAlgorithm,omitempty" tf:"encryption_algorithm,omitempty"` + + // The IKE encryption algorithm (IKE phase 2). Possible values are DES, DES3, AES128, AES192, AES256, GCMAES128, GCMAES256. + IkeEncryptionAlgorithm *string `json:"ikeEncryptionAlgorithm,omitempty" tf:"ike_encryption_algorithm,omitempty"` + + // The IKE integrity algorithm (IKE phase 2). Possible values are MD5, SHA1, SHA256, SHA384, GCMAES128, GCMAES256. + IkeIntegrityAlgorithm *string `json:"ikeIntegrityAlgorithm,omitempty" tf:"ike_integrity_algorithm,omitempty"` + + // The IPSec integrity algorithm (IKE phase 1). Possible values are MD5, SHA1, SHA256, GCMAES128, GCMAES192, GCMAES256. + IntegrityAlgorithm *string `json:"integrityAlgorithm,omitempty" tf:"integrity_algorithm,omitempty"` + + // The Pfs Group used in IKE Phase 2 for the new child SA. Possible values are None, PFS1, PFS2, PFS14, PFS24, PFS2048, PFSMM, ECP256, ECP384. + PfsGroup *string `json:"pfsGroup,omitempty" tf:"pfs_group,omitempty"` + + // The IPSec Security Association (also called Quick Mode or Phase 2 SA) payload size in KB for the site to site VPN tunnel. + SaDataSizeKb *float64 `json:"saDataSizeKb,omitempty" tf:"sa_data_size_kb,omitempty"` + + // The IPSec Security Association (also called Quick Mode or Phase 2 SA) lifetime in seconds for the site to site VPN tunnel. + SaLifetimeSec *float64 `json:"saLifetimeSec,omitempty" tf:"sa_lifetime_sec,omitempty"` +} + +type VPNLinkIpsecPolicyParameters struct { + + // The DH Group used in IKE Phase 1 for initial SA. Possible values are None, DHGroup1, DHGroup2, DHGroup14, DHGroup24, DHGroup2048, ECP256, ECP384. + // +kubebuilder:validation:Optional + DhGroup *string `json:"dhGroup" tf:"dh_group,omitempty"` + + // The IPSec encryption algorithm (IKE phase 1). Possible values are AES128, AES192, AES256, DES, DES3, GCMAES128, GCMAES192, GCMAES256, None. 
+ // +kubebuilder:validation:Optional + EncryptionAlgorithm *string `json:"encryptionAlgorithm" tf:"encryption_algorithm,omitempty"` + + // The IKE encryption algorithm (IKE phase 2). Possible values are DES, DES3, AES128, AES192, AES256, GCMAES128, GCMAES256. + // +kubebuilder:validation:Optional + IkeEncryptionAlgorithm *string `json:"ikeEncryptionAlgorithm" tf:"ike_encryption_algorithm,omitempty"` + + // The IKE integrity algorithm (IKE phase 2). Possible values are MD5, SHA1, SHA256, SHA384, GCMAES128, GCMAES256. + // +kubebuilder:validation:Optional + IkeIntegrityAlgorithm *string `json:"ikeIntegrityAlgorithm" tf:"ike_integrity_algorithm,omitempty"` + + // The IPSec integrity algorithm (IKE phase 1). Possible values are MD5, SHA1, SHA256, GCMAES128, GCMAES192, GCMAES256. + // +kubebuilder:validation:Optional + IntegrityAlgorithm *string `json:"integrityAlgorithm" tf:"integrity_algorithm,omitempty"` + + // The Pfs Group used in IKE Phase 2 for the new child SA. Possible values are None, PFS1, PFS2, PFS14, PFS24, PFS2048, PFSMM, ECP256, ECP384. + // +kubebuilder:validation:Optional + PfsGroup *string `json:"pfsGroup" tf:"pfs_group,omitempty"` + + // The IPSec Security Association (also called Quick Mode or Phase 2 SA) payload size in KB for the site to site VPN tunnel. + // +kubebuilder:validation:Optional + SaDataSizeKb *float64 `json:"saDataSizeKb" tf:"sa_data_size_kb,omitempty"` + + // The IPSec Security Association (also called Quick Mode or Phase 2 SA) lifetime in seconds for the site to site VPN tunnel. + // +kubebuilder:validation:Optional + SaLifetimeSec *float64 `json:"saLifetimeSec" tf:"sa_lifetime_sec,omitempty"` +} + +type VPNLinkObservation struct { + + // Should the BGP be enabled? Defaults to false. Changing this forces a new VPN Gateway Connection to be created. + BGPEnabled *bool `json:"bgpEnabled,omitempty" tf:"bgp_enabled,omitempty"` + + // The expected connection bandwidth in MBPS. Defaults to 10. 
+ BandwidthMbps *float64 `json:"bandwidthMbps,omitempty" tf:"bandwidth_mbps,omitempty"` + + // The connection mode of this VPN Link. Possible values are Default, InitiatorOnly and ResponderOnly. Defaults to Default. + ConnectionMode *string `json:"connectionMode,omitempty" tf:"connection_mode,omitempty"` + + // One or more custom_bgp_address blocks as defined below. + CustomBGPAddress []CustomBGPAddressObservation `json:"customBgpAddress,omitempty" tf:"custom_bgp_address,omitempty"` + + // A list of the egress NAT Rule Ids. + // +listType=set + EgressNATRuleIds []*string `json:"egressNatRuleIds,omitempty" tf:"egress_nat_rule_ids,omitempty"` + + // A list of the ingress NAT Rule Ids. + // +listType=set + IngressNATRuleIds []*string `json:"ingressNatRuleIds,omitempty" tf:"ingress_nat_rule_ids,omitempty"` + + // One or more ipsec_policy blocks as defined above. + IpsecPolicy []VPNLinkIpsecPolicyObservation `json:"ipsecPolicy,omitempty" tf:"ipsec_policy,omitempty"` + + // Whether to use local Azure IP to initiate connection? Defaults to false. + LocalAzureIPAddressEnabled *bool `json:"localAzureIpAddressEnabled,omitempty" tf:"local_azure_ip_address_enabled,omitempty"` + + // The name which should be used for this VPN Link Connection. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Whether to enable policy-based traffic selectors? Defaults to false. + PolicyBasedTrafficSelectorEnabled *bool `json:"policyBasedTrafficSelectorEnabled,omitempty" tf:"policy_based_traffic_selector_enabled,omitempty"` + + // The protocol used for this VPN Link Connection. Possible values are IKEv1 and IKEv2. Defaults to IKEv2. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // Should the rate limit be enabled? Defaults to false. + RatelimitEnabled *bool `json:"ratelimitEnabled,omitempty" tf:"ratelimit_enabled,omitempty"` + + // Routing weight for this VPN Link Connection. Defaults to 0. 
+ RouteWeight *float64 `json:"routeWeight,omitempty" tf:"route_weight,omitempty"` + + // SharedKey for this VPN Link Connection. + SharedKey *string `json:"sharedKey,omitempty" tf:"shared_key,omitempty"` + + // The ID of the connected VPN Site Link. Changing this forces a new VPN Gateway Connection to be created. + VPNSiteLinkID *string `json:"vpnSiteLinkId,omitempty" tf:"vpn_site_link_id,omitempty"` +} + +type VPNLinkParameters struct { + + // Should the BGP be enabled? Defaults to false. Changing this forces a new VPN Gateway Connection to be created. + // +kubebuilder:validation:Optional + BGPEnabled *bool `json:"bgpEnabled,omitempty" tf:"bgp_enabled,omitempty"` + + // The expected connection bandwidth in MBPS. Defaults to 10. + // +kubebuilder:validation:Optional + BandwidthMbps *float64 `json:"bandwidthMbps,omitempty" tf:"bandwidth_mbps,omitempty"` + + // The connection mode of this VPN Link. Possible values are Default, InitiatorOnly and ResponderOnly. Defaults to Default. + // +kubebuilder:validation:Optional + ConnectionMode *string `json:"connectionMode,omitempty" tf:"connection_mode,omitempty"` + + // One or more custom_bgp_address blocks as defined below. + // +kubebuilder:validation:Optional + CustomBGPAddress []CustomBGPAddressParameters `json:"customBgpAddress,omitempty" tf:"custom_bgp_address,omitempty"` + + // A list of the egress NAT Rule Ids. + // +kubebuilder:validation:Optional + // +listType=set + EgressNATRuleIds []*string `json:"egressNatRuleIds,omitempty" tf:"egress_nat_rule_ids,omitempty"` + + // A list of the ingress NAT Rule Ids. + // +kubebuilder:validation:Optional + // +listType=set + IngressNATRuleIds []*string `json:"ingressNatRuleIds,omitempty" tf:"ingress_nat_rule_ids,omitempty"` + + // One or more ipsec_policy blocks as defined above. 
+ // +kubebuilder:validation:Optional + IpsecPolicy []VPNLinkIpsecPolicyParameters `json:"ipsecPolicy,omitempty" tf:"ipsec_policy,omitempty"` + + // Whether to use local Azure IP to initiate connection? Defaults to false. + // +kubebuilder:validation:Optional + LocalAzureIPAddressEnabled *bool `json:"localAzureIpAddressEnabled,omitempty" tf:"local_azure_ip_address_enabled,omitempty"` + + // The name which should be used for this VPN Link Connection. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Whether to enable policy-based traffic selectors? Defaults to false. + // +kubebuilder:validation:Optional + PolicyBasedTrafficSelectorEnabled *bool `json:"policyBasedTrafficSelectorEnabled,omitempty" tf:"policy_based_traffic_selector_enabled,omitempty"` + + // The protocol used for this VPN Link Connection. Possible values are IKEv1 and IKEv2. Defaults to IKEv2. + // +kubebuilder:validation:Optional + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` + + // Should the rate limit be enabled? Defaults to false. + // +kubebuilder:validation:Optional + RatelimitEnabled *bool `json:"ratelimitEnabled,omitempty" tf:"ratelimit_enabled,omitempty"` + + // Routing weight for this VPN Link Connection. Defaults to 0. + // +kubebuilder:validation:Optional + RouteWeight *float64 `json:"routeWeight,omitempty" tf:"route_weight,omitempty"` + + // SharedKey for this VPN Link Connection. + // +kubebuilder:validation:Optional + SharedKey *string `json:"sharedKey,omitempty" tf:"shared_key,omitempty"` + + // The ID of the connected VPN Site Link. Changing this forces a new VPN Gateway Connection to be created. 
+ // +kubebuilder:validation:Optional + VPNSiteLinkID *string `json:"vpnSiteLinkId" tf:"vpn_site_link_id,omitempty"` +} + +// VPNGatewayConnectionSpec defines the desired state of VPNGatewayConnection +type VPNGatewayConnectionSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider VPNGatewayConnectionParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider VPNGatewayConnectionInitParameters `json:"initProvider,omitempty"` +} + +// VPNGatewayConnectionStatus defines the observed state of VPNGatewayConnection. +type VPNGatewayConnectionStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider VPNGatewayConnectionObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// VPNGatewayConnection is the Schema for the VPNGatewayConnections API. Manages a VPN Gateway Connection. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type VPNGatewayConnection struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.vpnLink) || (has(self.initProvider) && has(self.initProvider.vpnLink))",message="spec.forProvider.vpnLink is a required parameter" + Spec VPNGatewayConnectionSpec `json:"spec"` + Status VPNGatewayConnectionStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// VPNGatewayConnectionList contains a list of VPNGatewayConnections +type VPNGatewayConnectionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []VPNGatewayConnection `json:"items"` +} + +// Repository type metadata. +var ( + VPNGatewayConnection_Kind = "VPNGatewayConnection" + VPNGatewayConnection_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: VPNGatewayConnection_Kind}.String() + VPNGatewayConnection_KindAPIVersion = VPNGatewayConnection_Kind + "." 
+ CRDGroupVersion.String() + VPNGatewayConnection_GroupVersionKind = CRDGroupVersion.WithKind(VPNGatewayConnection_Kind) +) + +func init() { + SchemeBuilder.Register(&VPNGatewayConnection{}, &VPNGatewayConnectionList{}) +} diff --git a/apis/network/v1beta2/zz_vpnserverconfiguration_terraformed.go b/apis/network/v1beta2/zz_vpnserverconfiguration_terraformed.go new file mode 100755 index 000000000..ee21c0f98 --- /dev/null +++ b/apis/network/v1beta2/zz_vpnserverconfiguration_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this VPNServerConfiguration +func (mg *VPNServerConfiguration) GetTerraformResourceType() string { + return "azurerm_vpn_server_configuration" +} + +// GetConnectionDetailsMapping for this VPNServerConfiguration +func (tr *VPNServerConfiguration) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"radius[*].server[*].secret": "spec.forProvider.radius[*].server[*].secretSecretRef"} +} + +// GetObservation of this VPNServerConfiguration +func (tr *VPNServerConfiguration) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this VPNServerConfiguration +func (tr *VPNServerConfiguration) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this VPNServerConfiguration +func (tr 
*VPNServerConfiguration) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this VPNServerConfiguration +func (tr *VPNServerConfiguration) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this VPNServerConfiguration +func (tr *VPNServerConfiguration) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this VPNServerConfiguration +func (tr *VPNServerConfiguration) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this VPNServerConfiguration +func (tr *VPNServerConfiguration) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this VPNServerConfiguration using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *VPNServerConfiguration) LateInitialize(attrs []byte) (bool, error) { + params := &VPNServerConfigurationParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *VPNServerConfiguration) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/network/v1beta2/zz_vpnserverconfiguration_types.go b/apis/network/v1beta2/zz_vpnserverconfiguration_types.go new file mode 100755 index 000000000..b0b010f5a --- /dev/null +++ b/apis/network/v1beta2/zz_vpnserverconfiguration_types.go @@ -0,0 +1,516 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AzureActiveDirectoryAuthenticationInitParameters struct { + + // The Audience which should be used for authentication. + Audience *string `json:"audience,omitempty" tf:"audience,omitempty"` + + // The Issuer which should be used for authentication. 
+ Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"` + + // The Tenant which should be used for authentication. + Tenant *string `json:"tenant,omitempty" tf:"tenant,omitempty"` +} + +type AzureActiveDirectoryAuthenticationObservation struct { + + // The Audience which should be used for authentication. + Audience *string `json:"audience,omitempty" tf:"audience,omitempty"` + + // The Issuer which should be used for authentication. + Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"` + + // The Tenant which should be used for authentication. + Tenant *string `json:"tenant,omitempty" tf:"tenant,omitempty"` +} + +type AzureActiveDirectoryAuthenticationParameters struct { + + // The Audience which should be used for authentication. + // +kubebuilder:validation:Optional + Audience *string `json:"audience" tf:"audience,omitempty"` + + // The Issuer which should be used for authentication. + // +kubebuilder:validation:Optional + Issuer *string `json:"issuer" tf:"issuer,omitempty"` + + // The Tenant which should be used for authentication. + // +kubebuilder:validation:Optional + Tenant *string `json:"tenant" tf:"tenant,omitempty"` +} + +type ClientRevokedCertificateInitParameters struct { + + // A name used to uniquely identify this certificate. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Thumbprint of the Certificate. + Thumbprint *string `json:"thumbprint,omitempty" tf:"thumbprint,omitempty"` +} + +type ClientRevokedCertificateObservation struct { + + // A name used to uniquely identify this certificate. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Thumbprint of the Certificate. + Thumbprint *string `json:"thumbprint,omitempty" tf:"thumbprint,omitempty"` +} + +type ClientRevokedCertificateParameters struct { + + // A name used to uniquely identify this certificate. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The Thumbprint of the Certificate. 
+ // +kubebuilder:validation:Optional + Thumbprint *string `json:"thumbprint" tf:"thumbprint,omitempty"` +} + +type ClientRootCertificateInitParameters struct { + + // A name used to uniquely identify this certificate. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Public Key Data associated with the Certificate. + PublicCertData *string `json:"publicCertData,omitempty" tf:"public_cert_data,omitempty"` +} + +type ClientRootCertificateObservation struct { + + // A name used to uniquely identify this certificate. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Public Key Data associated with the Certificate. + PublicCertData *string `json:"publicCertData,omitempty" tf:"public_cert_data,omitempty"` +} + +type ClientRootCertificateParameters struct { + + // A name used to uniquely identify this certificate. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The Public Key Data associated with the Certificate. + // +kubebuilder:validation:Optional + PublicCertData *string `json:"publicCertData" tf:"public_cert_data,omitempty"` +} + +type RadiusClientRootCertificateInitParameters struct { + + // A name used to uniquely identify this certificate. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Thumbprint of the Certificate. + Thumbprint *string `json:"thumbprint,omitempty" tf:"thumbprint,omitempty"` +} + +type RadiusClientRootCertificateObservation struct { + + // A name used to uniquely identify this certificate. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Thumbprint of the Certificate. + Thumbprint *string `json:"thumbprint,omitempty" tf:"thumbprint,omitempty"` +} + +type RadiusClientRootCertificateParameters struct { + + // A name used to uniquely identify this certificate. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The Thumbprint of the Certificate. 
+ // +kubebuilder:validation:Optional + Thumbprint *string `json:"thumbprint" tf:"thumbprint,omitempty"` +} + +type RadiusInitParameters struct { + + // One or more client_root_certificate blocks as defined below. + ClientRootCertificate []RadiusClientRootCertificateInitParameters `json:"clientRootCertificate,omitempty" tf:"client_root_certificate,omitempty"` + + // One or more server blocks as defined below. + Server []ServerInitParameters `json:"server,omitempty" tf:"server,omitempty"` + + // One or more server_root_certificate blocks as defined below. + ServerRootCertificate []ServerRootCertificateInitParameters `json:"serverRootCertificate,omitempty" tf:"server_root_certificate,omitempty"` +} + +type RadiusObservation struct { + + // One or more client_root_certificate blocks as defined below. + ClientRootCertificate []RadiusClientRootCertificateObservation `json:"clientRootCertificate,omitempty" tf:"client_root_certificate,omitempty"` + + // One or more server blocks as defined below. + Server []ServerObservation `json:"server,omitempty" tf:"server,omitempty"` + + // One or more server_root_certificate blocks as defined below. + ServerRootCertificate []ServerRootCertificateObservation `json:"serverRootCertificate,omitempty" tf:"server_root_certificate,omitempty"` +} + +type RadiusParameters struct { + + // One or more client_root_certificate blocks as defined below. + // +kubebuilder:validation:Optional + ClientRootCertificate []RadiusClientRootCertificateParameters `json:"clientRootCertificate,omitempty" tf:"client_root_certificate,omitempty"` + + // One or more server blocks as defined below. + // +kubebuilder:validation:Optional + Server []ServerParameters `json:"server,omitempty" tf:"server,omitempty"` + + // One or more server_root_certificate blocks as defined below. 
+ // +kubebuilder:validation:Optional + ServerRootCertificate []ServerRootCertificateParameters `json:"serverRootCertificate,omitempty" tf:"server_root_certificate,omitempty"` +} + +type ServerInitParameters struct { + + // The Address of the Radius Server. + Address *string `json:"address,omitempty" tf:"address,omitempty"` + + // The Score of the Radius Server determines the priority of the server. Ranges from 1 to 30. + Score *float64 `json:"score,omitempty" tf:"score,omitempty"` +} + +type ServerObservation struct { + + // The Address of the Radius Server. + Address *string `json:"address,omitempty" tf:"address,omitempty"` + + // The Score of the Radius Server determines the priority of the server. Ranges from 1 to 30. + Score *float64 `json:"score,omitempty" tf:"score,omitempty"` +} + +type ServerParameters struct { + + // The Address of the Radius Server. + // +kubebuilder:validation:Optional + Address *string `json:"address" tf:"address,omitempty"` + + // The Score of the Radius Server determines the priority of the server. Ranges from 1 to 30. + // +kubebuilder:validation:Optional + Score *float64 `json:"score" tf:"score,omitempty"` + + // The Secret used to communicate with the Radius Server. + // +kubebuilder:validation:Required + SecretSecretRef v1.SecretKeySelector `json:"secretSecretRef" tf:"-"` +} + +type ServerRootCertificateInitParameters struct { + + // A name used to uniquely identify this certificate. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Public Key Data associated with the Certificate. + PublicCertData *string `json:"publicCertData,omitempty" tf:"public_cert_data,omitempty"` +} + +type ServerRootCertificateObservation struct { + + // A name used to uniquely identify this certificate. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Public Key Data associated with the Certificate. 
+ PublicCertData *string `json:"publicCertData,omitempty" tf:"public_cert_data,omitempty"` +} + +type ServerRootCertificateParameters struct { + + // A name used to uniquely identify this certificate. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The Public Key Data associated with the Certificate. + // +kubebuilder:validation:Optional + PublicCertData *string `json:"publicCertData" tf:"public_cert_data,omitempty"` +} + +type VPNServerConfigurationInitParameters struct { + + // A azure_active_directory_authentication block as defined below. + AzureActiveDirectoryAuthentication []AzureActiveDirectoryAuthenticationInitParameters `json:"azureActiveDirectoryAuthentication,omitempty" tf:"azure_active_directory_authentication,omitempty"` + + // One or more client_revoked_certificate blocks as defined below. + ClientRevokedCertificate []ClientRevokedCertificateInitParameters `json:"clientRevokedCertificate,omitempty" tf:"client_revoked_certificate,omitempty"` + + // One or more client_root_certificate blocks as defined below. + ClientRootCertificate []ClientRootCertificateInitParameters `json:"clientRootCertificate,omitempty" tf:"client_root_certificate,omitempty"` + + // A ipsec_policy block as defined below. + IpsecPolicy *VPNServerConfigurationIpsecPolicyInitParameters `json:"ipsecPolicy,omitempty" tf:"ipsec_policy,omitempty"` + + // The Azure location where this VPN Server Configuration should be created. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A radius block as defined below. + Radius *RadiusInitParameters `json:"radius,omitempty" tf:"radius,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A list of Authentication Types applicable for this VPN Server Configuration. 
Possible values are AAD (Azure Active Directory), Certificate and Radius. + VPNAuthenticationTypes []*string `json:"vpnAuthenticationTypes,omitempty" tf:"vpn_authentication_types,omitempty"` + + // A list of VPN Protocols to use for this Server Configuration. Possible values are IkeV2 and OpenVPN. + // +listType=set + VPNProtocols []*string `json:"vpnProtocols,omitempty" tf:"vpn_protocols,omitempty"` +} + +type VPNServerConfigurationIpsecPolicyInitParameters struct { + + // The DH Group, used in IKE Phase 1. Possible values include DHGroup1, DHGroup2, DHGroup14, DHGroup24, DHGroup2048, ECP256, ECP384 and None. + DhGroup *string `json:"dhGroup,omitempty" tf:"dh_group,omitempty"` + + // The IKE encryption algorithm, used for IKE Phase 2. Possible values include AES128, AES192, AES256, DES, DES3, GCMAES128 and GCMAES256. + IkeEncryption *string `json:"ikeEncryption,omitempty" tf:"ike_encryption,omitempty"` + + // The IKE encryption integrity algorithm, used for IKE Phase 2. Possible values include GCMAES128, GCMAES256, MD5, SHA1, SHA256 and SHA384. + IkeIntegrity *string `json:"ikeIntegrity,omitempty" tf:"ike_integrity,omitempty"` + + // The IPSec encryption algorithm, used for IKE phase 1. Possible values include AES128, AES192, AES256, DES, DES3, GCMAES128, GCMAES192, GCMAES256 and None. + IpsecEncryption *string `json:"ipsecEncryption,omitempty" tf:"ipsec_encryption,omitempty"` + + // The IPSec integrity algorithm, used for IKE phase 1. Possible values include GCMAES128, GCMAES192, GCMAES256, MD5, SHA1 and SHA256. + IpsecIntegrity *string `json:"ipsecIntegrity,omitempty" tf:"ipsec_integrity,omitempty"` + + // The Pfs Group, used in IKE Phase 2. Possible values include ECP256, ECP384, PFS1, PFS2, PFS14, PFS24, PFS2048, PFSMM and None. + PfsGroup *string `json:"pfsGroup,omitempty" tf:"pfs_group,omitempty"` + + // The IPSec Security Association payload size in KB for a Site-to-Site VPN tunnel. 
+ SaDataSizeKilobytes *float64 `json:"saDataSizeKilobytes,omitempty" tf:"sa_data_size_kilobytes,omitempty"` + + // The IPSec Security Association lifetime in seconds for a Site-to-Site VPN tunnel. + SaLifetimeSeconds *float64 `json:"saLifetimeSeconds,omitempty" tf:"sa_lifetime_seconds,omitempty"` +} + +type VPNServerConfigurationIpsecPolicyObservation struct { + + // The DH Group, used in IKE Phase 1. Possible values include DHGroup1, DHGroup2, DHGroup14, DHGroup24, DHGroup2048, ECP256, ECP384 and None. + DhGroup *string `json:"dhGroup,omitempty" tf:"dh_group,omitempty"` + + // The IKE encryption algorithm, used for IKE Phase 2. Possible values include AES128, AES192, AES256, DES, DES3, GCMAES128 and GCMAES256. + IkeEncryption *string `json:"ikeEncryption,omitempty" tf:"ike_encryption,omitempty"` + + // The IKE encryption integrity algorithm, used for IKE Phase 2. Possible values include GCMAES128, GCMAES256, MD5, SHA1, SHA256 and SHA384. + IkeIntegrity *string `json:"ikeIntegrity,omitempty" tf:"ike_integrity,omitempty"` + + // The IPSec encryption algorithm, used for IKE phase 1. Possible values include AES128, AES192, AES256, DES, DES3, GCMAES128, GCMAES192, GCMAES256 and None. + IpsecEncryption *string `json:"ipsecEncryption,omitempty" tf:"ipsec_encryption,omitempty"` + + // The IPSec integrity algorithm, used for IKE phase 1. Possible values include GCMAES128, GCMAES192, GCMAES256, MD5, SHA1 and SHA256. + IpsecIntegrity *string `json:"ipsecIntegrity,omitempty" tf:"ipsec_integrity,omitempty"` + + // The Pfs Group, used in IKE Phase 2. Possible values include ECP256, ECP384, PFS1, PFS2, PFS14, PFS24, PFS2048, PFSMM and None. + PfsGroup *string `json:"pfsGroup,omitempty" tf:"pfs_group,omitempty"` + + // The IPSec Security Association payload size in KB for a Site-to-Site VPN tunnel. 
+ SaDataSizeKilobytes *float64 `json:"saDataSizeKilobytes,omitempty" tf:"sa_data_size_kilobytes,omitempty"` + + // The IPSec Security Association lifetime in seconds for a Site-to-Site VPN tunnel. + SaLifetimeSeconds *float64 `json:"saLifetimeSeconds,omitempty" tf:"sa_lifetime_seconds,omitempty"` +} + +type VPNServerConfigurationIpsecPolicyParameters struct { + + // The DH Group, used in IKE Phase 1. Possible values include DHGroup1, DHGroup2, DHGroup14, DHGroup24, DHGroup2048, ECP256, ECP384 and None. + // +kubebuilder:validation:Optional + DhGroup *string `json:"dhGroup" tf:"dh_group,omitempty"` + + // The IKE encryption algorithm, used for IKE Phase 2. Possible values include AES128, AES192, AES256, DES, DES3, GCMAES128 and GCMAES256. + // +kubebuilder:validation:Optional + IkeEncryption *string `json:"ikeEncryption" tf:"ike_encryption,omitempty"` + + // The IKE encryption integrity algorithm, used for IKE Phase 2. Possible values include GCMAES128, GCMAES256, MD5, SHA1, SHA256 and SHA384. + // +kubebuilder:validation:Optional + IkeIntegrity *string `json:"ikeIntegrity" tf:"ike_integrity,omitempty"` + + // The IPSec encryption algorithm, used for IKE phase 1. Possible values include AES128, AES192, AES256, DES, DES3, GCMAES128, GCMAES192, GCMAES256 and None. + // +kubebuilder:validation:Optional + IpsecEncryption *string `json:"ipsecEncryption" tf:"ipsec_encryption,omitempty"` + + // The IPSec integrity algorithm, used for IKE phase 1. Possible values include GCMAES128, GCMAES192, GCMAES256, MD5, SHA1 and SHA256. + // +kubebuilder:validation:Optional + IpsecIntegrity *string `json:"ipsecIntegrity" tf:"ipsec_integrity,omitempty"` + + // The Pfs Group, used in IKE Phase 2. Possible values include ECP256, ECP384, PFS1, PFS2, PFS14, PFS24, PFS2048, PFSMM and None. + // +kubebuilder:validation:Optional + PfsGroup *string `json:"pfsGroup" tf:"pfs_group,omitempty"` + + // The IPSec Security Association payload size in KB for a Site-to-Site VPN tunnel. 
+ // +kubebuilder:validation:Optional + SaDataSizeKilobytes *float64 `json:"saDataSizeKilobytes" tf:"sa_data_size_kilobytes,omitempty"` + + // The IPSec Security Association lifetime in seconds for a Site-to-Site VPN tunnel. + // +kubebuilder:validation:Optional + SaLifetimeSeconds *float64 `json:"saLifetimeSeconds" tf:"sa_lifetime_seconds,omitempty"` +} + +type VPNServerConfigurationObservation struct { + + // A azure_active_directory_authentication block as defined below. + AzureActiveDirectoryAuthentication []AzureActiveDirectoryAuthenticationObservation `json:"azureActiveDirectoryAuthentication,omitempty" tf:"azure_active_directory_authentication,omitempty"` + + // One or more client_revoked_certificate blocks as defined below. + ClientRevokedCertificate []ClientRevokedCertificateObservation `json:"clientRevokedCertificate,omitempty" tf:"client_revoked_certificate,omitempty"` + + // One or more client_root_certificate blocks as defined below. + ClientRootCertificate []ClientRootCertificateObservation `json:"clientRootCertificate,omitempty" tf:"client_root_certificate,omitempty"` + + // The ID of the VPN Server Configuration. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A ipsec_policy block as defined below. + IpsecPolicy *VPNServerConfigurationIpsecPolicyObservation `json:"ipsecPolicy,omitempty" tf:"ipsec_policy,omitempty"` + + // The Azure location where this VPN Server Configuration should be created. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A radius block as defined below. + Radius *RadiusObservation `json:"radius,omitempty" tf:"radius,omitempty"` + + // The Name of the Resource Group in which this VPN Server Configuration should be created. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A mapping of tags to assign to the resource. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A list of Authentication Types applicable for this VPN Server Configuration. Possible values are AAD (Azure Active Directory), Certificate and Radius. + VPNAuthenticationTypes []*string `json:"vpnAuthenticationTypes,omitempty" tf:"vpn_authentication_types,omitempty"` + + // A list of VPN Protocols to use for this Server Configuration. Possible values are IkeV2 and OpenVPN. + // +listType=set + VPNProtocols []*string `json:"vpnProtocols,omitempty" tf:"vpn_protocols,omitempty"` +} + +type VPNServerConfigurationParameters struct { + + // A azure_active_directory_authentication block as defined below. + // +kubebuilder:validation:Optional + AzureActiveDirectoryAuthentication []AzureActiveDirectoryAuthenticationParameters `json:"azureActiveDirectoryAuthentication,omitempty" tf:"azure_active_directory_authentication,omitempty"` + + // One or more client_revoked_certificate blocks as defined below. + // +kubebuilder:validation:Optional + ClientRevokedCertificate []ClientRevokedCertificateParameters `json:"clientRevokedCertificate,omitempty" tf:"client_revoked_certificate,omitempty"` + + // One or more client_root_certificate blocks as defined below. + // +kubebuilder:validation:Optional + ClientRootCertificate []ClientRootCertificateParameters `json:"clientRootCertificate,omitempty" tf:"client_root_certificate,omitempty"` + + // A ipsec_policy block as defined below. + // +kubebuilder:validation:Optional + IpsecPolicy *VPNServerConfigurationIpsecPolicyParameters `json:"ipsecPolicy,omitempty" tf:"ipsec_policy,omitempty"` + + // The Azure location where this VPN Server Configuration should be created. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A radius block as defined below. 
+ // +kubebuilder:validation:Optional + Radius *RadiusParameters `json:"radius,omitempty" tf:"radius,omitempty"` + + // The Name of the Resource Group in which this VPN Server Configuration should be created. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A list of Authentication Types applicable for this VPN Server Configuration. Possible values are AAD (Azure Active Directory), Certificate and Radius. + // +kubebuilder:validation:Optional + VPNAuthenticationTypes []*string `json:"vpnAuthenticationTypes,omitempty" tf:"vpn_authentication_types,omitempty"` + + // A list of VPN Protocols to use for this Server Configuration. Possible values are IkeV2 and OpenVPN. + // +kubebuilder:validation:Optional + // +listType=set + VPNProtocols []*string `json:"vpnProtocols,omitempty" tf:"vpn_protocols,omitempty"` +} + +// VPNServerConfigurationSpec defines the desired state of VPNServerConfiguration +type VPNServerConfigurationSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider VPNServerConfigurationParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. 
+ // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider VPNServerConfigurationInitParameters `json:"initProvider,omitempty"` +} + +// VPNServerConfigurationStatus defines the observed state of VPNServerConfiguration. +type VPNServerConfigurationStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider VPNServerConfigurationObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// VPNServerConfiguration is the Schema for the VPNServerConfigurations API. Manages a VPN Server Configuration. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type VPNServerConfiguration struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.vpnAuthenticationTypes) || (has(self.initProvider) && has(self.initProvider.vpnAuthenticationTypes))",message="spec.forProvider.vpnAuthenticationTypes is a required parameter" + Spec VPNServerConfigurationSpec `json:"spec"` + Status VPNServerConfigurationStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// VPNServerConfigurationList contains a list of VPNServerConfigurations +type VPNServerConfigurationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []VPNServerConfiguration `json:"items"` +} + +// Repository type metadata. 
+var ( + VPNServerConfiguration_Kind = "VPNServerConfiguration" + VPNServerConfiguration_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: VPNServerConfiguration_Kind}.String() + VPNServerConfiguration_KindAPIVersion = VPNServerConfiguration_Kind + "." + CRDGroupVersion.String() + VPNServerConfiguration_GroupVersionKind = CRDGroupVersion.WithKind(VPNServerConfiguration_Kind) +) + +func init() { + SchemeBuilder.Register(&VPNServerConfiguration{}, &VPNServerConfigurationList{}) +} diff --git a/apis/network/v1beta2/zz_vpnsite_terraformed.go b/apis/network/v1beta2/zz_vpnsite_terraformed.go new file mode 100755 index 000000000..b13af0191 --- /dev/null +++ b/apis/network/v1beta2/zz_vpnsite_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this VPNSite +func (mg *VPNSite) GetTerraformResourceType() string { + return "azurerm_vpn_site" +} + +// GetConnectionDetailsMapping for this VPNSite +func (tr *VPNSite) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this VPNSite +func (tr *VPNSite) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this VPNSite +func (tr *VPNSite) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this VPNSite +func (tr *VPNSite) GetID() string { + if 
tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this VPNSite +func (tr *VPNSite) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this VPNSite +func (tr *VPNSite) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this VPNSite +func (tr *VPNSite) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this VPNSite +func (tr *VPNSite) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this VPNSite using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *VPNSite) LateInitialize(attrs []byte) (bool, error) { + params := &VPNSiteParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *VPNSite) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/network/v1beta2/zz_vpnsite_types.go b/apis/network/v1beta2/zz_vpnsite_types.go new file mode 100755 index 000000000..64cf6a5ae --- /dev/null +++ b/apis/network/v1beta2/zz_vpnsite_types.go @@ -0,0 +1,368 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type BGPInitParameters struct { + + // The BGP speaker's ASN. + Asn *float64 `json:"asn,omitempty" tf:"asn,omitempty"` + + // The BGP peering IP address. + PeeringAddress *string `json:"peeringAddress,omitempty" tf:"peering_address,omitempty"` +} + +type BGPObservation struct { + + // The BGP speaker's ASN. 
+ Asn *float64 `json:"asn,omitempty" tf:"asn,omitempty"` + + // The BGP peering IP address. + PeeringAddress *string `json:"peeringAddress,omitempty" tf:"peering_address,omitempty"` +} + +type BGPParameters struct { + + // The BGP speaker's ASN. + // +kubebuilder:validation:Optional + Asn *float64 `json:"asn" tf:"asn,omitempty"` + + // The BGP peering IP address. + // +kubebuilder:validation:Optional + PeeringAddress *string `json:"peeringAddress" tf:"peering_address,omitempty"` +} + +type LinkInitParameters struct { + + // A bgp block as defined above. + BGP *BGPInitParameters `json:"bgp,omitempty" tf:"bgp,omitempty"` + + // The FQDN of this VPN Site Link. + Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"` + + // The IP address of this VPN Site Link. + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // The name which should be used for this VPN Site Link. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The name of the physical link at the VPN Site. Example: ATT, Verizon. + ProviderName *string `json:"providerName,omitempty" tf:"provider_name,omitempty"` + + // The speed of the VPN device at the branch location in unit of mbps. Defaults to 0. + SpeedInMbps *float64 `json:"speedInMbps,omitempty" tf:"speed_in_mbps,omitempty"` +} + +type LinkObservation struct { + + // A bgp block as defined above. + BGP *BGPObservation `json:"bgp,omitempty" tf:"bgp,omitempty"` + + // The FQDN of this VPN Site Link. + Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"` + + // The ID of the VPN Site Link. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The IP address of this VPN Site Link. + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // The name which should be used for this VPN Site Link. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The name of the physical link at the VPN Site. Example: ATT, Verizon. 
+ ProviderName *string `json:"providerName,omitempty" tf:"provider_name,omitempty"` + + // The speed of the VPN device at the branch location in unit of mbps. Defaults to 0. + SpeedInMbps *float64 `json:"speedInMbps,omitempty" tf:"speed_in_mbps,omitempty"` +} + +type LinkParameters struct { + + // A bgp block as defined above. + // +kubebuilder:validation:Optional + BGP *BGPParameters `json:"bgp,omitempty" tf:"bgp,omitempty"` + + // The FQDN of this VPN Site Link. + // +kubebuilder:validation:Optional + Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"` + + // The IP address of this VPN Site Link. + // +kubebuilder:validation:Optional + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // The name which should be used for this VPN Site Link. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The name of the physical link at the VPN Site. Example: ATT, Verizon. + // +kubebuilder:validation:Optional + ProviderName *string `json:"providerName,omitempty" tf:"provider_name,omitempty"` + + // The speed of the VPN device at the branch location in unit of mbps. Defaults to 0. + // +kubebuilder:validation:Optional + SpeedInMbps *float64 `json:"speedInMbps,omitempty" tf:"speed_in_mbps,omitempty"` +} + +type O365PolicyInitParameters struct { + + // A traffic_category block as defined above. + TrafficCategory *TrafficCategoryInitParameters `json:"trafficCategory,omitempty" tf:"traffic_category,omitempty"` +} + +type O365PolicyObservation struct { + + // A traffic_category block as defined above. + TrafficCategory *TrafficCategoryObservation `json:"trafficCategory,omitempty" tf:"traffic_category,omitempty"` +} + +type O365PolicyParameters struct { + + // A traffic_category block as defined above. 
+ // +kubebuilder:validation:Optional + TrafficCategory *TrafficCategoryParameters `json:"trafficCategory,omitempty" tf:"traffic_category,omitempty"` +} + +type TrafficCategoryInitParameters struct { + + // Is allow endpoint enabled? The Allow endpoint is required for connectivity to specific O365 services and features, but are not as sensitive to network performance and latency as other endpoint types. Defaults to false. + AllowEndpointEnabled *bool `json:"allowEndpointEnabled,omitempty" tf:"allow_endpoint_enabled,omitempty"` + + // Is default endpoint enabled? The Default endpoint represents O365 services and dependencies that do not require any optimization, and can be treated by customer networks as normal Internet bound traffic. Defaults to false. + DefaultEndpointEnabled *bool `json:"defaultEndpointEnabled,omitempty" tf:"default_endpoint_enabled,omitempty"` + + // Is optimize endpoint enabled? The Optimize endpoint is required for connectivity to every O365 service and represents the O365 scenario that is the most sensitive to network performance, latency, and availability. Defaults to false. + OptimizeEndpointEnabled *bool `json:"optimizeEndpointEnabled,omitempty" tf:"optimize_endpoint_enabled,omitempty"` +} + +type TrafficCategoryObservation struct { + + // Is allow endpoint enabled? The Allow endpoint is required for connectivity to specific O365 services and features, but are not as sensitive to network performance and latency as other endpoint types. Defaults to false. + AllowEndpointEnabled *bool `json:"allowEndpointEnabled,omitempty" tf:"allow_endpoint_enabled,omitempty"` + + // Is default endpoint enabled? The Default endpoint represents O365 services and dependencies that do not require any optimization, and can be treated by customer networks as normal Internet bound traffic. Defaults to false. + DefaultEndpointEnabled *bool `json:"defaultEndpointEnabled,omitempty" tf:"default_endpoint_enabled,omitempty"` + + // Is optimize endpoint enabled? 
The Optimize endpoint is required for connectivity to every O365 service and represents the O365 scenario that is the most sensitive to network performance, latency, and availability. Defaults to false. + OptimizeEndpointEnabled *bool `json:"optimizeEndpointEnabled,omitempty" tf:"optimize_endpoint_enabled,omitempty"` +} + +type TrafficCategoryParameters struct { + + // Is allow endpoint enabled? The Allow endpoint is required for connectivity to specific O365 services and features, but are not as sensitive to network performance and latency as other endpoint types. Defaults to false. + // +kubebuilder:validation:Optional + AllowEndpointEnabled *bool `json:"allowEndpointEnabled,omitempty" tf:"allow_endpoint_enabled,omitempty"` + + // Is default endpoint enabled? The Default endpoint represents O365 services and dependencies that do not require any optimization, and can be treated by customer networks as normal Internet bound traffic. Defaults to false. + // +kubebuilder:validation:Optional + DefaultEndpointEnabled *bool `json:"defaultEndpointEnabled,omitempty" tf:"default_endpoint_enabled,omitempty"` + + // Is optimize endpoint enabled? The Optimize endpoint is required for connectivity to every O365 service and represents the O365 scenario that is the most sensitive to network performance, latency, and availability. Defaults to false. + // +kubebuilder:validation:Optional + OptimizeEndpointEnabled *bool `json:"optimizeEndpointEnabled,omitempty" tf:"optimize_endpoint_enabled,omitempty"` +} + +type VPNSiteInitParameters struct { + + // Specifies a list of IP address CIDRs that are located on your on-premises site. Traffic destined for these address spaces is routed to your local site. + // +listType=set + AddressCidrs []*string `json:"addressCidrs,omitempty" tf:"address_cidrs,omitempty"` + + // The model of the VPN device. + DeviceModel *string `json:"deviceModel,omitempty" tf:"device_model,omitempty"` + + // The name of the VPN device vendor. 
+ DeviceVendor *string `json:"deviceVendor,omitempty" tf:"device_vendor,omitempty"` + + // One or more link blocks as defined below. + Link []LinkInitParameters `json:"link,omitempty" tf:"link,omitempty"` + + // The Azure Region where the VPN Site should exist. Changing this forces a new VPN Site to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // An o365_policy block as defined below. + O365Policy *O365PolicyInitParameters `json:"o365Policy,omitempty" tf:"o365_policy,omitempty"` + + // A mapping of tags which should be assigned to the VPN Site. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The ID of the Virtual Wan where this VPN site resides in. Changing this forces a new VPN Site to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.VirtualWAN + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + VirtualWanID *string `json:"virtualWanId,omitempty" tf:"virtual_wan_id,omitempty"` + + // Reference to a VirtualWAN in network to populate virtualWanId. + // +kubebuilder:validation:Optional + VirtualWanIDRef *v1.Reference `json:"virtualWanIdRef,omitempty" tf:"-"` + + // Selector for a VirtualWAN in network to populate virtualWanId. + // +kubebuilder:validation:Optional + VirtualWanIDSelector *v1.Selector `json:"virtualWanIdSelector,omitempty" tf:"-"` +} + +type VPNSiteObservation struct { + + // Specifies a list of IP address CIDRs that are located on your on-premises site. Traffic destined for these address spaces is routed to your local site. + // +listType=set + AddressCidrs []*string `json:"addressCidrs,omitempty" tf:"address_cidrs,omitempty"` + + // The model of the VPN device. + DeviceModel *string `json:"deviceModel,omitempty" tf:"device_model,omitempty"` + + // The name of the VPN device vendor. 
+ DeviceVendor *string `json:"deviceVendor,omitempty" tf:"device_vendor,omitempty"` + + // The ID of the VPN Site. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // One or more link blocks as defined below. + Link []LinkObservation `json:"link,omitempty" tf:"link,omitempty"` + + // The Azure Region where the VPN Site should exist. Changing this forces a new VPN Site to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // An o365_policy block as defined below. + O365Policy *O365PolicyObservation `json:"o365Policy,omitempty" tf:"o365_policy,omitempty"` + + // The name of the Resource Group where the VPN Site should exist. Changing this forces a new VPN Site to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A mapping of tags which should be assigned to the VPN Site. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The ID of the Virtual Wan where this VPN site resides in. Changing this forces a new VPN Site to be created. + VirtualWanID *string `json:"virtualWanId,omitempty" tf:"virtual_wan_id,omitempty"` +} + +type VPNSiteParameters struct { + + // Specifies a list of IP address CIDRs that are located on your on-premises site. Traffic destined for these address spaces is routed to your local site. + // +kubebuilder:validation:Optional + // +listType=set + AddressCidrs []*string `json:"addressCidrs,omitempty" tf:"address_cidrs,omitempty"` + + // The model of the VPN device. + // +kubebuilder:validation:Optional + DeviceModel *string `json:"deviceModel,omitempty" tf:"device_model,omitempty"` + + // The name of the VPN device vendor. + // +kubebuilder:validation:Optional + DeviceVendor *string `json:"deviceVendor,omitempty" tf:"device_vendor,omitempty"` + + // One or more link blocks as defined below. 
+ // +kubebuilder:validation:Optional + Link []LinkParameters `json:"link,omitempty" tf:"link,omitempty"` + + // The Azure Region where the VPN Site should exist. Changing this forces a new VPN Site to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // An o365_policy block as defined below. + // +kubebuilder:validation:Optional + O365Policy *O365PolicyParameters `json:"o365Policy,omitempty" tf:"o365_policy,omitempty"` + + // The name of the Resource Group where the VPN Site should exist. Changing this forces a new VPN Site to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A mapping of tags which should be assigned to the VPN Site. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The ID of the Virtual Wan where this VPN site resides in. Changing this forces a new VPN Site to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.VirtualWAN + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + VirtualWanID *string `json:"virtualWanId,omitempty" tf:"virtual_wan_id,omitempty"` + + // Reference to a VirtualWAN in network to populate virtualWanId. 
+ // +kubebuilder:validation:Optional + VirtualWanIDRef *v1.Reference `json:"virtualWanIdRef,omitempty" tf:"-"` + + // Selector for a VirtualWAN in network to populate virtualWanId. + // +kubebuilder:validation:Optional + VirtualWanIDSelector *v1.Selector `json:"virtualWanIdSelector,omitempty" tf:"-"` +} + +// VPNSiteSpec defines the desired state of VPNSite +type VPNSiteSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider VPNSiteParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider VPNSiteInitParameters `json:"initProvider,omitempty"` +} + +// VPNSiteStatus defines the observed state of VPNSite. +type VPNSiteStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider VPNSiteObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// VPNSite is the Schema for the VPNSites API. Manages a VPN Site. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type VPNSite struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + Spec VPNSiteSpec `json:"spec"` + Status VPNSiteStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// VPNSiteList contains a list of VPNSites +type VPNSiteList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []VPNSite `json:"items"` +} + +// Repository type metadata. +var ( + VPNSite_Kind = "VPNSite" + VPNSite_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: VPNSite_Kind}.String() + VPNSite_KindAPIVersion = VPNSite_Kind + "." 
+ CRDGroupVersion.String() + VPNSite_GroupVersionKind = CRDGroupVersion.WithKind(VPNSite_Kind) +) + +func init() { + SchemeBuilder.Register(&VPNSite{}, &VPNSiteList{}) +} diff --git a/apis/network/v1beta2/zz_watcherflowlog_terraformed.go b/apis/network/v1beta2/zz_watcherflowlog_terraformed.go new file mode 100755 index 000000000..3f5cfd85f --- /dev/null +++ b/apis/network/v1beta2/zz_watcherflowlog_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this WatcherFlowLog +func (mg *WatcherFlowLog) GetTerraformResourceType() string { + return "azurerm_network_watcher_flow_log" +} + +// GetConnectionDetailsMapping for this WatcherFlowLog +func (tr *WatcherFlowLog) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this WatcherFlowLog +func (tr *WatcherFlowLog) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this WatcherFlowLog +func (tr *WatcherFlowLog) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this WatcherFlowLog +func (tr *WatcherFlowLog) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this WatcherFlowLog +func (tr *WatcherFlowLog) GetParameters() (map[string]any, error) { + p, err := 
json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this WatcherFlowLog +func (tr *WatcherFlowLog) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this WatcherFlowLog +func (tr *WatcherFlowLog) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this WatcherFlowLog +func (tr *WatcherFlowLog) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this WatcherFlowLog using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *WatcherFlowLog) LateInitialize(attrs []byte) (bool, error) { + params := &WatcherFlowLogParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *WatcherFlowLog) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/network/v1beta2/zz_watcherflowlog_types.go b/apis/network/v1beta2/zz_watcherflowlog_types.go new file mode 100755 index 000000000..dc2b23f2e --- /dev/null +++ b/apis/network/v1beta2/zz_watcherflowlog_types.go @@ -0,0 +1,371 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type RetentionPolicyInitParameters struct { + + // The number of days to retain flow log records. + Days *float64 `json:"days,omitempty" tf:"days,omitempty"` + + // Boolean flag to enable/disable retention. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type RetentionPolicyObservation struct { + + // The number of days to retain flow log records. + Days *float64 `json:"days,omitempty" tf:"days,omitempty"` + + // Boolean flag to enable/disable retention. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type RetentionPolicyParameters struct { + + // The number of days to retain flow log records. 
+ // +kubebuilder:validation:Optional + Days *float64 `json:"days" tf:"days,omitempty"` + + // Boolean flag to enable/disable retention. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` +} + +type TrafficAnalyticsInitParameters struct { + + // Boolean flag to enable/disable traffic analytics. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // How frequently service should do flow analytics in minutes. Defaults to 60. + IntervalInMinutes *float64 `json:"intervalInMinutes,omitempty" tf:"interval_in_minutes,omitempty"` + + // The resource GUID of the attached workspace. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/operationalinsights/v1beta2.Workspace + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("workspace_id",true) + WorkspaceID *string `json:"workspaceId,omitempty" tf:"workspace_id,omitempty"` + + // Reference to a Workspace in operationalinsights to populate workspaceId. + // +kubebuilder:validation:Optional + WorkspaceIDRef *v1.Reference `json:"workspaceIdRef,omitempty" tf:"-"` + + // Selector for a Workspace in operationalinsights to populate workspaceId. + // +kubebuilder:validation:Optional + WorkspaceIDSelector *v1.Selector `json:"workspaceIdSelector,omitempty" tf:"-"` + + // The location of the attached workspace. + WorkspaceRegion *string `json:"workspaceRegion,omitempty" tf:"workspace_region,omitempty"` + + // The resource ID of the attached workspace. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/operationalinsights/v1beta2.Workspace + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + WorkspaceResourceID *string `json:"workspaceResourceId,omitempty" tf:"workspace_resource_id,omitempty"` + + // Reference to a Workspace in operationalinsights to populate workspaceResourceId. 
+ // +kubebuilder:validation:Optional + WorkspaceResourceIDRef *v1.Reference `json:"workspaceResourceIdRef,omitempty" tf:"-"` + + // Selector for a Workspace in operationalinsights to populate workspaceResourceId. + // +kubebuilder:validation:Optional + WorkspaceResourceIDSelector *v1.Selector `json:"workspaceResourceIdSelector,omitempty" tf:"-"` +} + +type TrafficAnalyticsObservation struct { + + // Boolean flag to enable/disable traffic analytics. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // How frequently service should do flow analytics in minutes. Defaults to 60. + IntervalInMinutes *float64 `json:"intervalInMinutes,omitempty" tf:"interval_in_minutes,omitempty"` + + // The resource GUID of the attached workspace. + WorkspaceID *string `json:"workspaceId,omitempty" tf:"workspace_id,omitempty"` + + // The location of the attached workspace. + WorkspaceRegion *string `json:"workspaceRegion,omitempty" tf:"workspace_region,omitempty"` + + // The resource ID of the attached workspace. + WorkspaceResourceID *string `json:"workspaceResourceId,omitempty" tf:"workspace_resource_id,omitempty"` +} + +type TrafficAnalyticsParameters struct { + + // Boolean flag to enable/disable traffic analytics. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` + + // How frequently service should do flow analytics in minutes. Defaults to 60. + // +kubebuilder:validation:Optional + IntervalInMinutes *float64 `json:"intervalInMinutes,omitempty" tf:"interval_in_minutes,omitempty"` + + // The resource GUID of the attached workspace. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/operationalinsights/v1beta2.Workspace + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("workspace_id",true) + // +kubebuilder:validation:Optional + WorkspaceID *string `json:"workspaceId,omitempty" tf:"workspace_id,omitempty"` + + // Reference to a Workspace in operationalinsights to populate workspaceId. + // +kubebuilder:validation:Optional + WorkspaceIDRef *v1.Reference `json:"workspaceIdRef,omitempty" tf:"-"` + + // Selector for a Workspace in operationalinsights to populate workspaceId. + // +kubebuilder:validation:Optional + WorkspaceIDSelector *v1.Selector `json:"workspaceIdSelector,omitempty" tf:"-"` + + // The location of the attached workspace. + // +kubebuilder:validation:Optional + WorkspaceRegion *string `json:"workspaceRegion" tf:"workspace_region,omitempty"` + + // The resource ID of the attached workspace. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/operationalinsights/v1beta2.Workspace + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + WorkspaceResourceID *string `json:"workspaceResourceId,omitempty" tf:"workspace_resource_id,omitempty"` + + // Reference to a Workspace in operationalinsights to populate workspaceResourceId. + // +kubebuilder:validation:Optional + WorkspaceResourceIDRef *v1.Reference `json:"workspaceResourceIdRef,omitempty" tf:"-"` + + // Selector for a Workspace in operationalinsights to populate workspaceResourceId. + // +kubebuilder:validation:Optional + WorkspaceResourceIDSelector *v1.Selector `json:"workspaceResourceIdSelector,omitempty" tf:"-"` +} + +type WatcherFlowLogInitParameters struct { + + // Should Network Flow Logging be Enabled? 
+ Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The location where the Network Watcher Flow Log resides. Changing this forces a new resource to be created. Defaults to the location of the Network Watcher. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The ID of the Network Security Group for which to enable flow logs for. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.SecurityGroup + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + NetworkSecurityGroupID *string `json:"networkSecurityGroupId,omitempty" tf:"network_security_group_id,omitempty"` + + // Reference to a SecurityGroup in network to populate networkSecurityGroupId. + // +kubebuilder:validation:Optional + NetworkSecurityGroupIDRef *v1.Reference `json:"networkSecurityGroupIdRef,omitempty" tf:"-"` + + // Selector for a SecurityGroup in network to populate networkSecurityGroupId. + // +kubebuilder:validation:Optional + NetworkSecurityGroupIDSelector *v1.Selector `json:"networkSecurityGroupIdSelector,omitempty" tf:"-"` + + // A retention_policy block as documented below. + RetentionPolicy *RetentionPolicyInitParameters `json:"retentionPolicy,omitempty" tf:"retention_policy,omitempty"` + + // The ID of the Storage Account where flow logs are stored. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` + + // Reference to a Account in storage to populate storageAccountId. 
+ // +kubebuilder:validation:Optional + StorageAccountIDRef *v1.Reference `json:"storageAccountIdRef,omitempty" tf:"-"` + + // Selector for a Account in storage to populate storageAccountId. + // +kubebuilder:validation:Optional + StorageAccountIDSelector *v1.Selector `json:"storageAccountIdSelector,omitempty" tf:"-"` + + // A mapping of tags which should be assigned to the Network Watcher Flow Log. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A traffic_analytics block as documented below. + TrafficAnalytics *TrafficAnalyticsInitParameters `json:"trafficAnalytics,omitempty" tf:"traffic_analytics,omitempty"` + + // The version (revision) of the flow log. Possible values are 1 and 2. + Version *float64 `json:"version,omitempty" tf:"version,omitempty"` +} + +type WatcherFlowLogObservation struct { + + // Should Network Flow Logging be Enabled? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The ID of the Network Watcher. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The location where the Network Watcher Flow Log resides. Changing this forces a new resource to be created. Defaults to the location of the Network Watcher. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The ID of the Network Security Group for which to enable flow logs for. Changing this forces a new resource to be created. + NetworkSecurityGroupID *string `json:"networkSecurityGroupId,omitempty" tf:"network_security_group_id,omitempty"` + + // The name of the Network Watcher. Changing this forces a new resource to be created. + NetworkWatcherName *string `json:"networkWatcherName,omitempty" tf:"network_watcher_name,omitempty"` + + // The name of the resource group in which the Network Watcher was deployed. Changing this forces a new resource to be created. 
+ ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A retention_policy block as documented below. + RetentionPolicy *RetentionPolicyObservation `json:"retentionPolicy,omitempty" tf:"retention_policy,omitempty"` + + // The ID of the Storage Account where flow logs are stored. + StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` + + // A mapping of tags which should be assigned to the Network Watcher Flow Log. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A traffic_analytics block as documented below. + TrafficAnalytics *TrafficAnalyticsObservation `json:"trafficAnalytics,omitempty" tf:"traffic_analytics,omitempty"` + + // The version (revision) of the flow log. Possible values are 1 and 2. + Version *float64 `json:"version,omitempty" tf:"version,omitempty"` +} + +type WatcherFlowLogParameters struct { + + // Should Network Flow Logging be Enabled? + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The location where the Network Watcher Flow Log resides. Changing this forces a new resource to be created. Defaults to the location of the Network Watcher. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The ID of the Network Security Group for which to enable flow logs for. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.SecurityGroup + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + NetworkSecurityGroupID *string `json:"networkSecurityGroupId,omitempty" tf:"network_security_group_id,omitempty"` + + // Reference to a SecurityGroup in network to populate networkSecurityGroupId. 
+ // +kubebuilder:validation:Optional + NetworkSecurityGroupIDRef *v1.Reference `json:"networkSecurityGroupIdRef,omitempty" tf:"-"` + + // Selector for a SecurityGroup in network to populate networkSecurityGroupId. + // +kubebuilder:validation:Optional + NetworkSecurityGroupIDSelector *v1.Selector `json:"networkSecurityGroupIdSelector,omitempty" tf:"-"` + + // The name of the Network Watcher. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.Watcher + // +kubebuilder:validation:Optional + NetworkWatcherName *string `json:"networkWatcherName,omitempty" tf:"network_watcher_name,omitempty"` + + // Reference to a Watcher in network to populate networkWatcherName. + // +kubebuilder:validation:Optional + NetworkWatcherNameRef *v1.Reference `json:"networkWatcherNameRef,omitempty" tf:"-"` + + // Selector for a Watcher in network to populate networkWatcherName. + // +kubebuilder:validation:Optional + NetworkWatcherNameSelector *v1.Selector `json:"networkWatcherNameSelector,omitempty" tf:"-"` + + // The name of the resource group in which the Network Watcher was deployed. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A retention_policy block as documented below. 
+ // +kubebuilder:validation:Optional + RetentionPolicy *RetentionPolicyParameters `json:"retentionPolicy,omitempty" tf:"retention_policy,omitempty"` + + // The ID of the Storage Account where flow logs are stored. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` + + // Reference to a Account in storage to populate storageAccountId. + // +kubebuilder:validation:Optional + StorageAccountIDRef *v1.Reference `json:"storageAccountIdRef,omitempty" tf:"-"` + + // Selector for a Account in storage to populate storageAccountId. + // +kubebuilder:validation:Optional + StorageAccountIDSelector *v1.Selector `json:"storageAccountIdSelector,omitempty" tf:"-"` + + // A mapping of tags which should be assigned to the Network Watcher Flow Log. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // A traffic_analytics block as documented below. + // +kubebuilder:validation:Optional + TrafficAnalytics *TrafficAnalyticsParameters `json:"trafficAnalytics,omitempty" tf:"traffic_analytics,omitempty"` + + // The version (revision) of the flow log. Possible values are 1 and 2. + // +kubebuilder:validation:Optional + Version *float64 `json:"version,omitempty" tf:"version,omitempty"` +} + +// WatcherFlowLogSpec defines the desired state of WatcherFlowLog +type WatcherFlowLogSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider WatcherFlowLogParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. 
+ // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider WatcherFlowLogInitParameters `json:"initProvider,omitempty"` +} + +// WatcherFlowLogStatus defines the observed state of WatcherFlowLog. +type WatcherFlowLogStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider WatcherFlowLogObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// WatcherFlowLog is the Schema for the WatcherFlowLogs API. Manages a Network Watcher Flow Log. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type WatcherFlowLog struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.enabled) || (has(self.initProvider) && has(self.initProvider.enabled))",message="spec.forProvider.enabled is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.retentionPolicy) || (has(self.initProvider) && has(self.initProvider.retentionPolicy))",message="spec.forProvider.retentionPolicy is a required parameter" + Spec WatcherFlowLogSpec `json:"spec"` + Status WatcherFlowLogStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// WatcherFlowLogList contains a list of WatcherFlowLogs +type WatcherFlowLogList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []WatcherFlowLog `json:"items"` +} + +// Repository type metadata. +var ( + WatcherFlowLog_Kind = "WatcherFlowLog" + WatcherFlowLog_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: WatcherFlowLog_Kind}.String() + WatcherFlowLog_KindAPIVersion = WatcherFlowLog_Kind + "." 
+ CRDGroupVersion.String() + WatcherFlowLog_GroupVersionKind = CRDGroupVersion.WithKind(WatcherFlowLog_Kind) +) + +func init() { + SchemeBuilder.Register(&WatcherFlowLog{}, &WatcherFlowLogList{}) +} diff --git a/apis/network/v1beta2/zz_webapplicationfirewallpolicy_terraformed.go b/apis/network/v1beta2/zz_webapplicationfirewallpolicy_terraformed.go new file mode 100755 index 000000000..ed0dd85a5 --- /dev/null +++ b/apis/network/v1beta2/zz_webapplicationfirewallpolicy_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this WebApplicationFirewallPolicy +func (mg *WebApplicationFirewallPolicy) GetTerraformResourceType() string { + return "azurerm_web_application_firewall_policy" +} + +// GetConnectionDetailsMapping for this WebApplicationFirewallPolicy +func (tr *WebApplicationFirewallPolicy) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this WebApplicationFirewallPolicy +func (tr *WebApplicationFirewallPolicy) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this WebApplicationFirewallPolicy +func (tr *WebApplicationFirewallPolicy) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this WebApplicationFirewallPolicy +func (tr *WebApplicationFirewallPolicy) GetID() string { + if 
tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this WebApplicationFirewallPolicy +func (tr *WebApplicationFirewallPolicy) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this WebApplicationFirewallPolicy +func (tr *WebApplicationFirewallPolicy) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this WebApplicationFirewallPolicy +func (tr *WebApplicationFirewallPolicy) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this WebApplicationFirewallPolicy +func (tr *WebApplicationFirewallPolicy) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this WebApplicationFirewallPolicy using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *WebApplicationFirewallPolicy) LateInitialize(attrs []byte) (bool, error) { + params := &WebApplicationFirewallPolicyParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *WebApplicationFirewallPolicy) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/network/v1beta2/zz_webapplicationfirewallpolicy_types.go b/apis/network/v1beta2/zz_webapplicationfirewallpolicy_types.go new file mode 100755 index 000000000..625673fa0 --- /dev/null +++ b/apis/network/v1beta2/zz_webapplicationfirewallpolicy_types.go @@ -0,0 +1,771 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CustomRulesInitParameters struct { + + // Type of action. Possible values are Allow, Block and Log. 
+ Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // Describes if the policy is in enabled state or disabled state. Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Specifies what grouping the rate limit will count requests by. Possible values are GeoLocation, ClientAddr and None. + GroupRateLimitBy *string `json:"groupRateLimitBy,omitempty" tf:"group_rate_limit_by,omitempty"` + + // One or more match_conditions blocks as defined below. + MatchConditions []MatchConditionsInitParameters `json:"matchConditions,omitempty" tf:"match_conditions,omitempty"` + + // Gets name of the resource that is unique within a policy. This name can be used to access the resource. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Describes priority of the rule. Rules with a lower value will be evaluated before rules with a higher value. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // Specifies the duration at which the rate limit policy will be applied. Should be used with RateLimitRule rule type. Possible values are FiveMins and OneMin. + RateLimitDuration *string `json:"rateLimitDuration,omitempty" tf:"rate_limit_duration,omitempty"` + + // Specifies the threshold value for the rate limit policy. Must be greater than or equal to 1 if provided. + RateLimitThreshold *float64 `json:"rateLimitThreshold,omitempty" tf:"rate_limit_threshold,omitempty"` + + // Describes the type of rule. Possible values are MatchRule, RateLimitRule and Invalid. + RuleType *string `json:"ruleType,omitempty" tf:"rule_type,omitempty"` +} + +type CustomRulesObservation struct { + + // Type of action. Possible values are Allow, Block and Log. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // Describes if the policy is in enabled state or disabled state. Defaults to true. 
+ Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Specifies what grouping the rate limit will count requests by. Possible values are GeoLocation, ClientAddr and None. + GroupRateLimitBy *string `json:"groupRateLimitBy,omitempty" tf:"group_rate_limit_by,omitempty"` + + // One or more match_conditions blocks as defined below. + MatchConditions []MatchConditionsObservation `json:"matchConditions,omitempty" tf:"match_conditions,omitempty"` + + // Gets name of the resource that is unique within a policy. This name can be used to access the resource. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Describes priority of the rule. Rules with a lower value will be evaluated before rules with a higher value. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // Specifies the duration at which the rate limit policy will be applied. Should be used with RateLimitRule rule type. Possible values are FiveMins and OneMin. + RateLimitDuration *string `json:"rateLimitDuration,omitempty" tf:"rate_limit_duration,omitempty"` + + // Specifies the threshold value for the rate limit policy. Must be greater than or equal to 1 if provided. + RateLimitThreshold *float64 `json:"rateLimitThreshold,omitempty" tf:"rate_limit_threshold,omitempty"` + + // Describes the type of rule. Possible values are MatchRule, RateLimitRule and Invalid. + RuleType *string `json:"ruleType,omitempty" tf:"rule_type,omitempty"` +} + +type CustomRulesParameters struct { + + // Type of action. Possible values are Allow, Block and Log. + // +kubebuilder:validation:Optional + Action *string `json:"action" tf:"action,omitempty"` + + // Describes if the policy is in enabled state or disabled state. Defaults to true. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Specifies what grouping the rate limit will count requests by. Possible values are GeoLocation, ClientAddr and None. 
+ // +kubebuilder:validation:Optional + GroupRateLimitBy *string `json:"groupRateLimitBy,omitempty" tf:"group_rate_limit_by,omitempty"` + + // One or more match_conditions blocks as defined below. + // +kubebuilder:validation:Optional + MatchConditions []MatchConditionsParameters `json:"matchConditions" tf:"match_conditions,omitempty"` + + // Gets name of the resource that is unique within a policy. This name can be used to access the resource. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Describes priority of the rule. Rules with a lower value will be evaluated before rules with a higher value. + // +kubebuilder:validation:Optional + Priority *float64 `json:"priority" tf:"priority,omitempty"` + + // Specifies the duration at which the rate limit policy will be applied. Should be used with RateLimitRule rule type. Possible values are FiveMins and OneMin. + // +kubebuilder:validation:Optional + RateLimitDuration *string `json:"rateLimitDuration,omitempty" tf:"rate_limit_duration,omitempty"` + + // Specifies the threshold value for the rate limit policy. Must be greater than or equal to 1 if provided. + // +kubebuilder:validation:Optional + RateLimitThreshold *float64 `json:"rateLimitThreshold,omitempty" tf:"rate_limit_threshold,omitempty"` + + // Describes the type of rule. Possible values are MatchRule, RateLimitRule and Invalid. + // +kubebuilder:validation:Optional + RuleType *string `json:"ruleType" tf:"rule_type,omitempty"` +} + +type ExcludedRuleSetInitParameters struct { + + // One or more rule_group block defined below. + RuleGroup []RuleGroupInitParameters `json:"ruleGroup,omitempty" tf:"rule_group,omitempty"` + + // The rule set type. Possible values: Microsoft_BotManagerRuleSet, Microsoft_DefaultRuleSet and OWASP. Defaults to OWASP. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // The rule set version. Possible values: 0.1, 1.0, 2.1, 2.2.9, 3.0, 3.1 and 3.2. 
+ Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type ExcludedRuleSetObservation struct { + + // One or more rule_group block defined below. + RuleGroup []RuleGroupObservation `json:"ruleGroup,omitempty" tf:"rule_group,omitempty"` + + // The rule set type. Possible values: Microsoft_BotManagerRuleSet, Microsoft_DefaultRuleSet and OWASP. Defaults to OWASP. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // The rule set version. Possible values: 0.1, 1.0, 2.1, 2.2.9, 3.0, 3.1 and 3.2. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type ExcludedRuleSetParameters struct { + + // One or more rule_group block defined below. + // +kubebuilder:validation:Optional + RuleGroup []RuleGroupParameters `json:"ruleGroup,omitempty" tf:"rule_group,omitempty"` + + // The rule set type. Possible values: Microsoft_BotManagerRuleSet, Microsoft_DefaultRuleSet and OWASP. Defaults to OWASP. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // The rule set version. Possible values: 0.1, 1.0, 2.1, 2.2.9, 3.0, 3.1 and 3.2. + // +kubebuilder:validation:Optional + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type LogScrubbingInitParameters struct { + + // Whether this rule is enabled. Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // One or more rule block defined below. + Rule []LogScrubbingRuleInitParameters `json:"rule,omitempty" tf:"rule,omitempty"` +} + +type LogScrubbingObservation struct { + + // Whether this rule is enabled. Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // One or more rule block defined below. + Rule []LogScrubbingRuleObservation `json:"rule,omitempty" tf:"rule,omitempty"` +} + +type LogScrubbingParameters struct { + + // Whether this rule is enabled. Defaults to true. 
+ // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // One or more rule block defined below. + // +kubebuilder:validation:Optional + Rule []LogScrubbingRuleParameters `json:"rule,omitempty" tf:"rule,omitempty"` +} + +type LogScrubbingRuleInitParameters struct { + + // Whether this rule is enabled. Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Specifies the variable to be scrubbed from the logs. Possible values are RequestHeaderNames, RequestCookieNames, RequestArgNames, RequestPostArgNames, RequestJSONArgNames and RequestIPAddress. + MatchVariable *string `json:"matchVariable,omitempty" tf:"match_variable,omitempty"` + + // Specifies which elements in the collection this rule applies to. + // When matchVariable is a collection, operator used to specify which elements in the collection this rule applies to. + Selector *string `json:"selector,omitempty" tf:"selector,omitempty"` + + // Specifies the operating on the selector. Possible values are Equals and EqualsAny. Defaults to Equals. + SelectorMatchOperator *string `json:"selectorMatchOperator,omitempty" tf:"selector_match_operator,omitempty"` +} + +type LogScrubbingRuleObservation struct { + + // Whether this rule is enabled. Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Specifies the variable to be scrubbed from the logs. Possible values are RequestHeaderNames, RequestCookieNames, RequestArgNames, RequestPostArgNames, RequestJSONArgNames and RequestIPAddress. + MatchVariable *string `json:"matchVariable,omitempty" tf:"match_variable,omitempty"` + + // Specifies which elements in the collection this rule applies to. + // When matchVariable is a collection, operator used to specify which elements in the collection this rule applies to. + Selector *string `json:"selector,omitempty" tf:"selector,omitempty"` + + // Specifies the operating on the selector. 
Possible values are Equals and EqualsAny. Defaults to Equals. + SelectorMatchOperator *string `json:"selectorMatchOperator,omitempty" tf:"selector_match_operator,omitempty"` +} + +type LogScrubbingRuleParameters struct { + + // Whether this rule is enabled. Defaults to true. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Specifies the variable to be scrubbed from the logs. Possible values are RequestHeaderNames, RequestCookieNames, RequestArgNames, RequestPostArgNames, RequestJSONArgNames and RequestIPAddress. + // +kubebuilder:validation:Optional + MatchVariable *string `json:"matchVariable" tf:"match_variable,omitempty"` + + // Specifies which elements in the collection this rule applies to. + // When matchVariable is a collection, operator used to specify which elements in the collection this rule applies to. + // +kubebuilder:validation:Optional + Selector *string `json:"selector,omitempty" tf:"selector,omitempty"` + + // Specifies the operating on the selector. Possible values are Equals and EqualsAny. Defaults to Equals. + // +kubebuilder:validation:Optional + SelectorMatchOperator *string `json:"selectorMatchOperator,omitempty" tf:"selector_match_operator,omitempty"` +} + +type ManagedRuleSetInitParameters struct { + + // One or more rule_group_override block defined below. + RuleGroupOverride []RuleGroupOverrideInitParameters `json:"ruleGroupOverride,omitempty" tf:"rule_group_override,omitempty"` + + // The rule set type. Possible values: Microsoft_BotManagerRuleSet, Microsoft_DefaultRuleSet and OWASP. Defaults to OWASP. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // The rule set version. Possible values: 0.1, 1.0, 2.1, 2.2.9, 3.0, 3.1 and 3.2. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type ManagedRuleSetObservation struct { + + // One or more rule_group_override block defined below. 
+ RuleGroupOverride []RuleGroupOverrideObservation `json:"ruleGroupOverride,omitempty" tf:"rule_group_override,omitempty"` + + // The rule set type. Possible values: Microsoft_BotManagerRuleSet, Microsoft_DefaultRuleSet and OWASP. Defaults to OWASP. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // The rule set version. Possible values: 0.1, 1.0, 2.1, 2.2.9, 3.0, 3.1 and 3.2. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type ManagedRuleSetParameters struct { + + // One or more rule_group_override block defined below. + // +kubebuilder:validation:Optional + RuleGroupOverride []RuleGroupOverrideParameters `json:"ruleGroupOverride,omitempty" tf:"rule_group_override,omitempty"` + + // The rule set type. Possible values: Microsoft_BotManagerRuleSet, Microsoft_DefaultRuleSet and OWASP. Defaults to OWASP. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // The rule set version. Possible values: 0.1, 1.0, 2.1, 2.2.9, 3.0, 3.1 and 3.2. + // +kubebuilder:validation:Optional + Version *string `json:"version" tf:"version,omitempty"` +} + +type ManagedRulesExclusionInitParameters struct { + + // One or more excluded_rule_set block defined below. + ExcludedRuleSet *ExcludedRuleSetInitParameters `json:"excludedRuleSet,omitempty" tf:"excluded_rule_set,omitempty"` + + // Specifies the variable to be scrubbed from the logs. Possible values are RequestHeaderNames, RequestCookieNames, RequestArgNames, RequestPostArgNames, RequestJSONArgNames and RequestIPAddress. + MatchVariable *string `json:"matchVariable,omitempty" tf:"match_variable,omitempty"` + + // Specifies which elements in the collection this rule applies to. + Selector *string `json:"selector,omitempty" tf:"selector,omitempty"` + + // Specifies the operating on the selector. Possible values are Equals and EqualsAny. Defaults to Equals. 
+ SelectorMatchOperator *string `json:"selectorMatchOperator,omitempty" tf:"selector_match_operator,omitempty"` +} + +type ManagedRulesExclusionObservation struct { + + // One or more excluded_rule_set block defined below. + ExcludedRuleSet *ExcludedRuleSetObservation `json:"excludedRuleSet,omitempty" tf:"excluded_rule_set,omitempty"` + + // Specifies the variable to be scrubbed from the logs. Possible values are RequestHeaderNames, RequestCookieNames, RequestArgNames, RequestPostArgNames, RequestJSONArgNames and RequestIPAddress. + MatchVariable *string `json:"matchVariable,omitempty" tf:"match_variable,omitempty"` + + // Specifies which elements in the collection this rule applies to. + Selector *string `json:"selector,omitempty" tf:"selector,omitempty"` + + // Specifies the operating on the selector. Possible values are Equals and EqualsAny. Defaults to Equals. + SelectorMatchOperator *string `json:"selectorMatchOperator,omitempty" tf:"selector_match_operator,omitempty"` +} + +type ManagedRulesExclusionParameters struct { + + // One or more excluded_rule_set block defined below. + // +kubebuilder:validation:Optional + ExcludedRuleSet *ExcludedRuleSetParameters `json:"excludedRuleSet,omitempty" tf:"excluded_rule_set,omitempty"` + + // Specifies the variable to be scrubbed from the logs. Possible values are RequestHeaderNames, RequestCookieNames, RequestArgNames, RequestPostArgNames, RequestJSONArgNames and RequestIPAddress. + // +kubebuilder:validation:Optional + MatchVariable *string `json:"matchVariable" tf:"match_variable,omitempty"` + + // Specifies which elements in the collection this rule applies to. + // +kubebuilder:validation:Optional + Selector *string `json:"selector" tf:"selector,omitempty"` + + // Specifies the operating on the selector. Possible values are Equals and EqualsAny. Defaults to Equals. 
+ // +kubebuilder:validation:Optional + SelectorMatchOperator *string `json:"selectorMatchOperator" tf:"selector_match_operator,omitempty"` +} + +type ManagedRulesInitParameters struct { + + // One or more exclusion block defined below. + Exclusion []ManagedRulesExclusionInitParameters `json:"exclusion,omitempty" tf:"exclusion,omitempty"` + + // One or more managed_rule_set block defined below. + ManagedRuleSet []ManagedRuleSetInitParameters `json:"managedRuleSet,omitempty" tf:"managed_rule_set,omitempty"` +} + +type ManagedRulesObservation struct { + + // One or more exclusion block defined below. + Exclusion []ManagedRulesExclusionObservation `json:"exclusion,omitempty" tf:"exclusion,omitempty"` + + // One or more managed_rule_set block defined below. + ManagedRuleSet []ManagedRuleSetObservation `json:"managedRuleSet,omitempty" tf:"managed_rule_set,omitempty"` +} + +type ManagedRulesParameters struct { + + // One or more exclusion block defined below. + // +kubebuilder:validation:Optional + Exclusion []ManagedRulesExclusionParameters `json:"exclusion,omitempty" tf:"exclusion,omitempty"` + + // One or more managed_rule_set block defined below. + // +kubebuilder:validation:Optional + ManagedRuleSet []ManagedRuleSetParameters `json:"managedRuleSet" tf:"managed_rule_set,omitempty"` +} + +type MatchConditionsInitParameters struct { + + // A list of match values. This is Required when the operator is not Any. + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // One or more match_variables blocks as defined below. + MatchVariables []MatchVariablesInitParameters `json:"matchVariables,omitempty" tf:"match_variables,omitempty"` + + // Describes if this is negate condition or not + NegationCondition *bool `json:"negationCondition,omitempty" tf:"negation_condition,omitempty"` + + // Describes operator to be matched. 
Possible values are Any, IPMatch, GeoMatch, Equal, Contains, LessThan, GreaterThan, LessThanOrEqual, GreaterThanOrEqual, BeginsWith, EndsWith and Regex. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of transformations to do before the match is attempted. Possible values are HtmlEntityDecode, Lowercase, RemoveNulls, Trim, UrlDecode and UrlEncode. + // +listType=set + Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type MatchConditionsObservation struct { + + // A list of match values. This is Required when the operator is not Any. + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // One or more match_variables blocks as defined below. + MatchVariables []MatchVariablesObservation `json:"matchVariables,omitempty" tf:"match_variables,omitempty"` + + // Describes if this is negate condition or not + NegationCondition *bool `json:"negationCondition,omitempty" tf:"negation_condition,omitempty"` + + // Describes operator to be matched. Possible values are Any, IPMatch, GeoMatch, Equal, Contains, LessThan, GreaterThan, LessThanOrEqual, GreaterThanOrEqual, BeginsWith, EndsWith and Regex. + Operator *string `json:"operator,omitempty" tf:"operator,omitempty"` + + // A list of transformations to do before the match is attempted. Possible values are HtmlEntityDecode, Lowercase, RemoveNulls, Trim, UrlDecode and UrlEncode. + // +listType=set + Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type MatchConditionsParameters struct { + + // A list of match values. This is Required when the operator is not Any. + // +kubebuilder:validation:Optional + MatchValues []*string `json:"matchValues,omitempty" tf:"match_values,omitempty"` + + // One or more match_variables blocks as defined below. 
+ // +kubebuilder:validation:Optional + MatchVariables []MatchVariablesParameters `json:"matchVariables" tf:"match_variables,omitempty"` + + // Describes if this is negate condition or not + // +kubebuilder:validation:Optional + NegationCondition *bool `json:"negationCondition,omitempty" tf:"negation_condition,omitempty"` + + // Describes operator to be matched. Possible values are Any, IPMatch, GeoMatch, Equal, Contains, LessThan, GreaterThan, LessThanOrEqual, GreaterThanOrEqual, BeginsWith, EndsWith and Regex. + // +kubebuilder:validation:Optional + Operator *string `json:"operator" tf:"operator,omitempty"` + + // A list of transformations to do before the match is attempted. Possible values are HtmlEntityDecode, Lowercase, RemoveNulls, Trim, UrlDecode and UrlEncode. + // +kubebuilder:validation:Optional + // +listType=set + Transforms []*string `json:"transforms,omitempty" tf:"transforms,omitempty"` +} + +type MatchVariablesInitParameters struct { + + // Specifies which elements in the collection this rule applies to. + Selector *string `json:"selector,omitempty" tf:"selector,omitempty"` + + // The name of the Match Variable. Possible values are RemoteAddr, RequestMethod, QueryString, PostArgs, RequestUri, RequestHeaders, RequestBody and RequestCookies. + VariableName *string `json:"variableName,omitempty" tf:"variable_name,omitempty"` +} + +type MatchVariablesObservation struct { + + // Specifies which elements in the collection this rule applies to. + Selector *string `json:"selector,omitempty" tf:"selector,omitempty"` + + // The name of the Match Variable. Possible values are RemoteAddr, RequestMethod, QueryString, PostArgs, RequestUri, RequestHeaders, RequestBody and RequestCookies. + VariableName *string `json:"variableName,omitempty" tf:"variable_name,omitempty"` +} + +type MatchVariablesParameters struct { + + // Specifies which elements in the collection this rule applies to. 
+ // +kubebuilder:validation:Optional + Selector *string `json:"selector,omitempty" tf:"selector,omitempty"` + + // The name of the Match Variable. Possible values are RemoteAddr, RequestMethod, QueryString, PostArgs, RequestUri, RequestHeaders, RequestBody and RequestCookies. + // +kubebuilder:validation:Optional + VariableName *string `json:"variableName" tf:"variable_name,omitempty"` +} + +type PolicySettingsInitParameters struct { + + // Describes if the policy is in enabled state or disabled state. Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The File Upload Limit in MB. Accepted values are in the range 1 to 4000. Defaults to 100. + FileUploadLimitInMb *float64 `json:"fileUploadLimitInMb,omitempty" tf:"file_upload_limit_in_mb,omitempty"` + + // One log_scrubbing block as defined below. + LogScrubbing *LogScrubbingInitParameters `json:"logScrubbing,omitempty" tf:"log_scrubbing,omitempty"` + + // The Maximum Request Body Size in KB. Accepted values are in the range 8 to 2000. Defaults to 128. + MaxRequestBodySizeInKb *float64 `json:"maxRequestBodySizeInKb,omitempty" tf:"max_request_body_size_in_kb,omitempty"` + + // Describes if it is in detection mode or prevention mode at the policy level. Valid values are Detection and Prevention. Defaults to Prevention. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // Is Request Body Inspection enabled? Defaults to true. + RequestBodyCheck *bool `json:"requestBodyCheck,omitempty" tf:"request_body_check,omitempty"` + + // Specifies the maximum request body inspection limit in KB for the Web Application Firewall. Defaults to 128. + RequestBodyInspectLimitInKb *float64 `json:"requestBodyInspectLimitInKb,omitempty" tf:"request_body_inspect_limit_in_kb,omitempty"` +} + +type PolicySettingsObservation struct { + + // Describes if the policy is in enabled state or disabled state. Defaults to true. 
+ Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The File Upload Limit in MB. Accepted values are in the range 1 to 4000. Defaults to 100. + FileUploadLimitInMb *float64 `json:"fileUploadLimitInMb,omitempty" tf:"file_upload_limit_in_mb,omitempty"` + + // One log_scrubbing block as defined below. + LogScrubbing *LogScrubbingObservation `json:"logScrubbing,omitempty" tf:"log_scrubbing,omitempty"` + + // The Maximum Request Body Size in KB. Accepted values are in the range 8 to 2000. Defaults to 128. + MaxRequestBodySizeInKb *float64 `json:"maxRequestBodySizeInKb,omitempty" tf:"max_request_body_size_in_kb,omitempty"` + + // Describes if it is in detection mode or prevention mode at the policy level. Valid values are Detection and Prevention. Defaults to Prevention. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // Is Request Body Inspection enabled? Defaults to true. + RequestBodyCheck *bool `json:"requestBodyCheck,omitempty" tf:"request_body_check,omitempty"` + + // Specifies the maximum request body inspection limit in KB for the Web Application Firewall. Defaults to 128. + RequestBodyInspectLimitInKb *float64 `json:"requestBodyInspectLimitInKb,omitempty" tf:"request_body_inspect_limit_in_kb,omitempty"` +} + +type PolicySettingsParameters struct { + + // Describes if the policy is in enabled state or disabled state. Defaults to true. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The File Upload Limit in MB. Accepted values are in the range 1 to 4000. Defaults to 100. + // +kubebuilder:validation:Optional + FileUploadLimitInMb *float64 `json:"fileUploadLimitInMb,omitempty" tf:"file_upload_limit_in_mb,omitempty"` + + // One log_scrubbing block as defined below. + // +kubebuilder:validation:Optional + LogScrubbing *LogScrubbingParameters `json:"logScrubbing,omitempty" tf:"log_scrubbing,omitempty"` + + // The Maximum Request Body Size in KB. 
Accepted values are in the range 8 to 2000. Defaults to 128. + // +kubebuilder:validation:Optional + MaxRequestBodySizeInKb *float64 `json:"maxRequestBodySizeInKb,omitempty" tf:"max_request_body_size_in_kb,omitempty"` + + // Describes if it is in detection mode or prevention mode at the policy level. Valid values are Detection and Prevention. Defaults to Prevention. + // +kubebuilder:validation:Optional + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` + + // Is Request Body Inspection enabled? Defaults to true. + // +kubebuilder:validation:Optional + RequestBodyCheck *bool `json:"requestBodyCheck,omitempty" tf:"request_body_check,omitempty"` + + // Specifies the maximum request body inspection limit in KB for the Web Application Firewall. Defaults to 128. + // +kubebuilder:validation:Optional + RequestBodyInspectLimitInKb *float64 `json:"requestBodyInspectLimitInKb,omitempty" tf:"request_body_inspect_limit_in_kb,omitempty"` +} + +type RuleGroupInitParameters struct { + + // One or more Rule IDs for exclusion. + ExcludedRules []*string `json:"excludedRules,omitempty" tf:"excluded_rules,omitempty"` + + // The name of the Rule Group. 
Possible values are BadBots, crs_20_protocol_violations, crs_21_protocol_anomalies, crs_23_request_limits, crs_30_http_policy, crs_35_bad_robots, crs_40_generic_attacks, crs_41_sql_injection_attacks, crs_41_xss_attacks, crs_42_tight_security, crs_45_trojans, crs_49_inbound_blocking, General, GoodBots, KnownBadBots, Known-CVEs, REQUEST-911-METHOD-ENFORCEMENT, REQUEST-913-SCANNER-DETECTION, REQUEST-920-PROTOCOL-ENFORCEMENT, REQUEST-921-PROTOCOL-ATTACK, REQUEST-930-APPLICATION-ATTACK-LFI, REQUEST-931-APPLICATION-ATTACK-RFI, REQUEST-932-APPLICATION-ATTACK-RCE, REQUEST-933-APPLICATION-ATTACK-PHP, REQUEST-941-APPLICATION-ATTACK-XSS, REQUEST-942-APPLICATION-ATTACK-SQLI, REQUEST-943-APPLICATION-ATTACK-SESSION-FIXATION, REQUEST-944-APPLICATION-ATTACK-JAVA, UnknownBots, METHOD-ENFORCEMENT, PROTOCOL-ENFORCEMENT, PROTOCOL-ATTACK, LFI, RFI, RCE, PHP, NODEJS, XSS, SQLI, FIX, JAVA, MS-ThreatIntel-WebShells, MS-ThreatIntel-AppSec, MS-ThreatIntel-SQLI and MS-ThreatIntel-CVEsMS-ThreatIntel-WebShells`,. + RuleGroupName *string `json:"ruleGroupName,omitempty" tf:"rule_group_name,omitempty"` +} + +type RuleGroupObservation struct { + + // One or more Rule IDs for exclusion. + ExcludedRules []*string `json:"excludedRules,omitempty" tf:"excluded_rules,omitempty"` + + // The name of the Rule Group. 
Possible values are BadBots, crs_20_protocol_violations, crs_21_protocol_anomalies, crs_23_request_limits, crs_30_http_policy, crs_35_bad_robots, crs_40_generic_attacks, crs_41_sql_injection_attacks, crs_41_xss_attacks, crs_42_tight_security, crs_45_trojans, crs_49_inbound_blocking, General, GoodBots, KnownBadBots, Known-CVEs, REQUEST-911-METHOD-ENFORCEMENT, REQUEST-913-SCANNER-DETECTION, REQUEST-920-PROTOCOL-ENFORCEMENT, REQUEST-921-PROTOCOL-ATTACK, REQUEST-930-APPLICATION-ATTACK-LFI, REQUEST-931-APPLICATION-ATTACK-RFI, REQUEST-932-APPLICATION-ATTACK-RCE, REQUEST-933-APPLICATION-ATTACK-PHP, REQUEST-941-APPLICATION-ATTACK-XSS, REQUEST-942-APPLICATION-ATTACK-SQLI, REQUEST-943-APPLICATION-ATTACK-SESSION-FIXATION, REQUEST-944-APPLICATION-ATTACK-JAVA, UnknownBots, METHOD-ENFORCEMENT, PROTOCOL-ENFORCEMENT, PROTOCOL-ATTACK, LFI, RFI, RCE, PHP, NODEJS, XSS, SQLI, FIX, JAVA, MS-ThreatIntel-WebShells, MS-ThreatIntel-AppSec, MS-ThreatIntel-SQLI and MS-ThreatIntel-CVEsMS-ThreatIntel-WebShells`,. + RuleGroupName *string `json:"ruleGroupName,omitempty" tf:"rule_group_name,omitempty"` +} + +type RuleGroupOverrideInitParameters struct { + DisabledRules []*string `json:"disabledRules,omitempty" tf:"disabled_rules,omitempty"` + + // One or more rule block defined below. + Rule []RuleGroupOverrideRuleInitParameters `json:"rule,omitempty" tf:"rule,omitempty"` + + // The name of the Rule Group. 
Possible values are BadBots, crs_20_protocol_violations, crs_21_protocol_anomalies, crs_23_request_limits, crs_30_http_policy, crs_35_bad_robots, crs_40_generic_attacks, crs_41_sql_injection_attacks, crs_41_xss_attacks, crs_42_tight_security, crs_45_trojans, crs_49_inbound_blocking, General, GoodBots, KnownBadBots, Known-CVEs, REQUEST-911-METHOD-ENFORCEMENT, REQUEST-913-SCANNER-DETECTION, REQUEST-920-PROTOCOL-ENFORCEMENT, REQUEST-921-PROTOCOL-ATTACK, REQUEST-930-APPLICATION-ATTACK-LFI, REQUEST-931-APPLICATION-ATTACK-RFI, REQUEST-932-APPLICATION-ATTACK-RCE, REQUEST-933-APPLICATION-ATTACK-PHP, REQUEST-941-APPLICATION-ATTACK-XSS, REQUEST-942-APPLICATION-ATTACK-SQLI, REQUEST-943-APPLICATION-ATTACK-SESSION-FIXATION, REQUEST-944-APPLICATION-ATTACK-JAVA, UnknownBots, METHOD-ENFORCEMENT, PROTOCOL-ENFORCEMENT, PROTOCOL-ATTACK, LFI, RFI, RCE, PHP, NODEJS, XSS, SQLI, FIX, JAVA, MS-ThreatIntel-WebShells, MS-ThreatIntel-AppSec, MS-ThreatIntel-SQLI and MS-ThreatIntel-CVEsMS-ThreatIntel-WebShells`,. + RuleGroupName *string `json:"ruleGroupName,omitempty" tf:"rule_group_name,omitempty"` +} + +type RuleGroupOverrideObservation struct { + DisabledRules []*string `json:"disabledRules,omitempty" tf:"disabled_rules,omitempty"` + + // One or more rule block defined below. + Rule []RuleGroupOverrideRuleObservation `json:"rule,omitempty" tf:"rule,omitempty"` + + // The name of the Rule Group. 
Possible values are BadBots, crs_20_protocol_violations, crs_21_protocol_anomalies, crs_23_request_limits, crs_30_http_policy, crs_35_bad_robots, crs_40_generic_attacks, crs_41_sql_injection_attacks, crs_41_xss_attacks, crs_42_tight_security, crs_45_trojans, crs_49_inbound_blocking, General, GoodBots, KnownBadBots, Known-CVEs, REQUEST-911-METHOD-ENFORCEMENT, REQUEST-913-SCANNER-DETECTION, REQUEST-920-PROTOCOL-ENFORCEMENT, REQUEST-921-PROTOCOL-ATTACK, REQUEST-930-APPLICATION-ATTACK-LFI, REQUEST-931-APPLICATION-ATTACK-RFI, REQUEST-932-APPLICATION-ATTACK-RCE, REQUEST-933-APPLICATION-ATTACK-PHP, REQUEST-941-APPLICATION-ATTACK-XSS, REQUEST-942-APPLICATION-ATTACK-SQLI, REQUEST-943-APPLICATION-ATTACK-SESSION-FIXATION, REQUEST-944-APPLICATION-ATTACK-JAVA, UnknownBots, METHOD-ENFORCEMENT, PROTOCOL-ENFORCEMENT, PROTOCOL-ATTACK, LFI, RFI, RCE, PHP, NODEJS, XSS, SQLI, FIX, JAVA, MS-ThreatIntel-WebShells, MS-ThreatIntel-AppSec, MS-ThreatIntel-SQLI and MS-ThreatIntel-CVEsMS-ThreatIntel-WebShells`,. + RuleGroupName *string `json:"ruleGroupName,omitempty" tf:"rule_group_name,omitempty"` +} + +type RuleGroupOverrideParameters struct { + + // +kubebuilder:validation:Optional + DisabledRules []*string `json:"disabledRules,omitempty" tf:"disabled_rules,omitempty"` + + // One or more rule block defined below. + // +kubebuilder:validation:Optional + Rule []RuleGroupOverrideRuleParameters `json:"rule,omitempty" tf:"rule,omitempty"` + + // The name of the Rule Group. 
Possible values are BadBots, crs_20_protocol_violations, crs_21_protocol_anomalies, crs_23_request_limits, crs_30_http_policy, crs_35_bad_robots, crs_40_generic_attacks, crs_41_sql_injection_attacks, crs_41_xss_attacks, crs_42_tight_security, crs_45_trojans, crs_49_inbound_blocking, General, GoodBots, KnownBadBots, Known-CVEs, REQUEST-911-METHOD-ENFORCEMENT, REQUEST-913-SCANNER-DETECTION, REQUEST-920-PROTOCOL-ENFORCEMENT, REQUEST-921-PROTOCOL-ATTACK, REQUEST-930-APPLICATION-ATTACK-LFI, REQUEST-931-APPLICATION-ATTACK-RFI, REQUEST-932-APPLICATION-ATTACK-RCE, REQUEST-933-APPLICATION-ATTACK-PHP, REQUEST-941-APPLICATION-ATTACK-XSS, REQUEST-942-APPLICATION-ATTACK-SQLI, REQUEST-943-APPLICATION-ATTACK-SESSION-FIXATION, REQUEST-944-APPLICATION-ATTACK-JAVA, UnknownBots, METHOD-ENFORCEMENT, PROTOCOL-ENFORCEMENT, PROTOCOL-ATTACK, LFI, RFI, RCE, PHP, NODEJS, XSS, SQLI, FIX, JAVA, MS-ThreatIntel-WebShells, MS-ThreatIntel-AppSec, MS-ThreatIntel-SQLI and MS-ThreatIntel-CVEsMS-ThreatIntel-WebShells`,. + // +kubebuilder:validation:Optional + RuleGroupName *string `json:"ruleGroupName" tf:"rule_group_name,omitempty"` +} + +type RuleGroupOverrideRuleInitParameters struct { + + // Describes the override action to be applied when rule matches. Possible values are Allow, AnomalyScoring, Block and Log. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // Whether this rule is enabled. Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Identifier for the managed rule. + ID *string `json:"id,omitempty" tf:"id,omitempty"` +} + +type RuleGroupOverrideRuleObservation struct { + + // Describes the override action to be applied when rule matches. Possible values are Allow, AnomalyScoring, Block and Log. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // Whether this rule is enabled. Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Identifier for the managed rule. 
+ ID *string `json:"id,omitempty" tf:"id,omitempty"` +} + +type RuleGroupOverrideRuleParameters struct { + + // Describes the override action to be applied when rule matches. Possible values are Allow, AnomalyScoring, Block and Log. + // +kubebuilder:validation:Optional + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // Whether this rule is enabled. Defaults to true. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Identifier for the managed rule. + // +kubebuilder:validation:Optional + ID *string `json:"id" tf:"id,omitempty"` +} + +type RuleGroupParameters struct { + + // One or more Rule IDs for exclusion. + // +kubebuilder:validation:Optional + ExcludedRules []*string `json:"excludedRules,omitempty" tf:"excluded_rules,omitempty"` + + // The name of the Rule Group. Possible values are BadBots, crs_20_protocol_violations, crs_21_protocol_anomalies, crs_23_request_limits, crs_30_http_policy, crs_35_bad_robots, crs_40_generic_attacks, crs_41_sql_injection_attacks, crs_41_xss_attacks, crs_42_tight_security, crs_45_trojans, crs_49_inbound_blocking, General, GoodBots, KnownBadBots, Known-CVEs, REQUEST-911-METHOD-ENFORCEMENT, REQUEST-913-SCANNER-DETECTION, REQUEST-920-PROTOCOL-ENFORCEMENT, REQUEST-921-PROTOCOL-ATTACK, REQUEST-930-APPLICATION-ATTACK-LFI, REQUEST-931-APPLICATION-ATTACK-RFI, REQUEST-932-APPLICATION-ATTACK-RCE, REQUEST-933-APPLICATION-ATTACK-PHP, REQUEST-941-APPLICATION-ATTACK-XSS, REQUEST-942-APPLICATION-ATTACK-SQLI, REQUEST-943-APPLICATION-ATTACK-SESSION-FIXATION, REQUEST-944-APPLICATION-ATTACK-JAVA, UnknownBots, METHOD-ENFORCEMENT, PROTOCOL-ENFORCEMENT, PROTOCOL-ATTACK, LFI, RFI, RCE, PHP, NODEJS, XSS, SQLI, FIX, JAVA, MS-ThreatIntel-WebShells, MS-ThreatIntel-AppSec, MS-ThreatIntel-SQLI and MS-ThreatIntel-CVEsMS-ThreatIntel-WebShells`,. 
+ // +kubebuilder:validation:Optional + RuleGroupName *string `json:"ruleGroupName" tf:"rule_group_name,omitempty"` +} + +type WebApplicationFirewallPolicyInitParameters struct { + + // One or more custom_rules blocks as defined below. + CustomRules []CustomRulesInitParameters `json:"customRules,omitempty" tf:"custom_rules,omitempty"` + + // Resource location. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A managed_rules blocks as defined below. + ManagedRules *ManagedRulesInitParameters `json:"managedRules,omitempty" tf:"managed_rules,omitempty"` + + // A policy_settings block as defined below. + PolicySettings *PolicySettingsInitParameters `json:"policySettings,omitempty" tf:"policy_settings,omitempty"` + + // A mapping of tags to assign to the Web Application Firewall Policy. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type WebApplicationFirewallPolicyObservation struct { + + // One or more custom_rules blocks as defined below. + CustomRules []CustomRulesObservation `json:"customRules,omitempty" tf:"custom_rules,omitempty"` + + // A list of HTTP Listener IDs from an azurerm_application_gateway. + HTTPListenerIds []*string `json:"httpListenerIds,omitempty" tf:"http_listener_ids,omitempty"` + + // The ID of the Web Application Firewall Policy. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Resource location. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A managed_rules blocks as defined below. + ManagedRules *ManagedRulesObservation `json:"managedRules,omitempty" tf:"managed_rules,omitempty"` + + // A list of URL Path Map Path Rule IDs from an azurerm_application_gateway. + PathBasedRuleIds []*string `json:"pathBasedRuleIds,omitempty" tf:"path_based_rule_ids,omitempty"` + + // A policy_settings block as defined below. 
+ PolicySettings *PolicySettingsObservation `json:"policySettings,omitempty" tf:"policy_settings,omitempty"` + + // The name of the resource group. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A mapping of tags to assign to the Web Application Firewall Policy. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type WebApplicationFirewallPolicyParameters struct { + + // One or more custom_rules blocks as defined below. + // +kubebuilder:validation:Optional + CustomRules []CustomRulesParameters `json:"customRules,omitempty" tf:"custom_rules,omitempty"` + + // Resource location. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A managed_rules blocks as defined below. + // +kubebuilder:validation:Optional + ManagedRules *ManagedRulesParameters `json:"managedRules,omitempty" tf:"managed_rules,omitempty"` + + // A policy_settings block as defined below. + // +kubebuilder:validation:Optional + PolicySettings *PolicySettingsParameters `json:"policySettings,omitempty" tf:"policy_settings,omitempty"` + + // The name of the resource group. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. 
+ // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A mapping of tags to assign to the Web Application Firewall Policy. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +// WebApplicationFirewallPolicySpec defines the desired state of WebApplicationFirewallPolicy +type WebApplicationFirewallPolicySpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider WebApplicationFirewallPolicyParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider WebApplicationFirewallPolicyInitParameters `json:"initProvider,omitempty"` +} + +// WebApplicationFirewallPolicyStatus defines the observed state of WebApplicationFirewallPolicy. +type WebApplicationFirewallPolicyStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider WebApplicationFirewallPolicyObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// WebApplicationFirewallPolicy is the Schema for the WebApplicationFirewallPolicys API. Manages a Azure Web Application Firewall Policy instance. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type WebApplicationFirewallPolicy struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.managedRules) || (has(self.initProvider) && has(self.initProvider.managedRules))",message="spec.forProvider.managedRules is a required parameter" + Spec WebApplicationFirewallPolicySpec `json:"spec"` + Status WebApplicationFirewallPolicyStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// WebApplicationFirewallPolicyList contains a list of WebApplicationFirewallPolicys +type WebApplicationFirewallPolicyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []WebApplicationFirewallPolicy `json:"items"` +} + +// Repository type metadata. 
+var ( + WebApplicationFirewallPolicy_Kind = "WebApplicationFirewallPolicy" + WebApplicationFirewallPolicy_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: WebApplicationFirewallPolicy_Kind}.String() + WebApplicationFirewallPolicy_KindAPIVersion = WebApplicationFirewallPolicy_Kind + "." + CRDGroupVersion.String() + WebApplicationFirewallPolicy_GroupVersionKind = CRDGroupVersion.WithKind(WebApplicationFirewallPolicy_Kind) +) + +func init() { + SchemeBuilder.Register(&WebApplicationFirewallPolicy{}, &WebApplicationFirewallPolicyList{}) +} diff --git a/apis/notificationhubs/v1beta1/zz_authorizationrule_types.go b/apis/notificationhubs/v1beta1/zz_authorizationrule_types.go index 213847472..a5e14ccb8 100755 --- a/apis/notificationhubs/v1beta1/zz_authorizationrule_types.go +++ b/apis/notificationhubs/v1beta1/zz_authorizationrule_types.go @@ -79,7 +79,7 @@ type AuthorizationRuleParameters struct { NamespaceNameSelector *v1.Selector `json:"namespaceNameSelector,omitempty" tf:"-"` // The name of the Notification Hub for which the Authorization Rule should be created. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/notificationhubs/v1beta1.NotificationHub + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/notificationhubs/v1beta2.NotificationHub // +kubebuilder:validation:Optional NotificationHubName *string `json:"notificationHubName,omitempty" tf:"notification_hub_name,omitempty"` diff --git a/apis/notificationhubs/v1beta1/zz_generated.conversion_hubs.go b/apis/notificationhubs/v1beta1/zz_generated.conversion_hubs.go index cff1e69f7..2f1093d7a 100755 --- a/apis/notificationhubs/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/notificationhubs/v1beta1/zz_generated.conversion_hubs.go @@ -6,9 +6,6 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *NotificationHub) Hub() {} - // Hub marks this type as a conversion hub. 
func (tr *AuthorizationRule) Hub() {} diff --git a/apis/notificationhubs/v1beta1/zz_generated.conversion_spokes.go b/apis/notificationhubs/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..e95bc1f8f --- /dev/null +++ b/apis/notificationhubs/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this NotificationHub to the hub type. +func (tr *NotificationHub) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the NotificationHub type. 
+func (tr *NotificationHub) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/notificationhubs/v1beta1/zz_generated.resolvers.go b/apis/notificationhubs/v1beta1/zz_generated.resolvers.go index 2fa4f48b6..1596978d0 100644 --- a/apis/notificationhubs/v1beta1/zz_generated.resolvers.go +++ b/apis/notificationhubs/v1beta1/zz_generated.resolvers.go @@ -44,7 +44,7 @@ func (mg *AuthorizationRule) ResolveReferences( // ResolveReferences of this Aut mg.Spec.ForProvider.NamespaceName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.NamespaceNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("notificationhubs.azure.upbound.io", "v1beta1", "NotificationHub", "NotificationHubList") + m, l, err = apisresolver.GetManagedResource("notificationhubs.azure.upbound.io", "v1beta2", "NotificationHub", "NotificationHubList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/notificationhubs/v1beta2/zz_generated.conversion_hubs.go b/apis/notificationhubs/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 000000000..847d97e4f --- /dev/null +++ b/apis/notificationhubs/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. 
+func (tr *NotificationHub) Hub() {} diff --git a/apis/notificationhubs/v1beta2/zz_generated.deepcopy.go b/apis/notificationhubs/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..b44683164 --- /dev/null +++ b/apis/notificationhubs/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,443 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APNSCredentialInitParameters) DeepCopyInto(out *APNSCredentialInitParameters) { + *out = *in + if in.ApplicationMode != nil { + in, out := &in.ApplicationMode, &out.ApplicationMode + *out = new(string) + **out = **in + } + if in.BundleID != nil { + in, out := &in.BundleID, &out.BundleID + *out = new(string) + **out = **in + } + if in.KeyID != nil { + in, out := &in.KeyID, &out.KeyID + *out = new(string) + **out = **in + } + if in.TeamID != nil { + in, out := &in.TeamID, &out.TeamID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APNSCredentialInitParameters. +func (in *APNSCredentialInitParameters) DeepCopy() *APNSCredentialInitParameters { + if in == nil { + return nil + } + out := new(APNSCredentialInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *APNSCredentialObservation) DeepCopyInto(out *APNSCredentialObservation) { + *out = *in + if in.ApplicationMode != nil { + in, out := &in.ApplicationMode, &out.ApplicationMode + *out = new(string) + **out = **in + } + if in.BundleID != nil { + in, out := &in.BundleID, &out.BundleID + *out = new(string) + **out = **in + } + if in.KeyID != nil { + in, out := &in.KeyID, &out.KeyID + *out = new(string) + **out = **in + } + if in.TeamID != nil { + in, out := &in.TeamID, &out.TeamID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APNSCredentialObservation. +func (in *APNSCredentialObservation) DeepCopy() *APNSCredentialObservation { + if in == nil { + return nil + } + out := new(APNSCredentialObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APNSCredentialParameters) DeepCopyInto(out *APNSCredentialParameters) { + *out = *in + if in.ApplicationMode != nil { + in, out := &in.ApplicationMode, &out.ApplicationMode + *out = new(string) + **out = **in + } + if in.BundleID != nil { + in, out := &in.BundleID, &out.BundleID + *out = new(string) + **out = **in + } + if in.KeyID != nil { + in, out := &in.KeyID, &out.KeyID + *out = new(string) + **out = **in + } + if in.TeamID != nil { + in, out := &in.TeamID, &out.TeamID + *out = new(string) + **out = **in + } + out.TokenSecretRef = in.TokenSecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APNSCredentialParameters. +func (in *APNSCredentialParameters) DeepCopy() *APNSCredentialParameters { + if in == nil { + return nil + } + out := new(APNSCredentialParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GCMCredentialInitParameters) DeepCopyInto(out *GCMCredentialInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCMCredentialInitParameters. +func (in *GCMCredentialInitParameters) DeepCopy() *GCMCredentialInitParameters { + if in == nil { + return nil + } + out := new(GCMCredentialInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCMCredentialObservation) DeepCopyInto(out *GCMCredentialObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCMCredentialObservation. +func (in *GCMCredentialObservation) DeepCopy() *GCMCredentialObservation { + if in == nil { + return nil + } + out := new(GCMCredentialObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCMCredentialParameters) DeepCopyInto(out *GCMCredentialParameters) { + *out = *in + out.APIKeySecretRef = in.APIKeySecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCMCredentialParameters. +func (in *GCMCredentialParameters) DeepCopy() *GCMCredentialParameters { + if in == nil { + return nil + } + out := new(GCMCredentialParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NotificationHub) DeepCopyInto(out *NotificationHub) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotificationHub. 
+func (in *NotificationHub) DeepCopy() *NotificationHub { + if in == nil { + return nil + } + out := new(NotificationHub) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NotificationHub) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NotificationHubInitParameters) DeepCopyInto(out *NotificationHubInitParameters) { + *out = *in + if in.APNSCredential != nil { + in, out := &in.APNSCredential, &out.APNSCredential + *out = new(APNSCredentialInitParameters) + (*in).DeepCopyInto(*out) + } + if in.GCMCredential != nil { + in, out := &in.GCMCredential, &out.GCMCredential + *out = new(GCMCredentialInitParameters) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotificationHubInitParameters. +func (in *NotificationHubInitParameters) DeepCopy() *NotificationHubInitParameters { + if in == nil { + return nil + } + out := new(NotificationHubInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NotificationHubList) DeepCopyInto(out *NotificationHubList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NotificationHub, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotificationHubList. +func (in *NotificationHubList) DeepCopy() *NotificationHubList { + if in == nil { + return nil + } + out := new(NotificationHubList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NotificationHubList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NotificationHubObservation) DeepCopyInto(out *NotificationHubObservation) { + *out = *in + if in.APNSCredential != nil { + in, out := &in.APNSCredential, &out.APNSCredential + *out = new(APNSCredentialObservation) + (*in).DeepCopyInto(*out) + } + if in.GCMCredential != nil { + in, out := &in.GCMCredential, &out.GCMCredential + *out = new(GCMCredentialParameters) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.NamespaceName != nil { + in, out := &in.NamespaceName, &out.NamespaceName + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + 
(*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotificationHubObservation. +func (in *NotificationHubObservation) DeepCopy() *NotificationHubObservation { + if in == nil { + return nil + } + out := new(NotificationHubObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NotificationHubParameters) DeepCopyInto(out *NotificationHubParameters) { + *out = *in + if in.APNSCredential != nil { + in, out := &in.APNSCredential, &out.APNSCredential + *out = new(APNSCredentialParameters) + (*in).DeepCopyInto(*out) + } + if in.GCMCredential != nil { + in, out := &in.GCMCredential, &out.GCMCredential + *out = new(GCMCredentialParameters) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.NamespaceName != nil { + in, out := &in.NamespaceName, &out.NamespaceName + *out = new(string) + **out = **in + } + if in.NamespaceNameRef != nil { + in, out := &in.NamespaceNameRef, &out.NamespaceNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NamespaceNameSelector != nil { + in, out := &in.NamespaceNameSelector, &out.NamespaceNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if 
in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotificationHubParameters. +func (in *NotificationHubParameters) DeepCopy() *NotificationHubParameters { + if in == nil { + return nil + } + out := new(NotificationHubParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NotificationHubSpec) DeepCopyInto(out *NotificationHubSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotificationHubSpec. +func (in *NotificationHubSpec) DeepCopy() *NotificationHubSpec { + if in == nil { + return nil + } + out := new(NotificationHubSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NotificationHubStatus) DeepCopyInto(out *NotificationHubStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotificationHubStatus. 
+func (in *NotificationHubStatus) DeepCopy() *NotificationHubStatus { + if in == nil { + return nil + } + out := new(NotificationHubStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/notificationhubs/v1beta2/zz_generated.managed.go b/apis/notificationhubs/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..04d4f0d54 --- /dev/null +++ b/apis/notificationhubs/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this NotificationHub. +func (mg *NotificationHub) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this NotificationHub. +func (mg *NotificationHub) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this NotificationHub. +func (mg *NotificationHub) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this NotificationHub. +func (mg *NotificationHub) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this NotificationHub. +func (mg *NotificationHub) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this NotificationHub. +func (mg *NotificationHub) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this NotificationHub. +func (mg *NotificationHub) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this NotificationHub. 
+func (mg *NotificationHub) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this NotificationHub. +func (mg *NotificationHub) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this NotificationHub. +func (mg *NotificationHub) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this NotificationHub. +func (mg *NotificationHub) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this NotificationHub. +func (mg *NotificationHub) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/notificationhubs/v1beta2/zz_generated.managedlist.go b/apis/notificationhubs/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..0bc62914f --- /dev/null +++ b/apis/notificationhubs/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this NotificationHubList. +func (l *NotificationHubList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/notificationhubs/v1beta2/zz_generated.resolvers.go b/apis/notificationhubs/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..212fba268 --- /dev/null +++ b/apis/notificationhubs/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,67 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. 
DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + apisresolver "github.com/upbound/provider-azure/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *NotificationHub) ResolveReferences( // ResolveReferences of this NotificationHub. + ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("notificationhubs.azure.upbound.io", "v1beta1", "NotificationHubNamespace", "NotificationHubNamespaceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.NamespaceName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.NamespaceNameRef, + Selector: mg.Spec.ForProvider.NamespaceNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.NamespaceName") + } + mg.Spec.ForProvider.NamespaceName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.NamespaceNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: 
mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/notificationhubs/v1beta2/zz_groupversion_info.go b/apis/notificationhubs/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..a5ec547ce --- /dev/null +++ b/apis/notificationhubs/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=notificationhubs.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "notificationhubs.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/notificationhubs/v1beta2/zz_notificationhub_terraformed.go b/apis/notificationhubs/v1beta2/zz_notificationhub_terraformed.go new file mode 100755 index 000000000..a1d6a5d65 --- /dev/null +++ b/apis/notificationhubs/v1beta2/zz_notificationhub_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this NotificationHub +func (mg *NotificationHub) GetTerraformResourceType() string { + return "azurerm_notification_hub" +} + +// GetConnectionDetailsMapping for this NotificationHub +func (tr *NotificationHub) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"apns_credential[*].token": "spec.forProvider.apnsCredential[*].tokenSecretRef", "gcm_credential[*].api_key": "spec.forProvider.gcmCredential[*].apiKeySecretRef"} +} + +// GetObservation of this NotificationHub +func (tr *NotificationHub) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this NotificationHub +func (tr *NotificationHub) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this NotificationHub +func (tr *NotificationHub) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this NotificationHub +func (tr 
*NotificationHub) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this NotificationHub +func (tr *NotificationHub) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this NotificationHub +func (tr *NotificationHub) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this NotificationHub +func (tr *NotificationHub) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this NotificationHub using its observed tfState. 
+// returns True if there are any spec changes for the resource. +func (tr *NotificationHub) LateInitialize(attrs []byte) (bool, error) { + params := &NotificationHubParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *NotificationHub) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/notificationhubs/v1beta2/zz_notificationhub_types.go b/apis/notificationhubs/v1beta2/zz_notificationhub_types.go new file mode 100755 index 000000000..49e719d99 --- /dev/null +++ b/apis/notificationhubs/v1beta2/zz_notificationhub_types.go @@ -0,0 +1,228 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type APNSCredentialInitParameters struct { + + // The Application Mode which defines which server the APNS Messages should be sent to. Possible values are Production and Sandbox. + ApplicationMode *string `json:"applicationMode,omitempty" tf:"application_mode,omitempty"` + + // The Bundle ID of the iOS/macOS application to send push notifications for, such as com.hashicorp.example. + BundleID *string `json:"bundleId,omitempty" tf:"bundle_id,omitempty"` + + // The Apple Push Notifications Service (APNS) Key. + KeyID *string `json:"keyId,omitempty" tf:"key_id,omitempty"` + + // The ID of the team the Token. 
+ TeamID *string `json:"teamId,omitempty" tf:"team_id,omitempty"` +} + +type APNSCredentialObservation struct { + + // The Application Mode which defines which server the APNS Messages should be sent to. Possible values are Production and Sandbox. + ApplicationMode *string `json:"applicationMode,omitempty" tf:"application_mode,omitempty"` + + // The Bundle ID of the iOS/macOS application to send push notifications for, such as com.hashicorp.example. + BundleID *string `json:"bundleId,omitempty" tf:"bundle_id,omitempty"` + + // The Apple Push Notifications Service (APNS) Key. + KeyID *string `json:"keyId,omitempty" tf:"key_id,omitempty"` + + // The ID of the team the Token. + TeamID *string `json:"teamId,omitempty" tf:"team_id,omitempty"` +} + +type APNSCredentialParameters struct { + + // The Application Mode which defines which server the APNS Messages should be sent to. Possible values are Production and Sandbox. + // +kubebuilder:validation:Optional + ApplicationMode *string `json:"applicationMode" tf:"application_mode,omitempty"` + + // The Bundle ID of the iOS/macOS application to send push notifications for, such as com.hashicorp.example. + // +kubebuilder:validation:Optional + BundleID *string `json:"bundleId" tf:"bundle_id,omitempty"` + + // The Apple Push Notifications Service (APNS) Key. + // +kubebuilder:validation:Optional + KeyID *string `json:"keyId" tf:"key_id,omitempty"` + + // The ID of the team the Token. + // +kubebuilder:validation:Optional + TeamID *string `json:"teamId" tf:"team_id,omitempty"` + + // The Push Token associated with the Apple Developer Account. This is the contents of the key downloaded from the Apple Developer Portal between the -----BEGIN PRIVATE KEY----- and -----END PRIVATE KEY----- blocks. 
+ // +kubebuilder:validation:Required + TokenSecretRef v1.SecretKeySelector `json:"tokenSecretRef" tf:"-"` +} + +type GCMCredentialInitParameters struct { +} + +type GCMCredentialObservation struct { +} + +type GCMCredentialParameters struct { + + // The API Key associated with the Google Cloud Messaging service. + // +kubebuilder:validation:Required + APIKeySecretRef v1.SecretKeySelector `json:"apiKeySecretRef" tf:"-"` +} + +type NotificationHubInitParameters struct { + + // A apns_credential block as defined below. + APNSCredential *APNSCredentialInitParameters `json:"apnsCredential,omitempty" tf:"apns_credential,omitempty"` + + // A gcm_credential block as defined below. + GCMCredential *GCMCredentialInitParameters `json:"gcmCredential,omitempty" tf:"gcm_credential,omitempty"` + + // The Azure Region in which this Notification Hub Namespace exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type NotificationHubObservation struct { + + // A apns_credential block as defined below. + APNSCredential *APNSCredentialObservation `json:"apnsCredential,omitempty" tf:"apns_credential,omitempty"` + + // A gcm_credential block as defined below. + GCMCredential *GCMCredentialParameters `json:"gcmCredential,omitempty" tf:"gcm_credential,omitempty"` + + // The ID of the Notification Hub. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The Azure Region in which this Notification Hub Namespace exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the Notification Hub Namespace in which to create this Notification Hub. Changing this forces a new resource to be created. 
+ NamespaceName *string `json:"namespaceName,omitempty" tf:"namespace_name,omitempty"` + + // The name of the Resource Group in which the Notification Hub Namespace exists. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type NotificationHubParameters struct { + + // A apns_credential block as defined below. + // +kubebuilder:validation:Optional + APNSCredential *APNSCredentialParameters `json:"apnsCredential,omitempty" tf:"apns_credential,omitempty"` + + // A gcm_credential block as defined below. + // +kubebuilder:validation:Optional + GCMCredential *GCMCredentialParameters `json:"gcmCredential,omitempty" tf:"gcm_credential,omitempty"` + + // The Azure Region in which this Notification Hub Namespace exists. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the Notification Hub Namespace in which to create this Notification Hub. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/notificationhubs/v1beta1.NotificationHubNamespace + // +kubebuilder:validation:Optional + NamespaceName *string `json:"namespaceName,omitempty" tf:"namespace_name,omitempty"` + + // Reference to a NotificationHubNamespace in notificationhubs to populate namespaceName. + // +kubebuilder:validation:Optional + NamespaceNameRef *v1.Reference `json:"namespaceNameRef,omitempty" tf:"-"` + + // Selector for a NotificationHubNamespace in notificationhubs to populate namespaceName. 
+ // +kubebuilder:validation:Optional + NamespaceNameSelector *v1.Selector `json:"namespaceNameSelector,omitempty" tf:"-"` + + // The name of the Resource Group in which the Notification Hub Namespace exists. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +// NotificationHubSpec defines the desired state of NotificationHub +type NotificationHubSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider NotificationHubParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider NotificationHubInitParameters `json:"initProvider,omitempty"` +} + +// NotificationHubStatus defines the observed state of NotificationHub. +type NotificationHubStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider NotificationHubObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// NotificationHub is the Schema for the NotificationHubs API. Manages a Notification Hub within a Notification Hub Namespace. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type NotificationHub struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + Spec NotificationHubSpec `json:"spec"` + Status NotificationHubStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// NotificationHubList contains a list of NotificationHubs +type NotificationHubList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []NotificationHub `json:"items"` +} + +// Repository type metadata. 
+var ( + NotificationHub_Kind = "NotificationHub" + NotificationHub_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: NotificationHub_Kind}.String() + NotificationHub_KindAPIVersion = NotificationHub_Kind + "." + CRDGroupVersion.String() + NotificationHub_GroupVersionKind = CRDGroupVersion.WithKind(NotificationHub_Kind) +) + +func init() { + SchemeBuilder.Register(&NotificationHub{}, &NotificationHubList{}) +} diff --git a/apis/operationalinsights/v1beta1/zz_generated.conversion_hubs.go b/apis/operationalinsights/v1beta1/zz_generated.conversion_hubs.go index d050d6f1d..cba3c07d8 100755 --- a/apis/operationalinsights/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/operationalinsights/v1beta1/zz_generated.conversion_hubs.go @@ -29,6 +29,3 @@ func (tr *LogAnalyticsQueryPackQuery) Hub() {} // Hub marks this type as a conversion hub. func (tr *LogAnalyticsSavedSearch) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Workspace) Hub() {} diff --git a/apis/operationalinsights/v1beta1/zz_generated.conversion_spokes.go b/apis/operationalinsights/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..508736b23 --- /dev/null +++ b/apis/operationalinsights/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Workspace to the hub type. 
+func (tr *Workspace) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Workspace type. +func (tr *Workspace) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/operationalinsights/v1beta1/zz_generated.resolvers.go b/apis/operationalinsights/v1beta1/zz_generated.resolvers.go index 26bae412c..048e10488 100644 --- a/apis/operationalinsights/v1beta1/zz_generated.resolvers.go +++ b/apis/operationalinsights/v1beta1/zz_generated.resolvers.go @@ -28,7 +28,7 @@ func (mg *LogAnalyticsDataExportRule) ResolveReferences( // ResolveReferences of var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -66,7 +66,7 @@ func (mg *LogAnalyticsDataExportRule) ResolveReferences( // ResolveReferences of mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = 
apisresolver.GetManagedResource("operationalinsights.azure.upbound.io", "v1beta1", "Workspace", "WorkspaceList") + m, l, err = apisresolver.GetManagedResource("operationalinsights.azure.upbound.io", "v1beta2", "Workspace", "WorkspaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -85,7 +85,7 @@ func (mg *LogAnalyticsDataExportRule) ResolveReferences( // ResolveReferences of mg.Spec.ForProvider.WorkspaceResourceID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.WorkspaceResourceIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -154,7 +154,7 @@ func (mg *LogAnalyticsDataSourceWindowsEvent) ResolveReferences(ctx context.Cont mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("operationalinsights.azure.upbound.io", "v1beta1", "Workspace", "WorkspaceList") + m, l, err = apisresolver.GetManagedResource("operationalinsights.azure.upbound.io", "v1beta2", "Workspace", "WorkspaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -204,7 +204,7 @@ func (mg *LogAnalyticsDataSourceWindowsPerformanceCounter) ResolveReferences(ctx mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("operationalinsights.azure.upbound.io", "v1beta1", "Workspace", "WorkspaceList") + m, 
l, err = apisresolver.GetManagedResource("operationalinsights.azure.upbound.io", "v1beta2", "Workspace", "WorkspaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -235,7 +235,7 @@ func (mg *LogAnalyticsLinkedService) ResolveReferences(ctx context.Context, c cl var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("automation.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("automation.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -273,7 +273,7 @@ func (mg *LogAnalyticsLinkedService) ResolveReferences(ctx context.Context, c cl mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("operationalinsights.azure.upbound.io", "v1beta1", "Workspace", "WorkspaceList") + m, l, err = apisresolver.GetManagedResource("operationalinsights.azure.upbound.io", "v1beta2", "Workspace", "WorkspaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -292,7 +292,7 @@ func (mg *LogAnalyticsLinkedService) ResolveReferences(ctx context.Context, c cl mg.Spec.ForProvider.WorkspaceID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.WorkspaceIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("automation.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("automation.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference 
resolution") } @@ -362,7 +362,7 @@ func (mg *LogAnalyticsLinkedStorageAccount) ResolveReferences(ctx context.Contex mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -381,7 +381,7 @@ func (mg *LogAnalyticsLinkedStorageAccount) ResolveReferences(ctx context.Contex mg.Spec.ForProvider.StorageAccountIds = reference.ToPtrValues(mrsp.ResolvedValues) mg.Spec.ForProvider.StorageAccountIdsRefs = mrsp.ResolvedReferences { - m, l, err = apisresolver.GetManagedResource("operationalinsights.azure.upbound.io", "v1beta1", "Workspace", "WorkspaceList") + m, l, err = apisresolver.GetManagedResource("operationalinsights.azure.upbound.io", "v1beta2", "Workspace", "WorkspaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -419,7 +419,7 @@ func (mg *LogAnalyticsLinkedStorageAccount) ResolveReferences(ctx context.Contex mg.Spec.InitProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -438,7 +438,7 @@ func (mg *LogAnalyticsLinkedStorageAccount) ResolveReferences(ctx context.Contex 
mg.Spec.InitProvider.StorageAccountIds = reference.ToPtrValues(mrsp.ResolvedValues) mg.Spec.InitProvider.StorageAccountIdsRefs = mrsp.ResolvedReferences { - m, l, err = apisresolver.GetManagedResource("operationalinsights.azure.upbound.io", "v1beta1", "Workspace", "WorkspaceList") + m, l, err = apisresolver.GetManagedResource("operationalinsights.azure.upbound.io", "v1beta2", "Workspace", "WorkspaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -550,7 +550,7 @@ func (mg *LogAnalyticsSavedSearch) ResolveReferences(ctx context.Context, c clie var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("operationalinsights.azure.upbound.io", "v1beta1", "Workspace", "WorkspaceList") + m, l, err = apisresolver.GetManagedResource("operationalinsights.azure.upbound.io", "v1beta2", "Workspace", "WorkspaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/operationalinsights/v1beta1/zz_loganalyticsdataexportrule_types.go b/apis/operationalinsights/v1beta1/zz_loganalyticsdataexportrule_types.go index d995e5809..be87a86ac 100755 --- a/apis/operationalinsights/v1beta1/zz_loganalyticsdataexportrule_types.go +++ b/apis/operationalinsights/v1beta1/zz_loganalyticsdataexportrule_types.go @@ -16,7 +16,7 @@ import ( type LogAnalyticsDataExportRuleInitParameters struct { // The destination resource ID. It should be a storage account, an event hub namespace or an event hub. If the destination is an event hub namespace, an event hub would be created for each table automatically. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() DestinationResourceID *string `json:"destinationResourceId,omitempty" tf:"destination_resource_id,omitempty"` @@ -76,7 +76,7 @@ type LogAnalyticsDataExportRuleObservation struct { type LogAnalyticsDataExportRuleParameters struct { // The destination resource ID. It should be a storage account, an event hub namespace or an event hub. If the destination is an event hub namespace, an event hub would be created for each table automatically. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional DestinationResourceID *string `json:"destinationResourceId,omitempty" tf:"destination_resource_id,omitempty"` @@ -112,7 +112,7 @@ type LogAnalyticsDataExportRuleParameters struct { TableNames []*string `json:"tableNames,omitempty" tf:"table_names,omitempty"` // The resource ID of the workspace. Changing this forces a new Log Analytics Data Export Rule to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/operationalinsights/v1beta1.Workspace + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/operationalinsights/v1beta2.Workspace // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional WorkspaceResourceID *string `json:"workspaceResourceId,omitempty" tf:"workspace_resource_id,omitempty"` diff --git a/apis/operationalinsights/v1beta1/zz_loganalyticsdatasourcewindowsevent_types.go b/apis/operationalinsights/v1beta1/zz_loganalyticsdatasourcewindowsevent_types.go index 405af0075..00fd791ac 100755 --- a/apis/operationalinsights/v1beta1/zz_loganalyticsdatasourcewindowsevent_types.go +++ b/apis/operationalinsights/v1beta1/zz_loganalyticsdatasourcewindowsevent_types.go @@ -67,7 +67,7 @@ type LogAnalyticsDataSourceWindowsEventParameters struct { ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` // The name of the Log Analytics Workspace where the Log Analytics Windows Event DataSource should exist. Changing this forces a new Log Analytics Windows Event DataSource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/operationalinsights/v1beta1.Workspace + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/operationalinsights/v1beta2.Workspace // +kubebuilder:validation:Optional WorkspaceName *string `json:"workspaceName,omitempty" tf:"workspace_name,omitempty"` diff --git a/apis/operationalinsights/v1beta1/zz_loganalyticsdatasourcewindowsperformancecounter_types.go b/apis/operationalinsights/v1beta1/zz_loganalyticsdatasourcewindowsperformancecounter_types.go index d86ddb0dd..8df8de7da 100755 --- a/apis/operationalinsights/v1beta1/zz_loganalyticsdatasourcewindowsperformancecounter_types.go +++ b/apis/operationalinsights/v1beta1/zz_loganalyticsdatasourcewindowsperformancecounter_types.go @@ -84,7 +84,7 @@ type LogAnalyticsDataSourceWindowsPerformanceCounterParameters struct { ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` // The name of the Log Analytics Workspace where the Log Analytics Windows Performance Counter DataSource should exist. Changing this forces a new Log Analytics Windows Performance Counter DataSource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/operationalinsights/v1beta1.Workspace + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/operationalinsights/v1beta2.Workspace // +kubebuilder:validation:Optional WorkspaceName *string `json:"workspaceName,omitempty" tf:"workspace_name,omitempty"` diff --git a/apis/operationalinsights/v1beta1/zz_loganalyticslinkedservice_types.go b/apis/operationalinsights/v1beta1/zz_loganalyticslinkedservice_types.go index 51cd0f3c6..648b967d2 100755 --- a/apis/operationalinsights/v1beta1/zz_loganalyticslinkedservice_types.go +++ b/apis/operationalinsights/v1beta1/zz_loganalyticslinkedservice_types.go @@ -16,7 +16,7 @@ import ( type LogAnalyticsLinkedServiceInitParameters struct { // The ID of the readable Resource that will be linked to the workspace. This should be used for linking to an Automation Account resource. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/automation/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/automation/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() ReadAccessID *string `json:"readAccessId,omitempty" tf:"read_access_id,omitempty"` @@ -68,7 +68,7 @@ type LogAnalyticsLinkedServiceObservation struct { type LogAnalyticsLinkedServiceParameters struct { // The ID of the readable Resource that will be linked to the workspace. This should be used for linking to an Automation Account resource. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/automation/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/automation/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional ReadAccessID *string `json:"readAccessId,omitempty" tf:"read_access_id,omitempty"` @@ -95,7 +95,7 @@ type LogAnalyticsLinkedServiceParameters struct { ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` // The ID of the Log Analytics Workspace that will contain the Log Analytics Linked Service resource. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/operationalinsights/v1beta1.Workspace + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/operationalinsights/v1beta2.Workspace // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional WorkspaceID *string `json:"workspaceId,omitempty" tf:"workspace_id,omitempty"` diff --git a/apis/operationalinsights/v1beta1/zz_loganalyticslinkedstorageaccount_types.go b/apis/operationalinsights/v1beta1/zz_loganalyticslinkedstorageaccount_types.go index d7a7f75d7..da3d0db59 100755 --- a/apis/operationalinsights/v1beta1/zz_loganalyticslinkedstorageaccount_types.go +++ b/apis/operationalinsights/v1beta1/zz_loganalyticslinkedstorageaccount_types.go @@ -31,7 +31,7 @@ type LogAnalyticsLinkedStorageAccountInitParameters struct { ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` // The storage account resource ids to be linked. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() // +listType=set StorageAccountIds []*string `json:"storageAccountIds,omitempty" tf:"storage_account_ids,omitempty"` @@ -45,7 +45,7 @@ type LogAnalyticsLinkedStorageAccountInitParameters struct { StorageAccountIdsSelector *v1.Selector `json:"storageAccountIdsSelector,omitempty" tf:"-"` // The resource ID of the Log Analytics Workspace. Changing this forces a new Log Analytics Linked Storage Account to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/operationalinsights/v1beta1.Workspace + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/operationalinsights/v1beta2.Workspace // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() WorkspaceResourceID *string `json:"workspaceResourceId,omitempty" tf:"workspace_resource_id,omitempty"` @@ -97,7 +97,7 @@ type LogAnalyticsLinkedStorageAccountParameters struct { ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` // The storage account resource ids to be linked. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() // +kubebuilder:validation:Optional // +listType=set @@ -112,7 +112,7 @@ type LogAnalyticsLinkedStorageAccountParameters struct { StorageAccountIdsSelector *v1.Selector `json:"storageAccountIdsSelector,omitempty" tf:"-"` // The resource ID of the Log Analytics Workspace. 
Changing this forces a new Log Analytics Linked Storage Account to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/operationalinsights/v1beta1.Workspace + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/operationalinsights/v1beta2.Workspace // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional WorkspaceResourceID *string `json:"workspaceResourceId,omitempty" tf:"workspace_resource_id,omitempty"` diff --git a/apis/operationalinsights/v1beta1/zz_loganalyticssavedsearch_types.go b/apis/operationalinsights/v1beta1/zz_loganalyticssavedsearch_types.go index 447b425d0..62c5cc61d 100755 --- a/apis/operationalinsights/v1beta1/zz_loganalyticssavedsearch_types.go +++ b/apis/operationalinsights/v1beta1/zz_loganalyticssavedsearch_types.go @@ -85,7 +85,7 @@ type LogAnalyticsSavedSearchParameters struct { FunctionParameters []*string `json:"functionParameters,omitempty" tf:"function_parameters,omitempty"` // Specifies the ID of the Log Analytics Workspace that the Saved Search will be associated with. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/operationalinsights/v1beta1.Workspace + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/operationalinsights/v1beta2.Workspace // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional LogAnalyticsWorkspaceID *string `json:"logAnalyticsWorkspaceId,omitempty" tf:"log_analytics_workspace_id,omitempty"` diff --git a/apis/operationalinsights/v1beta2/zz_generated.conversion_hubs.go b/apis/operationalinsights/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 000000000..60d422318 --- /dev/null +++ b/apis/operationalinsights/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Workspace) Hub() {} diff --git a/apis/operationalinsights/v1beta2/zz_generated.deepcopy.go b/apis/operationalinsights/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..7c248a3a8 --- /dev/null +++ b/apis/operationalinsights/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,529 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentityInitParameters) DeepCopyInto(out *IdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityInitParameters. +func (in *IdentityInitParameters) DeepCopy() *IdentityInitParameters { + if in == nil { + return nil + } + out := new(IdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityObservation) DeepCopyInto(out *IdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityObservation. +func (in *IdentityObservation) DeepCopy() *IdentityObservation { + if in == nil { + return nil + } + out := new(IdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentityParameters) DeepCopyInto(out *IdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityParameters. +func (in *IdentityParameters) DeepCopy() *IdentityParameters { + if in == nil { + return nil + } + out := new(IdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Workspace) DeepCopyInto(out *Workspace) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Workspace. +func (in *Workspace) DeepCopy() *Workspace { + if in == nil { + return nil + } + out := new(Workspace) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Workspace) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WorkspaceInitParameters) DeepCopyInto(out *WorkspaceInitParameters) { + *out = *in + if in.AllowResourceOnlyPermissions != nil { + in, out := &in.AllowResourceOnlyPermissions, &out.AllowResourceOnlyPermissions + *out = new(bool) + **out = **in + } + if in.CmkForQueryForced != nil { + in, out := &in.CmkForQueryForced, &out.CmkForQueryForced + *out = new(bool) + **out = **in + } + if in.DailyQuotaGb != nil { + in, out := &in.DailyQuotaGb, &out.DailyQuotaGb + *out = new(float64) + **out = **in + } + if in.DataCollectionRuleID != nil { + in, out := &in.DataCollectionRuleID, &out.DataCollectionRuleID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ImmediateDataPurgeOn30DaysEnabled != nil { + in, out := &in.ImmediateDataPurgeOn30DaysEnabled, &out.ImmediateDataPurgeOn30DaysEnabled + *out = new(bool) + **out = **in + } + if in.InternetIngestionEnabled != nil { + in, out := &in.InternetIngestionEnabled, &out.InternetIngestionEnabled + *out = new(bool) + **out = **in + } + if in.InternetQueryEnabled != nil { + in, out := &in.InternetQueryEnabled, &out.InternetQueryEnabled + *out = new(bool) + **out = **in + } + if in.LocalAuthenticationDisabled != nil { + in, out := &in.LocalAuthenticationDisabled, &out.LocalAuthenticationDisabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ReservationCapacityInGbPerDay != nil { + in, out := &in.ReservationCapacityInGbPerDay, &out.ReservationCapacityInGbPerDay + *out = new(float64) + **out = **in + } + if in.RetentionInDays != nil { + in, out := &in.RetentionInDays, &out.RetentionInDays + *out = new(float64) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = 
make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceInitParameters. +func (in *WorkspaceInitParameters) DeepCopy() *WorkspaceInitParameters { + if in == nil { + return nil + } + out := new(WorkspaceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkspaceList) DeepCopyInto(out *WorkspaceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Workspace, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceList. +func (in *WorkspaceList) DeepCopy() *WorkspaceList { + if in == nil { + return nil + } + out := new(WorkspaceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WorkspaceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WorkspaceObservation) DeepCopyInto(out *WorkspaceObservation) { + *out = *in + if in.AllowResourceOnlyPermissions != nil { + in, out := &in.AllowResourceOnlyPermissions, &out.AllowResourceOnlyPermissions + *out = new(bool) + **out = **in + } + if in.CmkForQueryForced != nil { + in, out := &in.CmkForQueryForced, &out.CmkForQueryForced + *out = new(bool) + **out = **in + } + if in.DailyQuotaGb != nil { + in, out := &in.DailyQuotaGb, &out.DailyQuotaGb + *out = new(float64) + **out = **in + } + if in.DataCollectionRuleID != nil { + in, out := &in.DataCollectionRuleID, &out.DataCollectionRuleID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.ImmediateDataPurgeOn30DaysEnabled != nil { + in, out := &in.ImmediateDataPurgeOn30DaysEnabled, &out.ImmediateDataPurgeOn30DaysEnabled + *out = new(bool) + **out = **in + } + if in.InternetIngestionEnabled != nil { + in, out := &in.InternetIngestionEnabled, &out.InternetIngestionEnabled + *out = new(bool) + **out = **in + } + if in.InternetQueryEnabled != nil { + in, out := &in.InternetQueryEnabled, &out.InternetQueryEnabled + *out = new(bool) + **out = **in + } + if in.LocalAuthenticationDisabled != nil { + in, out := &in.LocalAuthenticationDisabled, &out.LocalAuthenticationDisabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ReservationCapacityInGbPerDay != nil { + in, out := &in.ReservationCapacityInGbPerDay, &out.ReservationCapacityInGbPerDay + *out = new(float64) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.RetentionInDays != nil { + in, out := &in.RetentionInDays, 
&out.RetentionInDays + *out = new(float64) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.WorkspaceID != nil { + in, out := &in.WorkspaceID, &out.WorkspaceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceObservation. +func (in *WorkspaceObservation) DeepCopy() *WorkspaceObservation { + if in == nil { + return nil + } + out := new(WorkspaceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkspaceParameters) DeepCopyInto(out *WorkspaceParameters) { + *out = *in + if in.AllowResourceOnlyPermissions != nil { + in, out := &in.AllowResourceOnlyPermissions, &out.AllowResourceOnlyPermissions + *out = new(bool) + **out = **in + } + if in.CmkForQueryForced != nil { + in, out := &in.CmkForQueryForced, &out.CmkForQueryForced + *out = new(bool) + **out = **in + } + if in.DailyQuotaGb != nil { + in, out := &in.DailyQuotaGb, &out.DailyQuotaGb + *out = new(float64) + **out = **in + } + if in.DataCollectionRuleID != nil { + in, out := &in.DataCollectionRuleID, &out.DataCollectionRuleID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.ImmediateDataPurgeOn30DaysEnabled != nil { + in, out := &in.ImmediateDataPurgeOn30DaysEnabled, &out.ImmediateDataPurgeOn30DaysEnabled + *out = new(bool) + **out = **in + } + if in.InternetIngestionEnabled != nil { 
+ in, out := &in.InternetIngestionEnabled, &out.InternetIngestionEnabled + *out = new(bool) + **out = **in + } + if in.InternetQueryEnabled != nil { + in, out := &in.InternetQueryEnabled, &out.InternetQueryEnabled + *out = new(bool) + **out = **in + } + if in.LocalAuthenticationDisabled != nil { + in, out := &in.LocalAuthenticationDisabled, &out.LocalAuthenticationDisabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ReservationCapacityInGbPerDay != nil { + in, out := &in.ReservationCapacityInGbPerDay, &out.ReservationCapacityInGbPerDay + *out = new(float64) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RetentionInDays != nil { + in, out := &in.RetentionInDays, &out.RetentionInDays + *out = new(float64) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceParameters. 
+func (in *WorkspaceParameters) DeepCopy() *WorkspaceParameters { + if in == nil { + return nil + } + out := new(WorkspaceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkspaceSpec) DeepCopyInto(out *WorkspaceSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceSpec. +func (in *WorkspaceSpec) DeepCopy() *WorkspaceSpec { + if in == nil { + return nil + } + out := new(WorkspaceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkspaceStatus) DeepCopyInto(out *WorkspaceStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceStatus. +func (in *WorkspaceStatus) DeepCopy() *WorkspaceStatus { + if in == nil { + return nil + } + out := new(WorkspaceStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/operationalinsights/v1beta2/zz_generated.managed.go b/apis/operationalinsights/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..562d4553d --- /dev/null +++ b/apis/operationalinsights/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Workspace. 
+func (mg *Workspace) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Workspace. +func (mg *Workspace) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Workspace. +func (mg *Workspace) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Workspace. +func (mg *Workspace) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Workspace. +func (mg *Workspace) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Workspace. +func (mg *Workspace) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Workspace. +func (mg *Workspace) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Workspace. +func (mg *Workspace) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Workspace. +func (mg *Workspace) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Workspace. +func (mg *Workspace) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Workspace. +func (mg *Workspace) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Workspace. 
+func (mg *Workspace) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/operationalinsights/v1beta2/zz_generated.managedlist.go b/apis/operationalinsights/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..d32cca0e4 --- /dev/null +++ b/apis/operationalinsights/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this WorkspaceList. +func (l *WorkspaceList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/operationalinsights/v1beta2/zz_generated.resolvers.go b/apis/operationalinsights/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..54940d154 --- /dev/null +++ b/apis/operationalinsights/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,48 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + apisresolver "github.com/upbound/provider-azure/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *Workspace) ResolveReferences( // ResolveReferences of this Workspace. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/operationalinsights/v1beta2/zz_groupversion_info.go b/apis/operationalinsights/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..2e516ea5a --- /dev/null +++ b/apis/operationalinsights/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=operationalinsights.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
+const ( + CRDGroup = "operationalinsights.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/operationalinsights/v1beta2/zz_workspace_terraformed.go b/apis/operationalinsights/v1beta2/zz_workspace_terraformed.go new file mode 100755 index 000000000..9d651fa4c --- /dev/null +++ b/apis/operationalinsights/v1beta2/zz_workspace_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Workspace +func (mg *Workspace) GetTerraformResourceType() string { + return "azurerm_log_analytics_workspace" +} + +// GetConnectionDetailsMapping for this Workspace +func (tr *Workspace) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"primary_shared_key": "status.atProvider.primarySharedKey", "secondary_shared_key": "status.atProvider.secondarySharedKey"} +} + +// GetObservation of this Workspace +func (tr *Workspace) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Workspace +func (tr *Workspace) SetObservation(obs map[string]any) error { + p, err := 
json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Workspace +func (tr *Workspace) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Workspace +func (tr *Workspace) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Workspace +func (tr *Workspace) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Workspace +func (tr *Workspace) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Workspace +func (tr *Workspace) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Workspace using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Workspace) LateInitialize(attrs []byte) (bool, error) { + params := &WorkspaceParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Workspace) GetTerraformSchemaVersion() int { + return 3 +} diff --git a/apis/operationalinsights/v1beta2/zz_workspace_types.go b/apis/operationalinsights/v1beta2/zz_workspace_types.go new file mode 100755 index 000000000..162d08d04 --- /dev/null +++ b/apis/operationalinsights/v1beta2/zz_workspace_types.go @@ -0,0 +1,287 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type IdentityInitParameters struct { + + // Specifies a list of user managed identity ids to be assigned. Required if type is UserAssigned. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the identity type of the Log Analytics Workspace. 
Possible values are SystemAssigned (where Azure will generate a Service Principal for you) and UserAssigned where you can specify the Service Principal IDs in the identity_ids field. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityObservation struct { + + // Specifies a list of user managed identity ids to be assigned. Required if type is UserAssigned. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Log Analytics Workspace ID. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Log Analytics Workspace ID. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the identity type of the Log Analytics Workspace. Possible values are SystemAssigned (where Azure will generate a Service Principal for you) and UserAssigned where you can specify the Service Principal IDs in the identity_ids field. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityParameters struct { + + // Specifies a list of user managed identity ids to be assigned. Required if type is UserAssigned. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the identity type of the Log Analytics Workspace. Possible values are SystemAssigned (where Azure will generate a Service Principal for you) and UserAssigned where you can specify the Service Principal IDs in the identity_ids field. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type WorkspaceInitParameters struct { + + // Specifies if the log Analytics Workspace allow users accessing to data associated with resources they have permission to view, without permission to workspace. Defaults to true. 
+ AllowResourceOnlyPermissions *bool `json:"allowResourceOnlyPermissions,omitempty" tf:"allow_resource_only_permissions,omitempty"` + + // Is Customer Managed Storage mandatory for query management? + CmkForQueryForced *bool `json:"cmkForQueryForced,omitempty" tf:"cmk_for_query_forced,omitempty"` + + // The workspace daily quota for ingestion in GB. Defaults to -1 (unlimited) if omitted. + DailyQuotaGb *float64 `json:"dailyQuotaGb,omitempty" tf:"daily_quota_gb,omitempty"` + + // The ID of the Data Collection Rule to use for this workspace. + DataCollectionRuleID *string `json:"dataCollectionRuleId,omitempty" tf:"data_collection_rule_id,omitempty"` + + // An identity block as defined below. + Identity *IdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Whether to remove the data in the Log Analytics Workspace immediately after 30 days. + ImmediateDataPurgeOn30DaysEnabled *bool `json:"immediateDataPurgeOn30DaysEnabled,omitempty" tf:"immediate_data_purge_on_30_days_enabled,omitempty"` + + // Should the Log Analytics Workspace support ingestion over the Public Internet? Defaults to true. + InternetIngestionEnabled *bool `json:"internetIngestionEnabled,omitempty" tf:"internet_ingestion_enabled,omitempty"` + + // Should the Log Analytics Workspace support querying over the Public Internet? Defaults to true. + InternetQueryEnabled *bool `json:"internetQueryEnabled,omitempty" tf:"internet_query_enabled,omitempty"` + + // Specifies if the log Analytics workspace should enforce authentication using Azure AD. Defaults to false. + LocalAuthenticationDisabled *bool `json:"localAuthenticationDisabled,omitempty" tf:"local_authentication_disabled,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The capacity reservation level in GB for this workspace. 
Possible values are 100, 200, 300, 400, 500, 1000, 2000 and 5000. + ReservationCapacityInGbPerDay *float64 `json:"reservationCapacityInGbPerDay,omitempty" tf:"reservation_capacity_in_gb_per_day,omitempty"` + + // The workspace data retention in days. Possible values are either 7 (Free Tier only) or range between 30 and 730. + RetentionInDays *float64 `json:"retentionInDays,omitempty" tf:"retention_in_days,omitempty"` + + // Specifies the SKU of the Log Analytics Workspace. Possible values are Free, PerNode, Premium, Standard, Standalone, Unlimited, CapacityReservation, and PerGB2018 (new SKU as of 2018-04-03). Defaults to PerGB2018. + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type WorkspaceObservation struct { + + // Specifies if the log Analytics Workspace allow users accessing to data associated with resources they have permission to view, without permission to workspace. Defaults to true. + AllowResourceOnlyPermissions *bool `json:"allowResourceOnlyPermissions,omitempty" tf:"allow_resource_only_permissions,omitempty"` + + // Is Customer Managed Storage mandatory for query management? + CmkForQueryForced *bool `json:"cmkForQueryForced,omitempty" tf:"cmk_for_query_forced,omitempty"` + + // The workspace daily quota for ingestion in GB. Defaults to -1 (unlimited) if omitted. + DailyQuotaGb *float64 `json:"dailyQuotaGb,omitempty" tf:"daily_quota_gb,omitempty"` + + // The ID of the Data Collection Rule to use for this workspace. + DataCollectionRuleID *string `json:"dataCollectionRuleId,omitempty" tf:"data_collection_rule_id,omitempty"` + + // The Log Analytics Workspace ID. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. 
+ Identity *IdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // Whether to remove the data in the Log Analytics Workspace immediately after 30 days. + ImmediateDataPurgeOn30DaysEnabled *bool `json:"immediateDataPurgeOn30DaysEnabled,omitempty" tf:"immediate_data_purge_on_30_days_enabled,omitempty"` + + // Should the Log Analytics Workspace support ingestion over the Public Internet? Defaults to true. + InternetIngestionEnabled *bool `json:"internetIngestionEnabled,omitempty" tf:"internet_ingestion_enabled,omitempty"` + + // Should the Log Analytics Workspace support querying over the Public Internet? Defaults to true. + InternetQueryEnabled *bool `json:"internetQueryEnabled,omitempty" tf:"internet_query_enabled,omitempty"` + + // Specifies if the log Analytics workspace should enforce authentication using Azure AD. Defaults to false. + LocalAuthenticationDisabled *bool `json:"localAuthenticationDisabled,omitempty" tf:"local_authentication_disabled,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The capacity reservation level in GB for this workspace. Possible values are 100, 200, 300, 400, 500, 1000, 2000 and 5000. + ReservationCapacityInGbPerDay *float64 `json:"reservationCapacityInGbPerDay,omitempty" tf:"reservation_capacity_in_gb_per_day,omitempty"` + + // The name of the resource group in which the Log Analytics workspace is created. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // The workspace data retention in days. Possible values are either 7 (Free Tier only) or range between 30 and 730. + RetentionInDays *float64 `json:"retentionInDays,omitempty" tf:"retention_in_days,omitempty"` + + // Specifies the SKU of the Log Analytics Workspace. 
Possible values are Free, PerNode, Premium, Standard, Standalone, Unlimited, CapacityReservation, and PerGB2018 (new SKU as of 2018-04-03). Defaults to PerGB2018. + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The Workspace (or Customer) ID for the Log Analytics Workspace. + WorkspaceID *string `json:"workspaceId,omitempty" tf:"workspace_id,omitempty"` +} + +type WorkspaceParameters struct { + + // Specifies if the log Analytics Workspace allow users accessing to data associated with resources they have permission to view, without permission to workspace. Defaults to true. + // +kubebuilder:validation:Optional + AllowResourceOnlyPermissions *bool `json:"allowResourceOnlyPermissions,omitempty" tf:"allow_resource_only_permissions,omitempty"` + + // Is Customer Managed Storage mandatory for query management? + // +kubebuilder:validation:Optional + CmkForQueryForced *bool `json:"cmkForQueryForced,omitempty" tf:"cmk_for_query_forced,omitempty"` + + // The workspace daily quota for ingestion in GB. Defaults to -1 (unlimited) if omitted. + // +kubebuilder:validation:Optional + DailyQuotaGb *float64 `json:"dailyQuotaGb,omitempty" tf:"daily_quota_gb,omitempty"` + + // The ID of the Data Collection Rule to use for this workspace. + // +kubebuilder:validation:Optional + DataCollectionRuleID *string `json:"dataCollectionRuleId,omitempty" tf:"data_collection_rule_id,omitempty"` + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *IdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Whether to remove the data in the Log Analytics Workspace immediately after 30 days. 
+ // +kubebuilder:validation:Optional + ImmediateDataPurgeOn30DaysEnabled *bool `json:"immediateDataPurgeOn30DaysEnabled,omitempty" tf:"immediate_data_purge_on_30_days_enabled,omitempty"` + + // Should the Log Analytics Workspace support ingestion over the Public Internet? Defaults to true. + // +kubebuilder:validation:Optional + InternetIngestionEnabled *bool `json:"internetIngestionEnabled,omitempty" tf:"internet_ingestion_enabled,omitempty"` + + // Should the Log Analytics Workspace support querying over the Public Internet? Defaults to true. + // +kubebuilder:validation:Optional + InternetQueryEnabled *bool `json:"internetQueryEnabled,omitempty" tf:"internet_query_enabled,omitempty"` + + // Specifies if the log Analytics workspace should enforce authentication using Azure AD. Defaults to false. + // +kubebuilder:validation:Optional + LocalAuthenticationDisabled *bool `json:"localAuthenticationDisabled,omitempty" tf:"local_authentication_disabled,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The capacity reservation level in GB for this workspace. Possible values are 100, 200, 300, 400, 500, 1000, 2000 and 5000. + // +kubebuilder:validation:Optional + ReservationCapacityInGbPerDay *float64 `json:"reservationCapacityInGbPerDay,omitempty" tf:"reservation_capacity_in_gb_per_day,omitempty"` + + // The name of the resource group in which the Log Analytics workspace is created. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. 
+ // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // The workspace data retention in days. Possible values are either 7 (Free Tier only) or range between 30 and 730. + // +kubebuilder:validation:Optional + RetentionInDays *float64 `json:"retentionInDays,omitempty" tf:"retention_in_days,omitempty"` + + // Specifies the SKU of the Log Analytics Workspace. Possible values are Free, PerNode, Premium, Standard, Standalone, Unlimited, CapacityReservation, and PerGB2018 (new SKU as of 2018-04-03). Defaults to PerGB2018. + // +kubebuilder:validation:Optional + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +// WorkspaceSpec defines the desired state of Workspace +type WorkspaceSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider WorkspaceParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider WorkspaceInitParameters `json:"initProvider,omitempty"` +} + +// WorkspaceStatus defines the observed state of Workspace. +type WorkspaceStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider WorkspaceObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Workspace is the Schema for the Workspaces API. Manages a Log Analytics (formally Operational Insights) Workspace. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type Workspace struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + Spec WorkspaceSpec `json:"spec"` + Status WorkspaceStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// WorkspaceList contains a list of Workspaces +type WorkspaceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Workspace `json:"items"` +} + +// Repository type metadata. +var ( + Workspace_Kind = "Workspace" + Workspace_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Workspace_Kind}.String() + Workspace_KindAPIVersion = Workspace_Kind + "." 
+ CRDGroupVersion.String() + Workspace_GroupVersionKind = CRDGroupVersion.WithKind(Workspace_Kind) +) + +func init() { + SchemeBuilder.Register(&Workspace{}, &WorkspaceList{}) +} diff --git a/apis/operationsmanagement/v1beta1/zz_generated.conversion_spokes.go b/apis/operationsmanagement/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..b5f1148f5 --- /dev/null +++ b/apis/operationsmanagement/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this LogAnalyticsSolution to the hub type. +func (tr *LogAnalyticsSolution) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the LogAnalyticsSolution type. 
+func (tr *LogAnalyticsSolution) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/operationsmanagement/v1beta1/zz_generated.conversion_hubs.go b/apis/operationsmanagement/v1beta2/zz_generated.conversion_hubs.go similarity index 93% rename from apis/operationsmanagement/v1beta1/zz_generated.conversion_hubs.go rename to apis/operationsmanagement/v1beta2/zz_generated.conversion_hubs.go index d8e8048dc..7c80f4e1e 100755 --- a/apis/operationsmanagement/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/operationsmanagement/v1beta2/zz_generated.conversion_hubs.go @@ -4,7 +4,7 @@ // Code generated by upjet. DO NOT EDIT. -package v1beta1 +package v1beta2 // Hub marks this type as a conversion hub. func (tr *LogAnalyticsSolution) Hub() {} diff --git a/apis/operationsmanagement/v1beta2/zz_generated.deepcopy.go b/apis/operationsmanagement/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..2ab6d96fd --- /dev/null +++ b/apis/operationsmanagement/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,451 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LogAnalyticsSolution) DeepCopyInto(out *LogAnalyticsSolution) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogAnalyticsSolution. +func (in *LogAnalyticsSolution) DeepCopy() *LogAnalyticsSolution { + if in == nil { + return nil + } + out := new(LogAnalyticsSolution) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LogAnalyticsSolution) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogAnalyticsSolutionInitParameters) DeepCopyInto(out *LogAnalyticsSolutionInitParameters) { + *out = *in + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Plan != nil { + in, out := &in.Plan, &out.Plan + *out = new(PlanInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SolutionName != nil { + in, out := &in.SolutionName, &out.SolutionName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + 
(*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.WorkspaceName != nil { + in, out := &in.WorkspaceName, &out.WorkspaceName + *out = new(string) + **out = **in + } + if in.WorkspaceNameRef != nil { + in, out := &in.WorkspaceNameRef, &out.WorkspaceNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.WorkspaceNameSelector != nil { + in, out := &in.WorkspaceNameSelector, &out.WorkspaceNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.WorkspaceResourceID != nil { + in, out := &in.WorkspaceResourceID, &out.WorkspaceResourceID + *out = new(string) + **out = **in + } + if in.WorkspaceResourceIDRef != nil { + in, out := &in.WorkspaceResourceIDRef, &out.WorkspaceResourceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.WorkspaceResourceIDSelector != nil { + in, out := &in.WorkspaceResourceIDSelector, &out.WorkspaceResourceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogAnalyticsSolutionInitParameters. +func (in *LogAnalyticsSolutionInitParameters) DeepCopy() *LogAnalyticsSolutionInitParameters { + if in == nil { + return nil + } + out := new(LogAnalyticsSolutionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogAnalyticsSolutionList) DeepCopyInto(out *LogAnalyticsSolutionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LogAnalyticsSolution, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogAnalyticsSolutionList. 
+func (in *LogAnalyticsSolutionList) DeepCopy() *LogAnalyticsSolutionList { + if in == nil { + return nil + } + out := new(LogAnalyticsSolutionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LogAnalyticsSolutionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogAnalyticsSolutionObservation) DeepCopyInto(out *LogAnalyticsSolutionObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Plan != nil { + in, out := &in.Plan, &out.Plan + *out = new(PlanObservation) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.SolutionName != nil { + in, out := &in.SolutionName, &out.SolutionName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.WorkspaceName != nil { + in, out := &in.WorkspaceName, &out.WorkspaceName + *out = new(string) + **out = **in + } + if in.WorkspaceResourceID != nil { + in, out := &in.WorkspaceResourceID, &out.WorkspaceResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogAnalyticsSolutionObservation. 
+func (in *LogAnalyticsSolutionObservation) DeepCopy() *LogAnalyticsSolutionObservation { + if in == nil { + return nil + } + out := new(LogAnalyticsSolutionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogAnalyticsSolutionParameters) DeepCopyInto(out *LogAnalyticsSolutionParameters) { + *out = *in + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Plan != nil { + in, out := &in.Plan, &out.Plan + *out = new(PlanParameters) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SolutionName != nil { + in, out := &in.SolutionName, &out.SolutionName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.WorkspaceName != nil { + in, out := &in.WorkspaceName, &out.WorkspaceName + *out = new(string) + **out = **in + } + if in.WorkspaceNameRef != nil { + in, out := &in.WorkspaceNameRef, &out.WorkspaceNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.WorkspaceNameSelector != nil { + in, out := &in.WorkspaceNameSelector, &out.WorkspaceNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + 
} + if in.WorkspaceResourceID != nil { + in, out := &in.WorkspaceResourceID, &out.WorkspaceResourceID + *out = new(string) + **out = **in + } + if in.WorkspaceResourceIDRef != nil { + in, out := &in.WorkspaceResourceIDRef, &out.WorkspaceResourceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.WorkspaceResourceIDSelector != nil { + in, out := &in.WorkspaceResourceIDSelector, &out.WorkspaceResourceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogAnalyticsSolutionParameters. +func (in *LogAnalyticsSolutionParameters) DeepCopy() *LogAnalyticsSolutionParameters { + if in == nil { + return nil + } + out := new(LogAnalyticsSolutionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogAnalyticsSolutionSpec) DeepCopyInto(out *LogAnalyticsSolutionSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogAnalyticsSolutionSpec. +func (in *LogAnalyticsSolutionSpec) DeepCopy() *LogAnalyticsSolutionSpec { + if in == nil { + return nil + } + out := new(LogAnalyticsSolutionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogAnalyticsSolutionStatus) DeepCopyInto(out *LogAnalyticsSolutionStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogAnalyticsSolutionStatus. 
+func (in *LogAnalyticsSolutionStatus) DeepCopy() *LogAnalyticsSolutionStatus { + if in == nil { + return nil + } + out := new(LogAnalyticsSolutionStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlanInitParameters) DeepCopyInto(out *PlanInitParameters) { + *out = *in + if in.Product != nil { + in, out := &in.Product, &out.Product + *out = new(string) + **out = **in + } + if in.PromotionCode != nil { + in, out := &in.PromotionCode, &out.PromotionCode + *out = new(string) + **out = **in + } + if in.Publisher != nil { + in, out := &in.Publisher, &out.Publisher + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlanInitParameters. +func (in *PlanInitParameters) DeepCopy() *PlanInitParameters { + if in == nil { + return nil + } + out := new(PlanInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlanObservation) DeepCopyInto(out *PlanObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Product != nil { + in, out := &in.Product, &out.Product + *out = new(string) + **out = **in + } + if in.PromotionCode != nil { + in, out := &in.PromotionCode, &out.PromotionCode + *out = new(string) + **out = **in + } + if in.Publisher != nil { + in, out := &in.Publisher, &out.Publisher + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlanObservation. 
+func (in *PlanObservation) DeepCopy() *PlanObservation { + if in == nil { + return nil + } + out := new(PlanObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlanParameters) DeepCopyInto(out *PlanParameters) { + *out = *in + if in.Product != nil { + in, out := &in.Product, &out.Product + *out = new(string) + **out = **in + } + if in.PromotionCode != nil { + in, out := &in.PromotionCode, &out.PromotionCode + *out = new(string) + **out = **in + } + if in.Publisher != nil { + in, out := &in.Publisher, &out.Publisher + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlanParameters. +func (in *PlanParameters) DeepCopy() *PlanParameters { + if in == nil { + return nil + } + out := new(PlanParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/operationsmanagement/v1beta2/zz_generated.managed.go b/apis/operationsmanagement/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..3636959d1 --- /dev/null +++ b/apis/operationsmanagement/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this LogAnalyticsSolution. +func (mg *LogAnalyticsSolution) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this LogAnalyticsSolution. +func (mg *LogAnalyticsSolution) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this LogAnalyticsSolution. 
+func (mg *LogAnalyticsSolution) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this LogAnalyticsSolution. +func (mg *LogAnalyticsSolution) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this LogAnalyticsSolution. +func (mg *LogAnalyticsSolution) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this LogAnalyticsSolution. +func (mg *LogAnalyticsSolution) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this LogAnalyticsSolution. +func (mg *LogAnalyticsSolution) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this LogAnalyticsSolution. +func (mg *LogAnalyticsSolution) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this LogAnalyticsSolution. +func (mg *LogAnalyticsSolution) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this LogAnalyticsSolution. +func (mg *LogAnalyticsSolution) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this LogAnalyticsSolution. +func (mg *LogAnalyticsSolution) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this LogAnalyticsSolution. 
+func (mg *LogAnalyticsSolution) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/operationsmanagement/v1beta2/zz_generated.managedlist.go b/apis/operationsmanagement/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..c432016a2 --- /dev/null +++ b/apis/operationsmanagement/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this LogAnalyticsSolutionList. +func (l *LogAnalyticsSolutionList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/operationsmanagement/v1beta2/zz_generated.resolvers.go b/apis/operationsmanagement/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..a63578981 --- /dev/null +++ b/apis/operationsmanagement/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,144 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + apisresolver "github.com/upbound/provider-azure/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *LogAnalyticsSolution) ResolveReferences( // ResolveReferences of this LogAnalyticsSolution. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("operationalinsights.azure.upbound.io", "v1beta2", "Workspace", "WorkspaceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.WorkspaceName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.WorkspaceNameRef, + Selector: mg.Spec.ForProvider.WorkspaceNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.WorkspaceName") + } + mg.Spec.ForProvider.WorkspaceName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.WorkspaceNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("operationalinsights.azure.upbound.io", "v1beta2", "Workspace", "WorkspaceList") + if 
err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.WorkspaceResourceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.WorkspaceResourceIDRef, + Selector: mg.Spec.ForProvider.WorkspaceResourceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.WorkspaceResourceID") + } + mg.Spec.ForProvider.WorkspaceResourceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.WorkspaceResourceIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ResourceGroupNameRef, + Selector: mg.Spec.InitProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ResourceGroupName") + } + mg.Spec.InitProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("operationalinsights.azure.upbound.io", "v1beta2", "Workspace", "WorkspaceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.WorkspaceName), + Extract: 
reference.ExternalName(), + Reference: mg.Spec.InitProvider.WorkspaceNameRef, + Selector: mg.Spec.InitProvider.WorkspaceNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.WorkspaceName") + } + mg.Spec.InitProvider.WorkspaceName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.WorkspaceNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("operationalinsights.azure.upbound.io", "v1beta2", "Workspace", "WorkspaceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.WorkspaceResourceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.WorkspaceResourceIDRef, + Selector: mg.Spec.InitProvider.WorkspaceResourceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.WorkspaceResourceID") + } + mg.Spec.InitProvider.WorkspaceResourceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.WorkspaceResourceIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/operationsmanagement/v1beta2/zz_groupversion_info.go b/apis/operationsmanagement/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..7b0071264 --- /dev/null +++ b/apis/operationsmanagement/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=operationsmanagement.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
+const ( + CRDGroup = "operationsmanagement.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/operationsmanagement/v1beta2/zz_loganalyticssolution_terraformed.go b/apis/operationsmanagement/v1beta2/zz_loganalyticssolution_terraformed.go new file mode 100755 index 000000000..6f3bb3f18 --- /dev/null +++ b/apis/operationsmanagement/v1beta2/zz_loganalyticssolution_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this LogAnalyticsSolution +func (mg *LogAnalyticsSolution) GetTerraformResourceType() string { + return "azurerm_log_analytics_solution" +} + +// GetConnectionDetailsMapping for this LogAnalyticsSolution +func (tr *LogAnalyticsSolution) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this LogAnalyticsSolution +func (tr *LogAnalyticsSolution) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this LogAnalyticsSolution +func (tr *LogAnalyticsSolution) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if 
err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this LogAnalyticsSolution +func (tr *LogAnalyticsSolution) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this LogAnalyticsSolution +func (tr *LogAnalyticsSolution) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this LogAnalyticsSolution +func (tr *LogAnalyticsSolution) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this LogAnalyticsSolution +func (tr *LogAnalyticsSolution) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this LogAnalyticsSolution +func (tr *LogAnalyticsSolution) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this LogAnalyticsSolution using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *LogAnalyticsSolution) LateInitialize(attrs []byte) (bool, error) { + params := &LogAnalyticsSolutionParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *LogAnalyticsSolution) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/operationsmanagement/v1beta2/zz_loganalyticssolution_types.go b/apis/operationsmanagement/v1beta2/zz_loganalyticssolution_types.go new file mode 100755 index 000000000..d50c4ffa2 --- /dev/null +++ b/apis/operationsmanagement/v1beta2/zz_loganalyticssolution_types.go @@ -0,0 +1,256 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type LogAnalyticsSolutionInitParameters struct { + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. 
+ Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A plan block as documented below. + Plan *PlanInitParameters `json:"plan,omitempty" tf:"plan,omitempty"` + + // The name of the resource group in which the Log Analytics solution is created. Changing this forces a new resource to be created. Note: The solution and its related workspace can only exist in the same resource group. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // Specifies the name of the solution to be deployed. See here for options.Changing this forces a new resource to be created. + SolutionName *string `json:"solutionName,omitempty" tf:"solution_name,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The full name of the Log Analytics workspace with which the solution will be linked. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/operationalinsights/v1beta2.Workspace + WorkspaceName *string `json:"workspaceName,omitempty" tf:"workspace_name,omitempty"` + + // Reference to a Workspace in operationalinsights to populate workspaceName. 
+ // +kubebuilder:validation:Optional + WorkspaceNameRef *v1.Reference `json:"workspaceNameRef,omitempty" tf:"-"` + + // Selector for a Workspace in operationalinsights to populate workspaceName. + // +kubebuilder:validation:Optional + WorkspaceNameSelector *v1.Selector `json:"workspaceNameSelector,omitempty" tf:"-"` + + // The full resource ID of the Log Analytics workspace with which the solution will be linked. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/operationalinsights/v1beta2.Workspace + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + WorkspaceResourceID *string `json:"workspaceResourceId,omitempty" tf:"workspace_resource_id,omitempty"` + + // Reference to a Workspace in operationalinsights to populate workspaceResourceId. + // +kubebuilder:validation:Optional + WorkspaceResourceIDRef *v1.Reference `json:"workspaceResourceIdRef,omitempty" tf:"-"` + + // Selector for a Workspace in operationalinsights to populate workspaceResourceId. + // +kubebuilder:validation:Optional + WorkspaceResourceIDSelector *v1.Selector `json:"workspaceResourceIdSelector,omitempty" tf:"-"` +} + +type LogAnalyticsSolutionObservation struct { + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A plan block as documented below. + Plan *PlanObservation `json:"plan,omitempty" tf:"plan,omitempty"` + + // The name of the resource group in which the Log Analytics solution is created. Changing this forces a new resource to be created. Note: The solution and its related workspace can only exist in the same resource group. 
+ ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Specifies the name of the solution to be deployed. See here for options.Changing this forces a new resource to be created. + SolutionName *string `json:"solutionName,omitempty" tf:"solution_name,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The full name of the Log Analytics workspace with which the solution will be linked. Changing this forces a new resource to be created. + WorkspaceName *string `json:"workspaceName,omitempty" tf:"workspace_name,omitempty"` + + // The full resource ID of the Log Analytics workspace with which the solution will be linked. Changing this forces a new resource to be created. + WorkspaceResourceID *string `json:"workspaceResourceId,omitempty" tf:"workspace_resource_id,omitempty"` +} + +type LogAnalyticsSolutionParameters struct { + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A plan block as documented below. + // +kubebuilder:validation:Optional + Plan *PlanParameters `json:"plan,omitempty" tf:"plan,omitempty"` + + // The name of the resource group in which the Log Analytics solution is created. Changing this forces a new resource to be created. Note: The solution and its related workspace can only exist in the same resource group. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. 
+ // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // Specifies the name of the solution to be deployed. See here for options.Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + SolutionName *string `json:"solutionName,omitempty" tf:"solution_name,omitempty"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The full name of the Log Analytics workspace with which the solution will be linked. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/operationalinsights/v1beta2.Workspace + // +kubebuilder:validation:Optional + WorkspaceName *string `json:"workspaceName,omitempty" tf:"workspace_name,omitempty"` + + // Reference to a Workspace in operationalinsights to populate workspaceName. + // +kubebuilder:validation:Optional + WorkspaceNameRef *v1.Reference `json:"workspaceNameRef,omitempty" tf:"-"` + + // Selector for a Workspace in operationalinsights to populate workspaceName. + // +kubebuilder:validation:Optional + WorkspaceNameSelector *v1.Selector `json:"workspaceNameSelector,omitempty" tf:"-"` + + // The full resource ID of the Log Analytics workspace with which the solution will be linked. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/operationalinsights/v1beta2.Workspace + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + WorkspaceResourceID *string `json:"workspaceResourceId,omitempty" tf:"workspace_resource_id,omitempty"` + + // Reference to a Workspace in operationalinsights to populate workspaceResourceId. + // +kubebuilder:validation:Optional + WorkspaceResourceIDRef *v1.Reference `json:"workspaceResourceIdRef,omitempty" tf:"-"` + + // Selector for a Workspace in operationalinsights to populate workspaceResourceId. + // +kubebuilder:validation:Optional + WorkspaceResourceIDSelector *v1.Selector `json:"workspaceResourceIdSelector,omitempty" tf:"-"` +} + +type PlanInitParameters struct { + + // The product name of the solution. For example OMSGallery/Containers. Changing this forces a new resource to be created. + Product *string `json:"product,omitempty" tf:"product,omitempty"` + + // A promotion code to be used with the solution. Changing this forces a new resource to be created. + PromotionCode *string `json:"promotionCode,omitempty" tf:"promotion_code,omitempty"` + + // The publisher of the solution. For example Microsoft. Changing this forces a new resource to be created. + Publisher *string `json:"publisher,omitempty" tf:"publisher,omitempty"` +} + +type PlanObservation struct { + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The product name of the solution. For example OMSGallery/Containers. Changing this forces a new resource to be created. + Product *string `json:"product,omitempty" tf:"product,omitempty"` + + // A promotion code to be used with the solution. Changing this forces a new resource to be created. + PromotionCode *string `json:"promotionCode,omitempty" tf:"promotion_code,omitempty"` + + // The publisher of the solution. For example Microsoft. 
Changing this forces a new resource to be created. + Publisher *string `json:"publisher,omitempty" tf:"publisher,omitempty"` +} + +type PlanParameters struct { + + // The product name of the solution. For example OMSGallery/Containers. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Product *string `json:"product" tf:"product,omitempty"` + + // A promotion code to be used with the solution. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + PromotionCode *string `json:"promotionCode,omitempty" tf:"promotion_code,omitempty"` + + // The publisher of the solution. For example Microsoft. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Publisher *string `json:"publisher" tf:"publisher,omitempty"` +} + +// LogAnalyticsSolutionSpec defines the desired state of LogAnalyticsSolution +type LogAnalyticsSolutionSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider LogAnalyticsSolutionParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider LogAnalyticsSolutionInitParameters `json:"initProvider,omitempty"` +} + +// LogAnalyticsSolutionStatus defines the observed state of LogAnalyticsSolution. 
+type LogAnalyticsSolutionStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider LogAnalyticsSolutionObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// LogAnalyticsSolution is the Schema for the LogAnalyticsSolutions API. Manages a Log Analytics (formally Operational Insights) Solution. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type LogAnalyticsSolution struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.plan) || (has(self.initProvider) && has(self.initProvider.plan))",message="spec.forProvider.plan is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.solutionName) || (has(self.initProvider) && has(self.initProvider.solutionName))",message="spec.forProvider.solutionName is a required parameter" + Spec LogAnalyticsSolutionSpec 
`json:"spec"` + Status LogAnalyticsSolutionStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// LogAnalyticsSolutionList contains a list of LogAnalyticsSolutions +type LogAnalyticsSolutionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []LogAnalyticsSolution `json:"items"` +} + +// Repository type metadata. +var ( + LogAnalyticsSolution_Kind = "LogAnalyticsSolution" + LogAnalyticsSolution_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: LogAnalyticsSolution_Kind}.String() + LogAnalyticsSolution_KindAPIVersion = LogAnalyticsSolution_Kind + "." + CRDGroupVersion.String() + LogAnalyticsSolution_GroupVersionKind = CRDGroupVersion.WithKind(LogAnalyticsSolution_Kind) +) + +func init() { + SchemeBuilder.Register(&LogAnalyticsSolution{}, &LogAnalyticsSolutionList{}) +} diff --git a/apis/orbital/v1beta1/zz_contactprofile_types.go b/apis/orbital/v1beta1/zz_contactprofile_types.go index 4409d6cab..9d894002e 100755 --- a/apis/orbital/v1beta1/zz_contactprofile_types.go +++ b/apis/orbital/v1beta1/zz_contactprofile_types.go @@ -103,7 +103,7 @@ type ContactProfileInitParameters struct { MinimumVariableContactDuration *string `json:"minimumVariableContactDuration,omitempty" tf:"minimum_variable_contact_duration,omitempty"` // ARM resource identifier of the subnet delegated to the Microsoft.Orbital/orbitalGateways. Needs to be at least a class C subnet, and should not have any IP created in it. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.Subnet + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() NetworkConfigurationSubnetID *string `json:"networkConfigurationSubnetId,omitempty" tf:"network_configuration_subnet_id,omitempty"` @@ -181,7 +181,7 @@ type ContactProfileParameters struct { MinimumVariableContactDuration *string `json:"minimumVariableContactDuration,omitempty" tf:"minimum_variable_contact_duration,omitempty"` // ARM resource identifier of the subnet delegated to the Microsoft.Orbital/orbitalGateways. Needs to be at least a class C subnet, and should not have any IP created in it. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.Subnet + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional NetworkConfigurationSubnetID *string `json:"networkConfigurationSubnetId,omitempty" tf:"network_configuration_subnet_id,omitempty"` diff --git a/apis/orbital/v1beta1/zz_generated.resolvers.go b/apis/orbital/v1beta1/zz_generated.resolvers.go index 949bef656..1cdbadc74 100644 --- a/apis/orbital/v1beta1/zz_generated.resolvers.go +++ b/apis/orbital/v1beta1/zz_generated.resolvers.go @@ -26,7 +26,7 @@ func (mg *ContactProfile) ResolveReferences( // ResolveReferences of this Contac var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "Subnet", "SubnetList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") if err != nil { return 
errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -64,7 +64,7 @@ func (mg *ContactProfile) ResolveReferences( // ResolveReferences of this Contac mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "Subnet", "SubnetList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/policyinsights/v1beta1/zz_generated.resolvers.go b/apis/policyinsights/v1beta1/zz_generated.resolvers.go index 4bab3e5a9..ae330fdfa 100644 --- a/apis/policyinsights/v1beta1/zz_generated.resolvers.go +++ b/apis/policyinsights/v1beta1/zz_generated.resolvers.go @@ -26,7 +26,7 @@ func (mg *ResourcePolicyRemediation) ResolveReferences( // ResolveReferences of var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("authorization.azure.upbound.io", "v1beta1", "ResourceGroupPolicyAssignment", "ResourceGroupPolicyAssignmentList") + m, l, err = apisresolver.GetManagedResource("authorization.azure.upbound.io", "v1beta2", "ResourceGroupPolicyAssignment", "ResourceGroupPolicyAssignmentList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -45,7 +45,7 @@ func (mg *ResourcePolicyRemediation) ResolveReferences( // ResolveReferences of mg.Spec.ForProvider.PolicyAssignmentID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.PolicyAssignmentIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "VirtualNetwork", "VirtualNetworkList") + m, l, err = 
apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "VirtualNetwork", "VirtualNetworkList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -64,7 +64,7 @@ func (mg *ResourcePolicyRemediation) ResolveReferences( // ResolveReferences of mg.Spec.ForProvider.ResourceID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("authorization.azure.upbound.io", "v1beta1", "ResourceGroupPolicyAssignment", "ResourceGroupPolicyAssignmentList") + m, l, err = apisresolver.GetManagedResource("authorization.azure.upbound.io", "v1beta2", "ResourceGroupPolicyAssignment", "ResourceGroupPolicyAssignmentList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -83,7 +83,7 @@ func (mg *ResourcePolicyRemediation) ResolveReferences( // ResolveReferences of mg.Spec.InitProvider.PolicyAssignmentID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.PolicyAssignmentIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "VirtualNetwork", "VirtualNetworkList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "VirtualNetwork", "VirtualNetworkList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -114,7 +114,7 @@ func (mg *SubscriptionPolicyRemediation) ResolveReferences(ctx context.Context, var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("authorization.azure.upbound.io", "v1beta1", "SubscriptionPolicyAssignment", "SubscriptionPolicyAssignmentList") + m, l, err = apisresolver.GetManagedResource("authorization.azure.upbound.io", "v1beta2", 
"SubscriptionPolicyAssignment", "SubscriptionPolicyAssignmentList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -133,7 +133,7 @@ func (mg *SubscriptionPolicyRemediation) ResolveReferences(ctx context.Context, mg.Spec.ForProvider.PolicyAssignmentID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.PolicyAssignmentIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("authorization.azure.upbound.io", "v1beta1", "SubscriptionPolicyAssignment", "SubscriptionPolicyAssignmentList") + m, l, err = apisresolver.GetManagedResource("authorization.azure.upbound.io", "v1beta2", "SubscriptionPolicyAssignment", "SubscriptionPolicyAssignmentList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/policyinsights/v1beta1/zz_resourcepolicyremediation_types.go b/apis/policyinsights/v1beta1/zz_resourcepolicyremediation_types.go index 5afa47394..7b85e24af 100755 --- a/apis/policyinsights/v1beta1/zz_resourcepolicyremediation_types.go +++ b/apis/policyinsights/v1beta1/zz_resourcepolicyremediation_types.go @@ -28,7 +28,7 @@ type ResourcePolicyRemediationInitParameters struct { ParallelDeployments *float64 `json:"parallelDeployments,omitempty" tf:"parallel_deployments,omitempty"` // The ID of the Policy Assignment that should be remediated. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/authorization/v1beta1.ResourceGroupPolicyAssignment + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/authorization/v1beta2.ResourceGroupPolicyAssignment // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() PolicyAssignmentID *string `json:"policyAssignmentId,omitempty" tf:"policy_assignment_id,omitempty"` @@ -53,7 +53,7 @@ type ResourcePolicyRemediationInitParameters struct { ResourceDiscoveryMode *string `json:"resourceDiscoveryMode,omitempty" tf:"resource_discovery_mode,omitempty"` // The Resource ID at which the Policy Remediation should be applied. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.VirtualNetwork + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.VirtualNetwork // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() ResourceID *string `json:"resourceId,omitempty" tf:"resource_id,omitempty"` @@ -121,7 +121,7 @@ type ResourcePolicyRemediationParameters struct { ParallelDeployments *float64 `json:"parallelDeployments,omitempty" tf:"parallel_deployments,omitempty"` // The ID of the Policy Assignment that should be remediated. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/authorization/v1beta1.ResourceGroupPolicyAssignment + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/authorization/v1beta2.ResourceGroupPolicyAssignment // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional PolicyAssignmentID *string `json:"policyAssignmentId,omitempty" tf:"policy_assignment_id,omitempty"` @@ -151,7 +151,7 @@ type ResourcePolicyRemediationParameters struct { ResourceDiscoveryMode *string `json:"resourceDiscoveryMode,omitempty" tf:"resource_discovery_mode,omitempty"` // The Resource ID at which the Policy Remediation should be applied. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.VirtualNetwork + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.VirtualNetwork // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional ResourceID *string `json:"resourceId,omitempty" tf:"resource_id,omitempty"` diff --git a/apis/policyinsights/v1beta1/zz_subscriptionpolicyremediation_types.go b/apis/policyinsights/v1beta1/zz_subscriptionpolicyremediation_types.go index 12d2e1647..a0841e57b 100755 --- a/apis/policyinsights/v1beta1/zz_subscriptionpolicyremediation_types.go +++ b/apis/policyinsights/v1beta1/zz_subscriptionpolicyremediation_types.go @@ -25,7 +25,7 @@ type SubscriptionPolicyRemediationInitParameters struct { ParallelDeployments *float64 `json:"parallelDeployments,omitempty" tf:"parallel_deployments,omitempty"` // The ID of the Policy Assignment that should be remediated. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/authorization/v1beta1.SubscriptionPolicyAssignment + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/authorization/v1beta2.SubscriptionPolicyAssignment // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() PolicyAssignmentID *string `json:"policyAssignmentId,omitempty" tf:"policy_assignment_id,omitempty"` @@ -101,7 +101,7 @@ type SubscriptionPolicyRemediationParameters struct { ParallelDeployments *float64 `json:"parallelDeployments,omitempty" tf:"parallel_deployments,omitempty"` // The ID of the Policy Assignment that should be remediated. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/authorization/v1beta1.SubscriptionPolicyAssignment + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/authorization/v1beta2.SubscriptionPolicyAssignment // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional PolicyAssignmentID *string `json:"policyAssignmentId,omitempty" tf:"policy_assignment_id,omitempty"` diff --git a/apis/purview/v1beta1/zz_generated.conversion_spokes.go b/apis/purview/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..7dd07a30c --- /dev/null +++ b/apis/purview/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Account to the hub type. 
+func (tr *Account) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Account type. +func (tr *Account) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/purview/v1beta2/zz_account_terraformed.go b/apis/purview/v1beta2/zz_account_terraformed.go new file mode 100755 index 000000000..f4ae19063 --- /dev/null +++ b/apis/purview/v1beta2/zz_account_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Account +func (mg *Account) GetTerraformResourceType() string { + return "azurerm_purview_account" +} + +// GetConnectionDetailsMapping for this Account +func (tr *Account) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"atlas_kafka_endpoint_primary_connection_string": "status.atProvider.atlasKafkaEndpointPrimaryConnectionString", "atlas_kafka_endpoint_secondary_connection_string": "status.atProvider.atlasKafkaEndpointSecondaryConnectionString"} +} + +// GetObservation of this Account +func (tr *Account) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Account +func (tr *Account) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Account +func (tr *Account) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Account +func (tr *Account) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Account +func (tr *Account) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Account 
+func (tr *Account) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Account +func (tr *Account) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Account using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Account) LateInitialize(attrs []byte) (bool, error) { + params := &AccountParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Account) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/purview/v1beta2/zz_account_types.go b/apis/purview/v1beta2/zz_account_types.go new file mode 100755 index 000000000..e3445ef44 --- /dev/null +++ b/apis/purview/v1beta2/zz_account_types.go @@ -0,0 +1,243 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AccountInitParameters struct { + + // An identity block as defined below. + Identity *IdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The Azure Region where the Purview Account should exist. Changing this forces a new Purview Account to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name which should be used for the new Resource Group where Purview Account creates the managed resources. Changing this forces a new Purview Account to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + ManagedResourceGroupName *string `json:"managedResourceGroupName,omitempty" tf:"managed_resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate managedResourceGroupName. + // +kubebuilder:validation:Optional + ManagedResourceGroupNameRef *v1.Reference `json:"managedResourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate managedResourceGroupName. 
+ // +kubebuilder:validation:Optional + ManagedResourceGroupNameSelector *v1.Selector `json:"managedResourceGroupNameSelector,omitempty" tf:"-"` + + // Should the Purview Account be visible to the public network? Defaults to true. + PublicNetworkEnabled *bool `json:"publicNetworkEnabled,omitempty" tf:"public_network_enabled,omitempty"` + + // A mapping of tags which should be assigned to the Purview Account. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type AccountObservation struct { + + // Catalog endpoint. + CatalogEndpoint *string `json:"catalogEndpoint,omitempty" tf:"catalog_endpoint,omitempty"` + + // Guardian endpoint. + GuardianEndpoint *string `json:"guardianEndpoint,omitempty" tf:"guardian_endpoint,omitempty"` + + // The ID of the Purview Account. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. + Identity *IdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // The Azure Region where the Purview Account should exist. Changing this forces a new Purview Account to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name which should be used for the new Resource Group where Purview Account creates the managed resources. Changing this forces a new Purview Account to be created. + ManagedResourceGroupName *string `json:"managedResourceGroupName,omitempty" tf:"managed_resource_group_name,omitempty"` + + // A managed_resources block as defined below. + ManagedResources []ManagedResourcesObservation `json:"managedResources,omitempty" tf:"managed_resources,omitempty"` + + // Should the Purview Account be visible to the public network? Defaults to true. + PublicNetworkEnabled *bool `json:"publicNetworkEnabled,omitempty" tf:"public_network_enabled,omitempty"` + + // The name of the Resource Group where the Purview Account should exist. Changing this forces a new Purview Account to be created. 
+ ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Scan endpoint. + ScanEndpoint *string `json:"scanEndpoint,omitempty" tf:"scan_endpoint,omitempty"` + + // A mapping of tags which should be assigned to the Purview Account. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type AccountParameters struct { + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *IdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The Azure Region where the Purview Account should exist. Changing this forces a new Purview Account to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name which should be used for the new Resource Group where Purview Account creates the managed resources. Changing this forces a new Purview Account to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ManagedResourceGroupName *string `json:"managedResourceGroupName,omitempty" tf:"managed_resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate managedResourceGroupName. + // +kubebuilder:validation:Optional + ManagedResourceGroupNameRef *v1.Reference `json:"managedResourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate managedResourceGroupName. + // +kubebuilder:validation:Optional + ManagedResourceGroupNameSelector *v1.Selector `json:"managedResourceGroupNameSelector,omitempty" tf:"-"` + + // Should the Purview Account be visible to the public network? Defaults to true. 
+ // +kubebuilder:validation:Optional + PublicNetworkEnabled *bool `json:"publicNetworkEnabled,omitempty" tf:"public_network_enabled,omitempty"` + + // The name of the Resource Group where the Purview Account should exist. Changing this forces a new Purview Account to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A mapping of tags which should be assigned to the Purview Account. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type IdentityInitParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Purview Account. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Purview Account. Possible values are UserAssigned and SystemAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityObservation struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Purview Account. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Principal ID associated with this Managed Service Identity. 
+ PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID associated with this Managed Service Identity. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Purview Account. Possible values are UserAssigned and SystemAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Purview Account. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Purview Account. Possible values are UserAssigned and SystemAssigned. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type ManagedResourcesInitParameters struct { +} + +type ManagedResourcesObservation struct { + + // The ID of the managed event hub namespace. + EventHubNamespaceID *string `json:"eventHubNamespaceId,omitempty" tf:"event_hub_namespace_id,omitempty"` + + // The ID of the managed resource group. + ResourceGroupID *string `json:"resourceGroupId,omitempty" tf:"resource_group_id,omitempty"` + + // The ID of the managed storage account. + StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` +} + +type ManagedResourcesParameters struct { +} + +// AccountSpec defines the desired state of Account +type AccountSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider AccountParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. 
The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider AccountInitParameters `json:"initProvider,omitempty"` +} + +// AccountStatus defines the observed state of Account. +type AccountStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider AccountObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Account is the Schema for the Accounts API. Manages a Purview Account. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type Account struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.identity) || (has(self.initProvider) && has(self.initProvider.identity))",message="spec.forProvider.identity is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || 
(has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + Spec AccountSpec `json:"spec"` + Status AccountStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// AccountList contains a list of Accounts +type AccountList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Account `json:"items"` +} + +// Repository type metadata. +var ( + Account_Kind = "Account" + Account_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Account_Kind}.String() + Account_KindAPIVersion = Account_Kind + "." + CRDGroupVersion.String() + Account_GroupVersionKind = CRDGroupVersion.WithKind(Account_Kind) +) + +func init() { + SchemeBuilder.Register(&Account{}, &AccountList{}) +} diff --git a/apis/purview/v1beta1/zz_generated.conversion_hubs.go b/apis/purview/v1beta2/zz_generated.conversion_hubs.go similarity index 93% rename from apis/purview/v1beta1/zz_generated.conversion_hubs.go rename to apis/purview/v1beta2/zz_generated.conversion_hubs.go index 8298b14a9..05ad8f2dd 100755 --- a/apis/purview/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/purview/v1beta2/zz_generated.conversion_hubs.go @@ -4,7 +4,7 @@ // Code generated by upjet. DO NOT EDIT. -package v1beta1 +package v1beta2 // Hub marks this type as a conversion hub. func (tr *Account) Hub() {} diff --git a/apis/purview/v1beta2/zz_generated.deepcopy.go b/apis/purview/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..5c780f96f --- /dev/null +++ b/apis/purview/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,491 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Account) DeepCopyInto(out *Account) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Account. +func (in *Account) DeepCopy() *Account { + if in == nil { + return nil + } + out := new(Account) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Account) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AccountInitParameters) DeepCopyInto(out *AccountInitParameters) { + *out = *in + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ManagedResourceGroupName != nil { + in, out := &in.ManagedResourceGroupName, &out.ManagedResourceGroupName + *out = new(string) + **out = **in + } + if in.ManagedResourceGroupNameRef != nil { + in, out := &in.ManagedResourceGroupNameRef, &out.ManagedResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ManagedResourceGroupNameSelector != nil { + in, out := &in.ManagedResourceGroupNameSelector, &out.ManagedResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PublicNetworkEnabled != nil { + in, out := &in.PublicNetworkEnabled, &out.PublicNetworkEnabled + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountInitParameters. +func (in *AccountInitParameters) DeepCopy() *AccountInitParameters { + if in == nil { + return nil + } + out := new(AccountInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AccountList) DeepCopyInto(out *AccountList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Account, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountList. +func (in *AccountList) DeepCopy() *AccountList { + if in == nil { + return nil + } + out := new(AccountList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AccountList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccountObservation) DeepCopyInto(out *AccountObservation) { + *out = *in + if in.CatalogEndpoint != nil { + in, out := &in.CatalogEndpoint, &out.CatalogEndpoint + *out = new(string) + **out = **in + } + if in.GuardianEndpoint != nil { + in, out := &in.GuardianEndpoint, &out.GuardianEndpoint + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ManagedResourceGroupName != nil { + in, out := &in.ManagedResourceGroupName, &out.ManagedResourceGroupName + *out = new(string) + **out = **in + } + if in.ManagedResources != nil { + in, out := &in.ManagedResources, &out.ManagedResources + *out = make([]ManagedResourcesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PublicNetworkEnabled != nil { + in, out 
:= &in.PublicNetworkEnabled, &out.PublicNetworkEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ScanEndpoint != nil { + in, out := &in.ScanEndpoint, &out.ScanEndpoint + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountObservation. +func (in *AccountObservation) DeepCopy() *AccountObservation { + if in == nil { + return nil + } + out := new(AccountObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AccountParameters) DeepCopyInto(out *AccountParameters) { + *out = *in + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ManagedResourceGroupName != nil { + in, out := &in.ManagedResourceGroupName, &out.ManagedResourceGroupName + *out = new(string) + **out = **in + } + if in.ManagedResourceGroupNameRef != nil { + in, out := &in.ManagedResourceGroupNameRef, &out.ManagedResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ManagedResourceGroupNameSelector != nil { + in, out := &in.ManagedResourceGroupNameSelector, &out.ManagedResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PublicNetworkEnabled != nil { + in, out := &in.PublicNetworkEnabled, &out.PublicNetworkEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountParameters. 
+func (in *AccountParameters) DeepCopy() *AccountParameters { + if in == nil { + return nil + } + out := new(AccountParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccountSpec) DeepCopyInto(out *AccountSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountSpec. +func (in *AccountSpec) DeepCopy() *AccountSpec { + if in == nil { + return nil + } + out := new(AccountSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccountStatus) DeepCopyInto(out *AccountStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountStatus. +func (in *AccountStatus) DeepCopy() *AccountStatus { + if in == nil { + return nil + } + out := new(AccountStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityInitParameters) DeepCopyInto(out *IdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityInitParameters. 
+func (in *IdentityInitParameters) DeepCopy() *IdentityInitParameters { + if in == nil { + return nil + } + out := new(IdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityObservation) DeepCopyInto(out *IdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityObservation. +func (in *IdentityObservation) DeepCopy() *IdentityObservation { + if in == nil { + return nil + } + out := new(IdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityParameters) DeepCopyInto(out *IdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityParameters. 
+func (in *IdentityParameters) DeepCopy() *IdentityParameters { + if in == nil { + return nil + } + out := new(IdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedResourcesInitParameters) DeepCopyInto(out *ManagedResourcesInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedResourcesInitParameters. +func (in *ManagedResourcesInitParameters) DeepCopy() *ManagedResourcesInitParameters { + if in == nil { + return nil + } + out := new(ManagedResourcesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedResourcesObservation) DeepCopyInto(out *ManagedResourcesObservation) { + *out = *in + if in.EventHubNamespaceID != nil { + in, out := &in.EventHubNamespaceID, &out.EventHubNamespaceID + *out = new(string) + **out = **in + } + if in.ResourceGroupID != nil { + in, out := &in.ResourceGroupID, &out.ResourceGroupID + *out = new(string) + **out = **in + } + if in.StorageAccountID != nil { + in, out := &in.StorageAccountID, &out.StorageAccountID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedResourcesObservation. +func (in *ManagedResourcesObservation) DeepCopy() *ManagedResourcesObservation { + if in == nil { + return nil + } + out := new(ManagedResourcesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ManagedResourcesParameters) DeepCopyInto(out *ManagedResourcesParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedResourcesParameters. +func (in *ManagedResourcesParameters) DeepCopy() *ManagedResourcesParameters { + if in == nil { + return nil + } + out := new(ManagedResourcesParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/purview/v1beta2/zz_generated.managed.go b/apis/purview/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..1581c0329 --- /dev/null +++ b/apis/purview/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Account. +func (mg *Account) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Account. +func (mg *Account) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Account. +func (mg *Account) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Account. +func (mg *Account) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Account. +func (mg *Account) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Account. +func (mg *Account) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Account. +func (mg *Account) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) 
+} + +// SetDeletionPolicy of this Account. +func (mg *Account) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Account. +func (mg *Account) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Account. +func (mg *Account) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Account. +func (mg *Account) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Account. +func (mg *Account) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/purview/v1beta2/zz_generated.managedlist.go b/apis/purview/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..619b13d9e --- /dev/null +++ b/apis/purview/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this AccountList. +func (l *AccountList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/purview/v1beta2/zz_generated.resolvers.go b/apis/purview/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..7362b943a --- /dev/null +++ b/apis/purview/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,87 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + client "sigs.k8s.io/controller-runtime/pkg/client" + + // ResolveReferences of this Account. + apisresolver "github.com/upbound/provider-azure/internal/apis" +) + +func (mg *Account) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ManagedResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ManagedResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ManagedResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ManagedResourceGroupName") + } + mg.Spec.ForProvider.ManagedResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ManagedResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: 
mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ManagedResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ManagedResourceGroupNameRef, + Selector: mg.Spec.InitProvider.ManagedResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ManagedResourceGroupName") + } + mg.Spec.InitProvider.ManagedResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ManagedResourceGroupNameRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/purview/v1beta2/zz_groupversion_info.go b/apis/purview/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..1ddec40ff --- /dev/null +++ b/apis/purview/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=purview.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
+const ( + CRDGroup = "purview.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/recoveryservices/v1beta1/zz_backupcontainerstorageaccount_types.go b/apis/recoveryservices/v1beta1/zz_backupcontainerstorageaccount_types.go index e8c5b0595..c15d0cb69 100755 --- a/apis/recoveryservices/v1beta1/zz_backupcontainerstorageaccount_types.go +++ b/apis/recoveryservices/v1beta1/zz_backupcontainerstorageaccount_types.go @@ -16,7 +16,7 @@ import ( type BackupContainerStorageAccountInitParameters struct { // The name of the vault where the storage account will be registered. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/recoveryservices/v1beta1.Vault + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/recoveryservices/v1beta2.Vault RecoveryVaultName *string `json:"recoveryVaultName,omitempty" tf:"recovery_vault_name,omitempty"` // Reference to a Vault in recoveryservices to populate recoveryVaultName. @@ -40,7 +40,7 @@ type BackupContainerStorageAccountInitParameters struct { ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` // The ID of the Storage Account to be registered Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` @@ -71,7 +71,7 @@ type BackupContainerStorageAccountObservation struct { type BackupContainerStorageAccountParameters struct { // The name of the vault where the storage account will be registered. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/recoveryservices/v1beta1.Vault + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/recoveryservices/v1beta2.Vault // +kubebuilder:validation:Optional RecoveryVaultName *string `json:"recoveryVaultName,omitempty" tf:"recovery_vault_name,omitempty"` @@ -97,7 +97,7 @@ type BackupContainerStorageAccountParameters struct { ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` // The ID of the Storage Account to be registered Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` diff --git a/apis/recoveryservices/v1beta1/zz_backupprotectedfileshare_types.go b/apis/recoveryservices/v1beta1/zz_backupprotectedfileshare_types.go index 40ff8ed95..b32c7163c 100755 --- a/apis/recoveryservices/v1beta1/zz_backupprotectedfileshare_types.go +++ b/apis/recoveryservices/v1beta1/zz_backupprotectedfileshare_types.go @@ -16,7 +16,7 @@ import ( type BackupProtectedFileShareInitParameters struct { // Specifies the ID of the backup policy to use. The policy must be an Azure File Share backup policy. Other types are not supported. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/recoveryservices/v1beta1.BackupPolicyFileShare + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/recoveryservices/v1beta2.BackupPolicyFileShare // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() BackupPolicyID *string `json:"backupPolicyId,omitempty" tf:"backup_policy_id,omitempty"` @@ -29,7 +29,7 @@ type BackupProtectedFileShareInitParameters struct { BackupPolicyIDSelector *v1.Selector `json:"backupPolicyIdSelector,omitempty" tf:"-"` // Specifies the name of the Recovery Services Vault to use. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/recoveryservices/v1beta1.Vault + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/recoveryservices/v1beta2.Vault RecoveryVaultName *string `json:"recoveryVaultName,omitempty" tf:"recovery_vault_name,omitempty"` // Reference to a Vault in recoveryservices to populate recoveryVaultName. @@ -102,7 +102,7 @@ type BackupProtectedFileShareObservation struct { type BackupProtectedFileShareParameters struct { // Specifies the ID of the backup policy to use. The policy must be an Azure File Share backup policy. Other types are not supported. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/recoveryservices/v1beta1.BackupPolicyFileShare + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/recoveryservices/v1beta2.BackupPolicyFileShare // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional BackupPolicyID *string `json:"backupPolicyId,omitempty" tf:"backup_policy_id,omitempty"` @@ -116,7 +116,7 @@ type BackupProtectedFileShareParameters struct { BackupPolicyIDSelector *v1.Selector `json:"backupPolicyIdSelector,omitempty" tf:"-"` // Specifies the name of the Recovery Services Vault to use. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/recoveryservices/v1beta1.Vault + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/recoveryservices/v1beta2.Vault // +kubebuilder:validation:Optional RecoveryVaultName *string `json:"recoveryVaultName,omitempty" tf:"recovery_vault_name,omitempty"` diff --git a/apis/recoveryservices/v1beta1/zz_backupprotectedvm_types.go b/apis/recoveryservices/v1beta1/zz_backupprotectedvm_types.go index b05d5c545..204a2ad61 100755 --- a/apis/recoveryservices/v1beta1/zz_backupprotectedvm_types.go +++ b/apis/recoveryservices/v1beta1/zz_backupprotectedvm_types.go @@ -16,7 +16,7 @@ import ( type BackupProtectedVMInitParameters struct { // Specifies the id of the backup policy to use. Required in creation or when protection_stopped is not specified. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/recoveryservices/v1beta1.BackupPolicyVM + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/recoveryservices/v1beta2.BackupPolicyVM // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() BackupPolicyID *string `json:"backupPolicyId,omitempty" tf:"backup_policy_id,omitempty"` @@ -40,7 +40,7 @@ type BackupProtectedVMInitParameters struct { ProtectionState *string `json:"protectionState,omitempty" tf:"protection_state,omitempty"` // Specifies the name of the Recovery Services Vault to use. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/recoveryservices/v1beta1.Vault + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/recoveryservices/v1beta2.Vault RecoveryVaultName *string `json:"recoveryVaultName,omitempty" tf:"recovery_vault_name,omitempty"` // Reference to a Vault in recoveryservices to populate recoveryVaultName. 
@@ -99,7 +99,7 @@ type BackupProtectedVMObservation struct { type BackupProtectedVMParameters struct { // Specifies the id of the backup policy to use. Required in creation or when protection_stopped is not specified. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/recoveryservices/v1beta1.BackupPolicyVM + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/recoveryservices/v1beta2.BackupPolicyVM // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional BackupPolicyID *string `json:"backupPolicyId,omitempty" tf:"backup_policy_id,omitempty"` @@ -127,7 +127,7 @@ type BackupProtectedVMParameters struct { ProtectionState *string `json:"protectionState,omitempty" tf:"protection_state,omitempty"` // Specifies the name of the Recovery Services Vault to use. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/recoveryservices/v1beta1.Vault + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/recoveryservices/v1beta2.Vault // +kubebuilder:validation:Optional RecoveryVaultName *string `json:"recoveryVaultName,omitempty" tf:"recovery_vault_name,omitempty"` diff --git a/apis/recoveryservices/v1beta1/zz_generated.conversion_hubs.go b/apis/recoveryservices/v1beta1/zz_generated.conversion_hubs.go index 743964a48..7f80b71d3 100755 --- a/apis/recoveryservices/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/recoveryservices/v1beta1/zz_generated.conversion_hubs.go @@ -9,24 +9,12 @@ package v1beta1 // Hub marks this type as a conversion hub. func (tr *BackupContainerStorageAccount) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *BackupPolicyFileShare) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *BackupPolicyVM) Hub() {} - -// Hub marks this type as a conversion hub. 
-func (tr *BackupPolicyVMWorkload) Hub() {} - // Hub marks this type as a conversion hub. func (tr *BackupProtectedFileShare) Hub() {} // Hub marks this type as a conversion hub. func (tr *BackupProtectedVM) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *Vault) Hub() {} - // Hub marks this type as a conversion hub. func (tr *SiteRecoveryFabric) Hub() {} @@ -36,8 +24,5 @@ func (tr *SiteRecoveryNetworkMapping) Hub() {} // Hub marks this type as a conversion hub. func (tr *SiteRecoveryProtectionContainer) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *SiteRecoveryProtectionContainerMapping) Hub() {} - // Hub marks this type as a conversion hub. func (tr *SiteRecoveryReplicationPolicy) Hub() {} diff --git a/apis/recoveryservices/v1beta1/zz_generated.conversion_spokes.go b/apis/recoveryservices/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..0e253f243 --- /dev/null +++ b/apis/recoveryservices/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,114 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this BackupPolicyFileShare to the hub type. +func (tr *BackupPolicyFileShare) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the BackupPolicyFileShare type. 
+func (tr *BackupPolicyFileShare) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this BackupPolicyVM to the hub type. +func (tr *BackupPolicyVM) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the BackupPolicyVM type. +func (tr *BackupPolicyVM) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this BackupPolicyVMWorkload to the hub type. +func (tr *BackupPolicyVMWorkload) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the BackupPolicyVMWorkload type. 
+func (tr *BackupPolicyVMWorkload) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this SiteRecoveryProtectionContainerMapping to the hub type. +func (tr *SiteRecoveryProtectionContainerMapping) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the SiteRecoveryProtectionContainerMapping type. +func (tr *SiteRecoveryProtectionContainerMapping) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Vault to the hub type. 
+func (tr *Vault) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Vault type. +func (tr *Vault) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/recoveryservices/v1beta1/zz_generated.resolvers.go b/apis/recoveryservices/v1beta1/zz_generated.resolvers.go index 269fa32e7..34c6a406a 100644 --- a/apis/recoveryservices/v1beta1/zz_generated.resolvers.go +++ b/apis/recoveryservices/v1beta1/zz_generated.resolvers.go @@ -26,7 +26,7 @@ func (mg *BackupContainerStorageAccount) ResolveReferences( // ResolveReferences var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("recoveryservices.azure.upbound.io", "v1beta1", "Vault", "VaultList") + m, l, err = apisresolver.GetManagedResource("recoveryservices.azure.upbound.io", "v1beta2", "Vault", "VaultList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -64,7 +64,7 @@ func (mg *BackupContainerStorageAccount) ResolveReferences( // ResolveReferences mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = 
apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -83,7 +83,7 @@ func (mg *BackupContainerStorageAccount) ResolveReferences( // ResolveReferences mg.Spec.ForProvider.StorageAccountID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.StorageAccountIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("recoveryservices.azure.upbound.io", "v1beta1", "Vault", "VaultList") + m, l, err = apisresolver.GetManagedResource("recoveryservices.azure.upbound.io", "v1beta2", "Vault", "VaultList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -121,7 +121,7 @@ func (mg *BackupContainerStorageAccount) ResolveReferences( // ResolveReferences mg.Spec.InitProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -302,7 +302,7 @@ func (mg *BackupProtectedFileShare) ResolveReferences(ctx context.Context, c cli var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("recoveryservices.azure.upbound.io", "v1beta1", "BackupPolicyFileShare", "BackupPolicyFileShareList") + m, l, err = apisresolver.GetManagedResource("recoveryservices.azure.upbound.io", "v1beta2", "BackupPolicyFileShare", 
"BackupPolicyFileShareList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -321,7 +321,7 @@ func (mg *BackupProtectedFileShare) ResolveReferences(ctx context.Context, c cli mg.Spec.ForProvider.BackupPolicyID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.BackupPolicyIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("recoveryservices.azure.upbound.io", "v1beta1", "Vault", "VaultList") + m, l, err = apisresolver.GetManagedResource("recoveryservices.azure.upbound.io", "v1beta2", "Vault", "VaultList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -397,7 +397,7 @@ func (mg *BackupProtectedFileShare) ResolveReferences(ctx context.Context, c cli mg.Spec.ForProvider.SourceStorageAccountID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.SourceStorageAccountIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("recoveryservices.azure.upbound.io", "v1beta1", "BackupPolicyFileShare", "BackupPolicyFileShareList") + m, l, err = apisresolver.GetManagedResource("recoveryservices.azure.upbound.io", "v1beta2", "BackupPolicyFileShare", "BackupPolicyFileShareList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -416,7 +416,7 @@ func (mg *BackupProtectedFileShare) ResolveReferences(ctx context.Context, c cli mg.Spec.InitProvider.BackupPolicyID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.BackupPolicyIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("recoveryservices.azure.upbound.io", "v1beta1", "Vault", "VaultList") + m, l, err = apisresolver.GetManagedResource("recoveryservices.azure.upbound.io", "v1beta2", "Vault", "VaultList") if err != nil { return errors.Wrap(err, "failed to get the 
reference target managed resource and its list for reference resolution") } @@ -504,7 +504,7 @@ func (mg *BackupProtectedVM) ResolveReferences(ctx context.Context, c client.Rea var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("recoveryservices.azure.upbound.io", "v1beta1", "BackupPolicyVM", "BackupPolicyVMList") + m, l, err = apisresolver.GetManagedResource("recoveryservices.azure.upbound.io", "v1beta2", "BackupPolicyVM", "BackupPolicyVMList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -523,7 +523,7 @@ func (mg *BackupProtectedVM) ResolveReferences(ctx context.Context, c client.Rea mg.Spec.ForProvider.BackupPolicyID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.BackupPolicyIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("recoveryservices.azure.upbound.io", "v1beta1", "Vault", "VaultList") + m, l, err = apisresolver.GetManagedResource("recoveryservices.azure.upbound.io", "v1beta2", "Vault", "VaultList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -561,7 +561,7 @@ func (mg *BackupProtectedVM) ResolveReferences(ctx context.Context, c client.Rea mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("recoveryservices.azure.upbound.io", "v1beta1", "BackupPolicyVM", "BackupPolicyVMList") + m, l, err = apisresolver.GetManagedResource("recoveryservices.azure.upbound.io", "v1beta2", "BackupPolicyVM", "BackupPolicyVMList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -580,7 +580,7 @@ func (mg *BackupProtectedVM) ResolveReferences(ctx context.Context, c client.Rea 
mg.Spec.InitProvider.BackupPolicyID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.BackupPolicyIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("recoveryservices.azure.upbound.io", "v1beta1", "Vault", "VaultList") + m, l, err = apisresolver.GetManagedResource("recoveryservices.azure.upbound.io", "v1beta2", "Vault", "VaultList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -630,7 +630,7 @@ func (mg *SiteRecoveryFabric) ResolveReferences(ctx context.Context, c client.Re var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("recoveryservices.azure.upbound.io", "v1beta1", "Vault", "VaultList") + m, l, err = apisresolver.GetManagedResource("recoveryservices.azure.upbound.io", "v1beta2", "Vault", "VaultList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -680,7 +680,7 @@ func (mg *SiteRecoveryNetworkMapping) ResolveReferences(ctx context.Context, c c var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("recoveryservices.azure.upbound.io", "v1beta1", "Vault", "VaultList") + m, l, err = apisresolver.GetManagedResource("recoveryservices.azure.upbound.io", "v1beta2", "Vault", "VaultList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -718,7 +718,7 @@ func (mg *SiteRecoveryNetworkMapping) ResolveReferences(ctx context.Context, c c mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "VirtualNetwork", "VirtualNetworkList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", 
"v1beta2", "VirtualNetwork", "VirtualNetworkList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -737,7 +737,7 @@ func (mg *SiteRecoveryNetworkMapping) ResolveReferences(ctx context.Context, c c mg.Spec.ForProvider.SourceNetworkID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.SourceNetworkIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "VirtualNetwork", "VirtualNetworkList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "VirtualNetwork", "VirtualNetworkList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -756,7 +756,7 @@ func (mg *SiteRecoveryNetworkMapping) ResolveReferences(ctx context.Context, c c mg.Spec.ForProvider.TargetNetworkID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.TargetNetworkIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("recoveryservices.azure.upbound.io", "v1beta1", "Vault", "VaultList") + m, l, err = apisresolver.GetManagedResource("recoveryservices.azure.upbound.io", "v1beta2", "Vault", "VaultList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -794,7 +794,7 @@ func (mg *SiteRecoveryNetworkMapping) ResolveReferences(ctx context.Context, c c mg.Spec.InitProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "VirtualNetwork", "VirtualNetworkList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "VirtualNetwork", "VirtualNetworkList") if err != nil { return errors.Wrap(err, "failed to get the reference 
target managed resource and its list for reference resolution") } @@ -813,7 +813,7 @@ func (mg *SiteRecoveryNetworkMapping) ResolveReferences(ctx context.Context, c c mg.Spec.InitProvider.SourceNetworkID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.SourceNetworkIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "VirtualNetwork", "VirtualNetworkList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "VirtualNetwork", "VirtualNetworkList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -863,7 +863,7 @@ func (mg *SiteRecoveryProtectionContainer) ResolveReferences(ctx context.Context mg.Spec.ForProvider.RecoveryFabricName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.RecoveryFabricNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("recoveryservices.azure.upbound.io", "v1beta1", "Vault", "VaultList") + m, l, err = apisresolver.GetManagedResource("recoveryservices.azure.upbound.io", "v1beta2", "Vault", "VaultList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1077,7 +1077,7 @@ func (mg *SiteRecoveryReplicationPolicy) ResolveReferences(ctx context.Context, var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("recoveryservices.azure.upbound.io", "v1beta1", "Vault", "VaultList") + m, l, err = apisresolver.GetManagedResource("recoveryservices.azure.upbound.io", "v1beta2", "Vault", "VaultList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/recoveryservices/v1beta1/zz_siterecoveryfabric_types.go b/apis/recoveryservices/v1beta1/zz_siterecoveryfabric_types.go index cb709cf3f..5819eb2ae 
100755 --- a/apis/recoveryservices/v1beta1/zz_siterecoveryfabric_types.go +++ b/apis/recoveryservices/v1beta1/zz_siterecoveryfabric_types.go @@ -41,7 +41,7 @@ type SiteRecoveryFabricParameters struct { Location *string `json:"location,omitempty" tf:"location,omitempty"` // The name of the vault that should be updated. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/recoveryservices/v1beta1.Vault + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/recoveryservices/v1beta2.Vault // +kubebuilder:validation:Optional RecoveryVaultName *string `json:"recoveryVaultName,omitempty" tf:"recovery_vault_name,omitempty"` diff --git a/apis/recoveryservices/v1beta1/zz_siterecoverynetworkmapping_types.go b/apis/recoveryservices/v1beta1/zz_siterecoverynetworkmapping_types.go index 49d6f3ada..170006b12 100755 --- a/apis/recoveryservices/v1beta1/zz_siterecoverynetworkmapping_types.go +++ b/apis/recoveryservices/v1beta1/zz_siterecoverynetworkmapping_types.go @@ -19,7 +19,7 @@ type SiteRecoveryNetworkMappingInitParameters struct { Name *string `json:"name,omitempty" tf:"name,omitempty"` // The name of the vault that should be updated. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/recoveryservices/v1beta1.Vault + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/recoveryservices/v1beta2.Vault RecoveryVaultName *string `json:"recoveryVaultName,omitempty" tf:"recovery_vault_name,omitempty"` // Reference to a Vault in recoveryservices to populate recoveryVaultName. @@ -43,7 +43,7 @@ type SiteRecoveryNetworkMappingInitParameters struct { ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` // The id of the primary network. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.VirtualNetwork + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.VirtualNetwork // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() SourceNetworkID *string `json:"sourceNetworkId,omitempty" tf:"source_network_id,omitempty"` @@ -59,7 +59,7 @@ type SiteRecoveryNetworkMappingInitParameters struct { SourceRecoveryFabricName *string `json:"sourceRecoveryFabricName,omitempty" tf:"source_recovery_fabric_name,omitempty"` // The id of the recovery network. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.VirtualNetwork + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.VirtualNetwork // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() TargetNetworkID *string `json:"targetNetworkId,omitempty" tf:"target_network_id,omitempty"` @@ -109,7 +109,7 @@ type SiteRecoveryNetworkMappingParameters struct { Name *string `json:"name,omitempty" tf:"name,omitempty"` // The name of the vault that should be updated. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/recoveryservices/v1beta1.Vault + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/recoveryservices/v1beta2.Vault // +kubebuilder:validation:Optional RecoveryVaultName *string `json:"recoveryVaultName,omitempty" tf:"recovery_vault_name,omitempty"` @@ -135,7 +135,7 @@ type SiteRecoveryNetworkMappingParameters struct { ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` // The id of the primary network. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.VirtualNetwork + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.VirtualNetwork // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional SourceNetworkID *string `json:"sourceNetworkId,omitempty" tf:"source_network_id,omitempty"` @@ -153,7 +153,7 @@ type SiteRecoveryNetworkMappingParameters struct { SourceRecoveryFabricName *string `json:"sourceRecoveryFabricName,omitempty" tf:"source_recovery_fabric_name,omitempty"` // The id of the recovery network. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.VirtualNetwork + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.VirtualNetwork // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional TargetNetworkID *string `json:"targetNetworkId,omitempty" tf:"target_network_id,omitempty"` diff --git a/apis/recoveryservices/v1beta1/zz_siterecoveryprotectioncontainer_types.go b/apis/recoveryservices/v1beta1/zz_siterecoveryprotectioncontainer_types.go index ab9e2c9de..de8aadedd 100755 --- a/apis/recoveryservices/v1beta1/zz_siterecoveryprotectioncontainer_types.go +++ b/apis/recoveryservices/v1beta1/zz_siterecoveryprotectioncontainer_types.go @@ -47,7 +47,7 @@ type SiteRecoveryProtectionContainerParameters struct { RecoveryFabricNameSelector *v1.Selector `json:"recoveryFabricNameSelector,omitempty" tf:"-"` // The name of the vault that should be updated. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/recoveryservices/v1beta1.Vault + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/recoveryservices/v1beta2.Vault // +kubebuilder:validation:Optional RecoveryVaultName *string `json:"recoveryVaultName,omitempty" tf:"recovery_vault_name,omitempty"` diff --git a/apis/recoveryservices/v1beta1/zz_siterecoveryreplicationpolicy_types.go b/apis/recoveryservices/v1beta1/zz_siterecoveryreplicationpolicy_types.go index 80e1a72d7..576fd246b 100755 --- a/apis/recoveryservices/v1beta1/zz_siterecoveryreplicationpolicy_types.go +++ b/apis/recoveryservices/v1beta1/zz_siterecoveryreplicationpolicy_types.go @@ -51,7 +51,7 @@ type SiteRecoveryReplicationPolicyParameters struct { RecoveryPointRetentionInMinutes *float64 `json:"recoveryPointRetentionInMinutes,omitempty" tf:"recovery_point_retention_in_minutes,omitempty"` // The name of the vault that should be updated. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/recoveryservices/v1beta1.Vault + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/recoveryservices/v1beta2.Vault // +kubebuilder:validation:Optional RecoveryVaultName *string `json:"recoveryVaultName,omitempty" tf:"recovery_vault_name,omitempty"` diff --git a/apis/recoveryservices/v1beta2/zz_backuppolicyfileshare_terraformed.go b/apis/recoveryservices/v1beta2/zz_backuppolicyfileshare_terraformed.go new file mode 100755 index 000000000..bdb7a9eb6 --- /dev/null +++ b/apis/recoveryservices/v1beta2/zz_backuppolicyfileshare_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this BackupPolicyFileShare +func (mg *BackupPolicyFileShare) GetTerraformResourceType() string { + return "azurerm_backup_policy_file_share" +} + +// GetConnectionDetailsMapping for this BackupPolicyFileShare +func (tr *BackupPolicyFileShare) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this BackupPolicyFileShare +func (tr *BackupPolicyFileShare) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this BackupPolicyFileShare +func (tr *BackupPolicyFileShare) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this BackupPolicyFileShare +func (tr *BackupPolicyFileShare) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this BackupPolicyFileShare +func (tr *BackupPolicyFileShare) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this BackupPolicyFileShare +func (tr *BackupPolicyFileShare) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this BackupPolicyFileShare +func (tr 
*BackupPolicyFileShare) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this BackupPolicyFileShare +func (tr *BackupPolicyFileShare) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this BackupPolicyFileShare using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *BackupPolicyFileShare) LateInitialize(attrs []byte) (bool, error) { + params := &BackupPolicyFileShareParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *BackupPolicyFileShare) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/recoveryservices/v1beta2/zz_backuppolicyfileshare_types.go b/apis/recoveryservices/v1beta2/zz_backuppolicyfileshare_types.go new file mode 100755 index 000000000..33d2b1858 --- /dev/null +++ b/apis/recoveryservices/v1beta2/zz_backuppolicyfileshare_types.go @@ -0,0 +1,458 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type BackupInitParameters struct { + + // Sets the backup frequency. Possible values are Daily and Hourly. + Frequency *string `json:"frequency,omitempty" tf:"frequency,omitempty"` + + // A hourly block defined as below. This is required when frequency is set to Hourly. + Hourly *HourlyInitParameters `json:"hourly,omitempty" tf:"hourly,omitempty"` + + // The time of day to perform the backup in 24-hour format. Times must be either on the hour or half hour (e.g. 12:00, 12:30, 13:00, etc.) + Time *string `json:"time,omitempty" tf:"time,omitempty"` +} + +type BackupObservation struct { + + // Sets the backup frequency. Possible values are Daily and Hourly. + Frequency *string `json:"frequency,omitempty" tf:"frequency,omitempty"` + + // A hourly block defined as below. This is required when frequency is set to Hourly. + Hourly *HourlyObservation `json:"hourly,omitempty" tf:"hourly,omitempty"` + + // The time of day to perform the backup in 24-hour format. Times must be either on the hour or half hour (e.g. 12:00, 12:30, 13:00, etc.) 
+ Time *string `json:"time,omitempty" tf:"time,omitempty"` +} + +type BackupParameters struct { + + // Sets the backup frequency. Possible values are Daily and Hourly. + // +kubebuilder:validation:Optional + Frequency *string `json:"frequency" tf:"frequency,omitempty"` + + // A hourly block defined as below. This is required when frequency is set to Hourly. + // +kubebuilder:validation:Optional + Hourly *HourlyParameters `json:"hourly,omitempty" tf:"hourly,omitempty"` + + // The time of day to perform the backup in 24-hour format. Times must be either on the hour or half hour (e.g. 12:00, 12:30, 13:00, etc.) + // +kubebuilder:validation:Optional + Time *string `json:"time,omitempty" tf:"time,omitempty"` +} + +type BackupPolicyFileShareInitParameters struct { + + // Configures the Policy backup frequency and times as documented in the backup block below. + Backup *BackupInitParameters `json:"backup,omitempty" tf:"backup,omitempty"` + + // Configures the policy daily retention as documented in the retention_daily block below. + RetentionDaily *RetentionDailyInitParameters `json:"retentionDaily,omitempty" tf:"retention_daily,omitempty"` + + // Configures the policy monthly retention as documented in the retention_monthly block below. + RetentionMonthly *RetentionMonthlyInitParameters `json:"retentionMonthly,omitempty" tf:"retention_monthly,omitempty"` + + // Configures the policy weekly retention as documented in the retention_weekly block below. + RetentionWeekly *RetentionWeeklyInitParameters `json:"retentionWeekly,omitempty" tf:"retention_weekly,omitempty"` + + // Configures the policy yearly retention as documented in the retention_yearly block below. + RetentionYearly *RetentionYearlyInitParameters `json:"retentionYearly,omitempty" tf:"retention_yearly,omitempty"` + + // Specifies the timezone. the possible values are defined here. 
Defaults to UTC + Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` +} + +type BackupPolicyFileShareObservation struct { + + // Configures the Policy backup frequency and times as documented in the backup block below. + Backup *BackupObservation `json:"backup,omitempty" tf:"backup,omitempty"` + + // The ID of the Azure File Share Backup Policy. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies the name of the Recovery Services Vault to use. Changing this forces a new resource to be created. + RecoveryVaultName *string `json:"recoveryVaultName,omitempty" tf:"recovery_vault_name,omitempty"` + + // The name of the resource group in which to create the policy. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Configures the policy daily retention as documented in the retention_daily block below. + RetentionDaily *RetentionDailyObservation `json:"retentionDaily,omitempty" tf:"retention_daily,omitempty"` + + // Configures the policy monthly retention as documented in the retention_monthly block below. + RetentionMonthly *RetentionMonthlyObservation `json:"retentionMonthly,omitempty" tf:"retention_monthly,omitempty"` + + // Configures the policy weekly retention as documented in the retention_weekly block below. + RetentionWeekly *RetentionWeeklyObservation `json:"retentionWeekly,omitempty" tf:"retention_weekly,omitempty"` + + // Configures the policy yearly retention as documented in the retention_yearly block below. + RetentionYearly *RetentionYearlyObservation `json:"retentionYearly,omitempty" tf:"retention_yearly,omitempty"` + + // Specifies the timezone. the possible values are defined here. Defaults to UTC + Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` +} + +type BackupPolicyFileShareParameters struct { + + // Configures the Policy backup frequency and times as documented in the backup block below. 
+ // +kubebuilder:validation:Optional + Backup *BackupParameters `json:"backup,omitempty" tf:"backup,omitempty"` + + // Specifies the name of the Recovery Services Vault to use. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/recoveryservices/v1beta2.Vault + // +kubebuilder:validation:Optional + RecoveryVaultName *string `json:"recoveryVaultName,omitempty" tf:"recovery_vault_name,omitempty"` + + // Reference to a Vault in recoveryservices to populate recoveryVaultName. + // +kubebuilder:validation:Optional + RecoveryVaultNameRef *v1.Reference `json:"recoveryVaultNameRef,omitempty" tf:"-"` + + // Selector for a Vault in recoveryservices to populate recoveryVaultName. + // +kubebuilder:validation:Optional + RecoveryVaultNameSelector *v1.Selector `json:"recoveryVaultNameSelector,omitempty" tf:"-"` + + // The name of the resource group in which to create the policy. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // Configures the policy daily retention as documented in the retention_daily block below. + // +kubebuilder:validation:Optional + RetentionDaily *RetentionDailyParameters `json:"retentionDaily,omitempty" tf:"retention_daily,omitempty"` + + // Configures the policy monthly retention as documented in the retention_monthly block below. 
+ // +kubebuilder:validation:Optional + RetentionMonthly *RetentionMonthlyParameters `json:"retentionMonthly,omitempty" tf:"retention_monthly,omitempty"` + + // Configures the policy weekly retention as documented in the retention_weekly block below. + // +kubebuilder:validation:Optional + RetentionWeekly *RetentionWeeklyParameters `json:"retentionWeekly,omitempty" tf:"retention_weekly,omitempty"` + + // Configures the policy yearly retention as documented in the retention_yearly block below. + // +kubebuilder:validation:Optional + RetentionYearly *RetentionYearlyParameters `json:"retentionYearly,omitempty" tf:"retention_yearly,omitempty"` + + // Specifies the timezone. the possible values are defined here. Defaults to UTC + // +kubebuilder:validation:Optional + Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` +} + +type HourlyInitParameters struct { + + // Specifies the interval at which backup needs to be triggered. Possible values are 4, 6, 8 and 12. + Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` + + // Specifies the start time of the hourly backup. The time format should be in 24-hour format. Times must be either on the hour or half hour (e.g. 12:00, 12:30, 13:00, etc.). + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` + + // Species the duration of the backup window in hours. Details could be found here. + WindowDuration *float64 `json:"windowDuration,omitempty" tf:"window_duration,omitempty"` +} + +type HourlyObservation struct { + + // Specifies the interval at which backup needs to be triggered. Possible values are 4, 6, 8 and 12. + Interval *float64 `json:"interval,omitempty" tf:"interval,omitempty"` + + // Specifies the start time of the hourly backup. The time format should be in 24-hour format. Times must be either on the hour or half hour (e.g. 12:00, 12:30, 13:00, etc.). 
+ StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` + + // Species the duration of the backup window in hours. Details could be found here. + WindowDuration *float64 `json:"windowDuration,omitempty" tf:"window_duration,omitempty"` +} + +type HourlyParameters struct { + + // Specifies the interval at which backup needs to be triggered. Possible values are 4, 6, 8 and 12. + // +kubebuilder:validation:Optional + Interval *float64 `json:"interval" tf:"interval,omitempty"` + + // Specifies the start time of the hourly backup. The time format should be in 24-hour format. Times must be either on the hour or half hour (e.g. 12:00, 12:30, 13:00, etc.). + // +kubebuilder:validation:Optional + StartTime *string `json:"startTime" tf:"start_time,omitempty"` + + // Species the duration of the backup window in hours. Details could be found here. + // +kubebuilder:validation:Optional + WindowDuration *float64 `json:"windowDuration" tf:"window_duration,omitempty"` +} + +type RetentionDailyInitParameters struct { + + // The number of daily backups to keep. Must be between 1 and 200 (inclusive) + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` +} + +type RetentionDailyObservation struct { + + // The number of daily backups to keep. Must be between 1 and 200 (inclusive) + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` +} + +type RetentionDailyParameters struct { + + // The number of daily backups to keep. Must be between 1 and 200 (inclusive) + // +kubebuilder:validation:Optional + Count *float64 `json:"count" tf:"count,omitempty"` +} + +type RetentionMonthlyInitParameters struct { + + // The number of monthly backups to keep. Must be between 1 and 120 + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` + + // The days of the month to retain backups of. Must be between 1 and 31. + // +listType=set + Days []*float64 `json:"days,omitempty" tf:"days,omitempty"` + + // Including the last day of the month, default to false. 
+ IncludeLastDays *bool `json:"includeLastDays,omitempty" tf:"include_last_days,omitempty"` + + // The weekday backups to retain . Must be one of Sunday, Monday, Tuesday, Wednesday, Thursday, Friday or Saturday. + // +listType=set + Weekdays []*string `json:"weekdays,omitempty" tf:"weekdays,omitempty"` + + // The weeks of the month to retain backups of. Must be one of First, Second, Third, Fourth, Last. + // +listType=set + Weeks []*string `json:"weeks,omitempty" tf:"weeks,omitempty"` +} + +type RetentionMonthlyObservation struct { + + // The number of monthly backups to keep. Must be between 1 and 120 + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` + + // The days of the month to retain backups of. Must be between 1 and 31. + // +listType=set + Days []*float64 `json:"days,omitempty" tf:"days,omitempty"` + + // Including the last day of the month, default to false. + IncludeLastDays *bool `json:"includeLastDays,omitempty" tf:"include_last_days,omitempty"` + + // The weekday backups to retain . Must be one of Sunday, Monday, Tuesday, Wednesday, Thursday, Friday or Saturday. + // +listType=set + Weekdays []*string `json:"weekdays,omitempty" tf:"weekdays,omitempty"` + + // The weeks of the month to retain backups of. Must be one of First, Second, Third, Fourth, Last. + // +listType=set + Weeks []*string `json:"weeks,omitempty" tf:"weeks,omitempty"` +} + +type RetentionMonthlyParameters struct { + + // The number of monthly backups to keep. Must be between 1 and 120 + // +kubebuilder:validation:Optional + Count *float64 `json:"count" tf:"count,omitempty"` + + // The days of the month to retain backups of. Must be between 1 and 31. + // +kubebuilder:validation:Optional + // +listType=set + Days []*float64 `json:"days,omitempty" tf:"days,omitempty"` + + // Including the last day of the month, default to false. 
+ // +kubebuilder:validation:Optional + IncludeLastDays *bool `json:"includeLastDays,omitempty" tf:"include_last_days,omitempty"` + + // The weekday backups to retain . Must be one of Sunday, Monday, Tuesday, Wednesday, Thursday, Friday or Saturday. + // +kubebuilder:validation:Optional + // +listType=set + Weekdays []*string `json:"weekdays,omitempty" tf:"weekdays,omitempty"` + + // The weeks of the month to retain backups of. Must be one of First, Second, Third, Fourth, Last. + // +kubebuilder:validation:Optional + // +listType=set + Weeks []*string `json:"weeks,omitempty" tf:"weeks,omitempty"` +} + +type RetentionWeeklyInitParameters struct { + + // The number of daily backups to keep. Must be between 1 and 200 (inclusive) + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` + + // The weekday backups to retain. Must be one of Sunday, Monday, Tuesday, Wednesday, Thursday, Friday or Saturday. + // +listType=set + Weekdays []*string `json:"weekdays,omitempty" tf:"weekdays,omitempty"` +} + +type RetentionWeeklyObservation struct { + + // The number of daily backups to keep. Must be between 1 and 200 (inclusive) + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` + + // The weekday backups to retain. Must be one of Sunday, Monday, Tuesday, Wednesday, Thursday, Friday or Saturday. + // +listType=set + Weekdays []*string `json:"weekdays,omitempty" tf:"weekdays,omitempty"` +} + +type RetentionWeeklyParameters struct { + + // The number of daily backups to keep. Must be between 1 and 200 (inclusive) + // +kubebuilder:validation:Optional + Count *float64 `json:"count" tf:"count,omitempty"` + + // The weekday backups to retain. Must be one of Sunday, Monday, Tuesday, Wednesday, Thursday, Friday or Saturday. + // +kubebuilder:validation:Optional + // +listType=set + Weekdays []*string `json:"weekdays" tf:"weekdays,omitempty"` +} + +type RetentionYearlyInitParameters struct { + + // The number of yearly backups to keep. 
Must be between 1 and 10 + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` + + // The days of the month to retain backups of. Must be between 1 and 31. + // +listType=set + Days []*float64 `json:"days,omitempty" tf:"days,omitempty"` + + // Including the last day of the month, default to false. + IncludeLastDays *bool `json:"includeLastDays,omitempty" tf:"include_last_days,omitempty"` + + // The months of the year to retain backups of. Must be one of January, February, March, April, May, June, July, Augest, September, October, November and December. + // +listType=set + Months []*string `json:"months,omitempty" tf:"months,omitempty"` + + // The weekday backups to retain . Must be one of Sunday, Monday, Tuesday, Wednesday, Thursday, Friday or Saturday. + // +listType=set + Weekdays []*string `json:"weekdays,omitempty" tf:"weekdays,omitempty"` + + // The weeks of the month to retain backups of. Must be one of First, Second, Third, Fourth, Last. + // +listType=set + Weeks []*string `json:"weeks,omitempty" tf:"weeks,omitempty"` +} + +type RetentionYearlyObservation struct { + + // The number of yearly backups to keep. Must be between 1 and 10 + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` + + // The days of the month to retain backups of. Must be between 1 and 31. + // +listType=set + Days []*float64 `json:"days,omitempty" tf:"days,omitempty"` + + // Including the last day of the month, default to false. + IncludeLastDays *bool `json:"includeLastDays,omitempty" tf:"include_last_days,omitempty"` + + // The months of the year to retain backups of. Must be one of January, February, March, April, May, June, July, Augest, September, October, November and December. + // +listType=set + Months []*string `json:"months,omitempty" tf:"months,omitempty"` + + // The weekday backups to retain . Must be one of Sunday, Monday, Tuesday, Wednesday, Thursday, Friday or Saturday. 
+ // +listType=set + Weekdays []*string `json:"weekdays,omitempty" tf:"weekdays,omitempty"` + + // The weeks of the month to retain backups of. Must be one of First, Second, Third, Fourth, Last. + // +listType=set + Weeks []*string `json:"weeks,omitempty" tf:"weeks,omitempty"` +} + +type RetentionYearlyParameters struct { + + // The number of yearly backups to keep. Must be between 1 and 10 + // +kubebuilder:validation:Optional + Count *float64 `json:"count" tf:"count,omitempty"` + + // The days of the month to retain backups of. Must be between 1 and 31. + // +kubebuilder:validation:Optional + // +listType=set + Days []*float64 `json:"days,omitempty" tf:"days,omitempty"` + + // Including the last day of the month, default to false. + // +kubebuilder:validation:Optional + IncludeLastDays *bool `json:"includeLastDays,omitempty" tf:"include_last_days,omitempty"` + + // The months of the year to retain backups of. Must be one of January, February, March, April, May, June, July, Augest, September, October, November and December. + // +kubebuilder:validation:Optional + // +listType=set + Months []*string `json:"months" tf:"months,omitempty"` + + // The weekday backups to retain . Must be one of Sunday, Monday, Tuesday, Wednesday, Thursday, Friday or Saturday. + // +kubebuilder:validation:Optional + // +listType=set + Weekdays []*string `json:"weekdays,omitempty" tf:"weekdays,omitempty"` + + // The weeks of the month to retain backups of. Must be one of First, Second, Third, Fourth, Last. + // +kubebuilder:validation:Optional + // +listType=set + Weeks []*string `json:"weeks,omitempty" tf:"weeks,omitempty"` +} + +// BackupPolicyFileShareSpec defines the desired state of BackupPolicyFileShare +type BackupPolicyFileShareSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider BackupPolicyFileShareParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. 
+ // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider BackupPolicyFileShareInitParameters `json:"initProvider,omitempty"` +} + +// BackupPolicyFileShareStatus defines the observed state of BackupPolicyFileShare. +type BackupPolicyFileShareStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider BackupPolicyFileShareObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// BackupPolicyFileShare is the Schema for the BackupPolicyFileShares API. Manages an Azure File Share Backup Policy. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type BackupPolicyFileShare struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.backup) || (has(self.initProvider) && has(self.initProvider.backup))",message="spec.forProvider.backup is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.retentionDaily) || (has(self.initProvider) && has(self.initProvider.retentionDaily))",message="spec.forProvider.retentionDaily is a required parameter" + Spec BackupPolicyFileShareSpec `json:"spec"` + Status BackupPolicyFileShareStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// BackupPolicyFileShareList contains a list of BackupPolicyFileShares +type BackupPolicyFileShareList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []BackupPolicyFileShare `json:"items"` +} + +// Repository type metadata. +var ( + BackupPolicyFileShare_Kind = "BackupPolicyFileShare" + BackupPolicyFileShare_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: BackupPolicyFileShare_Kind}.String() + BackupPolicyFileShare_KindAPIVersion = BackupPolicyFileShare_Kind + "." 
+ CRDGroupVersion.String() + BackupPolicyFileShare_GroupVersionKind = CRDGroupVersion.WithKind(BackupPolicyFileShare_Kind) +) + +func init() { + SchemeBuilder.Register(&BackupPolicyFileShare{}, &BackupPolicyFileShareList{}) +} diff --git a/apis/recoveryservices/v1beta2/zz_backuppolicyvm_terraformed.go b/apis/recoveryservices/v1beta2/zz_backuppolicyvm_terraformed.go new file mode 100755 index 000000000..297f54948 --- /dev/null +++ b/apis/recoveryservices/v1beta2/zz_backuppolicyvm_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this BackupPolicyVM +func (mg *BackupPolicyVM) GetTerraformResourceType() string { + return "azurerm_backup_policy_vm" +} + +// GetConnectionDetailsMapping for this BackupPolicyVM +func (tr *BackupPolicyVM) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this BackupPolicyVM +func (tr *BackupPolicyVM) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this BackupPolicyVM +func (tr *BackupPolicyVM) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this BackupPolicyVM +func (tr *BackupPolicyVM) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this BackupPolicyVM +func (tr 
*BackupPolicyVM) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this BackupPolicyVM +func (tr *BackupPolicyVM) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this BackupPolicyVM +func (tr *BackupPolicyVM) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this BackupPolicyVM +func (tr *BackupPolicyVM) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this BackupPolicyVM using its observed tfState. 
+// returns True if there are any spec changes for the resource. +func (tr *BackupPolicyVM) LateInitialize(attrs []byte) (bool, error) { + params := &BackupPolicyVMParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *BackupPolicyVM) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/recoveryservices/v1beta2/zz_backuppolicyvm_types.go b/apis/recoveryservices/v1beta2/zz_backuppolicyvm_types.go new file mode 100755 index 000000000..781f866e1 --- /dev/null +++ b/apis/recoveryservices/v1beta2/zz_backuppolicyvm_types.go @@ -0,0 +1,500 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type BackupPolicyVMBackupInitParameters struct { + + // Sets the backup frequency. Possible values are Hourly, Daily and Weekly. + Frequency *string `json:"frequency,omitempty" tf:"frequency,omitempty"` + + // Duration of the backup window in hours. Possible values are between 4 and 24 This is used when frequency is Hourly. + HourDuration *float64 `json:"hourDuration,omitempty" tf:"hour_duration,omitempty"` + + // Interval in hour at which backup is triggered. Possible values are 4, 6, 8 and 12. This is used when frequency is Hourly. 
+ HourInterval *float64 `json:"hourInterval,omitempty" tf:"hour_interval,omitempty"` + + // The time of day to perform the backup in 24hour format. + Time *string `json:"time,omitempty" tf:"time,omitempty"` + + // The days of the week to perform backups on. Must be one of Sunday, Monday, Tuesday, Wednesday, Thursday, Friday or Saturday. This is used when frequency is Weekly. + // +listType=set + Weekdays []*string `json:"weekdays,omitempty" tf:"weekdays,omitempty"` +} + +type BackupPolicyVMBackupObservation struct { + + // Sets the backup frequency. Possible values are Hourly, Daily and Weekly. + Frequency *string `json:"frequency,omitempty" tf:"frequency,omitempty"` + + // Duration of the backup window in hours. Possible values are between 4 and 24 This is used when frequency is Hourly. + HourDuration *float64 `json:"hourDuration,omitempty" tf:"hour_duration,omitempty"` + + // Interval in hour at which backup is triggered. Possible values are 4, 6, 8 and 12. This is used when frequency is Hourly. + HourInterval *float64 `json:"hourInterval,omitempty" tf:"hour_interval,omitempty"` + + // The time of day to perform the backup in 24hour format. + Time *string `json:"time,omitempty" tf:"time,omitempty"` + + // The days of the week to perform backups on. Must be one of Sunday, Monday, Tuesday, Wednesday, Thursday, Friday or Saturday. This is used when frequency is Weekly. + // +listType=set + Weekdays []*string `json:"weekdays,omitempty" tf:"weekdays,omitempty"` +} + +type BackupPolicyVMBackupParameters struct { + + // Sets the backup frequency. Possible values are Hourly, Daily and Weekly. + // +kubebuilder:validation:Optional + Frequency *string `json:"frequency" tf:"frequency,omitempty"` + + // Duration of the backup window in hours. Possible values are between 4 and 24 This is used when frequency is Hourly. 
+ // +kubebuilder:validation:Optional + HourDuration *float64 `json:"hourDuration,omitempty" tf:"hour_duration,omitempty"` + + // Interval in hour at which backup is triggered. Possible values are 4, 6, 8 and 12. This is used when frequency is Hourly. + // +kubebuilder:validation:Optional + HourInterval *float64 `json:"hourInterval,omitempty" tf:"hour_interval,omitempty"` + + // The time of day to perform the backup in 24hour format. + // +kubebuilder:validation:Optional + Time *string `json:"time" tf:"time,omitempty"` + + // The days of the week to perform backups on. Must be one of Sunday, Monday, Tuesday, Wednesday, Thursday, Friday or Saturday. This is used when frequency is Weekly. + // +kubebuilder:validation:Optional + // +listType=set + Weekdays []*string `json:"weekdays,omitempty" tf:"weekdays,omitempty"` +} + +type BackupPolicyVMInitParameters struct { + + // Configures the Policy backup frequency, times & days as documented in the backup block below. + Backup *BackupPolicyVMBackupInitParameters `json:"backup,omitempty" tf:"backup,omitempty"` + + // Specifies the instant restore resource group name as documented in the instant_restore_resource_group block below. + InstantRestoreResourceGroup *InstantRestoreResourceGroupInitParameters `json:"instantRestoreResourceGroup,omitempty" tf:"instant_restore_resource_group,omitempty"` + + // Specifies the instant restore retention range in days. Possible values are between 1 and 5 when policy_type is V1, and 1 to 30 when policy_type is V2. + InstantRestoreRetentionDays *float64 `json:"instantRestoreRetentionDays,omitempty" tf:"instant_restore_retention_days,omitempty"` + + // Type of the Backup Policy. Possible values are V1 and V2 where V2 stands for the Enhanced Policy. Defaults to V1. Changing this forces a new resource to be created. + PolicyType *string `json:"policyType,omitempty" tf:"policy_type,omitempty"` + + // Configures the policy daily retention as documented in the retention_daily block below. 
Required when backup frequency is Daily. + RetentionDaily *BackupPolicyVMRetentionDailyInitParameters `json:"retentionDaily,omitempty" tf:"retention_daily,omitempty"` + + // Configures the policy monthly retention as documented in the retention_monthly block below. + RetentionMonthly *BackupPolicyVMRetentionMonthlyInitParameters `json:"retentionMonthly,omitempty" tf:"retention_monthly,omitempty"` + + // Configures the policy weekly retention as documented in the retention_weekly block below. Required when backup frequency is Weekly. + RetentionWeekly *BackupPolicyVMRetentionWeeklyInitParameters `json:"retentionWeekly,omitempty" tf:"retention_weekly,omitempty"` + + // Configures the policy yearly retention as documented in the retention_yearly block below. + RetentionYearly *BackupPolicyVMRetentionYearlyInitParameters `json:"retentionYearly,omitempty" tf:"retention_yearly,omitempty"` + + // Specifies the timezone. the possible values are defined here. Defaults to UTC + Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` +} + +type BackupPolicyVMObservation struct { + + // Configures the Policy backup frequency, times & days as documented in the backup block below. + Backup *BackupPolicyVMBackupObservation `json:"backup,omitempty" tf:"backup,omitempty"` + + // The ID of the VM Backup Policy. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies the instant restore resource group name as documented in the instant_restore_resource_group block below. + InstantRestoreResourceGroup *InstantRestoreResourceGroupObservation `json:"instantRestoreResourceGroup,omitempty" tf:"instant_restore_resource_group,omitempty"` + + // Specifies the instant restore retention range in days. Possible values are between 1 and 5 when policy_type is V1, and 1 to 30 when policy_type is V2. + InstantRestoreRetentionDays *float64 `json:"instantRestoreRetentionDays,omitempty" tf:"instant_restore_retention_days,omitempty"` + + // Type of the Backup Policy. 
Possible values are V1 and V2 where V2 stands for the Enhanced Policy. Defaults to V1. Changing this forces a new resource to be created. + PolicyType *string `json:"policyType,omitempty" tf:"policy_type,omitempty"` + + // Specifies the name of the Recovery Services Vault to use. Changing this forces a new resource to be created. + RecoveryVaultName *string `json:"recoveryVaultName,omitempty" tf:"recovery_vault_name,omitempty"` + + // The name of the resource group in which to create the policy. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Configures the policy daily retention as documented in the retention_daily block below. Required when backup frequency is Daily. + RetentionDaily *BackupPolicyVMRetentionDailyObservation `json:"retentionDaily,omitempty" tf:"retention_daily,omitempty"` + + // Configures the policy monthly retention as documented in the retention_monthly block below. + RetentionMonthly *BackupPolicyVMRetentionMonthlyObservation `json:"retentionMonthly,omitempty" tf:"retention_monthly,omitempty"` + + // Configures the policy weekly retention as documented in the retention_weekly block below. Required when backup frequency is Weekly. + RetentionWeekly *BackupPolicyVMRetentionWeeklyObservation `json:"retentionWeekly,omitempty" tf:"retention_weekly,omitempty"` + + // Configures the policy yearly retention as documented in the retention_yearly block below. + RetentionYearly *BackupPolicyVMRetentionYearlyObservation `json:"retentionYearly,omitempty" tf:"retention_yearly,omitempty"` + + // Specifies the timezone. the possible values are defined here. Defaults to UTC + Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` +} + +type BackupPolicyVMParameters struct { + + // Configures the Policy backup frequency, times & days as documented in the backup block below. 
+ // +kubebuilder:validation:Optional + Backup *BackupPolicyVMBackupParameters `json:"backup,omitempty" tf:"backup,omitempty"` + + // Specifies the instant restore resource group name as documented in the instant_restore_resource_group block below. + // +kubebuilder:validation:Optional + InstantRestoreResourceGroup *InstantRestoreResourceGroupParameters `json:"instantRestoreResourceGroup,omitempty" tf:"instant_restore_resource_group,omitempty"` + + // Specifies the instant restore retention range in days. Possible values are between 1 and 5 when policy_type is V1, and 1 to 30 when policy_type is V2. + // +kubebuilder:validation:Optional + InstantRestoreRetentionDays *float64 `json:"instantRestoreRetentionDays,omitempty" tf:"instant_restore_retention_days,omitempty"` + + // Type of the Backup Policy. Possible values are V1 and V2 where V2 stands for the Enhanced Policy. Defaults to V1. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + PolicyType *string `json:"policyType,omitempty" tf:"policy_type,omitempty"` + + // Specifies the name of the Recovery Services Vault to use. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/recoveryservices/v1beta2.Vault + // +kubebuilder:validation:Optional + RecoveryVaultName *string `json:"recoveryVaultName,omitempty" tf:"recovery_vault_name,omitempty"` + + // Reference to a Vault in recoveryservices to populate recoveryVaultName. + // +kubebuilder:validation:Optional + RecoveryVaultNameRef *v1.Reference `json:"recoveryVaultNameRef,omitempty" tf:"-"` + + // Selector for a Vault in recoveryservices to populate recoveryVaultName. + // +kubebuilder:validation:Optional + RecoveryVaultNameSelector *v1.Selector `json:"recoveryVaultNameSelector,omitempty" tf:"-"` + + // The name of the resource group in which to create the policy. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // Configures the policy daily retention as documented in the retention_daily block below. Required when backup frequency is Daily. + // +kubebuilder:validation:Optional + RetentionDaily *BackupPolicyVMRetentionDailyParameters `json:"retentionDaily,omitempty" tf:"retention_daily,omitempty"` + + // Configures the policy monthly retention as documented in the retention_monthly block below. + // +kubebuilder:validation:Optional + RetentionMonthly *BackupPolicyVMRetentionMonthlyParameters `json:"retentionMonthly,omitempty" tf:"retention_monthly,omitempty"` + + // Configures the policy weekly retention as documented in the retention_weekly block below. Required when backup frequency is Weekly. + // +kubebuilder:validation:Optional + RetentionWeekly *BackupPolicyVMRetentionWeeklyParameters `json:"retentionWeekly,omitempty" tf:"retention_weekly,omitempty"` + + // Configures the policy yearly retention as documented in the retention_yearly block below. + // +kubebuilder:validation:Optional + RetentionYearly *BackupPolicyVMRetentionYearlyParameters `json:"retentionYearly,omitempty" tf:"retention_yearly,omitempty"` + + // Specifies the timezone. the possible values are defined here. 
Defaults to UTC + // +kubebuilder:validation:Optional + Timezone *string `json:"timezone,omitempty" tf:"timezone,omitempty"` +} + +type BackupPolicyVMRetentionDailyInitParameters struct { + + // The number of daily backups to keep. Must be between 7 and 9999. + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` +} + +type BackupPolicyVMRetentionDailyObservation struct { + + // The number of daily backups to keep. Must be between 7 and 9999. + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` +} + +type BackupPolicyVMRetentionDailyParameters struct { + + // The number of daily backups to keep. Must be between 7 and 9999. + // +kubebuilder:validation:Optional + Count *float64 `json:"count" tf:"count,omitempty"` +} + +type BackupPolicyVMRetentionMonthlyInitParameters struct { + + // The number of monthly backups to keep. Must be between 1 and 9999 + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` + + // The days of the month to retain backups of. Must be between 1 and 31. + // +listType=set + Days []*float64 `json:"days,omitempty" tf:"days,omitempty"` + + // Including the last day of the month, default to false. + IncludeLastDays *bool `json:"includeLastDays,omitempty" tf:"include_last_days,omitempty"` + + // The weekday backups to retain . Must be one of Sunday, Monday, Tuesday, Wednesday, Thursday, Friday or Saturday. + // +listType=set + Weekdays []*string `json:"weekdays,omitempty" tf:"weekdays,omitempty"` + + // The weeks of the month to retain backups of. Must be one of First, Second, Third, Fourth, Last. + // +listType=set + Weeks []*string `json:"weeks,omitempty" tf:"weeks,omitempty"` +} + +type BackupPolicyVMRetentionMonthlyObservation struct { + + // The number of monthly backups to keep. Must be between 1 and 9999 + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` + + // The days of the month to retain backups of. Must be between 1 and 31. 
+ // +listType=set + Days []*float64 `json:"days,omitempty" tf:"days,omitempty"` + + // Including the last day of the month, default to false. + IncludeLastDays *bool `json:"includeLastDays,omitempty" tf:"include_last_days,omitempty"` + + // The weekday backups to retain . Must be one of Sunday, Monday, Tuesday, Wednesday, Thursday, Friday or Saturday. + // +listType=set + Weekdays []*string `json:"weekdays,omitempty" tf:"weekdays,omitempty"` + + // The weeks of the month to retain backups of. Must be one of First, Second, Third, Fourth, Last. + // +listType=set + Weeks []*string `json:"weeks,omitempty" tf:"weeks,omitempty"` +} + +type BackupPolicyVMRetentionMonthlyParameters struct { + + // The number of monthly backups to keep. Must be between 1 and 9999 + // +kubebuilder:validation:Optional + Count *float64 `json:"count" tf:"count,omitempty"` + + // The days of the month to retain backups of. Must be between 1 and 31. + // +kubebuilder:validation:Optional + // +listType=set + Days []*float64 `json:"days,omitempty" tf:"days,omitempty"` + + // Including the last day of the month, default to false. + // +kubebuilder:validation:Optional + IncludeLastDays *bool `json:"includeLastDays,omitempty" tf:"include_last_days,omitempty"` + + // The weekday backups to retain . Must be one of Sunday, Monday, Tuesday, Wednesday, Thursday, Friday or Saturday. + // +kubebuilder:validation:Optional + // +listType=set + Weekdays []*string `json:"weekdays,omitempty" tf:"weekdays,omitempty"` + + // The weeks of the month to retain backups of. Must be one of First, Second, Third, Fourth, Last. + // +kubebuilder:validation:Optional + // +listType=set + Weeks []*string `json:"weeks,omitempty" tf:"weeks,omitempty"` +} + +type BackupPolicyVMRetentionWeeklyInitParameters struct { + + // The number of weekly backups to keep. Must be between 1 and 9999 + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` + + // The weekday backups to retain. 
Must be one of Sunday, Monday, Tuesday, Wednesday, Thursday, Friday or Saturday. + // +listType=set + Weekdays []*string `json:"weekdays,omitempty" tf:"weekdays,omitempty"` +} + +type BackupPolicyVMRetentionWeeklyObservation struct { + + // The number of weekly backups to keep. Must be between 1 and 9999 + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` + + // The weekday backups to retain. Must be one of Sunday, Monday, Tuesday, Wednesday, Thursday, Friday or Saturday. + // +listType=set + Weekdays []*string `json:"weekdays,omitempty" tf:"weekdays,omitempty"` +} + +type BackupPolicyVMRetentionWeeklyParameters struct { + + // The number of weekly backups to keep. Must be between 1 and 9999 + // +kubebuilder:validation:Optional + Count *float64 `json:"count" tf:"count,omitempty"` + + // The weekday backups to retain. Must be one of Sunday, Monday, Tuesday, Wednesday, Thursday, Friday or Saturday. + // +kubebuilder:validation:Optional + // +listType=set + Weekdays []*string `json:"weekdays" tf:"weekdays,omitempty"` +} + +type BackupPolicyVMRetentionYearlyInitParameters struct { + + // The number of yearly backups to keep. Must be between 1 and 9999 + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` + + // The days of the month to retain backups of. Must be between 1 and 31. + // +listType=set + Days []*float64 `json:"days,omitempty" tf:"days,omitempty"` + + // Including the last day of the month, default to false. + IncludeLastDays *bool `json:"includeLastDays,omitempty" tf:"include_last_days,omitempty"` + + // The months of the year to retain backups of. Must be one of January, February, March, April, May, June, July, August, September, October, November and December. + // +listType=set + Months []*string `json:"months,omitempty" tf:"months,omitempty"` + + // The weekday backups to retain . Must be one of Sunday, Monday, Tuesday, Wednesday, Thursday, Friday or Saturday. 
+ // +listType=set + Weekdays []*string `json:"weekdays,omitempty" tf:"weekdays,omitempty"` + + // The weeks of the month to retain backups of. Must be one of First, Second, Third, Fourth, Last. + // +listType=set + Weeks []*string `json:"weeks,omitempty" tf:"weeks,omitempty"` +} + +type BackupPolicyVMRetentionYearlyObservation struct { + + // The number of yearly backups to keep. Must be between 1 and 9999 + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` + + // The days of the month to retain backups of. Must be between 1 and 31. + // +listType=set + Days []*float64 `json:"days,omitempty" tf:"days,omitempty"` + + // Including the last day of the month, default to false. + IncludeLastDays *bool `json:"includeLastDays,omitempty" tf:"include_last_days,omitempty"` + + // The months of the year to retain backups of. Must be one of January, February, March, April, May, June, July, August, September, October, November and December. + // +listType=set + Months []*string `json:"months,omitempty" tf:"months,omitempty"` + + // The weekday backups to retain . Must be one of Sunday, Monday, Tuesday, Wednesday, Thursday, Friday or Saturday. + // +listType=set + Weekdays []*string `json:"weekdays,omitempty" tf:"weekdays,omitempty"` + + // The weeks of the month to retain backups of. Must be one of First, Second, Third, Fourth, Last. + // +listType=set + Weeks []*string `json:"weeks,omitempty" tf:"weeks,omitempty"` +} + +type BackupPolicyVMRetentionYearlyParameters struct { + + // The number of yearly backups to keep. Must be between 1 and 9999 + // +kubebuilder:validation:Optional + Count *float64 `json:"count" tf:"count,omitempty"` + + // The days of the month to retain backups of. Must be between 1 and 31. + // +kubebuilder:validation:Optional + // +listType=set + Days []*float64 `json:"days,omitempty" tf:"days,omitempty"` + + // Including the last day of the month, default to false. 
+ // +kubebuilder:validation:Optional + IncludeLastDays *bool `json:"includeLastDays,omitempty" tf:"include_last_days,omitempty"` + + // The months of the year to retain backups of. Must be one of January, February, March, April, May, June, July, August, September, October, November and December. + // +kubebuilder:validation:Optional + // +listType=set + Months []*string `json:"months" tf:"months,omitempty"` + + // The weekday backups to retain . Must be one of Sunday, Monday, Tuesday, Wednesday, Thursday, Friday or Saturday. + // +kubebuilder:validation:Optional + // +listType=set + Weekdays []*string `json:"weekdays,omitempty" tf:"weekdays,omitempty"` + + // The weeks of the month to retain backups of. Must be one of First, Second, Third, Fourth, Last. + // +kubebuilder:validation:Optional + // +listType=set + Weeks []*string `json:"weeks,omitempty" tf:"weeks,omitempty"` +} + +type InstantRestoreResourceGroupInitParameters struct { + + // The prefix for the instant_restore_resource_group name. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // The suffix for the instant_restore_resource_group name. + Suffix *string `json:"suffix,omitempty" tf:"suffix,omitempty"` +} + +type InstantRestoreResourceGroupObservation struct { + + // The prefix for the instant_restore_resource_group name. + Prefix *string `json:"prefix,omitempty" tf:"prefix,omitempty"` + + // The suffix for the instant_restore_resource_group name. + Suffix *string `json:"suffix,omitempty" tf:"suffix,omitempty"` +} + +type InstantRestoreResourceGroupParameters struct { + + // The prefix for the instant_restore_resource_group name. + // +kubebuilder:validation:Optional + Prefix *string `json:"prefix" tf:"prefix,omitempty"` + + // The suffix for the instant_restore_resource_group name. 
+ // +kubebuilder:validation:Optional + Suffix *string `json:"suffix,omitempty" tf:"suffix,omitempty"` +} + +// BackupPolicyVMSpec defines the desired state of BackupPolicyVM +type BackupPolicyVMSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider BackupPolicyVMParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider BackupPolicyVMInitParameters `json:"initProvider,omitempty"` +} + +// BackupPolicyVMStatus defines the observed state of BackupPolicyVM. +type BackupPolicyVMStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider BackupPolicyVMObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// BackupPolicyVM is the Schema for the BackupPolicyVMs API. Manages an Azure Backup VM Backup Policy. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type BackupPolicyVM struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.backup) || (has(self.initProvider) && has(self.initProvider.backup))",message="spec.forProvider.backup is a required parameter" + Spec BackupPolicyVMSpec `json:"spec"` + Status BackupPolicyVMStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// BackupPolicyVMList contains a list of BackupPolicyVMs +type BackupPolicyVMList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []BackupPolicyVM `json:"items"` +} + +// Repository type metadata. +var ( + BackupPolicyVM_Kind = "BackupPolicyVM" + BackupPolicyVM_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: BackupPolicyVM_Kind}.String() + BackupPolicyVM_KindAPIVersion = BackupPolicyVM_Kind + "." 
+ CRDGroupVersion.String() + BackupPolicyVM_GroupVersionKind = CRDGroupVersion.WithKind(BackupPolicyVM_Kind) +) + +func init() { + SchemeBuilder.Register(&BackupPolicyVM{}, &BackupPolicyVMList{}) +} diff --git a/apis/recoveryservices/v1beta2/zz_backuppolicyvmworkload_terraformed.go b/apis/recoveryservices/v1beta2/zz_backuppolicyvmworkload_terraformed.go new file mode 100755 index 000000000..bf8688900 --- /dev/null +++ b/apis/recoveryservices/v1beta2/zz_backuppolicyvmworkload_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this BackupPolicyVMWorkload +func (mg *BackupPolicyVMWorkload) GetTerraformResourceType() string { + return "azurerm_backup_policy_vm_workload" +} + +// GetConnectionDetailsMapping for this BackupPolicyVMWorkload +func (tr *BackupPolicyVMWorkload) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this BackupPolicyVMWorkload +func (tr *BackupPolicyVMWorkload) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this BackupPolicyVMWorkload +func (tr *BackupPolicyVMWorkload) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this BackupPolicyVMWorkload +func (tr *BackupPolicyVMWorkload) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return 
*tr.Status.AtProvider.ID +} + +// GetParameters of this BackupPolicyVMWorkload +func (tr *BackupPolicyVMWorkload) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this BackupPolicyVMWorkload +func (tr *BackupPolicyVMWorkload) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this BackupPolicyVMWorkload +func (tr *BackupPolicyVMWorkload) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this BackupPolicyVMWorkload +func (tr *BackupPolicyVMWorkload) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this BackupPolicyVMWorkload using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *BackupPolicyVMWorkload) LateInitialize(attrs []byte) (bool, error) { + params := &BackupPolicyVMWorkloadParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *BackupPolicyVMWorkload) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/recoveryservices/v1beta2/zz_backuppolicyvmworkload_types.go b/apis/recoveryservices/v1beta2/zz_backuppolicyvmworkload_types.go new file mode 100755 index 000000000..0c2c5243f --- /dev/null +++ b/apis/recoveryservices/v1beta2/zz_backuppolicyvmworkload_types.go @@ -0,0 +1,530 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type BackupPolicyVMWorkloadInitParameters struct { + + // One or more protection_policy blocks as defined below. 
+ ProtectionPolicy []ProtectionPolicyInitParameters `json:"protectionPolicy,omitempty" tf:"protection_policy,omitempty"` + + // A settings block as defined below. + Settings *SettingsInitParameters `json:"settings,omitempty" tf:"settings,omitempty"` + + // The VM Workload type for the Backup Policy. Possible values are SQLDataBase and SAPHanaDatabase. Changing this forces a new resource to be created. + WorkloadType *string `json:"workloadType,omitempty" tf:"workload_type,omitempty"` +} + +type BackupPolicyVMWorkloadObservation struct { + + // The ID of the Azure VM Workload Backup Policy. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // One or more protection_policy blocks as defined below. + ProtectionPolicy []ProtectionPolicyObservation `json:"protectionPolicy,omitempty" tf:"protection_policy,omitempty"` + + // The name of the Recovery Services Vault to use. Changing this forces a new resource to be created. + RecoveryVaultName *string `json:"recoveryVaultName,omitempty" tf:"recovery_vault_name,omitempty"` + + // The name of the resource group in which to create the VM Workload Backup Policy. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A settings block as defined below. + Settings *SettingsObservation `json:"settings,omitempty" tf:"settings,omitempty"` + + // The VM Workload type for the Backup Policy. Possible values are SQLDataBase and SAPHanaDatabase. Changing this forces a new resource to be created. + WorkloadType *string `json:"workloadType,omitempty" tf:"workload_type,omitempty"` +} + +type BackupPolicyVMWorkloadParameters struct { + + // One or more protection_policy blocks as defined below. + // +kubebuilder:validation:Optional + ProtectionPolicy []ProtectionPolicyParameters `json:"protectionPolicy,omitempty" tf:"protection_policy,omitempty"` + + // The name of the Recovery Services Vault to use. 
Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/recoveryservices/v1beta2.Vault + // +kubebuilder:validation:Optional + RecoveryVaultName *string `json:"recoveryVaultName,omitempty" tf:"recovery_vault_name,omitempty"` + + // Reference to a Vault in recoveryservices to populate recoveryVaultName. + // +kubebuilder:validation:Optional + RecoveryVaultNameRef *v1.Reference `json:"recoveryVaultNameRef,omitempty" tf:"-"` + + // Selector for a Vault in recoveryservices to populate recoveryVaultName. + // +kubebuilder:validation:Optional + RecoveryVaultNameSelector *v1.Selector `json:"recoveryVaultNameSelector,omitempty" tf:"-"` + + // The name of the resource group in which to create the VM Workload Backup Policy. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A settings block as defined below. + // +kubebuilder:validation:Optional + Settings *SettingsParameters `json:"settings,omitempty" tf:"settings,omitempty"` + + // The VM Workload type for the Backup Policy. Possible values are SQLDataBase and SAPHanaDatabase. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + WorkloadType *string `json:"workloadType,omitempty" tf:"workload_type,omitempty"` +} + +type ProtectionPolicyBackupInitParameters struct { + + // The backup frequency for the VM Workload Backup Policy. Possible values are Daily and Weekly. + Frequency *string `json:"frequency,omitempty" tf:"frequency,omitempty"` + + // The backup frequency in minutes for the VM Workload Backup Policy. Possible values are 15, 30, 60, 120, 240, 480, 720 and 1440. + FrequencyInMinutes *float64 `json:"frequencyInMinutes,omitempty" tf:"frequency_in_minutes,omitempty"` + + // The time of day to perform the backup in 24hour format. + Time *string `json:"time,omitempty" tf:"time,omitempty"` + + // The weekday backups to retain. Possible values are Sunday, Monday, Tuesday, Wednesday, Thursday, Friday or Saturday. + // +listType=set + Weekdays []*string `json:"weekdays,omitempty" tf:"weekdays,omitempty"` +} + +type ProtectionPolicyBackupObservation struct { + + // The backup frequency for the VM Workload Backup Policy. Possible values are Daily and Weekly. + Frequency *string `json:"frequency,omitempty" tf:"frequency,omitempty"` + + // The backup frequency in minutes for the VM Workload Backup Policy. Possible values are 15, 30, 60, 120, 240, 480, 720 and 1440. + FrequencyInMinutes *float64 `json:"frequencyInMinutes,omitempty" tf:"frequency_in_minutes,omitempty"` + + // The time of day to perform the backup in 24hour format. + Time *string `json:"time,omitempty" tf:"time,omitempty"` + + // The weekday backups to retain. Possible values are Sunday, Monday, Tuesday, Wednesday, Thursday, Friday or Saturday. + // +listType=set + Weekdays []*string `json:"weekdays,omitempty" tf:"weekdays,omitempty"` +} + +type ProtectionPolicyBackupParameters struct { + + // The backup frequency for the VM Workload Backup Policy. Possible values are Daily and Weekly. 
+ // +kubebuilder:validation:Optional + Frequency *string `json:"frequency,omitempty" tf:"frequency,omitempty"` + + // The backup frequency in minutes for the VM Workload Backup Policy. Possible values are 15, 30, 60, 120, 240, 480, 720 and 1440. + // +kubebuilder:validation:Optional + FrequencyInMinutes *float64 `json:"frequencyInMinutes,omitempty" tf:"frequency_in_minutes,omitempty"` + + // The time of day to perform the backup in 24hour format. + // +kubebuilder:validation:Optional + Time *string `json:"time,omitempty" tf:"time,omitempty"` + + // The weekday backups to retain. Possible values are Sunday, Monday, Tuesday, Wednesday, Thursday, Friday or Saturday. + // +kubebuilder:validation:Optional + // +listType=set + Weekdays []*string `json:"weekdays,omitempty" tf:"weekdays,omitempty"` +} + +type ProtectionPolicyInitParameters struct { + + // A backup block as defined below. + Backup *ProtectionPolicyBackupInitParameters `json:"backup,omitempty" tf:"backup,omitempty"` + + // The type of the VM Workload Backup Policy. Possible values are Differential, Full, Incremental and Log. + PolicyType *string `json:"policyType,omitempty" tf:"policy_type,omitempty"` + + // A retention_daily block as defined below. + RetentionDaily *ProtectionPolicyRetentionDailyInitParameters `json:"retentionDaily,omitempty" tf:"retention_daily,omitempty"` + + // A retention_monthly block as defined below. + RetentionMonthly *ProtectionPolicyRetentionMonthlyInitParameters `json:"retentionMonthly,omitempty" tf:"retention_monthly,omitempty"` + + // A retention_weekly block as defined below. + RetentionWeekly *ProtectionPolicyRetentionWeeklyInitParameters `json:"retentionWeekly,omitempty" tf:"retention_weekly,omitempty"` + + // A retention_yearly block as defined below. + RetentionYearly *ProtectionPolicyRetentionYearlyInitParameters `json:"retentionYearly,omitempty" tf:"retention_yearly,omitempty"` + + // A simple_retention block as defined below. 
+ SimpleRetention *SimpleRetentionInitParameters `json:"simpleRetention,omitempty" tf:"simple_retention,omitempty"` +} + +type ProtectionPolicyObservation struct { + + // A backup block as defined below. + Backup *ProtectionPolicyBackupObservation `json:"backup,omitempty" tf:"backup,omitempty"` + + // The type of the VM Workload Backup Policy. Possible values are Differential, Full, Incremental and Log. + PolicyType *string `json:"policyType,omitempty" tf:"policy_type,omitempty"` + + // A retention_daily block as defined below. + RetentionDaily *ProtectionPolicyRetentionDailyObservation `json:"retentionDaily,omitempty" tf:"retention_daily,omitempty"` + + // A retention_monthly block as defined below. + RetentionMonthly *ProtectionPolicyRetentionMonthlyObservation `json:"retentionMonthly,omitempty" tf:"retention_monthly,omitempty"` + + // A retention_weekly block as defined below. + RetentionWeekly *ProtectionPolicyRetentionWeeklyObservation `json:"retentionWeekly,omitempty" tf:"retention_weekly,omitempty"` + + // A retention_yearly block as defined below. + RetentionYearly *ProtectionPolicyRetentionYearlyObservation `json:"retentionYearly,omitempty" tf:"retention_yearly,omitempty"` + + // A simple_retention block as defined below. + SimpleRetention *SimpleRetentionObservation `json:"simpleRetention,omitempty" tf:"simple_retention,omitempty"` +} + +type ProtectionPolicyParameters struct { + + // A backup block as defined below. + // +kubebuilder:validation:Optional + Backup *ProtectionPolicyBackupParameters `json:"backup" tf:"backup,omitempty"` + + // The type of the VM Workload Backup Policy. Possible values are Differential, Full, Incremental and Log. + // +kubebuilder:validation:Optional + PolicyType *string `json:"policyType" tf:"policy_type,omitempty"` + + // A retention_daily block as defined below. 
+ // +kubebuilder:validation:Optional + RetentionDaily *ProtectionPolicyRetentionDailyParameters `json:"retentionDaily,omitempty" tf:"retention_daily,omitempty"` + + // A retention_monthly block as defined below. + // +kubebuilder:validation:Optional + RetentionMonthly *ProtectionPolicyRetentionMonthlyParameters `json:"retentionMonthly,omitempty" tf:"retention_monthly,omitempty"` + + // A retention_weekly block as defined below. + // +kubebuilder:validation:Optional + RetentionWeekly *ProtectionPolicyRetentionWeeklyParameters `json:"retentionWeekly,omitempty" tf:"retention_weekly,omitempty"` + + // A retention_yearly block as defined below. + // +kubebuilder:validation:Optional + RetentionYearly *ProtectionPolicyRetentionYearlyParameters `json:"retentionYearly,omitempty" tf:"retention_yearly,omitempty"` + + // A simple_retention block as defined below. + // +kubebuilder:validation:Optional + SimpleRetention *SimpleRetentionParameters `json:"simpleRetention,omitempty" tf:"simple_retention,omitempty"` +} + +type ProtectionPolicyRetentionDailyInitParameters struct { + + // The count that is used to count retention duration with duration type Days. Possible values are between 7 and 35. + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` +} + +type ProtectionPolicyRetentionDailyObservation struct { + + // The count that is used to count retention duration with duration type Days. Possible values are between 7 and 35. + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` +} + +type ProtectionPolicyRetentionDailyParameters struct { + + // The count that is used to count retention duration with duration type Days. Possible values are between 7 and 35. + // +kubebuilder:validation:Optional + Count *float64 `json:"count" tf:"count,omitempty"` +} + +type ProtectionPolicyRetentionMonthlyInitParameters struct { + + // The count that is used to count retention duration with duration type Days. Possible values are between 7 and 35. 
+ Count *float64 `json:"count,omitempty" tf:"count,omitempty"` + + // The retention schedule format type for yearly retention policy. Possible values are Daily and Weekly. + FormatType *string `json:"formatType,omitempty" tf:"format_type,omitempty"` + + // The monthday backups to retain. Possible values are between 0 and 28. + // +listType=set + Monthdays []*float64 `json:"monthdays,omitempty" tf:"monthdays,omitempty"` + + // The weekday backups to retain. Possible values are Sunday, Monday, Tuesday, Wednesday, Thursday, Friday or Saturday. + // +listType=set + Weekdays []*string `json:"weekdays,omitempty" tf:"weekdays,omitempty"` + + // The weeks of the month to retain backups of. Possible values are First, Second, Third, Fourth, Last. + // +listType=set + Weeks []*string `json:"weeks,omitempty" tf:"weeks,omitempty"` +} + +type ProtectionPolicyRetentionMonthlyObservation struct { + + // The count that is used to count retention duration with duration type Days. Possible values are between 7 and 35. + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` + + // The retention schedule format type for yearly retention policy. Possible values are Daily and Weekly. + FormatType *string `json:"formatType,omitempty" tf:"format_type,omitempty"` + + // The monthday backups to retain. Possible values are between 0 and 28. + // +listType=set + Monthdays []*float64 `json:"monthdays,omitempty" tf:"monthdays,omitempty"` + + // The weekday backups to retain. Possible values are Sunday, Monday, Tuesday, Wednesday, Thursday, Friday or Saturday. + // +listType=set + Weekdays []*string `json:"weekdays,omitempty" tf:"weekdays,omitempty"` + + // The weeks of the month to retain backups of. Possible values are First, Second, Third, Fourth, Last. + // +listType=set + Weeks []*string `json:"weeks,omitempty" tf:"weeks,omitempty"` +} + +type ProtectionPolicyRetentionMonthlyParameters struct { + + // The count that is used to count retention duration with duration type Days. 
Possible values are between 7 and 35. + // +kubebuilder:validation:Optional + Count *float64 `json:"count" tf:"count,omitempty"` + + // The retention schedule format type for yearly retention policy. Possible values are Daily and Weekly. + // +kubebuilder:validation:Optional + FormatType *string `json:"formatType" tf:"format_type,omitempty"` + + // The monthday backups to retain. Possible values are between 0 and 28. + // +kubebuilder:validation:Optional + // +listType=set + Monthdays []*float64 `json:"monthdays,omitempty" tf:"monthdays,omitempty"` + + // The weekday backups to retain. Possible values are Sunday, Monday, Tuesday, Wednesday, Thursday, Friday or Saturday. + // +kubebuilder:validation:Optional + // +listType=set + Weekdays []*string `json:"weekdays,omitempty" tf:"weekdays,omitempty"` + + // The weeks of the month to retain backups of. Possible values are First, Second, Third, Fourth, Last. + // +kubebuilder:validation:Optional + // +listType=set + Weeks []*string `json:"weeks,omitempty" tf:"weeks,omitempty"` +} + +type ProtectionPolicyRetentionWeeklyInitParameters struct { + + // The count that is used to count retention duration with duration type Days. Possible values are between 7 and 35. + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` + + // The weekday backups to retain. Possible values are Sunday, Monday, Tuesday, Wednesday, Thursday, Friday or Saturday. + // +listType=set + Weekdays []*string `json:"weekdays,omitempty" tf:"weekdays,omitempty"` +} + +type ProtectionPolicyRetentionWeeklyObservation struct { + + // The count that is used to count retention duration with duration type Days. Possible values are between 7 and 35. + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` + + // The weekday backups to retain. Possible values are Sunday, Monday, Tuesday, Wednesday, Thursday, Friday or Saturday. 
+ // +listType=set + Weekdays []*string `json:"weekdays,omitempty" tf:"weekdays,omitempty"` +} + +type ProtectionPolicyRetentionWeeklyParameters struct { + + // The count that is used to count retention duration with duration type Days. Possible values are between 7 and 35. + // +kubebuilder:validation:Optional + Count *float64 `json:"count" tf:"count,omitempty"` + + // The weekday backups to retain. Possible values are Sunday, Monday, Tuesday, Wednesday, Thursday, Friday or Saturday. + // +kubebuilder:validation:Optional + // +listType=set + Weekdays []*string `json:"weekdays" tf:"weekdays,omitempty"` +} + +type ProtectionPolicyRetentionYearlyInitParameters struct { + + // The count that is used to count retention duration with duration type Days. Possible values are between 7 and 35. + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` + + // The retention schedule format type for yearly retention policy. Possible values are Daily and Weekly. + FormatType *string `json:"formatType,omitempty" tf:"format_type,omitempty"` + + // The monthday backups to retain. Possible values are between 0 and 28. + // +listType=set + Monthdays []*float64 `json:"monthdays,omitempty" tf:"monthdays,omitempty"` + + // The months of the year to retain backups of. Possible values are January, February, March, April, May, June, July, August, September, October, November and December. + // +listType=set + Months []*string `json:"months,omitempty" tf:"months,omitempty"` + + // The weekday backups to retain. Possible values are Sunday, Monday, Tuesday, Wednesday, Thursday, Friday or Saturday. + // +listType=set + Weekdays []*string `json:"weekdays,omitempty" tf:"weekdays,omitempty"` + + // The weeks of the month to retain backups of. Possible values are First, Second, Third, Fourth, Last. 
+ // +listType=set + Weeks []*string `json:"weeks,omitempty" tf:"weeks,omitempty"` +} + +type ProtectionPolicyRetentionYearlyObservation struct { + + // The count that is used to count retention duration with duration type Days. Possible values are between 7 and 35. + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` + + // The retention schedule format type for yearly retention policy. Possible values are Daily and Weekly. + FormatType *string `json:"formatType,omitempty" tf:"format_type,omitempty"` + + // The monthday backups to retain. Possible values are between 0 and 28. + // +listType=set + Monthdays []*float64 `json:"monthdays,omitempty" tf:"monthdays,omitempty"` + + // The months of the year to retain backups of. Possible values are January, February, March, April, May, June, July, August, September, October, November and December. + // +listType=set + Months []*string `json:"months,omitempty" tf:"months,omitempty"` + + // The weekday backups to retain. Possible values are Sunday, Monday, Tuesday, Wednesday, Thursday, Friday or Saturday. + // +listType=set + Weekdays []*string `json:"weekdays,omitempty" tf:"weekdays,omitempty"` + + // The weeks of the month to retain backups of. Possible values are First, Second, Third, Fourth, Last. + // +listType=set + Weeks []*string `json:"weeks,omitempty" tf:"weeks,omitempty"` +} + +type ProtectionPolicyRetentionYearlyParameters struct { + + // The count that is used to count retention duration with duration type Days. Possible values are between 7 and 35. + // +kubebuilder:validation:Optional + Count *float64 `json:"count" tf:"count,omitempty"` + + // The retention schedule format type for yearly retention policy. Possible values are Daily and Weekly. + // +kubebuilder:validation:Optional + FormatType *string `json:"formatType" tf:"format_type,omitempty"` + + // The monthday backups to retain. Possible values are between 0 and 28. 
+ // +kubebuilder:validation:Optional + // +listType=set + Monthdays []*float64 `json:"monthdays,omitempty" tf:"monthdays,omitempty"` + + // The months of the year to retain backups of. Possible values are January, February, March, April, May, June, July, August, September, October, November and December. + // +kubebuilder:validation:Optional + // +listType=set + Months []*string `json:"months" tf:"months,omitempty"` + + // The weekday backups to retain. Possible values are Sunday, Monday, Tuesday, Wednesday, Thursday, Friday or Saturday. + // +kubebuilder:validation:Optional + // +listType=set + Weekdays []*string `json:"weekdays,omitempty" tf:"weekdays,omitempty"` + + // The weeks of the month to retain backups of. Possible values are First, Second, Third, Fourth, Last. + // +kubebuilder:validation:Optional + // +listType=set + Weeks []*string `json:"weeks,omitempty" tf:"weeks,omitempty"` +} + +type SettingsInitParameters struct { + + // The compression setting for the VM Workload Backup Policy. Defaults to false. + CompressionEnabled *bool `json:"compressionEnabled,omitempty" tf:"compression_enabled,omitempty"` + + // The timezone for the VM Workload Backup Policy. The possible values are defined here. + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +type SettingsObservation struct { + + // The compression setting for the VM Workload Backup Policy. Defaults to false. + CompressionEnabled *bool `json:"compressionEnabled,omitempty" tf:"compression_enabled,omitempty"` + + // The timezone for the VM Workload Backup Policy. The possible values are defined here. + TimeZone *string `json:"timeZone,omitempty" tf:"time_zone,omitempty"` +} + +type SettingsParameters struct { + + // The compression setting for the VM Workload Backup Policy. Defaults to false. 
+ // +kubebuilder:validation:Optional + CompressionEnabled *bool `json:"compressionEnabled,omitempty" tf:"compression_enabled,omitempty"` + + // The timezone for the VM Workload Backup Policy. The possible values are defined here. + // +kubebuilder:validation:Optional + TimeZone *string `json:"timeZone" tf:"time_zone,omitempty"` +} + +type SimpleRetentionInitParameters struct { + + // The count that is used to count retention duration with duration type Days. Possible values are between 7 and 35. + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` +} + +type SimpleRetentionObservation struct { + + // The count that is used to count retention duration with duration type Days. Possible values are between 7 and 35. + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` +} + +type SimpleRetentionParameters struct { + + // The count that is used to count retention duration with duration type Days. Possible values are between 7 and 35. + // +kubebuilder:validation:Optional + Count *float64 `json:"count" tf:"count,omitempty"` +} + +// BackupPolicyVMWorkloadSpec defines the desired state of BackupPolicyVMWorkload +type BackupPolicyVMWorkloadSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider BackupPolicyVMWorkloadParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider BackupPolicyVMWorkloadInitParameters `json:"initProvider,omitempty"` +} + +// BackupPolicyVMWorkloadStatus defines the observed state of BackupPolicyVMWorkload. +type BackupPolicyVMWorkloadStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider BackupPolicyVMWorkloadObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// BackupPolicyVMWorkload is the Schema for the BackupPolicyVMWorkloads API. Manages an Azure VM Workload Backup Policy. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type BackupPolicyVMWorkload struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.protectionPolicy) || (has(self.initProvider) && has(self.initProvider.protectionPolicy))",message="spec.forProvider.protectionPolicy is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.settings) || (has(self.initProvider) && has(self.initProvider.settings))",message="spec.forProvider.settings is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || 
has(self.forProvider.workloadType) || (has(self.initProvider) && has(self.initProvider.workloadType))",message="spec.forProvider.workloadType is a required parameter" + Spec BackupPolicyVMWorkloadSpec `json:"spec"` + Status BackupPolicyVMWorkloadStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// BackupPolicyVMWorkloadList contains a list of BackupPolicyVMWorkloads +type BackupPolicyVMWorkloadList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []BackupPolicyVMWorkload `json:"items"` +} + +// Repository type metadata. +var ( + BackupPolicyVMWorkload_Kind = "BackupPolicyVMWorkload" + BackupPolicyVMWorkload_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: BackupPolicyVMWorkload_Kind}.String() + BackupPolicyVMWorkload_KindAPIVersion = BackupPolicyVMWorkload_Kind + "." + CRDGroupVersion.String() + BackupPolicyVMWorkload_GroupVersionKind = CRDGroupVersion.WithKind(BackupPolicyVMWorkload_Kind) +) + +func init() { + SchemeBuilder.Register(&BackupPolicyVMWorkload{}, &BackupPolicyVMWorkloadList{}) +} diff --git a/apis/recoveryservices/v1beta2/zz_generated.conversion_hubs.go b/apis/recoveryservices/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 000000000..c6e8e9985 --- /dev/null +++ b/apis/recoveryservices/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,22 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *BackupPolicyFileShare) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *BackupPolicyVM) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *BackupPolicyVMWorkload) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *SiteRecoveryProtectionContainerMapping) Hub() {} + +// Hub marks this type as a conversion hub. 
+func (tr *Vault) Hub() {} diff --git a/apis/recoveryservices/v1beta2/zz_generated.deepcopy.go b/apis/recoveryservices/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..b4032ba72 --- /dev/null +++ b/apis/recoveryservices/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,4304 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutomaticUpdateInitParameters) DeepCopyInto(out *AutomaticUpdateInitParameters) { + *out = *in + if in.AuthenticationType != nil { + in, out := &in.AuthenticationType, &out.AuthenticationType + *out = new(string) + **out = **in + } + if in.AutomationAccountID != nil { + in, out := &in.AutomationAccountID, &out.AutomationAccountID + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutomaticUpdateInitParameters. +func (in *AutomaticUpdateInitParameters) DeepCopy() *AutomaticUpdateInitParameters { + if in == nil { + return nil + } + out := new(AutomaticUpdateInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutomaticUpdateObservation) DeepCopyInto(out *AutomaticUpdateObservation) { + *out = *in + if in.AuthenticationType != nil { + in, out := &in.AuthenticationType, &out.AuthenticationType + *out = new(string) + **out = **in + } + if in.AutomationAccountID != nil { + in, out := &in.AutomationAccountID, &out.AutomationAccountID + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutomaticUpdateObservation. +func (in *AutomaticUpdateObservation) DeepCopy() *AutomaticUpdateObservation { + if in == nil { + return nil + } + out := new(AutomaticUpdateObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutomaticUpdateParameters) DeepCopyInto(out *AutomaticUpdateParameters) { + *out = *in + if in.AuthenticationType != nil { + in, out := &in.AuthenticationType, &out.AuthenticationType + *out = new(string) + **out = **in + } + if in.AutomationAccountID != nil { + in, out := &in.AutomationAccountID, &out.AutomationAccountID + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutomaticUpdateParameters. +func (in *AutomaticUpdateParameters) DeepCopy() *AutomaticUpdateParameters { + if in == nil { + return nil + } + out := new(AutomaticUpdateParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupInitParameters) DeepCopyInto(out *BackupInitParameters) { + *out = *in + if in.Frequency != nil { + in, out := &in.Frequency, &out.Frequency + *out = new(string) + **out = **in + } + if in.Hourly != nil { + in, out := &in.Hourly, &out.Hourly + *out = new(HourlyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Time != nil { + in, out := &in.Time, &out.Time + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupInitParameters. +func (in *BackupInitParameters) DeepCopy() *BackupInitParameters { + if in == nil { + return nil + } + out := new(BackupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupObservation) DeepCopyInto(out *BackupObservation) { + *out = *in + if in.Frequency != nil { + in, out := &in.Frequency, &out.Frequency + *out = new(string) + **out = **in + } + if in.Hourly != nil { + in, out := &in.Hourly, &out.Hourly + *out = new(HourlyObservation) + (*in).DeepCopyInto(*out) + } + if in.Time != nil { + in, out := &in.Time, &out.Time + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupObservation. +func (in *BackupObservation) DeepCopy() *BackupObservation { + if in == nil { + return nil + } + out := new(BackupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupParameters) DeepCopyInto(out *BackupParameters) { + *out = *in + if in.Frequency != nil { + in, out := &in.Frequency, &out.Frequency + *out = new(string) + **out = **in + } + if in.Hourly != nil { + in, out := &in.Hourly, &out.Hourly + *out = new(HourlyParameters) + (*in).DeepCopyInto(*out) + } + if in.Time != nil { + in, out := &in.Time, &out.Time + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupParameters. +func (in *BackupParameters) DeepCopy() *BackupParameters { + if in == nil { + return nil + } + out := new(BackupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupPolicyFileShare) DeepCopyInto(out *BackupPolicyFileShare) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyFileShare. +func (in *BackupPolicyFileShare) DeepCopy() *BackupPolicyFileShare { + if in == nil { + return nil + } + out := new(BackupPolicyFileShare) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BackupPolicyFileShare) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupPolicyFileShareInitParameters) DeepCopyInto(out *BackupPolicyFileShareInitParameters) { + *out = *in + if in.Backup != nil { + in, out := &in.Backup, &out.Backup + *out = new(BackupInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RetentionDaily != nil { + in, out := &in.RetentionDaily, &out.RetentionDaily + *out = new(RetentionDailyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RetentionMonthly != nil { + in, out := &in.RetentionMonthly, &out.RetentionMonthly + *out = new(RetentionMonthlyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RetentionWeekly != nil { + in, out := &in.RetentionWeekly, &out.RetentionWeekly + *out = new(RetentionWeeklyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RetentionYearly != nil { + in, out := &in.RetentionYearly, &out.RetentionYearly + *out = new(RetentionYearlyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Timezone != nil { + in, out := &in.Timezone, &out.Timezone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyFileShareInitParameters. +func (in *BackupPolicyFileShareInitParameters) DeepCopy() *BackupPolicyFileShareInitParameters { + if in == nil { + return nil + } + out := new(BackupPolicyFileShareInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupPolicyFileShareList) DeepCopyInto(out *BackupPolicyFileShareList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BackupPolicyFileShare, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyFileShareList. 
+func (in *BackupPolicyFileShareList) DeepCopy() *BackupPolicyFileShareList { + if in == nil { + return nil + } + out := new(BackupPolicyFileShareList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BackupPolicyFileShareList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupPolicyFileShareObservation) DeepCopyInto(out *BackupPolicyFileShareObservation) { + *out = *in + if in.Backup != nil { + in, out := &in.Backup, &out.Backup + *out = new(BackupObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.RecoveryVaultName != nil { + in, out := &in.RecoveryVaultName, &out.RecoveryVaultName + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.RetentionDaily != nil { + in, out := &in.RetentionDaily, &out.RetentionDaily + *out = new(RetentionDailyObservation) + (*in).DeepCopyInto(*out) + } + if in.RetentionMonthly != nil { + in, out := &in.RetentionMonthly, &out.RetentionMonthly + *out = new(RetentionMonthlyObservation) + (*in).DeepCopyInto(*out) + } + if in.RetentionWeekly != nil { + in, out := &in.RetentionWeekly, &out.RetentionWeekly + *out = new(RetentionWeeklyObservation) + (*in).DeepCopyInto(*out) + } + if in.RetentionYearly != nil { + in, out := &in.RetentionYearly, &out.RetentionYearly + *out = new(RetentionYearlyObservation) + (*in).DeepCopyInto(*out) + } + if in.Timezone != nil { + in, out := &in.Timezone, &out.Timezone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
BackupPolicyFileShareObservation. +func (in *BackupPolicyFileShareObservation) DeepCopy() *BackupPolicyFileShareObservation { + if in == nil { + return nil + } + out := new(BackupPolicyFileShareObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupPolicyFileShareParameters) DeepCopyInto(out *BackupPolicyFileShareParameters) { + *out = *in + if in.Backup != nil { + in, out := &in.Backup, &out.Backup + *out = new(BackupParameters) + (*in).DeepCopyInto(*out) + } + if in.RecoveryVaultName != nil { + in, out := &in.RecoveryVaultName, &out.RecoveryVaultName + *out = new(string) + **out = **in + } + if in.RecoveryVaultNameRef != nil { + in, out := &in.RecoveryVaultNameRef, &out.RecoveryVaultNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RecoveryVaultNameSelector != nil { + in, out := &in.RecoveryVaultNameSelector, &out.RecoveryVaultNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RetentionDaily != nil { + in, out := &in.RetentionDaily, &out.RetentionDaily + *out = new(RetentionDailyParameters) + (*in).DeepCopyInto(*out) + } + if in.RetentionMonthly != nil { + in, out := &in.RetentionMonthly, &out.RetentionMonthly + *out = new(RetentionMonthlyParameters) + (*in).DeepCopyInto(*out) + } + if in.RetentionWeekly != nil { + in, out := &in.RetentionWeekly, &out.RetentionWeekly + *out = new(RetentionWeeklyParameters) + 
(*in).DeepCopyInto(*out) + } + if in.RetentionYearly != nil { + in, out := &in.RetentionYearly, &out.RetentionYearly + *out = new(RetentionYearlyParameters) + (*in).DeepCopyInto(*out) + } + if in.Timezone != nil { + in, out := &in.Timezone, &out.Timezone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyFileShareParameters. +func (in *BackupPolicyFileShareParameters) DeepCopy() *BackupPolicyFileShareParameters { + if in == nil { + return nil + } + out := new(BackupPolicyFileShareParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupPolicyFileShareSpec) DeepCopyInto(out *BackupPolicyFileShareSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyFileShareSpec. +func (in *BackupPolicyFileShareSpec) DeepCopy() *BackupPolicyFileShareSpec { + if in == nil { + return nil + } + out := new(BackupPolicyFileShareSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupPolicyFileShareStatus) DeepCopyInto(out *BackupPolicyFileShareStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyFileShareStatus. 
+func (in *BackupPolicyFileShareStatus) DeepCopy() *BackupPolicyFileShareStatus { + if in == nil { + return nil + } + out := new(BackupPolicyFileShareStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupPolicyVM) DeepCopyInto(out *BackupPolicyVM) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyVM. +func (in *BackupPolicyVM) DeepCopy() *BackupPolicyVM { + if in == nil { + return nil + } + out := new(BackupPolicyVM) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BackupPolicyVM) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupPolicyVMBackupInitParameters) DeepCopyInto(out *BackupPolicyVMBackupInitParameters) { + *out = *in + if in.Frequency != nil { + in, out := &in.Frequency, &out.Frequency + *out = new(string) + **out = **in + } + if in.HourDuration != nil { + in, out := &in.HourDuration, &out.HourDuration + *out = new(float64) + **out = **in + } + if in.HourInterval != nil { + in, out := &in.HourInterval, &out.HourInterval + *out = new(float64) + **out = **in + } + if in.Time != nil { + in, out := &in.Time, &out.Time + *out = new(string) + **out = **in + } + if in.Weekdays != nil { + in, out := &in.Weekdays, &out.Weekdays + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyVMBackupInitParameters. +func (in *BackupPolicyVMBackupInitParameters) DeepCopy() *BackupPolicyVMBackupInitParameters { + if in == nil { + return nil + } + out := new(BackupPolicyVMBackupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupPolicyVMBackupObservation) DeepCopyInto(out *BackupPolicyVMBackupObservation) { + *out = *in + if in.Frequency != nil { + in, out := &in.Frequency, &out.Frequency + *out = new(string) + **out = **in + } + if in.HourDuration != nil { + in, out := &in.HourDuration, &out.HourDuration + *out = new(float64) + **out = **in + } + if in.HourInterval != nil { + in, out := &in.HourInterval, &out.HourInterval + *out = new(float64) + **out = **in + } + if in.Time != nil { + in, out := &in.Time, &out.Time + *out = new(string) + **out = **in + } + if in.Weekdays != nil { + in, out := &in.Weekdays, &out.Weekdays + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyVMBackupObservation. +func (in *BackupPolicyVMBackupObservation) DeepCopy() *BackupPolicyVMBackupObservation { + if in == nil { + return nil + } + out := new(BackupPolicyVMBackupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupPolicyVMBackupParameters) DeepCopyInto(out *BackupPolicyVMBackupParameters) { + *out = *in + if in.Frequency != nil { + in, out := &in.Frequency, &out.Frequency + *out = new(string) + **out = **in + } + if in.HourDuration != nil { + in, out := &in.HourDuration, &out.HourDuration + *out = new(float64) + **out = **in + } + if in.HourInterval != nil { + in, out := &in.HourInterval, &out.HourInterval + *out = new(float64) + **out = **in + } + if in.Time != nil { + in, out := &in.Time, &out.Time + *out = new(string) + **out = **in + } + if in.Weekdays != nil { + in, out := &in.Weekdays, &out.Weekdays + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyVMBackupParameters. +func (in *BackupPolicyVMBackupParameters) DeepCopy() *BackupPolicyVMBackupParameters { + if in == nil { + return nil + } + out := new(BackupPolicyVMBackupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupPolicyVMInitParameters) DeepCopyInto(out *BackupPolicyVMInitParameters) { + *out = *in + if in.Backup != nil { + in, out := &in.Backup, &out.Backup + *out = new(BackupPolicyVMBackupInitParameters) + (*in).DeepCopyInto(*out) + } + if in.InstantRestoreResourceGroup != nil { + in, out := &in.InstantRestoreResourceGroup, &out.InstantRestoreResourceGroup + *out = new(InstantRestoreResourceGroupInitParameters) + (*in).DeepCopyInto(*out) + } + if in.InstantRestoreRetentionDays != nil { + in, out := &in.InstantRestoreRetentionDays, &out.InstantRestoreRetentionDays + *out = new(float64) + **out = **in + } + if in.PolicyType != nil { + in, out := &in.PolicyType, &out.PolicyType + *out = new(string) + **out = **in + } + if in.RetentionDaily != nil { + in, out := &in.RetentionDaily, &out.RetentionDaily + *out = new(BackupPolicyVMRetentionDailyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RetentionMonthly != nil { + in, out := &in.RetentionMonthly, &out.RetentionMonthly + *out = new(BackupPolicyVMRetentionMonthlyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RetentionWeekly != nil { + in, out := &in.RetentionWeekly, &out.RetentionWeekly + *out = new(BackupPolicyVMRetentionWeeklyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RetentionYearly != nil { + in, out := &in.RetentionYearly, &out.RetentionYearly + *out = new(BackupPolicyVMRetentionYearlyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Timezone != nil { + in, out := &in.Timezone, &out.Timezone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyVMInitParameters. +func (in *BackupPolicyVMInitParameters) DeepCopy() *BackupPolicyVMInitParameters { + if in == nil { + return nil + } + out := new(BackupPolicyVMInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupPolicyVMList) DeepCopyInto(out *BackupPolicyVMList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BackupPolicyVM, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyVMList. +func (in *BackupPolicyVMList) DeepCopy() *BackupPolicyVMList { + if in == nil { + return nil + } + out := new(BackupPolicyVMList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BackupPolicyVMList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupPolicyVMObservation) DeepCopyInto(out *BackupPolicyVMObservation) { + *out = *in + if in.Backup != nil { + in, out := &in.Backup, &out.Backup + *out = new(BackupPolicyVMBackupObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.InstantRestoreResourceGroup != nil { + in, out := &in.InstantRestoreResourceGroup, &out.InstantRestoreResourceGroup + *out = new(InstantRestoreResourceGroupObservation) + (*in).DeepCopyInto(*out) + } + if in.InstantRestoreRetentionDays != nil { + in, out := &in.InstantRestoreRetentionDays, &out.InstantRestoreRetentionDays + *out = new(float64) + **out = **in + } + if in.PolicyType != nil { + in, out := &in.PolicyType, &out.PolicyType + *out = new(string) + **out = **in + } + if in.RecoveryVaultName != nil { + in, out := &in.RecoveryVaultName, &out.RecoveryVaultName + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, 
&out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.RetentionDaily != nil { + in, out := &in.RetentionDaily, &out.RetentionDaily + *out = new(BackupPolicyVMRetentionDailyObservation) + (*in).DeepCopyInto(*out) + } + if in.RetentionMonthly != nil { + in, out := &in.RetentionMonthly, &out.RetentionMonthly + *out = new(BackupPolicyVMRetentionMonthlyObservation) + (*in).DeepCopyInto(*out) + } + if in.RetentionWeekly != nil { + in, out := &in.RetentionWeekly, &out.RetentionWeekly + *out = new(BackupPolicyVMRetentionWeeklyObservation) + (*in).DeepCopyInto(*out) + } + if in.RetentionYearly != nil { + in, out := &in.RetentionYearly, &out.RetentionYearly + *out = new(BackupPolicyVMRetentionYearlyObservation) + (*in).DeepCopyInto(*out) + } + if in.Timezone != nil { + in, out := &in.Timezone, &out.Timezone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyVMObservation. +func (in *BackupPolicyVMObservation) DeepCopy() *BackupPolicyVMObservation { + if in == nil { + return nil + } + out := new(BackupPolicyVMObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupPolicyVMParameters) DeepCopyInto(out *BackupPolicyVMParameters) { + *out = *in + if in.Backup != nil { + in, out := &in.Backup, &out.Backup + *out = new(BackupPolicyVMBackupParameters) + (*in).DeepCopyInto(*out) + } + if in.InstantRestoreResourceGroup != nil { + in, out := &in.InstantRestoreResourceGroup, &out.InstantRestoreResourceGroup + *out = new(InstantRestoreResourceGroupParameters) + (*in).DeepCopyInto(*out) + } + if in.InstantRestoreRetentionDays != nil { + in, out := &in.InstantRestoreRetentionDays, &out.InstantRestoreRetentionDays + *out = new(float64) + **out = **in + } + if in.PolicyType != nil { + in, out := &in.PolicyType, &out.PolicyType + *out = new(string) + **out = **in + } + if in.RecoveryVaultName != nil { + in, out := &in.RecoveryVaultName, &out.RecoveryVaultName + *out = new(string) + **out = **in + } + if in.RecoveryVaultNameRef != nil { + in, out := &in.RecoveryVaultNameRef, &out.RecoveryVaultNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RecoveryVaultNameSelector != nil { + in, out := &in.RecoveryVaultNameSelector, &out.RecoveryVaultNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RetentionDaily != nil { + in, out := &in.RetentionDaily, &out.RetentionDaily + *out = new(BackupPolicyVMRetentionDailyParameters) + (*in).DeepCopyInto(*out) + } + if in.RetentionMonthly != nil { + in, out := &in.RetentionMonthly, &out.RetentionMonthly + *out = new(BackupPolicyVMRetentionMonthlyParameters) + (*in).DeepCopyInto(*out) 
+ } + if in.RetentionWeekly != nil { + in, out := &in.RetentionWeekly, &out.RetentionWeekly + *out = new(BackupPolicyVMRetentionWeeklyParameters) + (*in).DeepCopyInto(*out) + } + if in.RetentionYearly != nil { + in, out := &in.RetentionYearly, &out.RetentionYearly + *out = new(BackupPolicyVMRetentionYearlyParameters) + (*in).DeepCopyInto(*out) + } + if in.Timezone != nil { + in, out := &in.Timezone, &out.Timezone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyVMParameters. +func (in *BackupPolicyVMParameters) DeepCopy() *BackupPolicyVMParameters { + if in == nil { + return nil + } + out := new(BackupPolicyVMParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupPolicyVMRetentionDailyInitParameters) DeepCopyInto(out *BackupPolicyVMRetentionDailyInitParameters) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyVMRetentionDailyInitParameters. +func (in *BackupPolicyVMRetentionDailyInitParameters) DeepCopy() *BackupPolicyVMRetentionDailyInitParameters { + if in == nil { + return nil + } + out := new(BackupPolicyVMRetentionDailyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupPolicyVMRetentionDailyObservation) DeepCopyInto(out *BackupPolicyVMRetentionDailyObservation) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyVMRetentionDailyObservation. 
+func (in *BackupPolicyVMRetentionDailyObservation) DeepCopy() *BackupPolicyVMRetentionDailyObservation { + if in == nil { + return nil + } + out := new(BackupPolicyVMRetentionDailyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupPolicyVMRetentionDailyParameters) DeepCopyInto(out *BackupPolicyVMRetentionDailyParameters) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyVMRetentionDailyParameters. +func (in *BackupPolicyVMRetentionDailyParameters) DeepCopy() *BackupPolicyVMRetentionDailyParameters { + if in == nil { + return nil + } + out := new(BackupPolicyVMRetentionDailyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupPolicyVMRetentionMonthlyInitParameters) DeepCopyInto(out *BackupPolicyVMRetentionMonthlyInitParameters) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.IncludeLastDays != nil { + in, out := &in.IncludeLastDays, &out.IncludeLastDays + *out = new(bool) + **out = **in + } + if in.Weekdays != nil { + in, out := &in.Weekdays, &out.Weekdays + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Weeks != nil { + in, out := &in.Weeks, &out.Weeks + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyVMRetentionMonthlyInitParameters. +func (in *BackupPolicyVMRetentionMonthlyInitParameters) DeepCopy() *BackupPolicyVMRetentionMonthlyInitParameters { + if in == nil { + return nil + } + out := new(BackupPolicyVMRetentionMonthlyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupPolicyVMRetentionMonthlyObservation) DeepCopyInto(out *BackupPolicyVMRetentionMonthlyObservation) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.IncludeLastDays != nil { + in, out := &in.IncludeLastDays, &out.IncludeLastDays + *out = new(bool) + **out = **in + } + if in.Weekdays != nil { + in, out := &in.Weekdays, &out.Weekdays + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Weeks != nil { + in, out := &in.Weeks, &out.Weeks + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyVMRetentionMonthlyObservation. +func (in *BackupPolicyVMRetentionMonthlyObservation) DeepCopy() *BackupPolicyVMRetentionMonthlyObservation { + if in == nil { + return nil + } + out := new(BackupPolicyVMRetentionMonthlyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupPolicyVMRetentionMonthlyParameters) DeepCopyInto(out *BackupPolicyVMRetentionMonthlyParameters) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.IncludeLastDays != nil { + in, out := &in.IncludeLastDays, &out.IncludeLastDays + *out = new(bool) + **out = **in + } + if in.Weekdays != nil { + in, out := &in.Weekdays, &out.Weekdays + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Weeks != nil { + in, out := &in.Weeks, &out.Weeks + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyVMRetentionMonthlyParameters. +func (in *BackupPolicyVMRetentionMonthlyParameters) DeepCopy() *BackupPolicyVMRetentionMonthlyParameters { + if in == nil { + return nil + } + out := new(BackupPolicyVMRetentionMonthlyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupPolicyVMRetentionWeeklyInitParameters) DeepCopyInto(out *BackupPolicyVMRetentionWeeklyInitParameters) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Weekdays != nil { + in, out := &in.Weekdays, &out.Weekdays + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyVMRetentionWeeklyInitParameters. +func (in *BackupPolicyVMRetentionWeeklyInitParameters) DeepCopy() *BackupPolicyVMRetentionWeeklyInitParameters { + if in == nil { + return nil + } + out := new(BackupPolicyVMRetentionWeeklyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupPolicyVMRetentionWeeklyObservation) DeepCopyInto(out *BackupPolicyVMRetentionWeeklyObservation) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Weekdays != nil { + in, out := &in.Weekdays, &out.Weekdays + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyVMRetentionWeeklyObservation. +func (in *BackupPolicyVMRetentionWeeklyObservation) DeepCopy() *BackupPolicyVMRetentionWeeklyObservation { + if in == nil { + return nil + } + out := new(BackupPolicyVMRetentionWeeklyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupPolicyVMRetentionWeeklyParameters) DeepCopyInto(out *BackupPolicyVMRetentionWeeklyParameters) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Weekdays != nil { + in, out := &in.Weekdays, &out.Weekdays + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyVMRetentionWeeklyParameters. +func (in *BackupPolicyVMRetentionWeeklyParameters) DeepCopy() *BackupPolicyVMRetentionWeeklyParameters { + if in == nil { + return nil + } + out := new(BackupPolicyVMRetentionWeeklyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupPolicyVMRetentionYearlyInitParameters) DeepCopyInto(out *BackupPolicyVMRetentionYearlyInitParameters) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.IncludeLastDays != nil { + in, out := &in.IncludeLastDays, &out.IncludeLastDays + *out = new(bool) + **out = **in + } + if in.Months != nil { + in, out := &in.Months, &out.Months + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Weekdays != nil { + in, out := &in.Weekdays, &out.Weekdays + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Weeks != nil 
{ + in, out := &in.Weeks, &out.Weeks + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyVMRetentionYearlyInitParameters. +func (in *BackupPolicyVMRetentionYearlyInitParameters) DeepCopy() *BackupPolicyVMRetentionYearlyInitParameters { + if in == nil { + return nil + } + out := new(BackupPolicyVMRetentionYearlyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupPolicyVMRetentionYearlyObservation) DeepCopyInto(out *BackupPolicyVMRetentionYearlyObservation) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.IncludeLastDays != nil { + in, out := &in.IncludeLastDays, &out.IncludeLastDays + *out = new(bool) + **out = **in + } + if in.Months != nil { + in, out := &in.Months, &out.Months + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Weekdays != nil { + in, out := &in.Weekdays, &out.Weekdays + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Weeks != nil { + in, out := &in.Weeks, &out.Weeks + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated 
deepcopy function, copying the receiver, creating a new BackupPolicyVMRetentionYearlyObservation. +func (in *BackupPolicyVMRetentionYearlyObservation) DeepCopy() *BackupPolicyVMRetentionYearlyObservation { + if in == nil { + return nil + } + out := new(BackupPolicyVMRetentionYearlyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupPolicyVMRetentionYearlyParameters) DeepCopyInto(out *BackupPolicyVMRetentionYearlyParameters) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.IncludeLastDays != nil { + in, out := &in.IncludeLastDays, &out.IncludeLastDays + *out = new(bool) + **out = **in + } + if in.Months != nil { + in, out := &in.Months, &out.Months + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Weekdays != nil { + in, out := &in.Weekdays, &out.Weekdays + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Weeks != nil { + in, out := &in.Weeks, &out.Weeks + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyVMRetentionYearlyParameters. 
+func (in *BackupPolicyVMRetentionYearlyParameters) DeepCopy() *BackupPolicyVMRetentionYearlyParameters { + if in == nil { + return nil + } + out := new(BackupPolicyVMRetentionYearlyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupPolicyVMSpec) DeepCopyInto(out *BackupPolicyVMSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyVMSpec. +func (in *BackupPolicyVMSpec) DeepCopy() *BackupPolicyVMSpec { + if in == nil { + return nil + } + out := new(BackupPolicyVMSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupPolicyVMStatus) DeepCopyInto(out *BackupPolicyVMStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyVMStatus. +func (in *BackupPolicyVMStatus) DeepCopy() *BackupPolicyVMStatus { + if in == nil { + return nil + } + out := new(BackupPolicyVMStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupPolicyVMWorkload) DeepCopyInto(out *BackupPolicyVMWorkload) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyVMWorkload. 
+func (in *BackupPolicyVMWorkload) DeepCopy() *BackupPolicyVMWorkload { + if in == nil { + return nil + } + out := new(BackupPolicyVMWorkload) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BackupPolicyVMWorkload) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupPolicyVMWorkloadInitParameters) DeepCopyInto(out *BackupPolicyVMWorkloadInitParameters) { + *out = *in + if in.ProtectionPolicy != nil { + in, out := &in.ProtectionPolicy, &out.ProtectionPolicy + *out = make([]ProtectionPolicyInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Settings != nil { + in, out := &in.Settings, &out.Settings + *out = new(SettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.WorkloadType != nil { + in, out := &in.WorkloadType, &out.WorkloadType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyVMWorkloadInitParameters. +func (in *BackupPolicyVMWorkloadInitParameters) DeepCopy() *BackupPolicyVMWorkloadInitParameters { + if in == nil { + return nil + } + out := new(BackupPolicyVMWorkloadInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupPolicyVMWorkloadList) DeepCopyInto(out *BackupPolicyVMWorkloadList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BackupPolicyVMWorkload, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyVMWorkloadList. +func (in *BackupPolicyVMWorkloadList) DeepCopy() *BackupPolicyVMWorkloadList { + if in == nil { + return nil + } + out := new(BackupPolicyVMWorkloadList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BackupPolicyVMWorkloadList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupPolicyVMWorkloadObservation) DeepCopyInto(out *BackupPolicyVMWorkloadObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.ProtectionPolicy != nil { + in, out := &in.ProtectionPolicy, &out.ProtectionPolicy + *out = make([]ProtectionPolicyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RecoveryVaultName != nil { + in, out := &in.RecoveryVaultName, &out.RecoveryVaultName + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Settings != nil { + in, out := &in.Settings, &out.Settings + *out = new(SettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.WorkloadType != nil { + in, out := &in.WorkloadType, &out.WorkloadType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyVMWorkloadObservation. +func (in *BackupPolicyVMWorkloadObservation) DeepCopy() *BackupPolicyVMWorkloadObservation { + if in == nil { + return nil + } + out := new(BackupPolicyVMWorkloadObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupPolicyVMWorkloadParameters) DeepCopyInto(out *BackupPolicyVMWorkloadParameters) { + *out = *in + if in.ProtectionPolicy != nil { + in, out := &in.ProtectionPolicy, &out.ProtectionPolicy + *out = make([]ProtectionPolicyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RecoveryVaultName != nil { + in, out := &in.RecoveryVaultName, &out.RecoveryVaultName + *out = new(string) + **out = **in + } + if in.RecoveryVaultNameRef != nil { + in, out := &in.RecoveryVaultNameRef, &out.RecoveryVaultNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RecoveryVaultNameSelector != nil { + in, out := &in.RecoveryVaultNameSelector, &out.RecoveryVaultNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Settings != nil { + in, out := &in.Settings, &out.Settings + *out = new(SettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.WorkloadType != nil { + in, out := &in.WorkloadType, &out.WorkloadType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyVMWorkloadParameters. +func (in *BackupPolicyVMWorkloadParameters) DeepCopy() *BackupPolicyVMWorkloadParameters { + if in == nil { + return nil + } + out := new(BackupPolicyVMWorkloadParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupPolicyVMWorkloadSpec) DeepCopyInto(out *BackupPolicyVMWorkloadSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyVMWorkloadSpec. +func (in *BackupPolicyVMWorkloadSpec) DeepCopy() *BackupPolicyVMWorkloadSpec { + if in == nil { + return nil + } + out := new(BackupPolicyVMWorkloadSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupPolicyVMWorkloadStatus) DeepCopyInto(out *BackupPolicyVMWorkloadStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPolicyVMWorkloadStatus. +func (in *BackupPolicyVMWorkloadStatus) DeepCopy() *BackupPolicyVMWorkloadStatus { + if in == nil { + return nil + } + out := new(BackupPolicyVMWorkloadStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EncryptionInitParameters) DeepCopyInto(out *EncryptionInitParameters) { + *out = *in + if in.InfrastructureEncryptionEnabled != nil { + in, out := &in.InfrastructureEncryptionEnabled, &out.InfrastructureEncryptionEnabled + *out = new(bool) + **out = **in + } + if in.KeyID != nil { + in, out := &in.KeyID, &out.KeyID + *out = new(string) + **out = **in + } + if in.UseSystemAssignedIdentity != nil { + in, out := &in.UseSystemAssignedIdentity, &out.UseSystemAssignedIdentity + *out = new(bool) + **out = **in + } + if in.UserAssignedIdentityID != nil { + in, out := &in.UserAssignedIdentityID, &out.UserAssignedIdentityID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionInitParameters. +func (in *EncryptionInitParameters) DeepCopy() *EncryptionInitParameters { + if in == nil { + return nil + } + out := new(EncryptionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionObservation) DeepCopyInto(out *EncryptionObservation) { + *out = *in + if in.InfrastructureEncryptionEnabled != nil { + in, out := &in.InfrastructureEncryptionEnabled, &out.InfrastructureEncryptionEnabled + *out = new(bool) + **out = **in + } + if in.KeyID != nil { + in, out := &in.KeyID, &out.KeyID + *out = new(string) + **out = **in + } + if in.UseSystemAssignedIdentity != nil { + in, out := &in.UseSystemAssignedIdentity, &out.UseSystemAssignedIdentity + *out = new(bool) + **out = **in + } + if in.UserAssignedIdentityID != nil { + in, out := &in.UserAssignedIdentityID, &out.UserAssignedIdentityID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionObservation. 
+func (in *EncryptionObservation) DeepCopy() *EncryptionObservation { + if in == nil { + return nil + } + out := new(EncryptionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionParameters) DeepCopyInto(out *EncryptionParameters) { + *out = *in + if in.InfrastructureEncryptionEnabled != nil { + in, out := &in.InfrastructureEncryptionEnabled, &out.InfrastructureEncryptionEnabled + *out = new(bool) + **out = **in + } + if in.KeyID != nil { + in, out := &in.KeyID, &out.KeyID + *out = new(string) + **out = **in + } + if in.UseSystemAssignedIdentity != nil { + in, out := &in.UseSystemAssignedIdentity, &out.UseSystemAssignedIdentity + *out = new(bool) + **out = **in + } + if in.UserAssignedIdentityID != nil { + in, out := &in.UserAssignedIdentityID, &out.UserAssignedIdentityID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionParameters. +func (in *EncryptionParameters) DeepCopy() *EncryptionParameters { + if in == nil { + return nil + } + out := new(EncryptionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HourlyInitParameters) DeepCopyInto(out *HourlyInitParameters) { + *out = *in + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } + if in.WindowDuration != nil { + in, out := &in.WindowDuration, &out.WindowDuration + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HourlyInitParameters. 
+func (in *HourlyInitParameters) DeepCopy() *HourlyInitParameters { + if in == nil { + return nil + } + out := new(HourlyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HourlyObservation) DeepCopyInto(out *HourlyObservation) { + *out = *in + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } + if in.WindowDuration != nil { + in, out := &in.WindowDuration, &out.WindowDuration + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HourlyObservation. +func (in *HourlyObservation) DeepCopy() *HourlyObservation { + if in == nil { + return nil + } + out := new(HourlyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HourlyParameters) DeepCopyInto(out *HourlyParameters) { + *out = *in + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(float64) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } + if in.WindowDuration != nil { + in, out := &in.WindowDuration, &out.WindowDuration + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HourlyParameters. +func (in *HourlyParameters) DeepCopy() *HourlyParameters { + if in == nil { + return nil + } + out := new(HourlyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentityInitParameters) DeepCopyInto(out *IdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityInitParameters. +func (in *IdentityInitParameters) DeepCopy() *IdentityInitParameters { + if in == nil { + return nil + } + out := new(IdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityObservation) DeepCopyInto(out *IdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityObservation. +func (in *IdentityObservation) DeepCopy() *IdentityObservation { + if in == nil { + return nil + } + out := new(IdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentityParameters) DeepCopyInto(out *IdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityParameters. +func (in *IdentityParameters) DeepCopy() *IdentityParameters { + if in == nil { + return nil + } + out := new(IdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstantRestoreResourceGroupInitParameters) DeepCopyInto(out *InstantRestoreResourceGroupInitParameters) { + *out = *in + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Suffix != nil { + in, out := &in.Suffix, &out.Suffix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstantRestoreResourceGroupInitParameters. +func (in *InstantRestoreResourceGroupInitParameters) DeepCopy() *InstantRestoreResourceGroupInitParameters { + if in == nil { + return nil + } + out := new(InstantRestoreResourceGroupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InstantRestoreResourceGroupObservation) DeepCopyInto(out *InstantRestoreResourceGroupObservation) { + *out = *in + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Suffix != nil { + in, out := &in.Suffix, &out.Suffix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstantRestoreResourceGroupObservation. +func (in *InstantRestoreResourceGroupObservation) DeepCopy() *InstantRestoreResourceGroupObservation { + if in == nil { + return nil + } + out := new(InstantRestoreResourceGroupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstantRestoreResourceGroupParameters) DeepCopyInto(out *InstantRestoreResourceGroupParameters) { + *out = *in + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(string) + **out = **in + } + if in.Suffix != nil { + in, out := &in.Suffix, &out.Suffix + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstantRestoreResourceGroupParameters. +func (in *InstantRestoreResourceGroupParameters) DeepCopy() *InstantRestoreResourceGroupParameters { + if in == nil { + return nil + } + out := new(InstantRestoreResourceGroupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitoringInitParameters) DeepCopyInto(out *MonitoringInitParameters) { + *out = *in + if in.AlertsForAllJobFailuresEnabled != nil { + in, out := &in.AlertsForAllJobFailuresEnabled, &out.AlertsForAllJobFailuresEnabled + *out = new(bool) + **out = **in + } + if in.AlertsForCriticalOperationFailuresEnabled != nil { + in, out := &in.AlertsForCriticalOperationFailuresEnabled, &out.AlertsForCriticalOperationFailuresEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringInitParameters. +func (in *MonitoringInitParameters) DeepCopy() *MonitoringInitParameters { + if in == nil { + return nil + } + out := new(MonitoringInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitoringObservation) DeepCopyInto(out *MonitoringObservation) { + *out = *in + if in.AlertsForAllJobFailuresEnabled != nil { + in, out := &in.AlertsForAllJobFailuresEnabled, &out.AlertsForAllJobFailuresEnabled + *out = new(bool) + **out = **in + } + if in.AlertsForCriticalOperationFailuresEnabled != nil { + in, out := &in.AlertsForCriticalOperationFailuresEnabled, &out.AlertsForCriticalOperationFailuresEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringObservation. +func (in *MonitoringObservation) DeepCopy() *MonitoringObservation { + if in == nil { + return nil + } + out := new(MonitoringObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitoringParameters) DeepCopyInto(out *MonitoringParameters) { + *out = *in + if in.AlertsForAllJobFailuresEnabled != nil { + in, out := &in.AlertsForAllJobFailuresEnabled, &out.AlertsForAllJobFailuresEnabled + *out = new(bool) + **out = **in + } + if in.AlertsForCriticalOperationFailuresEnabled != nil { + in, out := &in.AlertsForCriticalOperationFailuresEnabled, &out.AlertsForCriticalOperationFailuresEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringParameters. +func (in *MonitoringParameters) DeepCopy() *MonitoringParameters { + if in == nil { + return nil + } + out := new(MonitoringParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProtectionPolicyBackupInitParameters) DeepCopyInto(out *ProtectionPolicyBackupInitParameters) { + *out = *in + if in.Frequency != nil { + in, out := &in.Frequency, &out.Frequency + *out = new(string) + **out = **in + } + if in.FrequencyInMinutes != nil { + in, out := &in.FrequencyInMinutes, &out.FrequencyInMinutes + *out = new(float64) + **out = **in + } + if in.Time != nil { + in, out := &in.Time, &out.Time + *out = new(string) + **out = **in + } + if in.Weekdays != nil { + in, out := &in.Weekdays, &out.Weekdays + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProtectionPolicyBackupInitParameters. 
+func (in *ProtectionPolicyBackupInitParameters) DeepCopy() *ProtectionPolicyBackupInitParameters { + if in == nil { + return nil + } + out := new(ProtectionPolicyBackupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProtectionPolicyBackupObservation) DeepCopyInto(out *ProtectionPolicyBackupObservation) { + *out = *in + if in.Frequency != nil { + in, out := &in.Frequency, &out.Frequency + *out = new(string) + **out = **in + } + if in.FrequencyInMinutes != nil { + in, out := &in.FrequencyInMinutes, &out.FrequencyInMinutes + *out = new(float64) + **out = **in + } + if in.Time != nil { + in, out := &in.Time, &out.Time + *out = new(string) + **out = **in + } + if in.Weekdays != nil { + in, out := &in.Weekdays, &out.Weekdays + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProtectionPolicyBackupObservation. +func (in *ProtectionPolicyBackupObservation) DeepCopy() *ProtectionPolicyBackupObservation { + if in == nil { + return nil + } + out := new(ProtectionPolicyBackupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProtectionPolicyBackupParameters) DeepCopyInto(out *ProtectionPolicyBackupParameters) { + *out = *in + if in.Frequency != nil { + in, out := &in.Frequency, &out.Frequency + *out = new(string) + **out = **in + } + if in.FrequencyInMinutes != nil { + in, out := &in.FrequencyInMinutes, &out.FrequencyInMinutes + *out = new(float64) + **out = **in + } + if in.Time != nil { + in, out := &in.Time, &out.Time + *out = new(string) + **out = **in + } + if in.Weekdays != nil { + in, out := &in.Weekdays, &out.Weekdays + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProtectionPolicyBackupParameters. +func (in *ProtectionPolicyBackupParameters) DeepCopy() *ProtectionPolicyBackupParameters { + if in == nil { + return nil + } + out := new(ProtectionPolicyBackupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProtectionPolicyInitParameters) DeepCopyInto(out *ProtectionPolicyInitParameters) { + *out = *in + if in.Backup != nil { + in, out := &in.Backup, &out.Backup + *out = new(ProtectionPolicyBackupInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PolicyType != nil { + in, out := &in.PolicyType, &out.PolicyType + *out = new(string) + **out = **in + } + if in.RetentionDaily != nil { + in, out := &in.RetentionDaily, &out.RetentionDaily + *out = new(ProtectionPolicyRetentionDailyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RetentionMonthly != nil { + in, out := &in.RetentionMonthly, &out.RetentionMonthly + *out = new(ProtectionPolicyRetentionMonthlyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RetentionWeekly != nil { + in, out := &in.RetentionWeekly, &out.RetentionWeekly + *out = new(ProtectionPolicyRetentionWeeklyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RetentionYearly != nil { + in, out := &in.RetentionYearly, &out.RetentionYearly + *out = new(ProtectionPolicyRetentionYearlyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SimpleRetention != nil { + in, out := &in.SimpleRetention, &out.SimpleRetention + *out = new(SimpleRetentionInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProtectionPolicyInitParameters. +func (in *ProtectionPolicyInitParameters) DeepCopy() *ProtectionPolicyInitParameters { + if in == nil { + return nil + } + out := new(ProtectionPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProtectionPolicyObservation) DeepCopyInto(out *ProtectionPolicyObservation) { + *out = *in + if in.Backup != nil { + in, out := &in.Backup, &out.Backup + *out = new(ProtectionPolicyBackupObservation) + (*in).DeepCopyInto(*out) + } + if in.PolicyType != nil { + in, out := &in.PolicyType, &out.PolicyType + *out = new(string) + **out = **in + } + if in.RetentionDaily != nil { + in, out := &in.RetentionDaily, &out.RetentionDaily + *out = new(ProtectionPolicyRetentionDailyObservation) + (*in).DeepCopyInto(*out) + } + if in.RetentionMonthly != nil { + in, out := &in.RetentionMonthly, &out.RetentionMonthly + *out = new(ProtectionPolicyRetentionMonthlyObservation) + (*in).DeepCopyInto(*out) + } + if in.RetentionWeekly != nil { + in, out := &in.RetentionWeekly, &out.RetentionWeekly + *out = new(ProtectionPolicyRetentionWeeklyObservation) + (*in).DeepCopyInto(*out) + } + if in.RetentionYearly != nil { + in, out := &in.RetentionYearly, &out.RetentionYearly + *out = new(ProtectionPolicyRetentionYearlyObservation) + (*in).DeepCopyInto(*out) + } + if in.SimpleRetention != nil { + in, out := &in.SimpleRetention, &out.SimpleRetention + *out = new(SimpleRetentionObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProtectionPolicyObservation. +func (in *ProtectionPolicyObservation) DeepCopy() *ProtectionPolicyObservation { + if in == nil { + return nil + } + out := new(ProtectionPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProtectionPolicyParameters) DeepCopyInto(out *ProtectionPolicyParameters) { + *out = *in + if in.Backup != nil { + in, out := &in.Backup, &out.Backup + *out = new(ProtectionPolicyBackupParameters) + (*in).DeepCopyInto(*out) + } + if in.PolicyType != nil { + in, out := &in.PolicyType, &out.PolicyType + *out = new(string) + **out = **in + } + if in.RetentionDaily != nil { + in, out := &in.RetentionDaily, &out.RetentionDaily + *out = new(ProtectionPolicyRetentionDailyParameters) + (*in).DeepCopyInto(*out) + } + if in.RetentionMonthly != nil { + in, out := &in.RetentionMonthly, &out.RetentionMonthly + *out = new(ProtectionPolicyRetentionMonthlyParameters) + (*in).DeepCopyInto(*out) + } + if in.RetentionWeekly != nil { + in, out := &in.RetentionWeekly, &out.RetentionWeekly + *out = new(ProtectionPolicyRetentionWeeklyParameters) + (*in).DeepCopyInto(*out) + } + if in.RetentionYearly != nil { + in, out := &in.RetentionYearly, &out.RetentionYearly + *out = new(ProtectionPolicyRetentionYearlyParameters) + (*in).DeepCopyInto(*out) + } + if in.SimpleRetention != nil { + in, out := &in.SimpleRetention, &out.SimpleRetention + *out = new(SimpleRetentionParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProtectionPolicyParameters. +func (in *ProtectionPolicyParameters) DeepCopy() *ProtectionPolicyParameters { + if in == nil { + return nil + } + out := new(ProtectionPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProtectionPolicyRetentionDailyInitParameters) DeepCopyInto(out *ProtectionPolicyRetentionDailyInitParameters) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProtectionPolicyRetentionDailyInitParameters. +func (in *ProtectionPolicyRetentionDailyInitParameters) DeepCopy() *ProtectionPolicyRetentionDailyInitParameters { + if in == nil { + return nil + } + out := new(ProtectionPolicyRetentionDailyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProtectionPolicyRetentionDailyObservation) DeepCopyInto(out *ProtectionPolicyRetentionDailyObservation) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProtectionPolicyRetentionDailyObservation. +func (in *ProtectionPolicyRetentionDailyObservation) DeepCopy() *ProtectionPolicyRetentionDailyObservation { + if in == nil { + return nil + } + out := new(ProtectionPolicyRetentionDailyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProtectionPolicyRetentionDailyParameters) DeepCopyInto(out *ProtectionPolicyRetentionDailyParameters) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProtectionPolicyRetentionDailyParameters. 
+func (in *ProtectionPolicyRetentionDailyParameters) DeepCopy() *ProtectionPolicyRetentionDailyParameters { + if in == nil { + return nil + } + out := new(ProtectionPolicyRetentionDailyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProtectionPolicyRetentionMonthlyInitParameters) DeepCopyInto(out *ProtectionPolicyRetentionMonthlyInitParameters) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.FormatType != nil { + in, out := &in.FormatType, &out.FormatType + *out = new(string) + **out = **in + } + if in.Monthdays != nil { + in, out := &in.Monthdays, &out.Monthdays + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.Weekdays != nil { + in, out := &in.Weekdays, &out.Weekdays + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Weeks != nil { + in, out := &in.Weeks, &out.Weeks + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProtectionPolicyRetentionMonthlyInitParameters. +func (in *ProtectionPolicyRetentionMonthlyInitParameters) DeepCopy() *ProtectionPolicyRetentionMonthlyInitParameters { + if in == nil { + return nil + } + out := new(ProtectionPolicyRetentionMonthlyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProtectionPolicyRetentionMonthlyObservation) DeepCopyInto(out *ProtectionPolicyRetentionMonthlyObservation) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.FormatType != nil { + in, out := &in.FormatType, &out.FormatType + *out = new(string) + **out = **in + } + if in.Monthdays != nil { + in, out := &in.Monthdays, &out.Monthdays + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.Weekdays != nil { + in, out := &in.Weekdays, &out.Weekdays + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Weeks != nil { + in, out := &in.Weeks, &out.Weeks + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProtectionPolicyRetentionMonthlyObservation. +func (in *ProtectionPolicyRetentionMonthlyObservation) DeepCopy() *ProtectionPolicyRetentionMonthlyObservation { + if in == nil { + return nil + } + out := new(ProtectionPolicyRetentionMonthlyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProtectionPolicyRetentionMonthlyParameters) DeepCopyInto(out *ProtectionPolicyRetentionMonthlyParameters) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.FormatType != nil { + in, out := &in.FormatType, &out.FormatType + *out = new(string) + **out = **in + } + if in.Monthdays != nil { + in, out := &in.Monthdays, &out.Monthdays + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.Weekdays != nil { + in, out := &in.Weekdays, &out.Weekdays + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Weeks != nil { + in, out := &in.Weeks, &out.Weeks + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProtectionPolicyRetentionMonthlyParameters. +func (in *ProtectionPolicyRetentionMonthlyParameters) DeepCopy() *ProtectionPolicyRetentionMonthlyParameters { + if in == nil { + return nil + } + out := new(ProtectionPolicyRetentionMonthlyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProtectionPolicyRetentionWeeklyInitParameters) DeepCopyInto(out *ProtectionPolicyRetentionWeeklyInitParameters) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Weekdays != nil { + in, out := &in.Weekdays, &out.Weekdays + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProtectionPolicyRetentionWeeklyInitParameters. +func (in *ProtectionPolicyRetentionWeeklyInitParameters) DeepCopy() *ProtectionPolicyRetentionWeeklyInitParameters { + if in == nil { + return nil + } + out := new(ProtectionPolicyRetentionWeeklyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProtectionPolicyRetentionWeeklyObservation) DeepCopyInto(out *ProtectionPolicyRetentionWeeklyObservation) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Weekdays != nil { + in, out := &in.Weekdays, &out.Weekdays + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProtectionPolicyRetentionWeeklyObservation. +func (in *ProtectionPolicyRetentionWeeklyObservation) DeepCopy() *ProtectionPolicyRetentionWeeklyObservation { + if in == nil { + return nil + } + out := new(ProtectionPolicyRetentionWeeklyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProtectionPolicyRetentionWeeklyParameters) DeepCopyInto(out *ProtectionPolicyRetentionWeeklyParameters) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Weekdays != nil { + in, out := &in.Weekdays, &out.Weekdays + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProtectionPolicyRetentionWeeklyParameters. +func (in *ProtectionPolicyRetentionWeeklyParameters) DeepCopy() *ProtectionPolicyRetentionWeeklyParameters { + if in == nil { + return nil + } + out := new(ProtectionPolicyRetentionWeeklyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProtectionPolicyRetentionYearlyInitParameters) DeepCopyInto(out *ProtectionPolicyRetentionYearlyInitParameters) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.FormatType != nil { + in, out := &in.FormatType, &out.FormatType + *out = new(string) + **out = **in + } + if in.Monthdays != nil { + in, out := &in.Monthdays, &out.Monthdays + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.Months != nil { + in, out := &in.Months, &out.Months + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Weekdays != nil { + in, out := &in.Weekdays, &out.Weekdays + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + 
if in.Weeks != nil { + in, out := &in.Weeks, &out.Weeks + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProtectionPolicyRetentionYearlyInitParameters. +func (in *ProtectionPolicyRetentionYearlyInitParameters) DeepCopy() *ProtectionPolicyRetentionYearlyInitParameters { + if in == nil { + return nil + } + out := new(ProtectionPolicyRetentionYearlyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProtectionPolicyRetentionYearlyObservation) DeepCopyInto(out *ProtectionPolicyRetentionYearlyObservation) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.FormatType != nil { + in, out := &in.FormatType, &out.FormatType + *out = new(string) + **out = **in + } + if in.Monthdays != nil { + in, out := &in.Monthdays, &out.Monthdays + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.Months != nil { + in, out := &in.Months, &out.Months + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Weekdays != nil { + in, out := &in.Weekdays, &out.Weekdays + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Weeks != nil { + in, out := &in.Weeks, &out.Weeks + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// 
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProtectionPolicyRetentionYearlyObservation. +func (in *ProtectionPolicyRetentionYearlyObservation) DeepCopy() *ProtectionPolicyRetentionYearlyObservation { + if in == nil { + return nil + } + out := new(ProtectionPolicyRetentionYearlyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProtectionPolicyRetentionYearlyParameters) DeepCopyInto(out *ProtectionPolicyRetentionYearlyParameters) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.FormatType != nil { + in, out := &in.FormatType, &out.FormatType + *out = new(string) + **out = **in + } + if in.Monthdays != nil { + in, out := &in.Monthdays, &out.Monthdays + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.Months != nil { + in, out := &in.Months, &out.Months + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Weekdays != nil { + in, out := &in.Weekdays, &out.Weekdays + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Weeks != nil { + in, out := &in.Weeks, &out.Weeks + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProtectionPolicyRetentionYearlyParameters. 
+func (in *ProtectionPolicyRetentionYearlyParameters) DeepCopy() *ProtectionPolicyRetentionYearlyParameters { + if in == nil { + return nil + } + out := new(ProtectionPolicyRetentionYearlyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetentionDailyInitParameters) DeepCopyInto(out *RetentionDailyInitParameters) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionDailyInitParameters. +func (in *RetentionDailyInitParameters) DeepCopy() *RetentionDailyInitParameters { + if in == nil { + return nil + } + out := new(RetentionDailyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetentionDailyObservation) DeepCopyInto(out *RetentionDailyObservation) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionDailyObservation. +func (in *RetentionDailyObservation) DeepCopy() *RetentionDailyObservation { + if in == nil { + return nil + } + out := new(RetentionDailyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetentionDailyParameters) DeepCopyInto(out *RetentionDailyParameters) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionDailyParameters. 
+func (in *RetentionDailyParameters) DeepCopy() *RetentionDailyParameters { + if in == nil { + return nil + } + out := new(RetentionDailyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetentionMonthlyInitParameters) DeepCopyInto(out *RetentionMonthlyInitParameters) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.IncludeLastDays != nil { + in, out := &in.IncludeLastDays, &out.IncludeLastDays + *out = new(bool) + **out = **in + } + if in.Weekdays != nil { + in, out := &in.Weekdays, &out.Weekdays + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Weeks != nil { + in, out := &in.Weeks, &out.Weeks + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionMonthlyInitParameters. +func (in *RetentionMonthlyInitParameters) DeepCopy() *RetentionMonthlyInitParameters { + if in == nil { + return nil + } + out := new(RetentionMonthlyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RetentionMonthlyObservation) DeepCopyInto(out *RetentionMonthlyObservation) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.IncludeLastDays != nil { + in, out := &in.IncludeLastDays, &out.IncludeLastDays + *out = new(bool) + **out = **in + } + if in.Weekdays != nil { + in, out := &in.Weekdays, &out.Weekdays + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Weeks != nil { + in, out := &in.Weeks, &out.Weeks + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionMonthlyObservation. +func (in *RetentionMonthlyObservation) DeepCopy() *RetentionMonthlyObservation { + if in == nil { + return nil + } + out := new(RetentionMonthlyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RetentionMonthlyParameters) DeepCopyInto(out *RetentionMonthlyParameters) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.IncludeLastDays != nil { + in, out := &in.IncludeLastDays, &out.IncludeLastDays + *out = new(bool) + **out = **in + } + if in.Weekdays != nil { + in, out := &in.Weekdays, &out.Weekdays + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Weeks != nil { + in, out := &in.Weeks, &out.Weeks + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionMonthlyParameters. +func (in *RetentionMonthlyParameters) DeepCopy() *RetentionMonthlyParameters { + if in == nil { + return nil + } + out := new(RetentionMonthlyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetentionWeeklyInitParameters) DeepCopyInto(out *RetentionWeeklyInitParameters) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Weekdays != nil { + in, out := &in.Weekdays, &out.Weekdays + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionWeeklyInitParameters. 
+func (in *RetentionWeeklyInitParameters) DeepCopy() *RetentionWeeklyInitParameters { + if in == nil { + return nil + } + out := new(RetentionWeeklyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetentionWeeklyObservation) DeepCopyInto(out *RetentionWeeklyObservation) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Weekdays != nil { + in, out := &in.Weekdays, &out.Weekdays + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionWeeklyObservation. +func (in *RetentionWeeklyObservation) DeepCopy() *RetentionWeeklyObservation { + if in == nil { + return nil + } + out := new(RetentionWeeklyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetentionWeeklyParameters) DeepCopyInto(out *RetentionWeeklyParameters) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Weekdays != nil { + in, out := &in.Weekdays, &out.Weekdays + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionWeeklyParameters. 
+func (in *RetentionWeeklyParameters) DeepCopy() *RetentionWeeklyParameters { + if in == nil { + return nil + } + out := new(RetentionWeeklyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetentionYearlyInitParameters) DeepCopyInto(out *RetentionYearlyInitParameters) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.IncludeLastDays != nil { + in, out := &in.IncludeLastDays, &out.IncludeLastDays + *out = new(bool) + **out = **in + } + if in.Months != nil { + in, out := &in.Months, &out.Months + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Weekdays != nil { + in, out := &in.Weekdays, &out.Weekdays + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Weeks != nil { + in, out := &in.Weeks, &out.Weeks + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionYearlyInitParameters. +func (in *RetentionYearlyInitParameters) DeepCopy() *RetentionYearlyInitParameters { + if in == nil { + return nil + } + out := new(RetentionYearlyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RetentionYearlyObservation) DeepCopyInto(out *RetentionYearlyObservation) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.IncludeLastDays != nil { + in, out := &in.IncludeLastDays, &out.IncludeLastDays + *out = new(bool) + **out = **in + } + if in.Months != nil { + in, out := &in.Months, &out.Months + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Weekdays != nil { + in, out := &in.Weekdays, &out.Weekdays + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Weeks != nil { + in, out := &in.Weeks, &out.Weeks + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionYearlyObservation. +func (in *RetentionYearlyObservation) DeepCopy() *RetentionYearlyObservation { + if in == nil { + return nil + } + out := new(RetentionYearlyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RetentionYearlyParameters) DeepCopyInto(out *RetentionYearlyParameters) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = make([]*float64, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(float64) + **out = **in + } + } + } + if in.IncludeLastDays != nil { + in, out := &in.IncludeLastDays, &out.IncludeLastDays + *out = new(bool) + **out = **in + } + if in.Months != nil { + in, out := &in.Months, &out.Months + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Weekdays != nil { + in, out := &in.Weekdays, &out.Weekdays + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Weeks != nil { + in, out := &in.Weeks, &out.Weeks + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionYearlyParameters. +func (in *RetentionYearlyParameters) DeepCopy() *RetentionYearlyParameters { + if in == nil { + return nil + } + out := new(RetentionYearlyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SettingsInitParameters) DeepCopyInto(out *SettingsInitParameters) { + *out = *in + if in.CompressionEnabled != nil { + in, out := &in.CompressionEnabled, &out.CompressionEnabled + *out = new(bool) + **out = **in + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SettingsInitParameters. +func (in *SettingsInitParameters) DeepCopy() *SettingsInitParameters { + if in == nil { + return nil + } + out := new(SettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SettingsObservation) DeepCopyInto(out *SettingsObservation) { + *out = *in + if in.CompressionEnabled != nil { + in, out := &in.CompressionEnabled, &out.CompressionEnabled + *out = new(bool) + **out = **in + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SettingsObservation. +func (in *SettingsObservation) DeepCopy() *SettingsObservation { + if in == nil { + return nil + } + out := new(SettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SettingsParameters) DeepCopyInto(out *SettingsParameters) { + *out = *in + if in.CompressionEnabled != nil { + in, out := &in.CompressionEnabled, &out.CompressionEnabled + *out = new(bool) + **out = **in + } + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SettingsParameters. 
+func (in *SettingsParameters) DeepCopy() *SettingsParameters { + if in == nil { + return nil + } + out := new(SettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SimpleRetentionInitParameters) DeepCopyInto(out *SimpleRetentionInitParameters) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SimpleRetentionInitParameters. +func (in *SimpleRetentionInitParameters) DeepCopy() *SimpleRetentionInitParameters { + if in == nil { + return nil + } + out := new(SimpleRetentionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SimpleRetentionObservation) DeepCopyInto(out *SimpleRetentionObservation) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SimpleRetentionObservation. +func (in *SimpleRetentionObservation) DeepCopy() *SimpleRetentionObservation { + if in == nil { + return nil + } + out := new(SimpleRetentionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SimpleRetentionParameters) DeepCopyInto(out *SimpleRetentionParameters) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SimpleRetentionParameters. 
+func (in *SimpleRetentionParameters) DeepCopy() *SimpleRetentionParameters { + if in == nil { + return nil + } + out := new(SimpleRetentionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SiteRecoveryProtectionContainerMapping) DeepCopyInto(out *SiteRecoveryProtectionContainerMapping) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteRecoveryProtectionContainerMapping. +func (in *SiteRecoveryProtectionContainerMapping) DeepCopy() *SiteRecoveryProtectionContainerMapping { + if in == nil { + return nil + } + out := new(SiteRecoveryProtectionContainerMapping) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SiteRecoveryProtectionContainerMapping) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SiteRecoveryProtectionContainerMappingInitParameters) DeepCopyInto(out *SiteRecoveryProtectionContainerMappingInitParameters) { + *out = *in + if in.AutomaticUpdate != nil { + in, out := &in.AutomaticUpdate, &out.AutomaticUpdate + *out = new(AutomaticUpdateInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RecoveryReplicationPolicyID != nil { + in, out := &in.RecoveryReplicationPolicyID, &out.RecoveryReplicationPolicyID + *out = new(string) + **out = **in + } + if in.RecoveryReplicationPolicyIDRef != nil { + in, out := &in.RecoveryReplicationPolicyIDRef, &out.RecoveryReplicationPolicyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RecoveryReplicationPolicyIDSelector != nil { + in, out := &in.RecoveryReplicationPolicyIDSelector, &out.RecoveryReplicationPolicyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RecoveryTargetProtectionContainerID != nil { + in, out := &in.RecoveryTargetProtectionContainerID, &out.RecoveryTargetProtectionContainerID + *out = new(string) + **out = **in + } + if in.RecoveryTargetProtectionContainerIDRef != nil { + in, out := &in.RecoveryTargetProtectionContainerIDRef, &out.RecoveryTargetProtectionContainerIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RecoveryTargetProtectionContainerIDSelector != nil { + in, out := &in.RecoveryTargetProtectionContainerIDSelector, &out.RecoveryTargetProtectionContainerIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteRecoveryProtectionContainerMappingInitParameters. 
+func (in *SiteRecoveryProtectionContainerMappingInitParameters) DeepCopy() *SiteRecoveryProtectionContainerMappingInitParameters { + if in == nil { + return nil + } + out := new(SiteRecoveryProtectionContainerMappingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SiteRecoveryProtectionContainerMappingList) DeepCopyInto(out *SiteRecoveryProtectionContainerMappingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SiteRecoveryProtectionContainerMapping, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteRecoveryProtectionContainerMappingList. +func (in *SiteRecoveryProtectionContainerMappingList) DeepCopy() *SiteRecoveryProtectionContainerMappingList { + if in == nil { + return nil + } + out := new(SiteRecoveryProtectionContainerMappingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SiteRecoveryProtectionContainerMappingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SiteRecoveryProtectionContainerMappingObservation) DeepCopyInto(out *SiteRecoveryProtectionContainerMappingObservation) { + *out = *in + if in.AutomaticUpdate != nil { + in, out := &in.AutomaticUpdate, &out.AutomaticUpdate + *out = new(AutomaticUpdateObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.RecoveryFabricName != nil { + in, out := &in.RecoveryFabricName, &out.RecoveryFabricName + *out = new(string) + **out = **in + } + if in.RecoveryReplicationPolicyID != nil { + in, out := &in.RecoveryReplicationPolicyID, &out.RecoveryReplicationPolicyID + *out = new(string) + **out = **in + } + if in.RecoverySourceProtectionContainerName != nil { + in, out := &in.RecoverySourceProtectionContainerName, &out.RecoverySourceProtectionContainerName + *out = new(string) + **out = **in + } + if in.RecoveryTargetProtectionContainerID != nil { + in, out := &in.RecoveryTargetProtectionContainerID, &out.RecoveryTargetProtectionContainerID + *out = new(string) + **out = **in + } + if in.RecoveryVaultName != nil { + in, out := &in.RecoveryVaultName, &out.RecoveryVaultName + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteRecoveryProtectionContainerMappingObservation. +func (in *SiteRecoveryProtectionContainerMappingObservation) DeepCopy() *SiteRecoveryProtectionContainerMappingObservation { + if in == nil { + return nil + } + out := new(SiteRecoveryProtectionContainerMappingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SiteRecoveryProtectionContainerMappingParameters) DeepCopyInto(out *SiteRecoveryProtectionContainerMappingParameters) { + *out = *in + if in.AutomaticUpdate != nil { + in, out := &in.AutomaticUpdate, &out.AutomaticUpdate + *out = new(AutomaticUpdateParameters) + (*in).DeepCopyInto(*out) + } + if in.RecoveryFabricName != nil { + in, out := &in.RecoveryFabricName, &out.RecoveryFabricName + *out = new(string) + **out = **in + } + if in.RecoveryFabricNameRef != nil { + in, out := &in.RecoveryFabricNameRef, &out.RecoveryFabricNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RecoveryFabricNameSelector != nil { + in, out := &in.RecoveryFabricNameSelector, &out.RecoveryFabricNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RecoveryReplicationPolicyID != nil { + in, out := &in.RecoveryReplicationPolicyID, &out.RecoveryReplicationPolicyID + *out = new(string) + **out = **in + } + if in.RecoveryReplicationPolicyIDRef != nil { + in, out := &in.RecoveryReplicationPolicyIDRef, &out.RecoveryReplicationPolicyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RecoveryReplicationPolicyIDSelector != nil { + in, out := &in.RecoveryReplicationPolicyIDSelector, &out.RecoveryReplicationPolicyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RecoverySourceProtectionContainerName != nil { + in, out := &in.RecoverySourceProtectionContainerName, &out.RecoverySourceProtectionContainerName + *out = new(string) + **out = **in + } + if in.RecoverySourceProtectionContainerNameRef != nil { + in, out := &in.RecoverySourceProtectionContainerNameRef, &out.RecoverySourceProtectionContainerNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RecoverySourceProtectionContainerNameSelector != nil { + in, out := &in.RecoverySourceProtectionContainerNameSelector, &out.RecoverySourceProtectionContainerNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if 
in.RecoveryTargetProtectionContainerID != nil { + in, out := &in.RecoveryTargetProtectionContainerID, &out.RecoveryTargetProtectionContainerID + *out = new(string) + **out = **in + } + if in.RecoveryTargetProtectionContainerIDRef != nil { + in, out := &in.RecoveryTargetProtectionContainerIDRef, &out.RecoveryTargetProtectionContainerIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RecoveryTargetProtectionContainerIDSelector != nil { + in, out := &in.RecoveryTargetProtectionContainerIDSelector, &out.RecoveryTargetProtectionContainerIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RecoveryVaultName != nil { + in, out := &in.RecoveryVaultName, &out.RecoveryVaultName + *out = new(string) + **out = **in + } + if in.RecoveryVaultNameRef != nil { + in, out := &in.RecoveryVaultNameRef, &out.RecoveryVaultNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RecoveryVaultNameSelector != nil { + in, out := &in.RecoveryVaultNameSelector, &out.RecoveryVaultNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteRecoveryProtectionContainerMappingParameters. 
+func (in *SiteRecoveryProtectionContainerMappingParameters) DeepCopy() *SiteRecoveryProtectionContainerMappingParameters { + if in == nil { + return nil + } + out := new(SiteRecoveryProtectionContainerMappingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SiteRecoveryProtectionContainerMappingSpec) DeepCopyInto(out *SiteRecoveryProtectionContainerMappingSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteRecoveryProtectionContainerMappingSpec. +func (in *SiteRecoveryProtectionContainerMappingSpec) DeepCopy() *SiteRecoveryProtectionContainerMappingSpec { + if in == nil { + return nil + } + out := new(SiteRecoveryProtectionContainerMappingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SiteRecoveryProtectionContainerMappingStatus) DeepCopyInto(out *SiteRecoveryProtectionContainerMappingStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteRecoveryProtectionContainerMappingStatus. +func (in *SiteRecoveryProtectionContainerMappingStatus) DeepCopy() *SiteRecoveryProtectionContainerMappingStatus { + if in == nil { + return nil + } + out := new(SiteRecoveryProtectionContainerMappingStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Vault) DeepCopyInto(out *Vault) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Vault. +func (in *Vault) DeepCopy() *Vault { + if in == nil { + return nil + } + out := new(Vault) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Vault) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VaultInitParameters) DeepCopyInto(out *VaultInitParameters) { + *out = *in + if in.ClassicVMwareReplicationEnabled != nil { + in, out := &in.ClassicVMwareReplicationEnabled, &out.ClassicVMwareReplicationEnabled + *out = new(bool) + **out = **in + } + if in.CrossRegionRestoreEnabled != nil { + in, out := &in.CrossRegionRestoreEnabled, &out.CrossRegionRestoreEnabled + *out = new(bool) + **out = **in + } + if in.Encryption != nil { + in, out := &in.Encryption, &out.Encryption + *out = new(EncryptionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Immutability != nil { + in, out := &in.Immutability, &out.Immutability + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Monitoring != nil { + in, out := &in.Monitoring, &out.Monitoring + *out = new(MonitoringInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = 
**in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.SoftDeleteEnabled != nil { + in, out := &in.SoftDeleteEnabled, &out.SoftDeleteEnabled + *out = new(bool) + **out = **in + } + if in.StorageModeType != nil { + in, out := &in.StorageModeType, &out.StorageModeType + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VaultInitParameters. +func (in *VaultInitParameters) DeepCopy() *VaultInitParameters { + if in == nil { + return nil + } + out := new(VaultInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VaultList) DeepCopyInto(out *VaultList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Vault, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VaultList. +func (in *VaultList) DeepCopy() *VaultList { + if in == nil { + return nil + } + out := new(VaultList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VaultList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VaultObservation) DeepCopyInto(out *VaultObservation) { + *out = *in + if in.ClassicVMwareReplicationEnabled != nil { + in, out := &in.ClassicVMwareReplicationEnabled, &out.ClassicVMwareReplicationEnabled + *out = new(bool) + **out = **in + } + if in.CrossRegionRestoreEnabled != nil { + in, out := &in.CrossRegionRestoreEnabled, &out.CrossRegionRestoreEnabled + *out = new(bool) + **out = **in + } + if in.Encryption != nil { + in, out := &in.Encryption, &out.Encryption + *out = new(EncryptionObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.Immutability != nil { + in, out := &in.Immutability, &out.Immutability + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Monitoring != nil { + in, out := &in.Monitoring, &out.Monitoring + *out = new(MonitoringObservation) + (*in).DeepCopyInto(*out) + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.SoftDeleteEnabled != nil { + in, out := &in.SoftDeleteEnabled, &out.SoftDeleteEnabled + *out = new(bool) + **out = **in + } + if in.StorageModeType != nil { + in, out := &in.StorageModeType, &out.StorageModeType + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := 
(*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VaultObservation. +func (in *VaultObservation) DeepCopy() *VaultObservation { + if in == nil { + return nil + } + out := new(VaultObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VaultParameters) DeepCopyInto(out *VaultParameters) { + *out = *in + if in.ClassicVMwareReplicationEnabled != nil { + in, out := &in.ClassicVMwareReplicationEnabled, &out.ClassicVMwareReplicationEnabled + *out = new(bool) + **out = **in + } + if in.CrossRegionRestoreEnabled != nil { + in, out := &in.CrossRegionRestoreEnabled, &out.CrossRegionRestoreEnabled + *out = new(bool) + **out = **in + } + if in.Encryption != nil { + in, out := &in.Encryption, &out.Encryption + *out = new(EncryptionParameters) + (*in).DeepCopyInto(*out) + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.Immutability != nil { + in, out := &in.Immutability, &out.Immutability + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Monitoring != nil { + in, out := &in.Monitoring, &out.Monitoring + *out = new(MonitoringParameters) + (*in).DeepCopyInto(*out) + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + 
} + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.SoftDeleteEnabled != nil { + in, out := &in.SoftDeleteEnabled, &out.SoftDeleteEnabled + *out = new(bool) + **out = **in + } + if in.StorageModeType != nil { + in, out := &in.StorageModeType, &out.StorageModeType + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VaultParameters. +func (in *VaultParameters) DeepCopy() *VaultParameters { + if in == nil { + return nil + } + out := new(VaultParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VaultSpec) DeepCopyInto(out *VaultSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VaultSpec. +func (in *VaultSpec) DeepCopy() *VaultSpec { + if in == nil { + return nil + } + out := new(VaultSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VaultStatus) DeepCopyInto(out *VaultStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VaultStatus. +func (in *VaultStatus) DeepCopy() *VaultStatus { + if in == nil { + return nil + } + out := new(VaultStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/recoveryservices/v1beta2/zz_generated.managed.go b/apis/recoveryservices/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..5befb0c3f --- /dev/null +++ b/apis/recoveryservices/v1beta2/zz_generated.managed.go @@ -0,0 +1,308 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this BackupPolicyFileShare. +func (mg *BackupPolicyFileShare) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this BackupPolicyFileShare. +func (mg *BackupPolicyFileShare) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this BackupPolicyFileShare. +func (mg *BackupPolicyFileShare) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this BackupPolicyFileShare. +func (mg *BackupPolicyFileShare) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this BackupPolicyFileShare. +func (mg *BackupPolicyFileShare) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this BackupPolicyFileShare. 
+func (mg *BackupPolicyFileShare) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this BackupPolicyFileShare. +func (mg *BackupPolicyFileShare) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this BackupPolicyFileShare. +func (mg *BackupPolicyFileShare) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this BackupPolicyFileShare. +func (mg *BackupPolicyFileShare) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this BackupPolicyFileShare. +func (mg *BackupPolicyFileShare) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this BackupPolicyFileShare. +func (mg *BackupPolicyFileShare) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this BackupPolicyFileShare. +func (mg *BackupPolicyFileShare) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this BackupPolicyVM. +func (mg *BackupPolicyVM) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this BackupPolicyVM. +func (mg *BackupPolicyVM) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this BackupPolicyVM. +func (mg *BackupPolicyVM) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this BackupPolicyVM. +func (mg *BackupPolicyVM) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this BackupPolicyVM. 
+func (mg *BackupPolicyVM) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this BackupPolicyVM. +func (mg *BackupPolicyVM) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this BackupPolicyVM. +func (mg *BackupPolicyVM) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this BackupPolicyVM. +func (mg *BackupPolicyVM) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this BackupPolicyVM. +func (mg *BackupPolicyVM) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this BackupPolicyVM. +func (mg *BackupPolicyVM) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this BackupPolicyVM. +func (mg *BackupPolicyVM) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this BackupPolicyVM. +func (mg *BackupPolicyVM) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this BackupPolicyVMWorkload. +func (mg *BackupPolicyVMWorkload) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this BackupPolicyVMWorkload. +func (mg *BackupPolicyVMWorkload) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this BackupPolicyVMWorkload. +func (mg *BackupPolicyVMWorkload) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this BackupPolicyVMWorkload. 
+func (mg *BackupPolicyVMWorkload) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this BackupPolicyVMWorkload. +func (mg *BackupPolicyVMWorkload) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this BackupPolicyVMWorkload. +func (mg *BackupPolicyVMWorkload) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this BackupPolicyVMWorkload. +func (mg *BackupPolicyVMWorkload) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this BackupPolicyVMWorkload. +func (mg *BackupPolicyVMWorkload) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this BackupPolicyVMWorkload. +func (mg *BackupPolicyVMWorkload) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this BackupPolicyVMWorkload. +func (mg *BackupPolicyVMWorkload) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this BackupPolicyVMWorkload. +func (mg *BackupPolicyVMWorkload) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this BackupPolicyVMWorkload. +func (mg *BackupPolicyVMWorkload) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this SiteRecoveryProtectionContainerMapping. +func (mg *SiteRecoveryProtectionContainerMapping) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this SiteRecoveryProtectionContainerMapping. 
+func (mg *SiteRecoveryProtectionContainerMapping) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this SiteRecoveryProtectionContainerMapping. +func (mg *SiteRecoveryProtectionContainerMapping) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this SiteRecoveryProtectionContainerMapping. +func (mg *SiteRecoveryProtectionContainerMapping) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this SiteRecoveryProtectionContainerMapping. +func (mg *SiteRecoveryProtectionContainerMapping) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this SiteRecoveryProtectionContainerMapping. +func (mg *SiteRecoveryProtectionContainerMapping) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this SiteRecoveryProtectionContainerMapping. +func (mg *SiteRecoveryProtectionContainerMapping) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this SiteRecoveryProtectionContainerMapping. +func (mg *SiteRecoveryProtectionContainerMapping) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this SiteRecoveryProtectionContainerMapping. +func (mg *SiteRecoveryProtectionContainerMapping) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this SiteRecoveryProtectionContainerMapping. +func (mg *SiteRecoveryProtectionContainerMapping) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this SiteRecoveryProtectionContainerMapping. 
+func (mg *SiteRecoveryProtectionContainerMapping) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this SiteRecoveryProtectionContainerMapping. +func (mg *SiteRecoveryProtectionContainerMapping) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Vault. +func (mg *Vault) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Vault. +func (mg *Vault) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Vault. +func (mg *Vault) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Vault. +func (mg *Vault) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Vault. +func (mg *Vault) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Vault. +func (mg *Vault) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Vault. +func (mg *Vault) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Vault. +func (mg *Vault) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Vault. +func (mg *Vault) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Vault. +func (mg *Vault) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Vault. 
+func (mg *Vault) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Vault. +func (mg *Vault) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/recoveryservices/v1beta2/zz_generated.managedlist.go b/apis/recoveryservices/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..7b8f63275 --- /dev/null +++ b/apis/recoveryservices/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,53 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this BackupPolicyFileShareList. +func (l *BackupPolicyFileShareList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this BackupPolicyVMList. +func (l *BackupPolicyVMList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this BackupPolicyVMWorkloadList. +func (l *BackupPolicyVMWorkloadList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this SiteRecoveryProtectionContainerMappingList. +func (l *SiteRecoveryProtectionContainerMappingList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this VaultList. 
+func (l *VaultList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/recoveryservices/v1beta2/zz_generated.resolvers.go b/apis/recoveryservices/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..aeef0972d --- /dev/null +++ b/apis/recoveryservices/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,363 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + apisresolver "github.com/upbound/provider-azure/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *BackupPolicyFileShare) ResolveReferences( // ResolveReferences of this BackupPolicyFileShare. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("recoveryservices.azure.upbound.io", "v1beta2", "Vault", "VaultList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.RecoveryVaultName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.RecoveryVaultNameRef, + Selector: mg.Spec.ForProvider.RecoveryVaultNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.RecoveryVaultName") + } + mg.Spec.ForProvider.RecoveryVaultName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RecoveryVaultNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this BackupPolicyVM. 
+func (mg *BackupPolicyVM) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("recoveryservices.azure.upbound.io", "v1beta2", "Vault", "VaultList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.RecoveryVaultName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.RecoveryVaultNameRef, + Selector: mg.Spec.ForProvider.RecoveryVaultNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.RecoveryVaultName") + } + mg.Spec.ForProvider.RecoveryVaultName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RecoveryVaultNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this BackupPolicyVMWorkload. 
+func (mg *BackupPolicyVMWorkload) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("recoveryservices.azure.upbound.io", "v1beta2", "Vault", "VaultList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.RecoveryVaultName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.RecoveryVaultNameRef, + Selector: mg.Spec.ForProvider.RecoveryVaultNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.RecoveryVaultName") + } + mg.Spec.ForProvider.RecoveryVaultName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RecoveryVaultNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this 
SiteRecoveryProtectionContainerMapping. +func (mg *SiteRecoveryProtectionContainerMapping) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("recoveryservices.azure.upbound.io", "v1beta1", "SiteRecoveryFabric", "SiteRecoveryFabricList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.RecoveryFabricName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.RecoveryFabricNameRef, + Selector: mg.Spec.ForProvider.RecoveryFabricNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.RecoveryFabricName") + } + mg.Spec.ForProvider.RecoveryFabricName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RecoveryFabricNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("recoveryservices.azure.upbound.io", "v1beta1", "SiteRecoveryReplicationPolicy", "SiteRecoveryReplicationPolicyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.RecoveryReplicationPolicyID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.RecoveryReplicationPolicyIDRef, + Selector: mg.Spec.ForProvider.RecoveryReplicationPolicyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.RecoveryReplicationPolicyID") + } + 
mg.Spec.ForProvider.RecoveryReplicationPolicyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RecoveryReplicationPolicyIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("recoveryservices.azure.upbound.io", "v1beta1", "SiteRecoveryProtectionContainer", "SiteRecoveryProtectionContainerList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.RecoverySourceProtectionContainerName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.RecoverySourceProtectionContainerNameRef, + Selector: mg.Spec.ForProvider.RecoverySourceProtectionContainerNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.RecoverySourceProtectionContainerName") + } + mg.Spec.ForProvider.RecoverySourceProtectionContainerName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RecoverySourceProtectionContainerNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("recoveryservices.azure.upbound.io", "v1beta1", "SiteRecoveryProtectionContainer", "SiteRecoveryProtectionContainerList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.RecoveryTargetProtectionContainerID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.RecoveryTargetProtectionContainerIDRef, + Selector: mg.Spec.ForProvider.RecoveryTargetProtectionContainerIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.RecoveryTargetProtectionContainerID") + } + 
mg.Spec.ForProvider.RecoveryTargetProtectionContainerID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RecoveryTargetProtectionContainerIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("recoveryservices.azure.upbound.io", "v1beta2", "Vault", "VaultList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.RecoveryVaultName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.RecoveryVaultNameRef, + Selector: mg.Spec.ForProvider.RecoveryVaultNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.RecoveryVaultName") + } + mg.Spec.ForProvider.RecoveryVaultName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RecoveryVaultNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("recoveryservices.azure.upbound.io", "v1beta1", "SiteRecoveryReplicationPolicy", 
"SiteRecoveryReplicationPolicyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.RecoveryReplicationPolicyID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.RecoveryReplicationPolicyIDRef, + Selector: mg.Spec.InitProvider.RecoveryReplicationPolicyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.RecoveryReplicationPolicyID") + } + mg.Spec.InitProvider.RecoveryReplicationPolicyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.RecoveryReplicationPolicyIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("recoveryservices.azure.upbound.io", "v1beta1", "SiteRecoveryProtectionContainer", "SiteRecoveryProtectionContainerList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.RecoveryTargetProtectionContainerID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.RecoveryTargetProtectionContainerIDRef, + Selector: mg.Spec.InitProvider.RecoveryTargetProtectionContainerIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.RecoveryTargetProtectionContainerID") + } + mg.Spec.InitProvider.RecoveryTargetProtectionContainerID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.RecoveryTargetProtectionContainerIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Vault. 
+func (mg *Vault) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/recoveryservices/v1beta2/zz_groupversion_info.go b/apis/recoveryservices/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..24a04fa74 --- /dev/null +++ b/apis/recoveryservices/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=recoveryservices.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
+const ( + CRDGroup = "recoveryservices.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/recoveryservices/v1beta2/zz_siterecoveryprotectioncontainermapping_terraformed.go b/apis/recoveryservices/v1beta2/zz_siterecoveryprotectioncontainermapping_terraformed.go new file mode 100755 index 000000000..e0ce62cac --- /dev/null +++ b/apis/recoveryservices/v1beta2/zz_siterecoveryprotectioncontainermapping_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this SiteRecoveryProtectionContainerMapping +func (mg *SiteRecoveryProtectionContainerMapping) GetTerraformResourceType() string { + return "azurerm_site_recovery_protection_container_mapping" +} + +// GetConnectionDetailsMapping for this SiteRecoveryProtectionContainerMapping +func (tr *SiteRecoveryProtectionContainerMapping) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this SiteRecoveryProtectionContainerMapping +func (tr *SiteRecoveryProtectionContainerMapping) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this SiteRecoveryProtectionContainerMapping +func (tr *SiteRecoveryProtectionContainerMapping) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this SiteRecoveryProtectionContainerMapping +func (tr *SiteRecoveryProtectionContainerMapping) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this SiteRecoveryProtectionContainerMapping +func (tr *SiteRecoveryProtectionContainerMapping) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this SiteRecoveryProtectionContainerMapping +func (tr *SiteRecoveryProtectionContainerMapping) 
SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this SiteRecoveryProtectionContainerMapping
+func (tr *SiteRecoveryProtectionContainerMapping) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this SiteRecoveryProtectionContainerMapping
+func (tr *SiteRecoveryProtectionContainerMapping) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this SiteRecoveryProtectionContainerMapping using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *SiteRecoveryProtectionContainerMapping) LateInitialize(attrs []byte) (bool, error) { + params := &SiteRecoveryProtectionContainerMappingParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *SiteRecoveryProtectionContainerMapping) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/recoveryservices/v1beta2/zz_siterecoveryprotectioncontainermapping_types.go b/apis/recoveryservices/v1beta2/zz_siterecoveryprotectioncontainermapping_types.go new file mode 100755 index 000000000..ffab87119 --- /dev/null +++ b/apis/recoveryservices/v1beta2/zz_siterecoveryprotectioncontainermapping_types.go @@ -0,0 +1,259 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AutomaticUpdateInitParameters struct { + + // The authentication type used for automation account. Possible values are RunAsAccount and SystemAssignedIdentity. + AuthenticationType *string `json:"authenticationType,omitempty" tf:"authentication_type,omitempty"` + + // The automation account ID which holds the automatic update runbook and authenticates to Azure resources. 
+ AutomationAccountID *string `json:"automationAccountId,omitempty" tf:"automation_account_id,omitempty"` + + // Should the Mobility service installed on Azure virtual machines be automatically updated. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type AutomaticUpdateObservation struct { + + // The authentication type used for automation account. Possible values are RunAsAccount and SystemAssignedIdentity. + AuthenticationType *string `json:"authenticationType,omitempty" tf:"authentication_type,omitempty"` + + // The automation account ID which holds the automatic update runbook and authenticates to Azure resources. + AutomationAccountID *string `json:"automationAccountId,omitempty" tf:"automation_account_id,omitempty"` + + // Should the Mobility service installed on Azure virtual machines be automatically updated. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type AutomaticUpdateParameters struct { + + // The authentication type used for automation account. Possible values are RunAsAccount and SystemAssignedIdentity. + // +kubebuilder:validation:Optional + AuthenticationType *string `json:"authenticationType,omitempty" tf:"authentication_type,omitempty"` + + // The automation account ID which holds the automatic update runbook and authenticates to Azure resources. + // +kubebuilder:validation:Optional + AutomationAccountID *string `json:"automationAccountId,omitempty" tf:"automation_account_id,omitempty"` + + // Should the Mobility service installed on Azure virtual machines be automatically updated. Defaults to false. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type SiteRecoveryProtectionContainerMappingInitParameters struct { + + // a automatic_update block defined as below. 
+ AutomaticUpdate *AutomaticUpdateInitParameters `json:"automaticUpdate,omitempty" tf:"automatic_update,omitempty"` + + // Id of the policy to use for this mapping. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/recoveryservices/v1beta1.SiteRecoveryReplicationPolicy + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + RecoveryReplicationPolicyID *string `json:"recoveryReplicationPolicyId,omitempty" tf:"recovery_replication_policy_id,omitempty"` + + // Reference to a SiteRecoveryReplicationPolicy in recoveryservices to populate recoveryReplicationPolicyId. + // +kubebuilder:validation:Optional + RecoveryReplicationPolicyIDRef *v1.Reference `json:"recoveryReplicationPolicyIdRef,omitempty" tf:"-"` + + // Selector for a SiteRecoveryReplicationPolicy in recoveryservices to populate recoveryReplicationPolicyId. + // +kubebuilder:validation:Optional + RecoveryReplicationPolicyIDSelector *v1.Selector `json:"recoveryReplicationPolicyIdSelector,omitempty" tf:"-"` + + // Id of target protection container to map to. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/recoveryservices/v1beta1.SiteRecoveryProtectionContainer + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + RecoveryTargetProtectionContainerID *string `json:"recoveryTargetProtectionContainerId,omitempty" tf:"recovery_target_protection_container_id,omitempty"` + + // Reference to a SiteRecoveryProtectionContainer in recoveryservices to populate recoveryTargetProtectionContainerId. 
+ // +kubebuilder:validation:Optional + RecoveryTargetProtectionContainerIDRef *v1.Reference `json:"recoveryTargetProtectionContainerIdRef,omitempty" tf:"-"` + + // Selector for a SiteRecoveryProtectionContainer in recoveryservices to populate recoveryTargetProtectionContainerId. + // +kubebuilder:validation:Optional + RecoveryTargetProtectionContainerIDSelector *v1.Selector `json:"recoveryTargetProtectionContainerIdSelector,omitempty" tf:"-"` +} + +type SiteRecoveryProtectionContainerMappingObservation struct { + + // a automatic_update block defined as below. + AutomaticUpdate *AutomaticUpdateObservation `json:"automaticUpdate,omitempty" tf:"automatic_update,omitempty"` + + // The ID of the Site Recovery Protection Container Mapping. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Name of fabric that should contains the protection container to map. Changing this forces a new resource to be created. + RecoveryFabricName *string `json:"recoveryFabricName,omitempty" tf:"recovery_fabric_name,omitempty"` + + // Id of the policy to use for this mapping. Changing this forces a new resource to be created. + RecoveryReplicationPolicyID *string `json:"recoveryReplicationPolicyId,omitempty" tf:"recovery_replication_policy_id,omitempty"` + + // Name of the source protection container to map. Changing this forces a new resource to be created. + RecoverySourceProtectionContainerName *string `json:"recoverySourceProtectionContainerName,omitempty" tf:"recovery_source_protection_container_name,omitempty"` + + // Id of target protection container to map to. Changing this forces a new resource to be created. + RecoveryTargetProtectionContainerID *string `json:"recoveryTargetProtectionContainerId,omitempty" tf:"recovery_target_protection_container_id,omitempty"` + + // The name of the vault that should be updated. Changing this forces a new resource to be created. 
+ RecoveryVaultName *string `json:"recoveryVaultName,omitempty" tf:"recovery_vault_name,omitempty"` + + // Name of the resource group where the vault that should be updated is located. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` +} + +type SiteRecoveryProtectionContainerMappingParameters struct { + + // a automatic_update block defined as below. + // +kubebuilder:validation:Optional + AutomaticUpdate *AutomaticUpdateParameters `json:"automaticUpdate,omitempty" tf:"automatic_update,omitempty"` + + // Name of fabric that should contains the protection container to map. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/recoveryservices/v1beta1.SiteRecoveryFabric + // +kubebuilder:validation:Optional + RecoveryFabricName *string `json:"recoveryFabricName,omitempty" tf:"recovery_fabric_name,omitempty"` + + // Reference to a SiteRecoveryFabric in recoveryservices to populate recoveryFabricName. + // +kubebuilder:validation:Optional + RecoveryFabricNameRef *v1.Reference `json:"recoveryFabricNameRef,omitempty" tf:"-"` + + // Selector for a SiteRecoveryFabric in recoveryservices to populate recoveryFabricName. + // +kubebuilder:validation:Optional + RecoveryFabricNameSelector *v1.Selector `json:"recoveryFabricNameSelector,omitempty" tf:"-"` + + // Id of the policy to use for this mapping. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/recoveryservices/v1beta1.SiteRecoveryReplicationPolicy + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + RecoveryReplicationPolicyID *string `json:"recoveryReplicationPolicyId,omitempty" tf:"recovery_replication_policy_id,omitempty"` + + // Reference to a SiteRecoveryReplicationPolicy in recoveryservices to populate recoveryReplicationPolicyId. + // +kubebuilder:validation:Optional + RecoveryReplicationPolicyIDRef *v1.Reference `json:"recoveryReplicationPolicyIdRef,omitempty" tf:"-"` + + // Selector for a SiteRecoveryReplicationPolicy in recoveryservices to populate recoveryReplicationPolicyId. + // +kubebuilder:validation:Optional + RecoveryReplicationPolicyIDSelector *v1.Selector `json:"recoveryReplicationPolicyIdSelector,omitempty" tf:"-"` + + // Name of the source protection container to map. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/recoveryservices/v1beta1.SiteRecoveryProtectionContainer + // +kubebuilder:validation:Optional + RecoverySourceProtectionContainerName *string `json:"recoverySourceProtectionContainerName,omitempty" tf:"recovery_source_protection_container_name,omitempty"` + + // Reference to a SiteRecoveryProtectionContainer in recoveryservices to populate recoverySourceProtectionContainerName. + // +kubebuilder:validation:Optional + RecoverySourceProtectionContainerNameRef *v1.Reference `json:"recoverySourceProtectionContainerNameRef,omitempty" tf:"-"` + + // Selector for a SiteRecoveryProtectionContainer in recoveryservices to populate recoverySourceProtectionContainerName. 
+ // +kubebuilder:validation:Optional + RecoverySourceProtectionContainerNameSelector *v1.Selector `json:"recoverySourceProtectionContainerNameSelector,omitempty" tf:"-"` + + // Id of target protection container to map to. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/recoveryservices/v1beta1.SiteRecoveryProtectionContainer + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + RecoveryTargetProtectionContainerID *string `json:"recoveryTargetProtectionContainerId,omitempty" tf:"recovery_target_protection_container_id,omitempty"` + + // Reference to a SiteRecoveryProtectionContainer in recoveryservices to populate recoveryTargetProtectionContainerId. + // +kubebuilder:validation:Optional + RecoveryTargetProtectionContainerIDRef *v1.Reference `json:"recoveryTargetProtectionContainerIdRef,omitempty" tf:"-"` + + // Selector for a SiteRecoveryProtectionContainer in recoveryservices to populate recoveryTargetProtectionContainerId. + // +kubebuilder:validation:Optional + RecoveryTargetProtectionContainerIDSelector *v1.Selector `json:"recoveryTargetProtectionContainerIdSelector,omitempty" tf:"-"` + + // The name of the vault that should be updated. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/recoveryservices/v1beta2.Vault + // +kubebuilder:validation:Optional + RecoveryVaultName *string `json:"recoveryVaultName,omitempty" tf:"recovery_vault_name,omitempty"` + + // Reference to a Vault in recoveryservices to populate recoveryVaultName. + // +kubebuilder:validation:Optional + RecoveryVaultNameRef *v1.Reference `json:"recoveryVaultNameRef,omitempty" tf:"-"` + + // Selector for a Vault in recoveryservices to populate recoveryVaultName. 
+ // +kubebuilder:validation:Optional + RecoveryVaultNameSelector *v1.Selector `json:"recoveryVaultNameSelector,omitempty" tf:"-"` + + // Name of the resource group where the vault that should be updated is located. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` +} + +// SiteRecoveryProtectionContainerMappingSpec defines the desired state of SiteRecoveryProtectionContainerMapping +type SiteRecoveryProtectionContainerMappingSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider SiteRecoveryProtectionContainerMappingParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider SiteRecoveryProtectionContainerMappingInitParameters `json:"initProvider,omitempty"` +} + +// SiteRecoveryProtectionContainerMappingStatus defines the observed state of SiteRecoveryProtectionContainerMapping. +type SiteRecoveryProtectionContainerMappingStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SiteRecoveryProtectionContainerMappingObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// SiteRecoveryProtectionContainerMapping is the Schema for the SiteRecoveryProtectionContainerMappings API. Manages a Site Recovery protection container mapping on Azure. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type SiteRecoveryProtectionContainerMapping struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec SiteRecoveryProtectionContainerMappingSpec `json:"spec"` + Status SiteRecoveryProtectionContainerMappingStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SiteRecoveryProtectionContainerMappingList contains a list of SiteRecoveryProtectionContainerMappings +type SiteRecoveryProtectionContainerMappingList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SiteRecoveryProtectionContainerMapping `json:"items"` +} + +// Repository type metadata. 
+var ( + SiteRecoveryProtectionContainerMapping_Kind = "SiteRecoveryProtectionContainerMapping" + SiteRecoveryProtectionContainerMapping_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: SiteRecoveryProtectionContainerMapping_Kind}.String() + SiteRecoveryProtectionContainerMapping_KindAPIVersion = SiteRecoveryProtectionContainerMapping_Kind + "." + CRDGroupVersion.String() + SiteRecoveryProtectionContainerMapping_GroupVersionKind = CRDGroupVersion.WithKind(SiteRecoveryProtectionContainerMapping_Kind) +) + +func init() { + SchemeBuilder.Register(&SiteRecoveryProtectionContainerMapping{}, &SiteRecoveryProtectionContainerMappingList{}) +} diff --git a/apis/recoveryservices/v1beta2/zz_vault_terraformed.go b/apis/recoveryservices/v1beta2/zz_vault_terraformed.go new file mode 100755 index 000000000..c10582680 --- /dev/null +++ b/apis/recoveryservices/v1beta2/zz_vault_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Vault +func (mg *Vault) GetTerraformResourceType() string { + return "azurerm_recovery_services_vault" +} + +// GetConnectionDetailsMapping for this Vault +func (tr *Vault) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Vault +func (tr *Vault) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Vault +func (tr *Vault) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Vault +func (tr *Vault) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Vault +func (tr *Vault) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Vault +func (tr *Vault) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Vault +func (tr *Vault) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of 
this Vault +func (tr *Vault) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Vault using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Vault) LateInitialize(attrs []byte) (bool, error) { + params := &VaultParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Vault) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/recoveryservices/v1beta2/zz_vault_types.go b/apis/recoveryservices/v1beta2/zz_vault_types.go new file mode 100755 index 000000000..028883aed --- /dev/null +++ b/apis/recoveryservices/v1beta2/zz_vault_types.go @@ -0,0 +1,343 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type EncryptionInitParameters struct { + + // Enabling/Disabling the Double Encryption state. + InfrastructureEncryptionEnabled *bool `json:"infrastructureEncryptionEnabled,omitempty" tf:"infrastructure_encryption_enabled,omitempty"` + + // The Key Vault key id used to encrypt this vault. Key managed by Vault Managed Hardware Security Module is also supported. + KeyID *string `json:"keyId,omitempty" tf:"key_id,omitempty"` + + // Indicate that system assigned identity should be used or not. Defaults to true. Must be set to false when user_assigned_identity_id is set. + UseSystemAssignedIdentity *bool `json:"useSystemAssignedIdentity,omitempty" tf:"use_system_assigned_identity,omitempty"` + + // Specifies the user assigned identity ID to be used. + UserAssignedIdentityID *string `json:"userAssignedIdentityId,omitempty" tf:"user_assigned_identity_id,omitempty"` +} + +type EncryptionObservation struct { + + // Enabling/Disabling the Double Encryption state. + InfrastructureEncryptionEnabled *bool `json:"infrastructureEncryptionEnabled,omitempty" tf:"infrastructure_encryption_enabled,omitempty"` + + // The Key Vault key id used to encrypt this vault. 
Key managed by Vault Managed Hardware Security Module is also supported. + KeyID *string `json:"keyId,omitempty" tf:"key_id,omitempty"` + + // Indicate that system assigned identity should be used or not. Defaults to true. Must be set to false when user_assigned_identity_id is set. + UseSystemAssignedIdentity *bool `json:"useSystemAssignedIdentity,omitempty" tf:"use_system_assigned_identity,omitempty"` + + // Specifies the user assigned identity ID to be used. + UserAssignedIdentityID *string `json:"userAssignedIdentityId,omitempty" tf:"user_assigned_identity_id,omitempty"` +} + +type EncryptionParameters struct { + + // Enabling/Disabling the Double Encryption state. + // +kubebuilder:validation:Optional + InfrastructureEncryptionEnabled *bool `json:"infrastructureEncryptionEnabled" tf:"infrastructure_encryption_enabled,omitempty"` + + // The Key Vault key id used to encrypt this vault. Key managed by Vault Managed Hardware Security Module is also supported. + // +kubebuilder:validation:Optional + KeyID *string `json:"keyId" tf:"key_id,omitempty"` + + // Indicate that system assigned identity should be used or not. Defaults to true. Must be set to false when user_assigned_identity_id is set. + // +kubebuilder:validation:Optional + UseSystemAssignedIdentity *bool `json:"useSystemAssignedIdentity,omitempty" tf:"use_system_assigned_identity,omitempty"` + + // Specifies the user assigned identity ID to be used. + // +kubebuilder:validation:Optional + UserAssignedIdentityID *string `json:"userAssignedIdentityId,omitempty" tf:"user_assigned_identity_id,omitempty"` +} + +type IdentityInitParameters struct { + + // A list of User Assigned Managed Identity IDs to be assigned to this App Configuration. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Recovery Services Vault. 
Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityObservation struct { + + // A list of User Assigned Managed Identity IDs to be assigned to this App Configuration. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Principal ID associated with this Managed Service Identity. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID associated with this Managed Service Identity. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Recovery Services Vault. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityParameters struct { + + // A list of User Assigned Managed Identity IDs to be assigned to this App Configuration. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Recovery Services Vault. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type MonitoringInitParameters struct { + + // Enabling/Disabling built-in Azure Monitor alerts for security scenarios and job failure scenarios. Defaults to true. + AlertsForAllJobFailuresEnabled *bool `json:"alertsForAllJobFailuresEnabled,omitempty" tf:"alerts_for_all_job_failures_enabled,omitempty"` + + // Enabling/Disabling alerts from the older (classic alerts) solution. Defaults to true. More details could be found here. 
+ AlertsForCriticalOperationFailuresEnabled *bool `json:"alertsForCriticalOperationFailuresEnabled,omitempty" tf:"alerts_for_critical_operation_failures_enabled,omitempty"` +} + +type MonitoringObservation struct { + + // Enabling/Disabling built-in Azure Monitor alerts for security scenarios and job failure scenarios. Defaults to true. + AlertsForAllJobFailuresEnabled *bool `json:"alertsForAllJobFailuresEnabled,omitempty" tf:"alerts_for_all_job_failures_enabled,omitempty"` + + // Enabling/Disabling alerts from the older (classic alerts) solution. Defaults to true. More details could be found here. + AlertsForCriticalOperationFailuresEnabled *bool `json:"alertsForCriticalOperationFailuresEnabled,omitempty" tf:"alerts_for_critical_operation_failures_enabled,omitempty"` +} + +type MonitoringParameters struct { + + // Enabling/Disabling built-in Azure Monitor alerts for security scenarios and job failure scenarios. Defaults to true. + // +kubebuilder:validation:Optional + AlertsForAllJobFailuresEnabled *bool `json:"alertsForAllJobFailuresEnabled,omitempty" tf:"alerts_for_all_job_failures_enabled,omitempty"` + + // Enabling/Disabling alerts from the older (classic alerts) solution. Defaults to true. More details could be found here. + // +kubebuilder:validation:Optional + AlertsForCriticalOperationFailuresEnabled *bool `json:"alertsForCriticalOperationFailuresEnabled,omitempty" tf:"alerts_for_critical_operation_failures_enabled,omitempty"` +} + +type VaultInitParameters struct { + + // Whether to enable the Classic experience for VMware replication. If set to false VMware machines will be protected using the new stateless ASR replication appliance. Changing this forces a new resource to be created. + ClassicVMwareReplicationEnabled *bool `json:"classicVmwareReplicationEnabled,omitempty" tf:"classic_vmware_replication_enabled,omitempty"` + + // Is cross region restore enabled for this Vault? Only can be true, when storage_mode_type is GeoRedundant. Defaults to false. 
+ CrossRegionRestoreEnabled *bool `json:"crossRegionRestoreEnabled,omitempty" tf:"cross_region_restore_enabled,omitempty"` + + // An encryption block as defined below. Required with identity. + Encryption *EncryptionInitParameters `json:"encryption,omitempty" tf:"encryption,omitempty"` + + // An identity block as defined below. + Identity *IdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Immutability Settings of vault, possible values include: Locked, Unlocked and Disabled. + Immutability *string `json:"immutability,omitempty" tf:"immutability,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A monitoring block as defined below. + Monitoring *MonitoringInitParameters `json:"monitoring,omitempty" tf:"monitoring,omitempty"` + + // Is it enabled to access the vault from public networks. Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // Sets the vault's SKU. Possible values include: Standard, RS0. + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // Is soft delete enable for this Vault? Defaults to true. + SoftDeleteEnabled *bool `json:"softDeleteEnabled,omitempty" tf:"soft_delete_enabled,omitempty"` + + // The storage type of the Recovery Services Vault. Possible values are GeoRedundant, LocallyRedundant and ZoneRedundant. Defaults to GeoRedundant. + StorageModeType *string `json:"storageModeType,omitempty" tf:"storage_mode_type,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type VaultObservation struct { + + // Whether to enable the Classic experience for VMware replication. 
If set to false VMware machines will be protected using the new stateless ASR replication appliance. Changing this forces a new resource to be created. + ClassicVMwareReplicationEnabled *bool `json:"classicVmwareReplicationEnabled,omitempty" tf:"classic_vmware_replication_enabled,omitempty"` + + // Is cross region restore enabled for this Vault? Only can be true, when storage_mode_type is GeoRedundant. Defaults to false. + CrossRegionRestoreEnabled *bool `json:"crossRegionRestoreEnabled,omitempty" tf:"cross_region_restore_enabled,omitempty"` + + // An encryption block as defined below. Required with identity. + Encryption *EncryptionObservation `json:"encryption,omitempty" tf:"encryption,omitempty"` + + // The ID of the Recovery Services Vault. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. + Identity *IdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // Immutability Settings of vault, possible values include: Locked, Unlocked and Disabled. + Immutability *string `json:"immutability,omitempty" tf:"immutability,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A monitoring block as defined below. + Monitoring *MonitoringObservation `json:"monitoring,omitempty" tf:"monitoring,omitempty"` + + // Is it enabled to access the vault from public networks. Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The name of the resource group in which to create the Recovery Services Vault. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Sets the vault's SKU. Possible values include: Standard, RS0. 
+ Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // Is soft delete enable for this Vault? Defaults to true. + SoftDeleteEnabled *bool `json:"softDeleteEnabled,omitempty" tf:"soft_delete_enabled,omitempty"` + + // The storage type of the Recovery Services Vault. Possible values are GeoRedundant, LocallyRedundant and ZoneRedundant. Defaults to GeoRedundant. + StorageModeType *string `json:"storageModeType,omitempty" tf:"storage_mode_type,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type VaultParameters struct { + + // Whether to enable the Classic experience for VMware replication. If set to false VMware machines will be protected using the new stateless ASR replication appliance. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ClassicVMwareReplicationEnabled *bool `json:"classicVmwareReplicationEnabled,omitempty" tf:"classic_vmware_replication_enabled,omitempty"` + + // Is cross region restore enabled for this Vault? Only can be true, when storage_mode_type is GeoRedundant. Defaults to false. + // +kubebuilder:validation:Optional + CrossRegionRestoreEnabled *bool `json:"crossRegionRestoreEnabled,omitempty" tf:"cross_region_restore_enabled,omitempty"` + + // An encryption block as defined below. Required with identity. + // +kubebuilder:validation:Optional + Encryption *EncryptionParameters `json:"encryption,omitempty" tf:"encryption,omitempty"` + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *IdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Immutability Settings of vault, possible values include: Locked, Unlocked and Disabled. + // +kubebuilder:validation:Optional + Immutability *string `json:"immutability,omitempty" tf:"immutability,omitempty"` + + // Specifies the supported Azure location where the resource exists. 
Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A monitoring block as defined below. + // +kubebuilder:validation:Optional + Monitoring *MonitoringParameters `json:"monitoring,omitempty" tf:"monitoring,omitempty"` + + // Is it enabled to access the vault from public networks. Defaults to true. + // +kubebuilder:validation:Optional + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The name of the resource group in which to create the Recovery Services Vault. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // Sets the vault's SKU. Possible values include: Standard, RS0. + // +kubebuilder:validation:Optional + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // Is soft delete enable for this Vault? Defaults to true. + // +kubebuilder:validation:Optional + SoftDeleteEnabled *bool `json:"softDeleteEnabled,omitempty" tf:"soft_delete_enabled,omitempty"` + + // The storage type of the Recovery Services Vault. Possible values are GeoRedundant, LocallyRedundant and ZoneRedundant. Defaults to GeoRedundant. 
+ // +kubebuilder:validation:Optional + StorageModeType *string `json:"storageModeType,omitempty" tf:"storage_mode_type,omitempty"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +// VaultSpec defines the desired state of Vault +type VaultSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider VaultParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider VaultInitParameters `json:"initProvider,omitempty"` +} + +// VaultStatus defines the observed state of Vault. +type VaultStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider VaultObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Vault is the Schema for the Vaults API. Manages a Recovery Services Vault. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type Vault struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.sku) || (has(self.initProvider) && has(self.initProvider.sku))",message="spec.forProvider.sku is a required parameter" + Spec VaultSpec `json:"spec"` + Status VaultStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// VaultList contains a list of Vaults +type VaultList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Vault `json:"items"` +} + +// Repository type metadata. +var ( + Vault_Kind = "Vault" + Vault_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Vault_Kind}.String() + Vault_KindAPIVersion = Vault_Kind + "." 
+ CRDGroupVersion.String() + Vault_GroupVersionKind = CRDGroupVersion.WithKind(Vault_Kind) +) + +func init() { + SchemeBuilder.Register(&Vault{}, &VaultList{}) +} diff --git a/apis/relay/v1beta1/zz_generated.conversion_hubs.go b/apis/relay/v1beta1/zz_generated.conversion_hubs.go index 4918a106c..f0ce12174 100755 --- a/apis/relay/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/relay/v1beta1/zz_generated.conversion_hubs.go @@ -7,13 +7,13 @@ package v1beta1 // Hub marks this type as a conversion hub. -func (tr *HybridConnection) Hub() {} +func (tr *EventRelayNamespace) Hub() {} // Hub marks this type as a conversion hub. -func (tr *HybridConnectionAuthorizationRule) Hub() {} +func (tr *HybridConnection) Hub() {} // Hub marks this type as a conversion hub. -func (tr *EventRelayNamespace) Hub() {} +func (tr *HybridConnectionAuthorizationRule) Hub() {} // Hub marks this type as a conversion hub. func (tr *NamespaceAuthorizationRule) Hub() {} diff --git a/apis/resources/v1beta1/zz_generated.conversion_hubs.go b/apis/resources/v1beta1/zz_generated.conversion_hubs.go index c418bb3e9..8518fef14 100755 --- a/apis/resources/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/resources/v1beta1/zz_generated.conversion_hubs.go @@ -6,12 +6,6 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *ResourceDeploymentScriptAzureCli) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *ResourceDeploymentScriptAzurePowerShell) Hub() {} - // Hub marks this type as a conversion hub. func (tr *ResourceGroupTemplateDeployment) Hub() {} diff --git a/apis/resources/v1beta1/zz_generated.conversion_spokes.go b/apis/resources/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..4b598e24c --- /dev/null +++ b/apis/resources/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,54 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this ResourceDeploymentScriptAzureCli to the hub type. +func (tr *ResourceDeploymentScriptAzureCli) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ResourceDeploymentScriptAzureCli type. +func (tr *ResourceDeploymentScriptAzureCli) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this ResourceDeploymentScriptAzurePowerShell to the hub type. +func (tr *ResourceDeploymentScriptAzurePowerShell) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ResourceDeploymentScriptAzurePowerShell type. 
+func (tr *ResourceDeploymentScriptAzurePowerShell) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/resources/v1beta2/zz_generated.conversion_hubs.go b/apis/resources/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 000000000..bd0b0432c --- /dev/null +++ b/apis/resources/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,13 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *ResourceDeploymentScriptAzureCli) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *ResourceDeploymentScriptAzurePowerShell) Hub() {} diff --git a/apis/resources/v1beta2/zz_generated.deepcopy.go b/apis/resources/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..c01dcf6a9 --- /dev/null +++ b/apis/resources/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,1612 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContainerInitParameters) DeepCopyInto(out *ContainerInitParameters) { + *out = *in + if in.ContainerGroupName != nil { + in, out := &in.ContainerGroupName, &out.ContainerGroupName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerInitParameters. +func (in *ContainerInitParameters) DeepCopy() *ContainerInitParameters { + if in == nil { + return nil + } + out := new(ContainerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerObservation) DeepCopyInto(out *ContainerObservation) { + *out = *in + if in.ContainerGroupName != nil { + in, out := &in.ContainerGroupName, &out.ContainerGroupName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerObservation. +func (in *ContainerObservation) DeepCopy() *ContainerObservation { + if in == nil { + return nil + } + out := new(ContainerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerParameters) DeepCopyInto(out *ContainerParameters) { + *out = *in + if in.ContainerGroupName != nil { + in, out := &in.ContainerGroupName, &out.ContainerGroupName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerParameters. +func (in *ContainerParameters) DeepCopy() *ContainerParameters { + if in == nil { + return nil + } + out := new(ContainerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EnvironmentVariableInitParameters) DeepCopyInto(out *EnvironmentVariableInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvironmentVariableInitParameters. +func (in *EnvironmentVariableInitParameters) DeepCopy() *EnvironmentVariableInitParameters { + if in == nil { + return nil + } + out := new(EnvironmentVariableInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvironmentVariableObservation) DeepCopyInto(out *EnvironmentVariableObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvironmentVariableObservation. +func (in *EnvironmentVariableObservation) DeepCopy() *EnvironmentVariableObservation { + if in == nil { + return nil + } + out := new(EnvironmentVariableObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EnvironmentVariableParameters) DeepCopyInto(out *EnvironmentVariableParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SecureValueSecretRef != nil { + in, out := &in.SecureValueSecretRef, &out.SecureValueSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvironmentVariableParameters. +func (in *EnvironmentVariableParameters) DeepCopy() *EnvironmentVariableParameters { + if in == nil { + return nil + } + out := new(EnvironmentVariableParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityInitParameters) DeepCopyInto(out *IdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IdentityIdsRefs != nil { + in, out := &in.IdentityIdsRefs, &out.IdentityIdsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IdentityIdsSelector != nil { + in, out := &in.IdentityIdsSelector, &out.IdentityIdsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityInitParameters. 
+func (in *IdentityInitParameters) DeepCopy() *IdentityInitParameters { + if in == nil { + return nil + } + out := new(IdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityObservation) DeepCopyInto(out *IdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityObservation. +func (in *IdentityObservation) DeepCopy() *IdentityObservation { + if in == nil { + return nil + } + out := new(IdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentityParameters) DeepCopyInto(out *IdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IdentityIdsRefs != nil { + in, out := &in.IdentityIdsRefs, &out.IdentityIdsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IdentityIdsSelector != nil { + in, out := &in.IdentityIdsSelector, &out.IdentityIdsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityParameters. +func (in *IdentityParameters) DeepCopy() *IdentityParameters { + if in == nil { + return nil + } + out := new(IdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceDeploymentScriptAzureCli) DeepCopyInto(out *ResourceDeploymentScriptAzureCli) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceDeploymentScriptAzureCli. +func (in *ResourceDeploymentScriptAzureCli) DeepCopy() *ResourceDeploymentScriptAzureCli { + if in == nil { + return nil + } + out := new(ResourceDeploymentScriptAzureCli) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ResourceDeploymentScriptAzureCli) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceDeploymentScriptAzureCliInitParameters) DeepCopyInto(out *ResourceDeploymentScriptAzureCliInitParameters) { + *out = *in + if in.CleanupPreference != nil { + in, out := &in.CleanupPreference, &out.CleanupPreference + *out = new(string) + **out = **in + } + if in.CommandLine != nil { + in, out := &in.CommandLine, &out.CommandLine + *out = new(string) + **out = **in + } + if in.Container != nil { + in, out := &in.Container, &out.Container + *out = new(ContainerInitParameters) + (*in).DeepCopyInto(*out) + } + if in.EnvironmentVariable != nil { + in, out := &in.EnvironmentVariable, &out.EnvironmentVariable + *out = make([]EnvironmentVariableInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ForceUpdateTag != nil { + in, out := &in.ForceUpdateTag, &out.ForceUpdateTag + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PrimaryScriptURI != nil { + in, out := &in.PrimaryScriptURI, &out.PrimaryScriptURI + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, 
&out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RetentionInterval != nil { + in, out := &in.RetentionInterval, &out.RetentionInterval + *out = new(string) + **out = **in + } + if in.ScriptContent != nil { + in, out := &in.ScriptContent, &out.ScriptContent + *out = new(string) + **out = **in + } + if in.StorageAccount != nil { + in, out := &in.StorageAccount, &out.StorageAccount + *out = new(StorageAccountInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SupportingScriptUris != nil { + in, out := &in.SupportingScriptUris, &out.SupportingScriptUris + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceDeploymentScriptAzureCliInitParameters. +func (in *ResourceDeploymentScriptAzureCliInitParameters) DeepCopy() *ResourceDeploymentScriptAzureCliInitParameters { + if in == nil { + return nil + } + out := new(ResourceDeploymentScriptAzureCliInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceDeploymentScriptAzureCliList) DeepCopyInto(out *ResourceDeploymentScriptAzureCliList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ResourceDeploymentScriptAzureCli, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceDeploymentScriptAzureCliList. +func (in *ResourceDeploymentScriptAzureCliList) DeepCopy() *ResourceDeploymentScriptAzureCliList { + if in == nil { + return nil + } + out := new(ResourceDeploymentScriptAzureCliList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceDeploymentScriptAzureCliList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceDeploymentScriptAzureCliObservation) DeepCopyInto(out *ResourceDeploymentScriptAzureCliObservation) { + *out = *in + if in.CleanupPreference != nil { + in, out := &in.CleanupPreference, &out.CleanupPreference + *out = new(string) + **out = **in + } + if in.CommandLine != nil { + in, out := &in.CommandLine, &out.CommandLine + *out = new(string) + **out = **in + } + if in.Container != nil { + in, out := &in.Container, &out.Container + *out = new(ContainerObservation) + (*in).DeepCopyInto(*out) + } + if in.EnvironmentVariable != nil { + in, out := &in.EnvironmentVariable, &out.EnvironmentVariable + *out = make([]EnvironmentVariableObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ForceUpdateTag != nil { + in, out := &in.ForceUpdateTag, &out.ForceUpdateTag + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Outputs != nil { + in, out := &in.Outputs, &out.Outputs + *out = new(string) + **out = **in + } + if in.PrimaryScriptURI != nil { + in, out := &in.PrimaryScriptURI, &out.PrimaryScriptURI + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.RetentionInterval != nil { + in, out := &in.RetentionInterval, &out.RetentionInterval + *out = new(string) + **out = **in + } + if in.ScriptContent != nil { + in, out := &in.ScriptContent, &out.ScriptContent + *out = new(string) + **out = **in + } + if in.StorageAccount != nil { + in, out := &in.StorageAccount, 
&out.StorageAccount + *out = new(StorageAccountObservation) + (*in).DeepCopyInto(*out) + } + if in.SupportingScriptUris != nil { + in, out := &in.SupportingScriptUris, &out.SupportingScriptUris + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceDeploymentScriptAzureCliObservation. +func (in *ResourceDeploymentScriptAzureCliObservation) DeepCopy() *ResourceDeploymentScriptAzureCliObservation { + if in == nil { + return nil + } + out := new(ResourceDeploymentScriptAzureCliObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceDeploymentScriptAzureCliParameters) DeepCopyInto(out *ResourceDeploymentScriptAzureCliParameters) { + *out = *in + if in.CleanupPreference != nil { + in, out := &in.CleanupPreference, &out.CleanupPreference + *out = new(string) + **out = **in + } + if in.CommandLine != nil { + in, out := &in.CommandLine, &out.CommandLine + *out = new(string) + **out = **in + } + if in.Container != nil { + in, out := &in.Container, &out.Container + *out = new(ContainerParameters) + (*in).DeepCopyInto(*out) + } + if in.EnvironmentVariable != nil { + in, out := &in.EnvironmentVariable, &out.EnvironmentVariable + *out = make([]EnvironmentVariableParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ForceUpdateTag != nil { + in, out := &in.ForceUpdateTag, &out.ForceUpdateTag + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PrimaryScriptURI != nil { + in, out := &in.PrimaryScriptURI, &out.PrimaryScriptURI + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RetentionInterval != nil { + in, out := &in.RetentionInterval, &out.RetentionInterval + *out = new(string) + **out = **in + } + if in.ScriptContent != nil { + in, out := &in.ScriptContent, 
&out.ScriptContent + *out = new(string) + **out = **in + } + if in.StorageAccount != nil { + in, out := &in.StorageAccount, &out.StorageAccount + *out = new(StorageAccountParameters) + (*in).DeepCopyInto(*out) + } + if in.SupportingScriptUris != nil { + in, out := &in.SupportingScriptUris, &out.SupportingScriptUris + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceDeploymentScriptAzureCliParameters. +func (in *ResourceDeploymentScriptAzureCliParameters) DeepCopy() *ResourceDeploymentScriptAzureCliParameters { + if in == nil { + return nil + } + out := new(ResourceDeploymentScriptAzureCliParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceDeploymentScriptAzureCliSpec) DeepCopyInto(out *ResourceDeploymentScriptAzureCliSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceDeploymentScriptAzureCliSpec. 
+func (in *ResourceDeploymentScriptAzureCliSpec) DeepCopy() *ResourceDeploymentScriptAzureCliSpec { + if in == nil { + return nil + } + out := new(ResourceDeploymentScriptAzureCliSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceDeploymentScriptAzureCliStatus) DeepCopyInto(out *ResourceDeploymentScriptAzureCliStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceDeploymentScriptAzureCliStatus. +func (in *ResourceDeploymentScriptAzureCliStatus) DeepCopy() *ResourceDeploymentScriptAzureCliStatus { + if in == nil { + return nil + } + out := new(ResourceDeploymentScriptAzureCliStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceDeploymentScriptAzurePowerShell) DeepCopyInto(out *ResourceDeploymentScriptAzurePowerShell) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceDeploymentScriptAzurePowerShell. +func (in *ResourceDeploymentScriptAzurePowerShell) DeepCopy() *ResourceDeploymentScriptAzurePowerShell { + if in == nil { + return nil + } + out := new(ResourceDeploymentScriptAzurePowerShell) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ResourceDeploymentScriptAzurePowerShell) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceDeploymentScriptAzurePowerShellContainerInitParameters) DeepCopyInto(out *ResourceDeploymentScriptAzurePowerShellContainerInitParameters) { + *out = *in + if in.ContainerGroupName != nil { + in, out := &in.ContainerGroupName, &out.ContainerGroupName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceDeploymentScriptAzurePowerShellContainerInitParameters. +func (in *ResourceDeploymentScriptAzurePowerShellContainerInitParameters) DeepCopy() *ResourceDeploymentScriptAzurePowerShellContainerInitParameters { + if in == nil { + return nil + } + out := new(ResourceDeploymentScriptAzurePowerShellContainerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceDeploymentScriptAzurePowerShellContainerObservation) DeepCopyInto(out *ResourceDeploymentScriptAzurePowerShellContainerObservation) { + *out = *in + if in.ContainerGroupName != nil { + in, out := &in.ContainerGroupName, &out.ContainerGroupName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceDeploymentScriptAzurePowerShellContainerObservation. 
+func (in *ResourceDeploymentScriptAzurePowerShellContainerObservation) DeepCopy() *ResourceDeploymentScriptAzurePowerShellContainerObservation { + if in == nil { + return nil + } + out := new(ResourceDeploymentScriptAzurePowerShellContainerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceDeploymentScriptAzurePowerShellContainerParameters) DeepCopyInto(out *ResourceDeploymentScriptAzurePowerShellContainerParameters) { + *out = *in + if in.ContainerGroupName != nil { + in, out := &in.ContainerGroupName, &out.ContainerGroupName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceDeploymentScriptAzurePowerShellContainerParameters. +func (in *ResourceDeploymentScriptAzurePowerShellContainerParameters) DeepCopy() *ResourceDeploymentScriptAzurePowerShellContainerParameters { + if in == nil { + return nil + } + out := new(ResourceDeploymentScriptAzurePowerShellContainerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceDeploymentScriptAzurePowerShellEnvironmentVariableInitParameters) DeepCopyInto(out *ResourceDeploymentScriptAzurePowerShellEnvironmentVariableInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceDeploymentScriptAzurePowerShellEnvironmentVariableInitParameters. 
+func (in *ResourceDeploymentScriptAzurePowerShellEnvironmentVariableInitParameters) DeepCopy() *ResourceDeploymentScriptAzurePowerShellEnvironmentVariableInitParameters { + if in == nil { + return nil + } + out := new(ResourceDeploymentScriptAzurePowerShellEnvironmentVariableInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceDeploymentScriptAzurePowerShellEnvironmentVariableObservation) DeepCopyInto(out *ResourceDeploymentScriptAzurePowerShellEnvironmentVariableObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceDeploymentScriptAzurePowerShellEnvironmentVariableObservation. +func (in *ResourceDeploymentScriptAzurePowerShellEnvironmentVariableObservation) DeepCopy() *ResourceDeploymentScriptAzurePowerShellEnvironmentVariableObservation { + if in == nil { + return nil + } + out := new(ResourceDeploymentScriptAzurePowerShellEnvironmentVariableObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceDeploymentScriptAzurePowerShellEnvironmentVariableParameters) DeepCopyInto(out *ResourceDeploymentScriptAzurePowerShellEnvironmentVariableParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SecureValueSecretRef != nil { + in, out := &in.SecureValueSecretRef, &out.SecureValueSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceDeploymentScriptAzurePowerShellEnvironmentVariableParameters. +func (in *ResourceDeploymentScriptAzurePowerShellEnvironmentVariableParameters) DeepCopy() *ResourceDeploymentScriptAzurePowerShellEnvironmentVariableParameters { + if in == nil { + return nil + } + out := new(ResourceDeploymentScriptAzurePowerShellEnvironmentVariableParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceDeploymentScriptAzurePowerShellIdentityInitParameters) DeepCopyInto(out *ResourceDeploymentScriptAzurePowerShellIdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IdentityIdsRefs != nil { + in, out := &in.IdentityIdsRefs, &out.IdentityIdsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IdentityIdsSelector != nil { + in, out := &in.IdentityIdsSelector, &out.IdentityIdsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceDeploymentScriptAzurePowerShellIdentityInitParameters. +func (in *ResourceDeploymentScriptAzurePowerShellIdentityInitParameters) DeepCopy() *ResourceDeploymentScriptAzurePowerShellIdentityInitParameters { + if in == nil { + return nil + } + out := new(ResourceDeploymentScriptAzurePowerShellIdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceDeploymentScriptAzurePowerShellIdentityObservation) DeepCopyInto(out *ResourceDeploymentScriptAzurePowerShellIdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceDeploymentScriptAzurePowerShellIdentityObservation. +func (in *ResourceDeploymentScriptAzurePowerShellIdentityObservation) DeepCopy() *ResourceDeploymentScriptAzurePowerShellIdentityObservation { + if in == nil { + return nil + } + out := new(ResourceDeploymentScriptAzurePowerShellIdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceDeploymentScriptAzurePowerShellIdentityParameters) DeepCopyInto(out *ResourceDeploymentScriptAzurePowerShellIdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IdentityIdsRefs != nil { + in, out := &in.IdentityIdsRefs, &out.IdentityIdsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IdentityIdsSelector != nil { + in, out := &in.IdentityIdsSelector, &out.IdentityIdsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceDeploymentScriptAzurePowerShellIdentityParameters. +func (in *ResourceDeploymentScriptAzurePowerShellIdentityParameters) DeepCopy() *ResourceDeploymentScriptAzurePowerShellIdentityParameters { + if in == nil { + return nil + } + out := new(ResourceDeploymentScriptAzurePowerShellIdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceDeploymentScriptAzurePowerShellInitParameters) DeepCopyInto(out *ResourceDeploymentScriptAzurePowerShellInitParameters) { + *out = *in + if in.CleanupPreference != nil { + in, out := &in.CleanupPreference, &out.CleanupPreference + *out = new(string) + **out = **in + } + if in.CommandLine != nil { + in, out := &in.CommandLine, &out.CommandLine + *out = new(string) + **out = **in + } + if in.Container != nil { + in, out := &in.Container, &out.Container + *out = new(ResourceDeploymentScriptAzurePowerShellContainerInitParameters) + (*in).DeepCopyInto(*out) + } + if in.EnvironmentVariable != nil { + in, out := &in.EnvironmentVariable, &out.EnvironmentVariable + *out = make([]ResourceDeploymentScriptAzurePowerShellEnvironmentVariableInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ForceUpdateTag != nil { + in, out := &in.ForceUpdateTag, &out.ForceUpdateTag + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(ResourceDeploymentScriptAzurePowerShellIdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PrimaryScriptURI != nil { + in, out := &in.PrimaryScriptURI, &out.PrimaryScriptURI + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RetentionInterval != nil { + in, out := 
&in.RetentionInterval, &out.RetentionInterval + *out = new(string) + **out = **in + } + if in.ScriptContent != nil { + in, out := &in.ScriptContent, &out.ScriptContent + *out = new(string) + **out = **in + } + if in.StorageAccount != nil { + in, out := &in.StorageAccount, &out.StorageAccount + *out = new(ResourceDeploymentScriptAzurePowerShellStorageAccountInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SupportingScriptUris != nil { + in, out := &in.SupportingScriptUris, &out.SupportingScriptUris + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceDeploymentScriptAzurePowerShellInitParameters. +func (in *ResourceDeploymentScriptAzurePowerShellInitParameters) DeepCopy() *ResourceDeploymentScriptAzurePowerShellInitParameters { + if in == nil { + return nil + } + out := new(ResourceDeploymentScriptAzurePowerShellInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceDeploymentScriptAzurePowerShellList) DeepCopyInto(out *ResourceDeploymentScriptAzurePowerShellList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ResourceDeploymentScriptAzurePowerShell, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceDeploymentScriptAzurePowerShellList. +func (in *ResourceDeploymentScriptAzurePowerShellList) DeepCopy() *ResourceDeploymentScriptAzurePowerShellList { + if in == nil { + return nil + } + out := new(ResourceDeploymentScriptAzurePowerShellList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceDeploymentScriptAzurePowerShellList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceDeploymentScriptAzurePowerShellObservation) DeepCopyInto(out *ResourceDeploymentScriptAzurePowerShellObservation) { + *out = *in + if in.CleanupPreference != nil { + in, out := &in.CleanupPreference, &out.CleanupPreference + *out = new(string) + **out = **in + } + if in.CommandLine != nil { + in, out := &in.CommandLine, &out.CommandLine + *out = new(string) + **out = **in + } + if in.Container != nil { + in, out := &in.Container, &out.Container + *out = new(ResourceDeploymentScriptAzurePowerShellContainerObservation) + (*in).DeepCopyInto(*out) + } + if in.EnvironmentVariable != nil { + in, out := &in.EnvironmentVariable, &out.EnvironmentVariable + *out = make([]ResourceDeploymentScriptAzurePowerShellEnvironmentVariableObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ForceUpdateTag != nil { + in, out := &in.ForceUpdateTag, &out.ForceUpdateTag + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(ResourceDeploymentScriptAzurePowerShellIdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Outputs != nil { + in, out := &in.Outputs, &out.Outputs + *out = new(string) + **out = **in + } + if in.PrimaryScriptURI != nil { + in, out := &in.PrimaryScriptURI, &out.PrimaryScriptURI + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.RetentionInterval != nil { + in, out := &in.RetentionInterval, &out.RetentionInterval + *out = new(string) + **out = **in + } + if in.ScriptContent != nil { + in, out := &in.ScriptContent, 
&out.ScriptContent + *out = new(string) + **out = **in + } + if in.StorageAccount != nil { + in, out := &in.StorageAccount, &out.StorageAccount + *out = new(ResourceDeploymentScriptAzurePowerShellStorageAccountObservation) + (*in).DeepCopyInto(*out) + } + if in.SupportingScriptUris != nil { + in, out := &in.SupportingScriptUris, &out.SupportingScriptUris + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceDeploymentScriptAzurePowerShellObservation. +func (in *ResourceDeploymentScriptAzurePowerShellObservation) DeepCopy() *ResourceDeploymentScriptAzurePowerShellObservation { + if in == nil { + return nil + } + out := new(ResourceDeploymentScriptAzurePowerShellObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceDeploymentScriptAzurePowerShellParameters) DeepCopyInto(out *ResourceDeploymentScriptAzurePowerShellParameters) { + *out = *in + if in.CleanupPreference != nil { + in, out := &in.CleanupPreference, &out.CleanupPreference + *out = new(string) + **out = **in + } + if in.CommandLine != nil { + in, out := &in.CommandLine, &out.CommandLine + *out = new(string) + **out = **in + } + if in.Container != nil { + in, out := &in.Container, &out.Container + *out = new(ResourceDeploymentScriptAzurePowerShellContainerParameters) + (*in).DeepCopyInto(*out) + } + if in.EnvironmentVariable != nil { + in, out := &in.EnvironmentVariable, &out.EnvironmentVariable + *out = make([]ResourceDeploymentScriptAzurePowerShellEnvironmentVariableParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ForceUpdateTag != nil { + in, out := &in.ForceUpdateTag, &out.ForceUpdateTag + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(ResourceDeploymentScriptAzurePowerShellIdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PrimaryScriptURI != nil { + in, out := &in.PrimaryScriptURI, &out.PrimaryScriptURI + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.RetentionInterval != nil { + in, out := 
&in.RetentionInterval, &out.RetentionInterval + *out = new(string) + **out = **in + } + if in.ScriptContent != nil { + in, out := &in.ScriptContent, &out.ScriptContent + *out = new(string) + **out = **in + } + if in.StorageAccount != nil { + in, out := &in.StorageAccount, &out.StorageAccount + *out = new(ResourceDeploymentScriptAzurePowerShellStorageAccountParameters) + (*in).DeepCopyInto(*out) + } + if in.SupportingScriptUris != nil { + in, out := &in.SupportingScriptUris, &out.SupportingScriptUris + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceDeploymentScriptAzurePowerShellParameters. +func (in *ResourceDeploymentScriptAzurePowerShellParameters) DeepCopy() *ResourceDeploymentScriptAzurePowerShellParameters { + if in == nil { + return nil + } + out := new(ResourceDeploymentScriptAzurePowerShellParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceDeploymentScriptAzurePowerShellSpec) DeepCopyInto(out *ResourceDeploymentScriptAzurePowerShellSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceDeploymentScriptAzurePowerShellSpec. +func (in *ResourceDeploymentScriptAzurePowerShellSpec) DeepCopy() *ResourceDeploymentScriptAzurePowerShellSpec { + if in == nil { + return nil + } + out := new(ResourceDeploymentScriptAzurePowerShellSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceDeploymentScriptAzurePowerShellStatus) DeepCopyInto(out *ResourceDeploymentScriptAzurePowerShellStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceDeploymentScriptAzurePowerShellStatus. +func (in *ResourceDeploymentScriptAzurePowerShellStatus) DeepCopy() *ResourceDeploymentScriptAzurePowerShellStatus { + if in == nil { + return nil + } + out := new(ResourceDeploymentScriptAzurePowerShellStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceDeploymentScriptAzurePowerShellStorageAccountInitParameters) DeepCopyInto(out *ResourceDeploymentScriptAzurePowerShellStorageAccountInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceDeploymentScriptAzurePowerShellStorageAccountInitParameters. 
+func (in *ResourceDeploymentScriptAzurePowerShellStorageAccountInitParameters) DeepCopy() *ResourceDeploymentScriptAzurePowerShellStorageAccountInitParameters { + if in == nil { + return nil + } + out := new(ResourceDeploymentScriptAzurePowerShellStorageAccountInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceDeploymentScriptAzurePowerShellStorageAccountObservation) DeepCopyInto(out *ResourceDeploymentScriptAzurePowerShellStorageAccountObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceDeploymentScriptAzurePowerShellStorageAccountObservation. +func (in *ResourceDeploymentScriptAzurePowerShellStorageAccountObservation) DeepCopy() *ResourceDeploymentScriptAzurePowerShellStorageAccountObservation { + if in == nil { + return nil + } + out := new(ResourceDeploymentScriptAzurePowerShellStorageAccountObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceDeploymentScriptAzurePowerShellStorageAccountParameters) DeepCopyInto(out *ResourceDeploymentScriptAzurePowerShellStorageAccountParameters) { + *out = *in + out.KeySecretRef = in.KeySecretRef + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceDeploymentScriptAzurePowerShellStorageAccountParameters. 
+func (in *ResourceDeploymentScriptAzurePowerShellStorageAccountParameters) DeepCopy() *ResourceDeploymentScriptAzurePowerShellStorageAccountParameters { + if in == nil { + return nil + } + out := new(ResourceDeploymentScriptAzurePowerShellStorageAccountParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageAccountInitParameters) DeepCopyInto(out *StorageAccountInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageAccountInitParameters. +func (in *StorageAccountInitParameters) DeepCopy() *StorageAccountInitParameters { + if in == nil { + return nil + } + out := new(StorageAccountInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageAccountObservation) DeepCopyInto(out *StorageAccountObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageAccountObservation. +func (in *StorageAccountObservation) DeepCopy() *StorageAccountObservation { + if in == nil { + return nil + } + out := new(StorageAccountObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StorageAccountParameters) DeepCopyInto(out *StorageAccountParameters) { + *out = *in + out.KeySecretRef = in.KeySecretRef + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageAccountParameters. +func (in *StorageAccountParameters) DeepCopy() *StorageAccountParameters { + if in == nil { + return nil + } + out := new(StorageAccountParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/resources/v1beta2/zz_generated.managed.go b/apis/resources/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..122d8369b --- /dev/null +++ b/apis/resources/v1beta2/zz_generated.managed.go @@ -0,0 +1,128 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this ResourceDeploymentScriptAzureCli. +func (mg *ResourceDeploymentScriptAzureCli) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ResourceDeploymentScriptAzureCli. +func (mg *ResourceDeploymentScriptAzureCli) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ResourceDeploymentScriptAzureCli. +func (mg *ResourceDeploymentScriptAzureCli) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ResourceDeploymentScriptAzureCli. +func (mg *ResourceDeploymentScriptAzureCli) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ResourceDeploymentScriptAzureCli. 
+func (mg *ResourceDeploymentScriptAzureCli) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ResourceDeploymentScriptAzureCli. +func (mg *ResourceDeploymentScriptAzureCli) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ResourceDeploymentScriptAzureCli. +func (mg *ResourceDeploymentScriptAzureCli) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ResourceDeploymentScriptAzureCli. +func (mg *ResourceDeploymentScriptAzureCli) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ResourceDeploymentScriptAzureCli. +func (mg *ResourceDeploymentScriptAzureCli) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ResourceDeploymentScriptAzureCli. +func (mg *ResourceDeploymentScriptAzureCli) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ResourceDeploymentScriptAzureCli. +func (mg *ResourceDeploymentScriptAzureCli) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ResourceDeploymentScriptAzureCli. +func (mg *ResourceDeploymentScriptAzureCli) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this ResourceDeploymentScriptAzurePowerShell. +func (mg *ResourceDeploymentScriptAzurePowerShell) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ResourceDeploymentScriptAzurePowerShell. 
+func (mg *ResourceDeploymentScriptAzurePowerShell) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ResourceDeploymentScriptAzurePowerShell. +func (mg *ResourceDeploymentScriptAzurePowerShell) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ResourceDeploymentScriptAzurePowerShell. +func (mg *ResourceDeploymentScriptAzurePowerShell) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ResourceDeploymentScriptAzurePowerShell. +func (mg *ResourceDeploymentScriptAzurePowerShell) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ResourceDeploymentScriptAzurePowerShell. +func (mg *ResourceDeploymentScriptAzurePowerShell) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ResourceDeploymentScriptAzurePowerShell. +func (mg *ResourceDeploymentScriptAzurePowerShell) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ResourceDeploymentScriptAzurePowerShell. +func (mg *ResourceDeploymentScriptAzurePowerShell) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ResourceDeploymentScriptAzurePowerShell. +func (mg *ResourceDeploymentScriptAzurePowerShell) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ResourceDeploymentScriptAzurePowerShell. +func (mg *ResourceDeploymentScriptAzurePowerShell) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ResourceDeploymentScriptAzurePowerShell. 
+func (mg *ResourceDeploymentScriptAzurePowerShell) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ResourceDeploymentScriptAzurePowerShell. +func (mg *ResourceDeploymentScriptAzurePowerShell) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/resources/v1beta2/zz_generated.managedlist.go b/apis/resources/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..fda3da382 --- /dev/null +++ b/apis/resources/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,26 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this ResourceDeploymentScriptAzureCliList. +func (l *ResourceDeploymentScriptAzureCliList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ResourceDeploymentScriptAzurePowerShellList. +func (l *ResourceDeploymentScriptAzurePowerShellList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/resources/v1beta2/zz_generated.resolvers.go b/apis/resources/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..f0481b109 --- /dev/null +++ b/apis/resources/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,204 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + rconfig "github.com/upbound/provider-azure/apis/rconfig" + apisresolver "github.com/upbound/provider-azure/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *ResourceDeploymentScriptAzureCli) ResolveReferences( // ResolveReferences of this ResourceDeploymentScriptAzureCli. + ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + + if mg.Spec.ForProvider.Identity != nil { + { + m, l, err = apisresolver.GetManagedResource("managedidentity.azure.upbound.io", "v1beta1", "UserAssignedIdentity", "UserAssignedIdentityList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.Identity.IdentityIds), + Extract: rconfig.ExtractResourceID(), + References: mg.Spec.ForProvider.Identity.IdentityIdsRefs, + Selector: mg.Spec.ForProvider.Identity.IdentityIdsSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Identity.IdentityIds") + } + mg.Spec.ForProvider.Identity.IdentityIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.Identity.IdentityIdsRefs = mrsp.ResolvedReferences + + } + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, 
err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.Identity != nil { + { + m, l, err = apisresolver.GetManagedResource("managedidentity.azure.upbound.io", "v1beta1", "UserAssignedIdentity", "UserAssignedIdentityList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.Identity.IdentityIds), + Extract: rconfig.ExtractResourceID(), + References: mg.Spec.InitProvider.Identity.IdentityIdsRefs, + Selector: mg.Spec.InitProvider.Identity.IdentityIdsSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Identity.IdentityIds") + } + mg.Spec.InitProvider.Identity.IdentityIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.Identity.IdentityIdsRefs = mrsp.ResolvedReferences + + } + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: 
mg.Spec.InitProvider.ResourceGroupNameRef, + Selector: mg.Spec.InitProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ResourceGroupName") + } + mg.Spec.InitProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this ResourceDeploymentScriptAzurePowerShell. +func (mg *ResourceDeploymentScriptAzurePowerShell) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + + if mg.Spec.ForProvider.Identity != nil { + { + m, l, err = apisresolver.GetManagedResource("managedidentity.azure.upbound.io", "v1beta1", "UserAssignedIdentity", "UserAssignedIdentityList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.Identity.IdentityIds), + Extract: rconfig.ExtractResourceID(), + References: mg.Spec.ForProvider.Identity.IdentityIdsRefs, + Selector: mg.Spec.ForProvider.Identity.IdentityIdsSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Identity.IdentityIds") + } + mg.Spec.ForProvider.Identity.IdentityIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.Identity.IdentityIdsRefs = mrsp.ResolvedReferences + + } + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for 
reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.Identity != nil { + { + m, l, err = apisresolver.GetManagedResource("managedidentity.azure.upbound.io", "v1beta1", "UserAssignedIdentity", "UserAssignedIdentityList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.Identity.IdentityIds), + Extract: rconfig.ExtractResourceID(), + References: mg.Spec.InitProvider.Identity.IdentityIdsRefs, + Selector: mg.Spec.InitProvider.Identity.IdentityIdsSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Identity.IdentityIds") + } + mg.Spec.InitProvider.Identity.IdentityIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.Identity.IdentityIdsRefs = mrsp.ResolvedReferences + + } + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ResourceGroupName), + Extract: 
reference.ExternalName(), + Reference: mg.Spec.InitProvider.ResourceGroupNameRef, + Selector: mg.Spec.InitProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ResourceGroupName") + } + mg.Spec.InitProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/resources/v1beta2/zz_groupversion_info.go b/apis/resources/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..d09bffc38 --- /dev/null +++ b/apis/resources/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=resources.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "resources.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/resources/v1beta2/zz_resourcedeploymentscriptazurecli_terraformed.go b/apis/resources/v1beta2/zz_resourcedeploymentscriptazurecli_terraformed.go new file mode 100755 index 000000000..b69ce5a79 --- /dev/null +++ b/apis/resources/v1beta2/zz_resourcedeploymentscriptazurecli_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ResourceDeploymentScriptAzureCli +func (mg *ResourceDeploymentScriptAzureCli) GetTerraformResourceType() string { + return "azurerm_resource_deployment_script_azure_cli" +} + +// GetConnectionDetailsMapping for this ResourceDeploymentScriptAzureCli +func (tr *ResourceDeploymentScriptAzureCli) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"environment_variable[*].secure_value": "spec.forProvider.environmentVariable[*].secureValueSecretRef", "storage_account[*].key": "spec.forProvider.storageAccount[*].keySecretRef"} +} + +// GetObservation of this ResourceDeploymentScriptAzureCli +func (tr *ResourceDeploymentScriptAzureCli) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ResourceDeploymentScriptAzureCli +func (tr *ResourceDeploymentScriptAzureCli) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this 
ResourceDeploymentScriptAzureCli +func (tr *ResourceDeploymentScriptAzureCli) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ResourceDeploymentScriptAzureCli +func (tr *ResourceDeploymentScriptAzureCli) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ResourceDeploymentScriptAzureCli +func (tr *ResourceDeploymentScriptAzureCli) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ResourceDeploymentScriptAzureCli +func (tr *ResourceDeploymentScriptAzureCli) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this ResourceDeploymentScriptAzureCli +func (tr *ResourceDeploymentScriptAzureCli) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ResourceDeploymentScriptAzureCli using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ResourceDeploymentScriptAzureCli) LateInitialize(attrs []byte) (bool, error) { + params := &ResourceDeploymentScriptAzureCliParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ResourceDeploymentScriptAzureCli) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/resources/v1beta2/zz_resourcedeploymentscriptazurecli_types.go b/apis/resources/v1beta2/zz_resourcedeploymentscriptazurecli_types.go new file mode 100755 index 000000000..72e6d0abf --- /dev/null +++ b/apis/resources/v1beta2/zz_resourcedeploymentscriptazurecli_types.go @@ -0,0 +1,411 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ContainerInitParameters struct { + + // Container group name, if not specified then the name will get auto-generated. 
For more information, please refer to the Container Configuration documentation. + ContainerGroupName *string `json:"containerGroupName,omitempty" tf:"container_group_name,omitempty"` +} + +type ContainerObservation struct { + + // Container group name, if not specified then the name will get auto-generated. For more information, please refer to the Container Configuration documentation. + ContainerGroupName *string `json:"containerGroupName,omitempty" tf:"container_group_name,omitempty"` +} + +type ContainerParameters struct { + + // Container group name, if not specified then the name will get auto-generated. For more information, please refer to the Container Configuration documentation. + // +kubebuilder:validation:Optional + ContainerGroupName *string `json:"containerGroupName,omitempty" tf:"container_group_name,omitempty"` +} + +type EnvironmentVariableInitParameters struct { + + // Specifies the name of the environment variable. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies the value of the environment variable. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type EnvironmentVariableObservation struct { + + // Specifies the name of the environment variable. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies the value of the environment variable. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type EnvironmentVariableParameters struct { + + // Specifies the name of the environment variable. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Specifies the value of the secure environment variable. + // +kubebuilder:validation:Optional + SecureValueSecretRef *v1.SecretKeySelector `json:"secureValueSecretRef,omitempty" tf:"-"` + + // Specifies the value of the environment variable. 
+ // +kubebuilder:validation:Optional + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type IdentityInitParameters struct { + + // Specifies the list of user-assigned managed identity IDs associated with the resource. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/managedidentity/v1beta1.UserAssignedIdentity + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // References to UserAssignedIdentity in managedidentity to populate identityIds. + // +kubebuilder:validation:Optional + IdentityIdsRefs []v1.Reference `json:"identityIdsRefs,omitempty" tf:"-"` + + // Selector for a list of UserAssignedIdentity in managedidentity to populate identityIds. + // +kubebuilder:validation:Optional + IdentityIdsSelector *v1.Selector `json:"identityIdsSelector,omitempty" tf:"-"` + + // Type of the managed identity. The only possible value is UserAssigned. Changing this forces a new resource to be created. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityObservation struct { + + // Specifies the list of user-assigned managed identity IDs associated with the resource. Changing this forces a new resource to be created. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Type of the managed identity. The only possible value is UserAssigned. Changing this forces a new resource to be created. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityParameters struct { + + // Specifies the list of user-assigned managed identity IDs associated with the resource. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/managedidentity/v1beta1.UserAssignedIdentity + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // References to UserAssignedIdentity in managedidentity to populate identityIds. + // +kubebuilder:validation:Optional + IdentityIdsRefs []v1.Reference `json:"identityIdsRefs,omitempty" tf:"-"` + + // Selector for a list of UserAssignedIdentity in managedidentity to populate identityIds. + // +kubebuilder:validation:Optional + IdentityIdsSelector *v1.Selector `json:"identityIdsSelector,omitempty" tf:"-"` + + // Type of the managed identity. The only possible value is UserAssigned. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type ResourceDeploymentScriptAzureCliInitParameters struct { + + // Specifies the cleanup preference when the script execution gets in a terminal state. Possible values are Always, OnExpiration, OnSuccess. Defaults to Always. Changing this forces a new Resource Deployment Script to be created. + CleanupPreference *string `json:"cleanupPreference,omitempty" tf:"cleanup_preference,omitempty"` + + // Command line arguments to pass to the script. Changing this forces a new Resource Deployment Script to be created. + CommandLine *string `json:"commandLine,omitempty" tf:"command_line,omitempty"` + + // A container block as defined below. Changing this forces a new Resource Deployment Script to be created. + Container *ContainerInitParameters `json:"container,omitempty" tf:"container,omitempty"` + + // An environment_variable block as defined below. Changing this forces a new Resource Deployment Script to be created. 
+ EnvironmentVariable []EnvironmentVariableInitParameters `json:"environmentVariable,omitempty" tf:"environment_variable,omitempty"` + + // Gets or sets how the deployment script should be forced to execute even if the script resource has not changed. Can be current time stamp or a GUID. Changing this forces a new Resource Deployment Script to be created. + ForceUpdateTag *string `json:"forceUpdateTag,omitempty" tf:"force_update_tag,omitempty"` + + // An identity block as defined below. Changing this forces a new Resource Deployment Script to be created. + Identity *IdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Specifies the Azure Region where the Resource Deployment Script should exist. Changing this forces a new Resource Deployment Script to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the name which should be used for this Resource Deployment Script. The name length must be from 1 to 260 characters. The name can only contain alphanumeric, underscore, parentheses, hyphen and period, and it cannot end with a period. Changing this forces a new Resource Deployment Script to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Uri for the script. This is the entry point for the external script. Changing this forces a new Resource Deployment Script to be created. + PrimaryScriptURI *string `json:"primaryScriptUri,omitempty" tf:"primary_script_uri,omitempty"` + + // Specifies the name of the Resource Group where the Resource Deployment Script should exist. Changing this forces a new Resource Deployment Script to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. 
+ // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // Interval for which the service retains the script resource after it reaches a terminal state. Resource will be deleted when this duration expires. The time duration should be between 1 hour and 26 hours (inclusive) and should be specified in ISO 8601 format. Changing this forces a new Resource Deployment Script to be created. + RetentionInterval *string `json:"retentionInterval,omitempty" tf:"retention_interval,omitempty"` + + // Script body. Changing this forces a new Resource Deployment Script to be created. + ScriptContent *string `json:"scriptContent,omitempty" tf:"script_content,omitempty"` + + // A storage_account block as defined below. Changing this forces a new Resource Deployment Script to be created. + StorageAccount *StorageAccountInitParameters `json:"storageAccount,omitempty" tf:"storage_account,omitempty"` + + // Supporting files for the external script. Changing this forces a new Resource Deployment Script to be created. + SupportingScriptUris []*string `json:"supportingScriptUris,omitempty" tf:"supporting_script_uris,omitempty"` + + // A mapping of tags which should be assigned to the Resource Deployment Script. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Maximum allowed script execution time specified in ISO 8601 format. Needs to be greater than 0 and smaller than 1 day. Defaults to P1D. Changing this forces a new Resource Deployment Script to be created. + Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` + + // Specifies the version of the Azure CLI that should be used in the format X.Y.Z (e.g. 2.30.0). 
A canonical list of versions is available from the Microsoft Container Registry API. Changing this forces a new Resource Deployment Script to be created. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type ResourceDeploymentScriptAzureCliObservation struct { + + // Specifies the cleanup preference when the script execution gets in a terminal state. Possible values are Always, OnExpiration, OnSuccess. Defaults to Always. Changing this forces a new Resource Deployment Script to be created. + CleanupPreference *string `json:"cleanupPreference,omitempty" tf:"cleanup_preference,omitempty"` + + // Command line arguments to pass to the script. Changing this forces a new Resource Deployment Script to be created. + CommandLine *string `json:"commandLine,omitempty" tf:"command_line,omitempty"` + + // A container block as defined below. Changing this forces a new Resource Deployment Script to be created. + Container *ContainerObservation `json:"container,omitempty" tf:"container,omitempty"` + + // An environment_variable block as defined below. Changing this forces a new Resource Deployment Script to be created. + EnvironmentVariable []EnvironmentVariableObservation `json:"environmentVariable,omitempty" tf:"environment_variable,omitempty"` + + // Gets or sets how the deployment script should be forced to execute even if the script resource has not changed. Can be current time stamp or a GUID. Changing this forces a new Resource Deployment Script to be created. + ForceUpdateTag *string `json:"forceUpdateTag,omitempty" tf:"force_update_tag,omitempty"` + + // The ID of the Resource Deployment Script. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. Changing this forces a new Resource Deployment Script to be created. + Identity *IdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // Specifies the Azure Region where the Resource Deployment Script should exist. 
Changing this forces a new Resource Deployment Script to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the name which should be used for this Resource Deployment Script. The name length must be from 1 to 260 characters. The name can only contain alphanumeric, underscore, parentheses, hyphen and period, and it cannot end with a period. Changing this forces a new Resource Deployment Script to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // List of script outputs. + Outputs *string `json:"outputs,omitempty" tf:"outputs,omitempty"` + + // Uri for the script. This is the entry point for the external script. Changing this forces a new Resource Deployment Script to be created. + PrimaryScriptURI *string `json:"primaryScriptUri,omitempty" tf:"primary_script_uri,omitempty"` + + // Specifies the name of the Resource Group where the Resource Deployment Script should exist. Changing this forces a new Resource Deployment Script to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Interval for which the service retains the script resource after it reaches a terminal state. Resource will be deleted when this duration expires. The time duration should be between 1 hour and 26 hours (inclusive) and should be specified in ISO 8601 format. Changing this forces a new Resource Deployment Script to be created. + RetentionInterval *string `json:"retentionInterval,omitempty" tf:"retention_interval,omitempty"` + + // Script body. Changing this forces a new Resource Deployment Script to be created. + ScriptContent *string `json:"scriptContent,omitempty" tf:"script_content,omitempty"` + + // A storage_account block as defined below. Changing this forces a new Resource Deployment Script to be created. 
+ StorageAccount *StorageAccountObservation `json:"storageAccount,omitempty" tf:"storage_account,omitempty"` + + // Supporting files for the external script. Changing this forces a new Resource Deployment Script to be created. + SupportingScriptUris []*string `json:"supportingScriptUris,omitempty" tf:"supporting_script_uris,omitempty"` + + // A mapping of tags which should be assigned to the Resource Deployment Script. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Maximum allowed script execution time specified in ISO 8601 format. Needs to be greater than 0 and smaller than 1 day. Defaults to P1D. Changing this forces a new Resource Deployment Script to be created. + Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` + + // Specifies the version of the Azure CLI that should be used in the format X.Y.Z (e.g. 2.30.0). A canonical list of versions is available from the Microsoft Container Registry API. Changing this forces a new Resource Deployment Script to be created. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type ResourceDeploymentScriptAzureCliParameters struct { + + // Specifies the cleanup preference when the script execution gets in a terminal state. Possible values are Always, OnExpiration, OnSuccess. Defaults to Always. Changing this forces a new Resource Deployment Script to be created. + // +kubebuilder:validation:Optional + CleanupPreference *string `json:"cleanupPreference,omitempty" tf:"cleanup_preference,omitempty"` + + // Command line arguments to pass to the script. Changing this forces a new Resource Deployment Script to be created. + // +kubebuilder:validation:Optional + CommandLine *string `json:"commandLine,omitempty" tf:"command_line,omitempty"` + + // A container block as defined below. Changing this forces a new Resource Deployment Script to be created. 
+ // +kubebuilder:validation:Optional + Container *ContainerParameters `json:"container,omitempty" tf:"container,omitempty"` + + // An environment_variable block as defined below. Changing this forces a new Resource Deployment Script to be created. + // +kubebuilder:validation:Optional + EnvironmentVariable []EnvironmentVariableParameters `json:"environmentVariable,omitempty" tf:"environment_variable,omitempty"` + + // Gets or sets how the deployment script should be forced to execute even if the script resource has not changed. Can be current time stamp or a GUID. Changing this forces a new Resource Deployment Script to be created. + // +kubebuilder:validation:Optional + ForceUpdateTag *string `json:"forceUpdateTag,omitempty" tf:"force_update_tag,omitempty"` + + // An identity block as defined below. Changing this forces a new Resource Deployment Script to be created. + // +kubebuilder:validation:Optional + Identity *IdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Specifies the Azure Region where the Resource Deployment Script should exist. Changing this forces a new Resource Deployment Script to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the name which should be used for this Resource Deployment Script. The name length must be from 1 to 260 characters. The name can only contain alphanumeric, underscore, parentheses, hyphen and period, and it cannot end with a period. Changing this forces a new Resource Deployment Script to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Uri for the script. This is the entry point for the external script. Changing this forces a new Resource Deployment Script to be created. 
+ // +kubebuilder:validation:Optional + PrimaryScriptURI *string `json:"primaryScriptUri,omitempty" tf:"primary_script_uri,omitempty"` + + // Specifies the name of the Resource Group where the Resource Deployment Script should exist. Changing this forces a new Resource Deployment Script to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // Interval for which the service retains the script resource after it reaches a terminal state. Resource will be deleted when this duration expires. The time duration should be between 1 hour and 26 hours (inclusive) and should be specified in ISO 8601 format. Changing this forces a new Resource Deployment Script to be created. + // +kubebuilder:validation:Optional + RetentionInterval *string `json:"retentionInterval,omitempty" tf:"retention_interval,omitempty"` + + // Script body. Changing this forces a new Resource Deployment Script to be created. + // +kubebuilder:validation:Optional + ScriptContent *string `json:"scriptContent,omitempty" tf:"script_content,omitempty"` + + // A storage_account block as defined below. Changing this forces a new Resource Deployment Script to be created. + // +kubebuilder:validation:Optional + StorageAccount *StorageAccountParameters `json:"storageAccount,omitempty" tf:"storage_account,omitempty"` + + // Supporting files for the external script. 
Changing this forces a new Resource Deployment Script to be created. + // +kubebuilder:validation:Optional + SupportingScriptUris []*string `json:"supportingScriptUris,omitempty" tf:"supporting_script_uris,omitempty"` + + // A mapping of tags which should be assigned to the Resource Deployment Script. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Maximum allowed script execution time specified in ISO 8601 format. Needs to be greater than 0 and smaller than 1 day. Defaults to P1D. Changing this forces a new Resource Deployment Script to be created. + // +kubebuilder:validation:Optional + Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` + + // Specifies the version of the Azure CLI that should be used in the format X.Y.Z (e.g. 2.30.0). A canonical list of versions is available from the Microsoft Container Registry API. Changing this forces a new Resource Deployment Script to be created. + // +kubebuilder:validation:Optional + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type StorageAccountInitParameters struct { + + // Specifies the storage account name. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type StorageAccountObservation struct { + + // Specifies the storage account name. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type StorageAccountParameters struct { + + // Specifies the storage account access key. + // +kubebuilder:validation:Required + KeySecretRef v1.SecretKeySelector `json:"keySecretRef" tf:"-"` + + // Specifies the storage account name. 
+ // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +// ResourceDeploymentScriptAzureCliSpec defines the desired state of ResourceDeploymentScriptAzureCli +type ResourceDeploymentScriptAzureCliSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ResourceDeploymentScriptAzureCliParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ResourceDeploymentScriptAzureCliInitParameters `json:"initProvider,omitempty"` +} + +// ResourceDeploymentScriptAzureCliStatus defines the observed state of ResourceDeploymentScriptAzureCli. +type ResourceDeploymentScriptAzureCliStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ResourceDeploymentScriptAzureCliObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ResourceDeploymentScriptAzureCli is the Schema for the ResourceDeploymentScriptAzureClis API. Manages a Resource Deployment Script of Azure Cli. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure},path=resourcedeploymentscriptazureclicli +type ResourceDeploymentScriptAzureCli struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.retentionInterval) || (has(self.initProvider) && has(self.initProvider.retentionInterval))",message="spec.forProvider.retentionInterval is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.version) || (has(self.initProvider) && has(self.initProvider.version))",message="spec.forProvider.version is a required parameter" + Spec 
ResourceDeploymentScriptAzureCliSpec `json:"spec"` + Status ResourceDeploymentScriptAzureCliStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ResourceDeploymentScriptAzureCliList contains a list of ResourceDeploymentScriptAzureClis +type ResourceDeploymentScriptAzureCliList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ResourceDeploymentScriptAzureCli `json:"items"` +} + +// Repository type metadata. +var ( + ResourceDeploymentScriptAzureCli_Kind = "ResourceDeploymentScriptAzureCli" + ResourceDeploymentScriptAzureCli_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ResourceDeploymentScriptAzureCli_Kind}.String() + ResourceDeploymentScriptAzureCli_KindAPIVersion = ResourceDeploymentScriptAzureCli_Kind + "." + CRDGroupVersion.String() + ResourceDeploymentScriptAzureCli_GroupVersionKind = CRDGroupVersion.WithKind(ResourceDeploymentScriptAzureCli_Kind) +) + +func init() { + SchemeBuilder.Register(&ResourceDeploymentScriptAzureCli{}, &ResourceDeploymentScriptAzureCliList{}) +} diff --git a/apis/resources/v1beta2/zz_resourcedeploymentscriptazurepowershell_terraformed.go b/apis/resources/v1beta2/zz_resourcedeploymentscriptazurepowershell_terraformed.go new file mode 100755 index 000000000..aab1b641c --- /dev/null +++ b/apis/resources/v1beta2/zz_resourcedeploymentscriptazurepowershell_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ResourceDeploymentScriptAzurePowerShell +func (mg *ResourceDeploymentScriptAzurePowerShell) GetTerraformResourceType() string { + return "azurerm_resource_deployment_script_azure_power_shell" +} + +// GetConnectionDetailsMapping for this ResourceDeploymentScriptAzurePowerShell +func (tr *ResourceDeploymentScriptAzurePowerShell) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"environment_variable[*].secure_value": "spec.forProvider.environmentVariable[*].secureValueSecretRef", "storage_account[*].key": "spec.forProvider.storageAccount[*].keySecretRef"} +} + +// GetObservation of this ResourceDeploymentScriptAzurePowerShell +func (tr *ResourceDeploymentScriptAzurePowerShell) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ResourceDeploymentScriptAzurePowerShell +func (tr *ResourceDeploymentScriptAzurePowerShell) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ResourceDeploymentScriptAzurePowerShell +func (tr *ResourceDeploymentScriptAzurePowerShell) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ResourceDeploymentScriptAzurePowerShell +func (tr *ResourceDeploymentScriptAzurePowerShell) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + 
} + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ResourceDeploymentScriptAzurePowerShell +func (tr *ResourceDeploymentScriptAzurePowerShell) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ResourceDeploymentScriptAzurePowerShell +func (tr *ResourceDeploymentScriptAzurePowerShell) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this ResourceDeploymentScriptAzurePowerShell +func (tr *ResourceDeploymentScriptAzurePowerShell) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ResourceDeploymentScriptAzurePowerShell using its observed tfState. 
+// returns True if there are any spec changes for the resource. +func (tr *ResourceDeploymentScriptAzurePowerShell) LateInitialize(attrs []byte) (bool, error) { + params := &ResourceDeploymentScriptAzurePowerShellParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ResourceDeploymentScriptAzurePowerShell) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/resources/v1beta2/zz_resourcedeploymentscriptazurepowershell_types.go b/apis/resources/v1beta2/zz_resourcedeploymentscriptazurepowershell_types.go new file mode 100755 index 000000000..f49981838 --- /dev/null +++ b/apis/resources/v1beta2/zz_resourcedeploymentscriptazurepowershell_types.go @@ -0,0 +1,411 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ResourceDeploymentScriptAzurePowerShellContainerInitParameters struct { + + // Container group name, if not specified then the name will get auto-generated. For more information, please refer to the Container Configuration documentation. + ContainerGroupName *string `json:"containerGroupName,omitempty" tf:"container_group_name,omitempty"` +} + +type ResourceDeploymentScriptAzurePowerShellContainerObservation struct { + + // Container group name, if not specified then the name will get auto-generated. 
For more information, please refer to the Container Configuration documentation. + ContainerGroupName *string `json:"containerGroupName,omitempty" tf:"container_group_name,omitempty"` +} + +type ResourceDeploymentScriptAzurePowerShellContainerParameters struct { + + // Container group name, if not specified then the name will get auto-generated. For more information, please refer to the Container Configuration documentation. + // +kubebuilder:validation:Optional + ContainerGroupName *string `json:"containerGroupName,omitempty" tf:"container_group_name,omitempty"` +} + +type ResourceDeploymentScriptAzurePowerShellEnvironmentVariableInitParameters struct { + + // Specifies the name of the environment variable. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies the value of the environment variable. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResourceDeploymentScriptAzurePowerShellEnvironmentVariableObservation struct { + + // Specifies the name of the environment variable. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies the value of the environment variable. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResourceDeploymentScriptAzurePowerShellEnvironmentVariableParameters struct { + + // Specifies the name of the environment variable. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Specifies the value of the secure environment variable. + // +kubebuilder:validation:Optional + SecureValueSecretRef *v1.SecretKeySelector `json:"secureValueSecretRef,omitempty" tf:"-"` + + // Specifies the value of the environment variable. + // +kubebuilder:validation:Optional + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type ResourceDeploymentScriptAzurePowerShellIdentityInitParameters struct { + + // Specifies the list of user-assigned managed identity IDs associated with the resource. 
Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/managedidentity/v1beta1.UserAssignedIdentity + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // References to UserAssignedIdentity in managedidentity to populate identityIds. + // +kubebuilder:validation:Optional + IdentityIdsRefs []v1.Reference `json:"identityIdsRefs,omitempty" tf:"-"` + + // Selector for a list of UserAssignedIdentity in managedidentity to populate identityIds. + // +kubebuilder:validation:Optional + IdentityIdsSelector *v1.Selector `json:"identityIdsSelector,omitempty" tf:"-"` + + // Type of the managed identity. The only possible value is UserAssigned. Changing this forces a new resource to be created. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ResourceDeploymentScriptAzurePowerShellIdentityObservation struct { + + // Specifies the list of user-assigned managed identity IDs associated with the resource. Changing this forces a new resource to be created. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Type of the managed identity. The only possible value is UserAssigned. Changing this forces a new resource to be created. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ResourceDeploymentScriptAzurePowerShellIdentityParameters struct { + + // Specifies the list of user-assigned managed identity IDs associated with the resource. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/managedidentity/v1beta1.UserAssignedIdentity + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // References to UserAssignedIdentity in managedidentity to populate identityIds. + // +kubebuilder:validation:Optional + IdentityIdsRefs []v1.Reference `json:"identityIdsRefs,omitempty" tf:"-"` + + // Selector for a list of UserAssignedIdentity in managedidentity to populate identityIds. + // +kubebuilder:validation:Optional + IdentityIdsSelector *v1.Selector `json:"identityIdsSelector,omitempty" tf:"-"` + + // Type of the managed identity. The only possible value is UserAssigned. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type ResourceDeploymentScriptAzurePowerShellInitParameters struct { + + // Specifies the cleanup preference when the script execution gets in a terminal state. Possible values are Always, OnExpiration, OnSuccess. Defaults to Always. Changing this forces a new Resource Deployment Script to be created. + CleanupPreference *string `json:"cleanupPreference,omitempty" tf:"cleanup_preference,omitempty"` + + // Command line arguments to pass to the script. Changing this forces a new Resource Deployment Script to be created. + CommandLine *string `json:"commandLine,omitempty" tf:"command_line,omitempty"` + + // A container block as defined below. Changing this forces a new Resource Deployment Script to be created. + Container *ResourceDeploymentScriptAzurePowerShellContainerInitParameters `json:"container,omitempty" tf:"container,omitempty"` + + // An environment_variable block as defined below. Changing this forces a new Resource Deployment Script to be created. 
+ EnvironmentVariable []ResourceDeploymentScriptAzurePowerShellEnvironmentVariableInitParameters `json:"environmentVariable,omitempty" tf:"environment_variable,omitempty"` + + // Gets or sets how the deployment script should be forced to execute even if the script resource has not changed. Can be current time stamp or a GUID. Changing this forces a new Resource Deployment Script to be created. + ForceUpdateTag *string `json:"forceUpdateTag,omitempty" tf:"force_update_tag,omitempty"` + + // An identity block as defined below. Changing this forces a new Resource Deployment Script to be created. + Identity *ResourceDeploymentScriptAzurePowerShellIdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Specifies the Azure Region where the Resource Deployment Script should exist. Changing this forces a new Resource Deployment Script to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the name which should be used for this Resource Deployment Script. The name length must be from 1 to 260 characters. The name can only contain alphanumeric, underscore, parentheses, hyphen and period, and it cannot end with a period. Changing this forces a new Resource Deployment Script to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Uri for the script. This is the entry point for the external script. Changing this forces a new Resource Deployment Script to be created. + PrimaryScriptURI *string `json:"primaryScriptUri,omitempty" tf:"primary_script_uri,omitempty"` + + // Specifies the name of the Resource Group where the Resource Deployment Script should exist. Changing this forces a new Resource Deployment Script to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // Interval for which the service retains the script resource after it reaches a terminal state. Resource will be deleted when this duration expires. The time duration should be between 1 hour and 26 hours (inclusive) and should be specified in ISO 8601 format. Changing this forces a new Resource Deployment Script to be created. + RetentionInterval *string `json:"retentionInterval,omitempty" tf:"retention_interval,omitempty"` + + // Script body. Changing this forces a new Resource Deployment Script to be created. + ScriptContent *string `json:"scriptContent,omitempty" tf:"script_content,omitempty"` + + // A storage_account block as defined below. Changing this forces a new Resource Deployment Script to be created. + StorageAccount *ResourceDeploymentScriptAzurePowerShellStorageAccountInitParameters `json:"storageAccount,omitempty" tf:"storage_account,omitempty"` + + // Supporting files for the external script. Changing this forces a new Resource Deployment Script to be created. + SupportingScriptUris []*string `json:"supportingScriptUris,omitempty" tf:"supporting_script_uris,omitempty"` + + // A mapping of tags which should be assigned to the Resource Deployment Script. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Maximum allowed script execution time specified in ISO 8601 format. 
Needs to be greater than 0 and smaller than 1 day. Defaults to P1D. Changing this forces a new Resource Deployment Script to be created. + Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` + + // Specifies the version of the Azure PowerShell that should be used in the format X.Y (e.g. 9.7). A canonical list of versions is available from the Microsoft Container Registry API. Changing this forces a new Resource Deployment Script to be created. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type ResourceDeploymentScriptAzurePowerShellObservation struct { + + // Specifies the cleanup preference when the script execution gets in a terminal state. Possible values are Always, OnExpiration, OnSuccess. Defaults to Always. Changing this forces a new Resource Deployment Script to be created. + CleanupPreference *string `json:"cleanupPreference,omitempty" tf:"cleanup_preference,omitempty"` + + // Command line arguments to pass to the script. Changing this forces a new Resource Deployment Script to be created. + CommandLine *string `json:"commandLine,omitempty" tf:"command_line,omitempty"` + + // A container block as defined below. Changing this forces a new Resource Deployment Script to be created. + Container *ResourceDeploymentScriptAzurePowerShellContainerObservation `json:"container,omitempty" tf:"container,omitempty"` + + // An environment_variable block as defined below. Changing this forces a new Resource Deployment Script to be created. + EnvironmentVariable []ResourceDeploymentScriptAzurePowerShellEnvironmentVariableObservation `json:"environmentVariable,omitempty" tf:"environment_variable,omitempty"` + + // Gets or sets how the deployment script should be forced to execute even if the script resource has not changed. Can be current time stamp or a GUID. Changing this forces a new Resource Deployment Script to be created. 
+ ForceUpdateTag *string `json:"forceUpdateTag,omitempty" tf:"force_update_tag,omitempty"` + + // The ID of the Resource Deployment Script. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. Changing this forces a new Resource Deployment Script to be created. + Identity *ResourceDeploymentScriptAzurePowerShellIdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // Specifies the Azure Region where the Resource Deployment Script should exist. Changing this forces a new Resource Deployment Script to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the name which should be used for this Resource Deployment Script. The name length must be from 1 to 260 characters. The name can only contain alphanumeric, underscore, parentheses, hyphen and period, and it cannot end with a period. Changing this forces a new Resource Deployment Script to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // List of script outputs. + Outputs *string `json:"outputs,omitempty" tf:"outputs,omitempty"` + + // Uri for the script. This is the entry point for the external script. Changing this forces a new Resource Deployment Script to be created. + PrimaryScriptURI *string `json:"primaryScriptUri,omitempty" tf:"primary_script_uri,omitempty"` + + // Specifies the name of the Resource Group where the Resource Deployment Script should exist. Changing this forces a new Resource Deployment Script to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Interval for which the service retains the script resource after it reaches a terminal state. Resource will be deleted when this duration expires. The time duration should be between 1 hour and 26 hours (inclusive) and should be specified in ISO 8601 format. Changing this forces a new Resource Deployment Script to be created. 
+ RetentionInterval *string `json:"retentionInterval,omitempty" tf:"retention_interval,omitempty"` + + // Script body. Changing this forces a new Resource Deployment Script to be created. + ScriptContent *string `json:"scriptContent,omitempty" tf:"script_content,omitempty"` + + // A storage_account block as defined below. Changing this forces a new Resource Deployment Script to be created. + StorageAccount *ResourceDeploymentScriptAzurePowerShellStorageAccountObservation `json:"storageAccount,omitempty" tf:"storage_account,omitempty"` + + // Supporting files for the external script. Changing this forces a new Resource Deployment Script to be created. + SupportingScriptUris []*string `json:"supportingScriptUris,omitempty" tf:"supporting_script_uris,omitempty"` + + // A mapping of tags which should be assigned to the Resource Deployment Script. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Maximum allowed script execution time specified in ISO 8601 format. Needs to be greater than 0 and smaller than 1 day. Defaults to P1D. Changing this forces a new Resource Deployment Script to be created. + Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` + + // Specifies the version of the Azure PowerShell that should be used in the format X.Y (e.g. 9.7). A canonical list of versions is available from the Microsoft Container Registry API. Changing this forces a new Resource Deployment Script to be created. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type ResourceDeploymentScriptAzurePowerShellParameters struct { + + // Specifies the cleanup preference when the script execution gets in a terminal state. Possible values are Always, OnExpiration, OnSuccess. Defaults to Always. Changing this forces a new Resource Deployment Script to be created. 
+ // +kubebuilder:validation:Optional + CleanupPreference *string `json:"cleanupPreference,omitempty" tf:"cleanup_preference,omitempty"` + + // Command line arguments to pass to the script. Changing this forces a new Resource Deployment Script to be created. + // +kubebuilder:validation:Optional + CommandLine *string `json:"commandLine,omitempty" tf:"command_line,omitempty"` + + // A container block as defined below. Changing this forces a new Resource Deployment Script to be created. + // +kubebuilder:validation:Optional + Container *ResourceDeploymentScriptAzurePowerShellContainerParameters `json:"container,omitempty" tf:"container,omitempty"` + + // An environment_variable block as defined below. Changing this forces a new Resource Deployment Script to be created. + // +kubebuilder:validation:Optional + EnvironmentVariable []ResourceDeploymentScriptAzurePowerShellEnvironmentVariableParameters `json:"environmentVariable,omitempty" tf:"environment_variable,omitempty"` + + // Gets or sets how the deployment script should be forced to execute even if the script resource has not changed. Can be current time stamp or a GUID. Changing this forces a new Resource Deployment Script to be created. + // +kubebuilder:validation:Optional + ForceUpdateTag *string `json:"forceUpdateTag,omitempty" tf:"force_update_tag,omitempty"` + + // An identity block as defined below. Changing this forces a new Resource Deployment Script to be created. + // +kubebuilder:validation:Optional + Identity *ResourceDeploymentScriptAzurePowerShellIdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Specifies the Azure Region where the Resource Deployment Script should exist. Changing this forces a new Resource Deployment Script to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the name which should be used for this Resource Deployment Script. 
The name length must be from 1 to 260 characters. The name can only contain alphanumeric, underscore, parentheses, hyphen and period, and it cannot end with a period. Changing this forces a new Resource Deployment Script to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Uri for the script. This is the entry point for the external script. Changing this forces a new Resource Deployment Script to be created. + // +kubebuilder:validation:Optional + PrimaryScriptURI *string `json:"primaryScriptUri,omitempty" tf:"primary_script_uri,omitempty"` + + // Specifies the name of the Resource Group where the Resource Deployment Script should exist. Changing this forces a new Resource Deployment Script to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // Interval for which the service retains the script resource after it reaches a terminal state. Resource will be deleted when this duration expires. The time duration should be between 1 hour and 26 hours (inclusive) and should be specified in ISO 8601 format. Changing this forces a new Resource Deployment Script to be created. + // +kubebuilder:validation:Optional + RetentionInterval *string `json:"retentionInterval,omitempty" tf:"retention_interval,omitempty"` + + // Script body. Changing this forces a new Resource Deployment Script to be created. 
+ // +kubebuilder:validation:Optional + ScriptContent *string `json:"scriptContent,omitempty" tf:"script_content,omitempty"` + + // A storage_account block as defined below. Changing this forces a new Resource Deployment Script to be created. + // +kubebuilder:validation:Optional + StorageAccount *ResourceDeploymentScriptAzurePowerShellStorageAccountParameters `json:"storageAccount,omitempty" tf:"storage_account,omitempty"` + + // Supporting files for the external script. Changing this forces a new Resource Deployment Script to be created. + // +kubebuilder:validation:Optional + SupportingScriptUris []*string `json:"supportingScriptUris,omitempty" tf:"supporting_script_uris,omitempty"` + + // A mapping of tags which should be assigned to the Resource Deployment Script. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Maximum allowed script execution time specified in ISO 8601 format. Needs to be greater than 0 and smaller than 1 day. Defaults to P1D. Changing this forces a new Resource Deployment Script to be created. + // +kubebuilder:validation:Optional + Timeout *string `json:"timeout,omitempty" tf:"timeout,omitempty"` + + // Specifies the version of the Azure PowerShell that should be used in the format X.Y (e.g. 9.7). A canonical list of versions is available from the Microsoft Container Registry API. Changing this forces a new Resource Deployment Script to be created. + // +kubebuilder:validation:Optional + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type ResourceDeploymentScriptAzurePowerShellStorageAccountInitParameters struct { + + // Specifies the storage account name. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type ResourceDeploymentScriptAzurePowerShellStorageAccountObservation struct { + + // Specifies the storage account name. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type ResourceDeploymentScriptAzurePowerShellStorageAccountParameters struct { + + // Specifies the storage account access key. + // +kubebuilder:validation:Required + KeySecretRef v1.SecretKeySelector `json:"keySecretRef" tf:"-"` + + // Specifies the storage account name. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +// ResourceDeploymentScriptAzurePowerShellSpec defines the desired state of ResourceDeploymentScriptAzurePowerShell +type ResourceDeploymentScriptAzurePowerShellSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ResourceDeploymentScriptAzurePowerShellParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ResourceDeploymentScriptAzurePowerShellInitParameters `json:"initProvider,omitempty"` +} + +// ResourceDeploymentScriptAzurePowerShellStatus defines the observed state of ResourceDeploymentScriptAzurePowerShell. +type ResourceDeploymentScriptAzurePowerShellStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ResourceDeploymentScriptAzurePowerShellObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ResourceDeploymentScriptAzurePowerShell is the Schema for the ResourceDeploymentScriptAzurePowerShells API. 
Manages a Resource Deployment Script of Azure PowerShell. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type ResourceDeploymentScriptAzurePowerShell struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.retentionInterval) || (has(self.initProvider) && has(self.initProvider.retentionInterval))",message="spec.forProvider.retentionInterval is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.version) || (has(self.initProvider) && has(self.initProvider.version))",message="spec.forProvider.version is a required parameter" 
+ Spec ResourceDeploymentScriptAzurePowerShellSpec `json:"spec"` + Status ResourceDeploymentScriptAzurePowerShellStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ResourceDeploymentScriptAzurePowerShellList contains a list of ResourceDeploymentScriptAzurePowerShells +type ResourceDeploymentScriptAzurePowerShellList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ResourceDeploymentScriptAzurePowerShell `json:"items"` +} + +// Repository type metadata. +var ( + ResourceDeploymentScriptAzurePowerShell_Kind = "ResourceDeploymentScriptAzurePowerShell" + ResourceDeploymentScriptAzurePowerShell_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ResourceDeploymentScriptAzurePowerShell_Kind}.String() + ResourceDeploymentScriptAzurePowerShell_KindAPIVersion = ResourceDeploymentScriptAzurePowerShell_Kind + "." + CRDGroupVersion.String() + ResourceDeploymentScriptAzurePowerShell_GroupVersionKind = CRDGroupVersion.WithKind(ResourceDeploymentScriptAzurePowerShell_Kind) +) + +func init() { + SchemeBuilder.Register(&ResourceDeploymentScriptAzurePowerShell{}, &ResourceDeploymentScriptAzurePowerShellList{}) +} diff --git a/apis/search/v1beta1/zz_generated.conversion_hubs.go b/apis/search/v1beta1/zz_generated.conversion_hubs.go index 3d65ad11c..fc8c6b974 100755 --- a/apis/search/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/search/v1beta1/zz_generated.conversion_hubs.go @@ -6,8 +6,5 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *Service) Hub() {} - // Hub marks this type as a conversion hub. 
func (tr *SharedPrivateLinkService) Hub() {} diff --git a/apis/search/v1beta1/zz_generated.conversion_spokes.go b/apis/search/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..80f757476 --- /dev/null +++ b/apis/search/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Service to the hub type. +func (tr *Service) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Service type. 
+func (tr *Service) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/search/v1beta1/zz_generated.resolvers.go b/apis/search/v1beta1/zz_generated.resolvers.go index ecc93fbfe..e2e033999 100644 --- a/apis/search/v1beta1/zz_generated.resolvers.go +++ b/apis/search/v1beta1/zz_generated.resolvers.go @@ -58,7 +58,7 @@ func (mg *SharedPrivateLinkService) ResolveReferences(ctx context.Context, c cli var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("search.azure.upbound.io", "v1beta1", "Service", "ServiceList") + m, l, err = apisresolver.GetManagedResource("search.azure.upbound.io", "v1beta2", "Service", "ServiceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -77,7 +77,7 @@ func (mg *SharedPrivateLinkService) ResolveReferences(ctx context.Context, c cli mg.Spec.ForProvider.SearchServiceID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.SearchServiceIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -96,7 +96,7 @@ func (mg *SharedPrivateLinkService) ResolveReferences(ctx context.Context, c cli mg.Spec.ForProvider.TargetResourceID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.TargetResourceIDRef = 
rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/search/v1beta1/zz_sharedprivatelinkservice_types.go b/apis/search/v1beta1/zz_sharedprivatelinkservice_types.go index 5046c4642..4c9d2728e 100755 --- a/apis/search/v1beta1/zz_sharedprivatelinkservice_types.go +++ b/apis/search/v1beta1/zz_sharedprivatelinkservice_types.go @@ -22,7 +22,7 @@ type SharedPrivateLinkServiceInitParameters struct { SubresourceName *string `json:"subresourceName,omitempty" tf:"subresource_name,omitempty"` // Specify the ID of the Shared Private Link Enabled Remote Resource which this Azure Search Private Endpoint should be connected to. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() TargetResourceID *string `json:"targetResourceId,omitempty" tf:"target_resource_id,omitempty"` @@ -63,7 +63,7 @@ type SharedPrivateLinkServiceParameters struct { RequestMessage *string `json:"requestMessage,omitempty" tf:"request_message,omitempty"` // Specify the id of the Azure Search Service. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/search/v1beta1.Service + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/search/v1beta2.Service // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional SearchServiceID *string `json:"searchServiceId,omitempty" tf:"search_service_id,omitempty"` @@ -81,7 +81,7 @@ type SharedPrivateLinkServiceParameters struct { SubresourceName *string `json:"subresourceName,omitempty" tf:"subresource_name,omitempty"` // Specify the ID of the Shared Private Link Enabled Remote Resource which this Azure Search Private Endpoint should be connected to. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional TargetResourceID *string `json:"targetResourceId,omitempty" tf:"target_resource_id,omitempty"` diff --git a/apis/search/v1beta2/zz_generated.conversion_hubs.go b/apis/search/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 000000000..aa1fa71e2 --- /dev/null +++ b/apis/search/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. 
+func (tr *Service) Hub() {} diff --git a/apis/search/v1beta2/zz_generated.deepcopy.go b/apis/search/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..743ba8596 --- /dev/null +++ b/apis/search/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,556 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityInitParameters) DeepCopyInto(out *IdentityInitParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityInitParameters. +func (in *IdentityInitParameters) DeepCopy() *IdentityInitParameters { + if in == nil { + return nil + } + out := new(IdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityObservation) DeepCopyInto(out *IdentityObservation) { + *out = *in + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityObservation. 
+func (in *IdentityObservation) DeepCopy() *IdentityObservation { + if in == nil { + return nil + } + out := new(IdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityParameters) DeepCopyInto(out *IdentityParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityParameters. +func (in *IdentityParameters) DeepCopy() *IdentityParameters { + if in == nil { + return nil + } + out := new(IdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueryKeysInitParameters) DeepCopyInto(out *QueryKeysInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryKeysInitParameters. +func (in *QueryKeysInitParameters) DeepCopy() *QueryKeysInitParameters { + if in == nil { + return nil + } + out := new(QueryKeysInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueryKeysObservation) DeepCopyInto(out *QueryKeysObservation) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryKeysObservation. 
+func (in *QueryKeysObservation) DeepCopy() *QueryKeysObservation { + if in == nil { + return nil + } + out := new(QueryKeysObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueryKeysParameters) DeepCopyInto(out *QueryKeysParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryKeysParameters. +func (in *QueryKeysParameters) DeepCopy() *QueryKeysParameters { + if in == nil { + return nil + } + out := new(QueryKeysParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Service) DeepCopyInto(out *Service) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Service. +func (in *Service) DeepCopy() *Service { + if in == nil { + return nil + } + out := new(Service) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Service) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceInitParameters) DeepCopyInto(out *ServiceInitParameters) { + *out = *in + if in.AllowedIps != nil { + in, out := &in.AllowedIps, &out.AllowedIps + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AuthenticationFailureMode != nil { + in, out := &in.AuthenticationFailureMode, &out.AuthenticationFailureMode + *out = new(string) + **out = **in + } + if in.CustomerManagedKeyEnforcementEnabled != nil { + in, out := &in.CustomerManagedKeyEnforcementEnabled, &out.CustomerManagedKeyEnforcementEnabled + *out = new(bool) + **out = **in + } + if in.HostingMode != nil { + in, out := &in.HostingMode, &out.HostingMode + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LocalAuthenticationEnabled != nil { + in, out := &in.LocalAuthenticationEnabled, &out.LocalAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PartitionCount != nil { + in, out := &in.PartitionCount, &out.PartitionCount + *out = new(float64) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ReplicaCount != nil { + in, out := &in.ReplicaCount, &out.ReplicaCount + *out = new(float64) + **out = **in + } + if in.SemanticSearchSku != nil { + in, out := &in.SemanticSearchSku, &out.SemanticSearchSku + *out = new(string) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + 
(*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceInitParameters. +func (in *ServiceInitParameters) DeepCopy() *ServiceInitParameters { + if in == nil { + return nil + } + out := new(ServiceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceList) DeepCopyInto(out *ServiceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Service, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceList. +func (in *ServiceList) DeepCopy() *ServiceList { + if in == nil { + return nil + } + out := new(ServiceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceObservation) DeepCopyInto(out *ServiceObservation) { + *out = *in + if in.AllowedIps != nil { + in, out := &in.AllowedIps, &out.AllowedIps + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AuthenticationFailureMode != nil { + in, out := &in.AuthenticationFailureMode, &out.AuthenticationFailureMode + *out = new(string) + **out = **in + } + if in.CustomerManagedKeyEnforcementEnabled != nil { + in, out := &in.CustomerManagedKeyEnforcementEnabled, &out.CustomerManagedKeyEnforcementEnabled + *out = new(bool) + **out = **in + } + if in.HostingMode != nil { + in, out := &in.HostingMode, &out.HostingMode + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.LocalAuthenticationEnabled != nil { + in, out := &in.LocalAuthenticationEnabled, &out.LocalAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PartitionCount != nil { + in, out := &in.PartitionCount, &out.PartitionCount + *out = new(float64) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.QueryKeys != nil { + in, out := &in.QueryKeys, &out.QueryKeys + *out = make([]QueryKeysObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ReplicaCount != nil { + in, out := &in.ReplicaCount, &out.ReplicaCount + *out = new(float64) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if 
in.SemanticSearchSku != nil { + in, out := &in.SemanticSearchSku, &out.SemanticSearchSku + *out = new(string) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceObservation. +func (in *ServiceObservation) DeepCopy() *ServiceObservation { + if in == nil { + return nil + } + out := new(ServiceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceParameters) DeepCopyInto(out *ServiceParameters) { + *out = *in + if in.AllowedIps != nil { + in, out := &in.AllowedIps, &out.AllowedIps + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AuthenticationFailureMode != nil { + in, out := &in.AuthenticationFailureMode, &out.AuthenticationFailureMode + *out = new(string) + **out = **in + } + if in.CustomerManagedKeyEnforcementEnabled != nil { + in, out := &in.CustomerManagedKeyEnforcementEnabled, &out.CustomerManagedKeyEnforcementEnabled + *out = new(bool) + **out = **in + } + if in.HostingMode != nil { + in, out := &in.HostingMode, &out.HostingMode + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.LocalAuthenticationEnabled != nil { + in, out := &in.LocalAuthenticationEnabled, &out.LocalAuthenticationEnabled + *out = new(bool) + 
**out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PartitionCount != nil { + in, out := &in.PartitionCount, &out.PartitionCount + *out = new(float64) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ReplicaCount != nil { + in, out := &in.ReplicaCount, &out.ReplicaCount + *out = new(float64) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SemanticSearchSku != nil { + in, out := &in.SemanticSearchSku, &out.SemanticSearchSku + *out = new(string) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceParameters. +func (in *ServiceParameters) DeepCopy() *ServiceParameters { + if in == nil { + return nil + } + out := new(ServiceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceSpec. +func (in *ServiceSpec) DeepCopy() *ServiceSpec { + if in == nil { + return nil + } + out := new(ServiceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceStatus) DeepCopyInto(out *ServiceStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceStatus. +func (in *ServiceStatus) DeepCopy() *ServiceStatus { + if in == nil { + return nil + } + out := new(ServiceStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/search/v1beta2/zz_generated.managed.go b/apis/search/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..841889e27 --- /dev/null +++ b/apis/search/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Service. +func (mg *Service) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Service. +func (mg *Service) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Service. +func (mg *Service) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Service. 
+func (mg *Service) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Service. +func (mg *Service) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Service. +func (mg *Service) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Service. +func (mg *Service) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Service. +func (mg *Service) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Service. +func (mg *Service) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Service. +func (mg *Service) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Service. +func (mg *Service) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Service. +func (mg *Service) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/search/v1beta2/zz_generated.managedlist.go b/apis/search/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..d9baadeb7 --- /dev/null +++ b/apis/search/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this ServiceList. 
+func (l *ServiceList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/search/v1beta2/zz_generated.resolvers.go b/apis/search/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..0b3aadf11 --- /dev/null +++ b/apis/search/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,49 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + client "sigs.k8s.io/controller-runtime/pkg/client" + + // ResolveReferences of this Service. + apisresolver "github.com/upbound/provider-azure/internal/apis" +) + +func (mg *Service) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + 
mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/search/v1beta2/zz_groupversion_info.go b/apis/search/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..43f214224 --- /dev/null +++ b/apis/search/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=search.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "search.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/search/v1beta2/zz_service_terraformed.go b/apis/search/v1beta2/zz_service_terraformed.go new file mode 100755 index 000000000..318fa992d --- /dev/null +++ b/apis/search/v1beta2/zz_service_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Service +func (mg *Service) GetTerraformResourceType() string { + return "azurerm_search_service" +} + +// GetConnectionDetailsMapping for this Service +func (tr *Service) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"primary_key": "status.atProvider.primaryKey", "secondary_key": "status.atProvider.secondaryKey"} +} + +// GetObservation of this Service +func (tr *Service) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Service +func (tr *Service) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Service +func (tr *Service) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Service +func (tr *Service) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Service +func (tr *Service) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Service +func (tr *Service) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != 
nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Service +func (tr *Service) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Service using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Service) LateInitialize(attrs []byte) (bool, error) { + params := &ServiceParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Service) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/search/v1beta2/zz_service_types.go b/apis/search/v1beta2/zz_service_types.go new file mode 100755 index 000000000..de2a024f5 --- /dev/null +++ b/apis/search/v1beta2/zz_service_types.go @@ -0,0 +1,283 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type IdentityInitParameters struct { + + // Specifies the type of Managed Service Identity that should be configured on this Search Service. The only possible value is SystemAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityObservation struct { + + // The Principal ID associated with this Managed Service Identity. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID associated with this Managed Service Identity. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Search Service. The only possible value is SystemAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityParameters struct { + + // Specifies the type of Managed Service Identity that should be configured on this Search Service. The only possible value is SystemAssigned. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type QueryKeysInitParameters struct { +} + +type QueryKeysObservation struct { + + // The value of this Query Key. 
+ Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // The name of this Query Key. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type QueryKeysParameters struct { +} + +type ServiceInitParameters struct { + + // Specifies a list of inbound IPv4 or CIDRs that are allowed to access the Search Service. If the incoming IP request is from an IP address which is not included in the allowed_ips it will be blocked by the Search Services firewall. + // +listType=set + AllowedIps []*string `json:"allowedIps,omitempty" tf:"allowed_ips,omitempty"` + + // Specifies the response that the Search Service should return for requests that fail authentication. Possible values include http401WithBearerChallenge or http403. + AuthenticationFailureMode *string `json:"authenticationFailureMode,omitempty" tf:"authentication_failure_mode,omitempty"` + + // Specifies whether the Search Service should enforce that non-customer resources are encrypted. Defaults to false. + CustomerManagedKeyEnforcementEnabled *bool `json:"customerManagedKeyEnforcementEnabled,omitempty" tf:"customer_managed_key_enforcement_enabled,omitempty"` + + // Specifies the Hosting Mode, which allows for High Density partitions (that allow for up to 1000 indexes) should be supported. Possible values are highDensity or default. Defaults to default. Changing this forces a new Search Service to be created. + HostingMode *string `json:"hostingMode,omitempty" tf:"hosting_mode,omitempty"` + + // An identity block as defined below. + Identity *IdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Specifies whether the Search Service allows authenticating using API Keys? Defaults to true. + LocalAuthenticationEnabled *bool `json:"localAuthenticationEnabled,omitempty" tf:"local_authentication_enabled,omitempty"` + + // The Azure Region where the Search Service should exist. Changing this forces a new Search Service to be created. 
+ Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the number of partitions which should be created. This field cannot be set when using a free or basic sku (see the Microsoft documentation). Possible values include 1, 2, 3, 4, 6, or 12. Defaults to 1. + PartitionCount *float64 `json:"partitionCount,omitempty" tf:"partition_count,omitempty"` + + // Specifies whether Public Network Access is allowed for this resource. Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // Specifies the number of Replica's which should be created for this Search Service. This field cannot be set when using a free sku (see the Microsoft documentation). + ReplicaCount *float64 `json:"replicaCount,omitempty" tf:"replica_count,omitempty"` + + // Specifies the Semantic Search SKU which should be used for this Search Service. Possible values include free and standard. + SemanticSearchSku *string `json:"semanticSearchSku,omitempty" tf:"semantic_search_sku,omitempty"` + + // The SKU which should be used for this Search Service. Possible values include basic, free, standard, standard2, standard3, storage_optimized_l1 and storage_optimized_l2. Changing this forces a new Search Service to be created. + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // Specifies a mapping of tags which should be assigned to this Search Service. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ServiceObservation struct { + + // Specifies a list of inbound IPv4 or CIDRs that are allowed to access the Search Service. If the incoming IP request is from an IP address which is not included in the allowed_ips it will be blocked by the Search Services firewall. 
+ // +listType=set + AllowedIps []*string `json:"allowedIps,omitempty" tf:"allowed_ips,omitempty"` + + // Specifies the response that the Search Service should return for requests that fail authentication. Possible values include http401WithBearerChallenge or http403. + AuthenticationFailureMode *string `json:"authenticationFailureMode,omitempty" tf:"authentication_failure_mode,omitempty"` + + // Specifies whether the Search Service should enforce that non-customer resources are encrypted. Defaults to false. + CustomerManagedKeyEnforcementEnabled *bool `json:"customerManagedKeyEnforcementEnabled,omitempty" tf:"customer_managed_key_enforcement_enabled,omitempty"` + + // Specifies the Hosting Mode, which allows for High Density partitions (that allow for up to 1000 indexes) should be supported. Possible values are highDensity or default. Defaults to default. Changing this forces a new Search Service to be created. + HostingMode *string `json:"hostingMode,omitempty" tf:"hosting_mode,omitempty"` + + // The ID of the Search Service. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. + Identity *IdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // Specifies whether the Search Service allows authenticating using API Keys? Defaults to true. + LocalAuthenticationEnabled *bool `json:"localAuthenticationEnabled,omitempty" tf:"local_authentication_enabled,omitempty"` + + // The Azure Region where the Search Service should exist. Changing this forces a new Search Service to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the number of partitions which should be created. This field cannot be set when using a free or basic sku (see the Microsoft documentation). Possible values include 1, 2, 3, 4, 6, or 12. Defaults to 1. 
+ PartitionCount *float64 `json:"partitionCount,omitempty" tf:"partition_count,omitempty"` + + // Specifies whether Public Network Access is allowed for this resource. Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // A query_keys block as defined below. + QueryKeys []QueryKeysObservation `json:"queryKeys,omitempty" tf:"query_keys,omitempty"` + + // Specifies the number of Replica's which should be created for this Search Service. This field cannot be set when using a free sku (see the Microsoft documentation). + ReplicaCount *float64 `json:"replicaCount,omitempty" tf:"replica_count,omitempty"` + + // The name of the Resource Group where the Search Service should exist. Changing this forces a new Search Service to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Specifies the Semantic Search SKU which should be used for this Search Service. Possible values include free and standard. + SemanticSearchSku *string `json:"semanticSearchSku,omitempty" tf:"semantic_search_sku,omitempty"` + + // The SKU which should be used for this Search Service. Possible values include basic, free, standard, standard2, standard3, storage_optimized_l1 and storage_optimized_l2. Changing this forces a new Search Service to be created. + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // Specifies a mapping of tags which should be assigned to this Search Service. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ServiceParameters struct { + + // Specifies a list of inbound IPv4 or CIDRs that are allowed to access the Search Service. If the incoming IP request is from an IP address which is not included in the allowed_ips it will be blocked by the Search Services firewall. 
+ // +kubebuilder:validation:Optional + // +listType=set + AllowedIps []*string `json:"allowedIps,omitempty" tf:"allowed_ips,omitempty"` + + // Specifies the response that the Search Service should return for requests that fail authentication. Possible values include http401WithBearerChallenge or http403. + // +kubebuilder:validation:Optional + AuthenticationFailureMode *string `json:"authenticationFailureMode,omitempty" tf:"authentication_failure_mode,omitempty"` + + // Specifies whether the Search Service should enforce that non-customer resources are encrypted. Defaults to false. + // +kubebuilder:validation:Optional + CustomerManagedKeyEnforcementEnabled *bool `json:"customerManagedKeyEnforcementEnabled,omitempty" tf:"customer_managed_key_enforcement_enabled,omitempty"` + + // Specifies the Hosting Mode, which allows for High Density partitions (that allow for up to 1000 indexes) should be supported. Possible values are highDensity or default. Defaults to default. Changing this forces a new Search Service to be created. + // +kubebuilder:validation:Optional + HostingMode *string `json:"hostingMode,omitempty" tf:"hosting_mode,omitempty"` + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *IdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Specifies whether the Search Service allows authenticating using API Keys? Defaults to true. + // +kubebuilder:validation:Optional + LocalAuthenticationEnabled *bool `json:"localAuthenticationEnabled,omitempty" tf:"local_authentication_enabled,omitempty"` + + // The Azure Region where the Search Service should exist. Changing this forces a new Search Service to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the number of partitions which should be created. This field cannot be set when using a free or basic sku (see the Microsoft documentation). 
Possible values include 1, 2, 3, 4, 6, or 12. Defaults to 1. + // +kubebuilder:validation:Optional + PartitionCount *float64 `json:"partitionCount,omitempty" tf:"partition_count,omitempty"` + + // Specifies whether Public Network Access is allowed for this resource. Defaults to true. + // +kubebuilder:validation:Optional + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // Specifies the number of Replica's which should be created for this Search Service. This field cannot be set when using a free sku (see the Microsoft documentation). + // +kubebuilder:validation:Optional + ReplicaCount *float64 `json:"replicaCount,omitempty" tf:"replica_count,omitempty"` + + // The name of the Resource Group where the Search Service should exist. Changing this forces a new Search Service to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // Specifies the Semantic Search SKU which should be used for this Search Service. Possible values include free and standard. + // +kubebuilder:validation:Optional + SemanticSearchSku *string `json:"semanticSearchSku,omitempty" tf:"semantic_search_sku,omitempty"` + + // The SKU which should be used for this Search Service. Possible values include basic, free, standard, standard2, standard3, storage_optimized_l1 and storage_optimized_l2. 
Changing this forces a new Search Service to be created. + // +kubebuilder:validation:Optional + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // Specifies a mapping of tags which should be assigned to this Search Service. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +// ServiceSpec defines the desired state of Service +type ServiceSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ServiceParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ServiceInitParameters `json:"initProvider,omitempty"` +} + +// ServiceStatus defines the observed state of Service. +type ServiceStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ServiceObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Service is the Schema for the Services API. Manages a Search Service. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type Service struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.sku) || (has(self.initProvider) && has(self.initProvider.sku))",message="spec.forProvider.sku is a required parameter" + Spec ServiceSpec `json:"spec"` + Status ServiceStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ServiceList contains a list of Services +type ServiceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Service `json:"items"` +} + +// Repository type metadata. +var ( + Service_Kind = "Service" + Service_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Service_Kind}.String() + Service_KindAPIVersion = Service_Kind + "." 
+ CRDGroupVersion.String() + Service_GroupVersionKind = CRDGroupVersion.WithKind(Service_Kind) +) + +func init() { + SchemeBuilder.Register(&Service{}, &ServiceList{}) +} diff --git a/apis/security/v1beta1/zz_generated.conversion_hubs.go b/apis/security/v1beta1/zz_generated.conversion_hubs.go index 81cd79ced..53e9169c6 100755 --- a/apis/security/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/security/v1beta1/zz_generated.conversion_hubs.go @@ -9,15 +9,6 @@ package v1beta1 // Hub marks this type as a conversion hub. func (tr *AdvancedThreatProtection) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *IOTSecurityDeviceGroup) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *IOTSecuritySolution) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *SecurityCenterAssessment) Hub() {} - // Hub marks this type as a conversion hub. func (tr *SecurityCenterAssessmentPolicy) Hub() {} diff --git a/apis/security/v1beta1/zz_generated.conversion_spokes.go b/apis/security/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..bd124be78 --- /dev/null +++ b/apis/security/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,74 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this IOTSecurityDeviceGroup to the hub type. 
+func (tr *IOTSecurityDeviceGroup) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the IOTSecurityDeviceGroup type. +func (tr *IOTSecurityDeviceGroup) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this IOTSecuritySolution to the hub type. +func (tr *IOTSecuritySolution) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the IOTSecuritySolution type. 
+func (tr *IOTSecuritySolution) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this SecurityCenterAssessment to the hub type. +func (tr *SecurityCenterAssessment) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the SecurityCenterAssessment type. 
+func (tr *SecurityCenterAssessment) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/security/v1beta1/zz_generated.resolvers.go b/apis/security/v1beta1/zz_generated.resolvers.go index 3e539e7a7..a91e76015 100644 --- a/apis/security/v1beta1/zz_generated.resolvers.go +++ b/apis/security/v1beta1/zz_generated.resolvers.go @@ -234,7 +234,7 @@ func (mg *SecurityCenterServerVulnerabilityAssessment) ResolveReferences(ctx con var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("compute.azure.upbound.io", "v1beta1", "LinuxVirtualMachine", "LinuxVirtualMachineList") + m, l, err = apisresolver.GetManagedResource("compute.azure.upbound.io", "v1beta2", "LinuxVirtualMachine", "LinuxVirtualMachineList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -253,7 +253,7 @@ func (mg *SecurityCenterServerVulnerabilityAssessment) ResolveReferences(ctx con mg.Spec.ForProvider.VirtualMachineID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.VirtualMachineIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("compute.azure.upbound.io", "v1beta1", "LinuxVirtualMachine", "LinuxVirtualMachineList") + m, l, err = apisresolver.GetManagedResource("compute.azure.upbound.io", "v1beta2", "LinuxVirtualMachine", "LinuxVirtualMachineList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -284,7 +284,7 @@ func (mg *SecurityCenterServerVulnerabilityAssessmentVirtualMachine) ResolveRefe 
var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("compute.azure.upbound.io", "v1beta1", "LinuxVirtualMachine", "LinuxVirtualMachineList") + m, l, err = apisresolver.GetManagedResource("compute.azure.upbound.io", "v1beta2", "LinuxVirtualMachine", "LinuxVirtualMachineList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -315,7 +315,7 @@ func (mg *SecurityCenterWorkspace) ResolveReferences(ctx context.Context, c clie var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("operationalinsights.azure.upbound.io", "v1beta1", "Workspace", "WorkspaceList") + m, l, err = apisresolver.GetManagedResource("operationalinsights.azure.upbound.io", "v1beta2", "Workspace", "WorkspaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -334,7 +334,7 @@ func (mg *SecurityCenterWorkspace) ResolveReferences(ctx context.Context, c clie mg.Spec.ForProvider.WorkspaceID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.WorkspaceIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("operationalinsights.azure.upbound.io", "v1beta1", "Workspace", "WorkspaceList") + m, l, err = apisresolver.GetManagedResource("operationalinsights.azure.upbound.io", "v1beta2", "Workspace", "WorkspaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/security/v1beta1/zz_securitycenterservervulnerabilityassessment_types.go b/apis/security/v1beta1/zz_securitycenterservervulnerabilityassessment_types.go index 236ba820e..abc89132b 100755 --- a/apis/security/v1beta1/zz_securitycenterservervulnerabilityassessment_types.go +++ b/apis/security/v1beta1/zz_securitycenterservervulnerabilityassessment_types.go 
@@ -19,7 +19,7 @@ type SecurityCenterServerVulnerabilityAssessmentInitParameters struct { HybridMachineID *string `json:"hybridMachineId,omitempty" tf:"hybrid_machine_id,omitempty"` // The ID of the virtual machine to be monitored by vulnerability assessment. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/compute/v1beta1.LinuxVirtualMachine + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/compute/v1beta2.LinuxVirtualMachine // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() VirtualMachineID *string `json:"virtualMachineId,omitempty" tf:"virtual_machine_id,omitempty"` @@ -51,7 +51,7 @@ type SecurityCenterServerVulnerabilityAssessmentParameters struct { HybridMachineID *string `json:"hybridMachineId,omitempty" tf:"hybrid_machine_id,omitempty"` // The ID of the virtual machine to be monitored by vulnerability assessment. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/compute/v1beta1.LinuxVirtualMachine + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/compute/v1beta2.LinuxVirtualMachine // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional VirtualMachineID *string `json:"virtualMachineId,omitempty" tf:"virtual_machine_id,omitempty"` diff --git a/apis/security/v1beta1/zz_securitycenterservervulnerabilityassessmentvirtualmachine_types.go b/apis/security/v1beta1/zz_securitycenterservervulnerabilityassessmentvirtualmachine_types.go index 08be18b07..dcf6290b4 100755 --- a/apis/security/v1beta1/zz_securitycenterservervulnerabilityassessmentvirtualmachine_types.go +++ b/apis/security/v1beta1/zz_securitycenterservervulnerabilityassessmentvirtualmachine_types.go @@ -28,7 +28,7 @@ type SecurityCenterServerVulnerabilityAssessmentVirtualMachineObservation struct type SecurityCenterServerVulnerabilityAssessmentVirtualMachineParameters struct { // The ID of the virtual machine to be monitored by vulnerability assessment. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/compute/v1beta1.LinuxVirtualMachine + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/compute/v1beta2.LinuxVirtualMachine // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional VirtualMachineID *string `json:"virtualMachineId,omitempty" tf:"virtual_machine_id,omitempty"` diff --git a/apis/security/v1beta1/zz_securitycenterworkspace_types.go b/apis/security/v1beta1/zz_securitycenterworkspace_types.go index 8a6c6c347..66d22b2ea 100755 --- a/apis/security/v1beta1/zz_securitycenterworkspace_types.go +++ b/apis/security/v1beta1/zz_securitycenterworkspace_types.go @@ -19,7 +19,7 @@ type SecurityCenterWorkspaceInitParameters struct { Scope *string `json:"scope,omitempty" tf:"scope,omitempty"` // The ID of the Log Analytics Workspace to save the data in. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/operationalinsights/v1beta1.Workspace + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/operationalinsights/v1beta2.Workspace // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() WorkspaceID *string `json:"workspaceId,omitempty" tf:"workspace_id,omitempty"` @@ -51,7 +51,7 @@ type SecurityCenterWorkspaceParameters struct { Scope *string `json:"scope,omitempty" tf:"scope,omitempty"` // The ID of the Log Analytics Workspace to save the data in. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/operationalinsights/v1beta1.Workspace + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/operationalinsights/v1beta2.Workspace // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional WorkspaceID *string `json:"workspaceId,omitempty" tf:"workspace_id,omitempty"` diff --git a/apis/security/v1beta2/zz_generated.conversion_hubs.go b/apis/security/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 000000000..d29de0092 --- /dev/null +++ b/apis/security/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,16 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *IOTSecurityDeviceGroup) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *IOTSecuritySolution) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *SecurityCenterAssessment) Hub() {} diff --git a/apis/security/v1beta2/zz_generated.deepcopy.go b/apis/security/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..77d301a57 --- /dev/null +++ b/apis/security/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,1780 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AdditionalWorkspaceInitParameters) DeepCopyInto(out *AdditionalWorkspaceInitParameters) { + *out = *in + if in.DataTypes != nil { + in, out := &in.DataTypes, &out.DataTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.WorkspaceID != nil { + in, out := &in.WorkspaceID, &out.WorkspaceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdditionalWorkspaceInitParameters. +func (in *AdditionalWorkspaceInitParameters) DeepCopy() *AdditionalWorkspaceInitParameters { + if in == nil { + return nil + } + out := new(AdditionalWorkspaceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdditionalWorkspaceObservation) DeepCopyInto(out *AdditionalWorkspaceObservation) { + *out = *in + if in.DataTypes != nil { + in, out := &in.DataTypes, &out.DataTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.WorkspaceID != nil { + in, out := &in.WorkspaceID, &out.WorkspaceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdditionalWorkspaceObservation. +func (in *AdditionalWorkspaceObservation) DeepCopy() *AdditionalWorkspaceObservation { + if in == nil { + return nil + } + out := new(AdditionalWorkspaceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AdditionalWorkspaceParameters) DeepCopyInto(out *AdditionalWorkspaceParameters) { + *out = *in + if in.DataTypes != nil { + in, out := &in.DataTypes, &out.DataTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.WorkspaceID != nil { + in, out := &in.WorkspaceID, &out.WorkspaceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdditionalWorkspaceParameters. +func (in *AdditionalWorkspaceParameters) DeepCopy() *AdditionalWorkspaceParameters { + if in == nil { + return nil + } + out := new(AdditionalWorkspaceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllowRuleInitParameters) DeepCopyInto(out *AllowRuleInitParameters) { + *out = *in + if in.ConnectionFromIpsNotAllowed != nil { + in, out := &in.ConnectionFromIpsNotAllowed, &out.ConnectionFromIpsNotAllowed + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ConnectionToIpsNotAllowed != nil { + in, out := &in.ConnectionToIpsNotAllowed, &out.ConnectionToIpsNotAllowed + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LocalUsersNotAllowed != nil { + in, out := &in.LocalUsersNotAllowed, &out.LocalUsersNotAllowed + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ProcessesNotAllowed != nil { + in, out := &in.ProcessesNotAllowed, &out.ProcessesNotAllowed + *out = make([]*string, len(*in)) + for i := range *in { + if 
(*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowRuleInitParameters. +func (in *AllowRuleInitParameters) DeepCopy() *AllowRuleInitParameters { + if in == nil { + return nil + } + out := new(AllowRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllowRuleObservation) DeepCopyInto(out *AllowRuleObservation) { + *out = *in + if in.ConnectionFromIpsNotAllowed != nil { + in, out := &in.ConnectionFromIpsNotAllowed, &out.ConnectionFromIpsNotAllowed + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ConnectionToIpsNotAllowed != nil { + in, out := &in.ConnectionToIpsNotAllowed, &out.ConnectionToIpsNotAllowed + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LocalUsersNotAllowed != nil { + in, out := &in.LocalUsersNotAllowed, &out.LocalUsersNotAllowed + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ProcessesNotAllowed != nil { + in, out := &in.ProcessesNotAllowed, &out.ProcessesNotAllowed + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowRuleObservation. 
+func (in *AllowRuleObservation) DeepCopy() *AllowRuleObservation { + if in == nil { + return nil + } + out := new(AllowRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllowRuleParameters) DeepCopyInto(out *AllowRuleParameters) { + *out = *in + if in.ConnectionFromIpsNotAllowed != nil { + in, out := &in.ConnectionFromIpsNotAllowed, &out.ConnectionFromIpsNotAllowed + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ConnectionToIpsNotAllowed != nil { + in, out := &in.ConnectionToIpsNotAllowed, &out.ConnectionToIpsNotAllowed + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LocalUsersNotAllowed != nil { + in, out := &in.LocalUsersNotAllowed, &out.LocalUsersNotAllowed + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ProcessesNotAllowed != nil { + in, out := &in.ProcessesNotAllowed, &out.ProcessesNotAllowed + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowRuleParameters. +func (in *AllowRuleParameters) DeepCopy() *AllowRuleParameters { + if in == nil { + return nil + } + out := new(AllowRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IOTSecurityDeviceGroup) DeepCopyInto(out *IOTSecurityDeviceGroup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IOTSecurityDeviceGroup. +func (in *IOTSecurityDeviceGroup) DeepCopy() *IOTSecurityDeviceGroup { + if in == nil { + return nil + } + out := new(IOTSecurityDeviceGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IOTSecurityDeviceGroup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IOTSecurityDeviceGroupInitParameters) DeepCopyInto(out *IOTSecurityDeviceGroupInitParameters) { + *out = *in + if in.AllowRule != nil { + in, out := &in.AllowRule, &out.AllowRule + *out = new(AllowRuleInitParameters) + (*in).DeepCopyInto(*out) + } + if in.IOTHubID != nil { + in, out := &in.IOTHubID, &out.IOTHubID + *out = new(string) + **out = **in + } + if in.IOTHubIDRef != nil { + in, out := &in.IOTHubIDRef, &out.IOTHubIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IOTHubIDSelector != nil { + in, out := &in.IOTHubIDSelector, &out.IOTHubIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RangeRule != nil { + in, out := &in.RangeRule, &out.RangeRule + *out = make([]RangeRuleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IOTSecurityDeviceGroupInitParameters. 
+func (in *IOTSecurityDeviceGroupInitParameters) DeepCopy() *IOTSecurityDeviceGroupInitParameters { + if in == nil { + return nil + } + out := new(IOTSecurityDeviceGroupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IOTSecurityDeviceGroupList) DeepCopyInto(out *IOTSecurityDeviceGroupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]IOTSecurityDeviceGroup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IOTSecurityDeviceGroupList. +func (in *IOTSecurityDeviceGroupList) DeepCopy() *IOTSecurityDeviceGroupList { + if in == nil { + return nil + } + out := new(IOTSecurityDeviceGroupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IOTSecurityDeviceGroupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IOTSecurityDeviceGroupObservation) DeepCopyInto(out *IOTSecurityDeviceGroupObservation) { + *out = *in + if in.AllowRule != nil { + in, out := &in.AllowRule, &out.AllowRule + *out = new(AllowRuleObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IOTHubID != nil { + in, out := &in.IOTHubID, &out.IOTHubID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RangeRule != nil { + in, out := &in.RangeRule, &out.RangeRule + *out = make([]RangeRuleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IOTSecurityDeviceGroupObservation. +func (in *IOTSecurityDeviceGroupObservation) DeepCopy() *IOTSecurityDeviceGroupObservation { + if in == nil { + return nil + } + out := new(IOTSecurityDeviceGroupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IOTSecurityDeviceGroupParameters) DeepCopyInto(out *IOTSecurityDeviceGroupParameters) { + *out = *in + if in.AllowRule != nil { + in, out := &in.AllowRule, &out.AllowRule + *out = new(AllowRuleParameters) + (*in).DeepCopyInto(*out) + } + if in.IOTHubID != nil { + in, out := &in.IOTHubID, &out.IOTHubID + *out = new(string) + **out = **in + } + if in.IOTHubIDRef != nil { + in, out := &in.IOTHubIDRef, &out.IOTHubIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IOTHubIDSelector != nil { + in, out := &in.IOTHubIDSelector, &out.IOTHubIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RangeRule != nil { + in, out := &in.RangeRule, &out.RangeRule + *out = make([]RangeRuleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IOTSecurityDeviceGroupParameters. +func (in *IOTSecurityDeviceGroupParameters) DeepCopy() *IOTSecurityDeviceGroupParameters { + if in == nil { + return nil + } + out := new(IOTSecurityDeviceGroupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IOTSecurityDeviceGroupSpec) DeepCopyInto(out *IOTSecurityDeviceGroupSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IOTSecurityDeviceGroupSpec. 
+func (in *IOTSecurityDeviceGroupSpec) DeepCopy() *IOTSecurityDeviceGroupSpec { + if in == nil { + return nil + } + out := new(IOTSecurityDeviceGroupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IOTSecurityDeviceGroupStatus) DeepCopyInto(out *IOTSecurityDeviceGroupStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IOTSecurityDeviceGroupStatus. +func (in *IOTSecurityDeviceGroupStatus) DeepCopy() *IOTSecurityDeviceGroupStatus { + if in == nil { + return nil + } + out := new(IOTSecurityDeviceGroupStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IOTSecuritySolution) DeepCopyInto(out *IOTSecuritySolution) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IOTSecuritySolution. +func (in *IOTSecuritySolution) DeepCopy() *IOTSecuritySolution { + if in == nil { + return nil + } + out := new(IOTSecuritySolution) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IOTSecuritySolution) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IOTSecuritySolutionInitParameters) DeepCopyInto(out *IOTSecuritySolutionInitParameters) { + *out = *in + if in.AdditionalWorkspace != nil { + in, out := &in.AdditionalWorkspace, &out.AdditionalWorkspace + *out = make([]AdditionalWorkspaceInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DisabledDataSources != nil { + in, out := &in.DisabledDataSources, &out.DisabledDataSources + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.EventsToExport != nil { + in, out := &in.EventsToExport, &out.EventsToExport + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IOTHubIds != nil { + in, out := &in.IOTHubIds, &out.IOTHubIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IOTHubIdsRefs != nil { + in, out := &in.IOTHubIdsRefs, &out.IOTHubIdsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IOTHubIdsSelector != nil { + in, out := &in.IOTHubIdsSelector, &out.IOTHubIdsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.LogAnalyticsWorkspaceID != nil { + in, out := &in.LogAnalyticsWorkspaceID, &out.LogAnalyticsWorkspaceID + *out = new(string) + **out = **in + } + if in.LogUnmaskedIpsEnabled != nil { + in, out := &in.LogUnmaskedIpsEnabled, 
&out.LogUnmaskedIpsEnabled + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.QueryForResources != nil { + in, out := &in.QueryForResources, &out.QueryForResources + *out = new(string) + **out = **in + } + if in.QuerySubscriptionIds != nil { + in, out := &in.QuerySubscriptionIds, &out.QuerySubscriptionIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RecommendationsEnabled != nil { + in, out := &in.RecommendationsEnabled, &out.RecommendationsEnabled + *out = new(RecommendationsEnabledInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IOTSecuritySolutionInitParameters. +func (in *IOTSecuritySolutionInitParameters) DeepCopy() *IOTSecuritySolutionInitParameters { + if in == nil { + return nil + } + out := new(IOTSecuritySolutionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IOTSecuritySolutionList) DeepCopyInto(out *IOTSecuritySolutionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]IOTSecuritySolution, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IOTSecuritySolutionList. 
+func (in *IOTSecuritySolutionList) DeepCopy() *IOTSecuritySolutionList { + if in == nil { + return nil + } + out := new(IOTSecuritySolutionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IOTSecuritySolutionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IOTSecuritySolutionObservation) DeepCopyInto(out *IOTSecuritySolutionObservation) { + *out = *in + if in.AdditionalWorkspace != nil { + in, out := &in.AdditionalWorkspace, &out.AdditionalWorkspace + *out = make([]AdditionalWorkspaceObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DisabledDataSources != nil { + in, out := &in.DisabledDataSources, &out.DisabledDataSources + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.EventsToExport != nil { + in, out := &in.EventsToExport, &out.EventsToExport + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IOTHubIds != nil { + in, out := &in.IOTHubIds, &out.IOTHubIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Location != nil { + in, out := &in.Location, 
&out.Location + *out = new(string) + **out = **in + } + if in.LogAnalyticsWorkspaceID != nil { + in, out := &in.LogAnalyticsWorkspaceID, &out.LogAnalyticsWorkspaceID + *out = new(string) + **out = **in + } + if in.LogUnmaskedIpsEnabled != nil { + in, out := &in.LogUnmaskedIpsEnabled, &out.LogUnmaskedIpsEnabled + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.QueryForResources != nil { + in, out := &in.QueryForResources, &out.QueryForResources + *out = new(string) + **out = **in + } + if in.QuerySubscriptionIds != nil { + in, out := &in.QuerySubscriptionIds, &out.QuerySubscriptionIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RecommendationsEnabled != nil { + in, out := &in.RecommendationsEnabled, &out.RecommendationsEnabled + *out = new(RecommendationsEnabledObservation) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IOTSecuritySolutionObservation. +func (in *IOTSecuritySolutionObservation) DeepCopy() *IOTSecuritySolutionObservation { + if in == nil { + return nil + } + out := new(IOTSecuritySolutionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IOTSecuritySolutionParameters) DeepCopyInto(out *IOTSecuritySolutionParameters) { + *out = *in + if in.AdditionalWorkspace != nil { + in, out := &in.AdditionalWorkspace, &out.AdditionalWorkspace + *out = make([]AdditionalWorkspaceParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DisabledDataSources != nil { + in, out := &in.DisabledDataSources, &out.DisabledDataSources + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.EventsToExport != nil { + in, out := &in.EventsToExport, &out.EventsToExport + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IOTHubIds != nil { + in, out := &in.IOTHubIds, &out.IOTHubIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IOTHubIdsRefs != nil { + in, out := &in.IOTHubIdsRefs, &out.IOTHubIdsRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IOTHubIdsSelector != nil { + in, out := &in.IOTHubIdsSelector, &out.IOTHubIdsSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.LogAnalyticsWorkspaceID != nil { + in, out := &in.LogAnalyticsWorkspaceID, &out.LogAnalyticsWorkspaceID + *out = new(string) + **out = **in + } + if in.LogUnmaskedIpsEnabled != nil { + in, out := &in.LogUnmaskedIpsEnabled, &out.LogUnmaskedIpsEnabled 
+ *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.QueryForResources != nil { + in, out := &in.QueryForResources, &out.QueryForResources + *out = new(string) + **out = **in + } + if in.QuerySubscriptionIds != nil { + in, out := &in.QuerySubscriptionIds, &out.QuerySubscriptionIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RecommendationsEnabled != nil { + in, out := &in.RecommendationsEnabled, &out.RecommendationsEnabled + *out = new(RecommendationsEnabledParameters) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IOTSecuritySolutionParameters. +func (in *IOTSecuritySolutionParameters) DeepCopy() *IOTSecuritySolutionParameters { + if in == nil { + return nil + } + out := new(IOTSecuritySolutionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IOTSecuritySolutionSpec) DeepCopyInto(out *IOTSecuritySolutionSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IOTSecuritySolutionSpec. +func (in *IOTSecuritySolutionSpec) DeepCopy() *IOTSecuritySolutionSpec { + if in == nil { + return nil + } + out := new(IOTSecuritySolutionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IOTSecuritySolutionStatus) DeepCopyInto(out *IOTSecuritySolutionStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IOTSecuritySolutionStatus. +func (in *IOTSecuritySolutionStatus) DeepCopy() *IOTSecuritySolutionStatus { + if in == nil { + return nil + } + out := new(IOTSecuritySolutionStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RangeRuleInitParameters) DeepCopyInto(out *RangeRuleInitParameters) { + *out = *in + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(string) + **out = **in + } + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RangeRuleInitParameters. 
+func (in *RangeRuleInitParameters) DeepCopy() *RangeRuleInitParameters { + if in == nil { + return nil + } + out := new(RangeRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RangeRuleObservation) DeepCopyInto(out *RangeRuleObservation) { + *out = *in + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(string) + **out = **in + } + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RangeRuleObservation. +func (in *RangeRuleObservation) DeepCopy() *RangeRuleObservation { + if in == nil { + return nil + } + out := new(RangeRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RangeRuleParameters) DeepCopyInto(out *RangeRuleParameters) { + *out = *in + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(string) + **out = **in + } + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(float64) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(float64) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RangeRuleParameters. 
+func (in *RangeRuleParameters) DeepCopy() *RangeRuleParameters { + if in == nil { + return nil + } + out := new(RangeRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecommendationsEnabledInitParameters) DeepCopyInto(out *RecommendationsEnabledInitParameters) { + *out = *in + if in.AcrAuthentication != nil { + in, out := &in.AcrAuthentication, &out.AcrAuthentication + *out = new(bool) + **out = **in + } + if in.AgentSendUnutilizedMsg != nil { + in, out := &in.AgentSendUnutilizedMsg, &out.AgentSendUnutilizedMsg + *out = new(bool) + **out = **in + } + if in.Baseline != nil { + in, out := &in.Baseline, &out.Baseline + *out = new(bool) + **out = **in + } + if in.EdgeHubMemOptimize != nil { + in, out := &in.EdgeHubMemOptimize, &out.EdgeHubMemOptimize + *out = new(bool) + **out = **in + } + if in.EdgeLoggingOption != nil { + in, out := &in.EdgeLoggingOption, &out.EdgeLoggingOption + *out = new(bool) + **out = **in + } + if in.IPFilterDenyAll != nil { + in, out := &in.IPFilterDenyAll, &out.IPFilterDenyAll + *out = new(bool) + **out = **in + } + if in.IPFilterPermissiveRule != nil { + in, out := &in.IPFilterPermissiveRule, &out.IPFilterPermissiveRule + *out = new(bool) + **out = **in + } + if in.InconsistentModuleSettings != nil { + in, out := &in.InconsistentModuleSettings, &out.InconsistentModuleSettings + *out = new(bool) + **out = **in + } + if in.InstallAgent != nil { + in, out := &in.InstallAgent, &out.InstallAgent + *out = new(bool) + **out = **in + } + if in.OpenPorts != nil { + in, out := &in.OpenPorts, &out.OpenPorts + *out = new(bool) + **out = **in + } + if in.PermissiveFirewallPolicy != nil { + in, out := &in.PermissiveFirewallPolicy, &out.PermissiveFirewallPolicy + *out = new(bool) + **out = **in + } + if in.PermissiveInputFirewallRules != nil { + in, out := &in.PermissiveInputFirewallRules, &out.PermissiveInputFirewallRules 
+ *out = new(bool) + **out = **in + } + if in.PermissiveOutputFirewallRules != nil { + in, out := &in.PermissiveOutputFirewallRules, &out.PermissiveOutputFirewallRules + *out = new(bool) + **out = **in + } + if in.PrivilegedDockerOptions != nil { + in, out := &in.PrivilegedDockerOptions, &out.PrivilegedDockerOptions + *out = new(bool) + **out = **in + } + if in.SharedCredentials != nil { + in, out := &in.SharedCredentials, &out.SharedCredentials + *out = new(bool) + **out = **in + } + if in.VulnerableTLSCipherSuite != nil { + in, out := &in.VulnerableTLSCipherSuite, &out.VulnerableTLSCipherSuite + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecommendationsEnabledInitParameters. +func (in *RecommendationsEnabledInitParameters) DeepCopy() *RecommendationsEnabledInitParameters { + if in == nil { + return nil + } + out := new(RecommendationsEnabledInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RecommendationsEnabledObservation) DeepCopyInto(out *RecommendationsEnabledObservation) { + *out = *in + if in.AcrAuthentication != nil { + in, out := &in.AcrAuthentication, &out.AcrAuthentication + *out = new(bool) + **out = **in + } + if in.AgentSendUnutilizedMsg != nil { + in, out := &in.AgentSendUnutilizedMsg, &out.AgentSendUnutilizedMsg + *out = new(bool) + **out = **in + } + if in.Baseline != nil { + in, out := &in.Baseline, &out.Baseline + *out = new(bool) + **out = **in + } + if in.EdgeHubMemOptimize != nil { + in, out := &in.EdgeHubMemOptimize, &out.EdgeHubMemOptimize + *out = new(bool) + **out = **in + } + if in.EdgeLoggingOption != nil { + in, out := &in.EdgeLoggingOption, &out.EdgeLoggingOption + *out = new(bool) + **out = **in + } + if in.IPFilterDenyAll != nil { + in, out := &in.IPFilterDenyAll, &out.IPFilterDenyAll + *out = new(bool) + **out = **in + } + if in.IPFilterPermissiveRule != nil { + in, out := &in.IPFilterPermissiveRule, &out.IPFilterPermissiveRule + *out = new(bool) + **out = **in + } + if in.InconsistentModuleSettings != nil { + in, out := &in.InconsistentModuleSettings, &out.InconsistentModuleSettings + *out = new(bool) + **out = **in + } + if in.InstallAgent != nil { + in, out := &in.InstallAgent, &out.InstallAgent + *out = new(bool) + **out = **in + } + if in.OpenPorts != nil { + in, out := &in.OpenPorts, &out.OpenPorts + *out = new(bool) + **out = **in + } + if in.PermissiveFirewallPolicy != nil { + in, out := &in.PermissiveFirewallPolicy, &out.PermissiveFirewallPolicy + *out = new(bool) + **out = **in + } + if in.PermissiveInputFirewallRules != nil { + in, out := &in.PermissiveInputFirewallRules, &out.PermissiveInputFirewallRules + *out = new(bool) + **out = **in + } + if in.PermissiveOutputFirewallRules != nil { + in, out := &in.PermissiveOutputFirewallRules, &out.PermissiveOutputFirewallRules + *out = new(bool) + **out = **in + } + if in.PrivilegedDockerOptions != nil { + in, out := &in.PrivilegedDockerOptions, 
&out.PrivilegedDockerOptions + *out = new(bool) + **out = **in + } + if in.SharedCredentials != nil { + in, out := &in.SharedCredentials, &out.SharedCredentials + *out = new(bool) + **out = **in + } + if in.VulnerableTLSCipherSuite != nil { + in, out := &in.VulnerableTLSCipherSuite, &out.VulnerableTLSCipherSuite + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecommendationsEnabledObservation. +func (in *RecommendationsEnabledObservation) DeepCopy() *RecommendationsEnabledObservation { + if in == nil { + return nil + } + out := new(RecommendationsEnabledObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecommendationsEnabledParameters) DeepCopyInto(out *RecommendationsEnabledParameters) { + *out = *in + if in.AcrAuthentication != nil { + in, out := &in.AcrAuthentication, &out.AcrAuthentication + *out = new(bool) + **out = **in + } + if in.AgentSendUnutilizedMsg != nil { + in, out := &in.AgentSendUnutilizedMsg, &out.AgentSendUnutilizedMsg + *out = new(bool) + **out = **in + } + if in.Baseline != nil { + in, out := &in.Baseline, &out.Baseline + *out = new(bool) + **out = **in + } + if in.EdgeHubMemOptimize != nil { + in, out := &in.EdgeHubMemOptimize, &out.EdgeHubMemOptimize + *out = new(bool) + **out = **in + } + if in.EdgeLoggingOption != nil { + in, out := &in.EdgeLoggingOption, &out.EdgeLoggingOption + *out = new(bool) + **out = **in + } + if in.IPFilterDenyAll != nil { + in, out := &in.IPFilterDenyAll, &out.IPFilterDenyAll + *out = new(bool) + **out = **in + } + if in.IPFilterPermissiveRule != nil { + in, out := &in.IPFilterPermissiveRule, &out.IPFilterPermissiveRule + *out = new(bool) + **out = **in + } + if in.InconsistentModuleSettings != nil { + in, out := &in.InconsistentModuleSettings, &out.InconsistentModuleSettings + *out = new(bool) + **out = 
**in + } + if in.InstallAgent != nil { + in, out := &in.InstallAgent, &out.InstallAgent + *out = new(bool) + **out = **in + } + if in.OpenPorts != nil { + in, out := &in.OpenPorts, &out.OpenPorts + *out = new(bool) + **out = **in + } + if in.PermissiveFirewallPolicy != nil { + in, out := &in.PermissiveFirewallPolicy, &out.PermissiveFirewallPolicy + *out = new(bool) + **out = **in + } + if in.PermissiveInputFirewallRules != nil { + in, out := &in.PermissiveInputFirewallRules, &out.PermissiveInputFirewallRules + *out = new(bool) + **out = **in + } + if in.PermissiveOutputFirewallRules != nil { + in, out := &in.PermissiveOutputFirewallRules, &out.PermissiveOutputFirewallRules + *out = new(bool) + **out = **in + } + if in.PrivilegedDockerOptions != nil { + in, out := &in.PrivilegedDockerOptions, &out.PrivilegedDockerOptions + *out = new(bool) + **out = **in + } + if in.SharedCredentials != nil { + in, out := &in.SharedCredentials, &out.SharedCredentials + *out = new(bool) + **out = **in + } + if in.VulnerableTLSCipherSuite != nil { + in, out := &in.VulnerableTLSCipherSuite, &out.VulnerableTLSCipherSuite + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecommendationsEnabledParameters. +func (in *RecommendationsEnabledParameters) DeepCopy() *RecommendationsEnabledParameters { + if in == nil { + return nil + } + out := new(RecommendationsEnabledParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecurityCenterAssessment) DeepCopyInto(out *SecurityCenterAssessment) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityCenterAssessment. 
+func (in *SecurityCenterAssessment) DeepCopy() *SecurityCenterAssessment { + if in == nil { + return nil + } + out := new(SecurityCenterAssessment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SecurityCenterAssessment) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecurityCenterAssessmentInitParameters) DeepCopyInto(out *SecurityCenterAssessmentInitParameters) { + *out = *in + if in.AdditionalData != nil { + in, out := &in.AdditionalData, &out.AdditionalData + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AssessmentPolicyID != nil { + in, out := &in.AssessmentPolicyID, &out.AssessmentPolicyID + *out = new(string) + **out = **in + } + if in.AssessmentPolicyIDRef != nil { + in, out := &in.AssessmentPolicyIDRef, &out.AssessmentPolicyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AssessmentPolicyIDSelector != nil { + in, out := &in.AssessmentPolicyIDSelector, &out.AssessmentPolicyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(StatusInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TargetResourceID != nil { + in, out := &in.TargetResourceID, &out.TargetResourceID + *out = new(string) + **out = **in + } + if in.TargetResourceIDRef != nil { + in, out := &in.TargetResourceIDRef, &out.TargetResourceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.TargetResourceIDSelector != nil { + in, out := 
&in.TargetResourceIDSelector, &out.TargetResourceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityCenterAssessmentInitParameters. +func (in *SecurityCenterAssessmentInitParameters) DeepCopy() *SecurityCenterAssessmentInitParameters { + if in == nil { + return nil + } + out := new(SecurityCenterAssessmentInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecurityCenterAssessmentList) DeepCopyInto(out *SecurityCenterAssessmentList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SecurityCenterAssessment, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityCenterAssessmentList. +func (in *SecurityCenterAssessmentList) DeepCopy() *SecurityCenterAssessmentList { + if in == nil { + return nil + } + out := new(SecurityCenterAssessmentList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SecurityCenterAssessmentList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SecurityCenterAssessmentObservation) DeepCopyInto(out *SecurityCenterAssessmentObservation) { + *out = *in + if in.AdditionalData != nil { + in, out := &in.AdditionalData, &out.AdditionalData + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AssessmentPolicyID != nil { + in, out := &in.AssessmentPolicyID, &out.AssessmentPolicyID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(StatusObservation) + (*in).DeepCopyInto(*out) + } + if in.TargetResourceID != nil { + in, out := &in.TargetResourceID, &out.TargetResourceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityCenterAssessmentObservation. +func (in *SecurityCenterAssessmentObservation) DeepCopy() *SecurityCenterAssessmentObservation { + if in == nil { + return nil + } + out := new(SecurityCenterAssessmentObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SecurityCenterAssessmentParameters) DeepCopyInto(out *SecurityCenterAssessmentParameters) { + *out = *in + if in.AdditionalData != nil { + in, out := &in.AdditionalData, &out.AdditionalData + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AssessmentPolicyID != nil { + in, out := &in.AssessmentPolicyID, &out.AssessmentPolicyID + *out = new(string) + **out = **in + } + if in.AssessmentPolicyIDRef != nil { + in, out := &in.AssessmentPolicyIDRef, &out.AssessmentPolicyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AssessmentPolicyIDSelector != nil { + in, out := &in.AssessmentPolicyIDSelector, &out.AssessmentPolicyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(StatusParameters) + (*in).DeepCopyInto(*out) + } + if in.TargetResourceID != nil { + in, out := &in.TargetResourceID, &out.TargetResourceID + *out = new(string) + **out = **in + } + if in.TargetResourceIDRef != nil { + in, out := &in.TargetResourceIDRef, &out.TargetResourceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.TargetResourceIDSelector != nil { + in, out := &in.TargetResourceIDSelector, &out.TargetResourceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityCenterAssessmentParameters. +func (in *SecurityCenterAssessmentParameters) DeepCopy() *SecurityCenterAssessmentParameters { + if in == nil { + return nil + } + out := new(SecurityCenterAssessmentParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SecurityCenterAssessmentSpec) DeepCopyInto(out *SecurityCenterAssessmentSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityCenterAssessmentSpec. +func (in *SecurityCenterAssessmentSpec) DeepCopy() *SecurityCenterAssessmentSpec { + if in == nil { + return nil + } + out := new(SecurityCenterAssessmentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecurityCenterAssessmentStatus) DeepCopyInto(out *SecurityCenterAssessmentStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityCenterAssessmentStatus. +func (in *SecurityCenterAssessmentStatus) DeepCopy() *SecurityCenterAssessmentStatus { + if in == nil { + return nil + } + out := new(SecurityCenterAssessmentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatusInitParameters) DeepCopyInto(out *StatusInitParameters) { + *out = *in + if in.Cause != nil { + in, out := &in.Cause, &out.Cause + *out = new(string) + **out = **in + } + if in.Code != nil { + in, out := &in.Code, &out.Code + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatusInitParameters. 
+func (in *StatusInitParameters) DeepCopy() *StatusInitParameters { + if in == nil { + return nil + } + out := new(StatusInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatusObservation) DeepCopyInto(out *StatusObservation) { + *out = *in + if in.Cause != nil { + in, out := &in.Cause, &out.Cause + *out = new(string) + **out = **in + } + if in.Code != nil { + in, out := &in.Code, &out.Code + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatusObservation. +func (in *StatusObservation) DeepCopy() *StatusObservation { + if in == nil { + return nil + } + out := new(StatusObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatusParameters) DeepCopyInto(out *StatusParameters) { + *out = *in + if in.Cause != nil { + in, out := &in.Cause, &out.Cause + *out = new(string) + **out = **in + } + if in.Code != nil { + in, out := &in.Code, &out.Code + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatusParameters. 
+func (in *StatusParameters) DeepCopy() *StatusParameters { + if in == nil { + return nil + } + out := new(StatusParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/security/v1beta2/zz_generated.managed.go b/apis/security/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..5374c08bd --- /dev/null +++ b/apis/security/v1beta2/zz_generated.managed.go @@ -0,0 +1,188 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this IOTSecurityDeviceGroup. +func (mg *IOTSecurityDeviceGroup) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this IOTSecurityDeviceGroup. +func (mg *IOTSecurityDeviceGroup) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this IOTSecurityDeviceGroup. +func (mg *IOTSecurityDeviceGroup) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this IOTSecurityDeviceGroup. +func (mg *IOTSecurityDeviceGroup) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this IOTSecurityDeviceGroup. +func (mg *IOTSecurityDeviceGroup) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this IOTSecurityDeviceGroup. +func (mg *IOTSecurityDeviceGroup) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this IOTSecurityDeviceGroup. +func (mg *IOTSecurityDeviceGroup) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) 
+} + +// SetDeletionPolicy of this IOTSecurityDeviceGroup. +func (mg *IOTSecurityDeviceGroup) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this IOTSecurityDeviceGroup. +func (mg *IOTSecurityDeviceGroup) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this IOTSecurityDeviceGroup. +func (mg *IOTSecurityDeviceGroup) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this IOTSecurityDeviceGroup. +func (mg *IOTSecurityDeviceGroup) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this IOTSecurityDeviceGroup. +func (mg *IOTSecurityDeviceGroup) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this IOTSecuritySolution. +func (mg *IOTSecuritySolution) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this IOTSecuritySolution. +func (mg *IOTSecuritySolution) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this IOTSecuritySolution. +func (mg *IOTSecuritySolution) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this IOTSecuritySolution. +func (mg *IOTSecuritySolution) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this IOTSecuritySolution. +func (mg *IOTSecuritySolution) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this IOTSecuritySolution. 
+func (mg *IOTSecuritySolution) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this IOTSecuritySolution. +func (mg *IOTSecuritySolution) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this IOTSecuritySolution. +func (mg *IOTSecuritySolution) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this IOTSecuritySolution. +func (mg *IOTSecuritySolution) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this IOTSecuritySolution. +func (mg *IOTSecuritySolution) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this IOTSecuritySolution. +func (mg *IOTSecuritySolution) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this IOTSecuritySolution. +func (mg *IOTSecuritySolution) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this SecurityCenterAssessment. +func (mg *SecurityCenterAssessment) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this SecurityCenterAssessment. +func (mg *SecurityCenterAssessment) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this SecurityCenterAssessment. +func (mg *SecurityCenterAssessment) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this SecurityCenterAssessment. 
+func (mg *SecurityCenterAssessment) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this SecurityCenterAssessment. +func (mg *SecurityCenterAssessment) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this SecurityCenterAssessment. +func (mg *SecurityCenterAssessment) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this SecurityCenterAssessment. +func (mg *SecurityCenterAssessment) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this SecurityCenterAssessment. +func (mg *SecurityCenterAssessment) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this SecurityCenterAssessment. +func (mg *SecurityCenterAssessment) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this SecurityCenterAssessment. +func (mg *SecurityCenterAssessment) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this SecurityCenterAssessment. +func (mg *SecurityCenterAssessment) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this SecurityCenterAssessment. 
+func (mg *SecurityCenterAssessment) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/security/v1beta2/zz_generated.managedlist.go b/apis/security/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..93b2452ca --- /dev/null +++ b/apis/security/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,35 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this IOTSecurityDeviceGroupList. +func (l *IOTSecurityDeviceGroupList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this IOTSecuritySolutionList. +func (l *IOTSecuritySolutionList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this SecurityCenterAssessmentList. +func (l *SecurityCenterAssessmentList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/security/v1beta2/zz_generated.resolvers.go b/apis/security/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..d6d6e11bd --- /dev/null +++ b/apis/security/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,226 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + apisresolver "github.com/upbound/provider-azure/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *IOTSecurityDeviceGroup) ResolveReferences( // ResolveReferences of this IOTSecurityDeviceGroup. + ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("devices.azure.upbound.io", "v1beta2", "IOTHub", "IOTHubList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.IOTHubID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.IOTHubIDRef, + Selector: mg.Spec.ForProvider.IOTHubIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.IOTHubID") + } + mg.Spec.ForProvider.IOTHubID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.IOTHubIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("devices.azure.upbound.io", "v1beta2", "IOTHub", "IOTHubList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.IOTHubID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.IOTHubIDRef, + Selector: 
mg.Spec.InitProvider.IOTHubIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.IOTHubID") + } + mg.Spec.InitProvider.IOTHubID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.IOTHubIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this IOTSecuritySolution. +func (mg *IOTSecuritySolution) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("devices.azure.upbound.io", "v1beta2", "IOTHub", "IOTHubList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.IOTHubIds), + Extract: resource.ExtractParamPath("id", true), + References: mg.Spec.ForProvider.IOTHubIdsRefs, + Selector: mg.Spec.ForProvider.IOTHubIdsSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.IOTHubIds") + } + mg.Spec.ForProvider.IOTHubIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.IOTHubIdsRefs = mrsp.ResolvedReferences + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: 
mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("devices.azure.upbound.io", "v1beta2", "IOTHub", "IOTHubList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.IOTHubIds), + Extract: resource.ExtractParamPath("id", true), + References: mg.Spec.InitProvider.IOTHubIdsRefs, + Selector: mg.Spec.InitProvider.IOTHubIdsSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.IOTHubIds") + } + mg.Spec.InitProvider.IOTHubIds = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.IOTHubIdsRefs = mrsp.ResolvedReferences + + return nil +} + +// ResolveReferences of this SecurityCenterAssessment. 
+func (mg *SecurityCenterAssessment) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("security.azure.upbound.io", "v1beta1", "SecurityCenterAssessmentPolicy", "SecurityCenterAssessmentPolicyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.AssessmentPolicyID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.AssessmentPolicyIDRef, + Selector: mg.Spec.ForProvider.AssessmentPolicyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.AssessmentPolicyID") + } + mg.Spec.ForProvider.AssessmentPolicyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.AssessmentPolicyIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("compute.azure.upbound.io", "v1beta2", "LinuxVirtualMachineScaleSet", "LinuxVirtualMachineScaleSetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.TargetResourceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.TargetResourceIDRef, + Selector: mg.Spec.ForProvider.TargetResourceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.TargetResourceID") + } + mg.Spec.ForProvider.TargetResourceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.TargetResourceIDRef = 
rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("security.azure.upbound.io", "v1beta1", "SecurityCenterAssessmentPolicy", "SecurityCenterAssessmentPolicyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.AssessmentPolicyID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.AssessmentPolicyIDRef, + Selector: mg.Spec.InitProvider.AssessmentPolicyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.AssessmentPolicyID") + } + mg.Spec.InitProvider.AssessmentPolicyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.AssessmentPolicyIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("compute.azure.upbound.io", "v1beta2", "LinuxVirtualMachineScaleSet", "LinuxVirtualMachineScaleSetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.TargetResourceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.TargetResourceIDRef, + Selector: mg.Spec.InitProvider.TargetResourceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.TargetResourceID") + } + mg.Spec.InitProvider.TargetResourceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.TargetResourceIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/security/v1beta2/zz_groupversion_info.go b/apis/security/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..620c66b37 --- /dev/null +++ 
b/apis/security/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=security.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "security.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/security/v1beta2/zz_iotsecuritydevicegroup_terraformed.go b/apis/security/v1beta2/zz_iotsecuritydevicegroup_terraformed.go new file mode 100755 index 000000000..ee5980cdc --- /dev/null +++ b/apis/security/v1beta2/zz_iotsecuritydevicegroup_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this IOTSecurityDeviceGroup +func (mg *IOTSecurityDeviceGroup) GetTerraformResourceType() string { + return "azurerm_iot_security_device_group" +} + +// GetConnectionDetailsMapping for this IOTSecurityDeviceGroup +func (tr *IOTSecurityDeviceGroup) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this IOTSecurityDeviceGroup +func (tr *IOTSecurityDeviceGroup) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this IOTSecurityDeviceGroup +func (tr *IOTSecurityDeviceGroup) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this IOTSecurityDeviceGroup +func (tr *IOTSecurityDeviceGroup) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this IOTSecurityDeviceGroup +func (tr *IOTSecurityDeviceGroup) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this IOTSecurityDeviceGroup +func (tr *IOTSecurityDeviceGroup) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this IOTSecurityDeviceGroup +func (tr 
*IOTSecurityDeviceGroup) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this IOTSecurityDeviceGroup +func (tr *IOTSecurityDeviceGroup) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this IOTSecurityDeviceGroup using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *IOTSecurityDeviceGroup) LateInitialize(attrs []byte) (bool, error) { + params := &IOTSecurityDeviceGroupParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *IOTSecurityDeviceGroup) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/security/v1beta2/zz_iotsecuritydevicegroup_types.go b/apis/security/v1beta2/zz_iotsecuritydevicegroup_types.go new file mode 100755 index 000000000..26792a84e --- /dev/null +++ b/apis/security/v1beta2/zz_iotsecuritydevicegroup_types.go @@ -0,0 +1,257 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AllowRuleInitParameters struct { + + // Specifies which IP is not allowed to be connected to in current device group for inbound connection. + // +listType=set + ConnectionFromIpsNotAllowed []*string `json:"connectionFromIpsNotAllowed,omitempty" tf:"connection_from_ips_not_allowed,omitempty"` + + // Specifies which IP is not allowed to be connected to in current device group for outbound connection. + // +listType=set + ConnectionToIpsNotAllowed []*string `json:"connectionToIpsNotAllowed,omitempty" tf:"connection_to_ips_not_allowed,omitempty"` + + // Specifies which local user is not allowed to login in current device group. + // +listType=set + LocalUsersNotAllowed []*string `json:"localUsersNotAllowed,omitempty" tf:"local_users_not_allowed,omitempty"` + + // Specifies which process is not allowed to be executed in current device group. + // +listType=set + ProcessesNotAllowed []*string `json:"processesNotAllowed,omitempty" tf:"processes_not_allowed,omitempty"` +} + +type AllowRuleObservation struct { + + // Specifies which IP is not allowed to be connected to in current device group for inbound connection. 
+ // +listType=set + ConnectionFromIpsNotAllowed []*string `json:"connectionFromIpsNotAllowed,omitempty" tf:"connection_from_ips_not_allowed,omitempty"` + + // Specifies which IP is not allowed to be connected to in current device group for outbound connection. + // +listType=set + ConnectionToIpsNotAllowed []*string `json:"connectionToIpsNotAllowed,omitempty" tf:"connection_to_ips_not_allowed,omitempty"` + + // Specifies which local user is not allowed to login in current device group. + // +listType=set + LocalUsersNotAllowed []*string `json:"localUsersNotAllowed,omitempty" tf:"local_users_not_allowed,omitempty"` + + // Specifies which process is not allowed to be executed in current device group. + // +listType=set + ProcessesNotAllowed []*string `json:"processesNotAllowed,omitempty" tf:"processes_not_allowed,omitempty"` +} + +type AllowRuleParameters struct { + + // Specifies which IP is not allowed to be connected to in current device group for inbound connection. + // +kubebuilder:validation:Optional + // +listType=set + ConnectionFromIpsNotAllowed []*string `json:"connectionFromIpsNotAllowed,omitempty" tf:"connection_from_ips_not_allowed,omitempty"` + + // Specifies which IP is not allowed to be connected to in current device group for outbound connection. + // +kubebuilder:validation:Optional + // +listType=set + ConnectionToIpsNotAllowed []*string `json:"connectionToIpsNotAllowed,omitempty" tf:"connection_to_ips_not_allowed,omitempty"` + + // Specifies which local user is not allowed to login in current device group. + // +kubebuilder:validation:Optional + // +listType=set + LocalUsersNotAllowed []*string `json:"localUsersNotAllowed,omitempty" tf:"local_users_not_allowed,omitempty"` + + // Specifies which process is not allowed to be executed in current device group. 
+ // +kubebuilder:validation:Optional + // +listType=set + ProcessesNotAllowed []*string `json:"processesNotAllowed,omitempty" tf:"processes_not_allowed,omitempty"` +} + +type IOTSecurityDeviceGroupInitParameters struct { + + // an allow_rule blocks as defined below. + AllowRule *AllowRuleInitParameters `json:"allowRule,omitempty" tf:"allow_rule,omitempty"` + + // The ID of the IoT Hub which to link the Security Device Group to. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devices/v1beta2.IOTHub + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + IOTHubID *string `json:"iothubId,omitempty" tf:"iothub_id,omitempty"` + + // Reference to a IOTHub in devices to populate iothubId. + // +kubebuilder:validation:Optional + IOTHubIDRef *v1.Reference `json:"iothubIdRef,omitempty" tf:"-"` + + // Selector for a IOTHub in devices to populate iothubId. + // +kubebuilder:validation:Optional + IOTHubIDSelector *v1.Selector `json:"iothubIdSelector,omitempty" tf:"-"` + + // Specifies the name of the Device Security Group. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // One or more range_rule blocks as defined below. + RangeRule []RangeRuleInitParameters `json:"rangeRule,omitempty" tf:"range_rule,omitempty"` +} + +type IOTSecurityDeviceGroupObservation struct { + + // an allow_rule blocks as defined below. + AllowRule *AllowRuleObservation `json:"allowRule,omitempty" tf:"allow_rule,omitempty"` + + // The ID of the Iot Security Device Group resource. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The ID of the IoT Hub which to link the Security Device Group to. Changing this forces a new resource to be created. + IOTHubID *string `json:"iothubId,omitempty" tf:"iothub_id,omitempty"` + + // Specifies the name of the Device Security Group. 
Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // One or more range_rule blocks as defined below. + RangeRule []RangeRuleObservation `json:"rangeRule,omitempty" tf:"range_rule,omitempty"` +} + +type IOTSecurityDeviceGroupParameters struct { + + // an allow_rule blocks as defined below. + // +kubebuilder:validation:Optional + AllowRule *AllowRuleParameters `json:"allowRule,omitempty" tf:"allow_rule,omitempty"` + + // The ID of the IoT Hub which to link the Security Device Group to. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devices/v1beta2.IOTHub + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + IOTHubID *string `json:"iothubId,omitempty" tf:"iothub_id,omitempty"` + + // Reference to a IOTHub in devices to populate iothubId. + // +kubebuilder:validation:Optional + IOTHubIDRef *v1.Reference `json:"iothubIdRef,omitempty" tf:"-"` + + // Selector for a IOTHub in devices to populate iothubId. + // +kubebuilder:validation:Optional + IOTHubIDSelector *v1.Selector `json:"iothubIdSelector,omitempty" tf:"-"` + + // Specifies the name of the Device Security Group. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // One or more range_rule blocks as defined below. + // +kubebuilder:validation:Optional + RangeRule []RangeRuleParameters `json:"rangeRule,omitempty" tf:"range_rule,omitempty"` +} + +type RangeRuleInitParameters struct { + + // Specifies the time range. represented in ISO 8601 duration format. + Duration *string `json:"duration,omitempty" tf:"duration,omitempty"` + + // The maximum threshold in the given time window. 
+ Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // The minimum threshold in the given time window. + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` + + // The type of supported rule type. Possible Values are ActiveConnectionsNotInAllowedRange, AmqpC2DMessagesNotInAllowedRange, MqttC2DMessagesNotInAllowedRange, HttpC2DMessagesNotInAllowedRange, AmqpC2DRejectedMessagesNotInAllowedRange, MqttC2DRejectedMessagesNotInAllowedRange, HttpC2DRejectedMessagesNotInAllowedRange, AmqpD2CMessagesNotInAllowedRange, MqttD2CMessagesNotInAllowedRange, HttpD2CMessagesNotInAllowedRange, DirectMethodInvokesNotInAllowedRange, FailedLocalLoginsNotInAllowedRange, FileUploadsNotInAllowedRange, QueuePurgesNotInAllowedRange, TwinUpdatesNotInAllowedRange and UnauthorizedOperationsNotInAllowedRange. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type RangeRuleObservation struct { + + // Specifies the time range. represented in ISO 8601 duration format. + Duration *string `json:"duration,omitempty" tf:"duration,omitempty"` + + // The maximum threshold in the given time window. + Max *float64 `json:"max,omitempty" tf:"max,omitempty"` + + // The minimum threshold in the given time window. + Min *float64 `json:"min,omitempty" tf:"min,omitempty"` + + // The type of supported rule type. Possible Values are ActiveConnectionsNotInAllowedRange, AmqpC2DMessagesNotInAllowedRange, MqttC2DMessagesNotInAllowedRange, HttpC2DMessagesNotInAllowedRange, AmqpC2DRejectedMessagesNotInAllowedRange, MqttC2DRejectedMessagesNotInAllowedRange, HttpC2DRejectedMessagesNotInAllowedRange, AmqpD2CMessagesNotInAllowedRange, MqttD2CMessagesNotInAllowedRange, HttpD2CMessagesNotInAllowedRange, DirectMethodInvokesNotInAllowedRange, FailedLocalLoginsNotInAllowedRange, FileUploadsNotInAllowedRange, QueuePurgesNotInAllowedRange, TwinUpdatesNotInAllowedRange and UnauthorizedOperationsNotInAllowedRange. 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type RangeRuleParameters struct { + + // Specifies the time range. represented in ISO 8601 duration format. + // +kubebuilder:validation:Optional + Duration *string `json:"duration" tf:"duration,omitempty"` + + // The maximum threshold in the given time window. + // +kubebuilder:validation:Optional + Max *float64 `json:"max" tf:"max,omitempty"` + + // The minimum threshold in the given time window. + // +kubebuilder:validation:Optional + Min *float64 `json:"min" tf:"min,omitempty"` + + // The type of supported rule type. Possible Values are ActiveConnectionsNotInAllowedRange, AmqpC2DMessagesNotInAllowedRange, MqttC2DMessagesNotInAllowedRange, HttpC2DMessagesNotInAllowedRange, AmqpC2DRejectedMessagesNotInAllowedRange, MqttC2DRejectedMessagesNotInAllowedRange, HttpC2DRejectedMessagesNotInAllowedRange, AmqpD2CMessagesNotInAllowedRange, MqttD2CMessagesNotInAllowedRange, HttpD2CMessagesNotInAllowedRange, DirectMethodInvokesNotInAllowedRange, FailedLocalLoginsNotInAllowedRange, FileUploadsNotInAllowedRange, QueuePurgesNotInAllowedRange, TwinUpdatesNotInAllowedRange and UnauthorizedOperationsNotInAllowedRange. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +// IOTSecurityDeviceGroupSpec defines the desired state of IOTSecurityDeviceGroup +type IOTSecurityDeviceGroupSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider IOTSecurityDeviceGroupParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider IOTSecurityDeviceGroupInitParameters `json:"initProvider,omitempty"` +} + +// IOTSecurityDeviceGroupStatus defines the observed state of IOTSecurityDeviceGroup. +type IOTSecurityDeviceGroupStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider IOTSecurityDeviceGroupObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// IOTSecurityDeviceGroup is the Schema for the IOTSecurityDeviceGroups API. Manages a Iot Security Device Group. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type IOTSecurityDeviceGroup struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec IOTSecurityDeviceGroupSpec `json:"spec"` + Status IOTSecurityDeviceGroupStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// IOTSecurityDeviceGroupList contains a list of IOTSecurityDeviceGroups +type IOTSecurityDeviceGroupList struct { + metav1.TypeMeta `json:",inline"` + 
metav1.ListMeta `json:"metadata,omitempty"` + Items []IOTSecurityDeviceGroup `json:"items"` +} + +// Repository type metadata. +var ( + IOTSecurityDeviceGroup_Kind = "IOTSecurityDeviceGroup" + IOTSecurityDeviceGroup_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: IOTSecurityDeviceGroup_Kind}.String() + IOTSecurityDeviceGroup_KindAPIVersion = IOTSecurityDeviceGroup_Kind + "." + CRDGroupVersion.String() + IOTSecurityDeviceGroup_GroupVersionKind = CRDGroupVersion.WithKind(IOTSecurityDeviceGroup_Kind) +) + +func init() { + SchemeBuilder.Register(&IOTSecurityDeviceGroup{}, &IOTSecurityDeviceGroupList{}) +} diff --git a/apis/security/v1beta2/zz_iotsecuritysolution_terraformed.go b/apis/security/v1beta2/zz_iotsecuritysolution_terraformed.go new file mode 100755 index 000000000..66c6b306b --- /dev/null +++ b/apis/security/v1beta2/zz_iotsecuritysolution_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this IOTSecuritySolution +func (mg *IOTSecuritySolution) GetTerraformResourceType() string { + return "azurerm_iot_security_solution" +} + +// GetConnectionDetailsMapping for this IOTSecuritySolution +func (tr *IOTSecuritySolution) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this IOTSecuritySolution +func (tr *IOTSecuritySolution) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this IOTSecuritySolution +func (tr *IOTSecuritySolution) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this IOTSecuritySolution +func (tr *IOTSecuritySolution) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this IOTSecuritySolution +func (tr *IOTSecuritySolution) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this IOTSecuritySolution +func (tr *IOTSecuritySolution) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this IOTSecuritySolution +func (tr *IOTSecuritySolution) GetInitParameters() 
(map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this IOTSecuritySolution +func (tr *IOTSecuritySolution) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this IOTSecuritySolution using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *IOTSecuritySolution) LateInitialize(attrs []byte) (bool, error) { + params := &IOTSecuritySolutionParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *IOTSecuritySolution) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/security/v1beta2/zz_iotsecuritysolution_types.go b/apis/security/v1beta2/zz_iotsecuritysolution_types.go new file mode 100755 index 000000000..ab9a80dcd --- /dev/null +++ b/apis/security/v1beta2/zz_iotsecuritysolution_types.go @@ -0,0 +1,481 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AdditionalWorkspaceInitParameters struct { + + // A list of data types which sent to workspace. Possible values are Alerts and RawEvents. + // +listType=set + DataTypes []*string `json:"dataTypes,omitempty" tf:"data_types,omitempty"` + + // The resource ID of the Log Analytics Workspace. + WorkspaceID *string `json:"workspaceId,omitempty" tf:"workspace_id,omitempty"` +} + +type AdditionalWorkspaceObservation struct { + + // A list of data types which sent to workspace. Possible values are Alerts and RawEvents. + // +listType=set + DataTypes []*string `json:"dataTypes,omitempty" tf:"data_types,omitempty"` + + // The resource ID of the Log Analytics Workspace. + WorkspaceID *string `json:"workspaceId,omitempty" tf:"workspace_id,omitempty"` +} + +type AdditionalWorkspaceParameters struct { + + // A list of data types which sent to workspace. Possible values are Alerts and RawEvents. + // +kubebuilder:validation:Optional + // +listType=set + DataTypes []*string `json:"dataTypes" tf:"data_types,omitempty"` + + // The resource ID of the Log Analytics Workspace. 
+ // +kubebuilder:validation:Optional + WorkspaceID *string `json:"workspaceId" tf:"workspace_id,omitempty"` +} + +type IOTSecuritySolutionInitParameters struct { + + // A additional_workspace block as defined below. + AdditionalWorkspace []AdditionalWorkspaceInitParameters `json:"additionalWorkspace,omitempty" tf:"additional_workspace,omitempty"` + + // A list of disabled data sources for the Iot Security Solution. Possible value is TwinData. + // +listType=set + DisabledDataSources []*string `json:"disabledDataSources,omitempty" tf:"disabled_data_sources,omitempty"` + + // Specifies the Display Name for this Iot Security Solution. + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // Is the Iot Security Solution enabled? Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // A list of data which is to exported to analytic workspace. Valid values include RawEvents. + // +listType=set + EventsToExport []*string `json:"eventsToExport,omitempty" tf:"events_to_export,omitempty"` + + // Specifies the IoT Hub resource IDs to which this Iot Security Solution is applied. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devices/v1beta2.IOTHub + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("id",true) + // +listType=set + IOTHubIds []*string `json:"iothubIds,omitempty" tf:"iothub_ids,omitempty"` + + // References to IOTHub in devices to populate iothubIds. + // +kubebuilder:validation:Optional + IOTHubIdsRefs []v1.Reference `json:"iothubIdsRefs,omitempty" tf:"-"` + + // Selector for a list of IOTHub in devices to populate iothubIds. + // +kubebuilder:validation:Optional + IOTHubIdsSelector *v1.Selector `json:"iothubIdsSelector,omitempty" tf:"-"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. 
+ Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the Log Analytics Workspace ID to which the security data will be sent. + LogAnalyticsWorkspaceID *string `json:"logAnalyticsWorkspaceId,omitempty" tf:"log_analytics_workspace_id,omitempty"` + + // Should IP addressed be unmasked in the log? Defaults to false. + LogUnmaskedIpsEnabled *bool `json:"logUnmaskedIpsEnabled,omitempty" tf:"log_unmasked_ips_enabled,omitempty"` + + // Specifies the name of the Iot Security Solution. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // An Azure Resource Graph query used to set the resources monitored. + QueryForResources *string `json:"queryForResources,omitempty" tf:"query_for_resources,omitempty"` + + // A list of subscription Ids on which the user defined resources query should be executed. + // +listType=set + QuerySubscriptionIds []*string `json:"querySubscriptionIds,omitempty" tf:"query_subscription_ids,omitempty"` + + // A recommendations_enabled block of options to enable or disable as defined below. + RecommendationsEnabled *RecommendationsEnabledInitParameters `json:"recommendationsEnabled,omitempty" tf:"recommendations_enabled,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type IOTSecuritySolutionObservation struct { + + // A additional_workspace block as defined below. + AdditionalWorkspace []AdditionalWorkspaceObservation `json:"additionalWorkspace,omitempty" tf:"additional_workspace,omitempty"` + + // A list of disabled data sources for the Iot Security Solution. Possible value is TwinData. + // +listType=set + DisabledDataSources []*string `json:"disabledDataSources,omitempty" tf:"disabled_data_sources,omitempty"` + + // Specifies the Display Name for this Iot Security Solution. 
+ DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // Is the Iot Security Solution enabled? Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // A list of data which is to exported to analytic workspace. Valid values include RawEvents. + // +listType=set + EventsToExport []*string `json:"eventsToExport,omitempty" tf:"events_to_export,omitempty"` + + // The ID of the Iot Security Solution resource. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies the IoT Hub resource IDs to which this Iot Security Solution is applied. + // +listType=set + IOTHubIds []*string `json:"iothubIds,omitempty" tf:"iothub_ids,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the Log Analytics Workspace ID to which the security data will be sent. + LogAnalyticsWorkspaceID *string `json:"logAnalyticsWorkspaceId,omitempty" tf:"log_analytics_workspace_id,omitempty"` + + // Should IP addressed be unmasked in the log? Defaults to false. + LogUnmaskedIpsEnabled *bool `json:"logUnmaskedIpsEnabled,omitempty" tf:"log_unmasked_ips_enabled,omitempty"` + + // Specifies the name of the Iot Security Solution. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // An Azure Resource Graph query used to set the resources monitored. + QueryForResources *string `json:"queryForResources,omitempty" tf:"query_for_resources,omitempty"` + + // A list of subscription Ids on which the user defined resources query should be executed. + // +listType=set + QuerySubscriptionIds []*string `json:"querySubscriptionIds,omitempty" tf:"query_subscription_ids,omitempty"` + + // A recommendations_enabled block of options to enable or disable as defined below. 
+ RecommendationsEnabled *RecommendationsEnabledObservation `json:"recommendationsEnabled,omitempty" tf:"recommendations_enabled,omitempty"` + + // Specifies the name of the resource group in which to create the Iot Security Solution. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type IOTSecuritySolutionParameters struct { + + // A additional_workspace block as defined below. + // +kubebuilder:validation:Optional + AdditionalWorkspace []AdditionalWorkspaceParameters `json:"additionalWorkspace,omitempty" tf:"additional_workspace,omitempty"` + + // A list of disabled data sources for the Iot Security Solution. Possible value is TwinData. + // +kubebuilder:validation:Optional + // +listType=set + DisabledDataSources []*string `json:"disabledDataSources,omitempty" tf:"disabled_data_sources,omitempty"` + + // Specifies the Display Name for this Iot Security Solution. + // +kubebuilder:validation:Optional + DisplayName *string `json:"displayName,omitempty" tf:"display_name,omitempty"` + + // Is the Iot Security Solution enabled? Defaults to true. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // A list of data which is to exported to analytic workspace. Valid values include RawEvents. + // +kubebuilder:validation:Optional + // +listType=set + EventsToExport []*string `json:"eventsToExport,omitempty" tf:"events_to_export,omitempty"` + + // Specifies the IoT Hub resource IDs to which this Iot Security Solution is applied. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devices/v1beta2.IOTHub + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("id",true) + // +kubebuilder:validation:Optional + // +listType=set + IOTHubIds []*string `json:"iothubIds,omitempty" tf:"iothub_ids,omitempty"` + + // References to IOTHub in devices to populate iothubIds. + // +kubebuilder:validation:Optional + IOTHubIdsRefs []v1.Reference `json:"iothubIdsRefs,omitempty" tf:"-"` + + // Selector for a list of IOTHub in devices to populate iothubIds. + // +kubebuilder:validation:Optional + IOTHubIdsSelector *v1.Selector `json:"iothubIdsSelector,omitempty" tf:"-"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the Log Analytics Workspace ID to which the security data will be sent. + // +kubebuilder:validation:Optional + LogAnalyticsWorkspaceID *string `json:"logAnalyticsWorkspaceId,omitempty" tf:"log_analytics_workspace_id,omitempty"` + + // Should IP addressed be unmasked in the log? Defaults to false. + // +kubebuilder:validation:Optional + LogUnmaskedIpsEnabled *bool `json:"logUnmaskedIpsEnabled,omitempty" tf:"log_unmasked_ips_enabled,omitempty"` + + // Specifies the name of the Iot Security Solution. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // An Azure Resource Graph query used to set the resources monitored. + // +kubebuilder:validation:Optional + QueryForResources *string `json:"queryForResources,omitempty" tf:"query_for_resources,omitempty"` + + // A list of subscription Ids on which the user defined resources query should be executed. 
+ // +kubebuilder:validation:Optional + // +listType=set + QuerySubscriptionIds []*string `json:"querySubscriptionIds,omitempty" tf:"query_subscription_ids,omitempty"` + + // A recommendations_enabled block of options to enable or disable as defined below. + // +kubebuilder:validation:Optional + RecommendationsEnabled *RecommendationsEnabledParameters `json:"recommendationsEnabled,omitempty" tf:"recommendations_enabled,omitempty"` + + // Specifies the name of the resource group in which to create the Iot Security Solution. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type RecommendationsEnabledInitParameters struct { + + // Is Principal Authentication enabled for the ACR repository? Defaults to true. + AcrAuthentication *bool `json:"acrAuthentication,omitempty" tf:"acr_authentication,omitempty"` + + // Is Agent send underutilized messages enabled? Defaults to true. + AgentSendUnutilizedMsg *bool `json:"agentSendUnutilizedMsg,omitempty" tf:"agent_send_unutilized_msg,omitempty"` + + // Is Security related system configuration issues identified? Defaults to true. 
+ Baseline *bool `json:"baseline,omitempty" tf:"baseline,omitempty"` + + // Is IoT Edge Hub memory optimized? Defaults to true. + EdgeHubMemOptimize *bool `json:"edgeHubMemOptimize,omitempty" tf:"edge_hub_mem_optimize,omitempty"` + + // Is logging configured for IoT Edge module? Defaults to true. + EdgeLoggingOption *bool `json:"edgeLoggingOption,omitempty" tf:"edge_logging_option,omitempty"` + + // Is Default IP filter policy denied? Defaults to true. + IPFilterDenyAll *bool `json:"ipFilterDenyAll,omitempty" tf:"ip_filter_deny_all,omitempty"` + + // Is IP filter rule source allowable IP range too large? Defaults to true. + IPFilterPermissiveRule *bool `json:"ipFilterPermissiveRule,omitempty" tf:"ip_filter_permissive_rule,omitempty"` + + // Is inconsistent module settings enabled for SecurityGroup? Defaults to true. + InconsistentModuleSettings *bool `json:"inconsistentModuleSettings,omitempty" tf:"inconsistent_module_settings,omitempty"` + + // is Azure IoT Security agent installed? Defaults to true. + InstallAgent *bool `json:"installAgent,omitempty" tf:"install_agent,omitempty"` + + // Is any ports open on the device? Defaults to true. + OpenPorts *bool `json:"openPorts,omitempty" tf:"open_ports,omitempty"` + + // Does firewall policy exist which allow necessary communication to/from the device? Defaults to true. + PermissiveFirewallPolicy *bool `json:"permissiveFirewallPolicy,omitempty" tf:"permissive_firewall_policy,omitempty"` + + // Is only necessary addresses or ports are permitted in? Defaults to true. + PermissiveInputFirewallRules *bool `json:"permissiveInputFirewallRules,omitempty" tf:"permissive_input_firewall_rules,omitempty"` + + // Is only necessary addresses or ports are permitted out? Defaults to true. + PermissiveOutputFirewallRules *bool `json:"permissiveOutputFirewallRules,omitempty" tf:"permissive_output_firewall_rules,omitempty"` + + // Is high level permissions are needed for the module? Defaults to true. 
+ PrivilegedDockerOptions *bool `json:"privilegedDockerOptions,omitempty" tf:"privileged_docker_options,omitempty"` + + // Is any credentials shared among devices? Defaults to true. + SharedCredentials *bool `json:"sharedCredentials,omitempty" tf:"shared_credentials,omitempty"` + + // Does TLS cipher suite need to be updated? Defaults to true. + VulnerableTLSCipherSuite *bool `json:"vulnerableTlsCipherSuite,omitempty" tf:"vulnerable_tls_cipher_suite,omitempty"` +} + +type RecommendationsEnabledObservation struct { + + // Is Principal Authentication enabled for the ACR repository? Defaults to true. + AcrAuthentication *bool `json:"acrAuthentication,omitempty" tf:"acr_authentication,omitempty"` + + // Is Agent send underutilized messages enabled? Defaults to true. + AgentSendUnutilizedMsg *bool `json:"agentSendUnutilizedMsg,omitempty" tf:"agent_send_unutilized_msg,omitempty"` + + // Is Security related system configuration issues identified? Defaults to true. + Baseline *bool `json:"baseline,omitempty" tf:"baseline,omitempty"` + + // Is IoT Edge Hub memory optimized? Defaults to true. + EdgeHubMemOptimize *bool `json:"edgeHubMemOptimize,omitempty" tf:"edge_hub_mem_optimize,omitempty"` + + // Is logging configured for IoT Edge module? Defaults to true. + EdgeLoggingOption *bool `json:"edgeLoggingOption,omitempty" tf:"edge_logging_option,omitempty"` + + // Is Default IP filter policy denied? Defaults to true. + IPFilterDenyAll *bool `json:"ipFilterDenyAll,omitempty" tf:"ip_filter_deny_all,omitempty"` + + // Is IP filter rule source allowable IP range too large? Defaults to true. + IPFilterPermissiveRule *bool `json:"ipFilterPermissiveRule,omitempty" tf:"ip_filter_permissive_rule,omitempty"` + + // Is inconsistent module settings enabled for SecurityGroup? Defaults to true. + InconsistentModuleSettings *bool `json:"inconsistentModuleSettings,omitempty" tf:"inconsistent_module_settings,omitempty"` + + // is Azure IoT Security agent installed? Defaults to true. 
+ InstallAgent *bool `json:"installAgent,omitempty" tf:"install_agent,omitempty"` + + // Is any ports open on the device? Defaults to true. + OpenPorts *bool `json:"openPorts,omitempty" tf:"open_ports,omitempty"` + + // Does firewall policy exist which allow necessary communication to/from the device? Defaults to true. + PermissiveFirewallPolicy *bool `json:"permissiveFirewallPolicy,omitempty" tf:"permissive_firewall_policy,omitempty"` + + // Is only necessary addresses or ports are permitted in? Defaults to true. + PermissiveInputFirewallRules *bool `json:"permissiveInputFirewallRules,omitempty" tf:"permissive_input_firewall_rules,omitempty"` + + // Is only necessary addresses or ports are permitted out? Defaults to true. + PermissiveOutputFirewallRules *bool `json:"permissiveOutputFirewallRules,omitempty" tf:"permissive_output_firewall_rules,omitempty"` + + // Is high level permissions are needed for the module? Defaults to true. + PrivilegedDockerOptions *bool `json:"privilegedDockerOptions,omitempty" tf:"privileged_docker_options,omitempty"` + + // Is any credentials shared among devices? Defaults to true. + SharedCredentials *bool `json:"sharedCredentials,omitempty" tf:"shared_credentials,omitempty"` + + // Does TLS cipher suite need to be updated? Defaults to true. + VulnerableTLSCipherSuite *bool `json:"vulnerableTlsCipherSuite,omitempty" tf:"vulnerable_tls_cipher_suite,omitempty"` +} + +type RecommendationsEnabledParameters struct { + + // Is Principal Authentication enabled for the ACR repository? Defaults to true. + // +kubebuilder:validation:Optional + AcrAuthentication *bool `json:"acrAuthentication,omitempty" tf:"acr_authentication,omitempty"` + + // Is Agent send underutilized messages enabled? Defaults to true. + // +kubebuilder:validation:Optional + AgentSendUnutilizedMsg *bool `json:"agentSendUnutilizedMsg,omitempty" tf:"agent_send_unutilized_msg,omitempty"` + + // Is Security related system configuration issues identified? Defaults to true. 
+ // +kubebuilder:validation:Optional + Baseline *bool `json:"baseline,omitempty" tf:"baseline,omitempty"` + + // Is IoT Edge Hub memory optimized? Defaults to true. + // +kubebuilder:validation:Optional + EdgeHubMemOptimize *bool `json:"edgeHubMemOptimize,omitempty" tf:"edge_hub_mem_optimize,omitempty"` + + // Is logging configured for IoT Edge module? Defaults to true. + // +kubebuilder:validation:Optional + EdgeLoggingOption *bool `json:"edgeLoggingOption,omitempty" tf:"edge_logging_option,omitempty"` + + // Is Default IP filter policy denied? Defaults to true. + // +kubebuilder:validation:Optional + IPFilterDenyAll *bool `json:"ipFilterDenyAll,omitempty" tf:"ip_filter_deny_all,omitempty"` + + // Is IP filter rule source allowable IP range too large? Defaults to true. + // +kubebuilder:validation:Optional + IPFilterPermissiveRule *bool `json:"ipFilterPermissiveRule,omitempty" tf:"ip_filter_permissive_rule,omitempty"` + + // Is inconsistent module settings enabled for SecurityGroup? Defaults to true. + // +kubebuilder:validation:Optional + InconsistentModuleSettings *bool `json:"inconsistentModuleSettings,omitempty" tf:"inconsistent_module_settings,omitempty"` + + // is Azure IoT Security agent installed? Defaults to true. + // +kubebuilder:validation:Optional + InstallAgent *bool `json:"installAgent,omitempty" tf:"install_agent,omitempty"` + + // Is any ports open on the device? Defaults to true. + // +kubebuilder:validation:Optional + OpenPorts *bool `json:"openPorts,omitempty" tf:"open_ports,omitempty"` + + // Does firewall policy exist which allow necessary communication to/from the device? Defaults to true. + // +kubebuilder:validation:Optional + PermissiveFirewallPolicy *bool `json:"permissiveFirewallPolicy,omitempty" tf:"permissive_firewall_policy,omitempty"` + + // Is only necessary addresses or ports are permitted in? Defaults to true. 
+ // +kubebuilder:validation:Optional + PermissiveInputFirewallRules *bool `json:"permissiveInputFirewallRules,omitempty" tf:"permissive_input_firewall_rules,omitempty"` + + // Is only necessary addresses or ports are permitted out? Defaults to true. + // +kubebuilder:validation:Optional + PermissiveOutputFirewallRules *bool `json:"permissiveOutputFirewallRules,omitempty" tf:"permissive_output_firewall_rules,omitempty"` + + // Is high level permissions are needed for the module? Defaults to true. + // +kubebuilder:validation:Optional + PrivilegedDockerOptions *bool `json:"privilegedDockerOptions,omitempty" tf:"privileged_docker_options,omitempty"` + + // Is any credentials shared among devices? Defaults to true. + // +kubebuilder:validation:Optional + SharedCredentials *bool `json:"sharedCredentials,omitempty" tf:"shared_credentials,omitempty"` + + // Does TLS cipher suite need to be updated? Defaults to true. + // +kubebuilder:validation:Optional + VulnerableTLSCipherSuite *bool `json:"vulnerableTlsCipherSuite,omitempty" tf:"vulnerable_tls_cipher_suite,omitempty"` +} + +// IOTSecuritySolutionSpec defines the desired state of IOTSecuritySolution +type IOTSecuritySolutionSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider IOTSecuritySolutionParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider IOTSecuritySolutionInitParameters `json:"initProvider,omitempty"` +} + +// IOTSecuritySolutionStatus defines the observed state of IOTSecuritySolution. +type IOTSecuritySolutionStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider IOTSecuritySolutionObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// IOTSecuritySolution is the Schema for the IOTSecuritySolutions API. Manages an iot security solution. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type IOTSecuritySolution struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.displayName) || (has(self.initProvider) && has(self.initProvider.displayName))",message="spec.forProvider.displayName is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || 
(has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec IOTSecuritySolutionSpec `json:"spec"` + Status IOTSecuritySolutionStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// IOTSecuritySolutionList contains a list of IOTSecuritySolutions +type IOTSecuritySolutionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []IOTSecuritySolution `json:"items"` +} + +// Repository type metadata. +var ( + IOTSecuritySolution_Kind = "IOTSecuritySolution" + IOTSecuritySolution_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: IOTSecuritySolution_Kind}.String() + IOTSecuritySolution_KindAPIVersion = IOTSecuritySolution_Kind + "." + CRDGroupVersion.String() + IOTSecuritySolution_GroupVersionKind = CRDGroupVersion.WithKind(IOTSecuritySolution_Kind) +) + +func init() { + SchemeBuilder.Register(&IOTSecuritySolution{}, &IOTSecuritySolutionList{}) +} diff --git a/apis/security/v1beta2/zz_securitycenterassessment_terraformed.go b/apis/security/v1beta2/zz_securitycenterassessment_terraformed.go new file mode 100755 index 000000000..11eb8107f --- /dev/null +++ b/apis/security/v1beta2/zz_securitycenterassessment_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this SecurityCenterAssessment +func (mg *SecurityCenterAssessment) GetTerraformResourceType() string { + return "azurerm_security_center_assessment" +} + +// GetConnectionDetailsMapping for this SecurityCenterAssessment +func (tr *SecurityCenterAssessment) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this SecurityCenterAssessment +func (tr *SecurityCenterAssessment) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this SecurityCenterAssessment +func (tr *SecurityCenterAssessment) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this SecurityCenterAssessment +func (tr *SecurityCenterAssessment) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this SecurityCenterAssessment +func (tr *SecurityCenterAssessment) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this SecurityCenterAssessment +func (tr *SecurityCenterAssessment) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this 
SecurityCenterAssessment +func (tr *SecurityCenterAssessment) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this SecurityCenterAssessment +func (tr *SecurityCenterAssessment) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this SecurityCenterAssessment using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *SecurityCenterAssessment) LateInitialize(attrs []byte) (bool, error) { + params := &SecurityCenterAssessmentParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *SecurityCenterAssessment) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/security/v1beta2/zz_securitycenterassessment_types.go b/apis/security/v1beta2/zz_securitycenterassessment_types.go new file mode 100755 index 000000000..9d80b13db --- /dev/null +++ b/apis/security/v1beta2/zz_securitycenterassessment_types.go @@ -0,0 +1,209 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type SecurityCenterAssessmentInitParameters struct { + + // A map of additional data to associate with the assessment. + // +mapType=granular + AdditionalData map[string]*string `json:"additionalData,omitempty" tf:"additional_data,omitempty"` + + // The ID of the security Assessment policy to apply to this resource. Changing this forces a new security Assessment to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/security/v1beta1.SecurityCenterAssessmentPolicy + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + AssessmentPolicyID *string `json:"assessmentPolicyId,omitempty" tf:"assessment_policy_id,omitempty"` + + // Reference to a SecurityCenterAssessmentPolicy in security to populate assessmentPolicyId. + // +kubebuilder:validation:Optional + AssessmentPolicyIDRef *v1.Reference `json:"assessmentPolicyIdRef,omitempty" tf:"-"` + + // Selector for a SecurityCenterAssessmentPolicy in security to populate assessmentPolicyId. + // +kubebuilder:validation:Optional + AssessmentPolicyIDSelector *v1.Selector `json:"assessmentPolicyIdSelector,omitempty" tf:"-"` + + // A status block as defined below. + Status *StatusInitParameters `json:"status,omitempty" tf:"status,omitempty"` + + // The ID of the target resource. Changing this forces a new security Assessment to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/compute/v1beta2.LinuxVirtualMachineScaleSet + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + TargetResourceID *string `json:"targetResourceId,omitempty" tf:"target_resource_id,omitempty"` + + // Reference to a LinuxVirtualMachineScaleSet in compute to populate targetResourceId. + // +kubebuilder:validation:Optional + TargetResourceIDRef *v1.Reference `json:"targetResourceIdRef,omitempty" tf:"-"` + + // Selector for a LinuxVirtualMachineScaleSet in compute to populate targetResourceId. + // +kubebuilder:validation:Optional + TargetResourceIDSelector *v1.Selector `json:"targetResourceIdSelector,omitempty" tf:"-"` +} + +type SecurityCenterAssessmentObservation struct { + + // A map of additional data to associate with the assessment. 
+ // +mapType=granular + AdditionalData map[string]*string `json:"additionalData,omitempty" tf:"additional_data,omitempty"` + + // The ID of the security Assessment policy to apply to this resource. Changing this forces a new security Assessment to be created. + AssessmentPolicyID *string `json:"assessmentPolicyId,omitempty" tf:"assessment_policy_id,omitempty"` + + // The ID of the Security Center Assessment. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A status block as defined below. + Status *StatusObservation `json:"status,omitempty" tf:"status,omitempty"` + + // The ID of the target resource. Changing this forces a new security Assessment to be created. + TargetResourceID *string `json:"targetResourceId,omitempty" tf:"target_resource_id,omitempty"` +} + +type SecurityCenterAssessmentParameters struct { + + // A map of additional data to associate with the assessment. + // +kubebuilder:validation:Optional + // +mapType=granular + AdditionalData map[string]*string `json:"additionalData,omitempty" tf:"additional_data,omitempty"` + + // The ID of the security Assessment policy to apply to this resource. Changing this forces a new security Assessment to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/security/v1beta1.SecurityCenterAssessmentPolicy + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + AssessmentPolicyID *string `json:"assessmentPolicyId,omitempty" tf:"assessment_policy_id,omitempty"` + + // Reference to a SecurityCenterAssessmentPolicy in security to populate assessmentPolicyId. + // +kubebuilder:validation:Optional + AssessmentPolicyIDRef *v1.Reference `json:"assessmentPolicyIdRef,omitempty" tf:"-"` + + // Selector for a SecurityCenterAssessmentPolicy in security to populate assessmentPolicyId. 
+ // +kubebuilder:validation:Optional + AssessmentPolicyIDSelector *v1.Selector `json:"assessmentPolicyIdSelector,omitempty" tf:"-"` + + // A status block as defined below. + // +kubebuilder:validation:Optional + Status *StatusParameters `json:"status,omitempty" tf:"status,omitempty"` + + // The ID of the target resource. Changing this forces a new security Assessment to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/compute/v1beta2.LinuxVirtualMachineScaleSet + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + TargetResourceID *string `json:"targetResourceId,omitempty" tf:"target_resource_id,omitempty"` + + // Reference to a LinuxVirtualMachineScaleSet in compute to populate targetResourceId. + // +kubebuilder:validation:Optional + TargetResourceIDRef *v1.Reference `json:"targetResourceIdRef,omitempty" tf:"-"` + + // Selector for a LinuxVirtualMachineScaleSet in compute to populate targetResourceId. + // +kubebuilder:validation:Optional + TargetResourceIDSelector *v1.Selector `json:"targetResourceIdSelector,omitempty" tf:"-"` +} + +type StatusInitParameters struct { + + // Specifies the cause of the assessment status. + Cause *string `json:"cause,omitempty" tf:"cause,omitempty"` + + // Specifies the programmatic code of the assessment status. Possible values are Healthy, Unhealthy and NotApplicable. + Code *string `json:"code,omitempty" tf:"code,omitempty"` + + // Specifies the human readable description of the assessment status. + Description *string `json:"description,omitempty" tf:"description,omitempty"` +} + +type StatusObservation struct { + + // Specifies the cause of the assessment status. + Cause *string `json:"cause,omitempty" tf:"cause,omitempty"` + + // Specifies the programmatic code of the assessment status. Possible values are Healthy, Unhealthy and NotApplicable. 
+ Code *string `json:"code,omitempty" tf:"code,omitempty"` + + // Specifies the human readable description of the assessment status. + Description *string `json:"description,omitempty" tf:"description,omitempty"` +} + +type StatusParameters struct { + + // Specifies the cause of the assessment status. + // +kubebuilder:validation:Optional + Cause *string `json:"cause,omitempty" tf:"cause,omitempty"` + + // Specifies the programmatic code of the assessment status. Possible values are Healthy, Unhealthy and NotApplicable. + // +kubebuilder:validation:Optional + Code *string `json:"code" tf:"code,omitempty"` + + // Specifies the human readable description of the assessment status. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` +} + +// SecurityCenterAssessmentSpec defines the desired state of SecurityCenterAssessment +type SecurityCenterAssessmentSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider SecurityCenterAssessmentParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider SecurityCenterAssessmentInitParameters `json:"initProvider,omitempty"` +} + +// SecurityCenterAssessmentStatus defines the observed state of SecurityCenterAssessment. 
+type SecurityCenterAssessmentStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SecurityCenterAssessmentObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// SecurityCenterAssessment is the Schema for the SecurityCenterAssessments API. Manages the Security Center Assessment for Azure Security Center. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type SecurityCenterAssessment struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.status) || (has(self.initProvider) && has(self.initProvider.status))",message="spec.forProvider.status is a required parameter" + Spec SecurityCenterAssessmentSpec `json:"spec"` + Status SecurityCenterAssessmentStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SecurityCenterAssessmentList contains a list of SecurityCenterAssessments +type SecurityCenterAssessmentList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SecurityCenterAssessment `json:"items"` +} + +// Repository type metadata. 
+var ( + SecurityCenterAssessment_Kind = "SecurityCenterAssessment" + SecurityCenterAssessment_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: SecurityCenterAssessment_Kind}.String() + SecurityCenterAssessment_KindAPIVersion = SecurityCenterAssessment_Kind + "." + CRDGroupVersion.String() + SecurityCenterAssessment_GroupVersionKind = CRDGroupVersion.WithKind(SecurityCenterAssessment_Kind) +) + +func init() { + SchemeBuilder.Register(&SecurityCenterAssessment{}, &SecurityCenterAssessmentList{}) +} diff --git a/apis/securityinsights/v1beta1/zz_generated.resolvers.go b/apis/securityinsights/v1beta1/zz_generated.resolvers.go index aa624bce5..b10da4fca 100644 --- a/apis/securityinsights/v1beta1/zz_generated.resolvers.go +++ b/apis/securityinsights/v1beta1/zz_generated.resolvers.go @@ -26,7 +26,7 @@ func (mg *SentinelAlertRuleFusion) ResolveReferences( // ResolveReferences of th var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("operationsmanagement.azure.upbound.io", "v1beta1", "LogAnalyticsSolution", "LogAnalyticsSolutionList") + m, l, err = apisresolver.GetManagedResource("operationsmanagement.azure.upbound.io", "v1beta2", "LogAnalyticsSolution", "LogAnalyticsSolutionList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -45,7 +45,7 @@ func (mg *SentinelAlertRuleFusion) ResolveReferences( // ResolveReferences of th mg.Spec.ForProvider.LogAnalyticsWorkspaceID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.LogAnalyticsWorkspaceIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("operationsmanagement.azure.upbound.io", "v1beta1", "LogAnalyticsSolution", "LogAnalyticsSolutionList") + m, l, err = apisresolver.GetManagedResource("operationsmanagement.azure.upbound.io", "v1beta2", "LogAnalyticsSolution", "LogAnalyticsSolutionList") if err != nil { return errors.Wrap(err, "failed to get the 
reference target managed resource and its list for reference resolution") } @@ -107,7 +107,7 @@ func (mg *SentinelAlertRuleMachineLearningBehaviorAnalytics) ResolveReferences(c var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("operationalinsights.azure.upbound.io", "v1beta1", "Workspace", "WorkspaceList") + m, l, err = apisresolver.GetManagedResource("operationalinsights.azure.upbound.io", "v1beta2", "Workspace", "WorkspaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -126,7 +126,7 @@ func (mg *SentinelAlertRuleMachineLearningBehaviorAnalytics) ResolveReferences(c mg.Spec.ForProvider.LogAnalyticsWorkspaceID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.LogAnalyticsWorkspaceIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("operationalinsights.azure.upbound.io", "v1beta1", "Workspace", "WorkspaceList") + m, l, err = apisresolver.GetManagedResource("operationalinsights.azure.upbound.io", "v1beta2", "Workspace", "WorkspaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -257,7 +257,7 @@ func (mg *SentinelLogAnalyticsWorkspaceOnboarding) ResolveReferences(ctx context mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("operationalinsights.azure.upbound.io", "v1beta1", "Workspace", "WorkspaceList") + m, l, err = apisresolver.GetManagedResource("operationalinsights.azure.upbound.io", "v1beta2", "Workspace", "WorkspaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -295,7 +295,7 @@ func (mg *SentinelLogAnalyticsWorkspaceOnboarding) ResolveReferences(ctx 
context mg.Spec.InitProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("operationalinsights.azure.upbound.io", "v1beta1", "Workspace", "WorkspaceList") + m, l, err = apisresolver.GetManagedResource("operationalinsights.azure.upbound.io", "v1beta2", "Workspace", "WorkspaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/securityinsights/v1beta1/zz_sentinelalertrulefusion_types.go b/apis/securityinsights/v1beta1/zz_sentinelalertrulefusion_types.go index 567e1ffcf..ae16b6ce8 100755 --- a/apis/securityinsights/v1beta1/zz_sentinelalertrulefusion_types.go +++ b/apis/securityinsights/v1beta1/zz_sentinelalertrulefusion_types.go @@ -22,7 +22,7 @@ type SentinelAlertRuleFusionInitParameters struct { Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` // The ID of the Log Analytics Workspace this Sentinel Fusion Alert Rule belongs to. Changing this forces a new Sentinel Fusion Alert Rule to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/operationsmanagement/v1beta1.LogAnalyticsSolution + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/operationsmanagement/v1beta2.LogAnalyticsSolution // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("workspace_resource_id",false) LogAnalyticsWorkspaceID *string `json:"logAnalyticsWorkspaceId,omitempty" tf:"log_analytics_workspace_id,omitempty"` @@ -73,7 +73,7 @@ type SentinelAlertRuleFusionParameters struct { Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` // The ID of the Log Analytics Workspace this Sentinel Fusion Alert Rule belongs to. Changing this forces a new Sentinel Fusion Alert Rule to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/operationsmanagement/v1beta1.LogAnalyticsSolution + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/operationsmanagement/v1beta2.LogAnalyticsSolution // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("workspace_resource_id",false) // +kubebuilder:validation:Optional LogAnalyticsWorkspaceID *string `json:"logAnalyticsWorkspaceId,omitempty" tf:"log_analytics_workspace_id,omitempty"` diff --git a/apis/securityinsights/v1beta1/zz_sentinelalertrulemachinelearningbehavioranalytics_types.go b/apis/securityinsights/v1beta1/zz_sentinelalertrulemachinelearningbehavioranalytics_types.go index a973fd08a..35c240554 100755 --- a/apis/securityinsights/v1beta1/zz_sentinelalertrulemachinelearningbehavioranalytics_types.go +++ b/apis/securityinsights/v1beta1/zz_sentinelalertrulemachinelearningbehavioranalytics_types.go @@ -22,7 +22,7 @@ type SentinelAlertRuleMachineLearningBehaviorAnalyticsInitParameters struct { Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` // The ID of the Log Analytics Workspace this SentinelMachine Learning Behavior Analytics Alert Rule belongs to. Changing this forces a new Sentinel Machine Learning Behavior Analytics Alert Rule to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/operationalinsights/v1beta1.Workspace + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/operationalinsights/v1beta2.Workspace // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() LogAnalyticsWorkspaceID *string `json:"logAnalyticsWorkspaceId,omitempty" tf:"log_analytics_workspace_id,omitempty"` @@ -67,7 +67,7 @@ type SentinelAlertRuleMachineLearningBehaviorAnalyticsParameters struct { Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` // The ID of the Log Analytics Workspace this SentinelMachine Learning Behavior Analytics Alert Rule belongs to. Changing this forces a new Sentinel Machine Learning Behavior Analytics Alert Rule to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/operationalinsights/v1beta1.Workspace + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/operationalinsights/v1beta2.Workspace // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional LogAnalyticsWorkspaceID *string `json:"logAnalyticsWorkspaceId,omitempty" tf:"log_analytics_workspace_id,omitempty"` diff --git a/apis/securityinsights/v1beta1/zz_sentinelloganalyticsworkspaceonboarding_types.go b/apis/securityinsights/v1beta1/zz_sentinelloganalyticsworkspaceonboarding_types.go index 1d540bab4..b52ce817b 100755 --- a/apis/securityinsights/v1beta1/zz_sentinelloganalyticsworkspaceonboarding_types.go +++ b/apis/securityinsights/v1beta1/zz_sentinelloganalyticsworkspaceonboarding_types.go @@ -34,7 +34,7 @@ type SentinelLogAnalyticsWorkspaceOnboardingInitParameters struct { WorkspaceID *string `json:"workspaceId,omitempty" tf:"workspace_id,omitempty"` // Specifies the Workspace Name. Changing this forces the Log Analytics Workspace off the board and onboard again. 
Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/operationalinsights/v1beta1.Workspace + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/operationalinsights/v1beta2.Workspace WorkspaceName *string `json:"workspaceName,omitempty" tf:"workspace_name,omitempty"` // Reference to a Workspace in operationalinsights to populate workspaceName. @@ -88,7 +88,7 @@ type SentinelLogAnalyticsWorkspaceOnboardingParameters struct { WorkspaceID *string `json:"workspaceId,omitempty" tf:"workspace_id,omitempty"` // Specifies the Workspace Name. Changing this forces the Log Analytics Workspace off the board and onboard again. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/operationalinsights/v1beta1.Workspace + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/operationalinsights/v1beta2.Workspace // +kubebuilder:validation:Optional WorkspaceName *string `json:"workspaceName,omitempty" tf:"workspace_name,omitempty"` diff --git a/apis/servicebus/v1beta1/zz_generated.conversion_hubs.go b/apis/servicebus/v1beta1/zz_generated.conversion_hubs.go index b611fa1a9..1d8f77947 100755 --- a/apis/servicebus/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/servicebus/v1beta1/zz_generated.conversion_hubs.go @@ -6,9 +6,6 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *ServiceBusNamespace) Hub() {} - // Hub marks this type as a conversion hub. func (tr *NamespaceAuthorizationRule) Hub() {} @@ -24,12 +21,6 @@ func (tr *Queue) Hub() {} // Hub marks this type as a conversion hub. func (tr *QueueAuthorizationRule) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *Subscription) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *SubscriptionRule) Hub() {} - // Hub marks this type as a conversion hub. 
func (tr *Topic) Hub() {} diff --git a/apis/servicebus/v1beta1/zz_generated.conversion_spokes.go b/apis/servicebus/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..0e3422adb --- /dev/null +++ b/apis/servicebus/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,74 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this ServiceBusNamespace to the hub type. +func (tr *ServiceBusNamespace) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ServiceBusNamespace type. +func (tr *ServiceBusNamespace) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Subscription to the hub type. 
+func (tr *Subscription) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Subscription type. +func (tr *Subscription) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this SubscriptionRule to the hub type. +func (tr *SubscriptionRule) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the SubscriptionRule type. 
+func (tr *SubscriptionRule) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/servicebus/v1beta1/zz_generated.resolvers.go b/apis/servicebus/v1beta1/zz_generated.resolvers.go index 349b634bc..388678995 100644 --- a/apis/servicebus/v1beta1/zz_generated.resolvers.go +++ b/apis/servicebus/v1beta1/zz_generated.resolvers.go @@ -12,8 +12,9 @@ import ( resource "github.com/crossplane/upjet/pkg/resource" errors "github.com/pkg/errors" - xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" rconfig "github.com/upbound/provider-azure/apis/rconfig" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" apisresolver "github.com/upbound/provider-azure/internal/apis" client "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -27,7 +28,7 @@ func (mg *NamespaceAuthorizationRule) ResolveReferences( // ResolveReferences of var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("servicebus.azure.upbound.io", "v1beta1", "ServiceBusNamespace", "ServiceBusNamespaceList") + m, l, err = apisresolver.GetManagedResource("servicebus.azure.upbound.io", "v1beta2", "ServiceBusNamespace", "ServiceBusNamespaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -77,7 +78,7 @@ func (mg *NamespaceDisasterRecoveryConfig) ResolveReferences(ctx context.Context mg.Spec.ForProvider.AliasAuthorizationRuleID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.AliasAuthorizationRuleIDRef = rsp.ResolvedReference { - m, l, err = 
apisresolver.GetManagedResource("servicebus.azure.upbound.io", "v1beta1", "ServiceBusNamespace", "ServiceBusNamespaceList") + m, l, err = apisresolver.GetManagedResource("servicebus.azure.upbound.io", "v1beta2", "ServiceBusNamespace", "ServiceBusNamespaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -96,7 +97,7 @@ func (mg *NamespaceDisasterRecoveryConfig) ResolveReferences(ctx context.Context mg.Spec.ForProvider.PartnerNamespaceID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.PartnerNamespaceIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("servicebus.azure.upbound.io", "v1beta1", "ServiceBusNamespace", "ServiceBusNamespaceList") + m, l, err = apisresolver.GetManagedResource("servicebus.azure.upbound.io", "v1beta2", "ServiceBusNamespace", "ServiceBusNamespaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -134,7 +135,7 @@ func (mg *NamespaceDisasterRecoveryConfig) ResolveReferences(ctx context.Context mg.Spec.InitProvider.AliasAuthorizationRuleID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.AliasAuthorizationRuleIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("servicebus.azure.upbound.io", "v1beta1", "ServiceBusNamespace", "ServiceBusNamespaceList") + m, l, err = apisresolver.GetManagedResource("servicebus.azure.upbound.io", "v1beta2", "ServiceBusNamespace", "ServiceBusNamespaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -165,7 +166,7 @@ func (mg *NamespaceNetworkRuleSet) ResolveReferences(ctx context.Context, c clie var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("servicebus.azure.upbound.io", "v1beta1", "ServiceBusNamespace", 
"ServiceBusNamespaceList") + m, l, err = apisresolver.GetManagedResource("servicebus.azure.upbound.io", "v1beta2", "ServiceBusNamespace", "ServiceBusNamespaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -186,7 +187,7 @@ func (mg *NamespaceNetworkRuleSet) ResolveReferences(ctx context.Context, c clie for i3 := 0; i3 < len(mg.Spec.ForProvider.NetworkRules); i3++ { { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "Subnet", "SubnetList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -206,7 +207,7 @@ func (mg *NamespaceNetworkRuleSet) ResolveReferences(ctx context.Context, c clie } { - m, l, err = apisresolver.GetManagedResource("servicebus.azure.upbound.io", "v1beta1", "ServiceBusNamespace", "ServiceBusNamespaceList") + m, l, err = apisresolver.GetManagedResource("servicebus.azure.upbound.io", "v1beta2", "ServiceBusNamespace", "ServiceBusNamespaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -226,7 +227,7 @@ func (mg *NamespaceNetworkRuleSet) ResolveReferences(ctx context.Context, c clie for i3 := 0; i3 < len(mg.Spec.InitProvider.NetworkRules); i3++ { { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "Subnet", "SubnetList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -258,7 +259,7 @@ func (mg *Queue) ResolveReferences(ctx context.Context, c client.Reader) error { var rsp reference.ResolutionResponse var 
err error { - m, l, err = apisresolver.GetManagedResource("servicebus.azure.upbound.io", "v1beta1", "ServiceBusNamespace", "ServiceBusNamespaceList") + m, l, err = apisresolver.GetManagedResource("servicebus.azure.upbound.io", "v1beta2", "ServiceBusNamespace", "ServiceBusNamespaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -460,7 +461,7 @@ func (mg *Topic) ResolveReferences(ctx context.Context, c client.Reader) error { var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("servicebus.azure.upbound.io", "v1beta1", "ServiceBusNamespace", "ServiceBusNamespaceList") + m, l, err = apisresolver.GetManagedResource("servicebus.azure.upbound.io", "v1beta2", "ServiceBusNamespace", "ServiceBusNamespaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/servicebus/v1beta1/zz_namespaceauthorizationrule_types.go b/apis/servicebus/v1beta1/zz_namespaceauthorizationrule_types.go index d640845ba..ccf10cea4 100755 --- a/apis/servicebus/v1beta1/zz_namespaceauthorizationrule_types.go +++ b/apis/servicebus/v1beta1/zz_namespaceauthorizationrule_types.go @@ -54,7 +54,7 @@ type NamespaceAuthorizationRuleParameters struct { Manage *bool `json:"manage,omitempty" tf:"manage,omitempty"` // Specifies the ID of the ServiceBus Namespace. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/servicebus/v1beta1.ServiceBusNamespace + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/servicebus/v1beta2.ServiceBusNamespace // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional NamespaceID *string `json:"namespaceId,omitempty" tf:"namespace_id,omitempty"` diff --git a/apis/servicebus/v1beta1/zz_namespacedisasterrecoveryconfig_types.go b/apis/servicebus/v1beta1/zz_namespacedisasterrecoveryconfig_types.go index 9591d5cd1..54cec7a59 100755 --- a/apis/servicebus/v1beta1/zz_namespacedisasterrecoveryconfig_types.go +++ b/apis/servicebus/v1beta1/zz_namespacedisasterrecoveryconfig_types.go @@ -29,7 +29,7 @@ type NamespaceDisasterRecoveryConfigInitParameters struct { AliasAuthorizationRuleIDSelector *v1.Selector `json:"aliasAuthorizationRuleIdSelector,omitempty" tf:"-"` // The ID of the Service Bus Namespace to replicate to. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/servicebus/v1beta1.ServiceBusNamespace + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/servicebus/v1beta2.ServiceBusNamespace // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() PartnerNamespaceID *string `json:"partnerNamespaceId,omitempty" tf:"partner_namespace_id,omitempty"` @@ -74,7 +74,7 @@ type NamespaceDisasterRecoveryConfigParameters struct { AliasAuthorizationRuleIDSelector *v1.Selector `json:"aliasAuthorizationRuleIdSelector,omitempty" tf:"-"` // The ID of the Service Bus Namespace to replicate to. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/servicebus/v1beta1.ServiceBusNamespace + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/servicebus/v1beta2.ServiceBusNamespace // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional PartnerNamespaceID *string `json:"partnerNamespaceId,omitempty" tf:"partner_namespace_id,omitempty"` @@ -88,7 +88,7 @@ type NamespaceDisasterRecoveryConfigParameters struct { PartnerNamespaceIDSelector *v1.Selector `json:"partnerNamespaceIdSelector,omitempty" tf:"-"` // The ID of the primary Service Bus Namespace to replicate. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/servicebus/v1beta1.ServiceBusNamespace + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/servicebus/v1beta2.ServiceBusNamespace // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional PrimaryNamespaceID *string `json:"primaryNamespaceId,omitempty" tf:"primary_namespace_id,omitempty"` diff --git a/apis/servicebus/v1beta1/zz_namespacenetworkruleset_types.go b/apis/servicebus/v1beta1/zz_namespacenetworkruleset_types.go index 7f0dca836..464994b59 100755 --- a/apis/servicebus/v1beta1/zz_namespacenetworkruleset_types.go +++ b/apis/servicebus/v1beta1/zz_namespacenetworkruleset_types.go @@ -23,7 +23,7 @@ type NamespaceNetworkRuleSetInitParameters struct { IPRules []*string `json:"ipRules,omitempty" tf:"ip_rules,omitempty"` // Specifies the ServiceBus Namespace ID to which to attach the ServiceBus Namespace Network Rule Set. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/servicebus/v1beta1.ServiceBusNamespace + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/servicebus/v1beta2.ServiceBusNamespace // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() NamespaceID *string `json:"namespaceId,omitempty" tf:"namespace_id,omitempty"` @@ -51,7 +51,7 @@ type NamespaceNetworkRuleSetNetworkRulesInitParameters struct { IgnoreMissingVnetServiceEndpoint *bool `json:"ignoreMissingVnetServiceEndpoint,omitempty" tf:"ignore_missing_vnet_service_endpoint,omitempty"` // The Subnet ID which should be able to access this ServiceBus Namespace. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.Subnet + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` @@ -80,7 +80,7 @@ type NamespaceNetworkRuleSetNetworkRulesParameters struct { IgnoreMissingVnetServiceEndpoint *bool `json:"ignoreMissingVnetServiceEndpoint,omitempty" tf:"ignore_missing_vnet_service_endpoint,omitempty"` // The Subnet ID which should be able to access this ServiceBus Namespace. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.Subnet + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` @@ -131,7 +131,7 @@ type NamespaceNetworkRuleSetParameters struct { IPRules []*string `json:"ipRules,omitempty" tf:"ip_rules,omitempty"` // Specifies the ServiceBus Namespace ID to which to attach the ServiceBus Namespace Network Rule Set. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/servicebus/v1beta1.ServiceBusNamespace + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/servicebus/v1beta2.ServiceBusNamespace // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional NamespaceID *string `json:"namespaceId,omitempty" tf:"namespace_id,omitempty"` diff --git a/apis/servicebus/v1beta1/zz_queue_types.go b/apis/servicebus/v1beta1/zz_queue_types.go index e5695178d..cfbf749c9 100755 --- a/apis/servicebus/v1beta1/zz_queue_types.go +++ b/apis/servicebus/v1beta1/zz_queue_types.go @@ -176,7 +176,7 @@ type QueueParameters struct { MaxSizeInMegabytes *float64 `json:"maxSizeInMegabytes,omitempty" tf:"max_size_in_megabytes,omitempty"` // The ID of the ServiceBus Namespace to create this queue in. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/servicebus/v1beta1.ServiceBusNamespace + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/servicebus/v1beta2.ServiceBusNamespace // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional NamespaceID *string `json:"namespaceId,omitempty" tf:"namespace_id,omitempty"` diff --git a/apis/servicebus/v1beta1/zz_topic_types.go b/apis/servicebus/v1beta1/zz_topic_types.go index d8a7b9ef9..2a30e0795 100755 --- a/apis/servicebus/v1beta1/zz_topic_types.go +++ b/apis/servicebus/v1beta1/zz_topic_types.go @@ -126,7 +126,7 @@ type TopicParameters struct { MaxSizeInMegabytes *float64 `json:"maxSizeInMegabytes,omitempty" tf:"max_size_in_megabytes,omitempty"` // The ID of the ServiceBus Namespace to create this topic in. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/servicebus/v1beta1.ServiceBusNamespace + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/servicebus/v1beta2.ServiceBusNamespace // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional NamespaceID *string `json:"namespaceId,omitempty" tf:"namespace_id,omitempty"` diff --git a/apis/servicebus/v1beta2/zz_generated.conversion_hubs.go b/apis/servicebus/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 000000000..0636bdc70 --- /dev/null +++ b/apis/servicebus/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,16 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *ServiceBusNamespace) Hub() {} + +// Hub marks this type as a conversion hub. 
+func (tr *Subscription) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *SubscriptionRule) Hub() {} diff --git a/apis/servicebus/v1beta2/zz_generated.deepcopy.go b/apis/servicebus/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..ccd8bb3a6 --- /dev/null +++ b/apis/servicebus/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,1709 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientScopedSubscriptionInitParameters) DeepCopyInto(out *ClientScopedSubscriptionInitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.IsClientScopedSubscriptionShareable != nil { + in, out := &in.IsClientScopedSubscriptionShareable, &out.IsClientScopedSubscriptionShareable + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientScopedSubscriptionInitParameters. +func (in *ClientScopedSubscriptionInitParameters) DeepCopy() *ClientScopedSubscriptionInitParameters { + if in == nil { + return nil + } + out := new(ClientScopedSubscriptionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClientScopedSubscriptionObservation) DeepCopyInto(out *ClientScopedSubscriptionObservation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.IsClientScopedSubscriptionDurable != nil { + in, out := &in.IsClientScopedSubscriptionDurable, &out.IsClientScopedSubscriptionDurable + *out = new(bool) + **out = **in + } + if in.IsClientScopedSubscriptionShareable != nil { + in, out := &in.IsClientScopedSubscriptionShareable, &out.IsClientScopedSubscriptionShareable + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientScopedSubscriptionObservation. +func (in *ClientScopedSubscriptionObservation) DeepCopy() *ClientScopedSubscriptionObservation { + if in == nil { + return nil + } + out := new(ClientScopedSubscriptionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientScopedSubscriptionParameters) DeepCopyInto(out *ClientScopedSubscriptionParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.IsClientScopedSubscriptionShareable != nil { + in, out := &in.IsClientScopedSubscriptionShareable, &out.IsClientScopedSubscriptionShareable + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientScopedSubscriptionParameters. +func (in *ClientScopedSubscriptionParameters) DeepCopy() *ClientScopedSubscriptionParameters { + if in == nil { + return nil + } + out := new(ClientScopedSubscriptionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CorrelationFilterInitParameters) DeepCopyInto(out *CorrelationFilterInitParameters) { + *out = *in + if in.ContentType != nil { + in, out := &in.ContentType, &out.ContentType + *out = new(string) + **out = **in + } + if in.CorrelationID != nil { + in, out := &in.CorrelationID, &out.CorrelationID + *out = new(string) + **out = **in + } + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.MessageID != nil { + in, out := &in.MessageID, &out.MessageID + *out = new(string) + **out = **in + } + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ReplyTo != nil { + in, out := &in.ReplyTo, &out.ReplyTo + *out = new(string) + **out = **in + } + if in.ReplyToSessionID != nil { + in, out := &in.ReplyToSessionID, &out.ReplyToSessionID + *out = new(string) + **out = **in + } + if in.SessionID != nil { + in, out := &in.SessionID, &out.SessionID + *out = new(string) + **out = **in + } + if in.To != nil { + in, out := &in.To, &out.To + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CorrelationFilterInitParameters. +func (in *CorrelationFilterInitParameters) DeepCopy() *CorrelationFilterInitParameters { + if in == nil { + return nil + } + out := new(CorrelationFilterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CorrelationFilterObservation) DeepCopyInto(out *CorrelationFilterObservation) { + *out = *in + if in.ContentType != nil { + in, out := &in.ContentType, &out.ContentType + *out = new(string) + **out = **in + } + if in.CorrelationID != nil { + in, out := &in.CorrelationID, &out.CorrelationID + *out = new(string) + **out = **in + } + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.MessageID != nil { + in, out := &in.MessageID, &out.MessageID + *out = new(string) + **out = **in + } + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ReplyTo != nil { + in, out := &in.ReplyTo, &out.ReplyTo + *out = new(string) + **out = **in + } + if in.ReplyToSessionID != nil { + in, out := &in.ReplyToSessionID, &out.ReplyToSessionID + *out = new(string) + **out = **in + } + if in.SessionID != nil { + in, out := &in.SessionID, &out.SessionID + *out = new(string) + **out = **in + } + if in.To != nil { + in, out := &in.To, &out.To + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CorrelationFilterObservation. +func (in *CorrelationFilterObservation) DeepCopy() *CorrelationFilterObservation { + if in == nil { + return nil + } + out := new(CorrelationFilterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CorrelationFilterParameters) DeepCopyInto(out *CorrelationFilterParameters) { + *out = *in + if in.ContentType != nil { + in, out := &in.ContentType, &out.ContentType + *out = new(string) + **out = **in + } + if in.CorrelationID != nil { + in, out := &in.CorrelationID, &out.CorrelationID + *out = new(string) + **out = **in + } + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.MessageID != nil { + in, out := &in.MessageID, &out.MessageID + *out = new(string) + **out = **in + } + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ReplyTo != nil { + in, out := &in.ReplyTo, &out.ReplyTo + *out = new(string) + **out = **in + } + if in.ReplyToSessionID != nil { + in, out := &in.ReplyToSessionID, &out.ReplyToSessionID + *out = new(string) + **out = **in + } + if in.SessionID != nil { + in, out := &in.SessionID, &out.SessionID + *out = new(string) + **out = **in + } + if in.To != nil { + in, out := &in.To, &out.To + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CorrelationFilterParameters. +func (in *CorrelationFilterParameters) DeepCopy() *CorrelationFilterParameters { + if in == nil { + return nil + } + out := new(CorrelationFilterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomerManagedKeyInitParameters) DeepCopyInto(out *CustomerManagedKeyInitParameters) { + *out = *in + if in.IdentityID != nil { + in, out := &in.IdentityID, &out.IdentityID + *out = new(string) + **out = **in + } + if in.InfrastructureEncryptionEnabled != nil { + in, out := &in.InfrastructureEncryptionEnabled, &out.InfrastructureEncryptionEnabled + *out = new(bool) + **out = **in + } + if in.KeyVaultKeyID != nil { + in, out := &in.KeyVaultKeyID, &out.KeyVaultKeyID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomerManagedKeyInitParameters. +func (in *CustomerManagedKeyInitParameters) DeepCopy() *CustomerManagedKeyInitParameters { + if in == nil { + return nil + } + out := new(CustomerManagedKeyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomerManagedKeyObservation) DeepCopyInto(out *CustomerManagedKeyObservation) { + *out = *in + if in.IdentityID != nil { + in, out := &in.IdentityID, &out.IdentityID + *out = new(string) + **out = **in + } + if in.InfrastructureEncryptionEnabled != nil { + in, out := &in.InfrastructureEncryptionEnabled, &out.InfrastructureEncryptionEnabled + *out = new(bool) + **out = **in + } + if in.KeyVaultKeyID != nil { + in, out := &in.KeyVaultKeyID, &out.KeyVaultKeyID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomerManagedKeyObservation. +func (in *CustomerManagedKeyObservation) DeepCopy() *CustomerManagedKeyObservation { + if in == nil { + return nil + } + out := new(CustomerManagedKeyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomerManagedKeyParameters) DeepCopyInto(out *CustomerManagedKeyParameters) { + *out = *in + if in.IdentityID != nil { + in, out := &in.IdentityID, &out.IdentityID + *out = new(string) + **out = **in + } + if in.InfrastructureEncryptionEnabled != nil { + in, out := &in.InfrastructureEncryptionEnabled, &out.InfrastructureEncryptionEnabled + *out = new(bool) + **out = **in + } + if in.KeyVaultKeyID != nil { + in, out := &in.KeyVaultKeyID, &out.KeyVaultKeyID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomerManagedKeyParameters. +func (in *CustomerManagedKeyParameters) DeepCopy() *CustomerManagedKeyParameters { + if in == nil { + return nil + } + out := new(CustomerManagedKeyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityInitParameters) DeepCopyInto(out *IdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityInitParameters. +func (in *IdentityInitParameters) DeepCopy() *IdentityInitParameters { + if in == nil { + return nil + } + out := new(IdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentityObservation) DeepCopyInto(out *IdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityObservation. +func (in *IdentityObservation) DeepCopy() *IdentityObservation { + if in == nil { + return nil + } + out := new(IdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityParameters) DeepCopyInto(out *IdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityParameters. +func (in *IdentityParameters) DeepCopy() *IdentityParameters { + if in == nil { + return nil + } + out := new(IdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkRuleSetInitParameters) DeepCopyInto(out *NetworkRuleSetInitParameters) { + *out = *in + if in.DefaultAction != nil { + in, out := &in.DefaultAction, &out.DefaultAction + *out = new(string) + **out = **in + } + if in.IPRules != nil { + in, out := &in.IPRules, &out.IPRules + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NetworkRules != nil { + in, out := &in.NetworkRules, &out.NetworkRules + *out = make([]NetworkRulesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.TrustedServicesAllowed != nil { + in, out := &in.TrustedServicesAllowed, &out.TrustedServicesAllowed + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkRuleSetInitParameters. +func (in *NetworkRuleSetInitParameters) DeepCopy() *NetworkRuleSetInitParameters { + if in == nil { + return nil + } + out := new(NetworkRuleSetInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkRuleSetObservation) DeepCopyInto(out *NetworkRuleSetObservation) { + *out = *in + if in.DefaultAction != nil { + in, out := &in.DefaultAction, &out.DefaultAction + *out = new(string) + **out = **in + } + if in.IPRules != nil { + in, out := &in.IPRules, &out.IPRules + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NetworkRules != nil { + in, out := &in.NetworkRules, &out.NetworkRules + *out = make([]NetworkRulesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.TrustedServicesAllowed != nil { + in, out := &in.TrustedServicesAllowed, &out.TrustedServicesAllowed + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkRuleSetObservation. +func (in *NetworkRuleSetObservation) DeepCopy() *NetworkRuleSetObservation { + if in == nil { + return nil + } + out := new(NetworkRuleSetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkRuleSetParameters) DeepCopyInto(out *NetworkRuleSetParameters) { + *out = *in + if in.DefaultAction != nil { + in, out := &in.DefaultAction, &out.DefaultAction + *out = new(string) + **out = **in + } + if in.IPRules != nil { + in, out := &in.IPRules, &out.IPRules + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.NetworkRules != nil { + in, out := &in.NetworkRules, &out.NetworkRules + *out = make([]NetworkRulesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.TrustedServicesAllowed != nil { + in, out := &in.TrustedServicesAllowed, &out.TrustedServicesAllowed + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkRuleSetParameters. +func (in *NetworkRuleSetParameters) DeepCopy() *NetworkRuleSetParameters { + if in == nil { + return nil + } + out := new(NetworkRuleSetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkRulesInitParameters) DeepCopyInto(out *NetworkRulesInitParameters) { + *out = *in + if in.IgnoreMissingVnetServiceEndpoint != nil { + in, out := &in.IgnoreMissingVnetServiceEndpoint, &out.IgnoreMissingVnetServiceEndpoint + *out = new(bool) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkRulesInitParameters. +func (in *NetworkRulesInitParameters) DeepCopy() *NetworkRulesInitParameters { + if in == nil { + return nil + } + out := new(NetworkRulesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkRulesObservation) DeepCopyInto(out *NetworkRulesObservation) { + *out = *in + if in.IgnoreMissingVnetServiceEndpoint != nil { + in, out := &in.IgnoreMissingVnetServiceEndpoint, &out.IgnoreMissingVnetServiceEndpoint + *out = new(bool) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkRulesObservation. +func (in *NetworkRulesObservation) DeepCopy() *NetworkRulesObservation { + if in == nil { + return nil + } + out := new(NetworkRulesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkRulesParameters) DeepCopyInto(out *NetworkRulesParameters) { + *out = *in + if in.IgnoreMissingVnetServiceEndpoint != nil { + in, out := &in.IgnoreMissingVnetServiceEndpoint, &out.IgnoreMissingVnetServiceEndpoint + *out = new(bool) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkRulesParameters. +func (in *NetworkRulesParameters) DeepCopy() *NetworkRulesParameters { + if in == nil { + return nil + } + out := new(NetworkRulesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceBusNamespace) DeepCopyInto(out *ServiceBusNamespace) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceBusNamespace. +func (in *ServiceBusNamespace) DeepCopy() *ServiceBusNamespace { + if in == nil { + return nil + } + out := new(ServiceBusNamespace) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceBusNamespace) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceBusNamespaceInitParameters) DeepCopyInto(out *ServiceBusNamespaceInitParameters) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(float64) + **out = **in + } + if in.CustomerManagedKey != nil { + in, out := &in.CustomerManagedKey, &out.CustomerManagedKey + *out = new(CustomerManagedKeyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LocalAuthEnabled != nil { + in, out := &in.LocalAuthEnabled, &out.LocalAuthEnabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MinimumTLSVersion != nil { + in, out := &in.MinimumTLSVersion, &out.MinimumTLSVersion + *out = new(string) + **out = **in + } + if in.NetworkRuleSet != nil { + in, out := &in.NetworkRuleSet, &out.NetworkRuleSet + *out = new(NetworkRuleSetInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PremiumMessagingPartitions != nil { + in, out := &in.PremiumMessagingPartitions, &out.PremiumMessagingPartitions + *out = new(float64) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ZoneRedundant != nil { + in, out := &in.ZoneRedundant, &out.ZoneRedundant + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, 
creating a new ServiceBusNamespaceInitParameters. +func (in *ServiceBusNamespaceInitParameters) DeepCopy() *ServiceBusNamespaceInitParameters { + if in == nil { + return nil + } + out := new(ServiceBusNamespaceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceBusNamespaceList) DeepCopyInto(out *ServiceBusNamespaceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ServiceBusNamespace, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceBusNamespaceList. +func (in *ServiceBusNamespaceList) DeepCopy() *ServiceBusNamespaceList { + if in == nil { + return nil + } + out := new(ServiceBusNamespaceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceBusNamespaceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceBusNamespaceObservation) DeepCopyInto(out *ServiceBusNamespaceObservation) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(float64) + **out = **in + } + if in.CustomerManagedKey != nil { + in, out := &in.CustomerManagedKey, &out.CustomerManagedKey + *out = new(CustomerManagedKeyObservation) + (*in).DeepCopyInto(*out) + } + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.LocalAuthEnabled != nil { + in, out := &in.LocalAuthEnabled, &out.LocalAuthEnabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MinimumTLSVersion != nil { + in, out := &in.MinimumTLSVersion, &out.MinimumTLSVersion + *out = new(string) + **out = **in + } + if in.NetworkRuleSet != nil { + in, out := &in.NetworkRuleSet, &out.NetworkRuleSet + *out = new(NetworkRuleSetObservation) + (*in).DeepCopyInto(*out) + } + if in.PremiumMessagingPartitions != nil { + in, out := &in.PremiumMessagingPartitions, &out.PremiumMessagingPartitions + *out = new(float64) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := 
(*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ZoneRedundant != nil { + in, out := &in.ZoneRedundant, &out.ZoneRedundant + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceBusNamespaceObservation. +func (in *ServiceBusNamespaceObservation) DeepCopy() *ServiceBusNamespaceObservation { + if in == nil { + return nil + } + out := new(ServiceBusNamespaceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceBusNamespaceParameters) DeepCopyInto(out *ServiceBusNamespaceParameters) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(float64) + **out = **in + } + if in.CustomerManagedKey != nil { + in, out := &in.CustomerManagedKey, &out.CustomerManagedKey + *out = new(CustomerManagedKeyParameters) + (*in).DeepCopyInto(*out) + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.LocalAuthEnabled != nil { + in, out := &in.LocalAuthEnabled, &out.LocalAuthEnabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MinimumTLSVersion != nil { + in, out := &in.MinimumTLSVersion, &out.MinimumTLSVersion + *out = new(string) + **out = **in + } + if in.NetworkRuleSet != nil { + in, out := &in.NetworkRuleSet, &out.NetworkRuleSet + *out = new(NetworkRuleSetParameters) + (*in).DeepCopyInto(*out) + } + if in.PremiumMessagingPartitions != nil { + in, out := &in.PremiumMessagingPartitions, &out.PremiumMessagingPartitions + *out = new(float64) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, 
&out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ZoneRedundant != nil { + in, out := &in.ZoneRedundant, &out.ZoneRedundant + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceBusNamespaceParameters. +func (in *ServiceBusNamespaceParameters) DeepCopy() *ServiceBusNamespaceParameters { + if in == nil { + return nil + } + out := new(ServiceBusNamespaceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceBusNamespaceSpec) DeepCopyInto(out *ServiceBusNamespaceSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceBusNamespaceSpec. 
+func (in *ServiceBusNamespaceSpec) DeepCopy() *ServiceBusNamespaceSpec { + if in == nil { + return nil + } + out := new(ServiceBusNamespaceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceBusNamespaceStatus) DeepCopyInto(out *ServiceBusNamespaceStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceBusNamespaceStatus. +func (in *ServiceBusNamespaceStatus) DeepCopy() *ServiceBusNamespaceStatus { + if in == nil { + return nil + } + out := new(ServiceBusNamespaceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Subscription) DeepCopyInto(out *Subscription) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subscription. +func (in *Subscription) DeepCopy() *Subscription { + if in == nil { + return nil + } + out := new(Subscription) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Subscription) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SubscriptionInitParameters) DeepCopyInto(out *SubscriptionInitParameters) { + *out = *in + if in.AutoDeleteOnIdle != nil { + in, out := &in.AutoDeleteOnIdle, &out.AutoDeleteOnIdle + *out = new(string) + **out = **in + } + if in.ClientScopedSubscription != nil { + in, out := &in.ClientScopedSubscription, &out.ClientScopedSubscription + *out = new(ClientScopedSubscriptionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ClientScopedSubscriptionEnabled != nil { + in, out := &in.ClientScopedSubscriptionEnabled, &out.ClientScopedSubscriptionEnabled + *out = new(bool) + **out = **in + } + if in.DeadLetteringOnFilterEvaluationError != nil { + in, out := &in.DeadLetteringOnFilterEvaluationError, &out.DeadLetteringOnFilterEvaluationError + *out = new(bool) + **out = **in + } + if in.DeadLetteringOnMessageExpiration != nil { + in, out := &in.DeadLetteringOnMessageExpiration, &out.DeadLetteringOnMessageExpiration + *out = new(bool) + **out = **in + } + if in.DefaultMessageTTL != nil { + in, out := &in.DefaultMessageTTL, &out.DefaultMessageTTL + *out = new(string) + **out = **in + } + if in.EnableBatchedOperations != nil { + in, out := &in.EnableBatchedOperations, &out.EnableBatchedOperations + *out = new(bool) + **out = **in + } + if in.ForwardDeadLetteredMessagesTo != nil { + in, out := &in.ForwardDeadLetteredMessagesTo, &out.ForwardDeadLetteredMessagesTo + *out = new(string) + **out = **in + } + if in.ForwardTo != nil { + in, out := &in.ForwardTo, &out.ForwardTo + *out = new(string) + **out = **in + } + if in.LockDuration != nil { + in, out := &in.LockDuration, &out.LockDuration + *out = new(string) + **out = **in + } + if in.MaxDeliveryCount != nil { + in, out := &in.MaxDeliveryCount, &out.MaxDeliveryCount + *out = new(float64) + **out = **in + } + if in.RequiresSession != nil { + in, out := &in.RequiresSession, &out.RequiresSession + *out = new(bool) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + 
**out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionInitParameters. +func (in *SubscriptionInitParameters) DeepCopy() *SubscriptionInitParameters { + if in == nil { + return nil + } + out := new(SubscriptionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubscriptionList) DeepCopyInto(out *SubscriptionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Subscription, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionList. +func (in *SubscriptionList) DeepCopy() *SubscriptionList { + if in == nil { + return nil + } + out := new(SubscriptionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SubscriptionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SubscriptionObservation) DeepCopyInto(out *SubscriptionObservation) { + *out = *in + if in.AutoDeleteOnIdle != nil { + in, out := &in.AutoDeleteOnIdle, &out.AutoDeleteOnIdle + *out = new(string) + **out = **in + } + if in.ClientScopedSubscription != nil { + in, out := &in.ClientScopedSubscription, &out.ClientScopedSubscription + *out = new(ClientScopedSubscriptionObservation) + (*in).DeepCopyInto(*out) + } + if in.ClientScopedSubscriptionEnabled != nil { + in, out := &in.ClientScopedSubscriptionEnabled, &out.ClientScopedSubscriptionEnabled + *out = new(bool) + **out = **in + } + if in.DeadLetteringOnFilterEvaluationError != nil { + in, out := &in.DeadLetteringOnFilterEvaluationError, &out.DeadLetteringOnFilterEvaluationError + *out = new(bool) + **out = **in + } + if in.DeadLetteringOnMessageExpiration != nil { + in, out := &in.DeadLetteringOnMessageExpiration, &out.DeadLetteringOnMessageExpiration + *out = new(bool) + **out = **in + } + if in.DefaultMessageTTL != nil { + in, out := &in.DefaultMessageTTL, &out.DefaultMessageTTL + *out = new(string) + **out = **in + } + if in.EnableBatchedOperations != nil { + in, out := &in.EnableBatchedOperations, &out.EnableBatchedOperations + *out = new(bool) + **out = **in + } + if in.ForwardDeadLetteredMessagesTo != nil { + in, out := &in.ForwardDeadLetteredMessagesTo, &out.ForwardDeadLetteredMessagesTo + *out = new(string) + **out = **in + } + if in.ForwardTo != nil { + in, out := &in.ForwardTo, &out.ForwardTo + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LockDuration != nil { + in, out := &in.LockDuration, &out.LockDuration + *out = new(string) + **out = **in + } + if in.MaxDeliveryCount != nil { + in, out := &in.MaxDeliveryCount, &out.MaxDeliveryCount + *out = new(float64) + **out = **in + } + if in.RequiresSession != nil { + in, out := &in.RequiresSession, &out.RequiresSession + *out = new(bool) + **out = **in + } + if 
in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.TopicID != nil { + in, out := &in.TopicID, &out.TopicID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionObservation. +func (in *SubscriptionObservation) DeepCopy() *SubscriptionObservation { + if in == nil { + return nil + } + out := new(SubscriptionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubscriptionParameters) DeepCopyInto(out *SubscriptionParameters) { + *out = *in + if in.AutoDeleteOnIdle != nil { + in, out := &in.AutoDeleteOnIdle, &out.AutoDeleteOnIdle + *out = new(string) + **out = **in + } + if in.ClientScopedSubscription != nil { + in, out := &in.ClientScopedSubscription, &out.ClientScopedSubscription + *out = new(ClientScopedSubscriptionParameters) + (*in).DeepCopyInto(*out) + } + if in.ClientScopedSubscriptionEnabled != nil { + in, out := &in.ClientScopedSubscriptionEnabled, &out.ClientScopedSubscriptionEnabled + *out = new(bool) + **out = **in + } + if in.DeadLetteringOnFilterEvaluationError != nil { + in, out := &in.DeadLetteringOnFilterEvaluationError, &out.DeadLetteringOnFilterEvaluationError + *out = new(bool) + **out = **in + } + if in.DeadLetteringOnMessageExpiration != nil { + in, out := &in.DeadLetteringOnMessageExpiration, &out.DeadLetteringOnMessageExpiration + *out = new(bool) + **out = **in + } + if in.DefaultMessageTTL != nil { + in, out := &in.DefaultMessageTTL, &out.DefaultMessageTTL + *out = new(string) + **out = **in + } + if in.EnableBatchedOperations != nil { + in, out := &in.EnableBatchedOperations, &out.EnableBatchedOperations + *out = new(bool) + **out = **in + } + if in.ForwardDeadLetteredMessagesTo != nil { + in, out := &in.ForwardDeadLetteredMessagesTo, &out.ForwardDeadLetteredMessagesTo + 
*out = new(string) + **out = **in + } + if in.ForwardTo != nil { + in, out := &in.ForwardTo, &out.ForwardTo + *out = new(string) + **out = **in + } + if in.LockDuration != nil { + in, out := &in.LockDuration, &out.LockDuration + *out = new(string) + **out = **in + } + if in.MaxDeliveryCount != nil { + in, out := &in.MaxDeliveryCount, &out.MaxDeliveryCount + *out = new(float64) + **out = **in + } + if in.RequiresSession != nil { + in, out := &in.RequiresSession, &out.RequiresSession + *out = new(bool) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } + if in.TopicID != nil { + in, out := &in.TopicID, &out.TopicID + *out = new(string) + **out = **in + } + if in.TopicIDRef != nil { + in, out := &in.TopicIDRef, &out.TopicIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.TopicIDSelector != nil { + in, out := &in.TopicIDSelector, &out.TopicIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionParameters. +func (in *SubscriptionParameters) DeepCopy() *SubscriptionParameters { + if in == nil { + return nil + } + out := new(SubscriptionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubscriptionRule) DeepCopyInto(out *SubscriptionRule) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionRule. 
+func (in *SubscriptionRule) DeepCopy() *SubscriptionRule { + if in == nil { + return nil + } + out := new(SubscriptionRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SubscriptionRule) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubscriptionRuleInitParameters) DeepCopyInto(out *SubscriptionRuleInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.CorrelationFilter != nil { + in, out := &in.CorrelationFilter, &out.CorrelationFilter + *out = new(CorrelationFilterInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FilterType != nil { + in, out := &in.FilterType, &out.FilterType + *out = new(string) + **out = **in + } + if in.SQLFilter != nil { + in, out := &in.SQLFilter, &out.SQLFilter + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionRuleInitParameters. +func (in *SubscriptionRuleInitParameters) DeepCopy() *SubscriptionRuleInitParameters { + if in == nil { + return nil + } + out := new(SubscriptionRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SubscriptionRuleList) DeepCopyInto(out *SubscriptionRuleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SubscriptionRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionRuleList. +func (in *SubscriptionRuleList) DeepCopy() *SubscriptionRuleList { + if in == nil { + return nil + } + out := new(SubscriptionRuleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SubscriptionRuleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubscriptionRuleObservation) DeepCopyInto(out *SubscriptionRuleObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.CorrelationFilter != nil { + in, out := &in.CorrelationFilter, &out.CorrelationFilter + *out = new(CorrelationFilterObservation) + (*in).DeepCopyInto(*out) + } + if in.FilterType != nil { + in, out := &in.FilterType, &out.FilterType + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.SQLFilter != nil { + in, out := &in.SQLFilter, &out.SQLFilter + *out = new(string) + **out = **in + } + if in.SQLFilterCompatibilityLevel != nil { + in, out := &in.SQLFilterCompatibilityLevel, &out.SQLFilterCompatibilityLevel + *out = new(float64) + **out = **in + } + if in.SubscriptionID != nil { + in, out := &in.SubscriptionID, &out.SubscriptionID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated 
deepcopy function, copying the receiver, creating a new SubscriptionRuleObservation. +func (in *SubscriptionRuleObservation) DeepCopy() *SubscriptionRuleObservation { + if in == nil { + return nil + } + out := new(SubscriptionRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubscriptionRuleParameters) DeepCopyInto(out *SubscriptionRuleParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.CorrelationFilter != nil { + in, out := &in.CorrelationFilter, &out.CorrelationFilter + *out = new(CorrelationFilterParameters) + (*in).DeepCopyInto(*out) + } + if in.FilterType != nil { + in, out := &in.FilterType, &out.FilterType + *out = new(string) + **out = **in + } + if in.SQLFilter != nil { + in, out := &in.SQLFilter, &out.SQLFilter + *out = new(string) + **out = **in + } + if in.SubscriptionID != nil { + in, out := &in.SubscriptionID, &out.SubscriptionID + *out = new(string) + **out = **in + } + if in.SubscriptionIDRef != nil { + in, out := &in.SubscriptionIDRef, &out.SubscriptionIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubscriptionIDSelector != nil { + in, out := &in.SubscriptionIDSelector, &out.SubscriptionIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionRuleParameters. +func (in *SubscriptionRuleParameters) DeepCopy() *SubscriptionRuleParameters { + if in == nil { + return nil + } + out := new(SubscriptionRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SubscriptionRuleSpec) DeepCopyInto(out *SubscriptionRuleSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionRuleSpec. +func (in *SubscriptionRuleSpec) DeepCopy() *SubscriptionRuleSpec { + if in == nil { + return nil + } + out := new(SubscriptionRuleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubscriptionRuleStatus) DeepCopyInto(out *SubscriptionRuleStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionRuleStatus. +func (in *SubscriptionRuleStatus) DeepCopy() *SubscriptionRuleStatus { + if in == nil { + return nil + } + out := new(SubscriptionRuleStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubscriptionSpec) DeepCopyInto(out *SubscriptionSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionSpec. +func (in *SubscriptionSpec) DeepCopy() *SubscriptionSpec { + if in == nil { + return nil + } + out := new(SubscriptionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SubscriptionStatus) DeepCopyInto(out *SubscriptionStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionStatus. +func (in *SubscriptionStatus) DeepCopy() *SubscriptionStatus { + if in == nil { + return nil + } + out := new(SubscriptionStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/servicebus/v1beta2/zz_generated.managed.go b/apis/servicebus/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..68cd0261d --- /dev/null +++ b/apis/servicebus/v1beta2/zz_generated.managed.go @@ -0,0 +1,188 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this ServiceBusNamespace. +func (mg *ServiceBusNamespace) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ServiceBusNamespace. +func (mg *ServiceBusNamespace) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ServiceBusNamespace. +func (mg *ServiceBusNamespace) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ServiceBusNamespace. +func (mg *ServiceBusNamespace) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ServiceBusNamespace. +func (mg *ServiceBusNamespace) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ServiceBusNamespace. 
+func (mg *ServiceBusNamespace) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ServiceBusNamespace. +func (mg *ServiceBusNamespace) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ServiceBusNamespace. +func (mg *ServiceBusNamespace) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ServiceBusNamespace. +func (mg *ServiceBusNamespace) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ServiceBusNamespace. +func (mg *ServiceBusNamespace) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ServiceBusNamespace. +func (mg *ServiceBusNamespace) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ServiceBusNamespace. +func (mg *ServiceBusNamespace) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Subscription. +func (mg *Subscription) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Subscription. +func (mg *Subscription) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Subscription. +func (mg *Subscription) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Subscription. +func (mg *Subscription) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Subscription. 
+func (mg *Subscription) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Subscription. +func (mg *Subscription) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Subscription. +func (mg *Subscription) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Subscription. +func (mg *Subscription) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Subscription. +func (mg *Subscription) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Subscription. +func (mg *Subscription) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Subscription. +func (mg *Subscription) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Subscription. +func (mg *Subscription) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this SubscriptionRule. +func (mg *SubscriptionRule) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this SubscriptionRule. +func (mg *SubscriptionRule) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this SubscriptionRule. +func (mg *SubscriptionRule) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this SubscriptionRule. 
+func (mg *SubscriptionRule) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this SubscriptionRule. +func (mg *SubscriptionRule) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this SubscriptionRule. +func (mg *SubscriptionRule) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this SubscriptionRule. +func (mg *SubscriptionRule) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this SubscriptionRule. +func (mg *SubscriptionRule) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this SubscriptionRule. +func (mg *SubscriptionRule) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this SubscriptionRule. +func (mg *SubscriptionRule) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this SubscriptionRule. +func (mg *SubscriptionRule) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this SubscriptionRule. +func (mg *SubscriptionRule) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/servicebus/v1beta2/zz_generated.managedlist.go b/apis/servicebus/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..a607a789f --- /dev/null +++ b/apis/servicebus/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,35 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. 
+ +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this ServiceBusNamespaceList. +func (l *ServiceBusNamespaceList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this SubscriptionList. +func (l *SubscriptionList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this SubscriptionRuleList. +func (l *SubscriptionRuleList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/servicebus/v1beta2/zz_generated.resolvers.go b/apis/servicebus/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..0551676cf --- /dev/null +++ b/apis/servicebus/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,160 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + rconfig "github.com/upbound/provider-azure/apis/rconfig" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + apisresolver "github.com/upbound/provider-azure/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *ServiceBusNamespace) ResolveReferences( // ResolveReferences of this ServiceBusNamespace. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + if mg.Spec.ForProvider.NetworkRuleSet != nil { + for i4 := 0; i4 < len(mg.Spec.ForProvider.NetworkRuleSet.NetworkRules); i4++ { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.NetworkRuleSet.NetworkRules[i4].SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.NetworkRuleSet.NetworkRules[i4].SubnetIDRef, + Selector: mg.Spec.ForProvider.NetworkRuleSet.NetworkRules[i4].SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.NetworkRuleSet.NetworkRules[i4].SubnetID") + } + mg.Spec.ForProvider.NetworkRuleSet.NetworkRules[i4].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.NetworkRuleSet.NetworkRules[i4].SubnetIDRef = rsp.ResolvedReference + + } + } + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } 
+ mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.NetworkRuleSet != nil { + for i4 := 0; i4 < len(mg.Spec.InitProvider.NetworkRuleSet.NetworkRules); i4++ { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.NetworkRuleSet.NetworkRules[i4].SubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.NetworkRuleSet.NetworkRules[i4].SubnetIDRef, + Selector: mg.Spec.InitProvider.NetworkRuleSet.NetworkRules[i4].SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.NetworkRuleSet.NetworkRules[i4].SubnetID") + } + mg.Spec.InitProvider.NetworkRuleSet.NetworkRules[i4].SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.NetworkRuleSet.NetworkRules[i4].SubnetIDRef = rsp.ResolvedReference + + } + } + + return nil +} + +// ResolveReferences of this Subscription. 
+func (mg *Subscription) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("servicebus.azure.upbound.io", "v1beta1", "Topic", "TopicList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.TopicID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.TopicIDRef, + Selector: mg.Spec.ForProvider.TopicIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.TopicID") + } + mg.Spec.ForProvider.TopicID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.TopicIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this SubscriptionRule. 
+func (mg *SubscriptionRule) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("servicebus.azure.upbound.io", "v1beta2", "Subscription", "SubscriptionList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SubscriptionID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.SubscriptionIDRef, + Selector: mg.Spec.ForProvider.SubscriptionIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SubscriptionID") + } + mg.Spec.ForProvider.SubscriptionID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SubscriptionIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/servicebus/v1beta2/zz_groupversion_info.go b/apis/servicebus/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..aacaeefe1 --- /dev/null +++ b/apis/servicebus/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=servicebus.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
+const ( + CRDGroup = "servicebus.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/servicebus/v1beta2/zz_servicebusnamespace_terraformed.go b/apis/servicebus/v1beta2/zz_servicebusnamespace_terraformed.go new file mode 100755 index 000000000..8b5662d17 --- /dev/null +++ b/apis/servicebus/v1beta2/zz_servicebusnamespace_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ServiceBusNamespace +func (mg *ServiceBusNamespace) GetTerraformResourceType() string { + return "azurerm_servicebus_namespace" +} + +// GetConnectionDetailsMapping for this ServiceBusNamespace +func (tr *ServiceBusNamespace) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"default_primary_connection_string": "status.atProvider.defaultPrimaryConnectionString", "default_primary_key": "status.atProvider.defaultPrimaryKey", "default_secondary_connection_string": "status.atProvider.defaultSecondaryConnectionString", "default_secondary_key": "status.atProvider.defaultSecondaryKey"} +} + +// GetObservation of this ServiceBusNamespace +func (tr *ServiceBusNamespace) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { 
+ return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ServiceBusNamespace +func (tr *ServiceBusNamespace) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ServiceBusNamespace +func (tr *ServiceBusNamespace) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ServiceBusNamespace +func (tr *ServiceBusNamespace) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ServiceBusNamespace +func (tr *ServiceBusNamespace) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ServiceBusNamespace +func (tr *ServiceBusNamespace) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this ServiceBusNamespace +func (tr *ServiceBusNamespace) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): 
mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ServiceBusNamespace using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ServiceBusNamespace) LateInitialize(attrs []byte) (bool, error) { + params := &ServiceBusNamespaceParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ServiceBusNamespace) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/servicebus/v1beta2/zz_servicebusnamespace_types.go b/apis/servicebus/v1beta2/zz_servicebusnamespace_types.go new file mode 100755 index 000000000..ca1d302fd --- /dev/null +++ b/apis/servicebus/v1beta2/zz_servicebusnamespace_types.go @@ -0,0 +1,420 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CustomerManagedKeyInitParameters struct { + + // The ID of the User Assigned Identity that has access to the key. + IdentityID *string `json:"identityId,omitempty" tf:"identity_id,omitempty"` + + // Used to specify whether enable Infrastructure Encryption (Double Encryption). Changing this forces a new resource to be created. + InfrastructureEncryptionEnabled *bool `json:"infrastructureEncryptionEnabled,omitempty" tf:"infrastructure_encryption_enabled,omitempty"` + + // The ID of the Key Vault Key which should be used to Encrypt the data in this ServiceBus Namespace. + KeyVaultKeyID *string `json:"keyVaultKeyId,omitempty" tf:"key_vault_key_id,omitempty"` +} + +type CustomerManagedKeyObservation struct { + + // The ID of the User Assigned Identity that has access to the key. + IdentityID *string `json:"identityId,omitempty" tf:"identity_id,omitempty"` + + // Used to specify whether enable Infrastructure Encryption (Double Encryption). Changing this forces a new resource to be created. + InfrastructureEncryptionEnabled *bool `json:"infrastructureEncryptionEnabled,omitempty" tf:"infrastructure_encryption_enabled,omitempty"` + + // The ID of the Key Vault Key which should be used to Encrypt the data in this ServiceBus Namespace. + KeyVaultKeyID *string `json:"keyVaultKeyId,omitempty" tf:"key_vault_key_id,omitempty"` +} + +type CustomerManagedKeyParameters struct { + + // The ID of the User Assigned Identity that has access to the key. + // +kubebuilder:validation:Optional + IdentityID *string `json:"identityId" tf:"identity_id,omitempty"` + + // Used to specify whether enable Infrastructure Encryption (Double Encryption). Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + InfrastructureEncryptionEnabled *bool `json:"infrastructureEncryptionEnabled,omitempty" tf:"infrastructure_encryption_enabled,omitempty"` + + // The ID of the Key Vault Key which should be used to Encrypt the data in this ServiceBus Namespace. + // +kubebuilder:validation:Optional + KeyVaultKeyID *string `json:"keyVaultKeyId" tf:"key_vault_key_id,omitempty"` +} + +type IdentityInitParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this ServiceBus namespace. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this ServiceBus Namespace. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityObservation struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this ServiceBus namespace. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Principal ID for the Service Principal associated with the Managed Service Identity of this ServiceBus Namespace. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID for the Service Principal associated with the Managed Service Identity of this ServiceBus Namespace. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this ServiceBus Namespace. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this ServiceBus namespace. 
+ // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this ServiceBus Namespace. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type NetworkRuleSetInitParameters struct { + + // Specifies the default action for the Network Rule Set. Possible values are Allow and Deny. Defaults to Allow. + DefaultAction *string `json:"defaultAction,omitempty" tf:"default_action,omitempty"` + + // One or more IP Addresses, or CIDR Blocks which should be able to access the ServiceBus Namespace. + // +listType=set + IPRules []*string `json:"ipRules,omitempty" tf:"ip_rules,omitempty"` + + // One or more network_rules blocks as defined below. + NetworkRules []NetworkRulesInitParameters `json:"networkRules,omitempty" tf:"network_rules,omitempty"` + + // Whether to allow traffic over public network. Possible values are true and false. Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // Are Azure Services that are known and trusted for this resource type are allowed to bypass firewall configuration? See Trusted Microsoft Services + TrustedServicesAllowed *bool `json:"trustedServicesAllowed,omitempty" tf:"trusted_services_allowed,omitempty"` +} + +type NetworkRuleSetObservation struct { + + // Specifies the default action for the Network Rule Set. Possible values are Allow and Deny. Defaults to Allow. + DefaultAction *string `json:"defaultAction,omitempty" tf:"default_action,omitempty"` + + // One or more IP Addresses, or CIDR Blocks which should be able to access the ServiceBus Namespace. 
+ // +listType=set + IPRules []*string `json:"ipRules,omitempty" tf:"ip_rules,omitempty"` + + // One or more network_rules blocks as defined below. + NetworkRules []NetworkRulesObservation `json:"networkRules,omitempty" tf:"network_rules,omitempty"` + + // Whether to allow traffic over public network. Possible values are true and false. Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // Are Azure Services that are known and trusted for this resource type are allowed to bypass firewall configuration? See Trusted Microsoft Services + TrustedServicesAllowed *bool `json:"trustedServicesAllowed,omitempty" tf:"trusted_services_allowed,omitempty"` +} + +type NetworkRuleSetParameters struct { + + // Specifies the default action for the Network Rule Set. Possible values are Allow and Deny. Defaults to Allow. + // +kubebuilder:validation:Optional + DefaultAction *string `json:"defaultAction,omitempty" tf:"default_action,omitempty"` + + // One or more IP Addresses, or CIDR Blocks which should be able to access the ServiceBus Namespace. + // +kubebuilder:validation:Optional + // +listType=set + IPRules []*string `json:"ipRules,omitempty" tf:"ip_rules,omitempty"` + + // One or more network_rules blocks as defined below. + // +kubebuilder:validation:Optional + NetworkRules []NetworkRulesParameters `json:"networkRules,omitempty" tf:"network_rules,omitempty"` + + // Whether to allow traffic over public network. Possible values are true and false. Defaults to true. + // +kubebuilder:validation:Optional + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // Are Azure Services that are known and trusted for this resource type are allowed to bypass firewall configuration? 
See Trusted Microsoft Services + // +kubebuilder:validation:Optional + TrustedServicesAllowed *bool `json:"trustedServicesAllowed,omitempty" tf:"trusted_services_allowed,omitempty"` +} + +type NetworkRulesInitParameters struct { + + // Should the ServiceBus Namespace Network Rule Set ignore missing Virtual Network Service Endpoint option in the Subnet? Defaults to false. + IgnoreMissingVnetServiceEndpoint *bool `json:"ignoreMissingVnetServiceEndpoint,omitempty" tf:"ignore_missing_vnet_service_endpoint,omitempty"` + + // The Subnet ID which should be able to access this ServiceBus Namespace. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` +} + +type NetworkRulesObservation struct { + + // Should the ServiceBus Namespace Network Rule Set ignore missing Virtual Network Service Endpoint option in the Subnet? Defaults to false. + IgnoreMissingVnetServiceEndpoint *bool `json:"ignoreMissingVnetServiceEndpoint,omitempty" tf:"ignore_missing_vnet_service_endpoint,omitempty"` + + // The Subnet ID which should be able to access this ServiceBus Namespace. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` +} + +type NetworkRulesParameters struct { + + // Should the ServiceBus Namespace Network Rule Set ignore missing Virtual Network Service Endpoint option in the Subnet? Defaults to false. 
+ // +kubebuilder:validation:Optional + IgnoreMissingVnetServiceEndpoint *bool `json:"ignoreMissingVnetServiceEndpoint,omitempty" tf:"ignore_missing_vnet_service_endpoint,omitempty"` + + // The Subnet ID which should be able to access this ServiceBus Namespace. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` +} + +type ServiceBusNamespaceInitParameters struct { + + // Specifies the capacity. When sku is Premium, capacity can be 1, 2, 4, 8 or 16. When sku is Basic or Standard, capacity can be 0 only. + Capacity *float64 `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // An customer_managed_key block as defined below. + CustomerManagedKey *CustomerManagedKeyInitParameters `json:"customerManagedKey,omitempty" tf:"customer_managed_key,omitempty"` + + // An identity block as defined below. + Identity *IdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Whether or not SAS authentication is enabled for the Service Bus namespace. Defaults to true. + LocalAuthEnabled *bool `json:"localAuthEnabled,omitempty" tf:"local_auth_enabled,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The minimum supported TLS version for this Service Bus Namespace. 
Valid values are: 1.0, 1.1 and 1.2. The current default minimum TLS version is 1.2. + MinimumTLSVersion *string `json:"minimumTlsVersion,omitempty" tf:"minimum_tls_version,omitempty"` + + // An network_rule_set block as defined below. + NetworkRuleSet *NetworkRuleSetInitParameters `json:"networkRuleSet,omitempty" tf:"network_rule_set,omitempty"` + + // Specifies the number messaging partitions. Only valid when sku is Premium and the minimum number is 1. Possible values include 0, 1, 2, and 4. Defaults to 0 for Standard, Basic namespace. Changing this forces a new resource to be created. + PremiumMessagingPartitions *float64 `json:"premiumMessagingPartitions,omitempty" tf:"premium_messaging_partitions,omitempty"` + + // Is public network access enabled for the Service Bus Namespace? Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // Defines which tier to use. Options are Basic, Standard or Premium. Please note that setting this field to Premium will force the creation of a new resource. + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Whether or not this resource is zone redundant. sku needs to be Premium. Changing this forces a new resource to be created. + ZoneRedundant *bool `json:"zoneRedundant,omitempty" tf:"zone_redundant,omitempty"` +} + +type ServiceBusNamespaceObservation struct { + + // Specifies the capacity. When sku is Premium, capacity can be 1, 2, 4, 8 or 16. When sku is Basic or Standard, capacity can be 0 only. + Capacity *float64 `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // An customer_managed_key block as defined below. 
+ CustomerManagedKey *CustomerManagedKeyObservation `json:"customerManagedKey,omitempty" tf:"customer_managed_key,omitempty"` + + // The URL to access the ServiceBus Namespace. + Endpoint *string `json:"endpoint,omitempty" tf:"endpoint,omitempty"` + + // The ServiceBus Namespace ID. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. + Identity *IdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // Whether or not SAS authentication is enabled for the Service Bus namespace. Defaults to true. + LocalAuthEnabled *bool `json:"localAuthEnabled,omitempty" tf:"local_auth_enabled,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The minimum supported TLS version for this Service Bus Namespace. Valid values are: 1.0, 1.1 and 1.2. The current default minimum TLS version is 1.2. + MinimumTLSVersion *string `json:"minimumTlsVersion,omitempty" tf:"minimum_tls_version,omitempty"` + + // An network_rule_set block as defined below. + NetworkRuleSet *NetworkRuleSetObservation `json:"networkRuleSet,omitempty" tf:"network_rule_set,omitempty"` + + // Specifies the number messaging partitions. Only valid when sku is Premium and the minimum number is 1. Possible values include 0, 1, 2, and 4. Defaults to 0 for Standard, Basic namespace. Changing this forces a new resource to be created. + PremiumMessagingPartitions *float64 `json:"premiumMessagingPartitions,omitempty" tf:"premium_messaging_partitions,omitempty"` + + // Is public network access enabled for the Service Bus Namespace? Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The name of the resource group in which to Changing this forces a new resource to be created. + // create the namespace. 
+ ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Defines which tier to use. Options are Basic, Standard or Premium. Please note that setting this field to Premium will force the creation of a new resource. + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Whether or not this resource is zone redundant. sku needs to be Premium. Changing this forces a new resource to be created. + ZoneRedundant *bool `json:"zoneRedundant,omitempty" tf:"zone_redundant,omitempty"` +} + +type ServiceBusNamespaceParameters struct { + + // Specifies the capacity. When sku is Premium, capacity can be 1, 2, 4, 8 or 16. When sku is Basic or Standard, capacity can be 0 only. + // +kubebuilder:validation:Optional + Capacity *float64 `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // An customer_managed_key block as defined below. + // +kubebuilder:validation:Optional + CustomerManagedKey *CustomerManagedKeyParameters `json:"customerManagedKey,omitempty" tf:"customer_managed_key,omitempty"` + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *IdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Whether or not SAS authentication is enabled for the Service Bus namespace. Defaults to true. + // +kubebuilder:validation:Optional + LocalAuthEnabled *bool `json:"localAuthEnabled,omitempty" tf:"local_auth_enabled,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The minimum supported TLS version for this Service Bus Namespace. Valid values are: 1.0, 1.1 and 1.2. The current default minimum TLS version is 1.2. 
+ // +kubebuilder:validation:Optional + MinimumTLSVersion *string `json:"minimumTlsVersion,omitempty" tf:"minimum_tls_version,omitempty"` + + // An network_rule_set block as defined below. + // +kubebuilder:validation:Optional + NetworkRuleSet *NetworkRuleSetParameters `json:"networkRuleSet,omitempty" tf:"network_rule_set,omitempty"` + + // Specifies the number messaging partitions. Only valid when sku is Premium and the minimum number is 1. Possible values include 0, 1, 2, and 4. Defaults to 0 for Standard, Basic namespace. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + PremiumMessagingPartitions *float64 `json:"premiumMessagingPartitions,omitempty" tf:"premium_messaging_partitions,omitempty"` + + // Is public network access enabled for the Service Bus Namespace? Defaults to true. + // +kubebuilder:validation:Optional + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The name of the resource group in which to Changing this forces a new resource to be created. + // create the namespace. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // Defines which tier to use. Options are Basic, Standard or Premium. Please note that setting this field to Premium will force the creation of a new resource. 
+ // +kubebuilder:validation:Optional + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Whether or not this resource is zone redundant. sku needs to be Premium. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ZoneRedundant *bool `json:"zoneRedundant,omitempty" tf:"zone_redundant,omitempty"` +} + +// ServiceBusNamespaceSpec defines the desired state of ServiceBusNamespace +type ServiceBusNamespaceSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ServiceBusNamespaceParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ServiceBusNamespaceInitParameters `json:"initProvider,omitempty"` +} + +// ServiceBusNamespaceStatus defines the observed state of ServiceBusNamespace. +type ServiceBusNamespaceStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ServiceBusNamespaceObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ServiceBusNamespace is the Schema for the ServiceBusNamespaces API. Manages a ServiceBus Namespace. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type ServiceBusNamespace struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.sku) || (has(self.initProvider) && has(self.initProvider.sku))",message="spec.forProvider.sku is a required parameter" + Spec ServiceBusNamespaceSpec `json:"spec"` + Status ServiceBusNamespaceStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ServiceBusNamespaceList contains a list of ServiceBusNamespaces +type ServiceBusNamespaceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ServiceBusNamespace `json:"items"` +} + +// Repository type metadata. +var ( + ServiceBusNamespace_Kind = "ServiceBusNamespace" + ServiceBusNamespace_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ServiceBusNamespace_Kind}.String() + ServiceBusNamespace_KindAPIVersion = ServiceBusNamespace_Kind + "." 
+ CRDGroupVersion.String() + ServiceBusNamespace_GroupVersionKind = CRDGroupVersion.WithKind(ServiceBusNamespace_Kind) +) + +func init() { + SchemeBuilder.Register(&ServiceBusNamespace{}, &ServiceBusNamespaceList{}) +} diff --git a/apis/servicebus/v1beta2/zz_subscription_terraformed.go b/apis/servicebus/v1beta2/zz_subscription_terraformed.go new file mode 100755 index 000000000..4784f65be --- /dev/null +++ b/apis/servicebus/v1beta2/zz_subscription_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Subscription +func (mg *Subscription) GetTerraformResourceType() string { + return "azurerm_servicebus_subscription" +} + +// GetConnectionDetailsMapping for this Subscription +func (tr *Subscription) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Subscription +func (tr *Subscription) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Subscription +func (tr *Subscription) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Subscription +func (tr *Subscription) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Subscription +func (tr *Subscription) GetParameters() (map[string]any, error) { + p, 
err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Subscription +func (tr *Subscription) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Subscription +func (tr *Subscription) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Subscription +func (tr *Subscription) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Subscription using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *Subscription) LateInitialize(attrs []byte) (bool, error) { + params := &SubscriptionParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Subscription) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/servicebus/v1beta2/zz_subscription_types.go b/apis/servicebus/v1beta2/zz_subscription_types.go new file mode 100755 index 000000000..0e99bd7a6 --- /dev/null +++ b/apis/servicebus/v1beta2/zz_subscription_types.go @@ -0,0 +1,266 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ClientScopedSubscriptionInitParameters struct { + + // Specifies the Client ID of the application that created the client-scoped subscription. Changing this forces a new resource to be created. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // Whether the client scoped subscription is shareable. Defaults to true Changing this forces a new resource to be created. + IsClientScopedSubscriptionShareable *bool `json:"isClientScopedSubscriptionShareable,omitempty" tf:"is_client_scoped_subscription_shareable,omitempty"` +} + +type ClientScopedSubscriptionObservation struct { + + // Specifies the Client ID of the application that created the client-scoped subscription. 
Changing this forces a new resource to be created. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // Whether the client scoped subscription is durable. This property can only be controlled from the application side. + IsClientScopedSubscriptionDurable *bool `json:"isClientScopedSubscriptionDurable,omitempty" tf:"is_client_scoped_subscription_durable,omitempty"` + + // Whether the client scoped subscription is shareable. Defaults to true Changing this forces a new resource to be created. + IsClientScopedSubscriptionShareable *bool `json:"isClientScopedSubscriptionShareable,omitempty" tf:"is_client_scoped_subscription_shareable,omitempty"` +} + +type ClientScopedSubscriptionParameters struct { + + // Specifies the Client ID of the application that created the client-scoped subscription. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // Whether the client scoped subscription is shareable. Defaults to true Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + IsClientScopedSubscriptionShareable *bool `json:"isClientScopedSubscriptionShareable,omitempty" tf:"is_client_scoped_subscription_shareable,omitempty"` +} + +type SubscriptionInitParameters struct { + + // The idle interval after which the topic is automatically deleted as an ISO 8601 duration. The minimum duration is 5 minutes or PT5M. + AutoDeleteOnIdle *string `json:"autoDeleteOnIdle,omitempty" tf:"auto_delete_on_idle,omitempty"` + + // A client_scoped_subscription block as defined below. + ClientScopedSubscription *ClientScopedSubscriptionInitParameters `json:"clientScopedSubscription,omitempty" tf:"client_scoped_subscription,omitempty"` + + // whether the subscription is scoped to a client id. Defaults to false. 
+ ClientScopedSubscriptionEnabled *bool `json:"clientScopedSubscriptionEnabled,omitempty" tf:"client_scoped_subscription_enabled,omitempty"` + + // Boolean flag which controls whether the Subscription has dead letter support on filter evaluation exceptions. Defaults to true. + DeadLetteringOnFilterEvaluationError *bool `json:"deadLetteringOnFilterEvaluationError,omitempty" tf:"dead_lettering_on_filter_evaluation_error,omitempty"` + + // Boolean flag which controls whether the Subscription has dead letter support when a message expires. + DeadLetteringOnMessageExpiration *bool `json:"deadLetteringOnMessageExpiration,omitempty" tf:"dead_lettering_on_message_expiration,omitempty"` + + // The Default message timespan to live as an ISO 8601 duration. This is the duration after which the message expires, starting from when the message is sent to Service Bus. This is the default value used when TimeToLive is not set on a message itself. + DefaultMessageTTL *string `json:"defaultMessageTtl,omitempty" tf:"default_message_ttl,omitempty"` + + // Boolean flag which controls whether the Subscription supports batched operations. + EnableBatchedOperations *bool `json:"enableBatchedOperations,omitempty" tf:"enable_batched_operations,omitempty"` + + // The name of a Queue or Topic to automatically forward Dead Letter messages to. + ForwardDeadLetteredMessagesTo *string `json:"forwardDeadLetteredMessagesTo,omitempty" tf:"forward_dead_lettered_messages_to,omitempty"` + + // The name of a Queue or Topic to automatically forward messages to. + ForwardTo *string `json:"forwardTo,omitempty" tf:"forward_to,omitempty"` + + // The lock duration for the subscription as an ISO 8601 duration. The default value is 1 minute or P0DT0H1M0S . The maximum value is 5 minutes or P0DT0H5M0S . + LockDuration *string `json:"lockDuration,omitempty" tf:"lock_duration,omitempty"` + + // The maximum number of deliveries. 
+ MaxDeliveryCount *float64 `json:"maxDeliveryCount,omitempty" tf:"max_delivery_count,omitempty"` + + // Boolean flag which controls whether this Subscription supports the concept of a session. Changing this forces a new resource to be created. + RequiresSession *bool `json:"requiresSession,omitempty" tf:"requires_session,omitempty"` + + // The status of the Subscription. Possible values are Active,ReceiveDisabled, or Disabled. Defaults to Active. + Status *string `json:"status,omitempty" tf:"status,omitempty"` +} + +type SubscriptionObservation struct { + + // The idle interval after which the topic is automatically deleted as an ISO 8601 duration. The minimum duration is 5 minutes or PT5M. + AutoDeleteOnIdle *string `json:"autoDeleteOnIdle,omitempty" tf:"auto_delete_on_idle,omitempty"` + + // A client_scoped_subscription block as defined below. + ClientScopedSubscription *ClientScopedSubscriptionObservation `json:"clientScopedSubscription,omitempty" tf:"client_scoped_subscription,omitempty"` + + // whether the subscription is scoped to a client id. Defaults to false. + ClientScopedSubscriptionEnabled *bool `json:"clientScopedSubscriptionEnabled,omitempty" tf:"client_scoped_subscription_enabled,omitempty"` + + // Boolean flag which controls whether the Subscription has dead letter support on filter evaluation exceptions. Defaults to true. + DeadLetteringOnFilterEvaluationError *bool `json:"deadLetteringOnFilterEvaluationError,omitempty" tf:"dead_lettering_on_filter_evaluation_error,omitempty"` + + // Boolean flag which controls whether the Subscription has dead letter support when a message expires. + DeadLetteringOnMessageExpiration *bool `json:"deadLetteringOnMessageExpiration,omitempty" tf:"dead_lettering_on_message_expiration,omitempty"` + + // The Default message timespan to live as an ISO 8601 duration. This is the duration after which the message expires, starting from when the message is sent to Service Bus. 
This is the default value used when TimeToLive is not set on a message itself. + DefaultMessageTTL *string `json:"defaultMessageTtl,omitempty" tf:"default_message_ttl,omitempty"` + + // Boolean flag which controls whether the Subscription supports batched operations. + EnableBatchedOperations *bool `json:"enableBatchedOperations,omitempty" tf:"enable_batched_operations,omitempty"` + + // The name of a Queue or Topic to automatically forward Dead Letter messages to. + ForwardDeadLetteredMessagesTo *string `json:"forwardDeadLetteredMessagesTo,omitempty" tf:"forward_dead_lettered_messages_to,omitempty"` + + // The name of a Queue or Topic to automatically forward messages to. + ForwardTo *string `json:"forwardTo,omitempty" tf:"forward_to,omitempty"` + + // The ServiceBus Subscription ID. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The lock duration for the subscription as an ISO 8601 duration. The default value is 1 minute or P0DT0H1M0S . The maximum value is 5 minutes or P0DT0H5M0S . + LockDuration *string `json:"lockDuration,omitempty" tf:"lock_duration,omitempty"` + + // The maximum number of deliveries. + MaxDeliveryCount *float64 `json:"maxDeliveryCount,omitempty" tf:"max_delivery_count,omitempty"` + + // Boolean flag which controls whether this Subscription supports the concept of a session. Changing this forces a new resource to be created. + RequiresSession *bool `json:"requiresSession,omitempty" tf:"requires_session,omitempty"` + + // The status of the Subscription. Possible values are Active,ReceiveDisabled, or Disabled. Defaults to Active. + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // The ID of the ServiceBus Topic to create this Subscription in. Changing this forces a new resource to be created. + TopicID *string `json:"topicId,omitempty" tf:"topic_id,omitempty"` +} + +type SubscriptionParameters struct { + + // The idle interval after which the topic is automatically deleted as an ISO 8601 duration. 
The minimum duration is 5 minutes or PT5M. + // +kubebuilder:validation:Optional + AutoDeleteOnIdle *string `json:"autoDeleteOnIdle,omitempty" tf:"auto_delete_on_idle,omitempty"` + + // A client_scoped_subscription block as defined below. + // +kubebuilder:validation:Optional + ClientScopedSubscription *ClientScopedSubscriptionParameters `json:"clientScopedSubscription,omitempty" tf:"client_scoped_subscription,omitempty"` + + // whether the subscription is scoped to a client id. Defaults to false. + // +kubebuilder:validation:Optional + ClientScopedSubscriptionEnabled *bool `json:"clientScopedSubscriptionEnabled,omitempty" tf:"client_scoped_subscription_enabled,omitempty"` + + // Boolean flag which controls whether the Subscription has dead letter support on filter evaluation exceptions. Defaults to true. + // +kubebuilder:validation:Optional + DeadLetteringOnFilterEvaluationError *bool `json:"deadLetteringOnFilterEvaluationError,omitempty" tf:"dead_lettering_on_filter_evaluation_error,omitempty"` + + // Boolean flag which controls whether the Subscription has dead letter support when a message expires. + // +kubebuilder:validation:Optional + DeadLetteringOnMessageExpiration *bool `json:"deadLetteringOnMessageExpiration,omitempty" tf:"dead_lettering_on_message_expiration,omitempty"` + + // The Default message timespan to live as an ISO 8601 duration. This is the duration after which the message expires, starting from when the message is sent to Service Bus. This is the default value used when TimeToLive is not set on a message itself. + // +kubebuilder:validation:Optional + DefaultMessageTTL *string `json:"defaultMessageTtl,omitempty" tf:"default_message_ttl,omitempty"` + + // Boolean flag which controls whether the Subscription supports batched operations. 
+ // +kubebuilder:validation:Optional + EnableBatchedOperations *bool `json:"enableBatchedOperations,omitempty" tf:"enable_batched_operations,omitempty"` + + // The name of a Queue or Topic to automatically forward Dead Letter messages to. + // +kubebuilder:validation:Optional + ForwardDeadLetteredMessagesTo *string `json:"forwardDeadLetteredMessagesTo,omitempty" tf:"forward_dead_lettered_messages_to,omitempty"` + + // The name of a Queue or Topic to automatically forward messages to. + // +kubebuilder:validation:Optional + ForwardTo *string `json:"forwardTo,omitempty" tf:"forward_to,omitempty"` + + // The lock duration for the subscription as an ISO 8601 duration. The default value is 1 minute or P0DT0H1M0S . The maximum value is 5 minutes or P0DT0H5M0S . + // +kubebuilder:validation:Optional + LockDuration *string `json:"lockDuration,omitempty" tf:"lock_duration,omitempty"` + + // The maximum number of deliveries. + // +kubebuilder:validation:Optional + MaxDeliveryCount *float64 `json:"maxDeliveryCount,omitempty" tf:"max_delivery_count,omitempty"` + + // Boolean flag which controls whether this Subscription supports the concept of a session. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + RequiresSession *bool `json:"requiresSession,omitempty" tf:"requires_session,omitempty"` + + // The status of the Subscription. Possible values are Active,ReceiveDisabled, or Disabled. Defaults to Active. + // +kubebuilder:validation:Optional + Status *string `json:"status,omitempty" tf:"status,omitempty"` + + // The ID of the ServiceBus Topic to create this Subscription in. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/servicebus/v1beta1.Topic + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + TopicID *string `json:"topicId,omitempty" tf:"topic_id,omitempty"` + + // Reference to a Topic in servicebus to populate topicId. + // +kubebuilder:validation:Optional + TopicIDRef *v1.Reference `json:"topicIdRef,omitempty" tf:"-"` + + // Selector for a Topic in servicebus to populate topicId. + // +kubebuilder:validation:Optional + TopicIDSelector *v1.Selector `json:"topicIdSelector,omitempty" tf:"-"` +} + +// SubscriptionSpec defines the desired state of Subscription +type SubscriptionSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider SubscriptionParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider SubscriptionInitParameters `json:"initProvider,omitempty"` +} + +// SubscriptionStatus defines the observed state of Subscription. +type SubscriptionStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SubscriptionObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Subscription is the Schema for the Subscriptions API. Manages a ServiceBus Subscription. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type Subscription struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.maxDeliveryCount) || (has(self.initProvider) && has(self.initProvider.maxDeliveryCount))",message="spec.forProvider.maxDeliveryCount is a required parameter" + Spec SubscriptionSpec `json:"spec"` + Status SubscriptionStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SubscriptionList contains a list of Subscriptions +type SubscriptionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Subscription `json:"items"` +} + +// Repository type metadata. +var ( + Subscription_Kind = "Subscription" + Subscription_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Subscription_Kind}.String() + Subscription_KindAPIVersion = Subscription_Kind + "." 
+ CRDGroupVersion.String() + Subscription_GroupVersionKind = CRDGroupVersion.WithKind(Subscription_Kind) +) + +func init() { + SchemeBuilder.Register(&Subscription{}, &SubscriptionList{}) +} diff --git a/apis/servicebus/v1beta2/zz_subscriptionrule_terraformed.go b/apis/servicebus/v1beta2/zz_subscriptionrule_terraformed.go new file mode 100755 index 000000000..c1fa82a8d --- /dev/null +++ b/apis/servicebus/v1beta2/zz_subscriptionrule_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this SubscriptionRule +func (mg *SubscriptionRule) GetTerraformResourceType() string { + return "azurerm_servicebus_subscription_rule" +} + +// GetConnectionDetailsMapping for this SubscriptionRule +func (tr *SubscriptionRule) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this SubscriptionRule +func (tr *SubscriptionRule) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this SubscriptionRule +func (tr *SubscriptionRule) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this SubscriptionRule +func (tr *SubscriptionRule) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this SubscriptionRule +func (tr *SubscriptionRule) 
GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this SubscriptionRule +func (tr *SubscriptionRule) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this SubscriptionRule +func (tr *SubscriptionRule) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this SubscriptionRule +func (tr *SubscriptionRule) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this SubscriptionRule using its observed tfState. 
+// returns True if there are any spec changes for the resource. +func (tr *SubscriptionRule) LateInitialize(attrs []byte) (bool, error) { + params := &SubscriptionRuleParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *SubscriptionRule) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/servicebus/v1beta2/zz_subscriptionrule_types.go b/apis/servicebus/v1beta2/zz_subscriptionrule_types.go new file mode 100755 index 000000000..db976497c --- /dev/null +++ b/apis/servicebus/v1beta2/zz_subscriptionrule_types.go @@ -0,0 +1,248 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CorrelationFilterInitParameters struct { + + // Content type of the message. + ContentType *string `json:"contentType,omitempty" tf:"content_type,omitempty"` + + // Identifier of the correlation. + CorrelationID *string `json:"correlationId,omitempty" tf:"correlation_id,omitempty"` + + // Application specific label. + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // Identifier of the message. + MessageID *string `json:"messageId,omitempty" tf:"message_id,omitempty"` + + // A list of user defined properties to be included in the filter. Specified as a map of name/value pairs. 
+ // +mapType=granular + Properties map[string]*string `json:"properties,omitempty" tf:"properties,omitempty"` + + // Address of the queue to reply to. + ReplyTo *string `json:"replyTo,omitempty" tf:"reply_to,omitempty"` + + // Session identifier to reply to. + ReplyToSessionID *string `json:"replyToSessionId,omitempty" tf:"reply_to_session_id,omitempty"` + + // Session identifier. + SessionID *string `json:"sessionId,omitempty" tf:"session_id,omitempty"` + + // Address to send to. + To *string `json:"to,omitempty" tf:"to,omitempty"` +} + +type CorrelationFilterObservation struct { + + // Content type of the message. + ContentType *string `json:"contentType,omitempty" tf:"content_type,omitempty"` + + // Identifier of the correlation. + CorrelationID *string `json:"correlationId,omitempty" tf:"correlation_id,omitempty"` + + // Application specific label. + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // Identifier of the message. + MessageID *string `json:"messageId,omitempty" tf:"message_id,omitempty"` + + // A list of user defined properties to be included in the filter. Specified as a map of name/value pairs. + // +mapType=granular + Properties map[string]*string `json:"properties,omitempty" tf:"properties,omitempty"` + + // Address of the queue to reply to. + ReplyTo *string `json:"replyTo,omitempty" tf:"reply_to,omitempty"` + + // Session identifier to reply to. + ReplyToSessionID *string `json:"replyToSessionId,omitempty" tf:"reply_to_session_id,omitempty"` + + // Session identifier. + SessionID *string `json:"sessionId,omitempty" tf:"session_id,omitempty"` + + // Address to send to. + To *string `json:"to,omitempty" tf:"to,omitempty"` +} + +type CorrelationFilterParameters struct { + + // Content type of the message. + // +kubebuilder:validation:Optional + ContentType *string `json:"contentType,omitempty" tf:"content_type,omitempty"` + + // Identifier of the correlation. 
+ // +kubebuilder:validation:Optional + CorrelationID *string `json:"correlationId,omitempty" tf:"correlation_id,omitempty"` + + // Application specific label. + // +kubebuilder:validation:Optional + Label *string `json:"label,omitempty" tf:"label,omitempty"` + + // Identifier of the message. + // +kubebuilder:validation:Optional + MessageID *string `json:"messageId,omitempty" tf:"message_id,omitempty"` + + // A list of user defined properties to be included in the filter. Specified as a map of name/value pairs. + // +kubebuilder:validation:Optional + // +mapType=granular + Properties map[string]*string `json:"properties,omitempty" tf:"properties,omitempty"` + + // Address of the queue to reply to. + // +kubebuilder:validation:Optional + ReplyTo *string `json:"replyTo,omitempty" tf:"reply_to,omitempty"` + + // Session identifier to reply to. + // +kubebuilder:validation:Optional + ReplyToSessionID *string `json:"replyToSessionId,omitempty" tf:"reply_to_session_id,omitempty"` + + // Session identifier. + // +kubebuilder:validation:Optional + SessionID *string `json:"sessionId,omitempty" tf:"session_id,omitempty"` + + // Address to send to. + // +kubebuilder:validation:Optional + To *string `json:"to,omitempty" tf:"to,omitempty"` +} + +type SubscriptionRuleInitParameters struct { + + // Represents set of actions written in SQL language-based syntax that is performed against a BrokeredMessage. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // A correlation_filter block as documented below to be evaluated against a BrokeredMessage. Required when filter_type is set to CorrelationFilter. + CorrelationFilter *CorrelationFilterInitParameters `json:"correlationFilter,omitempty" tf:"correlation_filter,omitempty"` + + // Type of filter to be applied to a BrokeredMessage. Possible values are SqlFilter and CorrelationFilter. 
+ FilterType *string `json:"filterType,omitempty" tf:"filter_type,omitempty"` + + // Represents a filter written in SQL language-based syntax that to be evaluated against a BrokeredMessage. Required when filter_type is set to SqlFilter. + SQLFilter *string `json:"sqlFilter,omitempty" tf:"sql_filter,omitempty"` +} + +type SubscriptionRuleObservation struct { + + // Represents set of actions written in SQL language-based syntax that is performed against a BrokeredMessage. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // A correlation_filter block as documented below to be evaluated against a BrokeredMessage. Required when filter_type is set to CorrelationFilter. + CorrelationFilter *CorrelationFilterObservation `json:"correlationFilter,omitempty" tf:"correlation_filter,omitempty"` + + // Type of filter to be applied to a BrokeredMessage. Possible values are SqlFilter and CorrelationFilter. + FilterType *string `json:"filterType,omitempty" tf:"filter_type,omitempty"` + + // The ServiceBus Subscription Rule ID. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Represents a filter written in SQL language-based syntax that to be evaluated against a BrokeredMessage. Required when filter_type is set to SqlFilter. + SQLFilter *string `json:"sqlFilter,omitempty" tf:"sql_filter,omitempty"` + + SQLFilterCompatibilityLevel *float64 `json:"sqlFilterCompatibilityLevel,omitempty" tf:"sql_filter_compatibility_level,omitempty"` + + // The ID of the ServiceBus Subscription in which this Rule should be created. Changing this forces a new resource to be created. + SubscriptionID *string `json:"subscriptionId,omitempty" tf:"subscription_id,omitempty"` +} + +type SubscriptionRuleParameters struct { + + // Represents set of actions written in SQL language-based syntax that is performed against a BrokeredMessage. 
+ // +kubebuilder:validation:Optional + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // A correlation_filter block as documented below to be evaluated against a BrokeredMessage. Required when filter_type is set to CorrelationFilter. + // +kubebuilder:validation:Optional + CorrelationFilter *CorrelationFilterParameters `json:"correlationFilter,omitempty" tf:"correlation_filter,omitempty"` + + // Type of filter to be applied to a BrokeredMessage. Possible values are SqlFilter and CorrelationFilter. + // +kubebuilder:validation:Optional + FilterType *string `json:"filterType,omitempty" tf:"filter_type,omitempty"` + + // Represents a filter written in SQL language-based syntax that to be evaluated against a BrokeredMessage. Required when filter_type is set to SqlFilter. + // +kubebuilder:validation:Optional + SQLFilter *string `json:"sqlFilter,omitempty" tf:"sql_filter,omitempty"` + + // The ID of the ServiceBus Subscription in which this Rule should be created. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/servicebus/v1beta2.Subscription + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + SubscriptionID *string `json:"subscriptionId,omitempty" tf:"subscription_id,omitempty"` + + // Reference to a Subscription in servicebus to populate subscriptionId. + // +kubebuilder:validation:Optional + SubscriptionIDRef *v1.Reference `json:"subscriptionIdRef,omitempty" tf:"-"` + + // Selector for a Subscription in servicebus to populate subscriptionId. 
+ // +kubebuilder:validation:Optional + SubscriptionIDSelector *v1.Selector `json:"subscriptionIdSelector,omitempty" tf:"-"` +} + +// SubscriptionRuleSpec defines the desired state of SubscriptionRule +type SubscriptionRuleSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider SubscriptionRuleParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider SubscriptionRuleInitParameters `json:"initProvider,omitempty"` +} + +// SubscriptionRuleStatus defines the observed state of SubscriptionRule. +type SubscriptionRuleStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SubscriptionRuleObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// SubscriptionRule is the Schema for the SubscriptionRules API. Manages a ServiceBus Subscription Rule. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type SubscriptionRule struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.filterType) || (has(self.initProvider) && has(self.initProvider.filterType))",message="spec.forProvider.filterType is a required parameter" + Spec SubscriptionRuleSpec `json:"spec"` + Status SubscriptionRuleStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SubscriptionRuleList contains a list of SubscriptionRules +type SubscriptionRuleList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SubscriptionRule `json:"items"` +} + +// Repository type metadata. +var ( + SubscriptionRule_Kind = "SubscriptionRule" + SubscriptionRule_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: SubscriptionRule_Kind}.String() + SubscriptionRule_KindAPIVersion = SubscriptionRule_Kind + "." 
+ CRDGroupVersion.String() + SubscriptionRule_GroupVersionKind = CRDGroupVersion.WithKind(SubscriptionRule_Kind) +) + +func init() { + SchemeBuilder.Register(&SubscriptionRule{}, &SubscriptionRuleList{}) +} diff --git a/apis/servicefabric/v1beta1/zz_generated.conversion_spokes.go b/apis/servicefabric/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..a1e48dfc9 --- /dev/null +++ b/apis/servicefabric/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,54 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Cluster to the hub type. +func (tr *Cluster) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Cluster type. +func (tr *Cluster) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this ManagedCluster to the hub type. 
+func (tr *ManagedCluster) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ManagedCluster type. +func (tr *ManagedCluster) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/servicefabric/v1beta2/zz_cluster_terraformed.go b/apis/servicefabric/v1beta2/zz_cluster_terraformed.go new file mode 100755 index 000000000..fdf293d6c --- /dev/null +++ b/apis/servicefabric/v1beta2/zz_cluster_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Cluster +func (mg *Cluster) GetTerraformResourceType() string { + return "azurerm_service_fabric_cluster" +} + +// GetConnectionDetailsMapping for this Cluster +func (tr *Cluster) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Cluster +func (tr *Cluster) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Cluster +func (tr *Cluster) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Cluster +func (tr *Cluster) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Cluster +func (tr *Cluster) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Cluster +func (tr *Cluster) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Cluster +func (tr *Cluster) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} 
+ +// GetInitParameters of this Cluster +func (tr *Cluster) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Cluster using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Cluster) LateInitialize(attrs []byte) (bool, error) { + params := &ClusterParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Cluster) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/servicefabric/v1beta2/zz_cluster_types.go b/apis/servicefabric/v1beta2/zz_cluster_types.go new file mode 100755 index 000000000..e7a2ce537 --- /dev/null +++ b/apis/servicefabric/v1beta2/zz_cluster_types.go @@ -0,0 +1,1089 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ApplicationPortsInitParameters struct { + + // The end of the Ephemeral Port Range on this Node Type. + EndPort *float64 `json:"endPort,omitempty" tf:"end_port,omitempty"` + + // The start of the Ephemeral Port Range on this Node Type. + StartPort *float64 `json:"startPort,omitempty" tf:"start_port,omitempty"` +} + +type ApplicationPortsObservation struct { + + // The end of the Ephemeral Port Range on this Node Type. + EndPort *float64 `json:"endPort,omitempty" tf:"end_port,omitempty"` + + // The start of the Ephemeral Port Range on this Node Type. + StartPort *float64 `json:"startPort,omitempty" tf:"start_port,omitempty"` +} + +type ApplicationPortsParameters struct { + + // The end of the Ephemeral Port Range on this Node Type. + // +kubebuilder:validation:Optional + EndPort *float64 `json:"endPort" tf:"end_port,omitempty"` + + // The start of the Ephemeral Port Range on this Node Type. + // +kubebuilder:validation:Optional + StartPort *float64 `json:"startPort" tf:"start_port,omitempty"` +} + +type AzureActiveDirectoryInitParameters struct { + + // The Azure Active Directory Client ID which should be used for the Client Application. 
+ ClientApplicationID *string `json:"clientApplicationId,omitempty" tf:"client_application_id,omitempty"` + + // The Azure Active Directory Cluster Application ID. + ClusterApplicationID *string `json:"clusterApplicationId,omitempty" tf:"cluster_application_id,omitempty"` + + // The Azure Active Directory Tenant ID. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` +} + +type AzureActiveDirectoryObservation struct { + + // The Azure Active Directory Client ID which should be used for the Client Application. + ClientApplicationID *string `json:"clientApplicationId,omitempty" tf:"client_application_id,omitempty"` + + // The Azure Active Directory Cluster Application ID. + ClusterApplicationID *string `json:"clusterApplicationId,omitempty" tf:"cluster_application_id,omitempty"` + + // The Azure Active Directory Tenant ID. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` +} + +type AzureActiveDirectoryParameters struct { + + // The Azure Active Directory Client ID which should be used for the Client Application. + // +kubebuilder:validation:Optional + ClientApplicationID *string `json:"clientApplicationId" tf:"client_application_id,omitempty"` + + // The Azure Active Directory Cluster Application ID. + // +kubebuilder:validation:Optional + ClusterApplicationID *string `json:"clusterApplicationId" tf:"cluster_application_id,omitempty"` + + // The Azure Active Directory Tenant ID. + // +kubebuilder:validation:Optional + TenantID *string `json:"tenantId" tf:"tenant_id,omitempty"` +} + +type CertificateCommonNamesInitParameters struct { + + // A common_names block as defined below. + CommonNames []CommonNamesInitParameters `json:"commonNames,omitempty" tf:"common_names,omitempty"` + + // The X509 Store where the Certificate Exists, such as My. + X509StoreName *string `json:"x509StoreName,omitempty" tf:"x509_store_name,omitempty"` +} + +type CertificateCommonNamesObservation struct { + + // A common_names block as defined below. 
+ CommonNames []CommonNamesObservation `json:"commonNames,omitempty" tf:"common_names,omitempty"` + + // The X509 Store where the Certificate Exists, such as My. + X509StoreName *string `json:"x509StoreName,omitempty" tf:"x509_store_name,omitempty"` +} + +type CertificateCommonNamesParameters struct { + + // A common_names block as defined below. + // +kubebuilder:validation:Optional + CommonNames []CommonNamesParameters `json:"commonNames" tf:"common_names,omitempty"` + + // The X509 Store where the Certificate Exists, such as My. + // +kubebuilder:validation:Optional + X509StoreName *string `json:"x509StoreName" tf:"x509_store_name,omitempty"` +} + +type CertificateInitParameters struct { + + // The Thumbprint of the Certificate. + Thumbprint *string `json:"thumbprint,omitempty" tf:"thumbprint,omitempty"` + + // The Secondary Thumbprint of the Certificate. + ThumbprintSecondary *string `json:"thumbprintSecondary,omitempty" tf:"thumbprint_secondary,omitempty"` + + // The X509 Store where the Certificate Exists, such as My. + X509StoreName *string `json:"x509StoreName,omitempty" tf:"x509_store_name,omitempty"` +} + +type CertificateObservation struct { + + // The Thumbprint of the Certificate. + Thumbprint *string `json:"thumbprint,omitempty" tf:"thumbprint,omitempty"` + + // The Secondary Thumbprint of the Certificate. + ThumbprintSecondary *string `json:"thumbprintSecondary,omitempty" tf:"thumbprint_secondary,omitempty"` + + // The X509 Store where the Certificate Exists, such as My. + X509StoreName *string `json:"x509StoreName,omitempty" tf:"x509_store_name,omitempty"` +} + +type CertificateParameters struct { + + // The Thumbprint of the Certificate. + // +kubebuilder:validation:Optional + Thumbprint *string `json:"thumbprint" tf:"thumbprint,omitempty"` + + // The Secondary Thumbprint of the Certificate. 
+ // +kubebuilder:validation:Optional + ThumbprintSecondary *string `json:"thumbprintSecondary,omitempty" tf:"thumbprint_secondary,omitempty"` + + // The X509 Store where the Certificate Exists, such as My. + // +kubebuilder:validation:Optional + X509StoreName *string `json:"x509StoreName" tf:"x509_store_name,omitempty"` +} + +type ClientCertificateCommonNameInitParameters struct { + + // The common or subject name of the certificate. + CommonName *string `json:"commonName,omitempty" tf:"common_name,omitempty"` + + // Does the Client Certificate have Admin Access to the cluster? Non-admin clients can only perform read only operations on the cluster. + IsAdmin *bool `json:"isAdmin,omitempty" tf:"is_admin,omitempty"` + + // The Issuer Thumbprint of the Certificate. + IssuerThumbprint *string `json:"issuerThumbprint,omitempty" tf:"issuer_thumbprint,omitempty"` +} + +type ClientCertificateCommonNameObservation struct { + + // The common or subject name of the certificate. + CommonName *string `json:"commonName,omitempty" tf:"common_name,omitempty"` + + // Does the Client Certificate have Admin Access to the cluster? Non-admin clients can only perform read only operations on the cluster. + IsAdmin *bool `json:"isAdmin,omitempty" tf:"is_admin,omitempty"` + + // The Issuer Thumbprint of the Certificate. + IssuerThumbprint *string `json:"issuerThumbprint,omitempty" tf:"issuer_thumbprint,omitempty"` +} + +type ClientCertificateCommonNameParameters struct { + + // The common or subject name of the certificate. + // +kubebuilder:validation:Optional + CommonName *string `json:"commonName" tf:"common_name,omitempty"` + + // Does the Client Certificate have Admin Access to the cluster? Non-admin clients can only perform read only operations on the cluster. + // +kubebuilder:validation:Optional + IsAdmin *bool `json:"isAdmin" tf:"is_admin,omitempty"` + + // The Issuer Thumbprint of the Certificate. 
+ // +kubebuilder:validation:Optional + IssuerThumbprint *string `json:"issuerThumbprint,omitempty" tf:"issuer_thumbprint,omitempty"` +} + +type ClientCertificateThumbprintInitParameters struct { + + // Does the Client Certificate have Admin Access to the cluster? Non-admin clients can only perform read only operations on the cluster. + IsAdmin *bool `json:"isAdmin,omitempty" tf:"is_admin,omitempty"` + + // The Thumbprint associated with the Client Certificate. + Thumbprint *string `json:"thumbprint,omitempty" tf:"thumbprint,omitempty"` +} + +type ClientCertificateThumbprintObservation struct { + + // Does the Client Certificate have Admin Access to the cluster? Non-admin clients can only perform read only operations on the cluster. + IsAdmin *bool `json:"isAdmin,omitempty" tf:"is_admin,omitempty"` + + // The Thumbprint associated with the Client Certificate. + Thumbprint *string `json:"thumbprint,omitempty" tf:"thumbprint,omitempty"` +} + +type ClientCertificateThumbprintParameters struct { + + // Does the Client Certificate have Admin Access to the cluster? Non-admin clients can only perform read only operations on the cluster. + // +kubebuilder:validation:Optional + IsAdmin *bool `json:"isAdmin" tf:"is_admin,omitempty"` + + // The Thumbprint associated with the Client Certificate. + // +kubebuilder:validation:Optional + Thumbprint *string `json:"thumbprint" tf:"thumbprint,omitempty"` +} + +type ClusterInitParameters struct { + + // A List of one or more features which should be enabled, such as DnsService. + // +listType=set + AddOnFeatures []*string `json:"addOnFeatures,omitempty" tf:"add_on_features,omitempty"` + + // An azure_active_directory block as defined below. + AzureActiveDirectory *AzureActiveDirectoryInitParameters `json:"azureActiveDirectory,omitempty" tf:"azure_active_directory,omitempty"` + + // A certificate block as defined below. Conflicts with certificate_common_names. 
+ Certificate *CertificateInitParameters `json:"certificate,omitempty" tf:"certificate,omitempty"` + + // A certificate_common_names block as defined below. Conflicts with certificate. + CertificateCommonNames *CertificateCommonNamesInitParameters `json:"certificateCommonNames,omitempty" tf:"certificate_common_names,omitempty"` + + // A client_certificate_common_name block as defined below. + ClientCertificateCommonName []ClientCertificateCommonNameInitParameters `json:"clientCertificateCommonName,omitempty" tf:"client_certificate_common_name,omitempty"` + + // One or more client_certificate_thumbprint blocks as defined below. + ClientCertificateThumbprint []ClientCertificateThumbprintInitParameters `json:"clientCertificateThumbprint,omitempty" tf:"client_certificate_thumbprint,omitempty"` + + // Required if Upgrade Mode set to Manual, Specifies the Version of the Cluster Code of the cluster. + ClusterCodeVersion *string `json:"clusterCodeVersion,omitempty" tf:"cluster_code_version,omitempty"` + + // A diagnostics_config block as defined below. + DiagnosticsConfig *DiagnosticsConfigInitParameters `json:"diagnosticsConfig,omitempty" tf:"diagnostics_config,omitempty"` + + // One or more fabric_settings blocks as defined below. + FabricSettings []FabricSettingsInitParameters `json:"fabricSettings,omitempty" tf:"fabric_settings,omitempty"` + + // Specifies the Azure Region where the Service Fabric Cluster should exist. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the Management Endpoint of the cluster such as http://example.com. Changing this forces a new resource to be created. + ManagementEndpoint *string `json:"managementEndpoint,omitempty" tf:"management_endpoint,omitempty"` + + // One or more node_type blocks as defined below. + NodeType []NodeTypeInitParameters `json:"nodeType,omitempty" tf:"node_type,omitempty"` + + // Specifies the Reliability Level of the Cluster. 
Possible values include None, Bronze, Silver, Gold and Platinum. + ReliabilityLevel *string `json:"reliabilityLevel,omitempty" tf:"reliability_level,omitempty"` + + // A reverse_proxy_certificate block as defined below. Conflicts with reverse_proxy_certificate_common_names. + ReverseProxyCertificate *ReverseProxyCertificateInitParameters `json:"reverseProxyCertificate,omitempty" tf:"reverse_proxy_certificate,omitempty"` + + // A reverse_proxy_certificate_common_names block as defined below. Conflicts with reverse_proxy_certificate. + ReverseProxyCertificateCommonNames *ReverseProxyCertificateCommonNamesInitParameters `json:"reverseProxyCertificateCommonNames,omitempty" tf:"reverse_proxy_certificate_common_names,omitempty"` + + // Specifies the logical grouping of VMs in upgrade domains. Possible values are Hierarchical or Parallel. + ServiceFabricZonalUpgradeMode *string `json:"serviceFabricZonalUpgradeMode,omitempty" tf:"service_fabric_zonal_upgrade_mode,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies the Upgrade Mode of the cluster. Possible values are Automatic or Manual. + UpgradeMode *string `json:"upgradeMode,omitempty" tf:"upgrade_mode,omitempty"` + + // A upgrade_policy block as defined below. + UpgradePolicy *UpgradePolicyInitParameters `json:"upgradePolicy,omitempty" tf:"upgrade_policy,omitempty"` + + // Specifies the Image expected for the Service Fabric Cluster, such as Windows. Changing this forces a new resource to be created. + VMImage *string `json:"vmImage,omitempty" tf:"vm_image,omitempty"` + + // Specifies the upgrade mode for the virtual machine scale set updates that happen in all availability zones at once. Possible values are Hierarchical or Parallel. 
+ VmssZonalUpgradeMode *string `json:"vmssZonalUpgradeMode,omitempty" tf:"vmss_zonal_upgrade_mode,omitempty"` +} + +type ClusterObservation struct { + + // A List of one or more features which should be enabled, such as DnsService. + // +listType=set + AddOnFeatures []*string `json:"addOnFeatures,omitempty" tf:"add_on_features,omitempty"` + + // An azure_active_directory block as defined below. + AzureActiveDirectory *AzureActiveDirectoryObservation `json:"azureActiveDirectory,omitempty" tf:"azure_active_directory,omitempty"` + + // A certificate block as defined below. Conflicts with certificate_common_names. + Certificate *CertificateObservation `json:"certificate,omitempty" tf:"certificate,omitempty"` + + // A certificate_common_names block as defined below. Conflicts with certificate. + CertificateCommonNames *CertificateCommonNamesObservation `json:"certificateCommonNames,omitempty" tf:"certificate_common_names,omitempty"` + + // A client_certificate_common_name block as defined below. + ClientCertificateCommonName []ClientCertificateCommonNameObservation `json:"clientCertificateCommonName,omitempty" tf:"client_certificate_common_name,omitempty"` + + // One or more client_certificate_thumbprint blocks as defined below. + ClientCertificateThumbprint []ClientCertificateThumbprintObservation `json:"clientCertificateThumbprint,omitempty" tf:"client_certificate_thumbprint,omitempty"` + + // Required if Upgrade Mode set to Manual, Specifies the Version of the Cluster Code of the cluster. + ClusterCodeVersion *string `json:"clusterCodeVersion,omitempty" tf:"cluster_code_version,omitempty"` + + // The Cluster Endpoint for this Service Fabric Cluster. + ClusterEndpoint *string `json:"clusterEndpoint,omitempty" tf:"cluster_endpoint,omitempty"` + + // A diagnostics_config block as defined below. 
+ DiagnosticsConfig *DiagnosticsConfigObservation `json:"diagnosticsConfig,omitempty" tf:"diagnostics_config,omitempty"` + + // One or more fabric_settings blocks as defined below. + FabricSettings []FabricSettingsObservation `json:"fabricSettings,omitempty" tf:"fabric_settings,omitempty"` + + // The ID of the Service Fabric Cluster. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies the Azure Region where the Service Fabric Cluster should exist. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the Management Endpoint of the cluster such as http://example.com. Changing this forces a new resource to be created. + ManagementEndpoint *string `json:"managementEndpoint,omitempty" tf:"management_endpoint,omitempty"` + + // One or more node_type blocks as defined below. + NodeType []NodeTypeObservation `json:"nodeType,omitempty" tf:"node_type,omitempty"` + + // Specifies the Reliability Level of the Cluster. Possible values include None, Bronze, Silver, Gold and Platinum. + ReliabilityLevel *string `json:"reliabilityLevel,omitempty" tf:"reliability_level,omitempty"` + + // The name of the Resource Group in which the Service Fabric Cluster exists. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A reverse_proxy_certificate block as defined below. Conflicts with reverse_proxy_certificate_common_names. + ReverseProxyCertificate *ReverseProxyCertificateObservation `json:"reverseProxyCertificate,omitempty" tf:"reverse_proxy_certificate,omitempty"` + + // A reverse_proxy_certificate_common_names block as defined below. Conflicts with reverse_proxy_certificate. 
+ ReverseProxyCertificateCommonNames *ReverseProxyCertificateCommonNamesObservation `json:"reverseProxyCertificateCommonNames,omitempty" tf:"reverse_proxy_certificate_common_names,omitempty"` + + // Specifies the logical grouping of VMs in upgrade domains. Possible values are Hierarchical or Parallel. + ServiceFabricZonalUpgradeMode *string `json:"serviceFabricZonalUpgradeMode,omitempty" tf:"service_fabric_zonal_upgrade_mode,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies the Upgrade Mode of the cluster. Possible values are Automatic or Manual. + UpgradeMode *string `json:"upgradeMode,omitempty" tf:"upgrade_mode,omitempty"` + + // A upgrade_policy block as defined below. + UpgradePolicy *UpgradePolicyObservation `json:"upgradePolicy,omitempty" tf:"upgrade_policy,omitempty"` + + // Specifies the Image expected for the Service Fabric Cluster, such as Windows. Changing this forces a new resource to be created. + VMImage *string `json:"vmImage,omitempty" tf:"vm_image,omitempty"` + + // Specifies the upgrade mode for the virtual machine scale set updates that happen in all availability zones at once. Possible values are Hierarchical or Parallel. + VmssZonalUpgradeMode *string `json:"vmssZonalUpgradeMode,omitempty" tf:"vmss_zonal_upgrade_mode,omitempty"` +} + +type ClusterParameters struct { + + // A List of one or more features which should be enabled, such as DnsService. + // +kubebuilder:validation:Optional + // +listType=set + AddOnFeatures []*string `json:"addOnFeatures,omitempty" tf:"add_on_features,omitempty"` + + // An azure_active_directory block as defined below. + // +kubebuilder:validation:Optional + AzureActiveDirectory *AzureActiveDirectoryParameters `json:"azureActiveDirectory,omitempty" tf:"azure_active_directory,omitempty"` + + // A certificate block as defined below. Conflicts with certificate_common_names. 
+ // +kubebuilder:validation:Optional + Certificate *CertificateParameters `json:"certificate,omitempty" tf:"certificate,omitempty"` + + // A certificate_common_names block as defined below. Conflicts with certificate. + // +kubebuilder:validation:Optional + CertificateCommonNames *CertificateCommonNamesParameters `json:"certificateCommonNames,omitempty" tf:"certificate_common_names,omitempty"` + + // A client_certificate_common_name block as defined below. + // +kubebuilder:validation:Optional + ClientCertificateCommonName []ClientCertificateCommonNameParameters `json:"clientCertificateCommonName,omitempty" tf:"client_certificate_common_name,omitempty"` + + // One or more client_certificate_thumbprint blocks as defined below. + // +kubebuilder:validation:Optional + ClientCertificateThumbprint []ClientCertificateThumbprintParameters `json:"clientCertificateThumbprint,omitempty" tf:"client_certificate_thumbprint,omitempty"` + + // Required if Upgrade Mode set to Manual, Specifies the Version of the Cluster Code of the cluster. + // +kubebuilder:validation:Optional + ClusterCodeVersion *string `json:"clusterCodeVersion,omitempty" tf:"cluster_code_version,omitempty"` + + // A diagnostics_config block as defined below. + // +kubebuilder:validation:Optional + DiagnosticsConfig *DiagnosticsConfigParameters `json:"diagnosticsConfig,omitempty" tf:"diagnostics_config,omitempty"` + + // One or more fabric_settings blocks as defined below. + // +kubebuilder:validation:Optional + FabricSettings []FabricSettingsParameters `json:"fabricSettings,omitempty" tf:"fabric_settings,omitempty"` + + // Specifies the Azure Region where the Service Fabric Cluster should exist. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the Management Endpoint of the cluster such as http://example.com. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + ManagementEndpoint *string `json:"managementEndpoint,omitempty" tf:"management_endpoint,omitempty"` + + // One or more node_type blocks as defined below. + // +kubebuilder:validation:Optional + NodeType []NodeTypeParameters `json:"nodeType,omitempty" tf:"node_type,omitempty"` + + // Specifies the Reliability Level of the Cluster. Possible values include None, Bronze, Silver, Gold and Platinum. + // +kubebuilder:validation:Optional + ReliabilityLevel *string `json:"reliabilityLevel,omitempty" tf:"reliability_level,omitempty"` + + // The name of the Resource Group in which the Service Fabric Cluster exists. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A reverse_proxy_certificate block as defined below. Conflicts with reverse_proxy_certificate_common_names. + // +kubebuilder:validation:Optional + ReverseProxyCertificate *ReverseProxyCertificateParameters `json:"reverseProxyCertificate,omitempty" tf:"reverse_proxy_certificate,omitempty"` + + // A reverse_proxy_certificate_common_names block as defined below. Conflicts with reverse_proxy_certificate. 
+ // +kubebuilder:validation:Optional + ReverseProxyCertificateCommonNames *ReverseProxyCertificateCommonNamesParameters `json:"reverseProxyCertificateCommonNames,omitempty" tf:"reverse_proxy_certificate_common_names,omitempty"` + + // Specifies the logical grouping of VMs in upgrade domains. Possible values are Hierarchical or Parallel. + // +kubebuilder:validation:Optional + ServiceFabricZonalUpgradeMode *string `json:"serviceFabricZonalUpgradeMode,omitempty" tf:"service_fabric_zonal_upgrade_mode,omitempty"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies the Upgrade Mode of the cluster. Possible values are Automatic or Manual. + // +kubebuilder:validation:Optional + UpgradeMode *string `json:"upgradeMode,omitempty" tf:"upgrade_mode,omitempty"` + + // A upgrade_policy block as defined below. + // +kubebuilder:validation:Optional + UpgradePolicy *UpgradePolicyParameters `json:"upgradePolicy,omitempty" tf:"upgrade_policy,omitempty"` + + // Specifies the Image expected for the Service Fabric Cluster, such as Windows. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + VMImage *string `json:"vmImage,omitempty" tf:"vm_image,omitempty"` + + // Specifies the upgrade mode for the virtual machine scale set updates that happen in all availability zones at once. Possible values are Hierarchical or Parallel. + // +kubebuilder:validation:Optional + VmssZonalUpgradeMode *string `json:"vmssZonalUpgradeMode,omitempty" tf:"vmss_zonal_upgrade_mode,omitempty"` +} + +type CommonNamesInitParameters struct { + + // The common or subject name of the certificate. + CertificateCommonName *string `json:"certificateCommonName,omitempty" tf:"certificate_common_name,omitempty"` + + // The Issuer Thumbprint of the Certificate. 
+ CertificateIssuerThumbprint *string `json:"certificateIssuerThumbprint,omitempty" tf:"certificate_issuer_thumbprint,omitempty"` +} + +type CommonNamesObservation struct { + + // The common or subject name of the certificate. + CertificateCommonName *string `json:"certificateCommonName,omitempty" tf:"certificate_common_name,omitempty"` + + // The Issuer Thumbprint of the Certificate. + CertificateIssuerThumbprint *string `json:"certificateIssuerThumbprint,omitempty" tf:"certificate_issuer_thumbprint,omitempty"` +} + +type CommonNamesParameters struct { + + // The common or subject name of the certificate. + // +kubebuilder:validation:Optional + CertificateCommonName *string `json:"certificateCommonName" tf:"certificate_common_name,omitempty"` + + // The Issuer Thumbprint of the Certificate. + // +kubebuilder:validation:Optional + CertificateIssuerThumbprint *string `json:"certificateIssuerThumbprint,omitempty" tf:"certificate_issuer_thumbprint,omitempty"` +} + +type DeltaHealthPolicyInitParameters struct { + + // Specifies the maximum tolerated percentage of delta unhealthy applications that can have aggregated health states of error. If the current unhealthy applications do not respect the percentage relative to the state at the beginning of the upgrade, the cluster is unhealthy. Defaults to 0. + MaxDeltaUnhealthyApplicationsPercent *float64 `json:"maxDeltaUnhealthyApplicationsPercent,omitempty" tf:"max_delta_unhealthy_applications_percent,omitempty"` + + // Specifies the maximum tolerated percentage of delta unhealthy nodes that can have aggregated health states of error. If the current unhealthy nodes do not respect the percentage relative to the state at the beginning of the upgrade, the cluster is unhealthy. Defaults to 0. 
+ MaxDeltaUnhealthyNodesPercent *float64 `json:"maxDeltaUnhealthyNodesPercent,omitempty" tf:"max_delta_unhealthy_nodes_percent,omitempty"` + + // Specifies the maximum tolerated percentage of upgrade domain delta unhealthy nodes that can have aggregated health state of error. If there is any upgrade domain where the current unhealthy nodes do not respect the percentage relative to the state at the beginning of the upgrade, the cluster is unhealthy. Defaults to 0. + MaxUpgradeDomainDeltaUnhealthyNodesPercent *float64 `json:"maxUpgradeDomainDeltaUnhealthyNodesPercent,omitempty" tf:"max_upgrade_domain_delta_unhealthy_nodes_percent,omitempty"` +} + +type DeltaHealthPolicyObservation struct { + + // Specifies the maximum tolerated percentage of delta unhealthy applications that can have aggregated health states of error. If the current unhealthy applications do not respect the percentage relative to the state at the beginning of the upgrade, the cluster is unhealthy. Defaults to 0. + MaxDeltaUnhealthyApplicationsPercent *float64 `json:"maxDeltaUnhealthyApplicationsPercent,omitempty" tf:"max_delta_unhealthy_applications_percent,omitempty"` + + // Specifies the maximum tolerated percentage of delta unhealthy nodes that can have aggregated health states of error. If the current unhealthy nodes do not respect the percentage relative to the state at the beginning of the upgrade, the cluster is unhealthy. Defaults to 0. + MaxDeltaUnhealthyNodesPercent *float64 `json:"maxDeltaUnhealthyNodesPercent,omitempty" tf:"max_delta_unhealthy_nodes_percent,omitempty"` + + // Specifies the maximum tolerated percentage of upgrade domain delta unhealthy nodes that can have aggregated health state of error. If there is any upgrade domain where the current unhealthy nodes do not respect the percentage relative to the state at the beginning of the upgrade, the cluster is unhealthy. Defaults to 0. 
+ MaxUpgradeDomainDeltaUnhealthyNodesPercent *float64 `json:"maxUpgradeDomainDeltaUnhealthyNodesPercent,omitempty" tf:"max_upgrade_domain_delta_unhealthy_nodes_percent,omitempty"` +} + +type DeltaHealthPolicyParameters struct { + + // Specifies the maximum tolerated percentage of delta unhealthy applications that can have aggregated health states of error. If the current unhealthy applications do not respect the percentage relative to the state at the beginning of the upgrade, the cluster is unhealthy. Defaults to 0. + // +kubebuilder:validation:Optional + MaxDeltaUnhealthyApplicationsPercent *float64 `json:"maxDeltaUnhealthyApplicationsPercent,omitempty" tf:"max_delta_unhealthy_applications_percent,omitempty"` + + // Specifies the maximum tolerated percentage of delta unhealthy nodes that can have aggregated health states of error. If the current unhealthy nodes do not respect the percentage relative to the state at the beginning of the upgrade, the cluster is unhealthy. Defaults to 0. + // +kubebuilder:validation:Optional + MaxDeltaUnhealthyNodesPercent *float64 `json:"maxDeltaUnhealthyNodesPercent,omitempty" tf:"max_delta_unhealthy_nodes_percent,omitempty"` + + // Specifies the maximum tolerated percentage of upgrade domain delta unhealthy nodes that can have aggregated health state of error. If there is any upgrade domain where the current unhealthy nodes do not respect the percentage relative to the state at the beginning of the upgrade, the cluster is unhealthy. Defaults to 0. + // +kubebuilder:validation:Optional + MaxUpgradeDomainDeltaUnhealthyNodesPercent *float64 `json:"maxUpgradeDomainDeltaUnhealthyNodesPercent,omitempty" tf:"max_upgrade_domain_delta_unhealthy_nodes_percent,omitempty"` +} + +type DiagnosticsConfigInitParameters struct { + + // The Blob Endpoint of the Storage Account. + BlobEndpoint *string `json:"blobEndpoint,omitempty" tf:"blob_endpoint,omitempty"` + + // The protected diagnostics storage key name, such as StorageAccountKey1. 
+ ProtectedAccountKeyName *string `json:"protectedAccountKeyName,omitempty" tf:"protected_account_key_name,omitempty"` + + // The Queue Endpoint of the Storage Account. + QueueEndpoint *string `json:"queueEndpoint,omitempty" tf:"queue_endpoint,omitempty"` + + // The name of the Storage Account where the Diagnostics should be sent to. + StorageAccountName *string `json:"storageAccountName,omitempty" tf:"storage_account_name,omitempty"` + + // The Table Endpoint of the Storage Account. + TableEndpoint *string `json:"tableEndpoint,omitempty" tf:"table_endpoint,omitempty"` +} + +type DiagnosticsConfigObservation struct { + + // The Blob Endpoint of the Storage Account. + BlobEndpoint *string `json:"blobEndpoint,omitempty" tf:"blob_endpoint,omitempty"` + + // The protected diagnostics storage key name, such as StorageAccountKey1. + ProtectedAccountKeyName *string `json:"protectedAccountKeyName,omitempty" tf:"protected_account_key_name,omitempty"` + + // The Queue Endpoint of the Storage Account. + QueueEndpoint *string `json:"queueEndpoint,omitempty" tf:"queue_endpoint,omitempty"` + + // The name of the Storage Account where the Diagnostics should be sent to. + StorageAccountName *string `json:"storageAccountName,omitempty" tf:"storage_account_name,omitempty"` + + // The Table Endpoint of the Storage Account. + TableEndpoint *string `json:"tableEndpoint,omitempty" tf:"table_endpoint,omitempty"` +} + +type DiagnosticsConfigParameters struct { + + // The Blob Endpoint of the Storage Account. + // +kubebuilder:validation:Optional + BlobEndpoint *string `json:"blobEndpoint" tf:"blob_endpoint,omitempty"` + + // The protected diagnostics storage key name, such as StorageAccountKey1. + // +kubebuilder:validation:Optional + ProtectedAccountKeyName *string `json:"protectedAccountKeyName" tf:"protected_account_key_name,omitempty"` + + // The Queue Endpoint of the Storage Account. 
+ // +kubebuilder:validation:Optional + QueueEndpoint *string `json:"queueEndpoint" tf:"queue_endpoint,omitempty"` + + // The name of the Storage Account where the Diagnostics should be sent to. + // +kubebuilder:validation:Optional + StorageAccountName *string `json:"storageAccountName" tf:"storage_account_name,omitempty"` + + // The Table Endpoint of the Storage Account. + // +kubebuilder:validation:Optional + TableEndpoint *string `json:"tableEndpoint" tf:"table_endpoint,omitempty"` +} + +type EphemeralPortsInitParameters struct { + + // The end of the Ephemeral Port Range on this Node Type. + EndPort *float64 `json:"endPort,omitempty" tf:"end_port,omitempty"` + + // The start of the Ephemeral Port Range on this Node Type. + StartPort *float64 `json:"startPort,omitempty" tf:"start_port,omitempty"` +} + +type EphemeralPortsObservation struct { + + // The end of the Ephemeral Port Range on this Node Type. + EndPort *float64 `json:"endPort,omitempty" tf:"end_port,omitempty"` + + // The start of the Ephemeral Port Range on this Node Type. + StartPort *float64 `json:"startPort,omitempty" tf:"start_port,omitempty"` +} + +type EphemeralPortsParameters struct { + + // The end of the Ephemeral Port Range on this Node Type. + // +kubebuilder:validation:Optional + EndPort *float64 `json:"endPort" tf:"end_port,omitempty"` + + // The start of the Ephemeral Port Range on this Node Type. + // +kubebuilder:validation:Optional + StartPort *float64 `json:"startPort" tf:"start_port,omitempty"` +} + +type FabricSettingsInitParameters struct { + + // The name of the Fabric Setting, such as Security or Federation. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A map containing settings for the specified Fabric Setting. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type FabricSettingsObservation struct { + + // The name of the Fabric Setting, such as Security or Federation. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A map containing settings for the specified Fabric Setting. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type FabricSettingsParameters struct { + + // The name of the Fabric Setting, such as Security or Federation. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // A map containing settings for the specified Fabric Setting. + // +kubebuilder:validation:Optional + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type HealthPolicyInitParameters struct { + + // Specifies the maximum tolerated percentage of applications that can have aggregated health state of error. If the upgrade exceeds this percentage, the cluster is unhealthy. Defaults to 0. + MaxUnhealthyApplicationsPercent *float64 `json:"maxUnhealthyApplicationsPercent,omitempty" tf:"max_unhealthy_applications_percent,omitempty"` + + // Specifies the maximum tolerated percentage of nodes that can have aggregated health states of error. If an upgrade exceeds this percentage, the cluster is unhealthy. Defaults to 0. + MaxUnhealthyNodesPercent *float64 `json:"maxUnhealthyNodesPercent,omitempty" tf:"max_unhealthy_nodes_percent,omitempty"` +} + +type HealthPolicyObservation struct { + + // Specifies the maximum tolerated percentage of applications that can have aggregated health state of error. If the upgrade exceeds this percentage, the cluster is unhealthy. Defaults to 0. + MaxUnhealthyApplicationsPercent *float64 `json:"maxUnhealthyApplicationsPercent,omitempty" tf:"max_unhealthy_applications_percent,omitempty"` + + // Specifies the maximum tolerated percentage of nodes that can have aggregated health states of error. If an upgrade exceeds this percentage, the cluster is unhealthy. Defaults to 0. 
+ MaxUnhealthyNodesPercent *float64 `json:"maxUnhealthyNodesPercent,omitempty" tf:"max_unhealthy_nodes_percent,omitempty"` +} + +type HealthPolicyParameters struct { + + // Specifies the maximum tolerated percentage of applications that can have aggregated health state of error. If the upgrade exceeds this percentage, the cluster is unhealthy. Defaults to 0. + // +kubebuilder:validation:Optional + MaxUnhealthyApplicationsPercent *float64 `json:"maxUnhealthyApplicationsPercent,omitempty" tf:"max_unhealthy_applications_percent,omitempty"` + + // Specifies the maximum tolerated percentage of nodes that can have aggregated health states of error. If an upgrade exceeds this percentage, the cluster is unhealthy. Defaults to 0. + // +kubebuilder:validation:Optional + MaxUnhealthyNodesPercent *float64 `json:"maxUnhealthyNodesPercent,omitempty" tf:"max_unhealthy_nodes_percent,omitempty"` +} + +type NodeTypeInitParameters struct { + + // A application_ports block as defined below. + ApplicationPorts *ApplicationPortsInitParameters `json:"applicationPorts,omitempty" tf:"application_ports,omitempty"` + + // The capacity tags applied to the nodes in the node type, the cluster resource manager uses these tags to understand how much resource a node has. + // +mapType=granular + Capacities map[string]*string `json:"capacities,omitempty" tf:"capacities,omitempty"` + + // The Port used for the Client Endpoint for this Node Type. + ClientEndpointPort *float64 `json:"clientEndpointPort,omitempty" tf:"client_endpoint_port,omitempty"` + + // The Durability Level for this Node Type. Possible values include Bronze, Gold and Silver. Defaults to Bronze. + DurabilityLevel *string `json:"durabilityLevel,omitempty" tf:"durability_level,omitempty"` + + // A ephemeral_ports block as defined below. + EphemeralPorts *EphemeralPortsInitParameters `json:"ephemeralPorts,omitempty" tf:"ephemeral_ports,omitempty"` + + // The Port used for the HTTP Endpoint for this Node Type. 
+ HTTPEndpointPort *float64 `json:"httpEndpointPort,omitempty" tf:"http_endpoint_port,omitempty"` + + // The number of nodes for this Node Type. + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + // Is this the Primary Node Type? + IsPrimary *bool `json:"isPrimary,omitempty" tf:"is_primary,omitempty"` + + // Should this node type run only stateless services? + IsStateless *bool `json:"isStateless,omitempty" tf:"is_stateless,omitempty"` + + // Does this node type span availability zones? + MultipleAvailabilityZones *bool `json:"multipleAvailabilityZones,omitempty" tf:"multiple_availability_zones,omitempty"` + + // The name of the Node Type. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The placement tags applied to nodes in the node type, which can be used to indicate where certain services (workload) should run. + // +mapType=granular + PlacementProperties map[string]*string `json:"placementProperties,omitempty" tf:"placement_properties,omitempty"` + + // The Port used for the Reverse Proxy Endpoint for this Node Type. Changing this will upgrade the cluster. + ReverseProxyEndpointPort *float64 `json:"reverseProxyEndpointPort,omitempty" tf:"reverse_proxy_endpoint_port,omitempty"` +} + +type NodeTypeObservation struct { + + // A application_ports block as defined below. + ApplicationPorts *ApplicationPortsObservation `json:"applicationPorts,omitempty" tf:"application_ports,omitempty"` + + // The capacity tags applied to the nodes in the node type, the cluster resource manager uses these tags to understand how much resource a node has. + // +mapType=granular + Capacities map[string]*string `json:"capacities,omitempty" tf:"capacities,omitempty"` + + // The Port used for the Client Endpoint for this Node Type. + ClientEndpointPort *float64 `json:"clientEndpointPort,omitempty" tf:"client_endpoint_port,omitempty"` + + // The Durability Level for this Node Type. Possible values include Bronze, Gold and Silver. 
Defaults to Bronze. + DurabilityLevel *string `json:"durabilityLevel,omitempty" tf:"durability_level,omitempty"` + + // A ephemeral_ports block as defined below. + EphemeralPorts *EphemeralPortsObservation `json:"ephemeralPorts,omitempty" tf:"ephemeral_ports,omitempty"` + + // The Port used for the HTTP Endpoint for this Node Type. + HTTPEndpointPort *float64 `json:"httpEndpointPort,omitempty" tf:"http_endpoint_port,omitempty"` + + // The number of nodes for this Node Type. + InstanceCount *float64 `json:"instanceCount,omitempty" tf:"instance_count,omitempty"` + + // Is this the Primary Node Type? + IsPrimary *bool `json:"isPrimary,omitempty" tf:"is_primary,omitempty"` + + // Should this node type run only stateless services? + IsStateless *bool `json:"isStateless,omitempty" tf:"is_stateless,omitempty"` + + // Does this node type span availability zones? + MultipleAvailabilityZones *bool `json:"multipleAvailabilityZones,omitempty" tf:"multiple_availability_zones,omitempty"` + + // The name of the Node Type. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The placement tags applied to nodes in the node type, which can be used to indicate where certain services (workload) should run. + // +mapType=granular + PlacementProperties map[string]*string `json:"placementProperties,omitempty" tf:"placement_properties,omitempty"` + + // The Port used for the Reverse Proxy Endpoint for this Node Type. Changing this will upgrade the cluster. + ReverseProxyEndpointPort *float64 `json:"reverseProxyEndpointPort,omitempty" tf:"reverse_proxy_endpoint_port,omitempty"` +} + +type NodeTypeParameters struct { + + // A application_ports block as defined below. + // +kubebuilder:validation:Optional + ApplicationPorts *ApplicationPortsParameters `json:"applicationPorts,omitempty" tf:"application_ports,omitempty"` + + // The capacity tags applied to the nodes in the node type, the cluster resource manager uses these tags to understand how much resource a node has. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Capacities map[string]*string `json:"capacities,omitempty" tf:"capacities,omitempty"` + + // The Port used for the Client Endpoint for this Node Type. + // +kubebuilder:validation:Optional + ClientEndpointPort *float64 `json:"clientEndpointPort" tf:"client_endpoint_port,omitempty"` + + // The Durability Level for this Node Type. Possible values include Bronze, Gold and Silver. Defaults to Bronze. + // +kubebuilder:validation:Optional + DurabilityLevel *string `json:"durabilityLevel,omitempty" tf:"durability_level,omitempty"` + + // A ephemeral_ports block as defined below. + // +kubebuilder:validation:Optional + EphemeralPorts *EphemeralPortsParameters `json:"ephemeralPorts,omitempty" tf:"ephemeral_ports,omitempty"` + + // The Port used for the HTTP Endpoint for this Node Type. + // +kubebuilder:validation:Optional + HTTPEndpointPort *float64 `json:"httpEndpointPort" tf:"http_endpoint_port,omitempty"` + + // The number of nodes for this Node Type. + // +kubebuilder:validation:Optional + InstanceCount *float64 `json:"instanceCount" tf:"instance_count,omitempty"` + + // Is this the Primary Node Type? + // +kubebuilder:validation:Optional + IsPrimary *bool `json:"isPrimary" tf:"is_primary,omitempty"` + + // Should this node type run only stateless services? + // +kubebuilder:validation:Optional + IsStateless *bool `json:"isStateless,omitempty" tf:"is_stateless,omitempty"` + + // Does this node type span availability zones? + // +kubebuilder:validation:Optional + MultipleAvailabilityZones *bool `json:"multipleAvailabilityZones,omitempty" tf:"multiple_availability_zones,omitempty"` + + // The name of the Node Type. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The placement tags applied to nodes in the node type, which can be used to indicate where certain services (workload) should run. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + PlacementProperties map[string]*string `json:"placementProperties,omitempty" tf:"placement_properties,omitempty"` + + // The Port used for the Reverse Proxy Endpoint for this Node Type. Changing this will upgrade the cluster. + // +kubebuilder:validation:Optional + ReverseProxyEndpointPort *float64 `json:"reverseProxyEndpointPort,omitempty" tf:"reverse_proxy_endpoint_port,omitempty"` +} + +type ReverseProxyCertificateCommonNamesCommonNamesInitParameters struct { + + // The common or subject name of the certificate. + CertificateCommonName *string `json:"certificateCommonName,omitempty" tf:"certificate_common_name,omitempty"` + + // The Issuer Thumbprint of the Certificate. + CertificateIssuerThumbprint *string `json:"certificateIssuerThumbprint,omitempty" tf:"certificate_issuer_thumbprint,omitempty"` +} + +type ReverseProxyCertificateCommonNamesCommonNamesObservation struct { + + // The common or subject name of the certificate. + CertificateCommonName *string `json:"certificateCommonName,omitempty" tf:"certificate_common_name,omitempty"` + + // The Issuer Thumbprint of the Certificate. + CertificateIssuerThumbprint *string `json:"certificateIssuerThumbprint,omitempty" tf:"certificate_issuer_thumbprint,omitempty"` +} + +type ReverseProxyCertificateCommonNamesCommonNamesParameters struct { + + // The common or subject name of the certificate. + // +kubebuilder:validation:Optional + CertificateCommonName *string `json:"certificateCommonName" tf:"certificate_common_name,omitempty"` + + // The Issuer Thumbprint of the Certificate. + // +kubebuilder:validation:Optional + CertificateIssuerThumbprint *string `json:"certificateIssuerThumbprint,omitempty" tf:"certificate_issuer_thumbprint,omitempty"` +} + +type ReverseProxyCertificateCommonNamesInitParameters struct { + + // A common_names block as defined below. 
+ CommonNames []ReverseProxyCertificateCommonNamesCommonNamesInitParameters `json:"commonNames,omitempty" tf:"common_names,omitempty"` + + // The X509 Store where the Certificate Exists, such as My. + X509StoreName *string `json:"x509StoreName,omitempty" tf:"x509_store_name,omitempty"` +} + +type ReverseProxyCertificateCommonNamesObservation struct { + + // A common_names block as defined below. + CommonNames []ReverseProxyCertificateCommonNamesCommonNamesObservation `json:"commonNames,omitempty" tf:"common_names,omitempty"` + + // The X509 Store where the Certificate Exists, such as My. + X509StoreName *string `json:"x509StoreName,omitempty" tf:"x509_store_name,omitempty"` +} + +type ReverseProxyCertificateCommonNamesParameters struct { + + // A common_names block as defined below. + // +kubebuilder:validation:Optional + CommonNames []ReverseProxyCertificateCommonNamesCommonNamesParameters `json:"commonNames" tf:"common_names,omitempty"` + + // The X509 Store where the Certificate Exists, such as My. + // +kubebuilder:validation:Optional + X509StoreName *string `json:"x509StoreName" tf:"x509_store_name,omitempty"` +} + +type ReverseProxyCertificateInitParameters struct { + + // The Thumbprint of the Certificate. + Thumbprint *string `json:"thumbprint,omitempty" tf:"thumbprint,omitempty"` + + // The Secondary Thumbprint of the Certificate. + ThumbprintSecondary *string `json:"thumbprintSecondary,omitempty" tf:"thumbprint_secondary,omitempty"` + + // The X509 Store where the Certificate Exists, such as My. + X509StoreName *string `json:"x509StoreName,omitempty" tf:"x509_store_name,omitempty"` +} + +type ReverseProxyCertificateObservation struct { + + // The Thumbprint of the Certificate. + Thumbprint *string `json:"thumbprint,omitempty" tf:"thumbprint,omitempty"` + + // The Secondary Thumbprint of the Certificate. 
+ ThumbprintSecondary *string `json:"thumbprintSecondary,omitempty" tf:"thumbprint_secondary,omitempty"` + + // The X509 Store where the Certificate Exists, such as My. + X509StoreName *string `json:"x509StoreName,omitempty" tf:"x509_store_name,omitempty"` +} + +type ReverseProxyCertificateParameters struct { + + // The Thumbprint of the Certificate. + // +kubebuilder:validation:Optional + Thumbprint *string `json:"thumbprint" tf:"thumbprint,omitempty"` + + // The Secondary Thumbprint of the Certificate. + // +kubebuilder:validation:Optional + ThumbprintSecondary *string `json:"thumbprintSecondary,omitempty" tf:"thumbprint_secondary,omitempty"` + + // The X509 Store where the Certificate Exists, such as My. + // +kubebuilder:validation:Optional + X509StoreName *string `json:"x509StoreName" tf:"x509_store_name,omitempty"` +} + +type UpgradePolicyInitParameters struct { + + // A delta_health_policy block as defined below + DeltaHealthPolicy *DeltaHealthPolicyInitParameters `json:"deltaHealthPolicy,omitempty" tf:"delta_health_policy,omitempty"` + + // Indicates whether to restart the Service Fabric node even if only dynamic configurations have changed. + ForceRestartEnabled *bool `json:"forceRestartEnabled,omitempty" tf:"force_restart_enabled,omitempty"` + + // Specifies the duration, in "hh:mm:ss" string format, after which Service Fabric retries the health check if the previous health check fails. Defaults to 00:45:00. + HealthCheckRetryTimeout *string `json:"healthCheckRetryTimeout,omitempty" tf:"health_check_retry_timeout,omitempty"` + + // Specifies the duration, in "hh:mm:ss" string format, that Service Fabric waits in order to verify that the cluster is stable before it continues to the next upgrade domain or completes the upgrade. This wait duration prevents undetected changes of health right after the health check is performed. Defaults to 00:01:00. 
+ HealthCheckStableDuration *string `json:"healthCheckStableDuration,omitempty" tf:"health_check_stable_duration,omitempty"` + + // Specifies the duration, in "hh:mm:ss" string format, that Service Fabric waits before it performs the initial health check after it finishes the upgrade on the upgrade domain. Defaults to 00:00:30. + HealthCheckWaitDuration *string `json:"healthCheckWaitDuration,omitempty" tf:"health_check_wait_duration,omitempty"` + + // A health_policy block as defined below + HealthPolicy *HealthPolicyInitParameters `json:"healthPolicy,omitempty" tf:"health_policy,omitempty"` + + // Specifies the duration, in "hh:mm:ss" string format, that Service Fabric takes to upgrade a single upgrade domain. After this period, the upgrade fails. Defaults to 02:00:00. + UpgradeDomainTimeout *string `json:"upgradeDomainTimeout,omitempty" tf:"upgrade_domain_timeout,omitempty"` + + // Specifies the duration, in "hh:mm:ss" string format, that Service Fabric waits for a replica set to reconfigure into a safe state, if it is not already in a safe state, before Service Fabric proceeds with the upgrade. Defaults to 10675199.02:48:05.4775807. + UpgradeReplicaSetCheckTimeout *string `json:"upgradeReplicaSetCheckTimeout,omitempty" tf:"upgrade_replica_set_check_timeout,omitempty"` + + // Specifies the duration, in "hh:mm:ss" string format, that Service Fabric takes for the entire upgrade. After this period, the upgrade fails. Defaults to 12:00:00. + UpgradeTimeout *string `json:"upgradeTimeout,omitempty" tf:"upgrade_timeout,omitempty"` +} + +type UpgradePolicyObservation struct { + + // A delta_health_policy block as defined below + DeltaHealthPolicy *DeltaHealthPolicyObservation `json:"deltaHealthPolicy,omitempty" tf:"delta_health_policy,omitempty"` + + // Indicates whether to restart the Service Fabric node even if only dynamic configurations have changed. 
+ ForceRestartEnabled *bool `json:"forceRestartEnabled,omitempty" tf:"force_restart_enabled,omitempty"` + + // Specifies the duration, in "hh:mm:ss" string format, after which Service Fabric retries the health check if the previous health check fails. Defaults to 00:45:00. + HealthCheckRetryTimeout *string `json:"healthCheckRetryTimeout,omitempty" tf:"health_check_retry_timeout,omitempty"` + + // Specifies the duration, in "hh:mm:ss" string format, that Service Fabric waits in order to verify that the cluster is stable before it continues to the next upgrade domain or completes the upgrade. This wait duration prevents undetected changes of health right after the health check is performed. Defaults to 00:01:00. + HealthCheckStableDuration *string `json:"healthCheckStableDuration,omitempty" tf:"health_check_stable_duration,omitempty"` + + // Specifies the duration, in "hh:mm:ss" string format, that Service Fabric waits before it performs the initial health check after it finishes the upgrade on the upgrade domain. Defaults to 00:00:30. + HealthCheckWaitDuration *string `json:"healthCheckWaitDuration,omitempty" tf:"health_check_wait_duration,omitempty"` + + // A health_policy block as defined below + HealthPolicy *HealthPolicyObservation `json:"healthPolicy,omitempty" tf:"health_policy,omitempty"` + + // Specifies the duration, in "hh:mm:ss" string format, that Service Fabric takes to upgrade a single upgrade domain. After this period, the upgrade fails. Defaults to 02:00:00. + UpgradeDomainTimeout *string `json:"upgradeDomainTimeout,omitempty" tf:"upgrade_domain_timeout,omitempty"` + + // Specifies the duration, in "hh:mm:ss" string format, that Service Fabric waits for a replica set to reconfigure into a safe state, if it is not already in a safe state, before Service Fabric proceeds with the upgrade. Defaults to 10675199.02:48:05.4775807. 
+ UpgradeReplicaSetCheckTimeout *string `json:"upgradeReplicaSetCheckTimeout,omitempty" tf:"upgrade_replica_set_check_timeout,omitempty"` + + // Specifies the duration, in "hh:mm:ss" string format, that Service Fabric takes for the entire upgrade. After this period, the upgrade fails. Defaults to 12:00:00. + UpgradeTimeout *string `json:"upgradeTimeout,omitempty" tf:"upgrade_timeout,omitempty"` +} + +type UpgradePolicyParameters struct { + + // A delta_health_policy block as defined below + // +kubebuilder:validation:Optional + DeltaHealthPolicy *DeltaHealthPolicyParameters `json:"deltaHealthPolicy,omitempty" tf:"delta_health_policy,omitempty"` + + // Indicates whether to restart the Service Fabric node even if only dynamic configurations have changed. + // +kubebuilder:validation:Optional + ForceRestartEnabled *bool `json:"forceRestartEnabled,omitempty" tf:"force_restart_enabled,omitempty"` + + // Specifies the duration, in "hh:mm:ss" string format, after which Service Fabric retries the health check if the previous health check fails. Defaults to 00:45:00. + // +kubebuilder:validation:Optional + HealthCheckRetryTimeout *string `json:"healthCheckRetryTimeout,omitempty" tf:"health_check_retry_timeout,omitempty"` + + // Specifies the duration, in "hh:mm:ss" string format, that Service Fabric waits in order to verify that the cluster is stable before it continues to the next upgrade domain or completes the upgrade. This wait duration prevents undetected changes of health right after the health check is performed. Defaults to 00:01:00. + // +kubebuilder:validation:Optional + HealthCheckStableDuration *string `json:"healthCheckStableDuration,omitempty" tf:"health_check_stable_duration,omitempty"` + + // Specifies the duration, in "hh:mm:ss" string format, that Service Fabric waits before it performs the initial health check after it finishes the upgrade on the upgrade domain. Defaults to 00:00:30. 
+ // +kubebuilder:validation:Optional + HealthCheckWaitDuration *string `json:"healthCheckWaitDuration,omitempty" tf:"health_check_wait_duration,omitempty"` + + // A health_policy block as defined below + // +kubebuilder:validation:Optional + HealthPolicy *HealthPolicyParameters `json:"healthPolicy,omitempty" tf:"health_policy,omitempty"` + + // Specifies the duration, in "hh:mm:ss" string format, that Service Fabric takes to upgrade a single upgrade domain. After this period, the upgrade fails. Defaults to 02:00:00. + // +kubebuilder:validation:Optional + UpgradeDomainTimeout *string `json:"upgradeDomainTimeout,omitempty" tf:"upgrade_domain_timeout,omitempty"` + + // Specifies the duration, in "hh:mm:ss" string format, that Service Fabric waits for a replica set to reconfigure into a safe state, if it is not already in a safe state, before Service Fabric proceeds with the upgrade. Defaults to 10675199.02:48:05.4775807. + // +kubebuilder:validation:Optional + UpgradeReplicaSetCheckTimeout *string `json:"upgradeReplicaSetCheckTimeout,omitempty" tf:"upgrade_replica_set_check_timeout,omitempty"` + + // Specifies the duration, in "hh:mm:ss" string format, that Service Fabric takes for the entire upgrade. After this period, the upgrade fails. Defaults to 12:00:00. + // +kubebuilder:validation:Optional + UpgradeTimeout *string `json:"upgradeTimeout,omitempty" tf:"upgrade_timeout,omitempty"` +} + +// ClusterSpec defines the desired state of Cluster +type ClusterSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ClusterParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. 
+ // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ClusterInitParameters `json:"initProvider,omitempty"` +} + +// ClusterStatus defines the observed state of Cluster. +type ClusterStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ClusterObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Cluster is the Schema for the Clusters API. Manages a Service Fabric Cluster. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type Cluster struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.managementEndpoint) || (has(self.initProvider) && 
has(self.initProvider.managementEndpoint))",message="spec.forProvider.managementEndpoint is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.nodeType) || (has(self.initProvider) && has(self.initProvider.nodeType))",message="spec.forProvider.nodeType is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.reliabilityLevel) || (has(self.initProvider) && has(self.initProvider.reliabilityLevel))",message="spec.forProvider.reliabilityLevel is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.upgradeMode) || (has(self.initProvider) && has(self.initProvider.upgradeMode))",message="spec.forProvider.upgradeMode is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.vmImage) || (has(self.initProvider) && has(self.initProvider.vmImage))",message="spec.forProvider.vmImage is a required parameter" + Spec ClusterSpec `json:"spec"` + Status ClusterStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ClusterList contains a list of Clusters +type ClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Cluster `json:"items"` +} + +// Repository type metadata. +var ( + Cluster_Kind = "Cluster" + Cluster_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Cluster_Kind}.String() + Cluster_KindAPIVersion = Cluster_Kind + "." 
+ CRDGroupVersion.String() + Cluster_GroupVersionKind = CRDGroupVersion.WithKind(Cluster_Kind) +) + +func init() { + SchemeBuilder.Register(&Cluster{}, &ClusterList{}) +} diff --git a/apis/servicefabric/v1beta1/zz_generated.conversion_hubs.go b/apis/servicefabric/v1beta2/zz_generated.conversion_hubs.go similarity index 95% rename from apis/servicefabric/v1beta1/zz_generated.conversion_hubs.go rename to apis/servicefabric/v1beta2/zz_generated.conversion_hubs.go index c530f36e0..dc9dfa2c0 100755 --- a/apis/servicefabric/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/servicefabric/v1beta2/zz_generated.conversion_hubs.go @@ -4,7 +4,7 @@ // Code generated by upjet. DO NOT EDIT. -package v1beta1 +package v1beta2 // Hub marks this type as a conversion hub. func (tr *Cluster) Hub() {} diff --git a/apis/servicefabric/v1beta2/zz_generated.deepcopy.go b/apis/servicefabric/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..ae6368f2c --- /dev/null +++ b/apis/servicefabric/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,3783 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ActiveDirectoryInitParameters) DeepCopyInto(out *ActiveDirectoryInitParameters) { + *out = *in + if in.ClientApplicationID != nil { + in, out := &in.ClientApplicationID, &out.ClientApplicationID + *out = new(string) + **out = **in + } + if in.ClusterApplicationID != nil { + in, out := &in.ClusterApplicationID, &out.ClusterApplicationID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActiveDirectoryInitParameters. +func (in *ActiveDirectoryInitParameters) DeepCopy() *ActiveDirectoryInitParameters { + if in == nil { + return nil + } + out := new(ActiveDirectoryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActiveDirectoryObservation) DeepCopyInto(out *ActiveDirectoryObservation) { + *out = *in + if in.ClientApplicationID != nil { + in, out := &in.ClientApplicationID, &out.ClientApplicationID + *out = new(string) + **out = **in + } + if in.ClusterApplicationID != nil { + in, out := &in.ClusterApplicationID, &out.ClusterApplicationID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActiveDirectoryObservation. +func (in *ActiveDirectoryObservation) DeepCopy() *ActiveDirectoryObservation { + if in == nil { + return nil + } + out := new(ActiveDirectoryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ActiveDirectoryParameters) DeepCopyInto(out *ActiveDirectoryParameters) { + *out = *in + if in.ClientApplicationID != nil { + in, out := &in.ClientApplicationID, &out.ClientApplicationID + *out = new(string) + **out = **in + } + if in.ClusterApplicationID != nil { + in, out := &in.ClusterApplicationID, &out.ClusterApplicationID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActiveDirectoryParameters. +func (in *ActiveDirectoryParameters) DeepCopy() *ActiveDirectoryParameters { + if in == nil { + return nil + } + out := new(ActiveDirectoryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationPortsInitParameters) DeepCopyInto(out *ApplicationPortsInitParameters) { + *out = *in + if in.EndPort != nil { + in, out := &in.EndPort, &out.EndPort + *out = new(float64) + **out = **in + } + if in.StartPort != nil { + in, out := &in.StartPort, &out.StartPort + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationPortsInitParameters. +func (in *ApplicationPortsInitParameters) DeepCopy() *ApplicationPortsInitParameters { + if in == nil { + return nil + } + out := new(ApplicationPortsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ApplicationPortsObservation) DeepCopyInto(out *ApplicationPortsObservation) { + *out = *in + if in.EndPort != nil { + in, out := &in.EndPort, &out.EndPort + *out = new(float64) + **out = **in + } + if in.StartPort != nil { + in, out := &in.StartPort, &out.StartPort + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationPortsObservation. +func (in *ApplicationPortsObservation) DeepCopy() *ApplicationPortsObservation { + if in == nil { + return nil + } + out := new(ApplicationPortsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationPortsParameters) DeepCopyInto(out *ApplicationPortsParameters) { + *out = *in + if in.EndPort != nil { + in, out := &in.EndPort, &out.EndPort + *out = new(float64) + **out = **in + } + if in.StartPort != nil { + in, out := &in.StartPort, &out.StartPort + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationPortsParameters. +func (in *ApplicationPortsParameters) DeepCopy() *ApplicationPortsParameters { + if in == nil { + return nil + } + out := new(ApplicationPortsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuthenticationCertificateInitParameters) DeepCopyInto(out *AuthenticationCertificateInitParameters) { + *out = *in + if in.CommonName != nil { + in, out := &in.CommonName, &out.CommonName + *out = new(string) + **out = **in + } + if in.Thumbprint != nil { + in, out := &in.Thumbprint, &out.Thumbprint + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationCertificateInitParameters. +func (in *AuthenticationCertificateInitParameters) DeepCopy() *AuthenticationCertificateInitParameters { + if in == nil { + return nil + } + out := new(AuthenticationCertificateInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationCertificateObservation) DeepCopyInto(out *AuthenticationCertificateObservation) { + *out = *in + if in.CommonName != nil { + in, out := &in.CommonName, &out.CommonName + *out = new(string) + **out = **in + } + if in.Thumbprint != nil { + in, out := &in.Thumbprint, &out.Thumbprint + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationCertificateObservation. +func (in *AuthenticationCertificateObservation) DeepCopy() *AuthenticationCertificateObservation { + if in == nil { + return nil + } + out := new(AuthenticationCertificateObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuthenticationCertificateParameters) DeepCopyInto(out *AuthenticationCertificateParameters) { + *out = *in + if in.CommonName != nil { + in, out := &in.CommonName, &out.CommonName + *out = new(string) + **out = **in + } + if in.Thumbprint != nil { + in, out := &in.Thumbprint, &out.Thumbprint + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationCertificateParameters. +func (in *AuthenticationCertificateParameters) DeepCopy() *AuthenticationCertificateParameters { + if in == nil { + return nil + } + out := new(AuthenticationCertificateParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationInitParameters) DeepCopyInto(out *AuthenticationInitParameters) { + *out = *in + if in.ActiveDirectory != nil { + in, out := &in.ActiveDirectory, &out.ActiveDirectory + *out = new(ActiveDirectoryInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = make([]AuthenticationCertificateInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationInitParameters. +func (in *AuthenticationInitParameters) DeepCopy() *AuthenticationInitParameters { + if in == nil { + return nil + } + out := new(AuthenticationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuthenticationObservation) DeepCopyInto(out *AuthenticationObservation) { + *out = *in + if in.ActiveDirectory != nil { + in, out := &in.ActiveDirectory, &out.ActiveDirectory + *out = new(ActiveDirectoryObservation) + (*in).DeepCopyInto(*out) + } + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = make([]AuthenticationCertificateObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationObservation. +func (in *AuthenticationObservation) DeepCopy() *AuthenticationObservation { + if in == nil { + return nil + } + out := new(AuthenticationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationParameters) DeepCopyInto(out *AuthenticationParameters) { + *out = *in + if in.ActiveDirectory != nil { + in, out := &in.ActiveDirectory, &out.ActiveDirectory + *out = new(ActiveDirectoryParameters) + (*in).DeepCopyInto(*out) + } + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = make([]AuthenticationCertificateParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationParameters. +func (in *AuthenticationParameters) DeepCopy() *AuthenticationParameters { + if in == nil { + return nil + } + out := new(AuthenticationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AzureActiveDirectoryInitParameters) DeepCopyInto(out *AzureActiveDirectoryInitParameters) { + *out = *in + if in.ClientApplicationID != nil { + in, out := &in.ClientApplicationID, &out.ClientApplicationID + *out = new(string) + **out = **in + } + if in.ClusterApplicationID != nil { + in, out := &in.ClusterApplicationID, &out.ClusterApplicationID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureActiveDirectoryInitParameters. +func (in *AzureActiveDirectoryInitParameters) DeepCopy() *AzureActiveDirectoryInitParameters { + if in == nil { + return nil + } + out := new(AzureActiveDirectoryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureActiveDirectoryObservation) DeepCopyInto(out *AzureActiveDirectoryObservation) { + *out = *in + if in.ClientApplicationID != nil { + in, out := &in.ClientApplicationID, &out.ClientApplicationID + *out = new(string) + **out = **in + } + if in.ClusterApplicationID != nil { + in, out := &in.ClusterApplicationID, &out.ClusterApplicationID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureActiveDirectoryObservation. +func (in *AzureActiveDirectoryObservation) DeepCopy() *AzureActiveDirectoryObservation { + if in == nil { + return nil + } + out := new(AzureActiveDirectoryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AzureActiveDirectoryParameters) DeepCopyInto(out *AzureActiveDirectoryParameters) { + *out = *in + if in.ClientApplicationID != nil { + in, out := &in.ClientApplicationID, &out.ClientApplicationID + *out = new(string) + **out = **in + } + if in.ClusterApplicationID != nil { + in, out := &in.ClusterApplicationID, &out.ClusterApplicationID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureActiveDirectoryParameters. +func (in *AzureActiveDirectoryParameters) DeepCopy() *AzureActiveDirectoryParameters { + if in == nil { + return nil + } + out := new(AzureActiveDirectoryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateCommonNamesInitParameters) DeepCopyInto(out *CertificateCommonNamesInitParameters) { + *out = *in + if in.CommonNames != nil { + in, out := &in.CommonNames, &out.CommonNames + *out = make([]CommonNamesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.X509StoreName != nil { + in, out := &in.X509StoreName, &out.X509StoreName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateCommonNamesInitParameters. +func (in *CertificateCommonNamesInitParameters) DeepCopy() *CertificateCommonNamesInitParameters { + if in == nil { + return nil + } + out := new(CertificateCommonNamesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CertificateCommonNamesObservation) DeepCopyInto(out *CertificateCommonNamesObservation) { + *out = *in + if in.CommonNames != nil { + in, out := &in.CommonNames, &out.CommonNames + *out = make([]CommonNamesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.X509StoreName != nil { + in, out := &in.X509StoreName, &out.X509StoreName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateCommonNamesObservation. +func (in *CertificateCommonNamesObservation) DeepCopy() *CertificateCommonNamesObservation { + if in == nil { + return nil + } + out := new(CertificateCommonNamesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateCommonNamesParameters) DeepCopyInto(out *CertificateCommonNamesParameters) { + *out = *in + if in.CommonNames != nil { + in, out := &in.CommonNames, &out.CommonNames + *out = make([]CommonNamesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.X509StoreName != nil { + in, out := &in.X509StoreName, &out.X509StoreName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateCommonNamesParameters. +func (in *CertificateCommonNamesParameters) DeepCopy() *CertificateCommonNamesParameters { + if in == nil { + return nil + } + out := new(CertificateCommonNamesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CertificateInitParameters) DeepCopyInto(out *CertificateInitParameters) { + *out = *in + if in.Thumbprint != nil { + in, out := &in.Thumbprint, &out.Thumbprint + *out = new(string) + **out = **in + } + if in.ThumbprintSecondary != nil { + in, out := &in.ThumbprintSecondary, &out.ThumbprintSecondary + *out = new(string) + **out = **in + } + if in.X509StoreName != nil { + in, out := &in.X509StoreName, &out.X509StoreName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateInitParameters. +func (in *CertificateInitParameters) DeepCopy() *CertificateInitParameters { + if in == nil { + return nil + } + out := new(CertificateInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateObservation) DeepCopyInto(out *CertificateObservation) { + *out = *in + if in.Thumbprint != nil { + in, out := &in.Thumbprint, &out.Thumbprint + *out = new(string) + **out = **in + } + if in.ThumbprintSecondary != nil { + in, out := &in.ThumbprintSecondary, &out.ThumbprintSecondary + *out = new(string) + **out = **in + } + if in.X509StoreName != nil { + in, out := &in.X509StoreName, &out.X509StoreName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateObservation. +func (in *CertificateObservation) DeepCopy() *CertificateObservation { + if in == nil { + return nil + } + out := new(CertificateObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CertificateParameters) DeepCopyInto(out *CertificateParameters) { + *out = *in + if in.Thumbprint != nil { + in, out := &in.Thumbprint, &out.Thumbprint + *out = new(string) + **out = **in + } + if in.ThumbprintSecondary != nil { + in, out := &in.ThumbprintSecondary, &out.ThumbprintSecondary + *out = new(string) + **out = **in + } + if in.X509StoreName != nil { + in, out := &in.X509StoreName, &out.X509StoreName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateParameters. +func (in *CertificateParameters) DeepCopy() *CertificateParameters { + if in == nil { + return nil + } + out := new(CertificateParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificatesInitParameters) DeepCopyInto(out *CertificatesInitParameters) { + *out = *in + if in.Store != nil { + in, out := &in.Store, &out.Store + *out = new(string) + **out = **in + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificatesInitParameters. +func (in *CertificatesInitParameters) DeepCopy() *CertificatesInitParameters { + if in == nil { + return nil + } + out := new(CertificatesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CertificatesObservation) DeepCopyInto(out *CertificatesObservation) { + *out = *in + if in.Store != nil { + in, out := &in.Store, &out.Store + *out = new(string) + **out = **in + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificatesObservation. +func (in *CertificatesObservation) DeepCopy() *CertificatesObservation { + if in == nil { + return nil + } + out := new(CertificatesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificatesParameters) DeepCopyInto(out *CertificatesParameters) { + *out = *in + if in.Store != nil { + in, out := &in.Store, &out.Store + *out = new(string) + **out = **in + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificatesParameters. +func (in *CertificatesParameters) DeepCopy() *CertificatesParameters { + if in == nil { + return nil + } + out := new(CertificatesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClientCertificateCommonNameInitParameters) DeepCopyInto(out *ClientCertificateCommonNameInitParameters) { + *out = *in + if in.CommonName != nil { + in, out := &in.CommonName, &out.CommonName + *out = new(string) + **out = **in + } + if in.IsAdmin != nil { + in, out := &in.IsAdmin, &out.IsAdmin + *out = new(bool) + **out = **in + } + if in.IssuerThumbprint != nil { + in, out := &in.IssuerThumbprint, &out.IssuerThumbprint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientCertificateCommonNameInitParameters. +func (in *ClientCertificateCommonNameInitParameters) DeepCopy() *ClientCertificateCommonNameInitParameters { + if in == nil { + return nil + } + out := new(ClientCertificateCommonNameInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientCertificateCommonNameObservation) DeepCopyInto(out *ClientCertificateCommonNameObservation) { + *out = *in + if in.CommonName != nil { + in, out := &in.CommonName, &out.CommonName + *out = new(string) + **out = **in + } + if in.IsAdmin != nil { + in, out := &in.IsAdmin, &out.IsAdmin + *out = new(bool) + **out = **in + } + if in.IssuerThumbprint != nil { + in, out := &in.IssuerThumbprint, &out.IssuerThumbprint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientCertificateCommonNameObservation. +func (in *ClientCertificateCommonNameObservation) DeepCopy() *ClientCertificateCommonNameObservation { + if in == nil { + return nil + } + out := new(ClientCertificateCommonNameObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClientCertificateCommonNameParameters) DeepCopyInto(out *ClientCertificateCommonNameParameters) { + *out = *in + if in.CommonName != nil { + in, out := &in.CommonName, &out.CommonName + *out = new(string) + **out = **in + } + if in.IsAdmin != nil { + in, out := &in.IsAdmin, &out.IsAdmin + *out = new(bool) + **out = **in + } + if in.IssuerThumbprint != nil { + in, out := &in.IssuerThumbprint, &out.IssuerThumbprint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientCertificateCommonNameParameters. +func (in *ClientCertificateCommonNameParameters) DeepCopy() *ClientCertificateCommonNameParameters { + if in == nil { + return nil + } + out := new(ClientCertificateCommonNameParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientCertificateThumbprintInitParameters) DeepCopyInto(out *ClientCertificateThumbprintInitParameters) { + *out = *in + if in.IsAdmin != nil { + in, out := &in.IsAdmin, &out.IsAdmin + *out = new(bool) + **out = **in + } + if in.Thumbprint != nil { + in, out := &in.Thumbprint, &out.Thumbprint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientCertificateThumbprintInitParameters. +func (in *ClientCertificateThumbprintInitParameters) DeepCopy() *ClientCertificateThumbprintInitParameters { + if in == nil { + return nil + } + out := new(ClientCertificateThumbprintInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClientCertificateThumbprintObservation) DeepCopyInto(out *ClientCertificateThumbprintObservation) { + *out = *in + if in.IsAdmin != nil { + in, out := &in.IsAdmin, &out.IsAdmin + *out = new(bool) + **out = **in + } + if in.Thumbprint != nil { + in, out := &in.Thumbprint, &out.Thumbprint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientCertificateThumbprintObservation. +func (in *ClientCertificateThumbprintObservation) DeepCopy() *ClientCertificateThumbprintObservation { + if in == nil { + return nil + } + out := new(ClientCertificateThumbprintObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientCertificateThumbprintParameters) DeepCopyInto(out *ClientCertificateThumbprintParameters) { + *out = *in + if in.IsAdmin != nil { + in, out := &in.IsAdmin, &out.IsAdmin + *out = new(bool) + **out = **in + } + if in.Thumbprint != nil { + in, out := &in.Thumbprint, &out.Thumbprint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientCertificateThumbprintParameters. +func (in *ClientCertificateThumbprintParameters) DeepCopy() *ClientCertificateThumbprintParameters { + if in == nil { + return nil + } + out := new(ClientCertificateThumbprintParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster) DeepCopyInto(out *Cluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster. 
+func (in *Cluster) DeepCopy() *Cluster { + if in == nil { + return nil + } + out := new(Cluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Cluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterInitParameters) DeepCopyInto(out *ClusterInitParameters) { + *out = *in + if in.AddOnFeatures != nil { + in, out := &in.AddOnFeatures, &out.AddOnFeatures + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AzureActiveDirectory != nil { + in, out := &in.AzureActiveDirectory, &out.AzureActiveDirectory + *out = new(AzureActiveDirectoryInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = new(CertificateInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CertificateCommonNames != nil { + in, out := &in.CertificateCommonNames, &out.CertificateCommonNames + *out = new(CertificateCommonNamesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ClientCertificateCommonName != nil { + in, out := &in.ClientCertificateCommonName, &out.ClientCertificateCommonName + *out = make([]ClientCertificateCommonNameInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ClientCertificateThumbprint != nil { + in, out := &in.ClientCertificateThumbprint, &out.ClientCertificateThumbprint + *out = make([]ClientCertificateThumbprintInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ClusterCodeVersion != nil { + in, out := &in.ClusterCodeVersion, &out.ClusterCodeVersion + *out = new(string) + **out = **in 
+ } + if in.DiagnosticsConfig != nil { + in, out := &in.DiagnosticsConfig, &out.DiagnosticsConfig + *out = new(DiagnosticsConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FabricSettings != nil { + in, out := &in.FabricSettings, &out.FabricSettings + *out = make([]FabricSettingsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ManagementEndpoint != nil { + in, out := &in.ManagementEndpoint, &out.ManagementEndpoint + *out = new(string) + **out = **in + } + if in.NodeType != nil { + in, out := &in.NodeType, &out.NodeType + *out = make([]NodeTypeInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ReliabilityLevel != nil { + in, out := &in.ReliabilityLevel, &out.ReliabilityLevel + *out = new(string) + **out = **in + } + if in.ReverseProxyCertificate != nil { + in, out := &in.ReverseProxyCertificate, &out.ReverseProxyCertificate + *out = new(ReverseProxyCertificateInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ReverseProxyCertificateCommonNames != nil { + in, out := &in.ReverseProxyCertificateCommonNames, &out.ReverseProxyCertificateCommonNames + *out = new(ReverseProxyCertificateCommonNamesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ServiceFabricZonalUpgradeMode != nil { + in, out := &in.ServiceFabricZonalUpgradeMode, &out.ServiceFabricZonalUpgradeMode + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UpgradeMode != nil { + in, out := &in.UpgradeMode, &out.UpgradeMode + *out = new(string) + **out = **in + } + if 
in.UpgradePolicy != nil { + in, out := &in.UpgradePolicy, &out.UpgradePolicy + *out = new(UpgradePolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.VMImage != nil { + in, out := &in.VMImage, &out.VMImage + *out = new(string) + **out = **in + } + if in.VmssZonalUpgradeMode != nil { + in, out := &in.VmssZonalUpgradeMode, &out.VmssZonalUpgradeMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterInitParameters. +func (in *ClusterInitParameters) DeepCopy() *ClusterInitParameters { + if in == nil { + return nil + } + out := new(ClusterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterList) DeepCopyInto(out *ClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Cluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterList. +func (in *ClusterList) DeepCopy() *ClusterList { + if in == nil { + return nil + } + out := new(ClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterObservation) DeepCopyInto(out *ClusterObservation) { + *out = *in + if in.AddOnFeatures != nil { + in, out := &in.AddOnFeatures, &out.AddOnFeatures + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AzureActiveDirectory != nil { + in, out := &in.AzureActiveDirectory, &out.AzureActiveDirectory + *out = new(AzureActiveDirectoryObservation) + (*in).DeepCopyInto(*out) + } + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = new(CertificateObservation) + (*in).DeepCopyInto(*out) + } + if in.CertificateCommonNames != nil { + in, out := &in.CertificateCommonNames, &out.CertificateCommonNames + *out = new(CertificateCommonNamesObservation) + (*in).DeepCopyInto(*out) + } + if in.ClientCertificateCommonName != nil { + in, out := &in.ClientCertificateCommonName, &out.ClientCertificateCommonName + *out = make([]ClientCertificateCommonNameObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ClientCertificateThumbprint != nil { + in, out := &in.ClientCertificateThumbprint, &out.ClientCertificateThumbprint + *out = make([]ClientCertificateThumbprintObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ClusterCodeVersion != nil { + in, out := &in.ClusterCodeVersion, &out.ClusterCodeVersion + *out = new(string) + **out = **in + } + if in.ClusterEndpoint != nil { + in, out := &in.ClusterEndpoint, &out.ClusterEndpoint + *out = new(string) + **out = **in + } + if in.DiagnosticsConfig != nil { + in, out := &in.DiagnosticsConfig, &out.DiagnosticsConfig + *out = new(DiagnosticsConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.FabricSettings != nil { + in, out := &in.FabricSettings, &out.FabricSettings + *out = make([]FabricSettingsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if 
in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ManagementEndpoint != nil { + in, out := &in.ManagementEndpoint, &out.ManagementEndpoint + *out = new(string) + **out = **in + } + if in.NodeType != nil { + in, out := &in.NodeType, &out.NodeType + *out = make([]NodeTypeObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ReliabilityLevel != nil { + in, out := &in.ReliabilityLevel, &out.ReliabilityLevel + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ReverseProxyCertificate != nil { + in, out := &in.ReverseProxyCertificate, &out.ReverseProxyCertificate + *out = new(ReverseProxyCertificateObservation) + (*in).DeepCopyInto(*out) + } + if in.ReverseProxyCertificateCommonNames != nil { + in, out := &in.ReverseProxyCertificateCommonNames, &out.ReverseProxyCertificateCommonNames + *out = new(ReverseProxyCertificateCommonNamesObservation) + (*in).DeepCopyInto(*out) + } + if in.ServiceFabricZonalUpgradeMode != nil { + in, out := &in.ServiceFabricZonalUpgradeMode, &out.ServiceFabricZonalUpgradeMode + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UpgradeMode != nil { + in, out := &in.UpgradeMode, &out.UpgradeMode + *out = new(string) + **out = **in + } + if in.UpgradePolicy != nil { + in, out := &in.UpgradePolicy, &out.UpgradePolicy + *out = new(UpgradePolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.VMImage != nil { + in, out := 
&in.VMImage, &out.VMImage + *out = new(string) + **out = **in + } + if in.VmssZonalUpgradeMode != nil { + in, out := &in.VmssZonalUpgradeMode, &out.VmssZonalUpgradeMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterObservation. +func (in *ClusterObservation) DeepCopy() *ClusterObservation { + if in == nil { + return nil + } + out := new(ClusterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterParameters) DeepCopyInto(out *ClusterParameters) { + *out = *in + if in.AddOnFeatures != nil { + in, out := &in.AddOnFeatures, &out.AddOnFeatures + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AzureActiveDirectory != nil { + in, out := &in.AzureActiveDirectory, &out.AzureActiveDirectory + *out = new(AzureActiveDirectoryParameters) + (*in).DeepCopyInto(*out) + } + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = new(CertificateParameters) + (*in).DeepCopyInto(*out) + } + if in.CertificateCommonNames != nil { + in, out := &in.CertificateCommonNames, &out.CertificateCommonNames + *out = new(CertificateCommonNamesParameters) + (*in).DeepCopyInto(*out) + } + if in.ClientCertificateCommonName != nil { + in, out := &in.ClientCertificateCommonName, &out.ClientCertificateCommonName + *out = make([]ClientCertificateCommonNameParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ClientCertificateThumbprint != nil { + in, out := &in.ClientCertificateThumbprint, &out.ClientCertificateThumbprint + *out = make([]ClientCertificateThumbprintParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ClusterCodeVersion != nil { + 
in, out := &in.ClusterCodeVersion, &out.ClusterCodeVersion + *out = new(string) + **out = **in + } + if in.DiagnosticsConfig != nil { + in, out := &in.DiagnosticsConfig, &out.DiagnosticsConfig + *out = new(DiagnosticsConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.FabricSettings != nil { + in, out := &in.FabricSettings, &out.FabricSettings + *out = make([]FabricSettingsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ManagementEndpoint != nil { + in, out := &in.ManagementEndpoint, &out.ManagementEndpoint + *out = new(string) + **out = **in + } + if in.NodeType != nil { + in, out := &in.NodeType, &out.NodeType + *out = make([]NodeTypeParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ReliabilityLevel != nil { + in, out := &in.ReliabilityLevel, &out.ReliabilityLevel + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ReverseProxyCertificate != nil { + in, out := &in.ReverseProxyCertificate, &out.ReverseProxyCertificate + *out = new(ReverseProxyCertificateParameters) + (*in).DeepCopyInto(*out) + } + if in.ReverseProxyCertificateCommonNames != nil { + in, out := &in.ReverseProxyCertificateCommonNames, &out.ReverseProxyCertificateCommonNames + *out = new(ReverseProxyCertificateCommonNamesParameters) + (*in).DeepCopyInto(*out) + } + if in.ServiceFabricZonalUpgradeMode != nil { + in, out := 
&in.ServiceFabricZonalUpgradeMode, &out.ServiceFabricZonalUpgradeMode + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UpgradeMode != nil { + in, out := &in.UpgradeMode, &out.UpgradeMode + *out = new(string) + **out = **in + } + if in.UpgradePolicy != nil { + in, out := &in.UpgradePolicy, &out.UpgradePolicy + *out = new(UpgradePolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.VMImage != nil { + in, out := &in.VMImage, &out.VMImage + *out = new(string) + **out = **in + } + if in.VmssZonalUpgradeMode != nil { + in, out := &in.VmssZonalUpgradeMode, &out.VmssZonalUpgradeMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterParameters. +func (in *ClusterParameters) DeepCopy() *ClusterParameters { + if in == nil { + return nil + } + out := new(ClusterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec. +func (in *ClusterSpec) DeepCopy() *ClusterSpec { + if in == nil { + return nil + } + out := new(ClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStatus. +func (in *ClusterStatus) DeepCopy() *ClusterStatus { + if in == nil { + return nil + } + out := new(ClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CommonNamesInitParameters) DeepCopyInto(out *CommonNamesInitParameters) { + *out = *in + if in.CertificateCommonName != nil { + in, out := &in.CertificateCommonName, &out.CertificateCommonName + *out = new(string) + **out = **in + } + if in.CertificateIssuerThumbprint != nil { + in, out := &in.CertificateIssuerThumbprint, &out.CertificateIssuerThumbprint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonNamesInitParameters. +func (in *CommonNamesInitParameters) DeepCopy() *CommonNamesInitParameters { + if in == nil { + return nil + } + out := new(CommonNamesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CommonNamesObservation) DeepCopyInto(out *CommonNamesObservation) { + *out = *in + if in.CertificateCommonName != nil { + in, out := &in.CertificateCommonName, &out.CertificateCommonName + *out = new(string) + **out = **in + } + if in.CertificateIssuerThumbprint != nil { + in, out := &in.CertificateIssuerThumbprint, &out.CertificateIssuerThumbprint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonNamesObservation. 
+func (in *CommonNamesObservation) DeepCopy() *CommonNamesObservation { + if in == nil { + return nil + } + out := new(CommonNamesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CommonNamesParameters) DeepCopyInto(out *CommonNamesParameters) { + *out = *in + if in.CertificateCommonName != nil { + in, out := &in.CertificateCommonName, &out.CertificateCommonName + *out = new(string) + **out = **in + } + if in.CertificateIssuerThumbprint != nil { + in, out := &in.CertificateIssuerThumbprint, &out.CertificateIssuerThumbprint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonNamesParameters. +func (in *CommonNamesParameters) DeepCopy() *CommonNamesParameters { + if in == nil { + return nil + } + out := new(CommonNamesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomFabricSettingInitParameters) DeepCopyInto(out *CustomFabricSettingInitParameters) { + *out = *in + if in.Parameter != nil { + in, out := &in.Parameter, &out.Parameter + *out = new(string) + **out = **in + } + if in.Section != nil { + in, out := &in.Section, &out.Section + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomFabricSettingInitParameters. +func (in *CustomFabricSettingInitParameters) DeepCopy() *CustomFabricSettingInitParameters { + if in == nil { + return nil + } + out := new(CustomFabricSettingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *CustomFabricSettingObservation) DeepCopyInto(out *CustomFabricSettingObservation) { + *out = *in + if in.Parameter != nil { + in, out := &in.Parameter, &out.Parameter + *out = new(string) + **out = **in + } + if in.Section != nil { + in, out := &in.Section, &out.Section + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomFabricSettingObservation. +func (in *CustomFabricSettingObservation) DeepCopy() *CustomFabricSettingObservation { + if in == nil { + return nil + } + out := new(CustomFabricSettingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomFabricSettingParameters) DeepCopyInto(out *CustomFabricSettingParameters) { + *out = *in + if in.Parameter != nil { + in, out := &in.Parameter, &out.Parameter + *out = new(string) + **out = **in + } + if in.Section != nil { + in, out := &in.Section, &out.Section + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomFabricSettingParameters. +func (in *CustomFabricSettingParameters) DeepCopy() *CustomFabricSettingParameters { + if in == nil { + return nil + } + out := new(CustomFabricSettingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeltaHealthPolicyInitParameters) DeepCopyInto(out *DeltaHealthPolicyInitParameters) { + *out = *in + if in.MaxDeltaUnhealthyApplicationsPercent != nil { + in, out := &in.MaxDeltaUnhealthyApplicationsPercent, &out.MaxDeltaUnhealthyApplicationsPercent + *out = new(float64) + **out = **in + } + if in.MaxDeltaUnhealthyNodesPercent != nil { + in, out := &in.MaxDeltaUnhealthyNodesPercent, &out.MaxDeltaUnhealthyNodesPercent + *out = new(float64) + **out = **in + } + if in.MaxUpgradeDomainDeltaUnhealthyNodesPercent != nil { + in, out := &in.MaxUpgradeDomainDeltaUnhealthyNodesPercent, &out.MaxUpgradeDomainDeltaUnhealthyNodesPercent + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeltaHealthPolicyInitParameters. +func (in *DeltaHealthPolicyInitParameters) DeepCopy() *DeltaHealthPolicyInitParameters { + if in == nil { + return nil + } + out := new(DeltaHealthPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeltaHealthPolicyObservation) DeepCopyInto(out *DeltaHealthPolicyObservation) { + *out = *in + if in.MaxDeltaUnhealthyApplicationsPercent != nil { + in, out := &in.MaxDeltaUnhealthyApplicationsPercent, &out.MaxDeltaUnhealthyApplicationsPercent + *out = new(float64) + **out = **in + } + if in.MaxDeltaUnhealthyNodesPercent != nil { + in, out := &in.MaxDeltaUnhealthyNodesPercent, &out.MaxDeltaUnhealthyNodesPercent + *out = new(float64) + **out = **in + } + if in.MaxUpgradeDomainDeltaUnhealthyNodesPercent != nil { + in, out := &in.MaxUpgradeDomainDeltaUnhealthyNodesPercent, &out.MaxUpgradeDomainDeltaUnhealthyNodesPercent + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeltaHealthPolicyObservation. 
+func (in *DeltaHealthPolicyObservation) DeepCopy() *DeltaHealthPolicyObservation { + if in == nil { + return nil + } + out := new(DeltaHealthPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeltaHealthPolicyParameters) DeepCopyInto(out *DeltaHealthPolicyParameters) { + *out = *in + if in.MaxDeltaUnhealthyApplicationsPercent != nil { + in, out := &in.MaxDeltaUnhealthyApplicationsPercent, &out.MaxDeltaUnhealthyApplicationsPercent + *out = new(float64) + **out = **in + } + if in.MaxDeltaUnhealthyNodesPercent != nil { + in, out := &in.MaxDeltaUnhealthyNodesPercent, &out.MaxDeltaUnhealthyNodesPercent + *out = new(float64) + **out = **in + } + if in.MaxUpgradeDomainDeltaUnhealthyNodesPercent != nil { + in, out := &in.MaxUpgradeDomainDeltaUnhealthyNodesPercent, &out.MaxUpgradeDomainDeltaUnhealthyNodesPercent + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeltaHealthPolicyParameters. +func (in *DeltaHealthPolicyParameters) DeepCopy() *DeltaHealthPolicyParameters { + if in == nil { + return nil + } + out := new(DeltaHealthPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DiagnosticsConfigInitParameters) DeepCopyInto(out *DiagnosticsConfigInitParameters) { + *out = *in + if in.BlobEndpoint != nil { + in, out := &in.BlobEndpoint, &out.BlobEndpoint + *out = new(string) + **out = **in + } + if in.ProtectedAccountKeyName != nil { + in, out := &in.ProtectedAccountKeyName, &out.ProtectedAccountKeyName + *out = new(string) + **out = **in + } + if in.QueueEndpoint != nil { + in, out := &in.QueueEndpoint, &out.QueueEndpoint + *out = new(string) + **out = **in + } + if in.StorageAccountName != nil { + in, out := &in.StorageAccountName, &out.StorageAccountName + *out = new(string) + **out = **in + } + if in.TableEndpoint != nil { + in, out := &in.TableEndpoint, &out.TableEndpoint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiagnosticsConfigInitParameters. +func (in *DiagnosticsConfigInitParameters) DeepCopy() *DiagnosticsConfigInitParameters { + if in == nil { + return nil + } + out := new(DiagnosticsConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DiagnosticsConfigObservation) DeepCopyInto(out *DiagnosticsConfigObservation) { + *out = *in + if in.BlobEndpoint != nil { + in, out := &in.BlobEndpoint, &out.BlobEndpoint + *out = new(string) + **out = **in + } + if in.ProtectedAccountKeyName != nil { + in, out := &in.ProtectedAccountKeyName, &out.ProtectedAccountKeyName + *out = new(string) + **out = **in + } + if in.QueueEndpoint != nil { + in, out := &in.QueueEndpoint, &out.QueueEndpoint + *out = new(string) + **out = **in + } + if in.StorageAccountName != nil { + in, out := &in.StorageAccountName, &out.StorageAccountName + *out = new(string) + **out = **in + } + if in.TableEndpoint != nil { + in, out := &in.TableEndpoint, &out.TableEndpoint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiagnosticsConfigObservation. +func (in *DiagnosticsConfigObservation) DeepCopy() *DiagnosticsConfigObservation { + if in == nil { + return nil + } + out := new(DiagnosticsConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DiagnosticsConfigParameters) DeepCopyInto(out *DiagnosticsConfigParameters) { + *out = *in + if in.BlobEndpoint != nil { + in, out := &in.BlobEndpoint, &out.BlobEndpoint + *out = new(string) + **out = **in + } + if in.ProtectedAccountKeyName != nil { + in, out := &in.ProtectedAccountKeyName, &out.ProtectedAccountKeyName + *out = new(string) + **out = **in + } + if in.QueueEndpoint != nil { + in, out := &in.QueueEndpoint, &out.QueueEndpoint + *out = new(string) + **out = **in + } + if in.StorageAccountName != nil { + in, out := &in.StorageAccountName, &out.StorageAccountName + *out = new(string) + **out = **in + } + if in.TableEndpoint != nil { + in, out := &in.TableEndpoint, &out.TableEndpoint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiagnosticsConfigParameters. +func (in *DiagnosticsConfigParameters) DeepCopy() *DiagnosticsConfigParameters { + if in == nil { + return nil + } + out := new(DiagnosticsConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EphemeralPortsInitParameters) DeepCopyInto(out *EphemeralPortsInitParameters) { + *out = *in + if in.EndPort != nil { + in, out := &in.EndPort, &out.EndPort + *out = new(float64) + **out = **in + } + if in.StartPort != nil { + in, out := &in.StartPort, &out.StartPort + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralPortsInitParameters. +func (in *EphemeralPortsInitParameters) DeepCopy() *EphemeralPortsInitParameters { + if in == nil { + return nil + } + out := new(EphemeralPortsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EphemeralPortsObservation) DeepCopyInto(out *EphemeralPortsObservation) { + *out = *in + if in.EndPort != nil { + in, out := &in.EndPort, &out.EndPort + *out = new(float64) + **out = **in + } + if in.StartPort != nil { + in, out := &in.StartPort, &out.StartPort + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralPortsObservation. +func (in *EphemeralPortsObservation) DeepCopy() *EphemeralPortsObservation { + if in == nil { + return nil + } + out := new(EphemeralPortsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EphemeralPortsParameters) DeepCopyInto(out *EphemeralPortsParameters) { + *out = *in + if in.EndPort != nil { + in, out := &in.EndPort, &out.EndPort + *out = new(float64) + **out = **in + } + if in.StartPort != nil { + in, out := &in.StartPort, &out.StartPort + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralPortsParameters. +func (in *EphemeralPortsParameters) DeepCopy() *EphemeralPortsParameters { + if in == nil { + return nil + } + out := new(EphemeralPortsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FabricSettingsInitParameters) DeepCopyInto(out *FabricSettingsInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FabricSettingsInitParameters. +func (in *FabricSettingsInitParameters) DeepCopy() *FabricSettingsInitParameters { + if in == nil { + return nil + } + out := new(FabricSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FabricSettingsObservation) DeepCopyInto(out *FabricSettingsObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FabricSettingsObservation. +func (in *FabricSettingsObservation) DeepCopy() *FabricSettingsObservation { + if in == nil { + return nil + } + out := new(FabricSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FabricSettingsParameters) DeepCopyInto(out *FabricSettingsParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FabricSettingsParameters. +func (in *FabricSettingsParameters) DeepCopy() *FabricSettingsParameters { + if in == nil { + return nil + } + out := new(FabricSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HealthPolicyInitParameters) DeepCopyInto(out *HealthPolicyInitParameters) { + *out = *in + if in.MaxUnhealthyApplicationsPercent != nil { + in, out := &in.MaxUnhealthyApplicationsPercent, &out.MaxUnhealthyApplicationsPercent + *out = new(float64) + **out = **in + } + if in.MaxUnhealthyNodesPercent != nil { + in, out := &in.MaxUnhealthyNodesPercent, &out.MaxUnhealthyNodesPercent + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthPolicyInitParameters. +func (in *HealthPolicyInitParameters) DeepCopy() *HealthPolicyInitParameters { + if in == nil { + return nil + } + out := new(HealthPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HealthPolicyObservation) DeepCopyInto(out *HealthPolicyObservation) { + *out = *in + if in.MaxUnhealthyApplicationsPercent != nil { + in, out := &in.MaxUnhealthyApplicationsPercent, &out.MaxUnhealthyApplicationsPercent + *out = new(float64) + **out = **in + } + if in.MaxUnhealthyNodesPercent != nil { + in, out := &in.MaxUnhealthyNodesPercent, &out.MaxUnhealthyNodesPercent + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthPolicyObservation. +func (in *HealthPolicyObservation) DeepCopy() *HealthPolicyObservation { + if in == nil { + return nil + } + out := new(HealthPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HealthPolicyParameters) DeepCopyInto(out *HealthPolicyParameters) { + *out = *in + if in.MaxUnhealthyApplicationsPercent != nil { + in, out := &in.MaxUnhealthyApplicationsPercent, &out.MaxUnhealthyApplicationsPercent + *out = new(float64) + **out = **in + } + if in.MaxUnhealthyNodesPercent != nil { + in, out := &in.MaxUnhealthyNodesPercent, &out.MaxUnhealthyNodesPercent + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthPolicyParameters. +func (in *HealthPolicyParameters) DeepCopy() *HealthPolicyParameters { + if in == nil { + return nil + } + out := new(HealthPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LBRuleInitParameters) DeepCopyInto(out *LBRuleInitParameters) { + *out = *in + if in.BackendPort != nil { + in, out := &in.BackendPort, &out.BackendPort + *out = new(float64) + **out = **in + } + if in.FrontendPort != nil { + in, out := &in.FrontendPort, &out.FrontendPort + *out = new(float64) + **out = **in + } + if in.ProbeProtocol != nil { + in, out := &in.ProbeProtocol, &out.ProbeProtocol + *out = new(string) + **out = **in + } + if in.ProbeRequestPath != nil { + in, out := &in.ProbeRequestPath, &out.ProbeRequestPath + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LBRuleInitParameters. +func (in *LBRuleInitParameters) DeepCopy() *LBRuleInitParameters { + if in == nil { + return nil + } + out := new(LBRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LBRuleObservation) DeepCopyInto(out *LBRuleObservation) { + *out = *in + if in.BackendPort != nil { + in, out := &in.BackendPort, &out.BackendPort + *out = new(float64) + **out = **in + } + if in.FrontendPort != nil { + in, out := &in.FrontendPort, &out.FrontendPort + *out = new(float64) + **out = **in + } + if in.ProbeProtocol != nil { + in, out := &in.ProbeProtocol, &out.ProbeProtocol + *out = new(string) + **out = **in + } + if in.ProbeRequestPath != nil { + in, out := &in.ProbeRequestPath, &out.ProbeRequestPath + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LBRuleObservation. 
+func (in *LBRuleObservation) DeepCopy() *LBRuleObservation { + if in == nil { + return nil + } + out := new(LBRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LBRuleParameters) DeepCopyInto(out *LBRuleParameters) { + *out = *in + if in.BackendPort != nil { + in, out := &in.BackendPort, &out.BackendPort + *out = new(float64) + **out = **in + } + if in.FrontendPort != nil { + in, out := &in.FrontendPort, &out.FrontendPort + *out = new(float64) + **out = **in + } + if in.ProbeProtocol != nil { + in, out := &in.ProbeProtocol, &out.ProbeProtocol + *out = new(string) + **out = **in + } + if in.ProbeRequestPath != nil { + in, out := &in.ProbeRequestPath, &out.ProbeRequestPath + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LBRuleParameters. +func (in *LBRuleParameters) DeepCopy() *LBRuleParameters { + if in == nil { + return nil + } + out := new(LBRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedCluster) DeepCopyInto(out *ManagedCluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedCluster. +func (in *ManagedCluster) DeepCopy() *ManagedCluster { + if in == nil { + return nil + } + out := new(ManagedCluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ManagedCluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedClusterInitParameters) DeepCopyInto(out *ManagedClusterInitParameters) { + *out = *in + if in.Authentication != nil { + in, out := &in.Authentication, &out.Authentication + *out = new(AuthenticationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.BackupServiceEnabled != nil { + in, out := &in.BackupServiceEnabled, &out.BackupServiceEnabled + *out = new(bool) + **out = **in + } + if in.ClientConnectionPort != nil { + in, out := &in.ClientConnectionPort, &out.ClientConnectionPort + *out = new(float64) + **out = **in + } + if in.CustomFabricSetting != nil { + in, out := &in.CustomFabricSetting, &out.CustomFabricSetting + *out = make([]CustomFabricSettingInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DNSName != nil { + in, out := &in.DNSName, &out.DNSName + *out = new(string) + **out = **in + } + if in.DNSServiceEnabled != nil { + in, out := &in.DNSServiceEnabled, &out.DNSServiceEnabled + *out = new(bool) + **out = **in + } + if in.HTTPGatewayPort != nil { + in, out := &in.HTTPGatewayPort, &out.HTTPGatewayPort + *out = new(float64) + **out = **in + } + if in.LBRule != nil { + in, out := &in.LBRule, &out.LBRule + *out = make([]LBRuleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.NodeType != nil { + in, out := &in.NodeType, &out.NodeType + *out = make([]ManagedClusterNodeTypeInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := 
&in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UpgradeWave != nil { + in, out := &in.UpgradeWave, &out.UpgradeWave + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterInitParameters. +func (in *ManagedClusterInitParameters) DeepCopy() *ManagedClusterInitParameters { + if in == nil { + return nil + } + out := new(ManagedClusterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedClusterList) DeepCopyInto(out *ManagedClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ManagedCluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterList. +func (in *ManagedClusterList) DeepCopy() *ManagedClusterList { + if in == nil { + return nil + } + out := new(ManagedClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ManagedClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ManagedClusterNodeTypeInitParameters) DeepCopyInto(out *ManagedClusterNodeTypeInitParameters) { + *out = *in + if in.ApplicationPortRange != nil { + in, out := &in.ApplicationPortRange, &out.ApplicationPortRange + *out = new(string) + **out = **in + } + if in.Capacities != nil { + in, out := &in.Capacities, &out.Capacities + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.DataDiskSizeGb != nil { + in, out := &in.DataDiskSizeGb, &out.DataDiskSizeGb + *out = new(float64) + **out = **in + } + if in.DataDiskType != nil { + in, out := &in.DataDiskType, &out.DataDiskType + *out = new(string) + **out = **in + } + if in.EphemeralPortRange != nil { + in, out := &in.EphemeralPortRange, &out.EphemeralPortRange + *out = new(string) + **out = **in + } + if in.MultiplePlacementGroupsEnabled != nil { + in, out := &in.MultiplePlacementGroupsEnabled, &out.MultiplePlacementGroupsEnabled + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PlacementProperties != nil { + in, out := &in.PlacementProperties, &out.PlacementProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Primary != nil { + in, out := &in.Primary, &out.Primary + *out = new(bool) + **out = **in + } + if in.Stateless != nil { + in, out := &in.Stateless, &out.Stateless + *out = new(bool) + **out = **in + } + if in.VMImageOffer != nil { + in, out := &in.VMImageOffer, &out.VMImageOffer + *out = new(string) + **out = **in + } + if in.VMImagePublisher != nil { + in, out := 
&in.VMImagePublisher, &out.VMImagePublisher + *out = new(string) + **out = **in + } + if in.VMImageSku != nil { + in, out := &in.VMImageSku, &out.VMImageSku + *out = new(string) + **out = **in + } + if in.VMImageVersion != nil { + in, out := &in.VMImageVersion, &out.VMImageVersion + *out = new(string) + **out = **in + } + if in.VMInstanceCount != nil { + in, out := &in.VMInstanceCount, &out.VMInstanceCount + *out = new(float64) + **out = **in + } + if in.VMSecrets != nil { + in, out := &in.VMSecrets, &out.VMSecrets + *out = make([]VMSecretsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterNodeTypeInitParameters. +func (in *ManagedClusterNodeTypeInitParameters) DeepCopy() *ManagedClusterNodeTypeInitParameters { + if in == nil { + return nil + } + out := new(ManagedClusterNodeTypeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ManagedClusterNodeTypeObservation) DeepCopyInto(out *ManagedClusterNodeTypeObservation) { + *out = *in + if in.ApplicationPortRange != nil { + in, out := &in.ApplicationPortRange, &out.ApplicationPortRange + *out = new(string) + **out = **in + } + if in.Capacities != nil { + in, out := &in.Capacities, &out.Capacities + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.DataDiskSizeGb != nil { + in, out := &in.DataDiskSizeGb, &out.DataDiskSizeGb + *out = new(float64) + **out = **in + } + if in.DataDiskType != nil { + in, out := &in.DataDiskType, &out.DataDiskType + *out = new(string) + **out = **in + } + if in.EphemeralPortRange != nil { + in, out := &in.EphemeralPortRange, &out.EphemeralPortRange + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.MultiplePlacementGroupsEnabled != nil { + in, out := &in.MultiplePlacementGroupsEnabled, &out.MultiplePlacementGroupsEnabled + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PlacementProperties != nil { + in, out := &in.PlacementProperties, &out.PlacementProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Primary != nil { + in, out := &in.Primary, &out.Primary + *out = new(bool) + **out = **in + } + if in.Stateless != nil { + in, out := &in.Stateless, &out.Stateless + *out = new(bool) + **out = **in + } + if in.VMImageOffer != nil { + in, out := &in.VMImageOffer, &out.VMImageOffer + *out = 
new(string) + **out = **in + } + if in.VMImagePublisher != nil { + in, out := &in.VMImagePublisher, &out.VMImagePublisher + *out = new(string) + **out = **in + } + if in.VMImageSku != nil { + in, out := &in.VMImageSku, &out.VMImageSku + *out = new(string) + **out = **in + } + if in.VMImageVersion != nil { + in, out := &in.VMImageVersion, &out.VMImageVersion + *out = new(string) + **out = **in + } + if in.VMInstanceCount != nil { + in, out := &in.VMInstanceCount, &out.VMInstanceCount + *out = new(float64) + **out = **in + } + if in.VMSecrets != nil { + in, out := &in.VMSecrets, &out.VMSecrets + *out = make([]VMSecretsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterNodeTypeObservation. +func (in *ManagedClusterNodeTypeObservation) DeepCopy() *ManagedClusterNodeTypeObservation { + if in == nil { + return nil + } + out := new(ManagedClusterNodeTypeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ManagedClusterNodeTypeParameters) DeepCopyInto(out *ManagedClusterNodeTypeParameters) { + *out = *in + if in.ApplicationPortRange != nil { + in, out := &in.ApplicationPortRange, &out.ApplicationPortRange + *out = new(string) + **out = **in + } + if in.Capacities != nil { + in, out := &in.Capacities, &out.Capacities + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.DataDiskSizeGb != nil { + in, out := &in.DataDiskSizeGb, &out.DataDiskSizeGb + *out = new(float64) + **out = **in + } + if in.DataDiskType != nil { + in, out := &in.DataDiskType, &out.DataDiskType + *out = new(string) + **out = **in + } + if in.EphemeralPortRange != nil { + in, out := &in.EphemeralPortRange, &out.EphemeralPortRange + *out = new(string) + **out = **in + } + if in.MultiplePlacementGroupsEnabled != nil { + in, out := &in.MultiplePlacementGroupsEnabled, &out.MultiplePlacementGroupsEnabled + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PlacementProperties != nil { + in, out := &in.PlacementProperties, &out.PlacementProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Primary != nil { + in, out := &in.Primary, &out.Primary + *out = new(bool) + **out = **in + } + if in.Stateless != nil { + in, out := &in.Stateless, &out.Stateless + *out = new(bool) + **out = **in + } + if in.VMImageOffer != nil { + in, out := &in.VMImageOffer, &out.VMImageOffer + *out = new(string) + **out = **in + } + if in.VMImagePublisher != nil { + in, out := 
&in.VMImagePublisher, &out.VMImagePublisher + *out = new(string) + **out = **in + } + if in.VMImageSku != nil { + in, out := &in.VMImageSku, &out.VMImageSku + *out = new(string) + **out = **in + } + if in.VMImageVersion != nil { + in, out := &in.VMImageVersion, &out.VMImageVersion + *out = new(string) + **out = **in + } + if in.VMInstanceCount != nil { + in, out := &in.VMInstanceCount, &out.VMInstanceCount + *out = new(float64) + **out = **in + } + if in.VMSecrets != nil { + in, out := &in.VMSecrets, &out.VMSecrets + *out = make([]VMSecretsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VMSize != nil { + in, out := &in.VMSize, &out.VMSize + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterNodeTypeParameters. +func (in *ManagedClusterNodeTypeParameters) DeepCopy() *ManagedClusterNodeTypeParameters { + if in == nil { + return nil + } + out := new(ManagedClusterNodeTypeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ManagedClusterObservation) DeepCopyInto(out *ManagedClusterObservation) { + *out = *in + if in.Authentication != nil { + in, out := &in.Authentication, &out.Authentication + *out = new(AuthenticationObservation) + (*in).DeepCopyInto(*out) + } + if in.BackupServiceEnabled != nil { + in, out := &in.BackupServiceEnabled, &out.BackupServiceEnabled + *out = new(bool) + **out = **in + } + if in.ClientConnectionPort != nil { + in, out := &in.ClientConnectionPort, &out.ClientConnectionPort + *out = new(float64) + **out = **in + } + if in.CustomFabricSetting != nil { + in, out := &in.CustomFabricSetting, &out.CustomFabricSetting + *out = make([]CustomFabricSettingObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DNSName != nil { + in, out := &in.DNSName, &out.DNSName + *out = new(string) + **out = **in + } + if in.DNSServiceEnabled != nil { + in, out := &in.DNSServiceEnabled, &out.DNSServiceEnabled + *out = new(bool) + **out = **in + } + if in.HTTPGatewayPort != nil { + in, out := &in.HTTPGatewayPort, &out.HTTPGatewayPort + *out = new(float64) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LBRule != nil { + in, out := &in.LBRule, &out.LBRule + *out = make([]LBRuleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.NodeType != nil { + in, out := &in.NodeType, &out.NodeType + *out = make([]ManagedClusterNodeTypeObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = 
make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UpgradeWave != nil { + in, out := &in.UpgradeWave, &out.UpgradeWave + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterObservation. +func (in *ManagedClusterObservation) DeepCopy() *ManagedClusterObservation { + if in == nil { + return nil + } + out := new(ManagedClusterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedClusterParameters) DeepCopyInto(out *ManagedClusterParameters) { + *out = *in + if in.Authentication != nil { + in, out := &in.Authentication, &out.Authentication + *out = new(AuthenticationParameters) + (*in).DeepCopyInto(*out) + } + if in.BackupServiceEnabled != nil { + in, out := &in.BackupServiceEnabled, &out.BackupServiceEnabled + *out = new(bool) + **out = **in + } + if in.ClientConnectionPort != nil { + in, out := &in.ClientConnectionPort, &out.ClientConnectionPort + *out = new(float64) + **out = **in + } + if in.CustomFabricSetting != nil { + in, out := &in.CustomFabricSetting, &out.CustomFabricSetting + *out = make([]CustomFabricSettingParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DNSName != nil { + in, out := &in.DNSName, &out.DNSName + *out = new(string) + **out = **in + } + if in.DNSServiceEnabled != nil { + in, out := &in.DNSServiceEnabled, &out.DNSServiceEnabled + *out = new(bool) + **out = **in + } + if in.HTTPGatewayPort != nil { + in, out := &in.HTTPGatewayPort, &out.HTTPGatewayPort + 
*out = new(float64) + **out = **in + } + if in.LBRule != nil { + in, out := &in.LBRule, &out.LBRule + *out = make([]LBRuleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.NodeType != nil { + in, out := &in.NodeType, &out.NodeType + *out = make([]ManagedClusterNodeTypeParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PasswordSecretRef != nil { + in, out := &in.PasswordSecretRef, &out.PasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UpgradeWave != nil { + in, out := &in.UpgradeWave, &out.UpgradeWave + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterParameters. 
+func (in *ManagedClusterParameters) DeepCopy() *ManagedClusterParameters { + if in == nil { + return nil + } + out := new(ManagedClusterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedClusterSpec) DeepCopyInto(out *ManagedClusterSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterSpec. +func (in *ManagedClusterSpec) DeepCopy() *ManagedClusterSpec { + if in == nil { + return nil + } + out := new(ManagedClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedClusterStatus) DeepCopyInto(out *ManagedClusterStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterStatus. +func (in *ManagedClusterStatus) DeepCopy() *ManagedClusterStatus { + if in == nil { + return nil + } + out := new(ManagedClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NodeTypeInitParameters) DeepCopyInto(out *NodeTypeInitParameters) { + *out = *in + if in.ApplicationPorts != nil { + in, out := &in.ApplicationPorts, &out.ApplicationPorts + *out = new(ApplicationPortsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Capacities != nil { + in, out := &in.Capacities, &out.Capacities + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ClientEndpointPort != nil { + in, out := &in.ClientEndpointPort, &out.ClientEndpointPort + *out = new(float64) + **out = **in + } + if in.DurabilityLevel != nil { + in, out := &in.DurabilityLevel, &out.DurabilityLevel + *out = new(string) + **out = **in + } + if in.EphemeralPorts != nil { + in, out := &in.EphemeralPorts, &out.EphemeralPorts + *out = new(EphemeralPortsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.HTTPEndpointPort != nil { + in, out := &in.HTTPEndpointPort, &out.HTTPEndpointPort + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.IsPrimary != nil { + in, out := &in.IsPrimary, &out.IsPrimary + *out = new(bool) + **out = **in + } + if in.IsStateless != nil { + in, out := &in.IsStateless, &out.IsStateless + *out = new(bool) + **out = **in + } + if in.MultipleAvailabilityZones != nil { + in, out := &in.MultipleAvailabilityZones, &out.MultipleAvailabilityZones + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PlacementProperties != nil { + in, out := &in.PlacementProperties, &out.PlacementProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + 
inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ReverseProxyEndpointPort != nil { + in, out := &in.ReverseProxyEndpointPort, &out.ReverseProxyEndpointPort + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeTypeInitParameters. +func (in *NodeTypeInitParameters) DeepCopy() *NodeTypeInitParameters { + if in == nil { + return nil + } + out := new(NodeTypeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeTypeObservation) DeepCopyInto(out *NodeTypeObservation) { + *out = *in + if in.ApplicationPorts != nil { + in, out := &in.ApplicationPorts, &out.ApplicationPorts + *out = new(ApplicationPortsObservation) + (*in).DeepCopyInto(*out) + } + if in.Capacities != nil { + in, out := &in.Capacities, &out.Capacities + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ClientEndpointPort != nil { + in, out := &in.ClientEndpointPort, &out.ClientEndpointPort + *out = new(float64) + **out = **in + } + if in.DurabilityLevel != nil { + in, out := &in.DurabilityLevel, &out.DurabilityLevel + *out = new(string) + **out = **in + } + if in.EphemeralPorts != nil { + in, out := &in.EphemeralPorts, &out.EphemeralPorts + *out = new(EphemeralPortsObservation) + (*in).DeepCopyInto(*out) + } + if in.HTTPEndpointPort != nil { + in, out := &in.HTTPEndpointPort, &out.HTTPEndpointPort + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.IsPrimary != nil { + in, out := 
&in.IsPrimary, &out.IsPrimary + *out = new(bool) + **out = **in + } + if in.IsStateless != nil { + in, out := &in.IsStateless, &out.IsStateless + *out = new(bool) + **out = **in + } + if in.MultipleAvailabilityZones != nil { + in, out := &in.MultipleAvailabilityZones, &out.MultipleAvailabilityZones + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PlacementProperties != nil { + in, out := &in.PlacementProperties, &out.PlacementProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ReverseProxyEndpointPort != nil { + in, out := &in.ReverseProxyEndpointPort, &out.ReverseProxyEndpointPort + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeTypeObservation. +func (in *NodeTypeObservation) DeepCopy() *NodeTypeObservation { + if in == nil { + return nil + } + out := new(NodeTypeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NodeTypeParameters) DeepCopyInto(out *NodeTypeParameters) { + *out = *in + if in.ApplicationPorts != nil { + in, out := &in.ApplicationPorts, &out.ApplicationPorts + *out = new(ApplicationPortsParameters) + (*in).DeepCopyInto(*out) + } + if in.Capacities != nil { + in, out := &in.Capacities, &out.Capacities + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ClientEndpointPort != nil { + in, out := &in.ClientEndpointPort, &out.ClientEndpointPort + *out = new(float64) + **out = **in + } + if in.DurabilityLevel != nil { + in, out := &in.DurabilityLevel, &out.DurabilityLevel + *out = new(string) + **out = **in + } + if in.EphemeralPorts != nil { + in, out := &in.EphemeralPorts, &out.EphemeralPorts + *out = new(EphemeralPortsParameters) + (*in).DeepCopyInto(*out) + } + if in.HTTPEndpointPort != nil { + in, out := &in.HTTPEndpointPort, &out.HTTPEndpointPort + *out = new(float64) + **out = **in + } + if in.InstanceCount != nil { + in, out := &in.InstanceCount, &out.InstanceCount + *out = new(float64) + **out = **in + } + if in.IsPrimary != nil { + in, out := &in.IsPrimary, &out.IsPrimary + *out = new(bool) + **out = **in + } + if in.IsStateless != nil { + in, out := &in.IsStateless, &out.IsStateless + *out = new(bool) + **out = **in + } + if in.MultipleAvailabilityZones != nil { + in, out := &in.MultipleAvailabilityZones, &out.MultipleAvailabilityZones + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PlacementProperties != nil { + in, out := &in.PlacementProperties, &out.PlacementProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := 
(*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ReverseProxyEndpointPort != nil { + in, out := &in.ReverseProxyEndpointPort, &out.ReverseProxyEndpointPort + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeTypeParameters. +func (in *NodeTypeParameters) DeepCopy() *NodeTypeParameters { + if in == nil { + return nil + } + out := new(NodeTypeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReverseProxyCertificateCommonNamesCommonNamesInitParameters) DeepCopyInto(out *ReverseProxyCertificateCommonNamesCommonNamesInitParameters) { + *out = *in + if in.CertificateCommonName != nil { + in, out := &in.CertificateCommonName, &out.CertificateCommonName + *out = new(string) + **out = **in + } + if in.CertificateIssuerThumbprint != nil { + in, out := &in.CertificateIssuerThumbprint, &out.CertificateIssuerThumbprint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReverseProxyCertificateCommonNamesCommonNamesInitParameters. +func (in *ReverseProxyCertificateCommonNamesCommonNamesInitParameters) DeepCopy() *ReverseProxyCertificateCommonNamesCommonNamesInitParameters { + if in == nil { + return nil + } + out := new(ReverseProxyCertificateCommonNamesCommonNamesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReverseProxyCertificateCommonNamesCommonNamesObservation) DeepCopyInto(out *ReverseProxyCertificateCommonNamesCommonNamesObservation) { + *out = *in + if in.CertificateCommonName != nil { + in, out := &in.CertificateCommonName, &out.CertificateCommonName + *out = new(string) + **out = **in + } + if in.CertificateIssuerThumbprint != nil { + in, out := &in.CertificateIssuerThumbprint, &out.CertificateIssuerThumbprint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReverseProxyCertificateCommonNamesCommonNamesObservation. +func (in *ReverseProxyCertificateCommonNamesCommonNamesObservation) DeepCopy() *ReverseProxyCertificateCommonNamesCommonNamesObservation { + if in == nil { + return nil + } + out := new(ReverseProxyCertificateCommonNamesCommonNamesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReverseProxyCertificateCommonNamesCommonNamesParameters) DeepCopyInto(out *ReverseProxyCertificateCommonNamesCommonNamesParameters) { + *out = *in + if in.CertificateCommonName != nil { + in, out := &in.CertificateCommonName, &out.CertificateCommonName + *out = new(string) + **out = **in + } + if in.CertificateIssuerThumbprint != nil { + in, out := &in.CertificateIssuerThumbprint, &out.CertificateIssuerThumbprint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReverseProxyCertificateCommonNamesCommonNamesParameters. 
+func (in *ReverseProxyCertificateCommonNamesCommonNamesParameters) DeepCopy() *ReverseProxyCertificateCommonNamesCommonNamesParameters { + if in == nil { + return nil + } + out := new(ReverseProxyCertificateCommonNamesCommonNamesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReverseProxyCertificateCommonNamesInitParameters) DeepCopyInto(out *ReverseProxyCertificateCommonNamesInitParameters) { + *out = *in + if in.CommonNames != nil { + in, out := &in.CommonNames, &out.CommonNames + *out = make([]ReverseProxyCertificateCommonNamesCommonNamesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.X509StoreName != nil { + in, out := &in.X509StoreName, &out.X509StoreName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReverseProxyCertificateCommonNamesInitParameters. +func (in *ReverseProxyCertificateCommonNamesInitParameters) DeepCopy() *ReverseProxyCertificateCommonNamesInitParameters { + if in == nil { + return nil + } + out := new(ReverseProxyCertificateCommonNamesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReverseProxyCertificateCommonNamesObservation) DeepCopyInto(out *ReverseProxyCertificateCommonNamesObservation) { + *out = *in + if in.CommonNames != nil { + in, out := &in.CommonNames, &out.CommonNames + *out = make([]ReverseProxyCertificateCommonNamesCommonNamesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.X509StoreName != nil { + in, out := &in.X509StoreName, &out.X509StoreName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReverseProxyCertificateCommonNamesObservation. +func (in *ReverseProxyCertificateCommonNamesObservation) DeepCopy() *ReverseProxyCertificateCommonNamesObservation { + if in == nil { + return nil + } + out := new(ReverseProxyCertificateCommonNamesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReverseProxyCertificateCommonNamesParameters) DeepCopyInto(out *ReverseProxyCertificateCommonNamesParameters) { + *out = *in + if in.CommonNames != nil { + in, out := &in.CommonNames, &out.CommonNames + *out = make([]ReverseProxyCertificateCommonNamesCommonNamesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.X509StoreName != nil { + in, out := &in.X509StoreName, &out.X509StoreName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReverseProxyCertificateCommonNamesParameters. +func (in *ReverseProxyCertificateCommonNamesParameters) DeepCopy() *ReverseProxyCertificateCommonNamesParameters { + if in == nil { + return nil + } + out := new(ReverseProxyCertificateCommonNamesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReverseProxyCertificateInitParameters) DeepCopyInto(out *ReverseProxyCertificateInitParameters) { + *out = *in + if in.Thumbprint != nil { + in, out := &in.Thumbprint, &out.Thumbprint + *out = new(string) + **out = **in + } + if in.ThumbprintSecondary != nil { + in, out := &in.ThumbprintSecondary, &out.ThumbprintSecondary + *out = new(string) + **out = **in + } + if in.X509StoreName != nil { + in, out := &in.X509StoreName, &out.X509StoreName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReverseProxyCertificateInitParameters. +func (in *ReverseProxyCertificateInitParameters) DeepCopy() *ReverseProxyCertificateInitParameters { + if in == nil { + return nil + } + out := new(ReverseProxyCertificateInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReverseProxyCertificateObservation) DeepCopyInto(out *ReverseProxyCertificateObservation) { + *out = *in + if in.Thumbprint != nil { + in, out := &in.Thumbprint, &out.Thumbprint + *out = new(string) + **out = **in + } + if in.ThumbprintSecondary != nil { + in, out := &in.ThumbprintSecondary, &out.ThumbprintSecondary + *out = new(string) + **out = **in + } + if in.X509StoreName != nil { + in, out := &in.X509StoreName, &out.X509StoreName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReverseProxyCertificateObservation. +func (in *ReverseProxyCertificateObservation) DeepCopy() *ReverseProxyCertificateObservation { + if in == nil { + return nil + } + out := new(ReverseProxyCertificateObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReverseProxyCertificateParameters) DeepCopyInto(out *ReverseProxyCertificateParameters) { + *out = *in + if in.Thumbprint != nil { + in, out := &in.Thumbprint, &out.Thumbprint + *out = new(string) + **out = **in + } + if in.ThumbprintSecondary != nil { + in, out := &in.ThumbprintSecondary, &out.ThumbprintSecondary + *out = new(string) + **out = **in + } + if in.X509StoreName != nil { + in, out := &in.X509StoreName, &out.X509StoreName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReverseProxyCertificateParameters. +func (in *ReverseProxyCertificateParameters) DeepCopy() *ReverseProxyCertificateParameters { + if in == nil { + return nil + } + out := new(ReverseProxyCertificateParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UpgradePolicyInitParameters) DeepCopyInto(out *UpgradePolicyInitParameters) { + *out = *in + if in.DeltaHealthPolicy != nil { + in, out := &in.DeltaHealthPolicy, &out.DeltaHealthPolicy + *out = new(DeltaHealthPolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ForceRestartEnabled != nil { + in, out := &in.ForceRestartEnabled, &out.ForceRestartEnabled + *out = new(bool) + **out = **in + } + if in.HealthCheckRetryTimeout != nil { + in, out := &in.HealthCheckRetryTimeout, &out.HealthCheckRetryTimeout + *out = new(string) + **out = **in + } + if in.HealthCheckStableDuration != nil { + in, out := &in.HealthCheckStableDuration, &out.HealthCheckStableDuration + *out = new(string) + **out = **in + } + if in.HealthCheckWaitDuration != nil { + in, out := &in.HealthCheckWaitDuration, &out.HealthCheckWaitDuration + *out = new(string) + **out = **in + } + if in.HealthPolicy != nil { + in, out := &in.HealthPolicy, &out.HealthPolicy + *out = new(HealthPolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if 
in.UpgradeDomainTimeout != nil { + in, out := &in.UpgradeDomainTimeout, &out.UpgradeDomainTimeout + *out = new(string) + **out = **in + } + if in.UpgradeReplicaSetCheckTimeout != nil { + in, out := &in.UpgradeReplicaSetCheckTimeout, &out.UpgradeReplicaSetCheckTimeout + *out = new(string) + **out = **in + } + if in.UpgradeTimeout != nil { + in, out := &in.UpgradeTimeout, &out.UpgradeTimeout + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpgradePolicyInitParameters. +func (in *UpgradePolicyInitParameters) DeepCopy() *UpgradePolicyInitParameters { + if in == nil { + return nil + } + out := new(UpgradePolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UpgradePolicyObservation) DeepCopyInto(out *UpgradePolicyObservation) { + *out = *in + if in.DeltaHealthPolicy != nil { + in, out := &in.DeltaHealthPolicy, &out.DeltaHealthPolicy + *out = new(DeltaHealthPolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.ForceRestartEnabled != nil { + in, out := &in.ForceRestartEnabled, &out.ForceRestartEnabled + *out = new(bool) + **out = **in + } + if in.HealthCheckRetryTimeout != nil { + in, out := &in.HealthCheckRetryTimeout, &out.HealthCheckRetryTimeout + *out = new(string) + **out = **in + } + if in.HealthCheckStableDuration != nil { + in, out := &in.HealthCheckStableDuration, &out.HealthCheckStableDuration + *out = new(string) + **out = **in + } + if in.HealthCheckWaitDuration != nil { + in, out := &in.HealthCheckWaitDuration, &out.HealthCheckWaitDuration + *out = new(string) + **out = **in + } + if in.HealthPolicy != nil { + in, out := &in.HealthPolicy, &out.HealthPolicy + *out = new(HealthPolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.UpgradeDomainTimeout != nil { + in, out := &in.UpgradeDomainTimeout, &out.UpgradeDomainTimeout + *out = 
new(string) + **out = **in + } + if in.UpgradeReplicaSetCheckTimeout != nil { + in, out := &in.UpgradeReplicaSetCheckTimeout, &out.UpgradeReplicaSetCheckTimeout + *out = new(string) + **out = **in + } + if in.UpgradeTimeout != nil { + in, out := &in.UpgradeTimeout, &out.UpgradeTimeout + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpgradePolicyObservation. +func (in *UpgradePolicyObservation) DeepCopy() *UpgradePolicyObservation { + if in == nil { + return nil + } + out := new(UpgradePolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UpgradePolicyParameters) DeepCopyInto(out *UpgradePolicyParameters) { + *out = *in + if in.DeltaHealthPolicy != nil { + in, out := &in.DeltaHealthPolicy, &out.DeltaHealthPolicy + *out = new(DeltaHealthPolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.ForceRestartEnabled != nil { + in, out := &in.ForceRestartEnabled, &out.ForceRestartEnabled + *out = new(bool) + **out = **in + } + if in.HealthCheckRetryTimeout != nil { + in, out := &in.HealthCheckRetryTimeout, &out.HealthCheckRetryTimeout + *out = new(string) + **out = **in + } + if in.HealthCheckStableDuration != nil { + in, out := &in.HealthCheckStableDuration, &out.HealthCheckStableDuration + *out = new(string) + **out = **in + } + if in.HealthCheckWaitDuration != nil { + in, out := &in.HealthCheckWaitDuration, &out.HealthCheckWaitDuration + *out = new(string) + **out = **in + } + if in.HealthPolicy != nil { + in, out := &in.HealthPolicy, &out.HealthPolicy + *out = new(HealthPolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.UpgradeDomainTimeout != nil { + in, out := &in.UpgradeDomainTimeout, &out.UpgradeDomainTimeout + *out = new(string) + **out = **in + } + if in.UpgradeReplicaSetCheckTimeout != nil { + in, out := &in.UpgradeReplicaSetCheckTimeout, 
&out.UpgradeReplicaSetCheckTimeout + *out = new(string) + **out = **in + } + if in.UpgradeTimeout != nil { + in, out := &in.UpgradeTimeout, &out.UpgradeTimeout + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpgradePolicyParameters. +func (in *UpgradePolicyParameters) DeepCopy() *UpgradePolicyParameters { + if in == nil { + return nil + } + out := new(UpgradePolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VMSecretsInitParameters) DeepCopyInto(out *VMSecretsInitParameters) { + *out = *in + if in.Certificates != nil { + in, out := &in.Certificates, &out.Certificates + *out = make([]CertificatesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VaultID != nil { + in, out := &in.VaultID, &out.VaultID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VMSecretsInitParameters. +func (in *VMSecretsInitParameters) DeepCopy() *VMSecretsInitParameters { + if in == nil { + return nil + } + out := new(VMSecretsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VMSecretsObservation) DeepCopyInto(out *VMSecretsObservation) { + *out = *in + if in.Certificates != nil { + in, out := &in.Certificates, &out.Certificates + *out = make([]CertificatesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VaultID != nil { + in, out := &in.VaultID, &out.VaultID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VMSecretsObservation. 
+func (in *VMSecretsObservation) DeepCopy() *VMSecretsObservation { + if in == nil { + return nil + } + out := new(VMSecretsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VMSecretsParameters) DeepCopyInto(out *VMSecretsParameters) { + *out = *in + if in.Certificates != nil { + in, out := &in.Certificates, &out.Certificates + *out = make([]CertificatesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VaultID != nil { + in, out := &in.VaultID, &out.VaultID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VMSecretsParameters. +func (in *VMSecretsParameters) DeepCopy() *VMSecretsParameters { + if in == nil { + return nil + } + out := new(VMSecretsParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/servicefabric/v1beta2/zz_generated.managed.go b/apis/servicefabric/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..26553c41e --- /dev/null +++ b/apis/servicefabric/v1beta2/zz_generated.managed.go @@ -0,0 +1,128 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Cluster. +func (mg *Cluster) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Cluster. +func (mg *Cluster) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Cluster. +func (mg *Cluster) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Cluster. 
+func (mg *Cluster) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Cluster. +func (mg *Cluster) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Cluster. +func (mg *Cluster) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Cluster. +func (mg *Cluster) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Cluster. +func (mg *Cluster) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Cluster. +func (mg *Cluster) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Cluster. +func (mg *Cluster) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Cluster. +func (mg *Cluster) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Cluster. +func (mg *Cluster) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this ManagedCluster. +func (mg *ManagedCluster) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ManagedCluster. +func (mg *ManagedCluster) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ManagedCluster. +func (mg *ManagedCluster) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ManagedCluster. 
+func (mg *ManagedCluster) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ManagedCluster. +func (mg *ManagedCluster) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ManagedCluster. +func (mg *ManagedCluster) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ManagedCluster. +func (mg *ManagedCluster) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ManagedCluster. +func (mg *ManagedCluster) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ManagedCluster. +func (mg *ManagedCluster) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ManagedCluster. +func (mg *ManagedCluster) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ManagedCluster. +func (mg *ManagedCluster) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ManagedCluster. +func (mg *ManagedCluster) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/servicefabric/v1beta2/zz_generated.managedlist.go b/apis/servicefabric/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..9854016fe --- /dev/null +++ b/apis/servicefabric/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,26 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. 
+ +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this ClusterList. +func (l *ClusterList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ManagedClusterList. +func (l *ManagedClusterList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/servicefabric/v1beta2/zz_generated.resolvers.go b/apis/servicefabric/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..48bc8ec38 --- /dev/null +++ b/apis/servicefabric/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,80 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + client "sigs.k8s.io/controller-runtime/pkg/client" + + // ResolveReferences of this Cluster. 
+ apisresolver "github.com/upbound/provider-azure/internal/apis" +) + +func (mg *Cluster) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this ManagedCluster. 
+func (mg *ManagedCluster) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/servicefabric/v1beta2/zz_groupversion_info.go b/apis/servicefabric/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..a662d70f2 --- /dev/null +++ b/apis/servicefabric/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=servicefabric.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
+const ( + CRDGroup = "servicefabric.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/servicefabric/v1beta2/zz_managedcluster_terraformed.go b/apis/servicefabric/v1beta2/zz_managedcluster_terraformed.go new file mode 100755 index 000000000..384ee38a1 --- /dev/null +++ b/apis/servicefabric/v1beta2/zz_managedcluster_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ManagedCluster +func (mg *ManagedCluster) GetTerraformResourceType() string { + return "azurerm_service_fabric_managed_cluster" +} + +// GetConnectionDetailsMapping for this ManagedCluster +func (tr *ManagedCluster) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"password": "spec.forProvider.passwordSecretRef"} +} + +// GetObservation of this ManagedCluster +func (tr *ManagedCluster) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ManagedCluster +func (tr *ManagedCluster) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return 
err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ManagedCluster +func (tr *ManagedCluster) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ManagedCluster +func (tr *ManagedCluster) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ManagedCluster +func (tr *ManagedCluster) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ManagedCluster +func (tr *ManagedCluster) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this ManagedCluster +func (tr *ManagedCluster) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ManagedCluster using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ManagedCluster) LateInitialize(attrs []byte) (bool, error) { + params := &ManagedClusterParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ManagedCluster) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/servicefabric/v1beta2/zz_managedcluster_types.go b/apis/servicefabric/v1beta2/zz_managedcluster_types.go new file mode 100755 index 000000000..c28c485fd --- /dev/null +++ b/apis/servicefabric/v1beta2/zz_managedcluster_types.go @@ -0,0 +1,704 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ActiveDirectoryInitParameters struct { + + // The ID of the Client Application. + ClientApplicationID *string `json:"clientApplicationId,omitempty" tf:"client_application_id,omitempty"` + + // The ID of the Cluster Application. 
+ ClusterApplicationID *string `json:"clusterApplicationId,omitempty" tf:"cluster_application_id,omitempty"` + + // The ID of the Tenant. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` +} + +type ActiveDirectoryObservation struct { + + // The ID of the Client Application. + ClientApplicationID *string `json:"clientApplicationId,omitempty" tf:"client_application_id,omitempty"` + + // The ID of the Cluster Application. + ClusterApplicationID *string `json:"clusterApplicationId,omitempty" tf:"cluster_application_id,omitempty"` + + // The ID of the Tenant. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` +} + +type ActiveDirectoryParameters struct { + + // The ID of the Client Application. + // +kubebuilder:validation:Optional + ClientApplicationID *string `json:"clientApplicationId" tf:"client_application_id,omitempty"` + + // The ID of the Cluster Application. + // +kubebuilder:validation:Optional + ClusterApplicationID *string `json:"clusterApplicationId" tf:"cluster_application_id,omitempty"` + + // The ID of the Tenant. + // +kubebuilder:validation:Optional + TenantID *string `json:"tenantId" tf:"tenant_id,omitempty"` +} + +type AuthenticationCertificateInitParameters struct { + + // The certificate's CN. + CommonName *string `json:"commonName,omitempty" tf:"common_name,omitempty"` + + // The thumbprint of the certificate. + Thumbprint *string `json:"thumbprint,omitempty" tf:"thumbprint,omitempty"` + + // The type of the certificate. Can be AdminClient or ReadOnlyClient. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type AuthenticationCertificateObservation struct { + + // The certificate's CN. + CommonName *string `json:"commonName,omitempty" tf:"common_name,omitempty"` + + // The thumbprint of the certificate. + Thumbprint *string `json:"thumbprint,omitempty" tf:"thumbprint,omitempty"` + + // The type of the certificate. Can be AdminClient or ReadOnlyClient. 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type AuthenticationCertificateParameters struct { + + // The certificate's CN. + // +kubebuilder:validation:Optional + CommonName *string `json:"commonName,omitempty" tf:"common_name,omitempty"` + + // The thumbprint of the certificate. + // +kubebuilder:validation:Optional + Thumbprint *string `json:"thumbprint" tf:"thumbprint,omitempty"` + + // The type of the certificate. Can be AdminClient or ReadOnlyClient. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type AuthenticationInitParameters struct { + + // A active_directory block as defined above. + ActiveDirectory *ActiveDirectoryInitParameters `json:"activeDirectory,omitempty" tf:"active_directory,omitempty"` + + // One or more certificate blocks as defined below. + Certificate []AuthenticationCertificateInitParameters `json:"certificate,omitempty" tf:"certificate,omitempty"` +} + +type AuthenticationObservation struct { + + // A active_directory block as defined above. + ActiveDirectory *ActiveDirectoryObservation `json:"activeDirectory,omitempty" tf:"active_directory,omitempty"` + + // One or more certificate blocks as defined below. + Certificate []AuthenticationCertificateObservation `json:"certificate,omitempty" tf:"certificate,omitempty"` +} + +type AuthenticationParameters struct { + + // A active_directory block as defined above. + // +kubebuilder:validation:Optional + ActiveDirectory *ActiveDirectoryParameters `json:"activeDirectory,omitempty" tf:"active_directory,omitempty"` + + // One or more certificate blocks as defined below. + // +kubebuilder:validation:Optional + Certificate []AuthenticationCertificateParameters `json:"certificate,omitempty" tf:"certificate,omitempty"` +} + +type CertificatesInitParameters struct { + + // The certificate store on the Virtual Machine to which the certificate should be added. 
+ Store *string `json:"store,omitempty" tf:"store,omitempty"` + + // The URL of a certificate that has been uploaded to Key Vault as a secret + URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +type CertificatesObservation struct { + + // The certificate store on the Virtual Machine to which the certificate should be added. + Store *string `json:"store,omitempty" tf:"store,omitempty"` + + // The URL of a certificate that has been uploaded to Key Vault as a secret + URL *string `json:"url,omitempty" tf:"url,omitempty"` +} + +type CertificatesParameters struct { + + // The certificate store on the Virtual Machine to which the certificate should be added. + // +kubebuilder:validation:Optional + Store *string `json:"store" tf:"store,omitempty"` + + // The URL of a certificate that has been uploaded to Key Vault as a secret + // +kubebuilder:validation:Optional + URL *string `json:"url" tf:"url,omitempty"` +} + +type CustomFabricSettingInitParameters struct { + + // Parameter name. + Parameter *string `json:"parameter,omitempty" tf:"parameter,omitempty"` + + // Section name. + Section *string `json:"section,omitempty" tf:"section,omitempty"` + + // Parameter value. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type CustomFabricSettingObservation struct { + + // Parameter name. + Parameter *string `json:"parameter,omitempty" tf:"parameter,omitempty"` + + // Section name. + Section *string `json:"section,omitempty" tf:"section,omitempty"` + + // Parameter value. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type CustomFabricSettingParameters struct { + + // Parameter name. + // +kubebuilder:validation:Optional + Parameter *string `json:"parameter" tf:"parameter,omitempty"` + + // Section name. + // +kubebuilder:validation:Optional + Section *string `json:"section" tf:"section,omitempty"` + + // Parameter value. 
+ // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type LBRuleInitParameters struct { + + // LB Backend port. + BackendPort *float64 `json:"backendPort,omitempty" tf:"backend_port,omitempty"` + + // LB Frontend port. + FrontendPort *float64 `json:"frontendPort,omitempty" tf:"frontend_port,omitempty"` + + // Protocol for the probe. Can be one of tcp, udp, http, or https. + ProbeProtocol *string `json:"probeProtocol,omitempty" tf:"probe_protocol,omitempty"` + + // Path for the probe to check, when probe protocol is set to http. + ProbeRequestPath *string `json:"probeRequestPath,omitempty" tf:"probe_request_path,omitempty"` + + // The transport protocol used in this rule. Can be one of tcp or udp. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` +} + +type LBRuleObservation struct { + + // LB Backend port. + BackendPort *float64 `json:"backendPort,omitempty" tf:"backend_port,omitempty"` + + // LB Frontend port. + FrontendPort *float64 `json:"frontendPort,omitempty" tf:"frontend_port,omitempty"` + + // Protocol for the probe. Can be one of tcp, udp, http, or https. + ProbeProtocol *string `json:"probeProtocol,omitempty" tf:"probe_protocol,omitempty"` + + // Path for the probe to check, when probe protocol is set to http. + ProbeRequestPath *string `json:"probeRequestPath,omitempty" tf:"probe_request_path,omitempty"` + + // The transport protocol used in this rule. Can be one of tcp or udp. + Protocol *string `json:"protocol,omitempty" tf:"protocol,omitempty"` +} + +type LBRuleParameters struct { + + // LB Backend port. + // +kubebuilder:validation:Optional + BackendPort *float64 `json:"backendPort" tf:"backend_port,omitempty"` + + // LB Frontend port. + // +kubebuilder:validation:Optional + FrontendPort *float64 `json:"frontendPort" tf:"frontend_port,omitempty"` + + // Protocol for the probe. Can be one of tcp, udp, http, or https. 
+ // +kubebuilder:validation:Optional + ProbeProtocol *string `json:"probeProtocol" tf:"probe_protocol,omitempty"` + + // Path for the probe to check, when probe protocol is set to http. + // +kubebuilder:validation:Optional + ProbeRequestPath *string `json:"probeRequestPath,omitempty" tf:"probe_request_path,omitempty"` + + // The transport protocol used in this rule. Can be one of tcp or udp. + // +kubebuilder:validation:Optional + Protocol *string `json:"protocol" tf:"protocol,omitempty"` +} + +type ManagedClusterInitParameters struct { + + // Controls how connections to the cluster are authenticated. A authentication block as defined below. + Authentication *AuthenticationInitParameters `json:"authentication,omitempty" tf:"authentication,omitempty"` + + // If true, backup service is enabled. + BackupServiceEnabled *bool `json:"backupServiceEnabled,omitempty" tf:"backup_service_enabled,omitempty"` + + // Port to use when connecting to the cluster. + ClientConnectionPort *float64 `json:"clientConnectionPort,omitempty" tf:"client_connection_port,omitempty"` + + // One or more custom_fabric_setting blocks as defined below. + CustomFabricSetting []CustomFabricSettingInitParameters `json:"customFabricSetting,omitempty" tf:"custom_fabric_setting,omitempty"` + + // Hostname for the cluster. If unset the cluster's name will be used.. + DNSName *string `json:"dnsName,omitempty" tf:"dns_name,omitempty"` + + // If true, DNS service is enabled. + DNSServiceEnabled *bool `json:"dnsServiceEnabled,omitempty" tf:"dns_service_enabled,omitempty"` + + // Port that should be used by the Service Fabric Explorer to visualize applications and cluster status. + HTTPGatewayPort *float64 `json:"httpGatewayPort,omitempty" tf:"http_gateway_port,omitempty"` + + // One or more lb_rule blocks as defined below. + LBRule []LBRuleInitParameters `json:"lbRule,omitempty" tf:"lb_rule,omitempty"` + + // The Azure Region where the Resource Group should exist. 
Changing this forces a new Resource Group to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // One or more node_type blocks as defined below. + NodeType []ManagedClusterNodeTypeInitParameters `json:"nodeType,omitempty" tf:"node_type,omitempty"` + + // SKU for this cluster. Changing this forces a new resource to be created. Default is Basic, allowed values are either Basic or Standard. + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // A mapping of tags which should be assigned to the Resource Group. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Upgrade wave for the fabric runtime. Default is Wave0, allowed value must be one of Wave0, Wave1, or Wave2. + UpgradeWave *string `json:"upgradeWave,omitempty" tf:"upgrade_wave,omitempty"` + + // Administrator password for the VMs that will be created as part of this cluster. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type ManagedClusterNodeTypeInitParameters struct { + + // Sets the port range available for applications. Format is -, for example 10000-20000. + ApplicationPortRange *string `json:"applicationPortRange,omitempty" tf:"application_port_range,omitempty"` + + // Specifies a list of key/value pairs used to set capacity tags for this node type. + // +mapType=granular + Capacities map[string]*string `json:"capacities,omitempty" tf:"capacities,omitempty"` + + // The size of the data disk in gigabytes.. + DataDiskSizeGb *float64 `json:"dataDiskSizeGb,omitempty" tf:"data_disk_size_gb,omitempty"` + + // The type of the disk to use for storing data. It can be one of Premium_LRS, Standard_LRS, or StandardSSD_LRS. Defaults to Standard_LRS. + DataDiskType *string `json:"dataDiskType,omitempty" tf:"data_disk_type,omitempty"` + + // Sets the port range available for the OS. Format is -, for example 10000-20000. 
There has to be at least 255 ports available and cannot overlap with application_port_range.. + EphemeralPortRange *string `json:"ephemeralPortRange,omitempty" tf:"ephemeral_port_range,omitempty"` + + // If set the node type can be composed of multiple placement groups. + MultiplePlacementGroupsEnabled *bool `json:"multiplePlacementGroupsEnabled,omitempty" tf:"multiple_placement_groups_enabled,omitempty"` + + // The name which should be used for this node type. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies a list of placement tags that can be used to indicate where services should run.. + // +mapType=granular + PlacementProperties map[string]*string `json:"placementProperties,omitempty" tf:"placement_properties,omitempty"` + + // If set to true, system services will run on this node type. Only one node type should be marked as primary. Primary node type cannot be deleted or changed once they're created. + Primary *bool `json:"primary,omitempty" tf:"primary,omitempty"` + + // If set to true, only stateless workloads can run on this node type. + Stateless *bool `json:"stateless,omitempty" tf:"stateless,omitempty"` + + // The offer type of the marketplace image cluster VMs will use. + VMImageOffer *string `json:"vmImageOffer,omitempty" tf:"vm_image_offer,omitempty"` + + // The publisher of the marketplace image cluster VMs will use. + VMImagePublisher *string `json:"vmImagePublisher,omitempty" tf:"vm_image_publisher,omitempty"` + + // The SKU of the marketplace image cluster VMs will use. + VMImageSku *string `json:"vmImageSku,omitempty" tf:"vm_image_sku,omitempty"` + + // The version of the marketplace image cluster VMs will use. + VMImageVersion *string `json:"vmImageVersion,omitempty" tf:"vm_image_version,omitempty"` + + // The number of instances this node type will launch. + VMInstanceCount *float64 `json:"vmInstanceCount,omitempty" tf:"vm_instance_count,omitempty"` + + // One or more vm_secrets blocks as defined below. 
+ VMSecrets []VMSecretsInitParameters `json:"vmSecrets,omitempty" tf:"vm_secrets,omitempty"` + + // The size of the instances in this node type. + VMSize *string `json:"vmSize,omitempty" tf:"vm_size,omitempty"` +} + +type ManagedClusterNodeTypeObservation struct { + + // Sets the port range available for applications. Format is -, for example 10000-20000. + ApplicationPortRange *string `json:"applicationPortRange,omitempty" tf:"application_port_range,omitempty"` + + // Specifies a list of key/value pairs used to set capacity tags for this node type. + // +mapType=granular + Capacities map[string]*string `json:"capacities,omitempty" tf:"capacities,omitempty"` + + // The size of the data disk in gigabytes.. + DataDiskSizeGb *float64 `json:"dataDiskSizeGb,omitempty" tf:"data_disk_size_gb,omitempty"` + + // The type of the disk to use for storing data. It can be one of Premium_LRS, Standard_LRS, or StandardSSD_LRS. Defaults to Standard_LRS. + DataDiskType *string `json:"dataDiskType,omitempty" tf:"data_disk_type,omitempty"` + + // Sets the port range available for the OS. Format is -, for example 10000-20000. There has to be at least 255 ports available and cannot overlap with application_port_range.. + EphemeralPortRange *string `json:"ephemeralPortRange,omitempty" tf:"ephemeral_port_range,omitempty"` + + // The ID of the Resource Group. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // If set the node type can be composed of multiple placement groups. + MultiplePlacementGroupsEnabled *bool `json:"multiplePlacementGroupsEnabled,omitempty" tf:"multiple_placement_groups_enabled,omitempty"` + + // The name which should be used for this node type. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies a list of placement tags that can be used to indicate where services should run.. 
+ // +mapType=granular + PlacementProperties map[string]*string `json:"placementProperties,omitempty" tf:"placement_properties,omitempty"` + + // If set to true, system services will run on this node type. Only one node type should be marked as primary. Primary node type cannot be deleted or changed once they're created. + Primary *bool `json:"primary,omitempty" tf:"primary,omitempty"` + + // If set to true, only stateless workloads can run on this node type. + Stateless *bool `json:"stateless,omitempty" tf:"stateless,omitempty"` + + // The offer type of the marketplace image cluster VMs will use. + VMImageOffer *string `json:"vmImageOffer,omitempty" tf:"vm_image_offer,omitempty"` + + // The publisher of the marketplace image cluster VMs will use. + VMImagePublisher *string `json:"vmImagePublisher,omitempty" tf:"vm_image_publisher,omitempty"` + + // The SKU of the marketplace image cluster VMs will use. + VMImageSku *string `json:"vmImageSku,omitempty" tf:"vm_image_sku,omitempty"` + + // The version of the marketplace image cluster VMs will use. + VMImageVersion *string `json:"vmImageVersion,omitempty" tf:"vm_image_version,omitempty"` + + // The number of instances this node type will launch. + VMInstanceCount *float64 `json:"vmInstanceCount,omitempty" tf:"vm_instance_count,omitempty"` + + // One or more vm_secrets blocks as defined below. + VMSecrets []VMSecretsObservation `json:"vmSecrets,omitempty" tf:"vm_secrets,omitempty"` + + // The size of the instances in this node type. + VMSize *string `json:"vmSize,omitempty" tf:"vm_size,omitempty"` +} + +type ManagedClusterNodeTypeParameters struct { + + // Sets the port range available for applications. Format is -, for example 10000-20000. + // +kubebuilder:validation:Optional + ApplicationPortRange *string `json:"applicationPortRange" tf:"application_port_range,omitempty"` + + // Specifies a list of key/value pairs used to set capacity tags for this node type. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Capacities map[string]*string `json:"capacities,omitempty" tf:"capacities,omitempty"` + + // The size of the data disk in gigabytes.. + // +kubebuilder:validation:Optional + DataDiskSizeGb *float64 `json:"dataDiskSizeGb" tf:"data_disk_size_gb,omitempty"` + + // The type of the disk to use for storing data. It can be one of Premium_LRS, Standard_LRS, or StandardSSD_LRS. Defaults to Standard_LRS. + // +kubebuilder:validation:Optional + DataDiskType *string `json:"dataDiskType,omitempty" tf:"data_disk_type,omitempty"` + + // Sets the port range available for the OS. Format is -, for example 10000-20000. There has to be at least 255 ports available and cannot overlap with application_port_range.. + // +kubebuilder:validation:Optional + EphemeralPortRange *string `json:"ephemeralPortRange" tf:"ephemeral_port_range,omitempty"` + + // If set the node type can be composed of multiple placement groups. + // +kubebuilder:validation:Optional + MultiplePlacementGroupsEnabled *bool `json:"multiplePlacementGroupsEnabled,omitempty" tf:"multiple_placement_groups_enabled,omitempty"` + + // The name which should be used for this node type. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Specifies a list of placement tags that can be used to indicate where services should run.. + // +kubebuilder:validation:Optional + // +mapType=granular + PlacementProperties map[string]*string `json:"placementProperties,omitempty" tf:"placement_properties,omitempty"` + + // If set to true, system services will run on this node type. Only one node type should be marked as primary. Primary node type cannot be deleted or changed once they're created. + // +kubebuilder:validation:Optional + Primary *bool `json:"primary,omitempty" tf:"primary,omitempty"` + + // If set to true, only stateless workloads can run on this node type. 
+ // +kubebuilder:validation:Optional + Stateless *bool `json:"stateless,omitempty" tf:"stateless,omitempty"` + + // The offer type of the marketplace image cluster VMs will use. + // +kubebuilder:validation:Optional + VMImageOffer *string `json:"vmImageOffer" tf:"vm_image_offer,omitempty"` + + // The publisher of the marketplace image cluster VMs will use. + // +kubebuilder:validation:Optional + VMImagePublisher *string `json:"vmImagePublisher" tf:"vm_image_publisher,omitempty"` + + // The SKU of the marketplace image cluster VMs will use. + // +kubebuilder:validation:Optional + VMImageSku *string `json:"vmImageSku" tf:"vm_image_sku,omitempty"` + + // The version of the marketplace image cluster VMs will use. + // +kubebuilder:validation:Optional + VMImageVersion *string `json:"vmImageVersion" tf:"vm_image_version,omitempty"` + + // The number of instances this node type will launch. + // +kubebuilder:validation:Optional + VMInstanceCount *float64 `json:"vmInstanceCount" tf:"vm_instance_count,omitempty"` + + // One or more vm_secrets blocks as defined below. + // +kubebuilder:validation:Optional + VMSecrets []VMSecretsParameters `json:"vmSecrets,omitempty" tf:"vm_secrets,omitempty"` + + // The size of the instances in this node type. + // +kubebuilder:validation:Optional + VMSize *string `json:"vmSize" tf:"vm_size,omitempty"` +} + +type ManagedClusterObservation struct { + + // Controls how connections to the cluster are authenticated. A authentication block as defined below. + Authentication *AuthenticationObservation `json:"authentication,omitempty" tf:"authentication,omitempty"` + + // If true, backup service is enabled. + BackupServiceEnabled *bool `json:"backupServiceEnabled,omitempty" tf:"backup_service_enabled,omitempty"` + + // Port to use when connecting to the cluster. + ClientConnectionPort *float64 `json:"clientConnectionPort,omitempty" tf:"client_connection_port,omitempty"` + + // One or more custom_fabric_setting blocks as defined below. 
+ CustomFabricSetting []CustomFabricSettingObservation `json:"customFabricSetting,omitempty" tf:"custom_fabric_setting,omitempty"` + + // Hostname for the cluster. If unset the cluster's name will be used.. + DNSName *string `json:"dnsName,omitempty" tf:"dns_name,omitempty"` + + // If true, DNS service is enabled. + DNSServiceEnabled *bool `json:"dnsServiceEnabled,omitempty" tf:"dns_service_enabled,omitempty"` + + // Port that should be used by the Service Fabric Explorer to visualize applications and cluster status. + HTTPGatewayPort *float64 `json:"httpGatewayPort,omitempty" tf:"http_gateway_port,omitempty"` + + // The ID of the Resource Group. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // One or more lb_rule blocks as defined below. + LBRule []LBRuleObservation `json:"lbRule,omitempty" tf:"lb_rule,omitempty"` + + // The Azure Region where the Resource Group should exist. Changing this forces a new Resource Group to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // One or more node_type blocks as defined below. + NodeType []ManagedClusterNodeTypeObservation `json:"nodeType,omitempty" tf:"node_type,omitempty"` + + // The name of the Resource Group where the Resource Group should exist. Changing this forces a new Resource Group to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // SKU for this cluster. Changing this forces a new resource to be created. Default is Basic, allowed values are either Basic or Standard. + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // A mapping of tags which should be assigned to the Resource Group. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Upgrade wave for the fabric runtime. Default is Wave0, allowed value must be one of Wave0, Wave1, or Wave2. 
+ UpgradeWave *string `json:"upgradeWave,omitempty" tf:"upgrade_wave,omitempty"` + + // Administrator password for the VMs that will be created as part of this cluster. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type ManagedClusterParameters struct { + + // Controls how connections to the cluster are authenticated. A authentication block as defined below. + // +kubebuilder:validation:Optional + Authentication *AuthenticationParameters `json:"authentication,omitempty" tf:"authentication,omitempty"` + + // If true, backup service is enabled. + // +kubebuilder:validation:Optional + BackupServiceEnabled *bool `json:"backupServiceEnabled,omitempty" tf:"backup_service_enabled,omitempty"` + + // Port to use when connecting to the cluster. + // +kubebuilder:validation:Optional + ClientConnectionPort *float64 `json:"clientConnectionPort,omitempty" tf:"client_connection_port,omitempty"` + + // One or more custom_fabric_setting blocks as defined below. + // +kubebuilder:validation:Optional + CustomFabricSetting []CustomFabricSettingParameters `json:"customFabricSetting,omitempty" tf:"custom_fabric_setting,omitempty"` + + // Hostname for the cluster. If unset the cluster's name will be used.. + // +kubebuilder:validation:Optional + DNSName *string `json:"dnsName,omitempty" tf:"dns_name,omitempty"` + + // If true, DNS service is enabled. + // +kubebuilder:validation:Optional + DNSServiceEnabled *bool `json:"dnsServiceEnabled,omitempty" tf:"dns_service_enabled,omitempty"` + + // Port that should be used by the Service Fabric Explorer to visualize applications and cluster status. + // +kubebuilder:validation:Optional + HTTPGatewayPort *float64 `json:"httpGatewayPort,omitempty" tf:"http_gateway_port,omitempty"` + + // One or more lb_rule blocks as defined below. + // +kubebuilder:validation:Optional + LBRule []LBRuleParameters `json:"lbRule,omitempty" tf:"lb_rule,omitempty"` + + // The Azure Region where the Resource Group should exist. 
Changing this forces a new Resource Group to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // One or more node_type blocks as defined below. + // +kubebuilder:validation:Optional + NodeType []ManagedClusterNodeTypeParameters `json:"nodeType,omitempty" tf:"node_type,omitempty"` + + // Administrator password for the VMs that will be created as part of this cluster. + // +kubebuilder:validation:Optional + PasswordSecretRef *v1.SecretKeySelector `json:"passwordSecretRef,omitempty" tf:"-"` + + // The name of the Resource Group where the Resource Group should exist. Changing this forces a new Resource Group to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // SKU for this cluster. Changing this forces a new resource to be created. Default is Basic, allowed values are either Basic or Standard. + // +kubebuilder:validation:Optional + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // A mapping of tags which should be assigned to the Resource Group. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Upgrade wave for the fabric runtime. Default is Wave0, allowed value must be one of Wave0, Wave1, or Wave2. 
+ // +kubebuilder:validation:Optional + UpgradeWave *string `json:"upgradeWave,omitempty" tf:"upgrade_wave,omitempty"` + + // Administrator password for the VMs that will be created as part of this cluster. + // +kubebuilder:validation:Optional + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type VMSecretsInitParameters struct { + + // One or more certificates blocks as defined above. + Certificates []CertificatesInitParameters `json:"certificates,omitempty" tf:"certificates,omitempty"` + + // The ID of the Vault that contain the certificates. + VaultID *string `json:"vaultId,omitempty" tf:"vault_id,omitempty"` +} + +type VMSecretsObservation struct { + + // One or more certificates blocks as defined above. + Certificates []CertificatesObservation `json:"certificates,omitempty" tf:"certificates,omitempty"` + + // The ID of the Vault that contain the certificates. + VaultID *string `json:"vaultId,omitempty" tf:"vault_id,omitempty"` +} + +type VMSecretsParameters struct { + + // One or more certificates blocks as defined above. + // +kubebuilder:validation:Optional + Certificates []CertificatesParameters `json:"certificates" tf:"certificates,omitempty"` + + // The ID of the Vault that contain the certificates. + // +kubebuilder:validation:Optional + VaultID *string `json:"vaultId" tf:"vault_id,omitempty"` +} + +// ManagedClusterSpec defines the desired state of ManagedCluster +type ManagedClusterSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ManagedClusterParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. 
+ // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ManagedClusterInitParameters `json:"initProvider,omitempty"` +} + +// ManagedClusterStatus defines the observed state of ManagedCluster. +type ManagedClusterStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ManagedClusterObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ManagedCluster is the Schema for the ManagedClusters API. Manages a Resource Group. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type ManagedCluster struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.clientConnectionPort) || (has(self.initProvider) && has(self.initProvider.clientConnectionPort))",message="spec.forProvider.clientConnectionPort is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.httpGatewayPort) || 
(has(self.initProvider) && has(self.initProvider.httpGatewayPort))",message="spec.forProvider.httpGatewayPort is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.lbRule) || (has(self.initProvider) && has(self.initProvider.lbRule))",message="spec.forProvider.lbRule is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + Spec ManagedClusterSpec `json:"spec"` + Status ManagedClusterStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ManagedClusterList contains a list of ManagedClusters +type ManagedClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ManagedCluster `json:"items"` +} + +// Repository type metadata. +var ( + ManagedCluster_Kind = "ManagedCluster" + ManagedCluster_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ManagedCluster_Kind}.String() + ManagedCluster_KindAPIVersion = ManagedCluster_Kind + "." + CRDGroupVersion.String() + ManagedCluster_GroupVersionKind = CRDGroupVersion.WithKind(ManagedCluster_Kind) +) + +func init() { + SchemeBuilder.Register(&ManagedCluster{}, &ManagedClusterList{}) +} diff --git a/apis/servicelinker/v1beta1/zz_generated.conversion_spokes.go b/apis/servicelinker/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..f9631a806 --- /dev/null +++ b/apis/servicelinker/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this SpringCloudConnection to the hub type. +func (tr *SpringCloudConnection) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the SpringCloudConnection type. +func (tr *SpringCloudConnection) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/servicelinker/v1beta1/zz_generated.conversion_hubs.go b/apis/servicelinker/v1beta2/zz_generated.conversion_hubs.go similarity index 94% rename from apis/servicelinker/v1beta1/zz_generated.conversion_hubs.go rename to apis/servicelinker/v1beta2/zz_generated.conversion_hubs.go index c46dc6e36..e76b8348e 100755 --- a/apis/servicelinker/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/servicelinker/v1beta2/zz_generated.conversion_hubs.go @@ -4,7 +4,7 @@ // Code generated by upjet. DO NOT EDIT. -package v1beta1 +package v1beta2 // Hub marks this type as a conversion hub. 
func (tr *SpringCloudConnection) Hub() {} diff --git a/apis/servicelinker/v1beta2/zz_generated.deepcopy.go b/apis/servicelinker/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..46783e977 --- /dev/null +++ b/apis/servicelinker/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,493 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationInitParameters) DeepCopyInto(out *AuthenticationInitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.SubscriptionID != nil { + in, out := &in.SubscriptionID, &out.SubscriptionID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationInitParameters. +func (in *AuthenticationInitParameters) DeepCopy() *AuthenticationInitParameters { + if in == nil { + return nil + } + out := new(AuthenticationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuthenticationObservation) DeepCopyInto(out *AuthenticationObservation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.SubscriptionID != nil { + in, out := &in.SubscriptionID, &out.SubscriptionID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationObservation. +func (in *AuthenticationObservation) DeepCopy() *AuthenticationObservation { + if in == nil { + return nil + } + out := new(AuthenticationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuthenticationParameters) DeepCopyInto(out *AuthenticationParameters) { + *out = *in + if in.CertificateSecretRef != nil { + in, out := &in.CertificateSecretRef, &out.CertificateSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.SecretSecretRef != nil { + in, out := &in.SecretSecretRef, &out.SecretSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.SubscriptionID != nil { + in, out := &in.SubscriptionID, &out.SubscriptionID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationParameters. +func (in *AuthenticationParameters) DeepCopy() *AuthenticationParameters { + if in == nil { + return nil + } + out := new(AuthenticationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretStoreInitParameters) DeepCopyInto(out *SecretStoreInitParameters) { + *out = *in + if in.KeyVaultID != nil { + in, out := &in.KeyVaultID, &out.KeyVaultID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretStoreInitParameters. +func (in *SecretStoreInitParameters) DeepCopy() *SecretStoreInitParameters { + if in == nil { + return nil + } + out := new(SecretStoreInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SecretStoreObservation) DeepCopyInto(out *SecretStoreObservation) { + *out = *in + if in.KeyVaultID != nil { + in, out := &in.KeyVaultID, &out.KeyVaultID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretStoreObservation. +func (in *SecretStoreObservation) DeepCopy() *SecretStoreObservation { + if in == nil { + return nil + } + out := new(SecretStoreObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretStoreParameters) DeepCopyInto(out *SecretStoreParameters) { + *out = *in + if in.KeyVaultID != nil { + in, out := &in.KeyVaultID, &out.KeyVaultID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretStoreParameters. +func (in *SecretStoreParameters) DeepCopy() *SecretStoreParameters { + if in == nil { + return nil + } + out := new(SecretStoreParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpringCloudConnection) DeepCopyInto(out *SpringCloudConnection) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudConnection. +func (in *SpringCloudConnection) DeepCopy() *SpringCloudConnection { + if in == nil { + return nil + } + out := new(SpringCloudConnection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *SpringCloudConnection) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpringCloudConnectionInitParameters) DeepCopyInto(out *SpringCloudConnectionInitParameters) { + *out = *in + if in.Authentication != nil { + in, out := &in.Authentication, &out.Authentication + *out = new(AuthenticationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ClientType != nil { + in, out := &in.ClientType, &out.ClientType + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SecretStore != nil { + in, out := &in.SecretStore, &out.SecretStore + *out = new(SecretStoreInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SpringCloudID != nil { + in, out := &in.SpringCloudID, &out.SpringCloudID + *out = new(string) + **out = **in + } + if in.SpringCloudIDRef != nil { + in, out := &in.SpringCloudIDRef, &out.SpringCloudIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SpringCloudIDSelector != nil { + in, out := &in.SpringCloudIDSelector, &out.SpringCloudIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.TargetResourceID != nil { + in, out := &in.TargetResourceID, &out.TargetResourceID + *out = new(string) + **out = **in + } + if in.TargetResourceIDRef != nil { + in, out := &in.TargetResourceIDRef, &out.TargetResourceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.TargetResourceIDSelector != nil { + in, out := &in.TargetResourceIDSelector, &out.TargetResourceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.VnetSolution != nil { + in, out := &in.VnetSolution, &out.VnetSolution + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
SpringCloudConnectionInitParameters. +func (in *SpringCloudConnectionInitParameters) DeepCopy() *SpringCloudConnectionInitParameters { + if in == nil { + return nil + } + out := new(SpringCloudConnectionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpringCloudConnectionList) DeepCopyInto(out *SpringCloudConnectionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SpringCloudConnection, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudConnectionList. +func (in *SpringCloudConnectionList) DeepCopy() *SpringCloudConnectionList { + if in == nil { + return nil + } + out := new(SpringCloudConnectionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SpringCloudConnectionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpringCloudConnectionObservation) DeepCopyInto(out *SpringCloudConnectionObservation) { + *out = *in + if in.Authentication != nil { + in, out := &in.Authentication, &out.Authentication + *out = new(AuthenticationObservation) + (*in).DeepCopyInto(*out) + } + if in.ClientType != nil { + in, out := &in.ClientType, &out.ClientType + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SecretStore != nil { + in, out := &in.SecretStore, &out.SecretStore + *out = new(SecretStoreObservation) + (*in).DeepCopyInto(*out) + } + if in.SpringCloudID != nil { + in, out := &in.SpringCloudID, &out.SpringCloudID + *out = new(string) + **out = **in + } + if in.TargetResourceID != nil { + in, out := &in.TargetResourceID, &out.TargetResourceID + *out = new(string) + **out = **in + } + if in.VnetSolution != nil { + in, out := &in.VnetSolution, &out.VnetSolution + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudConnectionObservation. +func (in *SpringCloudConnectionObservation) DeepCopy() *SpringCloudConnectionObservation { + if in == nil { + return nil + } + out := new(SpringCloudConnectionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SpringCloudConnectionParameters) DeepCopyInto(out *SpringCloudConnectionParameters) { + *out = *in + if in.Authentication != nil { + in, out := &in.Authentication, &out.Authentication + *out = new(AuthenticationParameters) + (*in).DeepCopyInto(*out) + } + if in.ClientType != nil { + in, out := &in.ClientType, &out.ClientType + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SecretStore != nil { + in, out := &in.SecretStore, &out.SecretStore + *out = new(SecretStoreParameters) + (*in).DeepCopyInto(*out) + } + if in.SpringCloudID != nil { + in, out := &in.SpringCloudID, &out.SpringCloudID + *out = new(string) + **out = **in + } + if in.SpringCloudIDRef != nil { + in, out := &in.SpringCloudIDRef, &out.SpringCloudIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SpringCloudIDSelector != nil { + in, out := &in.SpringCloudIDSelector, &out.SpringCloudIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.TargetResourceID != nil { + in, out := &in.TargetResourceID, &out.TargetResourceID + *out = new(string) + **out = **in + } + if in.TargetResourceIDRef != nil { + in, out := &in.TargetResourceIDRef, &out.TargetResourceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.TargetResourceIDSelector != nil { + in, out := &in.TargetResourceIDSelector, &out.TargetResourceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.VnetSolution != nil { + in, out := &in.VnetSolution, &out.VnetSolution + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudConnectionParameters. 
+func (in *SpringCloudConnectionParameters) DeepCopy() *SpringCloudConnectionParameters { + if in == nil { + return nil + } + out := new(SpringCloudConnectionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpringCloudConnectionSpec) DeepCopyInto(out *SpringCloudConnectionSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudConnectionSpec. +func (in *SpringCloudConnectionSpec) DeepCopy() *SpringCloudConnectionSpec { + if in == nil { + return nil + } + out := new(SpringCloudConnectionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpringCloudConnectionStatus) DeepCopyInto(out *SpringCloudConnectionStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpringCloudConnectionStatus. +func (in *SpringCloudConnectionStatus) DeepCopy() *SpringCloudConnectionStatus { + if in == nil { + return nil + } + out := new(SpringCloudConnectionStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/servicelinker/v1beta2/zz_generated.managed.go b/apis/servicelinker/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..1507e6276 --- /dev/null +++ b/apis/servicelinker/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. 
+ +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this SpringCloudConnection. +func (mg *SpringCloudConnection) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this SpringCloudConnection. +func (mg *SpringCloudConnection) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this SpringCloudConnection. +func (mg *SpringCloudConnection) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this SpringCloudConnection. +func (mg *SpringCloudConnection) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this SpringCloudConnection. +func (mg *SpringCloudConnection) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this SpringCloudConnection. +func (mg *SpringCloudConnection) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this SpringCloudConnection. +func (mg *SpringCloudConnection) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this SpringCloudConnection. +func (mg *SpringCloudConnection) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this SpringCloudConnection. +func (mg *SpringCloudConnection) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this SpringCloudConnection. +func (mg *SpringCloudConnection) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this SpringCloudConnection. 
+func (mg *SpringCloudConnection) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this SpringCloudConnection. +func (mg *SpringCloudConnection) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/servicelinker/v1beta2/zz_generated.managedlist.go b/apis/servicelinker/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..1cb1e242c --- /dev/null +++ b/apis/servicelinker/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this SpringCloudConnectionList. +func (l *SpringCloudConnectionList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/servicelinker/v1beta2/zz_generated.resolvers.go b/apis/servicelinker/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..4cfde62e2 --- /dev/null +++ b/apis/servicelinker/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,106 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + apisresolver "github.com/upbound/provider-azure/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *SpringCloudConnection) ResolveReferences( // ResolveReferences of this SpringCloudConnection. + ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("appplatform.azure.upbound.io", "v1beta2", "SpringCloudJavaDeployment", "SpringCloudJavaDeploymentList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SpringCloudID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.SpringCloudIDRef, + Selector: mg.Spec.ForProvider.SpringCloudIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SpringCloudID") + } + mg.Spec.ForProvider.SpringCloudID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SpringCloudIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("cosmosdb.azure.upbound.io", "v1beta2", "SQLDatabase", "SQLDatabaseList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.TargetResourceID), + Extract: resource.ExtractResourceID(), + 
Reference: mg.Spec.ForProvider.TargetResourceIDRef, + Selector: mg.Spec.ForProvider.TargetResourceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.TargetResourceID") + } + mg.Spec.ForProvider.TargetResourceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.TargetResourceIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("appplatform.azure.upbound.io", "v1beta2", "SpringCloudJavaDeployment", "SpringCloudJavaDeploymentList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SpringCloudID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.SpringCloudIDRef, + Selector: mg.Spec.InitProvider.SpringCloudIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SpringCloudID") + } + mg.Spec.InitProvider.SpringCloudID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SpringCloudIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("cosmosdb.azure.upbound.io", "v1beta2", "SQLDatabase", "SQLDatabaseList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.TargetResourceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.TargetResourceIDRef, + Selector: mg.Spec.InitProvider.TargetResourceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.TargetResourceID") + } + mg.Spec.InitProvider.TargetResourceID = 
reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.TargetResourceIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/servicelinker/v1beta2/zz_groupversion_info.go b/apis/servicelinker/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..d2bfd9dc8 --- /dev/null +++ b/apis/servicelinker/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=servicelinker.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "servicelinker.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/servicelinker/v1beta2/zz_springcloudconnection_terraformed.go b/apis/servicelinker/v1beta2/zz_springcloudconnection_terraformed.go new file mode 100755 index 000000000..f71865bda --- /dev/null +++ b/apis/servicelinker/v1beta2/zz_springcloudconnection_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this SpringCloudConnection +func (mg *SpringCloudConnection) GetTerraformResourceType() string { + return "azurerm_spring_cloud_connection" +} + +// GetConnectionDetailsMapping for this SpringCloudConnection +func (tr *SpringCloudConnection) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"authentication[*].certificate": "spec.forProvider.authentication[*].certificateSecretRef", "authentication[*].secret": "spec.forProvider.authentication[*].secretSecretRef"} +} + +// GetObservation of this SpringCloudConnection +func (tr *SpringCloudConnection) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this SpringCloudConnection +func (tr *SpringCloudConnection) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this SpringCloudConnection +func (tr *SpringCloudConnection) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this SpringCloudConnection +func (tr *SpringCloudConnection) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this SpringCloudConnection +func (tr *SpringCloudConnection) SetParameters(params map[string]any) error { + p, err := 
json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this SpringCloudConnection +func (tr *SpringCloudConnection) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this SpringCloudConnection +func (tr *SpringCloudConnection) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this SpringCloudConnection using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *SpringCloudConnection) LateInitialize(attrs []byte) (bool, error) { + params := &SpringCloudConnectionParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *SpringCloudConnection) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/servicelinker/v1beta2/zz_springcloudconnection_types.go b/apis/servicelinker/v1beta2/zz_springcloudconnection_types.go new file mode 100755 index 000000000..b6f8c55ee --- /dev/null +++ b/apis/servicelinker/v1beta2/zz_springcloudconnection_types.go @@ -0,0 +1,284 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AuthenticationInitParameters struct { + + // Client ID for userAssignedIdentity or servicePrincipal auth. Should be specified when type is set to servicePrincipalSecret or servicePrincipalCertificate. When type is set to userAssignedIdentity, client_id and subscription_id should be either both specified or both not specified. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // Username or account name for secret auth. name and secret should be either both specified or both not specified when type is set to secret. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Principal ID for servicePrincipal auth. 
Should be specified when type is set to servicePrincipalSecret or servicePrincipalCertificate. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // Subscription ID for userAssignedIdentity. subscription_id and client_id should be either both specified or both not specified. + SubscriptionID *string `json:"subscriptionId,omitempty" tf:"subscription_id,omitempty"` + + // The authentication type. Possible values are systemAssignedIdentity, userAssignedIdentity, servicePrincipalSecret, servicePrincipalCertificate, secret. Changing this forces a new resource to be created. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type AuthenticationObservation struct { + + // Client ID for userAssignedIdentity or servicePrincipal auth. Should be specified when type is set to servicePrincipalSecret or servicePrincipalCertificate. When type is set to userAssignedIdentity, client_id and subscription_id should be either both specified or both not specified. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // Username or account name for secret auth. name and secret should be either both specified or both not specified when type is set to secret. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Principal ID for servicePrincipal auth. Should be specified when type is set to servicePrincipalSecret or servicePrincipalCertificate. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // Subscription ID for userAssignedIdentity. subscription_id and client_id should be either both specified or both not specified. + SubscriptionID *string `json:"subscriptionId,omitempty" tf:"subscription_id,omitempty"` + + // The authentication type. Possible values are systemAssignedIdentity, userAssignedIdentity, servicePrincipalSecret, servicePrincipalCertificate, secret. Changing this forces a new resource to be created. 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type AuthenticationParameters struct { + + // Service principal certificate for servicePrincipal auth. Should be specified when type is set to servicePrincipalCertificate. + // +kubebuilder:validation:Optional + CertificateSecretRef *v1.SecretKeySelector `json:"certificateSecretRef,omitempty" tf:"-"` + + // Client ID for userAssignedIdentity or servicePrincipal auth. Should be specified when type is set to servicePrincipalSecret or servicePrincipalCertificate. When type is set to userAssignedIdentity, client_id and subscription_id should be either both specified or both not specified. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // Username or account name for secret auth. name and secret should be either both specified or both not specified when type is set to secret. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Principal ID for servicePrincipal auth. Should be specified when type is set to servicePrincipalSecret or servicePrincipalCertificate. + // +kubebuilder:validation:Optional + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // Password or account key for secret auth. secret and name should be either both specified or both not specified when type is set to secret. + // +kubebuilder:validation:Optional + SecretSecretRef *v1.SecretKeySelector `json:"secretSecretRef,omitempty" tf:"-"` + + // Subscription ID for userAssignedIdentity. subscription_id and client_id should be either both specified or both not specified. + // +kubebuilder:validation:Optional + SubscriptionID *string `json:"subscriptionId,omitempty" tf:"subscription_id,omitempty"` + + // The authentication type. Possible values are systemAssignedIdentity, userAssignedIdentity, servicePrincipalSecret, servicePrincipalCertificate, secret. 
Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type SecretStoreInitParameters struct { + + // The key vault id to store secret. + KeyVaultID *string `json:"keyVaultId,omitempty" tf:"key_vault_id,omitempty"` +} + +type SecretStoreObservation struct { + + // The key vault id to store secret. + KeyVaultID *string `json:"keyVaultId,omitempty" tf:"key_vault_id,omitempty"` +} + +type SecretStoreParameters struct { + + // The key vault id to store secret. + // +kubebuilder:validation:Optional + KeyVaultID *string `json:"keyVaultId" tf:"key_vault_id,omitempty"` +} + +type SpringCloudConnectionInitParameters struct { + + // The authentication info. An authentication block as defined below. + Authentication *AuthenticationInitParameters `json:"authentication,omitempty" tf:"authentication,omitempty"` + + // The application client type. Possible values are none, dotnet, java, python, go, php, ruby, django, nodejs and springBoot. Defaults to none. + ClientType *string `json:"clientType,omitempty" tf:"client_type,omitempty"` + + // The name of the service connection. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // An option to store secret value in secure place. An secret_store block as defined below. + SecretStore *SecretStoreInitParameters `json:"secretStore,omitempty" tf:"secret_store,omitempty"` + + // The ID of the data source spring cloud. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/appplatform/v1beta2.SpringCloudJavaDeployment + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + SpringCloudID *string `json:"springCloudId,omitempty" tf:"spring_cloud_id,omitempty"` + + // Reference to a SpringCloudJavaDeployment in appplatform to populate springCloudId. 
+ // +kubebuilder:validation:Optional + SpringCloudIDRef *v1.Reference `json:"springCloudIdRef,omitempty" tf:"-"` + + // Selector for a SpringCloudJavaDeployment in appplatform to populate springCloudId. + // +kubebuilder:validation:Optional + SpringCloudIDSelector *v1.Selector `json:"springCloudIdSelector,omitempty" tf:"-"` + + // The ID of the target resource. Changing this forces a new resource to be created. Possible target resources are Postgres, PostgresFlexible, Mysql, Sql, Redis, RedisEnterprise, CosmosCassandra, CosmosGremlin, CosmosMongo, CosmosSql, CosmosTable, StorageBlob, StorageQueue, StorageFile, StorageTable, AppConfig, EventHub, ServiceBus, SignalR, WebPubSub, ConfluentKafka. The integration guide can be found here. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cosmosdb/v1beta2.SQLDatabase + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + TargetResourceID *string `json:"targetResourceId,omitempty" tf:"target_resource_id,omitempty"` + + // Reference to a SQLDatabase in cosmosdb to populate targetResourceId. + // +kubebuilder:validation:Optional + TargetResourceIDRef *v1.Reference `json:"targetResourceIdRef,omitempty" tf:"-"` + + // Selector for a SQLDatabase in cosmosdb to populate targetResourceId. + // +kubebuilder:validation:Optional + TargetResourceIDSelector *v1.Selector `json:"targetResourceIdSelector,omitempty" tf:"-"` + + // The type of the VNet solution. Possible values are serviceEndpoint, privateLink. + VnetSolution *string `json:"vnetSolution,omitempty" tf:"vnet_solution,omitempty"` +} + +type SpringCloudConnectionObservation struct { + + // The authentication info. An authentication block as defined below. + Authentication *AuthenticationObservation `json:"authentication,omitempty" tf:"authentication,omitempty"` + + // The application client type. Possible values are none, dotnet, java, python, go, php, ruby, django, nodejs and springBoot. 
Defaults to none. + ClientType *string `json:"clientType,omitempty" tf:"client_type,omitempty"` + + // The ID of the service connector. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name of the service connection. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // An option to store secret value in secure place. An secret_store block as defined below. + SecretStore *SecretStoreObservation `json:"secretStore,omitempty" tf:"secret_store,omitempty"` + + // The ID of the data source spring cloud. Changing this forces a new resource to be created. + SpringCloudID *string `json:"springCloudId,omitempty" tf:"spring_cloud_id,omitempty"` + + // The ID of the target resource. Changing this forces a new resource to be created. Possible target resources are Postgres, PostgresFlexible, Mysql, Sql, Redis, RedisEnterprise, CosmosCassandra, CosmosGremlin, CosmosMongo, CosmosSql, CosmosTable, StorageBlob, StorageQueue, StorageFile, StorageTable, AppConfig, EventHub, ServiceBus, SignalR, WebPubSub, ConfluentKafka. The integration guide can be found here. + TargetResourceID *string `json:"targetResourceId,omitempty" tf:"target_resource_id,omitempty"` + + // The type of the VNet solution. Possible values are serviceEndpoint, privateLink. + VnetSolution *string `json:"vnetSolution,omitempty" tf:"vnet_solution,omitempty"` +} + +type SpringCloudConnectionParameters struct { + + // The authentication info. An authentication block as defined below. + // +kubebuilder:validation:Optional + Authentication *AuthenticationParameters `json:"authentication,omitempty" tf:"authentication,omitempty"` + + // The application client type. Possible values are none, dotnet, java, python, go, php, ruby, django, nodejs and springBoot. Defaults to none. + // +kubebuilder:validation:Optional + ClientType *string `json:"clientType,omitempty" tf:"client_type,omitempty"` + + // The name of the service connection. 
Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // An option to store secret value in secure place. An secret_store block as defined below. + // +kubebuilder:validation:Optional + SecretStore *SecretStoreParameters `json:"secretStore,omitempty" tf:"secret_store,omitempty"` + + // The ID of the data source spring cloud. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/appplatform/v1beta2.SpringCloudJavaDeployment + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + SpringCloudID *string `json:"springCloudId,omitempty" tf:"spring_cloud_id,omitempty"` + + // Reference to a SpringCloudJavaDeployment in appplatform to populate springCloudId. + // +kubebuilder:validation:Optional + SpringCloudIDRef *v1.Reference `json:"springCloudIdRef,omitempty" tf:"-"` + + // Selector for a SpringCloudJavaDeployment in appplatform to populate springCloudId. + // +kubebuilder:validation:Optional + SpringCloudIDSelector *v1.Selector `json:"springCloudIdSelector,omitempty" tf:"-"` + + // The ID of the target resource. Changing this forces a new resource to be created. Possible target resources are Postgres, PostgresFlexible, Mysql, Sql, Redis, RedisEnterprise, CosmosCassandra, CosmosGremlin, CosmosMongo, CosmosSql, CosmosTable, StorageBlob, StorageQueue, StorageFile, StorageTable, AppConfig, EventHub, ServiceBus, SignalR, WebPubSub, ConfluentKafka. The integration guide can be found here. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/cosmosdb/v1beta2.SQLDatabase + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + TargetResourceID *string `json:"targetResourceId,omitempty" tf:"target_resource_id,omitempty"` + + // Reference to a SQLDatabase in cosmosdb to populate targetResourceId. + // +kubebuilder:validation:Optional + TargetResourceIDRef *v1.Reference `json:"targetResourceIdRef,omitempty" tf:"-"` + + // Selector for a SQLDatabase in cosmosdb to populate targetResourceId. + // +kubebuilder:validation:Optional + TargetResourceIDSelector *v1.Selector `json:"targetResourceIdSelector,omitempty" tf:"-"` + + // The type of the VNet solution. Possible values are serviceEndpoint, privateLink. + // +kubebuilder:validation:Optional + VnetSolution *string `json:"vnetSolution,omitempty" tf:"vnet_solution,omitempty"` +} + +// SpringCloudConnectionSpec defines the desired state of SpringCloudConnection +type SpringCloudConnectionSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider SpringCloudConnectionParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider SpringCloudConnectionInitParameters `json:"initProvider,omitempty"` +} + +// SpringCloudConnectionStatus defines the observed state of SpringCloudConnection. +type SpringCloudConnectionStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SpringCloudConnectionObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// SpringCloudConnection is the Schema for the SpringCloudConnections API. Manages a service connector for spring cloud app. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type SpringCloudConnection struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.authentication) || (has(self.initProvider) && has(self.initProvider.authentication))",message="spec.forProvider.authentication is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec SpringCloudConnectionSpec `json:"spec"` + Status SpringCloudConnectionStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SpringCloudConnectionList 
contains a list of SpringCloudConnections +type SpringCloudConnectionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SpringCloudConnection `json:"items"` +} + +// Repository type metadata. +var ( + SpringCloudConnection_Kind = "SpringCloudConnection" + SpringCloudConnection_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: SpringCloudConnection_Kind}.String() + SpringCloudConnection_KindAPIVersion = SpringCloudConnection_Kind + "." + CRDGroupVersion.String() + SpringCloudConnection_GroupVersionKind = CRDGroupVersion.WithKind(SpringCloudConnection_Kind) +) + +func init() { + SchemeBuilder.Register(&SpringCloudConnection{}, &SpringCloudConnectionList{}) +} diff --git a/apis/signalrservice/v1beta1/zz_generated.conversion_hubs.go b/apis/signalrservice/v1beta1/zz_generated.conversion_hubs.go index 2025952c6..0f0e8927e 100755 --- a/apis/signalrservice/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/signalrservice/v1beta1/zz_generated.conversion_hubs.go @@ -6,20 +6,5 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *Service) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *NetworkACL) Hub() {} - // Hub marks this type as a conversion hub. func (tr *SignalrSharedPrivateLinkResource) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *WebPubsub) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *WebPubsubHub) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *WebPubsubNetworkACL) Hub() {} diff --git a/apis/signalrservice/v1beta1/zz_generated.conversion_spokes.go b/apis/signalrservice/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..3ab68bb88 --- /dev/null +++ b/apis/signalrservice/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,114 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this NetworkACL to the hub type. +func (tr *NetworkACL) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the NetworkACL type. +func (tr *NetworkACL) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Service to the hub type. +func (tr *Service) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Service type. 
+func (tr *Service) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this WebPubsub to the hub type. +func (tr *WebPubsub) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the WebPubsub type. +func (tr *WebPubsub) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this WebPubsubHub to the hub type. +func (tr *WebPubsubHub) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the WebPubsubHub type. 
+func (tr *WebPubsubHub) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this WebPubsubNetworkACL to the hub type. +func (tr *WebPubsubNetworkACL) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the WebPubsubNetworkACL type. +func (tr *WebPubsubNetworkACL) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/signalrservice/v1beta1/zz_generated.resolvers.go b/apis/signalrservice/v1beta1/zz_generated.resolvers.go index 3886c8215..439010996 100644 --- a/apis/signalrservice/v1beta1/zz_generated.resolvers.go +++ b/apis/signalrservice/v1beta1/zz_generated.resolvers.go @@ -149,7 +149,7 @@ func (mg *SignalrSharedPrivateLinkResource) ResolveReferences(ctx context.Contex var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("signalrservice.azure.upbound.io", "v1beta1", "Service", "ServiceList") + m, l, err = 
apisresolver.GetManagedResource("signalrservice.azure.upbound.io", "v1beta2", "Service", "ServiceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -168,7 +168,7 @@ func (mg *SignalrSharedPrivateLinkResource) ResolveReferences(ctx context.Contex mg.Spec.ForProvider.SignalrServiceID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.SignalrServiceIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta1", "Vault", "VaultList") + m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta2", "Vault", "VaultList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -187,7 +187,7 @@ func (mg *SignalrSharedPrivateLinkResource) ResolveReferences(ctx context.Contex mg.Spec.ForProvider.TargetResourceID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.TargetResourceIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("signalrservice.azure.upbound.io", "v1beta1", "Service", "ServiceList") + m, l, err = apisresolver.GetManagedResource("signalrservice.azure.upbound.io", "v1beta2", "Service", "ServiceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -206,7 +206,7 @@ func (mg *SignalrSharedPrivateLinkResource) ResolveReferences(ctx context.Contex mg.Spec.InitProvider.SignalrServiceID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.SignalrServiceIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta1", "Vault", "VaultList") + m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta2", "Vault", "VaultList") if err != nil { return errors.Wrap(err, "failed to get the reference target 
managed resource and its list for reference resolution") } diff --git a/apis/signalrservice/v1beta1/zz_signalrsharedprivatelinkresource_types.go b/apis/signalrservice/v1beta1/zz_signalrsharedprivatelinkresource_types.go index 2e5aee8cb..6253df854 100755 --- a/apis/signalrservice/v1beta1/zz_signalrsharedprivatelinkresource_types.go +++ b/apis/signalrservice/v1beta1/zz_signalrsharedprivatelinkresource_types.go @@ -22,7 +22,7 @@ type SignalrSharedPrivateLinkResourceInitParameters struct { RequestMessage *string `json:"requestMessage,omitempty" tf:"request_message,omitempty"` // The id of the Signalr Service. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/signalrservice/v1beta1.Service + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/signalrservice/v1beta2.Service // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() SignalrServiceID *string `json:"signalrServiceId,omitempty" tf:"signalr_service_id,omitempty"` @@ -38,7 +38,7 @@ type SignalrSharedPrivateLinkResourceInitParameters struct { SubResourceName *string `json:"subResourceName,omitempty" tf:"sub_resource_name,omitempty"` // The ID of the Shared Private Link Enabled Remote Resource which this Signalr Private Endpoint should be connected to. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta1.Vault + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta2.Vault // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() TargetResourceID *string `json:"targetResourceId,omitempty" tf:"target_resource_id,omitempty"` @@ -86,7 +86,7 @@ type SignalrSharedPrivateLinkResourceParameters struct { RequestMessage *string `json:"requestMessage,omitempty" tf:"request_message,omitempty"` // The id of the Signalr Service. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/signalrservice/v1beta1.Service + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/signalrservice/v1beta2.Service // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional SignalrServiceID *string `json:"signalrServiceId,omitempty" tf:"signalr_service_id,omitempty"` @@ -104,7 +104,7 @@ type SignalrSharedPrivateLinkResourceParameters struct { SubResourceName *string `json:"subResourceName,omitempty" tf:"sub_resource_name,omitempty"` // The ID of the Shared Private Link Enabled Remote Resource which this Signalr Private Endpoint should be connected to. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta1.Vault + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta2.Vault // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional TargetResourceID *string `json:"targetResourceId,omitempty" tf:"target_resource_id,omitempty"` diff --git a/apis/signalrservice/v1beta2/zz_generated.conversion_hubs.go b/apis/signalrservice/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 000000000..8580e7461 --- /dev/null +++ b/apis/signalrservice/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,22 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *NetworkACL) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Service) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *WebPubsub) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *WebPubsubHub) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *WebPubsubNetworkACL) Hub() {} diff --git a/apis/signalrservice/v1beta2/zz_generated.deepcopy.go b/apis/signalrservice/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..07ae5bbd8 --- /dev/null +++ b/apis/signalrservice/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,3252 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuthInitParameters) DeepCopyInto(out *AuthInitParameters) { + *out = *in + if in.ManagedIdentityID != nil { + in, out := &in.ManagedIdentityID, &out.ManagedIdentityID + *out = new(string) + **out = **in + } + if in.ManagedIdentityIDRef != nil { + in, out := &in.ManagedIdentityIDRef, &out.ManagedIdentityIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ManagedIdentityIDSelector != nil { + in, out := &in.ManagedIdentityIDSelector, &out.ManagedIdentityIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthInitParameters. +func (in *AuthInitParameters) DeepCopy() *AuthInitParameters { + if in == nil { + return nil + } + out := new(AuthInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthObservation) DeepCopyInto(out *AuthObservation) { + *out = *in + if in.ManagedIdentityID != nil { + in, out := &in.ManagedIdentityID, &out.ManagedIdentityID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthObservation. +func (in *AuthObservation) DeepCopy() *AuthObservation { + if in == nil { + return nil + } + out := new(AuthObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuthParameters) DeepCopyInto(out *AuthParameters) { + *out = *in + if in.ManagedIdentityID != nil { + in, out := &in.ManagedIdentityID, &out.ManagedIdentityID + *out = new(string) + **out = **in + } + if in.ManagedIdentityIDRef != nil { + in, out := &in.ManagedIdentityIDRef, &out.ManagedIdentityIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ManagedIdentityIDSelector != nil { + in, out := &in.ManagedIdentityIDSelector, &out.ManagedIdentityIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthParameters. +func (in *AuthParameters) DeepCopy() *AuthParameters { + if in == nil { + return nil + } + out := new(AuthParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CorsInitParameters) DeepCopyInto(out *CorsInitParameters) { + *out = *in + if in.AllowedOrigins != nil { + in, out := &in.AllowedOrigins, &out.AllowedOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CorsInitParameters. +func (in *CorsInitParameters) DeepCopy() *CorsInitParameters { + if in == nil { + return nil + } + out := new(CorsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CorsObservation) DeepCopyInto(out *CorsObservation) { + *out = *in + if in.AllowedOrigins != nil { + in, out := &in.AllowedOrigins, &out.AllowedOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CorsObservation. +func (in *CorsObservation) DeepCopy() *CorsObservation { + if in == nil { + return nil + } + out := new(CorsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CorsParameters) DeepCopyInto(out *CorsParameters) { + *out = *in + if in.AllowedOrigins != nil { + in, out := &in.AllowedOrigins, &out.AllowedOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CorsParameters. +func (in *CorsParameters) DeepCopy() *CorsParameters { + if in == nil { + return nil + } + out := new(CorsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EventHandlerInitParameters) DeepCopyInto(out *EventHandlerInitParameters) { + *out = *in + if in.Auth != nil { + in, out := &in.Auth, &out.Auth + *out = new(AuthInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SystemEvents != nil { + in, out := &in.SystemEvents, &out.SystemEvents + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.URLTemplate != nil { + in, out := &in.URLTemplate, &out.URLTemplate + *out = new(string) + **out = **in + } + if in.UserEventPattern != nil { + in, out := &in.UserEventPattern, &out.UserEventPattern + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventHandlerInitParameters. +func (in *EventHandlerInitParameters) DeepCopy() *EventHandlerInitParameters { + if in == nil { + return nil + } + out := new(EventHandlerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventHandlerObservation) DeepCopyInto(out *EventHandlerObservation) { + *out = *in + if in.Auth != nil { + in, out := &in.Auth, &out.Auth + *out = new(AuthObservation) + (*in).DeepCopyInto(*out) + } + if in.SystemEvents != nil { + in, out := &in.SystemEvents, &out.SystemEvents + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.URLTemplate != nil { + in, out := &in.URLTemplate, &out.URLTemplate + *out = new(string) + **out = **in + } + if in.UserEventPattern != nil { + in, out := &in.UserEventPattern, &out.UserEventPattern + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventHandlerObservation. 
+func (in *EventHandlerObservation) DeepCopy() *EventHandlerObservation { + if in == nil { + return nil + } + out := new(EventHandlerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventHandlerParameters) DeepCopyInto(out *EventHandlerParameters) { + *out = *in + if in.Auth != nil { + in, out := &in.Auth, &out.Auth + *out = new(AuthParameters) + (*in).DeepCopyInto(*out) + } + if in.SystemEvents != nil { + in, out := &in.SystemEvents, &out.SystemEvents + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.URLTemplate != nil { + in, out := &in.URLTemplate, &out.URLTemplate + *out = new(string) + **out = **in + } + if in.UserEventPattern != nil { + in, out := &in.UserEventPattern, &out.UserEventPattern + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventHandlerParameters. +func (in *EventHandlerParameters) DeepCopy() *EventHandlerParameters { + if in == nil { + return nil + } + out := new(EventHandlerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EventListenerInitParameters) DeepCopyInto(out *EventListenerInitParameters) { + *out = *in + if in.EventHubName != nil { + in, out := &in.EventHubName, &out.EventHubName + *out = new(string) + **out = **in + } + if in.EventHubNameRef != nil { + in, out := &in.EventHubNameRef, &out.EventHubNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.EventHubNameSelector != nil { + in, out := &in.EventHubNameSelector, &out.EventHubNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.EventHubNamespaceName != nil { + in, out := &in.EventHubNamespaceName, &out.EventHubNamespaceName + *out = new(string) + **out = **in + } + if in.EventHubNamespaceNameRef != nil { + in, out := &in.EventHubNamespaceNameRef, &out.EventHubNamespaceNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.EventHubNamespaceNameSelector != nil { + in, out := &in.EventHubNamespaceNameSelector, &out.EventHubNamespaceNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SystemEventNameFilter != nil { + in, out := &in.SystemEventNameFilter, &out.SystemEventNameFilter + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.UserEventNameFilter != nil { + in, out := &in.UserEventNameFilter, &out.UserEventNameFilter + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventListenerInitParameters. +func (in *EventListenerInitParameters) DeepCopy() *EventListenerInitParameters { + if in == nil { + return nil + } + out := new(EventListenerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *EventListenerObservation) DeepCopyInto(out *EventListenerObservation) { + *out = *in + if in.EventHubName != nil { + in, out := &in.EventHubName, &out.EventHubName + *out = new(string) + **out = **in + } + if in.EventHubNamespaceName != nil { + in, out := &in.EventHubNamespaceName, &out.EventHubNamespaceName + *out = new(string) + **out = **in + } + if in.SystemEventNameFilter != nil { + in, out := &in.SystemEventNameFilter, &out.SystemEventNameFilter + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.UserEventNameFilter != nil { + in, out := &in.UserEventNameFilter, &out.UserEventNameFilter + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventListenerObservation. +func (in *EventListenerObservation) DeepCopy() *EventListenerObservation { + if in == nil { + return nil + } + out := new(EventListenerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EventListenerParameters) DeepCopyInto(out *EventListenerParameters) { + *out = *in + if in.EventHubName != nil { + in, out := &in.EventHubName, &out.EventHubName + *out = new(string) + **out = **in + } + if in.EventHubNameRef != nil { + in, out := &in.EventHubNameRef, &out.EventHubNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.EventHubNameSelector != nil { + in, out := &in.EventHubNameSelector, &out.EventHubNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.EventHubNamespaceName != nil { + in, out := &in.EventHubNamespaceName, &out.EventHubNamespaceName + *out = new(string) + **out = **in + } + if in.EventHubNamespaceNameRef != nil { + in, out := &in.EventHubNamespaceNameRef, &out.EventHubNamespaceNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.EventHubNamespaceNameSelector != nil { + in, out := &in.EventHubNamespaceNameSelector, &out.EventHubNamespaceNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SystemEventNameFilter != nil { + in, out := &in.SystemEventNameFilter, &out.SystemEventNameFilter + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.UserEventNameFilter != nil { + in, out := &in.UserEventNameFilter, &out.UserEventNameFilter + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventListenerParameters. +func (in *EventListenerParameters) DeepCopy() *EventListenerParameters { + if in == nil { + return nil + } + out := new(EventListenerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentityInitParameters) DeepCopyInto(out *IdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityInitParameters. +func (in *IdentityInitParameters) DeepCopy() *IdentityInitParameters { + if in == nil { + return nil + } + out := new(IdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityObservation) DeepCopyInto(out *IdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityObservation. +func (in *IdentityObservation) DeepCopy() *IdentityObservation { + if in == nil { + return nil + } + out := new(IdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentityParameters) DeepCopyInto(out *IdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityParameters. +func (in *IdentityParameters) DeepCopy() *IdentityParameters { + if in == nil { + return nil + } + out := new(IdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LiveTraceInitParameters) DeepCopyInto(out *LiveTraceInitParameters) { + *out = *in + if in.ConnectivityLogsEnabled != nil { + in, out := &in.ConnectivityLogsEnabled, &out.ConnectivityLogsEnabled + *out = new(bool) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.HTTPRequestLogsEnabled != nil { + in, out := &in.HTTPRequestLogsEnabled, &out.HTTPRequestLogsEnabled + *out = new(bool) + **out = **in + } + if in.MessagingLogsEnabled != nil { + in, out := &in.MessagingLogsEnabled, &out.MessagingLogsEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LiveTraceInitParameters. +func (in *LiveTraceInitParameters) DeepCopy() *LiveTraceInitParameters { + if in == nil { + return nil + } + out := new(LiveTraceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LiveTraceObservation) DeepCopyInto(out *LiveTraceObservation) { + *out = *in + if in.ConnectivityLogsEnabled != nil { + in, out := &in.ConnectivityLogsEnabled, &out.ConnectivityLogsEnabled + *out = new(bool) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.HTTPRequestLogsEnabled != nil { + in, out := &in.HTTPRequestLogsEnabled, &out.HTTPRequestLogsEnabled + *out = new(bool) + **out = **in + } + if in.MessagingLogsEnabled != nil { + in, out := &in.MessagingLogsEnabled, &out.MessagingLogsEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LiveTraceObservation. +func (in *LiveTraceObservation) DeepCopy() *LiveTraceObservation { + if in == nil { + return nil + } + out := new(LiveTraceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LiveTraceParameters) DeepCopyInto(out *LiveTraceParameters) { + *out = *in + if in.ConnectivityLogsEnabled != nil { + in, out := &in.ConnectivityLogsEnabled, &out.ConnectivityLogsEnabled + *out = new(bool) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.HTTPRequestLogsEnabled != nil { + in, out := &in.HTTPRequestLogsEnabled, &out.HTTPRequestLogsEnabled + *out = new(bool) + **out = **in + } + if in.MessagingLogsEnabled != nil { + in, out := &in.MessagingLogsEnabled, &out.MessagingLogsEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LiveTraceParameters. 
+func (in *LiveTraceParameters) DeepCopy() *LiveTraceParameters { + if in == nil { + return nil + } + out := new(LiveTraceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkACL) DeepCopyInto(out *NetworkACL) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkACL. +func (in *NetworkACL) DeepCopy() *NetworkACL { + if in == nil { + return nil + } + out := new(NetworkACL) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NetworkACL) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkACLInitParameters) DeepCopyInto(out *NetworkACLInitParameters) { + *out = *in + if in.DefaultAction != nil { + in, out := &in.DefaultAction, &out.DefaultAction + *out = new(string) + **out = **in + } + if in.PrivateEndpoint != nil { + in, out := &in.PrivateEndpoint, &out.PrivateEndpoint + *out = make([]PrivateEndpointInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PublicNetwork != nil { + in, out := &in.PublicNetwork, &out.PublicNetwork + *out = new(PublicNetworkInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SignalrServiceID != nil { + in, out := &in.SignalrServiceID, &out.SignalrServiceID + *out = new(string) + **out = **in + } + if in.SignalrServiceIDRef != nil { + in, out := &in.SignalrServiceIDRef, &out.SignalrServiceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SignalrServiceIDSelector != nil { + in, out := &in.SignalrServiceIDSelector, &out.SignalrServiceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkACLInitParameters. +func (in *NetworkACLInitParameters) DeepCopy() *NetworkACLInitParameters { + if in == nil { + return nil + } + out := new(NetworkACLInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkACLList) DeepCopyInto(out *NetworkACLList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NetworkACL, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkACLList. 
+func (in *NetworkACLList) DeepCopy() *NetworkACLList { + if in == nil { + return nil + } + out := new(NetworkACLList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NetworkACLList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkACLObservation) DeepCopyInto(out *NetworkACLObservation) { + *out = *in + if in.DefaultAction != nil { + in, out := &in.DefaultAction, &out.DefaultAction + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.PrivateEndpoint != nil { + in, out := &in.PrivateEndpoint, &out.PrivateEndpoint + *out = make([]PrivateEndpointObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PublicNetwork != nil { + in, out := &in.PublicNetwork, &out.PublicNetwork + *out = new(PublicNetworkObservation) + (*in).DeepCopyInto(*out) + } + if in.SignalrServiceID != nil { + in, out := &in.SignalrServiceID, &out.SignalrServiceID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkACLObservation. +func (in *NetworkACLObservation) DeepCopy() *NetworkACLObservation { + if in == nil { + return nil + } + out := new(NetworkACLObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkACLParameters) DeepCopyInto(out *NetworkACLParameters) { + *out = *in + if in.DefaultAction != nil { + in, out := &in.DefaultAction, &out.DefaultAction + *out = new(string) + **out = **in + } + if in.PrivateEndpoint != nil { + in, out := &in.PrivateEndpoint, &out.PrivateEndpoint + *out = make([]PrivateEndpointParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PublicNetwork != nil { + in, out := &in.PublicNetwork, &out.PublicNetwork + *out = new(PublicNetworkParameters) + (*in).DeepCopyInto(*out) + } + if in.SignalrServiceID != nil { + in, out := &in.SignalrServiceID, &out.SignalrServiceID + *out = new(string) + **out = **in + } + if in.SignalrServiceIDRef != nil { + in, out := &in.SignalrServiceIDRef, &out.SignalrServiceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SignalrServiceIDSelector != nil { + in, out := &in.SignalrServiceIDSelector, &out.SignalrServiceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkACLParameters. +func (in *NetworkACLParameters) DeepCopy() *NetworkACLParameters { + if in == nil { + return nil + } + out := new(NetworkACLParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkACLSpec) DeepCopyInto(out *NetworkACLSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkACLSpec. 
+func (in *NetworkACLSpec) DeepCopy() *NetworkACLSpec { + if in == nil { + return nil + } + out := new(NetworkACLSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkACLStatus) DeepCopyInto(out *NetworkACLStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkACLStatus. +func (in *NetworkACLStatus) DeepCopy() *NetworkACLStatus { + if in == nil { + return nil + } + out := new(NetworkACLStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrivateEndpointInitParameters) DeepCopyInto(out *PrivateEndpointInitParameters) { + *out = *in + if in.AllowedRequestTypes != nil { + in, out := &in.AllowedRequestTypes, &out.AllowedRequestTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DeniedRequestTypes != nil { + in, out := &in.DeniedRequestTypes, &out.DeniedRequestTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IDRef != nil { + in, out := &in.IDRef, &out.IDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IDSelector != nil { + in, out := &in.IDSelector, &out.IDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateEndpointInitParameters. 
+func (in *PrivateEndpointInitParameters) DeepCopy() *PrivateEndpointInitParameters { + if in == nil { + return nil + } + out := new(PrivateEndpointInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrivateEndpointObservation) DeepCopyInto(out *PrivateEndpointObservation) { + *out = *in + if in.AllowedRequestTypes != nil { + in, out := &in.AllowedRequestTypes, &out.AllowedRequestTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DeniedRequestTypes != nil { + in, out := &in.DeniedRequestTypes, &out.DeniedRequestTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateEndpointObservation. +func (in *PrivateEndpointObservation) DeepCopy() *PrivateEndpointObservation { + if in == nil { + return nil + } + out := new(PrivateEndpointObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PrivateEndpointParameters) DeepCopyInto(out *PrivateEndpointParameters) { + *out = *in + if in.AllowedRequestTypes != nil { + in, out := &in.AllowedRequestTypes, &out.AllowedRequestTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DeniedRequestTypes != nil { + in, out := &in.DeniedRequestTypes, &out.DeniedRequestTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IDRef != nil { + in, out := &in.IDRef, &out.IDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IDSelector != nil { + in, out := &in.IDSelector, &out.IDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateEndpointParameters. +func (in *PrivateEndpointParameters) DeepCopy() *PrivateEndpointParameters { + if in == nil { + return nil + } + out := new(PrivateEndpointParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PublicNetworkInitParameters) DeepCopyInto(out *PublicNetworkInitParameters) { + *out = *in + if in.AllowedRequestTypes != nil { + in, out := &in.AllowedRequestTypes, &out.AllowedRequestTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DeniedRequestTypes != nil { + in, out := &in.DeniedRequestTypes, &out.DeniedRequestTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublicNetworkInitParameters. +func (in *PublicNetworkInitParameters) DeepCopy() *PublicNetworkInitParameters { + if in == nil { + return nil + } + out := new(PublicNetworkInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PublicNetworkObservation) DeepCopyInto(out *PublicNetworkObservation) { + *out = *in + if in.AllowedRequestTypes != nil { + in, out := &in.AllowedRequestTypes, &out.AllowedRequestTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DeniedRequestTypes != nil { + in, out := &in.DeniedRequestTypes, &out.DeniedRequestTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublicNetworkObservation. 
+func (in *PublicNetworkObservation) DeepCopy() *PublicNetworkObservation { + if in == nil { + return nil + } + out := new(PublicNetworkObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PublicNetworkParameters) DeepCopyInto(out *PublicNetworkParameters) { + *out = *in + if in.AllowedRequestTypes != nil { + in, out := &in.AllowedRequestTypes, &out.AllowedRequestTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DeniedRequestTypes != nil { + in, out := &in.DeniedRequestTypes, &out.DeniedRequestTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublicNetworkParameters. +func (in *PublicNetworkParameters) DeepCopy() *PublicNetworkParameters { + if in == nil { + return nil + } + out := new(PublicNetworkParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Service) DeepCopyInto(out *Service) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Service. +func (in *Service) DeepCopy() *Service { + if in == nil { + return nil + } + out := new(Service) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *Service) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceInitParameters) DeepCopyInto(out *ServiceInitParameters) { + *out = *in + if in.AADAuthEnabled != nil { + in, out := &in.AADAuthEnabled, &out.AADAuthEnabled + *out = new(bool) + **out = **in + } + if in.ConnectivityLogsEnabled != nil { + in, out := &in.ConnectivityLogsEnabled, &out.ConnectivityLogsEnabled + *out = new(bool) + **out = **in + } + if in.Cors != nil { + in, out := &in.Cors, &out.Cors + *out = make([]CorsInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HTTPRequestLogsEnabled != nil { + in, out := &in.HTTPRequestLogsEnabled, &out.HTTPRequestLogsEnabled + *out = new(bool) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LiveTrace != nil { + in, out := &in.LiveTrace, &out.LiveTrace + *out = new(LiveTraceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LiveTraceEnabled != nil { + in, out := &in.LiveTraceEnabled, &out.LiveTraceEnabled + *out = new(bool) + **out = **in + } + if in.LocalAuthEnabled != nil { + in, out := &in.LocalAuthEnabled, &out.LocalAuthEnabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MessagingLogsEnabled != nil { + in, out := &in.MessagingLogsEnabled, &out.MessagingLogsEnabled + *out = new(bool) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ServerlessConnectionTimeoutInSeconds != nil { + in, out := &in.ServerlessConnectionTimeoutInSeconds, 
&out.ServerlessConnectionTimeoutInSeconds + *out = new(float64) + **out = **in + } + if in.ServiceMode != nil { + in, out := &in.ServiceMode, &out.ServiceMode + *out = new(string) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(SkuInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TLSClientCertEnabled != nil { + in, out := &in.TLSClientCertEnabled, &out.TLSClientCertEnabled + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UpstreamEndpoint != nil { + in, out := &in.UpstreamEndpoint, &out.UpstreamEndpoint + *out = make([]UpstreamEndpointInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceInitParameters. +func (in *ServiceInitParameters) DeepCopy() *ServiceInitParameters { + if in == nil { + return nil + } + out := new(ServiceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceList) DeepCopyInto(out *ServiceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Service, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceList. 
+func (in *ServiceList) DeepCopy() *ServiceList { + if in == nil { + return nil + } + out := new(ServiceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceObservation) DeepCopyInto(out *ServiceObservation) { + *out = *in + if in.AADAuthEnabled != nil { + in, out := &in.AADAuthEnabled, &out.AADAuthEnabled + *out = new(bool) + **out = **in + } + if in.ConnectivityLogsEnabled != nil { + in, out := &in.ConnectivityLogsEnabled, &out.ConnectivityLogsEnabled + *out = new(bool) + **out = **in + } + if in.Cors != nil { + in, out := &in.Cors, &out.Cors + *out = make([]CorsObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HTTPRequestLogsEnabled != nil { + in, out := &in.HTTPRequestLogsEnabled, &out.HTTPRequestLogsEnabled + *out = new(bool) + **out = **in + } + if in.HostName != nil { + in, out := &in.HostName, &out.HostName + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.LiveTrace != nil { + in, out := &in.LiveTrace, &out.LiveTrace + *out = new(LiveTraceObservation) + (*in).DeepCopyInto(*out) + } + if in.LiveTraceEnabled != nil { + in, out := &in.LiveTraceEnabled, &out.LiveTraceEnabled + *out = new(bool) + **out = **in + } + if in.LocalAuthEnabled != nil { + in, out := &in.LocalAuthEnabled, &out.LocalAuthEnabled + *out = new(bool) + **out = 
**in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MessagingLogsEnabled != nil { + in, out := &in.MessagingLogsEnabled, &out.MessagingLogsEnabled + *out = new(bool) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.PublicPort != nil { + in, out := &in.PublicPort, &out.PublicPort + *out = new(float64) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ServerPort != nil { + in, out := &in.ServerPort, &out.ServerPort + *out = new(float64) + **out = **in + } + if in.ServerlessConnectionTimeoutInSeconds != nil { + in, out := &in.ServerlessConnectionTimeoutInSeconds, &out.ServerlessConnectionTimeoutInSeconds + *out = new(float64) + **out = **in + } + if in.ServiceMode != nil { + in, out := &in.ServiceMode, &out.ServiceMode + *out = new(string) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(SkuObservation) + (*in).DeepCopyInto(*out) + } + if in.TLSClientCertEnabled != nil { + in, out := &in.TLSClientCertEnabled, &out.TLSClientCertEnabled + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UpstreamEndpoint != nil { + in, out := &in.UpstreamEndpoint, &out.UpstreamEndpoint + *out = make([]UpstreamEndpointObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceObservation. 
+func (in *ServiceObservation) DeepCopy() *ServiceObservation { + if in == nil { + return nil + } + out := new(ServiceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceParameters) DeepCopyInto(out *ServiceParameters) { + *out = *in + if in.AADAuthEnabled != nil { + in, out := &in.AADAuthEnabled, &out.AADAuthEnabled + *out = new(bool) + **out = **in + } + if in.ConnectivityLogsEnabled != nil { + in, out := &in.ConnectivityLogsEnabled, &out.ConnectivityLogsEnabled + *out = new(bool) + **out = **in + } + if in.Cors != nil { + in, out := &in.Cors, &out.Cors + *out = make([]CorsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HTTPRequestLogsEnabled != nil { + in, out := &in.HTTPRequestLogsEnabled, &out.HTTPRequestLogsEnabled + *out = new(bool) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.LiveTrace != nil { + in, out := &in.LiveTrace, &out.LiveTrace + *out = new(LiveTraceParameters) + (*in).DeepCopyInto(*out) + } + if in.LiveTraceEnabled != nil { + in, out := &in.LiveTraceEnabled, &out.LiveTraceEnabled + *out = new(bool) + **out = **in + } + if in.LocalAuthEnabled != nil { + in, out := &in.LocalAuthEnabled, &out.LocalAuthEnabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MessagingLogsEnabled != nil { + in, out := &in.MessagingLogsEnabled, &out.MessagingLogsEnabled + *out = new(bool) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) 
+ **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ServerlessConnectionTimeoutInSeconds != nil { + in, out := &in.ServerlessConnectionTimeoutInSeconds, &out.ServerlessConnectionTimeoutInSeconds + *out = new(float64) + **out = **in + } + if in.ServiceMode != nil { + in, out := &in.ServiceMode, &out.ServiceMode + *out = new(string) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(SkuParameters) + (*in).DeepCopyInto(*out) + } + if in.TLSClientCertEnabled != nil { + in, out := &in.TLSClientCertEnabled, &out.TLSClientCertEnabled + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.UpstreamEndpoint != nil { + in, out := &in.UpstreamEndpoint, &out.UpstreamEndpoint + *out = make([]UpstreamEndpointParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceParameters. +func (in *ServiceParameters) DeepCopy() *ServiceParameters { + if in == nil { + return nil + } + out := new(ServiceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceSpec. +func (in *ServiceSpec) DeepCopy() *ServiceSpec { + if in == nil { + return nil + } + out := new(ServiceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceStatus) DeepCopyInto(out *ServiceStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceStatus. +func (in *ServiceStatus) DeepCopy() *ServiceStatus { + if in == nil { + return nil + } + out := new(ServiceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SkuInitParameters) DeepCopyInto(out *SkuInitParameters) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SkuInitParameters. +func (in *SkuInitParameters) DeepCopy() *SkuInitParameters { + if in == nil { + return nil + } + out := new(SkuInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SkuObservation) DeepCopyInto(out *SkuObservation) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SkuObservation. +func (in *SkuObservation) DeepCopy() *SkuObservation { + if in == nil { + return nil + } + out := new(SkuObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SkuParameters) DeepCopyInto(out *SkuParameters) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(float64) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SkuParameters. +func (in *SkuParameters) DeepCopy() *SkuParameters { + if in == nil { + return nil + } + out := new(SkuParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UpstreamEndpointInitParameters) DeepCopyInto(out *UpstreamEndpointInitParameters) { + *out = *in + if in.CategoryPattern != nil { + in, out := &in.CategoryPattern, &out.CategoryPattern + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.EventPattern != nil { + in, out := &in.EventPattern, &out.EventPattern + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.HubPattern != nil { + in, out := &in.HubPattern, &out.HubPattern + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.URLTemplate != nil { + in, out := &in.URLTemplate, &out.URLTemplate + *out = new(string) + **out = **in + } + if in.UserAssignedIdentityID != nil { + in, out := &in.UserAssignedIdentityID, &out.UserAssignedIdentityID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpstreamEndpointInitParameters. +func (in *UpstreamEndpointInitParameters) DeepCopy() *UpstreamEndpointInitParameters { + if in == nil { + return nil + } + out := new(UpstreamEndpointInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UpstreamEndpointObservation) DeepCopyInto(out *UpstreamEndpointObservation) { + *out = *in + if in.CategoryPattern != nil { + in, out := &in.CategoryPattern, &out.CategoryPattern + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.EventPattern != nil { + in, out := &in.EventPattern, &out.EventPattern + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.HubPattern != nil { + in, out := &in.HubPattern, &out.HubPattern + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.URLTemplate != nil { + in, out := &in.URLTemplate, &out.URLTemplate + *out = new(string) + **out = **in + } + if in.UserAssignedIdentityID != nil { + in, out := &in.UserAssignedIdentityID, &out.UserAssignedIdentityID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpstreamEndpointObservation. +func (in *UpstreamEndpointObservation) DeepCopy() *UpstreamEndpointObservation { + if in == nil { + return nil + } + out := new(UpstreamEndpointObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UpstreamEndpointParameters) DeepCopyInto(out *UpstreamEndpointParameters) { + *out = *in + if in.CategoryPattern != nil { + in, out := &in.CategoryPattern, &out.CategoryPattern + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.EventPattern != nil { + in, out := &in.EventPattern, &out.EventPattern + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.HubPattern != nil { + in, out := &in.HubPattern, &out.HubPattern + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.URLTemplate != nil { + in, out := &in.URLTemplate, &out.URLTemplate + *out = new(string) + **out = **in + } + if in.UserAssignedIdentityID != nil { + in, out := &in.UserAssignedIdentityID, &out.UserAssignedIdentityID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpstreamEndpointParameters. +func (in *UpstreamEndpointParameters) DeepCopy() *UpstreamEndpointParameters { + if in == nil { + return nil + } + out := new(UpstreamEndpointParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebPubsub) DeepCopyInto(out *WebPubsub) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebPubsub. 
+func (in *WebPubsub) DeepCopy() *WebPubsub { + if in == nil { + return nil + } + out := new(WebPubsub) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WebPubsub) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebPubsubHub) DeepCopyInto(out *WebPubsubHub) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebPubsubHub. +func (in *WebPubsubHub) DeepCopy() *WebPubsubHub { + if in == nil { + return nil + } + out := new(WebPubsubHub) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WebPubsubHub) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WebPubsubHubInitParameters) DeepCopyInto(out *WebPubsubHubInitParameters) { + *out = *in + if in.AnonymousConnectionsEnabled != nil { + in, out := &in.AnonymousConnectionsEnabled, &out.AnonymousConnectionsEnabled + *out = new(bool) + **out = **in + } + if in.EventHandler != nil { + in, out := &in.EventHandler, &out.EventHandler + *out = make([]EventHandlerInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EventListener != nil { + in, out := &in.EventListener, &out.EventListener + *out = make([]EventListenerInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.WebPubsubID != nil { + in, out := &in.WebPubsubID, &out.WebPubsubID + *out = new(string) + **out = **in + } + if in.WebPubsubIDRef != nil { + in, out := &in.WebPubsubIDRef, &out.WebPubsubIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.WebPubsubIDSelector != nil { + in, out := &in.WebPubsubIDSelector, &out.WebPubsubIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebPubsubHubInitParameters. +func (in *WebPubsubHubInitParameters) DeepCopy() *WebPubsubHubInitParameters { + if in == nil { + return nil + } + out := new(WebPubsubHubInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WebPubsubHubList) DeepCopyInto(out *WebPubsubHubList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]WebPubsubHub, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebPubsubHubList. +func (in *WebPubsubHubList) DeepCopy() *WebPubsubHubList { + if in == nil { + return nil + } + out := new(WebPubsubHubList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WebPubsubHubList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebPubsubHubObservation) DeepCopyInto(out *WebPubsubHubObservation) { + *out = *in + if in.AnonymousConnectionsEnabled != nil { + in, out := &in.AnonymousConnectionsEnabled, &out.AnonymousConnectionsEnabled + *out = new(bool) + **out = **in + } + if in.EventHandler != nil { + in, out := &in.EventHandler, &out.EventHandler + *out = make([]EventHandlerObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EventListener != nil { + in, out := &in.EventListener, &out.EventListener + *out = make([]EventListenerObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.WebPubsubID != nil { + in, out := &in.WebPubsubID, &out.WebPubsubID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a 
new WebPubsubHubObservation. +func (in *WebPubsubHubObservation) DeepCopy() *WebPubsubHubObservation { + if in == nil { + return nil + } + out := new(WebPubsubHubObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebPubsubHubParameters) DeepCopyInto(out *WebPubsubHubParameters) { + *out = *in + if in.AnonymousConnectionsEnabled != nil { + in, out := &in.AnonymousConnectionsEnabled, &out.AnonymousConnectionsEnabled + *out = new(bool) + **out = **in + } + if in.EventHandler != nil { + in, out := &in.EventHandler, &out.EventHandler + *out = make([]EventHandlerParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EventListener != nil { + in, out := &in.EventListener, &out.EventListener + *out = make([]EventListenerParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.WebPubsubID != nil { + in, out := &in.WebPubsubID, &out.WebPubsubID + *out = new(string) + **out = **in + } + if in.WebPubsubIDRef != nil { + in, out := &in.WebPubsubIDRef, &out.WebPubsubIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.WebPubsubIDSelector != nil { + in, out := &in.WebPubsubIDSelector, &out.WebPubsubIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebPubsubHubParameters. +func (in *WebPubsubHubParameters) DeepCopy() *WebPubsubHubParameters { + if in == nil { + return nil + } + out := new(WebPubsubHubParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WebPubsubHubSpec) DeepCopyInto(out *WebPubsubHubSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebPubsubHubSpec. +func (in *WebPubsubHubSpec) DeepCopy() *WebPubsubHubSpec { + if in == nil { + return nil + } + out := new(WebPubsubHubSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebPubsubHubStatus) DeepCopyInto(out *WebPubsubHubStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebPubsubHubStatus. +func (in *WebPubsubHubStatus) DeepCopy() *WebPubsubHubStatus { + if in == nil { + return nil + } + out := new(WebPubsubHubStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebPubsubIdentityInitParameters) DeepCopyInto(out *WebPubsubIdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebPubsubIdentityInitParameters. 
+func (in *WebPubsubIdentityInitParameters) DeepCopy() *WebPubsubIdentityInitParameters { + if in == nil { + return nil + } + out := new(WebPubsubIdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebPubsubIdentityObservation) DeepCopyInto(out *WebPubsubIdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebPubsubIdentityObservation. +func (in *WebPubsubIdentityObservation) DeepCopy() *WebPubsubIdentityObservation { + if in == nil { + return nil + } + out := new(WebPubsubIdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebPubsubIdentityParameters) DeepCopyInto(out *WebPubsubIdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebPubsubIdentityParameters. 
+func (in *WebPubsubIdentityParameters) DeepCopy() *WebPubsubIdentityParameters { + if in == nil { + return nil + } + out := new(WebPubsubIdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebPubsubInitParameters) DeepCopyInto(out *WebPubsubInitParameters) { + *out = *in + if in.AADAuthEnabled != nil { + in, out := &in.AADAuthEnabled, &out.AADAuthEnabled + *out = new(bool) + **out = **in + } + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(float64) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(WebPubsubIdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LiveTrace != nil { + in, out := &in.LiveTrace, &out.LiveTrace + *out = new(WebPubsubLiveTraceInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LocalAuthEnabled != nil { + in, out := &in.LocalAuthEnabled, &out.LocalAuthEnabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if 
in.TLSClientCertEnabled != nil { + in, out := &in.TLSClientCertEnabled, &out.TLSClientCertEnabled + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebPubsubInitParameters. +func (in *WebPubsubInitParameters) DeepCopy() *WebPubsubInitParameters { + if in == nil { + return nil + } + out := new(WebPubsubInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebPubsubList) DeepCopyInto(out *WebPubsubList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]WebPubsub, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebPubsubList. +func (in *WebPubsubList) DeepCopy() *WebPubsubList { + if in == nil { + return nil + } + out := new(WebPubsubList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WebPubsubList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WebPubsubLiveTraceInitParameters) DeepCopyInto(out *WebPubsubLiveTraceInitParameters) { + *out = *in + if in.ConnectivityLogsEnabled != nil { + in, out := &in.ConnectivityLogsEnabled, &out.ConnectivityLogsEnabled + *out = new(bool) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.HTTPRequestLogsEnabled != nil { + in, out := &in.HTTPRequestLogsEnabled, &out.HTTPRequestLogsEnabled + *out = new(bool) + **out = **in + } + if in.MessagingLogsEnabled != nil { + in, out := &in.MessagingLogsEnabled, &out.MessagingLogsEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebPubsubLiveTraceInitParameters. +func (in *WebPubsubLiveTraceInitParameters) DeepCopy() *WebPubsubLiveTraceInitParameters { + if in == nil { + return nil + } + out := new(WebPubsubLiveTraceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebPubsubLiveTraceObservation) DeepCopyInto(out *WebPubsubLiveTraceObservation) { + *out = *in + if in.ConnectivityLogsEnabled != nil { + in, out := &in.ConnectivityLogsEnabled, &out.ConnectivityLogsEnabled + *out = new(bool) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.HTTPRequestLogsEnabled != nil { + in, out := &in.HTTPRequestLogsEnabled, &out.HTTPRequestLogsEnabled + *out = new(bool) + **out = **in + } + if in.MessagingLogsEnabled != nil { + in, out := &in.MessagingLogsEnabled, &out.MessagingLogsEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebPubsubLiveTraceObservation. 
+func (in *WebPubsubLiveTraceObservation) DeepCopy() *WebPubsubLiveTraceObservation { + if in == nil { + return nil + } + out := new(WebPubsubLiveTraceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebPubsubLiveTraceParameters) DeepCopyInto(out *WebPubsubLiveTraceParameters) { + *out = *in + if in.ConnectivityLogsEnabled != nil { + in, out := &in.ConnectivityLogsEnabled, &out.ConnectivityLogsEnabled + *out = new(bool) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.HTTPRequestLogsEnabled != nil { + in, out := &in.HTTPRequestLogsEnabled, &out.HTTPRequestLogsEnabled + *out = new(bool) + **out = **in + } + if in.MessagingLogsEnabled != nil { + in, out := &in.MessagingLogsEnabled, &out.MessagingLogsEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebPubsubLiveTraceParameters. +func (in *WebPubsubLiveTraceParameters) DeepCopy() *WebPubsubLiveTraceParameters { + if in == nil { + return nil + } + out := new(WebPubsubLiveTraceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebPubsubNetworkACL) DeepCopyInto(out *WebPubsubNetworkACL) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebPubsubNetworkACL. 
+func (in *WebPubsubNetworkACL) DeepCopy() *WebPubsubNetworkACL { + if in == nil { + return nil + } + out := new(WebPubsubNetworkACL) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WebPubsubNetworkACL) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebPubsubNetworkACLInitParameters) DeepCopyInto(out *WebPubsubNetworkACLInitParameters) { + *out = *in + if in.DefaultAction != nil { + in, out := &in.DefaultAction, &out.DefaultAction + *out = new(string) + **out = **in + } + if in.PrivateEndpoint != nil { + in, out := &in.PrivateEndpoint, &out.PrivateEndpoint + *out = make([]WebPubsubNetworkACLPrivateEndpointInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PublicNetwork != nil { + in, out := &in.PublicNetwork, &out.PublicNetwork + *out = new(WebPubsubNetworkACLPublicNetworkInitParameters) + (*in).DeepCopyInto(*out) + } + if in.WebPubsubID != nil { + in, out := &in.WebPubsubID, &out.WebPubsubID + *out = new(string) + **out = **in + } + if in.WebPubsubIDRef != nil { + in, out := &in.WebPubsubIDRef, &out.WebPubsubIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.WebPubsubIDSelector != nil { + in, out := &in.WebPubsubIDSelector, &out.WebPubsubIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebPubsubNetworkACLInitParameters. 
+func (in *WebPubsubNetworkACLInitParameters) DeepCopy() *WebPubsubNetworkACLInitParameters { + if in == nil { + return nil + } + out := new(WebPubsubNetworkACLInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebPubsubNetworkACLList) DeepCopyInto(out *WebPubsubNetworkACLList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]WebPubsubNetworkACL, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebPubsubNetworkACLList. +func (in *WebPubsubNetworkACLList) DeepCopy() *WebPubsubNetworkACLList { + if in == nil { + return nil + } + out := new(WebPubsubNetworkACLList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WebPubsubNetworkACLList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WebPubsubNetworkACLObservation) DeepCopyInto(out *WebPubsubNetworkACLObservation) { + *out = *in + if in.DefaultAction != nil { + in, out := &in.DefaultAction, &out.DefaultAction + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.PrivateEndpoint != nil { + in, out := &in.PrivateEndpoint, &out.PrivateEndpoint + *out = make([]WebPubsubNetworkACLPrivateEndpointObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PublicNetwork != nil { + in, out := &in.PublicNetwork, &out.PublicNetwork + *out = new(WebPubsubNetworkACLPublicNetworkObservation) + (*in).DeepCopyInto(*out) + } + if in.WebPubsubID != nil { + in, out := &in.WebPubsubID, &out.WebPubsubID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebPubsubNetworkACLObservation. +func (in *WebPubsubNetworkACLObservation) DeepCopy() *WebPubsubNetworkACLObservation { + if in == nil { + return nil + } + out := new(WebPubsubNetworkACLObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WebPubsubNetworkACLParameters) DeepCopyInto(out *WebPubsubNetworkACLParameters) { + *out = *in + if in.DefaultAction != nil { + in, out := &in.DefaultAction, &out.DefaultAction + *out = new(string) + **out = **in + } + if in.PrivateEndpoint != nil { + in, out := &in.PrivateEndpoint, &out.PrivateEndpoint + *out = make([]WebPubsubNetworkACLPrivateEndpointParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PublicNetwork != nil { + in, out := &in.PublicNetwork, &out.PublicNetwork + *out = new(WebPubsubNetworkACLPublicNetworkParameters) + (*in).DeepCopyInto(*out) + } + if in.WebPubsubID != nil { + in, out := &in.WebPubsubID, &out.WebPubsubID + *out = new(string) + **out = **in + } + if in.WebPubsubIDRef != nil { + in, out := &in.WebPubsubIDRef, &out.WebPubsubIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.WebPubsubIDSelector != nil { + in, out := &in.WebPubsubIDSelector, &out.WebPubsubIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebPubsubNetworkACLParameters. +func (in *WebPubsubNetworkACLParameters) DeepCopy() *WebPubsubNetworkACLParameters { + if in == nil { + return nil + } + out := new(WebPubsubNetworkACLParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WebPubsubNetworkACLPrivateEndpointInitParameters) DeepCopyInto(out *WebPubsubNetworkACLPrivateEndpointInitParameters) { + *out = *in + if in.AllowedRequestTypes != nil { + in, out := &in.AllowedRequestTypes, &out.AllowedRequestTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DeniedRequestTypes != nil { + in, out := &in.DeniedRequestTypes, &out.DeniedRequestTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IDRef != nil { + in, out := &in.IDRef, &out.IDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IDSelector != nil { + in, out := &in.IDSelector, &out.IDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebPubsubNetworkACLPrivateEndpointInitParameters. +func (in *WebPubsubNetworkACLPrivateEndpointInitParameters) DeepCopy() *WebPubsubNetworkACLPrivateEndpointInitParameters { + if in == nil { + return nil + } + out := new(WebPubsubNetworkACLPrivateEndpointInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WebPubsubNetworkACLPrivateEndpointObservation) DeepCopyInto(out *WebPubsubNetworkACLPrivateEndpointObservation) { + *out = *in + if in.AllowedRequestTypes != nil { + in, out := &in.AllowedRequestTypes, &out.AllowedRequestTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DeniedRequestTypes != nil { + in, out := &in.DeniedRequestTypes, &out.DeniedRequestTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebPubsubNetworkACLPrivateEndpointObservation. +func (in *WebPubsubNetworkACLPrivateEndpointObservation) DeepCopy() *WebPubsubNetworkACLPrivateEndpointObservation { + if in == nil { + return nil + } + out := new(WebPubsubNetworkACLPrivateEndpointObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WebPubsubNetworkACLPrivateEndpointParameters) DeepCopyInto(out *WebPubsubNetworkACLPrivateEndpointParameters) { + *out = *in + if in.AllowedRequestTypes != nil { + in, out := &in.AllowedRequestTypes, &out.AllowedRequestTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DeniedRequestTypes != nil { + in, out := &in.DeniedRequestTypes, &out.DeniedRequestTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IDRef != nil { + in, out := &in.IDRef, &out.IDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IDSelector != nil { + in, out := &in.IDSelector, &out.IDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebPubsubNetworkACLPrivateEndpointParameters. +func (in *WebPubsubNetworkACLPrivateEndpointParameters) DeepCopy() *WebPubsubNetworkACLPrivateEndpointParameters { + if in == nil { + return nil + } + out := new(WebPubsubNetworkACLPrivateEndpointParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WebPubsubNetworkACLPublicNetworkInitParameters) DeepCopyInto(out *WebPubsubNetworkACLPublicNetworkInitParameters) { + *out = *in + if in.AllowedRequestTypes != nil { + in, out := &in.AllowedRequestTypes, &out.AllowedRequestTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DeniedRequestTypes != nil { + in, out := &in.DeniedRequestTypes, &out.DeniedRequestTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebPubsubNetworkACLPublicNetworkInitParameters. +func (in *WebPubsubNetworkACLPublicNetworkInitParameters) DeepCopy() *WebPubsubNetworkACLPublicNetworkInitParameters { + if in == nil { + return nil + } + out := new(WebPubsubNetworkACLPublicNetworkInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebPubsubNetworkACLPublicNetworkObservation) DeepCopyInto(out *WebPubsubNetworkACLPublicNetworkObservation) { + *out = *in + if in.AllowedRequestTypes != nil { + in, out := &in.AllowedRequestTypes, &out.AllowedRequestTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DeniedRequestTypes != nil { + in, out := &in.DeniedRequestTypes, &out.DeniedRequestTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebPubsubNetworkACLPublicNetworkObservation. 
+func (in *WebPubsubNetworkACLPublicNetworkObservation) DeepCopy() *WebPubsubNetworkACLPublicNetworkObservation { + if in == nil { + return nil + } + out := new(WebPubsubNetworkACLPublicNetworkObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebPubsubNetworkACLPublicNetworkParameters) DeepCopyInto(out *WebPubsubNetworkACLPublicNetworkParameters) { + *out = *in + if in.AllowedRequestTypes != nil { + in, out := &in.AllowedRequestTypes, &out.AllowedRequestTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DeniedRequestTypes != nil { + in, out := &in.DeniedRequestTypes, &out.DeniedRequestTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebPubsubNetworkACLPublicNetworkParameters. +func (in *WebPubsubNetworkACLPublicNetworkParameters) DeepCopy() *WebPubsubNetworkACLPublicNetworkParameters { + if in == nil { + return nil + } + out := new(WebPubsubNetworkACLPublicNetworkParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebPubsubNetworkACLSpec) DeepCopyInto(out *WebPubsubNetworkACLSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebPubsubNetworkACLSpec. 
+func (in *WebPubsubNetworkACLSpec) DeepCopy() *WebPubsubNetworkACLSpec { + if in == nil { + return nil + } + out := new(WebPubsubNetworkACLSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebPubsubNetworkACLStatus) DeepCopyInto(out *WebPubsubNetworkACLStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebPubsubNetworkACLStatus. +func (in *WebPubsubNetworkACLStatus) DeepCopy() *WebPubsubNetworkACLStatus { + if in == nil { + return nil + } + out := new(WebPubsubNetworkACLStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebPubsubObservation) DeepCopyInto(out *WebPubsubObservation) { + *out = *in + if in.AADAuthEnabled != nil { + in, out := &in.AADAuthEnabled, &out.AADAuthEnabled + *out = new(bool) + **out = **in + } + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(float64) + **out = **in + } + if in.ExternalIP != nil { + in, out := &in.ExternalIP, &out.ExternalIP + *out = new(string) + **out = **in + } + if in.HostName != nil { + in, out := &in.HostName, &out.HostName + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(WebPubsubIdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.LiveTrace != nil { + in, out := &in.LiveTrace, &out.LiveTrace + *out = new(WebPubsubLiveTraceObservation) + (*in).DeepCopyInto(*out) + } + if in.LocalAuthEnabled != nil { + in, out := &in.LocalAuthEnabled, &out.LocalAuthEnabled + *out = new(bool) + **out = **in + } + if in.Location != nil { 
+ in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.PublicPort != nil { + in, out := &in.PublicPort, &out.PublicPort + *out = new(float64) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ServerPort != nil { + in, out := &in.ServerPort, &out.ServerPort + *out = new(float64) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.TLSClientCertEnabled != nil { + in, out := &in.TLSClientCertEnabled, &out.TLSClientCertEnabled + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebPubsubObservation. +func (in *WebPubsubObservation) DeepCopy() *WebPubsubObservation { + if in == nil { + return nil + } + out := new(WebPubsubObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WebPubsubParameters) DeepCopyInto(out *WebPubsubParameters) { + *out = *in + if in.AADAuthEnabled != nil { + in, out := &in.AADAuthEnabled, &out.AADAuthEnabled + *out = new(bool) + **out = **in + } + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(float64) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(WebPubsubIdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.LiveTrace != nil { + in, out := &in.LiveTrace, &out.LiveTrace + *out = new(WebPubsubLiveTraceParameters) + (*in).DeepCopyInto(*out) + } + if in.LocalAuthEnabled != nil { + in, out := &in.LocalAuthEnabled, &out.LocalAuthEnabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(string) + **out = **in + } + if in.TLSClientCertEnabled != nil { + in, out := &in.TLSClientCertEnabled, &out.TLSClientCertEnabled + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + 
inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebPubsubParameters. +func (in *WebPubsubParameters) DeepCopy() *WebPubsubParameters { + if in == nil { + return nil + } + out := new(WebPubsubParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebPubsubSpec) DeepCopyInto(out *WebPubsubSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebPubsubSpec. +func (in *WebPubsubSpec) DeepCopy() *WebPubsubSpec { + if in == nil { + return nil + } + out := new(WebPubsubSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebPubsubStatus) DeepCopyInto(out *WebPubsubStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebPubsubStatus. +func (in *WebPubsubStatus) DeepCopy() *WebPubsubStatus { + if in == nil { + return nil + } + out := new(WebPubsubStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/signalrservice/v1beta2/zz_generated.managed.go b/apis/signalrservice/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..cca870936 --- /dev/null +++ b/apis/signalrservice/v1beta2/zz_generated.managed.go @@ -0,0 +1,308 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. 
+ +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this NetworkACL. +func (mg *NetworkACL) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this NetworkACL. +func (mg *NetworkACL) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this NetworkACL. +func (mg *NetworkACL) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this NetworkACL. +func (mg *NetworkACL) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this NetworkACL. +func (mg *NetworkACL) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this NetworkACL. +func (mg *NetworkACL) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this NetworkACL. +func (mg *NetworkACL) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this NetworkACL. +func (mg *NetworkACL) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this NetworkACL. +func (mg *NetworkACL) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this NetworkACL. +func (mg *NetworkACL) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this NetworkACL. +func (mg *NetworkACL) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this NetworkACL. 
+func (mg *NetworkACL) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Service. +func (mg *Service) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Service. +func (mg *Service) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Service. +func (mg *Service) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Service. +func (mg *Service) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Service. +func (mg *Service) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Service. +func (mg *Service) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Service. +func (mg *Service) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Service. +func (mg *Service) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Service. +func (mg *Service) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Service. +func (mg *Service) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Service. +func (mg *Service) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Service. 
+func (mg *Service) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this WebPubsub. +func (mg *WebPubsub) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this WebPubsub. +func (mg *WebPubsub) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this WebPubsub. +func (mg *WebPubsub) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this WebPubsub. +func (mg *WebPubsub) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this WebPubsub. +func (mg *WebPubsub) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this WebPubsub. +func (mg *WebPubsub) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this WebPubsub. +func (mg *WebPubsub) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this WebPubsub. +func (mg *WebPubsub) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this WebPubsub. +func (mg *WebPubsub) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this WebPubsub. +func (mg *WebPubsub) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this WebPubsub. +func (mg *WebPubsub) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this WebPubsub. 
+func (mg *WebPubsub) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this WebPubsubHub. +func (mg *WebPubsubHub) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this WebPubsubHub. +func (mg *WebPubsubHub) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this WebPubsubHub. +func (mg *WebPubsubHub) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this WebPubsubHub. +func (mg *WebPubsubHub) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this WebPubsubHub. +func (mg *WebPubsubHub) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this WebPubsubHub. +func (mg *WebPubsubHub) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this WebPubsubHub. +func (mg *WebPubsubHub) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this WebPubsubHub. +func (mg *WebPubsubHub) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this WebPubsubHub. +func (mg *WebPubsubHub) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this WebPubsubHub. +func (mg *WebPubsubHub) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this WebPubsubHub. 
+func (mg *WebPubsubHub) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this WebPubsubHub. +func (mg *WebPubsubHub) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this WebPubsubNetworkACL. +func (mg *WebPubsubNetworkACL) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this WebPubsubNetworkACL. +func (mg *WebPubsubNetworkACL) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this WebPubsubNetworkACL. +func (mg *WebPubsubNetworkACL) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this WebPubsubNetworkACL. +func (mg *WebPubsubNetworkACL) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this WebPubsubNetworkACL. +func (mg *WebPubsubNetworkACL) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this WebPubsubNetworkACL. +func (mg *WebPubsubNetworkACL) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this WebPubsubNetworkACL. +func (mg *WebPubsubNetworkACL) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this WebPubsubNetworkACL. +func (mg *WebPubsubNetworkACL) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this WebPubsubNetworkACL. 
+func (mg *WebPubsubNetworkACL) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this WebPubsubNetworkACL. +func (mg *WebPubsubNetworkACL) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this WebPubsubNetworkACL. +func (mg *WebPubsubNetworkACL) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this WebPubsubNetworkACL. +func (mg *WebPubsubNetworkACL) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/signalrservice/v1beta2/zz_generated.managedlist.go b/apis/signalrservice/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..c73249b0b --- /dev/null +++ b/apis/signalrservice/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,53 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this NetworkACLList. +func (l *NetworkACLList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ServiceList. +func (l *ServiceList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this WebPubsubHubList. +func (l *WebPubsubHubList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this WebPubsubList. 
+func (l *WebPubsubList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this WebPubsubNetworkACLList. +func (l *WebPubsubNetworkACLList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/signalrservice/v1beta2/zz_generated.resolvers.go b/apis/signalrservice/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..611d8d1dd --- /dev/null +++ b/apis/signalrservice/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,463 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + apisresolver "github.com/upbound/provider-azure/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *NetworkACL) ResolveReferences( // ResolveReferences of this NetworkACL. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + for i3 := 0; i3 < len(mg.Spec.ForProvider.PrivateEndpoint); i3++ { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "PrivateEndpoint", "PrivateEndpointList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.PrivateEndpoint[i3].ID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.PrivateEndpoint[i3].IDRef, + Selector: mg.Spec.ForProvider.PrivateEndpoint[i3].IDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.PrivateEndpoint[i3].ID") + } + mg.Spec.ForProvider.PrivateEndpoint[i3].ID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.PrivateEndpoint[i3].IDRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("signalrservice.azure.upbound.io", "v1beta2", "Service", "ServiceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SignalrServiceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.SignalrServiceIDRef, + Selector: mg.Spec.ForProvider.SignalrServiceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SignalrServiceID") + } + mg.Spec.ForProvider.SignalrServiceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SignalrServiceIDRef = rsp.ResolvedReference + + 
for i3 := 0; i3 < len(mg.Spec.InitProvider.PrivateEndpoint); i3++ { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "PrivateEndpoint", "PrivateEndpointList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.PrivateEndpoint[i3].ID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.PrivateEndpoint[i3].IDRef, + Selector: mg.Spec.InitProvider.PrivateEndpoint[i3].IDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.PrivateEndpoint[i3].ID") + } + mg.Spec.InitProvider.PrivateEndpoint[i3].ID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.PrivateEndpoint[i3].IDRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("signalrservice.azure.upbound.io", "v1beta2", "Service", "ServiceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SignalrServiceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.SignalrServiceIDRef, + Selector: mg.Spec.InitProvider.SignalrServiceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SignalrServiceID") + } + mg.Spec.InitProvider.SignalrServiceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SignalrServiceIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Service. 
+func (mg *Service) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this WebPubsub. 
+func (mg *WebPubsub) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ResourceGroupNameRef, + Selector: mg.Spec.InitProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ResourceGroupName") + } + mg.Spec.InitProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this WebPubsubHub. 
+func (mg *WebPubsubHub) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + for i3 := 0; i3 < len(mg.Spec.ForProvider.EventHandler); i3++ { + if mg.Spec.ForProvider.EventHandler[i3].Auth != nil { + { + m, l, err = apisresolver.GetManagedResource("managedidentity.azure.upbound.io", "v1beta1", "UserAssignedIdentity", "UserAssignedIdentityList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.EventHandler[i3].Auth.ManagedIdentityID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.EventHandler[i3].Auth.ManagedIdentityIDRef, + Selector: mg.Spec.ForProvider.EventHandler[i3].Auth.ManagedIdentityIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.EventHandler[i3].Auth.ManagedIdentityID") + } + mg.Spec.ForProvider.EventHandler[i3].Auth.ManagedIdentityID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.EventHandler[i3].Auth.ManagedIdentityIDRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.EventListener); i3++ { + { + m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta2", "EventHub", "EventHubList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.EventListener[i3].EventHubName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.EventListener[i3].EventHubNameRef, + Selector: 
mg.Spec.ForProvider.EventListener[i3].EventHubNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.EventListener[i3].EventHubName") + } + mg.Spec.ForProvider.EventListener[i3].EventHubName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.EventListener[i3].EventHubNameRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.ForProvider.EventListener); i3++ { + { + m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta2", "EventHubNamespace", "EventHubNamespaceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.EventListener[i3].EventHubNamespaceName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.EventListener[i3].EventHubNamespaceNameRef, + Selector: mg.Spec.ForProvider.EventListener[i3].EventHubNamespaceNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.EventListener[i3].EventHubNamespaceName") + } + mg.Spec.ForProvider.EventListener[i3].EventHubNamespaceName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.EventListener[i3].EventHubNamespaceNameRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("signalrservice.azure.upbound.io", "v1beta2", "WebPubsub", "WebPubsubList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.WebPubsubID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.WebPubsubIDRef, + Selector: mg.Spec.ForProvider.WebPubsubIDSelector, + To: 
reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.WebPubsubID") + } + mg.Spec.ForProvider.WebPubsubID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.WebPubsubIDRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.InitProvider.EventHandler); i3++ { + if mg.Spec.InitProvider.EventHandler[i3].Auth != nil { + { + m, l, err = apisresolver.GetManagedResource("managedidentity.azure.upbound.io", "v1beta1", "UserAssignedIdentity", "UserAssignedIdentityList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.EventHandler[i3].Auth.ManagedIdentityID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.EventHandler[i3].Auth.ManagedIdentityIDRef, + Selector: mg.Spec.InitProvider.EventHandler[i3].Auth.ManagedIdentityIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.EventHandler[i3].Auth.ManagedIdentityID") + } + mg.Spec.InitProvider.EventHandler[i3].Auth.ManagedIdentityID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.EventHandler[i3].Auth.ManagedIdentityIDRef = rsp.ResolvedReference + + } + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.EventListener); i3++ { + { + m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta2", "EventHub", "EventHubList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.EventListener[i3].EventHubName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.EventListener[i3].EventHubNameRef, + 
Selector: mg.Spec.InitProvider.EventListener[i3].EventHubNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.EventListener[i3].EventHubName") + } + mg.Spec.InitProvider.EventListener[i3].EventHubName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.EventListener[i3].EventHubNameRef = rsp.ResolvedReference + + } + for i3 := 0; i3 < len(mg.Spec.InitProvider.EventListener); i3++ { + { + m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta2", "EventHubNamespace", "EventHubNamespaceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.EventListener[i3].EventHubNamespaceName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.EventListener[i3].EventHubNamespaceNameRef, + Selector: mg.Spec.InitProvider.EventListener[i3].EventHubNamespaceNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.EventListener[i3].EventHubNamespaceName") + } + mg.Spec.InitProvider.EventListener[i3].EventHubNamespaceName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.EventListener[i3].EventHubNamespaceNameRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("signalrservice.azure.upbound.io", "v1beta2", "WebPubsub", "WebPubsubList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.WebPubsubID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.WebPubsubIDRef, + Selector: 
mg.Spec.InitProvider.WebPubsubIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.WebPubsubID") + } + mg.Spec.InitProvider.WebPubsubID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.WebPubsubIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this WebPubsubNetworkACL. +func (mg *WebPubsubNetworkACL) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + for i3 := 0; i3 < len(mg.Spec.ForProvider.PrivateEndpoint); i3++ { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "PrivateEndpoint", "PrivateEndpointList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.PrivateEndpoint[i3].ID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.PrivateEndpoint[i3].IDRef, + Selector: mg.Spec.ForProvider.PrivateEndpoint[i3].IDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.PrivateEndpoint[i3].ID") + } + mg.Spec.ForProvider.PrivateEndpoint[i3].ID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.PrivateEndpoint[i3].IDRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("signalrservice.azure.upbound.io", "v1beta2", "WebPubsub", "WebPubsubList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.WebPubsubID), + 
Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.WebPubsubIDRef, + Selector: mg.Spec.ForProvider.WebPubsubIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.WebPubsubID") + } + mg.Spec.ForProvider.WebPubsubID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.WebPubsubIDRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.InitProvider.PrivateEndpoint); i3++ { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "PrivateEndpoint", "PrivateEndpointList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.PrivateEndpoint[i3].ID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.PrivateEndpoint[i3].IDRef, + Selector: mg.Spec.InitProvider.PrivateEndpoint[i3].IDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.PrivateEndpoint[i3].ID") + } + mg.Spec.InitProvider.PrivateEndpoint[i3].ID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.PrivateEndpoint[i3].IDRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("signalrservice.azure.upbound.io", "v1beta2", "WebPubsub", "WebPubsubList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.WebPubsubID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.WebPubsubIDRef, + Selector: mg.Spec.InitProvider.WebPubsubIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return 
errors.Wrap(err, "mg.Spec.InitProvider.WebPubsubID") + } + mg.Spec.InitProvider.WebPubsubID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.WebPubsubIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/signalrservice/v1beta2/zz_groupversion_info.go b/apis/signalrservice/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..241c419d9 --- /dev/null +++ b/apis/signalrservice/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=signalrservice.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "signalrservice.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/signalrservice/v1beta2/zz_networkacl_terraformed.go b/apis/signalrservice/v1beta2/zz_networkacl_terraformed.go new file mode 100755 index 000000000..153277ec1 --- /dev/null +++ b/apis/signalrservice/v1beta2/zz_networkacl_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this NetworkACL +func (mg *NetworkACL) GetTerraformResourceType() string { + return "azurerm_signalr_service_network_acl" +} + +// GetConnectionDetailsMapping for this NetworkACL +func (tr *NetworkACL) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this NetworkACL +func (tr *NetworkACL) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this NetworkACL +func (tr *NetworkACL) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this NetworkACL +func (tr *NetworkACL) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this NetworkACL +func (tr *NetworkACL) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this NetworkACL +func (tr *NetworkACL) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this NetworkACL +func (tr *NetworkACL) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := 
map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this NetworkACL +func (tr *NetworkACL) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this NetworkACL using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *NetworkACL) LateInitialize(attrs []byte) (bool, error) { + params := &NetworkACLParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *NetworkACL) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/signalrservice/v1beta2/zz_networkacl_types.go b/apis/signalrservice/v1beta2/zz_networkacl_types.go new file mode 100755 index 000000000..6950a0ce3 --- /dev/null +++ b/apis/signalrservice/v1beta2/zz_networkacl_types.go @@ -0,0 +1,248 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type NetworkACLInitParameters struct { + + // The default action to control the network access when no other rule matches. Possible values are Allow and Deny. + DefaultAction *string `json:"defaultAction,omitempty" tf:"default_action,omitempty"` + + // A private_endpoint block as defined below. + PrivateEndpoint []PrivateEndpointInitParameters `json:"privateEndpoint,omitempty" tf:"private_endpoint,omitempty"` + + // A public_network block as defined below. + PublicNetwork *PublicNetworkInitParameters `json:"publicNetwork,omitempty" tf:"public_network,omitempty"` + + // The ID of the SignalR service. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/signalrservice/v1beta2.Service + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + SignalrServiceID *string `json:"signalrServiceId,omitempty" tf:"signalr_service_id,omitempty"` + + // Reference to a Service in signalrservice to populate signalrServiceId. 
+ // +kubebuilder:validation:Optional + SignalrServiceIDRef *v1.Reference `json:"signalrServiceIdRef,omitempty" tf:"-"` + + // Selector for a Service in signalrservice to populate signalrServiceId. + // +kubebuilder:validation:Optional + SignalrServiceIDSelector *v1.Selector `json:"signalrServiceIdSelector,omitempty" tf:"-"` +} + +type NetworkACLObservation struct { + + // The default action to control the network access when no other rule matches. Possible values are Allow and Deny. + DefaultAction *string `json:"defaultAction,omitempty" tf:"default_action,omitempty"` + + // The ID of the SignalR service. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A private_endpoint block as defined below. + PrivateEndpoint []PrivateEndpointObservation `json:"privateEndpoint,omitempty" tf:"private_endpoint,omitempty"` + + // A public_network block as defined below. + PublicNetwork *PublicNetworkObservation `json:"publicNetwork,omitempty" tf:"public_network,omitempty"` + + // The ID of the SignalR service. Changing this forces a new resource to be created. + SignalrServiceID *string `json:"signalrServiceId,omitempty" tf:"signalr_service_id,omitempty"` +} + +type NetworkACLParameters struct { + + // The default action to control the network access when no other rule matches. Possible values are Allow and Deny. + // +kubebuilder:validation:Optional + DefaultAction *string `json:"defaultAction,omitempty" tf:"default_action,omitempty"` + + // A private_endpoint block as defined below. + // +kubebuilder:validation:Optional + PrivateEndpoint []PrivateEndpointParameters `json:"privateEndpoint,omitempty" tf:"private_endpoint,omitempty"` + + // A public_network block as defined below. + // +kubebuilder:validation:Optional + PublicNetwork *PublicNetworkParameters `json:"publicNetwork,omitempty" tf:"public_network,omitempty"` + + // The ID of the SignalR service. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/signalrservice/v1beta2.Service + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + SignalrServiceID *string `json:"signalrServiceId,omitempty" tf:"signalr_service_id,omitempty"` + + // Reference to a Service in signalrservice to populate signalrServiceId. + // +kubebuilder:validation:Optional + SignalrServiceIDRef *v1.Reference `json:"signalrServiceIdRef,omitempty" tf:"-"` + + // Selector for a Service in signalrservice to populate signalrServiceId. + // +kubebuilder:validation:Optional + SignalrServiceIDSelector *v1.Selector `json:"signalrServiceIdSelector,omitempty" tf:"-"` +} + +type PrivateEndpointInitParameters struct { + + // The allowed request types for the Private Endpoint Connection. Possible values are ClientConnection, ServerConnection, RESTAPI and Trace. + // +listType=set + AllowedRequestTypes []*string `json:"allowedRequestTypes,omitempty" tf:"allowed_request_types,omitempty"` + + // The denied request types for the Private Endpoint Connection. Possible values are ClientConnection, ServerConnection, RESTAPI and Trace. + // +listType=set + DeniedRequestTypes []*string `json:"deniedRequestTypes,omitempty" tf:"denied_request_types,omitempty"` + + // The ID of the Private Endpoint which is based on the SignalR service. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.PrivateEndpoint + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Reference to a PrivateEndpoint in network to populate id. + // +kubebuilder:validation:Optional + IDRef *v1.Reference `json:"idRef,omitempty" tf:"-"` + + // Selector for a PrivateEndpoint in network to populate id. 
+ // +kubebuilder:validation:Optional + IDSelector *v1.Selector `json:"idSelector,omitempty" tf:"-"` +} + +type PrivateEndpointObservation struct { + + // The allowed request types for the Private Endpoint Connection. Possible values are ClientConnection, ServerConnection, RESTAPI and Trace. + // +listType=set + AllowedRequestTypes []*string `json:"allowedRequestTypes,omitempty" tf:"allowed_request_types,omitempty"` + + // The denied request types for the Private Endpoint Connection. Possible values are ClientConnection, ServerConnection, RESTAPI and Trace. + // +listType=set + DeniedRequestTypes []*string `json:"deniedRequestTypes,omitempty" tf:"denied_request_types,omitempty"` + + // The ID of the Private Endpoint which is based on the SignalR service. + ID *string `json:"id,omitempty" tf:"id,omitempty"` +} + +type PrivateEndpointParameters struct { + + // The allowed request types for the Private Endpoint Connection. Possible values are ClientConnection, ServerConnection, RESTAPI and Trace. + // +kubebuilder:validation:Optional + // +listType=set + AllowedRequestTypes []*string `json:"allowedRequestTypes,omitempty" tf:"allowed_request_types,omitempty"` + + // The denied request types for the Private Endpoint Connection. Possible values are ClientConnection, ServerConnection, RESTAPI and Trace. + // +kubebuilder:validation:Optional + // +listType=set + DeniedRequestTypes []*string `json:"deniedRequestTypes,omitempty" tf:"denied_request_types,omitempty"` + + // The ID of the Private Endpoint which is based on the SignalR service. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.PrivateEndpoint + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Reference to a PrivateEndpoint in network to populate id. 
+ // +kubebuilder:validation:Optional + IDRef *v1.Reference `json:"idRef,omitempty" tf:"-"` + + // Selector for a PrivateEndpoint in network to populate id. + // +kubebuilder:validation:Optional + IDSelector *v1.Selector `json:"idSelector,omitempty" tf:"-"` +} + +type PublicNetworkInitParameters struct { + + // The allowed request types for the public network. Possible values are ClientConnection, ServerConnection, RESTAPI and Trace. + // +listType=set + AllowedRequestTypes []*string `json:"allowedRequestTypes,omitempty" tf:"allowed_request_types,omitempty"` + + // The denied request types for the public network. Possible values are ClientConnection, ServerConnection, RESTAPI and Trace. + // +listType=set + DeniedRequestTypes []*string `json:"deniedRequestTypes,omitempty" tf:"denied_request_types,omitempty"` +} + +type PublicNetworkObservation struct { + + // The allowed request types for the public network. Possible values are ClientConnection, ServerConnection, RESTAPI and Trace. + // +listType=set + AllowedRequestTypes []*string `json:"allowedRequestTypes,omitempty" tf:"allowed_request_types,omitempty"` + + // The denied request types for the public network. Possible values are ClientConnection, ServerConnection, RESTAPI and Trace. + // +listType=set + DeniedRequestTypes []*string `json:"deniedRequestTypes,omitempty" tf:"denied_request_types,omitempty"` +} + +type PublicNetworkParameters struct { + + // The allowed request types for the public network. Possible values are ClientConnection, ServerConnection, RESTAPI and Trace. + // +kubebuilder:validation:Optional + // +listType=set + AllowedRequestTypes []*string `json:"allowedRequestTypes,omitempty" tf:"allowed_request_types,omitempty"` + + // The denied request types for the public network. Possible values are ClientConnection, ServerConnection, RESTAPI and Trace. 
+ // +kubebuilder:validation:Optional + // +listType=set + DeniedRequestTypes []*string `json:"deniedRequestTypes,omitempty" tf:"denied_request_types,omitempty"` +} + +// NetworkACLSpec defines the desired state of NetworkACL +type NetworkACLSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider NetworkACLParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider NetworkACLInitParameters `json:"initProvider,omitempty"` +} + +// NetworkACLStatus defines the observed state of NetworkACL. +type NetworkACLStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider NetworkACLObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// NetworkACL is the Schema for the NetworkACLs API. Manages the Network ACL for a SignalR service. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type NetworkACL struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.defaultAction) || (has(self.initProvider) && has(self.initProvider.defaultAction))",message="spec.forProvider.defaultAction is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.publicNetwork) || (has(self.initProvider) && has(self.initProvider.publicNetwork))",message="spec.forProvider.publicNetwork is a required parameter" + Spec NetworkACLSpec `json:"spec"` + Status NetworkACLStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// NetworkACLList contains a list of NetworkACLs +type NetworkACLList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []NetworkACL `json:"items"` +} + +// Repository type metadata. +var ( + NetworkACL_Kind = "NetworkACL" + NetworkACL_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: NetworkACL_Kind}.String() + NetworkACL_KindAPIVersion = NetworkACL_Kind + "." 
+ CRDGroupVersion.String() + NetworkACL_GroupVersionKind = CRDGroupVersion.WithKind(NetworkACL_Kind) +) + +func init() { + SchemeBuilder.Register(&NetworkACL{}, &NetworkACLList{}) +} diff --git a/apis/signalrservice/v1beta2/zz_service_terraformed.go b/apis/signalrservice/v1beta2/zz_service_terraformed.go new file mode 100755 index 000000000..6e01db20a --- /dev/null +++ b/apis/signalrservice/v1beta2/zz_service_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Service +func (mg *Service) GetTerraformResourceType() string { + return "azurerm_signalr_service" +} + +// GetConnectionDetailsMapping for this Service +func (tr *Service) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"primary_access_key": "status.atProvider.primaryAccessKey", "primary_connection_string": "status.atProvider.primaryConnectionString", "secondary_access_key": "status.atProvider.secondaryAccessKey", "secondary_connection_string": "status.atProvider.secondaryConnectionString"} +} + +// GetObservation of this Service +func (tr *Service) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Service +func (tr *Service) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Service +func (tr *Service) GetID() string { + if 
tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Service +func (tr *Service) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Service +func (tr *Service) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Service +func (tr *Service) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Service +func (tr *Service) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Service using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Service) LateInitialize(attrs []byte) (bool, error) { + params := &ServiceParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Service) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/signalrservice/v1beta2/zz_service_types.go b/apis/signalrservice/v1beta2/zz_service_types.go new file mode 100755 index 000000000..7d23cc7be --- /dev/null +++ b/apis/signalrservice/v1beta2/zz_service_types.go @@ -0,0 +1,486 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type CorsInitParameters struct { + + // A list of origins which should be able to make cross-origin calls. * can be used to allow all calls. 
+ // +listType=set + AllowedOrigins []*string `json:"allowedOrigins,omitempty" tf:"allowed_origins,omitempty"` +} + +type CorsObservation struct { + + // A list of origins which should be able to make cross-origin calls. * can be used to allow all calls. + // +listType=set + AllowedOrigins []*string `json:"allowedOrigins,omitempty" tf:"allowed_origins,omitempty"` +} + +type CorsParameters struct { + + // A list of origins which should be able to make cross-origin calls. * can be used to allow all calls. + // +kubebuilder:validation:Optional + // +listType=set + AllowedOrigins []*string `json:"allowedOrigins" tf:"allowed_origins,omitempty"` +} + +type IdentityInitParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this signalR. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this signalR. Possible values are SystemAssigned, UserAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityObservation struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this signalR. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The ID of the SignalR service. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The ID of the SignalR service. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this signalR. Possible values are SystemAssigned, UserAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this signalR. 
+ // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this signalR. Possible values are SystemAssigned, UserAssigned. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type LiveTraceInitParameters struct { + + // Whether the log category ConnectivityLogs is enabled? Defaults to true + ConnectivityLogsEnabled *bool `json:"connectivityLogsEnabled,omitempty" tf:"connectivity_logs_enabled,omitempty"` + + // Whether the live trace is enabled? Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Whether the log category HttpRequestLogs is enabled? Defaults to true + HTTPRequestLogsEnabled *bool `json:"httpRequestLogsEnabled,omitempty" tf:"http_request_logs_enabled,omitempty"` + + // Whether the log category MessagingLogs is enabled? Defaults to true + MessagingLogsEnabled *bool `json:"messagingLogsEnabled,omitempty" tf:"messaging_logs_enabled,omitempty"` +} + +type LiveTraceObservation struct { + + // Whether the log category ConnectivityLogs is enabled? Defaults to true + ConnectivityLogsEnabled *bool `json:"connectivityLogsEnabled,omitempty" tf:"connectivity_logs_enabled,omitempty"` + + // Whether the live trace is enabled? Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Whether the log category HttpRequestLogs is enabled? Defaults to true + HTTPRequestLogsEnabled *bool `json:"httpRequestLogsEnabled,omitempty" tf:"http_request_logs_enabled,omitempty"` + + // Whether the log category MessagingLogs is enabled? Defaults to true + MessagingLogsEnabled *bool `json:"messagingLogsEnabled,omitempty" tf:"messaging_logs_enabled,omitempty"` +} + +type LiveTraceParameters struct { + + // Whether the log category ConnectivityLogs is enabled? 
Defaults to true + // +kubebuilder:validation:Optional + ConnectivityLogsEnabled *bool `json:"connectivityLogsEnabled,omitempty" tf:"connectivity_logs_enabled,omitempty"` + + // Whether the live trace is enabled? Defaults to true. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Whether the log category HttpRequestLogs is enabled? Defaults to true + // +kubebuilder:validation:Optional + HTTPRequestLogsEnabled *bool `json:"httpRequestLogsEnabled,omitempty" tf:"http_request_logs_enabled,omitempty"` + + // Whether the log category MessagingLogs is enabled? Defaults to true + // +kubebuilder:validation:Optional + MessagingLogsEnabled *bool `json:"messagingLogsEnabled,omitempty" tf:"messaging_logs_enabled,omitempty"` +} + +type ServiceInitParameters struct { + + // Whether to enable AAD auth? Defaults to true. + AADAuthEnabled *bool `json:"aadAuthEnabled,omitempty" tf:"aad_auth_enabled,omitempty"` + + // Specifies if Connectivity Logs are enabled or not. Defaults to false. + ConnectivityLogsEnabled *bool `json:"connectivityLogsEnabled,omitempty" tf:"connectivity_logs_enabled,omitempty"` + + // A cors block as documented below. + Cors []CorsInitParameters `json:"cors,omitempty" tf:"cors,omitempty"` + + // Specifies if Http Request Logs are enabled or not. Defaults to false. + HTTPRequestLogsEnabled *bool `json:"httpRequestLogsEnabled,omitempty" tf:"http_request_logs_enabled,omitempty"` + + // An identity block as defined below. + Identity *IdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // A live_trace block as defined below. + LiveTrace *LiveTraceInitParameters `json:"liveTrace,omitempty" tf:"live_trace,omitempty"` + + // Specifies if Live Trace is enabled or not. Defaults to false. + LiveTraceEnabled *bool `json:"liveTraceEnabled,omitempty" tf:"live_trace_enabled,omitempty"` + + // Whether to enable local auth? Defaults to true. 
+ LocalAuthEnabled *bool `json:"localAuthEnabled,omitempty" tf:"local_auth_enabled,omitempty"` + + // Specifies the supported Azure location where the SignalR service exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies if Messaging Logs are enabled or not. Defaults to false. + MessagingLogsEnabled *bool `json:"messagingLogsEnabled,omitempty" tf:"messaging_logs_enabled,omitempty"` + + // Whether to enable public network access? Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // Specifies the client connection timeout. Defaults to 30. + ServerlessConnectionTimeoutInSeconds *float64 `json:"serverlessConnectionTimeoutInSeconds,omitempty" tf:"serverless_connection_timeout_in_seconds,omitempty"` + + // Specifies the service mode. Possible values are Classic, Default and Serverless. Defaults to Default. + ServiceMode *string `json:"serviceMode,omitempty" tf:"service_mode,omitempty"` + + // A sku block as documented below. + Sku *SkuInitParameters `json:"sku,omitempty" tf:"sku,omitempty"` + + // Whether to request client certificate during TLS handshake? Defaults to false. + TLSClientCertEnabled *bool `json:"tlsClientCertEnabled,omitempty" tf:"tls_client_cert_enabled,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // An upstream_endpoint block as documented below. Using this block requires the SignalR service to be Serverless. When creating multiple blocks they will be processed in the order they are defined in. + UpstreamEndpoint []UpstreamEndpointInitParameters `json:"upstreamEndpoint,omitempty" tf:"upstream_endpoint,omitempty"` +} + +type ServiceObservation struct { + + // Whether to enable AAD auth? Defaults to true. 
+ AADAuthEnabled *bool `json:"aadAuthEnabled,omitempty" tf:"aad_auth_enabled,omitempty"` + + // Specifies if Connectivity Logs are enabled or not. Defaults to false. + ConnectivityLogsEnabled *bool `json:"connectivityLogsEnabled,omitempty" tf:"connectivity_logs_enabled,omitempty"` + + // A cors block as documented below. + Cors []CorsObservation `json:"cors,omitempty" tf:"cors,omitempty"` + + // Specifies if Http Request Logs are enabled or not. Defaults to false. + HTTPRequestLogsEnabled *bool `json:"httpRequestLogsEnabled,omitempty" tf:"http_request_logs_enabled,omitempty"` + + // The FQDN of the SignalR service. + HostName *string `json:"hostname,omitempty" tf:"hostname,omitempty"` + + // The ID of the SignalR service. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The publicly accessible IP of the SignalR service. + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // An identity block as defined below. + Identity *IdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // A live_trace block as defined below. + LiveTrace *LiveTraceObservation `json:"liveTrace,omitempty" tf:"live_trace,omitempty"` + + // Specifies if Live Trace is enabled or not. Defaults to false. + LiveTraceEnabled *bool `json:"liveTraceEnabled,omitempty" tf:"live_trace_enabled,omitempty"` + + // Whether to enable local auth? Defaults to true. + LocalAuthEnabled *bool `json:"localAuthEnabled,omitempty" tf:"local_auth_enabled,omitempty"` + + // Specifies the supported Azure location where the SignalR service exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies if Messaging Logs are enabled or not. Defaults to false. + MessagingLogsEnabled *bool `json:"messagingLogsEnabled,omitempty" tf:"messaging_logs_enabled,omitempty"` + + // Whether to enable public network access? Defaults to true. 
+ PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The publicly accessible port of the SignalR service which is designed for browser/client use. + PublicPort *float64 `json:"publicPort,omitempty" tf:"public_port,omitempty"` + + // The name of the resource group in which to create the SignalR service. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // The publicly accessible port of the SignalR service which is designed for customer server side use. + ServerPort *float64 `json:"serverPort,omitempty" tf:"server_port,omitempty"` + + // Specifies the client connection timeout. Defaults to 30. + ServerlessConnectionTimeoutInSeconds *float64 `json:"serverlessConnectionTimeoutInSeconds,omitempty" tf:"serverless_connection_timeout_in_seconds,omitempty"` + + // Specifies the service mode. Possible values are Classic, Default and Serverless. Defaults to Default. + ServiceMode *string `json:"serviceMode,omitempty" tf:"service_mode,omitempty"` + + // A sku block as documented below. + Sku *SkuObservation `json:"sku,omitempty" tf:"sku,omitempty"` + + // Whether to request client certificate during TLS handshake? Defaults to false. + TLSClientCertEnabled *bool `json:"tlsClientCertEnabled,omitempty" tf:"tls_client_cert_enabled,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // An upstream_endpoint block as documented below. Using this block requires the SignalR service to be Serverless. When creating multiple blocks they will be processed in the order they are defined in. + UpstreamEndpoint []UpstreamEndpointObservation `json:"upstreamEndpoint,omitempty" tf:"upstream_endpoint,omitempty"` +} + +type ServiceParameters struct { + + // Whether to enable AAD auth? Defaults to true. 
+ // +kubebuilder:validation:Optional + AADAuthEnabled *bool `json:"aadAuthEnabled,omitempty" tf:"aad_auth_enabled,omitempty"` + + // Specifies if Connectivity Logs are enabled or not. Defaults to false. + // +kubebuilder:validation:Optional + ConnectivityLogsEnabled *bool `json:"connectivityLogsEnabled,omitempty" tf:"connectivity_logs_enabled,omitempty"` + + // A cors block as documented below. + // +kubebuilder:validation:Optional + Cors []CorsParameters `json:"cors,omitempty" tf:"cors,omitempty"` + + // Specifies if Http Request Logs are enabled or not. Defaults to false. + // +kubebuilder:validation:Optional + HTTPRequestLogsEnabled *bool `json:"httpRequestLogsEnabled,omitempty" tf:"http_request_logs_enabled,omitempty"` + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *IdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // A live_trace block as defined below. + // +kubebuilder:validation:Optional + LiveTrace *LiveTraceParameters `json:"liveTrace,omitempty" tf:"live_trace,omitempty"` + + // Specifies if Live Trace is enabled or not. Defaults to false. + // +kubebuilder:validation:Optional + LiveTraceEnabled *bool `json:"liveTraceEnabled,omitempty" tf:"live_trace_enabled,omitempty"` + + // Whether to enable local auth? Defaults to true. + // +kubebuilder:validation:Optional + LocalAuthEnabled *bool `json:"localAuthEnabled,omitempty" tf:"local_auth_enabled,omitempty"` + + // Specifies the supported Azure location where the SignalR service exists. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies if Messaging Logs are enabled or not. Defaults to false. + // +kubebuilder:validation:Optional + MessagingLogsEnabled *bool `json:"messagingLogsEnabled,omitempty" tf:"messaging_logs_enabled,omitempty"` + + // Whether to enable public network access? Defaults to true. 
+ // +kubebuilder:validation:Optional + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The name of the resource group in which to create the SignalR service. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // Specifies the client connection timeout. Defaults to 30. + // +kubebuilder:validation:Optional + ServerlessConnectionTimeoutInSeconds *float64 `json:"serverlessConnectionTimeoutInSeconds,omitempty" tf:"serverless_connection_timeout_in_seconds,omitempty"` + + // Specifies the service mode. Possible values are Classic, Default and Serverless. Defaults to Default. + // +kubebuilder:validation:Optional + ServiceMode *string `json:"serviceMode,omitempty" tf:"service_mode,omitempty"` + + // A sku block as documented below. + // +kubebuilder:validation:Optional + Sku *SkuParameters `json:"sku,omitempty" tf:"sku,omitempty"` + + // Whether to request client certificate during TLS handshake? Defaults to false. + // +kubebuilder:validation:Optional + TLSClientCertEnabled *bool `json:"tlsClientCertEnabled,omitempty" tf:"tls_client_cert_enabled,omitempty"` + + // A mapping of tags to assign to the resource. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // An upstream_endpoint block as documented below. Using this block requires the SignalR service to be Serverless. When creating multiple blocks they will be processed in the order they are defined in. + // +kubebuilder:validation:Optional + UpstreamEndpoint []UpstreamEndpointParameters `json:"upstreamEndpoint,omitempty" tf:"upstream_endpoint,omitempty"` +} + +type SkuInitParameters struct { + + // Specifies the number of units associated with this SignalR service. Valid values are 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 20, 30, 40, 50, 60, 70, 80, 90 and 100. + Capacity *float64 `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // Specifies which tier to use. Valid values are Free_F1, Standard_S1 and Premium_P1. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type SkuObservation struct { + + // Specifies the number of units associated with this SignalR service. Valid values are 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 20, 30, 40, 50, 60, 70, 80, 90 and 100. + Capacity *float64 `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // Specifies which tier to use. Valid values are Free_F1, Standard_S1 and Premium_P1. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type SkuParameters struct { + + // Specifies the number of units associated with this SignalR service. Valid values are 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 20, 30, 40, 50, 60, 70, 80, 90 and 100. + // +kubebuilder:validation:Optional + Capacity *float64 `json:"capacity" tf:"capacity,omitempty"` + + // Specifies which tier to use. Valid values are Free_F1, Standard_S1 and Premium_P1. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type UpstreamEndpointInitParameters struct { + + // The categories to match on, or * for all. 
+ CategoryPattern []*string `json:"categoryPattern,omitempty" tf:"category_pattern,omitempty"` + + // The events to match on, or * for all. + EventPattern []*string `json:"eventPattern,omitempty" tf:"event_pattern,omitempty"` + + // The hubs to match on, or * for all. + HubPattern []*string `json:"hubPattern,omitempty" tf:"hub_pattern,omitempty"` + + // The upstream URL Template. This can be a url or a template such as http://host.com/{hub}/api/{category}/{event}. + URLTemplate *string `json:"urlTemplate,omitempty" tf:"url_template,omitempty"` + + // Specifies the Managed Identity IDs to be assigned to this signalR upstream setting by using resource uuid as both system assigned and user assigned identity is supported. + UserAssignedIdentityID *string `json:"userAssignedIdentityId,omitempty" tf:"user_assigned_identity_id,omitempty"` +} + +type UpstreamEndpointObservation struct { + + // The categories to match on, or * for all. + CategoryPattern []*string `json:"categoryPattern,omitempty" tf:"category_pattern,omitempty"` + + // The events to match on, or * for all. + EventPattern []*string `json:"eventPattern,omitempty" tf:"event_pattern,omitempty"` + + // The hubs to match on, or * for all. + HubPattern []*string `json:"hubPattern,omitempty" tf:"hub_pattern,omitempty"` + + // The upstream URL Template. This can be a url or a template such as http://host.com/{hub}/api/{category}/{event}. + URLTemplate *string `json:"urlTemplate,omitempty" tf:"url_template,omitempty"` + + // Specifies the Managed Identity IDs to be assigned to this signalR upstream setting by using resource uuid as both system assigned and user assigned identity is supported. + UserAssignedIdentityID *string `json:"userAssignedIdentityId,omitempty" tf:"user_assigned_identity_id,omitempty"` +} + +type UpstreamEndpointParameters struct { + + // The categories to match on, or * for all. 
+ // +kubebuilder:validation:Optional + CategoryPattern []*string `json:"categoryPattern" tf:"category_pattern,omitempty"` + + // The events to match on, or * for all. + // +kubebuilder:validation:Optional + EventPattern []*string `json:"eventPattern" tf:"event_pattern,omitempty"` + + // The hubs to match on, or * for all. + // +kubebuilder:validation:Optional + HubPattern []*string `json:"hubPattern" tf:"hub_pattern,omitempty"` + + // The upstream URL Template. This can be a url or a template such as http://host.com/{hub}/api/{category}/{event}. + // +kubebuilder:validation:Optional + URLTemplate *string `json:"urlTemplate" tf:"url_template,omitempty"` + + // Specifies the Managed Identity IDs to be assigned to this signalR upstream setting by using resource uuid as both system assigned and user assigned identity is supported. + // +kubebuilder:validation:Optional + UserAssignedIdentityID *string `json:"userAssignedIdentityId,omitempty" tf:"user_assigned_identity_id,omitempty"` +} + +// ServiceSpec defines the desired state of Service +type ServiceSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ServiceParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ServiceInitParameters `json:"initProvider,omitempty"` +} + +// ServiceStatus defines the observed state of Service. 
+type ServiceStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ServiceObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Service is the Schema for the Services API. Manages an Azure SignalR service. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type Service struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.sku) || (has(self.initProvider) && has(self.initProvider.sku))",message="spec.forProvider.sku is a required parameter" + Spec ServiceSpec `json:"spec"` + Status ServiceStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ServiceList contains a list of Services +type ServiceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Service `json:"items"` +} + +// Repository type metadata. 
+var ( + Service_Kind = "Service" + Service_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Service_Kind}.String() + Service_KindAPIVersion = Service_Kind + "." + CRDGroupVersion.String() + Service_GroupVersionKind = CRDGroupVersion.WithKind(Service_Kind) +) + +func init() { + SchemeBuilder.Register(&Service{}, &ServiceList{}) +} diff --git a/apis/signalrservice/v1beta2/zz_webpubsub_terraformed.go b/apis/signalrservice/v1beta2/zz_webpubsub_terraformed.go new file mode 100755 index 000000000..84375a282 --- /dev/null +++ b/apis/signalrservice/v1beta2/zz_webpubsub_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this WebPubsub +func (mg *WebPubsub) GetTerraformResourceType() string { + return "azurerm_web_pubsub" +} + +// GetConnectionDetailsMapping for this WebPubsub +func (tr *WebPubsub) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"primary_access_key": "status.atProvider.primaryAccessKey", "primary_connection_string": "status.atProvider.primaryConnectionString", "secondary_access_key": "status.atProvider.secondaryAccessKey", "secondary_connection_string": "status.atProvider.secondaryConnectionString"} +} + +// GetObservation of this WebPubsub +func (tr *WebPubsub) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this WebPubsub +func (tr *WebPubsub) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return 
json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this WebPubsub +func (tr *WebPubsub) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this WebPubsub +func (tr *WebPubsub) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this WebPubsub +func (tr *WebPubsub) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this WebPubsub +func (tr *WebPubsub) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this WebPubsub +func (tr *WebPubsub) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this WebPubsub using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *WebPubsub) LateInitialize(attrs []byte) (bool, error) { + params := &WebPubsubParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *WebPubsub) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/signalrservice/v1beta2/zz_webpubsub_types.go b/apis/signalrservice/v1beta2/zz_webpubsub_types.go new file mode 100755 index 000000000..fbd378692 --- /dev/null +++ b/apis/signalrservice/v1beta2/zz_webpubsub_types.go @@ -0,0 +1,331 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type WebPubsubIdentityInitParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Web PubSub. 
+ // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Web PubSub. Possible values are SystemAssigned, UserAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type WebPubsubIdentityObservation struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Web PubSub. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Principal ID associated with this Managed Service Identity. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID associated with this Managed Service Identity. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Web PubSub. Possible values are SystemAssigned, UserAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type WebPubsubIdentityParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Web PubSub. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Web PubSub. Possible values are SystemAssigned, UserAssigned. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type WebPubsubInitParameters struct { + + // Whether to enable AAD auth? Defaults to true. + AADAuthEnabled *bool `json:"aadAuthEnabled,omitempty" tf:"aad_auth_enabled,omitempty"` + + // Specifies the number of units associated with this Web PubSub resource. Valid values are: Free: 1, Standard: 1, 2, 5, 10, 20, 50, 100. 
+ Capacity *float64 `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // An identity block as defined below. + Identity *WebPubsubIdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // A live_trace block as defined below. + LiveTrace *WebPubsubLiveTraceInitParameters `json:"liveTrace,omitempty" tf:"live_trace,omitempty"` + + // Whether to enable local auth? Defaults to true. + LocalAuthEnabled *bool `json:"localAuthEnabled,omitempty" tf:"local_auth_enabled,omitempty"` + + // Specifies the supported Azure location where the Web PubSub service exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the Web PubSub service. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Whether to enable public network access? Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The name of the resource group in which to create the Web PubSub service. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // Specifies which SKU to use. Possible values are Free_F1, Standard_S1, and Premium_P1. 
+ Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // Whether to request client certificate during TLS handshake? Defaults to false. + TLSClientCertEnabled *bool `json:"tlsClientCertEnabled,omitempty" tf:"tls_client_cert_enabled,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type WebPubsubLiveTraceInitParameters struct { + + // Whether the log category ConnectivityLogs is enabled? Defaults to true + ConnectivityLogsEnabled *bool `json:"connectivityLogsEnabled,omitempty" tf:"connectivity_logs_enabled,omitempty"` + + // Whether the live trace is enabled? Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Whether the log category HttpRequestLogs is enabled? Defaults to true + HTTPRequestLogsEnabled *bool `json:"httpRequestLogsEnabled,omitempty" tf:"http_request_logs_enabled,omitempty"` + + // Whether the log category MessagingLogs is enabled? Defaults to true + MessagingLogsEnabled *bool `json:"messagingLogsEnabled,omitempty" tf:"messaging_logs_enabled,omitempty"` +} + +type WebPubsubLiveTraceObservation struct { + + // Whether the log category ConnectivityLogs is enabled? Defaults to true + ConnectivityLogsEnabled *bool `json:"connectivityLogsEnabled,omitempty" tf:"connectivity_logs_enabled,omitempty"` + + // Whether the live trace is enabled? Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Whether the log category HttpRequestLogs is enabled? Defaults to true + HTTPRequestLogsEnabled *bool `json:"httpRequestLogsEnabled,omitempty" tf:"http_request_logs_enabled,omitempty"` + + // Whether the log category MessagingLogs is enabled? Defaults to true + MessagingLogsEnabled *bool `json:"messagingLogsEnabled,omitempty" tf:"messaging_logs_enabled,omitempty"` +} + +type WebPubsubLiveTraceParameters struct { + + // Whether the log category ConnectivityLogs is enabled? 
Defaults to true + // +kubebuilder:validation:Optional + ConnectivityLogsEnabled *bool `json:"connectivityLogsEnabled,omitempty" tf:"connectivity_logs_enabled,omitempty"` + + // Whether the live trace is enabled? Defaults to true. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Whether the log category HttpRequestLogs is enabled? Defaults to true + // +kubebuilder:validation:Optional + HTTPRequestLogsEnabled *bool `json:"httpRequestLogsEnabled,omitempty" tf:"http_request_logs_enabled,omitempty"` + + // Whether the log category MessagingLogs is enabled? Defaults to true + // +kubebuilder:validation:Optional + MessagingLogsEnabled *bool `json:"messagingLogsEnabled,omitempty" tf:"messaging_logs_enabled,omitempty"` +} + +type WebPubsubObservation struct { + + // Whether to enable AAD auth? Defaults to true. + AADAuthEnabled *bool `json:"aadAuthEnabled,omitempty" tf:"aad_auth_enabled,omitempty"` + + // Specifies the number of units associated with this Web PubSub resource. Valid values are: Free: 1, Standard: 1, 2, 5, 10, 20, 50, 100. + Capacity *float64 `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // The publicly accessible IP of the Web PubSub service. + ExternalIP *string `json:"externalIp,omitempty" tf:"external_ip,omitempty"` + + // The FQDN of the Web PubSub service. + HostName *string `json:"hostname,omitempty" tf:"hostname,omitempty"` + + // The ID of the Web PubSub service. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. + Identity *WebPubsubIdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // A live_trace block as defined below. + LiveTrace *WebPubsubLiveTraceObservation `json:"liveTrace,omitempty" tf:"live_trace,omitempty"` + + // Whether to enable local auth? Defaults to true. 
+ LocalAuthEnabled *bool `json:"localAuthEnabled,omitempty" tf:"local_auth_enabled,omitempty"` + + // Specifies the supported Azure location where the Web PubSub service exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the Web PubSub service. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Whether to enable public network access? Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The publicly accessible port of the Web PubSub service which is designed for browser/client use. + PublicPort *float64 `json:"publicPort,omitempty" tf:"public_port,omitempty"` + + // The name of the resource group in which to create the Web PubSub service. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // The publicly accessible port of the Web PubSub service which is designed for customer server side use. + ServerPort *float64 `json:"serverPort,omitempty" tf:"server_port,omitempty"` + + // Specifies which SKU to use. Possible values are Free_F1, Standard_S1, and Premium_P1. + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // Whether to request client certificate during TLS handshake? Defaults to false. + TLSClientCertEnabled *bool `json:"tlsClientCertEnabled,omitempty" tf:"tls_client_cert_enabled,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type WebPubsubParameters struct { + + // Whether to enable AAD auth? Defaults to true. 
+ // +kubebuilder:validation:Optional + AADAuthEnabled *bool `json:"aadAuthEnabled,omitempty" tf:"aad_auth_enabled,omitempty"` + + // Specifies the number of units associated with this Web PubSub resource. Valid values are: Free: 1, Standard: 1, 2, 5, 10, 20, 50, 100. + // +kubebuilder:validation:Optional + Capacity *float64 `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *WebPubsubIdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // A live_trace block as defined below. + // +kubebuilder:validation:Optional + LiveTrace *WebPubsubLiveTraceParameters `json:"liveTrace,omitempty" tf:"live_trace,omitempty"` + + // Whether to enable local auth? Defaults to true. + // +kubebuilder:validation:Optional + LocalAuthEnabled *bool `json:"localAuthEnabled,omitempty" tf:"local_auth_enabled,omitempty"` + + // Specifies the supported Azure location where the Web PubSub service exists. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the Web PubSub service. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Whether to enable public network access? Defaults to true. + // +kubebuilder:validation:Optional + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The name of the resource group in which to create the Web PubSub service. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // Specifies which SKU to use. Possible values are Free_F1, Standard_S1, and Premium_P1. + // +kubebuilder:validation:Optional + Sku *string `json:"sku,omitempty" tf:"sku,omitempty"` + + // Whether to request client certificate during TLS handshake? Defaults to false. + // +kubebuilder:validation:Optional + TLSClientCertEnabled *bool `json:"tlsClientCertEnabled,omitempty" tf:"tls_client_cert_enabled,omitempty"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +// WebPubsubSpec defines the desired state of WebPubsub +type WebPubsubSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider WebPubsubParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider WebPubsubInitParameters `json:"initProvider,omitempty"` +} + +// WebPubsubStatus defines the observed state of WebPubsub. +type WebPubsubStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider WebPubsubObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// WebPubsub is the Schema for the WebPubsubs API. Manages an Azure Web PubSub service. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type WebPubsub struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in 
self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.sku) || (has(self.initProvider) && has(self.initProvider.sku))",message="spec.forProvider.sku is a required parameter" + Spec WebPubsubSpec `json:"spec"` + Status WebPubsubStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// WebPubsubList contains a list of WebPubsubs +type WebPubsubList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []WebPubsub `json:"items"` +} + +// Repository type metadata. +var ( + WebPubsub_Kind = "WebPubsub" + WebPubsub_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: WebPubsub_Kind}.String() + WebPubsub_KindAPIVersion = WebPubsub_Kind + "." + CRDGroupVersion.String() + WebPubsub_GroupVersionKind = CRDGroupVersion.WithKind(WebPubsub_Kind) +) + +func init() { + SchemeBuilder.Register(&WebPubsub{}, &WebPubsubList{}) +} diff --git a/apis/signalrservice/v1beta2/zz_webpubsubhub_terraformed.go b/apis/signalrservice/v1beta2/zz_webpubsubhub_terraformed.go new file mode 100755 index 000000000..8beeded7b --- /dev/null +++ b/apis/signalrservice/v1beta2/zz_webpubsubhub_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this WebPubsubHub +func (mg *WebPubsubHub) GetTerraformResourceType() string { + return "azurerm_web_pubsub_hub" +} + +// GetConnectionDetailsMapping for this WebPubsubHub +func (tr *WebPubsubHub) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this WebPubsubHub +func (tr *WebPubsubHub) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this WebPubsubHub +func (tr *WebPubsubHub) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this WebPubsubHub +func (tr *WebPubsubHub) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this WebPubsubHub +func (tr *WebPubsubHub) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this WebPubsubHub +func (tr *WebPubsubHub) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this WebPubsubHub +func (tr *WebPubsubHub) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base 
:= map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this WebPubsubHub +func (tr *WebPubsubHub) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this WebPubsubHub using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *WebPubsubHub) LateInitialize(attrs []byte) (bool, error) { + params := &WebPubsubHubParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *WebPubsubHub) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/signalrservice/v1beta2/zz_webpubsubhub_types.go b/apis/signalrservice/v1beta2/zz_webpubsubhub_types.go new file mode 100755 index 000000000..86cb17b38 --- /dev/null +++ b/apis/signalrservice/v1beta2/zz_webpubsubhub_types.go @@ -0,0 +1,336 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AuthInitParameters struct { + + // Specify the identity ID of the target resource. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/managedidentity/v1beta1.UserAssignedIdentity + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + ManagedIdentityID *string `json:"managedIdentityId,omitempty" tf:"managed_identity_id,omitempty"` + + // Reference to a UserAssignedIdentity in managedidentity to populate managedIdentityId. + // +kubebuilder:validation:Optional + ManagedIdentityIDRef *v1.Reference `json:"managedIdentityIdRef,omitempty" tf:"-"` + + // Selector for a UserAssignedIdentity in managedidentity to populate managedIdentityId. + // +kubebuilder:validation:Optional + ManagedIdentityIDSelector *v1.Selector `json:"managedIdentityIdSelector,omitempty" tf:"-"` +} + +type AuthObservation struct { + + // Specify the identity ID of the target resource. + ManagedIdentityID *string `json:"managedIdentityId,omitempty" tf:"managed_identity_id,omitempty"` +} + +type AuthParameters struct { + + // Specify the identity ID of the target resource. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/managedidentity/v1beta1.UserAssignedIdentity + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + ManagedIdentityID *string `json:"managedIdentityId,omitempty" tf:"managed_identity_id,omitempty"` + + // Reference to a UserAssignedIdentity in managedidentity to populate managedIdentityId. + // +kubebuilder:validation:Optional + ManagedIdentityIDRef *v1.Reference `json:"managedIdentityIdRef,omitempty" tf:"-"` + + // Selector for a UserAssignedIdentity in managedidentity to populate managedIdentityId. + // +kubebuilder:validation:Optional + ManagedIdentityIDSelector *v1.Selector `json:"managedIdentityIdSelector,omitempty" tf:"-"` +} + +type EventHandlerInitParameters struct { + + // An auth block as defined below. + Auth *AuthInitParameters `json:"auth,omitempty" tf:"auth,omitempty"` + + // Specifies the list of system events. Supported values are connect, connected and disconnected. + // +listType=set + SystemEvents []*string `json:"systemEvents,omitempty" tf:"system_events,omitempty"` + + // The Event Handler URL Template. Two predefined parameters {hub} and {event} are available to use in the template. The value of the EventHandler URL is dynamically calculated when the client request comes in. Example: http://example.com/api/{hub}/{event}. + URLTemplate *string `json:"urlTemplate,omitempty" tf:"url_template,omitempty"` + + // Specifies the matching event names. There are 3 kind of patterns supported: * * matches any event name * , Combine multiple events with , for example event1,event2, it matches event event1 and event2 * The single event name, for example event1, it matches event1. + UserEventPattern *string `json:"userEventPattern,omitempty" tf:"user_event_pattern,omitempty"` +} + +type EventHandlerObservation struct { + + // An auth block as defined below. 
+ Auth *AuthObservation `json:"auth,omitempty" tf:"auth,omitempty"` + + // Specifies the list of system events. Supported values are connect, connected and disconnected. + // +listType=set + SystemEvents []*string `json:"systemEvents,omitempty" tf:"system_events,omitempty"` + + // The Event Handler URL Template. Two predefined parameters {hub} and {event} are available to use in the template. The value of the EventHandler URL is dynamically calculated when the client request comes in. Example: http://example.com/api/{hub}/{event}. + URLTemplate *string `json:"urlTemplate,omitempty" tf:"url_template,omitempty"` + + // Specifies the matching event names. There are 3 kind of patterns supported: * * matches any event name * , Combine multiple events with , for example event1,event2, it matches event event1 and event2 * The single event name, for example event1, it matches event1. + UserEventPattern *string `json:"userEventPattern,omitempty" tf:"user_event_pattern,omitempty"` +} + +type EventHandlerParameters struct { + + // An auth block as defined below. + // +kubebuilder:validation:Optional + Auth *AuthParameters `json:"auth,omitempty" tf:"auth,omitempty"` + + // Specifies the list of system events. Supported values are connect, connected and disconnected. + // +kubebuilder:validation:Optional + // +listType=set + SystemEvents []*string `json:"systemEvents,omitempty" tf:"system_events,omitempty"` + + // The Event Handler URL Template. Two predefined parameters {hub} and {event} are available to use in the template. The value of the EventHandler URL is dynamically calculated when the client request comes in. Example: http://example.com/api/{hub}/{event}. + // +kubebuilder:validation:Optional + URLTemplate *string `json:"urlTemplate" tf:"url_template,omitempty"` + + // Specifies the matching event names. 
There are 3 kind of patterns supported: * * matches any event name * , Combine multiple events with , for example event1,event2, it matches event event1 and event2 * The single event name, for example event1, it matches event1. + // +kubebuilder:validation:Optional + UserEventPattern *string `json:"userEventPattern,omitempty" tf:"user_event_pattern,omitempty"` +} + +type EventListenerInitParameters struct { + + // Specifies the event hub name to receive the events. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta2.EventHub + EventHubName *string `json:"eventhubName,omitempty" tf:"eventhub_name,omitempty"` + + // Reference to a EventHub in eventhub to populate eventhubName. + // +kubebuilder:validation:Optional + EventHubNameRef *v1.Reference `json:"eventhubNameRef,omitempty" tf:"-"` + + // Selector for a EventHub in eventhub to populate eventhubName. + // +kubebuilder:validation:Optional + EventHubNameSelector *v1.Selector `json:"eventhubNameSelector,omitempty" tf:"-"` + + // Specifies the event hub namespace name to receive the events. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta2.EventHubNamespace + EventHubNamespaceName *string `json:"eventhubNamespaceName,omitempty" tf:"eventhub_namespace_name,omitempty"` + + // Reference to a EventHubNamespace in eventhub to populate eventhubNamespaceName. + // +kubebuilder:validation:Optional + EventHubNamespaceNameRef *v1.Reference `json:"eventhubNamespaceNameRef,omitempty" tf:"-"` + + // Selector for a EventHubNamespace in eventhub to populate eventhubNamespaceName. + // +kubebuilder:validation:Optional + EventHubNamespaceNameSelector *v1.Selector `json:"eventhubNamespaceNameSelector,omitempty" tf:"-"` + + // Specifies the list of system events. Supported values are connected and disconnected. 
+ SystemEventNameFilter []*string `json:"systemEventNameFilter,omitempty" tf:"system_event_name_filter,omitempty"` + + // Specifies the list of matching user event names. ["*"] can be used to match all events. + UserEventNameFilter []*string `json:"userEventNameFilter,omitempty" tf:"user_event_name_filter,omitempty"` +} + +type EventListenerObservation struct { + + // Specifies the event hub name to receive the events. + EventHubName *string `json:"eventhubName,omitempty" tf:"eventhub_name,omitempty"` + + // Specifies the event hub namespace name to receive the events. + EventHubNamespaceName *string `json:"eventhubNamespaceName,omitempty" tf:"eventhub_namespace_name,omitempty"` + + // Specifies the list of system events. Supported values are connected and disconnected. + SystemEventNameFilter []*string `json:"systemEventNameFilter,omitempty" tf:"system_event_name_filter,omitempty"` + + // Specifies the list of matching user event names. ["*"] can be used to match all events. + UserEventNameFilter []*string `json:"userEventNameFilter,omitempty" tf:"user_event_name_filter,omitempty"` +} + +type EventListenerParameters struct { + + // Specifies the event hub name to receive the events. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta2.EventHub + // +kubebuilder:validation:Optional + EventHubName *string `json:"eventhubName,omitempty" tf:"eventhub_name,omitempty"` + + // Reference to a EventHub in eventhub to populate eventhubName. + // +kubebuilder:validation:Optional + EventHubNameRef *v1.Reference `json:"eventhubNameRef,omitempty" tf:"-"` + + // Selector for a EventHub in eventhub to populate eventhubName. + // +kubebuilder:validation:Optional + EventHubNameSelector *v1.Selector `json:"eventhubNameSelector,omitempty" tf:"-"` + + // Specifies the event hub namespace name to receive the events. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta2.EventHubNamespace + // +kubebuilder:validation:Optional + EventHubNamespaceName *string `json:"eventhubNamespaceName,omitempty" tf:"eventhub_namespace_name,omitempty"` + + // Reference to a EventHubNamespace in eventhub to populate eventhubNamespaceName. + // +kubebuilder:validation:Optional + EventHubNamespaceNameRef *v1.Reference `json:"eventhubNamespaceNameRef,omitempty" tf:"-"` + + // Selector for a EventHubNamespace in eventhub to populate eventhubNamespaceName. + // +kubebuilder:validation:Optional + EventHubNamespaceNameSelector *v1.Selector `json:"eventhubNamespaceNameSelector,omitempty" tf:"-"` + + // Specifies the list of system events. Supported values are connected and disconnected. + // +kubebuilder:validation:Optional + SystemEventNameFilter []*string `json:"systemEventNameFilter,omitempty" tf:"system_event_name_filter,omitempty"` + + // Specifies the list of matching user event names. ["*"] can be used to match all events. + // +kubebuilder:validation:Optional + UserEventNameFilter []*string `json:"userEventNameFilter,omitempty" tf:"user_event_name_filter,omitempty"` +} + +type WebPubsubHubInitParameters struct { + + // Is anonymous connections are allowed for this hub? Defaults to false. + // Possible values are true, false. + AnonymousConnectionsEnabled *bool `json:"anonymousConnectionsEnabled,omitempty" tf:"anonymous_connections_enabled,omitempty"` + + // An event_handler block as defined below. + EventHandler []EventHandlerInitParameters `json:"eventHandler,omitempty" tf:"event_handler,omitempty"` + + // An event_listener block as defined below. + EventListener []EventListenerInitParameters `json:"eventListener,omitempty" tf:"event_listener,omitempty"` + + // The name of the Web Pubsub hub service. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies the id of the Web Pubsub. 
Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/signalrservice/v1beta2.WebPubsub + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + WebPubsubID *string `json:"webPubsubId,omitempty" tf:"web_pubsub_id,omitempty"` + + // Reference to a WebPubsub in signalrservice to populate webPubsubId. + // +kubebuilder:validation:Optional + WebPubsubIDRef *v1.Reference `json:"webPubsubIdRef,omitempty" tf:"-"` + + // Selector for a WebPubsub in signalrservice to populate webPubsubId. + // +kubebuilder:validation:Optional + WebPubsubIDSelector *v1.Selector `json:"webPubsubIdSelector,omitempty" tf:"-"` +} + +type WebPubsubHubObservation struct { + + // Is anonymous connections are allowed for this hub? Defaults to false. + // Possible values are true, false. + AnonymousConnectionsEnabled *bool `json:"anonymousConnectionsEnabled,omitempty" tf:"anonymous_connections_enabled,omitempty"` + + // An event_handler block as defined below. + EventHandler []EventHandlerObservation `json:"eventHandler,omitempty" tf:"event_handler,omitempty"` + + // An event_listener block as defined below. + EventListener []EventListenerObservation `json:"eventListener,omitempty" tf:"event_listener,omitempty"` + + // The ID of the Web Pubsub Hub resource. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name of the Web Pubsub hub service. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies the id of the Web Pubsub. Changing this forces a new resource to be created. + WebPubsubID *string `json:"webPubsubId,omitempty" tf:"web_pubsub_id,omitempty"` +} + +type WebPubsubHubParameters struct { + + // Is anonymous connections are allowed for this hub? Defaults to false. + // Possible values are true, false. 
+ // +kubebuilder:validation:Optional + AnonymousConnectionsEnabled *bool `json:"anonymousConnectionsEnabled,omitempty" tf:"anonymous_connections_enabled,omitempty"` + + // An event_handler block as defined below. + // +kubebuilder:validation:Optional + EventHandler []EventHandlerParameters `json:"eventHandler,omitempty" tf:"event_handler,omitempty"` + + // An event_listener block as defined below. + // +kubebuilder:validation:Optional + EventListener []EventListenerParameters `json:"eventListener,omitempty" tf:"event_listener,omitempty"` + + // The name of the Web Pubsub hub service. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Specifies the id of the Web Pubsub. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/signalrservice/v1beta2.WebPubsub + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + WebPubsubID *string `json:"webPubsubId,omitempty" tf:"web_pubsub_id,omitempty"` + + // Reference to a WebPubsub in signalrservice to populate webPubsubId. + // +kubebuilder:validation:Optional + WebPubsubIDRef *v1.Reference `json:"webPubsubIdRef,omitempty" tf:"-"` + + // Selector for a WebPubsub in signalrservice to populate webPubsubId. + // +kubebuilder:validation:Optional + WebPubsubIDSelector *v1.Selector `json:"webPubsubIdSelector,omitempty" tf:"-"` +} + +// WebPubsubHubSpec defines the desired state of WebPubsubHub +type WebPubsubHubSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider WebPubsubHubParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. 
The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider WebPubsubHubInitParameters `json:"initProvider,omitempty"` +} + +// WebPubsubHubStatus defines the observed state of WebPubsubHub. +type WebPubsubHubStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider WebPubsubHubObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// WebPubsubHub is the Schema for the WebPubsubHubs API. Manages the hub settings for a Web Pubsub service. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type WebPubsubHub struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + Spec WebPubsubHubSpec `json:"spec"` + Status WebPubsubHubStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// WebPubsubHubList contains a 
list of WebPubsubHubs +type WebPubsubHubList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []WebPubsubHub `json:"items"` +} + +// Repository type metadata. +var ( + WebPubsubHub_Kind = "WebPubsubHub" + WebPubsubHub_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: WebPubsubHub_Kind}.String() + WebPubsubHub_KindAPIVersion = WebPubsubHub_Kind + "." + CRDGroupVersion.String() + WebPubsubHub_GroupVersionKind = CRDGroupVersion.WithKind(WebPubsubHub_Kind) +) + +func init() { + SchemeBuilder.Register(&WebPubsubHub{}, &WebPubsubHubList{}) +} diff --git a/apis/signalrservice/v1beta2/zz_webpubsubnetworkacl_terraformed.go b/apis/signalrservice/v1beta2/zz_webpubsubnetworkacl_terraformed.go new file mode 100755 index 000000000..dc040d0a5 --- /dev/null +++ b/apis/signalrservice/v1beta2/zz_webpubsubnetworkacl_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this WebPubsubNetworkACL +func (mg *WebPubsubNetworkACL) GetTerraformResourceType() string { + return "azurerm_web_pubsub_network_acl" +} + +// GetConnectionDetailsMapping for this WebPubsubNetworkACL +func (tr *WebPubsubNetworkACL) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this WebPubsubNetworkACL +func (tr *WebPubsubNetworkACL) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this WebPubsubNetworkACL +func (tr *WebPubsubNetworkACL) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this WebPubsubNetworkACL +func (tr *WebPubsubNetworkACL) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this WebPubsubNetworkACL +func (tr *WebPubsubNetworkACL) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this WebPubsubNetworkACL +func (tr *WebPubsubNetworkACL) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this WebPubsubNetworkACL +func (tr *WebPubsubNetworkACL) GetInitParameters() 
(map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this WebPubsubNetworkACL +func (tr *WebPubsubNetworkACL) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this WebPubsubNetworkACL using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *WebPubsubNetworkACL) LateInitialize(attrs []byte) (bool, error) { + params := &WebPubsubNetworkACLParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *WebPubsubNetworkACL) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/signalrservice/v1beta2/zz_webpubsubnetworkacl_types.go b/apis/signalrservice/v1beta2/zz_webpubsubnetworkacl_types.go new file mode 100755 index 000000000..e45d6e907 --- /dev/null +++ b/apis/signalrservice/v1beta2/zz_webpubsubnetworkacl_types.go @@ -0,0 +1,247 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type WebPubsubNetworkACLInitParameters struct { + + // The default action to control the network access when no other rule matches. Possible values are Allow and Deny. Defaults to Deny. + DefaultAction *string `json:"defaultAction,omitempty" tf:"default_action,omitempty"` + + // A private_endpoint block as defined below. + PrivateEndpoint []WebPubsubNetworkACLPrivateEndpointInitParameters `json:"privateEndpoint,omitempty" tf:"private_endpoint,omitempty"` + + // A public_network block as defined below. + PublicNetwork *WebPubsubNetworkACLPublicNetworkInitParameters `json:"publicNetwork,omitempty" tf:"public_network,omitempty"` + + // The ID of the Web Pubsub service. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/signalrservice/v1beta2.WebPubsub + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + WebPubsubID *string `json:"webPubsubId,omitempty" tf:"web_pubsub_id,omitempty"` + + // Reference to a WebPubsub in signalrservice to populate webPubsubId. 
+ // +kubebuilder:validation:Optional + WebPubsubIDRef *v1.Reference `json:"webPubsubIdRef,omitempty" tf:"-"` + + // Selector for a WebPubsub in signalrservice to populate webPubsubId. + // +kubebuilder:validation:Optional + WebPubsubIDSelector *v1.Selector `json:"webPubsubIdSelector,omitempty" tf:"-"` +} + +type WebPubsubNetworkACLObservation struct { + + // The default action to control the network access when no other rule matches. Possible values are Allow and Deny. Defaults to Deny. + DefaultAction *string `json:"defaultAction,omitempty" tf:"default_action,omitempty"` + + // The ID of the Web Pubsub service. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A private_endpoint block as defined below. + PrivateEndpoint []WebPubsubNetworkACLPrivateEndpointObservation `json:"privateEndpoint,omitempty" tf:"private_endpoint,omitempty"` + + // A public_network block as defined below. + PublicNetwork *WebPubsubNetworkACLPublicNetworkObservation `json:"publicNetwork,omitempty" tf:"public_network,omitempty"` + + // The ID of the Web Pubsub service. Changing this forces a new resource to be created. + WebPubsubID *string `json:"webPubsubId,omitempty" tf:"web_pubsub_id,omitempty"` +} + +type WebPubsubNetworkACLParameters struct { + + // The default action to control the network access when no other rule matches. Possible values are Allow and Deny. Defaults to Deny. + // +kubebuilder:validation:Optional + DefaultAction *string `json:"defaultAction,omitempty" tf:"default_action,omitempty"` + + // A private_endpoint block as defined below. + // +kubebuilder:validation:Optional + PrivateEndpoint []WebPubsubNetworkACLPrivateEndpointParameters `json:"privateEndpoint,omitempty" tf:"private_endpoint,omitempty"` + + // A public_network block as defined below. + // +kubebuilder:validation:Optional + PublicNetwork *WebPubsubNetworkACLPublicNetworkParameters `json:"publicNetwork,omitempty" tf:"public_network,omitempty"` + + // The ID of the Web Pubsub service. 
Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/signalrservice/v1beta2.WebPubsub + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + WebPubsubID *string `json:"webPubsubId,omitempty" tf:"web_pubsub_id,omitempty"` + + // Reference to a WebPubsub in signalrservice to populate webPubsubId. + // +kubebuilder:validation:Optional + WebPubsubIDRef *v1.Reference `json:"webPubsubIdRef,omitempty" tf:"-"` + + // Selector for a WebPubsub in signalrservice to populate webPubsubId. + // +kubebuilder:validation:Optional + WebPubsubIDSelector *v1.Selector `json:"webPubsubIdSelector,omitempty" tf:"-"` +} + +type WebPubsubNetworkACLPrivateEndpointInitParameters struct { + + // The allowed request types for the Private Endpoint Connection. Possible values are ClientConnection, ServerConnection, RESTAPI and Trace. + // +listType=set + AllowedRequestTypes []*string `json:"allowedRequestTypes,omitempty" tf:"allowed_request_types,omitempty"` + + // The denied request types for the Private Endpoint Connection. Possible values are ClientConnection, ServerConnection, RESTAPI and Trace. + // +listType=set + DeniedRequestTypes []*string `json:"deniedRequestTypes,omitempty" tf:"denied_request_types,omitempty"` + + // The ID of the Private Endpoint which is based on the Web Pubsub service. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.PrivateEndpoint + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Reference to a PrivateEndpoint in network to populate id. + // +kubebuilder:validation:Optional + IDRef *v1.Reference `json:"idRef,omitempty" tf:"-"` + + // Selector for a PrivateEndpoint in network to populate id. 
+ // +kubebuilder:validation:Optional + IDSelector *v1.Selector `json:"idSelector,omitempty" tf:"-"` +} + +type WebPubsubNetworkACLPrivateEndpointObservation struct { + + // The allowed request types for the Private Endpoint Connection. Possible values are ClientConnection, ServerConnection, RESTAPI and Trace. + // +listType=set + AllowedRequestTypes []*string `json:"allowedRequestTypes,omitempty" tf:"allowed_request_types,omitempty"` + + // The denied request types for the Private Endpoint Connection. Possible values are ClientConnection, ServerConnection, RESTAPI and Trace. + // +listType=set + DeniedRequestTypes []*string `json:"deniedRequestTypes,omitempty" tf:"denied_request_types,omitempty"` + + // The ID of the Private Endpoint which is based on the Web Pubsub service. + ID *string `json:"id,omitempty" tf:"id,omitempty"` +} + +type WebPubsubNetworkACLPrivateEndpointParameters struct { + + // The allowed request types for the Private Endpoint Connection. Possible values are ClientConnection, ServerConnection, RESTAPI and Trace. + // +kubebuilder:validation:Optional + // +listType=set + AllowedRequestTypes []*string `json:"allowedRequestTypes,omitempty" tf:"allowed_request_types,omitempty"` + + // The denied request types for the Private Endpoint Connection. Possible values are ClientConnection, ServerConnection, RESTAPI and Trace. + // +kubebuilder:validation:Optional + // +listType=set + DeniedRequestTypes []*string `json:"deniedRequestTypes,omitempty" tf:"denied_request_types,omitempty"` + + // The ID of the Private Endpoint which is based on the Web Pubsub service. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.PrivateEndpoint + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Reference to a PrivateEndpoint in network to populate id. 
+ // +kubebuilder:validation:Optional + IDRef *v1.Reference `json:"idRef,omitempty" tf:"-"` + + // Selector for a PrivateEndpoint in network to populate id. + // +kubebuilder:validation:Optional + IDSelector *v1.Selector `json:"idSelector,omitempty" tf:"-"` +} + +type WebPubsubNetworkACLPublicNetworkInitParameters struct { + + // The allowed request types for the public network. Possible values are ClientConnection, ServerConnection, RESTAPI and Trace. + // +listType=set + AllowedRequestTypes []*string `json:"allowedRequestTypes,omitempty" tf:"allowed_request_types,omitempty"` + + // The denied request types for the public network. Possible values are ClientConnection, ServerConnection, RESTAPI and Trace. + // +listType=set + DeniedRequestTypes []*string `json:"deniedRequestTypes,omitempty" tf:"denied_request_types,omitempty"` +} + +type WebPubsubNetworkACLPublicNetworkObservation struct { + + // The allowed request types for the public network. Possible values are ClientConnection, ServerConnection, RESTAPI and Trace. + // +listType=set + AllowedRequestTypes []*string `json:"allowedRequestTypes,omitempty" tf:"allowed_request_types,omitempty"` + + // The denied request types for the public network. Possible values are ClientConnection, ServerConnection, RESTAPI and Trace. + // +listType=set + DeniedRequestTypes []*string `json:"deniedRequestTypes,omitempty" tf:"denied_request_types,omitempty"` +} + +type WebPubsubNetworkACLPublicNetworkParameters struct { + + // The allowed request types for the public network. Possible values are ClientConnection, ServerConnection, RESTAPI and Trace. + // +kubebuilder:validation:Optional + // +listType=set + AllowedRequestTypes []*string `json:"allowedRequestTypes,omitempty" tf:"allowed_request_types,omitempty"` + + // The denied request types for the public network. Possible values are ClientConnection, ServerConnection, RESTAPI and Trace. 
+ // +kubebuilder:validation:Optional + // +listType=set + DeniedRequestTypes []*string `json:"deniedRequestTypes,omitempty" tf:"denied_request_types,omitempty"` +} + +// WebPubsubNetworkACLSpec defines the desired state of WebPubsubNetworkACL +type WebPubsubNetworkACLSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider WebPubsubNetworkACLParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider WebPubsubNetworkACLInitParameters `json:"initProvider,omitempty"` +} + +// WebPubsubNetworkACLStatus defines the observed state of WebPubsubNetworkACL. +type WebPubsubNetworkACLStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider WebPubsubNetworkACLObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// WebPubsubNetworkACL is the Schema for the WebPubsubNetworkACLs API. Manages the Network ACL for a Web Pubsub service. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type WebPubsubNetworkACL struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.publicNetwork) || (has(self.initProvider) && has(self.initProvider.publicNetwork))",message="spec.forProvider.publicNetwork is a required parameter" + Spec WebPubsubNetworkACLSpec `json:"spec"` + Status WebPubsubNetworkACLStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// WebPubsubNetworkACLList contains a list of WebPubsubNetworkACLs +type WebPubsubNetworkACLList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []WebPubsubNetworkACL `json:"items"` +} + +// Repository type metadata. +var ( + WebPubsubNetworkACL_Kind = "WebPubsubNetworkACL" + WebPubsubNetworkACL_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: WebPubsubNetworkACL_Kind}.String() + WebPubsubNetworkACL_KindAPIVersion = WebPubsubNetworkACL_Kind + "." 
+ CRDGroupVersion.String() + WebPubsubNetworkACL_GroupVersionKind = CRDGroupVersion.WithKind(WebPubsubNetworkACL_Kind) +) + +func init() { + SchemeBuilder.Register(&WebPubsubNetworkACL{}, &WebPubsubNetworkACLList{}) +} diff --git a/apis/spring/v1beta1/zz_cloudapplicationliveview_types.go b/apis/spring/v1beta1/zz_cloudapplicationliveview_types.go index 71beace35..93b841ad6 100755 --- a/apis/spring/v1beta1/zz_cloudapplicationliveview_types.go +++ b/apis/spring/v1beta1/zz_cloudapplicationliveview_types.go @@ -28,7 +28,7 @@ type CloudApplicationLiveViewObservation struct { type CloudApplicationLiveViewParameters struct { // The ID of the Spring Cloud Service. Changing this forces a new Spring Cloud Application Live View to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/appplatform/v1beta1.SpringCloudService + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/appplatform/v1beta2.SpringCloudService // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional SpringCloudServiceID *string `json:"springCloudServiceId,omitempty" tf:"spring_cloud_service_id,omitempty"` diff --git a/apis/spring/v1beta1/zz_generated.resolvers.go b/apis/spring/v1beta1/zz_generated.resolvers.go index 2f2e61540..02fadcd3e 100644 --- a/apis/spring/v1beta1/zz_generated.resolvers.go +++ b/apis/spring/v1beta1/zz_generated.resolvers.go @@ -26,7 +26,7 @@ func (mg *CloudApplicationLiveView) ResolveReferences( // ResolveReferences of t var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("appplatform.azure.upbound.io", "v1beta1", "SpringCloudService", "SpringCloudServiceList") + m, l, err = apisresolver.GetManagedResource("appplatform.azure.upbound.io", "v1beta2", "SpringCloudService", "SpringCloudServiceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its 
list for reference resolution") } diff --git a/apis/sql/v1beta1/zz_generated.conversion_hubs.go b/apis/sql/v1beta1/zz_generated.conversion_hubs.go index 7310415d4..1b7895d8a 100755 --- a/apis/sql/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/sql/v1beta1/zz_generated.conversion_hubs.go @@ -6,21 +6,12 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *MSSQLDatabase) Hub() {} - // Hub marks this type as a conversion hub. func (tr *MSSQLDatabaseExtendedAuditingPolicy) Hub() {} // Hub marks this type as a conversion hub. func (tr *MSSQLDatabaseVulnerabilityAssessmentRuleBaseline) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *MSSQLElasticPool) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *MSSQLFailoverGroup) Hub() {} - // Hub marks this type as a conversion hub. func (tr *MSSQLFirewallRule) Hub() {} @@ -30,27 +21,12 @@ func (tr *MSSQLJobAgent) Hub() {} // Hub marks this type as a conversion hub. func (tr *MSSQLJobCredential) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *MSSQLManagedDatabase) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *MSSQLManagedInstance) Hub() {} - // Hub marks this type as a conversion hub. func (tr *MSSQLManagedInstanceActiveDirectoryAdministrator) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *MSSQLManagedInstanceFailoverGroup) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *MSSQLManagedInstanceVulnerabilityAssessment) Hub() {} - // Hub marks this type as a conversion hub. func (tr *MSSQLOutboundFirewallRule) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *MSSQLServer) Hub() {} - // Hub marks this type as a conversion hub. func (tr *MSSQLServerDNSAlias) Hub() {} @@ -63,8 +39,5 @@ func (tr *MSSQLServerSecurityAlertPolicy) Hub() {} // Hub marks this type as a conversion hub. func (tr *MSSQLServerTransparentDataEncryption) Hub() {} -// Hub marks this type as a conversion hub. 
-func (tr *MSSQLServerVulnerabilityAssessment) Hub() {} - // Hub marks this type as a conversion hub. func (tr *MSSQLVirtualNetworkRule) Hub() {} diff --git a/apis/sql/v1beta1/zz_generated.conversion_spokes.go b/apis/sql/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..d3de9d148 --- /dev/null +++ b/apis/sql/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,194 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this MSSQLDatabase to the hub type. +func (tr *MSSQLDatabase) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the MSSQLDatabase type. +func (tr *MSSQLDatabase) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this MSSQLElasticPool to the hub type. 
+func (tr *MSSQLElasticPool) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the MSSQLElasticPool type. +func (tr *MSSQLElasticPool) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this MSSQLFailoverGroup to the hub type. +func (tr *MSSQLFailoverGroup) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the MSSQLFailoverGroup type. +func (tr *MSSQLFailoverGroup) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this MSSQLManagedDatabase to the hub type. 
+func (tr *MSSQLManagedDatabase) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the MSSQLManagedDatabase type. +func (tr *MSSQLManagedDatabase) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this MSSQLManagedInstance to the hub type. +func (tr *MSSQLManagedInstance) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the MSSQLManagedInstance type. 
+func (tr *MSSQLManagedInstance) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this MSSQLManagedInstanceFailoverGroup to the hub type. +func (tr *MSSQLManagedInstanceFailoverGroup) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the MSSQLManagedInstanceFailoverGroup type. +func (tr *MSSQLManagedInstanceFailoverGroup) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this MSSQLManagedInstanceVulnerabilityAssessment to the hub type. 
+func (tr *MSSQLManagedInstanceVulnerabilityAssessment) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the MSSQLManagedInstanceVulnerabilityAssessment type. +func (tr *MSSQLManagedInstanceVulnerabilityAssessment) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this MSSQLServer to the hub type. +func (tr *MSSQLServer) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the MSSQLServer type. 
+func (tr *MSSQLServer) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this MSSQLServerVulnerabilityAssessment to the hub type. +func (tr *MSSQLServerVulnerabilityAssessment) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the MSSQLServerVulnerabilityAssessment type. 
+func (tr *MSSQLServerVulnerabilityAssessment) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/sql/v1beta1/zz_generated.resolvers.go b/apis/sql/v1beta1/zz_generated.resolvers.go index aecad2414..04a392dc4 100644 --- a/apis/sql/v1beta1/zz_generated.resolvers.go +++ b/apis/sql/v1beta1/zz_generated.resolvers.go @@ -97,7 +97,7 @@ func (mg *MSSQLDatabaseExtendedAuditingPolicy) ResolveReferences(ctx context.Con var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", "v1beta1", "MSSQLDatabase", "MSSQLDatabaseList") + m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", "v1beta2", "MSSQLDatabase", "MSSQLDatabaseList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -116,7 +116,7 @@ func (mg *MSSQLDatabaseExtendedAuditingPolicy) ResolveReferences(ctx context.Con mg.Spec.ForProvider.DatabaseID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.DatabaseIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -135,7 +135,7 @@ func (mg *MSSQLDatabaseExtendedAuditingPolicy) ResolveReferences(ctx context.Con mg.Spec.ForProvider.StorageEndpoint = reference.ToPtrValue(rsp.ResolvedValue) 
mg.Spec.ForProvider.StorageEndpointRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -166,7 +166,7 @@ func (mg *MSSQLDatabaseVulnerabilityAssessmentRuleBaseline) ResolveReferences(ct var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", "v1beta1", "MSSQLDatabase", "MSSQLDatabaseList") + m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", "v1beta2", "MSSQLDatabase", "MSSQLDatabaseList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -185,7 +185,7 @@ func (mg *MSSQLDatabaseVulnerabilityAssessmentRuleBaseline) ResolveReferences(ct mg.Spec.ForProvider.DatabaseName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.DatabaseNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", "v1beta1", "MSSQLServerVulnerabilityAssessment", "MSSQLServerVulnerabilityAssessmentList") + m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", "v1beta2", "MSSQLServerVulnerabilityAssessment", "MSSQLServerVulnerabilityAssessmentList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -204,7 +204,7 @@ func (mg *MSSQLDatabaseVulnerabilityAssessmentRuleBaseline) ResolveReferences(ct mg.Spec.ForProvider.ServerVulnerabilityAssessmentID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ServerVulnerabilityAssessmentIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", 
"v1beta1", "MSSQLDatabase", "MSSQLDatabaseList") + m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", "v1beta2", "MSSQLDatabase", "MSSQLDatabaseList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -223,7 +223,7 @@ func (mg *MSSQLDatabaseVulnerabilityAssessmentRuleBaseline) ResolveReferences(ct mg.Spec.InitProvider.DatabaseName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.DatabaseNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", "v1beta1", "MSSQLServerVulnerabilityAssessment", "MSSQLServerVulnerabilityAssessmentList") + m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", "v1beta2", "MSSQLServerVulnerabilityAssessment", "MSSQLServerVulnerabilityAssessmentList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -417,7 +417,7 @@ func (mg *MSSQLFirewallRule) ResolveReferences(ctx context.Context, c client.Rea var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", "v1beta1", "MSSQLServer", "MSSQLServerList") + m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", "v1beta2", "MSSQLServer", "MSSQLServerList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -448,7 +448,7 @@ func (mg *MSSQLJobAgent) ResolveReferences(ctx context.Context, c client.Reader) var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", "v1beta1", "MSSQLDatabase", "MSSQLDatabaseList") + m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", "v1beta2", "MSSQLDatabase", "MSSQLDatabaseList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed 
resource and its list for reference resolution") } @@ -467,7 +467,7 @@ func (mg *MSSQLJobAgent) ResolveReferences(ctx context.Context, c client.Reader) mg.Spec.ForProvider.DatabaseID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.DatabaseIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", "v1beta1", "MSSQLDatabase", "MSSQLDatabaseList") + m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", "v1beta2", "MSSQLDatabase", "MSSQLDatabaseList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -667,7 +667,7 @@ func (mg *MSSQLManagedInstanceActiveDirectoryAdministrator) ResolveReferences(ct var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", "v1beta1", "MSSQLManagedInstance", "MSSQLManagedInstanceList") + m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", "v1beta2", "MSSQLManagedInstance", "MSSQLManagedInstanceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -817,7 +817,7 @@ func (mg *MSSQLOutboundFirewallRule) ResolveReferences(ctx context.Context, c cl var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", "v1beta1", "MSSQLServer", "MSSQLServerList") + m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", "v1beta2", "MSSQLServer", "MSSQLServerList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1039,7 +1039,7 @@ func (mg *MSSQLServerDNSAlias) ResolveReferences(ctx context.Context, c client.R var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", "v1beta1", "MSSQLServer", 
"MSSQLServerList") + m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", "v1beta2", "MSSQLServer", "MSSQLServerList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1070,7 +1070,7 @@ func (mg *MSSQLServerMicrosoftSupportAuditingPolicy) ResolveReferences(ctx conte var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1089,7 +1089,7 @@ func (mg *MSSQLServerMicrosoftSupportAuditingPolicy) ResolveReferences(ctx conte mg.Spec.ForProvider.BlobStorageEndpoint = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.BlobStorageEndpointRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", "v1beta1", "MSSQLServer", "MSSQLServerList") + m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", "v1beta2", "MSSQLServer", "MSSQLServerList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1108,7 +1108,7 @@ func (mg *MSSQLServerMicrosoftSupportAuditingPolicy) ResolveReferences(ctx conte mg.Spec.ForProvider.ServerID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ServerIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ 
-1158,7 +1158,7 @@ func (mg *MSSQLServerSecurityAlertPolicy) ResolveReferences(ctx context.Context, mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", "v1beta1", "MSSQLServer", "MSSQLServerList") + m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", "v1beta2", "MSSQLServer", "MSSQLServerList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1177,7 +1177,7 @@ func (mg *MSSQLServerSecurityAlertPolicy) ResolveReferences(ctx context.Context, mg.Spec.ForProvider.ServerName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ServerNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1196,7 +1196,7 @@ func (mg *MSSQLServerSecurityAlertPolicy) ResolveReferences(ctx context.Context, mg.Spec.ForProvider.StorageEndpoint = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.StorageEndpointRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1227,7 +1227,7 @@ func (mg *MSSQLServerTransparentDataEncryption) ResolveReferences(ctx context.Co var rsp reference.ResolutionResponse var err error { - m, l, err = 
apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta1", "Key", "KeyList") + m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta2", "Key", "KeyList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1246,7 +1246,7 @@ func (mg *MSSQLServerTransparentDataEncryption) ResolveReferences(ctx context.Co mg.Spec.ForProvider.KeyVaultKeyID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.KeyVaultKeyIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", "v1beta1", "MSSQLServer", "MSSQLServerList") + m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", "v1beta2", "MSSQLServer", "MSSQLServerList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1265,7 +1265,7 @@ func (mg *MSSQLServerTransparentDataEncryption) ResolveReferences(ctx context.Co mg.Spec.ForProvider.ServerID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ServerIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta1", "Key", "KeyList") + m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta2", "Key", "KeyList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1346,7 +1346,7 @@ func (mg *MSSQLVirtualNetworkRule) ResolveReferences(ctx context.Context, c clie var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", "v1beta1", "MSSQLServer", "MSSQLServerList") + m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", "v1beta2", "MSSQLServer", "MSSQLServerList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its 
list for reference resolution") } @@ -1365,7 +1365,7 @@ func (mg *MSSQLVirtualNetworkRule) ResolveReferences(ctx context.Context, c clie mg.Spec.ForProvider.ServerID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ServerIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "Subnet", "SubnetList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1384,7 +1384,7 @@ func (mg *MSSQLVirtualNetworkRule) ResolveReferences(ctx context.Context, c clie mg.Spec.ForProvider.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.SubnetIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "Subnet", "SubnetList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/sql/v1beta1/zz_mssqldatabaseextendedauditingpolicy_types.go b/apis/sql/v1beta1/zz_mssqldatabaseextendedauditingpolicy_types.go index f2d7be807..09c276865 100755 --- a/apis/sql/v1beta1/zz_mssqldatabaseextendedauditingpolicy_types.go +++ b/apis/sql/v1beta1/zz_mssqldatabaseextendedauditingpolicy_types.go @@ -28,7 +28,7 @@ type MSSQLDatabaseExtendedAuditingPolicyInitParameters struct { StorageAccountAccessKeyIsSecondary *bool `json:"storageAccountAccessKeyIsSecondary,omitempty" tf:"storage_account_access_key_is_secondary,omitempty"` // The blob storage endpoint (e.g. https://example.blob.core.windows.net). This blob storage will hold all extended auditing logs. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("primary_blob_endpoint",true) StorageEndpoint *string `json:"storageEndpoint,omitempty" tf:"storage_endpoint,omitempty"` @@ -68,7 +68,7 @@ type MSSQLDatabaseExtendedAuditingPolicyObservation struct { type MSSQLDatabaseExtendedAuditingPolicyParameters struct { // The ID of the SQL database to set the extended auditing policy. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta1.MSSQLDatabase + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta2.MSSQLDatabase // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional DatabaseID *string `json:"databaseId,omitempty" tf:"database_id,omitempty"` @@ -102,7 +102,7 @@ type MSSQLDatabaseExtendedAuditingPolicyParameters struct { StorageAccountAccessKeySecretRef *v1.SecretKeySelector `json:"storageAccountAccessKeySecretRef,omitempty" tf:"-"` // The blob storage endpoint (e.g. https://example.blob.core.windows.net). This blob storage will hold all extended auditing logs. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("primary_blob_endpoint",true) // +kubebuilder:validation:Optional StorageEndpoint *string `json:"storageEndpoint,omitempty" tf:"storage_endpoint,omitempty"` diff --git a/apis/sql/v1beta1/zz_mssqldatabasevulnerabilityassessmentrulebaseline_types.go b/apis/sql/v1beta1/zz_mssqldatabasevulnerabilityassessmentrulebaseline_types.go index 52526a574..952699c2e 100755 --- a/apis/sql/v1beta1/zz_mssqldatabasevulnerabilityassessmentrulebaseline_types.go +++ b/apis/sql/v1beta1/zz_mssqldatabasevulnerabilityassessmentrulebaseline_types.go @@ -41,7 +41,7 @@ type MSSQLDatabaseVulnerabilityAssessmentRuleBaselineInitParameters struct { BaselineResult []BaselineResultInitParameters `json:"baselineResult,omitempty" tf:"baseline_result,omitempty"` // Specifies the name of the MS SQL Database. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta1.MSSQLDatabase + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta2.MSSQLDatabase DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` // Reference to a MSSQLDatabase in sql to populate databaseName. @@ -56,7 +56,7 @@ type MSSQLDatabaseVulnerabilityAssessmentRuleBaselineInitParameters struct { RuleID *string `json:"ruleId,omitempty" tf:"rule_id,omitempty"` // The Vulnerability Assessment ID of the MS SQL Server. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta1.MSSQLServerVulnerabilityAssessment + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta2.MSSQLServerVulnerabilityAssessment // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() ServerVulnerabilityAssessmentID *string `json:"serverVulnerabilityAssessmentId,omitempty" tf:"server_vulnerability_assessment_id,omitempty"` @@ -101,7 +101,7 @@ type MSSQLDatabaseVulnerabilityAssessmentRuleBaselineParameters struct { BaselineResult []BaselineResultParameters `json:"baselineResult,omitempty" tf:"baseline_result,omitempty"` // Specifies the name of the MS SQL Database. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta1.MSSQLDatabase + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta2.MSSQLDatabase // +kubebuilder:validation:Optional DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"` @@ -118,7 +118,7 @@ type MSSQLDatabaseVulnerabilityAssessmentRuleBaselineParameters struct { RuleID *string `json:"ruleId,omitempty" tf:"rule_id,omitempty"` // The Vulnerability Assessment ID of the MS SQL Server. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta1.MSSQLServerVulnerabilityAssessment + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta2.MSSQLServerVulnerabilityAssessment // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional ServerVulnerabilityAssessmentID *string `json:"serverVulnerabilityAssessmentId,omitempty" tf:"server_vulnerability_assessment_id,omitempty"` diff --git a/apis/sql/v1beta1/zz_mssqlfirewallrule_types.go b/apis/sql/v1beta1/zz_mssqlfirewallrule_types.go index 070fae828..a45549f03 100755 --- a/apis/sql/v1beta1/zz_mssqlfirewallrule_types.go +++ b/apis/sql/v1beta1/zz_mssqlfirewallrule_types.go @@ -44,7 +44,7 @@ type MSSQLFirewallRuleParameters struct { EndIPAddress *string `json:"endIpAddress,omitempty" tf:"end_ip_address,omitempty"` // The resource ID of the SQL Server on which to create the Firewall Rule. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta1.MSSQLServer + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta2.MSSQLServer // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional ServerID *string `json:"serverId,omitempty" tf:"server_id,omitempty"` diff --git a/apis/sql/v1beta1/zz_mssqljobagent_types.go b/apis/sql/v1beta1/zz_mssqljobagent_types.go index b28d69591..c9983bce5 100755 --- a/apis/sql/v1beta1/zz_mssqljobagent_types.go +++ b/apis/sql/v1beta1/zz_mssqljobagent_types.go @@ -16,7 +16,7 @@ import ( type MSSQLJobAgentInitParameters struct { // The ID of the database to store metadata for the Elastic Job Agent. Changing this forces a new Elastic Job Agent to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta1.MSSQLDatabase + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta2.MSSQLDatabase // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() DatabaseID *string `json:"databaseId,omitempty" tf:"database_id,omitempty"` @@ -61,7 +61,7 @@ type MSSQLJobAgentObservation struct { type MSSQLJobAgentParameters struct { // The ID of the database to store metadata for the Elastic Job Agent. Changing this forces a new Elastic Job Agent to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta1.MSSQLDatabase + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta2.MSSQLDatabase // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional DatabaseID *string `json:"databaseId,omitempty" tf:"database_id,omitempty"` diff --git a/apis/sql/v1beta1/zz_mssqlmanagedinstanceactivedirectoryadministrator_types.go b/apis/sql/v1beta1/zz_mssqlmanagedinstanceactivedirectoryadministrator_types.go index f3758546a..a1f334a5e 100755 --- a/apis/sql/v1beta1/zz_mssqlmanagedinstanceactivedirectoryadministrator_types.go +++ b/apis/sql/v1beta1/zz_mssqlmanagedinstanceactivedirectoryadministrator_types.go @@ -60,7 +60,7 @@ type MSSQLManagedInstanceActiveDirectoryAdministratorParameters struct { LoginUsername *string `json:"loginUsername,omitempty" tf:"login_username,omitempty"` // The ID of the Azure SQL Managed Instance for which to set the administrator. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta1.MSSQLManagedInstance + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta2.MSSQLManagedInstance // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() // +kubebuilder:validation:Optional ManagedInstanceID *string `json:"managedInstanceId,omitempty" tf:"managed_instance_id,omitempty"` diff --git a/apis/sql/v1beta1/zz_mssqloutboundfirewallrule_types.go b/apis/sql/v1beta1/zz_mssqloutboundfirewallrule_types.go index 0e3ca15c0..66c8286c0 100755 --- a/apis/sql/v1beta1/zz_mssqloutboundfirewallrule_types.go +++ b/apis/sql/v1beta1/zz_mssqloutboundfirewallrule_types.go @@ -28,7 +28,7 @@ type MSSQLOutboundFirewallRuleObservation struct { type MSSQLOutboundFirewallRuleParameters struct { // The resource ID of the SQL Server on which to create the Outbound Firewall Rule. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta1.MSSQLServer + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta2.MSSQLServer // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() // +kubebuilder:validation:Optional ServerID *string `json:"serverId,omitempty" tf:"server_id,omitempty"` diff --git a/apis/sql/v1beta1/zz_mssqlserverdnsalias_types.go b/apis/sql/v1beta1/zz_mssqlserverdnsalias_types.go index 909fc76f7..ba7d51565 100755 --- a/apis/sql/v1beta1/zz_mssqlserverdnsalias_types.go +++ b/apis/sql/v1beta1/zz_mssqlserverdnsalias_types.go @@ -31,7 +31,7 @@ type MSSQLServerDNSAliasObservation struct { type MSSQLServerDNSAliasParameters struct { // The ID of the mssql server. Changing this forces a new MSSQL Server DNS Alias to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta1.MSSQLServer + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta2.MSSQLServer // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() // +kubebuilder:validation:Optional MSSQLServerID *string `json:"mssqlServerId,omitempty" tf:"mssql_server_id,omitempty"` diff --git a/apis/sql/v1beta1/zz_mssqlservermicrosoftsupportauditingpolicy_types.go b/apis/sql/v1beta1/zz_mssqlservermicrosoftsupportauditingpolicy_types.go index aea9b68a6..ba058b296 100755 --- a/apis/sql/v1beta1/zz_mssqlservermicrosoftsupportauditingpolicy_types.go +++ b/apis/sql/v1beta1/zz_mssqlservermicrosoftsupportauditingpolicy_types.go @@ -16,7 +16,7 @@ import ( type MSSQLServerMicrosoftSupportAuditingPolicyInitParameters struct { // The blob storage endpoint (e.g. https://example.blob.core.windows.net). This blob storage will hold all Microsoft support auditing logs. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("primary_blob_endpoint",true) BlobStorageEndpoint *string `json:"blobStorageEndpoint,omitempty" tf:"blob_storage_endpoint,omitempty"` @@ -56,7 +56,7 @@ type MSSQLServerMicrosoftSupportAuditingPolicyObservation struct { type MSSQLServerMicrosoftSupportAuditingPolicyParameters struct { // The blob storage endpoint (e.g. https://example.blob.core.windows.net). This blob storage will hold all Microsoft support auditing logs. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("primary_blob_endpoint",true) // +kubebuilder:validation:Optional BlobStorageEndpoint *string `json:"blobStorageEndpoint,omitempty" tf:"blob_storage_endpoint,omitempty"` @@ -78,7 +78,7 @@ type MSSQLServerMicrosoftSupportAuditingPolicyParameters struct { LogMonitoringEnabled *bool `json:"logMonitoringEnabled,omitempty" tf:"log_monitoring_enabled,omitempty"` // The ID of the SQL Server to set the extended auditing policy. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta1.MSSQLServer + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta2.MSSQLServer // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional ServerID *string `json:"serverId,omitempty" tf:"server_id,omitempty"` diff --git a/apis/sql/v1beta1/zz_mssqlserversecurityalertpolicy_types.go b/apis/sql/v1beta1/zz_mssqlserversecurityalertpolicy_types.go index 0d201c4fb..74d77f706 100755 --- a/apis/sql/v1beta1/zz_mssqlserversecurityalertpolicy_types.go +++ b/apis/sql/v1beta1/zz_mssqlserversecurityalertpolicy_types.go @@ -33,7 +33,7 @@ type MSSQLServerSecurityAlertPolicyInitParameters struct { State *string `json:"state,omitempty" tf:"state,omitempty"` // Specifies the blob storage endpoint (e.g. https://example.blob.core.windows.net). This blob storage will hold all Threat Detection audit logs. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("primary_blob_endpoint",true) StorageEndpoint *string `json:"storageEndpoint,omitempty" tf:"storage_endpoint,omitempty"` @@ -112,7 +112,7 @@ type MSSQLServerSecurityAlertPolicyParameters struct { RetentionDays *float64 `json:"retentionDays,omitempty" tf:"retention_days,omitempty"` // Specifies the name of the MS SQL Server. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta1.MSSQLServer + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta2.MSSQLServer // +kubebuilder:validation:Optional ServerName *string `json:"serverName,omitempty" tf:"server_name,omitempty"` @@ -133,7 +133,7 @@ type MSSQLServerSecurityAlertPolicyParameters struct { StorageAccountAccessKeySecretRef *v1.SecretKeySelector `json:"storageAccountAccessKeySecretRef,omitempty" tf:"-"` // Specifies the blob storage endpoint (e.g. https://example.blob.core.windows.net). This blob storage will hold all Threat Detection audit logs. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("primary_blob_endpoint",true) // +kubebuilder:validation:Optional StorageEndpoint *string `json:"storageEndpoint,omitempty" tf:"storage_endpoint,omitempty"` diff --git a/apis/sql/v1beta1/zz_mssqlservertransparentdataencryption_types.go b/apis/sql/v1beta1/zz_mssqlservertransparentdataencryption_types.go index ad3f07b0e..2b85999e8 100755 --- a/apis/sql/v1beta1/zz_mssqlservertransparentdataencryption_types.go +++ b/apis/sql/v1beta1/zz_mssqlservertransparentdataencryption_types.go @@ -19,7 +19,7 @@ type MSSQLServerTransparentDataEncryptionInitParameters struct { AutoRotationEnabled *bool `json:"autoRotationEnabled,omitempty" tf:"auto_rotation_enabled,omitempty"` // To use customer managed keys from Azure Key Vault, provide the AKV Key ID. To use service managed keys, omit this field. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta1.Key + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta2.Key // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() KeyVaultKeyID *string `json:"keyVaultKeyId,omitempty" tf:"key_vault_key_id,omitempty"` @@ -54,7 +54,7 @@ type MSSQLServerTransparentDataEncryptionParameters struct { AutoRotationEnabled *bool `json:"autoRotationEnabled,omitempty" tf:"auto_rotation_enabled,omitempty"` // To use customer managed keys from Azure Key Vault, provide the AKV Key ID. To use service managed keys, omit this field. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta1.Key + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta2.Key // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() // +kubebuilder:validation:Optional KeyVaultKeyID *string `json:"keyVaultKeyId,omitempty" tf:"key_vault_key_id,omitempty"` @@ -68,7 +68,7 @@ type MSSQLServerTransparentDataEncryptionParameters struct { KeyVaultKeyIDSelector *v1.Selector `json:"keyVaultKeyIdSelector,omitempty" tf:"-"` // Specifies the name of the MS SQL Server. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta1.MSSQLServer + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta2.MSSQLServer // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() // +kubebuilder:validation:Optional ServerID *string `json:"serverId,omitempty" tf:"server_id,omitempty"` diff --git a/apis/sql/v1beta1/zz_mssqlvirtualnetworkrule_types.go b/apis/sql/v1beta1/zz_mssqlvirtualnetworkrule_types.go index c3e3914e7..ed23558d9 100755 --- a/apis/sql/v1beta1/zz_mssqlvirtualnetworkrule_types.go +++ b/apis/sql/v1beta1/zz_mssqlvirtualnetworkrule_types.go @@ -19,7 +19,7 @@ type MSSQLVirtualNetworkRuleInitParameters struct { IgnoreMissingVnetServiceEndpoint *bool `json:"ignoreMissingVnetServiceEndpoint,omitempty" tf:"ignore_missing_vnet_service_endpoint,omitempty"` // The ID of the subnet from which the SQL server will accept communications. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.Subnet + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` @@ -54,7 +54,7 @@ type MSSQLVirtualNetworkRuleParameters struct { IgnoreMissingVnetServiceEndpoint *bool `json:"ignoreMissingVnetServiceEndpoint,omitempty" tf:"ignore_missing_vnet_service_endpoint,omitempty"` // The resource ID of the SQL Server to which this SQL virtual network rule will be applied. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta1.MSSQLServer + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta2.MSSQLServer // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() // +kubebuilder:validation:Optional ServerID *string `json:"serverId,omitempty" tf:"server_id,omitempty"` @@ -68,7 +68,7 @@ type MSSQLVirtualNetworkRuleParameters struct { ServerIDSelector *v1.Selector `json:"serverIdSelector,omitempty" tf:"-"` // The ID of the subnet from which the SQL server will accept communications. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.Subnet + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` diff --git a/apis/sql/v1beta2/zz_generated.conversion_hubs.go b/apis/sql/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 000000000..d74b7ad74 --- /dev/null +++ b/apis/sql/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *MSSQLDatabase) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *MSSQLElasticPool) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *MSSQLFailoverGroup) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *MSSQLManagedDatabase) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *MSSQLManagedInstance) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *MSSQLManagedInstanceFailoverGroup) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *MSSQLManagedInstanceVulnerabilityAssessment) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *MSSQLServer) Hub() {} + +// Hub marks this type as a conversion hub. 
+func (tr *MSSQLServerVulnerabilityAssessment) Hub() {} diff --git a/apis/sql/v1beta2/zz_generated.deepcopy.go b/apis/sql/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..661926489 --- /dev/null +++ b/apis/sql/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,5075 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureadAdministratorInitParameters) DeepCopyInto(out *AzureadAdministratorInitParameters) { + *out = *in + if in.AzureadAuthenticationOnly != nil { + in, out := &in.AzureadAuthenticationOnly, &out.AzureadAuthenticationOnly + *out = new(bool) + **out = **in + } + if in.LoginUsername != nil { + in, out := &in.LoginUsername, &out.LoginUsername + *out = new(string) + **out = **in + } + if in.LoginUsernameRef != nil { + in, out := &in.LoginUsernameRef, &out.LoginUsernameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LoginUsernameSelector != nil { + in, out := &in.LoginUsernameSelector, &out.LoginUsernameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ObjectID != nil { + in, out := &in.ObjectID, &out.ObjectID + *out = new(string) + **out = **in + } + if in.ObjectIDRef != nil { + in, out := &in.ObjectIDRef, &out.ObjectIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ObjectIDSelector != nil { + in, out := &in.ObjectIDSelector, &out.ObjectIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the 
receiver, creating a new AzureadAdministratorInitParameters. +func (in *AzureadAdministratorInitParameters) DeepCopy() *AzureadAdministratorInitParameters { + if in == nil { + return nil + } + out := new(AzureadAdministratorInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureadAdministratorObservation) DeepCopyInto(out *AzureadAdministratorObservation) { + *out = *in + if in.AzureadAuthenticationOnly != nil { + in, out := &in.AzureadAuthenticationOnly, &out.AzureadAuthenticationOnly + *out = new(bool) + **out = **in + } + if in.LoginUsername != nil { + in, out := &in.LoginUsername, &out.LoginUsername + *out = new(string) + **out = **in + } + if in.ObjectID != nil { + in, out := &in.ObjectID, &out.ObjectID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureadAdministratorObservation. +func (in *AzureadAdministratorObservation) DeepCopy() *AzureadAdministratorObservation { + if in == nil { + return nil + } + out := new(AzureadAdministratorObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AzureadAdministratorParameters) DeepCopyInto(out *AzureadAdministratorParameters) { + *out = *in + if in.AzureadAuthenticationOnly != nil { + in, out := &in.AzureadAuthenticationOnly, &out.AzureadAuthenticationOnly + *out = new(bool) + **out = **in + } + if in.LoginUsername != nil { + in, out := &in.LoginUsername, &out.LoginUsername + *out = new(string) + **out = **in + } + if in.LoginUsernameRef != nil { + in, out := &in.LoginUsernameRef, &out.LoginUsernameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LoginUsernameSelector != nil { + in, out := &in.LoginUsernameSelector, &out.LoginUsernameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ObjectID != nil { + in, out := &in.ObjectID, &out.ObjectID + *out = new(string) + **out = **in + } + if in.ObjectIDRef != nil { + in, out := &in.ObjectIDRef, &out.ObjectIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ObjectIDSelector != nil { + in, out := &in.ObjectIDSelector, &out.ObjectIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureadAdministratorParameters. +func (in *AzureadAdministratorParameters) DeepCopy() *AzureadAdministratorParameters { + if in == nil { + return nil + } + out := new(AzureadAdministratorParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentityInitParameters) DeepCopyInto(out *IdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityInitParameters. +func (in *IdentityInitParameters) DeepCopy() *IdentityInitParameters { + if in == nil { + return nil + } + out := new(IdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityObservation) DeepCopyInto(out *IdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityObservation. +func (in *IdentityObservation) DeepCopy() *IdentityObservation { + if in == nil { + return nil + } + out := new(IdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentityParameters) DeepCopyInto(out *IdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityParameters. +func (in *IdentityParameters) DeepCopy() *IdentityParameters { + if in == nil { + return nil + } + out := new(IdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImportInitParameters) DeepCopyInto(out *ImportInitParameters) { + *out = *in + if in.AdministratorLogin != nil { + in, out := &in.AdministratorLogin, &out.AdministratorLogin + *out = new(string) + **out = **in + } + if in.AuthenticationType != nil { + in, out := &in.AuthenticationType, &out.AuthenticationType + *out = new(string) + **out = **in + } + if in.StorageAccountID != nil { + in, out := &in.StorageAccountID, &out.StorageAccountID + *out = new(string) + **out = **in + } + if in.StorageKeyType != nil { + in, out := &in.StorageKeyType, &out.StorageKeyType + *out = new(string) + **out = **in + } + if in.StorageURI != nil { + in, out := &in.StorageURI, &out.StorageURI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImportInitParameters. +func (in *ImportInitParameters) DeepCopy() *ImportInitParameters { + if in == nil { + return nil + } + out := new(ImportInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImportObservation) DeepCopyInto(out *ImportObservation) { + *out = *in + if in.AdministratorLogin != nil { + in, out := &in.AdministratorLogin, &out.AdministratorLogin + *out = new(string) + **out = **in + } + if in.AuthenticationType != nil { + in, out := &in.AuthenticationType, &out.AuthenticationType + *out = new(string) + **out = **in + } + if in.StorageAccountID != nil { + in, out := &in.StorageAccountID, &out.StorageAccountID + *out = new(string) + **out = **in + } + if in.StorageKeyType != nil { + in, out := &in.StorageKeyType, &out.StorageKeyType + *out = new(string) + **out = **in + } + if in.StorageURI != nil { + in, out := &in.StorageURI, &out.StorageURI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImportObservation. +func (in *ImportObservation) DeepCopy() *ImportObservation { + if in == nil { + return nil + } + out := new(ImportObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImportParameters) DeepCopyInto(out *ImportParameters) { + *out = *in + if in.AdministratorLogin != nil { + in, out := &in.AdministratorLogin, &out.AdministratorLogin + *out = new(string) + **out = **in + } + out.AdministratorLoginPasswordSecretRef = in.AdministratorLoginPasswordSecretRef + if in.AuthenticationType != nil { + in, out := &in.AuthenticationType, &out.AuthenticationType + *out = new(string) + **out = **in + } + if in.StorageAccountID != nil { + in, out := &in.StorageAccountID, &out.StorageAccountID + *out = new(string) + **out = **in + } + out.StorageKeySecretRef = in.StorageKeySecretRef + if in.StorageKeyType != nil { + in, out := &in.StorageKeyType, &out.StorageKeyType + *out = new(string) + **out = **in + } + if in.StorageURI != nil { + in, out := &in.StorageURI, &out.StorageURI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImportParameters. +func (in *ImportParameters) DeepCopy() *ImportParameters { + if in == nil { + return nil + } + out := new(ImportParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LongTermRetentionPolicyInitParameters) DeepCopyInto(out *LongTermRetentionPolicyInitParameters) { + *out = *in + if in.ImmutableBackupsEnabled != nil { + in, out := &in.ImmutableBackupsEnabled, &out.ImmutableBackupsEnabled + *out = new(bool) + **out = **in + } + if in.MonthlyRetention != nil { + in, out := &in.MonthlyRetention, &out.MonthlyRetention + *out = new(string) + **out = **in + } + if in.WeekOfYear != nil { + in, out := &in.WeekOfYear, &out.WeekOfYear + *out = new(float64) + **out = **in + } + if in.WeeklyRetention != nil { + in, out := &in.WeeklyRetention, &out.WeeklyRetention + *out = new(string) + **out = **in + } + if in.YearlyRetention != nil { + in, out := &in.YearlyRetention, &out.YearlyRetention + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LongTermRetentionPolicyInitParameters. +func (in *LongTermRetentionPolicyInitParameters) DeepCopy() *LongTermRetentionPolicyInitParameters { + if in == nil { + return nil + } + out := new(LongTermRetentionPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LongTermRetentionPolicyObservation) DeepCopyInto(out *LongTermRetentionPolicyObservation) { + *out = *in + if in.ImmutableBackupsEnabled != nil { + in, out := &in.ImmutableBackupsEnabled, &out.ImmutableBackupsEnabled + *out = new(bool) + **out = **in + } + if in.MonthlyRetention != nil { + in, out := &in.MonthlyRetention, &out.MonthlyRetention + *out = new(string) + **out = **in + } + if in.WeekOfYear != nil { + in, out := &in.WeekOfYear, &out.WeekOfYear + *out = new(float64) + **out = **in + } + if in.WeeklyRetention != nil { + in, out := &in.WeeklyRetention, &out.WeeklyRetention + *out = new(string) + **out = **in + } + if in.YearlyRetention != nil { + in, out := &in.YearlyRetention, &out.YearlyRetention + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LongTermRetentionPolicyObservation. +func (in *LongTermRetentionPolicyObservation) DeepCopy() *LongTermRetentionPolicyObservation { + if in == nil { + return nil + } + out := new(LongTermRetentionPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LongTermRetentionPolicyParameters) DeepCopyInto(out *LongTermRetentionPolicyParameters) { + *out = *in + if in.ImmutableBackupsEnabled != nil { + in, out := &in.ImmutableBackupsEnabled, &out.ImmutableBackupsEnabled + *out = new(bool) + **out = **in + } + if in.MonthlyRetention != nil { + in, out := &in.MonthlyRetention, &out.MonthlyRetention + *out = new(string) + **out = **in + } + if in.WeekOfYear != nil { + in, out := &in.WeekOfYear, &out.WeekOfYear + *out = new(float64) + **out = **in + } + if in.WeeklyRetention != nil { + in, out := &in.WeeklyRetention, &out.WeeklyRetention + *out = new(string) + **out = **in + } + if in.YearlyRetention != nil { + in, out := &in.YearlyRetention, &out.YearlyRetention + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LongTermRetentionPolicyParameters. +func (in *LongTermRetentionPolicyParameters) DeepCopy() *LongTermRetentionPolicyParameters { + if in == nil { + return nil + } + out := new(LongTermRetentionPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MSSQLDatabase) DeepCopyInto(out *MSSQLDatabase) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLDatabase. +func (in *MSSQLDatabase) DeepCopy() *MSSQLDatabase { + if in == nil { + return nil + } + out := new(MSSQLDatabase) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *MSSQLDatabase) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MSSQLDatabaseInitParameters) DeepCopyInto(out *MSSQLDatabaseInitParameters) { + *out = *in + if in.AutoPauseDelayInMinutes != nil { + in, out := &in.AutoPauseDelayInMinutes, &out.AutoPauseDelayInMinutes + *out = new(float64) + **out = **in + } + if in.Collation != nil { + in, out := &in.Collation, &out.Collation + *out = new(string) + **out = **in + } + if in.CreateMode != nil { + in, out := &in.CreateMode, &out.CreateMode + *out = new(string) + **out = **in + } + if in.CreationSourceDatabaseID != nil { + in, out := &in.CreationSourceDatabaseID, &out.CreationSourceDatabaseID + *out = new(string) + **out = **in + } + if in.ElasticPoolID != nil { + in, out := &in.ElasticPoolID, &out.ElasticPoolID + *out = new(string) + **out = **in + } + if in.EnclaveType != nil { + in, out := &in.EnclaveType, &out.EnclaveType + *out = new(string) + **out = **in + } + if in.GeoBackupEnabled != nil { + in, out := &in.GeoBackupEnabled, &out.GeoBackupEnabled + *out = new(bool) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Import != nil { + in, out := &in.Import, &out.Import + *out = new(ImportInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LedgerEnabled != nil { + in, out := &in.LedgerEnabled, &out.LedgerEnabled + *out = new(bool) + **out = **in + } + if in.LicenseType != nil { + in, out := &in.LicenseType, &out.LicenseType + *out = new(string) + **out = **in + } + if in.LongTermRetentionPolicy != nil { + in, out := &in.LongTermRetentionPolicy, &out.LongTermRetentionPolicy + *out = new(LongTermRetentionPolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MaintenanceConfigurationName != nil { + in, out := 
&in.MaintenanceConfigurationName, &out.MaintenanceConfigurationName + *out = new(string) + **out = **in + } + if in.MaxSizeGb != nil { + in, out := &in.MaxSizeGb, &out.MaxSizeGb + *out = new(float64) + **out = **in + } + if in.MinCapacity != nil { + in, out := &in.MinCapacity, &out.MinCapacity + *out = new(float64) + **out = **in + } + if in.ReadReplicaCount != nil { + in, out := &in.ReadReplicaCount, &out.ReadReplicaCount + *out = new(float64) + **out = **in + } + if in.ReadScale != nil { + in, out := &in.ReadScale, &out.ReadScale + *out = new(bool) + **out = **in + } + if in.RecoverDatabaseID != nil { + in, out := &in.RecoverDatabaseID, &out.RecoverDatabaseID + *out = new(string) + **out = **in + } + if in.RecoveryPointID != nil { + in, out := &in.RecoveryPointID, &out.RecoveryPointID + *out = new(string) + **out = **in + } + if in.RestoreDroppedDatabaseID != nil { + in, out := &in.RestoreDroppedDatabaseID, &out.RestoreDroppedDatabaseID + *out = new(string) + **out = **in + } + if in.RestoreLongTermRetentionBackupID != nil { + in, out := &in.RestoreLongTermRetentionBackupID, &out.RestoreLongTermRetentionBackupID + *out = new(string) + **out = **in + } + if in.RestorePointInTime != nil { + in, out := &in.RestorePointInTime, &out.RestorePointInTime + *out = new(string) + **out = **in + } + if in.SampleName != nil { + in, out := &in.SampleName, &out.SampleName + *out = new(string) + **out = **in + } + if in.ShortTermRetentionPolicy != nil { + in, out := &in.ShortTermRetentionPolicy, &out.ShortTermRetentionPolicy + *out = new(ShortTermRetentionPolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.StorageAccountType != nil { + in, out := &in.StorageAccountType, &out.StorageAccountType + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var 
outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ThreatDetectionPolicy != nil { + in, out := &in.ThreatDetectionPolicy, &out.ThreatDetectionPolicy + *out = new(ThreatDetectionPolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TransparentDataEncryptionEnabled != nil { + in, out := &in.TransparentDataEncryptionEnabled, &out.TransparentDataEncryptionEnabled + *out = new(bool) + **out = **in + } + if in.TransparentDataEncryptionKeyAutomaticRotationEnabled != nil { + in, out := &in.TransparentDataEncryptionKeyAutomaticRotationEnabled, &out.TransparentDataEncryptionKeyAutomaticRotationEnabled + *out = new(bool) + **out = **in + } + if in.TransparentDataEncryptionKeyVaultKeyID != nil { + in, out := &in.TransparentDataEncryptionKeyVaultKeyID, &out.TransparentDataEncryptionKeyVaultKeyID + *out = new(string) + **out = **in + } + if in.TransparentDataEncryptionKeyVaultKeyIDRef != nil { + in, out := &in.TransparentDataEncryptionKeyVaultKeyIDRef, &out.TransparentDataEncryptionKeyVaultKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.TransparentDataEncryptionKeyVaultKeyIDSelector != nil { + in, out := &in.TransparentDataEncryptionKeyVaultKeyIDSelector, &out.TransparentDataEncryptionKeyVaultKeyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ZoneRedundant != nil { + in, out := &in.ZoneRedundant, &out.ZoneRedundant + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLDatabaseInitParameters. +func (in *MSSQLDatabaseInitParameters) DeepCopy() *MSSQLDatabaseInitParameters { + if in == nil { + return nil + } + out := new(MSSQLDatabaseInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *MSSQLDatabaseList) DeepCopyInto(out *MSSQLDatabaseList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MSSQLDatabase, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLDatabaseList. +func (in *MSSQLDatabaseList) DeepCopy() *MSSQLDatabaseList { + if in == nil { + return nil + } + out := new(MSSQLDatabaseList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MSSQLDatabaseList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MSSQLDatabaseObservation) DeepCopyInto(out *MSSQLDatabaseObservation) { + *out = *in + if in.AutoPauseDelayInMinutes != nil { + in, out := &in.AutoPauseDelayInMinutes, &out.AutoPauseDelayInMinutes + *out = new(float64) + **out = **in + } + if in.Collation != nil { + in, out := &in.Collation, &out.Collation + *out = new(string) + **out = **in + } + if in.CreateMode != nil { + in, out := &in.CreateMode, &out.CreateMode + *out = new(string) + **out = **in + } + if in.CreationSourceDatabaseID != nil { + in, out := &in.CreationSourceDatabaseID, &out.CreationSourceDatabaseID + *out = new(string) + **out = **in + } + if in.ElasticPoolID != nil { + in, out := &in.ElasticPoolID, &out.ElasticPoolID + *out = new(string) + **out = **in + } + if in.EnclaveType != nil { + in, out := &in.EnclaveType, &out.EnclaveType + *out = new(string) + **out = **in + } + if in.GeoBackupEnabled != nil { + in, out := &in.GeoBackupEnabled, &out.GeoBackupEnabled + *out = new(bool) + **out = **in + } + if in.ID != nil { + 
in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.Import != nil { + in, out := &in.Import, &out.Import + *out = new(ImportObservation) + (*in).DeepCopyInto(*out) + } + if in.LedgerEnabled != nil { + in, out := &in.LedgerEnabled, &out.LedgerEnabled + *out = new(bool) + **out = **in + } + if in.LicenseType != nil { + in, out := &in.LicenseType, &out.LicenseType + *out = new(string) + **out = **in + } + if in.LongTermRetentionPolicy != nil { + in, out := &in.LongTermRetentionPolicy, &out.LongTermRetentionPolicy + *out = new(LongTermRetentionPolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.MaintenanceConfigurationName != nil { + in, out := &in.MaintenanceConfigurationName, &out.MaintenanceConfigurationName + *out = new(string) + **out = **in + } + if in.MaxSizeGb != nil { + in, out := &in.MaxSizeGb, &out.MaxSizeGb + *out = new(float64) + **out = **in + } + if in.MinCapacity != nil { + in, out := &in.MinCapacity, &out.MinCapacity + *out = new(float64) + **out = **in + } + if in.ReadReplicaCount != nil { + in, out := &in.ReadReplicaCount, &out.ReadReplicaCount + *out = new(float64) + **out = **in + } + if in.ReadScale != nil { + in, out := &in.ReadScale, &out.ReadScale + *out = new(bool) + **out = **in + } + if in.RecoverDatabaseID != nil { + in, out := &in.RecoverDatabaseID, &out.RecoverDatabaseID + *out = new(string) + **out = **in + } + if in.RecoveryPointID != nil { + in, out := &in.RecoveryPointID, &out.RecoveryPointID + *out = new(string) + **out = **in + } + if in.RestoreDroppedDatabaseID != nil { + in, out := &in.RestoreDroppedDatabaseID, &out.RestoreDroppedDatabaseID + *out = new(string) + **out = **in + } + if in.RestoreLongTermRetentionBackupID != nil { + in, out := &in.RestoreLongTermRetentionBackupID, &out.RestoreLongTermRetentionBackupID + *out = new(string) + **out = **in + } + if 
in.RestorePointInTime != nil { + in, out := &in.RestorePointInTime, &out.RestorePointInTime + *out = new(string) + **out = **in + } + if in.SampleName != nil { + in, out := &in.SampleName, &out.SampleName + *out = new(string) + **out = **in + } + if in.ServerID != nil { + in, out := &in.ServerID, &out.ServerID + *out = new(string) + **out = **in + } + if in.ShortTermRetentionPolicy != nil { + in, out := &in.ShortTermRetentionPolicy, &out.ShortTermRetentionPolicy + *out = new(ShortTermRetentionPolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.StorageAccountType != nil { + in, out := &in.StorageAccountType, &out.StorageAccountType + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ThreatDetectionPolicy != nil { + in, out := &in.ThreatDetectionPolicy, &out.ThreatDetectionPolicy + *out = new(ThreatDetectionPolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.TransparentDataEncryptionEnabled != nil { + in, out := &in.TransparentDataEncryptionEnabled, &out.TransparentDataEncryptionEnabled + *out = new(bool) + **out = **in + } + if in.TransparentDataEncryptionKeyAutomaticRotationEnabled != nil { + in, out := &in.TransparentDataEncryptionKeyAutomaticRotationEnabled, &out.TransparentDataEncryptionKeyAutomaticRotationEnabled + *out = new(bool) + **out = **in + } + if in.TransparentDataEncryptionKeyVaultKeyID != nil { + in, out := &in.TransparentDataEncryptionKeyVaultKeyID, &out.TransparentDataEncryptionKeyVaultKeyID + *out = new(string) + **out = **in + } + if in.ZoneRedundant != nil { + in, out := &in.ZoneRedundant, &out.ZoneRedundant + *out = 
new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLDatabaseObservation. +func (in *MSSQLDatabaseObservation) DeepCopy() *MSSQLDatabaseObservation { + if in == nil { + return nil + } + out := new(MSSQLDatabaseObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MSSQLDatabaseParameters) DeepCopyInto(out *MSSQLDatabaseParameters) { + *out = *in + if in.AutoPauseDelayInMinutes != nil { + in, out := &in.AutoPauseDelayInMinutes, &out.AutoPauseDelayInMinutes + *out = new(float64) + **out = **in + } + if in.Collation != nil { + in, out := &in.Collation, &out.Collation + *out = new(string) + **out = **in + } + if in.CreateMode != nil { + in, out := &in.CreateMode, &out.CreateMode + *out = new(string) + **out = **in + } + if in.CreationSourceDatabaseID != nil { + in, out := &in.CreationSourceDatabaseID, &out.CreationSourceDatabaseID + *out = new(string) + **out = **in + } + if in.ElasticPoolID != nil { + in, out := &in.ElasticPoolID, &out.ElasticPoolID + *out = new(string) + **out = **in + } + if in.EnclaveType != nil { + in, out := &in.EnclaveType, &out.EnclaveType + *out = new(string) + **out = **in + } + if in.GeoBackupEnabled != nil { + in, out := &in.GeoBackupEnabled, &out.GeoBackupEnabled + *out = new(bool) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.Import != nil { + in, out := &in.Import, &out.Import + *out = new(ImportParameters) + (*in).DeepCopyInto(*out) + } + if in.LedgerEnabled != nil { + in, out := &in.LedgerEnabled, &out.LedgerEnabled + *out = new(bool) + **out = **in + } + if in.LicenseType != nil { + in, out := &in.LicenseType, &out.LicenseType + *out = new(string) + **out = **in + } + if in.LongTermRetentionPolicy != nil { + in, out := 
&in.LongTermRetentionPolicy, &out.LongTermRetentionPolicy + *out = new(LongTermRetentionPolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.MaintenanceConfigurationName != nil { + in, out := &in.MaintenanceConfigurationName, &out.MaintenanceConfigurationName + *out = new(string) + **out = **in + } + if in.MaxSizeGb != nil { + in, out := &in.MaxSizeGb, &out.MaxSizeGb + *out = new(float64) + **out = **in + } + if in.MinCapacity != nil { + in, out := &in.MinCapacity, &out.MinCapacity + *out = new(float64) + **out = **in + } + if in.ReadReplicaCount != nil { + in, out := &in.ReadReplicaCount, &out.ReadReplicaCount + *out = new(float64) + **out = **in + } + if in.ReadScale != nil { + in, out := &in.ReadScale, &out.ReadScale + *out = new(bool) + **out = **in + } + if in.RecoverDatabaseID != nil { + in, out := &in.RecoverDatabaseID, &out.RecoverDatabaseID + *out = new(string) + **out = **in + } + if in.RecoveryPointID != nil { + in, out := &in.RecoveryPointID, &out.RecoveryPointID + *out = new(string) + **out = **in + } + if in.RestoreDroppedDatabaseID != nil { + in, out := &in.RestoreDroppedDatabaseID, &out.RestoreDroppedDatabaseID + *out = new(string) + **out = **in + } + if in.RestoreLongTermRetentionBackupID != nil { + in, out := &in.RestoreLongTermRetentionBackupID, &out.RestoreLongTermRetentionBackupID + *out = new(string) + **out = **in + } + if in.RestorePointInTime != nil { + in, out := &in.RestorePointInTime, &out.RestorePointInTime + *out = new(string) + **out = **in + } + if in.SampleName != nil { + in, out := &in.SampleName, &out.SampleName + *out = new(string) + **out = **in + } + if in.ServerID != nil { + in, out := &in.ServerID, &out.ServerID + *out = new(string) + **out = **in + } + if in.ServerIDRef != nil { + in, out := &in.ServerIDRef, &out.ServerIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServerIDSelector != nil { + in, out := &in.ServerIDSelector, &out.ServerIDSelector + *out = new(v1.Selector) + 
(*in).DeepCopyInto(*out) + } + if in.ShortTermRetentionPolicy != nil { + in, out := &in.ShortTermRetentionPolicy, &out.ShortTermRetentionPolicy + *out = new(ShortTermRetentionPolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.StorageAccountType != nil { + in, out := &in.StorageAccountType, &out.StorageAccountType + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ThreatDetectionPolicy != nil { + in, out := &in.ThreatDetectionPolicy, &out.ThreatDetectionPolicy + *out = new(ThreatDetectionPolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.TransparentDataEncryptionEnabled != nil { + in, out := &in.TransparentDataEncryptionEnabled, &out.TransparentDataEncryptionEnabled + *out = new(bool) + **out = **in + } + if in.TransparentDataEncryptionKeyAutomaticRotationEnabled != nil { + in, out := &in.TransparentDataEncryptionKeyAutomaticRotationEnabled, &out.TransparentDataEncryptionKeyAutomaticRotationEnabled + *out = new(bool) + **out = **in + } + if in.TransparentDataEncryptionKeyVaultKeyID != nil { + in, out := &in.TransparentDataEncryptionKeyVaultKeyID, &out.TransparentDataEncryptionKeyVaultKeyID + *out = new(string) + **out = **in + } + if in.TransparentDataEncryptionKeyVaultKeyIDRef != nil { + in, out := &in.TransparentDataEncryptionKeyVaultKeyIDRef, &out.TransparentDataEncryptionKeyVaultKeyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.TransparentDataEncryptionKeyVaultKeyIDSelector != nil { + in, out := &in.TransparentDataEncryptionKeyVaultKeyIDSelector, &out.TransparentDataEncryptionKeyVaultKeyIDSelector + *out = 
new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ZoneRedundant != nil { + in, out := &in.ZoneRedundant, &out.ZoneRedundant + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLDatabaseParameters. +func (in *MSSQLDatabaseParameters) DeepCopy() *MSSQLDatabaseParameters { + if in == nil { + return nil + } + out := new(MSSQLDatabaseParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MSSQLDatabaseSpec) DeepCopyInto(out *MSSQLDatabaseSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLDatabaseSpec. +func (in *MSSQLDatabaseSpec) DeepCopy() *MSSQLDatabaseSpec { + if in == nil { + return nil + } + out := new(MSSQLDatabaseSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MSSQLDatabaseStatus) DeepCopyInto(out *MSSQLDatabaseStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLDatabaseStatus. +func (in *MSSQLDatabaseStatus) DeepCopy() *MSSQLDatabaseStatus { + if in == nil { + return nil + } + out := new(MSSQLDatabaseStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MSSQLElasticPool) DeepCopyInto(out *MSSQLElasticPool) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLElasticPool. +func (in *MSSQLElasticPool) DeepCopy() *MSSQLElasticPool { + if in == nil { + return nil + } + out := new(MSSQLElasticPool) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MSSQLElasticPool) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MSSQLElasticPoolInitParameters) DeepCopyInto(out *MSSQLElasticPoolInitParameters) { + *out = *in + if in.EnclaveType != nil { + in, out := &in.EnclaveType, &out.EnclaveType + *out = new(string) + **out = **in + } + if in.LicenseType != nil { + in, out := &in.LicenseType, &out.LicenseType + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MaintenanceConfigurationName != nil { + in, out := &in.MaintenanceConfigurationName, &out.MaintenanceConfigurationName + *out = new(string) + **out = **in + } + if in.MaxSizeBytes != nil { + in, out := &in.MaxSizeBytes, &out.MaxSizeBytes + *out = new(float64) + **out = **in + } + if in.MaxSizeGb != nil { + in, out := &in.MaxSizeGb, &out.MaxSizeGb + *out = new(float64) + **out = **in + } + if in.PerDatabaseSettings != nil { + in, out := &in.PerDatabaseSettings, &out.PerDatabaseSettings + *out = new(PerDatabaseSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(SkuInitParameters) + (*in).DeepCopyInto(*out) 
+ } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ZoneRedundant != nil { + in, out := &in.ZoneRedundant, &out.ZoneRedundant + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLElasticPoolInitParameters. +func (in *MSSQLElasticPoolInitParameters) DeepCopy() *MSSQLElasticPoolInitParameters { + if in == nil { + return nil + } + out := new(MSSQLElasticPoolInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MSSQLElasticPoolList) DeepCopyInto(out *MSSQLElasticPoolList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MSSQLElasticPool, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLElasticPoolList. +func (in *MSSQLElasticPoolList) DeepCopy() *MSSQLElasticPoolList { + if in == nil { + return nil + } + out := new(MSSQLElasticPoolList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MSSQLElasticPoolList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MSSQLElasticPoolObservation) DeepCopyInto(out *MSSQLElasticPoolObservation) { + *out = *in + if in.EnclaveType != nil { + in, out := &in.EnclaveType, &out.EnclaveType + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LicenseType != nil { + in, out := &in.LicenseType, &out.LicenseType + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MaintenanceConfigurationName != nil { + in, out := &in.MaintenanceConfigurationName, &out.MaintenanceConfigurationName + *out = new(string) + **out = **in + } + if in.MaxSizeBytes != nil { + in, out := &in.MaxSizeBytes, &out.MaxSizeBytes + *out = new(float64) + **out = **in + } + if in.MaxSizeGb != nil { + in, out := &in.MaxSizeGb, &out.MaxSizeGb + *out = new(float64) + **out = **in + } + if in.PerDatabaseSettings != nil { + in, out := &in.PerDatabaseSettings, &out.PerDatabaseSettings + *out = new(PerDatabaseSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ServerName != nil { + in, out := &in.ServerName, &out.ServerName + *out = new(string) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(SkuObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ZoneRedundant != nil { + in, out := &in.ZoneRedundant, &out.ZoneRedundant + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
MSSQLElasticPoolObservation. +func (in *MSSQLElasticPoolObservation) DeepCopy() *MSSQLElasticPoolObservation { + if in == nil { + return nil + } + out := new(MSSQLElasticPoolObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MSSQLElasticPoolParameters) DeepCopyInto(out *MSSQLElasticPoolParameters) { + *out = *in + if in.EnclaveType != nil { + in, out := &in.EnclaveType, &out.EnclaveType + *out = new(string) + **out = **in + } + if in.LicenseType != nil { + in, out := &in.LicenseType, &out.LicenseType + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MaintenanceConfigurationName != nil { + in, out := &in.MaintenanceConfigurationName, &out.MaintenanceConfigurationName + *out = new(string) + **out = **in + } + if in.MaxSizeBytes != nil { + in, out := &in.MaxSizeBytes, &out.MaxSizeBytes + *out = new(float64) + **out = **in + } + if in.MaxSizeGb != nil { + in, out := &in.MaxSizeGb, &out.MaxSizeGb + *out = new(float64) + **out = **in + } + if in.PerDatabaseSettings != nil { + in, out := &in.PerDatabaseSettings, &out.PerDatabaseSettings + *out = new(PerDatabaseSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ServerName != nil { + in, out := &in.ServerName, &out.ServerName + *out = new(string) + **out = **in + } + if in.ServerNameRef != nil { + in, out := 
&in.ServerNameRef, &out.ServerNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServerNameSelector != nil { + in, out := &in.ServerNameSelector, &out.ServerNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(SkuParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ZoneRedundant != nil { + in, out := &in.ZoneRedundant, &out.ZoneRedundant + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLElasticPoolParameters. +func (in *MSSQLElasticPoolParameters) DeepCopy() *MSSQLElasticPoolParameters { + if in == nil { + return nil + } + out := new(MSSQLElasticPoolParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MSSQLElasticPoolSpec) DeepCopyInto(out *MSSQLElasticPoolSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLElasticPoolSpec. +func (in *MSSQLElasticPoolSpec) DeepCopy() *MSSQLElasticPoolSpec { + if in == nil { + return nil + } + out := new(MSSQLElasticPoolSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MSSQLElasticPoolStatus) DeepCopyInto(out *MSSQLElasticPoolStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLElasticPoolStatus. +func (in *MSSQLElasticPoolStatus) DeepCopy() *MSSQLElasticPoolStatus { + if in == nil { + return nil + } + out := new(MSSQLElasticPoolStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MSSQLFailoverGroup) DeepCopyInto(out *MSSQLFailoverGroup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLFailoverGroup. +func (in *MSSQLFailoverGroup) DeepCopy() *MSSQLFailoverGroup { + if in == nil { + return nil + } + out := new(MSSQLFailoverGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MSSQLFailoverGroup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MSSQLFailoverGroupInitParameters) DeepCopyInto(out *MSSQLFailoverGroupInitParameters) { + *out = *in + if in.Databases != nil { + in, out := &in.Databases, &out.Databases + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DatabasesRefs != nil { + in, out := &in.DatabasesRefs, &out.DatabasesRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DatabasesSelector != nil { + in, out := &in.DatabasesSelector, &out.DatabasesSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PartnerServer != nil { + in, out := &in.PartnerServer, &out.PartnerServer + *out = make([]PartnerServerInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ReadWriteEndpointFailoverPolicy != nil { + in, out := &in.ReadWriteEndpointFailoverPolicy, &out.ReadWriteEndpointFailoverPolicy + *out = new(ReadWriteEndpointFailoverPolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ReadonlyEndpointFailoverPolicyEnabled != nil { + in, out := &in.ReadonlyEndpointFailoverPolicyEnabled, &out.ReadonlyEndpointFailoverPolicyEnabled + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLFailoverGroupInitParameters. 
+func (in *MSSQLFailoverGroupInitParameters) DeepCopy() *MSSQLFailoverGroupInitParameters { + if in == nil { + return nil + } + out := new(MSSQLFailoverGroupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MSSQLFailoverGroupList) DeepCopyInto(out *MSSQLFailoverGroupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MSSQLFailoverGroup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLFailoverGroupList. +func (in *MSSQLFailoverGroupList) DeepCopy() *MSSQLFailoverGroupList { + if in == nil { + return nil + } + out := new(MSSQLFailoverGroupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MSSQLFailoverGroupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MSSQLFailoverGroupObservation) DeepCopyInto(out *MSSQLFailoverGroupObservation) { + *out = *in + if in.Databases != nil { + in, out := &in.Databases, &out.Databases + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.PartnerServer != nil { + in, out := &in.PartnerServer, &out.PartnerServer + *out = make([]PartnerServerObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ReadWriteEndpointFailoverPolicy != nil { + in, out := &in.ReadWriteEndpointFailoverPolicy, &out.ReadWriteEndpointFailoverPolicy + *out = new(ReadWriteEndpointFailoverPolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.ReadonlyEndpointFailoverPolicyEnabled != nil { + in, out := &in.ReadonlyEndpointFailoverPolicyEnabled, &out.ReadonlyEndpointFailoverPolicyEnabled + *out = new(bool) + **out = **in + } + if in.ServerID != nil { + in, out := &in.ServerID, &out.ServerID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLFailoverGroupObservation. +func (in *MSSQLFailoverGroupObservation) DeepCopy() *MSSQLFailoverGroupObservation { + if in == nil { + return nil + } + out := new(MSSQLFailoverGroupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MSSQLFailoverGroupParameters) DeepCopyInto(out *MSSQLFailoverGroupParameters) { + *out = *in + if in.Databases != nil { + in, out := &in.Databases, &out.Databases + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DatabasesRefs != nil { + in, out := &in.DatabasesRefs, &out.DatabasesRefs + *out = make([]v1.Reference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DatabasesSelector != nil { + in, out := &in.DatabasesSelector, &out.DatabasesSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PartnerServer != nil { + in, out := &in.PartnerServer, &out.PartnerServer + *out = make([]PartnerServerParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ReadWriteEndpointFailoverPolicy != nil { + in, out := &in.ReadWriteEndpointFailoverPolicy, &out.ReadWriteEndpointFailoverPolicy + *out = new(ReadWriteEndpointFailoverPolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.ReadonlyEndpointFailoverPolicyEnabled != nil { + in, out := &in.ReadonlyEndpointFailoverPolicyEnabled, &out.ReadonlyEndpointFailoverPolicyEnabled + *out = new(bool) + **out = **in + } + if in.ServerID != nil { + in, out := &in.ServerID, &out.ServerID + *out = new(string) + **out = **in + } + if in.ServerIDRef != nil { + in, out := &in.ServerIDRef, &out.ServerIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServerIDSelector != nil { + in, out := &in.ServerIDSelector, &out.ServerIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } 
+ } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLFailoverGroupParameters. +func (in *MSSQLFailoverGroupParameters) DeepCopy() *MSSQLFailoverGroupParameters { + if in == nil { + return nil + } + out := new(MSSQLFailoverGroupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MSSQLFailoverGroupSpec) DeepCopyInto(out *MSSQLFailoverGroupSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLFailoverGroupSpec. +func (in *MSSQLFailoverGroupSpec) DeepCopy() *MSSQLFailoverGroupSpec { + if in == nil { + return nil + } + out := new(MSSQLFailoverGroupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MSSQLFailoverGroupStatus) DeepCopyInto(out *MSSQLFailoverGroupStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLFailoverGroupStatus. +func (in *MSSQLFailoverGroupStatus) DeepCopy() *MSSQLFailoverGroupStatus { + if in == nil { + return nil + } + out := new(MSSQLFailoverGroupStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MSSQLManagedDatabase) DeepCopyInto(out *MSSQLManagedDatabase) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLManagedDatabase. +func (in *MSSQLManagedDatabase) DeepCopy() *MSSQLManagedDatabase { + if in == nil { + return nil + } + out := new(MSSQLManagedDatabase) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MSSQLManagedDatabase) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MSSQLManagedDatabaseInitParameters) DeepCopyInto(out *MSSQLManagedDatabaseInitParameters) { + *out = *in + if in.LongTermRetentionPolicy != nil { + in, out := &in.LongTermRetentionPolicy, &out.LongTermRetentionPolicy + *out = new(MSSQLManagedDatabaseLongTermRetentionPolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PointInTimeRestore != nil { + in, out := &in.PointInTimeRestore, &out.PointInTimeRestore + *out = new(PointInTimeRestoreInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ShortTermRetentionDays != nil { + in, out := &in.ShortTermRetentionDays, &out.ShortTermRetentionDays + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLManagedDatabaseInitParameters. +func (in *MSSQLManagedDatabaseInitParameters) DeepCopy() *MSSQLManagedDatabaseInitParameters { + if in == nil { + return nil + } + out := new(MSSQLManagedDatabaseInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *MSSQLManagedDatabaseList) DeepCopyInto(out *MSSQLManagedDatabaseList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MSSQLManagedDatabase, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLManagedDatabaseList. +func (in *MSSQLManagedDatabaseList) DeepCopy() *MSSQLManagedDatabaseList { + if in == nil { + return nil + } + out := new(MSSQLManagedDatabaseList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MSSQLManagedDatabaseList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MSSQLManagedDatabaseLongTermRetentionPolicyInitParameters) DeepCopyInto(out *MSSQLManagedDatabaseLongTermRetentionPolicyInitParameters) { + *out = *in + if in.ImmutableBackupsEnabled != nil { + in, out := &in.ImmutableBackupsEnabled, &out.ImmutableBackupsEnabled + *out = new(bool) + **out = **in + } + if in.MonthlyRetention != nil { + in, out := &in.MonthlyRetention, &out.MonthlyRetention + *out = new(string) + **out = **in + } + if in.WeekOfYear != nil { + in, out := &in.WeekOfYear, &out.WeekOfYear + *out = new(float64) + **out = **in + } + if in.WeeklyRetention != nil { + in, out := &in.WeeklyRetention, &out.WeeklyRetention + *out = new(string) + **out = **in + } + if in.YearlyRetention != nil { + in, out := &in.YearlyRetention, &out.YearlyRetention + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLManagedDatabaseLongTermRetentionPolicyInitParameters. +func (in *MSSQLManagedDatabaseLongTermRetentionPolicyInitParameters) DeepCopy() *MSSQLManagedDatabaseLongTermRetentionPolicyInitParameters { + if in == nil { + return nil + } + out := new(MSSQLManagedDatabaseLongTermRetentionPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MSSQLManagedDatabaseLongTermRetentionPolicyObservation) DeepCopyInto(out *MSSQLManagedDatabaseLongTermRetentionPolicyObservation) { + *out = *in + if in.ImmutableBackupsEnabled != nil { + in, out := &in.ImmutableBackupsEnabled, &out.ImmutableBackupsEnabled + *out = new(bool) + **out = **in + } + if in.MonthlyRetention != nil { + in, out := &in.MonthlyRetention, &out.MonthlyRetention + *out = new(string) + **out = **in + } + if in.WeekOfYear != nil { + in, out := &in.WeekOfYear, &out.WeekOfYear + *out = new(float64) + **out = **in + } + if in.WeeklyRetention != nil { + in, out := &in.WeeklyRetention, &out.WeeklyRetention + *out = new(string) + **out = **in + } + if in.YearlyRetention != nil { + in, out := &in.YearlyRetention, &out.YearlyRetention + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLManagedDatabaseLongTermRetentionPolicyObservation. +func (in *MSSQLManagedDatabaseLongTermRetentionPolicyObservation) DeepCopy() *MSSQLManagedDatabaseLongTermRetentionPolicyObservation { + if in == nil { + return nil + } + out := new(MSSQLManagedDatabaseLongTermRetentionPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MSSQLManagedDatabaseLongTermRetentionPolicyParameters) DeepCopyInto(out *MSSQLManagedDatabaseLongTermRetentionPolicyParameters) { + *out = *in + if in.ImmutableBackupsEnabled != nil { + in, out := &in.ImmutableBackupsEnabled, &out.ImmutableBackupsEnabled + *out = new(bool) + **out = **in + } + if in.MonthlyRetention != nil { + in, out := &in.MonthlyRetention, &out.MonthlyRetention + *out = new(string) + **out = **in + } + if in.WeekOfYear != nil { + in, out := &in.WeekOfYear, &out.WeekOfYear + *out = new(float64) + **out = **in + } + if in.WeeklyRetention != nil { + in, out := &in.WeeklyRetention, &out.WeeklyRetention + *out = new(string) + **out = **in + } + if in.YearlyRetention != nil { + in, out := &in.YearlyRetention, &out.YearlyRetention + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLManagedDatabaseLongTermRetentionPolicyParameters. +func (in *MSSQLManagedDatabaseLongTermRetentionPolicyParameters) DeepCopy() *MSSQLManagedDatabaseLongTermRetentionPolicyParameters { + if in == nil { + return nil + } + out := new(MSSQLManagedDatabaseLongTermRetentionPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MSSQLManagedDatabaseObservation) DeepCopyInto(out *MSSQLManagedDatabaseObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LongTermRetentionPolicy != nil { + in, out := &in.LongTermRetentionPolicy, &out.LongTermRetentionPolicy + *out = new(MSSQLManagedDatabaseLongTermRetentionPolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.ManagedInstanceID != nil { + in, out := &in.ManagedInstanceID, &out.ManagedInstanceID + *out = new(string) + **out = **in + } + if in.PointInTimeRestore != nil { + in, out := &in.PointInTimeRestore, &out.PointInTimeRestore + *out = new(PointInTimeRestoreObservation) + (*in).DeepCopyInto(*out) + } + if in.ShortTermRetentionDays != nil { + in, out := &in.ShortTermRetentionDays, &out.ShortTermRetentionDays + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLManagedDatabaseObservation. +func (in *MSSQLManagedDatabaseObservation) DeepCopy() *MSSQLManagedDatabaseObservation { + if in == nil { + return nil + } + out := new(MSSQLManagedDatabaseObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MSSQLManagedDatabaseParameters) DeepCopyInto(out *MSSQLManagedDatabaseParameters) { + *out = *in + if in.LongTermRetentionPolicy != nil { + in, out := &in.LongTermRetentionPolicy, &out.LongTermRetentionPolicy + *out = new(MSSQLManagedDatabaseLongTermRetentionPolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.ManagedInstanceID != nil { + in, out := &in.ManagedInstanceID, &out.ManagedInstanceID + *out = new(string) + **out = **in + } + if in.ManagedInstanceIDRef != nil { + in, out := &in.ManagedInstanceIDRef, &out.ManagedInstanceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ManagedInstanceIDSelector != nil { + in, out := &in.ManagedInstanceIDSelector, &out.ManagedInstanceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PointInTimeRestore != nil { + in, out := &in.PointInTimeRestore, &out.PointInTimeRestore + *out = new(PointInTimeRestoreParameters) + (*in).DeepCopyInto(*out) + } + if in.ShortTermRetentionDays != nil { + in, out := &in.ShortTermRetentionDays, &out.ShortTermRetentionDays + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLManagedDatabaseParameters. +func (in *MSSQLManagedDatabaseParameters) DeepCopy() *MSSQLManagedDatabaseParameters { + if in == nil { + return nil + } + out := new(MSSQLManagedDatabaseParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MSSQLManagedDatabaseSpec) DeepCopyInto(out *MSSQLManagedDatabaseSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLManagedDatabaseSpec. 
+func (in *MSSQLManagedDatabaseSpec) DeepCopy() *MSSQLManagedDatabaseSpec { + if in == nil { + return nil + } + out := new(MSSQLManagedDatabaseSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MSSQLManagedDatabaseStatus) DeepCopyInto(out *MSSQLManagedDatabaseStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLManagedDatabaseStatus. +func (in *MSSQLManagedDatabaseStatus) DeepCopy() *MSSQLManagedDatabaseStatus { + if in == nil { + return nil + } + out := new(MSSQLManagedDatabaseStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MSSQLManagedInstance) DeepCopyInto(out *MSSQLManagedInstance) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLManagedInstance. +func (in *MSSQLManagedInstance) DeepCopy() *MSSQLManagedInstance { + if in == nil { + return nil + } + out := new(MSSQLManagedInstance) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MSSQLManagedInstance) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MSSQLManagedInstanceFailoverGroup) DeepCopyInto(out *MSSQLManagedInstanceFailoverGroup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLManagedInstanceFailoverGroup. +func (in *MSSQLManagedInstanceFailoverGroup) DeepCopy() *MSSQLManagedInstanceFailoverGroup { + if in == nil { + return nil + } + out := new(MSSQLManagedInstanceFailoverGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MSSQLManagedInstanceFailoverGroup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MSSQLManagedInstanceFailoverGroupInitParameters) DeepCopyInto(out *MSSQLManagedInstanceFailoverGroupInitParameters) { + *out = *in + if in.ManagedInstanceID != nil { + in, out := &in.ManagedInstanceID, &out.ManagedInstanceID + *out = new(string) + **out = **in + } + if in.ManagedInstanceIDRef != nil { + in, out := &in.ManagedInstanceIDRef, &out.ManagedInstanceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ManagedInstanceIDSelector != nil { + in, out := &in.ManagedInstanceIDSelector, &out.ManagedInstanceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PartnerManagedInstanceID != nil { + in, out := &in.PartnerManagedInstanceID, &out.PartnerManagedInstanceID + *out = new(string) + **out = **in + } + if in.PartnerManagedInstanceIDRef != nil { + in, out := &in.PartnerManagedInstanceIDRef, &out.PartnerManagedInstanceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PartnerManagedInstanceIDSelector != nil { + in, out := 
&in.PartnerManagedInstanceIDSelector, &out.PartnerManagedInstanceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ReadWriteEndpointFailoverPolicy != nil { + in, out := &in.ReadWriteEndpointFailoverPolicy, &out.ReadWriteEndpointFailoverPolicy + *out = new(MSSQLManagedInstanceFailoverGroupReadWriteEndpointFailoverPolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ReadonlyEndpointFailoverPolicyEnabled != nil { + in, out := &in.ReadonlyEndpointFailoverPolicyEnabled, &out.ReadonlyEndpointFailoverPolicyEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLManagedInstanceFailoverGroupInitParameters. +func (in *MSSQLManagedInstanceFailoverGroupInitParameters) DeepCopy() *MSSQLManagedInstanceFailoverGroupInitParameters { + if in == nil { + return nil + } + out := new(MSSQLManagedInstanceFailoverGroupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MSSQLManagedInstanceFailoverGroupList) DeepCopyInto(out *MSSQLManagedInstanceFailoverGroupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MSSQLManagedInstanceFailoverGroup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLManagedInstanceFailoverGroupList. +func (in *MSSQLManagedInstanceFailoverGroupList) DeepCopy() *MSSQLManagedInstanceFailoverGroupList { + if in == nil { + return nil + } + out := new(MSSQLManagedInstanceFailoverGroupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *MSSQLManagedInstanceFailoverGroupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MSSQLManagedInstanceFailoverGroupObservation) DeepCopyInto(out *MSSQLManagedInstanceFailoverGroupObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ManagedInstanceID != nil { + in, out := &in.ManagedInstanceID, &out.ManagedInstanceID + *out = new(string) + **out = **in + } + if in.PartnerManagedInstanceID != nil { + in, out := &in.PartnerManagedInstanceID, &out.PartnerManagedInstanceID + *out = new(string) + **out = **in + } + if in.PartnerRegion != nil { + in, out := &in.PartnerRegion, &out.PartnerRegion + *out = make([]PartnerRegionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ReadWriteEndpointFailoverPolicy != nil { + in, out := &in.ReadWriteEndpointFailoverPolicy, &out.ReadWriteEndpointFailoverPolicy + *out = new(MSSQLManagedInstanceFailoverGroupReadWriteEndpointFailoverPolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.ReadonlyEndpointFailoverPolicyEnabled != nil { + in, out := &in.ReadonlyEndpointFailoverPolicyEnabled, &out.ReadonlyEndpointFailoverPolicyEnabled + *out = new(bool) + **out = **in + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLManagedInstanceFailoverGroupObservation. 
+func (in *MSSQLManagedInstanceFailoverGroupObservation) DeepCopy() *MSSQLManagedInstanceFailoverGroupObservation { + if in == nil { + return nil + } + out := new(MSSQLManagedInstanceFailoverGroupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MSSQLManagedInstanceFailoverGroupParameters) DeepCopyInto(out *MSSQLManagedInstanceFailoverGroupParameters) { + *out = *in + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ManagedInstanceID != nil { + in, out := &in.ManagedInstanceID, &out.ManagedInstanceID + *out = new(string) + **out = **in + } + if in.ManagedInstanceIDRef != nil { + in, out := &in.ManagedInstanceIDRef, &out.ManagedInstanceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ManagedInstanceIDSelector != nil { + in, out := &in.ManagedInstanceIDSelector, &out.ManagedInstanceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PartnerManagedInstanceID != nil { + in, out := &in.PartnerManagedInstanceID, &out.PartnerManagedInstanceID + *out = new(string) + **out = **in + } + if in.PartnerManagedInstanceIDRef != nil { + in, out := &in.PartnerManagedInstanceIDRef, &out.PartnerManagedInstanceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.PartnerManagedInstanceIDSelector != nil { + in, out := &in.PartnerManagedInstanceIDSelector, &out.PartnerManagedInstanceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ReadWriteEndpointFailoverPolicy != nil { + in, out := &in.ReadWriteEndpointFailoverPolicy, &out.ReadWriteEndpointFailoverPolicy + *out = new(MSSQLManagedInstanceFailoverGroupReadWriteEndpointFailoverPolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.ReadonlyEndpointFailoverPolicyEnabled != nil { + in, out := &in.ReadonlyEndpointFailoverPolicyEnabled, 
&out.ReadonlyEndpointFailoverPolicyEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLManagedInstanceFailoverGroupParameters. +func (in *MSSQLManagedInstanceFailoverGroupParameters) DeepCopy() *MSSQLManagedInstanceFailoverGroupParameters { + if in == nil { + return nil + } + out := new(MSSQLManagedInstanceFailoverGroupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MSSQLManagedInstanceFailoverGroupReadWriteEndpointFailoverPolicyInitParameters) DeepCopyInto(out *MSSQLManagedInstanceFailoverGroupReadWriteEndpointFailoverPolicyInitParameters) { + *out = *in + if in.GraceMinutes != nil { + in, out := &in.GraceMinutes, &out.GraceMinutes + *out = new(float64) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLManagedInstanceFailoverGroupReadWriteEndpointFailoverPolicyInitParameters. +func (in *MSSQLManagedInstanceFailoverGroupReadWriteEndpointFailoverPolicyInitParameters) DeepCopy() *MSSQLManagedInstanceFailoverGroupReadWriteEndpointFailoverPolicyInitParameters { + if in == nil { + return nil + } + out := new(MSSQLManagedInstanceFailoverGroupReadWriteEndpointFailoverPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MSSQLManagedInstanceFailoverGroupReadWriteEndpointFailoverPolicyObservation) DeepCopyInto(out *MSSQLManagedInstanceFailoverGroupReadWriteEndpointFailoverPolicyObservation) { + *out = *in + if in.GraceMinutes != nil { + in, out := &in.GraceMinutes, &out.GraceMinutes + *out = new(float64) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLManagedInstanceFailoverGroupReadWriteEndpointFailoverPolicyObservation. +func (in *MSSQLManagedInstanceFailoverGroupReadWriteEndpointFailoverPolicyObservation) DeepCopy() *MSSQLManagedInstanceFailoverGroupReadWriteEndpointFailoverPolicyObservation { + if in == nil { + return nil + } + out := new(MSSQLManagedInstanceFailoverGroupReadWriteEndpointFailoverPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MSSQLManagedInstanceFailoverGroupReadWriteEndpointFailoverPolicyParameters) DeepCopyInto(out *MSSQLManagedInstanceFailoverGroupReadWriteEndpointFailoverPolicyParameters) { + *out = *in + if in.GraceMinutes != nil { + in, out := &in.GraceMinutes, &out.GraceMinutes + *out = new(float64) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLManagedInstanceFailoverGroupReadWriteEndpointFailoverPolicyParameters. 
+func (in *MSSQLManagedInstanceFailoverGroupReadWriteEndpointFailoverPolicyParameters) DeepCopy() *MSSQLManagedInstanceFailoverGroupReadWriteEndpointFailoverPolicyParameters { + if in == nil { + return nil + } + out := new(MSSQLManagedInstanceFailoverGroupReadWriteEndpointFailoverPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MSSQLManagedInstanceFailoverGroupSpec) DeepCopyInto(out *MSSQLManagedInstanceFailoverGroupSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLManagedInstanceFailoverGroupSpec. +func (in *MSSQLManagedInstanceFailoverGroupSpec) DeepCopy() *MSSQLManagedInstanceFailoverGroupSpec { + if in == nil { + return nil + } + out := new(MSSQLManagedInstanceFailoverGroupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MSSQLManagedInstanceFailoverGroupStatus) DeepCopyInto(out *MSSQLManagedInstanceFailoverGroupStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLManagedInstanceFailoverGroupStatus. +func (in *MSSQLManagedInstanceFailoverGroupStatus) DeepCopy() *MSSQLManagedInstanceFailoverGroupStatus { + if in == nil { + return nil + } + out := new(MSSQLManagedInstanceFailoverGroupStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MSSQLManagedInstanceIdentityInitParameters) DeepCopyInto(out *MSSQLManagedInstanceIdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLManagedInstanceIdentityInitParameters. +func (in *MSSQLManagedInstanceIdentityInitParameters) DeepCopy() *MSSQLManagedInstanceIdentityInitParameters { + if in == nil { + return nil + } + out := new(MSSQLManagedInstanceIdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MSSQLManagedInstanceIdentityObservation) DeepCopyInto(out *MSSQLManagedInstanceIdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLManagedInstanceIdentityObservation. 
+func (in *MSSQLManagedInstanceIdentityObservation) DeepCopy() *MSSQLManagedInstanceIdentityObservation { + if in == nil { + return nil + } + out := new(MSSQLManagedInstanceIdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MSSQLManagedInstanceIdentityParameters) DeepCopyInto(out *MSSQLManagedInstanceIdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLManagedInstanceIdentityParameters. +func (in *MSSQLManagedInstanceIdentityParameters) DeepCopy() *MSSQLManagedInstanceIdentityParameters { + if in == nil { + return nil + } + out := new(MSSQLManagedInstanceIdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MSSQLManagedInstanceInitParameters) DeepCopyInto(out *MSSQLManagedInstanceInitParameters) { + *out = *in + if in.AdministratorLogin != nil { + in, out := &in.AdministratorLogin, &out.AdministratorLogin + *out = new(string) + **out = **in + } + if in.Collation != nil { + in, out := &in.Collation, &out.Collation + *out = new(string) + **out = **in + } + if in.DNSZonePartnerID != nil { + in, out := &in.DNSZonePartnerID, &out.DNSZonePartnerID + *out = new(string) + **out = **in + } + if in.DNSZonePartnerIDRef != nil { + in, out := &in.DNSZonePartnerIDRef, &out.DNSZonePartnerIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.DNSZonePartnerIDSelector != nil { + in, out := &in.DNSZonePartnerIDSelector, &out.DNSZonePartnerIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(MSSQLManagedInstanceIdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LicenseType != nil { + in, out := &in.LicenseType, &out.LicenseType + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MaintenanceConfigurationName != nil { + in, out := &in.MaintenanceConfigurationName, &out.MaintenanceConfigurationName + *out = new(string) + **out = **in + } + if in.MinimumTLSVersion != nil { + in, out := &in.MinimumTLSVersion, &out.MinimumTLSVersion + *out = new(string) + **out = **in + } + if in.ProxyOverride != nil { + in, out := &in.ProxyOverride, &out.ProxyOverride + *out = new(string) + **out = **in + } + if in.PublicDataEndpointEnabled != nil { + in, out := &in.PublicDataEndpointEnabled, &out.PublicDataEndpointEnabled + *out = new(bool) + **out = **in + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.StorageAccountType != nil { + in, out := &in.StorageAccountType, &out.StorageAccountType + *out = 
new(string) + **out = **in + } + if in.StorageSizeInGb != nil { + in, out := &in.StorageSizeInGb, &out.StorageSizeInGb + *out = new(float64) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TimezoneID != nil { + in, out := &in.TimezoneID, &out.TimezoneID + *out = new(string) + **out = **in + } + if in.Vcores != nil { + in, out := &in.Vcores, &out.Vcores + *out = new(float64) + **out = **in + } + if in.ZoneRedundantEnabled != nil { + in, out := &in.ZoneRedundantEnabled, &out.ZoneRedundantEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLManagedInstanceInitParameters. +func (in *MSSQLManagedInstanceInitParameters) DeepCopy() *MSSQLManagedInstanceInitParameters { + if in == nil { + return nil + } + out := new(MSSQLManagedInstanceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *MSSQLManagedInstanceList) DeepCopyInto(out *MSSQLManagedInstanceList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]MSSQLManagedInstance, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLManagedInstanceList.
func (in *MSSQLManagedInstanceList) DeepCopy() *MSSQLManagedInstanceList {
	if in == nil {
		return nil
	}
	out := new(MSSQLManagedInstanceList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *MSSQLManagedInstanceList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MSSQLManagedInstanceObservation) DeepCopyInto(out *MSSQLManagedInstanceObservation) {
	*out = *in
	if in.AdministratorLogin != nil {
		in, out := &in.AdministratorLogin, &out.AdministratorLogin
		*out = new(string)
		**out = **in
	}
	if in.Collation != nil {
		in, out := &in.Collation, &out.Collation
		*out = new(string)
		**out = **in
	}
	if in.DNSZone != nil {
		in, out := &in.DNSZone, &out.DNSZone
		*out = new(string)
		**out = **in
	}
	if in.DNSZonePartnerID != nil {
		in, out := &in.DNSZonePartnerID, &out.DNSZonePartnerID
		*out = new(string)
		**out = **in
	}
	if in.Fqdn != nil {
		in, out := &in.Fqdn, &out.Fqdn
		*out = new(string)
		**out = **in
	}
	if in.ID != nil {
		in, out := &in.ID, &out.ID
		*out = new(string)
		**out = **in
	}
	if in.Identity != nil {
		in, out := &in.Identity, &out.Identity
		*out = new(MSSQLManagedInstanceIdentityObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.LicenseType != nil {
		in, out := &in.LicenseType, &out.LicenseType
		*out = new(string)
		**out = **in
	}
	if in.Location != nil {
		in, out := &in.Location, &out.Location
		*out = new(string)
		**out = **in
	}
	if in.MaintenanceConfigurationName != nil {
		in, out := &in.MaintenanceConfigurationName, &out.MaintenanceConfigurationName
		*out = new(string)
		**out = **in
	}
	if in.MinimumTLSVersion != nil {
		in, out := &in.MinimumTLSVersion, &out.MinimumTLSVersion
		*out = new(string)
		**out = **in
	}
	if in.ProxyOverride != nil {
		in, out := &in.ProxyOverride, &out.ProxyOverride
		*out = new(string)
		**out = **in
	}
	if in.PublicDataEndpointEnabled != nil {
		in, out := &in.PublicDataEndpointEnabled, &out.PublicDataEndpointEnabled
		*out = new(bool)
		**out = **in
	}
	if in.ResourceGroupName != nil {
		in, out := &in.ResourceGroupName, &out.ResourceGroupName
		*out = new(string)
		**out = **in
	}
	if in.SkuName != nil {
		in, out := &in.SkuName, &out.SkuName
		*out = new(string)
		**out = **in
	}
	if in.StorageAccountType != nil {
		in, out := &in.StorageAccountType, &out.StorageAccountType
		*out = new(string)
		**out = **in
	}
	if in.StorageSizeInGb != nil {
		in, out := &in.StorageSizeInGb, &out.StorageSizeInGb
		*out = new(float64)
		**out = **in
	}
	if in.SubnetID != nil {
		in, out := &in.SubnetID, &out.SubnetID
		*out = new(string)
		**out = **in
	}
	if in.Tags != nil {
		in, out := &in.Tags, &out.Tags
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
	if in.TimezoneID != nil {
		in, out := &in.TimezoneID, &out.TimezoneID
		*out = new(string)
		**out = **in
	}
	if in.Vcores != nil {
		in, out := &in.Vcores, &out.Vcores
		*out = new(float64)
		**out = **in
	}
	if in.ZoneRedundantEnabled != nil {
		in, out := &in.ZoneRedundantEnabled, &out.ZoneRedundantEnabled
		*out = new(bool)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLManagedInstanceObservation.
func (in *MSSQLManagedInstanceObservation) DeepCopy() *MSSQLManagedInstanceObservation {
	if in == nil {
		return nil
	}
	out := new(MSSQLManagedInstanceObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MSSQLManagedInstanceParameters) DeepCopyInto(out *MSSQLManagedInstanceParameters) {
	*out = *in
	if in.AdministratorLogin != nil {
		in, out := &in.AdministratorLogin, &out.AdministratorLogin
		*out = new(string)
		**out = **in
	}
	out.AdministratorLoginPasswordSecretRef = in.AdministratorLoginPasswordSecretRef
	if in.Collation != nil {
		in, out := &in.Collation, &out.Collation
		*out = new(string)
		**out = **in
	}
	if in.DNSZonePartnerID != nil {
		in, out := &in.DNSZonePartnerID, &out.DNSZonePartnerID
		*out = new(string)
		**out = **in
	}
	if in.DNSZonePartnerIDRef != nil {
		in, out := &in.DNSZonePartnerIDRef, &out.DNSZonePartnerIDRef
		*out = new(v1.Reference)
		(*in).DeepCopyInto(*out)
	}
	if in.DNSZonePartnerIDSelector != nil {
		in, out := &in.DNSZonePartnerIDSelector, &out.DNSZonePartnerIDSelector
		*out = new(v1.Selector)
		(*in).DeepCopyInto(*out)
	}
	if in.Identity != nil {
		in, out := &in.Identity, &out.Identity
		*out = new(MSSQLManagedInstanceIdentityParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.LicenseType != nil {
		in, out := &in.LicenseType, &out.LicenseType
		*out = new(string)
		**out = **in
	}
	if in.Location != nil {
		in, out := &in.Location, &out.Location
		*out = new(string)
		**out = **in
	}
	if in.MaintenanceConfigurationName != nil {
		in, out := &in.MaintenanceConfigurationName, &out.MaintenanceConfigurationName
		*out = new(string)
		**out = **in
	}
	if in.MinimumTLSVersion != nil {
		in, out := &in.MinimumTLSVersion, &out.MinimumTLSVersion
		*out = new(string)
		**out = **in
	}
	if in.ProxyOverride != nil {
		in, out := &in.ProxyOverride, &out.ProxyOverride
		*out = new(string)
		**out = **in
	}
	if in.PublicDataEndpointEnabled != nil {
		in, out := &in.PublicDataEndpointEnabled, &out.PublicDataEndpointEnabled
		*out = new(bool)
		**out = **in
	}
	if in.ResourceGroupName != nil {
		in, out := &in.ResourceGroupName, &out.ResourceGroupName
		*out = new(string)
		**out = **in
	}
	if in.ResourceGroupNameRef != nil {
		in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef
		*out = new(v1.Reference)
		(*in).DeepCopyInto(*out)
	}
	if in.ResourceGroupNameSelector != nil {
		in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector
		*out = new(v1.Selector)
		(*in).DeepCopyInto(*out)
	}
	if in.SkuName != nil {
		in, out := &in.SkuName, &out.SkuName
		*out = new(string)
		**out = **in
	}
	if in.StorageAccountType != nil {
		in, out := &in.StorageAccountType, &out.StorageAccountType
		*out = new(string)
		**out = **in
	}
	if in.StorageSizeInGb != nil {
		in, out := &in.StorageSizeInGb, &out.StorageSizeInGb
		*out = new(float64)
		**out = **in
	}
	if in.SubnetID != nil {
		in, out := &in.SubnetID, &out.SubnetID
		*out = new(string)
		**out = **in
	}
	if in.SubnetIDRef != nil {
		in, out := &in.SubnetIDRef, &out.SubnetIDRef
		*out = new(v1.Reference)
		(*in).DeepCopyInto(*out)
	}
	if in.SubnetIDSelector != nil {
		in, out := &in.SubnetIDSelector, &out.SubnetIDSelector
		*out = new(v1.Selector)
		(*in).DeepCopyInto(*out)
	}
	if in.Tags != nil {
		in, out := &in.Tags, &out.Tags
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
	if in.TimezoneID != nil {
		in, out := &in.TimezoneID, &out.TimezoneID
		*out = new(string)
		**out = **in
	}
	if in.Vcores != nil {
		in, out := &in.Vcores, &out.Vcores
		*out = new(float64)
		**out = **in
	}
	if in.ZoneRedundantEnabled != nil {
		in, out := &in.ZoneRedundantEnabled, &out.ZoneRedundantEnabled
		*out = new(bool)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLManagedInstanceParameters.
func (in *MSSQLManagedInstanceParameters) DeepCopy() *MSSQLManagedInstanceParameters {
	if in == nil {
		return nil
	}
	out := new(MSSQLManagedInstanceParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MSSQLManagedInstanceSpec) DeepCopyInto(out *MSSQLManagedInstanceSpec) {
	*out = *in
	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
	in.ForProvider.DeepCopyInto(&out.ForProvider)
	in.InitProvider.DeepCopyInto(&out.InitProvider)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLManagedInstanceSpec.
func (in *MSSQLManagedInstanceSpec) DeepCopy() *MSSQLManagedInstanceSpec {
	if in == nil {
		return nil
	}
	out := new(MSSQLManagedInstanceSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MSSQLManagedInstanceStatus) DeepCopyInto(out *MSSQLManagedInstanceStatus) {
	*out = *in
	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
	in.AtProvider.DeepCopyInto(&out.AtProvider)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLManagedInstanceStatus.
func (in *MSSQLManagedInstanceStatus) DeepCopy() *MSSQLManagedInstanceStatus {
	if in == nil {
		return nil
	}
	out := new(MSSQLManagedInstanceStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MSSQLManagedInstanceVulnerabilityAssessment) DeepCopyInto(out *MSSQLManagedInstanceVulnerabilityAssessment) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLManagedInstanceVulnerabilityAssessment.
func (in *MSSQLManagedInstanceVulnerabilityAssessment) DeepCopy() *MSSQLManagedInstanceVulnerabilityAssessment {
	if in == nil {
		return nil
	}
	out := new(MSSQLManagedInstanceVulnerabilityAssessment)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *MSSQLManagedInstanceVulnerabilityAssessment) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MSSQLManagedInstanceVulnerabilityAssessmentInitParameters) DeepCopyInto(out *MSSQLManagedInstanceVulnerabilityAssessmentInitParameters) {
	*out = *in
	if in.RecurringScans != nil {
		in, out := &in.RecurringScans, &out.RecurringScans
		*out = new(RecurringScansInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.StorageContainerPath != nil {
		in, out := &in.StorageContainerPath, &out.StorageContainerPath
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLManagedInstanceVulnerabilityAssessmentInitParameters.
func (in *MSSQLManagedInstanceVulnerabilityAssessmentInitParameters) DeepCopy() *MSSQLManagedInstanceVulnerabilityAssessmentInitParameters {
	if in == nil {
		return nil
	}
	out := new(MSSQLManagedInstanceVulnerabilityAssessmentInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MSSQLManagedInstanceVulnerabilityAssessmentList) DeepCopyInto(out *MSSQLManagedInstanceVulnerabilityAssessmentList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]MSSQLManagedInstanceVulnerabilityAssessment, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLManagedInstanceVulnerabilityAssessmentList.
func (in *MSSQLManagedInstanceVulnerabilityAssessmentList) DeepCopy() *MSSQLManagedInstanceVulnerabilityAssessmentList {
	if in == nil {
		return nil
	}
	out := new(MSSQLManagedInstanceVulnerabilityAssessmentList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *MSSQLManagedInstanceVulnerabilityAssessmentList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MSSQLManagedInstanceVulnerabilityAssessmentObservation) DeepCopyInto(out *MSSQLManagedInstanceVulnerabilityAssessmentObservation) {
	*out = *in
	if in.ID != nil {
		in, out := &in.ID, &out.ID
		*out = new(string)
		**out = **in
	}
	if in.ManagedInstanceID != nil {
		in, out := &in.ManagedInstanceID, &out.ManagedInstanceID
		*out = new(string)
		**out = **in
	}
	if in.RecurringScans != nil {
		in, out := &in.RecurringScans, &out.RecurringScans
		*out = new(RecurringScansObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.StorageContainerPath != nil {
		in, out := &in.StorageContainerPath, &out.StorageContainerPath
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLManagedInstanceVulnerabilityAssessmentObservation.
func (in *MSSQLManagedInstanceVulnerabilityAssessmentObservation) DeepCopy() *MSSQLManagedInstanceVulnerabilityAssessmentObservation {
	if in == nil {
		return nil
	}
	out := new(MSSQLManagedInstanceVulnerabilityAssessmentObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MSSQLManagedInstanceVulnerabilityAssessmentParameters) DeepCopyInto(out *MSSQLManagedInstanceVulnerabilityAssessmentParameters) {
	*out = *in
	if in.ManagedInstanceID != nil {
		in, out := &in.ManagedInstanceID, &out.ManagedInstanceID
		*out = new(string)
		**out = **in
	}
	if in.ManagedInstanceIDRef != nil {
		in, out := &in.ManagedInstanceIDRef, &out.ManagedInstanceIDRef
		*out = new(v1.Reference)
		(*in).DeepCopyInto(*out)
	}
	if in.ManagedInstanceIDSelector != nil {
		in, out := &in.ManagedInstanceIDSelector, &out.ManagedInstanceIDSelector
		*out = new(v1.Selector)
		(*in).DeepCopyInto(*out)
	}
	if in.RecurringScans != nil {
		in, out := &in.RecurringScans, &out.RecurringScans
		*out = new(RecurringScansParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.StorageAccountAccessKeySecretRef != nil {
		in, out := &in.StorageAccountAccessKeySecretRef, &out.StorageAccountAccessKeySecretRef
		*out = new(v1.SecretKeySelector)
		**out = **in
	}
	if in.StorageContainerPath != nil {
		in, out := &in.StorageContainerPath, &out.StorageContainerPath
		*out = new(string)
		**out = **in
	}
	if in.StorageContainerSASKeySecretRef != nil {
		in, out := &in.StorageContainerSASKeySecretRef, &out.StorageContainerSASKeySecretRef
		*out = new(v1.SecretKeySelector)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLManagedInstanceVulnerabilityAssessmentParameters.
func (in *MSSQLManagedInstanceVulnerabilityAssessmentParameters) DeepCopy() *MSSQLManagedInstanceVulnerabilityAssessmentParameters {
	if in == nil {
		return nil
	}
	out := new(MSSQLManagedInstanceVulnerabilityAssessmentParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MSSQLManagedInstanceVulnerabilityAssessmentSpec) DeepCopyInto(out *MSSQLManagedInstanceVulnerabilityAssessmentSpec) {
	*out = *in
	in.ResourceSpec.DeepCopyInto(&out.ResourceSpec)
	in.ForProvider.DeepCopyInto(&out.ForProvider)
	in.InitProvider.DeepCopyInto(&out.InitProvider)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLManagedInstanceVulnerabilityAssessmentSpec.
func (in *MSSQLManagedInstanceVulnerabilityAssessmentSpec) DeepCopy() *MSSQLManagedInstanceVulnerabilityAssessmentSpec {
	if in == nil {
		return nil
	}
	out := new(MSSQLManagedInstanceVulnerabilityAssessmentSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MSSQLManagedInstanceVulnerabilityAssessmentStatus) DeepCopyInto(out *MSSQLManagedInstanceVulnerabilityAssessmentStatus) {
	*out = *in
	in.ResourceStatus.DeepCopyInto(&out.ResourceStatus)
	in.AtProvider.DeepCopyInto(&out.AtProvider)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLManagedInstanceVulnerabilityAssessmentStatus.
func (in *MSSQLManagedInstanceVulnerabilityAssessmentStatus) DeepCopy() *MSSQLManagedInstanceVulnerabilityAssessmentStatus {
	if in == nil {
		return nil
	}
	out := new(MSSQLManagedInstanceVulnerabilityAssessmentStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MSSQLServer) DeepCopyInto(out *MSSQLServer) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLServer.
func (in *MSSQLServer) DeepCopy() *MSSQLServer {
	if in == nil {
		return nil
	}
	out := new(MSSQLServer)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *MSSQLServer) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MSSQLServerIdentityInitParameters) DeepCopyInto(out *MSSQLServerIdentityInitParameters) {
	*out = *in
	if in.IdentityIds != nil {
		in, out := &in.IdentityIds, &out.IdentityIds
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.Type != nil {
		in, out := &in.Type, &out.Type
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLServerIdentityInitParameters.
func (in *MSSQLServerIdentityInitParameters) DeepCopy() *MSSQLServerIdentityInitParameters {
	if in == nil {
		return nil
	}
	out := new(MSSQLServerIdentityInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MSSQLServerIdentityObservation) DeepCopyInto(out *MSSQLServerIdentityObservation) {
	*out = *in
	if in.IdentityIds != nil {
		in, out := &in.IdentityIds, &out.IdentityIds
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.PrincipalID != nil {
		in, out := &in.PrincipalID, &out.PrincipalID
		*out = new(string)
		**out = **in
	}
	if in.TenantID != nil {
		in, out := &in.TenantID, &out.TenantID
		*out = new(string)
		**out = **in
	}
	if in.Type != nil {
		in, out := &in.Type, &out.Type
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLServerIdentityObservation.
func (in *MSSQLServerIdentityObservation) DeepCopy() *MSSQLServerIdentityObservation {
	if in == nil {
		return nil
	}
	out := new(MSSQLServerIdentityObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MSSQLServerIdentityParameters) DeepCopyInto(out *MSSQLServerIdentityParameters) {
	*out = *in
	if in.IdentityIds != nil {
		in, out := &in.IdentityIds, &out.IdentityIds
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.Type != nil {
		in, out := &in.Type, &out.Type
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLServerIdentityParameters.
func (in *MSSQLServerIdentityParameters) DeepCopy() *MSSQLServerIdentityParameters {
	if in == nil {
		return nil
	}
	out := new(MSSQLServerIdentityParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MSSQLServerInitParameters) DeepCopyInto(out *MSSQLServerInitParameters) {
	*out = *in
	if in.AdministratorLogin != nil {
		in, out := &in.AdministratorLogin, &out.AdministratorLogin
		*out = new(string)
		**out = **in
	}
	if in.AzureadAdministrator != nil {
		in, out := &in.AzureadAdministrator, &out.AzureadAdministrator
		*out = new(AzureadAdministratorInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.ConnectionPolicy != nil {
		in, out := &in.ConnectionPolicy, &out.ConnectionPolicy
		*out = new(string)
		**out = **in
	}
	if in.Identity != nil {
		in, out := &in.Identity, &out.Identity
		*out = new(MSSQLServerIdentityInitParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Location != nil {
		in, out := &in.Location, &out.Location
		*out = new(string)
		**out = **in
	}
	if in.MinimumTLSVersion != nil {
		in, out := &in.MinimumTLSVersion, &out.MinimumTLSVersion
		*out = new(string)
		**out = **in
	}
	if in.OutboundNetworkRestrictionEnabled != nil {
		in, out := &in.OutboundNetworkRestrictionEnabled, &out.OutboundNetworkRestrictionEnabled
		*out = new(bool)
		**out = **in
	}
	if in.PrimaryUserAssignedIdentityID != nil {
		in, out := &in.PrimaryUserAssignedIdentityID, &out.PrimaryUserAssignedIdentityID
		*out = new(string)
		**out = **in
	}
	if in.PrimaryUserAssignedIdentityIDRef != nil {
		in, out := &in.PrimaryUserAssignedIdentityIDRef, &out.PrimaryUserAssignedIdentityIDRef
		*out = new(v1.Reference)
		(*in).DeepCopyInto(*out)
	}
	if in.PrimaryUserAssignedIdentityIDSelector != nil {
		in, out := &in.PrimaryUserAssignedIdentityIDSelector, &out.PrimaryUserAssignedIdentityIDSelector
		*out = new(v1.Selector)
		(*in).DeepCopyInto(*out)
	}
	if in.PublicNetworkAccessEnabled != nil {
		in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled
		*out = new(bool)
		**out = **in
	}
	if in.Tags != nil {
		in, out := &in.Tags, &out.Tags
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
	if in.TransparentDataEncryptionKeyVaultKeyID != nil {
		in, out := &in.TransparentDataEncryptionKeyVaultKeyID, &out.TransparentDataEncryptionKeyVaultKeyID
		*out = new(string)
		**out = **in
	}
	if in.TransparentDataEncryptionKeyVaultKeyIDRef != nil {
		in, out := &in.TransparentDataEncryptionKeyVaultKeyIDRef, &out.TransparentDataEncryptionKeyVaultKeyIDRef
		*out = new(v1.Reference)
		(*in).DeepCopyInto(*out)
	}
	if in.TransparentDataEncryptionKeyVaultKeyIDSelector != nil {
		in, out := &in.TransparentDataEncryptionKeyVaultKeyIDSelector, &out.TransparentDataEncryptionKeyVaultKeyIDSelector
		*out = new(v1.Selector)
		(*in).DeepCopyInto(*out)
	}
	if in.Version != nil {
		in, out := &in.Version, &out.Version
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLServerInitParameters.
func (in *MSSQLServerInitParameters) DeepCopy() *MSSQLServerInitParameters {
	if in == nil {
		return nil
	}
	out := new(MSSQLServerInitParameters)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MSSQLServerList) DeepCopyInto(out *MSSQLServerList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]MSSQLServer, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLServerList.
func (in *MSSQLServerList) DeepCopy() *MSSQLServerList {
	if in == nil {
		return nil
	}
	out := new(MSSQLServerList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *MSSQLServerList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MSSQLServerObservation) DeepCopyInto(out *MSSQLServerObservation) {
	*out = *in
	if in.AdministratorLogin != nil {
		in, out := &in.AdministratorLogin, &out.AdministratorLogin
		*out = new(string)
		**out = **in
	}
	if in.AzureadAdministrator != nil {
		in, out := &in.AzureadAdministrator, &out.AzureadAdministrator
		*out = new(AzureadAdministratorObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.ConnectionPolicy != nil {
		in, out := &in.ConnectionPolicy, &out.ConnectionPolicy
		*out = new(string)
		**out = **in
	}
	if in.FullyQualifiedDomainName != nil {
		in, out := &in.FullyQualifiedDomainName, &out.FullyQualifiedDomainName
		*out = new(string)
		**out = **in
	}
	if in.ID != nil {
		in, out := &in.ID, &out.ID
		*out = new(string)
		**out = **in
	}
	if in.Identity != nil {
		in, out := &in.Identity, &out.Identity
		*out = new(MSSQLServerIdentityObservation)
		(*in).DeepCopyInto(*out)
	}
	if in.Location != nil {
		in, out := &in.Location, &out.Location
		*out = new(string)
		**out = **in
	}
	if in.MinimumTLSVersion != nil {
		in, out := &in.MinimumTLSVersion, &out.MinimumTLSVersion
		*out = new(string)
		**out = **in
	}
	if in.OutboundNetworkRestrictionEnabled != nil {
		in, out := &in.OutboundNetworkRestrictionEnabled, &out.OutboundNetworkRestrictionEnabled
		*out = new(bool)
		**out = **in
	}
	if in.PrimaryUserAssignedIdentityID != nil {
		in, out := &in.PrimaryUserAssignedIdentityID, &out.PrimaryUserAssignedIdentityID
		*out = new(string)
		**out = **in
	}
	if in.PublicNetworkAccessEnabled != nil {
		in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled
		*out = new(bool)
		**out = **in
	}
	if in.ResourceGroupName != nil {
		in, out := &in.ResourceGroupName, &out.ResourceGroupName
		*out = new(string)
		**out = **in
	}
	if in.RestorableDroppedDatabaseIds != nil {
		in, out := &in.RestorableDroppedDatabaseIds, &out.RestorableDroppedDatabaseIds
		*out = make([]*string, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(string)
				**out = **in
			}
		}
	}
	if in.Tags != nil {
		in, out := &in.Tags, &out.Tags
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
	if in.TransparentDataEncryptionKeyVaultKeyID != nil {
		in, out := &in.TransparentDataEncryptionKeyVaultKeyID, &out.TransparentDataEncryptionKeyVaultKeyID
		*out = new(string)
		**out = **in
	}
	if in.Version != nil {
		in, out := &in.Version, &out.Version
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLServerObservation.
func (in *MSSQLServerObservation) DeepCopy() *MSSQLServerObservation {
	if in == nil {
		return nil
	}
	out := new(MSSQLServerObservation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MSSQLServerParameters) DeepCopyInto(out *MSSQLServerParameters) {
	*out = *in
	if in.AdministratorLogin != nil {
		in, out := &in.AdministratorLogin, &out.AdministratorLogin
		*out = new(string)
		**out = **in
	}
	if in.AdministratorLoginPasswordSecretRef != nil {
		in, out := &in.AdministratorLoginPasswordSecretRef, &out.AdministratorLoginPasswordSecretRef
		*out = new(v1.SecretKeySelector)
		**out = **in
	}
	if in.AzureadAdministrator != nil {
		in, out := &in.AzureadAdministrator, &out.AzureadAdministrator
		*out = new(AzureadAdministratorParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.ConnectionPolicy != nil {
		in, out := &in.ConnectionPolicy, &out.ConnectionPolicy
		*out = new(string)
		**out = **in
	}
	if in.Identity != nil {
		in, out := &in.Identity, &out.Identity
		*out = new(MSSQLServerIdentityParameters)
		(*in).DeepCopyInto(*out)
	}
	if in.Location != nil {
		in, out := &in.Location, &out.Location
		*out = new(string)
		**out = **in
	}
	if in.MinimumTLSVersion != nil {
		in, out := &in.MinimumTLSVersion, &out.MinimumTLSVersion
		*out = new(string)
		**out = **in
	}
	if in.OutboundNetworkRestrictionEnabled != nil {
		in, out := &in.OutboundNetworkRestrictionEnabled, &out.OutboundNetworkRestrictionEnabled
		*out = new(bool)
		**out = **in
	}
	if in.PrimaryUserAssignedIdentityID != nil {
		in, out := &in.PrimaryUserAssignedIdentityID, &out.PrimaryUserAssignedIdentityID
		*out = new(string)
		**out = **in
	}
	if in.PrimaryUserAssignedIdentityIDRef != nil {
		in, out := &in.PrimaryUserAssignedIdentityIDRef, &out.PrimaryUserAssignedIdentityIDRef
		*out = new(v1.Reference)
		(*in).DeepCopyInto(*out)
	}
	if in.PrimaryUserAssignedIdentityIDSelector != nil {
		in, out := &in.PrimaryUserAssignedIdentityIDSelector, &out.PrimaryUserAssignedIdentityIDSelector
		*out = new(v1.Selector)
		(*in).DeepCopyInto(*out)
	}
	if in.PublicNetworkAccessEnabled != nil {
		in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled
		*out = new(bool)
		**out = **in
	}
	if in.ResourceGroupName != nil {
		in, out := &in.ResourceGroupName, &out.ResourceGroupName
		*out = new(string)
		**out = **in
	}
	if in.ResourceGroupNameRef != nil {
		in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef
		*out = new(v1.Reference)
		(*in).DeepCopyInto(*out)
	}
	if in.ResourceGroupNameSelector != nil {
		in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector
		*out = new(v1.Selector)
		(*in).DeepCopyInto(*out)
	}
	if in.Tags != nil {
		in, out := &in.Tags, &out.Tags
		*out = make(map[string]*string, len(*in))
		for key, val := range *in {
			var outVal *string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = new(string)
				**out = **in
			}
			(*out)[key] = outVal
		}
	}
	if in.TransparentDataEncryptionKeyVaultKeyID != nil {
		in, out := &in.TransparentDataEncryptionKeyVaultKeyID, &out.TransparentDataEncryptionKeyVaultKeyID
		*out = new(string)
		**out = **in
	}
	if in.TransparentDataEncryptionKeyVaultKeyIDRef != nil {
		in, out := &in.TransparentDataEncryptionKeyVaultKeyIDRef, &out.TransparentDataEncryptionKeyVaultKeyIDRef
		*out = new(v1.Reference)
		(*in).DeepCopyInto(*out)
	}
	if in.TransparentDataEncryptionKeyVaultKeyIDSelector != nil {
		in, out := &in.TransparentDataEncryptionKeyVaultKeyIDSelector, &out.TransparentDataEncryptionKeyVaultKeyIDSelector
		*out = new(v1.Selector)
		(*in).DeepCopyInto(*out)
	}
	if in.Version != nil {
		in, out := &in.Version, &out.Version
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLServerParameters.
+func (in *MSSQLServerParameters) DeepCopy() *MSSQLServerParameters { + if in == nil { + return nil + } + out := new(MSSQLServerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MSSQLServerSpec) DeepCopyInto(out *MSSQLServerSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLServerSpec. +func (in *MSSQLServerSpec) DeepCopy() *MSSQLServerSpec { + if in == nil { + return nil + } + out := new(MSSQLServerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MSSQLServerStatus) DeepCopyInto(out *MSSQLServerStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLServerStatus. +func (in *MSSQLServerStatus) DeepCopy() *MSSQLServerStatus { + if in == nil { + return nil + } + out := new(MSSQLServerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MSSQLServerVulnerabilityAssessment) DeepCopyInto(out *MSSQLServerVulnerabilityAssessment) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLServerVulnerabilityAssessment. 
+func (in *MSSQLServerVulnerabilityAssessment) DeepCopy() *MSSQLServerVulnerabilityAssessment { + if in == nil { + return nil + } + out := new(MSSQLServerVulnerabilityAssessment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MSSQLServerVulnerabilityAssessment) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MSSQLServerVulnerabilityAssessmentInitParameters) DeepCopyInto(out *MSSQLServerVulnerabilityAssessmentInitParameters) { + *out = *in + if in.RecurringScans != nil { + in, out := &in.RecurringScans, &out.RecurringScans + *out = new(MSSQLServerVulnerabilityAssessmentRecurringScansInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ServerSecurityAlertPolicyID != nil { + in, out := &in.ServerSecurityAlertPolicyID, &out.ServerSecurityAlertPolicyID + *out = new(string) + **out = **in + } + if in.ServerSecurityAlertPolicyIDRef != nil { + in, out := &in.ServerSecurityAlertPolicyIDRef, &out.ServerSecurityAlertPolicyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServerSecurityAlertPolicyIDSelector != nil { + in, out := &in.ServerSecurityAlertPolicyIDSelector, &out.ServerSecurityAlertPolicyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StorageContainerPath != nil { + in, out := &in.StorageContainerPath, &out.StorageContainerPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLServerVulnerabilityAssessmentInitParameters. 
+func (in *MSSQLServerVulnerabilityAssessmentInitParameters) DeepCopy() *MSSQLServerVulnerabilityAssessmentInitParameters { + if in == nil { + return nil + } + out := new(MSSQLServerVulnerabilityAssessmentInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MSSQLServerVulnerabilityAssessmentList) DeepCopyInto(out *MSSQLServerVulnerabilityAssessmentList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MSSQLServerVulnerabilityAssessment, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLServerVulnerabilityAssessmentList. +func (in *MSSQLServerVulnerabilityAssessmentList) DeepCopy() *MSSQLServerVulnerabilityAssessmentList { + if in == nil { + return nil + } + out := new(MSSQLServerVulnerabilityAssessmentList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MSSQLServerVulnerabilityAssessmentList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MSSQLServerVulnerabilityAssessmentObservation) DeepCopyInto(out *MSSQLServerVulnerabilityAssessmentObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.RecurringScans != nil { + in, out := &in.RecurringScans, &out.RecurringScans + *out = new(MSSQLServerVulnerabilityAssessmentRecurringScansObservation) + (*in).DeepCopyInto(*out) + } + if in.ServerSecurityAlertPolicyID != nil { + in, out := &in.ServerSecurityAlertPolicyID, &out.ServerSecurityAlertPolicyID + *out = new(string) + **out = **in + } + if in.StorageContainerPath != nil { + in, out := &in.StorageContainerPath, &out.StorageContainerPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLServerVulnerabilityAssessmentObservation. +func (in *MSSQLServerVulnerabilityAssessmentObservation) DeepCopy() *MSSQLServerVulnerabilityAssessmentObservation { + if in == nil { + return nil + } + out := new(MSSQLServerVulnerabilityAssessmentObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MSSQLServerVulnerabilityAssessmentParameters) DeepCopyInto(out *MSSQLServerVulnerabilityAssessmentParameters) { + *out = *in + if in.RecurringScans != nil { + in, out := &in.RecurringScans, &out.RecurringScans + *out = new(MSSQLServerVulnerabilityAssessmentRecurringScansParameters) + (*in).DeepCopyInto(*out) + } + if in.ServerSecurityAlertPolicyID != nil { + in, out := &in.ServerSecurityAlertPolicyID, &out.ServerSecurityAlertPolicyID + *out = new(string) + **out = **in + } + if in.ServerSecurityAlertPolicyIDRef != nil { + in, out := &in.ServerSecurityAlertPolicyIDRef, &out.ServerSecurityAlertPolicyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServerSecurityAlertPolicyIDSelector != nil { + in, out := &in.ServerSecurityAlertPolicyIDSelector, &out.ServerSecurityAlertPolicyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StorageAccountAccessKeySecretRef != nil { + in, out := &in.StorageAccountAccessKeySecretRef, &out.StorageAccountAccessKeySecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.StorageContainerPath != nil { + in, out := &in.StorageContainerPath, &out.StorageContainerPath + *out = new(string) + **out = **in + } + if in.StorageContainerSASKeySecretRef != nil { + in, out := &in.StorageContainerSASKeySecretRef, &out.StorageContainerSASKeySecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLServerVulnerabilityAssessmentParameters. +func (in *MSSQLServerVulnerabilityAssessmentParameters) DeepCopy() *MSSQLServerVulnerabilityAssessmentParameters { + if in == nil { + return nil + } + out := new(MSSQLServerVulnerabilityAssessmentParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MSSQLServerVulnerabilityAssessmentRecurringScansInitParameters) DeepCopyInto(out *MSSQLServerVulnerabilityAssessmentRecurringScansInitParameters) { + *out = *in + if in.EmailSubscriptionAdmins != nil { + in, out := &in.EmailSubscriptionAdmins, &out.EmailSubscriptionAdmins + *out = new(bool) + **out = **in + } + if in.Emails != nil { + in, out := &in.Emails, &out.Emails + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLServerVulnerabilityAssessmentRecurringScansInitParameters. +func (in *MSSQLServerVulnerabilityAssessmentRecurringScansInitParameters) DeepCopy() *MSSQLServerVulnerabilityAssessmentRecurringScansInitParameters { + if in == nil { + return nil + } + out := new(MSSQLServerVulnerabilityAssessmentRecurringScansInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MSSQLServerVulnerabilityAssessmentRecurringScansObservation) DeepCopyInto(out *MSSQLServerVulnerabilityAssessmentRecurringScansObservation) { + *out = *in + if in.EmailSubscriptionAdmins != nil { + in, out := &in.EmailSubscriptionAdmins, &out.EmailSubscriptionAdmins + *out = new(bool) + **out = **in + } + if in.Emails != nil { + in, out := &in.Emails, &out.Emails + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLServerVulnerabilityAssessmentRecurringScansObservation. +func (in *MSSQLServerVulnerabilityAssessmentRecurringScansObservation) DeepCopy() *MSSQLServerVulnerabilityAssessmentRecurringScansObservation { + if in == nil { + return nil + } + out := new(MSSQLServerVulnerabilityAssessmentRecurringScansObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MSSQLServerVulnerabilityAssessmentRecurringScansParameters) DeepCopyInto(out *MSSQLServerVulnerabilityAssessmentRecurringScansParameters) { + *out = *in + if in.EmailSubscriptionAdmins != nil { + in, out := &in.EmailSubscriptionAdmins, &out.EmailSubscriptionAdmins + *out = new(bool) + **out = **in + } + if in.Emails != nil { + in, out := &in.Emails, &out.Emails + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLServerVulnerabilityAssessmentRecurringScansParameters. +func (in *MSSQLServerVulnerabilityAssessmentRecurringScansParameters) DeepCopy() *MSSQLServerVulnerabilityAssessmentRecurringScansParameters { + if in == nil { + return nil + } + out := new(MSSQLServerVulnerabilityAssessmentRecurringScansParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MSSQLServerVulnerabilityAssessmentSpec) DeepCopyInto(out *MSSQLServerVulnerabilityAssessmentSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLServerVulnerabilityAssessmentSpec. +func (in *MSSQLServerVulnerabilityAssessmentSpec) DeepCopy() *MSSQLServerVulnerabilityAssessmentSpec { + if in == nil { + return nil + } + out := new(MSSQLServerVulnerabilityAssessmentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MSSQLServerVulnerabilityAssessmentStatus) DeepCopyInto(out *MSSQLServerVulnerabilityAssessmentStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MSSQLServerVulnerabilityAssessmentStatus. +func (in *MSSQLServerVulnerabilityAssessmentStatus) DeepCopy() *MSSQLServerVulnerabilityAssessmentStatus { + if in == nil { + return nil + } + out := new(MSSQLServerVulnerabilityAssessmentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PartnerRegionInitParameters) DeepCopyInto(out *PartnerRegionInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartnerRegionInitParameters. +func (in *PartnerRegionInitParameters) DeepCopy() *PartnerRegionInitParameters { + if in == nil { + return nil + } + out := new(PartnerRegionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PartnerRegionObservation) DeepCopyInto(out *PartnerRegionObservation) { + *out = *in + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartnerRegionObservation. +func (in *PartnerRegionObservation) DeepCopy() *PartnerRegionObservation { + if in == nil { + return nil + } + out := new(PartnerRegionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PartnerRegionParameters) DeepCopyInto(out *PartnerRegionParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartnerRegionParameters. +func (in *PartnerRegionParameters) DeepCopy() *PartnerRegionParameters { + if in == nil { + return nil + } + out := new(PartnerRegionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PartnerServerInitParameters) DeepCopyInto(out *PartnerServerInitParameters) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IDRef != nil { + in, out := &in.IDRef, &out.IDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IDSelector != nil { + in, out := &in.IDSelector, &out.IDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartnerServerInitParameters. +func (in *PartnerServerInitParameters) DeepCopy() *PartnerServerInitParameters { + if in == nil { + return nil + } + out := new(PartnerServerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PartnerServerObservation) DeepCopyInto(out *PartnerServerObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartnerServerObservation. 
+func (in *PartnerServerObservation) DeepCopy() *PartnerServerObservation { + if in == nil { + return nil + } + out := new(PartnerServerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PartnerServerParameters) DeepCopyInto(out *PartnerServerParameters) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IDRef != nil { + in, out := &in.IDRef, &out.IDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IDSelector != nil { + in, out := &in.IDSelector, &out.IDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartnerServerParameters. +func (in *PartnerServerParameters) DeepCopy() *PartnerServerParameters { + if in == nil { + return nil + } + out := new(PartnerServerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PerDatabaseSettingsInitParameters) DeepCopyInto(out *PerDatabaseSettingsInitParameters) { + *out = *in + if in.MaxCapacity != nil { + in, out := &in.MaxCapacity, &out.MaxCapacity + *out = new(float64) + **out = **in + } + if in.MinCapacity != nil { + in, out := &in.MinCapacity, &out.MinCapacity + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerDatabaseSettingsInitParameters. +func (in *PerDatabaseSettingsInitParameters) DeepCopy() *PerDatabaseSettingsInitParameters { + if in == nil { + return nil + } + out := new(PerDatabaseSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PerDatabaseSettingsObservation) DeepCopyInto(out *PerDatabaseSettingsObservation) { + *out = *in + if in.MaxCapacity != nil { + in, out := &in.MaxCapacity, &out.MaxCapacity + *out = new(float64) + **out = **in + } + if in.MinCapacity != nil { + in, out := &in.MinCapacity, &out.MinCapacity + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerDatabaseSettingsObservation. +func (in *PerDatabaseSettingsObservation) DeepCopy() *PerDatabaseSettingsObservation { + if in == nil { + return nil + } + out := new(PerDatabaseSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PerDatabaseSettingsParameters) DeepCopyInto(out *PerDatabaseSettingsParameters) { + *out = *in + if in.MaxCapacity != nil { + in, out := &in.MaxCapacity, &out.MaxCapacity + *out = new(float64) + **out = **in + } + if in.MinCapacity != nil { + in, out := &in.MinCapacity, &out.MinCapacity + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerDatabaseSettingsParameters. +func (in *PerDatabaseSettingsParameters) DeepCopy() *PerDatabaseSettingsParameters { + if in == nil { + return nil + } + out := new(PerDatabaseSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PointInTimeRestoreInitParameters) DeepCopyInto(out *PointInTimeRestoreInitParameters) { + *out = *in + if in.RestorePointInTime != nil { + in, out := &in.RestorePointInTime, &out.RestorePointInTime + *out = new(string) + **out = **in + } + if in.SourceDatabaseID != nil { + in, out := &in.SourceDatabaseID, &out.SourceDatabaseID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PointInTimeRestoreInitParameters. +func (in *PointInTimeRestoreInitParameters) DeepCopy() *PointInTimeRestoreInitParameters { + if in == nil { + return nil + } + out := new(PointInTimeRestoreInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PointInTimeRestoreObservation) DeepCopyInto(out *PointInTimeRestoreObservation) { + *out = *in + if in.RestorePointInTime != nil { + in, out := &in.RestorePointInTime, &out.RestorePointInTime + *out = new(string) + **out = **in + } + if in.SourceDatabaseID != nil { + in, out := &in.SourceDatabaseID, &out.SourceDatabaseID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PointInTimeRestoreObservation. +func (in *PointInTimeRestoreObservation) DeepCopy() *PointInTimeRestoreObservation { + if in == nil { + return nil + } + out := new(PointInTimeRestoreObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PointInTimeRestoreParameters) DeepCopyInto(out *PointInTimeRestoreParameters) { + *out = *in + if in.RestorePointInTime != nil { + in, out := &in.RestorePointInTime, &out.RestorePointInTime + *out = new(string) + **out = **in + } + if in.SourceDatabaseID != nil { + in, out := &in.SourceDatabaseID, &out.SourceDatabaseID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PointInTimeRestoreParameters. +func (in *PointInTimeRestoreParameters) DeepCopy() *PointInTimeRestoreParameters { + if in == nil { + return nil + } + out := new(PointInTimeRestoreParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReadWriteEndpointFailoverPolicyInitParameters) DeepCopyInto(out *ReadWriteEndpointFailoverPolicyInitParameters) { + *out = *in + if in.GraceMinutes != nil { + in, out := &in.GraceMinutes, &out.GraceMinutes + *out = new(float64) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReadWriteEndpointFailoverPolicyInitParameters. +func (in *ReadWriteEndpointFailoverPolicyInitParameters) DeepCopy() *ReadWriteEndpointFailoverPolicyInitParameters { + if in == nil { + return nil + } + out := new(ReadWriteEndpointFailoverPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReadWriteEndpointFailoverPolicyObservation) DeepCopyInto(out *ReadWriteEndpointFailoverPolicyObservation) { + *out = *in + if in.GraceMinutes != nil { + in, out := &in.GraceMinutes, &out.GraceMinutes + *out = new(float64) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReadWriteEndpointFailoverPolicyObservation. +func (in *ReadWriteEndpointFailoverPolicyObservation) DeepCopy() *ReadWriteEndpointFailoverPolicyObservation { + if in == nil { + return nil + } + out := new(ReadWriteEndpointFailoverPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReadWriteEndpointFailoverPolicyParameters) DeepCopyInto(out *ReadWriteEndpointFailoverPolicyParameters) { + *out = *in + if in.GraceMinutes != nil { + in, out := &in.GraceMinutes, &out.GraceMinutes + *out = new(float64) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReadWriteEndpointFailoverPolicyParameters. +func (in *ReadWriteEndpointFailoverPolicyParameters) DeepCopy() *ReadWriteEndpointFailoverPolicyParameters { + if in == nil { + return nil + } + out := new(ReadWriteEndpointFailoverPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RecurringScansInitParameters) DeepCopyInto(out *RecurringScansInitParameters) { + *out = *in + if in.EmailSubscriptionAdmins != nil { + in, out := &in.EmailSubscriptionAdmins, &out.EmailSubscriptionAdmins + *out = new(bool) + **out = **in + } + if in.Emails != nil { + in, out := &in.Emails, &out.Emails + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecurringScansInitParameters. +func (in *RecurringScansInitParameters) DeepCopy() *RecurringScansInitParameters { + if in == nil { + return nil + } + out := new(RecurringScansInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecurringScansObservation) DeepCopyInto(out *RecurringScansObservation) { + *out = *in + if in.EmailSubscriptionAdmins != nil { + in, out := &in.EmailSubscriptionAdmins, &out.EmailSubscriptionAdmins + *out = new(bool) + **out = **in + } + if in.Emails != nil { + in, out := &in.Emails, &out.Emails + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecurringScansObservation. 
+func (in *RecurringScansObservation) DeepCopy() *RecurringScansObservation { + if in == nil { + return nil + } + out := new(RecurringScansObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecurringScansParameters) DeepCopyInto(out *RecurringScansParameters) { + *out = *in + if in.EmailSubscriptionAdmins != nil { + in, out := &in.EmailSubscriptionAdmins, &out.EmailSubscriptionAdmins + *out = new(bool) + **out = **in + } + if in.Emails != nil { + in, out := &in.Emails, &out.Emails + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecurringScansParameters. +func (in *RecurringScansParameters) DeepCopy() *RecurringScansParameters { + if in == nil { + return nil + } + out := new(RecurringScansParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShortTermRetentionPolicyInitParameters) DeepCopyInto(out *ShortTermRetentionPolicyInitParameters) { + *out = *in + if in.BackupIntervalInHours != nil { + in, out := &in.BackupIntervalInHours, &out.BackupIntervalInHours + *out = new(float64) + **out = **in + } + if in.RetentionDays != nil { + in, out := &in.RetentionDays, &out.RetentionDays + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShortTermRetentionPolicyInitParameters. 
+func (in *ShortTermRetentionPolicyInitParameters) DeepCopy() *ShortTermRetentionPolicyInitParameters { + if in == nil { + return nil + } + out := new(ShortTermRetentionPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShortTermRetentionPolicyObservation) DeepCopyInto(out *ShortTermRetentionPolicyObservation) { + *out = *in + if in.BackupIntervalInHours != nil { + in, out := &in.BackupIntervalInHours, &out.BackupIntervalInHours + *out = new(float64) + **out = **in + } + if in.RetentionDays != nil { + in, out := &in.RetentionDays, &out.RetentionDays + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShortTermRetentionPolicyObservation. +func (in *ShortTermRetentionPolicyObservation) DeepCopy() *ShortTermRetentionPolicyObservation { + if in == nil { + return nil + } + out := new(ShortTermRetentionPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShortTermRetentionPolicyParameters) DeepCopyInto(out *ShortTermRetentionPolicyParameters) { + *out = *in + if in.BackupIntervalInHours != nil { + in, out := &in.BackupIntervalInHours, &out.BackupIntervalInHours + *out = new(float64) + **out = **in + } + if in.RetentionDays != nil { + in, out := &in.RetentionDays, &out.RetentionDays + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShortTermRetentionPolicyParameters. 
+func (in *ShortTermRetentionPolicyParameters) DeepCopy() *ShortTermRetentionPolicyParameters { + if in == nil { + return nil + } + out := new(ShortTermRetentionPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SkuInitParameters) DeepCopyInto(out *SkuInitParameters) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(float64) + **out = **in + } + if in.Family != nil { + in, out := &in.Family, &out.Family + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Tier != nil { + in, out := &in.Tier, &out.Tier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SkuInitParameters. +func (in *SkuInitParameters) DeepCopy() *SkuInitParameters { + if in == nil { + return nil + } + out := new(SkuInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SkuObservation) DeepCopyInto(out *SkuObservation) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(float64) + **out = **in + } + if in.Family != nil { + in, out := &in.Family, &out.Family + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Tier != nil { + in, out := &in.Tier, &out.Tier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SkuObservation. 
+func (in *SkuObservation) DeepCopy() *SkuObservation { + if in == nil { + return nil + } + out := new(SkuObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SkuParameters) DeepCopyInto(out *SkuParameters) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(float64) + **out = **in + } + if in.Family != nil { + in, out := &in.Family, &out.Family + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Tier != nil { + in, out := &in.Tier, &out.Tier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SkuParameters. +func (in *SkuParameters) DeepCopy() *SkuParameters { + if in == nil { + return nil + } + out := new(SkuParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ThreatDetectionPolicyInitParameters) DeepCopyInto(out *ThreatDetectionPolicyInitParameters) { + *out = *in + if in.DisabledAlerts != nil { + in, out := &in.DisabledAlerts, &out.DisabledAlerts + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.EmailAccountAdmins != nil { + in, out := &in.EmailAccountAdmins, &out.EmailAccountAdmins + *out = new(string) + **out = **in + } + if in.EmailAddresses != nil { + in, out := &in.EmailAddresses, &out.EmailAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RetentionDays != nil { + in, out := &in.RetentionDays, &out.RetentionDays + *out = new(float64) + **out = **in + } + if in.State != nil { + in, out := &in.State, &out.State + *out = new(string) + **out = **in + } + if in.StorageEndpoint != nil { + in, out := &in.StorageEndpoint, &out.StorageEndpoint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThreatDetectionPolicyInitParameters. +func (in *ThreatDetectionPolicyInitParameters) DeepCopy() *ThreatDetectionPolicyInitParameters { + if in == nil { + return nil + } + out := new(ThreatDetectionPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ThreatDetectionPolicyObservation) DeepCopyInto(out *ThreatDetectionPolicyObservation) { + *out = *in + if in.DisabledAlerts != nil { + in, out := &in.DisabledAlerts, &out.DisabledAlerts + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.EmailAccountAdmins != nil { + in, out := &in.EmailAccountAdmins, &out.EmailAccountAdmins + *out = new(string) + **out = **in + } + if in.EmailAddresses != nil { + in, out := &in.EmailAddresses, &out.EmailAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RetentionDays != nil { + in, out := &in.RetentionDays, &out.RetentionDays + *out = new(float64) + **out = **in + } + if in.State != nil { + in, out := &in.State, &out.State + *out = new(string) + **out = **in + } + if in.StorageEndpoint != nil { + in, out := &in.StorageEndpoint, &out.StorageEndpoint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThreatDetectionPolicyObservation. +func (in *ThreatDetectionPolicyObservation) DeepCopy() *ThreatDetectionPolicyObservation { + if in == nil { + return nil + } + out := new(ThreatDetectionPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ThreatDetectionPolicyParameters) DeepCopyInto(out *ThreatDetectionPolicyParameters) { + *out = *in + if in.DisabledAlerts != nil { + in, out := &in.DisabledAlerts, &out.DisabledAlerts + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.EmailAccountAdmins != nil { + in, out := &in.EmailAccountAdmins, &out.EmailAccountAdmins + *out = new(string) + **out = **in + } + if in.EmailAddresses != nil { + in, out := &in.EmailAddresses, &out.EmailAddresses + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.RetentionDays != nil { + in, out := &in.RetentionDays, &out.RetentionDays + *out = new(float64) + **out = **in + } + if in.State != nil { + in, out := &in.State, &out.State + *out = new(string) + **out = **in + } + if in.StorageAccountAccessKeySecretRef != nil { + in, out := &in.StorageAccountAccessKeySecretRef, &out.StorageAccountAccessKeySecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.StorageEndpoint != nil { + in, out := &in.StorageEndpoint, &out.StorageEndpoint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThreatDetectionPolicyParameters. +func (in *ThreatDetectionPolicyParameters) DeepCopy() *ThreatDetectionPolicyParameters { + if in == nil { + return nil + } + out := new(ThreatDetectionPolicyParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/sql/v1beta2/zz_generated.managed.go b/apis/sql/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..cef8dc967 --- /dev/null +++ b/apis/sql/v1beta2/zz_generated.managed.go @@ -0,0 +1,548 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. 
+ +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this MSSQLDatabase. +func (mg *MSSQLDatabase) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this MSSQLDatabase. +func (mg *MSSQLDatabase) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this MSSQLDatabase. +func (mg *MSSQLDatabase) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this MSSQLDatabase. +func (mg *MSSQLDatabase) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this MSSQLDatabase. +func (mg *MSSQLDatabase) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this MSSQLDatabase. +func (mg *MSSQLDatabase) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this MSSQLDatabase. +func (mg *MSSQLDatabase) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this MSSQLDatabase. +func (mg *MSSQLDatabase) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this MSSQLDatabase. +func (mg *MSSQLDatabase) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this MSSQLDatabase. +func (mg *MSSQLDatabase) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this MSSQLDatabase. 
+func (mg *MSSQLDatabase) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this MSSQLDatabase. +func (mg *MSSQLDatabase) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this MSSQLElasticPool. +func (mg *MSSQLElasticPool) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this MSSQLElasticPool. +func (mg *MSSQLElasticPool) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this MSSQLElasticPool. +func (mg *MSSQLElasticPool) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this MSSQLElasticPool. +func (mg *MSSQLElasticPool) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this MSSQLElasticPool. +func (mg *MSSQLElasticPool) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this MSSQLElasticPool. +func (mg *MSSQLElasticPool) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this MSSQLElasticPool. +func (mg *MSSQLElasticPool) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this MSSQLElasticPool. +func (mg *MSSQLElasticPool) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this MSSQLElasticPool. +func (mg *MSSQLElasticPool) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this MSSQLElasticPool. 
+func (mg *MSSQLElasticPool) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this MSSQLElasticPool. +func (mg *MSSQLElasticPool) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this MSSQLElasticPool. +func (mg *MSSQLElasticPool) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this MSSQLFailoverGroup. +func (mg *MSSQLFailoverGroup) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this MSSQLFailoverGroup. +func (mg *MSSQLFailoverGroup) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this MSSQLFailoverGroup. +func (mg *MSSQLFailoverGroup) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this MSSQLFailoverGroup. +func (mg *MSSQLFailoverGroup) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this MSSQLFailoverGroup. +func (mg *MSSQLFailoverGroup) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this MSSQLFailoverGroup. +func (mg *MSSQLFailoverGroup) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this MSSQLFailoverGroup. +func (mg *MSSQLFailoverGroup) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this MSSQLFailoverGroup. 
+func (mg *MSSQLFailoverGroup) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this MSSQLFailoverGroup. +func (mg *MSSQLFailoverGroup) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this MSSQLFailoverGroup. +func (mg *MSSQLFailoverGroup) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this MSSQLFailoverGroup. +func (mg *MSSQLFailoverGroup) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this MSSQLFailoverGroup. +func (mg *MSSQLFailoverGroup) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this MSSQLManagedDatabase. +func (mg *MSSQLManagedDatabase) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this MSSQLManagedDatabase. +func (mg *MSSQLManagedDatabase) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this MSSQLManagedDatabase. +func (mg *MSSQLManagedDatabase) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this MSSQLManagedDatabase. +func (mg *MSSQLManagedDatabase) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this MSSQLManagedDatabase. +func (mg *MSSQLManagedDatabase) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this MSSQLManagedDatabase. 
+func (mg *MSSQLManagedDatabase) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this MSSQLManagedDatabase. +func (mg *MSSQLManagedDatabase) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this MSSQLManagedDatabase. +func (mg *MSSQLManagedDatabase) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this MSSQLManagedDatabase. +func (mg *MSSQLManagedDatabase) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this MSSQLManagedDatabase. +func (mg *MSSQLManagedDatabase) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this MSSQLManagedDatabase. +func (mg *MSSQLManagedDatabase) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this MSSQLManagedDatabase. +func (mg *MSSQLManagedDatabase) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this MSSQLManagedInstance. +func (mg *MSSQLManagedInstance) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this MSSQLManagedInstance. +func (mg *MSSQLManagedInstance) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this MSSQLManagedInstance. +func (mg *MSSQLManagedInstance) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this MSSQLManagedInstance. 
+func (mg *MSSQLManagedInstance) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this MSSQLManagedInstance. +func (mg *MSSQLManagedInstance) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this MSSQLManagedInstance. +func (mg *MSSQLManagedInstance) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this MSSQLManagedInstance. +func (mg *MSSQLManagedInstance) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this MSSQLManagedInstance. +func (mg *MSSQLManagedInstance) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this MSSQLManagedInstance. +func (mg *MSSQLManagedInstance) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this MSSQLManagedInstance. +func (mg *MSSQLManagedInstance) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this MSSQLManagedInstance. +func (mg *MSSQLManagedInstance) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this MSSQLManagedInstance. +func (mg *MSSQLManagedInstance) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this MSSQLManagedInstanceFailoverGroup. +func (mg *MSSQLManagedInstanceFailoverGroup) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this MSSQLManagedInstanceFailoverGroup. 
+func (mg *MSSQLManagedInstanceFailoverGroup) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this MSSQLManagedInstanceFailoverGroup. +func (mg *MSSQLManagedInstanceFailoverGroup) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this MSSQLManagedInstanceFailoverGroup. +func (mg *MSSQLManagedInstanceFailoverGroup) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this MSSQLManagedInstanceFailoverGroup. +func (mg *MSSQLManagedInstanceFailoverGroup) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this MSSQLManagedInstanceFailoverGroup. +func (mg *MSSQLManagedInstanceFailoverGroup) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this MSSQLManagedInstanceFailoverGroup. +func (mg *MSSQLManagedInstanceFailoverGroup) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this MSSQLManagedInstanceFailoverGroup. +func (mg *MSSQLManagedInstanceFailoverGroup) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this MSSQLManagedInstanceFailoverGroup. +func (mg *MSSQLManagedInstanceFailoverGroup) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this MSSQLManagedInstanceFailoverGroup. +func (mg *MSSQLManagedInstanceFailoverGroup) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this MSSQLManagedInstanceFailoverGroup. 
+func (mg *MSSQLManagedInstanceFailoverGroup) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this MSSQLManagedInstanceFailoverGroup. +func (mg *MSSQLManagedInstanceFailoverGroup) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this MSSQLManagedInstanceVulnerabilityAssessment. +func (mg *MSSQLManagedInstanceVulnerabilityAssessment) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this MSSQLManagedInstanceVulnerabilityAssessment. +func (mg *MSSQLManagedInstanceVulnerabilityAssessment) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this MSSQLManagedInstanceVulnerabilityAssessment. +func (mg *MSSQLManagedInstanceVulnerabilityAssessment) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this MSSQLManagedInstanceVulnerabilityAssessment. +func (mg *MSSQLManagedInstanceVulnerabilityAssessment) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this MSSQLManagedInstanceVulnerabilityAssessment. +func (mg *MSSQLManagedInstanceVulnerabilityAssessment) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this MSSQLManagedInstanceVulnerabilityAssessment. +func (mg *MSSQLManagedInstanceVulnerabilityAssessment) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this MSSQLManagedInstanceVulnerabilityAssessment. 
+func (mg *MSSQLManagedInstanceVulnerabilityAssessment) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this MSSQLManagedInstanceVulnerabilityAssessment. +func (mg *MSSQLManagedInstanceVulnerabilityAssessment) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this MSSQLManagedInstanceVulnerabilityAssessment. +func (mg *MSSQLManagedInstanceVulnerabilityAssessment) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this MSSQLManagedInstanceVulnerabilityAssessment. +func (mg *MSSQLManagedInstanceVulnerabilityAssessment) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this MSSQLManagedInstanceVulnerabilityAssessment. +func (mg *MSSQLManagedInstanceVulnerabilityAssessment) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this MSSQLManagedInstanceVulnerabilityAssessment. +func (mg *MSSQLManagedInstanceVulnerabilityAssessment) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this MSSQLServer. +func (mg *MSSQLServer) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this MSSQLServer. +func (mg *MSSQLServer) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this MSSQLServer. +func (mg *MSSQLServer) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this MSSQLServer. 
+func (mg *MSSQLServer) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this MSSQLServer. +func (mg *MSSQLServer) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this MSSQLServer. +func (mg *MSSQLServer) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this MSSQLServer. +func (mg *MSSQLServer) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this MSSQLServer. +func (mg *MSSQLServer) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this MSSQLServer. +func (mg *MSSQLServer) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this MSSQLServer. +func (mg *MSSQLServer) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this MSSQLServer. +func (mg *MSSQLServer) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this MSSQLServer. +func (mg *MSSQLServer) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this MSSQLServerVulnerabilityAssessment. +func (mg *MSSQLServerVulnerabilityAssessment) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this MSSQLServerVulnerabilityAssessment. +func (mg *MSSQLServerVulnerabilityAssessment) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this MSSQLServerVulnerabilityAssessment. 
+func (mg *MSSQLServerVulnerabilityAssessment) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this MSSQLServerVulnerabilityAssessment. +func (mg *MSSQLServerVulnerabilityAssessment) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this MSSQLServerVulnerabilityAssessment. +func (mg *MSSQLServerVulnerabilityAssessment) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this MSSQLServerVulnerabilityAssessment. +func (mg *MSSQLServerVulnerabilityAssessment) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this MSSQLServerVulnerabilityAssessment. +func (mg *MSSQLServerVulnerabilityAssessment) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this MSSQLServerVulnerabilityAssessment. +func (mg *MSSQLServerVulnerabilityAssessment) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this MSSQLServerVulnerabilityAssessment. +func (mg *MSSQLServerVulnerabilityAssessment) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this MSSQLServerVulnerabilityAssessment. +func (mg *MSSQLServerVulnerabilityAssessment) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this MSSQLServerVulnerabilityAssessment. +func (mg *MSSQLServerVulnerabilityAssessment) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this MSSQLServerVulnerabilityAssessment. 
+func (mg *MSSQLServerVulnerabilityAssessment) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/sql/v1beta2/zz_generated.managedlist.go b/apis/sql/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..2e18a7340 --- /dev/null +++ b/apis/sql/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,89 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this MSSQLDatabaseList. +func (l *MSSQLDatabaseList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this MSSQLElasticPoolList. +func (l *MSSQLElasticPoolList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this MSSQLFailoverGroupList. +func (l *MSSQLFailoverGroupList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this MSSQLManagedDatabaseList. +func (l *MSSQLManagedDatabaseList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this MSSQLManagedInstanceFailoverGroupList. +func (l *MSSQLManagedInstanceFailoverGroupList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this MSSQLManagedInstanceList. 
+func (l *MSSQLManagedInstanceList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this MSSQLManagedInstanceVulnerabilityAssessmentList. +func (l *MSSQLManagedInstanceVulnerabilityAssessmentList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this MSSQLServerList. +func (l *MSSQLServerList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this MSSQLServerVulnerabilityAssessmentList. +func (l *MSSQLServerVulnerabilityAssessmentList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/sql/v1beta2/zz_generated.resolvers.go b/apis/sql/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..cec9a7925 --- /dev/null +++ b/apis/sql/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,750 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + rconfig "github.com/upbound/provider-azure/apis/rconfig" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + apisresolver "github.com/upbound/provider-azure/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *MSSQLDatabase) ResolveReferences( // ResolveReferences of this MSSQLDatabase. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", "v1beta2", "MSSQLServer", "MSSQLServerList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ServerID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.ServerIDRef, + Selector: mg.Spec.ForProvider.ServerIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ServerID") + } + mg.Spec.ForProvider.ServerID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ServerIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta2", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.TransparentDataEncryptionKeyVaultKeyID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.TransparentDataEncryptionKeyVaultKeyIDRef, + Selector: mg.Spec.ForProvider.TransparentDataEncryptionKeyVaultKeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.TransparentDataEncryptionKeyVaultKeyID") + } + mg.Spec.ForProvider.TransparentDataEncryptionKeyVaultKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.TransparentDataEncryptionKeyVaultKeyIDRef = rsp.ResolvedReference + { + m, l, err = 
apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta2", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.TransparentDataEncryptionKeyVaultKeyID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.TransparentDataEncryptionKeyVaultKeyIDRef, + Selector: mg.Spec.InitProvider.TransparentDataEncryptionKeyVaultKeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.TransparentDataEncryptionKeyVaultKeyID") + } + mg.Spec.InitProvider.TransparentDataEncryptionKeyVaultKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.TransparentDataEncryptionKeyVaultKeyIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this MSSQLElasticPool. 
+func (mg *MSSQLElasticPool) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", "v1beta2", "MSSQLServer", "MSSQLServerList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ServerName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ServerNameRef, + Selector: mg.Spec.ForProvider.ServerNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ServerName") + } + mg.Spec.ForProvider.ServerName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ServerNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this MSSQLFailoverGroup. 
+func (mg *MSSQLFailoverGroup) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var mrsp reference.MultiResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", "v1beta2", "MSSQLDatabase", "MSSQLDatabaseList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.ForProvider.Databases), + Extract: rconfig.ExtractResourceID(), + References: mg.Spec.ForProvider.DatabasesRefs, + Selector: mg.Spec.ForProvider.DatabasesSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Databases") + } + mg.Spec.ForProvider.Databases = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.ForProvider.DatabasesRefs = mrsp.ResolvedReferences + + for i3 := 0; i3 < len(mg.Spec.ForProvider.PartnerServer); i3++ { + { + m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", "v1beta2", "MSSQLServer", "MSSQLServerList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.PartnerServer[i3].ID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.PartnerServer[i3].IDRef, + Selector: mg.Spec.ForProvider.PartnerServer[i3].IDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.PartnerServer[i3].ID") + } + mg.Spec.ForProvider.PartnerServer[i3].ID = reference.ToPtrValue(rsp.ResolvedValue) + 
mg.Spec.ForProvider.PartnerServer[i3].IDRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", "v1beta2", "MSSQLServer", "MSSQLServerList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ServerID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.ServerIDRef, + Selector: mg.Spec.ForProvider.ServerIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ServerID") + } + mg.Spec.ForProvider.ServerID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ServerIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", "v1beta2", "MSSQLDatabase", "MSSQLDatabaseList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + mrsp, err = r.ResolveMultiple(ctx, reference.MultiResolutionRequest{ + CurrentValues: reference.FromPtrValues(mg.Spec.InitProvider.Databases), + Extract: rconfig.ExtractResourceID(), + References: mg.Spec.InitProvider.DatabasesRefs, + Selector: mg.Spec.InitProvider.DatabasesSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Databases") + } + mg.Spec.InitProvider.Databases = reference.ToPtrValues(mrsp.ResolvedValues) + mg.Spec.InitProvider.DatabasesRefs = mrsp.ResolvedReferences + + for i3 := 0; i3 < len(mg.Spec.InitProvider.PartnerServer); i3++ { + { + m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", "v1beta2", "MSSQLServer", "MSSQLServerList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference 
resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.PartnerServer[i3].ID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.PartnerServer[i3].IDRef, + Selector: mg.Spec.InitProvider.PartnerServer[i3].IDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.PartnerServer[i3].ID") + } + mg.Spec.InitProvider.PartnerServer[i3].ID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.PartnerServer[i3].IDRef = rsp.ResolvedReference + + } + + return nil +} + +// ResolveReferences of this MSSQLManagedDatabase. +func (mg *MSSQLManagedDatabase) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", "v1beta2", "MSSQLManagedInstance", "MSSQLManagedInstanceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ManagedInstanceID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.ManagedInstanceIDRef, + Selector: mg.Spec.ForProvider.ManagedInstanceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ManagedInstanceID") + } + mg.Spec.ForProvider.ManagedInstanceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ManagedInstanceIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this MSSQLManagedInstance. 
+func (mg *MSSQLManagedInstance) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", "v1beta2", "MSSQLManagedInstance", "MSSQLManagedInstanceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.DNSZonePartnerID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.DNSZonePartnerIDRef, + Selector: mg.Spec.ForProvider.DNSZonePartnerIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.DNSZonePartnerID") + } + mg.Spec.ForProvider.DNSZonePartnerID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.DNSZonePartnerIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = 
apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SubnetID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.SubnetIDRef, + Selector: mg.Spec.ForProvider.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SubnetID") + } + mg.Spec.ForProvider.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SubnetIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", "v1beta2", "MSSQLManagedInstance", "MSSQLManagedInstanceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.DNSZonePartnerID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.DNSZonePartnerIDRef, + Selector: mg.Spec.InitProvider.DNSZonePartnerIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.DNSZonePartnerID") + } + mg.Spec.InitProvider.DNSZonePartnerID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.DNSZonePartnerIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.InitProvider.SubnetID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.SubnetIDRef, + Selector: mg.Spec.InitProvider.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SubnetID") + } + mg.Spec.InitProvider.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SubnetIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this MSSQLManagedInstanceFailoverGroup. +func (mg *MSSQLManagedInstanceFailoverGroup) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", "v1beta2", "MSSQLManagedInstance", "MSSQLManagedInstanceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ManagedInstanceID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.ManagedInstanceIDRef, + Selector: mg.Spec.ForProvider.ManagedInstanceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ManagedInstanceID") + } + mg.Spec.ForProvider.ManagedInstanceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ManagedInstanceIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", "v1beta2", "MSSQLManagedInstance", "MSSQLManagedInstanceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, 
reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.PartnerManagedInstanceID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.PartnerManagedInstanceIDRef, + Selector: mg.Spec.ForProvider.PartnerManagedInstanceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.PartnerManagedInstanceID") + } + mg.Spec.ForProvider.PartnerManagedInstanceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.PartnerManagedInstanceIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", "v1beta2", "MSSQLManagedInstance", "MSSQLManagedInstanceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ManagedInstanceID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.ManagedInstanceIDRef, + Selector: mg.Spec.InitProvider.ManagedInstanceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ManagedInstanceID") + } + mg.Spec.InitProvider.ManagedInstanceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ManagedInstanceIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", "v1beta2", "MSSQLManagedInstance", "MSSQLManagedInstanceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.PartnerManagedInstanceID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.PartnerManagedInstanceIDRef, + Selector: 
mg.Spec.InitProvider.PartnerManagedInstanceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.PartnerManagedInstanceID") + } + mg.Spec.InitProvider.PartnerManagedInstanceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.PartnerManagedInstanceIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this MSSQLManagedInstanceVulnerabilityAssessment. +func (mg *MSSQLManagedInstanceVulnerabilityAssessment) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", "v1beta2", "MSSQLManagedInstance", "MSSQLManagedInstanceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ManagedInstanceID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.ManagedInstanceIDRef, + Selector: mg.Spec.ForProvider.ManagedInstanceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ManagedInstanceID") + } + mg.Spec.ForProvider.ManagedInstanceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ManagedInstanceIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this MSSQLServer. 
+func (mg *MSSQLServer) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + if mg.Spec.ForProvider.AzureadAdministrator != nil { + { + m, l, err = apisresolver.GetManagedResource("managedidentity.azure.upbound.io", "v1beta1", "UserAssignedIdentity", "UserAssignedIdentityList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.AzureadAdministrator.LoginUsername), + Extract: resource.ExtractParamPath("name", false), + Reference: mg.Spec.ForProvider.AzureadAdministrator.LoginUsernameRef, + Selector: mg.Spec.ForProvider.AzureadAdministrator.LoginUsernameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.AzureadAdministrator.LoginUsername") + } + mg.Spec.ForProvider.AzureadAdministrator.LoginUsername = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.AzureadAdministrator.LoginUsernameRef = rsp.ResolvedReference + + } + if mg.Spec.ForProvider.AzureadAdministrator != nil { + { + m, l, err = apisresolver.GetManagedResource("managedidentity.azure.upbound.io", "v1beta1", "UserAssignedIdentity", "UserAssignedIdentityList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.AzureadAdministrator.ObjectID), + Extract: resource.ExtractParamPath("principal_id", true), + Reference: mg.Spec.ForProvider.AzureadAdministrator.ObjectIDRef, + Selector: mg.Spec.ForProvider.AzureadAdministrator.ObjectIDSelector, + To: 
reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.AzureadAdministrator.ObjectID") + } + mg.Spec.ForProvider.AzureadAdministrator.ObjectID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.AzureadAdministrator.ObjectIDRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("managedidentity.azure.upbound.io", "v1beta1", "UserAssignedIdentity", "UserAssignedIdentityList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.PrimaryUserAssignedIdentityID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.PrimaryUserAssignedIdentityIDRef, + Selector: mg.Spec.ForProvider.PrimaryUserAssignedIdentityIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.PrimaryUserAssignedIdentityID") + } + mg.Spec.ForProvider.PrimaryUserAssignedIdentityID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.PrimaryUserAssignedIdentityIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = 
reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta2", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.TransparentDataEncryptionKeyVaultKeyID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.TransparentDataEncryptionKeyVaultKeyIDRef, + Selector: mg.Spec.ForProvider.TransparentDataEncryptionKeyVaultKeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.TransparentDataEncryptionKeyVaultKeyID") + } + mg.Spec.ForProvider.TransparentDataEncryptionKeyVaultKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.TransparentDataEncryptionKeyVaultKeyIDRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.AzureadAdministrator != nil { + { + m, l, err = apisresolver.GetManagedResource("managedidentity.azure.upbound.io", "v1beta1", "UserAssignedIdentity", "UserAssignedIdentityList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.AzureadAdministrator.LoginUsername), + Extract: resource.ExtractParamPath("name", false), + Reference: mg.Spec.InitProvider.AzureadAdministrator.LoginUsernameRef, + Selector: mg.Spec.InitProvider.AzureadAdministrator.LoginUsernameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.AzureadAdministrator.LoginUsername") + } + mg.Spec.InitProvider.AzureadAdministrator.LoginUsername = 
reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.AzureadAdministrator.LoginUsernameRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.AzureadAdministrator != nil { + { + m, l, err = apisresolver.GetManagedResource("managedidentity.azure.upbound.io", "v1beta1", "UserAssignedIdentity", "UserAssignedIdentityList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.AzureadAdministrator.ObjectID), + Extract: resource.ExtractParamPath("principal_id", true), + Reference: mg.Spec.InitProvider.AzureadAdministrator.ObjectIDRef, + Selector: mg.Spec.InitProvider.AzureadAdministrator.ObjectIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.AzureadAdministrator.ObjectID") + } + mg.Spec.InitProvider.AzureadAdministrator.ObjectID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.AzureadAdministrator.ObjectIDRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("managedidentity.azure.upbound.io", "v1beta1", "UserAssignedIdentity", "UserAssignedIdentityList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.PrimaryUserAssignedIdentityID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.PrimaryUserAssignedIdentityIDRef, + Selector: mg.Spec.InitProvider.PrimaryUserAssignedIdentityIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.PrimaryUserAssignedIdentityID") + } + mg.Spec.InitProvider.PrimaryUserAssignedIdentityID = 
reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.PrimaryUserAssignedIdentityIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta2", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.TransparentDataEncryptionKeyVaultKeyID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.TransparentDataEncryptionKeyVaultKeyIDRef, + Selector: mg.Spec.InitProvider.TransparentDataEncryptionKeyVaultKeyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.TransparentDataEncryptionKeyVaultKeyID") + } + mg.Spec.InitProvider.TransparentDataEncryptionKeyVaultKeyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.TransparentDataEncryptionKeyVaultKeyIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this MSSQLServerVulnerabilityAssessment. 
+func (mg *MSSQLServerVulnerabilityAssessment) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", "v1beta1", "MSSQLServerSecurityAlertPolicy", "MSSQLServerSecurityAlertPolicyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ServerSecurityAlertPolicyID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.ServerSecurityAlertPolicyIDRef, + Selector: mg.Spec.ForProvider.ServerSecurityAlertPolicyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ServerSecurityAlertPolicyID") + } + mg.Spec.ForProvider.ServerSecurityAlertPolicyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ServerSecurityAlertPolicyIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", "v1beta1", "MSSQLServerSecurityAlertPolicy", "MSSQLServerSecurityAlertPolicyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ServerSecurityAlertPolicyID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.ServerSecurityAlertPolicyIDRef, + Selector: mg.Spec.InitProvider.ServerSecurityAlertPolicyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ServerSecurityAlertPolicyID") + } + 
mg.Spec.InitProvider.ServerSecurityAlertPolicyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ServerSecurityAlertPolicyIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/sql/v1beta2/zz_groupversion_info.go b/apis/sql/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..5d0d4e807 --- /dev/null +++ b/apis/sql/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=sql.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "sql.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/sql/v1beta2/zz_mssqldatabase_terraformed.go b/apis/sql/v1beta2/zz_mssqldatabase_terraformed.go new file mode 100755 index 000000000..067fbd5a3 --- /dev/null +++ b/apis/sql/v1beta2/zz_mssqldatabase_terraformed.go @@ -0,0 +1,131 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this MSSQLDatabase +func (mg *MSSQLDatabase) GetTerraformResourceType() string { + return "azurerm_mssql_database" +} + +// GetConnectionDetailsMapping for this MSSQLDatabase +func (tr *MSSQLDatabase) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"import[*].administrator_login_password": "spec.forProvider.import[*].administratorLoginPasswordSecretRef", "import[*].storage_key": "spec.forProvider.import[*].storageKeySecretRef", "threat_detection_policy[*].storage_account_access_key": "spec.forProvider.threatDetectionPolicy[*].storageAccountAccessKeySecretRef"} +} + +// GetObservation of this MSSQLDatabase +func (tr *MSSQLDatabase) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this MSSQLDatabase +func (tr *MSSQLDatabase) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this MSSQLDatabase +func (tr *MSSQLDatabase) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this MSSQLDatabase +func (tr *MSSQLDatabase) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this MSSQLDatabase +func (tr *MSSQLDatabase) SetParameters(params map[string]any) error { + p, err 
:= json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this MSSQLDatabase +func (tr *MSSQLDatabase) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this MSSQLDatabase +func (tr *MSSQLDatabase) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this MSSQLDatabase using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *MSSQLDatabase) LateInitialize(attrs []byte) (bool, error) { + params := &MSSQLDatabaseParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + opts = append(opts, resource.WithNameFilter("ElasticPoolID")) + opts = append(opts, resource.WithNameFilter("MaintenanceConfigurationName")) + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *MSSQLDatabase) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/sql/v1beta2/zz_mssqldatabase_types.go b/apis/sql/v1beta2/zz_mssqldatabase_types.go new file mode 100755 index 000000000..3358237e7 --- /dev/null +++ b/apis/sql/v1beta2/zz_mssqldatabase_types.go @@ -0,0 +1,712 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type IdentityInitParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this SQL Database. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this SQL Database. Possible value is UserAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityObservation struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this SQL Database. 
+ // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this SQL Database. Possible value is UserAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this SQL Database. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this SQL Database. Possible value is UserAssigned. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type ImportInitParameters struct { + + // Specifies the name of the SQL administrator. + AdministratorLogin *string `json:"administratorLogin,omitempty" tf:"administrator_login,omitempty"` + + // Specifies the type of authentication used to access the server. Valid values are SQL or ADPassword. + AuthenticationType *string `json:"authenticationType,omitempty" tf:"authentication_type,omitempty"` + + // The resource id for the storage account used to store BACPAC file. If set, private endpoint connection will be created for the storage account. Must match storage account used for storage_uri parameter. + StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` + + // Specifies the type of access key for the storage account. Valid values are StorageAccessKey or SharedAccessKey. + StorageKeyType *string `json:"storageKeyType,omitempty" tf:"storage_key_type,omitempty"` + + // Specifies the blob URI of the .bacpac file. + StorageURI *string `json:"storageUri,omitempty" tf:"storage_uri,omitempty"` +} + +type ImportObservation struct { + + // Specifies the name of the SQL administrator. 
+ AdministratorLogin *string `json:"administratorLogin,omitempty" tf:"administrator_login,omitempty"` + + // Specifies the type of authentication used to access the server. Valid values are SQL or ADPassword. + AuthenticationType *string `json:"authenticationType,omitempty" tf:"authentication_type,omitempty"` + + // The resource id for the storage account used to store BACPAC file. If set, private endpoint connection will be created for the storage account. Must match storage account used for storage_uri parameter. + StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` + + // Specifies the type of access key for the storage account. Valid values are StorageAccessKey or SharedAccessKey. + StorageKeyType *string `json:"storageKeyType,omitempty" tf:"storage_key_type,omitempty"` + + // Specifies the blob URI of the .bacpac file. + StorageURI *string `json:"storageUri,omitempty" tf:"storage_uri,omitempty"` +} + +type ImportParameters struct { + + // Specifies the name of the SQL administrator. + // +kubebuilder:validation:Optional + AdministratorLogin *string `json:"administratorLogin" tf:"administrator_login,omitempty"` + + // Specifies the password of the SQL administrator. + // +kubebuilder:validation:Required + AdministratorLoginPasswordSecretRef v1.SecretKeySelector `json:"administratorLoginPasswordSecretRef" tf:"-"` + + // Specifies the type of authentication used to access the server. Valid values are SQL or ADPassword. + // +kubebuilder:validation:Optional + AuthenticationType *string `json:"authenticationType" tf:"authentication_type,omitempty"` + + // The resource id for the storage account used to store BACPAC file. If set, private endpoint connection will be created for the storage account. Must match storage account used for storage_uri parameter. 
+ // +kubebuilder:validation:Optional + StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` + + // Specifies the access key for the storage account. + // +kubebuilder:validation:Required + StorageKeySecretRef v1.SecretKeySelector `json:"storageKeySecretRef" tf:"-"` + + // Specifies the type of access key for the storage account. Valid values are StorageAccessKey or SharedAccessKey. + // +kubebuilder:validation:Optional + StorageKeyType *string `json:"storageKeyType" tf:"storage_key_type,omitempty"` + + // Specifies the blob URI of the .bacpac file. + // +kubebuilder:validation:Optional + StorageURI *string `json:"storageUri" tf:"storage_uri,omitempty"` +} + +type LongTermRetentionPolicyInitParameters struct { + + // Specifies if the backups are immutable. Defaults to false. + ImmutableBackupsEnabled *bool `json:"immutableBackupsEnabled,omitempty" tf:"immutable_backups_enabled,omitempty"` + + // The monthly retention policy for an LTR backup in an ISO 8601 format. Valid value is between 1 to 120 months. e.g. P1Y, P1M, P4W or P30D. + MonthlyRetention *string `json:"monthlyRetention,omitempty" tf:"monthly_retention,omitempty"` + + // The week of year to take the yearly backup. Value has to be between 1 and 52. + WeekOfYear *float64 `json:"weekOfYear,omitempty" tf:"week_of_year,omitempty"` + + // The weekly retention policy for an LTR backup in an ISO 8601 format. Valid value is between 1 to 520 weeks. e.g. P1Y, P1M, P1W or P7D. + WeeklyRetention *string `json:"weeklyRetention,omitempty" tf:"weekly_retention,omitempty"` + + // The yearly retention policy for an LTR backup in an ISO 8601 format. Valid value is between 1 to 10 years. e.g. P1Y, P12M, P52W or P365D. + YearlyRetention *string `json:"yearlyRetention,omitempty" tf:"yearly_retention,omitempty"` +} + +type LongTermRetentionPolicyObservation struct { + + // Specifies if the backups are immutable. Defaults to false. 
+ ImmutableBackupsEnabled *bool `json:"immutableBackupsEnabled,omitempty" tf:"immutable_backups_enabled,omitempty"` + + // The monthly retention policy for an LTR backup in an ISO 8601 format. Valid value is between 1 to 120 months. e.g. P1Y, P1M, P4W or P30D. + MonthlyRetention *string `json:"monthlyRetention,omitempty" tf:"monthly_retention,omitempty"` + + // The week of year to take the yearly backup. Value has to be between 1 and 52. + WeekOfYear *float64 `json:"weekOfYear,omitempty" tf:"week_of_year,omitempty"` + + // The weekly retention policy for an LTR backup in an ISO 8601 format. Valid value is between 1 to 520 weeks. e.g. P1Y, P1M, P1W or P7D. + WeeklyRetention *string `json:"weeklyRetention,omitempty" tf:"weekly_retention,omitempty"` + + // The yearly retention policy for an LTR backup in an ISO 8601 format. Valid value is between 1 to 10 years. e.g. P1Y, P12M, P52W or P365D. + YearlyRetention *string `json:"yearlyRetention,omitempty" tf:"yearly_retention,omitempty"` +} + +type LongTermRetentionPolicyParameters struct { + + // Specifies if the backups are immutable. Defaults to false. + // +kubebuilder:validation:Optional + ImmutableBackupsEnabled *bool `json:"immutableBackupsEnabled,omitempty" tf:"immutable_backups_enabled,omitempty"` + + // The monthly retention policy for an LTR backup in an ISO 8601 format. Valid value is between 1 to 120 months. e.g. P1Y, P1M, P4W or P30D. + // +kubebuilder:validation:Optional + MonthlyRetention *string `json:"monthlyRetention,omitempty" tf:"monthly_retention,omitempty"` + + // The week of year to take the yearly backup. Value has to be between 1 and 52. + // +kubebuilder:validation:Optional + WeekOfYear *float64 `json:"weekOfYear,omitempty" tf:"week_of_year,omitempty"` + + // The weekly retention policy for an LTR backup in an ISO 8601 format. Valid value is between 1 to 520 weeks. e.g. P1Y, P1M, P1W or P7D. 
+ // +kubebuilder:validation:Optional + WeeklyRetention *string `json:"weeklyRetention,omitempty" tf:"weekly_retention,omitempty"` + + // The yearly retention policy for an LTR backup in an ISO 8601 format. Valid value is between 1 to 10 years. e.g. P1Y, P12M, P52W or P365D. + // +kubebuilder:validation:Optional + YearlyRetention *string `json:"yearlyRetention,omitempty" tf:"yearly_retention,omitempty"` +} + +type MSSQLDatabaseInitParameters struct { + + // Time in minutes after which database is automatically paused. A value of -1 means that automatic pause is disabled. This property is only settable for Serverless databases. + AutoPauseDelayInMinutes *float64 `json:"autoPauseDelayInMinutes,omitempty" tf:"auto_pause_delay_in_minutes,omitempty"` + + // Specifies the collation of the database. Changing this forces a new resource to be created. + Collation *string `json:"collation,omitempty" tf:"collation,omitempty"` + + // The create mode of the database. Possible values are Copy, Default, OnlineSecondary, PointInTimeRestore, Recovery, Restore, RestoreExternalBackup, RestoreExternalBackupSecondary, RestoreLongTermRetentionBackup and Secondary. Mutually exclusive with import. Changing this forces a new resource to be created. Defaults to Default. + CreateMode *string `json:"createMode,omitempty" tf:"create_mode,omitempty"` + + // The ID of the source database from which to create the new database. This should only be used for databases with create_mode values that use another database as reference. Changing this forces a new resource to be created. + CreationSourceDatabaseID *string `json:"creationSourceDatabaseId,omitempty" tf:"creation_source_database_id,omitempty"` + + // Specifies the ID of the elastic pool containing this database. + ElasticPoolID *string `json:"elasticPoolId,omitempty" tf:"elastic_pool_id,omitempty"` + + // Specifies the type of enclave to be used by the database. Possible value VBS. 
+ EnclaveType *string `json:"enclaveType,omitempty" tf:"enclave_type,omitempty"` + + // A boolean that specifies if the Geo Backup Policy is enabled. Defaults to true. + GeoBackupEnabled *bool `json:"geoBackupEnabled,omitempty" tf:"geo_backup_enabled,omitempty"` + + // An identity block as defined below. + Identity *IdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // A import block as documented below. Mutually exclusive with create_mode. + Import *ImportInitParameters `json:"import,omitempty" tf:"import,omitempty"` + + // A boolean that specifies if this is a ledger database. Defaults to false. Changing this forces a new resource to be created. + LedgerEnabled *bool `json:"ledgerEnabled,omitempty" tf:"ledger_enabled,omitempty"` + + // Specifies the license type applied to this database. Possible values are LicenseIncluded and BasePrice. + LicenseType *string `json:"licenseType,omitempty" tf:"license_type,omitempty"` + + // A long_term_retention_policy block as defined below. + LongTermRetentionPolicy *LongTermRetentionPolicyInitParameters `json:"longTermRetentionPolicy,omitempty" tf:"long_term_retention_policy,omitempty"` + + // The name of the Public Maintenance Configuration window to apply to the database. 
Valid values include SQL_Default, SQL_EastUS_DB_1, SQL_EastUS2_DB_1, SQL_SoutheastAsia_DB_1, SQL_AustraliaEast_DB_1, SQL_NorthEurope_DB_1, SQL_SouthCentralUS_DB_1, SQL_WestUS2_DB_1, SQL_UKSouth_DB_1, SQL_WestEurope_DB_1, SQL_EastUS_DB_2, SQL_EastUS2_DB_2, SQL_WestUS2_DB_2, SQL_SoutheastAsia_DB_2, SQL_AustraliaEast_DB_2, SQL_NorthEurope_DB_2, SQL_SouthCentralUS_DB_2, SQL_UKSouth_DB_2, SQL_WestEurope_DB_2, SQL_AustraliaSoutheast_DB_1, SQL_BrazilSouth_DB_1, SQL_CanadaCentral_DB_1, SQL_CanadaEast_DB_1, SQL_CentralUS_DB_1, SQL_EastAsia_DB_1, SQL_FranceCentral_DB_1, SQL_GermanyWestCentral_DB_1, SQL_CentralIndia_DB_1, SQL_SouthIndia_DB_1, SQL_JapanEast_DB_1, SQL_JapanWest_DB_1, SQL_NorthCentralUS_DB_1, SQL_UKWest_DB_1, SQL_WestUS_DB_1, SQL_AustraliaSoutheast_DB_2, SQL_BrazilSouth_DB_2, SQL_CanadaCentral_DB_2, SQL_CanadaEast_DB_2, SQL_CentralUS_DB_2, SQL_EastAsia_DB_2, SQL_FranceCentral_DB_2, SQL_GermanyWestCentral_DB_2, SQL_CentralIndia_DB_2, SQL_SouthIndia_DB_2, SQL_JapanEast_DB_2, SQL_JapanWest_DB_2, SQL_NorthCentralUS_DB_2, SQL_UKWest_DB_2, SQL_WestUS_DB_2, SQL_WestCentralUS_DB_1, SQL_FranceSouth_DB_1, SQL_WestCentralUS_DB_2, SQL_FranceSouth_DB_2, SQL_SwitzerlandNorth_DB_1, SQL_SwitzerlandNorth_DB_2, SQL_BrazilSoutheast_DB_1, SQL_UAENorth_DB_1, SQL_BrazilSoutheast_DB_2, SQL_UAENorth_DB_2. Defaults to SQL_Default. + MaintenanceConfigurationName *string `json:"maintenanceConfigurationName,omitempty" tf:"maintenance_configuration_name,omitempty"` + + // The max size of the database in gigabytes. + MaxSizeGb *float64 `json:"maxSizeGb,omitempty" tf:"max_size_gb,omitempty"` + + // Minimal capacity that database will always have allocated, if not paused. This property is only settable for Serverless databases. + MinCapacity *float64 `json:"minCapacity,omitempty" tf:"min_capacity,omitempty"` + + // The number of readonly secondary replicas associated with the database to which readonly application intent connections may be routed. 
This property is only settable for Hyperscale edition databases. + ReadReplicaCount *float64 `json:"readReplicaCount,omitempty" tf:"read_replica_count,omitempty"` + + // If enabled, connections that have application intent set to readonly in their connection string may be routed to a readonly secondary replica. This property is only settable for Premium and Business Critical databases. + ReadScale *bool `json:"readScale,omitempty" tf:"read_scale,omitempty"` + + // The ID of the database to be recovered. This property is only applicable when the create_mode is Recovery. + RecoverDatabaseID *string `json:"recoverDatabaseId,omitempty" tf:"recover_database_id,omitempty"` + + // The ID of the Recovery Services Recovery Point Id to be restored. This property is only applicable when the create_mode is Recovery. + RecoveryPointID *string `json:"recoveryPointId,omitempty" tf:"recovery_point_id,omitempty"` + + // The ID of the database to be restored. This property is only applicable when the create_mode is Restore. + RestoreDroppedDatabaseID *string `json:"restoreDroppedDatabaseId,omitempty" tf:"restore_dropped_database_id,omitempty"` + + // The ID of the long term retention backup to be restored. This property is only applicable when the create_mode is RestoreLongTermRetentionBackup. + RestoreLongTermRetentionBackupID *string `json:"restoreLongTermRetentionBackupId,omitempty" tf:"restore_long_term_retention_backup_id,omitempty"` + + // Specifies the point in time (ISO8601 format) of the source database that will be restored to create the new database. This property is only settable for create_mode= PointInTimeRestore databases. + RestorePointInTime *string `json:"restorePointInTime,omitempty" tf:"restore_point_in_time,omitempty"` + + // Specifies the name of the sample schema to apply when creating this database. Possible value is AdventureWorksLT. 
+ SampleName *string `json:"sampleName,omitempty" tf:"sample_name,omitempty"` + + // A short_term_retention_policy block as defined below. + ShortTermRetentionPolicy *ShortTermRetentionPolicyInitParameters `json:"shortTermRetentionPolicy,omitempty" tf:"short_term_retention_policy,omitempty"` + + // Specifies the name of the SKU used by the database. For example, GP_S_Gen5_2,HS_Gen4_1,BC_Gen5_2, ElasticPool, Basic,S0, P2 ,DW100c, DS100. Changing this from the HyperScale service tier to another service tier will create a new resource. + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // Specifies the storage account type used to store backups for this database. Possible values are Geo, GeoZone, Local and Zone. Defaults to Geo. + StorageAccountType *string `json:"storageAccountType,omitempty" tf:"storage_account_type,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Threat detection policy configuration. The threat_detection_policy block supports fields documented below. + ThreatDetectionPolicy *ThreatDetectionPolicyInitParameters `json:"threatDetectionPolicy,omitempty" tf:"threat_detection_policy,omitempty"` + + // If set to true, Transparent Data Encryption will be enabled on the database. Defaults to true. + TransparentDataEncryptionEnabled *bool `json:"transparentDataEncryptionEnabled,omitempty" tf:"transparent_data_encryption_enabled,omitempty"` + + // Boolean flag to specify whether TDE automatically rotates the encryption Key to latest version or not. Possible values are true or false. Defaults to false. + TransparentDataEncryptionKeyAutomaticRotationEnabled *bool `json:"transparentDataEncryptionKeyAutomaticRotationEnabled,omitempty" tf:"transparent_data_encryption_key_automatic_rotation_enabled,omitempty"` + + // The fully versioned Key Vault Key URL (e.g. 
'https://.vault.azure.net/keys//) to be used as the Customer Managed Key(CMK/BYOK) for the Transparent Data Encryption(TDE) layer. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta2.Key + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + TransparentDataEncryptionKeyVaultKeyID *string `json:"transparentDataEncryptionKeyVaultKeyId,omitempty" tf:"transparent_data_encryption_key_vault_key_id,omitempty"` + + // Reference to a Key in keyvault to populate transparentDataEncryptionKeyVaultKeyId. + // +kubebuilder:validation:Optional + TransparentDataEncryptionKeyVaultKeyIDRef *v1.Reference `json:"transparentDataEncryptionKeyVaultKeyIdRef,omitempty" tf:"-"` + + // Selector for a Key in keyvault to populate transparentDataEncryptionKeyVaultKeyId. + // +kubebuilder:validation:Optional + TransparentDataEncryptionKeyVaultKeyIDSelector *v1.Selector `json:"transparentDataEncryptionKeyVaultKeyIdSelector,omitempty" tf:"-"` + + // Whether or not this database is zone redundant, which means the replicas of this database will be spread across multiple availability zones. This property is only settable for Premium and Business Critical databases. + ZoneRedundant *bool `json:"zoneRedundant,omitempty" tf:"zone_redundant,omitempty"` +} + +type MSSQLDatabaseObservation struct { + + // Time in minutes after which database is automatically paused. A value of -1 means that automatic pause is disabled. This property is only settable for Serverless databases. + AutoPauseDelayInMinutes *float64 `json:"autoPauseDelayInMinutes,omitempty" tf:"auto_pause_delay_in_minutes,omitempty"` + + // Specifies the collation of the database. Changing this forces a new resource to be created. + Collation *string `json:"collation,omitempty" tf:"collation,omitempty"` + + // The create mode of the database. 
Possible values are Copy, Default, OnlineSecondary, PointInTimeRestore, Recovery, Restore, RestoreExternalBackup, RestoreExternalBackupSecondary, RestoreLongTermRetentionBackup and Secondary. Mutually exclusive with import. Changing this forces a new resource to be created. Defaults to Default. + CreateMode *string `json:"createMode,omitempty" tf:"create_mode,omitempty"` + + // The ID of the source database from which to create the new database. This should only be used for databases with create_mode values that use another database as reference. Changing this forces a new resource to be created. + CreationSourceDatabaseID *string `json:"creationSourceDatabaseId,omitempty" tf:"creation_source_database_id,omitempty"` + + // Specifies the ID of the elastic pool containing this database. + ElasticPoolID *string `json:"elasticPoolId,omitempty" tf:"elastic_pool_id,omitempty"` + + // Specifies the type of enclave to be used by the database. Possible value VBS. + EnclaveType *string `json:"enclaveType,omitempty" tf:"enclave_type,omitempty"` + + // A boolean that specifies if the Geo Backup Policy is enabled. Defaults to true. + GeoBackupEnabled *bool `json:"geoBackupEnabled,omitempty" tf:"geo_backup_enabled,omitempty"` + + // The ID of the MS SQL Database. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. + Identity *IdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // A import block as documented below. Mutually exclusive with create_mode. + Import *ImportObservation `json:"import,omitempty" tf:"import,omitempty"` + + // A boolean that specifies if this is a ledger database. Defaults to false. Changing this forces a new resource to be created. + LedgerEnabled *bool `json:"ledgerEnabled,omitempty" tf:"ledger_enabled,omitempty"` + + // Specifies the license type applied to this database. Possible values are LicenseIncluded and BasePrice. 
+ LicenseType *string `json:"licenseType,omitempty" tf:"license_type,omitempty"` + + // A long_term_retention_policy block as defined below. + LongTermRetentionPolicy *LongTermRetentionPolicyObservation `json:"longTermRetentionPolicy,omitempty" tf:"long_term_retention_policy,omitempty"` + + // The name of the Public Maintenance Configuration window to apply to the database. Valid values include SQL_Default, SQL_EastUS_DB_1, SQL_EastUS2_DB_1, SQL_SoutheastAsia_DB_1, SQL_AustraliaEast_DB_1, SQL_NorthEurope_DB_1, SQL_SouthCentralUS_DB_1, SQL_WestUS2_DB_1, SQL_UKSouth_DB_1, SQL_WestEurope_DB_1, SQL_EastUS_DB_2, SQL_EastUS2_DB_2, SQL_WestUS2_DB_2, SQL_SoutheastAsia_DB_2, SQL_AustraliaEast_DB_2, SQL_NorthEurope_DB_2, SQL_SouthCentralUS_DB_2, SQL_UKSouth_DB_2, SQL_WestEurope_DB_2, SQL_AustraliaSoutheast_DB_1, SQL_BrazilSouth_DB_1, SQL_CanadaCentral_DB_1, SQL_CanadaEast_DB_1, SQL_CentralUS_DB_1, SQL_EastAsia_DB_1, SQL_FranceCentral_DB_1, SQL_GermanyWestCentral_DB_1, SQL_CentralIndia_DB_1, SQL_SouthIndia_DB_1, SQL_JapanEast_DB_1, SQL_JapanWest_DB_1, SQL_NorthCentralUS_DB_1, SQL_UKWest_DB_1, SQL_WestUS_DB_1, SQL_AustraliaSoutheast_DB_2, SQL_BrazilSouth_DB_2, SQL_CanadaCentral_DB_2, SQL_CanadaEast_DB_2, SQL_CentralUS_DB_2, SQL_EastAsia_DB_2, SQL_FranceCentral_DB_2, SQL_GermanyWestCentral_DB_2, SQL_CentralIndia_DB_2, SQL_SouthIndia_DB_2, SQL_JapanEast_DB_2, SQL_JapanWest_DB_2, SQL_NorthCentralUS_DB_2, SQL_UKWest_DB_2, SQL_WestUS_DB_2, SQL_WestCentralUS_DB_1, SQL_FranceSouth_DB_1, SQL_WestCentralUS_DB_2, SQL_FranceSouth_DB_2, SQL_SwitzerlandNorth_DB_1, SQL_SwitzerlandNorth_DB_2, SQL_BrazilSoutheast_DB_1, SQL_UAENorth_DB_1, SQL_BrazilSoutheast_DB_2, SQL_UAENorth_DB_2. Defaults to SQL_Default. + MaintenanceConfigurationName *string `json:"maintenanceConfigurationName,omitempty" tf:"maintenance_configuration_name,omitempty"` + + // The max size of the database in gigabytes. 
+ MaxSizeGb *float64 `json:"maxSizeGb,omitempty" tf:"max_size_gb,omitempty"` + + // Minimal capacity that database will always have allocated, if not paused. This property is only settable for Serverless databases. + MinCapacity *float64 `json:"minCapacity,omitempty" tf:"min_capacity,omitempty"` + + // The number of readonly secondary replicas associated with the database to which readonly application intent connections may be routed. This property is only settable for Hyperscale edition databases. + ReadReplicaCount *float64 `json:"readReplicaCount,omitempty" tf:"read_replica_count,omitempty"` + + // If enabled, connections that have application intent set to readonly in their connection string may be routed to a readonly secondary replica. This property is only settable for Premium and Business Critical databases. + ReadScale *bool `json:"readScale,omitempty" tf:"read_scale,omitempty"` + + // The ID of the database to be recovered. This property is only applicable when the create_mode is Recovery. + RecoverDatabaseID *string `json:"recoverDatabaseId,omitempty" tf:"recover_database_id,omitempty"` + + // The ID of the Recovery Services Recovery Point Id to be restored. This property is only applicable when the create_mode is Recovery. + RecoveryPointID *string `json:"recoveryPointId,omitempty" tf:"recovery_point_id,omitempty"` + + // The ID of the database to be restored. This property is only applicable when the create_mode is Restore. + RestoreDroppedDatabaseID *string `json:"restoreDroppedDatabaseId,omitempty" tf:"restore_dropped_database_id,omitempty"` + + // The ID of the long term retention backup to be restored. This property is only applicable when the create_mode is RestoreLongTermRetentionBackup. 
+ RestoreLongTermRetentionBackupID *string `json:"restoreLongTermRetentionBackupId,omitempty" tf:"restore_long_term_retention_backup_id,omitempty"` + + // Specifies the point in time (ISO8601 format) of the source database that will be restored to create the new database. This property is only settable for create_mode= PointInTimeRestore databases. + RestorePointInTime *string `json:"restorePointInTime,omitempty" tf:"restore_point_in_time,omitempty"` + + // Specifies the name of the sample schema to apply when creating this database. Possible value is AdventureWorksLT. + SampleName *string `json:"sampleName,omitempty" tf:"sample_name,omitempty"` + + // The id of the MS SQL Server on which to create the database. Changing this forces a new resource to be created. + ServerID *string `json:"serverId,omitempty" tf:"server_id,omitempty"` + + // A short_term_retention_policy block as defined below. + ShortTermRetentionPolicy *ShortTermRetentionPolicyObservation `json:"shortTermRetentionPolicy,omitempty" tf:"short_term_retention_policy,omitempty"` + + // Specifies the name of the SKU used by the database. For example, GP_S_Gen5_2,HS_Gen4_1,BC_Gen5_2, ElasticPool, Basic,S0, P2 ,DW100c, DS100. Changing this from the HyperScale service tier to another service tier will create a new resource. + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // Specifies the storage account type used to store backups for this database. Possible values are Geo, GeoZone, Local and Zone. Defaults to Geo. + StorageAccountType *string `json:"storageAccountType,omitempty" tf:"storage_account_type,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Threat detection policy configuration. The threat_detection_policy block supports fields documented below. 
+ ThreatDetectionPolicy *ThreatDetectionPolicyObservation `json:"threatDetectionPolicy,omitempty" tf:"threat_detection_policy,omitempty"` + + // If set to true, Transparent Data Encryption will be enabled on the database. Defaults to true. + TransparentDataEncryptionEnabled *bool `json:"transparentDataEncryptionEnabled,omitempty" tf:"transparent_data_encryption_enabled,omitempty"` + + // Boolean flag to specify whether TDE automatically rotates the encryption Key to latest version or not. Possible values are true or false. Defaults to false. + TransparentDataEncryptionKeyAutomaticRotationEnabled *bool `json:"transparentDataEncryptionKeyAutomaticRotationEnabled,omitempty" tf:"transparent_data_encryption_key_automatic_rotation_enabled,omitempty"` + + // The fully versioned Key Vault Key URL (e.g. 'https://.vault.azure.net/keys//) to be used as the Customer Managed Key(CMK/BYOK) for the Transparent Data Encryption(TDE) layer. + TransparentDataEncryptionKeyVaultKeyID *string `json:"transparentDataEncryptionKeyVaultKeyId,omitempty" tf:"transparent_data_encryption_key_vault_key_id,omitempty"` + + // Whether or not this database is zone redundant, which means the replicas of this database will be spread across multiple availability zones. This property is only settable for Premium and Business Critical databases. + ZoneRedundant *bool `json:"zoneRedundant,omitempty" tf:"zone_redundant,omitempty"` +} + +type MSSQLDatabaseParameters struct { + + // Time in minutes after which database is automatically paused. A value of -1 means that automatic pause is disabled. This property is only settable for Serverless databases. + // +kubebuilder:validation:Optional + AutoPauseDelayInMinutes *float64 `json:"autoPauseDelayInMinutes,omitempty" tf:"auto_pause_delay_in_minutes,omitempty"` + + // Specifies the collation of the database. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + Collation *string `json:"collation,omitempty" tf:"collation,omitempty"` + + // The create mode of the database. Possible values are Copy, Default, OnlineSecondary, PointInTimeRestore, Recovery, Restore, RestoreExternalBackup, RestoreExternalBackupSecondary, RestoreLongTermRetentionBackup and Secondary. Mutually exclusive with import. Changing this forces a new resource to be created. Defaults to Default. + // +kubebuilder:validation:Optional + CreateMode *string `json:"createMode,omitempty" tf:"create_mode,omitempty"` + + // The ID of the source database from which to create the new database. This should only be used for databases with create_mode values that use another database as reference. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + CreationSourceDatabaseID *string `json:"creationSourceDatabaseId,omitempty" tf:"creation_source_database_id,omitempty"` + + // Specifies the ID of the elastic pool containing this database. + // +kubebuilder:validation:Optional + ElasticPoolID *string `json:"elasticPoolId,omitempty" tf:"elastic_pool_id,omitempty"` + + // Specifies the type of enclave to be used by the database. Possible value VBS. + // +kubebuilder:validation:Optional + EnclaveType *string `json:"enclaveType,omitempty" tf:"enclave_type,omitempty"` + + // A boolean that specifies if the Geo Backup Policy is enabled. Defaults to true. + // +kubebuilder:validation:Optional + GeoBackupEnabled *bool `json:"geoBackupEnabled,omitempty" tf:"geo_backup_enabled,omitempty"` + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *IdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // A import block as documented below. Mutually exclusive with create_mode. + // +kubebuilder:validation:Optional + Import *ImportParameters `json:"import,omitempty" tf:"import,omitempty"` + + // A boolean that specifies if this is a ledger database. 
Defaults to false. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + LedgerEnabled *bool `json:"ledgerEnabled,omitempty" tf:"ledger_enabled,omitempty"` + + // Specifies the license type applied to this database. Possible values are LicenseIncluded and BasePrice. + // +kubebuilder:validation:Optional + LicenseType *string `json:"licenseType,omitempty" tf:"license_type,omitempty"` + + // A long_term_retention_policy block as defined below. + // +kubebuilder:validation:Optional + LongTermRetentionPolicy *LongTermRetentionPolicyParameters `json:"longTermRetentionPolicy,omitempty" tf:"long_term_retention_policy,omitempty"` + + // The name of the Public Maintenance Configuration window to apply to the database. Valid values include SQL_Default, SQL_EastUS_DB_1, SQL_EastUS2_DB_1, SQL_SoutheastAsia_DB_1, SQL_AustraliaEast_DB_1, SQL_NorthEurope_DB_1, SQL_SouthCentralUS_DB_1, SQL_WestUS2_DB_1, SQL_UKSouth_DB_1, SQL_WestEurope_DB_1, SQL_EastUS_DB_2, SQL_EastUS2_DB_2, SQL_WestUS2_DB_2, SQL_SoutheastAsia_DB_2, SQL_AustraliaEast_DB_2, SQL_NorthEurope_DB_2, SQL_SouthCentralUS_DB_2, SQL_UKSouth_DB_2, SQL_WestEurope_DB_2, SQL_AustraliaSoutheast_DB_1, SQL_BrazilSouth_DB_1, SQL_CanadaCentral_DB_1, SQL_CanadaEast_DB_1, SQL_CentralUS_DB_1, SQL_EastAsia_DB_1, SQL_FranceCentral_DB_1, SQL_GermanyWestCentral_DB_1, SQL_CentralIndia_DB_1, SQL_SouthIndia_DB_1, SQL_JapanEast_DB_1, SQL_JapanWest_DB_1, SQL_NorthCentralUS_DB_1, SQL_UKWest_DB_1, SQL_WestUS_DB_1, SQL_AustraliaSoutheast_DB_2, SQL_BrazilSouth_DB_2, SQL_CanadaCentral_DB_2, SQL_CanadaEast_DB_2, SQL_CentralUS_DB_2, SQL_EastAsia_DB_2, SQL_FranceCentral_DB_2, SQL_GermanyWestCentral_DB_2, SQL_CentralIndia_DB_2, SQL_SouthIndia_DB_2, SQL_JapanEast_DB_2, SQL_JapanWest_DB_2, SQL_NorthCentralUS_DB_2, SQL_UKWest_DB_2, SQL_WestUS_DB_2, SQL_WestCentralUS_DB_1, SQL_FranceSouth_DB_1, SQL_WestCentralUS_DB_2, SQL_FranceSouth_DB_2, SQL_SwitzerlandNorth_DB_1, SQL_SwitzerlandNorth_DB_2, SQL_BrazilSoutheast_DB_1, 
SQL_UAENorth_DB_1, SQL_BrazilSoutheast_DB_2, SQL_UAENorth_DB_2. Defaults to SQL_Default. + // +kubebuilder:validation:Optional + MaintenanceConfigurationName *string `json:"maintenanceConfigurationName,omitempty" tf:"maintenance_configuration_name,omitempty"` + + // The max size of the database in gigabytes. + // +kubebuilder:validation:Optional + MaxSizeGb *float64 `json:"maxSizeGb,omitempty" tf:"max_size_gb,omitempty"` + + // Minimal capacity that database will always have allocated, if not paused. This property is only settable for Serverless databases. + // +kubebuilder:validation:Optional + MinCapacity *float64 `json:"minCapacity,omitempty" tf:"min_capacity,omitempty"` + + // The number of readonly secondary replicas associated with the database to which readonly application intent connections may be routed. This property is only settable for Hyperscale edition databases. + // +kubebuilder:validation:Optional + ReadReplicaCount *float64 `json:"readReplicaCount,omitempty" tf:"read_replica_count,omitempty"` + + // If enabled, connections that have application intent set to readonly in their connection string may be routed to a readonly secondary replica. This property is only settable for Premium and Business Critical databases. + // +kubebuilder:validation:Optional + ReadScale *bool `json:"readScale,omitempty" tf:"read_scale,omitempty"` + + // The ID of the database to be recovered. This property is only applicable when the create_mode is Recovery. + // +kubebuilder:validation:Optional + RecoverDatabaseID *string `json:"recoverDatabaseId,omitempty" tf:"recover_database_id,omitempty"` + + // The ID of the Recovery Services Recovery Point Id to be restored. This property is only applicable when the create_mode is Recovery. + // +kubebuilder:validation:Optional + RecoveryPointID *string `json:"recoveryPointId,omitempty" tf:"recovery_point_id,omitempty"` + + // The ID of the database to be restored. This property is only applicable when the create_mode is Restore. 
+ // +kubebuilder:validation:Optional + RestoreDroppedDatabaseID *string `json:"restoreDroppedDatabaseId,omitempty" tf:"restore_dropped_database_id,omitempty"` + + // The ID of the long term retention backup to be restored. This property is only applicable when the create_mode is RestoreLongTermRetentionBackup. + // +kubebuilder:validation:Optional + RestoreLongTermRetentionBackupID *string `json:"restoreLongTermRetentionBackupId,omitempty" tf:"restore_long_term_retention_backup_id,omitempty"` + + // Specifies the point in time (ISO8601 format) of the source database that will be restored to create the new database. This property is only settable for create_mode= PointInTimeRestore databases. + // +kubebuilder:validation:Optional + RestorePointInTime *string `json:"restorePointInTime,omitempty" tf:"restore_point_in_time,omitempty"` + + // Specifies the name of the sample schema to apply when creating this database. Possible value is AdventureWorksLT. + // +kubebuilder:validation:Optional + SampleName *string `json:"sampleName,omitempty" tf:"sample_name,omitempty"` + + // The id of the MS SQL Server on which to create the database. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta2.MSSQLServer + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + ServerID *string `json:"serverId,omitempty" tf:"server_id,omitempty"` + + // Reference to a MSSQLServer in sql to populate serverId. + // +kubebuilder:validation:Optional + ServerIDRef *v1.Reference `json:"serverIdRef,omitempty" tf:"-"` + + // Selector for a MSSQLServer in sql to populate serverId. + // +kubebuilder:validation:Optional + ServerIDSelector *v1.Selector `json:"serverIdSelector,omitempty" tf:"-"` + + // A short_term_retention_policy block as defined below. 
+ // +kubebuilder:validation:Optional + ShortTermRetentionPolicy *ShortTermRetentionPolicyParameters `json:"shortTermRetentionPolicy,omitempty" tf:"short_term_retention_policy,omitempty"` + + // Specifies the name of the SKU used by the database. For example, GP_S_Gen5_2,HS_Gen4_1,BC_Gen5_2, ElasticPool, Basic,S0, P2 ,DW100c, DS100. Changing this from the HyperScale service tier to another service tier will create a new resource. + // +kubebuilder:validation:Optional + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // Specifies the storage account type used to store backups for this database. Possible values are Geo, GeoZone, Local and Zone. Defaults to Geo. + // +kubebuilder:validation:Optional + StorageAccountType *string `json:"storageAccountType,omitempty" tf:"storage_account_type,omitempty"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Threat detection policy configuration. The threat_detection_policy block supports fields documented below. + // +kubebuilder:validation:Optional + ThreatDetectionPolicy *ThreatDetectionPolicyParameters `json:"threatDetectionPolicy,omitempty" tf:"threat_detection_policy,omitempty"` + + // If set to true, Transparent Data Encryption will be enabled on the database. Defaults to true. + // +kubebuilder:validation:Optional + TransparentDataEncryptionEnabled *bool `json:"transparentDataEncryptionEnabled,omitempty" tf:"transparent_data_encryption_enabled,omitempty"` + + // Boolean flag to specify whether TDE automatically rotates the encryption Key to latest version or not. Possible values are true or false. Defaults to false. 
+ // +kubebuilder:validation:Optional + TransparentDataEncryptionKeyAutomaticRotationEnabled *bool `json:"transparentDataEncryptionKeyAutomaticRotationEnabled,omitempty" tf:"transparent_data_encryption_key_automatic_rotation_enabled,omitempty"` + + // The fully versioned Key Vault Key URL (e.g. 'https://.vault.azure.net/keys//) to be used as the Customer Managed Key(CMK/BYOK) for the Transparent Data Encryption(TDE) layer. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta2.Key + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + TransparentDataEncryptionKeyVaultKeyID *string `json:"transparentDataEncryptionKeyVaultKeyId,omitempty" tf:"transparent_data_encryption_key_vault_key_id,omitempty"` + + // Reference to a Key in keyvault to populate transparentDataEncryptionKeyVaultKeyId. + // +kubebuilder:validation:Optional + TransparentDataEncryptionKeyVaultKeyIDRef *v1.Reference `json:"transparentDataEncryptionKeyVaultKeyIdRef,omitempty" tf:"-"` + + // Selector for a Key in keyvault to populate transparentDataEncryptionKeyVaultKeyId. + // +kubebuilder:validation:Optional + TransparentDataEncryptionKeyVaultKeyIDSelector *v1.Selector `json:"transparentDataEncryptionKeyVaultKeyIdSelector,omitempty" tf:"-"` + + // Whether or not this database is zone redundant, which means the replicas of this database will be spread across multiple availability zones. This property is only settable for Premium and Business Critical databases. + // +kubebuilder:validation:Optional + ZoneRedundant *bool `json:"zoneRedundant,omitempty" tf:"zone_redundant,omitempty"` +} + +type ShortTermRetentionPolicyInitParameters struct { + + // The hours between each differential backup. This is only applicable to live databases but not dropped databases. Value has to be 12 or 24. Defaults to 12 hours. 
+ BackupIntervalInHours *float64 `json:"backupIntervalInHours,omitempty" tf:"backup_interval_in_hours,omitempty"` + + // Point In Time Restore configuration. Value has to be between 1 and 35. + RetentionDays *float64 `json:"retentionDays,omitempty" tf:"retention_days,omitempty"` +} + +type ShortTermRetentionPolicyObservation struct { + + // The hours between each differential backup. This is only applicable to live databases but not dropped databases. Value has to be 12 or 24. Defaults to 12 hours. + BackupIntervalInHours *float64 `json:"backupIntervalInHours,omitempty" tf:"backup_interval_in_hours,omitempty"` + + // Point In Time Restore configuration. Value has to be between 1 and 35. + RetentionDays *float64 `json:"retentionDays,omitempty" tf:"retention_days,omitempty"` +} + +type ShortTermRetentionPolicyParameters struct { + + // The hours between each differential backup. This is only applicable to live databases but not dropped databases. Value has to be 12 or 24. Defaults to 12 hours. + // +kubebuilder:validation:Optional + BackupIntervalInHours *float64 `json:"backupIntervalInHours,omitempty" tf:"backup_interval_in_hours,omitempty"` + + // Point In Time Restore configuration. Value has to be between 1 and 35. + // +kubebuilder:validation:Optional + RetentionDays *float64 `json:"retentionDays" tf:"retention_days,omitempty"` +} + +type ThreatDetectionPolicyInitParameters struct { + + // Specifies a list of alerts which should be disabled. Possible values include Access_Anomaly, Sql_Injection and Sql_Injection_Vulnerability. + // +listType=set + DisabledAlerts []*string `json:"disabledAlerts,omitempty" tf:"disabled_alerts,omitempty"` + + // Should the account administrators be emailed when this alert is triggered? Possible values are Enabled or Disabled. Defaults to Disabled. + EmailAccountAdmins *string `json:"emailAccountAdmins,omitempty" tf:"email_account_admins,omitempty"` + + // A list of email addresses which alerts should be sent to. 
+ // +listType=set + EmailAddresses []*string `json:"emailAddresses,omitempty" tf:"email_addresses,omitempty"` + + // Specifies the number of days to keep in the Threat Detection audit logs. + RetentionDays *float64 `json:"retentionDays,omitempty" tf:"retention_days,omitempty"` + + // The State of the Policy. Possible values are Enabled or Disabled. Defaults to Disabled. + State *string `json:"state,omitempty" tf:"state,omitempty"` + + // Specifies the blob storage endpoint (e.g. https://example.blob.core.windows.net). This blob storage will hold all Threat Detection audit logs. Required if state is Enabled. + StorageEndpoint *string `json:"storageEndpoint,omitempty" tf:"storage_endpoint,omitempty"` +} + +type ThreatDetectionPolicyObservation struct { + + // Specifies a list of alerts which should be disabled. Possible values include Access_Anomaly, Sql_Injection and Sql_Injection_Vulnerability. + // +listType=set + DisabledAlerts []*string `json:"disabledAlerts,omitempty" tf:"disabled_alerts,omitempty"` + + // Should the account administrators be emailed when this alert is triggered? Possible values are Enabled or Disabled. Defaults to Disabled. + EmailAccountAdmins *string `json:"emailAccountAdmins,omitempty" tf:"email_account_admins,omitempty"` + + // A list of email addresses which alerts should be sent to. + // +listType=set + EmailAddresses []*string `json:"emailAddresses,omitempty" tf:"email_addresses,omitempty"` + + // Specifies the number of days to keep in the Threat Detection audit logs. + RetentionDays *float64 `json:"retentionDays,omitempty" tf:"retention_days,omitempty"` + + // The State of the Policy. Possible values are Enabled or Disabled. Defaults to Disabled. + State *string `json:"state,omitempty" tf:"state,omitempty"` + + // Specifies the blob storage endpoint (e.g. https://example.blob.core.windows.net). This blob storage will hold all Threat Detection audit logs. Required if state is Enabled. 
+ StorageEndpoint *string `json:"storageEndpoint,omitempty" tf:"storage_endpoint,omitempty"` +} + +type ThreatDetectionPolicyParameters struct { + + // Specifies a list of alerts which should be disabled. Possible values include Access_Anomaly, Sql_Injection and Sql_Injection_Vulnerability. + // +kubebuilder:validation:Optional + // +listType=set + DisabledAlerts []*string `json:"disabledAlerts,omitempty" tf:"disabled_alerts,omitempty"` + + // Should the account administrators be emailed when this alert is triggered? Possible values are Enabled or Disabled. Defaults to Disabled. + // +kubebuilder:validation:Optional + EmailAccountAdmins *string `json:"emailAccountAdmins,omitempty" tf:"email_account_admins,omitempty"` + + // A list of email addresses which alerts should be sent to. + // +kubebuilder:validation:Optional + // +listType=set + EmailAddresses []*string `json:"emailAddresses,omitempty" tf:"email_addresses,omitempty"` + + // Specifies the number of days to keep in the Threat Detection audit logs. + // +kubebuilder:validation:Optional + RetentionDays *float64 `json:"retentionDays,omitempty" tf:"retention_days,omitempty"` + + // The State of the Policy. Possible values are Enabled or Disabled. Defaults to Disabled. + // +kubebuilder:validation:Optional + State *string `json:"state,omitempty" tf:"state,omitempty"` + + // Specifies the identifier key of the Threat Detection audit storage account. Required if state is Enabled. + // +kubebuilder:validation:Optional + StorageAccountAccessKeySecretRef *v1.SecretKeySelector `json:"storageAccountAccessKeySecretRef,omitempty" tf:"-"` + + // Specifies the blob storage endpoint (e.g. https://example.blob.core.windows.net). This blob storage will hold all Threat Detection audit logs. Required if state is Enabled. 
+ // +kubebuilder:validation:Optional + StorageEndpoint *string `json:"storageEndpoint,omitempty" tf:"storage_endpoint,omitempty"` +} + +// MSSQLDatabaseSpec defines the desired state of MSSQLDatabase +type MSSQLDatabaseSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider MSSQLDatabaseParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider MSSQLDatabaseInitParameters `json:"initProvider,omitempty"` +} + +// MSSQLDatabaseStatus defines the observed state of MSSQLDatabase. +type MSSQLDatabaseStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider MSSQLDatabaseObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// MSSQLDatabase is the Schema for the MSSQLDatabases API. Manages a MS SQL Database. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type MSSQLDatabase struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec MSSQLDatabaseSpec `json:"spec"` + Status MSSQLDatabaseStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// MSSQLDatabaseList contains a list of MSSQLDatabases +type MSSQLDatabaseList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []MSSQLDatabase `json:"items"` +} + +// Repository type metadata. +var ( + MSSQLDatabase_Kind = "MSSQLDatabase" + MSSQLDatabase_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: MSSQLDatabase_Kind}.String() + MSSQLDatabase_KindAPIVersion = MSSQLDatabase_Kind + "." + CRDGroupVersion.String() + MSSQLDatabase_GroupVersionKind = CRDGroupVersion.WithKind(MSSQLDatabase_Kind) +) + +func init() { + SchemeBuilder.Register(&MSSQLDatabase{}, &MSSQLDatabaseList{}) +} diff --git a/apis/sql/v1beta2/zz_mssqlelasticpool_terraformed.go b/apis/sql/v1beta2/zz_mssqlelasticpool_terraformed.go new file mode 100755 index 000000000..ad0885fe9 --- /dev/null +++ b/apis/sql/v1beta2/zz_mssqlelasticpool_terraformed.go @@ -0,0 +1,130 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this MSSQLElasticPool +func (mg *MSSQLElasticPool) GetTerraformResourceType() string { + return "azurerm_mssql_elasticpool" +} + +// GetConnectionDetailsMapping for this MSSQLElasticPool +func (tr *MSSQLElasticPool) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this MSSQLElasticPool +func (tr *MSSQLElasticPool) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this MSSQLElasticPool +func (tr *MSSQLElasticPool) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this MSSQLElasticPool +func (tr *MSSQLElasticPool) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this MSSQLElasticPool +func (tr *MSSQLElasticPool) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this MSSQLElasticPool +func (tr *MSSQLElasticPool) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this MSSQLElasticPool +func (tr *MSSQLElasticPool) GetInitParameters() (map[string]any, error) { + p, err := 
json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this MSSQLElasticPool +func (tr *MSSQLElasticPool) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this MSSQLElasticPool using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *MSSQLElasticPool) LateInitialize(attrs []byte) (bool, error) { + params := &MSSQLElasticPoolParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + opts = append(opts, resource.WithNameFilter("MaxSizeBytes")) + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *MSSQLElasticPool) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/sql/v1beta2/zz_mssqlelasticpool_types.go b/apis/sql/v1beta2/zz_mssqlelasticpool_types.go new file mode 100755 index 000000000..d0bbcb1cc --- /dev/null +++ b/apis/sql/v1beta2/zz_mssqlelasticpool_types.go @@ -0,0 +1,302 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type MSSQLElasticPoolInitParameters struct { + + // Specifies the type of enclave to be used by the elastic pool. Possible value VBS. + EnclaveType *string `json:"enclaveType,omitempty" tf:"enclave_type,omitempty"` + + // Specifies the license type applied to this database. Possible values are LicenseIncluded and BasePrice. + LicenseType *string `json:"licenseType,omitempty" tf:"license_type,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the Public Maintenance Configuration window to apply to the elastic pool. 
Valid values include SQL_Default, SQL_EastUS_DB_1, SQL_EastUS2_DB_1, SQL_SoutheastAsia_DB_1, SQL_AustraliaEast_DB_1, SQL_NorthEurope_DB_1, SQL_SouthCentralUS_DB_1, SQL_WestUS2_DB_1, SQL_UKSouth_DB_1, SQL_WestEurope_DB_1, SQL_EastUS_DB_2, SQL_EastUS2_DB_2, SQL_WestUS2_DB_2, SQL_SoutheastAsia_DB_2, SQL_AustraliaEast_DB_2, SQL_NorthEurope_DB_2, SQL_SouthCentralUS_DB_2, SQL_UKSouth_DB_2, SQL_WestEurope_DB_2, SQL_AustraliaSoutheast_DB_1, SQL_BrazilSouth_DB_1, SQL_CanadaCentral_DB_1, SQL_CanadaEast_DB_1, SQL_CentralUS_DB_1, SQL_EastAsia_DB_1, SQL_FranceCentral_DB_1, SQL_GermanyWestCentral_DB_1, SQL_CentralIndia_DB_1, SQL_SouthIndia_DB_1, SQL_JapanEast_DB_1, SQL_JapanWest_DB_1, SQL_NorthCentralUS_DB_1, SQL_UKWest_DB_1, SQL_WestUS_DB_1, SQL_AustraliaSoutheast_DB_2, SQL_BrazilSouth_DB_2, SQL_CanadaCentral_DB_2, SQL_CanadaEast_DB_2, SQL_CentralUS_DB_2, SQL_EastAsia_DB_2, SQL_FranceCentral_DB_2, SQL_GermanyWestCentral_DB_2, SQL_CentralIndia_DB_2, SQL_SouthIndia_DB_2, SQL_JapanEast_DB_2, SQL_JapanWest_DB_2, SQL_NorthCentralUS_DB_2, SQL_UKWest_DB_2, SQL_WestUS_DB_2, SQL_WestCentralUS_DB_1, SQL_FranceSouth_DB_1, SQL_WestCentralUS_DB_2, SQL_FranceSouth_DB_2, SQL_SwitzerlandNorth_DB_1, SQL_SwitzerlandNorth_DB_2, SQL_BrazilSoutheast_DB_1, SQL_UAENorth_DB_1, SQL_BrazilSoutheast_DB_2, SQL_UAENorth_DB_2. Defaults to SQL_Default. + MaintenanceConfigurationName *string `json:"maintenanceConfigurationName,omitempty" tf:"maintenance_configuration_name,omitempty"` + + // The max data size of the elastic pool in bytes. Conflicts with max_size_gb. + MaxSizeBytes *float64 `json:"maxSizeBytes,omitempty" tf:"max_size_bytes,omitempty"` + + // The max data size of the elastic pool in gigabytes. Conflicts with max_size_bytes. + MaxSizeGb *float64 `json:"maxSizeGb,omitempty" tf:"max_size_gb,omitempty"` + + // A per_database_settings block as defined below. 
+ PerDatabaseSettings *PerDatabaseSettingsInitParameters `json:"perDatabaseSettings,omitempty" tf:"per_database_settings,omitempty"` + + // A sku block as defined below. + Sku *SkuInitParameters `json:"sku,omitempty" tf:"sku,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Whether or not this elastic pool is zone redundant. tier needs to be Premium for DTU based or BusinessCritical for vCore based sku. + ZoneRedundant *bool `json:"zoneRedundant,omitempty" tf:"zone_redundant,omitempty"` +} + +type MSSQLElasticPoolObservation struct { + + // Specifies the type of enclave to be used by the elastic pool. Possible value VBS. + EnclaveType *string `json:"enclaveType,omitempty" tf:"enclave_type,omitempty"` + + // The ID of the MS SQL Elastic Pool. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Specifies the license type applied to this database. Possible values are LicenseIncluded and BasePrice. + LicenseType *string `json:"licenseType,omitempty" tf:"license_type,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the Public Maintenance Configuration window to apply to the elastic pool. 
Valid values include SQL_Default, SQL_EastUS_DB_1, SQL_EastUS2_DB_1, SQL_SoutheastAsia_DB_1, SQL_AustraliaEast_DB_1, SQL_NorthEurope_DB_1, SQL_SouthCentralUS_DB_1, SQL_WestUS2_DB_1, SQL_UKSouth_DB_1, SQL_WestEurope_DB_1, SQL_EastUS_DB_2, SQL_EastUS2_DB_2, SQL_WestUS2_DB_2, SQL_SoutheastAsia_DB_2, SQL_AustraliaEast_DB_2, SQL_NorthEurope_DB_2, SQL_SouthCentralUS_DB_2, SQL_UKSouth_DB_2, SQL_WestEurope_DB_2, SQL_AustraliaSoutheast_DB_1, SQL_BrazilSouth_DB_1, SQL_CanadaCentral_DB_1, SQL_CanadaEast_DB_1, SQL_CentralUS_DB_1, SQL_EastAsia_DB_1, SQL_FranceCentral_DB_1, SQL_GermanyWestCentral_DB_1, SQL_CentralIndia_DB_1, SQL_SouthIndia_DB_1, SQL_JapanEast_DB_1, SQL_JapanWest_DB_1, SQL_NorthCentralUS_DB_1, SQL_UKWest_DB_1, SQL_WestUS_DB_1, SQL_AustraliaSoutheast_DB_2, SQL_BrazilSouth_DB_2, SQL_CanadaCentral_DB_2, SQL_CanadaEast_DB_2, SQL_CentralUS_DB_2, SQL_EastAsia_DB_2, SQL_FranceCentral_DB_2, SQL_GermanyWestCentral_DB_2, SQL_CentralIndia_DB_2, SQL_SouthIndia_DB_2, SQL_JapanEast_DB_2, SQL_JapanWest_DB_2, SQL_NorthCentralUS_DB_2, SQL_UKWest_DB_2, SQL_WestUS_DB_2, SQL_WestCentralUS_DB_1, SQL_FranceSouth_DB_1, SQL_WestCentralUS_DB_2, SQL_FranceSouth_DB_2, SQL_SwitzerlandNorth_DB_1, SQL_SwitzerlandNorth_DB_2, SQL_BrazilSoutheast_DB_1, SQL_UAENorth_DB_1, SQL_BrazilSoutheast_DB_2, SQL_UAENorth_DB_2. Defaults to SQL_Default. + MaintenanceConfigurationName *string `json:"maintenanceConfigurationName,omitempty" tf:"maintenance_configuration_name,omitempty"` + + // The max data size of the elastic pool in bytes. Conflicts with max_size_gb. + MaxSizeBytes *float64 `json:"maxSizeBytes,omitempty" tf:"max_size_bytes,omitempty"` + + // The max data size of the elastic pool in gigabytes. Conflicts with max_size_bytes. + MaxSizeGb *float64 `json:"maxSizeGb,omitempty" tf:"max_size_gb,omitempty"` + + // A per_database_settings block as defined below. 
+ PerDatabaseSettings *PerDatabaseSettingsObservation `json:"perDatabaseSettings,omitempty" tf:"per_database_settings,omitempty"` + + // The name of the resource group in which to create the elastic pool. This must be the same as the resource group of the underlying SQL server. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // The name of the SQL Server on which to create the elastic pool. Changing this forces a new resource to be created. + ServerName *string `json:"serverName,omitempty" tf:"server_name,omitempty"` + + // A sku block as defined below. + Sku *SkuObservation `json:"sku,omitempty" tf:"sku,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Whether or not this elastic pool is zone redundant. tier needs to be Premium for DTU based or BusinessCritical for vCore based sku. + ZoneRedundant *bool `json:"zoneRedundant,omitempty" tf:"zone_redundant,omitempty"` +} + +type MSSQLElasticPoolParameters struct { + + // Specifies the type of enclave to be used by the elastic pool. Possible value VBS. + // +kubebuilder:validation:Optional + EnclaveType *string `json:"enclaveType,omitempty" tf:"enclave_type,omitempty"` + + // Specifies the license type applied to this database. Possible values are LicenseIncluded and BasePrice. + // +kubebuilder:validation:Optional + LicenseType *string `json:"licenseType,omitempty" tf:"license_type,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the Public Maintenance Configuration window to apply to the elastic pool. 
Valid values include SQL_Default, SQL_EastUS_DB_1, SQL_EastUS2_DB_1, SQL_SoutheastAsia_DB_1, SQL_AustraliaEast_DB_1, SQL_NorthEurope_DB_1, SQL_SouthCentralUS_DB_1, SQL_WestUS2_DB_1, SQL_UKSouth_DB_1, SQL_WestEurope_DB_1, SQL_EastUS_DB_2, SQL_EastUS2_DB_2, SQL_WestUS2_DB_2, SQL_SoutheastAsia_DB_2, SQL_AustraliaEast_DB_2, SQL_NorthEurope_DB_2, SQL_SouthCentralUS_DB_2, SQL_UKSouth_DB_2, SQL_WestEurope_DB_2, SQL_AustraliaSoutheast_DB_1, SQL_BrazilSouth_DB_1, SQL_CanadaCentral_DB_1, SQL_CanadaEast_DB_1, SQL_CentralUS_DB_1, SQL_EastAsia_DB_1, SQL_FranceCentral_DB_1, SQL_GermanyWestCentral_DB_1, SQL_CentralIndia_DB_1, SQL_SouthIndia_DB_1, SQL_JapanEast_DB_1, SQL_JapanWest_DB_1, SQL_NorthCentralUS_DB_1, SQL_UKWest_DB_1, SQL_WestUS_DB_1, SQL_AustraliaSoutheast_DB_2, SQL_BrazilSouth_DB_2, SQL_CanadaCentral_DB_2, SQL_CanadaEast_DB_2, SQL_CentralUS_DB_2, SQL_EastAsia_DB_2, SQL_FranceCentral_DB_2, SQL_GermanyWestCentral_DB_2, SQL_CentralIndia_DB_2, SQL_SouthIndia_DB_2, SQL_JapanEast_DB_2, SQL_JapanWest_DB_2, SQL_NorthCentralUS_DB_2, SQL_UKWest_DB_2, SQL_WestUS_DB_2, SQL_WestCentralUS_DB_1, SQL_FranceSouth_DB_1, SQL_WestCentralUS_DB_2, SQL_FranceSouth_DB_2, SQL_SwitzerlandNorth_DB_1, SQL_SwitzerlandNorth_DB_2, SQL_BrazilSoutheast_DB_1, SQL_UAENorth_DB_1, SQL_BrazilSoutheast_DB_2, SQL_UAENorth_DB_2. Defaults to SQL_Default. + // +kubebuilder:validation:Optional + MaintenanceConfigurationName *string `json:"maintenanceConfigurationName,omitempty" tf:"maintenance_configuration_name,omitempty"` + + // The max data size of the elastic pool in bytes. Conflicts with max_size_gb. + // +kubebuilder:validation:Optional + MaxSizeBytes *float64 `json:"maxSizeBytes,omitempty" tf:"max_size_bytes,omitempty"` + + // The max data size of the elastic pool in gigabytes. Conflicts with max_size_bytes. + // +kubebuilder:validation:Optional + MaxSizeGb *float64 `json:"maxSizeGb,omitempty" tf:"max_size_gb,omitempty"` + + // A per_database_settings block as defined below. 
+ // +kubebuilder:validation:Optional + PerDatabaseSettings *PerDatabaseSettingsParameters `json:"perDatabaseSettings,omitempty" tf:"per_database_settings,omitempty"` + + // The name of the resource group in which to create the elastic pool. This must be the same as the resource group of the underlying SQL server. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // The name of the SQL Server on which to create the elastic pool. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta2.MSSQLServer + // +kubebuilder:validation:Optional + ServerName *string `json:"serverName,omitempty" tf:"server_name,omitempty"` + + // Reference to a MSSQLServer in sql to populate serverName. + // +kubebuilder:validation:Optional + ServerNameRef *v1.Reference `json:"serverNameRef,omitempty" tf:"-"` + + // Selector for a MSSQLServer in sql to populate serverName. + // +kubebuilder:validation:Optional + ServerNameSelector *v1.Selector `json:"serverNameSelector,omitempty" tf:"-"` + + // A sku block as defined below. + // +kubebuilder:validation:Optional + Sku *SkuParameters `json:"sku,omitempty" tf:"sku,omitempty"` + + // A mapping of tags to assign to the resource. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Whether or not this elastic pool is zone redundant. tier needs to be Premium for DTU based or BusinessCritical for vCore based sku. + // +kubebuilder:validation:Optional + ZoneRedundant *bool `json:"zoneRedundant,omitempty" tf:"zone_redundant,omitempty"` +} + +type PerDatabaseSettingsInitParameters struct { + + // The maximum capacity any one database can consume. + MaxCapacity *float64 `json:"maxCapacity,omitempty" tf:"max_capacity,omitempty"` + + // The minimum capacity all databases are guaranteed. + MinCapacity *float64 `json:"minCapacity,omitempty" tf:"min_capacity,omitempty"` +} + +type PerDatabaseSettingsObservation struct { + + // The maximum capacity any one database can consume. + MaxCapacity *float64 `json:"maxCapacity,omitempty" tf:"max_capacity,omitempty"` + + // The minimum capacity all databases are guaranteed. + MinCapacity *float64 `json:"minCapacity,omitempty" tf:"min_capacity,omitempty"` +} + +type PerDatabaseSettingsParameters struct { + + // The maximum capacity any one database can consume. + // +kubebuilder:validation:Optional + MaxCapacity *float64 `json:"maxCapacity" tf:"max_capacity,omitempty"` + + // The minimum capacity all databases are guaranteed. + // +kubebuilder:validation:Optional + MinCapacity *float64 `json:"minCapacity" tf:"min_capacity,omitempty"` +} + +type SkuInitParameters struct { + + // The scale up/out capacity, representing server's compute units. For more information see the documentation for your Elasticpool configuration: vCore-based or DTU-based. + Capacity *float64 `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // The family of hardware Gen4, Gen5, Fsv2 or DC. + Family *string `json:"family,omitempty" tf:"family,omitempty"` + + // Specifies the SKU Name for this Elasticpool. The name of the SKU, will be either vCore based or DTU based. 
Possible DTU based values are BasicPool, StandardPool, PremiumPool while possible vCore based values are GP_Gen4, GP_Gen5, GP_Fsv2, GP_DC, BC_Gen4, BC_Gen5, BC_DC, or HS_Gen5. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The tier of the particular SKU. Possible values are GeneralPurpose, BusinessCritical, Basic, Standard, Premium, or HyperScale. For more information see the documentation for your Elasticpool configuration: vCore-based or DTU-based. + Tier *string `json:"tier,omitempty" tf:"tier,omitempty"` +} + +type SkuObservation struct { + + // The scale up/out capacity, representing server's compute units. For more information see the documentation for your Elasticpool configuration: vCore-based or DTU-based. + Capacity *float64 `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // The family of hardware Gen4, Gen5, Fsv2 or DC. + Family *string `json:"family,omitempty" tf:"family,omitempty"` + + // Specifies the SKU Name for this Elasticpool. The name of the SKU, will be either vCore based or DTU based. Possible DTU based values are BasicPool, StandardPool, PremiumPool while possible vCore based values are GP_Gen4, GP_Gen5, GP_Fsv2, GP_DC, BC_Gen4, BC_Gen5, BC_DC, or HS_Gen5. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The tier of the particular SKU. Possible values are GeneralPurpose, BusinessCritical, Basic, Standard, Premium, or HyperScale. For more information see the documentation for your Elasticpool configuration: vCore-based or DTU-based. + Tier *string `json:"tier,omitempty" tf:"tier,omitempty"` +} + +type SkuParameters struct { + + // The scale up/out capacity, representing server's compute units. For more information see the documentation for your Elasticpool configuration: vCore-based or DTU-based. + // +kubebuilder:validation:Optional + Capacity *float64 `json:"capacity" tf:"capacity,omitempty"` + + // The family of hardware Gen4, Gen5, Fsv2 or DC. 
+ // +kubebuilder:validation:Optional + Family *string `json:"family,omitempty" tf:"family,omitempty"` + + // Specifies the SKU Name for this Elasticpool. The name of the SKU, will be either vCore based or DTU based. Possible DTU based values are BasicPool, StandardPool, PremiumPool while possible vCore based values are GP_Gen4, GP_Gen5, GP_Fsv2, GP_DC, BC_Gen4, BC_Gen5, BC_DC, or HS_Gen5. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The tier of the particular SKU. Possible values are GeneralPurpose, BusinessCritical, Basic, Standard, Premium, or HyperScale. For more information see the documentation for your Elasticpool configuration: vCore-based or DTU-based. + // +kubebuilder:validation:Optional + Tier *string `json:"tier" tf:"tier,omitempty"` +} + +// MSSQLElasticPoolSpec defines the desired state of MSSQLElasticPool +type MSSQLElasticPoolSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider MSSQLElasticPoolParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider MSSQLElasticPoolInitParameters `json:"initProvider,omitempty"` +} + +// MSSQLElasticPoolStatus defines the observed state of MSSQLElasticPool. 
+type MSSQLElasticPoolStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider MSSQLElasticPoolObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// MSSQLElasticPool is the Schema for the MSSQLElasticPools API. Manages an Azure SQL Elastic Pool. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type MSSQLElasticPool struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.perDatabaseSettings) || (has(self.initProvider) && has(self.initProvider.perDatabaseSettings))",message="spec.forProvider.perDatabaseSettings is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.sku) || (has(self.initProvider) && has(self.initProvider.sku))",message="spec.forProvider.sku is a required parameter" + Spec MSSQLElasticPoolSpec `json:"spec"` + Status 
MSSQLElasticPoolStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// MSSQLElasticPoolList contains a list of MSSQLElasticPools +type MSSQLElasticPoolList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []MSSQLElasticPool `json:"items"` +} + +// Repository type metadata. +var ( + MSSQLElasticPool_Kind = "MSSQLElasticPool" + MSSQLElasticPool_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: MSSQLElasticPool_Kind}.String() + MSSQLElasticPool_KindAPIVersion = MSSQLElasticPool_Kind + "." + CRDGroupVersion.String() + MSSQLElasticPool_GroupVersionKind = CRDGroupVersion.WithKind(MSSQLElasticPool_Kind) +) + +func init() { + SchemeBuilder.Register(&MSSQLElasticPool{}, &MSSQLElasticPoolList{}) +} diff --git a/apis/sql/v1beta2/zz_mssqlfailovergroup_terraformed.go b/apis/sql/v1beta2/zz_mssqlfailovergroup_terraformed.go new file mode 100755 index 000000000..02302f184 --- /dev/null +++ b/apis/sql/v1beta2/zz_mssqlfailovergroup_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this MSSQLFailoverGroup +func (mg *MSSQLFailoverGroup) GetTerraformResourceType() string { + return "azurerm_mssql_failover_group" +} + +// GetConnectionDetailsMapping for this MSSQLFailoverGroup +func (tr *MSSQLFailoverGroup) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this MSSQLFailoverGroup +func (tr *MSSQLFailoverGroup) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this MSSQLFailoverGroup +func (tr *MSSQLFailoverGroup) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this MSSQLFailoverGroup +func (tr *MSSQLFailoverGroup) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this MSSQLFailoverGroup +func (tr *MSSQLFailoverGroup) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this MSSQLFailoverGroup +func (tr *MSSQLFailoverGroup) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this MSSQLFailoverGroup +func (tr *MSSQLFailoverGroup) GetInitParameters() (map[string]any, error) { 
+ p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this MSSQLFailoverGroup +func (tr *MSSQLFailoverGroup) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this MSSQLFailoverGroup using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *MSSQLFailoverGroup) LateInitialize(attrs []byte) (bool, error) { + params := &MSSQLFailoverGroupParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *MSSQLFailoverGroup) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/sql/v1beta2/zz_mssqlfailovergroup_types.go b/apis/sql/v1beta2/zz_mssqlfailovergroup_types.go new file mode 100755 index 000000000..eb6653fb8 --- /dev/null +++ b/apis/sql/v1beta2/zz_mssqlfailovergroup_types.go @@ -0,0 +1,255 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type MSSQLFailoverGroupInitParameters struct { + + // A set of database names to include in the failover group. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta2.MSSQLDatabase + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +listType=set + Databases []*string `json:"databases,omitempty" tf:"databases,omitempty"` + + // References to MSSQLDatabase in sql to populate databases. + // +kubebuilder:validation:Optional + DatabasesRefs []v1.Reference `json:"databasesRefs,omitempty" tf:"-"` + + // Selector for a list of MSSQLDatabase in sql to populate databases. + // +kubebuilder:validation:Optional + DatabasesSelector *v1.Selector `json:"databasesSelector,omitempty" tf:"-"` + + // A partner_server block as defined below. + PartnerServer []PartnerServerInitParameters `json:"partnerServer,omitempty" tf:"partner_server,omitempty"` + + // A read_write_endpoint_failover_policy block as defined below. 
+ ReadWriteEndpointFailoverPolicy *ReadWriteEndpointFailoverPolicyInitParameters `json:"readWriteEndpointFailoverPolicy,omitempty" tf:"read_write_endpoint_failover_policy,omitempty"` + + // Whether failover is enabled for the readonly endpoint. Defaults to false. + ReadonlyEndpointFailoverPolicyEnabled *bool `json:"readonlyEndpointFailoverPolicyEnabled,omitempty" tf:"readonly_endpoint_failover_policy_enabled,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type MSSQLFailoverGroupObservation struct { + + // A set of database names to include in the failover group. + // +listType=set + Databases []*string `json:"databases,omitempty" tf:"databases,omitempty"` + + // The ID of the Failover Group. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A partner_server block as defined below. + PartnerServer []PartnerServerObservation `json:"partnerServer,omitempty" tf:"partner_server,omitempty"` + + // A read_write_endpoint_failover_policy block as defined below. + ReadWriteEndpointFailoverPolicy *ReadWriteEndpointFailoverPolicyObservation `json:"readWriteEndpointFailoverPolicy,omitempty" tf:"read_write_endpoint_failover_policy,omitempty"` + + // Whether failover is enabled for the readonly endpoint. Defaults to false. + ReadonlyEndpointFailoverPolicyEnabled *bool `json:"readonlyEndpointFailoverPolicyEnabled,omitempty" tf:"readonly_endpoint_failover_policy_enabled,omitempty"` + + // The ID of the primary SQL Server on which to create the failover group. Changing this forces a new resource to be created. + ServerID *string `json:"serverId,omitempty" tf:"server_id,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type MSSQLFailoverGroupParameters struct { + + // A set of database names to include in the failover group. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta2.MSSQLDatabase + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + // +listType=set + Databases []*string `json:"databases,omitempty" tf:"databases,omitempty"` + + // References to MSSQLDatabase in sql to populate databases. + // +kubebuilder:validation:Optional + DatabasesRefs []v1.Reference `json:"databasesRefs,omitempty" tf:"-"` + + // Selector for a list of MSSQLDatabase in sql to populate databases. + // +kubebuilder:validation:Optional + DatabasesSelector *v1.Selector `json:"databasesSelector,omitempty" tf:"-"` + + // A partner_server block as defined below. + // +kubebuilder:validation:Optional + PartnerServer []PartnerServerParameters `json:"partnerServer,omitempty" tf:"partner_server,omitempty"` + + // A read_write_endpoint_failover_policy block as defined below. + // +kubebuilder:validation:Optional + ReadWriteEndpointFailoverPolicy *ReadWriteEndpointFailoverPolicyParameters `json:"readWriteEndpointFailoverPolicy,omitempty" tf:"read_write_endpoint_failover_policy,omitempty"` + + // Whether failover is enabled for the readonly endpoint. Defaults to false. + // +kubebuilder:validation:Optional + ReadonlyEndpointFailoverPolicyEnabled *bool `json:"readonlyEndpointFailoverPolicyEnabled,omitempty" tf:"readonly_endpoint_failover_policy_enabled,omitempty"` + + // The ID of the primary SQL Server on which to create the failover group. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta2.MSSQLServer + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + ServerID *string `json:"serverId,omitempty" tf:"server_id,omitempty"` + + // Reference to a MSSQLServer in sql to populate serverId. 
+ // +kubebuilder:validation:Optional + ServerIDRef *v1.Reference `json:"serverIdRef,omitempty" tf:"-"` + + // Selector for a MSSQLServer in sql to populate serverId. + // +kubebuilder:validation:Optional + ServerIDSelector *v1.Selector `json:"serverIdSelector,omitempty" tf:"-"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type PartnerServerInitParameters struct { + + // The ID of a partner SQL server to include in the failover group. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta2.MSSQLServer + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Reference to a MSSQLServer in sql to populate id. + // +kubebuilder:validation:Optional + IDRef *v1.Reference `json:"idRef,omitempty" tf:"-"` + + // Selector for a MSSQLServer in sql to populate id. + // +kubebuilder:validation:Optional + IDSelector *v1.Selector `json:"idSelector,omitempty" tf:"-"` +} + +type PartnerServerObservation struct { + + // The ID of a partner SQL server to include in the failover group. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The location of the partner server. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The replication role of the partner server. Possible values include Primary or Secondary. + Role *string `json:"role,omitempty" tf:"role,omitempty"` +} + +type PartnerServerParameters struct { + + // The ID of a partner SQL server to include in the failover group. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta2.MSSQLServer + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Reference to a MSSQLServer in sql to populate id. + // +kubebuilder:validation:Optional + IDRef *v1.Reference `json:"idRef,omitempty" tf:"-"` + + // Selector for a MSSQLServer in sql to populate id. + // +kubebuilder:validation:Optional + IDSelector *v1.Selector `json:"idSelector,omitempty" tf:"-"` +} + +type ReadWriteEndpointFailoverPolicyInitParameters struct { + + // The grace period in minutes, before failover with data loss is attempted for the read-write endpoint. Required when mode is Automatic. + GraceMinutes *float64 `json:"graceMinutes,omitempty" tf:"grace_minutes,omitempty"` + + // The failover policy of the read-write endpoint for the failover group. Possible values are Automatic or Manual. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` +} + +type ReadWriteEndpointFailoverPolicyObservation struct { + + // The grace period in minutes, before failover with data loss is attempted for the read-write endpoint. Required when mode is Automatic. + GraceMinutes *float64 `json:"graceMinutes,omitempty" tf:"grace_minutes,omitempty"` + + // The failover policy of the read-write endpoint for the failover group. Possible values are Automatic or Manual. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` +} + +type ReadWriteEndpointFailoverPolicyParameters struct { + + // The grace period in minutes, before failover with data loss is attempted for the read-write endpoint. Required when mode is Automatic. + // +kubebuilder:validation:Optional + GraceMinutes *float64 `json:"graceMinutes,omitempty" tf:"grace_minutes,omitempty"` + + // The failover policy of the read-write endpoint for the failover group. Possible values are Automatic or Manual. 
+ // +kubebuilder:validation:Optional + Mode *string `json:"mode" tf:"mode,omitempty"` +} + +// MSSQLFailoverGroupSpec defines the desired state of MSSQLFailoverGroup +type MSSQLFailoverGroupSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider MSSQLFailoverGroupParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider MSSQLFailoverGroupInitParameters `json:"initProvider,omitempty"` +} + +// MSSQLFailoverGroupStatus defines the observed state of MSSQLFailoverGroup. +type MSSQLFailoverGroupStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider MSSQLFailoverGroupObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// MSSQLFailoverGroup is the Schema for the MSSQLFailoverGroups API. Manages a Microsoft Azure SQL Failover Group. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type MSSQLFailoverGroup struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.partnerServer) || (has(self.initProvider) && has(self.initProvider.partnerServer))",message="spec.forProvider.partnerServer is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.readWriteEndpointFailoverPolicy) || (has(self.initProvider) && has(self.initProvider.readWriteEndpointFailoverPolicy))",message="spec.forProvider.readWriteEndpointFailoverPolicy is a required parameter" + Spec MSSQLFailoverGroupSpec `json:"spec"` + Status MSSQLFailoverGroupStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// MSSQLFailoverGroupList contains a list of MSSQLFailoverGroups +type MSSQLFailoverGroupList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []MSSQLFailoverGroup `json:"items"` +} + +// Repository type metadata. +var ( + MSSQLFailoverGroup_Kind = "MSSQLFailoverGroup" + MSSQLFailoverGroup_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: MSSQLFailoverGroup_Kind}.String() + MSSQLFailoverGroup_KindAPIVersion = MSSQLFailoverGroup_Kind + "." 
+ CRDGroupVersion.String() + MSSQLFailoverGroup_GroupVersionKind = CRDGroupVersion.WithKind(MSSQLFailoverGroup_Kind) +) + +func init() { + SchemeBuilder.Register(&MSSQLFailoverGroup{}, &MSSQLFailoverGroupList{}) +} diff --git a/apis/sql/v1beta2/zz_mssqlmanageddatabase_terraformed.go b/apis/sql/v1beta2/zz_mssqlmanageddatabase_terraformed.go new file mode 100755 index 000000000..108e94163 --- /dev/null +++ b/apis/sql/v1beta2/zz_mssqlmanageddatabase_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this MSSQLManagedDatabase +func (mg *MSSQLManagedDatabase) GetTerraformResourceType() string { + return "azurerm_mssql_managed_database" +} + +// GetConnectionDetailsMapping for this MSSQLManagedDatabase +func (tr *MSSQLManagedDatabase) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this MSSQLManagedDatabase +func (tr *MSSQLManagedDatabase) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this MSSQLManagedDatabase +func (tr *MSSQLManagedDatabase) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this MSSQLManagedDatabase +func (tr *MSSQLManagedDatabase) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this 
MSSQLManagedDatabase +func (tr *MSSQLManagedDatabase) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this MSSQLManagedDatabase +func (tr *MSSQLManagedDatabase) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this MSSQLManagedDatabase +func (tr *MSSQLManagedDatabase) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this MSSQLManagedDatabase +func (tr *MSSQLManagedDatabase) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this MSSQLManagedDatabase using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *MSSQLManagedDatabase) LateInitialize(attrs []byte) (bool, error) { + params := &MSSQLManagedDatabaseParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *MSSQLManagedDatabase) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/sql/v1beta2/zz_mssqlmanageddatabase_types.go b/apis/sql/v1beta2/zz_mssqlmanageddatabase_types.go new file mode 100755 index 000000000..bb424bf72 --- /dev/null +++ b/apis/sql/v1beta2/zz_mssqlmanageddatabase_types.go @@ -0,0 +1,221 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type MSSQLManagedDatabaseInitParameters struct { + + // A long_term_retention_policy block as defined below. 
+ LongTermRetentionPolicy *MSSQLManagedDatabaseLongTermRetentionPolicyInitParameters `json:"longTermRetentionPolicy,omitempty" tf:"long_term_retention_policy,omitempty"` + + // A point_in_time_restore block as defined below. Changing this forces a new resource to be created. + PointInTimeRestore *PointInTimeRestoreInitParameters `json:"pointInTimeRestore,omitempty" tf:"point_in_time_restore,omitempty"` + + // The backup retention period in days. This is how many days Point-in-Time Restore will be supported. + ShortTermRetentionDays *float64 `json:"shortTermRetentionDays,omitempty" tf:"short_term_retention_days,omitempty"` +} + +type MSSQLManagedDatabaseLongTermRetentionPolicyInitParameters struct { + + // Specifies if the backups are immutable. Defaults to false. + ImmutableBackupsEnabled *bool `json:"immutableBackupsEnabled,omitempty" tf:"immutable_backups_enabled,omitempty"` + + // The monthly retention policy for an LTR backup in an ISO 8601 format. Valid value is between 1 to 120 months. e.g. P1Y, P1M, P4W or P30D. + MonthlyRetention *string `json:"monthlyRetention,omitempty" tf:"monthly_retention,omitempty"` + + // The week of year to take the yearly backup. Value has to be between 1 and 52. + WeekOfYear *float64 `json:"weekOfYear,omitempty" tf:"week_of_year,omitempty"` + + // The weekly retention policy for an LTR backup in an ISO 8601 format. Valid value is between 1 to 520 weeks. e.g. P1Y, P1M, P1W or P7D. + WeeklyRetention *string `json:"weeklyRetention,omitempty" tf:"weekly_retention,omitempty"` + + // The yearly retention policy for an LTR backup in an ISO 8601 format. Valid value is between 1 to 10 years. e.g. P1Y, P12M, P52W or P365D. + YearlyRetention *string `json:"yearlyRetention,omitempty" tf:"yearly_retention,omitempty"` +} + +type MSSQLManagedDatabaseLongTermRetentionPolicyObservation struct { + + // Specifies if the backups are immutable. Defaults to false. 
+ ImmutableBackupsEnabled *bool `json:"immutableBackupsEnabled,omitempty" tf:"immutable_backups_enabled,omitempty"` + + // The monthly retention policy for an LTR backup in an ISO 8601 format. Valid value is between 1 to 120 months. e.g. P1Y, P1M, P4W or P30D. + MonthlyRetention *string `json:"monthlyRetention,omitempty" tf:"monthly_retention,omitempty"` + + // The week of year to take the yearly backup. Value has to be between 1 and 52. + WeekOfYear *float64 `json:"weekOfYear,omitempty" tf:"week_of_year,omitempty"` + + // The weekly retention policy for an LTR backup in an ISO 8601 format. Valid value is between 1 to 520 weeks. e.g. P1Y, P1M, P1W or P7D. + WeeklyRetention *string `json:"weeklyRetention,omitempty" tf:"weekly_retention,omitempty"` + + // The yearly retention policy for an LTR backup in an ISO 8601 format. Valid value is between 1 to 10 years. e.g. P1Y, P12M, P52W or P365D. + YearlyRetention *string `json:"yearlyRetention,omitempty" tf:"yearly_retention,omitempty"` +} + +type MSSQLManagedDatabaseLongTermRetentionPolicyParameters struct { + + // Specifies if the backups are immutable. Defaults to false. + // +kubebuilder:validation:Optional + ImmutableBackupsEnabled *bool `json:"immutableBackupsEnabled,omitempty" tf:"immutable_backups_enabled,omitempty"` + + // The monthly retention policy for an LTR backup in an ISO 8601 format. Valid value is between 1 to 120 months. e.g. P1Y, P1M, P4W or P30D. + // +kubebuilder:validation:Optional + MonthlyRetention *string `json:"monthlyRetention,omitempty" tf:"monthly_retention,omitempty"` + + // The week of year to take the yearly backup. Value has to be between 1 and 52. + // +kubebuilder:validation:Optional + WeekOfYear *float64 `json:"weekOfYear,omitempty" tf:"week_of_year,omitempty"` + + // The weekly retention policy for an LTR backup in an ISO 8601 format. Valid value is between 1 to 520 weeks. e.g. P1Y, P1M, P1W or P7D. 
+ // +kubebuilder:validation:Optional + WeeklyRetention *string `json:"weeklyRetention,omitempty" tf:"weekly_retention,omitempty"` + + // The yearly retention policy for an LTR backup in an ISO 8601 format. Valid value is between 1 to 10 years. e.g. P1Y, P12M, P52W or P365D. + // +kubebuilder:validation:Optional + YearlyRetention *string `json:"yearlyRetention,omitempty" tf:"yearly_retention,omitempty"` +} + +type MSSQLManagedDatabaseObservation struct { + + // The Azure SQL Managed Database ID. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A long_term_retention_policy block as defined below. + LongTermRetentionPolicy *MSSQLManagedDatabaseLongTermRetentionPolicyObservation `json:"longTermRetentionPolicy,omitempty" tf:"long_term_retention_policy,omitempty"` + + // The ID of the Azure SQL Managed Instance on which to create this Managed Database. Changing this forces a new resource to be created. + ManagedInstanceID *string `json:"managedInstanceId,omitempty" tf:"managed_instance_id,omitempty"` + + // A point_in_time_restore block as defined below. Changing this forces a new resource to be created. + PointInTimeRestore *PointInTimeRestoreObservation `json:"pointInTimeRestore,omitempty" tf:"point_in_time_restore,omitempty"` + + // The backup retention period in days. This is how many days Point-in-Time Restore will be supported. + ShortTermRetentionDays *float64 `json:"shortTermRetentionDays,omitempty" tf:"short_term_retention_days,omitempty"` +} + +type MSSQLManagedDatabaseParameters struct { + + // A long_term_retention_policy block as defined below. + // +kubebuilder:validation:Optional + LongTermRetentionPolicy *MSSQLManagedDatabaseLongTermRetentionPolicyParameters `json:"longTermRetentionPolicy,omitempty" tf:"long_term_retention_policy,omitempty"` + + // The ID of the Azure SQL Managed Instance on which to create this Managed Database. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta2.MSSQLManagedInstance + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + ManagedInstanceID *string `json:"managedInstanceId,omitempty" tf:"managed_instance_id,omitempty"` + + // Reference to a MSSQLManagedInstance in sql to populate managedInstanceId. + // +kubebuilder:validation:Optional + ManagedInstanceIDRef *v1.Reference `json:"managedInstanceIdRef,omitempty" tf:"-"` + + // Selector for a MSSQLManagedInstance in sql to populate managedInstanceId. + // +kubebuilder:validation:Optional + ManagedInstanceIDSelector *v1.Selector `json:"managedInstanceIdSelector,omitempty" tf:"-"` + + // A point_in_time_restore block as defined below. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + PointInTimeRestore *PointInTimeRestoreParameters `json:"pointInTimeRestore,omitempty" tf:"point_in_time_restore,omitempty"` + + // The backup retention period in days. This is how many days Point-in-Time Restore will be supported. + // +kubebuilder:validation:Optional + ShortTermRetentionDays *float64 `json:"shortTermRetentionDays,omitempty" tf:"short_term_retention_days,omitempty"` +} + +type PointInTimeRestoreInitParameters struct { + + // The point in time for the restore from source_database_id. Changing this forces a new resource to be created. + RestorePointInTime *string `json:"restorePointInTime,omitempty" tf:"restore_point_in_time,omitempty"` + + // The source database id that will be used to restore from. Changing this forces a new resource to be created. + SourceDatabaseID *string `json:"sourceDatabaseId,omitempty" tf:"source_database_id,omitempty"` +} + +type PointInTimeRestoreObservation struct { + + // The point in time for the restore from source_database_id. Changing this forces a new resource to be created. 
+ RestorePointInTime *string `json:"restorePointInTime,omitempty" tf:"restore_point_in_time,omitempty"` + + // The source database id that will be used to restore from. Changing this forces a new resource to be created. + SourceDatabaseID *string `json:"sourceDatabaseId,omitempty" tf:"source_database_id,omitempty"` +} + +type PointInTimeRestoreParameters struct { + + // The point in time for the restore from source_database_id. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + RestorePointInTime *string `json:"restorePointInTime" tf:"restore_point_in_time,omitempty"` + + // The source database id that will be used to restore from. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + SourceDatabaseID *string `json:"sourceDatabaseId" tf:"source_database_id,omitempty"` +} + +// MSSQLManagedDatabaseSpec defines the desired state of MSSQLManagedDatabase +type MSSQLManagedDatabaseSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider MSSQLManagedDatabaseParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider MSSQLManagedDatabaseInitParameters `json:"initProvider,omitempty"` +} + +// MSSQLManagedDatabaseStatus defines the observed state of MSSQLManagedDatabase. 
+type MSSQLManagedDatabaseStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider MSSQLManagedDatabaseObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// MSSQLManagedDatabase is the Schema for the MSSQLManagedDatabases API. Manages an Azure SQL Azure Managed Database. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type MSSQLManagedDatabase struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec MSSQLManagedDatabaseSpec `json:"spec"` + Status MSSQLManagedDatabaseStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// MSSQLManagedDatabaseList contains a list of MSSQLManagedDatabases +type MSSQLManagedDatabaseList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []MSSQLManagedDatabase `json:"items"` +} + +// Repository type metadata. +var ( + MSSQLManagedDatabase_Kind = "MSSQLManagedDatabase" + MSSQLManagedDatabase_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: MSSQLManagedDatabase_Kind}.String() + MSSQLManagedDatabase_KindAPIVersion = MSSQLManagedDatabase_Kind + "." 
+ CRDGroupVersion.String() + MSSQLManagedDatabase_GroupVersionKind = CRDGroupVersion.WithKind(MSSQLManagedDatabase_Kind) +) + +func init() { + SchemeBuilder.Register(&MSSQLManagedDatabase{}, &MSSQLManagedDatabaseList{}) +} diff --git a/apis/sql/v1beta2/zz_mssqlmanagedinstance_terraformed.go b/apis/sql/v1beta2/zz_mssqlmanagedinstance_terraformed.go new file mode 100755 index 000000000..3f934ec74 --- /dev/null +++ b/apis/sql/v1beta2/zz_mssqlmanagedinstance_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this MSSQLManagedInstance +func (mg *MSSQLManagedInstance) GetTerraformResourceType() string { + return "azurerm_mssql_managed_instance" +} + +// GetConnectionDetailsMapping for this MSSQLManagedInstance +func (tr *MSSQLManagedInstance) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"administrator_login_password": "spec.forProvider.administratorLoginPasswordSecretRef"} +} + +// GetObservation of this MSSQLManagedInstance +func (tr *MSSQLManagedInstance) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this MSSQLManagedInstance +func (tr *MSSQLManagedInstance) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this MSSQLManagedInstance +func (tr *MSSQLManagedInstance) GetID() string { + if 
tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this MSSQLManagedInstance +func (tr *MSSQLManagedInstance) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this MSSQLManagedInstance +func (tr *MSSQLManagedInstance) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this MSSQLManagedInstance +func (tr *MSSQLManagedInstance) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this MSSQLManagedInstance +func (tr *MSSQLManagedInstance) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this MSSQLManagedInstance using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *MSSQLManagedInstance) LateInitialize(attrs []byte) (bool, error) { + params := &MSSQLManagedInstanceParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *MSSQLManagedInstance) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/sql/v1beta2/zz_mssqlmanagedinstance_types.go b/apis/sql/v1beta2/zz_mssqlmanagedinstance_types.go new file mode 100755 index 000000000..d4928c3bf --- /dev/null +++ b/apis/sql/v1beta2/zz_mssqlmanagedinstance_types.go @@ -0,0 +1,380 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type MSSQLManagedInstanceIdentityInitParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this SQL Managed Instance. Required when type is set to UserAssigned. 
+ // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this SQL Managed Instance. Possible values are SystemAssigned, UserAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type MSSQLManagedInstanceIdentityObservation struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this SQL Managed Instance. Required when type is set to UserAssigned. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Principal ID for the Service Principal associated with the Identity of this SQL Managed Instance. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID for the Service Principal associated with the Identity of this SQL Managed Instance. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this SQL Managed Instance. Possible values are SystemAssigned, UserAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type MSSQLManagedInstanceIdentityParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this SQL Managed Instance. Required when type is set to UserAssigned. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this SQL Managed Instance. Possible values are SystemAssigned, UserAssigned. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type MSSQLManagedInstanceInitParameters struct { + + // The administrator login name for the new SQL Managed Instance. Changing this forces a new resource to be created. 
+ AdministratorLogin *string `json:"administratorLogin,omitempty" tf:"administrator_login,omitempty"` + + // Specifies how the SQL Managed Instance will be collated. Default value is SQL_Latin1_General_CP1_CI_AS. Changing this forces a new resource to be created. + Collation *string `json:"collation,omitempty" tf:"collation,omitempty"` + + // The ID of the SQL Managed Instance which will share the DNS zone. This is a prerequisite for creating an azurerm_sql_managed_instance_failover_group. Setting this after creation forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta2.MSSQLManagedInstance + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + DNSZonePartnerID *string `json:"dnsZonePartnerId,omitempty" tf:"dns_zone_partner_id,omitempty"` + + // Reference to a MSSQLManagedInstance in sql to populate dnsZonePartnerId. + // +kubebuilder:validation:Optional + DNSZonePartnerIDRef *v1.Reference `json:"dnsZonePartnerIdRef,omitempty" tf:"-"` + + // Selector for a MSSQLManagedInstance in sql to populate dnsZonePartnerId. + // +kubebuilder:validation:Optional + DNSZonePartnerIDSelector *v1.Selector `json:"dnsZonePartnerIdSelector,omitempty" tf:"-"` + + // An identity block as defined below. + Identity *MSSQLManagedInstanceIdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // What type of license the Managed Instance will use. Possible values are LicenseIncluded and BasePrice. + LicenseType *string `json:"licenseType,omitempty" tf:"license_type,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the Public Maintenance Configuration window to apply to the SQL Managed Instance. 
Valid values include SQL_Default or an Azure Location in the format SQL_{Location}_MI_{Size}(for example SQL_EastUS_MI_1). Defaults to SQL_Default. + MaintenanceConfigurationName *string `json:"maintenanceConfigurationName,omitempty" tf:"maintenance_configuration_name,omitempty"` + + // The Minimum TLS Version. Default value is 1.2 Valid values include 1.0, 1.1, 1.2. + MinimumTLSVersion *string `json:"minimumTlsVersion,omitempty" tf:"minimum_tls_version,omitempty"` + + // Specifies how the SQL Managed Instance will be accessed. Default value is Default. Valid values include Default, Proxy, and Redirect. + ProxyOverride *string `json:"proxyOverride,omitempty" tf:"proxy_override,omitempty"` + + // Is the public data endpoint enabled? Default value is false. + PublicDataEndpointEnabled *bool `json:"publicDataEndpointEnabled,omitempty" tf:"public_data_endpoint_enabled,omitempty"` + + // Specifies the SKU Name for the SQL Managed Instance. Valid values include GP_Gen4, GP_Gen5, GP_Gen8IM, GP_Gen8IH, BC_Gen4, BC_Gen5, BC_Gen8IM or BC_Gen8IH. + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // Specifies the storage account type used to store backups for this database. Changing this forces a new resource to be created. Possible values are GRS, LRS and ZRS. Defaults to GRS. + StorageAccountType *string `json:"storageAccountType,omitempty" tf:"storage_account_type,omitempty"` + + // Maximum storage space for the SQL Managed instance. This should be a multiple of 32 (GB). + StorageSizeInGb *float64 `json:"storageSizeInGb,omitempty" tf:"storage_size_in_gb,omitempty"` + + // The subnet resource id that the SQL Managed Instance will be associated with. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The TimeZone ID that the SQL Managed Instance will be operating in. Default value is UTC. Changing this forces a new resource to be created. + TimezoneID *string `json:"timezoneId,omitempty" tf:"timezone_id,omitempty"` + + // Number of cores that should be assigned to the SQL Managed Instance. Values can be 8, 16, or 24 for Gen4 SKUs, or 4, 6, 8, 10, 12, 16, 20, 24, 32, 40, 48, 56, 64, 80, 96 or 128 for Gen5 SKUs. + Vcores *float64 `json:"vcores,omitempty" tf:"vcores,omitempty"` + + // Specifies whether or not the SQL Managed Instance is zone redundant. Defaults to false. + ZoneRedundantEnabled *bool `json:"zoneRedundantEnabled,omitempty" tf:"zone_redundant_enabled,omitempty"` +} + +type MSSQLManagedInstanceObservation struct { + + // The administrator login name for the new SQL Managed Instance. Changing this forces a new resource to be created. + AdministratorLogin *string `json:"administratorLogin,omitempty" tf:"administrator_login,omitempty"` + + // Specifies how the SQL Managed Instance will be collated. Default value is SQL_Latin1_General_CP1_CI_AS. Changing this forces a new resource to be created. 
+ Collation *string `json:"collation,omitempty" tf:"collation,omitempty"` + + // The Dns Zone where the SQL Managed Instance is located. + DNSZone *string `json:"dnsZone,omitempty" tf:"dns_zone,omitempty"` + + // The ID of the SQL Managed Instance which will share the DNS zone. This is a prerequisite for creating an azurerm_sql_managed_instance_failover_group. Setting this after creation forces a new resource to be created. + DNSZonePartnerID *string `json:"dnsZonePartnerId,omitempty" tf:"dns_zone_partner_id,omitempty"` + + // The fully qualified domain name of the Azure Managed SQL Instance + Fqdn *string `json:"fqdn,omitempty" tf:"fqdn,omitempty"` + + // The SQL Managed Instance ID. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. + Identity *MSSQLManagedInstanceIdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // What type of license the Managed Instance will use. Possible values are LicenseIncluded and BasePrice. + LicenseType *string `json:"licenseType,omitempty" tf:"license_type,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the Public Maintenance Configuration window to apply to the SQL Managed Instance. Valid values include SQL_Default or an Azure Location in the format SQL_{Location}_MI_{Size}(for example SQL_EastUS_MI_1). Defaults to SQL_Default. + MaintenanceConfigurationName *string `json:"maintenanceConfigurationName,omitempty" tf:"maintenance_configuration_name,omitempty"` + + // The Minimum TLS Version. Default value is 1.2 Valid values include 1.0, 1.1, 1.2. + MinimumTLSVersion *string `json:"minimumTlsVersion,omitempty" tf:"minimum_tls_version,omitempty"` + + // Specifies how the SQL Managed Instance will be accessed. Default value is Default. Valid values include Default, Proxy, and Redirect. 
+ ProxyOverride *string `json:"proxyOverride,omitempty" tf:"proxy_override,omitempty"` + + // Is the public data endpoint enabled? Default value is false. + PublicDataEndpointEnabled *bool `json:"publicDataEndpointEnabled,omitempty" tf:"public_data_endpoint_enabled,omitempty"` + + // The name of the resource group in which to create the SQL Managed Instance. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Specifies the SKU Name for the SQL Managed Instance. Valid values include GP_Gen4, GP_Gen5, GP_Gen8IM, GP_Gen8IH, BC_Gen4, BC_Gen5, BC_Gen8IM or BC_Gen8IH. + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // Specifies the storage account type used to store backups for this database. Changing this forces a new resource to be created. Possible values are GRS, LRS and ZRS. Defaults to GRS. + StorageAccountType *string `json:"storageAccountType,omitempty" tf:"storage_account_type,omitempty"` + + // Maximum storage space for the SQL Managed instance. This should be a multiple of 32 (GB). + StorageSizeInGb *float64 `json:"storageSizeInGb,omitempty" tf:"storage_size_in_gb,omitempty"` + + // The subnet resource id that the SQL Managed Instance will be associated with. Changing this forces a new resource to be created. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The TimeZone ID that the SQL Managed Instance will be operating in. Default value is UTC. Changing this forces a new resource to be created. + TimezoneID *string `json:"timezoneId,omitempty" tf:"timezone_id,omitempty"` + + // Number of cores that should be assigned to the SQL Managed Instance. 
Values can be 8, 16, or 24 for Gen4 SKUs, or 4, 6, 8, 10, 12, 16, 20, 24, 32, 40, 48, 56, 64, 80, 96 or 128 for Gen5 SKUs. + Vcores *float64 `json:"vcores,omitempty" tf:"vcores,omitempty"` + + // Specifies whether or not the SQL Managed Instance is zone redundant. Defaults to false. + ZoneRedundantEnabled *bool `json:"zoneRedundantEnabled,omitempty" tf:"zone_redundant_enabled,omitempty"` +} + +type MSSQLManagedInstanceParameters struct { + + // The administrator login name for the new SQL Managed Instance. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + AdministratorLogin *string `json:"administratorLogin,omitempty" tf:"administrator_login,omitempty"` + + // The password associated with the administrator_login user. Needs to comply with Azure's Password Policy + // +kubebuilder:validation:Optional + AdministratorLoginPasswordSecretRef v1.SecretKeySelector `json:"administratorLoginPasswordSecretRef" tf:"-"` + + // Specifies how the SQL Managed Instance will be collated. Default value is SQL_Latin1_General_CP1_CI_AS. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Collation *string `json:"collation,omitempty" tf:"collation,omitempty"` + + // The ID of the SQL Managed Instance which will share the DNS zone. This is a prerequisite for creating an azurerm_sql_managed_instance_failover_group. Setting this after creation forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta2.MSSQLManagedInstance + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + DNSZonePartnerID *string `json:"dnsZonePartnerId,omitempty" tf:"dns_zone_partner_id,omitempty"` + + // Reference to a MSSQLManagedInstance in sql to populate dnsZonePartnerId. 
+ // +kubebuilder:validation:Optional + DNSZonePartnerIDRef *v1.Reference `json:"dnsZonePartnerIdRef,omitempty" tf:"-"` + + // Selector for a MSSQLManagedInstance in sql to populate dnsZonePartnerId. + // +kubebuilder:validation:Optional + DNSZonePartnerIDSelector *v1.Selector `json:"dnsZonePartnerIdSelector,omitempty" tf:"-"` + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *MSSQLManagedInstanceIdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // What type of license the Managed Instance will use. Possible values are LicenseIncluded and BasePrice. + // +kubebuilder:validation:Optional + LicenseType *string `json:"licenseType,omitempty" tf:"license_type,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the Public Maintenance Configuration window to apply to the SQL Managed Instance. Valid values include SQL_Default or an Azure Location in the format SQL_{Location}_MI_{Size}(for example SQL_EastUS_MI_1). Defaults to SQL_Default. + // +kubebuilder:validation:Optional + MaintenanceConfigurationName *string `json:"maintenanceConfigurationName,omitempty" tf:"maintenance_configuration_name,omitempty"` + + // The Minimum TLS Version. Default value is 1.2 Valid values include 1.0, 1.1, 1.2. + // +kubebuilder:validation:Optional + MinimumTLSVersion *string `json:"minimumTlsVersion,omitempty" tf:"minimum_tls_version,omitempty"` + + // Specifies how the SQL Managed Instance will be accessed. Default value is Default. Valid values include Default, Proxy, and Redirect. + // +kubebuilder:validation:Optional + ProxyOverride *string `json:"proxyOverride,omitempty" tf:"proxy_override,omitempty"` + + // Is the public data endpoint enabled? Default value is false. 
+ // +kubebuilder:validation:Optional + PublicDataEndpointEnabled *bool `json:"publicDataEndpointEnabled,omitempty" tf:"public_data_endpoint_enabled,omitempty"` + + // The name of the resource group in which to create the SQL Managed Instance. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // Specifies the SKU Name for the SQL Managed Instance. Valid values include GP_Gen4, GP_Gen5, GP_Gen8IM, GP_Gen8IH, BC_Gen4, BC_Gen5, BC_Gen8IM or BC_Gen8IH. + // +kubebuilder:validation:Optional + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // Specifies the storage account type used to store backups for this database. Changing this forces a new resource to be created. Possible values are GRS, LRS and ZRS. Defaults to GRS. + // +kubebuilder:validation:Optional + StorageAccountType *string `json:"storageAccountType,omitempty" tf:"storage_account_type,omitempty"` + + // Maximum storage space for the SQL Managed instance. This should be a multiple of 32 (GB). + // +kubebuilder:validation:Optional + StorageSizeInGb *float64 `json:"storageSizeInGb,omitempty" tf:"storage_size_in_gb,omitempty"` + + // The subnet resource id that the SQL Managed Instance will be associated with. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The TimeZone ID that the SQL Managed Instance will be operating in. Default value is UTC. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + TimezoneID *string `json:"timezoneId,omitempty" tf:"timezone_id,omitempty"` + + // Number of cores that should be assigned to the SQL Managed Instance. Values can be 8, 16, or 24 for Gen4 SKUs, or 4, 6, 8, 10, 12, 16, 20, 24, 32, 40, 48, 56, 64, 80, 96 or 128 for Gen5 SKUs. + // +kubebuilder:validation:Optional + Vcores *float64 `json:"vcores,omitempty" tf:"vcores,omitempty"` + + // Specifies whether or not the SQL Managed Instance is zone redundant. Defaults to false. + // +kubebuilder:validation:Optional + ZoneRedundantEnabled *bool `json:"zoneRedundantEnabled,omitempty" tf:"zone_redundant_enabled,omitempty"` +} + +// MSSQLManagedInstanceSpec defines the desired state of MSSQLManagedInstance +type MSSQLManagedInstanceSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider MSSQLManagedInstanceParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. 
+ // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider MSSQLManagedInstanceInitParameters `json:"initProvider,omitempty"` +} + +// MSSQLManagedInstanceStatus defines the observed state of MSSQLManagedInstance. +type MSSQLManagedInstanceStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider MSSQLManagedInstanceObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// MSSQLManagedInstance is the Schema for the MSSQLManagedInstances API. Manages a Microsoft SQL Azure Managed Instance. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type MSSQLManagedInstance struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.administratorLogin) || (has(self.initProvider) && has(self.initProvider.administratorLogin))",message="spec.forProvider.administratorLogin is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.administratorLoginPasswordSecretRef)",message="spec.forProvider.administratorLoginPasswordSecretRef is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.licenseType) || (has(self.initProvider) && has(self.initProvider.licenseType))",message="spec.forProvider.licenseType is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in 
self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.skuName) || (has(self.initProvider) && has(self.initProvider.skuName))",message="spec.forProvider.skuName is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.storageSizeInGb) || (has(self.initProvider) && has(self.initProvider.storageSizeInGb))",message="spec.forProvider.storageSizeInGb is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.vcores) || (has(self.initProvider) && has(self.initProvider.vcores))",message="spec.forProvider.vcores is a required parameter" + Spec MSSQLManagedInstanceSpec `json:"spec"` + Status MSSQLManagedInstanceStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// MSSQLManagedInstanceList contains a list of MSSQLManagedInstances +type MSSQLManagedInstanceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []MSSQLManagedInstance `json:"items"` +} + +// Repository type metadata. +var ( + MSSQLManagedInstance_Kind = "MSSQLManagedInstance" + MSSQLManagedInstance_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: MSSQLManagedInstance_Kind}.String() + MSSQLManagedInstance_KindAPIVersion = MSSQLManagedInstance_Kind + "." 
+ CRDGroupVersion.String() + MSSQLManagedInstance_GroupVersionKind = CRDGroupVersion.WithKind(MSSQLManagedInstance_Kind) +) + +func init() { + SchemeBuilder.Register(&MSSQLManagedInstance{}, &MSSQLManagedInstanceList{}) +} diff --git a/apis/sql/v1beta2/zz_mssqlmanagedinstancefailovergroup_terraformed.go b/apis/sql/v1beta2/zz_mssqlmanagedinstancefailovergroup_terraformed.go new file mode 100755 index 000000000..4b3b06228 --- /dev/null +++ b/apis/sql/v1beta2/zz_mssqlmanagedinstancefailovergroup_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this MSSQLManagedInstanceFailoverGroup +func (mg *MSSQLManagedInstanceFailoverGroup) GetTerraformResourceType() string { + return "azurerm_mssql_managed_instance_failover_group" +} + +// GetConnectionDetailsMapping for this MSSQLManagedInstanceFailoverGroup +func (tr *MSSQLManagedInstanceFailoverGroup) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this MSSQLManagedInstanceFailoverGroup +func (tr *MSSQLManagedInstanceFailoverGroup) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this MSSQLManagedInstanceFailoverGroup +func (tr *MSSQLManagedInstanceFailoverGroup) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this 
MSSQLManagedInstanceFailoverGroup +func (tr *MSSQLManagedInstanceFailoverGroup) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this MSSQLManagedInstanceFailoverGroup +func (tr *MSSQLManagedInstanceFailoverGroup) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this MSSQLManagedInstanceFailoverGroup +func (tr *MSSQLManagedInstanceFailoverGroup) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this MSSQLManagedInstanceFailoverGroup +func (tr *MSSQLManagedInstanceFailoverGroup) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this MSSQLManagedInstanceFailoverGroup +func (tr *MSSQLManagedInstanceFailoverGroup) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this MSSQLManagedInstanceFailoverGroup using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *MSSQLManagedInstanceFailoverGroup) LateInitialize(attrs []byte) (bool, error) { + params := &MSSQLManagedInstanceFailoverGroupParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *MSSQLManagedInstanceFailoverGroup) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/sql/v1beta2/zz_mssqlmanagedinstancefailovergroup_types.go b/apis/sql/v1beta2/zz_mssqlmanagedinstancefailovergroup_types.go new file mode 100755 index 000000000..c6d99c3ec --- /dev/null +++ b/apis/sql/v1beta2/zz_mssqlmanagedinstancefailovergroup_types.go @@ -0,0 +1,224 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type MSSQLManagedInstanceFailoverGroupInitParameters struct { + + // The ID of the Azure SQL Managed Instance which will be replicated using a Managed Instance Failover Group. 
Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta2.MSSQLManagedInstance + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + ManagedInstanceID *string `json:"managedInstanceId,omitempty" tf:"managed_instance_id,omitempty"` + + // Reference to a MSSQLManagedInstance in sql to populate managedInstanceId. + // +kubebuilder:validation:Optional + ManagedInstanceIDRef *v1.Reference `json:"managedInstanceIdRef,omitempty" tf:"-"` + + // Selector for a MSSQLManagedInstance in sql to populate managedInstanceId. + // +kubebuilder:validation:Optional + ManagedInstanceIDSelector *v1.Selector `json:"managedInstanceIdSelector,omitempty" tf:"-"` + + // The ID of the Azure SQL Managed Instance which will be replicated to. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta2.MSSQLManagedInstance + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + PartnerManagedInstanceID *string `json:"partnerManagedInstanceId,omitempty" tf:"partner_managed_instance_id,omitempty"` + + // Reference to a MSSQLManagedInstance in sql to populate partnerManagedInstanceId. + // +kubebuilder:validation:Optional + PartnerManagedInstanceIDRef *v1.Reference `json:"partnerManagedInstanceIdRef,omitempty" tf:"-"` + + // Selector for a MSSQLManagedInstance in sql to populate partnerManagedInstanceId. + // +kubebuilder:validation:Optional + PartnerManagedInstanceIDSelector *v1.Selector `json:"partnerManagedInstanceIdSelector,omitempty" tf:"-"` + + // A read_write_endpoint_failover_policy block as defined below. 
+ ReadWriteEndpointFailoverPolicy *MSSQLManagedInstanceFailoverGroupReadWriteEndpointFailoverPolicyInitParameters `json:"readWriteEndpointFailoverPolicy,omitempty" tf:"read_write_endpoint_failover_policy,omitempty"` + + // Failover policy for the read-only endpoint. Defaults to true. + ReadonlyEndpointFailoverPolicyEnabled *bool `json:"readonlyEndpointFailoverPolicyEnabled,omitempty" tf:"readonly_endpoint_failover_policy_enabled,omitempty"` +} + +type MSSQLManagedInstanceFailoverGroupObservation struct { + + // The ID of the Managed Instance Failover Group. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The Azure Region where the Managed Instance Failover Group should exist. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The ID of the Azure SQL Managed Instance which will be replicated using a Managed Instance Failover Group. Changing this forces a new resource to be created. + ManagedInstanceID *string `json:"managedInstanceId,omitempty" tf:"managed_instance_id,omitempty"` + + // The ID of the Azure SQL Managed Instance which will be replicated to. Changing this forces a new resource to be created. + PartnerManagedInstanceID *string `json:"partnerManagedInstanceId,omitempty" tf:"partner_managed_instance_id,omitempty"` + + // A partner_region block as defined below. + PartnerRegion []PartnerRegionObservation `json:"partnerRegion,omitempty" tf:"partner_region,omitempty"` + + // A read_write_endpoint_failover_policy block as defined below. + ReadWriteEndpointFailoverPolicy *MSSQLManagedInstanceFailoverGroupReadWriteEndpointFailoverPolicyObservation `json:"readWriteEndpointFailoverPolicy,omitempty" tf:"read_write_endpoint_failover_policy,omitempty"` + + // Failover policy for the read-only endpoint. Defaults to true. 
+ ReadonlyEndpointFailoverPolicyEnabled *bool `json:"readonlyEndpointFailoverPolicyEnabled,omitempty" tf:"readonly_endpoint_failover_policy_enabled,omitempty"` + + // The local replication role of the Managed Instance Failover Group. + Role *string `json:"role,omitempty" tf:"role,omitempty"` +} + +type MSSQLManagedInstanceFailoverGroupParameters struct { + + // The Azure Region where the Managed Instance Failover Group should exist. Changing this forces a new resource to be created. + // +kubebuilder:validation:Required + Location *string `json:"location" tf:"location,omitempty"` + + // The ID of the Azure SQL Managed Instance which will be replicated using a Managed Instance Failover Group. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta2.MSSQLManagedInstance + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + ManagedInstanceID *string `json:"managedInstanceId,omitempty" tf:"managed_instance_id,omitempty"` + + // Reference to a MSSQLManagedInstance in sql to populate managedInstanceId. + // +kubebuilder:validation:Optional + ManagedInstanceIDRef *v1.Reference `json:"managedInstanceIdRef,omitempty" tf:"-"` + + // Selector for a MSSQLManagedInstance in sql to populate managedInstanceId. + // +kubebuilder:validation:Optional + ManagedInstanceIDSelector *v1.Selector `json:"managedInstanceIdSelector,omitempty" tf:"-"` + + // The ID of the Azure SQL Managed Instance which will be replicated to. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta2.MSSQLManagedInstance + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + PartnerManagedInstanceID *string `json:"partnerManagedInstanceId,omitempty" tf:"partner_managed_instance_id,omitempty"` + + // Reference to a MSSQLManagedInstance in sql to populate partnerManagedInstanceId. + // +kubebuilder:validation:Optional + PartnerManagedInstanceIDRef *v1.Reference `json:"partnerManagedInstanceIdRef,omitempty" tf:"-"` + + // Selector for a MSSQLManagedInstance in sql to populate partnerManagedInstanceId. + // +kubebuilder:validation:Optional + PartnerManagedInstanceIDSelector *v1.Selector `json:"partnerManagedInstanceIdSelector,omitempty" tf:"-"` + + // A read_write_endpoint_failover_policy block as defined below. + // +kubebuilder:validation:Optional + ReadWriteEndpointFailoverPolicy *MSSQLManagedInstanceFailoverGroupReadWriteEndpointFailoverPolicyParameters `json:"readWriteEndpointFailoverPolicy,omitempty" tf:"read_write_endpoint_failover_policy,omitempty"` + + // Failover policy for the read-only endpoint. Defaults to true. + // +kubebuilder:validation:Optional + ReadonlyEndpointFailoverPolicyEnabled *bool `json:"readonlyEndpointFailoverPolicyEnabled,omitempty" tf:"readonly_endpoint_failover_policy_enabled,omitempty"` +} + +type MSSQLManagedInstanceFailoverGroupReadWriteEndpointFailoverPolicyInitParameters struct { + + // Applies only if mode is Automatic. The grace period in minutes before failover with data loss is attempted. + GraceMinutes *float64 `json:"graceMinutes,omitempty" tf:"grace_minutes,omitempty"` + + // The failover mode. Possible values are Automatic or Manual. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` +} + +type MSSQLManagedInstanceFailoverGroupReadWriteEndpointFailoverPolicyObservation struct { + + // Applies only if mode is Automatic. 
The grace period in minutes before failover with data loss is attempted. + GraceMinutes *float64 `json:"graceMinutes,omitempty" tf:"grace_minutes,omitempty"` + + // The failover mode. Possible values are Automatic or Manual. + Mode *string `json:"mode,omitempty" tf:"mode,omitempty"` +} + +type MSSQLManagedInstanceFailoverGroupReadWriteEndpointFailoverPolicyParameters struct { + + // Applies only if mode is Automatic. The grace period in minutes before failover with data loss is attempted. + // +kubebuilder:validation:Optional + GraceMinutes *float64 `json:"graceMinutes,omitempty" tf:"grace_minutes,omitempty"` + + // The failover mode. Possible values are Automatic or Manual. + // +kubebuilder:validation:Optional + Mode *string `json:"mode" tf:"mode,omitempty"` +} + +type PartnerRegionInitParameters struct { +} + +type PartnerRegionObservation struct { + + // The Azure Region where the Managed Instance Failover Group partner exists. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The partner replication role of the Managed Instance Failover Group. + Role *string `json:"role,omitempty" tf:"role,omitempty"` +} + +type PartnerRegionParameters struct { +} + +// MSSQLManagedInstanceFailoverGroupSpec defines the desired state of MSSQLManagedInstanceFailoverGroup +type MSSQLManagedInstanceFailoverGroupSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider MSSQLManagedInstanceFailoverGroupParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider MSSQLManagedInstanceFailoverGroupInitParameters `json:"initProvider,omitempty"` +} + +// MSSQLManagedInstanceFailoverGroupStatus defines the observed state of MSSQLManagedInstanceFailoverGroup. +type MSSQLManagedInstanceFailoverGroupStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider MSSQLManagedInstanceFailoverGroupObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// MSSQLManagedInstanceFailoverGroup is the Schema for the MSSQLManagedInstanceFailoverGroups API. Manages an Azure SQL Managed Instance Failover Group. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type MSSQLManagedInstanceFailoverGroup struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.readWriteEndpointFailoverPolicy) || (has(self.initProvider) && has(self.initProvider.readWriteEndpointFailoverPolicy))",message="spec.forProvider.readWriteEndpointFailoverPolicy is a required parameter" + Spec MSSQLManagedInstanceFailoverGroupSpec `json:"spec"` + Status MSSQLManagedInstanceFailoverGroupStatus 
`json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// MSSQLManagedInstanceFailoverGroupList contains a list of MSSQLManagedInstanceFailoverGroups +type MSSQLManagedInstanceFailoverGroupList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []MSSQLManagedInstanceFailoverGroup `json:"items"` +} + +// Repository type metadata. +var ( + MSSQLManagedInstanceFailoverGroup_Kind = "MSSQLManagedInstanceFailoverGroup" + MSSQLManagedInstanceFailoverGroup_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: MSSQLManagedInstanceFailoverGroup_Kind}.String() + MSSQLManagedInstanceFailoverGroup_KindAPIVersion = MSSQLManagedInstanceFailoverGroup_Kind + "." + CRDGroupVersion.String() + MSSQLManagedInstanceFailoverGroup_GroupVersionKind = CRDGroupVersion.WithKind(MSSQLManagedInstanceFailoverGroup_Kind) +) + +func init() { + SchemeBuilder.Register(&MSSQLManagedInstanceFailoverGroup{}, &MSSQLManagedInstanceFailoverGroupList{}) +} diff --git a/apis/sql/v1beta2/zz_mssqlmanagedinstancevulnerabilityassessment_terraformed.go b/apis/sql/v1beta2/zz_mssqlmanagedinstancevulnerabilityassessment_terraformed.go new file mode 100755 index 000000000..bd92d0b30 --- /dev/null +++ b/apis/sql/v1beta2/zz_mssqlmanagedinstancevulnerabilityassessment_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this MSSQLManagedInstanceVulnerabilityAssessment +func (mg *MSSQLManagedInstanceVulnerabilityAssessment) GetTerraformResourceType() string { + return "azurerm_mssql_managed_instance_vulnerability_assessment" +} + +// GetConnectionDetailsMapping for this MSSQLManagedInstanceVulnerabilityAssessment +func (tr *MSSQLManagedInstanceVulnerabilityAssessment) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"storage_account_access_key": "spec.forProvider.storageAccountAccessKeySecretRef", "storage_container_sas_key": "spec.forProvider.storageContainerSasKeySecretRef"} +} + +// GetObservation of this MSSQLManagedInstanceVulnerabilityAssessment +func (tr *MSSQLManagedInstanceVulnerabilityAssessment) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this MSSQLManagedInstanceVulnerabilityAssessment +func (tr *MSSQLManagedInstanceVulnerabilityAssessment) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this MSSQLManagedInstanceVulnerabilityAssessment +func (tr *MSSQLManagedInstanceVulnerabilityAssessment) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this MSSQLManagedInstanceVulnerabilityAssessment +func (tr *MSSQLManagedInstanceVulnerabilityAssessment) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if 
err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this MSSQLManagedInstanceVulnerabilityAssessment +func (tr *MSSQLManagedInstanceVulnerabilityAssessment) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this MSSQLManagedInstanceVulnerabilityAssessment +func (tr *MSSQLManagedInstanceVulnerabilityAssessment) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this MSSQLManagedInstanceVulnerabilityAssessment +func (tr *MSSQLManagedInstanceVulnerabilityAssessment) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this MSSQLManagedInstanceVulnerabilityAssessment using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *MSSQLManagedInstanceVulnerabilityAssessment) LateInitialize(attrs []byte) (bool, error) { + params := &MSSQLManagedInstanceVulnerabilityAssessmentParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *MSSQLManagedInstanceVulnerabilityAssessment) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/sql/v1beta2/zz_mssqlmanagedinstancevulnerabilityassessment_types.go b/apis/sql/v1beta2/zz_mssqlmanagedinstancevulnerabilityassessment_types.go new file mode 100755 index 000000000..0821c8e5b --- /dev/null +++ b/apis/sql/v1beta2/zz_mssqlmanagedinstancevulnerabilityassessment_types.go @@ -0,0 +1,171 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type MSSQLManagedInstanceVulnerabilityAssessmentInitParameters struct { + + // The recurring scans settings. 
The recurring_scans block supports fields documented below. + RecurringScans *RecurringScansInitParameters `json:"recurringScans,omitempty" tf:"recurring_scans,omitempty"` + + // A blob storage container path to hold the scan results (e.g. https://myStorage.blob.core.windows.net/VaScans/). + StorageContainerPath *string `json:"storageContainerPath,omitempty" tf:"storage_container_path,omitempty"` +} + +type MSSQLManagedInstanceVulnerabilityAssessmentObservation struct { + + // The ID of the Vulnerability Assessment. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The id of the MS SQL Managed Instance. Changing this forces a new resource to be created. + ManagedInstanceID *string `json:"managedInstanceId,omitempty" tf:"managed_instance_id,omitempty"` + + // The recurring scans settings. The recurring_scans block supports fields documented below. + RecurringScans *RecurringScansObservation `json:"recurringScans,omitempty" tf:"recurring_scans,omitempty"` + + // A blob storage container path to hold the scan results (e.g. https://myStorage.blob.core.windows.net/VaScans/). + StorageContainerPath *string `json:"storageContainerPath,omitempty" tf:"storage_container_path,omitempty"` +} + +type MSSQLManagedInstanceVulnerabilityAssessmentParameters struct { + + // The id of the MS SQL Managed Instance. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta2.MSSQLManagedInstance + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + ManagedInstanceID *string `json:"managedInstanceId,omitempty" tf:"managed_instance_id,omitempty"` + + // Reference to a MSSQLManagedInstance in sql to populate managedInstanceId. 
+ // +kubebuilder:validation:Optional + ManagedInstanceIDRef *v1.Reference `json:"managedInstanceIdRef,omitempty" tf:"-"` + + // Selector for a MSSQLManagedInstance in sql to populate managedInstanceId. + // +kubebuilder:validation:Optional + ManagedInstanceIDSelector *v1.Selector `json:"managedInstanceIdSelector,omitempty" tf:"-"` + + // The recurring scans settings. The recurring_scans block supports fields documented below. + // +kubebuilder:validation:Optional + RecurringScans *RecurringScansParameters `json:"recurringScans,omitempty" tf:"recurring_scans,omitempty"` + + // Specifies the identifier key of the storage account for vulnerability assessment scan results. If storage_container_sas_key isn't specified, storage_account_access_key is required. + // +kubebuilder:validation:Optional + StorageAccountAccessKeySecretRef *v1.SecretKeySelector `json:"storageAccountAccessKeySecretRef,omitempty" tf:"-"` + + // A blob storage container path to hold the scan results (e.g. https://myStorage.blob.core.windows.net/VaScans/). + // +kubebuilder:validation:Optional + StorageContainerPath *string `json:"storageContainerPath,omitempty" tf:"storage_container_path,omitempty"` + + // A shared access signature (SAS Key) that has write access to the blob container specified in storage_container_path parameter. If storage_account_access_key isn't specified, storage_container_sas_key is required. + // +kubebuilder:validation:Optional + StorageContainerSASKeySecretRef *v1.SecretKeySelector `json:"storageContainerSasKeySecretRef,omitempty" tf:"-"` +} + +type RecurringScansInitParameters struct { + + // Boolean flag which specifies if the schedule scan notification will be sent to the subscription administrators. Defaults to true. + EmailSubscriptionAdmins *bool `json:"emailSubscriptionAdmins,omitempty" tf:"email_subscription_admins,omitempty"` + + // Specifies an array of e-mail addresses to which the scan notification is sent. 
+ Emails []*string `json:"emails,omitempty" tf:"emails,omitempty"` + + // Boolean flag which specifies if recurring scans is enabled or disabled. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type RecurringScansObservation struct { + + // Boolean flag which specifies if the schedule scan notification will be sent to the subscription administrators. Defaults to true. + EmailSubscriptionAdmins *bool `json:"emailSubscriptionAdmins,omitempty" tf:"email_subscription_admins,omitempty"` + + // Specifies an array of e-mail addresses to which the scan notification is sent. + Emails []*string `json:"emails,omitempty" tf:"emails,omitempty"` + + // Boolean flag which specifies if recurring scans is enabled or disabled. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type RecurringScansParameters struct { + + // Boolean flag which specifies if the schedule scan notification will be sent to the subscription administrators. Defaults to true. + // +kubebuilder:validation:Optional + EmailSubscriptionAdmins *bool `json:"emailSubscriptionAdmins,omitempty" tf:"email_subscription_admins,omitempty"` + + // Specifies an array of e-mail addresses to which the scan notification is sent. + // +kubebuilder:validation:Optional + Emails []*string `json:"emails,omitempty" tf:"emails,omitempty"` + + // Boolean flag which specifies if recurring scans is enabled or disabled. Defaults to false. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +// MSSQLManagedInstanceVulnerabilityAssessmentSpec defines the desired state of MSSQLManagedInstanceVulnerabilityAssessment +type MSSQLManagedInstanceVulnerabilityAssessmentSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider MSSQLManagedInstanceVulnerabilityAssessmentParameters `json:"forProvider"` + // THIS IS A BETA FIELD. 
It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider MSSQLManagedInstanceVulnerabilityAssessmentInitParameters `json:"initProvider,omitempty"` +} + +// MSSQLManagedInstanceVulnerabilityAssessmentStatus defines the observed state of MSSQLManagedInstanceVulnerabilityAssessment. +type MSSQLManagedInstanceVulnerabilityAssessmentStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider MSSQLManagedInstanceVulnerabilityAssessmentObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// MSSQLManagedInstanceVulnerabilityAssessment is the Schema for the MSSQLManagedInstanceVulnerabilityAssessments API. Manages the Vulnerability Assessment for an MS Managed Instance. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type MSSQLManagedInstanceVulnerabilityAssessment struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.storageContainerPath) || (has(self.initProvider) && has(self.initProvider.storageContainerPath))",message="spec.forProvider.storageContainerPath is a required parameter" + Spec MSSQLManagedInstanceVulnerabilityAssessmentSpec `json:"spec"` + Status MSSQLManagedInstanceVulnerabilityAssessmentStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// MSSQLManagedInstanceVulnerabilityAssessmentList contains a list of MSSQLManagedInstanceVulnerabilityAssessments +type MSSQLManagedInstanceVulnerabilityAssessmentList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []MSSQLManagedInstanceVulnerabilityAssessment `json:"items"` +} + +// Repository type metadata. +var ( + MSSQLManagedInstanceVulnerabilityAssessment_Kind = "MSSQLManagedInstanceVulnerabilityAssessment" + MSSQLManagedInstanceVulnerabilityAssessment_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: MSSQLManagedInstanceVulnerabilityAssessment_Kind}.String() + MSSQLManagedInstanceVulnerabilityAssessment_KindAPIVersion = MSSQLManagedInstanceVulnerabilityAssessment_Kind + "." 
+ CRDGroupVersion.String() + MSSQLManagedInstanceVulnerabilityAssessment_GroupVersionKind = CRDGroupVersion.WithKind(MSSQLManagedInstanceVulnerabilityAssessment_Kind) +) + +func init() { + SchemeBuilder.Register(&MSSQLManagedInstanceVulnerabilityAssessment{}, &MSSQLManagedInstanceVulnerabilityAssessmentList{}) +} diff --git a/apis/sql/v1beta2/zz_mssqlserver_terraformed.go b/apis/sql/v1beta2/zz_mssqlserver_terraformed.go new file mode 100755 index 000000000..e6655754d --- /dev/null +++ b/apis/sql/v1beta2/zz_mssqlserver_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this MSSQLServer +func (mg *MSSQLServer) GetTerraformResourceType() string { + return "azurerm_mssql_server" +} + +// GetConnectionDetailsMapping for this MSSQLServer +func (tr *MSSQLServer) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"administrator_login_password": "spec.forProvider.administratorLoginPasswordSecretRef"} +} + +// GetObservation of this MSSQLServer +func (tr *MSSQLServer) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this MSSQLServer +func (tr *MSSQLServer) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this MSSQLServer +func (tr *MSSQLServer) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" 
+ } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this MSSQLServer +func (tr *MSSQLServer) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this MSSQLServer +func (tr *MSSQLServer) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this MSSQLServer +func (tr *MSSQLServer) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this MSSQLServer +func (tr *MSSQLServer) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this MSSQLServer using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *MSSQLServer) LateInitialize(attrs []byte) (bool, error) { + params := &MSSQLServerParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *MSSQLServer) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/sql/v1beta2/zz_mssqlserver_types.go b/apis/sql/v1beta2/zz_mssqlserver_types.go new file mode 100755 index 000000000..ada46a4a6 --- /dev/null +++ b/apis/sql/v1beta2/zz_mssqlserver_types.go @@ -0,0 +1,404 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AzureadAdministratorInitParameters struct { + + // Specifies whether only AD Users and administrators (e.g. azuread_administrator[0].login_username) can be used to login, or also local database users (e.g. administrator_login). When true, the administrator_login and administrator_login_password properties can be omitted. 
+ AzureadAuthenticationOnly *bool `json:"azureadAuthenticationOnly,omitempty" tf:"azuread_authentication_only,omitempty"` + + // The login username of the Azure AD Administrator of this SQL Server. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/managedidentity/v1beta1.UserAssignedIdentity + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("name",false) + LoginUsername *string `json:"loginUsername,omitempty" tf:"login_username,omitempty"` + + // Reference to a UserAssignedIdentity in managedidentity to populate loginUsername. + // +kubebuilder:validation:Optional + LoginUsernameRef *v1.Reference `json:"loginUsernameRef,omitempty" tf:"-"` + + // Selector for a UserAssignedIdentity in managedidentity to populate loginUsername. + // +kubebuilder:validation:Optional + LoginUsernameSelector *v1.Selector `json:"loginUsernameSelector,omitempty" tf:"-"` + + // The object id of the Azure AD Administrator of this SQL Server. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/managedidentity/v1beta1.UserAssignedIdentity + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("principal_id",true) + ObjectID *string `json:"objectId,omitempty" tf:"object_id,omitempty"` + + // Reference to a UserAssignedIdentity in managedidentity to populate objectId. + // +kubebuilder:validation:Optional + ObjectIDRef *v1.Reference `json:"objectIdRef,omitempty" tf:"-"` + + // Selector for a UserAssignedIdentity in managedidentity to populate objectId. + // +kubebuilder:validation:Optional + ObjectIDSelector *v1.Selector `json:"objectIdSelector,omitempty" tf:"-"` + + // The tenant id of the Azure AD Administrator of this SQL Server. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` +} + +type AzureadAdministratorObservation struct { + + // Specifies whether only AD Users and administrators (e.g. 
azuread_administrator[0].login_username) can be used to login, or also local database users (e.g. administrator_login). When true, the administrator_login and administrator_login_password properties can be omitted. + AzureadAuthenticationOnly *bool `json:"azureadAuthenticationOnly,omitempty" tf:"azuread_authentication_only,omitempty"` + + // The login username of the Azure AD Administrator of this SQL Server. + LoginUsername *string `json:"loginUsername,omitempty" tf:"login_username,omitempty"` + + // The object id of the Azure AD Administrator of this SQL Server. + ObjectID *string `json:"objectId,omitempty" tf:"object_id,omitempty"` + + // The tenant id of the Azure AD Administrator of this SQL Server. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` +} + +type AzureadAdministratorParameters struct { + + // Specifies whether only AD Users and administrators (e.g. azuread_administrator[0].login_username) can be used to login, or also local database users (e.g. administrator_login). When true, the administrator_login and administrator_login_password properties can be omitted. + // +kubebuilder:validation:Optional + AzureadAuthenticationOnly *bool `json:"azureadAuthenticationOnly,omitempty" tf:"azuread_authentication_only,omitempty"` + + // The login username of the Azure AD Administrator of this SQL Server. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/managedidentity/v1beta1.UserAssignedIdentity + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("name",false) + // +kubebuilder:validation:Optional + LoginUsername *string `json:"loginUsername,omitempty" tf:"login_username,omitempty"` + + // Reference to a UserAssignedIdentity in managedidentity to populate loginUsername. 
+ // +kubebuilder:validation:Optional + LoginUsernameRef *v1.Reference `json:"loginUsernameRef,omitempty" tf:"-"` + + // Selector for a UserAssignedIdentity in managedidentity to populate loginUsername. + // +kubebuilder:validation:Optional + LoginUsernameSelector *v1.Selector `json:"loginUsernameSelector,omitempty" tf:"-"` + + // The object id of the Azure AD Administrator of this SQL Server. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/managedidentity/v1beta1.UserAssignedIdentity + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("principal_id",true) + // +kubebuilder:validation:Optional + ObjectID *string `json:"objectId,omitempty" tf:"object_id,omitempty"` + + // Reference to a UserAssignedIdentity in managedidentity to populate objectId. + // +kubebuilder:validation:Optional + ObjectIDRef *v1.Reference `json:"objectIdRef,omitempty" tf:"-"` + + // Selector for a UserAssignedIdentity in managedidentity to populate objectId. + // +kubebuilder:validation:Optional + ObjectIDSelector *v1.Selector `json:"objectIdSelector,omitempty" tf:"-"` + + // The tenant id of the Azure AD Administrator of this SQL Server. + // +kubebuilder:validation:Optional + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` +} + +type MSSQLServerIdentityInitParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this SQL Server. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this SQL Server. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type MSSQLServerIdentityObservation struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this SQL Server. 
+ // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Principal ID for the Service Principal associated with the Identity of this SQL Server. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID for the Service Principal associated with the Identity of this SQL Server. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this SQL Server. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type MSSQLServerIdentityParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this SQL Server. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this SQL Server. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type MSSQLServerInitParameters struct { + + // The administrator login name for the new server. Required unless azuread_authentication_only in the azuread_administrator block is true. When omitted, Azure will generate a default username which cannot be subsequently changed. Changing this forces a new resource to be created. + AdministratorLogin *string `json:"administratorLogin,omitempty" tf:"administrator_login,omitempty"` + + // An azuread_administrator block as defined below. + AzureadAdministrator *AzureadAdministratorInitParameters `json:"azureadAdministrator,omitempty" tf:"azuread_administrator,omitempty"` + + // The connection policy the server will use. 
Possible values are Default, Proxy, and Redirect. Defaults to Default. + ConnectionPolicy *string `json:"connectionPolicy,omitempty" tf:"connection_policy,omitempty"` + + // An identity block as defined below. + Identity *MSSQLServerIdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The Minimum TLS Version for all SQL Database and SQL Data Warehouse databases associated with the server. Valid values are: 1.0, 1.1 , 1.2 and Disabled. Defaults to 1.2. + MinimumTLSVersion *string `json:"minimumTlsVersion,omitempty" tf:"minimum_tls_version,omitempty"` + + // Whether outbound network traffic is restricted for this server. Defaults to false. + OutboundNetworkRestrictionEnabled *bool `json:"outboundNetworkRestrictionEnabled,omitempty" tf:"outbound_network_restriction_enabled,omitempty"` + + // Specifies the primary user managed identity id. Required if type is UserAssigned and should be combined with identity_ids. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/managedidentity/v1beta1.UserAssignedIdentity + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + PrimaryUserAssignedIdentityID *string `json:"primaryUserAssignedIdentityId,omitempty" tf:"primary_user_assigned_identity_id,omitempty"` + + // Reference to a UserAssignedIdentity in managedidentity to populate primaryUserAssignedIdentityId. + // +kubebuilder:validation:Optional + PrimaryUserAssignedIdentityIDRef *v1.Reference `json:"primaryUserAssignedIdentityIdRef,omitempty" tf:"-"` + + // Selector for a UserAssignedIdentity in managedidentity to populate primaryUserAssignedIdentityId. 
+ // +kubebuilder:validation:Optional + PrimaryUserAssignedIdentityIDSelector *v1.Selector `json:"primaryUserAssignedIdentityIdSelector,omitempty" tf:"-"` + + // Whether public network access is allowed for this server. Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The fully versioned Key Vault Key URL (e.g. 'https://.vault.azure.net/keys//) to be used as the Customer Managed Key(CMK/BYOK) for the Transparent Data Encryption(TDE) layer. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta2.Key + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + TransparentDataEncryptionKeyVaultKeyID *string `json:"transparentDataEncryptionKeyVaultKeyId,omitempty" tf:"transparent_data_encryption_key_vault_key_id,omitempty"` + + // Reference to a Key in keyvault to populate transparentDataEncryptionKeyVaultKeyId. + // +kubebuilder:validation:Optional + TransparentDataEncryptionKeyVaultKeyIDRef *v1.Reference `json:"transparentDataEncryptionKeyVaultKeyIdRef,omitempty" tf:"-"` + + // Selector for a Key in keyvault to populate transparentDataEncryptionKeyVaultKeyId. + // +kubebuilder:validation:Optional + TransparentDataEncryptionKeyVaultKeyIDSelector *v1.Selector `json:"transparentDataEncryptionKeyVaultKeyIdSelector,omitempty" tf:"-"` + + // The version for the new server. Valid values are: 2.0 (for v11 server) and 12.0 (for v12 server). Changing this forces a new resource to be created. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type MSSQLServerObservation struct { + + // The administrator login name for the new server. Required unless azuread_authentication_only in the azuread_administrator block is true. 
When omitted, Azure will generate a default username which cannot be subsequently changed. Changing this forces a new resource to be created. + AdministratorLogin *string `json:"administratorLogin,omitempty" tf:"administrator_login,omitempty"` + + // An azuread_administrator block as defined below. + AzureadAdministrator *AzureadAdministratorObservation `json:"azureadAdministrator,omitempty" tf:"azuread_administrator,omitempty"` + + // The connection policy the server will use. Possible values are Default, Proxy, and Redirect. Defaults to Default. + ConnectionPolicy *string `json:"connectionPolicy,omitempty" tf:"connection_policy,omitempty"` + + // The fully qualified domain name of the Azure SQL Server (e.g. myServerName.database.windows.net) + FullyQualifiedDomainName *string `json:"fullyQualifiedDomainName,omitempty" tf:"fully_qualified_domain_name,omitempty"` + + // the Microsoft SQL Server ID. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. + Identity *MSSQLServerIdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The Minimum TLS Version for all SQL Database and SQL Data Warehouse databases associated with the server. Valid values are: 1.0, 1.1 , 1.2 and Disabled. Defaults to 1.2. + MinimumTLSVersion *string `json:"minimumTlsVersion,omitempty" tf:"minimum_tls_version,omitempty"` + + // Whether outbound network traffic is restricted for this server. Defaults to false. + OutboundNetworkRestrictionEnabled *bool `json:"outboundNetworkRestrictionEnabled,omitempty" tf:"outbound_network_restriction_enabled,omitempty"` + + // Specifies the primary user managed identity id. Required if type is UserAssigned and should be combined with identity_ids. 
+ PrimaryUserAssignedIdentityID *string `json:"primaryUserAssignedIdentityId,omitempty" tf:"primary_user_assigned_identity_id,omitempty"` + + // Whether public network access is allowed for this server. Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The name of the resource group in which to create the Microsoft SQL Server. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A list of dropped restorable database IDs on the server. + RestorableDroppedDatabaseIds []*string `json:"restorableDroppedDatabaseIds,omitempty" tf:"restorable_dropped_database_ids,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The fully versioned Key Vault Key URL (e.g. 'https://.vault.azure.net/keys//) to be used as the Customer Managed Key(CMK/BYOK) for the Transparent Data Encryption(TDE) layer. + TransparentDataEncryptionKeyVaultKeyID *string `json:"transparentDataEncryptionKeyVaultKeyId,omitempty" tf:"transparent_data_encryption_key_vault_key_id,omitempty"` + + // The version for the new server. Valid values are: 2.0 (for v11 server) and 12.0 (for v12 server). Changing this forces a new resource to be created. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type MSSQLServerParameters struct { + + // The administrator login name for the new server. Required unless azuread_authentication_only in the azuread_administrator block is true. When omitted, Azure will generate a default username which cannot be subsequently changed. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + AdministratorLogin *string `json:"administratorLogin,omitempty" tf:"administrator_login,omitempty"` + + // The password associated with the administrator_login user. Needs to comply with Azure's Password Policy. Required unless azuread_authentication_only in the azuread_administrator block is true. + // +kubebuilder:validation:Optional + AdministratorLoginPasswordSecretRef *v1.SecretKeySelector `json:"administratorLoginPasswordSecretRef,omitempty" tf:"-"` + + // An azuread_administrator block as defined below. + // +kubebuilder:validation:Optional + AzureadAdministrator *AzureadAdministratorParameters `json:"azureadAdministrator,omitempty" tf:"azuread_administrator,omitempty"` + + // The connection policy the server will use. Possible values are Default, Proxy, and Redirect. Defaults to Default. + // +kubebuilder:validation:Optional + ConnectionPolicy *string `json:"connectionPolicy,omitempty" tf:"connection_policy,omitempty"` + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *MSSQLServerIdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The Minimum TLS Version for all SQL Database and SQL Data Warehouse databases associated with the server. Valid values are: 1.0, 1.1 , 1.2 and Disabled. Defaults to 1.2. + // +kubebuilder:validation:Optional + MinimumTLSVersion *string `json:"minimumTlsVersion,omitempty" tf:"minimum_tls_version,omitempty"` + + // Whether outbound network traffic is restricted for this server. Defaults to false. 
+ // +kubebuilder:validation:Optional + OutboundNetworkRestrictionEnabled *bool `json:"outboundNetworkRestrictionEnabled,omitempty" tf:"outbound_network_restriction_enabled,omitempty"` + + // Specifies the primary user managed identity id. Required if type is UserAssigned and should be combined with identity_ids. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/managedidentity/v1beta1.UserAssignedIdentity + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + PrimaryUserAssignedIdentityID *string `json:"primaryUserAssignedIdentityId,omitempty" tf:"primary_user_assigned_identity_id,omitempty"` + + // Reference to a UserAssignedIdentity in managedidentity to populate primaryUserAssignedIdentityId. + // +kubebuilder:validation:Optional + PrimaryUserAssignedIdentityIDRef *v1.Reference `json:"primaryUserAssignedIdentityIdRef,omitempty" tf:"-"` + + // Selector for a UserAssignedIdentity in managedidentity to populate primaryUserAssignedIdentityId. + // +kubebuilder:validation:Optional + PrimaryUserAssignedIdentityIDSelector *v1.Selector `json:"primaryUserAssignedIdentityIdSelector,omitempty" tf:"-"` + + // Whether public network access is allowed for this server. Defaults to true. + // +kubebuilder:validation:Optional + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The name of the resource group in which to create the Microsoft SQL Server. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. 
+ // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The fully versioned Key Vault Key URL (e.g. 'https://.vault.azure.net/keys//) to be used as the Customer Managed Key(CMK/BYOK) for the Transparent Data Encryption(TDE) layer. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta2.Key + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + TransparentDataEncryptionKeyVaultKeyID *string `json:"transparentDataEncryptionKeyVaultKeyId,omitempty" tf:"transparent_data_encryption_key_vault_key_id,omitempty"` + + // Reference to a Key in keyvault to populate transparentDataEncryptionKeyVaultKeyId. + // +kubebuilder:validation:Optional + TransparentDataEncryptionKeyVaultKeyIDRef *v1.Reference `json:"transparentDataEncryptionKeyVaultKeyIdRef,omitempty" tf:"-"` + + // Selector for a Key in keyvault to populate transparentDataEncryptionKeyVaultKeyId. + // +kubebuilder:validation:Optional + TransparentDataEncryptionKeyVaultKeyIDSelector *v1.Selector `json:"transparentDataEncryptionKeyVaultKeyIdSelector,omitempty" tf:"-"` + + // The version for the new server. Valid values are: 2.0 (for v11 server) and 12.0 (for v12 server). Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +// MSSQLServerSpec defines the desired state of MSSQLServer +type MSSQLServerSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider MSSQLServerParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider MSSQLServerInitParameters `json:"initProvider,omitempty"` +} + +// MSSQLServerStatus defines the observed state of MSSQLServer. +type MSSQLServerStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider MSSQLServerObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// MSSQLServer is the Schema for the MSSQLServers API. Manages a Microsoft SQL Azure Database Server. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type MSSQLServer struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.version) || (has(self.initProvider) && has(self.initProvider.version))",message="spec.forProvider.version is a required parameter" + Spec MSSQLServerSpec `json:"spec"` + Status MSSQLServerStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// MSSQLServerList contains a list of MSSQLServers +type MSSQLServerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []MSSQLServer `json:"items"` +} + +// Repository type metadata. +var ( + MSSQLServer_Kind = "MSSQLServer" + MSSQLServer_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: MSSQLServer_Kind}.String() + MSSQLServer_KindAPIVersion = MSSQLServer_Kind + "." 
+ CRDGroupVersion.String() + MSSQLServer_GroupVersionKind = CRDGroupVersion.WithKind(MSSQLServer_Kind) +) + +func init() { + SchemeBuilder.Register(&MSSQLServer{}, &MSSQLServerList{}) +} diff --git a/apis/sql/v1beta2/zz_mssqlservervulnerabilityassessment_terraformed.go b/apis/sql/v1beta2/zz_mssqlservervulnerabilityassessment_terraformed.go new file mode 100755 index 000000000..4d9063fc0 --- /dev/null +++ b/apis/sql/v1beta2/zz_mssqlservervulnerabilityassessment_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this MSSQLServerVulnerabilityAssessment +func (mg *MSSQLServerVulnerabilityAssessment) GetTerraformResourceType() string { + return "azurerm_mssql_server_vulnerability_assessment" +} + +// GetConnectionDetailsMapping for this MSSQLServerVulnerabilityAssessment +func (tr *MSSQLServerVulnerabilityAssessment) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"storage_account_access_key": "spec.forProvider.storageAccountAccessKeySecretRef", "storage_container_sas_key": "spec.forProvider.storageContainerSasKeySecretRef"} +} + +// GetObservation of this MSSQLServerVulnerabilityAssessment +func (tr *MSSQLServerVulnerabilityAssessment) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this MSSQLServerVulnerabilityAssessment +func (tr *MSSQLServerVulnerabilityAssessment) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + 
return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this MSSQLServerVulnerabilityAssessment +func (tr *MSSQLServerVulnerabilityAssessment) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this MSSQLServerVulnerabilityAssessment +func (tr *MSSQLServerVulnerabilityAssessment) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this MSSQLServerVulnerabilityAssessment +func (tr *MSSQLServerVulnerabilityAssessment) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this MSSQLServerVulnerabilityAssessment +func (tr *MSSQLServerVulnerabilityAssessment) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this MSSQLServerVulnerabilityAssessment +func (tr *MSSQLServerVulnerabilityAssessment) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. 
As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this MSSQLServerVulnerabilityAssessment using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *MSSQLServerVulnerabilityAssessment) LateInitialize(attrs []byte) (bool, error) { + params := &MSSQLServerVulnerabilityAssessmentParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *MSSQLServerVulnerabilityAssessment) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/sql/v1beta2/zz_mssqlservervulnerabilityassessment_types.go b/apis/sql/v1beta2/zz_mssqlservervulnerabilityassessment_types.go new file mode 100755 index 000000000..244ab7440 --- /dev/null +++ b/apis/sql/v1beta2/zz_mssqlservervulnerabilityassessment_types.go @@ -0,0 +1,184 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type MSSQLServerVulnerabilityAssessmentInitParameters struct { + + // The recurring scans settings. The recurring_scans block supports fields documented below. + RecurringScans *MSSQLServerVulnerabilityAssessmentRecurringScansInitParameters `json:"recurringScans,omitempty" tf:"recurring_scans,omitempty"` + + // The id of the security alert policy of the MS SQL Server. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta1.MSSQLServerSecurityAlertPolicy + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + ServerSecurityAlertPolicyID *string `json:"serverSecurityAlertPolicyId,omitempty" tf:"server_security_alert_policy_id,omitempty"` + + // Reference to a MSSQLServerSecurityAlertPolicy in sql to populate serverSecurityAlertPolicyId. + // +kubebuilder:validation:Optional + ServerSecurityAlertPolicyIDRef *v1.Reference `json:"serverSecurityAlertPolicyIdRef,omitempty" tf:"-"` + + // Selector for a MSSQLServerSecurityAlertPolicy in sql to populate serverSecurityAlertPolicyId. + // +kubebuilder:validation:Optional + ServerSecurityAlertPolicyIDSelector *v1.Selector `json:"serverSecurityAlertPolicyIdSelector,omitempty" tf:"-"` + + // A blob storage container path to hold the scan results (e.g. https://example.blob.core.windows.net/VaScans/). + StorageContainerPath *string `json:"storageContainerPath,omitempty" tf:"storage_container_path,omitempty"` +} + +type MSSQLServerVulnerabilityAssessmentObservation struct { + + // The ID of the MS SQL Server Vulnerability Assessment. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The recurring scans settings. The recurring_scans block supports fields documented below. 
+ RecurringScans *MSSQLServerVulnerabilityAssessmentRecurringScansObservation `json:"recurringScans,omitempty" tf:"recurring_scans,omitempty"` + + // The id of the security alert policy of the MS SQL Server. Changing this forces a new resource to be created. + ServerSecurityAlertPolicyID *string `json:"serverSecurityAlertPolicyId,omitempty" tf:"server_security_alert_policy_id,omitempty"` + + // A blob storage container path to hold the scan results (e.g. https://example.blob.core.windows.net/VaScans/). + StorageContainerPath *string `json:"storageContainerPath,omitempty" tf:"storage_container_path,omitempty"` +} + +type MSSQLServerVulnerabilityAssessmentParameters struct { + + // The recurring scans settings. The recurring_scans block supports fields documented below. + // +kubebuilder:validation:Optional + RecurringScans *MSSQLServerVulnerabilityAssessmentRecurringScansParameters `json:"recurringScans,omitempty" tf:"recurring_scans,omitempty"` + + // The id of the security alert policy of the MS SQL Server. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta1.MSSQLServerSecurityAlertPolicy + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + ServerSecurityAlertPolicyID *string `json:"serverSecurityAlertPolicyId,omitempty" tf:"server_security_alert_policy_id,omitempty"` + + // Reference to a MSSQLServerSecurityAlertPolicy in sql to populate serverSecurityAlertPolicyId. + // +kubebuilder:validation:Optional + ServerSecurityAlertPolicyIDRef *v1.Reference `json:"serverSecurityAlertPolicyIdRef,omitempty" tf:"-"` + + // Selector for a MSSQLServerSecurityAlertPolicy in sql to populate serverSecurityAlertPolicyId. 
+ // +kubebuilder:validation:Optional + ServerSecurityAlertPolicyIDSelector *v1.Selector `json:"serverSecurityAlertPolicyIdSelector,omitempty" tf:"-"` + + // Specifies the identifier key of the storage account for vulnerability assessment scan results. If storage_container_sas_key isn't specified, storage_account_access_key is required. + // +kubebuilder:validation:Optional + StorageAccountAccessKeySecretRef *v1.SecretKeySelector `json:"storageAccountAccessKeySecretRef,omitempty" tf:"-"` + + // A blob storage container path to hold the scan results (e.g. https://example.blob.core.windows.net/VaScans/). + // +kubebuilder:validation:Optional + StorageContainerPath *string `json:"storageContainerPath,omitempty" tf:"storage_container_path,omitempty"` + + // A shared access signature (SAS Key) that has write access to the blob container specified in storage_container_path parameter. If storage_account_access_key isn't specified, storage_container_sas_key is required. + // +kubebuilder:validation:Optional + StorageContainerSASKeySecretRef *v1.SecretKeySelector `json:"storageContainerSasKeySecretRef,omitempty" tf:"-"` +} + +type MSSQLServerVulnerabilityAssessmentRecurringScansInitParameters struct { + + // Boolean flag which specifies if the schedule scan notification will be sent to the subscription administrators. Defaults to false. + EmailSubscriptionAdmins *bool `json:"emailSubscriptionAdmins,omitempty" tf:"email_subscription_admins,omitempty"` + + // Specifies an array of email addresses to which the scan notification is sent. + Emails []*string `json:"emails,omitempty" tf:"emails,omitempty"` + + // Boolean flag which specifies if recurring scans is enabled or disabled. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type MSSQLServerVulnerabilityAssessmentRecurringScansObservation struct { + + // Boolean flag which specifies if the schedule scan notification will be sent to the subscription administrators. Defaults to false. 
+ EmailSubscriptionAdmins *bool `json:"emailSubscriptionAdmins,omitempty" tf:"email_subscription_admins,omitempty"` + + // Specifies an array of email addresses to which the scan notification is sent. + Emails []*string `json:"emails,omitempty" tf:"emails,omitempty"` + + // Boolean flag which specifies if recurring scans is enabled or disabled. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type MSSQLServerVulnerabilityAssessmentRecurringScansParameters struct { + + // Boolean flag which specifies if the schedule scan notification will be sent to the subscription administrators. Defaults to false. + // +kubebuilder:validation:Optional + EmailSubscriptionAdmins *bool `json:"emailSubscriptionAdmins,omitempty" tf:"email_subscription_admins,omitempty"` + + // Specifies an array of email addresses to which the scan notification is sent. + // +kubebuilder:validation:Optional + Emails []*string `json:"emails,omitempty" tf:"emails,omitempty"` + + // Boolean flag which specifies if recurring scans is enabled or disabled. Defaults to false. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +// MSSQLServerVulnerabilityAssessmentSpec defines the desired state of MSSQLServerVulnerabilityAssessment +type MSSQLServerVulnerabilityAssessmentSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider MSSQLServerVulnerabilityAssessmentParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider MSSQLServerVulnerabilityAssessmentInitParameters `json:"initProvider,omitempty"` +} + +// MSSQLServerVulnerabilityAssessmentStatus defines the observed state of MSSQLServerVulnerabilityAssessment. +type MSSQLServerVulnerabilityAssessmentStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider MSSQLServerVulnerabilityAssessmentObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// MSSQLServerVulnerabilityAssessment is the Schema for the MSSQLServerVulnerabilityAssessments API. Manages the Vulnerability Assessment for a MS SQL Server. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type MSSQLServerVulnerabilityAssessment struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.storageContainerPath) || (has(self.initProvider) && has(self.initProvider.storageContainerPath))",message="spec.forProvider.storageContainerPath is a required parameter" + Spec MSSQLServerVulnerabilityAssessmentSpec `json:"spec"` + Status MSSQLServerVulnerabilityAssessmentStatus `json:"status,omitempty"` +} + +// 
+kubebuilder:object:root=true + +// MSSQLServerVulnerabilityAssessmentList contains a list of MSSQLServerVulnerabilityAssessments +type MSSQLServerVulnerabilityAssessmentList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []MSSQLServerVulnerabilityAssessment `json:"items"` +} + +// Repository type metadata. +var ( + MSSQLServerVulnerabilityAssessment_Kind = "MSSQLServerVulnerabilityAssessment" + MSSQLServerVulnerabilityAssessment_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: MSSQLServerVulnerabilityAssessment_Kind}.String() + MSSQLServerVulnerabilityAssessment_KindAPIVersion = MSSQLServerVulnerabilityAssessment_Kind + "." + CRDGroupVersion.String() + MSSQLServerVulnerabilityAssessment_GroupVersionKind = CRDGroupVersion.WithKind(MSSQLServerVulnerabilityAssessment_Kind) +) + +func init() { + SchemeBuilder.Register(&MSSQLServerVulnerabilityAssessment{}, &MSSQLServerVulnerabilityAssessmentList{}) +} diff --git a/apis/storage/v1beta1/zz_accountnetworkrules_types.go b/apis/storage/v1beta1/zz_accountnetworkrules_types.go index 24b8c4a35..5753fe614 100755 --- a/apis/storage/v1beta1/zz_accountnetworkrules_types.go +++ b/apis/storage/v1beta1/zz_accountnetworkrules_types.go @@ -30,7 +30,7 @@ type AccountNetworkRulesInitParameters struct { PrivateLinkAccess []AccountNetworkRulesPrivateLinkAccessInitParameters `json:"privateLinkAccess,omitempty" tf:"private_link_access,omitempty"` // Specifies the ID of the storage account. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` @@ -95,7 +95,7 @@ type AccountNetworkRulesParameters struct { PrivateLinkAccess []AccountNetworkRulesPrivateLinkAccessParameters `json:"privateLinkAccess,omitempty" tf:"private_link_access,omitempty"` // Specifies the ID of the storage account. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` diff --git a/apis/storage/v1beta1/zz_blob_types.go b/apis/storage/v1beta1/zz_blob_types.go index 94cc2f4b6..984978fdb 100755 --- a/apis/storage/v1beta1/zz_blob_types.go +++ b/apis/storage/v1beta1/zz_blob_types.go @@ -143,7 +143,7 @@ type BlobParameters struct { SourceURI *string `json:"sourceUri,omitempty" tf:"source_uri,omitempty"` // Specifies the storage account in which to create the storage container. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +kubebuilder:validation:Optional StorageAccountName *string `json:"storageAccountName,omitempty" tf:"storage_account_name,omitempty"` diff --git a/apis/storage/v1beta1/zz_container_types.go b/apis/storage/v1beta1/zz_container_types.go index d2287291f..53d9b520e 100755 --- a/apis/storage/v1beta1/zz_container_types.go +++ b/apis/storage/v1beta1/zz_container_types.go @@ -60,7 +60,7 @@ type ContainerParameters struct { Metadata map[string]*string `json:"metadata,omitempty" tf:"metadata,omitempty"` // The name of the Storage Account where the Container should be created. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +kubebuilder:validation:Optional StorageAccountName *string `json:"storageAccountName,omitempty" tf:"storage_account_name,omitempty"` diff --git a/apis/storage/v1beta1/zz_datalakegen2filesystem_types.go b/apis/storage/v1beta1/zz_datalakegen2filesystem_types.go index 0e95a7f31..bfa400a90 100755 --- a/apis/storage/v1beta1/zz_datalakegen2filesystem_types.go +++ b/apis/storage/v1beta1/zz_datalakegen2filesystem_types.go @@ -78,7 +78,7 @@ type DataLakeGen2FileSystemInitParameters struct { Properties map[string]*string `json:"properties,omitempty" tf:"properties,omitempty"` // Specifies the ID of the Storage Account in which the Data Lake Gen2 File System should exist. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` @@ -133,7 +133,7 @@ type DataLakeGen2FileSystemParameters struct { Properties map[string]*string `json:"properties,omitempty" tf:"properties,omitempty"` // Specifies the ID of the Storage Account in which the Data Lake Gen2 File System should exist. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() // +kubebuilder:validation:Optional StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` diff --git a/apis/storage/v1beta1/zz_datalakegen2path_types.go b/apis/storage/v1beta1/zz_datalakegen2path_types.go index d7659b92b..5a0aa0ea3 100755 --- a/apis/storage/v1beta1/zz_datalakegen2path_types.go +++ b/apis/storage/v1beta1/zz_datalakegen2path_types.go @@ -92,7 +92,7 @@ type DataLakeGen2PathInitParameters struct { Resource *string `json:"resource,omitempty" tf:"resource,omitempty"` // Specifies the ID of the Storage Account in which the Data Lake Gen2 File System should exist. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` @@ -168,7 +168,7 @@ type DataLakeGen2PathParameters struct { Resource *string `json:"resource,omitempty" tf:"resource,omitempty"` // Specifies the ID of the Storage Account in which the Data Lake Gen2 File System should exist. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` diff --git a/apis/storage/v1beta1/zz_encryptionscope_types.go b/apis/storage/v1beta1/zz_encryptionscope_types.go index e139f4331..d9faf672e 100755 --- a/apis/storage/v1beta1/zz_encryptionscope_types.go +++ b/apis/storage/v1beta1/zz_encryptionscope_types.go @@ -25,7 +25,7 @@ type EncryptionScopeInitParameters struct { Source *string `json:"source,omitempty" tf:"source,omitempty"` // The ID of the Storage Account where this Storage Encryption Scope is created. Changing this forces a new Storage Encryption Scope to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` @@ -71,7 +71,7 @@ type EncryptionScopeParameters struct { Source *string `json:"source,omitempty" tf:"source,omitempty"` // The ID of the Storage Account where this Storage Encryption Scope is created. Changing this forces a new Storage Encryption Scope to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` diff --git a/apis/storage/v1beta1/zz_generated.conversion_hubs.go b/apis/storage/v1beta1/zz_generated.conversion_hubs.go index b8a19fce2..8e0832f0b 100755 --- a/apis/storage/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/storage/v1beta1/zz_generated.conversion_hubs.go @@ -6,21 +6,12 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *Account) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *AccountLocalUser) Hub() {} - // Hub marks this type as a conversion hub. func (tr *AccountNetworkRules) Hub() {} // Hub marks this type as a conversion hub. func (tr *Blob) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *BlobInventoryPolicy) Hub() {} - // Hub marks this type as a conversion hub. 
func (tr *Container) Hub() {} @@ -33,9 +24,6 @@ func (tr *DataLakeGen2Path) Hub() {} // Hub marks this type as a conversion hub. func (tr *EncryptionScope) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *ManagementPolicy) Hub() {} - // Hub marks this type as a conversion hub. func (tr *ObjectReplication) Hub() {} diff --git a/apis/storage/v1beta1/zz_generated.conversion_spokes.go b/apis/storage/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..4aec1cc49 --- /dev/null +++ b/apis/storage/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,94 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Account to the hub type. +func (tr *Account) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Account type. +func (tr *Account) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this AccountLocalUser to the hub type. 
+func (tr *AccountLocalUser) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the AccountLocalUser type. +func (tr *AccountLocalUser) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this BlobInventoryPolicy to the hub type. +func (tr *BlobInventoryPolicy) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the BlobInventoryPolicy type. +func (tr *BlobInventoryPolicy) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this ManagementPolicy to the hub type. 
+func (tr *ManagementPolicy) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ManagementPolicy type. +func (tr *ManagementPolicy) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/storage/v1beta1/zz_generated.resolvers.go b/apis/storage/v1beta1/zz_generated.resolvers.go index 1eb61aab7..131f54d2a 100644 --- a/apis/storage/v1beta1/zz_generated.resolvers.go +++ b/apis/storage/v1beta1/zz_generated.resolvers.go @@ -12,8 +12,9 @@ import ( resource "github.com/crossplane/upjet/pkg/resource" errors "github.com/pkg/errors" - xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" rconfig "github.com/upbound/provider-azure/apis/rconfig" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" client "sigs.k8s.io/controller-runtime/pkg/client" // ResolveReferences of this Account. 
@@ -133,7 +134,7 @@ func (mg *AccountNetworkRules) ResolveReferences(ctx context.Context, c client.R var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -152,7 +153,7 @@ func (mg *AccountNetworkRules) ResolveReferences(ctx context.Context, c client.R mg.Spec.ForProvider.StorageAccountID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.StorageAccountIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -183,7 +184,7 @@ func (mg *Blob) ResolveReferences(ctx context.Context, c client.Reader) error { var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -325,7 +326,7 @@ func (mg *Container) ResolveReferences(ctx context.Context, c client.Reader) err var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") 
if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -356,7 +357,7 @@ func (mg *DataLakeGen2FileSystem) ResolveReferences(ctx context.Context, c clien var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -375,7 +376,7 @@ func (mg *DataLakeGen2FileSystem) ResolveReferences(ctx context.Context, c clien mg.Spec.ForProvider.StorageAccountID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.StorageAccountIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -425,7 +426,7 @@ func (mg *DataLakeGen2Path) ResolveReferences(ctx context.Context, c client.Read mg.Spec.ForProvider.FileSystemName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.FileSystemNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -463,7 +464,7 @@ func (mg *DataLakeGen2Path) ResolveReferences(ctx context.Context, c client.Read mg.Spec.InitProvider.FileSystemName = 
reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.FileSystemNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -494,7 +495,7 @@ func (mg *EncryptionScope) ResolveReferences(ctx context.Context, c client.Reade var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -513,7 +514,7 @@ func (mg *EncryptionScope) ResolveReferences(ctx context.Context, c client.Reade mg.Spec.ForProvider.StorageAccountID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.StorageAccountIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -594,7 +595,7 @@ func (mg *ObjectReplication) ResolveReferences(ctx context.Context, c client.Rea var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed 
to get the reference target managed resource and its list for reference resolution") } @@ -656,7 +657,7 @@ func (mg *ObjectReplication) ResolveReferences(ctx context.Context, c client.Rea } { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -674,7 +675,7 @@ func (mg *ObjectReplication) ResolveReferences(ctx context.Context, c client.Rea mg.Spec.ForProvider.SourceStorageAccountID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.SourceStorageAccountIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -736,7 +737,7 @@ func (mg *ObjectReplication) ResolveReferences(ctx context.Context, c client.Rea } { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -766,7 +767,7 @@ func (mg *Queue) ResolveReferences(ctx context.Context, c client.Reader) error { var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", 
"AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -797,7 +798,7 @@ func (mg *Share) ResolveReferences(ctx context.Context, c client.Reader) error { var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -847,7 +848,7 @@ func (mg *ShareDirectory) ResolveReferences(ctx context.Context, c client.Reader mg.Spec.ForProvider.ShareName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ShareNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -885,7 +886,7 @@ func (mg *ShareDirectory) ResolveReferences(ctx context.Context, c client.Reader mg.Spec.InitProvider.ShareName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.ShareNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -916,7 +917,7 @@ func (mg *Table) ResolveReferences(ctx context.Context, c client.Reader) error { var rsp reference.ResolutionResponse var err error { - m, 
l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -935,7 +936,7 @@ func (mg *Table) ResolveReferences(ctx context.Context, c client.Reader) error { mg.Spec.ForProvider.StorageAccountName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.StorageAccountNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -966,7 +967,7 @@ func (mg *TableEntity) ResolveReferences(ctx context.Context, c client.Reader) e var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1004,7 +1005,7 @@ func (mg *TableEntity) ResolveReferences(ctx context.Context, c client.Reader) e mg.Spec.ForProvider.TableName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.TableNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference 
target managed resource and its list for reference resolution") } diff --git a/apis/storage/v1beta1/zz_objectreplication_types.go b/apis/storage/v1beta1/zz_objectreplication_types.go index 5a3cbb0be..527edf28b 100755 --- a/apis/storage/v1beta1/zz_objectreplication_types.go +++ b/apis/storage/v1beta1/zz_objectreplication_types.go @@ -16,7 +16,7 @@ import ( type ObjectReplicationInitParameters struct { // The ID of the destination storage account. Changing this forces a new Storage Object Replication to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() DestinationStorageAccountID *string `json:"destinationStorageAccountId,omitempty" tf:"destination_storage_account_id,omitempty"` @@ -32,7 +32,7 @@ type ObjectReplicationInitParameters struct { Rules []ObjectReplicationRulesInitParameters `json:"rules,omitempty" tf:"rules,omitempty"` // The ID of the source storage account. Changing this forces a new Storage Object Replication to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() SourceStorageAccountID *string `json:"sourceStorageAccountId,omitempty" tf:"source_storage_account_id,omitempty"` @@ -69,7 +69,7 @@ type ObjectReplicationObservation struct { type ObjectReplicationParameters struct { // The ID of the destination storage account. Changing this forces a new Storage Object Replication to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional DestinationStorageAccountID *string `json:"destinationStorageAccountId,omitempty" tf:"destination_storage_account_id,omitempty"` @@ -87,7 +87,7 @@ type ObjectReplicationParameters struct { Rules []ObjectReplicationRulesParameters `json:"rules,omitempty" tf:"rules,omitempty"` // The ID of the source storage account. Changing this forces a new Storage Object Replication to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional SourceStorageAccountID *string `json:"sourceStorageAccountId,omitempty" tf:"source_storage_account_id,omitempty"` diff --git a/apis/storage/v1beta1/zz_queue_types.go b/apis/storage/v1beta1/zz_queue_types.go index 3e3432bcd..1ad426ab7 100755 --- a/apis/storage/v1beta1/zz_queue_types.go +++ b/apis/storage/v1beta1/zz_queue_types.go @@ -44,7 +44,7 @@ type QueueParameters struct { Metadata map[string]*string `json:"metadata,omitempty" tf:"metadata,omitempty"` // Specifies the Storage Account in which the Storage Queue should exist. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +kubebuilder:validation:Optional StorageAccountName *string `json:"storageAccountName,omitempty" tf:"storage_account_name,omitempty"` diff --git a/apis/storage/v1beta1/zz_share_types.go b/apis/storage/v1beta1/zz_share_types.go index 7608111ba..dce756a11 100755 --- a/apis/storage/v1beta1/zz_share_types.go +++ b/apis/storage/v1beta1/zz_share_types.go @@ -155,7 +155,7 @@ type ShareParameters struct { Quota *float64 `json:"quota,omitempty" tf:"quota,omitempty"` // Specifies the storage account in which to create the share. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +kubebuilder:validation:Optional StorageAccountName *string `json:"storageAccountName,omitempty" tf:"storage_account_name,omitempty"` diff --git a/apis/storage/v1beta1/zz_sharedirectory_types.go b/apis/storage/v1beta1/zz_sharedirectory_types.go index 3ec22a6c9..98892ffc7 100755 --- a/apis/storage/v1beta1/zz_sharedirectory_types.go +++ b/apis/storage/v1beta1/zz_sharedirectory_types.go @@ -35,7 +35,7 @@ type ShareDirectoryInitParameters struct { ShareNameSelector *v1.Selector `json:"shareNameSelector,omitempty" tf:"-"` // The name of the Storage Account within which the File Share is located. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account StorageAccountName *string `json:"storageAccountName,omitempty" tf:"storage_account_name,omitempty"` // Reference to a Account in storage to populate storageAccountName. @@ -91,7 +91,7 @@ type ShareDirectoryParameters struct { ShareNameSelector *v1.Selector `json:"shareNameSelector,omitempty" tf:"-"` // The name of the Storage Account within which the File Share is located. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +kubebuilder:validation:Optional StorageAccountName *string `json:"storageAccountName,omitempty" tf:"storage_account_name,omitempty"` diff --git a/apis/storage/v1beta1/zz_table_types.go b/apis/storage/v1beta1/zz_table_types.go index 0c6f98bfb..327ba2a97 100755 --- a/apis/storage/v1beta1/zz_table_types.go +++ b/apis/storage/v1beta1/zz_table_types.go @@ -90,7 +90,7 @@ type TableInitParameters struct { Name *string `json:"name,omitempty" tf:"name,omitempty"` // Specifies the storage account in which to create the storage table. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account StorageAccountName *string `json:"storageAccountName,omitempty" tf:"storage_account_name,omitempty"` // Reference to a Account in storage to populate storageAccountName. @@ -128,7 +128,7 @@ type TableParameters struct { Name *string `json:"name,omitempty" tf:"name,omitempty"` // Specifies the storage account in which to create the storage table. 
Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +kubebuilder:validation:Optional StorageAccountName *string `json:"storageAccountName,omitempty" tf:"storage_account_name,omitempty"` diff --git a/apis/storage/v1beta1/zz_tableentity_types.go b/apis/storage/v1beta1/zz_tableentity_types.go index b84c41c8d..31e880935 100755 --- a/apis/storage/v1beta1/zz_tableentity_types.go +++ b/apis/storage/v1beta1/zz_tableentity_types.go @@ -26,7 +26,7 @@ type TableEntityInitParameters struct { RowKey *string `json:"rowKey,omitempty" tf:"row_key,omitempty"` // Specifies the storage account in which to create the storage table entity. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account StorageAccountName *string `json:"storageAccountName,omitempty" tf:"storage_account_name,omitempty"` // Reference to a Account in storage to populate storageAccountName. @@ -89,7 +89,7 @@ type TableEntityParameters struct { RowKey *string `json:"rowKey,omitempty" tf:"row_key,omitempty"` // Specifies the storage account in which to create the storage table entity. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +kubebuilder:validation:Optional StorageAccountName *string `json:"storageAccountName,omitempty" tf:"storage_account_name,omitempty"` diff --git a/apis/storage/v1beta2/zz_account_terraformed.go b/apis/storage/v1beta2/zz_account_terraformed.go new file mode 100755 index 000000000..fdba3f15a --- /dev/null +++ b/apis/storage/v1beta2/zz_account_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Account +func (mg *Account) GetTerraformResourceType() string { + return "azurerm_storage_account" +} + +// GetConnectionDetailsMapping for this Account +func (tr *Account) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"primary_access_key": "status.atProvider.primaryAccessKey", "primary_blob_connection_string": "status.atProvider.primaryBlobConnectionString", "primary_connection_string": "status.atProvider.primaryConnectionString", "secondary_access_key": "status.atProvider.secondaryAccessKey", "secondary_blob_connection_string": "status.atProvider.secondaryBlobConnectionString", "secondary_connection_string": "status.atProvider.secondaryConnectionString"} +} + +// GetObservation of this Account +func (tr *Account) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Account +func 
(tr *Account) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Account +func (tr *Account) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Account +func (tr *Account) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Account +func (tr *Account) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Account +func (tr *Account) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Account +func (tr *Account) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Account using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Account) LateInitialize(attrs []byte) (bool, error) { + params := &AccountParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Account) GetTerraformSchemaVersion() int { + return 4 +} diff --git a/apis/storage/v1beta2/zz_account_types.go b/apis/storage/v1beta2/zz_account_types.go new file mode 100755 index 000000000..49eccf3e1 --- /dev/null +++ b/apis/storage/v1beta2/zz_account_types.go @@ -0,0 +1,1760 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AccountInitParameters struct { + + // Defines the access tier for BlobStorage, FileStorage and StorageV2 accounts. Valid options are Hot and Cool, defaults to Hot. + AccessTier *string `json:"accessTier,omitempty" tf:"access_tier,omitempty"` + + // Defines the Kind of account. 
Valid options are BlobStorage, BlockBlobStorage, FileStorage, Storage and StorageV2. Defaults to StorageV2. + AccountKind *string `json:"accountKind,omitempty" tf:"account_kind,omitempty"` + + // Defines the type of replication to use for this storage account. Valid options are LRS, GRS, RAGRS, ZRS, GZRS and RAGZRS. Changing this forces a new resource to be created when types LRS, GRS and RAGRS are changed to ZRS, GZRS or RAGZRS and vice versa. + AccountReplicationType *string `json:"accountReplicationType,omitempty" tf:"account_replication_type,omitempty"` + + // Defines the Tier to use for this storage account. Valid options are Standard and Premium. For BlockBlobStorage and FileStorage accounts only Premium is valid. Changing this forces a new resource to be created. + AccountTier *string `json:"accountTier,omitempty" tf:"account_tier,omitempty"` + + // Allow or disallow nested items within this Account to opt into being public. Defaults to true. + AllowNestedItemsToBePublic *bool `json:"allowNestedItemsToBePublic,omitempty" tf:"allow_nested_items_to_be_public,omitempty"` + + // Restrict copy to and from Storage Accounts within an AAD tenant or with Private Links to the same VNet. Possible values are AAD and PrivateLink. + AllowedCopyScope *string `json:"allowedCopyScope,omitempty" tf:"allowed_copy_scope,omitempty"` + + // A azure_files_authentication block as defined below. + AzureFilesAuthentication *AzureFilesAuthenticationInitParameters `json:"azureFilesAuthentication,omitempty" tf:"azure_files_authentication,omitempty"` + + // A blob_properties block as defined below. + BlobProperties *BlobPropertiesInitParameters `json:"blobProperties,omitempty" tf:"blob_properties,omitempty"` + + // Should cross Tenant replication be enabled? Defaults to true. + CrossTenantReplicationEnabled *bool `json:"crossTenantReplicationEnabled,omitempty" tf:"cross_tenant_replication_enabled,omitempty"` + + // A custom_domain block as documented below. 
+ CustomDomain *CustomDomainInitParameters `json:"customDomain,omitempty" tf:"custom_domain,omitempty"` + + // A customer_managed_key block as documented below. + CustomerManagedKey *CustomerManagedKeyInitParameters `json:"customerManagedKey,omitempty" tf:"customer_managed_key,omitempty"` + + // Default to Azure Active Directory authorization in the Azure portal when accessing the Storage Account. The default value is false + DefaultToOauthAuthentication *bool `json:"defaultToOauthAuthentication,omitempty" tf:"default_to_oauth_authentication,omitempty"` + + // Specifies the Edge Zone within the Azure Region where this Storage Account should exist. Changing this forces a new Storage Account to be created. + EdgeZone *string `json:"edgeZone,omitempty" tf:"edge_zone,omitempty"` + + // Boolean flag which forces HTTPS if enabled, see here for more information. Defaults to true. + EnableHTTPSTrafficOnly *bool `json:"enableHttpsTrafficOnly,omitempty" tf:"enable_https_traffic_only,omitempty"` + + // An identity block as defined below. + Identity *IdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // An immutability_policy block as defined below. Changing this forces a new resource to be created. + ImmutabilityPolicy *ImmutabilityPolicyInitParameters `json:"immutabilityPolicy,omitempty" tf:"immutability_policy,omitempty"` + + // Is infrastructure encryption enabled? Changing this forces a new resource to be created. Defaults to false. + InfrastructureEncryptionEnabled *bool `json:"infrastructureEncryptionEnabled,omitempty" tf:"infrastructure_encryption_enabled,omitempty"` + + // Is Hierarchical Namespace enabled? This can be used with Azure Data Lake Storage Gen 2 (see here for more information). Changing this forces a new resource to be created. + IsHnsEnabled *bool `json:"isHnsEnabled,omitempty" tf:"is_hns_enabled,omitempty"` + + // Is Large File Share Enabled? 
+ LargeFileShareEnabled *bool `json:"largeFileShareEnabled,omitempty" tf:"large_file_share_enabled,omitempty"` + + // Is Local User Enabled? Defaults to true. + LocalUserEnabled *bool `json:"localUserEnabled,omitempty" tf:"local_user_enabled,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The minimum supported TLS version for the storage account. Possible values are TLS1_0, TLS1_1, and TLS1_2. Defaults to TLS1_2 for new storage accounts. + MinTLSVersion *string `json:"minTlsVersion,omitempty" tf:"min_tls_version,omitempty"` + + // A network_rules block as documented below. + NetworkRules *NetworkRulesInitParameters `json:"networkRules,omitempty" tf:"network_rules,omitempty"` + + // Is NFSv3 protocol enabled? Changing this forces a new resource to be created. Defaults to false. + Nfsv3Enabled *bool `json:"nfsv3Enabled,omitempty" tf:"nfsv3_enabled,omitempty"` + + // Whether the public network access is enabled? Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The encryption type of the queue service. Possible values are Service and Account. Changing this forces a new resource to be created. Default value is Service. + QueueEncryptionKeyType *string `json:"queueEncryptionKeyType,omitempty" tf:"queue_encryption_key_type,omitempty"` + + // A queue_properties block as defined below. + QueueProperties *QueuePropertiesInitParameters `json:"queueProperties,omitempty" tf:"queue_properties,omitempty"` + + // A routing block as defined below. + Routing *RoutingInitParameters `json:"routing,omitempty" tf:"routing,omitempty"` + + // A sas_policy block as defined below. 
+ SASPolicy *SASPolicyInitParameters `json:"sasPolicy,omitempty" tf:"sas_policy,omitempty"` + + // Boolean, enable SFTP for the storage account + SFTPEnabled *bool `json:"sftpEnabled,omitempty" tf:"sftp_enabled,omitempty"` + + // A share_properties block as defined below. + ShareProperties *SharePropertiesInitParameters `json:"shareProperties,omitempty" tf:"share_properties,omitempty"` + + // Indicates whether the storage account permits requests to be authorized with the account access key via Shared Key. If false, then all requests, including shared access signatures, must be authorized with Azure Active Directory (Azure AD). Defaults to true. + SharedAccessKeyEnabled *bool `json:"sharedAccessKeyEnabled,omitempty" tf:"shared_access_key_enabled,omitempty"` + + // A static_website block as defined below. + StaticWebsite *StaticWebsiteInitParameters `json:"staticWebsite,omitempty" tf:"static_website,omitempty"` + + // The encryption type of the table service. Possible values are Service and Account. Changing this forces a new resource to be created. Default value is Service. + TableEncryptionKeyType *string `json:"tableEncryptionKeyType,omitempty" tf:"table_encryption_key_type,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type AccountObservation struct { + + // Defines the access tier for BlobStorage, FileStorage and StorageV2 accounts. Valid options are Hot and Cool, defaults to Hot. + AccessTier *string `json:"accessTier,omitempty" tf:"access_tier,omitempty"` + + // Defines the Kind of account. Valid options are BlobStorage, BlockBlobStorage, FileStorage, Storage and StorageV2. Defaults to StorageV2. + AccountKind *string `json:"accountKind,omitempty" tf:"account_kind,omitempty"` + + // Defines the type of replication to use for this storage account. Valid options are LRS, GRS, RAGRS, ZRS, GZRS and RAGZRS. 
Changing this forces a new resource to be created when types LRS, GRS and RAGRS are changed to ZRS, GZRS or RAGZRS and vice versa. + AccountReplicationType *string `json:"accountReplicationType,omitempty" tf:"account_replication_type,omitempty"` + + // Defines the Tier to use for this storage account. Valid options are Standard and Premium. For BlockBlobStorage and FileStorage accounts only Premium is valid. Changing this forces a new resource to be created. + AccountTier *string `json:"accountTier,omitempty" tf:"account_tier,omitempty"` + + // Allow or disallow nested items within this Account to opt into being public. Defaults to true. + AllowNestedItemsToBePublic *bool `json:"allowNestedItemsToBePublic,omitempty" tf:"allow_nested_items_to_be_public,omitempty"` + + // Restrict copy to and from Storage Accounts within an AAD tenant or with Private Links to the same VNet. Possible values are AAD and PrivateLink. + AllowedCopyScope *string `json:"allowedCopyScope,omitempty" tf:"allowed_copy_scope,omitempty"` + + // A azure_files_authentication block as defined below. + AzureFilesAuthentication *AzureFilesAuthenticationObservation `json:"azureFilesAuthentication,omitempty" tf:"azure_files_authentication,omitempty"` + + // A blob_properties block as defined below. + BlobProperties *BlobPropertiesObservation `json:"blobProperties,omitempty" tf:"blob_properties,omitempty"` + + // Should cross Tenant replication be enabled? Defaults to true. + CrossTenantReplicationEnabled *bool `json:"crossTenantReplicationEnabled,omitempty" tf:"cross_tenant_replication_enabled,omitempty"` + + // A custom_domain block as documented below. + CustomDomain *CustomDomainObservation `json:"customDomain,omitempty" tf:"custom_domain,omitempty"` + + // A customer_managed_key block as documented below. 
+ CustomerManagedKey *CustomerManagedKeyObservation `json:"customerManagedKey,omitempty" tf:"customer_managed_key,omitempty"` + + // Default to Azure Active Directory authorization in the Azure portal when accessing the Storage Account. The default value is false + DefaultToOauthAuthentication *bool `json:"defaultToOauthAuthentication,omitempty" tf:"default_to_oauth_authentication,omitempty"` + + // Specifies the Edge Zone within the Azure Region where this Storage Account should exist. Changing this forces a new Storage Account to be created. + EdgeZone *string `json:"edgeZone,omitempty" tf:"edge_zone,omitempty"` + + // Boolean flag which forces HTTPS if enabled, see here for more information. Defaults to true. + EnableHTTPSTrafficOnly *bool `json:"enableHttpsTrafficOnly,omitempty" tf:"enable_https_traffic_only,omitempty"` + + // The ID of the Storage Account. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. + Identity *IdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // An immutability_policy block as defined below. Changing this forces a new resource to be created. + ImmutabilityPolicy *ImmutabilityPolicyObservation `json:"immutabilityPolicy,omitempty" tf:"immutability_policy,omitempty"` + + // Is infrastructure encryption enabled? Changing this forces a new resource to be created. Defaults to false. + InfrastructureEncryptionEnabled *bool `json:"infrastructureEncryptionEnabled,omitempty" tf:"infrastructure_encryption_enabled,omitempty"` + + // Is Hierarchical Namespace enabled? This can be used with Azure Data Lake Storage Gen 2 (see here for more information). Changing this forces a new resource to be created. + IsHnsEnabled *bool `json:"isHnsEnabled,omitempty" tf:"is_hns_enabled,omitempty"` + + // Is Large File Share Enabled? + LargeFileShareEnabled *bool `json:"largeFileShareEnabled,omitempty" tf:"large_file_share_enabled,omitempty"` + + // Is Local User Enabled? Defaults to true. 
+ LocalUserEnabled *bool `json:"localUserEnabled,omitempty" tf:"local_user_enabled,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The minimum supported TLS version for the storage account. Possible values are TLS1_0, TLS1_1, and TLS1_2. Defaults to TLS1_2 for new storage accounts. + MinTLSVersion *string `json:"minTlsVersion,omitempty" tf:"min_tls_version,omitempty"` + + // A network_rules block as documented below. + NetworkRules *NetworkRulesObservation `json:"networkRules,omitempty" tf:"network_rules,omitempty"` + + // Is NFSv3 protocol enabled? Changing this forces a new resource to be created. Defaults to false. + Nfsv3Enabled *bool `json:"nfsv3Enabled,omitempty" tf:"nfsv3_enabled,omitempty"` + + // The endpoint URL for blob storage in the primary location. + PrimaryBlobEndpoint *string `json:"primaryBlobEndpoint,omitempty" tf:"primary_blob_endpoint,omitempty"` + + // The hostname with port if applicable for blob storage in the primary location. + PrimaryBlobHost *string `json:"primaryBlobHost,omitempty" tf:"primary_blob_host,omitempty"` + + // The internet routing endpoint URL for blob storage in the primary location. + PrimaryBlobInternetEndpoint *string `json:"primaryBlobInternetEndpoint,omitempty" tf:"primary_blob_internet_endpoint,omitempty"` + + // The internet routing hostname with port if applicable for blob storage in the primary location. + PrimaryBlobInternetHost *string `json:"primaryBlobInternetHost,omitempty" tf:"primary_blob_internet_host,omitempty"` + + // The microsoft routing endpoint URL for blob storage in the primary location. + PrimaryBlobMicrosoftEndpoint *string `json:"primaryBlobMicrosoftEndpoint,omitempty" tf:"primary_blob_microsoft_endpoint,omitempty"` + + // The microsoft routing hostname with port if applicable for blob storage in the primary location. 
+ PrimaryBlobMicrosoftHost *string `json:"primaryBlobMicrosoftHost,omitempty" tf:"primary_blob_microsoft_host,omitempty"` + + // The endpoint URL for DFS storage in the primary location. + PrimaryDfsEndpoint *string `json:"primaryDfsEndpoint,omitempty" tf:"primary_dfs_endpoint,omitempty"` + + // The hostname with port if applicable for DFS storage in the primary location. + PrimaryDfsHost *string `json:"primaryDfsHost,omitempty" tf:"primary_dfs_host,omitempty"` + + // The internet routing endpoint URL for DFS storage in the primary location. + PrimaryDfsInternetEndpoint *string `json:"primaryDfsInternetEndpoint,omitempty" tf:"primary_dfs_internet_endpoint,omitempty"` + + // The internet routing hostname with port if applicable for DFS storage in the primary location. + PrimaryDfsInternetHost *string `json:"primaryDfsInternetHost,omitempty" tf:"primary_dfs_internet_host,omitempty"` + + // The microsoft routing endpoint URL for DFS storage in the primary location. + PrimaryDfsMicrosoftEndpoint *string `json:"primaryDfsMicrosoftEndpoint,omitempty" tf:"primary_dfs_microsoft_endpoint,omitempty"` + + // The microsoft routing hostname with port if applicable for DFS storage in the primary location. + PrimaryDfsMicrosoftHost *string `json:"primaryDfsMicrosoftHost,omitempty" tf:"primary_dfs_microsoft_host,omitempty"` + + // The endpoint URL for file storage in the primary location. + PrimaryFileEndpoint *string `json:"primaryFileEndpoint,omitempty" tf:"primary_file_endpoint,omitempty"` + + // The hostname with port if applicable for file storage in the primary location. + PrimaryFileHost *string `json:"primaryFileHost,omitempty" tf:"primary_file_host,omitempty"` + + // The internet routing endpoint URL for file storage in the primary location. + PrimaryFileInternetEndpoint *string `json:"primaryFileInternetEndpoint,omitempty" tf:"primary_file_internet_endpoint,omitempty"` + + // The internet routing hostname with port if applicable for file storage in the primary location. 
+ PrimaryFileInternetHost *string `json:"primaryFileInternetHost,omitempty" tf:"primary_file_internet_host,omitempty"` + + // The microsoft routing endpoint URL for file storage in the primary location. + PrimaryFileMicrosoftEndpoint *string `json:"primaryFileMicrosoftEndpoint,omitempty" tf:"primary_file_microsoft_endpoint,omitempty"` + + // The microsoft routing hostname with port if applicable for file storage in the primary location. + PrimaryFileMicrosoftHost *string `json:"primaryFileMicrosoftHost,omitempty" tf:"primary_file_microsoft_host,omitempty"` + + // The primary location of the storage account. + PrimaryLocation *string `json:"primaryLocation,omitempty" tf:"primary_location,omitempty"` + + // The endpoint URL for queue storage in the primary location. + PrimaryQueueEndpoint *string `json:"primaryQueueEndpoint,omitempty" tf:"primary_queue_endpoint,omitempty"` + + // The hostname with port if applicable for queue storage in the primary location. + PrimaryQueueHost *string `json:"primaryQueueHost,omitempty" tf:"primary_queue_host,omitempty"` + + // The microsoft routing endpoint URL for queue storage in the primary location. + PrimaryQueueMicrosoftEndpoint *string `json:"primaryQueueMicrosoftEndpoint,omitempty" tf:"primary_queue_microsoft_endpoint,omitempty"` + + // The microsoft routing hostname with port if applicable for queue storage in the primary location. + PrimaryQueueMicrosoftHost *string `json:"primaryQueueMicrosoftHost,omitempty" tf:"primary_queue_microsoft_host,omitempty"` + + // The endpoint URL for table storage in the primary location. + PrimaryTableEndpoint *string `json:"primaryTableEndpoint,omitempty" tf:"primary_table_endpoint,omitempty"` + + // The hostname with port if applicable for table storage in the primary location. + PrimaryTableHost *string `json:"primaryTableHost,omitempty" tf:"primary_table_host,omitempty"` + + // The microsoft routing endpoint URL for table storage in the primary location. 
+ PrimaryTableMicrosoftEndpoint *string `json:"primaryTableMicrosoftEndpoint,omitempty" tf:"primary_table_microsoft_endpoint,omitempty"` + + // The microsoft routing hostname with port if applicable for table storage in the primary location. + PrimaryTableMicrosoftHost *string `json:"primaryTableMicrosoftHost,omitempty" tf:"primary_table_microsoft_host,omitempty"` + + // The endpoint URL for web storage in the primary location. + PrimaryWebEndpoint *string `json:"primaryWebEndpoint,omitempty" tf:"primary_web_endpoint,omitempty"` + + // The hostname with port if applicable for web storage in the primary location. + PrimaryWebHost *string `json:"primaryWebHost,omitempty" tf:"primary_web_host,omitempty"` + + // The internet routing endpoint URL for web storage in the primary location. + PrimaryWebInternetEndpoint *string `json:"primaryWebInternetEndpoint,omitempty" tf:"primary_web_internet_endpoint,omitempty"` + + // The internet routing hostname with port if applicable for web storage in the primary location. + PrimaryWebInternetHost *string `json:"primaryWebInternetHost,omitempty" tf:"primary_web_internet_host,omitempty"` + + // The microsoft routing endpoint URL for web storage in the primary location. + PrimaryWebMicrosoftEndpoint *string `json:"primaryWebMicrosoftEndpoint,omitempty" tf:"primary_web_microsoft_endpoint,omitempty"` + + // The microsoft routing hostname with port if applicable for web storage in the primary location. + PrimaryWebMicrosoftHost *string `json:"primaryWebMicrosoftHost,omitempty" tf:"primary_web_microsoft_host,omitempty"` + + // Whether the public network access is enabled? Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The encryption type of the queue service. Possible values are Service and Account. Changing this forces a new resource to be created. Default value is Service. 
+ QueueEncryptionKeyType *string `json:"queueEncryptionKeyType,omitempty" tf:"queue_encryption_key_type,omitempty"` + + // A queue_properties block as defined below. + QueueProperties *QueuePropertiesObservation `json:"queueProperties,omitempty" tf:"queue_properties,omitempty"` + + // The name of the resource group in which to create the storage account. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A routing block as defined below. + Routing *RoutingObservation `json:"routing,omitempty" tf:"routing,omitempty"` + + // A sas_policy block as defined below. + SASPolicy *SASPolicyObservation `json:"sasPolicy,omitempty" tf:"sas_policy,omitempty"` + + // Boolean, enable SFTP for the storage account + SFTPEnabled *bool `json:"sftpEnabled,omitempty" tf:"sftp_enabled,omitempty"` + + // The endpoint URL for blob storage in the secondary location. + SecondaryBlobEndpoint *string `json:"secondaryBlobEndpoint,omitempty" tf:"secondary_blob_endpoint,omitempty"` + + // The hostname with port if applicable for blob storage in the secondary location. + SecondaryBlobHost *string `json:"secondaryBlobHost,omitempty" tf:"secondary_blob_host,omitempty"` + + // The internet routing endpoint URL for blob storage in the secondary location. + SecondaryBlobInternetEndpoint *string `json:"secondaryBlobInternetEndpoint,omitempty" tf:"secondary_blob_internet_endpoint,omitempty"` + + // The internet routing hostname with port if applicable for blob storage in the secondary location. + SecondaryBlobInternetHost *string `json:"secondaryBlobInternetHost,omitempty" tf:"secondary_blob_internet_host,omitempty"` + + // The microsoft routing endpoint URL for blob storage in the secondary location. 
+ SecondaryBlobMicrosoftEndpoint *string `json:"secondaryBlobMicrosoftEndpoint,omitempty" tf:"secondary_blob_microsoft_endpoint,omitempty"` + + // The microsoft routing hostname with port if applicable for blob storage in the secondary location. + SecondaryBlobMicrosoftHost *string `json:"secondaryBlobMicrosoftHost,omitempty" tf:"secondary_blob_microsoft_host,omitempty"` + + // The endpoint URL for DFS storage in the secondary location. + SecondaryDfsEndpoint *string `json:"secondaryDfsEndpoint,omitempty" tf:"secondary_dfs_endpoint,omitempty"` + + // The hostname with port if applicable for DFS storage in the secondary location. + SecondaryDfsHost *string `json:"secondaryDfsHost,omitempty" tf:"secondary_dfs_host,omitempty"` + + // The internet routing endpoint URL for DFS storage in the secondary location. + SecondaryDfsInternetEndpoint *string `json:"secondaryDfsInternetEndpoint,omitempty" tf:"secondary_dfs_internet_endpoint,omitempty"` + + // The internet routing hostname with port if applicable for DFS storage in the secondary location. + SecondaryDfsInternetHost *string `json:"secondaryDfsInternetHost,omitempty" tf:"secondary_dfs_internet_host,omitempty"` + + // The microsoft routing endpoint URL for DFS storage in the secondary location. + SecondaryDfsMicrosoftEndpoint *string `json:"secondaryDfsMicrosoftEndpoint,omitempty" tf:"secondary_dfs_microsoft_endpoint,omitempty"` + + // The microsoft routing hostname with port if applicable for DFS storage in the secondary location. + SecondaryDfsMicrosoftHost *string `json:"secondaryDfsMicrosoftHost,omitempty" tf:"secondary_dfs_microsoft_host,omitempty"` + + // The endpoint URL for file storage in the secondary location. + SecondaryFileEndpoint *string `json:"secondaryFileEndpoint,omitempty" tf:"secondary_file_endpoint,omitempty"` + + // The hostname with port if applicable for file storage in the secondary location. 
+ SecondaryFileHost *string `json:"secondaryFileHost,omitempty" tf:"secondary_file_host,omitempty"` + + // The internet routing endpoint URL for file storage in the secondary location. + SecondaryFileInternetEndpoint *string `json:"secondaryFileInternetEndpoint,omitempty" tf:"secondary_file_internet_endpoint,omitempty"` + + // The internet routing hostname with port if applicable for file storage in the secondary location. + SecondaryFileInternetHost *string `json:"secondaryFileInternetHost,omitempty" tf:"secondary_file_internet_host,omitempty"` + + // The microsoft routing endpoint URL for file storage in the secondary location. + SecondaryFileMicrosoftEndpoint *string `json:"secondaryFileMicrosoftEndpoint,omitempty" tf:"secondary_file_microsoft_endpoint,omitempty"` + + // The microsoft routing hostname with port if applicable for file storage in the secondary location. + SecondaryFileMicrosoftHost *string `json:"secondaryFileMicrosoftHost,omitempty" tf:"secondary_file_microsoft_host,omitempty"` + + // The secondary location of the storage account. + SecondaryLocation *string `json:"secondaryLocation,omitempty" tf:"secondary_location,omitempty"` + + // The endpoint URL for queue storage in the secondary location. + SecondaryQueueEndpoint *string `json:"secondaryQueueEndpoint,omitempty" tf:"secondary_queue_endpoint,omitempty"` + + // The hostname with port if applicable for queue storage in the secondary location. + SecondaryQueueHost *string `json:"secondaryQueueHost,omitempty" tf:"secondary_queue_host,omitempty"` + + // The microsoft routing endpoint URL for queue storage in the secondary location. + SecondaryQueueMicrosoftEndpoint *string `json:"secondaryQueueMicrosoftEndpoint,omitempty" tf:"secondary_queue_microsoft_endpoint,omitempty"` + + // The microsoft routing hostname with port if applicable for queue storage in the secondary location. 
+ SecondaryQueueMicrosoftHost *string `json:"secondaryQueueMicrosoftHost,omitempty" tf:"secondary_queue_microsoft_host,omitempty"` + + // The endpoint URL for table storage in the secondary location. + SecondaryTableEndpoint *string `json:"secondaryTableEndpoint,omitempty" tf:"secondary_table_endpoint,omitempty"` + + // The hostname with port if applicable for table storage in the secondary location. + SecondaryTableHost *string `json:"secondaryTableHost,omitempty" tf:"secondary_table_host,omitempty"` + + // The microsoft routing endpoint URL for table storage in the secondary location. + SecondaryTableMicrosoftEndpoint *string `json:"secondaryTableMicrosoftEndpoint,omitempty" tf:"secondary_table_microsoft_endpoint,omitempty"` + + // The microsoft routing hostname with port if applicable for table storage in the secondary location. + SecondaryTableMicrosoftHost *string `json:"secondaryTableMicrosoftHost,omitempty" tf:"secondary_table_microsoft_host,omitempty"` + + // The endpoint URL for web storage in the secondary location. + SecondaryWebEndpoint *string `json:"secondaryWebEndpoint,omitempty" tf:"secondary_web_endpoint,omitempty"` + + // The hostname with port if applicable for web storage in the secondary location. + SecondaryWebHost *string `json:"secondaryWebHost,omitempty" tf:"secondary_web_host,omitempty"` + + // The internet routing endpoint URL for web storage in the secondary location. + SecondaryWebInternetEndpoint *string `json:"secondaryWebInternetEndpoint,omitempty" tf:"secondary_web_internet_endpoint,omitempty"` + + // The internet routing hostname with port if applicable for web storage in the secondary location. + SecondaryWebInternetHost *string `json:"secondaryWebInternetHost,omitempty" tf:"secondary_web_internet_host,omitempty"` + + // The microsoft routing endpoint URL for web storage in the secondary location. 
+ SecondaryWebMicrosoftEndpoint *string `json:"secondaryWebMicrosoftEndpoint,omitempty" tf:"secondary_web_microsoft_endpoint,omitempty"` + + // The microsoft routing hostname with port if applicable for web storage in the secondary location. + SecondaryWebMicrosoftHost *string `json:"secondaryWebMicrosoftHost,omitempty" tf:"secondary_web_microsoft_host,omitempty"` + + // A share_properties block as defined below. + ShareProperties *SharePropertiesObservation `json:"shareProperties,omitempty" tf:"share_properties,omitempty"` + + // Indicates whether the storage account permits requests to be authorized with the account access key via Shared Key. If false, then all requests, including shared access signatures, must be authorized with Azure Active Directory (Azure AD). Defaults to true. + SharedAccessKeyEnabled *bool `json:"sharedAccessKeyEnabled,omitempty" tf:"shared_access_key_enabled,omitempty"` + + // A static_website block as defined below. + StaticWebsite *StaticWebsiteObservation `json:"staticWebsite,omitempty" tf:"static_website,omitempty"` + + // The encryption type of the table service. Possible values are Service and Account. Changing this forces a new resource to be created. Default value is Service. + TableEncryptionKeyType *string `json:"tableEncryptionKeyType,omitempty" tf:"table_encryption_key_type,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type AccountParameters struct { + + // Defines the access tier for BlobStorage, FileStorage and StorageV2 accounts. Valid options are Hot and Cool, defaults to Hot. + // +kubebuilder:validation:Optional + AccessTier *string `json:"accessTier,omitempty" tf:"access_tier,omitempty"` + + // Defines the Kind of account. Valid options are BlobStorage, BlockBlobStorage, FileStorage, Storage and StorageV2. Defaults to StorageV2. 
+ // +kubebuilder:validation:Optional + AccountKind *string `json:"accountKind,omitempty" tf:"account_kind,omitempty"` + + // Defines the type of replication to use for this storage account. Valid options are LRS, GRS, RAGRS, ZRS, GZRS and RAGZRS. Changing this forces a new resource to be created when types LRS, GRS and RAGRS are changed to ZRS, GZRS or RAGZRS and vice versa. + // +kubebuilder:validation:Optional + AccountReplicationType *string `json:"accountReplicationType,omitempty" tf:"account_replication_type,omitempty"` + + // Defines the Tier to use for this storage account. Valid options are Standard and Premium. For BlockBlobStorage and FileStorage accounts only Premium is valid. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + AccountTier *string `json:"accountTier,omitempty" tf:"account_tier,omitempty"` + + // Allow or disallow nested items within this Account to opt into being public. Defaults to true. + // +kubebuilder:validation:Optional + AllowNestedItemsToBePublic *bool `json:"allowNestedItemsToBePublic,omitempty" tf:"allow_nested_items_to_be_public,omitempty"` + + // Restrict copy to and from Storage Accounts within an AAD tenant or with Private Links to the same VNet. Possible values are AAD and PrivateLink. + // +kubebuilder:validation:Optional + AllowedCopyScope *string `json:"allowedCopyScope,omitempty" tf:"allowed_copy_scope,omitempty"` + + // A azure_files_authentication block as defined below. + // +kubebuilder:validation:Optional + AzureFilesAuthentication *AzureFilesAuthenticationParameters `json:"azureFilesAuthentication,omitempty" tf:"azure_files_authentication,omitempty"` + + // A blob_properties block as defined below. + // +kubebuilder:validation:Optional + BlobProperties *BlobPropertiesParameters `json:"blobProperties,omitempty" tf:"blob_properties,omitempty"` + + // Should cross Tenant replication be enabled? Defaults to true. 
+ // +kubebuilder:validation:Optional + CrossTenantReplicationEnabled *bool `json:"crossTenantReplicationEnabled,omitempty" tf:"cross_tenant_replication_enabled,omitempty"` + + // A custom_domain block as documented below. + // +kubebuilder:validation:Optional + CustomDomain *CustomDomainParameters `json:"customDomain,omitempty" tf:"custom_domain,omitempty"` + + // A customer_managed_key block as documented below. + // +kubebuilder:validation:Optional + CustomerManagedKey *CustomerManagedKeyParameters `json:"customerManagedKey,omitempty" tf:"customer_managed_key,omitempty"` + + // Default to Azure Active Directory authorization in the Azure portal when accessing the Storage Account. The default value is false + // +kubebuilder:validation:Optional + DefaultToOauthAuthentication *bool `json:"defaultToOauthAuthentication,omitempty" tf:"default_to_oauth_authentication,omitempty"` + + // Specifies the Edge Zone within the Azure Region where this Storage Account should exist. Changing this forces a new Storage Account to be created. + // +kubebuilder:validation:Optional + EdgeZone *string `json:"edgeZone,omitempty" tf:"edge_zone,omitempty"` + + // Boolean flag which forces HTTPS if enabled, see here for more information. Defaults to true. + // +kubebuilder:validation:Optional + EnableHTTPSTrafficOnly *bool `json:"enableHttpsTrafficOnly,omitempty" tf:"enable_https_traffic_only,omitempty"` + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *IdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // An immutability_policy block as defined below. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ImmutabilityPolicy *ImmutabilityPolicyParameters `json:"immutabilityPolicy,omitempty" tf:"immutability_policy,omitempty"` + + // Is infrastructure encryption enabled? Changing this forces a new resource to be created. Defaults to false. 
+ // +kubebuilder:validation:Optional + InfrastructureEncryptionEnabled *bool `json:"infrastructureEncryptionEnabled,omitempty" tf:"infrastructure_encryption_enabled,omitempty"` + + // Is Hierarchical Namespace enabled? This can be used with Azure Data Lake Storage Gen 2 (see here for more information). Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + IsHnsEnabled *bool `json:"isHnsEnabled,omitempty" tf:"is_hns_enabled,omitempty"` + + // Is Large File Share Enabled? + // +kubebuilder:validation:Optional + LargeFileShareEnabled *bool `json:"largeFileShareEnabled,omitempty" tf:"large_file_share_enabled,omitempty"` + + // Is Local User Enabled? Defaults to true. + // +kubebuilder:validation:Optional + LocalUserEnabled *bool `json:"localUserEnabled,omitempty" tf:"local_user_enabled,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The minimum supported TLS version for the storage account. Possible values are TLS1_0, TLS1_1, and TLS1_2. Defaults to TLS1_2 for new storage accounts. + // +kubebuilder:validation:Optional + MinTLSVersion *string `json:"minTlsVersion,omitempty" tf:"min_tls_version,omitempty"` + + // A network_rules block as documented below. + // +kubebuilder:validation:Optional + NetworkRules *NetworkRulesParameters `json:"networkRules,omitempty" tf:"network_rules,omitempty"` + + // Is NFSv3 protocol enabled? Changing this forces a new resource to be created. Defaults to false. + // +kubebuilder:validation:Optional + Nfsv3Enabled *bool `json:"nfsv3Enabled,omitempty" tf:"nfsv3_enabled,omitempty"` + + // Whether the public network access is enabled? Defaults to true. 
+ // +kubebuilder:validation:Optional + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The encryption type of the queue service. Possible values are Service and Account. Changing this forces a new resource to be created. Default value is Service. + // +kubebuilder:validation:Optional + QueueEncryptionKeyType *string `json:"queueEncryptionKeyType,omitempty" tf:"queue_encryption_key_type,omitempty"` + + // A queue_properties block as defined below. + // +kubebuilder:validation:Optional + QueueProperties *QueuePropertiesParameters `json:"queueProperties,omitempty" tf:"queue_properties,omitempty"` + + // The name of the resource group in which to create the storage account. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A routing block as defined below. + // +kubebuilder:validation:Optional + Routing *RoutingParameters `json:"routing,omitempty" tf:"routing,omitempty"` + + // A sas_policy block as defined below. 
+ // +kubebuilder:validation:Optional + SASPolicy *SASPolicyParameters `json:"sasPolicy,omitempty" tf:"sas_policy,omitempty"` + + // Boolean, enable SFTP for the storage account + // +kubebuilder:validation:Optional + SFTPEnabled *bool `json:"sftpEnabled,omitempty" tf:"sftp_enabled,omitempty"` + + // A share_properties block as defined below. + // +kubebuilder:validation:Optional + ShareProperties *SharePropertiesParameters `json:"shareProperties,omitempty" tf:"share_properties,omitempty"` + + // Indicates whether the storage account permits requests to be authorized with the account access key via Shared Key. If false, then all requests, including shared access signatures, must be authorized with Azure Active Directory (Azure AD). Defaults to true. + // +kubebuilder:validation:Optional + SharedAccessKeyEnabled *bool `json:"sharedAccessKeyEnabled,omitempty" tf:"shared_access_key_enabled,omitempty"` + + // A static_website block as defined below. + // +kubebuilder:validation:Optional + StaticWebsite *StaticWebsiteParameters `json:"staticWebsite,omitempty" tf:"static_website,omitempty"` + + // The encryption type of the table service. Possible values are Service and Account. Changing this forces a new resource to be created. Default value is Service. + // +kubebuilder:validation:Optional + TableEncryptionKeyType *string `json:"tableEncryptionKeyType,omitempty" tf:"table_encryption_key_type,omitempty"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type ActiveDirectoryInitParameters struct { + + // Specifies the domain GUID. + DomainGUID *string `json:"domainGuid,omitempty" tf:"domain_guid,omitempty"` + + // Specifies the primary domain that the AD DNS server is authoritative for. + DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` + + // Specifies the security identifier (SID). 
This is required when directory_type is set to AD. + DomainSid *string `json:"domainSid,omitempty" tf:"domain_sid,omitempty"` + + // Specifies the Active Directory forest. This is required when directory_type is set to AD. + ForestName *string `json:"forestName,omitempty" tf:"forest_name,omitempty"` + + // Specifies the NetBIOS domain name. This is required when directory_type is set to AD. + NetbiosDomainName *string `json:"netbiosDomainName,omitempty" tf:"netbios_domain_name,omitempty"` + + // Specifies the security identifier (SID) for Azure Storage. This is required when directory_type is set to AD. + StorageSid *string `json:"storageSid,omitempty" tf:"storage_sid,omitempty"` +} + +type ActiveDirectoryObservation struct { + + // Specifies the domain GUID. + DomainGUID *string `json:"domainGuid,omitempty" tf:"domain_guid,omitempty"` + + // Specifies the primary domain that the AD DNS server is authoritative for. + DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` + + // Specifies the security identifier (SID). This is required when directory_type is set to AD. + DomainSid *string `json:"domainSid,omitempty" tf:"domain_sid,omitempty"` + + // Specifies the Active Directory forest. This is required when directory_type is set to AD. + ForestName *string `json:"forestName,omitempty" tf:"forest_name,omitempty"` + + // Specifies the NetBIOS domain name. This is required when directory_type is set to AD. + NetbiosDomainName *string `json:"netbiosDomainName,omitempty" tf:"netbios_domain_name,omitempty"` + + // Specifies the security identifier (SID) for Azure Storage. This is required when directory_type is set to AD. + StorageSid *string `json:"storageSid,omitempty" tf:"storage_sid,omitempty"` +} + +type ActiveDirectoryParameters struct { + + // Specifies the domain GUID. 
+ // +kubebuilder:validation:Optional + DomainGUID *string `json:"domainGuid" tf:"domain_guid,omitempty"` + + // Specifies the primary domain that the AD DNS server is authoritative for. + // +kubebuilder:validation:Optional + DomainName *string `json:"domainName" tf:"domain_name,omitempty"` + + // Specifies the security identifier (SID). This is required when directory_type is set to AD. + // +kubebuilder:validation:Optional + DomainSid *string `json:"domainSid,omitempty" tf:"domain_sid,omitempty"` + + // Specifies the Active Directory forest. This is required when directory_type is set to AD. + // +kubebuilder:validation:Optional + ForestName *string `json:"forestName,omitempty" tf:"forest_name,omitempty"` + + // Specifies the NetBIOS domain name. This is required when directory_type is set to AD. + // +kubebuilder:validation:Optional + NetbiosDomainName *string `json:"netbiosDomainName,omitempty" tf:"netbios_domain_name,omitempty"` + + // Specifies the security identifier (SID) for Azure Storage. This is required when directory_type is set to AD. + // +kubebuilder:validation:Optional + StorageSid *string `json:"storageSid,omitempty" tf:"storage_sid,omitempty"` +} + +type AzureFilesAuthenticationInitParameters struct { + + // A active_directory block as defined below. Required when directory_type is AD. + ActiveDirectory *ActiveDirectoryInitParameters `json:"activeDirectory,omitempty" tf:"active_directory,omitempty"` + + // Specifies the directory service used. Possible values are AADDS, AD and AADKERB. + DirectoryType *string `json:"directoryType,omitempty" tf:"directory_type,omitempty"` +} + +type AzureFilesAuthenticationObservation struct { + + // A active_directory block as defined below. Required when directory_type is AD. + ActiveDirectory *ActiveDirectoryObservation `json:"activeDirectory,omitempty" tf:"active_directory,omitempty"` + + // Specifies the directory service used. Possible values are AADDS, AD and AADKERB. 
+ DirectoryType *string `json:"directoryType,omitempty" tf:"directory_type,omitempty"` +} + +type AzureFilesAuthenticationParameters struct { + + // A active_directory block as defined below. Required when directory_type is AD. + // +kubebuilder:validation:Optional + ActiveDirectory *ActiveDirectoryParameters `json:"activeDirectory,omitempty" tf:"active_directory,omitempty"` + + // Specifies the directory service used. Possible values are AADDS, AD and AADKERB. + // +kubebuilder:validation:Optional + DirectoryType *string `json:"directoryType" tf:"directory_type,omitempty"` +} + +type BlobPropertiesInitParameters struct { + + // Is the blob service properties for change feed events enabled? Default to false. + ChangeFeedEnabled *bool `json:"changeFeedEnabled,omitempty" tf:"change_feed_enabled,omitempty"` + + // The duration of change feed events retention in days. The possible values are between 1 and 146000 days (400 years). Setting this to null (or omit this in the configuration file) indicates an infinite retention of the change feed. + ChangeFeedRetentionInDays *float64 `json:"changeFeedRetentionInDays,omitempty" tf:"change_feed_retention_in_days,omitempty"` + + // A container_delete_retention_policy block as defined below. + ContainerDeleteRetentionPolicy *ContainerDeleteRetentionPolicyInitParameters `json:"containerDeleteRetentionPolicy,omitempty" tf:"container_delete_retention_policy,omitempty"` + + // A cors_rule block as defined below. + CorsRule []CorsRuleInitParameters `json:"corsRule,omitempty" tf:"cors_rule,omitempty"` + + // The API Version which should be used by default for requests to the Data Plane API if an incoming request doesn't specify an API Version. + DefaultServiceVersion *string `json:"defaultServiceVersion,omitempty" tf:"default_service_version,omitempty"` + + // A delete_retention_policy block as defined below. 
+ DeleteRetentionPolicy *DeleteRetentionPolicyInitParameters `json:"deleteRetentionPolicy,omitempty" tf:"delete_retention_policy,omitempty"` + + // Is the last access time based tracking enabled? Default to false. + LastAccessTimeEnabled *bool `json:"lastAccessTimeEnabled,omitempty" tf:"last_access_time_enabled,omitempty"` + + // A restore_policy block as defined below. This must be used together with delete_retention_policy set, versioning_enabled and change_feed_enabled set to true. + RestorePolicy *RestorePolicyInitParameters `json:"restorePolicy,omitempty" tf:"restore_policy,omitempty"` + + // Is versioning enabled? Default to false. + VersioningEnabled *bool `json:"versioningEnabled,omitempty" tf:"versioning_enabled,omitempty"` +} + +type BlobPropertiesObservation struct { + + // Is the blob service properties for change feed events enabled? Default to false. + ChangeFeedEnabled *bool `json:"changeFeedEnabled,omitempty" tf:"change_feed_enabled,omitempty"` + + // The duration of change feed events retention in days. The possible values are between 1 and 146000 days (400 years). Setting this to null (or omit this in the configuration file) indicates an infinite retention of the change feed. + ChangeFeedRetentionInDays *float64 `json:"changeFeedRetentionInDays,omitempty" tf:"change_feed_retention_in_days,omitempty"` + + // A container_delete_retention_policy block as defined below. + ContainerDeleteRetentionPolicy *ContainerDeleteRetentionPolicyObservation `json:"containerDeleteRetentionPolicy,omitempty" tf:"container_delete_retention_policy,omitempty"` + + // A cors_rule block as defined below. + CorsRule []CorsRuleObservation `json:"corsRule,omitempty" tf:"cors_rule,omitempty"` + + // The API Version which should be used by default for requests to the Data Plane API if an incoming request doesn't specify an API Version. 
+ DefaultServiceVersion *string `json:"defaultServiceVersion,omitempty" tf:"default_service_version,omitempty"` + + // A delete_retention_policy block as defined below. + DeleteRetentionPolicy *DeleteRetentionPolicyObservation `json:"deleteRetentionPolicy,omitempty" tf:"delete_retention_policy,omitempty"` + + // Is the last access time based tracking enabled? Default to false. + LastAccessTimeEnabled *bool `json:"lastAccessTimeEnabled,omitempty" tf:"last_access_time_enabled,omitempty"` + + // A restore_policy block as defined below. This must be used together with delete_retention_policy set, versioning_enabled and change_feed_enabled set to true. + RestorePolicy *RestorePolicyObservation `json:"restorePolicy,omitempty" tf:"restore_policy,omitempty"` + + // Is versioning enabled? Default to false. + VersioningEnabled *bool `json:"versioningEnabled,omitempty" tf:"versioning_enabled,omitempty"` +} + +type BlobPropertiesParameters struct { + + // Is the blob service properties for change feed events enabled? Default to false. + // +kubebuilder:validation:Optional + ChangeFeedEnabled *bool `json:"changeFeedEnabled,omitempty" tf:"change_feed_enabled,omitempty"` + + // The duration of change feed events retention in days. The possible values are between 1 and 146000 days (400 years). Setting this to null (or omit this in the configuration file) indicates an infinite retention of the change feed. + // +kubebuilder:validation:Optional + ChangeFeedRetentionInDays *float64 `json:"changeFeedRetentionInDays,omitempty" tf:"change_feed_retention_in_days,omitempty"` + + // A container_delete_retention_policy block as defined below. + // +kubebuilder:validation:Optional + ContainerDeleteRetentionPolicy *ContainerDeleteRetentionPolicyParameters `json:"containerDeleteRetentionPolicy,omitempty" tf:"container_delete_retention_policy,omitempty"` + + // A cors_rule block as defined below. 
+ // +kubebuilder:validation:Optional + CorsRule []CorsRuleParameters `json:"corsRule,omitempty" tf:"cors_rule,omitempty"` + + // The API Version which should be used by default for requests to the Data Plane API if an incoming request doesn't specify an API Version. + // +kubebuilder:validation:Optional + DefaultServiceVersion *string `json:"defaultServiceVersion,omitempty" tf:"default_service_version,omitempty"` + + // A delete_retention_policy block as defined below. + // +kubebuilder:validation:Optional + DeleteRetentionPolicy *DeleteRetentionPolicyParameters `json:"deleteRetentionPolicy,omitempty" tf:"delete_retention_policy,omitempty"` + + // Is the last access time based tracking enabled? Default to false. + // +kubebuilder:validation:Optional + LastAccessTimeEnabled *bool `json:"lastAccessTimeEnabled,omitempty" tf:"last_access_time_enabled,omitempty"` + + // A restore_policy block as defined below. This must be used together with delete_retention_policy set, versioning_enabled and change_feed_enabled set to true. + // +kubebuilder:validation:Optional + RestorePolicy *RestorePolicyParameters `json:"restorePolicy,omitempty" tf:"restore_policy,omitempty"` + + // Is versioning enabled? Default to false. + // +kubebuilder:validation:Optional + VersioningEnabled *bool `json:"versioningEnabled,omitempty" tf:"versioning_enabled,omitempty"` +} + +type ContainerDeleteRetentionPolicyInitParameters struct { + + // Specifies the number of days that the azurerm_storage_share should be retained, between 1 and 365 days. Defaults to 7. + Days *float64 `json:"days,omitempty" tf:"days,omitempty"` +} + +type ContainerDeleteRetentionPolicyObservation struct { + + // Specifies the number of days that the azurerm_storage_share should be retained, between 1 and 365 days. Defaults to 7. 
+ Days *float64 `json:"days,omitempty" tf:"days,omitempty"` +} + +type ContainerDeleteRetentionPolicyParameters struct { + + // Specifies the number of days that the azurerm_storage_share should be retained, between 1 and 365 days. Defaults to 7. + // +kubebuilder:validation:Optional + Days *float64 `json:"days,omitempty" tf:"days,omitempty"` +} + +type CorsRuleInitParameters struct { + + // A list of headers that are allowed to be a part of the cross-origin request. + AllowedHeaders []*string `json:"allowedHeaders,omitempty" tf:"allowed_headers,omitempty"` + + // A list of HTTP methods that are allowed to be executed by the origin. Valid options are + // DELETE, GET, HEAD, MERGE, POST, OPTIONS, PUT or PATCH. + AllowedMethods []*string `json:"allowedMethods,omitempty" tf:"allowed_methods,omitempty"` + + // A list of origin domains that will be allowed by CORS. + AllowedOrigins []*string `json:"allowedOrigins,omitempty" tf:"allowed_origins,omitempty"` + + // A list of response headers that are exposed to CORS clients. + ExposedHeaders []*string `json:"exposedHeaders,omitempty" tf:"exposed_headers,omitempty"` + + // The number of seconds the client should cache a preflight response. + MaxAgeInSeconds *float64 `json:"maxAgeInSeconds,omitempty" tf:"max_age_in_seconds,omitempty"` +} + +type CorsRuleObservation struct { + + // A list of headers that are allowed to be a part of the cross-origin request. + AllowedHeaders []*string `json:"allowedHeaders,omitempty" tf:"allowed_headers,omitempty"` + + // A list of HTTP methods that are allowed to be executed by the origin. Valid options are + // DELETE, GET, HEAD, MERGE, POST, OPTIONS, PUT or PATCH. + AllowedMethods []*string `json:"allowedMethods,omitempty" tf:"allowed_methods,omitempty"` + + // A list of origin domains that will be allowed by CORS. + AllowedOrigins []*string `json:"allowedOrigins,omitempty" tf:"allowed_origins,omitempty"` + + // A list of response headers that are exposed to CORS clients. 
+ ExposedHeaders []*string `json:"exposedHeaders,omitempty" tf:"exposed_headers,omitempty"` + + // The number of seconds the client should cache a preflight response. + MaxAgeInSeconds *float64 `json:"maxAgeInSeconds,omitempty" tf:"max_age_in_seconds,omitempty"` +} + +type CorsRuleParameters struct { + + // A list of headers that are allowed to be a part of the cross-origin request. + // +kubebuilder:validation:Optional + AllowedHeaders []*string `json:"allowedHeaders" tf:"allowed_headers,omitempty"` + + // A list of HTTP methods that are allowed to be executed by the origin. Valid options are + // DELETE, GET, HEAD, MERGE, POST, OPTIONS, PUT or PATCH. + // +kubebuilder:validation:Optional + AllowedMethods []*string `json:"allowedMethods" tf:"allowed_methods,omitempty"` + + // A list of origin domains that will be allowed by CORS. + // +kubebuilder:validation:Optional + AllowedOrigins []*string `json:"allowedOrigins" tf:"allowed_origins,omitempty"` + + // A list of response headers that are exposed to CORS clients. + // +kubebuilder:validation:Optional + ExposedHeaders []*string `json:"exposedHeaders" tf:"exposed_headers,omitempty"` + + // The number of seconds the client should cache a preflight response. + // +kubebuilder:validation:Optional + MaxAgeInSeconds *float64 `json:"maxAgeInSeconds" tf:"max_age_in_seconds,omitempty"` +} + +type CustomDomainInitParameters struct { + + // The Custom Domain Name to use for the Storage Account, which will be validated by Azure. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Should the Custom Domain Name be validated by using indirect CNAME validation? + UseSubdomain *bool `json:"useSubdomain,omitempty" tf:"use_subdomain,omitempty"` +} + +type CustomDomainObservation struct { + + // The Custom Domain Name to use for the Storage Account, which will be validated by Azure. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Should the Custom Domain Name be validated by using indirect CNAME validation? + UseSubdomain *bool `json:"useSubdomain,omitempty" tf:"use_subdomain,omitempty"` +} + +type CustomDomainParameters struct { + + // The Custom Domain Name to use for the Storage Account, which will be validated by Azure. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Should the Custom Domain Name be validated by using indirect CNAME validation? + // +kubebuilder:validation:Optional + UseSubdomain *bool `json:"useSubdomain,omitempty" tf:"use_subdomain,omitempty"` +} + +type CustomerManagedKeyInitParameters struct { + + // The ID of the Key Vault Key, supplying a version-less key ID will enable auto-rotation of this key. + KeyVaultKeyID *string `json:"keyVaultKeyId,omitempty" tf:"key_vault_key_id,omitempty"` + + // The ID of a user assigned identity. + UserAssignedIdentityID *string `json:"userAssignedIdentityId,omitempty" tf:"user_assigned_identity_id,omitempty"` +} + +type CustomerManagedKeyObservation struct { + + // The ID of the Key Vault Key, supplying a version-less key ID will enable auto-rotation of this key. + KeyVaultKeyID *string `json:"keyVaultKeyId,omitempty" tf:"key_vault_key_id,omitempty"` + + // The ID of a user assigned identity. + UserAssignedIdentityID *string `json:"userAssignedIdentityId,omitempty" tf:"user_assigned_identity_id,omitempty"` +} + +type CustomerManagedKeyParameters struct { + + // The ID of the Key Vault Key, supplying a version-less key ID will enable auto-rotation of this key. + // +kubebuilder:validation:Optional + KeyVaultKeyID *string `json:"keyVaultKeyId" tf:"key_vault_key_id,omitempty"` + + // The ID of a user assigned identity. 
+ // +kubebuilder:validation:Optional + UserAssignedIdentityID *string `json:"userAssignedIdentityId" tf:"user_assigned_identity_id,omitempty"` +} + +type DeleteRetentionPolicyInitParameters struct { + + // Specifies the number of days that the azurerm_storage_share should be retained, between 1 and 365 days. Defaults to 7. + Days *float64 `json:"days,omitempty" tf:"days,omitempty"` +} + +type DeleteRetentionPolicyObservation struct { + + // Specifies the number of days that the azurerm_storage_share should be retained, between 1 and 365 days. Defaults to 7. + Days *float64 `json:"days,omitempty" tf:"days,omitempty"` +} + +type DeleteRetentionPolicyParameters struct { + + // Specifies the number of days that the azurerm_storage_share should be retained, between 1 and 365 days. Defaults to 7. + // +kubebuilder:validation:Optional + Days *float64 `json:"days,omitempty" tf:"days,omitempty"` +} + +type HourMetricsInitParameters struct { + + // Indicates whether minute metrics are enabled for the Queue service. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Indicates whether metrics should generate summary statistics for called API operations. + IncludeApis *bool `json:"includeApis,omitempty" tf:"include_apis,omitempty"` + + // Specifies the number of days that logs will be retained. + RetentionPolicyDays *float64 `json:"retentionPolicyDays,omitempty" tf:"retention_policy_days,omitempty"` + + // The version of storage analytics to configure. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type HourMetricsObservation struct { + + // Indicates whether minute metrics are enabled for the Queue service. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Indicates whether metrics should generate summary statistics for called API operations. + IncludeApis *bool `json:"includeApis,omitempty" tf:"include_apis,omitempty"` + + // Specifies the number of days that logs will be retained. 
+ RetentionPolicyDays *float64 `json:"retentionPolicyDays,omitempty" tf:"retention_policy_days,omitempty"` + + // The version of storage analytics to configure. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type HourMetricsParameters struct { + + // Indicates whether minute metrics are enabled for the Queue service. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` + + // Indicates whether metrics should generate summary statistics for called API operations. + // +kubebuilder:validation:Optional + IncludeApis *bool `json:"includeApis,omitempty" tf:"include_apis,omitempty"` + + // Specifies the number of days that logs will be retained. + // +kubebuilder:validation:Optional + RetentionPolicyDays *float64 `json:"retentionPolicyDays,omitempty" tf:"retention_policy_days,omitempty"` + + // The version of storage analytics to configure. + // +kubebuilder:validation:Optional + Version *string `json:"version" tf:"version,omitempty"` +} + +type IdentityInitParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Storage Account. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Storage Account. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityObservation struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Storage Account. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Principal ID for the Service Principal associated with the Identity of this Storage Account. 
+ PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID for the Service Principal associated with the Identity of this Storage Account. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Storage Account. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Storage Account. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Storage Account. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type ImmutabilityPolicyInitParameters struct { + + // When enabled, new blocks can be written to an append blob while maintaining immutability protection and compliance. Only new blocks can be added and any existing blocks cannot be modified or deleted. + AllowProtectedAppendWrites *bool `json:"allowProtectedAppendWrites,omitempty" tf:"allow_protected_append_writes,omitempty"` + + // The immutability period for the blobs in the container since the policy creation, in days. + PeriodSinceCreationInDays *float64 `json:"periodSinceCreationInDays,omitempty" tf:"period_since_creation_in_days,omitempty"` + + // Defines the mode of the policy. Disabled state disables the policy, Unlocked state allows increase and decrease of immutability retention time and also allows toggling allowProtectedAppendWrites property, Locked state only allows the increase of the immutability retention time. 
A policy can only be created in a Disabled or Unlocked state and can be toggled between the two states. Only a policy in an Unlocked state can transition to a Locked state which cannot be reverted. + State *string `json:"state,omitempty" tf:"state,omitempty"` +} + +type ImmutabilityPolicyObservation struct { + + // When enabled, new blocks can be written to an append blob while maintaining immutability protection and compliance. Only new blocks can be added and any existing blocks cannot be modified or deleted. + AllowProtectedAppendWrites *bool `json:"allowProtectedAppendWrites,omitempty" tf:"allow_protected_append_writes,omitempty"` + + // The immutability period for the blobs in the container since the policy creation, in days. + PeriodSinceCreationInDays *float64 `json:"periodSinceCreationInDays,omitempty" tf:"period_since_creation_in_days,omitempty"` + + // Defines the mode of the policy. Disabled state disables the policy, Unlocked state allows increase and decrease of immutability retention time and also allows toggling allowProtectedAppendWrites property, Locked state only allows the increase of the immutability retention time. A policy can only be created in a Disabled or Unlocked state and can be toggled between the two states. Only a policy in an Unlocked state can transition to a Locked state which cannot be reverted. + State *string `json:"state,omitempty" tf:"state,omitempty"` +} + +type ImmutabilityPolicyParameters struct { + + // When enabled, new blocks can be written to an append blob while maintaining immutability protection and compliance. Only new blocks can be added and any existing blocks cannot be modified or deleted. + // +kubebuilder:validation:Optional + AllowProtectedAppendWrites *bool `json:"allowProtectedAppendWrites" tf:"allow_protected_append_writes,omitempty"` + + // The immutability period for the blobs in the container since the policy creation, in days. 
+ // +kubebuilder:validation:Optional + PeriodSinceCreationInDays *float64 `json:"periodSinceCreationInDays" tf:"period_since_creation_in_days,omitempty"` + + // Defines the mode of the policy. Disabled state disables the policy, Unlocked state allows increase and decrease of immutability retention time and also allows toggling allowProtectedAppendWrites property, Locked state only allows the increase of the immutability retention time. A policy can only be created in a Disabled or Unlocked state and can be toggled between the two states. Only a policy in an Unlocked state can transition to a Locked state which cannot be reverted. + // +kubebuilder:validation:Optional + State *string `json:"state" tf:"state,omitempty"` +} + +type LoggingInitParameters struct { + + // (Defaults to 60 minutes) Used when deleting the Storage Account. + Delete *bool `json:"delete,omitempty" tf:"delete,omitempty"` + + // (Defaults to 5 minutes) Used when retrieving the Storage Account. + Read *bool `json:"read,omitempty" tf:"read,omitempty"` + + // Specifies the number of days that logs will be retained. + RetentionPolicyDays *float64 `json:"retentionPolicyDays,omitempty" tf:"retention_policy_days,omitempty"` + + // The version of storage analytics to configure. + Version *string `json:"version,omitempty" tf:"version,omitempty"` + + // Indicates whether all write requests should be logged. + Write *bool `json:"write,omitempty" tf:"write,omitempty"` +} + +type LoggingObservation struct { + + // (Defaults to 60 minutes) Used when deleting the Storage Account. + Delete *bool `json:"delete,omitempty" tf:"delete,omitempty"` + + // (Defaults to 5 minutes) Used when retrieving the Storage Account. + Read *bool `json:"read,omitempty" tf:"read,omitempty"` + + // Specifies the number of days that logs will be retained. + RetentionPolicyDays *float64 `json:"retentionPolicyDays,omitempty" tf:"retention_policy_days,omitempty"` + + // The version of storage analytics to configure. 
+ Version *string `json:"version,omitempty" tf:"version,omitempty"` + + // Indicates whether all write requests should be logged. + Write *bool `json:"write,omitempty" tf:"write,omitempty"` +} + +type LoggingParameters struct { + + // (Defaults to 60 minutes) Used when deleting the Storage Account. + // +kubebuilder:validation:Optional + Delete *bool `json:"delete" tf:"delete,omitempty"` + + // (Defaults to 5 minutes) Used when retrieving the Storage Account. + // +kubebuilder:validation:Optional + Read *bool `json:"read" tf:"read,omitempty"` + + // Specifies the number of days that logs will be retained. + // +kubebuilder:validation:Optional + RetentionPolicyDays *float64 `json:"retentionPolicyDays,omitempty" tf:"retention_policy_days,omitempty"` + + // The version of storage analytics to configure. + // +kubebuilder:validation:Optional + Version *string `json:"version" tf:"version,omitempty"` + + // Indicates whether all write requests should be logged. + // +kubebuilder:validation:Optional + Write *bool `json:"write" tf:"write,omitempty"` +} + +type MinuteMetricsInitParameters struct { + + // Indicates whether minute metrics are enabled for the Queue service. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Indicates whether metrics should generate summary statistics for called API operations. + IncludeApis *bool `json:"includeApis,omitempty" tf:"include_apis,omitempty"` + + // Specifies the number of days that logs will be retained. + RetentionPolicyDays *float64 `json:"retentionPolicyDays,omitempty" tf:"retention_policy_days,omitempty"` + + // The version of storage analytics to configure. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type MinuteMetricsObservation struct { + + // Indicates whether minute metrics are enabled for the Queue service. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Indicates whether metrics should generate summary statistics for called API operations. 
+ IncludeApis *bool `json:"includeApis,omitempty" tf:"include_apis,omitempty"` + + // Specifies the number of days that logs will be retained. + RetentionPolicyDays *float64 `json:"retentionPolicyDays,omitempty" tf:"retention_policy_days,omitempty"` + + // The version of storage analytics to configure. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type MinuteMetricsParameters struct { + + // Indicates whether minute metrics are enabled for the Queue service. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` + + // Indicates whether metrics should generate summary statistics for called API operations. + // +kubebuilder:validation:Optional + IncludeApis *bool `json:"includeApis,omitempty" tf:"include_apis,omitempty"` + + // Specifies the number of days that logs will be retained. + // +kubebuilder:validation:Optional + RetentionPolicyDays *float64 `json:"retentionPolicyDays,omitempty" tf:"retention_policy_days,omitempty"` + + // The version of storage analytics to configure. + // +kubebuilder:validation:Optional + Version *string `json:"version" tf:"version,omitempty"` +} + +type NetworkRulesInitParameters struct { + + // Specifies whether traffic is bypassed for Logging/Metrics/AzureServices. Valid options are any combination of Logging, Metrics, AzureServices, or None. + // +listType=set + Bypass []*string `json:"bypass,omitempty" tf:"bypass,omitempty"` + + // Specifies the default action of allow or deny when no other rules match. Valid options are Deny or Allow. + DefaultAction *string `json:"defaultAction,omitempty" tf:"default_action,omitempty"` + + // List of public IP or IP ranges in CIDR Format. Only IPv4 addresses are allowed. /31 CIDRs, /32 CIDRs, and Private IP address ranges (as defined in RFC 1918), are not allowed. + // +listType=set + IPRules []*string `json:"ipRules,omitempty" tf:"ip_rules,omitempty"` + + // One or more private_link_access block as defined below. 
+ PrivateLinkAccess []PrivateLinkAccessInitParameters `json:"privateLinkAccess,omitempty" tf:"private_link_access,omitempty"` + + // A list of resource ids for subnets. + // +listType=set + VirtualNetworkSubnetIds []*string `json:"virtualNetworkSubnetIds,omitempty" tf:"virtual_network_subnet_ids,omitempty"` +} + +type NetworkRulesObservation struct { + + // Specifies whether traffic is bypassed for Logging/Metrics/AzureServices. Valid options are any combination of Logging, Metrics, AzureServices, or None. + // +listType=set + Bypass []*string `json:"bypass,omitempty" tf:"bypass,omitempty"` + + // Specifies the default action of allow or deny when no other rules match. Valid options are Deny or Allow. + DefaultAction *string `json:"defaultAction,omitempty" tf:"default_action,omitempty"` + + // List of public IP or IP ranges in CIDR Format. Only IPv4 addresses are allowed. /31 CIDRs, /32 CIDRs, and Private IP address ranges (as defined in RFC 1918), are not allowed. + // +listType=set + IPRules []*string `json:"ipRules,omitempty" tf:"ip_rules,omitempty"` + + // One or more private_link_access block as defined below. + PrivateLinkAccess []PrivateLinkAccessObservation `json:"privateLinkAccess,omitempty" tf:"private_link_access,omitempty"` + + // A list of resource ids for subnets. + // +listType=set + VirtualNetworkSubnetIds []*string `json:"virtualNetworkSubnetIds,omitempty" tf:"virtual_network_subnet_ids,omitempty"` +} + +type NetworkRulesParameters struct { + + // Specifies whether traffic is bypassed for Logging/Metrics/AzureServices. Valid options are any combination of Logging, Metrics, AzureServices, or None. + // +kubebuilder:validation:Optional + // +listType=set + Bypass []*string `json:"bypass,omitempty" tf:"bypass,omitempty"` + + // Specifies the default action of allow or deny when no other rules match. Valid options are Deny or Allow. 
+ // +kubebuilder:validation:Optional + DefaultAction *string `json:"defaultAction" tf:"default_action,omitempty"` + + // List of public IP or IP ranges in CIDR Format. Only IPv4 addresses are allowed. /31 CIDRs, /32 CIDRs, and Private IP address ranges (as defined in RFC 1918), are not allowed. + // +kubebuilder:validation:Optional + // +listType=set + IPRules []*string `json:"ipRules,omitempty" tf:"ip_rules,omitempty"` + + // One or more private_link_access block as defined below. + // +kubebuilder:validation:Optional + PrivateLinkAccess []PrivateLinkAccessParameters `json:"privateLinkAccess,omitempty" tf:"private_link_access,omitempty"` + + // A list of resource ids for subnets. + // +kubebuilder:validation:Optional + // +listType=set + VirtualNetworkSubnetIds []*string `json:"virtualNetworkSubnetIds,omitempty" tf:"virtual_network_subnet_ids,omitempty"` +} + +type PrivateLinkAccessInitParameters struct { + + // The ID of the Azure resource that should be allowed access to the target storage account. + EndpointResourceID *string `json:"endpointResourceId,omitempty" tf:"endpoint_resource_id,omitempty"` + + // The tenant id of the resource of the resource access rule to be granted access. Defaults to the current tenant id. + EndpointTenantID *string `json:"endpointTenantId,omitempty" tf:"endpoint_tenant_id,omitempty"` +} + +type PrivateLinkAccessObservation struct { + + // The ID of the Azure resource that should be allowed access to the target storage account. + EndpointResourceID *string `json:"endpointResourceId,omitempty" tf:"endpoint_resource_id,omitempty"` + + // The tenant id of the resource of the resource access rule to be granted access. Defaults to the current tenant id. + EndpointTenantID *string `json:"endpointTenantId,omitempty" tf:"endpoint_tenant_id,omitempty"` +} + +type PrivateLinkAccessParameters struct { + + // The ID of the Azure resource that should be allowed access to the target storage account. 
+ // +kubebuilder:validation:Optional + EndpointResourceID *string `json:"endpointResourceId" tf:"endpoint_resource_id,omitempty"` + + // The tenant id of the resource of the resource access rule to be granted access. Defaults to the current tenant id. + // +kubebuilder:validation:Optional + EndpointTenantID *string `json:"endpointTenantId,omitempty" tf:"endpoint_tenant_id,omitempty"` +} + +type QueuePropertiesCorsRuleInitParameters struct { + + // A list of headers that are allowed to be a part of the cross-origin request. + AllowedHeaders []*string `json:"allowedHeaders,omitempty" tf:"allowed_headers,omitempty"` + + // A list of HTTP methods that are allowed to be executed by the origin. Valid options are + // DELETE, GET, HEAD, MERGE, POST, OPTIONS, PUT or PATCH. + AllowedMethods []*string `json:"allowedMethods,omitempty" tf:"allowed_methods,omitempty"` + + // A list of origin domains that will be allowed by CORS. + AllowedOrigins []*string `json:"allowedOrigins,omitempty" tf:"allowed_origins,omitempty"` + + // A list of response headers that are exposed to CORS clients. + ExposedHeaders []*string `json:"exposedHeaders,omitempty" tf:"exposed_headers,omitempty"` + + // The number of seconds the client should cache a preflight response. + MaxAgeInSeconds *float64 `json:"maxAgeInSeconds,omitempty" tf:"max_age_in_seconds,omitempty"` +} + +type QueuePropertiesCorsRuleObservation struct { + + // A list of headers that are allowed to be a part of the cross-origin request. + AllowedHeaders []*string `json:"allowedHeaders,omitempty" tf:"allowed_headers,omitempty"` + + // A list of HTTP methods that are allowed to be executed by the origin. Valid options are + // DELETE, GET, HEAD, MERGE, POST, OPTIONS, PUT or PATCH. + AllowedMethods []*string `json:"allowedMethods,omitempty" tf:"allowed_methods,omitempty"` + + // A list of origin domains that will be allowed by CORS. 
+ AllowedOrigins []*string `json:"allowedOrigins,omitempty" tf:"allowed_origins,omitempty"` + + // A list of response headers that are exposed to CORS clients. + ExposedHeaders []*string `json:"exposedHeaders,omitempty" tf:"exposed_headers,omitempty"` + + // The number of seconds the client should cache a preflight response. + MaxAgeInSeconds *float64 `json:"maxAgeInSeconds,omitempty" tf:"max_age_in_seconds,omitempty"` +} + +type QueuePropertiesCorsRuleParameters struct { + + // A list of headers that are allowed to be a part of the cross-origin request. + // +kubebuilder:validation:Optional + AllowedHeaders []*string `json:"allowedHeaders" tf:"allowed_headers,omitempty"` + + // A list of HTTP methods that are allowed to be executed by the origin. Valid options are + // DELETE, GET, HEAD, MERGE, POST, OPTIONS, PUT or PATCH. + // +kubebuilder:validation:Optional + AllowedMethods []*string `json:"allowedMethods" tf:"allowed_methods,omitempty"` + + // A list of origin domains that will be allowed by CORS. + // +kubebuilder:validation:Optional + AllowedOrigins []*string `json:"allowedOrigins" tf:"allowed_origins,omitempty"` + + // A list of response headers that are exposed to CORS clients. + // +kubebuilder:validation:Optional + ExposedHeaders []*string `json:"exposedHeaders" tf:"exposed_headers,omitempty"` + + // The number of seconds the client should cache a preflight response. + // +kubebuilder:validation:Optional + MaxAgeInSeconds *float64 `json:"maxAgeInSeconds" tf:"max_age_in_seconds,omitempty"` +} + +type QueuePropertiesInitParameters struct { + + // A cors_rule block as defined above. + CorsRule []QueuePropertiesCorsRuleInitParameters `json:"corsRule,omitempty" tf:"cors_rule,omitempty"` + + // A hour_metrics block as defined below. + HourMetrics *HourMetricsInitParameters `json:"hourMetrics,omitempty" tf:"hour_metrics,omitempty"` + + // A logging block as defined below. 
+ Logging *LoggingInitParameters `json:"logging,omitempty" tf:"logging,omitempty"` + + // A minute_metrics block as defined below. + MinuteMetrics *MinuteMetricsInitParameters `json:"minuteMetrics,omitempty" tf:"minute_metrics,omitempty"` +} + +type QueuePropertiesObservation struct { + + // A cors_rule block as defined above. + CorsRule []QueuePropertiesCorsRuleObservation `json:"corsRule,omitempty" tf:"cors_rule,omitempty"` + + // A hour_metrics block as defined below. + HourMetrics *HourMetricsObservation `json:"hourMetrics,omitempty" tf:"hour_metrics,omitempty"` + + // A logging block as defined below. + Logging *LoggingObservation `json:"logging,omitempty" tf:"logging,omitempty"` + + // A minute_metrics block as defined below. + MinuteMetrics *MinuteMetricsObservation `json:"minuteMetrics,omitempty" tf:"minute_metrics,omitempty"` +} + +type QueuePropertiesParameters struct { + + // A cors_rule block as defined above. + // +kubebuilder:validation:Optional + CorsRule []QueuePropertiesCorsRuleParameters `json:"corsRule,omitempty" tf:"cors_rule,omitempty"` + + // A hour_metrics block as defined below. + // +kubebuilder:validation:Optional + HourMetrics *HourMetricsParameters `json:"hourMetrics,omitempty" tf:"hour_metrics,omitempty"` + + // A logging block as defined below. + // +kubebuilder:validation:Optional + Logging *LoggingParameters `json:"logging,omitempty" tf:"logging,omitempty"` + + // A minute_metrics block as defined below. + // +kubebuilder:validation:Optional + MinuteMetrics *MinuteMetricsParameters `json:"minuteMetrics,omitempty" tf:"minute_metrics,omitempty"` +} + +type RestorePolicyInitParameters struct { + + // Specifies the number of days that the azurerm_storage_share should be retained, between 1 and 365 days. Defaults to 7. + Days *float64 `json:"days,omitempty" tf:"days,omitempty"` +} + +type RestorePolicyObservation struct { + + // Specifies the number of days that the azurerm_storage_share should be retained, between 1 and 365 days. 
Defaults to 7. + Days *float64 `json:"days,omitempty" tf:"days,omitempty"` +} + +type RestorePolicyParameters struct { + + // Specifies the number of days that the azurerm_storage_share should be retained, between 1 and 365 days. Defaults to 7. + // +kubebuilder:validation:Optional + Days *float64 `json:"days" tf:"days,omitempty"` +} + +type RetentionPolicyInitParameters struct { + + // Specifies the number of days that the azurerm_storage_share should be retained, between 1 and 365 days. Defaults to 7. + Days *float64 `json:"days,omitempty" tf:"days,omitempty"` +} + +type RetentionPolicyObservation struct { + + // Specifies the number of days that the azurerm_storage_share should be retained, between 1 and 365 days. Defaults to 7. + Days *float64 `json:"days,omitempty" tf:"days,omitempty"` +} + +type RetentionPolicyParameters struct { + + // Specifies the number of days that the azurerm_storage_share should be retained, between 1 and 365 days. Defaults to 7. + // +kubebuilder:validation:Optional + Days *float64 `json:"days,omitempty" tf:"days,omitempty"` +} + +type RoutingInitParameters struct { + + // Specifies the kind of network routing opted by the user. Possible values are InternetRouting and MicrosoftRouting. Defaults to MicrosoftRouting. + Choice *string `json:"choice,omitempty" tf:"choice,omitempty"` + + // Should internet routing storage endpoints be published? Defaults to false. + PublishInternetEndpoints *bool `json:"publishInternetEndpoints,omitempty" tf:"publish_internet_endpoints,omitempty"` + + // Should Microsoft routing storage endpoints be published? Defaults to false. + PublishMicrosoftEndpoints *bool `json:"publishMicrosoftEndpoints,omitempty" tf:"publish_microsoft_endpoints,omitempty"` +} + +type RoutingObservation struct { + + // Specifies the kind of network routing opted by the user. Possible values are InternetRouting and MicrosoftRouting. Defaults to MicrosoftRouting. 
+ Choice *string `json:"choice,omitempty" tf:"choice,omitempty"` + + // Should internet routing storage endpoints be published? Defaults to false. + PublishInternetEndpoints *bool `json:"publishInternetEndpoints,omitempty" tf:"publish_internet_endpoints,omitempty"` + + // Should Microsoft routing storage endpoints be published? Defaults to false. + PublishMicrosoftEndpoints *bool `json:"publishMicrosoftEndpoints,omitempty" tf:"publish_microsoft_endpoints,omitempty"` +} + +type RoutingParameters struct { + + // Specifies the kind of network routing opted by the user. Possible values are InternetRouting and MicrosoftRouting. Defaults to MicrosoftRouting. + // +kubebuilder:validation:Optional + Choice *string `json:"choice,omitempty" tf:"choice,omitempty"` + + // Should internet routing storage endpoints be published? Defaults to false. + // +kubebuilder:validation:Optional + PublishInternetEndpoints *bool `json:"publishInternetEndpoints,omitempty" tf:"publish_internet_endpoints,omitempty"` + + // Should Microsoft routing storage endpoints be published? Defaults to false. + // +kubebuilder:validation:Optional + PublishMicrosoftEndpoints *bool `json:"publishMicrosoftEndpoints,omitempty" tf:"publish_microsoft_endpoints,omitempty"` +} + +type SASPolicyInitParameters struct { + + // The SAS expiration action. The only possible value is Log at this moment. Defaults to Log. + ExpirationAction *string `json:"expirationAction,omitempty" tf:"expiration_action,omitempty"` + + // The SAS expiration period in format of DD.HH:MM:SS. + ExpirationPeriod *string `json:"expirationPeriod,omitempty" tf:"expiration_period,omitempty"` +} + +type SASPolicyObservation struct { + + // The SAS expiration action. The only possible value is Log at this moment. Defaults to Log. + ExpirationAction *string `json:"expirationAction,omitempty" tf:"expiration_action,omitempty"` + + // The SAS expiration period in format of DD.HH:MM:SS. 
+ ExpirationPeriod *string `json:"expirationPeriod,omitempty" tf:"expiration_period,omitempty"` +} + +type SASPolicyParameters struct { + + // The SAS expiration action. The only possible value is Log at this moment. Defaults to Log. + // +kubebuilder:validation:Optional + ExpirationAction *string `json:"expirationAction,omitempty" tf:"expiration_action,omitempty"` + + // The SAS expiration period in format of DD.HH:MM:SS. + // +kubebuilder:validation:Optional + ExpirationPeriod *string `json:"expirationPeriod" tf:"expiration_period,omitempty"` +} + +type SMBInitParameters struct { + + // A set of SMB authentication methods. Possible values are NTLMv2, and Kerberos. + // +listType=set + AuthenticationTypes []*string `json:"authenticationTypes,omitempty" tf:"authentication_types,omitempty"` + + // A set of SMB channel encryption. Possible values are AES-128-CCM, AES-128-GCM, and AES-256-GCM. + // +listType=set + ChannelEncryptionType []*string `json:"channelEncryptionType,omitempty" tf:"channel_encryption_type,omitempty"` + + // A set of Kerberos ticket encryption. Possible values are RC4-HMAC, and AES-256. + // +listType=set + KerberosTicketEncryptionType []*string `json:"kerberosTicketEncryptionType,omitempty" tf:"kerberos_ticket_encryption_type,omitempty"` + + // Indicates whether multichannel is enabled. Defaults to false. This is only supported on Premium storage accounts. + MultichannelEnabled *bool `json:"multichannelEnabled,omitempty" tf:"multichannel_enabled,omitempty"` + + // A set of SMB protocol versions. Possible values are SMB2.1, SMB3.0, and SMB3.1.1. + // +listType=set + Versions []*string `json:"versions,omitempty" tf:"versions,omitempty"` +} + +type SMBObservation struct { + + // A set of SMB authentication methods. Possible values are NTLMv2, and Kerberos. + // +listType=set + AuthenticationTypes []*string `json:"authenticationTypes,omitempty" tf:"authentication_types,omitempty"` + + // A set of SMB channel encryption. 
Possible values are AES-128-CCM, AES-128-GCM, and AES-256-GCM. + // +listType=set + ChannelEncryptionType []*string `json:"channelEncryptionType,omitempty" tf:"channel_encryption_type,omitempty"` + + // A set of Kerberos ticket encryption. Possible values are RC4-HMAC, and AES-256. + // +listType=set + KerberosTicketEncryptionType []*string `json:"kerberosTicketEncryptionType,omitempty" tf:"kerberos_ticket_encryption_type,omitempty"` + + // Indicates whether multichannel is enabled. Defaults to false. This is only supported on Premium storage accounts. + MultichannelEnabled *bool `json:"multichannelEnabled,omitempty" tf:"multichannel_enabled,omitempty"` + + // A set of SMB protocol versions. Possible values are SMB2.1, SMB3.0, and SMB3.1.1. + // +listType=set + Versions []*string `json:"versions,omitempty" tf:"versions,omitempty"` +} + +type SMBParameters struct { + + // A set of SMB authentication methods. Possible values are NTLMv2, and Kerberos. + // +kubebuilder:validation:Optional + // +listType=set + AuthenticationTypes []*string `json:"authenticationTypes,omitempty" tf:"authentication_types,omitempty"` + + // A set of SMB channel encryption. Possible values are AES-128-CCM, AES-128-GCM, and AES-256-GCM. + // +kubebuilder:validation:Optional + // +listType=set + ChannelEncryptionType []*string `json:"channelEncryptionType,omitempty" tf:"channel_encryption_type,omitempty"` + + // A set of Kerberos ticket encryption. Possible values are RC4-HMAC, and AES-256. + // +kubebuilder:validation:Optional + // +listType=set + KerberosTicketEncryptionType []*string `json:"kerberosTicketEncryptionType,omitempty" tf:"kerberos_ticket_encryption_type,omitempty"` + + // Indicates whether multichannel is enabled. Defaults to false. This is only supported on Premium storage accounts. + // +kubebuilder:validation:Optional + MultichannelEnabled *bool `json:"multichannelEnabled,omitempty" tf:"multichannel_enabled,omitempty"` + + // A set of SMB protocol versions. 
Possible values are SMB2.1, SMB3.0, and SMB3.1.1. + // +kubebuilder:validation:Optional + // +listType=set + Versions []*string `json:"versions,omitempty" tf:"versions,omitempty"` +} + +type SharePropertiesCorsRuleInitParameters struct { + + // A list of headers that are allowed to be a part of the cross-origin request. + AllowedHeaders []*string `json:"allowedHeaders,omitempty" tf:"allowed_headers,omitempty"` + + // A list of HTTP methods that are allowed to be executed by the origin. Valid options are + // DELETE, GET, HEAD, MERGE, POST, OPTIONS, PUT or PATCH. + AllowedMethods []*string `json:"allowedMethods,omitempty" tf:"allowed_methods,omitempty"` + + // A list of origin domains that will be allowed by CORS. + AllowedOrigins []*string `json:"allowedOrigins,omitempty" tf:"allowed_origins,omitempty"` + + // A list of response headers that are exposed to CORS clients. + ExposedHeaders []*string `json:"exposedHeaders,omitempty" tf:"exposed_headers,omitempty"` + + // The number of seconds the client should cache a preflight response. + MaxAgeInSeconds *float64 `json:"maxAgeInSeconds,omitempty" tf:"max_age_in_seconds,omitempty"` +} + +type SharePropertiesCorsRuleObservation struct { + + // A list of headers that are allowed to be a part of the cross-origin request. + AllowedHeaders []*string `json:"allowedHeaders,omitempty" tf:"allowed_headers,omitempty"` + + // A list of HTTP methods that are allowed to be executed by the origin. Valid options are + // DELETE, GET, HEAD, MERGE, POST, OPTIONS, PUT or PATCH. + AllowedMethods []*string `json:"allowedMethods,omitempty" tf:"allowed_methods,omitempty"` + + // A list of origin domains that will be allowed by CORS. + AllowedOrigins []*string `json:"allowedOrigins,omitempty" tf:"allowed_origins,omitempty"` + + // A list of response headers that are exposed to CORS clients. 
+ ExposedHeaders []*string `json:"exposedHeaders,omitempty" tf:"exposed_headers,omitempty"` + + // The number of seconds the client should cache a preflight response. + MaxAgeInSeconds *float64 `json:"maxAgeInSeconds,omitempty" tf:"max_age_in_seconds,omitempty"` +} + +type SharePropertiesCorsRuleParameters struct { + + // A list of headers that are allowed to be a part of the cross-origin request. + // +kubebuilder:validation:Optional + AllowedHeaders []*string `json:"allowedHeaders" tf:"allowed_headers,omitempty"` + + // A list of HTTP methods that are allowed to be executed by the origin. Valid options are + // DELETE, GET, HEAD, MERGE, POST, OPTIONS, PUT or PATCH. + // +kubebuilder:validation:Optional + AllowedMethods []*string `json:"allowedMethods" tf:"allowed_methods,omitempty"` + + // A list of origin domains that will be allowed by CORS. + // +kubebuilder:validation:Optional + AllowedOrigins []*string `json:"allowedOrigins" tf:"allowed_origins,omitempty"` + + // A list of response headers that are exposed to CORS clients. + // +kubebuilder:validation:Optional + ExposedHeaders []*string `json:"exposedHeaders" tf:"exposed_headers,omitempty"` + + // The number of seconds the client should cache a preflight response. + // +kubebuilder:validation:Optional + MaxAgeInSeconds *float64 `json:"maxAgeInSeconds" tf:"max_age_in_seconds,omitempty"` +} + +type SharePropertiesInitParameters struct { + + // A cors_rule block as defined below. + CorsRule []SharePropertiesCorsRuleInitParameters `json:"corsRule,omitempty" tf:"cors_rule,omitempty"` + + // A retention_policy block as defined below. + RetentionPolicy *RetentionPolicyInitParameters `json:"retentionPolicy,omitempty" tf:"retention_policy,omitempty"` + + // A smb block as defined below. + SMB *SMBInitParameters `json:"smb,omitempty" tf:"smb,omitempty"` +} + +type SharePropertiesObservation struct { + + // A cors_rule block as defined below. 
+ CorsRule []SharePropertiesCorsRuleObservation `json:"corsRule,omitempty" tf:"cors_rule,omitempty"` + + // A retention_policy block as defined below. + RetentionPolicy *RetentionPolicyObservation `json:"retentionPolicy,omitempty" tf:"retention_policy,omitempty"` + + // A smb block as defined below. + SMB *SMBObservation `json:"smb,omitempty" tf:"smb,omitempty"` +} + +type SharePropertiesParameters struct { + + // A cors_rule block as defined below. + // +kubebuilder:validation:Optional + CorsRule []SharePropertiesCorsRuleParameters `json:"corsRule,omitempty" tf:"cors_rule,omitempty"` + + // A retention_policy block as defined below. + // +kubebuilder:validation:Optional + RetentionPolicy *RetentionPolicyParameters `json:"retentionPolicy,omitempty" tf:"retention_policy,omitempty"` + + // A smb block as defined below. + // +kubebuilder:validation:Optional + SMB *SMBParameters `json:"smb,omitempty" tf:"smb,omitempty"` +} + +type StaticWebsiteInitParameters struct { + + // The absolute path to a custom webpage that should be used when a request is made which does not correspond to an existing file. + Error404Document *string `json:"error404Document,omitempty" tf:"error_404_document,omitempty"` + + // The webpage that Azure Storage serves for requests to the root of a website or any subfolder. For example, index.html. The value is case-sensitive. + IndexDocument *string `json:"indexDocument,omitempty" tf:"index_document,omitempty"` +} + +type StaticWebsiteObservation struct { + + // The absolute path to a custom webpage that should be used when a request is made which does not correspond to an existing file. + Error404Document *string `json:"error404Document,omitempty" tf:"error_404_document,omitempty"` + + // The webpage that Azure Storage serves for requests to the root of a website or any subfolder. For example, index.html. The value is case-sensitive. 
+ IndexDocument *string `json:"indexDocument,omitempty" tf:"index_document,omitempty"` +} + +type StaticWebsiteParameters struct { + + // The absolute path to a custom webpage that should be used when a request is made which does not correspond to an existing file. + // +kubebuilder:validation:Optional + Error404Document *string `json:"error404Document,omitempty" tf:"error_404_document,omitempty"` + + // The webpage that Azure Storage serves for requests to the root of a website or any subfolder. For example, index.html. The value is case-sensitive. + // +kubebuilder:validation:Optional + IndexDocument *string `json:"indexDocument,omitempty" tf:"index_document,omitempty"` +} + +// AccountSpec defines the desired state of Account +type AccountSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider AccountParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider AccountInitParameters `json:"initProvider,omitempty"` +} + +// AccountStatus defines the observed state of Account. +type AccountStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider AccountObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Account is the Schema for the Accounts API. Manages a Azure Storage Account. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type Account struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.accountReplicationType) || (has(self.initProvider) && has(self.initProvider.accountReplicationType))",message="spec.forProvider.accountReplicationType is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.accountTier) || (has(self.initProvider) && has(self.initProvider.accountTier))",message="spec.forProvider.accountTier is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + Spec AccountSpec `json:"spec"` + Status AccountStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// AccountList contains a list of Accounts +type AccountList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Account `json:"items"` +} + +// Repository type metadata. 
+var ( + Account_Kind = "Account" + Account_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Account_Kind}.String() + Account_KindAPIVersion = Account_Kind + "." + CRDGroupVersion.String() + Account_GroupVersionKind = CRDGroupVersion.WithKind(Account_Kind) +) + +func init() { + SchemeBuilder.Register(&Account{}, &AccountList{}) +} diff --git a/apis/storage/v1beta2/zz_accountlocaluser_terraformed.go b/apis/storage/v1beta2/zz_accountlocaluser_terraformed.go new file mode 100755 index 000000000..e62eac4bc --- /dev/null +++ b/apis/storage/v1beta2/zz_accountlocaluser_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this AccountLocalUser +func (mg *AccountLocalUser) GetTerraformResourceType() string { + return "azurerm_storage_account_local_user" +} + +// GetConnectionDetailsMapping for this AccountLocalUser +func (tr *AccountLocalUser) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"password": "status.atProvider.password", "sid": "status.atProvider.sid"} +} + +// GetObservation of this AccountLocalUser +func (tr *AccountLocalUser) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this AccountLocalUser +func (tr *AccountLocalUser) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this 
AccountLocalUser +func (tr *AccountLocalUser) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this AccountLocalUser +func (tr *AccountLocalUser) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this AccountLocalUser +func (tr *AccountLocalUser) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this AccountLocalUser +func (tr *AccountLocalUser) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this AccountLocalUser +func (tr *AccountLocalUser) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this AccountLocalUser using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *AccountLocalUser) LateInitialize(attrs []byte) (bool, error) { + params := &AccountLocalUserParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *AccountLocalUser) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/storage/v1beta2/zz_accountlocaluser_types.go b/apis/storage/v1beta2/zz_accountlocaluser_types.go new file mode 100755 index 000000000..1c1a0bbf6 --- /dev/null +++ b/apis/storage/v1beta2/zz_accountlocaluser_types.go @@ -0,0 +1,298 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AccountLocalUserInitParameters struct { + + // The home directory of the Storage Account Local User. + HomeDirectory *string `json:"homeDirectory,omitempty" tf:"home_directory,omitempty"` + + // One or more permission_scope blocks as defined below. 
+ PermissionScope []PermissionScopeInitParameters `json:"permissionScope,omitempty" tf:"permission_scope,omitempty"` + + // One or more ssh_authorized_key blocks as defined below. + SSHAuthorizedKey []SSHAuthorizedKeyInitParameters `json:"sshAuthorizedKey,omitempty" tf:"ssh_authorized_key,omitempty"` + + // Specifies whether SSH Key Authentication is enabled. Defaults to false. + SSHKeyEnabled *bool `json:"sshKeyEnabled,omitempty" tf:"ssh_key_enabled,omitempty"` + + // Specifies whether SSH Password Authentication is enabled. Defaults to false. + SSHPasswordEnabled *bool `json:"sshPasswordEnabled,omitempty" tf:"ssh_password_enabled,omitempty"` +} + +type AccountLocalUserObservation struct { + + // The home directory of the Storage Account Local User. + HomeDirectory *string `json:"homeDirectory,omitempty" tf:"home_directory,omitempty"` + + // The ID of the Storage Account Local User. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // One or more permission_scope blocks as defined below. + PermissionScope []PermissionScopeObservation `json:"permissionScope,omitempty" tf:"permission_scope,omitempty"` + + // One or more ssh_authorized_key blocks as defined below. + SSHAuthorizedKey []SSHAuthorizedKeyObservation `json:"sshAuthorizedKey,omitempty" tf:"ssh_authorized_key,omitempty"` + + // Specifies whether SSH Key Authentication is enabled. Defaults to false. + SSHKeyEnabled *bool `json:"sshKeyEnabled,omitempty" tf:"ssh_key_enabled,omitempty"` + + // Specifies whether SSH Password Authentication is enabled. Defaults to false. + SSHPasswordEnabled *bool `json:"sshPasswordEnabled,omitempty" tf:"ssh_password_enabled,omitempty"` + + // The ID of the Storage Account that this Storage Account Local User resides in. Changing this forces a new Storage Account Local User to be created. 
+ StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` +} + +type AccountLocalUserParameters struct { + + // The home directory of the Storage Account Local User. + // +kubebuilder:validation:Optional + HomeDirectory *string `json:"homeDirectory,omitempty" tf:"home_directory,omitempty"` + + // One or more permission_scope blocks as defined below. + // +kubebuilder:validation:Optional + PermissionScope []PermissionScopeParameters `json:"permissionScope,omitempty" tf:"permission_scope,omitempty"` + + // One or more ssh_authorized_key blocks as defined below. + // +kubebuilder:validation:Optional + SSHAuthorizedKey []SSHAuthorizedKeyParameters `json:"sshAuthorizedKey,omitempty" tf:"ssh_authorized_key,omitempty"` + + // Specifies whether SSH Key Authentication is enabled. Defaults to false. + // +kubebuilder:validation:Optional + SSHKeyEnabled *bool `json:"sshKeyEnabled,omitempty" tf:"ssh_key_enabled,omitempty"` + + // Specifies whether SSH Password Authentication is enabled. Defaults to false. + // +kubebuilder:validation:Optional + SSHPasswordEnabled *bool `json:"sshPasswordEnabled,omitempty" tf:"ssh_password_enabled,omitempty"` + + // The ID of the Storage Account that this Storage Account Local User resides in. Changing this forces a new Storage Account Local User to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` + + // Reference to a Account in storage to populate storageAccountId. + // +kubebuilder:validation:Optional + StorageAccountIDRef *v1.Reference `json:"storageAccountIdRef,omitempty" tf:"-"` + + // Selector for a Account in storage to populate storageAccountId. 
+ // +kubebuilder:validation:Optional + StorageAccountIDSelector *v1.Selector `json:"storageAccountIdSelector,omitempty" tf:"-"` +} + +type PermissionScopeInitParameters struct { + + // A permissions block as defined below. + Permissions *PermissionsInitParameters `json:"permissions,omitempty" tf:"permissions,omitempty"` + + // The container name (when service is set to blob) or the file share name (when service is set to file), used by the Storage Account Local User. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Container + ResourceName *string `json:"resourceName,omitempty" tf:"resource_name,omitempty"` + + // Reference to a Container in storage to populate resourceName. + // +kubebuilder:validation:Optional + ResourceNameRef *v1.Reference `json:"resourceNameRef,omitempty" tf:"-"` + + // Selector for a Container in storage to populate resourceName. + // +kubebuilder:validation:Optional + ResourceNameSelector *v1.Selector `json:"resourceNameSelector,omitempty" tf:"-"` + + // The storage service used by this Storage Account Local User. Possible values are blob and file. + Service *string `json:"service,omitempty" tf:"service,omitempty"` +} + +type PermissionScopeObservation struct { + + // A permissions block as defined below. + Permissions *PermissionsObservation `json:"permissions,omitempty" tf:"permissions,omitempty"` + + // The container name (when service is set to blob) or the file share name (when service is set to file), used by the Storage Account Local User. + ResourceName *string `json:"resourceName,omitempty" tf:"resource_name,omitempty"` + + // The storage service used by this Storage Account Local User. Possible values are blob and file. + Service *string `json:"service,omitempty" tf:"service,omitempty"` +} + +type PermissionScopeParameters struct { + + // A permissions block as defined below. 
+ // +kubebuilder:validation:Optional + Permissions *PermissionsParameters `json:"permissions" tf:"permissions,omitempty"` + + // The container name (when service is set to blob) or the file share name (when service is set to file), used by the Storage Account Local User. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Container + // +kubebuilder:validation:Optional + ResourceName *string `json:"resourceName,omitempty" tf:"resource_name,omitempty"` + + // Reference to a Container in storage to populate resourceName. + // +kubebuilder:validation:Optional + ResourceNameRef *v1.Reference `json:"resourceNameRef,omitempty" tf:"-"` + + // Selector for a Container in storage to populate resourceName. + // +kubebuilder:validation:Optional + ResourceNameSelector *v1.Selector `json:"resourceNameSelector,omitempty" tf:"-"` + + // The storage service used by this Storage Account Local User. Possible values are blob and file. + // +kubebuilder:validation:Optional + Service *string `json:"service" tf:"service,omitempty"` +} + +type PermissionsInitParameters struct { + + // (Defaults to 30 minutes) Used when creating the Storage Account Local User. + Create *bool `json:"create,omitempty" tf:"create,omitempty"` + + // (Defaults to 30 minutes) Used when deleting the Storage Account Local User. + Delete *bool `json:"delete,omitempty" tf:"delete,omitempty"` + + // Specifies if the Local User has the list permission for this scope. Defaults to false. + List *bool `json:"list,omitempty" tf:"list,omitempty"` + + // (Defaults to 5 minutes) Used when retrieving the Storage Account Local User. + Read *bool `json:"read,omitempty" tf:"read,omitempty"` + + // Specifies if the Local User has the write permission for this scope. Defaults to false. + Write *bool `json:"write,omitempty" tf:"write,omitempty"` +} + +type PermissionsObservation struct { + + // (Defaults to 30 minutes) Used when creating the Storage Account Local User. 
+ Create *bool `json:"create,omitempty" tf:"create,omitempty"` + + // (Defaults to 30 minutes) Used when deleting the Storage Account Local User. + Delete *bool `json:"delete,omitempty" tf:"delete,omitempty"` + + // Specifies if the Local User has the list permission for this scope. Defaults to false. + List *bool `json:"list,omitempty" tf:"list,omitempty"` + + // (Defaults to 5 minutes) Used when retrieving the Storage Account Local User. + Read *bool `json:"read,omitempty" tf:"read,omitempty"` + + // Specifies if the Local User has the write permission for this scope. Defaults to false. + Write *bool `json:"write,omitempty" tf:"write,omitempty"` +} + +type PermissionsParameters struct { + + // (Defaults to 30 minutes) Used when creating the Storage Account Local User. + // +kubebuilder:validation:Optional + Create *bool `json:"create,omitempty" tf:"create,omitempty"` + + // (Defaults to 30 minutes) Used when deleting the Storage Account Local User. + // +kubebuilder:validation:Optional + Delete *bool `json:"delete,omitempty" tf:"delete,omitempty"` + + // Specifies if the Local User has the list permission for this scope. Defaults to false. + // +kubebuilder:validation:Optional + List *bool `json:"list,omitempty" tf:"list,omitempty"` + + // (Defaults to 5 minutes) Used when retrieving the Storage Account Local User. + // +kubebuilder:validation:Optional + Read *bool `json:"read,omitempty" tf:"read,omitempty"` + + // Specifies if the Local User has the write permission for this scope. Defaults to false. + // +kubebuilder:validation:Optional + Write *bool `json:"write,omitempty" tf:"write,omitempty"` +} + +type SSHAuthorizedKeyInitParameters struct { + + // The description of this SSH authorized key. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The public key value of this SSH authorized key. 
+ Key *string `json:"key,omitempty" tf:"key,omitempty"` +} + +type SSHAuthorizedKeyObservation struct { + + // The description of this SSH authorized key. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The public key value of this SSH authorized key. + Key *string `json:"key,omitempty" tf:"key,omitempty"` +} + +type SSHAuthorizedKeyParameters struct { + + // The description of this SSH authorized key. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The public key value of this SSH authorized key. + // +kubebuilder:validation:Optional + Key *string `json:"key" tf:"key,omitempty"` +} + +// AccountLocalUserSpec defines the desired state of AccountLocalUser +type AccountLocalUserSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider AccountLocalUserParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider AccountLocalUserInitParameters `json:"initProvider,omitempty"` +} + +// AccountLocalUserStatus defines the observed state of AccountLocalUser. 
+type AccountLocalUserStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider AccountLocalUserObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// AccountLocalUser is the Schema for the AccountLocalUsers API. Manages a Storage Account Local User. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type AccountLocalUser struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec AccountLocalUserSpec `json:"spec"` + Status AccountLocalUserStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// AccountLocalUserList contains a list of AccountLocalUsers +type AccountLocalUserList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []AccountLocalUser `json:"items"` +} + +// Repository type metadata. +var ( + AccountLocalUser_Kind = "AccountLocalUser" + AccountLocalUser_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: AccountLocalUser_Kind}.String() + AccountLocalUser_KindAPIVersion = AccountLocalUser_Kind + "." 
+ CRDGroupVersion.String() + AccountLocalUser_GroupVersionKind = CRDGroupVersion.WithKind(AccountLocalUser_Kind) +) + +func init() { + SchemeBuilder.Register(&AccountLocalUser{}, &AccountLocalUserList{}) +} diff --git a/apis/storage/v1beta2/zz_blobinventorypolicy_terraformed.go b/apis/storage/v1beta2/zz_blobinventorypolicy_terraformed.go new file mode 100755 index 000000000..f5de5e756 --- /dev/null +++ b/apis/storage/v1beta2/zz_blobinventorypolicy_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this BlobInventoryPolicy +func (mg *BlobInventoryPolicy) GetTerraformResourceType() string { + return "azurerm_storage_blob_inventory_policy" +} + +// GetConnectionDetailsMapping for this BlobInventoryPolicy +func (tr *BlobInventoryPolicy) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this BlobInventoryPolicy +func (tr *BlobInventoryPolicy) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this BlobInventoryPolicy +func (tr *BlobInventoryPolicy) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this BlobInventoryPolicy +func (tr *BlobInventoryPolicy) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this 
BlobInventoryPolicy +func (tr *BlobInventoryPolicy) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this BlobInventoryPolicy +func (tr *BlobInventoryPolicy) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this BlobInventoryPolicy +func (tr *BlobInventoryPolicy) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this BlobInventoryPolicy +func (tr *BlobInventoryPolicy) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this BlobInventoryPolicy using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *BlobInventoryPolicy) LateInitialize(attrs []byte) (bool, error) { + params := &BlobInventoryPolicyParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *BlobInventoryPolicy) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/storage/v1beta2/zz_blobinventorypolicy_types.go b/apis/storage/v1beta2/zz_blobinventorypolicy_types.go new file mode 100755 index 000000000..a8f3f65b3 --- /dev/null +++ b/apis/storage/v1beta2/zz_blobinventorypolicy_types.go @@ -0,0 +1,302 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type BlobInventoryPolicyInitParameters struct { + + // One or more rules blocks as defined below. + Rules []RulesInitParameters `json:"rules,omitempty" tf:"rules,omitempty"` + + // The ID of the storage account to apply this Blob Inventory Policy to. 
Changing this forces a new Storage Blob Inventory Policy to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` + + // Reference to a Account in storage to populate storageAccountId. + // +kubebuilder:validation:Optional + StorageAccountIDRef *v1.Reference `json:"storageAccountIdRef,omitempty" tf:"-"` + + // Selector for a Account in storage to populate storageAccountId. + // +kubebuilder:validation:Optional + StorageAccountIDSelector *v1.Selector `json:"storageAccountIdSelector,omitempty" tf:"-"` +} + +type BlobInventoryPolicyObservation struct { + + // The ID of the Storage Blob Inventory Policy. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // One or more rules blocks as defined below. + Rules []RulesObservation `json:"rules,omitempty" tf:"rules,omitempty"` + + // The ID of the storage account to apply this Blob Inventory Policy to. Changing this forces a new Storage Blob Inventory Policy to be created. + StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` +} + +type BlobInventoryPolicyParameters struct { + + // One or more rules blocks as defined below. + // +kubebuilder:validation:Optional + Rules []RulesParameters `json:"rules,omitempty" tf:"rules,omitempty"` + + // The ID of the storage account to apply this Blob Inventory Policy to. Changing this forces a new Storage Blob Inventory Policy to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` + + // Reference to a Account in storage to populate storageAccountId. + // +kubebuilder:validation:Optional + StorageAccountIDRef *v1.Reference `json:"storageAccountIdRef,omitempty" tf:"-"` + + // Selector for a Account in storage to populate storageAccountId. + // +kubebuilder:validation:Optional + StorageAccountIDSelector *v1.Selector `json:"storageAccountIdSelector,omitempty" tf:"-"` +} + +type FilterInitParameters struct { + + // A set of blob types. Possible values are blockBlob, appendBlob, and pageBlob. The storage account with is_hns_enabled is true doesn't support pageBlob. + // +listType=set + BlobTypes []*string `json:"blobTypes,omitempty" tf:"blob_types,omitempty"` + + // A set of strings for blob prefixes to be excluded. Maximum of 10 blob prefixes. + // +listType=set + ExcludePrefixes []*string `json:"excludePrefixes,omitempty" tf:"exclude_prefixes,omitempty"` + + // Includes blob versions in blob inventory or not? Defaults to false. + IncludeBlobVersions *bool `json:"includeBlobVersions,omitempty" tf:"include_blob_versions,omitempty"` + + // Includes deleted blobs in blob inventory or not? Defaults to false. + IncludeDeleted *bool `json:"includeDeleted,omitempty" tf:"include_deleted,omitempty"` + + // Includes blob snapshots in blob inventory or not? Defaults to false. + IncludeSnapshots *bool `json:"includeSnapshots,omitempty" tf:"include_snapshots,omitempty"` + + // A set of strings for blob prefixes to be matched. Maximum of 10 blob prefixes. + // +listType=set + PrefixMatch []*string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` +} + +type FilterObservation struct { + + // A set of blob types. 
Possible values are blockBlob, appendBlob, and pageBlob. The storage account with is_hns_enabled is true doesn't support pageBlob. + // +listType=set + BlobTypes []*string `json:"blobTypes,omitempty" tf:"blob_types,omitempty"` + + // A set of strings for blob prefixes to be excluded. Maximum of 10 blob prefixes. + // +listType=set + ExcludePrefixes []*string `json:"excludePrefixes,omitempty" tf:"exclude_prefixes,omitempty"` + + // Includes blob versions in blob inventory or not? Defaults to false. + IncludeBlobVersions *bool `json:"includeBlobVersions,omitempty" tf:"include_blob_versions,omitempty"` + + // Includes deleted blobs in blob inventory or not? Defaults to false. + IncludeDeleted *bool `json:"includeDeleted,omitempty" tf:"include_deleted,omitempty"` + + // Includes blob snapshots in blob inventory or not? Defaults to false. + IncludeSnapshots *bool `json:"includeSnapshots,omitempty" tf:"include_snapshots,omitempty"` + + // A set of strings for blob prefixes to be matched. Maximum of 10 blob prefixes. + // +listType=set + PrefixMatch []*string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` +} + +type FilterParameters struct { + + // A set of blob types. Possible values are blockBlob, appendBlob, and pageBlob. The storage account with is_hns_enabled is true doesn't support pageBlob. + // +kubebuilder:validation:Optional + // +listType=set + BlobTypes []*string `json:"blobTypes" tf:"blob_types,omitempty"` + + // A set of strings for blob prefixes to be excluded. Maximum of 10 blob prefixes. + // +kubebuilder:validation:Optional + // +listType=set + ExcludePrefixes []*string `json:"excludePrefixes,omitempty" tf:"exclude_prefixes,omitempty"` + + // Includes blob versions in blob inventory or not? Defaults to false. + // +kubebuilder:validation:Optional + IncludeBlobVersions *bool `json:"includeBlobVersions,omitempty" tf:"include_blob_versions,omitempty"` + + // Includes deleted blobs in blob inventory or not? Defaults to false. 
+ // +kubebuilder:validation:Optional + IncludeDeleted *bool `json:"includeDeleted,omitempty" tf:"include_deleted,omitempty"` + + // Includes blob snapshots in blob inventory or not? Defaults to false. + // +kubebuilder:validation:Optional + IncludeSnapshots *bool `json:"includeSnapshots,omitempty" tf:"include_snapshots,omitempty"` + + // A set of strings for blob prefixes to be matched. Maximum of 10 blob prefixes. + // +kubebuilder:validation:Optional + // +listType=set + PrefixMatch []*string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` +} + +type RulesInitParameters struct { + + // A filter block as defined above. Can only be set when the scope is Blob. + Filter *FilterInitParameters `json:"filter,omitempty" tf:"filter,omitempty"` + + // The format of the inventory files. Possible values are Csv and Parquet. + Format *string `json:"format,omitempty" tf:"format,omitempty"` + + // The name which should be used for this Blob Inventory Policy Rule. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The inventory schedule applied by this rule. Possible values are Daily and Weekly. + Schedule *string `json:"schedule,omitempty" tf:"schedule,omitempty"` + + // A list of fields to be included in the inventory. See the Azure API reference for all the supported fields. + SchemaFields []*string `json:"schemaFields,omitempty" tf:"schema_fields,omitempty"` + + // The scope of the inventory for this rule. Possible values are Blob and Container. + Scope *string `json:"scope,omitempty" tf:"scope,omitempty"` + + // The storage container name to store the blob inventory files for this rule. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Container + StorageContainerName *string `json:"storageContainerName,omitempty" tf:"storage_container_name,omitempty"` + + // Reference to a Container in storage to populate storageContainerName. 
+ // +kubebuilder:validation:Optional + StorageContainerNameRef *v1.Reference `json:"storageContainerNameRef,omitempty" tf:"-"` + + // Selector for a Container in storage to populate storageContainerName. + // +kubebuilder:validation:Optional + StorageContainerNameSelector *v1.Selector `json:"storageContainerNameSelector,omitempty" tf:"-"` +} + +type RulesObservation struct { + + // A filter block as defined above. Can only be set when the scope is Blob. + Filter *FilterObservation `json:"filter,omitempty" tf:"filter,omitempty"` + + // The format of the inventory files. Possible values are Csv and Parquet. + Format *string `json:"format,omitempty" tf:"format,omitempty"` + + // The name which should be used for this Blob Inventory Policy Rule. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The inventory schedule applied by this rule. Possible values are Daily and Weekly. + Schedule *string `json:"schedule,omitempty" tf:"schedule,omitempty"` + + // A list of fields to be included in the inventory. See the Azure API reference for all the supported fields. + SchemaFields []*string `json:"schemaFields,omitempty" tf:"schema_fields,omitempty"` + + // The scope of the inventory for this rule. Possible values are Blob and Container. + Scope *string `json:"scope,omitempty" tf:"scope,omitempty"` + + // The storage container name to store the blob inventory files for this rule. + StorageContainerName *string `json:"storageContainerName,omitempty" tf:"storage_container_name,omitempty"` +} + +type RulesParameters struct { + + // A filter block as defined above. Can only be set when the scope is Blob. + // +kubebuilder:validation:Optional + Filter *FilterParameters `json:"filter,omitempty" tf:"filter,omitempty"` + + // The format of the inventory files. Possible values are Csv and Parquet. + // +kubebuilder:validation:Optional + Format *string `json:"format" tf:"format,omitempty"` + + // The name which should be used for this Blob Inventory Policy Rule. 
+ // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The inventory schedule applied by this rule. Possible values are Daily and Weekly. + // +kubebuilder:validation:Optional + Schedule *string `json:"schedule" tf:"schedule,omitempty"` + + // A list of fields to be included in the inventory. See the Azure API reference for all the supported fields. + // +kubebuilder:validation:Optional + SchemaFields []*string `json:"schemaFields" tf:"schema_fields,omitempty"` + + // The scope of the inventory for this rule. Possible values are Blob and Container. + // +kubebuilder:validation:Optional + Scope *string `json:"scope" tf:"scope,omitempty"` + + // The storage container name to store the blob inventory files for this rule. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Container + // +kubebuilder:validation:Optional + StorageContainerName *string `json:"storageContainerName,omitempty" tf:"storage_container_name,omitempty"` + + // Reference to a Container in storage to populate storageContainerName. + // +kubebuilder:validation:Optional + StorageContainerNameRef *v1.Reference `json:"storageContainerNameRef,omitempty" tf:"-"` + + // Selector for a Container in storage to populate storageContainerName. + // +kubebuilder:validation:Optional + StorageContainerNameSelector *v1.Selector `json:"storageContainerNameSelector,omitempty" tf:"-"` +} + +// BlobInventoryPolicySpec defines the desired state of BlobInventoryPolicy +type BlobInventoryPolicySpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider BlobInventoryPolicyParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. 
+ // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider BlobInventoryPolicyInitParameters `json:"initProvider,omitempty"` +} + +// BlobInventoryPolicyStatus defines the observed state of BlobInventoryPolicy. +type BlobInventoryPolicyStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider BlobInventoryPolicyObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// BlobInventoryPolicy is the Schema for the BlobInventoryPolicys API. Manages a Storage Blob Inventory Policy. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type BlobInventoryPolicy struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.rules) || (has(self.initProvider) && has(self.initProvider.rules))",message="spec.forProvider.rules is a required parameter" + Spec BlobInventoryPolicySpec `json:"spec"` + Status BlobInventoryPolicyStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// BlobInventoryPolicyList contains a list of BlobInventoryPolicys 
+type BlobInventoryPolicyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []BlobInventoryPolicy `json:"items"` +} + +// Repository type metadata. +var ( + BlobInventoryPolicy_Kind = "BlobInventoryPolicy" + BlobInventoryPolicy_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: BlobInventoryPolicy_Kind}.String() + BlobInventoryPolicy_KindAPIVersion = BlobInventoryPolicy_Kind + "." + CRDGroupVersion.String() + BlobInventoryPolicy_GroupVersionKind = CRDGroupVersion.WithKind(BlobInventoryPolicy_Kind) +) + +func init() { + SchemeBuilder.Register(&BlobInventoryPolicy{}, &BlobInventoryPolicyList{}) +} diff --git a/apis/storage/v1beta2/zz_generated.conversion_hubs.go b/apis/storage/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 000000000..62647fbc8 --- /dev/null +++ b/apis/storage/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,19 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Account) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *AccountLocalUser) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *BlobInventoryPolicy) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *ManagementPolicy) Hub() {} diff --git a/apis/storage/v1beta2/zz_generated.deepcopy.go b/apis/storage/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..cc0da8775 --- /dev/null +++ b/apis/storage/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,6090 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Account) DeepCopyInto(out *Account) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Account. +func (in *Account) DeepCopy() *Account { + if in == nil { + return nil + } + out := new(Account) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Account) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AccountInitParameters) DeepCopyInto(out *AccountInitParameters) { + *out = *in + if in.AccessTier != nil { + in, out := &in.AccessTier, &out.AccessTier + *out = new(string) + **out = **in + } + if in.AccountKind != nil { + in, out := &in.AccountKind, &out.AccountKind + *out = new(string) + **out = **in + } + if in.AccountReplicationType != nil { + in, out := &in.AccountReplicationType, &out.AccountReplicationType + *out = new(string) + **out = **in + } + if in.AccountTier != nil { + in, out := &in.AccountTier, &out.AccountTier + *out = new(string) + **out = **in + } + if in.AllowNestedItemsToBePublic != nil { + in, out := &in.AllowNestedItemsToBePublic, &out.AllowNestedItemsToBePublic + *out = new(bool) + **out = **in + } + if in.AllowedCopyScope != nil { + in, out := &in.AllowedCopyScope, &out.AllowedCopyScope + *out = new(string) + **out = **in + } + if in.AzureFilesAuthentication != nil { + in, out := &in.AzureFilesAuthentication, &out.AzureFilesAuthentication + *out = new(AzureFilesAuthenticationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.BlobProperties != nil { + in, out := &in.BlobProperties, &out.BlobProperties + *out = new(BlobPropertiesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CrossTenantReplicationEnabled != nil { + in, out := &in.CrossTenantReplicationEnabled, &out.CrossTenantReplicationEnabled + *out = new(bool) + **out = **in + } + if in.CustomDomain != nil { + in, out := &in.CustomDomain, &out.CustomDomain + *out = new(CustomDomainInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomerManagedKey != nil { + in, out := &in.CustomerManagedKey, &out.CustomerManagedKey + *out = new(CustomerManagedKeyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DefaultToOauthAuthentication != nil { + in, out := &in.DefaultToOauthAuthentication, &out.DefaultToOauthAuthentication + *out = new(bool) + **out = **in + } + if in.EdgeZone != nil { + in, out := &in.EdgeZone, &out.EdgeZone + *out = new(string) + **out = **in + } + 
if in.EnableHTTPSTrafficOnly != nil { + in, out := &in.EnableHTTPSTrafficOnly, &out.EnableHTTPSTrafficOnly + *out = new(bool) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ImmutabilityPolicy != nil { + in, out := &in.ImmutabilityPolicy, &out.ImmutabilityPolicy + *out = new(ImmutabilityPolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.InfrastructureEncryptionEnabled != nil { + in, out := &in.InfrastructureEncryptionEnabled, &out.InfrastructureEncryptionEnabled + *out = new(bool) + **out = **in + } + if in.IsHnsEnabled != nil { + in, out := &in.IsHnsEnabled, &out.IsHnsEnabled + *out = new(bool) + **out = **in + } + if in.LargeFileShareEnabled != nil { + in, out := &in.LargeFileShareEnabled, &out.LargeFileShareEnabled + *out = new(bool) + **out = **in + } + if in.LocalUserEnabled != nil { + in, out := &in.LocalUserEnabled, &out.LocalUserEnabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MinTLSVersion != nil { + in, out := &in.MinTLSVersion, &out.MinTLSVersion + *out = new(string) + **out = **in + } + if in.NetworkRules != nil { + in, out := &in.NetworkRules, &out.NetworkRules + *out = new(NetworkRulesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Nfsv3Enabled != nil { + in, out := &in.Nfsv3Enabled, &out.Nfsv3Enabled + *out = new(bool) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.QueueEncryptionKeyType != nil { + in, out := &in.QueueEncryptionKeyType, &out.QueueEncryptionKeyType + *out = new(string) + **out = **in + } + if in.QueueProperties != nil { + in, out := &in.QueueProperties, &out.QueueProperties + *out = new(QueuePropertiesInitParameters) + (*in).DeepCopyInto(*out) + } + if 
in.Routing != nil { + in, out := &in.Routing, &out.Routing + *out = new(RoutingInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SASPolicy != nil { + in, out := &in.SASPolicy, &out.SASPolicy + *out = new(SASPolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SFTPEnabled != nil { + in, out := &in.SFTPEnabled, &out.SFTPEnabled + *out = new(bool) + **out = **in + } + if in.ShareProperties != nil { + in, out := &in.ShareProperties, &out.ShareProperties + *out = new(SharePropertiesInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SharedAccessKeyEnabled != nil { + in, out := &in.SharedAccessKeyEnabled, &out.SharedAccessKeyEnabled + *out = new(bool) + **out = **in + } + if in.StaticWebsite != nil { + in, out := &in.StaticWebsite, &out.StaticWebsite + *out = new(StaticWebsiteInitParameters) + (*in).DeepCopyInto(*out) + } + if in.TableEncryptionKeyType != nil { + in, out := &in.TableEncryptionKeyType, &out.TableEncryptionKeyType + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountInitParameters. +func (in *AccountInitParameters) DeepCopy() *AccountInitParameters { + if in == nil { + return nil + } + out := new(AccountInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AccountList) DeepCopyInto(out *AccountList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Account, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountList. +func (in *AccountList) DeepCopy() *AccountList { + if in == nil { + return nil + } + out := new(AccountList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AccountList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccountLocalUser) DeepCopyInto(out *AccountLocalUser) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountLocalUser. +func (in *AccountLocalUser) DeepCopy() *AccountLocalUser { + if in == nil { + return nil + } + out := new(AccountLocalUser) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AccountLocalUser) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AccountLocalUserInitParameters) DeepCopyInto(out *AccountLocalUserInitParameters) { + *out = *in + if in.HomeDirectory != nil { + in, out := &in.HomeDirectory, &out.HomeDirectory + *out = new(string) + **out = **in + } + if in.PermissionScope != nil { + in, out := &in.PermissionScope, &out.PermissionScope + *out = make([]PermissionScopeInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SSHAuthorizedKey != nil { + in, out := &in.SSHAuthorizedKey, &out.SSHAuthorizedKey + *out = make([]SSHAuthorizedKeyInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SSHKeyEnabled != nil { + in, out := &in.SSHKeyEnabled, &out.SSHKeyEnabled + *out = new(bool) + **out = **in + } + if in.SSHPasswordEnabled != nil { + in, out := &in.SSHPasswordEnabled, &out.SSHPasswordEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountLocalUserInitParameters. +func (in *AccountLocalUserInitParameters) DeepCopy() *AccountLocalUserInitParameters { + if in == nil { + return nil + } + out := new(AccountLocalUserInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccountLocalUserList) DeepCopyInto(out *AccountLocalUserList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AccountLocalUser, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountLocalUserList. 
+func (in *AccountLocalUserList) DeepCopy() *AccountLocalUserList { + if in == nil { + return nil + } + out := new(AccountLocalUserList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AccountLocalUserList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccountLocalUserObservation) DeepCopyInto(out *AccountLocalUserObservation) { + *out = *in + if in.HomeDirectory != nil { + in, out := &in.HomeDirectory, &out.HomeDirectory + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.PermissionScope != nil { + in, out := &in.PermissionScope, &out.PermissionScope + *out = make([]PermissionScopeObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SSHAuthorizedKey != nil { + in, out := &in.SSHAuthorizedKey, &out.SSHAuthorizedKey + *out = make([]SSHAuthorizedKeyObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SSHKeyEnabled != nil { + in, out := &in.SSHKeyEnabled, &out.SSHKeyEnabled + *out = new(bool) + **out = **in + } + if in.SSHPasswordEnabled != nil { + in, out := &in.SSHPasswordEnabled, &out.SSHPasswordEnabled + *out = new(bool) + **out = **in + } + if in.StorageAccountID != nil { + in, out := &in.StorageAccountID, &out.StorageAccountID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountLocalUserObservation. 
+func (in *AccountLocalUserObservation) DeepCopy() *AccountLocalUserObservation { + if in == nil { + return nil + } + out := new(AccountLocalUserObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccountLocalUserParameters) DeepCopyInto(out *AccountLocalUserParameters) { + *out = *in + if in.HomeDirectory != nil { + in, out := &in.HomeDirectory, &out.HomeDirectory + *out = new(string) + **out = **in + } + if in.PermissionScope != nil { + in, out := &in.PermissionScope, &out.PermissionScope + *out = make([]PermissionScopeParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SSHAuthorizedKey != nil { + in, out := &in.SSHAuthorizedKey, &out.SSHAuthorizedKey + *out = make([]SSHAuthorizedKeyParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SSHKeyEnabled != nil { + in, out := &in.SSHKeyEnabled, &out.SSHKeyEnabled + *out = new(bool) + **out = **in + } + if in.SSHPasswordEnabled != nil { + in, out := &in.SSHPasswordEnabled, &out.SSHPasswordEnabled + *out = new(bool) + **out = **in + } + if in.StorageAccountID != nil { + in, out := &in.StorageAccountID, &out.StorageAccountID + *out = new(string) + **out = **in + } + if in.StorageAccountIDRef != nil { + in, out := &in.StorageAccountIDRef, &out.StorageAccountIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StorageAccountIDSelector != nil { + in, out := &in.StorageAccountIDSelector, &out.StorageAccountIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountLocalUserParameters. 
+func (in *AccountLocalUserParameters) DeepCopy() *AccountLocalUserParameters { + if in == nil { + return nil + } + out := new(AccountLocalUserParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccountLocalUserSpec) DeepCopyInto(out *AccountLocalUserSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountLocalUserSpec. +func (in *AccountLocalUserSpec) DeepCopy() *AccountLocalUserSpec { + if in == nil { + return nil + } + out := new(AccountLocalUserSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccountLocalUserStatus) DeepCopyInto(out *AccountLocalUserStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountLocalUserStatus. +func (in *AccountLocalUserStatus) DeepCopy() *AccountLocalUserStatus { + if in == nil { + return nil + } + out := new(AccountLocalUserStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AccountObservation) DeepCopyInto(out *AccountObservation) { + *out = *in + if in.AccessTier != nil { + in, out := &in.AccessTier, &out.AccessTier + *out = new(string) + **out = **in + } + if in.AccountKind != nil { + in, out := &in.AccountKind, &out.AccountKind + *out = new(string) + **out = **in + } + if in.AccountReplicationType != nil { + in, out := &in.AccountReplicationType, &out.AccountReplicationType + *out = new(string) + **out = **in + } + if in.AccountTier != nil { + in, out := &in.AccountTier, &out.AccountTier + *out = new(string) + **out = **in + } + if in.AllowNestedItemsToBePublic != nil { + in, out := &in.AllowNestedItemsToBePublic, &out.AllowNestedItemsToBePublic + *out = new(bool) + **out = **in + } + if in.AllowedCopyScope != nil { + in, out := &in.AllowedCopyScope, &out.AllowedCopyScope + *out = new(string) + **out = **in + } + if in.AzureFilesAuthentication != nil { + in, out := &in.AzureFilesAuthentication, &out.AzureFilesAuthentication + *out = new(AzureFilesAuthenticationObservation) + (*in).DeepCopyInto(*out) + } + if in.BlobProperties != nil { + in, out := &in.BlobProperties, &out.BlobProperties + *out = new(BlobPropertiesObservation) + (*in).DeepCopyInto(*out) + } + if in.CrossTenantReplicationEnabled != nil { + in, out := &in.CrossTenantReplicationEnabled, &out.CrossTenantReplicationEnabled + *out = new(bool) + **out = **in + } + if in.CustomDomain != nil { + in, out := &in.CustomDomain, &out.CustomDomain + *out = new(CustomDomainObservation) + (*in).DeepCopyInto(*out) + } + if in.CustomerManagedKey != nil { + in, out := &in.CustomerManagedKey, &out.CustomerManagedKey + *out = new(CustomerManagedKeyObservation) + (*in).DeepCopyInto(*out) + } + if in.DefaultToOauthAuthentication != nil { + in, out := &in.DefaultToOauthAuthentication, &out.DefaultToOauthAuthentication + *out = new(bool) + **out = **in + } + if in.EdgeZone != nil { + in, out := &in.EdgeZone, &out.EdgeZone + *out = new(string) + **out = **in + } + if 
in.EnableHTTPSTrafficOnly != nil { + in, out := &in.EnableHTTPSTrafficOnly, &out.EnableHTTPSTrafficOnly + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.ImmutabilityPolicy != nil { + in, out := &in.ImmutabilityPolicy, &out.ImmutabilityPolicy + *out = new(ImmutabilityPolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.InfrastructureEncryptionEnabled != nil { + in, out := &in.InfrastructureEncryptionEnabled, &out.InfrastructureEncryptionEnabled + *out = new(bool) + **out = **in + } + if in.IsHnsEnabled != nil { + in, out := &in.IsHnsEnabled, &out.IsHnsEnabled + *out = new(bool) + **out = **in + } + if in.LargeFileShareEnabled != nil { + in, out := &in.LargeFileShareEnabled, &out.LargeFileShareEnabled + *out = new(bool) + **out = **in + } + if in.LocalUserEnabled != nil { + in, out := &in.LocalUserEnabled, &out.LocalUserEnabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MinTLSVersion != nil { + in, out := &in.MinTLSVersion, &out.MinTLSVersion + *out = new(string) + **out = **in + } + if in.NetworkRules != nil { + in, out := &in.NetworkRules, &out.NetworkRules + *out = new(NetworkRulesObservation) + (*in).DeepCopyInto(*out) + } + if in.Nfsv3Enabled != nil { + in, out := &in.Nfsv3Enabled, &out.Nfsv3Enabled + *out = new(bool) + **out = **in + } + if in.PrimaryBlobEndpoint != nil { + in, out := &in.PrimaryBlobEndpoint, &out.PrimaryBlobEndpoint + *out = new(string) + **out = **in + } + if in.PrimaryBlobHost != nil { + in, out := &in.PrimaryBlobHost, &out.PrimaryBlobHost + *out = new(string) + **out = **in + } + if in.PrimaryBlobInternetEndpoint != nil { + in, out := &in.PrimaryBlobInternetEndpoint, &out.PrimaryBlobInternetEndpoint + *out = 
new(string) + **out = **in + } + if in.PrimaryBlobInternetHost != nil { + in, out := &in.PrimaryBlobInternetHost, &out.PrimaryBlobInternetHost + *out = new(string) + **out = **in + } + if in.PrimaryBlobMicrosoftEndpoint != nil { + in, out := &in.PrimaryBlobMicrosoftEndpoint, &out.PrimaryBlobMicrosoftEndpoint + *out = new(string) + **out = **in + } + if in.PrimaryBlobMicrosoftHost != nil { + in, out := &in.PrimaryBlobMicrosoftHost, &out.PrimaryBlobMicrosoftHost + *out = new(string) + **out = **in + } + if in.PrimaryDfsEndpoint != nil { + in, out := &in.PrimaryDfsEndpoint, &out.PrimaryDfsEndpoint + *out = new(string) + **out = **in + } + if in.PrimaryDfsHost != nil { + in, out := &in.PrimaryDfsHost, &out.PrimaryDfsHost + *out = new(string) + **out = **in + } + if in.PrimaryDfsInternetEndpoint != nil { + in, out := &in.PrimaryDfsInternetEndpoint, &out.PrimaryDfsInternetEndpoint + *out = new(string) + **out = **in + } + if in.PrimaryDfsInternetHost != nil { + in, out := &in.PrimaryDfsInternetHost, &out.PrimaryDfsInternetHost + *out = new(string) + **out = **in + } + if in.PrimaryDfsMicrosoftEndpoint != nil { + in, out := &in.PrimaryDfsMicrosoftEndpoint, &out.PrimaryDfsMicrosoftEndpoint + *out = new(string) + **out = **in + } + if in.PrimaryDfsMicrosoftHost != nil { + in, out := &in.PrimaryDfsMicrosoftHost, &out.PrimaryDfsMicrosoftHost + *out = new(string) + **out = **in + } + if in.PrimaryFileEndpoint != nil { + in, out := &in.PrimaryFileEndpoint, &out.PrimaryFileEndpoint + *out = new(string) + **out = **in + } + if in.PrimaryFileHost != nil { + in, out := &in.PrimaryFileHost, &out.PrimaryFileHost + *out = new(string) + **out = **in + } + if in.PrimaryFileInternetEndpoint != nil { + in, out := &in.PrimaryFileInternetEndpoint, &out.PrimaryFileInternetEndpoint + *out = new(string) + **out = **in + } + if in.PrimaryFileInternetHost != nil { + in, out := &in.PrimaryFileInternetHost, &out.PrimaryFileInternetHost + *out = new(string) + **out = **in + } + if 
in.PrimaryFileMicrosoftEndpoint != nil { + in, out := &in.PrimaryFileMicrosoftEndpoint, &out.PrimaryFileMicrosoftEndpoint + *out = new(string) + **out = **in + } + if in.PrimaryFileMicrosoftHost != nil { + in, out := &in.PrimaryFileMicrosoftHost, &out.PrimaryFileMicrosoftHost + *out = new(string) + **out = **in + } + if in.PrimaryLocation != nil { + in, out := &in.PrimaryLocation, &out.PrimaryLocation + *out = new(string) + **out = **in + } + if in.PrimaryQueueEndpoint != nil { + in, out := &in.PrimaryQueueEndpoint, &out.PrimaryQueueEndpoint + *out = new(string) + **out = **in + } + if in.PrimaryQueueHost != nil { + in, out := &in.PrimaryQueueHost, &out.PrimaryQueueHost + *out = new(string) + **out = **in + } + if in.PrimaryQueueMicrosoftEndpoint != nil { + in, out := &in.PrimaryQueueMicrosoftEndpoint, &out.PrimaryQueueMicrosoftEndpoint + *out = new(string) + **out = **in + } + if in.PrimaryQueueMicrosoftHost != nil { + in, out := &in.PrimaryQueueMicrosoftHost, &out.PrimaryQueueMicrosoftHost + *out = new(string) + **out = **in + } + if in.PrimaryTableEndpoint != nil { + in, out := &in.PrimaryTableEndpoint, &out.PrimaryTableEndpoint + *out = new(string) + **out = **in + } + if in.PrimaryTableHost != nil { + in, out := &in.PrimaryTableHost, &out.PrimaryTableHost + *out = new(string) + **out = **in + } + if in.PrimaryTableMicrosoftEndpoint != nil { + in, out := &in.PrimaryTableMicrosoftEndpoint, &out.PrimaryTableMicrosoftEndpoint + *out = new(string) + **out = **in + } + if in.PrimaryTableMicrosoftHost != nil { + in, out := &in.PrimaryTableMicrosoftHost, &out.PrimaryTableMicrosoftHost + *out = new(string) + **out = **in + } + if in.PrimaryWebEndpoint != nil { + in, out := &in.PrimaryWebEndpoint, &out.PrimaryWebEndpoint + *out = new(string) + **out = **in + } + if in.PrimaryWebHost != nil { + in, out := &in.PrimaryWebHost, &out.PrimaryWebHost + *out = new(string) + **out = **in + } + if in.PrimaryWebInternetEndpoint != nil { + in, out := &in.PrimaryWebInternetEndpoint, 
&out.PrimaryWebInternetEndpoint + *out = new(string) + **out = **in + } + if in.PrimaryWebInternetHost != nil { + in, out := &in.PrimaryWebInternetHost, &out.PrimaryWebInternetHost + *out = new(string) + **out = **in + } + if in.PrimaryWebMicrosoftEndpoint != nil { + in, out := &in.PrimaryWebMicrosoftEndpoint, &out.PrimaryWebMicrosoftEndpoint + *out = new(string) + **out = **in + } + if in.PrimaryWebMicrosoftHost != nil { + in, out := &in.PrimaryWebMicrosoftHost, &out.PrimaryWebMicrosoftHost + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.QueueEncryptionKeyType != nil { + in, out := &in.QueueEncryptionKeyType, &out.QueueEncryptionKeyType + *out = new(string) + **out = **in + } + if in.QueueProperties != nil { + in, out := &in.QueueProperties, &out.QueueProperties + *out = new(QueuePropertiesObservation) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Routing != nil { + in, out := &in.Routing, &out.Routing + *out = new(RoutingObservation) + (*in).DeepCopyInto(*out) + } + if in.SASPolicy != nil { + in, out := &in.SASPolicy, &out.SASPolicy + *out = new(SASPolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.SFTPEnabled != nil { + in, out := &in.SFTPEnabled, &out.SFTPEnabled + *out = new(bool) + **out = **in + } + if in.SecondaryBlobEndpoint != nil { + in, out := &in.SecondaryBlobEndpoint, &out.SecondaryBlobEndpoint + *out = new(string) + **out = **in + } + if in.SecondaryBlobHost != nil { + in, out := &in.SecondaryBlobHost, &out.SecondaryBlobHost + *out = new(string) + **out = **in + } + if in.SecondaryBlobInternetEndpoint != nil { + in, out := &in.SecondaryBlobInternetEndpoint, &out.SecondaryBlobInternetEndpoint + *out = new(string) + **out = **in + } + if 
in.SecondaryBlobInternetHost != nil { + in, out := &in.SecondaryBlobInternetHost, &out.SecondaryBlobInternetHost + *out = new(string) + **out = **in + } + if in.SecondaryBlobMicrosoftEndpoint != nil { + in, out := &in.SecondaryBlobMicrosoftEndpoint, &out.SecondaryBlobMicrosoftEndpoint + *out = new(string) + **out = **in + } + if in.SecondaryBlobMicrosoftHost != nil { + in, out := &in.SecondaryBlobMicrosoftHost, &out.SecondaryBlobMicrosoftHost + *out = new(string) + **out = **in + } + if in.SecondaryDfsEndpoint != nil { + in, out := &in.SecondaryDfsEndpoint, &out.SecondaryDfsEndpoint + *out = new(string) + **out = **in + } + if in.SecondaryDfsHost != nil { + in, out := &in.SecondaryDfsHost, &out.SecondaryDfsHost + *out = new(string) + **out = **in + } + if in.SecondaryDfsInternetEndpoint != nil { + in, out := &in.SecondaryDfsInternetEndpoint, &out.SecondaryDfsInternetEndpoint + *out = new(string) + **out = **in + } + if in.SecondaryDfsInternetHost != nil { + in, out := &in.SecondaryDfsInternetHost, &out.SecondaryDfsInternetHost + *out = new(string) + **out = **in + } + if in.SecondaryDfsMicrosoftEndpoint != nil { + in, out := &in.SecondaryDfsMicrosoftEndpoint, &out.SecondaryDfsMicrosoftEndpoint + *out = new(string) + **out = **in + } + if in.SecondaryDfsMicrosoftHost != nil { + in, out := &in.SecondaryDfsMicrosoftHost, &out.SecondaryDfsMicrosoftHost + *out = new(string) + **out = **in + } + if in.SecondaryFileEndpoint != nil { + in, out := &in.SecondaryFileEndpoint, &out.SecondaryFileEndpoint + *out = new(string) + **out = **in + } + if in.SecondaryFileHost != nil { + in, out := &in.SecondaryFileHost, &out.SecondaryFileHost + *out = new(string) + **out = **in + } + if in.SecondaryFileInternetEndpoint != nil { + in, out := &in.SecondaryFileInternetEndpoint, &out.SecondaryFileInternetEndpoint + *out = new(string) + **out = **in + } + if in.SecondaryFileInternetHost != nil { + in, out := &in.SecondaryFileInternetHost, &out.SecondaryFileInternetHost + *out = new(string) 
+ **out = **in + } + if in.SecondaryFileMicrosoftEndpoint != nil { + in, out := &in.SecondaryFileMicrosoftEndpoint, &out.SecondaryFileMicrosoftEndpoint + *out = new(string) + **out = **in + } + if in.SecondaryFileMicrosoftHost != nil { + in, out := &in.SecondaryFileMicrosoftHost, &out.SecondaryFileMicrosoftHost + *out = new(string) + **out = **in + } + if in.SecondaryLocation != nil { + in, out := &in.SecondaryLocation, &out.SecondaryLocation + *out = new(string) + **out = **in + } + if in.SecondaryQueueEndpoint != nil { + in, out := &in.SecondaryQueueEndpoint, &out.SecondaryQueueEndpoint + *out = new(string) + **out = **in + } + if in.SecondaryQueueHost != nil { + in, out := &in.SecondaryQueueHost, &out.SecondaryQueueHost + *out = new(string) + **out = **in + } + if in.SecondaryQueueMicrosoftEndpoint != nil { + in, out := &in.SecondaryQueueMicrosoftEndpoint, &out.SecondaryQueueMicrosoftEndpoint + *out = new(string) + **out = **in + } + if in.SecondaryQueueMicrosoftHost != nil { + in, out := &in.SecondaryQueueMicrosoftHost, &out.SecondaryQueueMicrosoftHost + *out = new(string) + **out = **in + } + if in.SecondaryTableEndpoint != nil { + in, out := &in.SecondaryTableEndpoint, &out.SecondaryTableEndpoint + *out = new(string) + **out = **in + } + if in.SecondaryTableHost != nil { + in, out := &in.SecondaryTableHost, &out.SecondaryTableHost + *out = new(string) + **out = **in + } + if in.SecondaryTableMicrosoftEndpoint != nil { + in, out := &in.SecondaryTableMicrosoftEndpoint, &out.SecondaryTableMicrosoftEndpoint + *out = new(string) + **out = **in + } + if in.SecondaryTableMicrosoftHost != nil { + in, out := &in.SecondaryTableMicrosoftHost, &out.SecondaryTableMicrosoftHost + *out = new(string) + **out = **in + } + if in.SecondaryWebEndpoint != nil { + in, out := &in.SecondaryWebEndpoint, &out.SecondaryWebEndpoint + *out = new(string) + **out = **in + } + if in.SecondaryWebHost != nil { + in, out := &in.SecondaryWebHost, &out.SecondaryWebHost + *out = new(string) + 
**out = **in + } + if in.SecondaryWebInternetEndpoint != nil { + in, out := &in.SecondaryWebInternetEndpoint, &out.SecondaryWebInternetEndpoint + *out = new(string) + **out = **in + } + if in.SecondaryWebInternetHost != nil { + in, out := &in.SecondaryWebInternetHost, &out.SecondaryWebInternetHost + *out = new(string) + **out = **in + } + if in.SecondaryWebMicrosoftEndpoint != nil { + in, out := &in.SecondaryWebMicrosoftEndpoint, &out.SecondaryWebMicrosoftEndpoint + *out = new(string) + **out = **in + } + if in.SecondaryWebMicrosoftHost != nil { + in, out := &in.SecondaryWebMicrosoftHost, &out.SecondaryWebMicrosoftHost + *out = new(string) + **out = **in + } + if in.ShareProperties != nil { + in, out := &in.ShareProperties, &out.ShareProperties + *out = new(SharePropertiesObservation) + (*in).DeepCopyInto(*out) + } + if in.SharedAccessKeyEnabled != nil { + in, out := &in.SharedAccessKeyEnabled, &out.SharedAccessKeyEnabled + *out = new(bool) + **out = **in + } + if in.StaticWebsite != nil { + in, out := &in.StaticWebsite, &out.StaticWebsite + *out = new(StaticWebsiteObservation) + (*in).DeepCopyInto(*out) + } + if in.TableEncryptionKeyType != nil { + in, out := &in.TableEncryptionKeyType, &out.TableEncryptionKeyType + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountObservation. +func (in *AccountObservation) DeepCopy() *AccountObservation { + if in == nil { + return nil + } + out := new(AccountObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AccountParameters) DeepCopyInto(out *AccountParameters) { + *out = *in + if in.AccessTier != nil { + in, out := &in.AccessTier, &out.AccessTier + *out = new(string) + **out = **in + } + if in.AccountKind != nil { + in, out := &in.AccountKind, &out.AccountKind + *out = new(string) + **out = **in + } + if in.AccountReplicationType != nil { + in, out := &in.AccountReplicationType, &out.AccountReplicationType + *out = new(string) + **out = **in + } + if in.AccountTier != nil { + in, out := &in.AccountTier, &out.AccountTier + *out = new(string) + **out = **in + } + if in.AllowNestedItemsToBePublic != nil { + in, out := &in.AllowNestedItemsToBePublic, &out.AllowNestedItemsToBePublic + *out = new(bool) + **out = **in + } + if in.AllowedCopyScope != nil { + in, out := &in.AllowedCopyScope, &out.AllowedCopyScope + *out = new(string) + **out = **in + } + if in.AzureFilesAuthentication != nil { + in, out := &in.AzureFilesAuthentication, &out.AzureFilesAuthentication + *out = new(AzureFilesAuthenticationParameters) + (*in).DeepCopyInto(*out) + } + if in.BlobProperties != nil { + in, out := &in.BlobProperties, &out.BlobProperties + *out = new(BlobPropertiesParameters) + (*in).DeepCopyInto(*out) + } + if in.CrossTenantReplicationEnabled != nil { + in, out := &in.CrossTenantReplicationEnabled, &out.CrossTenantReplicationEnabled + *out = new(bool) + **out = **in + } + if in.CustomDomain != nil { + in, out := &in.CustomDomain, &out.CustomDomain + *out = new(CustomDomainParameters) + (*in).DeepCopyInto(*out) + } + if in.CustomerManagedKey != nil { + in, out := &in.CustomerManagedKey, &out.CustomerManagedKey + *out = new(CustomerManagedKeyParameters) + (*in).DeepCopyInto(*out) + } + if in.DefaultToOauthAuthentication != nil { + in, out := &in.DefaultToOauthAuthentication, &out.DefaultToOauthAuthentication + *out = new(bool) + **out = **in + } + if in.EdgeZone != nil { + in, out := &in.EdgeZone, &out.EdgeZone + *out = new(string) + **out = **in + } + if 
in.EnableHTTPSTrafficOnly != nil { + in, out := &in.EnableHTTPSTrafficOnly, &out.EnableHTTPSTrafficOnly + *out = new(bool) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.ImmutabilityPolicy != nil { + in, out := &in.ImmutabilityPolicy, &out.ImmutabilityPolicy + *out = new(ImmutabilityPolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.InfrastructureEncryptionEnabled != nil { + in, out := &in.InfrastructureEncryptionEnabled, &out.InfrastructureEncryptionEnabled + *out = new(bool) + **out = **in + } + if in.IsHnsEnabled != nil { + in, out := &in.IsHnsEnabled, &out.IsHnsEnabled + *out = new(bool) + **out = **in + } + if in.LargeFileShareEnabled != nil { + in, out := &in.LargeFileShareEnabled, &out.LargeFileShareEnabled + *out = new(bool) + **out = **in + } + if in.LocalUserEnabled != nil { + in, out := &in.LocalUserEnabled, &out.LocalUserEnabled + *out = new(bool) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MinTLSVersion != nil { + in, out := &in.MinTLSVersion, &out.MinTLSVersion + *out = new(string) + **out = **in + } + if in.NetworkRules != nil { + in, out := &in.NetworkRules, &out.NetworkRules + *out = new(NetworkRulesParameters) + (*in).DeepCopyInto(*out) + } + if in.Nfsv3Enabled != nil { + in, out := &in.Nfsv3Enabled, &out.Nfsv3Enabled + *out = new(bool) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.QueueEncryptionKeyType != nil { + in, out := &in.QueueEncryptionKeyType, &out.QueueEncryptionKeyType + *out = new(string) + **out = **in + } + if in.QueueProperties != nil { + in, out := &in.QueueProperties, &out.QueueProperties + *out = new(QueuePropertiesParameters) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != 
nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Routing != nil { + in, out := &in.Routing, &out.Routing + *out = new(RoutingParameters) + (*in).DeepCopyInto(*out) + } + if in.SASPolicy != nil { + in, out := &in.SASPolicy, &out.SASPolicy + *out = new(SASPolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.SFTPEnabled != nil { + in, out := &in.SFTPEnabled, &out.SFTPEnabled + *out = new(bool) + **out = **in + } + if in.ShareProperties != nil { + in, out := &in.ShareProperties, &out.ShareProperties + *out = new(SharePropertiesParameters) + (*in).DeepCopyInto(*out) + } + if in.SharedAccessKeyEnabled != nil { + in, out := &in.SharedAccessKeyEnabled, &out.SharedAccessKeyEnabled + *out = new(bool) + **out = **in + } + if in.StaticWebsite != nil { + in, out := &in.StaticWebsite, &out.StaticWebsite + *out = new(StaticWebsiteParameters) + (*in).DeepCopyInto(*out) + } + if in.TableEncryptionKeyType != nil { + in, out := &in.TableEncryptionKeyType, &out.TableEncryptionKeyType + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountParameters. 
+func (in *AccountParameters) DeepCopy() *AccountParameters { + if in == nil { + return nil + } + out := new(AccountParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccountSpec) DeepCopyInto(out *AccountSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountSpec. +func (in *AccountSpec) DeepCopy() *AccountSpec { + if in == nil { + return nil + } + out := new(AccountSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccountStatus) DeepCopyInto(out *AccountStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountStatus. +func (in *AccountStatus) DeepCopy() *AccountStatus { + if in == nil { + return nil + } + out := new(AccountStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ActionsInitParameters) DeepCopyInto(out *ActionsInitParameters) { + *out = *in + if in.BaseBlob != nil { + in, out := &in.BaseBlob, &out.BaseBlob + *out = new(BaseBlobInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Snapshot != nil { + in, out := &in.Snapshot, &out.Snapshot + *out = new(SnapshotInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(VersionInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionsInitParameters. +func (in *ActionsInitParameters) DeepCopy() *ActionsInitParameters { + if in == nil { + return nil + } + out := new(ActionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionsObservation) DeepCopyInto(out *ActionsObservation) { + *out = *in + if in.BaseBlob != nil { + in, out := &in.BaseBlob, &out.BaseBlob + *out = new(BaseBlobObservation) + (*in).DeepCopyInto(*out) + } + if in.Snapshot != nil { + in, out := &in.Snapshot, &out.Snapshot + *out = new(SnapshotObservation) + (*in).DeepCopyInto(*out) + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(VersionObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionsObservation. +func (in *ActionsObservation) DeepCopy() *ActionsObservation { + if in == nil { + return nil + } + out := new(ActionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ActionsParameters) DeepCopyInto(out *ActionsParameters) { + *out = *in + if in.BaseBlob != nil { + in, out := &in.BaseBlob, &out.BaseBlob + *out = new(BaseBlobParameters) + (*in).DeepCopyInto(*out) + } + if in.Snapshot != nil { + in, out := &in.Snapshot, &out.Snapshot + *out = new(SnapshotParameters) + (*in).DeepCopyInto(*out) + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(VersionParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionsParameters. +func (in *ActionsParameters) DeepCopy() *ActionsParameters { + if in == nil { + return nil + } + out := new(ActionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActiveDirectoryInitParameters) DeepCopyInto(out *ActiveDirectoryInitParameters) { + *out = *in + if in.DomainGUID != nil { + in, out := &in.DomainGUID, &out.DomainGUID + *out = new(string) + **out = **in + } + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.DomainSid != nil { + in, out := &in.DomainSid, &out.DomainSid + *out = new(string) + **out = **in + } + if in.ForestName != nil { + in, out := &in.ForestName, &out.ForestName + *out = new(string) + **out = **in + } + if in.NetbiosDomainName != nil { + in, out := &in.NetbiosDomainName, &out.NetbiosDomainName + *out = new(string) + **out = **in + } + if in.StorageSid != nil { + in, out := &in.StorageSid, &out.StorageSid + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActiveDirectoryInitParameters. 
+func (in *ActiveDirectoryInitParameters) DeepCopy() *ActiveDirectoryInitParameters { + if in == nil { + return nil + } + out := new(ActiveDirectoryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActiveDirectoryObservation) DeepCopyInto(out *ActiveDirectoryObservation) { + *out = *in + if in.DomainGUID != nil { + in, out := &in.DomainGUID, &out.DomainGUID + *out = new(string) + **out = **in + } + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.DomainSid != nil { + in, out := &in.DomainSid, &out.DomainSid + *out = new(string) + **out = **in + } + if in.ForestName != nil { + in, out := &in.ForestName, &out.ForestName + *out = new(string) + **out = **in + } + if in.NetbiosDomainName != nil { + in, out := &in.NetbiosDomainName, &out.NetbiosDomainName + *out = new(string) + **out = **in + } + if in.StorageSid != nil { + in, out := &in.StorageSid, &out.StorageSid + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActiveDirectoryObservation. +func (in *ActiveDirectoryObservation) DeepCopy() *ActiveDirectoryObservation { + if in == nil { + return nil + } + out := new(ActiveDirectoryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ActiveDirectoryParameters) DeepCopyInto(out *ActiveDirectoryParameters) { + *out = *in + if in.DomainGUID != nil { + in, out := &in.DomainGUID, &out.DomainGUID + *out = new(string) + **out = **in + } + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.DomainSid != nil { + in, out := &in.DomainSid, &out.DomainSid + *out = new(string) + **out = **in + } + if in.ForestName != nil { + in, out := &in.ForestName, &out.ForestName + *out = new(string) + **out = **in + } + if in.NetbiosDomainName != nil { + in, out := &in.NetbiosDomainName, &out.NetbiosDomainName + *out = new(string) + **out = **in + } + if in.StorageSid != nil { + in, out := &in.StorageSid, &out.StorageSid + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActiveDirectoryParameters. +func (in *ActiveDirectoryParameters) DeepCopy() *ActiveDirectoryParameters { + if in == nil { + return nil + } + out := new(ActiveDirectoryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureFilesAuthenticationInitParameters) DeepCopyInto(out *AzureFilesAuthenticationInitParameters) { + *out = *in + if in.ActiveDirectory != nil { + in, out := &in.ActiveDirectory, &out.ActiveDirectory + *out = new(ActiveDirectoryInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DirectoryType != nil { + in, out := &in.DirectoryType, &out.DirectoryType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureFilesAuthenticationInitParameters. 
+func (in *AzureFilesAuthenticationInitParameters) DeepCopy() *AzureFilesAuthenticationInitParameters { + if in == nil { + return nil + } + out := new(AzureFilesAuthenticationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureFilesAuthenticationObservation) DeepCopyInto(out *AzureFilesAuthenticationObservation) { + *out = *in + if in.ActiveDirectory != nil { + in, out := &in.ActiveDirectory, &out.ActiveDirectory + *out = new(ActiveDirectoryObservation) + (*in).DeepCopyInto(*out) + } + if in.DirectoryType != nil { + in, out := &in.DirectoryType, &out.DirectoryType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureFilesAuthenticationObservation. +func (in *AzureFilesAuthenticationObservation) DeepCopy() *AzureFilesAuthenticationObservation { + if in == nil { + return nil + } + out := new(AzureFilesAuthenticationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureFilesAuthenticationParameters) DeepCopyInto(out *AzureFilesAuthenticationParameters) { + *out = *in + if in.ActiveDirectory != nil { + in, out := &in.ActiveDirectory, &out.ActiveDirectory + *out = new(ActiveDirectoryParameters) + (*in).DeepCopyInto(*out) + } + if in.DirectoryType != nil { + in, out := &in.DirectoryType, &out.DirectoryType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureFilesAuthenticationParameters. 
+func (in *AzureFilesAuthenticationParameters) DeepCopy() *AzureFilesAuthenticationParameters { + if in == nil { + return nil + } + out := new(AzureFilesAuthenticationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BaseBlobInitParameters) DeepCopyInto(out *BaseBlobInitParameters) { + *out = *in + if in.AutoTierToHotFromCoolEnabled != nil { + in, out := &in.AutoTierToHotFromCoolEnabled, &out.AutoTierToHotFromCoolEnabled + *out = new(bool) + **out = **in + } + if in.DeleteAfterDaysSinceCreationGreaterThan != nil { + in, out := &in.DeleteAfterDaysSinceCreationGreaterThan, &out.DeleteAfterDaysSinceCreationGreaterThan + *out = new(float64) + **out = **in + } + if in.DeleteAfterDaysSinceLastAccessTimeGreaterThan != nil { + in, out := &in.DeleteAfterDaysSinceLastAccessTimeGreaterThan, &out.DeleteAfterDaysSinceLastAccessTimeGreaterThan + *out = new(float64) + **out = **in + } + if in.DeleteAfterDaysSinceModificationGreaterThan != nil { + in, out := &in.DeleteAfterDaysSinceModificationGreaterThan, &out.DeleteAfterDaysSinceModificationGreaterThan + *out = new(float64) + **out = **in + } + if in.TierToArchiveAfterDaysSinceCreationGreaterThan != nil { + in, out := &in.TierToArchiveAfterDaysSinceCreationGreaterThan, &out.TierToArchiveAfterDaysSinceCreationGreaterThan + *out = new(float64) + **out = **in + } + if in.TierToArchiveAfterDaysSinceLastAccessTimeGreaterThan != nil { + in, out := &in.TierToArchiveAfterDaysSinceLastAccessTimeGreaterThan, &out.TierToArchiveAfterDaysSinceLastAccessTimeGreaterThan + *out = new(float64) + **out = **in + } + if in.TierToArchiveAfterDaysSinceLastTierChangeGreaterThan != nil { + in, out := &in.TierToArchiveAfterDaysSinceLastTierChangeGreaterThan, &out.TierToArchiveAfterDaysSinceLastTierChangeGreaterThan + *out = new(float64) + **out = **in + } + if in.TierToArchiveAfterDaysSinceModificationGreaterThan != nil { + 
in, out := &in.TierToArchiveAfterDaysSinceModificationGreaterThan, &out.TierToArchiveAfterDaysSinceModificationGreaterThan + *out = new(float64) + **out = **in + } + if in.TierToColdAfterDaysSinceCreationGreaterThan != nil { + in, out := &in.TierToColdAfterDaysSinceCreationGreaterThan, &out.TierToColdAfterDaysSinceCreationGreaterThan + *out = new(float64) + **out = **in + } + if in.TierToColdAfterDaysSinceLastAccessTimeGreaterThan != nil { + in, out := &in.TierToColdAfterDaysSinceLastAccessTimeGreaterThan, &out.TierToColdAfterDaysSinceLastAccessTimeGreaterThan + *out = new(float64) + **out = **in + } + if in.TierToColdAfterDaysSinceModificationGreaterThan != nil { + in, out := &in.TierToColdAfterDaysSinceModificationGreaterThan, &out.TierToColdAfterDaysSinceModificationGreaterThan + *out = new(float64) + **out = **in + } + if in.TierToCoolAfterDaysSinceCreationGreaterThan != nil { + in, out := &in.TierToCoolAfterDaysSinceCreationGreaterThan, &out.TierToCoolAfterDaysSinceCreationGreaterThan + *out = new(float64) + **out = **in + } + if in.TierToCoolAfterDaysSinceLastAccessTimeGreaterThan != nil { + in, out := &in.TierToCoolAfterDaysSinceLastAccessTimeGreaterThan, &out.TierToCoolAfterDaysSinceLastAccessTimeGreaterThan + *out = new(float64) + **out = **in + } + if in.TierToCoolAfterDaysSinceModificationGreaterThan != nil { + in, out := &in.TierToCoolAfterDaysSinceModificationGreaterThan, &out.TierToCoolAfterDaysSinceModificationGreaterThan + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BaseBlobInitParameters. +func (in *BaseBlobInitParameters) DeepCopy() *BaseBlobInitParameters { + if in == nil { + return nil + } + out := new(BaseBlobInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BaseBlobObservation) DeepCopyInto(out *BaseBlobObservation) { + *out = *in + if in.AutoTierToHotFromCoolEnabled != nil { + in, out := &in.AutoTierToHotFromCoolEnabled, &out.AutoTierToHotFromCoolEnabled + *out = new(bool) + **out = **in + } + if in.DeleteAfterDaysSinceCreationGreaterThan != nil { + in, out := &in.DeleteAfterDaysSinceCreationGreaterThan, &out.DeleteAfterDaysSinceCreationGreaterThan + *out = new(float64) + **out = **in + } + if in.DeleteAfterDaysSinceLastAccessTimeGreaterThan != nil { + in, out := &in.DeleteAfterDaysSinceLastAccessTimeGreaterThan, &out.DeleteAfterDaysSinceLastAccessTimeGreaterThan + *out = new(float64) + **out = **in + } + if in.DeleteAfterDaysSinceModificationGreaterThan != nil { + in, out := &in.DeleteAfterDaysSinceModificationGreaterThan, &out.DeleteAfterDaysSinceModificationGreaterThan + *out = new(float64) + **out = **in + } + if in.TierToArchiveAfterDaysSinceCreationGreaterThan != nil { + in, out := &in.TierToArchiveAfterDaysSinceCreationGreaterThan, &out.TierToArchiveAfterDaysSinceCreationGreaterThan + *out = new(float64) + **out = **in + } + if in.TierToArchiveAfterDaysSinceLastAccessTimeGreaterThan != nil { + in, out := &in.TierToArchiveAfterDaysSinceLastAccessTimeGreaterThan, &out.TierToArchiveAfterDaysSinceLastAccessTimeGreaterThan + *out = new(float64) + **out = **in + } + if in.TierToArchiveAfterDaysSinceLastTierChangeGreaterThan != nil { + in, out := &in.TierToArchiveAfterDaysSinceLastTierChangeGreaterThan, &out.TierToArchiveAfterDaysSinceLastTierChangeGreaterThan + *out = new(float64) + **out = **in + } + if in.TierToArchiveAfterDaysSinceModificationGreaterThan != nil { + in, out := &in.TierToArchiveAfterDaysSinceModificationGreaterThan, &out.TierToArchiveAfterDaysSinceModificationGreaterThan + *out = new(float64) + **out = **in + } + if in.TierToColdAfterDaysSinceCreationGreaterThan != nil { + in, out := &in.TierToColdAfterDaysSinceCreationGreaterThan, &out.TierToColdAfterDaysSinceCreationGreaterThan + *out 
= new(float64) + **out = **in + } + if in.TierToColdAfterDaysSinceLastAccessTimeGreaterThan != nil { + in, out := &in.TierToColdAfterDaysSinceLastAccessTimeGreaterThan, &out.TierToColdAfterDaysSinceLastAccessTimeGreaterThan + *out = new(float64) + **out = **in + } + if in.TierToColdAfterDaysSinceModificationGreaterThan != nil { + in, out := &in.TierToColdAfterDaysSinceModificationGreaterThan, &out.TierToColdAfterDaysSinceModificationGreaterThan + *out = new(float64) + **out = **in + } + if in.TierToCoolAfterDaysSinceCreationGreaterThan != nil { + in, out := &in.TierToCoolAfterDaysSinceCreationGreaterThan, &out.TierToCoolAfterDaysSinceCreationGreaterThan + *out = new(float64) + **out = **in + } + if in.TierToCoolAfterDaysSinceLastAccessTimeGreaterThan != nil { + in, out := &in.TierToCoolAfterDaysSinceLastAccessTimeGreaterThan, &out.TierToCoolAfterDaysSinceLastAccessTimeGreaterThan + *out = new(float64) + **out = **in + } + if in.TierToCoolAfterDaysSinceModificationGreaterThan != nil { + in, out := &in.TierToCoolAfterDaysSinceModificationGreaterThan, &out.TierToCoolAfterDaysSinceModificationGreaterThan + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BaseBlobObservation. +func (in *BaseBlobObservation) DeepCopy() *BaseBlobObservation { + if in == nil { + return nil + } + out := new(BaseBlobObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BaseBlobParameters) DeepCopyInto(out *BaseBlobParameters) { + *out = *in + if in.AutoTierToHotFromCoolEnabled != nil { + in, out := &in.AutoTierToHotFromCoolEnabled, &out.AutoTierToHotFromCoolEnabled + *out = new(bool) + **out = **in + } + if in.DeleteAfterDaysSinceCreationGreaterThan != nil { + in, out := &in.DeleteAfterDaysSinceCreationGreaterThan, &out.DeleteAfterDaysSinceCreationGreaterThan + *out = new(float64) + **out = **in + } + if in.DeleteAfterDaysSinceLastAccessTimeGreaterThan != nil { + in, out := &in.DeleteAfterDaysSinceLastAccessTimeGreaterThan, &out.DeleteAfterDaysSinceLastAccessTimeGreaterThan + *out = new(float64) + **out = **in + } + if in.DeleteAfterDaysSinceModificationGreaterThan != nil { + in, out := &in.DeleteAfterDaysSinceModificationGreaterThan, &out.DeleteAfterDaysSinceModificationGreaterThan + *out = new(float64) + **out = **in + } + if in.TierToArchiveAfterDaysSinceCreationGreaterThan != nil { + in, out := &in.TierToArchiveAfterDaysSinceCreationGreaterThan, &out.TierToArchiveAfterDaysSinceCreationGreaterThan + *out = new(float64) + **out = **in + } + if in.TierToArchiveAfterDaysSinceLastAccessTimeGreaterThan != nil { + in, out := &in.TierToArchiveAfterDaysSinceLastAccessTimeGreaterThan, &out.TierToArchiveAfterDaysSinceLastAccessTimeGreaterThan + *out = new(float64) + **out = **in + } + if in.TierToArchiveAfterDaysSinceLastTierChangeGreaterThan != nil { + in, out := &in.TierToArchiveAfterDaysSinceLastTierChangeGreaterThan, &out.TierToArchiveAfterDaysSinceLastTierChangeGreaterThan + *out = new(float64) + **out = **in + } + if in.TierToArchiveAfterDaysSinceModificationGreaterThan != nil { + in, out := &in.TierToArchiveAfterDaysSinceModificationGreaterThan, &out.TierToArchiveAfterDaysSinceModificationGreaterThan + *out = new(float64) + **out = **in + } + if in.TierToColdAfterDaysSinceCreationGreaterThan != nil { + in, out := &in.TierToColdAfterDaysSinceCreationGreaterThan, &out.TierToColdAfterDaysSinceCreationGreaterThan + *out = 
new(float64) + **out = **in + } + if in.TierToColdAfterDaysSinceLastAccessTimeGreaterThan != nil { + in, out := &in.TierToColdAfterDaysSinceLastAccessTimeGreaterThan, &out.TierToColdAfterDaysSinceLastAccessTimeGreaterThan + *out = new(float64) + **out = **in + } + if in.TierToColdAfterDaysSinceModificationGreaterThan != nil { + in, out := &in.TierToColdAfterDaysSinceModificationGreaterThan, &out.TierToColdAfterDaysSinceModificationGreaterThan + *out = new(float64) + **out = **in + } + if in.TierToCoolAfterDaysSinceCreationGreaterThan != nil { + in, out := &in.TierToCoolAfterDaysSinceCreationGreaterThan, &out.TierToCoolAfterDaysSinceCreationGreaterThan + *out = new(float64) + **out = **in + } + if in.TierToCoolAfterDaysSinceLastAccessTimeGreaterThan != nil { + in, out := &in.TierToCoolAfterDaysSinceLastAccessTimeGreaterThan, &out.TierToCoolAfterDaysSinceLastAccessTimeGreaterThan + *out = new(float64) + **out = **in + } + if in.TierToCoolAfterDaysSinceModificationGreaterThan != nil { + in, out := &in.TierToCoolAfterDaysSinceModificationGreaterThan, &out.TierToCoolAfterDaysSinceModificationGreaterThan + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BaseBlobParameters. +func (in *BaseBlobParameters) DeepCopy() *BaseBlobParameters { + if in == nil { + return nil + } + out := new(BaseBlobParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BlobInventoryPolicy) DeepCopyInto(out *BlobInventoryPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlobInventoryPolicy. 
+func (in *BlobInventoryPolicy) DeepCopy() *BlobInventoryPolicy { + if in == nil { + return nil + } + out := new(BlobInventoryPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BlobInventoryPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BlobInventoryPolicyInitParameters) DeepCopyInto(out *BlobInventoryPolicyInitParameters) { + *out = *in + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]RulesInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StorageAccountID != nil { + in, out := &in.StorageAccountID, &out.StorageAccountID + *out = new(string) + **out = **in + } + if in.StorageAccountIDRef != nil { + in, out := &in.StorageAccountIDRef, &out.StorageAccountIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StorageAccountIDSelector != nil { + in, out := &in.StorageAccountIDSelector, &out.StorageAccountIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlobInventoryPolicyInitParameters. +func (in *BlobInventoryPolicyInitParameters) DeepCopy() *BlobInventoryPolicyInitParameters { + if in == nil { + return nil + } + out := new(BlobInventoryPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BlobInventoryPolicyList) DeepCopyInto(out *BlobInventoryPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BlobInventoryPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlobInventoryPolicyList. +func (in *BlobInventoryPolicyList) DeepCopy() *BlobInventoryPolicyList { + if in == nil { + return nil + } + out := new(BlobInventoryPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BlobInventoryPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BlobInventoryPolicyObservation) DeepCopyInto(out *BlobInventoryPolicyObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]RulesObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StorageAccountID != nil { + in, out := &in.StorageAccountID, &out.StorageAccountID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlobInventoryPolicyObservation. +func (in *BlobInventoryPolicyObservation) DeepCopy() *BlobInventoryPolicyObservation { + if in == nil { + return nil + } + out := new(BlobInventoryPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BlobInventoryPolicyParameters) DeepCopyInto(out *BlobInventoryPolicyParameters) { + *out = *in + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]RulesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StorageAccountID != nil { + in, out := &in.StorageAccountID, &out.StorageAccountID + *out = new(string) + **out = **in + } + if in.StorageAccountIDRef != nil { + in, out := &in.StorageAccountIDRef, &out.StorageAccountIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StorageAccountIDSelector != nil { + in, out := &in.StorageAccountIDSelector, &out.StorageAccountIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlobInventoryPolicyParameters. +func (in *BlobInventoryPolicyParameters) DeepCopy() *BlobInventoryPolicyParameters { + if in == nil { + return nil + } + out := new(BlobInventoryPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BlobInventoryPolicySpec) DeepCopyInto(out *BlobInventoryPolicySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlobInventoryPolicySpec. +func (in *BlobInventoryPolicySpec) DeepCopy() *BlobInventoryPolicySpec { + if in == nil { + return nil + } + out := new(BlobInventoryPolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BlobInventoryPolicyStatus) DeepCopyInto(out *BlobInventoryPolicyStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlobInventoryPolicyStatus. +func (in *BlobInventoryPolicyStatus) DeepCopy() *BlobInventoryPolicyStatus { + if in == nil { + return nil + } + out := new(BlobInventoryPolicyStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BlobPropertiesInitParameters) DeepCopyInto(out *BlobPropertiesInitParameters) { + *out = *in + if in.ChangeFeedEnabled != nil { + in, out := &in.ChangeFeedEnabled, &out.ChangeFeedEnabled + *out = new(bool) + **out = **in + } + if in.ChangeFeedRetentionInDays != nil { + in, out := &in.ChangeFeedRetentionInDays, &out.ChangeFeedRetentionInDays + *out = new(float64) + **out = **in + } + if in.ContainerDeleteRetentionPolicy != nil { + in, out := &in.ContainerDeleteRetentionPolicy, &out.ContainerDeleteRetentionPolicy + *out = new(ContainerDeleteRetentionPolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CorsRule != nil { + in, out := &in.CorsRule, &out.CorsRule + *out = make([]CorsRuleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultServiceVersion != nil { + in, out := &in.DefaultServiceVersion, &out.DefaultServiceVersion + *out = new(string) + **out = **in + } + if in.DeleteRetentionPolicy != nil { + in, out := &in.DeleteRetentionPolicy, &out.DeleteRetentionPolicy + *out = new(DeleteRetentionPolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LastAccessTimeEnabled != nil { + in, out := &in.LastAccessTimeEnabled, &out.LastAccessTimeEnabled + *out = new(bool) + **out = **in + } + if in.RestorePolicy != nil { + in, out := &in.RestorePolicy, &out.RestorePolicy + *out = 
new(RestorePolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.VersioningEnabled != nil { + in, out := &in.VersioningEnabled, &out.VersioningEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlobPropertiesInitParameters. +func (in *BlobPropertiesInitParameters) DeepCopy() *BlobPropertiesInitParameters { + if in == nil { + return nil + } + out := new(BlobPropertiesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BlobPropertiesObservation) DeepCopyInto(out *BlobPropertiesObservation) { + *out = *in + if in.ChangeFeedEnabled != nil { + in, out := &in.ChangeFeedEnabled, &out.ChangeFeedEnabled + *out = new(bool) + **out = **in + } + if in.ChangeFeedRetentionInDays != nil { + in, out := &in.ChangeFeedRetentionInDays, &out.ChangeFeedRetentionInDays + *out = new(float64) + **out = **in + } + if in.ContainerDeleteRetentionPolicy != nil { + in, out := &in.ContainerDeleteRetentionPolicy, &out.ContainerDeleteRetentionPolicy + *out = new(ContainerDeleteRetentionPolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.CorsRule != nil { + in, out := &in.CorsRule, &out.CorsRule + *out = make([]CorsRuleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultServiceVersion != nil { + in, out := &in.DefaultServiceVersion, &out.DefaultServiceVersion + *out = new(string) + **out = **in + } + if in.DeleteRetentionPolicy != nil { + in, out := &in.DeleteRetentionPolicy, &out.DeleteRetentionPolicy + *out = new(DeleteRetentionPolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.LastAccessTimeEnabled != nil { + in, out := &in.LastAccessTimeEnabled, &out.LastAccessTimeEnabled + *out = new(bool) + **out = **in + } + if in.RestorePolicy != nil { + in, out := &in.RestorePolicy, &out.RestorePolicy + *out = 
new(RestorePolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.VersioningEnabled != nil { + in, out := &in.VersioningEnabled, &out.VersioningEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlobPropertiesObservation. +func (in *BlobPropertiesObservation) DeepCopy() *BlobPropertiesObservation { + if in == nil { + return nil + } + out := new(BlobPropertiesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BlobPropertiesParameters) DeepCopyInto(out *BlobPropertiesParameters) { + *out = *in + if in.ChangeFeedEnabled != nil { + in, out := &in.ChangeFeedEnabled, &out.ChangeFeedEnabled + *out = new(bool) + **out = **in + } + if in.ChangeFeedRetentionInDays != nil { + in, out := &in.ChangeFeedRetentionInDays, &out.ChangeFeedRetentionInDays + *out = new(float64) + **out = **in + } + if in.ContainerDeleteRetentionPolicy != nil { + in, out := &in.ContainerDeleteRetentionPolicy, &out.ContainerDeleteRetentionPolicy + *out = new(ContainerDeleteRetentionPolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.CorsRule != nil { + in, out := &in.CorsRule, &out.CorsRule + *out = make([]CorsRuleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultServiceVersion != nil { + in, out := &in.DefaultServiceVersion, &out.DefaultServiceVersion + *out = new(string) + **out = **in + } + if in.DeleteRetentionPolicy != nil { + in, out := &in.DeleteRetentionPolicy, &out.DeleteRetentionPolicy + *out = new(DeleteRetentionPolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.LastAccessTimeEnabled != nil { + in, out := &in.LastAccessTimeEnabled, &out.LastAccessTimeEnabled + *out = new(bool) + **out = **in + } + if in.RestorePolicy != nil { + in, out := &in.RestorePolicy, &out.RestorePolicy + *out = 
new(RestorePolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.VersioningEnabled != nil { + in, out := &in.VersioningEnabled, &out.VersioningEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlobPropertiesParameters. +func (in *BlobPropertiesParameters) DeepCopy() *BlobPropertiesParameters { + if in == nil { + return nil + } + out := new(BlobPropertiesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerDeleteRetentionPolicyInitParameters) DeepCopyInto(out *ContainerDeleteRetentionPolicyInitParameters) { + *out = *in + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerDeleteRetentionPolicyInitParameters. +func (in *ContainerDeleteRetentionPolicyInitParameters) DeepCopy() *ContainerDeleteRetentionPolicyInitParameters { + if in == nil { + return nil + } + out := new(ContainerDeleteRetentionPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerDeleteRetentionPolicyObservation) DeepCopyInto(out *ContainerDeleteRetentionPolicyObservation) { + *out = *in + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerDeleteRetentionPolicyObservation. 
+func (in *ContainerDeleteRetentionPolicyObservation) DeepCopy() *ContainerDeleteRetentionPolicyObservation { + if in == nil { + return nil + } + out := new(ContainerDeleteRetentionPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerDeleteRetentionPolicyParameters) DeepCopyInto(out *ContainerDeleteRetentionPolicyParameters) { + *out = *in + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerDeleteRetentionPolicyParameters. +func (in *ContainerDeleteRetentionPolicyParameters) DeepCopy() *ContainerDeleteRetentionPolicyParameters { + if in == nil { + return nil + } + out := new(ContainerDeleteRetentionPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CorsRuleInitParameters) DeepCopyInto(out *CorsRuleInitParameters) { + *out = *in + if in.AllowedHeaders != nil { + in, out := &in.AllowedHeaders, &out.AllowedHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedMethods != nil { + in, out := &in.AllowedMethods, &out.AllowedMethods + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedOrigins != nil { + in, out := &in.AllowedOrigins, &out.AllowedOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ExposedHeaders != nil { + in, out := &in.ExposedHeaders, &out.ExposedHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MaxAgeInSeconds != nil { + in, out := &in.MaxAgeInSeconds, &out.MaxAgeInSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CorsRuleInitParameters. +func (in *CorsRuleInitParameters) DeepCopy() *CorsRuleInitParameters { + if in == nil { + return nil + } + out := new(CorsRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CorsRuleObservation) DeepCopyInto(out *CorsRuleObservation) { + *out = *in + if in.AllowedHeaders != nil { + in, out := &in.AllowedHeaders, &out.AllowedHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedMethods != nil { + in, out := &in.AllowedMethods, &out.AllowedMethods + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedOrigins != nil { + in, out := &in.AllowedOrigins, &out.AllowedOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ExposedHeaders != nil { + in, out := &in.ExposedHeaders, &out.ExposedHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MaxAgeInSeconds != nil { + in, out := &in.MaxAgeInSeconds, &out.MaxAgeInSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CorsRuleObservation. +func (in *CorsRuleObservation) DeepCopy() *CorsRuleObservation { + if in == nil { + return nil + } + out := new(CorsRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CorsRuleParameters) DeepCopyInto(out *CorsRuleParameters) { + *out = *in + if in.AllowedHeaders != nil { + in, out := &in.AllowedHeaders, &out.AllowedHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedMethods != nil { + in, out := &in.AllowedMethods, &out.AllowedMethods + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedOrigins != nil { + in, out := &in.AllowedOrigins, &out.AllowedOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ExposedHeaders != nil { + in, out := &in.ExposedHeaders, &out.ExposedHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MaxAgeInSeconds != nil { + in, out := &in.MaxAgeInSeconds, &out.MaxAgeInSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CorsRuleParameters. +func (in *CorsRuleParameters) DeepCopy() *CorsRuleParameters { + if in == nil { + return nil + } + out := new(CorsRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomDomainInitParameters) DeepCopyInto(out *CustomDomainInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.UseSubdomain != nil { + in, out := &in.UseSubdomain, &out.UseSubdomain + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomDomainInitParameters. +func (in *CustomDomainInitParameters) DeepCopy() *CustomDomainInitParameters { + if in == nil { + return nil + } + out := new(CustomDomainInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomDomainObservation) DeepCopyInto(out *CustomDomainObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.UseSubdomain != nil { + in, out := &in.UseSubdomain, &out.UseSubdomain + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomDomainObservation. +func (in *CustomDomainObservation) DeepCopy() *CustomDomainObservation { + if in == nil { + return nil + } + out := new(CustomDomainObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomDomainParameters) DeepCopyInto(out *CustomDomainParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.UseSubdomain != nil { + in, out := &in.UseSubdomain, &out.UseSubdomain + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomDomainParameters. 
+func (in *CustomDomainParameters) DeepCopy() *CustomDomainParameters { + if in == nil { + return nil + } + out := new(CustomDomainParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomerManagedKeyInitParameters) DeepCopyInto(out *CustomerManagedKeyInitParameters) { + *out = *in + if in.KeyVaultKeyID != nil { + in, out := &in.KeyVaultKeyID, &out.KeyVaultKeyID + *out = new(string) + **out = **in + } + if in.UserAssignedIdentityID != nil { + in, out := &in.UserAssignedIdentityID, &out.UserAssignedIdentityID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomerManagedKeyInitParameters. +func (in *CustomerManagedKeyInitParameters) DeepCopy() *CustomerManagedKeyInitParameters { + if in == nil { + return nil + } + out := new(CustomerManagedKeyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomerManagedKeyObservation) DeepCopyInto(out *CustomerManagedKeyObservation) { + *out = *in + if in.KeyVaultKeyID != nil { + in, out := &in.KeyVaultKeyID, &out.KeyVaultKeyID + *out = new(string) + **out = **in + } + if in.UserAssignedIdentityID != nil { + in, out := &in.UserAssignedIdentityID, &out.UserAssignedIdentityID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomerManagedKeyObservation. +func (in *CustomerManagedKeyObservation) DeepCopy() *CustomerManagedKeyObservation { + if in == nil { + return nil + } + out := new(CustomerManagedKeyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomerManagedKeyParameters) DeepCopyInto(out *CustomerManagedKeyParameters) { + *out = *in + if in.KeyVaultKeyID != nil { + in, out := &in.KeyVaultKeyID, &out.KeyVaultKeyID + *out = new(string) + **out = **in + } + if in.UserAssignedIdentityID != nil { + in, out := &in.UserAssignedIdentityID, &out.UserAssignedIdentityID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomerManagedKeyParameters. +func (in *CustomerManagedKeyParameters) DeepCopy() *CustomerManagedKeyParameters { + if in == nil { + return nil + } + out := new(CustomerManagedKeyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeleteRetentionPolicyInitParameters) DeepCopyInto(out *DeleteRetentionPolicyInitParameters) { + *out = *in + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeleteRetentionPolicyInitParameters. +func (in *DeleteRetentionPolicyInitParameters) DeepCopy() *DeleteRetentionPolicyInitParameters { + if in == nil { + return nil + } + out := new(DeleteRetentionPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeleteRetentionPolicyObservation) DeepCopyInto(out *DeleteRetentionPolicyObservation) { + *out = *in + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeleteRetentionPolicyObservation. 
+func (in *DeleteRetentionPolicyObservation) DeepCopy() *DeleteRetentionPolicyObservation { + if in == nil { + return nil + } + out := new(DeleteRetentionPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeleteRetentionPolicyParameters) DeepCopyInto(out *DeleteRetentionPolicyParameters) { + *out = *in + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeleteRetentionPolicyParameters. +func (in *DeleteRetentionPolicyParameters) DeepCopy() *DeleteRetentionPolicyParameters { + if in == nil { + return nil + } + out := new(DeleteRetentionPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterInitParameters) DeepCopyInto(out *FilterInitParameters) { + *out = *in + if in.BlobTypes != nil { + in, out := &in.BlobTypes, &out.BlobTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ExcludePrefixes != nil { + in, out := &in.ExcludePrefixes, &out.ExcludePrefixes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IncludeBlobVersions != nil { + in, out := &in.IncludeBlobVersions, &out.IncludeBlobVersions + *out = new(bool) + **out = **in + } + if in.IncludeDeleted != nil { + in, out := &in.IncludeDeleted, &out.IncludeDeleted + *out = new(bool) + **out = **in + } + if in.IncludeSnapshots != nil { + in, out := &in.IncludeSnapshots, &out.IncludeSnapshots + *out = new(bool) + **out = **in + } + if in.PrefixMatch != nil { + in, out := 
&in.PrefixMatch, &out.PrefixMatch + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterInitParameters. +func (in *FilterInitParameters) DeepCopy() *FilterInitParameters { + if in == nil { + return nil + } + out := new(FilterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterObservation) DeepCopyInto(out *FilterObservation) { + *out = *in + if in.BlobTypes != nil { + in, out := &in.BlobTypes, &out.BlobTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ExcludePrefixes != nil { + in, out := &in.ExcludePrefixes, &out.ExcludePrefixes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IncludeBlobVersions != nil { + in, out := &in.IncludeBlobVersions, &out.IncludeBlobVersions + *out = new(bool) + **out = **in + } + if in.IncludeDeleted != nil { + in, out := &in.IncludeDeleted, &out.IncludeDeleted + *out = new(bool) + **out = **in + } + if in.IncludeSnapshots != nil { + in, out := &in.IncludeSnapshots, &out.IncludeSnapshots + *out = new(bool) + **out = **in + } + if in.PrefixMatch != nil { + in, out := &in.PrefixMatch, &out.PrefixMatch + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterObservation. 
+func (in *FilterObservation) DeepCopy() *FilterObservation { + if in == nil { + return nil + } + out := new(FilterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilterParameters) DeepCopyInto(out *FilterParameters) { + *out = *in + if in.BlobTypes != nil { + in, out := &in.BlobTypes, &out.BlobTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ExcludePrefixes != nil { + in, out := &in.ExcludePrefixes, &out.ExcludePrefixes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IncludeBlobVersions != nil { + in, out := &in.IncludeBlobVersions, &out.IncludeBlobVersions + *out = new(bool) + **out = **in + } + if in.IncludeDeleted != nil { + in, out := &in.IncludeDeleted, &out.IncludeDeleted + *out = new(bool) + **out = **in + } + if in.IncludeSnapshots != nil { + in, out := &in.IncludeSnapshots, &out.IncludeSnapshots + *out = new(bool) + **out = **in + } + if in.PrefixMatch != nil { + in, out := &in.PrefixMatch, &out.PrefixMatch + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterParameters. +func (in *FilterParameters) DeepCopy() *FilterParameters { + if in == nil { + return nil + } + out := new(FilterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FiltersInitParameters) DeepCopyInto(out *FiltersInitParameters) { + *out = *in + if in.BlobTypes != nil { + in, out := &in.BlobTypes, &out.BlobTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MatchBlobIndexTag != nil { + in, out := &in.MatchBlobIndexTag, &out.MatchBlobIndexTag + *out = make([]MatchBlobIndexTagInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PrefixMatch != nil { + in, out := &in.PrefixMatch, &out.PrefixMatch + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FiltersInitParameters. +func (in *FiltersInitParameters) DeepCopy() *FiltersInitParameters { + if in == nil { + return nil + } + out := new(FiltersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FiltersObservation) DeepCopyInto(out *FiltersObservation) { + *out = *in + if in.BlobTypes != nil { + in, out := &in.BlobTypes, &out.BlobTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MatchBlobIndexTag != nil { + in, out := &in.MatchBlobIndexTag, &out.MatchBlobIndexTag + *out = make([]MatchBlobIndexTagObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PrefixMatch != nil { + in, out := &in.PrefixMatch, &out.PrefixMatch + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FiltersObservation. +func (in *FiltersObservation) DeepCopy() *FiltersObservation { + if in == nil { + return nil + } + out := new(FiltersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FiltersParameters) DeepCopyInto(out *FiltersParameters) { + *out = *in + if in.BlobTypes != nil { + in, out := &in.BlobTypes, &out.BlobTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MatchBlobIndexTag != nil { + in, out := &in.MatchBlobIndexTag, &out.MatchBlobIndexTag + *out = make([]MatchBlobIndexTagParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PrefixMatch != nil { + in, out := &in.PrefixMatch, &out.PrefixMatch + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FiltersParameters. +func (in *FiltersParameters) DeepCopy() *FiltersParameters { + if in == nil { + return nil + } + out := new(FiltersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HourMetricsInitParameters) DeepCopyInto(out *HourMetricsInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.IncludeApis != nil { + in, out := &in.IncludeApis, &out.IncludeApis + *out = new(bool) + **out = **in + } + if in.RetentionPolicyDays != nil { + in, out := &in.RetentionPolicyDays, &out.RetentionPolicyDays + *out = new(float64) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HourMetricsInitParameters. 
+func (in *HourMetricsInitParameters) DeepCopy() *HourMetricsInitParameters { + if in == nil { + return nil + } + out := new(HourMetricsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HourMetricsObservation) DeepCopyInto(out *HourMetricsObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.IncludeApis != nil { + in, out := &in.IncludeApis, &out.IncludeApis + *out = new(bool) + **out = **in + } + if in.RetentionPolicyDays != nil { + in, out := &in.RetentionPolicyDays, &out.RetentionPolicyDays + *out = new(float64) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HourMetricsObservation. +func (in *HourMetricsObservation) DeepCopy() *HourMetricsObservation { + if in == nil { + return nil + } + out := new(HourMetricsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HourMetricsParameters) DeepCopyInto(out *HourMetricsParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.IncludeApis != nil { + in, out := &in.IncludeApis, &out.IncludeApis + *out = new(bool) + **out = **in + } + if in.RetentionPolicyDays != nil { + in, out := &in.RetentionPolicyDays, &out.RetentionPolicyDays + *out = new(float64) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HourMetricsParameters. 
+func (in *HourMetricsParameters) DeepCopy() *HourMetricsParameters { + if in == nil { + return nil + } + out := new(HourMetricsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityInitParameters) DeepCopyInto(out *IdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityInitParameters. +func (in *IdentityInitParameters) DeepCopy() *IdentityInitParameters { + if in == nil { + return nil + } + out := new(IdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityObservation) DeepCopyInto(out *IdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityObservation. 
+func (in *IdentityObservation) DeepCopy() *IdentityObservation { + if in == nil { + return nil + } + out := new(IdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityParameters) DeepCopyInto(out *IdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityParameters. +func (in *IdentityParameters) DeepCopy() *IdentityParameters { + if in == nil { + return nil + } + out := new(IdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImmutabilityPolicyInitParameters) DeepCopyInto(out *ImmutabilityPolicyInitParameters) { + *out = *in + if in.AllowProtectedAppendWrites != nil { + in, out := &in.AllowProtectedAppendWrites, &out.AllowProtectedAppendWrites + *out = new(bool) + **out = **in + } + if in.PeriodSinceCreationInDays != nil { + in, out := &in.PeriodSinceCreationInDays, &out.PeriodSinceCreationInDays + *out = new(float64) + **out = **in + } + if in.State != nil { + in, out := &in.State, &out.State + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImmutabilityPolicyInitParameters. 
+func (in *ImmutabilityPolicyInitParameters) DeepCopy() *ImmutabilityPolicyInitParameters { + if in == nil { + return nil + } + out := new(ImmutabilityPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImmutabilityPolicyObservation) DeepCopyInto(out *ImmutabilityPolicyObservation) { + *out = *in + if in.AllowProtectedAppendWrites != nil { + in, out := &in.AllowProtectedAppendWrites, &out.AllowProtectedAppendWrites + *out = new(bool) + **out = **in + } + if in.PeriodSinceCreationInDays != nil { + in, out := &in.PeriodSinceCreationInDays, &out.PeriodSinceCreationInDays + *out = new(float64) + **out = **in + } + if in.State != nil { + in, out := &in.State, &out.State + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImmutabilityPolicyObservation. +func (in *ImmutabilityPolicyObservation) DeepCopy() *ImmutabilityPolicyObservation { + if in == nil { + return nil + } + out := new(ImmutabilityPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImmutabilityPolicyParameters) DeepCopyInto(out *ImmutabilityPolicyParameters) { + *out = *in + if in.AllowProtectedAppendWrites != nil { + in, out := &in.AllowProtectedAppendWrites, &out.AllowProtectedAppendWrites + *out = new(bool) + **out = **in + } + if in.PeriodSinceCreationInDays != nil { + in, out := &in.PeriodSinceCreationInDays, &out.PeriodSinceCreationInDays + *out = new(float64) + **out = **in + } + if in.State != nil { + in, out := &in.State, &out.State + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImmutabilityPolicyParameters. 
+func (in *ImmutabilityPolicyParameters) DeepCopy() *ImmutabilityPolicyParameters { + if in == nil { + return nil + } + out := new(ImmutabilityPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingInitParameters) DeepCopyInto(out *LoggingInitParameters) { + *out = *in + if in.Delete != nil { + in, out := &in.Delete, &out.Delete + *out = new(bool) + **out = **in + } + if in.Read != nil { + in, out := &in.Read, &out.Read + *out = new(bool) + **out = **in + } + if in.RetentionPolicyDays != nil { + in, out := &in.RetentionPolicyDays, &out.RetentionPolicyDays + *out = new(float64) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } + if in.Write != nil { + in, out := &in.Write, &out.Write + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingInitParameters. +func (in *LoggingInitParameters) DeepCopy() *LoggingInitParameters { + if in == nil { + return nil + } + out := new(LoggingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LoggingObservation) DeepCopyInto(out *LoggingObservation) { + *out = *in + if in.Delete != nil { + in, out := &in.Delete, &out.Delete + *out = new(bool) + **out = **in + } + if in.Read != nil { + in, out := &in.Read, &out.Read + *out = new(bool) + **out = **in + } + if in.RetentionPolicyDays != nil { + in, out := &in.RetentionPolicyDays, &out.RetentionPolicyDays + *out = new(float64) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } + if in.Write != nil { + in, out := &in.Write, &out.Write + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingObservation. +func (in *LoggingObservation) DeepCopy() *LoggingObservation { + if in == nil { + return nil + } + out := new(LoggingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingParameters) DeepCopyInto(out *LoggingParameters) { + *out = *in + if in.Delete != nil { + in, out := &in.Delete, &out.Delete + *out = new(bool) + **out = **in + } + if in.Read != nil { + in, out := &in.Read, &out.Read + *out = new(bool) + **out = **in + } + if in.RetentionPolicyDays != nil { + in, out := &in.RetentionPolicyDays, &out.RetentionPolicyDays + *out = new(float64) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } + if in.Write != nil { + in, out := &in.Write, &out.Write + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingParameters. 
+func (in *LoggingParameters) DeepCopy() *LoggingParameters { + if in == nil { + return nil + } + out := new(LoggingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagementPolicy) DeepCopyInto(out *ManagementPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagementPolicy. +func (in *ManagementPolicy) DeepCopy() *ManagementPolicy { + if in == nil { + return nil + } + out := new(ManagementPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ManagementPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ManagementPolicyInitParameters) DeepCopyInto(out *ManagementPolicyInitParameters) { + *out = *in + if in.Rule != nil { + in, out := &in.Rule, &out.Rule + *out = make([]RuleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StorageAccountID != nil { + in, out := &in.StorageAccountID, &out.StorageAccountID + *out = new(string) + **out = **in + } + if in.StorageAccountIDRef != nil { + in, out := &in.StorageAccountIDRef, &out.StorageAccountIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StorageAccountIDSelector != nil { + in, out := &in.StorageAccountIDSelector, &out.StorageAccountIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagementPolicyInitParameters. +func (in *ManagementPolicyInitParameters) DeepCopy() *ManagementPolicyInitParameters { + if in == nil { + return nil + } + out := new(ManagementPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagementPolicyList) DeepCopyInto(out *ManagementPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ManagementPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagementPolicyList. +func (in *ManagementPolicyList) DeepCopy() *ManagementPolicyList { + if in == nil { + return nil + } + out := new(ManagementPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ManagementPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagementPolicyObservation) DeepCopyInto(out *ManagementPolicyObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Rule != nil { + in, out := &in.Rule, &out.Rule + *out = make([]RuleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StorageAccountID != nil { + in, out := &in.StorageAccountID, &out.StorageAccountID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagementPolicyObservation. +func (in *ManagementPolicyObservation) DeepCopy() *ManagementPolicyObservation { + if in == nil { + return nil + } + out := new(ManagementPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ManagementPolicyParameters) DeepCopyInto(out *ManagementPolicyParameters) { + *out = *in + if in.Rule != nil { + in, out := &in.Rule, &out.Rule + *out = make([]RuleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StorageAccountID != nil { + in, out := &in.StorageAccountID, &out.StorageAccountID + *out = new(string) + **out = **in + } + if in.StorageAccountIDRef != nil { + in, out := &in.StorageAccountIDRef, &out.StorageAccountIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StorageAccountIDSelector != nil { + in, out := &in.StorageAccountIDSelector, &out.StorageAccountIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagementPolicyParameters. +func (in *ManagementPolicyParameters) DeepCopy() *ManagementPolicyParameters { + if in == nil { + return nil + } + out := new(ManagementPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagementPolicySpec) DeepCopyInto(out *ManagementPolicySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagementPolicySpec. +func (in *ManagementPolicySpec) DeepCopy() *ManagementPolicySpec { + if in == nil { + return nil + } + out := new(ManagementPolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ManagementPolicyStatus) DeepCopyInto(out *ManagementPolicyStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagementPolicyStatus. +func (in *ManagementPolicyStatus) DeepCopy() *ManagementPolicyStatus { + if in == nil { + return nil + } + out := new(ManagementPolicyStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MatchBlobIndexTagInitParameters) DeepCopyInto(out *MatchBlobIndexTagInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Operation != nil { + in, out := &in.Operation, &out.Operation + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchBlobIndexTagInitParameters. +func (in *MatchBlobIndexTagInitParameters) DeepCopy() *MatchBlobIndexTagInitParameters { + if in == nil { + return nil + } + out := new(MatchBlobIndexTagInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MatchBlobIndexTagObservation) DeepCopyInto(out *MatchBlobIndexTagObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Operation != nil { + in, out := &in.Operation, &out.Operation + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchBlobIndexTagObservation. +func (in *MatchBlobIndexTagObservation) DeepCopy() *MatchBlobIndexTagObservation { + if in == nil { + return nil + } + out := new(MatchBlobIndexTagObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MatchBlobIndexTagParameters) DeepCopyInto(out *MatchBlobIndexTagParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Operation != nil { + in, out := &in.Operation, &out.Operation + *out = new(string) + **out = **in + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchBlobIndexTagParameters. +func (in *MatchBlobIndexTagParameters) DeepCopy() *MatchBlobIndexTagParameters { + if in == nil { + return nil + } + out := new(MatchBlobIndexTagParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MinuteMetricsInitParameters) DeepCopyInto(out *MinuteMetricsInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.IncludeApis != nil { + in, out := &in.IncludeApis, &out.IncludeApis + *out = new(bool) + **out = **in + } + if in.RetentionPolicyDays != nil { + in, out := &in.RetentionPolicyDays, &out.RetentionPolicyDays + *out = new(float64) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MinuteMetricsInitParameters. +func (in *MinuteMetricsInitParameters) DeepCopy() *MinuteMetricsInitParameters { + if in == nil { + return nil + } + out := new(MinuteMetricsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MinuteMetricsObservation) DeepCopyInto(out *MinuteMetricsObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.IncludeApis != nil { + in, out := &in.IncludeApis, &out.IncludeApis + *out = new(bool) + **out = **in + } + if in.RetentionPolicyDays != nil { + in, out := &in.RetentionPolicyDays, &out.RetentionPolicyDays + *out = new(float64) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MinuteMetricsObservation. +func (in *MinuteMetricsObservation) DeepCopy() *MinuteMetricsObservation { + if in == nil { + return nil + } + out := new(MinuteMetricsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MinuteMetricsParameters) DeepCopyInto(out *MinuteMetricsParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.IncludeApis != nil { + in, out := &in.IncludeApis, &out.IncludeApis + *out = new(bool) + **out = **in + } + if in.RetentionPolicyDays != nil { + in, out := &in.RetentionPolicyDays, &out.RetentionPolicyDays + *out = new(float64) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MinuteMetricsParameters. +func (in *MinuteMetricsParameters) DeepCopy() *MinuteMetricsParameters { + if in == nil { + return nil + } + out := new(MinuteMetricsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkRulesInitParameters) DeepCopyInto(out *NetworkRulesInitParameters) { + *out = *in + if in.Bypass != nil { + in, out := &in.Bypass, &out.Bypass + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DefaultAction != nil { + in, out := &in.DefaultAction, &out.DefaultAction + *out = new(string) + **out = **in + } + if in.IPRules != nil { + in, out := &in.IPRules, &out.IPRules + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrivateLinkAccess != nil { + in, out := &in.PrivateLinkAccess, &out.PrivateLinkAccess + *out = make([]PrivateLinkAccessInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VirtualNetworkSubnetIds != nil { + in, out := &in.VirtualNetworkSubnetIds, &out.VirtualNetworkSubnetIds + *out = 
make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkRulesInitParameters. +func (in *NetworkRulesInitParameters) DeepCopy() *NetworkRulesInitParameters { + if in == nil { + return nil + } + out := new(NetworkRulesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkRulesObservation) DeepCopyInto(out *NetworkRulesObservation) { + *out = *in + if in.Bypass != nil { + in, out := &in.Bypass, &out.Bypass + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DefaultAction != nil { + in, out := &in.DefaultAction, &out.DefaultAction + *out = new(string) + **out = **in + } + if in.IPRules != nil { + in, out := &in.IPRules, &out.IPRules + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrivateLinkAccess != nil { + in, out := &in.PrivateLinkAccess, &out.PrivateLinkAccess + *out = make([]PrivateLinkAccessObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VirtualNetworkSubnetIds != nil { + in, out := &in.VirtualNetworkSubnetIds, &out.VirtualNetworkSubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkRulesObservation. 
+func (in *NetworkRulesObservation) DeepCopy() *NetworkRulesObservation { + if in == nil { + return nil + } + out := new(NetworkRulesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkRulesParameters) DeepCopyInto(out *NetworkRulesParameters) { + *out = *in + if in.Bypass != nil { + in, out := &in.Bypass, &out.Bypass + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DefaultAction != nil { + in, out := &in.DefaultAction, &out.DefaultAction + *out = new(string) + **out = **in + } + if in.IPRules != nil { + in, out := &in.IPRules, &out.IPRules + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrivateLinkAccess != nil { + in, out := &in.PrivateLinkAccess, &out.PrivateLinkAccess + *out = make([]PrivateLinkAccessParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VirtualNetworkSubnetIds != nil { + in, out := &in.VirtualNetworkSubnetIds, &out.VirtualNetworkSubnetIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkRulesParameters. +func (in *NetworkRulesParameters) DeepCopy() *NetworkRulesParameters { + if in == nil { + return nil + } + out := new(NetworkRulesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PermissionScopeInitParameters) DeepCopyInto(out *PermissionScopeInitParameters) { + *out = *in + if in.Permissions != nil { + in, out := &in.Permissions, &out.Permissions + *out = new(PermissionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ResourceName != nil { + in, out := &in.ResourceName, &out.ResourceName + *out = new(string) + **out = **in + } + if in.ResourceNameRef != nil { + in, out := &in.ResourceNameRef, &out.ResourceNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceNameSelector != nil { + in, out := &in.ResourceNameSelector, &out.ResourceNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PermissionScopeInitParameters. +func (in *PermissionScopeInitParameters) DeepCopy() *PermissionScopeInitParameters { + if in == nil { + return nil + } + out := new(PermissionScopeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PermissionScopeObservation) DeepCopyInto(out *PermissionScopeObservation) { + *out = *in + if in.Permissions != nil { + in, out := &in.Permissions, &out.Permissions + *out = new(PermissionsObservation) + (*in).DeepCopyInto(*out) + } + if in.ResourceName != nil { + in, out := &in.ResourceName, &out.ResourceName + *out = new(string) + **out = **in + } + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PermissionScopeObservation. 
+func (in *PermissionScopeObservation) DeepCopy() *PermissionScopeObservation { + if in == nil { + return nil + } + out := new(PermissionScopeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PermissionScopeParameters) DeepCopyInto(out *PermissionScopeParameters) { + *out = *in + if in.Permissions != nil { + in, out := &in.Permissions, &out.Permissions + *out = new(PermissionsParameters) + (*in).DeepCopyInto(*out) + } + if in.ResourceName != nil { + in, out := &in.ResourceName, &out.ResourceName + *out = new(string) + **out = **in + } + if in.ResourceNameRef != nil { + in, out := &in.ResourceNameRef, &out.ResourceNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceNameSelector != nil { + in, out := &in.ResourceNameSelector, &out.ResourceNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PermissionScopeParameters. +func (in *PermissionScopeParameters) DeepCopy() *PermissionScopeParameters { + if in == nil { + return nil + } + out := new(PermissionScopeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PermissionsInitParameters) DeepCopyInto(out *PermissionsInitParameters) { + *out = *in + if in.Create != nil { + in, out := &in.Create, &out.Create + *out = new(bool) + **out = **in + } + if in.Delete != nil { + in, out := &in.Delete, &out.Delete + *out = new(bool) + **out = **in + } + if in.List != nil { + in, out := &in.List, &out.List + *out = new(bool) + **out = **in + } + if in.Read != nil { + in, out := &in.Read, &out.Read + *out = new(bool) + **out = **in + } + if in.Write != nil { + in, out := &in.Write, &out.Write + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PermissionsInitParameters. +func (in *PermissionsInitParameters) DeepCopy() *PermissionsInitParameters { + if in == nil { + return nil + } + out := new(PermissionsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PermissionsObservation) DeepCopyInto(out *PermissionsObservation) { + *out = *in + if in.Create != nil { + in, out := &in.Create, &out.Create + *out = new(bool) + **out = **in + } + if in.Delete != nil { + in, out := &in.Delete, &out.Delete + *out = new(bool) + **out = **in + } + if in.List != nil { + in, out := &in.List, &out.List + *out = new(bool) + **out = **in + } + if in.Read != nil { + in, out := &in.Read, &out.Read + *out = new(bool) + **out = **in + } + if in.Write != nil { + in, out := &in.Write, &out.Write + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PermissionsObservation. +func (in *PermissionsObservation) DeepCopy() *PermissionsObservation { + if in == nil { + return nil + } + out := new(PermissionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PermissionsParameters) DeepCopyInto(out *PermissionsParameters) { + *out = *in + if in.Create != nil { + in, out := &in.Create, &out.Create + *out = new(bool) + **out = **in + } + if in.Delete != nil { + in, out := &in.Delete, &out.Delete + *out = new(bool) + **out = **in + } + if in.List != nil { + in, out := &in.List, &out.List + *out = new(bool) + **out = **in + } + if in.Read != nil { + in, out := &in.Read, &out.Read + *out = new(bool) + **out = **in + } + if in.Write != nil { + in, out := &in.Write, &out.Write + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PermissionsParameters. +func (in *PermissionsParameters) DeepCopy() *PermissionsParameters { + if in == nil { + return nil + } + out := new(PermissionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrivateLinkAccessInitParameters) DeepCopyInto(out *PrivateLinkAccessInitParameters) { + *out = *in + if in.EndpointResourceID != nil { + in, out := &in.EndpointResourceID, &out.EndpointResourceID + *out = new(string) + **out = **in + } + if in.EndpointTenantID != nil { + in, out := &in.EndpointTenantID, &out.EndpointTenantID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateLinkAccessInitParameters. +func (in *PrivateLinkAccessInitParameters) DeepCopy() *PrivateLinkAccessInitParameters { + if in == nil { + return nil + } + out := new(PrivateLinkAccessInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PrivateLinkAccessObservation) DeepCopyInto(out *PrivateLinkAccessObservation) { + *out = *in + if in.EndpointResourceID != nil { + in, out := &in.EndpointResourceID, &out.EndpointResourceID + *out = new(string) + **out = **in + } + if in.EndpointTenantID != nil { + in, out := &in.EndpointTenantID, &out.EndpointTenantID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateLinkAccessObservation. +func (in *PrivateLinkAccessObservation) DeepCopy() *PrivateLinkAccessObservation { + if in == nil { + return nil + } + out := new(PrivateLinkAccessObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrivateLinkAccessParameters) DeepCopyInto(out *PrivateLinkAccessParameters) { + *out = *in + if in.EndpointResourceID != nil { + in, out := &in.EndpointResourceID, &out.EndpointResourceID + *out = new(string) + **out = **in + } + if in.EndpointTenantID != nil { + in, out := &in.EndpointTenantID, &out.EndpointTenantID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateLinkAccessParameters. +func (in *PrivateLinkAccessParameters) DeepCopy() *PrivateLinkAccessParameters { + if in == nil { + return nil + } + out := new(PrivateLinkAccessParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *QueuePropertiesCorsRuleInitParameters) DeepCopyInto(out *QueuePropertiesCorsRuleInitParameters) { + *out = *in + if in.AllowedHeaders != nil { + in, out := &in.AllowedHeaders, &out.AllowedHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedMethods != nil { + in, out := &in.AllowedMethods, &out.AllowedMethods + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedOrigins != nil { + in, out := &in.AllowedOrigins, &out.AllowedOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ExposedHeaders != nil { + in, out := &in.ExposedHeaders, &out.ExposedHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MaxAgeInSeconds != nil { + in, out := &in.MaxAgeInSeconds, &out.MaxAgeInSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueuePropertiesCorsRuleInitParameters. +func (in *QueuePropertiesCorsRuleInitParameters) DeepCopy() *QueuePropertiesCorsRuleInitParameters { + if in == nil { + return nil + } + out := new(QueuePropertiesCorsRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *QueuePropertiesCorsRuleObservation) DeepCopyInto(out *QueuePropertiesCorsRuleObservation) { + *out = *in + if in.AllowedHeaders != nil { + in, out := &in.AllowedHeaders, &out.AllowedHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedMethods != nil { + in, out := &in.AllowedMethods, &out.AllowedMethods + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedOrigins != nil { + in, out := &in.AllowedOrigins, &out.AllowedOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ExposedHeaders != nil { + in, out := &in.ExposedHeaders, &out.ExposedHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MaxAgeInSeconds != nil { + in, out := &in.MaxAgeInSeconds, &out.MaxAgeInSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueuePropertiesCorsRuleObservation. +func (in *QueuePropertiesCorsRuleObservation) DeepCopy() *QueuePropertiesCorsRuleObservation { + if in == nil { + return nil + } + out := new(QueuePropertiesCorsRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *QueuePropertiesCorsRuleParameters) DeepCopyInto(out *QueuePropertiesCorsRuleParameters) { + *out = *in + if in.AllowedHeaders != nil { + in, out := &in.AllowedHeaders, &out.AllowedHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedMethods != nil { + in, out := &in.AllowedMethods, &out.AllowedMethods + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedOrigins != nil { + in, out := &in.AllowedOrigins, &out.AllowedOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ExposedHeaders != nil { + in, out := &in.ExposedHeaders, &out.ExposedHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MaxAgeInSeconds != nil { + in, out := &in.MaxAgeInSeconds, &out.MaxAgeInSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueuePropertiesCorsRuleParameters. +func (in *QueuePropertiesCorsRuleParameters) DeepCopy() *QueuePropertiesCorsRuleParameters { + if in == nil { + return nil + } + out := new(QueuePropertiesCorsRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *QueuePropertiesInitParameters) DeepCopyInto(out *QueuePropertiesInitParameters) { + *out = *in + if in.CorsRule != nil { + in, out := &in.CorsRule, &out.CorsRule + *out = make([]QueuePropertiesCorsRuleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HourMetrics != nil { + in, out := &in.HourMetrics, &out.HourMetrics + *out = new(HourMetricsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Logging != nil { + in, out := &in.Logging, &out.Logging + *out = new(LoggingInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MinuteMetrics != nil { + in, out := &in.MinuteMetrics, &out.MinuteMetrics + *out = new(MinuteMetricsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueuePropertiesInitParameters. +func (in *QueuePropertiesInitParameters) DeepCopy() *QueuePropertiesInitParameters { + if in == nil { + return nil + } + out := new(QueuePropertiesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *QueuePropertiesObservation) DeepCopyInto(out *QueuePropertiesObservation) { + *out = *in + if in.CorsRule != nil { + in, out := &in.CorsRule, &out.CorsRule + *out = make([]QueuePropertiesCorsRuleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HourMetrics != nil { + in, out := &in.HourMetrics, &out.HourMetrics + *out = new(HourMetricsObservation) + (*in).DeepCopyInto(*out) + } + if in.Logging != nil { + in, out := &in.Logging, &out.Logging + *out = new(LoggingObservation) + (*in).DeepCopyInto(*out) + } + if in.MinuteMetrics != nil { + in, out := &in.MinuteMetrics, &out.MinuteMetrics + *out = new(MinuteMetricsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueuePropertiesObservation. +func (in *QueuePropertiesObservation) DeepCopy() *QueuePropertiesObservation { + if in == nil { + return nil + } + out := new(QueuePropertiesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueuePropertiesParameters) DeepCopyInto(out *QueuePropertiesParameters) { + *out = *in + if in.CorsRule != nil { + in, out := &in.CorsRule, &out.CorsRule + *out = make([]QueuePropertiesCorsRuleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HourMetrics != nil { + in, out := &in.HourMetrics, &out.HourMetrics + *out = new(HourMetricsParameters) + (*in).DeepCopyInto(*out) + } + if in.Logging != nil { + in, out := &in.Logging, &out.Logging + *out = new(LoggingParameters) + (*in).DeepCopyInto(*out) + } + if in.MinuteMetrics != nil { + in, out := &in.MinuteMetrics, &out.MinuteMetrics + *out = new(MinuteMetricsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueuePropertiesParameters. 
+func (in *QueuePropertiesParameters) DeepCopy() *QueuePropertiesParameters { + if in == nil { + return nil + } + out := new(QueuePropertiesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RestorePolicyInitParameters) DeepCopyInto(out *RestorePolicyInitParameters) { + *out = *in + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestorePolicyInitParameters. +func (in *RestorePolicyInitParameters) DeepCopy() *RestorePolicyInitParameters { + if in == nil { + return nil + } + out := new(RestorePolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RestorePolicyObservation) DeepCopyInto(out *RestorePolicyObservation) { + *out = *in + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestorePolicyObservation. +func (in *RestorePolicyObservation) DeepCopy() *RestorePolicyObservation { + if in == nil { + return nil + } + out := new(RestorePolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RestorePolicyParameters) DeepCopyInto(out *RestorePolicyParameters) { + *out = *in + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestorePolicyParameters. 
+func (in *RestorePolicyParameters) DeepCopy() *RestorePolicyParameters { + if in == nil { + return nil + } + out := new(RestorePolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetentionPolicyInitParameters) DeepCopyInto(out *RetentionPolicyInitParameters) { + *out = *in + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionPolicyInitParameters. +func (in *RetentionPolicyInitParameters) DeepCopy() *RetentionPolicyInitParameters { + if in == nil { + return nil + } + out := new(RetentionPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetentionPolicyObservation) DeepCopyInto(out *RetentionPolicyObservation) { + *out = *in + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionPolicyObservation. +func (in *RetentionPolicyObservation) DeepCopy() *RetentionPolicyObservation { + if in == nil { + return nil + } + out := new(RetentionPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetentionPolicyParameters) DeepCopyInto(out *RetentionPolicyParameters) { + *out = *in + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionPolicyParameters. 
+func (in *RetentionPolicyParameters) DeepCopy() *RetentionPolicyParameters { + if in == nil { + return nil + } + out := new(RetentionPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoutingInitParameters) DeepCopyInto(out *RoutingInitParameters) { + *out = *in + if in.Choice != nil { + in, out := &in.Choice, &out.Choice + *out = new(string) + **out = **in + } + if in.PublishInternetEndpoints != nil { + in, out := &in.PublishInternetEndpoints, &out.PublishInternetEndpoints + *out = new(bool) + **out = **in + } + if in.PublishMicrosoftEndpoints != nil { + in, out := &in.PublishMicrosoftEndpoints, &out.PublishMicrosoftEndpoints + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutingInitParameters. +func (in *RoutingInitParameters) DeepCopy() *RoutingInitParameters { + if in == nil { + return nil + } + out := new(RoutingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoutingObservation) DeepCopyInto(out *RoutingObservation) { + *out = *in + if in.Choice != nil { + in, out := &in.Choice, &out.Choice + *out = new(string) + **out = **in + } + if in.PublishInternetEndpoints != nil { + in, out := &in.PublishInternetEndpoints, &out.PublishInternetEndpoints + *out = new(bool) + **out = **in + } + if in.PublishMicrosoftEndpoints != nil { + in, out := &in.PublishMicrosoftEndpoints, &out.PublishMicrosoftEndpoints + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutingObservation. 
+func (in *RoutingObservation) DeepCopy() *RoutingObservation { + if in == nil { + return nil + } + out := new(RoutingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoutingParameters) DeepCopyInto(out *RoutingParameters) { + *out = *in + if in.Choice != nil { + in, out := &in.Choice, &out.Choice + *out = new(string) + **out = **in + } + if in.PublishInternetEndpoints != nil { + in, out := &in.PublishInternetEndpoints, &out.PublishInternetEndpoints + *out = new(bool) + **out = **in + } + if in.PublishMicrosoftEndpoints != nil { + in, out := &in.PublishMicrosoftEndpoints, &out.PublishMicrosoftEndpoints + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutingParameters. +func (in *RoutingParameters) DeepCopy() *RoutingParameters { + if in == nil { + return nil + } + out := new(RoutingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleInitParameters) DeepCopyInto(out *RuleInitParameters) { + *out = *in + if in.Actions != nil { + in, out := &in.Actions, &out.Actions + *out = new(ActionsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Filters != nil { + in, out := &in.Filters, &out.Filters + *out = new(FiltersInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleInitParameters. 
+func (in *RuleInitParameters) DeepCopy() *RuleInitParameters { + if in == nil { + return nil + } + out := new(RuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleObservation) DeepCopyInto(out *RuleObservation) { + *out = *in + if in.Actions != nil { + in, out := &in.Actions, &out.Actions + *out = new(ActionsObservation) + (*in).DeepCopyInto(*out) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Filters != nil { + in, out := &in.Filters, &out.Filters + *out = new(FiltersObservation) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleObservation. +func (in *RuleObservation) DeepCopy() *RuleObservation { + if in == nil { + return nil + } + out := new(RuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleParameters) DeepCopyInto(out *RuleParameters) { + *out = *in + if in.Actions != nil { + in, out := &in.Actions, &out.Actions + *out = new(ActionsParameters) + (*in).DeepCopyInto(*out) + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Filters != nil { + in, out := &in.Filters, &out.Filters + *out = new(FiltersParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleParameters. 
+func (in *RuleParameters) DeepCopy() *RuleParameters { + if in == nil { + return nil + } + out := new(RuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RulesInitParameters) DeepCopyInto(out *RulesInitParameters) { + *out = *in + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(FilterInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(string) + **out = **in + } + if in.SchemaFields != nil { + in, out := &in.SchemaFields, &out.SchemaFields + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = new(string) + **out = **in + } + if in.StorageContainerName != nil { + in, out := &in.StorageContainerName, &out.StorageContainerName + *out = new(string) + **out = **in + } + if in.StorageContainerNameRef != nil { + in, out := &in.StorageContainerNameRef, &out.StorageContainerNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StorageContainerNameSelector != nil { + in, out := &in.StorageContainerNameSelector, &out.StorageContainerNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RulesInitParameters. 
+func (in *RulesInitParameters) DeepCopy() *RulesInitParameters { + if in == nil { + return nil + } + out := new(RulesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RulesObservation) DeepCopyInto(out *RulesObservation) { + *out = *in + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(FilterObservation) + (*in).DeepCopyInto(*out) + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(string) + **out = **in + } + if in.SchemaFields != nil { + in, out := &in.SchemaFields, &out.SchemaFields + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = new(string) + **out = **in + } + if in.StorageContainerName != nil { + in, out := &in.StorageContainerName, &out.StorageContainerName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RulesObservation. +func (in *RulesObservation) DeepCopy() *RulesObservation { + if in == nil { + return nil + } + out := new(RulesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RulesParameters) DeepCopyInto(out *RulesParameters) { + *out = *in + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(FilterParameters) + (*in).DeepCopyInto(*out) + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(string) + **out = **in + } + if in.SchemaFields != nil { + in, out := &in.SchemaFields, &out.SchemaFields + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = new(string) + **out = **in + } + if in.StorageContainerName != nil { + in, out := &in.StorageContainerName, &out.StorageContainerName + *out = new(string) + **out = **in + } + if in.StorageContainerNameRef != nil { + in, out := &in.StorageContainerNameRef, &out.StorageContainerNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StorageContainerNameSelector != nil { + in, out := &in.StorageContainerNameSelector, &out.StorageContainerNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RulesParameters. +func (in *RulesParameters) DeepCopy() *RulesParameters { + if in == nil { + return nil + } + out := new(RulesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SASPolicyInitParameters) DeepCopyInto(out *SASPolicyInitParameters) { + *out = *in + if in.ExpirationAction != nil { + in, out := &in.ExpirationAction, &out.ExpirationAction + *out = new(string) + **out = **in + } + if in.ExpirationPeriod != nil { + in, out := &in.ExpirationPeriod, &out.ExpirationPeriod + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SASPolicyInitParameters. +func (in *SASPolicyInitParameters) DeepCopy() *SASPolicyInitParameters { + if in == nil { + return nil + } + out := new(SASPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SASPolicyObservation) DeepCopyInto(out *SASPolicyObservation) { + *out = *in + if in.ExpirationAction != nil { + in, out := &in.ExpirationAction, &out.ExpirationAction + *out = new(string) + **out = **in + } + if in.ExpirationPeriod != nil { + in, out := &in.ExpirationPeriod, &out.ExpirationPeriod + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SASPolicyObservation. +func (in *SASPolicyObservation) DeepCopy() *SASPolicyObservation { + if in == nil { + return nil + } + out := new(SASPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SASPolicyParameters) DeepCopyInto(out *SASPolicyParameters) { + *out = *in + if in.ExpirationAction != nil { + in, out := &in.ExpirationAction, &out.ExpirationAction + *out = new(string) + **out = **in + } + if in.ExpirationPeriod != nil { + in, out := &in.ExpirationPeriod, &out.ExpirationPeriod + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SASPolicyParameters. +func (in *SASPolicyParameters) DeepCopy() *SASPolicyParameters { + if in == nil { + return nil + } + out := new(SASPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SMBInitParameters) DeepCopyInto(out *SMBInitParameters) { + *out = *in + if in.AuthenticationTypes != nil { + in, out := &in.AuthenticationTypes, &out.AuthenticationTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ChannelEncryptionType != nil { + in, out := &in.ChannelEncryptionType, &out.ChannelEncryptionType + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.KerberosTicketEncryptionType != nil { + in, out := &in.KerberosTicketEncryptionType, &out.KerberosTicketEncryptionType + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MultichannelEnabled != nil { + in, out := &in.MultichannelEnabled, &out.MultichannelEnabled + *out = new(bool) + **out = **in + } + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = 
new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SMBInitParameters. +func (in *SMBInitParameters) DeepCopy() *SMBInitParameters { + if in == nil { + return nil + } + out := new(SMBInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SMBObservation) DeepCopyInto(out *SMBObservation) { + *out = *in + if in.AuthenticationTypes != nil { + in, out := &in.AuthenticationTypes, &out.AuthenticationTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ChannelEncryptionType != nil { + in, out := &in.ChannelEncryptionType, &out.ChannelEncryptionType + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.KerberosTicketEncryptionType != nil { + in, out := &in.KerberosTicketEncryptionType, &out.KerberosTicketEncryptionType + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MultichannelEnabled != nil { + in, out := &in.MultichannelEnabled, &out.MultichannelEnabled + *out = new(bool) + **out = **in + } + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SMBObservation. 
+func (in *SMBObservation) DeepCopy() *SMBObservation { + if in == nil { + return nil + } + out := new(SMBObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SMBParameters) DeepCopyInto(out *SMBParameters) { + *out = *in + if in.AuthenticationTypes != nil { + in, out := &in.AuthenticationTypes, &out.AuthenticationTypes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ChannelEncryptionType != nil { + in, out := &in.ChannelEncryptionType, &out.ChannelEncryptionType + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.KerberosTicketEncryptionType != nil { + in, out := &in.KerberosTicketEncryptionType, &out.KerberosTicketEncryptionType + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MultichannelEnabled != nil { + in, out := &in.MultichannelEnabled, &out.MultichannelEnabled + *out = new(bool) + **out = **in + } + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SMBParameters. +func (in *SMBParameters) DeepCopy() *SMBParameters { + if in == nil { + return nil + } + out := new(SMBParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SSHAuthorizedKeyInitParameters) DeepCopyInto(out *SSHAuthorizedKeyInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SSHAuthorizedKeyInitParameters. +func (in *SSHAuthorizedKeyInitParameters) DeepCopy() *SSHAuthorizedKeyInitParameters { + if in == nil { + return nil + } + out := new(SSHAuthorizedKeyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SSHAuthorizedKeyObservation) DeepCopyInto(out *SSHAuthorizedKeyObservation) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SSHAuthorizedKeyObservation. +func (in *SSHAuthorizedKeyObservation) DeepCopy() *SSHAuthorizedKeyObservation { + if in == nil { + return nil + } + out := new(SSHAuthorizedKeyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SSHAuthorizedKeyParameters) DeepCopyInto(out *SSHAuthorizedKeyParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SSHAuthorizedKeyParameters. 
+func (in *SSHAuthorizedKeyParameters) DeepCopy() *SSHAuthorizedKeyParameters { + if in == nil { + return nil + } + out := new(SSHAuthorizedKeyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SharePropertiesCorsRuleInitParameters) DeepCopyInto(out *SharePropertiesCorsRuleInitParameters) { + *out = *in + if in.AllowedHeaders != nil { + in, out := &in.AllowedHeaders, &out.AllowedHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedMethods != nil { + in, out := &in.AllowedMethods, &out.AllowedMethods + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedOrigins != nil { + in, out := &in.AllowedOrigins, &out.AllowedOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ExposedHeaders != nil { + in, out := &in.ExposedHeaders, &out.ExposedHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MaxAgeInSeconds != nil { + in, out := &in.MaxAgeInSeconds, &out.MaxAgeInSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharePropertiesCorsRuleInitParameters. 
+func (in *SharePropertiesCorsRuleInitParameters) DeepCopy() *SharePropertiesCorsRuleInitParameters { + if in == nil { + return nil + } + out := new(SharePropertiesCorsRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SharePropertiesCorsRuleObservation) DeepCopyInto(out *SharePropertiesCorsRuleObservation) { + *out = *in + if in.AllowedHeaders != nil { + in, out := &in.AllowedHeaders, &out.AllowedHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedMethods != nil { + in, out := &in.AllowedMethods, &out.AllowedMethods + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedOrigins != nil { + in, out := &in.AllowedOrigins, &out.AllowedOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ExposedHeaders != nil { + in, out := &in.ExposedHeaders, &out.ExposedHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MaxAgeInSeconds != nil { + in, out := &in.MaxAgeInSeconds, &out.MaxAgeInSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharePropertiesCorsRuleObservation. 
+func (in *SharePropertiesCorsRuleObservation) DeepCopy() *SharePropertiesCorsRuleObservation { + if in == nil { + return nil + } + out := new(SharePropertiesCorsRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SharePropertiesCorsRuleParameters) DeepCopyInto(out *SharePropertiesCorsRuleParameters) { + *out = *in + if in.AllowedHeaders != nil { + in, out := &in.AllowedHeaders, &out.AllowedHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedMethods != nil { + in, out := &in.AllowedMethods, &out.AllowedMethods + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedOrigins != nil { + in, out := &in.AllowedOrigins, &out.AllowedOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ExposedHeaders != nil { + in, out := &in.ExposedHeaders, &out.ExposedHeaders + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.MaxAgeInSeconds != nil { + in, out := &in.MaxAgeInSeconds, &out.MaxAgeInSeconds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharePropertiesCorsRuleParameters. 
+func (in *SharePropertiesCorsRuleParameters) DeepCopy() *SharePropertiesCorsRuleParameters { + if in == nil { + return nil + } + out := new(SharePropertiesCorsRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SharePropertiesInitParameters) DeepCopyInto(out *SharePropertiesInitParameters) { + *out = *in + if in.CorsRule != nil { + in, out := &in.CorsRule, &out.CorsRule + *out = make([]SharePropertiesCorsRuleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RetentionPolicy != nil { + in, out := &in.RetentionPolicy, &out.RetentionPolicy + *out = new(RetentionPolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SMB != nil { + in, out := &in.SMB, &out.SMB + *out = new(SMBInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharePropertiesInitParameters. +func (in *SharePropertiesInitParameters) DeepCopy() *SharePropertiesInitParameters { + if in == nil { + return nil + } + out := new(SharePropertiesInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SharePropertiesObservation) DeepCopyInto(out *SharePropertiesObservation) { + *out = *in + if in.CorsRule != nil { + in, out := &in.CorsRule, &out.CorsRule + *out = make([]SharePropertiesCorsRuleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RetentionPolicy != nil { + in, out := &in.RetentionPolicy, &out.RetentionPolicy + *out = new(RetentionPolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.SMB != nil { + in, out := &in.SMB, &out.SMB + *out = new(SMBObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharePropertiesObservation. +func (in *SharePropertiesObservation) DeepCopy() *SharePropertiesObservation { + if in == nil { + return nil + } + out := new(SharePropertiesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SharePropertiesParameters) DeepCopyInto(out *SharePropertiesParameters) { + *out = *in + if in.CorsRule != nil { + in, out := &in.CorsRule, &out.CorsRule + *out = make([]SharePropertiesCorsRuleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RetentionPolicy != nil { + in, out := &in.RetentionPolicy, &out.RetentionPolicy + *out = new(RetentionPolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.SMB != nil { + in, out := &in.SMB, &out.SMB + *out = new(SMBParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharePropertiesParameters. +func (in *SharePropertiesParameters) DeepCopy() *SharePropertiesParameters { + if in == nil { + return nil + } + out := new(SharePropertiesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SnapshotInitParameters) DeepCopyInto(out *SnapshotInitParameters) { + *out = *in + if in.ChangeTierToArchiveAfterDaysSinceCreation != nil { + in, out := &in.ChangeTierToArchiveAfterDaysSinceCreation, &out.ChangeTierToArchiveAfterDaysSinceCreation + *out = new(float64) + **out = **in + } + if in.ChangeTierToCoolAfterDaysSinceCreation != nil { + in, out := &in.ChangeTierToCoolAfterDaysSinceCreation, &out.ChangeTierToCoolAfterDaysSinceCreation + *out = new(float64) + **out = **in + } + if in.DeleteAfterDaysSinceCreationGreaterThan != nil { + in, out := &in.DeleteAfterDaysSinceCreationGreaterThan, &out.DeleteAfterDaysSinceCreationGreaterThan + *out = new(float64) + **out = **in + } + if in.TierToArchiveAfterDaysSinceLastTierChangeGreaterThan != nil { + in, out := &in.TierToArchiveAfterDaysSinceLastTierChangeGreaterThan, &out.TierToArchiveAfterDaysSinceLastTierChangeGreaterThan + *out = new(float64) + **out = **in + } + if in.TierToColdAfterDaysSinceCreationGreaterThan != nil { + in, out := &in.TierToColdAfterDaysSinceCreationGreaterThan, &out.TierToColdAfterDaysSinceCreationGreaterThan + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotInitParameters. +func (in *SnapshotInitParameters) DeepCopy() *SnapshotInitParameters { + if in == nil { + return nil + } + out := new(SnapshotInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SnapshotObservation) DeepCopyInto(out *SnapshotObservation) { + *out = *in + if in.ChangeTierToArchiveAfterDaysSinceCreation != nil { + in, out := &in.ChangeTierToArchiveAfterDaysSinceCreation, &out.ChangeTierToArchiveAfterDaysSinceCreation + *out = new(float64) + **out = **in + } + if in.ChangeTierToCoolAfterDaysSinceCreation != nil { + in, out := &in.ChangeTierToCoolAfterDaysSinceCreation, &out.ChangeTierToCoolAfterDaysSinceCreation + *out = new(float64) + **out = **in + } + if in.DeleteAfterDaysSinceCreationGreaterThan != nil { + in, out := &in.DeleteAfterDaysSinceCreationGreaterThan, &out.DeleteAfterDaysSinceCreationGreaterThan + *out = new(float64) + **out = **in + } + if in.TierToArchiveAfterDaysSinceLastTierChangeGreaterThan != nil { + in, out := &in.TierToArchiveAfterDaysSinceLastTierChangeGreaterThan, &out.TierToArchiveAfterDaysSinceLastTierChangeGreaterThan + *out = new(float64) + **out = **in + } + if in.TierToColdAfterDaysSinceCreationGreaterThan != nil { + in, out := &in.TierToColdAfterDaysSinceCreationGreaterThan, &out.TierToColdAfterDaysSinceCreationGreaterThan + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotObservation. +func (in *SnapshotObservation) DeepCopy() *SnapshotObservation { + if in == nil { + return nil + } + out := new(SnapshotObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SnapshotParameters) DeepCopyInto(out *SnapshotParameters) { + *out = *in + if in.ChangeTierToArchiveAfterDaysSinceCreation != nil { + in, out := &in.ChangeTierToArchiveAfterDaysSinceCreation, &out.ChangeTierToArchiveAfterDaysSinceCreation + *out = new(float64) + **out = **in + } + if in.ChangeTierToCoolAfterDaysSinceCreation != nil { + in, out := &in.ChangeTierToCoolAfterDaysSinceCreation, &out.ChangeTierToCoolAfterDaysSinceCreation + *out = new(float64) + **out = **in + } + if in.DeleteAfterDaysSinceCreationGreaterThan != nil { + in, out := &in.DeleteAfterDaysSinceCreationGreaterThan, &out.DeleteAfterDaysSinceCreationGreaterThan + *out = new(float64) + **out = **in + } + if in.TierToArchiveAfterDaysSinceLastTierChangeGreaterThan != nil { + in, out := &in.TierToArchiveAfterDaysSinceLastTierChangeGreaterThan, &out.TierToArchiveAfterDaysSinceLastTierChangeGreaterThan + *out = new(float64) + **out = **in + } + if in.TierToColdAfterDaysSinceCreationGreaterThan != nil { + in, out := &in.TierToColdAfterDaysSinceCreationGreaterThan, &out.TierToColdAfterDaysSinceCreationGreaterThan + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotParameters. +func (in *SnapshotParameters) DeepCopy() *SnapshotParameters { + if in == nil { + return nil + } + out := new(SnapshotParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StaticWebsiteInitParameters) DeepCopyInto(out *StaticWebsiteInitParameters) { + *out = *in + if in.Error404Document != nil { + in, out := &in.Error404Document, &out.Error404Document + *out = new(string) + **out = **in + } + if in.IndexDocument != nil { + in, out := &in.IndexDocument, &out.IndexDocument + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticWebsiteInitParameters. +func (in *StaticWebsiteInitParameters) DeepCopy() *StaticWebsiteInitParameters { + if in == nil { + return nil + } + out := new(StaticWebsiteInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaticWebsiteObservation) DeepCopyInto(out *StaticWebsiteObservation) { + *out = *in + if in.Error404Document != nil { + in, out := &in.Error404Document, &out.Error404Document + *out = new(string) + **out = **in + } + if in.IndexDocument != nil { + in, out := &in.IndexDocument, &out.IndexDocument + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticWebsiteObservation. +func (in *StaticWebsiteObservation) DeepCopy() *StaticWebsiteObservation { + if in == nil { + return nil + } + out := new(StaticWebsiteObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StaticWebsiteParameters) DeepCopyInto(out *StaticWebsiteParameters) { + *out = *in + if in.Error404Document != nil { + in, out := &in.Error404Document, &out.Error404Document + *out = new(string) + **out = **in + } + if in.IndexDocument != nil { + in, out := &in.IndexDocument, &out.IndexDocument + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticWebsiteParameters. +func (in *StaticWebsiteParameters) DeepCopy() *StaticWebsiteParameters { + if in == nil { + return nil + } + out := new(StaticWebsiteParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VersionInitParameters) DeepCopyInto(out *VersionInitParameters) { + *out = *in + if in.ChangeTierToArchiveAfterDaysSinceCreation != nil { + in, out := &in.ChangeTierToArchiveAfterDaysSinceCreation, &out.ChangeTierToArchiveAfterDaysSinceCreation + *out = new(float64) + **out = **in + } + if in.ChangeTierToCoolAfterDaysSinceCreation != nil { + in, out := &in.ChangeTierToCoolAfterDaysSinceCreation, &out.ChangeTierToCoolAfterDaysSinceCreation + *out = new(float64) + **out = **in + } + if in.DeleteAfterDaysSinceCreation != nil { + in, out := &in.DeleteAfterDaysSinceCreation, &out.DeleteAfterDaysSinceCreation + *out = new(float64) + **out = **in + } + if in.TierToArchiveAfterDaysSinceLastTierChangeGreaterThan != nil { + in, out := &in.TierToArchiveAfterDaysSinceLastTierChangeGreaterThan, &out.TierToArchiveAfterDaysSinceLastTierChangeGreaterThan + *out = new(float64) + **out = **in + } + if in.TierToColdAfterDaysSinceCreationGreaterThan != nil { + in, out := &in.TierToColdAfterDaysSinceCreationGreaterThan, &out.TierToColdAfterDaysSinceCreationGreaterThan + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
VersionInitParameters. +func (in *VersionInitParameters) DeepCopy() *VersionInitParameters { + if in == nil { + return nil + } + out := new(VersionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VersionObservation) DeepCopyInto(out *VersionObservation) { + *out = *in + if in.ChangeTierToArchiveAfterDaysSinceCreation != nil { + in, out := &in.ChangeTierToArchiveAfterDaysSinceCreation, &out.ChangeTierToArchiveAfterDaysSinceCreation + *out = new(float64) + **out = **in + } + if in.ChangeTierToCoolAfterDaysSinceCreation != nil { + in, out := &in.ChangeTierToCoolAfterDaysSinceCreation, &out.ChangeTierToCoolAfterDaysSinceCreation + *out = new(float64) + **out = **in + } + if in.DeleteAfterDaysSinceCreation != nil { + in, out := &in.DeleteAfterDaysSinceCreation, &out.DeleteAfterDaysSinceCreation + *out = new(float64) + **out = **in + } + if in.TierToArchiveAfterDaysSinceLastTierChangeGreaterThan != nil { + in, out := &in.TierToArchiveAfterDaysSinceLastTierChangeGreaterThan, &out.TierToArchiveAfterDaysSinceLastTierChangeGreaterThan + *out = new(float64) + **out = **in + } + if in.TierToColdAfterDaysSinceCreationGreaterThan != nil { + in, out := &in.TierToColdAfterDaysSinceCreationGreaterThan, &out.TierToColdAfterDaysSinceCreationGreaterThan + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VersionObservation. +func (in *VersionObservation) DeepCopy() *VersionObservation { + if in == nil { + return nil + } + out := new(VersionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VersionParameters) DeepCopyInto(out *VersionParameters) { + *out = *in + if in.ChangeTierToArchiveAfterDaysSinceCreation != nil { + in, out := &in.ChangeTierToArchiveAfterDaysSinceCreation, &out.ChangeTierToArchiveAfterDaysSinceCreation + *out = new(float64) + **out = **in + } + if in.ChangeTierToCoolAfterDaysSinceCreation != nil { + in, out := &in.ChangeTierToCoolAfterDaysSinceCreation, &out.ChangeTierToCoolAfterDaysSinceCreation + *out = new(float64) + **out = **in + } + if in.DeleteAfterDaysSinceCreation != nil { + in, out := &in.DeleteAfterDaysSinceCreation, &out.DeleteAfterDaysSinceCreation + *out = new(float64) + **out = **in + } + if in.TierToArchiveAfterDaysSinceLastTierChangeGreaterThan != nil { + in, out := &in.TierToArchiveAfterDaysSinceLastTierChangeGreaterThan, &out.TierToArchiveAfterDaysSinceLastTierChangeGreaterThan + *out = new(float64) + **out = **in + } + if in.TierToColdAfterDaysSinceCreationGreaterThan != nil { + in, out := &in.TierToColdAfterDaysSinceCreationGreaterThan, &out.TierToColdAfterDaysSinceCreationGreaterThan + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VersionParameters. +func (in *VersionParameters) DeepCopy() *VersionParameters { + if in == nil { + return nil + } + out := new(VersionParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/storage/v1beta2/zz_generated.managed.go b/apis/storage/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..58a852228 --- /dev/null +++ b/apis/storage/v1beta2/zz_generated.managed.go @@ -0,0 +1,248 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Account. 
+func (mg *Account) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Account. +func (mg *Account) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Account. +func (mg *Account) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Account. +func (mg *Account) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Account. +func (mg *Account) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Account. +func (mg *Account) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Account. +func (mg *Account) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Account. +func (mg *Account) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Account. +func (mg *Account) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Account. +func (mg *Account) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Account. +func (mg *Account) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Account. +func (mg *Account) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this AccountLocalUser. 
+func (mg *AccountLocalUser) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this AccountLocalUser. +func (mg *AccountLocalUser) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this AccountLocalUser. +func (mg *AccountLocalUser) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this AccountLocalUser. +func (mg *AccountLocalUser) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this AccountLocalUser. +func (mg *AccountLocalUser) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this AccountLocalUser. +func (mg *AccountLocalUser) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this AccountLocalUser. +func (mg *AccountLocalUser) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this AccountLocalUser. +func (mg *AccountLocalUser) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this AccountLocalUser. +func (mg *AccountLocalUser) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this AccountLocalUser. +func (mg *AccountLocalUser) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this AccountLocalUser. +func (mg *AccountLocalUser) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this AccountLocalUser. 
+func (mg *AccountLocalUser) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this BlobInventoryPolicy. +func (mg *BlobInventoryPolicy) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this BlobInventoryPolicy. +func (mg *BlobInventoryPolicy) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this BlobInventoryPolicy. +func (mg *BlobInventoryPolicy) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this BlobInventoryPolicy. +func (mg *BlobInventoryPolicy) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this BlobInventoryPolicy. +func (mg *BlobInventoryPolicy) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this BlobInventoryPolicy. +func (mg *BlobInventoryPolicy) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this BlobInventoryPolicy. +func (mg *BlobInventoryPolicy) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this BlobInventoryPolicy. +func (mg *BlobInventoryPolicy) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this BlobInventoryPolicy. +func (mg *BlobInventoryPolicy) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this BlobInventoryPolicy. 
+func (mg *BlobInventoryPolicy) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this BlobInventoryPolicy. +func (mg *BlobInventoryPolicy) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this BlobInventoryPolicy. +func (mg *BlobInventoryPolicy) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this ManagementPolicy. +func (mg *ManagementPolicy) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ManagementPolicy. +func (mg *ManagementPolicy) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ManagementPolicy. +func (mg *ManagementPolicy) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ManagementPolicy. +func (mg *ManagementPolicy) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ManagementPolicy. +func (mg *ManagementPolicy) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ManagementPolicy. +func (mg *ManagementPolicy) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ManagementPolicy. +func (mg *ManagementPolicy) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ManagementPolicy. +func (mg *ManagementPolicy) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ManagementPolicy. 
+func (mg *ManagementPolicy) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ManagementPolicy. +func (mg *ManagementPolicy) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ManagementPolicy. +func (mg *ManagementPolicy) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ManagementPolicy. +func (mg *ManagementPolicy) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/storage/v1beta2/zz_generated.managedlist.go b/apis/storage/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..a0f7c1a78 --- /dev/null +++ b/apis/storage/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,44 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this AccountList. +func (l *AccountList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this AccountLocalUserList. +func (l *AccountLocalUserList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this BlobInventoryPolicyList. +func (l *BlobInventoryPolicyList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ManagementPolicyList. 
+func (l *ManagementPolicyList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/storage/v1beta2/zz_generated.resolvers.go b/apis/storage/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..4362f1611 --- /dev/null +++ b/apis/storage/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,266 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + client "sigs.k8s.io/controller-runtime/pkg/client" + + // ResolveReferences of this Account. + apisresolver "github.com/upbound/provider-azure/internal/apis" +) + +func (mg *Account) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + 
mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this AccountLocalUser. +func (mg *AccountLocalUser) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + for i3 := 0; i3 < len(mg.Spec.ForProvider.PermissionScope); i3++ { + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Container", "ContainerList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.PermissionScope[i3].ResourceName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.PermissionScope[i3].ResourceNameRef, + Selector: mg.Spec.ForProvider.PermissionScope[i3].ResourceNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.PermissionScope[i3].ResourceName") + } + mg.Spec.ForProvider.PermissionScope[i3].ResourceName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.PermissionScope[i3].ResourceNameRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StorageAccountID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.StorageAccountIDRef, + Selector: 
mg.Spec.ForProvider.StorageAccountIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StorageAccountID") + } + mg.Spec.ForProvider.StorageAccountID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StorageAccountIDRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.InitProvider.PermissionScope); i3++ { + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Container", "ContainerList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.PermissionScope[i3].ResourceName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.PermissionScope[i3].ResourceNameRef, + Selector: mg.Spec.InitProvider.PermissionScope[i3].ResourceNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.PermissionScope[i3].ResourceName") + } + mg.Spec.InitProvider.PermissionScope[i3].ResourceName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.PermissionScope[i3].ResourceNameRef = rsp.ResolvedReference + + } + + return nil +} + +// ResolveReferences of this BlobInventoryPolicy. 
+func (mg *BlobInventoryPolicy) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + for i3 := 0; i3 < len(mg.Spec.ForProvider.Rules); i3++ { + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Container", "ContainerList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Rules[i3].StorageContainerName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.Rules[i3].StorageContainerNameRef, + Selector: mg.Spec.ForProvider.Rules[i3].StorageContainerNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Rules[i3].StorageContainerName") + } + mg.Spec.ForProvider.Rules[i3].StorageContainerName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Rules[i3].StorageContainerNameRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StorageAccountID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.StorageAccountIDRef, + Selector: mg.Spec.ForProvider.StorageAccountIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StorageAccountID") + } + mg.Spec.ForProvider.StorageAccountID = reference.ToPtrValue(rsp.ResolvedValue) + 
mg.Spec.ForProvider.StorageAccountIDRef = rsp.ResolvedReference + + for i3 := 0; i3 < len(mg.Spec.InitProvider.Rules); i3++ { + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Container", "ContainerList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Rules[i3].StorageContainerName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.Rules[i3].StorageContainerNameRef, + Selector: mg.Spec.InitProvider.Rules[i3].StorageContainerNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Rules[i3].StorageContainerName") + } + mg.Spec.InitProvider.Rules[i3].StorageContainerName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Rules[i3].StorageContainerNameRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StorageAccountID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.StorageAccountIDRef, + Selector: mg.Spec.InitProvider.StorageAccountIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StorageAccountID") + } + mg.Spec.InitProvider.StorageAccountID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StorageAccountIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this ManagementPolicy. 
+func (mg *ManagementPolicy) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StorageAccountID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.StorageAccountIDRef, + Selector: mg.Spec.ForProvider.StorageAccountIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StorageAccountID") + } + mg.Spec.ForProvider.StorageAccountID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StorageAccountIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StorageAccountID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.StorageAccountIDRef, + Selector: mg.Spec.InitProvider.StorageAccountIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StorageAccountID") + } + mg.Spec.InitProvider.StorageAccountID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StorageAccountIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/storage/v1beta2/zz_groupversion_info.go 
b/apis/storage/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..6c13461d8 --- /dev/null +++ b/apis/storage/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=storage.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "storage.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/storage/v1beta2/zz_managementpolicy_terraformed.go b/apis/storage/v1beta2/zz_managementpolicy_terraformed.go new file mode 100755 index 000000000..d1270a60d --- /dev/null +++ b/apis/storage/v1beta2/zz_managementpolicy_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ManagementPolicy +func (mg *ManagementPolicy) GetTerraformResourceType() string { + return "azurerm_storage_management_policy" +} + +// GetConnectionDetailsMapping for this ManagementPolicy +func (tr *ManagementPolicy) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this ManagementPolicy +func (tr *ManagementPolicy) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ManagementPolicy +func (tr *ManagementPolicy) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ManagementPolicy +func (tr *ManagementPolicy) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ManagementPolicy +func (tr *ManagementPolicy) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ManagementPolicy +func (tr *ManagementPolicy) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this ManagementPolicy +func (tr *ManagementPolicy) GetInitParameters() (map[string]any, error) { + p, err := 
json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this ManagementPolicy +func (tr *ManagementPolicy) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ManagementPolicy using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ManagementPolicy) LateInitialize(attrs []byte) (bool, error) { + params := &ManagementPolicyParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ManagementPolicy) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/storage/v1beta2/zz_managementpolicy_types.go b/apis/storage/v1beta2/zz_managementpolicy_types.go new file mode 100755 index 000000000..69b4beac6 --- /dev/null +++ b/apis/storage/v1beta2/zz_managementpolicy_types.go @@ -0,0 +1,565 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ActionsInitParameters struct { + + // A base_blob block as documented below. + BaseBlob *BaseBlobInitParameters `json:"baseBlob,omitempty" tf:"base_blob,omitempty"` + + // A snapshot block as documented below. + Snapshot *SnapshotInitParameters `json:"snapshot,omitempty" tf:"snapshot,omitempty"` + + // A version block as documented below. + Version *VersionInitParameters `json:"version,omitempty" tf:"version,omitempty"` +} + +type ActionsObservation struct { + + // A base_blob block as documented below. + BaseBlob *BaseBlobObservation `json:"baseBlob,omitempty" tf:"base_blob,omitempty"` + + // A snapshot block as documented below. + Snapshot *SnapshotObservation `json:"snapshot,omitempty" tf:"snapshot,omitempty"` + + // A version block as documented below. + Version *VersionObservation `json:"version,omitempty" tf:"version,omitempty"` +} + +type ActionsParameters struct { + + // A base_blob block as documented below. + // +kubebuilder:validation:Optional + BaseBlob *BaseBlobParameters `json:"baseBlob,omitempty" tf:"base_blob,omitempty"` + + // A snapshot block as documented below. 
+ // +kubebuilder:validation:Optional + Snapshot *SnapshotParameters `json:"snapshot,omitempty" tf:"snapshot,omitempty"` + + // A version block as documented below. + // +kubebuilder:validation:Optional + Version *VersionParameters `json:"version,omitempty" tf:"version,omitempty"` +} + +type BaseBlobInitParameters struct { + + // Whether a blob should automatically be tiered from cool back to hot if it's accessed again after being tiered to cool. Defaults to false. + AutoTierToHotFromCoolEnabled *bool `json:"autoTierToHotFromCoolEnabled,omitempty" tf:"auto_tier_to_hot_from_cool_enabled,omitempty"` + + // The age in days after creation to delete the blob snapshot. Must be between 0 and 99999. Defaults to -1. + DeleteAfterDaysSinceCreationGreaterThan *float64 `json:"deleteAfterDaysSinceCreationGreaterThan,omitempty" tf:"delete_after_days_since_creation_greater_than,omitempty"` + + // The age in days after last access time to delete the blob. Must be between 0 and 99999. Defaults to -1. + DeleteAfterDaysSinceLastAccessTimeGreaterThan *float64 `json:"deleteAfterDaysSinceLastAccessTimeGreaterThan,omitempty" tf:"delete_after_days_since_last_access_time_greater_than,omitempty"` + + // The age in days after last modification to delete the blob. Must be between 0 and 99999. Defaults to -1. + DeleteAfterDaysSinceModificationGreaterThan *float64 `json:"deleteAfterDaysSinceModificationGreaterThan,omitempty" tf:"delete_after_days_since_modification_greater_than,omitempty"` + + // The age in days after creation to archive storage. Supports blob currently at Hot or Cool tier. Must be between 0 and99999. Defaults to -1. + TierToArchiveAfterDaysSinceCreationGreaterThan *float64 `json:"tierToArchiveAfterDaysSinceCreationGreaterThan,omitempty" tf:"tier_to_archive_after_days_since_creation_greater_than,omitempty"` + + // The age in days after last access time to tier blobs to archive storage. Supports blob currently at Hot or Cool tier. Must be between 0 and99999. Defaults to -1. 
+ TierToArchiveAfterDaysSinceLastAccessTimeGreaterThan *float64 `json:"tierToArchiveAfterDaysSinceLastAccessTimeGreaterThan,omitempty" tf:"tier_to_archive_after_days_since_last_access_time_greater_than,omitempty"` + + // The age in days after last tier change to the blobs to skip to be archved. Must be between 0 and 99999. Defaults to -1. + TierToArchiveAfterDaysSinceLastTierChangeGreaterThan *float64 `json:"tierToArchiveAfterDaysSinceLastTierChangeGreaterThan,omitempty" tf:"tier_to_archive_after_days_since_last_tier_change_greater_than,omitempty"` + + // The age in days after last modification to tier blobs to archive storage. Supports blob currently at Hot or Cool tier. Must be between 0 and 99999. Defaults to -1. + TierToArchiveAfterDaysSinceModificationGreaterThan *float64 `json:"tierToArchiveAfterDaysSinceModificationGreaterThan,omitempty" tf:"tier_to_archive_after_days_since_modification_greater_than,omitempty"` + + // The age in days after creation to cold storage. Supports blob currently at Hot tier. Must be between 0 and 99999. Defaults to -1. + TierToColdAfterDaysSinceCreationGreaterThan *float64 `json:"tierToColdAfterDaysSinceCreationGreaterThan,omitempty" tf:"tier_to_cold_after_days_since_creation_greater_than,omitempty"` + + // The age in days after last access time to tier blobs to cold storage. Supports blob currently at Hot tier. Must be between 0 and 99999. Defaults to -1. + TierToColdAfterDaysSinceLastAccessTimeGreaterThan *float64 `json:"tierToColdAfterDaysSinceLastAccessTimeGreaterThan,omitempty" tf:"tier_to_cold_after_days_since_last_access_time_greater_than,omitempty"` + + // The age in days after last modification to tier blobs to cold storage. Supports blob currently at Hot tier. Must be between 0 and 99999. Defaults to -1. 
+ TierToColdAfterDaysSinceModificationGreaterThan *float64 `json:"tierToColdAfterDaysSinceModificationGreaterThan,omitempty" tf:"tier_to_cold_after_days_since_modification_greater_than,omitempty"` + + // The age in days after creation to cool storage. Supports blob currently at Hot tier. Must be between 0 and 99999. Defaults to -1. + TierToCoolAfterDaysSinceCreationGreaterThan *float64 `json:"tierToCoolAfterDaysSinceCreationGreaterThan,omitempty" tf:"tier_to_cool_after_days_since_creation_greater_than,omitempty"` + + // The age in days after last access time to tier blobs to cool storage. Supports blob currently at Hot tier. Must be between 0 and 99999. Defaults to -1. + TierToCoolAfterDaysSinceLastAccessTimeGreaterThan *float64 `json:"tierToCoolAfterDaysSinceLastAccessTimeGreaterThan,omitempty" tf:"tier_to_cool_after_days_since_last_access_time_greater_than,omitempty"` + + // The age in days after last modification to tier blobs to cool storage. Supports blob currently at Hot tier. Must be between 0 and 99999. Defaults to -1. + TierToCoolAfterDaysSinceModificationGreaterThan *float64 `json:"tierToCoolAfterDaysSinceModificationGreaterThan,omitempty" tf:"tier_to_cool_after_days_since_modification_greater_than,omitempty"` +} + +type BaseBlobObservation struct { + + // Whether a blob should automatically be tiered from cool back to hot if it's accessed again after being tiered to cool. Defaults to false. + AutoTierToHotFromCoolEnabled *bool `json:"autoTierToHotFromCoolEnabled,omitempty" tf:"auto_tier_to_hot_from_cool_enabled,omitempty"` + + // The age in days after creation to delete the blob snapshot. Must be between 0 and 99999. Defaults to -1. + DeleteAfterDaysSinceCreationGreaterThan *float64 `json:"deleteAfterDaysSinceCreationGreaterThan,omitempty" tf:"delete_after_days_since_creation_greater_than,omitempty"` + + // The age in days after last access time to delete the blob. Must be between 0 and 99999. Defaults to -1. 
+ DeleteAfterDaysSinceLastAccessTimeGreaterThan *float64 `json:"deleteAfterDaysSinceLastAccessTimeGreaterThan,omitempty" tf:"delete_after_days_since_last_access_time_greater_than,omitempty"` + + // The age in days after last modification to delete the blob. Must be between 0 and 99999. Defaults to -1. + DeleteAfterDaysSinceModificationGreaterThan *float64 `json:"deleteAfterDaysSinceModificationGreaterThan,omitempty" tf:"delete_after_days_since_modification_greater_than,omitempty"` + + // The age in days after creation to archive storage. Supports blob currently at Hot or Cool tier. Must be between 0 and99999. Defaults to -1. + TierToArchiveAfterDaysSinceCreationGreaterThan *float64 `json:"tierToArchiveAfterDaysSinceCreationGreaterThan,omitempty" tf:"tier_to_archive_after_days_since_creation_greater_than,omitempty"` + + // The age in days after last access time to tier blobs to archive storage. Supports blob currently at Hot or Cool tier. Must be between 0 and99999. Defaults to -1. + TierToArchiveAfterDaysSinceLastAccessTimeGreaterThan *float64 `json:"tierToArchiveAfterDaysSinceLastAccessTimeGreaterThan,omitempty" tf:"tier_to_archive_after_days_since_last_access_time_greater_than,omitempty"` + + // The age in days after last tier change to the blobs to skip to be archved. Must be between 0 and 99999. Defaults to -1. + TierToArchiveAfterDaysSinceLastTierChangeGreaterThan *float64 `json:"tierToArchiveAfterDaysSinceLastTierChangeGreaterThan,omitempty" tf:"tier_to_archive_after_days_since_last_tier_change_greater_than,omitempty"` + + // The age in days after last modification to tier blobs to archive storage. Supports blob currently at Hot or Cool tier. Must be between 0 and 99999. Defaults to -1. + TierToArchiveAfterDaysSinceModificationGreaterThan *float64 `json:"tierToArchiveAfterDaysSinceModificationGreaterThan,omitempty" tf:"tier_to_archive_after_days_since_modification_greater_than,omitempty"` + + // The age in days after creation to cold storage. 
Supports blob currently at Hot tier. Must be between 0 and 99999. Defaults to -1. + TierToColdAfterDaysSinceCreationGreaterThan *float64 `json:"tierToColdAfterDaysSinceCreationGreaterThan,omitempty" tf:"tier_to_cold_after_days_since_creation_greater_than,omitempty"` + + // The age in days after last access time to tier blobs to cold storage. Supports blob currently at Hot tier. Must be between 0 and 99999. Defaults to -1. + TierToColdAfterDaysSinceLastAccessTimeGreaterThan *float64 `json:"tierToColdAfterDaysSinceLastAccessTimeGreaterThan,omitempty" tf:"tier_to_cold_after_days_since_last_access_time_greater_than,omitempty"` + + // The age in days after last modification to tier blobs to cold storage. Supports blob currently at Hot tier. Must be between 0 and 99999. Defaults to -1. + TierToColdAfterDaysSinceModificationGreaterThan *float64 `json:"tierToColdAfterDaysSinceModificationGreaterThan,omitempty" tf:"tier_to_cold_after_days_since_modification_greater_than,omitempty"` + + // The age in days after creation to cool storage. Supports blob currently at Hot tier. Must be between 0 and 99999. Defaults to -1. + TierToCoolAfterDaysSinceCreationGreaterThan *float64 `json:"tierToCoolAfterDaysSinceCreationGreaterThan,omitempty" tf:"tier_to_cool_after_days_since_creation_greater_than,omitempty"` + + // The age in days after last access time to tier blobs to cool storage. Supports blob currently at Hot tier. Must be between 0 and 99999. Defaults to -1. + TierToCoolAfterDaysSinceLastAccessTimeGreaterThan *float64 `json:"tierToCoolAfterDaysSinceLastAccessTimeGreaterThan,omitempty" tf:"tier_to_cool_after_days_since_last_access_time_greater_than,omitempty"` + + // The age in days after last modification to tier blobs to cool storage. Supports blob currently at Hot tier. Must be between 0 and 99999. Defaults to -1. 
+ TierToCoolAfterDaysSinceModificationGreaterThan *float64 `json:"tierToCoolAfterDaysSinceModificationGreaterThan,omitempty" tf:"tier_to_cool_after_days_since_modification_greater_than,omitempty"` +} + +type BaseBlobParameters struct { + + // Whether a blob should automatically be tiered from cool back to hot if it's accessed again after being tiered to cool. Defaults to false. + // +kubebuilder:validation:Optional + AutoTierToHotFromCoolEnabled *bool `json:"autoTierToHotFromCoolEnabled,omitempty" tf:"auto_tier_to_hot_from_cool_enabled,omitempty"` + + // The age in days after creation to delete the blob snapshot. Must be between 0 and 99999. Defaults to -1. + // +kubebuilder:validation:Optional + DeleteAfterDaysSinceCreationGreaterThan *float64 `json:"deleteAfterDaysSinceCreationGreaterThan,omitempty" tf:"delete_after_days_since_creation_greater_than,omitempty"` + + // The age in days after last access time to delete the blob. Must be between 0 and 99999. Defaults to -1. + // +kubebuilder:validation:Optional + DeleteAfterDaysSinceLastAccessTimeGreaterThan *float64 `json:"deleteAfterDaysSinceLastAccessTimeGreaterThan,omitempty" tf:"delete_after_days_since_last_access_time_greater_than,omitempty"` + + // The age in days after last modification to delete the blob. Must be between 0 and 99999. Defaults to -1. + // +kubebuilder:validation:Optional + DeleteAfterDaysSinceModificationGreaterThan *float64 `json:"deleteAfterDaysSinceModificationGreaterThan,omitempty" tf:"delete_after_days_since_modification_greater_than,omitempty"` + + // The age in days after creation to archive storage. Supports blob currently at Hot or Cool tier. Must be between 0 and99999. Defaults to -1. 
+ // +kubebuilder:validation:Optional + TierToArchiveAfterDaysSinceCreationGreaterThan *float64 `json:"tierToArchiveAfterDaysSinceCreationGreaterThan,omitempty" tf:"tier_to_archive_after_days_since_creation_greater_than,omitempty"` + + // The age in days after last access time to tier blobs to archive storage. Supports blob currently at Hot or Cool tier. Must be between 0 and99999. Defaults to -1. + // +kubebuilder:validation:Optional + TierToArchiveAfterDaysSinceLastAccessTimeGreaterThan *float64 `json:"tierToArchiveAfterDaysSinceLastAccessTimeGreaterThan,omitempty" tf:"tier_to_archive_after_days_since_last_access_time_greater_than,omitempty"` + + // The age in days after last tier change to the blobs to skip to be archved. Must be between 0 and 99999. Defaults to -1. + // +kubebuilder:validation:Optional + TierToArchiveAfterDaysSinceLastTierChangeGreaterThan *float64 `json:"tierToArchiveAfterDaysSinceLastTierChangeGreaterThan,omitempty" tf:"tier_to_archive_after_days_since_last_tier_change_greater_than,omitempty"` + + // The age in days after last modification to tier blobs to archive storage. Supports blob currently at Hot or Cool tier. Must be between 0 and 99999. Defaults to -1. + // +kubebuilder:validation:Optional + TierToArchiveAfterDaysSinceModificationGreaterThan *float64 `json:"tierToArchiveAfterDaysSinceModificationGreaterThan,omitempty" tf:"tier_to_archive_after_days_since_modification_greater_than,omitempty"` + + // The age in days after creation to cold storage. Supports blob currently at Hot tier. Must be between 0 and 99999. Defaults to -1. + // +kubebuilder:validation:Optional + TierToColdAfterDaysSinceCreationGreaterThan *float64 `json:"tierToColdAfterDaysSinceCreationGreaterThan,omitempty" tf:"tier_to_cold_after_days_since_creation_greater_than,omitempty"` + + // The age in days after last access time to tier blobs to cold storage. Supports blob currently at Hot tier. Must be between 0 and 99999. Defaults to -1. 
+ // +kubebuilder:validation:Optional + TierToColdAfterDaysSinceLastAccessTimeGreaterThan *float64 `json:"tierToColdAfterDaysSinceLastAccessTimeGreaterThan,omitempty" tf:"tier_to_cold_after_days_since_last_access_time_greater_than,omitempty"` + + // The age in days after last modification to tier blobs to cold storage. Supports blob currently at Hot tier. Must be between 0 and 99999. Defaults to -1. + // +kubebuilder:validation:Optional + TierToColdAfterDaysSinceModificationGreaterThan *float64 `json:"tierToColdAfterDaysSinceModificationGreaterThan,omitempty" tf:"tier_to_cold_after_days_since_modification_greater_than,omitempty"` + + // The age in days after creation to cool storage. Supports blob currently at Hot tier. Must be between 0 and 99999. Defaults to -1. + // +kubebuilder:validation:Optional + TierToCoolAfterDaysSinceCreationGreaterThan *float64 `json:"tierToCoolAfterDaysSinceCreationGreaterThan,omitempty" tf:"tier_to_cool_after_days_since_creation_greater_than,omitempty"` + + // The age in days after last access time to tier blobs to cool storage. Supports blob currently at Hot tier. Must be between 0 and 99999. Defaults to -1. + // +kubebuilder:validation:Optional + TierToCoolAfterDaysSinceLastAccessTimeGreaterThan *float64 `json:"tierToCoolAfterDaysSinceLastAccessTimeGreaterThan,omitempty" tf:"tier_to_cool_after_days_since_last_access_time_greater_than,omitempty"` + + // The age in days after last modification to tier blobs to cool storage. Supports blob currently at Hot tier. Must be between 0 and 99999. Defaults to -1. + // +kubebuilder:validation:Optional + TierToCoolAfterDaysSinceModificationGreaterThan *float64 `json:"tierToCoolAfterDaysSinceModificationGreaterThan,omitempty" tf:"tier_to_cool_after_days_since_modification_greater_than,omitempty"` +} + +type FiltersInitParameters struct { + + // An array of predefined values. Valid options are blockBlob and appendBlob. 
+ // +listType=set + BlobTypes []*string `json:"blobTypes,omitempty" tf:"blob_types,omitempty"` + + // A match_blob_index_tag block as defined below. The block defines the blob index tag based filtering for blob objects. + MatchBlobIndexTag []MatchBlobIndexTagInitParameters `json:"matchBlobIndexTag,omitempty" tf:"match_blob_index_tag,omitempty"` + + // An array of strings for prefixes to be matched. + // +listType=set + PrefixMatch []*string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` +} + +type FiltersObservation struct { + + // An array of predefined values. Valid options are blockBlob and appendBlob. + // +listType=set + BlobTypes []*string `json:"blobTypes,omitempty" tf:"blob_types,omitempty"` + + // A match_blob_index_tag block as defined below. The block defines the blob index tag based filtering for blob objects. + MatchBlobIndexTag []MatchBlobIndexTagObservation `json:"matchBlobIndexTag,omitempty" tf:"match_blob_index_tag,omitempty"` + + // An array of strings for prefixes to be matched. + // +listType=set + PrefixMatch []*string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` +} + +type FiltersParameters struct { + + // An array of predefined values. Valid options are blockBlob and appendBlob. + // +kubebuilder:validation:Optional + // +listType=set + BlobTypes []*string `json:"blobTypes" tf:"blob_types,omitempty"` + + // A match_blob_index_tag block as defined below. The block defines the blob index tag based filtering for blob objects. + // +kubebuilder:validation:Optional + MatchBlobIndexTag []MatchBlobIndexTagParameters `json:"matchBlobIndexTag,omitempty" tf:"match_blob_index_tag,omitempty"` + + // An array of strings for prefixes to be matched. + // +kubebuilder:validation:Optional + // +listType=set + PrefixMatch []*string `json:"prefixMatch,omitempty" tf:"prefix_match,omitempty"` +} + +type ManagementPolicyInitParameters struct { + + // A rule block as documented below. 
+ Rule []RuleInitParameters `json:"rule,omitempty" tf:"rule,omitempty"` + + // Specifies the id of the storage account to apply the management policy to. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` + + // Reference to a Account in storage to populate storageAccountId. + // +kubebuilder:validation:Optional + StorageAccountIDRef *v1.Reference `json:"storageAccountIdRef,omitempty" tf:"-"` + + // Selector for a Account in storage to populate storageAccountId. + // +kubebuilder:validation:Optional + StorageAccountIDSelector *v1.Selector `json:"storageAccountIdSelector,omitempty" tf:"-"` +} + +type ManagementPolicyObservation struct { + + // The ID of the Storage Account Management Policy. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A rule block as documented below. + Rule []RuleObservation `json:"rule,omitempty" tf:"rule,omitempty"` + + // Specifies the id of the storage account to apply the management policy to. Changing this forces a new resource to be created. + StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` +} + +type ManagementPolicyParameters struct { + + // A rule block as documented below. + // +kubebuilder:validation:Optional + Rule []RuleParameters `json:"rule,omitempty" tf:"rule,omitempty"` + + // Specifies the id of the storage account to apply the management policy to. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + StorageAccountID *string `json:"storageAccountId,omitempty" tf:"storage_account_id,omitempty"` + + // Reference to a Account in storage to populate storageAccountId. + // +kubebuilder:validation:Optional + StorageAccountIDRef *v1.Reference `json:"storageAccountIdRef,omitempty" tf:"-"` + + // Selector for a Account in storage to populate storageAccountId. + // +kubebuilder:validation:Optional + StorageAccountIDSelector *v1.Selector `json:"storageAccountIdSelector,omitempty" tf:"-"` +} + +type MatchBlobIndexTagInitParameters struct { + + // The name of the rule. Rule name is case-sensitive. It must be unique within a policy. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The comparison operator which is used for object comparison and filtering. Possible value is ==. Defaults to ==. + Operation *string `json:"operation,omitempty" tf:"operation,omitempty"` + + // The filter tag value used for tag based filtering for blob objects. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type MatchBlobIndexTagObservation struct { + + // The name of the rule. Rule name is case-sensitive. It must be unique within a policy. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The comparison operator which is used for object comparison and filtering. Possible value is ==. Defaults to ==. + Operation *string `json:"operation,omitempty" tf:"operation,omitempty"` + + // The filter tag value used for tag based filtering for blob objects. + Value *string `json:"value,omitempty" tf:"value,omitempty"` +} + +type MatchBlobIndexTagParameters struct { + + // The name of the rule. Rule name is case-sensitive. It must be unique within a policy. 
+ // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The comparison operator which is used for object comparison and filtering. Possible value is ==. Defaults to ==. + // +kubebuilder:validation:Optional + Operation *string `json:"operation,omitempty" tf:"operation,omitempty"` + + // The filter tag value used for tag based filtering for blob objects. + // +kubebuilder:validation:Optional + Value *string `json:"value" tf:"value,omitempty"` +} + +type RuleInitParameters struct { + + // An actions block as documented below. + Actions *ActionsInitParameters `json:"actions,omitempty" tf:"actions,omitempty"` + + // Boolean to specify whether the rule is enabled. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // A filters block as documented below. + Filters *FiltersInitParameters `json:"filters,omitempty" tf:"filters,omitempty"` + + // The name of the rule. Rule name is case-sensitive. It must be unique within a policy. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type RuleObservation struct { + + // An actions block as documented below. + Actions *ActionsObservation `json:"actions,omitempty" tf:"actions,omitempty"` + + // Boolean to specify whether the rule is enabled. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // A filters block as documented below. + Filters *FiltersObservation `json:"filters,omitempty" tf:"filters,omitempty"` + + // The name of the rule. Rule name is case-sensitive. It must be unique within a policy. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type RuleParameters struct { + + // An actions block as documented below. + // +kubebuilder:validation:Optional + Actions *ActionsParameters `json:"actions" tf:"actions,omitempty"` + + // Boolean to specify whether the rule is enabled. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` + + // A filters block as documented below. 
+ // +kubebuilder:validation:Optional + Filters *FiltersParameters `json:"filters" tf:"filters,omitempty"` + + // The name of the rule. Rule name is case-sensitive. It must be unique within a policy. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` +} + +type SnapshotInitParameters struct { + + // The age in days after creation to tier blob version to archive storage. Must be between 0 and 99999. Defaults to -1. + ChangeTierToArchiveAfterDaysSinceCreation *float64 `json:"changeTierToArchiveAfterDaysSinceCreation,omitempty" tf:"change_tier_to_archive_after_days_since_creation,omitempty"` + + // The age in days creation create to tier blob version to cool storage. Must be between 0 and 99999. Defaults to -1. + ChangeTierToCoolAfterDaysSinceCreation *float64 `json:"changeTierToCoolAfterDaysSinceCreation,omitempty" tf:"change_tier_to_cool_after_days_since_creation,omitempty"` + + // The age in days after creation to delete the blob snapshot. Must be between 0 and 99999. Defaults to -1. + DeleteAfterDaysSinceCreationGreaterThan *float64 `json:"deleteAfterDaysSinceCreationGreaterThan,omitempty" tf:"delete_after_days_since_creation_greater_than,omitempty"` + + // The age in days after last tier change to the blobs to skip to be archved. Must be between 0 and 99999. Defaults to -1. + TierToArchiveAfterDaysSinceLastTierChangeGreaterThan *float64 `json:"tierToArchiveAfterDaysSinceLastTierChangeGreaterThan,omitempty" tf:"tier_to_archive_after_days_since_last_tier_change_greater_than,omitempty"` + + // The age in days after creation to cold storage. Supports blob currently at Hot tier. Must be between 0 and 99999. Defaults to -1. + TierToColdAfterDaysSinceCreationGreaterThan *float64 `json:"tierToColdAfterDaysSinceCreationGreaterThan,omitempty" tf:"tier_to_cold_after_days_since_creation_greater_than,omitempty"` +} + +type SnapshotObservation struct { + + // The age in days after creation to tier blob version to archive storage. 
Must be between 0 and 99999. Defaults to -1. + ChangeTierToArchiveAfterDaysSinceCreation *float64 `json:"changeTierToArchiveAfterDaysSinceCreation,omitempty" tf:"change_tier_to_archive_after_days_since_creation,omitempty"` + + // The age in days creation create to tier blob version to cool storage. Must be between 0 and 99999. Defaults to -1. + ChangeTierToCoolAfterDaysSinceCreation *float64 `json:"changeTierToCoolAfterDaysSinceCreation,omitempty" tf:"change_tier_to_cool_after_days_since_creation,omitempty"` + + // The age in days after creation to delete the blob snapshot. Must be between 0 and 99999. Defaults to -1. + DeleteAfterDaysSinceCreationGreaterThan *float64 `json:"deleteAfterDaysSinceCreationGreaterThan,omitempty" tf:"delete_after_days_since_creation_greater_than,omitempty"` + + // The age in days after last tier change to the blobs to skip to be archved. Must be between 0 and 99999. Defaults to -1. + TierToArchiveAfterDaysSinceLastTierChangeGreaterThan *float64 `json:"tierToArchiveAfterDaysSinceLastTierChangeGreaterThan,omitempty" tf:"tier_to_archive_after_days_since_last_tier_change_greater_than,omitempty"` + + // The age in days after creation to cold storage. Supports blob currently at Hot tier. Must be between 0 and 99999. Defaults to -1. + TierToColdAfterDaysSinceCreationGreaterThan *float64 `json:"tierToColdAfterDaysSinceCreationGreaterThan,omitempty" tf:"tier_to_cold_after_days_since_creation_greater_than,omitempty"` +} + +type SnapshotParameters struct { + + // The age in days after creation to tier blob version to archive storage. Must be between 0 and 99999. Defaults to -1. + // +kubebuilder:validation:Optional + ChangeTierToArchiveAfterDaysSinceCreation *float64 `json:"changeTierToArchiveAfterDaysSinceCreation,omitempty" tf:"change_tier_to_archive_after_days_since_creation,omitempty"` + + // The age in days creation create to tier blob version to cool storage. Must be between 0 and 99999. Defaults to -1. 
+ // +kubebuilder:validation:Optional + ChangeTierToCoolAfterDaysSinceCreation *float64 `json:"changeTierToCoolAfterDaysSinceCreation,omitempty" tf:"change_tier_to_cool_after_days_since_creation,omitempty"` + + // The age in days after creation to delete the blob snapshot. Must be between 0 and 99999. Defaults to -1. + // +kubebuilder:validation:Optional + DeleteAfterDaysSinceCreationGreaterThan *float64 `json:"deleteAfterDaysSinceCreationGreaterThan,omitempty" tf:"delete_after_days_since_creation_greater_than,omitempty"` + + // The age in days after last tier change to the blobs to skip to be archved. Must be between 0 and 99999. Defaults to -1. + // +kubebuilder:validation:Optional + TierToArchiveAfterDaysSinceLastTierChangeGreaterThan *float64 `json:"tierToArchiveAfterDaysSinceLastTierChangeGreaterThan,omitempty" tf:"tier_to_archive_after_days_since_last_tier_change_greater_than,omitempty"` + + // The age in days after creation to cold storage. Supports blob currently at Hot tier. Must be between 0 and 99999. Defaults to -1. + // +kubebuilder:validation:Optional + TierToColdAfterDaysSinceCreationGreaterThan *float64 `json:"tierToColdAfterDaysSinceCreationGreaterThan,omitempty" tf:"tier_to_cold_after_days_since_creation_greater_than,omitempty"` +} + +type VersionInitParameters struct { + + // The age in days after creation to tier blob version to archive storage. Must be between 0 and 99999. Defaults to -1. + ChangeTierToArchiveAfterDaysSinceCreation *float64 `json:"changeTierToArchiveAfterDaysSinceCreation,omitempty" tf:"change_tier_to_archive_after_days_since_creation,omitempty"` + + // The age in days creation create to tier blob version to cool storage. Must be between 0 and 99999. Defaults to -1. + ChangeTierToCoolAfterDaysSinceCreation *float64 `json:"changeTierToCoolAfterDaysSinceCreation,omitempty" tf:"change_tier_to_cool_after_days_since_creation,omitempty"` + + // The age in days after creation to delete the blob version. Must be between 0 and 99999. 
Defaults to -1. + DeleteAfterDaysSinceCreation *float64 `json:"deleteAfterDaysSinceCreation,omitempty" tf:"delete_after_days_since_creation,omitempty"` + + // The age in days after last tier change to the blobs to skip to be archved. Must be between 0 and 99999. Defaults to -1. + TierToArchiveAfterDaysSinceLastTierChangeGreaterThan *float64 `json:"tierToArchiveAfterDaysSinceLastTierChangeGreaterThan,omitempty" tf:"tier_to_archive_after_days_since_last_tier_change_greater_than,omitempty"` + + // The age in days after creation to cold storage. Supports blob currently at Hot tier. Must be between 0 and 99999. Defaults to -1. + TierToColdAfterDaysSinceCreationGreaterThan *float64 `json:"tierToColdAfterDaysSinceCreationGreaterThan,omitempty" tf:"tier_to_cold_after_days_since_creation_greater_than,omitempty"` +} + +type VersionObservation struct { + + // The age in days after creation to tier blob version to archive storage. Must be between 0 and 99999. Defaults to -1. + ChangeTierToArchiveAfterDaysSinceCreation *float64 `json:"changeTierToArchiveAfterDaysSinceCreation,omitempty" tf:"change_tier_to_archive_after_days_since_creation,omitempty"` + + // The age in days creation create to tier blob version to cool storage. Must be between 0 and 99999. Defaults to -1. + ChangeTierToCoolAfterDaysSinceCreation *float64 `json:"changeTierToCoolAfterDaysSinceCreation,omitempty" tf:"change_tier_to_cool_after_days_since_creation,omitempty"` + + // The age in days after creation to delete the blob version. Must be between 0 and 99999. Defaults to -1. + DeleteAfterDaysSinceCreation *float64 `json:"deleteAfterDaysSinceCreation,omitempty" tf:"delete_after_days_since_creation,omitempty"` + + // The age in days after last tier change to the blobs to skip to be archved. Must be between 0 and 99999. Defaults to -1. 
+ TierToArchiveAfterDaysSinceLastTierChangeGreaterThan *float64 `json:"tierToArchiveAfterDaysSinceLastTierChangeGreaterThan,omitempty" tf:"tier_to_archive_after_days_since_last_tier_change_greater_than,omitempty"` + + // The age in days after creation to cold storage. Supports blob currently at Hot tier. Must be between 0 and 99999. Defaults to -1. + TierToColdAfterDaysSinceCreationGreaterThan *float64 `json:"tierToColdAfterDaysSinceCreationGreaterThan,omitempty" tf:"tier_to_cold_after_days_since_creation_greater_than,omitempty"` +} + +type VersionParameters struct { + + // The age in days after creation to tier blob version to archive storage. Must be between 0 and 99999. Defaults to -1. + // +kubebuilder:validation:Optional + ChangeTierToArchiveAfterDaysSinceCreation *float64 `json:"changeTierToArchiveAfterDaysSinceCreation,omitempty" tf:"change_tier_to_archive_after_days_since_creation,omitempty"` + + // The age in days creation create to tier blob version to cool storage. Must be between 0 and 99999. Defaults to -1. + // +kubebuilder:validation:Optional + ChangeTierToCoolAfterDaysSinceCreation *float64 `json:"changeTierToCoolAfterDaysSinceCreation,omitempty" tf:"change_tier_to_cool_after_days_since_creation,omitempty"` + + // The age in days after creation to delete the blob version. Must be between 0 and 99999. Defaults to -1. + // +kubebuilder:validation:Optional + DeleteAfterDaysSinceCreation *float64 `json:"deleteAfterDaysSinceCreation,omitempty" tf:"delete_after_days_since_creation,omitempty"` + + // The age in days after last tier change to the blobs to skip to be archved. Must be between 0 and 99999. Defaults to -1. + // +kubebuilder:validation:Optional + TierToArchiveAfterDaysSinceLastTierChangeGreaterThan *float64 `json:"tierToArchiveAfterDaysSinceLastTierChangeGreaterThan,omitempty" tf:"tier_to_archive_after_days_since_last_tier_change_greater_than,omitempty"` + + // The age in days after creation to cold storage. Supports blob currently at Hot tier. 
Must be between 0 and 99999. Defaults to -1. + // +kubebuilder:validation:Optional + TierToColdAfterDaysSinceCreationGreaterThan *float64 `json:"tierToColdAfterDaysSinceCreationGreaterThan,omitempty" tf:"tier_to_cold_after_days_since_creation_greater_than,omitempty"` +} + +// ManagementPolicySpec defines the desired state of ManagementPolicy +type ManagementPolicySpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ManagementPolicyParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ManagementPolicyInitParameters `json:"initProvider,omitempty"` +} + +// ManagementPolicyStatus defines the observed state of ManagementPolicy. +type ManagementPolicyStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ManagementPolicyObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ManagementPolicy is the Schema for the ManagementPolicys API. Manages an Azure Storage Account Management Policy. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type ManagementPolicy struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec ManagementPolicySpec `json:"spec"` + Status ManagementPolicyStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ManagementPolicyList contains a list of ManagementPolicys +type ManagementPolicyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ManagementPolicy `json:"items"` +} + +// Repository type metadata. +var ( + ManagementPolicy_Kind = "ManagementPolicy" + ManagementPolicy_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ManagementPolicy_Kind}.String() + ManagementPolicy_KindAPIVersion = ManagementPolicy_Kind + "." + CRDGroupVersion.String() + ManagementPolicy_GroupVersionKind = CRDGroupVersion.WithKind(ManagementPolicy_Kind) +) + +func init() { + SchemeBuilder.Register(&ManagementPolicy{}, &ManagementPolicyList{}) +} diff --git a/apis/storagecache/v1beta1/zz_generated.conversion_hubs.go b/apis/storagecache/v1beta1/zz_generated.conversion_hubs.go index 2d4b99362..636883a90 100755 --- a/apis/storagecache/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/storagecache/v1beta1/zz_generated.conversion_hubs.go @@ -6,9 +6,6 @@ package v1beta1 -// Hub marks this type as a conversion hub. -func (tr *HPCCache) Hub() {} - // Hub marks this type as a conversion hub. 
func (tr *HPCCacheAccessPolicy) Hub() {} diff --git a/apis/storagecache/v1beta1/zz_generated.conversion_spokes.go b/apis/storagecache/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..c83f2ca6f --- /dev/null +++ b/apis/storagecache/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this HPCCache to the hub type. +func (tr *HPCCache) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the HPCCache type. 
+func (tr *HPCCache) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/storagecache/v1beta1/zz_generated.resolvers.go b/apis/storagecache/v1beta1/zz_generated.resolvers.go index cb05c00c2..ef9705715 100644 --- a/apis/storagecache/v1beta1/zz_generated.resolvers.go +++ b/apis/storagecache/v1beta1/zz_generated.resolvers.go @@ -96,7 +96,7 @@ func (mg *HPCCacheAccessPolicy) ResolveReferences(ctx context.Context, c client. var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("storagecache.azure.upbound.io", "v1beta1", "HPCCache", "HPCCacheList") + m, l, err = apisresolver.GetManagedResource("storagecache.azure.upbound.io", "v1beta2", "HPCCache", "HPCCacheList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -115,7 +115,7 @@ func (mg *HPCCacheAccessPolicy) ResolveReferences(ctx context.Context, c client. 
mg.Spec.ForProvider.HPCCacheID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.HPCCacheIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("storagecache.azure.upbound.io", "v1beta1", "HPCCache", "HPCCacheList") + m, l, err = apisresolver.GetManagedResource("storagecache.azure.upbound.io", "v1beta2", "HPCCache", "HPCCacheList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -146,7 +146,7 @@ func (mg *HPCCacheBlobNFSTarget) ResolveReferences(ctx context.Context, c client var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("storagecache.azure.upbound.io", "v1beta1", "HPCCache", "HPCCacheList") + m, l, err = apisresolver.GetManagedResource("storagecache.azure.upbound.io", "v1beta2", "HPCCache", "HPCCacheList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -184,7 +184,7 @@ func (mg *HPCCacheBlobNFSTarget) ResolveReferences(ctx context.Context, c client mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("storagecache.azure.upbound.io", "v1beta1", "HPCCache", "HPCCacheList") + m, l, err = apisresolver.GetManagedResource("storagecache.azure.upbound.io", "v1beta2", "HPCCache", "HPCCacheList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -215,7 +215,7 @@ func (mg *HPCCacheBlobTarget) ResolveReferences(ctx context.Context, c client.Re var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("storagecache.azure.upbound.io", "v1beta1", "HPCCache", "HPCCacheList") + m, l, err = apisresolver.GetManagedResource("storagecache.azure.upbound.io", 
"v1beta2", "HPCCache", "HPCCacheList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -272,7 +272,7 @@ func (mg *HPCCacheBlobTarget) ResolveReferences(ctx context.Context, c client.Re mg.Spec.ForProvider.StorageContainerID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.StorageContainerIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("storagecache.azure.upbound.io", "v1beta1", "HPCCache", "HPCCacheList") + m, l, err = apisresolver.GetManagedResource("storagecache.azure.upbound.io", "v1beta2", "HPCCache", "HPCCacheList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -322,7 +322,7 @@ func (mg *HPCCacheNFSTarget) ResolveReferences(ctx context.Context, c client.Rea var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("storagecache.azure.upbound.io", "v1beta1", "HPCCache", "HPCCacheList") + m, l, err = apisresolver.GetManagedResource("storagecache.azure.upbound.io", "v1beta2", "HPCCache", "HPCCacheList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -360,7 +360,7 @@ func (mg *HPCCacheNFSTarget) ResolveReferences(ctx context.Context, c client.Rea mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("storagecache.azure.upbound.io", "v1beta1", "HPCCache", "HPCCacheList") + m, l, err = apisresolver.GetManagedResource("storagecache.azure.upbound.io", "v1beta2", "HPCCache", "HPCCacheList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git 
a/apis/storagecache/v1beta1/zz_hpccacheaccesspolicy_types.go b/apis/storagecache/v1beta1/zz_hpccacheaccesspolicy_types.go index 8d4694ee7..d53554fe2 100755 --- a/apis/storagecache/v1beta1/zz_hpccacheaccesspolicy_types.go +++ b/apis/storagecache/v1beta1/zz_hpccacheaccesspolicy_types.go @@ -108,7 +108,7 @@ type HPCCacheAccessPolicyInitParameters struct { AccessRule []HPCCacheAccessPolicyAccessRuleInitParameters `json:"accessRule,omitempty" tf:"access_rule,omitempty"` // The ID of the HPC Cache that this HPC Cache Access Policy resides in. Changing this forces a new HPC Cache Access Policy to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storagecache/v1beta1.HPCCache + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storagecache/v1beta2.HPCCache // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() HPCCacheID *string `json:"hpcCacheId,omitempty" tf:"hpc_cache_id,omitempty"` @@ -140,7 +140,7 @@ type HPCCacheAccessPolicyParameters struct { AccessRule []HPCCacheAccessPolicyAccessRuleParameters `json:"accessRule,omitempty" tf:"access_rule,omitempty"` // The ID of the HPC Cache that this HPC Cache Access Policy resides in. Changing this forces a new HPC Cache Access Policy to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storagecache/v1beta1.HPCCache + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storagecache/v1beta2.HPCCache // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional HPCCacheID *string `json:"hpcCacheId,omitempty" tf:"hpc_cache_id,omitempty"` diff --git a/apis/storagecache/v1beta1/zz_hpccacheblobnfstarget_types.go b/apis/storagecache/v1beta1/zz_hpccacheblobnfstarget_types.go index bcb00c7d4..ae3586f29 100755 --- a/apis/storagecache/v1beta1/zz_hpccacheblobnfstarget_types.go +++ b/apis/storagecache/v1beta1/zz_hpccacheblobnfstarget_types.go @@ -19,7 +19,7 @@ type HPCCacheBlobNFSTargetInitParameters struct { AccessPolicyName *string `json:"accessPolicyName,omitempty" tf:"access_policy_name,omitempty"` // The name of the HPC Cache, which the HPC Cache Blob NFS Target will be added to. Changing this forces a new HPC Cache Blob NFS Target to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storagecache/v1beta1.HPCCache + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storagecache/v1beta2.HPCCache CacheName *string `json:"cacheName,omitempty" tf:"cache_name,omitempty"` // Reference to a HPCCache in storagecache to populate cacheName. @@ -83,7 +83,7 @@ type HPCCacheBlobNFSTargetParameters struct { AccessPolicyName *string `json:"accessPolicyName,omitempty" tf:"access_policy_name,omitempty"` // The name of the HPC Cache, which the HPC Cache Blob NFS Target will be added to. Changing this forces a new HPC Cache Blob NFS Target to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storagecache/v1beta1.HPCCache + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storagecache/v1beta2.HPCCache // +kubebuilder:validation:Optional CacheName *string `json:"cacheName,omitempty" tf:"cache_name,omitempty"` diff --git a/apis/storagecache/v1beta1/zz_hpccacheblobtarget_types.go b/apis/storagecache/v1beta1/zz_hpccacheblobtarget_types.go index eb4cad2b8..4cf754449 100755 --- a/apis/storagecache/v1beta1/zz_hpccacheblobtarget_types.go +++ b/apis/storagecache/v1beta1/zz_hpccacheblobtarget_types.go @@ -19,7 +19,7 @@ type HPCCacheBlobTargetInitParameters struct { AccessPolicyName *string `json:"accessPolicyName,omitempty" tf:"access_policy_name,omitempty"` // The name HPC Cache, which the HPC Cache Blob Target will be added to. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storagecache/v1beta1.HPCCache + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storagecache/v1beta2.HPCCache CacheName *string `json:"cacheName,omitempty" tf:"cache_name,omitempty"` // Reference to a HPCCache in storagecache to populate cacheName. @@ -75,7 +75,7 @@ type HPCCacheBlobTargetParameters struct { AccessPolicyName *string `json:"accessPolicyName,omitempty" tf:"access_policy_name,omitempty"` // The name HPC Cache, which the HPC Cache Blob Target will be added to. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storagecache/v1beta1.HPCCache + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storagecache/v1beta2.HPCCache // +kubebuilder:validation:Optional CacheName *string `json:"cacheName,omitempty" tf:"cache_name,omitempty"` diff --git a/apis/storagecache/v1beta1/zz_hpccachenfstarget_types.go b/apis/storagecache/v1beta1/zz_hpccachenfstarget_types.go index 6ccbebad1..60e6e9e33 100755 --- a/apis/storagecache/v1beta1/zz_hpccachenfstarget_types.go +++ b/apis/storagecache/v1beta1/zz_hpccachenfstarget_types.go @@ -16,7 +16,7 @@ import ( type HPCCacheNFSTargetInitParameters struct { // The name HPC Cache, which the HPC Cache NFS Target will be added to. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storagecache/v1beta1.HPCCache + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storagecache/v1beta2.HPCCache CacheName *string `json:"cacheName,omitempty" tf:"cache_name,omitempty"` // Reference to a HPCCache in storagecache to populate cacheName. @@ -73,7 +73,7 @@ type HPCCacheNFSTargetObservation struct { type HPCCacheNFSTargetParameters struct { // The name HPC Cache, which the HPC Cache NFS Target will be added to. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storagecache/v1beta1.HPCCache + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storagecache/v1beta2.HPCCache // +kubebuilder:validation:Optional CacheName *string `json:"cacheName,omitempty" tf:"cache_name,omitempty"` diff --git a/apis/storagecache/v1beta2/zz_generated.conversion_hubs.go b/apis/storagecache/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 000000000..a4a911b7a --- /dev/null +++ b/apis/storagecache/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *HPCCache) Hub() {} diff --git a/apis/storagecache/v1beta2/zz_generated.deepcopy.go b/apis/storagecache/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..896edff17 --- /dev/null +++ b/apis/storagecache/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,1301 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AccessRuleInitParameters) DeepCopyInto(out *AccessRuleInitParameters) { + *out = *in + if in.Access != nil { + in, out := &in.Access, &out.Access + *out = new(string) + **out = **in + } + if in.AnonymousGID != nil { + in, out := &in.AnonymousGID, &out.AnonymousGID + *out = new(float64) + **out = **in + } + if in.AnonymousUID != nil { + in, out := &in.AnonymousUID, &out.AnonymousUID + *out = new(float64) + **out = **in + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(string) + **out = **in + } + if in.RootSquashEnabled != nil { + in, out := &in.RootSquashEnabled, &out.RootSquashEnabled + *out = new(bool) + **out = **in + } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = new(string) + **out = **in + } + if in.SubmountAccessEnabled != nil { + in, out := &in.SubmountAccessEnabled, &out.SubmountAccessEnabled + *out = new(bool) + **out = **in + } + if in.SuidEnabled != nil { + in, out := &in.SuidEnabled, &out.SuidEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessRuleInitParameters. +func (in *AccessRuleInitParameters) DeepCopy() *AccessRuleInitParameters { + if in == nil { + return nil + } + out := new(AccessRuleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AccessRuleObservation) DeepCopyInto(out *AccessRuleObservation) { + *out = *in + if in.Access != nil { + in, out := &in.Access, &out.Access + *out = new(string) + **out = **in + } + if in.AnonymousGID != nil { + in, out := &in.AnonymousGID, &out.AnonymousGID + *out = new(float64) + **out = **in + } + if in.AnonymousUID != nil { + in, out := &in.AnonymousUID, &out.AnonymousUID + *out = new(float64) + **out = **in + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(string) + **out = **in + } + if in.RootSquashEnabled != nil { + in, out := &in.RootSquashEnabled, &out.RootSquashEnabled + *out = new(bool) + **out = **in + } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = new(string) + **out = **in + } + if in.SubmountAccessEnabled != nil { + in, out := &in.SubmountAccessEnabled, &out.SubmountAccessEnabled + *out = new(bool) + **out = **in + } + if in.SuidEnabled != nil { + in, out := &in.SuidEnabled, &out.SuidEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessRuleObservation. +func (in *AccessRuleObservation) DeepCopy() *AccessRuleObservation { + if in == nil { + return nil + } + out := new(AccessRuleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AccessRuleParameters) DeepCopyInto(out *AccessRuleParameters) { + *out = *in + if in.Access != nil { + in, out := &in.Access, &out.Access + *out = new(string) + **out = **in + } + if in.AnonymousGID != nil { + in, out := &in.AnonymousGID, &out.AnonymousGID + *out = new(float64) + **out = **in + } + if in.AnonymousUID != nil { + in, out := &in.AnonymousUID, &out.AnonymousUID + *out = new(float64) + **out = **in + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(string) + **out = **in + } + if in.RootSquashEnabled != nil { + in, out := &in.RootSquashEnabled, &out.RootSquashEnabled + *out = new(bool) + **out = **in + } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = new(string) + **out = **in + } + if in.SubmountAccessEnabled != nil { + in, out := &in.SubmountAccessEnabled, &out.SubmountAccessEnabled + *out = new(bool) + **out = **in + } + if in.SuidEnabled != nil { + in, out := &in.SuidEnabled, &out.SuidEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessRuleParameters. +func (in *AccessRuleParameters) DeepCopy() *AccessRuleParameters { + if in == nil { + return nil + } + out := new(AccessRuleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BindInitParameters) DeepCopyInto(out *BindInitParameters) { + *out = *in + if in.Dn != nil { + in, out := &in.Dn, &out.Dn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BindInitParameters. +func (in *BindInitParameters) DeepCopy() *BindInitParameters { + if in == nil { + return nil + } + out := new(BindInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *BindObservation) DeepCopyInto(out *BindObservation) { + *out = *in + if in.Dn != nil { + in, out := &in.Dn, &out.Dn + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BindObservation. +func (in *BindObservation) DeepCopy() *BindObservation { + if in == nil { + return nil + } + out := new(BindObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BindParameters) DeepCopyInto(out *BindParameters) { + *out = *in + if in.Dn != nil { + in, out := &in.Dn, &out.Dn + *out = new(string) + **out = **in + } + out.PasswordSecretRef = in.PasswordSecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BindParameters. +func (in *BindParameters) DeepCopy() *BindParameters { + if in == nil { + return nil + } + out := new(BindParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSInitParameters) DeepCopyInto(out *DNSInitParameters) { + *out = *in + if in.SearchDomain != nil { + in, out := &in.SearchDomain, &out.SearchDomain + *out = new(string) + **out = **in + } + if in.Servers != nil { + in, out := &in.Servers, &out.Servers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSInitParameters. +func (in *DNSInitParameters) DeepCopy() *DNSInitParameters { + if in == nil { + return nil + } + out := new(DNSInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *DNSObservation) DeepCopyInto(out *DNSObservation) { + *out = *in + if in.SearchDomain != nil { + in, out := &in.SearchDomain, &out.SearchDomain + *out = new(string) + **out = **in + } + if in.Servers != nil { + in, out := &in.Servers, &out.Servers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSObservation. +func (in *DNSObservation) DeepCopy() *DNSObservation { + if in == nil { + return nil + } + out := new(DNSObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSParameters) DeepCopyInto(out *DNSParameters) { + *out = *in + if in.SearchDomain != nil { + in, out := &in.SearchDomain, &out.SearchDomain + *out = new(string) + **out = **in + } + if in.Servers != nil { + in, out := &in.Servers, &out.Servers + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSParameters. +func (in *DNSParameters) DeepCopy() *DNSParameters { + if in == nil { + return nil + } + out := new(DNSParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DefaultAccessPolicyInitParameters) DeepCopyInto(out *DefaultAccessPolicyInitParameters) { + *out = *in + if in.AccessRule != nil { + in, out := &in.AccessRule, &out.AccessRule + *out = make([]AccessRuleInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultAccessPolicyInitParameters. +func (in *DefaultAccessPolicyInitParameters) DeepCopy() *DefaultAccessPolicyInitParameters { + if in == nil { + return nil + } + out := new(DefaultAccessPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DefaultAccessPolicyObservation) DeepCopyInto(out *DefaultAccessPolicyObservation) { + *out = *in + if in.AccessRule != nil { + in, out := &in.AccessRule, &out.AccessRule + *out = make([]AccessRuleObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultAccessPolicyObservation. +func (in *DefaultAccessPolicyObservation) DeepCopy() *DefaultAccessPolicyObservation { + if in == nil { + return nil + } + out := new(DefaultAccessPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DefaultAccessPolicyParameters) DeepCopyInto(out *DefaultAccessPolicyParameters) { + *out = *in + if in.AccessRule != nil { + in, out := &in.AccessRule, &out.AccessRule + *out = make([]AccessRuleParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultAccessPolicyParameters. 
+func (in *DefaultAccessPolicyParameters) DeepCopy() *DefaultAccessPolicyParameters { + if in == nil { + return nil + } + out := new(DefaultAccessPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DirectoryActiveDirectoryInitParameters) DeepCopyInto(out *DirectoryActiveDirectoryInitParameters) { + *out = *in + if in.CacheNetbiosName != nil { + in, out := &in.CacheNetbiosName, &out.CacheNetbiosName + *out = new(string) + **out = **in + } + if in.DNSPrimaryIP != nil { + in, out := &in.DNSPrimaryIP, &out.DNSPrimaryIP + *out = new(string) + **out = **in + } + if in.DNSSecondaryIP != nil { + in, out := &in.DNSSecondaryIP, &out.DNSSecondaryIP + *out = new(string) + **out = **in + } + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.DomainNetbiosName != nil { + in, out := &in.DomainNetbiosName, &out.DomainNetbiosName + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DirectoryActiveDirectoryInitParameters. +func (in *DirectoryActiveDirectoryInitParameters) DeepCopy() *DirectoryActiveDirectoryInitParameters { + if in == nil { + return nil + } + out := new(DirectoryActiveDirectoryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DirectoryActiveDirectoryObservation) DeepCopyInto(out *DirectoryActiveDirectoryObservation) { + *out = *in + if in.CacheNetbiosName != nil { + in, out := &in.CacheNetbiosName, &out.CacheNetbiosName + *out = new(string) + **out = **in + } + if in.DNSPrimaryIP != nil { + in, out := &in.DNSPrimaryIP, &out.DNSPrimaryIP + *out = new(string) + **out = **in + } + if in.DNSSecondaryIP != nil { + in, out := &in.DNSSecondaryIP, &out.DNSSecondaryIP + *out = new(string) + **out = **in + } + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.DomainNetbiosName != nil { + in, out := &in.DomainNetbiosName, &out.DomainNetbiosName + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DirectoryActiveDirectoryObservation. +func (in *DirectoryActiveDirectoryObservation) DeepCopy() *DirectoryActiveDirectoryObservation { + if in == nil { + return nil + } + out := new(DirectoryActiveDirectoryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DirectoryActiveDirectoryParameters) DeepCopyInto(out *DirectoryActiveDirectoryParameters) { + *out = *in + if in.CacheNetbiosName != nil { + in, out := &in.CacheNetbiosName, &out.CacheNetbiosName + *out = new(string) + **out = **in + } + if in.DNSPrimaryIP != nil { + in, out := &in.DNSPrimaryIP, &out.DNSPrimaryIP + *out = new(string) + **out = **in + } + if in.DNSSecondaryIP != nil { + in, out := &in.DNSSecondaryIP, &out.DNSSecondaryIP + *out = new(string) + **out = **in + } + if in.DomainName != nil { + in, out := &in.DomainName, &out.DomainName + *out = new(string) + **out = **in + } + if in.DomainNetbiosName != nil { + in, out := &in.DomainNetbiosName, &out.DomainNetbiosName + *out = new(string) + **out = **in + } + out.PasswordSecretRef = in.PasswordSecretRef + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DirectoryActiveDirectoryParameters. +func (in *DirectoryActiveDirectoryParameters) DeepCopy() *DirectoryActiveDirectoryParameters { + if in == nil { + return nil + } + out := new(DirectoryActiveDirectoryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DirectoryFlatFileInitParameters) DeepCopyInto(out *DirectoryFlatFileInitParameters) { + *out = *in + if in.GroupFileURI != nil { + in, out := &in.GroupFileURI, &out.GroupFileURI + *out = new(string) + **out = **in + } + if in.PasswordFileURI != nil { + in, out := &in.PasswordFileURI, &out.PasswordFileURI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DirectoryFlatFileInitParameters. 
+func (in *DirectoryFlatFileInitParameters) DeepCopy() *DirectoryFlatFileInitParameters { + if in == nil { + return nil + } + out := new(DirectoryFlatFileInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DirectoryFlatFileObservation) DeepCopyInto(out *DirectoryFlatFileObservation) { + *out = *in + if in.GroupFileURI != nil { + in, out := &in.GroupFileURI, &out.GroupFileURI + *out = new(string) + **out = **in + } + if in.PasswordFileURI != nil { + in, out := &in.PasswordFileURI, &out.PasswordFileURI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DirectoryFlatFileObservation. +func (in *DirectoryFlatFileObservation) DeepCopy() *DirectoryFlatFileObservation { + if in == nil { + return nil + } + out := new(DirectoryFlatFileObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DirectoryFlatFileParameters) DeepCopyInto(out *DirectoryFlatFileParameters) { + *out = *in + if in.GroupFileURI != nil { + in, out := &in.GroupFileURI, &out.GroupFileURI + *out = new(string) + **out = **in + } + if in.PasswordFileURI != nil { + in, out := &in.PasswordFileURI, &out.PasswordFileURI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DirectoryFlatFileParameters. +func (in *DirectoryFlatFileParameters) DeepCopy() *DirectoryFlatFileParameters { + if in == nil { + return nil + } + out := new(DirectoryFlatFileParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DirectoryLdapInitParameters) DeepCopyInto(out *DirectoryLdapInitParameters) { + *out = *in + if in.BaseDn != nil { + in, out := &in.BaseDn, &out.BaseDn + *out = new(string) + **out = **in + } + if in.Bind != nil { + in, out := &in.Bind, &out.Bind + *out = new(BindInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CertificateValidationURI != nil { + in, out := &in.CertificateValidationURI, &out.CertificateValidationURI + *out = new(string) + **out = **in + } + if in.DownloadCertificateAutomatically != nil { + in, out := &in.DownloadCertificateAutomatically, &out.DownloadCertificateAutomatically + *out = new(bool) + **out = **in + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DirectoryLdapInitParameters. +func (in *DirectoryLdapInitParameters) DeepCopy() *DirectoryLdapInitParameters { + if in == nil { + return nil + } + out := new(DirectoryLdapInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DirectoryLdapObservation) DeepCopyInto(out *DirectoryLdapObservation) { + *out = *in + if in.BaseDn != nil { + in, out := &in.BaseDn, &out.BaseDn + *out = new(string) + **out = **in + } + if in.Bind != nil { + in, out := &in.Bind, &out.Bind + *out = new(BindObservation) + (*in).DeepCopyInto(*out) + } + if in.CertificateValidationURI != nil { + in, out := &in.CertificateValidationURI, &out.CertificateValidationURI + *out = new(string) + **out = **in + } + if in.DownloadCertificateAutomatically != nil { + in, out := &in.DownloadCertificateAutomatically, &out.DownloadCertificateAutomatically + *out = new(bool) + **out = **in + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DirectoryLdapObservation. +func (in *DirectoryLdapObservation) DeepCopy() *DirectoryLdapObservation { + if in == nil { + return nil + } + out := new(DirectoryLdapObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DirectoryLdapParameters) DeepCopyInto(out *DirectoryLdapParameters) { + *out = *in + if in.BaseDn != nil { + in, out := &in.BaseDn, &out.BaseDn + *out = new(string) + **out = **in + } + if in.Bind != nil { + in, out := &in.Bind, &out.Bind + *out = new(BindParameters) + (*in).DeepCopyInto(*out) + } + if in.CertificateValidationURI != nil { + in, out := &in.CertificateValidationURI, &out.CertificateValidationURI + *out = new(string) + **out = **in + } + if in.DownloadCertificateAutomatically != nil { + in, out := &in.DownloadCertificateAutomatically, &out.DownloadCertificateAutomatically + *out = new(bool) + **out = **in + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DirectoryLdapParameters. +func (in *DirectoryLdapParameters) DeepCopy() *DirectoryLdapParameters { + if in == nil { + return nil + } + out := new(DirectoryLdapParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HPCCache) DeepCopyInto(out *HPCCache) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HPCCache. +func (in *HPCCache) DeepCopy() *HPCCache { + if in == nil { + return nil + } + out := new(HPCCache) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *HPCCache) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HPCCacheInitParameters) DeepCopyInto(out *HPCCacheInitParameters) { + *out = *in + if in.AutomaticallyRotateKeyToLatestEnabled != nil { + in, out := &in.AutomaticallyRotateKeyToLatestEnabled, &out.AutomaticallyRotateKeyToLatestEnabled + *out = new(bool) + **out = **in + } + if in.CacheSizeInGb != nil { + in, out := &in.CacheSizeInGb, &out.CacheSizeInGb + *out = new(float64) + **out = **in + } + if in.DNS != nil { + in, out := &in.DNS, &out.DNS + *out = new(DNSInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DefaultAccessPolicy != nil { + in, out := &in.DefaultAccessPolicy, &out.DefaultAccessPolicy + *out = new(DefaultAccessPolicyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DirectoryActiveDirectory != nil { + in, out := &in.DirectoryActiveDirectory, &out.DirectoryActiveDirectory + *out = new(DirectoryActiveDirectoryInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DirectoryFlatFile != nil { + in, out := &in.DirectoryFlatFile, &out.DirectoryFlatFile + *out = new(DirectoryFlatFileInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DirectoryLdap != nil { + in, out := &in.DirectoryLdap, &out.DirectoryLdap + *out = new(DirectoryLdapInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.KeyVaultKeyID != nil { + in, out := &in.KeyVaultKeyID, &out.KeyVaultKeyID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Mtu != nil { + in, out := &in.Mtu, &out.Mtu + *out = new(float64) + **out = **in + } + if in.NtpServer != nil { + in, out := &in.NtpServer, &out.NtpServer + *out 
= new(string) + **out = **in + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HPCCacheInitParameters. +func (in *HPCCacheInitParameters) DeepCopy() *HPCCacheInitParameters { + if in == nil { + return nil + } + out := new(HPCCacheInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HPCCacheList) DeepCopyInto(out *HPCCacheList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HPCCache, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HPCCacheList. +func (in *HPCCacheList) DeepCopy() *HPCCacheList { + if in == nil { + return nil + } + out := new(HPCCacheList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *HPCCacheList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HPCCacheObservation) DeepCopyInto(out *HPCCacheObservation) { + *out = *in + if in.AutomaticallyRotateKeyToLatestEnabled != nil { + in, out := &in.AutomaticallyRotateKeyToLatestEnabled, &out.AutomaticallyRotateKeyToLatestEnabled + *out = new(bool) + **out = **in + } + if in.CacheSizeInGb != nil { + in, out := &in.CacheSizeInGb, &out.CacheSizeInGb + *out = new(float64) + **out = **in + } + if in.DNS != nil { + in, out := &in.DNS, &out.DNS + *out = new(DNSObservation) + (*in).DeepCopyInto(*out) + } + if in.DefaultAccessPolicy != nil { + in, out := &in.DefaultAccessPolicy, &out.DefaultAccessPolicy + *out = new(DefaultAccessPolicyObservation) + (*in).DeepCopyInto(*out) + } + if in.DirectoryActiveDirectory != nil { + in, out := &in.DirectoryActiveDirectory, &out.DirectoryActiveDirectory + *out = new(DirectoryActiveDirectoryObservation) + (*in).DeepCopyInto(*out) + } + if in.DirectoryFlatFile != nil { + in, out := &in.DirectoryFlatFile, &out.DirectoryFlatFile + *out = new(DirectoryFlatFileObservation) + (*in).DeepCopyInto(*out) + } + if in.DirectoryLdap != nil { + in, out := &in.DirectoryLdap, &out.DirectoryLdap + *out = new(DirectoryLdapObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.KeyVaultKeyID != nil { + in, out := &in.KeyVaultKeyID, &out.KeyVaultKeyID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MountAddresses != nil { + in, out := &in.MountAddresses, &out.MountAddresses + *out = 
make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Mtu != nil { + in, out := &in.Mtu, &out.Mtu + *out = new(float64) + **out = **in + } + if in.NtpServer != nil { + in, out := &in.NtpServer, &out.NtpServer + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HPCCacheObservation. +func (in *HPCCacheObservation) DeepCopy() *HPCCacheObservation { + if in == nil { + return nil + } + out := new(HPCCacheObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HPCCacheParameters) DeepCopyInto(out *HPCCacheParameters) { + *out = *in + if in.AutomaticallyRotateKeyToLatestEnabled != nil { + in, out := &in.AutomaticallyRotateKeyToLatestEnabled, &out.AutomaticallyRotateKeyToLatestEnabled + *out = new(bool) + **out = **in + } + if in.CacheSizeInGb != nil { + in, out := &in.CacheSizeInGb, &out.CacheSizeInGb + *out = new(float64) + **out = **in + } + if in.DNS != nil { + in, out := &in.DNS, &out.DNS + *out = new(DNSParameters) + (*in).DeepCopyInto(*out) + } + if in.DefaultAccessPolicy != nil { + in, out := &in.DefaultAccessPolicy, &out.DefaultAccessPolicy + *out = new(DefaultAccessPolicyParameters) + (*in).DeepCopyInto(*out) + } + if in.DirectoryActiveDirectory != nil { + in, out := &in.DirectoryActiveDirectory, &out.DirectoryActiveDirectory + *out = new(DirectoryActiveDirectoryParameters) + (*in).DeepCopyInto(*out) + } + if in.DirectoryFlatFile != nil { + in, out := &in.DirectoryFlatFile, &out.DirectoryFlatFile + *out = new(DirectoryFlatFileParameters) + (*in).DeepCopyInto(*out) + } + if in.DirectoryLdap != nil { + in, out := &in.DirectoryLdap, &out.DirectoryLdap + *out = new(DirectoryLdapParameters) + (*in).DeepCopyInto(*out) + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.KeyVaultKeyID != nil { + in, out := &in.KeyVaultKeyID, &out.KeyVaultKeyID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Mtu != nil { + in, out := &in.Mtu, &out.Mtu + *out = new(float64) + **out = **in + } + if in.NtpServer != nil { + in, out := &in.NtpServer, &out.NtpServer + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + 
*out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.SubnetID != nil { + in, out := &in.SubnetID, &out.SubnetID + *out = new(string) + **out = **in + } + if in.SubnetIDRef != nil { + in, out := &in.SubnetIDRef, &out.SubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SubnetIDSelector != nil { + in, out := &in.SubnetIDSelector, &out.SubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HPCCacheParameters. +func (in *HPCCacheParameters) DeepCopy() *HPCCacheParameters { + if in == nil { + return nil + } + out := new(HPCCacheParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HPCCacheSpec) DeepCopyInto(out *HPCCacheSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HPCCacheSpec. 
+func (in *HPCCacheSpec) DeepCopy() *HPCCacheSpec { + if in == nil { + return nil + } + out := new(HPCCacheSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HPCCacheStatus) DeepCopyInto(out *HPCCacheStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HPCCacheStatus. +func (in *HPCCacheStatus) DeepCopy() *HPCCacheStatus { + if in == nil { + return nil + } + out := new(HPCCacheStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityInitParameters) DeepCopyInto(out *IdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityInitParameters. +func (in *IdentityInitParameters) DeepCopy() *IdentityInitParameters { + if in == nil { + return nil + } + out := new(IdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentityObservation) DeepCopyInto(out *IdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityObservation. +func (in *IdentityObservation) DeepCopy() *IdentityObservation { + if in == nil { + return nil + } + out := new(IdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityParameters) DeepCopyInto(out *IdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityParameters. 
+func (in *IdentityParameters) DeepCopy() *IdentityParameters { + if in == nil { + return nil + } + out := new(IdentityParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/storagecache/v1beta2/zz_generated.managed.go b/apis/storagecache/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..6ca49fe9e --- /dev/null +++ b/apis/storagecache/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this HPCCache. +func (mg *HPCCache) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this HPCCache. +func (mg *HPCCache) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this HPCCache. +func (mg *HPCCache) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this HPCCache. +func (mg *HPCCache) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this HPCCache. +func (mg *HPCCache) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this HPCCache. +func (mg *HPCCache) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this HPCCache. +func (mg *HPCCache) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this HPCCache. +func (mg *HPCCache) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this HPCCache. 
+func (mg *HPCCache) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this HPCCache. +func (mg *HPCCache) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this HPCCache. +func (mg *HPCCache) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this HPCCache. +func (mg *HPCCache) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/storagecache/v1beta2/zz_generated.managedlist.go b/apis/storagecache/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..b451122f5 --- /dev/null +++ b/apis/storagecache/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this HPCCacheList. +func (l *HPCCacheList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/storagecache/v1beta2/zz_generated.resolvers.go b/apis/storagecache/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..f781696d0 --- /dev/null +++ b/apis/storagecache/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,88 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + client "sigs.k8s.io/controller-runtime/pkg/client" + + // ResolveReferences of this HPCCache. + apisresolver "github.com/upbound/provider-azure/internal/apis" +) + +func (mg *HPCCache) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SubnetID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.SubnetIDRef, + Selector: 
mg.Spec.ForProvider.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SubnetID") + } + mg.Spec.ForProvider.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SubnetIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SubnetID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.SubnetIDRef, + Selector: mg.Spec.InitProvider.SubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SubnetID") + } + mg.Spec.InitProvider.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SubnetIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/storagecache/v1beta2/zz_groupversion_info.go b/apis/storagecache/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..726818013 --- /dev/null +++ b/apis/storagecache/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=storagecache.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
+const ( + CRDGroup = "storagecache.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/storagecache/v1beta2/zz_hpccache_terraformed.go b/apis/storagecache/v1beta2/zz_hpccache_terraformed.go new file mode 100755 index 000000000..9e95e05d3 --- /dev/null +++ b/apis/storagecache/v1beta2/zz_hpccache_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this HPCCache +func (mg *HPCCache) GetTerraformResourceType() string { + return "azurerm_hpc_cache" +} + +// GetConnectionDetailsMapping for this HPCCache +func (tr *HPCCache) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"directory_active_directory[*].password": "spec.forProvider.directoryActiveDirectory[*].passwordSecretRef", "directory_ldap[*].bind[*].password": "spec.forProvider.directoryLdap[*].bind[*].passwordSecretRef"} +} + +// GetObservation of this HPCCache +func (tr *HPCCache) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this HPCCache +func (tr *HPCCache) SetObservation(obs map[string]any) error { 
+ p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this HPCCache +func (tr *HPCCache) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this HPCCache +func (tr *HPCCache) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this HPCCache +func (tr *HPCCache) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this HPCCache +func (tr *HPCCache) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this HPCCache +func (tr *HPCCache) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this HPCCache using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *HPCCache) LateInitialize(attrs []byte) (bool, error) { + params := &HPCCacheParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *HPCCache) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/storagecache/v1beta2/zz_hpccache_types.go b/apis/storagecache/v1beta2/zz_hpccache_types.go new file mode 100755 index 000000000..630e6ffc3 --- /dev/null +++ b/apis/storagecache/v1beta2/zz_hpccache_types.go @@ -0,0 +1,650 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AccessRuleInitParameters struct { + + // The access level for this rule. Possible values are: rw, ro, no. + Access *string `json:"access,omitempty" tf:"access,omitempty"` + + // The anonymous GID used when root_squash_enabled is true. 
+ AnonymousGID *float64 `json:"anonymousGid,omitempty" tf:"anonymous_gid,omitempty"` + + // The anonymous UID used when root_squash_enabled is true. + AnonymousUID *float64 `json:"anonymousUid,omitempty" tf:"anonymous_uid,omitempty"` + + // The filter applied to the scope for this rule. The filter's format depends on its scope: default scope matches all clients and has no filter value; network scope takes a CIDR format; host takes an IP address or fully qualified domain name. If a client does not match any filter rule and there is no default rule, access is denied. + Filter *string `json:"filter,omitempty" tf:"filter,omitempty"` + + // Whether to enable root squash? + RootSquashEnabled *bool `json:"rootSquashEnabled,omitempty" tf:"root_squash_enabled,omitempty"` + + // The scope of this rule. The scope and (potentially) the filter determine which clients match the rule. Possible values are: default, network, host. + Scope *string `json:"scope,omitempty" tf:"scope,omitempty"` + + // Whether allow access to subdirectories under the root export? + SubmountAccessEnabled *bool `json:"submountAccessEnabled,omitempty" tf:"submount_access_enabled,omitempty"` + + // Whether SUID is allowed? + SuidEnabled *bool `json:"suidEnabled,omitempty" tf:"suid_enabled,omitempty"` +} + +type AccessRuleObservation struct { + + // The access level for this rule. Possible values are: rw, ro, no. + Access *string `json:"access,omitempty" tf:"access,omitempty"` + + // The anonymous GID used when root_squash_enabled is true. + AnonymousGID *float64 `json:"anonymousGid,omitempty" tf:"anonymous_gid,omitempty"` + + // The anonymous UID used when root_squash_enabled is true. + AnonymousUID *float64 `json:"anonymousUid,omitempty" tf:"anonymous_uid,omitempty"` + + // The filter applied to the scope for this rule. 
The filter's format depends on its scope: default scope matches all clients and has no filter value; network scope takes a CIDR format; host takes an IP address or fully qualified domain name. If a client does not match any filter rule and there is no default rule, access is denied. + Filter *string `json:"filter,omitempty" tf:"filter,omitempty"` + + // Whether to enable root squash? + RootSquashEnabled *bool `json:"rootSquashEnabled,omitempty" tf:"root_squash_enabled,omitempty"` + + // The scope of this rule. The scope and (potentially) the filter determine which clients match the rule. Possible values are: default, network, host. + Scope *string `json:"scope,omitempty" tf:"scope,omitempty"` + + // Whether allow access to subdirectories under the root export? + SubmountAccessEnabled *bool `json:"submountAccessEnabled,omitempty" tf:"submount_access_enabled,omitempty"` + + // Whether SUID is allowed? + SuidEnabled *bool `json:"suidEnabled,omitempty" tf:"suid_enabled,omitempty"` +} + +type AccessRuleParameters struct { + + // The access level for this rule. Possible values are: rw, ro, no. + // +kubebuilder:validation:Optional + Access *string `json:"access" tf:"access,omitempty"` + + // The anonymous GID used when root_squash_enabled is true. + // +kubebuilder:validation:Optional + AnonymousGID *float64 `json:"anonymousGid,omitempty" tf:"anonymous_gid,omitempty"` + + // The anonymous UID used when root_squash_enabled is true. + // +kubebuilder:validation:Optional + AnonymousUID *float64 `json:"anonymousUid,omitempty" tf:"anonymous_uid,omitempty"` + + // The filter applied to the scope for this rule. The filter's format depends on its scope: default scope matches all clients and has no filter value; network scope takes a CIDR format; host takes an IP address or fully qualified domain name. If a client does not match any filter rule and there is no default rule, access is denied. 
+ // +kubebuilder:validation:Optional + Filter *string `json:"filter,omitempty" tf:"filter,omitempty"` + + // Whether to enable root squash? + // +kubebuilder:validation:Optional + RootSquashEnabled *bool `json:"rootSquashEnabled,omitempty" tf:"root_squash_enabled,omitempty"` + + // The scope of this rule. The scope and (potentially) the filter determine which clients match the rule. Possible values are: default, network, host. + // +kubebuilder:validation:Optional + Scope *string `json:"scope" tf:"scope,omitempty"` + + // Whether allow access to subdirectories under the root export? + // +kubebuilder:validation:Optional + SubmountAccessEnabled *bool `json:"submountAccessEnabled,omitempty" tf:"submount_access_enabled,omitempty"` + + // Whether SUID is allowed? + // +kubebuilder:validation:Optional + SuidEnabled *bool `json:"suidEnabled,omitempty" tf:"suid_enabled,omitempty"` +} + +type BindInitParameters struct { + + // The Bind Distinguished Name (DN) identity to be used in the secure LDAP connection. + Dn *string `json:"dn,omitempty" tf:"dn,omitempty"` +} + +type BindObservation struct { + + // The Bind Distinguished Name (DN) identity to be used in the secure LDAP connection. + Dn *string `json:"dn,omitempty" tf:"dn,omitempty"` +} + +type BindParameters struct { + + // The Bind Distinguished Name (DN) identity to be used in the secure LDAP connection. + // +kubebuilder:validation:Optional + Dn *string `json:"dn" tf:"dn,omitempty"` + + // The password of the Active Directory domain administrator. + // +kubebuilder:validation:Required + PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` +} + +type DNSInitParameters struct { + + // The DNS search domain for the HPC Cache. + SearchDomain *string `json:"searchDomain,omitempty" tf:"search_domain,omitempty"` + + // A list of DNS servers for the HPC Cache. At most three IP(s) are allowed to set. 
+ Servers []*string `json:"servers,omitempty" tf:"servers,omitempty"` +} + +type DNSObservation struct { + + // The DNS search domain for the HPC Cache. + SearchDomain *string `json:"searchDomain,omitempty" tf:"search_domain,omitempty"` + + // A list of DNS servers for the HPC Cache. At most three IP(s) are allowed to set. + Servers []*string `json:"servers,omitempty" tf:"servers,omitempty"` +} + +type DNSParameters struct { + + // The DNS search domain for the HPC Cache. + // +kubebuilder:validation:Optional + SearchDomain *string `json:"searchDomain,omitempty" tf:"search_domain,omitempty"` + + // A list of DNS servers for the HPC Cache. At most three IP(s) are allowed to set. + // +kubebuilder:validation:Optional + Servers []*string `json:"servers" tf:"servers,omitempty"` +} + +type DefaultAccessPolicyInitParameters struct { + + // One or more access_rule blocks (up to three) as defined above. + AccessRule []AccessRuleInitParameters `json:"accessRule,omitempty" tf:"access_rule,omitempty"` +} + +type DefaultAccessPolicyObservation struct { + + // One or more access_rule blocks (up to three) as defined above. + AccessRule []AccessRuleObservation `json:"accessRule,omitempty" tf:"access_rule,omitempty"` +} + +type DefaultAccessPolicyParameters struct { + + // One or more access_rule blocks (up to three) as defined above. + // +kubebuilder:validation:Optional + AccessRule []AccessRuleParameters `json:"accessRule" tf:"access_rule,omitempty"` +} + +type DirectoryActiveDirectoryInitParameters struct { + + // The NetBIOS name to assign to the HPC Cache when it joins the Active Directory domain as a server. + CacheNetbiosName *string `json:"cacheNetbiosName,omitempty" tf:"cache_netbios_name,omitempty"` + + // The primary DNS IP address used to resolve the Active Directory domain controller's FQDN. 
+ DNSPrimaryIP *string `json:"dnsPrimaryIp,omitempty" tf:"dns_primary_ip,omitempty"` + + // The secondary DNS IP address used to resolve the Active Directory domain controller's FQDN. + DNSSecondaryIP *string `json:"dnsSecondaryIp,omitempty" tf:"dns_secondary_ip,omitempty"` + + // The fully qualified domain name of the Active Directory domain controller. + DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` + + // The Active Directory domain's NetBIOS name. + DomainNetbiosName *string `json:"domainNetbiosName,omitempty" tf:"domain_netbios_name,omitempty"` + + // The username of the Active Directory domain administrator. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type DirectoryActiveDirectoryObservation struct { + + // The NetBIOS name to assign to the HPC Cache when it joins the Active Directory domain as a server. + CacheNetbiosName *string `json:"cacheNetbiosName,omitempty" tf:"cache_netbios_name,omitempty"` + + // The primary DNS IP address used to resolve the Active Directory domain controller's FQDN. + DNSPrimaryIP *string `json:"dnsPrimaryIp,omitempty" tf:"dns_primary_ip,omitempty"` + + // The secondary DNS IP address used to resolve the Active Directory domain controller's FQDN. + DNSSecondaryIP *string `json:"dnsSecondaryIp,omitempty" tf:"dns_secondary_ip,omitempty"` + + // The fully qualified domain name of the Active Directory domain controller. + DomainName *string `json:"domainName,omitempty" tf:"domain_name,omitempty"` + + // The Active Directory domain's NetBIOS name. + DomainNetbiosName *string `json:"domainNetbiosName,omitempty" tf:"domain_netbios_name,omitempty"` + + // The username of the Active Directory domain administrator. + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type DirectoryActiveDirectoryParameters struct { + + // The NetBIOS name to assign to the HPC Cache when it joins the Active Directory domain as a server. 
+ // +kubebuilder:validation:Optional + CacheNetbiosName *string `json:"cacheNetbiosName" tf:"cache_netbios_name,omitempty"` + + // The primary DNS IP address used to resolve the Active Directory domain controller's FQDN. + // +kubebuilder:validation:Optional + DNSPrimaryIP *string `json:"dnsPrimaryIp" tf:"dns_primary_ip,omitempty"` + + // The secondary DNS IP address used to resolve the Active Directory domain controller's FQDN. + // +kubebuilder:validation:Optional + DNSSecondaryIP *string `json:"dnsSecondaryIp,omitempty" tf:"dns_secondary_ip,omitempty"` + + // The fully qualified domain name of the Active Directory domain controller. + // +kubebuilder:validation:Optional + DomainName *string `json:"domainName" tf:"domain_name,omitempty"` + + // The Active Directory domain's NetBIOS name. + // +kubebuilder:validation:Optional + DomainNetbiosName *string `json:"domainNetbiosName" tf:"domain_netbios_name,omitempty"` + + // The password of the Active Directory domain administrator. + // +kubebuilder:validation:Required + PasswordSecretRef v1.SecretKeySelector `json:"passwordSecretRef" tf:"-"` + + // The username of the Active Directory domain administrator. + // +kubebuilder:validation:Optional + Username *string `json:"username" tf:"username,omitempty"` +} + +type DirectoryFlatFileInitParameters struct { + + // The URI of the file containing group information (/etc/group file format in Unix-like OS). + GroupFileURI *string `json:"groupFileUri,omitempty" tf:"group_file_uri,omitempty"` + + // The URI of the file containing user information (/etc/passwd file format in Unix-like OS). + PasswordFileURI *string `json:"passwordFileUri,omitempty" tf:"password_file_uri,omitempty"` +} + +type DirectoryFlatFileObservation struct { + + // The URI of the file containing group information (/etc/group file format in Unix-like OS). 
+ GroupFileURI *string `json:"groupFileUri,omitempty" tf:"group_file_uri,omitempty"` + + // The URI of the file containing user information (/etc/passwd file format in Unix-like OS). + PasswordFileURI *string `json:"passwordFileUri,omitempty" tf:"password_file_uri,omitempty"` +} + +type DirectoryFlatFileParameters struct { + + // The URI of the file containing group information (/etc/group file format in Unix-like OS). + // +kubebuilder:validation:Optional + GroupFileURI *string `json:"groupFileUri" tf:"group_file_uri,omitempty"` + + // The URI of the file containing user information (/etc/passwd file format in Unix-like OS). + // +kubebuilder:validation:Optional + PasswordFileURI *string `json:"passwordFileUri" tf:"password_file_uri,omitempty"` +} + +type DirectoryLdapInitParameters struct { + + // The base distinguished name (DN) for the LDAP domain. + BaseDn *string `json:"baseDn,omitempty" tf:"base_dn,omitempty"` + + // A bind block as defined above. + Bind *BindInitParameters `json:"bind,omitempty" tf:"bind,omitempty"` + + // The URI of the CA certificate to validate the LDAP secure connection. + CertificateValidationURI *string `json:"certificateValidationUri,omitempty" tf:"certificate_validation_uri,omitempty"` + + // Whether the certificate should be automatically downloaded. This can be set to true only when certificate_validation_uri is provided. + DownloadCertificateAutomatically *bool `json:"downloadCertificateAutomatically,omitempty" tf:"download_certificate_automatically,omitempty"` + + // Whether the LDAP connection should be encrypted? + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // The FQDN or IP address of the LDAP server. + Server *string `json:"server,omitempty" tf:"server,omitempty"` +} + +type DirectoryLdapObservation struct { + + // The base distinguished name (DN) for the LDAP domain. + BaseDn *string `json:"baseDn,omitempty" tf:"base_dn,omitempty"` + + // A bind block as defined above. 
+ Bind *BindObservation `json:"bind,omitempty" tf:"bind,omitempty"` + + // The URI of the CA certificate to validate the LDAP secure connection. + CertificateValidationURI *string `json:"certificateValidationUri,omitempty" tf:"certificate_validation_uri,omitempty"` + + // Whether the certificate should be automatically downloaded. This can be set to true only when certificate_validation_uri is provided. + DownloadCertificateAutomatically *bool `json:"downloadCertificateAutomatically,omitempty" tf:"download_certificate_automatically,omitempty"` + + // Whether the LDAP connection should be encrypted? + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // The FQDN or IP address of the LDAP server. + Server *string `json:"server,omitempty" tf:"server,omitempty"` +} + +type DirectoryLdapParameters struct { + + // The base distinguished name (DN) for the LDAP domain. + // +kubebuilder:validation:Optional + BaseDn *string `json:"baseDn" tf:"base_dn,omitempty"` + + // A bind block as defined above. + // +kubebuilder:validation:Optional + Bind *BindParameters `json:"bind,omitempty" tf:"bind,omitempty"` + + // The URI of the CA certificate to validate the LDAP secure connection. + // +kubebuilder:validation:Optional + CertificateValidationURI *string `json:"certificateValidationUri,omitempty" tf:"certificate_validation_uri,omitempty"` + + // Whether the certificate should be automatically downloaded. This can be set to true only when certificate_validation_uri is provided. + // +kubebuilder:validation:Optional + DownloadCertificateAutomatically *bool `json:"downloadCertificateAutomatically,omitempty" tf:"download_certificate_automatically,omitempty"` + + // Whether the LDAP connection should be encrypted? + // +kubebuilder:validation:Optional + Encrypted *bool `json:"encrypted,omitempty" tf:"encrypted,omitempty"` + + // The FQDN or IP address of the LDAP server. 
+ // +kubebuilder:validation:Optional + Server *string `json:"server" tf:"server,omitempty"` +} + +type HPCCacheInitParameters struct { + + // Specifies whether the HPC Cache automatically rotates Encryption Key to the latest version. + AutomaticallyRotateKeyToLatestEnabled *bool `json:"automaticallyRotateKeyToLatestEnabled,omitempty" tf:"automatically_rotate_key_to_latest_enabled,omitempty"` + + // The size of the HPC Cache, in GB. Possible values are 3072, 6144, 12288, 21623, 24576, 43246, 49152 and 86491. Changing this forces a new resource to be created. + CacheSizeInGb *float64 `json:"cacheSizeInGb,omitempty" tf:"cache_size_in_gb,omitempty"` + + // A dns block as defined below. + DNS *DNSInitParameters `json:"dns,omitempty" tf:"dns,omitempty"` + + // A default_access_policy block as defined below. + DefaultAccessPolicy *DefaultAccessPolicyInitParameters `json:"defaultAccessPolicy,omitempty" tf:"default_access_policy,omitempty"` + + // A directory_active_directory block as defined below. + DirectoryActiveDirectory *DirectoryActiveDirectoryInitParameters `json:"directoryActiveDirectory,omitempty" tf:"directory_active_directory,omitempty"` + + // A directory_flat_file block as defined below. + DirectoryFlatFile *DirectoryFlatFileInitParameters `json:"directoryFlatFile,omitempty" tf:"directory_flat_file,omitempty"` + + // A directory_ldap block as defined below. + DirectoryLdap *DirectoryLdapInitParameters `json:"directoryLdap,omitempty" tf:"directory_ldap,omitempty"` + + // An identity block as defined below. Changing this forces a new resource to be created. + Identity *IdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The ID of the Key Vault Key which should be used to encrypt the data in this HPC Cache. + KeyVaultKeyID *string `json:"keyVaultKeyId,omitempty" tf:"key_vault_key_id,omitempty"` + + // Specifies the supported Azure Region where the HPC Cache should be created. Changing this forces a new resource to be created. 
+ Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The IPv4 maximum transmission unit configured for the subnet of the HPC Cache. Possible values range from 576 - 1500. Defaults to 1500. + Mtu *float64 `json:"mtu,omitempty" tf:"mtu,omitempty"` + + // The NTP server IP Address or FQDN for the HPC Cache. Defaults to time.windows.com. + NtpServer *string `json:"ntpServer,omitempty" tf:"ntp_server,omitempty"` + + // The SKU of HPC Cache to use. Possible values are (ReadWrite) - Standard_2G, Standard_4G Standard_8G or (ReadOnly) - Standard_L4_5G, Standard_L9G, and Standard_L16G. Changing this forces a new resource to be created. + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // The ID of the Subnet for the HPC Cache. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // A mapping of tags to assign to the HPC Cache. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type HPCCacheObservation struct { + + // Specifies whether the HPC Cache automatically rotates Encryption Key to the latest version. + AutomaticallyRotateKeyToLatestEnabled *bool `json:"automaticallyRotateKeyToLatestEnabled,omitempty" tf:"automatically_rotate_key_to_latest_enabled,omitempty"` + + // The size of the HPC Cache, in GB. 
Possible values are 3072, 6144, 12288, 21623, 24576, 43246, 49152 and 86491. Changing this forces a new resource to be created. + CacheSizeInGb *float64 `json:"cacheSizeInGb,omitempty" tf:"cache_size_in_gb,omitempty"` + + // A dns block as defined below. + DNS *DNSObservation `json:"dns,omitempty" tf:"dns,omitempty"` + + // A default_access_policy block as defined below. + DefaultAccessPolicy *DefaultAccessPolicyObservation `json:"defaultAccessPolicy,omitempty" tf:"default_access_policy,omitempty"` + + // A directory_active_directory block as defined below. + DirectoryActiveDirectory *DirectoryActiveDirectoryObservation `json:"directoryActiveDirectory,omitempty" tf:"directory_active_directory,omitempty"` + + // A directory_flat_file block as defined below. + DirectoryFlatFile *DirectoryFlatFileObservation `json:"directoryFlatFile,omitempty" tf:"directory_flat_file,omitempty"` + + // A directory_ldap block as defined below. + DirectoryLdap *DirectoryLdapObservation `json:"directoryLdap,omitempty" tf:"directory_ldap,omitempty"` + + // The id of the HPC Cache. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. Changing this forces a new resource to be created. + Identity *IdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // The ID of the Key Vault Key which should be used to encrypt the data in this HPC Cache. + KeyVaultKeyID *string `json:"keyVaultKeyId,omitempty" tf:"key_vault_key_id,omitempty"` + + // Specifies the supported Azure Region where the HPC Cache should be created. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A list of IP Addresses where the HPC Cache can be mounted. + MountAddresses []*string `json:"mountAddresses,omitempty" tf:"mount_addresses,omitempty"` + + // The IPv4 maximum transmission unit configured for the subnet of the HPC Cache. Possible values range from 576 - 1500. Defaults to 1500. 
+ Mtu *float64 `json:"mtu,omitempty" tf:"mtu,omitempty"` + + // The NTP server IP Address or FQDN for the HPC Cache. Defaults to time.windows.com. + NtpServer *string `json:"ntpServer,omitempty" tf:"ntp_server,omitempty"` + + // The name of the Resource Group in which to create the HPC Cache. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // The SKU of HPC Cache to use. Possible values are (ReadWrite) - Standard_2G, Standard_4G Standard_8G or (ReadOnly) - Standard_L4_5G, Standard_L9G, and Standard_L16G. Changing this forces a new resource to be created. + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // The ID of the Subnet for the HPC Cache. Changing this forces a new resource to be created. + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // A mapping of tags to assign to the HPC Cache. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type HPCCacheParameters struct { + + // Specifies whether the HPC Cache automatically rotates Encryption Key to the latest version. + // +kubebuilder:validation:Optional + AutomaticallyRotateKeyToLatestEnabled *bool `json:"automaticallyRotateKeyToLatestEnabled,omitempty" tf:"automatically_rotate_key_to_latest_enabled,omitempty"` + + // The size of the HPC Cache, in GB. Possible values are 3072, 6144, 12288, 21623, 24576, 43246, 49152 and 86491. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + CacheSizeInGb *float64 `json:"cacheSizeInGb,omitempty" tf:"cache_size_in_gb,omitempty"` + + // A dns block as defined below. + // +kubebuilder:validation:Optional + DNS *DNSParameters `json:"dns,omitempty" tf:"dns,omitempty"` + + // A default_access_policy block as defined below. 
+ // +kubebuilder:validation:Optional + DefaultAccessPolicy *DefaultAccessPolicyParameters `json:"defaultAccessPolicy,omitempty" tf:"default_access_policy,omitempty"` + + // A directory_active_directory block as defined below. + // +kubebuilder:validation:Optional + DirectoryActiveDirectory *DirectoryActiveDirectoryParameters `json:"directoryActiveDirectory,omitempty" tf:"directory_active_directory,omitempty"` + + // A directory_flat_file block as defined below. + // +kubebuilder:validation:Optional + DirectoryFlatFile *DirectoryFlatFileParameters `json:"directoryFlatFile,omitempty" tf:"directory_flat_file,omitempty"` + + // A directory_ldap block as defined below. + // +kubebuilder:validation:Optional + DirectoryLdap *DirectoryLdapParameters `json:"directoryLdap,omitempty" tf:"directory_ldap,omitempty"` + + // An identity block as defined below. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Identity *IdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The ID of the Key Vault Key which should be used to encrypt the data in this HPC Cache. + // +kubebuilder:validation:Optional + KeyVaultKeyID *string `json:"keyVaultKeyId,omitempty" tf:"key_vault_key_id,omitempty"` + + // Specifies the supported Azure Region where the HPC Cache should be created. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The IPv4 maximum transmission unit configured for the subnet of the HPC Cache. Possible values range from 576 - 1500. Defaults to 1500. + // +kubebuilder:validation:Optional + Mtu *float64 `json:"mtu,omitempty" tf:"mtu,omitempty"` + + // The NTP server IP Address or FQDN for the HPC Cache. Defaults to time.windows.com. 
+ // +kubebuilder:validation:Optional + NtpServer *string `json:"ntpServer,omitempty" tf:"ntp_server,omitempty"` + + // The name of the Resource Group in which to create the HPC Cache. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // The SKU of HPC Cache to use. Possible values are (ReadWrite) - Standard_2G, Standard_4G Standard_8G or (ReadOnly) - Standard_L4_5G, Standard_L9G, and Standard_L16G. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // The ID of the Subnet for the HPC Cache. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` + + // Reference to a Subnet in network to populate subnetId. + // +kubebuilder:validation:Optional + SubnetIDRef *v1.Reference `json:"subnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate subnetId. 
+ // +kubebuilder:validation:Optional + SubnetIDSelector *v1.Selector `json:"subnetIdSelector,omitempty" tf:"-"` + + // A mapping of tags to assign to the HPC Cache. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type IdentityInitParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this HPC Cache. Changing this forces a new resource to be created. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this HPC Cache. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). Changing this forces a new resource to be created. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityObservation struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this HPC Cache. Changing this forces a new resource to be created. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Principal ID associated with this Managed Service Identity. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID associated with this Managed Service Identity. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this HPC Cache. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). Changing this forces a new resource to be created. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this HPC Cache. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this HPC Cache. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +// HPCCacheSpec defines the desired state of HPCCache +type HPCCacheSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider HPCCacheParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider HPCCacheInitParameters `json:"initProvider,omitempty"` +} + +// HPCCacheStatus defines the observed state of HPCCache. +type HPCCacheStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider HPCCacheObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// HPCCache is the Schema for the HPCCaches API. Manages a HPC Cache. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type HPCCache struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.cacheSizeInGb) || (has(self.initProvider) && has(self.initProvider.cacheSizeInGb))",message="spec.forProvider.cacheSizeInGb is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.skuName) || (has(self.initProvider) && has(self.initProvider.skuName))",message="spec.forProvider.skuName is a required parameter" + Spec HPCCacheSpec `json:"spec"` + Status HPCCacheStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// HPCCacheList contains a list of HPCCaches +type HPCCacheList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []HPCCache `json:"items"` +} + +// Repository type metadata. 
+var ( + HPCCache_Kind = "HPCCache" + HPCCache_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: HPCCache_Kind}.String() + HPCCache_KindAPIVersion = HPCCache_Kind + "." + CRDGroupVersion.String() + HPCCache_GroupVersionKind = CRDGroupVersion.WithKind(HPCCache_Kind) +) + +func init() { + SchemeBuilder.Register(&HPCCache{}, &HPCCacheList{}) +} diff --git a/apis/storagepool/v1beta1/zz_diskpool_types.go b/apis/storagepool/v1beta1/zz_diskpool_types.go index 4906b949b..5f43fd89e 100755 --- a/apis/storagepool/v1beta1/zz_diskpool_types.go +++ b/apis/storagepool/v1beta1/zz_diskpool_types.go @@ -22,7 +22,7 @@ type DiskPoolInitParameters struct { SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` // The ID of the Subnet where the Disk Pool should be created. Changing this forces a new Disk Pool to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.Subnet + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` @@ -93,7 +93,7 @@ type DiskPoolParameters struct { SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` // The ID of the Subnet where the Disk Pool should be created. Changing this forces a new Disk Pool to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta1.Subnet + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional SubnetID *string `json:"subnetId,omitempty" tf:"subnet_id,omitempty"` diff --git a/apis/storagepool/v1beta1/zz_generated.resolvers.go b/apis/storagepool/v1beta1/zz_generated.resolvers.go index 158883742..6357c54be 100644 --- a/apis/storagepool/v1beta1/zz_generated.resolvers.go +++ b/apis/storagepool/v1beta1/zz_generated.resolvers.go @@ -46,7 +46,7 @@ func (mg *DiskPool) ResolveReferences(ctx context.Context, c client.Reader) erro mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "Subnet", "SubnetList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -65,7 +65,7 @@ func (mg *DiskPool) ResolveReferences(ctx context.Context, c client.Reader) erro mg.Spec.ForProvider.SubnetID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.SubnetIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta1", "Subnet", "SubnetList") + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/streamanalytics/v1beta1/zz_generated.conversion_hubs.go b/apis/streamanalytics/v1beta1/zz_generated.conversion_hubs.go 
index fba4498b9..d38f9932e 100755 --- a/apis/streamanalytics/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/streamanalytics/v1beta1/zz_generated.conversion_hubs.go @@ -9,21 +9,9 @@ package v1beta1 // Hub marks this type as a conversion hub. func (tr *Cluster) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *FunctionJavascriptUda) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *Job) Hub() {} - // Hub marks this type as a conversion hub. func (tr *ManagedPrivateEndpoint) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *OutputBlob) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *OutputEventHub) Hub() {} - // Hub marks this type as a conversion hub. func (tr *OutputFunction) Hub() {} @@ -33,29 +21,11 @@ func (tr *OutputMSSQL) Hub() {} // Hub marks this type as a conversion hub. func (tr *OutputPowerBI) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *OutputServiceBusQueue) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *OutputServiceBusTopic) Hub() {} - // Hub marks this type as a conversion hub. func (tr *OutputSynapse) Hub() {} // Hub marks this type as a conversion hub. func (tr *OutputTable) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *ReferenceInputBlob) Hub() {} - // Hub marks this type as a conversion hub. func (tr *ReferenceInputMSSQL) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *StreamInputBlob) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *StreamInputEventHub) Hub() {} - -// Hub marks this type as a conversion hub. 
-func (tr *StreamInputIOTHub) Hub() {} diff --git a/apis/streamanalytics/v1beta1/zz_generated.conversion_spokes.go b/apis/streamanalytics/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..651ede47d --- /dev/null +++ b/apis/streamanalytics/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,214 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this FunctionJavascriptUda to the hub type. +func (tr *FunctionJavascriptUda) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the FunctionJavascriptUda type. +func (tr *FunctionJavascriptUda) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Job to the hub type. 
+func (tr *Job) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Job type. +func (tr *Job) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this OutputBlob to the hub type. +func (tr *OutputBlob) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the OutputBlob type. +func (tr *OutputBlob) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this OutputEventHub to the hub type. 
+func (tr *OutputEventHub) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the OutputEventHub type. +func (tr *OutputEventHub) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this OutputServiceBusQueue to the hub type. +func (tr *OutputServiceBusQueue) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the OutputServiceBusQueue type. +func (tr *OutputServiceBusQueue) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this OutputServiceBusTopic to the hub type. 
+func (tr *OutputServiceBusTopic) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the OutputServiceBusTopic type. +func (tr *OutputServiceBusTopic) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this ReferenceInputBlob to the hub type. +func (tr *ReferenceInputBlob) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the ReferenceInputBlob type. +func (tr *ReferenceInputBlob) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this StreamInputBlob to the hub type. 
+func (tr *StreamInputBlob) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the StreamInputBlob type. +func (tr *StreamInputBlob) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this StreamInputEventHub to the hub type. +func (tr *StreamInputEventHub) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the StreamInputEventHub type. +func (tr *StreamInputEventHub) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this StreamInputIOTHub to the hub type. 
+func (tr *StreamInputIOTHub) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the StreamInputIOTHub type. +func (tr *StreamInputIOTHub) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/streamanalytics/v1beta1/zz_generated.resolvers.go b/apis/streamanalytics/v1beta1/zz_generated.resolvers.go index 0e0e588f0..4939803c2 100644 --- a/apis/streamanalytics/v1beta1/zz_generated.resolvers.go +++ b/apis/streamanalytics/v1beta1/zz_generated.resolvers.go @@ -179,7 +179,7 @@ func (mg *ManagedPrivateEndpoint) ResolveReferences(ctx context.Context, c clien mg.Spec.ForProvider.StreamAnalyticsClusterName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.StreamAnalyticsClusterNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -198,7 +198,7 @@ func (mg *ManagedPrivateEndpoint) ResolveReferences(ctx context.Context, c clien mg.Spec.ForProvider.TargetResourceID = 
reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.TargetResourceIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -481,7 +481,7 @@ func (mg *OutputFunction) ResolveReferences(ctx context.Context, c client.Reader var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("web.azure.upbound.io", "v1beta1", "FunctionApp", "FunctionAppList") + m, l, err = apisresolver.GetManagedResource("web.azure.upbound.io", "v1beta2", "FunctionApp", "FunctionAppList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -519,7 +519,7 @@ func (mg *OutputFunction) ResolveReferences(ctx context.Context, c client.Reader mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("streamanalytics.azure.upbound.io", "v1beta1", "Job", "JobList") + m, l, err = apisresolver.GetManagedResource("streamanalytics.azure.upbound.io", "v1beta2", "Job", "JobList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -538,7 +538,7 @@ func (mg *OutputFunction) ResolveReferences(ctx context.Context, c client.Reader mg.Spec.ForProvider.StreamAnalyticsJobName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.StreamAnalyticsJobNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("web.azure.upbound.io", "v1beta1", "FunctionApp", "FunctionAppList") + m, l, err = 
apisresolver.GetManagedResource("web.azure.upbound.io", "v1beta2", "FunctionApp", "FunctionAppList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -557,7 +557,7 @@ func (mg *OutputFunction) ResolveReferences(ctx context.Context, c client.Reader mg.Spec.InitProvider.FunctionApp = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.FunctionAppRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("streamanalytics.azure.upbound.io", "v1beta1", "Job", "JobList") + m, l, err = apisresolver.GetManagedResource("streamanalytics.azure.upbound.io", "v1beta2", "Job", "JobList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -607,7 +607,7 @@ func (mg *OutputMSSQL) ResolveReferences(ctx context.Context, c client.Reader) e mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", "v1beta1", "MSSQLServer", "MSSQLServerList") + m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", "v1beta2", "MSSQLServer", "MSSQLServerList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -626,7 +626,7 @@ func (mg *OutputMSSQL) ResolveReferences(ctx context.Context, c client.Reader) e mg.Spec.ForProvider.Server = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ServerRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("streamanalytics.azure.upbound.io", "v1beta1", "Job", "JobList") + m, l, err = apisresolver.GetManagedResource("streamanalytics.azure.upbound.io", "v1beta2", "Job", "JobList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list 
for reference resolution") } @@ -683,7 +683,7 @@ func (mg *OutputMSSQL) ResolveReferences(ctx context.Context, c client.Reader) e mg.Spec.InitProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", "v1beta1", "MSSQLServer", "MSSQLServerList") + m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", "v1beta2", "MSSQLServer", "MSSQLServerList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -702,7 +702,7 @@ func (mg *OutputMSSQL) ResolveReferences(ctx context.Context, c client.Reader) e mg.Spec.InitProvider.Server = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.ServerRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("streamanalytics.azure.upbound.io", "v1beta1", "Job", "JobList") + m, l, err = apisresolver.GetManagedResource("streamanalytics.azure.upbound.io", "v1beta2", "Job", "JobList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -752,7 +752,7 @@ func (mg *OutputPowerBI) ResolveReferences(ctx context.Context, c client.Reader) var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("streamanalytics.azure.upbound.io", "v1beta1", "Job", "JobList") + m, l, err = apisresolver.GetManagedResource("streamanalytics.azure.upbound.io", "v1beta2", "Job", "JobList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -771,7 +771,7 @@ func (mg *OutputPowerBI) ResolveReferences(ctx context.Context, c client.Reader) mg.Spec.ForProvider.StreamAnalyticsJobID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.StreamAnalyticsJobIDRef = rsp.ResolvedReference { - m, 
l, err = apisresolver.GetManagedResource("streamanalytics.azure.upbound.io", "v1beta1", "Job", "JobList") + m, l, err = apisresolver.GetManagedResource("streamanalytics.azure.upbound.io", "v1beta2", "Job", "JobList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1149,7 +1149,7 @@ func (mg *OutputSynapse) ResolveReferences(ctx context.Context, c client.Reader) mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("streamanalytics.azure.upbound.io", "v1beta1", "Job", "JobList") + m, l, err = apisresolver.GetManagedResource("streamanalytics.azure.upbound.io", "v1beta2", "Job", "JobList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1168,7 +1168,7 @@ func (mg *OutputSynapse) ResolveReferences(ctx context.Context, c client.Reader) mg.Spec.ForProvider.StreamAnalyticsJobName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.StreamAnalyticsJobNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("synapse.azure.upbound.io", "v1beta1", "Workspace", "WorkspaceList") + m, l, err = apisresolver.GetManagedResource("synapse.azure.upbound.io", "v1beta2", "Workspace", "WorkspaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1187,7 +1187,7 @@ func (mg *OutputSynapse) ResolveReferences(ctx context.Context, c client.Reader) mg.Spec.ForProvider.User = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.UserRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("synapse.azure.upbound.io", "v1beta1", "Workspace", "WorkspaceList") + m, l, err = apisresolver.GetManagedResource("synapse.azure.upbound.io", 
"v1beta2", "Workspace", "WorkspaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1237,7 +1237,7 @@ func (mg *OutputTable) ResolveReferences(ctx context.Context, c client.Reader) e mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1275,7 +1275,7 @@ func (mg *OutputTable) ResolveReferences(ctx context.Context, c client.Reader) e mg.Spec.ForProvider.Table = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.TableRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1489,7 +1489,7 @@ func (mg *ReferenceInputMSSQL) ResolveReferences(ctx context.Context, c client.R var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", "v1beta1", "MSSQLDatabase", "MSSQLDatabaseList") + m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", "v1beta2", "MSSQLDatabase", "MSSQLDatabaseList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1527,7 +1527,7 @@ func (mg *ReferenceInputMSSQL) ResolveReferences(ctx context.Context, c client.R 
mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", "v1beta1", "MSSQLServer", "MSSQLServerList") + m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", "v1beta2", "MSSQLServer", "MSSQLServerList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1546,7 +1546,7 @@ func (mg *ReferenceInputMSSQL) ResolveReferences(ctx context.Context, c client.R mg.Spec.ForProvider.Server = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ServerRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", "v1beta1", "MSSQLDatabase", "MSSQLDatabaseList") + m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", "v1beta2", "MSSQLDatabase", "MSSQLDatabaseList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1565,7 +1565,7 @@ func (mg *ReferenceInputMSSQL) ResolveReferences(ctx context.Context, c client.R mg.Spec.InitProvider.Database = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.DatabaseRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", "v1beta1", "MSSQLServer", "MSSQLServerList") + m, l, err = apisresolver.GetManagedResource("sql.azure.upbound.io", "v1beta2", "MSSQLServer", "MSSQLServerList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/streamanalytics/v1beta1/zz_managedprivateendpoint_types.go b/apis/streamanalytics/v1beta1/zz_managedprivateendpoint_types.go index 469fd441e..8342a4c81 100755 --- a/apis/streamanalytics/v1beta1/zz_managedprivateendpoint_types.go +++ 
b/apis/streamanalytics/v1beta1/zz_managedprivateendpoint_types.go @@ -19,7 +19,7 @@ type ManagedPrivateEndpointInitParameters struct { SubresourceName *string `json:"subresourceName,omitempty" tf:"subresource_name,omitempty"` // The ID of the Private Link Enabled Remote Resource which this Stream Analytics Private endpoint should be connected to. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() TargetResourceID *string `json:"targetResourceId,omitempty" tf:"target_resource_id,omitempty"` @@ -83,7 +83,7 @@ type ManagedPrivateEndpointParameters struct { SubresourceName *string `json:"subresourceName,omitempty" tf:"subresource_name,omitempty"` // The ID of the Private Link Enabled Remote Resource which this Stream Analytics Private endpoint should be connected to. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional TargetResourceID *string `json:"targetResourceId,omitempty" tf:"target_resource_id,omitempty"` diff --git a/apis/streamanalytics/v1beta1/zz_outputfunction_types.go b/apis/streamanalytics/v1beta1/zz_outputfunction_types.go index eb059c8a9..5068323ff 100755 --- a/apis/streamanalytics/v1beta1/zz_outputfunction_types.go +++ b/apis/streamanalytics/v1beta1/zz_outputfunction_types.go @@ -22,7 +22,7 @@ type OutputFunctionInitParameters struct { BatchMaxInBytes *float64 `json:"batchMaxInBytes,omitempty" tf:"batch_max_in_bytes,omitempty"` // The name of the Function App. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/web/v1beta1.FunctionApp + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/web/v1beta2.FunctionApp // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("name",false) FunctionApp *string `json:"functionApp,omitempty" tf:"function_app,omitempty"` @@ -38,7 +38,7 @@ type OutputFunctionInitParameters struct { FunctionName *string `json:"functionName,omitempty" tf:"function_name,omitempty"` // The name of the Stream Analytics Job. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/streamanalytics/v1beta1.Job + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/streamanalytics/v1beta2.Job StreamAnalyticsJobName *string `json:"streamAnalyticsJobName,omitempty" tf:"stream_analytics_job_name,omitempty"` // Reference to a Job in streamanalytics to populate streamAnalyticsJobName. 
@@ -89,7 +89,7 @@ type OutputFunctionParameters struct { BatchMaxInBytes *float64 `json:"batchMaxInBytes,omitempty" tf:"batch_max_in_bytes,omitempty"` // The name of the Function App. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/web/v1beta1.FunctionApp + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/web/v1beta2.FunctionApp // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("name",false) // +kubebuilder:validation:Optional FunctionApp *string `json:"functionApp,omitempty" tf:"function_app,omitempty"` @@ -120,7 +120,7 @@ type OutputFunctionParameters struct { ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` // The name of the Stream Analytics Job. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/streamanalytics/v1beta1.Job + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/streamanalytics/v1beta2.Job // +kubebuilder:validation:Optional StreamAnalyticsJobName *string `json:"streamAnalyticsJobName,omitempty" tf:"stream_analytics_job_name,omitempty"` diff --git a/apis/streamanalytics/v1beta1/zz_outputmssql_types.go b/apis/streamanalytics/v1beta1/zz_outputmssql_types.go index 72f0c911c..1bd4254a9 100755 --- a/apis/streamanalytics/v1beta1/zz_outputmssql_types.go +++ b/apis/streamanalytics/v1beta1/zz_outputmssql_types.go @@ -43,7 +43,7 @@ type OutputMSSQLInitParameters struct { ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` // The SQL server url. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta1.MSSQLServer + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta2.MSSQLServer Server *string `json:"server,omitempty" tf:"server,omitempty"` // Reference to a MSSQLServer in sql to populate server. @@ -55,7 +55,7 @@ type OutputMSSQLInitParameters struct { ServerSelector *v1.Selector `json:"serverSelector,omitempty" tf:"-"` // The name of the Stream Analytics Job. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/streamanalytics/v1beta1.Job + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/streamanalytics/v1beta2.Job StreamAnalyticsJobName *string `json:"streamAnalyticsJobName,omitempty" tf:"stream_analytics_job_name,omitempty"` // Reference to a Job in streamanalytics to populate streamAnalyticsJobName. @@ -158,7 +158,7 @@ type OutputMSSQLParameters struct { ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` // The SQL server url. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta1.MSSQLServer + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta2.MSSQLServer // +kubebuilder:validation:Optional Server *string `json:"server,omitempty" tf:"server,omitempty"` @@ -171,7 +171,7 @@ type OutputMSSQLParameters struct { ServerSelector *v1.Selector `json:"serverSelector,omitempty" tf:"-"` // The name of the Stream Analytics Job. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/streamanalytics/v1beta1.Job + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/streamanalytics/v1beta2.Job // +kubebuilder:validation:Optional StreamAnalyticsJobName *string `json:"streamAnalyticsJobName,omitempty" tf:"stream_analytics_job_name,omitempty"` diff --git a/apis/streamanalytics/v1beta1/zz_outputpowerbi_types.go b/apis/streamanalytics/v1beta1/zz_outputpowerbi_types.go index 5b24bc927..ab764a3d0 100755 --- a/apis/streamanalytics/v1beta1/zz_outputpowerbi_types.go +++ b/apis/streamanalytics/v1beta1/zz_outputpowerbi_types.go @@ -25,7 +25,7 @@ type OutputPowerBIInitParameters struct { GroupName *string `json:"groupName,omitempty" tf:"group_name,omitempty"` // The ID of the Stream Analytics Job. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/streamanalytics/v1beta1.Job + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/streamanalytics/v1beta2.Job // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() StreamAnalyticsJobID *string `json:"streamAnalyticsJobId,omitempty" tf:"stream_analytics_job_id,omitempty"` @@ -88,7 +88,7 @@ type OutputPowerBIParameters struct { GroupName *string `json:"groupName,omitempty" tf:"group_name,omitempty"` // The ID of the Stream Analytics Job. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/streamanalytics/v1beta1.Job + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/streamanalytics/v1beta2.Job // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() // +kubebuilder:validation:Optional StreamAnalyticsJobID *string `json:"streamAnalyticsJobId,omitempty" tf:"stream_analytics_job_id,omitempty"` diff --git a/apis/streamanalytics/v1beta1/zz_outputsynapse_types.go b/apis/streamanalytics/v1beta1/zz_outputsynapse_types.go index 28dfe384a..ae2176b31 100755 --- a/apis/streamanalytics/v1beta1/zz_outputsynapse_types.go +++ b/apis/streamanalytics/v1beta1/zz_outputsynapse_types.go @@ -25,7 +25,7 @@ type OutputSynapseInitParameters struct { Table *string `json:"table,omitempty" tf:"table,omitempty"` // The user name that will be used to connect to the Azure SQL database. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/synapse/v1beta1.Workspace + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/synapse/v1beta2.Workspace // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("sql_administrator_login",false) User *string `json:"user,omitempty" tf:"user,omitempty"` @@ -90,7 +90,7 @@ type OutputSynapseParameters struct { Server *string `json:"server,omitempty" tf:"server,omitempty"` // The name of the Stream Analytics Job. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/streamanalytics/v1beta1.Job + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/streamanalytics/v1beta2.Job // +kubebuilder:validation:Optional StreamAnalyticsJobName *string `json:"streamAnalyticsJobName,omitempty" tf:"stream_analytics_job_name,omitempty"` @@ -107,7 +107,7 @@ type OutputSynapseParameters struct { Table *string `json:"table,omitempty" tf:"table,omitempty"` // The user name that will be used to connect to the Azure SQL database. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/synapse/v1beta1.Workspace + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/synapse/v1beta2.Workspace // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("sql_administrator_login",false) // +kubebuilder:validation:Optional User *string `json:"user,omitempty" tf:"user,omitempty"` diff --git a/apis/streamanalytics/v1beta1/zz_outputtable_types.go b/apis/streamanalytics/v1beta1/zz_outputtable_types.go index e07666c23..8d1cdf1eb 100755 --- a/apis/streamanalytics/v1beta1/zz_outputtable_types.go +++ b/apis/streamanalytics/v1beta1/zz_outputtable_types.go @@ -28,7 +28,7 @@ type OutputTableInitParameters struct { RowKey *string `json:"rowKey,omitempty" tf:"row_key,omitempty"` // The name of the Storage Account. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account StorageAccountName *string `json:"storageAccountName,omitempty" tf:"storage_account_name,omitempty"` // Reference to a Account in storage to populate storageAccountName. 
@@ -119,7 +119,7 @@ type OutputTableParameters struct { StorageAccountKeySecretRef v1.SecretKeySelector `json:"storageAccountKeySecretRef" tf:"-"` // The name of the Storage Account. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +kubebuilder:validation:Optional StorageAccountName *string `json:"storageAccountName,omitempty" tf:"storage_account_name,omitempty"` diff --git a/apis/streamanalytics/v1beta1/zz_referenceinputmssql_types.go b/apis/streamanalytics/v1beta1/zz_referenceinputmssql_types.go index 530082c59..1ae402333 100755 --- a/apis/streamanalytics/v1beta1/zz_referenceinputmssql_types.go +++ b/apis/streamanalytics/v1beta1/zz_referenceinputmssql_types.go @@ -16,7 +16,7 @@ import ( type ReferenceInputMSSQLInitParameters struct { // The MS SQL database name where the reference data exists. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta1.MSSQLDatabase + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta2.MSSQLDatabase Database *string `json:"database,omitempty" tf:"database,omitempty"` // Reference to a MSSQLDatabase in sql to populate database. @@ -40,7 +40,7 @@ type ReferenceInputMSSQLInitParameters struct { RefreshType *string `json:"refreshType,omitempty" tf:"refresh_type,omitempty"` // The fully qualified domain name of the MS SQL server. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta1.MSSQLServer + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta2.MSSQLServer // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("fully_qualified_domain_name",true) Server *string `json:"server,omitempty" tf:"server,omitempty"` @@ -98,7 +98,7 @@ type ReferenceInputMSSQLObservation struct { type ReferenceInputMSSQLParameters struct { // The MS SQL database name where the reference data exists. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta1.MSSQLDatabase + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta2.MSSQLDatabase // +kubebuilder:validation:Optional Database *string `json:"database,omitempty" tf:"database,omitempty"` @@ -144,7 +144,7 @@ type ReferenceInputMSSQLParameters struct { ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` // The fully qualified domain name of the MS SQL server. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta1.MSSQLServer + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/sql/v1beta2.MSSQLServer // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("fully_qualified_domain_name",true) // +kubebuilder:validation:Optional Server *string `json:"server,omitempty" tf:"server,omitempty"` diff --git a/apis/streamanalytics/v1beta2/zz_functionjavascriptuda_terraformed.go b/apis/streamanalytics/v1beta2/zz_functionjavascriptuda_terraformed.go new file mode 100755 index 000000000..d5c6f8a15 --- /dev/null +++ b/apis/streamanalytics/v1beta2/zz_functionjavascriptuda_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. 
DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this FunctionJavascriptUda +func (mg *FunctionJavascriptUda) GetTerraformResourceType() string { + return "azurerm_stream_analytics_function_javascript_uda" +} + +// GetConnectionDetailsMapping for this FunctionJavascriptUda +func (tr *FunctionJavascriptUda) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this FunctionJavascriptUda +func (tr *FunctionJavascriptUda) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this FunctionJavascriptUda +func (tr *FunctionJavascriptUda) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this FunctionJavascriptUda +func (tr *FunctionJavascriptUda) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this FunctionJavascriptUda +func (tr *FunctionJavascriptUda) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this FunctionJavascriptUda +func (tr *FunctionJavascriptUda) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this FunctionJavascriptUda 
+func (tr *FunctionJavascriptUda) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this FunctionJavascriptUda +func (tr *FunctionJavascriptUda) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this FunctionJavascriptUda using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *FunctionJavascriptUda) LateInitialize(attrs []byte) (bool, error) { + params := &FunctionJavascriptUdaParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *FunctionJavascriptUda) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/streamanalytics/v1beta2/zz_functionjavascriptuda_types.go b/apis/streamanalytics/v1beta2/zz_functionjavascriptuda_types.go new file mode 100755 index 000000000..89de477f5 --- /dev/null +++ b/apis/streamanalytics/v1beta2/zz_functionjavascriptuda_types.go @@ -0,0 +1,197 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type FunctionJavascriptUdaInitParameters struct { + + // One or more input blocks as defined below. + Input []InputInitParameters `json:"input,omitempty" tf:"input,omitempty"` + + // An output block as defined below. + Output *OutputInitParameters `json:"output,omitempty" tf:"output,omitempty"` + + // The JavaScript of this UDA Function. + Script *string `json:"script,omitempty" tf:"script,omitempty"` + + // The resource ID of the Stream Analytics Job where this Function should be created. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/streamanalytics/v1beta2.Job + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + StreamAnalyticsJobID *string `json:"streamAnalyticsJobId,omitempty" tf:"stream_analytics_job_id,omitempty"` + + // Reference to a Job in streamanalytics to populate streamAnalyticsJobId. 
+ // +kubebuilder:validation:Optional + StreamAnalyticsJobIDRef *v1.Reference `json:"streamAnalyticsJobIdRef,omitempty" tf:"-"` + + // Selector for a Job in streamanalytics to populate streamAnalyticsJobId. + // +kubebuilder:validation:Optional + StreamAnalyticsJobIDSelector *v1.Selector `json:"streamAnalyticsJobIdSelector,omitempty" tf:"-"` +} + +type FunctionJavascriptUdaObservation struct { + + // The ID of the Stream Analytics JavaScript UDA Function. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // One or more input blocks as defined below. + Input []InputObservation `json:"input,omitempty" tf:"input,omitempty"` + + // An output block as defined below. + Output *OutputObservation `json:"output,omitempty" tf:"output,omitempty"` + + // The JavaScript of this UDA Function. + Script *string `json:"script,omitempty" tf:"script,omitempty"` + + // The resource ID of the Stream Analytics Job where this Function should be created. Changing this forces a new resource to be created. + StreamAnalyticsJobID *string `json:"streamAnalyticsJobId,omitempty" tf:"stream_analytics_job_id,omitempty"` +} + +type FunctionJavascriptUdaParameters struct { + + // One or more input blocks as defined below. + // +kubebuilder:validation:Optional + Input []InputParameters `json:"input,omitempty" tf:"input,omitempty"` + + // An output block as defined below. + // +kubebuilder:validation:Optional + Output *OutputParameters `json:"output,omitempty" tf:"output,omitempty"` + + // The JavaScript of this UDA Function. + // +kubebuilder:validation:Optional + Script *string `json:"script,omitempty" tf:"script,omitempty"` + + // The resource ID of the Stream Analytics Job where this Function should be created. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/streamanalytics/v1beta2.Job + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + StreamAnalyticsJobID *string `json:"streamAnalyticsJobId,omitempty" tf:"stream_analytics_job_id,omitempty"` + + // Reference to a Job in streamanalytics to populate streamAnalyticsJobId. + // +kubebuilder:validation:Optional + StreamAnalyticsJobIDRef *v1.Reference `json:"streamAnalyticsJobIdRef,omitempty" tf:"-"` + + // Selector for a Job in streamanalytics to populate streamAnalyticsJobId. + // +kubebuilder:validation:Optional + StreamAnalyticsJobIDSelector *v1.Selector `json:"streamAnalyticsJobIdSelector,omitempty" tf:"-"` +} + +type InputInitParameters struct { + + // Is this input parameter a configuration parameter? Defaults to false. + ConfigurationParameter *bool `json:"configurationParameter,omitempty" tf:"configuration_parameter,omitempty"` + + // The input data type of this JavaScript Function. Possible values include any, array, bigint, datetime, float, nvarchar(max) and record. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type InputObservation struct { + + // Is this input parameter a configuration parameter? Defaults to false. + ConfigurationParameter *bool `json:"configurationParameter,omitempty" tf:"configuration_parameter,omitempty"` + + // The input data type of this JavaScript Function. Possible values include any, array, bigint, datetime, float, nvarchar(max) and record. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type InputParameters struct { + + // Is this input parameter a configuration parameter? Defaults to false. + // +kubebuilder:validation:Optional + ConfigurationParameter *bool `json:"configurationParameter,omitempty" tf:"configuration_parameter,omitempty"` + + // The input data type of this JavaScript Function. 
Possible values include any, array, bigint, datetime, float, nvarchar(max) and record. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type OutputInitParameters struct { + + // The output data type from this JavaScript Function. Possible values include any, array, bigint, datetime, float, nvarchar(max) and record. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type OutputObservation struct { + + // The output data type from this JavaScript Function. Possible values include any, array, bigint, datetime, float, nvarchar(max) and record. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type OutputParameters struct { + + // The output data type from this JavaScript Function. Possible values include any, array, bigint, datetime, float, nvarchar(max) and record. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +// FunctionJavascriptUdaSpec defines the desired state of FunctionJavascriptUda +type FunctionJavascriptUdaSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider FunctionJavascriptUdaParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider FunctionJavascriptUdaInitParameters `json:"initProvider,omitempty"` +} + +// FunctionJavascriptUdaStatus defines the observed state of FunctionJavascriptUda. 
+type FunctionJavascriptUdaStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider FunctionJavascriptUdaObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// FunctionJavascriptUda is the Schema for the FunctionJavascriptUdas API. Manages a JavaScript UDA Function within a Stream Analytics Streaming Job. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure},path=functionjavascriptudas +type FunctionJavascriptUda struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.input) || (has(self.initProvider) && has(self.initProvider.input))",message="spec.forProvider.input is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.output) || (has(self.initProvider) && has(self.initProvider.output))",message="spec.forProvider.output is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.script) || (has(self.initProvider) && has(self.initProvider.script))",message="spec.forProvider.script is a required parameter" + Spec 
FunctionJavascriptUdaSpec `json:"spec"` + Status FunctionJavascriptUdaStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// FunctionJavascriptUdaList contains a list of FunctionJavascriptUdas +type FunctionJavascriptUdaList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []FunctionJavascriptUda `json:"items"` +} + +// Repository type metadata. +var ( + FunctionJavascriptUda_Kind = "FunctionJavascriptUda" + FunctionJavascriptUda_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: FunctionJavascriptUda_Kind}.String() + FunctionJavascriptUda_KindAPIVersion = FunctionJavascriptUda_Kind + "." + CRDGroupVersion.String() + FunctionJavascriptUda_GroupVersionKind = CRDGroupVersion.WithKind(FunctionJavascriptUda_Kind) +) + +func init() { + SchemeBuilder.Register(&FunctionJavascriptUda{}, &FunctionJavascriptUdaList{}) +} diff --git a/apis/streamanalytics/v1beta2/zz_generated.conversion_hubs.go b/apis/streamanalytics/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 000000000..e2064bf54 --- /dev/null +++ b/apis/streamanalytics/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,37 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *FunctionJavascriptUda) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Job) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *OutputBlob) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *OutputEventHub) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *OutputServiceBusQueue) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *OutputServiceBusTopic) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *ReferenceInputBlob) Hub() {} + +// Hub marks this type as a conversion hub. 
+func (tr *StreamInputBlob) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *StreamInputEventHub) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *StreamInputIOTHub) Hub() {} diff --git a/apis/streamanalytics/v1beta2/zz_generated.deepcopy.go b/apis/streamanalytics/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..958b83d17 --- /dev/null +++ b/apis/streamanalytics/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,4865 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionJavascriptUda) DeepCopyInto(out *FunctionJavascriptUda) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionJavascriptUda. +func (in *FunctionJavascriptUda) DeepCopy() *FunctionJavascriptUda { + if in == nil { + return nil + } + out := new(FunctionJavascriptUda) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FunctionJavascriptUda) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FunctionJavascriptUdaInitParameters) DeepCopyInto(out *FunctionJavascriptUdaInitParameters) { + *out = *in + if in.Input != nil { + in, out := &in.Input, &out.Input + *out = make([]InputInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Output != nil { + in, out := &in.Output, &out.Output + *out = new(OutputInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Script != nil { + in, out := &in.Script, &out.Script + *out = new(string) + **out = **in + } + if in.StreamAnalyticsJobID != nil { + in, out := &in.StreamAnalyticsJobID, &out.StreamAnalyticsJobID + *out = new(string) + **out = **in + } + if in.StreamAnalyticsJobIDRef != nil { + in, out := &in.StreamAnalyticsJobIDRef, &out.StreamAnalyticsJobIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StreamAnalyticsJobIDSelector != nil { + in, out := &in.StreamAnalyticsJobIDSelector, &out.StreamAnalyticsJobIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionJavascriptUdaInitParameters. +func (in *FunctionJavascriptUdaInitParameters) DeepCopy() *FunctionJavascriptUdaInitParameters { + if in == nil { + return nil + } + out := new(FunctionJavascriptUdaInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionJavascriptUdaList) DeepCopyInto(out *FunctionJavascriptUdaList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]FunctionJavascriptUda, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionJavascriptUdaList. 
+func (in *FunctionJavascriptUdaList) DeepCopy() *FunctionJavascriptUdaList { + if in == nil { + return nil + } + out := new(FunctionJavascriptUdaList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FunctionJavascriptUdaList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionJavascriptUdaObservation) DeepCopyInto(out *FunctionJavascriptUdaObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Input != nil { + in, out := &in.Input, &out.Input + *out = make([]InputObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Output != nil { + in, out := &in.Output, &out.Output + *out = new(OutputObservation) + (*in).DeepCopyInto(*out) + } + if in.Script != nil { + in, out := &in.Script, &out.Script + *out = new(string) + **out = **in + } + if in.StreamAnalyticsJobID != nil { + in, out := &in.StreamAnalyticsJobID, &out.StreamAnalyticsJobID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionJavascriptUdaObservation. +func (in *FunctionJavascriptUdaObservation) DeepCopy() *FunctionJavascriptUdaObservation { + if in == nil { + return nil + } + out := new(FunctionJavascriptUdaObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FunctionJavascriptUdaParameters) DeepCopyInto(out *FunctionJavascriptUdaParameters) { + *out = *in + if in.Input != nil { + in, out := &in.Input, &out.Input + *out = make([]InputParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Output != nil { + in, out := &in.Output, &out.Output + *out = new(OutputParameters) + (*in).DeepCopyInto(*out) + } + if in.Script != nil { + in, out := &in.Script, &out.Script + *out = new(string) + **out = **in + } + if in.StreamAnalyticsJobID != nil { + in, out := &in.StreamAnalyticsJobID, &out.StreamAnalyticsJobID + *out = new(string) + **out = **in + } + if in.StreamAnalyticsJobIDRef != nil { + in, out := &in.StreamAnalyticsJobIDRef, &out.StreamAnalyticsJobIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StreamAnalyticsJobIDSelector != nil { + in, out := &in.StreamAnalyticsJobIDSelector, &out.StreamAnalyticsJobIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionJavascriptUdaParameters. +func (in *FunctionJavascriptUdaParameters) DeepCopy() *FunctionJavascriptUdaParameters { + if in == nil { + return nil + } + out := new(FunctionJavascriptUdaParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionJavascriptUdaSpec) DeepCopyInto(out *FunctionJavascriptUdaSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionJavascriptUdaSpec. 
+func (in *FunctionJavascriptUdaSpec) DeepCopy() *FunctionJavascriptUdaSpec { + if in == nil { + return nil + } + out := new(FunctionJavascriptUdaSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionJavascriptUdaStatus) DeepCopyInto(out *FunctionJavascriptUdaStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionJavascriptUdaStatus. +func (in *FunctionJavascriptUdaStatus) DeepCopy() *FunctionJavascriptUdaStatus { + if in == nil { + return nil + } + out := new(FunctionJavascriptUdaStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityInitParameters) DeepCopyInto(out *IdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityInitParameters. +func (in *IdentityInitParameters) DeepCopy() *IdentityInitParameters { + if in == nil { + return nil + } + out := new(IdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentityObservation) DeepCopyInto(out *IdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityObservation. +func (in *IdentityObservation) DeepCopy() *IdentityObservation { + if in == nil { + return nil + } + out := new(IdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityParameters) DeepCopyInto(out *IdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityParameters. +func (in *IdentityParameters) DeepCopy() *IdentityParameters { + if in == nil { + return nil + } + out := new(IdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InputInitParameters) DeepCopyInto(out *InputInitParameters) { + *out = *in + if in.ConfigurationParameter != nil { + in, out := &in.ConfigurationParameter, &out.ConfigurationParameter + *out = new(bool) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputInitParameters. +func (in *InputInitParameters) DeepCopy() *InputInitParameters { + if in == nil { + return nil + } + out := new(InputInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InputObservation) DeepCopyInto(out *InputObservation) { + *out = *in + if in.ConfigurationParameter != nil { + in, out := &in.ConfigurationParameter, &out.ConfigurationParameter + *out = new(bool) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputObservation. +func (in *InputObservation) DeepCopy() *InputObservation { + if in == nil { + return nil + } + out := new(InputObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InputParameters) DeepCopyInto(out *InputParameters) { + *out = *in + if in.ConfigurationParameter != nil { + in, out := &in.ConfigurationParameter, &out.ConfigurationParameter + *out = new(bool) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InputParameters. 
+func (in *InputParameters) DeepCopy() *InputParameters { + if in == nil { + return nil + } + out := new(InputParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Job) DeepCopyInto(out *Job) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Job. +func (in *Job) DeepCopy() *Job { + if in == nil { + return nil + } + out := new(Job) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Job) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JobInitParameters) DeepCopyInto(out *JobInitParameters) { + *out = *in + if in.CompatibilityLevel != nil { + in, out := &in.CompatibilityLevel, &out.CompatibilityLevel + *out = new(string) + **out = **in + } + if in.ContentStoragePolicy != nil { + in, out := &in.ContentStoragePolicy, &out.ContentStoragePolicy + *out = new(string) + **out = **in + } + if in.DataLocale != nil { + in, out := &in.DataLocale, &out.DataLocale + *out = new(string) + **out = **in + } + if in.EventsLateArrivalMaxDelayInSeconds != nil { + in, out := &in.EventsLateArrivalMaxDelayInSeconds, &out.EventsLateArrivalMaxDelayInSeconds + *out = new(float64) + **out = **in + } + if in.EventsOutOfOrderMaxDelayInSeconds != nil { + in, out := &in.EventsOutOfOrderMaxDelayInSeconds, &out.EventsOutOfOrderMaxDelayInSeconds + *out = new(float64) + **out = **in + } + if in.EventsOutOfOrderPolicy != nil { + in, out := &in.EventsOutOfOrderPolicy, &out.EventsOutOfOrderPolicy + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.JobStorageAccount != nil { + in, out := &in.JobStorageAccount, &out.JobStorageAccount + *out = make([]JobStorageAccountInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.OutputErrorPolicy != nil { + in, out := &in.OutputErrorPolicy, &out.OutputErrorPolicy + *out = new(string) + **out = **in + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.StreamAnalyticsClusterID != nil { + in, out := &in.StreamAnalyticsClusterID, &out.StreamAnalyticsClusterID + *out = new(string) + **out = **in + } + if in.StreamingUnits != nil { + in, out := &in.StreamingUnits, &out.StreamingUnits + *out = new(float64) + **out = **in + } + if in.Tags != nil { + 
in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TransformationQuery != nil { + in, out := &in.TransformationQuery, &out.TransformationQuery + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobInitParameters. +func (in *JobInitParameters) DeepCopy() *JobInitParameters { + if in == nil { + return nil + } + out := new(JobInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JobList) DeepCopyInto(out *JobList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Job, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobList. +func (in *JobList) DeepCopy() *JobList { + if in == nil { + return nil + } + out := new(JobList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *JobList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JobObservation) DeepCopyInto(out *JobObservation) { + *out = *in + if in.CompatibilityLevel != nil { + in, out := &in.CompatibilityLevel, &out.CompatibilityLevel + *out = new(string) + **out = **in + } + if in.ContentStoragePolicy != nil { + in, out := &in.ContentStoragePolicy, &out.ContentStoragePolicy + *out = new(string) + **out = **in + } + if in.DataLocale != nil { + in, out := &in.DataLocale, &out.DataLocale + *out = new(string) + **out = **in + } + if in.EventsLateArrivalMaxDelayInSeconds != nil { + in, out := &in.EventsLateArrivalMaxDelayInSeconds, &out.EventsLateArrivalMaxDelayInSeconds + *out = new(float64) + **out = **in + } + if in.EventsOutOfOrderMaxDelayInSeconds != nil { + in, out := &in.EventsOutOfOrderMaxDelayInSeconds, &out.EventsOutOfOrderMaxDelayInSeconds + *out = new(float64) + **out = **in + } + if in.EventsOutOfOrderPolicy != nil { + in, out := &in.EventsOutOfOrderPolicy, &out.EventsOutOfOrderPolicy + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.JobID != nil { + in, out := &in.JobID, &out.JobID + *out = new(string) + **out = **in + } + if in.JobStorageAccount != nil { + in, out := &in.JobStorageAccount, &out.JobStorageAccount + *out = make([]JobStorageAccountObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.OutputErrorPolicy != nil { + in, out := &in.OutputErrorPolicy, &out.OutputErrorPolicy + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + 
} + if in.StreamAnalyticsClusterID != nil { + in, out := &in.StreamAnalyticsClusterID, &out.StreamAnalyticsClusterID + *out = new(string) + **out = **in + } + if in.StreamingUnits != nil { + in, out := &in.StreamingUnits, &out.StreamingUnits + *out = new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TransformationQuery != nil { + in, out := &in.TransformationQuery, &out.TransformationQuery + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobObservation. +func (in *JobObservation) DeepCopy() *JobObservation { + if in == nil { + return nil + } + out := new(JobObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JobParameters) DeepCopyInto(out *JobParameters) { + *out = *in + if in.CompatibilityLevel != nil { + in, out := &in.CompatibilityLevel, &out.CompatibilityLevel + *out = new(string) + **out = **in + } + if in.ContentStoragePolicy != nil { + in, out := &in.ContentStoragePolicy, &out.ContentStoragePolicy + *out = new(string) + **out = **in + } + if in.DataLocale != nil { + in, out := &in.DataLocale, &out.DataLocale + *out = new(string) + **out = **in + } + if in.EventsLateArrivalMaxDelayInSeconds != nil { + in, out := &in.EventsLateArrivalMaxDelayInSeconds, &out.EventsLateArrivalMaxDelayInSeconds + *out = new(float64) + **out = **in + } + if in.EventsOutOfOrderMaxDelayInSeconds != nil { + in, out := &in.EventsOutOfOrderMaxDelayInSeconds, &out.EventsOutOfOrderMaxDelayInSeconds + *out = new(float64) + **out = **in + } + if in.EventsOutOfOrderPolicy != nil { + in, out := &in.EventsOutOfOrderPolicy, &out.EventsOutOfOrderPolicy + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.JobStorageAccount != nil { + in, out := &in.JobStorageAccount, &out.JobStorageAccount + *out = make([]JobStorageAccountParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.OutputErrorPolicy != nil { + in, out := &in.OutputErrorPolicy, &out.OutputErrorPolicy + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = 
new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.StreamAnalyticsClusterID != nil { + in, out := &in.StreamAnalyticsClusterID, &out.StreamAnalyticsClusterID + *out = new(string) + **out = **in + } + if in.StreamingUnits != nil { + in, out := &in.StreamingUnits, &out.StreamingUnits + *out = new(float64) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TransformationQuery != nil { + in, out := &in.TransformationQuery, &out.TransformationQuery + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobParameters. +func (in *JobParameters) DeepCopy() *JobParameters { + if in == nil { + return nil + } + out := new(JobParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JobSpec) DeepCopyInto(out *JobSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobSpec. +func (in *JobSpec) DeepCopy() *JobSpec { + if in == nil { + return nil + } + out := new(JobSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JobStatus) DeepCopyInto(out *JobStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobStatus. +func (in *JobStatus) DeepCopy() *JobStatus { + if in == nil { + return nil + } + out := new(JobStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JobStorageAccountInitParameters) DeepCopyInto(out *JobStorageAccountInitParameters) { + *out = *in + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } + if in.AuthenticationMode != nil { + in, out := &in.AuthenticationMode, &out.AuthenticationMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobStorageAccountInitParameters. +func (in *JobStorageAccountInitParameters) DeepCopy() *JobStorageAccountInitParameters { + if in == nil { + return nil + } + out := new(JobStorageAccountInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JobStorageAccountObservation) DeepCopyInto(out *JobStorageAccountObservation) { + *out = *in + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } + if in.AuthenticationMode != nil { + in, out := &in.AuthenticationMode, &out.AuthenticationMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobStorageAccountObservation. 
+func (in *JobStorageAccountObservation) DeepCopy() *JobStorageAccountObservation { + if in == nil { + return nil + } + out := new(JobStorageAccountObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JobStorageAccountParameters) DeepCopyInto(out *JobStorageAccountParameters) { + *out = *in + out.AccountKeySecretRef = in.AccountKeySecretRef + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } + if in.AuthenticationMode != nil { + in, out := &in.AuthenticationMode, &out.AuthenticationMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobStorageAccountParameters. +func (in *JobStorageAccountParameters) DeepCopy() *JobStorageAccountParameters { + if in == nil { + return nil + } + out := new(JobStorageAccountParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputBlob) DeepCopyInto(out *OutputBlob) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputBlob. +func (in *OutputBlob) DeepCopy() *OutputBlob { + if in == nil { + return nil + } + out := new(OutputBlob) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OutputBlob) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OutputBlobInitParameters) DeepCopyInto(out *OutputBlobInitParameters) { + *out = *in + if in.AuthenticationMode != nil { + in, out := &in.AuthenticationMode, &out.AuthenticationMode + *out = new(string) + **out = **in + } + if in.BatchMaxWaitTime != nil { + in, out := &in.BatchMaxWaitTime, &out.BatchMaxWaitTime + *out = new(string) + **out = **in + } + if in.BatchMinRows != nil { + in, out := &in.BatchMinRows, &out.BatchMinRows + *out = new(float64) + **out = **in + } + if in.BlobWriteMode != nil { + in, out := &in.BlobWriteMode, &out.BlobWriteMode + *out = new(string) + **out = **in + } + if in.DateFormat != nil { + in, out := &in.DateFormat, &out.DateFormat + *out = new(string) + **out = **in + } + if in.PathPattern != nil { + in, out := &in.PathPattern, &out.PathPattern + *out = new(string) + **out = **in + } + if in.Serialization != nil { + in, out := &in.Serialization, &out.Serialization + *out = new(SerializationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StorageAccountName != nil { + in, out := &in.StorageAccountName, &out.StorageAccountName + *out = new(string) + **out = **in + } + if in.StorageAccountNameRef != nil { + in, out := &in.StorageAccountNameRef, &out.StorageAccountNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StorageAccountNameSelector != nil { + in, out := &in.StorageAccountNameSelector, &out.StorageAccountNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StorageContainerName != nil { + in, out := &in.StorageContainerName, &out.StorageContainerName + *out = new(string) + **out = **in + } + if in.StorageContainerNameRef != nil { + in, out := &in.StorageContainerNameRef, &out.StorageContainerNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StorageContainerNameSelector != nil { + in, out := &in.StorageContainerNameSelector, &out.StorageContainerNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StreamAnalyticsJobName != 
nil { + in, out := &in.StreamAnalyticsJobName, &out.StreamAnalyticsJobName + *out = new(string) + **out = **in + } + if in.StreamAnalyticsJobNameRef != nil { + in, out := &in.StreamAnalyticsJobNameRef, &out.StreamAnalyticsJobNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StreamAnalyticsJobNameSelector != nil { + in, out := &in.StreamAnalyticsJobNameSelector, &out.StreamAnalyticsJobNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.TimeFormat != nil { + in, out := &in.TimeFormat, &out.TimeFormat + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputBlobInitParameters. +func (in *OutputBlobInitParameters) DeepCopy() *OutputBlobInitParameters { + if in == nil { + return nil + } + out := new(OutputBlobInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputBlobList) DeepCopyInto(out *OutputBlobList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OutputBlob, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputBlobList. +func (in *OutputBlobList) DeepCopy() *OutputBlobList { + if in == nil { + return nil + } + out := new(OutputBlobList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OutputBlobList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OutputBlobObservation) DeepCopyInto(out *OutputBlobObservation) { + *out = *in + if in.AuthenticationMode != nil { + in, out := &in.AuthenticationMode, &out.AuthenticationMode + *out = new(string) + **out = **in + } + if in.BatchMaxWaitTime != nil { + in, out := &in.BatchMaxWaitTime, &out.BatchMaxWaitTime + *out = new(string) + **out = **in + } + if in.BatchMinRows != nil { + in, out := &in.BatchMinRows, &out.BatchMinRows + *out = new(float64) + **out = **in + } + if in.BlobWriteMode != nil { + in, out := &in.BlobWriteMode, &out.BlobWriteMode + *out = new(string) + **out = **in + } + if in.DateFormat != nil { + in, out := &in.DateFormat, &out.DateFormat + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.PathPattern != nil { + in, out := &in.PathPattern, &out.PathPattern + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Serialization != nil { + in, out := &in.Serialization, &out.Serialization + *out = new(SerializationObservation) + (*in).DeepCopyInto(*out) + } + if in.StorageAccountName != nil { + in, out := &in.StorageAccountName, &out.StorageAccountName + *out = new(string) + **out = **in + } + if in.StorageContainerName != nil { + in, out := &in.StorageContainerName, &out.StorageContainerName + *out = new(string) + **out = **in + } + if in.StreamAnalyticsJobName != nil { + in, out := &in.StreamAnalyticsJobName, &out.StreamAnalyticsJobName + *out = new(string) + **out = **in + } + if in.TimeFormat != nil { + in, out := &in.TimeFormat, &out.TimeFormat + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputBlobObservation. 
+func (in *OutputBlobObservation) DeepCopy() *OutputBlobObservation { + if in == nil { + return nil + } + out := new(OutputBlobObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputBlobParameters) DeepCopyInto(out *OutputBlobParameters) { + *out = *in + if in.AuthenticationMode != nil { + in, out := &in.AuthenticationMode, &out.AuthenticationMode + *out = new(string) + **out = **in + } + if in.BatchMaxWaitTime != nil { + in, out := &in.BatchMaxWaitTime, &out.BatchMaxWaitTime + *out = new(string) + **out = **in + } + if in.BatchMinRows != nil { + in, out := &in.BatchMinRows, &out.BatchMinRows + *out = new(float64) + **out = **in + } + if in.BlobWriteMode != nil { + in, out := &in.BlobWriteMode, &out.BlobWriteMode + *out = new(string) + **out = **in + } + if in.DateFormat != nil { + in, out := &in.DateFormat, &out.DateFormat + *out = new(string) + **out = **in + } + if in.PathPattern != nil { + in, out := &in.PathPattern, &out.PathPattern + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Serialization != nil { + in, out := &in.Serialization, &out.Serialization + *out = new(SerializationParameters) + (*in).DeepCopyInto(*out) + } + if in.StorageAccountKeySecretRef != nil { + in, out := &in.StorageAccountKeySecretRef, &out.StorageAccountKeySecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.StorageAccountName != nil { + in, out := &in.StorageAccountName, 
&out.StorageAccountName + *out = new(string) + **out = **in + } + if in.StorageAccountNameRef != nil { + in, out := &in.StorageAccountNameRef, &out.StorageAccountNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StorageAccountNameSelector != nil { + in, out := &in.StorageAccountNameSelector, &out.StorageAccountNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StorageContainerName != nil { + in, out := &in.StorageContainerName, &out.StorageContainerName + *out = new(string) + **out = **in + } + if in.StorageContainerNameRef != nil { + in, out := &in.StorageContainerNameRef, &out.StorageContainerNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StorageContainerNameSelector != nil { + in, out := &in.StorageContainerNameSelector, &out.StorageContainerNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StreamAnalyticsJobName != nil { + in, out := &in.StreamAnalyticsJobName, &out.StreamAnalyticsJobName + *out = new(string) + **out = **in + } + if in.StreamAnalyticsJobNameRef != nil { + in, out := &in.StreamAnalyticsJobNameRef, &out.StreamAnalyticsJobNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StreamAnalyticsJobNameSelector != nil { + in, out := &in.StreamAnalyticsJobNameSelector, &out.StreamAnalyticsJobNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.TimeFormat != nil { + in, out := &in.TimeFormat, &out.TimeFormat + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputBlobParameters. +func (in *OutputBlobParameters) DeepCopy() *OutputBlobParameters { + if in == nil { + return nil + } + out := new(OutputBlobParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OutputBlobSpec) DeepCopyInto(out *OutputBlobSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputBlobSpec. +func (in *OutputBlobSpec) DeepCopy() *OutputBlobSpec { + if in == nil { + return nil + } + out := new(OutputBlobSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputBlobStatus) DeepCopyInto(out *OutputBlobStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputBlobStatus. +func (in *OutputBlobStatus) DeepCopy() *OutputBlobStatus { + if in == nil { + return nil + } + out := new(OutputBlobStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputEventHub) DeepCopyInto(out *OutputEventHub) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputEventHub. +func (in *OutputEventHub) DeepCopy() *OutputEventHub { + if in == nil { + return nil + } + out := new(OutputEventHub) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *OutputEventHub) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputEventHubInitParameters) DeepCopyInto(out *OutputEventHubInitParameters) { + *out = *in + if in.AuthenticationMode != nil { + in, out := &in.AuthenticationMode, &out.AuthenticationMode + *out = new(string) + **out = **in + } + if in.EventHubName != nil { + in, out := &in.EventHubName, &out.EventHubName + *out = new(string) + **out = **in + } + if in.EventHubNameRef != nil { + in, out := &in.EventHubNameRef, &out.EventHubNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.EventHubNameSelector != nil { + in, out := &in.EventHubNameSelector, &out.EventHubNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PartitionKey != nil { + in, out := &in.PartitionKey, &out.PartitionKey + *out = new(string) + **out = **in + } + if in.PropertyColumns != nil { + in, out := &in.PropertyColumns, &out.PropertyColumns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Serialization != nil { + in, out := &in.Serialization, &out.Serialization + *out = new(OutputEventHubSerializationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ServiceBusNamespace != nil { + in, out := &in.ServiceBusNamespace, &out.ServiceBusNamespace + *out = new(string) + **out = **in + } + if in.ServiceBusNamespaceRef != nil { + in, out := &in.ServiceBusNamespaceRef, &out.ServiceBusNamespaceRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceBusNamespaceSelector != nil { + in, out := &in.ServiceBusNamespaceSelector, &out.ServiceBusNamespaceSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SharedAccessPolicyName != nil { + in, out := 
&in.SharedAccessPolicyName, &out.SharedAccessPolicyName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputEventHubInitParameters. +func (in *OutputEventHubInitParameters) DeepCopy() *OutputEventHubInitParameters { + if in == nil { + return nil + } + out := new(OutputEventHubInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputEventHubList) DeepCopyInto(out *OutputEventHubList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OutputEventHub, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputEventHubList. +func (in *OutputEventHubList) DeepCopy() *OutputEventHubList { + if in == nil { + return nil + } + out := new(OutputEventHubList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OutputEventHubList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OutputEventHubObservation) DeepCopyInto(out *OutputEventHubObservation) { + *out = *in + if in.AuthenticationMode != nil { + in, out := &in.AuthenticationMode, &out.AuthenticationMode + *out = new(string) + **out = **in + } + if in.EventHubName != nil { + in, out := &in.EventHubName, &out.EventHubName + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.PartitionKey != nil { + in, out := &in.PartitionKey, &out.PartitionKey + *out = new(string) + **out = **in + } + if in.PropertyColumns != nil { + in, out := &in.PropertyColumns, &out.PropertyColumns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Serialization != nil { + in, out := &in.Serialization, &out.Serialization + *out = new(OutputEventHubSerializationObservation) + (*in).DeepCopyInto(*out) + } + if in.ServiceBusNamespace != nil { + in, out := &in.ServiceBusNamespace, &out.ServiceBusNamespace + *out = new(string) + **out = **in + } + if in.SharedAccessPolicyName != nil { + in, out := &in.SharedAccessPolicyName, &out.SharedAccessPolicyName + *out = new(string) + **out = **in + } + if in.StreamAnalyticsJobName != nil { + in, out := &in.StreamAnalyticsJobName, &out.StreamAnalyticsJobName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputEventHubObservation. +func (in *OutputEventHubObservation) DeepCopy() *OutputEventHubObservation { + if in == nil { + return nil + } + out := new(OutputEventHubObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OutputEventHubParameters) DeepCopyInto(out *OutputEventHubParameters) { + *out = *in + if in.AuthenticationMode != nil { + in, out := &in.AuthenticationMode, &out.AuthenticationMode + *out = new(string) + **out = **in + } + if in.EventHubName != nil { + in, out := &in.EventHubName, &out.EventHubName + *out = new(string) + **out = **in + } + if in.EventHubNameRef != nil { + in, out := &in.EventHubNameRef, &out.EventHubNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.EventHubNameSelector != nil { + in, out := &in.EventHubNameSelector, &out.EventHubNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PartitionKey != nil { + in, out := &in.PartitionKey, &out.PartitionKey + *out = new(string) + **out = **in + } + if in.PropertyColumns != nil { + in, out := &in.PropertyColumns, &out.PropertyColumns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Serialization != nil { + in, out := &in.Serialization, &out.Serialization + *out = new(OutputEventHubSerializationParameters) + (*in).DeepCopyInto(*out) + } + if in.ServiceBusNamespace != nil { + in, out := &in.ServiceBusNamespace, &out.ServiceBusNamespace + *out = new(string) + **out = **in + } + if in.ServiceBusNamespaceRef != nil { + in, out := &in.ServiceBusNamespaceRef, &out.ServiceBusNamespaceRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if 
in.ServiceBusNamespaceSelector != nil { + in, out := &in.ServiceBusNamespaceSelector, &out.ServiceBusNamespaceSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SharedAccessPolicyKeySecretRef != nil { + in, out := &in.SharedAccessPolicyKeySecretRef, &out.SharedAccessPolicyKeySecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.SharedAccessPolicyName != nil { + in, out := &in.SharedAccessPolicyName, &out.SharedAccessPolicyName + *out = new(string) + **out = **in + } + if in.StreamAnalyticsJobName != nil { + in, out := &in.StreamAnalyticsJobName, &out.StreamAnalyticsJobName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputEventHubParameters. +func (in *OutputEventHubParameters) DeepCopy() *OutputEventHubParameters { + if in == nil { + return nil + } + out := new(OutputEventHubParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputEventHubSerializationInitParameters) DeepCopyInto(out *OutputEventHubSerializationInitParameters) { + *out = *in + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.FieldDelimiter != nil { + in, out := &in.FieldDelimiter, &out.FieldDelimiter + *out = new(string) + **out = **in + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputEventHubSerializationInitParameters. 
+func (in *OutputEventHubSerializationInitParameters) DeepCopy() *OutputEventHubSerializationInitParameters { + if in == nil { + return nil + } + out := new(OutputEventHubSerializationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputEventHubSerializationObservation) DeepCopyInto(out *OutputEventHubSerializationObservation) { + *out = *in + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.FieldDelimiter != nil { + in, out := &in.FieldDelimiter, &out.FieldDelimiter + *out = new(string) + **out = **in + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputEventHubSerializationObservation. +func (in *OutputEventHubSerializationObservation) DeepCopy() *OutputEventHubSerializationObservation { + if in == nil { + return nil + } + out := new(OutputEventHubSerializationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OutputEventHubSerializationParameters) DeepCopyInto(out *OutputEventHubSerializationParameters) { + *out = *in + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.FieldDelimiter != nil { + in, out := &in.FieldDelimiter, &out.FieldDelimiter + *out = new(string) + **out = **in + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputEventHubSerializationParameters. +func (in *OutputEventHubSerializationParameters) DeepCopy() *OutputEventHubSerializationParameters { + if in == nil { + return nil + } + out := new(OutputEventHubSerializationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputEventHubSpec) DeepCopyInto(out *OutputEventHubSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputEventHubSpec. +func (in *OutputEventHubSpec) DeepCopy() *OutputEventHubSpec { + if in == nil { + return nil + } + out := new(OutputEventHubSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputEventHubStatus) DeepCopyInto(out *OutputEventHubStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputEventHubStatus. 
+func (in *OutputEventHubStatus) DeepCopy() *OutputEventHubStatus { + if in == nil { + return nil + } + out := new(OutputEventHubStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputInitParameters) DeepCopyInto(out *OutputInitParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputInitParameters. +func (in *OutputInitParameters) DeepCopy() *OutputInitParameters { + if in == nil { + return nil + } + out := new(OutputInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputObservation) DeepCopyInto(out *OutputObservation) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputObservation. +func (in *OutputObservation) DeepCopy() *OutputObservation { + if in == nil { + return nil + } + out := new(OutputObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputParameters) DeepCopyInto(out *OutputParameters) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputParameters. 
+func (in *OutputParameters) DeepCopy() *OutputParameters { + if in == nil { + return nil + } + out := new(OutputParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputServiceBusQueue) DeepCopyInto(out *OutputServiceBusQueue) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputServiceBusQueue. +func (in *OutputServiceBusQueue) DeepCopy() *OutputServiceBusQueue { + if in == nil { + return nil + } + out := new(OutputServiceBusQueue) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OutputServiceBusQueue) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OutputServiceBusQueueInitParameters) DeepCopyInto(out *OutputServiceBusQueueInitParameters) { + *out = *in + if in.AuthenticationMode != nil { + in, out := &in.AuthenticationMode, &out.AuthenticationMode + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PropertyColumns != nil { + in, out := &in.PropertyColumns, &out.PropertyColumns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.QueueName != nil { + in, out := &in.QueueName, &out.QueueName + *out = new(string) + **out = **in + } + if in.QueueNameRef != nil { + in, out := &in.QueueNameRef, &out.QueueNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.QueueNameSelector != nil { + in, out := &in.QueueNameSelector, &out.QueueNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Serialization != nil { + in, out := &in.Serialization, &out.Serialization + *out = new(OutputServiceBusQueueSerializationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ServiceBusNamespace != nil { + in, out := &in.ServiceBusNamespace, &out.ServiceBusNamespace + *out = new(string) + **out = **in + } + if in.ServiceBusNamespaceRef != nil { + in, out := &in.ServiceBusNamespaceRef, &out.ServiceBusNamespaceRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceBusNamespaceSelector != nil { + 
in, out := &in.ServiceBusNamespaceSelector, &out.ServiceBusNamespaceSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SharedAccessPolicyName != nil { + in, out := &in.SharedAccessPolicyName, &out.SharedAccessPolicyName + *out = new(string) + **out = **in + } + if in.StreamAnalyticsJobName != nil { + in, out := &in.StreamAnalyticsJobName, &out.StreamAnalyticsJobName + *out = new(string) + **out = **in + } + if in.StreamAnalyticsJobNameRef != nil { + in, out := &in.StreamAnalyticsJobNameRef, &out.StreamAnalyticsJobNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StreamAnalyticsJobNameSelector != nil { + in, out := &in.StreamAnalyticsJobNameSelector, &out.StreamAnalyticsJobNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SystemPropertyColumns != nil { + in, out := &in.SystemPropertyColumns, &out.SystemPropertyColumns + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputServiceBusQueueInitParameters. +func (in *OutputServiceBusQueueInitParameters) DeepCopy() *OutputServiceBusQueueInitParameters { + if in == nil { + return nil + } + out := new(OutputServiceBusQueueInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OutputServiceBusQueueList) DeepCopyInto(out *OutputServiceBusQueueList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OutputServiceBusQueue, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputServiceBusQueueList. +func (in *OutputServiceBusQueueList) DeepCopy() *OutputServiceBusQueueList { + if in == nil { + return nil + } + out := new(OutputServiceBusQueueList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OutputServiceBusQueueList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OutputServiceBusQueueObservation) DeepCopyInto(out *OutputServiceBusQueueObservation) { + *out = *in + if in.AuthenticationMode != nil { + in, out := &in.AuthenticationMode, &out.AuthenticationMode + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PropertyColumns != nil { + in, out := &in.PropertyColumns, &out.PropertyColumns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.QueueName != nil { + in, out := &in.QueueName, &out.QueueName + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Serialization != nil { + in, out := &in.Serialization, &out.Serialization + *out = new(OutputServiceBusQueueSerializationObservation) + (*in).DeepCopyInto(*out) + } + if in.ServiceBusNamespace != nil { + in, out := &in.ServiceBusNamespace, &out.ServiceBusNamespace + *out = new(string) + **out = **in + } + if in.SharedAccessPolicyName != nil { + in, out := &in.SharedAccessPolicyName, &out.SharedAccessPolicyName + *out = new(string) + **out = **in + } + if in.StreamAnalyticsJobName != nil { + in, out := &in.StreamAnalyticsJobName, &out.StreamAnalyticsJobName + *out = new(string) + **out = **in + } + if in.SystemPropertyColumns != nil { + in, out := &in.SystemPropertyColumns, &out.SystemPropertyColumns + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating 
a new OutputServiceBusQueueObservation. +func (in *OutputServiceBusQueueObservation) DeepCopy() *OutputServiceBusQueueObservation { + if in == nil { + return nil + } + out := new(OutputServiceBusQueueObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputServiceBusQueueParameters) DeepCopyInto(out *OutputServiceBusQueueParameters) { + *out = *in + if in.AuthenticationMode != nil { + in, out := &in.AuthenticationMode, &out.AuthenticationMode + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PropertyColumns != nil { + in, out := &in.PropertyColumns, &out.PropertyColumns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.QueueName != nil { + in, out := &in.QueueName, &out.QueueName + *out = new(string) + **out = **in + } + if in.QueueNameRef != nil { + in, out := &in.QueueNameRef, &out.QueueNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.QueueNameSelector != nil { + in, out := &in.QueueNameSelector, &out.QueueNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Serialization != nil { + in, out := &in.Serialization, &out.Serialization + *out = new(OutputServiceBusQueueSerializationParameters) + (*in).DeepCopyInto(*out) 
+ } + if in.ServiceBusNamespace != nil { + in, out := &in.ServiceBusNamespace, &out.ServiceBusNamespace + *out = new(string) + **out = **in + } + if in.ServiceBusNamespaceRef != nil { + in, out := &in.ServiceBusNamespaceRef, &out.ServiceBusNamespaceRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceBusNamespaceSelector != nil { + in, out := &in.ServiceBusNamespaceSelector, &out.ServiceBusNamespaceSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SharedAccessPolicyKeySecretRef != nil { + in, out := &in.SharedAccessPolicyKeySecretRef, &out.SharedAccessPolicyKeySecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.SharedAccessPolicyName != nil { + in, out := &in.SharedAccessPolicyName, &out.SharedAccessPolicyName + *out = new(string) + **out = **in + } + if in.StreamAnalyticsJobName != nil { + in, out := &in.StreamAnalyticsJobName, &out.StreamAnalyticsJobName + *out = new(string) + **out = **in + } + if in.StreamAnalyticsJobNameRef != nil { + in, out := &in.StreamAnalyticsJobNameRef, &out.StreamAnalyticsJobNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StreamAnalyticsJobNameSelector != nil { + in, out := &in.StreamAnalyticsJobNameSelector, &out.StreamAnalyticsJobNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SystemPropertyColumns != nil { + in, out := &in.SystemPropertyColumns, &out.SystemPropertyColumns + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputServiceBusQueueParameters. 
+func (in *OutputServiceBusQueueParameters) DeepCopy() *OutputServiceBusQueueParameters { + if in == nil { + return nil + } + out := new(OutputServiceBusQueueParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputServiceBusQueueSerializationInitParameters) DeepCopyInto(out *OutputServiceBusQueueSerializationInitParameters) { + *out = *in + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.FieldDelimiter != nil { + in, out := &in.FieldDelimiter, &out.FieldDelimiter + *out = new(string) + **out = **in + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputServiceBusQueueSerializationInitParameters. +func (in *OutputServiceBusQueueSerializationInitParameters) DeepCopy() *OutputServiceBusQueueSerializationInitParameters { + if in == nil { + return nil + } + out := new(OutputServiceBusQueueSerializationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OutputServiceBusQueueSerializationObservation) DeepCopyInto(out *OutputServiceBusQueueSerializationObservation) { + *out = *in + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.FieldDelimiter != nil { + in, out := &in.FieldDelimiter, &out.FieldDelimiter + *out = new(string) + **out = **in + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputServiceBusQueueSerializationObservation. +func (in *OutputServiceBusQueueSerializationObservation) DeepCopy() *OutputServiceBusQueueSerializationObservation { + if in == nil { + return nil + } + out := new(OutputServiceBusQueueSerializationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputServiceBusQueueSerializationParameters) DeepCopyInto(out *OutputServiceBusQueueSerializationParameters) { + *out = *in + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.FieldDelimiter != nil { + in, out := &in.FieldDelimiter, &out.FieldDelimiter + *out = new(string) + **out = **in + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputServiceBusQueueSerializationParameters. 
+func (in *OutputServiceBusQueueSerializationParameters) DeepCopy() *OutputServiceBusQueueSerializationParameters { + if in == nil { + return nil + } + out := new(OutputServiceBusQueueSerializationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputServiceBusQueueSpec) DeepCopyInto(out *OutputServiceBusQueueSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputServiceBusQueueSpec. +func (in *OutputServiceBusQueueSpec) DeepCopy() *OutputServiceBusQueueSpec { + if in == nil { + return nil + } + out := new(OutputServiceBusQueueSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputServiceBusQueueStatus) DeepCopyInto(out *OutputServiceBusQueueStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputServiceBusQueueStatus. +func (in *OutputServiceBusQueueStatus) DeepCopy() *OutputServiceBusQueueStatus { + if in == nil { + return nil + } + out := new(OutputServiceBusQueueStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OutputServiceBusTopic) DeepCopyInto(out *OutputServiceBusTopic) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputServiceBusTopic. +func (in *OutputServiceBusTopic) DeepCopy() *OutputServiceBusTopic { + if in == nil { + return nil + } + out := new(OutputServiceBusTopic) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OutputServiceBusTopic) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputServiceBusTopicInitParameters) DeepCopyInto(out *OutputServiceBusTopicInitParameters) { + *out = *in + if in.AuthenticationMode != nil { + in, out := &in.AuthenticationMode, &out.AuthenticationMode + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PropertyColumns != nil { + in, out := &in.PropertyColumns, &out.PropertyColumns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Serialization != nil 
{ + in, out := &in.Serialization, &out.Serialization + *out = new(OutputServiceBusTopicSerializationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ServiceBusNamespace != nil { + in, out := &in.ServiceBusNamespace, &out.ServiceBusNamespace + *out = new(string) + **out = **in + } + if in.ServiceBusNamespaceRef != nil { + in, out := &in.ServiceBusNamespaceRef, &out.ServiceBusNamespaceRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceBusNamespaceSelector != nil { + in, out := &in.ServiceBusNamespaceSelector, &out.ServiceBusNamespaceSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SharedAccessPolicyName != nil { + in, out := &in.SharedAccessPolicyName, &out.SharedAccessPolicyName + *out = new(string) + **out = **in + } + if in.StreamAnalyticsJobName != nil { + in, out := &in.StreamAnalyticsJobName, &out.StreamAnalyticsJobName + *out = new(string) + **out = **in + } + if in.StreamAnalyticsJobNameRef != nil { + in, out := &in.StreamAnalyticsJobNameRef, &out.StreamAnalyticsJobNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StreamAnalyticsJobNameSelector != nil { + in, out := &in.StreamAnalyticsJobNameSelector, &out.StreamAnalyticsJobNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SystemPropertyColumns != nil { + in, out := &in.SystemPropertyColumns, &out.SystemPropertyColumns + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TopicName != nil { + in, out := &in.TopicName, &out.TopicName + *out = new(string) + **out = **in + } + if in.TopicNameRef != nil { + in, out := &in.TopicNameRef, &out.TopicNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.TopicNameSelector != nil { + in, out := &in.TopicNameSelector, 
&out.TopicNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputServiceBusTopicInitParameters. +func (in *OutputServiceBusTopicInitParameters) DeepCopy() *OutputServiceBusTopicInitParameters { + if in == nil { + return nil + } + out := new(OutputServiceBusTopicInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputServiceBusTopicList) DeepCopyInto(out *OutputServiceBusTopicList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OutputServiceBusTopic, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputServiceBusTopicList. +func (in *OutputServiceBusTopicList) DeepCopy() *OutputServiceBusTopicList { + if in == nil { + return nil + } + out := new(OutputServiceBusTopicList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OutputServiceBusTopicList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OutputServiceBusTopicObservation) DeepCopyInto(out *OutputServiceBusTopicObservation) { + *out = *in + if in.AuthenticationMode != nil { + in, out := &in.AuthenticationMode, &out.AuthenticationMode + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PropertyColumns != nil { + in, out := &in.PropertyColumns, &out.PropertyColumns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Serialization != nil { + in, out := &in.Serialization, &out.Serialization + *out = new(OutputServiceBusTopicSerializationObservation) + (*in).DeepCopyInto(*out) + } + if in.ServiceBusNamespace != nil { + in, out := &in.ServiceBusNamespace, &out.ServiceBusNamespace + *out = new(string) + **out = **in + } + if in.SharedAccessPolicyName != nil { + in, out := &in.SharedAccessPolicyName, &out.SharedAccessPolicyName + *out = new(string) + **out = **in + } + if in.StreamAnalyticsJobName != nil { + in, out := &in.StreamAnalyticsJobName, &out.StreamAnalyticsJobName + *out = new(string) + **out = **in + } + if in.SystemPropertyColumns != nil { + in, out := &in.SystemPropertyColumns, &out.SystemPropertyColumns + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TopicName != nil { + in, out := &in.TopicName, &out.TopicName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating 
a new OutputServiceBusTopicObservation. +func (in *OutputServiceBusTopicObservation) DeepCopy() *OutputServiceBusTopicObservation { + if in == nil { + return nil + } + out := new(OutputServiceBusTopicObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputServiceBusTopicParameters) DeepCopyInto(out *OutputServiceBusTopicParameters) { + *out = *in + if in.AuthenticationMode != nil { + in, out := &in.AuthenticationMode, &out.AuthenticationMode + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PropertyColumns != nil { + in, out := &in.PropertyColumns, &out.PropertyColumns + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Serialization != nil { + in, out := &in.Serialization, &out.Serialization + *out = new(OutputServiceBusTopicSerializationParameters) + (*in).DeepCopyInto(*out) + } + if in.ServiceBusNamespace != nil { + in, out := &in.ServiceBusNamespace, &out.ServiceBusNamespace + *out = new(string) + **out = **in + } + if in.ServiceBusNamespaceRef != nil { + in, out := &in.ServiceBusNamespaceRef, &out.ServiceBusNamespaceRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceBusNamespaceSelector != nil { + in, out := 
&in.ServiceBusNamespaceSelector, &out.ServiceBusNamespaceSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SharedAccessPolicyKeySecretRef != nil { + in, out := &in.SharedAccessPolicyKeySecretRef, &out.SharedAccessPolicyKeySecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.SharedAccessPolicyName != nil { + in, out := &in.SharedAccessPolicyName, &out.SharedAccessPolicyName + *out = new(string) + **out = **in + } + if in.StreamAnalyticsJobName != nil { + in, out := &in.StreamAnalyticsJobName, &out.StreamAnalyticsJobName + *out = new(string) + **out = **in + } + if in.StreamAnalyticsJobNameRef != nil { + in, out := &in.StreamAnalyticsJobNameRef, &out.StreamAnalyticsJobNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StreamAnalyticsJobNameSelector != nil { + in, out := &in.StreamAnalyticsJobNameSelector, &out.StreamAnalyticsJobNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SystemPropertyColumns != nil { + in, out := &in.SystemPropertyColumns, &out.SystemPropertyColumns + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TopicName != nil { + in, out := &in.TopicName, &out.TopicName + *out = new(string) + **out = **in + } + if in.TopicNameRef != nil { + in, out := &in.TopicNameRef, &out.TopicNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.TopicNameSelector != nil { + in, out := &in.TopicNameSelector, &out.TopicNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputServiceBusTopicParameters. 
+func (in *OutputServiceBusTopicParameters) DeepCopy() *OutputServiceBusTopicParameters { + if in == nil { + return nil + } + out := new(OutputServiceBusTopicParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputServiceBusTopicSerializationInitParameters) DeepCopyInto(out *OutputServiceBusTopicSerializationInitParameters) { + *out = *in + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.FieldDelimiter != nil { + in, out := &in.FieldDelimiter, &out.FieldDelimiter + *out = new(string) + **out = **in + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputServiceBusTopicSerializationInitParameters. +func (in *OutputServiceBusTopicSerializationInitParameters) DeepCopy() *OutputServiceBusTopicSerializationInitParameters { + if in == nil { + return nil + } + out := new(OutputServiceBusTopicSerializationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OutputServiceBusTopicSerializationObservation) DeepCopyInto(out *OutputServiceBusTopicSerializationObservation) { + *out = *in + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.FieldDelimiter != nil { + in, out := &in.FieldDelimiter, &out.FieldDelimiter + *out = new(string) + **out = **in + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputServiceBusTopicSerializationObservation. +func (in *OutputServiceBusTopicSerializationObservation) DeepCopy() *OutputServiceBusTopicSerializationObservation { + if in == nil { + return nil + } + out := new(OutputServiceBusTopicSerializationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputServiceBusTopicSerializationParameters) DeepCopyInto(out *OutputServiceBusTopicSerializationParameters) { + *out = *in + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.FieldDelimiter != nil { + in, out := &in.FieldDelimiter, &out.FieldDelimiter + *out = new(string) + **out = **in + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputServiceBusTopicSerializationParameters. 
+func (in *OutputServiceBusTopicSerializationParameters) DeepCopy() *OutputServiceBusTopicSerializationParameters { + if in == nil { + return nil + } + out := new(OutputServiceBusTopicSerializationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputServiceBusTopicSpec) DeepCopyInto(out *OutputServiceBusTopicSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputServiceBusTopicSpec. +func (in *OutputServiceBusTopicSpec) DeepCopy() *OutputServiceBusTopicSpec { + if in == nil { + return nil + } + out := new(OutputServiceBusTopicSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputServiceBusTopicStatus) DeepCopyInto(out *OutputServiceBusTopicStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputServiceBusTopicStatus. +func (in *OutputServiceBusTopicStatus) DeepCopy() *OutputServiceBusTopicStatus { + if in == nil { + return nil + } + out := new(OutputServiceBusTopicStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReferenceInputBlob) DeepCopyInto(out *ReferenceInputBlob) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReferenceInputBlob. +func (in *ReferenceInputBlob) DeepCopy() *ReferenceInputBlob { + if in == nil { + return nil + } + out := new(ReferenceInputBlob) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ReferenceInputBlob) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReferenceInputBlobInitParameters) DeepCopyInto(out *ReferenceInputBlobInitParameters) { + *out = *in + if in.AuthenticationMode != nil { + in, out := &in.AuthenticationMode, &out.AuthenticationMode + *out = new(string) + **out = **in + } + if in.DateFormat != nil { + in, out := &in.DateFormat, &out.DateFormat + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PathPattern != nil { + in, out := &in.PathPattern, &out.PathPattern + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Serialization != nil { + in, out := &in.Serialization, 
&out.Serialization + *out = new(ReferenceInputBlobSerializationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StorageAccountName != nil { + in, out := &in.StorageAccountName, &out.StorageAccountName + *out = new(string) + **out = **in + } + if in.StorageAccountNameRef != nil { + in, out := &in.StorageAccountNameRef, &out.StorageAccountNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StorageAccountNameSelector != nil { + in, out := &in.StorageAccountNameSelector, &out.StorageAccountNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StorageContainerName != nil { + in, out := &in.StorageContainerName, &out.StorageContainerName + *out = new(string) + **out = **in + } + if in.StorageContainerNameRef != nil { + in, out := &in.StorageContainerNameRef, &out.StorageContainerNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StorageContainerNameSelector != nil { + in, out := &in.StorageContainerNameSelector, &out.StorageContainerNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StreamAnalyticsJobName != nil { + in, out := &in.StreamAnalyticsJobName, &out.StreamAnalyticsJobName + *out = new(string) + **out = **in + } + if in.StreamAnalyticsJobNameRef != nil { + in, out := &in.StreamAnalyticsJobNameRef, &out.StreamAnalyticsJobNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StreamAnalyticsJobNameSelector != nil { + in, out := &in.StreamAnalyticsJobNameSelector, &out.StreamAnalyticsJobNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.TimeFormat != nil { + in, out := &in.TimeFormat, &out.TimeFormat + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReferenceInputBlobInitParameters. 
+func (in *ReferenceInputBlobInitParameters) DeepCopy() *ReferenceInputBlobInitParameters { + if in == nil { + return nil + } + out := new(ReferenceInputBlobInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReferenceInputBlobList) DeepCopyInto(out *ReferenceInputBlobList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ReferenceInputBlob, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReferenceInputBlobList. +func (in *ReferenceInputBlobList) DeepCopy() *ReferenceInputBlobList { + if in == nil { + return nil + } + out := new(ReferenceInputBlobList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ReferenceInputBlobList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReferenceInputBlobObservation) DeepCopyInto(out *ReferenceInputBlobObservation) { + *out = *in + if in.AuthenticationMode != nil { + in, out := &in.AuthenticationMode, &out.AuthenticationMode + *out = new(string) + **out = **in + } + if in.DateFormat != nil { + in, out := &in.DateFormat, &out.DateFormat + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PathPattern != nil { + in, out := &in.PathPattern, &out.PathPattern + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Serialization != nil { + in, out := &in.Serialization, &out.Serialization + *out = new(ReferenceInputBlobSerializationObservation) + (*in).DeepCopyInto(*out) + } + if in.StorageAccountName != nil { + in, out := &in.StorageAccountName, &out.StorageAccountName + *out = new(string) + **out = **in + } + if in.StorageContainerName != nil { + in, out := &in.StorageContainerName, &out.StorageContainerName + *out = new(string) + **out = **in + } + if in.StreamAnalyticsJobName != nil { + in, out := &in.StreamAnalyticsJobName, &out.StreamAnalyticsJobName + *out = new(string) + **out = **in + } + if in.TimeFormat != nil { + in, out := &in.TimeFormat, &out.TimeFormat + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReferenceInputBlobObservation. +func (in *ReferenceInputBlobObservation) DeepCopy() *ReferenceInputBlobObservation { + if in == nil { + return nil + } + out := new(ReferenceInputBlobObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReferenceInputBlobParameters) DeepCopyInto(out *ReferenceInputBlobParameters) { + *out = *in + if in.AuthenticationMode != nil { + in, out := &in.AuthenticationMode, &out.AuthenticationMode + *out = new(string) + **out = **in + } + if in.DateFormat != nil { + in, out := &in.DateFormat, &out.DateFormat + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PathPattern != nil { + in, out := &in.PathPattern, &out.PathPattern + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Serialization != nil { + in, out := &in.Serialization, &out.Serialization + *out = new(ReferenceInputBlobSerializationParameters) + (*in).DeepCopyInto(*out) + } + if in.StorageAccountKeySecretRef != nil { + in, out := &in.StorageAccountKeySecretRef, &out.StorageAccountKeySecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.StorageAccountName != nil { + in, out := &in.StorageAccountName, &out.StorageAccountName + *out = new(string) + **out = **in + } + if in.StorageAccountNameRef != nil { + in, out := &in.StorageAccountNameRef, &out.StorageAccountNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StorageAccountNameSelector != nil { + in, out := &in.StorageAccountNameSelector, &out.StorageAccountNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StorageContainerName != nil { + in, out := &in.StorageContainerName, &out.StorageContainerName + *out = new(string) + **out = **in + 
} + if in.StorageContainerNameRef != nil { + in, out := &in.StorageContainerNameRef, &out.StorageContainerNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StorageContainerNameSelector != nil { + in, out := &in.StorageContainerNameSelector, &out.StorageContainerNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StreamAnalyticsJobName != nil { + in, out := &in.StreamAnalyticsJobName, &out.StreamAnalyticsJobName + *out = new(string) + **out = **in + } + if in.StreamAnalyticsJobNameRef != nil { + in, out := &in.StreamAnalyticsJobNameRef, &out.StreamAnalyticsJobNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StreamAnalyticsJobNameSelector != nil { + in, out := &in.StreamAnalyticsJobNameSelector, &out.StreamAnalyticsJobNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.TimeFormat != nil { + in, out := &in.TimeFormat, &out.TimeFormat + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReferenceInputBlobParameters. +func (in *ReferenceInputBlobParameters) DeepCopy() *ReferenceInputBlobParameters { + if in == nil { + return nil + } + out := new(ReferenceInputBlobParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReferenceInputBlobSerializationInitParameters) DeepCopyInto(out *ReferenceInputBlobSerializationInitParameters) { + *out = *in + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.FieldDelimiter != nil { + in, out := &in.FieldDelimiter, &out.FieldDelimiter + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReferenceInputBlobSerializationInitParameters. +func (in *ReferenceInputBlobSerializationInitParameters) DeepCopy() *ReferenceInputBlobSerializationInitParameters { + if in == nil { + return nil + } + out := new(ReferenceInputBlobSerializationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReferenceInputBlobSerializationObservation) DeepCopyInto(out *ReferenceInputBlobSerializationObservation) { + *out = *in + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.FieldDelimiter != nil { + in, out := &in.FieldDelimiter, &out.FieldDelimiter + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReferenceInputBlobSerializationObservation. +func (in *ReferenceInputBlobSerializationObservation) DeepCopy() *ReferenceInputBlobSerializationObservation { + if in == nil { + return nil + } + out := new(ReferenceInputBlobSerializationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReferenceInputBlobSerializationParameters) DeepCopyInto(out *ReferenceInputBlobSerializationParameters) { + *out = *in + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.FieldDelimiter != nil { + in, out := &in.FieldDelimiter, &out.FieldDelimiter + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReferenceInputBlobSerializationParameters. +func (in *ReferenceInputBlobSerializationParameters) DeepCopy() *ReferenceInputBlobSerializationParameters { + if in == nil { + return nil + } + out := new(ReferenceInputBlobSerializationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReferenceInputBlobSpec) DeepCopyInto(out *ReferenceInputBlobSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReferenceInputBlobSpec. +func (in *ReferenceInputBlobSpec) DeepCopy() *ReferenceInputBlobSpec { + if in == nil { + return nil + } + out := new(ReferenceInputBlobSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReferenceInputBlobStatus) DeepCopyInto(out *ReferenceInputBlobStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReferenceInputBlobStatus. 
+func (in *ReferenceInputBlobStatus) DeepCopy() *ReferenceInputBlobStatus { + if in == nil { + return nil + } + out := new(ReferenceInputBlobStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SerializationInitParameters) DeepCopyInto(out *SerializationInitParameters) { + *out = *in + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.FieldDelimiter != nil { + in, out := &in.FieldDelimiter, &out.FieldDelimiter + *out = new(string) + **out = **in + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerializationInitParameters. +func (in *SerializationInitParameters) DeepCopy() *SerializationInitParameters { + if in == nil { + return nil + } + out := new(SerializationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SerializationObservation) DeepCopyInto(out *SerializationObservation) { + *out = *in + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.FieldDelimiter != nil { + in, out := &in.FieldDelimiter, &out.FieldDelimiter + *out = new(string) + **out = **in + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerializationObservation. 
+func (in *SerializationObservation) DeepCopy() *SerializationObservation { + if in == nil { + return nil + } + out := new(SerializationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SerializationParameters) DeepCopyInto(out *SerializationParameters) { + *out = *in + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.FieldDelimiter != nil { + in, out := &in.FieldDelimiter, &out.FieldDelimiter + *out = new(string) + **out = **in + } + if in.Format != nil { + in, out := &in.Format, &out.Format + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerializationParameters. +func (in *SerializationParameters) DeepCopy() *SerializationParameters { + if in == nil { + return nil + } + out := new(SerializationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamInputBlob) DeepCopyInto(out *StreamInputBlob) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamInputBlob. +func (in *StreamInputBlob) DeepCopy() *StreamInputBlob { + if in == nil { + return nil + } + out := new(StreamInputBlob) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *StreamInputBlob) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamInputBlobInitParameters) DeepCopyInto(out *StreamInputBlobInitParameters) { + *out = *in + if in.DateFormat != nil { + in, out := &in.DateFormat, &out.DateFormat + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PathPattern != nil { + in, out := &in.PathPattern, &out.PathPattern + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Serialization != nil { + in, out := &in.Serialization, &out.Serialization + *out = new(StreamInputBlobSerializationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StorageAccountName != nil { + in, out := &in.StorageAccountName, &out.StorageAccountName + *out = new(string) + **out = **in + } + if in.StorageAccountNameRef != nil { + in, out := &in.StorageAccountNameRef, &out.StorageAccountNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StorageAccountNameSelector != nil { + in, out := &in.StorageAccountNameSelector, &out.StorageAccountNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StorageContainerName != nil { + in, out := &in.StorageContainerName, &out.StorageContainerName + *out = new(string) + **out = **in + } + if in.StorageContainerNameRef != nil { + in, out := 
&in.StorageContainerNameRef, &out.StorageContainerNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StorageContainerNameSelector != nil { + in, out := &in.StorageContainerNameSelector, &out.StorageContainerNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StreamAnalyticsJobName != nil { + in, out := &in.StreamAnalyticsJobName, &out.StreamAnalyticsJobName + *out = new(string) + **out = **in + } + if in.StreamAnalyticsJobNameRef != nil { + in, out := &in.StreamAnalyticsJobNameRef, &out.StreamAnalyticsJobNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StreamAnalyticsJobNameSelector != nil { + in, out := &in.StreamAnalyticsJobNameSelector, &out.StreamAnalyticsJobNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.TimeFormat != nil { + in, out := &in.TimeFormat, &out.TimeFormat + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamInputBlobInitParameters. +func (in *StreamInputBlobInitParameters) DeepCopy() *StreamInputBlobInitParameters { + if in == nil { + return nil + } + out := new(StreamInputBlobInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamInputBlobList) DeepCopyInto(out *StreamInputBlobList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StreamInputBlob, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamInputBlobList. 
+func (in *StreamInputBlobList) DeepCopy() *StreamInputBlobList { + if in == nil { + return nil + } + out := new(StreamInputBlobList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StreamInputBlobList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamInputBlobObservation) DeepCopyInto(out *StreamInputBlobObservation) { + *out = *in + if in.DateFormat != nil { + in, out := &in.DateFormat, &out.DateFormat + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PathPattern != nil { + in, out := &in.PathPattern, &out.PathPattern + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Serialization != nil { + in, out := &in.Serialization, &out.Serialization + *out = new(StreamInputBlobSerializationObservation) + (*in).DeepCopyInto(*out) + } + if in.StorageAccountName != nil { + in, out := &in.StorageAccountName, &out.StorageAccountName + *out = new(string) + **out = **in + } + if in.StorageContainerName != nil { + in, out := &in.StorageContainerName, &out.StorageContainerName + *out = new(string) + **out = **in + } + if in.StreamAnalyticsJobName != nil { + in, out := &in.StreamAnalyticsJobName, &out.StreamAnalyticsJobName + *out = new(string) + **out = **in + } + if in.TimeFormat != nil { + in, out := &in.TimeFormat, &out.TimeFormat + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
StreamInputBlobObservation. +func (in *StreamInputBlobObservation) DeepCopy() *StreamInputBlobObservation { + if in == nil { + return nil + } + out := new(StreamInputBlobObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamInputBlobParameters) DeepCopyInto(out *StreamInputBlobParameters) { + *out = *in + if in.DateFormat != nil { + in, out := &in.DateFormat, &out.DateFormat + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PathPattern != nil { + in, out := &in.PathPattern, &out.PathPattern + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Serialization != nil { + in, out := &in.Serialization, &out.Serialization + *out = new(StreamInputBlobSerializationParameters) + (*in).DeepCopyInto(*out) + } + out.StorageAccountKeySecretRef = in.StorageAccountKeySecretRef + if in.StorageAccountName != nil { + in, out := &in.StorageAccountName, &out.StorageAccountName + *out = new(string) + **out = **in + } + if in.StorageAccountNameRef != nil { + in, out := &in.StorageAccountNameRef, &out.StorageAccountNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StorageAccountNameSelector != nil { + in, out := &in.StorageAccountNameSelector, &out.StorageAccountNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StorageContainerName != nil { + in, out := 
&in.StorageContainerName, &out.StorageContainerName + *out = new(string) + **out = **in + } + if in.StorageContainerNameRef != nil { + in, out := &in.StorageContainerNameRef, &out.StorageContainerNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StorageContainerNameSelector != nil { + in, out := &in.StorageContainerNameSelector, &out.StorageContainerNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StreamAnalyticsJobName != nil { + in, out := &in.StreamAnalyticsJobName, &out.StreamAnalyticsJobName + *out = new(string) + **out = **in + } + if in.StreamAnalyticsJobNameRef != nil { + in, out := &in.StreamAnalyticsJobNameRef, &out.StreamAnalyticsJobNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StreamAnalyticsJobNameSelector != nil { + in, out := &in.StreamAnalyticsJobNameSelector, &out.StreamAnalyticsJobNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.TimeFormat != nil { + in, out := &in.TimeFormat, &out.TimeFormat + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamInputBlobParameters. +func (in *StreamInputBlobParameters) DeepCopy() *StreamInputBlobParameters { + if in == nil { + return nil + } + out := new(StreamInputBlobParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StreamInputBlobSerializationInitParameters) DeepCopyInto(out *StreamInputBlobSerializationInitParameters) { + *out = *in + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.FieldDelimiter != nil { + in, out := &in.FieldDelimiter, &out.FieldDelimiter + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamInputBlobSerializationInitParameters. +func (in *StreamInputBlobSerializationInitParameters) DeepCopy() *StreamInputBlobSerializationInitParameters { + if in == nil { + return nil + } + out := new(StreamInputBlobSerializationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamInputBlobSerializationObservation) DeepCopyInto(out *StreamInputBlobSerializationObservation) { + *out = *in + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.FieldDelimiter != nil { + in, out := &in.FieldDelimiter, &out.FieldDelimiter + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamInputBlobSerializationObservation. +func (in *StreamInputBlobSerializationObservation) DeepCopy() *StreamInputBlobSerializationObservation { + if in == nil { + return nil + } + out := new(StreamInputBlobSerializationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StreamInputBlobSerializationParameters) DeepCopyInto(out *StreamInputBlobSerializationParameters) { + *out = *in + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.FieldDelimiter != nil { + in, out := &in.FieldDelimiter, &out.FieldDelimiter + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamInputBlobSerializationParameters. +func (in *StreamInputBlobSerializationParameters) DeepCopy() *StreamInputBlobSerializationParameters { + if in == nil { + return nil + } + out := new(StreamInputBlobSerializationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamInputBlobSpec) DeepCopyInto(out *StreamInputBlobSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamInputBlobSpec. +func (in *StreamInputBlobSpec) DeepCopy() *StreamInputBlobSpec { + if in == nil { + return nil + } + out := new(StreamInputBlobSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamInputBlobStatus) DeepCopyInto(out *StreamInputBlobStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamInputBlobStatus. 
+func (in *StreamInputBlobStatus) DeepCopy() *StreamInputBlobStatus { + if in == nil { + return nil + } + out := new(StreamInputBlobStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamInputEventHub) DeepCopyInto(out *StreamInputEventHub) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamInputEventHub. +func (in *StreamInputEventHub) DeepCopy() *StreamInputEventHub { + if in == nil { + return nil + } + out := new(StreamInputEventHub) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StreamInputEventHub) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StreamInputEventHubInitParameters) DeepCopyInto(out *StreamInputEventHubInitParameters) { + *out = *in + if in.AuthenticationMode != nil { + in, out := &in.AuthenticationMode, &out.AuthenticationMode + *out = new(string) + **out = **in + } + if in.EventHubConsumerGroupName != nil { + in, out := &in.EventHubConsumerGroupName, &out.EventHubConsumerGroupName + *out = new(string) + **out = **in + } + if in.EventHubConsumerGroupNameRef != nil { + in, out := &in.EventHubConsumerGroupNameRef, &out.EventHubConsumerGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.EventHubConsumerGroupNameSelector != nil { + in, out := &in.EventHubConsumerGroupNameSelector, &out.EventHubConsumerGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.EventHubName != nil { + in, out := &in.EventHubName, &out.EventHubName + *out = new(string) + **out = **in + } + if in.EventHubNameRef != nil { + in, out := &in.EventHubNameRef, &out.EventHubNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.EventHubNameSelector != nil { + in, out := &in.EventHubNameSelector, &out.EventHubNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PartitionKey != nil { + in, out := &in.PartitionKey, &out.PartitionKey + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Serialization != nil { + in, out := &in.Serialization, &out.Serialization + *out = 
new(StreamInputEventHubSerializationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ServiceBusNamespace != nil { + in, out := &in.ServiceBusNamespace, &out.ServiceBusNamespace + *out = new(string) + **out = **in + } + if in.ServiceBusNamespaceRef != nil { + in, out := &in.ServiceBusNamespaceRef, &out.ServiceBusNamespaceRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceBusNamespaceSelector != nil { + in, out := &in.ServiceBusNamespaceSelector, &out.ServiceBusNamespaceSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SharedAccessPolicyName != nil { + in, out := &in.SharedAccessPolicyName, &out.SharedAccessPolicyName + *out = new(string) + **out = **in + } + if in.StreamAnalyticsJobName != nil { + in, out := &in.StreamAnalyticsJobName, &out.StreamAnalyticsJobName + *out = new(string) + **out = **in + } + if in.StreamAnalyticsJobNameRef != nil { + in, out := &in.StreamAnalyticsJobNameRef, &out.StreamAnalyticsJobNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StreamAnalyticsJobNameSelector != nil { + in, out := &in.StreamAnalyticsJobNameSelector, &out.StreamAnalyticsJobNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamInputEventHubInitParameters. +func (in *StreamInputEventHubInitParameters) DeepCopy() *StreamInputEventHubInitParameters { + if in == nil { + return nil + } + out := new(StreamInputEventHubInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StreamInputEventHubList) DeepCopyInto(out *StreamInputEventHubList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StreamInputEventHub, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamInputEventHubList. +func (in *StreamInputEventHubList) DeepCopy() *StreamInputEventHubList { + if in == nil { + return nil + } + out := new(StreamInputEventHubList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StreamInputEventHubList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StreamInputEventHubObservation) DeepCopyInto(out *StreamInputEventHubObservation) { + *out = *in + if in.AuthenticationMode != nil { + in, out := &in.AuthenticationMode, &out.AuthenticationMode + *out = new(string) + **out = **in + } + if in.EventHubConsumerGroupName != nil { + in, out := &in.EventHubConsumerGroupName, &out.EventHubConsumerGroupName + *out = new(string) + **out = **in + } + if in.EventHubName != nil { + in, out := &in.EventHubName, &out.EventHubName + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PartitionKey != nil { + in, out := &in.PartitionKey, &out.PartitionKey + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Serialization != nil { + in, out := &in.Serialization, &out.Serialization + *out = new(StreamInputEventHubSerializationObservation) + (*in).DeepCopyInto(*out) + } + if in.ServiceBusNamespace != nil { + in, out := &in.ServiceBusNamespace, &out.ServiceBusNamespace + *out = new(string) + **out = **in + } + if in.SharedAccessPolicyName != nil { + in, out := &in.SharedAccessPolicyName, &out.SharedAccessPolicyName + *out = new(string) + **out = **in + } + if in.StreamAnalyticsJobName != nil { + in, out := &in.StreamAnalyticsJobName, &out.StreamAnalyticsJobName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamInputEventHubObservation. +func (in *StreamInputEventHubObservation) DeepCopy() *StreamInputEventHubObservation { + if in == nil { + return nil + } + out := new(StreamInputEventHubObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *StreamInputEventHubParameters) DeepCopyInto(out *StreamInputEventHubParameters) { + *out = *in + if in.AuthenticationMode != nil { + in, out := &in.AuthenticationMode, &out.AuthenticationMode + *out = new(string) + **out = **in + } + if in.EventHubConsumerGroupName != nil { + in, out := &in.EventHubConsumerGroupName, &out.EventHubConsumerGroupName + *out = new(string) + **out = **in + } + if in.EventHubConsumerGroupNameRef != nil { + in, out := &in.EventHubConsumerGroupNameRef, &out.EventHubConsumerGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.EventHubConsumerGroupNameSelector != nil { + in, out := &in.EventHubConsumerGroupNameSelector, &out.EventHubConsumerGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.EventHubName != nil { + in, out := &in.EventHubName, &out.EventHubName + *out = new(string) + **out = **in + } + if in.EventHubNameRef != nil { + in, out := &in.EventHubNameRef, &out.EventHubNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.EventHubNameSelector != nil { + in, out := &in.EventHubNameSelector, &out.EventHubNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PartitionKey != nil { + in, out := &in.PartitionKey, &out.PartitionKey + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Serialization != nil { + in, out := &in.Serialization, &out.Serialization + 
*out = new(StreamInputEventHubSerializationParameters) + (*in).DeepCopyInto(*out) + } + if in.ServiceBusNamespace != nil { + in, out := &in.ServiceBusNamespace, &out.ServiceBusNamespace + *out = new(string) + **out = **in + } + if in.ServiceBusNamespaceRef != nil { + in, out := &in.ServiceBusNamespaceRef, &out.ServiceBusNamespaceRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceBusNamespaceSelector != nil { + in, out := &in.ServiceBusNamespaceSelector, &out.ServiceBusNamespaceSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SharedAccessPolicyKeySecretRef != nil { + in, out := &in.SharedAccessPolicyKeySecretRef, &out.SharedAccessPolicyKeySecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.SharedAccessPolicyName != nil { + in, out := &in.SharedAccessPolicyName, &out.SharedAccessPolicyName + *out = new(string) + **out = **in + } + if in.StreamAnalyticsJobName != nil { + in, out := &in.StreamAnalyticsJobName, &out.StreamAnalyticsJobName + *out = new(string) + **out = **in + } + if in.StreamAnalyticsJobNameRef != nil { + in, out := &in.StreamAnalyticsJobNameRef, &out.StreamAnalyticsJobNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StreamAnalyticsJobNameSelector != nil { + in, out := &in.StreamAnalyticsJobNameSelector, &out.StreamAnalyticsJobNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamInputEventHubParameters. +func (in *StreamInputEventHubParameters) DeepCopy() *StreamInputEventHubParameters { + if in == nil { + return nil + } + out := new(StreamInputEventHubParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StreamInputEventHubSerializationInitParameters) DeepCopyInto(out *StreamInputEventHubSerializationInitParameters) { + *out = *in + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.FieldDelimiter != nil { + in, out := &in.FieldDelimiter, &out.FieldDelimiter + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamInputEventHubSerializationInitParameters. +func (in *StreamInputEventHubSerializationInitParameters) DeepCopy() *StreamInputEventHubSerializationInitParameters { + if in == nil { + return nil + } + out := new(StreamInputEventHubSerializationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamInputEventHubSerializationObservation) DeepCopyInto(out *StreamInputEventHubSerializationObservation) { + *out = *in + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.FieldDelimiter != nil { + in, out := &in.FieldDelimiter, &out.FieldDelimiter + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamInputEventHubSerializationObservation. +func (in *StreamInputEventHubSerializationObservation) DeepCopy() *StreamInputEventHubSerializationObservation { + if in == nil { + return nil + } + out := new(StreamInputEventHubSerializationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StreamInputEventHubSerializationParameters) DeepCopyInto(out *StreamInputEventHubSerializationParameters) { + *out = *in + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.FieldDelimiter != nil { + in, out := &in.FieldDelimiter, &out.FieldDelimiter + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamInputEventHubSerializationParameters. +func (in *StreamInputEventHubSerializationParameters) DeepCopy() *StreamInputEventHubSerializationParameters { + if in == nil { + return nil + } + out := new(StreamInputEventHubSerializationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamInputEventHubSpec) DeepCopyInto(out *StreamInputEventHubSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamInputEventHubSpec. +func (in *StreamInputEventHubSpec) DeepCopy() *StreamInputEventHubSpec { + if in == nil { + return nil + } + out := new(StreamInputEventHubSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamInputEventHubStatus) DeepCopyInto(out *StreamInputEventHubStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamInputEventHubStatus. 
+func (in *StreamInputEventHubStatus) DeepCopy() *StreamInputEventHubStatus { + if in == nil { + return nil + } + out := new(StreamInputEventHubStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamInputIOTHub) DeepCopyInto(out *StreamInputIOTHub) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamInputIOTHub. +func (in *StreamInputIOTHub) DeepCopy() *StreamInputIOTHub { + if in == nil { + return nil + } + out := new(StreamInputIOTHub) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StreamInputIOTHub) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StreamInputIOTHubInitParameters) DeepCopyInto(out *StreamInputIOTHubInitParameters) { + *out = *in + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(string) + **out = **in + } + if in.EventHubConsumerGroupName != nil { + in, out := &in.EventHubConsumerGroupName, &out.EventHubConsumerGroupName + *out = new(string) + **out = **in + } + if in.EventHubConsumerGroupNameRef != nil { + in, out := &in.EventHubConsumerGroupNameRef, &out.EventHubConsumerGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.EventHubConsumerGroupNameSelector != nil { + in, out := &in.EventHubConsumerGroupNameSelector, &out.EventHubConsumerGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.IOTHubNamespace != nil { + in, out := &in.IOTHubNamespace, &out.IOTHubNamespace + *out = new(string) + **out = **in + } + if in.IOTHubNamespaceRef != nil { + in, out := &in.IOTHubNamespaceRef, &out.IOTHubNamespaceRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IOTHubNamespaceSelector != nil { + in, out := &in.IOTHubNamespaceSelector, &out.IOTHubNamespaceSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Serialization != nil { + in, out := &in.Serialization, &out.Serialization + *out = new(StreamInputIOTHubSerializationInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SharedAccessPolicyName != nil { + in, out 
:= &in.SharedAccessPolicyName, &out.SharedAccessPolicyName + *out = new(string) + **out = **in + } + if in.StreamAnalyticsJobName != nil { + in, out := &in.StreamAnalyticsJobName, &out.StreamAnalyticsJobName + *out = new(string) + **out = **in + } + if in.StreamAnalyticsJobNameRef != nil { + in, out := &in.StreamAnalyticsJobNameRef, &out.StreamAnalyticsJobNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StreamAnalyticsJobNameSelector != nil { + in, out := &in.StreamAnalyticsJobNameSelector, &out.StreamAnalyticsJobNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamInputIOTHubInitParameters. +func (in *StreamInputIOTHubInitParameters) DeepCopy() *StreamInputIOTHubInitParameters { + if in == nil { + return nil + } + out := new(StreamInputIOTHubInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamInputIOTHubList) DeepCopyInto(out *StreamInputIOTHubList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StreamInputIOTHub, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamInputIOTHubList. +func (in *StreamInputIOTHubList) DeepCopy() *StreamInputIOTHubList { + if in == nil { + return nil + } + out := new(StreamInputIOTHubList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *StreamInputIOTHubList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamInputIOTHubObservation) DeepCopyInto(out *StreamInputIOTHubObservation) { + *out = *in + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(string) + **out = **in + } + if in.EventHubConsumerGroupName != nil { + in, out := &in.EventHubConsumerGroupName, &out.EventHubConsumerGroupName + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IOTHubNamespace != nil { + in, out := &in.IOTHubNamespace, &out.IOTHubNamespace + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Serialization != nil { + in, out := &in.Serialization, &out.Serialization + *out = new(StreamInputIOTHubSerializationObservation) + (*in).DeepCopyInto(*out) + } + if in.SharedAccessPolicyName != nil { + in, out := &in.SharedAccessPolicyName, &out.SharedAccessPolicyName + *out = new(string) + **out = **in + } + if in.StreamAnalyticsJobName != nil { + in, out := &in.StreamAnalyticsJobName, &out.StreamAnalyticsJobName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamInputIOTHubObservation. +func (in *StreamInputIOTHubObservation) DeepCopy() *StreamInputIOTHubObservation { + if in == nil { + return nil + } + out := new(StreamInputIOTHubObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StreamInputIOTHubParameters) DeepCopyInto(out *StreamInputIOTHubParameters) { + *out = *in + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(string) + **out = **in + } + if in.EventHubConsumerGroupName != nil { + in, out := &in.EventHubConsumerGroupName, &out.EventHubConsumerGroupName + *out = new(string) + **out = **in + } + if in.EventHubConsumerGroupNameRef != nil { + in, out := &in.EventHubConsumerGroupNameRef, &out.EventHubConsumerGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.EventHubConsumerGroupNameSelector != nil { + in, out := &in.EventHubConsumerGroupNameSelector, &out.EventHubConsumerGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.IOTHubNamespace != nil { + in, out := &in.IOTHubNamespace, &out.IOTHubNamespace + *out = new(string) + **out = **in + } + if in.IOTHubNamespaceRef != nil { + in, out := &in.IOTHubNamespaceRef, &out.IOTHubNamespaceRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.IOTHubNamespaceSelector != nil { + in, out := &in.IOTHubNamespaceSelector, &out.IOTHubNamespaceSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Serialization != nil { + in, out := &in.Serialization, &out.Serialization + *out = new(StreamInputIOTHubSerializationParameters) + (*in).DeepCopyInto(*out) + } + out.SharedAccessPolicyKeySecretRef = 
in.SharedAccessPolicyKeySecretRef + if in.SharedAccessPolicyName != nil { + in, out := &in.SharedAccessPolicyName, &out.SharedAccessPolicyName + *out = new(string) + **out = **in + } + if in.StreamAnalyticsJobName != nil { + in, out := &in.StreamAnalyticsJobName, &out.StreamAnalyticsJobName + *out = new(string) + **out = **in + } + if in.StreamAnalyticsJobNameRef != nil { + in, out := &in.StreamAnalyticsJobNameRef, &out.StreamAnalyticsJobNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StreamAnalyticsJobNameSelector != nil { + in, out := &in.StreamAnalyticsJobNameSelector, &out.StreamAnalyticsJobNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamInputIOTHubParameters. +func (in *StreamInputIOTHubParameters) DeepCopy() *StreamInputIOTHubParameters { + if in == nil { + return nil + } + out := new(StreamInputIOTHubParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamInputIOTHubSerializationInitParameters) DeepCopyInto(out *StreamInputIOTHubSerializationInitParameters) { + *out = *in + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.FieldDelimiter != nil { + in, out := &in.FieldDelimiter, &out.FieldDelimiter + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamInputIOTHubSerializationInitParameters. 
+func (in *StreamInputIOTHubSerializationInitParameters) DeepCopy() *StreamInputIOTHubSerializationInitParameters { + if in == nil { + return nil + } + out := new(StreamInputIOTHubSerializationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamInputIOTHubSerializationObservation) DeepCopyInto(out *StreamInputIOTHubSerializationObservation) { + *out = *in + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.FieldDelimiter != nil { + in, out := &in.FieldDelimiter, &out.FieldDelimiter + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamInputIOTHubSerializationObservation. +func (in *StreamInputIOTHubSerializationObservation) DeepCopy() *StreamInputIOTHubSerializationObservation { + if in == nil { + return nil + } + out := new(StreamInputIOTHubSerializationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamInputIOTHubSerializationParameters) DeepCopyInto(out *StreamInputIOTHubSerializationParameters) { + *out = *in + if in.Encoding != nil { + in, out := &in.Encoding, &out.Encoding + *out = new(string) + **out = **in + } + if in.FieldDelimiter != nil { + in, out := &in.FieldDelimiter, &out.FieldDelimiter + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamInputIOTHubSerializationParameters. 
+func (in *StreamInputIOTHubSerializationParameters) DeepCopy() *StreamInputIOTHubSerializationParameters { + if in == nil { + return nil + } + out := new(StreamInputIOTHubSerializationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamInputIOTHubSpec) DeepCopyInto(out *StreamInputIOTHubSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamInputIOTHubSpec. +func (in *StreamInputIOTHubSpec) DeepCopy() *StreamInputIOTHubSpec { + if in == nil { + return nil + } + out := new(StreamInputIOTHubSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamInputIOTHubStatus) DeepCopyInto(out *StreamInputIOTHubStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamInputIOTHubStatus. +func (in *StreamInputIOTHubStatus) DeepCopy() *StreamInputIOTHubStatus { + if in == nil { + return nil + } + out := new(StreamInputIOTHubStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/streamanalytics/v1beta2/zz_generated.managed.go b/apis/streamanalytics/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..24e0752eb --- /dev/null +++ b/apis/streamanalytics/v1beta2/zz_generated.managed.go @@ -0,0 +1,608 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. 
+ +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this FunctionJavascriptUda. +func (mg *FunctionJavascriptUda) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this FunctionJavascriptUda. +func (mg *FunctionJavascriptUda) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this FunctionJavascriptUda. +func (mg *FunctionJavascriptUda) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this FunctionJavascriptUda. +func (mg *FunctionJavascriptUda) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this FunctionJavascriptUda. +func (mg *FunctionJavascriptUda) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this FunctionJavascriptUda. +func (mg *FunctionJavascriptUda) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this FunctionJavascriptUda. +func (mg *FunctionJavascriptUda) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this FunctionJavascriptUda. +func (mg *FunctionJavascriptUda) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this FunctionJavascriptUda. +func (mg *FunctionJavascriptUda) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this FunctionJavascriptUda. +func (mg *FunctionJavascriptUda) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this FunctionJavascriptUda. 
+func (mg *FunctionJavascriptUda) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this FunctionJavascriptUda. +func (mg *FunctionJavascriptUda) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Job. +func (mg *Job) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Job. +func (mg *Job) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Job. +func (mg *Job) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Job. +func (mg *Job) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Job. +func (mg *Job) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Job. +func (mg *Job) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Job. +func (mg *Job) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Job. +func (mg *Job) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Job. +func (mg *Job) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Job. +func (mg *Job) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Job. 
+func (mg *Job) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Job. +func (mg *Job) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this OutputBlob. +func (mg *OutputBlob) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this OutputBlob. +func (mg *OutputBlob) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this OutputBlob. +func (mg *OutputBlob) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this OutputBlob. +func (mg *OutputBlob) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this OutputBlob. +func (mg *OutputBlob) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this OutputBlob. +func (mg *OutputBlob) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this OutputBlob. +func (mg *OutputBlob) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this OutputBlob. +func (mg *OutputBlob) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this OutputBlob. +func (mg *OutputBlob) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this OutputBlob. +func (mg *OutputBlob) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this OutputBlob. 
+func (mg *OutputBlob) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this OutputBlob. +func (mg *OutputBlob) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this OutputEventHub. +func (mg *OutputEventHub) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this OutputEventHub. +func (mg *OutputEventHub) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this OutputEventHub. +func (mg *OutputEventHub) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this OutputEventHub. +func (mg *OutputEventHub) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this OutputEventHub. +func (mg *OutputEventHub) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this OutputEventHub. +func (mg *OutputEventHub) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this OutputEventHub. +func (mg *OutputEventHub) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this OutputEventHub. +func (mg *OutputEventHub) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this OutputEventHub. +func (mg *OutputEventHub) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this OutputEventHub. 
+func (mg *OutputEventHub) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this OutputEventHub. +func (mg *OutputEventHub) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this OutputEventHub. +func (mg *OutputEventHub) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this OutputServiceBusQueue. +func (mg *OutputServiceBusQueue) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this OutputServiceBusQueue. +func (mg *OutputServiceBusQueue) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this OutputServiceBusQueue. +func (mg *OutputServiceBusQueue) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this OutputServiceBusQueue. +func (mg *OutputServiceBusQueue) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this OutputServiceBusQueue. +func (mg *OutputServiceBusQueue) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this OutputServiceBusQueue. +func (mg *OutputServiceBusQueue) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this OutputServiceBusQueue. +func (mg *OutputServiceBusQueue) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this OutputServiceBusQueue. 
+func (mg *OutputServiceBusQueue) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this OutputServiceBusQueue. +func (mg *OutputServiceBusQueue) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this OutputServiceBusQueue. +func (mg *OutputServiceBusQueue) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this OutputServiceBusQueue. +func (mg *OutputServiceBusQueue) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this OutputServiceBusQueue. +func (mg *OutputServiceBusQueue) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this OutputServiceBusTopic. +func (mg *OutputServiceBusTopic) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this OutputServiceBusTopic. +func (mg *OutputServiceBusTopic) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this OutputServiceBusTopic. +func (mg *OutputServiceBusTopic) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this OutputServiceBusTopic. +func (mg *OutputServiceBusTopic) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this OutputServiceBusTopic. +func (mg *OutputServiceBusTopic) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this OutputServiceBusTopic. 
+func (mg *OutputServiceBusTopic) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this OutputServiceBusTopic. +func (mg *OutputServiceBusTopic) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this OutputServiceBusTopic. +func (mg *OutputServiceBusTopic) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this OutputServiceBusTopic. +func (mg *OutputServiceBusTopic) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this OutputServiceBusTopic. +func (mg *OutputServiceBusTopic) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this OutputServiceBusTopic. +func (mg *OutputServiceBusTopic) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this OutputServiceBusTopic. +func (mg *OutputServiceBusTopic) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this ReferenceInputBlob. +func (mg *ReferenceInputBlob) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this ReferenceInputBlob. +func (mg *ReferenceInputBlob) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this ReferenceInputBlob. +func (mg *ReferenceInputBlob) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this ReferenceInputBlob. 
+func (mg *ReferenceInputBlob) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this ReferenceInputBlob. +func (mg *ReferenceInputBlob) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this ReferenceInputBlob. +func (mg *ReferenceInputBlob) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this ReferenceInputBlob. +func (mg *ReferenceInputBlob) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this ReferenceInputBlob. +func (mg *ReferenceInputBlob) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this ReferenceInputBlob. +func (mg *ReferenceInputBlob) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this ReferenceInputBlob. +func (mg *ReferenceInputBlob) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this ReferenceInputBlob. +func (mg *ReferenceInputBlob) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this ReferenceInputBlob. +func (mg *ReferenceInputBlob) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this StreamInputBlob. +func (mg *StreamInputBlob) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this StreamInputBlob. 
+func (mg *StreamInputBlob) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this StreamInputBlob. +func (mg *StreamInputBlob) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this StreamInputBlob. +func (mg *StreamInputBlob) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this StreamInputBlob. +func (mg *StreamInputBlob) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this StreamInputBlob. +func (mg *StreamInputBlob) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this StreamInputBlob. +func (mg *StreamInputBlob) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this StreamInputBlob. +func (mg *StreamInputBlob) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this StreamInputBlob. +func (mg *StreamInputBlob) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this StreamInputBlob. +func (mg *StreamInputBlob) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this StreamInputBlob. +func (mg *StreamInputBlob) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this StreamInputBlob. +func (mg *StreamInputBlob) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this StreamInputEventHub. 
+func (mg *StreamInputEventHub) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this StreamInputEventHub. +func (mg *StreamInputEventHub) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this StreamInputEventHub. +func (mg *StreamInputEventHub) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this StreamInputEventHub. +func (mg *StreamInputEventHub) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this StreamInputEventHub. +func (mg *StreamInputEventHub) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this StreamInputEventHub. +func (mg *StreamInputEventHub) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this StreamInputEventHub. +func (mg *StreamInputEventHub) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this StreamInputEventHub. +func (mg *StreamInputEventHub) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this StreamInputEventHub. +func (mg *StreamInputEventHub) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this StreamInputEventHub. +func (mg *StreamInputEventHub) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this StreamInputEventHub. 
+func (mg *StreamInputEventHub) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this StreamInputEventHub. +func (mg *StreamInputEventHub) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this StreamInputIOTHub. +func (mg *StreamInputIOTHub) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this StreamInputIOTHub. +func (mg *StreamInputIOTHub) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this StreamInputIOTHub. +func (mg *StreamInputIOTHub) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this StreamInputIOTHub. +func (mg *StreamInputIOTHub) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this StreamInputIOTHub. +func (mg *StreamInputIOTHub) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this StreamInputIOTHub. +func (mg *StreamInputIOTHub) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this StreamInputIOTHub. +func (mg *StreamInputIOTHub) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this StreamInputIOTHub. +func (mg *StreamInputIOTHub) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this StreamInputIOTHub. +func (mg *StreamInputIOTHub) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this StreamInputIOTHub. 
+func (mg *StreamInputIOTHub) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this StreamInputIOTHub. +func (mg *StreamInputIOTHub) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this StreamInputIOTHub. +func (mg *StreamInputIOTHub) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/streamanalytics/v1beta2/zz_generated.managedlist.go b/apis/streamanalytics/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..869a0a258 --- /dev/null +++ b/apis/streamanalytics/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,98 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this FunctionJavascriptUdaList. +func (l *FunctionJavascriptUdaList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this JobList. +func (l *JobList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this OutputBlobList. +func (l *OutputBlobList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this OutputEventHubList. +func (l *OutputEventHubList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this OutputServiceBusQueueList. 
+func (l *OutputServiceBusQueueList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this OutputServiceBusTopicList. +func (l *OutputServiceBusTopicList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ReferenceInputBlobList. +func (l *ReferenceInputBlobList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this StreamInputBlobList. +func (l *StreamInputBlobList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this StreamInputEventHubList. +func (l *StreamInputEventHubList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this StreamInputIOTHubList. +func (l *StreamInputIOTHubList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/streamanalytics/v1beta2/zz_generated.resolvers.go b/apis/streamanalytics/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..a9fae78cd --- /dev/null +++ b/apis/streamanalytics/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,1374 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + + rconfig "github.com/upbound/provider-azure/apis/rconfig" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + apisresolver "github.com/upbound/provider-azure/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *FunctionJavascriptUda) ResolveReferences( // ResolveReferences of this FunctionJavascriptUda. + ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("streamanalytics.azure.upbound.io", "v1beta2", "Job", "JobList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StreamAnalyticsJobID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.StreamAnalyticsJobIDRef, + Selector: mg.Spec.ForProvider.StreamAnalyticsJobIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StreamAnalyticsJobID") + } + mg.Spec.ForProvider.StreamAnalyticsJobID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StreamAnalyticsJobIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("streamanalytics.azure.upbound.io", "v1beta2", "Job", "JobList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StreamAnalyticsJobID), + Extract: rconfig.ExtractResourceID(), 
+ Reference: mg.Spec.InitProvider.StreamAnalyticsJobIDRef, + Selector: mg.Spec.InitProvider.StreamAnalyticsJobIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StreamAnalyticsJobID") + } + mg.Spec.InitProvider.StreamAnalyticsJobID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StreamAnalyticsJobIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Job. +func (mg *Job) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this OutputBlob. 
+func (mg *OutputBlob) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StorageAccountName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.StorageAccountNameRef, + Selector: mg.Spec.ForProvider.StorageAccountNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StorageAccountName") + } + mg.Spec.ForProvider.StorageAccountName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StorageAccountNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", 
"v1beta1", "Container", "ContainerList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StorageContainerName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.StorageContainerNameRef, + Selector: mg.Spec.ForProvider.StorageContainerNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StorageContainerName") + } + mg.Spec.ForProvider.StorageContainerName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StorageContainerNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("streamanalytics.azure.upbound.io", "v1beta2", "Job", "JobList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StreamAnalyticsJobName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.StreamAnalyticsJobNameRef, + Selector: mg.Spec.ForProvider.StreamAnalyticsJobNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StreamAnalyticsJobName") + } + mg.Spec.ForProvider.StreamAnalyticsJobName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StreamAnalyticsJobNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.InitProvider.StorageAccountName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.StorageAccountNameRef, + Selector: mg.Spec.InitProvider.StorageAccountNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StorageAccountName") + } + mg.Spec.InitProvider.StorageAccountName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StorageAccountNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Container", "ContainerList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StorageContainerName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.StorageContainerNameRef, + Selector: mg.Spec.InitProvider.StorageContainerNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StorageContainerName") + } + mg.Spec.InitProvider.StorageContainerName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StorageContainerNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("streamanalytics.azure.upbound.io", "v1beta2", "Job", "JobList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StreamAnalyticsJobName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.StreamAnalyticsJobNameRef, + Selector: mg.Spec.InitProvider.StreamAnalyticsJobNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != 
nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StreamAnalyticsJobName") + } + mg.Spec.InitProvider.StreamAnalyticsJobName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StreamAnalyticsJobNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this OutputEventHub. +func (mg *OutputEventHub) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta2", "EventHub", "EventHubList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.EventHubName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.EventHubNameRef, + Selector: mg.Spec.ForProvider.EventHubNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.EventHubName") + } + mg.Spec.ForProvider.EventHubName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.EventHubNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return 
errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta2", "EventHubNamespace", "EventHubNamespaceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ServiceBusNamespace), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ServiceBusNamespaceRef, + Selector: mg.Spec.ForProvider.ServiceBusNamespaceSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ServiceBusNamespace") + } + mg.Spec.ForProvider.ServiceBusNamespace = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ServiceBusNamespaceRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta2", "EventHub", "EventHubList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.EventHubName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.EventHubNameRef, + Selector: mg.Spec.InitProvider.EventHubNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.EventHubName") + } + mg.Spec.InitProvider.EventHubName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.EventHubNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta2", "EventHubNamespace", 
"EventHubNamespaceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ServiceBusNamespace), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ServiceBusNamespaceRef, + Selector: mg.Spec.InitProvider.ServiceBusNamespaceSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ServiceBusNamespace") + } + mg.Spec.InitProvider.ServiceBusNamespace = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ServiceBusNamespaceRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this OutputServiceBusQueue. +func (mg *OutputServiceBusQueue) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("servicebus.azure.upbound.io", "v1beta1", "Queue", "QueueList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.QueueName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.QueueNameRef, + Selector: mg.Spec.ForProvider.QueueNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.QueueName") + } + mg.Spec.ForProvider.QueueName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.QueueNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err 
!= nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("servicebus.azure.upbound.io", "v1beta2", "ServiceBusNamespace", "ServiceBusNamespaceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ServiceBusNamespace), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ServiceBusNamespaceRef, + Selector: mg.Spec.ForProvider.ServiceBusNamespaceSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ServiceBusNamespace") + } + mg.Spec.ForProvider.ServiceBusNamespace = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ServiceBusNamespaceRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("streamanalytics.azure.upbound.io", "v1beta2", "Job", "JobList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StreamAnalyticsJobName), + Extract: 
reference.ExternalName(), + Reference: mg.Spec.ForProvider.StreamAnalyticsJobNameRef, + Selector: mg.Spec.ForProvider.StreamAnalyticsJobNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StreamAnalyticsJobName") + } + mg.Spec.ForProvider.StreamAnalyticsJobName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StreamAnalyticsJobNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("servicebus.azure.upbound.io", "v1beta1", "Queue", "QueueList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.QueueName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.QueueNameRef, + Selector: mg.Spec.InitProvider.QueueNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.QueueName") + } + mg.Spec.InitProvider.QueueName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.QueueNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ResourceGroupNameRef, + Selector: mg.Spec.InitProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ResourceGroupName") + } + mg.Spec.InitProvider.ResourceGroupName = 
reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("servicebus.azure.upbound.io", "v1beta2", "ServiceBusNamespace", "ServiceBusNamespaceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ServiceBusNamespace), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ServiceBusNamespaceRef, + Selector: mg.Spec.InitProvider.ServiceBusNamespaceSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ServiceBusNamespace") + } + mg.Spec.InitProvider.ServiceBusNamespace = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ServiceBusNamespaceRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("streamanalytics.azure.upbound.io", "v1beta2", "Job", "JobList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StreamAnalyticsJobName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.StreamAnalyticsJobNameRef, + Selector: mg.Spec.InitProvider.StreamAnalyticsJobNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StreamAnalyticsJobName") + } + mg.Spec.InitProvider.StreamAnalyticsJobName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StreamAnalyticsJobNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this OutputServiceBusTopic. 
+func (mg *OutputServiceBusTopic) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("servicebus.azure.upbound.io", "v1beta2", "ServiceBusNamespace", "ServiceBusNamespaceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ServiceBusNamespace), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ServiceBusNamespaceRef, + Selector: mg.Spec.ForProvider.ServiceBusNamespaceSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ServiceBusNamespace") + } + mg.Spec.ForProvider.ServiceBusNamespace = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ServiceBusNamespaceRef = rsp.ResolvedReference + { + m, l, err = 
apisresolver.GetManagedResource("streamanalytics.azure.upbound.io", "v1beta2", "Job", "JobList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StreamAnalyticsJobName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.StreamAnalyticsJobNameRef, + Selector: mg.Spec.ForProvider.StreamAnalyticsJobNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StreamAnalyticsJobName") + } + mg.Spec.ForProvider.StreamAnalyticsJobName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StreamAnalyticsJobNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("servicebus.azure.upbound.io", "v1beta1", "Topic", "TopicList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.TopicName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.TopicNameRef, + Selector: mg.Spec.ForProvider.TopicNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.TopicName") + } + mg.Spec.ForProvider.TopicName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.TopicNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.InitProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ResourceGroupNameRef, + Selector: mg.Spec.InitProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ResourceGroupName") + } + mg.Spec.InitProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("servicebus.azure.upbound.io", "v1beta2", "ServiceBusNamespace", "ServiceBusNamespaceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ServiceBusNamespace), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ServiceBusNamespaceRef, + Selector: mg.Spec.InitProvider.ServiceBusNamespaceSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ServiceBusNamespace") + } + mg.Spec.InitProvider.ServiceBusNamespace = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ServiceBusNamespaceRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("streamanalytics.azure.upbound.io", "v1beta2", "Job", "JobList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StreamAnalyticsJobName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.StreamAnalyticsJobNameRef, + Selector: mg.Spec.InitProvider.StreamAnalyticsJobNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } 
+ if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StreamAnalyticsJobName") + } + mg.Spec.InitProvider.StreamAnalyticsJobName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StreamAnalyticsJobNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("servicebus.azure.upbound.io", "v1beta1", "Topic", "TopicList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.TopicName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.TopicNameRef, + Selector: mg.Spec.InitProvider.TopicNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.TopicName") + } + mg.Spec.InitProvider.TopicName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.TopicNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this ReferenceInputBlob. 
+func (mg *ReferenceInputBlob) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StorageAccountName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.StorageAccountNameRef, + Selector: mg.Spec.ForProvider.StorageAccountNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StorageAccountName") + } + mg.Spec.ForProvider.StorageAccountName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StorageAccountNameRef = rsp.ResolvedReference + { + m, l, err = 
apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Container", "ContainerList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StorageContainerName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.StorageContainerNameRef, + Selector: mg.Spec.ForProvider.StorageContainerNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StorageContainerName") + } + mg.Spec.ForProvider.StorageContainerName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StorageContainerNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("streamanalytics.azure.upbound.io", "v1beta2", "Job", "JobList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StreamAnalyticsJobName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.StreamAnalyticsJobNameRef, + Selector: mg.Spec.ForProvider.StreamAnalyticsJobNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StreamAnalyticsJobName") + } + mg.Spec.ForProvider.StreamAnalyticsJobName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StreamAnalyticsJobNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, 
reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ResourceGroupNameRef, + Selector: mg.Spec.InitProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ResourceGroupName") + } + mg.Spec.InitProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StorageAccountName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.StorageAccountNameRef, + Selector: mg.Spec.InitProvider.StorageAccountNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StorageAccountName") + } + mg.Spec.InitProvider.StorageAccountName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StorageAccountNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Container", "ContainerList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StorageContainerName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.StorageContainerNameRef, + Selector: mg.Spec.InitProvider.StorageContainerNameSelector, + To: reference.To{List: l, Managed: m}, 
+ }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StorageContainerName") + } + mg.Spec.InitProvider.StorageContainerName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StorageContainerNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("streamanalytics.azure.upbound.io", "v1beta2", "Job", "JobList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StreamAnalyticsJobName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.StreamAnalyticsJobNameRef, + Selector: mg.Spec.InitProvider.StreamAnalyticsJobNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StreamAnalyticsJobName") + } + mg.Spec.InitProvider.StreamAnalyticsJobName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StreamAnalyticsJobNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this StreamInputBlob. 
+func (mg *StreamInputBlob) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StorageAccountName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.StorageAccountNameRef, + Selector: mg.Spec.ForProvider.StorageAccountNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StorageAccountName") + } + mg.Spec.ForProvider.StorageAccountName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StorageAccountNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", 
"v1beta1", "Container", "ContainerList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StorageContainerName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.StorageContainerNameRef, + Selector: mg.Spec.ForProvider.StorageContainerNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StorageContainerName") + } + mg.Spec.ForProvider.StorageContainerName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StorageContainerNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("streamanalytics.azure.upbound.io", "v1beta2", "Job", "JobList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StreamAnalyticsJobName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.StreamAnalyticsJobNameRef, + Selector: mg.Spec.ForProvider.StreamAnalyticsJobNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StreamAnalyticsJobName") + } + mg.Spec.ForProvider.StreamAnalyticsJobName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StreamAnalyticsJobNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.InitProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ResourceGroupNameRef, + Selector: mg.Spec.InitProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ResourceGroupName") + } + mg.Spec.InitProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StorageAccountName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.StorageAccountNameRef, + Selector: mg.Spec.InitProvider.StorageAccountNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StorageAccountName") + } + mg.Spec.InitProvider.StorageAccountName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StorageAccountNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Container", "ContainerList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StorageContainerName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.StorageContainerNameRef, + Selector: mg.Spec.InitProvider.StorageContainerNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return 
errors.Wrap(err, "mg.Spec.InitProvider.StorageContainerName") + } + mg.Spec.InitProvider.StorageContainerName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StorageContainerNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("streamanalytics.azure.upbound.io", "v1beta2", "Job", "JobList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StreamAnalyticsJobName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.StreamAnalyticsJobNameRef, + Selector: mg.Spec.InitProvider.StreamAnalyticsJobNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StreamAnalyticsJobName") + } + mg.Spec.InitProvider.StreamAnalyticsJobName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StreamAnalyticsJobNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this StreamInputEventHub. 
+func (mg *StreamInputEventHub) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta1", "ConsumerGroup", "ConsumerGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.EventHubConsumerGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.EventHubConsumerGroupNameRef, + Selector: mg.Spec.ForProvider.EventHubConsumerGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.EventHubConsumerGroupName") + } + mg.Spec.ForProvider.EventHubConsumerGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.EventHubConsumerGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta2", "EventHub", "EventHubList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.EventHubName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.EventHubNameRef, + Selector: mg.Spec.ForProvider.EventHubNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.EventHubName") + } + mg.Spec.ForProvider.EventHubName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.EventHubNameRef = rsp.ResolvedReference + { + m, l, err = 
apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta2", "EventHubNamespace", "EventHubNamespaceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ServiceBusNamespace), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ServiceBusNamespaceRef, + Selector: mg.Spec.ForProvider.ServiceBusNamespaceSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ServiceBusNamespace") + } + mg.Spec.ForProvider.ServiceBusNamespace = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ServiceBusNamespaceRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("streamanalytics.azure.upbound.io", "v1beta2", "Job", "JobList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ 
+ CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StreamAnalyticsJobName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.StreamAnalyticsJobNameRef, + Selector: mg.Spec.ForProvider.StreamAnalyticsJobNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StreamAnalyticsJobName") + } + mg.Spec.ForProvider.StreamAnalyticsJobName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StreamAnalyticsJobNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta1", "ConsumerGroup", "ConsumerGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.EventHubConsumerGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.EventHubConsumerGroupNameRef, + Selector: mg.Spec.InitProvider.EventHubConsumerGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.EventHubConsumerGroupName") + } + mg.Spec.InitProvider.EventHubConsumerGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.EventHubConsumerGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta2", "EventHub", "EventHubList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.EventHubName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.EventHubNameRef, + Selector: mg.Spec.InitProvider.EventHubNameSelector, + To: 
reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.EventHubName") + } + mg.Spec.InitProvider.EventHubName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.EventHubNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ResourceGroupNameRef, + Selector: mg.Spec.InitProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ResourceGroupName") + } + mg.Spec.InitProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta2", "EventHubNamespace", "EventHubNamespaceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ServiceBusNamespace), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ServiceBusNamespaceRef, + Selector: mg.Spec.InitProvider.ServiceBusNamespaceSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ServiceBusNamespace") + } + mg.Spec.InitProvider.ServiceBusNamespace = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ServiceBusNamespaceRef = rsp.ResolvedReference + { + m, l, 
err = apisresolver.GetManagedResource("streamanalytics.azure.upbound.io", "v1beta2", "Job", "JobList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StreamAnalyticsJobName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.StreamAnalyticsJobNameRef, + Selector: mg.Spec.InitProvider.StreamAnalyticsJobNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StreamAnalyticsJobName") + } + mg.Spec.InitProvider.StreamAnalyticsJobName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StreamAnalyticsJobNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this StreamInputIOTHub. +func (mg *StreamInputIOTHub) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta1", "ConsumerGroup", "ConsumerGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.EventHubConsumerGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.EventHubConsumerGroupNameRef, + Selector: mg.Spec.ForProvider.EventHubConsumerGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.EventHubConsumerGroupName") + } + mg.Spec.ForProvider.EventHubConsumerGroupName = reference.ToPtrValue(rsp.ResolvedValue) + 
mg.Spec.ForProvider.EventHubConsumerGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("devices.azure.upbound.io", "v1beta2", "IOTHub", "IOTHubList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.IOTHubNamespace), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.IOTHubNamespaceRef, + Selector: mg.Spec.ForProvider.IOTHubNamespaceSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.IOTHubNamespace") + } + mg.Spec.ForProvider.IOTHubNamespace = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.IOTHubNamespaceRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("streamanalytics.azure.upbound.io", "v1beta2", "Job", "JobList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = 
r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StreamAnalyticsJobName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.StreamAnalyticsJobNameRef, + Selector: mg.Spec.ForProvider.StreamAnalyticsJobNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StreamAnalyticsJobName") + } + mg.Spec.ForProvider.StreamAnalyticsJobName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StreamAnalyticsJobNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta1", "ConsumerGroup", "ConsumerGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.EventHubConsumerGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.EventHubConsumerGroupNameRef, + Selector: mg.Spec.InitProvider.EventHubConsumerGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.EventHubConsumerGroupName") + } + mg.Spec.InitProvider.EventHubConsumerGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.EventHubConsumerGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("devices.azure.upbound.io", "v1beta2", "IOTHub", "IOTHubList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.IOTHubNamespace), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.IOTHubNamespaceRef, + Selector: 
mg.Spec.InitProvider.IOTHubNamespaceSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.IOTHubNamespace") + } + mg.Spec.InitProvider.IOTHubNamespace = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.IOTHubNamespaceRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ResourceGroupNameRef, + Selector: mg.Spec.InitProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ResourceGroupName") + } + mg.Spec.InitProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("streamanalytics.azure.upbound.io", "v1beta2", "Job", "JobList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StreamAnalyticsJobName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.StreamAnalyticsJobNameRef, + Selector: mg.Spec.InitProvider.StreamAnalyticsJobNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StreamAnalyticsJobName") + } + mg.Spec.InitProvider.StreamAnalyticsJobName = reference.ToPtrValue(rsp.ResolvedValue) + 
mg.Spec.InitProvider.StreamAnalyticsJobNameRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/streamanalytics/v1beta2/zz_groupversion_info.go b/apis/streamanalytics/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..60f75c81f --- /dev/null +++ b/apis/streamanalytics/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=streamanalytics.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "streamanalytics.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/streamanalytics/v1beta2/zz_job_terraformed.go b/apis/streamanalytics/v1beta2/zz_job_terraformed.go new file mode 100755 index 000000000..0abd18d80 --- /dev/null +++ b/apis/streamanalytics/v1beta2/zz_job_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Job +func (mg *Job) GetTerraformResourceType() string { + return "azurerm_stream_analytics_job" +} + +// GetConnectionDetailsMapping for this Job +func (tr *Job) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"job_storage_account[*].account_key": "spec.forProvider.jobStorageAccount[*].accountKeySecretRef"} +} + +// GetObservation of this Job +func (tr *Job) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Job +func (tr *Job) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Job +func (tr *Job) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Job +func (tr *Job) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Job +func (tr *Job) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Job +func (tr *Job) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + 
return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Job +func (tr *Job) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Job using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Job) LateInitialize(attrs []byte) (bool, error) { + params := &JobParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Job) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/streamanalytics/v1beta2/zz_job_types.go b/apis/streamanalytics/v1beta2/zz_job_types.go new file mode 100755 index 000000000..2d9198afa --- /dev/null +++ b/apis/streamanalytics/v1beta2/zz_job_types.go @@ -0,0 +1,341 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type IdentityInitParameters struct { + + // The identity id of the user assigned identity to use when type is UserAssigned + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Stream Analytics Job. Possible values are SystemAssigned and UserAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityObservation struct { + + // The identity id of the user assigned identity to use when type is UserAssigned + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Principal ID associated with this Managed Service Identity. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID associated with this Managed Service Identity. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Stream Analytics Job. Possible values are SystemAssigned and UserAssigned. 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityParameters struct { + + // The identity id of the user assigned identity to use when type is UserAssigned + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Stream Analytics Job. Possible values are SystemAssigned and UserAssigned. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type JobInitParameters struct { + + // Specifies the compatibility level for this job - which controls certain runtime behaviours of the streaming job. Possible values are 1.0, 1.1 and 1.2. + CompatibilityLevel *string `json:"compatibilityLevel,omitempty" tf:"compatibility_level,omitempty"` + + // The policy for storing stream analytics content. Possible values are JobStorageAccount, SystemAccount. Defaults to SystemAccount. + ContentStoragePolicy *string `json:"contentStoragePolicy,omitempty" tf:"content_storage_policy,omitempty"` + + // Specifies the Data Locale of the Job, which should be a supported .NET Culture. + DataLocale *string `json:"dataLocale,omitempty" tf:"data_locale,omitempty"` + + // Specifies the maximum tolerable delay in seconds where events arriving late could be included. Supported range is -1 (indefinite) to 1814399 (20d 23h 59m 59s). Default is 5. + EventsLateArrivalMaxDelayInSeconds *float64 `json:"eventsLateArrivalMaxDelayInSeconds,omitempty" tf:"events_late_arrival_max_delay_in_seconds,omitempty"` + + // Specifies the maximum tolerable delay in seconds where out-of-order events can be adjusted to be back in order. Supported range is 0 to 599 (9m 59s). Default is 0. 
+ EventsOutOfOrderMaxDelayInSeconds *float64 `json:"eventsOutOfOrderMaxDelayInSeconds,omitempty" tf:"events_out_of_order_max_delay_in_seconds,omitempty"` + + // Specifies the policy which should be applied to events which arrive out of order in the input event stream. Possible values are Adjust and Drop. Default is Adjust. + EventsOutOfOrderPolicy *string `json:"eventsOutOfOrderPolicy,omitempty" tf:"events_out_of_order_policy,omitempty"` + + // An identity block as defined below. + Identity *IdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The details of the job storage account. A job_storage_account block as defined below. + JobStorageAccount []JobStorageAccountInitParameters `json:"jobStorageAccount,omitempty" tf:"job_storage_account,omitempty"` + + // The Azure Region in which the Resource Group exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the policy which should be applied to events which arrive at the output and cannot be written to the external storage due to being malformed (such as missing column values, column values of wrong type or size). Possible values are Drop and Stop. Default is Drop. + OutputErrorPolicy *string `json:"outputErrorPolicy,omitempty" tf:"output_error_policy,omitempty"` + + // The SKU Name to use for the Stream Analytics Job. Possible values are Standard, StandardV2. Defaults to Standard. + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // The ID of an existing Stream Analytics Cluster where the Stream Analytics Job should run. + StreamAnalyticsClusterID *string `json:"streamAnalyticsClusterId,omitempty" tf:"stream_analytics_cluster_id,omitempty"` + + // Specifies the number of streaming units that the streaming job uses. Supported values are 1, 3, 6 and multiples of 6 up to 120. 
+ StreamingUnits *float64 `json:"streamingUnits,omitempty" tf:"streaming_units,omitempty"` + + // A mapping of tags assigned to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies the query that will be run in the streaming job, written in Stream Analytics Query Language (SAQL). + TransformationQuery *string `json:"transformationQuery,omitempty" tf:"transformation_query,omitempty"` + + // The type of the Stream Analytics Job. Possible values are Cloud and Edge. Defaults to Cloud. Changing this forces a new resource to be created. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type JobObservation struct { + + // Specifies the compatibility level for this job - which controls certain runtime behaviours of the streaming job. Possible values are 1.0, 1.1 and 1.2. + CompatibilityLevel *string `json:"compatibilityLevel,omitempty" tf:"compatibility_level,omitempty"` + + // The policy for storing stream analytics content. Possible values are JobStorageAccount, SystemAccount. Defaults to SystemAccount. + ContentStoragePolicy *string `json:"contentStoragePolicy,omitempty" tf:"content_storage_policy,omitempty"` + + // Specifies the Data Locale of the Job, which should be a supported .NET Culture. + DataLocale *string `json:"dataLocale,omitempty" tf:"data_locale,omitempty"` + + // Specifies the maximum tolerable delay in seconds where events arriving late could be included. Supported range is -1 (indefinite) to 1814399 (20d 23h 59m 59s). Default is 5. + EventsLateArrivalMaxDelayInSeconds *float64 `json:"eventsLateArrivalMaxDelayInSeconds,omitempty" tf:"events_late_arrival_max_delay_in_seconds,omitempty"` + + // Specifies the maximum tolerable delay in seconds where out-of-order events can be adjusted to be back in order. Supported range is 0 to 599 (9m 59s). Default is 0. 
+ EventsOutOfOrderMaxDelayInSeconds *float64 `json:"eventsOutOfOrderMaxDelayInSeconds,omitempty" tf:"events_out_of_order_max_delay_in_seconds,omitempty"` + + // Specifies the policy which should be applied to events which arrive out of order in the input event stream. Possible values are Adjust and Drop. Default is Adjust. + EventsOutOfOrderPolicy *string `json:"eventsOutOfOrderPolicy,omitempty" tf:"events_out_of_order_policy,omitempty"` + + // The ID of the Stream Analytics Job. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. + Identity *IdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // The Job ID assigned by the Stream Analytics Job. + JobID *string `json:"jobId,omitempty" tf:"job_id,omitempty"` + + // The details of the job storage account. A job_storage_account block as defined below. + JobStorageAccount []JobStorageAccountObservation `json:"jobStorageAccount,omitempty" tf:"job_storage_account,omitempty"` + + // The Azure Region in which the Resource Group exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the policy which should be applied to events which arrive at the output and cannot be written to the external storage due to being malformed (such as missing column values, column values of wrong type or size). Possible values are Drop and Stop. Default is Drop. + OutputErrorPolicy *string `json:"outputErrorPolicy,omitempty" tf:"output_error_policy,omitempty"` + + // The name of the Resource Group where the Stream Analytics Job should exist. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // The SKU Name to use for the Stream Analytics Job. Possible values are Standard, StandardV2. Defaults to Standard. 
+ SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // The ID of an existing Stream Analytics Cluster where the Stream Analytics Job should run. + StreamAnalyticsClusterID *string `json:"streamAnalyticsClusterId,omitempty" tf:"stream_analytics_cluster_id,omitempty"` + + // Specifies the number of streaming units that the streaming job uses. Supported values are 1, 3, 6 and multiples of 6 up to 120. + StreamingUnits *float64 `json:"streamingUnits,omitempty" tf:"streaming_units,omitempty"` + + // A mapping of tags assigned to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies the query that will be run in the streaming job, written in Stream Analytics Query Language (SAQL). + TransformationQuery *string `json:"transformationQuery,omitempty" tf:"transformation_query,omitempty"` + + // The type of the Stream Analytics Job. Possible values are Cloud and Edge. Defaults to Cloud. Changing this forces a new resource to be created. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type JobParameters struct { + + // Specifies the compatibility level for this job - which controls certain runtime behaviours of the streaming job. Possible values are 1.0, 1.1 and 1.2. + // +kubebuilder:validation:Optional + CompatibilityLevel *string `json:"compatibilityLevel,omitempty" tf:"compatibility_level,omitempty"` + + // The policy for storing stream analytics content. Possible values are JobStorageAccount, SystemAccount. Defaults to SystemAccount. + // +kubebuilder:validation:Optional + ContentStoragePolicy *string `json:"contentStoragePolicy,omitempty" tf:"content_storage_policy,omitempty"` + + // Specifies the Data Locale of the Job, which should be a supported .NET Culture. 
+ // +kubebuilder:validation:Optional + DataLocale *string `json:"dataLocale,omitempty" tf:"data_locale,omitempty"` + + // Specifies the maximum tolerable delay in seconds where events arriving late could be included. Supported range is -1 (indefinite) to 1814399 (20d 23h 59m 59s). Default is 5. + // +kubebuilder:validation:Optional + EventsLateArrivalMaxDelayInSeconds *float64 `json:"eventsLateArrivalMaxDelayInSeconds,omitempty" tf:"events_late_arrival_max_delay_in_seconds,omitempty"` + + // Specifies the maximum tolerable delay in seconds where out-of-order events can be adjusted to be back in order. Supported range is 0 to 599 (9m 59s). Default is 0. + // +kubebuilder:validation:Optional + EventsOutOfOrderMaxDelayInSeconds *float64 `json:"eventsOutOfOrderMaxDelayInSeconds,omitempty" tf:"events_out_of_order_max_delay_in_seconds,omitempty"` + + // Specifies the policy which should be applied to events which arrive out of order in the input event stream. Possible values are Adjust and Drop. Default is Adjust. + // +kubebuilder:validation:Optional + EventsOutOfOrderPolicy *string `json:"eventsOutOfOrderPolicy,omitempty" tf:"events_out_of_order_policy,omitempty"` + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *IdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The details of the job storage account. A job_storage_account block as defined below. + // +kubebuilder:validation:Optional + JobStorageAccount []JobStorageAccountParameters `json:"jobStorageAccount,omitempty" tf:"job_storage_account,omitempty"` + + // The Azure Region in which the Resource Group exists. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the policy which should be applied to events which arrive at the output and cannot be written to the external storage due to being malformed (such as missing column values, column values of wrong type or size). Possible values are Drop and Stop. Default is Drop. + // +kubebuilder:validation:Optional + OutputErrorPolicy *string `json:"outputErrorPolicy,omitempty" tf:"output_error_policy,omitempty"` + + // The name of the Resource Group where the Stream Analytics Job should exist. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // The SKU Name to use for the Stream Analytics Job. Possible values are Standard, StandardV2. Defaults to Standard. + // +kubebuilder:validation:Optional + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // The ID of an existing Stream Analytics Cluster where the Stream Analytics Job should run. + // +kubebuilder:validation:Optional + StreamAnalyticsClusterID *string `json:"streamAnalyticsClusterId,omitempty" tf:"stream_analytics_cluster_id,omitempty"` + + // Specifies the number of streaming units that the streaming job uses. Supported values are 1, 3, 6 and multiples of 6 up to 120. 
+ // +kubebuilder:validation:Optional + StreamingUnits *float64 `json:"streamingUnits,omitempty" tf:"streaming_units,omitempty"` + + // A mapping of tags assigned to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies the query that will be run in the streaming job, written in Stream Analytics Query Language (SAQL). + // +kubebuilder:validation:Optional + TransformationQuery *string `json:"transformationQuery,omitempty" tf:"transformation_query,omitempty"` + + // The type of the Stream Analytics Job. Possible values are Cloud and Edge. Defaults to Cloud. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type JobStorageAccountInitParameters struct { + + // The name of the Azure storage account. + AccountName *string `json:"accountName,omitempty" tf:"account_name,omitempty"` + + // The authentication mode of the storage account. The only supported value is ConnectionString. Defaults to ConnectionString. + AuthenticationMode *string `json:"authenticationMode,omitempty" tf:"authentication_mode,omitempty"` +} + +type JobStorageAccountObservation struct { + + // The name of the Azure storage account. + AccountName *string `json:"accountName,omitempty" tf:"account_name,omitempty"` + + // The authentication mode of the storage account. The only supported value is ConnectionString. Defaults to ConnectionString. + AuthenticationMode *string `json:"authenticationMode,omitempty" tf:"authentication_mode,omitempty"` +} + +type JobStorageAccountParameters struct { + + // The account key for the Azure storage account. + // +kubebuilder:validation:Required + AccountKeySecretRef v1.SecretKeySelector `json:"accountKeySecretRef" tf:"-"` + + // The name of the Azure storage account. 
+ // +kubebuilder:validation:Optional + AccountName *string `json:"accountName" tf:"account_name,omitempty"` + + // The authentication mode of the storage account. The only supported value is ConnectionString. Defaults to ConnectionString. + // +kubebuilder:validation:Optional + AuthenticationMode *string `json:"authenticationMode,omitempty" tf:"authentication_mode,omitempty"` +} + +// JobSpec defines the desired state of Job +type JobSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider JobParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider JobInitParameters `json:"initProvider,omitempty"` +} + +// JobStatus defines the observed state of Job. +type JobStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider JobObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Job is the Schema for the Jobs API. Manages a Stream Analytics Job. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type Job struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.transformationQuery) || (has(self.initProvider) && has(self.initProvider.transformationQuery))",message="spec.forProvider.transformationQuery is a required parameter" + Spec JobSpec `json:"spec"` + Status JobStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// JobList contains a list of Jobs +type JobList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Job `json:"items"` +} + +// Repository type metadata. +var ( + Job_Kind = "Job" + Job_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Job_Kind}.String() + Job_KindAPIVersion = Job_Kind + "." 
+ CRDGroupVersion.String() + Job_GroupVersionKind = CRDGroupVersion.WithKind(Job_Kind) +) + +func init() { + SchemeBuilder.Register(&Job{}, &JobList{}) +} diff --git a/apis/streamanalytics/v1beta2/zz_outputblob_terraformed.go b/apis/streamanalytics/v1beta2/zz_outputblob_terraformed.go new file mode 100755 index 000000000..f49584d9c --- /dev/null +++ b/apis/streamanalytics/v1beta2/zz_outputblob_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this OutputBlob +func (mg *OutputBlob) GetTerraformResourceType() string { + return "azurerm_stream_analytics_output_blob" +} + +// GetConnectionDetailsMapping for this OutputBlob +func (tr *OutputBlob) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"storage_account_key": "spec.forProvider.storageAccountKeySecretRef"} +} + +// GetObservation of this OutputBlob +func (tr *OutputBlob) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this OutputBlob +func (tr *OutputBlob) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this OutputBlob +func (tr *OutputBlob) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this OutputBlob +func (tr *OutputBlob) GetParameters() (map[string]any, 
 error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this OutputBlob
+func (tr *OutputBlob) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this OutputBlob
+func (tr *OutputBlob) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this OutputBlob
+func (tr *OutputBlob) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this OutputBlob using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *OutputBlob) LateInitialize(attrs []byte) (bool, error) { + params := &OutputBlobParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *OutputBlob) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/streamanalytics/v1beta2/zz_outputblob_types.go b/apis/streamanalytics/v1beta2/zz_outputblob_types.go new file mode 100755 index 000000000..95ad93287 --- /dev/null +++ b/apis/streamanalytics/v1beta2/zz_outputblob_types.go @@ -0,0 +1,323 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type OutputBlobInitParameters struct { + + // The authentication mode for the Stream Output. Possible values are Msi and ConnectionString. Defaults to ConnectionString. + AuthenticationMode *string `json:"authenticationMode,omitempty" tf:"authentication_mode,omitempty"` + + // The maximum wait time per batch in hh:mm:ss e.g. 00:02:00 for two minutes. + BatchMaxWaitTime *string `json:"batchMaxWaitTime,omitempty" tf:"batch_max_wait_time,omitempty"` + + // The minimum number of rows per batch (must be between 0 and 1000000). + BatchMinRows *float64 `json:"batchMinRows,omitempty" tf:"batch_min_rows,omitempty"` + + // Determines whether blob blocks are either committed automatically or appended. Possible values are Append and Once. 
Defaults to Append. + BlobWriteMode *string `json:"blobWriteMode,omitempty" tf:"blob_write_mode,omitempty"` + + // The date format. Wherever {date} appears in path_pattern, the value of this property is used as the date format instead. + DateFormat *string `json:"dateFormat,omitempty" tf:"date_format,omitempty"` + + // The blob path pattern. Not a regular expression. It represents a pattern against which blob names will be matched to determine whether or not they should be included as input or output to the job. + PathPattern *string `json:"pathPattern,omitempty" tf:"path_pattern,omitempty"` + + // A serialization block as defined below. + Serialization *SerializationInitParameters `json:"serialization,omitempty" tf:"serialization,omitempty"` + + // The name of the Storage Account. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account + StorageAccountName *string `json:"storageAccountName,omitempty" tf:"storage_account_name,omitempty"` + + // Reference to a Account in storage to populate storageAccountName. + // +kubebuilder:validation:Optional + StorageAccountNameRef *v1.Reference `json:"storageAccountNameRef,omitempty" tf:"-"` + + // Selector for a Account in storage to populate storageAccountName. + // +kubebuilder:validation:Optional + StorageAccountNameSelector *v1.Selector `json:"storageAccountNameSelector,omitempty" tf:"-"` + + // The name of the Container within the Storage Account. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Container + StorageContainerName *string `json:"storageContainerName,omitempty" tf:"storage_container_name,omitempty"` + + // Reference to a Container in storage to populate storageContainerName. + // +kubebuilder:validation:Optional + StorageContainerNameRef *v1.Reference `json:"storageContainerNameRef,omitempty" tf:"-"` + + // Selector for a Container in storage to populate storageContainerName. 
+ // +kubebuilder:validation:Optional + StorageContainerNameSelector *v1.Selector `json:"storageContainerNameSelector,omitempty" tf:"-"` + + // The name of the Stream Analytics Job. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/streamanalytics/v1beta2.Job + StreamAnalyticsJobName *string `json:"streamAnalyticsJobName,omitempty" tf:"stream_analytics_job_name,omitempty"` + + // Reference to a Job in streamanalytics to populate streamAnalyticsJobName. + // +kubebuilder:validation:Optional + StreamAnalyticsJobNameRef *v1.Reference `json:"streamAnalyticsJobNameRef,omitempty" tf:"-"` + + // Selector for a Job in streamanalytics to populate streamAnalyticsJobName. + // +kubebuilder:validation:Optional + StreamAnalyticsJobNameSelector *v1.Selector `json:"streamAnalyticsJobNameSelector,omitempty" tf:"-"` + + // The time format. Wherever {time} appears in path_pattern, the value of this property is used as the time format instead. + TimeFormat *string `json:"timeFormat,omitempty" tf:"time_format,omitempty"` +} + +type OutputBlobObservation struct { + + // The authentication mode for the Stream Output. Possible values are Msi and ConnectionString. Defaults to ConnectionString. + AuthenticationMode *string `json:"authenticationMode,omitempty" tf:"authentication_mode,omitempty"` + + // The maximum wait time per batch in hh:mm:ss e.g. 00:02:00 for two minutes. + BatchMaxWaitTime *string `json:"batchMaxWaitTime,omitempty" tf:"batch_max_wait_time,omitempty"` + + // The minimum number of rows per batch (must be between 0 and 1000000). + BatchMinRows *float64 `json:"batchMinRows,omitempty" tf:"batch_min_rows,omitempty"` + + // Determines whether blob blocks are either committed automatically or appended. Possible values are Append and Once. Defaults to Append. + BlobWriteMode *string `json:"blobWriteMode,omitempty" tf:"blob_write_mode,omitempty"` + + // The date format. 
Wherever {date} appears in path_pattern, the value of this property is used as the date format instead. + DateFormat *string `json:"dateFormat,omitempty" tf:"date_format,omitempty"` + + // The ID of the Stream Analytics Output Blob Storage. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The blob path pattern. Not a regular expression. It represents a pattern against which blob names will be matched to determine whether or not they should be included as input or output to the job. + PathPattern *string `json:"pathPattern,omitempty" tf:"path_pattern,omitempty"` + + // The name of the Resource Group where the Stream Analytics Job exists. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A serialization block as defined below. + Serialization *SerializationObservation `json:"serialization,omitempty" tf:"serialization,omitempty"` + + // The name of the Storage Account. + StorageAccountName *string `json:"storageAccountName,omitempty" tf:"storage_account_name,omitempty"` + + // The name of the Container within the Storage Account. + StorageContainerName *string `json:"storageContainerName,omitempty" tf:"storage_container_name,omitempty"` + + // The name of the Stream Analytics Job. Changing this forces a new resource to be created. + StreamAnalyticsJobName *string `json:"streamAnalyticsJobName,omitempty" tf:"stream_analytics_job_name,omitempty"` + + // The time format. Wherever {time} appears in path_pattern, the value of this property is used as the time format instead. + TimeFormat *string `json:"timeFormat,omitempty" tf:"time_format,omitempty"` +} + +type OutputBlobParameters struct { + + // The authentication mode for the Stream Output. Possible values are Msi and ConnectionString. Defaults to ConnectionString. 
+ // +kubebuilder:validation:Optional + AuthenticationMode *string `json:"authenticationMode,omitempty" tf:"authentication_mode,omitempty"` + + // The maximum wait time per batch in hh:mm:ss e.g. 00:02:00 for two minutes. + // +kubebuilder:validation:Optional + BatchMaxWaitTime *string `json:"batchMaxWaitTime,omitempty" tf:"batch_max_wait_time,omitempty"` + + // The minimum number of rows per batch (must be between 0 and 1000000). + // +kubebuilder:validation:Optional + BatchMinRows *float64 `json:"batchMinRows,omitempty" tf:"batch_min_rows,omitempty"` + + // Determines whether blob blocks are either committed automatically or appended. Possible values are Append and Once. Defaults to Append. + // +kubebuilder:validation:Optional + BlobWriteMode *string `json:"blobWriteMode,omitempty" tf:"blob_write_mode,omitempty"` + + // The date format. Wherever {date} appears in path_pattern, the value of this property is used as the date format instead. + // +kubebuilder:validation:Optional + DateFormat *string `json:"dateFormat,omitempty" tf:"date_format,omitempty"` + + // The blob path pattern. Not a regular expression. It represents a pattern against which blob names will be matched to determine whether or not they should be included as input or output to the job. + // +kubebuilder:validation:Optional + PathPattern *string `json:"pathPattern,omitempty" tf:"path_pattern,omitempty"` + + // The name of the Resource Group where the Stream Analytics Job exists. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. 
+ // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A serialization block as defined below. + // +kubebuilder:validation:Optional + Serialization *SerializationParameters `json:"serialization,omitempty" tf:"serialization,omitempty"` + + // The Access Key which should be used to connect to this Storage Account. + // +kubebuilder:validation:Optional + StorageAccountKeySecretRef *v1.SecretKeySelector `json:"storageAccountKeySecretRef,omitempty" tf:"-"` + + // The name of the Storage Account. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account + // +kubebuilder:validation:Optional + StorageAccountName *string `json:"storageAccountName,omitempty" tf:"storage_account_name,omitempty"` + + // Reference to a Account in storage to populate storageAccountName. + // +kubebuilder:validation:Optional + StorageAccountNameRef *v1.Reference `json:"storageAccountNameRef,omitempty" tf:"-"` + + // Selector for a Account in storage to populate storageAccountName. + // +kubebuilder:validation:Optional + StorageAccountNameSelector *v1.Selector `json:"storageAccountNameSelector,omitempty" tf:"-"` + + // The name of the Container within the Storage Account. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Container + // +kubebuilder:validation:Optional + StorageContainerName *string `json:"storageContainerName,omitempty" tf:"storage_container_name,omitempty"` + + // Reference to a Container in storage to populate storageContainerName. 
+ // +kubebuilder:validation:Optional + StorageContainerNameRef *v1.Reference `json:"storageContainerNameRef,omitempty" tf:"-"` + + // Selector for a Container in storage to populate storageContainerName. + // +kubebuilder:validation:Optional + StorageContainerNameSelector *v1.Selector `json:"storageContainerNameSelector,omitempty" tf:"-"` + + // The name of the Stream Analytics Job. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/streamanalytics/v1beta2.Job + // +kubebuilder:validation:Optional + StreamAnalyticsJobName *string `json:"streamAnalyticsJobName,omitempty" tf:"stream_analytics_job_name,omitempty"` + + // Reference to a Job in streamanalytics to populate streamAnalyticsJobName. + // +kubebuilder:validation:Optional + StreamAnalyticsJobNameRef *v1.Reference `json:"streamAnalyticsJobNameRef,omitempty" tf:"-"` + + // Selector for a Job in streamanalytics to populate streamAnalyticsJobName. + // +kubebuilder:validation:Optional + StreamAnalyticsJobNameSelector *v1.Selector `json:"streamAnalyticsJobNameSelector,omitempty" tf:"-"` + + // The time format. Wherever {time} appears in path_pattern, the value of this property is used as the time format instead. + // +kubebuilder:validation:Optional + TimeFormat *string `json:"timeFormat,omitempty" tf:"time_format,omitempty"` +} + +type SerializationInitParameters struct { + + // The encoding of the incoming data in the case of input and the encoding of outgoing data in the case of output. It currently can only be set to UTF8. + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + // The delimiter that will be used to separate comma-separated value (CSV) records. Possible values are (space), , (comma), (tab), | (pipe) and ;. + FieldDelimiter *string `json:"fieldDelimiter,omitempty" tf:"field_delimiter,omitempty"` + + // Specifies the format of the JSON the output will be written in. 
Possible values are Array and LineSeparated. + Format *string `json:"format,omitempty" tf:"format,omitempty"` + + // The serialization format used for outgoing data streams. Possible values are Avro, Csv, Json and Parquet. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type SerializationObservation struct { + + // The encoding of the incoming data in the case of input and the encoding of outgoing data in the case of output. It currently can only be set to UTF8. + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + // The delimiter that will be used to separate comma-separated value (CSV) records. Possible values are (space), , (comma), (tab), | (pipe) and ;. + FieldDelimiter *string `json:"fieldDelimiter,omitempty" tf:"field_delimiter,omitempty"` + + // Specifies the format of the JSON the output will be written in. Possible values are Array and LineSeparated. + Format *string `json:"format,omitempty" tf:"format,omitempty"` + + // The serialization format used for outgoing data streams. Possible values are Avro, Csv, Json and Parquet. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type SerializationParameters struct { + + // The encoding of the incoming data in the case of input and the encoding of outgoing data in the case of output. It currently can only be set to UTF8. + // +kubebuilder:validation:Optional + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + // The delimiter that will be used to separate comma-separated value (CSV) records. Possible values are (space), , (comma), (tab), | (pipe) and ;. + // +kubebuilder:validation:Optional + FieldDelimiter *string `json:"fieldDelimiter,omitempty" tf:"field_delimiter,omitempty"` + + // Specifies the format of the JSON the output will be written in. Possible values are Array and LineSeparated. 
+ // +kubebuilder:validation:Optional + Format *string `json:"format,omitempty" tf:"format,omitempty"` + + // The serialization format used for outgoing data streams. Possible values are Avro, Csv, Json and Parquet. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +// OutputBlobSpec defines the desired state of OutputBlob +type OutputBlobSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider OutputBlobParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider OutputBlobInitParameters `json:"initProvider,omitempty"` +} + +// OutputBlobStatus defines the observed state of OutputBlob. +type OutputBlobStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider OutputBlobObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// OutputBlob is the Schema for the OutputBlobs API. Manages a Stream Analytics Output to Blob Storage. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type OutputBlob struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.dateFormat) || (has(self.initProvider) && has(self.initProvider.dateFormat))",message="spec.forProvider.dateFormat is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.pathPattern) || (has(self.initProvider) && has(self.initProvider.pathPattern))",message="spec.forProvider.pathPattern is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.serialization) || (has(self.initProvider) && has(self.initProvider.serialization))",message="spec.forProvider.serialization is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.timeFormat) || (has(self.initProvider) && has(self.initProvider.timeFormat))",message="spec.forProvider.timeFormat is a required parameter" + Spec OutputBlobSpec `json:"spec"` + Status OutputBlobStatus 
`json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// OutputBlobList contains a list of OutputBlobs +type OutputBlobList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []OutputBlob `json:"items"` +} + +// Repository type metadata. +var ( + OutputBlob_Kind = "OutputBlob" + OutputBlob_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: OutputBlob_Kind}.String() + OutputBlob_KindAPIVersion = OutputBlob_Kind + "." + CRDGroupVersion.String() + OutputBlob_GroupVersionKind = CRDGroupVersion.WithKind(OutputBlob_Kind) +) + +func init() { + SchemeBuilder.Register(&OutputBlob{}, &OutputBlobList{}) +} diff --git a/apis/streamanalytics/v1beta2/zz_outputeventhub_terraformed.go b/apis/streamanalytics/v1beta2/zz_outputeventhub_terraformed.go new file mode 100755 index 000000000..99748ea0d --- /dev/null +++ b/apis/streamanalytics/v1beta2/zz_outputeventhub_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this OutputEventHub +func (mg *OutputEventHub) GetTerraformResourceType() string { + return "azurerm_stream_analytics_output_eventhub" +} + +// GetConnectionDetailsMapping for this OutputEventHub +func (tr *OutputEventHub) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"shared_access_policy_key": "spec.forProvider.sharedAccessPolicyKeySecretRef"} +} + +// GetObservation of this OutputEventHub +func (tr *OutputEventHub) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this OutputEventHub +func (tr *OutputEventHub) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this OutputEventHub +func (tr *OutputEventHub) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this OutputEventHub +func (tr *OutputEventHub) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this OutputEventHub +func (tr *OutputEventHub) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this OutputEventHub +func (tr *OutputEventHub) 
 GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this OutputEventHub
+func (tr *OutputEventHub) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this OutputEventHub using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *OutputEventHub) LateInitialize(attrs []byte) (bool, error) {
+	params := &OutputEventHubParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *OutputEventHub) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/streamanalytics/v1beta2/zz_outputeventhub_types.go b/apis/streamanalytics/v1beta2/zz_outputeventhub_types.go new file mode 100755 index 000000000..e50d6b1f2 --- /dev/null +++ b/apis/streamanalytics/v1beta2/zz_outputeventhub_types.go @@ -0,0 +1,269 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type OutputEventHubInitParameters struct { + + // The authentication mode for the Stream Output. Possible values are Msi and ConnectionString. Defaults to ConnectionString. + AuthenticationMode *string `json:"authenticationMode,omitempty" tf:"authentication_mode,omitempty"` + + // The name of the Event Hub. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta2.EventHub + EventHubName *string `json:"eventhubName,omitempty" tf:"eventhub_name,omitempty"` + + // Reference to a EventHub in eventhub to populate eventhubName. + // +kubebuilder:validation:Optional + EventHubNameRef *v1.Reference `json:"eventhubNameRef,omitempty" tf:"-"` + + // Selector for a EventHub in eventhub to populate eventhubName. + // +kubebuilder:validation:Optional + EventHubNameSelector *v1.Selector `json:"eventhubNameSelector,omitempty" tf:"-"` + + // The column that is used for the Event Hub partition key. + PartitionKey *string `json:"partitionKey,omitempty" tf:"partition_key,omitempty"` + + // A list of property columns to add to the Event Hub output. 
+ PropertyColumns []*string `json:"propertyColumns,omitempty" tf:"property_columns,omitempty"` + + // A serialization block as defined below. + Serialization *OutputEventHubSerializationInitParameters `json:"serialization,omitempty" tf:"serialization,omitempty"` + + // The namespace that is associated with the desired Event Hub, Service Bus Queue, Service Bus Topic, etc. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta2.EventHubNamespace + ServiceBusNamespace *string `json:"servicebusNamespace,omitempty" tf:"servicebus_namespace,omitempty"` + + // Reference to a EventHubNamespace in eventhub to populate servicebusNamespace. + // +kubebuilder:validation:Optional + ServiceBusNamespaceRef *v1.Reference `json:"servicebusNamespaceRef,omitempty" tf:"-"` + + // Selector for a EventHubNamespace in eventhub to populate servicebusNamespace. + // +kubebuilder:validation:Optional + ServiceBusNamespaceSelector *v1.Selector `json:"servicebusNamespaceSelector,omitempty" tf:"-"` + + // The shared access policy name for the Event Hub, Service Bus Queue, Service Bus Topic, etc. Required when authentication_mode is set to ConnectionString. + SharedAccessPolicyName *string `json:"sharedAccessPolicyName,omitempty" tf:"shared_access_policy_name,omitempty"` +} + +type OutputEventHubObservation struct { + + // The authentication mode for the Stream Output. Possible values are Msi and ConnectionString. Defaults to ConnectionString. + AuthenticationMode *string `json:"authenticationMode,omitempty" tf:"authentication_mode,omitempty"` + + // The name of the Event Hub. + EventHubName *string `json:"eventhubName,omitempty" tf:"eventhub_name,omitempty"` + + // The ID of the Stream Analytics Output EventHub. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The column that is used for the Event Hub partition key. 
+ PartitionKey *string `json:"partitionKey,omitempty" tf:"partition_key,omitempty"` + + // A list of property columns to add to the Event Hub output. + PropertyColumns []*string `json:"propertyColumns,omitempty" tf:"property_columns,omitempty"` + + // The name of the Resource Group where the Stream Analytics Job exists. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A serialization block as defined below. + Serialization *OutputEventHubSerializationObservation `json:"serialization,omitempty" tf:"serialization,omitempty"` + + // The namespace that is associated with the desired Event Hub, Service Bus Queue, Service Bus Topic, etc. + ServiceBusNamespace *string `json:"servicebusNamespace,omitempty" tf:"servicebus_namespace,omitempty"` + + // The shared access policy name for the Event Hub, Service Bus Queue, Service Bus Topic, etc. Required when authentication_mode is set to ConnectionString. + SharedAccessPolicyName *string `json:"sharedAccessPolicyName,omitempty" tf:"shared_access_policy_name,omitempty"` + + // The name of the Stream Analytics Job. Changing this forces a new resource to be created. + StreamAnalyticsJobName *string `json:"streamAnalyticsJobName,omitempty" tf:"stream_analytics_job_name,omitempty"` +} + +type OutputEventHubParameters struct { + + // The authentication mode for the Stream Output. Possible values are Msi and ConnectionString. Defaults to ConnectionString. + // +kubebuilder:validation:Optional + AuthenticationMode *string `json:"authenticationMode,omitempty" tf:"authentication_mode,omitempty"` + + // The name of the Event Hub. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta2.EventHub + // +kubebuilder:validation:Optional + EventHubName *string `json:"eventhubName,omitempty" tf:"eventhub_name,omitempty"` + + // Reference to a EventHub in eventhub to populate eventhubName. 
+ // +kubebuilder:validation:Optional + EventHubNameRef *v1.Reference `json:"eventhubNameRef,omitempty" tf:"-"` + + // Selector for a EventHub in eventhub to populate eventhubName. + // +kubebuilder:validation:Optional + EventHubNameSelector *v1.Selector `json:"eventhubNameSelector,omitempty" tf:"-"` + + // The column that is used for the Event Hub partition key. + // +kubebuilder:validation:Optional + PartitionKey *string `json:"partitionKey,omitempty" tf:"partition_key,omitempty"` + + // A list of property columns to add to the Event Hub output. + // +kubebuilder:validation:Optional + PropertyColumns []*string `json:"propertyColumns,omitempty" tf:"property_columns,omitempty"` + + // The name of the Resource Group where the Stream Analytics Job exists. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A serialization block as defined below. + // +kubebuilder:validation:Optional + Serialization *OutputEventHubSerializationParameters `json:"serialization,omitempty" tf:"serialization,omitempty"` + + // The namespace that is associated with the desired Event Hub, Service Bus Queue, Service Bus Topic, etc. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta2.EventHubNamespace + // +kubebuilder:validation:Optional + ServiceBusNamespace *string `json:"servicebusNamespace,omitempty" tf:"servicebus_namespace,omitempty"` + + // Reference to a EventHubNamespace in eventhub to populate servicebusNamespace. + // +kubebuilder:validation:Optional + ServiceBusNamespaceRef *v1.Reference `json:"servicebusNamespaceRef,omitempty" tf:"-"` + + // Selector for a EventHubNamespace in eventhub to populate servicebusNamespace. + // +kubebuilder:validation:Optional + ServiceBusNamespaceSelector *v1.Selector `json:"servicebusNamespaceSelector,omitempty" tf:"-"` + + // The shared access policy key for the specified shared access policy. Required when authentication_mode is set to ConnectionString. + // +kubebuilder:validation:Optional + SharedAccessPolicyKeySecretRef *v1.SecretKeySelector `json:"sharedAccessPolicyKeySecretRef,omitempty" tf:"-"` + + // The shared access policy name for the Event Hub, Service Bus Queue, Service Bus Topic, etc. Required when authentication_mode is set to ConnectionString. + // +kubebuilder:validation:Optional + SharedAccessPolicyName *string `json:"sharedAccessPolicyName,omitempty" tf:"shared_access_policy_name,omitempty"` + + // The name of the Stream Analytics Job. Changing this forces a new resource to be created. + // +kubebuilder:validation:Required + StreamAnalyticsJobName *string `json:"streamAnalyticsJobName" tf:"stream_analytics_job_name,omitempty"` +} + +type OutputEventHubSerializationInitParameters struct { + + // The encoding of the incoming data in the case of input and the encoding of outgoing data in the case of output. It currently can only be set to UTF8. + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + // The delimiter that will be used to separate comma-separated value (CSV) records. Possible values are (space), , (comma), (tab), | (pipe) and ;. 
+ FieldDelimiter *string `json:"fieldDelimiter,omitempty" tf:"field_delimiter,omitempty"` + + // Specifies the format of the JSON the output will be written in. Possible values are Array and LineSeparated. + Format *string `json:"format,omitempty" tf:"format,omitempty"` + + // The serialization format used for outgoing data streams. Possible values are Avro, Csv, Json and Parquet. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type OutputEventHubSerializationObservation struct { + + // The encoding of the incoming data in the case of input and the encoding of outgoing data in the case of output. It currently can only be set to UTF8. + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + // The delimiter that will be used to separate comma-separated value (CSV) records. Possible values are (space), , (comma), (tab), | (pipe) and ;. + FieldDelimiter *string `json:"fieldDelimiter,omitempty" tf:"field_delimiter,omitempty"` + + // Specifies the format of the JSON the output will be written in. Possible values are Array and LineSeparated. + Format *string `json:"format,omitempty" tf:"format,omitempty"` + + // The serialization format used for outgoing data streams. Possible values are Avro, Csv, Json and Parquet. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type OutputEventHubSerializationParameters struct { + + // The encoding of the incoming data in the case of input and the encoding of outgoing data in the case of output. It currently can only be set to UTF8. + // +kubebuilder:validation:Optional + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + // The delimiter that will be used to separate comma-separated value (CSV) records. Possible values are (space), , (comma), (tab), | (pipe) and ;. + // +kubebuilder:validation:Optional + FieldDelimiter *string `json:"fieldDelimiter,omitempty" tf:"field_delimiter,omitempty"` + + // Specifies the format of the JSON the output will be written in. 
Possible values are Array and LineSeparated. + // +kubebuilder:validation:Optional + Format *string `json:"format,omitempty" tf:"format,omitempty"` + + // The serialization format used for outgoing data streams. Possible values are Avro, Csv, Json and Parquet. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +// OutputEventHubSpec defines the desired state of OutputEventHub +type OutputEventHubSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider OutputEventHubParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider OutputEventHubInitParameters `json:"initProvider,omitempty"` +} + +// OutputEventHubStatus defines the observed state of OutputEventHub. +type OutputEventHubStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider OutputEventHubObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// OutputEventHub is the Schema for the OutputEventHubs API. Manages a Stream Analytics Output to an EventHub. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type OutputEventHub struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.serialization) || (has(self.initProvider) && has(self.initProvider.serialization))",message="spec.forProvider.serialization is a required parameter" + Spec OutputEventHubSpec `json:"spec"` + Status OutputEventHubStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// OutputEventHubList contains a list of OutputEventHubs +type OutputEventHubList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []OutputEventHub `json:"items"` +} + +// Repository type metadata. +var ( + OutputEventHub_Kind = "OutputEventHub" + OutputEventHub_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: OutputEventHub_Kind}.String() + OutputEventHub_KindAPIVersion = OutputEventHub_Kind + "." 
+ CRDGroupVersion.String() + OutputEventHub_GroupVersionKind = CRDGroupVersion.WithKind(OutputEventHub_Kind) +) + +func init() { + SchemeBuilder.Register(&OutputEventHub{}, &OutputEventHubList{}) +} diff --git a/apis/streamanalytics/v1beta2/zz_outputservicebusqueue_terraformed.go b/apis/streamanalytics/v1beta2/zz_outputservicebusqueue_terraformed.go new file mode 100755 index 000000000..08ed0d1f9 --- /dev/null +++ b/apis/streamanalytics/v1beta2/zz_outputservicebusqueue_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this OutputServiceBusQueue +func (mg *OutputServiceBusQueue) GetTerraformResourceType() string { + return "azurerm_stream_analytics_output_servicebus_queue" +} + +// GetConnectionDetailsMapping for this OutputServiceBusQueue +func (tr *OutputServiceBusQueue) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"shared_access_policy_key": "spec.forProvider.sharedAccessPolicyKeySecretRef"} +} + +// GetObservation of this OutputServiceBusQueue +func (tr *OutputServiceBusQueue) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this OutputServiceBusQueue +func (tr *OutputServiceBusQueue) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this OutputServiceBusQueue +func (tr 
*OutputServiceBusQueue) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this OutputServiceBusQueue +func (tr *OutputServiceBusQueue) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this OutputServiceBusQueue +func (tr *OutputServiceBusQueue) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this OutputServiceBusQueue +func (tr *OutputServiceBusQueue) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this OutputServiceBusQueue +func (tr *OutputServiceBusQueue) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this OutputServiceBusQueue using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *OutputServiceBusQueue) LateInitialize(attrs []byte) (bool, error) { + params := &OutputServiceBusQueueParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *OutputServiceBusQueue) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/streamanalytics/v1beta2/zz_outputservicebusqueue_types.go b/apis/streamanalytics/v1beta2/zz_outputservicebusqueue_types.go new file mode 100755 index 000000000..f83d2a854 --- /dev/null +++ b/apis/streamanalytics/v1beta2/zz_outputservicebusqueue_types.go @@ -0,0 +1,316 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type OutputServiceBusQueueInitParameters struct { + + // The authentication mode for the Stream Output. Possible values are Msi and ConnectionString. Defaults to ConnectionString. 
+ AuthenticationMode *string `json:"authenticationMode,omitempty" tf:"authentication_mode,omitempty"` + + // The name of the Stream Output. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A list of property columns to add to the Service Bus Queue output. + PropertyColumns []*string `json:"propertyColumns,omitempty" tf:"property_columns,omitempty"` + + // The name of the Service Bus Queue. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/servicebus/v1beta1.Queue + QueueName *string `json:"queueName,omitempty" tf:"queue_name,omitempty"` + + // Reference to a Queue in servicebus to populate queueName. + // +kubebuilder:validation:Optional + QueueNameRef *v1.Reference `json:"queueNameRef,omitempty" tf:"-"` + + // Selector for a Queue in servicebus to populate queueName. + // +kubebuilder:validation:Optional + QueueNameSelector *v1.Selector `json:"queueNameSelector,omitempty" tf:"-"` + + // The name of the Resource Group where the Stream Analytics Job exists. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A serialization block as defined below. 
+ Serialization *OutputServiceBusQueueSerializationInitParameters `json:"serialization,omitempty" tf:"serialization,omitempty"` + + // The namespace that is associated with the desired Event Hub, Service Bus Queue, Service Bus Topic, etc. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/servicebus/v1beta2.ServiceBusNamespace + ServiceBusNamespace *string `json:"servicebusNamespace,omitempty" tf:"servicebus_namespace,omitempty"` + + // Reference to a ServiceBusNamespace in servicebus to populate servicebusNamespace. + // +kubebuilder:validation:Optional + ServiceBusNamespaceRef *v1.Reference `json:"servicebusNamespaceRef,omitempty" tf:"-"` + + // Selector for a ServiceBusNamespace in servicebus to populate servicebusNamespace. + // +kubebuilder:validation:Optional + ServiceBusNamespaceSelector *v1.Selector `json:"servicebusNamespaceSelector,omitempty" tf:"-"` + + // The shared access policy name for the Event Hub, Service Bus Queue, Service Bus Topic, etc. Required if authentication_mode is ConnectionString. + SharedAccessPolicyName *string `json:"sharedAccessPolicyName,omitempty" tf:"shared_access_policy_name,omitempty"` + + // The name of the Stream Analytics Job. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/streamanalytics/v1beta2.Job + StreamAnalyticsJobName *string `json:"streamAnalyticsJobName,omitempty" tf:"stream_analytics_job_name,omitempty"` + + // Reference to a Job in streamanalytics to populate streamAnalyticsJobName. + // +kubebuilder:validation:Optional + StreamAnalyticsJobNameRef *v1.Reference `json:"streamAnalyticsJobNameRef,omitempty" tf:"-"` + + // Selector for a Job in streamanalytics to populate streamAnalyticsJobName. 
+ // +kubebuilder:validation:Optional + StreamAnalyticsJobNameSelector *v1.Selector `json:"streamAnalyticsJobNameSelector,omitempty" tf:"-"` + + // A key-value pair of system property columns that will be attached to the outgoing messages for the Service Bus Queue Output. + // +mapType=granular + SystemPropertyColumns map[string]*string `json:"systemPropertyColumns,omitempty" tf:"system_property_columns,omitempty"` +} + +type OutputServiceBusQueueObservation struct { + + // The authentication mode for the Stream Output. Possible values are Msi and ConnectionString. Defaults to ConnectionString. + AuthenticationMode *string `json:"authenticationMode,omitempty" tf:"authentication_mode,omitempty"` + + // The ID of the Stream Analytics Output ServiceBus Queue. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name of the Stream Output. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A list of property columns to add to the Service Bus Queue output. + PropertyColumns []*string `json:"propertyColumns,omitempty" tf:"property_columns,omitempty"` + + // The name of the Service Bus Queue. + QueueName *string `json:"queueName,omitempty" tf:"queue_name,omitempty"` + + // The name of the Resource Group where the Stream Analytics Job exists. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A serialization block as defined below. + Serialization *OutputServiceBusQueueSerializationObservation `json:"serialization,omitempty" tf:"serialization,omitempty"` + + // The namespace that is associated with the desired Event Hub, Service Bus Queue, Service Bus Topic, etc. + ServiceBusNamespace *string `json:"servicebusNamespace,omitempty" tf:"servicebus_namespace,omitempty"` + + // The shared access policy name for the Event Hub, Service Bus Queue, Service Bus Topic, etc. 
Required if authentication_mode is ConnectionString. + SharedAccessPolicyName *string `json:"sharedAccessPolicyName,omitempty" tf:"shared_access_policy_name,omitempty"` + + // The name of the Stream Analytics Job. Changing this forces a new resource to be created. + StreamAnalyticsJobName *string `json:"streamAnalyticsJobName,omitempty" tf:"stream_analytics_job_name,omitempty"` + + // A key-value pair of system property columns that will be attached to the outgoing messages for the Service Bus Queue Output. + // +mapType=granular + SystemPropertyColumns map[string]*string `json:"systemPropertyColumns,omitempty" tf:"system_property_columns,omitempty"` +} + +type OutputServiceBusQueueParameters struct { + + // The authentication mode for the Stream Output. Possible values are Msi and ConnectionString. Defaults to ConnectionString. + // +kubebuilder:validation:Optional + AuthenticationMode *string `json:"authenticationMode,omitempty" tf:"authentication_mode,omitempty"` + + // The name of the Stream Output. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A list of property columns to add to the Service Bus Queue output. + // +kubebuilder:validation:Optional + PropertyColumns []*string `json:"propertyColumns,omitempty" tf:"property_columns,omitempty"` + + // The name of the Service Bus Queue. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/servicebus/v1beta1.Queue + // +kubebuilder:validation:Optional + QueueName *string `json:"queueName,omitempty" tf:"queue_name,omitempty"` + + // Reference to a Queue in servicebus to populate queueName. + // +kubebuilder:validation:Optional + QueueNameRef *v1.Reference `json:"queueNameRef,omitempty" tf:"-"` + + // Selector for a Queue in servicebus to populate queueName. 
+ // +kubebuilder:validation:Optional + QueueNameSelector *v1.Selector `json:"queueNameSelector,omitempty" tf:"-"` + + // The name of the Resource Group where the Stream Analytics Job exists. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A serialization block as defined below. + // +kubebuilder:validation:Optional + Serialization *OutputServiceBusQueueSerializationParameters `json:"serialization,omitempty" tf:"serialization,omitempty"` + + // The namespace that is associated with the desired Event Hub, Service Bus Queue, Service Bus Topic, etc. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/servicebus/v1beta2.ServiceBusNamespace + // +kubebuilder:validation:Optional + ServiceBusNamespace *string `json:"servicebusNamespace,omitempty" tf:"servicebus_namespace,omitempty"` + + // Reference to a ServiceBusNamespace in servicebus to populate servicebusNamespace. + // +kubebuilder:validation:Optional + ServiceBusNamespaceRef *v1.Reference `json:"servicebusNamespaceRef,omitempty" tf:"-"` + + // Selector for a ServiceBusNamespace in servicebus to populate servicebusNamespace. + // +kubebuilder:validation:Optional + ServiceBusNamespaceSelector *v1.Selector `json:"servicebusNamespaceSelector,omitempty" tf:"-"` + + // The shared access policy key for the specified shared access policy. 
Required if authentication_mode is ConnectionString. + // +kubebuilder:validation:Optional + SharedAccessPolicyKeySecretRef *v1.SecretKeySelector `json:"sharedAccessPolicyKeySecretRef,omitempty" tf:"-"` + + // The shared access policy name for the Event Hub, Service Bus Queue, Service Bus Topic, etc. Required if authentication_mode is ConnectionString. + // +kubebuilder:validation:Optional + SharedAccessPolicyName *string `json:"sharedAccessPolicyName,omitempty" tf:"shared_access_policy_name,omitempty"` + + // The name of the Stream Analytics Job. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/streamanalytics/v1beta2.Job + // +kubebuilder:validation:Optional + StreamAnalyticsJobName *string `json:"streamAnalyticsJobName,omitempty" tf:"stream_analytics_job_name,omitempty"` + + // Reference to a Job in streamanalytics to populate streamAnalyticsJobName. + // +kubebuilder:validation:Optional + StreamAnalyticsJobNameRef *v1.Reference `json:"streamAnalyticsJobNameRef,omitempty" tf:"-"` + + // Selector for a Job in streamanalytics to populate streamAnalyticsJobName. + // +kubebuilder:validation:Optional + StreamAnalyticsJobNameSelector *v1.Selector `json:"streamAnalyticsJobNameSelector,omitempty" tf:"-"` + + // A key-value pair of system property columns that will be attached to the outgoing messages for the Service Bus Queue Output. + // +kubebuilder:validation:Optional + // +mapType=granular + SystemPropertyColumns map[string]*string `json:"systemPropertyColumns,omitempty" tf:"system_property_columns,omitempty"` +} + +type OutputServiceBusQueueSerializationInitParameters struct { + + // The encoding of the incoming data in the case of input and the encoding of outgoing data in the case of output. It currently can only be set to UTF8. 
+ Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + // The delimiter that will be used to separate comma-separated value (CSV) records. Possible values are (space), , (comma), (tab), | (pipe) and ;. + FieldDelimiter *string `json:"fieldDelimiter,omitempty" tf:"field_delimiter,omitempty"` + + // Specifies the format of the JSON the output will be written in. Possible values are Array and LineSeparated. + Format *string `json:"format,omitempty" tf:"format,omitempty"` + + // The serialization format used for outgoing data streams. Possible values are Avro, Csv, Json and Parquet. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type OutputServiceBusQueueSerializationObservation struct { + + // The encoding of the incoming data in the case of input and the encoding of outgoing data in the case of output. It currently can only be set to UTF8. + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + // The delimiter that will be used to separate comma-separated value (CSV) records. Possible values are (space), , (comma), (tab), | (pipe) and ;. + FieldDelimiter *string `json:"fieldDelimiter,omitempty" tf:"field_delimiter,omitempty"` + + // Specifies the format of the JSON the output will be written in. Possible values are Array and LineSeparated. + Format *string `json:"format,omitempty" tf:"format,omitempty"` + + // The serialization format used for outgoing data streams. Possible values are Avro, Csv, Json and Parquet. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type OutputServiceBusQueueSerializationParameters struct { + + // The encoding of the incoming data in the case of input and the encoding of outgoing data in the case of output. It currently can only be set to UTF8. + // +kubebuilder:validation:Optional + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + // The delimiter that will be used to separate comma-separated value (CSV) records. 
Possible values are (space), , (comma), (tab), | (pipe) and ;. + // +kubebuilder:validation:Optional + FieldDelimiter *string `json:"fieldDelimiter,omitempty" tf:"field_delimiter,omitempty"` + + // Specifies the format of the JSON the output will be written in. Possible values are Array and LineSeparated. + // +kubebuilder:validation:Optional + Format *string `json:"format,omitempty" tf:"format,omitempty"` + + // The serialization format used for outgoing data streams. Possible values are Avro, Csv, Json and Parquet. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +// OutputServiceBusQueueSpec defines the desired state of OutputServiceBusQueue +type OutputServiceBusQueueSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider OutputServiceBusQueueParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider OutputServiceBusQueueInitParameters `json:"initProvider,omitempty"` +} + +// OutputServiceBusQueueStatus defines the observed state of OutputServiceBusQueue. +type OutputServiceBusQueueStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider OutputServiceBusQueueObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// OutputServiceBusQueue is the Schema for the OutputServiceBusQueues API. 
Manages a Stream Analytics Output to a ServiceBus Queue. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type OutputServiceBusQueue struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.serialization) || (has(self.initProvider) && has(self.initProvider.serialization))",message="spec.forProvider.serialization is a required parameter" + Spec OutputServiceBusQueueSpec `json:"spec"` + Status OutputServiceBusQueueStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// OutputServiceBusQueueList contains a list of OutputServiceBusQueues +type OutputServiceBusQueueList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []OutputServiceBusQueue `json:"items"` +} + +// Repository type metadata. 
+var ( + OutputServiceBusQueue_Kind = "OutputServiceBusQueue" + OutputServiceBusQueue_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: OutputServiceBusQueue_Kind}.String() + OutputServiceBusQueue_KindAPIVersion = OutputServiceBusQueue_Kind + "." + CRDGroupVersion.String() + OutputServiceBusQueue_GroupVersionKind = CRDGroupVersion.WithKind(OutputServiceBusQueue_Kind) +) + +func init() { + SchemeBuilder.Register(&OutputServiceBusQueue{}, &OutputServiceBusQueueList{}) +} diff --git a/apis/streamanalytics/v1beta2/zz_outputservicebustopic_terraformed.go b/apis/streamanalytics/v1beta2/zz_outputservicebustopic_terraformed.go new file mode 100755 index 000000000..ed8c3ca0f --- /dev/null +++ b/apis/streamanalytics/v1beta2/zz_outputservicebustopic_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this OutputServiceBusTopic +func (mg *OutputServiceBusTopic) GetTerraformResourceType() string { + return "azurerm_stream_analytics_output_servicebus_topic" +} + +// GetConnectionDetailsMapping for this OutputServiceBusTopic +func (tr *OutputServiceBusTopic) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"shared_access_policy_key": "spec.forProvider.sharedAccessPolicyKeySecretRef"} +} + +// GetObservation of this OutputServiceBusTopic +func (tr *OutputServiceBusTopic) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this OutputServiceBusTopic +func (tr *OutputServiceBusTopic) SetObservation(obs 
map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this OutputServiceBusTopic +func (tr *OutputServiceBusTopic) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this OutputServiceBusTopic +func (tr *OutputServiceBusTopic) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this OutputServiceBusTopic +func (tr *OutputServiceBusTopic) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this OutputServiceBusTopic +func (tr *OutputServiceBusTopic) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this OutputServiceBusTopic +func (tr *OutputServiceBusTopic) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. 
As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this OutputServiceBusTopic using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *OutputServiceBusTopic) LateInitialize(attrs []byte) (bool, error) { + params := &OutputServiceBusTopicParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *OutputServiceBusTopic) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/streamanalytics/v1beta2/zz_outputservicebustopic_types.go b/apis/streamanalytics/v1beta2/zz_outputservicebustopic_types.go new file mode 100755 index 000000000..21f10b47a --- /dev/null +++ b/apis/streamanalytics/v1beta2/zz_outputservicebustopic_types.go @@ -0,0 +1,316 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type OutputServiceBusTopicInitParameters struct { + + // The authentication mode for the Stream Output. Possible values are Msi and ConnectionString. Defaults to ConnectionString. + AuthenticationMode *string `json:"authenticationMode,omitempty" tf:"authentication_mode,omitempty"` + + // The name of the Stream Output. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A list of property columns to add to the Service Bus Topic output. + PropertyColumns []*string `json:"propertyColumns,omitempty" tf:"property_columns,omitempty"` + + // The name of the Resource Group where the Stream Analytics Job exists. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A serialization block as defined below. + Serialization *OutputServiceBusTopicSerializationInitParameters `json:"serialization,omitempty" tf:"serialization,omitempty"` + + // The namespace that is associated with the desired Event Hub, Service Bus Topic, Service Bus Topic, etc. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/servicebus/v1beta2.ServiceBusNamespace + ServiceBusNamespace *string `json:"servicebusNamespace,omitempty" tf:"servicebus_namespace,omitempty"` + + // Reference to a ServiceBusNamespace in servicebus to populate servicebusNamespace. + // +kubebuilder:validation:Optional + ServiceBusNamespaceRef *v1.Reference `json:"servicebusNamespaceRef,omitempty" tf:"-"` + + // Selector for a ServiceBusNamespace in servicebus to populate servicebusNamespace. + // +kubebuilder:validation:Optional + ServiceBusNamespaceSelector *v1.Selector `json:"servicebusNamespaceSelector,omitempty" tf:"-"` + + // The shared access policy name for the Event Hub, Service Bus Queue, Service Bus Topic, etc. Required if authentication_mode is ConnectionString. + SharedAccessPolicyName *string `json:"sharedAccessPolicyName,omitempty" tf:"shared_access_policy_name,omitempty"` + + // The name of the Stream Analytics Job. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/streamanalytics/v1beta2.Job + StreamAnalyticsJobName *string `json:"streamAnalyticsJobName,omitempty" tf:"stream_analytics_job_name,omitempty"` + + // Reference to a Job in streamanalytics to populate streamAnalyticsJobName. + // +kubebuilder:validation:Optional + StreamAnalyticsJobNameRef *v1.Reference `json:"streamAnalyticsJobNameRef,omitempty" tf:"-"` + + // Selector for a Job in streamanalytics to populate streamAnalyticsJobName. + // +kubebuilder:validation:Optional + StreamAnalyticsJobNameSelector *v1.Selector `json:"streamAnalyticsJobNameSelector,omitempty" tf:"-"` + + // A key-value pair of system property columns that will be attached to the outgoing messages for the Service Bus Topic Output. 
+ // +mapType=granular + SystemPropertyColumns map[string]*string `json:"systemPropertyColumns,omitempty" tf:"system_property_columns,omitempty"` + + // The name of the Service Bus Topic. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/servicebus/v1beta1.Topic + TopicName *string `json:"topicName,omitempty" tf:"topic_name,omitempty"` + + // Reference to a Topic in servicebus to populate topicName. + // +kubebuilder:validation:Optional + TopicNameRef *v1.Reference `json:"topicNameRef,omitempty" tf:"-"` + + // Selector for a Topic in servicebus to populate topicName. + // +kubebuilder:validation:Optional + TopicNameSelector *v1.Selector `json:"topicNameSelector,omitempty" tf:"-"` +} + +type OutputServiceBusTopicObservation struct { + + // The authentication mode for the Stream Output. Possible values are Msi and ConnectionString. Defaults to ConnectionString. + AuthenticationMode *string `json:"authenticationMode,omitempty" tf:"authentication_mode,omitempty"` + + // The ID of the Stream Analytics Output ServiceBus Topic. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name of the Stream Output. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A list of property columns to add to the Service Bus Topic output. + PropertyColumns []*string `json:"propertyColumns,omitempty" tf:"property_columns,omitempty"` + + // The name of the Resource Group where the Stream Analytics Job exists. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A serialization block as defined below. + Serialization *OutputServiceBusTopicSerializationObservation `json:"serialization,omitempty" tf:"serialization,omitempty"` + + // The namespace that is associated with the desired Event Hub, Service Bus Topic, Service Bus Topic, etc. 
+ ServiceBusNamespace *string `json:"servicebusNamespace,omitempty" tf:"servicebus_namespace,omitempty"` + + // The shared access policy name for the Event Hub, Service Bus Queue, Service Bus Topic, etc. Required if authentication_mode is ConnectionString. + SharedAccessPolicyName *string `json:"sharedAccessPolicyName,omitempty" tf:"shared_access_policy_name,omitempty"` + + // The name of the Stream Analytics Job. Changing this forces a new resource to be created. + StreamAnalyticsJobName *string `json:"streamAnalyticsJobName,omitempty" tf:"stream_analytics_job_name,omitempty"` + + // A key-value pair of system property columns that will be attached to the outgoing messages for the Service Bus Topic Output. + // +mapType=granular + SystemPropertyColumns map[string]*string `json:"systemPropertyColumns,omitempty" tf:"system_property_columns,omitempty"` + + // The name of the Service Bus Topic. + TopicName *string `json:"topicName,omitempty" tf:"topic_name,omitempty"` +} + +type OutputServiceBusTopicParameters struct { + + // The authentication mode for the Stream Output. Possible values are Msi and ConnectionString. Defaults to ConnectionString. + // +kubebuilder:validation:Optional + AuthenticationMode *string `json:"authenticationMode,omitempty" tf:"authentication_mode,omitempty"` + + // The name of the Stream Output. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A list of property columns to add to the Service Bus Topic output. + // +kubebuilder:validation:Optional + PropertyColumns []*string `json:"propertyColumns,omitempty" tf:"property_columns,omitempty"` + + // The name of the Resource Group where the Stream Analytics Job exists. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A serialization block as defined below. + // +kubebuilder:validation:Optional + Serialization *OutputServiceBusTopicSerializationParameters `json:"serialization,omitempty" tf:"serialization,omitempty"` + + // The namespace that is associated with the desired Event Hub, Service Bus Topic, Service Bus Topic, etc. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/servicebus/v1beta2.ServiceBusNamespace + // +kubebuilder:validation:Optional + ServiceBusNamespace *string `json:"servicebusNamespace,omitempty" tf:"servicebus_namespace,omitempty"` + + // Reference to a ServiceBusNamespace in servicebus to populate servicebusNamespace. + // +kubebuilder:validation:Optional + ServiceBusNamespaceRef *v1.Reference `json:"servicebusNamespaceRef,omitempty" tf:"-"` + + // Selector for a ServiceBusNamespace in servicebus to populate servicebusNamespace. + // +kubebuilder:validation:Optional + ServiceBusNamespaceSelector *v1.Selector `json:"servicebusNamespaceSelector,omitempty" tf:"-"` + + // The shared access policy key for the specified shared access policy. Required if authentication_mode is ConnectionString. 
+ // +kubebuilder:validation:Optional + SharedAccessPolicyKeySecretRef *v1.SecretKeySelector `json:"sharedAccessPolicyKeySecretRef,omitempty" tf:"-"` + + // The shared access policy name for the Event Hub, Service Bus Queue, Service Bus Topic, etc. Required if authentication_mode is ConnectionString. + // +kubebuilder:validation:Optional + SharedAccessPolicyName *string `json:"sharedAccessPolicyName,omitempty" tf:"shared_access_policy_name,omitempty"` + + // The name of the Stream Analytics Job. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/streamanalytics/v1beta2.Job + // +kubebuilder:validation:Optional + StreamAnalyticsJobName *string `json:"streamAnalyticsJobName,omitempty" tf:"stream_analytics_job_name,omitempty"` + + // Reference to a Job in streamanalytics to populate streamAnalyticsJobName. + // +kubebuilder:validation:Optional + StreamAnalyticsJobNameRef *v1.Reference `json:"streamAnalyticsJobNameRef,omitempty" tf:"-"` + + // Selector for a Job in streamanalytics to populate streamAnalyticsJobName. + // +kubebuilder:validation:Optional + StreamAnalyticsJobNameSelector *v1.Selector `json:"streamAnalyticsJobNameSelector,omitempty" tf:"-"` + + // A key-value pair of system property columns that will be attached to the outgoing messages for the Service Bus Topic Output. + // +kubebuilder:validation:Optional + // +mapType=granular + SystemPropertyColumns map[string]*string `json:"systemPropertyColumns,omitempty" tf:"system_property_columns,omitempty"` + + // The name of the Service Bus Topic. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/servicebus/v1beta1.Topic + // +kubebuilder:validation:Optional + TopicName *string `json:"topicName,omitempty" tf:"topic_name,omitempty"` + + // Reference to a Topic in servicebus to populate topicName. 
+ // +kubebuilder:validation:Optional + TopicNameRef *v1.Reference `json:"topicNameRef,omitempty" tf:"-"` + + // Selector for a Topic in servicebus to populate topicName. + // +kubebuilder:validation:Optional + TopicNameSelector *v1.Selector `json:"topicNameSelector,omitempty" tf:"-"` +} + +type OutputServiceBusTopicSerializationInitParameters struct { + + // The encoding of the incoming data in the case of input and the encoding of outgoing data in the case of output. It currently can only be set to UTF8. + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + // The delimiter that will be used to separate comma-separated value (CSV) records. Possible values are (space), , (comma), (tab), | (pipe) and ;. + FieldDelimiter *string `json:"fieldDelimiter,omitempty" tf:"field_delimiter,omitempty"` + + // Specifies the format of the JSON the output will be written in. Possible values are Array and LineSeparated. + Format *string `json:"format,omitempty" tf:"format,omitempty"` + + // The serialization format used for outgoing data streams. Possible values are Avro, Csv, Json and Parquet. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type OutputServiceBusTopicSerializationObservation struct { + + // The encoding of the incoming data in the case of input and the encoding of outgoing data in the case of output. It currently can only be set to UTF8. + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + // The delimiter that will be used to separate comma-separated value (CSV) records. Possible values are (space), , (comma), (tab), | (pipe) and ;. + FieldDelimiter *string `json:"fieldDelimiter,omitempty" tf:"field_delimiter,omitempty"` + + // Specifies the format of the JSON the output will be written in. Possible values are Array and LineSeparated. + Format *string `json:"format,omitempty" tf:"format,omitempty"` + + // The serialization format used for outgoing data streams. 
Possible values are Avro, Csv, Json and Parquet. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type OutputServiceBusTopicSerializationParameters struct { + + // The encoding of the incoming data in the case of input and the encoding of outgoing data in the case of output. It currently can only be set to UTF8. + // +kubebuilder:validation:Optional + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + // The delimiter that will be used to separate comma-separated value (CSV) records. Possible values are (space), , (comma), (tab), | (pipe) and ;. + // +kubebuilder:validation:Optional + FieldDelimiter *string `json:"fieldDelimiter,omitempty" tf:"field_delimiter,omitempty"` + + // Specifies the format of the JSON the output will be written in. Possible values are Array and LineSeparated. + // +kubebuilder:validation:Optional + Format *string `json:"format,omitempty" tf:"format,omitempty"` + + // The serialization format used for outgoing data streams. Possible values are Avro, Csv, Json and Parquet. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +// OutputServiceBusTopicSpec defines the desired state of OutputServiceBusTopic +type OutputServiceBusTopicSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider OutputServiceBusTopicParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider OutputServiceBusTopicInitParameters `json:"initProvider,omitempty"` +} + +// OutputServiceBusTopicStatus defines the observed state of OutputServiceBusTopic. +type OutputServiceBusTopicStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider OutputServiceBusTopicObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// OutputServiceBusTopic is the Schema for the OutputServiceBusTopics API. Manages a Stream Analytics Output to a ServiceBus Topic. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type OutputServiceBusTopic struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.serialization) || (has(self.initProvider) && 
has(self.initProvider.serialization))",message="spec.forProvider.serialization is a required parameter" + Spec OutputServiceBusTopicSpec `json:"spec"` + Status OutputServiceBusTopicStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// OutputServiceBusTopicList contains a list of OutputServiceBusTopics +type OutputServiceBusTopicList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []OutputServiceBusTopic `json:"items"` +} + +// Repository type metadata. +var ( + OutputServiceBusTopic_Kind = "OutputServiceBusTopic" + OutputServiceBusTopic_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: OutputServiceBusTopic_Kind}.String() + OutputServiceBusTopic_KindAPIVersion = OutputServiceBusTopic_Kind + "." + CRDGroupVersion.String() + OutputServiceBusTopic_GroupVersionKind = CRDGroupVersion.WithKind(OutputServiceBusTopic_Kind) +) + +func init() { + SchemeBuilder.Register(&OutputServiceBusTopic{}, &OutputServiceBusTopicList{}) +} diff --git a/apis/streamanalytics/v1beta2/zz_referenceinputblob_terraformed.go b/apis/streamanalytics/v1beta2/zz_referenceinputblob_terraformed.go new file mode 100755 index 000000000..e45a5dc64 --- /dev/null +++ b/apis/streamanalytics/v1beta2/zz_referenceinputblob_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this ReferenceInputBlob +func (mg *ReferenceInputBlob) GetTerraformResourceType() string { + return "azurerm_stream_analytics_reference_input_blob" +} + +// GetConnectionDetailsMapping for this ReferenceInputBlob +func (tr *ReferenceInputBlob) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"storage_account_key": "spec.forProvider.storageAccountKeySecretRef"} +} + +// GetObservation of this ReferenceInputBlob +func (tr *ReferenceInputBlob) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this ReferenceInputBlob +func (tr *ReferenceInputBlob) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this ReferenceInputBlob +func (tr *ReferenceInputBlob) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this ReferenceInputBlob +func (tr *ReferenceInputBlob) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this ReferenceInputBlob +func (tr *ReferenceInputBlob) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of 
this ReferenceInputBlob +func (tr *ReferenceInputBlob) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this ReferenceInputBlob +func (tr *ReferenceInputBlob) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this ReferenceInputBlob using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *ReferenceInputBlob) LateInitialize(attrs []byte) (bool, error) { + params := &ReferenceInputBlobParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *ReferenceInputBlob) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/streamanalytics/v1beta2/zz_referenceinputblob_types.go b/apis/streamanalytics/v1beta2/zz_referenceinputblob_types.go new file mode 100755 index 000000000..3460db99d --- /dev/null +++ b/apis/streamanalytics/v1beta2/zz_referenceinputblob_types.go @@ -0,0 +1,306 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ReferenceInputBlobInitParameters struct { + + // The authentication mode for the Stream Analytics Reference Input. Possible values are Msi and ConnectionString. Defaults to ConnectionString. + AuthenticationMode *string `json:"authenticationMode,omitempty" tf:"authentication_mode,omitempty"` + + // The date format. Wherever {date} appears in path_pattern, the value of this property is used as the date format instead. + DateFormat *string `json:"dateFormat,omitempty" tf:"date_format,omitempty"` + + // The name of the Reference Input Blob. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The blob path pattern. Not a regular expression. It represents a pattern against which blob names will be matched to determine whether or not they should be included as input or output to the job. + PathPattern *string `json:"pathPattern,omitempty" tf:"path_pattern,omitempty"` + + // The name of the Resource Group where the Stream Analytics Job exists. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A serialization block as defined below. + Serialization *ReferenceInputBlobSerializationInitParameters `json:"serialization,omitempty" tf:"serialization,omitempty"` + + // The name of the Storage Account that has the blob container with reference data. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account + StorageAccountName *string `json:"storageAccountName,omitempty" tf:"storage_account_name,omitempty"` + + // Reference to a Account in storage to populate storageAccountName. + // +kubebuilder:validation:Optional + StorageAccountNameRef *v1.Reference `json:"storageAccountNameRef,omitempty" tf:"-"` + + // Selector for a Account in storage to populate storageAccountName. + // +kubebuilder:validation:Optional + StorageAccountNameSelector *v1.Selector `json:"storageAccountNameSelector,omitempty" tf:"-"` + + // The name of the Container within the Storage Account. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Container + StorageContainerName *string `json:"storageContainerName,omitempty" tf:"storage_container_name,omitempty"` + + // Reference to a Container in storage to populate storageContainerName. 
+ // +kubebuilder:validation:Optional + StorageContainerNameRef *v1.Reference `json:"storageContainerNameRef,omitempty" tf:"-"` + + // Selector for a Container in storage to populate storageContainerName. + // +kubebuilder:validation:Optional + StorageContainerNameSelector *v1.Selector `json:"storageContainerNameSelector,omitempty" tf:"-"` + + // The name of the Stream Analytics Job. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/streamanalytics/v1beta2.Job + StreamAnalyticsJobName *string `json:"streamAnalyticsJobName,omitempty" tf:"stream_analytics_job_name,omitempty"` + + // Reference to a Job in streamanalytics to populate streamAnalyticsJobName. + // +kubebuilder:validation:Optional + StreamAnalyticsJobNameRef *v1.Reference `json:"streamAnalyticsJobNameRef,omitempty" tf:"-"` + + // Selector for a Job in streamanalytics to populate streamAnalyticsJobName. + // +kubebuilder:validation:Optional + StreamAnalyticsJobNameSelector *v1.Selector `json:"streamAnalyticsJobNameSelector,omitempty" tf:"-"` + + // The time format. Wherever {time} appears in path_pattern, the value of this property is used as the time format instead. + TimeFormat *string `json:"timeFormat,omitempty" tf:"time_format,omitempty"` +} + +type ReferenceInputBlobObservation struct { + + // The authentication mode for the Stream Analytics Reference Input. Possible values are Msi and ConnectionString. Defaults to ConnectionString. + AuthenticationMode *string `json:"authenticationMode,omitempty" tf:"authentication_mode,omitempty"` + + // The date format. Wherever {date} appears in path_pattern, the value of this property is used as the date format instead. + DateFormat *string `json:"dateFormat,omitempty" tf:"date_format,omitempty"` + + // The ID of the Stream Analytics Reference Input Blob. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name of the Reference Input Blob. 
Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The blob path pattern. Not a regular expression. It represents a pattern against which blob names will be matched to determine whether or not they should be included as input or output to the job. + PathPattern *string `json:"pathPattern,omitempty" tf:"path_pattern,omitempty"` + + // The name of the Resource Group where the Stream Analytics Job exists. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A serialization block as defined below. + Serialization *ReferenceInputBlobSerializationObservation `json:"serialization,omitempty" tf:"serialization,omitempty"` + + // The name of the Storage Account that has the blob container with reference data. + StorageAccountName *string `json:"storageAccountName,omitempty" tf:"storage_account_name,omitempty"` + + // The name of the Container within the Storage Account. + StorageContainerName *string `json:"storageContainerName,omitempty" tf:"storage_container_name,omitempty"` + + // The name of the Stream Analytics Job. Changing this forces a new resource to be created. + StreamAnalyticsJobName *string `json:"streamAnalyticsJobName,omitempty" tf:"stream_analytics_job_name,omitempty"` + + // The time format. Wherever {time} appears in path_pattern, the value of this property is used as the time format instead. + TimeFormat *string `json:"timeFormat,omitempty" tf:"time_format,omitempty"` +} + +type ReferenceInputBlobParameters struct { + + // The authentication mode for the Stream Analytics Reference Input. Possible values are Msi and ConnectionString. Defaults to ConnectionString. + // +kubebuilder:validation:Optional + AuthenticationMode *string `json:"authenticationMode,omitempty" tf:"authentication_mode,omitempty"` + + // The date format. 
Wherever {date} appears in path_pattern, the value of this property is used as the date format instead. + // +kubebuilder:validation:Optional + DateFormat *string `json:"dateFormat,omitempty" tf:"date_format,omitempty"` + + // The name of the Reference Input Blob. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The blob path pattern. Not a regular expression. It represents a pattern against which blob names will be matched to determine whether or not they should be included as input or output to the job. + // +kubebuilder:validation:Optional + PathPattern *string `json:"pathPattern,omitempty" tf:"path_pattern,omitempty"` + + // The name of the Resource Group where the Stream Analytics Job exists. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A serialization block as defined below. + // +kubebuilder:validation:Optional + Serialization *ReferenceInputBlobSerializationParameters `json:"serialization,omitempty" tf:"serialization,omitempty"` + + // The Access Key which should be used to connect to this Storage Account. Required if authentication_mode is ConnectionString. 
+ // +kubebuilder:validation:Optional + StorageAccountKeySecretRef *v1.SecretKeySelector `json:"storageAccountKeySecretRef,omitempty" tf:"-"` + + // The name of the Storage Account that has the blob container with reference data. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account + // +kubebuilder:validation:Optional + StorageAccountName *string `json:"storageAccountName,omitempty" tf:"storage_account_name,omitempty"` + + // Reference to a Account in storage to populate storageAccountName. + // +kubebuilder:validation:Optional + StorageAccountNameRef *v1.Reference `json:"storageAccountNameRef,omitempty" tf:"-"` + + // Selector for a Account in storage to populate storageAccountName. + // +kubebuilder:validation:Optional + StorageAccountNameSelector *v1.Selector `json:"storageAccountNameSelector,omitempty" tf:"-"` + + // The name of the Container within the Storage Account. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Container + // +kubebuilder:validation:Optional + StorageContainerName *string `json:"storageContainerName,omitempty" tf:"storage_container_name,omitempty"` + + // Reference to a Container in storage to populate storageContainerName. + // +kubebuilder:validation:Optional + StorageContainerNameRef *v1.Reference `json:"storageContainerNameRef,omitempty" tf:"-"` + + // Selector for a Container in storage to populate storageContainerName. + // +kubebuilder:validation:Optional + StorageContainerNameSelector *v1.Selector `json:"storageContainerNameSelector,omitempty" tf:"-"` + + // The name of the Stream Analytics Job. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/streamanalytics/v1beta2.Job + // +kubebuilder:validation:Optional + StreamAnalyticsJobName *string `json:"streamAnalyticsJobName,omitempty" tf:"stream_analytics_job_name,omitempty"` + + // Reference to a Job in streamanalytics to populate streamAnalyticsJobName. + // +kubebuilder:validation:Optional + StreamAnalyticsJobNameRef *v1.Reference `json:"streamAnalyticsJobNameRef,omitempty" tf:"-"` + + // Selector for a Job in streamanalytics to populate streamAnalyticsJobName. + // +kubebuilder:validation:Optional + StreamAnalyticsJobNameSelector *v1.Selector `json:"streamAnalyticsJobNameSelector,omitempty" tf:"-"` + + // The time format. Wherever {time} appears in path_pattern, the value of this property is used as the time format instead. + // +kubebuilder:validation:Optional + TimeFormat *string `json:"timeFormat,omitempty" tf:"time_format,omitempty"` +} + +type ReferenceInputBlobSerializationInitParameters struct { + + // The encoding of the incoming data in the case of input and the encoding of outgoing data in the case of output. It currently can only be set to UTF8. + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + // The delimiter that will be used to separate comma-separated value (CSV) records. Possible values are (space), , (comma), (tab), | (pipe) and ;. + FieldDelimiter *string `json:"fieldDelimiter,omitempty" tf:"field_delimiter,omitempty"` + + // The serialization format used for the reference data. Possible values are Avro, Csv and Json. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ReferenceInputBlobSerializationObservation struct { + + // The encoding of the incoming data in the case of input and the encoding of outgoing data in the case of output. It currently can only be set to UTF8. 
+ Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + // The delimiter that will be used to separate comma-separated value (CSV) records. Possible values are (space), , (comma), (tab), | (pipe) and ;. + FieldDelimiter *string `json:"fieldDelimiter,omitempty" tf:"field_delimiter,omitempty"` + + // The serialization format used for the reference data. Possible values are Avro, Csv and Json. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ReferenceInputBlobSerializationParameters struct { + + // The encoding of the incoming data in the case of input and the encoding of outgoing data in the case of output. It currently can only be set to UTF8. + // +kubebuilder:validation:Optional + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + // The delimiter that will be used to separate comma-separated value (CSV) records. Possible values are (space), , (comma), (tab), | (pipe) and ;. + // +kubebuilder:validation:Optional + FieldDelimiter *string `json:"fieldDelimiter,omitempty" tf:"field_delimiter,omitempty"` + + // The serialization format used for the reference data. Possible values are Avro, Csv and Json. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +// ReferenceInputBlobSpec defines the desired state of ReferenceInputBlob +type ReferenceInputBlobSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ReferenceInputBlobParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider ReferenceInputBlobInitParameters `json:"initProvider,omitempty"` +} + +// ReferenceInputBlobStatus defines the observed state of ReferenceInputBlob. +type ReferenceInputBlobStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ReferenceInputBlobObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ReferenceInputBlob is the Schema for the ReferenceInputBlobs API. Manages a Stream Analytics Reference Input Blob. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type ReferenceInputBlob struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.dateFormat) || (has(self.initProvider) && has(self.initProvider.dateFormat))",message="spec.forProvider.dateFormat is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // 
+kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.pathPattern) || (has(self.initProvider) && has(self.initProvider.pathPattern))",message="spec.forProvider.pathPattern is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.serialization) || (has(self.initProvider) && has(self.initProvider.serialization))",message="spec.forProvider.serialization is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.timeFormat) || (has(self.initProvider) && has(self.initProvider.timeFormat))",message="spec.forProvider.timeFormat is a required parameter" + Spec ReferenceInputBlobSpec `json:"spec"` + Status ReferenceInputBlobStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ReferenceInputBlobList contains a list of ReferenceInputBlobs +type ReferenceInputBlobList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ReferenceInputBlob `json:"items"` +} + +// Repository type metadata. +var ( + ReferenceInputBlob_Kind = "ReferenceInputBlob" + ReferenceInputBlob_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: ReferenceInputBlob_Kind}.String() + ReferenceInputBlob_KindAPIVersion = ReferenceInputBlob_Kind + "." 
+ CRDGroupVersion.String() + ReferenceInputBlob_GroupVersionKind = CRDGroupVersion.WithKind(ReferenceInputBlob_Kind) +) + +func init() { + SchemeBuilder.Register(&ReferenceInputBlob{}, &ReferenceInputBlobList{}) +} diff --git a/apis/streamanalytics/v1beta2/zz_streaminputblob_terraformed.go b/apis/streamanalytics/v1beta2/zz_streaminputblob_terraformed.go new file mode 100755 index 000000000..205960d0d --- /dev/null +++ b/apis/streamanalytics/v1beta2/zz_streaminputblob_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this StreamInputBlob +func (mg *StreamInputBlob) GetTerraformResourceType() string { + return "azurerm_stream_analytics_stream_input_blob" +} + +// GetConnectionDetailsMapping for this StreamInputBlob +func (tr *StreamInputBlob) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"storage_account_key": "spec.forProvider.storageAccountKeySecretRef"} +} + +// GetObservation of this StreamInputBlob +func (tr *StreamInputBlob) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this StreamInputBlob +func (tr *StreamInputBlob) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this StreamInputBlob +func (tr *StreamInputBlob) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + 
return *tr.Status.AtProvider.ID +} + +// GetParameters of this StreamInputBlob +func (tr *StreamInputBlob) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this StreamInputBlob +func (tr *StreamInputBlob) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this StreamInputBlob +func (tr *StreamInputBlob) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this StreamInputBlob +func (tr *StreamInputBlob) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this StreamInputBlob using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *StreamInputBlob) LateInitialize(attrs []byte) (bool, error) { + params := &StreamInputBlobParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *StreamInputBlob) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/streamanalytics/v1beta2/zz_streaminputblob_types.go b/apis/streamanalytics/v1beta2/zz_streaminputblob_types.go new file mode 100755 index 000000000..dfa7e947b --- /dev/null +++ b/apis/streamanalytics/v1beta2/zz_streaminputblob_types.go @@ -0,0 +1,297 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type StreamInputBlobInitParameters struct { + + // The date format. Wherever {date} appears in path_pattern, the value of this property is used as the date format instead. 
+ DateFormat *string `json:"dateFormat,omitempty" tf:"date_format,omitempty"` + + // The name of the Stream Input Blob. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The blob path pattern. Not a regular expression. It represents a pattern against which blob names will be matched to determine whether or not they should be included as input or output to the job. + PathPattern *string `json:"pathPattern,omitempty" tf:"path_pattern,omitempty"` + + // The name of the Resource Group where the Stream Analytics Job exists. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A serialization block as defined below. + Serialization *StreamInputBlobSerializationInitParameters `json:"serialization,omitempty" tf:"serialization,omitempty"` + + // The name of the Storage Account. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account + StorageAccountName *string `json:"storageAccountName,omitempty" tf:"storage_account_name,omitempty"` + + // Reference to a Account in storage to populate storageAccountName. + // +kubebuilder:validation:Optional + StorageAccountNameRef *v1.Reference `json:"storageAccountNameRef,omitempty" tf:"-"` + + // Selector for a Account in storage to populate storageAccountName. 
+ // +kubebuilder:validation:Optional + StorageAccountNameSelector *v1.Selector `json:"storageAccountNameSelector,omitempty" tf:"-"` + + // The name of the Container within the Storage Account. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Container + StorageContainerName *string `json:"storageContainerName,omitempty" tf:"storage_container_name,omitempty"` + + // Reference to a Container in storage to populate storageContainerName. + // +kubebuilder:validation:Optional + StorageContainerNameRef *v1.Reference `json:"storageContainerNameRef,omitempty" tf:"-"` + + // Selector for a Container in storage to populate storageContainerName. + // +kubebuilder:validation:Optional + StorageContainerNameSelector *v1.Selector `json:"storageContainerNameSelector,omitempty" tf:"-"` + + // The name of the Stream Analytics Job. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/streamanalytics/v1beta2.Job + StreamAnalyticsJobName *string `json:"streamAnalyticsJobName,omitempty" tf:"stream_analytics_job_name,omitempty"` + + // Reference to a Job in streamanalytics to populate streamAnalyticsJobName. + // +kubebuilder:validation:Optional + StreamAnalyticsJobNameRef *v1.Reference `json:"streamAnalyticsJobNameRef,omitempty" tf:"-"` + + // Selector for a Job in streamanalytics to populate streamAnalyticsJobName. + // +kubebuilder:validation:Optional + StreamAnalyticsJobNameSelector *v1.Selector `json:"streamAnalyticsJobNameSelector,omitempty" tf:"-"` + + // The time format. Wherever {time} appears in path_pattern, the value of this property is used as the time format instead. + TimeFormat *string `json:"timeFormat,omitempty" tf:"time_format,omitempty"` +} + +type StreamInputBlobObservation struct { + + // The date format. Wherever {date} appears in path_pattern, the value of this property is used as the date format instead. 
+ DateFormat *string `json:"dateFormat,omitempty" tf:"date_format,omitempty"` + + // The ID of the Stream Analytics Stream Input Blob. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name of the Stream Input Blob. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The blob path pattern. Not a regular expression. It represents a pattern against which blob names will be matched to determine whether or not they should be included as input or output to the job. + PathPattern *string `json:"pathPattern,omitempty" tf:"path_pattern,omitempty"` + + // The name of the Resource Group where the Stream Analytics Job exists. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A serialization block as defined below. + Serialization *StreamInputBlobSerializationObservation `json:"serialization,omitempty" tf:"serialization,omitempty"` + + // The name of the Storage Account. + StorageAccountName *string `json:"storageAccountName,omitempty" tf:"storage_account_name,omitempty"` + + // The name of the Container within the Storage Account. + StorageContainerName *string `json:"storageContainerName,omitempty" tf:"storage_container_name,omitempty"` + + // The name of the Stream Analytics Job. Changing this forces a new resource to be created. + StreamAnalyticsJobName *string `json:"streamAnalyticsJobName,omitempty" tf:"stream_analytics_job_name,omitempty"` + + // The time format. Wherever {time} appears in path_pattern, the value of this property is used as the time format instead. + TimeFormat *string `json:"timeFormat,omitempty" tf:"time_format,omitempty"` +} + +type StreamInputBlobParameters struct { + + // The date format. Wherever {date} appears in path_pattern, the value of this property is used as the date format instead. 
+ // +kubebuilder:validation:Optional + DateFormat *string `json:"dateFormat,omitempty" tf:"date_format,omitempty"` + + // The name of the Stream Input Blob. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The blob path pattern. Not a regular expression. It represents a pattern against which blob names will be matched to determine whether or not they should be included as input or output to the job. + // +kubebuilder:validation:Optional + PathPattern *string `json:"pathPattern,omitempty" tf:"path_pattern,omitempty"` + + // The name of the Resource Group where the Stream Analytics Job exists. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A serialization block as defined below. + // +kubebuilder:validation:Optional + Serialization *StreamInputBlobSerializationParameters `json:"serialization,omitempty" tf:"serialization,omitempty"` + + // The Access Key which should be used to connect to this Storage Account. + // +kubebuilder:validation:Optional + StorageAccountKeySecretRef v1.SecretKeySelector `json:"storageAccountKeySecretRef" tf:"-"` + + // The name of the Storage Account. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account + // +kubebuilder:validation:Optional + StorageAccountName *string `json:"storageAccountName,omitempty" tf:"storage_account_name,omitempty"` + + // Reference to a Account in storage to populate storageAccountName. + // +kubebuilder:validation:Optional + StorageAccountNameRef *v1.Reference `json:"storageAccountNameRef,omitempty" tf:"-"` + + // Selector for a Account in storage to populate storageAccountName. + // +kubebuilder:validation:Optional + StorageAccountNameSelector *v1.Selector `json:"storageAccountNameSelector,omitempty" tf:"-"` + + // The name of the Container within the Storage Account. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Container + // +kubebuilder:validation:Optional + StorageContainerName *string `json:"storageContainerName,omitempty" tf:"storage_container_name,omitempty"` + + // Reference to a Container in storage to populate storageContainerName. + // +kubebuilder:validation:Optional + StorageContainerNameRef *v1.Reference `json:"storageContainerNameRef,omitempty" tf:"-"` + + // Selector for a Container in storage to populate storageContainerName. + // +kubebuilder:validation:Optional + StorageContainerNameSelector *v1.Selector `json:"storageContainerNameSelector,omitempty" tf:"-"` + + // The name of the Stream Analytics Job. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/streamanalytics/v1beta2.Job + // +kubebuilder:validation:Optional + StreamAnalyticsJobName *string `json:"streamAnalyticsJobName,omitempty" tf:"stream_analytics_job_name,omitempty"` + + // Reference to a Job in streamanalytics to populate streamAnalyticsJobName. 
+ // +kubebuilder:validation:Optional + StreamAnalyticsJobNameRef *v1.Reference `json:"streamAnalyticsJobNameRef,omitempty" tf:"-"` + + // Selector for a Job in streamanalytics to populate streamAnalyticsJobName. + // +kubebuilder:validation:Optional + StreamAnalyticsJobNameSelector *v1.Selector `json:"streamAnalyticsJobNameSelector,omitempty" tf:"-"` + + // The time format. Wherever {time} appears in path_pattern, the value of this property is used as the time format instead. + // +kubebuilder:validation:Optional + TimeFormat *string `json:"timeFormat,omitempty" tf:"time_format,omitempty"` +} + +type StreamInputBlobSerializationInitParameters struct { + + // The encoding of the incoming data in the case of input and the encoding of outgoing data in the case of output. It currently can only be set to UTF8. + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + // The delimiter that will be used to separate comma-separated value (CSV) records. Possible values are (space), , (comma), (tab), | (pipe) and ;. + FieldDelimiter *string `json:"fieldDelimiter,omitempty" tf:"field_delimiter,omitempty"` + + // The serialization format used for incoming data streams. Possible values are Avro, Csv and Json. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type StreamInputBlobSerializationObservation struct { + + // The encoding of the incoming data in the case of input and the encoding of outgoing data in the case of output. It currently can only be set to UTF8. + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + // The delimiter that will be used to separate comma-separated value (CSV) records. Possible values are (space), , (comma), (tab), | (pipe) and ;. + FieldDelimiter *string `json:"fieldDelimiter,omitempty" tf:"field_delimiter,omitempty"` + + // The serialization format used for incoming data streams. Possible values are Avro, Csv and Json. 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type StreamInputBlobSerializationParameters struct { + + // The encoding of the incoming data in the case of input and the encoding of outgoing data in the case of output. It currently can only be set to UTF8. + // +kubebuilder:validation:Optional + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + // The delimiter that will be used to separate comma-separated value (CSV) records. Possible values are (space), , (comma), (tab), | (pipe) and ;. + // +kubebuilder:validation:Optional + FieldDelimiter *string `json:"fieldDelimiter,omitempty" tf:"field_delimiter,omitempty"` + + // The serialization format used for incoming data streams. Possible values are Avro, Csv and Json. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +// StreamInputBlobSpec defines the desired state of StreamInputBlob +type StreamInputBlobSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider StreamInputBlobParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider StreamInputBlobInitParameters `json:"initProvider,omitempty"` +} + +// StreamInputBlobStatus defines the observed state of StreamInputBlob. 
+type StreamInputBlobStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider StreamInputBlobObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// StreamInputBlob is the Schema for the StreamInputBlobs API. Manages a Stream Analytics Stream Input Blob. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type StreamInputBlob struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.dateFormat) || (has(self.initProvider) && has(self.initProvider.dateFormat))",message="spec.forProvider.dateFormat is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.pathPattern) || (has(self.initProvider) && has(self.initProvider.pathPattern))",message="spec.forProvider.pathPattern is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in 
self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.serialization) || (has(self.initProvider) && has(self.initProvider.serialization))",message="spec.forProvider.serialization is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.storageAccountKeySecretRef)",message="spec.forProvider.storageAccountKeySecretRef is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.timeFormat) || (has(self.initProvider) && has(self.initProvider.timeFormat))",message="spec.forProvider.timeFormat is a required parameter" + Spec StreamInputBlobSpec `json:"spec"` + Status StreamInputBlobStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// StreamInputBlobList contains a list of StreamInputBlobs +type StreamInputBlobList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []StreamInputBlob `json:"items"` +} + +// Repository type metadata. +var ( + StreamInputBlob_Kind = "StreamInputBlob" + StreamInputBlob_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: StreamInputBlob_Kind}.String() + StreamInputBlob_KindAPIVersion = StreamInputBlob_Kind + "." 
+ CRDGroupVersion.String() + StreamInputBlob_GroupVersionKind = CRDGroupVersion.WithKind(StreamInputBlob_Kind) +) + +func init() { + SchemeBuilder.Register(&StreamInputBlob{}, &StreamInputBlobList{}) +} diff --git a/apis/streamanalytics/v1beta2/zz_streaminputeventhub_terraformed.go b/apis/streamanalytics/v1beta2/zz_streaminputeventhub_terraformed.go new file mode 100755 index 000000000..afb461f62 --- /dev/null +++ b/apis/streamanalytics/v1beta2/zz_streaminputeventhub_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this StreamInputEventHub +func (mg *StreamInputEventHub) GetTerraformResourceType() string { + return "azurerm_stream_analytics_stream_input_eventhub" +} + +// GetConnectionDetailsMapping for this StreamInputEventHub +func (tr *StreamInputEventHub) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"shared_access_policy_key": "spec.forProvider.sharedAccessPolicyKeySecretRef"} +} + +// GetObservation of this StreamInputEventHub +func (tr *StreamInputEventHub) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this StreamInputEventHub +func (tr *StreamInputEventHub) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this StreamInputEventHub +func (tr *StreamInputEventHub) GetID() string { + if 
tr.Status.AtProvider.ID == nil {
+		return ""
+	}
+	return *tr.Status.AtProvider.ID
+}
+
+// GetParameters of this StreamInputEventHub
+func (tr *StreamInputEventHub) GetParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.ForProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// SetParameters for this StreamInputEventHub
+func (tr *StreamInputEventHub) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this StreamInputEventHub
+func (tr *StreamInputEventHub) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this StreamInputEventHub
+func (tr *StreamInputEventHub) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this StreamInputEventHub using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *StreamInputEventHub) LateInitialize(attrs []byte) (bool, error) { + params := &StreamInputEventHubParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *StreamInputEventHub) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/streamanalytics/v1beta2/zz_streaminputeventhub_types.go b/apis/streamanalytics/v1beta2/zz_streaminputeventhub_types.go new file mode 100755 index 000000000..7dba4b38b --- /dev/null +++ b/apis/streamanalytics/v1beta2/zz_streaminputeventhub_types.go @@ -0,0 +1,321 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type StreamInputEventHubInitParameters struct { + + // The authentication mode for the Stream Output. Possible values are Msi and ConnectionString. Defaults to ConnectionString. 
+ AuthenticationMode *string `json:"authenticationMode,omitempty" tf:"authentication_mode,omitempty"` + + // The name of an Event Hub Consumer Group that should be used to read events from the Event Hub. Specifying distinct consumer group names for multiple inputs allows each of those inputs to receive the same events from the Event Hub. If not set the input will use the Event Hub's default consumer group. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta1.ConsumerGroup + EventHubConsumerGroupName *string `json:"eventhubConsumerGroupName,omitempty" tf:"eventhub_consumer_group_name,omitempty"` + + // Reference to a ConsumerGroup in eventhub to populate eventhubConsumerGroupName. + // +kubebuilder:validation:Optional + EventHubConsumerGroupNameRef *v1.Reference `json:"eventhubConsumerGroupNameRef,omitempty" tf:"-"` + + // Selector for a ConsumerGroup in eventhub to populate eventhubConsumerGroupName. + // +kubebuilder:validation:Optional + EventHubConsumerGroupNameSelector *v1.Selector `json:"eventhubConsumerGroupNameSelector,omitempty" tf:"-"` + + // The name of the Event Hub. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta2.EventHub + EventHubName *string `json:"eventhubName,omitempty" tf:"eventhub_name,omitempty"` + + // Reference to a EventHub in eventhub to populate eventhubName. + // +kubebuilder:validation:Optional + EventHubNameRef *v1.Reference `json:"eventhubNameRef,omitempty" tf:"-"` + + // Selector for a EventHub in eventhub to populate eventhubName. + // +kubebuilder:validation:Optional + EventHubNameSelector *v1.Selector `json:"eventhubNameSelector,omitempty" tf:"-"` + + // The name of the Stream Input EventHub. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The property the input Event Hub has been partitioned by. 
+ PartitionKey *string `json:"partitionKey,omitempty" tf:"partition_key,omitempty"` + + // The name of the Resource Group where the Stream Analytics Job exists. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A serialization block as defined below. + Serialization *StreamInputEventHubSerializationInitParameters `json:"serialization,omitempty" tf:"serialization,omitempty"` + + // The namespace that is associated with the desired Event Hub, Service Bus Queue, Service Bus Topic, etc. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta2.EventHubNamespace + ServiceBusNamespace *string `json:"servicebusNamespace,omitempty" tf:"servicebus_namespace,omitempty"` + + // Reference to a EventHubNamespace in eventhub to populate servicebusNamespace. + // +kubebuilder:validation:Optional + ServiceBusNamespaceRef *v1.Reference `json:"servicebusNamespaceRef,omitempty" tf:"-"` + + // Selector for a EventHubNamespace in eventhub to populate servicebusNamespace. + // +kubebuilder:validation:Optional + ServiceBusNamespaceSelector *v1.Selector `json:"servicebusNamespaceSelector,omitempty" tf:"-"` + + // The shared access policy name for the Event Hub, Service Bus Queue, Service Bus Topic, etc. 
+ SharedAccessPolicyName *string `json:"sharedAccessPolicyName,omitempty" tf:"shared_access_policy_name,omitempty"` + + // The name of the Stream Analytics Job. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/streamanalytics/v1beta2.Job + StreamAnalyticsJobName *string `json:"streamAnalyticsJobName,omitempty" tf:"stream_analytics_job_name,omitempty"` + + // Reference to a Job in streamanalytics to populate streamAnalyticsJobName. + // +kubebuilder:validation:Optional + StreamAnalyticsJobNameRef *v1.Reference `json:"streamAnalyticsJobNameRef,omitempty" tf:"-"` + + // Selector for a Job in streamanalytics to populate streamAnalyticsJobName. + // +kubebuilder:validation:Optional + StreamAnalyticsJobNameSelector *v1.Selector `json:"streamAnalyticsJobNameSelector,omitempty" tf:"-"` +} + +type StreamInputEventHubObservation struct { + + // The authentication mode for the Stream Output. Possible values are Msi and ConnectionString. Defaults to ConnectionString. + AuthenticationMode *string `json:"authenticationMode,omitempty" tf:"authentication_mode,omitempty"` + + // The name of an Event Hub Consumer Group that should be used to read events from the Event Hub. Specifying distinct consumer group names for multiple inputs allows each of those inputs to receive the same events from the Event Hub. If not set the input will use the Event Hub's default consumer group. + EventHubConsumerGroupName *string `json:"eventhubConsumerGroupName,omitempty" tf:"eventhub_consumer_group_name,omitempty"` + + // The name of the Event Hub. + EventHubName *string `json:"eventhubName,omitempty" tf:"eventhub_name,omitempty"` + + // The ID of the Stream Analytics Stream Input EventHub. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name of the Stream Input EventHub. Changing this forces a new resource to be created. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The property the input Event Hub has been partitioned by. + PartitionKey *string `json:"partitionKey,omitempty" tf:"partition_key,omitempty"` + + // The name of the Resource Group where the Stream Analytics Job exists. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A serialization block as defined below. + Serialization *StreamInputEventHubSerializationObservation `json:"serialization,omitempty" tf:"serialization,omitempty"` + + // The namespace that is associated with the desired Event Hub, Service Bus Queue, Service Bus Topic, etc. + ServiceBusNamespace *string `json:"servicebusNamespace,omitempty" tf:"servicebus_namespace,omitempty"` + + // The shared access policy name for the Event Hub, Service Bus Queue, Service Bus Topic, etc. + SharedAccessPolicyName *string `json:"sharedAccessPolicyName,omitempty" tf:"shared_access_policy_name,omitempty"` + + // The name of the Stream Analytics Job. Changing this forces a new resource to be created. + StreamAnalyticsJobName *string `json:"streamAnalyticsJobName,omitempty" tf:"stream_analytics_job_name,omitempty"` +} + +type StreamInputEventHubParameters struct { + + // The authentication mode for the Stream Output. Possible values are Msi and ConnectionString. Defaults to ConnectionString. + // +kubebuilder:validation:Optional + AuthenticationMode *string `json:"authenticationMode,omitempty" tf:"authentication_mode,omitempty"` + + // The name of an Event Hub Consumer Group that should be used to read events from the Event Hub. Specifying distinct consumer group names for multiple inputs allows each of those inputs to receive the same events from the Event Hub. If not set the input will use the Event Hub's default consumer group. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta1.ConsumerGroup + // +kubebuilder:validation:Optional + EventHubConsumerGroupName *string `json:"eventhubConsumerGroupName,omitempty" tf:"eventhub_consumer_group_name,omitempty"` + + // Reference to a ConsumerGroup in eventhub to populate eventhubConsumerGroupName. + // +kubebuilder:validation:Optional + EventHubConsumerGroupNameRef *v1.Reference `json:"eventhubConsumerGroupNameRef,omitempty" tf:"-"` + + // Selector for a ConsumerGroup in eventhub to populate eventhubConsumerGroupName. + // +kubebuilder:validation:Optional + EventHubConsumerGroupNameSelector *v1.Selector `json:"eventhubConsumerGroupNameSelector,omitempty" tf:"-"` + + // The name of the Event Hub. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta2.EventHub + // +kubebuilder:validation:Optional + EventHubName *string `json:"eventhubName,omitempty" tf:"eventhub_name,omitempty"` + + // Reference to a EventHub in eventhub to populate eventhubName. + // +kubebuilder:validation:Optional + EventHubNameRef *v1.Reference `json:"eventhubNameRef,omitempty" tf:"-"` + + // Selector for a EventHub in eventhub to populate eventhubName. + // +kubebuilder:validation:Optional + EventHubNameSelector *v1.Selector `json:"eventhubNameSelector,omitempty" tf:"-"` + + // The name of the Stream Input EventHub. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The property the input Event Hub has been partitioned by. + // +kubebuilder:validation:Optional + PartitionKey *string `json:"partitionKey,omitempty" tf:"partition_key,omitempty"` + + // The name of the Resource Group where the Stream Analytics Job exists. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A serialization block as defined below. + // +kubebuilder:validation:Optional + Serialization *StreamInputEventHubSerializationParameters `json:"serialization,omitempty" tf:"serialization,omitempty"` + + // The namespace that is associated with the desired Event Hub, Service Bus Queue, Service Bus Topic, etc. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta2.EventHubNamespace + // +kubebuilder:validation:Optional + ServiceBusNamespace *string `json:"servicebusNamespace,omitempty" tf:"servicebus_namespace,omitempty"` + + // Reference to a EventHubNamespace in eventhub to populate servicebusNamespace. + // +kubebuilder:validation:Optional + ServiceBusNamespaceRef *v1.Reference `json:"servicebusNamespaceRef,omitempty" tf:"-"` + + // Selector for a EventHubNamespace in eventhub to populate servicebusNamespace. + // +kubebuilder:validation:Optional + ServiceBusNamespaceSelector *v1.Selector `json:"servicebusNamespaceSelector,omitempty" tf:"-"` + + // The shared access policy key for the specified shared access policy. + // +kubebuilder:validation:Optional + SharedAccessPolicyKeySecretRef *v1.SecretKeySelector `json:"sharedAccessPolicyKeySecretRef,omitempty" tf:"-"` + + // The shared access policy name for the Event Hub, Service Bus Queue, Service Bus Topic, etc. 
+ // +kubebuilder:validation:Optional + SharedAccessPolicyName *string `json:"sharedAccessPolicyName,omitempty" tf:"shared_access_policy_name,omitempty"` + + // The name of the Stream Analytics Job. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/streamanalytics/v1beta2.Job + // +kubebuilder:validation:Optional + StreamAnalyticsJobName *string `json:"streamAnalyticsJobName,omitempty" tf:"stream_analytics_job_name,omitempty"` + + // Reference to a Job in streamanalytics to populate streamAnalyticsJobName. + // +kubebuilder:validation:Optional + StreamAnalyticsJobNameRef *v1.Reference `json:"streamAnalyticsJobNameRef,omitempty" tf:"-"` + + // Selector for a Job in streamanalytics to populate streamAnalyticsJobName. + // +kubebuilder:validation:Optional + StreamAnalyticsJobNameSelector *v1.Selector `json:"streamAnalyticsJobNameSelector,omitempty" tf:"-"` +} + +type StreamInputEventHubSerializationInitParameters struct { + + // The encoding of the incoming data in the case of input and the encoding of outgoing data in the case of output. It currently can only be set to UTF8. + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + // The delimiter that will be used to separate comma-separated value (CSV) records. Possible values are (space), , (comma), (tab), | (pipe) and ;. + FieldDelimiter *string `json:"fieldDelimiter,omitempty" tf:"field_delimiter,omitempty"` + + // The serialization format used for incoming data streams. Possible values are Avro, Csv and Json. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type StreamInputEventHubSerializationObservation struct { + + // The encoding of the incoming data in the case of input and the encoding of outgoing data in the case of output. It currently can only be set to UTF8. 
+ Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + // The delimiter that will be used to separate comma-separated value (CSV) records. Possible values are (space), , (comma), (tab), | (pipe) and ;. + FieldDelimiter *string `json:"fieldDelimiter,omitempty" tf:"field_delimiter,omitempty"` + + // The serialization format used for incoming data streams. Possible values are Avro, Csv and Json. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type StreamInputEventHubSerializationParameters struct { + + // The encoding of the incoming data in the case of input and the encoding of outgoing data in the case of output. It currently can only be set to UTF8. + // +kubebuilder:validation:Optional + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + // The delimiter that will be used to separate comma-separated value (CSV) records. Possible values are (space), , (comma), (tab), | (pipe) and ;. + // +kubebuilder:validation:Optional + FieldDelimiter *string `json:"fieldDelimiter,omitempty" tf:"field_delimiter,omitempty"` + + // The serialization format used for incoming data streams. Possible values are Avro, Csv and Json. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +// StreamInputEventHubSpec defines the desired state of StreamInputEventHub +type StreamInputEventHubSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider StreamInputEventHubParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider StreamInputEventHubInitParameters `json:"initProvider,omitempty"` +} + +// StreamInputEventHubStatus defines the observed state of StreamInputEventHub. +type StreamInputEventHubStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider StreamInputEventHubObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// StreamInputEventHub is the Schema for the StreamInputEventHubs API. Manages a Stream Analytics Stream Input EventHub. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type StreamInputEventHub struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.serialization) || (has(self.initProvider) && has(self.initProvider.serialization))",message="spec.forProvider.serialization is a required 
parameter" + Spec StreamInputEventHubSpec `json:"spec"` + Status StreamInputEventHubStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// StreamInputEventHubList contains a list of StreamInputEventHubs +type StreamInputEventHubList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []StreamInputEventHub `json:"items"` +} + +// Repository type metadata. +var ( + StreamInputEventHub_Kind = "StreamInputEventHub" + StreamInputEventHub_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: StreamInputEventHub_Kind}.String() + StreamInputEventHub_KindAPIVersion = StreamInputEventHub_Kind + "." + CRDGroupVersion.String() + StreamInputEventHub_GroupVersionKind = CRDGroupVersion.WithKind(StreamInputEventHub_Kind) +) + +func init() { + SchemeBuilder.Register(&StreamInputEventHub{}, &StreamInputEventHubList{}) +} diff --git a/apis/streamanalytics/v1beta2/zz_streaminputiothub_terraformed.go b/apis/streamanalytics/v1beta2/zz_streaminputiothub_terraformed.go new file mode 100755 index 000000000..4fa149a1f --- /dev/null +++ b/apis/streamanalytics/v1beta2/zz_streaminputiothub_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this StreamInputIOTHub +func (mg *StreamInputIOTHub) GetTerraformResourceType() string { + return "azurerm_stream_analytics_stream_input_iothub" +} + +// GetConnectionDetailsMapping for this StreamInputIOTHub +func (tr *StreamInputIOTHub) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"shared_access_policy_key": "spec.forProvider.sharedAccessPolicyKeySecretRef"} +} + +// GetObservation of this StreamInputIOTHub +func (tr *StreamInputIOTHub) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this StreamInputIOTHub +func (tr *StreamInputIOTHub) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this StreamInputIOTHub +func (tr *StreamInputIOTHub) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this StreamInputIOTHub +func (tr *StreamInputIOTHub) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this StreamInputIOTHub +func (tr *StreamInputIOTHub) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this 
StreamInputIOTHub
+func (tr *StreamInputIOTHub) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this StreamInputIOTHub
+func (tr *StreamInputIOTHub) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this StreamInputIOTHub using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *StreamInputIOTHub) LateInitialize(attrs []byte) (bool, error) {
+	params := &StreamInputIOTHubParameters{}
+	if err := json.TFParser.Unmarshal(attrs, params); err != nil {
+		return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization")
+	}
+	opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)}
+
+	li := resource.NewGenericLateInitializer(opts...)
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *StreamInputIOTHub) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/streamanalytics/v1beta2/zz_streaminputiothub_types.go b/apis/streamanalytics/v1beta2/zz_streaminputiothub_types.go new file mode 100755 index 000000000..c5f5fdadb --- /dev/null +++ b/apis/streamanalytics/v1beta2/zz_streaminputiothub_types.go @@ -0,0 +1,286 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type StreamInputIOTHubInitParameters struct { + + // The IoT Hub endpoint to connect to (ie. messages/events, messages/operationsMonitoringEvents, etc.). + Endpoint *string `json:"endpoint,omitempty" tf:"endpoint,omitempty"` + + // The name of an Event Hub Consumer Group that should be used to read events from the Event Hub. Specifying distinct consumer group names for multiple inputs allows each of those inputs to receive the same events from the Event Hub. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta1.ConsumerGroup + EventHubConsumerGroupName *string `json:"eventhubConsumerGroupName,omitempty" tf:"eventhub_consumer_group_name,omitempty"` + + // Reference to a ConsumerGroup in eventhub to populate eventhubConsumerGroupName. + // +kubebuilder:validation:Optional + EventHubConsumerGroupNameRef *v1.Reference `json:"eventhubConsumerGroupNameRef,omitempty" tf:"-"` + + // Selector for a ConsumerGroup in eventhub to populate eventhubConsumerGroupName. 
+ // +kubebuilder:validation:Optional + EventHubConsumerGroupNameSelector *v1.Selector `json:"eventhubConsumerGroupNameSelector,omitempty" tf:"-"` + + // The name or the URI of the IoT Hub. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devices/v1beta2.IOTHub + IOTHubNamespace *string `json:"iothubNamespace,omitempty" tf:"iothub_namespace,omitempty"` + + // Reference to a IOTHub in devices to populate iothubNamespace. + // +kubebuilder:validation:Optional + IOTHubNamespaceRef *v1.Reference `json:"iothubNamespaceRef,omitempty" tf:"-"` + + // Selector for a IOTHub in devices to populate iothubNamespace. + // +kubebuilder:validation:Optional + IOTHubNamespaceSelector *v1.Selector `json:"iothubNamespaceSelector,omitempty" tf:"-"` + + // The name of the Stream Input IoTHub. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The name of the Resource Group where the Stream Analytics Job exists. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A serialization block as defined below. + Serialization *StreamInputIOTHubSerializationInitParameters `json:"serialization,omitempty" tf:"serialization,omitempty"` + + // The shared access policy name for the Event Hub, Service Bus Queue, Service Bus Topic, etc. 
+ SharedAccessPolicyName *string `json:"sharedAccessPolicyName,omitempty" tf:"shared_access_policy_name,omitempty"` + + // The name of the Stream Analytics Job. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/streamanalytics/v1beta2.Job + StreamAnalyticsJobName *string `json:"streamAnalyticsJobName,omitempty" tf:"stream_analytics_job_name,omitempty"` + + // Reference to a Job in streamanalytics to populate streamAnalyticsJobName. + // +kubebuilder:validation:Optional + StreamAnalyticsJobNameRef *v1.Reference `json:"streamAnalyticsJobNameRef,omitempty" tf:"-"` + + // Selector for a Job in streamanalytics to populate streamAnalyticsJobName. + // +kubebuilder:validation:Optional + StreamAnalyticsJobNameSelector *v1.Selector `json:"streamAnalyticsJobNameSelector,omitempty" tf:"-"` +} + +type StreamInputIOTHubObservation struct { + + // The IoT Hub endpoint to connect to (ie. messages/events, messages/operationsMonitoringEvents, etc.). + Endpoint *string `json:"endpoint,omitempty" tf:"endpoint,omitempty"` + + // The name of an Event Hub Consumer Group that should be used to read events from the Event Hub. Specifying distinct consumer group names for multiple inputs allows each of those inputs to receive the same events from the Event Hub. + EventHubConsumerGroupName *string `json:"eventhubConsumerGroupName,omitempty" tf:"eventhub_consumer_group_name,omitempty"` + + // The ID of the Stream Analytics Stream Input IoTHub. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The name or the URI of the IoT Hub. + IOTHubNamespace *string `json:"iothubNamespace,omitempty" tf:"iothub_namespace,omitempty"` + + // The name of the Stream Input IoTHub. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The name of the Resource Group where the Stream Analytics Job exists. Changing this forces a new resource to be created. 
+ ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A serialization block as defined below. + Serialization *StreamInputIOTHubSerializationObservation `json:"serialization,omitempty" tf:"serialization,omitempty"` + + // The shared access policy name for the Event Hub, Service Bus Queue, Service Bus Topic, etc. + SharedAccessPolicyName *string `json:"sharedAccessPolicyName,omitempty" tf:"shared_access_policy_name,omitempty"` + + // The name of the Stream Analytics Job. Changing this forces a new resource to be created. + StreamAnalyticsJobName *string `json:"streamAnalyticsJobName,omitempty" tf:"stream_analytics_job_name,omitempty"` +} + +type StreamInputIOTHubParameters struct { + + // The IoT Hub endpoint to connect to (ie. messages/events, messages/operationsMonitoringEvents, etc.). + // +kubebuilder:validation:Optional + Endpoint *string `json:"endpoint,omitempty" tf:"endpoint,omitempty"` + + // The name of an Event Hub Consumer Group that should be used to read events from the Event Hub. Specifying distinct consumer group names for multiple inputs allows each of those inputs to receive the same events from the Event Hub. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta1.ConsumerGroup + // +kubebuilder:validation:Optional + EventHubConsumerGroupName *string `json:"eventhubConsumerGroupName,omitempty" tf:"eventhub_consumer_group_name,omitempty"` + + // Reference to a ConsumerGroup in eventhub to populate eventhubConsumerGroupName. + // +kubebuilder:validation:Optional + EventHubConsumerGroupNameRef *v1.Reference `json:"eventhubConsumerGroupNameRef,omitempty" tf:"-"` + + // Selector for a ConsumerGroup in eventhub to populate eventhubConsumerGroupName. + // +kubebuilder:validation:Optional + EventHubConsumerGroupNameSelector *v1.Selector `json:"eventhubConsumerGroupNameSelector,omitempty" tf:"-"` + + // The name or the URI of the IoT Hub. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devices/v1beta2.IOTHub + // +kubebuilder:validation:Optional + IOTHubNamespace *string `json:"iothubNamespace,omitempty" tf:"iothub_namespace,omitempty"` + + // Reference to a IOTHub in devices to populate iothubNamespace. + // +kubebuilder:validation:Optional + IOTHubNamespaceRef *v1.Reference `json:"iothubNamespaceRef,omitempty" tf:"-"` + + // Selector for a IOTHub in devices to populate iothubNamespace. + // +kubebuilder:validation:Optional + IOTHubNamespaceSelector *v1.Selector `json:"iothubNamespaceSelector,omitempty" tf:"-"` + + // The name of the Stream Input IoTHub. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The name of the Resource Group where the Stream Analytics Job exists. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A serialization block as defined below. + // +kubebuilder:validation:Optional + Serialization *StreamInputIOTHubSerializationParameters `json:"serialization,omitempty" tf:"serialization,omitempty"` + + // The shared access policy key for the specified shared access policy. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + SharedAccessPolicyKeySecretRef v1.SecretKeySelector `json:"sharedAccessPolicyKeySecretRef" tf:"-"` + + // The shared access policy name for the Event Hub, Service Bus Queue, Service Bus Topic, etc. + // +kubebuilder:validation:Optional + SharedAccessPolicyName *string `json:"sharedAccessPolicyName,omitempty" tf:"shared_access_policy_name,omitempty"` + + // The name of the Stream Analytics Job. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/streamanalytics/v1beta2.Job + // +kubebuilder:validation:Optional + StreamAnalyticsJobName *string `json:"streamAnalyticsJobName,omitempty" tf:"stream_analytics_job_name,omitempty"` + + // Reference to a Job in streamanalytics to populate streamAnalyticsJobName. + // +kubebuilder:validation:Optional + StreamAnalyticsJobNameRef *v1.Reference `json:"streamAnalyticsJobNameRef,omitempty" tf:"-"` + + // Selector for a Job in streamanalytics to populate streamAnalyticsJobName. + // +kubebuilder:validation:Optional + StreamAnalyticsJobNameSelector *v1.Selector `json:"streamAnalyticsJobNameSelector,omitempty" tf:"-"` +} + +type StreamInputIOTHubSerializationInitParameters struct { + + // The encoding of the incoming data in the case of input and the encoding of outgoing data in the case of output. It currently can only be set to UTF8. + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + // The delimiter that will be used to separate comma-separated value (CSV) records. Possible values are (space), , (comma), (tab), | (pipe) and ;. + FieldDelimiter *string `json:"fieldDelimiter,omitempty" tf:"field_delimiter,omitempty"` + + // The serialization format used for incoming data streams. Possible values are Avro, Csv and Json. 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type StreamInputIOTHubSerializationObservation struct { + + // The encoding of the incoming data in the case of input and the encoding of outgoing data in the case of output. It currently can only be set to UTF8. + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + // The delimiter that will be used to separate comma-separated value (CSV) records. Possible values are (space), , (comma), (tab), | (pipe) and ;. + FieldDelimiter *string `json:"fieldDelimiter,omitempty" tf:"field_delimiter,omitempty"` + + // The serialization format used for incoming data streams. Possible values are Avro, Csv and Json. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type StreamInputIOTHubSerializationParameters struct { + + // The encoding of the incoming data in the case of input and the encoding of outgoing data in the case of output. It currently can only be set to UTF8. + // +kubebuilder:validation:Optional + Encoding *string `json:"encoding,omitempty" tf:"encoding,omitempty"` + + // The delimiter that will be used to separate comma-separated value (CSV) records. Possible values are (space), , (comma), (tab), | (pipe) and ;. + // +kubebuilder:validation:Optional + FieldDelimiter *string `json:"fieldDelimiter,omitempty" tf:"field_delimiter,omitempty"` + + // The serialization format used for incoming data streams. Possible values are Avro, Csv and Json. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +// StreamInputIOTHubSpec defines the desired state of StreamInputIOTHub +type StreamInputIOTHubSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider StreamInputIOTHubParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. 
+ // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider StreamInputIOTHubInitParameters `json:"initProvider,omitempty"` +} + +// StreamInputIOTHubStatus defines the observed state of StreamInputIOTHub. +type StreamInputIOTHubStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider StreamInputIOTHubObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// StreamInputIOTHub is the Schema for the StreamInputIOTHubs API. Manages a Stream Analytics Stream Input IoTHub. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type StreamInputIOTHub struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.endpoint) || (has(self.initProvider) && has(self.initProvider.endpoint))",message="spec.forProvider.endpoint is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.serialization) || (has(self.initProvider) && has(self.initProvider.serialization))",message="spec.forProvider.serialization is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.sharedAccessPolicyKeySecretRef)",message="spec.forProvider.sharedAccessPolicyKeySecretRef is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in 
self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.sharedAccessPolicyName) || (has(self.initProvider) && has(self.initProvider.sharedAccessPolicyName))",message="spec.forProvider.sharedAccessPolicyName is a required parameter" + Spec StreamInputIOTHubSpec `json:"spec"` + Status StreamInputIOTHubStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// StreamInputIOTHubList contains a list of StreamInputIOTHubs +type StreamInputIOTHubList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []StreamInputIOTHub `json:"items"` +} + +// Repository type metadata. +var ( + StreamInputIOTHub_Kind = "StreamInputIOTHub" + StreamInputIOTHub_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: StreamInputIOTHub_Kind}.String() + StreamInputIOTHub_KindAPIVersion = StreamInputIOTHub_Kind + "." + CRDGroupVersion.String() + StreamInputIOTHub_GroupVersionKind = CRDGroupVersion.WithKind(StreamInputIOTHub_Kind) +) + +func init() { + SchemeBuilder.Register(&StreamInputIOTHub{}, &StreamInputIOTHubList{}) +} diff --git a/apis/synapse/v1beta1/zz_firewallrule_types.go b/apis/synapse/v1beta1/zz_firewallrule_types.go index 46fee7b44..3c22ffe18 100755 --- a/apis/synapse/v1beta1/zz_firewallrule_types.go +++ b/apis/synapse/v1beta1/zz_firewallrule_types.go @@ -48,7 +48,7 @@ type FirewallRuleParameters struct { StartIPAddress *string `json:"startIpAddress,omitempty" tf:"start_ip_address,omitempty"` // The ID of the Synapse Workspace on which to create the Firewall Rule. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/synapse/v1beta1.Workspace + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/synapse/v1beta2.Workspace // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional SynapseWorkspaceID *string `json:"synapseWorkspaceId,omitempty" tf:"synapse_workspace_id,omitempty"` diff --git a/apis/synapse/v1beta1/zz_generated.conversion_hubs.go b/apis/synapse/v1beta1/zz_generated.conversion_hubs.go index dc80976c2..430cb8941 100755 --- a/apis/synapse/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/synapse/v1beta1/zz_generated.conversion_hubs.go @@ -15,9 +15,6 @@ func (tr *IntegrationRuntimeAzure) Hub() {} // Hub marks this type as a conversion hub. func (tr *IntegrationRuntimeSelfHosted) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *LinkedService) Hub() {} - // Hub marks this type as a conversion hub. func (tr *ManagedPrivateEndpoint) Hub() {} @@ -27,12 +24,6 @@ func (tr *PrivateLinkHub) Hub() {} // Hub marks this type as a conversion hub. func (tr *RoleAssignment) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *SparkPool) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *SQLPool) Hub() {} - // Hub marks this type as a conversion hub. func (tr *SQLPoolExtendedAuditingPolicy) Hub() {} @@ -45,9 +36,6 @@ func (tr *SQLPoolWorkloadClassifier) Hub() {} // Hub marks this type as a conversion hub. func (tr *SQLPoolWorkloadGroup) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *Workspace) Hub() {} - // Hub marks this type as a conversion hub. func (tr *WorkspaceAADAdmin) Hub() {} @@ -59,6 +47,3 @@ func (tr *WorkspaceSecurityAlertPolicy) Hub() {} // Hub marks this type as a conversion hub. func (tr *WorkspaceSQLAADAdmin) Hub() {} - -// Hub marks this type as a conversion hub. 
-func (tr *WorkspaceVulnerabilityAssessment) Hub() {} diff --git a/apis/synapse/v1beta1/zz_generated.conversion_spokes.go b/apis/synapse/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..8c3681315 --- /dev/null +++ b/apis/synapse/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,114 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this LinkedService to the hub type. +func (tr *LinkedService) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the LinkedService type. +func (tr *LinkedService) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this SparkPool to the hub type. 
+func (tr *SparkPool) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the SparkPool type. +func (tr *SparkPool) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this SQLPool to the hub type. +func (tr *SQLPool) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the SQLPool type. +func (tr *SQLPool) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this Workspace to the hub type. 
+func (tr *Workspace) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Workspace type. +func (tr *Workspace) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this WorkspaceVulnerabilityAssessment to the hub type. +func (tr *WorkspaceVulnerabilityAssessment) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the WorkspaceVulnerabilityAssessment type. 
+func (tr *WorkspaceVulnerabilityAssessment) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/synapse/v1beta1/zz_generated.resolvers.go b/apis/synapse/v1beta1/zz_generated.resolvers.go index 14770cb2f..de67009d2 100644 --- a/apis/synapse/v1beta1/zz_generated.resolvers.go +++ b/apis/synapse/v1beta1/zz_generated.resolvers.go @@ -28,7 +28,7 @@ func (mg *FirewallRule) ResolveReferences( // ResolveReferences of this Firewall var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("synapse.azure.upbound.io", "v1beta1", "Workspace", "WorkspaceList") + m, l, err = apisresolver.GetManagedResource("synapse.azure.upbound.io", "v1beta2", "Workspace", "WorkspaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -59,7 +59,7 @@ func (mg *IntegrationRuntimeAzure) ResolveReferences(ctx context.Context, c clie var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("synapse.azure.upbound.io", "v1beta1", "Workspace", "WorkspaceList") + m, l, err = apisresolver.GetManagedResource("synapse.azure.upbound.io", "v1beta2", "Workspace", "WorkspaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -90,7 +90,7 @@ func (mg *IntegrationRuntimeSelfHosted) ResolveReferences(ctx context.Context, c var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("synapse.azure.upbound.io", "v1beta1", "Workspace", "WorkspaceList") + m, l, 
err = apisresolver.GetManagedResource("synapse.azure.upbound.io", "v1beta2", "Workspace", "WorkspaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -195,7 +195,7 @@ func (mg *ManagedPrivateEndpoint) ResolveReferences(ctx context.Context, c clien var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("synapse.azure.upbound.io", "v1beta1", "Workspace", "WorkspaceList") + m, l, err = apisresolver.GetManagedResource("synapse.azure.upbound.io", "v1beta2", "Workspace", "WorkspaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -214,7 +214,7 @@ func (mg *ManagedPrivateEndpoint) ResolveReferences(ctx context.Context, c clien mg.Spec.ForProvider.SynapseWorkspaceID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.SynapseWorkspaceIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -233,7 +233,7 @@ func (mg *ManagedPrivateEndpoint) ResolveReferences(ctx context.Context, c clien mg.Spec.ForProvider.TargetResourceID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.TargetResourceIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("synapse.azure.upbound.io", "v1beta1", "Workspace", "WorkspaceList") + m, l, err = apisresolver.GetManagedResource("synapse.azure.upbound.io", "v1beta2", "Workspace", "WorkspaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -252,7 +252,7 @@ 
func (mg *ManagedPrivateEndpoint) ResolveReferences(ctx context.Context, c clien mg.Spec.InitProvider.SynapseWorkspaceID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.SynapseWorkspaceIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -314,7 +314,7 @@ func (mg *RoleAssignment) ResolveReferences(ctx context.Context, c client.Reader var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("synapse.azure.upbound.io", "v1beta1", "Workspace", "WorkspaceList") + m, l, err = apisresolver.GetManagedResource("synapse.azure.upbound.io", "v1beta2", "Workspace", "WorkspaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -333,7 +333,7 @@ func (mg *RoleAssignment) ResolveReferences(ctx context.Context, c client.Reader mg.Spec.ForProvider.SynapseWorkspaceID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.SynapseWorkspaceIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("synapse.azure.upbound.io", "v1beta1", "Workspace", "WorkspaceList") + m, l, err = apisresolver.GetManagedResource("synapse.azure.upbound.io", "v1beta2", "Workspace", "WorkspaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -395,7 +395,7 @@ func (mg *SQLPoolExtendedAuditingPolicy) ResolveReferences(ctx context.Context, var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("synapse.azure.upbound.io", "v1beta1", "SQLPool", "SQLPoolList") + m, l, err 
= apisresolver.GetManagedResource("synapse.azure.upbound.io", "v1beta2", "SQLPool", "SQLPoolList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -414,7 +414,7 @@ func (mg *SQLPoolExtendedAuditingPolicy) ResolveReferences(ctx context.Context, mg.Spec.ForProvider.SQLPoolID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.SQLPoolIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -433,7 +433,7 @@ func (mg *SQLPoolExtendedAuditingPolicy) ResolveReferences(ctx context.Context, mg.Spec.ForProvider.StorageEndpoint = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.StorageEndpointRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -464,7 +464,7 @@ func (mg *SQLPoolSecurityAlertPolicy) ResolveReferences(ctx context.Context, c c var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("synapse.azure.upbound.io", "v1beta1", "SQLPool", "SQLPoolList") + m, l, err = apisresolver.GetManagedResource("synapse.azure.upbound.io", "v1beta2", "SQLPool", "SQLPoolList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -483,7 +483,7 @@ func (mg *SQLPoolSecurityAlertPolicy) 
ResolveReferences(ctx context.Context, c c mg.Spec.ForProvider.SQLPoolID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.SQLPoolIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -502,7 +502,7 @@ func (mg *SQLPoolSecurityAlertPolicy) ResolveReferences(ctx context.Context, c c mg.Spec.ForProvider.StorageEndpoint = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.StorageEndpointRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -564,7 +564,7 @@ func (mg *SQLPoolWorkloadGroup) ResolveReferences(ctx context.Context, c client. 
var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("synapse.azure.upbound.io", "v1beta1", "SQLPool", "SQLPoolList") + m, l, err = apisresolver.GetManagedResource("synapse.azure.upbound.io", "v1beta2", "SQLPool", "SQLPoolList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -813,7 +813,7 @@ func (mg *WorkspaceAADAdmin) ResolveReferences(ctx context.Context, c client.Rea var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("synapse.azure.upbound.io", "v1beta1", "Workspace", "WorkspaceList") + m, l, err = apisresolver.GetManagedResource("synapse.azure.upbound.io", "v1beta2", "Workspace", "WorkspaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -844,7 +844,7 @@ func (mg *WorkspaceExtendedAuditingPolicy) ResolveReferences(ctx context.Context var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -863,7 +863,7 @@ func (mg *WorkspaceExtendedAuditingPolicy) ResolveReferences(ctx context.Context mg.Spec.ForProvider.StorageEndpoint = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.StorageEndpointRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("synapse.azure.upbound.io", "v1beta1", "Workspace", "WorkspaceList") + m, l, err = apisresolver.GetManagedResource("synapse.azure.upbound.io", "v1beta2", "Workspace", "WorkspaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed 
resource and its list for reference resolution") } @@ -882,7 +882,7 @@ func (mg *WorkspaceExtendedAuditingPolicy) ResolveReferences(ctx context.Context mg.Spec.ForProvider.SynapseWorkspaceID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.SynapseWorkspaceIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -913,7 +913,7 @@ func (mg *WorkspaceSQLAADAdmin) ResolveReferences(ctx context.Context, c client. var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("synapse.azure.upbound.io", "v1beta1", "Workspace", "WorkspaceList") + m, l, err = apisresolver.GetManagedResource("synapse.azure.upbound.io", "v1beta2", "Workspace", "WorkspaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -932,7 +932,7 @@ func (mg *WorkspaceSQLAADAdmin) ResolveReferences(ctx context.Context, c client. 
mg.Spec.ForProvider.SynapseWorkspaceID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.SynapseWorkspaceIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("synapse.azure.upbound.io", "v1beta1", "Workspace", "WorkspaceList") + m, l, err = apisresolver.GetManagedResource("synapse.azure.upbound.io", "v1beta2", "Workspace", "WorkspaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -963,7 +963,7 @@ func (mg *WorkspaceSecurityAlertPolicy) ResolveReferences(ctx context.Context, c var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -982,7 +982,7 @@ func (mg *WorkspaceSecurityAlertPolicy) ResolveReferences(ctx context.Context, c mg.Spec.ForProvider.StorageEndpoint = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.StorageEndpointRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("synapse.azure.upbound.io", "v1beta1", "Workspace", "WorkspaceList") + m, l, err = apisresolver.GetManagedResource("synapse.azure.upbound.io", "v1beta2", "Workspace", "WorkspaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -1001,7 +1001,7 @@ func (mg *WorkspaceSecurityAlertPolicy) ResolveReferences(ctx context.Context, c mg.Spec.ForProvider.SynapseWorkspaceID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.SynapseWorkspaceIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "Account", "AccountList") + 
m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/synapse/v1beta1/zz_integrationruntimeazure_types.go b/apis/synapse/v1beta1/zz_integrationruntimeazure_types.go index 1019cc8b9..be73ebb9a 100755 --- a/apis/synapse/v1beta1/zz_integrationruntimeazure_types.go +++ b/apis/synapse/v1beta1/zz_integrationruntimeazure_types.go @@ -74,7 +74,7 @@ type IntegrationRuntimeAzureParameters struct { Location *string `json:"location,omitempty" tf:"location,omitempty"` // The Synapse Workspace ID in which to associate the Integration Runtime with. Changing this forces a new Synapse Azure Integration Runtime to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/synapse/v1beta1.Workspace + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/synapse/v1beta2.Workspace // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional SynapseWorkspaceID *string `json:"synapseWorkspaceId,omitempty" tf:"synapse_workspace_id,omitempty"` diff --git a/apis/synapse/v1beta1/zz_integrationruntimeselfhosted_types.go b/apis/synapse/v1beta1/zz_integrationruntimeselfhosted_types.go index 461c841d1..8fd2f5461 100755 --- a/apis/synapse/v1beta1/zz_integrationruntimeselfhosted_types.go +++ b/apis/synapse/v1beta1/zz_integrationruntimeselfhosted_types.go @@ -44,7 +44,7 @@ type IntegrationRuntimeSelfHostedParameters struct { Description *string `json:"description,omitempty" tf:"description,omitempty"` // The Synapse Workspace ID in which to associate the Integration Runtime with. Changing this forces a new Synapse Self-hosted Integration Runtime to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/synapse/v1beta1.Workspace + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/synapse/v1beta2.Workspace // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional SynapseWorkspaceID *string `json:"synapseWorkspaceId,omitempty" tf:"synapse_workspace_id,omitempty"` diff --git a/apis/synapse/v1beta1/zz_managedprivateendpoint_types.go b/apis/synapse/v1beta1/zz_managedprivateendpoint_types.go index a30c1178e..906cda70b 100755 --- a/apis/synapse/v1beta1/zz_managedprivateendpoint_types.go +++ b/apis/synapse/v1beta1/zz_managedprivateendpoint_types.go @@ -22,7 +22,7 @@ type ManagedPrivateEndpointInitParameters struct { SubresourceName *string `json:"subresourceName,omitempty" tf:"subresource_name,omitempty"` // The ID of the Synapse Workspace on which to create the Managed Private Endpoint. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/synapse/v1beta1.Workspace + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/synapse/v1beta2.Workspace // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() SynapseWorkspaceID *string `json:"synapseWorkspaceId,omitempty" tf:"synapse_workspace_id,omitempty"` @@ -35,7 +35,7 @@ type ManagedPrivateEndpointInitParameters struct { SynapseWorkspaceIDSelector *v1.Selector `json:"synapseWorkspaceIdSelector,omitempty" tf:"-"` // The ID of the Private Link Enabled Remote Resource which this Synapse Private Endpoint should be connected to. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() TargetResourceID *string `json:"targetResourceId,omitempty" tf:"target_resource_id,omitempty"` @@ -77,7 +77,7 @@ type ManagedPrivateEndpointParameters struct { SubresourceName *string `json:"subresourceName,omitempty" tf:"subresource_name,omitempty"` // The ID of the Synapse Workspace on which to create the Managed Private Endpoint. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/synapse/v1beta1.Workspace + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/synapse/v1beta2.Workspace // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional SynapseWorkspaceID *string `json:"synapseWorkspaceId,omitempty" tf:"synapse_workspace_id,omitempty"` @@ -91,7 +91,7 @@ type ManagedPrivateEndpointParameters struct { SynapseWorkspaceIDSelector *v1.Selector `json:"synapseWorkspaceIdSelector,omitempty" tf:"-"` // The ID of the Private Link Enabled Remote Resource which this Synapse Private Endpoint should be connected to. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional TargetResourceID *string `json:"targetResourceId,omitempty" tf:"target_resource_id,omitempty"` diff --git a/apis/synapse/v1beta1/zz_roleassignment_types.go b/apis/synapse/v1beta1/zz_roleassignment_types.go index 0195ab5d3..97d710407 100755 --- a/apis/synapse/v1beta1/zz_roleassignment_types.go +++ b/apis/synapse/v1beta1/zz_roleassignment_types.go @@ -28,7 +28,7 @@ type RoleAssignmentInitParameters struct { SynapseSparkPoolID *string `json:"synapseSparkPoolId,omitempty" tf:"synapse_spark_pool_id,omitempty"` // The Synapse Workspace which the Synapse Role Assignment applies to. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/synapse/v1beta1.Workspace + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/synapse/v1beta2.Workspace // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() SynapseWorkspaceID *string `json:"synapseWorkspaceId,omitempty" tf:"synapse_workspace_id,omitempty"` @@ -81,7 +81,7 @@ type RoleAssignmentParameters struct { SynapseSparkPoolID *string `json:"synapseSparkPoolId,omitempty" tf:"synapse_spark_pool_id,omitempty"` // The Synapse Workspace which the Synapse Role Assignment applies to. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/synapse/v1beta1.Workspace + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/synapse/v1beta2.Workspace // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional SynapseWorkspaceID *string `json:"synapseWorkspaceId,omitempty" tf:"synapse_workspace_id,omitempty"` diff --git a/apis/synapse/v1beta1/zz_sqlpoolextendedauditingpolicy_types.go b/apis/synapse/v1beta1/zz_sqlpoolextendedauditingpolicy_types.go index 4fb174846..ad13b66ff 100755 --- a/apis/synapse/v1beta1/zz_sqlpoolextendedauditingpolicy_types.go +++ b/apis/synapse/v1beta1/zz_sqlpoolextendedauditingpolicy_types.go @@ -25,7 +25,7 @@ type SQLPoolExtendedAuditingPolicyInitParameters struct { StorageAccountAccessKeyIsSecondary *bool `json:"storageAccountAccessKeyIsSecondary,omitempty" tf:"storage_account_access_key_is_secondary,omitempty"` // The blob storage endpoint (e.g. https://example.blob.core.windows.net). This blob storage will hold all extended auditing logs. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("primary_blob_endpoint",true) StorageEndpoint *string `json:"storageEndpoint,omitempty" tf:"storage_endpoint,omitempty"` @@ -70,7 +70,7 @@ type SQLPoolExtendedAuditingPolicyParameters struct { RetentionInDays *float64 `json:"retentionInDays,omitempty" tf:"retention_in_days,omitempty"` // The ID of the Synapse SQL pool to set the extended auditing policy. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/synapse/v1beta1.SQLPool + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/synapse/v1beta2.SQLPool // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional SQLPoolID *string `json:"sqlPoolId,omitempty" tf:"sql_pool_id,omitempty"` @@ -92,7 +92,7 @@ type SQLPoolExtendedAuditingPolicyParameters struct { StorageAccountAccessKeySecretRef *v1.SecretKeySelector `json:"storageAccountAccessKeySecretRef,omitempty" tf:"-"` // The blob storage endpoint (e.g. https://example.blob.core.windows.net). This blob storage will hold all extended auditing logs. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("primary_blob_endpoint",true) // +kubebuilder:validation:Optional StorageEndpoint *string `json:"storageEndpoint,omitempty" tf:"storage_endpoint,omitempty"` diff --git a/apis/synapse/v1beta1/zz_sqlpoolsecurityalertpolicy_types.go b/apis/synapse/v1beta1/zz_sqlpoolsecurityalertpolicy_types.go index d09f83a1f..96696c979 100755 --- a/apis/synapse/v1beta1/zz_sqlpoolsecurityalertpolicy_types.go +++ b/apis/synapse/v1beta1/zz_sqlpoolsecurityalertpolicy_types.go @@ -33,7 +33,7 @@ type SQLPoolSecurityAlertPolicyInitParameters struct { RetentionDays *float64 `json:"retentionDays,omitempty" tf:"retention_days,omitempty"` // Specifies the blob storage endpoint (e.g. https://example.blob.core.windows.net). This blob storage will hold all Threat Detection audit logs. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("primary_blob_endpoint",true) StorageEndpoint *string `json:"storageEndpoint,omitempty" tf:"storage_endpoint,omitempty"` @@ -100,7 +100,7 @@ type SQLPoolSecurityAlertPolicyParameters struct { RetentionDays *float64 `json:"retentionDays,omitempty" tf:"retention_days,omitempty"` // Specifies the ID of the Synapse SQL Pool. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/synapse/v1beta1.SQLPool + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/synapse/v1beta2.SQLPool // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional SQLPoolID *string `json:"sqlPoolId,omitempty" tf:"sql_pool_id,omitempty"` @@ -118,7 +118,7 @@ type SQLPoolSecurityAlertPolicyParameters struct { StorageAccountAccessKeySecretRef *v1.SecretKeySelector `json:"storageAccountAccessKeySecretRef,omitempty" tf:"-"` // Specifies the blob storage endpoint (e.g. https://example.blob.core.windows.net). This blob storage will hold all Threat Detection audit logs. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("primary_blob_endpoint",true) // +kubebuilder:validation:Optional StorageEndpoint *string `json:"storageEndpoint,omitempty" tf:"storage_endpoint,omitempty"` diff --git a/apis/synapse/v1beta1/zz_sqlpoolworkloadgroup_types.go b/apis/synapse/v1beta1/zz_sqlpoolworkloadgroup_types.go index ebcd187a9..1b38a1ad8 100755 --- a/apis/synapse/v1beta1/zz_sqlpoolworkloadgroup_types.go +++ b/apis/synapse/v1beta1/zz_sqlpoolworkloadgroup_types.go @@ -88,7 +88,7 @@ type SQLPoolWorkloadGroupParameters struct { QueryExecutionTimeoutInSeconds *float64 `json:"queryExecutionTimeoutInSeconds,omitempty" tf:"query_execution_timeout_in_seconds,omitempty"` // The ID of the Synapse SQL Pool. Changing this forces a new Synapse SQL Pool Workload Group to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/synapse/v1beta1.SQLPool + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/synapse/v1beta2.SQLPool // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional SQLPoolID *string `json:"sqlPoolId,omitempty" tf:"sql_pool_id,omitempty"` diff --git a/apis/synapse/v1beta1/zz_workspaceaadadmin_types.go b/apis/synapse/v1beta1/zz_workspaceaadadmin_types.go index 44cec3dc5..7d504c514 100755 --- a/apis/synapse/v1beta1/zz_workspaceaadadmin_types.go +++ b/apis/synapse/v1beta1/zz_workspaceaadadmin_types.go @@ -52,7 +52,7 @@ type WorkspaceAADAdminParameters struct { ObjectID *string `json:"objectId,omitempty" tf:"object_id,omitempty"` // The ID of the Synapse Workspace where the Azure AD Administrator should be configured. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/synapse/v1beta1.Workspace + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/synapse/v1beta2.Workspace // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional SynapseWorkspaceID *string `json:"synapseWorkspaceId,omitempty" tf:"synapse_workspace_id,omitempty"` diff --git a/apis/synapse/v1beta1/zz_workspaceextendedauditingpolicy_types.go b/apis/synapse/v1beta1/zz_workspaceextendedauditingpolicy_types.go index 41f0d86c0..7096596b5 100755 --- a/apis/synapse/v1beta1/zz_workspaceextendedauditingpolicy_types.go +++ b/apis/synapse/v1beta1/zz_workspaceextendedauditingpolicy_types.go @@ -25,7 +25,7 @@ type WorkspaceExtendedAuditingPolicyInitParameters struct { StorageAccountAccessKeyIsSecondary *bool `json:"storageAccountAccessKeyIsSecondary,omitempty" tf:"storage_account_access_key_is_secondary,omitempty"` // The blob storage endpoint (e.g. https://example.blob.core.windows.net). This blob storage will hold all extended auditing logs. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("primary_blob_endpoint",true) StorageEndpoint *string `json:"storageEndpoint,omitempty" tf:"storage_endpoint,omitempty"` @@ -78,7 +78,7 @@ type WorkspaceExtendedAuditingPolicyParameters struct { StorageAccountAccessKeySecretRef *v1.SecretKeySelector `json:"storageAccountAccessKeySecretRef,omitempty" tf:"-"` // The blob storage endpoint (e.g. https://example.blob.core.windows.net). This blob storage will hold all extended auditing logs. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("primary_blob_endpoint",true) // +kubebuilder:validation:Optional StorageEndpoint *string `json:"storageEndpoint,omitempty" tf:"storage_endpoint,omitempty"` @@ -92,7 +92,7 @@ type WorkspaceExtendedAuditingPolicyParameters struct { StorageEndpointSelector *v1.Selector `json:"storageEndpointSelector,omitempty" tf:"-"` // The ID of the Synapse workspace to set the extended auditing policy. Changing this forces a new resource to be created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/synapse/v1beta1.Workspace + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/synapse/v1beta2.Workspace // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional SynapseWorkspaceID *string `json:"synapseWorkspaceId,omitempty" tf:"synapse_workspace_id,omitempty"` diff --git a/apis/synapse/v1beta1/zz_workspacesecurityalertpolicy_types.go b/apis/synapse/v1beta1/zz_workspacesecurityalertpolicy_types.go index 1f53164c8..0e3e29d03 100755 --- a/apis/synapse/v1beta1/zz_workspacesecurityalertpolicy_types.go +++ b/apis/synapse/v1beta1/zz_workspacesecurityalertpolicy_types.go @@ -33,7 +33,7 @@ type WorkspaceSecurityAlertPolicyInitParameters struct { RetentionDays *float64 `json:"retentionDays,omitempty" tf:"retention_days,omitempty"` // Specifies the blob storage endpoint (e.g. https://example.blob.core.windows.net). This blob storage will hold all Threat Detection audit logs. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("primary_blob_endpoint",true) StorageEndpoint *string `json:"storageEndpoint,omitempty" tf:"storage_endpoint,omitempty"` @@ -104,7 +104,7 @@ type WorkspaceSecurityAlertPolicyParameters struct { StorageAccountAccessKeySecretRef *v1.SecretKeySelector `json:"storageAccountAccessKeySecretRef,omitempty" tf:"-"` // Specifies the blob storage endpoint (e.g. https://example.blob.core.windows.net). This blob storage will hold all Threat Detection audit logs. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.Account + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("primary_blob_endpoint",true) // +kubebuilder:validation:Optional StorageEndpoint *string `json:"storageEndpoint,omitempty" tf:"storage_endpoint,omitempty"` @@ -118,7 +118,7 @@ type WorkspaceSecurityAlertPolicyParameters struct { StorageEndpointSelector *v1.Selector `json:"storageEndpointSelector,omitempty" tf:"-"` // Specifies the ID of the Synapse Workspace. Changing this forces a new resource to be created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/synapse/v1beta1.Workspace + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/synapse/v1beta2.Workspace // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional SynapseWorkspaceID *string `json:"synapseWorkspaceId,omitempty" tf:"synapse_workspace_id,omitempty"` diff --git a/apis/synapse/v1beta1/zz_workspacesqlaadadmin_types.go b/apis/synapse/v1beta1/zz_workspacesqlaadadmin_types.go index 97c0c6372..b4175ff42 100755 --- a/apis/synapse/v1beta1/zz_workspacesqlaadadmin_types.go +++ b/apis/synapse/v1beta1/zz_workspacesqlaadadmin_types.go @@ -22,7 +22,7 @@ type WorkspaceSQLAADAdminInitParameters struct { ObjectID *string `json:"objectId,omitempty" tf:"object_id,omitempty"` // The ID of the Synapse Workspace where the Azure AD Administrator should be configured. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/synapse/v1beta1.Workspace + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/synapse/v1beta2.Workspace // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() SynapseWorkspaceID *string `json:"synapseWorkspaceId,omitempty" tf:"synapse_workspace_id,omitempty"` @@ -65,7 +65,7 @@ type WorkspaceSQLAADAdminParameters struct { ObjectID *string `json:"objectId,omitempty" tf:"object_id,omitempty"` // The ID of the Synapse Workspace where the Azure AD Administrator should be configured. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/synapse/v1beta1.Workspace + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/synapse/v1beta2.Workspace // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional SynapseWorkspaceID *string `json:"synapseWorkspaceId,omitempty" tf:"synapse_workspace_id,omitempty"` diff --git a/apis/synapse/v1beta2/zz_generated.conversion_hubs.go b/apis/synapse/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 000000000..b41339b34 --- /dev/null +++ b/apis/synapse/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,22 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *LinkedService) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *SparkPool) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *SQLPool) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *Workspace) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *WorkspaceVulnerabilityAssessment) Hub() {} diff --git a/apis/synapse/v1beta2/zz_generated.deepcopy.go b/apis/synapse/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..6076789b3 --- /dev/null +++ b/apis/synapse/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,3237 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AADAdminInitParameters) DeepCopyInto(out *AADAdminInitParameters) { + *out = *in + if in.Login != nil { + in, out := &in.Login, &out.Login + *out = new(string) + **out = **in + } + if in.ObjectID != nil { + in, out := &in.ObjectID, &out.ObjectID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AADAdminInitParameters. +func (in *AADAdminInitParameters) DeepCopy() *AADAdminInitParameters { + if in == nil { + return nil + } + out := new(AADAdminInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AADAdminObservation) DeepCopyInto(out *AADAdminObservation) { + *out = *in + if in.Login != nil { + in, out := &in.Login, &out.Login + *out = new(string) + **out = **in + } + if in.ObjectID != nil { + in, out := &in.ObjectID, &out.ObjectID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AADAdminObservation. +func (in *AADAdminObservation) DeepCopy() *AADAdminObservation { + if in == nil { + return nil + } + out := new(AADAdminObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AADAdminParameters) DeepCopyInto(out *AADAdminParameters) { + *out = *in + if in.Login != nil { + in, out := &in.Login, &out.Login + *out = new(string) + **out = **in + } + if in.ObjectID != nil { + in, out := &in.ObjectID, &out.ObjectID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AADAdminParameters. +func (in *AADAdminParameters) DeepCopy() *AADAdminParameters { + if in == nil { + return nil + } + out := new(AADAdminParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoPauseInitParameters) DeepCopyInto(out *AutoPauseInitParameters) { + *out = *in + if in.DelayInMinutes != nil { + in, out := &in.DelayInMinutes, &out.DelayInMinutes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoPauseInitParameters. +func (in *AutoPauseInitParameters) DeepCopy() *AutoPauseInitParameters { + if in == nil { + return nil + } + out := new(AutoPauseInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoPauseObservation) DeepCopyInto(out *AutoPauseObservation) { + *out = *in + if in.DelayInMinutes != nil { + in, out := &in.DelayInMinutes, &out.DelayInMinutes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoPauseObservation. 
+func (in *AutoPauseObservation) DeepCopy() *AutoPauseObservation { + if in == nil { + return nil + } + out := new(AutoPauseObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoPauseParameters) DeepCopyInto(out *AutoPauseParameters) { + *out = *in + if in.DelayInMinutes != nil { + in, out := &in.DelayInMinutes, &out.DelayInMinutes + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoPauseParameters. +func (in *AutoPauseParameters) DeepCopy() *AutoPauseParameters { + if in == nil { + return nil + } + out := new(AutoPauseParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoScaleInitParameters) DeepCopyInto(out *AutoScaleInitParameters) { + *out = *in + if in.MaxNodeCount != nil { + in, out := &in.MaxNodeCount, &out.MaxNodeCount + *out = new(float64) + **out = **in + } + if in.MinNodeCount != nil { + in, out := &in.MinNodeCount, &out.MinNodeCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoScaleInitParameters. +func (in *AutoScaleInitParameters) DeepCopy() *AutoScaleInitParameters { + if in == nil { + return nil + } + out := new(AutoScaleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutoScaleObservation) DeepCopyInto(out *AutoScaleObservation) { + *out = *in + if in.MaxNodeCount != nil { + in, out := &in.MaxNodeCount, &out.MaxNodeCount + *out = new(float64) + **out = **in + } + if in.MinNodeCount != nil { + in, out := &in.MinNodeCount, &out.MinNodeCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoScaleObservation. +func (in *AutoScaleObservation) DeepCopy() *AutoScaleObservation { + if in == nil { + return nil + } + out := new(AutoScaleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoScaleParameters) DeepCopyInto(out *AutoScaleParameters) { + *out = *in + if in.MaxNodeCount != nil { + in, out := &in.MaxNodeCount, &out.MaxNodeCount + *out = new(float64) + **out = **in + } + if in.MinNodeCount != nil { + in, out := &in.MinNodeCount, &out.MinNodeCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoScaleParameters. +func (in *AutoScaleParameters) DeepCopy() *AutoScaleParameters { + if in == nil { + return nil + } + out := new(AutoScaleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AzureDevopsRepoInitParameters) DeepCopyInto(out *AzureDevopsRepoInitParameters) { + *out = *in + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } + if in.BranchName != nil { + in, out := &in.BranchName, &out.BranchName + *out = new(string) + **out = **in + } + if in.LastCommitID != nil { + in, out := &in.LastCommitID, &out.LastCommitID + *out = new(string) + **out = **in + } + if in.ProjectName != nil { + in, out := &in.ProjectName, &out.ProjectName + *out = new(string) + **out = **in + } + if in.RepositoryName != nil { + in, out := &in.RepositoryName, &out.RepositoryName + *out = new(string) + **out = **in + } + if in.RootFolder != nil { + in, out := &in.RootFolder, &out.RootFolder + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureDevopsRepoInitParameters. +func (in *AzureDevopsRepoInitParameters) DeepCopy() *AzureDevopsRepoInitParameters { + if in == nil { + return nil + } + out := new(AzureDevopsRepoInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AzureDevopsRepoObservation) DeepCopyInto(out *AzureDevopsRepoObservation) { + *out = *in + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } + if in.BranchName != nil { + in, out := &in.BranchName, &out.BranchName + *out = new(string) + **out = **in + } + if in.LastCommitID != nil { + in, out := &in.LastCommitID, &out.LastCommitID + *out = new(string) + **out = **in + } + if in.ProjectName != nil { + in, out := &in.ProjectName, &out.ProjectName + *out = new(string) + **out = **in + } + if in.RepositoryName != nil { + in, out := &in.RepositoryName, &out.RepositoryName + *out = new(string) + **out = **in + } + if in.RootFolder != nil { + in, out := &in.RootFolder, &out.RootFolder + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureDevopsRepoObservation. +func (in *AzureDevopsRepoObservation) DeepCopy() *AzureDevopsRepoObservation { + if in == nil { + return nil + } + out := new(AzureDevopsRepoObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AzureDevopsRepoParameters) DeepCopyInto(out *AzureDevopsRepoParameters) { + *out = *in + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } + if in.BranchName != nil { + in, out := &in.BranchName, &out.BranchName + *out = new(string) + **out = **in + } + if in.LastCommitID != nil { + in, out := &in.LastCommitID, &out.LastCommitID + *out = new(string) + **out = **in + } + if in.ProjectName != nil { + in, out := &in.ProjectName, &out.ProjectName + *out = new(string) + **out = **in + } + if in.RepositoryName != nil { + in, out := &in.RepositoryName, &out.RepositoryName + *out = new(string) + **out = **in + } + if in.RootFolder != nil { + in, out := &in.RootFolder, &out.RootFolder + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureDevopsRepoParameters. +func (in *AzureDevopsRepoParameters) DeepCopy() *AzureDevopsRepoParameters { + if in == nil { + return nil + } + out := new(AzureDevopsRepoParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomerManagedKeyInitParameters) DeepCopyInto(out *CustomerManagedKeyInitParameters) { + *out = *in + if in.KeyName != nil { + in, out := &in.KeyName, &out.KeyName + *out = new(string) + **out = **in + } + if in.KeyVersionlessID != nil { + in, out := &in.KeyVersionlessID, &out.KeyVersionlessID + *out = new(string) + **out = **in + } + if in.KeyVersionlessIDRef != nil { + in, out := &in.KeyVersionlessIDRef, &out.KeyVersionlessIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KeyVersionlessIDSelector != nil { + in, out := &in.KeyVersionlessIDSelector, &out.KeyVersionlessIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.UserAssignedIdentityID != nil { + in, out := &in.UserAssignedIdentityID, &out.UserAssignedIdentityID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomerManagedKeyInitParameters. +func (in *CustomerManagedKeyInitParameters) DeepCopy() *CustomerManagedKeyInitParameters { + if in == nil { + return nil + } + out := new(CustomerManagedKeyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomerManagedKeyObservation) DeepCopyInto(out *CustomerManagedKeyObservation) { + *out = *in + if in.KeyName != nil { + in, out := &in.KeyName, &out.KeyName + *out = new(string) + **out = **in + } + if in.KeyVersionlessID != nil { + in, out := &in.KeyVersionlessID, &out.KeyVersionlessID + *out = new(string) + **out = **in + } + if in.UserAssignedIdentityID != nil { + in, out := &in.UserAssignedIdentityID, &out.UserAssignedIdentityID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomerManagedKeyObservation. 
+func (in *CustomerManagedKeyObservation) DeepCopy() *CustomerManagedKeyObservation { + if in == nil { + return nil + } + out := new(CustomerManagedKeyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomerManagedKeyParameters) DeepCopyInto(out *CustomerManagedKeyParameters) { + *out = *in + if in.KeyName != nil { + in, out := &in.KeyName, &out.KeyName + *out = new(string) + **out = **in + } + if in.KeyVersionlessID != nil { + in, out := &in.KeyVersionlessID, &out.KeyVersionlessID + *out = new(string) + **out = **in + } + if in.KeyVersionlessIDRef != nil { + in, out := &in.KeyVersionlessIDRef, &out.KeyVersionlessIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.KeyVersionlessIDSelector != nil { + in, out := &in.KeyVersionlessIDSelector, &out.KeyVersionlessIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.UserAssignedIdentityID != nil { + in, out := &in.UserAssignedIdentityID, &out.UserAssignedIdentityID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomerManagedKeyParameters. +func (in *CustomerManagedKeyParameters) DeepCopy() *CustomerManagedKeyParameters { + if in == nil { + return nil + } + out := new(CustomerManagedKeyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GithubRepoInitParameters) DeepCopyInto(out *GithubRepoInitParameters) { + *out = *in + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } + if in.BranchName != nil { + in, out := &in.BranchName, &out.BranchName + *out = new(string) + **out = **in + } + if in.GitURL != nil { + in, out := &in.GitURL, &out.GitURL + *out = new(string) + **out = **in + } + if in.LastCommitID != nil { + in, out := &in.LastCommitID, &out.LastCommitID + *out = new(string) + **out = **in + } + if in.RepositoryName != nil { + in, out := &in.RepositoryName, &out.RepositoryName + *out = new(string) + **out = **in + } + if in.RootFolder != nil { + in, out := &in.RootFolder, &out.RootFolder + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GithubRepoInitParameters. +func (in *GithubRepoInitParameters) DeepCopy() *GithubRepoInitParameters { + if in == nil { + return nil + } + out := new(GithubRepoInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GithubRepoObservation) DeepCopyInto(out *GithubRepoObservation) { + *out = *in + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } + if in.BranchName != nil { + in, out := &in.BranchName, &out.BranchName + *out = new(string) + **out = **in + } + if in.GitURL != nil { + in, out := &in.GitURL, &out.GitURL + *out = new(string) + **out = **in + } + if in.LastCommitID != nil { + in, out := &in.LastCommitID, &out.LastCommitID + *out = new(string) + **out = **in + } + if in.RepositoryName != nil { + in, out := &in.RepositoryName, &out.RepositoryName + *out = new(string) + **out = **in + } + if in.RootFolder != nil { + in, out := &in.RootFolder, &out.RootFolder + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GithubRepoObservation. +func (in *GithubRepoObservation) DeepCopy() *GithubRepoObservation { + if in == nil { + return nil + } + out := new(GithubRepoObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GithubRepoParameters) DeepCopyInto(out *GithubRepoParameters) { + *out = *in + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } + if in.BranchName != nil { + in, out := &in.BranchName, &out.BranchName + *out = new(string) + **out = **in + } + if in.GitURL != nil { + in, out := &in.GitURL, &out.GitURL + *out = new(string) + **out = **in + } + if in.LastCommitID != nil { + in, out := &in.LastCommitID, &out.LastCommitID + *out = new(string) + **out = **in + } + if in.RepositoryName != nil { + in, out := &in.RepositoryName, &out.RepositoryName + *out = new(string) + **out = **in + } + if in.RootFolder != nil { + in, out := &in.RootFolder, &out.RootFolder + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GithubRepoParameters. +func (in *GithubRepoParameters) DeepCopy() *GithubRepoParameters { + if in == nil { + return nil + } + out := new(GithubRepoParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityInitParameters) DeepCopyInto(out *IdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityInitParameters. 
+func (in *IdentityInitParameters) DeepCopy() *IdentityInitParameters { + if in == nil { + return nil + } + out := new(IdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityObservation) DeepCopyInto(out *IdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityObservation. +func (in *IdentityObservation) DeepCopy() *IdentityObservation { + if in == nil { + return nil + } + out := new(IdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityParameters) DeepCopyInto(out *IdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityParameters. 
+func (in *IdentityParameters) DeepCopy() *IdentityParameters { + if in == nil { + return nil + } + out := new(IdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IntegrationRuntimeInitParameters) DeepCopyInto(out *IntegrationRuntimeInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NameRef != nil { + in, out := &in.NameRef, &out.NameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NameSelector != nil { + in, out := &in.NameSelector, &out.NameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationRuntimeInitParameters. +func (in *IntegrationRuntimeInitParameters) DeepCopy() *IntegrationRuntimeInitParameters { + if in == nil { + return nil + } + out := new(IntegrationRuntimeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IntegrationRuntimeObservation) DeepCopyInto(out *IntegrationRuntimeObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationRuntimeObservation. +func (in *IntegrationRuntimeObservation) DeepCopy() *IntegrationRuntimeObservation { + if in == nil { + return nil + } + out := new(IntegrationRuntimeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IntegrationRuntimeParameters) DeepCopyInto(out *IntegrationRuntimeParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NameRef != nil { + in, out := &in.NameRef, &out.NameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NameSelector != nil { + in, out := &in.NameSelector, &out.NameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationRuntimeParameters. 
+func (in *IntegrationRuntimeParameters) DeepCopy() *IntegrationRuntimeParameters { + if in == nil { + return nil + } + out := new(IntegrationRuntimeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LibraryRequirementInitParameters) DeepCopyInto(out *LibraryRequirementInitParameters) { + *out = *in + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = new(string) + **out = **in + } + if in.Filename != nil { + in, out := &in.Filename, &out.Filename + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LibraryRequirementInitParameters. +func (in *LibraryRequirementInitParameters) DeepCopy() *LibraryRequirementInitParameters { + if in == nil { + return nil + } + out := new(LibraryRequirementInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LibraryRequirementObservation) DeepCopyInto(out *LibraryRequirementObservation) { + *out = *in + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = new(string) + **out = **in + } + if in.Filename != nil { + in, out := &in.Filename, &out.Filename + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LibraryRequirementObservation. +func (in *LibraryRequirementObservation) DeepCopy() *LibraryRequirementObservation { + if in == nil { + return nil + } + out := new(LibraryRequirementObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LibraryRequirementParameters) DeepCopyInto(out *LibraryRequirementParameters) { + *out = *in + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = new(string) + **out = **in + } + if in.Filename != nil { + in, out := &in.Filename, &out.Filename + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LibraryRequirementParameters. +func (in *LibraryRequirementParameters) DeepCopy() *LibraryRequirementParameters { + if in == nil { + return nil + } + out := new(LibraryRequirementParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkedService) DeepCopyInto(out *LinkedService) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedService. +func (in *LinkedService) DeepCopy() *LinkedService { + if in == nil { + return nil + } + out := new(LinkedService) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LinkedService) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinkedServiceInitParameters) DeepCopyInto(out *LinkedServiceInitParameters) { + *out = *in + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.IntegrationRuntime != nil { + in, out := &in.IntegrationRuntime, &out.IntegrationRuntime + *out = new(IntegrationRuntimeInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.TypePropertiesJSON != nil { + in, out := &in.TypePropertiesJSON, &out.TypePropertiesJSON + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceInitParameters. +func (in *LinkedServiceInitParameters) DeepCopy() *LinkedServiceInitParameters { + if in == nil { + return nil + } + out := new(LinkedServiceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *LinkedServiceList) DeepCopyInto(out *LinkedServiceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LinkedService, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceList. +func (in *LinkedServiceList) DeepCopy() *LinkedServiceList { + if in == nil { + return nil + } + out := new(LinkedServiceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LinkedServiceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkedServiceObservation) DeepCopyInto(out *LinkedServiceObservation) { + *out = *in + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IntegrationRuntime != nil { + in, out := &in.IntegrationRuntime, &out.IntegrationRuntime + *out = 
new(IntegrationRuntimeObservation) + (*in).DeepCopyInto(*out) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.SynapseWorkspaceID != nil { + in, out := &in.SynapseWorkspaceID, &out.SynapseWorkspaceID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.TypePropertiesJSON != nil { + in, out := &in.TypePropertiesJSON, &out.TypePropertiesJSON + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceObservation. +func (in *LinkedServiceObservation) DeepCopy() *LinkedServiceObservation { + if in == nil { + return nil + } + out := new(LinkedServiceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinkedServiceParameters) DeepCopyInto(out *LinkedServiceParameters) { + *out = *in + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.IntegrationRuntime != nil { + in, out := &in.IntegrationRuntime, &out.IntegrationRuntime + *out = new(IntegrationRuntimeParameters) + (*in).DeepCopyInto(*out) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.SynapseWorkspaceID != nil { + in, out := &in.SynapseWorkspaceID, &out.SynapseWorkspaceID + *out = new(string) + **out = **in + } + if in.SynapseWorkspaceIDRef != nil { + in, out := &in.SynapseWorkspaceIDRef, &out.SynapseWorkspaceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SynapseWorkspaceIDSelector != nil { + in, out := &in.SynapseWorkspaceIDSelector, &out.SynapseWorkspaceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.TypePropertiesJSON != nil { + in, out := &in.TypePropertiesJSON, &out.TypePropertiesJSON 
+ *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceParameters. +func (in *LinkedServiceParameters) DeepCopy() *LinkedServiceParameters { + if in == nil { + return nil + } + out := new(LinkedServiceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkedServiceSpec) DeepCopyInto(out *LinkedServiceSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceSpec. +func (in *LinkedServiceSpec) DeepCopy() *LinkedServiceSpec { + if in == nil { + return nil + } + out := new(LinkedServiceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinkedServiceStatus) DeepCopyInto(out *LinkedServiceStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkedServiceStatus. +func (in *LinkedServiceStatus) DeepCopy() *LinkedServiceStatus { + if in == nil { + return nil + } + out := new(LinkedServiceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RecurringScansInitParameters) DeepCopyInto(out *RecurringScansInitParameters) { + *out = *in + if in.EmailSubscriptionAdminsEnabled != nil { + in, out := &in.EmailSubscriptionAdminsEnabled, &out.EmailSubscriptionAdminsEnabled + *out = new(bool) + **out = **in + } + if in.Emails != nil { + in, out := &in.Emails, &out.Emails + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecurringScansInitParameters. +func (in *RecurringScansInitParameters) DeepCopy() *RecurringScansInitParameters { + if in == nil { + return nil + } + out := new(RecurringScansInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecurringScansObservation) DeepCopyInto(out *RecurringScansObservation) { + *out = *in + if in.EmailSubscriptionAdminsEnabled != nil { + in, out := &in.EmailSubscriptionAdminsEnabled, &out.EmailSubscriptionAdminsEnabled + *out = new(bool) + **out = **in + } + if in.Emails != nil { + in, out := &in.Emails, &out.Emails + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecurringScansObservation. 
+func (in *RecurringScansObservation) DeepCopy() *RecurringScansObservation { + if in == nil { + return nil + } + out := new(RecurringScansObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecurringScansParameters) DeepCopyInto(out *RecurringScansParameters) { + *out = *in + if in.EmailSubscriptionAdminsEnabled != nil { + in, out := &in.EmailSubscriptionAdminsEnabled, &out.EmailSubscriptionAdminsEnabled + *out = new(bool) + **out = **in + } + if in.Emails != nil { + in, out := &in.Emails, &out.Emails + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecurringScansParameters. +func (in *RecurringScansParameters) DeepCopy() *RecurringScansParameters { + if in == nil { + return nil + } + out := new(RecurringScansParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RestoreInitParameters) DeepCopyInto(out *RestoreInitParameters) { + *out = *in + if in.PointInTime != nil { + in, out := &in.PointInTime, &out.PointInTime + *out = new(string) + **out = **in + } + if in.SourceDatabaseID != nil { + in, out := &in.SourceDatabaseID, &out.SourceDatabaseID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreInitParameters. 
+func (in *RestoreInitParameters) DeepCopy() *RestoreInitParameters { + if in == nil { + return nil + } + out := new(RestoreInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RestoreObservation) DeepCopyInto(out *RestoreObservation) { + *out = *in + if in.PointInTime != nil { + in, out := &in.PointInTime, &out.PointInTime + *out = new(string) + **out = **in + } + if in.SourceDatabaseID != nil { + in, out := &in.SourceDatabaseID, &out.SourceDatabaseID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreObservation. +func (in *RestoreObservation) DeepCopy() *RestoreObservation { + if in == nil { + return nil + } + out := new(RestoreObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RestoreParameters) DeepCopyInto(out *RestoreParameters) { + *out = *in + if in.PointInTime != nil { + in, out := &in.PointInTime, &out.PointInTime + *out = new(string) + **out = **in + } + if in.SourceDatabaseID != nil { + in, out := &in.SourceDatabaseID, &out.SourceDatabaseID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreParameters. +func (in *RestoreParameters) DeepCopy() *RestoreParameters { + if in == nil { + return nil + } + out := new(RestoreParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SQLAADAdminInitParameters) DeepCopyInto(out *SQLAADAdminInitParameters) { + *out = *in + if in.Login != nil { + in, out := &in.Login, &out.Login + *out = new(string) + **out = **in + } + if in.ObjectID != nil { + in, out := &in.ObjectID, &out.ObjectID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLAADAdminInitParameters. +func (in *SQLAADAdminInitParameters) DeepCopy() *SQLAADAdminInitParameters { + if in == nil { + return nil + } + out := new(SQLAADAdminInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SQLAADAdminObservation) DeepCopyInto(out *SQLAADAdminObservation) { + *out = *in + if in.Login != nil { + in, out := &in.Login, &out.Login + *out = new(string) + **out = **in + } + if in.ObjectID != nil { + in, out := &in.ObjectID, &out.ObjectID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLAADAdminObservation. +func (in *SQLAADAdminObservation) DeepCopy() *SQLAADAdminObservation { + if in == nil { + return nil + } + out := new(SQLAADAdminObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SQLAADAdminParameters) DeepCopyInto(out *SQLAADAdminParameters) { + *out = *in + if in.Login != nil { + in, out := &in.Login, &out.Login + *out = new(string) + **out = **in + } + if in.ObjectID != nil { + in, out := &in.ObjectID, &out.ObjectID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLAADAdminParameters. +func (in *SQLAADAdminParameters) DeepCopy() *SQLAADAdminParameters { + if in == nil { + return nil + } + out := new(SQLAADAdminParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SQLPool) DeepCopyInto(out *SQLPool) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLPool. +func (in *SQLPool) DeepCopy() *SQLPool { + if in == nil { + return nil + } + out := new(SQLPool) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SQLPool) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SQLPoolInitParameters) DeepCopyInto(out *SQLPoolInitParameters) { + *out = *in + if in.Collation != nil { + in, out := &in.Collation, &out.Collation + *out = new(string) + **out = **in + } + if in.CreateMode != nil { + in, out := &in.CreateMode, &out.CreateMode + *out = new(string) + **out = **in + } + if in.DataEncrypted != nil { + in, out := &in.DataEncrypted, &out.DataEncrypted + *out = new(bool) + **out = **in + } + if in.GeoBackupPolicyEnabled != nil { + in, out := &in.GeoBackupPolicyEnabled, &out.GeoBackupPolicyEnabled + *out = new(bool) + **out = **in + } + if in.RecoveryDatabaseID != nil { + in, out := &in.RecoveryDatabaseID, &out.RecoveryDatabaseID + *out = new(string) + **out = **in + } + if in.Restore != nil { + in, out := &in.Restore, &out.Restore + *out = new(RestoreInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.StorageAccountType != nil { + in, out := &in.StorageAccountType, &out.StorageAccountType + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLPoolInitParameters. +func (in *SQLPoolInitParameters) DeepCopy() *SQLPoolInitParameters { + if in == nil { + return nil + } + out := new(SQLPoolInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SQLPoolList) DeepCopyInto(out *SQLPoolList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SQLPool, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLPoolList. +func (in *SQLPoolList) DeepCopy() *SQLPoolList { + if in == nil { + return nil + } + out := new(SQLPoolList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SQLPoolList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SQLPoolObservation) DeepCopyInto(out *SQLPoolObservation) { + *out = *in + if in.Collation != nil { + in, out := &in.Collation, &out.Collation + *out = new(string) + **out = **in + } + if in.CreateMode != nil { + in, out := &in.CreateMode, &out.CreateMode + *out = new(string) + **out = **in + } + if in.DataEncrypted != nil { + in, out := &in.DataEncrypted, &out.DataEncrypted + *out = new(bool) + **out = **in + } + if in.GeoBackupPolicyEnabled != nil { + in, out := &in.GeoBackupPolicyEnabled, &out.GeoBackupPolicyEnabled + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.RecoveryDatabaseID != nil { + in, out := &in.RecoveryDatabaseID, &out.RecoveryDatabaseID + *out = new(string) + **out = **in + } + if in.Restore != nil { + in, out := &in.Restore, &out.Restore + *out = new(RestoreObservation) + (*in).DeepCopyInto(*out) + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.StorageAccountType != nil { + in, out := 
&in.StorageAccountType, &out.StorageAccountType + *out = new(string) + **out = **in + } + if in.SynapseWorkspaceID != nil { + in, out := &in.SynapseWorkspaceID, &out.SynapseWorkspaceID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLPoolObservation. +func (in *SQLPoolObservation) DeepCopy() *SQLPoolObservation { + if in == nil { + return nil + } + out := new(SQLPoolObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SQLPoolParameters) DeepCopyInto(out *SQLPoolParameters) { + *out = *in + if in.Collation != nil { + in, out := &in.Collation, &out.Collation + *out = new(string) + **out = **in + } + if in.CreateMode != nil { + in, out := &in.CreateMode, &out.CreateMode + *out = new(string) + **out = **in + } + if in.DataEncrypted != nil { + in, out := &in.DataEncrypted, &out.DataEncrypted + *out = new(bool) + **out = **in + } + if in.GeoBackupPolicyEnabled != nil { + in, out := &in.GeoBackupPolicyEnabled, &out.GeoBackupPolicyEnabled + *out = new(bool) + **out = **in + } + if in.RecoveryDatabaseID != nil { + in, out := &in.RecoveryDatabaseID, &out.RecoveryDatabaseID + *out = new(string) + **out = **in + } + if in.Restore != nil { + in, out := &in.Restore, &out.Restore + *out = new(RestoreParameters) + (*in).DeepCopyInto(*out) + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.StorageAccountType != nil { + in, out := &in.StorageAccountType, &out.StorageAccountType + *out = 
new(string) + **out = **in + } + if in.SynapseWorkspaceID != nil { + in, out := &in.SynapseWorkspaceID, &out.SynapseWorkspaceID + *out = new(string) + **out = **in + } + if in.SynapseWorkspaceIDRef != nil { + in, out := &in.SynapseWorkspaceIDRef, &out.SynapseWorkspaceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SynapseWorkspaceIDSelector != nil { + in, out := &in.SynapseWorkspaceIDSelector, &out.SynapseWorkspaceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLPoolParameters. +func (in *SQLPoolParameters) DeepCopy() *SQLPoolParameters { + if in == nil { + return nil + } + out := new(SQLPoolParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SQLPoolSpec) DeepCopyInto(out *SQLPoolSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLPoolSpec. +func (in *SQLPoolSpec) DeepCopy() *SQLPoolSpec { + if in == nil { + return nil + } + out := new(SQLPoolSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SQLPoolStatus) DeepCopyInto(out *SQLPoolStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLPoolStatus. +func (in *SQLPoolStatus) DeepCopy() *SQLPoolStatus { + if in == nil { + return nil + } + out := new(SQLPoolStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkConfigInitParameters) DeepCopyInto(out *SparkConfigInitParameters) { + *out = *in + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = new(string) + **out = **in + } + if in.Filename != nil { + in, out := &in.Filename, &out.Filename + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkConfigInitParameters. +func (in *SparkConfigInitParameters) DeepCopy() *SparkConfigInitParameters { + if in == nil { + return nil + } + out := new(SparkConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkConfigObservation) DeepCopyInto(out *SparkConfigObservation) { + *out = *in + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = new(string) + **out = **in + } + if in.Filename != nil { + in, out := &in.Filename, &out.Filename + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkConfigObservation. +func (in *SparkConfigObservation) DeepCopy() *SparkConfigObservation { + if in == nil { + return nil + } + out := new(SparkConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *SparkConfigParameters) DeepCopyInto(out *SparkConfigParameters) { + *out = *in + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = new(string) + **out = **in + } + if in.Filename != nil { + in, out := &in.Filename, &out.Filename + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkConfigParameters. +func (in *SparkConfigParameters) DeepCopy() *SparkConfigParameters { + if in == nil { + return nil + } + out := new(SparkConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkPool) DeepCopyInto(out *SparkPool) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkPool. +func (in *SparkPool) DeepCopy() *SparkPool { + if in == nil { + return nil + } + out := new(SparkPool) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SparkPool) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SparkPoolInitParameters) DeepCopyInto(out *SparkPoolInitParameters) { + *out = *in + if in.AutoPause != nil { + in, out := &in.AutoPause, &out.AutoPause + *out = new(AutoPauseInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AutoScale != nil { + in, out := &in.AutoScale, &out.AutoScale + *out = new(AutoScaleInitParameters) + (*in).DeepCopyInto(*out) + } + if in.CacheSize != nil { + in, out := &in.CacheSize, &out.CacheSize + *out = new(float64) + **out = **in + } + if in.ComputeIsolationEnabled != nil { + in, out := &in.ComputeIsolationEnabled, &out.ComputeIsolationEnabled + *out = new(bool) + **out = **in + } + if in.DynamicExecutorAllocationEnabled != nil { + in, out := &in.DynamicExecutorAllocationEnabled, &out.DynamicExecutorAllocationEnabled + *out = new(bool) + **out = **in + } + if in.LibraryRequirement != nil { + in, out := &in.LibraryRequirement, &out.LibraryRequirement + *out = new(LibraryRequirementInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MaxExecutors != nil { + in, out := &in.MaxExecutors, &out.MaxExecutors + *out = new(float64) + **out = **in + } + if in.MinExecutors != nil { + in, out := &in.MinExecutors, &out.MinExecutors + *out = new(float64) + **out = **in + } + if in.NodeCount != nil { + in, out := &in.NodeCount, &out.NodeCount + *out = new(float64) + **out = **in + } + if in.NodeSize != nil { + in, out := &in.NodeSize, &out.NodeSize + *out = new(string) + **out = **in + } + if in.NodeSizeFamily != nil { + in, out := &in.NodeSizeFamily, &out.NodeSizeFamily + *out = new(string) + **out = **in + } + if in.SessionLevelPackagesEnabled != nil { + in, out := &in.SessionLevelPackagesEnabled, &out.SessionLevelPackagesEnabled + *out = new(bool) + **out = **in + } + if in.SparkConfig != nil { + in, out := &in.SparkConfig, &out.SparkConfig + *out = new(SparkConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SparkEventsFolder != nil { + in, out := &in.SparkEventsFolder, &out.SparkEventsFolder + *out = new(string) + **out 
= **in + } + if in.SparkLogFolder != nil { + in, out := &in.SparkLogFolder, &out.SparkLogFolder + *out = new(string) + **out = **in + } + if in.SparkVersion != nil { + in, out := &in.SparkVersion, &out.SparkVersion + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkPoolInitParameters. +func (in *SparkPoolInitParameters) DeepCopy() *SparkPoolInitParameters { + if in == nil { + return nil + } + out := new(SparkPoolInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkPoolList) DeepCopyInto(out *SparkPoolList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SparkPool, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkPoolList. +func (in *SparkPoolList) DeepCopy() *SparkPoolList { + if in == nil { + return nil + } + out := new(SparkPoolList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SparkPoolList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SparkPoolObservation) DeepCopyInto(out *SparkPoolObservation) { + *out = *in + if in.AutoPause != nil { + in, out := &in.AutoPause, &out.AutoPause + *out = new(AutoPauseObservation) + (*in).DeepCopyInto(*out) + } + if in.AutoScale != nil { + in, out := &in.AutoScale, &out.AutoScale + *out = new(AutoScaleObservation) + (*in).DeepCopyInto(*out) + } + if in.CacheSize != nil { + in, out := &in.CacheSize, &out.CacheSize + *out = new(float64) + **out = **in + } + if in.ComputeIsolationEnabled != nil { + in, out := &in.ComputeIsolationEnabled, &out.ComputeIsolationEnabled + *out = new(bool) + **out = **in + } + if in.DynamicExecutorAllocationEnabled != nil { + in, out := &in.DynamicExecutorAllocationEnabled, &out.DynamicExecutorAllocationEnabled + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.LibraryRequirement != nil { + in, out := &in.LibraryRequirement, &out.LibraryRequirement + *out = new(LibraryRequirementObservation) + (*in).DeepCopyInto(*out) + } + if in.MaxExecutors != nil { + in, out := &in.MaxExecutors, &out.MaxExecutors + *out = new(float64) + **out = **in + } + if in.MinExecutors != nil { + in, out := &in.MinExecutors, &out.MinExecutors + *out = new(float64) + **out = **in + } + if in.NodeCount != nil { + in, out := &in.NodeCount, &out.NodeCount + *out = new(float64) + **out = **in + } + if in.NodeSize != nil { + in, out := &in.NodeSize, &out.NodeSize + *out = new(string) + **out = **in + } + if in.NodeSizeFamily != nil { + in, out := &in.NodeSizeFamily, &out.NodeSizeFamily + *out = new(string) + **out = **in + } + if in.SessionLevelPackagesEnabled != nil { + in, out := &in.SessionLevelPackagesEnabled, &out.SessionLevelPackagesEnabled + *out = new(bool) + **out = **in + } + if in.SparkConfig != nil { + in, out := &in.SparkConfig, &out.SparkConfig + *out = new(SparkConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.SparkEventsFolder != nil { + in, out := 
&in.SparkEventsFolder, &out.SparkEventsFolder + *out = new(string) + **out = **in + } + if in.SparkLogFolder != nil { + in, out := &in.SparkLogFolder, &out.SparkLogFolder + *out = new(string) + **out = **in + } + if in.SparkVersion != nil { + in, out := &in.SparkVersion, &out.SparkVersion + *out = new(string) + **out = **in + } + if in.SynapseWorkspaceID != nil { + in, out := &in.SynapseWorkspaceID, &out.SynapseWorkspaceID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkPoolObservation. +func (in *SparkPoolObservation) DeepCopy() *SparkPoolObservation { + if in == nil { + return nil + } + out := new(SparkPoolObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SparkPoolParameters) DeepCopyInto(out *SparkPoolParameters) { + *out = *in + if in.AutoPause != nil { + in, out := &in.AutoPause, &out.AutoPause + *out = new(AutoPauseParameters) + (*in).DeepCopyInto(*out) + } + if in.AutoScale != nil { + in, out := &in.AutoScale, &out.AutoScale + *out = new(AutoScaleParameters) + (*in).DeepCopyInto(*out) + } + if in.CacheSize != nil { + in, out := &in.CacheSize, &out.CacheSize + *out = new(float64) + **out = **in + } + if in.ComputeIsolationEnabled != nil { + in, out := &in.ComputeIsolationEnabled, &out.ComputeIsolationEnabled + *out = new(bool) + **out = **in + } + if in.DynamicExecutorAllocationEnabled != nil { + in, out := &in.DynamicExecutorAllocationEnabled, &out.DynamicExecutorAllocationEnabled + *out = new(bool) + **out = **in + } + if in.LibraryRequirement != nil { + in, out := &in.LibraryRequirement, &out.LibraryRequirement + *out = new(LibraryRequirementParameters) + (*in).DeepCopyInto(*out) + } + if in.MaxExecutors != nil { + in, out := &in.MaxExecutors, &out.MaxExecutors + *out = new(float64) + **out = **in + } + if in.MinExecutors != nil { + in, out := &in.MinExecutors, &out.MinExecutors + *out = new(float64) + **out = **in + } + if in.NodeCount != nil { + in, out := &in.NodeCount, &out.NodeCount + *out = new(float64) + **out = **in + } + if in.NodeSize != nil { + in, out := &in.NodeSize, &out.NodeSize + *out = new(string) + **out = **in + } + if in.NodeSizeFamily != nil { + in, out := &in.NodeSizeFamily, &out.NodeSizeFamily + *out = new(string) + **out = **in + } + if in.SessionLevelPackagesEnabled != nil { + in, out := &in.SessionLevelPackagesEnabled, &out.SessionLevelPackagesEnabled + *out = new(bool) + **out = **in + } + if in.SparkConfig != nil { + in, out := &in.SparkConfig, &out.SparkConfig + *out = new(SparkConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.SparkEventsFolder != nil { + in, out := &in.SparkEventsFolder, &out.SparkEventsFolder + *out = new(string) + **out = **in + } + if 
in.SparkLogFolder != nil { + in, out := &in.SparkLogFolder, &out.SparkLogFolder + *out = new(string) + **out = **in + } + if in.SparkVersion != nil { + in, out := &in.SparkVersion, &out.SparkVersion + *out = new(string) + **out = **in + } + if in.SynapseWorkspaceID != nil { + in, out := &in.SynapseWorkspaceID, &out.SynapseWorkspaceID + *out = new(string) + **out = **in + } + if in.SynapseWorkspaceIDRef != nil { + in, out := &in.SynapseWorkspaceIDRef, &out.SynapseWorkspaceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.SynapseWorkspaceIDSelector != nil { + in, out := &in.SynapseWorkspaceIDSelector, &out.SynapseWorkspaceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkPoolParameters. +func (in *SparkPoolParameters) DeepCopy() *SparkPoolParameters { + if in == nil { + return nil + } + out := new(SparkPoolParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkPoolSpec) DeepCopyInto(out *SparkPoolSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkPoolSpec. 
+func (in *SparkPoolSpec) DeepCopy() *SparkPoolSpec { + if in == nil { + return nil + } + out := new(SparkPoolSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkPoolStatus) DeepCopyInto(out *SparkPoolStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkPoolStatus. +func (in *SparkPoolStatus) DeepCopy() *SparkPoolStatus { + if in == nil { + return nil + } + out := new(SparkPoolStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Workspace) DeepCopyInto(out *Workspace) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Workspace. +func (in *Workspace) DeepCopy() *Workspace { + if in == nil { + return nil + } + out := new(Workspace) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Workspace) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WorkspaceInitParameters) DeepCopyInto(out *WorkspaceInitParameters) { + *out = *in + if in.AADAdmin != nil { + in, out := &in.AADAdmin, &out.AADAdmin + *out = make([]AADAdminInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AzureDevopsRepo != nil { + in, out := &in.AzureDevopsRepo, &out.AzureDevopsRepo + *out = new(AzureDevopsRepoInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AzureadAuthenticationOnly != nil { + in, out := &in.AzureadAuthenticationOnly, &out.AzureadAuthenticationOnly + *out = new(bool) + **out = **in + } + if in.ComputeSubnetID != nil { + in, out := &in.ComputeSubnetID, &out.ComputeSubnetID + *out = new(string) + **out = **in + } + if in.ComputeSubnetIDRef != nil { + in, out := &in.ComputeSubnetIDRef, &out.ComputeSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ComputeSubnetIDSelector != nil { + in, out := &in.ComputeSubnetIDSelector, &out.ComputeSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.CustomerManagedKey != nil { + in, out := &in.CustomerManagedKey, &out.CustomerManagedKey + *out = new(CustomerManagedKeyInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DataExfiltrationProtectionEnabled != nil { + in, out := &in.DataExfiltrationProtectionEnabled, &out.DataExfiltrationProtectionEnabled + *out = new(bool) + **out = **in + } + if in.GithubRepo != nil { + in, out := &in.GithubRepo, &out.GithubRepo + *out = new(GithubRepoInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.LinkingAllowedForAADTenantIds != nil { + in, out := &in.LinkingAllowedForAADTenantIds, &out.LinkingAllowedForAADTenantIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Location != nil 
{ + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ManagedResourceGroupName != nil { + in, out := &in.ManagedResourceGroupName, &out.ManagedResourceGroupName + *out = new(string) + **out = **in + } + if in.ManagedResourceGroupNameRef != nil { + in, out := &in.ManagedResourceGroupNameRef, &out.ManagedResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ManagedResourceGroupNameSelector != nil { + in, out := &in.ManagedResourceGroupNameSelector, &out.ManagedResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ManagedVirtualNetworkEnabled != nil { + in, out := &in.ManagedVirtualNetworkEnabled, &out.ManagedVirtualNetworkEnabled + *out = new(bool) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.PurviewID != nil { + in, out := &in.PurviewID, &out.PurviewID + *out = new(string) + **out = **in + } + if in.SQLAADAdmin != nil { + in, out := &in.SQLAADAdmin, &out.SQLAADAdmin + *out = make([]SQLAADAdminInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SQLAdministratorLogin != nil { + in, out := &in.SQLAdministratorLogin, &out.SQLAdministratorLogin + *out = new(string) + **out = **in + } + if in.SQLIdentityControlEnabled != nil { + in, out := &in.SQLIdentityControlEnabled, &out.SQLIdentityControlEnabled + *out = new(bool) + **out = **in + } + if in.StorageDataLakeGen2FileSystemID != nil { + in, out := &in.StorageDataLakeGen2FileSystemID, &out.StorageDataLakeGen2FileSystemID + *out = new(string) + **out = **in + } + if in.StorageDataLakeGen2FileSystemIDRef != nil { + in, out := &in.StorageDataLakeGen2FileSystemIDRef, &out.StorageDataLakeGen2FileSystemIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StorageDataLakeGen2FileSystemIDSelector != nil { + in, out := 
&in.StorageDataLakeGen2FileSystemIDSelector, &out.StorageDataLakeGen2FileSystemIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceInitParameters. +func (in *WorkspaceInitParameters) DeepCopy() *WorkspaceInitParameters { + if in == nil { + return nil + } + out := new(WorkspaceInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkspaceList) DeepCopyInto(out *WorkspaceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Workspace, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceList. +func (in *WorkspaceList) DeepCopy() *WorkspaceList { + if in == nil { + return nil + } + out := new(WorkspaceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WorkspaceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WorkspaceObservation) DeepCopyInto(out *WorkspaceObservation) { + *out = *in + if in.AADAdmin != nil { + in, out := &in.AADAdmin, &out.AADAdmin + *out = make([]AADAdminObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AzureDevopsRepo != nil { + in, out := &in.AzureDevopsRepo, &out.AzureDevopsRepo + *out = new(AzureDevopsRepoObservation) + (*in).DeepCopyInto(*out) + } + if in.AzureadAuthenticationOnly != nil { + in, out := &in.AzureadAuthenticationOnly, &out.AzureadAuthenticationOnly + *out = new(bool) + **out = **in + } + if in.ComputeSubnetID != nil { + in, out := &in.ComputeSubnetID, &out.ComputeSubnetID + *out = new(string) + **out = **in + } + if in.ConnectivityEndpoints != nil { + in, out := &in.ConnectivityEndpoints, &out.ConnectivityEndpoints + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.CustomerManagedKey != nil { + in, out := &in.CustomerManagedKey, &out.CustomerManagedKey + *out = new(CustomerManagedKeyObservation) + (*in).DeepCopyInto(*out) + } + if in.DataExfiltrationProtectionEnabled != nil { + in, out := &in.DataExfiltrationProtectionEnabled, &out.DataExfiltrationProtectionEnabled + *out = new(bool) + **out = **in + } + if in.GithubRepo != nil { + in, out := &in.GithubRepo, &out.GithubRepo + *out = new(GithubRepoObservation) + (*in).DeepCopyInto(*out) + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.LinkingAllowedForAADTenantIds != nil { + in, out := &in.LinkingAllowedForAADTenantIds, &out.LinkingAllowedForAADTenantIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] 
!= nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ManagedResourceGroupName != nil { + in, out := &in.ManagedResourceGroupName, &out.ManagedResourceGroupName + *out = new(string) + **out = **in + } + if in.ManagedVirtualNetworkEnabled != nil { + in, out := &in.ManagedVirtualNetworkEnabled, &out.ManagedVirtualNetworkEnabled + *out = new(bool) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.PurviewID != nil { + in, out := &in.PurviewID, &out.PurviewID + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.SQLAADAdmin != nil { + in, out := &in.SQLAADAdmin, &out.SQLAADAdmin + *out = make([]SQLAADAdminObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SQLAdministratorLogin != nil { + in, out := &in.SQLAdministratorLogin, &out.SQLAdministratorLogin + *out = new(string) + **out = **in + } + if in.SQLIdentityControlEnabled != nil { + in, out := &in.SQLIdentityControlEnabled, &out.SQLIdentityControlEnabled + *out = new(bool) + **out = **in + } + if in.StorageDataLakeGen2FileSystemID != nil { + in, out := &in.StorageDataLakeGen2FileSystemID, &out.StorageDataLakeGen2FileSystemID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, 
creating a new WorkspaceObservation. +func (in *WorkspaceObservation) DeepCopy() *WorkspaceObservation { + if in == nil { + return nil + } + out := new(WorkspaceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkspaceParameters) DeepCopyInto(out *WorkspaceParameters) { + *out = *in + if in.AADAdmin != nil { + in, out := &in.AADAdmin, &out.AADAdmin + *out = make([]AADAdminParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AzureDevopsRepo != nil { + in, out := &in.AzureDevopsRepo, &out.AzureDevopsRepo + *out = new(AzureDevopsRepoParameters) + (*in).DeepCopyInto(*out) + } + if in.AzureadAuthenticationOnly != nil { + in, out := &in.AzureadAuthenticationOnly, &out.AzureadAuthenticationOnly + *out = new(bool) + **out = **in + } + if in.ComputeSubnetID != nil { + in, out := &in.ComputeSubnetID, &out.ComputeSubnetID + *out = new(string) + **out = **in + } + if in.ComputeSubnetIDRef != nil { + in, out := &in.ComputeSubnetIDRef, &out.ComputeSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ComputeSubnetIDSelector != nil { + in, out := &in.ComputeSubnetIDSelector, &out.ComputeSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.CustomerManagedKey != nil { + in, out := &in.CustomerManagedKey, &out.CustomerManagedKey + *out = new(CustomerManagedKeyParameters) + (*in).DeepCopyInto(*out) + } + if in.DataExfiltrationProtectionEnabled != nil { + in, out := &in.DataExfiltrationProtectionEnabled, &out.DataExfiltrationProtectionEnabled + *out = new(bool) + **out = **in + } + if in.GithubRepo != nil { + in, out := &in.GithubRepo, &out.GithubRepo + *out = new(GithubRepoParameters) + (*in).DeepCopyInto(*out) + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityParameters) + (*in).DeepCopyInto(*out) + } + if 
in.LinkingAllowedForAADTenantIds != nil { + in, out := &in.LinkingAllowedForAADTenantIds, &out.LinkingAllowedForAADTenantIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ManagedResourceGroupName != nil { + in, out := &in.ManagedResourceGroupName, &out.ManagedResourceGroupName + *out = new(string) + **out = **in + } + if in.ManagedResourceGroupNameRef != nil { + in, out := &in.ManagedResourceGroupNameRef, &out.ManagedResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ManagedResourceGroupNameSelector != nil { + in, out := &in.ManagedResourceGroupNameSelector, &out.ManagedResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ManagedVirtualNetworkEnabled != nil { + in, out := &in.ManagedVirtualNetworkEnabled, &out.ManagedVirtualNetworkEnabled + *out = new(bool) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.PurviewID != nil { + in, out := &in.PurviewID, &out.PurviewID + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SQLAADAdmin != nil { + in, out := &in.SQLAADAdmin, &out.SQLAADAdmin + *out = make([]SQLAADAdminParameters, len(*in)) + for i := range *in { + 
(*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SQLAdministratorLogin != nil { + in, out := &in.SQLAdministratorLogin, &out.SQLAdministratorLogin + *out = new(string) + **out = **in + } + if in.SQLAdministratorLoginPasswordSecretRef != nil { + in, out := &in.SQLAdministratorLoginPasswordSecretRef, &out.SQLAdministratorLoginPasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.SQLIdentityControlEnabled != nil { + in, out := &in.SQLIdentityControlEnabled, &out.SQLIdentityControlEnabled + *out = new(bool) + **out = **in + } + if in.StorageDataLakeGen2FileSystemID != nil { + in, out := &in.StorageDataLakeGen2FileSystemID, &out.StorageDataLakeGen2FileSystemID + *out = new(string) + **out = **in + } + if in.StorageDataLakeGen2FileSystemIDRef != nil { + in, out := &in.StorageDataLakeGen2FileSystemIDRef, &out.StorageDataLakeGen2FileSystemIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StorageDataLakeGen2FileSystemIDSelector != nil { + in, out := &in.StorageDataLakeGen2FileSystemIDSelector, &out.StorageDataLakeGen2FileSystemIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceParameters. +func (in *WorkspaceParameters) DeepCopy() *WorkspaceParameters { + if in == nil { + return nil + } + out := new(WorkspaceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WorkspaceSpec) DeepCopyInto(out *WorkspaceSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceSpec. +func (in *WorkspaceSpec) DeepCopy() *WorkspaceSpec { + if in == nil { + return nil + } + out := new(WorkspaceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkspaceStatus) DeepCopyInto(out *WorkspaceStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceStatus. +func (in *WorkspaceStatus) DeepCopy() *WorkspaceStatus { + if in == nil { + return nil + } + out := new(WorkspaceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkspaceVulnerabilityAssessment) DeepCopyInto(out *WorkspaceVulnerabilityAssessment) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceVulnerabilityAssessment. +func (in *WorkspaceVulnerabilityAssessment) DeepCopy() *WorkspaceVulnerabilityAssessment { + if in == nil { + return nil + } + out := new(WorkspaceVulnerabilityAssessment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *WorkspaceVulnerabilityAssessment) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkspaceVulnerabilityAssessmentInitParameters) DeepCopyInto(out *WorkspaceVulnerabilityAssessmentInitParameters) { + *out = *in + if in.RecurringScans != nil { + in, out := &in.RecurringScans, &out.RecurringScans + *out = new(RecurringScansInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StorageContainerPath != nil { + in, out := &in.StorageContainerPath, &out.StorageContainerPath + *out = new(string) + **out = **in + } + if in.WorkspaceSecurityAlertPolicyID != nil { + in, out := &in.WorkspaceSecurityAlertPolicyID, &out.WorkspaceSecurityAlertPolicyID + *out = new(string) + **out = **in + } + if in.WorkspaceSecurityAlertPolicyIDRef != nil { + in, out := &in.WorkspaceSecurityAlertPolicyIDRef, &out.WorkspaceSecurityAlertPolicyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.WorkspaceSecurityAlertPolicyIDSelector != nil { + in, out := &in.WorkspaceSecurityAlertPolicyIDSelector, &out.WorkspaceSecurityAlertPolicyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceVulnerabilityAssessmentInitParameters. +func (in *WorkspaceVulnerabilityAssessmentInitParameters) DeepCopy() *WorkspaceVulnerabilityAssessmentInitParameters { + if in == nil { + return nil + } + out := new(WorkspaceVulnerabilityAssessmentInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WorkspaceVulnerabilityAssessmentList) DeepCopyInto(out *WorkspaceVulnerabilityAssessmentList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]WorkspaceVulnerabilityAssessment, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceVulnerabilityAssessmentList. +func (in *WorkspaceVulnerabilityAssessmentList) DeepCopy() *WorkspaceVulnerabilityAssessmentList { + if in == nil { + return nil + } + out := new(WorkspaceVulnerabilityAssessmentList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WorkspaceVulnerabilityAssessmentList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkspaceVulnerabilityAssessmentObservation) DeepCopyInto(out *WorkspaceVulnerabilityAssessmentObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.RecurringScans != nil { + in, out := &in.RecurringScans, &out.RecurringScans + *out = new(RecurringScansObservation) + (*in).DeepCopyInto(*out) + } + if in.StorageContainerPath != nil { + in, out := &in.StorageContainerPath, &out.StorageContainerPath + *out = new(string) + **out = **in + } + if in.WorkspaceSecurityAlertPolicyID != nil { + in, out := &in.WorkspaceSecurityAlertPolicyID, &out.WorkspaceSecurityAlertPolicyID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceVulnerabilityAssessmentObservation. 
+func (in *WorkspaceVulnerabilityAssessmentObservation) DeepCopy() *WorkspaceVulnerabilityAssessmentObservation { + if in == nil { + return nil + } + out := new(WorkspaceVulnerabilityAssessmentObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkspaceVulnerabilityAssessmentParameters) DeepCopyInto(out *WorkspaceVulnerabilityAssessmentParameters) { + *out = *in + if in.RecurringScans != nil { + in, out := &in.RecurringScans, &out.RecurringScans + *out = new(RecurringScansParameters) + (*in).DeepCopyInto(*out) + } + if in.StorageAccountAccessKeySecretRef != nil { + in, out := &in.StorageAccountAccessKeySecretRef, &out.StorageAccountAccessKeySecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.StorageContainerPath != nil { + in, out := &in.StorageContainerPath, &out.StorageContainerPath + *out = new(string) + **out = **in + } + if in.StorageContainerSASKeySecretRef != nil { + in, out := &in.StorageContainerSASKeySecretRef, &out.StorageContainerSASKeySecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.WorkspaceSecurityAlertPolicyID != nil { + in, out := &in.WorkspaceSecurityAlertPolicyID, &out.WorkspaceSecurityAlertPolicyID + *out = new(string) + **out = **in + } + if in.WorkspaceSecurityAlertPolicyIDRef != nil { + in, out := &in.WorkspaceSecurityAlertPolicyIDRef, &out.WorkspaceSecurityAlertPolicyIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.WorkspaceSecurityAlertPolicyIDSelector != nil { + in, out := &in.WorkspaceSecurityAlertPolicyIDSelector, &out.WorkspaceSecurityAlertPolicyIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceVulnerabilityAssessmentParameters. 
+func (in *WorkspaceVulnerabilityAssessmentParameters) DeepCopy() *WorkspaceVulnerabilityAssessmentParameters { + if in == nil { + return nil + } + out := new(WorkspaceVulnerabilityAssessmentParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkspaceVulnerabilityAssessmentSpec) DeepCopyInto(out *WorkspaceVulnerabilityAssessmentSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceVulnerabilityAssessmentSpec. +func (in *WorkspaceVulnerabilityAssessmentSpec) DeepCopy() *WorkspaceVulnerabilityAssessmentSpec { + if in == nil { + return nil + } + out := new(WorkspaceVulnerabilityAssessmentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkspaceVulnerabilityAssessmentStatus) DeepCopyInto(out *WorkspaceVulnerabilityAssessmentStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceVulnerabilityAssessmentStatus. 
+func (in *WorkspaceVulnerabilityAssessmentStatus) DeepCopy() *WorkspaceVulnerabilityAssessmentStatus { + if in == nil { + return nil + } + out := new(WorkspaceVulnerabilityAssessmentStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/synapse/v1beta2/zz_generated.managed.go b/apis/synapse/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..8f32b2bfc --- /dev/null +++ b/apis/synapse/v1beta2/zz_generated.managed.go @@ -0,0 +1,308 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this LinkedService. +func (mg *LinkedService) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this LinkedService. +func (mg *LinkedService) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this LinkedService. +func (mg *LinkedService) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this LinkedService. +func (mg *LinkedService) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this LinkedService. +func (mg *LinkedService) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this LinkedService. +func (mg *LinkedService) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this LinkedService. +func (mg *LinkedService) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this LinkedService. 
+func (mg *LinkedService) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this LinkedService. +func (mg *LinkedService) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this LinkedService. +func (mg *LinkedService) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this LinkedService. +func (mg *LinkedService) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this LinkedService. +func (mg *LinkedService) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this SQLPool. +func (mg *SQLPool) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this SQLPool. +func (mg *SQLPool) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this SQLPool. +func (mg *SQLPool) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this SQLPool. +func (mg *SQLPool) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this SQLPool. +func (mg *SQLPool) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this SQLPool. +func (mg *SQLPool) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this SQLPool. +func (mg *SQLPool) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this SQLPool. 
+func (mg *SQLPool) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this SQLPool. +func (mg *SQLPool) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this SQLPool. +func (mg *SQLPool) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this SQLPool. +func (mg *SQLPool) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this SQLPool. +func (mg *SQLPool) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this SparkPool. +func (mg *SparkPool) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this SparkPool. +func (mg *SparkPool) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this SparkPool. +func (mg *SparkPool) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this SparkPool. +func (mg *SparkPool) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this SparkPool. +func (mg *SparkPool) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this SparkPool. +func (mg *SparkPool) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this SparkPool. +func (mg *SparkPool) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this SparkPool. 
+func (mg *SparkPool) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this SparkPool. +func (mg *SparkPool) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this SparkPool. +func (mg *SparkPool) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this SparkPool. +func (mg *SparkPool) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this SparkPool. +func (mg *SparkPool) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Workspace. +func (mg *Workspace) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Workspace. +func (mg *Workspace) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Workspace. +func (mg *Workspace) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Workspace. +func (mg *Workspace) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Workspace. +func (mg *Workspace) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Workspace. +func (mg *Workspace) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Workspace. +func (mg *Workspace) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Workspace. 
+func (mg *Workspace) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Workspace. +func (mg *Workspace) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Workspace. +func (mg *Workspace) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Workspace. +func (mg *Workspace) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Workspace. +func (mg *Workspace) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this WorkspaceVulnerabilityAssessment. +func (mg *WorkspaceVulnerabilityAssessment) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this WorkspaceVulnerabilityAssessment. +func (mg *WorkspaceVulnerabilityAssessment) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this WorkspaceVulnerabilityAssessment. +func (mg *WorkspaceVulnerabilityAssessment) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this WorkspaceVulnerabilityAssessment. +func (mg *WorkspaceVulnerabilityAssessment) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this WorkspaceVulnerabilityAssessment. +func (mg *WorkspaceVulnerabilityAssessment) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this WorkspaceVulnerabilityAssessment. 
+func (mg *WorkspaceVulnerabilityAssessment) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this WorkspaceVulnerabilityAssessment. +func (mg *WorkspaceVulnerabilityAssessment) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this WorkspaceVulnerabilityAssessment. +func (mg *WorkspaceVulnerabilityAssessment) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this WorkspaceVulnerabilityAssessment. +func (mg *WorkspaceVulnerabilityAssessment) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this WorkspaceVulnerabilityAssessment. +func (mg *WorkspaceVulnerabilityAssessment) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this WorkspaceVulnerabilityAssessment. +func (mg *WorkspaceVulnerabilityAssessment) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this WorkspaceVulnerabilityAssessment. +func (mg *WorkspaceVulnerabilityAssessment) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/synapse/v1beta2/zz_generated.managedlist.go b/apis/synapse/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..0d05ea630 --- /dev/null +++ b/apis/synapse/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,53 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this LinkedServiceList. 
+func (l *LinkedServiceList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this SQLPoolList. +func (l *SQLPoolList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this SparkPoolList. +func (l *SparkPoolList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this WorkspaceList. +func (l *WorkspaceList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this WorkspaceVulnerabilityAssessmentList. +func (l *WorkspaceVulnerabilityAssessmentList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/synapse/v1beta2/zz_generated.resolvers.go b/apis/synapse/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..7c39756f2 --- /dev/null +++ b/apis/synapse/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,393 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + rconfig "github.com/upbound/provider-azure/apis/rconfig" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + apisresolver "github.com/upbound/provider-azure/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *LinkedService) ResolveReferences( // ResolveReferences of this LinkedService. + ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + if mg.Spec.ForProvider.IntegrationRuntime != nil { + { + m, l, err = apisresolver.GetManagedResource("synapse.azure.upbound.io", "v1beta1", "IntegrationRuntimeAzure", "IntegrationRuntimeAzureList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.IntegrationRuntime.Name), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.IntegrationRuntime.NameRef, + Selector: mg.Spec.ForProvider.IntegrationRuntime.NameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.IntegrationRuntime.Name") + } + mg.Spec.ForProvider.IntegrationRuntime.Name = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.IntegrationRuntime.NameRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("synapse.azure.upbound.io", "v1beta2", "Workspace", "WorkspaceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, 
reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SynapseWorkspaceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.SynapseWorkspaceIDRef, + Selector: mg.Spec.ForProvider.SynapseWorkspaceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SynapseWorkspaceID") + } + mg.Spec.ForProvider.SynapseWorkspaceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SynapseWorkspaceIDRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.IntegrationRuntime != nil { + { + m, l, err = apisresolver.GetManagedResource("synapse.azure.upbound.io", "v1beta1", "IntegrationRuntimeAzure", "IntegrationRuntimeAzureList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.IntegrationRuntime.Name), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.IntegrationRuntime.NameRef, + Selector: mg.Spec.InitProvider.IntegrationRuntime.NameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.IntegrationRuntime.Name") + } + mg.Spec.InitProvider.IntegrationRuntime.Name = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.IntegrationRuntime.NameRef = rsp.ResolvedReference + + } + + return nil +} + +// ResolveReferences of this SQLPool. 
+func (mg *SQLPool) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("synapse.azure.upbound.io", "v1beta2", "Workspace", "WorkspaceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SynapseWorkspaceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.SynapseWorkspaceIDRef, + Selector: mg.Spec.ForProvider.SynapseWorkspaceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SynapseWorkspaceID") + } + mg.Spec.ForProvider.SynapseWorkspaceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SynapseWorkspaceIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this SparkPool. 
+func (mg *SparkPool) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("synapse.azure.upbound.io", "v1beta2", "Workspace", "WorkspaceList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SynapseWorkspaceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.SynapseWorkspaceIDRef, + Selector: mg.Spec.ForProvider.SynapseWorkspaceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SynapseWorkspaceID") + } + mg.Spec.ForProvider.SynapseWorkspaceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SynapseWorkspaceIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this Workspace. 
+func (mg *Workspace) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ComputeSubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.ComputeSubnetIDRef, + Selector: mg.Spec.ForProvider.ComputeSubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ComputeSubnetID") + } + mg.Spec.ForProvider.ComputeSubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ComputeSubnetIDRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.CustomerManagedKey != nil { + { + m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta2", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.CustomerManagedKey.KeyVersionlessID), + Extract: resource.ExtractParamPath("versionless_id", true), + Reference: mg.Spec.ForProvider.CustomerManagedKey.KeyVersionlessIDRef, + Selector: mg.Spec.ForProvider.CustomerManagedKey.KeyVersionlessIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.CustomerManagedKey.KeyVersionlessID") + } + mg.Spec.ForProvider.CustomerManagedKey.KeyVersionlessID = reference.ToPtrValue(rsp.ResolvedValue) + 
mg.Spec.ForProvider.CustomerManagedKey.KeyVersionlessIDRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ManagedResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ManagedResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ManagedResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ManagedResourceGroupName") + } + mg.Spec.ForProvider.ManagedResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ManagedResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "DataLakeGen2FileSystem", "DataLakeGen2FileSystemList") + if err != nil { + return errors.Wrap(err, "failed 
to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StorageDataLakeGen2FileSystemID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.StorageDataLakeGen2FileSystemIDRef, + Selector: mg.Spec.ForProvider.StorageDataLakeGen2FileSystemIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StorageDataLakeGen2FileSystemID") + } + mg.Spec.ForProvider.StorageDataLakeGen2FileSystemID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StorageDataLakeGen2FileSystemIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ComputeSubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.ComputeSubnetIDRef, + Selector: mg.Spec.InitProvider.ComputeSubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ComputeSubnetID") + } + mg.Spec.InitProvider.ComputeSubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ComputeSubnetIDRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.CustomerManagedKey != nil { + { + m, l, err = apisresolver.GetManagedResource("keyvault.azure.upbound.io", "v1beta2", "Key", "KeyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.InitProvider.CustomerManagedKey.KeyVersionlessID), + Extract: resource.ExtractParamPath("versionless_id", true), + Reference: mg.Spec.InitProvider.CustomerManagedKey.KeyVersionlessIDRef, + Selector: mg.Spec.InitProvider.CustomerManagedKey.KeyVersionlessIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.CustomerManagedKey.KeyVersionlessID") + } + mg.Spec.InitProvider.CustomerManagedKey.KeyVersionlessID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.CustomerManagedKey.KeyVersionlessIDRef = rsp.ResolvedReference + + } + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ManagedResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ManagedResourceGroupNameRef, + Selector: mg.Spec.InitProvider.ManagedResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ManagedResourceGroupName") + } + mg.Spec.InitProvider.ManagedResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ManagedResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta1", "DataLakeGen2FileSystem", "DataLakeGen2FileSystemList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StorageDataLakeGen2FileSystemID), + Extract: 
resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.StorageDataLakeGen2FileSystemIDRef, + Selector: mg.Spec.InitProvider.StorageDataLakeGen2FileSystemIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StorageDataLakeGen2FileSystemID") + } + mg.Spec.InitProvider.StorageDataLakeGen2FileSystemID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StorageDataLakeGen2FileSystemIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this WorkspaceVulnerabilityAssessment. +func (mg *WorkspaceVulnerabilityAssessment) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("synapse.azure.upbound.io", "v1beta1", "WorkspaceSecurityAlertPolicy", "WorkspaceSecurityAlertPolicyList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.WorkspaceSecurityAlertPolicyID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.WorkspaceSecurityAlertPolicyIDRef, + Selector: mg.Spec.ForProvider.WorkspaceSecurityAlertPolicyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.WorkspaceSecurityAlertPolicyID") + } + mg.Spec.ForProvider.WorkspaceSecurityAlertPolicyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.WorkspaceSecurityAlertPolicyIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("synapse.azure.upbound.io", "v1beta1", "WorkspaceSecurityAlertPolicy", "WorkspaceSecurityAlertPolicyList") + if err != nil { + return 
errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.WorkspaceSecurityAlertPolicyID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.WorkspaceSecurityAlertPolicyIDRef, + Selector: mg.Spec.InitProvider.WorkspaceSecurityAlertPolicyIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.WorkspaceSecurityAlertPolicyID") + } + mg.Spec.InitProvider.WorkspaceSecurityAlertPolicyID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.WorkspaceSecurityAlertPolicyIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/synapse/v1beta2/zz_groupversion_info.go b/apis/synapse/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..be7bb17b0 --- /dev/null +++ b/apis/synapse/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=synapse.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "synapse.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/synapse/v1beta2/zz_linkedservice_terraformed.go b/apis/synapse/v1beta2/zz_linkedservice_terraformed.go new file mode 100755 index 000000000..faf43e2d9 --- /dev/null +++ b/apis/synapse/v1beta2/zz_linkedservice_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this LinkedService +func (mg *LinkedService) GetTerraformResourceType() string { + return "azurerm_synapse_linked_service" +} + +// GetConnectionDetailsMapping for this LinkedService +func (tr *LinkedService) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this LinkedService +func (tr *LinkedService) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this LinkedService +func (tr *LinkedService) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this LinkedService +func (tr *LinkedService) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this LinkedService +func (tr *LinkedService) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} 
+ +// SetParameters for this LinkedService +func (tr *LinkedService) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this LinkedService +func (tr *LinkedService) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this LinkedService +func (tr *LinkedService) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this LinkedService using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *LinkedService) LateInitialize(attrs []byte) (bool, error) { + params := &LinkedServiceParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *LinkedService) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/synapse/v1beta2/zz_linkedservice_types.go b/apis/synapse/v1beta2/zz_linkedservice_types.go new file mode 100755 index 000000000..7a269a2a9 --- /dev/null +++ b/apis/synapse/v1beta2/zz_linkedservice_types.go @@ -0,0 +1,255 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type IntegrationRuntimeInitParameters struct { + + // The integration runtime reference to associate with the Synapse Linked Service. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/synapse/v1beta1.IntegrationRuntimeAzure + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Reference to a IntegrationRuntimeAzure in synapse to populate name. + // +kubebuilder:validation:Optional + NameRef *v1.Reference `json:"nameRef,omitempty" tf:"-"` + + // Selector for a IntegrationRuntimeAzure in synapse to populate name. + // +kubebuilder:validation:Optional + NameSelector *v1.Selector `json:"nameSelector,omitempty" tf:"-"` + + // A map of parameters to associate with the integration runtime. 
+ // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type IntegrationRuntimeObservation struct { + + // The integration runtime reference to associate with the Synapse Linked Service. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A map of parameters to associate with the integration runtime. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type IntegrationRuntimeParameters struct { + + // The integration runtime reference to associate with the Synapse Linked Service. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/synapse/v1beta1.IntegrationRuntimeAzure + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Reference to a IntegrationRuntimeAzure in synapse to populate name. + // +kubebuilder:validation:Optional + NameRef *v1.Reference `json:"nameRef,omitempty" tf:"-"` + + // Selector for a IntegrationRuntimeAzure in synapse to populate name. + // +kubebuilder:validation:Optional + NameSelector *v1.Selector `json:"nameSelector,omitempty" tf:"-"` + + // A map of parameters to associate with the integration runtime. + // +kubebuilder:validation:Optional + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type LinkedServiceInitParameters struct { + + // A map of additional properties to associate with the Synapse Linked Service. + // +mapType=granular + AdditionalProperties map[string]*string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"` + + // List of tags that can be used for describing the Synapse Linked Service. + Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // The description for the Synapse Linked Service. 
+ Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A integration_runtime block as defined below. + IntegrationRuntime *IntegrationRuntimeInitParameters `json:"integrationRuntime,omitempty" tf:"integration_runtime,omitempty"` + + // A map of parameters to associate with the Synapse Linked Service. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The type of data stores that will be connected to Synapse. Valid Values include AmazonMWS, AmazonRdsForOracle, AmazonRdsForSqlServer, AmazonRedshift, AmazonS3, AzureBatch. Changing this forces a new resource to be created. + // AzureBlobFS, AzureBlobStorage, AzureDataExplorer, AzureDataLakeAnalytics, AzureDataLakeStore, AzureDatabricks, AzureDatabricksDeltaLake, AzureFileStorage, AzureFunction, + // AzureKeyVault, AzureML, AzureMLService, AzureMariaDB, AzureMySql, AzurePostgreSql, AzureSqlDW, AzureSqlDatabase, AzureSqlMI, AzureSearch, AzureStorage, + // AzureTableStorage, Cassandra, CommonDataServiceForApps, Concur, CosmosDb, CosmosDbMongoDbApi, Couchbase, CustomDataSource, Db2, Drill, + // Dynamics, DynamicsAX, DynamicsCrm, Eloqua, FileServer, FtpServer, GoogleAdWords, GoogleBigQuery, GoogleCloudStorage, Greenplum, HBase, HDInsight, + // HDInsightOnDemand, HttpServer, Hdfs, Hive, Hubspot, Impala, Informix, Jira, LinkedService, Magento, MariaDB, Marketo, MicrosoftAccess, MongoDb, + // MongoDbAtlas, MongoDbV2, MySql, Netezza, OData, Odbc, Office365, Oracle, OracleServiceCloud, Paypal, Phoenix, PostgreSql, Presto, QuickBooks, + // Responsys, RestService, SqlServer, Salesforce, SalesforceMarketingCloud, SalesforceServiceCloud, SapBW, SapCloudForCustomer, SapEcc, SapHana, SapOpenHub, + // SapTable, ServiceNow, Sftp, SharePointOnlineList, Shopify, Snowflake, Spark, Square, Sybase, Teradata, Vertica, Web, Xero, Zoho. 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // A JSON object that contains the properties of the Synapse Linked Service. + TypePropertiesJSON *string `json:"typePropertiesJson,omitempty" tf:"type_properties_json,omitempty"` +} + +type LinkedServiceObservation struct { + + // A map of additional properties to associate with the Synapse Linked Service. + // +mapType=granular + AdditionalProperties map[string]*string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"` + + // List of tags that can be used for describing the Synapse Linked Service. + Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // The description for the Synapse Linked Service. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // The ID of the Synapse Linked Service. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A integration_runtime block as defined below. + IntegrationRuntime *IntegrationRuntimeObservation `json:"integrationRuntime,omitempty" tf:"integration_runtime,omitempty"` + + // A map of parameters to associate with the Synapse Linked Service. + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The Synapse Workspace ID in which to associate the Linked Service with. Changing this forces a new Synapse Linked Service to be created. + SynapseWorkspaceID *string `json:"synapseWorkspaceId,omitempty" tf:"synapse_workspace_id,omitempty"` + + // The type of data stores that will be connected to Synapse. Valid Values include AmazonMWS, AmazonRdsForOracle, AmazonRdsForSqlServer, AmazonRedshift, AmazonS3, AzureBatch. Changing this forces a new resource to be created. 
+ // AzureBlobFS, AzureBlobStorage, AzureDataExplorer, AzureDataLakeAnalytics, AzureDataLakeStore, AzureDatabricks, AzureDatabricksDeltaLake, AzureFileStorage, AzureFunction, + // AzureKeyVault, AzureML, AzureMLService, AzureMariaDB, AzureMySql, AzurePostgreSql, AzureSqlDW, AzureSqlDatabase, AzureSqlMI, AzureSearch, AzureStorage, + // AzureTableStorage, Cassandra, CommonDataServiceForApps, Concur, CosmosDb, CosmosDbMongoDbApi, Couchbase, CustomDataSource, Db2, Drill, + // Dynamics, DynamicsAX, DynamicsCrm, Eloqua, FileServer, FtpServer, GoogleAdWords, GoogleBigQuery, GoogleCloudStorage, Greenplum, HBase, HDInsight, + // HDInsightOnDemand, HttpServer, Hdfs, Hive, Hubspot, Impala, Informix, Jira, LinkedService, Magento, MariaDB, Marketo, MicrosoftAccess, MongoDb, + // MongoDbAtlas, MongoDbV2, MySql, Netezza, OData, Odbc, Office365, Oracle, OracleServiceCloud, Paypal, Phoenix, PostgreSql, Presto, QuickBooks, + // Responsys, RestService, SqlServer, Salesforce, SalesforceMarketingCloud, SalesforceServiceCloud, SapBW, SapCloudForCustomer, SapEcc, SapHana, SapOpenHub, + // SapTable, ServiceNow, Sftp, SharePointOnlineList, Shopify, Snowflake, Spark, Square, Sybase, Teradata, Vertica, Web, Xero, Zoho. + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // A JSON object that contains the properties of the Synapse Linked Service. + TypePropertiesJSON *string `json:"typePropertiesJson,omitempty" tf:"type_properties_json,omitempty"` +} + +type LinkedServiceParameters struct { + + // A map of additional properties to associate with the Synapse Linked Service. + // +kubebuilder:validation:Optional + // +mapType=granular + AdditionalProperties map[string]*string `json:"additionalProperties,omitempty" tf:"additional_properties,omitempty"` + + // List of tags that can be used for describing the Synapse Linked Service. 
+ // +kubebuilder:validation:Optional + Annotations []*string `json:"annotations,omitempty" tf:"annotations,omitempty"` + + // The description for the Synapse Linked Service. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A integration_runtime block as defined below. + // +kubebuilder:validation:Optional + IntegrationRuntime *IntegrationRuntimeParameters `json:"integrationRuntime,omitempty" tf:"integration_runtime,omitempty"` + + // A map of parameters to associate with the Synapse Linked Service. + // +kubebuilder:validation:Optional + // +mapType=granular + Parameters map[string]*string `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // The Synapse Workspace ID in which to associate the Linked Service with. Changing this forces a new Synapse Linked Service to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/synapse/v1beta2.Workspace + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + SynapseWorkspaceID *string `json:"synapseWorkspaceId,omitempty" tf:"synapse_workspace_id,omitempty"` + + // Reference to a Workspace in synapse to populate synapseWorkspaceId. + // +kubebuilder:validation:Optional + SynapseWorkspaceIDRef *v1.Reference `json:"synapseWorkspaceIdRef,omitempty" tf:"-"` + + // Selector for a Workspace in synapse to populate synapseWorkspaceId. + // +kubebuilder:validation:Optional + SynapseWorkspaceIDSelector *v1.Selector `json:"synapseWorkspaceIdSelector,omitempty" tf:"-"` + + // The type of data stores that will be connected to Synapse. Valid Values include AmazonMWS, AmazonRdsForOracle, AmazonRdsForSqlServer, AmazonRedshift, AmazonS3, AzureBatch. Changing this forces a new resource to be created. 
+ // AzureBlobFS, AzureBlobStorage, AzureDataExplorer, AzureDataLakeAnalytics, AzureDataLakeStore, AzureDatabricks, AzureDatabricksDeltaLake, AzureFileStorage, AzureFunction, + // AzureKeyVault, AzureML, AzureMLService, AzureMariaDB, AzureMySql, AzurePostgreSql, AzureSqlDW, AzureSqlDatabase, AzureSqlMI, AzureSearch, AzureStorage, + // AzureTableStorage, Cassandra, CommonDataServiceForApps, Concur, CosmosDb, CosmosDbMongoDbApi, Couchbase, CustomDataSource, Db2, Drill, + // Dynamics, DynamicsAX, DynamicsCrm, Eloqua, FileServer, FtpServer, GoogleAdWords, GoogleBigQuery, GoogleCloudStorage, Greenplum, HBase, HDInsight, + // HDInsightOnDemand, HttpServer, Hdfs, Hive, Hubspot, Impala, Informix, Jira, LinkedService, Magento, MariaDB, Marketo, MicrosoftAccess, MongoDb, + // MongoDbAtlas, MongoDbV2, MySql, Netezza, OData, Odbc, Office365, Oracle, OracleServiceCloud, Paypal, Phoenix, PostgreSql, Presto, QuickBooks, + // Responsys, RestService, SqlServer, Salesforce, SalesforceMarketingCloud, SalesforceServiceCloud, SapBW, SapCloudForCustomer, SapEcc, SapHana, SapOpenHub, + // SapTable, ServiceNow, Sftp, SharePointOnlineList, Shopify, Snowflake, Spark, Square, Sybase, Teradata, Vertica, Web, Xero, Zoho. + // +kubebuilder:validation:Optional + Type *string `json:"type,omitempty" tf:"type,omitempty"` + + // A JSON object that contains the properties of the Synapse Linked Service. + // +kubebuilder:validation:Optional + TypePropertiesJSON *string `json:"typePropertiesJson,omitempty" tf:"type_properties_json,omitempty"` +} + +// LinkedServiceSpec defines the desired state of LinkedService +type LinkedServiceSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider LinkedServiceParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. 
The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider LinkedServiceInitParameters `json:"initProvider,omitempty"` +} + +// LinkedServiceStatus defines the observed state of LinkedService. +type LinkedServiceStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider LinkedServiceObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// LinkedService is the Schema for the LinkedServices API. Manages a Linked Service (connection) between a resource and Azure Synapse. This is a generic resource that supports all different Linked Service Types. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type LinkedService struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.type) || (has(self.initProvider) && has(self.initProvider.type))",message="spec.forProvider.type is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.typePropertiesJson) || (has(self.initProvider) && has(self.initProvider.typePropertiesJson))",message="spec.forProvider.typePropertiesJson is a required parameter" + Spec LinkedServiceSpec `json:"spec"` + Status LinkedServiceStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// LinkedServiceList contains a list of LinkedServices +type LinkedServiceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []LinkedService `json:"items"` +} + +// Repository type metadata. +var ( + LinkedService_Kind = "LinkedService" + LinkedService_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: LinkedService_Kind}.String() + LinkedService_KindAPIVersion = LinkedService_Kind + "." 
+ CRDGroupVersion.String() + LinkedService_GroupVersionKind = CRDGroupVersion.WithKind(LinkedService_Kind) +) + +func init() { + SchemeBuilder.Register(&LinkedService{}, &LinkedServiceList{}) +} diff --git a/apis/synapse/v1beta2/zz_sparkpool_terraformed.go b/apis/synapse/v1beta2/zz_sparkpool_terraformed.go new file mode 100755 index 000000000..4a43b7408 --- /dev/null +++ b/apis/synapse/v1beta2/zz_sparkpool_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this SparkPool +func (mg *SparkPool) GetTerraformResourceType() string { + return "azurerm_synapse_spark_pool" +} + +// GetConnectionDetailsMapping for this SparkPool +func (tr *SparkPool) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this SparkPool +func (tr *SparkPool) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this SparkPool +func (tr *SparkPool) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this SparkPool +func (tr *SparkPool) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this SparkPool +func (tr *SparkPool) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, 
err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this SparkPool +func (tr *SparkPool) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this SparkPool +func (tr *SparkPool) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this SparkPool +func (tr *SparkPool) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this SparkPool using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *SparkPool) LateInitialize(attrs []byte) (bool, error) { + params := &SparkPoolParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *SparkPool) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/synapse/v1beta2/zz_sparkpool_types.go b/apis/synapse/v1beta2/zz_sparkpool_types.go new file mode 100755 index 000000000..1756109cb --- /dev/null +++ b/apis/synapse/v1beta2/zz_sparkpool_types.go @@ -0,0 +1,384 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AutoPauseInitParameters struct { + + // Number of minutes of idle time before the Spark Pool is automatically paused. Must be between 5 and 10080. + DelayInMinutes *float64 `json:"delayInMinutes,omitempty" tf:"delay_in_minutes,omitempty"` +} + +type AutoPauseObservation struct { + + // Number of minutes of idle time before the Spark Pool is automatically paused. Must be between 5 and 10080. + DelayInMinutes *float64 `json:"delayInMinutes,omitempty" tf:"delay_in_minutes,omitempty"` +} + +type AutoPauseParameters struct { + + // Number of minutes of idle time before the Spark Pool is automatically paused. Must be between 5 and 10080. 
+ // +kubebuilder:validation:Optional + DelayInMinutes *float64 `json:"delayInMinutes" tf:"delay_in_minutes,omitempty"` +} + +type AutoScaleInitParameters struct { + + // The maximum number of nodes the Spark Pool can support. Must be between 3 and 200. + MaxNodeCount *float64 `json:"maxNodeCount,omitempty" tf:"max_node_count,omitempty"` + + // The minimum number of nodes the Spark Pool can support. Must be between 3 and 200. + MinNodeCount *float64 `json:"minNodeCount,omitempty" tf:"min_node_count,omitempty"` +} + +type AutoScaleObservation struct { + + // The maximum number of nodes the Spark Pool can support. Must be between 3 and 200. + MaxNodeCount *float64 `json:"maxNodeCount,omitempty" tf:"max_node_count,omitempty"` + + // The minimum number of nodes the Spark Pool can support. Must be between 3 and 200. + MinNodeCount *float64 `json:"minNodeCount,omitempty" tf:"min_node_count,omitempty"` +} + +type AutoScaleParameters struct { + + // The maximum number of nodes the Spark Pool can support. Must be between 3 and 200. + // +kubebuilder:validation:Optional + MaxNodeCount *float64 `json:"maxNodeCount" tf:"max_node_count,omitempty"` + + // The minimum number of nodes the Spark Pool can support. Must be between 3 and 200. + // +kubebuilder:validation:Optional + MinNodeCount *float64 `json:"minNodeCount" tf:"min_node_count,omitempty"` +} + +type LibraryRequirementInitParameters struct { + + // The content of library requirements. + Content *string `json:"content,omitempty" tf:"content,omitempty"` + + // The name of the library requirements file. + Filename *string `json:"filename,omitempty" tf:"filename,omitempty"` +} + +type LibraryRequirementObservation struct { + + // The content of library requirements. + Content *string `json:"content,omitempty" tf:"content,omitempty"` + + // The name of the library requirements file. 
+ Filename *string `json:"filename,omitempty" tf:"filename,omitempty"` +} + +type LibraryRequirementParameters struct { + + // The content of library requirements. + // +kubebuilder:validation:Optional + Content *string `json:"content" tf:"content,omitempty"` + + // The name of the library requirements file. + // +kubebuilder:validation:Optional + Filename *string `json:"filename" tf:"filename,omitempty"` +} + +type SparkConfigInitParameters struct { + + // The contents of a spark configuration. + Content *string `json:"content,omitempty" tf:"content,omitempty"` + + // The name of the file where the spark configuration content will be stored. + Filename *string `json:"filename,omitempty" tf:"filename,omitempty"` +} + +type SparkConfigObservation struct { + + // The contents of a spark configuration. + Content *string `json:"content,omitempty" tf:"content,omitempty"` + + // The name of the file where the spark configuration content will be stored. + Filename *string `json:"filename,omitempty" tf:"filename,omitempty"` +} + +type SparkConfigParameters struct { + + // The contents of a spark configuration. + // +kubebuilder:validation:Optional + Content *string `json:"content" tf:"content,omitempty"` + + // The name of the file where the spark configuration content will be stored. + // +kubebuilder:validation:Optional + Filename *string `json:"filename" tf:"filename,omitempty"` +} + +type SparkPoolInitParameters struct { + + // An auto_pause block as defined below. + AutoPause *AutoPauseInitParameters `json:"autoPause,omitempty" tf:"auto_pause,omitempty"` + + // An auto_scale block as defined below. Exactly one of node_count or auto_scale must be specified. + AutoScale *AutoScaleInitParameters `json:"autoScale,omitempty" tf:"auto_scale,omitempty"` + + // The cache size in the Spark Pool. + CacheSize *float64 `json:"cacheSize,omitempty" tf:"cache_size,omitempty"` + + // Indicates whether compute isolation is enabled or not. Defaults to false. 
+ ComputeIsolationEnabled *bool `json:"computeIsolationEnabled,omitempty" tf:"compute_isolation_enabled,omitempty"` + + // Indicates whether Dynamic Executor Allocation is enabled or not. Defaults to false. + DynamicExecutorAllocationEnabled *bool `json:"dynamicExecutorAllocationEnabled,omitempty" tf:"dynamic_executor_allocation_enabled,omitempty"` + + // A library_requirement block as defined below. + LibraryRequirement *LibraryRequirementInitParameters `json:"libraryRequirement,omitempty" tf:"library_requirement,omitempty"` + + // The maximum number of executors allocated only when dynamic_executor_allocation_enabled set to true. + MaxExecutors *float64 `json:"maxExecutors,omitempty" tf:"max_executors,omitempty"` + + // The minimum number of executors allocated only when dynamic_executor_allocation_enabled set to true. + MinExecutors *float64 `json:"minExecutors,omitempty" tf:"min_executors,omitempty"` + + // The number of nodes in the Spark Pool. Exactly one of node_count or auto_scale must be specified. + NodeCount *float64 `json:"nodeCount,omitempty" tf:"node_count,omitempty"` + + // The level of node in the Spark Pool. Possible values are Small, Medium, Large, None, XLarge, XXLarge and XXXLarge. + NodeSize *string `json:"nodeSize,omitempty" tf:"node_size,omitempty"` + + // The kind of nodes that the Spark Pool provides. Possible values are HardwareAcceleratedFPGA, HardwareAcceleratedGPU, MemoryOptimized, and None. + NodeSizeFamily *string `json:"nodeSizeFamily,omitempty" tf:"node_size_family,omitempty"` + + // Indicates whether session level packages are enabled or not. Defaults to false. + SessionLevelPackagesEnabled *bool `json:"sessionLevelPackagesEnabled,omitempty" tf:"session_level_packages_enabled,omitempty"` + + // A spark_config block as defined below. + SparkConfig *SparkConfigInitParameters `json:"sparkConfig,omitempty" tf:"spark_config,omitempty"` + + // The Spark events folder. Defaults to /events. 
+ SparkEventsFolder *string `json:"sparkEventsFolder,omitempty" tf:"spark_events_folder,omitempty"` + + // The default folder where Spark logs will be written. Defaults to /logs. + SparkLogFolder *string `json:"sparkLogFolder,omitempty" tf:"spark_log_folder,omitempty"` + + // The Apache Spark version. Possible values are 2.4 , 3.1 , 3.2 and 3.3. Defaults to 2.4. + SparkVersion *string `json:"sparkVersion,omitempty" tf:"spark_version,omitempty"` + + // A mapping of tags which should be assigned to the Synapse Spark Pool. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type SparkPoolObservation struct { + + // An auto_pause block as defined below. + AutoPause *AutoPauseObservation `json:"autoPause,omitempty" tf:"auto_pause,omitempty"` + + // An auto_scale block as defined below. Exactly one of node_count or auto_scale must be specified. + AutoScale *AutoScaleObservation `json:"autoScale,omitempty" tf:"auto_scale,omitempty"` + + // The cache size in the Spark Pool. + CacheSize *float64 `json:"cacheSize,omitempty" tf:"cache_size,omitempty"` + + // Indicates whether compute isolation is enabled or not. Defaults to false. + ComputeIsolationEnabled *bool `json:"computeIsolationEnabled,omitempty" tf:"compute_isolation_enabled,omitempty"` + + // Indicates whether Dynamic Executor Allocation is enabled or not. Defaults to false. + DynamicExecutorAllocationEnabled *bool `json:"dynamicExecutorAllocationEnabled,omitempty" tf:"dynamic_executor_allocation_enabled,omitempty"` + + // The ID of the Synapse Spark Pool. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A library_requirement block as defined below. + LibraryRequirement *LibraryRequirementObservation `json:"libraryRequirement,omitempty" tf:"library_requirement,omitempty"` + + // The maximum number of executors allocated only when dynamic_executor_allocation_enabled set to true. 
+ MaxExecutors *float64 `json:"maxExecutors,omitempty" tf:"max_executors,omitempty"` + + // The minimum number of executors allocated only when dynamic_executor_allocation_enabled set to true. + MinExecutors *float64 `json:"minExecutors,omitempty" tf:"min_executors,omitempty"` + + // The number of nodes in the Spark Pool. Exactly one of node_count or auto_scale must be specified. + NodeCount *float64 `json:"nodeCount,omitempty" tf:"node_count,omitempty"` + + // The level of node in the Spark Pool. Possible values are Small, Medium, Large, None, XLarge, XXLarge and XXXLarge. + NodeSize *string `json:"nodeSize,omitempty" tf:"node_size,omitempty"` + + // The kind of nodes that the Spark Pool provides. Possible values are HardwareAcceleratedFPGA, HardwareAcceleratedGPU, MemoryOptimized, and None. + NodeSizeFamily *string `json:"nodeSizeFamily,omitempty" tf:"node_size_family,omitempty"` + + // Indicates whether session level packages are enabled or not. Defaults to false. + SessionLevelPackagesEnabled *bool `json:"sessionLevelPackagesEnabled,omitempty" tf:"session_level_packages_enabled,omitempty"` + + // A spark_config block as defined below. + SparkConfig *SparkConfigObservation `json:"sparkConfig,omitempty" tf:"spark_config,omitempty"` + + // The Spark events folder. Defaults to /events. + SparkEventsFolder *string `json:"sparkEventsFolder,omitempty" tf:"spark_events_folder,omitempty"` + + // The default folder where Spark logs will be written. Defaults to /logs. + SparkLogFolder *string `json:"sparkLogFolder,omitempty" tf:"spark_log_folder,omitempty"` + + // The Apache Spark version. Possible values are 2.4 , 3.1 , 3.2 and 3.3. Defaults to 2.4. + SparkVersion *string `json:"sparkVersion,omitempty" tf:"spark_version,omitempty"` + + // The ID of the Synapse Workspace where the Synapse Spark Pool should exist. Changing this forces a new Synapse Spark Pool to be created. 
+ SynapseWorkspaceID *string `json:"synapseWorkspaceId,omitempty" tf:"synapse_workspace_id,omitempty"` + + // A mapping of tags which should be assigned to the Synapse Spark Pool. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type SparkPoolParameters struct { + + // An auto_pause block as defined below. + // +kubebuilder:validation:Optional + AutoPause *AutoPauseParameters `json:"autoPause,omitempty" tf:"auto_pause,omitempty"` + + // An auto_scale block as defined below. Exactly one of node_count or auto_scale must be specified. + // +kubebuilder:validation:Optional + AutoScale *AutoScaleParameters `json:"autoScale,omitempty" tf:"auto_scale,omitempty"` + + // The cache size in the Spark Pool. + // +kubebuilder:validation:Optional + CacheSize *float64 `json:"cacheSize,omitempty" tf:"cache_size,omitempty"` + + // Indicates whether compute isolation is enabled or not. Defaults to false. + // +kubebuilder:validation:Optional + ComputeIsolationEnabled *bool `json:"computeIsolationEnabled,omitempty" tf:"compute_isolation_enabled,omitempty"` + + // Indicates whether Dynamic Executor Allocation is enabled or not. Defaults to false. + // +kubebuilder:validation:Optional + DynamicExecutorAllocationEnabled *bool `json:"dynamicExecutorAllocationEnabled,omitempty" tf:"dynamic_executor_allocation_enabled,omitempty"` + + // A library_requirement block as defined below. + // +kubebuilder:validation:Optional + LibraryRequirement *LibraryRequirementParameters `json:"libraryRequirement,omitempty" tf:"library_requirement,omitempty"` + + // The maximum number of executors allocated only when dynamic_executor_allocation_enabled set to true. + // +kubebuilder:validation:Optional + MaxExecutors *float64 `json:"maxExecutors,omitempty" tf:"max_executors,omitempty"` + + // The minimum number of executors allocated only when dynamic_executor_allocation_enabled set to true. 
+ // +kubebuilder:validation:Optional + MinExecutors *float64 `json:"minExecutors,omitempty" tf:"min_executors,omitempty"` + + // The number of nodes in the Spark Pool. Exactly one of node_count or auto_scale must be specified. + // +kubebuilder:validation:Optional + NodeCount *float64 `json:"nodeCount,omitempty" tf:"node_count,omitempty"` + + // The level of node in the Spark Pool. Possible values are Small, Medium, Large, None, XLarge, XXLarge and XXXLarge. + // +kubebuilder:validation:Optional + NodeSize *string `json:"nodeSize,omitempty" tf:"node_size,omitempty"` + + // The kind of nodes that the Spark Pool provides. Possible values are HardwareAcceleratedFPGA, HardwareAcceleratedGPU, MemoryOptimized, and None. + // +kubebuilder:validation:Optional + NodeSizeFamily *string `json:"nodeSizeFamily,omitempty" tf:"node_size_family,omitempty"` + + // Indicates whether session level packages are enabled or not. Defaults to false. + // +kubebuilder:validation:Optional + SessionLevelPackagesEnabled *bool `json:"sessionLevelPackagesEnabled,omitempty" tf:"session_level_packages_enabled,omitempty"` + + // A spark_config block as defined below. + // +kubebuilder:validation:Optional + SparkConfig *SparkConfigParameters `json:"sparkConfig,omitempty" tf:"spark_config,omitempty"` + + // The Spark events folder. Defaults to /events. + // +kubebuilder:validation:Optional + SparkEventsFolder *string `json:"sparkEventsFolder,omitempty" tf:"spark_events_folder,omitempty"` + + // The default folder where Spark logs will be written. Defaults to /logs. + // +kubebuilder:validation:Optional + SparkLogFolder *string `json:"sparkLogFolder,omitempty" tf:"spark_log_folder,omitempty"` + + // The Apache Spark version. Possible values are 2.4 , 3.1 , 3.2 and 3.3. Defaults to 2.4. + // +kubebuilder:validation:Optional + SparkVersion *string `json:"sparkVersion,omitempty" tf:"spark_version,omitempty"` + + // The ID of the Synapse Workspace where the Synapse Spark Pool should exist. 
Changing this forces a new Synapse Spark Pool to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/synapse/v1beta2.Workspace + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + SynapseWorkspaceID *string `json:"synapseWorkspaceId,omitempty" tf:"synapse_workspace_id,omitempty"` + + // Reference to a Workspace in synapse to populate synapseWorkspaceId. + // +kubebuilder:validation:Optional + SynapseWorkspaceIDRef *v1.Reference `json:"synapseWorkspaceIdRef,omitempty" tf:"-"` + + // Selector for a Workspace in synapse to populate synapseWorkspaceId. + // +kubebuilder:validation:Optional + SynapseWorkspaceIDSelector *v1.Selector `json:"synapseWorkspaceIdSelector,omitempty" tf:"-"` + + // A mapping of tags which should be assigned to the Synapse Spark Pool. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +// SparkPoolSpec defines the desired state of SparkPool +type SparkPoolSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider SparkPoolParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider SparkPoolInitParameters `json:"initProvider,omitempty"` +} + +// SparkPoolStatus defines the observed state of SparkPool. +type SparkPoolStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SparkPoolObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// SparkPool is the Schema for the SparkPools API. Manages a Synapse Spark Pool. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type SparkPool struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.nodeSize) || (has(self.initProvider) && has(self.initProvider.nodeSize))",message="spec.forProvider.nodeSize is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.nodeSizeFamily) || (has(self.initProvider) && has(self.initProvider.nodeSizeFamily))",message="spec.forProvider.nodeSizeFamily is a required parameter" + Spec SparkPoolSpec `json:"spec"` + Status SparkPoolStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SparkPoolList contains a list of SparkPools +type SparkPoolList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + 
Items []SparkPool `json:"items"` +} + +// Repository type metadata. +var ( + SparkPool_Kind = "SparkPool" + SparkPool_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: SparkPool_Kind}.String() + SparkPool_KindAPIVersion = SparkPool_Kind + "." + CRDGroupVersion.String() + SparkPool_GroupVersionKind = CRDGroupVersion.WithKind(SparkPool_Kind) +) + +func init() { + SchemeBuilder.Register(&SparkPool{}, &SparkPoolList{}) +} diff --git a/apis/synapse/v1beta2/zz_sqlpool_terraformed.go b/apis/synapse/v1beta2/zz_sqlpool_terraformed.go new file mode 100755 index 000000000..94446d934 --- /dev/null +++ b/apis/synapse/v1beta2/zz_sqlpool_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this SQLPool +func (mg *SQLPool) GetTerraformResourceType() string { + return "azurerm_synapse_sql_pool" +} + +// GetConnectionDetailsMapping for this SQLPool +func (tr *SQLPool) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this SQLPool +func (tr *SQLPool) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this SQLPool +func (tr *SQLPool) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this SQLPool +func (tr *SQLPool) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return 
*tr.Status.AtProvider.ID +} + +// GetParameters of this SQLPool +func (tr *SQLPool) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this SQLPool +func (tr *SQLPool) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this SQLPool +func (tr *SQLPool) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this SQLPool +func (tr *SQLPool) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this SQLPool using its observed tfState. 
+// returns True if there are any spec changes for the resource. +func (tr *SQLPool) LateInitialize(attrs []byte) (bool, error) { + params := &SQLPoolParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *SQLPool) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/synapse/v1beta2/zz_sqlpool_types.go b/apis/synapse/v1beta2/zz_sqlpool_types.go new file mode 100755 index 000000000..2fc25c48d --- /dev/null +++ b/apis/synapse/v1beta2/zz_sqlpool_types.go @@ -0,0 +1,226 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type RestoreInitParameters struct { + + // Specifies the Snapshot time to restore formatted as an RFC3339 date string. Changing this forces a new Synapse SQL Pool to be created. + PointInTime *string `json:"pointInTime,omitempty" tf:"point_in_time,omitempty"` + + // The ID of the Synapse SQL Pool or SQL Database which is to restore. Changing this forces a new Synapse SQL Pool to be created. + SourceDatabaseID *string `json:"sourceDatabaseId,omitempty" tf:"source_database_id,omitempty"` +} + +type RestoreObservation struct { + + // Specifies the Snapshot time to restore formatted as an RFC3339 date string. Changing this forces a new Synapse SQL Pool to be created. 
+ PointInTime *string `json:"pointInTime,omitempty" tf:"point_in_time,omitempty"` + + // The ID of the Synapse SQL Pool or SQL Database which is to restore. Changing this forces a new Synapse SQL Pool to be created. + SourceDatabaseID *string `json:"sourceDatabaseId,omitempty" tf:"source_database_id,omitempty"` +} + +type RestoreParameters struct { + + // Specifies the Snapshot time to restore formatted as an RFC3339 date string. Changing this forces a new Synapse SQL Pool to be created. + // +kubebuilder:validation:Optional + PointInTime *string `json:"pointInTime" tf:"point_in_time,omitempty"` + + // The ID of the Synapse SQL Pool or SQL Database which is to restore. Changing this forces a new Synapse SQL Pool to be created. + // +kubebuilder:validation:Optional + SourceDatabaseID *string `json:"sourceDatabaseId" tf:"source_database_id,omitempty"` +} + +type SQLPoolInitParameters struct { + + // The name of the collation to use with this pool, only applicable when create_mode is set to Default. Azure default is SQL_LATIN1_GENERAL_CP1_CI_AS. Changing this forces a new Synapse SQL Pool to be created. + Collation *string `json:"collation,omitempty" tf:"collation,omitempty"` + + // Specifies how to create the SQL Pool. Valid values are: Default, Recovery or PointInTimeRestore. Must be Default to create a new database. Defaults to Default. Changing this forces a new Synapse SQL Pool to be created. + CreateMode *string `json:"createMode,omitempty" tf:"create_mode,omitempty"` + + // Is transparent data encryption enabled? + DataEncrypted *bool `json:"dataEncrypted,omitempty" tf:"data_encrypted,omitempty"` + + // Is geo-backup policy enabled? Possible values include true or false. Defaults to true. + GeoBackupPolicyEnabled *bool `json:"geoBackupPolicyEnabled,omitempty" tf:"geo_backup_policy_enabled,omitempty"` + + // The ID of the Synapse SQL Pool or SQL Database which is to back up, only applicable when create_mode is set to Recovery. 
Changing this forces a new Synapse SQL Pool to be created. + RecoveryDatabaseID *string `json:"recoveryDatabaseId,omitempty" tf:"recovery_database_id,omitempty"` + + // A restore block as defined below. Only applicable when create_mode is set to PointInTimeRestore. Changing this forces a new Synapse SQL Pool to be created. + Restore *RestoreInitParameters `json:"restore,omitempty" tf:"restore,omitempty"` + + // Specifies the SKU Name for this Synapse SQL Pool. Possible values are DW100c, DW200c, DW300c, DW400c, DW500c, DW1000c, DW1500c, DW2000c, DW2500c, DW3000c, DW5000c, DW6000c, DW7500c, DW10000c, DW15000c or DW30000c. + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // The storage account type that will be used to store backups for this Synapse SQL Pool. Possible values are LRS or GRS. Changing this forces a new Synapse SQL Pool to be created. Defaults to GRS. + StorageAccountType *string `json:"storageAccountType,omitempty" tf:"storage_account_type,omitempty"` + + // A mapping of tags which should be assigned to the Synapse SQL Pool. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type SQLPoolObservation struct { + + // The name of the collation to use with this pool, only applicable when create_mode is set to Default. Azure default is SQL_LATIN1_GENERAL_CP1_CI_AS. Changing this forces a new Synapse SQL Pool to be created. + Collation *string `json:"collation,omitempty" tf:"collation,omitempty"` + + // Specifies how to create the SQL Pool. Valid values are: Default, Recovery or PointInTimeRestore. Must be Default to create a new database. Defaults to Default. Changing this forces a new Synapse SQL Pool to be created. + CreateMode *string `json:"createMode,omitempty" tf:"create_mode,omitempty"` + + // Is transparent data encryption enabled? + DataEncrypted *bool `json:"dataEncrypted,omitempty" tf:"data_encrypted,omitempty"` + + // Is geo-backup policy enabled? 
Possible values include true or false. Defaults to true. + GeoBackupPolicyEnabled *bool `json:"geoBackupPolicyEnabled,omitempty" tf:"geo_backup_policy_enabled,omitempty"` + + // The ID of the Synapse SQL Pool. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The ID of the Synapse SQL Pool or SQL Database which is to back up, only applicable when create_mode is set to Recovery. Changing this forces a new Synapse SQL Pool to be created. + RecoveryDatabaseID *string `json:"recoveryDatabaseId,omitempty" tf:"recovery_database_id,omitempty"` + + // A restore block as defined below. Only applicable when create_mode is set to PointInTimeRestore. Changing this forces a new Synapse SQL Pool to be created. + Restore *RestoreObservation `json:"restore,omitempty" tf:"restore,omitempty"` + + // Specifies the SKU Name for this Synapse SQL Pool. Possible values are DW100c, DW200c, DW300c, DW400c, DW500c, DW1000c, DW1500c, DW2000c, DW2500c, DW3000c, DW5000c, DW6000c, DW7500c, DW10000c, DW15000c or DW30000c. + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // The storage account type that will be used to store backups for this Synapse SQL Pool. Possible values are LRS or GRS. Changing this forces a new Synapse SQL Pool to be created. Defaults to GRS. + StorageAccountType *string `json:"storageAccountType,omitempty" tf:"storage_account_type,omitempty"` + + // The ID of Synapse Workspace within which this SQL Pool should be created. Changing this forces a new Synapse SQL Pool to be created. + SynapseWorkspaceID *string `json:"synapseWorkspaceId,omitempty" tf:"synapse_workspace_id,omitempty"` + + // A mapping of tags which should be assigned to the Synapse SQL Pool. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type SQLPoolParameters struct { + + // The name of the collation to use with this pool, only applicable when create_mode is set to Default. Azure default is SQL_LATIN1_GENERAL_CP1_CI_AS. 
Changing this forces a new Synapse SQL Pool to be created. + // +kubebuilder:validation:Optional + Collation *string `json:"collation,omitempty" tf:"collation,omitempty"` + + // Specifies how to create the SQL Pool. Valid values are: Default, Recovery or PointInTimeRestore. Must be Default to create a new database. Defaults to Default. Changing this forces a new Synapse SQL Pool to be created. + // +kubebuilder:validation:Optional + CreateMode *string `json:"createMode,omitempty" tf:"create_mode,omitempty"` + + // Is transparent data encryption enabled? + // +kubebuilder:validation:Optional + DataEncrypted *bool `json:"dataEncrypted,omitempty" tf:"data_encrypted,omitempty"` + + // Is geo-backup policy enabled? Possible values include true or false. Defaults to true. + // +kubebuilder:validation:Optional + GeoBackupPolicyEnabled *bool `json:"geoBackupPolicyEnabled,omitempty" tf:"geo_backup_policy_enabled,omitempty"` + + // The ID of the Synapse SQL Pool or SQL Database which is to back up, only applicable when create_mode is set to Recovery. Changing this forces a new Synapse SQL Pool to be created. + // +kubebuilder:validation:Optional + RecoveryDatabaseID *string `json:"recoveryDatabaseId,omitempty" tf:"recovery_database_id,omitempty"` + + // A restore block as defined below. Only applicable when create_mode is set to PointInTimeRestore. Changing this forces a new Synapse SQL Pool to be created. + // +kubebuilder:validation:Optional + Restore *RestoreParameters `json:"restore,omitempty" tf:"restore,omitempty"` + + // Specifies the SKU Name for this Synapse SQL Pool. Possible values are DW100c, DW200c, DW300c, DW400c, DW500c, DW1000c, DW1500c, DW2000c, DW2500c, DW3000c, DW5000c, DW6000c, DW7500c, DW10000c, DW15000c or DW30000c. + // +kubebuilder:validation:Optional + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // The storage account type that will be used to store backups for this Synapse SQL Pool. Possible values are LRS or GRS. 
Changing this forces a new Synapse SQL Pool to be created. Defaults to GRS. + // +kubebuilder:validation:Optional + StorageAccountType *string `json:"storageAccountType,omitempty" tf:"storage_account_type,omitempty"` + + // The ID of Synapse Workspace within which this SQL Pool should be created. Changing this forces a new Synapse SQL Pool to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/synapse/v1beta2.Workspace + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + SynapseWorkspaceID *string `json:"synapseWorkspaceId,omitempty" tf:"synapse_workspace_id,omitempty"` + + // Reference to a Workspace in synapse to populate synapseWorkspaceId. + // +kubebuilder:validation:Optional + SynapseWorkspaceIDRef *v1.Reference `json:"synapseWorkspaceIdRef,omitempty" tf:"-"` + + // Selector for a Workspace in synapse to populate synapseWorkspaceId. + // +kubebuilder:validation:Optional + SynapseWorkspaceIDSelector *v1.Selector `json:"synapseWorkspaceIdSelector,omitempty" tf:"-"` + + // A mapping of tags which should be assigned to the Synapse SQL Pool. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +// SQLPoolSpec defines the desired state of SQLPool +type SQLPoolSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider SQLPoolParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider SQLPoolInitParameters `json:"initProvider,omitempty"` +} + +// SQLPoolStatus defines the observed state of SQLPool. +type SQLPoolStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider SQLPoolObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// SQLPool is the Schema for the SQLPools API. Manages a Synapse SQL Pool. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type SQLPool struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.skuName) || (has(self.initProvider) && has(self.initProvider.skuName))",message="spec.forProvider.skuName is a required parameter" + Spec SQLPoolSpec `json:"spec"` + Status SQLPoolStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SQLPoolList contains a list of SQLPools +type SQLPoolList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SQLPool `json:"items"` +} + +// Repository type metadata. 
+var ( + SQLPool_Kind = "SQLPool" + SQLPool_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: SQLPool_Kind}.String() + SQLPool_KindAPIVersion = SQLPool_Kind + "." + CRDGroupVersion.String() + SQLPool_GroupVersionKind = CRDGroupVersion.WithKind(SQLPool_Kind) +) + +func init() { + SchemeBuilder.Register(&SQLPool{}, &SQLPoolList{}) +} diff --git a/apis/synapse/v1beta2/zz_workspace_terraformed.go b/apis/synapse/v1beta2/zz_workspace_terraformed.go new file mode 100755 index 000000000..ff1092993 --- /dev/null +++ b/apis/synapse/v1beta2/zz_workspace_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Workspace +func (mg *Workspace) GetTerraformResourceType() string { + return "azurerm_synapse_workspace" +} + +// GetConnectionDetailsMapping for this Workspace +func (tr *Workspace) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"sql_administrator_login_password": "spec.forProvider.sqlAdministratorLoginPasswordSecretRef"} +} + +// GetObservation of this Workspace +func (tr *Workspace) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Workspace +func (tr *Workspace) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Workspace +func (tr *Workspace) GetID() string { + if tr.Status.AtProvider.ID == 
nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Workspace +func (tr *Workspace) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Workspace +func (tr *Workspace) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Workspace +func (tr *Workspace) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Workspace +func (tr *Workspace) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Workspace using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Workspace) LateInitialize(attrs []byte) (bool, error) { + params := &WorkspaceParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Workspace) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/synapse/v1beta2/zz_workspace_types.go b/apis/synapse/v1beta2/zz_workspace_types.go new file mode 100755 index 000000000..924ddaea2 --- /dev/null +++ b/apis/synapse/v1beta2/zz_workspace_types.go @@ -0,0 +1,685 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AADAdminInitParameters struct { + + // The login name of the Azure AD Administrator of this Synapse Workspace. + Login *string `json:"login,omitempty" tf:"login"` + + // The object id of the Azure AD Administrator of this Synapse Workspace. 
+ ObjectID *string `json:"objectId,omitempty" tf:"object_id"` + + // The tenant id of the Azure AD Administrator of this Synapse Workspace. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id"` +} + +type AADAdminObservation struct { + + // The login name of the Azure AD Administrator of this Synapse Workspace. + Login *string `json:"login,omitempty" tf:"login,omitempty"` + + // The object id of the Azure AD Administrator of this Synapse Workspace. + ObjectID *string `json:"objectId,omitempty" tf:"object_id,omitempty"` + + // The tenant id of the Azure AD Administrator of this Synapse Workspace. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` +} + +type AADAdminParameters struct { + + // The login name of the Azure AD Administrator of this Synapse Workspace. + // +kubebuilder:validation:Optional + Login *string `json:"login,omitempty" tf:"login"` + + // The object id of the Azure AD Administrator of this Synapse Workspace. + // +kubebuilder:validation:Optional + ObjectID *string `json:"objectId,omitempty" tf:"object_id"` + + // The tenant id of the Azure AD Administrator of this Synapse Workspace. + // +kubebuilder:validation:Optional + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id"` +} + +type AzureDevopsRepoInitParameters struct { + + // Specifies the Azure DevOps account name. + AccountName *string `json:"accountName,omitempty" tf:"account_name,omitempty"` + + // Specifies the collaboration branch of the repository to get code from. + BranchName *string `json:"branchName,omitempty" tf:"branch_name,omitempty"` + + // The last commit ID. + LastCommitID *string `json:"lastCommitId,omitempty" tf:"last_commit_id,omitempty"` + + // Specifies the name of the Azure DevOps project. + ProjectName *string `json:"projectName,omitempty" tf:"project_name,omitempty"` + + // Specifies the name of the git repository. 
+ RepositoryName *string `json:"repositoryName,omitempty" tf:"repository_name,omitempty"` + + // Specifies the root folder within the repository. Set to / for the top level. + RootFolder *string `json:"rootFolder,omitempty" tf:"root_folder,omitempty"` + + // the ID of the tenant for the Azure DevOps account. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` +} + +type AzureDevopsRepoObservation struct { + + // Specifies the Azure DevOps account name. + AccountName *string `json:"accountName,omitempty" tf:"account_name,omitempty"` + + // Specifies the collaboration branch of the repository to get code from. + BranchName *string `json:"branchName,omitempty" tf:"branch_name,omitempty"` + + // The last commit ID. + LastCommitID *string `json:"lastCommitId,omitempty" tf:"last_commit_id,omitempty"` + + // Specifies the name of the Azure DevOps project. + ProjectName *string `json:"projectName,omitempty" tf:"project_name,omitempty"` + + // Specifies the name of the git repository. + RepositoryName *string `json:"repositoryName,omitempty" tf:"repository_name,omitempty"` + + // Specifies the root folder within the repository. Set to / for the top level. + RootFolder *string `json:"rootFolder,omitempty" tf:"root_folder,omitempty"` + + // the ID of the tenant for the Azure DevOps account. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` +} + +type AzureDevopsRepoParameters struct { + + // Specifies the Azure DevOps account name. + // +kubebuilder:validation:Optional + AccountName *string `json:"accountName" tf:"account_name,omitempty"` + + // Specifies the collaboration branch of the repository to get code from. + // +kubebuilder:validation:Optional + BranchName *string `json:"branchName" tf:"branch_name,omitempty"` + + // The last commit ID. + // +kubebuilder:validation:Optional + LastCommitID *string `json:"lastCommitId,omitempty" tf:"last_commit_id,omitempty"` + + // Specifies the name of the Azure DevOps project. 
+ // +kubebuilder:validation:Optional + ProjectName *string `json:"projectName" tf:"project_name,omitempty"` + + // Specifies the name of the git repository. + // +kubebuilder:validation:Optional + RepositoryName *string `json:"repositoryName" tf:"repository_name,omitempty"` + + // Specifies the root folder within the repository. Set to / for the top level. + // +kubebuilder:validation:Optional + RootFolder *string `json:"rootFolder" tf:"root_folder,omitempty"` + + // the ID of the tenant for the Azure DevOps account. + // +kubebuilder:validation:Optional + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` +} + +type CustomerManagedKeyInitParameters struct { + + // An identifier for the key. Name needs to match the name of the key used with the azurerm_synapse_workspace_key resource. Defaults to "cmk" if not specified. + KeyName *string `json:"keyName,omitempty" tf:"key_name,omitempty"` + + // The Azure Key Vault Key Versionless ID to be used as the Customer Managed Key (CMK) for double encryption (e.g. https://example-keyvault.vault.azure.net/type/cmk/). + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta2.Key + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("versionless_id",true) + KeyVersionlessID *string `json:"keyVersionlessId,omitempty" tf:"key_versionless_id,omitempty"` + + // Reference to a Key in keyvault to populate keyVersionlessId. + // +kubebuilder:validation:Optional + KeyVersionlessIDRef *v1.Reference `json:"keyVersionlessIdRef,omitempty" tf:"-"` + + // Selector for a Key in keyvault to populate keyVersionlessId. + // +kubebuilder:validation:Optional + KeyVersionlessIDSelector *v1.Selector `json:"keyVersionlessIdSelector,omitempty" tf:"-"` + + // The User Assigned Identity ID to be used for accessing the Customer Managed Key for encryption. 
+ UserAssignedIdentityID *string `json:"userAssignedIdentityId,omitempty" tf:"user_assigned_identity_id,omitempty"` +} + +type CustomerManagedKeyObservation struct { + + // An identifier for the key. Name needs to match the name of the key used with the azurerm_synapse_workspace_key resource. Defaults to "cmk" if not specified. + KeyName *string `json:"keyName,omitempty" tf:"key_name,omitempty"` + + // The Azure Key Vault Key Versionless ID to be used as the Customer Managed Key (CMK) for double encryption (e.g. https://example-keyvault.vault.azure.net/type/cmk/). + KeyVersionlessID *string `json:"keyVersionlessId,omitempty" tf:"key_versionless_id,omitempty"` + + // The User Assigned Identity ID to be used for accessing the Customer Managed Key for encryption. + UserAssignedIdentityID *string `json:"userAssignedIdentityId,omitempty" tf:"user_assigned_identity_id,omitempty"` +} + +type CustomerManagedKeyParameters struct { + + // An identifier for the key. Name needs to match the name of the key used with the azurerm_synapse_workspace_key resource. Defaults to "cmk" if not specified. + // +kubebuilder:validation:Optional + KeyName *string `json:"keyName,omitempty" tf:"key_name,omitempty"` + + // The Azure Key Vault Key Versionless ID to be used as the Customer Managed Key (CMK) for double encryption (e.g. https://example-keyvault.vault.azure.net/type/cmk/). + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/keyvault/v1beta2.Key + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("versionless_id",true) + // +kubebuilder:validation:Optional + KeyVersionlessID *string `json:"keyVersionlessId,omitempty" tf:"key_versionless_id,omitempty"` + + // Reference to a Key in keyvault to populate keyVersionlessId. + // +kubebuilder:validation:Optional + KeyVersionlessIDRef *v1.Reference `json:"keyVersionlessIdRef,omitempty" tf:"-"` + + // Selector for a Key in keyvault to populate keyVersionlessId. 
+ // +kubebuilder:validation:Optional + KeyVersionlessIDSelector *v1.Selector `json:"keyVersionlessIdSelector,omitempty" tf:"-"` + + // The User Assigned Identity ID to be used for accessing the Customer Managed Key for encryption. + // +kubebuilder:validation:Optional + UserAssignedIdentityID *string `json:"userAssignedIdentityId,omitempty" tf:"user_assigned_identity_id,omitempty"` +} + +type GithubRepoInitParameters struct { + + // Specifies the GitHub account name. + AccountName *string `json:"accountName,omitempty" tf:"account_name,omitempty"` + + // Specifies the collaboration branch of the repository to get code from. + BranchName *string `json:"branchName,omitempty" tf:"branch_name,omitempty"` + + // Specifies the GitHub Enterprise host name. For example: https://github.mydomain.com. + GitURL *string `json:"gitUrl,omitempty" tf:"git_url,omitempty"` + + // The last commit ID. + LastCommitID *string `json:"lastCommitId,omitempty" tf:"last_commit_id,omitempty"` + + // Specifies the name of the git repository. + RepositoryName *string `json:"repositoryName,omitempty" tf:"repository_name,omitempty"` + + // Specifies the root folder within the repository. Set to / for the top level. + RootFolder *string `json:"rootFolder,omitempty" tf:"root_folder,omitempty"` +} + +type GithubRepoObservation struct { + + // Specifies the GitHub account name. + AccountName *string `json:"accountName,omitempty" tf:"account_name,omitempty"` + + // Specifies the collaboration branch of the repository to get code from. + BranchName *string `json:"branchName,omitempty" tf:"branch_name,omitempty"` + + // Specifies the GitHub Enterprise host name. For example: https://github.mydomain.com. + GitURL *string `json:"gitUrl,omitempty" tf:"git_url,omitempty"` + + // The last commit ID. + LastCommitID *string `json:"lastCommitId,omitempty" tf:"last_commit_id,omitempty"` + + // Specifies the name of the git repository. 
+ RepositoryName *string `json:"repositoryName,omitempty" tf:"repository_name,omitempty"` + + // Specifies the root folder within the repository. Set to / for the top level. + RootFolder *string `json:"rootFolder,omitempty" tf:"root_folder,omitempty"` +} + +type GithubRepoParameters struct { + + // Specifies the GitHub account name. + // +kubebuilder:validation:Optional + AccountName *string `json:"accountName" tf:"account_name,omitempty"` + + // Specifies the collaboration branch of the repository to get code from. + // +kubebuilder:validation:Optional + BranchName *string `json:"branchName" tf:"branch_name,omitempty"` + + // Specifies the GitHub Enterprise host name. For example: https://github.mydomain.com. + // +kubebuilder:validation:Optional + GitURL *string `json:"gitUrl,omitempty" tf:"git_url,omitempty"` + + // The last commit ID. + // +kubebuilder:validation:Optional + LastCommitID *string `json:"lastCommitId,omitempty" tf:"last_commit_id,omitempty"` + + // Specifies the name of the git repository. + // +kubebuilder:validation:Optional + RepositoryName *string `json:"repositoryName" tf:"repository_name,omitempty"` + + // Specifies the root folder within the repository. Set to / for the top level. + // +kubebuilder:validation:Optional + RootFolder *string `json:"rootFolder" tf:"root_folder,omitempty"` +} + +type IdentityInitParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Synapse Workspace. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be associated with this Synapse Workspace. Possible values are SystemAssigned, UserAssigned and SystemAssigned, UserAssigned (to enable both). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityObservation struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Synapse Workspace. 
+ // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Principal ID for the Service Principal associated with the Managed Service Identity of this Synapse Workspace. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID for the Service Principal associated with the Managed Service Identity of this Synapse Workspace. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the type of Managed Service Identity that should be associated with this Synapse Workspace. Possible values are SystemAssigned, UserAssigned and SystemAssigned, UserAssigned (to enable both). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityParameters struct { + + // Specifies a list of User Assigned Managed Identity IDs to be assigned to this Synapse Workspace. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be associated with this Synapse Workspace. Possible values are SystemAssigned, UserAssigned and SystemAssigned, UserAssigned (to enable both). + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type SQLAADAdminInitParameters struct { + + // The login name of the Azure AD Administrator of this Synapse Workspace SQL. + Login *string `json:"login,omitempty" tf:"login"` + + // The object id of the Azure AD Administrator of this Synapse Workspace SQL. + ObjectID *string `json:"objectId,omitempty" tf:"object_id"` + + // The tenant id of the Azure AD Administrator of this Synapse Workspace SQL. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id"` +} + +type SQLAADAdminObservation struct { + + // The login name of the Azure AD Administrator of this Synapse Workspace SQL. 
+ Login *string `json:"login,omitempty" tf:"login,omitempty"` + + // The object id of the Azure AD Administrator of this Synapse Workspace SQL. + ObjectID *string `json:"objectId,omitempty" tf:"object_id,omitempty"` + + // The tenant id of the Azure AD Administrator of this Synapse Workspace SQL. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` +} + +type SQLAADAdminParameters struct { + + // The login name of the Azure AD Administrator of this Synapse Workspace SQL. + // +kubebuilder:validation:Optional + Login *string `json:"login,omitempty" tf:"login"` + + // The object id of the Azure AD Administrator of this Synapse Workspace SQL. + // +kubebuilder:validation:Optional + ObjectID *string `json:"objectId,omitempty" tf:"object_id"` + + // The tenant id of the Azure AD Administrator of this Synapse Workspace SQL. + // +kubebuilder:validation:Optional + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id"` +} + +type WorkspaceInitParameters struct { + + // An aad_admin block as defined below. + AADAdmin []AADAdminInitParameters `json:"aadAdmin,omitempty" tf:"aad_admin,omitempty"` + + // An azure_devops_repo block as defined below. + AzureDevopsRepo *AzureDevopsRepoInitParameters `json:"azureDevopsRepo,omitempty" tf:"azure_devops_repo,omitempty"` + + // Is Azure Active Directory Authentication the only way to authenticate with resources inside this synapse Workspace. Defaults to false. + AzureadAuthenticationOnly *bool `json:"azureadAuthenticationOnly,omitempty" tf:"azuread_authentication_only,omitempty"` + + // Subnet ID used for computes in workspace Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + ComputeSubnetID *string `json:"computeSubnetId,omitempty" tf:"compute_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate computeSubnetId. + // +kubebuilder:validation:Optional + ComputeSubnetIDRef *v1.Reference `json:"computeSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate computeSubnetId. + // +kubebuilder:validation:Optional + ComputeSubnetIDSelector *v1.Selector `json:"computeSubnetIdSelector,omitempty" tf:"-"` + + // A customer_managed_key block as defined below. + CustomerManagedKey *CustomerManagedKeyInitParameters `json:"customerManagedKey,omitempty" tf:"customer_managed_key,omitempty"` + + // Is data exfiltration protection enabled in this workspace? If set to true, managed_virtual_network_enabled must also be set to true. Changing this forces a new resource to be created. + DataExfiltrationProtectionEnabled *bool `json:"dataExfiltrationProtectionEnabled,omitempty" tf:"data_exfiltration_protection_enabled,omitempty"` + + // A github_repo block as defined below. + GithubRepo *GithubRepoInitParameters `json:"githubRepo,omitempty" tf:"github_repo,omitempty"` + + // An identity block as defined below. + Identity *IdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Allowed AAD Tenant Ids For Linking. + LinkingAllowedForAADTenantIds []*string `json:"linkingAllowedForAadTenantIds,omitempty" tf:"linking_allowed_for_aad_tenant_ids,omitempty"` + + // Specifies the Azure Region where the synapse Workspace should exist. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Workspace managed resource group. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + ManagedResourceGroupName *string `json:"managedResourceGroupName,omitempty" tf:"managed_resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate managedResourceGroupName. + // +kubebuilder:validation:Optional + ManagedResourceGroupNameRef *v1.Reference `json:"managedResourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate managedResourceGroupName. + // +kubebuilder:validation:Optional + ManagedResourceGroupNameSelector *v1.Selector `json:"managedResourceGroupNameSelector,omitempty" tf:"-"` + + // Is Virtual Network enabled for all computes in this workspace? Changing this forces a new resource to be created. + ManagedVirtualNetworkEnabled *bool `json:"managedVirtualNetworkEnabled,omitempty" tf:"managed_virtual_network_enabled,omitempty"` + + // Whether public network access is allowed for the Cognitive Account. Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The ID of purview account. + PurviewID *string `json:"purviewId,omitempty" tf:"purview_id,omitempty"` + + // An sql_aad_admin block as defined below. + SQLAADAdmin []SQLAADAdminInitParameters `json:"sqlAadAdmin,omitempty" tf:"sql_aad_admin,omitempty"` + + // Specifies The login name of the SQL administrator. Changing this forces a new resource to be created. If this is not provided aad_admin or customer_managed_key must be provided. + SQLAdministratorLogin *string `json:"sqlAdministratorLogin,omitempty" tf:"sql_administrator_login,omitempty"` + + // Are pipelines (running as workspace's system assigned identity) allowed to access SQL pools? + SQLIdentityControlEnabled *bool `json:"sqlIdentityControlEnabled,omitempty" tf:"sql_identity_control_enabled,omitempty"` + + // Specifies the ID of storage data lake gen2 filesystem resource. 
Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.DataLakeGen2FileSystem + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + StorageDataLakeGen2FileSystemID *string `json:"storageDataLakeGen2FilesystemId,omitempty" tf:"storage_data_lake_gen2_filesystem_id,omitempty"` + + // Reference to a DataLakeGen2FileSystem in storage to populate storageDataLakeGen2FilesystemId. + // +kubebuilder:validation:Optional + StorageDataLakeGen2FileSystemIDRef *v1.Reference `json:"storageDataLakeGen2FilesystemIdRef,omitempty" tf:"-"` + + // Selector for a DataLakeGen2FileSystem in storage to populate storageDataLakeGen2FilesystemId. + // +kubebuilder:validation:Optional + StorageDataLakeGen2FileSystemIDSelector *v1.Selector `json:"storageDataLakeGen2FilesystemIdSelector,omitempty" tf:"-"` + + // A mapping of tags which should be assigned to the Synapse Workspace. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type WorkspaceObservation struct { + + // An aad_admin block as defined below. + AADAdmin []AADAdminObservation `json:"aadAdmin,omitempty" tf:"aad_admin,omitempty"` + + // An azure_devops_repo block as defined below. + AzureDevopsRepo *AzureDevopsRepoObservation `json:"azureDevopsRepo,omitempty" tf:"azure_devops_repo,omitempty"` + + // Is Azure Active Directory Authentication the only way to authenticate with resources inside this synapse Workspace. Defaults to false. + AzureadAuthenticationOnly *bool `json:"azureadAuthenticationOnly,omitempty" tf:"azuread_authentication_only,omitempty"` + + // Subnet ID used for computes in workspace Changing this forces a new resource to be created. + ComputeSubnetID *string `json:"computeSubnetId,omitempty" tf:"compute_subnet_id,omitempty"` + + // A list of Connectivity endpoints for this Synapse Workspace. 
+ // +mapType=granular + ConnectivityEndpoints map[string]*string `json:"connectivityEndpoints,omitempty" tf:"connectivity_endpoints,omitempty"` + + // A customer_managed_key block as defined below. + CustomerManagedKey *CustomerManagedKeyObservation `json:"customerManagedKey,omitempty" tf:"customer_managed_key,omitempty"` + + // Is data exfiltration protection enabled in this workspace? If set to true, managed_virtual_network_enabled must also be set to true. Changing this forces a new resource to be created. + DataExfiltrationProtectionEnabled *bool `json:"dataExfiltrationProtectionEnabled,omitempty" tf:"data_exfiltration_protection_enabled,omitempty"` + + // A github_repo block as defined below. + GithubRepo *GithubRepoObservation `json:"githubRepo,omitempty" tf:"github_repo,omitempty"` + + // The ID of the synapse Workspace. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. + Identity *IdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // Allowed AAD Tenant Ids For Linking. + LinkingAllowedForAADTenantIds []*string `json:"linkingAllowedForAadTenantIds,omitempty" tf:"linking_allowed_for_aad_tenant_ids,omitempty"` + + // Specifies the Azure Region where the synapse Workspace should exist. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Workspace managed resource group. Changing this forces a new resource to be created. + ManagedResourceGroupName *string `json:"managedResourceGroupName,omitempty" tf:"managed_resource_group_name,omitempty"` + + // Is Virtual Network enabled for all computes in this workspace? Changing this forces a new resource to be created. + ManagedVirtualNetworkEnabled *bool `json:"managedVirtualNetworkEnabled,omitempty" tf:"managed_virtual_network_enabled,omitempty"` + + // Whether public network access is allowed for the Cognitive Account. Defaults to true. 
+ PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The ID of purview account. + PurviewID *string `json:"purviewId,omitempty" tf:"purview_id,omitempty"` + + // Specifies the name of the Resource Group where the synapse Workspace should exist. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // An sql_aad_admin block as defined below. + SQLAADAdmin []SQLAADAdminObservation `json:"sqlAadAdmin,omitempty" tf:"sql_aad_admin,omitempty"` + + // Specifies The login name of the SQL administrator. Changing this forces a new resource to be created. If this is not provided aad_admin or customer_managed_key must be provided. + SQLAdministratorLogin *string `json:"sqlAdministratorLogin,omitempty" tf:"sql_administrator_login,omitempty"` + + // Are pipelines (running as workspace's system assigned identity) allowed to access SQL pools? + SQLIdentityControlEnabled *bool `json:"sqlIdentityControlEnabled,omitempty" tf:"sql_identity_control_enabled,omitempty"` + + // Specifies the ID of storage data lake gen2 filesystem resource. Changing this forces a new resource to be created. + StorageDataLakeGen2FileSystemID *string `json:"storageDataLakeGen2FilesystemId,omitempty" tf:"storage_data_lake_gen2_filesystem_id,omitempty"` + + // A mapping of tags which should be assigned to the Synapse Workspace. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type WorkspaceParameters struct { + + // An aad_admin block as defined below. + // +kubebuilder:validation:Optional + AADAdmin []AADAdminParameters `json:"aadAdmin,omitempty" tf:"aad_admin,omitempty"` + + // An azure_devops_repo block as defined below. 
+ // +kubebuilder:validation:Optional + AzureDevopsRepo *AzureDevopsRepoParameters `json:"azureDevopsRepo,omitempty" tf:"azure_devops_repo,omitempty"` + + // Is Azure Active Directory Authentication the only way to authenticate with resources inside this synapse Workspace. Defaults to false. + // +kubebuilder:validation:Optional + AzureadAuthenticationOnly *bool `json:"azureadAuthenticationOnly,omitempty" tf:"azuread_authentication_only,omitempty"` + + // Subnet ID used for computes in workspace Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + ComputeSubnetID *string `json:"computeSubnetId,omitempty" tf:"compute_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate computeSubnetId. + // +kubebuilder:validation:Optional + ComputeSubnetIDRef *v1.Reference `json:"computeSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate computeSubnetId. + // +kubebuilder:validation:Optional + ComputeSubnetIDSelector *v1.Selector `json:"computeSubnetIdSelector,omitempty" tf:"-"` + + // A customer_managed_key block as defined below. + // +kubebuilder:validation:Optional + CustomerManagedKey *CustomerManagedKeyParameters `json:"customerManagedKey,omitempty" tf:"customer_managed_key,omitempty"` + + // Is data exfiltration protection enabled in this workspace? If set to true, managed_virtual_network_enabled must also be set to true. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + DataExfiltrationProtectionEnabled *bool `json:"dataExfiltrationProtectionEnabled,omitempty" tf:"data_exfiltration_protection_enabled,omitempty"` + + // A github_repo block as defined below. 
+ // +kubebuilder:validation:Optional + GithubRepo *GithubRepoParameters `json:"githubRepo,omitempty" tf:"github_repo,omitempty"` + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *IdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Allowed AAD Tenant Ids For Linking. + // +kubebuilder:validation:Optional + LinkingAllowedForAADTenantIds []*string `json:"linkingAllowedForAadTenantIds,omitempty" tf:"linking_allowed_for_aad_tenant_ids,omitempty"` + + // Specifies the Azure Region where the synapse Workspace should exist. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Workspace managed resource group. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ManagedResourceGroupName *string `json:"managedResourceGroupName,omitempty" tf:"managed_resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate managedResourceGroupName. + // +kubebuilder:validation:Optional + ManagedResourceGroupNameRef *v1.Reference `json:"managedResourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate managedResourceGroupName. + // +kubebuilder:validation:Optional + ManagedResourceGroupNameSelector *v1.Selector `json:"managedResourceGroupNameSelector,omitempty" tf:"-"` + + // Is Virtual Network enabled for all computes in this workspace? Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ManagedVirtualNetworkEnabled *bool `json:"managedVirtualNetworkEnabled,omitempty" tf:"managed_virtual_network_enabled,omitempty"` + + // Whether public network access is allowed for the Cognitive Account. Defaults to true. 
+ // +kubebuilder:validation:Optional + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The ID of purview account. + // +kubebuilder:validation:Optional + PurviewID *string `json:"purviewId,omitempty" tf:"purview_id,omitempty"` + + // Specifies the name of the Resource Group where the synapse Workspace should exist. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // An sql_aad_admin block as defined below. + // +kubebuilder:validation:Optional + SQLAADAdmin []SQLAADAdminParameters `json:"sqlAadAdmin,omitempty" tf:"sql_aad_admin,omitempty"` + + // Specifies The login name of the SQL administrator. Changing this forces a new resource to be created. If this is not provided aad_admin or customer_managed_key must be provided. + // +kubebuilder:validation:Optional + SQLAdministratorLogin *string `json:"sqlAdministratorLogin,omitempty" tf:"sql_administrator_login,omitempty"` + + // The Password associated with the sql_administrator_login for the SQL administrator. If this is not provided aad_admin or customer_managed_key must be provided. 
+ // +kubebuilder:validation:Optional + SQLAdministratorLoginPasswordSecretRef *v1.SecretKeySelector `json:"sqlAdministratorLoginPasswordSecretRef,omitempty" tf:"-"` + + // Are pipelines (running as workspace's system assigned identity) allowed to access SQL pools? + // +kubebuilder:validation:Optional + SQLIdentityControlEnabled *bool `json:"sqlIdentityControlEnabled,omitempty" tf:"sql_identity_control_enabled,omitempty"` + + // Specifies the ID of storage data lake gen2 filesystem resource. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta1.DataLakeGen2FileSystem + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + StorageDataLakeGen2FileSystemID *string `json:"storageDataLakeGen2FilesystemId,omitempty" tf:"storage_data_lake_gen2_filesystem_id,omitempty"` + + // Reference to a DataLakeGen2FileSystem in storage to populate storageDataLakeGen2FilesystemId. + // +kubebuilder:validation:Optional + StorageDataLakeGen2FileSystemIDRef *v1.Reference `json:"storageDataLakeGen2FilesystemIdRef,omitempty" tf:"-"` + + // Selector for a DataLakeGen2FileSystem in storage to populate storageDataLakeGen2FilesystemId. + // +kubebuilder:validation:Optional + StorageDataLakeGen2FileSystemIDSelector *v1.Selector `json:"storageDataLakeGen2FilesystemIdSelector,omitempty" tf:"-"` + + // A mapping of tags which should be assigned to the Synapse Workspace. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +// WorkspaceSpec defines the desired state of Workspace +type WorkspaceSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider WorkspaceParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. 
+ // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider WorkspaceInitParameters `json:"initProvider,omitempty"` +} + +// WorkspaceStatus defines the observed state of Workspace. +type WorkspaceStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider WorkspaceObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Workspace is the Schema for the Workspaces API. Manages a Synapse Workspace. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type Workspace struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + Spec WorkspaceSpec `json:"spec"` + Status WorkspaceStatus 
`json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// WorkspaceList contains a list of Workspaces +type WorkspaceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Workspace `json:"items"` +} + +// Repository type metadata. +var ( + Workspace_Kind = "Workspace" + Workspace_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Workspace_Kind}.String() + Workspace_KindAPIVersion = Workspace_Kind + "." + CRDGroupVersion.String() + Workspace_GroupVersionKind = CRDGroupVersion.WithKind(Workspace_Kind) +) + +func init() { + SchemeBuilder.Register(&Workspace{}, &WorkspaceList{}) +} diff --git a/apis/synapse/v1beta2/zz_workspacevulnerabilityassessment_terraformed.go b/apis/synapse/v1beta2/zz_workspacevulnerabilityassessment_terraformed.go new file mode 100755 index 000000000..805ddf451 --- /dev/null +++ b/apis/synapse/v1beta2/zz_workspacevulnerabilityassessment_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this WorkspaceVulnerabilityAssessment +func (mg *WorkspaceVulnerabilityAssessment) GetTerraformResourceType() string { + return "azurerm_synapse_workspace_vulnerability_assessment" +} + +// GetConnectionDetailsMapping for this WorkspaceVulnerabilityAssessment +func (tr *WorkspaceVulnerabilityAssessment) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"storage_account_access_key": "spec.forProvider.storageAccountAccessKeySecretRef", "storage_container_sas_key": "spec.forProvider.storageContainerSasKeySecretRef"} +} + +// GetObservation of this WorkspaceVulnerabilityAssessment +func (tr *WorkspaceVulnerabilityAssessment) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this WorkspaceVulnerabilityAssessment +func (tr *WorkspaceVulnerabilityAssessment) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this WorkspaceVulnerabilityAssessment +func (tr *WorkspaceVulnerabilityAssessment) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this WorkspaceVulnerabilityAssessment +func (tr *WorkspaceVulnerabilityAssessment) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters 
 for this WorkspaceVulnerabilityAssessment
+func (tr *WorkspaceVulnerabilityAssessment) SetParameters(params map[string]any) error {
+	p, err := json.TFParser.Marshal(params)
+	if err != nil {
+		return err
+	}
+	return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider)
+}
+
+// GetInitParameters of this WorkspaceVulnerabilityAssessment
+func (tr *WorkspaceVulnerabilityAssessment) GetInitParameters() (map[string]any, error) {
+	p, err := json.TFParser.Marshal(tr.Spec.InitProvider)
+	if err != nil {
+		return nil, err
+	}
+	base := map[string]any{}
+	return base, json.TFParser.Unmarshal(p, &base)
+}
+
+// GetMergedParameters of this WorkspaceVulnerabilityAssessment
+func (tr *WorkspaceVulnerabilityAssessment) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) {
+	params, err := tr.GetParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName())
+	}
+	if !shouldMergeInitProvider {
+		return params, nil
+	}
+
+	initParams, err := tr.GetInitParameters()
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName())
+	}
+
+	// Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the
+	// slices from the initProvider to forProvider. As it also sets
+	// overwrite to true, we need to set it back to false, we don't
+	// want to overwrite the forProvider fields with the initProvider
+	// fields.
+	err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) {
+		c.Overwrite = false
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName())
+	}
+
+	return params, nil
+}
+
+// LateInitialize this WorkspaceVulnerabilityAssessment using its observed tfState.
+// returns True if there are any spec changes for the resource.
+func (tr *WorkspaceVulnerabilityAssessment) LateInitialize(attrs []byte) (bool, error) { + params := &WorkspaceVulnerabilityAssessmentParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *WorkspaceVulnerabilityAssessment) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/synapse/v1beta2/zz_workspacevulnerabilityassessment_types.go b/apis/synapse/v1beta2/zz_workspacevulnerabilityassessment_types.go new file mode 100755 index 000000000..f4843f8b0 --- /dev/null +++ b/apis/synapse/v1beta2/zz_workspacevulnerabilityassessment_types.go @@ -0,0 +1,184 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type RecurringScansInitParameters struct { + + // Boolean flag which specifies if the schedule scan notification will be sent to the subscription administrators. Defaults to false. + EmailSubscriptionAdminsEnabled *bool `json:"emailSubscriptionAdminsEnabled,omitempty" tf:"email_subscription_admins_enabled,omitempty"` + + // Specifies an array of email addresses to which the scan notification is sent. + Emails []*string `json:"emails,omitempty" tf:"emails,omitempty"` + + // Boolean flag which specifies if recurring scans is enabled or disabled. Defaults to false. 
+ Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type RecurringScansObservation struct { + + // Boolean flag which specifies if the schedule scan notification will be sent to the subscription administrators. Defaults to false. + EmailSubscriptionAdminsEnabled *bool `json:"emailSubscriptionAdminsEnabled,omitempty" tf:"email_subscription_admins_enabled,omitempty"` + + // Specifies an array of email addresses to which the scan notification is sent. + Emails []*string `json:"emails,omitempty" tf:"emails,omitempty"` + + // Boolean flag which specifies if recurring scans is enabled or disabled. Defaults to false. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type RecurringScansParameters struct { + + // Boolean flag which specifies if the schedule scan notification will be sent to the subscription administrators. Defaults to false. + // +kubebuilder:validation:Optional + EmailSubscriptionAdminsEnabled *bool `json:"emailSubscriptionAdminsEnabled,omitempty" tf:"email_subscription_admins_enabled,omitempty"` + + // Specifies an array of email addresses to which the scan notification is sent. + // +kubebuilder:validation:Optional + Emails []*string `json:"emails,omitempty" tf:"emails,omitempty"` + + // Boolean flag which specifies if recurring scans is enabled or disabled. Defaults to false. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` +} + +type WorkspaceVulnerabilityAssessmentInitParameters struct { + + // The recurring scans settings. The recurring_scans block supports fields documented below. + RecurringScans *RecurringScansInitParameters `json:"recurringScans,omitempty" tf:"recurring_scans,omitempty"` + + // A blob storage container path to hold the scan results (e.g. https://example.blob.core.windows.net/VaScans/). 
+ StorageContainerPath *string `json:"storageContainerPath,omitempty" tf:"storage_container_path,omitempty"` + + // The ID of the security alert policy of the Synapse Workspace. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/synapse/v1beta1.WorkspaceSecurityAlertPolicy + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + WorkspaceSecurityAlertPolicyID *string `json:"workspaceSecurityAlertPolicyId,omitempty" tf:"workspace_security_alert_policy_id,omitempty"` + + // Reference to a WorkspaceSecurityAlertPolicy in synapse to populate workspaceSecurityAlertPolicyId. + // +kubebuilder:validation:Optional + WorkspaceSecurityAlertPolicyIDRef *v1.Reference `json:"workspaceSecurityAlertPolicyIdRef,omitempty" tf:"-"` + + // Selector for a WorkspaceSecurityAlertPolicy in synapse to populate workspaceSecurityAlertPolicyId. + // +kubebuilder:validation:Optional + WorkspaceSecurityAlertPolicyIDSelector *v1.Selector `json:"workspaceSecurityAlertPolicyIdSelector,omitempty" tf:"-"` +} + +type WorkspaceVulnerabilityAssessmentObservation struct { + + // The ID of the Synapse Workspace Vulnerability Assessment. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The recurring scans settings. The recurring_scans block supports fields documented below. + RecurringScans *RecurringScansObservation `json:"recurringScans,omitempty" tf:"recurring_scans,omitempty"` + + // A blob storage container path to hold the scan results (e.g. https://example.blob.core.windows.net/VaScans/). + StorageContainerPath *string `json:"storageContainerPath,omitempty" tf:"storage_container_path,omitempty"` + + // The ID of the security alert policy of the Synapse Workspace. Changing this forces a new resource to be created. 
+ WorkspaceSecurityAlertPolicyID *string `json:"workspaceSecurityAlertPolicyId,omitempty" tf:"workspace_security_alert_policy_id,omitempty"` +} + +type WorkspaceVulnerabilityAssessmentParameters struct { + + // The recurring scans settings. The recurring_scans block supports fields documented below. + // +kubebuilder:validation:Optional + RecurringScans *RecurringScansParameters `json:"recurringScans,omitempty" tf:"recurring_scans,omitempty"` + + // Specifies the identifier key of the storage account for vulnerability assessment scan results. If storage_container_sas_key isn't specified, storage_account_access_key is required. + // +kubebuilder:validation:Optional + StorageAccountAccessKeySecretRef *v1.SecretKeySelector `json:"storageAccountAccessKeySecretRef,omitempty" tf:"-"` + + // A blob storage container path to hold the scan results (e.g. https://example.blob.core.windows.net/VaScans/). + // +kubebuilder:validation:Optional + StorageContainerPath *string `json:"storageContainerPath,omitempty" tf:"storage_container_path,omitempty"` + + // A shared access signature (SAS Key) that has write access to the blob container specified in storage_container_path parameter. If storage_account_access_key isn't specified, storage_container_sas_key is required. + // +kubebuilder:validation:Optional + StorageContainerSASKeySecretRef *v1.SecretKeySelector `json:"storageContainerSasKeySecretRef,omitempty" tf:"-"` + + // The ID of the security alert policy of the Synapse Workspace. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/synapse/v1beta1.WorkspaceSecurityAlertPolicy + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + WorkspaceSecurityAlertPolicyID *string `json:"workspaceSecurityAlertPolicyId,omitempty" tf:"workspace_security_alert_policy_id,omitempty"` + + // Reference to a WorkspaceSecurityAlertPolicy in synapse to populate workspaceSecurityAlertPolicyId. + // +kubebuilder:validation:Optional + WorkspaceSecurityAlertPolicyIDRef *v1.Reference `json:"workspaceSecurityAlertPolicyIdRef,omitempty" tf:"-"` + + // Selector for a WorkspaceSecurityAlertPolicy in synapse to populate workspaceSecurityAlertPolicyId. + // +kubebuilder:validation:Optional + WorkspaceSecurityAlertPolicyIDSelector *v1.Selector `json:"workspaceSecurityAlertPolicyIdSelector,omitempty" tf:"-"` +} + +// WorkspaceVulnerabilityAssessmentSpec defines the desired state of WorkspaceVulnerabilityAssessment +type WorkspaceVulnerabilityAssessmentSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider WorkspaceVulnerabilityAssessmentParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider WorkspaceVulnerabilityAssessmentInitParameters `json:"initProvider,omitempty"` +} + +// WorkspaceVulnerabilityAssessmentStatus defines the observed state of WorkspaceVulnerabilityAssessment. +type WorkspaceVulnerabilityAssessmentStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider WorkspaceVulnerabilityAssessmentObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// WorkspaceVulnerabilityAssessment is the Schema for the WorkspaceVulnerabilityAssessments API. Manages the Vulnerability Assessment for a Synapse Workspace. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type WorkspaceVulnerabilityAssessment struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.storageContainerPath) || (has(self.initProvider) && has(self.initProvider.storageContainerPath))",message="spec.forProvider.storageContainerPath is a required parameter" + Spec WorkspaceVulnerabilityAssessmentSpec `json:"spec"` + Status WorkspaceVulnerabilityAssessmentStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// WorkspaceVulnerabilityAssessmentList contains a list of WorkspaceVulnerabilityAssessments +type WorkspaceVulnerabilityAssessmentList struct { + metav1.TypeMeta `json:",inline"` + 
metav1.ListMeta `json:"metadata,omitempty"` + Items []WorkspaceVulnerabilityAssessment `json:"items"` +} + +// Repository type metadata. +var ( + WorkspaceVulnerabilityAssessment_Kind = "WorkspaceVulnerabilityAssessment" + WorkspaceVulnerabilityAssessment_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: WorkspaceVulnerabilityAssessment_Kind}.String() + WorkspaceVulnerabilityAssessment_KindAPIVersion = WorkspaceVulnerabilityAssessment_Kind + "." + CRDGroupVersion.String() + WorkspaceVulnerabilityAssessment_GroupVersionKind = CRDGroupVersion.WithKind(WorkspaceVulnerabilityAssessment_Kind) +) + +func init() { + SchemeBuilder.Register(&WorkspaceVulnerabilityAssessment{}, &WorkspaceVulnerabilityAssessmentList{}) +} diff --git a/apis/timeseriesinsights/v1beta1/zz_eventsourceeventhub_types.go b/apis/timeseriesinsights/v1beta1/zz_eventsourceeventhub_types.go index 3d3afbb9f..be35bd29a 100755 --- a/apis/timeseriesinsights/v1beta1/zz_eventsourceeventhub_types.go +++ b/apis/timeseriesinsights/v1beta1/zz_eventsourceeventhub_types.go @@ -28,7 +28,7 @@ type EventSourceEventHubInitParameters struct { ConsumerGroupNameSelector *v1.Selector `json:"consumerGroupNameSelector,omitempty" tf:"-"` // Specifies the name of the EventHub which will be associated with this resource. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta1.EventHub + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta2.EventHub EventHubName *string `json:"eventhubName,omitempty" tf:"eventhub_name,omitempty"` // Reference to a EventHub in eventhub to populate eventhubName. @@ -40,7 +40,7 @@ type EventSourceEventHubInitParameters struct { EventHubNameSelector *v1.Selector `json:"eventhubNameSelector,omitempty" tf:"-"` // Specifies the resource id where events will be coming from. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta1.EventHub + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta2.EventHub // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() EventSourceResourceID *string `json:"eventSourceResourceId,omitempty" tf:"event_source_resource_id,omitempty"` @@ -56,7 +56,7 @@ type EventSourceEventHubInitParameters struct { Location *string `json:"location,omitempty" tf:"location,omitempty"` // Specifies the EventHub Namespace name. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta1.EventHubNamespace + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta2.EventHubNamespace NamespaceName *string `json:"namespaceName,omitempty" tf:"namespace_name,omitempty"` // Reference to a EventHubNamespace in eventhub to populate namespaceName. @@ -137,7 +137,7 @@ type EventSourceEventHubParameters struct { ConsumerGroupNameSelector *v1.Selector `json:"consumerGroupNameSelector,omitempty" tf:"-"` // Specifies the id of the IoT Time Series Insights Environment that the Event Source should be associated with. Changing this forces a new resource to created. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/timeseriesinsights/v1beta1.Gen2Environment + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/timeseriesinsights/v1beta2.Gen2Environment // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional EnvironmentID *string `json:"environmentId,omitempty" tf:"environment_id,omitempty"` @@ -151,7 +151,7 @@ type EventSourceEventHubParameters struct { EnvironmentIDSelector *v1.Selector `json:"environmentIdSelector,omitempty" tf:"-"` // Specifies the name of the EventHub which will be associated with this resource. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta1.EventHub + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta2.EventHub // +kubebuilder:validation:Optional EventHubName *string `json:"eventhubName,omitempty" tf:"eventhub_name,omitempty"` @@ -164,7 +164,7 @@ type EventSourceEventHubParameters struct { EventHubNameSelector *v1.Selector `json:"eventhubNameSelector,omitempty" tf:"-"` // Specifies the resource id where events will be coming from. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta1.EventHub + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta2.EventHub // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional EventSourceResourceID *string `json:"eventSourceResourceId,omitempty" tf:"event_source_resource_id,omitempty"` @@ -182,7 +182,7 @@ type EventSourceEventHubParameters struct { Location *string `json:"location,omitempty" tf:"location,omitempty"` // Specifies the EventHub Namespace name. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta1.EventHubNamespace + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/eventhub/v1beta2.EventHubNamespace // +kubebuilder:validation:Optional NamespaceName *string `json:"namespaceName,omitempty" tf:"namespace_name,omitempty"` diff --git a/apis/timeseriesinsights/v1beta1/zz_eventsourceiothub_types.go b/apis/timeseriesinsights/v1beta1/zz_eventsourceiothub_types.go index 5daa752a7..3f87e8d32 100755 --- a/apis/timeseriesinsights/v1beta1/zz_eventsourceiothub_types.go +++ b/apis/timeseriesinsights/v1beta1/zz_eventsourceiothub_types.go @@ -28,7 +28,7 @@ type EventSourceIOTHubInitParameters struct { ConsumerGroupNameSelector *v1.Selector `json:"consumerGroupNameSelector,omitempty" tf:"-"` // Specifies the resource id where events will be coming from. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devices/v1beta1.IOTHub + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devices/v1beta2.IOTHub // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() EventSourceResourceID *string `json:"eventSourceResourceId,omitempty" tf:"event_source_resource_id,omitempty"` @@ -41,7 +41,7 @@ type EventSourceIOTHubInitParameters struct { EventSourceResourceIDSelector *v1.Selector `json:"eventSourceResourceIdSelector,omitempty" tf:"-"` // Specifies the name of the IotHub which will be associated with this resource. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devices/v1beta1.IOTHub + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devices/v1beta2.IOTHub IOTHubName *string `json:"iothubName,omitempty" tf:"iothub_name,omitempty"` // Reference to a IOTHub in devices to populate iothubName. 
@@ -113,7 +113,7 @@ type EventSourceIOTHubParameters struct { ConsumerGroupNameSelector *v1.Selector `json:"consumerGroupNameSelector,omitempty" tf:"-"` // Specifies the id of the IoT Time Series Insights Environment that the Event Source should be associated with. Changing this forces a new resource to created. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/timeseriesinsights/v1beta1.Gen2Environment + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/timeseriesinsights/v1beta2.Gen2Environment // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional EnvironmentID *string `json:"environmentId,omitempty" tf:"environment_id,omitempty"` @@ -127,7 +127,7 @@ type EventSourceIOTHubParameters struct { EnvironmentIDSelector *v1.Selector `json:"environmentIdSelector,omitempty" tf:"-"` // Specifies the resource id where events will be coming from. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devices/v1beta1.IOTHub + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devices/v1beta2.IOTHub // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional EventSourceResourceID *string `json:"eventSourceResourceId,omitempty" tf:"event_source_resource_id,omitempty"` @@ -141,7 +141,7 @@ type EventSourceIOTHubParameters struct { EventSourceResourceIDSelector *v1.Selector `json:"eventSourceResourceIdSelector,omitempty" tf:"-"` // Specifies the name of the IotHub which will be associated with this resource. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devices/v1beta1.IOTHub + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/devices/v1beta2.IOTHub // +kubebuilder:validation:Optional IOTHubName *string `json:"iothubName,omitempty" tf:"iothub_name,omitempty"` diff --git a/apis/timeseriesinsights/v1beta1/zz_generated.conversion_hubs.go b/apis/timeseriesinsights/v1beta1/zz_generated.conversion_hubs.go index 6c0a98fd0..936b96b15 100755 --- a/apis/timeseriesinsights/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/timeseriesinsights/v1beta1/zz_generated.conversion_hubs.go @@ -12,9 +12,6 @@ func (tr *EventSourceEventHub) Hub() {} // Hub marks this type as a conversion hub. func (tr *EventSourceIOTHub) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *Gen2Environment) Hub() {} - // Hub marks this type as a conversion hub. func (tr *ReferenceDataSet) Hub() {} diff --git a/apis/timeseriesinsights/v1beta1/zz_generated.conversion_spokes.go b/apis/timeseriesinsights/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..f2f4fb1ae --- /dev/null +++ b/apis/timeseriesinsights/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this Gen2Environment to the hub type. 
+func (tr *Gen2Environment) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the Gen2Environment type. +func (tr *Gen2Environment) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/timeseriesinsights/v1beta1/zz_generated.resolvers.go b/apis/timeseriesinsights/v1beta1/zz_generated.resolvers.go index cd5b933db..079971f5c 100644 --- a/apis/timeseriesinsights/v1beta1/zz_generated.resolvers.go +++ b/apis/timeseriesinsights/v1beta1/zz_generated.resolvers.go @@ -45,7 +45,7 @@ func (mg *EventSourceEventHub) ResolveReferences( // ResolveReferences of this E mg.Spec.ForProvider.ConsumerGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ConsumerGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("timeseriesinsights.azure.upbound.io", "v1beta1", "Gen2Environment", "Gen2EnvironmentList") + m, l, err = apisresolver.GetManagedResource("timeseriesinsights.azure.upbound.io", "v1beta2", "Gen2Environment", "Gen2EnvironmentList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -64,7 +64,7 @@ func (mg *EventSourceEventHub) ResolveReferences( // ResolveReferences of this E 
mg.Spec.ForProvider.EnvironmentID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.EnvironmentIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta1", "EventHub", "EventHubList") + m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta2", "EventHub", "EventHubList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -83,7 +83,7 @@ func (mg *EventSourceEventHub) ResolveReferences( // ResolveReferences of this E mg.Spec.ForProvider.EventHubName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.EventHubNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta1", "EventHub", "EventHubList") + m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta2", "EventHub", "EventHubList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -102,7 +102,7 @@ func (mg *EventSourceEventHub) ResolveReferences( // ResolveReferences of this E mg.Spec.ForProvider.EventSourceResourceID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.EventSourceResourceIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta1", "EventHubNamespace", "EventHubNamespaceList") + m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta2", "EventHubNamespace", "EventHubNamespaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -159,7 +159,7 @@ func (mg *EventSourceEventHub) ResolveReferences( // ResolveReferences of this E mg.Spec.InitProvider.ConsumerGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.ConsumerGroupNameRef = 
rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta1", "EventHub", "EventHubList") + m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta2", "EventHub", "EventHubList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -178,7 +178,7 @@ func (mg *EventSourceEventHub) ResolveReferences( // ResolveReferences of this E mg.Spec.InitProvider.EventHubName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.EventHubNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta1", "EventHub", "EventHubList") + m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta2", "EventHub", "EventHubList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -197,7 +197,7 @@ func (mg *EventSourceEventHub) ResolveReferences( // ResolveReferences of this E mg.Spec.InitProvider.EventSourceResourceID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.EventSourceResourceIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta1", "EventHubNamespace", "EventHubNamespaceList") + m, l, err = apisresolver.GetManagedResource("eventhub.azure.upbound.io", "v1beta2", "EventHubNamespace", "EventHubNamespaceList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -266,7 +266,7 @@ func (mg *EventSourceIOTHub) ResolveReferences(ctx context.Context, c client.Rea mg.Spec.ForProvider.ConsumerGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.ConsumerGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("timeseriesinsights.azure.upbound.io", "v1beta1", 
"Gen2Environment", "Gen2EnvironmentList") + m, l, err = apisresolver.GetManagedResource("timeseriesinsights.azure.upbound.io", "v1beta2", "Gen2Environment", "Gen2EnvironmentList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -285,7 +285,7 @@ func (mg *EventSourceIOTHub) ResolveReferences(ctx context.Context, c client.Rea mg.Spec.ForProvider.EnvironmentID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.EnvironmentIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("devices.azure.upbound.io", "v1beta1", "IOTHub", "IOTHubList") + m, l, err = apisresolver.GetManagedResource("devices.azure.upbound.io", "v1beta2", "IOTHub", "IOTHubList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -304,7 +304,7 @@ func (mg *EventSourceIOTHub) ResolveReferences(ctx context.Context, c client.Rea mg.Spec.ForProvider.EventSourceResourceID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.EventSourceResourceIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("devices.azure.upbound.io", "v1beta1", "IOTHub", "IOTHubList") + m, l, err = apisresolver.GetManagedResource("devices.azure.upbound.io", "v1beta2", "IOTHub", "IOTHubList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -342,7 +342,7 @@ func (mg *EventSourceIOTHub) ResolveReferences(ctx context.Context, c client.Rea mg.Spec.InitProvider.ConsumerGroupName = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.ConsumerGroupNameRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("devices.azure.upbound.io", "v1beta1", "IOTHub", "IOTHubList") + m, l, err = apisresolver.GetManagedResource("devices.azure.upbound.io", "v1beta2", "IOTHub", "IOTHubList") if err != nil 
{ return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -361,7 +361,7 @@ func (mg *EventSourceIOTHub) ResolveReferences(ctx context.Context, c client.Rea mg.Spec.InitProvider.EventSourceResourceID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.EventSourceResourceIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("devices.azure.upbound.io", "v1beta1", "IOTHub", "IOTHubList") + m, l, err = apisresolver.GetManagedResource("devices.azure.upbound.io", "v1beta2", "IOTHub", "IOTHubList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/timeseriesinsights/v1beta2/zz_gen2environment_terraformed.go b/apis/timeseriesinsights/v1beta2/zz_gen2environment_terraformed.go new file mode 100755 index 000000000..98868e6d0 --- /dev/null +++ b/apis/timeseriesinsights/v1beta2/zz_gen2environment_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this Gen2Environment +func (mg *Gen2Environment) GetTerraformResourceType() string { + return "azurerm_iot_time_series_insights_gen2_environment" +} + +// GetConnectionDetailsMapping for this Gen2Environment +func (tr *Gen2Environment) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"storage[*].key": "spec.forProvider.storage[*].keySecretRef"} +} + +// GetObservation of this Gen2Environment +func (tr *Gen2Environment) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Gen2Environment +func (tr *Gen2Environment) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Gen2Environment +func (tr *Gen2Environment) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Gen2Environment +func (tr *Gen2Environment) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Gen2Environment +func (tr *Gen2Environment) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this Gen2Environment +func (tr 
*Gen2Environment) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this Gen2Environment +func (tr *Gen2Environment) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this Gen2Environment using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Gen2Environment) LateInitialize(attrs []byte) (bool, error) { + params := &Gen2EnvironmentParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Gen2Environment) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/timeseriesinsights/v1beta2/zz_gen2environment_types.go b/apis/timeseriesinsights/v1beta2/zz_gen2environment_types.go new file mode 100755 index 000000000..db4a9b8e2 --- /dev/null +++ b/apis/timeseriesinsights/v1beta2/zz_gen2environment_types.go @@ -0,0 +1,213 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type Gen2EnvironmentInitParameters struct { + + // A list of property ids for the Azure IoT Time Series Insights Gen2 Environment. Changing this forces a new resource to be created. + IDProperties []*string `json:"idProperties,omitempty" tf:"id_properties,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the SKU Name for this IoT Time Series Insights Gen2 Environment. Currently it supports only L1. For gen2, capacity cannot be specified. Changing this forces a new resource to be created. + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // A storage block as defined below. + Storage *StorageInitParameters `json:"storage,omitempty" tf:"storage,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies the ISO8601 timespan specifying the minimum number of days the environment's events will be available for query. 
+ WarmStoreDataRetentionTime *string `json:"warmStoreDataRetentionTime,omitempty" tf:"warm_store_data_retention_time,omitempty"` +} + +type Gen2EnvironmentObservation struct { + + // The FQDN used to access the environment data. + DataAccessFqdn *string `json:"dataAccessFqdn,omitempty" tf:"data_access_fqdn,omitempty"` + + // The ID of the IoT Time Series Insights Gen2 Environment. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A list of property ids for the Azure IoT Time Series Insights Gen2 Environment. Changing this forces a new resource to be created. + IDProperties []*string `json:"idProperties,omitempty" tf:"id_properties,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the resource group in which to create the Azure IoT Time Series Insights Gen2 Environment. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Specifies the SKU Name for this IoT Time Series Insights Gen2 Environment. Currently it supports only L1. For gen2, capacity cannot be specified. Changing this forces a new resource to be created. + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // A storage block as defined below. + Storage *StorageObservation `json:"storage,omitempty" tf:"storage,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies the ISO8601 timespan specifying the minimum number of days the environment's events will be available for query. 
+ WarmStoreDataRetentionTime *string `json:"warmStoreDataRetentionTime,omitempty" tf:"warm_store_data_retention_time,omitempty"` +} + +type Gen2EnvironmentParameters struct { + + // A list of property ids for the Azure IoT Time Series Insights Gen2 Environment. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + IDProperties []*string `json:"idProperties,omitempty" tf:"id_properties,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the resource group in which to create the Azure IoT Time Series Insights Gen2 Environment. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // Specifies the SKU Name for this IoT Time Series Insights Gen2 Environment. Currently it supports only L1. For gen2, capacity cannot be specified. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + SkuName *string `json:"skuName,omitempty" tf:"sku_name,omitempty"` + + // A storage block as defined below. 
+ // +kubebuilder:validation:Optional + Storage *StorageParameters `json:"storage,omitempty" tf:"storage,omitempty"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies the ISO8601 timespan specifying the minimum number of days the environment's events will be available for query. + // +kubebuilder:validation:Optional + WarmStoreDataRetentionTime *string `json:"warmStoreDataRetentionTime,omitempty" tf:"warm_store_data_retention_time,omitempty"` +} + +type StorageInitParameters struct { + + // Name of storage account for Azure IoT Time Series Insights Gen2 Environment. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Reference to a Account in storage to populate name. + // +kubebuilder:validation:Optional + NameRef *v1.Reference `json:"nameRef,omitempty" tf:"-"` + + // Selector for a Account in storage to populate name. + // +kubebuilder:validation:Optional + NameSelector *v1.Selector `json:"nameSelector,omitempty" tf:"-"` +} + +type StorageObservation struct { + + // Name of storage account for Azure IoT Time Series Insights Gen2 Environment. Changing this forces a new resource to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type StorageParameters struct { + + // Access key of storage account for Azure IoT Time Series Insights Gen2 Environment + // +kubebuilder:validation:Required + KeySecretRef v1.SecretKeySelector `json:"keySecretRef" tf:"-"` + + // Name of storage account for Azure IoT Time Series Insights Gen2 Environment. Changing this forces a new resource to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Reference to a Account in storage to populate name. + // +kubebuilder:validation:Optional + NameRef *v1.Reference `json:"nameRef,omitempty" tf:"-"` + + // Selector for a Account in storage to populate name. + // +kubebuilder:validation:Optional + NameSelector *v1.Selector `json:"nameSelector,omitempty" tf:"-"` +} + +// Gen2EnvironmentSpec defines the desired state of Gen2Environment +type Gen2EnvironmentSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider Gen2EnvironmentParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider Gen2EnvironmentInitParameters `json:"initProvider,omitempty"` +} + +// Gen2EnvironmentStatus defines the observed state of Gen2Environment. +type Gen2EnvironmentStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider Gen2EnvironmentObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Gen2Environment is the Schema for the Gen2Environments API. Manages an Azure IoT Time Series Insights Gen2 Environment. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type Gen2Environment struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.idProperties) || (has(self.initProvider) && has(self.initProvider.idProperties))",message="spec.forProvider.idProperties is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.skuName) || (has(self.initProvider) && has(self.initProvider.skuName))",message="spec.forProvider.skuName is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.storage) || (has(self.initProvider) && has(self.initProvider.storage))",message="spec.forProvider.storage is a required parameter" + Spec Gen2EnvironmentSpec `json:"spec"` + Status Gen2EnvironmentStatus 
`json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// Gen2EnvironmentList contains a list of Gen2Environments +type Gen2EnvironmentList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Gen2Environment `json:"items"` +} + +// Repository type metadata. +var ( + Gen2Environment_Kind = "Gen2Environment" + Gen2Environment_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Gen2Environment_Kind}.String() + Gen2Environment_KindAPIVersion = Gen2Environment_Kind + "." + CRDGroupVersion.String() + Gen2Environment_GroupVersionKind = CRDGroupVersion.WithKind(Gen2Environment_Kind) +) + +func init() { + SchemeBuilder.Register(&Gen2Environment{}, &Gen2EnvironmentList{}) +} diff --git a/apis/timeseriesinsights/v1beta2/zz_generated.conversion_hubs.go b/apis/timeseriesinsights/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 000000000..4ae38c74b --- /dev/null +++ b/apis/timeseriesinsights/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,10 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *Gen2Environment) Hub() {} diff --git a/apis/timeseriesinsights/v1beta2/zz_generated.deepcopy.go b/apis/timeseriesinsights/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..a4d56f6a4 --- /dev/null +++ b/apis/timeseriesinsights/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,405 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Gen2Environment) DeepCopyInto(out *Gen2Environment) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Gen2Environment. +func (in *Gen2Environment) DeepCopy() *Gen2Environment { + if in == nil { + return nil + } + out := new(Gen2Environment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Gen2Environment) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Gen2EnvironmentInitParameters) DeepCopyInto(out *Gen2EnvironmentInitParameters) { + *out = *in + if in.IDProperties != nil { + in, out := &in.IDProperties, &out.IDProperties + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.Storage != nil { + in, out := &in.Storage, &out.Storage + *out = new(StorageInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.WarmStoreDataRetentionTime != nil { + in, out := &in.WarmStoreDataRetentionTime, &out.WarmStoreDataRetentionTime + *out = 
new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Gen2EnvironmentInitParameters. +func (in *Gen2EnvironmentInitParameters) DeepCopy() *Gen2EnvironmentInitParameters { + if in == nil { + return nil + } + out := new(Gen2EnvironmentInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Gen2EnvironmentList) DeepCopyInto(out *Gen2EnvironmentList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Gen2Environment, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Gen2EnvironmentList. +func (in *Gen2EnvironmentList) DeepCopy() *Gen2EnvironmentList { + if in == nil { + return nil + } + out := new(Gen2EnvironmentList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Gen2EnvironmentList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Gen2EnvironmentObservation) DeepCopyInto(out *Gen2EnvironmentObservation) { + *out = *in + if in.DataAccessFqdn != nil { + in, out := &in.DataAccessFqdn, &out.DataAccessFqdn + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IDProperties != nil { + in, out := &in.IDProperties, &out.IDProperties + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.Storage != nil { + in, out := &in.Storage, &out.Storage + *out = new(StorageObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.WarmStoreDataRetentionTime != nil { + in, out := &in.WarmStoreDataRetentionTime, &out.WarmStoreDataRetentionTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Gen2EnvironmentObservation. +func (in *Gen2EnvironmentObservation) DeepCopy() *Gen2EnvironmentObservation { + if in == nil { + return nil + } + out := new(Gen2EnvironmentObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Gen2EnvironmentParameters) DeepCopyInto(out *Gen2EnvironmentParameters) { + *out = *in + if in.IDProperties != nil { + in, out := &in.IDProperties, &out.IDProperties + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SkuName != nil { + in, out := &in.SkuName, &out.SkuName + *out = new(string) + **out = **in + } + if in.Storage != nil { + in, out := &in.Storage, &out.Storage + *out = new(StorageParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.WarmStoreDataRetentionTime != nil { + in, out := &in.WarmStoreDataRetentionTime, &out.WarmStoreDataRetentionTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Gen2EnvironmentParameters. 
+func (in *Gen2EnvironmentParameters) DeepCopy() *Gen2EnvironmentParameters { + if in == nil { + return nil + } + out := new(Gen2EnvironmentParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Gen2EnvironmentSpec) DeepCopyInto(out *Gen2EnvironmentSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Gen2EnvironmentSpec. +func (in *Gen2EnvironmentSpec) DeepCopy() *Gen2EnvironmentSpec { + if in == nil { + return nil + } + out := new(Gen2EnvironmentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Gen2EnvironmentStatus) DeepCopyInto(out *Gen2EnvironmentStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Gen2EnvironmentStatus. +func (in *Gen2EnvironmentStatus) DeepCopy() *Gen2EnvironmentStatus { + if in == nil { + return nil + } + out := new(Gen2EnvironmentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StorageInitParameters) DeepCopyInto(out *StorageInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NameRef != nil { + in, out := &in.NameRef, &out.NameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NameSelector != nil { + in, out := &in.NameSelector, &out.NameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageInitParameters. +func (in *StorageInitParameters) DeepCopy() *StorageInitParameters { + if in == nil { + return nil + } + out := new(StorageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageObservation) DeepCopyInto(out *StorageObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageObservation. +func (in *StorageObservation) DeepCopy() *StorageObservation { + if in == nil { + return nil + } + out := new(StorageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StorageParameters) DeepCopyInto(out *StorageParameters) { + *out = *in + out.KeySecretRef = in.KeySecretRef + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NameRef != nil { + in, out := &in.NameRef, &out.NameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.NameSelector != nil { + in, out := &in.NameSelector, &out.NameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageParameters. +func (in *StorageParameters) DeepCopy() *StorageParameters { + if in == nil { + return nil + } + out := new(StorageParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/timeseriesinsights/v1beta2/zz_generated.managed.go b/apis/timeseriesinsights/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..9eb0d52e2 --- /dev/null +++ b/apis/timeseriesinsights/v1beta2/zz_generated.managed.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Gen2Environment. +func (mg *Gen2Environment) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Gen2Environment. +func (mg *Gen2Environment) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this Gen2Environment. +func (mg *Gen2Environment) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this Gen2Environment. +func (mg *Gen2Environment) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this Gen2Environment. 
+func (mg *Gen2Environment) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Gen2Environment. +func (mg *Gen2Environment) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Gen2Environment. +func (mg *Gen2Environment) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Gen2Environment. +func (mg *Gen2Environment) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this Gen2Environment. +func (mg *Gen2Environment) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this Gen2Environment. +func (mg *Gen2Environment) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this Gen2Environment. +func (mg *Gen2Environment) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Gen2Environment. +func (mg *Gen2Environment) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/timeseriesinsights/v1beta2/zz_generated.managedlist.go b/apis/timeseriesinsights/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..262e2c925 --- /dev/null +++ b/apis/timeseriesinsights/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this Gen2EnvironmentList. 
+func (l *Gen2EnvironmentList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/timeseriesinsights/v1beta2/zz_generated.resolvers.go b/apis/timeseriesinsights/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..de31ecd6b --- /dev/null +++ b/apis/timeseriesinsights/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,91 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + apisresolver "github.com/upbound/provider-azure/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *Gen2Environment) ResolveReferences( // ResolveReferences of this Gen2Environment. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.Storage != nil { + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Storage.Name), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.Storage.NameRef, + Selector: mg.Spec.ForProvider.Storage.NameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Storage.Name") + } + mg.Spec.ForProvider.Storage.Name = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Storage.NameRef = rsp.ResolvedReference + + } + if mg.Spec.InitProvider.Storage != nil { + { + m, l, err = 
apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Storage.Name), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.Storage.NameRef, + Selector: mg.Spec.InitProvider.Storage.NameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Storage.Name") + } + mg.Spec.InitProvider.Storage.Name = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.Storage.NameRef = rsp.ResolvedReference + + } + + return nil +} diff --git a/apis/timeseriesinsights/v1beta2/zz_groupversion_info.go b/apis/timeseriesinsights/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..732a469a4 --- /dev/null +++ b/apis/timeseriesinsights/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +// +kubebuilder:object:generate=true +// +groupName=timeseriesinsights.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "timeseriesinsights.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/web/v1beta1/zz_appactiveslot_types.go b/apis/web/v1beta1/zz_appactiveslot_types.go index 04c1af637..88403247a 100755 --- a/apis/web/v1beta1/zz_appactiveslot_types.go +++ b/apis/web/v1beta1/zz_appactiveslot_types.go @@ -21,7 +21,7 @@ type AppActiveSlotInitParameters struct { // The ID of the Slot to swap with Production. // The ID of the Slot to swap with `Production`. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/web/v1beta1.WindowsWebAppSlot + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/web/v1beta2.WindowsWebAppSlot // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() SlotID *string `json:"slotId,omitempty" tf:"slot_id,omitempty"` @@ -61,7 +61,7 @@ type AppActiveSlotParameters struct { // The ID of the Slot to swap with Production. // The ID of the Slot to swap with `Production`. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/web/v1beta1.WindowsWebAppSlot + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/web/v1beta2.WindowsWebAppSlot // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional SlotID *string `json:"slotId,omitempty" tf:"slot_id,omitempty"` diff --git a/apis/web/v1beta1/zz_apphybridconnection_types.go b/apis/web/v1beta1/zz_apphybridconnection_types.go index 98d7d9994..62eb08b86 100755 --- a/apis/web/v1beta1/zz_apphybridconnection_types.go +++ b/apis/web/v1beta1/zz_apphybridconnection_types.go @@ -43,7 +43,7 @@ type AppHybridConnectionInitParameters struct { // The ID of the Web App for this Hybrid Connection. Changing this forces a new resource to be created. // The ID of the Web App for this Hybrid Connection. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/web/v1beta1.WindowsWebApp + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/web/v1beta2.WindowsWebApp // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() WebAppID *string `json:"webAppId,omitempty" tf:"web_app_id,omitempty"` @@ -132,7 +132,7 @@ type AppHybridConnectionParameters struct { // The ID of the Web App for this Hybrid Connection. Changing this forces a new resource to be created. // The ID of the Web App for this Hybrid Connection. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/web/v1beta1.WindowsWebApp + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/web/v1beta2.WindowsWebApp // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional WebAppID *string `json:"webAppId,omitempty" tf:"web_app_id,omitempty"` diff --git a/apis/web/v1beta1/zz_functionappactiveslot_types.go b/apis/web/v1beta1/zz_functionappactiveslot_types.go index 67c63e758..a9440b782 100755 --- a/apis/web/v1beta1/zz_functionappactiveslot_types.go +++ b/apis/web/v1beta1/zz_functionappactiveslot_types.go @@ -21,7 +21,7 @@ type FunctionAppActiveSlotInitParameters struct { // The ID of the Slot to swap with Production. // The ID of the Slot to swap with `Production`. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/web/v1beta1.WindowsFunctionAppSlot + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/web/v1beta2.WindowsFunctionAppSlot // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() SlotID *string `json:"slotId,omitempty" tf:"slot_id,omitempty"` @@ -61,7 +61,7 @@ type FunctionAppActiveSlotParameters struct { // The ID of the Slot to swap with Production. 
// The ID of the Slot to swap with `Production`. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/web/v1beta1.WindowsFunctionAppSlot + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/web/v1beta2.WindowsFunctionAppSlot // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional SlotID *string `json:"slotId,omitempty" tf:"slot_id,omitempty"` diff --git a/apis/web/v1beta1/zz_functionappfunction_types.go b/apis/web/v1beta1/zz_functionappfunction_types.go index 4409c7354..c35192383 100755 --- a/apis/web/v1beta1/zz_functionappfunction_types.go +++ b/apis/web/v1beta1/zz_functionappfunction_types.go @@ -63,7 +63,7 @@ type FunctionAppFunctionInitParameters struct { // The ID of the Function App in which this function should reside. Changing this forces a new resource to be created. // The ID of the Function App in which this function should reside. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/web/v1beta1.LinuxFunctionApp + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/web/v1beta2.LinuxFunctionApp // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() FunctionAppID *string `json:"functionAppId,omitempty" tf:"function_app_id,omitempty"` @@ -167,7 +167,7 @@ type FunctionAppFunctionParameters struct { // The ID of the Function App in which this function should reside. Changing this forces a new resource to be created. // The ID of the Function App in which this function should reside. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/web/v1beta1.LinuxFunctionApp + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/web/v1beta2.LinuxFunctionApp // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() // +kubebuilder:validation:Optional FunctionAppID *string `json:"functionAppId,omitempty" tf:"function_app_id,omitempty"` diff --git a/apis/web/v1beta1/zz_functionapphybridconnection_types.go b/apis/web/v1beta1/zz_functionapphybridconnection_types.go index 9a7d47fae..6bd6545db 100755 --- a/apis/web/v1beta1/zz_functionapphybridconnection_types.go +++ b/apis/web/v1beta1/zz_functionapphybridconnection_types.go @@ -17,7 +17,7 @@ type FunctionAppHybridConnectionInitParameters struct { // The ID of the Function App for this Hybrid Connection. Changing this forces a new resource to be created. // The ID of the Function App for this Hybrid Connection. - // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/web/v1beta1.WindowsFunctionApp + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/web/v1beta2.WindowsFunctionApp // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() FunctionAppID *string `json:"functionAppId,omitempty" tf:"function_app_id,omitempty"` @@ -102,7 +102,7 @@ type FunctionAppHybridConnectionParameters struct { // The ID of the Function App for this Hybrid Connection. Changing this forces a new resource to be created. // The ID of the Function App for this Hybrid Connection. 
- // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/web/v1beta1.WindowsFunctionApp + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/web/v1beta2.WindowsFunctionApp // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() // +kubebuilder:validation:Optional FunctionAppID *string `json:"functionAppId,omitempty" tf:"function_app_id,omitempty"` diff --git a/apis/web/v1beta1/zz_generated.conversion_hubs.go b/apis/web/v1beta1/zz_generated.conversion_hubs.go index 73ed62480..da98efc5d 100755 --- a/apis/web/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/web/v1beta1/zz_generated.conversion_hubs.go @@ -7,10 +7,10 @@ package v1beta1 // Hub marks this type as a conversion hub. -func (tr *AppServicePlan) Hub() {} +func (tr *AppActiveSlot) Hub() {} // Hub marks this type as a conversion hub. -func (tr *FunctionApp) Hub() {} +func (tr *AppHybridConnection) Hub() {} // Hub marks this type as a conversion hub. func (tr *FunctionAppActiveSlot) Hub() {} @@ -21,44 +21,8 @@ func (tr *FunctionAppFunction) Hub() {} // Hub marks this type as a conversion hub. func (tr *FunctionAppHybridConnection) Hub() {} -// Hub marks this type as a conversion hub. -func (tr *FunctionAppSlot) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *LinuxFunctionApp) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *LinuxFunctionAppSlot) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *LinuxWebApp) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *LinuxWebAppSlot) Hub() {} - // Hub marks this type as a conversion hub. func (tr *ServicePlan) Hub() {} // Hub marks this type as a conversion hub. func (tr *SourceControlToken) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *StaticSite) Hub() {} - -// Hub marks this type as a conversion hub. 
-func (tr *AppActiveSlot) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *AppHybridConnection) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *WindowsFunctionApp) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *WindowsFunctionAppSlot) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *WindowsWebApp) Hub() {} - -// Hub marks this type as a conversion hub. -func (tr *WindowsWebAppSlot) Hub() {} diff --git a/apis/web/v1beta1/zz_generated.conversion_spokes.go b/apis/web/v1beta1/zz_generated.conversion_spokes.go new file mode 100755 index 000000000..11583a62f --- /dev/null +++ b/apis/web/v1beta1/zz_generated.conversion_spokes.go @@ -0,0 +1,254 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + ujconversion "github.com/crossplane/upjet/pkg/controller/conversion" + "github.com/crossplane/upjet/pkg/resource" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this AppServicePlan to the hub type. +func (tr *AppServicePlan) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the AppServicePlan type. 
+func (tr *AppServicePlan) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this FunctionApp to the hub type. +func (tr *FunctionApp) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the FunctionApp type. +func (tr *FunctionApp) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this FunctionAppSlot to the hub type. +func (tr *FunctionAppSlot) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the FunctionAppSlot type. 
+func (tr *FunctionAppSlot) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this LinuxFunctionApp to the hub type. +func (tr *LinuxFunctionApp) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the LinuxFunctionApp type. +func (tr *LinuxFunctionApp) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this LinuxFunctionAppSlot to the hub type. +func (tr *LinuxFunctionAppSlot) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the LinuxFunctionAppSlot type. 
+func (tr *LinuxFunctionAppSlot) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this LinuxWebApp to the hub type. +func (tr *LinuxWebApp) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the LinuxWebApp type. +func (tr *LinuxWebApp) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this LinuxWebAppSlot to the hub type. +func (tr *LinuxWebAppSlot) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the LinuxWebAppSlot type. 
+func (tr *LinuxWebAppSlot) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this StaticSite to the hub type. +func (tr *StaticSite) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the StaticSite type. +func (tr *StaticSite) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this WindowsFunctionApp to the hub type. +func (tr *WindowsFunctionApp) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the WindowsFunctionApp type. 
+func (tr *WindowsFunctionApp) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this WindowsFunctionAppSlot to the hub type. +func (tr *WindowsFunctionAppSlot) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the WindowsFunctionAppSlot type. +func (tr *WindowsFunctionAppSlot) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this WindowsWebApp to the hub type. +func (tr *WindowsWebApp) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the WindowsWebApp type. 
+func (tr *WindowsWebApp) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} + +// ConvertTo converts this WindowsWebAppSlot to the hub type. +func (tr *WindowsWebAppSlot) ConvertTo(dstRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := dstRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(dstRaw.(resource.Terraformed), tr); err != nil { + return errors.Wrapf(err, "cannot convert from the spoke version %q to the hub version %q", spokeVersion, hubVersion) + } + return nil +} + +// ConvertFrom converts from the hub type to the WindowsWebAppSlot type. +func (tr *WindowsWebAppSlot) ConvertFrom(srcRaw conversion.Hub) error { + spokeVersion := tr.GetObjectKind().GroupVersionKind().Version + hubVersion := srcRaw.GetObjectKind().GroupVersionKind().Version + if err := ujconversion.RoundTrip(tr, srcRaw.(resource.Terraformed)); err != nil { + return errors.Wrapf(err, "cannot convert from the hub version %q to the spoke version %q", hubVersion, spokeVersion) + } + return nil +} diff --git a/apis/web/v1beta1/zz_generated.resolvers.go b/apis/web/v1beta1/zz_generated.resolvers.go index 9b364f794..6a41db3db 100644 --- a/apis/web/v1beta1/zz_generated.resolvers.go +++ b/apis/web/v1beta1/zz_generated.resolvers.go @@ -28,7 +28,7 @@ func (mg *AppActiveSlot) ResolveReferences( // ResolveReferences of this AppActi var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("web.azure.upbound.io", "v1beta1", "WindowsWebAppSlot", "WindowsWebAppSlotList") + m, l, err = apisresolver.GetManagedResource("web.azure.upbound.io", 
"v1beta2", "WindowsWebAppSlot", "WindowsWebAppSlotList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -47,7 +47,7 @@ func (mg *AppActiveSlot) ResolveReferences( // ResolveReferences of this AppActi mg.Spec.ForProvider.SlotID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.SlotIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("web.azure.upbound.io", "v1beta1", "WindowsWebAppSlot", "WindowsWebAppSlotList") + m, l, err = apisresolver.GetManagedResource("web.azure.upbound.io", "v1beta2", "WindowsWebAppSlot", "WindowsWebAppSlotList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -97,7 +97,7 @@ func (mg *AppHybridConnection) ResolveReferences(ctx context.Context, c client.R mg.Spec.ForProvider.RelayID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.RelayIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("web.azure.upbound.io", "v1beta1", "WindowsWebApp", "WindowsWebAppList") + m, l, err = apisresolver.GetManagedResource("web.azure.upbound.io", "v1beta2", "WindowsWebApp", "WindowsWebAppList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -135,7 +135,7 @@ func (mg *AppHybridConnection) ResolveReferences(ctx context.Context, c client.R mg.Spec.InitProvider.RelayID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.InitProvider.RelayIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("web.azure.upbound.io", "v1beta1", "WindowsWebApp", "WindowsWebAppList") + m, l, err = apisresolver.GetManagedResource("web.azure.upbound.io", "v1beta2", "WindowsWebApp", "WindowsWebAppList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference 
resolution") } @@ -415,7 +415,7 @@ func (mg *FunctionAppActiveSlot) ResolveReferences(ctx context.Context, c client var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("web.azure.upbound.io", "v1beta1", "WindowsFunctionAppSlot", "WindowsFunctionAppSlotList") + m, l, err = apisresolver.GetManagedResource("web.azure.upbound.io", "v1beta2", "WindowsFunctionAppSlot", "WindowsFunctionAppSlotList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -434,7 +434,7 @@ func (mg *FunctionAppActiveSlot) ResolveReferences(ctx context.Context, c client mg.Spec.ForProvider.SlotID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.SlotIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("web.azure.upbound.io", "v1beta1", "WindowsFunctionAppSlot", "WindowsFunctionAppSlotList") + m, l, err = apisresolver.GetManagedResource("web.azure.upbound.io", "v1beta2", "WindowsFunctionAppSlot", "WindowsFunctionAppSlotList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -465,7 +465,7 @@ func (mg *FunctionAppFunction) ResolveReferences(ctx context.Context, c client.R var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("web.azure.upbound.io", "v1beta1", "LinuxFunctionApp", "LinuxFunctionAppList") + m, l, err = apisresolver.GetManagedResource("web.azure.upbound.io", "v1beta2", "LinuxFunctionApp", "LinuxFunctionAppList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -484,7 +484,7 @@ func (mg *FunctionAppFunction) ResolveReferences(ctx context.Context, c client.R mg.Spec.ForProvider.FunctionAppID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.FunctionAppIDRef = rsp.ResolvedReference { - m, l, 
err = apisresolver.GetManagedResource("web.azure.upbound.io", "v1beta1", "LinuxFunctionApp", "LinuxFunctionAppList") + m, l, err = apisresolver.GetManagedResource("web.azure.upbound.io", "v1beta2", "LinuxFunctionApp", "LinuxFunctionAppList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -515,7 +515,7 @@ func (mg *FunctionAppHybridConnection) ResolveReferences(ctx context.Context, c var rsp reference.ResolutionResponse var err error { - m, l, err = apisresolver.GetManagedResource("web.azure.upbound.io", "v1beta1", "WindowsFunctionApp", "WindowsFunctionAppList") + m, l, err = apisresolver.GetManagedResource("web.azure.upbound.io", "v1beta2", "WindowsFunctionApp", "WindowsFunctionAppList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } @@ -553,7 +553,7 @@ func (mg *FunctionAppHybridConnection) ResolveReferences(ctx context.Context, c mg.Spec.ForProvider.RelayID = reference.ToPtrValue(rsp.ResolvedValue) mg.Spec.ForProvider.RelayIDRef = rsp.ResolvedReference { - m, l, err = apisresolver.GetManagedResource("web.azure.upbound.io", "v1beta1", "WindowsFunctionApp", "WindowsFunctionAppList") + m, l, err = apisresolver.GetManagedResource("web.azure.upbound.io", "v1beta2", "WindowsFunctionApp", "WindowsFunctionAppList") if err != nil { return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") } diff --git a/apis/web/v1beta2/zz_appserviceplan_terraformed.go b/apis/web/v1beta2/zz_appserviceplan_terraformed.go new file mode 100755 index 000000000..cdb7e6be7 --- /dev/null +++ b/apis/web/v1beta2/zz_appserviceplan_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this AppServicePlan +func (mg *AppServicePlan) GetTerraformResourceType() string { + return "azurerm_app_service_plan" +} + +// GetConnectionDetailsMapping for this AppServicePlan +func (tr *AppServicePlan) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this AppServicePlan +func (tr *AppServicePlan) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this AppServicePlan +func (tr *AppServicePlan) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this AppServicePlan +func (tr *AppServicePlan) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this AppServicePlan +func (tr *AppServicePlan) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this AppServicePlan +func (tr *AppServicePlan) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this AppServicePlan +func (tr *AppServicePlan) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != 
nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this AppServicePlan +func (tr *AppServicePlan) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this AppServicePlan using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *AppServicePlan) LateInitialize(attrs []byte) (bool, error) { + params := &AppServicePlanParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *AppServicePlan) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/web/v1beta2/zz_appserviceplan_types.go b/apis/web/v1beta2/zz_appserviceplan_types.go new file mode 100755 index 000000000..f13f2b25f --- /dev/null +++ b/apis/web/v1beta2/zz_appserviceplan_types.go @@ -0,0 +1,249 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AppServicePlanInitParameters struct { + + // The ID of the App Service Environment where the App Service Plan should be located. Changing forces a new resource to be created. + AppServiceEnvironmentID *string `json:"appServiceEnvironmentId,omitempty" tf:"app_service_environment_id,omitempty"` + + // Whether to create a xenon App Service Plan. + IsXenon *bool `json:"isXenon,omitempty" tf:"is_xenon,omitempty"` + + // The kind of the App Service Plan to create. Possible values are Windows (also available as App), Linux, elastic (for Premium Consumption), xenon and FunctionApp (for a Consumption Plan). Defaults to Windows. Changing this forces a new resource to be created. + Kind *string `json:"kind,omitempty" tf:"kind,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The maximum number of total workers allowed for this ElasticScaleEnabled App Service Plan. 
+ MaximumElasticWorkerCount *float64 `json:"maximumElasticWorkerCount,omitempty" tf:"maximum_elastic_worker_count,omitempty"` + + // Can Apps assigned to this App Service Plan be scaled independently? If set to false apps assigned to this plan will scale to all instances of the plan. + PerSiteScaling *bool `json:"perSiteScaling,omitempty" tf:"per_site_scaling,omitempty"` + + // Is this App Service Plan Reserved. + Reserved *bool `json:"reserved,omitempty" tf:"reserved,omitempty"` + + // A sku block as documented below. + Sku *SkuInitParameters `json:"sku,omitempty" tf:"sku,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies if the App Service Plan should be Zone Redundant. Changing this forces a new resource to be created. + ZoneRedundant *bool `json:"zoneRedundant,omitempty" tf:"zone_redundant,omitempty"` +} + +type AppServicePlanObservation struct { + + // The ID of the App Service Environment where the App Service Plan should be located. Changing forces a new resource to be created. + AppServiceEnvironmentID *string `json:"appServiceEnvironmentId,omitempty" tf:"app_service_environment_id,omitempty"` + + // The ID of the App Service Plan component. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Whether to create a xenon App Service Plan. + IsXenon *bool `json:"isXenon,omitempty" tf:"is_xenon,omitempty"` + + // The kind of the App Service Plan to create. Possible values are Windows (also available as App), Linux, elastic (for Premium Consumption), xenon and FunctionApp (for a Consumption Plan). Defaults to Windows. Changing this forces a new resource to be created. + Kind *string `json:"kind,omitempty" tf:"kind,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. 
+ Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The maximum number of total workers allowed for this ElasticScaleEnabled App Service Plan. + MaximumElasticWorkerCount *float64 `json:"maximumElasticWorkerCount,omitempty" tf:"maximum_elastic_worker_count,omitempty"` + + // The maximum number of workers supported with the App Service Plan's sku. + MaximumNumberOfWorkers *float64 `json:"maximumNumberOfWorkers,omitempty" tf:"maximum_number_of_workers,omitempty"` + + // Can Apps assigned to this App Service Plan be scaled independently? If set to false apps assigned to this plan will scale to all instances of the plan. + PerSiteScaling *bool `json:"perSiteScaling,omitempty" tf:"per_site_scaling,omitempty"` + + // Is this App Service Plan Reserved. + Reserved *bool `json:"reserved,omitempty" tf:"reserved,omitempty"` + + // The name of the resource group in which to create the App Service Plan component. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A sku block as documented below. + Sku *SkuObservation `json:"sku,omitempty" tf:"sku,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies if the App Service Plan should be Zone Redundant. Changing this forces a new resource to be created. + ZoneRedundant *bool `json:"zoneRedundant,omitempty" tf:"zone_redundant,omitempty"` +} + +type AppServicePlanParameters struct { + + // The ID of the App Service Environment where the App Service Plan should be located. Changing forces a new resource to be created. + // +kubebuilder:validation:Optional + AppServiceEnvironmentID *string `json:"appServiceEnvironmentId,omitempty" tf:"app_service_environment_id,omitempty"` + + // Whether to create a xenon App Service Plan. 
+ // +kubebuilder:validation:Optional + IsXenon *bool `json:"isXenon,omitempty" tf:"is_xenon,omitempty"` + + // The kind of the App Service Plan to create. Possible values are Windows (also available as App), Linux, elastic (for Premium Consumption), xenon and FunctionApp (for a Consumption Plan). Defaults to Windows. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Kind *string `json:"kind,omitempty" tf:"kind,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The maximum number of total workers allowed for this ElasticScaleEnabled App Service Plan. + // +kubebuilder:validation:Optional + MaximumElasticWorkerCount *float64 `json:"maximumElasticWorkerCount,omitempty" tf:"maximum_elastic_worker_count,omitempty"` + + // Can Apps assigned to this App Service Plan be scaled independently? If set to false apps assigned to this plan will scale to all instances of the plan. + // +kubebuilder:validation:Optional + PerSiteScaling *bool `json:"perSiteScaling,omitempty" tf:"per_site_scaling,omitempty"` + + // Is this App Service Plan Reserved. + // +kubebuilder:validation:Optional + Reserved *bool `json:"reserved,omitempty" tf:"reserved,omitempty"` + + // The name of the resource group in which to create the App Service Plan component. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. 
+ // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A sku block as documented below. + // +kubebuilder:validation:Optional + Sku *SkuParameters `json:"sku,omitempty" tf:"sku,omitempty"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Specifies if the App Service Plan should be Zone Redundant. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + ZoneRedundant *bool `json:"zoneRedundant,omitempty" tf:"zone_redundant,omitempty"` +} + +type SkuInitParameters struct { + + // Specifies the number of workers associated with this App Service Plan. + Capacity *float64 `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // Specifies the plan's instance size. + Size *string `json:"size,omitempty" tf:"size,omitempty"` + + // Specifies the plan's pricing tier. + Tier *string `json:"tier,omitempty" tf:"tier,omitempty"` +} + +type SkuObservation struct { + + // Specifies the number of workers associated with this App Service Plan. + Capacity *float64 `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // Specifies the plan's instance size. + Size *string `json:"size,omitempty" tf:"size,omitempty"` + + // Specifies the plan's pricing tier. + Tier *string `json:"tier,omitempty" tf:"tier,omitempty"` +} + +type SkuParameters struct { + + // Specifies the number of workers associated with this App Service Plan. + // +kubebuilder:validation:Optional + Capacity *float64 `json:"capacity,omitempty" tf:"capacity,omitempty"` + + // Specifies the plan's instance size. 
+ // +kubebuilder:validation:Optional + Size *string `json:"size" tf:"size,omitempty"` + + // Specifies the plan's pricing tier. + // +kubebuilder:validation:Optional + Tier *string `json:"tier" tf:"tier,omitempty"` +} + +// AppServicePlanSpec defines the desired state of AppServicePlan +type AppServicePlanSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider AppServicePlanParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider AppServicePlanInitParameters `json:"initProvider,omitempty"` +} + +// AppServicePlanStatus defines the observed state of AppServicePlan. +type AppServicePlanStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider AppServicePlanObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// AppServicePlan is the Schema for the AppServicePlans API. Manages an App Service Plan component. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type AppServicePlan struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.sku) || (has(self.initProvider) && has(self.initProvider.sku))",message="spec.forProvider.sku is a required parameter" + Spec AppServicePlanSpec `json:"spec"` + Status AppServicePlanStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// AppServicePlanList contains a list of AppServicePlans +type AppServicePlanList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []AppServicePlan `json:"items"` +} + +// Repository type metadata. +var ( + AppServicePlan_Kind = "AppServicePlan" + AppServicePlan_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: AppServicePlan_Kind}.String() + AppServicePlan_KindAPIVersion = AppServicePlan_Kind + "." 
+ CRDGroupVersion.String() + AppServicePlan_GroupVersionKind = CRDGroupVersion.WithKind(AppServicePlan_Kind) +) + +func init() { + SchemeBuilder.Register(&AppServicePlan{}, &AppServicePlanList{}) +} diff --git a/apis/web/v1beta2/zz_functionapp_terraformed.go b/apis/web/v1beta2/zz_functionapp_terraformed.go new file mode 100755 index 000000000..1251f56d4 --- /dev/null +++ b/apis/web/v1beta2/zz_functionapp_terraformed.go @@ -0,0 +1,130 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this FunctionApp +func (mg *FunctionApp) GetTerraformResourceType() string { + return "azurerm_function_app" +} + +// GetConnectionDetailsMapping for this FunctionApp +func (tr *FunctionApp) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"auth_settings[*].active_directory[*].client_secret": "spec.forProvider.authSettings[*].activeDirectory[*].clientSecretSecretRef", "auth_settings[*].facebook[*].app_secret": "spec.forProvider.authSettings[*].facebook[*].appSecretSecretRef", "auth_settings[*].google[*].client_secret": "spec.forProvider.authSettings[*].google[*].clientSecretSecretRef", "auth_settings[*].microsoft[*].client_secret": "spec.forProvider.authSettings[*].microsoft[*].clientSecretSecretRef", "auth_settings[*].twitter[*].consumer_secret": "spec.forProvider.authSettings[*].twitter[*].consumerSecretSecretRef", "connection_string[*].value": "spec.forProvider.connectionString[*].valueSecretRef", "storage_account_access_key": "spec.forProvider.storageAccountAccessKeySecretRef"} +} + +// GetObservation of this FunctionApp +func (tr *FunctionApp) GetObservation() (map[string]any, error) { + o, err := 
json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this FunctionApp +func (tr *FunctionApp) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this FunctionApp +func (tr *FunctionApp) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this FunctionApp +func (tr *FunctionApp) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this FunctionApp +func (tr *FunctionApp) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this FunctionApp +func (tr *FunctionApp) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this FunctionApp +func (tr *FunctionApp) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed 
to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this FunctionApp using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *FunctionApp) LateInitialize(attrs []byte) (bool, error) { + params := &FunctionAppParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + opts = append(opts, resource.WithNameFilter("KeyVaultReferenceIdentityID")) + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *FunctionApp) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/web/v1beta2/zz_functionapp_types.go b/apis/web/v1beta2/zz_functionapp_types.go new file mode 100755 index 000000000..4adeeb7f9 --- /dev/null +++ b/apis/web/v1beta2/zz_functionapp_types.go @@ -0,0 +1,1396 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ActiveDirectoryInitParameters struct { + + // Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` +} + +type ActiveDirectoryObservation struct { + + // Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` +} + +type ActiveDirectoryParameters struct { + + // Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + // +kubebuilder:validation:Optional + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The OAuth 2.0 client secret that was created for the app used for authentication. + // +kubebuilder:validation:Optional + ClientSecretSecretRef *v1.SecretKeySelector `json:"clientSecretSecretRef,omitempty" tf:"-"` +} + +type AuthSettingsInitParameters struct { + + // A active_directory block as defined below. + ActiveDirectory *ActiveDirectoryInitParameters `json:"activeDirectory,omitempty" tf:"active_directory,omitempty"` + + // Login parameters to send to the OpenID Connect authorization endpoint when a user logs in. 
Each parameter must be in the form "key=value". + // +mapType=granular + AdditionalLoginParams map[string]*string `json:"additionalLoginParams,omitempty" tf:"additional_login_params,omitempty"` + + // External URLs that can be redirected to as part of logging in or logging out of the app. + AllowedExternalRedirectUrls []*string `json:"allowedExternalRedirectUrls,omitempty" tf:"allowed_external_redirect_urls,omitempty"` + + // The default provider to use when multiple providers have been set up. Possible values are AzureActiveDirectory, Facebook, Google, MicrosoftAccount and Twitter. + DefaultProvider *string `json:"defaultProvider,omitempty" tf:"default_provider,omitempty"` + + // Is Authentication enabled? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // A facebook block as defined below. + Facebook *FacebookInitParameters `json:"facebook,omitempty" tf:"facebook,omitempty"` + + // A google block as defined below. + Google *GoogleInitParameters `json:"google,omitempty" tf:"google,omitempty"` + + // Issuer URI. When using Azure Active Directory, this value is the URI of the directory tenant, e.g. https://sts.windows.net/{tenant-guid}/. + Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"` + + // A microsoft block as defined below. + Microsoft *MicrosoftInitParameters `json:"microsoft,omitempty" tf:"microsoft,omitempty"` + + // The runtime version of the Authentication/Authorization module. + RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` + + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72. + TokenRefreshExtensionHours *float64 `json:"tokenRefreshExtensionHours,omitempty" tf:"token_refresh_extension_hours,omitempty"` + + // If enabled the module will durably store platform-specific security tokens that are obtained during login flows. Defaults to false. 
+ TokenStoreEnabled *bool `json:"tokenStoreEnabled,omitempty" tf:"token_store_enabled,omitempty"` + + // A twitter block as defined below. + Twitter *TwitterInitParameters `json:"twitter,omitempty" tf:"twitter,omitempty"` + + // The action to take when an unauthenticated client attempts to access the app. Possible values are AllowAnonymous and RedirectToLoginPage. + UnauthenticatedClientAction *string `json:"unauthenticatedClientAction,omitempty" tf:"unauthenticated_client_action,omitempty"` +} + +type AuthSettingsObservation struct { + + // A active_directory block as defined below. + ActiveDirectory *ActiveDirectoryObservation `json:"activeDirectory,omitempty" tf:"active_directory,omitempty"` + + // Login parameters to send to the OpenID Connect authorization endpoint when a user logs in. Each parameter must be in the form "key=value". + // +mapType=granular + AdditionalLoginParams map[string]*string `json:"additionalLoginParams,omitempty" tf:"additional_login_params,omitempty"` + + // External URLs that can be redirected to as part of logging in or logging out of the app. + AllowedExternalRedirectUrls []*string `json:"allowedExternalRedirectUrls,omitempty" tf:"allowed_external_redirect_urls,omitempty"` + + // The default provider to use when multiple providers have been set up. Possible values are AzureActiveDirectory, Facebook, Google, MicrosoftAccount and Twitter. + DefaultProvider *string `json:"defaultProvider,omitempty" tf:"default_provider,omitempty"` + + // Is Authentication enabled? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // A facebook block as defined below. + Facebook *FacebookObservation `json:"facebook,omitempty" tf:"facebook,omitempty"` + + // A google block as defined below. + Google *GoogleObservation `json:"google,omitempty" tf:"google,omitempty"` + + // Issuer URI. When using Azure Active Directory, this value is the URI of the directory tenant, e.g. https://sts.windows.net/{tenant-guid}/. 
+ Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"` + + // A microsoft block as defined below. + Microsoft *MicrosoftObservation `json:"microsoft,omitempty" tf:"microsoft,omitempty"` + + // The runtime version of the Authentication/Authorization module. + RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` + + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72. + TokenRefreshExtensionHours *float64 `json:"tokenRefreshExtensionHours,omitempty" tf:"token_refresh_extension_hours,omitempty"` + + // If enabled the module will durably store platform-specific security tokens that are obtained during login flows. Defaults to false. + TokenStoreEnabled *bool `json:"tokenStoreEnabled,omitempty" tf:"token_store_enabled,omitempty"` + + // A twitter block as defined below. + Twitter *TwitterObservation `json:"twitter,omitempty" tf:"twitter,omitempty"` + + // The action to take when an unauthenticated client attempts to access the app. Possible values are AllowAnonymous and RedirectToLoginPage. + UnauthenticatedClientAction *string `json:"unauthenticatedClientAction,omitempty" tf:"unauthenticated_client_action,omitempty"` +} + +type AuthSettingsParameters struct { + + // A active_directory block as defined below. + // +kubebuilder:validation:Optional + ActiveDirectory *ActiveDirectoryParameters `json:"activeDirectory,omitempty" tf:"active_directory,omitempty"` + + // Login parameters to send to the OpenID Connect authorization endpoint when a user logs in. Each parameter must be in the form "key=value". + // +kubebuilder:validation:Optional + // +mapType=granular + AdditionalLoginParams map[string]*string `json:"additionalLoginParams,omitempty" tf:"additional_login_params,omitempty"` + + // External URLs that can be redirected to as part of logging in or logging out of the app. 
+ // +kubebuilder:validation:Optional + AllowedExternalRedirectUrls []*string `json:"allowedExternalRedirectUrls,omitempty" tf:"allowed_external_redirect_urls,omitempty"` + + // The default provider to use when multiple providers have been set up. Possible values are AzureActiveDirectory, Facebook, Google, MicrosoftAccount and Twitter. + // +kubebuilder:validation:Optional + DefaultProvider *string `json:"defaultProvider,omitempty" tf:"default_provider,omitempty"` + + // Is Authentication enabled? + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` + + // A facebook block as defined below. + // +kubebuilder:validation:Optional + Facebook *FacebookParameters `json:"facebook,omitempty" tf:"facebook,omitempty"` + + // A google block as defined below. + // +kubebuilder:validation:Optional + Google *GoogleParameters `json:"google,omitempty" tf:"google,omitempty"` + + // Issuer URI. When using Azure Active Directory, this value is the URI of the directory tenant, e.g. https://sts.windows.net/{tenant-guid}/. + // +kubebuilder:validation:Optional + Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"` + + // A microsoft block as defined below. + // +kubebuilder:validation:Optional + Microsoft *MicrosoftParameters `json:"microsoft,omitempty" tf:"microsoft,omitempty"` + + // The runtime version of the Authentication/Authorization module. + // +kubebuilder:validation:Optional + RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` + + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72. + // +kubebuilder:validation:Optional + TokenRefreshExtensionHours *float64 `json:"tokenRefreshExtensionHours,omitempty" tf:"token_refresh_extension_hours,omitempty"` + + // If enabled the module will durably store platform-specific security tokens that are obtained during login flows. Defaults to false. 
+ // +kubebuilder:validation:Optional + TokenStoreEnabled *bool `json:"tokenStoreEnabled,omitempty" tf:"token_store_enabled,omitempty"` + + // A twitter block as defined below. + // +kubebuilder:validation:Optional + Twitter *TwitterParameters `json:"twitter,omitempty" tf:"twitter,omitempty"` + + // The action to take when an unauthenticated client attempts to access the app. Possible values are AllowAnonymous and RedirectToLoginPage. + // +kubebuilder:validation:Optional + UnauthenticatedClientAction *string `json:"unauthenticatedClientAction,omitempty" tf:"unauthenticated_client_action,omitempty"` +} + +type ConnectionStringInitParameters struct { + + // The name of the Connection String. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The type of the Connection String. Possible values are APIHub, Custom, DocDb, EventHub, MySQL, NotificationHub, PostgreSQL, RedisCache, ServiceBus, SQLAzure and SQLServer. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ConnectionStringObservation struct { + + // The name of the Connection String. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The type of the Connection String. Possible values are APIHub, Custom, DocDb, EventHub, MySQL, NotificationHub, PostgreSQL, RedisCache, ServiceBus, SQLAzure and SQLServer. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type ConnectionStringParameters struct { + + // The name of the Connection String. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The type of the Connection String. Possible values are APIHub, Custom, DocDb, EventHub, MySQL, NotificationHub, PostgreSQL, RedisCache, ServiceBus, SQLAzure and SQLServer. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` + + // The value for the Connection String. 
+ // +kubebuilder:validation:Required + ValueSecretRef v1.SecretKeySelector `json:"valueSecretRef" tf:"-"` +} + +type CorsInitParameters struct { + + // A list of origins which should be able to make cross-origin calls. * can be used to allow all calls. + // +listType=set + AllowedOrigins []*string `json:"allowedOrigins,omitempty" tf:"allowed_origins,omitempty"` + + // Are credentials supported? + SupportCredentials *bool `json:"supportCredentials,omitempty" tf:"support_credentials,omitempty"` +} + +type CorsObservation struct { + + // A list of origins which should be able to make cross-origin calls. * can be used to allow all calls. + // +listType=set + AllowedOrigins []*string `json:"allowedOrigins,omitempty" tf:"allowed_origins,omitempty"` + + // Are credentials supported? + SupportCredentials *bool `json:"supportCredentials,omitempty" tf:"support_credentials,omitempty"` +} + +type CorsParameters struct { + + // A list of origins which should be able to make cross-origin calls. * can be used to allow all calls. + // +kubebuilder:validation:Optional + // +listType=set + AllowedOrigins []*string `json:"allowedOrigins" tf:"allowed_origins,omitempty"` + + // Are credentials supported? + // +kubebuilder:validation:Optional + SupportCredentials *bool `json:"supportCredentials,omitempty" tf:"support_credentials,omitempty"` +} + +type FacebookInitParameters struct { + + // The App ID of the Facebook app used for login + AppID *string `json:"appId,omitempty" tf:"app_id,omitempty"` + + // The OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. https://msdn.microsoft.com/en-us/library/dn631845.aspx + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type FacebookObservation struct { + + // The App ID of the Facebook app used for login + AppID *string `json:"appId,omitempty" tf:"app_id,omitempty"` + + // The OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. 
https://msdn.microsoft.com/en-us/library/dn631845.aspx + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type FacebookParameters struct { + + // The App ID of the Facebook app used for login + // +kubebuilder:validation:Optional + AppID *string `json:"appId" tf:"app_id,omitempty"` + + // The App Secret of the Facebook app used for Facebook login. + // +kubebuilder:validation:Required + AppSecretSecretRef v1.SecretKeySelector `json:"appSecretSecretRef" tf:"-"` + + // The OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. https://msdn.microsoft.com/en-us/library/dn631845.aspx + // +kubebuilder:validation:Optional + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type FunctionAppInitParameters struct { + + // The ID of the App Service Plan within which to create this Function App. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/web/v1beta2.AppServicePlan + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + AppServicePlanID *string `json:"appServicePlanId,omitempty" tf:"app_service_plan_id,omitempty"` + + // Reference to a AppServicePlan in web to populate appServicePlanId. + // +kubebuilder:validation:Optional + AppServicePlanIDRef *v1.Reference `json:"appServicePlanIdRef,omitempty" tf:"-"` + + // Selector for a AppServicePlan in web to populate appServicePlanId. + // +kubebuilder:validation:Optional + AppServicePlanIDSelector *v1.Selector `json:"appServicePlanIdSelector,omitempty" tf:"-"` + + // A map of key-value pairs for App Settings and custom values. + // +mapType=granular + AppSettings map[string]*string `json:"appSettings,omitempty" tf:"app_settings,omitempty"` + + // A auth_settings block as defined below. 
+ AuthSettings *AuthSettingsInitParameters `json:"authSettings,omitempty" tf:"auth_settings,omitempty"` + + // The mode of the Function App's client certificates requirement for incoming requests. Possible values are Required and Optional. + ClientCertMode *string `json:"clientCertMode,omitempty" tf:"client_cert_mode,omitempty"` + + // An connection_string block as defined below. + ConnectionString []ConnectionStringInitParameters `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // The amount of memory in gigabyte-seconds that your application is allowed to consume per day. Setting this value only affects function apps under the consumption plan. + DailyMemoryTimeQuota *float64 `json:"dailyMemoryTimeQuota,omitempty" tf:"daily_memory_time_quota,omitempty"` + + // Should the built-in logging of this Function App be enabled? Defaults to true. + EnableBuiltinLogging *bool `json:"enableBuiltinLogging,omitempty" tf:"enable_builtin_logging,omitempty"` + + // Is the Function App enabled? Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Can the Function App only be accessed via HTTPS? Defaults to false. + HTTPSOnly *bool `json:"httpsOnly,omitempty" tf:"https_only,omitempty"` + + // An identity block as defined below. + Identity *IdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The User Assigned Identity Id used for looking up KeyVault secrets. The identity must be assigned to the application. See Access vaults with a user-assigned identity for more information. + KeyVaultReferenceIdentityID *string `json:"keyVaultReferenceIdentityId,omitempty" tf:"key_vault_reference_identity_id,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the name of the Function App. 
Changing this forces a new resource to be created. Limit the function name to 32 characters to avoid naming collisions. For more information about Function App naming rule. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A string indicating the Operating System type for this function app. Possible values are linux and "" (empty string). Changing this forces a new resource to be created. Defaults to "". + OsType *string `json:"osType,omitempty" tf:"os_type,omitempty"` + + // The name of the resource group in which to create the Function App. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A site_config object as defined below. + SiteConfig *SiteConfigInitParameters `json:"siteConfig,omitempty" tf:"site_config,omitempty"` + + // A source_control block, as defined below. + SourceControl *SourceControlInitParameters `json:"sourceControl,omitempty" tf:"source_control,omitempty"` + + // The backend storage account name which will be used by this Function App (such as the dashboard, logs). Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account + StorageAccountName *string `json:"storageAccountName,omitempty" tf:"storage_account_name,omitempty"` + + // Reference to a Account in storage to populate storageAccountName. 
+ // +kubebuilder:validation:Optional + StorageAccountNameRef *v1.Reference `json:"storageAccountNameRef,omitempty" tf:"-"` + + // Selector for a Account in storage to populate storageAccountName. + // +kubebuilder:validation:Optional + StorageAccountNameSelector *v1.Selector `json:"storageAccountNameSelector,omitempty" tf:"-"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The runtime version associated with the Function App. Defaults to ~1. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type FunctionAppObservation struct { + + // The ID of the App Service Plan within which to create this Function App. + AppServicePlanID *string `json:"appServicePlanId,omitempty" tf:"app_service_plan_id,omitempty"` + + // A map of key-value pairs for App Settings and custom values. + // +mapType=granular + AppSettings map[string]*string `json:"appSettings,omitempty" tf:"app_settings,omitempty"` + + // A auth_settings block as defined below. + AuthSettings *AuthSettingsObservation `json:"authSettings,omitempty" tf:"auth_settings,omitempty"` + + // The mode of the Function App's client certificates requirement for incoming requests. Possible values are Required and Optional. + ClientCertMode *string `json:"clientCertMode,omitempty" tf:"client_cert_mode,omitempty"` + + // An connection_string block as defined below. + ConnectionString []ConnectionStringObservation `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // An identifier used by App Service to perform domain ownership verification via DNS TXT record. + CustomDomainVerificationID *string `json:"customDomainVerificationId,omitempty" tf:"custom_domain_verification_id,omitempty"` + + // The amount of memory in gigabyte-seconds that your application is allowed to consume per day. Setting this value only affects function apps under the consumption plan. 
+ DailyMemoryTimeQuota *float64 `json:"dailyMemoryTimeQuota,omitempty" tf:"daily_memory_time_quota,omitempty"` + + // The default hostname associated with the Function App - such as mysite.azurewebsites.net + DefaultHostName *string `json:"defaultHostname,omitempty" tf:"default_hostname,omitempty"` + + // Should the built-in logging of this Function App be enabled? Defaults to true. + EnableBuiltinLogging *bool `json:"enableBuiltinLogging,omitempty" tf:"enable_builtin_logging,omitempty"` + + // Is the Function App enabled? Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Can the Function App only be accessed via HTTPS? Defaults to false. + HTTPSOnly *bool `json:"httpsOnly,omitempty" tf:"https_only,omitempty"` + + // The ID of the Function App + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. + Identity *IdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // The User Assigned Identity Id used for looking up KeyVault secrets. The identity must be assigned to the application. See Access vaults with a user-assigned identity for more information. + KeyVaultReferenceIdentityID *string `json:"keyVaultReferenceIdentityId,omitempty" tf:"key_vault_reference_identity_id,omitempty"` + + // The Function App kind - such as functionapp,linux,container + Kind *string `json:"kind,omitempty" tf:"kind,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the name of the Function App. Changing this forces a new resource to be created. Limit the function name to 32 characters to avoid naming collisions. For more information about Function App naming rule. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A string indicating the Operating System type for this function app. 
Possible values are linux and “(empty string). Changing this forces a new resource to be created. Defaults to "". + OsType *string `json:"osType,omitempty" tf:"os_type,omitempty"` + + // A comma separated list of outbound IP addresses - such as 52.23.25.3,52.143.43.12 + OutboundIPAddresses *string `json:"outboundIpAddresses,omitempty" tf:"outbound_ip_addresses,omitempty"` + + // A comma separated list of outbound IP addresses - such as 52.23.25.3,52.143.43.12,52.143.43.17 - not all of which are necessarily in use. Superset of outbound_ip_addresses. + PossibleOutboundIPAddresses *string `json:"possibleOutboundIpAddresses,omitempty" tf:"possible_outbound_ip_addresses,omitempty"` + + // The name of the resource group in which to create the Function App. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A site_config object as defined below. + SiteConfig *SiteConfigObservation `json:"siteConfig,omitempty" tf:"site_config,omitempty"` + + // A site_credential block as defined below, which contains the site-level credentials used to publish to this App Service. + SiteCredential []SiteCredentialObservation `json:"siteCredential,omitempty" tf:"site_credential,omitempty"` + + // A source_control block, as defined below. + SourceControl *SourceControlObservation `json:"sourceControl,omitempty" tf:"source_control,omitempty"` + + // The backend storage account name which will be used by this Function App (such as the dashboard, logs). Changing this forces a new resource to be created. + StorageAccountName *string `json:"storageAccountName,omitempty" tf:"storage_account_name,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The runtime version associated with the Function App. Defaults to ~1. 
+ Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type FunctionAppParameters struct { + + // The ID of the App Service Plan within which to create this Function App. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/web/v1beta2.AppServicePlan + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + AppServicePlanID *string `json:"appServicePlanId,omitempty" tf:"app_service_plan_id,omitempty"` + + // Reference to a AppServicePlan in web to populate appServicePlanId. + // +kubebuilder:validation:Optional + AppServicePlanIDRef *v1.Reference `json:"appServicePlanIdRef,omitempty" tf:"-"` + + // Selector for a AppServicePlan in web to populate appServicePlanId. + // +kubebuilder:validation:Optional + AppServicePlanIDSelector *v1.Selector `json:"appServicePlanIdSelector,omitempty" tf:"-"` + + // A map of key-value pairs for App Settings and custom values. + // +kubebuilder:validation:Optional + // +mapType=granular + AppSettings map[string]*string `json:"appSettings,omitempty" tf:"app_settings,omitempty"` + + // A auth_settings block as defined below. + // +kubebuilder:validation:Optional + AuthSettings *AuthSettingsParameters `json:"authSettings,omitempty" tf:"auth_settings,omitempty"` + + // The mode of the Function App's client certificates requirement for incoming requests. Possible values are Required and Optional. + // +kubebuilder:validation:Optional + ClientCertMode *string `json:"clientCertMode,omitempty" tf:"client_cert_mode,omitempty"` + + // An connection_string block as defined below. + // +kubebuilder:validation:Optional + ConnectionString []ConnectionStringParameters `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // The amount of memory in gigabyte-seconds that your application is allowed to consume per day. Setting this value only affects function apps under the consumption plan. 
+ // +kubebuilder:validation:Optional + DailyMemoryTimeQuota *float64 `json:"dailyMemoryTimeQuota,omitempty" tf:"daily_memory_time_quota,omitempty"` + + // Should the built-in logging of this Function App be enabled? Defaults to true. + // +kubebuilder:validation:Optional + EnableBuiltinLogging *bool `json:"enableBuiltinLogging,omitempty" tf:"enable_builtin_logging,omitempty"` + + // Is the Function App enabled? Defaults to true. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Can the Function App only be accessed via HTTPS? Defaults to false. + // +kubebuilder:validation:Optional + HTTPSOnly *bool `json:"httpsOnly,omitempty" tf:"https_only,omitempty"` + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *IdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The User Assigned Identity Id used for looking up KeyVault secrets. The identity must be assigned to the application. See Access vaults with a user-assigned identity for more information. + // +kubebuilder:validation:Optional + KeyVaultReferenceIdentityID *string `json:"keyVaultReferenceIdentityId,omitempty" tf:"key_vault_reference_identity_id,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the name of the Function App. Changing this forces a new resource to be created. Limit the function name to 32 characters to avoid naming collisions. For more information about Function App naming rule. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A string indicating the Operating System type for this function app. Possible values are linux and “(empty string). Changing this forces a new resource to be created. Defaults to "". 
+ // +kubebuilder:validation:Optional + OsType *string `json:"osType,omitempty" tf:"os_type,omitempty"` + + // The name of the resource group in which to create the Function App. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A site_config object as defined below. + // +kubebuilder:validation:Optional + SiteConfig *SiteConfigParameters `json:"siteConfig,omitempty" tf:"site_config,omitempty"` + + // A source_control block, as defined below. + // +kubebuilder:validation:Optional + SourceControl *SourceControlParameters `json:"sourceControl,omitempty" tf:"source_control,omitempty"` + + // The access key which will be used to access the backend storage account for the Function App. + // +kubebuilder:validation:Optional + StorageAccountAccessKeySecretRef v1.SecretKeySelector `json:"storageAccountAccessKeySecretRef" tf:"-"` + + // The backend storage account name which will be used by this Function App (such as the dashboard, logs). Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account + // +kubebuilder:validation:Optional + StorageAccountName *string `json:"storageAccountName,omitempty" tf:"storage_account_name,omitempty"` + + // Reference to a Account in storage to populate storageAccountName. 
+ // +kubebuilder:validation:Optional + StorageAccountNameRef *v1.Reference `json:"storageAccountNameRef,omitempty" tf:"-"` + + // Selector for a Account in storage to populate storageAccountName. + // +kubebuilder:validation:Optional + StorageAccountNameSelector *v1.Selector `json:"storageAccountNameSelector,omitempty" tf:"-"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The runtime version associated with the Function App. Defaults to ~1. + // +kubebuilder:validation:Optional + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type GoogleInitParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication. + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type GoogleObservation struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication. + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type GoogleParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The OAuth 2.0 client secret that was created for the app used for authentication. 
+ // +kubebuilder:validation:Required + ClientSecretSecretRef v1.SecretKeySelector `json:"clientSecretSecretRef" tf:"-"` + + // The OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication. + // +kubebuilder:validation:Optional + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type HeadersInitParameters struct { + + // A list of allowed Azure FrontDoor IDs in UUID notation with a maximum of 8. + // +listType=set + XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid"` + + // A list to allow the Azure FrontDoor health probe header. Only allowed value is "1". + // +listType=set + XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe"` + + // A list of allowed 'X-Forwarded-For' IPs in CIDR notation with a maximum of 8 + // +listType=set + XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for"` + + // A list of allowed 'X-Forwarded-Host' domains with a maximum of 8. + // +listType=set + XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host"` +} + +type HeadersObservation struct { + + // A list of allowed Azure FrontDoor IDs in UUID notation with a maximum of 8. + // +listType=set + XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid,omitempty"` + + // A list to allow the Azure FrontDoor health probe header. Only allowed value is "1". + // +listType=set + XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe,omitempty"` + + // A list of allowed 'X-Forwarded-For' IPs in CIDR notation with a maximum of 8 + // +listType=set + XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for,omitempty"` + + // A list of allowed 'X-Forwarded-Host' domains with a maximum of 8. 
+ // +listType=set + XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host,omitempty"` +} + +type HeadersParameters struct { + + // A list of allowed Azure FrontDoor IDs in UUID notation with a maximum of 8. + // +kubebuilder:validation:Optional + // +listType=set + XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid"` + + // A list to allow the Azure FrontDoor health probe header. Only allowed value is "1". + // +kubebuilder:validation:Optional + // +listType=set + XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe"` + + // A list of allowed 'X-Forwarded-For' IPs in CIDR notation with a maximum of 8 + // +kubebuilder:validation:Optional + // +listType=set + XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for"` + + // A list of allowed 'X-Forwarded-Host' domains with a maximum of 8. + // +kubebuilder:validation:Optional + // +listType=set + XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host"` +} + +type IPRestrictionInitParameters struct { + + // Allow or Deny access for this IP range. Defaults to Allow. + Action *string `json:"action,omitempty" tf:"action"` + + // The headers block for this specific scm_ip_restriction as defined below. + Headers []HeadersInitParameters `json:"headers,omitempty" tf:"headers"` + + // The IP Address used for this IP Restriction in CIDR notation. + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address"` + + // The name for this IP Restriction. + Name *string `json:"name,omitempty" tf:"name"` + + // The priority for this IP Restriction. Restrictions are enforced in priority order. By default, priority is set to 65000 if not specified. + Priority *float64 `json:"priority,omitempty" tf:"priority"` + + // The Service Tag used for this IP Restriction. + ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag"` + + // The Virtual Network Subnet ID used for this IP Restriction. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id"` + + // Reference to a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDRef *v1.Reference `json:"virtualNetworkSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDSelector *v1.Selector `json:"virtualNetworkSubnetIdSelector,omitempty" tf:"-"` +} + +type IPRestrictionObservation struct { + + // Allow or Deny access for this IP range. Defaults to Allow. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The headers block for this specific scm_ip_restriction as defined below. + Headers []HeadersObservation `json:"headers,omitempty" tf:"headers,omitempty"` + + // The IP Address used for this IP Restriction in CIDR notation. + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // The name for this IP Restriction. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The priority for this IP Restriction. Restrictions are enforced in priority order. By default, priority is set to 65000 if not specified. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Service Tag used for this IP Restriction. + ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag,omitempty"` + + // The Virtual Network Subnet ID used for this IP Restriction. + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` +} + +type IPRestrictionParameters struct { + + // Allow or Deny access for this IP range. Defaults to Allow. 
+ // +kubebuilder:validation:Optional + Action *string `json:"action,omitempty" tf:"action"` + + // The headers block for this specific scm_ip_restriction as defined below. + // +kubebuilder:validation:Optional + Headers []HeadersParameters `json:"headers,omitempty" tf:"headers"` + + // The IP Address used for this IP Restriction in CIDR notation. + // +kubebuilder:validation:Optional + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address"` + + // The name for this IP Restriction. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name"` + + // The priority for this IP Restriction. Restrictions are enforced in priority order. By default, priority is set to 65000 if not specified. + // +kubebuilder:validation:Optional + Priority *float64 `json:"priority,omitempty" tf:"priority"` + + // The Service Tag used for this IP Restriction. + // +kubebuilder:validation:Optional + ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag"` + + // The Virtual Network Subnet ID used for this IP Restriction. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id"` + + // Reference to a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDRef *v1.Reference `json:"virtualNetworkSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDSelector *v1.Selector `json:"virtualNetworkSubnetIdSelector,omitempty" tf:"-"` +} + +type IdentityInitParameters struct { + + // Specifies a list of user managed identity ids to be assigned. Required if type is UserAssigned. 
+ // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the identity type of the Function App. Possible values are SystemAssigned (where Azure will generate a Service Principal for you), UserAssigned where you can specify the Service Principal IDs in the identity_ids field, and SystemAssigned, UserAssigned which assigns both a system managed identity as well as the specified user assigned identities. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityObservation struct { + + // Specifies a list of user managed identity ids to be assigned. Required if type is UserAssigned. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Principal ID for the Service Principal associated with the Managed Service Identity of this App Service. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID for the Service Principal associated with the Managed Service Identity of this App Service. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the identity type of the Function App. Possible values are SystemAssigned (where Azure will generate a Service Principal for you), UserAssigned where you can specify the Service Principal IDs in the identity_ids field, and SystemAssigned, UserAssigned which assigns both a system managed identity as well as the specified user assigned identities. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type IdentityParameters struct { + + // Specifies a list of user managed identity ids to be assigned. Required if type is UserAssigned. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the identity type of the Function App. 
Possible values are SystemAssigned (where Azure will generate a Service Principal for you), UserAssigned where you can specify the Service Principal IDs in the identity_ids field, and SystemAssigned, UserAssigned which assigns both a system managed identity as well as the specified user assigned identities. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type MicrosoftInitParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. https://msdn.microsoft.com/en-us/library/dn631845.aspx + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type MicrosoftObservation struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. https://msdn.microsoft.com/en-us/library/dn631845.aspx + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type MicrosoftParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The OAuth 2.0 client secret that was created for the app used for authentication. + // +kubebuilder:validation:Required + ClientSecretSecretRef v1.SecretKeySelector `json:"clientSecretSecretRef" tf:"-"` + + // The OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. 
https://msdn.microsoft.com/en-us/library/dn631845.aspx + // +kubebuilder:validation:Optional + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type ScmIPRestrictionHeadersInitParameters struct { + + // A list of allowed Azure FrontDoor IDs in UUID notation with a maximum of 8. + // +listType=set + XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid"` + + // A list to allow the Azure FrontDoor health probe header. Only allowed value is "1". + // +listType=set + XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe"` + + // A list of allowed 'X-Forwarded-For' IPs in CIDR notation with a maximum of 8 + // +listType=set + XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for"` + + // A list of allowed 'X-Forwarded-Host' domains with a maximum of 8. + // +listType=set + XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host"` +} + +type ScmIPRestrictionHeadersObservation struct { + + // A list of allowed Azure FrontDoor IDs in UUID notation with a maximum of 8. + // +listType=set + XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid,omitempty"` + + // A list to allow the Azure FrontDoor health probe header. Only allowed value is "1". + // +listType=set + XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe,omitempty"` + + // A list of allowed 'X-Forwarded-For' IPs in CIDR notation with a maximum of 8 + // +listType=set + XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for,omitempty"` + + // A list of allowed 'X-Forwarded-Host' domains with a maximum of 8. + // +listType=set + XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host,omitempty"` +} + +type ScmIPRestrictionHeadersParameters struct { + + // A list of allowed Azure FrontDoor IDs in UUID notation with a maximum of 8. 
+ // +kubebuilder:validation:Optional + // +listType=set + XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid"` + + // A list to allow the Azure FrontDoor health probe header. Only allowed value is "1". + // +kubebuilder:validation:Optional + // +listType=set + XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe"` + + // A list of allowed 'X-Forwarded-For' IPs in CIDR notation with a maximum of 8 + // +kubebuilder:validation:Optional + // +listType=set + XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for"` + + // A list of allowed 'X-Forwarded-Host' domains with a maximum of 8. + // +kubebuilder:validation:Optional + // +listType=set + XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host"` +} + +type ScmIPRestrictionInitParameters struct { + + // Allow or Deny access for this IP range. Defaults to Allow. + Action *string `json:"action,omitempty" tf:"action"` + + // The headers block for this specific scm_ip_restriction as defined below. + Headers []ScmIPRestrictionHeadersInitParameters `json:"headers,omitempty" tf:"headers"` + + // The IP Address used for this IP Restriction in CIDR notation. + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address"` + + // The name for this IP Restriction. + Name *string `json:"name,omitempty" tf:"name"` + + // The priority for this IP Restriction. Restrictions are enforced in priority order. By default, priority is set to 65000 if not specified. + Priority *float64 `json:"priority,omitempty" tf:"priority"` + + // The Service Tag used for this IP Restriction. + ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag"` + + // The Virtual Network Subnet ID used for this IP Restriction. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id"` + + // Reference to a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDRef *v1.Reference `json:"virtualNetworkSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDSelector *v1.Selector `json:"virtualNetworkSubnetIdSelector,omitempty" tf:"-"` +} + +type ScmIPRestrictionObservation struct { + + // Allow or Deny access for this IP range. Defaults to Allow. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The headers block for this specific scm_ip_restriction as defined below. + Headers []ScmIPRestrictionHeadersObservation `json:"headers,omitempty" tf:"headers,omitempty"` + + // The IP Address used for this IP Restriction in CIDR notation. + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // The name for this IP Restriction. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The priority for this IP Restriction. Restrictions are enforced in priority order. By default, priority is set to 65000 if not specified. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Service Tag used for this IP Restriction. + ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag,omitempty"` + + // The Virtual Network Subnet ID used for this IP Restriction. + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` +} + +type ScmIPRestrictionParameters struct { + + // Allow or Deny access for this IP range. Defaults to Allow. 
+ // +kubebuilder:validation:Optional + Action *string `json:"action,omitempty" tf:"action"` + + // The headers block for this specific scm_ip_restriction as defined below. + // +kubebuilder:validation:Optional + Headers []ScmIPRestrictionHeadersParameters `json:"headers,omitempty" tf:"headers"` + + // The IP Address used for this IP Restriction in CIDR notation. + // +kubebuilder:validation:Optional + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address"` + + // The name for this IP Restriction. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name"` + + // The priority for this IP Restriction. Restrictions are enforced in priority order. By default, priority is set to 65000 if not specified. + // +kubebuilder:validation:Optional + Priority *float64 `json:"priority,omitempty" tf:"priority"` + + // The Service Tag used for this IP Restriction. + // +kubebuilder:validation:Optional + ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag"` + + // The Virtual Network Subnet ID used for this IP Restriction. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id"` + + // Reference to a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDRef *v1.Reference `json:"virtualNetworkSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDSelector *v1.Selector `json:"virtualNetworkSubnetIdSelector,omitempty" tf:"-"` +} + +type SiteConfigInitParameters struct { + + // Should the Function App be loaded at all times? Defaults to false. 
+ AlwaysOn *bool `json:"alwaysOn,omitempty" tf:"always_on,omitempty"` + + // The number of workers this function app can scale out to. Only applicable to apps on the Consumption and Premium plan. + AppScaleLimit *float64 `json:"appScaleLimit,omitempty" tf:"app_scale_limit,omitempty"` + + // The name of the slot to automatically swap to during deployment + AutoSwapSlotName *string `json:"autoSwapSlotName,omitempty" tf:"auto_swap_slot_name,omitempty"` + + // A cors block as defined below. + Cors *CorsInitParameters `json:"cors,omitempty" tf:"cors,omitempty"` + + // The version of the .NET framework's CLR used in this function app. Possible values are v4.0 (including .NET Core 2.1 and 3.1), v5.0 and v6.0. For more information on which .NET Framework version to use based on the runtime version you're targeting - please see this table. Defaults to v4.0. + DotnetFrameworkVersion *string `json:"dotnetFrameworkVersion,omitempty" tf:"dotnet_framework_version,omitempty"` + + // The number of minimum instances for this function app. Only affects apps on the Premium plan. + ElasticInstanceMinimum *float64 `json:"elasticInstanceMinimum,omitempty" tf:"elastic_instance_minimum,omitempty"` + + // State of FTP / FTPS service for this function app. Possible values include: AllAllowed, FtpsOnly and Disabled. Defaults to AllAllowed. + FtpsState *string `json:"ftpsState,omitempty" tf:"ftps_state,omitempty"` + + // Path which will be checked for this function app health. + HealthCheckPath *string `json:"healthCheckPath,omitempty" tf:"health_check_path,omitempty"` + + // Specifies whether or not the HTTP2 protocol should be enabled. Defaults to false. + Http2Enabled *bool `json:"http2Enabled,omitempty" tf:"http2_enabled,omitempty"` + + // A list of ip_restriction objects representing IP restrictions as defined below. + IPRestriction []IPRestrictionInitParameters `json:"ipRestriction,omitempty" tf:"ip_restriction,omitempty"` + + // Java version hosted by the function app in Azure. 
Possible values are 1.8, 11 & 17 (In-Preview). + JavaVersion *string `json:"javaVersion,omitempty" tf:"java_version,omitempty"` + + // Linux App Framework and version for the AppService, e.g. DOCKER|(golang:latest). + LinuxFxVersion *string `json:"linuxFxVersion,omitempty" tf:"linux_fx_version,omitempty"` + + // The minimum supported TLS version for the function app. Possible values are 1.0, 1.1, and 1.2. Defaults to 1.2 for new function apps. + MinTLSVersion *string `json:"minTlsVersion,omitempty" tf:"min_tls_version,omitempty"` + + // The number of pre-warmed instances for this function app. Only affects apps on the Premium plan. + PreWarmedInstanceCount *float64 `json:"preWarmedInstanceCount,omitempty" tf:"pre_warmed_instance_count,omitempty"` + + // Should Runtime Scale Monitoring be enabled?. Only applicable to apps on the Premium plan. Defaults to false. + RuntimeScaleMonitoringEnabled *bool `json:"runtimeScaleMonitoringEnabled,omitempty" tf:"runtime_scale_monitoring_enabled,omitempty"` + + // A list of scm_ip_restriction objects representing IP restrictions as defined below. + ScmIPRestriction []ScmIPRestrictionInitParameters `json:"scmIpRestriction,omitempty" tf:"scm_ip_restriction,omitempty"` + + // The type of Source Control used by the Function App. Valid values include: BitBucketGit, BitBucketHg, CodePlexGit, CodePlexHg, Dropbox, ExternalGit, ExternalHg, GitHub, LocalGit, None (default), OneDrive, Tfs, VSO, and VSTSRM. + ScmType *string `json:"scmType,omitempty" tf:"scm_type,omitempty"` + + // IP security restrictions for scm to use main. Defaults to false. + ScmUseMainIPRestriction *bool `json:"scmUseMainIpRestriction,omitempty" tf:"scm_use_main_ip_restriction,omitempty"` + + // Should the Function App run in 32 bit mode, rather than 64 bit mode? Defaults to true. 
+ Use32BitWorkerProcess *bool `json:"use32BitWorkerProcess,omitempty" tf:"use_32_bit_worker_process,omitempty"` + + // Should all outbound traffic to have Virtual Network Security Groups and User Defined Routes applied? Defaults to false. + VnetRouteAllEnabled *bool `json:"vnetRouteAllEnabled,omitempty" tf:"vnet_route_all_enabled,omitempty"` + + // Should WebSockets be enabled? + WebsocketsEnabled *bool `json:"websocketsEnabled,omitempty" tf:"websockets_enabled,omitempty"` +} + +type SiteConfigObservation struct { + + // Should the Function App be loaded at all times? Defaults to false. + AlwaysOn *bool `json:"alwaysOn,omitempty" tf:"always_on,omitempty"` + + // The number of workers this function app can scale out to. Only applicable to apps on the Consumption and Premium plan. + AppScaleLimit *float64 `json:"appScaleLimit,omitempty" tf:"app_scale_limit,omitempty"` + + // The name of the slot to automatically swap to during deployment + AutoSwapSlotName *string `json:"autoSwapSlotName,omitempty" tf:"auto_swap_slot_name,omitempty"` + + // A cors block as defined below. + Cors *CorsObservation `json:"cors,omitempty" tf:"cors,omitempty"` + + // The version of the .NET framework's CLR used in this function app. Possible values are v4.0 (including .NET Core 2.1 and 3.1), v5.0 and v6.0. For more information on which .NET Framework version to use based on the runtime version you're targeting - please see this table. Defaults to v4.0. + DotnetFrameworkVersion *string `json:"dotnetFrameworkVersion,omitempty" tf:"dotnet_framework_version,omitempty"` + + // The number of minimum instances for this function app. Only affects apps on the Premium plan. + ElasticInstanceMinimum *float64 `json:"elasticInstanceMinimum,omitempty" tf:"elastic_instance_minimum,omitempty"` + + // State of FTP / FTPS service for this function app. Possible values include: AllAllowed, FtpsOnly and Disabled. Defaults to AllAllowed. 
+ FtpsState *string `json:"ftpsState,omitempty" tf:"ftps_state,omitempty"` + + // Path which will be checked for this function app health. + HealthCheckPath *string `json:"healthCheckPath,omitempty" tf:"health_check_path,omitempty"` + + // Specifies whether or not the HTTP2 protocol should be enabled. Defaults to false. + Http2Enabled *bool `json:"http2Enabled,omitempty" tf:"http2_enabled,omitempty"` + + // A list of ip_restriction objects representing IP restrictions as defined below. + IPRestriction []IPRestrictionObservation `json:"ipRestriction,omitempty" tf:"ip_restriction,omitempty"` + + // Java version hosted by the function app in Azure. Possible values are 1.8, 11 & 17 (In-Preview). + JavaVersion *string `json:"javaVersion,omitempty" tf:"java_version,omitempty"` + + // Linux App Framework and version for the AppService, e.g. DOCKER|(golang:latest). + LinuxFxVersion *string `json:"linuxFxVersion,omitempty" tf:"linux_fx_version,omitempty"` + + // The minimum supported TLS version for the function app. Possible values are 1.0, 1.1, and 1.2. Defaults to 1.2 for new function apps. + MinTLSVersion *string `json:"minTlsVersion,omitempty" tf:"min_tls_version,omitempty"` + + // The number of pre-warmed instances for this function app. Only affects apps on the Premium plan. + PreWarmedInstanceCount *float64 `json:"preWarmedInstanceCount,omitempty" tf:"pre_warmed_instance_count,omitempty"` + + // Should Runtime Scale Monitoring be enabled?. Only applicable to apps on the Premium plan. Defaults to false. + RuntimeScaleMonitoringEnabled *bool `json:"runtimeScaleMonitoringEnabled,omitempty" tf:"runtime_scale_monitoring_enabled,omitempty"` + + // A list of scm_ip_restriction objects representing IP restrictions as defined below. + ScmIPRestriction []ScmIPRestrictionObservation `json:"scmIpRestriction,omitempty" tf:"scm_ip_restriction,omitempty"` + + // The type of Source Control used by the Function App. 
Valid values include: BitBucketGit, BitBucketHg, CodePlexGit, CodePlexHg, Dropbox, ExternalGit, ExternalHg, GitHub, LocalGit, None (default), OneDrive, Tfs, VSO, and VSTSRM. + ScmType *string `json:"scmType,omitempty" tf:"scm_type,omitempty"` + + // IP security restrictions for scm to use main. Defaults to false. + ScmUseMainIPRestriction *bool `json:"scmUseMainIpRestriction,omitempty" tf:"scm_use_main_ip_restriction,omitempty"` + + // Should the Function App run in 32 bit mode, rather than 64 bit mode? Defaults to true. + Use32BitWorkerProcess *bool `json:"use32BitWorkerProcess,omitempty" tf:"use_32_bit_worker_process,omitempty"` + + // Should all outbound traffic to have Virtual Network Security Groups and User Defined Routes applied? Defaults to false. + VnetRouteAllEnabled *bool `json:"vnetRouteAllEnabled,omitempty" tf:"vnet_route_all_enabled,omitempty"` + + // Should WebSockets be enabled? + WebsocketsEnabled *bool `json:"websocketsEnabled,omitempty" tf:"websockets_enabled,omitempty"` +} + +type SiteConfigParameters struct { + + // Should the Function App be loaded at all times? Defaults to false. + // +kubebuilder:validation:Optional + AlwaysOn *bool `json:"alwaysOn,omitempty" tf:"always_on,omitempty"` + + // The number of workers this function app can scale out to. Only applicable to apps on the Consumption and Premium plan. + // +kubebuilder:validation:Optional + AppScaleLimit *float64 `json:"appScaleLimit,omitempty" tf:"app_scale_limit,omitempty"` + + // The name of the slot to automatically swap to during deployment + // +kubebuilder:validation:Optional + AutoSwapSlotName *string `json:"autoSwapSlotName,omitempty" tf:"auto_swap_slot_name,omitempty"` + + // A cors block as defined below. + // +kubebuilder:validation:Optional + Cors *CorsParameters `json:"cors,omitempty" tf:"cors,omitempty"` + + // The version of the .NET framework's CLR used in this function app. Possible values are v4.0 (including .NET Core 2.1 and 3.1), v5.0 and v6.0. 
For more information on which .NET Framework version to use based on the runtime version you're targeting - please see this table. Defaults to v4.0. + // +kubebuilder:validation:Optional + DotnetFrameworkVersion *string `json:"dotnetFrameworkVersion,omitempty" tf:"dotnet_framework_version,omitempty"` + + // The number of minimum instances for this function app. Only affects apps on the Premium plan. + // +kubebuilder:validation:Optional + ElasticInstanceMinimum *float64 `json:"elasticInstanceMinimum,omitempty" tf:"elastic_instance_minimum,omitempty"` + + // State of FTP / FTPS service for this function app. Possible values include: AllAllowed, FtpsOnly and Disabled. Defaults to AllAllowed. + // +kubebuilder:validation:Optional + FtpsState *string `json:"ftpsState,omitempty" tf:"ftps_state,omitempty"` + + // Path which will be checked for this function app health. + // +kubebuilder:validation:Optional + HealthCheckPath *string `json:"healthCheckPath,omitempty" tf:"health_check_path,omitempty"` + + // Specifies whether or not the HTTP2 protocol should be enabled. Defaults to false. + // +kubebuilder:validation:Optional + Http2Enabled *bool `json:"http2Enabled,omitempty" tf:"http2_enabled,omitempty"` + + // A list of ip_restriction objects representing IP restrictions as defined below. + // +kubebuilder:validation:Optional + IPRestriction []IPRestrictionParameters `json:"ipRestriction,omitempty" tf:"ip_restriction,omitempty"` + + // Java version hosted by the function app in Azure. Possible values are 1.8, 11 & 17 (In-Preview). + // +kubebuilder:validation:Optional + JavaVersion *string `json:"javaVersion,omitempty" tf:"java_version,omitempty"` + + // Linux App Framework and version for the AppService, e.g. DOCKER|(golang:latest). + // +kubebuilder:validation:Optional + LinuxFxVersion *string `json:"linuxFxVersion,omitempty" tf:"linux_fx_version,omitempty"` + + // The minimum supported TLS version for the function app. Possible values are 1.0, 1.1, and 1.2. 
Defaults to 1.2 for new function apps. + // +kubebuilder:validation:Optional + MinTLSVersion *string `json:"minTlsVersion,omitempty" tf:"min_tls_version,omitempty"` + + // The number of pre-warmed instances for this function app. Only affects apps on the Premium plan. + // +kubebuilder:validation:Optional + PreWarmedInstanceCount *float64 `json:"preWarmedInstanceCount,omitempty" tf:"pre_warmed_instance_count,omitempty"` + + // Should Runtime Scale Monitoring be enabled?. Only applicable to apps on the Premium plan. Defaults to false. + // +kubebuilder:validation:Optional + RuntimeScaleMonitoringEnabled *bool `json:"runtimeScaleMonitoringEnabled,omitempty" tf:"runtime_scale_monitoring_enabled,omitempty"` + + // A list of scm_ip_restriction objects representing IP restrictions as defined below. + // +kubebuilder:validation:Optional + ScmIPRestriction []ScmIPRestrictionParameters `json:"scmIpRestriction,omitempty" tf:"scm_ip_restriction,omitempty"` + + // The type of Source Control used by the Function App. Valid values include: BitBucketGit, BitBucketHg, CodePlexGit, CodePlexHg, Dropbox, ExternalGit, ExternalHg, GitHub, LocalGit, None (default), OneDrive, Tfs, VSO, and VSTSRM. + // +kubebuilder:validation:Optional + ScmType *string `json:"scmType,omitempty" tf:"scm_type,omitempty"` + + // IP security restrictions for scm to use main. Defaults to false. + // +kubebuilder:validation:Optional + ScmUseMainIPRestriction *bool `json:"scmUseMainIpRestriction,omitempty" tf:"scm_use_main_ip_restriction,omitempty"` + + // Should the Function App run in 32 bit mode, rather than 64 bit mode? Defaults to true. + // +kubebuilder:validation:Optional + Use32BitWorkerProcess *bool `json:"use32BitWorkerProcess,omitempty" tf:"use_32_bit_worker_process,omitempty"` + + // Should all outbound traffic to have Virtual Network Security Groups and User Defined Routes applied? Defaults to false. 
+ // +kubebuilder:validation:Optional + VnetRouteAllEnabled *bool `json:"vnetRouteAllEnabled,omitempty" tf:"vnet_route_all_enabled,omitempty"` + + // Should WebSockets be enabled? + // +kubebuilder:validation:Optional + WebsocketsEnabled *bool `json:"websocketsEnabled,omitempty" tf:"websockets_enabled,omitempty"` +} + +type SiteCredentialInitParameters struct { +} + +type SiteCredentialObservation struct { + + // The password associated with the username, which can be used to publish to this App Service. + Password *string `json:"password,omitempty" tf:"password,omitempty"` + + // The username which can be used to publish to this App Service + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type SiteCredentialParameters struct { +} + +type SourceControlInitParameters struct { + + // The branch of the remote repository to use. Defaults to 'master'. + Branch *string `json:"branch,omitempty" tf:"branch,omitempty"` + + // Limits to manual integration. Defaults to false if not specified. + ManualIntegration *bool `json:"manualIntegration,omitempty" tf:"manual_integration,omitempty"` + + // The URL of the source code repository. + RepoURL *string `json:"repoUrl,omitempty" tf:"repo_url,omitempty"` + + // Enable roll-back for the repository. Defaults to false if not specified. + RollbackEnabled *bool `json:"rollbackEnabled,omitempty" tf:"rollback_enabled,omitempty"` + + // Use Mercurial if true, otherwise uses Git. + UseMercurial *bool `json:"useMercurial,omitempty" tf:"use_mercurial,omitempty"` +} + +type SourceControlObservation struct { + + // The branch of the remote repository to use. Defaults to 'master'. + Branch *string `json:"branch,omitempty" tf:"branch,omitempty"` + + // Limits to manual integration. Defaults to false if not specified. + ManualIntegration *bool `json:"manualIntegration,omitempty" tf:"manual_integration,omitempty"` + + // The URL of the source code repository. 
+ RepoURL *string `json:"repoUrl,omitempty" tf:"repo_url,omitempty"` + + // Enable roll-back for the repository. Defaults to false if not specified. + RollbackEnabled *bool `json:"rollbackEnabled,omitempty" tf:"rollback_enabled,omitempty"` + + // Use Mercurial if true, otherwise uses Git. + UseMercurial *bool `json:"useMercurial,omitempty" tf:"use_mercurial,omitempty"` +} + +type SourceControlParameters struct { + + // The branch of the remote repository to use. Defaults to 'master'. + // +kubebuilder:validation:Optional + Branch *string `json:"branch,omitempty" tf:"branch,omitempty"` + + // Limits to manual integration. Defaults to false if not specified. + // +kubebuilder:validation:Optional + ManualIntegration *bool `json:"manualIntegration,omitempty" tf:"manual_integration,omitempty"` + + // The URL of the source code repository. + // +kubebuilder:validation:Optional + RepoURL *string `json:"repoUrl,omitempty" tf:"repo_url,omitempty"` + + // Enable roll-back for the repository. Defaults to false if not specified. + // +kubebuilder:validation:Optional + RollbackEnabled *bool `json:"rollbackEnabled,omitempty" tf:"rollback_enabled,omitempty"` + + // Use Mercurial if true, otherwise uses Git. + // +kubebuilder:validation:Optional + UseMercurial *bool `json:"useMercurial,omitempty" tf:"use_mercurial,omitempty"` +} + +type TwitterInitParameters struct { + + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + ConsumerKey *string `json:"consumerKey,omitempty" tf:"consumer_key,omitempty"` +} + +type TwitterObservation struct { + + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + ConsumerKey *string `json:"consumerKey,omitempty" tf:"consumer_key,omitempty"` +} + +type TwitterParameters struct { + + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. 
+ // +kubebuilder:validation:Optional + ConsumerKey *string `json:"consumerKey" tf:"consumer_key,omitempty"` + + // The OAuth 1.0a consumer secret of the Twitter application used for sign-in. + // +kubebuilder:validation:Required + ConsumerSecretSecretRef v1.SecretKeySelector `json:"consumerSecretSecretRef" tf:"-"` +} + +// FunctionAppSpec defines the desired state of FunctionApp +type FunctionAppSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider FunctionAppParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider FunctionAppInitParameters `json:"initProvider,omitempty"` +} + +// FunctionAppStatus defines the observed state of FunctionApp. +type FunctionAppStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider FunctionAppObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// FunctionApp is the Schema for the FunctionApps API. Manages a Function App. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type FunctionApp struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.storageAccountAccessKeySecretRef)",message="spec.forProvider.storageAccountAccessKeySecretRef is a required parameter" + Spec FunctionAppSpec `json:"spec"` + Status FunctionAppStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// FunctionAppList contains a list of FunctionApps +type FunctionAppList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []FunctionApp `json:"items"` +} + +// Repository type metadata. 
+var ( + FunctionApp_Kind = "FunctionApp" + FunctionApp_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: FunctionApp_Kind}.String() + FunctionApp_KindAPIVersion = FunctionApp_Kind + "." + CRDGroupVersion.String() + FunctionApp_GroupVersionKind = CRDGroupVersion.WithKind(FunctionApp_Kind) +) + +func init() { + SchemeBuilder.Register(&FunctionApp{}, &FunctionAppList{}) +} diff --git a/apis/web/v1beta2/zz_functionappslot_terraformed.go b/apis/web/v1beta2/zz_functionappslot_terraformed.go new file mode 100755 index 000000000..5e4eb773d --- /dev/null +++ b/apis/web/v1beta2/zz_functionappslot_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this FunctionAppSlot +func (mg *FunctionAppSlot) GetTerraformResourceType() string { + return "azurerm_function_app_slot" +} + +// GetConnectionDetailsMapping for this FunctionAppSlot +func (tr *FunctionAppSlot) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"auth_settings[*].active_directory[*].client_secret": "spec.forProvider.authSettings[*].activeDirectory[*].clientSecretSecretRef", "auth_settings[*].facebook[*].app_secret": "spec.forProvider.authSettings[*].facebook[*].appSecretSecretRef", "auth_settings[*].google[*].client_secret": "spec.forProvider.authSettings[*].google[*].clientSecretSecretRef", "auth_settings[*].microsoft[*].client_secret": "spec.forProvider.authSettings[*].microsoft[*].clientSecretSecretRef", "auth_settings[*].twitter[*].consumer_secret": "spec.forProvider.authSettings[*].twitter[*].consumerSecretSecretRef", "connection_string[*].value": "spec.forProvider.connectionString[*].valueSecretRef", 
"storage_account_access_key": "spec.forProvider.storageAccountAccessKeySecretRef"} +} + +// GetObservation of this FunctionAppSlot +func (tr *FunctionAppSlot) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this FunctionAppSlot +func (tr *FunctionAppSlot) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this FunctionAppSlot +func (tr *FunctionAppSlot) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this FunctionAppSlot +func (tr *FunctionAppSlot) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this FunctionAppSlot +func (tr *FunctionAppSlot) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this FunctionAppSlot +func (tr *FunctionAppSlot) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this FunctionAppSlot +func (tr *FunctionAppSlot) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if 
!shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this FunctionAppSlot using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *FunctionAppSlot) LateInitialize(attrs []byte) (bool, error) { + params := &FunctionAppSlotParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *FunctionAppSlot) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/web/v1beta2/zz_functionappslot_types.go b/apis/web/v1beta2/zz_functionappslot_types.go new file mode 100755 index 000000000..cedda9d90 --- /dev/null +++ b/apis/web/v1beta2/zz_functionappslot_types.go @@ -0,0 +1,1298 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AuthSettingsActiveDirectoryInitParameters struct { + + // Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` +} + +type AuthSettingsActiveDirectoryObservation struct { + + // Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` +} + +type AuthSettingsActiveDirectoryParameters struct { + + // Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + // +kubebuilder:validation:Optional + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The OAuth 2.0 client secret that was created for the app used for authentication. + // +kubebuilder:validation:Optional + ClientSecretSecretRef *v1.SecretKeySelector `json:"clientSecretSecretRef,omitempty" tf:"-"` +} + +type AuthSettingsFacebookInitParameters struct { + + // The App ID of the Facebook app used for login + AppID *string `json:"appId,omitempty" tf:"app_id,omitempty"` + + // The OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. 
https://msdn.microsoft.com/en-us/library/dn631845.aspx + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type AuthSettingsFacebookObservation struct { + + // The App ID of the Facebook app used for login + AppID *string `json:"appId,omitempty" tf:"app_id,omitempty"` + + // The OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. https://msdn.microsoft.com/en-us/library/dn631845.aspx + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type AuthSettingsFacebookParameters struct { + + // The App ID of the Facebook app used for login + // +kubebuilder:validation:Optional + AppID *string `json:"appId" tf:"app_id,omitempty"` + + // The App Secret of the Facebook app used for Facebook login. + // +kubebuilder:validation:Required + AppSecretSecretRef v1.SecretKeySelector `json:"appSecretSecretRef" tf:"-"` + + // The OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. https://msdn.microsoft.com/en-us/library/dn631845.aspx + // +kubebuilder:validation:Optional + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type AuthSettingsGoogleInitParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. https://msdn.microsoft.com/en-us/library/dn631845.aspx + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type AuthSettingsGoogleObservation struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. 
https://msdn.microsoft.com/en-us/library/dn631845.aspx + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type AuthSettingsGoogleParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The OAuth 2.0 client secret that was created for the app used for authentication. + // +kubebuilder:validation:Required + ClientSecretSecretRef v1.SecretKeySelector `json:"clientSecretSecretRef" tf:"-"` + + // The OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. https://msdn.microsoft.com/en-us/library/dn631845.aspx + // +kubebuilder:validation:Optional + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type AuthSettingsMicrosoftInitParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. https://msdn.microsoft.com/en-us/library/dn631845.aspx + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type AuthSettingsMicrosoftObservation struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. https://msdn.microsoft.com/en-us/library/dn631845.aspx + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type AuthSettingsMicrosoftParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. 
+ // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The OAuth 2.0 client secret that was created for the app used for authentication. + // +kubebuilder:validation:Required + ClientSecretSecretRef v1.SecretKeySelector `json:"clientSecretSecretRef" tf:"-"` + + // The OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. https://msdn.microsoft.com/en-us/library/dn631845.aspx + // +kubebuilder:validation:Optional + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type AuthSettingsTwitterInitParameters struct { + + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + ConsumerKey *string `json:"consumerKey,omitempty" tf:"consumer_key,omitempty"` +} + +type AuthSettingsTwitterObservation struct { + + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + ConsumerKey *string `json:"consumerKey,omitempty" tf:"consumer_key,omitempty"` +} + +type AuthSettingsTwitterParameters struct { + + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // +kubebuilder:validation:Optional + ConsumerKey *string `json:"consumerKey" tf:"consumer_key,omitempty"` + + // The OAuth 1.0a consumer secret of the Twitter application used for sign-in. + // +kubebuilder:validation:Required + ConsumerSecretSecretRef v1.SecretKeySelector `json:"consumerSecretSecretRef" tf:"-"` +} + +type FunctionAppSlotAuthSettingsInitParameters struct { + + // An active_directory block as defined below. + ActiveDirectory *AuthSettingsActiveDirectoryInitParameters `json:"activeDirectory,omitempty" tf:"active_directory,omitempty"` + + // login parameters to send to the OpenID Connect authorization endpoint when a user logs in. Each parameter must be in the form "key=value". 
+ // +mapType=granular + AdditionalLoginParams map[string]*string `json:"additionalLoginParams,omitempty" tf:"additional_login_params,omitempty"` + + // External URLs that can be redirected to as part of logging in or logging out of the app. + AllowedExternalRedirectUrls []*string `json:"allowedExternalRedirectUrls,omitempty" tf:"allowed_external_redirect_urls,omitempty"` + + // The default provider to use when multiple providers have been set up. Possible values are AzureActiveDirectory, Facebook, Google, MicrosoftAccount and Twitter. + DefaultProvider *string `json:"defaultProvider,omitempty" tf:"default_provider,omitempty"` + + // Is Authentication enabled? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // A facebook block as defined below. + Facebook *AuthSettingsFacebookInitParameters `json:"facebook,omitempty" tf:"facebook,omitempty"` + + // A google block as defined below. + Google *AuthSettingsGoogleInitParameters `json:"google,omitempty" tf:"google,omitempty"` + + // Issuer URI. When using Azure Active Directory, this value is the URI of the directory tenant, e.g. https://sts.windows.net/{tenant-guid}/. + Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"` + + // A microsoft block as defined below. + Microsoft *AuthSettingsMicrosoftInitParameters `json:"microsoft,omitempty" tf:"microsoft,omitempty"` + + // The runtime version of the Authentication/Authorization module. + RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` + + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72. + TokenRefreshExtensionHours *float64 `json:"tokenRefreshExtensionHours,omitempty" tf:"token_refresh_extension_hours,omitempty"` + + // If enabled the module will durably store platform-specific security tokens that are obtained during login flows. Defaults to false. 
+ TokenStoreEnabled *bool `json:"tokenStoreEnabled,omitempty" tf:"token_store_enabled,omitempty"` + + // A twitter block as defined below. + Twitter *AuthSettingsTwitterInitParameters `json:"twitter,omitempty" tf:"twitter,omitempty"` + + // The action to take when an unauthenticated client attempts to access the app. Possible values are AllowAnonymous and RedirectToLoginPage. + UnauthenticatedClientAction *string `json:"unauthenticatedClientAction,omitempty" tf:"unauthenticated_client_action,omitempty"` +} + +type FunctionAppSlotAuthSettingsObservation struct { + + // An active_directory block as defined below. + ActiveDirectory *AuthSettingsActiveDirectoryObservation `json:"activeDirectory,omitempty" tf:"active_directory,omitempty"` + + // login parameters to send to the OpenID Connect authorization endpoint when a user logs in. Each parameter must be in the form "key=value". + // +mapType=granular + AdditionalLoginParams map[string]*string `json:"additionalLoginParams,omitempty" tf:"additional_login_params,omitempty"` + + // External URLs that can be redirected to as part of logging in or logging out of the app. + AllowedExternalRedirectUrls []*string `json:"allowedExternalRedirectUrls,omitempty" tf:"allowed_external_redirect_urls,omitempty"` + + // The default provider to use when multiple providers have been set up. Possible values are AzureActiveDirectory, Facebook, Google, MicrosoftAccount and Twitter. + DefaultProvider *string `json:"defaultProvider,omitempty" tf:"default_provider,omitempty"` + + // Is Authentication enabled? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // A facebook block as defined below. + Facebook *AuthSettingsFacebookObservation `json:"facebook,omitempty" tf:"facebook,omitempty"` + + // A google block as defined below. + Google *AuthSettingsGoogleObservation `json:"google,omitempty" tf:"google,omitempty"` + + // Issuer URI. When using Azure Active Directory, this value is the URI of the directory tenant, e.g. 
https://sts.windows.net/{tenant-guid}/. + Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"` + + // A microsoft block as defined below. + Microsoft *AuthSettingsMicrosoftObservation `json:"microsoft,omitempty" tf:"microsoft,omitempty"` + + // The runtime version of the Authentication/Authorization module. + RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` + + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72. + TokenRefreshExtensionHours *float64 `json:"tokenRefreshExtensionHours,omitempty" tf:"token_refresh_extension_hours,omitempty"` + + // If enabled the module will durably store platform-specific security tokens that are obtained during login flows. Defaults to false. + TokenStoreEnabled *bool `json:"tokenStoreEnabled,omitempty" tf:"token_store_enabled,omitempty"` + + // A twitter block as defined below. + Twitter *AuthSettingsTwitterObservation `json:"twitter,omitempty" tf:"twitter,omitempty"` + + // The action to take when an unauthenticated client attempts to access the app. Possible values are AllowAnonymous and RedirectToLoginPage. + UnauthenticatedClientAction *string `json:"unauthenticatedClientAction,omitempty" tf:"unauthenticated_client_action,omitempty"` +} + +type FunctionAppSlotAuthSettingsParameters struct { + + // An active_directory block as defined below. + // +kubebuilder:validation:Optional + ActiveDirectory *AuthSettingsActiveDirectoryParameters `json:"activeDirectory,omitempty" tf:"active_directory,omitempty"` + + // login parameters to send to the OpenID Connect authorization endpoint when a user logs in. Each parameter must be in the form "key=value". 
+ // +kubebuilder:validation:Optional + // +mapType=granular + AdditionalLoginParams map[string]*string `json:"additionalLoginParams,omitempty" tf:"additional_login_params,omitempty"` + + // External URLs that can be redirected to as part of logging in or logging out of the app. + // +kubebuilder:validation:Optional + AllowedExternalRedirectUrls []*string `json:"allowedExternalRedirectUrls,omitempty" tf:"allowed_external_redirect_urls,omitempty"` + + // The default provider to use when multiple providers have been set up. Possible values are AzureActiveDirectory, Facebook, Google, MicrosoftAccount and Twitter. + // +kubebuilder:validation:Optional + DefaultProvider *string `json:"defaultProvider,omitempty" tf:"default_provider,omitempty"` + + // Is Authentication enabled? + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` + + // A facebook block as defined below. + // +kubebuilder:validation:Optional + Facebook *AuthSettingsFacebookParameters `json:"facebook,omitempty" tf:"facebook,omitempty"` + + // A google block as defined below. + // +kubebuilder:validation:Optional + Google *AuthSettingsGoogleParameters `json:"google,omitempty" tf:"google,omitempty"` + + // Issuer URI. When using Azure Active Directory, this value is the URI of the directory tenant, e.g. https://sts.windows.net/{tenant-guid}/. + // +kubebuilder:validation:Optional + Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"` + + // A microsoft block as defined below. + // +kubebuilder:validation:Optional + Microsoft *AuthSettingsMicrosoftParameters `json:"microsoft,omitempty" tf:"microsoft,omitempty"` + + // The runtime version of the Authentication/Authorization module. + // +kubebuilder:validation:Optional + RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` + + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72. 
+ // +kubebuilder:validation:Optional + TokenRefreshExtensionHours *float64 `json:"tokenRefreshExtensionHours,omitempty" tf:"token_refresh_extension_hours,omitempty"` + + // If enabled the module will durably store platform-specific security tokens that are obtained during login flows. Defaults to false. + // +kubebuilder:validation:Optional + TokenStoreEnabled *bool `json:"tokenStoreEnabled,omitempty" tf:"token_store_enabled,omitempty"` + + // A twitter block as defined below. + // +kubebuilder:validation:Optional + Twitter *AuthSettingsTwitterParameters `json:"twitter,omitempty" tf:"twitter,omitempty"` + + // The action to take when an unauthenticated client attempts to access the app. Possible values are AllowAnonymous and RedirectToLoginPage. + // +kubebuilder:validation:Optional + UnauthenticatedClientAction *string `json:"unauthenticatedClientAction,omitempty" tf:"unauthenticated_client_action,omitempty"` +} + +type FunctionAppSlotConnectionStringInitParameters struct { + + // The name of the Connection String. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The type of the Connection String. Possible values are APIHub, Custom, DocDb, EventHub, MySQL, NotificationHub, PostgreSQL, RedisCache, ServiceBus, SQLAzure and SQLServer. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type FunctionAppSlotConnectionStringObservation struct { + + // The name of the Connection String. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The type of the Connection String. Possible values are APIHub, Custom, DocDb, EventHub, MySQL, NotificationHub, PostgreSQL, RedisCache, ServiceBus, SQLAzure and SQLServer. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type FunctionAppSlotConnectionStringParameters struct { + + // The name of the Connection String. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The type of the Connection String. 
Possible values are APIHub, Custom, DocDb, EventHub, MySQL, NotificationHub, PostgreSQL, RedisCache, ServiceBus, SQLAzure and SQLServer. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` + + // The value for the Connection String. + // +kubebuilder:validation:Required + ValueSecretRef v1.SecretKeySelector `json:"valueSecretRef" tf:"-"` +} + +type FunctionAppSlotIdentityInitParameters struct { + + // Specifies a list of user managed identity ids to be assigned. Required if type is UserAssigned. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the identity type of the Function App. Possible values are SystemAssigned (where Azure will generate a Service Principal for you), UserAssigned where you can specify the Service Principal IDs in the identity_ids field, and SystemAssigned, UserAssigned which assigns both a system managed identity as well as the specified user assigned identities. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type FunctionAppSlotIdentityObservation struct { + + // Specifies a list of user managed identity ids to be assigned. Required if type is UserAssigned. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Principal ID for the Service Principal associated with the Managed Service Identity of this App Service. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID for the Service Principal associated with the Managed Service Identity of this App Service. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the identity type of the Function App. 
Possible values are SystemAssigned (where Azure will generate a Service Principal for you), UserAssigned where you can specify the Service Principal IDs in the identity_ids field, and SystemAssigned, UserAssigned which assigns both a system managed identity as well as the specified user assigned identities. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type FunctionAppSlotIdentityParameters struct { + + // Specifies a list of user managed identity ids to be assigned. Required if type is UserAssigned. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the identity type of the Function App. Possible values are SystemAssigned (where Azure will generate a Service Principal for you), UserAssigned where you can specify the Service Principal IDs in the identity_ids field, and SystemAssigned, UserAssigned which assigns both a system managed identity as well as the specified user assigned identities. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type FunctionAppSlotInitParameters struct { + + // The ID of the App Service Plan within which to create this Function App Slot. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/web/v1beta2.AppServicePlan + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + AppServicePlanID *string `json:"appServicePlanId,omitempty" tf:"app_service_plan_id,omitempty"` + + // Reference to a AppServicePlan in web to populate appServicePlanId. + // +kubebuilder:validation:Optional + AppServicePlanIDRef *v1.Reference `json:"appServicePlanIdRef,omitempty" tf:"-"` + + // Selector for a AppServicePlan in web to populate appServicePlanId. 
+ // +kubebuilder:validation:Optional + AppServicePlanIDSelector *v1.Selector `json:"appServicePlanIdSelector,omitempty" tf:"-"` + + // A key-value pair of App Settings. + // +mapType=granular + AppSettings map[string]*string `json:"appSettings,omitempty" tf:"app_settings,omitempty"` + + // An auth_settings block as defined below. + AuthSettings *FunctionAppSlotAuthSettingsInitParameters `json:"authSettings,omitempty" tf:"auth_settings,omitempty"` + + // A connection_string block as defined below. + ConnectionString []FunctionAppSlotConnectionStringInitParameters `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // The amount of memory in gigabyte-seconds that your application is allowed to consume per day. Setting this value only affects function apps under the consumption plan. + DailyMemoryTimeQuota *float64 `json:"dailyMemoryTimeQuota,omitempty" tf:"daily_memory_time_quota,omitempty"` + + // Should the built-in logging of the Function App be enabled? Defaults to true. + EnableBuiltinLogging *bool `json:"enableBuiltinLogging,omitempty" tf:"enable_builtin_logging,omitempty"` + + // Is the Function App enabled? Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Can the Function App only be accessed via HTTPS? Defaults to false. + HTTPSOnly *bool `json:"httpsOnly,omitempty" tf:"https_only,omitempty"` + + // An identity block as defined below. + Identity *FunctionAppSlotIdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A string indicating the Operating System type for this function app. The only possible value is linux. Changing this forces a new resource to be created. + OsType *string `json:"osType,omitempty" tf:"os_type,omitempty"` + + // A site_config object as defined below. 
+ SiteConfig *FunctionAppSlotSiteConfigInitParameters `json:"siteConfig,omitempty" tf:"site_config,omitempty"` + + // The backend storage account name which will be used by the Function App (such as the dashboard, logs). Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account + StorageAccountName *string `json:"storageAccountName,omitempty" tf:"storage_account_name,omitempty"` + + // Reference to a Account in storage to populate storageAccountName. + // +kubebuilder:validation:Optional + StorageAccountNameRef *v1.Reference `json:"storageAccountNameRef,omitempty" tf:"-"` + + // Selector for a Account in storage to populate storageAccountName. + // +kubebuilder:validation:Optional + StorageAccountNameSelector *v1.Selector `json:"storageAccountNameSelector,omitempty" tf:"-"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The runtime version associated with the Function App. Defaults to ~1. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type FunctionAppSlotObservation struct { + + // The ID of the App Service Plan within which to create this Function App Slot. Changing this forces a new resource to be created. + AppServicePlanID *string `json:"appServicePlanId,omitempty" tf:"app_service_plan_id,omitempty"` + + // A key-value pair of App Settings. + // +mapType=granular + AppSettings map[string]*string `json:"appSettings,omitempty" tf:"app_settings,omitempty"` + + // An auth_settings block as defined below. + AuthSettings *FunctionAppSlotAuthSettingsObservation `json:"authSettings,omitempty" tf:"auth_settings,omitempty"` + + // A connection_string block as defined below. 
+ ConnectionString []FunctionAppSlotConnectionStringObservation `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // The amount of memory in gigabyte-seconds that your application is allowed to consume per day. Setting this value only affects function apps under the consumption plan. + DailyMemoryTimeQuota *float64 `json:"dailyMemoryTimeQuota,omitempty" tf:"daily_memory_time_quota,omitempty"` + + // The default hostname associated with the Function App - such as mysite.azurewebsites.net + DefaultHostName *string `json:"defaultHostname,omitempty" tf:"default_hostname,omitempty"` + + // Should the built-in logging of the Function App be enabled? Defaults to true. + EnableBuiltinLogging *bool `json:"enableBuiltinLogging,omitempty" tf:"enable_builtin_logging,omitempty"` + + // Is the Function App enabled? Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The name of the Function App within which to create the Function App Slot. Changing this forces a new resource to be created. + FunctionAppName *string `json:"functionAppName,omitempty" tf:"function_app_name,omitempty"` + + // Can the Function App only be accessed via HTTPS? Defaults to false. + HTTPSOnly *bool `json:"httpsOnly,omitempty" tf:"https_only,omitempty"` + + // The ID of the Function App Slot + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. + Identity *FunctionAppSlotIdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // The Function App kind - such as functionapp,linux,container + Kind *string `json:"kind,omitempty" tf:"kind,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A string indicating the Operating System type for this function app. The only possible value is linux. 
Changing this forces a new resource to be created. + OsType *string `json:"osType,omitempty" tf:"os_type,omitempty"` + + // A comma separated list of outbound IP addresses - such as 52.23.25.3,52.143.43.12 + OutboundIPAddresses *string `json:"outboundIpAddresses,omitempty" tf:"outbound_ip_addresses,omitempty"` + + // A comma separated list of outbound IP addresses - such as 52.23.25.3,52.143.43.12,52.143.43.17 - not all of which are necessarily in use. Superset of outbound_ip_addresses. + PossibleOutboundIPAddresses *string `json:"possibleOutboundIpAddresses,omitempty" tf:"possible_outbound_ip_addresses,omitempty"` + + // The name of the resource group in which to create the Function App Slot. Changing this forces a new resource to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // A site_config object as defined below. + SiteConfig *FunctionAppSlotSiteConfigObservation `json:"siteConfig,omitempty" tf:"site_config,omitempty"` + + // A site_credential block as defined below, which contains the site-level credentials used to publish to this Function App Slot. + SiteCredential []FunctionAppSlotSiteCredentialObservation `json:"siteCredential,omitempty" tf:"site_credential,omitempty"` + + // The backend storage account name which will be used by the Function App (such as the dashboard, logs). Changing this forces a new resource to be created. + StorageAccountName *string `json:"storageAccountName,omitempty" tf:"storage_account_name,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The runtime version associated with the Function App. Defaults to ~1. + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type FunctionAppSlotParameters struct { + + // The ID of the App Service Plan within which to create this Function App Slot. 
Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/web/v1beta2.AppServicePlan + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + AppServicePlanID *string `json:"appServicePlanId,omitempty" tf:"app_service_plan_id,omitempty"` + + // Reference to a AppServicePlan in web to populate appServicePlanId. + // +kubebuilder:validation:Optional + AppServicePlanIDRef *v1.Reference `json:"appServicePlanIdRef,omitempty" tf:"-"` + + // Selector for a AppServicePlan in web to populate appServicePlanId. + // +kubebuilder:validation:Optional + AppServicePlanIDSelector *v1.Selector `json:"appServicePlanIdSelector,omitempty" tf:"-"` + + // A key-value pair of App Settings. + // +kubebuilder:validation:Optional + // +mapType=granular + AppSettings map[string]*string `json:"appSettings,omitempty" tf:"app_settings,omitempty"` + + // An auth_settings block as defined below. + // +kubebuilder:validation:Optional + AuthSettings *FunctionAppSlotAuthSettingsParameters `json:"authSettings,omitempty" tf:"auth_settings,omitempty"` + + // A connection_string block as defined below. + // +kubebuilder:validation:Optional + ConnectionString []FunctionAppSlotConnectionStringParameters `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // The amount of memory in gigabyte-seconds that your application is allowed to consume per day. Setting this value only affects function apps under the consumption plan. + // +kubebuilder:validation:Optional + DailyMemoryTimeQuota *float64 `json:"dailyMemoryTimeQuota,omitempty" tf:"daily_memory_time_quota,omitempty"` + + // Should the built-in logging of the Function App be enabled? Defaults to true. 
+ // +kubebuilder:validation:Optional + EnableBuiltinLogging *bool `json:"enableBuiltinLogging,omitempty" tf:"enable_builtin_logging,omitempty"` + + // Is the Function App enabled? Defaults to true. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The name of the Function App within which to create the Function App Slot. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/web/v1beta2.FunctionApp + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("name",false) + // +kubebuilder:validation:Optional + FunctionAppName *string `json:"functionAppName,omitempty" tf:"function_app_name,omitempty"` + + // Reference to a FunctionApp in web to populate functionAppName. + // +kubebuilder:validation:Optional + FunctionAppNameRef *v1.Reference `json:"functionAppNameRef,omitempty" tf:"-"` + + // Selector for a FunctionApp in web to populate functionAppName. + // +kubebuilder:validation:Optional + FunctionAppNameSelector *v1.Selector `json:"functionAppNameSelector,omitempty" tf:"-"` + + // Can the Function App only be accessed via HTTPS? Defaults to false. + // +kubebuilder:validation:Optional + HTTPSOnly *bool `json:"httpsOnly,omitempty" tf:"https_only,omitempty"` + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *FunctionAppSlotIdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A string indicating the Operating System type for this function app. The only possible value is linux. Changing this forces a new resource to be created. 
+ // +kubebuilder:validation:Optional + OsType *string `json:"osType,omitempty" tf:"os_type,omitempty"` + + // The name of the resource group in which to create the Function App Slot. Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // A site_config object as defined below. + // +kubebuilder:validation:Optional + SiteConfig *FunctionAppSlotSiteConfigParameters `json:"siteConfig,omitempty" tf:"site_config,omitempty"` + + // The access key which will be used to access the backend storage account for the Function App. + // +kubebuilder:validation:Optional + StorageAccountAccessKeySecretRef v1.SecretKeySelector `json:"storageAccountAccessKeySecretRef" tf:"-"` + + // The backend storage account name which will be used by the Function App (such as the dashboard, logs). Changing this forces a new resource to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account + // +kubebuilder:validation:Optional + StorageAccountName *string `json:"storageAccountName,omitempty" tf:"storage_account_name,omitempty"` + + // Reference to a Account in storage to populate storageAccountName. + // +kubebuilder:validation:Optional + StorageAccountNameRef *v1.Reference `json:"storageAccountNameRef,omitempty" tf:"-"` + + // Selector for a Account in storage to populate storageAccountName. 
+ // +kubebuilder:validation:Optional + StorageAccountNameSelector *v1.Selector `json:"storageAccountNameSelector,omitempty" tf:"-"` + + // A mapping of tags to assign to the resource. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The runtime version associated with the Function App. Defaults to ~1. + // +kubebuilder:validation:Optional + Version *string `json:"version,omitempty" tf:"version,omitempty"` +} + +type FunctionAppSlotSiteConfigInitParameters struct { + + // Should the Function App be loaded at all times? Defaults to false. + AlwaysOn *bool `json:"alwaysOn,omitempty" tf:"always_on,omitempty"` + + // The number of workers this function app can scale out to. Only applicable to apps on the Consumption and Premium plan. + AppScaleLimit *float64 `json:"appScaleLimit,omitempty" tf:"app_scale_limit,omitempty"` + + // The name of the slot to automatically swap to during deployment + AutoSwapSlotName *string `json:"autoSwapSlotName,omitempty" tf:"auto_swap_slot_name,omitempty"` + + // A cors block as defined below. + Cors *SiteConfigCorsInitParameters `json:"cors,omitempty" tf:"cors,omitempty"` + + // The version of the .NET framework's CLR used in this function app. Possible values are v4.0 (including .NET Core 2.1 and 3.1), v5.0 and v6.0. For more information on which .NET Framework version to use based on the runtime version you're targeting - please see this table. Defaults to v4.0. + DotnetFrameworkVersion *string `json:"dotnetFrameworkVersion,omitempty" tf:"dotnet_framework_version,omitempty"` + + // The number of minimum instances for this function app. Only applicable to apps on the Premium plan. + ElasticInstanceMinimum *float64 `json:"elasticInstanceMinimum,omitempty" tf:"elastic_instance_minimum,omitempty"` + + // State of FTP / FTPS service for this function app. Possible values include: AllAllowed, FtpsOnly and Disabled. 
+ FtpsState *string `json:"ftpsState,omitempty" tf:"ftps_state,omitempty"` + + // Path which will be checked for this function app health. + HealthCheckPath *string `json:"healthCheckPath,omitempty" tf:"health_check_path,omitempty"` + + // Specifies whether or not the HTTP2 protocol should be enabled. Defaults to false. + Http2Enabled *bool `json:"http2Enabled,omitempty" tf:"http2_enabled,omitempty"` + + // A list of ip_restriction objects representing IP restrictions as defined below. + IPRestriction []SiteConfigIPRestrictionInitParameters `json:"ipRestriction,omitempty" tf:"ip_restriction,omitempty"` + + // Java version hosted by the function app in Azure. Possible values are 1.8, 11 & 17 (In-Preview). + JavaVersion *string `json:"javaVersion,omitempty" tf:"java_version,omitempty"` + + // Linux App Framework and version for the AppService, e.g. DOCKER|(golang:latest). + LinuxFxVersion *string `json:"linuxFxVersion,omitempty" tf:"linux_fx_version,omitempty"` + + // The minimum supported TLS version for the function app. Possible values are 1.0, 1.1, and 1.2. Defaults to 1.2 for new function apps. + MinTLSVersion *string `json:"minTlsVersion,omitempty" tf:"min_tls_version,omitempty"` + + // The number of pre-warmed instances for this function app. Only affects apps on the Premium plan. + PreWarmedInstanceCount *float64 `json:"preWarmedInstanceCount,omitempty" tf:"pre_warmed_instance_count,omitempty"` + + // Should Runtime Scale Monitoring be enabled?. Only applicable to apps on the Premium plan. Defaults to false. + RuntimeScaleMonitoringEnabled *bool `json:"runtimeScaleMonitoringEnabled,omitempty" tf:"runtime_scale_monitoring_enabled,omitempty"` + + // A list of scm_ip_restriction objects representing IP restrictions as defined below. + ScmIPRestriction []SiteConfigScmIPRestrictionInitParameters `json:"scmIpRestriction,omitempty" tf:"scm_ip_restriction,omitempty"` + + // The type of Source Control used by this function App. 
Valid values include: BitBucketGit, BitBucketHg, CodePlexGit, CodePlexHg, Dropbox, ExternalGit, ExternalHg, GitHub, LocalGit, None (default), OneDrive, Tfs, VSO, and VSTSRM. + ScmType *string `json:"scmType,omitempty" tf:"scm_type,omitempty"` + + // IP security restrictions for scm to use main. Defaults to false. + ScmUseMainIPRestriction *bool `json:"scmUseMainIpRestriction,omitempty" tf:"scm_use_main_ip_restriction,omitempty"` + + // Should the Function App run in 32 bit mode, rather than 64 bit mode? Defaults to true. + Use32BitWorkerProcess *bool `json:"use32BitWorkerProcess,omitempty" tf:"use_32_bit_worker_process,omitempty"` + + // Is the Function App enabled? Defaults to true. + VnetRouteAllEnabled *bool `json:"vnetRouteAllEnabled,omitempty" tf:"vnet_route_all_enabled,omitempty"` + + // Should WebSockets be enabled? + WebsocketsEnabled *bool `json:"websocketsEnabled,omitempty" tf:"websockets_enabled,omitempty"` +} + +type FunctionAppSlotSiteConfigObservation struct { + + // Should the Function App be loaded at all times? Defaults to false. + AlwaysOn *bool `json:"alwaysOn,omitempty" tf:"always_on,omitempty"` + + // The number of workers this function app can scale out to. Only applicable to apps on the Consumption and Premium plan. + AppScaleLimit *float64 `json:"appScaleLimit,omitempty" tf:"app_scale_limit,omitempty"` + + // The name of the slot to automatically swap to during deployment + AutoSwapSlotName *string `json:"autoSwapSlotName,omitempty" tf:"auto_swap_slot_name,omitempty"` + + // A cors block as defined below. + Cors *SiteConfigCorsObservation `json:"cors,omitempty" tf:"cors,omitempty"` + + // The version of the .NET framework's CLR used in this function app. Possible values are v4.0 (including .NET Core 2.1 and 3.1), v5.0 and v6.0. For more information on which .NET Framework version to use based on the runtime version you're targeting - please see this table. Defaults to v4.0. 
+ DotnetFrameworkVersion *string `json:"dotnetFrameworkVersion,omitempty" tf:"dotnet_framework_version,omitempty"` + + // The number of minimum instances for this function app. Only applicable to apps on the Premium plan. + ElasticInstanceMinimum *float64 `json:"elasticInstanceMinimum,omitempty" tf:"elastic_instance_minimum,omitempty"` + + // State of FTP / FTPS service for this function app. Possible values include: AllAllowed, FtpsOnly and Disabled. + FtpsState *string `json:"ftpsState,omitempty" tf:"ftps_state,omitempty"` + + // Path which will be checked for this function app health. + HealthCheckPath *string `json:"healthCheckPath,omitempty" tf:"health_check_path,omitempty"` + + // Specifies whether or not the HTTP2 protocol should be enabled. Defaults to false. + Http2Enabled *bool `json:"http2Enabled,omitempty" tf:"http2_enabled,omitempty"` + + // A list of ip_restriction objects representing IP restrictions as defined below. + IPRestriction []SiteConfigIPRestrictionObservation `json:"ipRestriction,omitempty" tf:"ip_restriction,omitempty"` + + // Java version hosted by the function app in Azure. Possible values are 1.8, 11 & 17 (In-Preview). + JavaVersion *string `json:"javaVersion,omitempty" tf:"java_version,omitempty"` + + // Linux App Framework and version for the AppService, e.g. DOCKER|(golang:latest). + LinuxFxVersion *string `json:"linuxFxVersion,omitempty" tf:"linux_fx_version,omitempty"` + + // The minimum supported TLS version for the function app. Possible values are 1.0, 1.1, and 1.2. Defaults to 1.2 for new function apps. + MinTLSVersion *string `json:"minTlsVersion,omitempty" tf:"min_tls_version,omitempty"` + + // The number of pre-warmed instances for this function app. Only affects apps on the Premium plan. + PreWarmedInstanceCount *float64 `json:"preWarmedInstanceCount,omitempty" tf:"pre_warmed_instance_count,omitempty"` + + // Should Runtime Scale Monitoring be enabled?. Only applicable to apps on the Premium plan. Defaults to false. 
+ RuntimeScaleMonitoringEnabled *bool `json:"runtimeScaleMonitoringEnabled,omitempty" tf:"runtime_scale_monitoring_enabled,omitempty"` + + // A list of scm_ip_restriction objects representing IP restrictions as defined below. + ScmIPRestriction []SiteConfigScmIPRestrictionObservation `json:"scmIpRestriction,omitempty" tf:"scm_ip_restriction,omitempty"` + + // The type of Source Control used by this function App. Valid values include: BitBucketGit, BitBucketHg, CodePlexGit, CodePlexHg, Dropbox, ExternalGit, ExternalHg, GitHub, LocalGit, None (default), OneDrive, Tfs, VSO, and VSTSRM. + ScmType *string `json:"scmType,omitempty" tf:"scm_type,omitempty"` + + // IP security restrictions for scm to use main. Defaults to false. + ScmUseMainIPRestriction *bool `json:"scmUseMainIpRestriction,omitempty" tf:"scm_use_main_ip_restriction,omitempty"` + + // Should the Function App run in 32 bit mode, rather than 64 bit mode? Defaults to true. + Use32BitWorkerProcess *bool `json:"use32BitWorkerProcess,omitempty" tf:"use_32_bit_worker_process,omitempty"` + + // Is the Function App enabled? Defaults to true. + VnetRouteAllEnabled *bool `json:"vnetRouteAllEnabled,omitempty" tf:"vnet_route_all_enabled,omitempty"` + + // Should WebSockets be enabled? + WebsocketsEnabled *bool `json:"websocketsEnabled,omitempty" tf:"websockets_enabled,omitempty"` +} + +type FunctionAppSlotSiteConfigParameters struct { + + // Should the Function App be loaded at all times? Defaults to false. + // +kubebuilder:validation:Optional + AlwaysOn *bool `json:"alwaysOn,omitempty" tf:"always_on,omitempty"` + + // The number of workers this function app can scale out to. Only applicable to apps on the Consumption and Premium plan. 
+ // +kubebuilder:validation:Optional + AppScaleLimit *float64 `json:"appScaleLimit,omitempty" tf:"app_scale_limit,omitempty"` + + // The name of the slot to automatically swap to during deployment + // +kubebuilder:validation:Optional + AutoSwapSlotName *string `json:"autoSwapSlotName,omitempty" tf:"auto_swap_slot_name,omitempty"` + + // A cors block as defined below. + // +kubebuilder:validation:Optional + Cors *SiteConfigCorsParameters `json:"cors,omitempty" tf:"cors,omitempty"` + + // The version of the .NET framework's CLR used in this function app. Possible values are v4.0 (including .NET Core 2.1 and 3.1), v5.0 and v6.0. For more information on which .NET Framework version to use based on the runtime version you're targeting - please see this table. Defaults to v4.0. + // +kubebuilder:validation:Optional + DotnetFrameworkVersion *string `json:"dotnetFrameworkVersion,omitempty" tf:"dotnet_framework_version,omitempty"` + + // The number of minimum instances for this function app. Only applicable to apps on the Premium plan. + // +kubebuilder:validation:Optional + ElasticInstanceMinimum *float64 `json:"elasticInstanceMinimum,omitempty" tf:"elastic_instance_minimum,omitempty"` + + // State of FTP / FTPS service for this function app. Possible values include: AllAllowed, FtpsOnly and Disabled. + // +kubebuilder:validation:Optional + FtpsState *string `json:"ftpsState,omitempty" tf:"ftps_state,omitempty"` + + // Path which will be checked for this function app health. + // +kubebuilder:validation:Optional + HealthCheckPath *string `json:"healthCheckPath,omitempty" tf:"health_check_path,omitempty"` + + // Specifies whether or not the HTTP2 protocol should be enabled. Defaults to false. + // +kubebuilder:validation:Optional + Http2Enabled *bool `json:"http2Enabled,omitempty" tf:"http2_enabled,omitempty"` + + // A list of ip_restriction objects representing IP restrictions as defined below. 
+ // +kubebuilder:validation:Optional + IPRestriction []SiteConfigIPRestrictionParameters `json:"ipRestriction,omitempty" tf:"ip_restriction,omitempty"` + + // Java version hosted by the function app in Azure. Possible values are 1.8, 11 & 17 (In-Preview). + // +kubebuilder:validation:Optional + JavaVersion *string `json:"javaVersion,omitempty" tf:"java_version,omitempty"` + + // Linux App Framework and version for the AppService, e.g. DOCKER|(golang:latest). + // +kubebuilder:validation:Optional + LinuxFxVersion *string `json:"linuxFxVersion,omitempty" tf:"linux_fx_version,omitempty"` + + // The minimum supported TLS version for the function app. Possible values are 1.0, 1.1, and 1.2. Defaults to 1.2 for new function apps. + // +kubebuilder:validation:Optional + MinTLSVersion *string `json:"minTlsVersion,omitempty" tf:"min_tls_version,omitempty"` + + // The number of pre-warmed instances for this function app. Only affects apps on the Premium plan. + // +kubebuilder:validation:Optional + PreWarmedInstanceCount *float64 `json:"preWarmedInstanceCount,omitempty" tf:"pre_warmed_instance_count,omitempty"` + + // Should Runtime Scale Monitoring be enabled?. Only applicable to apps on the Premium plan. Defaults to false. + // +kubebuilder:validation:Optional + RuntimeScaleMonitoringEnabled *bool `json:"runtimeScaleMonitoringEnabled,omitempty" tf:"runtime_scale_monitoring_enabled,omitempty"` + + // A list of scm_ip_restriction objects representing IP restrictions as defined below. + // +kubebuilder:validation:Optional + ScmIPRestriction []SiteConfigScmIPRestrictionParameters `json:"scmIpRestriction,omitempty" tf:"scm_ip_restriction,omitempty"` + + // The type of Source Control used by this function App. Valid values include: BitBucketGit, BitBucketHg, CodePlexGit, CodePlexHg, Dropbox, ExternalGit, ExternalHg, GitHub, LocalGit, None (default), OneDrive, Tfs, VSO, and VSTSRM. 
+ // +kubebuilder:validation:Optional + ScmType *string `json:"scmType,omitempty" tf:"scm_type,omitempty"` + + // IP security restrictions for scm to use main. Defaults to false. + // +kubebuilder:validation:Optional + ScmUseMainIPRestriction *bool `json:"scmUseMainIpRestriction,omitempty" tf:"scm_use_main_ip_restriction,omitempty"` + + // Should the Function App run in 32 bit mode, rather than 64 bit mode? Defaults to true. + // +kubebuilder:validation:Optional + Use32BitWorkerProcess *bool `json:"use32BitWorkerProcess,omitempty" tf:"use_32_bit_worker_process,omitempty"` + + // Is the Function App enabled? Defaults to true. + // +kubebuilder:validation:Optional + VnetRouteAllEnabled *bool `json:"vnetRouteAllEnabled,omitempty" tf:"vnet_route_all_enabled,omitempty"` + + // Should WebSockets be enabled? + // +kubebuilder:validation:Optional + WebsocketsEnabled *bool `json:"websocketsEnabled,omitempty" tf:"websockets_enabled,omitempty"` +} + +type FunctionAppSlotSiteCredentialInitParameters struct { +} + +type FunctionAppSlotSiteCredentialObservation struct { + + // The password associated with the username, which can be used to publish to this App Service. + Password *string `json:"password,omitempty" tf:"password,omitempty"` + + // The username which can be used to publish to this App Service + Username *string `json:"username,omitempty" tf:"username,omitempty"` +} + +type FunctionAppSlotSiteCredentialParameters struct { +} + +type IPRestrictionHeadersInitParameters struct { + + // A list of allowed Azure FrontDoor IDs in UUID notation with a maximum of 8. + // +listType=set + XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid"` + + // A list to allow the Azure FrontDoor health probe header. Only allowed value is "1". 
+ // +listType=set + XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe"` + + // A list of allowed 'X-Forwarded-For' IPs in CIDR notation with a maximum of 8 + // +listType=set + XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for"` + + // A list of allowed 'X-Forwarded-Host' domains with a maximum of 8. + // +listType=set + XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host"` +} + +type IPRestrictionHeadersObservation struct { + + // A list of allowed Azure FrontDoor IDs in UUID notation with a maximum of 8. + // +listType=set + XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid,omitempty"` + + // A list to allow the Azure FrontDoor health probe header. Only allowed value is "1". + // +listType=set + XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe,omitempty"` + + // A list of allowed 'X-Forwarded-For' IPs in CIDR notation with a maximum of 8 + // +listType=set + XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for,omitempty"` + + // A list of allowed 'X-Forwarded-Host' domains with a maximum of 8. + // +listType=set + XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host,omitempty"` +} + +type IPRestrictionHeadersParameters struct { + + // A list of allowed Azure FrontDoor IDs in UUID notation with a maximum of 8. + // +kubebuilder:validation:Optional + // +listType=set + XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid"` + + // A list to allow the Azure FrontDoor health probe header. Only allowed value is "1". 
+ // +kubebuilder:validation:Optional + // +listType=set + XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe"` + + // A list of allowed 'X-Forwarded-For' IPs in CIDR notation with a maximum of 8 + // +kubebuilder:validation:Optional + // +listType=set + XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for"` + + // A list of allowed 'X-Forwarded-Host' domains with a maximum of 8. + // +kubebuilder:validation:Optional + // +listType=set + XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host"` +} + +type SiteConfigCorsInitParameters struct { + + // A list of origins which should be able to make cross-origin calls. * can be used to allow all calls. + // +listType=set + AllowedOrigins []*string `json:"allowedOrigins,omitempty" tf:"allowed_origins,omitempty"` + + // Are credentials supported? + SupportCredentials *bool `json:"supportCredentials,omitempty" tf:"support_credentials,omitempty"` +} + +type SiteConfigCorsObservation struct { + + // A list of origins which should be able to make cross-origin calls. * can be used to allow all calls. + // +listType=set + AllowedOrigins []*string `json:"allowedOrigins,omitempty" tf:"allowed_origins,omitempty"` + + // Are credentials supported? + SupportCredentials *bool `json:"supportCredentials,omitempty" tf:"support_credentials,omitempty"` +} + +type SiteConfigCorsParameters struct { + + // A list of origins which should be able to make cross-origin calls. * can be used to allow all calls. + // +kubebuilder:validation:Optional + // +listType=set + AllowedOrigins []*string `json:"allowedOrigins" tf:"allowed_origins,omitempty"` + + // Are credentials supported? + // +kubebuilder:validation:Optional + SupportCredentials *bool `json:"supportCredentials,omitempty" tf:"support_credentials,omitempty"` +} + +type SiteConfigIPRestrictionInitParameters struct { + + // Allow or Deny access for this IP range. Defaults to Allow. 
+ Action *string `json:"action,omitempty" tf:"action"` + + // The headers block for this specific scm_ip_restriction as defined below. + Headers []IPRestrictionHeadersInitParameters `json:"headers,omitempty" tf:"headers"` + + // The IP Address used for this IP Restriction in CIDR notation. + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address"` + + // The name for this IP Restriction. + Name *string `json:"name,omitempty" tf:"name"` + + // The priority for this IP Restriction. Restrictions are enforced in priority order. By default, priority is set to 65000 if not specified. + Priority *float64 `json:"priority,omitempty" tf:"priority"` + + // The Service Tag used for this IP Restriction. + ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag"` + + // The Virtual Network Subnet ID used for this IP Restriction. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id"` + + // Reference to a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDRef *v1.Reference `json:"virtualNetworkSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDSelector *v1.Selector `json:"virtualNetworkSubnetIdSelector,omitempty" tf:"-"` +} + +type SiteConfigIPRestrictionObservation struct { + + // Allow or Deny access for this IP range. Defaults to Allow. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The headers block for this specific scm_ip_restriction as defined below. 
+ Headers []IPRestrictionHeadersObservation `json:"headers,omitempty" tf:"headers,omitempty"` + + // The IP Address used for this IP Restriction in CIDR notation. + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // The name for this IP Restriction. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The priority for this IP Restriction. Restrictions are enforced in priority order. By default, priority is set to 65000 if not specified. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Service Tag used for this IP Restriction. + ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag,omitempty"` + + // The Virtual Network Subnet ID used for this IP Restriction. + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` +} + +type SiteConfigIPRestrictionParameters struct { + + // Allow or Deny access for this IP range. Defaults to Allow. + // +kubebuilder:validation:Optional + Action *string `json:"action,omitempty" tf:"action"` + + // The headers block for this specific scm_ip_restriction as defined below. + // +kubebuilder:validation:Optional + Headers []IPRestrictionHeadersParameters `json:"headers,omitempty" tf:"headers"` + + // The IP Address used for this IP Restriction in CIDR notation. + // +kubebuilder:validation:Optional + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address"` + + // The name for this IP Restriction. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name"` + + // The priority for this IP Restriction. Restrictions are enforced in priority order. By default, priority is set to 65000 if not specified. + // +kubebuilder:validation:Optional + Priority *float64 `json:"priority,omitempty" tf:"priority"` + + // The Service Tag used for this IP Restriction. 
+ // +kubebuilder:validation:Optional + ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag"` + + // The Virtual Network Subnet ID used for this IP Restriction. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id"` + + // Reference to a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDRef *v1.Reference `json:"virtualNetworkSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDSelector *v1.Selector `json:"virtualNetworkSubnetIdSelector,omitempty" tf:"-"` +} + +type SiteConfigScmIPRestrictionHeadersInitParameters struct { + + // A list of allowed Azure FrontDoor IDs in UUID notation with a maximum of 8. + // +listType=set + XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid"` + + // A list to allow the Azure FrontDoor health probe header. Only allowed value is "1". + // +listType=set + XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe"` + + // A list of allowed 'X-Forwarded-For' IPs in CIDR notation with a maximum of 8 + // +listType=set + XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for"` + + // A list of allowed 'X-Forwarded-Host' domains with a maximum of 8. + // +listType=set + XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host"` +} + +type SiteConfigScmIPRestrictionHeadersObservation struct { + + // A list of allowed Azure FrontDoor IDs in UUID notation with a maximum of 8. 
+ // +listType=set + XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid,omitempty"` + + // A list to allow the Azure FrontDoor health probe header. Only allowed value is "1". + // +listType=set + XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe,omitempty"` + + // A list of allowed 'X-Forwarded-For' IPs in CIDR notation with a maximum of 8 + // +listType=set + XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for,omitempty"` + + // A list of allowed 'X-Forwarded-Host' domains with a maximum of 8. + // +listType=set + XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host,omitempty"` +} + +type SiteConfigScmIPRestrictionHeadersParameters struct { + + // A list of allowed Azure FrontDoor IDs in UUID notation with a maximum of 8. + // +kubebuilder:validation:Optional + // +listType=set + XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid"` + + // A list to allow the Azure FrontDoor health probe header. Only allowed value is "1". + // +kubebuilder:validation:Optional + // +listType=set + XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe"` + + // A list of allowed 'X-Forwarded-For' IPs in CIDR notation with a maximum of 8 + // +kubebuilder:validation:Optional + // +listType=set + XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for"` + + // A list of allowed 'X-Forwarded-Host' domains with a maximum of 8. + // +kubebuilder:validation:Optional + // +listType=set + XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host"` +} + +type SiteConfigScmIPRestrictionInitParameters struct { + + // Allow or Deny access for this IP range. Defaults to Allow. + Action *string `json:"action,omitempty" tf:"action"` + + // The headers block for this specific scm_ip_restriction as defined below. 
+ Headers []SiteConfigScmIPRestrictionHeadersInitParameters `json:"headers,omitempty" tf:"headers"` + + // The IP Address used for this IP Restriction in CIDR notation. + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address"` + + // The name for this IP Restriction. + Name *string `json:"name,omitempty" tf:"name"` + + // The priority for this IP Restriction. Restrictions are enforced in priority order. By default, priority is set to 65000 if not specified. + Priority *float64 `json:"priority,omitempty" tf:"priority"` + + // The Service Tag used for this IP Restriction. + ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag"` + + // The Virtual Network Subnet ID used for this IP Restriction. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id"` + + // Reference to a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDRef *v1.Reference `json:"virtualNetworkSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDSelector *v1.Selector `json:"virtualNetworkSubnetIdSelector,omitempty" tf:"-"` +} + +type SiteConfigScmIPRestrictionObservation struct { + + // Allow or Deny access for this IP range. Defaults to Allow. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The headers block for this specific scm_ip_restriction as defined below. + Headers []SiteConfigScmIPRestrictionHeadersObservation `json:"headers,omitempty" tf:"headers,omitempty"` + + // The IP Address used for this IP Restriction in CIDR notation. 
+ IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // The name for this IP Restriction. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The priority for this IP Restriction. Restrictions are enforced in priority order. By default, priority is set to 65000 if not specified. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Service Tag used for this IP Restriction. + ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag,omitempty"` + + // The Virtual Network Subnet ID used for this IP Restriction. + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` +} + +type SiteConfigScmIPRestrictionParameters struct { + + // Allow or Deny access for this IP range. Defaults to Allow. + // +kubebuilder:validation:Optional + Action *string `json:"action,omitempty" tf:"action"` + + // The headers block for this specific scm_ip_restriction as defined below. + // +kubebuilder:validation:Optional + Headers []SiteConfigScmIPRestrictionHeadersParameters `json:"headers,omitempty" tf:"headers"` + + // The IP Address used for this IP Restriction in CIDR notation. + // +kubebuilder:validation:Optional + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address"` + + // The name for this IP Restriction. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name"` + + // The priority for this IP Restriction. Restrictions are enforced in priority order. By default, priority is set to 65000 if not specified. + // +kubebuilder:validation:Optional + Priority *float64 `json:"priority,omitempty" tf:"priority"` + + // The Service Tag used for this IP Restriction. + // +kubebuilder:validation:Optional + ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag"` + + // The Virtual Network Subnet ID used for this IP Restriction. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id"` + + // Reference to a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDRef *v1.Reference `json:"virtualNetworkSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDSelector *v1.Selector `json:"virtualNetworkSubnetIdSelector,omitempty" tf:"-"` +} + +// FunctionAppSlotSpec defines the desired state of FunctionAppSlot +type FunctionAppSlotSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider FunctionAppSlotParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider FunctionAppSlotInitParameters `json:"initProvider,omitempty"` +} + +// FunctionAppSlotStatus defines the observed state of FunctionAppSlot. 
+type FunctionAppSlotStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider FunctionAppSlotObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// FunctionAppSlot is the Schema for the FunctionAppSlots API. Manages a Function App Deployment Slot. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type FunctionAppSlot struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.storageAccountAccessKeySecretRef)",message="spec.forProvider.storageAccountAccessKeySecretRef is a required parameter" + Spec FunctionAppSlotSpec `json:"spec"` + Status FunctionAppSlotStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// FunctionAppSlotList contains a list of FunctionAppSlots +type FunctionAppSlotList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []FunctionAppSlot `json:"items"` +} + +// Repository type metadata. 
+var ( + FunctionAppSlot_Kind = "FunctionAppSlot" + FunctionAppSlot_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: FunctionAppSlot_Kind}.String() + FunctionAppSlot_KindAPIVersion = FunctionAppSlot_Kind + "." + CRDGroupVersion.String() + FunctionAppSlot_GroupVersionKind = CRDGroupVersion.WithKind(FunctionAppSlot_Kind) +) + +func init() { + SchemeBuilder.Register(&FunctionAppSlot{}, &FunctionAppSlotList{}) +} diff --git a/apis/web/v1beta2/zz_generated.conversion_hubs.go b/apis/web/v1beta2/zz_generated.conversion_hubs.go new file mode 100755 index 000000000..a196a1508 --- /dev/null +++ b/apis/web/v1beta2/zz_generated.conversion_hubs.go @@ -0,0 +1,43 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +// Hub marks this type as a conversion hub. +func (tr *AppServicePlan) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *FunctionApp) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *FunctionAppSlot) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *LinuxFunctionApp) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *LinuxFunctionAppSlot) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *LinuxWebApp) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *LinuxWebAppSlot) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *StaticSite) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *WindowsFunctionApp) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *WindowsFunctionAppSlot) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *WindowsWebApp) Hub() {} + +// Hub marks this type as a conversion hub. 
+func (tr *WindowsWebAppSlot) Hub() {} diff --git a/apis/web/v1beta2/zz_generated.deepcopy.go b/apis/web/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..5dc18741a --- /dev/null +++ b/apis/web/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,58545 @@ +//go:build !ignore_autogenerated + +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionCustomActionInitParameters) DeepCopyInto(out *ActionCustomActionInitParameters) { + *out = *in + if in.Executable != nil { + in, out := &in.Executable, &out.Executable + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionCustomActionInitParameters. +func (in *ActionCustomActionInitParameters) DeepCopy() *ActionCustomActionInitParameters { + if in == nil { + return nil + } + out := new(ActionCustomActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionCustomActionObservation) DeepCopyInto(out *ActionCustomActionObservation) { + *out = *in + if in.Executable != nil { + in, out := &in.Executable, &out.Executable + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionCustomActionObservation. 
+func (in *ActionCustomActionObservation) DeepCopy() *ActionCustomActionObservation { + if in == nil { + return nil + } + out := new(ActionCustomActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionCustomActionParameters) DeepCopyInto(out *ActionCustomActionParameters) { + *out = *in + if in.Executable != nil { + in, out := &in.Executable, &out.Executable + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionCustomActionParameters. +func (in *ActionCustomActionParameters) DeepCopy() *ActionCustomActionParameters { + if in == nil { + return nil + } + out := new(ActionCustomActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionInitParameters) DeepCopyInto(out *ActionInitParameters) { + *out = *in + if in.ActionType != nil { + in, out := &in.ActionType, &out.ActionType + *out = new(string) + **out = **in + } + if in.MinimumProcessExecutionTime != nil { + in, out := &in.MinimumProcessExecutionTime, &out.MinimumProcessExecutionTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionInitParameters. +func (in *ActionInitParameters) DeepCopy() *ActionInitParameters { + if in == nil { + return nil + } + out := new(ActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ActionObservation) DeepCopyInto(out *ActionObservation) { + *out = *in + if in.ActionType != nil { + in, out := &in.ActionType, &out.ActionType + *out = new(string) + **out = **in + } + if in.MinimumProcessExecutionTime != nil { + in, out := &in.MinimumProcessExecutionTime, &out.MinimumProcessExecutionTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionObservation. +func (in *ActionObservation) DeepCopy() *ActionObservation { + if in == nil { + return nil + } + out := new(ActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionParameters) DeepCopyInto(out *ActionParameters) { + *out = *in + if in.ActionType != nil { + in, out := &in.ActionType, &out.ActionType + *out = new(string) + **out = **in + } + if in.MinimumProcessExecutionTime != nil { + in, out := &in.MinimumProcessExecutionTime, &out.MinimumProcessExecutionTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionParameters. +func (in *ActionParameters) DeepCopy() *ActionParameters { + if in == nil { + return nil + } + out := new(ActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ActiveDirectoryInitParameters) DeepCopyInto(out *ActiveDirectoryInitParameters) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActiveDirectoryInitParameters. +func (in *ActiveDirectoryInitParameters) DeepCopy() *ActiveDirectoryInitParameters { + if in == nil { + return nil + } + out := new(ActiveDirectoryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActiveDirectoryObservation) DeepCopyInto(out *ActiveDirectoryObservation) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActiveDirectoryObservation. +func (in *ActiveDirectoryObservation) DeepCopy() *ActiveDirectoryObservation { + if in == nil { + return nil + } + out := new(ActiveDirectoryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ActiveDirectoryParameters) DeepCopyInto(out *ActiveDirectoryParameters) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSecretRef != nil { + in, out := &in.ClientSecretSecretRef, &out.ClientSecretSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActiveDirectoryParameters. +func (in *ActiveDirectoryParameters) DeepCopy() *ActiveDirectoryParameters { + if in == nil { + return nil + } + out := new(ActiveDirectoryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ActiveDirectoryV2InitParameters) DeepCopyInto(out *ActiveDirectoryV2InitParameters) { + *out = *in + if in.AllowedApplications != nil { + in, out := &in.AllowedApplications, &out.AllowedApplications + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedGroups != nil { + in, out := &in.AllowedGroups, &out.AllowedGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedIdentities != nil { + in, out := &in.AllowedIdentities, &out.AllowedIdentities + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretCertificateThumbprint != nil { + in, out := &in.ClientSecretCertificateThumbprint, &out.ClientSecretCertificateThumbprint + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.JwtAllowedClientApplications != nil { + in, out := &in.JwtAllowedClientApplications, &out.JwtAllowedClientApplications + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.JwtAllowedGroups != nil { + in, out := &in.JwtAllowedGroups, &out.JwtAllowedGroups + *out = make([]*string, len(*in)) + for i 
:= range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LoginParameters != nil { + in, out := &in.LoginParameters, &out.LoginParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TenantAuthEndpoint != nil { + in, out := &in.TenantAuthEndpoint, &out.TenantAuthEndpoint + *out = new(string) + **out = **in + } + if in.WwwAuthenticationDisabled != nil { + in, out := &in.WwwAuthenticationDisabled, &out.WwwAuthenticationDisabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActiveDirectoryV2InitParameters. +func (in *ActiveDirectoryV2InitParameters) DeepCopy() *ActiveDirectoryV2InitParameters { + if in == nil { + return nil + } + out := new(ActiveDirectoryV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ActiveDirectoryV2Observation) DeepCopyInto(out *ActiveDirectoryV2Observation) { + *out = *in + if in.AllowedApplications != nil { + in, out := &in.AllowedApplications, &out.AllowedApplications + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedGroups != nil { + in, out := &in.AllowedGroups, &out.AllowedGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedIdentities != nil { + in, out := &in.AllowedIdentities, &out.AllowedIdentities + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretCertificateThumbprint != nil { + in, out := &in.ClientSecretCertificateThumbprint, &out.ClientSecretCertificateThumbprint + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.JwtAllowedClientApplications != nil { + in, out := &in.JwtAllowedClientApplications, &out.JwtAllowedClientApplications + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.JwtAllowedGroups != nil { + in, out := &in.JwtAllowedGroups, &out.JwtAllowedGroups + *out = make([]*string, len(*in)) + for i := 
range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LoginParameters != nil { + in, out := &in.LoginParameters, &out.LoginParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TenantAuthEndpoint != nil { + in, out := &in.TenantAuthEndpoint, &out.TenantAuthEndpoint + *out = new(string) + **out = **in + } + if in.WwwAuthenticationDisabled != nil { + in, out := &in.WwwAuthenticationDisabled, &out.WwwAuthenticationDisabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActiveDirectoryV2Observation. +func (in *ActiveDirectoryV2Observation) DeepCopy() *ActiveDirectoryV2Observation { + if in == nil { + return nil + } + out := new(ActiveDirectoryV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ActiveDirectoryV2Parameters) DeepCopyInto(out *ActiveDirectoryV2Parameters) { + *out = *in + if in.AllowedApplications != nil { + in, out := &in.AllowedApplications, &out.AllowedApplications + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedGroups != nil { + in, out := &in.AllowedGroups, &out.AllowedGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedIdentities != nil { + in, out := &in.AllowedIdentities, &out.AllowedIdentities + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretCertificateThumbprint != nil { + in, out := &in.ClientSecretCertificateThumbprint, &out.ClientSecretCertificateThumbprint + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.JwtAllowedClientApplications != nil { + in, out := &in.JwtAllowedClientApplications, &out.JwtAllowedClientApplications + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.JwtAllowedGroups != nil { + in, out := &in.JwtAllowedGroups, &out.JwtAllowedGroups + *out = make([]*string, len(*in)) + for i := range 
*in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LoginParameters != nil { + in, out := &in.LoginParameters, &out.LoginParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TenantAuthEndpoint != nil { + in, out := &in.TenantAuthEndpoint, &out.TenantAuthEndpoint + *out = new(string) + **out = **in + } + if in.WwwAuthenticationDisabled != nil { + in, out := &in.WwwAuthenticationDisabled, &out.WwwAuthenticationDisabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActiveDirectoryV2Parameters. +func (in *ActiveDirectoryV2Parameters) DeepCopy() *ActiveDirectoryV2Parameters { + if in == nil { + return nil + } + out := new(ActiveDirectoryV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppServiceLogsInitParameters) DeepCopyInto(out *AppServiceLogsInitParameters) { + *out = *in + if in.DiskQuotaMb != nil { + in, out := &in.DiskQuotaMb, &out.DiskQuotaMb + *out = new(float64) + **out = **in + } + if in.RetentionPeriodDays != nil { + in, out := &in.RetentionPeriodDays, &out.RetentionPeriodDays + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppServiceLogsInitParameters. +func (in *AppServiceLogsInitParameters) DeepCopy() *AppServiceLogsInitParameters { + if in == nil { + return nil + } + out := new(AppServiceLogsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *AppServiceLogsObservation) DeepCopyInto(out *AppServiceLogsObservation) { + *out = *in + if in.DiskQuotaMb != nil { + in, out := &in.DiskQuotaMb, &out.DiskQuotaMb + *out = new(float64) + **out = **in + } + if in.RetentionPeriodDays != nil { + in, out := &in.RetentionPeriodDays, &out.RetentionPeriodDays + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppServiceLogsObservation. +func (in *AppServiceLogsObservation) DeepCopy() *AppServiceLogsObservation { + if in == nil { + return nil + } + out := new(AppServiceLogsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppServiceLogsParameters) DeepCopyInto(out *AppServiceLogsParameters) { + *out = *in + if in.DiskQuotaMb != nil { + in, out := &in.DiskQuotaMb, &out.DiskQuotaMb + *out = new(float64) + **out = **in + } + if in.RetentionPeriodDays != nil { + in, out := &in.RetentionPeriodDays, &out.RetentionPeriodDays + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppServiceLogsParameters. +func (in *AppServiceLogsParameters) DeepCopy() *AppServiceLogsParameters { + if in == nil { + return nil + } + out := new(AppServiceLogsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppServicePlan) DeepCopyInto(out *AppServicePlan) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppServicePlan. 
+func (in *AppServicePlan) DeepCopy() *AppServicePlan { + if in == nil { + return nil + } + out := new(AppServicePlan) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AppServicePlan) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppServicePlanInitParameters) DeepCopyInto(out *AppServicePlanInitParameters) { + *out = *in + if in.AppServiceEnvironmentID != nil { + in, out := &in.AppServiceEnvironmentID, &out.AppServiceEnvironmentID + *out = new(string) + **out = **in + } + if in.IsXenon != nil { + in, out := &in.IsXenon, &out.IsXenon + *out = new(bool) + **out = **in + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MaximumElasticWorkerCount != nil { + in, out := &in.MaximumElasticWorkerCount, &out.MaximumElasticWorkerCount + *out = new(float64) + **out = **in + } + if in.PerSiteScaling != nil { + in, out := &in.PerSiteScaling, &out.PerSiteScaling + *out = new(bool) + **out = **in + } + if in.Reserved != nil { + in, out := &in.Reserved, &out.Reserved + *out = new(bool) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(SkuInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ZoneRedundant != nil { + in, out := &in.ZoneRedundant, &out.ZoneRedundant + *out = 
new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppServicePlanInitParameters. +func (in *AppServicePlanInitParameters) DeepCopy() *AppServicePlanInitParameters { + if in == nil { + return nil + } + out := new(AppServicePlanInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppServicePlanList) DeepCopyInto(out *AppServicePlanList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AppServicePlan, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppServicePlanList. +func (in *AppServicePlanList) DeepCopy() *AppServicePlanList { + if in == nil { + return nil + } + out := new(AppServicePlanList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AppServicePlanList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AppServicePlanObservation) DeepCopyInto(out *AppServicePlanObservation) { + *out = *in + if in.AppServiceEnvironmentID != nil { + in, out := &in.AppServiceEnvironmentID, &out.AppServiceEnvironmentID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.IsXenon != nil { + in, out := &in.IsXenon, &out.IsXenon + *out = new(bool) + **out = **in + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MaximumElasticWorkerCount != nil { + in, out := &in.MaximumElasticWorkerCount, &out.MaximumElasticWorkerCount + *out = new(float64) + **out = **in + } + if in.MaximumNumberOfWorkers != nil { + in, out := &in.MaximumNumberOfWorkers, &out.MaximumNumberOfWorkers + *out = new(float64) + **out = **in + } + if in.PerSiteScaling != nil { + in, out := &in.PerSiteScaling, &out.PerSiteScaling + *out = new(bool) + **out = **in + } + if in.Reserved != nil { + in, out := &in.Reserved, &out.Reserved + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(SkuObservation) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ZoneRedundant != nil { + in, out := &in.ZoneRedundant, &out.ZoneRedundant + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppServicePlanObservation. 
+func (in *AppServicePlanObservation) DeepCopy() *AppServicePlanObservation { + if in == nil { + return nil + } + out := new(AppServicePlanObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppServicePlanParameters) DeepCopyInto(out *AppServicePlanParameters) { + *out = *in + if in.AppServiceEnvironmentID != nil { + in, out := &in.AppServiceEnvironmentID, &out.AppServiceEnvironmentID + *out = new(string) + **out = **in + } + if in.IsXenon != nil { + in, out := &in.IsXenon, &out.IsXenon + *out = new(bool) + **out = **in + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.MaximumElasticWorkerCount != nil { + in, out := &in.MaximumElasticWorkerCount, &out.MaximumElasticWorkerCount + *out = new(float64) + **out = **in + } + if in.PerSiteScaling != nil { + in, out := &in.PerSiteScaling, &out.PerSiteScaling + *out = new(bool) + **out = **in + } + if in.Reserved != nil { + in, out := &in.Reserved, &out.Reserved + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Sku != nil { + in, out := &in.Sku, &out.Sku + *out = new(SkuParameters) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + 
(*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.ZoneRedundant != nil { + in, out := &in.ZoneRedundant, &out.ZoneRedundant + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppServicePlanParameters. +func (in *AppServicePlanParameters) DeepCopy() *AppServicePlanParameters { + if in == nil { + return nil + } + out := new(AppServicePlanParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppServicePlanSpec) DeepCopyInto(out *AppServicePlanSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppServicePlanSpec. +func (in *AppServicePlanSpec) DeepCopy() *AppServicePlanSpec { + if in == nil { + return nil + } + out := new(AppServicePlanSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppServicePlanStatus) DeepCopyInto(out *AppServicePlanStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppServicePlanStatus. +func (in *AppServicePlanStatus) DeepCopy() *AppServicePlanStatus { + if in == nil { + return nil + } + out := new(AppServicePlanStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AppleV2InitParameters) DeepCopyInto(out *AppleV2InitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppleV2InitParameters. +func (in *AppleV2InitParameters) DeepCopy() *AppleV2InitParameters { + if in == nil { + return nil + } + out := new(AppleV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppleV2Observation) DeepCopyInto(out *AppleV2Observation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppleV2Observation. +func (in *AppleV2Observation) DeepCopy() *AppleV2Observation { + if in == nil { + return nil + } + out := new(AppleV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AppleV2Parameters) DeepCopyInto(out *AppleV2Parameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppleV2Parameters. +func (in *AppleV2Parameters) DeepCopy() *AppleV2Parameters { + if in == nil { + return nil + } + out := new(AppleV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationLogsAzureBlobStorageInitParameters) DeepCopyInto(out *ApplicationLogsAzureBlobStorageInitParameters) { + *out = *in + if in.Level != nil { + in, out := &in.Level, &out.Level + *out = new(string) + **out = **in + } + if in.RetentionInDays != nil { + in, out := &in.RetentionInDays, &out.RetentionInDays + *out = new(float64) + **out = **in + } + if in.SASURL != nil { + in, out := &in.SASURL, &out.SASURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationLogsAzureBlobStorageInitParameters. +func (in *ApplicationLogsAzureBlobStorageInitParameters) DeepCopy() *ApplicationLogsAzureBlobStorageInitParameters { + if in == nil { + return nil + } + out := new(ApplicationLogsAzureBlobStorageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ApplicationLogsAzureBlobStorageObservation) DeepCopyInto(out *ApplicationLogsAzureBlobStorageObservation) { + *out = *in + if in.Level != nil { + in, out := &in.Level, &out.Level + *out = new(string) + **out = **in + } + if in.RetentionInDays != nil { + in, out := &in.RetentionInDays, &out.RetentionInDays + *out = new(float64) + **out = **in + } + if in.SASURL != nil { + in, out := &in.SASURL, &out.SASURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationLogsAzureBlobStorageObservation. +func (in *ApplicationLogsAzureBlobStorageObservation) DeepCopy() *ApplicationLogsAzureBlobStorageObservation { + if in == nil { + return nil + } + out := new(ApplicationLogsAzureBlobStorageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationLogsAzureBlobStorageParameters) DeepCopyInto(out *ApplicationLogsAzureBlobStorageParameters) { + *out = *in + if in.Level != nil { + in, out := &in.Level, &out.Level + *out = new(string) + **out = **in + } + if in.RetentionInDays != nil { + in, out := &in.RetentionInDays, &out.RetentionInDays + *out = new(float64) + **out = **in + } + if in.SASURL != nil { + in, out := &in.SASURL, &out.SASURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationLogsAzureBlobStorageParameters. +func (in *ApplicationLogsAzureBlobStorageParameters) DeepCopy() *ApplicationLogsAzureBlobStorageParameters { + if in == nil { + return nil + } + out := new(ApplicationLogsAzureBlobStorageParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ApplicationLogsInitParameters) DeepCopyInto(out *ApplicationLogsInitParameters) { + *out = *in + if in.AzureBlobStorage != nil { + in, out := &in.AzureBlobStorage, &out.AzureBlobStorage + *out = new(AzureBlobStorageInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FileSystemLevel != nil { + in, out := &in.FileSystemLevel, &out.FileSystemLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationLogsInitParameters. +func (in *ApplicationLogsInitParameters) DeepCopy() *ApplicationLogsInitParameters { + if in == nil { + return nil + } + out := new(ApplicationLogsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationLogsObservation) DeepCopyInto(out *ApplicationLogsObservation) { + *out = *in + if in.AzureBlobStorage != nil { + in, out := &in.AzureBlobStorage, &out.AzureBlobStorage + *out = new(AzureBlobStorageObservation) + (*in).DeepCopyInto(*out) + } + if in.FileSystemLevel != nil { + in, out := &in.FileSystemLevel, &out.FileSystemLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationLogsObservation. +func (in *ApplicationLogsObservation) DeepCopy() *ApplicationLogsObservation { + if in == nil { + return nil + } + out := new(ApplicationLogsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ApplicationLogsParameters) DeepCopyInto(out *ApplicationLogsParameters) { + *out = *in + if in.AzureBlobStorage != nil { + in, out := &in.AzureBlobStorage, &out.AzureBlobStorage + *out = new(AzureBlobStorageParameters) + (*in).DeepCopyInto(*out) + } + if in.FileSystemLevel != nil { + in, out := &in.FileSystemLevel, &out.FileSystemLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationLogsParameters. +func (in *ApplicationLogsParameters) DeepCopy() *ApplicationLogsParameters { + if in == nil { + return nil + } + out := new(ApplicationLogsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationStackDockerInitParameters) DeepCopyInto(out *ApplicationStackDockerInitParameters) { + *out = *in + if in.ImageName != nil { + in, out := &in.ImageName, &out.ImageName + *out = new(string) + **out = **in + } + if in.ImageTag != nil { + in, out := &in.ImageTag, &out.ImageTag + *out = new(string) + **out = **in + } + if in.RegistryURL != nil { + in, out := &in.RegistryURL, &out.RegistryURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationStackDockerInitParameters. +func (in *ApplicationStackDockerInitParameters) DeepCopy() *ApplicationStackDockerInitParameters { + if in == nil { + return nil + } + out := new(ApplicationStackDockerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ApplicationStackDockerObservation) DeepCopyInto(out *ApplicationStackDockerObservation) { + *out = *in + if in.ImageName != nil { + in, out := &in.ImageName, &out.ImageName + *out = new(string) + **out = **in + } + if in.ImageTag != nil { + in, out := &in.ImageTag, &out.ImageTag + *out = new(string) + **out = **in + } + if in.RegistryURL != nil { + in, out := &in.RegistryURL, &out.RegistryURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationStackDockerObservation. +func (in *ApplicationStackDockerObservation) DeepCopy() *ApplicationStackDockerObservation { + if in == nil { + return nil + } + out := new(ApplicationStackDockerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationStackDockerParameters) DeepCopyInto(out *ApplicationStackDockerParameters) { + *out = *in + if in.ImageName != nil { + in, out := &in.ImageName, &out.ImageName + *out = new(string) + **out = **in + } + if in.ImageTag != nil { + in, out := &in.ImageTag, &out.ImageTag + *out = new(string) + **out = **in + } + if in.RegistryPasswordSecretRef != nil { + in, out := &in.RegistryPasswordSecretRef, &out.RegistryPasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.RegistryURL != nil { + in, out := &in.RegistryURL, &out.RegistryURL + *out = new(string) + **out = **in + } + if in.RegistryUsernameSecretRef != nil { + in, out := &in.RegistryUsernameSecretRef, &out.RegistryUsernameSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationStackDockerParameters. 
+func (in *ApplicationStackDockerParameters) DeepCopy() *ApplicationStackDockerParameters { + if in == nil { + return nil + } + out := new(ApplicationStackDockerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationStackInitParameters) DeepCopyInto(out *ApplicationStackInitParameters) { + *out = *in + if in.Docker != nil { + in, out := &in.Docker, &out.Docker + *out = make([]DockerInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DotnetVersion != nil { + in, out := &in.DotnetVersion, &out.DotnetVersion + *out = new(string) + **out = **in + } + if in.JavaVersion != nil { + in, out := &in.JavaVersion, &out.JavaVersion + *out = new(string) + **out = **in + } + if in.NodeVersion != nil { + in, out := &in.NodeVersion, &out.NodeVersion + *out = new(string) + **out = **in + } + if in.PowershellCoreVersion != nil { + in, out := &in.PowershellCoreVersion, &out.PowershellCoreVersion + *out = new(string) + **out = **in + } + if in.PythonVersion != nil { + in, out := &in.PythonVersion, &out.PythonVersion + *out = new(string) + **out = **in + } + if in.UseCustomRuntime != nil { + in, out := &in.UseCustomRuntime, &out.UseCustomRuntime + *out = new(bool) + **out = **in + } + if in.UseDotnetIsolatedRuntime != nil { + in, out := &in.UseDotnetIsolatedRuntime, &out.UseDotnetIsolatedRuntime + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationStackInitParameters. +func (in *ApplicationStackInitParameters) DeepCopy() *ApplicationStackInitParameters { + if in == nil { + return nil + } + out := new(ApplicationStackInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ApplicationStackObservation) DeepCopyInto(out *ApplicationStackObservation) { + *out = *in + if in.Docker != nil { + in, out := &in.Docker, &out.Docker + *out = make([]DockerObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DotnetVersion != nil { + in, out := &in.DotnetVersion, &out.DotnetVersion + *out = new(string) + **out = **in + } + if in.JavaVersion != nil { + in, out := &in.JavaVersion, &out.JavaVersion + *out = new(string) + **out = **in + } + if in.NodeVersion != nil { + in, out := &in.NodeVersion, &out.NodeVersion + *out = new(string) + **out = **in + } + if in.PowershellCoreVersion != nil { + in, out := &in.PowershellCoreVersion, &out.PowershellCoreVersion + *out = new(string) + **out = **in + } + if in.PythonVersion != nil { + in, out := &in.PythonVersion, &out.PythonVersion + *out = new(string) + **out = **in + } + if in.UseCustomRuntime != nil { + in, out := &in.UseCustomRuntime, &out.UseCustomRuntime + *out = new(bool) + **out = **in + } + if in.UseDotnetIsolatedRuntime != nil { + in, out := &in.UseDotnetIsolatedRuntime, &out.UseDotnetIsolatedRuntime + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationStackObservation. +func (in *ApplicationStackObservation) DeepCopy() *ApplicationStackObservation { + if in == nil { + return nil + } + out := new(ApplicationStackObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ApplicationStackParameters) DeepCopyInto(out *ApplicationStackParameters) { + *out = *in + if in.Docker != nil { + in, out := &in.Docker, &out.Docker + *out = make([]DockerParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DotnetVersion != nil { + in, out := &in.DotnetVersion, &out.DotnetVersion + *out = new(string) + **out = **in + } + if in.JavaVersion != nil { + in, out := &in.JavaVersion, &out.JavaVersion + *out = new(string) + **out = **in + } + if in.NodeVersion != nil { + in, out := &in.NodeVersion, &out.NodeVersion + *out = new(string) + **out = **in + } + if in.PowershellCoreVersion != nil { + in, out := &in.PowershellCoreVersion, &out.PowershellCoreVersion + *out = new(string) + **out = **in + } + if in.PythonVersion != nil { + in, out := &in.PythonVersion, &out.PythonVersion + *out = new(string) + **out = **in + } + if in.UseCustomRuntime != nil { + in, out := &in.UseCustomRuntime, &out.UseCustomRuntime + *out = new(bool) + **out = **in + } + if in.UseDotnetIsolatedRuntime != nil { + in, out := &in.UseDotnetIsolatedRuntime, &out.UseDotnetIsolatedRuntime + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationStackParameters. +func (in *ApplicationStackParameters) DeepCopy() *ApplicationStackParameters { + if in == nil { + return nil + } + out := new(ApplicationStackParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuthSettingsActiveDirectoryInitParameters) DeepCopyInto(out *AuthSettingsActiveDirectoryInitParameters) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSettingsActiveDirectoryInitParameters. +func (in *AuthSettingsActiveDirectoryInitParameters) DeepCopy() *AuthSettingsActiveDirectoryInitParameters { + if in == nil { + return nil + } + out := new(AuthSettingsActiveDirectoryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthSettingsActiveDirectoryObservation) DeepCopyInto(out *AuthSettingsActiveDirectoryObservation) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSettingsActiveDirectoryObservation. +func (in *AuthSettingsActiveDirectoryObservation) DeepCopy() *AuthSettingsActiveDirectoryObservation { + if in == nil { + return nil + } + out := new(AuthSettingsActiveDirectoryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuthSettingsActiveDirectoryParameters) DeepCopyInto(out *AuthSettingsActiveDirectoryParameters) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSecretRef != nil { + in, out := &in.ClientSecretSecretRef, &out.ClientSecretSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSettingsActiveDirectoryParameters. +func (in *AuthSettingsActiveDirectoryParameters) DeepCopy() *AuthSettingsActiveDirectoryParameters { + if in == nil { + return nil + } + out := new(AuthSettingsActiveDirectoryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthSettingsFacebookInitParameters) DeepCopyInto(out *AuthSettingsFacebookInitParameters) { + *out = *in + if in.AppID != nil { + in, out := &in.AppID, &out.AppID + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSettingsFacebookInitParameters. 
+func (in *AuthSettingsFacebookInitParameters) DeepCopy() *AuthSettingsFacebookInitParameters { + if in == nil { + return nil + } + out := new(AuthSettingsFacebookInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthSettingsFacebookObservation) DeepCopyInto(out *AuthSettingsFacebookObservation) { + *out = *in + if in.AppID != nil { + in, out := &in.AppID, &out.AppID + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSettingsFacebookObservation. +func (in *AuthSettingsFacebookObservation) DeepCopy() *AuthSettingsFacebookObservation { + if in == nil { + return nil + } + out := new(AuthSettingsFacebookObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthSettingsFacebookParameters) DeepCopyInto(out *AuthSettingsFacebookParameters) { + *out = *in + if in.AppID != nil { + in, out := &in.AppID, &out.AppID + *out = new(string) + **out = **in + } + out.AppSecretSecretRef = in.AppSecretSecretRef + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSettingsFacebookParameters. 
+func (in *AuthSettingsFacebookParameters) DeepCopy() *AuthSettingsFacebookParameters { + if in == nil { + return nil + } + out := new(AuthSettingsFacebookParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthSettingsGithubInitParameters) DeepCopyInto(out *AuthSettingsGithubInitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSettingsGithubInitParameters. +func (in *AuthSettingsGithubInitParameters) DeepCopy() *AuthSettingsGithubInitParameters { + if in == nil { + return nil + } + out := new(AuthSettingsGithubInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuthSettingsGithubObservation) DeepCopyInto(out *AuthSettingsGithubObservation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSettingsGithubObservation. +func (in *AuthSettingsGithubObservation) DeepCopy() *AuthSettingsGithubObservation { + if in == nil { + return nil + } + out := new(AuthSettingsGithubObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthSettingsGithubParameters) DeepCopyInto(out *AuthSettingsGithubParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSecretRef != nil { + in, out := &in.ClientSecretSecretRef, &out.ClientSecretSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSettingsGithubParameters. 
+func (in *AuthSettingsGithubParameters) DeepCopy() *AuthSettingsGithubParameters { + if in == nil { + return nil + } + out := new(AuthSettingsGithubParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthSettingsGoogleInitParameters) DeepCopyInto(out *AuthSettingsGoogleInitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSettingsGoogleInitParameters. +func (in *AuthSettingsGoogleInitParameters) DeepCopy() *AuthSettingsGoogleInitParameters { + if in == nil { + return nil + } + out := new(AuthSettingsGoogleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthSettingsGoogleObservation) DeepCopyInto(out *AuthSettingsGoogleObservation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSettingsGoogleObservation. 
+func (in *AuthSettingsGoogleObservation) DeepCopy() *AuthSettingsGoogleObservation { + if in == nil { + return nil + } + out := new(AuthSettingsGoogleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthSettingsGoogleParameters) DeepCopyInto(out *AuthSettingsGoogleParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + out.ClientSecretSecretRef = in.ClientSecretSecretRef + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSettingsGoogleParameters. +func (in *AuthSettingsGoogleParameters) DeepCopy() *AuthSettingsGoogleParameters { + if in == nil { + return nil + } + out := new(AuthSettingsGoogleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuthSettingsInitParameters) DeepCopyInto(out *AuthSettingsInitParameters) { + *out = *in + if in.ActiveDirectory != nil { + in, out := &in.ActiveDirectory, &out.ActiveDirectory + *out = new(ActiveDirectoryInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AdditionalLoginParams != nil { + in, out := &in.AdditionalLoginParams, &out.AdditionalLoginParams + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AllowedExternalRedirectUrls != nil { + in, out := &in.AllowedExternalRedirectUrls, &out.AllowedExternalRedirectUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DefaultProvider != nil { + in, out := &in.DefaultProvider, &out.DefaultProvider + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Facebook != nil { + in, out := &in.Facebook, &out.Facebook + *out = new(FacebookInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Google != nil { + in, out := &in.Google, &out.Google + *out = new(GoogleInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } + if in.Microsoft != nil { + in, out := &in.Microsoft, &out.Microsoft + *out = new(MicrosoftInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RuntimeVersion != nil { + in, out := &in.RuntimeVersion, &out.RuntimeVersion + *out = new(string) + **out = **in + } + if in.TokenRefreshExtensionHours != nil { + in, out := &in.TokenRefreshExtensionHours, &out.TokenRefreshExtensionHours + *out = new(float64) + **out = **in + } + if in.TokenStoreEnabled != nil { + in, out := 
&in.TokenStoreEnabled, &out.TokenStoreEnabled + *out = new(bool) + **out = **in + } + if in.Twitter != nil { + in, out := &in.Twitter, &out.Twitter + *out = new(TwitterInitParameters) + (*in).DeepCopyInto(*out) + } + if in.UnauthenticatedClientAction != nil { + in, out := &in.UnauthenticatedClientAction, &out.UnauthenticatedClientAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSettingsInitParameters. +func (in *AuthSettingsInitParameters) DeepCopy() *AuthSettingsInitParameters { + if in == nil { + return nil + } + out := new(AuthSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthSettingsMicrosoftInitParameters) DeepCopyInto(out *AuthSettingsMicrosoftInitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSettingsMicrosoftInitParameters. +func (in *AuthSettingsMicrosoftInitParameters) DeepCopy() *AuthSettingsMicrosoftInitParameters { + if in == nil { + return nil + } + out := new(AuthSettingsMicrosoftInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuthSettingsMicrosoftObservation) DeepCopyInto(out *AuthSettingsMicrosoftObservation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSettingsMicrosoftObservation. +func (in *AuthSettingsMicrosoftObservation) DeepCopy() *AuthSettingsMicrosoftObservation { + if in == nil { + return nil + } + out := new(AuthSettingsMicrosoftObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthSettingsMicrosoftParameters) DeepCopyInto(out *AuthSettingsMicrosoftParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + out.ClientSecretSecretRef = in.ClientSecretSecretRef + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSettingsMicrosoftParameters. +func (in *AuthSettingsMicrosoftParameters) DeepCopy() *AuthSettingsMicrosoftParameters { + if in == nil { + return nil + } + out := new(AuthSettingsMicrosoftParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuthSettingsObservation) DeepCopyInto(out *AuthSettingsObservation) { + *out = *in + if in.ActiveDirectory != nil { + in, out := &in.ActiveDirectory, &out.ActiveDirectory + *out = new(ActiveDirectoryObservation) + (*in).DeepCopyInto(*out) + } + if in.AdditionalLoginParams != nil { + in, out := &in.AdditionalLoginParams, &out.AdditionalLoginParams + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AllowedExternalRedirectUrls != nil { + in, out := &in.AllowedExternalRedirectUrls, &out.AllowedExternalRedirectUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DefaultProvider != nil { + in, out := &in.DefaultProvider, &out.DefaultProvider + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Facebook != nil { + in, out := &in.Facebook, &out.Facebook + *out = new(FacebookObservation) + (*in).DeepCopyInto(*out) + } + if in.Google != nil { + in, out := &in.Google, &out.Google + *out = new(GoogleObservation) + (*in).DeepCopyInto(*out) + } + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } + if in.Microsoft != nil { + in, out := &in.Microsoft, &out.Microsoft + *out = new(MicrosoftObservation) + (*in).DeepCopyInto(*out) + } + if in.RuntimeVersion != nil { + in, out := &in.RuntimeVersion, &out.RuntimeVersion + *out = new(string) + **out = **in + } + if in.TokenRefreshExtensionHours != nil { + in, out := &in.TokenRefreshExtensionHours, &out.TokenRefreshExtensionHours + *out = new(float64) + **out = **in + } + if in.TokenStoreEnabled != nil { + in, out := &in.TokenStoreEnabled, 
&out.TokenStoreEnabled + *out = new(bool) + **out = **in + } + if in.Twitter != nil { + in, out := &in.Twitter, &out.Twitter + *out = new(TwitterObservation) + (*in).DeepCopyInto(*out) + } + if in.UnauthenticatedClientAction != nil { + in, out := &in.UnauthenticatedClientAction, &out.UnauthenticatedClientAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSettingsObservation. +func (in *AuthSettingsObservation) DeepCopy() *AuthSettingsObservation { + if in == nil { + return nil + } + out := new(AuthSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthSettingsParameters) DeepCopyInto(out *AuthSettingsParameters) { + *out = *in + if in.ActiveDirectory != nil { + in, out := &in.ActiveDirectory, &out.ActiveDirectory + *out = new(ActiveDirectoryParameters) + (*in).DeepCopyInto(*out) + } + if in.AdditionalLoginParams != nil { + in, out := &in.AdditionalLoginParams, &out.AdditionalLoginParams + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AllowedExternalRedirectUrls != nil { + in, out := &in.AllowedExternalRedirectUrls, &out.AllowedExternalRedirectUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DefaultProvider != nil { + in, out := &in.DefaultProvider, &out.DefaultProvider + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Facebook != nil { + in, out := &in.Facebook, &out.Facebook + *out = 
new(FacebookParameters) + (*in).DeepCopyInto(*out) + } + if in.Google != nil { + in, out := &in.Google, &out.Google + *out = new(GoogleParameters) + (*in).DeepCopyInto(*out) + } + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } + if in.Microsoft != nil { + in, out := &in.Microsoft, &out.Microsoft + *out = new(MicrosoftParameters) + (*in).DeepCopyInto(*out) + } + if in.RuntimeVersion != nil { + in, out := &in.RuntimeVersion, &out.RuntimeVersion + *out = new(string) + **out = **in + } + if in.TokenRefreshExtensionHours != nil { + in, out := &in.TokenRefreshExtensionHours, &out.TokenRefreshExtensionHours + *out = new(float64) + **out = **in + } + if in.TokenStoreEnabled != nil { + in, out := &in.TokenStoreEnabled, &out.TokenStoreEnabled + *out = new(bool) + **out = **in + } + if in.Twitter != nil { + in, out := &in.Twitter, &out.Twitter + *out = new(TwitterParameters) + (*in).DeepCopyInto(*out) + } + if in.UnauthenticatedClientAction != nil { + in, out := &in.UnauthenticatedClientAction, &out.UnauthenticatedClientAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSettingsParameters. +func (in *AuthSettingsParameters) DeepCopy() *AuthSettingsParameters { + if in == nil { + return nil + } + out := new(AuthSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthSettingsTwitterInitParameters) DeepCopyInto(out *AuthSettingsTwitterInitParameters) { + *out = *in + if in.ConsumerKey != nil { + in, out := &in.ConsumerKey, &out.ConsumerKey + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSettingsTwitterInitParameters. 
+func (in *AuthSettingsTwitterInitParameters) DeepCopy() *AuthSettingsTwitterInitParameters { + if in == nil { + return nil + } + out := new(AuthSettingsTwitterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthSettingsTwitterObservation) DeepCopyInto(out *AuthSettingsTwitterObservation) { + *out = *in + if in.ConsumerKey != nil { + in, out := &in.ConsumerKey, &out.ConsumerKey + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSettingsTwitterObservation. +func (in *AuthSettingsTwitterObservation) DeepCopy() *AuthSettingsTwitterObservation { + if in == nil { + return nil + } + out := new(AuthSettingsTwitterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthSettingsTwitterParameters) DeepCopyInto(out *AuthSettingsTwitterParameters) { + *out = *in + if in.ConsumerKey != nil { + in, out := &in.ConsumerKey, &out.ConsumerKey + *out = new(string) + **out = **in + } + out.ConsumerSecretSecretRef = in.ConsumerSecretSecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSettingsTwitterParameters. +func (in *AuthSettingsTwitterParameters) DeepCopy() *AuthSettingsTwitterParameters { + if in == nil { + return nil + } + out := new(AuthSettingsTwitterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuthSettingsV2ActiveDirectoryV2InitParameters) DeepCopyInto(out *AuthSettingsV2ActiveDirectoryV2InitParameters) { + *out = *in + if in.AllowedApplications != nil { + in, out := &in.AllowedApplications, &out.AllowedApplications + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedGroups != nil { + in, out := &in.AllowedGroups, &out.AllowedGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedIdentities != nil { + in, out := &in.AllowedIdentities, &out.AllowedIdentities + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretCertificateThumbprint != nil { + in, out := &in.ClientSecretCertificateThumbprint, &out.ClientSecretCertificateThumbprint + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.JwtAllowedClientApplications != nil { + in, out := &in.JwtAllowedClientApplications, &out.JwtAllowedClientApplications + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.JwtAllowedGroups != nil { + in, out := &in.JwtAllowedGroups, &out.JwtAllowedGroups + *out = 
make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LoginParameters != nil { + in, out := &in.LoginParameters, &out.LoginParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TenantAuthEndpoint != nil { + in, out := &in.TenantAuthEndpoint, &out.TenantAuthEndpoint + *out = new(string) + **out = **in + } + if in.WwwAuthenticationDisabled != nil { + in, out := &in.WwwAuthenticationDisabled, &out.WwwAuthenticationDisabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSettingsV2ActiveDirectoryV2InitParameters. +func (in *AuthSettingsV2ActiveDirectoryV2InitParameters) DeepCopy() *AuthSettingsV2ActiveDirectoryV2InitParameters { + if in == nil { + return nil + } + out := new(AuthSettingsV2ActiveDirectoryV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuthSettingsV2ActiveDirectoryV2Observation) DeepCopyInto(out *AuthSettingsV2ActiveDirectoryV2Observation) { + *out = *in + if in.AllowedApplications != nil { + in, out := &in.AllowedApplications, &out.AllowedApplications + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedGroups != nil { + in, out := &in.AllowedGroups, &out.AllowedGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedIdentities != nil { + in, out := &in.AllowedIdentities, &out.AllowedIdentities + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretCertificateThumbprint != nil { + in, out := &in.ClientSecretCertificateThumbprint, &out.ClientSecretCertificateThumbprint + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.JwtAllowedClientApplications != nil { + in, out := &in.JwtAllowedClientApplications, &out.JwtAllowedClientApplications + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.JwtAllowedGroups != nil { + in, out := &in.JwtAllowedGroups, &out.JwtAllowedGroups + *out = 
make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LoginParameters != nil { + in, out := &in.LoginParameters, &out.LoginParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TenantAuthEndpoint != nil { + in, out := &in.TenantAuthEndpoint, &out.TenantAuthEndpoint + *out = new(string) + **out = **in + } + if in.WwwAuthenticationDisabled != nil { + in, out := &in.WwwAuthenticationDisabled, &out.WwwAuthenticationDisabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSettingsV2ActiveDirectoryV2Observation. +func (in *AuthSettingsV2ActiveDirectoryV2Observation) DeepCopy() *AuthSettingsV2ActiveDirectoryV2Observation { + if in == nil { + return nil + } + out := new(AuthSettingsV2ActiveDirectoryV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuthSettingsV2ActiveDirectoryV2Parameters) DeepCopyInto(out *AuthSettingsV2ActiveDirectoryV2Parameters) { + *out = *in + if in.AllowedApplications != nil { + in, out := &in.AllowedApplications, &out.AllowedApplications + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedGroups != nil { + in, out := &in.AllowedGroups, &out.AllowedGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedIdentities != nil { + in, out := &in.AllowedIdentities, &out.AllowedIdentities + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretCertificateThumbprint != nil { + in, out := &in.ClientSecretCertificateThumbprint, &out.ClientSecretCertificateThumbprint + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.JwtAllowedClientApplications != nil { + in, out := &in.JwtAllowedClientApplications, &out.JwtAllowedClientApplications + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.JwtAllowedGroups != nil { + in, out := &in.JwtAllowedGroups, &out.JwtAllowedGroups + *out = 
make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LoginParameters != nil { + in, out := &in.LoginParameters, &out.LoginParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TenantAuthEndpoint != nil { + in, out := &in.TenantAuthEndpoint, &out.TenantAuthEndpoint + *out = new(string) + **out = **in + } + if in.WwwAuthenticationDisabled != nil { + in, out := &in.WwwAuthenticationDisabled, &out.WwwAuthenticationDisabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSettingsV2ActiveDirectoryV2Parameters. +func (in *AuthSettingsV2ActiveDirectoryV2Parameters) DeepCopy() *AuthSettingsV2ActiveDirectoryV2Parameters { + if in == nil { + return nil + } + out := new(AuthSettingsV2ActiveDirectoryV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthSettingsV2AppleV2InitParameters) DeepCopyInto(out *AuthSettingsV2AppleV2InitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSettingsV2AppleV2InitParameters. 
+func (in *AuthSettingsV2AppleV2InitParameters) DeepCopy() *AuthSettingsV2AppleV2InitParameters { + if in == nil { + return nil + } + out := new(AuthSettingsV2AppleV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthSettingsV2AppleV2Observation) DeepCopyInto(out *AuthSettingsV2AppleV2Observation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSettingsV2AppleV2Observation. +func (in *AuthSettingsV2AppleV2Observation) DeepCopy() *AuthSettingsV2AppleV2Observation { + if in == nil { + return nil + } + out := new(AuthSettingsV2AppleV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthSettingsV2AppleV2Parameters) DeepCopyInto(out *AuthSettingsV2AppleV2Parameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSettingsV2AppleV2Parameters. 
+func (in *AuthSettingsV2AppleV2Parameters) DeepCopy() *AuthSettingsV2AppleV2Parameters { + if in == nil { + return nil + } + out := new(AuthSettingsV2AppleV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthSettingsV2AzureStaticWebAppV2InitParameters) DeepCopyInto(out *AuthSettingsV2AzureStaticWebAppV2InitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSettingsV2AzureStaticWebAppV2InitParameters. +func (in *AuthSettingsV2AzureStaticWebAppV2InitParameters) DeepCopy() *AuthSettingsV2AzureStaticWebAppV2InitParameters { + if in == nil { + return nil + } + out := new(AuthSettingsV2AzureStaticWebAppV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthSettingsV2AzureStaticWebAppV2Observation) DeepCopyInto(out *AuthSettingsV2AzureStaticWebAppV2Observation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSettingsV2AzureStaticWebAppV2Observation. +func (in *AuthSettingsV2AzureStaticWebAppV2Observation) DeepCopy() *AuthSettingsV2AzureStaticWebAppV2Observation { + if in == nil { + return nil + } + out := new(AuthSettingsV2AzureStaticWebAppV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuthSettingsV2AzureStaticWebAppV2Parameters) DeepCopyInto(out *AuthSettingsV2AzureStaticWebAppV2Parameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSettingsV2AzureStaticWebAppV2Parameters. +func (in *AuthSettingsV2AzureStaticWebAppV2Parameters) DeepCopy() *AuthSettingsV2AzureStaticWebAppV2Parameters { + if in == nil { + return nil + } + out := new(AuthSettingsV2AzureStaticWebAppV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthSettingsV2CustomOidcV2InitParameters) DeepCopyInto(out *AuthSettingsV2CustomOidcV2InitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NameClaimType != nil { + in, out := &in.NameClaimType, &out.NameClaimType + *out = new(string) + **out = **in + } + if in.OpenIDConfigurationEndpoint != nil { + in, out := &in.OpenIDConfigurationEndpoint, &out.OpenIDConfigurationEndpoint + *out = new(string) + **out = **in + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSettingsV2CustomOidcV2InitParameters. 
+func (in *AuthSettingsV2CustomOidcV2InitParameters) DeepCopy() *AuthSettingsV2CustomOidcV2InitParameters { + if in == nil { + return nil + } + out := new(AuthSettingsV2CustomOidcV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthSettingsV2CustomOidcV2Observation) DeepCopyInto(out *AuthSettingsV2CustomOidcV2Observation) { + *out = *in + if in.AuthorisationEndpoint != nil { + in, out := &in.AuthorisationEndpoint, &out.AuthorisationEndpoint + *out = new(string) + **out = **in + } + if in.CertificationURI != nil { + in, out := &in.CertificationURI, &out.CertificationURI + *out = new(string) + **out = **in + } + if in.ClientCredentialMethod != nil { + in, out := &in.ClientCredentialMethod, &out.ClientCredentialMethod + *out = new(string) + **out = **in + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.IssuerEndpoint != nil { + in, out := &in.IssuerEndpoint, &out.IssuerEndpoint + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NameClaimType != nil { + in, out := &in.NameClaimType, &out.NameClaimType + *out = new(string) + **out = **in + } + if in.OpenIDConfigurationEndpoint != nil { + in, out := &in.OpenIDConfigurationEndpoint, &out.OpenIDConfigurationEndpoint + *out = new(string) + **out = **in + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TokenEndpoint != nil { + in, out := &in.TokenEndpoint, &out.TokenEndpoint + *out = new(string) + 
**out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSettingsV2CustomOidcV2Observation. +func (in *AuthSettingsV2CustomOidcV2Observation) DeepCopy() *AuthSettingsV2CustomOidcV2Observation { + if in == nil { + return nil + } + out := new(AuthSettingsV2CustomOidcV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthSettingsV2CustomOidcV2Parameters) DeepCopyInto(out *AuthSettingsV2CustomOidcV2Parameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NameClaimType != nil { + in, out := &in.NameClaimType, &out.NameClaimType + *out = new(string) + **out = **in + } + if in.OpenIDConfigurationEndpoint != nil { + in, out := &in.OpenIDConfigurationEndpoint, &out.OpenIDConfigurationEndpoint + *out = new(string) + **out = **in + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSettingsV2CustomOidcV2Parameters. +func (in *AuthSettingsV2CustomOidcV2Parameters) DeepCopy() *AuthSettingsV2CustomOidcV2Parameters { + if in == nil { + return nil + } + out := new(AuthSettingsV2CustomOidcV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuthSettingsV2FacebookV2InitParameters) DeepCopyInto(out *AuthSettingsV2FacebookV2InitParameters) { + *out = *in + if in.AppID != nil { + in, out := &in.AppID, &out.AppID + *out = new(string) + **out = **in + } + if in.AppSecretSettingName != nil { + in, out := &in.AppSecretSettingName, &out.AppSecretSettingName + *out = new(string) + **out = **in + } + if in.GraphAPIVersion != nil { + in, out := &in.GraphAPIVersion, &out.GraphAPIVersion + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSettingsV2FacebookV2InitParameters. +func (in *AuthSettingsV2FacebookV2InitParameters) DeepCopy() *AuthSettingsV2FacebookV2InitParameters { + if in == nil { + return nil + } + out := new(AuthSettingsV2FacebookV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuthSettingsV2FacebookV2Observation) DeepCopyInto(out *AuthSettingsV2FacebookV2Observation) { + *out = *in + if in.AppID != nil { + in, out := &in.AppID, &out.AppID + *out = new(string) + **out = **in + } + if in.AppSecretSettingName != nil { + in, out := &in.AppSecretSettingName, &out.AppSecretSettingName + *out = new(string) + **out = **in + } + if in.GraphAPIVersion != nil { + in, out := &in.GraphAPIVersion, &out.GraphAPIVersion + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSettingsV2FacebookV2Observation. +func (in *AuthSettingsV2FacebookV2Observation) DeepCopy() *AuthSettingsV2FacebookV2Observation { + if in == nil { + return nil + } + out := new(AuthSettingsV2FacebookV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuthSettingsV2FacebookV2Parameters) DeepCopyInto(out *AuthSettingsV2FacebookV2Parameters) { + *out = *in + if in.AppID != nil { + in, out := &in.AppID, &out.AppID + *out = new(string) + **out = **in + } + if in.AppSecretSettingName != nil { + in, out := &in.AppSecretSettingName, &out.AppSecretSettingName + *out = new(string) + **out = **in + } + if in.GraphAPIVersion != nil { + in, out := &in.GraphAPIVersion, &out.GraphAPIVersion + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSettingsV2FacebookV2Parameters. +func (in *AuthSettingsV2FacebookV2Parameters) DeepCopy() *AuthSettingsV2FacebookV2Parameters { + if in == nil { + return nil + } + out := new(AuthSettingsV2FacebookV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthSettingsV2GithubV2InitParameters) DeepCopyInto(out *AuthSettingsV2GithubV2InitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSettingsV2GithubV2InitParameters. 
+func (in *AuthSettingsV2GithubV2InitParameters) DeepCopy() *AuthSettingsV2GithubV2InitParameters { + if in == nil { + return nil + } + out := new(AuthSettingsV2GithubV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthSettingsV2GithubV2Observation) DeepCopyInto(out *AuthSettingsV2GithubV2Observation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSettingsV2GithubV2Observation. +func (in *AuthSettingsV2GithubV2Observation) DeepCopy() *AuthSettingsV2GithubV2Observation { + if in == nil { + return nil + } + out := new(AuthSettingsV2GithubV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuthSettingsV2GithubV2Parameters) DeepCopyInto(out *AuthSettingsV2GithubV2Parameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSettingsV2GithubV2Parameters. +func (in *AuthSettingsV2GithubV2Parameters) DeepCopy() *AuthSettingsV2GithubV2Parameters { + if in == nil { + return nil + } + out := new(AuthSettingsV2GithubV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuthSettingsV2GoogleV2InitParameters) DeepCopyInto(out *AuthSettingsV2GoogleV2InitParameters) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSettingsV2GoogleV2InitParameters. +func (in *AuthSettingsV2GoogleV2InitParameters) DeepCopy() *AuthSettingsV2GoogleV2InitParameters { + if in == nil { + return nil + } + out := new(AuthSettingsV2GoogleV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuthSettingsV2GoogleV2Observation) DeepCopyInto(out *AuthSettingsV2GoogleV2Observation) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSettingsV2GoogleV2Observation. +func (in *AuthSettingsV2GoogleV2Observation) DeepCopy() *AuthSettingsV2GoogleV2Observation { + if in == nil { + return nil + } + out := new(AuthSettingsV2GoogleV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuthSettingsV2GoogleV2Parameters) DeepCopyInto(out *AuthSettingsV2GoogleV2Parameters) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSettingsV2GoogleV2Parameters. +func (in *AuthSettingsV2GoogleV2Parameters) DeepCopy() *AuthSettingsV2GoogleV2Parameters { + if in == nil { + return nil + } + out := new(AuthSettingsV2GoogleV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuthSettingsV2InitParameters) DeepCopyInto(out *AuthSettingsV2InitParameters) { + *out = *in + if in.ActiveDirectoryV2 != nil { + in, out := &in.ActiveDirectoryV2, &out.ActiveDirectoryV2 + *out = new(ActiveDirectoryV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.AppleV2 != nil { + in, out := &in.AppleV2, &out.AppleV2 + *out = new(AppleV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.AuthEnabled != nil { + in, out := &in.AuthEnabled, &out.AuthEnabled + *out = new(bool) + **out = **in + } + if in.AzureStaticWebAppV2 != nil { + in, out := &in.AzureStaticWebAppV2, &out.AzureStaticWebAppV2 + *out = new(AzureStaticWebAppV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.ConfigFilePath != nil { + in, out := &in.ConfigFilePath, &out.ConfigFilePath + *out = new(string) + **out = **in + } + if in.CustomOidcV2 != nil { + in, out := &in.CustomOidcV2, &out.CustomOidcV2 + *out = make([]CustomOidcV2InitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultProvider != nil { + in, out := &in.DefaultProvider, &out.DefaultProvider + *out = new(string) + **out = **in + } + if in.ExcludedPaths != nil { + in, out := &in.ExcludedPaths, &out.ExcludedPaths + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FacebookV2 != nil { + in, out := &in.FacebookV2, &out.FacebookV2 + *out = new(FacebookV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.ForwardProxyConvention != nil { + in, out := &in.ForwardProxyConvention, &out.ForwardProxyConvention + *out = new(string) + **out = **in + } + if in.ForwardProxyCustomHostHeaderName != nil { + in, out := &in.ForwardProxyCustomHostHeaderName, &out.ForwardProxyCustomHostHeaderName + *out = new(string) + **out = **in + } + if in.ForwardProxyCustomSchemeHeaderName != nil { + in, out := &in.ForwardProxyCustomSchemeHeaderName, 
&out.ForwardProxyCustomSchemeHeaderName + *out = new(string) + **out = **in + } + if in.GithubV2 != nil { + in, out := &in.GithubV2, &out.GithubV2 + *out = new(GithubV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.GoogleV2 != nil { + in, out := &in.GoogleV2, &out.GoogleV2 + *out = new(GoogleV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.HTTPRouteAPIPrefix != nil { + in, out := &in.HTTPRouteAPIPrefix, &out.HTTPRouteAPIPrefix + *out = new(string) + **out = **in + } + if in.Login != nil { + in, out := &in.Login, &out.Login + *out = new(LoginInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MicrosoftV2 != nil { + in, out := &in.MicrosoftV2, &out.MicrosoftV2 + *out = new(MicrosoftV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.RequireAuthentication != nil { + in, out := &in.RequireAuthentication, &out.RequireAuthentication + *out = new(bool) + **out = **in + } + if in.RequireHTTPS != nil { + in, out := &in.RequireHTTPS, &out.RequireHTTPS + *out = new(bool) + **out = **in + } + if in.RuntimeVersion != nil { + in, out := &in.RuntimeVersion, &out.RuntimeVersion + *out = new(string) + **out = **in + } + if in.TwitterV2 != nil { + in, out := &in.TwitterV2, &out.TwitterV2 + *out = new(TwitterV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.UnauthenticatedAction != nil { + in, out := &in.UnauthenticatedAction, &out.UnauthenticatedAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSettingsV2InitParameters. +func (in *AuthSettingsV2InitParameters) DeepCopy() *AuthSettingsV2InitParameters { + if in == nil { + return nil + } + out := new(AuthSettingsV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuthSettingsV2LoginInitParameters) DeepCopyInto(out *AuthSettingsV2LoginInitParameters) { + *out = *in + if in.AllowedExternalRedirectUrls != nil { + in, out := &in.AllowedExternalRedirectUrls, &out.AllowedExternalRedirectUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CookieExpirationConvention != nil { + in, out := &in.CookieExpirationConvention, &out.CookieExpirationConvention + *out = new(string) + **out = **in + } + if in.CookieExpirationTime != nil { + in, out := &in.CookieExpirationTime, &out.CookieExpirationTime + *out = new(string) + **out = **in + } + if in.LogoutEndpoint != nil { + in, out := &in.LogoutEndpoint, &out.LogoutEndpoint + *out = new(string) + **out = **in + } + if in.NonceExpirationTime != nil { + in, out := &in.NonceExpirationTime, &out.NonceExpirationTime + *out = new(string) + **out = **in + } + if in.PreserveURLFragmentsForLogins != nil { + in, out := &in.PreserveURLFragmentsForLogins, &out.PreserveURLFragmentsForLogins + *out = new(bool) + **out = **in + } + if in.TokenRefreshExtensionTime != nil { + in, out := &in.TokenRefreshExtensionTime, &out.TokenRefreshExtensionTime + *out = new(float64) + **out = **in + } + if in.TokenStoreEnabled != nil { + in, out := &in.TokenStoreEnabled, &out.TokenStoreEnabled + *out = new(bool) + **out = **in + } + if in.TokenStorePath != nil { + in, out := &in.TokenStorePath, &out.TokenStorePath + *out = new(string) + **out = **in + } + if in.TokenStoreSASSettingName != nil { + in, out := &in.TokenStoreSASSettingName, &out.TokenStoreSASSettingName + *out = new(string) + **out = **in + } + if in.ValidateNonce != nil { + in, out := &in.ValidateNonce, &out.ValidateNonce + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSettingsV2LoginInitParameters. 
+func (in *AuthSettingsV2LoginInitParameters) DeepCopy() *AuthSettingsV2LoginInitParameters { + if in == nil { + return nil + } + out := new(AuthSettingsV2LoginInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthSettingsV2LoginObservation) DeepCopyInto(out *AuthSettingsV2LoginObservation) { + *out = *in + if in.AllowedExternalRedirectUrls != nil { + in, out := &in.AllowedExternalRedirectUrls, &out.AllowedExternalRedirectUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CookieExpirationConvention != nil { + in, out := &in.CookieExpirationConvention, &out.CookieExpirationConvention + *out = new(string) + **out = **in + } + if in.CookieExpirationTime != nil { + in, out := &in.CookieExpirationTime, &out.CookieExpirationTime + *out = new(string) + **out = **in + } + if in.LogoutEndpoint != nil { + in, out := &in.LogoutEndpoint, &out.LogoutEndpoint + *out = new(string) + **out = **in + } + if in.NonceExpirationTime != nil { + in, out := &in.NonceExpirationTime, &out.NonceExpirationTime + *out = new(string) + **out = **in + } + if in.PreserveURLFragmentsForLogins != nil { + in, out := &in.PreserveURLFragmentsForLogins, &out.PreserveURLFragmentsForLogins + *out = new(bool) + **out = **in + } + if in.TokenRefreshExtensionTime != nil { + in, out := &in.TokenRefreshExtensionTime, &out.TokenRefreshExtensionTime + *out = new(float64) + **out = **in + } + if in.TokenStoreEnabled != nil { + in, out := &in.TokenStoreEnabled, &out.TokenStoreEnabled + *out = new(bool) + **out = **in + } + if in.TokenStorePath != nil { + in, out := &in.TokenStorePath, &out.TokenStorePath + *out = new(string) + **out = **in + } + if in.TokenStoreSASSettingName != nil { + in, out := &in.TokenStoreSASSettingName, &out.TokenStoreSASSettingName + 
*out = new(string) + **out = **in + } + if in.ValidateNonce != nil { + in, out := &in.ValidateNonce, &out.ValidateNonce + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSettingsV2LoginObservation. +func (in *AuthSettingsV2LoginObservation) DeepCopy() *AuthSettingsV2LoginObservation { + if in == nil { + return nil + } + out := new(AuthSettingsV2LoginObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthSettingsV2LoginParameters) DeepCopyInto(out *AuthSettingsV2LoginParameters) { + *out = *in + if in.AllowedExternalRedirectUrls != nil { + in, out := &in.AllowedExternalRedirectUrls, &out.AllowedExternalRedirectUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CookieExpirationConvention != nil { + in, out := &in.CookieExpirationConvention, &out.CookieExpirationConvention + *out = new(string) + **out = **in + } + if in.CookieExpirationTime != nil { + in, out := &in.CookieExpirationTime, &out.CookieExpirationTime + *out = new(string) + **out = **in + } + if in.LogoutEndpoint != nil { + in, out := &in.LogoutEndpoint, &out.LogoutEndpoint + *out = new(string) + **out = **in + } + if in.NonceExpirationTime != nil { + in, out := &in.NonceExpirationTime, &out.NonceExpirationTime + *out = new(string) + **out = **in + } + if in.PreserveURLFragmentsForLogins != nil { + in, out := &in.PreserveURLFragmentsForLogins, &out.PreserveURLFragmentsForLogins + *out = new(bool) + **out = **in + } + if in.TokenRefreshExtensionTime != nil { + in, out := &in.TokenRefreshExtensionTime, &out.TokenRefreshExtensionTime + *out = new(float64) + **out = **in + } + if in.TokenStoreEnabled != nil { + in, out := &in.TokenStoreEnabled, &out.TokenStoreEnabled + *out 
= new(bool) + **out = **in + } + if in.TokenStorePath != nil { + in, out := &in.TokenStorePath, &out.TokenStorePath + *out = new(string) + **out = **in + } + if in.TokenStoreSASSettingName != nil { + in, out := &in.TokenStoreSASSettingName, &out.TokenStoreSASSettingName + *out = new(string) + **out = **in + } + if in.ValidateNonce != nil { + in, out := &in.ValidateNonce, &out.ValidateNonce + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSettingsV2LoginParameters. +func (in *AuthSettingsV2LoginParameters) DeepCopy() *AuthSettingsV2LoginParameters { + if in == nil { + return nil + } + out := new(AuthSettingsV2LoginParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthSettingsV2MicrosoftV2InitParameters) DeepCopyInto(out *AuthSettingsV2MicrosoftV2InitParameters) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSettingsV2MicrosoftV2InitParameters. 
+func (in *AuthSettingsV2MicrosoftV2InitParameters) DeepCopy() *AuthSettingsV2MicrosoftV2InitParameters { + if in == nil { + return nil + } + out := new(AuthSettingsV2MicrosoftV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthSettingsV2MicrosoftV2Observation) DeepCopyInto(out *AuthSettingsV2MicrosoftV2Observation) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSettingsV2MicrosoftV2Observation. +func (in *AuthSettingsV2MicrosoftV2Observation) DeepCopy() *AuthSettingsV2MicrosoftV2Observation { + if in == nil { + return nil + } + out := new(AuthSettingsV2MicrosoftV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuthSettingsV2MicrosoftV2Parameters) DeepCopyInto(out *AuthSettingsV2MicrosoftV2Parameters) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSettingsV2MicrosoftV2Parameters. +func (in *AuthSettingsV2MicrosoftV2Parameters) DeepCopy() *AuthSettingsV2MicrosoftV2Parameters { + if in == nil { + return nil + } + out := new(AuthSettingsV2MicrosoftV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuthSettingsV2Observation) DeepCopyInto(out *AuthSettingsV2Observation) { + *out = *in + if in.ActiveDirectoryV2 != nil { + in, out := &in.ActiveDirectoryV2, &out.ActiveDirectoryV2 + *out = new(ActiveDirectoryV2Observation) + (*in).DeepCopyInto(*out) + } + if in.AppleV2 != nil { + in, out := &in.AppleV2, &out.AppleV2 + *out = new(AppleV2Observation) + (*in).DeepCopyInto(*out) + } + if in.AuthEnabled != nil { + in, out := &in.AuthEnabled, &out.AuthEnabled + *out = new(bool) + **out = **in + } + if in.AzureStaticWebAppV2 != nil { + in, out := &in.AzureStaticWebAppV2, &out.AzureStaticWebAppV2 + *out = new(AzureStaticWebAppV2Observation) + (*in).DeepCopyInto(*out) + } + if in.ConfigFilePath != nil { + in, out := &in.ConfigFilePath, &out.ConfigFilePath + *out = new(string) + **out = **in + } + if in.CustomOidcV2 != nil { + in, out := &in.CustomOidcV2, &out.CustomOidcV2 + *out = make([]CustomOidcV2Observation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultProvider != nil { + in, out := &in.DefaultProvider, &out.DefaultProvider + *out = new(string) + **out = **in + } + if in.ExcludedPaths != nil { + in, out := &in.ExcludedPaths, &out.ExcludedPaths + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FacebookV2 != nil { + in, out := &in.FacebookV2, &out.FacebookV2 + *out = new(FacebookV2Observation) + (*in).DeepCopyInto(*out) + } + if in.ForwardProxyConvention != nil { + in, out := &in.ForwardProxyConvention, &out.ForwardProxyConvention + *out = new(string) + **out = **in + } + if in.ForwardProxyCustomHostHeaderName != nil { + in, out := &in.ForwardProxyCustomHostHeaderName, &out.ForwardProxyCustomHostHeaderName + *out = new(string) + **out = **in + } + if in.ForwardProxyCustomSchemeHeaderName != nil { + in, out := &in.ForwardProxyCustomSchemeHeaderName, &out.ForwardProxyCustomSchemeHeaderName + *out 
= new(string) + **out = **in + } + if in.GithubV2 != nil { + in, out := &in.GithubV2, &out.GithubV2 + *out = new(GithubV2Observation) + (*in).DeepCopyInto(*out) + } + if in.GoogleV2 != nil { + in, out := &in.GoogleV2, &out.GoogleV2 + *out = new(GoogleV2Observation) + (*in).DeepCopyInto(*out) + } + if in.HTTPRouteAPIPrefix != nil { + in, out := &in.HTTPRouteAPIPrefix, &out.HTTPRouteAPIPrefix + *out = new(string) + **out = **in + } + if in.Login != nil { + in, out := &in.Login, &out.Login + *out = new(LoginObservation) + (*in).DeepCopyInto(*out) + } + if in.MicrosoftV2 != nil { + in, out := &in.MicrosoftV2, &out.MicrosoftV2 + *out = new(MicrosoftV2Observation) + (*in).DeepCopyInto(*out) + } + if in.RequireAuthentication != nil { + in, out := &in.RequireAuthentication, &out.RequireAuthentication + *out = new(bool) + **out = **in + } + if in.RequireHTTPS != nil { + in, out := &in.RequireHTTPS, &out.RequireHTTPS + *out = new(bool) + **out = **in + } + if in.RuntimeVersion != nil { + in, out := &in.RuntimeVersion, &out.RuntimeVersion + *out = new(string) + **out = **in + } + if in.TwitterV2 != nil { + in, out := &in.TwitterV2, &out.TwitterV2 + *out = new(TwitterV2Observation) + (*in).DeepCopyInto(*out) + } + if in.UnauthenticatedAction != nil { + in, out := &in.UnauthenticatedAction, &out.UnauthenticatedAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSettingsV2Observation. +func (in *AuthSettingsV2Observation) DeepCopy() *AuthSettingsV2Observation { + if in == nil { + return nil + } + out := new(AuthSettingsV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuthSettingsV2Parameters) DeepCopyInto(out *AuthSettingsV2Parameters) { + *out = *in + if in.ActiveDirectoryV2 != nil { + in, out := &in.ActiveDirectoryV2, &out.ActiveDirectoryV2 + *out = new(ActiveDirectoryV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.AppleV2 != nil { + in, out := &in.AppleV2, &out.AppleV2 + *out = new(AppleV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.AuthEnabled != nil { + in, out := &in.AuthEnabled, &out.AuthEnabled + *out = new(bool) + **out = **in + } + if in.AzureStaticWebAppV2 != nil { + in, out := &in.AzureStaticWebAppV2, &out.AzureStaticWebAppV2 + *out = new(AzureStaticWebAppV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.ConfigFilePath != nil { + in, out := &in.ConfigFilePath, &out.ConfigFilePath + *out = new(string) + **out = **in + } + if in.CustomOidcV2 != nil { + in, out := &in.CustomOidcV2, &out.CustomOidcV2 + *out = make([]CustomOidcV2Parameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultProvider != nil { + in, out := &in.DefaultProvider, &out.DefaultProvider + *out = new(string) + **out = **in + } + if in.ExcludedPaths != nil { + in, out := &in.ExcludedPaths, &out.ExcludedPaths + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FacebookV2 != nil { + in, out := &in.FacebookV2, &out.FacebookV2 + *out = new(FacebookV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.ForwardProxyConvention != nil { + in, out := &in.ForwardProxyConvention, &out.ForwardProxyConvention + *out = new(string) + **out = **in + } + if in.ForwardProxyCustomHostHeaderName != nil { + in, out := &in.ForwardProxyCustomHostHeaderName, &out.ForwardProxyCustomHostHeaderName + *out = new(string) + **out = **in + } + if in.ForwardProxyCustomSchemeHeaderName != nil { + in, out := &in.ForwardProxyCustomSchemeHeaderName, &out.ForwardProxyCustomSchemeHeaderName + *out = 
new(string) + **out = **in + } + if in.GithubV2 != nil { + in, out := &in.GithubV2, &out.GithubV2 + *out = new(GithubV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.GoogleV2 != nil { + in, out := &in.GoogleV2, &out.GoogleV2 + *out = new(GoogleV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.HTTPRouteAPIPrefix != nil { + in, out := &in.HTTPRouteAPIPrefix, &out.HTTPRouteAPIPrefix + *out = new(string) + **out = **in + } + if in.Login != nil { + in, out := &in.Login, &out.Login + *out = new(LoginParameters) + (*in).DeepCopyInto(*out) + } + if in.MicrosoftV2 != nil { + in, out := &in.MicrosoftV2, &out.MicrosoftV2 + *out = new(MicrosoftV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.RequireAuthentication != nil { + in, out := &in.RequireAuthentication, &out.RequireAuthentication + *out = new(bool) + **out = **in + } + if in.RequireHTTPS != nil { + in, out := &in.RequireHTTPS, &out.RequireHTTPS + *out = new(bool) + **out = **in + } + if in.RuntimeVersion != nil { + in, out := &in.RuntimeVersion, &out.RuntimeVersion + *out = new(string) + **out = **in + } + if in.TwitterV2 != nil { + in, out := &in.TwitterV2, &out.TwitterV2 + *out = new(TwitterV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.UnauthenticatedAction != nil { + in, out := &in.UnauthenticatedAction, &out.UnauthenticatedAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSettingsV2Parameters. +func (in *AuthSettingsV2Parameters) DeepCopy() *AuthSettingsV2Parameters { + if in == nil { + return nil + } + out := new(AuthSettingsV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuthSettingsV2TwitterV2InitParameters) DeepCopyInto(out *AuthSettingsV2TwitterV2InitParameters) { + *out = *in + if in.ConsumerKey != nil { + in, out := &in.ConsumerKey, &out.ConsumerKey + *out = new(string) + **out = **in + } + if in.ConsumerSecretSettingName != nil { + in, out := &in.ConsumerSecretSettingName, &out.ConsumerSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSettingsV2TwitterV2InitParameters. +func (in *AuthSettingsV2TwitterV2InitParameters) DeepCopy() *AuthSettingsV2TwitterV2InitParameters { + if in == nil { + return nil + } + out := new(AuthSettingsV2TwitterV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthSettingsV2TwitterV2Observation) DeepCopyInto(out *AuthSettingsV2TwitterV2Observation) { + *out = *in + if in.ConsumerKey != nil { + in, out := &in.ConsumerKey, &out.ConsumerKey + *out = new(string) + **out = **in + } + if in.ConsumerSecretSettingName != nil { + in, out := &in.ConsumerSecretSettingName, &out.ConsumerSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSettingsV2TwitterV2Observation. +func (in *AuthSettingsV2TwitterV2Observation) DeepCopy() *AuthSettingsV2TwitterV2Observation { + if in == nil { + return nil + } + out := new(AuthSettingsV2TwitterV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuthSettingsV2TwitterV2Parameters) DeepCopyInto(out *AuthSettingsV2TwitterV2Parameters) { + *out = *in + if in.ConsumerKey != nil { + in, out := &in.ConsumerKey, &out.ConsumerKey + *out = new(string) + **out = **in + } + if in.ConsumerSecretSettingName != nil { + in, out := &in.ConsumerSecretSettingName, &out.ConsumerSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSettingsV2TwitterV2Parameters. +func (in *AuthSettingsV2TwitterV2Parameters) DeepCopy() *AuthSettingsV2TwitterV2Parameters { + if in == nil { + return nil + } + out := new(AuthSettingsV2TwitterV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoHealSettingActionInitParameters) DeepCopyInto(out *AutoHealSettingActionInitParameters) { + *out = *in + if in.ActionType != nil { + in, out := &in.ActionType, &out.ActionType + *out = new(string) + **out = **in + } + if in.MinimumProcessExecutionTime != nil { + in, out := &in.MinimumProcessExecutionTime, &out.MinimumProcessExecutionTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoHealSettingActionInitParameters. +func (in *AutoHealSettingActionInitParameters) DeepCopy() *AutoHealSettingActionInitParameters { + if in == nil { + return nil + } + out := new(AutoHealSettingActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutoHealSettingActionObservation) DeepCopyInto(out *AutoHealSettingActionObservation) { + *out = *in + if in.ActionType != nil { + in, out := &in.ActionType, &out.ActionType + *out = new(string) + **out = **in + } + if in.MinimumProcessExecutionTime != nil { + in, out := &in.MinimumProcessExecutionTime, &out.MinimumProcessExecutionTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoHealSettingActionObservation. +func (in *AutoHealSettingActionObservation) DeepCopy() *AutoHealSettingActionObservation { + if in == nil { + return nil + } + out := new(AutoHealSettingActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoHealSettingActionParameters) DeepCopyInto(out *AutoHealSettingActionParameters) { + *out = *in + if in.ActionType != nil { + in, out := &in.ActionType, &out.ActionType + *out = new(string) + **out = **in + } + if in.MinimumProcessExecutionTime != nil { + in, out := &in.MinimumProcessExecutionTime, &out.MinimumProcessExecutionTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoHealSettingActionParameters. +func (in *AutoHealSettingActionParameters) DeepCopy() *AutoHealSettingActionParameters { + if in == nil { + return nil + } + out := new(AutoHealSettingActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutoHealSettingInitParameters) DeepCopyInto(out *AutoHealSettingInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(ActionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Trigger != nil { + in, out := &in.Trigger, &out.Trigger + *out = new(TriggerInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoHealSettingInitParameters. +func (in *AutoHealSettingInitParameters) DeepCopy() *AutoHealSettingInitParameters { + if in == nil { + return nil + } + out := new(AutoHealSettingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoHealSettingObservation) DeepCopyInto(out *AutoHealSettingObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(ActionObservation) + (*in).DeepCopyInto(*out) + } + if in.Trigger != nil { + in, out := &in.Trigger, &out.Trigger + *out = new(TriggerObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoHealSettingObservation. +func (in *AutoHealSettingObservation) DeepCopy() *AutoHealSettingObservation { + if in == nil { + return nil + } + out := new(AutoHealSettingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutoHealSettingParameters) DeepCopyInto(out *AutoHealSettingParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(ActionParameters) + (*in).DeepCopyInto(*out) + } + if in.Trigger != nil { + in, out := &in.Trigger, &out.Trigger + *out = new(TriggerParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoHealSettingParameters. +func (in *AutoHealSettingParameters) DeepCopy() *AutoHealSettingParameters { + if in == nil { + return nil + } + out := new(AutoHealSettingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoHealSettingTriggerInitParameters) DeepCopyInto(out *AutoHealSettingTriggerInitParameters) { + *out = *in + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = new(TriggerRequestsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SlowRequest != nil { + in, out := &in.SlowRequest, &out.SlowRequest + *out = new(TriggerSlowRequestInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StatusCode != nil { + in, out := &in.StatusCode, &out.StatusCode + *out = make([]TriggerStatusCodeInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoHealSettingTriggerInitParameters. +func (in *AutoHealSettingTriggerInitParameters) DeepCopy() *AutoHealSettingTriggerInitParameters { + if in == nil { + return nil + } + out := new(AutoHealSettingTriggerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutoHealSettingTriggerObservation) DeepCopyInto(out *AutoHealSettingTriggerObservation) { + *out = *in + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = new(TriggerRequestsObservation) + (*in).DeepCopyInto(*out) + } + if in.SlowRequest != nil { + in, out := &in.SlowRequest, &out.SlowRequest + *out = new(TriggerSlowRequestObservation) + (*in).DeepCopyInto(*out) + } + if in.StatusCode != nil { + in, out := &in.StatusCode, &out.StatusCode + *out = make([]TriggerStatusCodeObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoHealSettingTriggerObservation. +func (in *AutoHealSettingTriggerObservation) DeepCopy() *AutoHealSettingTriggerObservation { + if in == nil { + return nil + } + out := new(AutoHealSettingTriggerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoHealSettingTriggerParameters) DeepCopyInto(out *AutoHealSettingTriggerParameters) { + *out = *in + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = new(TriggerRequestsParameters) + (*in).DeepCopyInto(*out) + } + if in.SlowRequest != nil { + in, out := &in.SlowRequest, &out.SlowRequest + *out = new(TriggerSlowRequestParameters) + (*in).DeepCopyInto(*out) + } + if in.StatusCode != nil { + in, out := &in.StatusCode, &out.StatusCode + *out = make([]TriggerStatusCodeParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoHealSettingTriggerParameters. 
+func (in *AutoHealSettingTriggerParameters) DeepCopy() *AutoHealSettingTriggerParameters { + if in == nil { + return nil + } + out := new(AutoHealSettingTriggerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoHealSettingTriggerRequestsInitParameters) DeepCopyInto(out *AutoHealSettingTriggerRequestsInitParameters) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoHealSettingTriggerRequestsInitParameters. +func (in *AutoHealSettingTriggerRequestsInitParameters) DeepCopy() *AutoHealSettingTriggerRequestsInitParameters { + if in == nil { + return nil + } + out := new(AutoHealSettingTriggerRequestsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoHealSettingTriggerRequestsObservation) DeepCopyInto(out *AutoHealSettingTriggerRequestsObservation) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoHealSettingTriggerRequestsObservation. +func (in *AutoHealSettingTriggerRequestsObservation) DeepCopy() *AutoHealSettingTriggerRequestsObservation { + if in == nil { + return nil + } + out := new(AutoHealSettingTriggerRequestsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *AutoHealSettingTriggerRequestsParameters) DeepCopyInto(out *AutoHealSettingTriggerRequestsParameters) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoHealSettingTriggerRequestsParameters. +func (in *AutoHealSettingTriggerRequestsParameters) DeepCopy() *AutoHealSettingTriggerRequestsParameters { + if in == nil { + return nil + } + out := new(AutoHealSettingTriggerRequestsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoHealSettingTriggerSlowRequestInitParameters) DeepCopyInto(out *AutoHealSettingTriggerSlowRequestInitParameters) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.TimeTaken != nil { + in, out := &in.TimeTaken, &out.TimeTaken + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoHealSettingTriggerSlowRequestInitParameters. +func (in *AutoHealSettingTriggerSlowRequestInitParameters) DeepCopy() *AutoHealSettingTriggerSlowRequestInitParameters { + if in == nil { + return nil + } + out := new(AutoHealSettingTriggerSlowRequestInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutoHealSettingTriggerSlowRequestObservation) DeepCopyInto(out *AutoHealSettingTriggerSlowRequestObservation) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.TimeTaken != nil { + in, out := &in.TimeTaken, &out.TimeTaken + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoHealSettingTriggerSlowRequestObservation. +func (in *AutoHealSettingTriggerSlowRequestObservation) DeepCopy() *AutoHealSettingTriggerSlowRequestObservation { + if in == nil { + return nil + } + out := new(AutoHealSettingTriggerSlowRequestObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoHealSettingTriggerSlowRequestParameters) DeepCopyInto(out *AutoHealSettingTriggerSlowRequestParameters) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.TimeTaken != nil { + in, out := &in.TimeTaken, &out.TimeTaken + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoHealSettingTriggerSlowRequestParameters. 
+func (in *AutoHealSettingTriggerSlowRequestParameters) DeepCopy() *AutoHealSettingTriggerSlowRequestParameters { + if in == nil { + return nil + } + out := new(AutoHealSettingTriggerSlowRequestParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoHealSettingTriggerStatusCodeInitParameters) DeepCopyInto(out *AutoHealSettingTriggerStatusCodeInitParameters) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.StatusCodeRange != nil { + in, out := &in.StatusCodeRange, &out.StatusCodeRange + *out = new(string) + **out = **in + } + if in.SubStatus != nil { + in, out := &in.SubStatus, &out.SubStatus + *out = new(float64) + **out = **in + } + if in.Win32StatusCode != nil { + in, out := &in.Win32StatusCode, &out.Win32StatusCode + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoHealSettingTriggerStatusCodeInitParameters. +func (in *AutoHealSettingTriggerStatusCodeInitParameters) DeepCopy() *AutoHealSettingTriggerStatusCodeInitParameters { + if in == nil { + return nil + } + out := new(AutoHealSettingTriggerStatusCodeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutoHealSettingTriggerStatusCodeObservation) DeepCopyInto(out *AutoHealSettingTriggerStatusCodeObservation) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.StatusCodeRange != nil { + in, out := &in.StatusCodeRange, &out.StatusCodeRange + *out = new(string) + **out = **in + } + if in.SubStatus != nil { + in, out := &in.SubStatus, &out.SubStatus + *out = new(float64) + **out = **in + } + if in.Win32StatusCode != nil { + in, out := &in.Win32StatusCode, &out.Win32StatusCode + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoHealSettingTriggerStatusCodeObservation. +func (in *AutoHealSettingTriggerStatusCodeObservation) DeepCopy() *AutoHealSettingTriggerStatusCodeObservation { + if in == nil { + return nil + } + out := new(AutoHealSettingTriggerStatusCodeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutoHealSettingTriggerStatusCodeParameters) DeepCopyInto(out *AutoHealSettingTriggerStatusCodeParameters) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.StatusCodeRange != nil { + in, out := &in.StatusCodeRange, &out.StatusCodeRange + *out = new(string) + **out = **in + } + if in.SubStatus != nil { + in, out := &in.SubStatus, &out.SubStatus + *out = new(float64) + **out = **in + } + if in.Win32StatusCode != nil { + in, out := &in.Win32StatusCode, &out.Win32StatusCode + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoHealSettingTriggerStatusCodeParameters. +func (in *AutoHealSettingTriggerStatusCodeParameters) DeepCopy() *AutoHealSettingTriggerStatusCodeParameters { + if in == nil { + return nil + } + out := new(AutoHealSettingTriggerStatusCodeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureBlobStorageInitParameters) DeepCopyInto(out *AzureBlobStorageInitParameters) { + *out = *in + if in.Level != nil { + in, out := &in.Level, &out.Level + *out = new(string) + **out = **in + } + if in.RetentionInDays != nil { + in, out := &in.RetentionInDays, &out.RetentionInDays + *out = new(float64) + **out = **in + } + if in.SASURL != nil { + in, out := &in.SASURL, &out.SASURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureBlobStorageInitParameters. 
+func (in *AzureBlobStorageInitParameters) DeepCopy() *AzureBlobStorageInitParameters { + if in == nil { + return nil + } + out := new(AzureBlobStorageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureBlobStorageObservation) DeepCopyInto(out *AzureBlobStorageObservation) { + *out = *in + if in.Level != nil { + in, out := &in.Level, &out.Level + *out = new(string) + **out = **in + } + if in.RetentionInDays != nil { + in, out := &in.RetentionInDays, &out.RetentionInDays + *out = new(float64) + **out = **in + } + if in.SASURL != nil { + in, out := &in.SASURL, &out.SASURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureBlobStorageObservation. +func (in *AzureBlobStorageObservation) DeepCopy() *AzureBlobStorageObservation { + if in == nil { + return nil + } + out := new(AzureBlobStorageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureBlobStorageParameters) DeepCopyInto(out *AzureBlobStorageParameters) { + *out = *in + if in.Level != nil { + in, out := &in.Level, &out.Level + *out = new(string) + **out = **in + } + if in.RetentionInDays != nil { + in, out := &in.RetentionInDays, &out.RetentionInDays + *out = new(float64) + **out = **in + } + if in.SASURL != nil { + in, out := &in.SASURL, &out.SASURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureBlobStorageParameters. 
+func (in *AzureBlobStorageParameters) DeepCopy() *AzureBlobStorageParameters { + if in == nil { + return nil + } + out := new(AzureBlobStorageParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureStaticWebAppV2InitParameters) DeepCopyInto(out *AzureStaticWebAppV2InitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureStaticWebAppV2InitParameters. +func (in *AzureStaticWebAppV2InitParameters) DeepCopy() *AzureStaticWebAppV2InitParameters { + if in == nil { + return nil + } + out := new(AzureStaticWebAppV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureStaticWebAppV2Observation) DeepCopyInto(out *AzureStaticWebAppV2Observation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureStaticWebAppV2Observation. +func (in *AzureStaticWebAppV2Observation) DeepCopy() *AzureStaticWebAppV2Observation { + if in == nil { + return nil + } + out := new(AzureStaticWebAppV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureStaticWebAppV2Parameters) DeepCopyInto(out *AzureStaticWebAppV2Parameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureStaticWebAppV2Parameters. 
+func (in *AzureStaticWebAppV2Parameters) DeepCopy() *AzureStaticWebAppV2Parameters { + if in == nil { + return nil + } + out := new(AzureStaticWebAppV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupInitParameters) DeepCopyInto(out *BackupInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(ScheduleInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupInitParameters. +func (in *BackupInitParameters) DeepCopy() *BackupInitParameters { + if in == nil { + return nil + } + out := new(BackupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupObservation) DeepCopyInto(out *BackupObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(ScheduleObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupObservation. +func (in *BackupObservation) DeepCopy() *BackupObservation { + if in == nil { + return nil + } + out := new(BackupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupParameters) DeepCopyInto(out *BackupParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(ScheduleParameters) + (*in).DeepCopyInto(*out) + } + out.StorageAccountURLSecretRef = in.StorageAccountURLSecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupParameters. +func (in *BackupParameters) DeepCopy() *BackupParameters { + if in == nil { + return nil + } + out := new(BackupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupScheduleInitParameters) DeepCopyInto(out *BackupScheduleInitParameters) { + *out = *in + if in.FrequencyInterval != nil { + in, out := &in.FrequencyInterval, &out.FrequencyInterval + *out = new(float64) + **out = **in + } + if in.FrequencyUnit != nil { + in, out := &in.FrequencyUnit, &out.FrequencyUnit + *out = new(string) + **out = **in + } + if in.KeepAtLeastOneBackup != nil { + in, out := &in.KeepAtLeastOneBackup, &out.KeepAtLeastOneBackup + *out = new(bool) + **out = **in + } + if in.RetentionPeriodDays != nil { + in, out := &in.RetentionPeriodDays, &out.RetentionPeriodDays + *out = new(float64) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupScheduleInitParameters. 
+func (in *BackupScheduleInitParameters) DeepCopy() *BackupScheduleInitParameters { + if in == nil { + return nil + } + out := new(BackupScheduleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupScheduleObservation) DeepCopyInto(out *BackupScheduleObservation) { + *out = *in + if in.FrequencyInterval != nil { + in, out := &in.FrequencyInterval, &out.FrequencyInterval + *out = new(float64) + **out = **in + } + if in.FrequencyUnit != nil { + in, out := &in.FrequencyUnit, &out.FrequencyUnit + *out = new(string) + **out = **in + } + if in.KeepAtLeastOneBackup != nil { + in, out := &in.KeepAtLeastOneBackup, &out.KeepAtLeastOneBackup + *out = new(bool) + **out = **in + } + if in.LastExecutionTime != nil { + in, out := &in.LastExecutionTime, &out.LastExecutionTime + *out = new(string) + **out = **in + } + if in.RetentionPeriodDays != nil { + in, out := &in.RetentionPeriodDays, &out.RetentionPeriodDays + *out = new(float64) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupScheduleObservation. +func (in *BackupScheduleObservation) DeepCopy() *BackupScheduleObservation { + if in == nil { + return nil + } + out := new(BackupScheduleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupScheduleParameters) DeepCopyInto(out *BackupScheduleParameters) { + *out = *in + if in.FrequencyInterval != nil { + in, out := &in.FrequencyInterval, &out.FrequencyInterval + *out = new(float64) + **out = **in + } + if in.FrequencyUnit != nil { + in, out := &in.FrequencyUnit, &out.FrequencyUnit + *out = new(string) + **out = **in + } + if in.KeepAtLeastOneBackup != nil { + in, out := &in.KeepAtLeastOneBackup, &out.KeepAtLeastOneBackup + *out = new(bool) + **out = **in + } + if in.RetentionPeriodDays != nil { + in, out := &in.RetentionPeriodDays, &out.RetentionPeriodDays + *out = new(float64) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupScheduleParameters. +func (in *BackupScheduleParameters) DeepCopy() *BackupScheduleParameters { + if in == nil { + return nil + } + out := new(BackupScheduleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionStringInitParameters) DeepCopyInto(out *ConnectionStringInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionStringInitParameters. +func (in *ConnectionStringInitParameters) DeepCopy() *ConnectionStringInitParameters { + if in == nil { + return nil + } + out := new(ConnectionStringInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConnectionStringObservation) DeepCopyInto(out *ConnectionStringObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionStringObservation. +func (in *ConnectionStringObservation) DeepCopy() *ConnectionStringObservation { + if in == nil { + return nil + } + out := new(ConnectionStringObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionStringParameters) DeepCopyInto(out *ConnectionStringParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + out.ValueSecretRef = in.ValueSecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionStringParameters. +func (in *ConnectionStringParameters) DeepCopy() *ConnectionStringParameters { + if in == nil { + return nil + } + out := new(ConnectionStringParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CorsInitParameters) DeepCopyInto(out *CorsInitParameters) { + *out = *in + if in.AllowedOrigins != nil { + in, out := &in.AllowedOrigins, &out.AllowedOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SupportCredentials != nil { + in, out := &in.SupportCredentials, &out.SupportCredentials + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CorsInitParameters. +func (in *CorsInitParameters) DeepCopy() *CorsInitParameters { + if in == nil { + return nil + } + out := new(CorsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CorsObservation) DeepCopyInto(out *CorsObservation) { + *out = *in + if in.AllowedOrigins != nil { + in, out := &in.AllowedOrigins, &out.AllowedOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SupportCredentials != nil { + in, out := &in.SupportCredentials, &out.SupportCredentials + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CorsObservation. +func (in *CorsObservation) DeepCopy() *CorsObservation { + if in == nil { + return nil + } + out := new(CorsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CorsParameters) DeepCopyInto(out *CorsParameters) { + *out = *in + if in.AllowedOrigins != nil { + in, out := &in.AllowedOrigins, &out.AllowedOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SupportCredentials != nil { + in, out := &in.SupportCredentials, &out.SupportCredentials + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CorsParameters. +func (in *CorsParameters) DeepCopy() *CorsParameters { + if in == nil { + return nil + } + out := new(CorsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomActionInitParameters) DeepCopyInto(out *CustomActionInitParameters) { + *out = *in + if in.Executable != nil { + in, out := &in.Executable, &out.Executable + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomActionInitParameters. +func (in *CustomActionInitParameters) DeepCopy() *CustomActionInitParameters { + if in == nil { + return nil + } + out := new(CustomActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomActionObservation) DeepCopyInto(out *CustomActionObservation) { + *out = *in + if in.Executable != nil { + in, out := &in.Executable, &out.Executable + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomActionObservation. +func (in *CustomActionObservation) DeepCopy() *CustomActionObservation { + if in == nil { + return nil + } + out := new(CustomActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomActionParameters) DeepCopyInto(out *CustomActionParameters) { + *out = *in + if in.Executable != nil { + in, out := &in.Executable, &out.Executable + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomActionParameters. +func (in *CustomActionParameters) DeepCopy() *CustomActionParameters { + if in == nil { + return nil + } + out := new(CustomActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomOidcV2InitParameters) DeepCopyInto(out *CustomOidcV2InitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NameClaimType != nil { + in, out := &in.NameClaimType, &out.NameClaimType + *out = new(string) + **out = **in + } + if in.OpenIDConfigurationEndpoint != nil { + in, out := &in.OpenIDConfigurationEndpoint, &out.OpenIDConfigurationEndpoint + *out = new(string) + **out = **in + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomOidcV2InitParameters. +func (in *CustomOidcV2InitParameters) DeepCopy() *CustomOidcV2InitParameters { + if in == nil { + return nil + } + out := new(CustomOidcV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomOidcV2Observation) DeepCopyInto(out *CustomOidcV2Observation) { + *out = *in + if in.AuthorisationEndpoint != nil { + in, out := &in.AuthorisationEndpoint, &out.AuthorisationEndpoint + *out = new(string) + **out = **in + } + if in.CertificationURI != nil { + in, out := &in.CertificationURI, &out.CertificationURI + *out = new(string) + **out = **in + } + if in.ClientCredentialMethod != nil { + in, out := &in.ClientCredentialMethod, &out.ClientCredentialMethod + *out = new(string) + **out = **in + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.IssuerEndpoint != nil { + in, out := &in.IssuerEndpoint, &out.IssuerEndpoint + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NameClaimType != nil { + in, out := &in.NameClaimType, &out.NameClaimType + *out = new(string) + **out = **in + } + if in.OpenIDConfigurationEndpoint != nil { + in, out := &in.OpenIDConfigurationEndpoint, &out.OpenIDConfigurationEndpoint + *out = new(string) + **out = **in + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TokenEndpoint != nil { + in, out := &in.TokenEndpoint, &out.TokenEndpoint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomOidcV2Observation. 
+func (in *CustomOidcV2Observation) DeepCopy() *CustomOidcV2Observation { + if in == nil { + return nil + } + out := new(CustomOidcV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomOidcV2Parameters) DeepCopyInto(out *CustomOidcV2Parameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NameClaimType != nil { + in, out := &in.NameClaimType, &out.NameClaimType + *out = new(string) + **out = **in + } + if in.OpenIDConfigurationEndpoint != nil { + in, out := &in.OpenIDConfigurationEndpoint, &out.OpenIDConfigurationEndpoint + *out = new(string) + **out = **in + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomOidcV2Parameters. +func (in *CustomOidcV2Parameters) DeepCopy() *CustomOidcV2Parameters { + if in == nil { + return nil + } + out := new(CustomOidcV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DockerInitParameters) DeepCopyInto(out *DockerInitParameters) { + *out = *in + if in.ImageName != nil { + in, out := &in.ImageName, &out.ImageName + *out = new(string) + **out = **in + } + if in.ImageTag != nil { + in, out := &in.ImageTag, &out.ImageTag + *out = new(string) + **out = **in + } + if in.RegistryURL != nil { + in, out := &in.RegistryURL, &out.RegistryURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerInitParameters. +func (in *DockerInitParameters) DeepCopy() *DockerInitParameters { + if in == nil { + return nil + } + out := new(DockerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DockerObservation) DeepCopyInto(out *DockerObservation) { + *out = *in + if in.ImageName != nil { + in, out := &in.ImageName, &out.ImageName + *out = new(string) + **out = **in + } + if in.ImageTag != nil { + in, out := &in.ImageTag, &out.ImageTag + *out = new(string) + **out = **in + } + if in.RegistryURL != nil { + in, out := &in.RegistryURL, &out.RegistryURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerObservation. +func (in *DockerObservation) DeepCopy() *DockerObservation { + if in == nil { + return nil + } + out := new(DockerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DockerParameters) DeepCopyInto(out *DockerParameters) { + *out = *in + if in.ImageName != nil { + in, out := &in.ImageName, &out.ImageName + *out = new(string) + **out = **in + } + if in.ImageTag != nil { + in, out := &in.ImageTag, &out.ImageTag + *out = new(string) + **out = **in + } + if in.RegistryPasswordSecretRef != nil { + in, out := &in.RegistryPasswordSecretRef, &out.RegistryPasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.RegistryURL != nil { + in, out := &in.RegistryURL, &out.RegistryURL + *out = new(string) + **out = **in + } + if in.RegistryUsernameSecretRef != nil { + in, out := &in.RegistryUsernameSecretRef, &out.RegistryUsernameSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerParameters. +func (in *DockerParameters) DeepCopy() *DockerParameters { + if in == nil { + return nil + } + out := new(DockerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FacebookInitParameters) DeepCopyInto(out *FacebookInitParameters) { + *out = *in + if in.AppID != nil { + in, out := &in.AppID, &out.AppID + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FacebookInitParameters. +func (in *FacebookInitParameters) DeepCopy() *FacebookInitParameters { + if in == nil { + return nil + } + out := new(FacebookInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FacebookObservation) DeepCopyInto(out *FacebookObservation) { + *out = *in + if in.AppID != nil { + in, out := &in.AppID, &out.AppID + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FacebookObservation. +func (in *FacebookObservation) DeepCopy() *FacebookObservation { + if in == nil { + return nil + } + out := new(FacebookObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FacebookParameters) DeepCopyInto(out *FacebookParameters) { + *out = *in + if in.AppID != nil { + in, out := &in.AppID, &out.AppID + *out = new(string) + **out = **in + } + out.AppSecretSecretRef = in.AppSecretSecretRef + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FacebookParameters. +func (in *FacebookParameters) DeepCopy() *FacebookParameters { + if in == nil { + return nil + } + out := new(FacebookParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FacebookV2InitParameters) DeepCopyInto(out *FacebookV2InitParameters) { + *out = *in + if in.AppID != nil { + in, out := &in.AppID, &out.AppID + *out = new(string) + **out = **in + } + if in.AppSecretSettingName != nil { + in, out := &in.AppSecretSettingName, &out.AppSecretSettingName + *out = new(string) + **out = **in + } + if in.GraphAPIVersion != nil { + in, out := &in.GraphAPIVersion, &out.GraphAPIVersion + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FacebookV2InitParameters. +func (in *FacebookV2InitParameters) DeepCopy() *FacebookV2InitParameters { + if in == nil { + return nil + } + out := new(FacebookV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FacebookV2Observation) DeepCopyInto(out *FacebookV2Observation) { + *out = *in + if in.AppID != nil { + in, out := &in.AppID, &out.AppID + *out = new(string) + **out = **in + } + if in.AppSecretSettingName != nil { + in, out := &in.AppSecretSettingName, &out.AppSecretSettingName + *out = new(string) + **out = **in + } + if in.GraphAPIVersion != nil { + in, out := &in.GraphAPIVersion, &out.GraphAPIVersion + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FacebookV2Observation. 
+func (in *FacebookV2Observation) DeepCopy() *FacebookV2Observation { + if in == nil { + return nil + } + out := new(FacebookV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FacebookV2Parameters) DeepCopyInto(out *FacebookV2Parameters) { + *out = *in + if in.AppID != nil { + in, out := &in.AppID, &out.AppID + *out = new(string) + **out = **in + } + if in.AppSecretSettingName != nil { + in, out := &in.AppSecretSettingName, &out.AppSecretSettingName + *out = new(string) + **out = **in + } + if in.GraphAPIVersion != nil { + in, out := &in.GraphAPIVersion, &out.GraphAPIVersion + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FacebookV2Parameters. +func (in *FacebookV2Parameters) DeepCopy() *FacebookV2Parameters { + if in == nil { + return nil + } + out := new(FacebookV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FileSystemInitParameters) DeepCopyInto(out *FileSystemInitParameters) { + *out = *in + if in.RetentionInDays != nil { + in, out := &in.RetentionInDays, &out.RetentionInDays + *out = new(float64) + **out = **in + } + if in.RetentionInMb != nil { + in, out := &in.RetentionInMb, &out.RetentionInMb + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileSystemInitParameters. 
+func (in *FileSystemInitParameters) DeepCopy() *FileSystemInitParameters { + if in == nil { + return nil + } + out := new(FileSystemInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FileSystemObservation) DeepCopyInto(out *FileSystemObservation) { + *out = *in + if in.RetentionInDays != nil { + in, out := &in.RetentionInDays, &out.RetentionInDays + *out = new(float64) + **out = **in + } + if in.RetentionInMb != nil { + in, out := &in.RetentionInMb, &out.RetentionInMb + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileSystemObservation. +func (in *FileSystemObservation) DeepCopy() *FileSystemObservation { + if in == nil { + return nil + } + out := new(FileSystemObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FileSystemParameters) DeepCopyInto(out *FileSystemParameters) { + *out = *in + if in.RetentionInDays != nil { + in, out := &in.RetentionInDays, &out.RetentionInDays + *out = new(float64) + **out = **in + } + if in.RetentionInMb != nil { + in, out := &in.RetentionInMb, &out.RetentionInMb + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileSystemParameters. +func (in *FileSystemParameters) DeepCopy() *FileSystemParameters { + if in == nil { + return nil + } + out := new(FileSystemParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FunctionApp) DeepCopyInto(out *FunctionApp) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionApp. +func (in *FunctionApp) DeepCopy() *FunctionApp { + if in == nil { + return nil + } + out := new(FunctionApp) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FunctionApp) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionAppInitParameters) DeepCopyInto(out *FunctionAppInitParameters) { + *out = *in + if in.AppServicePlanID != nil { + in, out := &in.AppServicePlanID, &out.AppServicePlanID + *out = new(string) + **out = **in + } + if in.AppServicePlanIDRef != nil { + in, out := &in.AppServicePlanIDRef, &out.AppServicePlanIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AppServicePlanIDSelector != nil { + in, out := &in.AppServicePlanIDSelector, &out.AppServicePlanIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.AppSettings != nil { + in, out := &in.AppSettings, &out.AppSettings + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AuthSettings != nil { + in, out := &in.AuthSettings, &out.AuthSettings + *out = new(AuthSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ClientCertMode != nil { + in, out := &in.ClientCertMode, &out.ClientCertMode + *out = new(string) + **out = 
**in + } + if in.ConnectionString != nil { + in, out := &in.ConnectionString, &out.ConnectionString + *out = make([]ConnectionStringInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DailyMemoryTimeQuota != nil { + in, out := &in.DailyMemoryTimeQuota, &out.DailyMemoryTimeQuota + *out = new(float64) + **out = **in + } + if in.EnableBuiltinLogging != nil { + in, out := &in.EnableBuiltinLogging, &out.EnableBuiltinLogging + *out = new(bool) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.HTTPSOnly != nil { + in, out := &in.HTTPSOnly, &out.HTTPSOnly + *out = new(bool) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.KeyVaultReferenceIdentityID != nil { + in, out := &in.KeyVaultReferenceIdentityID, &out.KeyVaultReferenceIdentityID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OsType != nil { + in, out := &in.OsType, &out.OsType + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SiteConfig != nil { + in, out := &in.SiteConfig, &out.SiteConfig + *out = new(SiteConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SourceControl != nil { + in, out := &in.SourceControl, 
&out.SourceControl + *out = new(SourceControlInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StorageAccountName != nil { + in, out := &in.StorageAccountName, &out.StorageAccountName + *out = new(string) + **out = **in + } + if in.StorageAccountNameRef != nil { + in, out := &in.StorageAccountNameRef, &out.StorageAccountNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StorageAccountNameSelector != nil { + in, out := &in.StorageAccountNameSelector, &out.StorageAccountNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionAppInitParameters. +func (in *FunctionAppInitParameters) DeepCopy() *FunctionAppInitParameters { + if in == nil { + return nil + } + out := new(FunctionAppInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionAppList) DeepCopyInto(out *FunctionAppList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]FunctionApp, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionAppList. 
+func (in *FunctionAppList) DeepCopy() *FunctionAppList { + if in == nil { + return nil + } + out := new(FunctionAppList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FunctionAppList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionAppObservation) DeepCopyInto(out *FunctionAppObservation) { + *out = *in + if in.AppServicePlanID != nil { + in, out := &in.AppServicePlanID, &out.AppServicePlanID + *out = new(string) + **out = **in + } + if in.AppSettings != nil { + in, out := &in.AppSettings, &out.AppSettings + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AuthSettings != nil { + in, out := &in.AuthSettings, &out.AuthSettings + *out = new(AuthSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.ClientCertMode != nil { + in, out := &in.ClientCertMode, &out.ClientCertMode + *out = new(string) + **out = **in + } + if in.ConnectionString != nil { + in, out := &in.ConnectionString, &out.ConnectionString + *out = make([]ConnectionStringObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CustomDomainVerificationID != nil { + in, out := &in.CustomDomainVerificationID, &out.CustomDomainVerificationID + *out = new(string) + **out = **in + } + if in.DailyMemoryTimeQuota != nil { + in, out := &in.DailyMemoryTimeQuota, &out.DailyMemoryTimeQuota + *out = new(float64) + **out = **in + } + if in.DefaultHostName != nil { + in, out := &in.DefaultHostName, &out.DefaultHostName + *out = new(string) + **out = 
**in + } + if in.EnableBuiltinLogging != nil { + in, out := &in.EnableBuiltinLogging, &out.EnableBuiltinLogging + *out = new(bool) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.HTTPSOnly != nil { + in, out := &in.HTTPSOnly, &out.HTTPSOnly + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.KeyVaultReferenceIdentityID != nil { + in, out := &in.KeyVaultReferenceIdentityID, &out.KeyVaultReferenceIdentityID + *out = new(string) + **out = **in + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OsType != nil { + in, out := &in.OsType, &out.OsType + *out = new(string) + **out = **in + } + if in.OutboundIPAddresses != nil { + in, out := &in.OutboundIPAddresses, &out.OutboundIPAddresses + *out = new(string) + **out = **in + } + if in.PossibleOutboundIPAddresses != nil { + in, out := &in.PossibleOutboundIPAddresses, &out.PossibleOutboundIPAddresses + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.SiteConfig != nil { + in, out := &in.SiteConfig, &out.SiteConfig + *out = new(SiteConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.SiteCredential != nil { + in, out := &in.SiteCredential, &out.SiteCredential + *out = make([]SiteCredentialObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SourceControl != nil { + in, out := &in.SourceControl, 
&out.SourceControl + *out = new(SourceControlObservation) + (*in).DeepCopyInto(*out) + } + if in.StorageAccountName != nil { + in, out := &in.StorageAccountName, &out.StorageAccountName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionAppObservation. +func (in *FunctionAppObservation) DeepCopy() *FunctionAppObservation { + if in == nil { + return nil + } + out := new(FunctionAppObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FunctionAppParameters) DeepCopyInto(out *FunctionAppParameters) { + *out = *in + if in.AppServicePlanID != nil { + in, out := &in.AppServicePlanID, &out.AppServicePlanID + *out = new(string) + **out = **in + } + if in.AppServicePlanIDRef != nil { + in, out := &in.AppServicePlanIDRef, &out.AppServicePlanIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AppServicePlanIDSelector != nil { + in, out := &in.AppServicePlanIDSelector, &out.AppServicePlanIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.AppSettings != nil { + in, out := &in.AppSettings, &out.AppSettings + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AuthSettings != nil { + in, out := &in.AuthSettings, &out.AuthSettings + *out = new(AuthSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.ClientCertMode != nil { + in, out := &in.ClientCertMode, &out.ClientCertMode + *out = new(string) + **out = **in + } + if in.ConnectionString != nil { + in, out := &in.ConnectionString, &out.ConnectionString + *out = make([]ConnectionStringParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DailyMemoryTimeQuota != nil { + in, out := &in.DailyMemoryTimeQuota, &out.DailyMemoryTimeQuota + *out = new(float64) + **out = **in + } + if in.EnableBuiltinLogging != nil { + in, out := &in.EnableBuiltinLogging, &out.EnableBuiltinLogging + *out = new(bool) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.HTTPSOnly != nil { + in, out := &in.HTTPSOnly, &out.HTTPSOnly + *out = new(bool) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityParameters) + (*in).DeepCopyInto(*out) 
+ } + if in.KeyVaultReferenceIdentityID != nil { + in, out := &in.KeyVaultReferenceIdentityID, &out.KeyVaultReferenceIdentityID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OsType != nil { + in, out := &in.OsType, &out.OsType + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SiteConfig != nil { + in, out := &in.SiteConfig, &out.SiteConfig + *out = new(SiteConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.SourceControl != nil { + in, out := &in.SourceControl, &out.SourceControl + *out = new(SourceControlParameters) + (*in).DeepCopyInto(*out) + } + out.StorageAccountAccessKeySecretRef = in.StorageAccountAccessKeySecretRef + if in.StorageAccountName != nil { + in, out := &in.StorageAccountName, &out.StorageAccountName + *out = new(string) + **out = **in + } + if in.StorageAccountNameRef != nil { + in, out := &in.StorageAccountNameRef, &out.StorageAccountNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StorageAccountNameSelector != nil { + in, out := &in.StorageAccountNameSelector, &out.StorageAccountNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := 
(*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionAppParameters. +func (in *FunctionAppParameters) DeepCopy() *FunctionAppParameters { + if in == nil { + return nil + } + out := new(FunctionAppParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionAppSlot) DeepCopyInto(out *FunctionAppSlot) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionAppSlot. +func (in *FunctionAppSlot) DeepCopy() *FunctionAppSlot { + if in == nil { + return nil + } + out := new(FunctionAppSlot) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FunctionAppSlot) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FunctionAppSlotAuthSettingsInitParameters) DeepCopyInto(out *FunctionAppSlotAuthSettingsInitParameters) { + *out = *in + if in.ActiveDirectory != nil { + in, out := &in.ActiveDirectory, &out.ActiveDirectory + *out = new(AuthSettingsActiveDirectoryInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AdditionalLoginParams != nil { + in, out := &in.AdditionalLoginParams, &out.AdditionalLoginParams + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AllowedExternalRedirectUrls != nil { + in, out := &in.AllowedExternalRedirectUrls, &out.AllowedExternalRedirectUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DefaultProvider != nil { + in, out := &in.DefaultProvider, &out.DefaultProvider + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Facebook != nil { + in, out := &in.Facebook, &out.Facebook + *out = new(AuthSettingsFacebookInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Google != nil { + in, out := &in.Google, &out.Google + *out = new(AuthSettingsGoogleInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } + if in.Microsoft != nil { + in, out := &in.Microsoft, &out.Microsoft + *out = new(AuthSettingsMicrosoftInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RuntimeVersion != nil { + in, out := &in.RuntimeVersion, &out.RuntimeVersion + *out = new(string) + **out = **in + } + if in.TokenRefreshExtensionHours != nil { + in, out := &in.TokenRefreshExtensionHours, &out.TokenRefreshExtensionHours + *out = 
new(float64) + **out = **in + } + if in.TokenStoreEnabled != nil { + in, out := &in.TokenStoreEnabled, &out.TokenStoreEnabled + *out = new(bool) + **out = **in + } + if in.Twitter != nil { + in, out := &in.Twitter, &out.Twitter + *out = new(AuthSettingsTwitterInitParameters) + (*in).DeepCopyInto(*out) + } + if in.UnauthenticatedClientAction != nil { + in, out := &in.UnauthenticatedClientAction, &out.UnauthenticatedClientAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionAppSlotAuthSettingsInitParameters. +func (in *FunctionAppSlotAuthSettingsInitParameters) DeepCopy() *FunctionAppSlotAuthSettingsInitParameters { + if in == nil { + return nil + } + out := new(FunctionAppSlotAuthSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionAppSlotAuthSettingsObservation) DeepCopyInto(out *FunctionAppSlotAuthSettingsObservation) { + *out = *in + if in.ActiveDirectory != nil { + in, out := &in.ActiveDirectory, &out.ActiveDirectory + *out = new(AuthSettingsActiveDirectoryObservation) + (*in).DeepCopyInto(*out) + } + if in.AdditionalLoginParams != nil { + in, out := &in.AdditionalLoginParams, &out.AdditionalLoginParams + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AllowedExternalRedirectUrls != nil { + in, out := &in.AllowedExternalRedirectUrls, &out.AllowedExternalRedirectUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DefaultProvider != nil { + in, out := &in.DefaultProvider, 
&out.DefaultProvider + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Facebook != nil { + in, out := &in.Facebook, &out.Facebook + *out = new(AuthSettingsFacebookObservation) + (*in).DeepCopyInto(*out) + } + if in.Google != nil { + in, out := &in.Google, &out.Google + *out = new(AuthSettingsGoogleObservation) + (*in).DeepCopyInto(*out) + } + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } + if in.Microsoft != nil { + in, out := &in.Microsoft, &out.Microsoft + *out = new(AuthSettingsMicrosoftObservation) + (*in).DeepCopyInto(*out) + } + if in.RuntimeVersion != nil { + in, out := &in.RuntimeVersion, &out.RuntimeVersion + *out = new(string) + **out = **in + } + if in.TokenRefreshExtensionHours != nil { + in, out := &in.TokenRefreshExtensionHours, &out.TokenRefreshExtensionHours + *out = new(float64) + **out = **in + } + if in.TokenStoreEnabled != nil { + in, out := &in.TokenStoreEnabled, &out.TokenStoreEnabled + *out = new(bool) + **out = **in + } + if in.Twitter != nil { + in, out := &in.Twitter, &out.Twitter + *out = new(AuthSettingsTwitterObservation) + (*in).DeepCopyInto(*out) + } + if in.UnauthenticatedClientAction != nil { + in, out := &in.UnauthenticatedClientAction, &out.UnauthenticatedClientAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionAppSlotAuthSettingsObservation. +func (in *FunctionAppSlotAuthSettingsObservation) DeepCopy() *FunctionAppSlotAuthSettingsObservation { + if in == nil { + return nil + } + out := new(FunctionAppSlotAuthSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FunctionAppSlotAuthSettingsParameters) DeepCopyInto(out *FunctionAppSlotAuthSettingsParameters) { + *out = *in + if in.ActiveDirectory != nil { + in, out := &in.ActiveDirectory, &out.ActiveDirectory + *out = new(AuthSettingsActiveDirectoryParameters) + (*in).DeepCopyInto(*out) + } + if in.AdditionalLoginParams != nil { + in, out := &in.AdditionalLoginParams, &out.AdditionalLoginParams + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AllowedExternalRedirectUrls != nil { + in, out := &in.AllowedExternalRedirectUrls, &out.AllowedExternalRedirectUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DefaultProvider != nil { + in, out := &in.DefaultProvider, &out.DefaultProvider + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Facebook != nil { + in, out := &in.Facebook, &out.Facebook + *out = new(AuthSettingsFacebookParameters) + (*in).DeepCopyInto(*out) + } + if in.Google != nil { + in, out := &in.Google, &out.Google + *out = new(AuthSettingsGoogleParameters) + (*in).DeepCopyInto(*out) + } + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } + if in.Microsoft != nil { + in, out := &in.Microsoft, &out.Microsoft + *out = new(AuthSettingsMicrosoftParameters) + (*in).DeepCopyInto(*out) + } + if in.RuntimeVersion != nil { + in, out := &in.RuntimeVersion, &out.RuntimeVersion + *out = new(string) + **out = **in + } + if in.TokenRefreshExtensionHours != nil { + in, out := &in.TokenRefreshExtensionHours, &out.TokenRefreshExtensionHours + *out = new(float64) + **out = **in + } + 
if in.TokenStoreEnabled != nil { + in, out := &in.TokenStoreEnabled, &out.TokenStoreEnabled + *out = new(bool) + **out = **in + } + if in.Twitter != nil { + in, out := &in.Twitter, &out.Twitter + *out = new(AuthSettingsTwitterParameters) + (*in).DeepCopyInto(*out) + } + if in.UnauthenticatedClientAction != nil { + in, out := &in.UnauthenticatedClientAction, &out.UnauthenticatedClientAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionAppSlotAuthSettingsParameters. +func (in *FunctionAppSlotAuthSettingsParameters) DeepCopy() *FunctionAppSlotAuthSettingsParameters { + if in == nil { + return nil + } + out := new(FunctionAppSlotAuthSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionAppSlotConnectionStringInitParameters) DeepCopyInto(out *FunctionAppSlotConnectionStringInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionAppSlotConnectionStringInitParameters. +func (in *FunctionAppSlotConnectionStringInitParameters) DeepCopy() *FunctionAppSlotConnectionStringInitParameters { + if in == nil { + return nil + } + out := new(FunctionAppSlotConnectionStringInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FunctionAppSlotConnectionStringObservation) DeepCopyInto(out *FunctionAppSlotConnectionStringObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionAppSlotConnectionStringObservation. +func (in *FunctionAppSlotConnectionStringObservation) DeepCopy() *FunctionAppSlotConnectionStringObservation { + if in == nil { + return nil + } + out := new(FunctionAppSlotConnectionStringObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionAppSlotConnectionStringParameters) DeepCopyInto(out *FunctionAppSlotConnectionStringParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + out.ValueSecretRef = in.ValueSecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionAppSlotConnectionStringParameters. +func (in *FunctionAppSlotConnectionStringParameters) DeepCopy() *FunctionAppSlotConnectionStringParameters { + if in == nil { + return nil + } + out := new(FunctionAppSlotConnectionStringParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FunctionAppSlotIdentityInitParameters) DeepCopyInto(out *FunctionAppSlotIdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionAppSlotIdentityInitParameters. +func (in *FunctionAppSlotIdentityInitParameters) DeepCopy() *FunctionAppSlotIdentityInitParameters { + if in == nil { + return nil + } + out := new(FunctionAppSlotIdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionAppSlotIdentityObservation) DeepCopyInto(out *FunctionAppSlotIdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionAppSlotIdentityObservation. 
+func (in *FunctionAppSlotIdentityObservation) DeepCopy() *FunctionAppSlotIdentityObservation { + if in == nil { + return nil + } + out := new(FunctionAppSlotIdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionAppSlotIdentityParameters) DeepCopyInto(out *FunctionAppSlotIdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionAppSlotIdentityParameters. +func (in *FunctionAppSlotIdentityParameters) DeepCopy() *FunctionAppSlotIdentityParameters { + if in == nil { + return nil + } + out := new(FunctionAppSlotIdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FunctionAppSlotInitParameters) DeepCopyInto(out *FunctionAppSlotInitParameters) { + *out = *in + if in.AppServicePlanID != nil { + in, out := &in.AppServicePlanID, &out.AppServicePlanID + *out = new(string) + **out = **in + } + if in.AppServicePlanIDRef != nil { + in, out := &in.AppServicePlanIDRef, &out.AppServicePlanIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AppServicePlanIDSelector != nil { + in, out := &in.AppServicePlanIDSelector, &out.AppServicePlanIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.AppSettings != nil { + in, out := &in.AppSettings, &out.AppSettings + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AuthSettings != nil { + in, out := &in.AuthSettings, &out.AuthSettings + *out = new(FunctionAppSlotAuthSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ConnectionString != nil { + in, out := &in.ConnectionString, &out.ConnectionString + *out = make([]FunctionAppSlotConnectionStringInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DailyMemoryTimeQuota != nil { + in, out := &in.DailyMemoryTimeQuota, &out.DailyMemoryTimeQuota + *out = new(float64) + **out = **in + } + if in.EnableBuiltinLogging != nil { + in, out := &in.EnableBuiltinLogging, &out.EnableBuiltinLogging + *out = new(bool) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.HTTPSOnly != nil { + in, out := &in.HTTPSOnly, &out.HTTPSOnly + *out = new(bool) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(FunctionAppSlotIdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := 
&in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.OsType != nil { + in, out := &in.OsType, &out.OsType + *out = new(string) + **out = **in + } + if in.SiteConfig != nil { + in, out := &in.SiteConfig, &out.SiteConfig + *out = new(FunctionAppSlotSiteConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StorageAccountName != nil { + in, out := &in.StorageAccountName, &out.StorageAccountName + *out = new(string) + **out = **in + } + if in.StorageAccountNameRef != nil { + in, out := &in.StorageAccountNameRef, &out.StorageAccountNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StorageAccountNameSelector != nil { + in, out := &in.StorageAccountNameSelector, &out.StorageAccountNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionAppSlotInitParameters. +func (in *FunctionAppSlotInitParameters) DeepCopy() *FunctionAppSlotInitParameters { + if in == nil { + return nil + } + out := new(FunctionAppSlotInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FunctionAppSlotList) DeepCopyInto(out *FunctionAppSlotList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]FunctionAppSlot, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionAppSlotList. +func (in *FunctionAppSlotList) DeepCopy() *FunctionAppSlotList { + if in == nil { + return nil + } + out := new(FunctionAppSlotList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FunctionAppSlotList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionAppSlotObservation) DeepCopyInto(out *FunctionAppSlotObservation) { + *out = *in + if in.AppServicePlanID != nil { + in, out := &in.AppServicePlanID, &out.AppServicePlanID + *out = new(string) + **out = **in + } + if in.AppSettings != nil { + in, out := &in.AppSettings, &out.AppSettings + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AuthSettings != nil { + in, out := &in.AuthSettings, &out.AuthSettings + *out = new(FunctionAppSlotAuthSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.ConnectionString != nil { + in, out := &in.ConnectionString, &out.ConnectionString + *out = make([]FunctionAppSlotConnectionStringObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DailyMemoryTimeQuota != nil { + in, out := 
&in.DailyMemoryTimeQuota, &out.DailyMemoryTimeQuota + *out = new(float64) + **out = **in + } + if in.DefaultHostName != nil { + in, out := &in.DefaultHostName, &out.DefaultHostName + *out = new(string) + **out = **in + } + if in.EnableBuiltinLogging != nil { + in, out := &in.EnableBuiltinLogging, &out.EnableBuiltinLogging + *out = new(bool) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.FunctionAppName != nil { + in, out := &in.FunctionAppName, &out.FunctionAppName + *out = new(string) + **out = **in + } + if in.HTTPSOnly != nil { + in, out := &in.HTTPSOnly, &out.HTTPSOnly + *out = new(bool) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(FunctionAppSlotIdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.OsType != nil { + in, out := &in.OsType, &out.OsType + *out = new(string) + **out = **in + } + if in.OutboundIPAddresses != nil { + in, out := &in.OutboundIPAddresses, &out.OutboundIPAddresses + *out = new(string) + **out = **in + } + if in.PossibleOutboundIPAddresses != nil { + in, out := &in.PossibleOutboundIPAddresses, &out.PossibleOutboundIPAddresses + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.SiteConfig != nil { + in, out := &in.SiteConfig, &out.SiteConfig + *out = new(FunctionAppSlotSiteConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.SiteCredential != nil { + in, out := &in.SiteCredential, &out.SiteCredential + *out = make([]FunctionAppSlotSiteCredentialObservation, len(*in)) + for i := 
range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StorageAccountName != nil { + in, out := &in.StorageAccountName, &out.StorageAccountName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionAppSlotObservation. +func (in *FunctionAppSlotObservation) DeepCopy() *FunctionAppSlotObservation { + if in == nil { + return nil + } + out := new(FunctionAppSlotObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FunctionAppSlotParameters) DeepCopyInto(out *FunctionAppSlotParameters) { + *out = *in + if in.AppServicePlanID != nil { + in, out := &in.AppServicePlanID, &out.AppServicePlanID + *out = new(string) + **out = **in + } + if in.AppServicePlanIDRef != nil { + in, out := &in.AppServicePlanIDRef, &out.AppServicePlanIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AppServicePlanIDSelector != nil { + in, out := &in.AppServicePlanIDSelector, &out.AppServicePlanIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.AppSettings != nil { + in, out := &in.AppSettings, &out.AppSettings + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AuthSettings != nil { + in, out := &in.AuthSettings, &out.AuthSettings + *out = new(FunctionAppSlotAuthSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.ConnectionString != nil { + in, out := &in.ConnectionString, &out.ConnectionString + *out = make([]FunctionAppSlotConnectionStringParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DailyMemoryTimeQuota != nil { + in, out := &in.DailyMemoryTimeQuota, &out.DailyMemoryTimeQuota + *out = new(float64) + **out = **in + } + if in.EnableBuiltinLogging != nil { + in, out := &in.EnableBuiltinLogging, &out.EnableBuiltinLogging + *out = new(bool) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.FunctionAppName != nil { + in, out := &in.FunctionAppName, &out.FunctionAppName + *out = new(string) + **out = **in + } + if in.FunctionAppNameRef != nil { + in, out := &in.FunctionAppNameRef, &out.FunctionAppNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FunctionAppNameSelector != nil 
{ + in, out := &in.FunctionAppNameSelector, &out.FunctionAppNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.HTTPSOnly != nil { + in, out := &in.HTTPSOnly, &out.HTTPSOnly + *out = new(bool) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(FunctionAppSlotIdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.OsType != nil { + in, out := &in.OsType, &out.OsType + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SiteConfig != nil { + in, out := &in.SiteConfig, &out.SiteConfig + *out = new(FunctionAppSlotSiteConfigParameters) + (*in).DeepCopyInto(*out) + } + out.StorageAccountAccessKeySecretRef = in.StorageAccountAccessKeySecretRef + if in.StorageAccountName != nil { + in, out := &in.StorageAccountName, &out.StorageAccountName + *out = new(string) + **out = **in + } + if in.StorageAccountNameRef != nil { + in, out := &in.StorageAccountNameRef, &out.StorageAccountNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StorageAccountNameSelector != nil { + in, out := &in.StorageAccountNameSelector, &out.StorageAccountNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, 
out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionAppSlotParameters. +func (in *FunctionAppSlotParameters) DeepCopy() *FunctionAppSlotParameters { + if in == nil { + return nil + } + out := new(FunctionAppSlotParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionAppSlotSiteConfigInitParameters) DeepCopyInto(out *FunctionAppSlotSiteConfigInitParameters) { + *out = *in + if in.AlwaysOn != nil { + in, out := &in.AlwaysOn, &out.AlwaysOn + *out = new(bool) + **out = **in + } + if in.AppScaleLimit != nil { + in, out := &in.AppScaleLimit, &out.AppScaleLimit + *out = new(float64) + **out = **in + } + if in.AutoSwapSlotName != nil { + in, out := &in.AutoSwapSlotName, &out.AutoSwapSlotName + *out = new(string) + **out = **in + } + if in.Cors != nil { + in, out := &in.Cors, &out.Cors + *out = new(SiteConfigCorsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DotnetFrameworkVersion != nil { + in, out := &in.DotnetFrameworkVersion, &out.DotnetFrameworkVersion + *out = new(string) + **out = **in + } + if in.ElasticInstanceMinimum != nil { + in, out := &in.ElasticInstanceMinimum, &out.ElasticInstanceMinimum + *out = new(float64) + **out = **in + } + if in.FtpsState != nil { + in, out := &in.FtpsState, &out.FtpsState + *out = new(string) + **out = **in + } + if in.HealthCheckPath != nil { + in, out := &in.HealthCheckPath, &out.HealthCheckPath + *out = new(string) + **out = **in + } + if in.Http2Enabled != nil { + in, out := &in.Http2Enabled, &out.Http2Enabled + *out = new(bool) + **out = **in + } + if in.IPRestriction != nil { + in, out := &in.IPRestriction, &out.IPRestriction + *out = 
make([]SiteConfigIPRestrictionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.JavaVersion != nil { + in, out := &in.JavaVersion, &out.JavaVersion + *out = new(string) + **out = **in + } + if in.LinuxFxVersion != nil { + in, out := &in.LinuxFxVersion, &out.LinuxFxVersion + *out = new(string) + **out = **in + } + if in.MinTLSVersion != nil { + in, out := &in.MinTLSVersion, &out.MinTLSVersion + *out = new(string) + **out = **in + } + if in.PreWarmedInstanceCount != nil { + in, out := &in.PreWarmedInstanceCount, &out.PreWarmedInstanceCount + *out = new(float64) + **out = **in + } + if in.RuntimeScaleMonitoringEnabled != nil { + in, out := &in.RuntimeScaleMonitoringEnabled, &out.RuntimeScaleMonitoringEnabled + *out = new(bool) + **out = **in + } + if in.ScmIPRestriction != nil { + in, out := &in.ScmIPRestriction, &out.ScmIPRestriction + *out = make([]SiteConfigScmIPRestrictionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ScmType != nil { + in, out := &in.ScmType, &out.ScmType + *out = new(string) + **out = **in + } + if in.ScmUseMainIPRestriction != nil { + in, out := &in.ScmUseMainIPRestriction, &out.ScmUseMainIPRestriction + *out = new(bool) + **out = **in + } + if in.Use32BitWorkerProcess != nil { + in, out := &in.Use32BitWorkerProcess, &out.Use32BitWorkerProcess + *out = new(bool) + **out = **in + } + if in.VnetRouteAllEnabled != nil { + in, out := &in.VnetRouteAllEnabled, &out.VnetRouteAllEnabled + *out = new(bool) + **out = **in + } + if in.WebsocketsEnabled != nil { + in, out := &in.WebsocketsEnabled, &out.WebsocketsEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionAppSlotSiteConfigInitParameters. 
+func (in *FunctionAppSlotSiteConfigInitParameters) DeepCopy() *FunctionAppSlotSiteConfigInitParameters { + if in == nil { + return nil + } + out := new(FunctionAppSlotSiteConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionAppSlotSiteConfigObservation) DeepCopyInto(out *FunctionAppSlotSiteConfigObservation) { + *out = *in + if in.AlwaysOn != nil { + in, out := &in.AlwaysOn, &out.AlwaysOn + *out = new(bool) + **out = **in + } + if in.AppScaleLimit != nil { + in, out := &in.AppScaleLimit, &out.AppScaleLimit + *out = new(float64) + **out = **in + } + if in.AutoSwapSlotName != nil { + in, out := &in.AutoSwapSlotName, &out.AutoSwapSlotName + *out = new(string) + **out = **in + } + if in.Cors != nil { + in, out := &in.Cors, &out.Cors + *out = new(SiteConfigCorsObservation) + (*in).DeepCopyInto(*out) + } + if in.DotnetFrameworkVersion != nil { + in, out := &in.DotnetFrameworkVersion, &out.DotnetFrameworkVersion + *out = new(string) + **out = **in + } + if in.ElasticInstanceMinimum != nil { + in, out := &in.ElasticInstanceMinimum, &out.ElasticInstanceMinimum + *out = new(float64) + **out = **in + } + if in.FtpsState != nil { + in, out := &in.FtpsState, &out.FtpsState + *out = new(string) + **out = **in + } + if in.HealthCheckPath != nil { + in, out := &in.HealthCheckPath, &out.HealthCheckPath + *out = new(string) + **out = **in + } + if in.Http2Enabled != nil { + in, out := &in.Http2Enabled, &out.Http2Enabled + *out = new(bool) + **out = **in + } + if in.IPRestriction != nil { + in, out := &in.IPRestriction, &out.IPRestriction + *out = make([]SiteConfigIPRestrictionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.JavaVersion != nil { + in, out := &in.JavaVersion, &out.JavaVersion + *out = new(string) + **out = **in + } + if in.LinuxFxVersion != nil { + in, out := 
&in.LinuxFxVersion, &out.LinuxFxVersion + *out = new(string) + **out = **in + } + if in.MinTLSVersion != nil { + in, out := &in.MinTLSVersion, &out.MinTLSVersion + *out = new(string) + **out = **in + } + if in.PreWarmedInstanceCount != nil { + in, out := &in.PreWarmedInstanceCount, &out.PreWarmedInstanceCount + *out = new(float64) + **out = **in + } + if in.RuntimeScaleMonitoringEnabled != nil { + in, out := &in.RuntimeScaleMonitoringEnabled, &out.RuntimeScaleMonitoringEnabled + *out = new(bool) + **out = **in + } + if in.ScmIPRestriction != nil { + in, out := &in.ScmIPRestriction, &out.ScmIPRestriction + *out = make([]SiteConfigScmIPRestrictionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ScmType != nil { + in, out := &in.ScmType, &out.ScmType + *out = new(string) + **out = **in + } + if in.ScmUseMainIPRestriction != nil { + in, out := &in.ScmUseMainIPRestriction, &out.ScmUseMainIPRestriction + *out = new(bool) + **out = **in + } + if in.Use32BitWorkerProcess != nil { + in, out := &in.Use32BitWorkerProcess, &out.Use32BitWorkerProcess + *out = new(bool) + **out = **in + } + if in.VnetRouteAllEnabled != nil { + in, out := &in.VnetRouteAllEnabled, &out.VnetRouteAllEnabled + *out = new(bool) + **out = **in + } + if in.WebsocketsEnabled != nil { + in, out := &in.WebsocketsEnabled, &out.WebsocketsEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionAppSlotSiteConfigObservation. +func (in *FunctionAppSlotSiteConfigObservation) DeepCopy() *FunctionAppSlotSiteConfigObservation { + if in == nil { + return nil + } + out := new(FunctionAppSlotSiteConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FunctionAppSlotSiteConfigParameters) DeepCopyInto(out *FunctionAppSlotSiteConfigParameters) { + *out = *in + if in.AlwaysOn != nil { + in, out := &in.AlwaysOn, &out.AlwaysOn + *out = new(bool) + **out = **in + } + if in.AppScaleLimit != nil { + in, out := &in.AppScaleLimit, &out.AppScaleLimit + *out = new(float64) + **out = **in + } + if in.AutoSwapSlotName != nil { + in, out := &in.AutoSwapSlotName, &out.AutoSwapSlotName + *out = new(string) + **out = **in + } + if in.Cors != nil { + in, out := &in.Cors, &out.Cors + *out = new(SiteConfigCorsParameters) + (*in).DeepCopyInto(*out) + } + if in.DotnetFrameworkVersion != nil { + in, out := &in.DotnetFrameworkVersion, &out.DotnetFrameworkVersion + *out = new(string) + **out = **in + } + if in.ElasticInstanceMinimum != nil { + in, out := &in.ElasticInstanceMinimum, &out.ElasticInstanceMinimum + *out = new(float64) + **out = **in + } + if in.FtpsState != nil { + in, out := &in.FtpsState, &out.FtpsState + *out = new(string) + **out = **in + } + if in.HealthCheckPath != nil { + in, out := &in.HealthCheckPath, &out.HealthCheckPath + *out = new(string) + **out = **in + } + if in.Http2Enabled != nil { + in, out := &in.Http2Enabled, &out.Http2Enabled + *out = new(bool) + **out = **in + } + if in.IPRestriction != nil { + in, out := &in.IPRestriction, &out.IPRestriction + *out = make([]SiteConfigIPRestrictionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.JavaVersion != nil { + in, out := &in.JavaVersion, &out.JavaVersion + *out = new(string) + **out = **in + } + if in.LinuxFxVersion != nil { + in, out := &in.LinuxFxVersion, &out.LinuxFxVersion + *out = new(string) + **out = **in + } + if in.MinTLSVersion != nil { + in, out := &in.MinTLSVersion, &out.MinTLSVersion + *out = new(string) + **out = **in + } + if in.PreWarmedInstanceCount != nil { + in, out := &in.PreWarmedInstanceCount, &out.PreWarmedInstanceCount + *out = new(float64) + **out = **in + } + if 
in.RuntimeScaleMonitoringEnabled != nil { + in, out := &in.RuntimeScaleMonitoringEnabled, &out.RuntimeScaleMonitoringEnabled + *out = new(bool) + **out = **in + } + if in.ScmIPRestriction != nil { + in, out := &in.ScmIPRestriction, &out.ScmIPRestriction + *out = make([]SiteConfigScmIPRestrictionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ScmType != nil { + in, out := &in.ScmType, &out.ScmType + *out = new(string) + **out = **in + } + if in.ScmUseMainIPRestriction != nil { + in, out := &in.ScmUseMainIPRestriction, &out.ScmUseMainIPRestriction + *out = new(bool) + **out = **in + } + if in.Use32BitWorkerProcess != nil { + in, out := &in.Use32BitWorkerProcess, &out.Use32BitWorkerProcess + *out = new(bool) + **out = **in + } + if in.VnetRouteAllEnabled != nil { + in, out := &in.VnetRouteAllEnabled, &out.VnetRouteAllEnabled + *out = new(bool) + **out = **in + } + if in.WebsocketsEnabled != nil { + in, out := &in.WebsocketsEnabled, &out.WebsocketsEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionAppSlotSiteConfigParameters. +func (in *FunctionAppSlotSiteConfigParameters) DeepCopy() *FunctionAppSlotSiteConfigParameters { + if in == nil { + return nil + } + out := new(FunctionAppSlotSiteConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionAppSlotSiteCredentialInitParameters) DeepCopyInto(out *FunctionAppSlotSiteCredentialInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionAppSlotSiteCredentialInitParameters. 
+func (in *FunctionAppSlotSiteCredentialInitParameters) DeepCopy() *FunctionAppSlotSiteCredentialInitParameters { + if in == nil { + return nil + } + out := new(FunctionAppSlotSiteCredentialInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionAppSlotSiteCredentialObservation) DeepCopyInto(out *FunctionAppSlotSiteCredentialObservation) { + *out = *in + if in.Password != nil { + in, out := &in.Password, &out.Password + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionAppSlotSiteCredentialObservation. +func (in *FunctionAppSlotSiteCredentialObservation) DeepCopy() *FunctionAppSlotSiteCredentialObservation { + if in == nil { + return nil + } + out := new(FunctionAppSlotSiteCredentialObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionAppSlotSiteCredentialParameters) DeepCopyInto(out *FunctionAppSlotSiteCredentialParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionAppSlotSiteCredentialParameters. +func (in *FunctionAppSlotSiteCredentialParameters) DeepCopy() *FunctionAppSlotSiteCredentialParameters { + if in == nil { + return nil + } + out := new(FunctionAppSlotSiteCredentialParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FunctionAppSlotSpec) DeepCopyInto(out *FunctionAppSlotSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionAppSlotSpec. +func (in *FunctionAppSlotSpec) DeepCopy() *FunctionAppSlotSpec { + if in == nil { + return nil + } + out := new(FunctionAppSlotSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionAppSlotStatus) DeepCopyInto(out *FunctionAppSlotStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionAppSlotStatus. +func (in *FunctionAppSlotStatus) DeepCopy() *FunctionAppSlotStatus { + if in == nil { + return nil + } + out := new(FunctionAppSlotStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionAppSpec) DeepCopyInto(out *FunctionAppSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionAppSpec. +func (in *FunctionAppSpec) DeepCopy() *FunctionAppSpec { + if in == nil { + return nil + } + out := new(FunctionAppSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FunctionAppStatus) DeepCopyInto(out *FunctionAppStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionAppStatus. +func (in *FunctionAppStatus) DeepCopy() *FunctionAppStatus { + if in == nil { + return nil + } + out := new(FunctionAppStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GithubInitParameters) DeepCopyInto(out *GithubInitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GithubInitParameters. +func (in *GithubInitParameters) DeepCopy() *GithubInitParameters { + if in == nil { + return nil + } + out := new(GithubInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GithubObservation) DeepCopyInto(out *GithubObservation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GithubObservation. +func (in *GithubObservation) DeepCopy() *GithubObservation { + if in == nil { + return nil + } + out := new(GithubObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GithubParameters) DeepCopyInto(out *GithubParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSecretRef != nil { + in, out := &in.ClientSecretSecretRef, &out.ClientSecretSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GithubParameters. 
+func (in *GithubParameters) DeepCopy() *GithubParameters { + if in == nil { + return nil + } + out := new(GithubParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GithubV2InitParameters) DeepCopyInto(out *GithubV2InitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GithubV2InitParameters. +func (in *GithubV2InitParameters) DeepCopy() *GithubV2InitParameters { + if in == nil { + return nil + } + out := new(GithubV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GithubV2Observation) DeepCopyInto(out *GithubV2Observation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GithubV2Observation. 
+func (in *GithubV2Observation) DeepCopy() *GithubV2Observation { + if in == nil { + return nil + } + out := new(GithubV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GithubV2Parameters) DeepCopyInto(out *GithubV2Parameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GithubV2Parameters. +func (in *GithubV2Parameters) DeepCopy() *GithubV2Parameters { + if in == nil { + return nil + } + out := new(GithubV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GoogleInitParameters) DeepCopyInto(out *GoogleInitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GoogleInitParameters. 
+func (in *GoogleInitParameters) DeepCopy() *GoogleInitParameters { + if in == nil { + return nil + } + out := new(GoogleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GoogleObservation) DeepCopyInto(out *GoogleObservation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GoogleObservation. +func (in *GoogleObservation) DeepCopy() *GoogleObservation { + if in == nil { + return nil + } + out := new(GoogleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GoogleParameters) DeepCopyInto(out *GoogleParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + out.ClientSecretSecretRef = in.ClientSecretSecretRef + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GoogleParameters. +func (in *GoogleParameters) DeepCopy() *GoogleParameters { + if in == nil { + return nil + } + out := new(GoogleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GoogleV2InitParameters) DeepCopyInto(out *GoogleV2InitParameters) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GoogleV2InitParameters. +func (in *GoogleV2InitParameters) DeepCopy() *GoogleV2InitParameters { + if in == nil { + return nil + } + out := new(GoogleV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GoogleV2Observation) DeepCopyInto(out *GoogleV2Observation) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GoogleV2Observation. +func (in *GoogleV2Observation) DeepCopy() *GoogleV2Observation { + if in == nil { + return nil + } + out := new(GoogleV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GoogleV2Parameters) DeepCopyInto(out *GoogleV2Parameters) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GoogleV2Parameters. +func (in *GoogleV2Parameters) DeepCopy() *GoogleV2Parameters { + if in == nil { + return nil + } + out := new(GoogleV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPLogsAzureBlobStorageInitParameters) DeepCopyInto(out *HTTPLogsAzureBlobStorageInitParameters) { + *out = *in + if in.RetentionInDays != nil { + in, out := &in.RetentionInDays, &out.RetentionInDays + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPLogsAzureBlobStorageInitParameters. +func (in *HTTPLogsAzureBlobStorageInitParameters) DeepCopy() *HTTPLogsAzureBlobStorageInitParameters { + if in == nil { + return nil + } + out := new(HTTPLogsAzureBlobStorageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPLogsAzureBlobStorageObservation) DeepCopyInto(out *HTTPLogsAzureBlobStorageObservation) { + *out = *in + if in.RetentionInDays != nil { + in, out := &in.RetentionInDays, &out.RetentionInDays + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPLogsAzureBlobStorageObservation. +func (in *HTTPLogsAzureBlobStorageObservation) DeepCopy() *HTTPLogsAzureBlobStorageObservation { + if in == nil { + return nil + } + out := new(HTTPLogsAzureBlobStorageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPLogsAzureBlobStorageParameters) DeepCopyInto(out *HTTPLogsAzureBlobStorageParameters) { + *out = *in + if in.RetentionInDays != nil { + in, out := &in.RetentionInDays, &out.RetentionInDays + *out = new(float64) + **out = **in + } + out.SASURLSecretRef = in.SASURLSecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPLogsAzureBlobStorageParameters. +func (in *HTTPLogsAzureBlobStorageParameters) DeepCopy() *HTTPLogsAzureBlobStorageParameters { + if in == nil { + return nil + } + out := new(HTTPLogsAzureBlobStorageParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPLogsFileSystemInitParameters) DeepCopyInto(out *HTTPLogsFileSystemInitParameters) { + *out = *in + if in.RetentionInDays != nil { + in, out := &in.RetentionInDays, &out.RetentionInDays + *out = new(float64) + **out = **in + } + if in.RetentionInMb != nil { + in, out := &in.RetentionInMb, &out.RetentionInMb + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPLogsFileSystemInitParameters. 
+func (in *HTTPLogsFileSystemInitParameters) DeepCopy() *HTTPLogsFileSystemInitParameters { + if in == nil { + return nil + } + out := new(HTTPLogsFileSystemInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPLogsFileSystemObservation) DeepCopyInto(out *HTTPLogsFileSystemObservation) { + *out = *in + if in.RetentionInDays != nil { + in, out := &in.RetentionInDays, &out.RetentionInDays + *out = new(float64) + **out = **in + } + if in.RetentionInMb != nil { + in, out := &in.RetentionInMb, &out.RetentionInMb + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPLogsFileSystemObservation. +func (in *HTTPLogsFileSystemObservation) DeepCopy() *HTTPLogsFileSystemObservation { + if in == nil { + return nil + } + out := new(HTTPLogsFileSystemObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPLogsFileSystemParameters) DeepCopyInto(out *HTTPLogsFileSystemParameters) { + *out = *in + if in.RetentionInDays != nil { + in, out := &in.RetentionInDays, &out.RetentionInDays + *out = new(float64) + **out = **in + } + if in.RetentionInMb != nil { + in, out := &in.RetentionInMb, &out.RetentionInMb + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPLogsFileSystemParameters. +func (in *HTTPLogsFileSystemParameters) DeepCopy() *HTTPLogsFileSystemParameters { + if in == nil { + return nil + } + out := new(HTTPLogsFileSystemParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPLogsInitParameters) DeepCopyInto(out *HTTPLogsInitParameters) { + *out = *in + if in.AzureBlobStorage != nil { + in, out := &in.AzureBlobStorage, &out.AzureBlobStorage + *out = new(HTTPLogsAzureBlobStorageInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FileSystem != nil { + in, out := &in.FileSystem, &out.FileSystem + *out = new(FileSystemInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPLogsInitParameters. +func (in *HTTPLogsInitParameters) DeepCopy() *HTTPLogsInitParameters { + if in == nil { + return nil + } + out := new(HTTPLogsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPLogsObservation) DeepCopyInto(out *HTTPLogsObservation) { + *out = *in + if in.AzureBlobStorage != nil { + in, out := &in.AzureBlobStorage, &out.AzureBlobStorage + *out = new(HTTPLogsAzureBlobStorageObservation) + (*in).DeepCopyInto(*out) + } + if in.FileSystem != nil { + in, out := &in.FileSystem, &out.FileSystem + *out = new(FileSystemObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPLogsObservation. +func (in *HTTPLogsObservation) DeepCopy() *HTTPLogsObservation { + if in == nil { + return nil + } + out := new(HTTPLogsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPLogsParameters) DeepCopyInto(out *HTTPLogsParameters) { + *out = *in + if in.AzureBlobStorage != nil { + in, out := &in.AzureBlobStorage, &out.AzureBlobStorage + *out = new(HTTPLogsAzureBlobStorageParameters) + (*in).DeepCopyInto(*out) + } + if in.FileSystem != nil { + in, out := &in.FileSystem, &out.FileSystem + *out = new(FileSystemParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPLogsParameters. +func (in *HTTPLogsParameters) DeepCopy() *HTTPLogsParameters { + if in == nil { + return nil + } + out := new(HTTPLogsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HeadersInitParameters) DeepCopyInto(out *HeadersInitParameters) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeadersInitParameters. 
+func (in *HeadersInitParameters) DeepCopy() *HeadersInitParameters { + if in == nil { + return nil + } + out := new(HeadersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HeadersObservation) DeepCopyInto(out *HeadersObservation) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeadersObservation. +func (in *HeadersObservation) DeepCopy() *HeadersObservation { + if in == nil { + return nil + } + out := new(HeadersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HeadersParameters) DeepCopyInto(out *HeadersParameters) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeadersParameters. +func (in *HeadersParameters) DeepCopy() *HeadersParameters { + if in == nil { + return nil + } + out := new(HeadersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IPRestrictionHeadersInitParameters) DeepCopyInto(out *IPRestrictionHeadersInitParameters) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPRestrictionHeadersInitParameters. +func (in *IPRestrictionHeadersInitParameters) DeepCopy() *IPRestrictionHeadersInitParameters { + if in == nil { + return nil + } + out := new(IPRestrictionHeadersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IPRestrictionHeadersObservation) DeepCopyInto(out *IPRestrictionHeadersObservation) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPRestrictionHeadersObservation. +func (in *IPRestrictionHeadersObservation) DeepCopy() *IPRestrictionHeadersObservation { + if in == nil { + return nil + } + out := new(IPRestrictionHeadersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IPRestrictionHeadersParameters) DeepCopyInto(out *IPRestrictionHeadersParameters) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPRestrictionHeadersParameters. +func (in *IPRestrictionHeadersParameters) DeepCopy() *IPRestrictionHeadersParameters { + if in == nil { + return nil + } + out := new(IPRestrictionHeadersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IPRestrictionInitParameters) DeepCopyInto(out *IPRestrictionInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]HeadersInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetIDRef != nil { + in, out := &in.VirtualNetworkSubnetIDRef, &out.VirtualNetworkSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkSubnetIDSelector != nil { + in, out := &in.VirtualNetworkSubnetIDSelector, &out.VirtualNetworkSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPRestrictionInitParameters. +func (in *IPRestrictionInitParameters) DeepCopy() *IPRestrictionInitParameters { + if in == nil { + return nil + } + out := new(IPRestrictionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IPRestrictionObservation) DeepCopyInto(out *IPRestrictionObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]HeadersObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPRestrictionObservation. +func (in *IPRestrictionObservation) DeepCopy() *IPRestrictionObservation { + if in == nil { + return nil + } + out := new(IPRestrictionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IPRestrictionParameters) DeepCopyInto(out *IPRestrictionParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]HeadersParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetIDRef != nil { + in, out := &in.VirtualNetworkSubnetIDRef, &out.VirtualNetworkSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkSubnetIDSelector != nil { + in, out := &in.VirtualNetworkSubnetIDSelector, &out.VirtualNetworkSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPRestrictionParameters. +func (in *IPRestrictionParameters) DeepCopy() *IPRestrictionParameters { + if in == nil { + return nil + } + out := new(IPRestrictionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentityInitParameters) DeepCopyInto(out *IdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityInitParameters. +func (in *IdentityInitParameters) DeepCopy() *IdentityInitParameters { + if in == nil { + return nil + } + out := new(IdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityObservation) DeepCopyInto(out *IdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityObservation. +func (in *IdentityObservation) DeepCopy() *IdentityObservation { + if in == nil { + return nil + } + out := new(IdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentityParameters) DeepCopyInto(out *IdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityParameters. +func (in *IdentityParameters) DeepCopy() *IdentityParameters { + if in == nil { + return nil + } + out := new(IdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionApp) DeepCopyInto(out *LinuxFunctionApp) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionApp. +func (in *LinuxFunctionApp) DeepCopy() *LinuxFunctionApp { + if in == nil { + return nil + } + out := new(LinuxFunctionApp) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LinuxFunctionApp) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxFunctionAppAuthSettingsActiveDirectoryInitParameters) DeepCopyInto(out *LinuxFunctionAppAuthSettingsActiveDirectoryInitParameters) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppAuthSettingsActiveDirectoryInitParameters. +func (in *LinuxFunctionAppAuthSettingsActiveDirectoryInitParameters) DeepCopy() *LinuxFunctionAppAuthSettingsActiveDirectoryInitParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppAuthSettingsActiveDirectoryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxFunctionAppAuthSettingsActiveDirectoryObservation) DeepCopyInto(out *LinuxFunctionAppAuthSettingsActiveDirectoryObservation) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppAuthSettingsActiveDirectoryObservation. +func (in *LinuxFunctionAppAuthSettingsActiveDirectoryObservation) DeepCopy() *LinuxFunctionAppAuthSettingsActiveDirectoryObservation { + if in == nil { + return nil + } + out := new(LinuxFunctionAppAuthSettingsActiveDirectoryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxFunctionAppAuthSettingsActiveDirectoryParameters) DeepCopyInto(out *LinuxFunctionAppAuthSettingsActiveDirectoryParameters) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSecretRef != nil { + in, out := &in.ClientSecretSecretRef, &out.ClientSecretSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppAuthSettingsActiveDirectoryParameters. +func (in *LinuxFunctionAppAuthSettingsActiveDirectoryParameters) DeepCopy() *LinuxFunctionAppAuthSettingsActiveDirectoryParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppAuthSettingsActiveDirectoryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxFunctionAppAuthSettingsFacebookInitParameters) DeepCopyInto(out *LinuxFunctionAppAuthSettingsFacebookInitParameters) { + *out = *in + if in.AppID != nil { + in, out := &in.AppID, &out.AppID + *out = new(string) + **out = **in + } + if in.AppSecretSettingName != nil { + in, out := &in.AppSecretSettingName, &out.AppSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppAuthSettingsFacebookInitParameters. +func (in *LinuxFunctionAppAuthSettingsFacebookInitParameters) DeepCopy() *LinuxFunctionAppAuthSettingsFacebookInitParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppAuthSettingsFacebookInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppAuthSettingsFacebookObservation) DeepCopyInto(out *LinuxFunctionAppAuthSettingsFacebookObservation) { + *out = *in + if in.AppID != nil { + in, out := &in.AppID, &out.AppID + *out = new(string) + **out = **in + } + if in.AppSecretSettingName != nil { + in, out := &in.AppSecretSettingName, &out.AppSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppAuthSettingsFacebookObservation. 
+func (in *LinuxFunctionAppAuthSettingsFacebookObservation) DeepCopy() *LinuxFunctionAppAuthSettingsFacebookObservation { + if in == nil { + return nil + } + out := new(LinuxFunctionAppAuthSettingsFacebookObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppAuthSettingsFacebookParameters) DeepCopyInto(out *LinuxFunctionAppAuthSettingsFacebookParameters) { + *out = *in + if in.AppID != nil { + in, out := &in.AppID, &out.AppID + *out = new(string) + **out = **in + } + if in.AppSecretSecretRef != nil { + in, out := &in.AppSecretSecretRef, &out.AppSecretSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.AppSecretSettingName != nil { + in, out := &in.AppSecretSettingName, &out.AppSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppAuthSettingsFacebookParameters. +func (in *LinuxFunctionAppAuthSettingsFacebookParameters) DeepCopy() *LinuxFunctionAppAuthSettingsFacebookParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppAuthSettingsFacebookParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxFunctionAppAuthSettingsGoogleInitParameters) DeepCopyInto(out *LinuxFunctionAppAuthSettingsGoogleInitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppAuthSettingsGoogleInitParameters. +func (in *LinuxFunctionAppAuthSettingsGoogleInitParameters) DeepCopy() *LinuxFunctionAppAuthSettingsGoogleInitParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppAuthSettingsGoogleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppAuthSettingsGoogleObservation) DeepCopyInto(out *LinuxFunctionAppAuthSettingsGoogleObservation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppAuthSettingsGoogleObservation. 
+func (in *LinuxFunctionAppAuthSettingsGoogleObservation) DeepCopy() *LinuxFunctionAppAuthSettingsGoogleObservation { + if in == nil { + return nil + } + out := new(LinuxFunctionAppAuthSettingsGoogleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppAuthSettingsGoogleParameters) DeepCopyInto(out *LinuxFunctionAppAuthSettingsGoogleParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSecretRef != nil { + in, out := &in.ClientSecretSecretRef, &out.ClientSecretSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppAuthSettingsGoogleParameters. +func (in *LinuxFunctionAppAuthSettingsGoogleParameters) DeepCopy() *LinuxFunctionAppAuthSettingsGoogleParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppAuthSettingsGoogleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxFunctionAppAuthSettingsInitParameters) DeepCopyInto(out *LinuxFunctionAppAuthSettingsInitParameters) { + *out = *in + if in.ActiveDirectory != nil { + in, out := &in.ActiveDirectory, &out.ActiveDirectory + *out = new(LinuxFunctionAppAuthSettingsActiveDirectoryInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AdditionalLoginParameters != nil { + in, out := &in.AdditionalLoginParameters, &out.AdditionalLoginParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AllowedExternalRedirectUrls != nil { + in, out := &in.AllowedExternalRedirectUrls, &out.AllowedExternalRedirectUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DefaultProvider != nil { + in, out := &in.DefaultProvider, &out.DefaultProvider + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Facebook != nil { + in, out := &in.Facebook, &out.Facebook + *out = new(LinuxFunctionAppAuthSettingsFacebookInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Github != nil { + in, out := &in.Github, &out.Github + *out = new(GithubInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Google != nil { + in, out := &in.Google, &out.Google + *out = new(LinuxFunctionAppAuthSettingsGoogleInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } + if in.Microsoft != nil { + in, out := &in.Microsoft, &out.Microsoft + *out = new(LinuxFunctionAppAuthSettingsMicrosoftInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RuntimeVersion != nil { + in, out := 
&in.RuntimeVersion, &out.RuntimeVersion + *out = new(string) + **out = **in + } + if in.TokenRefreshExtensionHours != nil { + in, out := &in.TokenRefreshExtensionHours, &out.TokenRefreshExtensionHours + *out = new(float64) + **out = **in + } + if in.TokenStoreEnabled != nil { + in, out := &in.TokenStoreEnabled, &out.TokenStoreEnabled + *out = new(bool) + **out = **in + } + if in.Twitter != nil { + in, out := &in.Twitter, &out.Twitter + *out = new(LinuxFunctionAppAuthSettingsTwitterInitParameters) + (*in).DeepCopyInto(*out) + } + if in.UnauthenticatedClientAction != nil { + in, out := &in.UnauthenticatedClientAction, &out.UnauthenticatedClientAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppAuthSettingsInitParameters. +func (in *LinuxFunctionAppAuthSettingsInitParameters) DeepCopy() *LinuxFunctionAppAuthSettingsInitParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppAuthSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppAuthSettingsMicrosoftInitParameters) DeepCopyInto(out *LinuxFunctionAppAuthSettingsMicrosoftInitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppAuthSettingsMicrosoftInitParameters. 
+func (in *LinuxFunctionAppAuthSettingsMicrosoftInitParameters) DeepCopy() *LinuxFunctionAppAuthSettingsMicrosoftInitParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppAuthSettingsMicrosoftInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppAuthSettingsMicrosoftObservation) DeepCopyInto(out *LinuxFunctionAppAuthSettingsMicrosoftObservation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppAuthSettingsMicrosoftObservation. +func (in *LinuxFunctionAppAuthSettingsMicrosoftObservation) DeepCopy() *LinuxFunctionAppAuthSettingsMicrosoftObservation { + if in == nil { + return nil + } + out := new(LinuxFunctionAppAuthSettingsMicrosoftObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxFunctionAppAuthSettingsMicrosoftParameters) DeepCopyInto(out *LinuxFunctionAppAuthSettingsMicrosoftParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSecretRef != nil { + in, out := &in.ClientSecretSecretRef, &out.ClientSecretSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppAuthSettingsMicrosoftParameters. +func (in *LinuxFunctionAppAuthSettingsMicrosoftParameters) DeepCopy() *LinuxFunctionAppAuthSettingsMicrosoftParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppAuthSettingsMicrosoftParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxFunctionAppAuthSettingsObservation) DeepCopyInto(out *LinuxFunctionAppAuthSettingsObservation) { + *out = *in + if in.ActiveDirectory != nil { + in, out := &in.ActiveDirectory, &out.ActiveDirectory + *out = new(LinuxFunctionAppAuthSettingsActiveDirectoryObservation) + (*in).DeepCopyInto(*out) + } + if in.AdditionalLoginParameters != nil { + in, out := &in.AdditionalLoginParameters, &out.AdditionalLoginParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AllowedExternalRedirectUrls != nil { + in, out := &in.AllowedExternalRedirectUrls, &out.AllowedExternalRedirectUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DefaultProvider != nil { + in, out := &in.DefaultProvider, &out.DefaultProvider + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Facebook != nil { + in, out := &in.Facebook, &out.Facebook + *out = new(LinuxFunctionAppAuthSettingsFacebookObservation) + (*in).DeepCopyInto(*out) + } + if in.Github != nil { + in, out := &in.Github, &out.Github + *out = new(GithubObservation) + (*in).DeepCopyInto(*out) + } + if in.Google != nil { + in, out := &in.Google, &out.Google + *out = new(LinuxFunctionAppAuthSettingsGoogleObservation) + (*in).DeepCopyInto(*out) + } + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } + if in.Microsoft != nil { + in, out := &in.Microsoft, &out.Microsoft + *out = new(LinuxFunctionAppAuthSettingsMicrosoftObservation) + (*in).DeepCopyInto(*out) + } + if in.RuntimeVersion != nil { + in, out := &in.RuntimeVersion, 
&out.RuntimeVersion + *out = new(string) + **out = **in + } + if in.TokenRefreshExtensionHours != nil { + in, out := &in.TokenRefreshExtensionHours, &out.TokenRefreshExtensionHours + *out = new(float64) + **out = **in + } + if in.TokenStoreEnabled != nil { + in, out := &in.TokenStoreEnabled, &out.TokenStoreEnabled + *out = new(bool) + **out = **in + } + if in.Twitter != nil { + in, out := &in.Twitter, &out.Twitter + *out = new(LinuxFunctionAppAuthSettingsTwitterObservation) + (*in).DeepCopyInto(*out) + } + if in.UnauthenticatedClientAction != nil { + in, out := &in.UnauthenticatedClientAction, &out.UnauthenticatedClientAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppAuthSettingsObservation. +func (in *LinuxFunctionAppAuthSettingsObservation) DeepCopy() *LinuxFunctionAppAuthSettingsObservation { + if in == nil { + return nil + } + out := new(LinuxFunctionAppAuthSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxFunctionAppAuthSettingsParameters) DeepCopyInto(out *LinuxFunctionAppAuthSettingsParameters) { + *out = *in + if in.ActiveDirectory != nil { + in, out := &in.ActiveDirectory, &out.ActiveDirectory + *out = new(LinuxFunctionAppAuthSettingsActiveDirectoryParameters) + (*in).DeepCopyInto(*out) + } + if in.AdditionalLoginParameters != nil { + in, out := &in.AdditionalLoginParameters, &out.AdditionalLoginParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AllowedExternalRedirectUrls != nil { + in, out := &in.AllowedExternalRedirectUrls, &out.AllowedExternalRedirectUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DefaultProvider != nil { + in, out := &in.DefaultProvider, &out.DefaultProvider + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Facebook != nil { + in, out := &in.Facebook, &out.Facebook + *out = new(LinuxFunctionAppAuthSettingsFacebookParameters) + (*in).DeepCopyInto(*out) + } + if in.Github != nil { + in, out := &in.Github, &out.Github + *out = new(GithubParameters) + (*in).DeepCopyInto(*out) + } + if in.Google != nil { + in, out := &in.Google, &out.Google + *out = new(LinuxFunctionAppAuthSettingsGoogleParameters) + (*in).DeepCopyInto(*out) + } + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } + if in.Microsoft != nil { + in, out := &in.Microsoft, &out.Microsoft + *out = new(LinuxFunctionAppAuthSettingsMicrosoftParameters) + (*in).DeepCopyInto(*out) + } + if in.RuntimeVersion != nil { + in, out := &in.RuntimeVersion, &out.RuntimeVersion + 
*out = new(string) + **out = **in + } + if in.TokenRefreshExtensionHours != nil { + in, out := &in.TokenRefreshExtensionHours, &out.TokenRefreshExtensionHours + *out = new(float64) + **out = **in + } + if in.TokenStoreEnabled != nil { + in, out := &in.TokenStoreEnabled, &out.TokenStoreEnabled + *out = new(bool) + **out = **in + } + if in.Twitter != nil { + in, out := &in.Twitter, &out.Twitter + *out = new(LinuxFunctionAppAuthSettingsTwitterParameters) + (*in).DeepCopyInto(*out) + } + if in.UnauthenticatedClientAction != nil { + in, out := &in.UnauthenticatedClientAction, &out.UnauthenticatedClientAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppAuthSettingsParameters. +func (in *LinuxFunctionAppAuthSettingsParameters) DeepCopy() *LinuxFunctionAppAuthSettingsParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppAuthSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppAuthSettingsTwitterInitParameters) DeepCopyInto(out *LinuxFunctionAppAuthSettingsTwitterInitParameters) { + *out = *in + if in.ConsumerKey != nil { + in, out := &in.ConsumerKey, &out.ConsumerKey + *out = new(string) + **out = **in + } + if in.ConsumerSecretSettingName != nil { + in, out := &in.ConsumerSecretSettingName, &out.ConsumerSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppAuthSettingsTwitterInitParameters. 
+func (in *LinuxFunctionAppAuthSettingsTwitterInitParameters) DeepCopy() *LinuxFunctionAppAuthSettingsTwitterInitParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppAuthSettingsTwitterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppAuthSettingsTwitterObservation) DeepCopyInto(out *LinuxFunctionAppAuthSettingsTwitterObservation) { + *out = *in + if in.ConsumerKey != nil { + in, out := &in.ConsumerKey, &out.ConsumerKey + *out = new(string) + **out = **in + } + if in.ConsumerSecretSettingName != nil { + in, out := &in.ConsumerSecretSettingName, &out.ConsumerSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppAuthSettingsTwitterObservation. +func (in *LinuxFunctionAppAuthSettingsTwitterObservation) DeepCopy() *LinuxFunctionAppAuthSettingsTwitterObservation { + if in == nil { + return nil + } + out := new(LinuxFunctionAppAuthSettingsTwitterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxFunctionAppAuthSettingsTwitterParameters) DeepCopyInto(out *LinuxFunctionAppAuthSettingsTwitterParameters) { + *out = *in + if in.ConsumerKey != nil { + in, out := &in.ConsumerKey, &out.ConsumerKey + *out = new(string) + **out = **in + } + if in.ConsumerSecretSecretRef != nil { + in, out := &in.ConsumerSecretSecretRef, &out.ConsumerSecretSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ConsumerSecretSettingName != nil { + in, out := &in.ConsumerSecretSettingName, &out.ConsumerSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppAuthSettingsTwitterParameters. +func (in *LinuxFunctionAppAuthSettingsTwitterParameters) DeepCopy() *LinuxFunctionAppAuthSettingsTwitterParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppAuthSettingsTwitterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppConnectionStringInitParameters) DeepCopyInto(out *LinuxFunctionAppConnectionStringInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppConnectionStringInitParameters. +func (in *LinuxFunctionAppConnectionStringInitParameters) DeepCopy() *LinuxFunctionAppConnectionStringInitParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppConnectionStringInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxFunctionAppConnectionStringObservation) DeepCopyInto(out *LinuxFunctionAppConnectionStringObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppConnectionStringObservation. +func (in *LinuxFunctionAppConnectionStringObservation) DeepCopy() *LinuxFunctionAppConnectionStringObservation { + if in == nil { + return nil + } + out := new(LinuxFunctionAppConnectionStringObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppConnectionStringParameters) DeepCopyInto(out *LinuxFunctionAppConnectionStringParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + out.ValueSecretRef = in.ValueSecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppConnectionStringParameters. +func (in *LinuxFunctionAppConnectionStringParameters) DeepCopy() *LinuxFunctionAppConnectionStringParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppConnectionStringParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxFunctionAppIdentityInitParameters) DeepCopyInto(out *LinuxFunctionAppIdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppIdentityInitParameters. +func (in *LinuxFunctionAppIdentityInitParameters) DeepCopy() *LinuxFunctionAppIdentityInitParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppIdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppIdentityObservation) DeepCopyInto(out *LinuxFunctionAppIdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppIdentityObservation. 
+func (in *LinuxFunctionAppIdentityObservation) DeepCopy() *LinuxFunctionAppIdentityObservation { + if in == nil { + return nil + } + out := new(LinuxFunctionAppIdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppIdentityParameters) DeepCopyInto(out *LinuxFunctionAppIdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppIdentityParameters. +func (in *LinuxFunctionAppIdentityParameters) DeepCopy() *LinuxFunctionAppIdentityParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppIdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxFunctionAppInitParameters) DeepCopyInto(out *LinuxFunctionAppInitParameters) { + *out = *in + if in.AppSettings != nil { + in, out := &in.AppSettings, &out.AppSettings + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AuthSettings != nil { + in, out := &in.AuthSettings, &out.AuthSettings + *out = new(LinuxFunctionAppAuthSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AuthSettingsV2 != nil { + in, out := &in.AuthSettingsV2, &out.AuthSettingsV2 + *out = new(AuthSettingsV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.Backup != nil { + in, out := &in.Backup, &out.Backup + *out = new(BackupInitParameters) + (*in).DeepCopyInto(*out) + } + if in.BuiltinLoggingEnabled != nil { + in, out := &in.BuiltinLoggingEnabled, &out.BuiltinLoggingEnabled + *out = new(bool) + **out = **in + } + if in.ClientCertificateEnabled != nil { + in, out := &in.ClientCertificateEnabled, &out.ClientCertificateEnabled + *out = new(bool) + **out = **in + } + if in.ClientCertificateExclusionPaths != nil { + in, out := &in.ClientCertificateExclusionPaths, &out.ClientCertificateExclusionPaths + *out = new(string) + **out = **in + } + if in.ClientCertificateMode != nil { + in, out := &in.ClientCertificateMode, &out.ClientCertificateMode + *out = new(string) + **out = **in + } + if in.ConnectionString != nil { + in, out := &in.ConnectionString, &out.ConnectionString + *out = make([]LinuxFunctionAppConnectionStringInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ContentShareForceDisabled != nil { + in, out := &in.ContentShareForceDisabled, &out.ContentShareForceDisabled + *out = new(bool) + **out = **in + } + if in.DailyMemoryTimeQuota != nil { + in, out := &in.DailyMemoryTimeQuota, 
&out.DailyMemoryTimeQuota + *out = new(float64) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.FtpPublishBasicAuthenticationEnabled != nil { + in, out := &in.FtpPublishBasicAuthenticationEnabled, &out.FtpPublishBasicAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.FunctionsExtensionVersion != nil { + in, out := &in.FunctionsExtensionVersion, &out.FunctionsExtensionVersion + *out = new(string) + **out = **in + } + if in.HTTPSOnly != nil { + in, out := &in.HTTPSOnly, &out.HTTPSOnly + *out = new(bool) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(LinuxFunctionAppIdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.KeyVaultReferenceIdentityID != nil { + in, out := &in.KeyVaultReferenceIdentityID, &out.KeyVaultReferenceIdentityID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ServicePlanID != nil { + in, out := &in.ServicePlanID, &out.ServicePlanID + *out = new(string) + **out = **in + } + if in.ServicePlanIDRef != nil { + in, out := &in.ServicePlanIDRef, &out.ServicePlanIDRef + *out = 
new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServicePlanIDSelector != nil { + in, out := &in.ServicePlanIDSelector, &out.ServicePlanIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SiteConfig != nil { + in, out := &in.SiteConfig, &out.SiteConfig + *out = new(LinuxFunctionAppSiteConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StickySettings != nil { + in, out := &in.StickySettings, &out.StickySettings + *out = new(StickySettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StorageAccount != nil { + in, out := &in.StorageAccount, &out.StorageAccount + *out = make([]StorageAccountInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StorageAccountName != nil { + in, out := &in.StorageAccountName, &out.StorageAccountName + *out = new(string) + **out = **in + } + if in.StorageAccountNameRef != nil { + in, out := &in.StorageAccountNameRef, &out.StorageAccountNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StorageAccountNameSelector != nil { + in, out := &in.StorageAccountNameSelector, &out.StorageAccountNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StorageKeyVaultSecretID != nil { + in, out := &in.StorageKeyVaultSecretID, &out.StorageKeyVaultSecretID + *out = new(string) + **out = **in + } + if in.StorageUsesManagedIdentity != nil { + in, out := &in.StorageUsesManagedIdentity, &out.StorageUsesManagedIdentity + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + 
if in.VirtualNetworkSubnetIDRef != nil { + in, out := &in.VirtualNetworkSubnetIDRef, &out.VirtualNetworkSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkSubnetIDSelector != nil { + in, out := &in.VirtualNetworkSubnetIDSelector, &out.VirtualNetworkSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.WebdeployPublishBasicAuthenticationEnabled != nil { + in, out := &in.WebdeployPublishBasicAuthenticationEnabled, &out.WebdeployPublishBasicAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.ZipDeployFile != nil { + in, out := &in.ZipDeployFile, &out.ZipDeployFile + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppInitParameters. +func (in *LinuxFunctionAppInitParameters) DeepCopy() *LinuxFunctionAppInitParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppList) DeepCopyInto(out *LinuxFunctionAppList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LinuxFunctionApp, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppList. +func (in *LinuxFunctionAppList) DeepCopy() *LinuxFunctionAppList { + if in == nil { + return nil + } + out := new(LinuxFunctionAppList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *LinuxFunctionAppList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppObservation) DeepCopyInto(out *LinuxFunctionAppObservation) { + *out = *in + if in.AppSettings != nil { + in, out := &in.AppSettings, &out.AppSettings + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AuthSettings != nil { + in, out := &in.AuthSettings, &out.AuthSettings + *out = new(LinuxFunctionAppAuthSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.AuthSettingsV2 != nil { + in, out := &in.AuthSettingsV2, &out.AuthSettingsV2 + *out = new(AuthSettingsV2Observation) + (*in).DeepCopyInto(*out) + } + if in.Backup != nil { + in, out := &in.Backup, &out.Backup + *out = new(BackupObservation) + (*in).DeepCopyInto(*out) + } + if in.BuiltinLoggingEnabled != nil { + in, out := &in.BuiltinLoggingEnabled, &out.BuiltinLoggingEnabled + *out = new(bool) + **out = **in + } + if in.ClientCertificateEnabled != nil { + in, out := &in.ClientCertificateEnabled, &out.ClientCertificateEnabled + *out = new(bool) + **out = **in + } + if in.ClientCertificateExclusionPaths != nil { + in, out := &in.ClientCertificateExclusionPaths, &out.ClientCertificateExclusionPaths + *out = new(string) + **out = **in + } + if in.ClientCertificateMode != nil { + in, out := &in.ClientCertificateMode, &out.ClientCertificateMode + *out = new(string) + **out = **in + } + if in.ConnectionString != nil { + in, out := &in.ConnectionString, &out.ConnectionString + *out = make([]LinuxFunctionAppConnectionStringObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if 
in.ContentShareForceDisabled != nil { + in, out := &in.ContentShareForceDisabled, &out.ContentShareForceDisabled + *out = new(bool) + **out = **in + } + if in.DailyMemoryTimeQuota != nil { + in, out := &in.DailyMemoryTimeQuota, &out.DailyMemoryTimeQuota + *out = new(float64) + **out = **in + } + if in.DefaultHostName != nil { + in, out := &in.DefaultHostName, &out.DefaultHostName + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.FtpPublishBasicAuthenticationEnabled != nil { + in, out := &in.FtpPublishBasicAuthenticationEnabled, &out.FtpPublishBasicAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.FunctionsExtensionVersion != nil { + in, out := &in.FunctionsExtensionVersion, &out.FunctionsExtensionVersion + *out = new(string) + **out = **in + } + if in.HTTPSOnly != nil { + in, out := &in.HTTPSOnly, &out.HTTPSOnly + *out = new(bool) + **out = **in + } + if in.HostingEnvironmentID != nil { + in, out := &in.HostingEnvironmentID, &out.HostingEnvironmentID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(LinuxFunctionAppIdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.KeyVaultReferenceIdentityID != nil { + in, out := &in.KeyVaultReferenceIdentityID, &out.KeyVaultReferenceIdentityID + *out = new(string) + **out = **in + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OutboundIPAddressList != nil { + in, out := &in.OutboundIPAddressList, &out.OutboundIPAddressList + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] 
!= nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OutboundIPAddresses != nil { + in, out := &in.OutboundIPAddresses, &out.OutboundIPAddresses + *out = new(string) + **out = **in + } + if in.PossibleOutboundIPAddressList != nil { + in, out := &in.PossibleOutboundIPAddressList, &out.PossibleOutboundIPAddressList + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PossibleOutboundIPAddresses != nil { + in, out := &in.PossibleOutboundIPAddresses, &out.PossibleOutboundIPAddresses + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ServicePlanID != nil { + in, out := &in.ServicePlanID, &out.ServicePlanID + *out = new(string) + **out = **in + } + if in.SiteConfig != nil { + in, out := &in.SiteConfig, &out.SiteConfig + *out = new(LinuxFunctionAppSiteConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.StickySettings != nil { + in, out := &in.StickySettings, &out.StickySettings + *out = new(StickySettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.StorageAccount != nil { + in, out := &in.StorageAccount, &out.StorageAccount + *out = make([]StorageAccountObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StorageAccountName != nil { + in, out := &in.StorageAccountName, &out.StorageAccountName + *out = new(string) + **out = **in + } + if in.StorageKeyVaultSecretID != nil { + in, out := &in.StorageKeyVaultSecretID, &out.StorageKeyVaultSecretID + *out = new(string) + **out = **in + } + if in.StorageUsesManagedIdentity != nil { + in, out := &in.StorageUsesManagedIdentity, 
&out.StorageUsesManagedIdentity + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.WebdeployPublishBasicAuthenticationEnabled != nil { + in, out := &in.WebdeployPublishBasicAuthenticationEnabled, &out.WebdeployPublishBasicAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.ZipDeployFile != nil { + in, out := &in.ZipDeployFile, &out.ZipDeployFile + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppObservation. +func (in *LinuxFunctionAppObservation) DeepCopy() *LinuxFunctionAppObservation { + if in == nil { + return nil + } + out := new(LinuxFunctionAppObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxFunctionAppParameters) DeepCopyInto(out *LinuxFunctionAppParameters) { + *out = *in + if in.AppSettings != nil { + in, out := &in.AppSettings, &out.AppSettings + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AuthSettings != nil { + in, out := &in.AuthSettings, &out.AuthSettings + *out = new(LinuxFunctionAppAuthSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.AuthSettingsV2 != nil { + in, out := &in.AuthSettingsV2, &out.AuthSettingsV2 + *out = new(AuthSettingsV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.Backup != nil { + in, out := &in.Backup, &out.Backup + *out = new(BackupParameters) + (*in).DeepCopyInto(*out) + } + if in.BuiltinLoggingEnabled != nil { + in, out := &in.BuiltinLoggingEnabled, &out.BuiltinLoggingEnabled + *out = new(bool) + **out = **in + } + if in.ClientCertificateEnabled != nil { + in, out := &in.ClientCertificateEnabled, &out.ClientCertificateEnabled + *out = new(bool) + **out = **in + } + if in.ClientCertificateExclusionPaths != nil { + in, out := &in.ClientCertificateExclusionPaths, &out.ClientCertificateExclusionPaths + *out = new(string) + **out = **in + } + if in.ClientCertificateMode != nil { + in, out := &in.ClientCertificateMode, &out.ClientCertificateMode + *out = new(string) + **out = **in + } + if in.ConnectionString != nil { + in, out := &in.ConnectionString, &out.ConnectionString + *out = make([]LinuxFunctionAppConnectionStringParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ContentShareForceDisabled != nil { + in, out := &in.ContentShareForceDisabled, &out.ContentShareForceDisabled + *out = new(bool) + **out = **in + } + if in.DailyMemoryTimeQuota != nil { + in, out := &in.DailyMemoryTimeQuota, &out.DailyMemoryTimeQuota + *out = 
new(float64) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.FtpPublishBasicAuthenticationEnabled != nil { + in, out := &in.FtpPublishBasicAuthenticationEnabled, &out.FtpPublishBasicAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.FunctionsExtensionVersion != nil { + in, out := &in.FunctionsExtensionVersion, &out.FunctionsExtensionVersion + *out = new(string) + **out = **in + } + if in.HTTPSOnly != nil { + in, out := &in.HTTPSOnly, &out.HTTPSOnly + *out = new(bool) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(LinuxFunctionAppIdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.KeyVaultReferenceIdentityID != nil { + in, out := &in.KeyVaultReferenceIdentityID, &out.KeyVaultReferenceIdentityID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ServicePlanID != nil { + in, out := &in.ServicePlanID, &out.ServicePlanID + *out = new(string) + **out = **in + } + if in.ServicePlanIDRef != nil { + in, out := &in.ServicePlanIDRef, &out.ServicePlanIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + 
} + if in.ServicePlanIDSelector != nil { + in, out := &in.ServicePlanIDSelector, &out.ServicePlanIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SiteConfig != nil { + in, out := &in.SiteConfig, &out.SiteConfig + *out = new(LinuxFunctionAppSiteConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.StickySettings != nil { + in, out := &in.StickySettings, &out.StickySettings + *out = new(StickySettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.StorageAccount != nil { + in, out := &in.StorageAccount, &out.StorageAccount + *out = make([]StorageAccountParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StorageAccountAccessKeySecretRef != nil { + in, out := &in.StorageAccountAccessKeySecretRef, &out.StorageAccountAccessKeySecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.StorageAccountName != nil { + in, out := &in.StorageAccountName, &out.StorageAccountName + *out = new(string) + **out = **in + } + if in.StorageAccountNameRef != nil { + in, out := &in.StorageAccountNameRef, &out.StorageAccountNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StorageAccountNameSelector != nil { + in, out := &in.StorageAccountNameSelector, &out.StorageAccountNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StorageKeyVaultSecretID != nil { + in, out := &in.StorageKeyVaultSecretID, &out.StorageKeyVaultSecretID + *out = new(string) + **out = **in + } + if in.StorageUsesManagedIdentity != nil { + in, out := &in.StorageUsesManagedIdentity, &out.StorageUsesManagedIdentity + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if 
in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetIDRef != nil { + in, out := &in.VirtualNetworkSubnetIDRef, &out.VirtualNetworkSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkSubnetIDSelector != nil { + in, out := &in.VirtualNetworkSubnetIDSelector, &out.VirtualNetworkSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.WebdeployPublishBasicAuthenticationEnabled != nil { + in, out := &in.WebdeployPublishBasicAuthenticationEnabled, &out.WebdeployPublishBasicAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.ZipDeployFile != nil { + in, out := &in.ZipDeployFile, &out.ZipDeployFile + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppParameters. +func (in *LinuxFunctionAppParameters) DeepCopy() *LinuxFunctionAppParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppSiteConfigCorsInitParameters) DeepCopyInto(out *LinuxFunctionAppSiteConfigCorsInitParameters) { + *out = *in + if in.AllowedOrigins != nil { + in, out := &in.AllowedOrigins, &out.AllowedOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SupportCredentials != nil { + in, out := &in.SupportCredentials, &out.SupportCredentials + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSiteConfigCorsInitParameters. 
+func (in *LinuxFunctionAppSiteConfigCorsInitParameters) DeepCopy() *LinuxFunctionAppSiteConfigCorsInitParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSiteConfigCorsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppSiteConfigCorsObservation) DeepCopyInto(out *LinuxFunctionAppSiteConfigCorsObservation) { + *out = *in + if in.AllowedOrigins != nil { + in, out := &in.AllowedOrigins, &out.AllowedOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SupportCredentials != nil { + in, out := &in.SupportCredentials, &out.SupportCredentials + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSiteConfigCorsObservation. +func (in *LinuxFunctionAppSiteConfigCorsObservation) DeepCopy() *LinuxFunctionAppSiteConfigCorsObservation { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSiteConfigCorsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxFunctionAppSiteConfigCorsParameters) DeepCopyInto(out *LinuxFunctionAppSiteConfigCorsParameters) { + *out = *in + if in.AllowedOrigins != nil { + in, out := &in.AllowedOrigins, &out.AllowedOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SupportCredentials != nil { + in, out := &in.SupportCredentials, &out.SupportCredentials + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSiteConfigCorsParameters. +func (in *LinuxFunctionAppSiteConfigCorsParameters) DeepCopy() *LinuxFunctionAppSiteConfigCorsParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSiteConfigCorsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxFunctionAppSiteConfigIPRestrictionInitParameters) DeepCopyInto(out *LinuxFunctionAppSiteConfigIPRestrictionInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]SiteConfigIPRestrictionHeadersInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetIDRef != nil { + in, out := &in.VirtualNetworkSubnetIDRef, &out.VirtualNetworkSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkSubnetIDSelector != nil { + in, out := &in.VirtualNetworkSubnetIDSelector, &out.VirtualNetworkSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSiteConfigIPRestrictionInitParameters. 
+func (in *LinuxFunctionAppSiteConfigIPRestrictionInitParameters) DeepCopy() *LinuxFunctionAppSiteConfigIPRestrictionInitParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSiteConfigIPRestrictionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppSiteConfigIPRestrictionObservation) DeepCopyInto(out *LinuxFunctionAppSiteConfigIPRestrictionObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]SiteConfigIPRestrictionHeadersObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSiteConfigIPRestrictionObservation. 
+func (in *LinuxFunctionAppSiteConfigIPRestrictionObservation) DeepCopy() *LinuxFunctionAppSiteConfigIPRestrictionObservation { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSiteConfigIPRestrictionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppSiteConfigIPRestrictionParameters) DeepCopyInto(out *LinuxFunctionAppSiteConfigIPRestrictionParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]SiteConfigIPRestrictionHeadersParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetIDRef != nil { + in, out := &in.VirtualNetworkSubnetIDRef, &out.VirtualNetworkSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkSubnetIDSelector != nil { + in, out := &in.VirtualNetworkSubnetIDSelector, &out.VirtualNetworkSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
LinuxFunctionAppSiteConfigIPRestrictionParameters. +func (in *LinuxFunctionAppSiteConfigIPRestrictionParameters) DeepCopy() *LinuxFunctionAppSiteConfigIPRestrictionParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSiteConfigIPRestrictionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppSiteConfigInitParameters) DeepCopyInto(out *LinuxFunctionAppSiteConfigInitParameters) { + *out = *in + if in.APIDefinitionURL != nil { + in, out := &in.APIDefinitionURL, &out.APIDefinitionURL + *out = new(string) + **out = **in + } + if in.APIManagementAPIID != nil { + in, out := &in.APIManagementAPIID, &out.APIManagementAPIID + *out = new(string) + **out = **in + } + if in.AlwaysOn != nil { + in, out := &in.AlwaysOn, &out.AlwaysOn + *out = new(bool) + **out = **in + } + if in.AppCommandLine != nil { + in, out := &in.AppCommandLine, &out.AppCommandLine + *out = new(string) + **out = **in + } + if in.AppScaleLimit != nil { + in, out := &in.AppScaleLimit, &out.AppScaleLimit + *out = new(float64) + **out = **in + } + if in.AppServiceLogs != nil { + in, out := &in.AppServiceLogs, &out.AppServiceLogs + *out = new(AppServiceLogsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ApplicationStack != nil { + in, out := &in.ApplicationStack, &out.ApplicationStack + *out = new(ApplicationStackInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ContainerRegistryManagedIdentityClientID != nil { + in, out := &in.ContainerRegistryManagedIdentityClientID, &out.ContainerRegistryManagedIdentityClientID + *out = new(string) + **out = **in + } + if in.ContainerRegistryUseManagedIdentity != nil { + in, out := &in.ContainerRegistryUseManagedIdentity, &out.ContainerRegistryUseManagedIdentity + *out = new(bool) + **out = **in + } + if in.Cors != nil { + in, out := &in.Cors, &out.Cors + *out = 
new(LinuxFunctionAppSiteConfigCorsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DefaultDocuments != nil { + in, out := &in.DefaultDocuments, &out.DefaultDocuments + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ElasticInstanceMinimum != nil { + in, out := &in.ElasticInstanceMinimum, &out.ElasticInstanceMinimum + *out = new(float64) + **out = **in + } + if in.FtpsState != nil { + in, out := &in.FtpsState, &out.FtpsState + *out = new(string) + **out = **in + } + if in.HealthCheckEvictionTimeInMin != nil { + in, out := &in.HealthCheckEvictionTimeInMin, &out.HealthCheckEvictionTimeInMin + *out = new(float64) + **out = **in + } + if in.HealthCheckPath != nil { + in, out := &in.HealthCheckPath, &out.HealthCheckPath + *out = new(string) + **out = **in + } + if in.Http2Enabled != nil { + in, out := &in.Http2Enabled, &out.Http2Enabled + *out = new(bool) + **out = **in + } + if in.IPRestriction != nil { + in, out := &in.IPRestriction, &out.IPRestriction + *out = make([]LinuxFunctionAppSiteConfigIPRestrictionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPRestrictionDefaultAction != nil { + in, out := &in.IPRestrictionDefaultAction, &out.IPRestrictionDefaultAction + *out = new(string) + **out = **in + } + if in.LoadBalancingMode != nil { + in, out := &in.LoadBalancingMode, &out.LoadBalancingMode + *out = new(string) + **out = **in + } + if in.ManagedPipelineMode != nil { + in, out := &in.ManagedPipelineMode, &out.ManagedPipelineMode + *out = new(string) + **out = **in + } + if in.MinimumTLSVersion != nil { + in, out := &in.MinimumTLSVersion, &out.MinimumTLSVersion + *out = new(string) + **out = **in + } + if in.PreWarmedInstanceCount != nil { + in, out := &in.PreWarmedInstanceCount, &out.PreWarmedInstanceCount + *out = new(float64) + **out = **in + } + if in.RemoteDebuggingEnabled != nil { + 
in, out := &in.RemoteDebuggingEnabled, &out.RemoteDebuggingEnabled + *out = new(bool) + **out = **in + } + if in.RemoteDebuggingVersion != nil { + in, out := &in.RemoteDebuggingVersion, &out.RemoteDebuggingVersion + *out = new(string) + **out = **in + } + if in.RuntimeScaleMonitoringEnabled != nil { + in, out := &in.RuntimeScaleMonitoringEnabled, &out.RuntimeScaleMonitoringEnabled + *out = new(bool) + **out = **in + } + if in.ScmIPRestriction != nil { + in, out := &in.ScmIPRestriction, &out.ScmIPRestriction + *out = make([]LinuxFunctionAppSiteConfigScmIPRestrictionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ScmIPRestrictionDefaultAction != nil { + in, out := &in.ScmIPRestrictionDefaultAction, &out.ScmIPRestrictionDefaultAction + *out = new(string) + **out = **in + } + if in.ScmMinimumTLSVersion != nil { + in, out := &in.ScmMinimumTLSVersion, &out.ScmMinimumTLSVersion + *out = new(string) + **out = **in + } + if in.ScmUseMainIPRestriction != nil { + in, out := &in.ScmUseMainIPRestriction, &out.ScmUseMainIPRestriction + *out = new(bool) + **out = **in + } + if in.Use32BitWorker != nil { + in, out := &in.Use32BitWorker, &out.Use32BitWorker + *out = new(bool) + **out = **in + } + if in.VnetRouteAllEnabled != nil { + in, out := &in.VnetRouteAllEnabled, &out.VnetRouteAllEnabled + *out = new(bool) + **out = **in + } + if in.WebsocketsEnabled != nil { + in, out := &in.WebsocketsEnabled, &out.WebsocketsEnabled + *out = new(bool) + **out = **in + } + if in.WorkerCount != nil { + in, out := &in.WorkerCount, &out.WorkerCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSiteConfigInitParameters. 
+func (in *LinuxFunctionAppSiteConfigInitParameters) DeepCopy() *LinuxFunctionAppSiteConfigInitParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSiteConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppSiteConfigObservation) DeepCopyInto(out *LinuxFunctionAppSiteConfigObservation) { + *out = *in + if in.APIDefinitionURL != nil { + in, out := &in.APIDefinitionURL, &out.APIDefinitionURL + *out = new(string) + **out = **in + } + if in.APIManagementAPIID != nil { + in, out := &in.APIManagementAPIID, &out.APIManagementAPIID + *out = new(string) + **out = **in + } + if in.AlwaysOn != nil { + in, out := &in.AlwaysOn, &out.AlwaysOn + *out = new(bool) + **out = **in + } + if in.AppCommandLine != nil { + in, out := &in.AppCommandLine, &out.AppCommandLine + *out = new(string) + **out = **in + } + if in.AppScaleLimit != nil { + in, out := &in.AppScaleLimit, &out.AppScaleLimit + *out = new(float64) + **out = **in + } + if in.AppServiceLogs != nil { + in, out := &in.AppServiceLogs, &out.AppServiceLogs + *out = new(AppServiceLogsObservation) + (*in).DeepCopyInto(*out) + } + if in.ApplicationStack != nil { + in, out := &in.ApplicationStack, &out.ApplicationStack + *out = new(ApplicationStackObservation) + (*in).DeepCopyInto(*out) + } + if in.ContainerRegistryManagedIdentityClientID != nil { + in, out := &in.ContainerRegistryManagedIdentityClientID, &out.ContainerRegistryManagedIdentityClientID + *out = new(string) + **out = **in + } + if in.ContainerRegistryUseManagedIdentity != nil { + in, out := &in.ContainerRegistryUseManagedIdentity, &out.ContainerRegistryUseManagedIdentity + *out = new(bool) + **out = **in + } + if in.Cors != nil { + in, out := &in.Cors, &out.Cors + *out = new(LinuxFunctionAppSiteConfigCorsObservation) + (*in).DeepCopyInto(*out) + } + if in.DefaultDocuments != nil { + in, out := 
&in.DefaultDocuments, &out.DefaultDocuments + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DetailedErrorLoggingEnabled != nil { + in, out := &in.DetailedErrorLoggingEnabled, &out.DetailedErrorLoggingEnabled + *out = new(bool) + **out = **in + } + if in.ElasticInstanceMinimum != nil { + in, out := &in.ElasticInstanceMinimum, &out.ElasticInstanceMinimum + *out = new(float64) + **out = **in + } + if in.FtpsState != nil { + in, out := &in.FtpsState, &out.FtpsState + *out = new(string) + **out = **in + } + if in.HealthCheckEvictionTimeInMin != nil { + in, out := &in.HealthCheckEvictionTimeInMin, &out.HealthCheckEvictionTimeInMin + *out = new(float64) + **out = **in + } + if in.HealthCheckPath != nil { + in, out := &in.HealthCheckPath, &out.HealthCheckPath + *out = new(string) + **out = **in + } + if in.Http2Enabled != nil { + in, out := &in.Http2Enabled, &out.Http2Enabled + *out = new(bool) + **out = **in + } + if in.IPRestriction != nil { + in, out := &in.IPRestriction, &out.IPRestriction + *out = make([]LinuxFunctionAppSiteConfigIPRestrictionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPRestrictionDefaultAction != nil { + in, out := &in.IPRestrictionDefaultAction, &out.IPRestrictionDefaultAction + *out = new(string) + **out = **in + } + if in.LinuxFxVersion != nil { + in, out := &in.LinuxFxVersion, &out.LinuxFxVersion + *out = new(string) + **out = **in + } + if in.LoadBalancingMode != nil { + in, out := &in.LoadBalancingMode, &out.LoadBalancingMode + *out = new(string) + **out = **in + } + if in.ManagedPipelineMode != nil { + in, out := &in.ManagedPipelineMode, &out.ManagedPipelineMode + *out = new(string) + **out = **in + } + if in.MinimumTLSVersion != nil { + in, out := &in.MinimumTLSVersion, &out.MinimumTLSVersion + *out = new(string) + **out = **in + } + if in.PreWarmedInstanceCount != 
nil { + in, out := &in.PreWarmedInstanceCount, &out.PreWarmedInstanceCount + *out = new(float64) + **out = **in + } + if in.RemoteDebuggingEnabled != nil { + in, out := &in.RemoteDebuggingEnabled, &out.RemoteDebuggingEnabled + *out = new(bool) + **out = **in + } + if in.RemoteDebuggingVersion != nil { + in, out := &in.RemoteDebuggingVersion, &out.RemoteDebuggingVersion + *out = new(string) + **out = **in + } + if in.RuntimeScaleMonitoringEnabled != nil { + in, out := &in.RuntimeScaleMonitoringEnabled, &out.RuntimeScaleMonitoringEnabled + *out = new(bool) + **out = **in + } + if in.ScmIPRestriction != nil { + in, out := &in.ScmIPRestriction, &out.ScmIPRestriction + *out = make([]LinuxFunctionAppSiteConfigScmIPRestrictionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ScmIPRestrictionDefaultAction != nil { + in, out := &in.ScmIPRestrictionDefaultAction, &out.ScmIPRestrictionDefaultAction + *out = new(string) + **out = **in + } + if in.ScmMinimumTLSVersion != nil { + in, out := &in.ScmMinimumTLSVersion, &out.ScmMinimumTLSVersion + *out = new(string) + **out = **in + } + if in.ScmType != nil { + in, out := &in.ScmType, &out.ScmType + *out = new(string) + **out = **in + } + if in.ScmUseMainIPRestriction != nil { + in, out := &in.ScmUseMainIPRestriction, &out.ScmUseMainIPRestriction + *out = new(bool) + **out = **in + } + if in.Use32BitWorker != nil { + in, out := &in.Use32BitWorker, &out.Use32BitWorker + *out = new(bool) + **out = **in + } + if in.VnetRouteAllEnabled != nil { + in, out := &in.VnetRouteAllEnabled, &out.VnetRouteAllEnabled + *out = new(bool) + **out = **in + } + if in.WebsocketsEnabled != nil { + in, out := &in.WebsocketsEnabled, &out.WebsocketsEnabled + *out = new(bool) + **out = **in + } + if in.WorkerCount != nil { + in, out := &in.WorkerCount, &out.WorkerCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
LinuxFunctionAppSiteConfigObservation. +func (in *LinuxFunctionAppSiteConfigObservation) DeepCopy() *LinuxFunctionAppSiteConfigObservation { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSiteConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppSiteConfigParameters) DeepCopyInto(out *LinuxFunctionAppSiteConfigParameters) { + *out = *in + if in.APIDefinitionURL != nil { + in, out := &in.APIDefinitionURL, &out.APIDefinitionURL + *out = new(string) + **out = **in + } + if in.APIManagementAPIID != nil { + in, out := &in.APIManagementAPIID, &out.APIManagementAPIID + *out = new(string) + **out = **in + } + if in.AlwaysOn != nil { + in, out := &in.AlwaysOn, &out.AlwaysOn + *out = new(bool) + **out = **in + } + if in.AppCommandLine != nil { + in, out := &in.AppCommandLine, &out.AppCommandLine + *out = new(string) + **out = **in + } + if in.AppScaleLimit != nil { + in, out := &in.AppScaleLimit, &out.AppScaleLimit + *out = new(float64) + **out = **in + } + if in.AppServiceLogs != nil { + in, out := &in.AppServiceLogs, &out.AppServiceLogs + *out = new(AppServiceLogsParameters) + (*in).DeepCopyInto(*out) + } + if in.ApplicationInsightsConnectionStringSecretRef != nil { + in, out := &in.ApplicationInsightsConnectionStringSecretRef, &out.ApplicationInsightsConnectionStringSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ApplicationInsightsKeySecretRef != nil { + in, out := &in.ApplicationInsightsKeySecretRef, &out.ApplicationInsightsKeySecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ApplicationStack != nil { + in, out := &in.ApplicationStack, &out.ApplicationStack + *out = new(ApplicationStackParameters) + (*in).DeepCopyInto(*out) + } + if in.ContainerRegistryManagedIdentityClientID != nil { + in, out := &in.ContainerRegistryManagedIdentityClientID, 
&out.ContainerRegistryManagedIdentityClientID + *out = new(string) + **out = **in + } + if in.ContainerRegistryUseManagedIdentity != nil { + in, out := &in.ContainerRegistryUseManagedIdentity, &out.ContainerRegistryUseManagedIdentity + *out = new(bool) + **out = **in + } + if in.Cors != nil { + in, out := &in.Cors, &out.Cors + *out = new(LinuxFunctionAppSiteConfigCorsParameters) + (*in).DeepCopyInto(*out) + } + if in.DefaultDocuments != nil { + in, out := &in.DefaultDocuments, &out.DefaultDocuments + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ElasticInstanceMinimum != nil { + in, out := &in.ElasticInstanceMinimum, &out.ElasticInstanceMinimum + *out = new(float64) + **out = **in + } + if in.FtpsState != nil { + in, out := &in.FtpsState, &out.FtpsState + *out = new(string) + **out = **in + } + if in.HealthCheckEvictionTimeInMin != nil { + in, out := &in.HealthCheckEvictionTimeInMin, &out.HealthCheckEvictionTimeInMin + *out = new(float64) + **out = **in + } + if in.HealthCheckPath != nil { + in, out := &in.HealthCheckPath, &out.HealthCheckPath + *out = new(string) + **out = **in + } + if in.Http2Enabled != nil { + in, out := &in.Http2Enabled, &out.Http2Enabled + *out = new(bool) + **out = **in + } + if in.IPRestriction != nil { + in, out := &in.IPRestriction, &out.IPRestriction + *out = make([]LinuxFunctionAppSiteConfigIPRestrictionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPRestrictionDefaultAction != nil { + in, out := &in.IPRestrictionDefaultAction, &out.IPRestrictionDefaultAction + *out = new(string) + **out = **in + } + if in.LoadBalancingMode != nil { + in, out := &in.LoadBalancingMode, &out.LoadBalancingMode + *out = new(string) + **out = **in + } + if in.ManagedPipelineMode != nil { + in, out := &in.ManagedPipelineMode, &out.ManagedPipelineMode + *out = new(string) + **out = **in + 
} + if in.MinimumTLSVersion != nil { + in, out := &in.MinimumTLSVersion, &out.MinimumTLSVersion + *out = new(string) + **out = **in + } + if in.PreWarmedInstanceCount != nil { + in, out := &in.PreWarmedInstanceCount, &out.PreWarmedInstanceCount + *out = new(float64) + **out = **in + } + if in.RemoteDebuggingEnabled != nil { + in, out := &in.RemoteDebuggingEnabled, &out.RemoteDebuggingEnabled + *out = new(bool) + **out = **in + } + if in.RemoteDebuggingVersion != nil { + in, out := &in.RemoteDebuggingVersion, &out.RemoteDebuggingVersion + *out = new(string) + **out = **in + } + if in.RuntimeScaleMonitoringEnabled != nil { + in, out := &in.RuntimeScaleMonitoringEnabled, &out.RuntimeScaleMonitoringEnabled + *out = new(bool) + **out = **in + } + if in.ScmIPRestriction != nil { + in, out := &in.ScmIPRestriction, &out.ScmIPRestriction + *out = make([]LinuxFunctionAppSiteConfigScmIPRestrictionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ScmIPRestrictionDefaultAction != nil { + in, out := &in.ScmIPRestrictionDefaultAction, &out.ScmIPRestrictionDefaultAction + *out = new(string) + **out = **in + } + if in.ScmMinimumTLSVersion != nil { + in, out := &in.ScmMinimumTLSVersion, &out.ScmMinimumTLSVersion + *out = new(string) + **out = **in + } + if in.ScmUseMainIPRestriction != nil { + in, out := &in.ScmUseMainIPRestriction, &out.ScmUseMainIPRestriction + *out = new(bool) + **out = **in + } + if in.Use32BitWorker != nil { + in, out := &in.Use32BitWorker, &out.Use32BitWorker + *out = new(bool) + **out = **in + } + if in.VnetRouteAllEnabled != nil { + in, out := &in.VnetRouteAllEnabled, &out.VnetRouteAllEnabled + *out = new(bool) + **out = **in + } + if in.WebsocketsEnabled != nil { + in, out := &in.WebsocketsEnabled, &out.WebsocketsEnabled + *out = new(bool) + **out = **in + } + if in.WorkerCount != nil { + in, out := &in.WorkerCount, &out.WorkerCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated 
deepcopy function, copying the receiver, creating a new LinuxFunctionAppSiteConfigParameters. +func (in *LinuxFunctionAppSiteConfigParameters) DeepCopy() *LinuxFunctionAppSiteConfigParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSiteConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppSiteConfigScmIPRestrictionHeadersInitParameters) DeepCopyInto(out *LinuxFunctionAppSiteConfigScmIPRestrictionHeadersInitParameters) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSiteConfigScmIPRestrictionHeadersInitParameters. 
+func (in *LinuxFunctionAppSiteConfigScmIPRestrictionHeadersInitParameters) DeepCopy() *LinuxFunctionAppSiteConfigScmIPRestrictionHeadersInitParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSiteConfigScmIPRestrictionHeadersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppSiteConfigScmIPRestrictionHeadersObservation) DeepCopyInto(out *LinuxFunctionAppSiteConfigScmIPRestrictionHeadersObservation) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSiteConfigScmIPRestrictionHeadersObservation. 
+func (in *LinuxFunctionAppSiteConfigScmIPRestrictionHeadersObservation) DeepCopy() *LinuxFunctionAppSiteConfigScmIPRestrictionHeadersObservation { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSiteConfigScmIPRestrictionHeadersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppSiteConfigScmIPRestrictionHeadersParameters) DeepCopyInto(out *LinuxFunctionAppSiteConfigScmIPRestrictionHeadersParameters) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSiteConfigScmIPRestrictionHeadersParameters. 
+func (in *LinuxFunctionAppSiteConfigScmIPRestrictionHeadersParameters) DeepCopy() *LinuxFunctionAppSiteConfigScmIPRestrictionHeadersParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSiteConfigScmIPRestrictionHeadersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppSiteConfigScmIPRestrictionInitParameters) DeepCopyInto(out *LinuxFunctionAppSiteConfigScmIPRestrictionInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]LinuxFunctionAppSiteConfigScmIPRestrictionHeadersInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetIDRef != nil { + in, out := &in.VirtualNetworkSubnetIDRef, &out.VirtualNetworkSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkSubnetIDSelector != nil { + in, out := &in.VirtualNetworkSubnetIDSelector, &out.VirtualNetworkSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy 
function, copying the receiver, creating a new LinuxFunctionAppSiteConfigScmIPRestrictionInitParameters. +func (in *LinuxFunctionAppSiteConfigScmIPRestrictionInitParameters) DeepCopy() *LinuxFunctionAppSiteConfigScmIPRestrictionInitParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSiteConfigScmIPRestrictionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppSiteConfigScmIPRestrictionObservation) DeepCopyInto(out *LinuxFunctionAppSiteConfigScmIPRestrictionObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]LinuxFunctionAppSiteConfigScmIPRestrictionHeadersObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSiteConfigScmIPRestrictionObservation. 
+func (in *LinuxFunctionAppSiteConfigScmIPRestrictionObservation) DeepCopy() *LinuxFunctionAppSiteConfigScmIPRestrictionObservation { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSiteConfigScmIPRestrictionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppSiteConfigScmIPRestrictionParameters) DeepCopyInto(out *LinuxFunctionAppSiteConfigScmIPRestrictionParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]LinuxFunctionAppSiteConfigScmIPRestrictionHeadersParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetIDRef != nil { + in, out := &in.VirtualNetworkSubnetIDRef, &out.VirtualNetworkSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkSubnetIDSelector != nil { + in, out := &in.VirtualNetworkSubnetIDSelector, &out.VirtualNetworkSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, 
creating a new LinuxFunctionAppSiteConfigScmIPRestrictionParameters. +func (in *LinuxFunctionAppSiteConfigScmIPRestrictionParameters) DeepCopy() *LinuxFunctionAppSiteConfigScmIPRestrictionParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSiteConfigScmIPRestrictionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppSiteCredentialInitParameters) DeepCopyInto(out *LinuxFunctionAppSiteCredentialInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSiteCredentialInitParameters. +func (in *LinuxFunctionAppSiteCredentialInitParameters) DeepCopy() *LinuxFunctionAppSiteCredentialInitParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSiteCredentialInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppSiteCredentialObservation) DeepCopyInto(out *LinuxFunctionAppSiteCredentialObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Password != nil { + in, out := &in.Password, &out.Password + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSiteCredentialObservation. +func (in *LinuxFunctionAppSiteCredentialObservation) DeepCopy() *LinuxFunctionAppSiteCredentialObservation { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSiteCredentialObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxFunctionAppSiteCredentialParameters) DeepCopyInto(out *LinuxFunctionAppSiteCredentialParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSiteCredentialParameters. +func (in *LinuxFunctionAppSiteCredentialParameters) DeepCopy() *LinuxFunctionAppSiteCredentialParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSiteCredentialParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppSlot) DeepCopyInto(out *LinuxFunctionAppSlot) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSlot. +func (in *LinuxFunctionAppSlot) DeepCopy() *LinuxFunctionAppSlot { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlot) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LinuxFunctionAppSlot) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxFunctionAppSlotAuthSettingsActiveDirectoryInitParameters) DeepCopyInto(out *LinuxFunctionAppSlotAuthSettingsActiveDirectoryInitParameters) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSlotAuthSettingsActiveDirectoryInitParameters. +func (in *LinuxFunctionAppSlotAuthSettingsActiveDirectoryInitParameters) DeepCopy() *LinuxFunctionAppSlotAuthSettingsActiveDirectoryInitParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotAuthSettingsActiveDirectoryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxFunctionAppSlotAuthSettingsActiveDirectoryObservation) DeepCopyInto(out *LinuxFunctionAppSlotAuthSettingsActiveDirectoryObservation) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSlotAuthSettingsActiveDirectoryObservation. +func (in *LinuxFunctionAppSlotAuthSettingsActiveDirectoryObservation) DeepCopy() *LinuxFunctionAppSlotAuthSettingsActiveDirectoryObservation { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotAuthSettingsActiveDirectoryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxFunctionAppSlotAuthSettingsActiveDirectoryParameters) DeepCopyInto(out *LinuxFunctionAppSlotAuthSettingsActiveDirectoryParameters) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSecretRef != nil { + in, out := &in.ClientSecretSecretRef, &out.ClientSecretSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSlotAuthSettingsActiveDirectoryParameters. +func (in *LinuxFunctionAppSlotAuthSettingsActiveDirectoryParameters) DeepCopy() *LinuxFunctionAppSlotAuthSettingsActiveDirectoryParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotAuthSettingsActiveDirectoryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxFunctionAppSlotAuthSettingsFacebookInitParameters) DeepCopyInto(out *LinuxFunctionAppSlotAuthSettingsFacebookInitParameters) { + *out = *in + if in.AppID != nil { + in, out := &in.AppID, &out.AppID + *out = new(string) + **out = **in + } + if in.AppSecretSettingName != nil { + in, out := &in.AppSecretSettingName, &out.AppSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSlotAuthSettingsFacebookInitParameters. +func (in *LinuxFunctionAppSlotAuthSettingsFacebookInitParameters) DeepCopy() *LinuxFunctionAppSlotAuthSettingsFacebookInitParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotAuthSettingsFacebookInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppSlotAuthSettingsFacebookObservation) DeepCopyInto(out *LinuxFunctionAppSlotAuthSettingsFacebookObservation) { + *out = *in + if in.AppID != nil { + in, out := &in.AppID, &out.AppID + *out = new(string) + **out = **in + } + if in.AppSecretSettingName != nil { + in, out := &in.AppSecretSettingName, &out.AppSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSlotAuthSettingsFacebookObservation. 
+func (in *LinuxFunctionAppSlotAuthSettingsFacebookObservation) DeepCopy() *LinuxFunctionAppSlotAuthSettingsFacebookObservation { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotAuthSettingsFacebookObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppSlotAuthSettingsFacebookParameters) DeepCopyInto(out *LinuxFunctionAppSlotAuthSettingsFacebookParameters) { + *out = *in + if in.AppID != nil { + in, out := &in.AppID, &out.AppID + *out = new(string) + **out = **in + } + if in.AppSecretSecretRef != nil { + in, out := &in.AppSecretSecretRef, &out.AppSecretSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.AppSecretSettingName != nil { + in, out := &in.AppSecretSettingName, &out.AppSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSlotAuthSettingsFacebookParameters. +func (in *LinuxFunctionAppSlotAuthSettingsFacebookParameters) DeepCopy() *LinuxFunctionAppSlotAuthSettingsFacebookParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotAuthSettingsFacebookParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxFunctionAppSlotAuthSettingsGoogleInitParameters) DeepCopyInto(out *LinuxFunctionAppSlotAuthSettingsGoogleInitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSlotAuthSettingsGoogleInitParameters. +func (in *LinuxFunctionAppSlotAuthSettingsGoogleInitParameters) DeepCopy() *LinuxFunctionAppSlotAuthSettingsGoogleInitParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotAuthSettingsGoogleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppSlotAuthSettingsGoogleObservation) DeepCopyInto(out *LinuxFunctionAppSlotAuthSettingsGoogleObservation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSlotAuthSettingsGoogleObservation. 
+func (in *LinuxFunctionAppSlotAuthSettingsGoogleObservation) DeepCopy() *LinuxFunctionAppSlotAuthSettingsGoogleObservation { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotAuthSettingsGoogleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppSlotAuthSettingsGoogleParameters) DeepCopyInto(out *LinuxFunctionAppSlotAuthSettingsGoogleParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSecretRef != nil { + in, out := &in.ClientSecretSecretRef, &out.ClientSecretSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSlotAuthSettingsGoogleParameters. +func (in *LinuxFunctionAppSlotAuthSettingsGoogleParameters) DeepCopy() *LinuxFunctionAppSlotAuthSettingsGoogleParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotAuthSettingsGoogleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxFunctionAppSlotAuthSettingsInitParameters) DeepCopyInto(out *LinuxFunctionAppSlotAuthSettingsInitParameters) { + *out = *in + if in.ActiveDirectory != nil { + in, out := &in.ActiveDirectory, &out.ActiveDirectory + *out = new(LinuxFunctionAppSlotAuthSettingsActiveDirectoryInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AdditionalLoginParameters != nil { + in, out := &in.AdditionalLoginParameters, &out.AdditionalLoginParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AllowedExternalRedirectUrls != nil { + in, out := &in.AllowedExternalRedirectUrls, &out.AllowedExternalRedirectUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DefaultProvider != nil { + in, out := &in.DefaultProvider, &out.DefaultProvider + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Facebook != nil { + in, out := &in.Facebook, &out.Facebook + *out = new(LinuxFunctionAppSlotAuthSettingsFacebookInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Github != nil { + in, out := &in.Github, &out.Github + *out = new(AuthSettingsGithubInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Google != nil { + in, out := &in.Google, &out.Google + *out = new(LinuxFunctionAppSlotAuthSettingsGoogleInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } + if in.Microsoft != nil { + in, out := &in.Microsoft, &out.Microsoft + *out = new(LinuxFunctionAppSlotAuthSettingsMicrosoftInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RuntimeVersion != 
nil { + in, out := &in.RuntimeVersion, &out.RuntimeVersion + *out = new(string) + **out = **in + } + if in.TokenRefreshExtensionHours != nil { + in, out := &in.TokenRefreshExtensionHours, &out.TokenRefreshExtensionHours + *out = new(float64) + **out = **in + } + if in.TokenStoreEnabled != nil { + in, out := &in.TokenStoreEnabled, &out.TokenStoreEnabled + *out = new(bool) + **out = **in + } + if in.Twitter != nil { + in, out := &in.Twitter, &out.Twitter + *out = new(LinuxFunctionAppSlotAuthSettingsTwitterInitParameters) + (*in).DeepCopyInto(*out) + } + if in.UnauthenticatedClientAction != nil { + in, out := &in.UnauthenticatedClientAction, &out.UnauthenticatedClientAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSlotAuthSettingsInitParameters. +func (in *LinuxFunctionAppSlotAuthSettingsInitParameters) DeepCopy() *LinuxFunctionAppSlotAuthSettingsInitParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotAuthSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxFunctionAppSlotAuthSettingsMicrosoftInitParameters) DeepCopyInto(out *LinuxFunctionAppSlotAuthSettingsMicrosoftInitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSlotAuthSettingsMicrosoftInitParameters. +func (in *LinuxFunctionAppSlotAuthSettingsMicrosoftInitParameters) DeepCopy() *LinuxFunctionAppSlotAuthSettingsMicrosoftInitParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotAuthSettingsMicrosoftInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxFunctionAppSlotAuthSettingsMicrosoftObservation) DeepCopyInto(out *LinuxFunctionAppSlotAuthSettingsMicrosoftObservation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSlotAuthSettingsMicrosoftObservation. +func (in *LinuxFunctionAppSlotAuthSettingsMicrosoftObservation) DeepCopy() *LinuxFunctionAppSlotAuthSettingsMicrosoftObservation { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotAuthSettingsMicrosoftObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxFunctionAppSlotAuthSettingsMicrosoftParameters) DeepCopyInto(out *LinuxFunctionAppSlotAuthSettingsMicrosoftParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSecretRef != nil { + in, out := &in.ClientSecretSecretRef, &out.ClientSecretSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSlotAuthSettingsMicrosoftParameters. +func (in *LinuxFunctionAppSlotAuthSettingsMicrosoftParameters) DeepCopy() *LinuxFunctionAppSlotAuthSettingsMicrosoftParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotAuthSettingsMicrosoftParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxFunctionAppSlotAuthSettingsObservation) DeepCopyInto(out *LinuxFunctionAppSlotAuthSettingsObservation) { + *out = *in + if in.ActiveDirectory != nil { + in, out := &in.ActiveDirectory, &out.ActiveDirectory + *out = new(LinuxFunctionAppSlotAuthSettingsActiveDirectoryObservation) + (*in).DeepCopyInto(*out) + } + if in.AdditionalLoginParameters != nil { + in, out := &in.AdditionalLoginParameters, &out.AdditionalLoginParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AllowedExternalRedirectUrls != nil { + in, out := &in.AllowedExternalRedirectUrls, &out.AllowedExternalRedirectUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DefaultProvider != nil { + in, out := &in.DefaultProvider, &out.DefaultProvider + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Facebook != nil { + in, out := &in.Facebook, &out.Facebook + *out = new(LinuxFunctionAppSlotAuthSettingsFacebookObservation) + (*in).DeepCopyInto(*out) + } + if in.Github != nil { + in, out := &in.Github, &out.Github + *out = new(AuthSettingsGithubObservation) + (*in).DeepCopyInto(*out) + } + if in.Google != nil { + in, out := &in.Google, &out.Google + *out = new(LinuxFunctionAppSlotAuthSettingsGoogleObservation) + (*in).DeepCopyInto(*out) + } + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } + if in.Microsoft != nil { + in, out := &in.Microsoft, &out.Microsoft + *out = new(LinuxFunctionAppSlotAuthSettingsMicrosoftObservation) + (*in).DeepCopyInto(*out) + } + if in.RuntimeVersion != nil { + in, out := 
&in.RuntimeVersion, &out.RuntimeVersion + *out = new(string) + **out = **in + } + if in.TokenRefreshExtensionHours != nil { + in, out := &in.TokenRefreshExtensionHours, &out.TokenRefreshExtensionHours + *out = new(float64) + **out = **in + } + if in.TokenStoreEnabled != nil { + in, out := &in.TokenStoreEnabled, &out.TokenStoreEnabled + *out = new(bool) + **out = **in + } + if in.Twitter != nil { + in, out := &in.Twitter, &out.Twitter + *out = new(LinuxFunctionAppSlotAuthSettingsTwitterObservation) + (*in).DeepCopyInto(*out) + } + if in.UnauthenticatedClientAction != nil { + in, out := &in.UnauthenticatedClientAction, &out.UnauthenticatedClientAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSlotAuthSettingsObservation. +func (in *LinuxFunctionAppSlotAuthSettingsObservation) DeepCopy() *LinuxFunctionAppSlotAuthSettingsObservation { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotAuthSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxFunctionAppSlotAuthSettingsParameters) DeepCopyInto(out *LinuxFunctionAppSlotAuthSettingsParameters) { + *out = *in + if in.ActiveDirectory != nil { + in, out := &in.ActiveDirectory, &out.ActiveDirectory + *out = new(LinuxFunctionAppSlotAuthSettingsActiveDirectoryParameters) + (*in).DeepCopyInto(*out) + } + if in.AdditionalLoginParameters != nil { + in, out := &in.AdditionalLoginParameters, &out.AdditionalLoginParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AllowedExternalRedirectUrls != nil { + in, out := &in.AllowedExternalRedirectUrls, &out.AllowedExternalRedirectUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DefaultProvider != nil { + in, out := &in.DefaultProvider, &out.DefaultProvider + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Facebook != nil { + in, out := &in.Facebook, &out.Facebook + *out = new(LinuxFunctionAppSlotAuthSettingsFacebookParameters) + (*in).DeepCopyInto(*out) + } + if in.Github != nil { + in, out := &in.Github, &out.Github + *out = new(AuthSettingsGithubParameters) + (*in).DeepCopyInto(*out) + } + if in.Google != nil { + in, out := &in.Google, &out.Google + *out = new(LinuxFunctionAppSlotAuthSettingsGoogleParameters) + (*in).DeepCopyInto(*out) + } + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } + if in.Microsoft != nil { + in, out := &in.Microsoft, &out.Microsoft + *out = new(LinuxFunctionAppSlotAuthSettingsMicrosoftParameters) + (*in).DeepCopyInto(*out) + } + if in.RuntimeVersion != nil { + in, out := 
&in.RuntimeVersion, &out.RuntimeVersion + *out = new(string) + **out = **in + } + if in.TokenRefreshExtensionHours != nil { + in, out := &in.TokenRefreshExtensionHours, &out.TokenRefreshExtensionHours + *out = new(float64) + **out = **in + } + if in.TokenStoreEnabled != nil { + in, out := &in.TokenStoreEnabled, &out.TokenStoreEnabled + *out = new(bool) + **out = **in + } + if in.Twitter != nil { + in, out := &in.Twitter, &out.Twitter + *out = new(LinuxFunctionAppSlotAuthSettingsTwitterParameters) + (*in).DeepCopyInto(*out) + } + if in.UnauthenticatedClientAction != nil { + in, out := &in.UnauthenticatedClientAction, &out.UnauthenticatedClientAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSlotAuthSettingsParameters. +func (in *LinuxFunctionAppSlotAuthSettingsParameters) DeepCopy() *LinuxFunctionAppSlotAuthSettingsParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotAuthSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppSlotAuthSettingsTwitterInitParameters) DeepCopyInto(out *LinuxFunctionAppSlotAuthSettingsTwitterInitParameters) { + *out = *in + if in.ConsumerKey != nil { + in, out := &in.ConsumerKey, &out.ConsumerKey + *out = new(string) + **out = **in + } + if in.ConsumerSecretSettingName != nil { + in, out := &in.ConsumerSecretSettingName, &out.ConsumerSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSlotAuthSettingsTwitterInitParameters. 
+func (in *LinuxFunctionAppSlotAuthSettingsTwitterInitParameters) DeepCopy() *LinuxFunctionAppSlotAuthSettingsTwitterInitParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotAuthSettingsTwitterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppSlotAuthSettingsTwitterObservation) DeepCopyInto(out *LinuxFunctionAppSlotAuthSettingsTwitterObservation) { + *out = *in + if in.ConsumerKey != nil { + in, out := &in.ConsumerKey, &out.ConsumerKey + *out = new(string) + **out = **in + } + if in.ConsumerSecretSettingName != nil { + in, out := &in.ConsumerSecretSettingName, &out.ConsumerSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSlotAuthSettingsTwitterObservation. +func (in *LinuxFunctionAppSlotAuthSettingsTwitterObservation) DeepCopy() *LinuxFunctionAppSlotAuthSettingsTwitterObservation { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotAuthSettingsTwitterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxFunctionAppSlotAuthSettingsTwitterParameters) DeepCopyInto(out *LinuxFunctionAppSlotAuthSettingsTwitterParameters) { + *out = *in + if in.ConsumerKey != nil { + in, out := &in.ConsumerKey, &out.ConsumerKey + *out = new(string) + **out = **in + } + if in.ConsumerSecretSecretRef != nil { + in, out := &in.ConsumerSecretSecretRef, &out.ConsumerSecretSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ConsumerSecretSettingName != nil { + in, out := &in.ConsumerSecretSettingName, &out.ConsumerSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSlotAuthSettingsTwitterParameters. +func (in *LinuxFunctionAppSlotAuthSettingsTwitterParameters) DeepCopy() *LinuxFunctionAppSlotAuthSettingsTwitterParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotAuthSettingsTwitterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxFunctionAppSlotAuthSettingsV2InitParameters) DeepCopyInto(out *LinuxFunctionAppSlotAuthSettingsV2InitParameters) { + *out = *in + if in.ActiveDirectoryV2 != nil { + in, out := &in.ActiveDirectoryV2, &out.ActiveDirectoryV2 + *out = new(AuthSettingsV2ActiveDirectoryV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.AppleV2 != nil { + in, out := &in.AppleV2, &out.AppleV2 + *out = new(AuthSettingsV2AppleV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.AuthEnabled != nil { + in, out := &in.AuthEnabled, &out.AuthEnabled + *out = new(bool) + **out = **in + } + if in.AzureStaticWebAppV2 != nil { + in, out := &in.AzureStaticWebAppV2, &out.AzureStaticWebAppV2 + *out = new(AuthSettingsV2AzureStaticWebAppV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.ConfigFilePath != nil { + in, out := &in.ConfigFilePath, &out.ConfigFilePath + *out = new(string) + **out = **in + } + if in.CustomOidcV2 != nil { + in, out := &in.CustomOidcV2, &out.CustomOidcV2 + *out = make([]AuthSettingsV2CustomOidcV2InitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultProvider != nil { + in, out := &in.DefaultProvider, &out.DefaultProvider + *out = new(string) + **out = **in + } + if in.ExcludedPaths != nil { + in, out := &in.ExcludedPaths, &out.ExcludedPaths + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FacebookV2 != nil { + in, out := &in.FacebookV2, &out.FacebookV2 + *out = new(AuthSettingsV2FacebookV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.ForwardProxyConvention != nil { + in, out := &in.ForwardProxyConvention, &out.ForwardProxyConvention + *out = new(string) + **out = **in + } + if in.ForwardProxyCustomHostHeaderName != nil { + in, out := &in.ForwardProxyCustomHostHeaderName, &out.ForwardProxyCustomHostHeaderName + *out = new(string) + **out = **in + } + if 
in.ForwardProxyCustomSchemeHeaderName != nil { + in, out := &in.ForwardProxyCustomSchemeHeaderName, &out.ForwardProxyCustomSchemeHeaderName + *out = new(string) + **out = **in + } + if in.GithubV2 != nil { + in, out := &in.GithubV2, &out.GithubV2 + *out = new(AuthSettingsV2GithubV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.GoogleV2 != nil { + in, out := &in.GoogleV2, &out.GoogleV2 + *out = new(AuthSettingsV2GoogleV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.HTTPRouteAPIPrefix != nil { + in, out := &in.HTTPRouteAPIPrefix, &out.HTTPRouteAPIPrefix + *out = new(string) + **out = **in + } + if in.Login != nil { + in, out := &in.Login, &out.Login + *out = new(AuthSettingsV2LoginInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MicrosoftV2 != nil { + in, out := &in.MicrosoftV2, &out.MicrosoftV2 + *out = new(AuthSettingsV2MicrosoftV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.RequireAuthentication != nil { + in, out := &in.RequireAuthentication, &out.RequireAuthentication + *out = new(bool) + **out = **in + } + if in.RequireHTTPS != nil { + in, out := &in.RequireHTTPS, &out.RequireHTTPS + *out = new(bool) + **out = **in + } + if in.RuntimeVersion != nil { + in, out := &in.RuntimeVersion, &out.RuntimeVersion + *out = new(string) + **out = **in + } + if in.TwitterV2 != nil { + in, out := &in.TwitterV2, &out.TwitterV2 + *out = new(AuthSettingsV2TwitterV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.UnauthenticatedAction != nil { + in, out := &in.UnauthenticatedAction, &out.UnauthenticatedAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSlotAuthSettingsV2InitParameters. 
+func (in *LinuxFunctionAppSlotAuthSettingsV2InitParameters) DeepCopy() *LinuxFunctionAppSlotAuthSettingsV2InitParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotAuthSettingsV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppSlotAuthSettingsV2Observation) DeepCopyInto(out *LinuxFunctionAppSlotAuthSettingsV2Observation) { + *out = *in + if in.ActiveDirectoryV2 != nil { + in, out := &in.ActiveDirectoryV2, &out.ActiveDirectoryV2 + *out = new(AuthSettingsV2ActiveDirectoryV2Observation) + (*in).DeepCopyInto(*out) + } + if in.AppleV2 != nil { + in, out := &in.AppleV2, &out.AppleV2 + *out = new(AuthSettingsV2AppleV2Observation) + (*in).DeepCopyInto(*out) + } + if in.AuthEnabled != nil { + in, out := &in.AuthEnabled, &out.AuthEnabled + *out = new(bool) + **out = **in + } + if in.AzureStaticWebAppV2 != nil { + in, out := &in.AzureStaticWebAppV2, &out.AzureStaticWebAppV2 + *out = new(AuthSettingsV2AzureStaticWebAppV2Observation) + (*in).DeepCopyInto(*out) + } + if in.ConfigFilePath != nil { + in, out := &in.ConfigFilePath, &out.ConfigFilePath + *out = new(string) + **out = **in + } + if in.CustomOidcV2 != nil { + in, out := &in.CustomOidcV2, &out.CustomOidcV2 + *out = make([]AuthSettingsV2CustomOidcV2Observation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultProvider != nil { + in, out := &in.DefaultProvider, &out.DefaultProvider + *out = new(string) + **out = **in + } + if in.ExcludedPaths != nil { + in, out := &in.ExcludedPaths, &out.ExcludedPaths + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FacebookV2 != nil { + in, out := &in.FacebookV2, &out.FacebookV2 + *out = new(AuthSettingsV2FacebookV2Observation) + 
(*in).DeepCopyInto(*out) + } + if in.ForwardProxyConvention != nil { + in, out := &in.ForwardProxyConvention, &out.ForwardProxyConvention + *out = new(string) + **out = **in + } + if in.ForwardProxyCustomHostHeaderName != nil { + in, out := &in.ForwardProxyCustomHostHeaderName, &out.ForwardProxyCustomHostHeaderName + *out = new(string) + **out = **in + } + if in.ForwardProxyCustomSchemeHeaderName != nil { + in, out := &in.ForwardProxyCustomSchemeHeaderName, &out.ForwardProxyCustomSchemeHeaderName + *out = new(string) + **out = **in + } + if in.GithubV2 != nil { + in, out := &in.GithubV2, &out.GithubV2 + *out = new(AuthSettingsV2GithubV2Observation) + (*in).DeepCopyInto(*out) + } + if in.GoogleV2 != nil { + in, out := &in.GoogleV2, &out.GoogleV2 + *out = new(AuthSettingsV2GoogleV2Observation) + (*in).DeepCopyInto(*out) + } + if in.HTTPRouteAPIPrefix != nil { + in, out := &in.HTTPRouteAPIPrefix, &out.HTTPRouteAPIPrefix + *out = new(string) + **out = **in + } + if in.Login != nil { + in, out := &in.Login, &out.Login + *out = new(AuthSettingsV2LoginObservation) + (*in).DeepCopyInto(*out) + } + if in.MicrosoftV2 != nil { + in, out := &in.MicrosoftV2, &out.MicrosoftV2 + *out = new(AuthSettingsV2MicrosoftV2Observation) + (*in).DeepCopyInto(*out) + } + if in.RequireAuthentication != nil { + in, out := &in.RequireAuthentication, &out.RequireAuthentication + *out = new(bool) + **out = **in + } + if in.RequireHTTPS != nil { + in, out := &in.RequireHTTPS, &out.RequireHTTPS + *out = new(bool) + **out = **in + } + if in.RuntimeVersion != nil { + in, out := &in.RuntimeVersion, &out.RuntimeVersion + *out = new(string) + **out = **in + } + if in.TwitterV2 != nil { + in, out := &in.TwitterV2, &out.TwitterV2 + *out = new(AuthSettingsV2TwitterV2Observation) + (*in).DeepCopyInto(*out) + } + if in.UnauthenticatedAction != nil { + in, out := &in.UnauthenticatedAction, &out.UnauthenticatedAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy 
function, copying the receiver, creating a new LinuxFunctionAppSlotAuthSettingsV2Observation. +func (in *LinuxFunctionAppSlotAuthSettingsV2Observation) DeepCopy() *LinuxFunctionAppSlotAuthSettingsV2Observation { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotAuthSettingsV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppSlotAuthSettingsV2Parameters) DeepCopyInto(out *LinuxFunctionAppSlotAuthSettingsV2Parameters) { + *out = *in + if in.ActiveDirectoryV2 != nil { + in, out := &in.ActiveDirectoryV2, &out.ActiveDirectoryV2 + *out = new(AuthSettingsV2ActiveDirectoryV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.AppleV2 != nil { + in, out := &in.AppleV2, &out.AppleV2 + *out = new(AuthSettingsV2AppleV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.AuthEnabled != nil { + in, out := &in.AuthEnabled, &out.AuthEnabled + *out = new(bool) + **out = **in + } + if in.AzureStaticWebAppV2 != nil { + in, out := &in.AzureStaticWebAppV2, &out.AzureStaticWebAppV2 + *out = new(AuthSettingsV2AzureStaticWebAppV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.ConfigFilePath != nil { + in, out := &in.ConfigFilePath, &out.ConfigFilePath + *out = new(string) + **out = **in + } + if in.CustomOidcV2 != nil { + in, out := &in.CustomOidcV2, &out.CustomOidcV2 + *out = make([]AuthSettingsV2CustomOidcV2Parameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultProvider != nil { + in, out := &in.DefaultProvider, &out.DefaultProvider + *out = new(string) + **out = **in + } + if in.ExcludedPaths != nil { + in, out := &in.ExcludedPaths, &out.ExcludedPaths + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FacebookV2 != nil { + in, out := &in.FacebookV2, 
&out.FacebookV2 + *out = new(AuthSettingsV2FacebookV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.ForwardProxyConvention != nil { + in, out := &in.ForwardProxyConvention, &out.ForwardProxyConvention + *out = new(string) + **out = **in + } + if in.ForwardProxyCustomHostHeaderName != nil { + in, out := &in.ForwardProxyCustomHostHeaderName, &out.ForwardProxyCustomHostHeaderName + *out = new(string) + **out = **in + } + if in.ForwardProxyCustomSchemeHeaderName != nil { + in, out := &in.ForwardProxyCustomSchemeHeaderName, &out.ForwardProxyCustomSchemeHeaderName + *out = new(string) + **out = **in + } + if in.GithubV2 != nil { + in, out := &in.GithubV2, &out.GithubV2 + *out = new(AuthSettingsV2GithubV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.GoogleV2 != nil { + in, out := &in.GoogleV2, &out.GoogleV2 + *out = new(AuthSettingsV2GoogleV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.HTTPRouteAPIPrefix != nil { + in, out := &in.HTTPRouteAPIPrefix, &out.HTTPRouteAPIPrefix + *out = new(string) + **out = **in + } + if in.Login != nil { + in, out := &in.Login, &out.Login + *out = new(AuthSettingsV2LoginParameters) + (*in).DeepCopyInto(*out) + } + if in.MicrosoftV2 != nil { + in, out := &in.MicrosoftV2, &out.MicrosoftV2 + *out = new(AuthSettingsV2MicrosoftV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.RequireAuthentication != nil { + in, out := &in.RequireAuthentication, &out.RequireAuthentication + *out = new(bool) + **out = **in + } + if in.RequireHTTPS != nil { + in, out := &in.RequireHTTPS, &out.RequireHTTPS + *out = new(bool) + **out = **in + } + if in.RuntimeVersion != nil { + in, out := &in.RuntimeVersion, &out.RuntimeVersion + *out = new(string) + **out = **in + } + if in.TwitterV2 != nil { + in, out := &in.TwitterV2, &out.TwitterV2 + *out = new(AuthSettingsV2TwitterV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.UnauthenticatedAction != nil { + in, out := &in.UnauthenticatedAction, &out.UnauthenticatedAction + *out = new(string) + **out = 
**in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSlotAuthSettingsV2Parameters. +func (in *LinuxFunctionAppSlotAuthSettingsV2Parameters) DeepCopy() *LinuxFunctionAppSlotAuthSettingsV2Parameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotAuthSettingsV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppSlotBackupInitParameters) DeepCopyInto(out *LinuxFunctionAppSlotBackupInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(BackupScheduleInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSlotBackupInitParameters. +func (in *LinuxFunctionAppSlotBackupInitParameters) DeepCopy() *LinuxFunctionAppSlotBackupInitParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotBackupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxFunctionAppSlotBackupObservation) DeepCopyInto(out *LinuxFunctionAppSlotBackupObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(BackupScheduleObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSlotBackupObservation. +func (in *LinuxFunctionAppSlotBackupObservation) DeepCopy() *LinuxFunctionAppSlotBackupObservation { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotBackupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppSlotBackupParameters) DeepCopyInto(out *LinuxFunctionAppSlotBackupParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(BackupScheduleParameters) + (*in).DeepCopyInto(*out) + } + out.StorageAccountURLSecretRef = in.StorageAccountURLSecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSlotBackupParameters. +func (in *LinuxFunctionAppSlotBackupParameters) DeepCopy() *LinuxFunctionAppSlotBackupParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotBackupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxFunctionAppSlotConnectionStringInitParameters) DeepCopyInto(out *LinuxFunctionAppSlotConnectionStringInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSlotConnectionStringInitParameters. +func (in *LinuxFunctionAppSlotConnectionStringInitParameters) DeepCopy() *LinuxFunctionAppSlotConnectionStringInitParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotConnectionStringInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppSlotConnectionStringObservation) DeepCopyInto(out *LinuxFunctionAppSlotConnectionStringObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSlotConnectionStringObservation. +func (in *LinuxFunctionAppSlotConnectionStringObservation) DeepCopy() *LinuxFunctionAppSlotConnectionStringObservation { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotConnectionStringObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxFunctionAppSlotConnectionStringParameters) DeepCopyInto(out *LinuxFunctionAppSlotConnectionStringParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + out.ValueSecretRef = in.ValueSecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSlotConnectionStringParameters. +func (in *LinuxFunctionAppSlotConnectionStringParameters) DeepCopy() *LinuxFunctionAppSlotConnectionStringParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotConnectionStringParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppSlotIdentityInitParameters) DeepCopyInto(out *LinuxFunctionAppSlotIdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSlotIdentityInitParameters. +func (in *LinuxFunctionAppSlotIdentityInitParameters) DeepCopy() *LinuxFunctionAppSlotIdentityInitParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotIdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxFunctionAppSlotIdentityObservation) DeepCopyInto(out *LinuxFunctionAppSlotIdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSlotIdentityObservation. +func (in *LinuxFunctionAppSlotIdentityObservation) DeepCopy() *LinuxFunctionAppSlotIdentityObservation { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotIdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppSlotIdentityParameters) DeepCopyInto(out *LinuxFunctionAppSlotIdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSlotIdentityParameters. 
+func (in *LinuxFunctionAppSlotIdentityParameters) DeepCopy() *LinuxFunctionAppSlotIdentityParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotIdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppSlotInitParameters) DeepCopyInto(out *LinuxFunctionAppSlotInitParameters) { + *out = *in + if in.AppSettings != nil { + in, out := &in.AppSettings, &out.AppSettings + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AuthSettings != nil { + in, out := &in.AuthSettings, &out.AuthSettings + *out = new(LinuxFunctionAppSlotAuthSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AuthSettingsV2 != nil { + in, out := &in.AuthSettingsV2, &out.AuthSettingsV2 + *out = new(LinuxFunctionAppSlotAuthSettingsV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.Backup != nil { + in, out := &in.Backup, &out.Backup + *out = new(LinuxFunctionAppSlotBackupInitParameters) + (*in).DeepCopyInto(*out) + } + if in.BuiltinLoggingEnabled != nil { + in, out := &in.BuiltinLoggingEnabled, &out.BuiltinLoggingEnabled + *out = new(bool) + **out = **in + } + if in.ClientCertificateEnabled != nil { + in, out := &in.ClientCertificateEnabled, &out.ClientCertificateEnabled + *out = new(bool) + **out = **in + } + if in.ClientCertificateExclusionPaths != nil { + in, out := &in.ClientCertificateExclusionPaths, &out.ClientCertificateExclusionPaths + *out = new(string) + **out = **in + } + if in.ClientCertificateMode != nil { + in, out := &in.ClientCertificateMode, &out.ClientCertificateMode + *out = new(string) + **out = **in + } + if in.ConnectionString != nil { + in, out := &in.ConnectionString, 
&out.ConnectionString + *out = make([]LinuxFunctionAppSlotConnectionStringInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ContentShareForceDisabled != nil { + in, out := &in.ContentShareForceDisabled, &out.ContentShareForceDisabled + *out = new(bool) + **out = **in + } + if in.DailyMemoryTimeQuota != nil { + in, out := &in.DailyMemoryTimeQuota, &out.DailyMemoryTimeQuota + *out = new(float64) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.FtpPublishBasicAuthenticationEnabled != nil { + in, out := &in.FtpPublishBasicAuthenticationEnabled, &out.FtpPublishBasicAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.FunctionsExtensionVersion != nil { + in, out := &in.FunctionsExtensionVersion, &out.FunctionsExtensionVersion + *out = new(string) + **out = **in + } + if in.HTTPSOnly != nil { + in, out := &in.HTTPSOnly, &out.HTTPSOnly + *out = new(bool) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(LinuxFunctionAppSlotIdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.KeyVaultReferenceIdentityID != nil { + in, out := &in.KeyVaultReferenceIdentityID, &out.KeyVaultReferenceIdentityID + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ServicePlanID != nil { + in, out := &in.ServicePlanID, &out.ServicePlanID + *out = new(string) + **out = **in + } + if in.SiteConfig != nil { + in, out := &in.SiteConfig, &out.SiteConfig + *out = new(LinuxFunctionAppSlotSiteConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StorageAccount != nil { + in, out := &in.StorageAccount, &out.StorageAccount + *out = make([]LinuxFunctionAppSlotStorageAccountInitParameters, len(*in)) + for i := range *in { + 
(*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StorageAccountName != nil { + in, out := &in.StorageAccountName, &out.StorageAccountName + *out = new(string) + **out = **in + } + if in.StorageAccountNameRef != nil { + in, out := &in.StorageAccountNameRef, &out.StorageAccountNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StorageAccountNameSelector != nil { + in, out := &in.StorageAccountNameSelector, &out.StorageAccountNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StorageKeyVaultSecretID != nil { + in, out := &in.StorageKeyVaultSecretID, &out.StorageKeyVaultSecretID + *out = new(string) + **out = **in + } + if in.StorageUsesManagedIdentity != nil { + in, out := &in.StorageUsesManagedIdentity, &out.StorageUsesManagedIdentity + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetIDRef != nil { + in, out := &in.VirtualNetworkSubnetIDRef, &out.VirtualNetworkSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkSubnetIDSelector != nil { + in, out := &in.VirtualNetworkSubnetIDSelector, &out.VirtualNetworkSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.WebdeployPublishBasicAuthenticationEnabled != nil { + in, out := &in.WebdeployPublishBasicAuthenticationEnabled, &out.WebdeployPublishBasicAuthenticationEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
LinuxFunctionAppSlotInitParameters. +func (in *LinuxFunctionAppSlotInitParameters) DeepCopy() *LinuxFunctionAppSlotInitParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppSlotList) DeepCopyInto(out *LinuxFunctionAppSlotList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LinuxFunctionAppSlot, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSlotList. +func (in *LinuxFunctionAppSlotList) DeepCopy() *LinuxFunctionAppSlotList { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LinuxFunctionAppSlotList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxFunctionAppSlotObservation) DeepCopyInto(out *LinuxFunctionAppSlotObservation) { + *out = *in + if in.AppSettings != nil { + in, out := &in.AppSettings, &out.AppSettings + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AuthSettings != nil { + in, out := &in.AuthSettings, &out.AuthSettings + *out = new(LinuxFunctionAppSlotAuthSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.AuthSettingsV2 != nil { + in, out := &in.AuthSettingsV2, &out.AuthSettingsV2 + *out = new(LinuxFunctionAppSlotAuthSettingsV2Observation) + (*in).DeepCopyInto(*out) + } + if in.Backup != nil { + in, out := &in.Backup, &out.Backup + *out = new(LinuxFunctionAppSlotBackupObservation) + (*in).DeepCopyInto(*out) + } + if in.BuiltinLoggingEnabled != nil { + in, out := &in.BuiltinLoggingEnabled, &out.BuiltinLoggingEnabled + *out = new(bool) + **out = **in + } + if in.ClientCertificateEnabled != nil { + in, out := &in.ClientCertificateEnabled, &out.ClientCertificateEnabled + *out = new(bool) + **out = **in + } + if in.ClientCertificateExclusionPaths != nil { + in, out := &in.ClientCertificateExclusionPaths, &out.ClientCertificateExclusionPaths + *out = new(string) + **out = **in + } + if in.ClientCertificateMode != nil { + in, out := &in.ClientCertificateMode, &out.ClientCertificateMode + *out = new(string) + **out = **in + } + if in.ConnectionString != nil { + in, out := &in.ConnectionString, &out.ConnectionString + *out = make([]LinuxFunctionAppSlotConnectionStringObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ContentShareForceDisabled != nil { + in, out := &in.ContentShareForceDisabled, &out.ContentShareForceDisabled + *out = new(bool) + **out = **in + } + if in.DailyMemoryTimeQuota != nil { + in, out := 
&in.DailyMemoryTimeQuota, &out.DailyMemoryTimeQuota + *out = new(float64) + **out = **in + } + if in.DefaultHostName != nil { + in, out := &in.DefaultHostName, &out.DefaultHostName + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.FtpPublishBasicAuthenticationEnabled != nil { + in, out := &in.FtpPublishBasicAuthenticationEnabled, &out.FtpPublishBasicAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.FunctionAppID != nil { + in, out := &in.FunctionAppID, &out.FunctionAppID + *out = new(string) + **out = **in + } + if in.FunctionsExtensionVersion != nil { + in, out := &in.FunctionsExtensionVersion, &out.FunctionsExtensionVersion + *out = new(string) + **out = **in + } + if in.HTTPSOnly != nil { + in, out := &in.HTTPSOnly, &out.HTTPSOnly + *out = new(bool) + **out = **in + } + if in.HostingEnvironmentID != nil { + in, out := &in.HostingEnvironmentID, &out.HostingEnvironmentID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(LinuxFunctionAppSlotIdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.KeyVaultReferenceIdentityID != nil { + in, out := &in.KeyVaultReferenceIdentityID, &out.KeyVaultReferenceIdentityID + *out = new(string) + **out = **in + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(string) + **out = **in + } + if in.OutboundIPAddressList != nil { + in, out := &in.OutboundIPAddressList, &out.OutboundIPAddressList + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OutboundIPAddresses != nil { + in, out := &in.OutboundIPAddresses, &out.OutboundIPAddresses + *out = new(string) + **out = **in + } + if in.PossibleOutboundIPAddressList != nil { 
+ in, out := &in.PossibleOutboundIPAddressList, &out.PossibleOutboundIPAddressList + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PossibleOutboundIPAddresses != nil { + in, out := &in.PossibleOutboundIPAddresses, &out.PossibleOutboundIPAddresses + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ServicePlanID != nil { + in, out := &in.ServicePlanID, &out.ServicePlanID + *out = new(string) + **out = **in + } + if in.SiteConfig != nil { + in, out := &in.SiteConfig, &out.SiteConfig + *out = new(LinuxFunctionAppSlotSiteConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.StorageAccount != nil { + in, out := &in.StorageAccount, &out.StorageAccount + *out = make([]LinuxFunctionAppSlotStorageAccountObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StorageAccountName != nil { + in, out := &in.StorageAccountName, &out.StorageAccountName + *out = new(string) + **out = **in + } + if in.StorageKeyVaultSecretID != nil { + in, out := &in.StorageKeyVaultSecretID, &out.StorageKeyVaultSecretID + *out = new(string) + **out = **in + } + if in.StorageUsesManagedIdentity != nil { + in, out := &in.StorageUsesManagedIdentity, &out.StorageUsesManagedIdentity + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if 
in.WebdeployPublishBasicAuthenticationEnabled != nil { + in, out := &in.WebdeployPublishBasicAuthenticationEnabled, &out.WebdeployPublishBasicAuthenticationEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSlotObservation. +func (in *LinuxFunctionAppSlotObservation) DeepCopy() *LinuxFunctionAppSlotObservation { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppSlotParameters) DeepCopyInto(out *LinuxFunctionAppSlotParameters) { + *out = *in + if in.AppSettings != nil { + in, out := &in.AppSettings, &out.AppSettings + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AuthSettings != nil { + in, out := &in.AuthSettings, &out.AuthSettings + *out = new(LinuxFunctionAppSlotAuthSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.AuthSettingsV2 != nil { + in, out := &in.AuthSettingsV2, &out.AuthSettingsV2 + *out = new(LinuxFunctionAppSlotAuthSettingsV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.Backup != nil { + in, out := &in.Backup, &out.Backup + *out = new(LinuxFunctionAppSlotBackupParameters) + (*in).DeepCopyInto(*out) + } + if in.BuiltinLoggingEnabled != nil { + in, out := &in.BuiltinLoggingEnabled, &out.BuiltinLoggingEnabled + *out = new(bool) + **out = **in + } + if in.ClientCertificateEnabled != nil { + in, out := &in.ClientCertificateEnabled, &out.ClientCertificateEnabled + *out = new(bool) + **out = **in + } + if in.ClientCertificateExclusionPaths != nil { + in, out := &in.ClientCertificateExclusionPaths, 
&out.ClientCertificateExclusionPaths + *out = new(string) + **out = **in + } + if in.ClientCertificateMode != nil { + in, out := &in.ClientCertificateMode, &out.ClientCertificateMode + *out = new(string) + **out = **in + } + if in.ConnectionString != nil { + in, out := &in.ConnectionString, &out.ConnectionString + *out = make([]LinuxFunctionAppSlotConnectionStringParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ContentShareForceDisabled != nil { + in, out := &in.ContentShareForceDisabled, &out.ContentShareForceDisabled + *out = new(bool) + **out = **in + } + if in.DailyMemoryTimeQuota != nil { + in, out := &in.DailyMemoryTimeQuota, &out.DailyMemoryTimeQuota + *out = new(float64) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.FtpPublishBasicAuthenticationEnabled != nil { + in, out := &in.FtpPublishBasicAuthenticationEnabled, &out.FtpPublishBasicAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.FunctionAppID != nil { + in, out := &in.FunctionAppID, &out.FunctionAppID + *out = new(string) + **out = **in + } + if in.FunctionAppIDRef != nil { + in, out := &in.FunctionAppIDRef, &out.FunctionAppIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FunctionAppIDSelector != nil { + in, out := &in.FunctionAppIDSelector, &out.FunctionAppIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.FunctionsExtensionVersion != nil { + in, out := &in.FunctionsExtensionVersion, &out.FunctionsExtensionVersion + *out = new(string) + **out = **in + } + if in.HTTPSOnly != nil { + in, out := &in.HTTPSOnly, &out.HTTPSOnly + *out = new(bool) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(LinuxFunctionAppSlotIdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.KeyVaultReferenceIdentityID != nil { + in, out := &in.KeyVaultReferenceIdentityID, 
&out.KeyVaultReferenceIdentityID + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ServicePlanID != nil { + in, out := &in.ServicePlanID, &out.ServicePlanID + *out = new(string) + **out = **in + } + if in.SiteConfig != nil { + in, out := &in.SiteConfig, &out.SiteConfig + *out = new(LinuxFunctionAppSlotSiteConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.StorageAccount != nil { + in, out := &in.StorageAccount, &out.StorageAccount + *out = make([]LinuxFunctionAppSlotStorageAccountParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StorageAccountAccessKeySecretRef != nil { + in, out := &in.StorageAccountAccessKeySecretRef, &out.StorageAccountAccessKeySecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.StorageAccountName != nil { + in, out := &in.StorageAccountName, &out.StorageAccountName + *out = new(string) + **out = **in + } + if in.StorageAccountNameRef != nil { + in, out := &in.StorageAccountNameRef, &out.StorageAccountNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StorageAccountNameSelector != nil { + in, out := &in.StorageAccountNameSelector, &out.StorageAccountNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StorageKeyVaultSecretID != nil { + in, out := &in.StorageKeyVaultSecretID, &out.StorageKeyVaultSecretID + *out = new(string) + **out = **in + } + if in.StorageUsesManagedIdentity != nil { + in, out := &in.StorageUsesManagedIdentity, &out.StorageUsesManagedIdentity + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out 
= **in + } + (*out)[key] = outVal + } + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetIDRef != nil { + in, out := &in.VirtualNetworkSubnetIDRef, &out.VirtualNetworkSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkSubnetIDSelector != nil { + in, out := &in.VirtualNetworkSubnetIDSelector, &out.VirtualNetworkSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.WebdeployPublishBasicAuthenticationEnabled != nil { + in, out := &in.WebdeployPublishBasicAuthenticationEnabled, &out.WebdeployPublishBasicAuthenticationEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSlotParameters. +func (in *LinuxFunctionAppSlotParameters) DeepCopy() *LinuxFunctionAppSlotParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppSlotSiteConfigCorsInitParameters) DeepCopyInto(out *LinuxFunctionAppSlotSiteConfigCorsInitParameters) { + *out = *in + if in.AllowedOrigins != nil { + in, out := &in.AllowedOrigins, &out.AllowedOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SupportCredentials != nil { + in, out := &in.SupportCredentials, &out.SupportCredentials + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSlotSiteConfigCorsInitParameters. 
+func (in *LinuxFunctionAppSlotSiteConfigCorsInitParameters) DeepCopy() *LinuxFunctionAppSlotSiteConfigCorsInitParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotSiteConfigCorsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppSlotSiteConfigCorsObservation) DeepCopyInto(out *LinuxFunctionAppSlotSiteConfigCorsObservation) { + *out = *in + if in.AllowedOrigins != nil { + in, out := &in.AllowedOrigins, &out.AllowedOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SupportCredentials != nil { + in, out := &in.SupportCredentials, &out.SupportCredentials + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSlotSiteConfigCorsObservation. +func (in *LinuxFunctionAppSlotSiteConfigCorsObservation) DeepCopy() *LinuxFunctionAppSlotSiteConfigCorsObservation { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotSiteConfigCorsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxFunctionAppSlotSiteConfigCorsParameters) DeepCopyInto(out *LinuxFunctionAppSlotSiteConfigCorsParameters) { + *out = *in + if in.AllowedOrigins != nil { + in, out := &in.AllowedOrigins, &out.AllowedOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SupportCredentials != nil { + in, out := &in.SupportCredentials, &out.SupportCredentials + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSlotSiteConfigCorsParameters. +func (in *LinuxFunctionAppSlotSiteConfigCorsParameters) DeepCopy() *LinuxFunctionAppSlotSiteConfigCorsParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotSiteConfigCorsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxFunctionAppSlotSiteConfigIPRestrictionHeadersInitParameters) DeepCopyInto(out *LinuxFunctionAppSlotSiteConfigIPRestrictionHeadersInitParameters) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSlotSiteConfigIPRestrictionHeadersInitParameters. +func (in *LinuxFunctionAppSlotSiteConfigIPRestrictionHeadersInitParameters) DeepCopy() *LinuxFunctionAppSlotSiteConfigIPRestrictionHeadersInitParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotSiteConfigIPRestrictionHeadersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxFunctionAppSlotSiteConfigIPRestrictionHeadersObservation) DeepCopyInto(out *LinuxFunctionAppSlotSiteConfigIPRestrictionHeadersObservation) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSlotSiteConfigIPRestrictionHeadersObservation. +func (in *LinuxFunctionAppSlotSiteConfigIPRestrictionHeadersObservation) DeepCopy() *LinuxFunctionAppSlotSiteConfigIPRestrictionHeadersObservation { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotSiteConfigIPRestrictionHeadersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxFunctionAppSlotSiteConfigIPRestrictionHeadersParameters) DeepCopyInto(out *LinuxFunctionAppSlotSiteConfigIPRestrictionHeadersParameters) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSlotSiteConfigIPRestrictionHeadersParameters. +func (in *LinuxFunctionAppSlotSiteConfigIPRestrictionHeadersParameters) DeepCopy() *LinuxFunctionAppSlotSiteConfigIPRestrictionHeadersParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotSiteConfigIPRestrictionHeadersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxFunctionAppSlotSiteConfigIPRestrictionInitParameters) DeepCopyInto(out *LinuxFunctionAppSlotSiteConfigIPRestrictionInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]LinuxFunctionAppSlotSiteConfigIPRestrictionHeadersInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetIDRef != nil { + in, out := &in.VirtualNetworkSubnetIDRef, &out.VirtualNetworkSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkSubnetIDSelector != nil { + in, out := &in.VirtualNetworkSubnetIDSelector, &out.VirtualNetworkSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSlotSiteConfigIPRestrictionInitParameters. 
+func (in *LinuxFunctionAppSlotSiteConfigIPRestrictionInitParameters) DeepCopy() *LinuxFunctionAppSlotSiteConfigIPRestrictionInitParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotSiteConfigIPRestrictionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppSlotSiteConfigIPRestrictionObservation) DeepCopyInto(out *LinuxFunctionAppSlotSiteConfigIPRestrictionObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]LinuxFunctionAppSlotSiteConfigIPRestrictionHeadersObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSlotSiteConfigIPRestrictionObservation. 
+func (in *LinuxFunctionAppSlotSiteConfigIPRestrictionObservation) DeepCopy() *LinuxFunctionAppSlotSiteConfigIPRestrictionObservation { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotSiteConfigIPRestrictionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppSlotSiteConfigIPRestrictionParameters) DeepCopyInto(out *LinuxFunctionAppSlotSiteConfigIPRestrictionParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]LinuxFunctionAppSlotSiteConfigIPRestrictionHeadersParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetIDRef != nil { + in, out := &in.VirtualNetworkSubnetIDRef, &out.VirtualNetworkSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkSubnetIDSelector != nil { + in, out := &in.VirtualNetworkSubnetIDSelector, &out.VirtualNetworkSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the 
receiver, creating a new LinuxFunctionAppSlotSiteConfigIPRestrictionParameters. +func (in *LinuxFunctionAppSlotSiteConfigIPRestrictionParameters) DeepCopy() *LinuxFunctionAppSlotSiteConfigIPRestrictionParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotSiteConfigIPRestrictionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppSlotSiteConfigInitParameters) DeepCopyInto(out *LinuxFunctionAppSlotSiteConfigInitParameters) { + *out = *in + if in.APIDefinitionURL != nil { + in, out := &in.APIDefinitionURL, &out.APIDefinitionURL + *out = new(string) + **out = **in + } + if in.APIManagementAPIID != nil { + in, out := &in.APIManagementAPIID, &out.APIManagementAPIID + *out = new(string) + **out = **in + } + if in.AlwaysOn != nil { + in, out := &in.AlwaysOn, &out.AlwaysOn + *out = new(bool) + **out = **in + } + if in.AppCommandLine != nil { + in, out := &in.AppCommandLine, &out.AppCommandLine + *out = new(string) + **out = **in + } + if in.AppScaleLimit != nil { + in, out := &in.AppScaleLimit, &out.AppScaleLimit + *out = new(float64) + **out = **in + } + if in.AppServiceLogs != nil { + in, out := &in.AppServiceLogs, &out.AppServiceLogs + *out = new(SiteConfigAppServiceLogsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ApplicationStack != nil { + in, out := &in.ApplicationStack, &out.ApplicationStack + *out = new(SiteConfigApplicationStackInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AutoSwapSlotName != nil { + in, out := &in.AutoSwapSlotName, &out.AutoSwapSlotName + *out = new(string) + **out = **in + } + if in.ContainerRegistryManagedIdentityClientID != nil { + in, out := &in.ContainerRegistryManagedIdentityClientID, &out.ContainerRegistryManagedIdentityClientID + *out = new(string) + **out = **in + } + if in.ContainerRegistryUseManagedIdentity != nil { + in, out := 
&in.ContainerRegistryUseManagedIdentity, &out.ContainerRegistryUseManagedIdentity + *out = new(bool) + **out = **in + } + if in.Cors != nil { + in, out := &in.Cors, &out.Cors + *out = new(LinuxFunctionAppSlotSiteConfigCorsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DefaultDocuments != nil { + in, out := &in.DefaultDocuments, &out.DefaultDocuments + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ElasticInstanceMinimum != nil { + in, out := &in.ElasticInstanceMinimum, &out.ElasticInstanceMinimum + *out = new(float64) + **out = **in + } + if in.FtpsState != nil { + in, out := &in.FtpsState, &out.FtpsState + *out = new(string) + **out = **in + } + if in.HealthCheckEvictionTimeInMin != nil { + in, out := &in.HealthCheckEvictionTimeInMin, &out.HealthCheckEvictionTimeInMin + *out = new(float64) + **out = **in + } + if in.HealthCheckPath != nil { + in, out := &in.HealthCheckPath, &out.HealthCheckPath + *out = new(string) + **out = **in + } + if in.Http2Enabled != nil { + in, out := &in.Http2Enabled, &out.Http2Enabled + *out = new(bool) + **out = **in + } + if in.IPRestriction != nil { + in, out := &in.IPRestriction, &out.IPRestriction + *out = make([]LinuxFunctionAppSlotSiteConfigIPRestrictionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPRestrictionDefaultAction != nil { + in, out := &in.IPRestrictionDefaultAction, &out.IPRestrictionDefaultAction + *out = new(string) + **out = **in + } + if in.LoadBalancingMode != nil { + in, out := &in.LoadBalancingMode, &out.LoadBalancingMode + *out = new(string) + **out = **in + } + if in.ManagedPipelineMode != nil { + in, out := &in.ManagedPipelineMode, &out.ManagedPipelineMode + *out = new(string) + **out = **in + } + if in.MinimumTLSVersion != nil { + in, out := &in.MinimumTLSVersion, &out.MinimumTLSVersion + *out = new(string) + **out = **in + } 
+ if in.PreWarmedInstanceCount != nil { + in, out := &in.PreWarmedInstanceCount, &out.PreWarmedInstanceCount + *out = new(float64) + **out = **in + } + if in.RemoteDebuggingEnabled != nil { + in, out := &in.RemoteDebuggingEnabled, &out.RemoteDebuggingEnabled + *out = new(bool) + **out = **in + } + if in.RemoteDebuggingVersion != nil { + in, out := &in.RemoteDebuggingVersion, &out.RemoteDebuggingVersion + *out = new(string) + **out = **in + } + if in.RuntimeScaleMonitoringEnabled != nil { + in, out := &in.RuntimeScaleMonitoringEnabled, &out.RuntimeScaleMonitoringEnabled + *out = new(bool) + **out = **in + } + if in.ScmIPRestriction != nil { + in, out := &in.ScmIPRestriction, &out.ScmIPRestriction + *out = make([]LinuxFunctionAppSlotSiteConfigScmIPRestrictionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ScmIPRestrictionDefaultAction != nil { + in, out := &in.ScmIPRestrictionDefaultAction, &out.ScmIPRestrictionDefaultAction + *out = new(string) + **out = **in + } + if in.ScmMinimumTLSVersion != nil { + in, out := &in.ScmMinimumTLSVersion, &out.ScmMinimumTLSVersion + *out = new(string) + **out = **in + } + if in.ScmUseMainIPRestriction != nil { + in, out := &in.ScmUseMainIPRestriction, &out.ScmUseMainIPRestriction + *out = new(bool) + **out = **in + } + if in.Use32BitWorker != nil { + in, out := &in.Use32BitWorker, &out.Use32BitWorker + *out = new(bool) + **out = **in + } + if in.VnetRouteAllEnabled != nil { + in, out := &in.VnetRouteAllEnabled, &out.VnetRouteAllEnabled + *out = new(bool) + **out = **in + } + if in.WebsocketsEnabled != nil { + in, out := &in.WebsocketsEnabled, &out.WebsocketsEnabled + *out = new(bool) + **out = **in + } + if in.WorkerCount != nil { + in, out := &in.WorkerCount, &out.WorkerCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSlotSiteConfigInitParameters. 
+func (in *LinuxFunctionAppSlotSiteConfigInitParameters) DeepCopy() *LinuxFunctionAppSlotSiteConfigInitParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotSiteConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppSlotSiteConfigObservation) DeepCopyInto(out *LinuxFunctionAppSlotSiteConfigObservation) { + *out = *in + if in.APIDefinitionURL != nil { + in, out := &in.APIDefinitionURL, &out.APIDefinitionURL + *out = new(string) + **out = **in + } + if in.APIManagementAPIID != nil { + in, out := &in.APIManagementAPIID, &out.APIManagementAPIID + *out = new(string) + **out = **in + } + if in.AlwaysOn != nil { + in, out := &in.AlwaysOn, &out.AlwaysOn + *out = new(bool) + **out = **in + } + if in.AppCommandLine != nil { + in, out := &in.AppCommandLine, &out.AppCommandLine + *out = new(string) + **out = **in + } + if in.AppScaleLimit != nil { + in, out := &in.AppScaleLimit, &out.AppScaleLimit + *out = new(float64) + **out = **in + } + if in.AppServiceLogs != nil { + in, out := &in.AppServiceLogs, &out.AppServiceLogs + *out = new(SiteConfigAppServiceLogsObservation) + (*in).DeepCopyInto(*out) + } + if in.ApplicationStack != nil { + in, out := &in.ApplicationStack, &out.ApplicationStack + *out = new(SiteConfigApplicationStackObservation) + (*in).DeepCopyInto(*out) + } + if in.AutoSwapSlotName != nil { + in, out := &in.AutoSwapSlotName, &out.AutoSwapSlotName + *out = new(string) + **out = **in + } + if in.ContainerRegistryManagedIdentityClientID != nil { + in, out := &in.ContainerRegistryManagedIdentityClientID, &out.ContainerRegistryManagedIdentityClientID + *out = new(string) + **out = **in + } + if in.ContainerRegistryUseManagedIdentity != nil { + in, out := &in.ContainerRegistryUseManagedIdentity, &out.ContainerRegistryUseManagedIdentity + *out = new(bool) + **out = **in + } + if in.Cors != nil { 
+ in, out := &in.Cors, &out.Cors + *out = new(LinuxFunctionAppSlotSiteConfigCorsObservation) + (*in).DeepCopyInto(*out) + } + if in.DefaultDocuments != nil { + in, out := &in.DefaultDocuments, &out.DefaultDocuments + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DetailedErrorLoggingEnabled != nil { + in, out := &in.DetailedErrorLoggingEnabled, &out.DetailedErrorLoggingEnabled + *out = new(bool) + **out = **in + } + if in.ElasticInstanceMinimum != nil { + in, out := &in.ElasticInstanceMinimum, &out.ElasticInstanceMinimum + *out = new(float64) + **out = **in + } + if in.FtpsState != nil { + in, out := &in.FtpsState, &out.FtpsState + *out = new(string) + **out = **in + } + if in.HealthCheckEvictionTimeInMin != nil { + in, out := &in.HealthCheckEvictionTimeInMin, &out.HealthCheckEvictionTimeInMin + *out = new(float64) + **out = **in + } + if in.HealthCheckPath != nil { + in, out := &in.HealthCheckPath, &out.HealthCheckPath + *out = new(string) + **out = **in + } + if in.Http2Enabled != nil { + in, out := &in.Http2Enabled, &out.Http2Enabled + *out = new(bool) + **out = **in + } + if in.IPRestriction != nil { + in, out := &in.IPRestriction, &out.IPRestriction + *out = make([]LinuxFunctionAppSlotSiteConfigIPRestrictionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPRestrictionDefaultAction != nil { + in, out := &in.IPRestrictionDefaultAction, &out.IPRestrictionDefaultAction + *out = new(string) + **out = **in + } + if in.LinuxFxVersion != nil { + in, out := &in.LinuxFxVersion, &out.LinuxFxVersion + *out = new(string) + **out = **in + } + if in.LoadBalancingMode != nil { + in, out := &in.LoadBalancingMode, &out.LoadBalancingMode + *out = new(string) + **out = **in + } + if in.ManagedPipelineMode != nil { + in, out := &in.ManagedPipelineMode, &out.ManagedPipelineMode + *out = new(string) + **out = 
**in + } + if in.MinimumTLSVersion != nil { + in, out := &in.MinimumTLSVersion, &out.MinimumTLSVersion + *out = new(string) + **out = **in + } + if in.PreWarmedInstanceCount != nil { + in, out := &in.PreWarmedInstanceCount, &out.PreWarmedInstanceCount + *out = new(float64) + **out = **in + } + if in.RemoteDebuggingEnabled != nil { + in, out := &in.RemoteDebuggingEnabled, &out.RemoteDebuggingEnabled + *out = new(bool) + **out = **in + } + if in.RemoteDebuggingVersion != nil { + in, out := &in.RemoteDebuggingVersion, &out.RemoteDebuggingVersion + *out = new(string) + **out = **in + } + if in.RuntimeScaleMonitoringEnabled != nil { + in, out := &in.RuntimeScaleMonitoringEnabled, &out.RuntimeScaleMonitoringEnabled + *out = new(bool) + **out = **in + } + if in.ScmIPRestriction != nil { + in, out := &in.ScmIPRestriction, &out.ScmIPRestriction + *out = make([]LinuxFunctionAppSlotSiteConfigScmIPRestrictionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ScmIPRestrictionDefaultAction != nil { + in, out := &in.ScmIPRestrictionDefaultAction, &out.ScmIPRestrictionDefaultAction + *out = new(string) + **out = **in + } + if in.ScmMinimumTLSVersion != nil { + in, out := &in.ScmMinimumTLSVersion, &out.ScmMinimumTLSVersion + *out = new(string) + **out = **in + } + if in.ScmType != nil { + in, out := &in.ScmType, &out.ScmType + *out = new(string) + **out = **in + } + if in.ScmUseMainIPRestriction != nil { + in, out := &in.ScmUseMainIPRestriction, &out.ScmUseMainIPRestriction + *out = new(bool) + **out = **in + } + if in.Use32BitWorker != nil { + in, out := &in.Use32BitWorker, &out.Use32BitWorker + *out = new(bool) + **out = **in + } + if in.VnetRouteAllEnabled != nil { + in, out := &in.VnetRouteAllEnabled, &out.VnetRouteAllEnabled + *out = new(bool) + **out = **in + } + if in.WebsocketsEnabled != nil { + in, out := &in.WebsocketsEnabled, &out.WebsocketsEnabled + *out = new(bool) + **out = **in + } + if in.WorkerCount != nil { + in, out 
:= &in.WorkerCount, &out.WorkerCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSlotSiteConfigObservation. +func (in *LinuxFunctionAppSlotSiteConfigObservation) DeepCopy() *LinuxFunctionAppSlotSiteConfigObservation { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotSiteConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppSlotSiteConfigParameters) DeepCopyInto(out *LinuxFunctionAppSlotSiteConfigParameters) { + *out = *in + if in.APIDefinitionURL != nil { + in, out := &in.APIDefinitionURL, &out.APIDefinitionURL + *out = new(string) + **out = **in + } + if in.APIManagementAPIID != nil { + in, out := &in.APIManagementAPIID, &out.APIManagementAPIID + *out = new(string) + **out = **in + } + if in.AlwaysOn != nil { + in, out := &in.AlwaysOn, &out.AlwaysOn + *out = new(bool) + **out = **in + } + if in.AppCommandLine != nil { + in, out := &in.AppCommandLine, &out.AppCommandLine + *out = new(string) + **out = **in + } + if in.AppScaleLimit != nil { + in, out := &in.AppScaleLimit, &out.AppScaleLimit + *out = new(float64) + **out = **in + } + if in.AppServiceLogs != nil { + in, out := &in.AppServiceLogs, &out.AppServiceLogs + *out = new(SiteConfigAppServiceLogsParameters) + (*in).DeepCopyInto(*out) + } + if in.ApplicationInsightsConnectionStringSecretRef != nil { + in, out := &in.ApplicationInsightsConnectionStringSecretRef, &out.ApplicationInsightsConnectionStringSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ApplicationInsightsKeySecretRef != nil { + in, out := &in.ApplicationInsightsKeySecretRef, &out.ApplicationInsightsKeySecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ApplicationStack != nil { + in, out := &in.ApplicationStack, &out.ApplicationStack + 
*out = new(SiteConfigApplicationStackParameters) + (*in).DeepCopyInto(*out) + } + if in.AutoSwapSlotName != nil { + in, out := &in.AutoSwapSlotName, &out.AutoSwapSlotName + *out = new(string) + **out = **in + } + if in.ContainerRegistryManagedIdentityClientID != nil { + in, out := &in.ContainerRegistryManagedIdentityClientID, &out.ContainerRegistryManagedIdentityClientID + *out = new(string) + **out = **in + } + if in.ContainerRegistryUseManagedIdentity != nil { + in, out := &in.ContainerRegistryUseManagedIdentity, &out.ContainerRegistryUseManagedIdentity + *out = new(bool) + **out = **in + } + if in.Cors != nil { + in, out := &in.Cors, &out.Cors + *out = new(LinuxFunctionAppSlotSiteConfigCorsParameters) + (*in).DeepCopyInto(*out) + } + if in.DefaultDocuments != nil { + in, out := &in.DefaultDocuments, &out.DefaultDocuments + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ElasticInstanceMinimum != nil { + in, out := &in.ElasticInstanceMinimum, &out.ElasticInstanceMinimum + *out = new(float64) + **out = **in + } + if in.FtpsState != nil { + in, out := &in.FtpsState, &out.FtpsState + *out = new(string) + **out = **in + } + if in.HealthCheckEvictionTimeInMin != nil { + in, out := &in.HealthCheckEvictionTimeInMin, &out.HealthCheckEvictionTimeInMin + *out = new(float64) + **out = **in + } + if in.HealthCheckPath != nil { + in, out := &in.HealthCheckPath, &out.HealthCheckPath + *out = new(string) + **out = **in + } + if in.Http2Enabled != nil { + in, out := &in.Http2Enabled, &out.Http2Enabled + *out = new(bool) + **out = **in + } + if in.IPRestriction != nil { + in, out := &in.IPRestriction, &out.IPRestriction + *out = make([]LinuxFunctionAppSlotSiteConfigIPRestrictionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPRestrictionDefaultAction != nil { + in, out := &in.IPRestrictionDefaultAction, 
&out.IPRestrictionDefaultAction + *out = new(string) + **out = **in + } + if in.LoadBalancingMode != nil { + in, out := &in.LoadBalancingMode, &out.LoadBalancingMode + *out = new(string) + **out = **in + } + if in.ManagedPipelineMode != nil { + in, out := &in.ManagedPipelineMode, &out.ManagedPipelineMode + *out = new(string) + **out = **in + } + if in.MinimumTLSVersion != nil { + in, out := &in.MinimumTLSVersion, &out.MinimumTLSVersion + *out = new(string) + **out = **in + } + if in.PreWarmedInstanceCount != nil { + in, out := &in.PreWarmedInstanceCount, &out.PreWarmedInstanceCount + *out = new(float64) + **out = **in + } + if in.RemoteDebuggingEnabled != nil { + in, out := &in.RemoteDebuggingEnabled, &out.RemoteDebuggingEnabled + *out = new(bool) + **out = **in + } + if in.RemoteDebuggingVersion != nil { + in, out := &in.RemoteDebuggingVersion, &out.RemoteDebuggingVersion + *out = new(string) + **out = **in + } + if in.RuntimeScaleMonitoringEnabled != nil { + in, out := &in.RuntimeScaleMonitoringEnabled, &out.RuntimeScaleMonitoringEnabled + *out = new(bool) + **out = **in + } + if in.ScmIPRestriction != nil { + in, out := &in.ScmIPRestriction, &out.ScmIPRestriction + *out = make([]LinuxFunctionAppSlotSiteConfigScmIPRestrictionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ScmIPRestrictionDefaultAction != nil { + in, out := &in.ScmIPRestrictionDefaultAction, &out.ScmIPRestrictionDefaultAction + *out = new(string) + **out = **in + } + if in.ScmMinimumTLSVersion != nil { + in, out := &in.ScmMinimumTLSVersion, &out.ScmMinimumTLSVersion + *out = new(string) + **out = **in + } + if in.ScmUseMainIPRestriction != nil { + in, out := &in.ScmUseMainIPRestriction, &out.ScmUseMainIPRestriction + *out = new(bool) + **out = **in + } + if in.Use32BitWorker != nil { + in, out := &in.Use32BitWorker, &out.Use32BitWorker + *out = new(bool) + **out = **in + } + if in.VnetRouteAllEnabled != nil { + in, out := &in.VnetRouteAllEnabled, 
&out.VnetRouteAllEnabled + *out = new(bool) + **out = **in + } + if in.WebsocketsEnabled != nil { + in, out := &in.WebsocketsEnabled, &out.WebsocketsEnabled + *out = new(bool) + **out = **in + } + if in.WorkerCount != nil { + in, out := &in.WorkerCount, &out.WorkerCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSlotSiteConfigParameters. +func (in *LinuxFunctionAppSlotSiteConfigParameters) DeepCopy() *LinuxFunctionAppSlotSiteConfigParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotSiteConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppSlotSiteConfigScmIPRestrictionHeadersInitParameters) DeepCopyInto(out *LinuxFunctionAppSlotSiteConfigScmIPRestrictionHeadersInitParameters) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an 
autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSlotSiteConfigScmIPRestrictionHeadersInitParameters. +func (in *LinuxFunctionAppSlotSiteConfigScmIPRestrictionHeadersInitParameters) DeepCopy() *LinuxFunctionAppSlotSiteConfigScmIPRestrictionHeadersInitParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotSiteConfigScmIPRestrictionHeadersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppSlotSiteConfigScmIPRestrictionHeadersObservation) DeepCopyInto(out *LinuxFunctionAppSlotSiteConfigScmIPRestrictionHeadersObservation) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSlotSiteConfigScmIPRestrictionHeadersObservation. 
+func (in *LinuxFunctionAppSlotSiteConfigScmIPRestrictionHeadersObservation) DeepCopy() *LinuxFunctionAppSlotSiteConfigScmIPRestrictionHeadersObservation { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotSiteConfigScmIPRestrictionHeadersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppSlotSiteConfigScmIPRestrictionHeadersParameters) DeepCopyInto(out *LinuxFunctionAppSlotSiteConfigScmIPRestrictionHeadersParameters) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSlotSiteConfigScmIPRestrictionHeadersParameters. 
+func (in *LinuxFunctionAppSlotSiteConfigScmIPRestrictionHeadersParameters) DeepCopy() *LinuxFunctionAppSlotSiteConfigScmIPRestrictionHeadersParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotSiteConfigScmIPRestrictionHeadersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppSlotSiteConfigScmIPRestrictionInitParameters) DeepCopyInto(out *LinuxFunctionAppSlotSiteConfigScmIPRestrictionInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]LinuxFunctionAppSlotSiteConfigScmIPRestrictionHeadersInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetIDRef != nil { + in, out := &in.VirtualNetworkSubnetIDRef, &out.VirtualNetworkSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkSubnetIDSelector != nil { + in, out := &in.VirtualNetworkSubnetIDSelector, &out.VirtualNetworkSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an 
autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSlotSiteConfigScmIPRestrictionInitParameters. +func (in *LinuxFunctionAppSlotSiteConfigScmIPRestrictionInitParameters) DeepCopy() *LinuxFunctionAppSlotSiteConfigScmIPRestrictionInitParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotSiteConfigScmIPRestrictionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppSlotSiteConfigScmIPRestrictionObservation) DeepCopyInto(out *LinuxFunctionAppSlotSiteConfigScmIPRestrictionObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]LinuxFunctionAppSlotSiteConfigScmIPRestrictionHeadersObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSlotSiteConfigScmIPRestrictionObservation. 
+func (in *LinuxFunctionAppSlotSiteConfigScmIPRestrictionObservation) DeepCopy() *LinuxFunctionAppSlotSiteConfigScmIPRestrictionObservation { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotSiteConfigScmIPRestrictionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppSlotSiteConfigScmIPRestrictionParameters) DeepCopyInto(out *LinuxFunctionAppSlotSiteConfigScmIPRestrictionParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]LinuxFunctionAppSlotSiteConfigScmIPRestrictionHeadersParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetIDRef != nil { + in, out := &in.VirtualNetworkSubnetIDRef, &out.VirtualNetworkSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkSubnetIDSelector != nil { + in, out := &in.VirtualNetworkSubnetIDSelector, &out.VirtualNetworkSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy 
function, copying the receiver, creating a new LinuxFunctionAppSlotSiteConfigScmIPRestrictionParameters. +func (in *LinuxFunctionAppSlotSiteConfigScmIPRestrictionParameters) DeepCopy() *LinuxFunctionAppSlotSiteConfigScmIPRestrictionParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotSiteConfigScmIPRestrictionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppSlotSiteCredentialInitParameters) DeepCopyInto(out *LinuxFunctionAppSlotSiteCredentialInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSlotSiteCredentialInitParameters. +func (in *LinuxFunctionAppSlotSiteCredentialInitParameters) DeepCopy() *LinuxFunctionAppSlotSiteCredentialInitParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotSiteCredentialInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppSlotSiteCredentialObservation) DeepCopyInto(out *LinuxFunctionAppSlotSiteCredentialObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Password != nil { + in, out := &in.Password, &out.Password + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSlotSiteCredentialObservation. 
+func (in *LinuxFunctionAppSlotSiteCredentialObservation) DeepCopy() *LinuxFunctionAppSlotSiteCredentialObservation { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotSiteCredentialObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppSlotSiteCredentialParameters) DeepCopyInto(out *LinuxFunctionAppSlotSiteCredentialParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSlotSiteCredentialParameters. +func (in *LinuxFunctionAppSlotSiteCredentialParameters) DeepCopy() *LinuxFunctionAppSlotSiteCredentialParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotSiteCredentialParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppSlotSpec) DeepCopyInto(out *LinuxFunctionAppSlotSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSlotSpec. +func (in *LinuxFunctionAppSlotSpec) DeepCopy() *LinuxFunctionAppSlotSpec { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxFunctionAppSlotStatus) DeepCopyInto(out *LinuxFunctionAppSlotStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSlotStatus. +func (in *LinuxFunctionAppSlotStatus) DeepCopy() *LinuxFunctionAppSlotStatus { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppSlotStorageAccountInitParameters) DeepCopyInto(out *LinuxFunctionAppSlotStorageAccountInitParameters) { + *out = *in + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } + if in.MountPath != nil { + in, out := &in.MountPath, &out.MountPath + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ShareName != nil { + in, out := &in.ShareName, &out.ShareName + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSlotStorageAccountInitParameters. +func (in *LinuxFunctionAppSlotStorageAccountInitParameters) DeepCopy() *LinuxFunctionAppSlotStorageAccountInitParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotStorageAccountInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxFunctionAppSlotStorageAccountObservation) DeepCopyInto(out *LinuxFunctionAppSlotStorageAccountObservation) { + *out = *in + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } + if in.MountPath != nil { + in, out := &in.MountPath, &out.MountPath + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ShareName != nil { + in, out := &in.ShareName, &out.ShareName + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSlotStorageAccountObservation. +func (in *LinuxFunctionAppSlotStorageAccountObservation) DeepCopy() *LinuxFunctionAppSlotStorageAccountObservation { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotStorageAccountObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxFunctionAppSlotStorageAccountParameters) DeepCopyInto(out *LinuxFunctionAppSlotStorageAccountParameters) { + *out = *in + out.AccessKeySecretRef = in.AccessKeySecretRef + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } + if in.MountPath != nil { + in, out := &in.MountPath, &out.MountPath + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ShareName != nil { + in, out := &in.ShareName, &out.ShareName + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSlotStorageAccountParameters. +func (in *LinuxFunctionAppSlotStorageAccountParameters) DeepCopy() *LinuxFunctionAppSlotStorageAccountParameters { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSlotStorageAccountParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxFunctionAppSpec) DeepCopyInto(out *LinuxFunctionAppSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppSpec. +func (in *LinuxFunctionAppSpec) DeepCopy() *LinuxFunctionAppSpec { + if in == nil { + return nil + } + out := new(LinuxFunctionAppSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxFunctionAppStatus) DeepCopyInto(out *LinuxFunctionAppStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxFunctionAppStatus. +func (in *LinuxFunctionAppStatus) DeepCopy() *LinuxFunctionAppStatus { + if in == nil { + return nil + } + out := new(LinuxFunctionAppStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebApp) DeepCopyInto(out *LinuxWebApp) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebApp. +func (in *LinuxWebApp) DeepCopy() *LinuxWebApp { + if in == nil { + return nil + } + out := new(LinuxWebApp) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LinuxWebApp) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppAuthSettingsActiveDirectoryInitParameters) DeepCopyInto(out *LinuxWebAppAuthSettingsActiveDirectoryInitParameters) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppAuthSettingsActiveDirectoryInitParameters. +func (in *LinuxWebAppAuthSettingsActiveDirectoryInitParameters) DeepCopy() *LinuxWebAppAuthSettingsActiveDirectoryInitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppAuthSettingsActiveDirectoryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppAuthSettingsActiveDirectoryObservation) DeepCopyInto(out *LinuxWebAppAuthSettingsActiveDirectoryObservation) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppAuthSettingsActiveDirectoryObservation. +func (in *LinuxWebAppAuthSettingsActiveDirectoryObservation) DeepCopy() *LinuxWebAppAuthSettingsActiveDirectoryObservation { + if in == nil { + return nil + } + out := new(LinuxWebAppAuthSettingsActiveDirectoryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppAuthSettingsActiveDirectoryParameters) DeepCopyInto(out *LinuxWebAppAuthSettingsActiveDirectoryParameters) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSecretRef != nil { + in, out := &in.ClientSecretSecretRef, &out.ClientSecretSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppAuthSettingsActiveDirectoryParameters. +func (in *LinuxWebAppAuthSettingsActiveDirectoryParameters) DeepCopy() *LinuxWebAppAuthSettingsActiveDirectoryParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppAuthSettingsActiveDirectoryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppAuthSettingsFacebookInitParameters) DeepCopyInto(out *LinuxWebAppAuthSettingsFacebookInitParameters) { + *out = *in + if in.AppID != nil { + in, out := &in.AppID, &out.AppID + *out = new(string) + **out = **in + } + if in.AppSecretSettingName != nil { + in, out := &in.AppSecretSettingName, &out.AppSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppAuthSettingsFacebookInitParameters. +func (in *LinuxWebAppAuthSettingsFacebookInitParameters) DeepCopy() *LinuxWebAppAuthSettingsFacebookInitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppAuthSettingsFacebookInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppAuthSettingsFacebookObservation) DeepCopyInto(out *LinuxWebAppAuthSettingsFacebookObservation) { + *out = *in + if in.AppID != nil { + in, out := &in.AppID, &out.AppID + *out = new(string) + **out = **in + } + if in.AppSecretSettingName != nil { + in, out := &in.AppSecretSettingName, &out.AppSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppAuthSettingsFacebookObservation. 
+func (in *LinuxWebAppAuthSettingsFacebookObservation) DeepCopy() *LinuxWebAppAuthSettingsFacebookObservation { + if in == nil { + return nil + } + out := new(LinuxWebAppAuthSettingsFacebookObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppAuthSettingsFacebookParameters) DeepCopyInto(out *LinuxWebAppAuthSettingsFacebookParameters) { + *out = *in + if in.AppID != nil { + in, out := &in.AppID, &out.AppID + *out = new(string) + **out = **in + } + if in.AppSecretSecretRef != nil { + in, out := &in.AppSecretSecretRef, &out.AppSecretSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.AppSecretSettingName != nil { + in, out := &in.AppSecretSettingName, &out.AppSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppAuthSettingsFacebookParameters. +func (in *LinuxWebAppAuthSettingsFacebookParameters) DeepCopy() *LinuxWebAppAuthSettingsFacebookParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppAuthSettingsFacebookParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppAuthSettingsGithubInitParameters) DeepCopyInto(out *LinuxWebAppAuthSettingsGithubInitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppAuthSettingsGithubInitParameters. +func (in *LinuxWebAppAuthSettingsGithubInitParameters) DeepCopy() *LinuxWebAppAuthSettingsGithubInitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppAuthSettingsGithubInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppAuthSettingsGithubObservation) DeepCopyInto(out *LinuxWebAppAuthSettingsGithubObservation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppAuthSettingsGithubObservation. 
+func (in *LinuxWebAppAuthSettingsGithubObservation) DeepCopy() *LinuxWebAppAuthSettingsGithubObservation { + if in == nil { + return nil + } + out := new(LinuxWebAppAuthSettingsGithubObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppAuthSettingsGithubParameters) DeepCopyInto(out *LinuxWebAppAuthSettingsGithubParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSecretRef != nil { + in, out := &in.ClientSecretSecretRef, &out.ClientSecretSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppAuthSettingsGithubParameters. +func (in *LinuxWebAppAuthSettingsGithubParameters) DeepCopy() *LinuxWebAppAuthSettingsGithubParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppAuthSettingsGithubParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppAuthSettingsGoogleInitParameters) DeepCopyInto(out *LinuxWebAppAuthSettingsGoogleInitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppAuthSettingsGoogleInitParameters. +func (in *LinuxWebAppAuthSettingsGoogleInitParameters) DeepCopy() *LinuxWebAppAuthSettingsGoogleInitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppAuthSettingsGoogleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppAuthSettingsGoogleObservation) DeepCopyInto(out *LinuxWebAppAuthSettingsGoogleObservation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppAuthSettingsGoogleObservation. 
+func (in *LinuxWebAppAuthSettingsGoogleObservation) DeepCopy() *LinuxWebAppAuthSettingsGoogleObservation { + if in == nil { + return nil + } + out := new(LinuxWebAppAuthSettingsGoogleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppAuthSettingsGoogleParameters) DeepCopyInto(out *LinuxWebAppAuthSettingsGoogleParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSecretRef != nil { + in, out := &in.ClientSecretSecretRef, &out.ClientSecretSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppAuthSettingsGoogleParameters. +func (in *LinuxWebAppAuthSettingsGoogleParameters) DeepCopy() *LinuxWebAppAuthSettingsGoogleParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppAuthSettingsGoogleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppAuthSettingsInitParameters) DeepCopyInto(out *LinuxWebAppAuthSettingsInitParameters) { + *out = *in + if in.ActiveDirectory != nil { + in, out := &in.ActiveDirectory, &out.ActiveDirectory + *out = new(LinuxWebAppAuthSettingsActiveDirectoryInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AdditionalLoginParameters != nil { + in, out := &in.AdditionalLoginParameters, &out.AdditionalLoginParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AllowedExternalRedirectUrls != nil { + in, out := &in.AllowedExternalRedirectUrls, &out.AllowedExternalRedirectUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DefaultProvider != nil { + in, out := &in.DefaultProvider, &out.DefaultProvider + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Facebook != nil { + in, out := &in.Facebook, &out.Facebook + *out = new(LinuxWebAppAuthSettingsFacebookInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Github != nil { + in, out := &in.Github, &out.Github + *out = new(LinuxWebAppAuthSettingsGithubInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Google != nil { + in, out := &in.Google, &out.Google + *out = new(LinuxWebAppAuthSettingsGoogleInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } + if in.Microsoft != nil { + in, out := &in.Microsoft, &out.Microsoft + *out = new(LinuxWebAppAuthSettingsMicrosoftInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RuntimeVersion != nil { + in, out := &in.RuntimeVersion, 
&out.RuntimeVersion + *out = new(string) + **out = **in + } + if in.TokenRefreshExtensionHours != nil { + in, out := &in.TokenRefreshExtensionHours, &out.TokenRefreshExtensionHours + *out = new(float64) + **out = **in + } + if in.TokenStoreEnabled != nil { + in, out := &in.TokenStoreEnabled, &out.TokenStoreEnabled + *out = new(bool) + **out = **in + } + if in.Twitter != nil { + in, out := &in.Twitter, &out.Twitter + *out = new(LinuxWebAppAuthSettingsTwitterInitParameters) + (*in).DeepCopyInto(*out) + } + if in.UnauthenticatedClientAction != nil { + in, out := &in.UnauthenticatedClientAction, &out.UnauthenticatedClientAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppAuthSettingsInitParameters. +func (in *LinuxWebAppAuthSettingsInitParameters) DeepCopy() *LinuxWebAppAuthSettingsInitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppAuthSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppAuthSettingsMicrosoftInitParameters) DeepCopyInto(out *LinuxWebAppAuthSettingsMicrosoftInitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppAuthSettingsMicrosoftInitParameters. 
+func (in *LinuxWebAppAuthSettingsMicrosoftInitParameters) DeepCopy() *LinuxWebAppAuthSettingsMicrosoftInitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppAuthSettingsMicrosoftInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppAuthSettingsMicrosoftObservation) DeepCopyInto(out *LinuxWebAppAuthSettingsMicrosoftObservation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppAuthSettingsMicrosoftObservation. +func (in *LinuxWebAppAuthSettingsMicrosoftObservation) DeepCopy() *LinuxWebAppAuthSettingsMicrosoftObservation { + if in == nil { + return nil + } + out := new(LinuxWebAppAuthSettingsMicrosoftObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppAuthSettingsMicrosoftParameters) DeepCopyInto(out *LinuxWebAppAuthSettingsMicrosoftParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSecretRef != nil { + in, out := &in.ClientSecretSecretRef, &out.ClientSecretSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppAuthSettingsMicrosoftParameters. +func (in *LinuxWebAppAuthSettingsMicrosoftParameters) DeepCopy() *LinuxWebAppAuthSettingsMicrosoftParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppAuthSettingsMicrosoftParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppAuthSettingsObservation) DeepCopyInto(out *LinuxWebAppAuthSettingsObservation) { + *out = *in + if in.ActiveDirectory != nil { + in, out := &in.ActiveDirectory, &out.ActiveDirectory + *out = new(LinuxWebAppAuthSettingsActiveDirectoryObservation) + (*in).DeepCopyInto(*out) + } + if in.AdditionalLoginParameters != nil { + in, out := &in.AdditionalLoginParameters, &out.AdditionalLoginParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AllowedExternalRedirectUrls != nil { + in, out := &in.AllowedExternalRedirectUrls, &out.AllowedExternalRedirectUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DefaultProvider != nil { + in, out := &in.DefaultProvider, &out.DefaultProvider + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Facebook != nil { + in, out := &in.Facebook, &out.Facebook + *out = new(LinuxWebAppAuthSettingsFacebookObservation) + (*in).DeepCopyInto(*out) + } + if in.Github != nil { + in, out := &in.Github, &out.Github + *out = new(LinuxWebAppAuthSettingsGithubObservation) + (*in).DeepCopyInto(*out) + } + if in.Google != nil { + in, out := &in.Google, &out.Google + *out = new(LinuxWebAppAuthSettingsGoogleObservation) + (*in).DeepCopyInto(*out) + } + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } + if in.Microsoft != nil { + in, out := &in.Microsoft, &out.Microsoft + *out = new(LinuxWebAppAuthSettingsMicrosoftObservation) + (*in).DeepCopyInto(*out) + } + if in.RuntimeVersion != nil { + in, out := &in.RuntimeVersion, &out.RuntimeVersion + 
*out = new(string) + **out = **in + } + if in.TokenRefreshExtensionHours != nil { + in, out := &in.TokenRefreshExtensionHours, &out.TokenRefreshExtensionHours + *out = new(float64) + **out = **in + } + if in.TokenStoreEnabled != nil { + in, out := &in.TokenStoreEnabled, &out.TokenStoreEnabled + *out = new(bool) + **out = **in + } + if in.Twitter != nil { + in, out := &in.Twitter, &out.Twitter + *out = new(LinuxWebAppAuthSettingsTwitterObservation) + (*in).DeepCopyInto(*out) + } + if in.UnauthenticatedClientAction != nil { + in, out := &in.UnauthenticatedClientAction, &out.UnauthenticatedClientAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppAuthSettingsObservation. +func (in *LinuxWebAppAuthSettingsObservation) DeepCopy() *LinuxWebAppAuthSettingsObservation { + if in == nil { + return nil + } + out := new(LinuxWebAppAuthSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppAuthSettingsParameters) DeepCopyInto(out *LinuxWebAppAuthSettingsParameters) { + *out = *in + if in.ActiveDirectory != nil { + in, out := &in.ActiveDirectory, &out.ActiveDirectory + *out = new(LinuxWebAppAuthSettingsActiveDirectoryParameters) + (*in).DeepCopyInto(*out) + } + if in.AdditionalLoginParameters != nil { + in, out := &in.AdditionalLoginParameters, &out.AdditionalLoginParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AllowedExternalRedirectUrls != nil { + in, out := &in.AllowedExternalRedirectUrls, &out.AllowedExternalRedirectUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DefaultProvider != nil { + in, out := &in.DefaultProvider, &out.DefaultProvider + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Facebook != nil { + in, out := &in.Facebook, &out.Facebook + *out = new(LinuxWebAppAuthSettingsFacebookParameters) + (*in).DeepCopyInto(*out) + } + if in.Github != nil { + in, out := &in.Github, &out.Github + *out = new(LinuxWebAppAuthSettingsGithubParameters) + (*in).DeepCopyInto(*out) + } + if in.Google != nil { + in, out := &in.Google, &out.Google + *out = new(LinuxWebAppAuthSettingsGoogleParameters) + (*in).DeepCopyInto(*out) + } + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } + if in.Microsoft != nil { + in, out := &in.Microsoft, &out.Microsoft + *out = new(LinuxWebAppAuthSettingsMicrosoftParameters) + (*in).DeepCopyInto(*out) + } + if in.RuntimeVersion != nil { + in, out := &in.RuntimeVersion, &out.RuntimeVersion + *out = 
new(string) + **out = **in + } + if in.TokenRefreshExtensionHours != nil { + in, out := &in.TokenRefreshExtensionHours, &out.TokenRefreshExtensionHours + *out = new(float64) + **out = **in + } + if in.TokenStoreEnabled != nil { + in, out := &in.TokenStoreEnabled, &out.TokenStoreEnabled + *out = new(bool) + **out = **in + } + if in.Twitter != nil { + in, out := &in.Twitter, &out.Twitter + *out = new(LinuxWebAppAuthSettingsTwitterParameters) + (*in).DeepCopyInto(*out) + } + if in.UnauthenticatedClientAction != nil { + in, out := &in.UnauthenticatedClientAction, &out.UnauthenticatedClientAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppAuthSettingsParameters. +func (in *LinuxWebAppAuthSettingsParameters) DeepCopy() *LinuxWebAppAuthSettingsParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppAuthSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppAuthSettingsTwitterInitParameters) DeepCopyInto(out *LinuxWebAppAuthSettingsTwitterInitParameters) { + *out = *in + if in.ConsumerKey != nil { + in, out := &in.ConsumerKey, &out.ConsumerKey + *out = new(string) + **out = **in + } + if in.ConsumerSecretSettingName != nil { + in, out := &in.ConsumerSecretSettingName, &out.ConsumerSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppAuthSettingsTwitterInitParameters. +func (in *LinuxWebAppAuthSettingsTwitterInitParameters) DeepCopy() *LinuxWebAppAuthSettingsTwitterInitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppAuthSettingsTwitterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *LinuxWebAppAuthSettingsTwitterObservation) DeepCopyInto(out *LinuxWebAppAuthSettingsTwitterObservation) { + *out = *in + if in.ConsumerKey != nil { + in, out := &in.ConsumerKey, &out.ConsumerKey + *out = new(string) + **out = **in + } + if in.ConsumerSecretSettingName != nil { + in, out := &in.ConsumerSecretSettingName, &out.ConsumerSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppAuthSettingsTwitterObservation. +func (in *LinuxWebAppAuthSettingsTwitterObservation) DeepCopy() *LinuxWebAppAuthSettingsTwitterObservation { + if in == nil { + return nil + } + out := new(LinuxWebAppAuthSettingsTwitterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppAuthSettingsTwitterParameters) DeepCopyInto(out *LinuxWebAppAuthSettingsTwitterParameters) { + *out = *in + if in.ConsumerKey != nil { + in, out := &in.ConsumerKey, &out.ConsumerKey + *out = new(string) + **out = **in + } + if in.ConsumerSecretSecretRef != nil { + in, out := &in.ConsumerSecretSecretRef, &out.ConsumerSecretSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ConsumerSecretSettingName != nil { + in, out := &in.ConsumerSecretSettingName, &out.ConsumerSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppAuthSettingsTwitterParameters. +func (in *LinuxWebAppAuthSettingsTwitterParameters) DeepCopy() *LinuxWebAppAuthSettingsTwitterParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppAuthSettingsTwitterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppAuthSettingsV2ActiveDirectoryV2InitParameters) DeepCopyInto(out *LinuxWebAppAuthSettingsV2ActiveDirectoryV2InitParameters) { + *out = *in + if in.AllowedApplications != nil { + in, out := &in.AllowedApplications, &out.AllowedApplications + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedGroups != nil { + in, out := &in.AllowedGroups, &out.AllowedGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedIdentities != nil { + in, out := &in.AllowedIdentities, &out.AllowedIdentities + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretCertificateThumbprint != nil { + in, out := &in.ClientSecretCertificateThumbprint, &out.ClientSecretCertificateThumbprint + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.JwtAllowedClientApplications != nil { + in, out := &in.JwtAllowedClientApplications, &out.JwtAllowedClientApplications + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.JwtAllowedGroups != nil { + in, out := &in.JwtAllowedGroups, 
&out.JwtAllowedGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LoginParameters != nil { + in, out := &in.LoginParameters, &out.LoginParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TenantAuthEndpoint != nil { + in, out := &in.TenantAuthEndpoint, &out.TenantAuthEndpoint + *out = new(string) + **out = **in + } + if in.WwwAuthenticationDisabled != nil { + in, out := &in.WwwAuthenticationDisabled, &out.WwwAuthenticationDisabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppAuthSettingsV2ActiveDirectoryV2InitParameters. +func (in *LinuxWebAppAuthSettingsV2ActiveDirectoryV2InitParameters) DeepCopy() *LinuxWebAppAuthSettingsV2ActiveDirectoryV2InitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppAuthSettingsV2ActiveDirectoryV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppAuthSettingsV2ActiveDirectoryV2Observation) DeepCopyInto(out *LinuxWebAppAuthSettingsV2ActiveDirectoryV2Observation) { + *out = *in + if in.AllowedApplications != nil { + in, out := &in.AllowedApplications, &out.AllowedApplications + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedGroups != nil { + in, out := &in.AllowedGroups, &out.AllowedGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedIdentities != nil { + in, out := &in.AllowedIdentities, &out.AllowedIdentities + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretCertificateThumbprint != nil { + in, out := &in.ClientSecretCertificateThumbprint, &out.ClientSecretCertificateThumbprint + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.JwtAllowedClientApplications != nil { + in, out := &in.JwtAllowedClientApplications, &out.JwtAllowedClientApplications + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.JwtAllowedGroups != nil { + in, out := &in.JwtAllowedGroups, &out.JwtAllowedGroups 
+ *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LoginParameters != nil { + in, out := &in.LoginParameters, &out.LoginParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TenantAuthEndpoint != nil { + in, out := &in.TenantAuthEndpoint, &out.TenantAuthEndpoint + *out = new(string) + **out = **in + } + if in.WwwAuthenticationDisabled != nil { + in, out := &in.WwwAuthenticationDisabled, &out.WwwAuthenticationDisabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppAuthSettingsV2ActiveDirectoryV2Observation. +func (in *LinuxWebAppAuthSettingsV2ActiveDirectoryV2Observation) DeepCopy() *LinuxWebAppAuthSettingsV2ActiveDirectoryV2Observation { + if in == nil { + return nil + } + out := new(LinuxWebAppAuthSettingsV2ActiveDirectoryV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppAuthSettingsV2ActiveDirectoryV2Parameters) DeepCopyInto(out *LinuxWebAppAuthSettingsV2ActiveDirectoryV2Parameters) { + *out = *in + if in.AllowedApplications != nil { + in, out := &in.AllowedApplications, &out.AllowedApplications + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedGroups != nil { + in, out := &in.AllowedGroups, &out.AllowedGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedIdentities != nil { + in, out := &in.AllowedIdentities, &out.AllowedIdentities + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretCertificateThumbprint != nil { + in, out := &in.ClientSecretCertificateThumbprint, &out.ClientSecretCertificateThumbprint + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.JwtAllowedClientApplications != nil { + in, out := &in.JwtAllowedClientApplications, &out.JwtAllowedClientApplications + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.JwtAllowedGroups != nil { + in, out := &in.JwtAllowedGroups, &out.JwtAllowedGroups + 
*out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LoginParameters != nil { + in, out := &in.LoginParameters, &out.LoginParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TenantAuthEndpoint != nil { + in, out := &in.TenantAuthEndpoint, &out.TenantAuthEndpoint + *out = new(string) + **out = **in + } + if in.WwwAuthenticationDisabled != nil { + in, out := &in.WwwAuthenticationDisabled, &out.WwwAuthenticationDisabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppAuthSettingsV2ActiveDirectoryV2Parameters. +func (in *LinuxWebAppAuthSettingsV2ActiveDirectoryV2Parameters) DeepCopy() *LinuxWebAppAuthSettingsV2ActiveDirectoryV2Parameters { + if in == nil { + return nil + } + out := new(LinuxWebAppAuthSettingsV2ActiveDirectoryV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppAuthSettingsV2AppleV2InitParameters) DeepCopyInto(out *LinuxWebAppAuthSettingsV2AppleV2InitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppAuthSettingsV2AppleV2InitParameters. 
+func (in *LinuxWebAppAuthSettingsV2AppleV2InitParameters) DeepCopy() *LinuxWebAppAuthSettingsV2AppleV2InitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppAuthSettingsV2AppleV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppAuthSettingsV2AppleV2Observation) DeepCopyInto(out *LinuxWebAppAuthSettingsV2AppleV2Observation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppAuthSettingsV2AppleV2Observation. +func (in *LinuxWebAppAuthSettingsV2AppleV2Observation) DeepCopy() *LinuxWebAppAuthSettingsV2AppleV2Observation { + if in == nil { + return nil + } + out := new(LinuxWebAppAuthSettingsV2AppleV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppAuthSettingsV2AppleV2Parameters) DeepCopyInto(out *LinuxWebAppAuthSettingsV2AppleV2Parameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppAuthSettingsV2AppleV2Parameters. +func (in *LinuxWebAppAuthSettingsV2AppleV2Parameters) DeepCopy() *LinuxWebAppAuthSettingsV2AppleV2Parameters { + if in == nil { + return nil + } + out := new(LinuxWebAppAuthSettingsV2AppleV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppAuthSettingsV2AzureStaticWebAppV2InitParameters) DeepCopyInto(out *LinuxWebAppAuthSettingsV2AzureStaticWebAppV2InitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppAuthSettingsV2AzureStaticWebAppV2InitParameters. +func (in *LinuxWebAppAuthSettingsV2AzureStaticWebAppV2InitParameters) DeepCopy() *LinuxWebAppAuthSettingsV2AzureStaticWebAppV2InitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppAuthSettingsV2AzureStaticWebAppV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppAuthSettingsV2AzureStaticWebAppV2Observation) DeepCopyInto(out *LinuxWebAppAuthSettingsV2AzureStaticWebAppV2Observation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppAuthSettingsV2AzureStaticWebAppV2Observation. +func (in *LinuxWebAppAuthSettingsV2AzureStaticWebAppV2Observation) DeepCopy() *LinuxWebAppAuthSettingsV2AzureStaticWebAppV2Observation { + if in == nil { + return nil + } + out := new(LinuxWebAppAuthSettingsV2AzureStaticWebAppV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppAuthSettingsV2AzureStaticWebAppV2Parameters) DeepCopyInto(out *LinuxWebAppAuthSettingsV2AzureStaticWebAppV2Parameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppAuthSettingsV2AzureStaticWebAppV2Parameters. +func (in *LinuxWebAppAuthSettingsV2AzureStaticWebAppV2Parameters) DeepCopy() *LinuxWebAppAuthSettingsV2AzureStaticWebAppV2Parameters { + if in == nil { + return nil + } + out := new(LinuxWebAppAuthSettingsV2AzureStaticWebAppV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppAuthSettingsV2CustomOidcV2InitParameters) DeepCopyInto(out *LinuxWebAppAuthSettingsV2CustomOidcV2InitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NameClaimType != nil { + in, out := &in.NameClaimType, &out.NameClaimType + *out = new(string) + **out = **in + } + if in.OpenIDConfigurationEndpoint != nil { + in, out := &in.OpenIDConfigurationEndpoint, &out.OpenIDConfigurationEndpoint + *out = new(string) + **out = **in + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppAuthSettingsV2CustomOidcV2InitParameters. +func (in *LinuxWebAppAuthSettingsV2CustomOidcV2InitParameters) DeepCopy() *LinuxWebAppAuthSettingsV2CustomOidcV2InitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppAuthSettingsV2CustomOidcV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppAuthSettingsV2CustomOidcV2Observation) DeepCopyInto(out *LinuxWebAppAuthSettingsV2CustomOidcV2Observation) { + *out = *in + if in.AuthorisationEndpoint != nil { + in, out := &in.AuthorisationEndpoint, &out.AuthorisationEndpoint + *out = new(string) + **out = **in + } + if in.CertificationURI != nil { + in, out := &in.CertificationURI, &out.CertificationURI + *out = new(string) + **out = **in + } + if in.ClientCredentialMethod != nil { + in, out := &in.ClientCredentialMethod, &out.ClientCredentialMethod + *out = new(string) + **out = **in + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.IssuerEndpoint != nil { + in, out := &in.IssuerEndpoint, &out.IssuerEndpoint + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NameClaimType != nil { + in, out := &in.NameClaimType, &out.NameClaimType + *out = new(string) + **out = **in + } + if in.OpenIDConfigurationEndpoint != nil { + in, out := &in.OpenIDConfigurationEndpoint, &out.OpenIDConfigurationEndpoint + *out = new(string) + **out = **in + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TokenEndpoint != nil { + in, out := &in.TokenEndpoint, &out.TokenEndpoint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppAuthSettingsV2CustomOidcV2Observation. 
+func (in *LinuxWebAppAuthSettingsV2CustomOidcV2Observation) DeepCopy() *LinuxWebAppAuthSettingsV2CustomOidcV2Observation { + if in == nil { + return nil + } + out := new(LinuxWebAppAuthSettingsV2CustomOidcV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppAuthSettingsV2CustomOidcV2Parameters) DeepCopyInto(out *LinuxWebAppAuthSettingsV2CustomOidcV2Parameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NameClaimType != nil { + in, out := &in.NameClaimType, &out.NameClaimType + *out = new(string) + **out = **in + } + if in.OpenIDConfigurationEndpoint != nil { + in, out := &in.OpenIDConfigurationEndpoint, &out.OpenIDConfigurationEndpoint + *out = new(string) + **out = **in + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppAuthSettingsV2CustomOidcV2Parameters. +func (in *LinuxWebAppAuthSettingsV2CustomOidcV2Parameters) DeepCopy() *LinuxWebAppAuthSettingsV2CustomOidcV2Parameters { + if in == nil { + return nil + } + out := new(LinuxWebAppAuthSettingsV2CustomOidcV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppAuthSettingsV2FacebookV2InitParameters) DeepCopyInto(out *LinuxWebAppAuthSettingsV2FacebookV2InitParameters) { + *out = *in + if in.AppID != nil { + in, out := &in.AppID, &out.AppID + *out = new(string) + **out = **in + } + if in.AppSecretSettingName != nil { + in, out := &in.AppSecretSettingName, &out.AppSecretSettingName + *out = new(string) + **out = **in + } + if in.GraphAPIVersion != nil { + in, out := &in.GraphAPIVersion, &out.GraphAPIVersion + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppAuthSettingsV2FacebookV2InitParameters. +func (in *LinuxWebAppAuthSettingsV2FacebookV2InitParameters) DeepCopy() *LinuxWebAppAuthSettingsV2FacebookV2InitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppAuthSettingsV2FacebookV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppAuthSettingsV2FacebookV2Observation) DeepCopyInto(out *LinuxWebAppAuthSettingsV2FacebookV2Observation) { + *out = *in + if in.AppID != nil { + in, out := &in.AppID, &out.AppID + *out = new(string) + **out = **in + } + if in.AppSecretSettingName != nil { + in, out := &in.AppSecretSettingName, &out.AppSecretSettingName + *out = new(string) + **out = **in + } + if in.GraphAPIVersion != nil { + in, out := &in.GraphAPIVersion, &out.GraphAPIVersion + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppAuthSettingsV2FacebookV2Observation. +func (in *LinuxWebAppAuthSettingsV2FacebookV2Observation) DeepCopy() *LinuxWebAppAuthSettingsV2FacebookV2Observation { + if in == nil { + return nil + } + out := new(LinuxWebAppAuthSettingsV2FacebookV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppAuthSettingsV2FacebookV2Parameters) DeepCopyInto(out *LinuxWebAppAuthSettingsV2FacebookV2Parameters) { + *out = *in + if in.AppID != nil { + in, out := &in.AppID, &out.AppID + *out = new(string) + **out = **in + } + if in.AppSecretSettingName != nil { + in, out := &in.AppSecretSettingName, &out.AppSecretSettingName + *out = new(string) + **out = **in + } + if in.GraphAPIVersion != nil { + in, out := &in.GraphAPIVersion, &out.GraphAPIVersion + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppAuthSettingsV2FacebookV2Parameters. +func (in *LinuxWebAppAuthSettingsV2FacebookV2Parameters) DeepCopy() *LinuxWebAppAuthSettingsV2FacebookV2Parameters { + if in == nil { + return nil + } + out := new(LinuxWebAppAuthSettingsV2FacebookV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppAuthSettingsV2GithubV2InitParameters) DeepCopyInto(out *LinuxWebAppAuthSettingsV2GithubV2InitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppAuthSettingsV2GithubV2InitParameters. +func (in *LinuxWebAppAuthSettingsV2GithubV2InitParameters) DeepCopy() *LinuxWebAppAuthSettingsV2GithubV2InitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppAuthSettingsV2GithubV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppAuthSettingsV2GithubV2Observation) DeepCopyInto(out *LinuxWebAppAuthSettingsV2GithubV2Observation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppAuthSettingsV2GithubV2Observation. 
+func (in *LinuxWebAppAuthSettingsV2GithubV2Observation) DeepCopy() *LinuxWebAppAuthSettingsV2GithubV2Observation { + if in == nil { + return nil + } + out := new(LinuxWebAppAuthSettingsV2GithubV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppAuthSettingsV2GithubV2Parameters) DeepCopyInto(out *LinuxWebAppAuthSettingsV2GithubV2Parameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppAuthSettingsV2GithubV2Parameters. +func (in *LinuxWebAppAuthSettingsV2GithubV2Parameters) DeepCopy() *LinuxWebAppAuthSettingsV2GithubV2Parameters { + if in == nil { + return nil + } + out := new(LinuxWebAppAuthSettingsV2GithubV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppAuthSettingsV2GoogleV2InitParameters) DeepCopyInto(out *LinuxWebAppAuthSettingsV2GoogleV2InitParameters) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppAuthSettingsV2GoogleV2InitParameters. +func (in *LinuxWebAppAuthSettingsV2GoogleV2InitParameters) DeepCopy() *LinuxWebAppAuthSettingsV2GoogleV2InitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppAuthSettingsV2GoogleV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppAuthSettingsV2GoogleV2Observation) DeepCopyInto(out *LinuxWebAppAuthSettingsV2GoogleV2Observation) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppAuthSettingsV2GoogleV2Observation. +func (in *LinuxWebAppAuthSettingsV2GoogleV2Observation) DeepCopy() *LinuxWebAppAuthSettingsV2GoogleV2Observation { + if in == nil { + return nil + } + out := new(LinuxWebAppAuthSettingsV2GoogleV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppAuthSettingsV2GoogleV2Parameters) DeepCopyInto(out *LinuxWebAppAuthSettingsV2GoogleV2Parameters) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppAuthSettingsV2GoogleV2Parameters. +func (in *LinuxWebAppAuthSettingsV2GoogleV2Parameters) DeepCopy() *LinuxWebAppAuthSettingsV2GoogleV2Parameters { + if in == nil { + return nil + } + out := new(LinuxWebAppAuthSettingsV2GoogleV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppAuthSettingsV2InitParameters) DeepCopyInto(out *LinuxWebAppAuthSettingsV2InitParameters) { + *out = *in + if in.ActiveDirectoryV2 != nil { + in, out := &in.ActiveDirectoryV2, &out.ActiveDirectoryV2 + *out = new(LinuxWebAppAuthSettingsV2ActiveDirectoryV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.AppleV2 != nil { + in, out := &in.AppleV2, &out.AppleV2 + *out = new(LinuxWebAppAuthSettingsV2AppleV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.AuthEnabled != nil { + in, out := &in.AuthEnabled, &out.AuthEnabled + *out = new(bool) + **out = **in + } + if in.AzureStaticWebAppV2 != nil { + in, out := &in.AzureStaticWebAppV2, &out.AzureStaticWebAppV2 + *out = new(LinuxWebAppAuthSettingsV2AzureStaticWebAppV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.ConfigFilePath != nil { + in, out := &in.ConfigFilePath, &out.ConfigFilePath + *out = new(string) + **out = **in + } + if in.CustomOidcV2 != nil { + in, out := &in.CustomOidcV2, &out.CustomOidcV2 + *out = make([]LinuxWebAppAuthSettingsV2CustomOidcV2InitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultProvider != nil { + in, out := &in.DefaultProvider, &out.DefaultProvider + *out = new(string) + **out = **in + } + if in.ExcludedPaths != nil { + in, out := &in.ExcludedPaths, &out.ExcludedPaths + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FacebookV2 != nil { + in, out := &in.FacebookV2, &out.FacebookV2 + *out = new(LinuxWebAppAuthSettingsV2FacebookV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.ForwardProxyConvention != nil { + in, out := &in.ForwardProxyConvention, &out.ForwardProxyConvention + *out = new(string) + **out = **in + } + if in.ForwardProxyCustomHostHeaderName != nil { + in, out := &in.ForwardProxyCustomHostHeaderName, &out.ForwardProxyCustomHostHeaderName + *out = new(string) + 
**out = **in + } + if in.ForwardProxyCustomSchemeHeaderName != nil { + in, out := &in.ForwardProxyCustomSchemeHeaderName, &out.ForwardProxyCustomSchemeHeaderName + *out = new(string) + **out = **in + } + if in.GithubV2 != nil { + in, out := &in.GithubV2, &out.GithubV2 + *out = new(LinuxWebAppAuthSettingsV2GithubV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.GoogleV2 != nil { + in, out := &in.GoogleV2, &out.GoogleV2 + *out = new(LinuxWebAppAuthSettingsV2GoogleV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.HTTPRouteAPIPrefix != nil { + in, out := &in.HTTPRouteAPIPrefix, &out.HTTPRouteAPIPrefix + *out = new(string) + **out = **in + } + if in.Login != nil { + in, out := &in.Login, &out.Login + *out = new(LinuxWebAppAuthSettingsV2LoginInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MicrosoftV2 != nil { + in, out := &in.MicrosoftV2, &out.MicrosoftV2 + *out = new(LinuxWebAppAuthSettingsV2MicrosoftV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.RequireAuthentication != nil { + in, out := &in.RequireAuthentication, &out.RequireAuthentication + *out = new(bool) + **out = **in + } + if in.RequireHTTPS != nil { + in, out := &in.RequireHTTPS, &out.RequireHTTPS + *out = new(bool) + **out = **in + } + if in.RuntimeVersion != nil { + in, out := &in.RuntimeVersion, &out.RuntimeVersion + *out = new(string) + **out = **in + } + if in.TwitterV2 != nil { + in, out := &in.TwitterV2, &out.TwitterV2 + *out = new(LinuxWebAppAuthSettingsV2TwitterV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.UnauthenticatedAction != nil { + in, out := &in.UnauthenticatedAction, &out.UnauthenticatedAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppAuthSettingsV2InitParameters. 
+func (in *LinuxWebAppAuthSettingsV2InitParameters) DeepCopy() *LinuxWebAppAuthSettingsV2InitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppAuthSettingsV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppAuthSettingsV2LoginInitParameters) DeepCopyInto(out *LinuxWebAppAuthSettingsV2LoginInitParameters) { + *out = *in + if in.AllowedExternalRedirectUrls != nil { + in, out := &in.AllowedExternalRedirectUrls, &out.AllowedExternalRedirectUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CookieExpirationConvention != nil { + in, out := &in.CookieExpirationConvention, &out.CookieExpirationConvention + *out = new(string) + **out = **in + } + if in.CookieExpirationTime != nil { + in, out := &in.CookieExpirationTime, &out.CookieExpirationTime + *out = new(string) + **out = **in + } + if in.LogoutEndpoint != nil { + in, out := &in.LogoutEndpoint, &out.LogoutEndpoint + *out = new(string) + **out = **in + } + if in.NonceExpirationTime != nil { + in, out := &in.NonceExpirationTime, &out.NonceExpirationTime + *out = new(string) + **out = **in + } + if in.PreserveURLFragmentsForLogins != nil { + in, out := &in.PreserveURLFragmentsForLogins, &out.PreserveURLFragmentsForLogins + *out = new(bool) + **out = **in + } + if in.TokenRefreshExtensionTime != nil { + in, out := &in.TokenRefreshExtensionTime, &out.TokenRefreshExtensionTime + *out = new(float64) + **out = **in + } + if in.TokenStoreEnabled != nil { + in, out := &in.TokenStoreEnabled, &out.TokenStoreEnabled + *out = new(bool) + **out = **in + } + if in.TokenStorePath != nil { + in, out := &in.TokenStorePath, &out.TokenStorePath + *out = new(string) + **out = **in + } + if in.TokenStoreSASSettingName != nil { + in, out := 
&in.TokenStoreSASSettingName, &out.TokenStoreSASSettingName + *out = new(string) + **out = **in + } + if in.ValidateNonce != nil { + in, out := &in.ValidateNonce, &out.ValidateNonce + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppAuthSettingsV2LoginInitParameters. +func (in *LinuxWebAppAuthSettingsV2LoginInitParameters) DeepCopy() *LinuxWebAppAuthSettingsV2LoginInitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppAuthSettingsV2LoginInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppAuthSettingsV2LoginObservation) DeepCopyInto(out *LinuxWebAppAuthSettingsV2LoginObservation) { + *out = *in + if in.AllowedExternalRedirectUrls != nil { + in, out := &in.AllowedExternalRedirectUrls, &out.AllowedExternalRedirectUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CookieExpirationConvention != nil { + in, out := &in.CookieExpirationConvention, &out.CookieExpirationConvention + *out = new(string) + **out = **in + } + if in.CookieExpirationTime != nil { + in, out := &in.CookieExpirationTime, &out.CookieExpirationTime + *out = new(string) + **out = **in + } + if in.LogoutEndpoint != nil { + in, out := &in.LogoutEndpoint, &out.LogoutEndpoint + *out = new(string) + **out = **in + } + if in.NonceExpirationTime != nil { + in, out := &in.NonceExpirationTime, &out.NonceExpirationTime + *out = new(string) + **out = **in + } + if in.PreserveURLFragmentsForLogins != nil { + in, out := &in.PreserveURLFragmentsForLogins, &out.PreserveURLFragmentsForLogins + *out = new(bool) + **out = **in + } + if in.TokenRefreshExtensionTime != nil { + in, out := &in.TokenRefreshExtensionTime, &out.TokenRefreshExtensionTime 
+ *out = new(float64) + **out = **in + } + if in.TokenStoreEnabled != nil { + in, out := &in.TokenStoreEnabled, &out.TokenStoreEnabled + *out = new(bool) + **out = **in + } + if in.TokenStorePath != nil { + in, out := &in.TokenStorePath, &out.TokenStorePath + *out = new(string) + **out = **in + } + if in.TokenStoreSASSettingName != nil { + in, out := &in.TokenStoreSASSettingName, &out.TokenStoreSASSettingName + *out = new(string) + **out = **in + } + if in.ValidateNonce != nil { + in, out := &in.ValidateNonce, &out.ValidateNonce + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppAuthSettingsV2LoginObservation. +func (in *LinuxWebAppAuthSettingsV2LoginObservation) DeepCopy() *LinuxWebAppAuthSettingsV2LoginObservation { + if in == nil { + return nil + } + out := new(LinuxWebAppAuthSettingsV2LoginObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppAuthSettingsV2LoginParameters) DeepCopyInto(out *LinuxWebAppAuthSettingsV2LoginParameters) { + *out = *in + if in.AllowedExternalRedirectUrls != nil { + in, out := &in.AllowedExternalRedirectUrls, &out.AllowedExternalRedirectUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CookieExpirationConvention != nil { + in, out := &in.CookieExpirationConvention, &out.CookieExpirationConvention + *out = new(string) + **out = **in + } + if in.CookieExpirationTime != nil { + in, out := &in.CookieExpirationTime, &out.CookieExpirationTime + *out = new(string) + **out = **in + } + if in.LogoutEndpoint != nil { + in, out := &in.LogoutEndpoint, &out.LogoutEndpoint + *out = new(string) + **out = **in + } + if in.NonceExpirationTime != nil { + in, out := &in.NonceExpirationTime, &out.NonceExpirationTime + *out = new(string) + **out = **in + } + if in.PreserveURLFragmentsForLogins != nil { + in, out := &in.PreserveURLFragmentsForLogins, &out.PreserveURLFragmentsForLogins + *out = new(bool) + **out = **in + } + if in.TokenRefreshExtensionTime != nil { + in, out := &in.TokenRefreshExtensionTime, &out.TokenRefreshExtensionTime + *out = new(float64) + **out = **in + } + if in.TokenStoreEnabled != nil { + in, out := &in.TokenStoreEnabled, &out.TokenStoreEnabled + *out = new(bool) + **out = **in + } + if in.TokenStorePath != nil { + in, out := &in.TokenStorePath, &out.TokenStorePath + *out = new(string) + **out = **in + } + if in.TokenStoreSASSettingName != nil { + in, out := &in.TokenStoreSASSettingName, &out.TokenStoreSASSettingName + *out = new(string) + **out = **in + } + if in.ValidateNonce != nil { + in, out := &in.ValidateNonce, &out.ValidateNonce + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppAuthSettingsV2LoginParameters. 
+func (in *LinuxWebAppAuthSettingsV2LoginParameters) DeepCopy() *LinuxWebAppAuthSettingsV2LoginParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppAuthSettingsV2LoginParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppAuthSettingsV2MicrosoftV2InitParameters) DeepCopyInto(out *LinuxWebAppAuthSettingsV2MicrosoftV2InitParameters) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppAuthSettingsV2MicrosoftV2InitParameters. +func (in *LinuxWebAppAuthSettingsV2MicrosoftV2InitParameters) DeepCopy() *LinuxWebAppAuthSettingsV2MicrosoftV2InitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppAuthSettingsV2MicrosoftV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppAuthSettingsV2MicrosoftV2Observation) DeepCopyInto(out *LinuxWebAppAuthSettingsV2MicrosoftV2Observation) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppAuthSettingsV2MicrosoftV2Observation. +func (in *LinuxWebAppAuthSettingsV2MicrosoftV2Observation) DeepCopy() *LinuxWebAppAuthSettingsV2MicrosoftV2Observation { + if in == nil { + return nil + } + out := new(LinuxWebAppAuthSettingsV2MicrosoftV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppAuthSettingsV2MicrosoftV2Parameters) DeepCopyInto(out *LinuxWebAppAuthSettingsV2MicrosoftV2Parameters) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppAuthSettingsV2MicrosoftV2Parameters. +func (in *LinuxWebAppAuthSettingsV2MicrosoftV2Parameters) DeepCopy() *LinuxWebAppAuthSettingsV2MicrosoftV2Parameters { + if in == nil { + return nil + } + out := new(LinuxWebAppAuthSettingsV2MicrosoftV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppAuthSettingsV2Observation) DeepCopyInto(out *LinuxWebAppAuthSettingsV2Observation) { + *out = *in + if in.ActiveDirectoryV2 != nil { + in, out := &in.ActiveDirectoryV2, &out.ActiveDirectoryV2 + *out = new(LinuxWebAppAuthSettingsV2ActiveDirectoryV2Observation) + (*in).DeepCopyInto(*out) + } + if in.AppleV2 != nil { + in, out := &in.AppleV2, &out.AppleV2 + *out = new(LinuxWebAppAuthSettingsV2AppleV2Observation) + (*in).DeepCopyInto(*out) + } + if in.AuthEnabled != nil { + in, out := &in.AuthEnabled, &out.AuthEnabled + *out = new(bool) + **out = **in + } + if in.AzureStaticWebAppV2 != nil { + in, out := &in.AzureStaticWebAppV2, &out.AzureStaticWebAppV2 + *out = new(LinuxWebAppAuthSettingsV2AzureStaticWebAppV2Observation) + (*in).DeepCopyInto(*out) + } + if in.ConfigFilePath != nil { + in, out := &in.ConfigFilePath, &out.ConfigFilePath + *out = new(string) + **out = **in + } + if in.CustomOidcV2 != nil { + in, out := &in.CustomOidcV2, &out.CustomOidcV2 + *out = make([]LinuxWebAppAuthSettingsV2CustomOidcV2Observation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultProvider != nil { + in, out := &in.DefaultProvider, &out.DefaultProvider + *out = new(string) + **out = **in + } + if in.ExcludedPaths != nil { + in, out := &in.ExcludedPaths, &out.ExcludedPaths + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FacebookV2 != nil { + in, out := &in.FacebookV2, &out.FacebookV2 + *out = new(LinuxWebAppAuthSettingsV2FacebookV2Observation) + (*in).DeepCopyInto(*out) + } + if in.ForwardProxyConvention != nil { + in, out := &in.ForwardProxyConvention, &out.ForwardProxyConvention + *out = new(string) + **out = **in + } + if in.ForwardProxyCustomHostHeaderName != nil { + in, out := &in.ForwardProxyCustomHostHeaderName, &out.ForwardProxyCustomHostHeaderName + *out = new(string) + **out = **in + } + if 
in.ForwardProxyCustomSchemeHeaderName != nil { + in, out := &in.ForwardProxyCustomSchemeHeaderName, &out.ForwardProxyCustomSchemeHeaderName + *out = new(string) + **out = **in + } + if in.GithubV2 != nil { + in, out := &in.GithubV2, &out.GithubV2 + *out = new(LinuxWebAppAuthSettingsV2GithubV2Observation) + (*in).DeepCopyInto(*out) + } + if in.GoogleV2 != nil { + in, out := &in.GoogleV2, &out.GoogleV2 + *out = new(LinuxWebAppAuthSettingsV2GoogleV2Observation) + (*in).DeepCopyInto(*out) + } + if in.HTTPRouteAPIPrefix != nil { + in, out := &in.HTTPRouteAPIPrefix, &out.HTTPRouteAPIPrefix + *out = new(string) + **out = **in + } + if in.Login != nil { + in, out := &in.Login, &out.Login + *out = new(LinuxWebAppAuthSettingsV2LoginObservation) + (*in).DeepCopyInto(*out) + } + if in.MicrosoftV2 != nil { + in, out := &in.MicrosoftV2, &out.MicrosoftV2 + *out = new(LinuxWebAppAuthSettingsV2MicrosoftV2Observation) + (*in).DeepCopyInto(*out) + } + if in.RequireAuthentication != nil { + in, out := &in.RequireAuthentication, &out.RequireAuthentication + *out = new(bool) + **out = **in + } + if in.RequireHTTPS != nil { + in, out := &in.RequireHTTPS, &out.RequireHTTPS + *out = new(bool) + **out = **in + } + if in.RuntimeVersion != nil { + in, out := &in.RuntimeVersion, &out.RuntimeVersion + *out = new(string) + **out = **in + } + if in.TwitterV2 != nil { + in, out := &in.TwitterV2, &out.TwitterV2 + *out = new(LinuxWebAppAuthSettingsV2TwitterV2Observation) + (*in).DeepCopyInto(*out) + } + if in.UnauthenticatedAction != nil { + in, out := &in.UnauthenticatedAction, &out.UnauthenticatedAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppAuthSettingsV2Observation. 
+func (in *LinuxWebAppAuthSettingsV2Observation) DeepCopy() *LinuxWebAppAuthSettingsV2Observation { + if in == nil { + return nil + } + out := new(LinuxWebAppAuthSettingsV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppAuthSettingsV2Parameters) DeepCopyInto(out *LinuxWebAppAuthSettingsV2Parameters) { + *out = *in + if in.ActiveDirectoryV2 != nil { + in, out := &in.ActiveDirectoryV2, &out.ActiveDirectoryV2 + *out = new(LinuxWebAppAuthSettingsV2ActiveDirectoryV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.AppleV2 != nil { + in, out := &in.AppleV2, &out.AppleV2 + *out = new(LinuxWebAppAuthSettingsV2AppleV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.AuthEnabled != nil { + in, out := &in.AuthEnabled, &out.AuthEnabled + *out = new(bool) + **out = **in + } + if in.AzureStaticWebAppV2 != nil { + in, out := &in.AzureStaticWebAppV2, &out.AzureStaticWebAppV2 + *out = new(LinuxWebAppAuthSettingsV2AzureStaticWebAppV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.ConfigFilePath != nil { + in, out := &in.ConfigFilePath, &out.ConfigFilePath + *out = new(string) + **out = **in + } + if in.CustomOidcV2 != nil { + in, out := &in.CustomOidcV2, &out.CustomOidcV2 + *out = make([]LinuxWebAppAuthSettingsV2CustomOidcV2Parameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultProvider != nil { + in, out := &in.DefaultProvider, &out.DefaultProvider + *out = new(string) + **out = **in + } + if in.ExcludedPaths != nil { + in, out := &in.ExcludedPaths, &out.ExcludedPaths + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FacebookV2 != nil { + in, out := &in.FacebookV2, &out.FacebookV2 + *out = new(LinuxWebAppAuthSettingsV2FacebookV2Parameters) + (*in).DeepCopyInto(*out) + 
} + if in.ForwardProxyConvention != nil { + in, out := &in.ForwardProxyConvention, &out.ForwardProxyConvention + *out = new(string) + **out = **in + } + if in.ForwardProxyCustomHostHeaderName != nil { + in, out := &in.ForwardProxyCustomHostHeaderName, &out.ForwardProxyCustomHostHeaderName + *out = new(string) + **out = **in + } + if in.ForwardProxyCustomSchemeHeaderName != nil { + in, out := &in.ForwardProxyCustomSchemeHeaderName, &out.ForwardProxyCustomSchemeHeaderName + *out = new(string) + **out = **in + } + if in.GithubV2 != nil { + in, out := &in.GithubV2, &out.GithubV2 + *out = new(LinuxWebAppAuthSettingsV2GithubV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.GoogleV2 != nil { + in, out := &in.GoogleV2, &out.GoogleV2 + *out = new(LinuxWebAppAuthSettingsV2GoogleV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.HTTPRouteAPIPrefix != nil { + in, out := &in.HTTPRouteAPIPrefix, &out.HTTPRouteAPIPrefix + *out = new(string) + **out = **in + } + if in.Login != nil { + in, out := &in.Login, &out.Login + *out = new(LinuxWebAppAuthSettingsV2LoginParameters) + (*in).DeepCopyInto(*out) + } + if in.MicrosoftV2 != nil { + in, out := &in.MicrosoftV2, &out.MicrosoftV2 + *out = new(LinuxWebAppAuthSettingsV2MicrosoftV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.RequireAuthentication != nil { + in, out := &in.RequireAuthentication, &out.RequireAuthentication + *out = new(bool) + **out = **in + } + if in.RequireHTTPS != nil { + in, out := &in.RequireHTTPS, &out.RequireHTTPS + *out = new(bool) + **out = **in + } + if in.RuntimeVersion != nil { + in, out := &in.RuntimeVersion, &out.RuntimeVersion + *out = new(string) + **out = **in + } + if in.TwitterV2 != nil { + in, out := &in.TwitterV2, &out.TwitterV2 + *out = new(LinuxWebAppAuthSettingsV2TwitterV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.UnauthenticatedAction != nil { + in, out := &in.UnauthenticatedAction, &out.UnauthenticatedAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an 
autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppAuthSettingsV2Parameters. +func (in *LinuxWebAppAuthSettingsV2Parameters) DeepCopy() *LinuxWebAppAuthSettingsV2Parameters { + if in == nil { + return nil + } + out := new(LinuxWebAppAuthSettingsV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppAuthSettingsV2TwitterV2InitParameters) DeepCopyInto(out *LinuxWebAppAuthSettingsV2TwitterV2InitParameters) { + *out = *in + if in.ConsumerKey != nil { + in, out := &in.ConsumerKey, &out.ConsumerKey + *out = new(string) + **out = **in + } + if in.ConsumerSecretSettingName != nil { + in, out := &in.ConsumerSecretSettingName, &out.ConsumerSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppAuthSettingsV2TwitterV2InitParameters. +func (in *LinuxWebAppAuthSettingsV2TwitterV2InitParameters) DeepCopy() *LinuxWebAppAuthSettingsV2TwitterV2InitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppAuthSettingsV2TwitterV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppAuthSettingsV2TwitterV2Observation) DeepCopyInto(out *LinuxWebAppAuthSettingsV2TwitterV2Observation) { + *out = *in + if in.ConsumerKey != nil { + in, out := &in.ConsumerKey, &out.ConsumerKey + *out = new(string) + **out = **in + } + if in.ConsumerSecretSettingName != nil { + in, out := &in.ConsumerSecretSettingName, &out.ConsumerSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppAuthSettingsV2TwitterV2Observation. 
+func (in *LinuxWebAppAuthSettingsV2TwitterV2Observation) DeepCopy() *LinuxWebAppAuthSettingsV2TwitterV2Observation { + if in == nil { + return nil + } + out := new(LinuxWebAppAuthSettingsV2TwitterV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppAuthSettingsV2TwitterV2Parameters) DeepCopyInto(out *LinuxWebAppAuthSettingsV2TwitterV2Parameters) { + *out = *in + if in.ConsumerKey != nil { + in, out := &in.ConsumerKey, &out.ConsumerKey + *out = new(string) + **out = **in + } + if in.ConsumerSecretSettingName != nil { + in, out := &in.ConsumerSecretSettingName, &out.ConsumerSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppAuthSettingsV2TwitterV2Parameters. +func (in *LinuxWebAppAuthSettingsV2TwitterV2Parameters) DeepCopy() *LinuxWebAppAuthSettingsV2TwitterV2Parameters { + if in == nil { + return nil + } + out := new(LinuxWebAppAuthSettingsV2TwitterV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppBackupInitParameters) DeepCopyInto(out *LinuxWebAppBackupInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(LinuxWebAppBackupScheduleInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppBackupInitParameters. 
+func (in *LinuxWebAppBackupInitParameters) DeepCopy() *LinuxWebAppBackupInitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppBackupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppBackupObservation) DeepCopyInto(out *LinuxWebAppBackupObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(LinuxWebAppBackupScheduleObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppBackupObservation. +func (in *LinuxWebAppBackupObservation) DeepCopy() *LinuxWebAppBackupObservation { + if in == nil { + return nil + } + out := new(LinuxWebAppBackupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppBackupParameters) DeepCopyInto(out *LinuxWebAppBackupParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(LinuxWebAppBackupScheduleParameters) + (*in).DeepCopyInto(*out) + } + out.StorageAccountURLSecretRef = in.StorageAccountURLSecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppBackupParameters. 
+func (in *LinuxWebAppBackupParameters) DeepCopy() *LinuxWebAppBackupParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppBackupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppBackupScheduleInitParameters) DeepCopyInto(out *LinuxWebAppBackupScheduleInitParameters) { + *out = *in + if in.FrequencyInterval != nil { + in, out := &in.FrequencyInterval, &out.FrequencyInterval + *out = new(float64) + **out = **in + } + if in.FrequencyUnit != nil { + in, out := &in.FrequencyUnit, &out.FrequencyUnit + *out = new(string) + **out = **in + } + if in.KeepAtLeastOneBackup != nil { + in, out := &in.KeepAtLeastOneBackup, &out.KeepAtLeastOneBackup + *out = new(bool) + **out = **in + } + if in.RetentionPeriodDays != nil { + in, out := &in.RetentionPeriodDays, &out.RetentionPeriodDays + *out = new(float64) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppBackupScheduleInitParameters. +func (in *LinuxWebAppBackupScheduleInitParameters) DeepCopy() *LinuxWebAppBackupScheduleInitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppBackupScheduleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppBackupScheduleObservation) DeepCopyInto(out *LinuxWebAppBackupScheduleObservation) { + *out = *in + if in.FrequencyInterval != nil { + in, out := &in.FrequencyInterval, &out.FrequencyInterval + *out = new(float64) + **out = **in + } + if in.FrequencyUnit != nil { + in, out := &in.FrequencyUnit, &out.FrequencyUnit + *out = new(string) + **out = **in + } + if in.KeepAtLeastOneBackup != nil { + in, out := &in.KeepAtLeastOneBackup, &out.KeepAtLeastOneBackup + *out = new(bool) + **out = **in + } + if in.LastExecutionTime != nil { + in, out := &in.LastExecutionTime, &out.LastExecutionTime + *out = new(string) + **out = **in + } + if in.RetentionPeriodDays != nil { + in, out := &in.RetentionPeriodDays, &out.RetentionPeriodDays + *out = new(float64) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppBackupScheduleObservation. +func (in *LinuxWebAppBackupScheduleObservation) DeepCopy() *LinuxWebAppBackupScheduleObservation { + if in == nil { + return nil + } + out := new(LinuxWebAppBackupScheduleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppBackupScheduleParameters) DeepCopyInto(out *LinuxWebAppBackupScheduleParameters) { + *out = *in + if in.FrequencyInterval != nil { + in, out := &in.FrequencyInterval, &out.FrequencyInterval + *out = new(float64) + **out = **in + } + if in.FrequencyUnit != nil { + in, out := &in.FrequencyUnit, &out.FrequencyUnit + *out = new(string) + **out = **in + } + if in.KeepAtLeastOneBackup != nil { + in, out := &in.KeepAtLeastOneBackup, &out.KeepAtLeastOneBackup + *out = new(bool) + **out = **in + } + if in.RetentionPeriodDays != nil { + in, out := &in.RetentionPeriodDays, &out.RetentionPeriodDays + *out = new(float64) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppBackupScheduleParameters. +func (in *LinuxWebAppBackupScheduleParameters) DeepCopy() *LinuxWebAppBackupScheduleParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppBackupScheduleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppConnectionStringInitParameters) DeepCopyInto(out *LinuxWebAppConnectionStringInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppConnectionStringInitParameters. 
+func (in *LinuxWebAppConnectionStringInitParameters) DeepCopy() *LinuxWebAppConnectionStringInitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppConnectionStringInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppConnectionStringObservation) DeepCopyInto(out *LinuxWebAppConnectionStringObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppConnectionStringObservation. +func (in *LinuxWebAppConnectionStringObservation) DeepCopy() *LinuxWebAppConnectionStringObservation { + if in == nil { + return nil + } + out := new(LinuxWebAppConnectionStringObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppConnectionStringParameters) DeepCopyInto(out *LinuxWebAppConnectionStringParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + out.ValueSecretRef = in.ValueSecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppConnectionStringParameters. +func (in *LinuxWebAppConnectionStringParameters) DeepCopy() *LinuxWebAppConnectionStringParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppConnectionStringParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppIdentityInitParameters) DeepCopyInto(out *LinuxWebAppIdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppIdentityInitParameters. +func (in *LinuxWebAppIdentityInitParameters) DeepCopy() *LinuxWebAppIdentityInitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppIdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppIdentityObservation) DeepCopyInto(out *LinuxWebAppIdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppIdentityObservation. 
+func (in *LinuxWebAppIdentityObservation) DeepCopy() *LinuxWebAppIdentityObservation { + if in == nil { + return nil + } + out := new(LinuxWebAppIdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppIdentityParameters) DeepCopyInto(out *LinuxWebAppIdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppIdentityParameters. +func (in *LinuxWebAppIdentityParameters) DeepCopy() *LinuxWebAppIdentityParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppIdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppInitParameters) DeepCopyInto(out *LinuxWebAppInitParameters) { + *out = *in + if in.AppSettings != nil { + in, out := &in.AppSettings, &out.AppSettings + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AuthSettings != nil { + in, out := &in.AuthSettings, &out.AuthSettings + *out = new(LinuxWebAppAuthSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AuthSettingsV2 != nil { + in, out := &in.AuthSettingsV2, &out.AuthSettingsV2 + *out = new(LinuxWebAppAuthSettingsV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.Backup != nil { + in, out := &in.Backup, &out.Backup + *out = new(LinuxWebAppBackupInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ClientAffinityEnabled != nil { + in, out := &in.ClientAffinityEnabled, &out.ClientAffinityEnabled + *out = new(bool) + **out = **in + } + if in.ClientCertificateEnabled != nil { + in, out := &in.ClientCertificateEnabled, &out.ClientCertificateEnabled + *out = new(bool) + **out = **in + } + if in.ClientCertificateExclusionPaths != nil { + in, out := &in.ClientCertificateExclusionPaths, &out.ClientCertificateExclusionPaths + *out = new(string) + **out = **in + } + if in.ClientCertificateMode != nil { + in, out := &in.ClientCertificateMode, &out.ClientCertificateMode + *out = new(string) + **out = **in + } + if in.ConnectionString != nil { + in, out := &in.ConnectionString, &out.ConnectionString + *out = make([]LinuxWebAppConnectionStringInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.FtpPublishBasicAuthenticationEnabled != nil { + in, out := &in.FtpPublishBasicAuthenticationEnabled, 
&out.FtpPublishBasicAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.HTTPSOnly != nil { + in, out := &in.HTTPSOnly, &out.HTTPSOnly + *out = new(bool) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(LinuxWebAppIdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.KeyVaultReferenceIdentityID != nil { + in, out := &in.KeyVaultReferenceIdentityID, &out.KeyVaultReferenceIdentityID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Logs != nil { + in, out := &in.Logs, &out.Logs + *out = new(LogsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ServicePlanID != nil { + in, out := &in.ServicePlanID, &out.ServicePlanID + *out = new(string) + **out = **in + } + if in.ServicePlanIDRef != nil { + in, out := &in.ServicePlanIDRef, &out.ServicePlanIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServicePlanIDSelector != nil { + in, out := &in.ServicePlanIDSelector, &out.ServicePlanIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SiteConfig != nil { + in, out := &in.SiteConfig, &out.SiteConfig + *out = new(LinuxWebAppSiteConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StickySettings != nil { + in, out := &in.StickySettings, &out.StickySettings + *out = new(LinuxWebAppStickySettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StorageAccount != nil { + in, out := &in.StorageAccount, &out.StorageAccount + *out = make([]LinuxWebAppStorageAccountInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var 
outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetIDRef != nil { + in, out := &in.VirtualNetworkSubnetIDRef, &out.VirtualNetworkSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkSubnetIDSelector != nil { + in, out := &in.VirtualNetworkSubnetIDSelector, &out.VirtualNetworkSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.WebdeployPublishBasicAuthenticationEnabled != nil { + in, out := &in.WebdeployPublishBasicAuthenticationEnabled, &out.WebdeployPublishBasicAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.ZipDeployFile != nil { + in, out := &in.ZipDeployFile, &out.ZipDeployFile + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppInitParameters. +func (in *LinuxWebAppInitParameters) DeepCopy() *LinuxWebAppInitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppList) DeepCopyInto(out *LinuxWebAppList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LinuxWebApp, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppList. 
+func (in *LinuxWebAppList) DeepCopy() *LinuxWebAppList { + if in == nil { + return nil + } + out := new(LinuxWebAppList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LinuxWebAppList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppObservation) DeepCopyInto(out *LinuxWebAppObservation) { + *out = *in + if in.AppSettings != nil { + in, out := &in.AppSettings, &out.AppSettings + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AuthSettings != nil { + in, out := &in.AuthSettings, &out.AuthSettings + *out = new(LinuxWebAppAuthSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.AuthSettingsV2 != nil { + in, out := &in.AuthSettingsV2, &out.AuthSettingsV2 + *out = new(LinuxWebAppAuthSettingsV2Observation) + (*in).DeepCopyInto(*out) + } + if in.Backup != nil { + in, out := &in.Backup, &out.Backup + *out = new(LinuxWebAppBackupObservation) + (*in).DeepCopyInto(*out) + } + if in.ClientAffinityEnabled != nil { + in, out := &in.ClientAffinityEnabled, &out.ClientAffinityEnabled + *out = new(bool) + **out = **in + } + if in.ClientCertificateEnabled != nil { + in, out := &in.ClientCertificateEnabled, &out.ClientCertificateEnabled + *out = new(bool) + **out = **in + } + if in.ClientCertificateExclusionPaths != nil { + in, out := &in.ClientCertificateExclusionPaths, &out.ClientCertificateExclusionPaths + *out = new(string) + **out = **in + } + if in.ClientCertificateMode != nil { + in, out := &in.ClientCertificateMode, &out.ClientCertificateMode + 
*out = new(string) + **out = **in + } + if in.ConnectionString != nil { + in, out := &in.ConnectionString, &out.ConnectionString + *out = make([]LinuxWebAppConnectionStringObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultHostName != nil { + in, out := &in.DefaultHostName, &out.DefaultHostName + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.FtpPublishBasicAuthenticationEnabled != nil { + in, out := &in.FtpPublishBasicAuthenticationEnabled, &out.FtpPublishBasicAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.HTTPSOnly != nil { + in, out := &in.HTTPSOnly, &out.HTTPSOnly + *out = new(bool) + **out = **in + } + if in.HostingEnvironmentID != nil { + in, out := &in.HostingEnvironmentID, &out.HostingEnvironmentID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(LinuxWebAppIdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.KeyVaultReferenceIdentityID != nil { + in, out := &in.KeyVaultReferenceIdentityID, &out.KeyVaultReferenceIdentityID + *out = new(string) + **out = **in + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Logs != nil { + in, out := &in.Logs, &out.Logs + *out = new(LogsObservation) + (*in).DeepCopyInto(*out) + } + if in.OutboundIPAddressList != nil { + in, out := &in.OutboundIPAddressList, &out.OutboundIPAddressList + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OutboundIPAddresses != nil { + in, out := &in.OutboundIPAddresses, 
&out.OutboundIPAddresses + *out = new(string) + **out = **in + } + if in.PossibleOutboundIPAddressList != nil { + in, out := &in.PossibleOutboundIPAddressList, &out.PossibleOutboundIPAddressList + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PossibleOutboundIPAddresses != nil { + in, out := &in.PossibleOutboundIPAddresses, &out.PossibleOutboundIPAddresses + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ServicePlanID != nil { + in, out := &in.ServicePlanID, &out.ServicePlanID + *out = new(string) + **out = **in + } + if in.SiteConfig != nil { + in, out := &in.SiteConfig, &out.SiteConfig + *out = new(LinuxWebAppSiteConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.StickySettings != nil { + in, out := &in.StickySettings, &out.StickySettings + *out = new(LinuxWebAppStickySettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.StorageAccount != nil { + in, out := &in.StorageAccount, &out.StorageAccount + *out = make([]LinuxWebAppStorageAccountObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.WebdeployPublishBasicAuthenticationEnabled != nil 
{ + in, out := &in.WebdeployPublishBasicAuthenticationEnabled, &out.WebdeployPublishBasicAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.ZipDeployFile != nil { + in, out := &in.ZipDeployFile, &out.ZipDeployFile + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppObservation. +func (in *LinuxWebAppObservation) DeepCopy() *LinuxWebAppObservation { + if in == nil { + return nil + } + out := new(LinuxWebAppObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppParameters) DeepCopyInto(out *LinuxWebAppParameters) { + *out = *in + if in.AppSettings != nil { + in, out := &in.AppSettings, &out.AppSettings + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AuthSettings != nil { + in, out := &in.AuthSettings, &out.AuthSettings + *out = new(LinuxWebAppAuthSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.AuthSettingsV2 != nil { + in, out := &in.AuthSettingsV2, &out.AuthSettingsV2 + *out = new(LinuxWebAppAuthSettingsV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.Backup != nil { + in, out := &in.Backup, &out.Backup + *out = new(LinuxWebAppBackupParameters) + (*in).DeepCopyInto(*out) + } + if in.ClientAffinityEnabled != nil { + in, out := &in.ClientAffinityEnabled, &out.ClientAffinityEnabled + *out = new(bool) + **out = **in + } + if in.ClientCertificateEnabled != nil { + in, out := &in.ClientCertificateEnabled, &out.ClientCertificateEnabled + *out = new(bool) + **out = **in + } + if in.ClientCertificateExclusionPaths != nil { + in, out := &in.ClientCertificateExclusionPaths, 
&out.ClientCertificateExclusionPaths + *out = new(string) + **out = **in + } + if in.ClientCertificateMode != nil { + in, out := &in.ClientCertificateMode, &out.ClientCertificateMode + *out = new(string) + **out = **in + } + if in.ConnectionString != nil { + in, out := &in.ConnectionString, &out.ConnectionString + *out = make([]LinuxWebAppConnectionStringParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.FtpPublishBasicAuthenticationEnabled != nil { + in, out := &in.FtpPublishBasicAuthenticationEnabled, &out.FtpPublishBasicAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.HTTPSOnly != nil { + in, out := &in.HTTPSOnly, &out.HTTPSOnly + *out = new(bool) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(LinuxWebAppIdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.KeyVaultReferenceIdentityID != nil { + in, out := &in.KeyVaultReferenceIdentityID, &out.KeyVaultReferenceIdentityID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Logs != nil { + in, out := &in.Logs, &out.Logs + *out = new(LogsParameters) + (*in).DeepCopyInto(*out) + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + 
} + if in.ServicePlanID != nil { + in, out := &in.ServicePlanID, &out.ServicePlanID + *out = new(string) + **out = **in + } + if in.ServicePlanIDRef != nil { + in, out := &in.ServicePlanIDRef, &out.ServicePlanIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServicePlanIDSelector != nil { + in, out := &in.ServicePlanIDSelector, &out.ServicePlanIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SiteConfig != nil { + in, out := &in.SiteConfig, &out.SiteConfig + *out = new(LinuxWebAppSiteConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.StickySettings != nil { + in, out := &in.StickySettings, &out.StickySettings + *out = new(LinuxWebAppStickySettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.StorageAccount != nil { + in, out := &in.StorageAccount, &out.StorageAccount + *out = make([]LinuxWebAppStorageAccountParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetIDRef != nil { + in, out := &in.VirtualNetworkSubnetIDRef, &out.VirtualNetworkSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkSubnetIDSelector != nil { + in, out := &in.VirtualNetworkSubnetIDSelector, &out.VirtualNetworkSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.WebdeployPublishBasicAuthenticationEnabled != nil { + in, out := &in.WebdeployPublishBasicAuthenticationEnabled, &out.WebdeployPublishBasicAuthenticationEnabled + *out = new(bool) + **out 
= **in + } + if in.ZipDeployFile != nil { + in, out := &in.ZipDeployFile, &out.ZipDeployFile + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppParameters. +func (in *LinuxWebAppParameters) DeepCopy() *LinuxWebAppParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSiteConfigApplicationStackInitParameters) DeepCopyInto(out *LinuxWebAppSiteConfigApplicationStackInitParameters) { + *out = *in + if in.DockerImage != nil { + in, out := &in.DockerImage, &out.DockerImage + *out = new(string) + **out = **in + } + if in.DockerImageName != nil { + in, out := &in.DockerImageName, &out.DockerImageName + *out = new(string) + **out = **in + } + if in.DockerImageTag != nil { + in, out := &in.DockerImageTag, &out.DockerImageTag + *out = new(string) + **out = **in + } + if in.DockerRegistryURL != nil { + in, out := &in.DockerRegistryURL, &out.DockerRegistryURL + *out = new(string) + **out = **in + } + if in.DockerRegistryUsername != nil { + in, out := &in.DockerRegistryUsername, &out.DockerRegistryUsername + *out = new(string) + **out = **in + } + if in.DotnetVersion != nil { + in, out := &in.DotnetVersion, &out.DotnetVersion + *out = new(string) + **out = **in + } + if in.GoVersion != nil { + in, out := &in.GoVersion, &out.GoVersion + *out = new(string) + **out = **in + } + if in.JavaServer != nil { + in, out := &in.JavaServer, &out.JavaServer + *out = new(string) + **out = **in + } + if in.JavaServerVersion != nil { + in, out := &in.JavaServerVersion, &out.JavaServerVersion + *out = new(string) + **out = **in + } + if in.JavaVersion != nil { + in, out := &in.JavaVersion, &out.JavaVersion + *out = new(string) + **out = **in + } + if in.NodeVersion != nil { + in, out := 
&in.NodeVersion, &out.NodeVersion + *out = new(string) + **out = **in + } + if in.PHPVersion != nil { + in, out := &in.PHPVersion, &out.PHPVersion + *out = new(string) + **out = **in + } + if in.PythonVersion != nil { + in, out := &in.PythonVersion, &out.PythonVersion + *out = new(string) + **out = **in + } + if in.RubyVersion != nil { + in, out := &in.RubyVersion, &out.RubyVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSiteConfigApplicationStackInitParameters. +func (in *LinuxWebAppSiteConfigApplicationStackInitParameters) DeepCopy() *LinuxWebAppSiteConfigApplicationStackInitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSiteConfigApplicationStackInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSiteConfigApplicationStackObservation) DeepCopyInto(out *LinuxWebAppSiteConfigApplicationStackObservation) { + *out = *in + if in.DockerImage != nil { + in, out := &in.DockerImage, &out.DockerImage + *out = new(string) + **out = **in + } + if in.DockerImageName != nil { + in, out := &in.DockerImageName, &out.DockerImageName + *out = new(string) + **out = **in + } + if in.DockerImageTag != nil { + in, out := &in.DockerImageTag, &out.DockerImageTag + *out = new(string) + **out = **in + } + if in.DockerRegistryURL != nil { + in, out := &in.DockerRegistryURL, &out.DockerRegistryURL + *out = new(string) + **out = **in + } + if in.DockerRegistryUsername != nil { + in, out := &in.DockerRegistryUsername, &out.DockerRegistryUsername + *out = new(string) + **out = **in + } + if in.DotnetVersion != nil { + in, out := &in.DotnetVersion, &out.DotnetVersion + *out = new(string) + **out = **in + } + if in.GoVersion != nil { + in, out := &in.GoVersion, &out.GoVersion + *out = new(string) + **out = **in + } + if 
in.JavaServer != nil { + in, out := &in.JavaServer, &out.JavaServer + *out = new(string) + **out = **in + } + if in.JavaServerVersion != nil { + in, out := &in.JavaServerVersion, &out.JavaServerVersion + *out = new(string) + **out = **in + } + if in.JavaVersion != nil { + in, out := &in.JavaVersion, &out.JavaVersion + *out = new(string) + **out = **in + } + if in.NodeVersion != nil { + in, out := &in.NodeVersion, &out.NodeVersion + *out = new(string) + **out = **in + } + if in.PHPVersion != nil { + in, out := &in.PHPVersion, &out.PHPVersion + *out = new(string) + **out = **in + } + if in.PythonVersion != nil { + in, out := &in.PythonVersion, &out.PythonVersion + *out = new(string) + **out = **in + } + if in.RubyVersion != nil { + in, out := &in.RubyVersion, &out.RubyVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSiteConfigApplicationStackObservation. +func (in *LinuxWebAppSiteConfigApplicationStackObservation) DeepCopy() *LinuxWebAppSiteConfigApplicationStackObservation { + if in == nil { + return nil + } + out := new(LinuxWebAppSiteConfigApplicationStackObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSiteConfigApplicationStackParameters) DeepCopyInto(out *LinuxWebAppSiteConfigApplicationStackParameters) { + *out = *in + if in.DockerImage != nil { + in, out := &in.DockerImage, &out.DockerImage + *out = new(string) + **out = **in + } + if in.DockerImageName != nil { + in, out := &in.DockerImageName, &out.DockerImageName + *out = new(string) + **out = **in + } + if in.DockerImageTag != nil { + in, out := &in.DockerImageTag, &out.DockerImageTag + *out = new(string) + **out = **in + } + if in.DockerRegistryPasswordSecretRef != nil { + in, out := &in.DockerRegistryPasswordSecretRef, &out.DockerRegistryPasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.DockerRegistryURL != nil { + in, out := &in.DockerRegistryURL, &out.DockerRegistryURL + *out = new(string) + **out = **in + } + if in.DockerRegistryUsername != nil { + in, out := &in.DockerRegistryUsername, &out.DockerRegistryUsername + *out = new(string) + **out = **in + } + if in.DotnetVersion != nil { + in, out := &in.DotnetVersion, &out.DotnetVersion + *out = new(string) + **out = **in + } + if in.GoVersion != nil { + in, out := &in.GoVersion, &out.GoVersion + *out = new(string) + **out = **in + } + if in.JavaServer != nil { + in, out := &in.JavaServer, &out.JavaServer + *out = new(string) + **out = **in + } + if in.JavaServerVersion != nil { + in, out := &in.JavaServerVersion, &out.JavaServerVersion + *out = new(string) + **out = **in + } + if in.JavaVersion != nil { + in, out := &in.JavaVersion, &out.JavaVersion + *out = new(string) + **out = **in + } + if in.NodeVersion != nil { + in, out := &in.NodeVersion, &out.NodeVersion + *out = new(string) + **out = **in + } + if in.PHPVersion != nil { + in, out := &in.PHPVersion, &out.PHPVersion + *out = new(string) + **out = **in + } + if in.PythonVersion != nil { + in, out := &in.PythonVersion, &out.PythonVersion + *out = new(string) + **out = **in + } + if in.RubyVersion != nil { + in, out := &in.RubyVersion, 
&out.RubyVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSiteConfigApplicationStackParameters. +func (in *LinuxWebAppSiteConfigApplicationStackParameters) DeepCopy() *LinuxWebAppSiteConfigApplicationStackParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSiteConfigApplicationStackParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSiteConfigCorsInitParameters) DeepCopyInto(out *LinuxWebAppSiteConfigCorsInitParameters) { + *out = *in + if in.AllowedOrigins != nil { + in, out := &in.AllowedOrigins, &out.AllowedOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SupportCredentials != nil { + in, out := &in.SupportCredentials, &out.SupportCredentials + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSiteConfigCorsInitParameters. +func (in *LinuxWebAppSiteConfigCorsInitParameters) DeepCopy() *LinuxWebAppSiteConfigCorsInitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSiteConfigCorsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSiteConfigCorsObservation) DeepCopyInto(out *LinuxWebAppSiteConfigCorsObservation) { + *out = *in + if in.AllowedOrigins != nil { + in, out := &in.AllowedOrigins, &out.AllowedOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SupportCredentials != nil { + in, out := &in.SupportCredentials, &out.SupportCredentials + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSiteConfigCorsObservation. +func (in *LinuxWebAppSiteConfigCorsObservation) DeepCopy() *LinuxWebAppSiteConfigCorsObservation { + if in == nil { + return nil + } + out := new(LinuxWebAppSiteConfigCorsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSiteConfigCorsParameters) DeepCopyInto(out *LinuxWebAppSiteConfigCorsParameters) { + *out = *in + if in.AllowedOrigins != nil { + in, out := &in.AllowedOrigins, &out.AllowedOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SupportCredentials != nil { + in, out := &in.SupportCredentials, &out.SupportCredentials + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSiteConfigCorsParameters. +func (in *LinuxWebAppSiteConfigCorsParameters) DeepCopy() *LinuxWebAppSiteConfigCorsParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSiteConfigCorsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSiteConfigIPRestrictionHeadersInitParameters) DeepCopyInto(out *LinuxWebAppSiteConfigIPRestrictionHeadersInitParameters) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSiteConfigIPRestrictionHeadersInitParameters. +func (in *LinuxWebAppSiteConfigIPRestrictionHeadersInitParameters) DeepCopy() *LinuxWebAppSiteConfigIPRestrictionHeadersInitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSiteConfigIPRestrictionHeadersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSiteConfigIPRestrictionHeadersObservation) DeepCopyInto(out *LinuxWebAppSiteConfigIPRestrictionHeadersObservation) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSiteConfigIPRestrictionHeadersObservation. +func (in *LinuxWebAppSiteConfigIPRestrictionHeadersObservation) DeepCopy() *LinuxWebAppSiteConfigIPRestrictionHeadersObservation { + if in == nil { + return nil + } + out := new(LinuxWebAppSiteConfigIPRestrictionHeadersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSiteConfigIPRestrictionHeadersParameters) DeepCopyInto(out *LinuxWebAppSiteConfigIPRestrictionHeadersParameters) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSiteConfigIPRestrictionHeadersParameters. +func (in *LinuxWebAppSiteConfigIPRestrictionHeadersParameters) DeepCopy() *LinuxWebAppSiteConfigIPRestrictionHeadersParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSiteConfigIPRestrictionHeadersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSiteConfigIPRestrictionInitParameters) DeepCopyInto(out *LinuxWebAppSiteConfigIPRestrictionInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]LinuxWebAppSiteConfigIPRestrictionHeadersInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetIDRef != nil { + in, out := &in.VirtualNetworkSubnetIDRef, &out.VirtualNetworkSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkSubnetIDSelector != nil { + in, out := &in.VirtualNetworkSubnetIDSelector, &out.VirtualNetworkSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSiteConfigIPRestrictionInitParameters. 
+func (in *LinuxWebAppSiteConfigIPRestrictionInitParameters) DeepCopy() *LinuxWebAppSiteConfigIPRestrictionInitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSiteConfigIPRestrictionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSiteConfigIPRestrictionObservation) DeepCopyInto(out *LinuxWebAppSiteConfigIPRestrictionObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]LinuxWebAppSiteConfigIPRestrictionHeadersObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSiteConfigIPRestrictionObservation. 
+func (in *LinuxWebAppSiteConfigIPRestrictionObservation) DeepCopy() *LinuxWebAppSiteConfigIPRestrictionObservation { + if in == nil { + return nil + } + out := new(LinuxWebAppSiteConfigIPRestrictionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSiteConfigIPRestrictionParameters) DeepCopyInto(out *LinuxWebAppSiteConfigIPRestrictionParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]LinuxWebAppSiteConfigIPRestrictionHeadersParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetIDRef != nil { + in, out := &in.VirtualNetworkSubnetIDRef, &out.VirtualNetworkSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkSubnetIDSelector != nil { + in, out := &in.VirtualNetworkSubnetIDSelector, &out.VirtualNetworkSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
LinuxWebAppSiteConfigIPRestrictionParameters. +func (in *LinuxWebAppSiteConfigIPRestrictionParameters) DeepCopy() *LinuxWebAppSiteConfigIPRestrictionParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSiteConfigIPRestrictionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSiteConfigInitParameters) DeepCopyInto(out *LinuxWebAppSiteConfigInitParameters) { + *out = *in + if in.APIDefinitionURL != nil { + in, out := &in.APIDefinitionURL, &out.APIDefinitionURL + *out = new(string) + **out = **in + } + if in.APIManagementAPIID != nil { + in, out := &in.APIManagementAPIID, &out.APIManagementAPIID + *out = new(string) + **out = **in + } + if in.AlwaysOn != nil { + in, out := &in.AlwaysOn, &out.AlwaysOn + *out = new(bool) + **out = **in + } + if in.AppCommandLine != nil { + in, out := &in.AppCommandLine, &out.AppCommandLine + *out = new(string) + **out = **in + } + if in.ApplicationStack != nil { + in, out := &in.ApplicationStack, &out.ApplicationStack + *out = new(LinuxWebAppSiteConfigApplicationStackInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AutoHealEnabled != nil { + in, out := &in.AutoHealEnabled, &out.AutoHealEnabled + *out = new(bool) + **out = **in + } + if in.AutoHealSetting != nil { + in, out := &in.AutoHealSetting, &out.AutoHealSetting + *out = new(AutoHealSettingInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ContainerRegistryManagedIdentityClientID != nil { + in, out := &in.ContainerRegistryManagedIdentityClientID, &out.ContainerRegistryManagedIdentityClientID + *out = new(string) + **out = **in + } + if in.ContainerRegistryUseManagedIdentity != nil { + in, out := &in.ContainerRegistryUseManagedIdentity, &out.ContainerRegistryUseManagedIdentity + *out = new(bool) + **out = **in + } + if in.Cors != nil { + in, out := &in.Cors, &out.Cors + *out = 
new(LinuxWebAppSiteConfigCorsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DefaultDocuments != nil { + in, out := &in.DefaultDocuments, &out.DefaultDocuments + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FtpsState != nil { + in, out := &in.FtpsState, &out.FtpsState + *out = new(string) + **out = **in + } + if in.HealthCheckEvictionTimeInMin != nil { + in, out := &in.HealthCheckEvictionTimeInMin, &out.HealthCheckEvictionTimeInMin + *out = new(float64) + **out = **in + } + if in.HealthCheckPath != nil { + in, out := &in.HealthCheckPath, &out.HealthCheckPath + *out = new(string) + **out = **in + } + if in.Http2Enabled != nil { + in, out := &in.Http2Enabled, &out.Http2Enabled + *out = new(bool) + **out = **in + } + if in.IPRestriction != nil { + in, out := &in.IPRestriction, &out.IPRestriction + *out = make([]LinuxWebAppSiteConfigIPRestrictionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPRestrictionDefaultAction != nil { + in, out := &in.IPRestrictionDefaultAction, &out.IPRestrictionDefaultAction + *out = new(string) + **out = **in + } + if in.LoadBalancingMode != nil { + in, out := &in.LoadBalancingMode, &out.LoadBalancingMode + *out = new(string) + **out = **in + } + if in.LocalMySQLEnabled != nil { + in, out := &in.LocalMySQLEnabled, &out.LocalMySQLEnabled + *out = new(bool) + **out = **in + } + if in.ManagedPipelineMode != nil { + in, out := &in.ManagedPipelineMode, &out.ManagedPipelineMode + *out = new(string) + **out = **in + } + if in.MinimumTLSVersion != nil { + in, out := &in.MinimumTLSVersion, &out.MinimumTLSVersion + *out = new(string) + **out = **in + } + if in.RemoteDebuggingEnabled != nil { + in, out := &in.RemoteDebuggingEnabled, &out.RemoteDebuggingEnabled + *out = new(bool) + **out = **in + } + if in.RemoteDebuggingVersion != nil { + in, out := 
&in.RemoteDebuggingVersion, &out.RemoteDebuggingVersion + *out = new(string) + **out = **in + } + if in.ScmIPRestriction != nil { + in, out := &in.ScmIPRestriction, &out.ScmIPRestriction + *out = make([]LinuxWebAppSiteConfigScmIPRestrictionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ScmIPRestrictionDefaultAction != nil { + in, out := &in.ScmIPRestrictionDefaultAction, &out.ScmIPRestrictionDefaultAction + *out = new(string) + **out = **in + } + if in.ScmMinimumTLSVersion != nil { + in, out := &in.ScmMinimumTLSVersion, &out.ScmMinimumTLSVersion + *out = new(string) + **out = **in + } + if in.ScmUseMainIPRestriction != nil { + in, out := &in.ScmUseMainIPRestriction, &out.ScmUseMainIPRestriction + *out = new(bool) + **out = **in + } + if in.Use32BitWorker != nil { + in, out := &in.Use32BitWorker, &out.Use32BitWorker + *out = new(bool) + **out = **in + } + if in.VnetRouteAllEnabled != nil { + in, out := &in.VnetRouteAllEnabled, &out.VnetRouteAllEnabled + *out = new(bool) + **out = **in + } + if in.WebsocketsEnabled != nil { + in, out := &in.WebsocketsEnabled, &out.WebsocketsEnabled + *out = new(bool) + **out = **in + } + if in.WorkerCount != nil { + in, out := &in.WorkerCount, &out.WorkerCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSiteConfigInitParameters. +func (in *LinuxWebAppSiteConfigInitParameters) DeepCopy() *LinuxWebAppSiteConfigInitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSiteConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSiteConfigObservation) DeepCopyInto(out *LinuxWebAppSiteConfigObservation) { + *out = *in + if in.APIDefinitionURL != nil { + in, out := &in.APIDefinitionURL, &out.APIDefinitionURL + *out = new(string) + **out = **in + } + if in.APIManagementAPIID != nil { + in, out := &in.APIManagementAPIID, &out.APIManagementAPIID + *out = new(string) + **out = **in + } + if in.AlwaysOn != nil { + in, out := &in.AlwaysOn, &out.AlwaysOn + *out = new(bool) + **out = **in + } + if in.AppCommandLine != nil { + in, out := &in.AppCommandLine, &out.AppCommandLine + *out = new(string) + **out = **in + } + if in.ApplicationStack != nil { + in, out := &in.ApplicationStack, &out.ApplicationStack + *out = new(LinuxWebAppSiteConfigApplicationStackObservation) + (*in).DeepCopyInto(*out) + } + if in.AutoHealEnabled != nil { + in, out := &in.AutoHealEnabled, &out.AutoHealEnabled + *out = new(bool) + **out = **in + } + if in.AutoHealSetting != nil { + in, out := &in.AutoHealSetting, &out.AutoHealSetting + *out = new(AutoHealSettingObservation) + (*in).DeepCopyInto(*out) + } + if in.ContainerRegistryManagedIdentityClientID != nil { + in, out := &in.ContainerRegistryManagedIdentityClientID, &out.ContainerRegistryManagedIdentityClientID + *out = new(string) + **out = **in + } + if in.ContainerRegistryUseManagedIdentity != nil { + in, out := &in.ContainerRegistryUseManagedIdentity, &out.ContainerRegistryUseManagedIdentity + *out = new(bool) + **out = **in + } + if in.Cors != nil { + in, out := &in.Cors, &out.Cors + *out = new(LinuxWebAppSiteConfigCorsObservation) + (*in).DeepCopyInto(*out) + } + if in.DefaultDocuments != nil { + in, out := &in.DefaultDocuments, &out.DefaultDocuments + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DetailedErrorLoggingEnabled != nil { + in, out := &in.DetailedErrorLoggingEnabled, &out.DetailedErrorLoggingEnabled + *out = 
new(bool) + **out = **in + } + if in.FtpsState != nil { + in, out := &in.FtpsState, &out.FtpsState + *out = new(string) + **out = **in + } + if in.HealthCheckEvictionTimeInMin != nil { + in, out := &in.HealthCheckEvictionTimeInMin, &out.HealthCheckEvictionTimeInMin + *out = new(float64) + **out = **in + } + if in.HealthCheckPath != nil { + in, out := &in.HealthCheckPath, &out.HealthCheckPath + *out = new(string) + **out = **in + } + if in.Http2Enabled != nil { + in, out := &in.Http2Enabled, &out.Http2Enabled + *out = new(bool) + **out = **in + } + if in.IPRestriction != nil { + in, out := &in.IPRestriction, &out.IPRestriction + *out = make([]LinuxWebAppSiteConfigIPRestrictionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPRestrictionDefaultAction != nil { + in, out := &in.IPRestrictionDefaultAction, &out.IPRestrictionDefaultAction + *out = new(string) + **out = **in + } + if in.LinuxFxVersion != nil { + in, out := &in.LinuxFxVersion, &out.LinuxFxVersion + *out = new(string) + **out = **in + } + if in.LoadBalancingMode != nil { + in, out := &in.LoadBalancingMode, &out.LoadBalancingMode + *out = new(string) + **out = **in + } + if in.LocalMySQLEnabled != nil { + in, out := &in.LocalMySQLEnabled, &out.LocalMySQLEnabled + *out = new(bool) + **out = **in + } + if in.ManagedPipelineMode != nil { + in, out := &in.ManagedPipelineMode, &out.ManagedPipelineMode + *out = new(string) + **out = **in + } + if in.MinimumTLSVersion != nil { + in, out := &in.MinimumTLSVersion, &out.MinimumTLSVersion + *out = new(string) + **out = **in + } + if in.RemoteDebuggingEnabled != nil { + in, out := &in.RemoteDebuggingEnabled, &out.RemoteDebuggingEnabled + *out = new(bool) + **out = **in + } + if in.RemoteDebuggingVersion != nil { + in, out := &in.RemoteDebuggingVersion, &out.RemoteDebuggingVersion + *out = new(string) + **out = **in + } + if in.ScmIPRestriction != nil { + in, out := &in.ScmIPRestriction, &out.ScmIPRestriction + *out = 
make([]LinuxWebAppSiteConfigScmIPRestrictionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ScmIPRestrictionDefaultAction != nil { + in, out := &in.ScmIPRestrictionDefaultAction, &out.ScmIPRestrictionDefaultAction + *out = new(string) + **out = **in + } + if in.ScmMinimumTLSVersion != nil { + in, out := &in.ScmMinimumTLSVersion, &out.ScmMinimumTLSVersion + *out = new(string) + **out = **in + } + if in.ScmType != nil { + in, out := &in.ScmType, &out.ScmType + *out = new(string) + **out = **in + } + if in.ScmUseMainIPRestriction != nil { + in, out := &in.ScmUseMainIPRestriction, &out.ScmUseMainIPRestriction + *out = new(bool) + **out = **in + } + if in.Use32BitWorker != nil { + in, out := &in.Use32BitWorker, &out.Use32BitWorker + *out = new(bool) + **out = **in + } + if in.VnetRouteAllEnabled != nil { + in, out := &in.VnetRouteAllEnabled, &out.VnetRouteAllEnabled + *out = new(bool) + **out = **in + } + if in.WebsocketsEnabled != nil { + in, out := &in.WebsocketsEnabled, &out.WebsocketsEnabled + *out = new(bool) + **out = **in + } + if in.WorkerCount != nil { + in, out := &in.WorkerCount, &out.WorkerCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSiteConfigObservation. +func (in *LinuxWebAppSiteConfigObservation) DeepCopy() *LinuxWebAppSiteConfigObservation { + if in == nil { + return nil + } + out := new(LinuxWebAppSiteConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSiteConfigParameters) DeepCopyInto(out *LinuxWebAppSiteConfigParameters) { + *out = *in + if in.APIDefinitionURL != nil { + in, out := &in.APIDefinitionURL, &out.APIDefinitionURL + *out = new(string) + **out = **in + } + if in.APIManagementAPIID != nil { + in, out := &in.APIManagementAPIID, &out.APIManagementAPIID + *out = new(string) + **out = **in + } + if in.AlwaysOn != nil { + in, out := &in.AlwaysOn, &out.AlwaysOn + *out = new(bool) + **out = **in + } + if in.AppCommandLine != nil { + in, out := &in.AppCommandLine, &out.AppCommandLine + *out = new(string) + **out = **in + } + if in.ApplicationStack != nil { + in, out := &in.ApplicationStack, &out.ApplicationStack + *out = new(LinuxWebAppSiteConfigApplicationStackParameters) + (*in).DeepCopyInto(*out) + } + if in.AutoHealEnabled != nil { + in, out := &in.AutoHealEnabled, &out.AutoHealEnabled + *out = new(bool) + **out = **in + } + if in.AutoHealSetting != nil { + in, out := &in.AutoHealSetting, &out.AutoHealSetting + *out = new(AutoHealSettingParameters) + (*in).DeepCopyInto(*out) + } + if in.ContainerRegistryManagedIdentityClientID != nil { + in, out := &in.ContainerRegistryManagedIdentityClientID, &out.ContainerRegistryManagedIdentityClientID + *out = new(string) + **out = **in + } + if in.ContainerRegistryUseManagedIdentity != nil { + in, out := &in.ContainerRegistryUseManagedIdentity, &out.ContainerRegistryUseManagedIdentity + *out = new(bool) + **out = **in + } + if in.Cors != nil { + in, out := &in.Cors, &out.Cors + *out = new(LinuxWebAppSiteConfigCorsParameters) + (*in).DeepCopyInto(*out) + } + if in.DefaultDocuments != nil { + in, out := &in.DefaultDocuments, &out.DefaultDocuments + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FtpsState != nil { + in, out := &in.FtpsState, &out.FtpsState + *out = new(string) + **out = **in + } + if 
in.HealthCheckEvictionTimeInMin != nil { + in, out := &in.HealthCheckEvictionTimeInMin, &out.HealthCheckEvictionTimeInMin + *out = new(float64) + **out = **in + } + if in.HealthCheckPath != nil { + in, out := &in.HealthCheckPath, &out.HealthCheckPath + *out = new(string) + **out = **in + } + if in.Http2Enabled != nil { + in, out := &in.Http2Enabled, &out.Http2Enabled + *out = new(bool) + **out = **in + } + if in.IPRestriction != nil { + in, out := &in.IPRestriction, &out.IPRestriction + *out = make([]LinuxWebAppSiteConfigIPRestrictionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPRestrictionDefaultAction != nil { + in, out := &in.IPRestrictionDefaultAction, &out.IPRestrictionDefaultAction + *out = new(string) + **out = **in + } + if in.LoadBalancingMode != nil { + in, out := &in.LoadBalancingMode, &out.LoadBalancingMode + *out = new(string) + **out = **in + } + if in.LocalMySQLEnabled != nil { + in, out := &in.LocalMySQLEnabled, &out.LocalMySQLEnabled + *out = new(bool) + **out = **in + } + if in.ManagedPipelineMode != nil { + in, out := &in.ManagedPipelineMode, &out.ManagedPipelineMode + *out = new(string) + **out = **in + } + if in.MinimumTLSVersion != nil { + in, out := &in.MinimumTLSVersion, &out.MinimumTLSVersion + *out = new(string) + **out = **in + } + if in.RemoteDebuggingEnabled != nil { + in, out := &in.RemoteDebuggingEnabled, &out.RemoteDebuggingEnabled + *out = new(bool) + **out = **in + } + if in.RemoteDebuggingVersion != nil { + in, out := &in.RemoteDebuggingVersion, &out.RemoteDebuggingVersion + *out = new(string) + **out = **in + } + if in.ScmIPRestriction != nil { + in, out := &in.ScmIPRestriction, &out.ScmIPRestriction + *out = make([]LinuxWebAppSiteConfigScmIPRestrictionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ScmIPRestrictionDefaultAction != nil { + in, out := &in.ScmIPRestrictionDefaultAction, &out.ScmIPRestrictionDefaultAction + *out = 
new(string) + **out = **in + } + if in.ScmMinimumTLSVersion != nil { + in, out := &in.ScmMinimumTLSVersion, &out.ScmMinimumTLSVersion + *out = new(string) + **out = **in + } + if in.ScmUseMainIPRestriction != nil { + in, out := &in.ScmUseMainIPRestriction, &out.ScmUseMainIPRestriction + *out = new(bool) + **out = **in + } + if in.Use32BitWorker != nil { + in, out := &in.Use32BitWorker, &out.Use32BitWorker + *out = new(bool) + **out = **in + } + if in.VnetRouteAllEnabled != nil { + in, out := &in.VnetRouteAllEnabled, &out.VnetRouteAllEnabled + *out = new(bool) + **out = **in + } + if in.WebsocketsEnabled != nil { + in, out := &in.WebsocketsEnabled, &out.WebsocketsEnabled + *out = new(bool) + **out = **in + } + if in.WorkerCount != nil { + in, out := &in.WorkerCount, &out.WorkerCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSiteConfigParameters. +func (in *LinuxWebAppSiteConfigParameters) DeepCopy() *LinuxWebAppSiteConfigParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSiteConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSiteConfigScmIPRestrictionHeadersInitParameters) DeepCopyInto(out *LinuxWebAppSiteConfigScmIPRestrictionHeadersInitParameters) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSiteConfigScmIPRestrictionHeadersInitParameters. +func (in *LinuxWebAppSiteConfigScmIPRestrictionHeadersInitParameters) DeepCopy() *LinuxWebAppSiteConfigScmIPRestrictionHeadersInitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSiteConfigScmIPRestrictionHeadersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSiteConfigScmIPRestrictionHeadersObservation) DeepCopyInto(out *LinuxWebAppSiteConfigScmIPRestrictionHeadersObservation) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSiteConfigScmIPRestrictionHeadersObservation. +func (in *LinuxWebAppSiteConfigScmIPRestrictionHeadersObservation) DeepCopy() *LinuxWebAppSiteConfigScmIPRestrictionHeadersObservation { + if in == nil { + return nil + } + out := new(LinuxWebAppSiteConfigScmIPRestrictionHeadersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSiteConfigScmIPRestrictionHeadersParameters) DeepCopyInto(out *LinuxWebAppSiteConfigScmIPRestrictionHeadersParameters) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSiteConfigScmIPRestrictionHeadersParameters. +func (in *LinuxWebAppSiteConfigScmIPRestrictionHeadersParameters) DeepCopy() *LinuxWebAppSiteConfigScmIPRestrictionHeadersParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSiteConfigScmIPRestrictionHeadersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSiteConfigScmIPRestrictionInitParameters) DeepCopyInto(out *LinuxWebAppSiteConfigScmIPRestrictionInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]LinuxWebAppSiteConfigScmIPRestrictionHeadersInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetIDRef != nil { + in, out := &in.VirtualNetworkSubnetIDRef, &out.VirtualNetworkSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkSubnetIDSelector != nil { + in, out := &in.VirtualNetworkSubnetIDSelector, &out.VirtualNetworkSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSiteConfigScmIPRestrictionInitParameters. 
+func (in *LinuxWebAppSiteConfigScmIPRestrictionInitParameters) DeepCopy() *LinuxWebAppSiteConfigScmIPRestrictionInitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSiteConfigScmIPRestrictionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSiteConfigScmIPRestrictionObservation) DeepCopyInto(out *LinuxWebAppSiteConfigScmIPRestrictionObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]LinuxWebAppSiteConfigScmIPRestrictionHeadersObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSiteConfigScmIPRestrictionObservation. 
+func (in *LinuxWebAppSiteConfigScmIPRestrictionObservation) DeepCopy() *LinuxWebAppSiteConfigScmIPRestrictionObservation { + if in == nil { + return nil + } + out := new(LinuxWebAppSiteConfigScmIPRestrictionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSiteConfigScmIPRestrictionParameters) DeepCopyInto(out *LinuxWebAppSiteConfigScmIPRestrictionParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]LinuxWebAppSiteConfigScmIPRestrictionHeadersParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetIDRef != nil { + in, out := &in.VirtualNetworkSubnetIDRef, &out.VirtualNetworkSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkSubnetIDSelector != nil { + in, out := &in.VirtualNetworkSubnetIDSelector, &out.VirtualNetworkSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
LinuxWebAppSiteConfigScmIPRestrictionParameters. +func (in *LinuxWebAppSiteConfigScmIPRestrictionParameters) DeepCopy() *LinuxWebAppSiteConfigScmIPRestrictionParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSiteConfigScmIPRestrictionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSiteCredentialInitParameters) DeepCopyInto(out *LinuxWebAppSiteCredentialInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSiteCredentialInitParameters. +func (in *LinuxWebAppSiteCredentialInitParameters) DeepCopy() *LinuxWebAppSiteCredentialInitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSiteCredentialInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSiteCredentialObservation) DeepCopyInto(out *LinuxWebAppSiteCredentialObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Password != nil { + in, out := &in.Password, &out.Password + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSiteCredentialObservation. +func (in *LinuxWebAppSiteCredentialObservation) DeepCopy() *LinuxWebAppSiteCredentialObservation { + if in == nil { + return nil + } + out := new(LinuxWebAppSiteCredentialObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSiteCredentialParameters) DeepCopyInto(out *LinuxWebAppSiteCredentialParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSiteCredentialParameters. +func (in *LinuxWebAppSiteCredentialParameters) DeepCopy() *LinuxWebAppSiteCredentialParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSiteCredentialParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSlot) DeepCopyInto(out *LinuxWebAppSlot) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlot. +func (in *LinuxWebAppSlot) DeepCopy() *LinuxWebAppSlot { + if in == nil { + return nil + } + out := new(LinuxWebAppSlot) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LinuxWebAppSlot) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSlotAuthSettingsActiveDirectoryInitParameters) DeepCopyInto(out *LinuxWebAppSlotAuthSettingsActiveDirectoryInitParameters) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotAuthSettingsActiveDirectoryInitParameters. +func (in *LinuxWebAppSlotAuthSettingsActiveDirectoryInitParameters) DeepCopy() *LinuxWebAppSlotAuthSettingsActiveDirectoryInitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotAuthSettingsActiveDirectoryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSlotAuthSettingsActiveDirectoryObservation) DeepCopyInto(out *LinuxWebAppSlotAuthSettingsActiveDirectoryObservation) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotAuthSettingsActiveDirectoryObservation. +func (in *LinuxWebAppSlotAuthSettingsActiveDirectoryObservation) DeepCopy() *LinuxWebAppSlotAuthSettingsActiveDirectoryObservation { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotAuthSettingsActiveDirectoryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSlotAuthSettingsActiveDirectoryParameters) DeepCopyInto(out *LinuxWebAppSlotAuthSettingsActiveDirectoryParameters) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSecretRef != nil { + in, out := &in.ClientSecretSecretRef, &out.ClientSecretSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotAuthSettingsActiveDirectoryParameters. +func (in *LinuxWebAppSlotAuthSettingsActiveDirectoryParameters) DeepCopy() *LinuxWebAppSlotAuthSettingsActiveDirectoryParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotAuthSettingsActiveDirectoryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSlotAuthSettingsFacebookInitParameters) DeepCopyInto(out *LinuxWebAppSlotAuthSettingsFacebookInitParameters) { + *out = *in + if in.AppID != nil { + in, out := &in.AppID, &out.AppID + *out = new(string) + **out = **in + } + if in.AppSecretSettingName != nil { + in, out := &in.AppSecretSettingName, &out.AppSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotAuthSettingsFacebookInitParameters. +func (in *LinuxWebAppSlotAuthSettingsFacebookInitParameters) DeepCopy() *LinuxWebAppSlotAuthSettingsFacebookInitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotAuthSettingsFacebookInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSlotAuthSettingsFacebookObservation) DeepCopyInto(out *LinuxWebAppSlotAuthSettingsFacebookObservation) { + *out = *in + if in.AppID != nil { + in, out := &in.AppID, &out.AppID + *out = new(string) + **out = **in + } + if in.AppSecretSettingName != nil { + in, out := &in.AppSecretSettingName, &out.AppSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotAuthSettingsFacebookObservation. 
+func (in *LinuxWebAppSlotAuthSettingsFacebookObservation) DeepCopy() *LinuxWebAppSlotAuthSettingsFacebookObservation { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotAuthSettingsFacebookObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSlotAuthSettingsFacebookParameters) DeepCopyInto(out *LinuxWebAppSlotAuthSettingsFacebookParameters) { + *out = *in + if in.AppID != nil { + in, out := &in.AppID, &out.AppID + *out = new(string) + **out = **in + } + if in.AppSecretSecretRef != nil { + in, out := &in.AppSecretSecretRef, &out.AppSecretSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.AppSecretSettingName != nil { + in, out := &in.AppSecretSettingName, &out.AppSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotAuthSettingsFacebookParameters. +func (in *LinuxWebAppSlotAuthSettingsFacebookParameters) DeepCopy() *LinuxWebAppSlotAuthSettingsFacebookParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotAuthSettingsFacebookParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSlotAuthSettingsGithubInitParameters) DeepCopyInto(out *LinuxWebAppSlotAuthSettingsGithubInitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotAuthSettingsGithubInitParameters. +func (in *LinuxWebAppSlotAuthSettingsGithubInitParameters) DeepCopy() *LinuxWebAppSlotAuthSettingsGithubInitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotAuthSettingsGithubInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSlotAuthSettingsGithubObservation) DeepCopyInto(out *LinuxWebAppSlotAuthSettingsGithubObservation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotAuthSettingsGithubObservation. 
+func (in *LinuxWebAppSlotAuthSettingsGithubObservation) DeepCopy() *LinuxWebAppSlotAuthSettingsGithubObservation { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotAuthSettingsGithubObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSlotAuthSettingsGithubParameters) DeepCopyInto(out *LinuxWebAppSlotAuthSettingsGithubParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSecretRef != nil { + in, out := &in.ClientSecretSecretRef, &out.ClientSecretSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotAuthSettingsGithubParameters. +func (in *LinuxWebAppSlotAuthSettingsGithubParameters) DeepCopy() *LinuxWebAppSlotAuthSettingsGithubParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotAuthSettingsGithubParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSlotAuthSettingsGoogleInitParameters) DeepCopyInto(out *LinuxWebAppSlotAuthSettingsGoogleInitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotAuthSettingsGoogleInitParameters. +func (in *LinuxWebAppSlotAuthSettingsGoogleInitParameters) DeepCopy() *LinuxWebAppSlotAuthSettingsGoogleInitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotAuthSettingsGoogleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSlotAuthSettingsGoogleObservation) DeepCopyInto(out *LinuxWebAppSlotAuthSettingsGoogleObservation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotAuthSettingsGoogleObservation. 
+func (in *LinuxWebAppSlotAuthSettingsGoogleObservation) DeepCopy() *LinuxWebAppSlotAuthSettingsGoogleObservation { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotAuthSettingsGoogleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSlotAuthSettingsGoogleParameters) DeepCopyInto(out *LinuxWebAppSlotAuthSettingsGoogleParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSecretRef != nil { + in, out := &in.ClientSecretSecretRef, &out.ClientSecretSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotAuthSettingsGoogleParameters. +func (in *LinuxWebAppSlotAuthSettingsGoogleParameters) DeepCopy() *LinuxWebAppSlotAuthSettingsGoogleParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotAuthSettingsGoogleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSlotAuthSettingsInitParameters) DeepCopyInto(out *LinuxWebAppSlotAuthSettingsInitParameters) { + *out = *in + if in.ActiveDirectory != nil { + in, out := &in.ActiveDirectory, &out.ActiveDirectory + *out = new(LinuxWebAppSlotAuthSettingsActiveDirectoryInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AdditionalLoginParameters != nil { + in, out := &in.AdditionalLoginParameters, &out.AdditionalLoginParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AllowedExternalRedirectUrls != nil { + in, out := &in.AllowedExternalRedirectUrls, &out.AllowedExternalRedirectUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DefaultProvider != nil { + in, out := &in.DefaultProvider, &out.DefaultProvider + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Facebook != nil { + in, out := &in.Facebook, &out.Facebook + *out = new(LinuxWebAppSlotAuthSettingsFacebookInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Github != nil { + in, out := &in.Github, &out.Github + *out = new(LinuxWebAppSlotAuthSettingsGithubInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Google != nil { + in, out := &in.Google, &out.Google + *out = new(LinuxWebAppSlotAuthSettingsGoogleInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } + if in.Microsoft != nil { + in, out := &in.Microsoft, &out.Microsoft + *out = new(LinuxWebAppSlotAuthSettingsMicrosoftInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RuntimeVersion != nil { + in, 
out := &in.RuntimeVersion, &out.RuntimeVersion + *out = new(string) + **out = **in + } + if in.TokenRefreshExtensionHours != nil { + in, out := &in.TokenRefreshExtensionHours, &out.TokenRefreshExtensionHours + *out = new(float64) + **out = **in + } + if in.TokenStoreEnabled != nil { + in, out := &in.TokenStoreEnabled, &out.TokenStoreEnabled + *out = new(bool) + **out = **in + } + if in.Twitter != nil { + in, out := &in.Twitter, &out.Twitter + *out = new(LinuxWebAppSlotAuthSettingsTwitterInitParameters) + (*in).DeepCopyInto(*out) + } + if in.UnauthenticatedClientAction != nil { + in, out := &in.UnauthenticatedClientAction, &out.UnauthenticatedClientAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotAuthSettingsInitParameters. +func (in *LinuxWebAppSlotAuthSettingsInitParameters) DeepCopy() *LinuxWebAppSlotAuthSettingsInitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotAuthSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSlotAuthSettingsMicrosoftInitParameters) DeepCopyInto(out *LinuxWebAppSlotAuthSettingsMicrosoftInitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotAuthSettingsMicrosoftInitParameters. 
+func (in *LinuxWebAppSlotAuthSettingsMicrosoftInitParameters) DeepCopy() *LinuxWebAppSlotAuthSettingsMicrosoftInitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotAuthSettingsMicrosoftInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSlotAuthSettingsMicrosoftObservation) DeepCopyInto(out *LinuxWebAppSlotAuthSettingsMicrosoftObservation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotAuthSettingsMicrosoftObservation. +func (in *LinuxWebAppSlotAuthSettingsMicrosoftObservation) DeepCopy() *LinuxWebAppSlotAuthSettingsMicrosoftObservation { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotAuthSettingsMicrosoftObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSlotAuthSettingsMicrosoftParameters) DeepCopyInto(out *LinuxWebAppSlotAuthSettingsMicrosoftParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSecretRef != nil { + in, out := &in.ClientSecretSecretRef, &out.ClientSecretSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotAuthSettingsMicrosoftParameters. +func (in *LinuxWebAppSlotAuthSettingsMicrosoftParameters) DeepCopy() *LinuxWebAppSlotAuthSettingsMicrosoftParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotAuthSettingsMicrosoftParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSlotAuthSettingsObservation) DeepCopyInto(out *LinuxWebAppSlotAuthSettingsObservation) { + *out = *in + if in.ActiveDirectory != nil { + in, out := &in.ActiveDirectory, &out.ActiveDirectory + *out = new(LinuxWebAppSlotAuthSettingsActiveDirectoryObservation) + (*in).DeepCopyInto(*out) + } + if in.AdditionalLoginParameters != nil { + in, out := &in.AdditionalLoginParameters, &out.AdditionalLoginParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AllowedExternalRedirectUrls != nil { + in, out := &in.AllowedExternalRedirectUrls, &out.AllowedExternalRedirectUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DefaultProvider != nil { + in, out := &in.DefaultProvider, &out.DefaultProvider + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Facebook != nil { + in, out := &in.Facebook, &out.Facebook + *out = new(LinuxWebAppSlotAuthSettingsFacebookObservation) + (*in).DeepCopyInto(*out) + } + if in.Github != nil { + in, out := &in.Github, &out.Github + *out = new(LinuxWebAppSlotAuthSettingsGithubObservation) + (*in).DeepCopyInto(*out) + } + if in.Google != nil { + in, out := &in.Google, &out.Google + *out = new(LinuxWebAppSlotAuthSettingsGoogleObservation) + (*in).DeepCopyInto(*out) + } + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } + if in.Microsoft != nil { + in, out := &in.Microsoft, &out.Microsoft + *out = new(LinuxWebAppSlotAuthSettingsMicrosoftObservation) + (*in).DeepCopyInto(*out) + } + if in.RuntimeVersion != nil { + in, out := 
&in.RuntimeVersion, &out.RuntimeVersion + *out = new(string) + **out = **in + } + if in.TokenRefreshExtensionHours != nil { + in, out := &in.TokenRefreshExtensionHours, &out.TokenRefreshExtensionHours + *out = new(float64) + **out = **in + } + if in.TokenStoreEnabled != nil { + in, out := &in.TokenStoreEnabled, &out.TokenStoreEnabled + *out = new(bool) + **out = **in + } + if in.Twitter != nil { + in, out := &in.Twitter, &out.Twitter + *out = new(LinuxWebAppSlotAuthSettingsTwitterObservation) + (*in).DeepCopyInto(*out) + } + if in.UnauthenticatedClientAction != nil { + in, out := &in.UnauthenticatedClientAction, &out.UnauthenticatedClientAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotAuthSettingsObservation. +func (in *LinuxWebAppSlotAuthSettingsObservation) DeepCopy() *LinuxWebAppSlotAuthSettingsObservation { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotAuthSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSlotAuthSettingsParameters) DeepCopyInto(out *LinuxWebAppSlotAuthSettingsParameters) { + *out = *in + if in.ActiveDirectory != nil { + in, out := &in.ActiveDirectory, &out.ActiveDirectory + *out = new(LinuxWebAppSlotAuthSettingsActiveDirectoryParameters) + (*in).DeepCopyInto(*out) + } + if in.AdditionalLoginParameters != nil { + in, out := &in.AdditionalLoginParameters, &out.AdditionalLoginParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AllowedExternalRedirectUrls != nil { + in, out := &in.AllowedExternalRedirectUrls, &out.AllowedExternalRedirectUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DefaultProvider != nil { + in, out := &in.DefaultProvider, &out.DefaultProvider + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Facebook != nil { + in, out := &in.Facebook, &out.Facebook + *out = new(LinuxWebAppSlotAuthSettingsFacebookParameters) + (*in).DeepCopyInto(*out) + } + if in.Github != nil { + in, out := &in.Github, &out.Github + *out = new(LinuxWebAppSlotAuthSettingsGithubParameters) + (*in).DeepCopyInto(*out) + } + if in.Google != nil { + in, out := &in.Google, &out.Google + *out = new(LinuxWebAppSlotAuthSettingsGoogleParameters) + (*in).DeepCopyInto(*out) + } + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } + if in.Microsoft != nil { + in, out := &in.Microsoft, &out.Microsoft + *out = new(LinuxWebAppSlotAuthSettingsMicrosoftParameters) + (*in).DeepCopyInto(*out) + } + if in.RuntimeVersion != nil { + in, out := &in.RuntimeVersion, 
&out.RuntimeVersion + *out = new(string) + **out = **in + } + if in.TokenRefreshExtensionHours != nil { + in, out := &in.TokenRefreshExtensionHours, &out.TokenRefreshExtensionHours + *out = new(float64) + **out = **in + } + if in.TokenStoreEnabled != nil { + in, out := &in.TokenStoreEnabled, &out.TokenStoreEnabled + *out = new(bool) + **out = **in + } + if in.Twitter != nil { + in, out := &in.Twitter, &out.Twitter + *out = new(LinuxWebAppSlotAuthSettingsTwitterParameters) + (*in).DeepCopyInto(*out) + } + if in.UnauthenticatedClientAction != nil { + in, out := &in.UnauthenticatedClientAction, &out.UnauthenticatedClientAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotAuthSettingsParameters. +func (in *LinuxWebAppSlotAuthSettingsParameters) DeepCopy() *LinuxWebAppSlotAuthSettingsParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotAuthSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSlotAuthSettingsTwitterInitParameters) DeepCopyInto(out *LinuxWebAppSlotAuthSettingsTwitterInitParameters) { + *out = *in + if in.ConsumerKey != nil { + in, out := &in.ConsumerKey, &out.ConsumerKey + *out = new(string) + **out = **in + } + if in.ConsumerSecretSettingName != nil { + in, out := &in.ConsumerSecretSettingName, &out.ConsumerSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotAuthSettingsTwitterInitParameters. 
+func (in *LinuxWebAppSlotAuthSettingsTwitterInitParameters) DeepCopy() *LinuxWebAppSlotAuthSettingsTwitterInitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotAuthSettingsTwitterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSlotAuthSettingsTwitterObservation) DeepCopyInto(out *LinuxWebAppSlotAuthSettingsTwitterObservation) { + *out = *in + if in.ConsumerKey != nil { + in, out := &in.ConsumerKey, &out.ConsumerKey + *out = new(string) + **out = **in + } + if in.ConsumerSecretSettingName != nil { + in, out := &in.ConsumerSecretSettingName, &out.ConsumerSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotAuthSettingsTwitterObservation. +func (in *LinuxWebAppSlotAuthSettingsTwitterObservation) DeepCopy() *LinuxWebAppSlotAuthSettingsTwitterObservation { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotAuthSettingsTwitterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSlotAuthSettingsTwitterParameters) DeepCopyInto(out *LinuxWebAppSlotAuthSettingsTwitterParameters) { + *out = *in + if in.ConsumerKey != nil { + in, out := &in.ConsumerKey, &out.ConsumerKey + *out = new(string) + **out = **in + } + if in.ConsumerSecretSecretRef != nil { + in, out := &in.ConsumerSecretSecretRef, &out.ConsumerSecretSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ConsumerSecretSettingName != nil { + in, out := &in.ConsumerSecretSettingName, &out.ConsumerSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotAuthSettingsTwitterParameters. +func (in *LinuxWebAppSlotAuthSettingsTwitterParameters) DeepCopy() *LinuxWebAppSlotAuthSettingsTwitterParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotAuthSettingsTwitterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSlotAuthSettingsV2ActiveDirectoryV2InitParameters) DeepCopyInto(out *LinuxWebAppSlotAuthSettingsV2ActiveDirectoryV2InitParameters) { + *out = *in + if in.AllowedApplications != nil { + in, out := &in.AllowedApplications, &out.AllowedApplications + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedGroups != nil { + in, out := &in.AllowedGroups, &out.AllowedGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedIdentities != nil { + in, out := &in.AllowedIdentities, &out.AllowedIdentities + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretCertificateThumbprint != nil { + in, out := &in.ClientSecretCertificateThumbprint, &out.ClientSecretCertificateThumbprint + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.JwtAllowedClientApplications != nil { + in, out := &in.JwtAllowedClientApplications, &out.JwtAllowedClientApplications + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.JwtAllowedGroups != nil { + in, out := &in.JwtAllowedGroups, 
&out.JwtAllowedGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LoginParameters != nil { + in, out := &in.LoginParameters, &out.LoginParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TenantAuthEndpoint != nil { + in, out := &in.TenantAuthEndpoint, &out.TenantAuthEndpoint + *out = new(string) + **out = **in + } + if in.WwwAuthenticationDisabled != nil { + in, out := &in.WwwAuthenticationDisabled, &out.WwwAuthenticationDisabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotAuthSettingsV2ActiveDirectoryV2InitParameters. +func (in *LinuxWebAppSlotAuthSettingsV2ActiveDirectoryV2InitParameters) DeepCopy() *LinuxWebAppSlotAuthSettingsV2ActiveDirectoryV2InitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotAuthSettingsV2ActiveDirectoryV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSlotAuthSettingsV2ActiveDirectoryV2Observation) DeepCopyInto(out *LinuxWebAppSlotAuthSettingsV2ActiveDirectoryV2Observation) { + *out = *in + if in.AllowedApplications != nil { + in, out := &in.AllowedApplications, &out.AllowedApplications + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedGroups != nil { + in, out := &in.AllowedGroups, &out.AllowedGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedIdentities != nil { + in, out := &in.AllowedIdentities, &out.AllowedIdentities + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretCertificateThumbprint != nil { + in, out := &in.ClientSecretCertificateThumbprint, &out.ClientSecretCertificateThumbprint + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.JwtAllowedClientApplications != nil { + in, out := &in.JwtAllowedClientApplications, &out.JwtAllowedClientApplications + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.JwtAllowedGroups != nil { + in, out := &in.JwtAllowedGroups, 
&out.JwtAllowedGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LoginParameters != nil { + in, out := &in.LoginParameters, &out.LoginParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TenantAuthEndpoint != nil { + in, out := &in.TenantAuthEndpoint, &out.TenantAuthEndpoint + *out = new(string) + **out = **in + } + if in.WwwAuthenticationDisabled != nil { + in, out := &in.WwwAuthenticationDisabled, &out.WwwAuthenticationDisabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotAuthSettingsV2ActiveDirectoryV2Observation. +func (in *LinuxWebAppSlotAuthSettingsV2ActiveDirectoryV2Observation) DeepCopy() *LinuxWebAppSlotAuthSettingsV2ActiveDirectoryV2Observation { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotAuthSettingsV2ActiveDirectoryV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSlotAuthSettingsV2ActiveDirectoryV2Parameters) DeepCopyInto(out *LinuxWebAppSlotAuthSettingsV2ActiveDirectoryV2Parameters) { + *out = *in + if in.AllowedApplications != nil { + in, out := &in.AllowedApplications, &out.AllowedApplications + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedGroups != nil { + in, out := &in.AllowedGroups, &out.AllowedGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedIdentities != nil { + in, out := &in.AllowedIdentities, &out.AllowedIdentities + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretCertificateThumbprint != nil { + in, out := &in.ClientSecretCertificateThumbprint, &out.ClientSecretCertificateThumbprint + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.JwtAllowedClientApplications != nil { + in, out := &in.JwtAllowedClientApplications, &out.JwtAllowedClientApplications + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.JwtAllowedGroups != nil { + in, out := &in.JwtAllowedGroups, 
&out.JwtAllowedGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LoginParameters != nil { + in, out := &in.LoginParameters, &out.LoginParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TenantAuthEndpoint != nil { + in, out := &in.TenantAuthEndpoint, &out.TenantAuthEndpoint + *out = new(string) + **out = **in + } + if in.WwwAuthenticationDisabled != nil { + in, out := &in.WwwAuthenticationDisabled, &out.WwwAuthenticationDisabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotAuthSettingsV2ActiveDirectoryV2Parameters. +func (in *LinuxWebAppSlotAuthSettingsV2ActiveDirectoryV2Parameters) DeepCopy() *LinuxWebAppSlotAuthSettingsV2ActiveDirectoryV2Parameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotAuthSettingsV2ActiveDirectoryV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSlotAuthSettingsV2AppleV2InitParameters) DeepCopyInto(out *LinuxWebAppSlotAuthSettingsV2AppleV2InitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotAuthSettingsV2AppleV2InitParameters. 
+func (in *LinuxWebAppSlotAuthSettingsV2AppleV2InitParameters) DeepCopy() *LinuxWebAppSlotAuthSettingsV2AppleV2InitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotAuthSettingsV2AppleV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSlotAuthSettingsV2AppleV2Observation) DeepCopyInto(out *LinuxWebAppSlotAuthSettingsV2AppleV2Observation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotAuthSettingsV2AppleV2Observation. +func (in *LinuxWebAppSlotAuthSettingsV2AppleV2Observation) DeepCopy() *LinuxWebAppSlotAuthSettingsV2AppleV2Observation { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotAuthSettingsV2AppleV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSlotAuthSettingsV2AppleV2Parameters) DeepCopyInto(out *LinuxWebAppSlotAuthSettingsV2AppleV2Parameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotAuthSettingsV2AppleV2Parameters. +func (in *LinuxWebAppSlotAuthSettingsV2AppleV2Parameters) DeepCopy() *LinuxWebAppSlotAuthSettingsV2AppleV2Parameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotAuthSettingsV2AppleV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSlotAuthSettingsV2AzureStaticWebAppV2InitParameters) DeepCopyInto(out *LinuxWebAppSlotAuthSettingsV2AzureStaticWebAppV2InitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotAuthSettingsV2AzureStaticWebAppV2InitParameters. +func (in *LinuxWebAppSlotAuthSettingsV2AzureStaticWebAppV2InitParameters) DeepCopy() *LinuxWebAppSlotAuthSettingsV2AzureStaticWebAppV2InitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotAuthSettingsV2AzureStaticWebAppV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSlotAuthSettingsV2AzureStaticWebAppV2Observation) DeepCopyInto(out *LinuxWebAppSlotAuthSettingsV2AzureStaticWebAppV2Observation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotAuthSettingsV2AzureStaticWebAppV2Observation. +func (in *LinuxWebAppSlotAuthSettingsV2AzureStaticWebAppV2Observation) DeepCopy() *LinuxWebAppSlotAuthSettingsV2AzureStaticWebAppV2Observation { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotAuthSettingsV2AzureStaticWebAppV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSlotAuthSettingsV2AzureStaticWebAppV2Parameters) DeepCopyInto(out *LinuxWebAppSlotAuthSettingsV2AzureStaticWebAppV2Parameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotAuthSettingsV2AzureStaticWebAppV2Parameters. +func (in *LinuxWebAppSlotAuthSettingsV2AzureStaticWebAppV2Parameters) DeepCopy() *LinuxWebAppSlotAuthSettingsV2AzureStaticWebAppV2Parameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotAuthSettingsV2AzureStaticWebAppV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSlotAuthSettingsV2CustomOidcV2InitParameters) DeepCopyInto(out *LinuxWebAppSlotAuthSettingsV2CustomOidcV2InitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NameClaimType != nil { + in, out := &in.NameClaimType, &out.NameClaimType + *out = new(string) + **out = **in + } + if in.OpenIDConfigurationEndpoint != nil { + in, out := &in.OpenIDConfigurationEndpoint, &out.OpenIDConfigurationEndpoint + *out = new(string) + **out = **in + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotAuthSettingsV2CustomOidcV2InitParameters. +func (in *LinuxWebAppSlotAuthSettingsV2CustomOidcV2InitParameters) DeepCopy() *LinuxWebAppSlotAuthSettingsV2CustomOidcV2InitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotAuthSettingsV2CustomOidcV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSlotAuthSettingsV2CustomOidcV2Observation) DeepCopyInto(out *LinuxWebAppSlotAuthSettingsV2CustomOidcV2Observation) { + *out = *in + if in.AuthorisationEndpoint != nil { + in, out := &in.AuthorisationEndpoint, &out.AuthorisationEndpoint + *out = new(string) + **out = **in + } + if in.CertificationURI != nil { + in, out := &in.CertificationURI, &out.CertificationURI + *out = new(string) + **out = **in + } + if in.ClientCredentialMethod != nil { + in, out := &in.ClientCredentialMethod, &out.ClientCredentialMethod + *out = new(string) + **out = **in + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.IssuerEndpoint != nil { + in, out := &in.IssuerEndpoint, &out.IssuerEndpoint + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NameClaimType != nil { + in, out := &in.NameClaimType, &out.NameClaimType + *out = new(string) + **out = **in + } + if in.OpenIDConfigurationEndpoint != nil { + in, out := &in.OpenIDConfigurationEndpoint, &out.OpenIDConfigurationEndpoint + *out = new(string) + **out = **in + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TokenEndpoint != nil { + in, out := &in.TokenEndpoint, &out.TokenEndpoint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotAuthSettingsV2CustomOidcV2Observation. 
+func (in *LinuxWebAppSlotAuthSettingsV2CustomOidcV2Observation) DeepCopy() *LinuxWebAppSlotAuthSettingsV2CustomOidcV2Observation { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotAuthSettingsV2CustomOidcV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSlotAuthSettingsV2CustomOidcV2Parameters) DeepCopyInto(out *LinuxWebAppSlotAuthSettingsV2CustomOidcV2Parameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NameClaimType != nil { + in, out := &in.NameClaimType, &out.NameClaimType + *out = new(string) + **out = **in + } + if in.OpenIDConfigurationEndpoint != nil { + in, out := &in.OpenIDConfigurationEndpoint, &out.OpenIDConfigurationEndpoint + *out = new(string) + **out = **in + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotAuthSettingsV2CustomOidcV2Parameters. +func (in *LinuxWebAppSlotAuthSettingsV2CustomOidcV2Parameters) DeepCopy() *LinuxWebAppSlotAuthSettingsV2CustomOidcV2Parameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotAuthSettingsV2CustomOidcV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSlotAuthSettingsV2FacebookV2InitParameters) DeepCopyInto(out *LinuxWebAppSlotAuthSettingsV2FacebookV2InitParameters) { + *out = *in + if in.AppID != nil { + in, out := &in.AppID, &out.AppID + *out = new(string) + **out = **in + } + if in.AppSecretSettingName != nil { + in, out := &in.AppSecretSettingName, &out.AppSecretSettingName + *out = new(string) + **out = **in + } + if in.GraphAPIVersion != nil { + in, out := &in.GraphAPIVersion, &out.GraphAPIVersion + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotAuthSettingsV2FacebookV2InitParameters. +func (in *LinuxWebAppSlotAuthSettingsV2FacebookV2InitParameters) DeepCopy() *LinuxWebAppSlotAuthSettingsV2FacebookV2InitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotAuthSettingsV2FacebookV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSlotAuthSettingsV2FacebookV2Observation) DeepCopyInto(out *LinuxWebAppSlotAuthSettingsV2FacebookV2Observation) { + *out = *in + if in.AppID != nil { + in, out := &in.AppID, &out.AppID + *out = new(string) + **out = **in + } + if in.AppSecretSettingName != nil { + in, out := &in.AppSecretSettingName, &out.AppSecretSettingName + *out = new(string) + **out = **in + } + if in.GraphAPIVersion != nil { + in, out := &in.GraphAPIVersion, &out.GraphAPIVersion + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotAuthSettingsV2FacebookV2Observation. +func (in *LinuxWebAppSlotAuthSettingsV2FacebookV2Observation) DeepCopy() *LinuxWebAppSlotAuthSettingsV2FacebookV2Observation { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotAuthSettingsV2FacebookV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSlotAuthSettingsV2FacebookV2Parameters) DeepCopyInto(out *LinuxWebAppSlotAuthSettingsV2FacebookV2Parameters) { + *out = *in + if in.AppID != nil { + in, out := &in.AppID, &out.AppID + *out = new(string) + **out = **in + } + if in.AppSecretSettingName != nil { + in, out := &in.AppSecretSettingName, &out.AppSecretSettingName + *out = new(string) + **out = **in + } + if in.GraphAPIVersion != nil { + in, out := &in.GraphAPIVersion, &out.GraphAPIVersion + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotAuthSettingsV2FacebookV2Parameters. +func (in *LinuxWebAppSlotAuthSettingsV2FacebookV2Parameters) DeepCopy() *LinuxWebAppSlotAuthSettingsV2FacebookV2Parameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotAuthSettingsV2FacebookV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSlotAuthSettingsV2GithubV2InitParameters) DeepCopyInto(out *LinuxWebAppSlotAuthSettingsV2GithubV2InitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotAuthSettingsV2GithubV2InitParameters. +func (in *LinuxWebAppSlotAuthSettingsV2GithubV2InitParameters) DeepCopy() *LinuxWebAppSlotAuthSettingsV2GithubV2InitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotAuthSettingsV2GithubV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSlotAuthSettingsV2GithubV2Observation) DeepCopyInto(out *LinuxWebAppSlotAuthSettingsV2GithubV2Observation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotAuthSettingsV2GithubV2Observation. 
+func (in *LinuxWebAppSlotAuthSettingsV2GithubV2Observation) DeepCopy() *LinuxWebAppSlotAuthSettingsV2GithubV2Observation { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotAuthSettingsV2GithubV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSlotAuthSettingsV2GithubV2Parameters) DeepCopyInto(out *LinuxWebAppSlotAuthSettingsV2GithubV2Parameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotAuthSettingsV2GithubV2Parameters. +func (in *LinuxWebAppSlotAuthSettingsV2GithubV2Parameters) DeepCopy() *LinuxWebAppSlotAuthSettingsV2GithubV2Parameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotAuthSettingsV2GithubV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSlotAuthSettingsV2GoogleV2InitParameters) DeepCopyInto(out *LinuxWebAppSlotAuthSettingsV2GoogleV2InitParameters) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotAuthSettingsV2GoogleV2InitParameters. +func (in *LinuxWebAppSlotAuthSettingsV2GoogleV2InitParameters) DeepCopy() *LinuxWebAppSlotAuthSettingsV2GoogleV2InitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotAuthSettingsV2GoogleV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSlotAuthSettingsV2GoogleV2Observation) DeepCopyInto(out *LinuxWebAppSlotAuthSettingsV2GoogleV2Observation) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotAuthSettingsV2GoogleV2Observation. +func (in *LinuxWebAppSlotAuthSettingsV2GoogleV2Observation) DeepCopy() *LinuxWebAppSlotAuthSettingsV2GoogleV2Observation { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotAuthSettingsV2GoogleV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSlotAuthSettingsV2GoogleV2Parameters) DeepCopyInto(out *LinuxWebAppSlotAuthSettingsV2GoogleV2Parameters) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotAuthSettingsV2GoogleV2Parameters. +func (in *LinuxWebAppSlotAuthSettingsV2GoogleV2Parameters) DeepCopy() *LinuxWebAppSlotAuthSettingsV2GoogleV2Parameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotAuthSettingsV2GoogleV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSlotAuthSettingsV2InitParameters) DeepCopyInto(out *LinuxWebAppSlotAuthSettingsV2InitParameters) { + *out = *in + if in.ActiveDirectoryV2 != nil { + in, out := &in.ActiveDirectoryV2, &out.ActiveDirectoryV2 + *out = new(LinuxWebAppSlotAuthSettingsV2ActiveDirectoryV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.AppleV2 != nil { + in, out := &in.AppleV2, &out.AppleV2 + *out = new(LinuxWebAppSlotAuthSettingsV2AppleV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.AuthEnabled != nil { + in, out := &in.AuthEnabled, &out.AuthEnabled + *out = new(bool) + **out = **in + } + if in.AzureStaticWebAppV2 != nil { + in, out := &in.AzureStaticWebAppV2, &out.AzureStaticWebAppV2 + *out = new(LinuxWebAppSlotAuthSettingsV2AzureStaticWebAppV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.ConfigFilePath != nil { + in, out := &in.ConfigFilePath, &out.ConfigFilePath + *out = new(string) + **out = **in + } + if in.CustomOidcV2 != nil { + in, out := &in.CustomOidcV2, &out.CustomOidcV2 + *out = make([]LinuxWebAppSlotAuthSettingsV2CustomOidcV2InitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultProvider != nil { + in, out := &in.DefaultProvider, &out.DefaultProvider + *out = new(string) + **out = **in + } + if in.ExcludedPaths != nil { + in, out := &in.ExcludedPaths, &out.ExcludedPaths + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FacebookV2 != nil { + in, out := &in.FacebookV2, &out.FacebookV2 + *out = new(LinuxWebAppSlotAuthSettingsV2FacebookV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.ForwardProxyConvention != nil { + in, out := &in.ForwardProxyConvention, &out.ForwardProxyConvention + *out = new(string) + **out = **in + } + if in.ForwardProxyCustomHostHeaderName != nil { + in, out := &in.ForwardProxyCustomHostHeaderName, 
&out.ForwardProxyCustomHostHeaderName + *out = new(string) + **out = **in + } + if in.ForwardProxyCustomSchemeHeaderName != nil { + in, out := &in.ForwardProxyCustomSchemeHeaderName, &out.ForwardProxyCustomSchemeHeaderName + *out = new(string) + **out = **in + } + if in.GithubV2 != nil { + in, out := &in.GithubV2, &out.GithubV2 + *out = new(LinuxWebAppSlotAuthSettingsV2GithubV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.GoogleV2 != nil { + in, out := &in.GoogleV2, &out.GoogleV2 + *out = new(LinuxWebAppSlotAuthSettingsV2GoogleV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.HTTPRouteAPIPrefix != nil { + in, out := &in.HTTPRouteAPIPrefix, &out.HTTPRouteAPIPrefix + *out = new(string) + **out = **in + } + if in.Login != nil { + in, out := &in.Login, &out.Login + *out = new(LinuxWebAppSlotAuthSettingsV2LoginInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MicrosoftV2 != nil { + in, out := &in.MicrosoftV2, &out.MicrosoftV2 + *out = new(LinuxWebAppSlotAuthSettingsV2MicrosoftV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.RequireAuthentication != nil { + in, out := &in.RequireAuthentication, &out.RequireAuthentication + *out = new(bool) + **out = **in + } + if in.RequireHTTPS != nil { + in, out := &in.RequireHTTPS, &out.RequireHTTPS + *out = new(bool) + **out = **in + } + if in.RuntimeVersion != nil { + in, out := &in.RuntimeVersion, &out.RuntimeVersion + *out = new(string) + **out = **in + } + if in.TwitterV2 != nil { + in, out := &in.TwitterV2, &out.TwitterV2 + *out = new(LinuxWebAppSlotAuthSettingsV2TwitterV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.UnauthenticatedAction != nil { + in, out := &in.UnauthenticatedAction, &out.UnauthenticatedAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotAuthSettingsV2InitParameters. 
+func (in *LinuxWebAppSlotAuthSettingsV2InitParameters) DeepCopy() *LinuxWebAppSlotAuthSettingsV2InitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotAuthSettingsV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSlotAuthSettingsV2LoginInitParameters) DeepCopyInto(out *LinuxWebAppSlotAuthSettingsV2LoginInitParameters) { + *out = *in + if in.AllowedExternalRedirectUrls != nil { + in, out := &in.AllowedExternalRedirectUrls, &out.AllowedExternalRedirectUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CookieExpirationConvention != nil { + in, out := &in.CookieExpirationConvention, &out.CookieExpirationConvention + *out = new(string) + **out = **in + } + if in.CookieExpirationTime != nil { + in, out := &in.CookieExpirationTime, &out.CookieExpirationTime + *out = new(string) + **out = **in + } + if in.LogoutEndpoint != nil { + in, out := &in.LogoutEndpoint, &out.LogoutEndpoint + *out = new(string) + **out = **in + } + if in.NonceExpirationTime != nil { + in, out := &in.NonceExpirationTime, &out.NonceExpirationTime + *out = new(string) + **out = **in + } + if in.PreserveURLFragmentsForLogins != nil { + in, out := &in.PreserveURLFragmentsForLogins, &out.PreserveURLFragmentsForLogins + *out = new(bool) + **out = **in + } + if in.TokenRefreshExtensionTime != nil { + in, out := &in.TokenRefreshExtensionTime, &out.TokenRefreshExtensionTime + *out = new(float64) + **out = **in + } + if in.TokenStoreEnabled != nil { + in, out := &in.TokenStoreEnabled, &out.TokenStoreEnabled + *out = new(bool) + **out = **in + } + if in.TokenStorePath != nil { + in, out := &in.TokenStorePath, &out.TokenStorePath + *out = new(string) + **out = **in + } + if in.TokenStoreSASSettingName != nil { + in, out 
:= &in.TokenStoreSASSettingName, &out.TokenStoreSASSettingName + *out = new(string) + **out = **in + } + if in.ValidateNonce != nil { + in, out := &in.ValidateNonce, &out.ValidateNonce + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotAuthSettingsV2LoginInitParameters. +func (in *LinuxWebAppSlotAuthSettingsV2LoginInitParameters) DeepCopy() *LinuxWebAppSlotAuthSettingsV2LoginInitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotAuthSettingsV2LoginInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSlotAuthSettingsV2LoginObservation) DeepCopyInto(out *LinuxWebAppSlotAuthSettingsV2LoginObservation) { + *out = *in + if in.AllowedExternalRedirectUrls != nil { + in, out := &in.AllowedExternalRedirectUrls, &out.AllowedExternalRedirectUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CookieExpirationConvention != nil { + in, out := &in.CookieExpirationConvention, &out.CookieExpirationConvention + *out = new(string) + **out = **in + } + if in.CookieExpirationTime != nil { + in, out := &in.CookieExpirationTime, &out.CookieExpirationTime + *out = new(string) + **out = **in + } + if in.LogoutEndpoint != nil { + in, out := &in.LogoutEndpoint, &out.LogoutEndpoint + *out = new(string) + **out = **in + } + if in.NonceExpirationTime != nil { + in, out := &in.NonceExpirationTime, &out.NonceExpirationTime + *out = new(string) + **out = **in + } + if in.PreserveURLFragmentsForLogins != nil { + in, out := &in.PreserveURLFragmentsForLogins, &out.PreserveURLFragmentsForLogins + *out = new(bool) + **out = **in + } + if in.TokenRefreshExtensionTime != nil { + in, out := &in.TokenRefreshExtensionTime, 
&out.TokenRefreshExtensionTime + *out = new(float64) + **out = **in + } + if in.TokenStoreEnabled != nil { + in, out := &in.TokenStoreEnabled, &out.TokenStoreEnabled + *out = new(bool) + **out = **in + } + if in.TokenStorePath != nil { + in, out := &in.TokenStorePath, &out.TokenStorePath + *out = new(string) + **out = **in + } + if in.TokenStoreSASSettingName != nil { + in, out := &in.TokenStoreSASSettingName, &out.TokenStoreSASSettingName + *out = new(string) + **out = **in + } + if in.ValidateNonce != nil { + in, out := &in.ValidateNonce, &out.ValidateNonce + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotAuthSettingsV2LoginObservation. +func (in *LinuxWebAppSlotAuthSettingsV2LoginObservation) DeepCopy() *LinuxWebAppSlotAuthSettingsV2LoginObservation { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotAuthSettingsV2LoginObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSlotAuthSettingsV2LoginParameters) DeepCopyInto(out *LinuxWebAppSlotAuthSettingsV2LoginParameters) { + *out = *in + if in.AllowedExternalRedirectUrls != nil { + in, out := &in.AllowedExternalRedirectUrls, &out.AllowedExternalRedirectUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CookieExpirationConvention != nil { + in, out := &in.CookieExpirationConvention, &out.CookieExpirationConvention + *out = new(string) + **out = **in + } + if in.CookieExpirationTime != nil { + in, out := &in.CookieExpirationTime, &out.CookieExpirationTime + *out = new(string) + **out = **in + } + if in.LogoutEndpoint != nil { + in, out := &in.LogoutEndpoint, &out.LogoutEndpoint + *out = new(string) + **out = **in + } + if in.NonceExpirationTime != nil { + in, out := &in.NonceExpirationTime, &out.NonceExpirationTime + *out = new(string) + **out = **in + } + if in.PreserveURLFragmentsForLogins != nil { + in, out := &in.PreserveURLFragmentsForLogins, &out.PreserveURLFragmentsForLogins + *out = new(bool) + **out = **in + } + if in.TokenRefreshExtensionTime != nil { + in, out := &in.TokenRefreshExtensionTime, &out.TokenRefreshExtensionTime + *out = new(float64) + **out = **in + } + if in.TokenStoreEnabled != nil { + in, out := &in.TokenStoreEnabled, &out.TokenStoreEnabled + *out = new(bool) + **out = **in + } + if in.TokenStorePath != nil { + in, out := &in.TokenStorePath, &out.TokenStorePath + *out = new(string) + **out = **in + } + if in.TokenStoreSASSettingName != nil { + in, out := &in.TokenStoreSASSettingName, &out.TokenStoreSASSettingName + *out = new(string) + **out = **in + } + if in.ValidateNonce != nil { + in, out := &in.ValidateNonce, &out.ValidateNonce + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotAuthSettingsV2LoginParameters. 
+func (in *LinuxWebAppSlotAuthSettingsV2LoginParameters) DeepCopy() *LinuxWebAppSlotAuthSettingsV2LoginParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotAuthSettingsV2LoginParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSlotAuthSettingsV2MicrosoftV2InitParameters) DeepCopyInto(out *LinuxWebAppSlotAuthSettingsV2MicrosoftV2InitParameters) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotAuthSettingsV2MicrosoftV2InitParameters. +func (in *LinuxWebAppSlotAuthSettingsV2MicrosoftV2InitParameters) DeepCopy() *LinuxWebAppSlotAuthSettingsV2MicrosoftV2InitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotAuthSettingsV2MicrosoftV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSlotAuthSettingsV2MicrosoftV2Observation) DeepCopyInto(out *LinuxWebAppSlotAuthSettingsV2MicrosoftV2Observation) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotAuthSettingsV2MicrosoftV2Observation. +func (in *LinuxWebAppSlotAuthSettingsV2MicrosoftV2Observation) DeepCopy() *LinuxWebAppSlotAuthSettingsV2MicrosoftV2Observation { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotAuthSettingsV2MicrosoftV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSlotAuthSettingsV2MicrosoftV2Parameters) DeepCopyInto(out *LinuxWebAppSlotAuthSettingsV2MicrosoftV2Parameters) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotAuthSettingsV2MicrosoftV2Parameters. +func (in *LinuxWebAppSlotAuthSettingsV2MicrosoftV2Parameters) DeepCopy() *LinuxWebAppSlotAuthSettingsV2MicrosoftV2Parameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotAuthSettingsV2MicrosoftV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSlotAuthSettingsV2Observation) DeepCopyInto(out *LinuxWebAppSlotAuthSettingsV2Observation) { + *out = *in + if in.ActiveDirectoryV2 != nil { + in, out := &in.ActiveDirectoryV2, &out.ActiveDirectoryV2 + *out = new(LinuxWebAppSlotAuthSettingsV2ActiveDirectoryV2Observation) + (*in).DeepCopyInto(*out) + } + if in.AppleV2 != nil { + in, out := &in.AppleV2, &out.AppleV2 + *out = new(LinuxWebAppSlotAuthSettingsV2AppleV2Observation) + (*in).DeepCopyInto(*out) + } + if in.AuthEnabled != nil { + in, out := &in.AuthEnabled, &out.AuthEnabled + *out = new(bool) + **out = **in + } + if in.AzureStaticWebAppV2 != nil { + in, out := &in.AzureStaticWebAppV2, &out.AzureStaticWebAppV2 + *out = new(LinuxWebAppSlotAuthSettingsV2AzureStaticWebAppV2Observation) + (*in).DeepCopyInto(*out) + } + if in.ConfigFilePath != nil { + in, out := &in.ConfigFilePath, &out.ConfigFilePath + *out = new(string) + **out = **in + } + if in.CustomOidcV2 != nil { + in, out := &in.CustomOidcV2, &out.CustomOidcV2 + *out = make([]LinuxWebAppSlotAuthSettingsV2CustomOidcV2Observation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultProvider != nil { + in, out := &in.DefaultProvider, &out.DefaultProvider + *out = new(string) + **out = **in + } + if in.ExcludedPaths != nil { + in, out := &in.ExcludedPaths, &out.ExcludedPaths + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FacebookV2 != nil { + in, out := &in.FacebookV2, &out.FacebookV2 + *out = new(LinuxWebAppSlotAuthSettingsV2FacebookV2Observation) + (*in).DeepCopyInto(*out) + } + if in.ForwardProxyConvention != nil { + in, out := &in.ForwardProxyConvention, &out.ForwardProxyConvention + *out = new(string) + **out = **in + } + if in.ForwardProxyCustomHostHeaderName != nil { + in, out := &in.ForwardProxyCustomHostHeaderName, &out.ForwardProxyCustomHostHeaderName + *out = 
new(string) + **out = **in + } + if in.ForwardProxyCustomSchemeHeaderName != nil { + in, out := &in.ForwardProxyCustomSchemeHeaderName, &out.ForwardProxyCustomSchemeHeaderName + *out = new(string) + **out = **in + } + if in.GithubV2 != nil { + in, out := &in.GithubV2, &out.GithubV2 + *out = new(LinuxWebAppSlotAuthSettingsV2GithubV2Observation) + (*in).DeepCopyInto(*out) + } + if in.GoogleV2 != nil { + in, out := &in.GoogleV2, &out.GoogleV2 + *out = new(LinuxWebAppSlotAuthSettingsV2GoogleV2Observation) + (*in).DeepCopyInto(*out) + } + if in.HTTPRouteAPIPrefix != nil { + in, out := &in.HTTPRouteAPIPrefix, &out.HTTPRouteAPIPrefix + *out = new(string) + **out = **in + } + if in.Login != nil { + in, out := &in.Login, &out.Login + *out = new(LinuxWebAppSlotAuthSettingsV2LoginObservation) + (*in).DeepCopyInto(*out) + } + if in.MicrosoftV2 != nil { + in, out := &in.MicrosoftV2, &out.MicrosoftV2 + *out = new(LinuxWebAppSlotAuthSettingsV2MicrosoftV2Observation) + (*in).DeepCopyInto(*out) + } + if in.RequireAuthentication != nil { + in, out := &in.RequireAuthentication, &out.RequireAuthentication + *out = new(bool) + **out = **in + } + if in.RequireHTTPS != nil { + in, out := &in.RequireHTTPS, &out.RequireHTTPS + *out = new(bool) + **out = **in + } + if in.RuntimeVersion != nil { + in, out := &in.RuntimeVersion, &out.RuntimeVersion + *out = new(string) + **out = **in + } + if in.TwitterV2 != nil { + in, out := &in.TwitterV2, &out.TwitterV2 + *out = new(LinuxWebAppSlotAuthSettingsV2TwitterV2Observation) + (*in).DeepCopyInto(*out) + } + if in.UnauthenticatedAction != nil { + in, out := &in.UnauthenticatedAction, &out.UnauthenticatedAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotAuthSettingsV2Observation. 
+func (in *LinuxWebAppSlotAuthSettingsV2Observation) DeepCopy() *LinuxWebAppSlotAuthSettingsV2Observation { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotAuthSettingsV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSlotAuthSettingsV2Parameters) DeepCopyInto(out *LinuxWebAppSlotAuthSettingsV2Parameters) { + *out = *in + if in.ActiveDirectoryV2 != nil { + in, out := &in.ActiveDirectoryV2, &out.ActiveDirectoryV2 + *out = new(LinuxWebAppSlotAuthSettingsV2ActiveDirectoryV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.AppleV2 != nil { + in, out := &in.AppleV2, &out.AppleV2 + *out = new(LinuxWebAppSlotAuthSettingsV2AppleV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.AuthEnabled != nil { + in, out := &in.AuthEnabled, &out.AuthEnabled + *out = new(bool) + **out = **in + } + if in.AzureStaticWebAppV2 != nil { + in, out := &in.AzureStaticWebAppV2, &out.AzureStaticWebAppV2 + *out = new(LinuxWebAppSlotAuthSettingsV2AzureStaticWebAppV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.ConfigFilePath != nil { + in, out := &in.ConfigFilePath, &out.ConfigFilePath + *out = new(string) + **out = **in + } + if in.CustomOidcV2 != nil { + in, out := &in.CustomOidcV2, &out.CustomOidcV2 + *out = make([]LinuxWebAppSlotAuthSettingsV2CustomOidcV2Parameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultProvider != nil { + in, out := &in.DefaultProvider, &out.DefaultProvider + *out = new(string) + **out = **in + } + if in.ExcludedPaths != nil { + in, out := &in.ExcludedPaths, &out.ExcludedPaths + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FacebookV2 != nil { + in, out := &in.FacebookV2, &out.FacebookV2 + *out = 
new(LinuxWebAppSlotAuthSettingsV2FacebookV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.ForwardProxyConvention != nil { + in, out := &in.ForwardProxyConvention, &out.ForwardProxyConvention + *out = new(string) + **out = **in + } + if in.ForwardProxyCustomHostHeaderName != nil { + in, out := &in.ForwardProxyCustomHostHeaderName, &out.ForwardProxyCustomHostHeaderName + *out = new(string) + **out = **in + } + if in.ForwardProxyCustomSchemeHeaderName != nil { + in, out := &in.ForwardProxyCustomSchemeHeaderName, &out.ForwardProxyCustomSchemeHeaderName + *out = new(string) + **out = **in + } + if in.GithubV2 != nil { + in, out := &in.GithubV2, &out.GithubV2 + *out = new(LinuxWebAppSlotAuthSettingsV2GithubV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.GoogleV2 != nil { + in, out := &in.GoogleV2, &out.GoogleV2 + *out = new(LinuxWebAppSlotAuthSettingsV2GoogleV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.HTTPRouteAPIPrefix != nil { + in, out := &in.HTTPRouteAPIPrefix, &out.HTTPRouteAPIPrefix + *out = new(string) + **out = **in + } + if in.Login != nil { + in, out := &in.Login, &out.Login + *out = new(LinuxWebAppSlotAuthSettingsV2LoginParameters) + (*in).DeepCopyInto(*out) + } + if in.MicrosoftV2 != nil { + in, out := &in.MicrosoftV2, &out.MicrosoftV2 + *out = new(LinuxWebAppSlotAuthSettingsV2MicrosoftV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.RequireAuthentication != nil { + in, out := &in.RequireAuthentication, &out.RequireAuthentication + *out = new(bool) + **out = **in + } + if in.RequireHTTPS != nil { + in, out := &in.RequireHTTPS, &out.RequireHTTPS + *out = new(bool) + **out = **in + } + if in.RuntimeVersion != nil { + in, out := &in.RuntimeVersion, &out.RuntimeVersion + *out = new(string) + **out = **in + } + if in.TwitterV2 != nil { + in, out := &in.TwitterV2, &out.TwitterV2 + *out = new(LinuxWebAppSlotAuthSettingsV2TwitterV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.UnauthenticatedAction != nil { + in, out := 
&in.UnauthenticatedAction, &out.UnauthenticatedAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotAuthSettingsV2Parameters. +func (in *LinuxWebAppSlotAuthSettingsV2Parameters) DeepCopy() *LinuxWebAppSlotAuthSettingsV2Parameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotAuthSettingsV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSlotAuthSettingsV2TwitterV2InitParameters) DeepCopyInto(out *LinuxWebAppSlotAuthSettingsV2TwitterV2InitParameters) { + *out = *in + if in.ConsumerKey != nil { + in, out := &in.ConsumerKey, &out.ConsumerKey + *out = new(string) + **out = **in + } + if in.ConsumerSecretSettingName != nil { + in, out := &in.ConsumerSecretSettingName, &out.ConsumerSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotAuthSettingsV2TwitterV2InitParameters. +func (in *LinuxWebAppSlotAuthSettingsV2TwitterV2InitParameters) DeepCopy() *LinuxWebAppSlotAuthSettingsV2TwitterV2InitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotAuthSettingsV2TwitterV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSlotAuthSettingsV2TwitterV2Observation) DeepCopyInto(out *LinuxWebAppSlotAuthSettingsV2TwitterV2Observation) { + *out = *in + if in.ConsumerKey != nil { + in, out := &in.ConsumerKey, &out.ConsumerKey + *out = new(string) + **out = **in + } + if in.ConsumerSecretSettingName != nil { + in, out := &in.ConsumerSecretSettingName, &out.ConsumerSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotAuthSettingsV2TwitterV2Observation. +func (in *LinuxWebAppSlotAuthSettingsV2TwitterV2Observation) DeepCopy() *LinuxWebAppSlotAuthSettingsV2TwitterV2Observation { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotAuthSettingsV2TwitterV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSlotAuthSettingsV2TwitterV2Parameters) DeepCopyInto(out *LinuxWebAppSlotAuthSettingsV2TwitterV2Parameters) { + *out = *in + if in.ConsumerKey != nil { + in, out := &in.ConsumerKey, &out.ConsumerKey + *out = new(string) + **out = **in + } + if in.ConsumerSecretSettingName != nil { + in, out := &in.ConsumerSecretSettingName, &out.ConsumerSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotAuthSettingsV2TwitterV2Parameters. +func (in *LinuxWebAppSlotAuthSettingsV2TwitterV2Parameters) DeepCopy() *LinuxWebAppSlotAuthSettingsV2TwitterV2Parameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotAuthSettingsV2TwitterV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSlotBackupInitParameters) DeepCopyInto(out *LinuxWebAppSlotBackupInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(LinuxWebAppSlotBackupScheduleInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotBackupInitParameters. +func (in *LinuxWebAppSlotBackupInitParameters) DeepCopy() *LinuxWebAppSlotBackupInitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotBackupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSlotBackupObservation) DeepCopyInto(out *LinuxWebAppSlotBackupObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(LinuxWebAppSlotBackupScheduleObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotBackupObservation. +func (in *LinuxWebAppSlotBackupObservation) DeepCopy() *LinuxWebAppSlotBackupObservation { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotBackupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSlotBackupParameters) DeepCopyInto(out *LinuxWebAppSlotBackupParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(LinuxWebAppSlotBackupScheduleParameters) + (*in).DeepCopyInto(*out) + } + out.StorageAccountURLSecretRef = in.StorageAccountURLSecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotBackupParameters. +func (in *LinuxWebAppSlotBackupParameters) DeepCopy() *LinuxWebAppSlotBackupParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotBackupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSlotBackupScheduleInitParameters) DeepCopyInto(out *LinuxWebAppSlotBackupScheduleInitParameters) { + *out = *in + if in.FrequencyInterval != nil { + in, out := &in.FrequencyInterval, &out.FrequencyInterval + *out = new(float64) + **out = **in + } + if in.FrequencyUnit != nil { + in, out := &in.FrequencyUnit, &out.FrequencyUnit + *out = new(string) + **out = **in + } + if in.KeepAtLeastOneBackup != nil { + in, out := &in.KeepAtLeastOneBackup, &out.KeepAtLeastOneBackup + *out = new(bool) + **out = **in + } + if in.RetentionPeriodDays != nil { + in, out := &in.RetentionPeriodDays, &out.RetentionPeriodDays + *out = new(float64) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotBackupScheduleInitParameters. 
+func (in *LinuxWebAppSlotBackupScheduleInitParameters) DeepCopy() *LinuxWebAppSlotBackupScheduleInitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotBackupScheduleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSlotBackupScheduleObservation) DeepCopyInto(out *LinuxWebAppSlotBackupScheduleObservation) { + *out = *in + if in.FrequencyInterval != nil { + in, out := &in.FrequencyInterval, &out.FrequencyInterval + *out = new(float64) + **out = **in + } + if in.FrequencyUnit != nil { + in, out := &in.FrequencyUnit, &out.FrequencyUnit + *out = new(string) + **out = **in + } + if in.KeepAtLeastOneBackup != nil { + in, out := &in.KeepAtLeastOneBackup, &out.KeepAtLeastOneBackup + *out = new(bool) + **out = **in + } + if in.LastExecutionTime != nil { + in, out := &in.LastExecutionTime, &out.LastExecutionTime + *out = new(string) + **out = **in + } + if in.RetentionPeriodDays != nil { + in, out := &in.RetentionPeriodDays, &out.RetentionPeriodDays + *out = new(float64) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotBackupScheduleObservation. +func (in *LinuxWebAppSlotBackupScheduleObservation) DeepCopy() *LinuxWebAppSlotBackupScheduleObservation { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotBackupScheduleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSlotBackupScheduleParameters) DeepCopyInto(out *LinuxWebAppSlotBackupScheduleParameters) { + *out = *in + if in.FrequencyInterval != nil { + in, out := &in.FrequencyInterval, &out.FrequencyInterval + *out = new(float64) + **out = **in + } + if in.FrequencyUnit != nil { + in, out := &in.FrequencyUnit, &out.FrequencyUnit + *out = new(string) + **out = **in + } + if in.KeepAtLeastOneBackup != nil { + in, out := &in.KeepAtLeastOneBackup, &out.KeepAtLeastOneBackup + *out = new(bool) + **out = **in + } + if in.RetentionPeriodDays != nil { + in, out := &in.RetentionPeriodDays, &out.RetentionPeriodDays + *out = new(float64) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotBackupScheduleParameters. +func (in *LinuxWebAppSlotBackupScheduleParameters) DeepCopy() *LinuxWebAppSlotBackupScheduleParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotBackupScheduleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSlotConnectionStringInitParameters) DeepCopyInto(out *LinuxWebAppSlotConnectionStringInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotConnectionStringInitParameters. 
+func (in *LinuxWebAppSlotConnectionStringInitParameters) DeepCopy() *LinuxWebAppSlotConnectionStringInitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotConnectionStringInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSlotConnectionStringObservation) DeepCopyInto(out *LinuxWebAppSlotConnectionStringObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotConnectionStringObservation. +func (in *LinuxWebAppSlotConnectionStringObservation) DeepCopy() *LinuxWebAppSlotConnectionStringObservation { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotConnectionStringObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSlotConnectionStringParameters) DeepCopyInto(out *LinuxWebAppSlotConnectionStringParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + out.ValueSecretRef = in.ValueSecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotConnectionStringParameters. 
+func (in *LinuxWebAppSlotConnectionStringParameters) DeepCopy() *LinuxWebAppSlotConnectionStringParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotConnectionStringParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSlotIdentityInitParameters) DeepCopyInto(out *LinuxWebAppSlotIdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotIdentityInitParameters. +func (in *LinuxWebAppSlotIdentityInitParameters) DeepCopy() *LinuxWebAppSlotIdentityInitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotIdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSlotIdentityObservation) DeepCopyInto(out *LinuxWebAppSlotIdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotIdentityObservation. +func (in *LinuxWebAppSlotIdentityObservation) DeepCopy() *LinuxWebAppSlotIdentityObservation { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotIdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSlotIdentityParameters) DeepCopyInto(out *LinuxWebAppSlotIdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotIdentityParameters. 
+func (in *LinuxWebAppSlotIdentityParameters) DeepCopy() *LinuxWebAppSlotIdentityParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotIdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSlotInitParameters) DeepCopyInto(out *LinuxWebAppSlotInitParameters) { + *out = *in + if in.AppServiceID != nil { + in, out := &in.AppServiceID, &out.AppServiceID + *out = new(string) + **out = **in + } + if in.AppServiceIDRef != nil { + in, out := &in.AppServiceIDRef, &out.AppServiceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AppServiceIDSelector != nil { + in, out := &in.AppServiceIDSelector, &out.AppServiceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.AppSettings != nil { + in, out := &in.AppSettings, &out.AppSettings + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AuthSettings != nil { + in, out := &in.AuthSettings, &out.AuthSettings + *out = new(LinuxWebAppSlotAuthSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AuthSettingsV2 != nil { + in, out := &in.AuthSettingsV2, &out.AuthSettingsV2 + *out = new(LinuxWebAppSlotAuthSettingsV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.Backup != nil { + in, out := &in.Backup, &out.Backup + *out = new(LinuxWebAppSlotBackupInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ClientAffinityEnabled != nil { + in, out := &in.ClientAffinityEnabled, &out.ClientAffinityEnabled + *out = new(bool) + **out = **in + } + if in.ClientCertificateEnabled != nil { + in, out := &in.ClientCertificateEnabled, &out.ClientCertificateEnabled + *out = new(bool) + **out = **in + } + if 
in.ClientCertificateExclusionPaths != nil { + in, out := &in.ClientCertificateExclusionPaths, &out.ClientCertificateExclusionPaths + *out = new(string) + **out = **in + } + if in.ClientCertificateMode != nil { + in, out := &in.ClientCertificateMode, &out.ClientCertificateMode + *out = new(string) + **out = **in + } + if in.ConnectionString != nil { + in, out := &in.ConnectionString, &out.ConnectionString + *out = make([]LinuxWebAppSlotConnectionStringInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.FtpPublishBasicAuthenticationEnabled != nil { + in, out := &in.FtpPublishBasicAuthenticationEnabled, &out.FtpPublishBasicAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.HTTPSOnly != nil { + in, out := &in.HTTPSOnly, &out.HTTPSOnly + *out = new(bool) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(LinuxWebAppSlotIdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.KeyVaultReferenceIdentityID != nil { + in, out := &in.KeyVaultReferenceIdentityID, &out.KeyVaultReferenceIdentityID + *out = new(string) + **out = **in + } + if in.Logs != nil { + in, out := &in.Logs, &out.Logs + *out = new(LinuxWebAppSlotLogsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ServicePlanID != nil { + in, out := &in.ServicePlanID, &out.ServicePlanID + *out = new(string) + **out = **in + } + if in.SiteConfig != nil { + in, out := &in.SiteConfig, &out.SiteConfig + *out = new(LinuxWebAppSlotSiteConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StorageAccount != nil { + in, out := &in.StorageAccount, 
&out.StorageAccount + *out = make([]LinuxWebAppSlotStorageAccountInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetIDRef != nil { + in, out := &in.VirtualNetworkSubnetIDRef, &out.VirtualNetworkSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkSubnetIDSelector != nil { + in, out := &in.VirtualNetworkSubnetIDSelector, &out.VirtualNetworkSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.WebdeployPublishBasicAuthenticationEnabled != nil { + in, out := &in.WebdeployPublishBasicAuthenticationEnabled, &out.WebdeployPublishBasicAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.ZipDeployFile != nil { + in, out := &in.ZipDeployFile, &out.ZipDeployFile + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotInitParameters. +func (in *LinuxWebAppSlotInitParameters) DeepCopy() *LinuxWebAppSlotInitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSlotList) DeepCopyInto(out *LinuxWebAppSlotList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LinuxWebAppSlot, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotList. +func (in *LinuxWebAppSlotList) DeepCopy() *LinuxWebAppSlotList { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LinuxWebAppSlotList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSlotLogsInitParameters) DeepCopyInto(out *LinuxWebAppSlotLogsInitParameters) { + *out = *in + if in.ApplicationLogs != nil { + in, out := &in.ApplicationLogs, &out.ApplicationLogs + *out = new(LogsApplicationLogsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DetailedErrorMessages != nil { + in, out := &in.DetailedErrorMessages, &out.DetailedErrorMessages + *out = new(bool) + **out = **in + } + if in.FailedRequestTracing != nil { + in, out := &in.FailedRequestTracing, &out.FailedRequestTracing + *out = new(bool) + **out = **in + } + if in.HTTPLogs != nil { + in, out := &in.HTTPLogs, &out.HTTPLogs + *out = new(LogsHTTPLogsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotLogsInitParameters. 
+func (in *LinuxWebAppSlotLogsInitParameters) DeepCopy() *LinuxWebAppSlotLogsInitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotLogsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSlotLogsObservation) DeepCopyInto(out *LinuxWebAppSlotLogsObservation) { + *out = *in + if in.ApplicationLogs != nil { + in, out := &in.ApplicationLogs, &out.ApplicationLogs + *out = new(LogsApplicationLogsObservation) + (*in).DeepCopyInto(*out) + } + if in.DetailedErrorMessages != nil { + in, out := &in.DetailedErrorMessages, &out.DetailedErrorMessages + *out = new(bool) + **out = **in + } + if in.FailedRequestTracing != nil { + in, out := &in.FailedRequestTracing, &out.FailedRequestTracing + *out = new(bool) + **out = **in + } + if in.HTTPLogs != nil { + in, out := &in.HTTPLogs, &out.HTTPLogs + *out = new(LogsHTTPLogsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotLogsObservation. +func (in *LinuxWebAppSlotLogsObservation) DeepCopy() *LinuxWebAppSlotLogsObservation { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotLogsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSlotLogsParameters) DeepCopyInto(out *LinuxWebAppSlotLogsParameters) { + *out = *in + if in.ApplicationLogs != nil { + in, out := &in.ApplicationLogs, &out.ApplicationLogs + *out = new(LogsApplicationLogsParameters) + (*in).DeepCopyInto(*out) + } + if in.DetailedErrorMessages != nil { + in, out := &in.DetailedErrorMessages, &out.DetailedErrorMessages + *out = new(bool) + **out = **in + } + if in.FailedRequestTracing != nil { + in, out := &in.FailedRequestTracing, &out.FailedRequestTracing + *out = new(bool) + **out = **in + } + if in.HTTPLogs != nil { + in, out := &in.HTTPLogs, &out.HTTPLogs + *out = new(LogsHTTPLogsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotLogsParameters. +func (in *LinuxWebAppSlotLogsParameters) DeepCopy() *LinuxWebAppSlotLogsParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotLogsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSlotObservation) DeepCopyInto(out *LinuxWebAppSlotObservation) { + *out = *in + if in.AppMetadata != nil { + in, out := &in.AppMetadata, &out.AppMetadata + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AppServiceID != nil { + in, out := &in.AppServiceID, &out.AppServiceID + *out = new(string) + **out = **in + } + if in.AppSettings != nil { + in, out := &in.AppSettings, &out.AppSettings + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AuthSettings != nil { + in, out := &in.AuthSettings, &out.AuthSettings + *out = new(LinuxWebAppSlotAuthSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.AuthSettingsV2 != nil { + in, out := &in.AuthSettingsV2, &out.AuthSettingsV2 + *out = new(LinuxWebAppSlotAuthSettingsV2Observation) + (*in).DeepCopyInto(*out) + } + if in.Backup != nil { + in, out := &in.Backup, &out.Backup + *out = new(LinuxWebAppSlotBackupObservation) + (*in).DeepCopyInto(*out) + } + if in.ClientAffinityEnabled != nil { + in, out := &in.ClientAffinityEnabled, &out.ClientAffinityEnabled + *out = new(bool) + **out = **in + } + if in.ClientCertificateEnabled != nil { + in, out := &in.ClientCertificateEnabled, &out.ClientCertificateEnabled + *out = new(bool) + **out = **in + } + if in.ClientCertificateExclusionPaths != nil { + in, out := &in.ClientCertificateExclusionPaths, &out.ClientCertificateExclusionPaths + *out = new(string) + **out = **in + } + if in.ClientCertificateMode != nil { + in, out := &in.ClientCertificateMode, &out.ClientCertificateMode + *out = new(string) + **out = **in + } + if 
in.ConnectionString != nil { + in, out := &in.ConnectionString, &out.ConnectionString + *out = make([]LinuxWebAppSlotConnectionStringObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultHostName != nil { + in, out := &in.DefaultHostName, &out.DefaultHostName + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.FtpPublishBasicAuthenticationEnabled != nil { + in, out := &in.FtpPublishBasicAuthenticationEnabled, &out.FtpPublishBasicAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.HTTPSOnly != nil { + in, out := &in.HTTPSOnly, &out.HTTPSOnly + *out = new(bool) + **out = **in + } + if in.HostingEnvironmentID != nil { + in, out := &in.HostingEnvironmentID, &out.HostingEnvironmentID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(LinuxWebAppSlotIdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.KeyVaultReferenceIdentityID != nil { + in, out := &in.KeyVaultReferenceIdentityID, &out.KeyVaultReferenceIdentityID + *out = new(string) + **out = **in + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(string) + **out = **in + } + if in.Logs != nil { + in, out := &in.Logs, &out.Logs + *out = new(LinuxWebAppSlotLogsObservation) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OutboundIPAddressList != nil { + in, out := &in.OutboundIPAddressList, &out.OutboundIPAddressList + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OutboundIPAddresses != nil { + in, out := &in.OutboundIPAddresses, &out.OutboundIPAddresses + *out = 
new(string) + **out = **in + } + if in.PossibleOutboundIPAddressList != nil { + in, out := &in.PossibleOutboundIPAddressList, &out.PossibleOutboundIPAddressList + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PossibleOutboundIPAddresses != nil { + in, out := &in.PossibleOutboundIPAddresses, &out.PossibleOutboundIPAddresses + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ServicePlanID != nil { + in, out := &in.ServicePlanID, &out.ServicePlanID + *out = new(string) + **out = **in + } + if in.SiteConfig != nil { + in, out := &in.SiteConfig, &out.SiteConfig + *out = new(LinuxWebAppSlotSiteConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.StorageAccount != nil { + in, out := &in.StorageAccount, &out.StorageAccount + *out = make([]LinuxWebAppSlotStorageAccountObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.WebdeployPublishBasicAuthenticationEnabled != nil { + in, out := &in.WebdeployPublishBasicAuthenticationEnabled, &out.WebdeployPublishBasicAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.ZipDeployFile != nil { + in, out := &in.ZipDeployFile, &out.ZipDeployFile + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy 
function, copying the receiver, creating a new LinuxWebAppSlotObservation. +func (in *LinuxWebAppSlotObservation) DeepCopy() *LinuxWebAppSlotObservation { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSlotParameters) DeepCopyInto(out *LinuxWebAppSlotParameters) { + *out = *in + if in.AppServiceID != nil { + in, out := &in.AppServiceID, &out.AppServiceID + *out = new(string) + **out = **in + } + if in.AppServiceIDRef != nil { + in, out := &in.AppServiceIDRef, &out.AppServiceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AppServiceIDSelector != nil { + in, out := &in.AppServiceIDSelector, &out.AppServiceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.AppSettings != nil { + in, out := &in.AppSettings, &out.AppSettings + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AuthSettings != nil { + in, out := &in.AuthSettings, &out.AuthSettings + *out = new(LinuxWebAppSlotAuthSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.AuthSettingsV2 != nil { + in, out := &in.AuthSettingsV2, &out.AuthSettingsV2 + *out = new(LinuxWebAppSlotAuthSettingsV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.Backup != nil { + in, out := &in.Backup, &out.Backup + *out = new(LinuxWebAppSlotBackupParameters) + (*in).DeepCopyInto(*out) + } + if in.ClientAffinityEnabled != nil { + in, out := &in.ClientAffinityEnabled, &out.ClientAffinityEnabled + *out = new(bool) + **out = **in + } + if in.ClientCertificateEnabled != nil { + in, out := &in.ClientCertificateEnabled, &out.ClientCertificateEnabled + *out = new(bool) 
+ **out = **in + } + if in.ClientCertificateExclusionPaths != nil { + in, out := &in.ClientCertificateExclusionPaths, &out.ClientCertificateExclusionPaths + *out = new(string) + **out = **in + } + if in.ClientCertificateMode != nil { + in, out := &in.ClientCertificateMode, &out.ClientCertificateMode + *out = new(string) + **out = **in + } + if in.ConnectionString != nil { + in, out := &in.ConnectionString, &out.ConnectionString + *out = make([]LinuxWebAppSlotConnectionStringParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.FtpPublishBasicAuthenticationEnabled != nil { + in, out := &in.FtpPublishBasicAuthenticationEnabled, &out.FtpPublishBasicAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.HTTPSOnly != nil { + in, out := &in.HTTPSOnly, &out.HTTPSOnly + *out = new(bool) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(LinuxWebAppSlotIdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.KeyVaultReferenceIdentityID != nil { + in, out := &in.KeyVaultReferenceIdentityID, &out.KeyVaultReferenceIdentityID + *out = new(string) + **out = **in + } + if in.Logs != nil { + in, out := &in.Logs, &out.Logs + *out = new(LinuxWebAppSlotLogsParameters) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ServicePlanID != nil { + in, out := &in.ServicePlanID, &out.ServicePlanID + *out = new(string) + **out = **in + } + if in.SiteConfig != nil { + in, out := &in.SiteConfig, &out.SiteConfig + *out = new(LinuxWebAppSlotSiteConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.StorageAccount != nil { + in, out := 
&in.StorageAccount, &out.StorageAccount + *out = make([]LinuxWebAppSlotStorageAccountParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetIDRef != nil { + in, out := &in.VirtualNetworkSubnetIDRef, &out.VirtualNetworkSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkSubnetIDSelector != nil { + in, out := &in.VirtualNetworkSubnetIDSelector, &out.VirtualNetworkSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.WebdeployPublishBasicAuthenticationEnabled != nil { + in, out := &in.WebdeployPublishBasicAuthenticationEnabled, &out.WebdeployPublishBasicAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.ZipDeployFile != nil { + in, out := &in.ZipDeployFile, &out.ZipDeployFile + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotParameters. +func (in *LinuxWebAppSlotParameters) DeepCopy() *LinuxWebAppSlotParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSlotSiteConfigApplicationStackInitParameters) DeepCopyInto(out *LinuxWebAppSlotSiteConfigApplicationStackInitParameters) { + *out = *in + if in.DockerImage != nil { + in, out := &in.DockerImage, &out.DockerImage + *out = new(string) + **out = **in + } + if in.DockerImageName != nil { + in, out := &in.DockerImageName, &out.DockerImageName + *out = new(string) + **out = **in + } + if in.DockerImageTag != nil { + in, out := &in.DockerImageTag, &out.DockerImageTag + *out = new(string) + **out = **in + } + if in.DockerRegistryURL != nil { + in, out := &in.DockerRegistryURL, &out.DockerRegistryURL + *out = new(string) + **out = **in + } + if in.DockerRegistryUsername != nil { + in, out := &in.DockerRegistryUsername, &out.DockerRegistryUsername + *out = new(string) + **out = **in + } + if in.DotnetVersion != nil { + in, out := &in.DotnetVersion, &out.DotnetVersion + *out = new(string) + **out = **in + } + if in.GoVersion != nil { + in, out := &in.GoVersion, &out.GoVersion + *out = new(string) + **out = **in + } + if in.JavaServer != nil { + in, out := &in.JavaServer, &out.JavaServer + *out = new(string) + **out = **in + } + if in.JavaServerVersion != nil { + in, out := &in.JavaServerVersion, &out.JavaServerVersion + *out = new(string) + **out = **in + } + if in.JavaVersion != nil { + in, out := &in.JavaVersion, &out.JavaVersion + *out = new(string) + **out = **in + } + if in.NodeVersion != nil { + in, out := &in.NodeVersion, &out.NodeVersion + *out = new(string) + **out = **in + } + if in.PHPVersion != nil { + in, out := &in.PHPVersion, &out.PHPVersion + *out = new(string) + **out = **in + } + if in.PythonVersion != nil { + in, out := &in.PythonVersion, &out.PythonVersion + *out = new(string) + **out = **in + } + if in.RubyVersion != nil { + in, out := &in.RubyVersion, &out.RubyVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
LinuxWebAppSlotSiteConfigApplicationStackInitParameters. +func (in *LinuxWebAppSlotSiteConfigApplicationStackInitParameters) DeepCopy() *LinuxWebAppSlotSiteConfigApplicationStackInitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotSiteConfigApplicationStackInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSlotSiteConfigApplicationStackObservation) DeepCopyInto(out *LinuxWebAppSlotSiteConfigApplicationStackObservation) { + *out = *in + if in.DockerImage != nil { + in, out := &in.DockerImage, &out.DockerImage + *out = new(string) + **out = **in + } + if in.DockerImageName != nil { + in, out := &in.DockerImageName, &out.DockerImageName + *out = new(string) + **out = **in + } + if in.DockerImageTag != nil { + in, out := &in.DockerImageTag, &out.DockerImageTag + *out = new(string) + **out = **in + } + if in.DockerRegistryURL != nil { + in, out := &in.DockerRegistryURL, &out.DockerRegistryURL + *out = new(string) + **out = **in + } + if in.DockerRegistryUsername != nil { + in, out := &in.DockerRegistryUsername, &out.DockerRegistryUsername + *out = new(string) + **out = **in + } + if in.DotnetVersion != nil { + in, out := &in.DotnetVersion, &out.DotnetVersion + *out = new(string) + **out = **in + } + if in.GoVersion != nil { + in, out := &in.GoVersion, &out.GoVersion + *out = new(string) + **out = **in + } + if in.JavaServer != nil { + in, out := &in.JavaServer, &out.JavaServer + *out = new(string) + **out = **in + } + if in.JavaServerVersion != nil { + in, out := &in.JavaServerVersion, &out.JavaServerVersion + *out = new(string) + **out = **in + } + if in.JavaVersion != nil { + in, out := &in.JavaVersion, &out.JavaVersion + *out = new(string) + **out = **in + } + if in.NodeVersion != nil { + in, out := &in.NodeVersion, &out.NodeVersion + *out = new(string) + **out = **in + } + if in.PHPVersion != nil { 
+ in, out := &in.PHPVersion, &out.PHPVersion + *out = new(string) + **out = **in + } + if in.PythonVersion != nil { + in, out := &in.PythonVersion, &out.PythonVersion + *out = new(string) + **out = **in + } + if in.RubyVersion != nil { + in, out := &in.RubyVersion, &out.RubyVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotSiteConfigApplicationStackObservation. +func (in *LinuxWebAppSlotSiteConfigApplicationStackObservation) DeepCopy() *LinuxWebAppSlotSiteConfigApplicationStackObservation { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotSiteConfigApplicationStackObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSlotSiteConfigApplicationStackParameters) DeepCopyInto(out *LinuxWebAppSlotSiteConfigApplicationStackParameters) { + *out = *in + if in.DockerImage != nil { + in, out := &in.DockerImage, &out.DockerImage + *out = new(string) + **out = **in + } + if in.DockerImageName != nil { + in, out := &in.DockerImageName, &out.DockerImageName + *out = new(string) + **out = **in + } + if in.DockerImageTag != nil { + in, out := &in.DockerImageTag, &out.DockerImageTag + *out = new(string) + **out = **in + } + if in.DockerRegistryPasswordSecretRef != nil { + in, out := &in.DockerRegistryPasswordSecretRef, &out.DockerRegistryPasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.DockerRegistryURL != nil { + in, out := &in.DockerRegistryURL, &out.DockerRegistryURL + *out = new(string) + **out = **in + } + if in.DockerRegistryUsername != nil { + in, out := &in.DockerRegistryUsername, &out.DockerRegistryUsername + *out = new(string) + **out = **in + } + if in.DotnetVersion != nil { + in, out := &in.DotnetVersion, &out.DotnetVersion + *out = new(string) + **out = **in + } + if in.GoVersion != nil 
{ + in, out := &in.GoVersion, &out.GoVersion + *out = new(string) + **out = **in + } + if in.JavaServer != nil { + in, out := &in.JavaServer, &out.JavaServer + *out = new(string) + **out = **in + } + if in.JavaServerVersion != nil { + in, out := &in.JavaServerVersion, &out.JavaServerVersion + *out = new(string) + **out = **in + } + if in.JavaVersion != nil { + in, out := &in.JavaVersion, &out.JavaVersion + *out = new(string) + **out = **in + } + if in.NodeVersion != nil { + in, out := &in.NodeVersion, &out.NodeVersion + *out = new(string) + **out = **in + } + if in.PHPVersion != nil { + in, out := &in.PHPVersion, &out.PHPVersion + *out = new(string) + **out = **in + } + if in.PythonVersion != nil { + in, out := &in.PythonVersion, &out.PythonVersion + *out = new(string) + **out = **in + } + if in.RubyVersion != nil { + in, out := &in.RubyVersion, &out.RubyVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotSiteConfigApplicationStackParameters. +func (in *LinuxWebAppSlotSiteConfigApplicationStackParameters) DeepCopy() *LinuxWebAppSlotSiteConfigApplicationStackParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotSiteConfigApplicationStackParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSlotSiteConfigCorsInitParameters) DeepCopyInto(out *LinuxWebAppSlotSiteConfigCorsInitParameters) { + *out = *in + if in.AllowedOrigins != nil { + in, out := &in.AllowedOrigins, &out.AllowedOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SupportCredentials != nil { + in, out := &in.SupportCredentials, &out.SupportCredentials + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotSiteConfigCorsInitParameters. +func (in *LinuxWebAppSlotSiteConfigCorsInitParameters) DeepCopy() *LinuxWebAppSlotSiteConfigCorsInitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotSiteConfigCorsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSlotSiteConfigCorsObservation) DeepCopyInto(out *LinuxWebAppSlotSiteConfigCorsObservation) { + *out = *in + if in.AllowedOrigins != nil { + in, out := &in.AllowedOrigins, &out.AllowedOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SupportCredentials != nil { + in, out := &in.SupportCredentials, &out.SupportCredentials + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotSiteConfigCorsObservation. 
+func (in *LinuxWebAppSlotSiteConfigCorsObservation) DeepCopy() *LinuxWebAppSlotSiteConfigCorsObservation { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotSiteConfigCorsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSlotSiteConfigCorsParameters) DeepCopyInto(out *LinuxWebAppSlotSiteConfigCorsParameters) { + *out = *in + if in.AllowedOrigins != nil { + in, out := &in.AllowedOrigins, &out.AllowedOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SupportCredentials != nil { + in, out := &in.SupportCredentials, &out.SupportCredentials + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotSiteConfigCorsParameters. +func (in *LinuxWebAppSlotSiteConfigCorsParameters) DeepCopy() *LinuxWebAppSlotSiteConfigCorsParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotSiteConfigCorsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSlotSiteConfigIPRestrictionHeadersInitParameters) DeepCopyInto(out *LinuxWebAppSlotSiteConfigIPRestrictionHeadersInitParameters) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotSiteConfigIPRestrictionHeadersInitParameters. +func (in *LinuxWebAppSlotSiteConfigIPRestrictionHeadersInitParameters) DeepCopy() *LinuxWebAppSlotSiteConfigIPRestrictionHeadersInitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotSiteConfigIPRestrictionHeadersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSlotSiteConfigIPRestrictionHeadersObservation) DeepCopyInto(out *LinuxWebAppSlotSiteConfigIPRestrictionHeadersObservation) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotSiteConfigIPRestrictionHeadersObservation. +func (in *LinuxWebAppSlotSiteConfigIPRestrictionHeadersObservation) DeepCopy() *LinuxWebAppSlotSiteConfigIPRestrictionHeadersObservation { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotSiteConfigIPRestrictionHeadersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSlotSiteConfigIPRestrictionHeadersParameters) DeepCopyInto(out *LinuxWebAppSlotSiteConfigIPRestrictionHeadersParameters) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotSiteConfigIPRestrictionHeadersParameters. +func (in *LinuxWebAppSlotSiteConfigIPRestrictionHeadersParameters) DeepCopy() *LinuxWebAppSlotSiteConfigIPRestrictionHeadersParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotSiteConfigIPRestrictionHeadersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSlotSiteConfigIPRestrictionInitParameters) DeepCopyInto(out *LinuxWebAppSlotSiteConfigIPRestrictionInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]LinuxWebAppSlotSiteConfigIPRestrictionHeadersInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetIDRef != nil { + in, out := &in.VirtualNetworkSubnetIDRef, &out.VirtualNetworkSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkSubnetIDSelector != nil { + in, out := &in.VirtualNetworkSubnetIDSelector, &out.VirtualNetworkSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotSiteConfigIPRestrictionInitParameters. 
+func (in *LinuxWebAppSlotSiteConfigIPRestrictionInitParameters) DeepCopy() *LinuxWebAppSlotSiteConfigIPRestrictionInitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotSiteConfigIPRestrictionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSlotSiteConfigIPRestrictionObservation) DeepCopyInto(out *LinuxWebAppSlotSiteConfigIPRestrictionObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]LinuxWebAppSlotSiteConfigIPRestrictionHeadersObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotSiteConfigIPRestrictionObservation. 
+func (in *LinuxWebAppSlotSiteConfigIPRestrictionObservation) DeepCopy() *LinuxWebAppSlotSiteConfigIPRestrictionObservation { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotSiteConfigIPRestrictionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSlotSiteConfigIPRestrictionParameters) DeepCopyInto(out *LinuxWebAppSlotSiteConfigIPRestrictionParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]LinuxWebAppSlotSiteConfigIPRestrictionHeadersParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetIDRef != nil { + in, out := &in.VirtualNetworkSubnetIDRef, &out.VirtualNetworkSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkSubnetIDSelector != nil { + in, out := &in.VirtualNetworkSubnetIDSelector, &out.VirtualNetworkSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
LinuxWebAppSlotSiteConfigIPRestrictionParameters. +func (in *LinuxWebAppSlotSiteConfigIPRestrictionParameters) DeepCopy() *LinuxWebAppSlotSiteConfigIPRestrictionParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotSiteConfigIPRestrictionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSlotSiteConfigInitParameters) DeepCopyInto(out *LinuxWebAppSlotSiteConfigInitParameters) { + *out = *in + if in.APIDefinitionURL != nil { + in, out := &in.APIDefinitionURL, &out.APIDefinitionURL + *out = new(string) + **out = **in + } + if in.APIManagementAPIID != nil { + in, out := &in.APIManagementAPIID, &out.APIManagementAPIID + *out = new(string) + **out = **in + } + if in.AlwaysOn != nil { + in, out := &in.AlwaysOn, &out.AlwaysOn + *out = new(bool) + **out = **in + } + if in.AppCommandLine != nil { + in, out := &in.AppCommandLine, &out.AppCommandLine + *out = new(string) + **out = **in + } + if in.ApplicationStack != nil { + in, out := &in.ApplicationStack, &out.ApplicationStack + *out = new(LinuxWebAppSlotSiteConfigApplicationStackInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AutoHealEnabled != nil { + in, out := &in.AutoHealEnabled, &out.AutoHealEnabled + *out = new(bool) + **out = **in + } + if in.AutoHealSetting != nil { + in, out := &in.AutoHealSetting, &out.AutoHealSetting + *out = new(SiteConfigAutoHealSettingInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AutoSwapSlotName != nil { + in, out := &in.AutoSwapSlotName, &out.AutoSwapSlotName + *out = new(string) + **out = **in + } + if in.ContainerRegistryManagedIdentityClientID != nil { + in, out := &in.ContainerRegistryManagedIdentityClientID, &out.ContainerRegistryManagedIdentityClientID + *out = new(string) + **out = **in + } + if in.ContainerRegistryUseManagedIdentity != nil { + in, out := &in.ContainerRegistryUseManagedIdentity, 
&out.ContainerRegistryUseManagedIdentity + *out = new(bool) + **out = **in + } + if in.Cors != nil { + in, out := &in.Cors, &out.Cors + *out = new(LinuxWebAppSlotSiteConfigCorsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DefaultDocuments != nil { + in, out := &in.DefaultDocuments, &out.DefaultDocuments + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FtpsState != nil { + in, out := &in.FtpsState, &out.FtpsState + *out = new(string) + **out = **in + } + if in.HealthCheckEvictionTimeInMin != nil { + in, out := &in.HealthCheckEvictionTimeInMin, &out.HealthCheckEvictionTimeInMin + *out = new(float64) + **out = **in + } + if in.HealthCheckPath != nil { + in, out := &in.HealthCheckPath, &out.HealthCheckPath + *out = new(string) + **out = **in + } + if in.Http2Enabled != nil { + in, out := &in.Http2Enabled, &out.Http2Enabled + *out = new(bool) + **out = **in + } + if in.IPRestriction != nil { + in, out := &in.IPRestriction, &out.IPRestriction + *out = make([]LinuxWebAppSlotSiteConfigIPRestrictionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPRestrictionDefaultAction != nil { + in, out := &in.IPRestrictionDefaultAction, &out.IPRestrictionDefaultAction + *out = new(string) + **out = **in + } + if in.LoadBalancingMode != nil { + in, out := &in.LoadBalancingMode, &out.LoadBalancingMode + *out = new(string) + **out = **in + } + if in.LocalMySQLEnabled != nil { + in, out := &in.LocalMySQLEnabled, &out.LocalMySQLEnabled + *out = new(bool) + **out = **in + } + if in.ManagedPipelineMode != nil { + in, out := &in.ManagedPipelineMode, &out.ManagedPipelineMode + *out = new(string) + **out = **in + } + if in.MinimumTLSVersion != nil { + in, out := &in.MinimumTLSVersion, &out.MinimumTLSVersion + *out = new(string) + **out = **in + } + if in.RemoteDebuggingEnabled != nil { + in, out := 
&in.RemoteDebuggingEnabled, &out.RemoteDebuggingEnabled + *out = new(bool) + **out = **in + } + if in.RemoteDebuggingVersion != nil { + in, out := &in.RemoteDebuggingVersion, &out.RemoteDebuggingVersion + *out = new(string) + **out = **in + } + if in.ScmIPRestriction != nil { + in, out := &in.ScmIPRestriction, &out.ScmIPRestriction + *out = make([]LinuxWebAppSlotSiteConfigScmIPRestrictionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ScmIPRestrictionDefaultAction != nil { + in, out := &in.ScmIPRestrictionDefaultAction, &out.ScmIPRestrictionDefaultAction + *out = new(string) + **out = **in + } + if in.ScmMinimumTLSVersion != nil { + in, out := &in.ScmMinimumTLSVersion, &out.ScmMinimumTLSVersion + *out = new(string) + **out = **in + } + if in.ScmUseMainIPRestriction != nil { + in, out := &in.ScmUseMainIPRestriction, &out.ScmUseMainIPRestriction + *out = new(bool) + **out = **in + } + if in.Use32BitWorker != nil { + in, out := &in.Use32BitWorker, &out.Use32BitWorker + *out = new(bool) + **out = **in + } + if in.VnetRouteAllEnabled != nil { + in, out := &in.VnetRouteAllEnabled, &out.VnetRouteAllEnabled + *out = new(bool) + **out = **in + } + if in.WebsocketsEnabled != nil { + in, out := &in.WebsocketsEnabled, &out.WebsocketsEnabled + *out = new(bool) + **out = **in + } + if in.WorkerCount != nil { + in, out := &in.WorkerCount, &out.WorkerCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotSiteConfigInitParameters. +func (in *LinuxWebAppSlotSiteConfigInitParameters) DeepCopy() *LinuxWebAppSlotSiteConfigInitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotSiteConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSlotSiteConfigObservation) DeepCopyInto(out *LinuxWebAppSlotSiteConfigObservation) { + *out = *in + if in.APIDefinitionURL != nil { + in, out := &in.APIDefinitionURL, &out.APIDefinitionURL + *out = new(string) + **out = **in + } + if in.APIManagementAPIID != nil { + in, out := &in.APIManagementAPIID, &out.APIManagementAPIID + *out = new(string) + **out = **in + } + if in.AlwaysOn != nil { + in, out := &in.AlwaysOn, &out.AlwaysOn + *out = new(bool) + **out = **in + } + if in.AppCommandLine != nil { + in, out := &in.AppCommandLine, &out.AppCommandLine + *out = new(string) + **out = **in + } + if in.ApplicationStack != nil { + in, out := &in.ApplicationStack, &out.ApplicationStack + *out = new(LinuxWebAppSlotSiteConfigApplicationStackObservation) + (*in).DeepCopyInto(*out) + } + if in.AutoHealEnabled != nil { + in, out := &in.AutoHealEnabled, &out.AutoHealEnabled + *out = new(bool) + **out = **in + } + if in.AutoHealSetting != nil { + in, out := &in.AutoHealSetting, &out.AutoHealSetting + *out = new(SiteConfigAutoHealSettingObservation) + (*in).DeepCopyInto(*out) + } + if in.AutoSwapSlotName != nil { + in, out := &in.AutoSwapSlotName, &out.AutoSwapSlotName + *out = new(string) + **out = **in + } + if in.ContainerRegistryManagedIdentityClientID != nil { + in, out := &in.ContainerRegistryManagedIdentityClientID, &out.ContainerRegistryManagedIdentityClientID + *out = new(string) + **out = **in + } + if in.ContainerRegistryUseManagedIdentity != nil { + in, out := &in.ContainerRegistryUseManagedIdentity, &out.ContainerRegistryUseManagedIdentity + *out = new(bool) + **out = **in + } + if in.Cors != nil { + in, out := &in.Cors, &out.Cors + *out = new(LinuxWebAppSlotSiteConfigCorsObservation) + (*in).DeepCopyInto(*out) + } + if in.DefaultDocuments != nil { + in, out := &in.DefaultDocuments, &out.DefaultDocuments + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out 
= **in + } + } + } + if in.DetailedErrorLoggingEnabled != nil { + in, out := &in.DetailedErrorLoggingEnabled, &out.DetailedErrorLoggingEnabled + *out = new(bool) + **out = **in + } + if in.FtpsState != nil { + in, out := &in.FtpsState, &out.FtpsState + *out = new(string) + **out = **in + } + if in.HealthCheckEvictionTimeInMin != nil { + in, out := &in.HealthCheckEvictionTimeInMin, &out.HealthCheckEvictionTimeInMin + *out = new(float64) + **out = **in + } + if in.HealthCheckPath != nil { + in, out := &in.HealthCheckPath, &out.HealthCheckPath + *out = new(string) + **out = **in + } + if in.Http2Enabled != nil { + in, out := &in.Http2Enabled, &out.Http2Enabled + *out = new(bool) + **out = **in + } + if in.IPRestriction != nil { + in, out := &in.IPRestriction, &out.IPRestriction + *out = make([]LinuxWebAppSlotSiteConfigIPRestrictionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPRestrictionDefaultAction != nil { + in, out := &in.IPRestrictionDefaultAction, &out.IPRestrictionDefaultAction + *out = new(string) + **out = **in + } + if in.LinuxFxVersion != nil { + in, out := &in.LinuxFxVersion, &out.LinuxFxVersion + *out = new(string) + **out = **in + } + if in.LoadBalancingMode != nil { + in, out := &in.LoadBalancingMode, &out.LoadBalancingMode + *out = new(string) + **out = **in + } + if in.LocalMySQLEnabled != nil { + in, out := &in.LocalMySQLEnabled, &out.LocalMySQLEnabled + *out = new(bool) + **out = **in + } + if in.ManagedPipelineMode != nil { + in, out := &in.ManagedPipelineMode, &out.ManagedPipelineMode + *out = new(string) + **out = **in + } + if in.MinimumTLSVersion != nil { + in, out := &in.MinimumTLSVersion, &out.MinimumTLSVersion + *out = new(string) + **out = **in + } + if in.RemoteDebuggingEnabled != nil { + in, out := &in.RemoteDebuggingEnabled, &out.RemoteDebuggingEnabled + *out = new(bool) + **out = **in + } + if in.RemoteDebuggingVersion != nil { + in, out := &in.RemoteDebuggingVersion, 
&out.RemoteDebuggingVersion + *out = new(string) + **out = **in + } + if in.ScmIPRestriction != nil { + in, out := &in.ScmIPRestriction, &out.ScmIPRestriction + *out = make([]LinuxWebAppSlotSiteConfigScmIPRestrictionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ScmIPRestrictionDefaultAction != nil { + in, out := &in.ScmIPRestrictionDefaultAction, &out.ScmIPRestrictionDefaultAction + *out = new(string) + **out = **in + } + if in.ScmMinimumTLSVersion != nil { + in, out := &in.ScmMinimumTLSVersion, &out.ScmMinimumTLSVersion + *out = new(string) + **out = **in + } + if in.ScmType != nil { + in, out := &in.ScmType, &out.ScmType + *out = new(string) + **out = **in + } + if in.ScmUseMainIPRestriction != nil { + in, out := &in.ScmUseMainIPRestriction, &out.ScmUseMainIPRestriction + *out = new(bool) + **out = **in + } + if in.Use32BitWorker != nil { + in, out := &in.Use32BitWorker, &out.Use32BitWorker + *out = new(bool) + **out = **in + } + if in.VnetRouteAllEnabled != nil { + in, out := &in.VnetRouteAllEnabled, &out.VnetRouteAllEnabled + *out = new(bool) + **out = **in + } + if in.WebsocketsEnabled != nil { + in, out := &in.WebsocketsEnabled, &out.WebsocketsEnabled + *out = new(bool) + **out = **in + } + if in.WorkerCount != nil { + in, out := &in.WorkerCount, &out.WorkerCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotSiteConfigObservation. +func (in *LinuxWebAppSlotSiteConfigObservation) DeepCopy() *LinuxWebAppSlotSiteConfigObservation { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotSiteConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSlotSiteConfigParameters) DeepCopyInto(out *LinuxWebAppSlotSiteConfigParameters) { + *out = *in + if in.APIDefinitionURL != nil { + in, out := &in.APIDefinitionURL, &out.APIDefinitionURL + *out = new(string) + **out = **in + } + if in.APIManagementAPIID != nil { + in, out := &in.APIManagementAPIID, &out.APIManagementAPIID + *out = new(string) + **out = **in + } + if in.AlwaysOn != nil { + in, out := &in.AlwaysOn, &out.AlwaysOn + *out = new(bool) + **out = **in + } + if in.AppCommandLine != nil { + in, out := &in.AppCommandLine, &out.AppCommandLine + *out = new(string) + **out = **in + } + if in.ApplicationStack != nil { + in, out := &in.ApplicationStack, &out.ApplicationStack + *out = new(LinuxWebAppSlotSiteConfigApplicationStackParameters) + (*in).DeepCopyInto(*out) + } + if in.AutoHealEnabled != nil { + in, out := &in.AutoHealEnabled, &out.AutoHealEnabled + *out = new(bool) + **out = **in + } + if in.AutoHealSetting != nil { + in, out := &in.AutoHealSetting, &out.AutoHealSetting + *out = new(SiteConfigAutoHealSettingParameters) + (*in).DeepCopyInto(*out) + } + if in.AutoSwapSlotName != nil { + in, out := &in.AutoSwapSlotName, &out.AutoSwapSlotName + *out = new(string) + **out = **in + } + if in.ContainerRegistryManagedIdentityClientID != nil { + in, out := &in.ContainerRegistryManagedIdentityClientID, &out.ContainerRegistryManagedIdentityClientID + *out = new(string) + **out = **in + } + if in.ContainerRegistryUseManagedIdentity != nil { + in, out := &in.ContainerRegistryUseManagedIdentity, &out.ContainerRegistryUseManagedIdentity + *out = new(bool) + **out = **in + } + if in.Cors != nil { + in, out := &in.Cors, &out.Cors + *out = new(LinuxWebAppSlotSiteConfigCorsParameters) + (*in).DeepCopyInto(*out) + } + if in.DefaultDocuments != nil { + in, out := &in.DefaultDocuments, &out.DefaultDocuments + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = 
**in + } + } + } + if in.FtpsState != nil { + in, out := &in.FtpsState, &out.FtpsState + *out = new(string) + **out = **in + } + if in.HealthCheckEvictionTimeInMin != nil { + in, out := &in.HealthCheckEvictionTimeInMin, &out.HealthCheckEvictionTimeInMin + *out = new(float64) + **out = **in + } + if in.HealthCheckPath != nil { + in, out := &in.HealthCheckPath, &out.HealthCheckPath + *out = new(string) + **out = **in + } + if in.Http2Enabled != nil { + in, out := &in.Http2Enabled, &out.Http2Enabled + *out = new(bool) + **out = **in + } + if in.IPRestriction != nil { + in, out := &in.IPRestriction, &out.IPRestriction + *out = make([]LinuxWebAppSlotSiteConfigIPRestrictionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPRestrictionDefaultAction != nil { + in, out := &in.IPRestrictionDefaultAction, &out.IPRestrictionDefaultAction + *out = new(string) + **out = **in + } + if in.LoadBalancingMode != nil { + in, out := &in.LoadBalancingMode, &out.LoadBalancingMode + *out = new(string) + **out = **in + } + if in.LocalMySQLEnabled != nil { + in, out := &in.LocalMySQLEnabled, &out.LocalMySQLEnabled + *out = new(bool) + **out = **in + } + if in.ManagedPipelineMode != nil { + in, out := &in.ManagedPipelineMode, &out.ManagedPipelineMode + *out = new(string) + **out = **in + } + if in.MinimumTLSVersion != nil { + in, out := &in.MinimumTLSVersion, &out.MinimumTLSVersion + *out = new(string) + **out = **in + } + if in.RemoteDebuggingEnabled != nil { + in, out := &in.RemoteDebuggingEnabled, &out.RemoteDebuggingEnabled + *out = new(bool) + **out = **in + } + if in.RemoteDebuggingVersion != nil { + in, out := &in.RemoteDebuggingVersion, &out.RemoteDebuggingVersion + *out = new(string) + **out = **in + } + if in.ScmIPRestriction != nil { + in, out := &in.ScmIPRestriction, &out.ScmIPRestriction + *out = make([]LinuxWebAppSlotSiteConfigScmIPRestrictionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } 
+ if in.ScmIPRestrictionDefaultAction != nil { + in, out := &in.ScmIPRestrictionDefaultAction, &out.ScmIPRestrictionDefaultAction + *out = new(string) + **out = **in + } + if in.ScmMinimumTLSVersion != nil { + in, out := &in.ScmMinimumTLSVersion, &out.ScmMinimumTLSVersion + *out = new(string) + **out = **in + } + if in.ScmUseMainIPRestriction != nil { + in, out := &in.ScmUseMainIPRestriction, &out.ScmUseMainIPRestriction + *out = new(bool) + **out = **in + } + if in.Use32BitWorker != nil { + in, out := &in.Use32BitWorker, &out.Use32BitWorker + *out = new(bool) + **out = **in + } + if in.VnetRouteAllEnabled != nil { + in, out := &in.VnetRouteAllEnabled, &out.VnetRouteAllEnabled + *out = new(bool) + **out = **in + } + if in.WebsocketsEnabled != nil { + in, out := &in.WebsocketsEnabled, &out.WebsocketsEnabled + *out = new(bool) + **out = **in + } + if in.WorkerCount != nil { + in, out := &in.WorkerCount, &out.WorkerCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotSiteConfigParameters. +func (in *LinuxWebAppSlotSiteConfigParameters) DeepCopy() *LinuxWebAppSlotSiteConfigParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotSiteConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSlotSiteConfigScmIPRestrictionHeadersInitParameters) DeepCopyInto(out *LinuxWebAppSlotSiteConfigScmIPRestrictionHeadersInitParameters) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotSiteConfigScmIPRestrictionHeadersInitParameters. +func (in *LinuxWebAppSlotSiteConfigScmIPRestrictionHeadersInitParameters) DeepCopy() *LinuxWebAppSlotSiteConfigScmIPRestrictionHeadersInitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotSiteConfigScmIPRestrictionHeadersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSlotSiteConfigScmIPRestrictionHeadersObservation) DeepCopyInto(out *LinuxWebAppSlotSiteConfigScmIPRestrictionHeadersObservation) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotSiteConfigScmIPRestrictionHeadersObservation. +func (in *LinuxWebAppSlotSiteConfigScmIPRestrictionHeadersObservation) DeepCopy() *LinuxWebAppSlotSiteConfigScmIPRestrictionHeadersObservation { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotSiteConfigScmIPRestrictionHeadersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSlotSiteConfigScmIPRestrictionHeadersParameters) DeepCopyInto(out *LinuxWebAppSlotSiteConfigScmIPRestrictionHeadersParameters) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotSiteConfigScmIPRestrictionHeadersParameters. +func (in *LinuxWebAppSlotSiteConfigScmIPRestrictionHeadersParameters) DeepCopy() *LinuxWebAppSlotSiteConfigScmIPRestrictionHeadersParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotSiteConfigScmIPRestrictionHeadersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSlotSiteConfigScmIPRestrictionInitParameters) DeepCopyInto(out *LinuxWebAppSlotSiteConfigScmIPRestrictionInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]LinuxWebAppSlotSiteConfigScmIPRestrictionHeadersInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetIDRef != nil { + in, out := &in.VirtualNetworkSubnetIDRef, &out.VirtualNetworkSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkSubnetIDSelector != nil { + in, out := &in.VirtualNetworkSubnetIDSelector, &out.VirtualNetworkSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotSiteConfigScmIPRestrictionInitParameters. 
+func (in *LinuxWebAppSlotSiteConfigScmIPRestrictionInitParameters) DeepCopy() *LinuxWebAppSlotSiteConfigScmIPRestrictionInitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotSiteConfigScmIPRestrictionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSlotSiteConfigScmIPRestrictionObservation) DeepCopyInto(out *LinuxWebAppSlotSiteConfigScmIPRestrictionObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]LinuxWebAppSlotSiteConfigScmIPRestrictionHeadersObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotSiteConfigScmIPRestrictionObservation. 
+func (in *LinuxWebAppSlotSiteConfigScmIPRestrictionObservation) DeepCopy() *LinuxWebAppSlotSiteConfigScmIPRestrictionObservation { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotSiteConfigScmIPRestrictionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSlotSiteConfigScmIPRestrictionParameters) DeepCopyInto(out *LinuxWebAppSlotSiteConfigScmIPRestrictionParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]LinuxWebAppSlotSiteConfigScmIPRestrictionHeadersParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetIDRef != nil { + in, out := &in.VirtualNetworkSubnetIDRef, &out.VirtualNetworkSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkSubnetIDSelector != nil { + in, out := &in.VirtualNetworkSubnetIDSelector, &out.VirtualNetworkSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, 
creating a new LinuxWebAppSlotSiteConfigScmIPRestrictionParameters. +func (in *LinuxWebAppSlotSiteConfigScmIPRestrictionParameters) DeepCopy() *LinuxWebAppSlotSiteConfigScmIPRestrictionParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotSiteConfigScmIPRestrictionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSlotSiteCredentialInitParameters) DeepCopyInto(out *LinuxWebAppSlotSiteCredentialInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotSiteCredentialInitParameters. +func (in *LinuxWebAppSlotSiteCredentialInitParameters) DeepCopy() *LinuxWebAppSlotSiteCredentialInitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotSiteCredentialInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSlotSiteCredentialObservation) DeepCopyInto(out *LinuxWebAppSlotSiteCredentialObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Password != nil { + in, out := &in.Password, &out.Password + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotSiteCredentialObservation. +func (in *LinuxWebAppSlotSiteCredentialObservation) DeepCopy() *LinuxWebAppSlotSiteCredentialObservation { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotSiteCredentialObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSlotSiteCredentialParameters) DeepCopyInto(out *LinuxWebAppSlotSiteCredentialParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotSiteCredentialParameters. +func (in *LinuxWebAppSlotSiteCredentialParameters) DeepCopy() *LinuxWebAppSlotSiteCredentialParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotSiteCredentialParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSlotSpec) DeepCopyInto(out *LinuxWebAppSlotSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotSpec. +func (in *LinuxWebAppSlotSpec) DeepCopy() *LinuxWebAppSlotSpec { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSlotStatus) DeepCopyInto(out *LinuxWebAppSlotStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotStatus. +func (in *LinuxWebAppSlotStatus) DeepCopy() *LinuxWebAppSlotStatus { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppSlotStorageAccountInitParameters) DeepCopyInto(out *LinuxWebAppSlotStorageAccountInitParameters) { + *out = *in + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } + if in.MountPath != nil { + in, out := &in.MountPath, &out.MountPath + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ShareName != nil { + in, out := &in.ShareName, &out.ShareName + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotStorageAccountInitParameters. +func (in *LinuxWebAppSlotStorageAccountInitParameters) DeepCopy() *LinuxWebAppSlotStorageAccountInitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotStorageAccountInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSlotStorageAccountObservation) DeepCopyInto(out *LinuxWebAppSlotStorageAccountObservation) { + *out = *in + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } + if in.MountPath != nil { + in, out := &in.MountPath, &out.MountPath + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ShareName != nil { + in, out := &in.ShareName, &out.ShareName + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotStorageAccountObservation. 
+func (in *LinuxWebAppSlotStorageAccountObservation) DeepCopy() *LinuxWebAppSlotStorageAccountObservation { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotStorageAccountObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSlotStorageAccountParameters) DeepCopyInto(out *LinuxWebAppSlotStorageAccountParameters) { + *out = *in + out.AccessKeySecretRef = in.AccessKeySecretRef + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } + if in.MountPath != nil { + in, out := &in.MountPath, &out.MountPath + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ShareName != nil { + in, out := &in.ShareName, &out.ShareName + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSlotStorageAccountParameters. +func (in *LinuxWebAppSlotStorageAccountParameters) DeepCopy() *LinuxWebAppSlotStorageAccountParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppSlotStorageAccountParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppSpec) DeepCopyInto(out *LinuxWebAppSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppSpec. 
+func (in *LinuxWebAppSpec) DeepCopy() *LinuxWebAppSpec { + if in == nil { + return nil + } + out := new(LinuxWebAppSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppStatus) DeepCopyInto(out *LinuxWebAppStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppStatus. +func (in *LinuxWebAppStatus) DeepCopy() *LinuxWebAppStatus { + if in == nil { + return nil + } + out := new(LinuxWebAppStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppStickySettingsInitParameters) DeepCopyInto(out *LinuxWebAppStickySettingsInitParameters) { + *out = *in + if in.AppSettingNames != nil { + in, out := &in.AppSettingNames, &out.AppSettingNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ConnectionStringNames != nil { + in, out := &in.ConnectionStringNames, &out.ConnectionStringNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppStickySettingsInitParameters. +func (in *LinuxWebAppStickySettingsInitParameters) DeepCopy() *LinuxWebAppStickySettingsInitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppStickySettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppStickySettingsObservation) DeepCopyInto(out *LinuxWebAppStickySettingsObservation) { + *out = *in + if in.AppSettingNames != nil { + in, out := &in.AppSettingNames, &out.AppSettingNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ConnectionStringNames != nil { + in, out := &in.ConnectionStringNames, &out.ConnectionStringNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppStickySettingsObservation. +func (in *LinuxWebAppStickySettingsObservation) DeepCopy() *LinuxWebAppStickySettingsObservation { + if in == nil { + return nil + } + out := new(LinuxWebAppStickySettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppStickySettingsParameters) DeepCopyInto(out *LinuxWebAppStickySettingsParameters) { + *out = *in + if in.AppSettingNames != nil { + in, out := &in.AppSettingNames, &out.AppSettingNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ConnectionStringNames != nil { + in, out := &in.ConnectionStringNames, &out.ConnectionStringNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppStickySettingsParameters. 
+func (in *LinuxWebAppStickySettingsParameters) DeepCopy() *LinuxWebAppStickySettingsParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppStickySettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppStorageAccountInitParameters) DeepCopyInto(out *LinuxWebAppStorageAccountInitParameters) { + *out = *in + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } + if in.MountPath != nil { + in, out := &in.MountPath, &out.MountPath + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ShareName != nil { + in, out := &in.ShareName, &out.ShareName + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppStorageAccountInitParameters. +func (in *LinuxWebAppStorageAccountInitParameters) DeepCopy() *LinuxWebAppStorageAccountInitParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppStorageAccountInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LinuxWebAppStorageAccountObservation) DeepCopyInto(out *LinuxWebAppStorageAccountObservation) { + *out = *in + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } + if in.MountPath != nil { + in, out := &in.MountPath, &out.MountPath + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ShareName != nil { + in, out := &in.ShareName, &out.ShareName + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppStorageAccountObservation. +func (in *LinuxWebAppStorageAccountObservation) DeepCopy() *LinuxWebAppStorageAccountObservation { + if in == nil { + return nil + } + out := new(LinuxWebAppStorageAccountObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxWebAppStorageAccountParameters) DeepCopyInto(out *LinuxWebAppStorageAccountParameters) { + *out = *in + out.AccessKeySecretRef = in.AccessKeySecretRef + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } + if in.MountPath != nil { + in, out := &in.MountPath, &out.MountPath + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ShareName != nil { + in, out := &in.ShareName, &out.ShareName + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxWebAppStorageAccountParameters. 
+func (in *LinuxWebAppStorageAccountParameters) DeepCopy() *LinuxWebAppStorageAccountParameters { + if in == nil { + return nil + } + out := new(LinuxWebAppStorageAccountParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoginInitParameters) DeepCopyInto(out *LoginInitParameters) { + *out = *in + if in.AllowedExternalRedirectUrls != nil { + in, out := &in.AllowedExternalRedirectUrls, &out.AllowedExternalRedirectUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CookieExpirationConvention != nil { + in, out := &in.CookieExpirationConvention, &out.CookieExpirationConvention + *out = new(string) + **out = **in + } + if in.CookieExpirationTime != nil { + in, out := &in.CookieExpirationTime, &out.CookieExpirationTime + *out = new(string) + **out = **in + } + if in.LogoutEndpoint != nil { + in, out := &in.LogoutEndpoint, &out.LogoutEndpoint + *out = new(string) + **out = **in + } + if in.NonceExpirationTime != nil { + in, out := &in.NonceExpirationTime, &out.NonceExpirationTime + *out = new(string) + **out = **in + } + if in.PreserveURLFragmentsForLogins != nil { + in, out := &in.PreserveURLFragmentsForLogins, &out.PreserveURLFragmentsForLogins + *out = new(bool) + **out = **in + } + if in.TokenRefreshExtensionTime != nil { + in, out := &in.TokenRefreshExtensionTime, &out.TokenRefreshExtensionTime + *out = new(float64) + **out = **in + } + if in.TokenStoreEnabled != nil { + in, out := &in.TokenStoreEnabled, &out.TokenStoreEnabled + *out = new(bool) + **out = **in + } + if in.TokenStorePath != nil { + in, out := &in.TokenStorePath, &out.TokenStorePath + *out = new(string) + **out = **in + } + if in.TokenStoreSASSettingName != nil { + in, out := &in.TokenStoreSASSettingName, &out.TokenStoreSASSettingName + *out = 
new(string) + **out = **in + } + if in.ValidateNonce != nil { + in, out := &in.ValidateNonce, &out.ValidateNonce + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoginInitParameters. +func (in *LoginInitParameters) DeepCopy() *LoginInitParameters { + if in == nil { + return nil + } + out := new(LoginInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoginObservation) DeepCopyInto(out *LoginObservation) { + *out = *in + if in.AllowedExternalRedirectUrls != nil { + in, out := &in.AllowedExternalRedirectUrls, &out.AllowedExternalRedirectUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CookieExpirationConvention != nil { + in, out := &in.CookieExpirationConvention, &out.CookieExpirationConvention + *out = new(string) + **out = **in + } + if in.CookieExpirationTime != nil { + in, out := &in.CookieExpirationTime, &out.CookieExpirationTime + *out = new(string) + **out = **in + } + if in.LogoutEndpoint != nil { + in, out := &in.LogoutEndpoint, &out.LogoutEndpoint + *out = new(string) + **out = **in + } + if in.NonceExpirationTime != nil { + in, out := &in.NonceExpirationTime, &out.NonceExpirationTime + *out = new(string) + **out = **in + } + if in.PreserveURLFragmentsForLogins != nil { + in, out := &in.PreserveURLFragmentsForLogins, &out.PreserveURLFragmentsForLogins + *out = new(bool) + **out = **in + } + if in.TokenRefreshExtensionTime != nil { + in, out := &in.TokenRefreshExtensionTime, &out.TokenRefreshExtensionTime + *out = new(float64) + **out = **in + } + if in.TokenStoreEnabled != nil { + in, out := &in.TokenStoreEnabled, &out.TokenStoreEnabled + *out = new(bool) + **out = **in + } + if in.TokenStorePath != nil { + in, out := 
&in.TokenStorePath, &out.TokenStorePath + *out = new(string) + **out = **in + } + if in.TokenStoreSASSettingName != nil { + in, out := &in.TokenStoreSASSettingName, &out.TokenStoreSASSettingName + *out = new(string) + **out = **in + } + if in.ValidateNonce != nil { + in, out := &in.ValidateNonce, &out.ValidateNonce + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoginObservation. +func (in *LoginObservation) DeepCopy() *LoginObservation { + if in == nil { + return nil + } + out := new(LoginObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoginParameters) DeepCopyInto(out *LoginParameters) { + *out = *in + if in.AllowedExternalRedirectUrls != nil { + in, out := &in.AllowedExternalRedirectUrls, &out.AllowedExternalRedirectUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CookieExpirationConvention != nil { + in, out := &in.CookieExpirationConvention, &out.CookieExpirationConvention + *out = new(string) + **out = **in + } + if in.CookieExpirationTime != nil { + in, out := &in.CookieExpirationTime, &out.CookieExpirationTime + *out = new(string) + **out = **in + } + if in.LogoutEndpoint != nil { + in, out := &in.LogoutEndpoint, &out.LogoutEndpoint + *out = new(string) + **out = **in + } + if in.NonceExpirationTime != nil { + in, out := &in.NonceExpirationTime, &out.NonceExpirationTime + *out = new(string) + **out = **in + } + if in.PreserveURLFragmentsForLogins != nil { + in, out := &in.PreserveURLFragmentsForLogins, &out.PreserveURLFragmentsForLogins + *out = new(bool) + **out = **in + } + if in.TokenRefreshExtensionTime != nil { + in, out := &in.TokenRefreshExtensionTime, &out.TokenRefreshExtensionTime + *out = new(float64) + **out 
= **in + } + if in.TokenStoreEnabled != nil { + in, out := &in.TokenStoreEnabled, &out.TokenStoreEnabled + *out = new(bool) + **out = **in + } + if in.TokenStorePath != nil { + in, out := &in.TokenStorePath, &out.TokenStorePath + *out = new(string) + **out = **in + } + if in.TokenStoreSASSettingName != nil { + in, out := &in.TokenStoreSASSettingName, &out.TokenStoreSASSettingName + *out = new(string) + **out = **in + } + if in.ValidateNonce != nil { + in, out := &in.ValidateNonce, &out.ValidateNonce + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoginParameters. +func (in *LoginParameters) DeepCopy() *LoginParameters { + if in == nil { + return nil + } + out := new(LoginParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogsApplicationLogsAzureBlobStorageInitParameters) DeepCopyInto(out *LogsApplicationLogsAzureBlobStorageInitParameters) { + *out = *in + if in.Level != nil { + in, out := &in.Level, &out.Level + *out = new(string) + **out = **in + } + if in.RetentionInDays != nil { + in, out := &in.RetentionInDays, &out.RetentionInDays + *out = new(float64) + **out = **in + } + if in.SASURL != nil { + in, out := &in.SASURL, &out.SASURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogsApplicationLogsAzureBlobStorageInitParameters. +func (in *LogsApplicationLogsAzureBlobStorageInitParameters) DeepCopy() *LogsApplicationLogsAzureBlobStorageInitParameters { + if in == nil { + return nil + } + out := new(LogsApplicationLogsAzureBlobStorageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LogsApplicationLogsAzureBlobStorageObservation) DeepCopyInto(out *LogsApplicationLogsAzureBlobStorageObservation) { + *out = *in + if in.Level != nil { + in, out := &in.Level, &out.Level + *out = new(string) + **out = **in + } + if in.RetentionInDays != nil { + in, out := &in.RetentionInDays, &out.RetentionInDays + *out = new(float64) + **out = **in + } + if in.SASURL != nil { + in, out := &in.SASURL, &out.SASURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogsApplicationLogsAzureBlobStorageObservation. +func (in *LogsApplicationLogsAzureBlobStorageObservation) DeepCopy() *LogsApplicationLogsAzureBlobStorageObservation { + if in == nil { + return nil + } + out := new(LogsApplicationLogsAzureBlobStorageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogsApplicationLogsAzureBlobStorageParameters) DeepCopyInto(out *LogsApplicationLogsAzureBlobStorageParameters) { + *out = *in + if in.Level != nil { + in, out := &in.Level, &out.Level + *out = new(string) + **out = **in + } + if in.RetentionInDays != nil { + in, out := &in.RetentionInDays, &out.RetentionInDays + *out = new(float64) + **out = **in + } + if in.SASURL != nil { + in, out := &in.SASURL, &out.SASURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogsApplicationLogsAzureBlobStorageParameters. +func (in *LogsApplicationLogsAzureBlobStorageParameters) DeepCopy() *LogsApplicationLogsAzureBlobStorageParameters { + if in == nil { + return nil + } + out := new(LogsApplicationLogsAzureBlobStorageParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LogsApplicationLogsInitParameters) DeepCopyInto(out *LogsApplicationLogsInitParameters) { + *out = *in + if in.AzureBlobStorage != nil { + in, out := &in.AzureBlobStorage, &out.AzureBlobStorage + *out = new(ApplicationLogsAzureBlobStorageInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FileSystemLevel != nil { + in, out := &in.FileSystemLevel, &out.FileSystemLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogsApplicationLogsInitParameters. +func (in *LogsApplicationLogsInitParameters) DeepCopy() *LogsApplicationLogsInitParameters { + if in == nil { + return nil + } + out := new(LogsApplicationLogsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogsApplicationLogsObservation) DeepCopyInto(out *LogsApplicationLogsObservation) { + *out = *in + if in.AzureBlobStorage != nil { + in, out := &in.AzureBlobStorage, &out.AzureBlobStorage + *out = new(ApplicationLogsAzureBlobStorageObservation) + (*in).DeepCopyInto(*out) + } + if in.FileSystemLevel != nil { + in, out := &in.FileSystemLevel, &out.FileSystemLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogsApplicationLogsObservation. +func (in *LogsApplicationLogsObservation) DeepCopy() *LogsApplicationLogsObservation { + if in == nil { + return nil + } + out := new(LogsApplicationLogsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LogsApplicationLogsParameters) DeepCopyInto(out *LogsApplicationLogsParameters) { + *out = *in + if in.AzureBlobStorage != nil { + in, out := &in.AzureBlobStorage, &out.AzureBlobStorage + *out = new(ApplicationLogsAzureBlobStorageParameters) + (*in).DeepCopyInto(*out) + } + if in.FileSystemLevel != nil { + in, out := &in.FileSystemLevel, &out.FileSystemLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogsApplicationLogsParameters. +func (in *LogsApplicationLogsParameters) DeepCopy() *LogsApplicationLogsParameters { + if in == nil { + return nil + } + out := new(LogsApplicationLogsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogsHTTPLogsAzureBlobStorageInitParameters) DeepCopyInto(out *LogsHTTPLogsAzureBlobStorageInitParameters) { + *out = *in + if in.RetentionInDays != nil { + in, out := &in.RetentionInDays, &out.RetentionInDays + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogsHTTPLogsAzureBlobStorageInitParameters. +func (in *LogsHTTPLogsAzureBlobStorageInitParameters) DeepCopy() *LogsHTTPLogsAzureBlobStorageInitParameters { + if in == nil { + return nil + } + out := new(LogsHTTPLogsAzureBlobStorageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LogsHTTPLogsAzureBlobStorageObservation) DeepCopyInto(out *LogsHTTPLogsAzureBlobStorageObservation) { + *out = *in + if in.RetentionInDays != nil { + in, out := &in.RetentionInDays, &out.RetentionInDays + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogsHTTPLogsAzureBlobStorageObservation. +func (in *LogsHTTPLogsAzureBlobStorageObservation) DeepCopy() *LogsHTTPLogsAzureBlobStorageObservation { + if in == nil { + return nil + } + out := new(LogsHTTPLogsAzureBlobStorageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogsHTTPLogsAzureBlobStorageParameters) DeepCopyInto(out *LogsHTTPLogsAzureBlobStorageParameters) { + *out = *in + if in.RetentionInDays != nil { + in, out := &in.RetentionInDays, &out.RetentionInDays + *out = new(float64) + **out = **in + } + out.SASURLSecretRef = in.SASURLSecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogsHTTPLogsAzureBlobStorageParameters. +func (in *LogsHTTPLogsAzureBlobStorageParameters) DeepCopy() *LogsHTTPLogsAzureBlobStorageParameters { + if in == nil { + return nil + } + out := new(LogsHTTPLogsAzureBlobStorageParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LogsHTTPLogsFileSystemInitParameters) DeepCopyInto(out *LogsHTTPLogsFileSystemInitParameters) { + *out = *in + if in.RetentionInDays != nil { + in, out := &in.RetentionInDays, &out.RetentionInDays + *out = new(float64) + **out = **in + } + if in.RetentionInMb != nil { + in, out := &in.RetentionInMb, &out.RetentionInMb + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogsHTTPLogsFileSystemInitParameters. +func (in *LogsHTTPLogsFileSystemInitParameters) DeepCopy() *LogsHTTPLogsFileSystemInitParameters { + if in == nil { + return nil + } + out := new(LogsHTTPLogsFileSystemInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogsHTTPLogsFileSystemObservation) DeepCopyInto(out *LogsHTTPLogsFileSystemObservation) { + *out = *in + if in.RetentionInDays != nil { + in, out := &in.RetentionInDays, &out.RetentionInDays + *out = new(float64) + **out = **in + } + if in.RetentionInMb != nil { + in, out := &in.RetentionInMb, &out.RetentionInMb + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogsHTTPLogsFileSystemObservation. +func (in *LogsHTTPLogsFileSystemObservation) DeepCopy() *LogsHTTPLogsFileSystemObservation { + if in == nil { + return nil + } + out := new(LogsHTTPLogsFileSystemObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LogsHTTPLogsFileSystemParameters) DeepCopyInto(out *LogsHTTPLogsFileSystemParameters) { + *out = *in + if in.RetentionInDays != nil { + in, out := &in.RetentionInDays, &out.RetentionInDays + *out = new(float64) + **out = **in + } + if in.RetentionInMb != nil { + in, out := &in.RetentionInMb, &out.RetentionInMb + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogsHTTPLogsFileSystemParameters. +func (in *LogsHTTPLogsFileSystemParameters) DeepCopy() *LogsHTTPLogsFileSystemParameters { + if in == nil { + return nil + } + out := new(LogsHTTPLogsFileSystemParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogsHTTPLogsInitParameters) DeepCopyInto(out *LogsHTTPLogsInitParameters) { + *out = *in + if in.AzureBlobStorage != nil { + in, out := &in.AzureBlobStorage, &out.AzureBlobStorage + *out = new(LogsHTTPLogsAzureBlobStorageInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FileSystem != nil { + in, out := &in.FileSystem, &out.FileSystem + *out = new(HTTPLogsFileSystemInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogsHTTPLogsInitParameters. +func (in *LogsHTTPLogsInitParameters) DeepCopy() *LogsHTTPLogsInitParameters { + if in == nil { + return nil + } + out := new(LogsHTTPLogsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LogsHTTPLogsObservation) DeepCopyInto(out *LogsHTTPLogsObservation) { + *out = *in + if in.AzureBlobStorage != nil { + in, out := &in.AzureBlobStorage, &out.AzureBlobStorage + *out = new(LogsHTTPLogsAzureBlobStorageObservation) + (*in).DeepCopyInto(*out) + } + if in.FileSystem != nil { + in, out := &in.FileSystem, &out.FileSystem + *out = new(HTTPLogsFileSystemObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogsHTTPLogsObservation. +func (in *LogsHTTPLogsObservation) DeepCopy() *LogsHTTPLogsObservation { + if in == nil { + return nil + } + out := new(LogsHTTPLogsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogsHTTPLogsParameters) DeepCopyInto(out *LogsHTTPLogsParameters) { + *out = *in + if in.AzureBlobStorage != nil { + in, out := &in.AzureBlobStorage, &out.AzureBlobStorage + *out = new(LogsHTTPLogsAzureBlobStorageParameters) + (*in).DeepCopyInto(*out) + } + if in.FileSystem != nil { + in, out := &in.FileSystem, &out.FileSystem + *out = new(HTTPLogsFileSystemParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogsHTTPLogsParameters. +func (in *LogsHTTPLogsParameters) DeepCopy() *LogsHTTPLogsParameters { + if in == nil { + return nil + } + out := new(LogsHTTPLogsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LogsInitParameters) DeepCopyInto(out *LogsInitParameters) { + *out = *in + if in.ApplicationLogs != nil { + in, out := &in.ApplicationLogs, &out.ApplicationLogs + *out = new(ApplicationLogsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DetailedErrorMessages != nil { + in, out := &in.DetailedErrorMessages, &out.DetailedErrorMessages + *out = new(bool) + **out = **in + } + if in.FailedRequestTracing != nil { + in, out := &in.FailedRequestTracing, &out.FailedRequestTracing + *out = new(bool) + **out = **in + } + if in.HTTPLogs != nil { + in, out := &in.HTTPLogs, &out.HTTPLogs + *out = new(HTTPLogsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogsInitParameters. +func (in *LogsInitParameters) DeepCopy() *LogsInitParameters { + if in == nil { + return nil + } + out := new(LogsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogsObservation) DeepCopyInto(out *LogsObservation) { + *out = *in + if in.ApplicationLogs != nil { + in, out := &in.ApplicationLogs, &out.ApplicationLogs + *out = new(ApplicationLogsObservation) + (*in).DeepCopyInto(*out) + } + if in.DetailedErrorMessages != nil { + in, out := &in.DetailedErrorMessages, &out.DetailedErrorMessages + *out = new(bool) + **out = **in + } + if in.FailedRequestTracing != nil { + in, out := &in.FailedRequestTracing, &out.FailedRequestTracing + *out = new(bool) + **out = **in + } + if in.HTTPLogs != nil { + in, out := &in.HTTPLogs, &out.HTTPLogs + *out = new(HTTPLogsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogsObservation. 
+func (in *LogsObservation) DeepCopy() *LogsObservation { + if in == nil { + return nil + } + out := new(LogsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogsParameters) DeepCopyInto(out *LogsParameters) { + *out = *in + if in.ApplicationLogs != nil { + in, out := &in.ApplicationLogs, &out.ApplicationLogs + *out = new(ApplicationLogsParameters) + (*in).DeepCopyInto(*out) + } + if in.DetailedErrorMessages != nil { + in, out := &in.DetailedErrorMessages, &out.DetailedErrorMessages + *out = new(bool) + **out = **in + } + if in.FailedRequestTracing != nil { + in, out := &in.FailedRequestTracing, &out.FailedRequestTracing + *out = new(bool) + **out = **in + } + if in.HTTPLogs != nil { + in, out := &in.HTTPLogs, &out.HTTPLogs + *out = new(HTTPLogsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogsParameters. +func (in *LogsParameters) DeepCopy() *LogsParameters { + if in == nil { + return nil + } + out := new(LogsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MicrosoftInitParameters) DeepCopyInto(out *MicrosoftInitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MicrosoftInitParameters. 
+func (in *MicrosoftInitParameters) DeepCopy() *MicrosoftInitParameters { + if in == nil { + return nil + } + out := new(MicrosoftInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MicrosoftObservation) DeepCopyInto(out *MicrosoftObservation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MicrosoftObservation. +func (in *MicrosoftObservation) DeepCopy() *MicrosoftObservation { + if in == nil { + return nil + } + out := new(MicrosoftObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MicrosoftParameters) DeepCopyInto(out *MicrosoftParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + out.ClientSecretSecretRef = in.ClientSecretSecretRef + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MicrosoftParameters. 
+func (in *MicrosoftParameters) DeepCopy() *MicrosoftParameters { + if in == nil { + return nil + } + out := new(MicrosoftParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MicrosoftV2InitParameters) DeepCopyInto(out *MicrosoftV2InitParameters) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MicrosoftV2InitParameters. +func (in *MicrosoftV2InitParameters) DeepCopy() *MicrosoftV2InitParameters { + if in == nil { + return nil + } + out := new(MicrosoftV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MicrosoftV2Observation) DeepCopyInto(out *MicrosoftV2Observation) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MicrosoftV2Observation. +func (in *MicrosoftV2Observation) DeepCopy() *MicrosoftV2Observation { + if in == nil { + return nil + } + out := new(MicrosoftV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MicrosoftV2Parameters) DeepCopyInto(out *MicrosoftV2Parameters) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MicrosoftV2Parameters. +func (in *MicrosoftV2Parameters) DeepCopy() *MicrosoftV2Parameters { + if in == nil { + return nil + } + out := new(MicrosoftV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RequestsInitParameters) DeepCopyInto(out *RequestsInitParameters) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestsInitParameters. +func (in *RequestsInitParameters) DeepCopy() *RequestsInitParameters { + if in == nil { + return nil + } + out := new(RequestsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RequestsObservation) DeepCopyInto(out *RequestsObservation) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestsObservation. +func (in *RequestsObservation) DeepCopy() *RequestsObservation { + if in == nil { + return nil + } + out := new(RequestsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RequestsParameters) DeepCopyInto(out *RequestsParameters) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestsParameters. +func (in *RequestsParameters) DeepCopy() *RequestsParameters { + if in == nil { + return nil + } + out := new(RequestsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScheduleInitParameters) DeepCopyInto(out *ScheduleInitParameters) { + *out = *in + if in.FrequencyInterval != nil { + in, out := &in.FrequencyInterval, &out.FrequencyInterval + *out = new(float64) + **out = **in + } + if in.FrequencyUnit != nil { + in, out := &in.FrequencyUnit, &out.FrequencyUnit + *out = new(string) + **out = **in + } + if in.KeepAtLeastOneBackup != nil { + in, out := &in.KeepAtLeastOneBackup, &out.KeepAtLeastOneBackup + *out = new(bool) + **out = **in + } + if in.RetentionPeriodDays != nil { + in, out := &in.RetentionPeriodDays, &out.RetentionPeriodDays + *out = new(float64) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleInitParameters. +func (in *ScheduleInitParameters) DeepCopy() *ScheduleInitParameters { + if in == nil { + return nil + } + out := new(ScheduleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScheduleObservation) DeepCopyInto(out *ScheduleObservation) { + *out = *in + if in.FrequencyInterval != nil { + in, out := &in.FrequencyInterval, &out.FrequencyInterval + *out = new(float64) + **out = **in + } + if in.FrequencyUnit != nil { + in, out := &in.FrequencyUnit, &out.FrequencyUnit + *out = new(string) + **out = **in + } + if in.KeepAtLeastOneBackup != nil { + in, out := &in.KeepAtLeastOneBackup, &out.KeepAtLeastOneBackup + *out = new(bool) + **out = **in + } + if in.LastExecutionTime != nil { + in, out := &in.LastExecutionTime, &out.LastExecutionTime + *out = new(string) + **out = **in + } + if in.RetentionPeriodDays != nil { + in, out := &in.RetentionPeriodDays, &out.RetentionPeriodDays + *out = new(float64) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleObservation. +func (in *ScheduleObservation) DeepCopy() *ScheduleObservation { + if in == nil { + return nil + } + out := new(ScheduleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScheduleParameters) DeepCopyInto(out *ScheduleParameters) { + *out = *in + if in.FrequencyInterval != nil { + in, out := &in.FrequencyInterval, &out.FrequencyInterval + *out = new(float64) + **out = **in + } + if in.FrequencyUnit != nil { + in, out := &in.FrequencyUnit, &out.FrequencyUnit + *out = new(string) + **out = **in + } + if in.KeepAtLeastOneBackup != nil { + in, out := &in.KeepAtLeastOneBackup, &out.KeepAtLeastOneBackup + *out = new(bool) + **out = **in + } + if in.RetentionPeriodDays != nil { + in, out := &in.RetentionPeriodDays, &out.RetentionPeriodDays + *out = new(float64) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleParameters. +func (in *ScheduleParameters) DeepCopy() *ScheduleParameters { + if in == nil { + return nil + } + out := new(ScheduleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScmIPRestrictionHeadersInitParameters) DeepCopyInto(out *ScmIPRestrictionHeadersInitParameters) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScmIPRestrictionHeadersInitParameters. +func (in *ScmIPRestrictionHeadersInitParameters) DeepCopy() *ScmIPRestrictionHeadersInitParameters { + if in == nil { + return nil + } + out := new(ScmIPRestrictionHeadersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScmIPRestrictionHeadersObservation) DeepCopyInto(out *ScmIPRestrictionHeadersObservation) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScmIPRestrictionHeadersObservation. +func (in *ScmIPRestrictionHeadersObservation) DeepCopy() *ScmIPRestrictionHeadersObservation { + if in == nil { + return nil + } + out := new(ScmIPRestrictionHeadersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScmIPRestrictionHeadersParameters) DeepCopyInto(out *ScmIPRestrictionHeadersParameters) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScmIPRestrictionHeadersParameters. +func (in *ScmIPRestrictionHeadersParameters) DeepCopy() *ScmIPRestrictionHeadersParameters { + if in == nil { + return nil + } + out := new(ScmIPRestrictionHeadersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScmIPRestrictionInitParameters) DeepCopyInto(out *ScmIPRestrictionInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]ScmIPRestrictionHeadersInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetIDRef != nil { + in, out := &in.VirtualNetworkSubnetIDRef, &out.VirtualNetworkSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkSubnetIDSelector != nil { + in, out := &in.VirtualNetworkSubnetIDSelector, &out.VirtualNetworkSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScmIPRestrictionInitParameters. +func (in *ScmIPRestrictionInitParameters) DeepCopy() *ScmIPRestrictionInitParameters { + if in == nil { + return nil + } + out := new(ScmIPRestrictionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScmIPRestrictionObservation) DeepCopyInto(out *ScmIPRestrictionObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]ScmIPRestrictionHeadersObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScmIPRestrictionObservation. +func (in *ScmIPRestrictionObservation) DeepCopy() *ScmIPRestrictionObservation { + if in == nil { + return nil + } + out := new(ScmIPRestrictionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScmIPRestrictionParameters) DeepCopyInto(out *ScmIPRestrictionParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]ScmIPRestrictionHeadersParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetIDRef != nil { + in, out := &in.VirtualNetworkSubnetIDRef, &out.VirtualNetworkSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkSubnetIDSelector != nil { + in, out := &in.VirtualNetworkSubnetIDSelector, &out.VirtualNetworkSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScmIPRestrictionParameters. +func (in *ScmIPRestrictionParameters) DeepCopy() *ScmIPRestrictionParameters { + if in == nil { + return nil + } + out := new(ScmIPRestrictionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SiteConfigAppServiceLogsInitParameters) DeepCopyInto(out *SiteConfigAppServiceLogsInitParameters) { + *out = *in + if in.DiskQuotaMb != nil { + in, out := &in.DiskQuotaMb, &out.DiskQuotaMb + *out = new(float64) + **out = **in + } + if in.RetentionPeriodDays != nil { + in, out := &in.RetentionPeriodDays, &out.RetentionPeriodDays + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteConfigAppServiceLogsInitParameters. +func (in *SiteConfigAppServiceLogsInitParameters) DeepCopy() *SiteConfigAppServiceLogsInitParameters { + if in == nil { + return nil + } + out := new(SiteConfigAppServiceLogsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SiteConfigAppServiceLogsObservation) DeepCopyInto(out *SiteConfigAppServiceLogsObservation) { + *out = *in + if in.DiskQuotaMb != nil { + in, out := &in.DiskQuotaMb, &out.DiskQuotaMb + *out = new(float64) + **out = **in + } + if in.RetentionPeriodDays != nil { + in, out := &in.RetentionPeriodDays, &out.RetentionPeriodDays + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteConfigAppServiceLogsObservation. +func (in *SiteConfigAppServiceLogsObservation) DeepCopy() *SiteConfigAppServiceLogsObservation { + if in == nil { + return nil + } + out := new(SiteConfigAppServiceLogsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SiteConfigAppServiceLogsParameters) DeepCopyInto(out *SiteConfigAppServiceLogsParameters) { + *out = *in + if in.DiskQuotaMb != nil { + in, out := &in.DiskQuotaMb, &out.DiskQuotaMb + *out = new(float64) + **out = **in + } + if in.RetentionPeriodDays != nil { + in, out := &in.RetentionPeriodDays, &out.RetentionPeriodDays + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteConfigAppServiceLogsParameters. +func (in *SiteConfigAppServiceLogsParameters) DeepCopy() *SiteConfigAppServiceLogsParameters { + if in == nil { + return nil + } + out := new(SiteConfigAppServiceLogsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SiteConfigApplicationStackInitParameters) DeepCopyInto(out *SiteConfigApplicationStackInitParameters) { + *out = *in + if in.Docker != nil { + in, out := &in.Docker, &out.Docker + *out = make([]ApplicationStackDockerInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DotnetVersion != nil { + in, out := &in.DotnetVersion, &out.DotnetVersion + *out = new(string) + **out = **in + } + if in.JavaVersion != nil { + in, out := &in.JavaVersion, &out.JavaVersion + *out = new(string) + **out = **in + } + if in.NodeVersion != nil { + in, out := &in.NodeVersion, &out.NodeVersion + *out = new(string) + **out = **in + } + if in.PowershellCoreVersion != nil { + in, out := &in.PowershellCoreVersion, &out.PowershellCoreVersion + *out = new(string) + **out = **in + } + if in.PythonVersion != nil { + in, out := &in.PythonVersion, &out.PythonVersion + *out = new(string) + **out = **in + } + if in.UseCustomRuntime != nil { + in, out := &in.UseCustomRuntime, &out.UseCustomRuntime + *out = new(bool) + **out = **in + } + if in.UseDotnetIsolatedRuntime != nil { + in, out := &in.UseDotnetIsolatedRuntime, 
&out.UseDotnetIsolatedRuntime + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteConfigApplicationStackInitParameters. +func (in *SiteConfigApplicationStackInitParameters) DeepCopy() *SiteConfigApplicationStackInitParameters { + if in == nil { + return nil + } + out := new(SiteConfigApplicationStackInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SiteConfigApplicationStackObservation) DeepCopyInto(out *SiteConfigApplicationStackObservation) { + *out = *in + if in.Docker != nil { + in, out := &in.Docker, &out.Docker + *out = make([]ApplicationStackDockerObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DotnetVersion != nil { + in, out := &in.DotnetVersion, &out.DotnetVersion + *out = new(string) + **out = **in + } + if in.JavaVersion != nil { + in, out := &in.JavaVersion, &out.JavaVersion + *out = new(string) + **out = **in + } + if in.NodeVersion != nil { + in, out := &in.NodeVersion, &out.NodeVersion + *out = new(string) + **out = **in + } + if in.PowershellCoreVersion != nil { + in, out := &in.PowershellCoreVersion, &out.PowershellCoreVersion + *out = new(string) + **out = **in + } + if in.PythonVersion != nil { + in, out := &in.PythonVersion, &out.PythonVersion + *out = new(string) + **out = **in + } + if in.UseCustomRuntime != nil { + in, out := &in.UseCustomRuntime, &out.UseCustomRuntime + *out = new(bool) + **out = **in + } + if in.UseDotnetIsolatedRuntime != nil { + in, out := &in.UseDotnetIsolatedRuntime, &out.UseDotnetIsolatedRuntime + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteConfigApplicationStackObservation. 
+func (in *SiteConfigApplicationStackObservation) DeepCopy() *SiteConfigApplicationStackObservation { + if in == nil { + return nil + } + out := new(SiteConfigApplicationStackObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SiteConfigApplicationStackParameters) DeepCopyInto(out *SiteConfigApplicationStackParameters) { + *out = *in + if in.Docker != nil { + in, out := &in.Docker, &out.Docker + *out = make([]ApplicationStackDockerParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DotnetVersion != nil { + in, out := &in.DotnetVersion, &out.DotnetVersion + *out = new(string) + **out = **in + } + if in.JavaVersion != nil { + in, out := &in.JavaVersion, &out.JavaVersion + *out = new(string) + **out = **in + } + if in.NodeVersion != nil { + in, out := &in.NodeVersion, &out.NodeVersion + *out = new(string) + **out = **in + } + if in.PowershellCoreVersion != nil { + in, out := &in.PowershellCoreVersion, &out.PowershellCoreVersion + *out = new(string) + **out = **in + } + if in.PythonVersion != nil { + in, out := &in.PythonVersion, &out.PythonVersion + *out = new(string) + **out = **in + } + if in.UseCustomRuntime != nil { + in, out := &in.UseCustomRuntime, &out.UseCustomRuntime + *out = new(bool) + **out = **in + } + if in.UseDotnetIsolatedRuntime != nil { + in, out := &in.UseDotnetIsolatedRuntime, &out.UseDotnetIsolatedRuntime + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteConfigApplicationStackParameters. 
+func (in *SiteConfigApplicationStackParameters) DeepCopy() *SiteConfigApplicationStackParameters { + if in == nil { + return nil + } + out := new(SiteConfigApplicationStackParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SiteConfigAutoHealSettingActionInitParameters) DeepCopyInto(out *SiteConfigAutoHealSettingActionInitParameters) { + *out = *in + if in.ActionType != nil { + in, out := &in.ActionType, &out.ActionType + *out = new(string) + **out = **in + } + if in.CustomAction != nil { + in, out := &in.CustomAction, &out.CustomAction + *out = new(CustomActionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MinimumProcessExecutionTime != nil { + in, out := &in.MinimumProcessExecutionTime, &out.MinimumProcessExecutionTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteConfigAutoHealSettingActionInitParameters. +func (in *SiteConfigAutoHealSettingActionInitParameters) DeepCopy() *SiteConfigAutoHealSettingActionInitParameters { + if in == nil { + return nil + } + out := new(SiteConfigAutoHealSettingActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SiteConfigAutoHealSettingActionObservation) DeepCopyInto(out *SiteConfigAutoHealSettingActionObservation) { + *out = *in + if in.ActionType != nil { + in, out := &in.ActionType, &out.ActionType + *out = new(string) + **out = **in + } + if in.CustomAction != nil { + in, out := &in.CustomAction, &out.CustomAction + *out = new(CustomActionObservation) + (*in).DeepCopyInto(*out) + } + if in.MinimumProcessExecutionTime != nil { + in, out := &in.MinimumProcessExecutionTime, &out.MinimumProcessExecutionTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteConfigAutoHealSettingActionObservation. +func (in *SiteConfigAutoHealSettingActionObservation) DeepCopy() *SiteConfigAutoHealSettingActionObservation { + if in == nil { + return nil + } + out := new(SiteConfigAutoHealSettingActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SiteConfigAutoHealSettingActionParameters) DeepCopyInto(out *SiteConfigAutoHealSettingActionParameters) { + *out = *in + if in.ActionType != nil { + in, out := &in.ActionType, &out.ActionType + *out = new(string) + **out = **in + } + if in.CustomAction != nil { + in, out := &in.CustomAction, &out.CustomAction + *out = new(CustomActionParameters) + (*in).DeepCopyInto(*out) + } + if in.MinimumProcessExecutionTime != nil { + in, out := &in.MinimumProcessExecutionTime, &out.MinimumProcessExecutionTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteConfigAutoHealSettingActionParameters. 
+func (in *SiteConfigAutoHealSettingActionParameters) DeepCopy() *SiteConfigAutoHealSettingActionParameters { + if in == nil { + return nil + } + out := new(SiteConfigAutoHealSettingActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SiteConfigAutoHealSettingInitParameters) DeepCopyInto(out *SiteConfigAutoHealSettingInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(AutoHealSettingActionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Trigger != nil { + in, out := &in.Trigger, &out.Trigger + *out = new(AutoHealSettingTriggerInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteConfigAutoHealSettingInitParameters. +func (in *SiteConfigAutoHealSettingInitParameters) DeepCopy() *SiteConfigAutoHealSettingInitParameters { + if in == nil { + return nil + } + out := new(SiteConfigAutoHealSettingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SiteConfigAutoHealSettingObservation) DeepCopyInto(out *SiteConfigAutoHealSettingObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(AutoHealSettingActionObservation) + (*in).DeepCopyInto(*out) + } + if in.Trigger != nil { + in, out := &in.Trigger, &out.Trigger + *out = new(AutoHealSettingTriggerObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteConfigAutoHealSettingObservation. 
+func (in *SiteConfigAutoHealSettingObservation) DeepCopy() *SiteConfigAutoHealSettingObservation { + if in == nil { + return nil + } + out := new(SiteConfigAutoHealSettingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SiteConfigAutoHealSettingParameters) DeepCopyInto(out *SiteConfigAutoHealSettingParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(AutoHealSettingActionParameters) + (*in).DeepCopyInto(*out) + } + if in.Trigger != nil { + in, out := &in.Trigger, &out.Trigger + *out = new(AutoHealSettingTriggerParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteConfigAutoHealSettingParameters. +func (in *SiteConfigAutoHealSettingParameters) DeepCopy() *SiteConfigAutoHealSettingParameters { + if in == nil { + return nil + } + out := new(SiteConfigAutoHealSettingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SiteConfigAutoHealSettingTriggerInitParameters) DeepCopyInto(out *SiteConfigAutoHealSettingTriggerInitParameters) { + *out = *in + if in.PrivateMemoryKb != nil { + in, out := &in.PrivateMemoryKb, &out.PrivateMemoryKb + *out = new(float64) + **out = **in + } + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = new(AutoHealSettingTriggerRequestsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SlowRequest != nil { + in, out := &in.SlowRequest, &out.SlowRequest + *out = new(AutoHealSettingTriggerSlowRequestInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StatusCode != nil { + in, out := &in.StatusCode, &out.StatusCode + *out = make([]AutoHealSettingTriggerStatusCodeInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteConfigAutoHealSettingTriggerInitParameters. +func (in *SiteConfigAutoHealSettingTriggerInitParameters) DeepCopy() *SiteConfigAutoHealSettingTriggerInitParameters { + if in == nil { + return nil + } + out := new(SiteConfigAutoHealSettingTriggerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SiteConfigAutoHealSettingTriggerObservation) DeepCopyInto(out *SiteConfigAutoHealSettingTriggerObservation) { + *out = *in + if in.PrivateMemoryKb != nil { + in, out := &in.PrivateMemoryKb, &out.PrivateMemoryKb + *out = new(float64) + **out = **in + } + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = new(AutoHealSettingTriggerRequestsObservation) + (*in).DeepCopyInto(*out) + } + if in.SlowRequest != nil { + in, out := &in.SlowRequest, &out.SlowRequest + *out = new(AutoHealSettingTriggerSlowRequestObservation) + (*in).DeepCopyInto(*out) + } + if in.StatusCode != nil { + in, out := &in.StatusCode, &out.StatusCode + *out = make([]AutoHealSettingTriggerStatusCodeObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteConfigAutoHealSettingTriggerObservation. +func (in *SiteConfigAutoHealSettingTriggerObservation) DeepCopy() *SiteConfigAutoHealSettingTriggerObservation { + if in == nil { + return nil + } + out := new(SiteConfigAutoHealSettingTriggerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SiteConfigAutoHealSettingTriggerParameters) DeepCopyInto(out *SiteConfigAutoHealSettingTriggerParameters) { + *out = *in + if in.PrivateMemoryKb != nil { + in, out := &in.PrivateMemoryKb, &out.PrivateMemoryKb + *out = new(float64) + **out = **in + } + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = new(AutoHealSettingTriggerRequestsParameters) + (*in).DeepCopyInto(*out) + } + if in.SlowRequest != nil { + in, out := &in.SlowRequest, &out.SlowRequest + *out = new(AutoHealSettingTriggerSlowRequestParameters) + (*in).DeepCopyInto(*out) + } + if in.StatusCode != nil { + in, out := &in.StatusCode, &out.StatusCode + *out = make([]AutoHealSettingTriggerStatusCodeParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteConfigAutoHealSettingTriggerParameters. +func (in *SiteConfigAutoHealSettingTriggerParameters) DeepCopy() *SiteConfigAutoHealSettingTriggerParameters { + if in == nil { + return nil + } + out := new(SiteConfigAutoHealSettingTriggerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SiteConfigAutoHealSettingTriggerRequestsInitParameters) DeepCopyInto(out *SiteConfigAutoHealSettingTriggerRequestsInitParameters) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteConfigAutoHealSettingTriggerRequestsInitParameters. 
+func (in *SiteConfigAutoHealSettingTriggerRequestsInitParameters) DeepCopy() *SiteConfigAutoHealSettingTriggerRequestsInitParameters { + if in == nil { + return nil + } + out := new(SiteConfigAutoHealSettingTriggerRequestsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SiteConfigAutoHealSettingTriggerRequestsObservation) DeepCopyInto(out *SiteConfigAutoHealSettingTriggerRequestsObservation) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteConfigAutoHealSettingTriggerRequestsObservation. +func (in *SiteConfigAutoHealSettingTriggerRequestsObservation) DeepCopy() *SiteConfigAutoHealSettingTriggerRequestsObservation { + if in == nil { + return nil + } + out := new(SiteConfigAutoHealSettingTriggerRequestsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SiteConfigAutoHealSettingTriggerRequestsParameters) DeepCopyInto(out *SiteConfigAutoHealSettingTriggerRequestsParameters) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteConfigAutoHealSettingTriggerRequestsParameters. 
+func (in *SiteConfigAutoHealSettingTriggerRequestsParameters) DeepCopy() *SiteConfigAutoHealSettingTriggerRequestsParameters { + if in == nil { + return nil + } + out := new(SiteConfigAutoHealSettingTriggerRequestsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SiteConfigAutoHealSettingTriggerSlowRequestInitParameters) DeepCopyInto(out *SiteConfigAutoHealSettingTriggerSlowRequestInitParameters) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.TimeTaken != nil { + in, out := &in.TimeTaken, &out.TimeTaken + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteConfigAutoHealSettingTriggerSlowRequestInitParameters. +func (in *SiteConfigAutoHealSettingTriggerSlowRequestInitParameters) DeepCopy() *SiteConfigAutoHealSettingTriggerSlowRequestInitParameters { + if in == nil { + return nil + } + out := new(SiteConfigAutoHealSettingTriggerSlowRequestInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SiteConfigAutoHealSettingTriggerSlowRequestObservation) DeepCopyInto(out *SiteConfigAutoHealSettingTriggerSlowRequestObservation) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.TimeTaken != nil { + in, out := &in.TimeTaken, &out.TimeTaken + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteConfigAutoHealSettingTriggerSlowRequestObservation. +func (in *SiteConfigAutoHealSettingTriggerSlowRequestObservation) DeepCopy() *SiteConfigAutoHealSettingTriggerSlowRequestObservation { + if in == nil { + return nil + } + out := new(SiteConfigAutoHealSettingTriggerSlowRequestObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SiteConfigAutoHealSettingTriggerSlowRequestParameters) DeepCopyInto(out *SiteConfigAutoHealSettingTriggerSlowRequestParameters) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.TimeTaken != nil { + in, out := &in.TimeTaken, &out.TimeTaken + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteConfigAutoHealSettingTriggerSlowRequestParameters. 
+func (in *SiteConfigAutoHealSettingTriggerSlowRequestParameters) DeepCopy() *SiteConfigAutoHealSettingTriggerSlowRequestParameters { + if in == nil { + return nil + } + out := new(SiteConfigAutoHealSettingTriggerSlowRequestParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SiteConfigAutoHealSettingTriggerStatusCodeInitParameters) DeepCopyInto(out *SiteConfigAutoHealSettingTriggerStatusCodeInitParameters) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.StatusCodeRange != nil { + in, out := &in.StatusCodeRange, &out.StatusCodeRange + *out = new(string) + **out = **in + } + if in.SubStatus != nil { + in, out := &in.SubStatus, &out.SubStatus + *out = new(float64) + **out = **in + } + if in.Win32StatusCode != nil { + in, out := &in.Win32StatusCode, &out.Win32StatusCode + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteConfigAutoHealSettingTriggerStatusCodeInitParameters. +func (in *SiteConfigAutoHealSettingTriggerStatusCodeInitParameters) DeepCopy() *SiteConfigAutoHealSettingTriggerStatusCodeInitParameters { + if in == nil { + return nil + } + out := new(SiteConfigAutoHealSettingTriggerStatusCodeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SiteConfigAutoHealSettingTriggerStatusCodeObservation) DeepCopyInto(out *SiteConfigAutoHealSettingTriggerStatusCodeObservation) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.StatusCodeRange != nil { + in, out := &in.StatusCodeRange, &out.StatusCodeRange + *out = new(string) + **out = **in + } + if in.SubStatus != nil { + in, out := &in.SubStatus, &out.SubStatus + *out = new(float64) + **out = **in + } + if in.Win32StatusCode != nil { + in, out := &in.Win32StatusCode, &out.Win32StatusCode + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteConfigAutoHealSettingTriggerStatusCodeObservation. +func (in *SiteConfigAutoHealSettingTriggerStatusCodeObservation) DeepCopy() *SiteConfigAutoHealSettingTriggerStatusCodeObservation { + if in == nil { + return nil + } + out := new(SiteConfigAutoHealSettingTriggerStatusCodeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SiteConfigAutoHealSettingTriggerStatusCodeParameters) DeepCopyInto(out *SiteConfigAutoHealSettingTriggerStatusCodeParameters) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.StatusCodeRange != nil { + in, out := &in.StatusCodeRange, &out.StatusCodeRange + *out = new(string) + **out = **in + } + if in.SubStatus != nil { + in, out := &in.SubStatus, &out.SubStatus + *out = new(float64) + **out = **in + } + if in.Win32StatusCode != nil { + in, out := &in.Win32StatusCode, &out.Win32StatusCode + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteConfigAutoHealSettingTriggerStatusCodeParameters. +func (in *SiteConfigAutoHealSettingTriggerStatusCodeParameters) DeepCopy() *SiteConfigAutoHealSettingTriggerStatusCodeParameters { + if in == nil { + return nil + } + out := new(SiteConfigAutoHealSettingTriggerStatusCodeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SiteConfigCorsInitParameters) DeepCopyInto(out *SiteConfigCorsInitParameters) { + *out = *in + if in.AllowedOrigins != nil { + in, out := &in.AllowedOrigins, &out.AllowedOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SupportCredentials != nil { + in, out := &in.SupportCredentials, &out.SupportCredentials + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteConfigCorsInitParameters. 
+func (in *SiteConfigCorsInitParameters) DeepCopy() *SiteConfigCorsInitParameters { + if in == nil { + return nil + } + out := new(SiteConfigCorsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SiteConfigCorsObservation) DeepCopyInto(out *SiteConfigCorsObservation) { + *out = *in + if in.AllowedOrigins != nil { + in, out := &in.AllowedOrigins, &out.AllowedOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SupportCredentials != nil { + in, out := &in.SupportCredentials, &out.SupportCredentials + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteConfigCorsObservation. +func (in *SiteConfigCorsObservation) DeepCopy() *SiteConfigCorsObservation { + if in == nil { + return nil + } + out := new(SiteConfigCorsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SiteConfigCorsParameters) DeepCopyInto(out *SiteConfigCorsParameters) { + *out = *in + if in.AllowedOrigins != nil { + in, out := &in.AllowedOrigins, &out.AllowedOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SupportCredentials != nil { + in, out := &in.SupportCredentials, &out.SupportCredentials + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteConfigCorsParameters. 
+func (in *SiteConfigCorsParameters) DeepCopy() *SiteConfigCorsParameters { + if in == nil { + return nil + } + out := new(SiteConfigCorsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SiteConfigIPRestrictionHeadersInitParameters) DeepCopyInto(out *SiteConfigIPRestrictionHeadersInitParameters) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteConfigIPRestrictionHeadersInitParameters. +func (in *SiteConfigIPRestrictionHeadersInitParameters) DeepCopy() *SiteConfigIPRestrictionHeadersInitParameters { + if in == nil { + return nil + } + out := new(SiteConfigIPRestrictionHeadersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SiteConfigIPRestrictionHeadersObservation) DeepCopyInto(out *SiteConfigIPRestrictionHeadersObservation) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteConfigIPRestrictionHeadersObservation. +func (in *SiteConfigIPRestrictionHeadersObservation) DeepCopy() *SiteConfigIPRestrictionHeadersObservation { + if in == nil { + return nil + } + out := new(SiteConfigIPRestrictionHeadersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SiteConfigIPRestrictionHeadersParameters) DeepCopyInto(out *SiteConfigIPRestrictionHeadersParameters) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteConfigIPRestrictionHeadersParameters. +func (in *SiteConfigIPRestrictionHeadersParameters) DeepCopy() *SiteConfigIPRestrictionHeadersParameters { + if in == nil { + return nil + } + out := new(SiteConfigIPRestrictionHeadersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SiteConfigIPRestrictionInitParameters) DeepCopyInto(out *SiteConfigIPRestrictionInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]IPRestrictionHeadersInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetIDRef != nil { + in, out := &in.VirtualNetworkSubnetIDRef, &out.VirtualNetworkSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkSubnetIDSelector != nil { + in, out := &in.VirtualNetworkSubnetIDSelector, &out.VirtualNetworkSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteConfigIPRestrictionInitParameters. +func (in *SiteConfigIPRestrictionInitParameters) DeepCopy() *SiteConfigIPRestrictionInitParameters { + if in == nil { + return nil + } + out := new(SiteConfigIPRestrictionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SiteConfigIPRestrictionObservation) DeepCopyInto(out *SiteConfigIPRestrictionObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]IPRestrictionHeadersObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteConfigIPRestrictionObservation. +func (in *SiteConfigIPRestrictionObservation) DeepCopy() *SiteConfigIPRestrictionObservation { + if in == nil { + return nil + } + out := new(SiteConfigIPRestrictionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SiteConfigIPRestrictionParameters) DeepCopyInto(out *SiteConfigIPRestrictionParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]IPRestrictionHeadersParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetIDRef != nil { + in, out := &in.VirtualNetworkSubnetIDRef, &out.VirtualNetworkSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkSubnetIDSelector != nil { + in, out := &in.VirtualNetworkSubnetIDSelector, &out.VirtualNetworkSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteConfigIPRestrictionParameters. +func (in *SiteConfigIPRestrictionParameters) DeepCopy() *SiteConfigIPRestrictionParameters { + if in == nil { + return nil + } + out := new(SiteConfigIPRestrictionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SiteConfigInitParameters) DeepCopyInto(out *SiteConfigInitParameters) { + *out = *in + if in.AlwaysOn != nil { + in, out := &in.AlwaysOn, &out.AlwaysOn + *out = new(bool) + **out = **in + } + if in.AppScaleLimit != nil { + in, out := &in.AppScaleLimit, &out.AppScaleLimit + *out = new(float64) + **out = **in + } + if in.AutoSwapSlotName != nil { + in, out := &in.AutoSwapSlotName, &out.AutoSwapSlotName + *out = new(string) + **out = **in + } + if in.Cors != nil { + in, out := &in.Cors, &out.Cors + *out = new(CorsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DotnetFrameworkVersion != nil { + in, out := &in.DotnetFrameworkVersion, &out.DotnetFrameworkVersion + *out = new(string) + **out = **in + } + if in.ElasticInstanceMinimum != nil { + in, out := &in.ElasticInstanceMinimum, &out.ElasticInstanceMinimum + *out = new(float64) + **out = **in + } + if in.FtpsState != nil { + in, out := &in.FtpsState, &out.FtpsState + *out = new(string) + **out = **in + } + if in.HealthCheckPath != nil { + in, out := &in.HealthCheckPath, &out.HealthCheckPath + *out = new(string) + **out = **in + } + if in.Http2Enabled != nil { + in, out := &in.Http2Enabled, &out.Http2Enabled + *out = new(bool) + **out = **in + } + if in.IPRestriction != nil { + in, out := &in.IPRestriction, &out.IPRestriction + *out = make([]IPRestrictionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.JavaVersion != nil { + in, out := &in.JavaVersion, &out.JavaVersion + *out = new(string) + **out = **in + } + if in.LinuxFxVersion != nil { + in, out := &in.LinuxFxVersion, &out.LinuxFxVersion + *out = new(string) + **out = **in + } + if in.MinTLSVersion != nil { + in, out := &in.MinTLSVersion, &out.MinTLSVersion + *out = new(string) + **out = **in + } + if in.PreWarmedInstanceCount != nil { + in, out := &in.PreWarmedInstanceCount, &out.PreWarmedInstanceCount + *out = new(float64) + **out = **in + } + if in.RuntimeScaleMonitoringEnabled != nil { + in, 
out := &in.RuntimeScaleMonitoringEnabled, &out.RuntimeScaleMonitoringEnabled + *out = new(bool) + **out = **in + } + if in.ScmIPRestriction != nil { + in, out := &in.ScmIPRestriction, &out.ScmIPRestriction + *out = make([]ScmIPRestrictionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ScmType != nil { + in, out := &in.ScmType, &out.ScmType + *out = new(string) + **out = **in + } + if in.ScmUseMainIPRestriction != nil { + in, out := &in.ScmUseMainIPRestriction, &out.ScmUseMainIPRestriction + *out = new(bool) + **out = **in + } + if in.Use32BitWorkerProcess != nil { + in, out := &in.Use32BitWorkerProcess, &out.Use32BitWorkerProcess + *out = new(bool) + **out = **in + } + if in.VnetRouteAllEnabled != nil { + in, out := &in.VnetRouteAllEnabled, &out.VnetRouteAllEnabled + *out = new(bool) + **out = **in + } + if in.WebsocketsEnabled != nil { + in, out := &in.WebsocketsEnabled, &out.WebsocketsEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteConfigInitParameters. +func (in *SiteConfigInitParameters) DeepCopy() *SiteConfigInitParameters { + if in == nil { + return nil + } + out := new(SiteConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SiteConfigObservation) DeepCopyInto(out *SiteConfigObservation) { + *out = *in + if in.AlwaysOn != nil { + in, out := &in.AlwaysOn, &out.AlwaysOn + *out = new(bool) + **out = **in + } + if in.AppScaleLimit != nil { + in, out := &in.AppScaleLimit, &out.AppScaleLimit + *out = new(float64) + **out = **in + } + if in.AutoSwapSlotName != nil { + in, out := &in.AutoSwapSlotName, &out.AutoSwapSlotName + *out = new(string) + **out = **in + } + if in.Cors != nil { + in, out := &in.Cors, &out.Cors + *out = new(CorsObservation) + (*in).DeepCopyInto(*out) + } + if in.DotnetFrameworkVersion != nil { + in, out := &in.DotnetFrameworkVersion, &out.DotnetFrameworkVersion + *out = new(string) + **out = **in + } + if in.ElasticInstanceMinimum != nil { + in, out := &in.ElasticInstanceMinimum, &out.ElasticInstanceMinimum + *out = new(float64) + **out = **in + } + if in.FtpsState != nil { + in, out := &in.FtpsState, &out.FtpsState + *out = new(string) + **out = **in + } + if in.HealthCheckPath != nil { + in, out := &in.HealthCheckPath, &out.HealthCheckPath + *out = new(string) + **out = **in + } + if in.Http2Enabled != nil { + in, out := &in.Http2Enabled, &out.Http2Enabled + *out = new(bool) + **out = **in + } + if in.IPRestriction != nil { + in, out := &in.IPRestriction, &out.IPRestriction + *out = make([]IPRestrictionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.JavaVersion != nil { + in, out := &in.JavaVersion, &out.JavaVersion + *out = new(string) + **out = **in + } + if in.LinuxFxVersion != nil { + in, out := &in.LinuxFxVersion, &out.LinuxFxVersion + *out = new(string) + **out = **in + } + if in.MinTLSVersion != nil { + in, out := &in.MinTLSVersion, &out.MinTLSVersion + *out = new(string) + **out = **in + } + if in.PreWarmedInstanceCount != nil { + in, out := &in.PreWarmedInstanceCount, &out.PreWarmedInstanceCount + *out = new(float64) + **out = **in + } + if in.RuntimeScaleMonitoringEnabled != nil { + in, out := 
&in.RuntimeScaleMonitoringEnabled, &out.RuntimeScaleMonitoringEnabled + *out = new(bool) + **out = **in + } + if in.ScmIPRestriction != nil { + in, out := &in.ScmIPRestriction, &out.ScmIPRestriction + *out = make([]ScmIPRestrictionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ScmType != nil { + in, out := &in.ScmType, &out.ScmType + *out = new(string) + **out = **in + } + if in.ScmUseMainIPRestriction != nil { + in, out := &in.ScmUseMainIPRestriction, &out.ScmUseMainIPRestriction + *out = new(bool) + **out = **in + } + if in.Use32BitWorkerProcess != nil { + in, out := &in.Use32BitWorkerProcess, &out.Use32BitWorkerProcess + *out = new(bool) + **out = **in + } + if in.VnetRouteAllEnabled != nil { + in, out := &in.VnetRouteAllEnabled, &out.VnetRouteAllEnabled + *out = new(bool) + **out = **in + } + if in.WebsocketsEnabled != nil { + in, out := &in.WebsocketsEnabled, &out.WebsocketsEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteConfigObservation. +func (in *SiteConfigObservation) DeepCopy() *SiteConfigObservation { + if in == nil { + return nil + } + out := new(SiteConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SiteConfigParameters) DeepCopyInto(out *SiteConfigParameters) { + *out = *in + if in.AlwaysOn != nil { + in, out := &in.AlwaysOn, &out.AlwaysOn + *out = new(bool) + **out = **in + } + if in.AppScaleLimit != nil { + in, out := &in.AppScaleLimit, &out.AppScaleLimit + *out = new(float64) + **out = **in + } + if in.AutoSwapSlotName != nil { + in, out := &in.AutoSwapSlotName, &out.AutoSwapSlotName + *out = new(string) + **out = **in + } + if in.Cors != nil { + in, out := &in.Cors, &out.Cors + *out = new(CorsParameters) + (*in).DeepCopyInto(*out) + } + if in.DotnetFrameworkVersion != nil { + in, out := &in.DotnetFrameworkVersion, &out.DotnetFrameworkVersion + *out = new(string) + **out = **in + } + if in.ElasticInstanceMinimum != nil { + in, out := &in.ElasticInstanceMinimum, &out.ElasticInstanceMinimum + *out = new(float64) + **out = **in + } + if in.FtpsState != nil { + in, out := &in.FtpsState, &out.FtpsState + *out = new(string) + **out = **in + } + if in.HealthCheckPath != nil { + in, out := &in.HealthCheckPath, &out.HealthCheckPath + *out = new(string) + **out = **in + } + if in.Http2Enabled != nil { + in, out := &in.Http2Enabled, &out.Http2Enabled + *out = new(bool) + **out = **in + } + if in.IPRestriction != nil { + in, out := &in.IPRestriction, &out.IPRestriction + *out = make([]IPRestrictionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.JavaVersion != nil { + in, out := &in.JavaVersion, &out.JavaVersion + *out = new(string) + **out = **in + } + if in.LinuxFxVersion != nil { + in, out := &in.LinuxFxVersion, &out.LinuxFxVersion + *out = new(string) + **out = **in + } + if in.MinTLSVersion != nil { + in, out := &in.MinTLSVersion, &out.MinTLSVersion + *out = new(string) + **out = **in + } + if in.PreWarmedInstanceCount != nil { + in, out := &in.PreWarmedInstanceCount, &out.PreWarmedInstanceCount + *out = new(float64) + **out = **in + } + if in.RuntimeScaleMonitoringEnabled != nil { + in, out := 
&in.RuntimeScaleMonitoringEnabled, &out.RuntimeScaleMonitoringEnabled + *out = new(bool) + **out = **in + } + if in.ScmIPRestriction != nil { + in, out := &in.ScmIPRestriction, &out.ScmIPRestriction + *out = make([]ScmIPRestrictionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ScmType != nil { + in, out := &in.ScmType, &out.ScmType + *out = new(string) + **out = **in + } + if in.ScmUseMainIPRestriction != nil { + in, out := &in.ScmUseMainIPRestriction, &out.ScmUseMainIPRestriction + *out = new(bool) + **out = **in + } + if in.Use32BitWorkerProcess != nil { + in, out := &in.Use32BitWorkerProcess, &out.Use32BitWorkerProcess + *out = new(bool) + **out = **in + } + if in.VnetRouteAllEnabled != nil { + in, out := &in.VnetRouteAllEnabled, &out.VnetRouteAllEnabled + *out = new(bool) + **out = **in + } + if in.WebsocketsEnabled != nil { + in, out := &in.WebsocketsEnabled, &out.WebsocketsEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteConfigParameters. +func (in *SiteConfigParameters) DeepCopy() *SiteConfigParameters { + if in == nil { + return nil + } + out := new(SiteConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SiteConfigScmIPRestrictionHeadersInitParameters) DeepCopyInto(out *SiteConfigScmIPRestrictionHeadersInitParameters) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteConfigScmIPRestrictionHeadersInitParameters. +func (in *SiteConfigScmIPRestrictionHeadersInitParameters) DeepCopy() *SiteConfigScmIPRestrictionHeadersInitParameters { + if in == nil { + return nil + } + out := new(SiteConfigScmIPRestrictionHeadersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SiteConfigScmIPRestrictionHeadersObservation) DeepCopyInto(out *SiteConfigScmIPRestrictionHeadersObservation) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteConfigScmIPRestrictionHeadersObservation. +func (in *SiteConfigScmIPRestrictionHeadersObservation) DeepCopy() *SiteConfigScmIPRestrictionHeadersObservation { + if in == nil { + return nil + } + out := new(SiteConfigScmIPRestrictionHeadersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SiteConfigScmIPRestrictionHeadersParameters) DeepCopyInto(out *SiteConfigScmIPRestrictionHeadersParameters) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteConfigScmIPRestrictionHeadersParameters. +func (in *SiteConfigScmIPRestrictionHeadersParameters) DeepCopy() *SiteConfigScmIPRestrictionHeadersParameters { + if in == nil { + return nil + } + out := new(SiteConfigScmIPRestrictionHeadersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SiteConfigScmIPRestrictionInitParameters) DeepCopyInto(out *SiteConfigScmIPRestrictionInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]SiteConfigScmIPRestrictionHeadersInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetIDRef != nil { + in, out := &in.VirtualNetworkSubnetIDRef, &out.VirtualNetworkSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkSubnetIDSelector != nil { + in, out := &in.VirtualNetworkSubnetIDSelector, &out.VirtualNetworkSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteConfigScmIPRestrictionInitParameters. +func (in *SiteConfigScmIPRestrictionInitParameters) DeepCopy() *SiteConfigScmIPRestrictionInitParameters { + if in == nil { + return nil + } + out := new(SiteConfigScmIPRestrictionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SiteConfigScmIPRestrictionObservation) DeepCopyInto(out *SiteConfigScmIPRestrictionObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]SiteConfigScmIPRestrictionHeadersObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteConfigScmIPRestrictionObservation. +func (in *SiteConfigScmIPRestrictionObservation) DeepCopy() *SiteConfigScmIPRestrictionObservation { + if in == nil { + return nil + } + out := new(SiteConfigScmIPRestrictionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SiteConfigScmIPRestrictionParameters) DeepCopyInto(out *SiteConfigScmIPRestrictionParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]SiteConfigScmIPRestrictionHeadersParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetIDRef != nil { + in, out := &in.VirtualNetworkSubnetIDRef, &out.VirtualNetworkSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkSubnetIDSelector != nil { + in, out := &in.VirtualNetworkSubnetIDSelector, &out.VirtualNetworkSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteConfigScmIPRestrictionParameters. +func (in *SiteConfigScmIPRestrictionParameters) DeepCopy() *SiteConfigScmIPRestrictionParameters { + if in == nil { + return nil + } + out := new(SiteConfigScmIPRestrictionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SiteConfigVirtualApplicationInitParameters) DeepCopyInto(out *SiteConfigVirtualApplicationInitParameters) { + *out = *in + if in.PhysicalPath != nil { + in, out := &in.PhysicalPath, &out.PhysicalPath + *out = new(string) + **out = **in + } + if in.Preload != nil { + in, out := &in.Preload, &out.Preload + *out = new(bool) + **out = **in + } + if in.VirtualDirectory != nil { + in, out := &in.VirtualDirectory, &out.VirtualDirectory + *out = make([]VirtualApplicationVirtualDirectoryInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VirtualPath != nil { + in, out := &in.VirtualPath, &out.VirtualPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteConfigVirtualApplicationInitParameters. +func (in *SiteConfigVirtualApplicationInitParameters) DeepCopy() *SiteConfigVirtualApplicationInitParameters { + if in == nil { + return nil + } + out := new(SiteConfigVirtualApplicationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SiteConfigVirtualApplicationObservation) DeepCopyInto(out *SiteConfigVirtualApplicationObservation) { + *out = *in + if in.PhysicalPath != nil { + in, out := &in.PhysicalPath, &out.PhysicalPath + *out = new(string) + **out = **in + } + if in.Preload != nil { + in, out := &in.Preload, &out.Preload + *out = new(bool) + **out = **in + } + if in.VirtualDirectory != nil { + in, out := &in.VirtualDirectory, &out.VirtualDirectory + *out = make([]VirtualApplicationVirtualDirectoryObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VirtualPath != nil { + in, out := &in.VirtualPath, &out.VirtualPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteConfigVirtualApplicationObservation. +func (in *SiteConfigVirtualApplicationObservation) DeepCopy() *SiteConfigVirtualApplicationObservation { + if in == nil { + return nil + } + out := new(SiteConfigVirtualApplicationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SiteConfigVirtualApplicationParameters) DeepCopyInto(out *SiteConfigVirtualApplicationParameters) { + *out = *in + if in.PhysicalPath != nil { + in, out := &in.PhysicalPath, &out.PhysicalPath + *out = new(string) + **out = **in + } + if in.Preload != nil { + in, out := &in.Preload, &out.Preload + *out = new(bool) + **out = **in + } + if in.VirtualDirectory != nil { + in, out := &in.VirtualDirectory, &out.VirtualDirectory + *out = make([]VirtualApplicationVirtualDirectoryParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VirtualPath != nil { + in, out := &in.VirtualPath, &out.VirtualPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteConfigVirtualApplicationParameters. +func (in *SiteConfigVirtualApplicationParameters) DeepCopy() *SiteConfigVirtualApplicationParameters { + if in == nil { + return nil + } + out := new(SiteConfigVirtualApplicationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SiteCredentialInitParameters) DeepCopyInto(out *SiteCredentialInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteCredentialInitParameters. +func (in *SiteCredentialInitParameters) DeepCopy() *SiteCredentialInitParameters { + if in == nil { + return nil + } + out := new(SiteCredentialInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SiteCredentialObservation) DeepCopyInto(out *SiteCredentialObservation) { + *out = *in + if in.Password != nil { + in, out := &in.Password, &out.Password + *out = new(string) + **out = **in + } + if in.Username != nil { + in, out := &in.Username, &out.Username + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteCredentialObservation. +func (in *SiteCredentialObservation) DeepCopy() *SiteCredentialObservation { + if in == nil { + return nil + } + out := new(SiteCredentialObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SiteCredentialParameters) DeepCopyInto(out *SiteCredentialParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SiteCredentialParameters. +func (in *SiteCredentialParameters) DeepCopy() *SiteCredentialParameters { + if in == nil { + return nil + } + out := new(SiteCredentialParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SkuInitParameters) DeepCopyInto(out *SkuInitParameters) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(float64) + **out = **in + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(string) + **out = **in + } + if in.Tier != nil { + in, out := &in.Tier, &out.Tier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SkuInitParameters. 
+func (in *SkuInitParameters) DeepCopy() *SkuInitParameters { + if in == nil { + return nil + } + out := new(SkuInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SkuObservation) DeepCopyInto(out *SkuObservation) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(float64) + **out = **in + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(string) + **out = **in + } + if in.Tier != nil { + in, out := &in.Tier, &out.Tier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SkuObservation. +func (in *SkuObservation) DeepCopy() *SkuObservation { + if in == nil { + return nil + } + out := new(SkuObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SkuParameters) DeepCopyInto(out *SkuParameters) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = new(float64) + **out = **in + } + if in.Size != nil { + in, out := &in.Size, &out.Size + *out = new(string) + **out = **in + } + if in.Tier != nil { + in, out := &in.Tier, &out.Tier + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SkuParameters. +func (in *SkuParameters) DeepCopy() *SkuParameters { + if in == nil { + return nil + } + out := new(SkuParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SlowRequestInitParameters) DeepCopyInto(out *SlowRequestInitParameters) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.TimeTaken != nil { + in, out := &in.TimeTaken, &out.TimeTaken + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SlowRequestInitParameters. +func (in *SlowRequestInitParameters) DeepCopy() *SlowRequestInitParameters { + if in == nil { + return nil + } + out := new(SlowRequestInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SlowRequestObservation) DeepCopyInto(out *SlowRequestObservation) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.TimeTaken != nil { + in, out := &in.TimeTaken, &out.TimeTaken + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SlowRequestObservation. +func (in *SlowRequestObservation) DeepCopy() *SlowRequestObservation { + if in == nil { + return nil + } + out := new(SlowRequestObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SlowRequestParameters) DeepCopyInto(out *SlowRequestParameters) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.TimeTaken != nil { + in, out := &in.TimeTaken, &out.TimeTaken + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SlowRequestParameters. +func (in *SlowRequestParameters) DeepCopy() *SlowRequestParameters { + if in == nil { + return nil + } + out := new(SlowRequestParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceControlInitParameters) DeepCopyInto(out *SourceControlInitParameters) { + *out = *in + if in.Branch != nil { + in, out := &in.Branch, &out.Branch + *out = new(string) + **out = **in + } + if in.ManualIntegration != nil { + in, out := &in.ManualIntegration, &out.ManualIntegration + *out = new(bool) + **out = **in + } + if in.RepoURL != nil { + in, out := &in.RepoURL, &out.RepoURL + *out = new(string) + **out = **in + } + if in.RollbackEnabled != nil { + in, out := &in.RollbackEnabled, &out.RollbackEnabled + *out = new(bool) + **out = **in + } + if in.UseMercurial != nil { + in, out := &in.UseMercurial, &out.UseMercurial + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceControlInitParameters. 
+func (in *SourceControlInitParameters) DeepCopy() *SourceControlInitParameters { + if in == nil { + return nil + } + out := new(SourceControlInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceControlObservation) DeepCopyInto(out *SourceControlObservation) { + *out = *in + if in.Branch != nil { + in, out := &in.Branch, &out.Branch + *out = new(string) + **out = **in + } + if in.ManualIntegration != nil { + in, out := &in.ManualIntegration, &out.ManualIntegration + *out = new(bool) + **out = **in + } + if in.RepoURL != nil { + in, out := &in.RepoURL, &out.RepoURL + *out = new(string) + **out = **in + } + if in.RollbackEnabled != nil { + in, out := &in.RollbackEnabled, &out.RollbackEnabled + *out = new(bool) + **out = **in + } + if in.UseMercurial != nil { + in, out := &in.UseMercurial, &out.UseMercurial + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceControlObservation. +func (in *SourceControlObservation) DeepCopy() *SourceControlObservation { + if in == nil { + return nil + } + out := new(SourceControlObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SourceControlParameters) DeepCopyInto(out *SourceControlParameters) { + *out = *in + if in.Branch != nil { + in, out := &in.Branch, &out.Branch + *out = new(string) + **out = **in + } + if in.ManualIntegration != nil { + in, out := &in.ManualIntegration, &out.ManualIntegration + *out = new(bool) + **out = **in + } + if in.RepoURL != nil { + in, out := &in.RepoURL, &out.RepoURL + *out = new(string) + **out = **in + } + if in.RollbackEnabled != nil { + in, out := &in.RollbackEnabled, &out.RollbackEnabled + *out = new(bool) + **out = **in + } + if in.UseMercurial != nil { + in, out := &in.UseMercurial, &out.UseMercurial + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceControlParameters. +func (in *SourceControlParameters) DeepCopy() *SourceControlParameters { + if in == nil { + return nil + } + out := new(SourceControlParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaticSite) DeepCopyInto(out *StaticSite) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticSite. +func (in *StaticSite) DeepCopy() *StaticSite { + if in == nil { + return nil + } + out := new(StaticSite) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StaticSite) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StaticSiteIdentityInitParameters) DeepCopyInto(out *StaticSiteIdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticSiteIdentityInitParameters. +func (in *StaticSiteIdentityInitParameters) DeepCopy() *StaticSiteIdentityInitParameters { + if in == nil { + return nil + } + out := new(StaticSiteIdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaticSiteIdentityObservation) DeepCopyInto(out *StaticSiteIdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticSiteIdentityObservation. +func (in *StaticSiteIdentityObservation) DeepCopy() *StaticSiteIdentityObservation { + if in == nil { + return nil + } + out := new(StaticSiteIdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *StaticSiteIdentityParameters) DeepCopyInto(out *StaticSiteIdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticSiteIdentityParameters. +func (in *StaticSiteIdentityParameters) DeepCopy() *StaticSiteIdentityParameters { + if in == nil { + return nil + } + out := new(StaticSiteIdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaticSiteInitParameters) DeepCopyInto(out *StaticSiteInitParameters) { + *out = *in + if in.AppSettings != nil { + in, out := &in.AppSettings, &out.AppSettings + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(StaticSiteIdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.SkuSize != nil { + in, out := &in.SkuSize, &out.SkuSize + *out = new(string) + **out = **in + } + if in.SkuTier != nil { + in, out := &in.SkuTier, &out.SkuTier + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + 
inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticSiteInitParameters. +func (in *StaticSiteInitParameters) DeepCopy() *StaticSiteInitParameters { + if in == nil { + return nil + } + out := new(StaticSiteInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaticSiteList) DeepCopyInto(out *StaticSiteList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StaticSite, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticSiteList. +func (in *StaticSiteList) DeepCopy() *StaticSiteList { + if in == nil { + return nil + } + out := new(StaticSiteList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StaticSiteList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StaticSiteObservation) DeepCopyInto(out *StaticSiteObservation) { + *out = *in + if in.AppSettings != nil { + in, out := &in.AppSettings, &out.AppSettings + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.DefaultHostName != nil { + in, out := &in.DefaultHostName, &out.DefaultHostName + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(StaticSiteIdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.SkuSize != nil { + in, out := &in.SkuSize, &out.SkuSize + *out = new(string) + **out = **in + } + if in.SkuTier != nil { + in, out := &in.SkuTier, &out.SkuTier + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticSiteObservation. +func (in *StaticSiteObservation) DeepCopy() *StaticSiteObservation { + if in == nil { + return nil + } + out := new(StaticSiteObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StaticSiteParameters) DeepCopyInto(out *StaticSiteParameters) { + *out = *in + if in.AppSettings != nil { + in, out := &in.AppSettings, &out.AppSettings + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(StaticSiteIdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SkuSize != nil { + in, out := &in.SkuSize, &out.SkuSize + *out = new(string) + **out = **in + } + if in.SkuTier != nil { + in, out := &in.SkuTier, &out.SkuTier + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticSiteParameters. 
+func (in *StaticSiteParameters) DeepCopy() *StaticSiteParameters { + if in == nil { + return nil + } + out := new(StaticSiteParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaticSiteSpec) DeepCopyInto(out *StaticSiteSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticSiteSpec. +func (in *StaticSiteSpec) DeepCopy() *StaticSiteSpec { + if in == nil { + return nil + } + out := new(StaticSiteSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaticSiteStatus) DeepCopyInto(out *StaticSiteStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticSiteStatus. +func (in *StaticSiteStatus) DeepCopy() *StaticSiteStatus { + if in == nil { + return nil + } + out := new(StaticSiteStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StatusCodeInitParameters) DeepCopyInto(out *StatusCodeInitParameters) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.StatusCodeRange != nil { + in, out := &in.StatusCodeRange, &out.StatusCodeRange + *out = new(string) + **out = **in + } + if in.SubStatus != nil { + in, out := &in.SubStatus, &out.SubStatus + *out = new(float64) + **out = **in + } + if in.Win32StatusCode != nil { + in, out := &in.Win32StatusCode, &out.Win32StatusCode + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatusCodeInitParameters. +func (in *StatusCodeInitParameters) DeepCopy() *StatusCodeInitParameters { + if in == nil { + return nil + } + out := new(StatusCodeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StatusCodeObservation) DeepCopyInto(out *StatusCodeObservation) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.StatusCodeRange != nil { + in, out := &in.StatusCodeRange, &out.StatusCodeRange + *out = new(string) + **out = **in + } + if in.SubStatus != nil { + in, out := &in.SubStatus, &out.SubStatus + *out = new(float64) + **out = **in + } + if in.Win32StatusCode != nil { + in, out := &in.Win32StatusCode, &out.Win32StatusCode + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatusCodeObservation. +func (in *StatusCodeObservation) DeepCopy() *StatusCodeObservation { + if in == nil { + return nil + } + out := new(StatusCodeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StatusCodeParameters) DeepCopyInto(out *StatusCodeParameters) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.StatusCodeRange != nil { + in, out := &in.StatusCodeRange, &out.StatusCodeRange + *out = new(string) + **out = **in + } + if in.SubStatus != nil { + in, out := &in.SubStatus, &out.SubStatus + *out = new(float64) + **out = **in + } + if in.Win32StatusCode != nil { + in, out := &in.Win32StatusCode, &out.Win32StatusCode + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatusCodeParameters. +func (in *StatusCodeParameters) DeepCopy() *StatusCodeParameters { + if in == nil { + return nil + } + out := new(StatusCodeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StickySettingsInitParameters) DeepCopyInto(out *StickySettingsInitParameters) { + *out = *in + if in.AppSettingNames != nil { + in, out := &in.AppSettingNames, &out.AppSettingNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ConnectionStringNames != nil { + in, out := &in.ConnectionStringNames, &out.ConnectionStringNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StickySettingsInitParameters. 
+func (in *StickySettingsInitParameters) DeepCopy() *StickySettingsInitParameters { + if in == nil { + return nil + } + out := new(StickySettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StickySettingsObservation) DeepCopyInto(out *StickySettingsObservation) { + *out = *in + if in.AppSettingNames != nil { + in, out := &in.AppSettingNames, &out.AppSettingNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ConnectionStringNames != nil { + in, out := &in.ConnectionStringNames, &out.ConnectionStringNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StickySettingsObservation. +func (in *StickySettingsObservation) DeepCopy() *StickySettingsObservation { + if in == nil { + return nil + } + out := new(StickySettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StickySettingsParameters) DeepCopyInto(out *StickySettingsParameters) { + *out = *in + if in.AppSettingNames != nil { + in, out := &in.AppSettingNames, &out.AppSettingNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ConnectionStringNames != nil { + in, out := &in.ConnectionStringNames, &out.ConnectionStringNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StickySettingsParameters. +func (in *StickySettingsParameters) DeepCopy() *StickySettingsParameters { + if in == nil { + return nil + } + out := new(StickySettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageAccountInitParameters) DeepCopyInto(out *StorageAccountInitParameters) { + *out = *in + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } + if in.MountPath != nil { + in, out := &in.MountPath, &out.MountPath + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ShareName != nil { + in, out := &in.ShareName, &out.ShareName + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageAccountInitParameters. 
+func (in *StorageAccountInitParameters) DeepCopy() *StorageAccountInitParameters { + if in == nil { + return nil + } + out := new(StorageAccountInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageAccountObservation) DeepCopyInto(out *StorageAccountObservation) { + *out = *in + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } + if in.MountPath != nil { + in, out := &in.MountPath, &out.MountPath + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ShareName != nil { + in, out := &in.ShareName, &out.ShareName + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageAccountObservation. +func (in *StorageAccountObservation) DeepCopy() *StorageAccountObservation { + if in == nil { + return nil + } + out := new(StorageAccountObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StorageAccountParameters) DeepCopyInto(out *StorageAccountParameters) { + *out = *in + out.AccessKeySecretRef = in.AccessKeySecretRef + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } + if in.MountPath != nil { + in, out := &in.MountPath, &out.MountPath + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ShareName != nil { + in, out := &in.ShareName, &out.ShareName + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageAccountParameters. +func (in *StorageAccountParameters) DeepCopy() *StorageAccountParameters { + if in == nil { + return nil + } + out := new(StorageAccountParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TriggerInitParameters) DeepCopyInto(out *TriggerInitParameters) { + *out = *in + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = new(RequestsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SlowRequest != nil { + in, out := &in.SlowRequest, &out.SlowRequest + *out = new(SlowRequestInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StatusCode != nil { + in, out := &in.StatusCode, &out.StatusCode + *out = make([]StatusCodeInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerInitParameters. 
+func (in *TriggerInitParameters) DeepCopy() *TriggerInitParameters { + if in == nil { + return nil + } + out := new(TriggerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TriggerObservation) DeepCopyInto(out *TriggerObservation) { + *out = *in + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = new(RequestsObservation) + (*in).DeepCopyInto(*out) + } + if in.SlowRequest != nil { + in, out := &in.SlowRequest, &out.SlowRequest + *out = new(SlowRequestObservation) + (*in).DeepCopyInto(*out) + } + if in.StatusCode != nil { + in, out := &in.StatusCode, &out.StatusCode + *out = make([]StatusCodeObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerObservation. +func (in *TriggerObservation) DeepCopy() *TriggerObservation { + if in == nil { + return nil + } + out := new(TriggerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TriggerParameters) DeepCopyInto(out *TriggerParameters) { + *out = *in + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = new(RequestsParameters) + (*in).DeepCopyInto(*out) + } + if in.SlowRequest != nil { + in, out := &in.SlowRequest, &out.SlowRequest + *out = new(SlowRequestParameters) + (*in).DeepCopyInto(*out) + } + if in.StatusCode != nil { + in, out := &in.StatusCode, &out.StatusCode + *out = make([]StatusCodeParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerParameters. 
+func (in *TriggerParameters) DeepCopy() *TriggerParameters { + if in == nil { + return nil + } + out := new(TriggerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TriggerRequestsInitParameters) DeepCopyInto(out *TriggerRequestsInitParameters) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerRequestsInitParameters. +func (in *TriggerRequestsInitParameters) DeepCopy() *TriggerRequestsInitParameters { + if in == nil { + return nil + } + out := new(TriggerRequestsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TriggerRequestsObservation) DeepCopyInto(out *TriggerRequestsObservation) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerRequestsObservation. +func (in *TriggerRequestsObservation) DeepCopy() *TriggerRequestsObservation { + if in == nil { + return nil + } + out := new(TriggerRequestsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TriggerRequestsParameters) DeepCopyInto(out *TriggerRequestsParameters) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerRequestsParameters. +func (in *TriggerRequestsParameters) DeepCopy() *TriggerRequestsParameters { + if in == nil { + return nil + } + out := new(TriggerRequestsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TriggerSlowRequestInitParameters) DeepCopyInto(out *TriggerSlowRequestInitParameters) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.TimeTaken != nil { + in, out := &in.TimeTaken, &out.TimeTaken + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerSlowRequestInitParameters. +func (in *TriggerSlowRequestInitParameters) DeepCopy() *TriggerSlowRequestInitParameters { + if in == nil { + return nil + } + out := new(TriggerSlowRequestInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TriggerSlowRequestObservation) DeepCopyInto(out *TriggerSlowRequestObservation) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.TimeTaken != nil { + in, out := &in.TimeTaken, &out.TimeTaken + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerSlowRequestObservation. +func (in *TriggerSlowRequestObservation) DeepCopy() *TriggerSlowRequestObservation { + if in == nil { + return nil + } + out := new(TriggerSlowRequestObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TriggerSlowRequestParameters) DeepCopyInto(out *TriggerSlowRequestParameters) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.TimeTaken != nil { + in, out := &in.TimeTaken, &out.TimeTaken + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerSlowRequestParameters. +func (in *TriggerSlowRequestParameters) DeepCopy() *TriggerSlowRequestParameters { + if in == nil { + return nil + } + out := new(TriggerSlowRequestParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TriggerStatusCodeInitParameters) DeepCopyInto(out *TriggerStatusCodeInitParameters) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.StatusCodeRange != nil { + in, out := &in.StatusCodeRange, &out.StatusCodeRange + *out = new(string) + **out = **in + } + if in.SubStatus != nil { + in, out := &in.SubStatus, &out.SubStatus + *out = new(float64) + **out = **in + } + if in.Win32StatusCode != nil { + in, out := &in.Win32StatusCode, &out.Win32StatusCode + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerStatusCodeInitParameters. +func (in *TriggerStatusCodeInitParameters) DeepCopy() *TriggerStatusCodeInitParameters { + if in == nil { + return nil + } + out := new(TriggerStatusCodeInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TriggerStatusCodeObservation) DeepCopyInto(out *TriggerStatusCodeObservation) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.StatusCodeRange != nil { + in, out := &in.StatusCodeRange, &out.StatusCodeRange + *out = new(string) + **out = **in + } + if in.SubStatus != nil { + in, out := &in.SubStatus, &out.SubStatus + *out = new(float64) + **out = **in + } + if in.Win32StatusCode != nil { + in, out := &in.Win32StatusCode, &out.Win32StatusCode + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerStatusCodeObservation. +func (in *TriggerStatusCodeObservation) DeepCopy() *TriggerStatusCodeObservation { + if in == nil { + return nil + } + out := new(TriggerStatusCodeObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TriggerStatusCodeParameters) DeepCopyInto(out *TriggerStatusCodeParameters) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(float64) + **out = **in + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.StatusCodeRange != nil { + in, out := &in.StatusCodeRange, &out.StatusCodeRange + *out = new(string) + **out = **in + } + if in.SubStatus != nil { + in, out := &in.SubStatus, &out.SubStatus + *out = new(float64) + **out = **in + } + if in.Win32StatusCode != nil { + in, out := &in.Win32StatusCode, &out.Win32StatusCode + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerStatusCodeParameters. +func (in *TriggerStatusCodeParameters) DeepCopy() *TriggerStatusCodeParameters { + if in == nil { + return nil + } + out := new(TriggerStatusCodeParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TwitterInitParameters) DeepCopyInto(out *TwitterInitParameters) { + *out = *in + if in.ConsumerKey != nil { + in, out := &in.ConsumerKey, &out.ConsumerKey + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TwitterInitParameters. +func (in *TwitterInitParameters) DeepCopy() *TwitterInitParameters { + if in == nil { + return nil + } + out := new(TwitterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TwitterObservation) DeepCopyInto(out *TwitterObservation) { + *out = *in + if in.ConsumerKey != nil { + in, out := &in.ConsumerKey, &out.ConsumerKey + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TwitterObservation. +func (in *TwitterObservation) DeepCopy() *TwitterObservation { + if in == nil { + return nil + } + out := new(TwitterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TwitterParameters) DeepCopyInto(out *TwitterParameters) { + *out = *in + if in.ConsumerKey != nil { + in, out := &in.ConsumerKey, &out.ConsumerKey + *out = new(string) + **out = **in + } + out.ConsumerSecretSecretRef = in.ConsumerSecretSecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TwitterParameters. +func (in *TwitterParameters) DeepCopy() *TwitterParameters { + if in == nil { + return nil + } + out := new(TwitterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TwitterV2InitParameters) DeepCopyInto(out *TwitterV2InitParameters) { + *out = *in + if in.ConsumerKey != nil { + in, out := &in.ConsumerKey, &out.ConsumerKey + *out = new(string) + **out = **in + } + if in.ConsumerSecretSettingName != nil { + in, out := &in.ConsumerSecretSettingName, &out.ConsumerSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TwitterV2InitParameters. 
+func (in *TwitterV2InitParameters) DeepCopy() *TwitterV2InitParameters { + if in == nil { + return nil + } + out := new(TwitterV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TwitterV2Observation) DeepCopyInto(out *TwitterV2Observation) { + *out = *in + if in.ConsumerKey != nil { + in, out := &in.ConsumerKey, &out.ConsumerKey + *out = new(string) + **out = **in + } + if in.ConsumerSecretSettingName != nil { + in, out := &in.ConsumerSecretSettingName, &out.ConsumerSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TwitterV2Observation. +func (in *TwitterV2Observation) DeepCopy() *TwitterV2Observation { + if in == nil { + return nil + } + out := new(TwitterV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TwitterV2Parameters) DeepCopyInto(out *TwitterV2Parameters) { + *out = *in + if in.ConsumerKey != nil { + in, out := &in.ConsumerKey, &out.ConsumerKey + *out = new(string) + **out = **in + } + if in.ConsumerSecretSettingName != nil { + in, out := &in.ConsumerSecretSettingName, &out.ConsumerSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TwitterV2Parameters. +func (in *TwitterV2Parameters) DeepCopy() *TwitterV2Parameters { + if in == nil { + return nil + } + out := new(TwitterV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualApplicationInitParameters) DeepCopyInto(out *VirtualApplicationInitParameters) { + *out = *in + if in.PhysicalPath != nil { + in, out := &in.PhysicalPath, &out.PhysicalPath + *out = new(string) + **out = **in + } + if in.Preload != nil { + in, out := &in.Preload, &out.Preload + *out = new(bool) + **out = **in + } + if in.VirtualDirectory != nil { + in, out := &in.VirtualDirectory, &out.VirtualDirectory + *out = make([]VirtualDirectoryInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VirtualPath != nil { + in, out := &in.VirtualPath, &out.VirtualPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualApplicationInitParameters. +func (in *VirtualApplicationInitParameters) DeepCopy() *VirtualApplicationInitParameters { + if in == nil { + return nil + } + out := new(VirtualApplicationInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualApplicationObservation) DeepCopyInto(out *VirtualApplicationObservation) { + *out = *in + if in.PhysicalPath != nil { + in, out := &in.PhysicalPath, &out.PhysicalPath + *out = new(string) + **out = **in + } + if in.Preload != nil { + in, out := &in.Preload, &out.Preload + *out = new(bool) + **out = **in + } + if in.VirtualDirectory != nil { + in, out := &in.VirtualDirectory, &out.VirtualDirectory + *out = make([]VirtualDirectoryObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VirtualPath != nil { + in, out := &in.VirtualPath, &out.VirtualPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualApplicationObservation. 
+func (in *VirtualApplicationObservation) DeepCopy() *VirtualApplicationObservation { + if in == nil { + return nil + } + out := new(VirtualApplicationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualApplicationParameters) DeepCopyInto(out *VirtualApplicationParameters) { + *out = *in + if in.PhysicalPath != nil { + in, out := &in.PhysicalPath, &out.PhysicalPath + *out = new(string) + **out = **in + } + if in.Preload != nil { + in, out := &in.Preload, &out.Preload + *out = new(bool) + **out = **in + } + if in.VirtualDirectory != nil { + in, out := &in.VirtualDirectory, &out.VirtualDirectory + *out = make([]VirtualDirectoryParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VirtualPath != nil { + in, out := &in.VirtualPath, &out.VirtualPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualApplicationParameters. +func (in *VirtualApplicationParameters) DeepCopy() *VirtualApplicationParameters { + if in == nil { + return nil + } + out := new(VirtualApplicationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualApplicationVirtualDirectoryInitParameters) DeepCopyInto(out *VirtualApplicationVirtualDirectoryInitParameters) { + *out = *in + if in.PhysicalPath != nil { + in, out := &in.PhysicalPath, &out.PhysicalPath + *out = new(string) + **out = **in + } + if in.VirtualPath != nil { + in, out := &in.VirtualPath, &out.VirtualPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualApplicationVirtualDirectoryInitParameters. 
+func (in *VirtualApplicationVirtualDirectoryInitParameters) DeepCopy() *VirtualApplicationVirtualDirectoryInitParameters { + if in == nil { + return nil + } + out := new(VirtualApplicationVirtualDirectoryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualApplicationVirtualDirectoryObservation) DeepCopyInto(out *VirtualApplicationVirtualDirectoryObservation) { + *out = *in + if in.PhysicalPath != nil { + in, out := &in.PhysicalPath, &out.PhysicalPath + *out = new(string) + **out = **in + } + if in.VirtualPath != nil { + in, out := &in.VirtualPath, &out.VirtualPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualApplicationVirtualDirectoryObservation. +func (in *VirtualApplicationVirtualDirectoryObservation) DeepCopy() *VirtualApplicationVirtualDirectoryObservation { + if in == nil { + return nil + } + out := new(VirtualApplicationVirtualDirectoryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualApplicationVirtualDirectoryParameters) DeepCopyInto(out *VirtualApplicationVirtualDirectoryParameters) { + *out = *in + if in.PhysicalPath != nil { + in, out := &in.PhysicalPath, &out.PhysicalPath + *out = new(string) + **out = **in + } + if in.VirtualPath != nil { + in, out := &in.VirtualPath, &out.VirtualPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualApplicationVirtualDirectoryParameters. 
+func (in *VirtualApplicationVirtualDirectoryParameters) DeepCopy() *VirtualApplicationVirtualDirectoryParameters { + if in == nil { + return nil + } + out := new(VirtualApplicationVirtualDirectoryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualDirectoryInitParameters) DeepCopyInto(out *VirtualDirectoryInitParameters) { + *out = *in + if in.PhysicalPath != nil { + in, out := &in.PhysicalPath, &out.PhysicalPath + *out = new(string) + **out = **in + } + if in.VirtualPath != nil { + in, out := &in.VirtualPath, &out.VirtualPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualDirectoryInitParameters. +func (in *VirtualDirectoryInitParameters) DeepCopy() *VirtualDirectoryInitParameters { + if in == nil { + return nil + } + out := new(VirtualDirectoryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualDirectoryObservation) DeepCopyInto(out *VirtualDirectoryObservation) { + *out = *in + if in.PhysicalPath != nil { + in, out := &in.PhysicalPath, &out.PhysicalPath + *out = new(string) + **out = **in + } + if in.VirtualPath != nil { + in, out := &in.VirtualPath, &out.VirtualPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualDirectoryObservation. +func (in *VirtualDirectoryObservation) DeepCopy() *VirtualDirectoryObservation { + if in == nil { + return nil + } + out := new(VirtualDirectoryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualDirectoryParameters) DeepCopyInto(out *VirtualDirectoryParameters) { + *out = *in + if in.PhysicalPath != nil { + in, out := &in.PhysicalPath, &out.PhysicalPath + *out = new(string) + **out = **in + } + if in.VirtualPath != nil { + in, out := &in.VirtualPath, &out.VirtualPath + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualDirectoryParameters. +func (in *VirtualDirectoryParameters) DeepCopy() *VirtualDirectoryParameters { + if in == nil { + return nil + } + out := new(VirtualDirectoryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionApp) DeepCopyInto(out *WindowsFunctionApp) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionApp. +func (in *WindowsFunctionApp) DeepCopy() *WindowsFunctionApp { + if in == nil { + return nil + } + out := new(WindowsFunctionApp) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WindowsFunctionApp) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppAuthSettingsActiveDirectoryInitParameters) DeepCopyInto(out *WindowsFunctionAppAuthSettingsActiveDirectoryInitParameters) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppAuthSettingsActiveDirectoryInitParameters. +func (in *WindowsFunctionAppAuthSettingsActiveDirectoryInitParameters) DeepCopy() *WindowsFunctionAppAuthSettingsActiveDirectoryInitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppAuthSettingsActiveDirectoryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppAuthSettingsActiveDirectoryObservation) DeepCopyInto(out *WindowsFunctionAppAuthSettingsActiveDirectoryObservation) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppAuthSettingsActiveDirectoryObservation. +func (in *WindowsFunctionAppAuthSettingsActiveDirectoryObservation) DeepCopy() *WindowsFunctionAppAuthSettingsActiveDirectoryObservation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppAuthSettingsActiveDirectoryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppAuthSettingsActiveDirectoryParameters) DeepCopyInto(out *WindowsFunctionAppAuthSettingsActiveDirectoryParameters) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSecretRef != nil { + in, out := &in.ClientSecretSecretRef, &out.ClientSecretSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppAuthSettingsActiveDirectoryParameters. +func (in *WindowsFunctionAppAuthSettingsActiveDirectoryParameters) DeepCopy() *WindowsFunctionAppAuthSettingsActiveDirectoryParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppAuthSettingsActiveDirectoryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppAuthSettingsFacebookInitParameters) DeepCopyInto(out *WindowsFunctionAppAuthSettingsFacebookInitParameters) { + *out = *in + if in.AppID != nil { + in, out := &in.AppID, &out.AppID + *out = new(string) + **out = **in + } + if in.AppSecretSettingName != nil { + in, out := &in.AppSecretSettingName, &out.AppSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppAuthSettingsFacebookInitParameters. +func (in *WindowsFunctionAppAuthSettingsFacebookInitParameters) DeepCopy() *WindowsFunctionAppAuthSettingsFacebookInitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppAuthSettingsFacebookInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppAuthSettingsFacebookObservation) DeepCopyInto(out *WindowsFunctionAppAuthSettingsFacebookObservation) { + *out = *in + if in.AppID != nil { + in, out := &in.AppID, &out.AppID + *out = new(string) + **out = **in + } + if in.AppSecretSettingName != nil { + in, out := &in.AppSecretSettingName, &out.AppSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppAuthSettingsFacebookObservation. 
+func (in *WindowsFunctionAppAuthSettingsFacebookObservation) DeepCopy() *WindowsFunctionAppAuthSettingsFacebookObservation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppAuthSettingsFacebookObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppAuthSettingsFacebookParameters) DeepCopyInto(out *WindowsFunctionAppAuthSettingsFacebookParameters) { + *out = *in + if in.AppID != nil { + in, out := &in.AppID, &out.AppID + *out = new(string) + **out = **in + } + if in.AppSecretSecretRef != nil { + in, out := &in.AppSecretSecretRef, &out.AppSecretSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.AppSecretSettingName != nil { + in, out := &in.AppSecretSettingName, &out.AppSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppAuthSettingsFacebookParameters. +func (in *WindowsFunctionAppAuthSettingsFacebookParameters) DeepCopy() *WindowsFunctionAppAuthSettingsFacebookParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppAuthSettingsFacebookParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppAuthSettingsGithubInitParameters) DeepCopyInto(out *WindowsFunctionAppAuthSettingsGithubInitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppAuthSettingsGithubInitParameters. +func (in *WindowsFunctionAppAuthSettingsGithubInitParameters) DeepCopy() *WindowsFunctionAppAuthSettingsGithubInitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppAuthSettingsGithubInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppAuthSettingsGithubObservation) DeepCopyInto(out *WindowsFunctionAppAuthSettingsGithubObservation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppAuthSettingsGithubObservation. 
+func (in *WindowsFunctionAppAuthSettingsGithubObservation) DeepCopy() *WindowsFunctionAppAuthSettingsGithubObservation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppAuthSettingsGithubObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppAuthSettingsGithubParameters) DeepCopyInto(out *WindowsFunctionAppAuthSettingsGithubParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSecretRef != nil { + in, out := &in.ClientSecretSecretRef, &out.ClientSecretSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppAuthSettingsGithubParameters. +func (in *WindowsFunctionAppAuthSettingsGithubParameters) DeepCopy() *WindowsFunctionAppAuthSettingsGithubParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppAuthSettingsGithubParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppAuthSettingsGoogleInitParameters) DeepCopyInto(out *WindowsFunctionAppAuthSettingsGoogleInitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppAuthSettingsGoogleInitParameters. +func (in *WindowsFunctionAppAuthSettingsGoogleInitParameters) DeepCopy() *WindowsFunctionAppAuthSettingsGoogleInitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppAuthSettingsGoogleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppAuthSettingsGoogleObservation) DeepCopyInto(out *WindowsFunctionAppAuthSettingsGoogleObservation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppAuthSettingsGoogleObservation. 
+func (in *WindowsFunctionAppAuthSettingsGoogleObservation) DeepCopy() *WindowsFunctionAppAuthSettingsGoogleObservation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppAuthSettingsGoogleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppAuthSettingsGoogleParameters) DeepCopyInto(out *WindowsFunctionAppAuthSettingsGoogleParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSecretRef != nil { + in, out := &in.ClientSecretSecretRef, &out.ClientSecretSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppAuthSettingsGoogleParameters. +func (in *WindowsFunctionAppAuthSettingsGoogleParameters) DeepCopy() *WindowsFunctionAppAuthSettingsGoogleParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppAuthSettingsGoogleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppAuthSettingsInitParameters) DeepCopyInto(out *WindowsFunctionAppAuthSettingsInitParameters) { + *out = *in + if in.ActiveDirectory != nil { + in, out := &in.ActiveDirectory, &out.ActiveDirectory + *out = new(WindowsFunctionAppAuthSettingsActiveDirectoryInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AdditionalLoginParameters != nil { + in, out := &in.AdditionalLoginParameters, &out.AdditionalLoginParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AllowedExternalRedirectUrls != nil { + in, out := &in.AllowedExternalRedirectUrls, &out.AllowedExternalRedirectUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DefaultProvider != nil { + in, out := &in.DefaultProvider, &out.DefaultProvider + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Facebook != nil { + in, out := &in.Facebook, &out.Facebook + *out = new(WindowsFunctionAppAuthSettingsFacebookInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Github != nil { + in, out := &in.Github, &out.Github + *out = new(WindowsFunctionAppAuthSettingsGithubInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Google != nil { + in, out := &in.Google, &out.Google + *out = new(WindowsFunctionAppAuthSettingsGoogleInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } + if in.Microsoft != nil { + in, out := &in.Microsoft, &out.Microsoft + *out = new(WindowsFunctionAppAuthSettingsMicrosoftInitParameters) + (*in).DeepCopyInto(*out) + } + if 
in.RuntimeVersion != nil { + in, out := &in.RuntimeVersion, &out.RuntimeVersion + *out = new(string) + **out = **in + } + if in.TokenRefreshExtensionHours != nil { + in, out := &in.TokenRefreshExtensionHours, &out.TokenRefreshExtensionHours + *out = new(float64) + **out = **in + } + if in.TokenStoreEnabled != nil { + in, out := &in.TokenStoreEnabled, &out.TokenStoreEnabled + *out = new(bool) + **out = **in + } + if in.Twitter != nil { + in, out := &in.Twitter, &out.Twitter + *out = new(WindowsFunctionAppAuthSettingsTwitterInitParameters) + (*in).DeepCopyInto(*out) + } + if in.UnauthenticatedClientAction != nil { + in, out := &in.UnauthenticatedClientAction, &out.UnauthenticatedClientAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppAuthSettingsInitParameters. +func (in *WindowsFunctionAppAuthSettingsInitParameters) DeepCopy() *WindowsFunctionAppAuthSettingsInitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppAuthSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppAuthSettingsMicrosoftInitParameters) DeepCopyInto(out *WindowsFunctionAppAuthSettingsMicrosoftInitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppAuthSettingsMicrosoftInitParameters. +func (in *WindowsFunctionAppAuthSettingsMicrosoftInitParameters) DeepCopy() *WindowsFunctionAppAuthSettingsMicrosoftInitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppAuthSettingsMicrosoftInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppAuthSettingsMicrosoftObservation) DeepCopyInto(out *WindowsFunctionAppAuthSettingsMicrosoftObservation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppAuthSettingsMicrosoftObservation. 
+func (in *WindowsFunctionAppAuthSettingsMicrosoftObservation) DeepCopy() *WindowsFunctionAppAuthSettingsMicrosoftObservation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppAuthSettingsMicrosoftObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppAuthSettingsMicrosoftParameters) DeepCopyInto(out *WindowsFunctionAppAuthSettingsMicrosoftParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSecretRef != nil { + in, out := &in.ClientSecretSecretRef, &out.ClientSecretSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppAuthSettingsMicrosoftParameters. +func (in *WindowsFunctionAppAuthSettingsMicrosoftParameters) DeepCopy() *WindowsFunctionAppAuthSettingsMicrosoftParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppAuthSettingsMicrosoftParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppAuthSettingsObservation) DeepCopyInto(out *WindowsFunctionAppAuthSettingsObservation) { + *out = *in + if in.ActiveDirectory != nil { + in, out := &in.ActiveDirectory, &out.ActiveDirectory + *out = new(WindowsFunctionAppAuthSettingsActiveDirectoryObservation) + (*in).DeepCopyInto(*out) + } + if in.AdditionalLoginParameters != nil { + in, out := &in.AdditionalLoginParameters, &out.AdditionalLoginParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AllowedExternalRedirectUrls != nil { + in, out := &in.AllowedExternalRedirectUrls, &out.AllowedExternalRedirectUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DefaultProvider != nil { + in, out := &in.DefaultProvider, &out.DefaultProvider + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Facebook != nil { + in, out := &in.Facebook, &out.Facebook + *out = new(WindowsFunctionAppAuthSettingsFacebookObservation) + (*in).DeepCopyInto(*out) + } + if in.Github != nil { + in, out := &in.Github, &out.Github + *out = new(WindowsFunctionAppAuthSettingsGithubObservation) + (*in).DeepCopyInto(*out) + } + if in.Google != nil { + in, out := &in.Google, &out.Google + *out = new(WindowsFunctionAppAuthSettingsGoogleObservation) + (*in).DeepCopyInto(*out) + } + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } + if in.Microsoft != nil { + in, out := &in.Microsoft, &out.Microsoft + *out = new(WindowsFunctionAppAuthSettingsMicrosoftObservation) + (*in).DeepCopyInto(*out) + } + if in.RuntimeVersion != nil { + in, 
out := &in.RuntimeVersion, &out.RuntimeVersion + *out = new(string) + **out = **in + } + if in.TokenRefreshExtensionHours != nil { + in, out := &in.TokenRefreshExtensionHours, &out.TokenRefreshExtensionHours + *out = new(float64) + **out = **in + } + if in.TokenStoreEnabled != nil { + in, out := &in.TokenStoreEnabled, &out.TokenStoreEnabled + *out = new(bool) + **out = **in + } + if in.Twitter != nil { + in, out := &in.Twitter, &out.Twitter + *out = new(WindowsFunctionAppAuthSettingsTwitterObservation) + (*in).DeepCopyInto(*out) + } + if in.UnauthenticatedClientAction != nil { + in, out := &in.UnauthenticatedClientAction, &out.UnauthenticatedClientAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppAuthSettingsObservation. +func (in *WindowsFunctionAppAuthSettingsObservation) DeepCopy() *WindowsFunctionAppAuthSettingsObservation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppAuthSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppAuthSettingsParameters) DeepCopyInto(out *WindowsFunctionAppAuthSettingsParameters) { + *out = *in + if in.ActiveDirectory != nil { + in, out := &in.ActiveDirectory, &out.ActiveDirectory + *out = new(WindowsFunctionAppAuthSettingsActiveDirectoryParameters) + (*in).DeepCopyInto(*out) + } + if in.AdditionalLoginParameters != nil { + in, out := &in.AdditionalLoginParameters, &out.AdditionalLoginParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AllowedExternalRedirectUrls != nil { + in, out := &in.AllowedExternalRedirectUrls, &out.AllowedExternalRedirectUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DefaultProvider != nil { + in, out := &in.DefaultProvider, &out.DefaultProvider + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Facebook != nil { + in, out := &in.Facebook, &out.Facebook + *out = new(WindowsFunctionAppAuthSettingsFacebookParameters) + (*in).DeepCopyInto(*out) + } + if in.Github != nil { + in, out := &in.Github, &out.Github + *out = new(WindowsFunctionAppAuthSettingsGithubParameters) + (*in).DeepCopyInto(*out) + } + if in.Google != nil { + in, out := &in.Google, &out.Google + *out = new(WindowsFunctionAppAuthSettingsGoogleParameters) + (*in).DeepCopyInto(*out) + } + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } + if in.Microsoft != nil { + in, out := &in.Microsoft, &out.Microsoft + *out = new(WindowsFunctionAppAuthSettingsMicrosoftParameters) + (*in).DeepCopyInto(*out) + } + if in.RuntimeVersion != nil { + in, out := 
&in.RuntimeVersion, &out.RuntimeVersion + *out = new(string) + **out = **in + } + if in.TokenRefreshExtensionHours != nil { + in, out := &in.TokenRefreshExtensionHours, &out.TokenRefreshExtensionHours + *out = new(float64) + **out = **in + } + if in.TokenStoreEnabled != nil { + in, out := &in.TokenStoreEnabled, &out.TokenStoreEnabled + *out = new(bool) + **out = **in + } + if in.Twitter != nil { + in, out := &in.Twitter, &out.Twitter + *out = new(WindowsFunctionAppAuthSettingsTwitterParameters) + (*in).DeepCopyInto(*out) + } + if in.UnauthenticatedClientAction != nil { + in, out := &in.UnauthenticatedClientAction, &out.UnauthenticatedClientAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppAuthSettingsParameters. +func (in *WindowsFunctionAppAuthSettingsParameters) DeepCopy() *WindowsFunctionAppAuthSettingsParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppAuthSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppAuthSettingsTwitterInitParameters) DeepCopyInto(out *WindowsFunctionAppAuthSettingsTwitterInitParameters) { + *out = *in + if in.ConsumerKey != nil { + in, out := &in.ConsumerKey, &out.ConsumerKey + *out = new(string) + **out = **in + } + if in.ConsumerSecretSettingName != nil { + in, out := &in.ConsumerSecretSettingName, &out.ConsumerSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppAuthSettingsTwitterInitParameters. 
+func (in *WindowsFunctionAppAuthSettingsTwitterInitParameters) DeepCopy() *WindowsFunctionAppAuthSettingsTwitterInitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppAuthSettingsTwitterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppAuthSettingsTwitterObservation) DeepCopyInto(out *WindowsFunctionAppAuthSettingsTwitterObservation) { + *out = *in + if in.ConsumerKey != nil { + in, out := &in.ConsumerKey, &out.ConsumerKey + *out = new(string) + **out = **in + } + if in.ConsumerSecretSettingName != nil { + in, out := &in.ConsumerSecretSettingName, &out.ConsumerSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppAuthSettingsTwitterObservation. +func (in *WindowsFunctionAppAuthSettingsTwitterObservation) DeepCopy() *WindowsFunctionAppAuthSettingsTwitterObservation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppAuthSettingsTwitterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppAuthSettingsTwitterParameters) DeepCopyInto(out *WindowsFunctionAppAuthSettingsTwitterParameters) { + *out = *in + if in.ConsumerKey != nil { + in, out := &in.ConsumerKey, &out.ConsumerKey + *out = new(string) + **out = **in + } + if in.ConsumerSecretSecretRef != nil { + in, out := &in.ConsumerSecretSecretRef, &out.ConsumerSecretSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ConsumerSecretSettingName != nil { + in, out := &in.ConsumerSecretSettingName, &out.ConsumerSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppAuthSettingsTwitterParameters. +func (in *WindowsFunctionAppAuthSettingsTwitterParameters) DeepCopy() *WindowsFunctionAppAuthSettingsTwitterParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppAuthSettingsTwitterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppAuthSettingsV2ActiveDirectoryV2InitParameters) DeepCopyInto(out *WindowsFunctionAppAuthSettingsV2ActiveDirectoryV2InitParameters) { + *out = *in + if in.AllowedApplications != nil { + in, out := &in.AllowedApplications, &out.AllowedApplications + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedGroups != nil { + in, out := &in.AllowedGroups, &out.AllowedGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedIdentities != nil { + in, out := &in.AllowedIdentities, &out.AllowedIdentities + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretCertificateThumbprint != nil { + in, out := &in.ClientSecretCertificateThumbprint, &out.ClientSecretCertificateThumbprint + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.JwtAllowedClientApplications != nil { + in, out := &in.JwtAllowedClientApplications, &out.JwtAllowedClientApplications + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.JwtAllowedGroups != nil { + in, out := &in.JwtAllowedGroups, 
&out.JwtAllowedGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LoginParameters != nil { + in, out := &in.LoginParameters, &out.LoginParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TenantAuthEndpoint != nil { + in, out := &in.TenantAuthEndpoint, &out.TenantAuthEndpoint + *out = new(string) + **out = **in + } + if in.WwwAuthenticationDisabled != nil { + in, out := &in.WwwAuthenticationDisabled, &out.WwwAuthenticationDisabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppAuthSettingsV2ActiveDirectoryV2InitParameters. +func (in *WindowsFunctionAppAuthSettingsV2ActiveDirectoryV2InitParameters) DeepCopy() *WindowsFunctionAppAuthSettingsV2ActiveDirectoryV2InitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppAuthSettingsV2ActiveDirectoryV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppAuthSettingsV2ActiveDirectoryV2Observation) DeepCopyInto(out *WindowsFunctionAppAuthSettingsV2ActiveDirectoryV2Observation) { + *out = *in + if in.AllowedApplications != nil { + in, out := &in.AllowedApplications, &out.AllowedApplications + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedGroups != nil { + in, out := &in.AllowedGroups, &out.AllowedGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedIdentities != nil { + in, out := &in.AllowedIdentities, &out.AllowedIdentities + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretCertificateThumbprint != nil { + in, out := &in.ClientSecretCertificateThumbprint, &out.ClientSecretCertificateThumbprint + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.JwtAllowedClientApplications != nil { + in, out := &in.JwtAllowedClientApplications, &out.JwtAllowedClientApplications + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.JwtAllowedGroups != nil { + in, out := &in.JwtAllowedGroups, 
&out.JwtAllowedGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LoginParameters != nil { + in, out := &in.LoginParameters, &out.LoginParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TenantAuthEndpoint != nil { + in, out := &in.TenantAuthEndpoint, &out.TenantAuthEndpoint + *out = new(string) + **out = **in + } + if in.WwwAuthenticationDisabled != nil { + in, out := &in.WwwAuthenticationDisabled, &out.WwwAuthenticationDisabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppAuthSettingsV2ActiveDirectoryV2Observation. +func (in *WindowsFunctionAppAuthSettingsV2ActiveDirectoryV2Observation) DeepCopy() *WindowsFunctionAppAuthSettingsV2ActiveDirectoryV2Observation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppAuthSettingsV2ActiveDirectoryV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppAuthSettingsV2ActiveDirectoryV2Parameters) DeepCopyInto(out *WindowsFunctionAppAuthSettingsV2ActiveDirectoryV2Parameters) { + *out = *in + if in.AllowedApplications != nil { + in, out := &in.AllowedApplications, &out.AllowedApplications + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedGroups != nil { + in, out := &in.AllowedGroups, &out.AllowedGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedIdentities != nil { + in, out := &in.AllowedIdentities, &out.AllowedIdentities + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretCertificateThumbprint != nil { + in, out := &in.ClientSecretCertificateThumbprint, &out.ClientSecretCertificateThumbprint + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.JwtAllowedClientApplications != nil { + in, out := &in.JwtAllowedClientApplications, &out.JwtAllowedClientApplications + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.JwtAllowedGroups != nil { + in, out := &in.JwtAllowedGroups, 
&out.JwtAllowedGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LoginParameters != nil { + in, out := &in.LoginParameters, &out.LoginParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TenantAuthEndpoint != nil { + in, out := &in.TenantAuthEndpoint, &out.TenantAuthEndpoint + *out = new(string) + **out = **in + } + if in.WwwAuthenticationDisabled != nil { + in, out := &in.WwwAuthenticationDisabled, &out.WwwAuthenticationDisabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppAuthSettingsV2ActiveDirectoryV2Parameters. +func (in *WindowsFunctionAppAuthSettingsV2ActiveDirectoryV2Parameters) DeepCopy() *WindowsFunctionAppAuthSettingsV2ActiveDirectoryV2Parameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppAuthSettingsV2ActiveDirectoryV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppAuthSettingsV2AppleV2InitParameters) DeepCopyInto(out *WindowsFunctionAppAuthSettingsV2AppleV2InitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppAuthSettingsV2AppleV2InitParameters. 
+func (in *WindowsFunctionAppAuthSettingsV2AppleV2InitParameters) DeepCopy() *WindowsFunctionAppAuthSettingsV2AppleV2InitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppAuthSettingsV2AppleV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppAuthSettingsV2AppleV2Observation) DeepCopyInto(out *WindowsFunctionAppAuthSettingsV2AppleV2Observation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppAuthSettingsV2AppleV2Observation. +func (in *WindowsFunctionAppAuthSettingsV2AppleV2Observation) DeepCopy() *WindowsFunctionAppAuthSettingsV2AppleV2Observation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppAuthSettingsV2AppleV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppAuthSettingsV2AppleV2Parameters) DeepCopyInto(out *WindowsFunctionAppAuthSettingsV2AppleV2Parameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppAuthSettingsV2AppleV2Parameters. +func (in *WindowsFunctionAppAuthSettingsV2AppleV2Parameters) DeepCopy() *WindowsFunctionAppAuthSettingsV2AppleV2Parameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppAuthSettingsV2AppleV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppAuthSettingsV2AzureStaticWebAppV2InitParameters) DeepCopyInto(out *WindowsFunctionAppAuthSettingsV2AzureStaticWebAppV2InitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppAuthSettingsV2AzureStaticWebAppV2InitParameters. +func (in *WindowsFunctionAppAuthSettingsV2AzureStaticWebAppV2InitParameters) DeepCopy() *WindowsFunctionAppAuthSettingsV2AzureStaticWebAppV2InitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppAuthSettingsV2AzureStaticWebAppV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppAuthSettingsV2AzureStaticWebAppV2Observation) DeepCopyInto(out *WindowsFunctionAppAuthSettingsV2AzureStaticWebAppV2Observation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppAuthSettingsV2AzureStaticWebAppV2Observation. +func (in *WindowsFunctionAppAuthSettingsV2AzureStaticWebAppV2Observation) DeepCopy() *WindowsFunctionAppAuthSettingsV2AzureStaticWebAppV2Observation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppAuthSettingsV2AzureStaticWebAppV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppAuthSettingsV2AzureStaticWebAppV2Parameters) DeepCopyInto(out *WindowsFunctionAppAuthSettingsV2AzureStaticWebAppV2Parameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppAuthSettingsV2AzureStaticWebAppV2Parameters. +func (in *WindowsFunctionAppAuthSettingsV2AzureStaticWebAppV2Parameters) DeepCopy() *WindowsFunctionAppAuthSettingsV2AzureStaticWebAppV2Parameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppAuthSettingsV2AzureStaticWebAppV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppAuthSettingsV2CustomOidcV2InitParameters) DeepCopyInto(out *WindowsFunctionAppAuthSettingsV2CustomOidcV2InitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NameClaimType != nil { + in, out := &in.NameClaimType, &out.NameClaimType + *out = new(string) + **out = **in + } + if in.OpenIDConfigurationEndpoint != nil { + in, out := &in.OpenIDConfigurationEndpoint, &out.OpenIDConfigurationEndpoint + *out = new(string) + **out = **in + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppAuthSettingsV2CustomOidcV2InitParameters. +func (in *WindowsFunctionAppAuthSettingsV2CustomOidcV2InitParameters) DeepCopy() *WindowsFunctionAppAuthSettingsV2CustomOidcV2InitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppAuthSettingsV2CustomOidcV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppAuthSettingsV2CustomOidcV2Observation) DeepCopyInto(out *WindowsFunctionAppAuthSettingsV2CustomOidcV2Observation) { + *out = *in + if in.AuthorisationEndpoint != nil { + in, out := &in.AuthorisationEndpoint, &out.AuthorisationEndpoint + *out = new(string) + **out = **in + } + if in.CertificationURI != nil { + in, out := &in.CertificationURI, &out.CertificationURI + *out = new(string) + **out = **in + } + if in.ClientCredentialMethod != nil { + in, out := &in.ClientCredentialMethod, &out.ClientCredentialMethod + *out = new(string) + **out = **in + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.IssuerEndpoint != nil { + in, out := &in.IssuerEndpoint, &out.IssuerEndpoint + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NameClaimType != nil { + in, out := &in.NameClaimType, &out.NameClaimType + *out = new(string) + **out = **in + } + if in.OpenIDConfigurationEndpoint != nil { + in, out := &in.OpenIDConfigurationEndpoint, &out.OpenIDConfigurationEndpoint + *out = new(string) + **out = **in + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TokenEndpoint != nil { + in, out := &in.TokenEndpoint, &out.TokenEndpoint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppAuthSettingsV2CustomOidcV2Observation. 
+func (in *WindowsFunctionAppAuthSettingsV2CustomOidcV2Observation) DeepCopy() *WindowsFunctionAppAuthSettingsV2CustomOidcV2Observation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppAuthSettingsV2CustomOidcV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppAuthSettingsV2CustomOidcV2Parameters) DeepCopyInto(out *WindowsFunctionAppAuthSettingsV2CustomOidcV2Parameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NameClaimType != nil { + in, out := &in.NameClaimType, &out.NameClaimType + *out = new(string) + **out = **in + } + if in.OpenIDConfigurationEndpoint != nil { + in, out := &in.OpenIDConfigurationEndpoint, &out.OpenIDConfigurationEndpoint + *out = new(string) + **out = **in + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppAuthSettingsV2CustomOidcV2Parameters. +func (in *WindowsFunctionAppAuthSettingsV2CustomOidcV2Parameters) DeepCopy() *WindowsFunctionAppAuthSettingsV2CustomOidcV2Parameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppAuthSettingsV2CustomOidcV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppAuthSettingsV2FacebookV2InitParameters) DeepCopyInto(out *WindowsFunctionAppAuthSettingsV2FacebookV2InitParameters) { + *out = *in + if in.AppID != nil { + in, out := &in.AppID, &out.AppID + *out = new(string) + **out = **in + } + if in.AppSecretSettingName != nil { + in, out := &in.AppSecretSettingName, &out.AppSecretSettingName + *out = new(string) + **out = **in + } + if in.GraphAPIVersion != nil { + in, out := &in.GraphAPIVersion, &out.GraphAPIVersion + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppAuthSettingsV2FacebookV2InitParameters. +func (in *WindowsFunctionAppAuthSettingsV2FacebookV2InitParameters) DeepCopy() *WindowsFunctionAppAuthSettingsV2FacebookV2InitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppAuthSettingsV2FacebookV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppAuthSettingsV2FacebookV2Observation) DeepCopyInto(out *WindowsFunctionAppAuthSettingsV2FacebookV2Observation) { + *out = *in + if in.AppID != nil { + in, out := &in.AppID, &out.AppID + *out = new(string) + **out = **in + } + if in.AppSecretSettingName != nil { + in, out := &in.AppSecretSettingName, &out.AppSecretSettingName + *out = new(string) + **out = **in + } + if in.GraphAPIVersion != nil { + in, out := &in.GraphAPIVersion, &out.GraphAPIVersion + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppAuthSettingsV2FacebookV2Observation. +func (in *WindowsFunctionAppAuthSettingsV2FacebookV2Observation) DeepCopy() *WindowsFunctionAppAuthSettingsV2FacebookV2Observation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppAuthSettingsV2FacebookV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppAuthSettingsV2FacebookV2Parameters) DeepCopyInto(out *WindowsFunctionAppAuthSettingsV2FacebookV2Parameters) { + *out = *in + if in.AppID != nil { + in, out := &in.AppID, &out.AppID + *out = new(string) + **out = **in + } + if in.AppSecretSettingName != nil { + in, out := &in.AppSecretSettingName, &out.AppSecretSettingName + *out = new(string) + **out = **in + } + if in.GraphAPIVersion != nil { + in, out := &in.GraphAPIVersion, &out.GraphAPIVersion + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppAuthSettingsV2FacebookV2Parameters. +func (in *WindowsFunctionAppAuthSettingsV2FacebookV2Parameters) DeepCopy() *WindowsFunctionAppAuthSettingsV2FacebookV2Parameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppAuthSettingsV2FacebookV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppAuthSettingsV2GithubV2InitParameters) DeepCopyInto(out *WindowsFunctionAppAuthSettingsV2GithubV2InitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppAuthSettingsV2GithubV2InitParameters. +func (in *WindowsFunctionAppAuthSettingsV2GithubV2InitParameters) DeepCopy() *WindowsFunctionAppAuthSettingsV2GithubV2InitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppAuthSettingsV2GithubV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppAuthSettingsV2GithubV2Observation) DeepCopyInto(out *WindowsFunctionAppAuthSettingsV2GithubV2Observation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppAuthSettingsV2GithubV2Observation. 
+func (in *WindowsFunctionAppAuthSettingsV2GithubV2Observation) DeepCopy() *WindowsFunctionAppAuthSettingsV2GithubV2Observation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppAuthSettingsV2GithubV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppAuthSettingsV2GithubV2Parameters) DeepCopyInto(out *WindowsFunctionAppAuthSettingsV2GithubV2Parameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppAuthSettingsV2GithubV2Parameters. +func (in *WindowsFunctionAppAuthSettingsV2GithubV2Parameters) DeepCopy() *WindowsFunctionAppAuthSettingsV2GithubV2Parameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppAuthSettingsV2GithubV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppAuthSettingsV2GoogleV2InitParameters) DeepCopyInto(out *WindowsFunctionAppAuthSettingsV2GoogleV2InitParameters) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppAuthSettingsV2GoogleV2InitParameters. +func (in *WindowsFunctionAppAuthSettingsV2GoogleV2InitParameters) DeepCopy() *WindowsFunctionAppAuthSettingsV2GoogleV2InitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppAuthSettingsV2GoogleV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppAuthSettingsV2GoogleV2Observation) DeepCopyInto(out *WindowsFunctionAppAuthSettingsV2GoogleV2Observation) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppAuthSettingsV2GoogleV2Observation. +func (in *WindowsFunctionAppAuthSettingsV2GoogleV2Observation) DeepCopy() *WindowsFunctionAppAuthSettingsV2GoogleV2Observation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppAuthSettingsV2GoogleV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppAuthSettingsV2GoogleV2Parameters) DeepCopyInto(out *WindowsFunctionAppAuthSettingsV2GoogleV2Parameters) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppAuthSettingsV2GoogleV2Parameters. +func (in *WindowsFunctionAppAuthSettingsV2GoogleV2Parameters) DeepCopy() *WindowsFunctionAppAuthSettingsV2GoogleV2Parameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppAuthSettingsV2GoogleV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppAuthSettingsV2InitParameters) DeepCopyInto(out *WindowsFunctionAppAuthSettingsV2InitParameters) { + *out = *in + if in.ActiveDirectoryV2 != nil { + in, out := &in.ActiveDirectoryV2, &out.ActiveDirectoryV2 + *out = new(WindowsFunctionAppAuthSettingsV2ActiveDirectoryV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.AppleV2 != nil { + in, out := &in.AppleV2, &out.AppleV2 + *out = new(WindowsFunctionAppAuthSettingsV2AppleV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.AuthEnabled != nil { + in, out := &in.AuthEnabled, &out.AuthEnabled + *out = new(bool) + **out = **in + } + if in.AzureStaticWebAppV2 != nil { + in, out := &in.AzureStaticWebAppV2, &out.AzureStaticWebAppV2 + *out = new(WindowsFunctionAppAuthSettingsV2AzureStaticWebAppV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.ConfigFilePath != nil { + in, out := &in.ConfigFilePath, &out.ConfigFilePath + *out = new(string) + **out = **in + } + if in.CustomOidcV2 != nil { + in, out := &in.CustomOidcV2, &out.CustomOidcV2 + *out = make([]WindowsFunctionAppAuthSettingsV2CustomOidcV2InitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultProvider != nil { + in, out := &in.DefaultProvider, &out.DefaultProvider + *out = new(string) + **out = **in + } + if in.ExcludedPaths != nil { + in, out := &in.ExcludedPaths, &out.ExcludedPaths + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FacebookV2 != nil { + in, out := &in.FacebookV2, &out.FacebookV2 + *out = new(WindowsFunctionAppAuthSettingsV2FacebookV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.ForwardProxyConvention != nil { + in, out := &in.ForwardProxyConvention, &out.ForwardProxyConvention + *out = new(string) + **out = **in + } + if in.ForwardProxyCustomHostHeaderName != nil { + in, out := &in.ForwardProxyCustomHostHeaderName, 
&out.ForwardProxyCustomHostHeaderName + *out = new(string) + **out = **in + } + if in.ForwardProxyCustomSchemeHeaderName != nil { + in, out := &in.ForwardProxyCustomSchemeHeaderName, &out.ForwardProxyCustomSchemeHeaderName + *out = new(string) + **out = **in + } + if in.GithubV2 != nil { + in, out := &in.GithubV2, &out.GithubV2 + *out = new(WindowsFunctionAppAuthSettingsV2GithubV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.GoogleV2 != nil { + in, out := &in.GoogleV2, &out.GoogleV2 + *out = new(WindowsFunctionAppAuthSettingsV2GoogleV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.HTTPRouteAPIPrefix != nil { + in, out := &in.HTTPRouteAPIPrefix, &out.HTTPRouteAPIPrefix + *out = new(string) + **out = **in + } + if in.Login != nil { + in, out := &in.Login, &out.Login + *out = new(WindowsFunctionAppAuthSettingsV2LoginInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MicrosoftV2 != nil { + in, out := &in.MicrosoftV2, &out.MicrosoftV2 + *out = new(WindowsFunctionAppAuthSettingsV2MicrosoftV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.RequireAuthentication != nil { + in, out := &in.RequireAuthentication, &out.RequireAuthentication + *out = new(bool) + **out = **in + } + if in.RequireHTTPS != nil { + in, out := &in.RequireHTTPS, &out.RequireHTTPS + *out = new(bool) + **out = **in + } + if in.RuntimeVersion != nil { + in, out := &in.RuntimeVersion, &out.RuntimeVersion + *out = new(string) + **out = **in + } + if in.TwitterV2 != nil { + in, out := &in.TwitterV2, &out.TwitterV2 + *out = new(WindowsFunctionAppAuthSettingsV2TwitterV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.UnauthenticatedAction != nil { + in, out := &in.UnauthenticatedAction, &out.UnauthenticatedAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppAuthSettingsV2InitParameters. 
+func (in *WindowsFunctionAppAuthSettingsV2InitParameters) DeepCopy() *WindowsFunctionAppAuthSettingsV2InitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppAuthSettingsV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppAuthSettingsV2LoginInitParameters) DeepCopyInto(out *WindowsFunctionAppAuthSettingsV2LoginInitParameters) { + *out = *in + if in.AllowedExternalRedirectUrls != nil { + in, out := &in.AllowedExternalRedirectUrls, &out.AllowedExternalRedirectUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CookieExpirationConvention != nil { + in, out := &in.CookieExpirationConvention, &out.CookieExpirationConvention + *out = new(string) + **out = **in + } + if in.CookieExpirationTime != nil { + in, out := &in.CookieExpirationTime, &out.CookieExpirationTime + *out = new(string) + **out = **in + } + if in.LogoutEndpoint != nil { + in, out := &in.LogoutEndpoint, &out.LogoutEndpoint + *out = new(string) + **out = **in + } + if in.NonceExpirationTime != nil { + in, out := &in.NonceExpirationTime, &out.NonceExpirationTime + *out = new(string) + **out = **in + } + if in.PreserveURLFragmentsForLogins != nil { + in, out := &in.PreserveURLFragmentsForLogins, &out.PreserveURLFragmentsForLogins + *out = new(bool) + **out = **in + } + if in.TokenRefreshExtensionTime != nil { + in, out := &in.TokenRefreshExtensionTime, &out.TokenRefreshExtensionTime + *out = new(float64) + **out = **in + } + if in.TokenStoreEnabled != nil { + in, out := &in.TokenStoreEnabled, &out.TokenStoreEnabled + *out = new(bool) + **out = **in + } + if in.TokenStorePath != nil { + in, out := &in.TokenStorePath, &out.TokenStorePath + *out = new(string) + **out = **in + } + if in.TokenStoreSASSettingName != 
nil { + in, out := &in.TokenStoreSASSettingName, &out.TokenStoreSASSettingName + *out = new(string) + **out = **in + } + if in.ValidateNonce != nil { + in, out := &in.ValidateNonce, &out.ValidateNonce + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppAuthSettingsV2LoginInitParameters. +func (in *WindowsFunctionAppAuthSettingsV2LoginInitParameters) DeepCopy() *WindowsFunctionAppAuthSettingsV2LoginInitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppAuthSettingsV2LoginInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppAuthSettingsV2LoginObservation) DeepCopyInto(out *WindowsFunctionAppAuthSettingsV2LoginObservation) { + *out = *in + if in.AllowedExternalRedirectUrls != nil { + in, out := &in.AllowedExternalRedirectUrls, &out.AllowedExternalRedirectUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CookieExpirationConvention != nil { + in, out := &in.CookieExpirationConvention, &out.CookieExpirationConvention + *out = new(string) + **out = **in + } + if in.CookieExpirationTime != nil { + in, out := &in.CookieExpirationTime, &out.CookieExpirationTime + *out = new(string) + **out = **in + } + if in.LogoutEndpoint != nil { + in, out := &in.LogoutEndpoint, &out.LogoutEndpoint + *out = new(string) + **out = **in + } + if in.NonceExpirationTime != nil { + in, out := &in.NonceExpirationTime, &out.NonceExpirationTime + *out = new(string) + **out = **in + } + if in.PreserveURLFragmentsForLogins != nil { + in, out := &in.PreserveURLFragmentsForLogins, &out.PreserveURLFragmentsForLogins + *out = new(bool) + **out = **in + } + if in.TokenRefreshExtensionTime != nil { + in, out := 
&in.TokenRefreshExtensionTime, &out.TokenRefreshExtensionTime + *out = new(float64) + **out = **in + } + if in.TokenStoreEnabled != nil { + in, out := &in.TokenStoreEnabled, &out.TokenStoreEnabled + *out = new(bool) + **out = **in + } + if in.TokenStorePath != nil { + in, out := &in.TokenStorePath, &out.TokenStorePath + *out = new(string) + **out = **in + } + if in.TokenStoreSASSettingName != nil { + in, out := &in.TokenStoreSASSettingName, &out.TokenStoreSASSettingName + *out = new(string) + **out = **in + } + if in.ValidateNonce != nil { + in, out := &in.ValidateNonce, &out.ValidateNonce + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppAuthSettingsV2LoginObservation. +func (in *WindowsFunctionAppAuthSettingsV2LoginObservation) DeepCopy() *WindowsFunctionAppAuthSettingsV2LoginObservation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppAuthSettingsV2LoginObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppAuthSettingsV2LoginParameters) DeepCopyInto(out *WindowsFunctionAppAuthSettingsV2LoginParameters) { + *out = *in + if in.AllowedExternalRedirectUrls != nil { + in, out := &in.AllowedExternalRedirectUrls, &out.AllowedExternalRedirectUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CookieExpirationConvention != nil { + in, out := &in.CookieExpirationConvention, &out.CookieExpirationConvention + *out = new(string) + **out = **in + } + if in.CookieExpirationTime != nil { + in, out := &in.CookieExpirationTime, &out.CookieExpirationTime + *out = new(string) + **out = **in + } + if in.LogoutEndpoint != nil { + in, out := &in.LogoutEndpoint, &out.LogoutEndpoint + *out = new(string) + **out = **in + } + if in.NonceExpirationTime != nil { + in, out := &in.NonceExpirationTime, &out.NonceExpirationTime + *out = new(string) + **out = **in + } + if in.PreserveURLFragmentsForLogins != nil { + in, out := &in.PreserveURLFragmentsForLogins, &out.PreserveURLFragmentsForLogins + *out = new(bool) + **out = **in + } + if in.TokenRefreshExtensionTime != nil { + in, out := &in.TokenRefreshExtensionTime, &out.TokenRefreshExtensionTime + *out = new(float64) + **out = **in + } + if in.TokenStoreEnabled != nil { + in, out := &in.TokenStoreEnabled, &out.TokenStoreEnabled + *out = new(bool) + **out = **in + } + if in.TokenStorePath != nil { + in, out := &in.TokenStorePath, &out.TokenStorePath + *out = new(string) + **out = **in + } + if in.TokenStoreSASSettingName != nil { + in, out := &in.TokenStoreSASSettingName, &out.TokenStoreSASSettingName + *out = new(string) + **out = **in + } + if in.ValidateNonce != nil { + in, out := &in.ValidateNonce, &out.ValidateNonce + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
WindowsFunctionAppAuthSettingsV2LoginParameters. +func (in *WindowsFunctionAppAuthSettingsV2LoginParameters) DeepCopy() *WindowsFunctionAppAuthSettingsV2LoginParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppAuthSettingsV2LoginParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppAuthSettingsV2MicrosoftV2InitParameters) DeepCopyInto(out *WindowsFunctionAppAuthSettingsV2MicrosoftV2InitParameters) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppAuthSettingsV2MicrosoftV2InitParameters. +func (in *WindowsFunctionAppAuthSettingsV2MicrosoftV2InitParameters) DeepCopy() *WindowsFunctionAppAuthSettingsV2MicrosoftV2InitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppAuthSettingsV2MicrosoftV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppAuthSettingsV2MicrosoftV2Observation) DeepCopyInto(out *WindowsFunctionAppAuthSettingsV2MicrosoftV2Observation) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppAuthSettingsV2MicrosoftV2Observation. +func (in *WindowsFunctionAppAuthSettingsV2MicrosoftV2Observation) DeepCopy() *WindowsFunctionAppAuthSettingsV2MicrosoftV2Observation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppAuthSettingsV2MicrosoftV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppAuthSettingsV2MicrosoftV2Parameters) DeepCopyInto(out *WindowsFunctionAppAuthSettingsV2MicrosoftV2Parameters) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppAuthSettingsV2MicrosoftV2Parameters. +func (in *WindowsFunctionAppAuthSettingsV2MicrosoftV2Parameters) DeepCopy() *WindowsFunctionAppAuthSettingsV2MicrosoftV2Parameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppAuthSettingsV2MicrosoftV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppAuthSettingsV2Observation) DeepCopyInto(out *WindowsFunctionAppAuthSettingsV2Observation) { + *out = *in + if in.ActiveDirectoryV2 != nil { + in, out := &in.ActiveDirectoryV2, &out.ActiveDirectoryV2 + *out = new(WindowsFunctionAppAuthSettingsV2ActiveDirectoryV2Observation) + (*in).DeepCopyInto(*out) + } + if in.AppleV2 != nil { + in, out := &in.AppleV2, &out.AppleV2 + *out = new(WindowsFunctionAppAuthSettingsV2AppleV2Observation) + (*in).DeepCopyInto(*out) + } + if in.AuthEnabled != nil { + in, out := &in.AuthEnabled, &out.AuthEnabled + *out = new(bool) + **out = **in + } + if in.AzureStaticWebAppV2 != nil { + in, out := &in.AzureStaticWebAppV2, &out.AzureStaticWebAppV2 + *out = new(WindowsFunctionAppAuthSettingsV2AzureStaticWebAppV2Observation) + (*in).DeepCopyInto(*out) + } + if in.ConfigFilePath != nil { + in, out := &in.ConfigFilePath, &out.ConfigFilePath + *out = new(string) + **out = **in + } + if in.CustomOidcV2 != nil { + in, out := &in.CustomOidcV2, &out.CustomOidcV2 + *out = make([]WindowsFunctionAppAuthSettingsV2CustomOidcV2Observation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultProvider != nil { + in, out := &in.DefaultProvider, &out.DefaultProvider + *out = new(string) + **out = **in + } + if in.ExcludedPaths != nil { + in, out := &in.ExcludedPaths, &out.ExcludedPaths + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FacebookV2 != nil { + in, out := &in.FacebookV2, &out.FacebookV2 + *out = new(WindowsFunctionAppAuthSettingsV2FacebookV2Observation) + (*in).DeepCopyInto(*out) + } + if in.ForwardProxyConvention != nil { + in, out := &in.ForwardProxyConvention, &out.ForwardProxyConvention + *out = new(string) + **out = **in + } + if in.ForwardProxyCustomHostHeaderName != nil { + in, out := &in.ForwardProxyCustomHostHeaderName, 
&out.ForwardProxyCustomHostHeaderName + *out = new(string) + **out = **in + } + if in.ForwardProxyCustomSchemeHeaderName != nil { + in, out := &in.ForwardProxyCustomSchemeHeaderName, &out.ForwardProxyCustomSchemeHeaderName + *out = new(string) + **out = **in + } + if in.GithubV2 != nil { + in, out := &in.GithubV2, &out.GithubV2 + *out = new(WindowsFunctionAppAuthSettingsV2GithubV2Observation) + (*in).DeepCopyInto(*out) + } + if in.GoogleV2 != nil { + in, out := &in.GoogleV2, &out.GoogleV2 + *out = new(WindowsFunctionAppAuthSettingsV2GoogleV2Observation) + (*in).DeepCopyInto(*out) + } + if in.HTTPRouteAPIPrefix != nil { + in, out := &in.HTTPRouteAPIPrefix, &out.HTTPRouteAPIPrefix + *out = new(string) + **out = **in + } + if in.Login != nil { + in, out := &in.Login, &out.Login + *out = new(WindowsFunctionAppAuthSettingsV2LoginObservation) + (*in).DeepCopyInto(*out) + } + if in.MicrosoftV2 != nil { + in, out := &in.MicrosoftV2, &out.MicrosoftV2 + *out = new(WindowsFunctionAppAuthSettingsV2MicrosoftV2Observation) + (*in).DeepCopyInto(*out) + } + if in.RequireAuthentication != nil { + in, out := &in.RequireAuthentication, &out.RequireAuthentication + *out = new(bool) + **out = **in + } + if in.RequireHTTPS != nil { + in, out := &in.RequireHTTPS, &out.RequireHTTPS + *out = new(bool) + **out = **in + } + if in.RuntimeVersion != nil { + in, out := &in.RuntimeVersion, &out.RuntimeVersion + *out = new(string) + **out = **in + } + if in.TwitterV2 != nil { + in, out := &in.TwitterV2, &out.TwitterV2 + *out = new(WindowsFunctionAppAuthSettingsV2TwitterV2Observation) + (*in).DeepCopyInto(*out) + } + if in.UnauthenticatedAction != nil { + in, out := &in.UnauthenticatedAction, &out.UnauthenticatedAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppAuthSettingsV2Observation. 
+func (in *WindowsFunctionAppAuthSettingsV2Observation) DeepCopy() *WindowsFunctionAppAuthSettingsV2Observation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppAuthSettingsV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppAuthSettingsV2Parameters) DeepCopyInto(out *WindowsFunctionAppAuthSettingsV2Parameters) { + *out = *in + if in.ActiveDirectoryV2 != nil { + in, out := &in.ActiveDirectoryV2, &out.ActiveDirectoryV2 + *out = new(WindowsFunctionAppAuthSettingsV2ActiveDirectoryV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.AppleV2 != nil { + in, out := &in.AppleV2, &out.AppleV2 + *out = new(WindowsFunctionAppAuthSettingsV2AppleV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.AuthEnabled != nil { + in, out := &in.AuthEnabled, &out.AuthEnabled + *out = new(bool) + **out = **in + } + if in.AzureStaticWebAppV2 != nil { + in, out := &in.AzureStaticWebAppV2, &out.AzureStaticWebAppV2 + *out = new(WindowsFunctionAppAuthSettingsV2AzureStaticWebAppV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.ConfigFilePath != nil { + in, out := &in.ConfigFilePath, &out.ConfigFilePath + *out = new(string) + **out = **in + } + if in.CustomOidcV2 != nil { + in, out := &in.CustomOidcV2, &out.CustomOidcV2 + *out = make([]WindowsFunctionAppAuthSettingsV2CustomOidcV2Parameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultProvider != nil { + in, out := &in.DefaultProvider, &out.DefaultProvider + *out = new(string) + **out = **in + } + if in.ExcludedPaths != nil { + in, out := &in.ExcludedPaths, &out.ExcludedPaths + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FacebookV2 != nil { + in, out := &in.FacebookV2, &out.FacebookV2 + *out = 
new(WindowsFunctionAppAuthSettingsV2FacebookV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.ForwardProxyConvention != nil { + in, out := &in.ForwardProxyConvention, &out.ForwardProxyConvention + *out = new(string) + **out = **in + } + if in.ForwardProxyCustomHostHeaderName != nil { + in, out := &in.ForwardProxyCustomHostHeaderName, &out.ForwardProxyCustomHostHeaderName + *out = new(string) + **out = **in + } + if in.ForwardProxyCustomSchemeHeaderName != nil { + in, out := &in.ForwardProxyCustomSchemeHeaderName, &out.ForwardProxyCustomSchemeHeaderName + *out = new(string) + **out = **in + } + if in.GithubV2 != nil { + in, out := &in.GithubV2, &out.GithubV2 + *out = new(WindowsFunctionAppAuthSettingsV2GithubV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.GoogleV2 != nil { + in, out := &in.GoogleV2, &out.GoogleV2 + *out = new(WindowsFunctionAppAuthSettingsV2GoogleV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.HTTPRouteAPIPrefix != nil { + in, out := &in.HTTPRouteAPIPrefix, &out.HTTPRouteAPIPrefix + *out = new(string) + **out = **in + } + if in.Login != nil { + in, out := &in.Login, &out.Login + *out = new(WindowsFunctionAppAuthSettingsV2LoginParameters) + (*in).DeepCopyInto(*out) + } + if in.MicrosoftV2 != nil { + in, out := &in.MicrosoftV2, &out.MicrosoftV2 + *out = new(WindowsFunctionAppAuthSettingsV2MicrosoftV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.RequireAuthentication != nil { + in, out := &in.RequireAuthentication, &out.RequireAuthentication + *out = new(bool) + **out = **in + } + if in.RequireHTTPS != nil { + in, out := &in.RequireHTTPS, &out.RequireHTTPS + *out = new(bool) + **out = **in + } + if in.RuntimeVersion != nil { + in, out := &in.RuntimeVersion, &out.RuntimeVersion + *out = new(string) + **out = **in + } + if in.TwitterV2 != nil { + in, out := &in.TwitterV2, &out.TwitterV2 + *out = new(WindowsFunctionAppAuthSettingsV2TwitterV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.UnauthenticatedAction != nil { + in, out := 
&in.UnauthenticatedAction, &out.UnauthenticatedAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppAuthSettingsV2Parameters. +func (in *WindowsFunctionAppAuthSettingsV2Parameters) DeepCopy() *WindowsFunctionAppAuthSettingsV2Parameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppAuthSettingsV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppAuthSettingsV2TwitterV2InitParameters) DeepCopyInto(out *WindowsFunctionAppAuthSettingsV2TwitterV2InitParameters) { + *out = *in + if in.ConsumerKey != nil { + in, out := &in.ConsumerKey, &out.ConsumerKey + *out = new(string) + **out = **in + } + if in.ConsumerSecretSettingName != nil { + in, out := &in.ConsumerSecretSettingName, &out.ConsumerSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppAuthSettingsV2TwitterV2InitParameters. +func (in *WindowsFunctionAppAuthSettingsV2TwitterV2InitParameters) DeepCopy() *WindowsFunctionAppAuthSettingsV2TwitterV2InitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppAuthSettingsV2TwitterV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppAuthSettingsV2TwitterV2Observation) DeepCopyInto(out *WindowsFunctionAppAuthSettingsV2TwitterV2Observation) { + *out = *in + if in.ConsumerKey != nil { + in, out := &in.ConsumerKey, &out.ConsumerKey + *out = new(string) + **out = **in + } + if in.ConsumerSecretSettingName != nil { + in, out := &in.ConsumerSecretSettingName, &out.ConsumerSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppAuthSettingsV2TwitterV2Observation. +func (in *WindowsFunctionAppAuthSettingsV2TwitterV2Observation) DeepCopy() *WindowsFunctionAppAuthSettingsV2TwitterV2Observation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppAuthSettingsV2TwitterV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppAuthSettingsV2TwitterV2Parameters) DeepCopyInto(out *WindowsFunctionAppAuthSettingsV2TwitterV2Parameters) { + *out = *in + if in.ConsumerKey != nil { + in, out := &in.ConsumerKey, &out.ConsumerKey + *out = new(string) + **out = **in + } + if in.ConsumerSecretSettingName != nil { + in, out := &in.ConsumerSecretSettingName, &out.ConsumerSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppAuthSettingsV2TwitterV2Parameters. +func (in *WindowsFunctionAppAuthSettingsV2TwitterV2Parameters) DeepCopy() *WindowsFunctionAppAuthSettingsV2TwitterV2Parameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppAuthSettingsV2TwitterV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppBackupInitParameters) DeepCopyInto(out *WindowsFunctionAppBackupInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(WindowsFunctionAppBackupScheduleInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppBackupInitParameters. +func (in *WindowsFunctionAppBackupInitParameters) DeepCopy() *WindowsFunctionAppBackupInitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppBackupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppBackupObservation) DeepCopyInto(out *WindowsFunctionAppBackupObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(WindowsFunctionAppBackupScheduleObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppBackupObservation. +func (in *WindowsFunctionAppBackupObservation) DeepCopy() *WindowsFunctionAppBackupObservation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppBackupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppBackupParameters) DeepCopyInto(out *WindowsFunctionAppBackupParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(WindowsFunctionAppBackupScheduleParameters) + (*in).DeepCopyInto(*out) + } + out.StorageAccountURLSecretRef = in.StorageAccountURLSecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppBackupParameters. +func (in *WindowsFunctionAppBackupParameters) DeepCopy() *WindowsFunctionAppBackupParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppBackupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppBackupScheduleInitParameters) DeepCopyInto(out *WindowsFunctionAppBackupScheduleInitParameters) { + *out = *in + if in.FrequencyInterval != nil { + in, out := &in.FrequencyInterval, &out.FrequencyInterval + *out = new(float64) + **out = **in + } + if in.FrequencyUnit != nil { + in, out := &in.FrequencyUnit, &out.FrequencyUnit + *out = new(string) + **out = **in + } + if in.KeepAtLeastOneBackup != nil { + in, out := &in.KeepAtLeastOneBackup, &out.KeepAtLeastOneBackup + *out = new(bool) + **out = **in + } + if in.RetentionPeriodDays != nil { + in, out := &in.RetentionPeriodDays, &out.RetentionPeriodDays + *out = new(float64) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppBackupScheduleInitParameters. 
+func (in *WindowsFunctionAppBackupScheduleInitParameters) DeepCopy() *WindowsFunctionAppBackupScheduleInitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppBackupScheduleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppBackupScheduleObservation) DeepCopyInto(out *WindowsFunctionAppBackupScheduleObservation) { + *out = *in + if in.FrequencyInterval != nil { + in, out := &in.FrequencyInterval, &out.FrequencyInterval + *out = new(float64) + **out = **in + } + if in.FrequencyUnit != nil { + in, out := &in.FrequencyUnit, &out.FrequencyUnit + *out = new(string) + **out = **in + } + if in.KeepAtLeastOneBackup != nil { + in, out := &in.KeepAtLeastOneBackup, &out.KeepAtLeastOneBackup + *out = new(bool) + **out = **in + } + if in.LastExecutionTime != nil { + in, out := &in.LastExecutionTime, &out.LastExecutionTime + *out = new(string) + **out = **in + } + if in.RetentionPeriodDays != nil { + in, out := &in.RetentionPeriodDays, &out.RetentionPeriodDays + *out = new(float64) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppBackupScheduleObservation. +func (in *WindowsFunctionAppBackupScheduleObservation) DeepCopy() *WindowsFunctionAppBackupScheduleObservation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppBackupScheduleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppBackupScheduleParameters) DeepCopyInto(out *WindowsFunctionAppBackupScheduleParameters) { + *out = *in + if in.FrequencyInterval != nil { + in, out := &in.FrequencyInterval, &out.FrequencyInterval + *out = new(float64) + **out = **in + } + if in.FrequencyUnit != nil { + in, out := &in.FrequencyUnit, &out.FrequencyUnit + *out = new(string) + **out = **in + } + if in.KeepAtLeastOneBackup != nil { + in, out := &in.KeepAtLeastOneBackup, &out.KeepAtLeastOneBackup + *out = new(bool) + **out = **in + } + if in.RetentionPeriodDays != nil { + in, out := &in.RetentionPeriodDays, &out.RetentionPeriodDays + *out = new(float64) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppBackupScheduleParameters. +func (in *WindowsFunctionAppBackupScheduleParameters) DeepCopy() *WindowsFunctionAppBackupScheduleParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppBackupScheduleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppConnectionStringInitParameters) DeepCopyInto(out *WindowsFunctionAppConnectionStringInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppConnectionStringInitParameters. 
+func (in *WindowsFunctionAppConnectionStringInitParameters) DeepCopy() *WindowsFunctionAppConnectionStringInitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppConnectionStringInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppConnectionStringObservation) DeepCopyInto(out *WindowsFunctionAppConnectionStringObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppConnectionStringObservation. +func (in *WindowsFunctionAppConnectionStringObservation) DeepCopy() *WindowsFunctionAppConnectionStringObservation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppConnectionStringObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppConnectionStringParameters) DeepCopyInto(out *WindowsFunctionAppConnectionStringParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + out.ValueSecretRef = in.ValueSecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppConnectionStringParameters. 
+func (in *WindowsFunctionAppConnectionStringParameters) DeepCopy() *WindowsFunctionAppConnectionStringParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppConnectionStringParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppIdentityInitParameters) DeepCopyInto(out *WindowsFunctionAppIdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppIdentityInitParameters. +func (in *WindowsFunctionAppIdentityInitParameters) DeepCopy() *WindowsFunctionAppIdentityInitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppIdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppIdentityObservation) DeepCopyInto(out *WindowsFunctionAppIdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppIdentityObservation. +func (in *WindowsFunctionAppIdentityObservation) DeepCopy() *WindowsFunctionAppIdentityObservation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppIdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppIdentityParameters) DeepCopyInto(out *WindowsFunctionAppIdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppIdentityParameters. 
+func (in *WindowsFunctionAppIdentityParameters) DeepCopy() *WindowsFunctionAppIdentityParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppIdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppInitParameters) DeepCopyInto(out *WindowsFunctionAppInitParameters) { + *out = *in + if in.AppSettings != nil { + in, out := &in.AppSettings, &out.AppSettings + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AuthSettings != nil { + in, out := &in.AuthSettings, &out.AuthSettings + *out = new(WindowsFunctionAppAuthSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AuthSettingsV2 != nil { + in, out := &in.AuthSettingsV2, &out.AuthSettingsV2 + *out = new(WindowsFunctionAppAuthSettingsV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.Backup != nil { + in, out := &in.Backup, &out.Backup + *out = new(WindowsFunctionAppBackupInitParameters) + (*in).DeepCopyInto(*out) + } + if in.BuiltinLoggingEnabled != nil { + in, out := &in.BuiltinLoggingEnabled, &out.BuiltinLoggingEnabled + *out = new(bool) + **out = **in + } + if in.ClientCertificateEnabled != nil { + in, out := &in.ClientCertificateEnabled, &out.ClientCertificateEnabled + *out = new(bool) + **out = **in + } + if in.ClientCertificateExclusionPaths != nil { + in, out := &in.ClientCertificateExclusionPaths, &out.ClientCertificateExclusionPaths + *out = new(string) + **out = **in + } + if in.ClientCertificateMode != nil { + in, out := &in.ClientCertificateMode, &out.ClientCertificateMode + *out = new(string) + **out = **in + } + if in.ConnectionString != nil { + in, out := &in.ConnectionString, &out.ConnectionString + 
*out = make([]WindowsFunctionAppConnectionStringInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ContentShareForceDisabled != nil { + in, out := &in.ContentShareForceDisabled, &out.ContentShareForceDisabled + *out = new(bool) + **out = **in + } + if in.DailyMemoryTimeQuota != nil { + in, out := &in.DailyMemoryTimeQuota, &out.DailyMemoryTimeQuota + *out = new(float64) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.FtpPublishBasicAuthenticationEnabled != nil { + in, out := &in.FtpPublishBasicAuthenticationEnabled, &out.FtpPublishBasicAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.FunctionsExtensionVersion != nil { + in, out := &in.FunctionsExtensionVersion, &out.FunctionsExtensionVersion + *out = new(string) + **out = **in + } + if in.HTTPSOnly != nil { + in, out := &in.HTTPSOnly, &out.HTTPSOnly + *out = new(bool) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(WindowsFunctionAppIdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.KeyVaultReferenceIdentityID != nil { + in, out := &in.KeyVaultReferenceIdentityID, &out.KeyVaultReferenceIdentityID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ServicePlanID != nil { + in, out := &in.ServicePlanID, &out.ServicePlanID + *out = new(string) + **out = **in + } + if in.ServicePlanIDRef != nil { + in, out := &in.ServicePlanIDRef, &out.ServicePlanIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServicePlanIDSelector != nil { + in, out := &in.ServicePlanIDSelector, &out.ServicePlanIDSelector + *out = new(v1.Selector) + 
(*in).DeepCopyInto(*out) + } + if in.SiteConfig != nil { + in, out := &in.SiteConfig, &out.SiteConfig + *out = new(WindowsFunctionAppSiteConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StickySettings != nil { + in, out := &in.StickySettings, &out.StickySettings + *out = new(WindowsFunctionAppStickySettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StorageAccount != nil { + in, out := &in.StorageAccount, &out.StorageAccount + *out = make([]WindowsFunctionAppStorageAccountInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StorageAccountName != nil { + in, out := &in.StorageAccountName, &out.StorageAccountName + *out = new(string) + **out = **in + } + if in.StorageAccountNameRef != nil { + in, out := &in.StorageAccountNameRef, &out.StorageAccountNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StorageAccountNameSelector != nil { + in, out := &in.StorageAccountNameSelector, &out.StorageAccountNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StorageKeyVaultSecretID != nil { + in, out := &in.StorageKeyVaultSecretID, &out.StorageKeyVaultSecretID + *out = new(string) + **out = **in + } + if in.StorageUsesManagedIdentity != nil { + in, out := &in.StorageUsesManagedIdentity, &out.StorageUsesManagedIdentity + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetIDRef != nil { + in, out := &in.VirtualNetworkSubnetIDRef, &out.VirtualNetworkSubnetIDRef + *out = new(v1.Reference) + 
(*in).DeepCopyInto(*out) + } + if in.VirtualNetworkSubnetIDSelector != nil { + in, out := &in.VirtualNetworkSubnetIDSelector, &out.VirtualNetworkSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.WebdeployPublishBasicAuthenticationEnabled != nil { + in, out := &in.WebdeployPublishBasicAuthenticationEnabled, &out.WebdeployPublishBasicAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.ZipDeployFile != nil { + in, out := &in.ZipDeployFile, &out.ZipDeployFile + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppInitParameters. +func (in *WindowsFunctionAppInitParameters) DeepCopy() *WindowsFunctionAppInitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppList) DeepCopyInto(out *WindowsFunctionAppList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]WindowsFunctionApp, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppList. +func (in *WindowsFunctionAppList) DeepCopy() *WindowsFunctionAppList { + if in == nil { + return nil + } + out := new(WindowsFunctionAppList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WindowsFunctionAppList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *WindowsFunctionAppObservation) DeepCopyInto(out *WindowsFunctionAppObservation) { + *out = *in + if in.AppSettings != nil { + in, out := &in.AppSettings, &out.AppSettings + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AuthSettings != nil { + in, out := &in.AuthSettings, &out.AuthSettings + *out = new(WindowsFunctionAppAuthSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.AuthSettingsV2 != nil { + in, out := &in.AuthSettingsV2, &out.AuthSettingsV2 + *out = new(WindowsFunctionAppAuthSettingsV2Observation) + (*in).DeepCopyInto(*out) + } + if in.Backup != nil { + in, out := &in.Backup, &out.Backup + *out = new(WindowsFunctionAppBackupObservation) + (*in).DeepCopyInto(*out) + } + if in.BuiltinLoggingEnabled != nil { + in, out := &in.BuiltinLoggingEnabled, &out.BuiltinLoggingEnabled + *out = new(bool) + **out = **in + } + if in.ClientCertificateEnabled != nil { + in, out := &in.ClientCertificateEnabled, &out.ClientCertificateEnabled + *out = new(bool) + **out = **in + } + if in.ClientCertificateExclusionPaths != nil { + in, out := &in.ClientCertificateExclusionPaths, &out.ClientCertificateExclusionPaths + *out = new(string) + **out = **in + } + if in.ClientCertificateMode != nil { + in, out := &in.ClientCertificateMode, &out.ClientCertificateMode + *out = new(string) + **out = **in + } + if in.ConnectionString != nil { + in, out := &in.ConnectionString, &out.ConnectionString + *out = make([]WindowsFunctionAppConnectionStringObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ContentShareForceDisabled != nil { + in, out := &in.ContentShareForceDisabled, &out.ContentShareForceDisabled + *out = new(bool) + **out = **in + } + if in.DailyMemoryTimeQuota != nil { + in, 
out := &in.DailyMemoryTimeQuota, &out.DailyMemoryTimeQuota + *out = new(float64) + **out = **in + } + if in.DefaultHostName != nil { + in, out := &in.DefaultHostName, &out.DefaultHostName + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.FtpPublishBasicAuthenticationEnabled != nil { + in, out := &in.FtpPublishBasicAuthenticationEnabled, &out.FtpPublishBasicAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.FunctionsExtensionVersion != nil { + in, out := &in.FunctionsExtensionVersion, &out.FunctionsExtensionVersion + *out = new(string) + **out = **in + } + if in.HTTPSOnly != nil { + in, out := &in.HTTPSOnly, &out.HTTPSOnly + *out = new(bool) + **out = **in + } + if in.HostingEnvironmentID != nil { + in, out := &in.HostingEnvironmentID, &out.HostingEnvironmentID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(WindowsFunctionAppIdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.KeyVaultReferenceIdentityID != nil { + in, out := &in.KeyVaultReferenceIdentityID, &out.KeyVaultReferenceIdentityID + *out = new(string) + **out = **in + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.OutboundIPAddressList != nil { + in, out := &in.OutboundIPAddressList, &out.OutboundIPAddressList + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OutboundIPAddresses != nil { + in, out := &in.OutboundIPAddresses, &out.OutboundIPAddresses + *out = new(string) + **out = **in + } + if in.PossibleOutboundIPAddressList != nil { + in, out 
:= &in.PossibleOutboundIPAddressList, &out.PossibleOutboundIPAddressList + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PossibleOutboundIPAddresses != nil { + in, out := &in.PossibleOutboundIPAddresses, &out.PossibleOutboundIPAddresses + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ServicePlanID != nil { + in, out := &in.ServicePlanID, &out.ServicePlanID + *out = new(string) + **out = **in + } + if in.SiteConfig != nil { + in, out := &in.SiteConfig, &out.SiteConfig + *out = new(WindowsFunctionAppSiteConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.StickySettings != nil { + in, out := &in.StickySettings, &out.StickySettings + *out = new(WindowsFunctionAppStickySettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.StorageAccount != nil { + in, out := &in.StorageAccount, &out.StorageAccount + *out = make([]WindowsFunctionAppStorageAccountObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StorageAccountName != nil { + in, out := &in.StorageAccountName, &out.StorageAccountName + *out = new(string) + **out = **in + } + if in.StorageKeyVaultSecretID != nil { + in, out := &in.StorageKeyVaultSecretID, &out.StorageKeyVaultSecretID + *out = new(string) + **out = **in + } + if in.StorageUsesManagedIdentity != nil { + in, out := &in.StorageUsesManagedIdentity, &out.StorageUsesManagedIdentity + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + 
(*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.WebdeployPublishBasicAuthenticationEnabled != nil { + in, out := &in.WebdeployPublishBasicAuthenticationEnabled, &out.WebdeployPublishBasicAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.ZipDeployFile != nil { + in, out := &in.ZipDeployFile, &out.ZipDeployFile + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppObservation. +func (in *WindowsFunctionAppObservation) DeepCopy() *WindowsFunctionAppObservation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppParameters) DeepCopyInto(out *WindowsFunctionAppParameters) { + *out = *in + if in.AppSettings != nil { + in, out := &in.AppSettings, &out.AppSettings + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AuthSettings != nil { + in, out := &in.AuthSettings, &out.AuthSettings + *out = new(WindowsFunctionAppAuthSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.AuthSettingsV2 != nil { + in, out := &in.AuthSettingsV2, &out.AuthSettingsV2 + *out = new(WindowsFunctionAppAuthSettingsV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.Backup != nil { + in, out := &in.Backup, &out.Backup + *out = new(WindowsFunctionAppBackupParameters) + (*in).DeepCopyInto(*out) + } + if in.BuiltinLoggingEnabled != nil { + in, out := &in.BuiltinLoggingEnabled, &out.BuiltinLoggingEnabled + *out = new(bool) + **out = **in + } + if in.ClientCertificateEnabled != nil { + in, out := &in.ClientCertificateEnabled, &out.ClientCertificateEnabled + *out = new(bool) + **out = **in + } + if in.ClientCertificateExclusionPaths != nil { + in, out := &in.ClientCertificateExclusionPaths, &out.ClientCertificateExclusionPaths + *out = new(string) + **out = **in + } + if in.ClientCertificateMode != nil { + in, out := &in.ClientCertificateMode, &out.ClientCertificateMode + *out = new(string) + **out = **in + } + if in.ConnectionString != nil { + in, out := &in.ConnectionString, &out.ConnectionString + *out = make([]WindowsFunctionAppConnectionStringParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ContentShareForceDisabled != nil { + in, out := &in.ContentShareForceDisabled, &out.ContentShareForceDisabled + *out = new(bool) + **out = **in + } + if in.DailyMemoryTimeQuota != nil { + in, out := 
&in.DailyMemoryTimeQuota, &out.DailyMemoryTimeQuota + *out = new(float64) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.FtpPublishBasicAuthenticationEnabled != nil { + in, out := &in.FtpPublishBasicAuthenticationEnabled, &out.FtpPublishBasicAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.FunctionsExtensionVersion != nil { + in, out := &in.FunctionsExtensionVersion, &out.FunctionsExtensionVersion + *out = new(string) + **out = **in + } + if in.HTTPSOnly != nil { + in, out := &in.HTTPSOnly, &out.HTTPSOnly + *out = new(bool) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(WindowsFunctionAppIdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.KeyVaultReferenceIdentityID != nil { + in, out := &in.KeyVaultReferenceIdentityID, &out.KeyVaultReferenceIdentityID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ServicePlanID != nil { + in, out := &in.ServicePlanID, &out.ServicePlanID + *out = new(string) + **out = **in + } + if in.ServicePlanIDRef != nil { + in, out := &in.ServicePlanIDRef, &out.ServicePlanIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServicePlanIDSelector 
!= nil { + in, out := &in.ServicePlanIDSelector, &out.ServicePlanIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SiteConfig != nil { + in, out := &in.SiteConfig, &out.SiteConfig + *out = new(WindowsFunctionAppSiteConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.StickySettings != nil { + in, out := &in.StickySettings, &out.StickySettings + *out = new(WindowsFunctionAppStickySettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.StorageAccount != nil { + in, out := &in.StorageAccount, &out.StorageAccount + *out = make([]WindowsFunctionAppStorageAccountParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StorageAccountAccessKeySecretRef != nil { + in, out := &in.StorageAccountAccessKeySecretRef, &out.StorageAccountAccessKeySecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.StorageAccountName != nil { + in, out := &in.StorageAccountName, &out.StorageAccountName + *out = new(string) + **out = **in + } + if in.StorageAccountNameRef != nil { + in, out := &in.StorageAccountNameRef, &out.StorageAccountNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StorageAccountNameSelector != nil { + in, out := &in.StorageAccountNameSelector, &out.StorageAccountNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StorageKeyVaultSecretID != nil { + in, out := &in.StorageKeyVaultSecretID, &out.StorageKeyVaultSecretID + *out = new(string) + **out = **in + } + if in.StorageUsesManagedIdentity != nil { + in, out := &in.StorageUsesManagedIdentity, &out.StorageUsesManagedIdentity + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if 
in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetIDRef != nil { + in, out := &in.VirtualNetworkSubnetIDRef, &out.VirtualNetworkSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkSubnetIDSelector != nil { + in, out := &in.VirtualNetworkSubnetIDSelector, &out.VirtualNetworkSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.WebdeployPublishBasicAuthenticationEnabled != nil { + in, out := &in.WebdeployPublishBasicAuthenticationEnabled, &out.WebdeployPublishBasicAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.ZipDeployFile != nil { + in, out := &in.ZipDeployFile, &out.ZipDeployFile + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppParameters. +func (in *WindowsFunctionAppParameters) DeepCopy() *WindowsFunctionAppParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSiteConfigAppServiceLogsInitParameters) DeepCopyInto(out *WindowsFunctionAppSiteConfigAppServiceLogsInitParameters) { + *out = *in + if in.DiskQuotaMb != nil { + in, out := &in.DiskQuotaMb, &out.DiskQuotaMb + *out = new(float64) + **out = **in + } + if in.RetentionPeriodDays != nil { + in, out := &in.RetentionPeriodDays, &out.RetentionPeriodDays + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSiteConfigAppServiceLogsInitParameters. 
+func (in *WindowsFunctionAppSiteConfigAppServiceLogsInitParameters) DeepCopy() *WindowsFunctionAppSiteConfigAppServiceLogsInitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSiteConfigAppServiceLogsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSiteConfigAppServiceLogsObservation) DeepCopyInto(out *WindowsFunctionAppSiteConfigAppServiceLogsObservation) { + *out = *in + if in.DiskQuotaMb != nil { + in, out := &in.DiskQuotaMb, &out.DiskQuotaMb + *out = new(float64) + **out = **in + } + if in.RetentionPeriodDays != nil { + in, out := &in.RetentionPeriodDays, &out.RetentionPeriodDays + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSiteConfigAppServiceLogsObservation. +func (in *WindowsFunctionAppSiteConfigAppServiceLogsObservation) DeepCopy() *WindowsFunctionAppSiteConfigAppServiceLogsObservation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSiteConfigAppServiceLogsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSiteConfigAppServiceLogsParameters) DeepCopyInto(out *WindowsFunctionAppSiteConfigAppServiceLogsParameters) { + *out = *in + if in.DiskQuotaMb != nil { + in, out := &in.DiskQuotaMb, &out.DiskQuotaMb + *out = new(float64) + **out = **in + } + if in.RetentionPeriodDays != nil { + in, out := &in.RetentionPeriodDays, &out.RetentionPeriodDays + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSiteConfigAppServiceLogsParameters. 
+func (in *WindowsFunctionAppSiteConfigAppServiceLogsParameters) DeepCopy() *WindowsFunctionAppSiteConfigAppServiceLogsParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSiteConfigAppServiceLogsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSiteConfigApplicationStackInitParameters) DeepCopyInto(out *WindowsFunctionAppSiteConfigApplicationStackInitParameters) { + *out = *in + if in.DotnetVersion != nil { + in, out := &in.DotnetVersion, &out.DotnetVersion + *out = new(string) + **out = **in + } + if in.JavaVersion != nil { + in, out := &in.JavaVersion, &out.JavaVersion + *out = new(string) + **out = **in + } + if in.NodeVersion != nil { + in, out := &in.NodeVersion, &out.NodeVersion + *out = new(string) + **out = **in + } + if in.PowershellCoreVersion != nil { + in, out := &in.PowershellCoreVersion, &out.PowershellCoreVersion + *out = new(string) + **out = **in + } + if in.UseCustomRuntime != nil { + in, out := &in.UseCustomRuntime, &out.UseCustomRuntime + *out = new(bool) + **out = **in + } + if in.UseDotnetIsolatedRuntime != nil { + in, out := &in.UseDotnetIsolatedRuntime, &out.UseDotnetIsolatedRuntime + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSiteConfigApplicationStackInitParameters. +func (in *WindowsFunctionAppSiteConfigApplicationStackInitParameters) DeepCopy() *WindowsFunctionAppSiteConfigApplicationStackInitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSiteConfigApplicationStackInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSiteConfigApplicationStackObservation) DeepCopyInto(out *WindowsFunctionAppSiteConfigApplicationStackObservation) { + *out = *in + if in.DotnetVersion != nil { + in, out := &in.DotnetVersion, &out.DotnetVersion + *out = new(string) + **out = **in + } + if in.JavaVersion != nil { + in, out := &in.JavaVersion, &out.JavaVersion + *out = new(string) + **out = **in + } + if in.NodeVersion != nil { + in, out := &in.NodeVersion, &out.NodeVersion + *out = new(string) + **out = **in + } + if in.PowershellCoreVersion != nil { + in, out := &in.PowershellCoreVersion, &out.PowershellCoreVersion + *out = new(string) + **out = **in + } + if in.UseCustomRuntime != nil { + in, out := &in.UseCustomRuntime, &out.UseCustomRuntime + *out = new(bool) + **out = **in + } + if in.UseDotnetIsolatedRuntime != nil { + in, out := &in.UseDotnetIsolatedRuntime, &out.UseDotnetIsolatedRuntime + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSiteConfigApplicationStackObservation. +func (in *WindowsFunctionAppSiteConfigApplicationStackObservation) DeepCopy() *WindowsFunctionAppSiteConfigApplicationStackObservation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSiteConfigApplicationStackObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSiteConfigApplicationStackParameters) DeepCopyInto(out *WindowsFunctionAppSiteConfigApplicationStackParameters) { + *out = *in + if in.DotnetVersion != nil { + in, out := &in.DotnetVersion, &out.DotnetVersion + *out = new(string) + **out = **in + } + if in.JavaVersion != nil { + in, out := &in.JavaVersion, &out.JavaVersion + *out = new(string) + **out = **in + } + if in.NodeVersion != nil { + in, out := &in.NodeVersion, &out.NodeVersion + *out = new(string) + **out = **in + } + if in.PowershellCoreVersion != nil { + in, out := &in.PowershellCoreVersion, &out.PowershellCoreVersion + *out = new(string) + **out = **in + } + if in.UseCustomRuntime != nil { + in, out := &in.UseCustomRuntime, &out.UseCustomRuntime + *out = new(bool) + **out = **in + } + if in.UseDotnetIsolatedRuntime != nil { + in, out := &in.UseDotnetIsolatedRuntime, &out.UseDotnetIsolatedRuntime + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSiteConfigApplicationStackParameters. +func (in *WindowsFunctionAppSiteConfigApplicationStackParameters) DeepCopy() *WindowsFunctionAppSiteConfigApplicationStackParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSiteConfigApplicationStackParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSiteConfigCorsInitParameters) DeepCopyInto(out *WindowsFunctionAppSiteConfigCorsInitParameters) { + *out = *in + if in.AllowedOrigins != nil { + in, out := &in.AllowedOrigins, &out.AllowedOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SupportCredentials != nil { + in, out := &in.SupportCredentials, &out.SupportCredentials + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSiteConfigCorsInitParameters. +func (in *WindowsFunctionAppSiteConfigCorsInitParameters) DeepCopy() *WindowsFunctionAppSiteConfigCorsInitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSiteConfigCorsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSiteConfigCorsObservation) DeepCopyInto(out *WindowsFunctionAppSiteConfigCorsObservation) { + *out = *in + if in.AllowedOrigins != nil { + in, out := &in.AllowedOrigins, &out.AllowedOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SupportCredentials != nil { + in, out := &in.SupportCredentials, &out.SupportCredentials + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSiteConfigCorsObservation. 
+func (in *WindowsFunctionAppSiteConfigCorsObservation) DeepCopy() *WindowsFunctionAppSiteConfigCorsObservation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSiteConfigCorsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSiteConfigCorsParameters) DeepCopyInto(out *WindowsFunctionAppSiteConfigCorsParameters) { + *out = *in + if in.AllowedOrigins != nil { + in, out := &in.AllowedOrigins, &out.AllowedOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SupportCredentials != nil { + in, out := &in.SupportCredentials, &out.SupportCredentials + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSiteConfigCorsParameters. +func (in *WindowsFunctionAppSiteConfigCorsParameters) DeepCopy() *WindowsFunctionAppSiteConfigCorsParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSiteConfigCorsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSiteConfigIPRestrictionHeadersInitParameters) DeepCopyInto(out *WindowsFunctionAppSiteConfigIPRestrictionHeadersInitParameters) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSiteConfigIPRestrictionHeadersInitParameters. +func (in *WindowsFunctionAppSiteConfigIPRestrictionHeadersInitParameters) DeepCopy() *WindowsFunctionAppSiteConfigIPRestrictionHeadersInitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSiteConfigIPRestrictionHeadersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSiteConfigIPRestrictionHeadersObservation) DeepCopyInto(out *WindowsFunctionAppSiteConfigIPRestrictionHeadersObservation) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSiteConfigIPRestrictionHeadersObservation. +func (in *WindowsFunctionAppSiteConfigIPRestrictionHeadersObservation) DeepCopy() *WindowsFunctionAppSiteConfigIPRestrictionHeadersObservation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSiteConfigIPRestrictionHeadersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSiteConfigIPRestrictionHeadersParameters) DeepCopyInto(out *WindowsFunctionAppSiteConfigIPRestrictionHeadersParameters) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSiteConfigIPRestrictionHeadersParameters. +func (in *WindowsFunctionAppSiteConfigIPRestrictionHeadersParameters) DeepCopy() *WindowsFunctionAppSiteConfigIPRestrictionHeadersParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSiteConfigIPRestrictionHeadersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSiteConfigIPRestrictionInitParameters) DeepCopyInto(out *WindowsFunctionAppSiteConfigIPRestrictionInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]WindowsFunctionAppSiteConfigIPRestrictionHeadersInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetIDRef != nil { + in, out := &in.VirtualNetworkSubnetIDRef, &out.VirtualNetworkSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkSubnetIDSelector != nil { + in, out := &in.VirtualNetworkSubnetIDSelector, &out.VirtualNetworkSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSiteConfigIPRestrictionInitParameters. 
+func (in *WindowsFunctionAppSiteConfigIPRestrictionInitParameters) DeepCopy() *WindowsFunctionAppSiteConfigIPRestrictionInitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSiteConfigIPRestrictionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSiteConfigIPRestrictionObservation) DeepCopyInto(out *WindowsFunctionAppSiteConfigIPRestrictionObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]WindowsFunctionAppSiteConfigIPRestrictionHeadersObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSiteConfigIPRestrictionObservation. 
+func (in *WindowsFunctionAppSiteConfigIPRestrictionObservation) DeepCopy() *WindowsFunctionAppSiteConfigIPRestrictionObservation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSiteConfigIPRestrictionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSiteConfigIPRestrictionParameters) DeepCopyInto(out *WindowsFunctionAppSiteConfigIPRestrictionParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]WindowsFunctionAppSiteConfigIPRestrictionHeadersParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetIDRef != nil { + in, out := &in.VirtualNetworkSubnetIDRef, &out.VirtualNetworkSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkSubnetIDSelector != nil { + in, out := &in.VirtualNetworkSubnetIDSelector, &out.VirtualNetworkSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, 
creating a new WindowsFunctionAppSiteConfigIPRestrictionParameters. +func (in *WindowsFunctionAppSiteConfigIPRestrictionParameters) DeepCopy() *WindowsFunctionAppSiteConfigIPRestrictionParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSiteConfigIPRestrictionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSiteConfigInitParameters) DeepCopyInto(out *WindowsFunctionAppSiteConfigInitParameters) { + *out = *in + if in.APIDefinitionURL != nil { + in, out := &in.APIDefinitionURL, &out.APIDefinitionURL + *out = new(string) + **out = **in + } + if in.APIManagementAPIID != nil { + in, out := &in.APIManagementAPIID, &out.APIManagementAPIID + *out = new(string) + **out = **in + } + if in.AlwaysOn != nil { + in, out := &in.AlwaysOn, &out.AlwaysOn + *out = new(bool) + **out = **in + } + if in.AppCommandLine != nil { + in, out := &in.AppCommandLine, &out.AppCommandLine + *out = new(string) + **out = **in + } + if in.AppScaleLimit != nil { + in, out := &in.AppScaleLimit, &out.AppScaleLimit + *out = new(float64) + **out = **in + } + if in.AppServiceLogs != nil { + in, out := &in.AppServiceLogs, &out.AppServiceLogs + *out = new(WindowsFunctionAppSiteConfigAppServiceLogsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ApplicationStack != nil { + in, out := &in.ApplicationStack, &out.ApplicationStack + *out = new(WindowsFunctionAppSiteConfigApplicationStackInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Cors != nil { + in, out := &in.Cors, &out.Cors + *out = new(WindowsFunctionAppSiteConfigCorsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DefaultDocuments != nil { + in, out := &in.DefaultDocuments, &out.DefaultDocuments + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if 
in.ElasticInstanceMinimum != nil { + in, out := &in.ElasticInstanceMinimum, &out.ElasticInstanceMinimum + *out = new(float64) + **out = **in + } + if in.FtpsState != nil { + in, out := &in.FtpsState, &out.FtpsState + *out = new(string) + **out = **in + } + if in.HealthCheckEvictionTimeInMin != nil { + in, out := &in.HealthCheckEvictionTimeInMin, &out.HealthCheckEvictionTimeInMin + *out = new(float64) + **out = **in + } + if in.HealthCheckPath != nil { + in, out := &in.HealthCheckPath, &out.HealthCheckPath + *out = new(string) + **out = **in + } + if in.Http2Enabled != nil { + in, out := &in.Http2Enabled, &out.Http2Enabled + *out = new(bool) + **out = **in + } + if in.IPRestriction != nil { + in, out := &in.IPRestriction, &out.IPRestriction + *out = make([]WindowsFunctionAppSiteConfigIPRestrictionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPRestrictionDefaultAction != nil { + in, out := &in.IPRestrictionDefaultAction, &out.IPRestrictionDefaultAction + *out = new(string) + **out = **in + } + if in.LoadBalancingMode != nil { + in, out := &in.LoadBalancingMode, &out.LoadBalancingMode + *out = new(string) + **out = **in + } + if in.ManagedPipelineMode != nil { + in, out := &in.ManagedPipelineMode, &out.ManagedPipelineMode + *out = new(string) + **out = **in + } + if in.MinimumTLSVersion != nil { + in, out := &in.MinimumTLSVersion, &out.MinimumTLSVersion + *out = new(string) + **out = **in + } + if in.PreWarmedInstanceCount != nil { + in, out := &in.PreWarmedInstanceCount, &out.PreWarmedInstanceCount + *out = new(float64) + **out = **in + } + if in.RemoteDebuggingEnabled != nil { + in, out := &in.RemoteDebuggingEnabled, &out.RemoteDebuggingEnabled + *out = new(bool) + **out = **in + } + if in.RemoteDebuggingVersion != nil { + in, out := &in.RemoteDebuggingVersion, &out.RemoteDebuggingVersion + *out = new(string) + **out = **in + } + if in.RuntimeScaleMonitoringEnabled != nil { + in, out := 
&in.RuntimeScaleMonitoringEnabled, &out.RuntimeScaleMonitoringEnabled + *out = new(bool) + **out = **in + } + if in.ScmIPRestriction != nil { + in, out := &in.ScmIPRestriction, &out.ScmIPRestriction + *out = make([]WindowsFunctionAppSiteConfigScmIPRestrictionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ScmIPRestrictionDefaultAction != nil { + in, out := &in.ScmIPRestrictionDefaultAction, &out.ScmIPRestrictionDefaultAction + *out = new(string) + **out = **in + } + if in.ScmMinimumTLSVersion != nil { + in, out := &in.ScmMinimumTLSVersion, &out.ScmMinimumTLSVersion + *out = new(string) + **out = **in + } + if in.ScmUseMainIPRestriction != nil { + in, out := &in.ScmUseMainIPRestriction, &out.ScmUseMainIPRestriction + *out = new(bool) + **out = **in + } + if in.Use32BitWorker != nil { + in, out := &in.Use32BitWorker, &out.Use32BitWorker + *out = new(bool) + **out = **in + } + if in.VnetRouteAllEnabled != nil { + in, out := &in.VnetRouteAllEnabled, &out.VnetRouteAllEnabled + *out = new(bool) + **out = **in + } + if in.WebsocketsEnabled != nil { + in, out := &in.WebsocketsEnabled, &out.WebsocketsEnabled + *out = new(bool) + **out = **in + } + if in.WorkerCount != nil { + in, out := &in.WorkerCount, &out.WorkerCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSiteConfigInitParameters. +func (in *WindowsFunctionAppSiteConfigInitParameters) DeepCopy() *WindowsFunctionAppSiteConfigInitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSiteConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSiteConfigObservation) DeepCopyInto(out *WindowsFunctionAppSiteConfigObservation) { + *out = *in + if in.APIDefinitionURL != nil { + in, out := &in.APIDefinitionURL, &out.APIDefinitionURL + *out = new(string) + **out = **in + } + if in.APIManagementAPIID != nil { + in, out := &in.APIManagementAPIID, &out.APIManagementAPIID + *out = new(string) + **out = **in + } + if in.AlwaysOn != nil { + in, out := &in.AlwaysOn, &out.AlwaysOn + *out = new(bool) + **out = **in + } + if in.AppCommandLine != nil { + in, out := &in.AppCommandLine, &out.AppCommandLine + *out = new(string) + **out = **in + } + if in.AppScaleLimit != nil { + in, out := &in.AppScaleLimit, &out.AppScaleLimit + *out = new(float64) + **out = **in + } + if in.AppServiceLogs != nil { + in, out := &in.AppServiceLogs, &out.AppServiceLogs + *out = new(WindowsFunctionAppSiteConfigAppServiceLogsObservation) + (*in).DeepCopyInto(*out) + } + if in.ApplicationStack != nil { + in, out := &in.ApplicationStack, &out.ApplicationStack + *out = new(WindowsFunctionAppSiteConfigApplicationStackObservation) + (*in).DeepCopyInto(*out) + } + if in.Cors != nil { + in, out := &in.Cors, &out.Cors + *out = new(WindowsFunctionAppSiteConfigCorsObservation) + (*in).DeepCopyInto(*out) + } + if in.DefaultDocuments != nil { + in, out := &in.DefaultDocuments, &out.DefaultDocuments + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DetailedErrorLoggingEnabled != nil { + in, out := &in.DetailedErrorLoggingEnabled, &out.DetailedErrorLoggingEnabled + *out = new(bool) + **out = **in + } + if in.ElasticInstanceMinimum != nil { + in, out := &in.ElasticInstanceMinimum, &out.ElasticInstanceMinimum + *out = new(float64) + **out = **in + } + if in.FtpsState != nil { + in, out := &in.FtpsState, &out.FtpsState + *out = new(string) + **out = **in + } + if in.HealthCheckEvictionTimeInMin != nil { + in, out 
:= &in.HealthCheckEvictionTimeInMin, &out.HealthCheckEvictionTimeInMin + *out = new(float64) + **out = **in + } + if in.HealthCheckPath != nil { + in, out := &in.HealthCheckPath, &out.HealthCheckPath + *out = new(string) + **out = **in + } + if in.Http2Enabled != nil { + in, out := &in.Http2Enabled, &out.Http2Enabled + *out = new(bool) + **out = **in + } + if in.IPRestriction != nil { + in, out := &in.IPRestriction, &out.IPRestriction + *out = make([]WindowsFunctionAppSiteConfigIPRestrictionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPRestrictionDefaultAction != nil { + in, out := &in.IPRestrictionDefaultAction, &out.IPRestrictionDefaultAction + *out = new(string) + **out = **in + } + if in.LoadBalancingMode != nil { + in, out := &in.LoadBalancingMode, &out.LoadBalancingMode + *out = new(string) + **out = **in + } + if in.ManagedPipelineMode != nil { + in, out := &in.ManagedPipelineMode, &out.ManagedPipelineMode + *out = new(string) + **out = **in + } + if in.MinimumTLSVersion != nil { + in, out := &in.MinimumTLSVersion, &out.MinimumTLSVersion + *out = new(string) + **out = **in + } + if in.PreWarmedInstanceCount != nil { + in, out := &in.PreWarmedInstanceCount, &out.PreWarmedInstanceCount + *out = new(float64) + **out = **in + } + if in.RemoteDebuggingEnabled != nil { + in, out := &in.RemoteDebuggingEnabled, &out.RemoteDebuggingEnabled + *out = new(bool) + **out = **in + } + if in.RemoteDebuggingVersion != nil { + in, out := &in.RemoteDebuggingVersion, &out.RemoteDebuggingVersion + *out = new(string) + **out = **in + } + if in.RuntimeScaleMonitoringEnabled != nil { + in, out := &in.RuntimeScaleMonitoringEnabled, &out.RuntimeScaleMonitoringEnabled + *out = new(bool) + **out = **in + } + if in.ScmIPRestriction != nil { + in, out := &in.ScmIPRestriction, &out.ScmIPRestriction + *out = make([]WindowsFunctionAppSiteConfigScmIPRestrictionObservation, len(*in)) + for i := range *in { + 
(*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ScmIPRestrictionDefaultAction != nil { + in, out := &in.ScmIPRestrictionDefaultAction, &out.ScmIPRestrictionDefaultAction + *out = new(string) + **out = **in + } + if in.ScmMinimumTLSVersion != nil { + in, out := &in.ScmMinimumTLSVersion, &out.ScmMinimumTLSVersion + *out = new(string) + **out = **in + } + if in.ScmType != nil { + in, out := &in.ScmType, &out.ScmType + *out = new(string) + **out = **in + } + if in.ScmUseMainIPRestriction != nil { + in, out := &in.ScmUseMainIPRestriction, &out.ScmUseMainIPRestriction + *out = new(bool) + **out = **in + } + if in.Use32BitWorker != nil { + in, out := &in.Use32BitWorker, &out.Use32BitWorker + *out = new(bool) + **out = **in + } + if in.VnetRouteAllEnabled != nil { + in, out := &in.VnetRouteAllEnabled, &out.VnetRouteAllEnabled + *out = new(bool) + **out = **in + } + if in.WebsocketsEnabled != nil { + in, out := &in.WebsocketsEnabled, &out.WebsocketsEnabled + *out = new(bool) + **out = **in + } + if in.WindowsFxVersion != nil { + in, out := &in.WindowsFxVersion, &out.WindowsFxVersion + *out = new(string) + **out = **in + } + if in.WorkerCount != nil { + in, out := &in.WorkerCount, &out.WorkerCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSiteConfigObservation. +func (in *WindowsFunctionAppSiteConfigObservation) DeepCopy() *WindowsFunctionAppSiteConfigObservation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSiteConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSiteConfigParameters) DeepCopyInto(out *WindowsFunctionAppSiteConfigParameters) { + *out = *in + if in.APIDefinitionURL != nil { + in, out := &in.APIDefinitionURL, &out.APIDefinitionURL + *out = new(string) + **out = **in + } + if in.APIManagementAPIID != nil { + in, out := &in.APIManagementAPIID, &out.APIManagementAPIID + *out = new(string) + **out = **in + } + if in.AlwaysOn != nil { + in, out := &in.AlwaysOn, &out.AlwaysOn + *out = new(bool) + **out = **in + } + if in.AppCommandLine != nil { + in, out := &in.AppCommandLine, &out.AppCommandLine + *out = new(string) + **out = **in + } + if in.AppScaleLimit != nil { + in, out := &in.AppScaleLimit, &out.AppScaleLimit + *out = new(float64) + **out = **in + } + if in.AppServiceLogs != nil { + in, out := &in.AppServiceLogs, &out.AppServiceLogs + *out = new(WindowsFunctionAppSiteConfigAppServiceLogsParameters) + (*in).DeepCopyInto(*out) + } + if in.ApplicationInsightsConnectionStringSecretRef != nil { + in, out := &in.ApplicationInsightsConnectionStringSecretRef, &out.ApplicationInsightsConnectionStringSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ApplicationInsightsKeySecretRef != nil { + in, out := &in.ApplicationInsightsKeySecretRef, &out.ApplicationInsightsKeySecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ApplicationStack != nil { + in, out := &in.ApplicationStack, &out.ApplicationStack + *out = new(WindowsFunctionAppSiteConfigApplicationStackParameters) + (*in).DeepCopyInto(*out) + } + if in.Cors != nil { + in, out := &in.Cors, &out.Cors + *out = new(WindowsFunctionAppSiteConfigCorsParameters) + (*in).DeepCopyInto(*out) + } + if in.DefaultDocuments != nil { + in, out := &in.DefaultDocuments, &out.DefaultDocuments + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ElasticInstanceMinimum != nil { + in, out := 
&in.ElasticInstanceMinimum, &out.ElasticInstanceMinimum + *out = new(float64) + **out = **in + } + if in.FtpsState != nil { + in, out := &in.FtpsState, &out.FtpsState + *out = new(string) + **out = **in + } + if in.HealthCheckEvictionTimeInMin != nil { + in, out := &in.HealthCheckEvictionTimeInMin, &out.HealthCheckEvictionTimeInMin + *out = new(float64) + **out = **in + } + if in.HealthCheckPath != nil { + in, out := &in.HealthCheckPath, &out.HealthCheckPath + *out = new(string) + **out = **in + } + if in.Http2Enabled != nil { + in, out := &in.Http2Enabled, &out.Http2Enabled + *out = new(bool) + **out = **in + } + if in.IPRestriction != nil { + in, out := &in.IPRestriction, &out.IPRestriction + *out = make([]WindowsFunctionAppSiteConfigIPRestrictionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPRestrictionDefaultAction != nil { + in, out := &in.IPRestrictionDefaultAction, &out.IPRestrictionDefaultAction + *out = new(string) + **out = **in + } + if in.LoadBalancingMode != nil { + in, out := &in.LoadBalancingMode, &out.LoadBalancingMode + *out = new(string) + **out = **in + } + if in.ManagedPipelineMode != nil { + in, out := &in.ManagedPipelineMode, &out.ManagedPipelineMode + *out = new(string) + **out = **in + } + if in.MinimumTLSVersion != nil { + in, out := &in.MinimumTLSVersion, &out.MinimumTLSVersion + *out = new(string) + **out = **in + } + if in.PreWarmedInstanceCount != nil { + in, out := &in.PreWarmedInstanceCount, &out.PreWarmedInstanceCount + *out = new(float64) + **out = **in + } + if in.RemoteDebuggingEnabled != nil { + in, out := &in.RemoteDebuggingEnabled, &out.RemoteDebuggingEnabled + *out = new(bool) + **out = **in + } + if in.RemoteDebuggingVersion != nil { + in, out := &in.RemoteDebuggingVersion, &out.RemoteDebuggingVersion + *out = new(string) + **out = **in + } + if in.RuntimeScaleMonitoringEnabled != nil { + in, out := &in.RuntimeScaleMonitoringEnabled, &out.RuntimeScaleMonitoringEnabled + *out 
= new(bool) + **out = **in + } + if in.ScmIPRestriction != nil { + in, out := &in.ScmIPRestriction, &out.ScmIPRestriction + *out = make([]WindowsFunctionAppSiteConfigScmIPRestrictionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ScmIPRestrictionDefaultAction != nil { + in, out := &in.ScmIPRestrictionDefaultAction, &out.ScmIPRestrictionDefaultAction + *out = new(string) + **out = **in + } + if in.ScmMinimumTLSVersion != nil { + in, out := &in.ScmMinimumTLSVersion, &out.ScmMinimumTLSVersion + *out = new(string) + **out = **in + } + if in.ScmUseMainIPRestriction != nil { + in, out := &in.ScmUseMainIPRestriction, &out.ScmUseMainIPRestriction + *out = new(bool) + **out = **in + } + if in.Use32BitWorker != nil { + in, out := &in.Use32BitWorker, &out.Use32BitWorker + *out = new(bool) + **out = **in + } + if in.VnetRouteAllEnabled != nil { + in, out := &in.VnetRouteAllEnabled, &out.VnetRouteAllEnabled + *out = new(bool) + **out = **in + } + if in.WebsocketsEnabled != nil { + in, out := &in.WebsocketsEnabled, &out.WebsocketsEnabled + *out = new(bool) + **out = **in + } + if in.WorkerCount != nil { + in, out := &in.WorkerCount, &out.WorkerCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSiteConfigParameters. +func (in *WindowsFunctionAppSiteConfigParameters) DeepCopy() *WindowsFunctionAppSiteConfigParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSiteConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSiteConfigScmIPRestrictionHeadersInitParameters) DeepCopyInto(out *WindowsFunctionAppSiteConfigScmIPRestrictionHeadersInitParameters) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSiteConfigScmIPRestrictionHeadersInitParameters. +func (in *WindowsFunctionAppSiteConfigScmIPRestrictionHeadersInitParameters) DeepCopy() *WindowsFunctionAppSiteConfigScmIPRestrictionHeadersInitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSiteConfigScmIPRestrictionHeadersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSiteConfigScmIPRestrictionHeadersObservation) DeepCopyInto(out *WindowsFunctionAppSiteConfigScmIPRestrictionHeadersObservation) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSiteConfigScmIPRestrictionHeadersObservation. +func (in *WindowsFunctionAppSiteConfigScmIPRestrictionHeadersObservation) DeepCopy() *WindowsFunctionAppSiteConfigScmIPRestrictionHeadersObservation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSiteConfigScmIPRestrictionHeadersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSiteConfigScmIPRestrictionHeadersParameters) DeepCopyInto(out *WindowsFunctionAppSiteConfigScmIPRestrictionHeadersParameters) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSiteConfigScmIPRestrictionHeadersParameters. +func (in *WindowsFunctionAppSiteConfigScmIPRestrictionHeadersParameters) DeepCopy() *WindowsFunctionAppSiteConfigScmIPRestrictionHeadersParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSiteConfigScmIPRestrictionHeadersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSiteConfigScmIPRestrictionInitParameters) DeepCopyInto(out *WindowsFunctionAppSiteConfigScmIPRestrictionInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]WindowsFunctionAppSiteConfigScmIPRestrictionHeadersInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetIDRef != nil { + in, out := &in.VirtualNetworkSubnetIDRef, &out.VirtualNetworkSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkSubnetIDSelector != nil { + in, out := &in.VirtualNetworkSubnetIDSelector, &out.VirtualNetworkSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSiteConfigScmIPRestrictionInitParameters. 
+func (in *WindowsFunctionAppSiteConfigScmIPRestrictionInitParameters) DeepCopy() *WindowsFunctionAppSiteConfigScmIPRestrictionInitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSiteConfigScmIPRestrictionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSiteConfigScmIPRestrictionObservation) DeepCopyInto(out *WindowsFunctionAppSiteConfigScmIPRestrictionObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]WindowsFunctionAppSiteConfigScmIPRestrictionHeadersObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSiteConfigScmIPRestrictionObservation. 
+func (in *WindowsFunctionAppSiteConfigScmIPRestrictionObservation) DeepCopy() *WindowsFunctionAppSiteConfigScmIPRestrictionObservation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSiteConfigScmIPRestrictionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSiteConfigScmIPRestrictionParameters) DeepCopyInto(out *WindowsFunctionAppSiteConfigScmIPRestrictionParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]WindowsFunctionAppSiteConfigScmIPRestrictionHeadersParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetIDRef != nil { + in, out := &in.VirtualNetworkSubnetIDRef, &out.VirtualNetworkSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkSubnetIDSelector != nil { + in, out := &in.VirtualNetworkSubnetIDSelector, &out.VirtualNetworkSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying 
the receiver, creating a new WindowsFunctionAppSiteConfigScmIPRestrictionParameters. +func (in *WindowsFunctionAppSiteConfigScmIPRestrictionParameters) DeepCopy() *WindowsFunctionAppSiteConfigScmIPRestrictionParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSiteConfigScmIPRestrictionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSiteCredentialInitParameters) DeepCopyInto(out *WindowsFunctionAppSiteCredentialInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSiteCredentialInitParameters. +func (in *WindowsFunctionAppSiteCredentialInitParameters) DeepCopy() *WindowsFunctionAppSiteCredentialInitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSiteCredentialInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSiteCredentialObservation) DeepCopyInto(out *WindowsFunctionAppSiteCredentialObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Password != nil { + in, out := &in.Password, &out.Password + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSiteCredentialObservation. +func (in *WindowsFunctionAppSiteCredentialObservation) DeepCopy() *WindowsFunctionAppSiteCredentialObservation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSiteCredentialObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSiteCredentialParameters) DeepCopyInto(out *WindowsFunctionAppSiteCredentialParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSiteCredentialParameters. +func (in *WindowsFunctionAppSiteCredentialParameters) DeepCopy() *WindowsFunctionAppSiteCredentialParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSiteCredentialParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSlot) DeepCopyInto(out *WindowsFunctionAppSlot) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlot. +func (in *WindowsFunctionAppSlot) DeepCopy() *WindowsFunctionAppSlot { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlot) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WindowsFunctionAppSlot) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSlotAuthSettingsActiveDirectoryInitParameters) DeepCopyInto(out *WindowsFunctionAppSlotAuthSettingsActiveDirectoryInitParameters) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotAuthSettingsActiveDirectoryInitParameters. +func (in *WindowsFunctionAppSlotAuthSettingsActiveDirectoryInitParameters) DeepCopy() *WindowsFunctionAppSlotAuthSettingsActiveDirectoryInitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotAuthSettingsActiveDirectoryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSlotAuthSettingsActiveDirectoryObservation) DeepCopyInto(out *WindowsFunctionAppSlotAuthSettingsActiveDirectoryObservation) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotAuthSettingsActiveDirectoryObservation. +func (in *WindowsFunctionAppSlotAuthSettingsActiveDirectoryObservation) DeepCopy() *WindowsFunctionAppSlotAuthSettingsActiveDirectoryObservation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotAuthSettingsActiveDirectoryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSlotAuthSettingsActiveDirectoryParameters) DeepCopyInto(out *WindowsFunctionAppSlotAuthSettingsActiveDirectoryParameters) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSecretRef != nil { + in, out := &in.ClientSecretSecretRef, &out.ClientSecretSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotAuthSettingsActiveDirectoryParameters. +func (in *WindowsFunctionAppSlotAuthSettingsActiveDirectoryParameters) DeepCopy() *WindowsFunctionAppSlotAuthSettingsActiveDirectoryParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotAuthSettingsActiveDirectoryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSlotAuthSettingsFacebookInitParameters) DeepCopyInto(out *WindowsFunctionAppSlotAuthSettingsFacebookInitParameters) { + *out = *in + if in.AppID != nil { + in, out := &in.AppID, &out.AppID + *out = new(string) + **out = **in + } + if in.AppSecretSettingName != nil { + in, out := &in.AppSecretSettingName, &out.AppSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotAuthSettingsFacebookInitParameters. +func (in *WindowsFunctionAppSlotAuthSettingsFacebookInitParameters) DeepCopy() *WindowsFunctionAppSlotAuthSettingsFacebookInitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotAuthSettingsFacebookInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSlotAuthSettingsFacebookObservation) DeepCopyInto(out *WindowsFunctionAppSlotAuthSettingsFacebookObservation) { + *out = *in + if in.AppID != nil { + in, out := &in.AppID, &out.AppID + *out = new(string) + **out = **in + } + if in.AppSecretSettingName != nil { + in, out := &in.AppSecretSettingName, &out.AppSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotAuthSettingsFacebookObservation. 
+func (in *WindowsFunctionAppSlotAuthSettingsFacebookObservation) DeepCopy() *WindowsFunctionAppSlotAuthSettingsFacebookObservation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotAuthSettingsFacebookObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSlotAuthSettingsFacebookParameters) DeepCopyInto(out *WindowsFunctionAppSlotAuthSettingsFacebookParameters) { + *out = *in + if in.AppID != nil { + in, out := &in.AppID, &out.AppID + *out = new(string) + **out = **in + } + if in.AppSecretSecretRef != nil { + in, out := &in.AppSecretSecretRef, &out.AppSecretSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.AppSecretSettingName != nil { + in, out := &in.AppSecretSettingName, &out.AppSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotAuthSettingsFacebookParameters. +func (in *WindowsFunctionAppSlotAuthSettingsFacebookParameters) DeepCopy() *WindowsFunctionAppSlotAuthSettingsFacebookParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotAuthSettingsFacebookParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSlotAuthSettingsGithubInitParameters) DeepCopyInto(out *WindowsFunctionAppSlotAuthSettingsGithubInitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotAuthSettingsGithubInitParameters. +func (in *WindowsFunctionAppSlotAuthSettingsGithubInitParameters) DeepCopy() *WindowsFunctionAppSlotAuthSettingsGithubInitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotAuthSettingsGithubInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSlotAuthSettingsGithubObservation) DeepCopyInto(out *WindowsFunctionAppSlotAuthSettingsGithubObservation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotAuthSettingsGithubObservation. 
+func (in *WindowsFunctionAppSlotAuthSettingsGithubObservation) DeepCopy() *WindowsFunctionAppSlotAuthSettingsGithubObservation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotAuthSettingsGithubObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSlotAuthSettingsGithubParameters) DeepCopyInto(out *WindowsFunctionAppSlotAuthSettingsGithubParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSecretRef != nil { + in, out := &in.ClientSecretSecretRef, &out.ClientSecretSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotAuthSettingsGithubParameters. +func (in *WindowsFunctionAppSlotAuthSettingsGithubParameters) DeepCopy() *WindowsFunctionAppSlotAuthSettingsGithubParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotAuthSettingsGithubParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSlotAuthSettingsGoogleInitParameters) DeepCopyInto(out *WindowsFunctionAppSlotAuthSettingsGoogleInitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotAuthSettingsGoogleInitParameters. +func (in *WindowsFunctionAppSlotAuthSettingsGoogleInitParameters) DeepCopy() *WindowsFunctionAppSlotAuthSettingsGoogleInitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotAuthSettingsGoogleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSlotAuthSettingsGoogleObservation) DeepCopyInto(out *WindowsFunctionAppSlotAuthSettingsGoogleObservation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotAuthSettingsGoogleObservation. 
+func (in *WindowsFunctionAppSlotAuthSettingsGoogleObservation) DeepCopy() *WindowsFunctionAppSlotAuthSettingsGoogleObservation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotAuthSettingsGoogleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSlotAuthSettingsGoogleParameters) DeepCopyInto(out *WindowsFunctionAppSlotAuthSettingsGoogleParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSecretRef != nil { + in, out := &in.ClientSecretSecretRef, &out.ClientSecretSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotAuthSettingsGoogleParameters. +func (in *WindowsFunctionAppSlotAuthSettingsGoogleParameters) DeepCopy() *WindowsFunctionAppSlotAuthSettingsGoogleParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotAuthSettingsGoogleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSlotAuthSettingsInitParameters) DeepCopyInto(out *WindowsFunctionAppSlotAuthSettingsInitParameters) { + *out = *in + if in.ActiveDirectory != nil { + in, out := &in.ActiveDirectory, &out.ActiveDirectory + *out = new(WindowsFunctionAppSlotAuthSettingsActiveDirectoryInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AdditionalLoginParameters != nil { + in, out := &in.AdditionalLoginParameters, &out.AdditionalLoginParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AllowedExternalRedirectUrls != nil { + in, out := &in.AllowedExternalRedirectUrls, &out.AllowedExternalRedirectUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DefaultProvider != nil { + in, out := &in.DefaultProvider, &out.DefaultProvider + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Facebook != nil { + in, out := &in.Facebook, &out.Facebook + *out = new(WindowsFunctionAppSlotAuthSettingsFacebookInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Github != nil { + in, out := &in.Github, &out.Github + *out = new(WindowsFunctionAppSlotAuthSettingsGithubInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Google != nil { + in, out := &in.Google, &out.Google + *out = new(WindowsFunctionAppSlotAuthSettingsGoogleInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } + if in.Microsoft != nil { + in, out := &in.Microsoft, &out.Microsoft + *out = new(WindowsFunctionAppSlotAuthSettingsMicrosoftInitParameters) + 
(*in).DeepCopyInto(*out) + } + if in.RuntimeVersion != nil { + in, out := &in.RuntimeVersion, &out.RuntimeVersion + *out = new(string) + **out = **in + } + if in.TokenRefreshExtensionHours != nil { + in, out := &in.TokenRefreshExtensionHours, &out.TokenRefreshExtensionHours + *out = new(float64) + **out = **in + } + if in.TokenStoreEnabled != nil { + in, out := &in.TokenStoreEnabled, &out.TokenStoreEnabled + *out = new(bool) + **out = **in + } + if in.Twitter != nil { + in, out := &in.Twitter, &out.Twitter + *out = new(WindowsFunctionAppSlotAuthSettingsTwitterInitParameters) + (*in).DeepCopyInto(*out) + } + if in.UnauthenticatedClientAction != nil { + in, out := &in.UnauthenticatedClientAction, &out.UnauthenticatedClientAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotAuthSettingsInitParameters. +func (in *WindowsFunctionAppSlotAuthSettingsInitParameters) DeepCopy() *WindowsFunctionAppSlotAuthSettingsInitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotAuthSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSlotAuthSettingsMicrosoftInitParameters) DeepCopyInto(out *WindowsFunctionAppSlotAuthSettingsMicrosoftInitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotAuthSettingsMicrosoftInitParameters. +func (in *WindowsFunctionAppSlotAuthSettingsMicrosoftInitParameters) DeepCopy() *WindowsFunctionAppSlotAuthSettingsMicrosoftInitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotAuthSettingsMicrosoftInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSlotAuthSettingsMicrosoftObservation) DeepCopyInto(out *WindowsFunctionAppSlotAuthSettingsMicrosoftObservation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotAuthSettingsMicrosoftObservation. +func (in *WindowsFunctionAppSlotAuthSettingsMicrosoftObservation) DeepCopy() *WindowsFunctionAppSlotAuthSettingsMicrosoftObservation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotAuthSettingsMicrosoftObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSlotAuthSettingsMicrosoftParameters) DeepCopyInto(out *WindowsFunctionAppSlotAuthSettingsMicrosoftParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSecretRef != nil { + in, out := &in.ClientSecretSecretRef, &out.ClientSecretSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotAuthSettingsMicrosoftParameters. +func (in *WindowsFunctionAppSlotAuthSettingsMicrosoftParameters) DeepCopy() *WindowsFunctionAppSlotAuthSettingsMicrosoftParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotAuthSettingsMicrosoftParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSlotAuthSettingsObservation) DeepCopyInto(out *WindowsFunctionAppSlotAuthSettingsObservation) { + *out = *in + if in.ActiveDirectory != nil { + in, out := &in.ActiveDirectory, &out.ActiveDirectory + *out = new(WindowsFunctionAppSlotAuthSettingsActiveDirectoryObservation) + (*in).DeepCopyInto(*out) + } + if in.AdditionalLoginParameters != nil { + in, out := &in.AdditionalLoginParameters, &out.AdditionalLoginParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AllowedExternalRedirectUrls != nil { + in, out := &in.AllowedExternalRedirectUrls, &out.AllowedExternalRedirectUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DefaultProvider != nil { + in, out := &in.DefaultProvider, &out.DefaultProvider + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Facebook != nil { + in, out := &in.Facebook, &out.Facebook + *out = new(WindowsFunctionAppSlotAuthSettingsFacebookObservation) + (*in).DeepCopyInto(*out) + } + if in.Github != nil { + in, out := &in.Github, &out.Github + *out = new(WindowsFunctionAppSlotAuthSettingsGithubObservation) + (*in).DeepCopyInto(*out) + } + if in.Google != nil { + in, out := &in.Google, &out.Google + *out = new(WindowsFunctionAppSlotAuthSettingsGoogleObservation) + (*in).DeepCopyInto(*out) + } + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } + if in.Microsoft != nil { + in, out := &in.Microsoft, &out.Microsoft + *out = new(WindowsFunctionAppSlotAuthSettingsMicrosoftObservation) + (*in).DeepCopyInto(*out) + } + if 
in.RuntimeVersion != nil { + in, out := &in.RuntimeVersion, &out.RuntimeVersion + *out = new(string) + **out = **in + } + if in.TokenRefreshExtensionHours != nil { + in, out := &in.TokenRefreshExtensionHours, &out.TokenRefreshExtensionHours + *out = new(float64) + **out = **in + } + if in.TokenStoreEnabled != nil { + in, out := &in.TokenStoreEnabled, &out.TokenStoreEnabled + *out = new(bool) + **out = **in + } + if in.Twitter != nil { + in, out := &in.Twitter, &out.Twitter + *out = new(WindowsFunctionAppSlotAuthSettingsTwitterObservation) + (*in).DeepCopyInto(*out) + } + if in.UnauthenticatedClientAction != nil { + in, out := &in.UnauthenticatedClientAction, &out.UnauthenticatedClientAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotAuthSettingsObservation. +func (in *WindowsFunctionAppSlotAuthSettingsObservation) DeepCopy() *WindowsFunctionAppSlotAuthSettingsObservation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotAuthSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSlotAuthSettingsParameters) DeepCopyInto(out *WindowsFunctionAppSlotAuthSettingsParameters) { + *out = *in + if in.ActiveDirectory != nil { + in, out := &in.ActiveDirectory, &out.ActiveDirectory + *out = new(WindowsFunctionAppSlotAuthSettingsActiveDirectoryParameters) + (*in).DeepCopyInto(*out) + } + if in.AdditionalLoginParameters != nil { + in, out := &in.AdditionalLoginParameters, &out.AdditionalLoginParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AllowedExternalRedirectUrls != nil { + in, out := &in.AllowedExternalRedirectUrls, &out.AllowedExternalRedirectUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DefaultProvider != nil { + in, out := &in.DefaultProvider, &out.DefaultProvider + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Facebook != nil { + in, out := &in.Facebook, &out.Facebook + *out = new(WindowsFunctionAppSlotAuthSettingsFacebookParameters) + (*in).DeepCopyInto(*out) + } + if in.Github != nil { + in, out := &in.Github, &out.Github + *out = new(WindowsFunctionAppSlotAuthSettingsGithubParameters) + (*in).DeepCopyInto(*out) + } + if in.Google != nil { + in, out := &in.Google, &out.Google + *out = new(WindowsFunctionAppSlotAuthSettingsGoogleParameters) + (*in).DeepCopyInto(*out) + } + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } + if in.Microsoft != nil { + in, out := &in.Microsoft, &out.Microsoft + *out = new(WindowsFunctionAppSlotAuthSettingsMicrosoftParameters) + (*in).DeepCopyInto(*out) + } + if 
in.RuntimeVersion != nil { + in, out := &in.RuntimeVersion, &out.RuntimeVersion + *out = new(string) + **out = **in + } + if in.TokenRefreshExtensionHours != nil { + in, out := &in.TokenRefreshExtensionHours, &out.TokenRefreshExtensionHours + *out = new(float64) + **out = **in + } + if in.TokenStoreEnabled != nil { + in, out := &in.TokenStoreEnabled, &out.TokenStoreEnabled + *out = new(bool) + **out = **in + } + if in.Twitter != nil { + in, out := &in.Twitter, &out.Twitter + *out = new(WindowsFunctionAppSlotAuthSettingsTwitterParameters) + (*in).DeepCopyInto(*out) + } + if in.UnauthenticatedClientAction != nil { + in, out := &in.UnauthenticatedClientAction, &out.UnauthenticatedClientAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotAuthSettingsParameters. +func (in *WindowsFunctionAppSlotAuthSettingsParameters) DeepCopy() *WindowsFunctionAppSlotAuthSettingsParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotAuthSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSlotAuthSettingsTwitterInitParameters) DeepCopyInto(out *WindowsFunctionAppSlotAuthSettingsTwitterInitParameters) { + *out = *in + if in.ConsumerKey != nil { + in, out := &in.ConsumerKey, &out.ConsumerKey + *out = new(string) + **out = **in + } + if in.ConsumerSecretSettingName != nil { + in, out := &in.ConsumerSecretSettingName, &out.ConsumerSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotAuthSettingsTwitterInitParameters. 
+func (in *WindowsFunctionAppSlotAuthSettingsTwitterInitParameters) DeepCopy() *WindowsFunctionAppSlotAuthSettingsTwitterInitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotAuthSettingsTwitterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSlotAuthSettingsTwitterObservation) DeepCopyInto(out *WindowsFunctionAppSlotAuthSettingsTwitterObservation) { + *out = *in + if in.ConsumerKey != nil { + in, out := &in.ConsumerKey, &out.ConsumerKey + *out = new(string) + **out = **in + } + if in.ConsumerSecretSettingName != nil { + in, out := &in.ConsumerSecretSettingName, &out.ConsumerSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotAuthSettingsTwitterObservation. +func (in *WindowsFunctionAppSlotAuthSettingsTwitterObservation) DeepCopy() *WindowsFunctionAppSlotAuthSettingsTwitterObservation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotAuthSettingsTwitterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSlotAuthSettingsTwitterParameters) DeepCopyInto(out *WindowsFunctionAppSlotAuthSettingsTwitterParameters) { + *out = *in + if in.ConsumerKey != nil { + in, out := &in.ConsumerKey, &out.ConsumerKey + *out = new(string) + **out = **in + } + if in.ConsumerSecretSecretRef != nil { + in, out := &in.ConsumerSecretSecretRef, &out.ConsumerSecretSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ConsumerSecretSettingName != nil { + in, out := &in.ConsumerSecretSettingName, &out.ConsumerSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotAuthSettingsTwitterParameters. +func (in *WindowsFunctionAppSlotAuthSettingsTwitterParameters) DeepCopy() *WindowsFunctionAppSlotAuthSettingsTwitterParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotAuthSettingsTwitterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSlotAuthSettingsV2ActiveDirectoryV2InitParameters) DeepCopyInto(out *WindowsFunctionAppSlotAuthSettingsV2ActiveDirectoryV2InitParameters) { + *out = *in + if in.AllowedApplications != nil { + in, out := &in.AllowedApplications, &out.AllowedApplications + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedGroups != nil { + in, out := &in.AllowedGroups, &out.AllowedGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedIdentities != nil { + in, out := &in.AllowedIdentities, &out.AllowedIdentities + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretCertificateThumbprint != nil { + in, out := &in.ClientSecretCertificateThumbprint, &out.ClientSecretCertificateThumbprint + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.JwtAllowedClientApplications != nil { + in, out := &in.JwtAllowedClientApplications, &out.JwtAllowedClientApplications + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.JwtAllowedGroups != nil { + in, out := 
&in.JwtAllowedGroups, &out.JwtAllowedGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LoginParameters != nil { + in, out := &in.LoginParameters, &out.LoginParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TenantAuthEndpoint != nil { + in, out := &in.TenantAuthEndpoint, &out.TenantAuthEndpoint + *out = new(string) + **out = **in + } + if in.WwwAuthenticationDisabled != nil { + in, out := &in.WwwAuthenticationDisabled, &out.WwwAuthenticationDisabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotAuthSettingsV2ActiveDirectoryV2InitParameters. +func (in *WindowsFunctionAppSlotAuthSettingsV2ActiveDirectoryV2InitParameters) DeepCopy() *WindowsFunctionAppSlotAuthSettingsV2ActiveDirectoryV2InitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotAuthSettingsV2ActiveDirectoryV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSlotAuthSettingsV2ActiveDirectoryV2Observation) DeepCopyInto(out *WindowsFunctionAppSlotAuthSettingsV2ActiveDirectoryV2Observation) { + *out = *in + if in.AllowedApplications != nil { + in, out := &in.AllowedApplications, &out.AllowedApplications + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedGroups != nil { + in, out := &in.AllowedGroups, &out.AllowedGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedIdentities != nil { + in, out := &in.AllowedIdentities, &out.AllowedIdentities + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretCertificateThumbprint != nil { + in, out := &in.ClientSecretCertificateThumbprint, &out.ClientSecretCertificateThumbprint + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.JwtAllowedClientApplications != nil { + in, out := &in.JwtAllowedClientApplications, &out.JwtAllowedClientApplications + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.JwtAllowedGroups != nil { + in, out := &in.JwtAllowedGroups, 
&out.JwtAllowedGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LoginParameters != nil { + in, out := &in.LoginParameters, &out.LoginParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TenantAuthEndpoint != nil { + in, out := &in.TenantAuthEndpoint, &out.TenantAuthEndpoint + *out = new(string) + **out = **in + } + if in.WwwAuthenticationDisabled != nil { + in, out := &in.WwwAuthenticationDisabled, &out.WwwAuthenticationDisabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotAuthSettingsV2ActiveDirectoryV2Observation. +func (in *WindowsFunctionAppSlotAuthSettingsV2ActiveDirectoryV2Observation) DeepCopy() *WindowsFunctionAppSlotAuthSettingsV2ActiveDirectoryV2Observation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotAuthSettingsV2ActiveDirectoryV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSlotAuthSettingsV2ActiveDirectoryV2Parameters) DeepCopyInto(out *WindowsFunctionAppSlotAuthSettingsV2ActiveDirectoryV2Parameters) { + *out = *in + if in.AllowedApplications != nil { + in, out := &in.AllowedApplications, &out.AllowedApplications + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedGroups != nil { + in, out := &in.AllowedGroups, &out.AllowedGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedIdentities != nil { + in, out := &in.AllowedIdentities, &out.AllowedIdentities + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretCertificateThumbprint != nil { + in, out := &in.ClientSecretCertificateThumbprint, &out.ClientSecretCertificateThumbprint + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.JwtAllowedClientApplications != nil { + in, out := &in.JwtAllowedClientApplications, &out.JwtAllowedClientApplications + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.JwtAllowedGroups != nil { + in, out := &in.JwtAllowedGroups, 
&out.JwtAllowedGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LoginParameters != nil { + in, out := &in.LoginParameters, &out.LoginParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TenantAuthEndpoint != nil { + in, out := &in.TenantAuthEndpoint, &out.TenantAuthEndpoint + *out = new(string) + **out = **in + } + if in.WwwAuthenticationDisabled != nil { + in, out := &in.WwwAuthenticationDisabled, &out.WwwAuthenticationDisabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotAuthSettingsV2ActiveDirectoryV2Parameters. +func (in *WindowsFunctionAppSlotAuthSettingsV2ActiveDirectoryV2Parameters) DeepCopy() *WindowsFunctionAppSlotAuthSettingsV2ActiveDirectoryV2Parameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotAuthSettingsV2ActiveDirectoryV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSlotAuthSettingsV2AppleV2InitParameters) DeepCopyInto(out *WindowsFunctionAppSlotAuthSettingsV2AppleV2InitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotAuthSettingsV2AppleV2InitParameters. 
+func (in *WindowsFunctionAppSlotAuthSettingsV2AppleV2InitParameters) DeepCopy() *WindowsFunctionAppSlotAuthSettingsV2AppleV2InitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotAuthSettingsV2AppleV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSlotAuthSettingsV2AppleV2Observation) DeepCopyInto(out *WindowsFunctionAppSlotAuthSettingsV2AppleV2Observation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotAuthSettingsV2AppleV2Observation. +func (in *WindowsFunctionAppSlotAuthSettingsV2AppleV2Observation) DeepCopy() *WindowsFunctionAppSlotAuthSettingsV2AppleV2Observation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotAuthSettingsV2AppleV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSlotAuthSettingsV2AppleV2Parameters) DeepCopyInto(out *WindowsFunctionAppSlotAuthSettingsV2AppleV2Parameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotAuthSettingsV2AppleV2Parameters. +func (in *WindowsFunctionAppSlotAuthSettingsV2AppleV2Parameters) DeepCopy() *WindowsFunctionAppSlotAuthSettingsV2AppleV2Parameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotAuthSettingsV2AppleV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSlotAuthSettingsV2AzureStaticWebAppV2InitParameters) DeepCopyInto(out *WindowsFunctionAppSlotAuthSettingsV2AzureStaticWebAppV2InitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotAuthSettingsV2AzureStaticWebAppV2InitParameters. +func (in *WindowsFunctionAppSlotAuthSettingsV2AzureStaticWebAppV2InitParameters) DeepCopy() *WindowsFunctionAppSlotAuthSettingsV2AzureStaticWebAppV2InitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotAuthSettingsV2AzureStaticWebAppV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSlotAuthSettingsV2AzureStaticWebAppV2Observation) DeepCopyInto(out *WindowsFunctionAppSlotAuthSettingsV2AzureStaticWebAppV2Observation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotAuthSettingsV2AzureStaticWebAppV2Observation. +func (in *WindowsFunctionAppSlotAuthSettingsV2AzureStaticWebAppV2Observation) DeepCopy() *WindowsFunctionAppSlotAuthSettingsV2AzureStaticWebAppV2Observation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotAuthSettingsV2AzureStaticWebAppV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSlotAuthSettingsV2AzureStaticWebAppV2Parameters) DeepCopyInto(out *WindowsFunctionAppSlotAuthSettingsV2AzureStaticWebAppV2Parameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotAuthSettingsV2AzureStaticWebAppV2Parameters. +func (in *WindowsFunctionAppSlotAuthSettingsV2AzureStaticWebAppV2Parameters) DeepCopy() *WindowsFunctionAppSlotAuthSettingsV2AzureStaticWebAppV2Parameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotAuthSettingsV2AzureStaticWebAppV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSlotAuthSettingsV2CustomOidcV2InitParameters) DeepCopyInto(out *WindowsFunctionAppSlotAuthSettingsV2CustomOidcV2InitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NameClaimType != nil { + in, out := &in.NameClaimType, &out.NameClaimType + *out = new(string) + **out = **in + } + if in.OpenIDConfigurationEndpoint != nil { + in, out := &in.OpenIDConfigurationEndpoint, &out.OpenIDConfigurationEndpoint + *out = new(string) + **out = **in + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotAuthSettingsV2CustomOidcV2InitParameters. +func (in *WindowsFunctionAppSlotAuthSettingsV2CustomOidcV2InitParameters) DeepCopy() *WindowsFunctionAppSlotAuthSettingsV2CustomOidcV2InitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotAuthSettingsV2CustomOidcV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSlotAuthSettingsV2CustomOidcV2Observation) DeepCopyInto(out *WindowsFunctionAppSlotAuthSettingsV2CustomOidcV2Observation) { + *out = *in + if in.AuthorisationEndpoint != nil { + in, out := &in.AuthorisationEndpoint, &out.AuthorisationEndpoint + *out = new(string) + **out = **in + } + if in.CertificationURI != nil { + in, out := &in.CertificationURI, &out.CertificationURI + *out = new(string) + **out = **in + } + if in.ClientCredentialMethod != nil { + in, out := &in.ClientCredentialMethod, &out.ClientCredentialMethod + *out = new(string) + **out = **in + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.IssuerEndpoint != nil { + in, out := &in.IssuerEndpoint, &out.IssuerEndpoint + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NameClaimType != nil { + in, out := &in.NameClaimType, &out.NameClaimType + *out = new(string) + **out = **in + } + if in.OpenIDConfigurationEndpoint != nil { + in, out := &in.OpenIDConfigurationEndpoint, &out.OpenIDConfigurationEndpoint + *out = new(string) + **out = **in + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TokenEndpoint != nil { + in, out := &in.TokenEndpoint, &out.TokenEndpoint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotAuthSettingsV2CustomOidcV2Observation. 
+func (in *WindowsFunctionAppSlotAuthSettingsV2CustomOidcV2Observation) DeepCopy() *WindowsFunctionAppSlotAuthSettingsV2CustomOidcV2Observation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotAuthSettingsV2CustomOidcV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSlotAuthSettingsV2CustomOidcV2Parameters) DeepCopyInto(out *WindowsFunctionAppSlotAuthSettingsV2CustomOidcV2Parameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NameClaimType != nil { + in, out := &in.NameClaimType, &out.NameClaimType + *out = new(string) + **out = **in + } + if in.OpenIDConfigurationEndpoint != nil { + in, out := &in.OpenIDConfigurationEndpoint, &out.OpenIDConfigurationEndpoint + *out = new(string) + **out = **in + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotAuthSettingsV2CustomOidcV2Parameters. +func (in *WindowsFunctionAppSlotAuthSettingsV2CustomOidcV2Parameters) DeepCopy() *WindowsFunctionAppSlotAuthSettingsV2CustomOidcV2Parameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotAuthSettingsV2CustomOidcV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSlotAuthSettingsV2FacebookV2InitParameters) DeepCopyInto(out *WindowsFunctionAppSlotAuthSettingsV2FacebookV2InitParameters) { + *out = *in + if in.AppID != nil { + in, out := &in.AppID, &out.AppID + *out = new(string) + **out = **in + } + if in.AppSecretSettingName != nil { + in, out := &in.AppSecretSettingName, &out.AppSecretSettingName + *out = new(string) + **out = **in + } + if in.GraphAPIVersion != nil { + in, out := &in.GraphAPIVersion, &out.GraphAPIVersion + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotAuthSettingsV2FacebookV2InitParameters. +func (in *WindowsFunctionAppSlotAuthSettingsV2FacebookV2InitParameters) DeepCopy() *WindowsFunctionAppSlotAuthSettingsV2FacebookV2InitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotAuthSettingsV2FacebookV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSlotAuthSettingsV2FacebookV2Observation) DeepCopyInto(out *WindowsFunctionAppSlotAuthSettingsV2FacebookV2Observation) { + *out = *in + if in.AppID != nil { + in, out := &in.AppID, &out.AppID + *out = new(string) + **out = **in + } + if in.AppSecretSettingName != nil { + in, out := &in.AppSecretSettingName, &out.AppSecretSettingName + *out = new(string) + **out = **in + } + if in.GraphAPIVersion != nil { + in, out := &in.GraphAPIVersion, &out.GraphAPIVersion + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotAuthSettingsV2FacebookV2Observation. +func (in *WindowsFunctionAppSlotAuthSettingsV2FacebookV2Observation) DeepCopy() *WindowsFunctionAppSlotAuthSettingsV2FacebookV2Observation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotAuthSettingsV2FacebookV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSlotAuthSettingsV2FacebookV2Parameters) DeepCopyInto(out *WindowsFunctionAppSlotAuthSettingsV2FacebookV2Parameters) { + *out = *in + if in.AppID != nil { + in, out := &in.AppID, &out.AppID + *out = new(string) + **out = **in + } + if in.AppSecretSettingName != nil { + in, out := &in.AppSecretSettingName, &out.AppSecretSettingName + *out = new(string) + **out = **in + } + if in.GraphAPIVersion != nil { + in, out := &in.GraphAPIVersion, &out.GraphAPIVersion + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotAuthSettingsV2FacebookV2Parameters. +func (in *WindowsFunctionAppSlotAuthSettingsV2FacebookV2Parameters) DeepCopy() *WindowsFunctionAppSlotAuthSettingsV2FacebookV2Parameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotAuthSettingsV2FacebookV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSlotAuthSettingsV2GithubV2InitParameters) DeepCopyInto(out *WindowsFunctionAppSlotAuthSettingsV2GithubV2InitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotAuthSettingsV2GithubV2InitParameters. +func (in *WindowsFunctionAppSlotAuthSettingsV2GithubV2InitParameters) DeepCopy() *WindowsFunctionAppSlotAuthSettingsV2GithubV2InitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotAuthSettingsV2GithubV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSlotAuthSettingsV2GithubV2Observation) DeepCopyInto(out *WindowsFunctionAppSlotAuthSettingsV2GithubV2Observation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotAuthSettingsV2GithubV2Observation. +func (in *WindowsFunctionAppSlotAuthSettingsV2GithubV2Observation) DeepCopy() *WindowsFunctionAppSlotAuthSettingsV2GithubV2Observation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotAuthSettingsV2GithubV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSlotAuthSettingsV2GithubV2Parameters) DeepCopyInto(out *WindowsFunctionAppSlotAuthSettingsV2GithubV2Parameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotAuthSettingsV2GithubV2Parameters. +func (in *WindowsFunctionAppSlotAuthSettingsV2GithubV2Parameters) DeepCopy() *WindowsFunctionAppSlotAuthSettingsV2GithubV2Parameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotAuthSettingsV2GithubV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSlotAuthSettingsV2GoogleV2InitParameters) DeepCopyInto(out *WindowsFunctionAppSlotAuthSettingsV2GoogleV2InitParameters) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotAuthSettingsV2GoogleV2InitParameters. +func (in *WindowsFunctionAppSlotAuthSettingsV2GoogleV2InitParameters) DeepCopy() *WindowsFunctionAppSlotAuthSettingsV2GoogleV2InitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotAuthSettingsV2GoogleV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSlotAuthSettingsV2GoogleV2Observation) DeepCopyInto(out *WindowsFunctionAppSlotAuthSettingsV2GoogleV2Observation) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotAuthSettingsV2GoogleV2Observation. +func (in *WindowsFunctionAppSlotAuthSettingsV2GoogleV2Observation) DeepCopy() *WindowsFunctionAppSlotAuthSettingsV2GoogleV2Observation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotAuthSettingsV2GoogleV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSlotAuthSettingsV2GoogleV2Parameters) DeepCopyInto(out *WindowsFunctionAppSlotAuthSettingsV2GoogleV2Parameters) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotAuthSettingsV2GoogleV2Parameters. +func (in *WindowsFunctionAppSlotAuthSettingsV2GoogleV2Parameters) DeepCopy() *WindowsFunctionAppSlotAuthSettingsV2GoogleV2Parameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotAuthSettingsV2GoogleV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSlotAuthSettingsV2InitParameters) DeepCopyInto(out *WindowsFunctionAppSlotAuthSettingsV2InitParameters) { + *out = *in + if in.ActiveDirectoryV2 != nil { + in, out := &in.ActiveDirectoryV2, &out.ActiveDirectoryV2 + *out = new(WindowsFunctionAppSlotAuthSettingsV2ActiveDirectoryV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.AppleV2 != nil { + in, out := &in.AppleV2, &out.AppleV2 + *out = new(WindowsFunctionAppSlotAuthSettingsV2AppleV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.AuthEnabled != nil { + in, out := &in.AuthEnabled, &out.AuthEnabled + *out = new(bool) + **out = **in + } + if in.AzureStaticWebAppV2 != nil { + in, out := &in.AzureStaticWebAppV2, &out.AzureStaticWebAppV2 + *out = new(WindowsFunctionAppSlotAuthSettingsV2AzureStaticWebAppV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.ConfigFilePath != nil { + in, out := &in.ConfigFilePath, &out.ConfigFilePath + *out = new(string) + **out = **in + } + if in.CustomOidcV2 != nil { + in, out := &in.CustomOidcV2, &out.CustomOidcV2 + *out = make([]WindowsFunctionAppSlotAuthSettingsV2CustomOidcV2InitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultProvider != nil { + in, out := &in.DefaultProvider, &out.DefaultProvider + *out = new(string) + **out = **in + } + if in.ExcludedPaths != nil { + in, out := &in.ExcludedPaths, &out.ExcludedPaths + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FacebookV2 != nil { + in, out := &in.FacebookV2, &out.FacebookV2 + *out = new(WindowsFunctionAppSlotAuthSettingsV2FacebookV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.ForwardProxyConvention != nil { + in, out := &in.ForwardProxyConvention, &out.ForwardProxyConvention + *out = new(string) + **out = **in + } + if in.ForwardProxyCustomHostHeaderName != nil { + in, out := 
&in.ForwardProxyCustomHostHeaderName, &out.ForwardProxyCustomHostHeaderName + *out = new(string) + **out = **in + } + if in.ForwardProxyCustomSchemeHeaderName != nil { + in, out := &in.ForwardProxyCustomSchemeHeaderName, &out.ForwardProxyCustomSchemeHeaderName + *out = new(string) + **out = **in + } + if in.GithubV2 != nil { + in, out := &in.GithubV2, &out.GithubV2 + *out = new(WindowsFunctionAppSlotAuthSettingsV2GithubV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.GoogleV2 != nil { + in, out := &in.GoogleV2, &out.GoogleV2 + *out = new(WindowsFunctionAppSlotAuthSettingsV2GoogleV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.HTTPRouteAPIPrefix != nil { + in, out := &in.HTTPRouteAPIPrefix, &out.HTTPRouteAPIPrefix + *out = new(string) + **out = **in + } + if in.Login != nil { + in, out := &in.Login, &out.Login + *out = new(WindowsFunctionAppSlotAuthSettingsV2LoginInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MicrosoftV2 != nil { + in, out := &in.MicrosoftV2, &out.MicrosoftV2 + *out = new(WindowsFunctionAppSlotAuthSettingsV2MicrosoftV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.RequireAuthentication != nil { + in, out := &in.RequireAuthentication, &out.RequireAuthentication + *out = new(bool) + **out = **in + } + if in.RequireHTTPS != nil { + in, out := &in.RequireHTTPS, &out.RequireHTTPS + *out = new(bool) + **out = **in + } + if in.RuntimeVersion != nil { + in, out := &in.RuntimeVersion, &out.RuntimeVersion + *out = new(string) + **out = **in + } + if in.TwitterV2 != nil { + in, out := &in.TwitterV2, &out.TwitterV2 + *out = new(WindowsFunctionAppSlotAuthSettingsV2TwitterV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.UnauthenticatedAction != nil { + in, out := &in.UnauthenticatedAction, &out.UnauthenticatedAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotAuthSettingsV2InitParameters. 
+func (in *WindowsFunctionAppSlotAuthSettingsV2InitParameters) DeepCopy() *WindowsFunctionAppSlotAuthSettingsV2InitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotAuthSettingsV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSlotAuthSettingsV2LoginInitParameters) DeepCopyInto(out *WindowsFunctionAppSlotAuthSettingsV2LoginInitParameters) { + *out = *in + if in.AllowedExternalRedirectUrls != nil { + in, out := &in.AllowedExternalRedirectUrls, &out.AllowedExternalRedirectUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CookieExpirationConvention != nil { + in, out := &in.CookieExpirationConvention, &out.CookieExpirationConvention + *out = new(string) + **out = **in + } + if in.CookieExpirationTime != nil { + in, out := &in.CookieExpirationTime, &out.CookieExpirationTime + *out = new(string) + **out = **in + } + if in.LogoutEndpoint != nil { + in, out := &in.LogoutEndpoint, &out.LogoutEndpoint + *out = new(string) + **out = **in + } + if in.NonceExpirationTime != nil { + in, out := &in.NonceExpirationTime, &out.NonceExpirationTime + *out = new(string) + **out = **in + } + if in.PreserveURLFragmentsForLogins != nil { + in, out := &in.PreserveURLFragmentsForLogins, &out.PreserveURLFragmentsForLogins + *out = new(bool) + **out = **in + } + if in.TokenRefreshExtensionTime != nil { + in, out := &in.TokenRefreshExtensionTime, &out.TokenRefreshExtensionTime + *out = new(float64) + **out = **in + } + if in.TokenStoreEnabled != nil { + in, out := &in.TokenStoreEnabled, &out.TokenStoreEnabled + *out = new(bool) + **out = **in + } + if in.TokenStorePath != nil { + in, out := &in.TokenStorePath, &out.TokenStorePath + *out = new(string) + **out = **in + } + if 
in.TokenStoreSASSettingName != nil { + in, out := &in.TokenStoreSASSettingName, &out.TokenStoreSASSettingName + *out = new(string) + **out = **in + } + if in.ValidateNonce != nil { + in, out := &in.ValidateNonce, &out.ValidateNonce + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotAuthSettingsV2LoginInitParameters. +func (in *WindowsFunctionAppSlotAuthSettingsV2LoginInitParameters) DeepCopy() *WindowsFunctionAppSlotAuthSettingsV2LoginInitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotAuthSettingsV2LoginInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSlotAuthSettingsV2LoginObservation) DeepCopyInto(out *WindowsFunctionAppSlotAuthSettingsV2LoginObservation) { + *out = *in + if in.AllowedExternalRedirectUrls != nil { + in, out := &in.AllowedExternalRedirectUrls, &out.AllowedExternalRedirectUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CookieExpirationConvention != nil { + in, out := &in.CookieExpirationConvention, &out.CookieExpirationConvention + *out = new(string) + **out = **in + } + if in.CookieExpirationTime != nil { + in, out := &in.CookieExpirationTime, &out.CookieExpirationTime + *out = new(string) + **out = **in + } + if in.LogoutEndpoint != nil { + in, out := &in.LogoutEndpoint, &out.LogoutEndpoint + *out = new(string) + **out = **in + } + if in.NonceExpirationTime != nil { + in, out := &in.NonceExpirationTime, &out.NonceExpirationTime + *out = new(string) + **out = **in + } + if in.PreserveURLFragmentsForLogins != nil { + in, out := &in.PreserveURLFragmentsForLogins, &out.PreserveURLFragmentsForLogins + *out = new(bool) + **out = **in + } + 
if in.TokenRefreshExtensionTime != nil { + in, out := &in.TokenRefreshExtensionTime, &out.TokenRefreshExtensionTime + *out = new(float64) + **out = **in + } + if in.TokenStoreEnabled != nil { + in, out := &in.TokenStoreEnabled, &out.TokenStoreEnabled + *out = new(bool) + **out = **in + } + if in.TokenStorePath != nil { + in, out := &in.TokenStorePath, &out.TokenStorePath + *out = new(string) + **out = **in + } + if in.TokenStoreSASSettingName != nil { + in, out := &in.TokenStoreSASSettingName, &out.TokenStoreSASSettingName + *out = new(string) + **out = **in + } + if in.ValidateNonce != nil { + in, out := &in.ValidateNonce, &out.ValidateNonce + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotAuthSettingsV2LoginObservation. +func (in *WindowsFunctionAppSlotAuthSettingsV2LoginObservation) DeepCopy() *WindowsFunctionAppSlotAuthSettingsV2LoginObservation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotAuthSettingsV2LoginObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSlotAuthSettingsV2LoginParameters) DeepCopyInto(out *WindowsFunctionAppSlotAuthSettingsV2LoginParameters) { + *out = *in + if in.AllowedExternalRedirectUrls != nil { + in, out := &in.AllowedExternalRedirectUrls, &out.AllowedExternalRedirectUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CookieExpirationConvention != nil { + in, out := &in.CookieExpirationConvention, &out.CookieExpirationConvention + *out = new(string) + **out = **in + } + if in.CookieExpirationTime != nil { + in, out := &in.CookieExpirationTime, &out.CookieExpirationTime + *out = new(string) + **out = **in + } + if in.LogoutEndpoint != nil { + in, out := &in.LogoutEndpoint, &out.LogoutEndpoint + *out = new(string) + **out = **in + } + if in.NonceExpirationTime != nil { + in, out := &in.NonceExpirationTime, &out.NonceExpirationTime + *out = new(string) + **out = **in + } + if in.PreserveURLFragmentsForLogins != nil { + in, out := &in.PreserveURLFragmentsForLogins, &out.PreserveURLFragmentsForLogins + *out = new(bool) + **out = **in + } + if in.TokenRefreshExtensionTime != nil { + in, out := &in.TokenRefreshExtensionTime, &out.TokenRefreshExtensionTime + *out = new(float64) + **out = **in + } + if in.TokenStoreEnabled != nil { + in, out := &in.TokenStoreEnabled, &out.TokenStoreEnabled + *out = new(bool) + **out = **in + } + if in.TokenStorePath != nil { + in, out := &in.TokenStorePath, &out.TokenStorePath + *out = new(string) + **out = **in + } + if in.TokenStoreSASSettingName != nil { + in, out := &in.TokenStoreSASSettingName, &out.TokenStoreSASSettingName + *out = new(string) + **out = **in + } + if in.ValidateNonce != nil { + in, out := &in.ValidateNonce, &out.ValidateNonce + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
WindowsFunctionAppSlotAuthSettingsV2LoginParameters. +func (in *WindowsFunctionAppSlotAuthSettingsV2LoginParameters) DeepCopy() *WindowsFunctionAppSlotAuthSettingsV2LoginParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotAuthSettingsV2LoginParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSlotAuthSettingsV2MicrosoftV2InitParameters) DeepCopyInto(out *WindowsFunctionAppSlotAuthSettingsV2MicrosoftV2InitParameters) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotAuthSettingsV2MicrosoftV2InitParameters. +func (in *WindowsFunctionAppSlotAuthSettingsV2MicrosoftV2InitParameters) DeepCopy() *WindowsFunctionAppSlotAuthSettingsV2MicrosoftV2InitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotAuthSettingsV2MicrosoftV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSlotAuthSettingsV2MicrosoftV2Observation) DeepCopyInto(out *WindowsFunctionAppSlotAuthSettingsV2MicrosoftV2Observation) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotAuthSettingsV2MicrosoftV2Observation. +func (in *WindowsFunctionAppSlotAuthSettingsV2MicrosoftV2Observation) DeepCopy() *WindowsFunctionAppSlotAuthSettingsV2MicrosoftV2Observation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotAuthSettingsV2MicrosoftV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSlotAuthSettingsV2MicrosoftV2Parameters) DeepCopyInto(out *WindowsFunctionAppSlotAuthSettingsV2MicrosoftV2Parameters) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotAuthSettingsV2MicrosoftV2Parameters. +func (in *WindowsFunctionAppSlotAuthSettingsV2MicrosoftV2Parameters) DeepCopy() *WindowsFunctionAppSlotAuthSettingsV2MicrosoftV2Parameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotAuthSettingsV2MicrosoftV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSlotAuthSettingsV2Observation) DeepCopyInto(out *WindowsFunctionAppSlotAuthSettingsV2Observation) { + *out = *in + if in.ActiveDirectoryV2 != nil { + in, out := &in.ActiveDirectoryV2, &out.ActiveDirectoryV2 + *out = new(WindowsFunctionAppSlotAuthSettingsV2ActiveDirectoryV2Observation) + (*in).DeepCopyInto(*out) + } + if in.AppleV2 != nil { + in, out := &in.AppleV2, &out.AppleV2 + *out = new(WindowsFunctionAppSlotAuthSettingsV2AppleV2Observation) + (*in).DeepCopyInto(*out) + } + if in.AuthEnabled != nil { + in, out := &in.AuthEnabled, &out.AuthEnabled + *out = new(bool) + **out = **in + } + if in.AzureStaticWebAppV2 != nil { + in, out := &in.AzureStaticWebAppV2, &out.AzureStaticWebAppV2 + *out = new(WindowsFunctionAppSlotAuthSettingsV2AzureStaticWebAppV2Observation) + (*in).DeepCopyInto(*out) + } + if in.ConfigFilePath != nil { + in, out := &in.ConfigFilePath, &out.ConfigFilePath + *out = new(string) + **out = **in + } + if in.CustomOidcV2 != nil { + in, out := &in.CustomOidcV2, &out.CustomOidcV2 + *out = make([]WindowsFunctionAppSlotAuthSettingsV2CustomOidcV2Observation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultProvider != nil { + in, out := &in.DefaultProvider, &out.DefaultProvider + *out = new(string) + **out = **in + } + if in.ExcludedPaths != nil { + in, out := &in.ExcludedPaths, &out.ExcludedPaths + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FacebookV2 != nil { + in, out := &in.FacebookV2, &out.FacebookV2 + *out = new(WindowsFunctionAppSlotAuthSettingsV2FacebookV2Observation) + (*in).DeepCopyInto(*out) + } + if in.ForwardProxyConvention != nil { + in, out := &in.ForwardProxyConvention, &out.ForwardProxyConvention + *out = new(string) + **out = **in + } + if in.ForwardProxyCustomHostHeaderName != nil { + in, out := &in.ForwardProxyCustomHostHeaderName, 
&out.ForwardProxyCustomHostHeaderName + *out = new(string) + **out = **in + } + if in.ForwardProxyCustomSchemeHeaderName != nil { + in, out := &in.ForwardProxyCustomSchemeHeaderName, &out.ForwardProxyCustomSchemeHeaderName + *out = new(string) + **out = **in + } + if in.GithubV2 != nil { + in, out := &in.GithubV2, &out.GithubV2 + *out = new(WindowsFunctionAppSlotAuthSettingsV2GithubV2Observation) + (*in).DeepCopyInto(*out) + } + if in.GoogleV2 != nil { + in, out := &in.GoogleV2, &out.GoogleV2 + *out = new(WindowsFunctionAppSlotAuthSettingsV2GoogleV2Observation) + (*in).DeepCopyInto(*out) + } + if in.HTTPRouteAPIPrefix != nil { + in, out := &in.HTTPRouteAPIPrefix, &out.HTTPRouteAPIPrefix + *out = new(string) + **out = **in + } + if in.Login != nil { + in, out := &in.Login, &out.Login + *out = new(WindowsFunctionAppSlotAuthSettingsV2LoginObservation) + (*in).DeepCopyInto(*out) + } + if in.MicrosoftV2 != nil { + in, out := &in.MicrosoftV2, &out.MicrosoftV2 + *out = new(WindowsFunctionAppSlotAuthSettingsV2MicrosoftV2Observation) + (*in).DeepCopyInto(*out) + } + if in.RequireAuthentication != nil { + in, out := &in.RequireAuthentication, &out.RequireAuthentication + *out = new(bool) + **out = **in + } + if in.RequireHTTPS != nil { + in, out := &in.RequireHTTPS, &out.RequireHTTPS + *out = new(bool) + **out = **in + } + if in.RuntimeVersion != nil { + in, out := &in.RuntimeVersion, &out.RuntimeVersion + *out = new(string) + **out = **in + } + if in.TwitterV2 != nil { + in, out := &in.TwitterV2, &out.TwitterV2 + *out = new(WindowsFunctionAppSlotAuthSettingsV2TwitterV2Observation) + (*in).DeepCopyInto(*out) + } + if in.UnauthenticatedAction != nil { + in, out := &in.UnauthenticatedAction, &out.UnauthenticatedAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotAuthSettingsV2Observation. 
+func (in *WindowsFunctionAppSlotAuthSettingsV2Observation) DeepCopy() *WindowsFunctionAppSlotAuthSettingsV2Observation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotAuthSettingsV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSlotAuthSettingsV2Parameters) DeepCopyInto(out *WindowsFunctionAppSlotAuthSettingsV2Parameters) { + *out = *in + if in.ActiveDirectoryV2 != nil { + in, out := &in.ActiveDirectoryV2, &out.ActiveDirectoryV2 + *out = new(WindowsFunctionAppSlotAuthSettingsV2ActiveDirectoryV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.AppleV2 != nil { + in, out := &in.AppleV2, &out.AppleV2 + *out = new(WindowsFunctionAppSlotAuthSettingsV2AppleV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.AuthEnabled != nil { + in, out := &in.AuthEnabled, &out.AuthEnabled + *out = new(bool) + **out = **in + } + if in.AzureStaticWebAppV2 != nil { + in, out := &in.AzureStaticWebAppV2, &out.AzureStaticWebAppV2 + *out = new(WindowsFunctionAppSlotAuthSettingsV2AzureStaticWebAppV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.ConfigFilePath != nil { + in, out := &in.ConfigFilePath, &out.ConfigFilePath + *out = new(string) + **out = **in + } + if in.CustomOidcV2 != nil { + in, out := &in.CustomOidcV2, &out.CustomOidcV2 + *out = make([]WindowsFunctionAppSlotAuthSettingsV2CustomOidcV2Parameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultProvider != nil { + in, out := &in.DefaultProvider, &out.DefaultProvider + *out = new(string) + **out = **in + } + if in.ExcludedPaths != nil { + in, out := &in.ExcludedPaths, &out.ExcludedPaths + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FacebookV2 != nil { + in, out := &in.FacebookV2, 
&out.FacebookV2 + *out = new(WindowsFunctionAppSlotAuthSettingsV2FacebookV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.ForwardProxyConvention != nil { + in, out := &in.ForwardProxyConvention, &out.ForwardProxyConvention + *out = new(string) + **out = **in + } + if in.ForwardProxyCustomHostHeaderName != nil { + in, out := &in.ForwardProxyCustomHostHeaderName, &out.ForwardProxyCustomHostHeaderName + *out = new(string) + **out = **in + } + if in.ForwardProxyCustomSchemeHeaderName != nil { + in, out := &in.ForwardProxyCustomSchemeHeaderName, &out.ForwardProxyCustomSchemeHeaderName + *out = new(string) + **out = **in + } + if in.GithubV2 != nil { + in, out := &in.GithubV2, &out.GithubV2 + *out = new(WindowsFunctionAppSlotAuthSettingsV2GithubV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.GoogleV2 != nil { + in, out := &in.GoogleV2, &out.GoogleV2 + *out = new(WindowsFunctionAppSlotAuthSettingsV2GoogleV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.HTTPRouteAPIPrefix != nil { + in, out := &in.HTTPRouteAPIPrefix, &out.HTTPRouteAPIPrefix + *out = new(string) + **out = **in + } + if in.Login != nil { + in, out := &in.Login, &out.Login + *out = new(WindowsFunctionAppSlotAuthSettingsV2LoginParameters) + (*in).DeepCopyInto(*out) + } + if in.MicrosoftV2 != nil { + in, out := &in.MicrosoftV2, &out.MicrosoftV2 + *out = new(WindowsFunctionAppSlotAuthSettingsV2MicrosoftV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.RequireAuthentication != nil { + in, out := &in.RequireAuthentication, &out.RequireAuthentication + *out = new(bool) + **out = **in + } + if in.RequireHTTPS != nil { + in, out := &in.RequireHTTPS, &out.RequireHTTPS + *out = new(bool) + **out = **in + } + if in.RuntimeVersion != nil { + in, out := &in.RuntimeVersion, &out.RuntimeVersion + *out = new(string) + **out = **in + } + if in.TwitterV2 != nil { + in, out := &in.TwitterV2, &out.TwitterV2 + *out = new(WindowsFunctionAppSlotAuthSettingsV2TwitterV2Parameters) + (*in).DeepCopyInto(*out) + } + if 
in.UnauthenticatedAction != nil { + in, out := &in.UnauthenticatedAction, &out.UnauthenticatedAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotAuthSettingsV2Parameters. +func (in *WindowsFunctionAppSlotAuthSettingsV2Parameters) DeepCopy() *WindowsFunctionAppSlotAuthSettingsV2Parameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotAuthSettingsV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSlotAuthSettingsV2TwitterV2InitParameters) DeepCopyInto(out *WindowsFunctionAppSlotAuthSettingsV2TwitterV2InitParameters) { + *out = *in + if in.ConsumerKey != nil { + in, out := &in.ConsumerKey, &out.ConsumerKey + *out = new(string) + **out = **in + } + if in.ConsumerSecretSettingName != nil { + in, out := &in.ConsumerSecretSettingName, &out.ConsumerSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotAuthSettingsV2TwitterV2InitParameters. +func (in *WindowsFunctionAppSlotAuthSettingsV2TwitterV2InitParameters) DeepCopy() *WindowsFunctionAppSlotAuthSettingsV2TwitterV2InitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotAuthSettingsV2TwitterV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSlotAuthSettingsV2TwitterV2Observation) DeepCopyInto(out *WindowsFunctionAppSlotAuthSettingsV2TwitterV2Observation) { + *out = *in + if in.ConsumerKey != nil { + in, out := &in.ConsumerKey, &out.ConsumerKey + *out = new(string) + **out = **in + } + if in.ConsumerSecretSettingName != nil { + in, out := &in.ConsumerSecretSettingName, &out.ConsumerSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotAuthSettingsV2TwitterV2Observation. +func (in *WindowsFunctionAppSlotAuthSettingsV2TwitterV2Observation) DeepCopy() *WindowsFunctionAppSlotAuthSettingsV2TwitterV2Observation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotAuthSettingsV2TwitterV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSlotAuthSettingsV2TwitterV2Parameters) DeepCopyInto(out *WindowsFunctionAppSlotAuthSettingsV2TwitterV2Parameters) { + *out = *in + if in.ConsumerKey != nil { + in, out := &in.ConsumerKey, &out.ConsumerKey + *out = new(string) + **out = **in + } + if in.ConsumerSecretSettingName != nil { + in, out := &in.ConsumerSecretSettingName, &out.ConsumerSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotAuthSettingsV2TwitterV2Parameters. +func (in *WindowsFunctionAppSlotAuthSettingsV2TwitterV2Parameters) DeepCopy() *WindowsFunctionAppSlotAuthSettingsV2TwitterV2Parameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotAuthSettingsV2TwitterV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSlotBackupInitParameters) DeepCopyInto(out *WindowsFunctionAppSlotBackupInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(WindowsFunctionAppSlotBackupScheduleInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotBackupInitParameters. +func (in *WindowsFunctionAppSlotBackupInitParameters) DeepCopy() *WindowsFunctionAppSlotBackupInitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotBackupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSlotBackupObservation) DeepCopyInto(out *WindowsFunctionAppSlotBackupObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(WindowsFunctionAppSlotBackupScheduleObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotBackupObservation. +func (in *WindowsFunctionAppSlotBackupObservation) DeepCopy() *WindowsFunctionAppSlotBackupObservation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotBackupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSlotBackupParameters) DeepCopyInto(out *WindowsFunctionAppSlotBackupParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(WindowsFunctionAppSlotBackupScheduleParameters) + (*in).DeepCopyInto(*out) + } + out.StorageAccountURLSecretRef = in.StorageAccountURLSecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotBackupParameters. +func (in *WindowsFunctionAppSlotBackupParameters) DeepCopy() *WindowsFunctionAppSlotBackupParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotBackupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSlotBackupScheduleInitParameters) DeepCopyInto(out *WindowsFunctionAppSlotBackupScheduleInitParameters) { + *out = *in + if in.FrequencyInterval != nil { + in, out := &in.FrequencyInterval, &out.FrequencyInterval + *out = new(float64) + **out = **in + } + if in.FrequencyUnit != nil { + in, out := &in.FrequencyUnit, &out.FrequencyUnit + *out = new(string) + **out = **in + } + if in.KeepAtLeastOneBackup != nil { + in, out := &in.KeepAtLeastOneBackup, &out.KeepAtLeastOneBackup + *out = new(bool) + **out = **in + } + if in.RetentionPeriodDays != nil { + in, out := &in.RetentionPeriodDays, &out.RetentionPeriodDays + *out = new(float64) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotBackupScheduleInitParameters. 
+func (in *WindowsFunctionAppSlotBackupScheduleInitParameters) DeepCopy() *WindowsFunctionAppSlotBackupScheduleInitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotBackupScheduleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSlotBackupScheduleObservation) DeepCopyInto(out *WindowsFunctionAppSlotBackupScheduleObservation) { + *out = *in + if in.FrequencyInterval != nil { + in, out := &in.FrequencyInterval, &out.FrequencyInterval + *out = new(float64) + **out = **in + } + if in.FrequencyUnit != nil { + in, out := &in.FrequencyUnit, &out.FrequencyUnit + *out = new(string) + **out = **in + } + if in.KeepAtLeastOneBackup != nil { + in, out := &in.KeepAtLeastOneBackup, &out.KeepAtLeastOneBackup + *out = new(bool) + **out = **in + } + if in.LastExecutionTime != nil { + in, out := &in.LastExecutionTime, &out.LastExecutionTime + *out = new(string) + **out = **in + } + if in.RetentionPeriodDays != nil { + in, out := &in.RetentionPeriodDays, &out.RetentionPeriodDays + *out = new(float64) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotBackupScheduleObservation. +func (in *WindowsFunctionAppSlotBackupScheduleObservation) DeepCopy() *WindowsFunctionAppSlotBackupScheduleObservation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotBackupScheduleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSlotBackupScheduleParameters) DeepCopyInto(out *WindowsFunctionAppSlotBackupScheduleParameters) { + *out = *in + if in.FrequencyInterval != nil { + in, out := &in.FrequencyInterval, &out.FrequencyInterval + *out = new(float64) + **out = **in + } + if in.FrequencyUnit != nil { + in, out := &in.FrequencyUnit, &out.FrequencyUnit + *out = new(string) + **out = **in + } + if in.KeepAtLeastOneBackup != nil { + in, out := &in.KeepAtLeastOneBackup, &out.KeepAtLeastOneBackup + *out = new(bool) + **out = **in + } + if in.RetentionPeriodDays != nil { + in, out := &in.RetentionPeriodDays, &out.RetentionPeriodDays + *out = new(float64) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotBackupScheduleParameters. +func (in *WindowsFunctionAppSlotBackupScheduleParameters) DeepCopy() *WindowsFunctionAppSlotBackupScheduleParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotBackupScheduleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSlotConnectionStringInitParameters) DeepCopyInto(out *WindowsFunctionAppSlotConnectionStringInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotConnectionStringInitParameters. 
+func (in *WindowsFunctionAppSlotConnectionStringInitParameters) DeepCopy() *WindowsFunctionAppSlotConnectionStringInitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotConnectionStringInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSlotConnectionStringObservation) DeepCopyInto(out *WindowsFunctionAppSlotConnectionStringObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotConnectionStringObservation. +func (in *WindowsFunctionAppSlotConnectionStringObservation) DeepCopy() *WindowsFunctionAppSlotConnectionStringObservation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotConnectionStringObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSlotConnectionStringParameters) DeepCopyInto(out *WindowsFunctionAppSlotConnectionStringParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + out.ValueSecretRef = in.ValueSecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotConnectionStringParameters. 
+func (in *WindowsFunctionAppSlotConnectionStringParameters) DeepCopy() *WindowsFunctionAppSlotConnectionStringParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotConnectionStringParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSlotIdentityInitParameters) DeepCopyInto(out *WindowsFunctionAppSlotIdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotIdentityInitParameters. +func (in *WindowsFunctionAppSlotIdentityInitParameters) DeepCopy() *WindowsFunctionAppSlotIdentityInitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotIdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSlotIdentityObservation) DeepCopyInto(out *WindowsFunctionAppSlotIdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotIdentityObservation. +func (in *WindowsFunctionAppSlotIdentityObservation) DeepCopy() *WindowsFunctionAppSlotIdentityObservation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotIdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSlotIdentityParameters) DeepCopyInto(out *WindowsFunctionAppSlotIdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotIdentityParameters. 
+func (in *WindowsFunctionAppSlotIdentityParameters) DeepCopy() *WindowsFunctionAppSlotIdentityParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotIdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSlotInitParameters) DeepCopyInto(out *WindowsFunctionAppSlotInitParameters) { + *out = *in + if in.AppSettings != nil { + in, out := &in.AppSettings, &out.AppSettings + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AuthSettings != nil { + in, out := &in.AuthSettings, &out.AuthSettings + *out = new(WindowsFunctionAppSlotAuthSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AuthSettingsV2 != nil { + in, out := &in.AuthSettingsV2, &out.AuthSettingsV2 + *out = new(WindowsFunctionAppSlotAuthSettingsV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.Backup != nil { + in, out := &in.Backup, &out.Backup + *out = new(WindowsFunctionAppSlotBackupInitParameters) + (*in).DeepCopyInto(*out) + } + if in.BuiltinLoggingEnabled != nil { + in, out := &in.BuiltinLoggingEnabled, &out.BuiltinLoggingEnabled + *out = new(bool) + **out = **in + } + if in.ClientCertificateEnabled != nil { + in, out := &in.ClientCertificateEnabled, &out.ClientCertificateEnabled + *out = new(bool) + **out = **in + } + if in.ClientCertificateExclusionPaths != nil { + in, out := &in.ClientCertificateExclusionPaths, &out.ClientCertificateExclusionPaths + *out = new(string) + **out = **in + } + if in.ClientCertificateMode != nil { + in, out := &in.ClientCertificateMode, &out.ClientCertificateMode + *out = new(string) + **out = **in + } + if in.ConnectionString != nil { + in, out := 
&in.ConnectionString, &out.ConnectionString + *out = make([]WindowsFunctionAppSlotConnectionStringInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ContentShareForceDisabled != nil { + in, out := &in.ContentShareForceDisabled, &out.ContentShareForceDisabled + *out = new(bool) + **out = **in + } + if in.DailyMemoryTimeQuota != nil { + in, out := &in.DailyMemoryTimeQuota, &out.DailyMemoryTimeQuota + *out = new(float64) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.FtpPublishBasicAuthenticationEnabled != nil { + in, out := &in.FtpPublishBasicAuthenticationEnabled, &out.FtpPublishBasicAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.FunctionsExtensionVersion != nil { + in, out := &in.FunctionsExtensionVersion, &out.FunctionsExtensionVersion + *out = new(string) + **out = **in + } + if in.HTTPSOnly != nil { + in, out := &in.HTTPSOnly, &out.HTTPSOnly + *out = new(bool) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(WindowsFunctionAppSlotIdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.KeyVaultReferenceIdentityID != nil { + in, out := &in.KeyVaultReferenceIdentityID, &out.KeyVaultReferenceIdentityID + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ServicePlanID != nil { + in, out := &in.ServicePlanID, &out.ServicePlanID + *out = new(string) + **out = **in + } + if in.SiteConfig != nil { + in, out := &in.SiteConfig, &out.SiteConfig + *out = new(WindowsFunctionAppSlotSiteConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StorageAccount != nil { + in, out := &in.StorageAccount, &out.StorageAccount + *out = make([]WindowsFunctionAppSlotStorageAccountInitParameters, len(*in)) + for i := range *in 
{ + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StorageAccountName != nil { + in, out := &in.StorageAccountName, &out.StorageAccountName + *out = new(string) + **out = **in + } + if in.StorageAccountNameRef != nil { + in, out := &in.StorageAccountNameRef, &out.StorageAccountNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StorageAccountNameSelector != nil { + in, out := &in.StorageAccountNameSelector, &out.StorageAccountNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StorageKeyVaultSecretID != nil { + in, out := &in.StorageKeyVaultSecretID, &out.StorageKeyVaultSecretID + *out = new(string) + **out = **in + } + if in.StorageUsesManagedIdentity != nil { + in, out := &in.StorageUsesManagedIdentity, &out.StorageUsesManagedIdentity + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetIDRef != nil { + in, out := &in.VirtualNetworkSubnetIDRef, &out.VirtualNetworkSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkSubnetIDSelector != nil { + in, out := &in.VirtualNetworkSubnetIDSelector, &out.VirtualNetworkSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.WebdeployPublishBasicAuthenticationEnabled != nil { + in, out := &in.WebdeployPublishBasicAuthenticationEnabled, &out.WebdeployPublishBasicAuthenticationEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
WindowsFunctionAppSlotInitParameters. +func (in *WindowsFunctionAppSlotInitParameters) DeepCopy() *WindowsFunctionAppSlotInitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSlotList) DeepCopyInto(out *WindowsFunctionAppSlotList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]WindowsFunctionAppSlot, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotList. +func (in *WindowsFunctionAppSlotList) DeepCopy() *WindowsFunctionAppSlotList { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WindowsFunctionAppSlotList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSlotObservation) DeepCopyInto(out *WindowsFunctionAppSlotObservation) { + *out = *in + if in.AppSettings != nil { + in, out := &in.AppSettings, &out.AppSettings + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AuthSettings != nil { + in, out := &in.AuthSettings, &out.AuthSettings + *out = new(WindowsFunctionAppSlotAuthSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.AuthSettingsV2 != nil { + in, out := &in.AuthSettingsV2, &out.AuthSettingsV2 + *out = new(WindowsFunctionAppSlotAuthSettingsV2Observation) + (*in).DeepCopyInto(*out) + } + if in.Backup != nil { + in, out := &in.Backup, &out.Backup + *out = new(WindowsFunctionAppSlotBackupObservation) + (*in).DeepCopyInto(*out) + } + if in.BuiltinLoggingEnabled != nil { + in, out := &in.BuiltinLoggingEnabled, &out.BuiltinLoggingEnabled + *out = new(bool) + **out = **in + } + if in.ClientCertificateEnabled != nil { + in, out := &in.ClientCertificateEnabled, &out.ClientCertificateEnabled + *out = new(bool) + **out = **in + } + if in.ClientCertificateExclusionPaths != nil { + in, out := &in.ClientCertificateExclusionPaths, &out.ClientCertificateExclusionPaths + *out = new(string) + **out = **in + } + if in.ClientCertificateMode != nil { + in, out := &in.ClientCertificateMode, &out.ClientCertificateMode + *out = new(string) + **out = **in + } + if in.ConnectionString != nil { + in, out := &in.ConnectionString, &out.ConnectionString + *out = make([]WindowsFunctionAppSlotConnectionStringObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ContentShareForceDisabled != nil { + in, out := &in.ContentShareForceDisabled, &out.ContentShareForceDisabled + *out = new(bool) + **out = **in + } + if in.DailyMemoryTimeQuota != nil { + 
in, out := &in.DailyMemoryTimeQuota, &out.DailyMemoryTimeQuota + *out = new(float64) + **out = **in + } + if in.DefaultHostName != nil { + in, out := &in.DefaultHostName, &out.DefaultHostName + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.FtpPublishBasicAuthenticationEnabled != nil { + in, out := &in.FtpPublishBasicAuthenticationEnabled, &out.FtpPublishBasicAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.FunctionAppID != nil { + in, out := &in.FunctionAppID, &out.FunctionAppID + *out = new(string) + **out = **in + } + if in.FunctionsExtensionVersion != nil { + in, out := &in.FunctionsExtensionVersion, &out.FunctionsExtensionVersion + *out = new(string) + **out = **in + } + if in.HTTPSOnly != nil { + in, out := &in.HTTPSOnly, &out.HTTPSOnly + *out = new(bool) + **out = **in + } + if in.HostingEnvironmentID != nil { + in, out := &in.HostingEnvironmentID, &out.HostingEnvironmentID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(WindowsFunctionAppSlotIdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.KeyVaultReferenceIdentityID != nil { + in, out := &in.KeyVaultReferenceIdentityID, &out.KeyVaultReferenceIdentityID + *out = new(string) + **out = **in + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(string) + **out = **in + } + if in.OutboundIPAddressList != nil { + in, out := &in.OutboundIPAddressList, &out.OutboundIPAddressList + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OutboundIPAddresses != nil { + in, out := &in.OutboundIPAddresses, &out.OutboundIPAddresses + *out = new(string) + **out = **in + } + if 
in.PossibleOutboundIPAddressList != nil { + in, out := &in.PossibleOutboundIPAddressList, &out.PossibleOutboundIPAddressList + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PossibleOutboundIPAddresses != nil { + in, out := &in.PossibleOutboundIPAddresses, &out.PossibleOutboundIPAddresses + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ServicePlanID != nil { + in, out := &in.ServicePlanID, &out.ServicePlanID + *out = new(string) + **out = **in + } + if in.SiteConfig != nil { + in, out := &in.SiteConfig, &out.SiteConfig + *out = new(WindowsFunctionAppSlotSiteConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.StorageAccount != nil { + in, out := &in.StorageAccount, &out.StorageAccount + *out = make([]WindowsFunctionAppSlotStorageAccountObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StorageAccountName != nil { + in, out := &in.StorageAccountName, &out.StorageAccountName + *out = new(string) + **out = **in + } + if in.StorageKeyVaultSecretID != nil { + in, out := &in.StorageKeyVaultSecretID, &out.StorageKeyVaultSecretID + *out = new(string) + **out = **in + } + if in.StorageUsesManagedIdentity != nil { + in, out := &in.StorageUsesManagedIdentity, &out.StorageUsesManagedIdentity + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, 
&out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.WebdeployPublishBasicAuthenticationEnabled != nil { + in, out := &in.WebdeployPublishBasicAuthenticationEnabled, &out.WebdeployPublishBasicAuthenticationEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotObservation. +func (in *WindowsFunctionAppSlotObservation) DeepCopy() *WindowsFunctionAppSlotObservation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSlotParameters) DeepCopyInto(out *WindowsFunctionAppSlotParameters) { + *out = *in + if in.AppSettings != nil { + in, out := &in.AppSettings, &out.AppSettings + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AuthSettings != nil { + in, out := &in.AuthSettings, &out.AuthSettings + *out = new(WindowsFunctionAppSlotAuthSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.AuthSettingsV2 != nil { + in, out := &in.AuthSettingsV2, &out.AuthSettingsV2 + *out = new(WindowsFunctionAppSlotAuthSettingsV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.Backup != nil { + in, out := &in.Backup, &out.Backup + *out = new(WindowsFunctionAppSlotBackupParameters) + (*in).DeepCopyInto(*out) + } + if in.BuiltinLoggingEnabled != nil { + in, out := &in.BuiltinLoggingEnabled, &out.BuiltinLoggingEnabled + *out = new(bool) + **out = **in + } + if in.ClientCertificateEnabled != nil { + in, out := &in.ClientCertificateEnabled, &out.ClientCertificateEnabled + *out = new(bool) + **out = **in + } + if 
in.ClientCertificateExclusionPaths != nil { + in, out := &in.ClientCertificateExclusionPaths, &out.ClientCertificateExclusionPaths + *out = new(string) + **out = **in + } + if in.ClientCertificateMode != nil { + in, out := &in.ClientCertificateMode, &out.ClientCertificateMode + *out = new(string) + **out = **in + } + if in.ConnectionString != nil { + in, out := &in.ConnectionString, &out.ConnectionString + *out = make([]WindowsFunctionAppSlotConnectionStringParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ContentShareForceDisabled != nil { + in, out := &in.ContentShareForceDisabled, &out.ContentShareForceDisabled + *out = new(bool) + **out = **in + } + if in.DailyMemoryTimeQuota != nil { + in, out := &in.DailyMemoryTimeQuota, &out.DailyMemoryTimeQuota + *out = new(float64) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.FtpPublishBasicAuthenticationEnabled != nil { + in, out := &in.FtpPublishBasicAuthenticationEnabled, &out.FtpPublishBasicAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.FunctionAppID != nil { + in, out := &in.FunctionAppID, &out.FunctionAppID + *out = new(string) + **out = **in + } + if in.FunctionAppIDRef != nil { + in, out := &in.FunctionAppIDRef, &out.FunctionAppIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.FunctionAppIDSelector != nil { + in, out := &in.FunctionAppIDSelector, &out.FunctionAppIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.FunctionsExtensionVersion != nil { + in, out := &in.FunctionsExtensionVersion, &out.FunctionsExtensionVersion + *out = new(string) + **out = **in + } + if in.HTTPSOnly != nil { + in, out := &in.HTTPSOnly, &out.HTTPSOnly + *out = new(bool) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(WindowsFunctionAppSlotIdentityParameters) + (*in).DeepCopyInto(*out) + } + if 
in.KeyVaultReferenceIdentityID != nil { + in, out := &in.KeyVaultReferenceIdentityID, &out.KeyVaultReferenceIdentityID + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ServicePlanID != nil { + in, out := &in.ServicePlanID, &out.ServicePlanID + *out = new(string) + **out = **in + } + if in.SiteConfig != nil { + in, out := &in.SiteConfig, &out.SiteConfig + *out = new(WindowsFunctionAppSlotSiteConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.StorageAccount != nil { + in, out := &in.StorageAccount, &out.StorageAccount + *out = make([]WindowsFunctionAppSlotStorageAccountParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StorageAccountAccessKeySecretRef != nil { + in, out := &in.StorageAccountAccessKeySecretRef, &out.StorageAccountAccessKeySecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.StorageAccountName != nil { + in, out := &in.StorageAccountName, &out.StorageAccountName + *out = new(string) + **out = **in + } + if in.StorageAccountNameRef != nil { + in, out := &in.StorageAccountNameRef, &out.StorageAccountNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.StorageAccountNameSelector != nil { + in, out := &in.StorageAccountNameSelector, &out.StorageAccountNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.StorageKeyVaultSecretID != nil { + in, out := &in.StorageKeyVaultSecretID, &out.StorageKeyVaultSecretID + *out = new(string) + **out = **in + } + if in.StorageUsesManagedIdentity != nil { + in, out := &in.StorageUsesManagedIdentity, &out.StorageUsesManagedIdentity + *out = new(bool) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + 
} else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetIDRef != nil { + in, out := &in.VirtualNetworkSubnetIDRef, &out.VirtualNetworkSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkSubnetIDSelector != nil { + in, out := &in.VirtualNetworkSubnetIDSelector, &out.VirtualNetworkSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.WebdeployPublishBasicAuthenticationEnabled != nil { + in, out := &in.WebdeployPublishBasicAuthenticationEnabled, &out.WebdeployPublishBasicAuthenticationEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotParameters. +func (in *WindowsFunctionAppSlotParameters) DeepCopy() *WindowsFunctionAppSlotParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSlotSiteConfigAppServiceLogsInitParameters) DeepCopyInto(out *WindowsFunctionAppSlotSiteConfigAppServiceLogsInitParameters) { + *out = *in + if in.DiskQuotaMb != nil { + in, out := &in.DiskQuotaMb, &out.DiskQuotaMb + *out = new(float64) + **out = **in + } + if in.RetentionPeriodDays != nil { + in, out := &in.RetentionPeriodDays, &out.RetentionPeriodDays + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotSiteConfigAppServiceLogsInitParameters. 
+func (in *WindowsFunctionAppSlotSiteConfigAppServiceLogsInitParameters) DeepCopy() *WindowsFunctionAppSlotSiteConfigAppServiceLogsInitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotSiteConfigAppServiceLogsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSlotSiteConfigAppServiceLogsObservation) DeepCopyInto(out *WindowsFunctionAppSlotSiteConfigAppServiceLogsObservation) { + *out = *in + if in.DiskQuotaMb != nil { + in, out := &in.DiskQuotaMb, &out.DiskQuotaMb + *out = new(float64) + **out = **in + } + if in.RetentionPeriodDays != nil { + in, out := &in.RetentionPeriodDays, &out.RetentionPeriodDays + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotSiteConfigAppServiceLogsObservation. +func (in *WindowsFunctionAppSlotSiteConfigAppServiceLogsObservation) DeepCopy() *WindowsFunctionAppSlotSiteConfigAppServiceLogsObservation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotSiteConfigAppServiceLogsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSlotSiteConfigAppServiceLogsParameters) DeepCopyInto(out *WindowsFunctionAppSlotSiteConfigAppServiceLogsParameters) { + *out = *in + if in.DiskQuotaMb != nil { + in, out := &in.DiskQuotaMb, &out.DiskQuotaMb + *out = new(float64) + **out = **in + } + if in.RetentionPeriodDays != nil { + in, out := &in.RetentionPeriodDays, &out.RetentionPeriodDays + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotSiteConfigAppServiceLogsParameters. 
+func (in *WindowsFunctionAppSlotSiteConfigAppServiceLogsParameters) DeepCopy() *WindowsFunctionAppSlotSiteConfigAppServiceLogsParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotSiteConfigAppServiceLogsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSlotSiteConfigApplicationStackInitParameters) DeepCopyInto(out *WindowsFunctionAppSlotSiteConfigApplicationStackInitParameters) { + *out = *in + if in.DotnetVersion != nil { + in, out := &in.DotnetVersion, &out.DotnetVersion + *out = new(string) + **out = **in + } + if in.JavaVersion != nil { + in, out := &in.JavaVersion, &out.JavaVersion + *out = new(string) + **out = **in + } + if in.NodeVersion != nil { + in, out := &in.NodeVersion, &out.NodeVersion + *out = new(string) + **out = **in + } + if in.PowershellCoreVersion != nil { + in, out := &in.PowershellCoreVersion, &out.PowershellCoreVersion + *out = new(string) + **out = **in + } + if in.UseCustomRuntime != nil { + in, out := &in.UseCustomRuntime, &out.UseCustomRuntime + *out = new(bool) + **out = **in + } + if in.UseDotnetIsolatedRuntime != nil { + in, out := &in.UseDotnetIsolatedRuntime, &out.UseDotnetIsolatedRuntime + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotSiteConfigApplicationStackInitParameters. +func (in *WindowsFunctionAppSlotSiteConfigApplicationStackInitParameters) DeepCopy() *WindowsFunctionAppSlotSiteConfigApplicationStackInitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotSiteConfigApplicationStackInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSlotSiteConfigApplicationStackObservation) DeepCopyInto(out *WindowsFunctionAppSlotSiteConfigApplicationStackObservation) { + *out = *in + if in.DotnetVersion != nil { + in, out := &in.DotnetVersion, &out.DotnetVersion + *out = new(string) + **out = **in + } + if in.JavaVersion != nil { + in, out := &in.JavaVersion, &out.JavaVersion + *out = new(string) + **out = **in + } + if in.NodeVersion != nil { + in, out := &in.NodeVersion, &out.NodeVersion + *out = new(string) + **out = **in + } + if in.PowershellCoreVersion != nil { + in, out := &in.PowershellCoreVersion, &out.PowershellCoreVersion + *out = new(string) + **out = **in + } + if in.UseCustomRuntime != nil { + in, out := &in.UseCustomRuntime, &out.UseCustomRuntime + *out = new(bool) + **out = **in + } + if in.UseDotnetIsolatedRuntime != nil { + in, out := &in.UseDotnetIsolatedRuntime, &out.UseDotnetIsolatedRuntime + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotSiteConfigApplicationStackObservation. +func (in *WindowsFunctionAppSlotSiteConfigApplicationStackObservation) DeepCopy() *WindowsFunctionAppSlotSiteConfigApplicationStackObservation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotSiteConfigApplicationStackObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSlotSiteConfigApplicationStackParameters) DeepCopyInto(out *WindowsFunctionAppSlotSiteConfigApplicationStackParameters) { + *out = *in + if in.DotnetVersion != nil { + in, out := &in.DotnetVersion, &out.DotnetVersion + *out = new(string) + **out = **in + } + if in.JavaVersion != nil { + in, out := &in.JavaVersion, &out.JavaVersion + *out = new(string) + **out = **in + } + if in.NodeVersion != nil { + in, out := &in.NodeVersion, &out.NodeVersion + *out = new(string) + **out = **in + } + if in.PowershellCoreVersion != nil { + in, out := &in.PowershellCoreVersion, &out.PowershellCoreVersion + *out = new(string) + **out = **in + } + if in.UseCustomRuntime != nil { + in, out := &in.UseCustomRuntime, &out.UseCustomRuntime + *out = new(bool) + **out = **in + } + if in.UseDotnetIsolatedRuntime != nil { + in, out := &in.UseDotnetIsolatedRuntime, &out.UseDotnetIsolatedRuntime + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotSiteConfigApplicationStackParameters. +func (in *WindowsFunctionAppSlotSiteConfigApplicationStackParameters) DeepCopy() *WindowsFunctionAppSlotSiteConfigApplicationStackParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotSiteConfigApplicationStackParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSlotSiteConfigCorsInitParameters) DeepCopyInto(out *WindowsFunctionAppSlotSiteConfigCorsInitParameters) { + *out = *in + if in.AllowedOrigins != nil { + in, out := &in.AllowedOrigins, &out.AllowedOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SupportCredentials != nil { + in, out := &in.SupportCredentials, &out.SupportCredentials + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotSiteConfigCorsInitParameters. +func (in *WindowsFunctionAppSlotSiteConfigCorsInitParameters) DeepCopy() *WindowsFunctionAppSlotSiteConfigCorsInitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotSiteConfigCorsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSlotSiteConfigCorsObservation) DeepCopyInto(out *WindowsFunctionAppSlotSiteConfigCorsObservation) { + *out = *in + if in.AllowedOrigins != nil { + in, out := &in.AllowedOrigins, &out.AllowedOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SupportCredentials != nil { + in, out := &in.SupportCredentials, &out.SupportCredentials + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotSiteConfigCorsObservation. 
+func (in *WindowsFunctionAppSlotSiteConfigCorsObservation) DeepCopy() *WindowsFunctionAppSlotSiteConfigCorsObservation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotSiteConfigCorsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSlotSiteConfigCorsParameters) DeepCopyInto(out *WindowsFunctionAppSlotSiteConfigCorsParameters) { + *out = *in + if in.AllowedOrigins != nil { + in, out := &in.AllowedOrigins, &out.AllowedOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SupportCredentials != nil { + in, out := &in.SupportCredentials, &out.SupportCredentials + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotSiteConfigCorsParameters. +func (in *WindowsFunctionAppSlotSiteConfigCorsParameters) DeepCopy() *WindowsFunctionAppSlotSiteConfigCorsParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotSiteConfigCorsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSlotSiteConfigIPRestrictionHeadersInitParameters) DeepCopyInto(out *WindowsFunctionAppSlotSiteConfigIPRestrictionHeadersInitParameters) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotSiteConfigIPRestrictionHeadersInitParameters. +func (in *WindowsFunctionAppSlotSiteConfigIPRestrictionHeadersInitParameters) DeepCopy() *WindowsFunctionAppSlotSiteConfigIPRestrictionHeadersInitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotSiteConfigIPRestrictionHeadersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSlotSiteConfigIPRestrictionHeadersObservation) DeepCopyInto(out *WindowsFunctionAppSlotSiteConfigIPRestrictionHeadersObservation) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotSiteConfigIPRestrictionHeadersObservation. +func (in *WindowsFunctionAppSlotSiteConfigIPRestrictionHeadersObservation) DeepCopy() *WindowsFunctionAppSlotSiteConfigIPRestrictionHeadersObservation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotSiteConfigIPRestrictionHeadersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSlotSiteConfigIPRestrictionHeadersParameters) DeepCopyInto(out *WindowsFunctionAppSlotSiteConfigIPRestrictionHeadersParameters) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotSiteConfigIPRestrictionHeadersParameters. +func (in *WindowsFunctionAppSlotSiteConfigIPRestrictionHeadersParameters) DeepCopy() *WindowsFunctionAppSlotSiteConfigIPRestrictionHeadersParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotSiteConfigIPRestrictionHeadersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSlotSiteConfigIPRestrictionInitParameters) DeepCopyInto(out *WindowsFunctionAppSlotSiteConfigIPRestrictionInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]WindowsFunctionAppSlotSiteConfigIPRestrictionHeadersInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetIDRef != nil { + in, out := &in.VirtualNetworkSubnetIDRef, &out.VirtualNetworkSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkSubnetIDSelector != nil { + in, out := &in.VirtualNetworkSubnetIDSelector, &out.VirtualNetworkSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotSiteConfigIPRestrictionInitParameters. 
+func (in *WindowsFunctionAppSlotSiteConfigIPRestrictionInitParameters) DeepCopy() *WindowsFunctionAppSlotSiteConfigIPRestrictionInitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotSiteConfigIPRestrictionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSlotSiteConfigIPRestrictionObservation) DeepCopyInto(out *WindowsFunctionAppSlotSiteConfigIPRestrictionObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]WindowsFunctionAppSlotSiteConfigIPRestrictionHeadersObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotSiteConfigIPRestrictionObservation. 
+func (in *WindowsFunctionAppSlotSiteConfigIPRestrictionObservation) DeepCopy() *WindowsFunctionAppSlotSiteConfigIPRestrictionObservation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotSiteConfigIPRestrictionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSlotSiteConfigIPRestrictionParameters) DeepCopyInto(out *WindowsFunctionAppSlotSiteConfigIPRestrictionParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]WindowsFunctionAppSlotSiteConfigIPRestrictionHeadersParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetIDRef != nil { + in, out := &in.VirtualNetworkSubnetIDRef, &out.VirtualNetworkSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkSubnetIDSelector != nil { + in, out := &in.VirtualNetworkSubnetIDSelector, &out.VirtualNetworkSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, 
copying the receiver, creating a new WindowsFunctionAppSlotSiteConfigIPRestrictionParameters. +func (in *WindowsFunctionAppSlotSiteConfigIPRestrictionParameters) DeepCopy() *WindowsFunctionAppSlotSiteConfigIPRestrictionParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotSiteConfigIPRestrictionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSlotSiteConfigInitParameters) DeepCopyInto(out *WindowsFunctionAppSlotSiteConfigInitParameters) { + *out = *in + if in.APIDefinitionURL != nil { + in, out := &in.APIDefinitionURL, &out.APIDefinitionURL + *out = new(string) + **out = **in + } + if in.APIManagementAPIID != nil { + in, out := &in.APIManagementAPIID, &out.APIManagementAPIID + *out = new(string) + **out = **in + } + if in.AlwaysOn != nil { + in, out := &in.AlwaysOn, &out.AlwaysOn + *out = new(bool) + **out = **in + } + if in.AppCommandLine != nil { + in, out := &in.AppCommandLine, &out.AppCommandLine + *out = new(string) + **out = **in + } + if in.AppScaleLimit != nil { + in, out := &in.AppScaleLimit, &out.AppScaleLimit + *out = new(float64) + **out = **in + } + if in.AppServiceLogs != nil { + in, out := &in.AppServiceLogs, &out.AppServiceLogs + *out = new(WindowsFunctionAppSlotSiteConfigAppServiceLogsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ApplicationStack != nil { + in, out := &in.ApplicationStack, &out.ApplicationStack + *out = new(WindowsFunctionAppSlotSiteConfigApplicationStackInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AutoSwapSlotName != nil { + in, out := &in.AutoSwapSlotName, &out.AutoSwapSlotName + *out = new(string) + **out = **in + } + if in.Cors != nil { + in, out := &in.Cors, &out.Cors + *out = new(WindowsFunctionAppSlotSiteConfigCorsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DefaultDocuments != nil { + in, out := &in.DefaultDocuments, 
&out.DefaultDocuments + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ElasticInstanceMinimum != nil { + in, out := &in.ElasticInstanceMinimum, &out.ElasticInstanceMinimum + *out = new(float64) + **out = **in + } + if in.FtpsState != nil { + in, out := &in.FtpsState, &out.FtpsState + *out = new(string) + **out = **in + } + if in.HealthCheckEvictionTimeInMin != nil { + in, out := &in.HealthCheckEvictionTimeInMin, &out.HealthCheckEvictionTimeInMin + *out = new(float64) + **out = **in + } + if in.HealthCheckPath != nil { + in, out := &in.HealthCheckPath, &out.HealthCheckPath + *out = new(string) + **out = **in + } + if in.Http2Enabled != nil { + in, out := &in.Http2Enabled, &out.Http2Enabled + *out = new(bool) + **out = **in + } + if in.IPRestriction != nil { + in, out := &in.IPRestriction, &out.IPRestriction + *out = make([]WindowsFunctionAppSlotSiteConfigIPRestrictionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPRestrictionDefaultAction != nil { + in, out := &in.IPRestrictionDefaultAction, &out.IPRestrictionDefaultAction + *out = new(string) + **out = **in + } + if in.LoadBalancingMode != nil { + in, out := &in.LoadBalancingMode, &out.LoadBalancingMode + *out = new(string) + **out = **in + } + if in.ManagedPipelineMode != nil { + in, out := &in.ManagedPipelineMode, &out.ManagedPipelineMode + *out = new(string) + **out = **in + } + if in.MinimumTLSVersion != nil { + in, out := &in.MinimumTLSVersion, &out.MinimumTLSVersion + *out = new(string) + **out = **in + } + if in.PreWarmedInstanceCount != nil { + in, out := &in.PreWarmedInstanceCount, &out.PreWarmedInstanceCount + *out = new(float64) + **out = **in + } + if in.RemoteDebuggingEnabled != nil { + in, out := &in.RemoteDebuggingEnabled, &out.RemoteDebuggingEnabled + *out = new(bool) + **out = **in + } + if in.RemoteDebuggingVersion != nil 
{ + in, out := &in.RemoteDebuggingVersion, &out.RemoteDebuggingVersion + *out = new(string) + **out = **in + } + if in.RuntimeScaleMonitoringEnabled != nil { + in, out := &in.RuntimeScaleMonitoringEnabled, &out.RuntimeScaleMonitoringEnabled + *out = new(bool) + **out = **in + } + if in.ScmIPRestriction != nil { + in, out := &in.ScmIPRestriction, &out.ScmIPRestriction + *out = make([]WindowsFunctionAppSlotSiteConfigScmIPRestrictionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ScmIPRestrictionDefaultAction != nil { + in, out := &in.ScmIPRestrictionDefaultAction, &out.ScmIPRestrictionDefaultAction + *out = new(string) + **out = **in + } + if in.ScmMinimumTLSVersion != nil { + in, out := &in.ScmMinimumTLSVersion, &out.ScmMinimumTLSVersion + *out = new(string) + **out = **in + } + if in.ScmUseMainIPRestriction != nil { + in, out := &in.ScmUseMainIPRestriction, &out.ScmUseMainIPRestriction + *out = new(bool) + **out = **in + } + if in.Use32BitWorker != nil { + in, out := &in.Use32BitWorker, &out.Use32BitWorker + *out = new(bool) + **out = **in + } + if in.VnetRouteAllEnabled != nil { + in, out := &in.VnetRouteAllEnabled, &out.VnetRouteAllEnabled + *out = new(bool) + **out = **in + } + if in.WebsocketsEnabled != nil { + in, out := &in.WebsocketsEnabled, &out.WebsocketsEnabled + *out = new(bool) + **out = **in + } + if in.WorkerCount != nil { + in, out := &in.WorkerCount, &out.WorkerCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotSiteConfigInitParameters. +func (in *WindowsFunctionAppSlotSiteConfigInitParameters) DeepCopy() *WindowsFunctionAppSlotSiteConfigInitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotSiteConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *WindowsFunctionAppSlotSiteConfigObservation) DeepCopyInto(out *WindowsFunctionAppSlotSiteConfigObservation) { + *out = *in + if in.APIDefinitionURL != nil { + in, out := &in.APIDefinitionURL, &out.APIDefinitionURL + *out = new(string) + **out = **in + } + if in.APIManagementAPIID != nil { + in, out := &in.APIManagementAPIID, &out.APIManagementAPIID + *out = new(string) + **out = **in + } + if in.AlwaysOn != nil { + in, out := &in.AlwaysOn, &out.AlwaysOn + *out = new(bool) + **out = **in + } + if in.AppCommandLine != nil { + in, out := &in.AppCommandLine, &out.AppCommandLine + *out = new(string) + **out = **in + } + if in.AppScaleLimit != nil { + in, out := &in.AppScaleLimit, &out.AppScaleLimit + *out = new(float64) + **out = **in + } + if in.AppServiceLogs != nil { + in, out := &in.AppServiceLogs, &out.AppServiceLogs + *out = new(WindowsFunctionAppSlotSiteConfigAppServiceLogsObservation) + (*in).DeepCopyInto(*out) + } + if in.ApplicationStack != nil { + in, out := &in.ApplicationStack, &out.ApplicationStack + *out = new(WindowsFunctionAppSlotSiteConfigApplicationStackObservation) + (*in).DeepCopyInto(*out) + } + if in.AutoSwapSlotName != nil { + in, out := &in.AutoSwapSlotName, &out.AutoSwapSlotName + *out = new(string) + **out = **in + } + if in.Cors != nil { + in, out := &in.Cors, &out.Cors + *out = new(WindowsFunctionAppSlotSiteConfigCorsObservation) + (*in).DeepCopyInto(*out) + } + if in.DefaultDocuments != nil { + in, out := &in.DefaultDocuments, &out.DefaultDocuments + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DetailedErrorLoggingEnabled != nil { + in, out := &in.DetailedErrorLoggingEnabled, &out.DetailedErrorLoggingEnabled + *out = new(bool) + **out = **in + } + if in.ElasticInstanceMinimum != nil { + in, out := &in.ElasticInstanceMinimum, &out.ElasticInstanceMinimum + *out = new(float64) + **out = 
**in + } + if in.FtpsState != nil { + in, out := &in.FtpsState, &out.FtpsState + *out = new(string) + **out = **in + } + if in.HealthCheckEvictionTimeInMin != nil { + in, out := &in.HealthCheckEvictionTimeInMin, &out.HealthCheckEvictionTimeInMin + *out = new(float64) + **out = **in + } + if in.HealthCheckPath != nil { + in, out := &in.HealthCheckPath, &out.HealthCheckPath + *out = new(string) + **out = **in + } + if in.Http2Enabled != nil { + in, out := &in.Http2Enabled, &out.Http2Enabled + *out = new(bool) + **out = **in + } + if in.IPRestriction != nil { + in, out := &in.IPRestriction, &out.IPRestriction + *out = make([]WindowsFunctionAppSlotSiteConfigIPRestrictionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPRestrictionDefaultAction != nil { + in, out := &in.IPRestrictionDefaultAction, &out.IPRestrictionDefaultAction + *out = new(string) + **out = **in + } + if in.LoadBalancingMode != nil { + in, out := &in.LoadBalancingMode, &out.LoadBalancingMode + *out = new(string) + **out = **in + } + if in.ManagedPipelineMode != nil { + in, out := &in.ManagedPipelineMode, &out.ManagedPipelineMode + *out = new(string) + **out = **in + } + if in.MinimumTLSVersion != nil { + in, out := &in.MinimumTLSVersion, &out.MinimumTLSVersion + *out = new(string) + **out = **in + } + if in.PreWarmedInstanceCount != nil { + in, out := &in.PreWarmedInstanceCount, &out.PreWarmedInstanceCount + *out = new(float64) + **out = **in + } + if in.RemoteDebuggingEnabled != nil { + in, out := &in.RemoteDebuggingEnabled, &out.RemoteDebuggingEnabled + *out = new(bool) + **out = **in + } + if in.RemoteDebuggingVersion != nil { + in, out := &in.RemoteDebuggingVersion, &out.RemoteDebuggingVersion + *out = new(string) + **out = **in + } + if in.RuntimeScaleMonitoringEnabled != nil { + in, out := &in.RuntimeScaleMonitoringEnabled, &out.RuntimeScaleMonitoringEnabled + *out = new(bool) + **out = **in + } + if in.ScmIPRestriction != nil { + in, out := 
&in.ScmIPRestriction, &out.ScmIPRestriction + *out = make([]WindowsFunctionAppSlotSiteConfigScmIPRestrictionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ScmIPRestrictionDefaultAction != nil { + in, out := &in.ScmIPRestrictionDefaultAction, &out.ScmIPRestrictionDefaultAction + *out = new(string) + **out = **in + } + if in.ScmMinimumTLSVersion != nil { + in, out := &in.ScmMinimumTLSVersion, &out.ScmMinimumTLSVersion + *out = new(string) + **out = **in + } + if in.ScmType != nil { + in, out := &in.ScmType, &out.ScmType + *out = new(string) + **out = **in + } + if in.ScmUseMainIPRestriction != nil { + in, out := &in.ScmUseMainIPRestriction, &out.ScmUseMainIPRestriction + *out = new(bool) + **out = **in + } + if in.Use32BitWorker != nil { + in, out := &in.Use32BitWorker, &out.Use32BitWorker + *out = new(bool) + **out = **in + } + if in.VnetRouteAllEnabled != nil { + in, out := &in.VnetRouteAllEnabled, &out.VnetRouteAllEnabled + *out = new(bool) + **out = **in + } + if in.WebsocketsEnabled != nil { + in, out := &in.WebsocketsEnabled, &out.WebsocketsEnabled + *out = new(bool) + **out = **in + } + if in.WindowsFxVersion != nil { + in, out := &in.WindowsFxVersion, &out.WindowsFxVersion + *out = new(string) + **out = **in + } + if in.WorkerCount != nil { + in, out := &in.WorkerCount, &out.WorkerCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotSiteConfigObservation. +func (in *WindowsFunctionAppSlotSiteConfigObservation) DeepCopy() *WindowsFunctionAppSlotSiteConfigObservation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotSiteConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSlotSiteConfigParameters) DeepCopyInto(out *WindowsFunctionAppSlotSiteConfigParameters) { + *out = *in + if in.APIDefinitionURL != nil { + in, out := &in.APIDefinitionURL, &out.APIDefinitionURL + *out = new(string) + **out = **in + } + if in.APIManagementAPIID != nil { + in, out := &in.APIManagementAPIID, &out.APIManagementAPIID + *out = new(string) + **out = **in + } + if in.AlwaysOn != nil { + in, out := &in.AlwaysOn, &out.AlwaysOn + *out = new(bool) + **out = **in + } + if in.AppCommandLine != nil { + in, out := &in.AppCommandLine, &out.AppCommandLine + *out = new(string) + **out = **in + } + if in.AppScaleLimit != nil { + in, out := &in.AppScaleLimit, &out.AppScaleLimit + *out = new(float64) + **out = **in + } + if in.AppServiceLogs != nil { + in, out := &in.AppServiceLogs, &out.AppServiceLogs + *out = new(WindowsFunctionAppSlotSiteConfigAppServiceLogsParameters) + (*in).DeepCopyInto(*out) + } + if in.ApplicationInsightsConnectionStringSecretRef != nil { + in, out := &in.ApplicationInsightsConnectionStringSecretRef, &out.ApplicationInsightsConnectionStringSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ApplicationInsightsKeySecretRef != nil { + in, out := &in.ApplicationInsightsKeySecretRef, &out.ApplicationInsightsKeySecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ApplicationStack != nil { + in, out := &in.ApplicationStack, &out.ApplicationStack + *out = new(WindowsFunctionAppSlotSiteConfigApplicationStackParameters) + (*in).DeepCopyInto(*out) + } + if in.AutoSwapSlotName != nil { + in, out := &in.AutoSwapSlotName, &out.AutoSwapSlotName + *out = new(string) + **out = **in + } + if in.Cors != nil { + in, out := &in.Cors, &out.Cors + *out = new(WindowsFunctionAppSlotSiteConfigCorsParameters) + (*in).DeepCopyInto(*out) + } + if in.DefaultDocuments != nil { + in, out := &in.DefaultDocuments, &out.DefaultDocuments + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != 
nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ElasticInstanceMinimum != nil { + in, out := &in.ElasticInstanceMinimum, &out.ElasticInstanceMinimum + *out = new(float64) + **out = **in + } + if in.FtpsState != nil { + in, out := &in.FtpsState, &out.FtpsState + *out = new(string) + **out = **in + } + if in.HealthCheckEvictionTimeInMin != nil { + in, out := &in.HealthCheckEvictionTimeInMin, &out.HealthCheckEvictionTimeInMin + *out = new(float64) + **out = **in + } + if in.HealthCheckPath != nil { + in, out := &in.HealthCheckPath, &out.HealthCheckPath + *out = new(string) + **out = **in + } + if in.Http2Enabled != nil { + in, out := &in.Http2Enabled, &out.Http2Enabled + *out = new(bool) + **out = **in + } + if in.IPRestriction != nil { + in, out := &in.IPRestriction, &out.IPRestriction + *out = make([]WindowsFunctionAppSlotSiteConfigIPRestrictionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPRestrictionDefaultAction != nil { + in, out := &in.IPRestrictionDefaultAction, &out.IPRestrictionDefaultAction + *out = new(string) + **out = **in + } + if in.LoadBalancingMode != nil { + in, out := &in.LoadBalancingMode, &out.LoadBalancingMode + *out = new(string) + **out = **in + } + if in.ManagedPipelineMode != nil { + in, out := &in.ManagedPipelineMode, &out.ManagedPipelineMode + *out = new(string) + **out = **in + } + if in.MinimumTLSVersion != nil { + in, out := &in.MinimumTLSVersion, &out.MinimumTLSVersion + *out = new(string) + **out = **in + } + if in.PreWarmedInstanceCount != nil { + in, out := &in.PreWarmedInstanceCount, &out.PreWarmedInstanceCount + *out = new(float64) + **out = **in + } + if in.RemoteDebuggingEnabled != nil { + in, out := &in.RemoteDebuggingEnabled, &out.RemoteDebuggingEnabled + *out = new(bool) + **out = **in + } + if in.RemoteDebuggingVersion != nil { + in, out := &in.RemoteDebuggingVersion, &out.RemoteDebuggingVersion + *out = new(string) + **out = 
**in + } + if in.RuntimeScaleMonitoringEnabled != nil { + in, out := &in.RuntimeScaleMonitoringEnabled, &out.RuntimeScaleMonitoringEnabled + *out = new(bool) + **out = **in + } + if in.ScmIPRestriction != nil { + in, out := &in.ScmIPRestriction, &out.ScmIPRestriction + *out = make([]WindowsFunctionAppSlotSiteConfigScmIPRestrictionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ScmIPRestrictionDefaultAction != nil { + in, out := &in.ScmIPRestrictionDefaultAction, &out.ScmIPRestrictionDefaultAction + *out = new(string) + **out = **in + } + if in.ScmMinimumTLSVersion != nil { + in, out := &in.ScmMinimumTLSVersion, &out.ScmMinimumTLSVersion + *out = new(string) + **out = **in + } + if in.ScmUseMainIPRestriction != nil { + in, out := &in.ScmUseMainIPRestriction, &out.ScmUseMainIPRestriction + *out = new(bool) + **out = **in + } + if in.Use32BitWorker != nil { + in, out := &in.Use32BitWorker, &out.Use32BitWorker + *out = new(bool) + **out = **in + } + if in.VnetRouteAllEnabled != nil { + in, out := &in.VnetRouteAllEnabled, &out.VnetRouteAllEnabled + *out = new(bool) + **out = **in + } + if in.WebsocketsEnabled != nil { + in, out := &in.WebsocketsEnabled, &out.WebsocketsEnabled + *out = new(bool) + **out = **in + } + if in.WorkerCount != nil { + in, out := &in.WorkerCount, &out.WorkerCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotSiteConfigParameters. +func (in *WindowsFunctionAppSlotSiteConfigParameters) DeepCopy() *WindowsFunctionAppSlotSiteConfigParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotSiteConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSlotSiteConfigScmIPRestrictionHeadersInitParameters) DeepCopyInto(out *WindowsFunctionAppSlotSiteConfigScmIPRestrictionHeadersInitParameters) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotSiteConfigScmIPRestrictionHeadersInitParameters. +func (in *WindowsFunctionAppSlotSiteConfigScmIPRestrictionHeadersInitParameters) DeepCopy() *WindowsFunctionAppSlotSiteConfigScmIPRestrictionHeadersInitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotSiteConfigScmIPRestrictionHeadersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSlotSiteConfigScmIPRestrictionHeadersObservation) DeepCopyInto(out *WindowsFunctionAppSlotSiteConfigScmIPRestrictionHeadersObservation) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotSiteConfigScmIPRestrictionHeadersObservation. +func (in *WindowsFunctionAppSlotSiteConfigScmIPRestrictionHeadersObservation) DeepCopy() *WindowsFunctionAppSlotSiteConfigScmIPRestrictionHeadersObservation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotSiteConfigScmIPRestrictionHeadersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSlotSiteConfigScmIPRestrictionHeadersParameters) DeepCopyInto(out *WindowsFunctionAppSlotSiteConfigScmIPRestrictionHeadersParameters) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotSiteConfigScmIPRestrictionHeadersParameters. +func (in *WindowsFunctionAppSlotSiteConfigScmIPRestrictionHeadersParameters) DeepCopy() *WindowsFunctionAppSlotSiteConfigScmIPRestrictionHeadersParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotSiteConfigScmIPRestrictionHeadersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSlotSiteConfigScmIPRestrictionInitParameters) DeepCopyInto(out *WindowsFunctionAppSlotSiteConfigScmIPRestrictionInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]WindowsFunctionAppSlotSiteConfigScmIPRestrictionHeadersInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetIDRef != nil { + in, out := &in.VirtualNetworkSubnetIDRef, &out.VirtualNetworkSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkSubnetIDSelector != nil { + in, out := &in.VirtualNetworkSubnetIDSelector, &out.VirtualNetworkSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotSiteConfigScmIPRestrictionInitParameters. 
+func (in *WindowsFunctionAppSlotSiteConfigScmIPRestrictionInitParameters) DeepCopy() *WindowsFunctionAppSlotSiteConfigScmIPRestrictionInitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotSiteConfigScmIPRestrictionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSlotSiteConfigScmIPRestrictionObservation) DeepCopyInto(out *WindowsFunctionAppSlotSiteConfigScmIPRestrictionObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]WindowsFunctionAppSlotSiteConfigScmIPRestrictionHeadersObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotSiteConfigScmIPRestrictionObservation. 
+func (in *WindowsFunctionAppSlotSiteConfigScmIPRestrictionObservation) DeepCopy() *WindowsFunctionAppSlotSiteConfigScmIPRestrictionObservation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotSiteConfigScmIPRestrictionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSlotSiteConfigScmIPRestrictionParameters) DeepCopyInto(out *WindowsFunctionAppSlotSiteConfigScmIPRestrictionParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]WindowsFunctionAppSlotSiteConfigScmIPRestrictionHeadersParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetIDRef != nil { + in, out := &in.VirtualNetworkSubnetIDRef, &out.VirtualNetworkSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkSubnetIDSelector != nil { + in, out := &in.VirtualNetworkSubnetIDSelector, &out.VirtualNetworkSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated 
deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotSiteConfigScmIPRestrictionParameters. +func (in *WindowsFunctionAppSlotSiteConfigScmIPRestrictionParameters) DeepCopy() *WindowsFunctionAppSlotSiteConfigScmIPRestrictionParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotSiteConfigScmIPRestrictionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSlotSiteCredentialInitParameters) DeepCopyInto(out *WindowsFunctionAppSlotSiteCredentialInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotSiteCredentialInitParameters. +func (in *WindowsFunctionAppSlotSiteCredentialInitParameters) DeepCopy() *WindowsFunctionAppSlotSiteCredentialInitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotSiteCredentialInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSlotSiteCredentialObservation) DeepCopyInto(out *WindowsFunctionAppSlotSiteCredentialObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Password != nil { + in, out := &in.Password, &out.Password + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotSiteCredentialObservation. 
+func (in *WindowsFunctionAppSlotSiteCredentialObservation) DeepCopy() *WindowsFunctionAppSlotSiteCredentialObservation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotSiteCredentialObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSlotSiteCredentialParameters) DeepCopyInto(out *WindowsFunctionAppSlotSiteCredentialParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotSiteCredentialParameters. +func (in *WindowsFunctionAppSlotSiteCredentialParameters) DeepCopy() *WindowsFunctionAppSlotSiteCredentialParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotSiteCredentialParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSlotSpec) DeepCopyInto(out *WindowsFunctionAppSlotSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotSpec. +func (in *WindowsFunctionAppSlotSpec) DeepCopy() *WindowsFunctionAppSlotSpec { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSlotStatus) DeepCopyInto(out *WindowsFunctionAppSlotStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotStatus. +func (in *WindowsFunctionAppSlotStatus) DeepCopy() *WindowsFunctionAppSlotStatus { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSlotStorageAccountInitParameters) DeepCopyInto(out *WindowsFunctionAppSlotStorageAccountInitParameters) { + *out = *in + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } + if in.MountPath != nil { + in, out := &in.MountPath, &out.MountPath + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ShareName != nil { + in, out := &in.ShareName, &out.ShareName + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotStorageAccountInitParameters. +func (in *WindowsFunctionAppSlotStorageAccountInitParameters) DeepCopy() *WindowsFunctionAppSlotStorageAccountInitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotStorageAccountInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSlotStorageAccountObservation) DeepCopyInto(out *WindowsFunctionAppSlotStorageAccountObservation) { + *out = *in + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } + if in.MountPath != nil { + in, out := &in.MountPath, &out.MountPath + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ShareName != nil { + in, out := &in.ShareName, &out.ShareName + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotStorageAccountObservation. +func (in *WindowsFunctionAppSlotStorageAccountObservation) DeepCopy() *WindowsFunctionAppSlotStorageAccountObservation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotStorageAccountObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppSlotStorageAccountParameters) DeepCopyInto(out *WindowsFunctionAppSlotStorageAccountParameters) { + *out = *in + out.AccessKeySecretRef = in.AccessKeySecretRef + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } + if in.MountPath != nil { + in, out := &in.MountPath, &out.MountPath + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ShareName != nil { + in, out := &in.ShareName, &out.ShareName + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSlotStorageAccountParameters. +func (in *WindowsFunctionAppSlotStorageAccountParameters) DeepCopy() *WindowsFunctionAppSlotStorageAccountParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSlotStorageAccountParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppSpec) DeepCopyInto(out *WindowsFunctionAppSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppSpec. +func (in *WindowsFunctionAppSpec) DeepCopy() *WindowsFunctionAppSpec { + if in == nil { + return nil + } + out := new(WindowsFunctionAppSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppStatus) DeepCopyInto(out *WindowsFunctionAppStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppStatus. +func (in *WindowsFunctionAppStatus) DeepCopy() *WindowsFunctionAppStatus { + if in == nil { + return nil + } + out := new(WindowsFunctionAppStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppStickySettingsInitParameters) DeepCopyInto(out *WindowsFunctionAppStickySettingsInitParameters) { + *out = *in + if in.AppSettingNames != nil { + in, out := &in.AppSettingNames, &out.AppSettingNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ConnectionStringNames != nil { + in, out := &in.ConnectionStringNames, &out.ConnectionStringNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppStickySettingsInitParameters. +func (in *WindowsFunctionAppStickySettingsInitParameters) DeepCopy() *WindowsFunctionAppStickySettingsInitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppStickySettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppStickySettingsObservation) DeepCopyInto(out *WindowsFunctionAppStickySettingsObservation) { + *out = *in + if in.AppSettingNames != nil { + in, out := &in.AppSettingNames, &out.AppSettingNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ConnectionStringNames != nil { + in, out := &in.ConnectionStringNames, &out.ConnectionStringNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppStickySettingsObservation. +func (in *WindowsFunctionAppStickySettingsObservation) DeepCopy() *WindowsFunctionAppStickySettingsObservation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppStickySettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppStickySettingsParameters) DeepCopyInto(out *WindowsFunctionAppStickySettingsParameters) { + *out = *in + if in.AppSettingNames != nil { + in, out := &in.AppSettingNames, &out.AppSettingNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ConnectionStringNames != nil { + in, out := &in.ConnectionStringNames, &out.ConnectionStringNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppStickySettingsParameters. 
+func (in *WindowsFunctionAppStickySettingsParameters) DeepCopy() *WindowsFunctionAppStickySettingsParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppStickySettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsFunctionAppStorageAccountInitParameters) DeepCopyInto(out *WindowsFunctionAppStorageAccountInitParameters) { + *out = *in + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } + if in.MountPath != nil { + in, out := &in.MountPath, &out.MountPath + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ShareName != nil { + in, out := &in.ShareName, &out.ShareName + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppStorageAccountInitParameters. +func (in *WindowsFunctionAppStorageAccountInitParameters) DeepCopy() *WindowsFunctionAppStorageAccountInitParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppStorageAccountInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppStorageAccountObservation) DeepCopyInto(out *WindowsFunctionAppStorageAccountObservation) { + *out = *in + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } + if in.MountPath != nil { + in, out := &in.MountPath, &out.MountPath + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ShareName != nil { + in, out := &in.ShareName, &out.ShareName + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppStorageAccountObservation. +func (in *WindowsFunctionAppStorageAccountObservation) DeepCopy() *WindowsFunctionAppStorageAccountObservation { + if in == nil { + return nil + } + out := new(WindowsFunctionAppStorageAccountObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsFunctionAppStorageAccountParameters) DeepCopyInto(out *WindowsFunctionAppStorageAccountParameters) { + *out = *in + out.AccessKeySecretRef = in.AccessKeySecretRef + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } + if in.MountPath != nil { + in, out := &in.MountPath, &out.MountPath + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ShareName != nil { + in, out := &in.ShareName, &out.ShareName + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsFunctionAppStorageAccountParameters. +func (in *WindowsFunctionAppStorageAccountParameters) DeepCopy() *WindowsFunctionAppStorageAccountParameters { + if in == nil { + return nil + } + out := new(WindowsFunctionAppStorageAccountParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebApp) DeepCopyInto(out *WindowsWebApp) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebApp. +func (in *WindowsWebApp) DeepCopy() *WindowsWebApp { + if in == nil { + return nil + } + out := new(WindowsWebApp) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *WindowsWebApp) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppAuthSettingsActiveDirectoryInitParameters) DeepCopyInto(out *WindowsWebAppAuthSettingsActiveDirectoryInitParameters) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppAuthSettingsActiveDirectoryInitParameters. +func (in *WindowsWebAppAuthSettingsActiveDirectoryInitParameters) DeepCopy() *WindowsWebAppAuthSettingsActiveDirectoryInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppAuthSettingsActiveDirectoryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppAuthSettingsActiveDirectoryObservation) DeepCopyInto(out *WindowsWebAppAuthSettingsActiveDirectoryObservation) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppAuthSettingsActiveDirectoryObservation. +func (in *WindowsWebAppAuthSettingsActiveDirectoryObservation) DeepCopy() *WindowsWebAppAuthSettingsActiveDirectoryObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppAuthSettingsActiveDirectoryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppAuthSettingsActiveDirectoryParameters) DeepCopyInto(out *WindowsWebAppAuthSettingsActiveDirectoryParameters) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSecretRef != nil { + in, out := &in.ClientSecretSecretRef, &out.ClientSecretSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppAuthSettingsActiveDirectoryParameters. +func (in *WindowsWebAppAuthSettingsActiveDirectoryParameters) DeepCopy() *WindowsWebAppAuthSettingsActiveDirectoryParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppAuthSettingsActiveDirectoryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppAuthSettingsFacebookInitParameters) DeepCopyInto(out *WindowsWebAppAuthSettingsFacebookInitParameters) { + *out = *in + if in.AppID != nil { + in, out := &in.AppID, &out.AppID + *out = new(string) + **out = **in + } + if in.AppSecretSettingName != nil { + in, out := &in.AppSecretSettingName, &out.AppSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppAuthSettingsFacebookInitParameters. +func (in *WindowsWebAppAuthSettingsFacebookInitParameters) DeepCopy() *WindowsWebAppAuthSettingsFacebookInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppAuthSettingsFacebookInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppAuthSettingsFacebookObservation) DeepCopyInto(out *WindowsWebAppAuthSettingsFacebookObservation) { + *out = *in + if in.AppID != nil { + in, out := &in.AppID, &out.AppID + *out = new(string) + **out = **in + } + if in.AppSecretSettingName != nil { + in, out := &in.AppSecretSettingName, &out.AppSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppAuthSettingsFacebookObservation. 
+func (in *WindowsWebAppAuthSettingsFacebookObservation) DeepCopy() *WindowsWebAppAuthSettingsFacebookObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppAuthSettingsFacebookObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppAuthSettingsFacebookParameters) DeepCopyInto(out *WindowsWebAppAuthSettingsFacebookParameters) { + *out = *in + if in.AppID != nil { + in, out := &in.AppID, &out.AppID + *out = new(string) + **out = **in + } + if in.AppSecretSecretRef != nil { + in, out := &in.AppSecretSecretRef, &out.AppSecretSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.AppSecretSettingName != nil { + in, out := &in.AppSecretSettingName, &out.AppSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppAuthSettingsFacebookParameters. +func (in *WindowsWebAppAuthSettingsFacebookParameters) DeepCopy() *WindowsWebAppAuthSettingsFacebookParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppAuthSettingsFacebookParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppAuthSettingsGithubInitParameters) DeepCopyInto(out *WindowsWebAppAuthSettingsGithubInitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppAuthSettingsGithubInitParameters. +func (in *WindowsWebAppAuthSettingsGithubInitParameters) DeepCopy() *WindowsWebAppAuthSettingsGithubInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppAuthSettingsGithubInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppAuthSettingsGithubObservation) DeepCopyInto(out *WindowsWebAppAuthSettingsGithubObservation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppAuthSettingsGithubObservation. 
+func (in *WindowsWebAppAuthSettingsGithubObservation) DeepCopy() *WindowsWebAppAuthSettingsGithubObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppAuthSettingsGithubObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppAuthSettingsGithubParameters) DeepCopyInto(out *WindowsWebAppAuthSettingsGithubParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSecretRef != nil { + in, out := &in.ClientSecretSecretRef, &out.ClientSecretSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppAuthSettingsGithubParameters. +func (in *WindowsWebAppAuthSettingsGithubParameters) DeepCopy() *WindowsWebAppAuthSettingsGithubParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppAuthSettingsGithubParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppAuthSettingsGoogleInitParameters) DeepCopyInto(out *WindowsWebAppAuthSettingsGoogleInitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppAuthSettingsGoogleInitParameters. +func (in *WindowsWebAppAuthSettingsGoogleInitParameters) DeepCopy() *WindowsWebAppAuthSettingsGoogleInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppAuthSettingsGoogleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppAuthSettingsGoogleObservation) DeepCopyInto(out *WindowsWebAppAuthSettingsGoogleObservation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppAuthSettingsGoogleObservation. 
+func (in *WindowsWebAppAuthSettingsGoogleObservation) DeepCopy() *WindowsWebAppAuthSettingsGoogleObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppAuthSettingsGoogleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppAuthSettingsGoogleParameters) DeepCopyInto(out *WindowsWebAppAuthSettingsGoogleParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSecretRef != nil { + in, out := &in.ClientSecretSecretRef, &out.ClientSecretSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppAuthSettingsGoogleParameters. +func (in *WindowsWebAppAuthSettingsGoogleParameters) DeepCopy() *WindowsWebAppAuthSettingsGoogleParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppAuthSettingsGoogleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppAuthSettingsInitParameters) DeepCopyInto(out *WindowsWebAppAuthSettingsInitParameters) { + *out = *in + if in.ActiveDirectory != nil { + in, out := &in.ActiveDirectory, &out.ActiveDirectory + *out = new(WindowsWebAppAuthSettingsActiveDirectoryInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AdditionalLoginParameters != nil { + in, out := &in.AdditionalLoginParameters, &out.AdditionalLoginParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AllowedExternalRedirectUrls != nil { + in, out := &in.AllowedExternalRedirectUrls, &out.AllowedExternalRedirectUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DefaultProvider != nil { + in, out := &in.DefaultProvider, &out.DefaultProvider + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Facebook != nil { + in, out := &in.Facebook, &out.Facebook + *out = new(WindowsWebAppAuthSettingsFacebookInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Github != nil { + in, out := &in.Github, &out.Github + *out = new(WindowsWebAppAuthSettingsGithubInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Google != nil { + in, out := &in.Google, &out.Google + *out = new(WindowsWebAppAuthSettingsGoogleInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } + if in.Microsoft != nil { + in, out := &in.Microsoft, &out.Microsoft + *out = new(WindowsWebAppAuthSettingsMicrosoftInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RuntimeVersion != nil { + in, out := 
&in.RuntimeVersion, &out.RuntimeVersion + *out = new(string) + **out = **in + } + if in.TokenRefreshExtensionHours != nil { + in, out := &in.TokenRefreshExtensionHours, &out.TokenRefreshExtensionHours + *out = new(float64) + **out = **in + } + if in.TokenStoreEnabled != nil { + in, out := &in.TokenStoreEnabled, &out.TokenStoreEnabled + *out = new(bool) + **out = **in + } + if in.Twitter != nil { + in, out := &in.Twitter, &out.Twitter + *out = new(WindowsWebAppAuthSettingsTwitterInitParameters) + (*in).DeepCopyInto(*out) + } + if in.UnauthenticatedClientAction != nil { + in, out := &in.UnauthenticatedClientAction, &out.UnauthenticatedClientAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppAuthSettingsInitParameters. +func (in *WindowsWebAppAuthSettingsInitParameters) DeepCopy() *WindowsWebAppAuthSettingsInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppAuthSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppAuthSettingsMicrosoftInitParameters) DeepCopyInto(out *WindowsWebAppAuthSettingsMicrosoftInitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppAuthSettingsMicrosoftInitParameters. 
+func (in *WindowsWebAppAuthSettingsMicrosoftInitParameters) DeepCopy() *WindowsWebAppAuthSettingsMicrosoftInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppAuthSettingsMicrosoftInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppAuthSettingsMicrosoftObservation) DeepCopyInto(out *WindowsWebAppAuthSettingsMicrosoftObservation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppAuthSettingsMicrosoftObservation. +func (in *WindowsWebAppAuthSettingsMicrosoftObservation) DeepCopy() *WindowsWebAppAuthSettingsMicrosoftObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppAuthSettingsMicrosoftObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppAuthSettingsMicrosoftParameters) DeepCopyInto(out *WindowsWebAppAuthSettingsMicrosoftParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSecretRef != nil { + in, out := &in.ClientSecretSecretRef, &out.ClientSecretSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppAuthSettingsMicrosoftParameters. +func (in *WindowsWebAppAuthSettingsMicrosoftParameters) DeepCopy() *WindowsWebAppAuthSettingsMicrosoftParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppAuthSettingsMicrosoftParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppAuthSettingsObservation) DeepCopyInto(out *WindowsWebAppAuthSettingsObservation) { + *out = *in + if in.ActiveDirectory != nil { + in, out := &in.ActiveDirectory, &out.ActiveDirectory + *out = new(WindowsWebAppAuthSettingsActiveDirectoryObservation) + (*in).DeepCopyInto(*out) + } + if in.AdditionalLoginParameters != nil { + in, out := &in.AdditionalLoginParameters, &out.AdditionalLoginParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AllowedExternalRedirectUrls != nil { + in, out := &in.AllowedExternalRedirectUrls, &out.AllowedExternalRedirectUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DefaultProvider != nil { + in, out := &in.DefaultProvider, &out.DefaultProvider + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Facebook != nil { + in, out := &in.Facebook, &out.Facebook + *out = new(WindowsWebAppAuthSettingsFacebookObservation) + (*in).DeepCopyInto(*out) + } + if in.Github != nil { + in, out := &in.Github, &out.Github + *out = new(WindowsWebAppAuthSettingsGithubObservation) + (*in).DeepCopyInto(*out) + } + if in.Google != nil { + in, out := &in.Google, &out.Google + *out = new(WindowsWebAppAuthSettingsGoogleObservation) + (*in).DeepCopyInto(*out) + } + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } + if in.Microsoft != nil { + in, out := &in.Microsoft, &out.Microsoft + *out = new(WindowsWebAppAuthSettingsMicrosoftObservation) + (*in).DeepCopyInto(*out) + } + if in.RuntimeVersion != nil { + in, out := &in.RuntimeVersion, 
&out.RuntimeVersion + *out = new(string) + **out = **in + } + if in.TokenRefreshExtensionHours != nil { + in, out := &in.TokenRefreshExtensionHours, &out.TokenRefreshExtensionHours + *out = new(float64) + **out = **in + } + if in.TokenStoreEnabled != nil { + in, out := &in.TokenStoreEnabled, &out.TokenStoreEnabled + *out = new(bool) + **out = **in + } + if in.Twitter != nil { + in, out := &in.Twitter, &out.Twitter + *out = new(WindowsWebAppAuthSettingsTwitterObservation) + (*in).DeepCopyInto(*out) + } + if in.UnauthenticatedClientAction != nil { + in, out := &in.UnauthenticatedClientAction, &out.UnauthenticatedClientAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppAuthSettingsObservation. +func (in *WindowsWebAppAuthSettingsObservation) DeepCopy() *WindowsWebAppAuthSettingsObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppAuthSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppAuthSettingsParameters) DeepCopyInto(out *WindowsWebAppAuthSettingsParameters) { + *out = *in + if in.ActiveDirectory != nil { + in, out := &in.ActiveDirectory, &out.ActiveDirectory + *out = new(WindowsWebAppAuthSettingsActiveDirectoryParameters) + (*in).DeepCopyInto(*out) + } + if in.AdditionalLoginParameters != nil { + in, out := &in.AdditionalLoginParameters, &out.AdditionalLoginParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AllowedExternalRedirectUrls != nil { + in, out := &in.AllowedExternalRedirectUrls, &out.AllowedExternalRedirectUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DefaultProvider != nil { + in, out := &in.DefaultProvider, &out.DefaultProvider + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Facebook != nil { + in, out := &in.Facebook, &out.Facebook + *out = new(WindowsWebAppAuthSettingsFacebookParameters) + (*in).DeepCopyInto(*out) + } + if in.Github != nil { + in, out := &in.Github, &out.Github + *out = new(WindowsWebAppAuthSettingsGithubParameters) + (*in).DeepCopyInto(*out) + } + if in.Google != nil { + in, out := &in.Google, &out.Google + *out = new(WindowsWebAppAuthSettingsGoogleParameters) + (*in).DeepCopyInto(*out) + } + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } + if in.Microsoft != nil { + in, out := &in.Microsoft, &out.Microsoft + *out = new(WindowsWebAppAuthSettingsMicrosoftParameters) + (*in).DeepCopyInto(*out) + } + if in.RuntimeVersion != nil { + in, out := &in.RuntimeVersion, 
&out.RuntimeVersion + *out = new(string) + **out = **in + } + if in.TokenRefreshExtensionHours != nil { + in, out := &in.TokenRefreshExtensionHours, &out.TokenRefreshExtensionHours + *out = new(float64) + **out = **in + } + if in.TokenStoreEnabled != nil { + in, out := &in.TokenStoreEnabled, &out.TokenStoreEnabled + *out = new(bool) + **out = **in + } + if in.Twitter != nil { + in, out := &in.Twitter, &out.Twitter + *out = new(WindowsWebAppAuthSettingsTwitterParameters) + (*in).DeepCopyInto(*out) + } + if in.UnauthenticatedClientAction != nil { + in, out := &in.UnauthenticatedClientAction, &out.UnauthenticatedClientAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppAuthSettingsParameters. +func (in *WindowsWebAppAuthSettingsParameters) DeepCopy() *WindowsWebAppAuthSettingsParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppAuthSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppAuthSettingsTwitterInitParameters) DeepCopyInto(out *WindowsWebAppAuthSettingsTwitterInitParameters) { + *out = *in + if in.ConsumerKey != nil { + in, out := &in.ConsumerKey, &out.ConsumerKey + *out = new(string) + **out = **in + } + if in.ConsumerSecretSettingName != nil { + in, out := &in.ConsumerSecretSettingName, &out.ConsumerSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppAuthSettingsTwitterInitParameters. 
+func (in *WindowsWebAppAuthSettingsTwitterInitParameters) DeepCopy() *WindowsWebAppAuthSettingsTwitterInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppAuthSettingsTwitterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppAuthSettingsTwitterObservation) DeepCopyInto(out *WindowsWebAppAuthSettingsTwitterObservation) { + *out = *in + if in.ConsumerKey != nil { + in, out := &in.ConsumerKey, &out.ConsumerKey + *out = new(string) + **out = **in + } + if in.ConsumerSecretSettingName != nil { + in, out := &in.ConsumerSecretSettingName, &out.ConsumerSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppAuthSettingsTwitterObservation. +func (in *WindowsWebAppAuthSettingsTwitterObservation) DeepCopy() *WindowsWebAppAuthSettingsTwitterObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppAuthSettingsTwitterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppAuthSettingsTwitterParameters) DeepCopyInto(out *WindowsWebAppAuthSettingsTwitterParameters) { + *out = *in + if in.ConsumerKey != nil { + in, out := &in.ConsumerKey, &out.ConsumerKey + *out = new(string) + **out = **in + } + if in.ConsumerSecretSecretRef != nil { + in, out := &in.ConsumerSecretSecretRef, &out.ConsumerSecretSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ConsumerSecretSettingName != nil { + in, out := &in.ConsumerSecretSettingName, &out.ConsumerSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppAuthSettingsTwitterParameters. 
+func (in *WindowsWebAppAuthSettingsTwitterParameters) DeepCopy() *WindowsWebAppAuthSettingsTwitterParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppAuthSettingsTwitterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppAuthSettingsV2ActiveDirectoryV2InitParameters) DeepCopyInto(out *WindowsWebAppAuthSettingsV2ActiveDirectoryV2InitParameters) { + *out = *in + if in.AllowedApplications != nil { + in, out := &in.AllowedApplications, &out.AllowedApplications + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedGroups != nil { + in, out := &in.AllowedGroups, &out.AllowedGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedIdentities != nil { + in, out := &in.AllowedIdentities, &out.AllowedIdentities + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretCertificateThumbprint != nil { + in, out := &in.ClientSecretCertificateThumbprint, &out.ClientSecretCertificateThumbprint + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if 
in.JwtAllowedClientApplications != nil { + in, out := &in.JwtAllowedClientApplications, &out.JwtAllowedClientApplications + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.JwtAllowedGroups != nil { + in, out := &in.JwtAllowedGroups, &out.JwtAllowedGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LoginParameters != nil { + in, out := &in.LoginParameters, &out.LoginParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TenantAuthEndpoint != nil { + in, out := &in.TenantAuthEndpoint, &out.TenantAuthEndpoint + *out = new(string) + **out = **in + } + if in.WwwAuthenticationDisabled != nil { + in, out := &in.WwwAuthenticationDisabled, &out.WwwAuthenticationDisabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppAuthSettingsV2ActiveDirectoryV2InitParameters. +func (in *WindowsWebAppAuthSettingsV2ActiveDirectoryV2InitParameters) DeepCopy() *WindowsWebAppAuthSettingsV2ActiveDirectoryV2InitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppAuthSettingsV2ActiveDirectoryV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppAuthSettingsV2ActiveDirectoryV2Observation) DeepCopyInto(out *WindowsWebAppAuthSettingsV2ActiveDirectoryV2Observation) { + *out = *in + if in.AllowedApplications != nil { + in, out := &in.AllowedApplications, &out.AllowedApplications + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedGroups != nil { + in, out := &in.AllowedGroups, &out.AllowedGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedIdentities != nil { + in, out := &in.AllowedIdentities, &out.AllowedIdentities + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretCertificateThumbprint != nil { + in, out := &in.ClientSecretCertificateThumbprint, &out.ClientSecretCertificateThumbprint + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.JwtAllowedClientApplications != nil { + in, out := &in.JwtAllowedClientApplications, &out.JwtAllowedClientApplications + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.JwtAllowedGroups != nil { + in, out := &in.JwtAllowedGroups, 
&out.JwtAllowedGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LoginParameters != nil { + in, out := &in.LoginParameters, &out.LoginParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TenantAuthEndpoint != nil { + in, out := &in.TenantAuthEndpoint, &out.TenantAuthEndpoint + *out = new(string) + **out = **in + } + if in.WwwAuthenticationDisabled != nil { + in, out := &in.WwwAuthenticationDisabled, &out.WwwAuthenticationDisabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppAuthSettingsV2ActiveDirectoryV2Observation. +func (in *WindowsWebAppAuthSettingsV2ActiveDirectoryV2Observation) DeepCopy() *WindowsWebAppAuthSettingsV2ActiveDirectoryV2Observation { + if in == nil { + return nil + } + out := new(WindowsWebAppAuthSettingsV2ActiveDirectoryV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppAuthSettingsV2ActiveDirectoryV2Parameters) DeepCopyInto(out *WindowsWebAppAuthSettingsV2ActiveDirectoryV2Parameters) { + *out = *in + if in.AllowedApplications != nil { + in, out := &in.AllowedApplications, &out.AllowedApplications + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedGroups != nil { + in, out := &in.AllowedGroups, &out.AllowedGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedIdentities != nil { + in, out := &in.AllowedIdentities, &out.AllowedIdentities + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretCertificateThumbprint != nil { + in, out := &in.ClientSecretCertificateThumbprint, &out.ClientSecretCertificateThumbprint + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.JwtAllowedClientApplications != nil { + in, out := &in.JwtAllowedClientApplications, &out.JwtAllowedClientApplications + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.JwtAllowedGroups != nil { + in, out := &in.JwtAllowedGroups, 
&out.JwtAllowedGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LoginParameters != nil { + in, out := &in.LoginParameters, &out.LoginParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TenantAuthEndpoint != nil { + in, out := &in.TenantAuthEndpoint, &out.TenantAuthEndpoint + *out = new(string) + **out = **in + } + if in.WwwAuthenticationDisabled != nil { + in, out := &in.WwwAuthenticationDisabled, &out.WwwAuthenticationDisabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppAuthSettingsV2ActiveDirectoryV2Parameters. +func (in *WindowsWebAppAuthSettingsV2ActiveDirectoryV2Parameters) DeepCopy() *WindowsWebAppAuthSettingsV2ActiveDirectoryV2Parameters { + if in == nil { + return nil + } + out := new(WindowsWebAppAuthSettingsV2ActiveDirectoryV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppAuthSettingsV2AppleV2InitParameters) DeepCopyInto(out *WindowsWebAppAuthSettingsV2AppleV2InitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppAuthSettingsV2AppleV2InitParameters. 
+func (in *WindowsWebAppAuthSettingsV2AppleV2InitParameters) DeepCopy() *WindowsWebAppAuthSettingsV2AppleV2InitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppAuthSettingsV2AppleV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppAuthSettingsV2AppleV2Observation) DeepCopyInto(out *WindowsWebAppAuthSettingsV2AppleV2Observation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppAuthSettingsV2AppleV2Observation. +func (in *WindowsWebAppAuthSettingsV2AppleV2Observation) DeepCopy() *WindowsWebAppAuthSettingsV2AppleV2Observation { + if in == nil { + return nil + } + out := new(WindowsWebAppAuthSettingsV2AppleV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppAuthSettingsV2AppleV2Parameters) DeepCopyInto(out *WindowsWebAppAuthSettingsV2AppleV2Parameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppAuthSettingsV2AppleV2Parameters. +func (in *WindowsWebAppAuthSettingsV2AppleV2Parameters) DeepCopy() *WindowsWebAppAuthSettingsV2AppleV2Parameters { + if in == nil { + return nil + } + out := new(WindowsWebAppAuthSettingsV2AppleV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppAuthSettingsV2AzureStaticWebAppV2InitParameters) DeepCopyInto(out *WindowsWebAppAuthSettingsV2AzureStaticWebAppV2InitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppAuthSettingsV2AzureStaticWebAppV2InitParameters. +func (in *WindowsWebAppAuthSettingsV2AzureStaticWebAppV2InitParameters) DeepCopy() *WindowsWebAppAuthSettingsV2AzureStaticWebAppV2InitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppAuthSettingsV2AzureStaticWebAppV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppAuthSettingsV2AzureStaticWebAppV2Observation) DeepCopyInto(out *WindowsWebAppAuthSettingsV2AzureStaticWebAppV2Observation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppAuthSettingsV2AzureStaticWebAppV2Observation. +func (in *WindowsWebAppAuthSettingsV2AzureStaticWebAppV2Observation) DeepCopy() *WindowsWebAppAuthSettingsV2AzureStaticWebAppV2Observation { + if in == nil { + return nil + } + out := new(WindowsWebAppAuthSettingsV2AzureStaticWebAppV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppAuthSettingsV2AzureStaticWebAppV2Parameters) DeepCopyInto(out *WindowsWebAppAuthSettingsV2AzureStaticWebAppV2Parameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppAuthSettingsV2AzureStaticWebAppV2Parameters. +func (in *WindowsWebAppAuthSettingsV2AzureStaticWebAppV2Parameters) DeepCopy() *WindowsWebAppAuthSettingsV2AzureStaticWebAppV2Parameters { + if in == nil { + return nil + } + out := new(WindowsWebAppAuthSettingsV2AzureStaticWebAppV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppAuthSettingsV2CustomOidcV2InitParameters) DeepCopyInto(out *WindowsWebAppAuthSettingsV2CustomOidcV2InitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NameClaimType != nil { + in, out := &in.NameClaimType, &out.NameClaimType + *out = new(string) + **out = **in + } + if in.OpenIDConfigurationEndpoint != nil { + in, out := &in.OpenIDConfigurationEndpoint, &out.OpenIDConfigurationEndpoint + *out = new(string) + **out = **in + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppAuthSettingsV2CustomOidcV2InitParameters. +func (in *WindowsWebAppAuthSettingsV2CustomOidcV2InitParameters) DeepCopy() *WindowsWebAppAuthSettingsV2CustomOidcV2InitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppAuthSettingsV2CustomOidcV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppAuthSettingsV2CustomOidcV2Observation) DeepCopyInto(out *WindowsWebAppAuthSettingsV2CustomOidcV2Observation) { + *out = *in + if in.AuthorisationEndpoint != nil { + in, out := &in.AuthorisationEndpoint, &out.AuthorisationEndpoint + *out = new(string) + **out = **in + } + if in.CertificationURI != nil { + in, out := &in.CertificationURI, &out.CertificationURI + *out = new(string) + **out = **in + } + if in.ClientCredentialMethod != nil { + in, out := &in.ClientCredentialMethod, &out.ClientCredentialMethod + *out = new(string) + **out = **in + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.IssuerEndpoint != nil { + in, out := &in.IssuerEndpoint, &out.IssuerEndpoint + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NameClaimType != nil { + in, out := &in.NameClaimType, &out.NameClaimType + *out = new(string) + **out = **in + } + if in.OpenIDConfigurationEndpoint != nil { + in, out := &in.OpenIDConfigurationEndpoint, &out.OpenIDConfigurationEndpoint + *out = new(string) + **out = **in + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TokenEndpoint != nil { + in, out := &in.TokenEndpoint, &out.TokenEndpoint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppAuthSettingsV2CustomOidcV2Observation. 
+func (in *WindowsWebAppAuthSettingsV2CustomOidcV2Observation) DeepCopy() *WindowsWebAppAuthSettingsV2CustomOidcV2Observation { + if in == nil { + return nil + } + out := new(WindowsWebAppAuthSettingsV2CustomOidcV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppAuthSettingsV2CustomOidcV2Parameters) DeepCopyInto(out *WindowsWebAppAuthSettingsV2CustomOidcV2Parameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NameClaimType != nil { + in, out := &in.NameClaimType, &out.NameClaimType + *out = new(string) + **out = **in + } + if in.OpenIDConfigurationEndpoint != nil { + in, out := &in.OpenIDConfigurationEndpoint, &out.OpenIDConfigurationEndpoint + *out = new(string) + **out = **in + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppAuthSettingsV2CustomOidcV2Parameters. +func (in *WindowsWebAppAuthSettingsV2CustomOidcV2Parameters) DeepCopy() *WindowsWebAppAuthSettingsV2CustomOidcV2Parameters { + if in == nil { + return nil + } + out := new(WindowsWebAppAuthSettingsV2CustomOidcV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppAuthSettingsV2FacebookV2InitParameters) DeepCopyInto(out *WindowsWebAppAuthSettingsV2FacebookV2InitParameters) { + *out = *in + if in.AppID != nil { + in, out := &in.AppID, &out.AppID + *out = new(string) + **out = **in + } + if in.AppSecretSettingName != nil { + in, out := &in.AppSecretSettingName, &out.AppSecretSettingName + *out = new(string) + **out = **in + } + if in.GraphAPIVersion != nil { + in, out := &in.GraphAPIVersion, &out.GraphAPIVersion + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppAuthSettingsV2FacebookV2InitParameters. +func (in *WindowsWebAppAuthSettingsV2FacebookV2InitParameters) DeepCopy() *WindowsWebAppAuthSettingsV2FacebookV2InitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppAuthSettingsV2FacebookV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppAuthSettingsV2FacebookV2Observation) DeepCopyInto(out *WindowsWebAppAuthSettingsV2FacebookV2Observation) { + *out = *in + if in.AppID != nil { + in, out := &in.AppID, &out.AppID + *out = new(string) + **out = **in + } + if in.AppSecretSettingName != nil { + in, out := &in.AppSecretSettingName, &out.AppSecretSettingName + *out = new(string) + **out = **in + } + if in.GraphAPIVersion != nil { + in, out := &in.GraphAPIVersion, &out.GraphAPIVersion + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppAuthSettingsV2FacebookV2Observation. +func (in *WindowsWebAppAuthSettingsV2FacebookV2Observation) DeepCopy() *WindowsWebAppAuthSettingsV2FacebookV2Observation { + if in == nil { + return nil + } + out := new(WindowsWebAppAuthSettingsV2FacebookV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppAuthSettingsV2FacebookV2Parameters) DeepCopyInto(out *WindowsWebAppAuthSettingsV2FacebookV2Parameters) { + *out = *in + if in.AppID != nil { + in, out := &in.AppID, &out.AppID + *out = new(string) + **out = **in + } + if in.AppSecretSettingName != nil { + in, out := &in.AppSecretSettingName, &out.AppSecretSettingName + *out = new(string) + **out = **in + } + if in.GraphAPIVersion != nil { + in, out := &in.GraphAPIVersion, &out.GraphAPIVersion + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppAuthSettingsV2FacebookV2Parameters. +func (in *WindowsWebAppAuthSettingsV2FacebookV2Parameters) DeepCopy() *WindowsWebAppAuthSettingsV2FacebookV2Parameters { + if in == nil { + return nil + } + out := new(WindowsWebAppAuthSettingsV2FacebookV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppAuthSettingsV2GithubV2InitParameters) DeepCopyInto(out *WindowsWebAppAuthSettingsV2GithubV2InitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppAuthSettingsV2GithubV2InitParameters. +func (in *WindowsWebAppAuthSettingsV2GithubV2InitParameters) DeepCopy() *WindowsWebAppAuthSettingsV2GithubV2InitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppAuthSettingsV2GithubV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppAuthSettingsV2GithubV2Observation) DeepCopyInto(out *WindowsWebAppAuthSettingsV2GithubV2Observation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppAuthSettingsV2GithubV2Observation. 
+func (in *WindowsWebAppAuthSettingsV2GithubV2Observation) DeepCopy() *WindowsWebAppAuthSettingsV2GithubV2Observation { + if in == nil { + return nil + } + out := new(WindowsWebAppAuthSettingsV2GithubV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppAuthSettingsV2GithubV2Parameters) DeepCopyInto(out *WindowsWebAppAuthSettingsV2GithubV2Parameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppAuthSettingsV2GithubV2Parameters. +func (in *WindowsWebAppAuthSettingsV2GithubV2Parameters) DeepCopy() *WindowsWebAppAuthSettingsV2GithubV2Parameters { + if in == nil { + return nil + } + out := new(WindowsWebAppAuthSettingsV2GithubV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppAuthSettingsV2GoogleV2InitParameters) DeepCopyInto(out *WindowsWebAppAuthSettingsV2GoogleV2InitParameters) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppAuthSettingsV2GoogleV2InitParameters. +func (in *WindowsWebAppAuthSettingsV2GoogleV2InitParameters) DeepCopy() *WindowsWebAppAuthSettingsV2GoogleV2InitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppAuthSettingsV2GoogleV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppAuthSettingsV2GoogleV2Observation) DeepCopyInto(out *WindowsWebAppAuthSettingsV2GoogleV2Observation) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppAuthSettingsV2GoogleV2Observation. +func (in *WindowsWebAppAuthSettingsV2GoogleV2Observation) DeepCopy() *WindowsWebAppAuthSettingsV2GoogleV2Observation { + if in == nil { + return nil + } + out := new(WindowsWebAppAuthSettingsV2GoogleV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppAuthSettingsV2GoogleV2Parameters) DeepCopyInto(out *WindowsWebAppAuthSettingsV2GoogleV2Parameters) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppAuthSettingsV2GoogleV2Parameters. +func (in *WindowsWebAppAuthSettingsV2GoogleV2Parameters) DeepCopy() *WindowsWebAppAuthSettingsV2GoogleV2Parameters { + if in == nil { + return nil + } + out := new(WindowsWebAppAuthSettingsV2GoogleV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppAuthSettingsV2InitParameters) DeepCopyInto(out *WindowsWebAppAuthSettingsV2InitParameters) { + *out = *in + if in.ActiveDirectoryV2 != nil { + in, out := &in.ActiveDirectoryV2, &out.ActiveDirectoryV2 + *out = new(WindowsWebAppAuthSettingsV2ActiveDirectoryV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.AppleV2 != nil { + in, out := &in.AppleV2, &out.AppleV2 + *out = new(WindowsWebAppAuthSettingsV2AppleV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.AuthEnabled != nil { + in, out := &in.AuthEnabled, &out.AuthEnabled + *out = new(bool) + **out = **in + } + if in.AzureStaticWebAppV2 != nil { + in, out := &in.AzureStaticWebAppV2, &out.AzureStaticWebAppV2 + *out = new(WindowsWebAppAuthSettingsV2AzureStaticWebAppV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.ConfigFilePath != nil { + in, out := &in.ConfigFilePath, &out.ConfigFilePath + *out = new(string) + **out = **in + } + if in.CustomOidcV2 != nil { + in, out := &in.CustomOidcV2, &out.CustomOidcV2 + *out = make([]WindowsWebAppAuthSettingsV2CustomOidcV2InitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultProvider != nil { + in, out := &in.DefaultProvider, &out.DefaultProvider + *out = new(string) + **out = **in + } + if in.ExcludedPaths != nil { + in, out := &in.ExcludedPaths, &out.ExcludedPaths + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FacebookV2 != nil { + in, out := &in.FacebookV2, &out.FacebookV2 + *out = new(WindowsWebAppAuthSettingsV2FacebookV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.ForwardProxyConvention != nil { + in, out := &in.ForwardProxyConvention, &out.ForwardProxyConvention + *out = new(string) + **out = **in + } + if in.ForwardProxyCustomHostHeaderName != nil { + in, out := &in.ForwardProxyCustomHostHeaderName, &out.ForwardProxyCustomHostHeaderName + *out = 
new(string) + **out = **in + } + if in.ForwardProxyCustomSchemeHeaderName != nil { + in, out := &in.ForwardProxyCustomSchemeHeaderName, &out.ForwardProxyCustomSchemeHeaderName + *out = new(string) + **out = **in + } + if in.GithubV2 != nil { + in, out := &in.GithubV2, &out.GithubV2 + *out = new(WindowsWebAppAuthSettingsV2GithubV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.GoogleV2 != nil { + in, out := &in.GoogleV2, &out.GoogleV2 + *out = new(WindowsWebAppAuthSettingsV2GoogleV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.HTTPRouteAPIPrefix != nil { + in, out := &in.HTTPRouteAPIPrefix, &out.HTTPRouteAPIPrefix + *out = new(string) + **out = **in + } + if in.Login != nil { + in, out := &in.Login, &out.Login + *out = new(WindowsWebAppAuthSettingsV2LoginInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MicrosoftV2 != nil { + in, out := &in.MicrosoftV2, &out.MicrosoftV2 + *out = new(WindowsWebAppAuthSettingsV2MicrosoftV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.RequireAuthentication != nil { + in, out := &in.RequireAuthentication, &out.RequireAuthentication + *out = new(bool) + **out = **in + } + if in.RequireHTTPS != nil { + in, out := &in.RequireHTTPS, &out.RequireHTTPS + *out = new(bool) + **out = **in + } + if in.RuntimeVersion != nil { + in, out := &in.RuntimeVersion, &out.RuntimeVersion + *out = new(string) + **out = **in + } + if in.TwitterV2 != nil { + in, out := &in.TwitterV2, &out.TwitterV2 + *out = new(WindowsWebAppAuthSettingsV2TwitterV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.UnauthenticatedAction != nil { + in, out := &in.UnauthenticatedAction, &out.UnauthenticatedAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppAuthSettingsV2InitParameters. 
+func (in *WindowsWebAppAuthSettingsV2InitParameters) DeepCopy() *WindowsWebAppAuthSettingsV2InitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppAuthSettingsV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppAuthSettingsV2LoginInitParameters) DeepCopyInto(out *WindowsWebAppAuthSettingsV2LoginInitParameters) { + *out = *in + if in.AllowedExternalRedirectUrls != nil { + in, out := &in.AllowedExternalRedirectUrls, &out.AllowedExternalRedirectUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CookieExpirationConvention != nil { + in, out := &in.CookieExpirationConvention, &out.CookieExpirationConvention + *out = new(string) + **out = **in + } + if in.CookieExpirationTime != nil { + in, out := &in.CookieExpirationTime, &out.CookieExpirationTime + *out = new(string) + **out = **in + } + if in.LogoutEndpoint != nil { + in, out := &in.LogoutEndpoint, &out.LogoutEndpoint + *out = new(string) + **out = **in + } + if in.NonceExpirationTime != nil { + in, out := &in.NonceExpirationTime, &out.NonceExpirationTime + *out = new(string) + **out = **in + } + if in.PreserveURLFragmentsForLogins != nil { + in, out := &in.PreserveURLFragmentsForLogins, &out.PreserveURLFragmentsForLogins + *out = new(bool) + **out = **in + } + if in.TokenRefreshExtensionTime != nil { + in, out := &in.TokenRefreshExtensionTime, &out.TokenRefreshExtensionTime + *out = new(float64) + **out = **in + } + if in.TokenStoreEnabled != nil { + in, out := &in.TokenStoreEnabled, &out.TokenStoreEnabled + *out = new(bool) + **out = **in + } + if in.TokenStorePath != nil { + in, out := &in.TokenStorePath, &out.TokenStorePath + *out = new(string) + **out = **in + } + if in.TokenStoreSASSettingName != nil { + in, out := 
&in.TokenStoreSASSettingName, &out.TokenStoreSASSettingName + *out = new(string) + **out = **in + } + if in.ValidateNonce != nil { + in, out := &in.ValidateNonce, &out.ValidateNonce + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppAuthSettingsV2LoginInitParameters. +func (in *WindowsWebAppAuthSettingsV2LoginInitParameters) DeepCopy() *WindowsWebAppAuthSettingsV2LoginInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppAuthSettingsV2LoginInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppAuthSettingsV2LoginObservation) DeepCopyInto(out *WindowsWebAppAuthSettingsV2LoginObservation) { + *out = *in + if in.AllowedExternalRedirectUrls != nil { + in, out := &in.AllowedExternalRedirectUrls, &out.AllowedExternalRedirectUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CookieExpirationConvention != nil { + in, out := &in.CookieExpirationConvention, &out.CookieExpirationConvention + *out = new(string) + **out = **in + } + if in.CookieExpirationTime != nil { + in, out := &in.CookieExpirationTime, &out.CookieExpirationTime + *out = new(string) + **out = **in + } + if in.LogoutEndpoint != nil { + in, out := &in.LogoutEndpoint, &out.LogoutEndpoint + *out = new(string) + **out = **in + } + if in.NonceExpirationTime != nil { + in, out := &in.NonceExpirationTime, &out.NonceExpirationTime + *out = new(string) + **out = **in + } + if in.PreserveURLFragmentsForLogins != nil { + in, out := &in.PreserveURLFragmentsForLogins, &out.PreserveURLFragmentsForLogins + *out = new(bool) + **out = **in + } + if in.TokenRefreshExtensionTime != nil { + in, out := &in.TokenRefreshExtensionTime, 
&out.TokenRefreshExtensionTime + *out = new(float64) + **out = **in + } + if in.TokenStoreEnabled != nil { + in, out := &in.TokenStoreEnabled, &out.TokenStoreEnabled + *out = new(bool) + **out = **in + } + if in.TokenStorePath != nil { + in, out := &in.TokenStorePath, &out.TokenStorePath + *out = new(string) + **out = **in + } + if in.TokenStoreSASSettingName != nil { + in, out := &in.TokenStoreSASSettingName, &out.TokenStoreSASSettingName + *out = new(string) + **out = **in + } + if in.ValidateNonce != nil { + in, out := &in.ValidateNonce, &out.ValidateNonce + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppAuthSettingsV2LoginObservation. +func (in *WindowsWebAppAuthSettingsV2LoginObservation) DeepCopy() *WindowsWebAppAuthSettingsV2LoginObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppAuthSettingsV2LoginObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppAuthSettingsV2LoginParameters) DeepCopyInto(out *WindowsWebAppAuthSettingsV2LoginParameters) { + *out = *in + if in.AllowedExternalRedirectUrls != nil { + in, out := &in.AllowedExternalRedirectUrls, &out.AllowedExternalRedirectUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CookieExpirationConvention != nil { + in, out := &in.CookieExpirationConvention, &out.CookieExpirationConvention + *out = new(string) + **out = **in + } + if in.CookieExpirationTime != nil { + in, out := &in.CookieExpirationTime, &out.CookieExpirationTime + *out = new(string) + **out = **in + } + if in.LogoutEndpoint != nil { + in, out := &in.LogoutEndpoint, &out.LogoutEndpoint + *out = new(string) + **out = **in + } + if in.NonceExpirationTime != nil { + in, out := &in.NonceExpirationTime, &out.NonceExpirationTime + *out = new(string) + **out = **in + } + if in.PreserveURLFragmentsForLogins != nil { + in, out := &in.PreserveURLFragmentsForLogins, &out.PreserveURLFragmentsForLogins + *out = new(bool) + **out = **in + } + if in.TokenRefreshExtensionTime != nil { + in, out := &in.TokenRefreshExtensionTime, &out.TokenRefreshExtensionTime + *out = new(float64) + **out = **in + } + if in.TokenStoreEnabled != nil { + in, out := &in.TokenStoreEnabled, &out.TokenStoreEnabled + *out = new(bool) + **out = **in + } + if in.TokenStorePath != nil { + in, out := &in.TokenStorePath, &out.TokenStorePath + *out = new(string) + **out = **in + } + if in.TokenStoreSASSettingName != nil { + in, out := &in.TokenStoreSASSettingName, &out.TokenStoreSASSettingName + *out = new(string) + **out = **in + } + if in.ValidateNonce != nil { + in, out := &in.ValidateNonce, &out.ValidateNonce + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppAuthSettingsV2LoginParameters. 
+func (in *WindowsWebAppAuthSettingsV2LoginParameters) DeepCopy() *WindowsWebAppAuthSettingsV2LoginParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppAuthSettingsV2LoginParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppAuthSettingsV2MicrosoftV2InitParameters) DeepCopyInto(out *WindowsWebAppAuthSettingsV2MicrosoftV2InitParameters) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppAuthSettingsV2MicrosoftV2InitParameters. +func (in *WindowsWebAppAuthSettingsV2MicrosoftV2InitParameters) DeepCopy() *WindowsWebAppAuthSettingsV2MicrosoftV2InitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppAuthSettingsV2MicrosoftV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppAuthSettingsV2MicrosoftV2Observation) DeepCopyInto(out *WindowsWebAppAuthSettingsV2MicrosoftV2Observation) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppAuthSettingsV2MicrosoftV2Observation. +func (in *WindowsWebAppAuthSettingsV2MicrosoftV2Observation) DeepCopy() *WindowsWebAppAuthSettingsV2MicrosoftV2Observation { + if in == nil { + return nil + } + out := new(WindowsWebAppAuthSettingsV2MicrosoftV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppAuthSettingsV2MicrosoftV2Parameters) DeepCopyInto(out *WindowsWebAppAuthSettingsV2MicrosoftV2Parameters) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppAuthSettingsV2MicrosoftV2Parameters. +func (in *WindowsWebAppAuthSettingsV2MicrosoftV2Parameters) DeepCopy() *WindowsWebAppAuthSettingsV2MicrosoftV2Parameters { + if in == nil { + return nil + } + out := new(WindowsWebAppAuthSettingsV2MicrosoftV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppAuthSettingsV2Observation) DeepCopyInto(out *WindowsWebAppAuthSettingsV2Observation) { + *out = *in + if in.ActiveDirectoryV2 != nil { + in, out := &in.ActiveDirectoryV2, &out.ActiveDirectoryV2 + *out = new(WindowsWebAppAuthSettingsV2ActiveDirectoryV2Observation) + (*in).DeepCopyInto(*out) + } + if in.AppleV2 != nil { + in, out := &in.AppleV2, &out.AppleV2 + *out = new(WindowsWebAppAuthSettingsV2AppleV2Observation) + (*in).DeepCopyInto(*out) + } + if in.AuthEnabled != nil { + in, out := &in.AuthEnabled, &out.AuthEnabled + *out = new(bool) + **out = **in + } + if in.AzureStaticWebAppV2 != nil { + in, out := &in.AzureStaticWebAppV2, &out.AzureStaticWebAppV2 + *out = new(WindowsWebAppAuthSettingsV2AzureStaticWebAppV2Observation) + (*in).DeepCopyInto(*out) + } + if in.ConfigFilePath != nil { + in, out := &in.ConfigFilePath, &out.ConfigFilePath + *out = new(string) + **out = **in + } + if in.CustomOidcV2 != nil { + in, out := &in.CustomOidcV2, &out.CustomOidcV2 + *out = make([]WindowsWebAppAuthSettingsV2CustomOidcV2Observation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultProvider != nil { + in, out := &in.DefaultProvider, &out.DefaultProvider + *out = new(string) + **out = **in + } + if in.ExcludedPaths != nil { + in, out := &in.ExcludedPaths, &out.ExcludedPaths + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FacebookV2 != nil { + in, out := &in.FacebookV2, &out.FacebookV2 + *out = new(WindowsWebAppAuthSettingsV2FacebookV2Observation) + (*in).DeepCopyInto(*out) + } + if in.ForwardProxyConvention != nil { + in, out := &in.ForwardProxyConvention, &out.ForwardProxyConvention + *out = new(string) + **out = **in + } + if in.ForwardProxyCustomHostHeaderName != nil { + in, out := &in.ForwardProxyCustomHostHeaderName, &out.ForwardProxyCustomHostHeaderName + *out = new(string) + **out = 
**in + } + if in.ForwardProxyCustomSchemeHeaderName != nil { + in, out := &in.ForwardProxyCustomSchemeHeaderName, &out.ForwardProxyCustomSchemeHeaderName + *out = new(string) + **out = **in + } + if in.GithubV2 != nil { + in, out := &in.GithubV2, &out.GithubV2 + *out = new(WindowsWebAppAuthSettingsV2GithubV2Observation) + (*in).DeepCopyInto(*out) + } + if in.GoogleV2 != nil { + in, out := &in.GoogleV2, &out.GoogleV2 + *out = new(WindowsWebAppAuthSettingsV2GoogleV2Observation) + (*in).DeepCopyInto(*out) + } + if in.HTTPRouteAPIPrefix != nil { + in, out := &in.HTTPRouteAPIPrefix, &out.HTTPRouteAPIPrefix + *out = new(string) + **out = **in + } + if in.Login != nil { + in, out := &in.Login, &out.Login + *out = new(WindowsWebAppAuthSettingsV2LoginObservation) + (*in).DeepCopyInto(*out) + } + if in.MicrosoftV2 != nil { + in, out := &in.MicrosoftV2, &out.MicrosoftV2 + *out = new(WindowsWebAppAuthSettingsV2MicrosoftV2Observation) + (*in).DeepCopyInto(*out) + } + if in.RequireAuthentication != nil { + in, out := &in.RequireAuthentication, &out.RequireAuthentication + *out = new(bool) + **out = **in + } + if in.RequireHTTPS != nil { + in, out := &in.RequireHTTPS, &out.RequireHTTPS + *out = new(bool) + **out = **in + } + if in.RuntimeVersion != nil { + in, out := &in.RuntimeVersion, &out.RuntimeVersion + *out = new(string) + **out = **in + } + if in.TwitterV2 != nil { + in, out := &in.TwitterV2, &out.TwitterV2 + *out = new(WindowsWebAppAuthSettingsV2TwitterV2Observation) + (*in).DeepCopyInto(*out) + } + if in.UnauthenticatedAction != nil { + in, out := &in.UnauthenticatedAction, &out.UnauthenticatedAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppAuthSettingsV2Observation. 
+func (in *WindowsWebAppAuthSettingsV2Observation) DeepCopy() *WindowsWebAppAuthSettingsV2Observation { + if in == nil { + return nil + } + out := new(WindowsWebAppAuthSettingsV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppAuthSettingsV2Parameters) DeepCopyInto(out *WindowsWebAppAuthSettingsV2Parameters) { + *out = *in + if in.ActiveDirectoryV2 != nil { + in, out := &in.ActiveDirectoryV2, &out.ActiveDirectoryV2 + *out = new(WindowsWebAppAuthSettingsV2ActiveDirectoryV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.AppleV2 != nil { + in, out := &in.AppleV2, &out.AppleV2 + *out = new(WindowsWebAppAuthSettingsV2AppleV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.AuthEnabled != nil { + in, out := &in.AuthEnabled, &out.AuthEnabled + *out = new(bool) + **out = **in + } + if in.AzureStaticWebAppV2 != nil { + in, out := &in.AzureStaticWebAppV2, &out.AzureStaticWebAppV2 + *out = new(WindowsWebAppAuthSettingsV2AzureStaticWebAppV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.ConfigFilePath != nil { + in, out := &in.ConfigFilePath, &out.ConfigFilePath + *out = new(string) + **out = **in + } + if in.CustomOidcV2 != nil { + in, out := &in.CustomOidcV2, &out.CustomOidcV2 + *out = make([]WindowsWebAppAuthSettingsV2CustomOidcV2Parameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultProvider != nil { + in, out := &in.DefaultProvider, &out.DefaultProvider + *out = new(string) + **out = **in + } + if in.ExcludedPaths != nil { + in, out := &in.ExcludedPaths, &out.ExcludedPaths + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FacebookV2 != nil { + in, out := &in.FacebookV2, &out.FacebookV2 + *out = new(WindowsWebAppAuthSettingsV2FacebookV2Parameters) + 
(*in).DeepCopyInto(*out) + } + if in.ForwardProxyConvention != nil { + in, out := &in.ForwardProxyConvention, &out.ForwardProxyConvention + *out = new(string) + **out = **in + } + if in.ForwardProxyCustomHostHeaderName != nil { + in, out := &in.ForwardProxyCustomHostHeaderName, &out.ForwardProxyCustomHostHeaderName + *out = new(string) + **out = **in + } + if in.ForwardProxyCustomSchemeHeaderName != nil { + in, out := &in.ForwardProxyCustomSchemeHeaderName, &out.ForwardProxyCustomSchemeHeaderName + *out = new(string) + **out = **in + } + if in.GithubV2 != nil { + in, out := &in.GithubV2, &out.GithubV2 + *out = new(WindowsWebAppAuthSettingsV2GithubV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.GoogleV2 != nil { + in, out := &in.GoogleV2, &out.GoogleV2 + *out = new(WindowsWebAppAuthSettingsV2GoogleV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.HTTPRouteAPIPrefix != nil { + in, out := &in.HTTPRouteAPIPrefix, &out.HTTPRouteAPIPrefix + *out = new(string) + **out = **in + } + if in.Login != nil { + in, out := &in.Login, &out.Login + *out = new(WindowsWebAppAuthSettingsV2LoginParameters) + (*in).DeepCopyInto(*out) + } + if in.MicrosoftV2 != nil { + in, out := &in.MicrosoftV2, &out.MicrosoftV2 + *out = new(WindowsWebAppAuthSettingsV2MicrosoftV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.RequireAuthentication != nil { + in, out := &in.RequireAuthentication, &out.RequireAuthentication + *out = new(bool) + **out = **in + } + if in.RequireHTTPS != nil { + in, out := &in.RequireHTTPS, &out.RequireHTTPS + *out = new(bool) + **out = **in + } + if in.RuntimeVersion != nil { + in, out := &in.RuntimeVersion, &out.RuntimeVersion + *out = new(string) + **out = **in + } + if in.TwitterV2 != nil { + in, out := &in.TwitterV2, &out.TwitterV2 + *out = new(WindowsWebAppAuthSettingsV2TwitterV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.UnauthenticatedAction != nil { + in, out := &in.UnauthenticatedAction, &out.UnauthenticatedAction + *out = new(string) + **out = 
**in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppAuthSettingsV2Parameters. +func (in *WindowsWebAppAuthSettingsV2Parameters) DeepCopy() *WindowsWebAppAuthSettingsV2Parameters { + if in == nil { + return nil + } + out := new(WindowsWebAppAuthSettingsV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppAuthSettingsV2TwitterV2InitParameters) DeepCopyInto(out *WindowsWebAppAuthSettingsV2TwitterV2InitParameters) { + *out = *in + if in.ConsumerKey != nil { + in, out := &in.ConsumerKey, &out.ConsumerKey + *out = new(string) + **out = **in + } + if in.ConsumerSecretSettingName != nil { + in, out := &in.ConsumerSecretSettingName, &out.ConsumerSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppAuthSettingsV2TwitterV2InitParameters. +func (in *WindowsWebAppAuthSettingsV2TwitterV2InitParameters) DeepCopy() *WindowsWebAppAuthSettingsV2TwitterV2InitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppAuthSettingsV2TwitterV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppAuthSettingsV2TwitterV2Observation) DeepCopyInto(out *WindowsWebAppAuthSettingsV2TwitterV2Observation) { + *out = *in + if in.ConsumerKey != nil { + in, out := &in.ConsumerKey, &out.ConsumerKey + *out = new(string) + **out = **in + } + if in.ConsumerSecretSettingName != nil { + in, out := &in.ConsumerSecretSettingName, &out.ConsumerSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppAuthSettingsV2TwitterV2Observation. 
+func (in *WindowsWebAppAuthSettingsV2TwitterV2Observation) DeepCopy() *WindowsWebAppAuthSettingsV2TwitterV2Observation { + if in == nil { + return nil + } + out := new(WindowsWebAppAuthSettingsV2TwitterV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppAuthSettingsV2TwitterV2Parameters) DeepCopyInto(out *WindowsWebAppAuthSettingsV2TwitterV2Parameters) { + *out = *in + if in.ConsumerKey != nil { + in, out := &in.ConsumerKey, &out.ConsumerKey + *out = new(string) + **out = **in + } + if in.ConsumerSecretSettingName != nil { + in, out := &in.ConsumerSecretSettingName, &out.ConsumerSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppAuthSettingsV2TwitterV2Parameters. +func (in *WindowsWebAppAuthSettingsV2TwitterV2Parameters) DeepCopy() *WindowsWebAppAuthSettingsV2TwitterV2Parameters { + if in == nil { + return nil + } + out := new(WindowsWebAppAuthSettingsV2TwitterV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppBackupInitParameters) DeepCopyInto(out *WindowsWebAppBackupInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(WindowsWebAppBackupScheduleInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppBackupInitParameters. 
+func (in *WindowsWebAppBackupInitParameters) DeepCopy() *WindowsWebAppBackupInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppBackupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppBackupObservation) DeepCopyInto(out *WindowsWebAppBackupObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(WindowsWebAppBackupScheduleObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppBackupObservation. +func (in *WindowsWebAppBackupObservation) DeepCopy() *WindowsWebAppBackupObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppBackupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppBackupParameters) DeepCopyInto(out *WindowsWebAppBackupParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(WindowsWebAppBackupScheduleParameters) + (*in).DeepCopyInto(*out) + } + out.StorageAccountURLSecretRef = in.StorageAccountURLSecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppBackupParameters. 
+func (in *WindowsWebAppBackupParameters) DeepCopy() *WindowsWebAppBackupParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppBackupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppBackupScheduleInitParameters) DeepCopyInto(out *WindowsWebAppBackupScheduleInitParameters) { + *out = *in + if in.FrequencyInterval != nil { + in, out := &in.FrequencyInterval, &out.FrequencyInterval + *out = new(float64) + **out = **in + } + if in.FrequencyUnit != nil { + in, out := &in.FrequencyUnit, &out.FrequencyUnit + *out = new(string) + **out = **in + } + if in.KeepAtLeastOneBackup != nil { + in, out := &in.KeepAtLeastOneBackup, &out.KeepAtLeastOneBackup + *out = new(bool) + **out = **in + } + if in.RetentionPeriodDays != nil { + in, out := &in.RetentionPeriodDays, &out.RetentionPeriodDays + *out = new(float64) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppBackupScheduleInitParameters. +func (in *WindowsWebAppBackupScheduleInitParameters) DeepCopy() *WindowsWebAppBackupScheduleInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppBackupScheduleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppBackupScheduleObservation) DeepCopyInto(out *WindowsWebAppBackupScheduleObservation) { + *out = *in + if in.FrequencyInterval != nil { + in, out := &in.FrequencyInterval, &out.FrequencyInterval + *out = new(float64) + **out = **in + } + if in.FrequencyUnit != nil { + in, out := &in.FrequencyUnit, &out.FrequencyUnit + *out = new(string) + **out = **in + } + if in.KeepAtLeastOneBackup != nil { + in, out := &in.KeepAtLeastOneBackup, &out.KeepAtLeastOneBackup + *out = new(bool) + **out = **in + } + if in.LastExecutionTime != nil { + in, out := &in.LastExecutionTime, &out.LastExecutionTime + *out = new(string) + **out = **in + } + if in.RetentionPeriodDays != nil { + in, out := &in.RetentionPeriodDays, &out.RetentionPeriodDays + *out = new(float64) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppBackupScheduleObservation. +func (in *WindowsWebAppBackupScheduleObservation) DeepCopy() *WindowsWebAppBackupScheduleObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppBackupScheduleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppBackupScheduleParameters) DeepCopyInto(out *WindowsWebAppBackupScheduleParameters) { + *out = *in + if in.FrequencyInterval != nil { + in, out := &in.FrequencyInterval, &out.FrequencyInterval + *out = new(float64) + **out = **in + } + if in.FrequencyUnit != nil { + in, out := &in.FrequencyUnit, &out.FrequencyUnit + *out = new(string) + **out = **in + } + if in.KeepAtLeastOneBackup != nil { + in, out := &in.KeepAtLeastOneBackup, &out.KeepAtLeastOneBackup + *out = new(bool) + **out = **in + } + if in.RetentionPeriodDays != nil { + in, out := &in.RetentionPeriodDays, &out.RetentionPeriodDays + *out = new(float64) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppBackupScheduleParameters. +func (in *WindowsWebAppBackupScheduleParameters) DeepCopy() *WindowsWebAppBackupScheduleParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppBackupScheduleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppConnectionStringInitParameters) DeepCopyInto(out *WindowsWebAppConnectionStringInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppConnectionStringInitParameters. 
+func (in *WindowsWebAppConnectionStringInitParameters) DeepCopy() *WindowsWebAppConnectionStringInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppConnectionStringInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppConnectionStringObservation) DeepCopyInto(out *WindowsWebAppConnectionStringObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppConnectionStringObservation. +func (in *WindowsWebAppConnectionStringObservation) DeepCopy() *WindowsWebAppConnectionStringObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppConnectionStringObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppConnectionStringParameters) DeepCopyInto(out *WindowsWebAppConnectionStringParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + out.ValueSecretRef = in.ValueSecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppConnectionStringParameters. +func (in *WindowsWebAppConnectionStringParameters) DeepCopy() *WindowsWebAppConnectionStringParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppConnectionStringParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *WindowsWebAppIdentityInitParameters) DeepCopyInto(out *WindowsWebAppIdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppIdentityInitParameters. +func (in *WindowsWebAppIdentityInitParameters) DeepCopy() *WindowsWebAppIdentityInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppIdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppIdentityObservation) DeepCopyInto(out *WindowsWebAppIdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppIdentityObservation. 
+func (in *WindowsWebAppIdentityObservation) DeepCopy() *WindowsWebAppIdentityObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppIdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppIdentityParameters) DeepCopyInto(out *WindowsWebAppIdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppIdentityParameters. +func (in *WindowsWebAppIdentityParameters) DeepCopy() *WindowsWebAppIdentityParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppIdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppInitParameters) DeepCopyInto(out *WindowsWebAppInitParameters) { + *out = *in + if in.AppSettings != nil { + in, out := &in.AppSettings, &out.AppSettings + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AuthSettings != nil { + in, out := &in.AuthSettings, &out.AuthSettings + *out = new(WindowsWebAppAuthSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AuthSettingsV2 != nil { + in, out := &in.AuthSettingsV2, &out.AuthSettingsV2 + *out = new(WindowsWebAppAuthSettingsV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.Backup != nil { + in, out := &in.Backup, &out.Backup + *out = new(WindowsWebAppBackupInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ClientAffinityEnabled != nil { + in, out := &in.ClientAffinityEnabled, &out.ClientAffinityEnabled + *out = new(bool) + **out = **in + } + if in.ClientCertificateEnabled != nil { + in, out := &in.ClientCertificateEnabled, &out.ClientCertificateEnabled + *out = new(bool) + **out = **in + } + if in.ClientCertificateExclusionPaths != nil { + in, out := &in.ClientCertificateExclusionPaths, &out.ClientCertificateExclusionPaths + *out = new(string) + **out = **in + } + if in.ClientCertificateMode != nil { + in, out := &in.ClientCertificateMode, &out.ClientCertificateMode + *out = new(string) + **out = **in + } + if in.ConnectionString != nil { + in, out := &in.ConnectionString, &out.ConnectionString + *out = make([]WindowsWebAppConnectionStringInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.FtpPublishBasicAuthenticationEnabled != nil { + in, out := &in.FtpPublishBasicAuthenticationEnabled, 
&out.FtpPublishBasicAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.HTTPSOnly != nil { + in, out := &in.HTTPSOnly, &out.HTTPSOnly + *out = new(bool) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(WindowsWebAppIdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.KeyVaultReferenceIdentityID != nil { + in, out := &in.KeyVaultReferenceIdentityID, &out.KeyVaultReferenceIdentityID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Logs != nil { + in, out := &in.Logs, &out.Logs + *out = new(WindowsWebAppLogsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ServicePlanID != nil { + in, out := &in.ServicePlanID, &out.ServicePlanID + *out = new(string) + **out = **in + } + if in.ServicePlanIDRef != nil { + in, out := &in.ServicePlanIDRef, &out.ServicePlanIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServicePlanIDSelector != nil { + in, out := &in.ServicePlanIDSelector, &out.ServicePlanIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SiteConfig != nil { + in, out := &in.SiteConfig, &out.SiteConfig + *out = new(WindowsWebAppSiteConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StickySettings != nil { + in, out := &in.StickySettings, &out.StickySettings + *out = new(WindowsWebAppStickySettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StorageAccount != nil { + in, out := &in.StorageAccount, &out.StorageAccount + *out = make([]WindowsWebAppStorageAccountInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := 
range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetIDRef != nil { + in, out := &in.VirtualNetworkSubnetIDRef, &out.VirtualNetworkSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkSubnetIDSelector != nil { + in, out := &in.VirtualNetworkSubnetIDSelector, &out.VirtualNetworkSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.WebdeployPublishBasicAuthenticationEnabled != nil { + in, out := &in.WebdeployPublishBasicAuthenticationEnabled, &out.WebdeployPublishBasicAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.ZipDeployFile != nil { + in, out := &in.ZipDeployFile, &out.ZipDeployFile + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppInitParameters. +func (in *WindowsWebAppInitParameters) DeepCopy() *WindowsWebAppInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppList) DeepCopyInto(out *WindowsWebAppList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]WindowsWebApp, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppList. 
+func (in *WindowsWebAppList) DeepCopy() *WindowsWebAppList { + if in == nil { + return nil + } + out := new(WindowsWebAppList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WindowsWebAppList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppLogsApplicationLogsInitParameters) DeepCopyInto(out *WindowsWebAppLogsApplicationLogsInitParameters) { + *out = *in + if in.AzureBlobStorage != nil { + in, out := &in.AzureBlobStorage, &out.AzureBlobStorage + *out = new(LogsApplicationLogsAzureBlobStorageInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FileSystemLevel != nil { + in, out := &in.FileSystemLevel, &out.FileSystemLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppLogsApplicationLogsInitParameters. +func (in *WindowsWebAppLogsApplicationLogsInitParameters) DeepCopy() *WindowsWebAppLogsApplicationLogsInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppLogsApplicationLogsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppLogsApplicationLogsObservation) DeepCopyInto(out *WindowsWebAppLogsApplicationLogsObservation) { + *out = *in + if in.AzureBlobStorage != nil { + in, out := &in.AzureBlobStorage, &out.AzureBlobStorage + *out = new(LogsApplicationLogsAzureBlobStorageObservation) + (*in).DeepCopyInto(*out) + } + if in.FileSystemLevel != nil { + in, out := &in.FileSystemLevel, &out.FileSystemLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppLogsApplicationLogsObservation. +func (in *WindowsWebAppLogsApplicationLogsObservation) DeepCopy() *WindowsWebAppLogsApplicationLogsObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppLogsApplicationLogsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppLogsApplicationLogsParameters) DeepCopyInto(out *WindowsWebAppLogsApplicationLogsParameters) { + *out = *in + if in.AzureBlobStorage != nil { + in, out := &in.AzureBlobStorage, &out.AzureBlobStorage + *out = new(LogsApplicationLogsAzureBlobStorageParameters) + (*in).DeepCopyInto(*out) + } + if in.FileSystemLevel != nil { + in, out := &in.FileSystemLevel, &out.FileSystemLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppLogsApplicationLogsParameters. +func (in *WindowsWebAppLogsApplicationLogsParameters) DeepCopy() *WindowsWebAppLogsApplicationLogsParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppLogsApplicationLogsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppLogsHTTPLogsAzureBlobStorageInitParameters) DeepCopyInto(out *WindowsWebAppLogsHTTPLogsAzureBlobStorageInitParameters) { + *out = *in + if in.RetentionInDays != nil { + in, out := &in.RetentionInDays, &out.RetentionInDays + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppLogsHTTPLogsAzureBlobStorageInitParameters. +func (in *WindowsWebAppLogsHTTPLogsAzureBlobStorageInitParameters) DeepCopy() *WindowsWebAppLogsHTTPLogsAzureBlobStorageInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppLogsHTTPLogsAzureBlobStorageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppLogsHTTPLogsAzureBlobStorageObservation) DeepCopyInto(out *WindowsWebAppLogsHTTPLogsAzureBlobStorageObservation) { + *out = *in + if in.RetentionInDays != nil { + in, out := &in.RetentionInDays, &out.RetentionInDays + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppLogsHTTPLogsAzureBlobStorageObservation. +func (in *WindowsWebAppLogsHTTPLogsAzureBlobStorageObservation) DeepCopy() *WindowsWebAppLogsHTTPLogsAzureBlobStorageObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppLogsHTTPLogsAzureBlobStorageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppLogsHTTPLogsAzureBlobStorageParameters) DeepCopyInto(out *WindowsWebAppLogsHTTPLogsAzureBlobStorageParameters) { + *out = *in + if in.RetentionInDays != nil { + in, out := &in.RetentionInDays, &out.RetentionInDays + *out = new(float64) + **out = **in + } + out.SASURLSecretRef = in.SASURLSecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppLogsHTTPLogsAzureBlobStorageParameters. +func (in *WindowsWebAppLogsHTTPLogsAzureBlobStorageParameters) DeepCopy() *WindowsWebAppLogsHTTPLogsAzureBlobStorageParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppLogsHTTPLogsAzureBlobStorageParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppLogsHTTPLogsInitParameters) DeepCopyInto(out *WindowsWebAppLogsHTTPLogsInitParameters) { + *out = *in + if in.AzureBlobStorage != nil { + in, out := &in.AzureBlobStorage, &out.AzureBlobStorage + *out = new(WindowsWebAppLogsHTTPLogsAzureBlobStorageInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FileSystem != nil { + in, out := &in.FileSystem, &out.FileSystem + *out = new(LogsHTTPLogsFileSystemInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppLogsHTTPLogsInitParameters. +func (in *WindowsWebAppLogsHTTPLogsInitParameters) DeepCopy() *WindowsWebAppLogsHTTPLogsInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppLogsHTTPLogsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppLogsHTTPLogsObservation) DeepCopyInto(out *WindowsWebAppLogsHTTPLogsObservation) { + *out = *in + if in.AzureBlobStorage != nil { + in, out := &in.AzureBlobStorage, &out.AzureBlobStorage + *out = new(WindowsWebAppLogsHTTPLogsAzureBlobStorageObservation) + (*in).DeepCopyInto(*out) + } + if in.FileSystem != nil { + in, out := &in.FileSystem, &out.FileSystem + *out = new(LogsHTTPLogsFileSystemObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppLogsHTTPLogsObservation. +func (in *WindowsWebAppLogsHTTPLogsObservation) DeepCopy() *WindowsWebAppLogsHTTPLogsObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppLogsHTTPLogsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppLogsHTTPLogsParameters) DeepCopyInto(out *WindowsWebAppLogsHTTPLogsParameters) { + *out = *in + if in.AzureBlobStorage != nil { + in, out := &in.AzureBlobStorage, &out.AzureBlobStorage + *out = new(WindowsWebAppLogsHTTPLogsAzureBlobStorageParameters) + (*in).DeepCopyInto(*out) + } + if in.FileSystem != nil { + in, out := &in.FileSystem, &out.FileSystem + *out = new(LogsHTTPLogsFileSystemParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppLogsHTTPLogsParameters. +func (in *WindowsWebAppLogsHTTPLogsParameters) DeepCopy() *WindowsWebAppLogsHTTPLogsParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppLogsHTTPLogsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppLogsInitParameters) DeepCopyInto(out *WindowsWebAppLogsInitParameters) { + *out = *in + if in.ApplicationLogs != nil { + in, out := &in.ApplicationLogs, &out.ApplicationLogs + *out = new(WindowsWebAppLogsApplicationLogsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DetailedErrorMessages != nil { + in, out := &in.DetailedErrorMessages, &out.DetailedErrorMessages + *out = new(bool) + **out = **in + } + if in.FailedRequestTracing != nil { + in, out := &in.FailedRequestTracing, &out.FailedRequestTracing + *out = new(bool) + **out = **in + } + if in.HTTPLogs != nil { + in, out := &in.HTTPLogs, &out.HTTPLogs + *out = new(WindowsWebAppLogsHTTPLogsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppLogsInitParameters. +func (in *WindowsWebAppLogsInitParameters) DeepCopy() *WindowsWebAppLogsInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppLogsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppLogsObservation) DeepCopyInto(out *WindowsWebAppLogsObservation) { + *out = *in + if in.ApplicationLogs != nil { + in, out := &in.ApplicationLogs, &out.ApplicationLogs + *out = new(WindowsWebAppLogsApplicationLogsObservation) + (*in).DeepCopyInto(*out) + } + if in.DetailedErrorMessages != nil { + in, out := &in.DetailedErrorMessages, &out.DetailedErrorMessages + *out = new(bool) + **out = **in + } + if in.FailedRequestTracing != nil { + in, out := &in.FailedRequestTracing, &out.FailedRequestTracing + *out = new(bool) + **out = **in + } + if in.HTTPLogs != nil { + in, out := &in.HTTPLogs, &out.HTTPLogs + *out = new(WindowsWebAppLogsHTTPLogsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppLogsObservation. +func (in *WindowsWebAppLogsObservation) DeepCopy() *WindowsWebAppLogsObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppLogsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppLogsParameters) DeepCopyInto(out *WindowsWebAppLogsParameters) { + *out = *in + if in.ApplicationLogs != nil { + in, out := &in.ApplicationLogs, &out.ApplicationLogs + *out = new(WindowsWebAppLogsApplicationLogsParameters) + (*in).DeepCopyInto(*out) + } + if in.DetailedErrorMessages != nil { + in, out := &in.DetailedErrorMessages, &out.DetailedErrorMessages + *out = new(bool) + **out = **in + } + if in.FailedRequestTracing != nil { + in, out := &in.FailedRequestTracing, &out.FailedRequestTracing + *out = new(bool) + **out = **in + } + if in.HTTPLogs != nil { + in, out := &in.HTTPLogs, &out.HTTPLogs + *out = new(WindowsWebAppLogsHTTPLogsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppLogsParameters. 
+func (in *WindowsWebAppLogsParameters) DeepCopy() *WindowsWebAppLogsParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppLogsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppObservation) DeepCopyInto(out *WindowsWebAppObservation) { + *out = *in + if in.AppSettings != nil { + in, out := &in.AppSettings, &out.AppSettings + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AuthSettings != nil { + in, out := &in.AuthSettings, &out.AuthSettings + *out = new(WindowsWebAppAuthSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.AuthSettingsV2 != nil { + in, out := &in.AuthSettingsV2, &out.AuthSettingsV2 + *out = new(WindowsWebAppAuthSettingsV2Observation) + (*in).DeepCopyInto(*out) + } + if in.Backup != nil { + in, out := &in.Backup, &out.Backup + *out = new(WindowsWebAppBackupObservation) + (*in).DeepCopyInto(*out) + } + if in.ClientAffinityEnabled != nil { + in, out := &in.ClientAffinityEnabled, &out.ClientAffinityEnabled + *out = new(bool) + **out = **in + } + if in.ClientCertificateEnabled != nil { + in, out := &in.ClientCertificateEnabled, &out.ClientCertificateEnabled + *out = new(bool) + **out = **in + } + if in.ClientCertificateExclusionPaths != nil { + in, out := &in.ClientCertificateExclusionPaths, &out.ClientCertificateExclusionPaths + *out = new(string) + **out = **in + } + if in.ClientCertificateMode != nil { + in, out := &in.ClientCertificateMode, &out.ClientCertificateMode + *out = new(string) + **out = **in + } + if in.ConnectionString != nil { + in, out := &in.ConnectionString, &out.ConnectionString + *out = make([]WindowsWebAppConnectionStringObservation, len(*in)) + 
for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultHostName != nil { + in, out := &in.DefaultHostName, &out.DefaultHostName + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.FtpPublishBasicAuthenticationEnabled != nil { + in, out := &in.FtpPublishBasicAuthenticationEnabled, &out.FtpPublishBasicAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.HTTPSOnly != nil { + in, out := &in.HTTPSOnly, &out.HTTPSOnly + *out = new(bool) + **out = **in + } + if in.HostingEnvironmentID != nil { + in, out := &in.HostingEnvironmentID, &out.HostingEnvironmentID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(WindowsWebAppIdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.KeyVaultReferenceIdentityID != nil { + in, out := &in.KeyVaultReferenceIdentityID, &out.KeyVaultReferenceIdentityID + *out = new(string) + **out = **in + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Logs != nil { + in, out := &in.Logs, &out.Logs + *out = new(WindowsWebAppLogsObservation) + (*in).DeepCopyInto(*out) + } + if in.OutboundIPAddressList != nil { + in, out := &in.OutboundIPAddressList, &out.OutboundIPAddressList + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OutboundIPAddresses != nil { + in, out := &in.OutboundIPAddresses, &out.OutboundIPAddresses + *out = new(string) + **out = **in + } + if in.PossibleOutboundIPAddressList != nil { + in, out := &in.PossibleOutboundIPAddressList, 
&out.PossibleOutboundIPAddressList + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PossibleOutboundIPAddresses != nil { + in, out := &in.PossibleOutboundIPAddresses, &out.PossibleOutboundIPAddresses + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ServicePlanID != nil { + in, out := &in.ServicePlanID, &out.ServicePlanID + *out = new(string) + **out = **in + } + if in.SiteConfig != nil { + in, out := &in.SiteConfig, &out.SiteConfig + *out = new(WindowsWebAppSiteConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.StickySettings != nil { + in, out := &in.StickySettings, &out.StickySettings + *out = new(WindowsWebAppStickySettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.StorageAccount != nil { + in, out := &in.StorageAccount, &out.StorageAccount + *out = make([]WindowsWebAppStorageAccountObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.WebdeployPublishBasicAuthenticationEnabled != nil { + in, out := &in.WebdeployPublishBasicAuthenticationEnabled, &out.WebdeployPublishBasicAuthenticationEnabled + *out = new(bool) + **out = **in + } + if 
in.ZipDeployFile != nil { + in, out := &in.ZipDeployFile, &out.ZipDeployFile + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppObservation. +func (in *WindowsWebAppObservation) DeepCopy() *WindowsWebAppObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppParameters) DeepCopyInto(out *WindowsWebAppParameters) { + *out = *in + if in.AppSettings != nil { + in, out := &in.AppSettings, &out.AppSettings + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AuthSettings != nil { + in, out := &in.AuthSettings, &out.AuthSettings + *out = new(WindowsWebAppAuthSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.AuthSettingsV2 != nil { + in, out := &in.AuthSettingsV2, &out.AuthSettingsV2 + *out = new(WindowsWebAppAuthSettingsV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.Backup != nil { + in, out := &in.Backup, &out.Backup + *out = new(WindowsWebAppBackupParameters) + (*in).DeepCopyInto(*out) + } + if in.ClientAffinityEnabled != nil { + in, out := &in.ClientAffinityEnabled, &out.ClientAffinityEnabled + *out = new(bool) + **out = **in + } + if in.ClientCertificateEnabled != nil { + in, out := &in.ClientCertificateEnabled, &out.ClientCertificateEnabled + *out = new(bool) + **out = **in + } + if in.ClientCertificateExclusionPaths != nil { + in, out := &in.ClientCertificateExclusionPaths, &out.ClientCertificateExclusionPaths + *out = new(string) + **out = **in + } + if in.ClientCertificateMode != nil { + in, out := &in.ClientCertificateMode, 
&out.ClientCertificateMode + *out = new(string) + **out = **in + } + if in.ConnectionString != nil { + in, out := &in.ConnectionString, &out.ConnectionString + *out = make([]WindowsWebAppConnectionStringParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.FtpPublishBasicAuthenticationEnabled != nil { + in, out := &in.FtpPublishBasicAuthenticationEnabled, &out.FtpPublishBasicAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.HTTPSOnly != nil { + in, out := &in.HTTPSOnly, &out.HTTPSOnly + *out = new(bool) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(WindowsWebAppIdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.KeyVaultReferenceIdentityID != nil { + in, out := &in.KeyVaultReferenceIdentityID, &out.KeyVaultReferenceIdentityID + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Logs != nil { + in, out := &in.Logs, &out.Logs + *out = new(WindowsWebAppLogsParameters) + (*in).DeepCopyInto(*out) + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ResourceGroupName != nil { + in, out := &in.ResourceGroupName, &out.ResourceGroupName + *out = new(string) + **out = **in + } + if in.ResourceGroupNameRef != nil { + in, out := &in.ResourceGroupNameRef, &out.ResourceGroupNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ResourceGroupNameSelector != nil { + in, out := &in.ResourceGroupNameSelector, &out.ResourceGroupNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ServicePlanID != nil { + in, out := &in.ServicePlanID, &out.ServicePlanID + *out = new(string) + **out = **in + } + if 
in.ServicePlanIDRef != nil { + in, out := &in.ServicePlanIDRef, &out.ServicePlanIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServicePlanIDSelector != nil { + in, out := &in.ServicePlanIDSelector, &out.ServicePlanIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.SiteConfig != nil { + in, out := &in.SiteConfig, &out.SiteConfig + *out = new(WindowsWebAppSiteConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.StickySettings != nil { + in, out := &in.StickySettings, &out.StickySettings + *out = new(WindowsWebAppStickySettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.StorageAccount != nil { + in, out := &in.StorageAccount, &out.StorageAccount + *out = make([]WindowsWebAppStorageAccountParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetIDRef != nil { + in, out := &in.VirtualNetworkSubnetIDRef, &out.VirtualNetworkSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkSubnetIDSelector != nil { + in, out := &in.VirtualNetworkSubnetIDSelector, &out.VirtualNetworkSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.WebdeployPublishBasicAuthenticationEnabled != nil { + in, out := &in.WebdeployPublishBasicAuthenticationEnabled, &out.WebdeployPublishBasicAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.ZipDeployFile != nil { + in, out := &in.ZipDeployFile, &out.ZipDeployFile + *out = new(string) + **out = 
**in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppParameters. +func (in *WindowsWebAppParameters) DeepCopy() *WindowsWebAppParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSiteConfigApplicationStackInitParameters) DeepCopyInto(out *WindowsWebAppSiteConfigApplicationStackInitParameters) { + *out = *in + if in.CurrentStack != nil { + in, out := &in.CurrentStack, &out.CurrentStack + *out = new(string) + **out = **in + } + if in.DockerContainerName != nil { + in, out := &in.DockerContainerName, &out.DockerContainerName + *out = new(string) + **out = **in + } + if in.DockerContainerRegistry != nil { + in, out := &in.DockerContainerRegistry, &out.DockerContainerRegistry + *out = new(string) + **out = **in + } + if in.DockerContainerTag != nil { + in, out := &in.DockerContainerTag, &out.DockerContainerTag + *out = new(string) + **out = **in + } + if in.DockerImageName != nil { + in, out := &in.DockerImageName, &out.DockerImageName + *out = new(string) + **out = **in + } + if in.DockerRegistryURL != nil { + in, out := &in.DockerRegistryURL, &out.DockerRegistryURL + *out = new(string) + **out = **in + } + if in.DockerRegistryUsername != nil { + in, out := &in.DockerRegistryUsername, &out.DockerRegistryUsername + *out = new(string) + **out = **in + } + if in.DotnetCoreVersion != nil { + in, out := &in.DotnetCoreVersion, &out.DotnetCoreVersion + *out = new(string) + **out = **in + } + if in.DotnetVersion != nil { + in, out := &in.DotnetVersion, &out.DotnetVersion + *out = new(string) + **out = **in + } + if in.JavaContainer != nil { + in, out := &in.JavaContainer, &out.JavaContainer + *out = new(string) + **out = **in + } + if in.JavaContainerVersion != nil { + in, out := &in.JavaContainerVersion, 
&out.JavaContainerVersion + *out = new(string) + **out = **in + } + if in.JavaEmbeddedServerEnabled != nil { + in, out := &in.JavaEmbeddedServerEnabled, &out.JavaEmbeddedServerEnabled + *out = new(bool) + **out = **in + } + if in.JavaVersion != nil { + in, out := &in.JavaVersion, &out.JavaVersion + *out = new(string) + **out = **in + } + if in.NodeVersion != nil { + in, out := &in.NodeVersion, &out.NodeVersion + *out = new(string) + **out = **in + } + if in.PHPVersion != nil { + in, out := &in.PHPVersion, &out.PHPVersion + *out = new(string) + **out = **in + } + if in.Python != nil { + in, out := &in.Python, &out.Python + *out = new(bool) + **out = **in + } + if in.PythonVersion != nil { + in, out := &in.PythonVersion, &out.PythonVersion + *out = new(string) + **out = **in + } + if in.TomcatVersion != nil { + in, out := &in.TomcatVersion, &out.TomcatVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSiteConfigApplicationStackInitParameters. +func (in *WindowsWebAppSiteConfigApplicationStackInitParameters) DeepCopy() *WindowsWebAppSiteConfigApplicationStackInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSiteConfigApplicationStackInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSiteConfigApplicationStackObservation) DeepCopyInto(out *WindowsWebAppSiteConfigApplicationStackObservation) { + *out = *in + if in.CurrentStack != nil { + in, out := &in.CurrentStack, &out.CurrentStack + *out = new(string) + **out = **in + } + if in.DockerContainerName != nil { + in, out := &in.DockerContainerName, &out.DockerContainerName + *out = new(string) + **out = **in + } + if in.DockerContainerRegistry != nil { + in, out := &in.DockerContainerRegistry, &out.DockerContainerRegistry + *out = new(string) + **out = **in + } + if in.DockerContainerTag != nil { + in, out := &in.DockerContainerTag, &out.DockerContainerTag + *out = new(string) + **out = **in + } + if in.DockerImageName != nil { + in, out := &in.DockerImageName, &out.DockerImageName + *out = new(string) + **out = **in + } + if in.DockerRegistryURL != nil { + in, out := &in.DockerRegistryURL, &out.DockerRegistryURL + *out = new(string) + **out = **in + } + if in.DockerRegistryUsername != nil { + in, out := &in.DockerRegistryUsername, &out.DockerRegistryUsername + *out = new(string) + **out = **in + } + if in.DotnetCoreVersion != nil { + in, out := &in.DotnetCoreVersion, &out.DotnetCoreVersion + *out = new(string) + **out = **in + } + if in.DotnetVersion != nil { + in, out := &in.DotnetVersion, &out.DotnetVersion + *out = new(string) + **out = **in + } + if in.JavaContainer != nil { + in, out := &in.JavaContainer, &out.JavaContainer + *out = new(string) + **out = **in + } + if in.JavaContainerVersion != nil { + in, out := &in.JavaContainerVersion, &out.JavaContainerVersion + *out = new(string) + **out = **in + } + if in.JavaEmbeddedServerEnabled != nil { + in, out := &in.JavaEmbeddedServerEnabled, &out.JavaEmbeddedServerEnabled + *out = new(bool) + **out = **in + } + if in.JavaVersion != nil { + in, out := &in.JavaVersion, &out.JavaVersion + *out = new(string) + **out = **in + } + if in.NodeVersion != nil { + in, out := &in.NodeVersion, &out.NodeVersion + *out = new(string) + 
**out = **in + } + if in.PHPVersion != nil { + in, out := &in.PHPVersion, &out.PHPVersion + *out = new(string) + **out = **in + } + if in.Python != nil { + in, out := &in.Python, &out.Python + *out = new(bool) + **out = **in + } + if in.PythonVersion != nil { + in, out := &in.PythonVersion, &out.PythonVersion + *out = new(string) + **out = **in + } + if in.TomcatVersion != nil { + in, out := &in.TomcatVersion, &out.TomcatVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSiteConfigApplicationStackObservation. +func (in *WindowsWebAppSiteConfigApplicationStackObservation) DeepCopy() *WindowsWebAppSiteConfigApplicationStackObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppSiteConfigApplicationStackObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSiteConfigApplicationStackParameters) DeepCopyInto(out *WindowsWebAppSiteConfigApplicationStackParameters) { + *out = *in + if in.CurrentStack != nil { + in, out := &in.CurrentStack, &out.CurrentStack + *out = new(string) + **out = **in + } + if in.DockerContainerName != nil { + in, out := &in.DockerContainerName, &out.DockerContainerName + *out = new(string) + **out = **in + } + if in.DockerContainerRegistry != nil { + in, out := &in.DockerContainerRegistry, &out.DockerContainerRegistry + *out = new(string) + **out = **in + } + if in.DockerContainerTag != nil { + in, out := &in.DockerContainerTag, &out.DockerContainerTag + *out = new(string) + **out = **in + } + if in.DockerImageName != nil { + in, out := &in.DockerImageName, &out.DockerImageName + *out = new(string) + **out = **in + } + if in.DockerRegistryPasswordSecretRef != nil { + in, out := &in.DockerRegistryPasswordSecretRef, &out.DockerRegistryPasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.DockerRegistryURL != nil { + in, out := &in.DockerRegistryURL, &out.DockerRegistryURL + *out = new(string) + **out = **in + } + if in.DockerRegistryUsername != nil { + in, out := &in.DockerRegistryUsername, &out.DockerRegistryUsername + *out = new(string) + **out = **in + } + if in.DotnetCoreVersion != nil { + in, out := &in.DotnetCoreVersion, &out.DotnetCoreVersion + *out = new(string) + **out = **in + } + if in.DotnetVersion != nil { + in, out := &in.DotnetVersion, &out.DotnetVersion + *out = new(string) + **out = **in + } + if in.JavaContainer != nil { + in, out := &in.JavaContainer, &out.JavaContainer + *out = new(string) + **out = **in + } + if in.JavaContainerVersion != nil { + in, out := &in.JavaContainerVersion, &out.JavaContainerVersion + *out = new(string) + **out = **in + } + if in.JavaEmbeddedServerEnabled != nil { + in, out := &in.JavaEmbeddedServerEnabled, &out.JavaEmbeddedServerEnabled + *out = new(bool) + **out = **in + } + if in.JavaVersion != nil 
{ + in, out := &in.JavaVersion, &out.JavaVersion + *out = new(string) + **out = **in + } + if in.NodeVersion != nil { + in, out := &in.NodeVersion, &out.NodeVersion + *out = new(string) + **out = **in + } + if in.PHPVersion != nil { + in, out := &in.PHPVersion, &out.PHPVersion + *out = new(string) + **out = **in + } + if in.Python != nil { + in, out := &in.Python, &out.Python + *out = new(bool) + **out = **in + } + if in.PythonVersion != nil { + in, out := &in.PythonVersion, &out.PythonVersion + *out = new(string) + **out = **in + } + if in.TomcatVersion != nil { + in, out := &in.TomcatVersion, &out.TomcatVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSiteConfigApplicationStackParameters. +func (in *WindowsWebAppSiteConfigApplicationStackParameters) DeepCopy() *WindowsWebAppSiteConfigApplicationStackParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSiteConfigApplicationStackParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSiteConfigAutoHealSettingInitParameters) DeepCopyInto(out *WindowsWebAppSiteConfigAutoHealSettingInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(SiteConfigAutoHealSettingActionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Trigger != nil { + in, out := &in.Trigger, &out.Trigger + *out = new(SiteConfigAutoHealSettingTriggerInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSiteConfigAutoHealSettingInitParameters. 
+func (in *WindowsWebAppSiteConfigAutoHealSettingInitParameters) DeepCopy() *WindowsWebAppSiteConfigAutoHealSettingInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSiteConfigAutoHealSettingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSiteConfigAutoHealSettingObservation) DeepCopyInto(out *WindowsWebAppSiteConfigAutoHealSettingObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(SiteConfigAutoHealSettingActionObservation) + (*in).DeepCopyInto(*out) + } + if in.Trigger != nil { + in, out := &in.Trigger, &out.Trigger + *out = new(SiteConfigAutoHealSettingTriggerObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSiteConfigAutoHealSettingObservation. +func (in *WindowsWebAppSiteConfigAutoHealSettingObservation) DeepCopy() *WindowsWebAppSiteConfigAutoHealSettingObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppSiteConfigAutoHealSettingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSiteConfigAutoHealSettingParameters) DeepCopyInto(out *WindowsWebAppSiteConfigAutoHealSettingParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(SiteConfigAutoHealSettingActionParameters) + (*in).DeepCopyInto(*out) + } + if in.Trigger != nil { + in, out := &in.Trigger, &out.Trigger + *out = new(SiteConfigAutoHealSettingTriggerParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSiteConfigAutoHealSettingParameters. 
+func (in *WindowsWebAppSiteConfigAutoHealSettingParameters) DeepCopy() *WindowsWebAppSiteConfigAutoHealSettingParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSiteConfigAutoHealSettingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSiteConfigCorsInitParameters) DeepCopyInto(out *WindowsWebAppSiteConfigCorsInitParameters) { + *out = *in + if in.AllowedOrigins != nil { + in, out := &in.AllowedOrigins, &out.AllowedOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SupportCredentials != nil { + in, out := &in.SupportCredentials, &out.SupportCredentials + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSiteConfigCorsInitParameters. +func (in *WindowsWebAppSiteConfigCorsInitParameters) DeepCopy() *WindowsWebAppSiteConfigCorsInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSiteConfigCorsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSiteConfigCorsObservation) DeepCopyInto(out *WindowsWebAppSiteConfigCorsObservation) { + *out = *in + if in.AllowedOrigins != nil { + in, out := &in.AllowedOrigins, &out.AllowedOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SupportCredentials != nil { + in, out := &in.SupportCredentials, &out.SupportCredentials + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSiteConfigCorsObservation. +func (in *WindowsWebAppSiteConfigCorsObservation) DeepCopy() *WindowsWebAppSiteConfigCorsObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppSiteConfigCorsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSiteConfigCorsParameters) DeepCopyInto(out *WindowsWebAppSiteConfigCorsParameters) { + *out = *in + if in.AllowedOrigins != nil { + in, out := &in.AllowedOrigins, &out.AllowedOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SupportCredentials != nil { + in, out := &in.SupportCredentials, &out.SupportCredentials + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSiteConfigCorsParameters. +func (in *WindowsWebAppSiteConfigCorsParameters) DeepCopy() *WindowsWebAppSiteConfigCorsParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSiteConfigCorsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSiteConfigIPRestrictionHeadersInitParameters) DeepCopyInto(out *WindowsWebAppSiteConfigIPRestrictionHeadersInitParameters) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSiteConfigIPRestrictionHeadersInitParameters. +func (in *WindowsWebAppSiteConfigIPRestrictionHeadersInitParameters) DeepCopy() *WindowsWebAppSiteConfigIPRestrictionHeadersInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSiteConfigIPRestrictionHeadersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSiteConfigIPRestrictionHeadersObservation) DeepCopyInto(out *WindowsWebAppSiteConfigIPRestrictionHeadersObservation) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSiteConfigIPRestrictionHeadersObservation. +func (in *WindowsWebAppSiteConfigIPRestrictionHeadersObservation) DeepCopy() *WindowsWebAppSiteConfigIPRestrictionHeadersObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppSiteConfigIPRestrictionHeadersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSiteConfigIPRestrictionHeadersParameters) DeepCopyInto(out *WindowsWebAppSiteConfigIPRestrictionHeadersParameters) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSiteConfigIPRestrictionHeadersParameters. +func (in *WindowsWebAppSiteConfigIPRestrictionHeadersParameters) DeepCopy() *WindowsWebAppSiteConfigIPRestrictionHeadersParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSiteConfigIPRestrictionHeadersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSiteConfigIPRestrictionInitParameters) DeepCopyInto(out *WindowsWebAppSiteConfigIPRestrictionInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]WindowsWebAppSiteConfigIPRestrictionHeadersInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetIDRef != nil { + in, out := &in.VirtualNetworkSubnetIDRef, &out.VirtualNetworkSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkSubnetIDSelector != nil { + in, out := &in.VirtualNetworkSubnetIDSelector, &out.VirtualNetworkSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSiteConfigIPRestrictionInitParameters. 
+func (in *WindowsWebAppSiteConfigIPRestrictionInitParameters) DeepCopy() *WindowsWebAppSiteConfigIPRestrictionInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSiteConfigIPRestrictionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSiteConfigIPRestrictionObservation) DeepCopyInto(out *WindowsWebAppSiteConfigIPRestrictionObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]WindowsWebAppSiteConfigIPRestrictionHeadersObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSiteConfigIPRestrictionObservation. 
+func (in *WindowsWebAppSiteConfigIPRestrictionObservation) DeepCopy() *WindowsWebAppSiteConfigIPRestrictionObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppSiteConfigIPRestrictionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSiteConfigIPRestrictionParameters) DeepCopyInto(out *WindowsWebAppSiteConfigIPRestrictionParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]WindowsWebAppSiteConfigIPRestrictionHeadersParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetIDRef != nil { + in, out := &in.VirtualNetworkSubnetIDRef, &out.VirtualNetworkSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkSubnetIDSelector != nil { + in, out := &in.VirtualNetworkSubnetIDSelector, &out.VirtualNetworkSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
WindowsWebAppSiteConfigIPRestrictionParameters. +func (in *WindowsWebAppSiteConfigIPRestrictionParameters) DeepCopy() *WindowsWebAppSiteConfigIPRestrictionParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSiteConfigIPRestrictionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSiteConfigInitParameters) DeepCopyInto(out *WindowsWebAppSiteConfigInitParameters) { + *out = *in + if in.APIDefinitionURL != nil { + in, out := &in.APIDefinitionURL, &out.APIDefinitionURL + *out = new(string) + **out = **in + } + if in.APIManagementAPIID != nil { + in, out := &in.APIManagementAPIID, &out.APIManagementAPIID + *out = new(string) + **out = **in + } + if in.AlwaysOn != nil { + in, out := &in.AlwaysOn, &out.AlwaysOn + *out = new(bool) + **out = **in + } + if in.AppCommandLine != nil { + in, out := &in.AppCommandLine, &out.AppCommandLine + *out = new(string) + **out = **in + } + if in.ApplicationStack != nil { + in, out := &in.ApplicationStack, &out.ApplicationStack + *out = new(WindowsWebAppSiteConfigApplicationStackInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AutoHealEnabled != nil { + in, out := &in.AutoHealEnabled, &out.AutoHealEnabled + *out = new(bool) + **out = **in + } + if in.AutoHealSetting != nil { + in, out := &in.AutoHealSetting, &out.AutoHealSetting + *out = new(WindowsWebAppSiteConfigAutoHealSettingInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ContainerRegistryManagedIdentityClientID != nil { + in, out := &in.ContainerRegistryManagedIdentityClientID, &out.ContainerRegistryManagedIdentityClientID + *out = new(string) + **out = **in + } + if in.ContainerRegistryUseManagedIdentity != nil { + in, out := &in.ContainerRegistryUseManagedIdentity, &out.ContainerRegistryUseManagedIdentity + *out = new(bool) + **out = **in + } + if in.Cors != nil { + in, out := &in.Cors, &out.Cors + *out = 
new(WindowsWebAppSiteConfigCorsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DefaultDocuments != nil { + in, out := &in.DefaultDocuments, &out.DefaultDocuments + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FtpsState != nil { + in, out := &in.FtpsState, &out.FtpsState + *out = new(string) + **out = **in + } + if in.HealthCheckEvictionTimeInMin != nil { + in, out := &in.HealthCheckEvictionTimeInMin, &out.HealthCheckEvictionTimeInMin + *out = new(float64) + **out = **in + } + if in.HealthCheckPath != nil { + in, out := &in.HealthCheckPath, &out.HealthCheckPath + *out = new(string) + **out = **in + } + if in.Http2Enabled != nil { + in, out := &in.Http2Enabled, &out.Http2Enabled + *out = new(bool) + **out = **in + } + if in.IPRestriction != nil { + in, out := &in.IPRestriction, &out.IPRestriction + *out = make([]WindowsWebAppSiteConfigIPRestrictionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPRestrictionDefaultAction != nil { + in, out := &in.IPRestrictionDefaultAction, &out.IPRestrictionDefaultAction + *out = new(string) + **out = **in + } + if in.LoadBalancingMode != nil { + in, out := &in.LoadBalancingMode, &out.LoadBalancingMode + *out = new(string) + **out = **in + } + if in.LocalMySQLEnabled != nil { + in, out := &in.LocalMySQLEnabled, &out.LocalMySQLEnabled + *out = new(bool) + **out = **in + } + if in.ManagedPipelineMode != nil { + in, out := &in.ManagedPipelineMode, &out.ManagedPipelineMode + *out = new(string) + **out = **in + } + if in.MinimumTLSVersion != nil { + in, out := &in.MinimumTLSVersion, &out.MinimumTLSVersion + *out = new(string) + **out = **in + } + if in.RemoteDebuggingEnabled != nil { + in, out := &in.RemoteDebuggingEnabled, &out.RemoteDebuggingEnabled + *out = new(bool) + **out = **in + } + if in.RemoteDebuggingVersion != nil { + in, out := 
&in.RemoteDebuggingVersion, &out.RemoteDebuggingVersion + *out = new(string) + **out = **in + } + if in.ScmIPRestriction != nil { + in, out := &in.ScmIPRestriction, &out.ScmIPRestriction + *out = make([]WindowsWebAppSiteConfigScmIPRestrictionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ScmIPRestrictionDefaultAction != nil { + in, out := &in.ScmIPRestrictionDefaultAction, &out.ScmIPRestrictionDefaultAction + *out = new(string) + **out = **in + } + if in.ScmMinimumTLSVersion != nil { + in, out := &in.ScmMinimumTLSVersion, &out.ScmMinimumTLSVersion + *out = new(string) + **out = **in + } + if in.ScmUseMainIPRestriction != nil { + in, out := &in.ScmUseMainIPRestriction, &out.ScmUseMainIPRestriction + *out = new(bool) + **out = **in + } + if in.Use32BitWorker != nil { + in, out := &in.Use32BitWorker, &out.Use32BitWorker + *out = new(bool) + **out = **in + } + if in.VirtualApplication != nil { + in, out := &in.VirtualApplication, &out.VirtualApplication + *out = make([]VirtualApplicationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VnetRouteAllEnabled != nil { + in, out := &in.VnetRouteAllEnabled, &out.VnetRouteAllEnabled + *out = new(bool) + **out = **in + } + if in.WebsocketsEnabled != nil { + in, out := &in.WebsocketsEnabled, &out.WebsocketsEnabled + *out = new(bool) + **out = **in + } + if in.WorkerCount != nil { + in, out := &in.WorkerCount, &out.WorkerCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSiteConfigInitParameters. +func (in *WindowsWebAppSiteConfigInitParameters) DeepCopy() *WindowsWebAppSiteConfigInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSiteConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *WindowsWebAppSiteConfigObservation) DeepCopyInto(out *WindowsWebAppSiteConfigObservation) { + *out = *in + if in.APIDefinitionURL != nil { + in, out := &in.APIDefinitionURL, &out.APIDefinitionURL + *out = new(string) + **out = **in + } + if in.APIManagementAPIID != nil { + in, out := &in.APIManagementAPIID, &out.APIManagementAPIID + *out = new(string) + **out = **in + } + if in.AlwaysOn != nil { + in, out := &in.AlwaysOn, &out.AlwaysOn + *out = new(bool) + **out = **in + } + if in.AppCommandLine != nil { + in, out := &in.AppCommandLine, &out.AppCommandLine + *out = new(string) + **out = **in + } + if in.ApplicationStack != nil { + in, out := &in.ApplicationStack, &out.ApplicationStack + *out = new(WindowsWebAppSiteConfigApplicationStackObservation) + (*in).DeepCopyInto(*out) + } + if in.AutoHealEnabled != nil { + in, out := &in.AutoHealEnabled, &out.AutoHealEnabled + *out = new(bool) + **out = **in + } + if in.AutoHealSetting != nil { + in, out := &in.AutoHealSetting, &out.AutoHealSetting + *out = new(WindowsWebAppSiteConfigAutoHealSettingObservation) + (*in).DeepCopyInto(*out) + } + if in.ContainerRegistryManagedIdentityClientID != nil { + in, out := &in.ContainerRegistryManagedIdentityClientID, &out.ContainerRegistryManagedIdentityClientID + *out = new(string) + **out = **in + } + if in.ContainerRegistryUseManagedIdentity != nil { + in, out := &in.ContainerRegistryUseManagedIdentity, &out.ContainerRegistryUseManagedIdentity + *out = new(bool) + **out = **in + } + if in.Cors != nil { + in, out := &in.Cors, &out.Cors + *out = new(WindowsWebAppSiteConfigCorsObservation) + (*in).DeepCopyInto(*out) + } + if in.DefaultDocuments != nil { + in, out := &in.DefaultDocuments, &out.DefaultDocuments + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DetailedErrorLoggingEnabled != nil { + in, out := 
&in.DetailedErrorLoggingEnabled, &out.DetailedErrorLoggingEnabled + *out = new(bool) + **out = **in + } + if in.FtpsState != nil { + in, out := &in.FtpsState, &out.FtpsState + *out = new(string) + **out = **in + } + if in.HealthCheckEvictionTimeInMin != nil { + in, out := &in.HealthCheckEvictionTimeInMin, &out.HealthCheckEvictionTimeInMin + *out = new(float64) + **out = **in + } + if in.HealthCheckPath != nil { + in, out := &in.HealthCheckPath, &out.HealthCheckPath + *out = new(string) + **out = **in + } + if in.Http2Enabled != nil { + in, out := &in.Http2Enabled, &out.Http2Enabled + *out = new(bool) + **out = **in + } + if in.IPRestriction != nil { + in, out := &in.IPRestriction, &out.IPRestriction + *out = make([]WindowsWebAppSiteConfigIPRestrictionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPRestrictionDefaultAction != nil { + in, out := &in.IPRestrictionDefaultAction, &out.IPRestrictionDefaultAction + *out = new(string) + **out = **in + } + if in.LinuxFxVersion != nil { + in, out := &in.LinuxFxVersion, &out.LinuxFxVersion + *out = new(string) + **out = **in + } + if in.LoadBalancingMode != nil { + in, out := &in.LoadBalancingMode, &out.LoadBalancingMode + *out = new(string) + **out = **in + } + if in.LocalMySQLEnabled != nil { + in, out := &in.LocalMySQLEnabled, &out.LocalMySQLEnabled + *out = new(bool) + **out = **in + } + if in.ManagedPipelineMode != nil { + in, out := &in.ManagedPipelineMode, &out.ManagedPipelineMode + *out = new(string) + **out = **in + } + if in.MinimumTLSVersion != nil { + in, out := &in.MinimumTLSVersion, &out.MinimumTLSVersion + *out = new(string) + **out = **in + } + if in.RemoteDebuggingEnabled != nil { + in, out := &in.RemoteDebuggingEnabled, &out.RemoteDebuggingEnabled + *out = new(bool) + **out = **in + } + if in.RemoteDebuggingVersion != nil { + in, out := &in.RemoteDebuggingVersion, &out.RemoteDebuggingVersion + *out = new(string) + **out = **in + } + if in.ScmIPRestriction 
!= nil { + in, out := &in.ScmIPRestriction, &out.ScmIPRestriction + *out = make([]WindowsWebAppSiteConfigScmIPRestrictionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ScmIPRestrictionDefaultAction != nil { + in, out := &in.ScmIPRestrictionDefaultAction, &out.ScmIPRestrictionDefaultAction + *out = new(string) + **out = **in + } + if in.ScmMinimumTLSVersion != nil { + in, out := &in.ScmMinimumTLSVersion, &out.ScmMinimumTLSVersion + *out = new(string) + **out = **in + } + if in.ScmType != nil { + in, out := &in.ScmType, &out.ScmType + *out = new(string) + **out = **in + } + if in.ScmUseMainIPRestriction != nil { + in, out := &in.ScmUseMainIPRestriction, &out.ScmUseMainIPRestriction + *out = new(bool) + **out = **in + } + if in.Use32BitWorker != nil { + in, out := &in.Use32BitWorker, &out.Use32BitWorker + *out = new(bool) + **out = **in + } + if in.VirtualApplication != nil { + in, out := &in.VirtualApplication, &out.VirtualApplication + *out = make([]VirtualApplicationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VnetRouteAllEnabled != nil { + in, out := &in.VnetRouteAllEnabled, &out.VnetRouteAllEnabled + *out = new(bool) + **out = **in + } + if in.WebsocketsEnabled != nil { + in, out := &in.WebsocketsEnabled, &out.WebsocketsEnabled + *out = new(bool) + **out = **in + } + if in.WindowsFxVersion != nil { + in, out := &in.WindowsFxVersion, &out.WindowsFxVersion + *out = new(string) + **out = **in + } + if in.WorkerCount != nil { + in, out := &in.WorkerCount, &out.WorkerCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSiteConfigObservation. 
+func (in *WindowsWebAppSiteConfigObservation) DeepCopy() *WindowsWebAppSiteConfigObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppSiteConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSiteConfigParameters) DeepCopyInto(out *WindowsWebAppSiteConfigParameters) { + *out = *in + if in.APIDefinitionURL != nil { + in, out := &in.APIDefinitionURL, &out.APIDefinitionURL + *out = new(string) + **out = **in + } + if in.APIManagementAPIID != nil { + in, out := &in.APIManagementAPIID, &out.APIManagementAPIID + *out = new(string) + **out = **in + } + if in.AlwaysOn != nil { + in, out := &in.AlwaysOn, &out.AlwaysOn + *out = new(bool) + **out = **in + } + if in.AppCommandLine != nil { + in, out := &in.AppCommandLine, &out.AppCommandLine + *out = new(string) + **out = **in + } + if in.ApplicationStack != nil { + in, out := &in.ApplicationStack, &out.ApplicationStack + *out = new(WindowsWebAppSiteConfigApplicationStackParameters) + (*in).DeepCopyInto(*out) + } + if in.AutoHealEnabled != nil { + in, out := &in.AutoHealEnabled, &out.AutoHealEnabled + *out = new(bool) + **out = **in + } + if in.AutoHealSetting != nil { + in, out := &in.AutoHealSetting, &out.AutoHealSetting + *out = new(WindowsWebAppSiteConfigAutoHealSettingParameters) + (*in).DeepCopyInto(*out) + } + if in.ContainerRegistryManagedIdentityClientID != nil { + in, out := &in.ContainerRegistryManagedIdentityClientID, &out.ContainerRegistryManagedIdentityClientID + *out = new(string) + **out = **in + } + if in.ContainerRegistryUseManagedIdentity != nil { + in, out := &in.ContainerRegistryUseManagedIdentity, &out.ContainerRegistryUseManagedIdentity + *out = new(bool) + **out = **in + } + if in.Cors != nil { + in, out := &in.Cors, &out.Cors + *out = new(WindowsWebAppSiteConfigCorsParameters) + (*in).DeepCopyInto(*out) + } + if in.DefaultDocuments != 
nil { + in, out := &in.DefaultDocuments, &out.DefaultDocuments + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FtpsState != nil { + in, out := &in.FtpsState, &out.FtpsState + *out = new(string) + **out = **in + } + if in.HealthCheckEvictionTimeInMin != nil { + in, out := &in.HealthCheckEvictionTimeInMin, &out.HealthCheckEvictionTimeInMin + *out = new(float64) + **out = **in + } + if in.HealthCheckPath != nil { + in, out := &in.HealthCheckPath, &out.HealthCheckPath + *out = new(string) + **out = **in + } + if in.Http2Enabled != nil { + in, out := &in.Http2Enabled, &out.Http2Enabled + *out = new(bool) + **out = **in + } + if in.IPRestriction != nil { + in, out := &in.IPRestriction, &out.IPRestriction + *out = make([]WindowsWebAppSiteConfigIPRestrictionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPRestrictionDefaultAction != nil { + in, out := &in.IPRestrictionDefaultAction, &out.IPRestrictionDefaultAction + *out = new(string) + **out = **in + } + if in.LoadBalancingMode != nil { + in, out := &in.LoadBalancingMode, &out.LoadBalancingMode + *out = new(string) + **out = **in + } + if in.LocalMySQLEnabled != nil { + in, out := &in.LocalMySQLEnabled, &out.LocalMySQLEnabled + *out = new(bool) + **out = **in + } + if in.ManagedPipelineMode != nil { + in, out := &in.ManagedPipelineMode, &out.ManagedPipelineMode + *out = new(string) + **out = **in + } + if in.MinimumTLSVersion != nil { + in, out := &in.MinimumTLSVersion, &out.MinimumTLSVersion + *out = new(string) + **out = **in + } + if in.RemoteDebuggingEnabled != nil { + in, out := &in.RemoteDebuggingEnabled, &out.RemoteDebuggingEnabled + *out = new(bool) + **out = **in + } + if in.RemoteDebuggingVersion != nil { + in, out := &in.RemoteDebuggingVersion, &out.RemoteDebuggingVersion + *out = new(string) + **out = **in + } + if in.ScmIPRestriction != 
nil { + in, out := &in.ScmIPRestriction, &out.ScmIPRestriction + *out = make([]WindowsWebAppSiteConfigScmIPRestrictionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ScmIPRestrictionDefaultAction != nil { + in, out := &in.ScmIPRestrictionDefaultAction, &out.ScmIPRestrictionDefaultAction + *out = new(string) + **out = **in + } + if in.ScmMinimumTLSVersion != nil { + in, out := &in.ScmMinimumTLSVersion, &out.ScmMinimumTLSVersion + *out = new(string) + **out = **in + } + if in.ScmUseMainIPRestriction != nil { + in, out := &in.ScmUseMainIPRestriction, &out.ScmUseMainIPRestriction + *out = new(bool) + **out = **in + } + if in.Use32BitWorker != nil { + in, out := &in.Use32BitWorker, &out.Use32BitWorker + *out = new(bool) + **out = **in + } + if in.VirtualApplication != nil { + in, out := &in.VirtualApplication, &out.VirtualApplication + *out = make([]VirtualApplicationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VnetRouteAllEnabled != nil { + in, out := &in.VnetRouteAllEnabled, &out.VnetRouteAllEnabled + *out = new(bool) + **out = **in + } + if in.WebsocketsEnabled != nil { + in, out := &in.WebsocketsEnabled, &out.WebsocketsEnabled + *out = new(bool) + **out = **in + } + if in.WorkerCount != nil { + in, out := &in.WorkerCount, &out.WorkerCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSiteConfigParameters. +func (in *WindowsWebAppSiteConfigParameters) DeepCopy() *WindowsWebAppSiteConfigParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSiteConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSiteConfigScmIPRestrictionHeadersInitParameters) DeepCopyInto(out *WindowsWebAppSiteConfigScmIPRestrictionHeadersInitParameters) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSiteConfigScmIPRestrictionHeadersInitParameters. +func (in *WindowsWebAppSiteConfigScmIPRestrictionHeadersInitParameters) DeepCopy() *WindowsWebAppSiteConfigScmIPRestrictionHeadersInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSiteConfigScmIPRestrictionHeadersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSiteConfigScmIPRestrictionHeadersObservation) DeepCopyInto(out *WindowsWebAppSiteConfigScmIPRestrictionHeadersObservation) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSiteConfigScmIPRestrictionHeadersObservation. +func (in *WindowsWebAppSiteConfigScmIPRestrictionHeadersObservation) DeepCopy() *WindowsWebAppSiteConfigScmIPRestrictionHeadersObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppSiteConfigScmIPRestrictionHeadersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSiteConfigScmIPRestrictionHeadersParameters) DeepCopyInto(out *WindowsWebAppSiteConfigScmIPRestrictionHeadersParameters) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSiteConfigScmIPRestrictionHeadersParameters. +func (in *WindowsWebAppSiteConfigScmIPRestrictionHeadersParameters) DeepCopy() *WindowsWebAppSiteConfigScmIPRestrictionHeadersParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSiteConfigScmIPRestrictionHeadersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSiteConfigScmIPRestrictionInitParameters) DeepCopyInto(out *WindowsWebAppSiteConfigScmIPRestrictionInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]WindowsWebAppSiteConfigScmIPRestrictionHeadersInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetIDRef != nil { + in, out := &in.VirtualNetworkSubnetIDRef, &out.VirtualNetworkSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkSubnetIDSelector != nil { + in, out := &in.VirtualNetworkSubnetIDSelector, &out.VirtualNetworkSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSiteConfigScmIPRestrictionInitParameters. 
+func (in *WindowsWebAppSiteConfigScmIPRestrictionInitParameters) DeepCopy() *WindowsWebAppSiteConfigScmIPRestrictionInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSiteConfigScmIPRestrictionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSiteConfigScmIPRestrictionObservation) DeepCopyInto(out *WindowsWebAppSiteConfigScmIPRestrictionObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]WindowsWebAppSiteConfigScmIPRestrictionHeadersObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSiteConfigScmIPRestrictionObservation. 
+func (in *WindowsWebAppSiteConfigScmIPRestrictionObservation) DeepCopy() *WindowsWebAppSiteConfigScmIPRestrictionObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppSiteConfigScmIPRestrictionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSiteConfigScmIPRestrictionParameters) DeepCopyInto(out *WindowsWebAppSiteConfigScmIPRestrictionParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]WindowsWebAppSiteConfigScmIPRestrictionHeadersParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetIDRef != nil { + in, out := &in.VirtualNetworkSubnetIDRef, &out.VirtualNetworkSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkSubnetIDSelector != nil { + in, out := &in.VirtualNetworkSubnetIDSelector, &out.VirtualNetworkSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
WindowsWebAppSiteConfigScmIPRestrictionParameters. +func (in *WindowsWebAppSiteConfigScmIPRestrictionParameters) DeepCopy() *WindowsWebAppSiteConfigScmIPRestrictionParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSiteConfigScmIPRestrictionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSiteCredentialInitParameters) DeepCopyInto(out *WindowsWebAppSiteCredentialInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSiteCredentialInitParameters. +func (in *WindowsWebAppSiteCredentialInitParameters) DeepCopy() *WindowsWebAppSiteCredentialInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSiteCredentialInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSiteCredentialObservation) DeepCopyInto(out *WindowsWebAppSiteCredentialObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Password != nil { + in, out := &in.Password, &out.Password + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSiteCredentialObservation. +func (in *WindowsWebAppSiteCredentialObservation) DeepCopy() *WindowsWebAppSiteCredentialObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppSiteCredentialObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSiteCredentialParameters) DeepCopyInto(out *WindowsWebAppSiteCredentialParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSiteCredentialParameters. +func (in *WindowsWebAppSiteCredentialParameters) DeepCopy() *WindowsWebAppSiteCredentialParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSiteCredentialParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlot) DeepCopyInto(out *WindowsWebAppSlot) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlot. +func (in *WindowsWebAppSlot) DeepCopy() *WindowsWebAppSlot { + if in == nil { + return nil + } + out := new(WindowsWebAppSlot) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WindowsWebAppSlot) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotAuthSettingsActiveDirectoryInitParameters) DeepCopyInto(out *WindowsWebAppSlotAuthSettingsActiveDirectoryInitParameters) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotAuthSettingsActiveDirectoryInitParameters. +func (in *WindowsWebAppSlotAuthSettingsActiveDirectoryInitParameters) DeepCopy() *WindowsWebAppSlotAuthSettingsActiveDirectoryInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotAuthSettingsActiveDirectoryInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotAuthSettingsActiveDirectoryObservation) DeepCopyInto(out *WindowsWebAppSlotAuthSettingsActiveDirectoryObservation) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotAuthSettingsActiveDirectoryObservation. +func (in *WindowsWebAppSlotAuthSettingsActiveDirectoryObservation) DeepCopy() *WindowsWebAppSlotAuthSettingsActiveDirectoryObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotAuthSettingsActiveDirectoryObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotAuthSettingsActiveDirectoryParameters) DeepCopyInto(out *WindowsWebAppSlotAuthSettingsActiveDirectoryParameters) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSecretRef != nil { + in, out := &in.ClientSecretSecretRef, &out.ClientSecretSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotAuthSettingsActiveDirectoryParameters. +func (in *WindowsWebAppSlotAuthSettingsActiveDirectoryParameters) DeepCopy() *WindowsWebAppSlotAuthSettingsActiveDirectoryParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotAuthSettingsActiveDirectoryParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotAuthSettingsFacebookInitParameters) DeepCopyInto(out *WindowsWebAppSlotAuthSettingsFacebookInitParameters) { + *out = *in + if in.AppID != nil { + in, out := &in.AppID, &out.AppID + *out = new(string) + **out = **in + } + if in.AppSecretSettingName != nil { + in, out := &in.AppSecretSettingName, &out.AppSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotAuthSettingsFacebookInitParameters. +func (in *WindowsWebAppSlotAuthSettingsFacebookInitParameters) DeepCopy() *WindowsWebAppSlotAuthSettingsFacebookInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotAuthSettingsFacebookInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotAuthSettingsFacebookObservation) DeepCopyInto(out *WindowsWebAppSlotAuthSettingsFacebookObservation) { + *out = *in + if in.AppID != nil { + in, out := &in.AppID, &out.AppID + *out = new(string) + **out = **in + } + if in.AppSecretSettingName != nil { + in, out := &in.AppSecretSettingName, &out.AppSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotAuthSettingsFacebookObservation. 
+func (in *WindowsWebAppSlotAuthSettingsFacebookObservation) DeepCopy() *WindowsWebAppSlotAuthSettingsFacebookObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotAuthSettingsFacebookObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotAuthSettingsFacebookParameters) DeepCopyInto(out *WindowsWebAppSlotAuthSettingsFacebookParameters) { + *out = *in + if in.AppID != nil { + in, out := &in.AppID, &out.AppID + *out = new(string) + **out = **in + } + if in.AppSecretSecretRef != nil { + in, out := &in.AppSecretSecretRef, &out.AppSecretSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.AppSecretSettingName != nil { + in, out := &in.AppSecretSettingName, &out.AppSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotAuthSettingsFacebookParameters. +func (in *WindowsWebAppSlotAuthSettingsFacebookParameters) DeepCopy() *WindowsWebAppSlotAuthSettingsFacebookParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotAuthSettingsFacebookParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotAuthSettingsGithubInitParameters) DeepCopyInto(out *WindowsWebAppSlotAuthSettingsGithubInitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotAuthSettingsGithubInitParameters. +func (in *WindowsWebAppSlotAuthSettingsGithubInitParameters) DeepCopy() *WindowsWebAppSlotAuthSettingsGithubInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotAuthSettingsGithubInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotAuthSettingsGithubObservation) DeepCopyInto(out *WindowsWebAppSlotAuthSettingsGithubObservation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotAuthSettingsGithubObservation. 
+func (in *WindowsWebAppSlotAuthSettingsGithubObservation) DeepCopy() *WindowsWebAppSlotAuthSettingsGithubObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotAuthSettingsGithubObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotAuthSettingsGithubParameters) DeepCopyInto(out *WindowsWebAppSlotAuthSettingsGithubParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSecretRef != nil { + in, out := &in.ClientSecretSecretRef, &out.ClientSecretSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotAuthSettingsGithubParameters. +func (in *WindowsWebAppSlotAuthSettingsGithubParameters) DeepCopy() *WindowsWebAppSlotAuthSettingsGithubParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotAuthSettingsGithubParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotAuthSettingsGoogleInitParameters) DeepCopyInto(out *WindowsWebAppSlotAuthSettingsGoogleInitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotAuthSettingsGoogleInitParameters. +func (in *WindowsWebAppSlotAuthSettingsGoogleInitParameters) DeepCopy() *WindowsWebAppSlotAuthSettingsGoogleInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotAuthSettingsGoogleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotAuthSettingsGoogleObservation) DeepCopyInto(out *WindowsWebAppSlotAuthSettingsGoogleObservation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotAuthSettingsGoogleObservation. 
+func (in *WindowsWebAppSlotAuthSettingsGoogleObservation) DeepCopy() *WindowsWebAppSlotAuthSettingsGoogleObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotAuthSettingsGoogleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotAuthSettingsGoogleParameters) DeepCopyInto(out *WindowsWebAppSlotAuthSettingsGoogleParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSecretRef != nil { + in, out := &in.ClientSecretSecretRef, &out.ClientSecretSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotAuthSettingsGoogleParameters. +func (in *WindowsWebAppSlotAuthSettingsGoogleParameters) DeepCopy() *WindowsWebAppSlotAuthSettingsGoogleParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotAuthSettingsGoogleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotAuthSettingsInitParameters) DeepCopyInto(out *WindowsWebAppSlotAuthSettingsInitParameters) { + *out = *in + if in.ActiveDirectory != nil { + in, out := &in.ActiveDirectory, &out.ActiveDirectory + *out = new(WindowsWebAppSlotAuthSettingsActiveDirectoryInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AdditionalLoginParameters != nil { + in, out := &in.AdditionalLoginParameters, &out.AdditionalLoginParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AllowedExternalRedirectUrls != nil { + in, out := &in.AllowedExternalRedirectUrls, &out.AllowedExternalRedirectUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DefaultProvider != nil { + in, out := &in.DefaultProvider, &out.DefaultProvider + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Facebook != nil { + in, out := &in.Facebook, &out.Facebook + *out = new(WindowsWebAppSlotAuthSettingsFacebookInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Github != nil { + in, out := &in.Github, &out.Github + *out = new(WindowsWebAppSlotAuthSettingsGithubInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Google != nil { + in, out := &in.Google, &out.Google + *out = new(WindowsWebAppSlotAuthSettingsGoogleInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } + if in.Microsoft != nil { + in, out := &in.Microsoft, &out.Microsoft + *out = new(WindowsWebAppSlotAuthSettingsMicrosoftInitParameters) + (*in).DeepCopyInto(*out) + } + if in.RuntimeVersion != 
nil { + in, out := &in.RuntimeVersion, &out.RuntimeVersion + *out = new(string) + **out = **in + } + if in.TokenRefreshExtensionHours != nil { + in, out := &in.TokenRefreshExtensionHours, &out.TokenRefreshExtensionHours + *out = new(float64) + **out = **in + } + if in.TokenStoreEnabled != nil { + in, out := &in.TokenStoreEnabled, &out.TokenStoreEnabled + *out = new(bool) + **out = **in + } + if in.Twitter != nil { + in, out := &in.Twitter, &out.Twitter + *out = new(WindowsWebAppSlotAuthSettingsTwitterInitParameters) + (*in).DeepCopyInto(*out) + } + if in.UnauthenticatedClientAction != nil { + in, out := &in.UnauthenticatedClientAction, &out.UnauthenticatedClientAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotAuthSettingsInitParameters. +func (in *WindowsWebAppSlotAuthSettingsInitParameters) DeepCopy() *WindowsWebAppSlotAuthSettingsInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotAuthSettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotAuthSettingsMicrosoftInitParameters) DeepCopyInto(out *WindowsWebAppSlotAuthSettingsMicrosoftInitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotAuthSettingsMicrosoftInitParameters. +func (in *WindowsWebAppSlotAuthSettingsMicrosoftInitParameters) DeepCopy() *WindowsWebAppSlotAuthSettingsMicrosoftInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotAuthSettingsMicrosoftInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotAuthSettingsMicrosoftObservation) DeepCopyInto(out *WindowsWebAppSlotAuthSettingsMicrosoftObservation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotAuthSettingsMicrosoftObservation. 
+func (in *WindowsWebAppSlotAuthSettingsMicrosoftObservation) DeepCopy() *WindowsWebAppSlotAuthSettingsMicrosoftObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotAuthSettingsMicrosoftObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotAuthSettingsMicrosoftParameters) DeepCopyInto(out *WindowsWebAppSlotAuthSettingsMicrosoftParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSecretRef != nil { + in, out := &in.ClientSecretSecretRef, &out.ClientSecretSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.OauthScopes != nil { + in, out := &in.OauthScopes, &out.OauthScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotAuthSettingsMicrosoftParameters. +func (in *WindowsWebAppSlotAuthSettingsMicrosoftParameters) DeepCopy() *WindowsWebAppSlotAuthSettingsMicrosoftParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotAuthSettingsMicrosoftParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotAuthSettingsObservation) DeepCopyInto(out *WindowsWebAppSlotAuthSettingsObservation) { + *out = *in + if in.ActiveDirectory != nil { + in, out := &in.ActiveDirectory, &out.ActiveDirectory + *out = new(WindowsWebAppSlotAuthSettingsActiveDirectoryObservation) + (*in).DeepCopyInto(*out) + } + if in.AdditionalLoginParameters != nil { + in, out := &in.AdditionalLoginParameters, &out.AdditionalLoginParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AllowedExternalRedirectUrls != nil { + in, out := &in.AllowedExternalRedirectUrls, &out.AllowedExternalRedirectUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DefaultProvider != nil { + in, out := &in.DefaultProvider, &out.DefaultProvider + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Facebook != nil { + in, out := &in.Facebook, &out.Facebook + *out = new(WindowsWebAppSlotAuthSettingsFacebookObservation) + (*in).DeepCopyInto(*out) + } + if in.Github != nil { + in, out := &in.Github, &out.Github + *out = new(WindowsWebAppSlotAuthSettingsGithubObservation) + (*in).DeepCopyInto(*out) + } + if in.Google != nil { + in, out := &in.Google, &out.Google + *out = new(WindowsWebAppSlotAuthSettingsGoogleObservation) + (*in).DeepCopyInto(*out) + } + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } + if in.Microsoft != nil { + in, out := &in.Microsoft, &out.Microsoft + *out = new(WindowsWebAppSlotAuthSettingsMicrosoftObservation) + (*in).DeepCopyInto(*out) + } + if in.RuntimeVersion != nil { + in, out := 
&in.RuntimeVersion, &out.RuntimeVersion + *out = new(string) + **out = **in + } + if in.TokenRefreshExtensionHours != nil { + in, out := &in.TokenRefreshExtensionHours, &out.TokenRefreshExtensionHours + *out = new(float64) + **out = **in + } + if in.TokenStoreEnabled != nil { + in, out := &in.TokenStoreEnabled, &out.TokenStoreEnabled + *out = new(bool) + **out = **in + } + if in.Twitter != nil { + in, out := &in.Twitter, &out.Twitter + *out = new(WindowsWebAppSlotAuthSettingsTwitterObservation) + (*in).DeepCopyInto(*out) + } + if in.UnauthenticatedClientAction != nil { + in, out := &in.UnauthenticatedClientAction, &out.UnauthenticatedClientAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotAuthSettingsObservation. +func (in *WindowsWebAppSlotAuthSettingsObservation) DeepCopy() *WindowsWebAppSlotAuthSettingsObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotAuthSettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotAuthSettingsParameters) DeepCopyInto(out *WindowsWebAppSlotAuthSettingsParameters) { + *out = *in + if in.ActiveDirectory != nil { + in, out := &in.ActiveDirectory, &out.ActiveDirectory + *out = new(WindowsWebAppSlotAuthSettingsActiveDirectoryParameters) + (*in).DeepCopyInto(*out) + } + if in.AdditionalLoginParameters != nil { + in, out := &in.AdditionalLoginParameters, &out.AdditionalLoginParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AllowedExternalRedirectUrls != nil { + in, out := &in.AllowedExternalRedirectUrls, &out.AllowedExternalRedirectUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DefaultProvider != nil { + in, out := &in.DefaultProvider, &out.DefaultProvider + *out = new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Facebook != nil { + in, out := &in.Facebook, &out.Facebook + *out = new(WindowsWebAppSlotAuthSettingsFacebookParameters) + (*in).DeepCopyInto(*out) + } + if in.Github != nil { + in, out := &in.Github, &out.Github + *out = new(WindowsWebAppSlotAuthSettingsGithubParameters) + (*in).DeepCopyInto(*out) + } + if in.Google != nil { + in, out := &in.Google, &out.Google + *out = new(WindowsWebAppSlotAuthSettingsGoogleParameters) + (*in).DeepCopyInto(*out) + } + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } + if in.Microsoft != nil { + in, out := &in.Microsoft, &out.Microsoft + *out = new(WindowsWebAppSlotAuthSettingsMicrosoftParameters) + (*in).DeepCopyInto(*out) + } + if in.RuntimeVersion != nil { + in, out := 
&in.RuntimeVersion, &out.RuntimeVersion + *out = new(string) + **out = **in + } + if in.TokenRefreshExtensionHours != nil { + in, out := &in.TokenRefreshExtensionHours, &out.TokenRefreshExtensionHours + *out = new(float64) + **out = **in + } + if in.TokenStoreEnabled != nil { + in, out := &in.TokenStoreEnabled, &out.TokenStoreEnabled + *out = new(bool) + **out = **in + } + if in.Twitter != nil { + in, out := &in.Twitter, &out.Twitter + *out = new(WindowsWebAppSlotAuthSettingsTwitterParameters) + (*in).DeepCopyInto(*out) + } + if in.UnauthenticatedClientAction != nil { + in, out := &in.UnauthenticatedClientAction, &out.UnauthenticatedClientAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotAuthSettingsParameters. +func (in *WindowsWebAppSlotAuthSettingsParameters) DeepCopy() *WindowsWebAppSlotAuthSettingsParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotAuthSettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotAuthSettingsTwitterInitParameters) DeepCopyInto(out *WindowsWebAppSlotAuthSettingsTwitterInitParameters) { + *out = *in + if in.ConsumerKey != nil { + in, out := &in.ConsumerKey, &out.ConsumerKey + *out = new(string) + **out = **in + } + if in.ConsumerSecretSettingName != nil { + in, out := &in.ConsumerSecretSettingName, &out.ConsumerSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotAuthSettingsTwitterInitParameters. 
+func (in *WindowsWebAppSlotAuthSettingsTwitterInitParameters) DeepCopy() *WindowsWebAppSlotAuthSettingsTwitterInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotAuthSettingsTwitterInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotAuthSettingsTwitterObservation) DeepCopyInto(out *WindowsWebAppSlotAuthSettingsTwitterObservation) { + *out = *in + if in.ConsumerKey != nil { + in, out := &in.ConsumerKey, &out.ConsumerKey + *out = new(string) + **out = **in + } + if in.ConsumerSecretSettingName != nil { + in, out := &in.ConsumerSecretSettingName, &out.ConsumerSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotAuthSettingsTwitterObservation. +func (in *WindowsWebAppSlotAuthSettingsTwitterObservation) DeepCopy() *WindowsWebAppSlotAuthSettingsTwitterObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotAuthSettingsTwitterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotAuthSettingsTwitterParameters) DeepCopyInto(out *WindowsWebAppSlotAuthSettingsTwitterParameters) { + *out = *in + if in.ConsumerKey != nil { + in, out := &in.ConsumerKey, &out.ConsumerKey + *out = new(string) + **out = **in + } + if in.ConsumerSecretSecretRef != nil { + in, out := &in.ConsumerSecretSecretRef, &out.ConsumerSecretSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.ConsumerSecretSettingName != nil { + in, out := &in.ConsumerSecretSettingName, &out.ConsumerSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotAuthSettingsTwitterParameters. +func (in *WindowsWebAppSlotAuthSettingsTwitterParameters) DeepCopy() *WindowsWebAppSlotAuthSettingsTwitterParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotAuthSettingsTwitterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotAuthSettingsV2ActiveDirectoryV2InitParameters) DeepCopyInto(out *WindowsWebAppSlotAuthSettingsV2ActiveDirectoryV2InitParameters) { + *out = *in + if in.AllowedApplications != nil { + in, out := &in.AllowedApplications, &out.AllowedApplications + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedGroups != nil { + in, out := &in.AllowedGroups, &out.AllowedGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedIdentities != nil { + in, out := &in.AllowedIdentities, &out.AllowedIdentities + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretCertificateThumbprint != nil { + in, out := &in.ClientSecretCertificateThumbprint, &out.ClientSecretCertificateThumbprint + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.JwtAllowedClientApplications != nil { + in, out := &in.JwtAllowedClientApplications, &out.JwtAllowedClientApplications + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.JwtAllowedGroups != nil { + in, out := &in.JwtAllowedGroups, 
&out.JwtAllowedGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LoginParameters != nil { + in, out := &in.LoginParameters, &out.LoginParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TenantAuthEndpoint != nil { + in, out := &in.TenantAuthEndpoint, &out.TenantAuthEndpoint + *out = new(string) + **out = **in + } + if in.WwwAuthenticationDisabled != nil { + in, out := &in.WwwAuthenticationDisabled, &out.WwwAuthenticationDisabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotAuthSettingsV2ActiveDirectoryV2InitParameters. +func (in *WindowsWebAppSlotAuthSettingsV2ActiveDirectoryV2InitParameters) DeepCopy() *WindowsWebAppSlotAuthSettingsV2ActiveDirectoryV2InitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotAuthSettingsV2ActiveDirectoryV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotAuthSettingsV2ActiveDirectoryV2Observation) DeepCopyInto(out *WindowsWebAppSlotAuthSettingsV2ActiveDirectoryV2Observation) { + *out = *in + if in.AllowedApplications != nil { + in, out := &in.AllowedApplications, &out.AllowedApplications + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedGroups != nil { + in, out := &in.AllowedGroups, &out.AllowedGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedIdentities != nil { + in, out := &in.AllowedIdentities, &out.AllowedIdentities + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretCertificateThumbprint != nil { + in, out := &in.ClientSecretCertificateThumbprint, &out.ClientSecretCertificateThumbprint + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.JwtAllowedClientApplications != nil { + in, out := &in.JwtAllowedClientApplications, &out.JwtAllowedClientApplications + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.JwtAllowedGroups != nil { + in, out := &in.JwtAllowedGroups, 
&out.JwtAllowedGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LoginParameters != nil { + in, out := &in.LoginParameters, &out.LoginParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TenantAuthEndpoint != nil { + in, out := &in.TenantAuthEndpoint, &out.TenantAuthEndpoint + *out = new(string) + **out = **in + } + if in.WwwAuthenticationDisabled != nil { + in, out := &in.WwwAuthenticationDisabled, &out.WwwAuthenticationDisabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotAuthSettingsV2ActiveDirectoryV2Observation. +func (in *WindowsWebAppSlotAuthSettingsV2ActiveDirectoryV2Observation) DeepCopy() *WindowsWebAppSlotAuthSettingsV2ActiveDirectoryV2Observation { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotAuthSettingsV2ActiveDirectoryV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotAuthSettingsV2ActiveDirectoryV2Parameters) DeepCopyInto(out *WindowsWebAppSlotAuthSettingsV2ActiveDirectoryV2Parameters) { + *out = *in + if in.AllowedApplications != nil { + in, out := &in.AllowedApplications, &out.AllowedApplications + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedGroups != nil { + in, out := &in.AllowedGroups, &out.AllowedGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.AllowedIdentities != nil { + in, out := &in.AllowedIdentities, &out.AllowedIdentities + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretCertificateThumbprint != nil { + in, out := &in.ClientSecretCertificateThumbprint, &out.ClientSecretCertificateThumbprint + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.JwtAllowedClientApplications != nil { + in, out := &in.JwtAllowedClientApplications, &out.JwtAllowedClientApplications + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.JwtAllowedGroups != nil { + in, out := &in.JwtAllowedGroups, 
&out.JwtAllowedGroups + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LoginParameters != nil { + in, out := &in.LoginParameters, &out.LoginParameters + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.TenantAuthEndpoint != nil { + in, out := &in.TenantAuthEndpoint, &out.TenantAuthEndpoint + *out = new(string) + **out = **in + } + if in.WwwAuthenticationDisabled != nil { + in, out := &in.WwwAuthenticationDisabled, &out.WwwAuthenticationDisabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotAuthSettingsV2ActiveDirectoryV2Parameters. +func (in *WindowsWebAppSlotAuthSettingsV2ActiveDirectoryV2Parameters) DeepCopy() *WindowsWebAppSlotAuthSettingsV2ActiveDirectoryV2Parameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotAuthSettingsV2ActiveDirectoryV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotAuthSettingsV2AppleV2InitParameters) DeepCopyInto(out *WindowsWebAppSlotAuthSettingsV2AppleV2InitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotAuthSettingsV2AppleV2InitParameters. 
+func (in *WindowsWebAppSlotAuthSettingsV2AppleV2InitParameters) DeepCopy() *WindowsWebAppSlotAuthSettingsV2AppleV2InitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotAuthSettingsV2AppleV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotAuthSettingsV2AppleV2Observation) DeepCopyInto(out *WindowsWebAppSlotAuthSettingsV2AppleV2Observation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotAuthSettingsV2AppleV2Observation. +func (in *WindowsWebAppSlotAuthSettingsV2AppleV2Observation) DeepCopy() *WindowsWebAppSlotAuthSettingsV2AppleV2Observation { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotAuthSettingsV2AppleV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotAuthSettingsV2AppleV2Parameters) DeepCopyInto(out *WindowsWebAppSlotAuthSettingsV2AppleV2Parameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotAuthSettingsV2AppleV2Parameters. +func (in *WindowsWebAppSlotAuthSettingsV2AppleV2Parameters) DeepCopy() *WindowsWebAppSlotAuthSettingsV2AppleV2Parameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotAuthSettingsV2AppleV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotAuthSettingsV2AzureStaticWebAppV2InitParameters) DeepCopyInto(out *WindowsWebAppSlotAuthSettingsV2AzureStaticWebAppV2InitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotAuthSettingsV2AzureStaticWebAppV2InitParameters. +func (in *WindowsWebAppSlotAuthSettingsV2AzureStaticWebAppV2InitParameters) DeepCopy() *WindowsWebAppSlotAuthSettingsV2AzureStaticWebAppV2InitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotAuthSettingsV2AzureStaticWebAppV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotAuthSettingsV2AzureStaticWebAppV2Observation) DeepCopyInto(out *WindowsWebAppSlotAuthSettingsV2AzureStaticWebAppV2Observation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotAuthSettingsV2AzureStaticWebAppV2Observation. +func (in *WindowsWebAppSlotAuthSettingsV2AzureStaticWebAppV2Observation) DeepCopy() *WindowsWebAppSlotAuthSettingsV2AzureStaticWebAppV2Observation { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotAuthSettingsV2AzureStaticWebAppV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotAuthSettingsV2AzureStaticWebAppV2Parameters) DeepCopyInto(out *WindowsWebAppSlotAuthSettingsV2AzureStaticWebAppV2Parameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotAuthSettingsV2AzureStaticWebAppV2Parameters. +func (in *WindowsWebAppSlotAuthSettingsV2AzureStaticWebAppV2Parameters) DeepCopy() *WindowsWebAppSlotAuthSettingsV2AzureStaticWebAppV2Parameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotAuthSettingsV2AzureStaticWebAppV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotAuthSettingsV2CustomOidcV2InitParameters) DeepCopyInto(out *WindowsWebAppSlotAuthSettingsV2CustomOidcV2InitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NameClaimType != nil { + in, out := &in.NameClaimType, &out.NameClaimType + *out = new(string) + **out = **in + } + if in.OpenIDConfigurationEndpoint != nil { + in, out := &in.OpenIDConfigurationEndpoint, &out.OpenIDConfigurationEndpoint + *out = new(string) + **out = **in + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotAuthSettingsV2CustomOidcV2InitParameters. +func (in *WindowsWebAppSlotAuthSettingsV2CustomOidcV2InitParameters) DeepCopy() *WindowsWebAppSlotAuthSettingsV2CustomOidcV2InitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotAuthSettingsV2CustomOidcV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotAuthSettingsV2CustomOidcV2Observation) DeepCopyInto(out *WindowsWebAppSlotAuthSettingsV2CustomOidcV2Observation) { + *out = *in + if in.AuthorisationEndpoint != nil { + in, out := &in.AuthorisationEndpoint, &out.AuthorisationEndpoint + *out = new(string) + **out = **in + } + if in.CertificationURI != nil { + in, out := &in.CertificationURI, &out.CertificationURI + *out = new(string) + **out = **in + } + if in.ClientCredentialMethod != nil { + in, out := &in.ClientCredentialMethod, &out.ClientCredentialMethod + *out = new(string) + **out = **in + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.IssuerEndpoint != nil { + in, out := &in.IssuerEndpoint, &out.IssuerEndpoint + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NameClaimType != nil { + in, out := &in.NameClaimType, &out.NameClaimType + *out = new(string) + **out = **in + } + if in.OpenIDConfigurationEndpoint != nil { + in, out := &in.OpenIDConfigurationEndpoint, &out.OpenIDConfigurationEndpoint + *out = new(string) + **out = **in + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.TokenEndpoint != nil { + in, out := &in.TokenEndpoint, &out.TokenEndpoint + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotAuthSettingsV2CustomOidcV2Observation. 
+func (in *WindowsWebAppSlotAuthSettingsV2CustomOidcV2Observation) DeepCopy() *WindowsWebAppSlotAuthSettingsV2CustomOidcV2Observation { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotAuthSettingsV2CustomOidcV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotAuthSettingsV2CustomOidcV2Parameters) DeepCopyInto(out *WindowsWebAppSlotAuthSettingsV2CustomOidcV2Parameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.NameClaimType != nil { + in, out := &in.NameClaimType, &out.NameClaimType + *out = new(string) + **out = **in + } + if in.OpenIDConfigurationEndpoint != nil { + in, out := &in.OpenIDConfigurationEndpoint, &out.OpenIDConfigurationEndpoint + *out = new(string) + **out = **in + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotAuthSettingsV2CustomOidcV2Parameters. +func (in *WindowsWebAppSlotAuthSettingsV2CustomOidcV2Parameters) DeepCopy() *WindowsWebAppSlotAuthSettingsV2CustomOidcV2Parameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotAuthSettingsV2CustomOidcV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotAuthSettingsV2FacebookV2InitParameters) DeepCopyInto(out *WindowsWebAppSlotAuthSettingsV2FacebookV2InitParameters) { + *out = *in + if in.AppID != nil { + in, out := &in.AppID, &out.AppID + *out = new(string) + **out = **in + } + if in.AppSecretSettingName != nil { + in, out := &in.AppSecretSettingName, &out.AppSecretSettingName + *out = new(string) + **out = **in + } + if in.GraphAPIVersion != nil { + in, out := &in.GraphAPIVersion, &out.GraphAPIVersion + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotAuthSettingsV2FacebookV2InitParameters. +func (in *WindowsWebAppSlotAuthSettingsV2FacebookV2InitParameters) DeepCopy() *WindowsWebAppSlotAuthSettingsV2FacebookV2InitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotAuthSettingsV2FacebookV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotAuthSettingsV2FacebookV2Observation) DeepCopyInto(out *WindowsWebAppSlotAuthSettingsV2FacebookV2Observation) { + *out = *in + if in.AppID != nil { + in, out := &in.AppID, &out.AppID + *out = new(string) + **out = **in + } + if in.AppSecretSettingName != nil { + in, out := &in.AppSecretSettingName, &out.AppSecretSettingName + *out = new(string) + **out = **in + } + if in.GraphAPIVersion != nil { + in, out := &in.GraphAPIVersion, &out.GraphAPIVersion + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotAuthSettingsV2FacebookV2Observation. +func (in *WindowsWebAppSlotAuthSettingsV2FacebookV2Observation) DeepCopy() *WindowsWebAppSlotAuthSettingsV2FacebookV2Observation { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotAuthSettingsV2FacebookV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotAuthSettingsV2FacebookV2Parameters) DeepCopyInto(out *WindowsWebAppSlotAuthSettingsV2FacebookV2Parameters) { + *out = *in + if in.AppID != nil { + in, out := &in.AppID, &out.AppID + *out = new(string) + **out = **in + } + if in.AppSecretSettingName != nil { + in, out := &in.AppSecretSettingName, &out.AppSecretSettingName + *out = new(string) + **out = **in + } + if in.GraphAPIVersion != nil { + in, out := &in.GraphAPIVersion, &out.GraphAPIVersion + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotAuthSettingsV2FacebookV2Parameters. +func (in *WindowsWebAppSlotAuthSettingsV2FacebookV2Parameters) DeepCopy() *WindowsWebAppSlotAuthSettingsV2FacebookV2Parameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotAuthSettingsV2FacebookV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotAuthSettingsV2GithubV2InitParameters) DeepCopyInto(out *WindowsWebAppSlotAuthSettingsV2GithubV2InitParameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotAuthSettingsV2GithubV2InitParameters. +func (in *WindowsWebAppSlotAuthSettingsV2GithubV2InitParameters) DeepCopy() *WindowsWebAppSlotAuthSettingsV2GithubV2InitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotAuthSettingsV2GithubV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotAuthSettingsV2GithubV2Observation) DeepCopyInto(out *WindowsWebAppSlotAuthSettingsV2GithubV2Observation) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotAuthSettingsV2GithubV2Observation. 
+func (in *WindowsWebAppSlotAuthSettingsV2GithubV2Observation) DeepCopy() *WindowsWebAppSlotAuthSettingsV2GithubV2Observation { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotAuthSettingsV2GithubV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotAuthSettingsV2GithubV2Parameters) DeepCopyInto(out *WindowsWebAppSlotAuthSettingsV2GithubV2Parameters) { + *out = *in + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotAuthSettingsV2GithubV2Parameters. +func (in *WindowsWebAppSlotAuthSettingsV2GithubV2Parameters) DeepCopy() *WindowsWebAppSlotAuthSettingsV2GithubV2Parameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotAuthSettingsV2GithubV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotAuthSettingsV2GoogleV2InitParameters) DeepCopyInto(out *WindowsWebAppSlotAuthSettingsV2GoogleV2InitParameters) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotAuthSettingsV2GoogleV2InitParameters. +func (in *WindowsWebAppSlotAuthSettingsV2GoogleV2InitParameters) DeepCopy() *WindowsWebAppSlotAuthSettingsV2GoogleV2InitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotAuthSettingsV2GoogleV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotAuthSettingsV2GoogleV2Observation) DeepCopyInto(out *WindowsWebAppSlotAuthSettingsV2GoogleV2Observation) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotAuthSettingsV2GoogleV2Observation. +func (in *WindowsWebAppSlotAuthSettingsV2GoogleV2Observation) DeepCopy() *WindowsWebAppSlotAuthSettingsV2GoogleV2Observation { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotAuthSettingsV2GoogleV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotAuthSettingsV2GoogleV2Parameters) DeepCopyInto(out *WindowsWebAppSlotAuthSettingsV2GoogleV2Parameters) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotAuthSettingsV2GoogleV2Parameters. +func (in *WindowsWebAppSlotAuthSettingsV2GoogleV2Parameters) DeepCopy() *WindowsWebAppSlotAuthSettingsV2GoogleV2Parameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotAuthSettingsV2GoogleV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotAuthSettingsV2InitParameters) DeepCopyInto(out *WindowsWebAppSlotAuthSettingsV2InitParameters) { + *out = *in + if in.ActiveDirectoryV2 != nil { + in, out := &in.ActiveDirectoryV2, &out.ActiveDirectoryV2 + *out = new(WindowsWebAppSlotAuthSettingsV2ActiveDirectoryV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.AppleV2 != nil { + in, out := &in.AppleV2, &out.AppleV2 + *out = new(WindowsWebAppSlotAuthSettingsV2AppleV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.AuthEnabled != nil { + in, out := &in.AuthEnabled, &out.AuthEnabled + *out = new(bool) + **out = **in + } + if in.AzureStaticWebAppV2 != nil { + in, out := &in.AzureStaticWebAppV2, &out.AzureStaticWebAppV2 + *out = new(WindowsWebAppSlotAuthSettingsV2AzureStaticWebAppV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.ConfigFilePath != nil { + in, out := &in.ConfigFilePath, &out.ConfigFilePath + *out = new(string) + **out = **in + } + if in.CustomOidcV2 != nil { + in, out := &in.CustomOidcV2, &out.CustomOidcV2 + *out = make([]WindowsWebAppSlotAuthSettingsV2CustomOidcV2InitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultProvider != nil { + in, out := &in.DefaultProvider, &out.DefaultProvider + *out = new(string) + **out = **in + } + if in.ExcludedPaths != nil { + in, out := &in.ExcludedPaths, &out.ExcludedPaths + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FacebookV2 != nil { + in, out := &in.FacebookV2, &out.FacebookV2 + *out = new(WindowsWebAppSlotAuthSettingsV2FacebookV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.ForwardProxyConvention != nil { + in, out := &in.ForwardProxyConvention, &out.ForwardProxyConvention + *out = new(string) + **out = **in + } + if in.ForwardProxyCustomHostHeaderName != nil { + in, out := &in.ForwardProxyCustomHostHeaderName, 
&out.ForwardProxyCustomHostHeaderName + *out = new(string) + **out = **in + } + if in.ForwardProxyCustomSchemeHeaderName != nil { + in, out := &in.ForwardProxyCustomSchemeHeaderName, &out.ForwardProxyCustomSchemeHeaderName + *out = new(string) + **out = **in + } + if in.GithubV2 != nil { + in, out := &in.GithubV2, &out.GithubV2 + *out = new(WindowsWebAppSlotAuthSettingsV2GithubV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.GoogleV2 != nil { + in, out := &in.GoogleV2, &out.GoogleV2 + *out = new(WindowsWebAppSlotAuthSettingsV2GoogleV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.HTTPRouteAPIPrefix != nil { + in, out := &in.HTTPRouteAPIPrefix, &out.HTTPRouteAPIPrefix + *out = new(string) + **out = **in + } + if in.Login != nil { + in, out := &in.Login, &out.Login + *out = new(WindowsWebAppSlotAuthSettingsV2LoginInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MicrosoftV2 != nil { + in, out := &in.MicrosoftV2, &out.MicrosoftV2 + *out = new(WindowsWebAppSlotAuthSettingsV2MicrosoftV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.RequireAuthentication != nil { + in, out := &in.RequireAuthentication, &out.RequireAuthentication + *out = new(bool) + **out = **in + } + if in.RequireHTTPS != nil { + in, out := &in.RequireHTTPS, &out.RequireHTTPS + *out = new(bool) + **out = **in + } + if in.RuntimeVersion != nil { + in, out := &in.RuntimeVersion, &out.RuntimeVersion + *out = new(string) + **out = **in + } + if in.TwitterV2 != nil { + in, out := &in.TwitterV2, &out.TwitterV2 + *out = new(WindowsWebAppSlotAuthSettingsV2TwitterV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.UnauthenticatedAction != nil { + in, out := &in.UnauthenticatedAction, &out.UnauthenticatedAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotAuthSettingsV2InitParameters. 
+func (in *WindowsWebAppSlotAuthSettingsV2InitParameters) DeepCopy() *WindowsWebAppSlotAuthSettingsV2InitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotAuthSettingsV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotAuthSettingsV2LoginInitParameters) DeepCopyInto(out *WindowsWebAppSlotAuthSettingsV2LoginInitParameters) { + *out = *in + if in.AllowedExternalRedirectUrls != nil { + in, out := &in.AllowedExternalRedirectUrls, &out.AllowedExternalRedirectUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CookieExpirationConvention != nil { + in, out := &in.CookieExpirationConvention, &out.CookieExpirationConvention + *out = new(string) + **out = **in + } + if in.CookieExpirationTime != nil { + in, out := &in.CookieExpirationTime, &out.CookieExpirationTime + *out = new(string) + **out = **in + } + if in.LogoutEndpoint != nil { + in, out := &in.LogoutEndpoint, &out.LogoutEndpoint + *out = new(string) + **out = **in + } + if in.NonceExpirationTime != nil { + in, out := &in.NonceExpirationTime, &out.NonceExpirationTime + *out = new(string) + **out = **in + } + if in.PreserveURLFragmentsForLogins != nil { + in, out := &in.PreserveURLFragmentsForLogins, &out.PreserveURLFragmentsForLogins + *out = new(bool) + **out = **in + } + if in.TokenRefreshExtensionTime != nil { + in, out := &in.TokenRefreshExtensionTime, &out.TokenRefreshExtensionTime + *out = new(float64) + **out = **in + } + if in.TokenStoreEnabled != nil { + in, out := &in.TokenStoreEnabled, &out.TokenStoreEnabled + *out = new(bool) + **out = **in + } + if in.TokenStorePath != nil { + in, out := &in.TokenStorePath, &out.TokenStorePath + *out = new(string) + **out = **in + } + if in.TokenStoreSASSettingName != nil { 
+ in, out := &in.TokenStoreSASSettingName, &out.TokenStoreSASSettingName + *out = new(string) + **out = **in + } + if in.ValidateNonce != nil { + in, out := &in.ValidateNonce, &out.ValidateNonce + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotAuthSettingsV2LoginInitParameters. +func (in *WindowsWebAppSlotAuthSettingsV2LoginInitParameters) DeepCopy() *WindowsWebAppSlotAuthSettingsV2LoginInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotAuthSettingsV2LoginInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotAuthSettingsV2LoginObservation) DeepCopyInto(out *WindowsWebAppSlotAuthSettingsV2LoginObservation) { + *out = *in + if in.AllowedExternalRedirectUrls != nil { + in, out := &in.AllowedExternalRedirectUrls, &out.AllowedExternalRedirectUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CookieExpirationConvention != nil { + in, out := &in.CookieExpirationConvention, &out.CookieExpirationConvention + *out = new(string) + **out = **in + } + if in.CookieExpirationTime != nil { + in, out := &in.CookieExpirationTime, &out.CookieExpirationTime + *out = new(string) + **out = **in + } + if in.LogoutEndpoint != nil { + in, out := &in.LogoutEndpoint, &out.LogoutEndpoint + *out = new(string) + **out = **in + } + if in.NonceExpirationTime != nil { + in, out := &in.NonceExpirationTime, &out.NonceExpirationTime + *out = new(string) + **out = **in + } + if in.PreserveURLFragmentsForLogins != nil { + in, out := &in.PreserveURLFragmentsForLogins, &out.PreserveURLFragmentsForLogins + *out = new(bool) + **out = **in + } + if in.TokenRefreshExtensionTime != nil { + in, out := 
&in.TokenRefreshExtensionTime, &out.TokenRefreshExtensionTime + *out = new(float64) + **out = **in + } + if in.TokenStoreEnabled != nil { + in, out := &in.TokenStoreEnabled, &out.TokenStoreEnabled + *out = new(bool) + **out = **in + } + if in.TokenStorePath != nil { + in, out := &in.TokenStorePath, &out.TokenStorePath + *out = new(string) + **out = **in + } + if in.TokenStoreSASSettingName != nil { + in, out := &in.TokenStoreSASSettingName, &out.TokenStoreSASSettingName + *out = new(string) + **out = **in + } + if in.ValidateNonce != nil { + in, out := &in.ValidateNonce, &out.ValidateNonce + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotAuthSettingsV2LoginObservation. +func (in *WindowsWebAppSlotAuthSettingsV2LoginObservation) DeepCopy() *WindowsWebAppSlotAuthSettingsV2LoginObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotAuthSettingsV2LoginObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotAuthSettingsV2LoginParameters) DeepCopyInto(out *WindowsWebAppSlotAuthSettingsV2LoginParameters) { + *out = *in + if in.AllowedExternalRedirectUrls != nil { + in, out := &in.AllowedExternalRedirectUrls, &out.AllowedExternalRedirectUrls + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CookieExpirationConvention != nil { + in, out := &in.CookieExpirationConvention, &out.CookieExpirationConvention + *out = new(string) + **out = **in + } + if in.CookieExpirationTime != nil { + in, out := &in.CookieExpirationTime, &out.CookieExpirationTime + *out = new(string) + **out = **in + } + if in.LogoutEndpoint != nil { + in, out := &in.LogoutEndpoint, &out.LogoutEndpoint + *out = new(string) + **out = **in + } + if in.NonceExpirationTime != nil { + in, out := &in.NonceExpirationTime, &out.NonceExpirationTime + *out = new(string) + **out = **in + } + if in.PreserveURLFragmentsForLogins != nil { + in, out := &in.PreserveURLFragmentsForLogins, &out.PreserveURLFragmentsForLogins + *out = new(bool) + **out = **in + } + if in.TokenRefreshExtensionTime != nil { + in, out := &in.TokenRefreshExtensionTime, &out.TokenRefreshExtensionTime + *out = new(float64) + **out = **in + } + if in.TokenStoreEnabled != nil { + in, out := &in.TokenStoreEnabled, &out.TokenStoreEnabled + *out = new(bool) + **out = **in + } + if in.TokenStorePath != nil { + in, out := &in.TokenStorePath, &out.TokenStorePath + *out = new(string) + **out = **in + } + if in.TokenStoreSASSettingName != nil { + in, out := &in.TokenStoreSASSettingName, &out.TokenStoreSASSettingName + *out = new(string) + **out = **in + } + if in.ValidateNonce != nil { + in, out := &in.ValidateNonce, &out.ValidateNonce + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotAuthSettingsV2LoginParameters. 
+func (in *WindowsWebAppSlotAuthSettingsV2LoginParameters) DeepCopy() *WindowsWebAppSlotAuthSettingsV2LoginParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotAuthSettingsV2LoginParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotAuthSettingsV2MicrosoftV2InitParameters) DeepCopyInto(out *WindowsWebAppSlotAuthSettingsV2MicrosoftV2InitParameters) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotAuthSettingsV2MicrosoftV2InitParameters. +func (in *WindowsWebAppSlotAuthSettingsV2MicrosoftV2InitParameters) DeepCopy() *WindowsWebAppSlotAuthSettingsV2MicrosoftV2InitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotAuthSettingsV2MicrosoftV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotAuthSettingsV2MicrosoftV2Observation) DeepCopyInto(out *WindowsWebAppSlotAuthSettingsV2MicrosoftV2Observation) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotAuthSettingsV2MicrosoftV2Observation. +func (in *WindowsWebAppSlotAuthSettingsV2MicrosoftV2Observation) DeepCopy() *WindowsWebAppSlotAuthSettingsV2MicrosoftV2Observation { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotAuthSettingsV2MicrosoftV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotAuthSettingsV2MicrosoftV2Parameters) DeepCopyInto(out *WindowsWebAppSlotAuthSettingsV2MicrosoftV2Parameters) { + *out = *in + if in.AllowedAudiences != nil { + in, out := &in.AllowedAudiences, &out.AllowedAudiences + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.ClientSecretSettingName != nil { + in, out := &in.ClientSecretSettingName, &out.ClientSecretSettingName + *out = new(string) + **out = **in + } + if in.LoginScopes != nil { + in, out := &in.LoginScopes, &out.LoginScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotAuthSettingsV2MicrosoftV2Parameters. +func (in *WindowsWebAppSlotAuthSettingsV2MicrosoftV2Parameters) DeepCopy() *WindowsWebAppSlotAuthSettingsV2MicrosoftV2Parameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotAuthSettingsV2MicrosoftV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotAuthSettingsV2Observation) DeepCopyInto(out *WindowsWebAppSlotAuthSettingsV2Observation) { + *out = *in + if in.ActiveDirectoryV2 != nil { + in, out := &in.ActiveDirectoryV2, &out.ActiveDirectoryV2 + *out = new(WindowsWebAppSlotAuthSettingsV2ActiveDirectoryV2Observation) + (*in).DeepCopyInto(*out) + } + if in.AppleV2 != nil { + in, out := &in.AppleV2, &out.AppleV2 + *out = new(WindowsWebAppSlotAuthSettingsV2AppleV2Observation) + (*in).DeepCopyInto(*out) + } + if in.AuthEnabled != nil { + in, out := &in.AuthEnabled, &out.AuthEnabled + *out = new(bool) + **out = **in + } + if in.AzureStaticWebAppV2 != nil { + in, out := &in.AzureStaticWebAppV2, &out.AzureStaticWebAppV2 + *out = new(WindowsWebAppSlotAuthSettingsV2AzureStaticWebAppV2Observation) + (*in).DeepCopyInto(*out) + } + if in.ConfigFilePath != nil { + in, out := &in.ConfigFilePath, &out.ConfigFilePath + *out = new(string) + **out = **in + } + if in.CustomOidcV2 != nil { + in, out := &in.CustomOidcV2, &out.CustomOidcV2 + *out = make([]WindowsWebAppSlotAuthSettingsV2CustomOidcV2Observation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultProvider != nil { + in, out := &in.DefaultProvider, &out.DefaultProvider + *out = new(string) + **out = **in + } + if in.ExcludedPaths != nil { + in, out := &in.ExcludedPaths, &out.ExcludedPaths + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FacebookV2 != nil { + in, out := &in.FacebookV2, &out.FacebookV2 + *out = new(WindowsWebAppSlotAuthSettingsV2FacebookV2Observation) + (*in).DeepCopyInto(*out) + } + if in.ForwardProxyConvention != nil { + in, out := &in.ForwardProxyConvention, &out.ForwardProxyConvention + *out = new(string) + **out = **in + } + if in.ForwardProxyCustomHostHeaderName != nil { + in, out := &in.ForwardProxyCustomHostHeaderName, &out.ForwardProxyCustomHostHeaderName + 
*out = new(string) + **out = **in + } + if in.ForwardProxyCustomSchemeHeaderName != nil { + in, out := &in.ForwardProxyCustomSchemeHeaderName, &out.ForwardProxyCustomSchemeHeaderName + *out = new(string) + **out = **in + } + if in.GithubV2 != nil { + in, out := &in.GithubV2, &out.GithubV2 + *out = new(WindowsWebAppSlotAuthSettingsV2GithubV2Observation) + (*in).DeepCopyInto(*out) + } + if in.GoogleV2 != nil { + in, out := &in.GoogleV2, &out.GoogleV2 + *out = new(WindowsWebAppSlotAuthSettingsV2GoogleV2Observation) + (*in).DeepCopyInto(*out) + } + if in.HTTPRouteAPIPrefix != nil { + in, out := &in.HTTPRouteAPIPrefix, &out.HTTPRouteAPIPrefix + *out = new(string) + **out = **in + } + if in.Login != nil { + in, out := &in.Login, &out.Login + *out = new(WindowsWebAppSlotAuthSettingsV2LoginObservation) + (*in).DeepCopyInto(*out) + } + if in.MicrosoftV2 != nil { + in, out := &in.MicrosoftV2, &out.MicrosoftV2 + *out = new(WindowsWebAppSlotAuthSettingsV2MicrosoftV2Observation) + (*in).DeepCopyInto(*out) + } + if in.RequireAuthentication != nil { + in, out := &in.RequireAuthentication, &out.RequireAuthentication + *out = new(bool) + **out = **in + } + if in.RequireHTTPS != nil { + in, out := &in.RequireHTTPS, &out.RequireHTTPS + *out = new(bool) + **out = **in + } + if in.RuntimeVersion != nil { + in, out := &in.RuntimeVersion, &out.RuntimeVersion + *out = new(string) + **out = **in + } + if in.TwitterV2 != nil { + in, out := &in.TwitterV2, &out.TwitterV2 + *out = new(WindowsWebAppSlotAuthSettingsV2TwitterV2Observation) + (*in).DeepCopyInto(*out) + } + if in.UnauthenticatedAction != nil { + in, out := &in.UnauthenticatedAction, &out.UnauthenticatedAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotAuthSettingsV2Observation. 
+func (in *WindowsWebAppSlotAuthSettingsV2Observation) DeepCopy() *WindowsWebAppSlotAuthSettingsV2Observation { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotAuthSettingsV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotAuthSettingsV2Parameters) DeepCopyInto(out *WindowsWebAppSlotAuthSettingsV2Parameters) { + *out = *in + if in.ActiveDirectoryV2 != nil { + in, out := &in.ActiveDirectoryV2, &out.ActiveDirectoryV2 + *out = new(WindowsWebAppSlotAuthSettingsV2ActiveDirectoryV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.AppleV2 != nil { + in, out := &in.AppleV2, &out.AppleV2 + *out = new(WindowsWebAppSlotAuthSettingsV2AppleV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.AuthEnabled != nil { + in, out := &in.AuthEnabled, &out.AuthEnabled + *out = new(bool) + **out = **in + } + if in.AzureStaticWebAppV2 != nil { + in, out := &in.AzureStaticWebAppV2, &out.AzureStaticWebAppV2 + *out = new(WindowsWebAppSlotAuthSettingsV2AzureStaticWebAppV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.ConfigFilePath != nil { + in, out := &in.ConfigFilePath, &out.ConfigFilePath + *out = new(string) + **out = **in + } + if in.CustomOidcV2 != nil { + in, out := &in.CustomOidcV2, &out.CustomOidcV2 + *out = make([]WindowsWebAppSlotAuthSettingsV2CustomOidcV2Parameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultProvider != nil { + in, out := &in.DefaultProvider, &out.DefaultProvider + *out = new(string) + **out = **in + } + if in.ExcludedPaths != nil { + in, out := &in.ExcludedPaths, &out.ExcludedPaths + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FacebookV2 != nil { + in, out := &in.FacebookV2, &out.FacebookV2 + *out = 
new(WindowsWebAppSlotAuthSettingsV2FacebookV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.ForwardProxyConvention != nil { + in, out := &in.ForwardProxyConvention, &out.ForwardProxyConvention + *out = new(string) + **out = **in + } + if in.ForwardProxyCustomHostHeaderName != nil { + in, out := &in.ForwardProxyCustomHostHeaderName, &out.ForwardProxyCustomHostHeaderName + *out = new(string) + **out = **in + } + if in.ForwardProxyCustomSchemeHeaderName != nil { + in, out := &in.ForwardProxyCustomSchemeHeaderName, &out.ForwardProxyCustomSchemeHeaderName + *out = new(string) + **out = **in + } + if in.GithubV2 != nil { + in, out := &in.GithubV2, &out.GithubV2 + *out = new(WindowsWebAppSlotAuthSettingsV2GithubV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.GoogleV2 != nil { + in, out := &in.GoogleV2, &out.GoogleV2 + *out = new(WindowsWebAppSlotAuthSettingsV2GoogleV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.HTTPRouteAPIPrefix != nil { + in, out := &in.HTTPRouteAPIPrefix, &out.HTTPRouteAPIPrefix + *out = new(string) + **out = **in + } + if in.Login != nil { + in, out := &in.Login, &out.Login + *out = new(WindowsWebAppSlotAuthSettingsV2LoginParameters) + (*in).DeepCopyInto(*out) + } + if in.MicrosoftV2 != nil { + in, out := &in.MicrosoftV2, &out.MicrosoftV2 + *out = new(WindowsWebAppSlotAuthSettingsV2MicrosoftV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.RequireAuthentication != nil { + in, out := &in.RequireAuthentication, &out.RequireAuthentication + *out = new(bool) + **out = **in + } + if in.RequireHTTPS != nil { + in, out := &in.RequireHTTPS, &out.RequireHTTPS + *out = new(bool) + **out = **in + } + if in.RuntimeVersion != nil { + in, out := &in.RuntimeVersion, &out.RuntimeVersion + *out = new(string) + **out = **in + } + if in.TwitterV2 != nil { + in, out := &in.TwitterV2, &out.TwitterV2 + *out = new(WindowsWebAppSlotAuthSettingsV2TwitterV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.UnauthenticatedAction != nil { + in, out := 
&in.UnauthenticatedAction, &out.UnauthenticatedAction + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotAuthSettingsV2Parameters. +func (in *WindowsWebAppSlotAuthSettingsV2Parameters) DeepCopy() *WindowsWebAppSlotAuthSettingsV2Parameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotAuthSettingsV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotAuthSettingsV2TwitterV2InitParameters) DeepCopyInto(out *WindowsWebAppSlotAuthSettingsV2TwitterV2InitParameters) { + *out = *in + if in.ConsumerKey != nil { + in, out := &in.ConsumerKey, &out.ConsumerKey + *out = new(string) + **out = **in + } + if in.ConsumerSecretSettingName != nil { + in, out := &in.ConsumerSecretSettingName, &out.ConsumerSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotAuthSettingsV2TwitterV2InitParameters. +func (in *WindowsWebAppSlotAuthSettingsV2TwitterV2InitParameters) DeepCopy() *WindowsWebAppSlotAuthSettingsV2TwitterV2InitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotAuthSettingsV2TwitterV2InitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotAuthSettingsV2TwitterV2Observation) DeepCopyInto(out *WindowsWebAppSlotAuthSettingsV2TwitterV2Observation) { + *out = *in + if in.ConsumerKey != nil { + in, out := &in.ConsumerKey, &out.ConsumerKey + *out = new(string) + **out = **in + } + if in.ConsumerSecretSettingName != nil { + in, out := &in.ConsumerSecretSettingName, &out.ConsumerSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotAuthSettingsV2TwitterV2Observation. +func (in *WindowsWebAppSlotAuthSettingsV2TwitterV2Observation) DeepCopy() *WindowsWebAppSlotAuthSettingsV2TwitterV2Observation { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotAuthSettingsV2TwitterV2Observation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotAuthSettingsV2TwitterV2Parameters) DeepCopyInto(out *WindowsWebAppSlotAuthSettingsV2TwitterV2Parameters) { + *out = *in + if in.ConsumerKey != nil { + in, out := &in.ConsumerKey, &out.ConsumerKey + *out = new(string) + **out = **in + } + if in.ConsumerSecretSettingName != nil { + in, out := &in.ConsumerSecretSettingName, &out.ConsumerSecretSettingName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotAuthSettingsV2TwitterV2Parameters. +func (in *WindowsWebAppSlotAuthSettingsV2TwitterV2Parameters) DeepCopy() *WindowsWebAppSlotAuthSettingsV2TwitterV2Parameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotAuthSettingsV2TwitterV2Parameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotBackupInitParameters) DeepCopyInto(out *WindowsWebAppSlotBackupInitParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(WindowsWebAppSlotBackupScheduleInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotBackupInitParameters. +func (in *WindowsWebAppSlotBackupInitParameters) DeepCopy() *WindowsWebAppSlotBackupInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotBackupInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotBackupObservation) DeepCopyInto(out *WindowsWebAppSlotBackupObservation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(WindowsWebAppSlotBackupScheduleObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotBackupObservation. +func (in *WindowsWebAppSlotBackupObservation) DeepCopy() *WindowsWebAppSlotBackupObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotBackupObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotBackupParameters) DeepCopyInto(out *WindowsWebAppSlotBackupParameters) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Schedule != nil { + in, out := &in.Schedule, &out.Schedule + *out = new(WindowsWebAppSlotBackupScheduleParameters) + (*in).DeepCopyInto(*out) + } + out.StorageAccountURLSecretRef = in.StorageAccountURLSecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotBackupParameters. +func (in *WindowsWebAppSlotBackupParameters) DeepCopy() *WindowsWebAppSlotBackupParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotBackupParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotBackupScheduleInitParameters) DeepCopyInto(out *WindowsWebAppSlotBackupScheduleInitParameters) { + *out = *in + if in.FrequencyInterval != nil { + in, out := &in.FrequencyInterval, &out.FrequencyInterval + *out = new(float64) + **out = **in + } + if in.FrequencyUnit != nil { + in, out := &in.FrequencyUnit, &out.FrequencyUnit + *out = new(string) + **out = **in + } + if in.KeepAtLeastOneBackup != nil { + in, out := &in.KeepAtLeastOneBackup, &out.KeepAtLeastOneBackup + *out = new(bool) + **out = **in + } + if in.RetentionPeriodDays != nil { + in, out := &in.RetentionPeriodDays, &out.RetentionPeriodDays + *out = new(float64) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotBackupScheduleInitParameters. 
+func (in *WindowsWebAppSlotBackupScheduleInitParameters) DeepCopy() *WindowsWebAppSlotBackupScheduleInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotBackupScheduleInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotBackupScheduleObservation) DeepCopyInto(out *WindowsWebAppSlotBackupScheduleObservation) { + *out = *in + if in.FrequencyInterval != nil { + in, out := &in.FrequencyInterval, &out.FrequencyInterval + *out = new(float64) + **out = **in + } + if in.FrequencyUnit != nil { + in, out := &in.FrequencyUnit, &out.FrequencyUnit + *out = new(string) + **out = **in + } + if in.KeepAtLeastOneBackup != nil { + in, out := &in.KeepAtLeastOneBackup, &out.KeepAtLeastOneBackup + *out = new(bool) + **out = **in + } + if in.LastExecutionTime != nil { + in, out := &in.LastExecutionTime, &out.LastExecutionTime + *out = new(string) + **out = **in + } + if in.RetentionPeriodDays != nil { + in, out := &in.RetentionPeriodDays, &out.RetentionPeriodDays + *out = new(float64) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotBackupScheduleObservation. +func (in *WindowsWebAppSlotBackupScheduleObservation) DeepCopy() *WindowsWebAppSlotBackupScheduleObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotBackupScheduleObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotBackupScheduleParameters) DeepCopyInto(out *WindowsWebAppSlotBackupScheduleParameters) { + *out = *in + if in.FrequencyInterval != nil { + in, out := &in.FrequencyInterval, &out.FrequencyInterval + *out = new(float64) + **out = **in + } + if in.FrequencyUnit != nil { + in, out := &in.FrequencyUnit, &out.FrequencyUnit + *out = new(string) + **out = **in + } + if in.KeepAtLeastOneBackup != nil { + in, out := &in.KeepAtLeastOneBackup, &out.KeepAtLeastOneBackup + *out = new(bool) + **out = **in + } + if in.RetentionPeriodDays != nil { + in, out := &in.RetentionPeriodDays, &out.RetentionPeriodDays + *out = new(float64) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotBackupScheduleParameters. +func (in *WindowsWebAppSlotBackupScheduleParameters) DeepCopy() *WindowsWebAppSlotBackupScheduleParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotBackupScheduleParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotConnectionStringInitParameters) DeepCopyInto(out *WindowsWebAppSlotConnectionStringInitParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotConnectionStringInitParameters. 
+func (in *WindowsWebAppSlotConnectionStringInitParameters) DeepCopy() *WindowsWebAppSlotConnectionStringInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotConnectionStringInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotConnectionStringObservation) DeepCopyInto(out *WindowsWebAppSlotConnectionStringObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotConnectionStringObservation. +func (in *WindowsWebAppSlotConnectionStringObservation) DeepCopy() *WindowsWebAppSlotConnectionStringObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotConnectionStringObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotConnectionStringParameters) DeepCopyInto(out *WindowsWebAppSlotConnectionStringParameters) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + out.ValueSecretRef = in.ValueSecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotConnectionStringParameters. 
+func (in *WindowsWebAppSlotConnectionStringParameters) DeepCopy() *WindowsWebAppSlotConnectionStringParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotConnectionStringParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotIdentityInitParameters) DeepCopyInto(out *WindowsWebAppSlotIdentityInitParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotIdentityInitParameters. +func (in *WindowsWebAppSlotIdentityInitParameters) DeepCopy() *WindowsWebAppSlotIdentityInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotIdentityInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotIdentityObservation) DeepCopyInto(out *WindowsWebAppSlotIdentityObservation) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrincipalID != nil { + in, out := &in.PrincipalID, &out.PrincipalID + *out = new(string) + **out = **in + } + if in.TenantID != nil { + in, out := &in.TenantID, &out.TenantID + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotIdentityObservation. +func (in *WindowsWebAppSlotIdentityObservation) DeepCopy() *WindowsWebAppSlotIdentityObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotIdentityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotIdentityParameters) DeepCopyInto(out *WindowsWebAppSlotIdentityParameters) { + *out = *in + if in.IdentityIds != nil { + in, out := &in.IdentityIds, &out.IdentityIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotIdentityParameters. 
+func (in *WindowsWebAppSlotIdentityParameters) DeepCopy() *WindowsWebAppSlotIdentityParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotIdentityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotInitParameters) DeepCopyInto(out *WindowsWebAppSlotInitParameters) { + *out = *in + if in.AppSettings != nil { + in, out := &in.AppSettings, &out.AppSettings + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AuthSettings != nil { + in, out := &in.AuthSettings, &out.AuthSettings + *out = new(WindowsWebAppSlotAuthSettingsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AuthSettingsV2 != nil { + in, out := &in.AuthSettingsV2, &out.AuthSettingsV2 + *out = new(WindowsWebAppSlotAuthSettingsV2InitParameters) + (*in).DeepCopyInto(*out) + } + if in.Backup != nil { + in, out := &in.Backup, &out.Backup + *out = new(WindowsWebAppSlotBackupInitParameters) + (*in).DeepCopyInto(*out) + } + if in.ClientAffinityEnabled != nil { + in, out := &in.ClientAffinityEnabled, &out.ClientAffinityEnabled + *out = new(bool) + **out = **in + } + if in.ClientCertificateEnabled != nil { + in, out := &in.ClientCertificateEnabled, &out.ClientCertificateEnabled + *out = new(bool) + **out = **in + } + if in.ClientCertificateExclusionPaths != nil { + in, out := &in.ClientCertificateExclusionPaths, &out.ClientCertificateExclusionPaths + *out = new(string) + **out = **in + } + if in.ClientCertificateMode != nil { + in, out := &in.ClientCertificateMode, &out.ClientCertificateMode + *out = new(string) + **out = **in + } + if in.ConnectionString != nil { + in, out := &in.ConnectionString, &out.ConnectionString + *out = 
make([]WindowsWebAppSlotConnectionStringInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.FtpPublishBasicAuthenticationEnabled != nil { + in, out := &in.FtpPublishBasicAuthenticationEnabled, &out.FtpPublishBasicAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.HTTPSOnly != nil { + in, out := &in.HTTPSOnly, &out.HTTPSOnly + *out = new(bool) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(WindowsWebAppSlotIdentityInitParameters) + (*in).DeepCopyInto(*out) + } + if in.KeyVaultReferenceIdentityID != nil { + in, out := &in.KeyVaultReferenceIdentityID, &out.KeyVaultReferenceIdentityID + *out = new(string) + **out = **in + } + if in.Logs != nil { + in, out := &in.Logs, &out.Logs + *out = new(WindowsWebAppSlotLogsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ServicePlanID != nil { + in, out := &in.ServicePlanID, &out.ServicePlanID + *out = new(string) + **out = **in + } + if in.SiteConfig != nil { + in, out := &in.SiteConfig, &out.SiteConfig + *out = new(WindowsWebAppSlotSiteConfigInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StorageAccount != nil { + in, out := &in.StorageAccount, &out.StorageAccount + *out = make([]WindowsWebAppSlotStorageAccountInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if 
in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetIDRef != nil { + in, out := &in.VirtualNetworkSubnetIDRef, &out.VirtualNetworkSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkSubnetIDSelector != nil { + in, out := &in.VirtualNetworkSubnetIDSelector, &out.VirtualNetworkSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.WebdeployPublishBasicAuthenticationEnabled != nil { + in, out := &in.WebdeployPublishBasicAuthenticationEnabled, &out.WebdeployPublishBasicAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.ZipDeployFile != nil { + in, out := &in.ZipDeployFile, &out.ZipDeployFile + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotInitParameters. +func (in *WindowsWebAppSlotInitParameters) DeepCopy() *WindowsWebAppSlotInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotList) DeepCopyInto(out *WindowsWebAppSlotList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]WindowsWebAppSlot, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotList. 
+func (in *WindowsWebAppSlotList) DeepCopy() *WindowsWebAppSlotList { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WindowsWebAppSlotList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotLogsApplicationLogsAzureBlobStorageInitParameters) DeepCopyInto(out *WindowsWebAppSlotLogsApplicationLogsAzureBlobStorageInitParameters) { + *out = *in + if in.Level != nil { + in, out := &in.Level, &out.Level + *out = new(string) + **out = **in + } + if in.RetentionInDays != nil { + in, out := &in.RetentionInDays, &out.RetentionInDays + *out = new(float64) + **out = **in + } + if in.SASURL != nil { + in, out := &in.SASURL, &out.SASURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotLogsApplicationLogsAzureBlobStorageInitParameters. +func (in *WindowsWebAppSlotLogsApplicationLogsAzureBlobStorageInitParameters) DeepCopy() *WindowsWebAppSlotLogsApplicationLogsAzureBlobStorageInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotLogsApplicationLogsAzureBlobStorageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotLogsApplicationLogsAzureBlobStorageObservation) DeepCopyInto(out *WindowsWebAppSlotLogsApplicationLogsAzureBlobStorageObservation) { + *out = *in + if in.Level != nil { + in, out := &in.Level, &out.Level + *out = new(string) + **out = **in + } + if in.RetentionInDays != nil { + in, out := &in.RetentionInDays, &out.RetentionInDays + *out = new(float64) + **out = **in + } + if in.SASURL != nil { + in, out := &in.SASURL, &out.SASURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotLogsApplicationLogsAzureBlobStorageObservation. +func (in *WindowsWebAppSlotLogsApplicationLogsAzureBlobStorageObservation) DeepCopy() *WindowsWebAppSlotLogsApplicationLogsAzureBlobStorageObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotLogsApplicationLogsAzureBlobStorageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotLogsApplicationLogsAzureBlobStorageParameters) DeepCopyInto(out *WindowsWebAppSlotLogsApplicationLogsAzureBlobStorageParameters) { + *out = *in + if in.Level != nil { + in, out := &in.Level, &out.Level + *out = new(string) + **out = **in + } + if in.RetentionInDays != nil { + in, out := &in.RetentionInDays, &out.RetentionInDays + *out = new(float64) + **out = **in + } + if in.SASURL != nil { + in, out := &in.SASURL, &out.SASURL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotLogsApplicationLogsAzureBlobStorageParameters. 
+func (in *WindowsWebAppSlotLogsApplicationLogsAzureBlobStorageParameters) DeepCopy() *WindowsWebAppSlotLogsApplicationLogsAzureBlobStorageParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotLogsApplicationLogsAzureBlobStorageParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotLogsApplicationLogsInitParameters) DeepCopyInto(out *WindowsWebAppSlotLogsApplicationLogsInitParameters) { + *out = *in + if in.AzureBlobStorage != nil { + in, out := &in.AzureBlobStorage, &out.AzureBlobStorage + *out = new(WindowsWebAppSlotLogsApplicationLogsAzureBlobStorageInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FileSystemLevel != nil { + in, out := &in.FileSystemLevel, &out.FileSystemLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotLogsApplicationLogsInitParameters. +func (in *WindowsWebAppSlotLogsApplicationLogsInitParameters) DeepCopy() *WindowsWebAppSlotLogsApplicationLogsInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotLogsApplicationLogsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotLogsApplicationLogsObservation) DeepCopyInto(out *WindowsWebAppSlotLogsApplicationLogsObservation) { + *out = *in + if in.AzureBlobStorage != nil { + in, out := &in.AzureBlobStorage, &out.AzureBlobStorage + *out = new(WindowsWebAppSlotLogsApplicationLogsAzureBlobStorageObservation) + (*in).DeepCopyInto(*out) + } + if in.FileSystemLevel != nil { + in, out := &in.FileSystemLevel, &out.FileSystemLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotLogsApplicationLogsObservation. +func (in *WindowsWebAppSlotLogsApplicationLogsObservation) DeepCopy() *WindowsWebAppSlotLogsApplicationLogsObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotLogsApplicationLogsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotLogsApplicationLogsParameters) DeepCopyInto(out *WindowsWebAppSlotLogsApplicationLogsParameters) { + *out = *in + if in.AzureBlobStorage != nil { + in, out := &in.AzureBlobStorage, &out.AzureBlobStorage + *out = new(WindowsWebAppSlotLogsApplicationLogsAzureBlobStorageParameters) + (*in).DeepCopyInto(*out) + } + if in.FileSystemLevel != nil { + in, out := &in.FileSystemLevel, &out.FileSystemLevel + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotLogsApplicationLogsParameters. +func (in *WindowsWebAppSlotLogsApplicationLogsParameters) DeepCopy() *WindowsWebAppSlotLogsApplicationLogsParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotLogsApplicationLogsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotLogsHTTPLogsAzureBlobStorageInitParameters) DeepCopyInto(out *WindowsWebAppSlotLogsHTTPLogsAzureBlobStorageInitParameters) { + *out = *in + if in.RetentionInDays != nil { + in, out := &in.RetentionInDays, &out.RetentionInDays + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotLogsHTTPLogsAzureBlobStorageInitParameters. +func (in *WindowsWebAppSlotLogsHTTPLogsAzureBlobStorageInitParameters) DeepCopy() *WindowsWebAppSlotLogsHTTPLogsAzureBlobStorageInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotLogsHTTPLogsAzureBlobStorageInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotLogsHTTPLogsAzureBlobStorageObservation) DeepCopyInto(out *WindowsWebAppSlotLogsHTTPLogsAzureBlobStorageObservation) { + *out = *in + if in.RetentionInDays != nil { + in, out := &in.RetentionInDays, &out.RetentionInDays + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotLogsHTTPLogsAzureBlobStorageObservation. +func (in *WindowsWebAppSlotLogsHTTPLogsAzureBlobStorageObservation) DeepCopy() *WindowsWebAppSlotLogsHTTPLogsAzureBlobStorageObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotLogsHTTPLogsAzureBlobStorageObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotLogsHTTPLogsAzureBlobStorageParameters) DeepCopyInto(out *WindowsWebAppSlotLogsHTTPLogsAzureBlobStorageParameters) { + *out = *in + if in.RetentionInDays != nil { + in, out := &in.RetentionInDays, &out.RetentionInDays + *out = new(float64) + **out = **in + } + out.SASURLSecretRef = in.SASURLSecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotLogsHTTPLogsAzureBlobStorageParameters. +func (in *WindowsWebAppSlotLogsHTTPLogsAzureBlobStorageParameters) DeepCopy() *WindowsWebAppSlotLogsHTTPLogsAzureBlobStorageParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotLogsHTTPLogsAzureBlobStorageParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotLogsHTTPLogsFileSystemInitParameters) DeepCopyInto(out *WindowsWebAppSlotLogsHTTPLogsFileSystemInitParameters) { + *out = *in + if in.RetentionInDays != nil { + in, out := &in.RetentionInDays, &out.RetentionInDays + *out = new(float64) + **out = **in + } + if in.RetentionInMb != nil { + in, out := &in.RetentionInMb, &out.RetentionInMb + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotLogsHTTPLogsFileSystemInitParameters. +func (in *WindowsWebAppSlotLogsHTTPLogsFileSystemInitParameters) DeepCopy() *WindowsWebAppSlotLogsHTTPLogsFileSystemInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotLogsHTTPLogsFileSystemInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotLogsHTTPLogsFileSystemObservation) DeepCopyInto(out *WindowsWebAppSlotLogsHTTPLogsFileSystemObservation) { + *out = *in + if in.RetentionInDays != nil { + in, out := &in.RetentionInDays, &out.RetentionInDays + *out = new(float64) + **out = **in + } + if in.RetentionInMb != nil { + in, out := &in.RetentionInMb, &out.RetentionInMb + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotLogsHTTPLogsFileSystemObservation. +func (in *WindowsWebAppSlotLogsHTTPLogsFileSystemObservation) DeepCopy() *WindowsWebAppSlotLogsHTTPLogsFileSystemObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotLogsHTTPLogsFileSystemObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotLogsHTTPLogsFileSystemParameters) DeepCopyInto(out *WindowsWebAppSlotLogsHTTPLogsFileSystemParameters) { + *out = *in + if in.RetentionInDays != nil { + in, out := &in.RetentionInDays, &out.RetentionInDays + *out = new(float64) + **out = **in + } + if in.RetentionInMb != nil { + in, out := &in.RetentionInMb, &out.RetentionInMb + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotLogsHTTPLogsFileSystemParameters. +func (in *WindowsWebAppSlotLogsHTTPLogsFileSystemParameters) DeepCopy() *WindowsWebAppSlotLogsHTTPLogsFileSystemParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotLogsHTTPLogsFileSystemParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotLogsHTTPLogsInitParameters) DeepCopyInto(out *WindowsWebAppSlotLogsHTTPLogsInitParameters) { + *out = *in + if in.AzureBlobStorage != nil { + in, out := &in.AzureBlobStorage, &out.AzureBlobStorage + *out = new(WindowsWebAppSlotLogsHTTPLogsAzureBlobStorageInitParameters) + (*in).DeepCopyInto(*out) + } + if in.FileSystem != nil { + in, out := &in.FileSystem, &out.FileSystem + *out = new(WindowsWebAppSlotLogsHTTPLogsFileSystemInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotLogsHTTPLogsInitParameters. +func (in *WindowsWebAppSlotLogsHTTPLogsInitParameters) DeepCopy() *WindowsWebAppSlotLogsHTTPLogsInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotLogsHTTPLogsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotLogsHTTPLogsObservation) DeepCopyInto(out *WindowsWebAppSlotLogsHTTPLogsObservation) { + *out = *in + if in.AzureBlobStorage != nil { + in, out := &in.AzureBlobStorage, &out.AzureBlobStorage + *out = new(WindowsWebAppSlotLogsHTTPLogsAzureBlobStorageObservation) + (*in).DeepCopyInto(*out) + } + if in.FileSystem != nil { + in, out := &in.FileSystem, &out.FileSystem + *out = new(WindowsWebAppSlotLogsHTTPLogsFileSystemObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotLogsHTTPLogsObservation. +func (in *WindowsWebAppSlotLogsHTTPLogsObservation) DeepCopy() *WindowsWebAppSlotLogsHTTPLogsObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotLogsHTTPLogsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotLogsHTTPLogsParameters) DeepCopyInto(out *WindowsWebAppSlotLogsHTTPLogsParameters) { + *out = *in + if in.AzureBlobStorage != nil { + in, out := &in.AzureBlobStorage, &out.AzureBlobStorage + *out = new(WindowsWebAppSlotLogsHTTPLogsAzureBlobStorageParameters) + (*in).DeepCopyInto(*out) + } + if in.FileSystem != nil { + in, out := &in.FileSystem, &out.FileSystem + *out = new(WindowsWebAppSlotLogsHTTPLogsFileSystemParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotLogsHTTPLogsParameters. +func (in *WindowsWebAppSlotLogsHTTPLogsParameters) DeepCopy() *WindowsWebAppSlotLogsHTTPLogsParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotLogsHTTPLogsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotLogsInitParameters) DeepCopyInto(out *WindowsWebAppSlotLogsInitParameters) { + *out = *in + if in.ApplicationLogs != nil { + in, out := &in.ApplicationLogs, &out.ApplicationLogs + *out = new(WindowsWebAppSlotLogsApplicationLogsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DetailedErrorMessages != nil { + in, out := &in.DetailedErrorMessages, &out.DetailedErrorMessages + *out = new(bool) + **out = **in + } + if in.FailedRequestTracing != nil { + in, out := &in.FailedRequestTracing, &out.FailedRequestTracing + *out = new(bool) + **out = **in + } + if in.HTTPLogs != nil { + in, out := &in.HTTPLogs, &out.HTTPLogs + *out = new(WindowsWebAppSlotLogsHTTPLogsInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotLogsInitParameters. 
+func (in *WindowsWebAppSlotLogsInitParameters) DeepCopy() *WindowsWebAppSlotLogsInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotLogsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotLogsObservation) DeepCopyInto(out *WindowsWebAppSlotLogsObservation) { + *out = *in + if in.ApplicationLogs != nil { + in, out := &in.ApplicationLogs, &out.ApplicationLogs + *out = new(WindowsWebAppSlotLogsApplicationLogsObservation) + (*in).DeepCopyInto(*out) + } + if in.DetailedErrorMessages != nil { + in, out := &in.DetailedErrorMessages, &out.DetailedErrorMessages + *out = new(bool) + **out = **in + } + if in.FailedRequestTracing != nil { + in, out := &in.FailedRequestTracing, &out.FailedRequestTracing + *out = new(bool) + **out = **in + } + if in.HTTPLogs != nil { + in, out := &in.HTTPLogs, &out.HTTPLogs + *out = new(WindowsWebAppSlotLogsHTTPLogsObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotLogsObservation. +func (in *WindowsWebAppSlotLogsObservation) DeepCopy() *WindowsWebAppSlotLogsObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotLogsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotLogsParameters) DeepCopyInto(out *WindowsWebAppSlotLogsParameters) { + *out = *in + if in.ApplicationLogs != nil { + in, out := &in.ApplicationLogs, &out.ApplicationLogs + *out = new(WindowsWebAppSlotLogsApplicationLogsParameters) + (*in).DeepCopyInto(*out) + } + if in.DetailedErrorMessages != nil { + in, out := &in.DetailedErrorMessages, &out.DetailedErrorMessages + *out = new(bool) + **out = **in + } + if in.FailedRequestTracing != nil { + in, out := &in.FailedRequestTracing, &out.FailedRequestTracing + *out = new(bool) + **out = **in + } + if in.HTTPLogs != nil { + in, out := &in.HTTPLogs, &out.HTTPLogs + *out = new(WindowsWebAppSlotLogsHTTPLogsParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotLogsParameters. +func (in *WindowsWebAppSlotLogsParameters) DeepCopy() *WindowsWebAppSlotLogsParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotLogsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotObservation) DeepCopyInto(out *WindowsWebAppSlotObservation) { + *out = *in + if in.AppServiceID != nil { + in, out := &in.AppServiceID, &out.AppServiceID + *out = new(string) + **out = **in + } + if in.AppSettings != nil { + in, out := &in.AppSettings, &out.AppSettings + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AuthSettings != nil { + in, out := &in.AuthSettings, &out.AuthSettings + *out = new(WindowsWebAppSlotAuthSettingsObservation) + (*in).DeepCopyInto(*out) + } + if in.AuthSettingsV2 != nil { + in, out := &in.AuthSettingsV2, &out.AuthSettingsV2 + *out = new(WindowsWebAppSlotAuthSettingsV2Observation) + (*in).DeepCopyInto(*out) + } + if in.Backup != nil { + in, out := &in.Backup, &out.Backup + *out = new(WindowsWebAppSlotBackupObservation) + (*in).DeepCopyInto(*out) + } + if in.ClientAffinityEnabled != nil { + in, out := &in.ClientAffinityEnabled, &out.ClientAffinityEnabled + *out = new(bool) + **out = **in + } + if in.ClientCertificateEnabled != nil { + in, out := &in.ClientCertificateEnabled, &out.ClientCertificateEnabled + *out = new(bool) + **out = **in + } + if in.ClientCertificateExclusionPaths != nil { + in, out := &in.ClientCertificateExclusionPaths, &out.ClientCertificateExclusionPaths + *out = new(string) + **out = **in + } + if in.ClientCertificateMode != nil { + in, out := &in.ClientCertificateMode, &out.ClientCertificateMode + *out = new(string) + **out = **in + } + if in.ConnectionString != nil { + in, out := &in.ConnectionString, &out.ConnectionString + *out = make([]WindowsWebAppSlotConnectionStringObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultHostName != nil { + in, out := &in.DefaultHostName, &out.DefaultHostName + *out = 
new(string) + **out = **in + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.FtpPublishBasicAuthenticationEnabled != nil { + in, out := &in.FtpPublishBasicAuthenticationEnabled, &out.FtpPublishBasicAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.HTTPSOnly != nil { + in, out := &in.HTTPSOnly, &out.HTTPSOnly + *out = new(bool) + **out = **in + } + if in.HostingEnvironmentID != nil { + in, out := &in.HostingEnvironmentID, &out.HostingEnvironmentID + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(WindowsWebAppSlotIdentityObservation) + (*in).DeepCopyInto(*out) + } + if in.KeyVaultReferenceIdentityID != nil { + in, out := &in.KeyVaultReferenceIdentityID, &out.KeyVaultReferenceIdentityID + *out = new(string) + **out = **in + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(string) + **out = **in + } + if in.Logs != nil { + in, out := &in.Logs, &out.Logs + *out = new(WindowsWebAppSlotLogsObservation) + (*in).DeepCopyInto(*out) + } + if in.OutboundIPAddressList != nil { + in, out := &in.OutboundIPAddressList, &out.OutboundIPAddressList + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OutboundIPAddresses != nil { + in, out := &in.OutboundIPAddresses, &out.OutboundIPAddresses + *out = new(string) + **out = **in + } + if in.PossibleOutboundIPAddressList != nil { + in, out := &in.PossibleOutboundIPAddressList, &out.PossibleOutboundIPAddressList + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PossibleOutboundIPAddresses != nil { + in, out := &in.PossibleOutboundIPAddresses, 
&out.PossibleOutboundIPAddresses + *out = new(string) + **out = **in + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ServicePlanID != nil { + in, out := &in.ServicePlanID, &out.ServicePlanID + *out = new(string) + **out = **in + } + if in.SiteConfig != nil { + in, out := &in.SiteConfig, &out.SiteConfig + *out = new(WindowsWebAppSlotSiteConfigObservation) + (*in).DeepCopyInto(*out) + } + if in.StorageAccount != nil { + in, out := &in.StorageAccount, &out.StorageAccount + *out = make([]WindowsWebAppSlotStorageAccountObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.WebdeployPublishBasicAuthenticationEnabled != nil { + in, out := &in.WebdeployPublishBasicAuthenticationEnabled, &out.WebdeployPublishBasicAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.ZipDeployFile != nil { + in, out := &in.ZipDeployFile, &out.ZipDeployFile + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotObservation. +func (in *WindowsWebAppSlotObservation) DeepCopy() *WindowsWebAppSlotObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotParameters) DeepCopyInto(out *WindowsWebAppSlotParameters) { + *out = *in + if in.AppServiceID != nil { + in, out := &in.AppServiceID, &out.AppServiceID + *out = new(string) + **out = **in + } + if in.AppServiceIDRef != nil { + in, out := &in.AppServiceIDRef, &out.AppServiceIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.AppServiceIDSelector != nil { + in, out := &in.AppServiceIDSelector, &out.AppServiceIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.AppSettings != nil { + in, out := &in.AppSettings, &out.AppSettings + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.AuthSettings != nil { + in, out := &in.AuthSettings, &out.AuthSettings + *out = new(WindowsWebAppSlotAuthSettingsParameters) + (*in).DeepCopyInto(*out) + } + if in.AuthSettingsV2 != nil { + in, out := &in.AuthSettingsV2, &out.AuthSettingsV2 + *out = new(WindowsWebAppSlotAuthSettingsV2Parameters) + (*in).DeepCopyInto(*out) + } + if in.Backup != nil { + in, out := &in.Backup, &out.Backup + *out = new(WindowsWebAppSlotBackupParameters) + (*in).DeepCopyInto(*out) + } + if in.ClientAffinityEnabled != nil { + in, out := &in.ClientAffinityEnabled, &out.ClientAffinityEnabled + *out = new(bool) + **out = **in + } + if in.ClientCertificateEnabled != nil { + in, out := &in.ClientCertificateEnabled, &out.ClientCertificateEnabled + *out = new(bool) + **out = **in + } + if in.ClientCertificateExclusionPaths != nil { + in, out := &in.ClientCertificateExclusionPaths, &out.ClientCertificateExclusionPaths + *out = new(string) + **out = **in + } + if in.ClientCertificateMode != nil { + in, out := &in.ClientCertificateMode, &out.ClientCertificateMode + *out = new(string) + **out = **in + } + if in.ConnectionString != nil { + 
in, out := &in.ConnectionString, &out.ConnectionString + *out = make([]WindowsWebAppSlotConnectionStringParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.FtpPublishBasicAuthenticationEnabled != nil { + in, out := &in.FtpPublishBasicAuthenticationEnabled, &out.FtpPublishBasicAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.HTTPSOnly != nil { + in, out := &in.HTTPSOnly, &out.HTTPSOnly + *out = new(bool) + **out = **in + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(WindowsWebAppSlotIdentityParameters) + (*in).DeepCopyInto(*out) + } + if in.KeyVaultReferenceIdentityID != nil { + in, out := &in.KeyVaultReferenceIdentityID, &out.KeyVaultReferenceIdentityID + *out = new(string) + **out = **in + } + if in.Logs != nil { + in, out := &in.Logs, &out.Logs + *out = new(WindowsWebAppSlotLogsParameters) + (*in).DeepCopyInto(*out) + } + if in.PublicNetworkAccessEnabled != nil { + in, out := &in.PublicNetworkAccessEnabled, &out.PublicNetworkAccessEnabled + *out = new(bool) + **out = **in + } + if in.ServicePlanID != nil { + in, out := &in.ServicePlanID, &out.ServicePlanID + *out = new(string) + **out = **in + } + if in.SiteConfig != nil { + in, out := &in.SiteConfig, &out.SiteConfig + *out = new(WindowsWebAppSlotSiteConfigParameters) + (*in).DeepCopyInto(*out) + } + if in.StorageAccount != nil { + in, out := &in.StorageAccount, &out.StorageAccount + *out = make([]WindowsWebAppSlotStorageAccountParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = 
outVal + } + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetIDRef != nil { + in, out := &in.VirtualNetworkSubnetIDRef, &out.VirtualNetworkSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkSubnetIDSelector != nil { + in, out := &in.VirtualNetworkSubnetIDSelector, &out.VirtualNetworkSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.WebdeployPublishBasicAuthenticationEnabled != nil { + in, out := &in.WebdeployPublishBasicAuthenticationEnabled, &out.WebdeployPublishBasicAuthenticationEnabled + *out = new(bool) + **out = **in + } + if in.ZipDeployFile != nil { + in, out := &in.ZipDeployFile, &out.ZipDeployFile + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotParameters. +func (in *WindowsWebAppSlotParameters) DeepCopy() *WindowsWebAppSlotParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotSiteConfigApplicationStackInitParameters) DeepCopyInto(out *WindowsWebAppSlotSiteConfigApplicationStackInitParameters) { + *out = *in + if in.CurrentStack != nil { + in, out := &in.CurrentStack, &out.CurrentStack + *out = new(string) + **out = **in + } + if in.DockerContainerName != nil { + in, out := &in.DockerContainerName, &out.DockerContainerName + *out = new(string) + **out = **in + } + if in.DockerContainerRegistry != nil { + in, out := &in.DockerContainerRegistry, &out.DockerContainerRegistry + *out = new(string) + **out = **in + } + if in.DockerContainerTag != nil { + in, out := &in.DockerContainerTag, &out.DockerContainerTag + *out = new(string) + **out = **in + } + if in.DockerImageName != nil { + in, out := &in.DockerImageName, &out.DockerImageName + *out = new(string) + **out = **in + } + if in.DockerRegistryURL != nil { + in, out := &in.DockerRegistryURL, &out.DockerRegistryURL + *out = new(string) + **out = **in + } + if in.DockerRegistryUsername != nil { + in, out := &in.DockerRegistryUsername, &out.DockerRegistryUsername + *out = new(string) + **out = **in + } + if in.DotnetCoreVersion != nil { + in, out := &in.DotnetCoreVersion, &out.DotnetCoreVersion + *out = new(string) + **out = **in + } + if in.DotnetVersion != nil { + in, out := &in.DotnetVersion, &out.DotnetVersion + *out = new(string) + **out = **in + } + if in.JavaContainer != nil { + in, out := &in.JavaContainer, &out.JavaContainer + *out = new(string) + **out = **in + } + if in.JavaContainerVersion != nil { + in, out := &in.JavaContainerVersion, &out.JavaContainerVersion + *out = new(string) + **out = **in + } + if in.JavaEmbeddedServerEnabled != nil { + in, out := &in.JavaEmbeddedServerEnabled, &out.JavaEmbeddedServerEnabled + *out = new(bool) + **out = **in + } + if in.JavaVersion != nil { + in, out := &in.JavaVersion, &out.JavaVersion + *out = new(string) + **out = **in + } + if in.NodeVersion != nil { + in, out := &in.NodeVersion, &out.NodeVersion + *out = 
new(string) + **out = **in + } + if in.PHPVersion != nil { + in, out := &in.PHPVersion, &out.PHPVersion + *out = new(string) + **out = **in + } + if in.Python != nil { + in, out := &in.Python, &out.Python + *out = new(bool) + **out = **in + } + if in.PythonVersion != nil { + in, out := &in.PythonVersion, &out.PythonVersion + *out = new(string) + **out = **in + } + if in.TomcatVersion != nil { + in, out := &in.TomcatVersion, &out.TomcatVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotSiteConfigApplicationStackInitParameters. +func (in *WindowsWebAppSlotSiteConfigApplicationStackInitParameters) DeepCopy() *WindowsWebAppSlotSiteConfigApplicationStackInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotSiteConfigApplicationStackInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotSiteConfigApplicationStackObservation) DeepCopyInto(out *WindowsWebAppSlotSiteConfigApplicationStackObservation) { + *out = *in + if in.CurrentStack != nil { + in, out := &in.CurrentStack, &out.CurrentStack + *out = new(string) + **out = **in + } + if in.DockerContainerName != nil { + in, out := &in.DockerContainerName, &out.DockerContainerName + *out = new(string) + **out = **in + } + if in.DockerContainerRegistry != nil { + in, out := &in.DockerContainerRegistry, &out.DockerContainerRegistry + *out = new(string) + **out = **in + } + if in.DockerContainerTag != nil { + in, out := &in.DockerContainerTag, &out.DockerContainerTag + *out = new(string) + **out = **in + } + if in.DockerImageName != nil { + in, out := &in.DockerImageName, &out.DockerImageName + *out = new(string) + **out = **in + } + if in.DockerRegistryURL != nil { + in, out := &in.DockerRegistryURL, &out.DockerRegistryURL + *out = new(string) + **out = **in + } + if in.DockerRegistryUsername != nil { + in, out := &in.DockerRegistryUsername, &out.DockerRegistryUsername + *out = new(string) + **out = **in + } + if in.DotnetCoreVersion != nil { + in, out := &in.DotnetCoreVersion, &out.DotnetCoreVersion + *out = new(string) + **out = **in + } + if in.DotnetVersion != nil { + in, out := &in.DotnetVersion, &out.DotnetVersion + *out = new(string) + **out = **in + } + if in.JavaContainer != nil { + in, out := &in.JavaContainer, &out.JavaContainer + *out = new(string) + **out = **in + } + if in.JavaContainerVersion != nil { + in, out := &in.JavaContainerVersion, &out.JavaContainerVersion + *out = new(string) + **out = **in + } + if in.JavaEmbeddedServerEnabled != nil { + in, out := &in.JavaEmbeddedServerEnabled, &out.JavaEmbeddedServerEnabled + *out = new(bool) + **out = **in + } + if in.JavaVersion != nil { + in, out := &in.JavaVersion, &out.JavaVersion + *out = new(string) + **out = **in + } + if in.NodeVersion != nil { + in, out := &in.NodeVersion, &out.NodeVersion + *out = 
new(string) + **out = **in + } + if in.PHPVersion != nil { + in, out := &in.PHPVersion, &out.PHPVersion + *out = new(string) + **out = **in + } + if in.Python != nil { + in, out := &in.Python, &out.Python + *out = new(bool) + **out = **in + } + if in.PythonVersion != nil { + in, out := &in.PythonVersion, &out.PythonVersion + *out = new(string) + **out = **in + } + if in.TomcatVersion != nil { + in, out := &in.TomcatVersion, &out.TomcatVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotSiteConfigApplicationStackObservation. +func (in *WindowsWebAppSlotSiteConfigApplicationStackObservation) DeepCopy() *WindowsWebAppSlotSiteConfigApplicationStackObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotSiteConfigApplicationStackObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotSiteConfigApplicationStackParameters) DeepCopyInto(out *WindowsWebAppSlotSiteConfigApplicationStackParameters) { + *out = *in + if in.CurrentStack != nil { + in, out := &in.CurrentStack, &out.CurrentStack + *out = new(string) + **out = **in + } + if in.DockerContainerName != nil { + in, out := &in.DockerContainerName, &out.DockerContainerName + *out = new(string) + **out = **in + } + if in.DockerContainerRegistry != nil { + in, out := &in.DockerContainerRegistry, &out.DockerContainerRegistry + *out = new(string) + **out = **in + } + if in.DockerContainerTag != nil { + in, out := &in.DockerContainerTag, &out.DockerContainerTag + *out = new(string) + **out = **in + } + if in.DockerImageName != nil { + in, out := &in.DockerImageName, &out.DockerImageName + *out = new(string) + **out = **in + } + if in.DockerRegistryPasswordSecretRef != nil { + in, out := &in.DockerRegistryPasswordSecretRef, &out.DockerRegistryPasswordSecretRef + *out = new(v1.SecretKeySelector) + **out = **in + } + if in.DockerRegistryURL != nil { + in, out := &in.DockerRegistryURL, &out.DockerRegistryURL + *out = new(string) + **out = **in + } + if in.DockerRegistryUsername != nil { + in, out := &in.DockerRegistryUsername, &out.DockerRegistryUsername + *out = new(string) + **out = **in + } + if in.DotnetCoreVersion != nil { + in, out := &in.DotnetCoreVersion, &out.DotnetCoreVersion + *out = new(string) + **out = **in + } + if in.DotnetVersion != nil { + in, out := &in.DotnetVersion, &out.DotnetVersion + *out = new(string) + **out = **in + } + if in.JavaContainer != nil { + in, out := &in.JavaContainer, &out.JavaContainer + *out = new(string) + **out = **in + } + if in.JavaContainerVersion != nil { + in, out := &in.JavaContainerVersion, &out.JavaContainerVersion + *out = new(string) + **out = **in + } + if in.JavaEmbeddedServerEnabled != nil { + in, out := &in.JavaEmbeddedServerEnabled, &out.JavaEmbeddedServerEnabled + *out = new(bool) + **out = **in + } + if in.JavaVersion 
!= nil { + in, out := &in.JavaVersion, &out.JavaVersion + *out = new(string) + **out = **in + } + if in.NodeVersion != nil { + in, out := &in.NodeVersion, &out.NodeVersion + *out = new(string) + **out = **in + } + if in.PHPVersion != nil { + in, out := &in.PHPVersion, &out.PHPVersion + *out = new(string) + **out = **in + } + if in.Python != nil { + in, out := &in.Python, &out.Python + *out = new(bool) + **out = **in + } + if in.PythonVersion != nil { + in, out := &in.PythonVersion, &out.PythonVersion + *out = new(string) + **out = **in + } + if in.TomcatVersion != nil { + in, out := &in.TomcatVersion, &out.TomcatVersion + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotSiteConfigApplicationStackParameters. +func (in *WindowsWebAppSlotSiteConfigApplicationStackParameters) DeepCopy() *WindowsWebAppSlotSiteConfigApplicationStackParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotSiteConfigApplicationStackParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotSiteConfigAutoHealSettingActionInitParameters) DeepCopyInto(out *WindowsWebAppSlotSiteConfigAutoHealSettingActionInitParameters) { + *out = *in + if in.ActionType != nil { + in, out := &in.ActionType, &out.ActionType + *out = new(string) + **out = **in + } + if in.CustomAction != nil { + in, out := &in.CustomAction, &out.CustomAction + *out = new(ActionCustomActionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.MinimumProcessExecutionTime != nil { + in, out := &in.MinimumProcessExecutionTime, &out.MinimumProcessExecutionTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotSiteConfigAutoHealSettingActionInitParameters. 
+func (in *WindowsWebAppSlotSiteConfigAutoHealSettingActionInitParameters) DeepCopy() *WindowsWebAppSlotSiteConfigAutoHealSettingActionInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotSiteConfigAutoHealSettingActionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotSiteConfigAutoHealSettingActionObservation) DeepCopyInto(out *WindowsWebAppSlotSiteConfigAutoHealSettingActionObservation) { + *out = *in + if in.ActionType != nil { + in, out := &in.ActionType, &out.ActionType + *out = new(string) + **out = **in + } + if in.CustomAction != nil { + in, out := &in.CustomAction, &out.CustomAction + *out = new(ActionCustomActionObservation) + (*in).DeepCopyInto(*out) + } + if in.MinimumProcessExecutionTime != nil { + in, out := &in.MinimumProcessExecutionTime, &out.MinimumProcessExecutionTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotSiteConfigAutoHealSettingActionObservation. +func (in *WindowsWebAppSlotSiteConfigAutoHealSettingActionObservation) DeepCopy() *WindowsWebAppSlotSiteConfigAutoHealSettingActionObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotSiteConfigAutoHealSettingActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotSiteConfigAutoHealSettingActionParameters) DeepCopyInto(out *WindowsWebAppSlotSiteConfigAutoHealSettingActionParameters) { + *out = *in + if in.ActionType != nil { + in, out := &in.ActionType, &out.ActionType + *out = new(string) + **out = **in + } + if in.CustomAction != nil { + in, out := &in.CustomAction, &out.CustomAction + *out = new(ActionCustomActionParameters) + (*in).DeepCopyInto(*out) + } + if in.MinimumProcessExecutionTime != nil { + in, out := &in.MinimumProcessExecutionTime, &out.MinimumProcessExecutionTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotSiteConfigAutoHealSettingActionParameters. +func (in *WindowsWebAppSlotSiteConfigAutoHealSettingActionParameters) DeepCopy() *WindowsWebAppSlotSiteConfigAutoHealSettingActionParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotSiteConfigAutoHealSettingActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotSiteConfigAutoHealSettingInitParameters) DeepCopyInto(out *WindowsWebAppSlotSiteConfigAutoHealSettingInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(WindowsWebAppSlotSiteConfigAutoHealSettingActionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Trigger != nil { + in, out := &in.Trigger, &out.Trigger + *out = new(WindowsWebAppSlotSiteConfigAutoHealSettingTriggerInitParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotSiteConfigAutoHealSettingInitParameters. 
+func (in *WindowsWebAppSlotSiteConfigAutoHealSettingInitParameters) DeepCopy() *WindowsWebAppSlotSiteConfigAutoHealSettingInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotSiteConfigAutoHealSettingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotSiteConfigAutoHealSettingObservation) DeepCopyInto(out *WindowsWebAppSlotSiteConfigAutoHealSettingObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(WindowsWebAppSlotSiteConfigAutoHealSettingActionObservation) + (*in).DeepCopyInto(*out) + } + if in.Trigger != nil { + in, out := &in.Trigger, &out.Trigger + *out = new(WindowsWebAppSlotSiteConfigAutoHealSettingTriggerObservation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotSiteConfigAutoHealSettingObservation. +func (in *WindowsWebAppSlotSiteConfigAutoHealSettingObservation) DeepCopy() *WindowsWebAppSlotSiteConfigAutoHealSettingObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotSiteConfigAutoHealSettingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotSiteConfigAutoHealSettingParameters) DeepCopyInto(out *WindowsWebAppSlotSiteConfigAutoHealSettingParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(WindowsWebAppSlotSiteConfigAutoHealSettingActionParameters) + (*in).DeepCopyInto(*out) + } + if in.Trigger != nil { + in, out := &in.Trigger, &out.Trigger + *out = new(WindowsWebAppSlotSiteConfigAutoHealSettingTriggerParameters) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotSiteConfigAutoHealSettingParameters. +func (in *WindowsWebAppSlotSiteConfigAutoHealSettingParameters) DeepCopy() *WindowsWebAppSlotSiteConfigAutoHealSettingParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotSiteConfigAutoHealSettingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotSiteConfigAutoHealSettingTriggerInitParameters) DeepCopyInto(out *WindowsWebAppSlotSiteConfigAutoHealSettingTriggerInitParameters) { + *out = *in + if in.PrivateMemoryKb != nil { + in, out := &in.PrivateMemoryKb, &out.PrivateMemoryKb + *out = new(float64) + **out = **in + } + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = new(SiteConfigAutoHealSettingTriggerRequestsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.SlowRequest != nil { + in, out := &in.SlowRequest, &out.SlowRequest + *out = new(SiteConfigAutoHealSettingTriggerSlowRequestInitParameters) + (*in).DeepCopyInto(*out) + } + if in.StatusCode != nil { + in, out := &in.StatusCode, &out.StatusCode + *out = make([]SiteConfigAutoHealSettingTriggerStatusCodeInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotSiteConfigAutoHealSettingTriggerInitParameters. +func (in *WindowsWebAppSlotSiteConfigAutoHealSettingTriggerInitParameters) DeepCopy() *WindowsWebAppSlotSiteConfigAutoHealSettingTriggerInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotSiteConfigAutoHealSettingTriggerInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotSiteConfigAutoHealSettingTriggerObservation) DeepCopyInto(out *WindowsWebAppSlotSiteConfigAutoHealSettingTriggerObservation) { + *out = *in + if in.PrivateMemoryKb != nil { + in, out := &in.PrivateMemoryKb, &out.PrivateMemoryKb + *out = new(float64) + **out = **in + } + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = new(SiteConfigAutoHealSettingTriggerRequestsObservation) + (*in).DeepCopyInto(*out) + } + if in.SlowRequest != nil { + in, out := &in.SlowRequest, &out.SlowRequest + *out = new(SiteConfigAutoHealSettingTriggerSlowRequestObservation) + (*in).DeepCopyInto(*out) + } + if in.StatusCode != nil { + in, out := &in.StatusCode, &out.StatusCode + *out = make([]SiteConfigAutoHealSettingTriggerStatusCodeObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotSiteConfigAutoHealSettingTriggerObservation. +func (in *WindowsWebAppSlotSiteConfigAutoHealSettingTriggerObservation) DeepCopy() *WindowsWebAppSlotSiteConfigAutoHealSettingTriggerObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotSiteConfigAutoHealSettingTriggerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotSiteConfigAutoHealSettingTriggerParameters) DeepCopyInto(out *WindowsWebAppSlotSiteConfigAutoHealSettingTriggerParameters) { + *out = *in + if in.PrivateMemoryKb != nil { + in, out := &in.PrivateMemoryKb, &out.PrivateMemoryKb + *out = new(float64) + **out = **in + } + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = new(SiteConfigAutoHealSettingTriggerRequestsParameters) + (*in).DeepCopyInto(*out) + } + if in.SlowRequest != nil { + in, out := &in.SlowRequest, &out.SlowRequest + *out = new(SiteConfigAutoHealSettingTriggerSlowRequestParameters) + (*in).DeepCopyInto(*out) + } + if in.StatusCode != nil { + in, out := &in.StatusCode, &out.StatusCode + *out = make([]SiteConfigAutoHealSettingTriggerStatusCodeParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotSiteConfigAutoHealSettingTriggerParameters. +func (in *WindowsWebAppSlotSiteConfigAutoHealSettingTriggerParameters) DeepCopy() *WindowsWebAppSlotSiteConfigAutoHealSettingTriggerParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotSiteConfigAutoHealSettingTriggerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotSiteConfigCorsInitParameters) DeepCopyInto(out *WindowsWebAppSlotSiteConfigCorsInitParameters) { + *out = *in + if in.AllowedOrigins != nil { + in, out := &in.AllowedOrigins, &out.AllowedOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SupportCredentials != nil { + in, out := &in.SupportCredentials, &out.SupportCredentials + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotSiteConfigCorsInitParameters. +func (in *WindowsWebAppSlotSiteConfigCorsInitParameters) DeepCopy() *WindowsWebAppSlotSiteConfigCorsInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotSiteConfigCorsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotSiteConfigCorsObservation) DeepCopyInto(out *WindowsWebAppSlotSiteConfigCorsObservation) { + *out = *in + if in.AllowedOrigins != nil { + in, out := &in.AllowedOrigins, &out.AllowedOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SupportCredentials != nil { + in, out := &in.SupportCredentials, &out.SupportCredentials + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotSiteConfigCorsObservation. 
+func (in *WindowsWebAppSlotSiteConfigCorsObservation) DeepCopy() *WindowsWebAppSlotSiteConfigCorsObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotSiteConfigCorsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotSiteConfigCorsParameters) DeepCopyInto(out *WindowsWebAppSlotSiteConfigCorsParameters) { + *out = *in + if in.AllowedOrigins != nil { + in, out := &in.AllowedOrigins, &out.AllowedOrigins + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SupportCredentials != nil { + in, out := &in.SupportCredentials, &out.SupportCredentials + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotSiteConfigCorsParameters. +func (in *WindowsWebAppSlotSiteConfigCorsParameters) DeepCopy() *WindowsWebAppSlotSiteConfigCorsParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotSiteConfigCorsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotSiteConfigIPRestrictionHeadersInitParameters) DeepCopyInto(out *WindowsWebAppSlotSiteConfigIPRestrictionHeadersInitParameters) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotSiteConfigIPRestrictionHeadersInitParameters. +func (in *WindowsWebAppSlotSiteConfigIPRestrictionHeadersInitParameters) DeepCopy() *WindowsWebAppSlotSiteConfigIPRestrictionHeadersInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotSiteConfigIPRestrictionHeadersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotSiteConfigIPRestrictionHeadersObservation) DeepCopyInto(out *WindowsWebAppSlotSiteConfigIPRestrictionHeadersObservation) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotSiteConfigIPRestrictionHeadersObservation. +func (in *WindowsWebAppSlotSiteConfigIPRestrictionHeadersObservation) DeepCopy() *WindowsWebAppSlotSiteConfigIPRestrictionHeadersObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotSiteConfigIPRestrictionHeadersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotSiteConfigIPRestrictionHeadersParameters) DeepCopyInto(out *WindowsWebAppSlotSiteConfigIPRestrictionHeadersParameters) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotSiteConfigIPRestrictionHeadersParameters. +func (in *WindowsWebAppSlotSiteConfigIPRestrictionHeadersParameters) DeepCopy() *WindowsWebAppSlotSiteConfigIPRestrictionHeadersParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotSiteConfigIPRestrictionHeadersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotSiteConfigIPRestrictionInitParameters) DeepCopyInto(out *WindowsWebAppSlotSiteConfigIPRestrictionInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]WindowsWebAppSlotSiteConfigIPRestrictionHeadersInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetIDRef != nil { + in, out := &in.VirtualNetworkSubnetIDRef, &out.VirtualNetworkSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkSubnetIDSelector != nil { + in, out := &in.VirtualNetworkSubnetIDSelector, &out.VirtualNetworkSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotSiteConfigIPRestrictionInitParameters. 
+func (in *WindowsWebAppSlotSiteConfigIPRestrictionInitParameters) DeepCopy() *WindowsWebAppSlotSiteConfigIPRestrictionInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotSiteConfigIPRestrictionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotSiteConfigIPRestrictionObservation) DeepCopyInto(out *WindowsWebAppSlotSiteConfigIPRestrictionObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]WindowsWebAppSlotSiteConfigIPRestrictionHeadersObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotSiteConfigIPRestrictionObservation. 
+func (in *WindowsWebAppSlotSiteConfigIPRestrictionObservation) DeepCopy() *WindowsWebAppSlotSiteConfigIPRestrictionObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotSiteConfigIPRestrictionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotSiteConfigIPRestrictionParameters) DeepCopyInto(out *WindowsWebAppSlotSiteConfigIPRestrictionParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]WindowsWebAppSlotSiteConfigIPRestrictionHeadersParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetIDRef != nil { + in, out := &in.VirtualNetworkSubnetIDRef, &out.VirtualNetworkSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkSubnetIDSelector != nil { + in, out := &in.VirtualNetworkSubnetIDSelector, &out.VirtualNetworkSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a 
new WindowsWebAppSlotSiteConfigIPRestrictionParameters. +func (in *WindowsWebAppSlotSiteConfigIPRestrictionParameters) DeepCopy() *WindowsWebAppSlotSiteConfigIPRestrictionParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotSiteConfigIPRestrictionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotSiteConfigInitParameters) DeepCopyInto(out *WindowsWebAppSlotSiteConfigInitParameters) { + *out = *in + if in.APIDefinitionURL != nil { + in, out := &in.APIDefinitionURL, &out.APIDefinitionURL + *out = new(string) + **out = **in + } + if in.APIManagementAPIID != nil { + in, out := &in.APIManagementAPIID, &out.APIManagementAPIID + *out = new(string) + **out = **in + } + if in.AlwaysOn != nil { + in, out := &in.AlwaysOn, &out.AlwaysOn + *out = new(bool) + **out = **in + } + if in.AppCommandLine != nil { + in, out := &in.AppCommandLine, &out.AppCommandLine + *out = new(string) + **out = **in + } + if in.ApplicationStack != nil { + in, out := &in.ApplicationStack, &out.ApplicationStack + *out = new(WindowsWebAppSlotSiteConfigApplicationStackInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AutoHealEnabled != nil { + in, out := &in.AutoHealEnabled, &out.AutoHealEnabled + *out = new(bool) + **out = **in + } + if in.AutoHealSetting != nil { + in, out := &in.AutoHealSetting, &out.AutoHealSetting + *out = new(WindowsWebAppSlotSiteConfigAutoHealSettingInitParameters) + (*in).DeepCopyInto(*out) + } + if in.AutoSwapSlotName != nil { + in, out := &in.AutoSwapSlotName, &out.AutoSwapSlotName + *out = new(string) + **out = **in + } + if in.ContainerRegistryManagedIdentityClientID != nil { + in, out := &in.ContainerRegistryManagedIdentityClientID, &out.ContainerRegistryManagedIdentityClientID + *out = new(string) + **out = **in + } + if in.ContainerRegistryUseManagedIdentity != nil { + in, out := 
&in.ContainerRegistryUseManagedIdentity, &out.ContainerRegistryUseManagedIdentity + *out = new(bool) + **out = **in + } + if in.Cors != nil { + in, out := &in.Cors, &out.Cors + *out = new(WindowsWebAppSlotSiteConfigCorsInitParameters) + (*in).DeepCopyInto(*out) + } + if in.DefaultDocuments != nil { + in, out := &in.DefaultDocuments, &out.DefaultDocuments + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FtpsState != nil { + in, out := &in.FtpsState, &out.FtpsState + *out = new(string) + **out = **in + } + if in.HealthCheckEvictionTimeInMin != nil { + in, out := &in.HealthCheckEvictionTimeInMin, &out.HealthCheckEvictionTimeInMin + *out = new(float64) + **out = **in + } + if in.HealthCheckPath != nil { + in, out := &in.HealthCheckPath, &out.HealthCheckPath + *out = new(string) + **out = **in + } + if in.Http2Enabled != nil { + in, out := &in.Http2Enabled, &out.Http2Enabled + *out = new(bool) + **out = **in + } + if in.IPRestriction != nil { + in, out := &in.IPRestriction, &out.IPRestriction + *out = make([]WindowsWebAppSlotSiteConfigIPRestrictionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPRestrictionDefaultAction != nil { + in, out := &in.IPRestrictionDefaultAction, &out.IPRestrictionDefaultAction + *out = new(string) + **out = **in + } + if in.LoadBalancingMode != nil { + in, out := &in.LoadBalancingMode, &out.LoadBalancingMode + *out = new(string) + **out = **in + } + if in.LocalMySQLEnabled != nil { + in, out := &in.LocalMySQLEnabled, &out.LocalMySQLEnabled + *out = new(bool) + **out = **in + } + if in.ManagedPipelineMode != nil { + in, out := &in.ManagedPipelineMode, &out.ManagedPipelineMode + *out = new(string) + **out = **in + } + if in.MinimumTLSVersion != nil { + in, out := &in.MinimumTLSVersion, &out.MinimumTLSVersion + *out = new(string) + **out = **in + } + if 
in.RemoteDebuggingEnabled != nil { + in, out := &in.RemoteDebuggingEnabled, &out.RemoteDebuggingEnabled + *out = new(bool) + **out = **in + } + if in.RemoteDebuggingVersion != nil { + in, out := &in.RemoteDebuggingVersion, &out.RemoteDebuggingVersion + *out = new(string) + **out = **in + } + if in.ScmIPRestriction != nil { + in, out := &in.ScmIPRestriction, &out.ScmIPRestriction + *out = make([]WindowsWebAppSlotSiteConfigScmIPRestrictionInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ScmIPRestrictionDefaultAction != nil { + in, out := &in.ScmIPRestrictionDefaultAction, &out.ScmIPRestrictionDefaultAction + *out = new(string) + **out = **in + } + if in.ScmMinimumTLSVersion != nil { + in, out := &in.ScmMinimumTLSVersion, &out.ScmMinimumTLSVersion + *out = new(string) + **out = **in + } + if in.ScmUseMainIPRestriction != nil { + in, out := &in.ScmUseMainIPRestriction, &out.ScmUseMainIPRestriction + *out = new(bool) + **out = **in + } + if in.Use32BitWorker != nil { + in, out := &in.Use32BitWorker, &out.Use32BitWorker + *out = new(bool) + **out = **in + } + if in.VirtualApplication != nil { + in, out := &in.VirtualApplication, &out.VirtualApplication + *out = make([]SiteConfigVirtualApplicationInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VnetRouteAllEnabled != nil { + in, out := &in.VnetRouteAllEnabled, &out.VnetRouteAllEnabled + *out = new(bool) + **out = **in + } + if in.WebsocketsEnabled != nil { + in, out := &in.WebsocketsEnabled, &out.WebsocketsEnabled + *out = new(bool) + **out = **in + } + if in.WorkerCount != nil { + in, out := &in.WorkerCount, &out.WorkerCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotSiteConfigInitParameters. 
+func (in *WindowsWebAppSlotSiteConfigInitParameters) DeepCopy() *WindowsWebAppSlotSiteConfigInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotSiteConfigInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotSiteConfigObservation) DeepCopyInto(out *WindowsWebAppSlotSiteConfigObservation) { + *out = *in + if in.APIDefinitionURL != nil { + in, out := &in.APIDefinitionURL, &out.APIDefinitionURL + *out = new(string) + **out = **in + } + if in.APIManagementAPIID != nil { + in, out := &in.APIManagementAPIID, &out.APIManagementAPIID + *out = new(string) + **out = **in + } + if in.AlwaysOn != nil { + in, out := &in.AlwaysOn, &out.AlwaysOn + *out = new(bool) + **out = **in + } + if in.AppCommandLine != nil { + in, out := &in.AppCommandLine, &out.AppCommandLine + *out = new(string) + **out = **in + } + if in.ApplicationStack != nil { + in, out := &in.ApplicationStack, &out.ApplicationStack + *out = new(WindowsWebAppSlotSiteConfigApplicationStackObservation) + (*in).DeepCopyInto(*out) + } + if in.AutoHealEnabled != nil { + in, out := &in.AutoHealEnabled, &out.AutoHealEnabled + *out = new(bool) + **out = **in + } + if in.AutoHealSetting != nil { + in, out := &in.AutoHealSetting, &out.AutoHealSetting + *out = new(WindowsWebAppSlotSiteConfigAutoHealSettingObservation) + (*in).DeepCopyInto(*out) + } + if in.AutoSwapSlotName != nil { + in, out := &in.AutoSwapSlotName, &out.AutoSwapSlotName + *out = new(string) + **out = **in + } + if in.ContainerRegistryManagedIdentityClientID != nil { + in, out := &in.ContainerRegistryManagedIdentityClientID, &out.ContainerRegistryManagedIdentityClientID + *out = new(string) + **out = **in + } + if in.ContainerRegistryUseManagedIdentity != nil { + in, out := &in.ContainerRegistryUseManagedIdentity, &out.ContainerRegistryUseManagedIdentity + *out = new(bool) + **out = **in 
+ } + if in.Cors != nil { + in, out := &in.Cors, &out.Cors + *out = new(WindowsWebAppSlotSiteConfigCorsObservation) + (*in).DeepCopyInto(*out) + } + if in.DefaultDocuments != nil { + in, out := &in.DefaultDocuments, &out.DefaultDocuments + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.DetailedErrorLoggingEnabled != nil { + in, out := &in.DetailedErrorLoggingEnabled, &out.DetailedErrorLoggingEnabled + *out = new(bool) + **out = **in + } + if in.FtpsState != nil { + in, out := &in.FtpsState, &out.FtpsState + *out = new(string) + **out = **in + } + if in.HealthCheckEvictionTimeInMin != nil { + in, out := &in.HealthCheckEvictionTimeInMin, &out.HealthCheckEvictionTimeInMin + *out = new(float64) + **out = **in + } + if in.HealthCheckPath != nil { + in, out := &in.HealthCheckPath, &out.HealthCheckPath + *out = new(string) + **out = **in + } + if in.Http2Enabled != nil { + in, out := &in.Http2Enabled, &out.Http2Enabled + *out = new(bool) + **out = **in + } + if in.IPRestriction != nil { + in, out := &in.IPRestriction, &out.IPRestriction + *out = make([]WindowsWebAppSlotSiteConfigIPRestrictionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPRestrictionDefaultAction != nil { + in, out := &in.IPRestrictionDefaultAction, &out.IPRestrictionDefaultAction + *out = new(string) + **out = **in + } + if in.LoadBalancingMode != nil { + in, out := &in.LoadBalancingMode, &out.LoadBalancingMode + *out = new(string) + **out = **in + } + if in.LocalMySQLEnabled != nil { + in, out := &in.LocalMySQLEnabled, &out.LocalMySQLEnabled + *out = new(bool) + **out = **in + } + if in.ManagedPipelineMode != nil { + in, out := &in.ManagedPipelineMode, &out.ManagedPipelineMode + *out = new(string) + **out = **in + } + if in.MinimumTLSVersion != nil { + in, out := &in.MinimumTLSVersion, &out.MinimumTLSVersion + *out = new(string) + 
**out = **in + } + if in.RemoteDebuggingEnabled != nil { + in, out := &in.RemoteDebuggingEnabled, &out.RemoteDebuggingEnabled + *out = new(bool) + **out = **in + } + if in.RemoteDebuggingVersion != nil { + in, out := &in.RemoteDebuggingVersion, &out.RemoteDebuggingVersion + *out = new(string) + **out = **in + } + if in.ScmIPRestriction != nil { + in, out := &in.ScmIPRestriction, &out.ScmIPRestriction + *out = make([]WindowsWebAppSlotSiteConfigScmIPRestrictionObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ScmIPRestrictionDefaultAction != nil { + in, out := &in.ScmIPRestrictionDefaultAction, &out.ScmIPRestrictionDefaultAction + *out = new(string) + **out = **in + } + if in.ScmMinimumTLSVersion != nil { + in, out := &in.ScmMinimumTLSVersion, &out.ScmMinimumTLSVersion + *out = new(string) + **out = **in + } + if in.ScmType != nil { + in, out := &in.ScmType, &out.ScmType + *out = new(string) + **out = **in + } + if in.ScmUseMainIPRestriction != nil { + in, out := &in.ScmUseMainIPRestriction, &out.ScmUseMainIPRestriction + *out = new(bool) + **out = **in + } + if in.Use32BitWorker != nil { + in, out := &in.Use32BitWorker, &out.Use32BitWorker + *out = new(bool) + **out = **in + } + if in.VirtualApplication != nil { + in, out := &in.VirtualApplication, &out.VirtualApplication + *out = make([]SiteConfigVirtualApplicationObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VnetRouteAllEnabled != nil { + in, out := &in.VnetRouteAllEnabled, &out.VnetRouteAllEnabled + *out = new(bool) + **out = **in + } + if in.WebsocketsEnabled != nil { + in, out := &in.WebsocketsEnabled, &out.WebsocketsEnabled + *out = new(bool) + **out = **in + } + if in.WindowsFxVersion != nil { + in, out := &in.WindowsFxVersion, &out.WindowsFxVersion + *out = new(string) + **out = **in + } + if in.WorkerCount != nil { + in, out := &in.WorkerCount, &out.WorkerCount + *out = new(float64) + **out = **in + } +} + 
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotSiteConfigObservation. +func (in *WindowsWebAppSlotSiteConfigObservation) DeepCopy() *WindowsWebAppSlotSiteConfigObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotSiteConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotSiteConfigParameters) DeepCopyInto(out *WindowsWebAppSlotSiteConfigParameters) { + *out = *in + if in.APIDefinitionURL != nil { + in, out := &in.APIDefinitionURL, &out.APIDefinitionURL + *out = new(string) + **out = **in + } + if in.APIManagementAPIID != nil { + in, out := &in.APIManagementAPIID, &out.APIManagementAPIID + *out = new(string) + **out = **in + } + if in.AlwaysOn != nil { + in, out := &in.AlwaysOn, &out.AlwaysOn + *out = new(bool) + **out = **in + } + if in.AppCommandLine != nil { + in, out := &in.AppCommandLine, &out.AppCommandLine + *out = new(string) + **out = **in + } + if in.ApplicationStack != nil { + in, out := &in.ApplicationStack, &out.ApplicationStack + *out = new(WindowsWebAppSlotSiteConfigApplicationStackParameters) + (*in).DeepCopyInto(*out) + } + if in.AutoHealEnabled != nil { + in, out := &in.AutoHealEnabled, &out.AutoHealEnabled + *out = new(bool) + **out = **in + } + if in.AutoHealSetting != nil { + in, out := &in.AutoHealSetting, &out.AutoHealSetting + *out = new(WindowsWebAppSlotSiteConfigAutoHealSettingParameters) + (*in).DeepCopyInto(*out) + } + if in.AutoSwapSlotName != nil { + in, out := &in.AutoSwapSlotName, &out.AutoSwapSlotName + *out = new(string) + **out = **in + } + if in.ContainerRegistryManagedIdentityClientID != nil { + in, out := &in.ContainerRegistryManagedIdentityClientID, &out.ContainerRegistryManagedIdentityClientID + *out = new(string) + **out = **in + } + if in.ContainerRegistryUseManagedIdentity != nil { + in, out := 
&in.ContainerRegistryUseManagedIdentity, &out.ContainerRegistryUseManagedIdentity + *out = new(bool) + **out = **in + } + if in.Cors != nil { + in, out := &in.Cors, &out.Cors + *out = new(WindowsWebAppSlotSiteConfigCorsParameters) + (*in).DeepCopyInto(*out) + } + if in.DefaultDocuments != nil { + in, out := &in.DefaultDocuments, &out.DefaultDocuments + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FtpsState != nil { + in, out := &in.FtpsState, &out.FtpsState + *out = new(string) + **out = **in + } + if in.HealthCheckEvictionTimeInMin != nil { + in, out := &in.HealthCheckEvictionTimeInMin, &out.HealthCheckEvictionTimeInMin + *out = new(float64) + **out = **in + } + if in.HealthCheckPath != nil { + in, out := &in.HealthCheckPath, &out.HealthCheckPath + *out = new(string) + **out = **in + } + if in.Http2Enabled != nil { + in, out := &in.Http2Enabled, &out.Http2Enabled + *out = new(bool) + **out = **in + } + if in.IPRestriction != nil { + in, out := &in.IPRestriction, &out.IPRestriction + *out = make([]WindowsWebAppSlotSiteConfigIPRestrictionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPRestrictionDefaultAction != nil { + in, out := &in.IPRestrictionDefaultAction, &out.IPRestrictionDefaultAction + *out = new(string) + **out = **in + } + if in.LoadBalancingMode != nil { + in, out := &in.LoadBalancingMode, &out.LoadBalancingMode + *out = new(string) + **out = **in + } + if in.LocalMySQLEnabled != nil { + in, out := &in.LocalMySQLEnabled, &out.LocalMySQLEnabled + *out = new(bool) + **out = **in + } + if in.ManagedPipelineMode != nil { + in, out := &in.ManagedPipelineMode, &out.ManagedPipelineMode + *out = new(string) + **out = **in + } + if in.MinimumTLSVersion != nil { + in, out := &in.MinimumTLSVersion, &out.MinimumTLSVersion + *out = new(string) + **out = **in + } + if in.RemoteDebuggingEnabled 
!= nil { + in, out := &in.RemoteDebuggingEnabled, &out.RemoteDebuggingEnabled + *out = new(bool) + **out = **in + } + if in.RemoteDebuggingVersion != nil { + in, out := &in.RemoteDebuggingVersion, &out.RemoteDebuggingVersion + *out = new(string) + **out = **in + } + if in.ScmIPRestriction != nil { + in, out := &in.ScmIPRestriction, &out.ScmIPRestriction + *out = make([]WindowsWebAppSlotSiteConfigScmIPRestrictionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ScmIPRestrictionDefaultAction != nil { + in, out := &in.ScmIPRestrictionDefaultAction, &out.ScmIPRestrictionDefaultAction + *out = new(string) + **out = **in + } + if in.ScmMinimumTLSVersion != nil { + in, out := &in.ScmMinimumTLSVersion, &out.ScmMinimumTLSVersion + *out = new(string) + **out = **in + } + if in.ScmUseMainIPRestriction != nil { + in, out := &in.ScmUseMainIPRestriction, &out.ScmUseMainIPRestriction + *out = new(bool) + **out = **in + } + if in.Use32BitWorker != nil { + in, out := &in.Use32BitWorker, &out.Use32BitWorker + *out = new(bool) + **out = **in + } + if in.VirtualApplication != nil { + in, out := &in.VirtualApplication, &out.VirtualApplication + *out = make([]SiteConfigVirtualApplicationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VnetRouteAllEnabled != nil { + in, out := &in.VnetRouteAllEnabled, &out.VnetRouteAllEnabled + *out = new(bool) + **out = **in + } + if in.WebsocketsEnabled != nil { + in, out := &in.WebsocketsEnabled, &out.WebsocketsEnabled + *out = new(bool) + **out = **in + } + if in.WorkerCount != nil { + in, out := &in.WorkerCount, &out.WorkerCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotSiteConfigParameters. 
+func (in *WindowsWebAppSlotSiteConfigParameters) DeepCopy() *WindowsWebAppSlotSiteConfigParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotSiteConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotSiteConfigScmIPRestrictionHeadersInitParameters) DeepCopyInto(out *WindowsWebAppSlotSiteConfigScmIPRestrictionHeadersInitParameters) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotSiteConfigScmIPRestrictionHeadersInitParameters. 
+func (in *WindowsWebAppSlotSiteConfigScmIPRestrictionHeadersInitParameters) DeepCopy() *WindowsWebAppSlotSiteConfigScmIPRestrictionHeadersInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotSiteConfigScmIPRestrictionHeadersInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotSiteConfigScmIPRestrictionHeadersObservation) DeepCopyInto(out *WindowsWebAppSlotSiteConfigScmIPRestrictionHeadersObservation) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotSiteConfigScmIPRestrictionHeadersObservation. 
+func (in *WindowsWebAppSlotSiteConfigScmIPRestrictionHeadersObservation) DeepCopy() *WindowsWebAppSlotSiteConfigScmIPRestrictionHeadersObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotSiteConfigScmIPRestrictionHeadersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotSiteConfigScmIPRestrictionHeadersParameters) DeepCopyInto(out *WindowsWebAppSlotSiteConfigScmIPRestrictionHeadersParameters) { + *out = *in + if in.XAzureFdid != nil { + in, out := &in.XAzureFdid, &out.XAzureFdid + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XFdHealthProbe != nil { + in, out := &in.XFdHealthProbe, &out.XFdHealthProbe + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedFor != nil { + in, out := &in.XForwardedFor, &out.XForwardedFor + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.XForwardedHost != nil { + in, out := &in.XForwardedHost, &out.XForwardedHost + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotSiteConfigScmIPRestrictionHeadersParameters. 
+func (in *WindowsWebAppSlotSiteConfigScmIPRestrictionHeadersParameters) DeepCopy() *WindowsWebAppSlotSiteConfigScmIPRestrictionHeadersParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotSiteConfigScmIPRestrictionHeadersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotSiteConfigScmIPRestrictionInitParameters) DeepCopyInto(out *WindowsWebAppSlotSiteConfigScmIPRestrictionInitParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]WindowsWebAppSlotSiteConfigScmIPRestrictionHeadersInitParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetIDRef != nil { + in, out := &in.VirtualNetworkSubnetIDRef, &out.VirtualNetworkSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkSubnetIDSelector != nil { + in, out := &in.VirtualNetworkSubnetIDSelector, &out.VirtualNetworkSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated 
deepcopy function, copying the receiver, creating a new WindowsWebAppSlotSiteConfigScmIPRestrictionInitParameters. +func (in *WindowsWebAppSlotSiteConfigScmIPRestrictionInitParameters) DeepCopy() *WindowsWebAppSlotSiteConfigScmIPRestrictionInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotSiteConfigScmIPRestrictionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotSiteConfigScmIPRestrictionObservation) DeepCopyInto(out *WindowsWebAppSlotSiteConfigScmIPRestrictionObservation) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]WindowsWebAppSlotSiteConfigScmIPRestrictionHeadersObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotSiteConfigScmIPRestrictionObservation. 
+func (in *WindowsWebAppSlotSiteConfigScmIPRestrictionObservation) DeepCopy() *WindowsWebAppSlotSiteConfigScmIPRestrictionObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotSiteConfigScmIPRestrictionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotSiteConfigScmIPRestrictionParameters) DeepCopyInto(out *WindowsWebAppSlotSiteConfigScmIPRestrictionParameters) { + *out = *in + if in.Action != nil { + in, out := &in.Action, &out.Action + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]WindowsWebAppSlotSiteConfigScmIPRestrictionHeadersParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(float64) + **out = **in + } + if in.ServiceTag != nil { + in, out := &in.ServiceTag, &out.ServiceTag + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetID != nil { + in, out := &in.VirtualNetworkSubnetID, &out.VirtualNetworkSubnetID + *out = new(string) + **out = **in + } + if in.VirtualNetworkSubnetIDRef != nil { + in, out := &in.VirtualNetworkSubnetIDRef, &out.VirtualNetworkSubnetIDRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.VirtualNetworkSubnetIDSelector != nil { + in, out := &in.VirtualNetworkSubnetIDSelector, &out.VirtualNetworkSubnetIDSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the 
receiver, creating a new WindowsWebAppSlotSiteConfigScmIPRestrictionParameters. +func (in *WindowsWebAppSlotSiteConfigScmIPRestrictionParameters) DeepCopy() *WindowsWebAppSlotSiteConfigScmIPRestrictionParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotSiteConfigScmIPRestrictionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotSiteCredentialInitParameters) DeepCopyInto(out *WindowsWebAppSlotSiteCredentialInitParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotSiteCredentialInitParameters. +func (in *WindowsWebAppSlotSiteCredentialInitParameters) DeepCopy() *WindowsWebAppSlotSiteCredentialInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotSiteCredentialInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotSiteCredentialObservation) DeepCopyInto(out *WindowsWebAppSlotSiteCredentialObservation) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Password != nil { + in, out := &in.Password, &out.Password + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotSiteCredentialObservation. +func (in *WindowsWebAppSlotSiteCredentialObservation) DeepCopy() *WindowsWebAppSlotSiteCredentialObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotSiteCredentialObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotSiteCredentialParameters) DeepCopyInto(out *WindowsWebAppSlotSiteCredentialParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotSiteCredentialParameters. +func (in *WindowsWebAppSlotSiteCredentialParameters) DeepCopy() *WindowsWebAppSlotSiteCredentialParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotSiteCredentialParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotSpec) DeepCopyInto(out *WindowsWebAppSlotSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotSpec. +func (in *WindowsWebAppSlotSpec) DeepCopy() *WindowsWebAppSlotSpec { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotStatus) DeepCopyInto(out *WindowsWebAppSlotStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotStatus. +func (in *WindowsWebAppSlotStatus) DeepCopy() *WindowsWebAppSlotStatus { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppSlotStorageAccountInitParameters) DeepCopyInto(out *WindowsWebAppSlotStorageAccountInitParameters) { + *out = *in + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } + if in.MountPath != nil { + in, out := &in.MountPath, &out.MountPath + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ShareName != nil { + in, out := &in.ShareName, &out.ShareName + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotStorageAccountInitParameters. +func (in *WindowsWebAppSlotStorageAccountInitParameters) DeepCopy() *WindowsWebAppSlotStorageAccountInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotStorageAccountInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotStorageAccountObservation) DeepCopyInto(out *WindowsWebAppSlotStorageAccountObservation) { + *out = *in + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } + if in.MountPath != nil { + in, out := &in.MountPath, &out.MountPath + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ShareName != nil { + in, out := &in.ShareName, &out.ShareName + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotStorageAccountObservation. 
+func (in *WindowsWebAppSlotStorageAccountObservation) DeepCopy() *WindowsWebAppSlotStorageAccountObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotStorageAccountObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSlotStorageAccountParameters) DeepCopyInto(out *WindowsWebAppSlotStorageAccountParameters) { + *out = *in + out.AccessKeySecretRef = in.AccessKeySecretRef + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } + if in.MountPath != nil { + in, out := &in.MountPath, &out.MountPath + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ShareName != nil { + in, out := &in.ShareName, &out.ShareName + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSlotStorageAccountParameters. +func (in *WindowsWebAppSlotStorageAccountParameters) DeepCopy() *WindowsWebAppSlotStorageAccountParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppSlotStorageAccountParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppSpec) DeepCopyInto(out *WindowsWebAppSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppSpec. 
+func (in *WindowsWebAppSpec) DeepCopy() *WindowsWebAppSpec { + if in == nil { + return nil + } + out := new(WindowsWebAppSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppStatus) DeepCopyInto(out *WindowsWebAppStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppStatus. +func (in *WindowsWebAppStatus) DeepCopy() *WindowsWebAppStatus { + if in == nil { + return nil + } + out := new(WindowsWebAppStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppStickySettingsInitParameters) DeepCopyInto(out *WindowsWebAppStickySettingsInitParameters) { + *out = *in + if in.AppSettingNames != nil { + in, out := &in.AppSettingNames, &out.AppSettingNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ConnectionStringNames != nil { + in, out := &in.ConnectionStringNames, &out.ConnectionStringNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppStickySettingsInitParameters. 
+func (in *WindowsWebAppStickySettingsInitParameters) DeepCopy() *WindowsWebAppStickySettingsInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppStickySettingsInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppStickySettingsObservation) DeepCopyInto(out *WindowsWebAppStickySettingsObservation) { + *out = *in + if in.AppSettingNames != nil { + in, out := &in.AppSettingNames, &out.AppSettingNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ConnectionStringNames != nil { + in, out := &in.ConnectionStringNames, &out.ConnectionStringNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppStickySettingsObservation. +func (in *WindowsWebAppStickySettingsObservation) DeepCopy() *WindowsWebAppStickySettingsObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppStickySettingsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppStickySettingsParameters) DeepCopyInto(out *WindowsWebAppStickySettingsParameters) { + *out = *in + if in.AppSettingNames != nil { + in, out := &in.AppSettingNames, &out.AppSettingNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ConnectionStringNames != nil { + in, out := &in.ConnectionStringNames, &out.ConnectionStringNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppStickySettingsParameters. +func (in *WindowsWebAppStickySettingsParameters) DeepCopy() *WindowsWebAppStickySettingsParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppStickySettingsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppStorageAccountInitParameters) DeepCopyInto(out *WindowsWebAppStorageAccountInitParameters) { + *out = *in + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } + if in.MountPath != nil { + in, out := &in.MountPath, &out.MountPath + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ShareName != nil { + in, out := &in.ShareName, &out.ShareName + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppStorageAccountInitParameters. 
+func (in *WindowsWebAppStorageAccountInitParameters) DeepCopy() *WindowsWebAppStorageAccountInitParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppStorageAccountInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsWebAppStorageAccountObservation) DeepCopyInto(out *WindowsWebAppStorageAccountObservation) { + *out = *in + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } + if in.MountPath != nil { + in, out := &in.MountPath, &out.MountPath + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ShareName != nil { + in, out := &in.ShareName, &out.ShareName + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppStorageAccountObservation. +func (in *WindowsWebAppStorageAccountObservation) DeepCopy() *WindowsWebAppStorageAccountObservation { + if in == nil { + return nil + } + out := new(WindowsWebAppStorageAccountObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WindowsWebAppStorageAccountParameters) DeepCopyInto(out *WindowsWebAppStorageAccountParameters) { + *out = *in + out.AccessKeySecretRef = in.AccessKeySecretRef + if in.AccountName != nil { + in, out := &in.AccountName, &out.AccountName + *out = new(string) + **out = **in + } + if in.MountPath != nil { + in, out := &in.MountPath, &out.MountPath + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.ShareName != nil { + in, out := &in.ShareName, &out.ShareName + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsWebAppStorageAccountParameters. +func (in *WindowsWebAppStorageAccountParameters) DeepCopy() *WindowsWebAppStorageAccountParameters { + if in == nil { + return nil + } + out := new(WindowsWebAppStorageAccountParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/web/v1beta2/zz_generated.managed.go b/apis/web/v1beta2/zz_generated.managed.go new file mode 100644 index 000000000..e96ca4237 --- /dev/null +++ b/apis/web/v1beta2/zz_generated.managed.go @@ -0,0 +1,728 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this AppServicePlan. +func (mg *AppServicePlan) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this AppServicePlan. +func (mg *AppServicePlan) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this AppServicePlan. 
+func (mg *AppServicePlan) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this AppServicePlan. +func (mg *AppServicePlan) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this AppServicePlan. +func (mg *AppServicePlan) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this AppServicePlan. +func (mg *AppServicePlan) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this AppServicePlan. +func (mg *AppServicePlan) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this AppServicePlan. +func (mg *AppServicePlan) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this AppServicePlan. +func (mg *AppServicePlan) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this AppServicePlan. +func (mg *AppServicePlan) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this AppServicePlan. +func (mg *AppServicePlan) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this AppServicePlan. +func (mg *AppServicePlan) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this FunctionApp. +func (mg *FunctionApp) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this FunctionApp. 
+func (mg *FunctionApp) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this FunctionApp. +func (mg *FunctionApp) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this FunctionApp. +func (mg *FunctionApp) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this FunctionApp. +func (mg *FunctionApp) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this FunctionApp. +func (mg *FunctionApp) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this FunctionApp. +func (mg *FunctionApp) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this FunctionApp. +func (mg *FunctionApp) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this FunctionApp. +func (mg *FunctionApp) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this FunctionApp. +func (mg *FunctionApp) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this FunctionApp. +func (mg *FunctionApp) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this FunctionApp. +func (mg *FunctionApp) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this FunctionAppSlot. 
+func (mg *FunctionAppSlot) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this FunctionAppSlot. +func (mg *FunctionAppSlot) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this FunctionAppSlot. +func (mg *FunctionAppSlot) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this FunctionAppSlot. +func (mg *FunctionAppSlot) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this FunctionAppSlot. +func (mg *FunctionAppSlot) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this FunctionAppSlot. +func (mg *FunctionAppSlot) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this FunctionAppSlot. +func (mg *FunctionAppSlot) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this FunctionAppSlot. +func (mg *FunctionAppSlot) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this FunctionAppSlot. +func (mg *FunctionAppSlot) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this FunctionAppSlot. +func (mg *FunctionAppSlot) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this FunctionAppSlot. +func (mg *FunctionAppSlot) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this FunctionAppSlot. 
+func (mg *FunctionAppSlot) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this LinuxFunctionApp. +func (mg *LinuxFunctionApp) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this LinuxFunctionApp. +func (mg *LinuxFunctionApp) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this LinuxFunctionApp. +func (mg *LinuxFunctionApp) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this LinuxFunctionApp. +func (mg *LinuxFunctionApp) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this LinuxFunctionApp. +func (mg *LinuxFunctionApp) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this LinuxFunctionApp. +func (mg *LinuxFunctionApp) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this LinuxFunctionApp. +func (mg *LinuxFunctionApp) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this LinuxFunctionApp. +func (mg *LinuxFunctionApp) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this LinuxFunctionApp. +func (mg *LinuxFunctionApp) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this LinuxFunctionApp. +func (mg *LinuxFunctionApp) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this LinuxFunctionApp. 
+func (mg *LinuxFunctionApp) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this LinuxFunctionApp. +func (mg *LinuxFunctionApp) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this LinuxFunctionAppSlot. +func (mg *LinuxFunctionAppSlot) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this LinuxFunctionAppSlot. +func (mg *LinuxFunctionAppSlot) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this LinuxFunctionAppSlot. +func (mg *LinuxFunctionAppSlot) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this LinuxFunctionAppSlot. +func (mg *LinuxFunctionAppSlot) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this LinuxFunctionAppSlot. +func (mg *LinuxFunctionAppSlot) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this LinuxFunctionAppSlot. +func (mg *LinuxFunctionAppSlot) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this LinuxFunctionAppSlot. +func (mg *LinuxFunctionAppSlot) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this LinuxFunctionAppSlot. +func (mg *LinuxFunctionAppSlot) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this LinuxFunctionAppSlot. 
+func (mg *LinuxFunctionAppSlot) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this LinuxFunctionAppSlot. +func (mg *LinuxFunctionAppSlot) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this LinuxFunctionAppSlot. +func (mg *LinuxFunctionAppSlot) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this LinuxFunctionAppSlot. +func (mg *LinuxFunctionAppSlot) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this LinuxWebApp. +func (mg *LinuxWebApp) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this LinuxWebApp. +func (mg *LinuxWebApp) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this LinuxWebApp. +func (mg *LinuxWebApp) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this LinuxWebApp. +func (mg *LinuxWebApp) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this LinuxWebApp. +func (mg *LinuxWebApp) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this LinuxWebApp. +func (mg *LinuxWebApp) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this LinuxWebApp. +func (mg *LinuxWebApp) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this LinuxWebApp. 
+func (mg *LinuxWebApp) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this LinuxWebApp. +func (mg *LinuxWebApp) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this LinuxWebApp. +func (mg *LinuxWebApp) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this LinuxWebApp. +func (mg *LinuxWebApp) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this LinuxWebApp. +func (mg *LinuxWebApp) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this LinuxWebAppSlot. +func (mg *LinuxWebAppSlot) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this LinuxWebAppSlot. +func (mg *LinuxWebAppSlot) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this LinuxWebAppSlot. +func (mg *LinuxWebAppSlot) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this LinuxWebAppSlot. +func (mg *LinuxWebAppSlot) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this LinuxWebAppSlot. +func (mg *LinuxWebAppSlot) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this LinuxWebAppSlot. +func (mg *LinuxWebAppSlot) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this LinuxWebAppSlot. 
+func (mg *LinuxWebAppSlot) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this LinuxWebAppSlot. +func (mg *LinuxWebAppSlot) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this LinuxWebAppSlot. +func (mg *LinuxWebAppSlot) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this LinuxWebAppSlot. +func (mg *LinuxWebAppSlot) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this LinuxWebAppSlot. +func (mg *LinuxWebAppSlot) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this LinuxWebAppSlot. +func (mg *LinuxWebAppSlot) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this StaticSite. +func (mg *StaticSite) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this StaticSite. +func (mg *StaticSite) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this StaticSite. +func (mg *StaticSite) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this StaticSite. +func (mg *StaticSite) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this StaticSite. +func (mg *StaticSite) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this StaticSite. 
+func (mg *StaticSite) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this StaticSite. +func (mg *StaticSite) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this StaticSite. +func (mg *StaticSite) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this StaticSite. +func (mg *StaticSite) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this StaticSite. +func (mg *StaticSite) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this StaticSite. +func (mg *StaticSite) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this StaticSite. +func (mg *StaticSite) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this WindowsFunctionApp. +func (mg *WindowsFunctionApp) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this WindowsFunctionApp. +func (mg *WindowsFunctionApp) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this WindowsFunctionApp. +func (mg *WindowsFunctionApp) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this WindowsFunctionApp. +func (mg *WindowsFunctionApp) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this WindowsFunctionApp. 
+func (mg *WindowsFunctionApp) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this WindowsFunctionApp. +func (mg *WindowsFunctionApp) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this WindowsFunctionApp. +func (mg *WindowsFunctionApp) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this WindowsFunctionApp. +func (mg *WindowsFunctionApp) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this WindowsFunctionApp. +func (mg *WindowsFunctionApp) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this WindowsFunctionApp. +func (mg *WindowsFunctionApp) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this WindowsFunctionApp. +func (mg *WindowsFunctionApp) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this WindowsFunctionApp. +func (mg *WindowsFunctionApp) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this WindowsFunctionAppSlot. +func (mg *WindowsFunctionAppSlot) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this WindowsFunctionAppSlot. +func (mg *WindowsFunctionAppSlot) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this WindowsFunctionAppSlot. 
+func (mg *WindowsFunctionAppSlot) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this WindowsFunctionAppSlot. +func (mg *WindowsFunctionAppSlot) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this WindowsFunctionAppSlot. +func (mg *WindowsFunctionAppSlot) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this WindowsFunctionAppSlot. +func (mg *WindowsFunctionAppSlot) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this WindowsFunctionAppSlot. +func (mg *WindowsFunctionAppSlot) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this WindowsFunctionAppSlot. +func (mg *WindowsFunctionAppSlot) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this WindowsFunctionAppSlot. +func (mg *WindowsFunctionAppSlot) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this WindowsFunctionAppSlot. +func (mg *WindowsFunctionAppSlot) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this WindowsFunctionAppSlot. +func (mg *WindowsFunctionAppSlot) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this WindowsFunctionAppSlot. +func (mg *WindowsFunctionAppSlot) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this WindowsWebApp. 
+func (mg *WindowsWebApp) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this WindowsWebApp. +func (mg *WindowsWebApp) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this WindowsWebApp. +func (mg *WindowsWebApp) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this WindowsWebApp. +func (mg *WindowsWebApp) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this WindowsWebApp. +func (mg *WindowsWebApp) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this WindowsWebApp. +func (mg *WindowsWebApp) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this WindowsWebApp. +func (mg *WindowsWebApp) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this WindowsWebApp. +func (mg *WindowsWebApp) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this WindowsWebApp. +func (mg *WindowsWebApp) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this WindowsWebApp. +func (mg *WindowsWebApp) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this WindowsWebApp. +func (mg *WindowsWebApp) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this WindowsWebApp. 
+func (mg *WindowsWebApp) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this WindowsWebAppSlot. +func (mg *WindowsWebAppSlot) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this WindowsWebAppSlot. +func (mg *WindowsWebAppSlot) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this WindowsWebAppSlot. +func (mg *WindowsWebAppSlot) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this WindowsWebAppSlot. +func (mg *WindowsWebAppSlot) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this WindowsWebAppSlot. +func (mg *WindowsWebAppSlot) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this WindowsWebAppSlot. +func (mg *WindowsWebAppSlot) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this WindowsWebAppSlot. +func (mg *WindowsWebAppSlot) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this WindowsWebAppSlot. +func (mg *WindowsWebAppSlot) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this WindowsWebAppSlot. +func (mg *WindowsWebAppSlot) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this WindowsWebAppSlot. +func (mg *WindowsWebAppSlot) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this WindowsWebAppSlot. 
+func (mg *WindowsWebAppSlot) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this WindowsWebAppSlot. +func (mg *WindowsWebAppSlot) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/web/v1beta2/zz_generated.managedlist.go b/apis/web/v1beta2/zz_generated.managedlist.go new file mode 100644 index 000000000..f4bccc998 --- /dev/null +++ b/apis/web/v1beta2/zz_generated.managedlist.go @@ -0,0 +1,116 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. + +package v1beta2 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this AppServicePlanList. +func (l *AppServicePlanList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this FunctionAppList. +func (l *FunctionAppList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this FunctionAppSlotList. +func (l *FunctionAppSlotList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this LinuxFunctionAppList. +func (l *LinuxFunctionAppList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this LinuxFunctionAppSlotList. 
+func (l *LinuxFunctionAppSlotList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this LinuxWebAppList. +func (l *LinuxWebAppList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this LinuxWebAppSlotList. +func (l *LinuxWebAppSlotList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this StaticSiteList. +func (l *StaticSiteList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this WindowsFunctionAppList. +func (l *WindowsFunctionAppList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this WindowsFunctionAppSlotList. +func (l *WindowsFunctionAppSlotList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this WindowsWebAppList. +func (l *WindowsWebAppList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this WindowsWebAppSlotList. 
+func (l *WindowsWebAppSlotList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/web/v1beta2/zz_generated.resolvers.go b/apis/web/v1beta2/zz_generated.resolvers.go new file mode 100644 index 000000000..17859f946 --- /dev/null +++ b/apis/web/v1beta2/zz_generated.resolvers.go @@ -0,0 +1,2148 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 +// Code generated by angryjet. DO NOT EDIT. +// Code transformed by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" + errors "github.com/pkg/errors" + + rconfig "github.com/upbound/provider-azure/apis/rconfig" + + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + apisresolver "github.com/upbound/provider-azure/internal/apis" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (mg *AppServicePlan) ResolveReferences( // ResolveReferences of this AppServicePlan. 
+ ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this FunctionApp. 
+func (mg *FunctionApp) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("web.azure.upbound.io", "v1beta2", "AppServicePlan", "AppServicePlanList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.AppServicePlanID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.AppServicePlanIDRef, + Selector: mg.Spec.ForProvider.AppServicePlanIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.AppServicePlanID") + } + mg.Spec.ForProvider.AppServicePlanID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.AppServicePlanIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.SiteConfig != nil { + for i4 := 0; i4 < 
len(mg.Spec.ForProvider.SiteConfig.IPRestriction); i4++ { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDRef, + Selector: mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID") + } + mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.SiteConfig != nil { + for i4 := 0; i4 < len(mg.Spec.ForProvider.SiteConfig.ScmIPRestriction); i4++ { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDRef, + Selector: mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, 
"mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID") + } + mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDRef = rsp.ResolvedReference + + } + } + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StorageAccountName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.StorageAccountNameRef, + Selector: mg.Spec.ForProvider.StorageAccountNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StorageAccountName") + } + mg.Spec.ForProvider.StorageAccountName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StorageAccountNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("web.azure.upbound.io", "v1beta2", "AppServicePlan", "AppServicePlanList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.AppServicePlanID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.AppServicePlanIDRef, + Selector: mg.Spec.InitProvider.AppServicePlanIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.AppServicePlanID") + } + mg.Spec.InitProvider.AppServicePlanID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.AppServicePlanIDRef = rsp.ResolvedReference 
+ { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ResourceGroupNameRef, + Selector: mg.Spec.InitProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ResourceGroupName") + } + mg.Spec.InitProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ResourceGroupNameRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.SiteConfig != nil { + for i4 := 0; i4 < len(mg.Spec.InitProvider.SiteConfig.IPRestriction); i4++ { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDRef, + Selector: mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID") + } + mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDRef = 
rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.SiteConfig != nil { + for i4 := 0; i4 < len(mg.Spec.InitProvider.SiteConfig.ScmIPRestriction); i4++ { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDRef, + Selector: mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID") + } + mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDRef = rsp.ResolvedReference + + } + } + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StorageAccountName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.StorageAccountNameRef, + Selector: mg.Spec.InitProvider.StorageAccountNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StorageAccountName") + } + mg.Spec.InitProvider.StorageAccountName = 
reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StorageAccountNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this FunctionAppSlot. +func (mg *FunctionAppSlot) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("web.azure.upbound.io", "v1beta2", "AppServicePlan", "AppServicePlanList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.AppServicePlanID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.AppServicePlanIDRef, + Selector: mg.Spec.ForProvider.AppServicePlanIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.AppServicePlanID") + } + mg.Spec.ForProvider.AppServicePlanID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.AppServicePlanIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("web.azure.upbound.io", "v1beta2", "FunctionApp", "FunctionAppList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FunctionAppName), + Extract: resource.ExtractParamPath("name", false), + Reference: mg.Spec.ForProvider.FunctionAppNameRef, + Selector: mg.Spec.ForProvider.FunctionAppNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FunctionAppName") + } + mg.Spec.ForProvider.FunctionAppName = 
reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FunctionAppNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.SiteConfig != nil { + for i4 := 0; i4 < len(mg.Spec.ForProvider.SiteConfig.IPRestriction); i4++ { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDRef, + Selector: mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID") + } + mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID = 
reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.SiteConfig != nil { + for i4 := 0; i4 < len(mg.Spec.ForProvider.SiteConfig.ScmIPRestriction); i4++ { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDRef, + Selector: mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID") + } + mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDRef = rsp.ResolvedReference + + } + } + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StorageAccountName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.StorageAccountNameRef, + Selector: mg.Spec.ForProvider.StorageAccountNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, 
"mg.Spec.ForProvider.StorageAccountName") + } + mg.Spec.ForProvider.StorageAccountName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StorageAccountNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("web.azure.upbound.io", "v1beta2", "AppServicePlan", "AppServicePlanList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.AppServicePlanID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.AppServicePlanIDRef, + Selector: mg.Spec.InitProvider.AppServicePlanIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.AppServicePlanID") + } + mg.Spec.InitProvider.AppServicePlanID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.AppServicePlanIDRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.SiteConfig != nil { + for i4 := 0; i4 < len(mg.Spec.InitProvider.SiteConfig.IPRestriction); i4++ { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDRef, + Selector: mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID") + } + 
mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.SiteConfig != nil { + for i4 := 0; i4 < len(mg.Spec.InitProvider.SiteConfig.ScmIPRestriction); i4++ { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDRef, + Selector: mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID") + } + mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDRef = rsp.ResolvedReference + + } + } + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StorageAccountName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.StorageAccountNameRef, + Selector: mg.Spec.InitProvider.StorageAccountNameSelector, + To: reference.To{List: 
l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StorageAccountName") + } + mg.Spec.InitProvider.StorageAccountName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StorageAccountNameRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this LinuxFunctionApp. +func (mg *LinuxFunctionApp) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("web.azure.upbound.io", "v1beta1", "ServicePlan", "ServicePlanList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ServicePlanID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.ServicePlanIDRef, + Selector: mg.Spec.ForProvider.ServicePlanIDSelector, + To: reference.To{List: l, Managed: 
m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ServicePlanID") + } + mg.Spec.ForProvider.ServicePlanID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ServicePlanIDRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.SiteConfig != nil { + for i4 := 0; i4 < len(mg.Spec.ForProvider.SiteConfig.IPRestriction); i4++ { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDRef, + Selector: mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID") + } + mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.SiteConfig != nil { + for i4 := 0; i4 < len(mg.Spec.ForProvider.SiteConfig.ScmIPRestriction); i4++ { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID), + Extract: 
rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDRef, + Selector: mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID") + } + mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDRef = rsp.ResolvedReference + + } + } + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StorageAccountName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.StorageAccountNameRef, + Selector: mg.Spec.ForProvider.StorageAccountNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StorageAccountName") + } + mg.Spec.ForProvider.StorageAccountName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StorageAccountNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.VirtualNetworkSubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.VirtualNetworkSubnetIDRef, + Selector: 
mg.Spec.ForProvider.VirtualNetworkSubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VirtualNetworkSubnetID") + } + mg.Spec.ForProvider.VirtualNetworkSubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.VirtualNetworkSubnetIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.ResourceGroupNameRef, + Selector: mg.Spec.InitProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ResourceGroupName") + } + mg.Spec.InitProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("web.azure.upbound.io", "v1beta1", "ServicePlan", "ServicePlanList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ServicePlanID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.ServicePlanIDRef, + Selector: mg.Spec.InitProvider.ServicePlanIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ServicePlanID") + } + mg.Spec.InitProvider.ServicePlanID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ServicePlanIDRef = 
rsp.ResolvedReference + + if mg.Spec.InitProvider.SiteConfig != nil { + for i4 := 0; i4 < len(mg.Spec.InitProvider.SiteConfig.IPRestriction); i4++ { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDRef, + Selector: mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID") + } + mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.SiteConfig != nil { + for i4 := 0; i4 < len(mg.Spec.InitProvider.SiteConfig.ScmIPRestriction); i4++ { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDRef, + Selector: mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDSelector, + To: 
reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID") + } + mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDRef = rsp.ResolvedReference + + } + } + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StorageAccountName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.StorageAccountNameRef, + Selector: mg.Spec.InitProvider.StorageAccountNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StorageAccountName") + } + mg.Spec.InitProvider.StorageAccountName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StorageAccountNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.VirtualNetworkSubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.VirtualNetworkSubnetIDRef, + Selector: mg.Spec.InitProvider.VirtualNetworkSubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.VirtualNetworkSubnetID") + } + 
mg.Spec.InitProvider.VirtualNetworkSubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.VirtualNetworkSubnetIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this LinuxFunctionAppSlot. +func (mg *LinuxFunctionAppSlot) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("web.azure.upbound.io", "v1beta2", "LinuxFunctionApp", "LinuxFunctionAppList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FunctionAppID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.FunctionAppIDRef, + Selector: mg.Spec.ForProvider.FunctionAppIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.FunctionAppID") + } + mg.Spec.ForProvider.FunctionAppID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.FunctionAppIDRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.SiteConfig != nil { + for i4 := 0; i4 < len(mg.Spec.ForProvider.SiteConfig.IPRestriction); i4++ { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDRef, + Selector: 
mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID") + } + mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.SiteConfig != nil { + for i4 := 0; i4 < len(mg.Spec.ForProvider.SiteConfig.ScmIPRestriction); i4++ { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDRef, + Selector: mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID") + } + mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDRef = rsp.ResolvedReference + + } + } + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.ForProvider.StorageAccountName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.StorageAccountNameRef, + Selector: mg.Spec.ForProvider.StorageAccountNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.StorageAccountName") + } + mg.Spec.ForProvider.StorageAccountName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.StorageAccountNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.VirtualNetworkSubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.VirtualNetworkSubnetIDRef, + Selector: mg.Spec.ForProvider.VirtualNetworkSubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VirtualNetworkSubnetID") + } + mg.Spec.ForProvider.VirtualNetworkSubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.VirtualNetworkSubnetIDRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.SiteConfig != nil { + for i4 := 0; i4 < len(mg.Spec.InitProvider.SiteConfig.IPRestriction); i4++ { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: 
mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDRef, + Selector: mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID") + } + mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.SiteConfig != nil { + for i4 := 0; i4 < len(mg.Spec.InitProvider.SiteConfig.ScmIPRestriction); i4++ { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDRef, + Selector: mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID") + } + mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDRef = rsp.ResolvedReference + + } + } + { + m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list 
for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StorageAccountName), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.StorageAccountNameRef, + Selector: mg.Spec.InitProvider.StorageAccountNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.StorageAccountName") + } + mg.Spec.InitProvider.StorageAccountName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.StorageAccountNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.VirtualNetworkSubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.VirtualNetworkSubnetIDRef, + Selector: mg.Spec.InitProvider.VirtualNetworkSubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.VirtualNetworkSubnetID") + } + mg.Spec.InitProvider.VirtualNetworkSubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.VirtualNetworkSubnetIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this LinuxWebApp. 
+func (mg *LinuxWebApp) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.ResourceGroupNameRef, + Selector: mg.Spec.ForProvider.ResourceGroupNameSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName") + } + mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("web.azure.upbound.io", "v1beta1", "ServicePlan", "ServicePlanList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ServicePlanID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.ServicePlanIDRef, + Selector: mg.Spec.ForProvider.ServicePlanIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ServicePlanID") + } + mg.Spec.ForProvider.ServicePlanID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ServicePlanIDRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.SiteConfig != nil { + for i4 := 0; i4 < 
len(mg.Spec.ForProvider.SiteConfig.IPRestriction); i4++ { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDRef, + Selector: mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID") + } + mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.SiteConfig != nil { + for i4 := 0; i4 < len(mg.Spec.ForProvider.SiteConfig.ScmIPRestriction); i4++ { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDRef, + Selector: mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, 
"mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID") + } + mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDRef = rsp.ResolvedReference + + } + } + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.VirtualNetworkSubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.VirtualNetworkSubnetIDRef, + Selector: mg.Spec.ForProvider.VirtualNetworkSubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VirtualNetworkSubnetID") + } + mg.Spec.ForProvider.VirtualNetworkSubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.VirtualNetworkSubnetIDRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("web.azure.upbound.io", "v1beta1", "ServicePlan", "ServicePlanList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ServicePlanID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.InitProvider.ServicePlanIDRef, + Selector: mg.Spec.InitProvider.ServicePlanIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.ServicePlanID") + } + mg.Spec.InitProvider.ServicePlanID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ServicePlanIDRef = rsp.ResolvedReference 
+ + if mg.Spec.InitProvider.SiteConfig != nil { + for i4 := 0; i4 < len(mg.Spec.InitProvider.SiteConfig.IPRestriction); i4++ { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDRef, + Selector: mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID") + } + mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.SiteConfig != nil { + for i4 := 0; i4 < len(mg.Spec.InitProvider.SiteConfig.ScmIPRestriction); i4++ { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDRef, + Selector: mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDSelector, + To: reference.To{List: l, 
Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID") + } + mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDRef = rsp.ResolvedReference + + } + } + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.VirtualNetworkSubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.VirtualNetworkSubnetIDRef, + Selector: mg.Spec.InitProvider.VirtualNetworkSubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.VirtualNetworkSubnetID") + } + mg.Spec.InitProvider.VirtualNetworkSubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.VirtualNetworkSubnetIDRef = rsp.ResolvedReference + + return nil +} + +// ResolveReferences of this LinuxWebAppSlot. 
func (mg *LinuxWebAppSlot) ResolveReferences(ctx context.Context, c client.Reader) error {
	// m and l hold the concrete managed-resource and list types of the current
	// reference target; r performs the lookups through the given API reader.
	var m xpresource.Managed
	var l xpresource.ManagedList
	r := reference.NewAPIResolver(c, mg)

	var rsp reference.ResolutionResponse
	var err error
	// Resolve spec.forProvider.appServiceID from a referenced LinuxWebApp
	// (web.azure.upbound.io/v1beta2), extracting its resource ID.
	{
		m, l, err = apisresolver.GetManagedResource("web.azure.upbound.io", "v1beta2", "LinuxWebApp", "LinuxWebAppList")
		if err != nil {
			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
		}

		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
			CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.AppServiceID),
			Extract:      resource.ExtractResourceID(),
			Reference:    mg.Spec.ForProvider.AppServiceIDRef,
			Selector:     mg.Spec.ForProvider.AppServiceIDSelector,
			To:           reference.To{List: l, Managed: m},
		})
	}
	if err != nil {
		return errors.Wrap(err, "mg.Spec.ForProvider.AppServiceID")
	}
	mg.Spec.ForProvider.AppServiceID = reference.ToPtrValue(rsp.ResolvedValue)
	mg.Spec.ForProvider.AppServiceIDRef = rsp.ResolvedReference

	// Resolve the Subnet reference for each forProvider site_config ip_restriction entry.
	if mg.Spec.ForProvider.SiteConfig != nil {
		for i4 := 0; i4 < len(mg.Spec.ForProvider.SiteConfig.IPRestriction); i4++ {
			{
				m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList")
				if err != nil {
					return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
				}
				rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
					CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID),
					Extract:      rconfig.ExtractResourceID(),
					Reference:    mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDRef,
					Selector:     mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDSelector,
					To:           reference.To{List: l, Managed: m},
				})
			}
			if err != nil {
				return errors.Wrap(err, "mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID")
			}
			mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID = reference.ToPtrValue(rsp.ResolvedValue)
			mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDRef = rsp.ResolvedReference

		}
	}
	// Resolve the Subnet reference for each forProvider site_config scm_ip_restriction entry.
	if mg.Spec.ForProvider.SiteConfig != nil {
		for i4 := 0; i4 < len(mg.Spec.ForProvider.SiteConfig.ScmIPRestriction); i4++ {
			{
				m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList")
				if err != nil {
					return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
				}
				rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
					CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID),
					Extract:      rconfig.ExtractResourceID(),
					Reference:    mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDRef,
					Selector:     mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDSelector,
					To:           reference.To{List: l, Managed: m},
				})
			}
			if err != nil {
				return errors.Wrap(err, "mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID")
			}
			mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID = reference.ToPtrValue(rsp.ResolvedValue)
			mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDRef = rsp.ResolvedReference

		}
	}
	// Resolve the slot's top-level spec.forProvider.virtualNetworkSubnetID from a referenced Subnet.
	{
		m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList")
		if err != nil {
			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
		}
		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
			CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.VirtualNetworkSubnetID),
			Extract:      rconfig.ExtractResourceID(),
			Reference:    mg.Spec.ForProvider.VirtualNetworkSubnetIDRef,
			Selector:     mg.Spec.ForProvider.VirtualNetworkSubnetIDSelector,
			To:           reference.To{List: l, Managed: m},
		})
	}
	if err != nil {
		return errors.Wrap(err, "mg.Spec.ForProvider.VirtualNetworkSubnetID")
	}
	mg.Spec.ForProvider.VirtualNetworkSubnetID = reference.ToPtrValue(rsp.ResolvedValue)
	mg.Spec.ForProvider.VirtualNetworkSubnetIDRef = rsp.ResolvedReference
	// The same resolutions are repeated below for spec.initProvider so that
	// management-policy "Observe"-style initialization fields are also resolved.
	{
		m, l, err = apisresolver.GetManagedResource("web.azure.upbound.io", "v1beta2", "LinuxWebApp", "LinuxWebAppList")
		if err != nil {
			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
		}

		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
			CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.AppServiceID),
			Extract:      resource.ExtractResourceID(),
			Reference:    mg.Spec.InitProvider.AppServiceIDRef,
			Selector:     mg.Spec.InitProvider.AppServiceIDSelector,
			To:           reference.To{List: l, Managed: m},
		})
	}
	if err != nil {
		return errors.Wrap(err, "mg.Spec.InitProvider.AppServiceID")
	}
	mg.Spec.InitProvider.AppServiceID = reference.ToPtrValue(rsp.ResolvedValue)
	mg.Spec.InitProvider.AppServiceIDRef = rsp.ResolvedReference

	if mg.Spec.InitProvider.SiteConfig != nil {
		for i4 := 0; i4 < len(mg.Spec.InitProvider.SiteConfig.IPRestriction); i4++ {
			{
				m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList")
				if err != nil {
					return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
				}
				rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
					CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID),
					Extract:      rconfig.ExtractResourceID(),
					Reference:    mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDRef,
					Selector:     mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDSelector,
					To:           reference.To{List: l, Managed: m},
				})
			}
			if err != nil {
				return errors.Wrap(err, "mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID")
			}
			mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID = reference.ToPtrValue(rsp.ResolvedValue)
			mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDRef = rsp.ResolvedReference

		}
	}
	if mg.Spec.InitProvider.SiteConfig != nil {
		for i4 := 0; i4 < len(mg.Spec.InitProvider.SiteConfig.ScmIPRestriction); i4++ {
			{
				m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList")
				if err != nil {
					return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
				}
				rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
					CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID),
					Extract:      rconfig.ExtractResourceID(),
					Reference:    mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDRef,
					Selector:     mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDSelector,
					To:           reference.To{List: l, Managed: m},
				})
			}
			if err != nil {
				return errors.Wrap(err, "mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID")
			}
			mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID = reference.ToPtrValue(rsp.ResolvedValue)
			mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDRef = rsp.ResolvedReference

		}
	}
	{
		m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList")
		if err != nil {
			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
		}
		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
			CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.VirtualNetworkSubnetID),
			Extract:      rconfig.ExtractResourceID(),
			Reference:    mg.Spec.InitProvider.VirtualNetworkSubnetIDRef,
			Selector:     mg.Spec.InitProvider.VirtualNetworkSubnetIDSelector,
			To:           reference.To{List: l, Managed: m},
		})
	}
	if err != nil {
		return errors.Wrap(err, "mg.Spec.InitProvider.VirtualNetworkSubnetID")
	}
	mg.Spec.InitProvider.VirtualNetworkSubnetID = reference.ToPtrValue(rsp.ResolvedValue)
	mg.Spec.InitProvider.VirtualNetworkSubnetIDRef = rsp.ResolvedReference

	return nil
}

// ResolveReferences of this StaticSite. It resolves only
// spec.forProvider.resourceGroupName, from a referenced ResourceGroup
// (azure.upbound.io/v1beta1) via its external name.
func (mg *StaticSite) ResolveReferences(ctx context.Context, c client.Reader) error {
	var m xpresource.Managed
	var l xpresource.ManagedList
	r := reference.NewAPIResolver(c, mg)

	var rsp reference.ResolutionResponse
	var err error
	{
		m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList")
		if err != nil {
			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
		}

		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
			CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName),
			Extract:      reference.ExternalName(),
			Reference:    mg.Spec.ForProvider.ResourceGroupNameRef,
			Selector:     mg.Spec.ForProvider.ResourceGroupNameSelector,
			To:           reference.To{List: l, Managed: m},
		})
	}
	if err != nil {
		return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName")
	}
	mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue)
	mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference

	return nil
}

// ResolveReferences of this WindowsFunctionApp.
func (mg *WindowsFunctionApp) ResolveReferences(ctx context.Context, c client.Reader) error {
	// m and l hold the concrete managed-resource and list types of the current
	// reference target; r performs the lookups through the given API reader.
	var m xpresource.Managed
	var l xpresource.ManagedList
	r := reference.NewAPIResolver(c, mg)

	var rsp reference.ResolutionResponse
	var err error
	// Resolve spec.forProvider.resourceGroupName from a referenced ResourceGroup
	// via its external name.
	{
		m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList")
		if err != nil {
			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
		}

		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
			CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName),
			Extract:      reference.ExternalName(),
			Reference:    mg.Spec.ForProvider.ResourceGroupNameRef,
			Selector:     mg.Spec.ForProvider.ResourceGroupNameSelector,
			To:           reference.To{List: l, Managed: m},
		})
	}
	if err != nil {
		return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName")
	}
	mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue)
	mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference
	// Resolve spec.forProvider.servicePlanID from a referenced ServicePlan
	// (web.azure.upbound.io/v1beta1), extracting its resource ID.
	{
		m, l, err = apisresolver.GetManagedResource("web.azure.upbound.io", "v1beta1", "ServicePlan", "ServicePlanList")
		if err != nil {
			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
		}

		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
			CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ServicePlanID),
			Extract:      resource.ExtractResourceID(),
			Reference:    mg.Spec.ForProvider.ServicePlanIDRef,
			Selector:     mg.Spec.ForProvider.ServicePlanIDSelector,
			To:           reference.To{List: l, Managed: m},
		})
	}
	if err != nil {
		return errors.Wrap(err, "mg.Spec.ForProvider.ServicePlanID")
	}
	mg.Spec.ForProvider.ServicePlanID = reference.ToPtrValue(rsp.ResolvedValue)
	mg.Spec.ForProvider.ServicePlanIDRef = rsp.ResolvedReference

	// Resolve the Subnet reference for each forProvider site_config ip_restriction entry.
	if mg.Spec.ForProvider.SiteConfig != nil {
		for i4 := 0; i4 < len(mg.Spec.ForProvider.SiteConfig.IPRestriction); i4++ {
			{
				m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList")
				if err != nil {
					return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
				}
				rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
					CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID),
					Extract:      rconfig.ExtractResourceID(),
					Reference:    mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDRef,
					Selector:     mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDSelector,
					To:           reference.To{List: l, Managed: m},
				})
			}
			if err != nil {
				return errors.Wrap(err, "mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID")
			}
			mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID = reference.ToPtrValue(rsp.ResolvedValue)
			mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDRef = rsp.ResolvedReference

		}
	}
	// Resolve the Subnet reference for each forProvider site_config scm_ip_restriction entry.
	if mg.Spec.ForProvider.SiteConfig != nil {
		for i4 := 0; i4 < len(mg.Spec.ForProvider.SiteConfig.ScmIPRestriction); i4++ {
			{
				m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList")
				if err != nil {
					return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
				}
				rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
					CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID),
					Extract:      rconfig.ExtractResourceID(),
					Reference:    mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDRef,
					Selector:     mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDSelector,
					To:           reference.To{List: l, Managed: m},
				})
			}
			if err != nil {
				return errors.Wrap(err, "mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID")
			}
			mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID = reference.ToPtrValue(rsp.ResolvedValue)
			mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDRef = rsp.ResolvedReference

		}
	}
	// Resolve spec.forProvider.storageAccountName from a referenced storage
	// Account via its external name.
	{
		m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList")
		if err != nil {
			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
		}
		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
			CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StorageAccountName),
			Extract:      reference.ExternalName(),
			Reference:    mg.Spec.ForProvider.StorageAccountNameRef,
			Selector:     mg.Spec.ForProvider.StorageAccountNameSelector,
			To:           reference.To{List: l, Managed: m},
		})
	}
	if err != nil {
		return errors.Wrap(err, "mg.Spec.ForProvider.StorageAccountName")
	}
	mg.Spec.ForProvider.StorageAccountName = reference.ToPtrValue(rsp.ResolvedValue)
	mg.Spec.ForProvider.StorageAccountNameRef = rsp.ResolvedReference
	// Resolve the app's top-level spec.forProvider.virtualNetworkSubnetID from a referenced Subnet.
	{
		m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList")
		if err != nil {
			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
		}

		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
			CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.VirtualNetworkSubnetID),
			Extract:      rconfig.ExtractResourceID(),
			Reference:    mg.Spec.ForProvider.VirtualNetworkSubnetIDRef,
			Selector:     mg.Spec.ForProvider.VirtualNetworkSubnetIDSelector,
			To:           reference.To{List: l, Managed: m},
		})
	}
	if err != nil {
		return errors.Wrap(err, "mg.Spec.ForProvider.VirtualNetworkSubnetID")
	}
	mg.Spec.ForProvider.VirtualNetworkSubnetID = reference.ToPtrValue(rsp.ResolvedValue)
	mg.Spec.ForProvider.VirtualNetworkSubnetIDRef = rsp.ResolvedReference
	// The same resolutions are repeated below for the spec.initProvider fields
	// (note: initProvider has no resourceGroupName stanza in the generated output).
	{
		m, l, err = apisresolver.GetManagedResource("web.azure.upbound.io", "v1beta1", "ServicePlan", "ServicePlanList")
		if err != nil {
			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
		}

		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
			CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ServicePlanID),
			Extract:      resource.ExtractResourceID(),
			Reference:    mg.Spec.InitProvider.ServicePlanIDRef,
			Selector:     mg.Spec.InitProvider.ServicePlanIDSelector,
			To:           reference.To{List: l, Managed: m},
		})
	}
	if err != nil {
		return errors.Wrap(err, "mg.Spec.InitProvider.ServicePlanID")
	}
	mg.Spec.InitProvider.ServicePlanID = reference.ToPtrValue(rsp.ResolvedValue)
	mg.Spec.InitProvider.ServicePlanIDRef = rsp.ResolvedReference

	if mg.Spec.InitProvider.SiteConfig != nil {
		for i4 := 0; i4 < len(mg.Spec.InitProvider.SiteConfig.IPRestriction); i4++ {
			{
				m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList")
				if err != nil {
					return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
				}
				rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
					CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID),
					Extract:      rconfig.ExtractResourceID(),
					Reference:    mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDRef,
					Selector:     mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDSelector,
					To:           reference.To{List: l, Managed: m},
				})
			}
			if err != nil {
				return errors.Wrap(err, "mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID")
			}
			mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID = reference.ToPtrValue(rsp.ResolvedValue)
			mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDRef = rsp.ResolvedReference

		}
	}
	if mg.Spec.InitProvider.SiteConfig != nil {
		for i4 := 0; i4 < len(mg.Spec.InitProvider.SiteConfig.ScmIPRestriction); i4++ {
			{
				m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList")
				if err != nil {
					return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
				}
				rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
					CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID),
					Extract:      rconfig.ExtractResourceID(),
					Reference:    mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDRef,
					Selector:     mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDSelector,
					To:           reference.To{List: l, Managed: m},
				})
			}
			if err != nil {
				return errors.Wrap(err, "mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID")
			}
			mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID = reference.ToPtrValue(rsp.ResolvedValue)
			mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDRef = rsp.ResolvedReference

		}
	}
	{
		m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList")
		if err != nil {
			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
		}
		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
			CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StorageAccountName),
			Extract:      reference.ExternalName(),
			Reference:    mg.Spec.InitProvider.StorageAccountNameRef,
			Selector:     mg.Spec.InitProvider.StorageAccountNameSelector,
			To:           reference.To{List: l, Managed: m},
		})
	}
	if err != nil {
		return errors.Wrap(err, "mg.Spec.InitProvider.StorageAccountName")
	}
	mg.Spec.InitProvider.StorageAccountName = reference.ToPtrValue(rsp.ResolvedValue)
	mg.Spec.InitProvider.StorageAccountNameRef = rsp.ResolvedReference
	{
		m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList")
		if err != nil {
			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
		}

		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
			CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.VirtualNetworkSubnetID),
			Extract:      rconfig.ExtractResourceID(),
			Reference:    mg.Spec.InitProvider.VirtualNetworkSubnetIDRef,
			Selector:     mg.Spec.InitProvider.VirtualNetworkSubnetIDSelector,
			To:           reference.To{List: l, Managed: m},
		})
	}
	if err != nil {
		return errors.Wrap(err, "mg.Spec.InitProvider.VirtualNetworkSubnetID")
	}
	mg.Spec.InitProvider.VirtualNetworkSubnetID = reference.ToPtrValue(rsp.ResolvedValue)
	mg.Spec.InitProvider.VirtualNetworkSubnetIDRef = rsp.ResolvedReference

	return nil
}

// ResolveReferences of this WindowsFunctionAppSlot.
func (mg *WindowsFunctionAppSlot) ResolveReferences(ctx context.Context, c client.Reader) error {
	// m and l hold the concrete managed-resource and list types of the current
	// reference target; r performs the lookups through the given API reader.
	var m xpresource.Managed
	var l xpresource.ManagedList
	r := reference.NewAPIResolver(c, mg)

	var rsp reference.ResolutionResponse
	var err error
	// Resolve spec.forProvider.functionAppID from a referenced WindowsFunctionApp
	// (web.azure.upbound.io/v1beta2), extracting its resource ID.
	{
		m, l, err = apisresolver.GetManagedResource("web.azure.upbound.io", "v1beta2", "WindowsFunctionApp", "WindowsFunctionAppList")
		if err != nil {
			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
		}

		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
			CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.FunctionAppID),
			Extract:      resource.ExtractResourceID(),
			Reference:    mg.Spec.ForProvider.FunctionAppIDRef,
			Selector:     mg.Spec.ForProvider.FunctionAppIDSelector,
			To:           reference.To{List: l, Managed: m},
		})
	}
	if err != nil {
		return errors.Wrap(err, "mg.Spec.ForProvider.FunctionAppID")
	}
	mg.Spec.ForProvider.FunctionAppID = reference.ToPtrValue(rsp.ResolvedValue)
	mg.Spec.ForProvider.FunctionAppIDRef = rsp.ResolvedReference

	// Resolve the Subnet reference for each forProvider site_config ip_restriction entry.
	if mg.Spec.ForProvider.SiteConfig != nil {
		for i4 := 0; i4 < len(mg.Spec.ForProvider.SiteConfig.IPRestriction); i4++ {
			{
				m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList")
				if err != nil {
					return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
				}
				rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
					CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID),
					Extract:      rconfig.ExtractResourceID(),
					Reference:    mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDRef,
					Selector:     mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDSelector,
					To:           reference.To{List: l, Managed: m},
				})
			}
			if err != nil {
				return errors.Wrap(err, "mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID")
			}
			mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID = reference.ToPtrValue(rsp.ResolvedValue)
			mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDRef = rsp.ResolvedReference

		}
	}
	// Resolve the Subnet reference for each forProvider site_config scm_ip_restriction entry.
	if mg.Spec.ForProvider.SiteConfig != nil {
		for i4 := 0; i4 < len(mg.Spec.ForProvider.SiteConfig.ScmIPRestriction); i4++ {
			{
				m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList")
				if err != nil {
					return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
				}
				rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
					CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID),
					Extract:      rconfig.ExtractResourceID(),
					Reference:    mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDRef,
					Selector:     mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDSelector,
					To:           reference.To{List: l, Managed: m},
				})
			}
			if err != nil {
				return errors.Wrap(err, "mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID")
			}
			mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID = reference.ToPtrValue(rsp.ResolvedValue)
			mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDRef = rsp.ResolvedReference

		}
	}
	// Resolve spec.forProvider.storageAccountName from a referenced storage
	// Account via its external name.
	{
		m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList")
		if err != nil {
			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
		}
		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
			CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.StorageAccountName),
			Extract:      reference.ExternalName(),
			Reference:    mg.Spec.ForProvider.StorageAccountNameRef,
			Selector:     mg.Spec.ForProvider.StorageAccountNameSelector,
			To:           reference.To{List: l, Managed: m},
		})
	}
	if err != nil {
		return errors.Wrap(err, "mg.Spec.ForProvider.StorageAccountName")
	}
	mg.Spec.ForProvider.StorageAccountName = reference.ToPtrValue(rsp.ResolvedValue)
	mg.Spec.ForProvider.StorageAccountNameRef = rsp.ResolvedReference
	// Resolve the slot's top-level spec.forProvider.virtualNetworkSubnetID from a referenced Subnet.
	{
		m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList")
		if err != nil {
			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
		}

		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
			CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.VirtualNetworkSubnetID),
			Extract:      rconfig.ExtractResourceID(),
			Reference:    mg.Spec.ForProvider.VirtualNetworkSubnetIDRef,
			Selector:     mg.Spec.ForProvider.VirtualNetworkSubnetIDSelector,
			To:           reference.To{List: l, Managed: m},
		})
	}
	if err != nil {
		return errors.Wrap(err, "mg.Spec.ForProvider.VirtualNetworkSubnetID")
	}
	mg.Spec.ForProvider.VirtualNetworkSubnetID = reference.ToPtrValue(rsp.ResolvedValue)
	mg.Spec.ForProvider.VirtualNetworkSubnetIDRef = rsp.ResolvedReference

	// The same resolutions are repeated below for the spec.initProvider fields
	// (note: initProvider has no functionAppID stanza in the generated output).
	if mg.Spec.InitProvider.SiteConfig != nil {
		for i4 := 0; i4 < len(mg.Spec.InitProvider.SiteConfig.IPRestriction); i4++ {
			{
				m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList")
				if err != nil {
					return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
				}
				rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
					CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID),
					Extract:      rconfig.ExtractResourceID(),
					Reference:    mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDRef,
					Selector:     mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDSelector,
					To:           reference.To{List: l, Managed: m},
				})
			}
			if err != nil {
				return errors.Wrap(err, "mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID")
			}
			mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID = reference.ToPtrValue(rsp.ResolvedValue)
			mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDRef = rsp.ResolvedReference

		}
	}
	if mg.Spec.InitProvider.SiteConfig != nil {
		for i4 := 0; i4 < len(mg.Spec.InitProvider.SiteConfig.ScmIPRestriction); i4++ {
			{
				m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList")
				if err != nil {
					return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
				}
				rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
					CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID),
					Extract:      rconfig.ExtractResourceID(),
					Reference:    mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDRef,
					Selector:     mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDSelector,
					To:           reference.To{List: l, Managed: m},
				})
			}
			if err != nil {
				return errors.Wrap(err, "mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID")
			}
			mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID = reference.ToPtrValue(rsp.ResolvedValue)
			mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDRef = rsp.ResolvedReference

		}
	}
	{
		m, l, err = apisresolver.GetManagedResource("storage.azure.upbound.io", "v1beta2", "Account", "AccountList")
		if err != nil {
			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
		}
		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
			CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.StorageAccountName),
			Extract:      reference.ExternalName(),
			Reference:    mg.Spec.InitProvider.StorageAccountNameRef,
			Selector:     mg.Spec.InitProvider.StorageAccountNameSelector,
			To:           reference.To{List: l, Managed: m},
		})
	}
	if err != nil {
		return errors.Wrap(err, "mg.Spec.InitProvider.StorageAccountName")
	}
	mg.Spec.InitProvider.StorageAccountName = reference.ToPtrValue(rsp.ResolvedValue)
	mg.Spec.InitProvider.StorageAccountNameRef = rsp.ResolvedReference
	{
		m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList")
		if err != nil {
			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
		}

		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
			CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.VirtualNetworkSubnetID),
			Extract:      rconfig.ExtractResourceID(),
			Reference:    mg.Spec.InitProvider.VirtualNetworkSubnetIDRef,
			Selector:     mg.Spec.InitProvider.VirtualNetworkSubnetIDSelector,
			To:           reference.To{List: l, Managed: m},
		})
	}
	if err != nil {
		return errors.Wrap(err, "mg.Spec.InitProvider.VirtualNetworkSubnetID")
	}
	mg.Spec.InitProvider.VirtualNetworkSubnetID = reference.ToPtrValue(rsp.ResolvedValue)
	mg.Spec.InitProvider.VirtualNetworkSubnetIDRef = rsp.ResolvedReference

	return nil
}

// ResolveReferences of this WindowsWebApp.
func (mg *WindowsWebApp) ResolveReferences(ctx context.Context, c client.Reader) error {
	// m and l hold the concrete managed-resource and list types of the current
	// reference target; r performs the lookups through the given API reader.
	var m xpresource.Managed
	var l xpresource.ManagedList
	r := reference.NewAPIResolver(c, mg)

	var rsp reference.ResolutionResponse
	var err error
	// Resolve spec.forProvider.resourceGroupName from a referenced ResourceGroup
	// via its external name.
	{
		m, l, err = apisresolver.GetManagedResource("azure.upbound.io", "v1beta1", "ResourceGroup", "ResourceGroupList")
		if err != nil {
			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
		}

		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
			CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ResourceGroupName),
			Extract:      reference.ExternalName(),
			Reference:    mg.Spec.ForProvider.ResourceGroupNameRef,
			Selector:     mg.Spec.ForProvider.ResourceGroupNameSelector,
			To:           reference.To{List: l, Managed: m},
		})
	}
	if err != nil {
		return errors.Wrap(err, "mg.Spec.ForProvider.ResourceGroupName")
	}
	mg.Spec.ForProvider.ResourceGroupName = reference.ToPtrValue(rsp.ResolvedValue)
	mg.Spec.ForProvider.ResourceGroupNameRef = rsp.ResolvedReference
	// Resolve spec.forProvider.servicePlanID from a referenced ServicePlan
	// (web.azure.upbound.io/v1beta1), extracting its resource ID.
	{
		m, l, err = apisresolver.GetManagedResource("web.azure.upbound.io", "v1beta1", "ServicePlan", "ServicePlanList")
		if err != nil {
			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
		}

		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
			CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ServicePlanID),
			Extract:      resource.ExtractResourceID(),
			Reference:    mg.Spec.ForProvider.ServicePlanIDRef,
			Selector:     mg.Spec.ForProvider.ServicePlanIDSelector,
			To:           reference.To{List: l, Managed: m},
		})
	}
	if err != nil {
		return errors.Wrap(err, "mg.Spec.ForProvider.ServicePlanID")
	}
	mg.Spec.ForProvider.ServicePlanID = reference.ToPtrValue(rsp.ResolvedValue)
	mg.Spec.ForProvider.ServicePlanIDRef = rsp.ResolvedReference

	// Resolve the Subnet reference for each forProvider site_config ip_restriction entry.
	if mg.Spec.ForProvider.SiteConfig != nil {
		for i4 := 0; i4 < len(mg.Spec.ForProvider.SiteConfig.IPRestriction); i4++ {
			{
				m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList")
				if err != nil {
					return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
				}
				rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
					CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID),
					Extract:      rconfig.ExtractResourceID(),
					Reference:    mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDRef,
					Selector:     mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDSelector,
					To:           reference.To{List: l, Managed: m},
				})
			}
			if err != nil {
				return errors.Wrap(err, "mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID")
			}
			mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID = reference.ToPtrValue(rsp.ResolvedValue)
			mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDRef = rsp.ResolvedReference

		}
	}
	// Resolve the Subnet reference for each forProvider site_config scm_ip_restriction entry.
	if mg.Spec.ForProvider.SiteConfig != nil {
		for i4 := 0; i4 < len(mg.Spec.ForProvider.SiteConfig.ScmIPRestriction); i4++ {
			{
				m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList")
				if err != nil {
					return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
				}
				rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
					CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID),
					Extract:      rconfig.ExtractResourceID(),
					Reference:    mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDRef,
					Selector:     mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDSelector,
					To:           reference.To{List: l, Managed: m},
				})
			}
			if err != nil {
				return errors.Wrap(err, "mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID")
			}
			mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID = reference.ToPtrValue(rsp.ResolvedValue)
			mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDRef = rsp.ResolvedReference

		}
	}
	// Resolve the app's top-level spec.forProvider.virtualNetworkSubnetID from a referenced Subnet.
	{
		m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList")
		if err != nil {
			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
		}
		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
			CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.VirtualNetworkSubnetID),
			Extract:      rconfig.ExtractResourceID(),
			Reference:    mg.Spec.ForProvider.VirtualNetworkSubnetIDRef,
			Selector:     mg.Spec.ForProvider.VirtualNetworkSubnetIDSelector,
			To:           reference.To{List: l, Managed: m},
		})
	}
	if err != nil {
		return errors.Wrap(err, "mg.Spec.ForProvider.VirtualNetworkSubnetID")
	}
	mg.Spec.ForProvider.VirtualNetworkSubnetID = reference.ToPtrValue(rsp.ResolvedValue)
	mg.Spec.ForProvider.VirtualNetworkSubnetIDRef = rsp.ResolvedReference
	// The same resolutions are repeated below for the spec.initProvider fields
	// (note: initProvider has no resourceGroupName stanza in the generated output).
	{
		m, l, err = apisresolver.GetManagedResource("web.azure.upbound.io", "v1beta1", "ServicePlan", "ServicePlanList")
		if err != nil {
			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
		}

		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
			CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.ServicePlanID),
			Extract:      resource.ExtractResourceID(),
			Reference:    mg.Spec.InitProvider.ServicePlanIDRef,
			Selector:     mg.Spec.InitProvider.ServicePlanIDSelector,
			To:           reference.To{List: l, Managed: m},
		})
	}
	if err != nil {
		return errors.Wrap(err, "mg.Spec.InitProvider.ServicePlanID")
	}
	mg.Spec.InitProvider.ServicePlanID = reference.ToPtrValue(rsp.ResolvedValue)
	mg.Spec.InitProvider.ServicePlanIDRef = rsp.ResolvedReference

	if mg.Spec.InitProvider.SiteConfig != nil {
		for i4 := 0; i4 < len(mg.Spec.InitProvider.SiteConfig.IPRestriction); i4++ {
			{
				m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList")
				if err != nil {
					return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
				}
				rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
					CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID),
					Extract:      rconfig.ExtractResourceID(),
					Reference:    mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDRef,
					Selector:     mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDSelector,
					To:           reference.To{List: l, Managed: m},
				})
			}
			if err != nil {
				return errors.Wrap(err, "mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID")
			}
			mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID = reference.ToPtrValue(rsp.ResolvedValue)
			mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDRef = rsp.ResolvedReference

		}
	}
	if mg.Spec.InitProvider.SiteConfig != nil {
		for i4 := 0; i4 < len(mg.Spec.InitProvider.SiteConfig.ScmIPRestriction); i4++ {
			{
				m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList")
				if err != nil {
					return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
				}
				rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
					CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID),
					Extract:      rconfig.ExtractResourceID(),
					Reference:    mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDRef,
					Selector:     mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDSelector,
					To:           reference.To{List: l, Managed: m},
				})
			}
			if err != nil {
				return errors.Wrap(err, "mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID")
			}
			mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID = reference.ToPtrValue(rsp.ResolvedValue)
			mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDRef = rsp.ResolvedReference

		}
	}
	{
		m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList")
		if err != nil {
			return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution")
		}
		rsp, err = r.Resolve(ctx, reference.ResolutionRequest{
			CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.VirtualNetworkSubnetID),
			Extract:      rconfig.ExtractResourceID(),
			Reference:    mg.Spec.InitProvider.VirtualNetworkSubnetIDRef,
			Selector:     mg.Spec.InitProvider.VirtualNetworkSubnetIDSelector,
			To:           reference.To{List: l, Managed: m},
		})
	}
	if err != nil {
		return errors.Wrap(err, "mg.Spec.InitProvider.VirtualNetworkSubnetID")
	}
	mg.Spec.InitProvider.VirtualNetworkSubnetID = reference.ToPtrValue(rsp.ResolvedValue)
	mg.Spec.InitProvider.VirtualNetworkSubnetIDRef = rsp.ResolvedReference

	return nil
}

// ResolveReferences of this WindowsWebAppSlot.
+func (mg *WindowsWebAppSlot) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("web.azure.upbound.io", "v1beta2", "WindowsWebApp", "WindowsWebAppList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.AppServiceID), + Extract: resource.ExtractResourceID(), + Reference: mg.Spec.ForProvider.AppServiceIDRef, + Selector: mg.Spec.ForProvider.AppServiceIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.AppServiceID") + } + mg.Spec.ForProvider.AppServiceID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.AppServiceIDRef = rsp.ResolvedReference + + if mg.Spec.ForProvider.SiteConfig != nil { + for i4 := 0; i4 < len(mg.Spec.ForProvider.SiteConfig.IPRestriction); i4++ { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDRef, + Selector: mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, 
"mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID") + } + mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDRef = rsp.ResolvedReference + + } + } + if mg.Spec.ForProvider.SiteConfig != nil { + for i4 := 0; i4 < len(mg.Spec.ForProvider.SiteConfig.ScmIPRestriction); i4++ { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDRef, + Selector: mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID") + } + mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDRef = rsp.ResolvedReference + + } + } + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.VirtualNetworkSubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.ForProvider.VirtualNetworkSubnetIDRef, + 
Selector: mg.Spec.ForProvider.VirtualNetworkSubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.VirtualNetworkSubnetID") + } + mg.Spec.ForProvider.VirtualNetworkSubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.VirtualNetworkSubnetIDRef = rsp.ResolvedReference + + if mg.Spec.InitProvider.SiteConfig != nil { + for i4 := 0; i4 < len(mg.Spec.InitProvider.SiteConfig.IPRestriction); i4++ { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDRef, + Selector: mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID") + } + mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SiteConfig.IPRestriction[i4].VirtualNetworkSubnetIDRef = rsp.ResolvedReference + + } + } + if mg.Spec.InitProvider.SiteConfig != nil { + for i4 := 0; i4 < len(mg.Spec.InitProvider.SiteConfig.ScmIPRestriction); i4++ { + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: 
reference.FromPtrValue(mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDRef, + Selector: mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID") + } + mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.SiteConfig.ScmIPRestriction[i4].VirtualNetworkSubnetIDRef = rsp.ResolvedReference + + } + } + { + m, l, err = apisresolver.GetManagedResource("network.azure.upbound.io", "v1beta2", "Subnet", "SubnetList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.VirtualNetworkSubnetID), + Extract: rconfig.ExtractResourceID(), + Reference: mg.Spec.InitProvider.VirtualNetworkSubnetIDRef, + Selector: mg.Spec.InitProvider.VirtualNetworkSubnetIDSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.VirtualNetworkSubnetID") + } + mg.Spec.InitProvider.VirtualNetworkSubnetID = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.VirtualNetworkSubnetIDRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/web/v1beta2/zz_groupversion_info.go b/apis/web/v1beta2/zz_groupversion_info.go new file mode 100755 index 000000000..a13cf460a --- /dev/null +++ b/apis/web/v1beta2/zz_groupversion_info.go @@ -0,0 +1,32 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +// +kubebuilder:object:generate=true +// +groupName=web.azure.upbound.io +// +versionName=v1beta2 +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "web.azure.upbound.io" + CRDVersion = "v1beta2" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/web/v1beta2/zz_linuxfunctionapp_terraformed.go b/apis/web/v1beta2/zz_linuxfunctionapp_terraformed.go new file mode 100755 index 000000000..2fbf7c6fd --- /dev/null +++ b/apis/web/v1beta2/zz_linuxfunctionapp_terraformed.go @@ -0,0 +1,130 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this LinuxFunctionApp +func (mg *LinuxFunctionApp) GetTerraformResourceType() string { + return "azurerm_linux_function_app" +} + +// GetConnectionDetailsMapping for this LinuxFunctionApp +func (tr *LinuxFunctionApp) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"auth_settings[*].active_directory[*].client_secret": "spec.forProvider.authSettings[*].activeDirectory[*].clientSecretSecretRef", "auth_settings[*].facebook[*].app_secret": "spec.forProvider.authSettings[*].facebook[*].appSecretSecretRef", "auth_settings[*].github[*].client_secret": "spec.forProvider.authSettings[*].github[*].clientSecretSecretRef", "auth_settings[*].google[*].client_secret": "spec.forProvider.authSettings[*].google[*].clientSecretSecretRef", "auth_settings[*].microsoft[*].client_secret": "spec.forProvider.authSettings[*].microsoft[*].clientSecretSecretRef", "auth_settings[*].twitter[*].consumer_secret": "spec.forProvider.authSettings[*].twitter[*].consumerSecretSecretRef", "backup[*].storage_account_url": "spec.forProvider.backup[*].storageAccountUrlSecretRef", "connection_string[*].value": "spec.forProvider.connectionString[*].valueSecretRef", "custom_domain_verification_id": "status.atProvider.customDomainVerificationId", "site_config[*].application_insights_connection_string": "spec.forProvider.siteConfig[*].applicationInsightsConnectionStringSecretRef", "site_config[*].application_insights_key": "spec.forProvider.siteConfig[*].applicationInsightsKeySecretRef", "site_config[*].application_stack[*].docker[*].registry_password": "spec.forProvider.siteConfig[*].applicationStack[*].docker[*].registryPasswordSecretRef", "site_config[*].application_stack[*].docker[*].registry_username": 
"spec.forProvider.siteConfig[*].applicationStack[*].docker[*].registryUsernameSecretRef", "site_credential[*]": "status.atProvider.siteCredential[*]", "storage_account[*].access_key": "spec.forProvider.storageAccount[*].accessKeySecretRef", "storage_account_access_key": "spec.forProvider.storageAccountAccessKeySecretRef"} +} + +// GetObservation of this LinuxFunctionApp +func (tr *LinuxFunctionApp) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this LinuxFunctionApp +func (tr *LinuxFunctionApp) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this LinuxFunctionApp +func (tr *LinuxFunctionApp) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this LinuxFunctionApp +func (tr *LinuxFunctionApp) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this LinuxFunctionApp +func (tr *LinuxFunctionApp) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this LinuxFunctionApp +func (tr *LinuxFunctionApp) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this LinuxFunctionApp +func (tr 
*LinuxFunctionApp) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this LinuxFunctionApp using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *LinuxFunctionApp) LateInitialize(attrs []byte) (bool, error) { + params := &LinuxFunctionAppParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + opts = append(opts, resource.WithNameFilter("KeyVaultReferenceIdentityID")) + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *LinuxFunctionApp) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/web/v1beta2/zz_linuxfunctionapp_types.go b/apis/web/v1beta2/zz_linuxfunctionapp_types.go new file mode 100755 index 000000000..fba750124 --- /dev/null +++ b/apis/web/v1beta2/zz_linuxfunctionapp_types.go @@ -0,0 +1,3419 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ActiveDirectoryV2InitParameters struct { + + // The list of allowed Applications for the Default Authorisation Policy. + // The list of allowed Applications for the Default Authorisation Policy. + AllowedApplications []*string `json:"allowedApplications,omitempty" tf:"allowed_applications,omitempty"` + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The list of allowed Group Names for the Default Authorisation Policy. + // The list of allowed Group Names for the Default Authorisation Policy. + AllowedGroups []*string `json:"allowedGroups,omitempty" tf:"allowed_groups,omitempty"` + + // The list of allowed Identities for the Default Authorisation Policy. + // The list of allowed Identities for the Default Authorisation Policy. 
+ AllowedIdentities []*string `json:"allowedIdentities,omitempty" tf:"allowed_identities,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Active Directory. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The thumbprint of the certificate used for signing purposes. + // The thumbprint of the certificate used for signing purposes. + ClientSecretCertificateThumbprint *string `json:"clientSecretCertificateThumbprint,omitempty" tf:"client_secret_certificate_thumbprint,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The App Setting name that contains the client secret of the Client. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // A list of Allowed Client Applications in the JWT Claim. + // A list of Allowed Client Applications in the JWT Claim. + JwtAllowedClientApplications []*string `json:"jwtAllowedClientApplications,omitempty" tf:"jwt_allowed_client_applications,omitempty"` + + // A list of Allowed Groups in the JWT Claim. + // A list of Allowed Groups in the JWT Claim. + JwtAllowedGroups []*string `json:"jwtAllowedGroups,omitempty" tf:"jwt_allowed_groups,omitempty"` + + // A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + // A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + // +mapType=granular + LoginParameters map[string]*string `json:"loginParameters,omitempty" tf:"login_parameters,omitempty"` + + // The Azure Tenant Endpoint for the Authenticating Tenant. e.g. https://login.microsoftonline.com/v2.0/{tenant-guid}/ + // The Azure Tenant Endpoint for the Authenticating Tenant. e.g. `https://login.microsoftonline.com/v2.0/{tenant-guid}/`. 
+ TenantAuthEndpoint *string `json:"tenantAuthEndpoint,omitempty" tf:"tenant_auth_endpoint,omitempty"` + + // Should the www-authenticate provider should be omitted from the request? Defaults to false. + // Should the www-authenticate provider should be omitted from the request? Defaults to `false` + WwwAuthenticationDisabled *bool `json:"wwwAuthenticationDisabled,omitempty" tf:"www_authentication_disabled,omitempty"` +} + +type ActiveDirectoryV2Observation struct { + + // The list of allowed Applications for the Default Authorisation Policy. + // The list of allowed Applications for the Default Authorisation Policy. + AllowedApplications []*string `json:"allowedApplications,omitempty" tf:"allowed_applications,omitempty"` + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The list of allowed Group Names for the Default Authorisation Policy. + // The list of allowed Group Names for the Default Authorisation Policy. + AllowedGroups []*string `json:"allowedGroups,omitempty" tf:"allowed_groups,omitempty"` + + // The list of allowed Identities for the Default Authorisation Policy. + // The list of allowed Identities for the Default Authorisation Policy. + AllowedIdentities []*string `json:"allowedIdentities,omitempty" tf:"allowed_identities,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Active Directory. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The thumbprint of the certificate used for signing purposes. + // The thumbprint of the certificate used for signing purposes. 
+ ClientSecretCertificateThumbprint *string `json:"clientSecretCertificateThumbprint,omitempty" tf:"client_secret_certificate_thumbprint,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The App Setting name that contains the client secret of the Client. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // A list of Allowed Client Applications in the JWT Claim. + // A list of Allowed Client Applications in the JWT Claim. + JwtAllowedClientApplications []*string `json:"jwtAllowedClientApplications,omitempty" tf:"jwt_allowed_client_applications,omitempty"` + + // A list of Allowed Groups in the JWT Claim. + // A list of Allowed Groups in the JWT Claim. + JwtAllowedGroups []*string `json:"jwtAllowedGroups,omitempty" tf:"jwt_allowed_groups,omitempty"` + + // A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + // A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + // +mapType=granular + LoginParameters map[string]*string `json:"loginParameters,omitempty" tf:"login_parameters,omitempty"` + + // The Azure Tenant Endpoint for the Authenticating Tenant. e.g. https://login.microsoftonline.com/v2.0/{tenant-guid}/ + // The Azure Tenant Endpoint for the Authenticating Tenant. e.g. `https://login.microsoftonline.com/v2.0/{tenant-guid}/`. + TenantAuthEndpoint *string `json:"tenantAuthEndpoint,omitempty" tf:"tenant_auth_endpoint,omitempty"` + + // Should the www-authenticate provider should be omitted from the request? Defaults to false. + // Should the www-authenticate provider should be omitted from the request? 
Defaults to `false` + WwwAuthenticationDisabled *bool `json:"wwwAuthenticationDisabled,omitempty" tf:"www_authentication_disabled,omitempty"` +} + +type ActiveDirectoryV2Parameters struct { + + // The list of allowed Applications for the Default Authorisation Policy. + // The list of allowed Applications for the Default Authorisation Policy. + // +kubebuilder:validation:Optional + AllowedApplications []*string `json:"allowedApplications,omitempty" tf:"allowed_applications,omitempty"` + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + // +kubebuilder:validation:Optional + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The list of allowed Group Names for the Default Authorisation Policy. + // The list of allowed Group Names for the Default Authorisation Policy. + // +kubebuilder:validation:Optional + AllowedGroups []*string `json:"allowedGroups,omitempty" tf:"allowed_groups,omitempty"` + + // The list of allowed Identities for the Default Authorisation Policy. + // The list of allowed Identities for the Default Authorisation Policy. + // +kubebuilder:validation:Optional + AllowedIdentities []*string `json:"allowedIdentities,omitempty" tf:"allowed_identities,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Active Directory. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The thumbprint of the certificate used for signing purposes. + // The thumbprint of the certificate used for signing purposes. 
+ // +kubebuilder:validation:Optional + ClientSecretCertificateThumbprint *string `json:"clientSecretCertificateThumbprint,omitempty" tf:"client_secret_certificate_thumbprint,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The App Setting name that contains the client secret of the Client. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // A list of Allowed Client Applications in the JWT Claim. + // A list of Allowed Client Applications in the JWT Claim. + // +kubebuilder:validation:Optional + JwtAllowedClientApplications []*string `json:"jwtAllowedClientApplications,omitempty" tf:"jwt_allowed_client_applications,omitempty"` + + // A list of Allowed Groups in the JWT Claim. + // A list of Allowed Groups in the JWT Claim. + // +kubebuilder:validation:Optional + JwtAllowedGroups []*string `json:"jwtAllowedGroups,omitempty" tf:"jwt_allowed_groups,omitempty"` + + // A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + // A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + // +kubebuilder:validation:Optional + // +mapType=granular + LoginParameters map[string]*string `json:"loginParameters,omitempty" tf:"login_parameters,omitempty"` + + // The Azure Tenant Endpoint for the Authenticating Tenant. e.g. https://login.microsoftonline.com/v2.0/{tenant-guid}/ + // The Azure Tenant Endpoint for the Authenticating Tenant. e.g. `https://login.microsoftonline.com/v2.0/{tenant-guid}/`. + // +kubebuilder:validation:Optional + TenantAuthEndpoint *string `json:"tenantAuthEndpoint" tf:"tenant_auth_endpoint,omitempty"` + + // Should the www-authenticate provider should be omitted from the request? Defaults to false. + // Should the www-authenticate provider should be omitted from the request? 
Defaults to `false` + // +kubebuilder:validation:Optional + WwwAuthenticationDisabled *bool `json:"wwwAuthenticationDisabled,omitempty" tf:"www_authentication_disabled,omitempty"` +} + +type AppServiceLogsInitParameters struct { + + // The amount of disk space to use for logs. Valid values are between 25 and 100. Defaults to 35. + // The amount of disk space to use for logs. Valid values are between `25` and `100`. + DiskQuotaMb *float64 `json:"diskQuotaMb,omitempty" tf:"disk_quota_mb,omitempty"` + + // After how many days backups should be deleted. Defaults to 30. + // The retention period for logs in days. Valid values are between `0` and `99999`. Defaults to `0` (never delete). + RetentionPeriodDays *float64 `json:"retentionPeriodDays,omitempty" tf:"retention_period_days,omitempty"` +} + +type AppServiceLogsObservation struct { + + // The amount of disk space to use for logs. Valid values are between 25 and 100. Defaults to 35. + // The amount of disk space to use for logs. Valid values are between `25` and `100`. + DiskQuotaMb *float64 `json:"diskQuotaMb,omitempty" tf:"disk_quota_mb,omitempty"` + + // After how many days backups should be deleted. Defaults to 30. + // The retention period for logs in days. Valid values are between `0` and `99999`. Defaults to `0` (never delete). + RetentionPeriodDays *float64 `json:"retentionPeriodDays,omitempty" tf:"retention_period_days,omitempty"` +} + +type AppServiceLogsParameters struct { + + // The amount of disk space to use for logs. Valid values are between 25 and 100. Defaults to 35. + // The amount of disk space to use for logs. Valid values are between `25` and `100`. + // +kubebuilder:validation:Optional + DiskQuotaMb *float64 `json:"diskQuotaMb,omitempty" tf:"disk_quota_mb,omitempty"` + + // After how many days backups should be deleted. Defaults to 30. + // The retention period for logs in days. Valid values are between `0` and `99999`. Defaults to `0` (never delete). 
+ // +kubebuilder:validation:Optional + RetentionPeriodDays *float64 `json:"retentionPeriodDays,omitempty" tf:"retention_period_days,omitempty"` +} + +type AppleV2InitParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Apple web application. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Apple Login. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` +} + +type AppleV2Observation struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Apple web application. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Apple Login. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type AppleV2Parameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Apple web application. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. 
+ // The app setting name that contains the `client_secret` value used for Apple Login. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName" tf:"client_secret_setting_name,omitempty"` +} + +type ApplicationStackInitParameters struct { + + // One or more docker blocks as defined below. + // A docker block + Docker []DockerInitParameters `json:"docker,omitempty" tf:"docker,omitempty"` + + // The version of .NET to use. Possible values include 3.1, 6.0, 7.0 and 8.0. + // The version of .Net. Possible values are `3.1`, `6.0` and `7.0` + DotnetVersion *string `json:"dotnetVersion,omitempty" tf:"dotnet_version,omitempty"` + + // The Version of Java to use. Supported versions include 8, 11 & 17. + // The version of Java to use. Possible values are `8`, `11`, and `17` + JavaVersion *string `json:"javaVersion,omitempty" tf:"java_version,omitempty"` + + // The version of Node to run. Possible values include 12, 14, 16 and 18. + // The version of Node to use. Possible values include `12`, `14`, `16` and `18` + NodeVersion *string `json:"nodeVersion,omitempty" tf:"node_version,omitempty"` + + // The version of PowerShell Core to run. Possible values are 7, and 7.2. + // The version of PowerShell Core to use. Possibles values are `7`, and `7.2` + PowershellCoreVersion *string `json:"powershellCoreVersion,omitempty" tf:"powershell_core_version,omitempty"` + + // The version of Python to run. Possible values are 3.12, 3.11, 3.10, 3.9, 3.8 and 3.7. + // The version of Python to use. Possible values include `3.12`, `3.11`, `3.10`, `3.9`, `3.8`, and `3.7`. + PythonVersion *string `json:"pythonVersion,omitempty" tf:"python_version,omitempty"` + + // Should the Linux Function App use a custom runtime? + UseCustomRuntime *bool `json:"useCustomRuntime,omitempty" tf:"use_custom_runtime,omitempty"` + + // Should the DotNet process use an isolated runtime. Defaults to false. + // Should the DotNet process use an isolated runtime. 
Defaults to `false`. + UseDotnetIsolatedRuntime *bool `json:"useDotnetIsolatedRuntime,omitempty" tf:"use_dotnet_isolated_runtime,omitempty"` +} + +type ApplicationStackObservation struct { + + // One or more docker blocks as defined below. + // A docker block + Docker []DockerObservation `json:"docker,omitempty" tf:"docker,omitempty"` + + // The version of .NET to use. Possible values include 3.1, 6.0, 7.0 and 8.0. + // The version of .Net. Possible values are `3.1`, `6.0` and `7.0` + DotnetVersion *string `json:"dotnetVersion,omitempty" tf:"dotnet_version,omitempty"` + + // The Version of Java to use. Supported versions include 8, 11 & 17. + // The version of Java to use. Possible values are `8`, `11`, and `17` + JavaVersion *string `json:"javaVersion,omitempty" tf:"java_version,omitempty"` + + // The version of Node to run. Possible values include 12, 14, 16 and 18. + // The version of Node to use. Possible values include `12`, `14`, `16` and `18` + NodeVersion *string `json:"nodeVersion,omitempty" tf:"node_version,omitempty"` + + // The version of PowerShell Core to run. Possible values are 7, and 7.2. + // The version of PowerShell Core to use. Possibles values are `7`, and `7.2` + PowershellCoreVersion *string `json:"powershellCoreVersion,omitempty" tf:"powershell_core_version,omitempty"` + + // The version of Python to run. Possible values are 3.12, 3.11, 3.10, 3.9, 3.8 and 3.7. + // The version of Python to use. Possible values include `3.12`, `3.11`, `3.10`, `3.9`, `3.8`, and `3.7`. + PythonVersion *string `json:"pythonVersion,omitempty" tf:"python_version,omitempty"` + + // Should the Linux Function App use a custom runtime? + UseCustomRuntime *bool `json:"useCustomRuntime,omitempty" tf:"use_custom_runtime,omitempty"` + + // Should the DotNet process use an isolated runtime. Defaults to false. + // Should the DotNet process use an isolated runtime. Defaults to `false`. 
+ UseDotnetIsolatedRuntime *bool `json:"useDotnetIsolatedRuntime,omitempty" tf:"use_dotnet_isolated_runtime,omitempty"` +} + +type ApplicationStackParameters struct { + + // One or more docker blocks as defined below. + // A docker block + // +kubebuilder:validation:Optional + Docker []DockerParameters `json:"docker,omitempty" tf:"docker,omitempty"` + + // The version of .NET to use. Possible values include 3.1, 6.0, 7.0 and 8.0. + // The version of .Net. Possible values are `3.1`, `6.0` and `7.0` + // +kubebuilder:validation:Optional + DotnetVersion *string `json:"dotnetVersion,omitempty" tf:"dotnet_version,omitempty"` + + // The Version of Java to use. Supported versions include 8, 11 & 17. + // The version of Java to use. Possible values are `8`, `11`, and `17` + // +kubebuilder:validation:Optional + JavaVersion *string `json:"javaVersion,omitempty" tf:"java_version,omitempty"` + + // The version of Node to run. Possible values include 12, 14, 16 and 18. + // The version of Node to use. Possible values include `12`, `14`, `16` and `18` + // +kubebuilder:validation:Optional + NodeVersion *string `json:"nodeVersion,omitempty" tf:"node_version,omitempty"` + + // The version of PowerShell Core to run. Possible values are 7, and 7.2. + // The version of PowerShell Core to use. Possibles values are `7`, and `7.2` + // +kubebuilder:validation:Optional + PowershellCoreVersion *string `json:"powershellCoreVersion,omitempty" tf:"powershell_core_version,omitempty"` + + // The version of Python to run. Possible values are 3.12, 3.11, 3.10, 3.9, 3.8 and 3.7. + // The version of Python to use. Possible values include `3.12`, `3.11`, `3.10`, `3.9`, `3.8`, and `3.7`. + // +kubebuilder:validation:Optional + PythonVersion *string `json:"pythonVersion,omitempty" tf:"python_version,omitempty"` + + // Should the Linux Function App use a custom runtime? 
+ // +kubebuilder:validation:Optional + UseCustomRuntime *bool `json:"useCustomRuntime,omitempty" tf:"use_custom_runtime,omitempty"` + + // Should the DotNet process use an isolated runtime. Defaults to false. + // Should the DotNet process use an isolated runtime. Defaults to `false`. + // +kubebuilder:validation:Optional + UseDotnetIsolatedRuntime *bool `json:"useDotnetIsolatedRuntime,omitempty" tf:"use_dotnet_isolated_runtime,omitempty"` +} + +type AuthSettingsV2InitParameters struct { + + // An active_directory_v2 block as defined below. + ActiveDirectoryV2 *ActiveDirectoryV2InitParameters `json:"activeDirectoryV2,omitempty" tf:"active_directory_v2,omitempty"` + + // An apple_v2 block as defined below. + AppleV2 *AppleV2InitParameters `json:"appleV2,omitempty" tf:"apple_v2,omitempty"` + + // Should the AuthV2 Settings be enabled. Defaults to false. + // Should the AuthV2 Settings be enabled. Defaults to `false` + AuthEnabled *bool `json:"authEnabled,omitempty" tf:"auth_enabled,omitempty"` + + // An azure_static_web_app_v2 block as defined below. + AzureStaticWebAppV2 *AzureStaticWebAppV2InitParameters `json:"azureStaticWebAppV2,omitempty" tf:"azure_static_web_app_v2,omitempty"` + + // The path to the App Auth settings. + // The path to the App Auth settings. **Note:** Relative Paths are evaluated from the Site Root directory. + ConfigFilePath *string `json:"configFilePath,omitempty" tf:"config_file_path,omitempty"` + + // Zero or more custom_oidc_v2 blocks as defined below. + CustomOidcV2 []CustomOidcV2InitParameters `json:"customOidcV2,omitempty" tf:"custom_oidc_v2,omitempty"` + + // The Default Authentication Provider to use when the unauthenticated_action is set to RedirectToLoginPage. Possible values include: apple, azureactivedirectory, facebook, github, google, twitter and the name of your custom_oidc_v2 provider. + // The Default Authentication Provider to use when the `unauthenticated_action` is set to `RedirectToLoginPage`. 
Possible values include: `apple`, `azureactivedirectory`, `facebook`, `github`, `google`, `twitter` and the `name` of your `custom_oidc_v2` provider. + DefaultProvider *string `json:"defaultProvider,omitempty" tf:"default_provider,omitempty"` + + // The paths which should be excluded from the unauthenticated_action when it is set to RedirectToLoginPage. + // The paths which should be excluded from the `unauthenticated_action` when it is set to `RedirectToLoginPage`. + ExcludedPaths []*string `json:"excludedPaths,omitempty" tf:"excluded_paths,omitempty"` + + // A facebook_v2 block as defined below. + FacebookV2 *FacebookV2InitParameters `json:"facebookV2,omitempty" tf:"facebook_v2,omitempty"` + + // The convention used to determine the url of the request made. Possible values include NoProxy, Standard, Custom. Defaults to NoProxy. + // The convention used to determine the url of the request made. Possible values include `ForwardProxyConventionNoProxy`, `ForwardProxyConventionStandard`, `ForwardProxyConventionCustom`. Defaults to `ForwardProxyConventionNoProxy` + ForwardProxyConvention *string `json:"forwardProxyConvention,omitempty" tf:"forward_proxy_convention,omitempty"` + + // The name of the custom header containing the host of the request. + // The name of the header containing the host of the request. + ForwardProxyCustomHostHeaderName *string `json:"forwardProxyCustomHostHeaderName,omitempty" tf:"forward_proxy_custom_host_header_name,omitempty"` + + // The name of the custom header containing the scheme of the request. + // The name of the header containing the scheme of the request. + ForwardProxyCustomSchemeHeaderName *string `json:"forwardProxyCustomSchemeHeaderName,omitempty" tf:"forward_proxy_custom_scheme_header_name,omitempty"` + + // A github_v2 block as defined below. + GithubV2 *GithubV2InitParameters `json:"githubV2,omitempty" tf:"github_v2,omitempty"` + + // A google_v2 block as defined below. 
+ GoogleV2 *GoogleV2InitParameters `json:"googleV2,omitempty" tf:"google_v2,omitempty"` + + // The prefix that should precede all the authentication and authorisation paths. Defaults to /.auth. + // The prefix that should precede all the authentication and authorisation paths. Defaults to `/.auth` + HTTPRouteAPIPrefix *string `json:"httpRouteApiPrefix,omitempty" tf:"http_route_api_prefix,omitempty"` + + // A login block as defined below. + Login *LoginInitParameters `json:"login,omitempty" tf:"login,omitempty"` + + // A microsoft_v2 block as defined below. + MicrosoftV2 *MicrosoftV2InitParameters `json:"microsoftV2,omitempty" tf:"microsoft_v2,omitempty"` + + // Should the authentication flow be used for all requests. + // Should the authentication flow be used for all requests. + RequireAuthentication *bool `json:"requireAuthentication,omitempty" tf:"require_authentication,omitempty"` + + // Should HTTPS be required on connections? Defaults to true. + // Should HTTPS be required on connections? Defaults to true. + RequireHTTPS *bool `json:"requireHttps,omitempty" tf:"require_https,omitempty"` + + // The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to ~1. + // The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to `~1` + RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` + + // A twitter_v2 block as defined below. + TwitterV2 *TwitterV2InitParameters `json:"twitterV2,omitempty" tf:"twitter_v2,omitempty"` + + // The action to take for requests made without authentication. Possible values include RedirectToLoginPage, AllowAnonymous, Return401, and Return403. Defaults to RedirectToLoginPage. + // The action to take for requests made without authentication. Possible values include `RedirectToLoginPage`, `AllowAnonymous`, `Return401`, and `Return403`. Defaults to `RedirectToLoginPage`. 
+ UnauthenticatedAction *string `json:"unauthenticatedAction,omitempty" tf:"unauthenticated_action,omitempty"` +} + +type AuthSettingsV2Observation struct { + + // An active_directory_v2 block as defined below. + ActiveDirectoryV2 *ActiveDirectoryV2Observation `json:"activeDirectoryV2,omitempty" tf:"active_directory_v2,omitempty"` + + // An apple_v2 block as defined below. + AppleV2 *AppleV2Observation `json:"appleV2,omitempty" tf:"apple_v2,omitempty"` + + // Should the AuthV2 Settings be enabled. Defaults to false. + // Should the AuthV2 Settings be enabled. Defaults to `false` + AuthEnabled *bool `json:"authEnabled,omitempty" tf:"auth_enabled,omitempty"` + + // An azure_static_web_app_v2 block as defined below. + AzureStaticWebAppV2 *AzureStaticWebAppV2Observation `json:"azureStaticWebAppV2,omitempty" tf:"azure_static_web_app_v2,omitempty"` + + // The path to the App Auth settings. + // The path to the App Auth settings. **Note:** Relative Paths are evaluated from the Site Root directory. + ConfigFilePath *string `json:"configFilePath,omitempty" tf:"config_file_path,omitempty"` + + // Zero or more custom_oidc_v2 blocks as defined below. + CustomOidcV2 []CustomOidcV2Observation `json:"customOidcV2,omitempty" tf:"custom_oidc_v2,omitempty"` + + // The Default Authentication Provider to use when the unauthenticated_action is set to RedirectToLoginPage. Possible values include: apple, azureactivedirectory, facebook, github, google, twitter and the name of your custom_oidc_v2 provider. + // The Default Authentication Provider to use when the `unauthenticated_action` is set to `RedirectToLoginPage`. Possible values include: `apple`, `azureactivedirectory`, `facebook`, `github`, `google`, `twitter` and the `name` of your `custom_oidc_v2` provider. + DefaultProvider *string `json:"defaultProvider,omitempty" tf:"default_provider,omitempty"` + + // The paths which should be excluded from the unauthenticated_action when it is set to RedirectToLoginPage. 
+ // The paths which should be excluded from the `unauthenticated_action` when it is set to `RedirectToLoginPage`. + ExcludedPaths []*string `json:"excludedPaths,omitempty" tf:"excluded_paths,omitempty"` + + // A facebook_v2 block as defined below. + FacebookV2 *FacebookV2Observation `json:"facebookV2,omitempty" tf:"facebook_v2,omitempty"` + + // The convention used to determine the url of the request made. Possible values include NoProxy, Standard, Custom. Defaults to NoProxy. + // The convention used to determine the url of the request made. Possible values include `ForwardProxyConventionNoProxy`, `ForwardProxyConventionStandard`, `ForwardProxyConventionCustom`. Defaults to `ForwardProxyConventionNoProxy` + ForwardProxyConvention *string `json:"forwardProxyConvention,omitempty" tf:"forward_proxy_convention,omitempty"` + + // The name of the custom header containing the host of the request. + // The name of the header containing the host of the request. + ForwardProxyCustomHostHeaderName *string `json:"forwardProxyCustomHostHeaderName,omitempty" tf:"forward_proxy_custom_host_header_name,omitempty"` + + // The name of the custom header containing the scheme of the request. + // The name of the header containing the scheme of the request. + ForwardProxyCustomSchemeHeaderName *string `json:"forwardProxyCustomSchemeHeaderName,omitempty" tf:"forward_proxy_custom_scheme_header_name,omitempty"` + + // A github_v2 block as defined below. + GithubV2 *GithubV2Observation `json:"githubV2,omitempty" tf:"github_v2,omitempty"` + + // A google_v2 block as defined below. + GoogleV2 *GoogleV2Observation `json:"googleV2,omitempty" tf:"google_v2,omitempty"` + + // The prefix that should precede all the authentication and authorisation paths. Defaults to /.auth. + // The prefix that should precede all the authentication and authorisation paths. 
Defaults to `/.auth` + HTTPRouteAPIPrefix *string `json:"httpRouteApiPrefix,omitempty" tf:"http_route_api_prefix,omitempty"` + + // A login block as defined below. + Login *LoginObservation `json:"login,omitempty" tf:"login,omitempty"` + + // A microsoft_v2 block as defined below. + MicrosoftV2 *MicrosoftV2Observation `json:"microsoftV2,omitempty" tf:"microsoft_v2,omitempty"` + + // Should the authentication flow be used for all requests. + // Should the authentication flow be used for all requests. + RequireAuthentication *bool `json:"requireAuthentication,omitempty" tf:"require_authentication,omitempty"` + + // Should HTTPS be required on connections? Defaults to true. + // Should HTTPS be required on connections? Defaults to true. + RequireHTTPS *bool `json:"requireHttps,omitempty" tf:"require_https,omitempty"` + + // The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to ~1. + // The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to `~1` + RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` + + // A twitter_v2 block as defined below. + TwitterV2 *TwitterV2Observation `json:"twitterV2,omitempty" tf:"twitter_v2,omitempty"` + + // The action to take for requests made without authentication. Possible values include RedirectToLoginPage, AllowAnonymous, Return401, and Return403. Defaults to RedirectToLoginPage. + // The action to take for requests made without authentication. Possible values include `RedirectToLoginPage`, `AllowAnonymous`, `Return401`, and `Return403`. Defaults to `RedirectToLoginPage`. + UnauthenticatedAction *string `json:"unauthenticatedAction,omitempty" tf:"unauthenticated_action,omitempty"` +} + +type AuthSettingsV2Parameters struct { + + // An active_directory_v2 block as defined below. 
+ // +kubebuilder:validation:Optional + ActiveDirectoryV2 *ActiveDirectoryV2Parameters `json:"activeDirectoryV2,omitempty" tf:"active_directory_v2,omitempty"` + + // An apple_v2 block as defined below. + // +kubebuilder:validation:Optional + AppleV2 *AppleV2Parameters `json:"appleV2,omitempty" tf:"apple_v2,omitempty"` + + // Should the AuthV2 Settings be enabled. Defaults to false. + // Should the AuthV2 Settings be enabled. Defaults to `false` + // +kubebuilder:validation:Optional + AuthEnabled *bool `json:"authEnabled,omitempty" tf:"auth_enabled,omitempty"` + + // An azure_static_web_app_v2 block as defined below. + // +kubebuilder:validation:Optional + AzureStaticWebAppV2 *AzureStaticWebAppV2Parameters `json:"azureStaticWebAppV2,omitempty" tf:"azure_static_web_app_v2,omitempty"` + + // The path to the App Auth settings. + // The path to the App Auth settings. **Note:** Relative Paths are evaluated from the Site Root directory. + // +kubebuilder:validation:Optional + ConfigFilePath *string `json:"configFilePath,omitempty" tf:"config_file_path,omitempty"` + + // Zero or more custom_oidc_v2 blocks as defined below. + // +kubebuilder:validation:Optional + CustomOidcV2 []CustomOidcV2Parameters `json:"customOidcV2,omitempty" tf:"custom_oidc_v2,omitempty"` + + // The Default Authentication Provider to use when the unauthenticated_action is set to RedirectToLoginPage. Possible values include: apple, azureactivedirectory, facebook, github, google, twitter and the name of your custom_oidc_v2 provider. + // The Default Authentication Provider to use when the `unauthenticated_action` is set to `RedirectToLoginPage`. Possible values include: `apple`, `azureactivedirectory`, `facebook`, `github`, `google`, `twitter` and the `name` of your `custom_oidc_v2` provider. 
+ // +kubebuilder:validation:Optional + DefaultProvider *string `json:"defaultProvider,omitempty" tf:"default_provider,omitempty"` + + // The paths which should be excluded from the unauthenticated_action when it is set to RedirectToLoginPage. + // The paths which should be excluded from the `unauthenticated_action` when it is set to `RedirectToLoginPage`. + // +kubebuilder:validation:Optional + ExcludedPaths []*string `json:"excludedPaths,omitempty" tf:"excluded_paths,omitempty"` + + // A facebook_v2 block as defined below. + // +kubebuilder:validation:Optional + FacebookV2 *FacebookV2Parameters `json:"facebookV2,omitempty" tf:"facebook_v2,omitempty"` + + // The convention used to determine the url of the request made. Possible values include NoProxy, Standard, Custom. Defaults to NoProxy. + // The convention used to determine the url of the request made. Possible values include `ForwardProxyConventionNoProxy`, `ForwardProxyConventionStandard`, `ForwardProxyConventionCustom`. Defaults to `ForwardProxyConventionNoProxy` + // +kubebuilder:validation:Optional + ForwardProxyConvention *string `json:"forwardProxyConvention,omitempty" tf:"forward_proxy_convention,omitempty"` + + // The name of the custom header containing the host of the request. + // The name of the header containing the host of the request. + // +kubebuilder:validation:Optional + ForwardProxyCustomHostHeaderName *string `json:"forwardProxyCustomHostHeaderName,omitempty" tf:"forward_proxy_custom_host_header_name,omitempty"` + + // The name of the custom header containing the scheme of the request. + // The name of the header containing the scheme of the request. + // +kubebuilder:validation:Optional + ForwardProxyCustomSchemeHeaderName *string `json:"forwardProxyCustomSchemeHeaderName,omitempty" tf:"forward_proxy_custom_scheme_header_name,omitempty"` + + // A github_v2 block as defined below. 
+ // +kubebuilder:validation:Optional + GithubV2 *GithubV2Parameters `json:"githubV2,omitempty" tf:"github_v2,omitempty"` + + // A google_v2 block as defined below. + // +kubebuilder:validation:Optional + GoogleV2 *GoogleV2Parameters `json:"googleV2,omitempty" tf:"google_v2,omitempty"` + + // The prefix that should precede all the authentication and authorisation paths. Defaults to /.auth. + // The prefix that should precede all the authentication and authorisation paths. Defaults to `/.auth` + // +kubebuilder:validation:Optional + HTTPRouteAPIPrefix *string `json:"httpRouteApiPrefix,omitempty" tf:"http_route_api_prefix,omitempty"` + + // A login block as defined below. + // +kubebuilder:validation:Optional + Login *LoginParameters `json:"login" tf:"login,omitempty"` + + // A microsoft_v2 block as defined below. + // +kubebuilder:validation:Optional + MicrosoftV2 *MicrosoftV2Parameters `json:"microsoftV2,omitempty" tf:"microsoft_v2,omitempty"` + + // Should the authentication flow be used for all requests. + // Should the authentication flow be used for all requests. + // +kubebuilder:validation:Optional + RequireAuthentication *bool `json:"requireAuthentication,omitempty" tf:"require_authentication,omitempty"` + + // Should HTTPS be required on connections? Defaults to true. + // Should HTTPS be required on connections? Defaults to true. + // +kubebuilder:validation:Optional + RequireHTTPS *bool `json:"requireHttps,omitempty" tf:"require_https,omitempty"` + + // The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to ~1. + // The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to `~1` + // +kubebuilder:validation:Optional + RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` + + // A twitter_v2 block as defined below. 
+ // +kubebuilder:validation:Optional + TwitterV2 *TwitterV2Parameters `json:"twitterV2,omitempty" tf:"twitter_v2,omitempty"` + + // The action to take for requests made without authentication. Possible values include RedirectToLoginPage, AllowAnonymous, Return401, and Return403. Defaults to RedirectToLoginPage. + // The action to take for requests made without authentication. Possible values include `RedirectToLoginPage`, `AllowAnonymous`, `Return401`, and `Return403`. Defaults to `RedirectToLoginPage`. + // +kubebuilder:validation:Optional + UnauthenticatedAction *string `json:"unauthenticatedAction,omitempty" tf:"unauthenticated_action,omitempty"` +} + +type AzureStaticWebAppV2InitParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Static Web App Authentication. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` +} + +type AzureStaticWebAppV2Observation struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Static Web App Authentication. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` +} + +type AzureStaticWebAppV2Parameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Static Web App Authentication. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` +} + +type BackupInitParameters struct { + + // Should this backup job be enabled? Defaults to true. + // Should this backup job be enabled? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The name which should be used for this Backup. + // The name which should be used for this Backup. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A schedule block as defined below. 
+ Schedule *ScheduleInitParameters `json:"schedule,omitempty" tf:"schedule,omitempty"` +} + +type BackupObservation struct { + + // Should this backup job be enabled? Defaults to true. + // Should this backup job be enabled? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The name which should be used for this Backup. + // The name which should be used for this Backup. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A schedule block as defined below. + Schedule *ScheduleObservation `json:"schedule,omitempty" tf:"schedule,omitempty"` +} + +type BackupParameters struct { + + // Should this backup job be enabled? Defaults to true. + // Should this backup job be enabled? + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The name which should be used for this Backup. + // The name which should be used for this Backup. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // A schedule block as defined below. + // +kubebuilder:validation:Optional + Schedule *ScheduleParameters `json:"schedule" tf:"schedule,omitempty"` + + // The SAS URL to the container. + // The SAS URL to the container. + // +kubebuilder:validation:Required + StorageAccountURLSecretRef v1.SecretKeySelector `json:"storageAccountUrlSecretRef" tf:"-"` +} + +type CustomOidcV2InitParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with this Custom OIDC. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The name which should be used for this Storage Account. + // The name of the Custom OIDC Authentication Provider. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The name of the claim that contains the users name. + // The name of the claim that contains the users name. 
+ NameClaimType *string `json:"nameClaimType,omitempty" tf:"name_claim_type,omitempty"` + + // The app setting name that contains the client_secret value used for the Custom OIDC Login. + // The endpoint that contains all the configuration endpoints for this Custom OIDC provider. + OpenIDConfigurationEndpoint *string `json:"openidConfigurationEndpoint,omitempty" tf:"openid_configuration_endpoint,omitempty"` + + // The list of the scopes that should be requested while authenticating. + // The list of the scopes that should be requested while authenticating. + Scopes []*string `json:"scopes,omitempty" tf:"scopes,omitempty"` +} + +type CustomOidcV2Observation struct { + + // The endpoint to make the Authorisation Request as supplied by openid_configuration_endpoint response. + // The endpoint to make the Authorisation Request. + AuthorisationEndpoint *string `json:"authorisationEndpoint,omitempty" tf:"authorisation_endpoint,omitempty"` + + // The endpoint that provides the keys necessary to validate the token as supplied by openid_configuration_endpoint response. + // The endpoint that provides the keys necessary to validate the token. + CertificationURI *string `json:"certificationUri,omitempty" tf:"certification_uri,omitempty"` + + // The Client Credential Method used. + // The Client Credential Method used. Currently the only supported value is `ClientSecretPost`. + ClientCredentialMethod *string `json:"clientCredentialMethod,omitempty" tf:"client_credential_method,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with this Custom OIDC. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The App Setting name that contains the secret for this Custom OIDC Client. 
+ ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The endpoint that issued the Token as supplied by openid_configuration_endpoint response. + // The endpoint that issued the Token. + IssuerEndpoint *string `json:"issuerEndpoint,omitempty" tf:"issuer_endpoint,omitempty"` + + // The name which should be used for this Storage Account. + // The name of the Custom OIDC Authentication Provider. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The name of the claim that contains the users name. + // The name of the claim that contains the users name. + NameClaimType *string `json:"nameClaimType,omitempty" tf:"name_claim_type,omitempty"` + + // The app setting name that contains the client_secret value used for the Custom OIDC Login. + // The endpoint that contains all the configuration endpoints for this Custom OIDC provider. + OpenIDConfigurationEndpoint *string `json:"openidConfigurationEndpoint,omitempty" tf:"openid_configuration_endpoint,omitempty"` + + // The list of the scopes that should be requested while authenticating. + // The list of the scopes that should be requested while authenticating. + Scopes []*string `json:"scopes,omitempty" tf:"scopes,omitempty"` + + // The endpoint used to request a Token as supplied by openid_configuration_endpoint response. + // The endpoint used to request a Token. + TokenEndpoint *string `json:"tokenEndpoint,omitempty" tf:"token_endpoint,omitempty"` +} + +type CustomOidcV2Parameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with this Custom OIDC. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The name which should be used for this Storage Account. + // The name of the Custom OIDC Authentication Provider. 
+ // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The name of the claim that contains the users name. + // The name of the claim that contains the users name. + // +kubebuilder:validation:Optional + NameClaimType *string `json:"nameClaimType,omitempty" tf:"name_claim_type,omitempty"` + + // The app setting name that contains the client_secret value used for the Custom OIDC Login. + // The endpoint that contains all the configuration endpoints for this Custom OIDC provider. + // +kubebuilder:validation:Optional + OpenIDConfigurationEndpoint *string `json:"openidConfigurationEndpoint" tf:"openid_configuration_endpoint,omitempty"` + + // The list of the scopes that should be requested while authenticating. + // The list of the scopes that should be requested while authenticating. + // +kubebuilder:validation:Optional + Scopes []*string `json:"scopes,omitempty" tf:"scopes,omitempty"` +} + +type DockerInitParameters struct { + + // The name of the Docker image to use. + // The name of the Docker image to use. + ImageName *string `json:"imageName,omitempty" tf:"image_name,omitempty"` + + // The image tag of the image to use. + // The image tag of the image to use. + ImageTag *string `json:"imageTag,omitempty" tf:"image_tag,omitempty"` + + // The URL of the docker registry. + // The URL of the docker registry. + RegistryURL *string `json:"registryUrl,omitempty" tf:"registry_url,omitempty"` +} + +type DockerObservation struct { + + // The name of the Docker image to use. + // The name of the Docker image to use. + ImageName *string `json:"imageName,omitempty" tf:"image_name,omitempty"` + + // The image tag of the image to use. + // The image tag of the image to use. + ImageTag *string `json:"imageTag,omitempty" tf:"image_tag,omitempty"` + + // The URL of the docker registry. + // The URL of the docker registry. 
+ RegistryURL *string `json:"registryUrl,omitempty" tf:"registry_url,omitempty"` +} + +type DockerParameters struct { + + // The name of the Docker image to use. + // The name of the Docker image to use. + // +kubebuilder:validation:Optional + ImageName *string `json:"imageName" tf:"image_name,omitempty"` + + // The image tag of the image to use. + // The image tag of the image to use. + // +kubebuilder:validation:Optional + ImageTag *string `json:"imageTag" tf:"image_tag,omitempty"` + + // The password for the account to use to connect to the registry. + // The password for the account to use to connect to the registry. + // +kubebuilder:validation:Optional + RegistryPasswordSecretRef *v1.SecretKeySelector `json:"registryPasswordSecretRef,omitempty" tf:"-"` + + // The URL of the docker registry. + // The URL of the docker registry. + // +kubebuilder:validation:Optional + RegistryURL *string `json:"registryUrl" tf:"registry_url,omitempty"` + + // The username to use for connections to the registry. + // The username to use for connections to the registry. + // +kubebuilder:validation:Optional + RegistryUsernameSecretRef *v1.SecretKeySelector `json:"registryUsernameSecretRef,omitempty" tf:"-"` +} + +type FacebookV2InitParameters struct { + + // The App ID of the Facebook app used for login. + // The App ID of the Facebook app used for login. + AppID *string `json:"appId,omitempty" tf:"app_id,omitempty"` + + // The app setting name that contains the app_secret value used for Facebook Login. + // The app setting name that contains the `app_secret` value used for Facebook Login. + AppSecretSettingName *string `json:"appSecretSettingName,omitempty" tf:"app_secret_setting_name,omitempty"` + + // The version of the Facebook API to be used while logging in. + // The version of the Facebook API to be used while logging in. 
+ GraphAPIVersion *string `json:"graphApiVersion,omitempty" tf:"graph_api_version,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of scopes to be requested as part of Facebook Login authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type FacebookV2Observation struct { + + // The App ID of the Facebook app used for login. + // The App ID of the Facebook app used for login. + AppID *string `json:"appId,omitempty" tf:"app_id,omitempty"` + + // The app setting name that contains the app_secret value used for Facebook Login. + // The app setting name that contains the `app_secret` value used for Facebook Login. + AppSecretSettingName *string `json:"appSecretSettingName,omitempty" tf:"app_secret_setting_name,omitempty"` + + // The version of the Facebook API to be used while logging in. + // The version of the Facebook API to be used while logging in. + GraphAPIVersion *string `json:"graphApiVersion,omitempty" tf:"graph_api_version,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of scopes to be requested as part of Facebook Login authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type FacebookV2Parameters struct { + + // The App ID of the Facebook app used for login. + // The App ID of the Facebook app used for login. + // +kubebuilder:validation:Optional + AppID *string `json:"appId" tf:"app_id,omitempty"` + + // The app setting name that contains the app_secret value used for Facebook Login. + // The app setting name that contains the `app_secret` value used for Facebook Login. + // +kubebuilder:validation:Optional + AppSecretSettingName *string `json:"appSecretSettingName" tf:"app_secret_setting_name,omitempty"` + + // The version of the Facebook API to be used while logging in. 
+ // The version of the Facebook API to be used while logging in. + // +kubebuilder:validation:Optional + GraphAPIVersion *string `json:"graphApiVersion,omitempty" tf:"graph_api_version,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of scopes to be requested as part of Facebook Login authentication. + // +kubebuilder:validation:Optional + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type GithubInitParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the GitHub app used for login. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for GitHub Login. Cannot be specified with `client_secret`. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type GithubObservation struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the GitHub app used for login. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for GitHub Login. Cannot be specified with `client_secret`. 
+ ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type GithubParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the GitHub app used for login. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. + // The Client Secret of the GitHub app used for GitHub Login. Cannot be specified with `client_secret_setting_name`. + // +kubebuilder:validation:Optional + ClientSecretSecretRef *v1.SecretKeySelector `json:"clientSecretSecretRef,omitempty" tf:"-"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for GitHub Login. Cannot be specified with `client_secret`. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. 
+ // +kubebuilder:validation:Optional + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type GithubV2InitParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the GitHub app used for login. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for GitHub Login. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type GithubV2Observation struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the GitHub app used for login. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for GitHub Login. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type GithubV2Parameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. 
+ // The ID of the GitHub app used for login. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for GitHub Login. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + // +kubebuilder:validation:Optional + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type GoogleV2InitParameters struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed Audiences that will be requested as part of Google Sign-In authentication. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Google web application. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Google Login. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of Login scopes that will be requested as part of Google Sign-In authentication. 
+ LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type GoogleV2Observation struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed Audiences that will be requested as part of Google Sign-In authentication. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Google web application. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Google Login. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of Login scopes that will be requested as part of Google Sign-In authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type GoogleV2Parameters struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed Audiences that will be requested as part of Google Sign-In authentication. + // +kubebuilder:validation:Optional + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Google web application. 
+ // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Google Login. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of Login scopes that will be requested as part of Google Sign-In authentication. + // +kubebuilder:validation:Optional + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type LinuxFunctionAppAuthSettingsActiveDirectoryInitParameters struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Active Directory. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The App Setting name that contains the client secret of the Client. Cannot be used with `client_secret`. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` +} + +type LinuxFunctionAppAuthSettingsActiveDirectoryObservation struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. 
+ // Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Active Directory. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The App Setting name that contains the client secret of the Client. Cannot be used with `client_secret`. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` +} + +type LinuxFunctionAppAuthSettingsActiveDirectoryParameters struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + // +kubebuilder:validation:Optional + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Active Directory. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. + // The Client Secret for the Client ID. Cannot be used with `client_secret_setting_name`. + // +kubebuilder:validation:Optional + ClientSecretSecretRef *v1.SecretKeySelector `json:"clientSecretSecretRef,omitempty" tf:"-"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. 
+ // The App Setting name that contains the client secret of the Client. Cannot be used with `client_secret`. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` +} + +type LinuxFunctionAppAuthSettingsFacebookInitParameters struct { + + // The App ID of the Facebook app used for login. + // The App ID of the Facebook app used for login. + AppID *string `json:"appId,omitempty" tf:"app_id,omitempty"` + + // The app setting name that contains the app_secret value used for Facebook Login. + // The app setting name that contains the `app_secret` value used for Facebook Login. Cannot be specified with `app_secret`. + AppSecretSettingName *string `json:"appSecretSettingName,omitempty" tf:"app_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + // Specifies a list of OAuth 2.0 scopes to be requested as part of Facebook Login authentication. + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type LinuxFunctionAppAuthSettingsFacebookObservation struct { + + // The App ID of the Facebook app used for login. + // The App ID of the Facebook app used for login. + AppID *string `json:"appId,omitempty" tf:"app_id,omitempty"` + + // The app setting name that contains the app_secret value used for Facebook Login. + // The app setting name that contains the `app_secret` value used for Facebook Login. Cannot be specified with `app_secret`. + AppSecretSettingName *string `json:"appSecretSettingName,omitempty" tf:"app_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. 
+ // Specifies a list of OAuth 2.0 scopes to be requested as part of Facebook Login authentication. + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type LinuxFunctionAppAuthSettingsFacebookParameters struct { + + // The App ID of the Facebook app used for login. + // The App ID of the Facebook app used for login. + // +kubebuilder:validation:Optional + AppID *string `json:"appId" tf:"app_id,omitempty"` + + // The App Secret of the Facebook app used for Facebook login. Cannot be specified with app_secret_setting_name. + // The App Secret of the Facebook app used for Facebook Login. Cannot be specified with `app_secret_setting_name`. + // +kubebuilder:validation:Optional + AppSecretSecretRef *v1.SecretKeySelector `json:"appSecretSecretRef,omitempty" tf:"-"` + + // The app setting name that contains the app_secret value used for Facebook Login. + // The app setting name that contains the `app_secret` value used for Facebook Login. Cannot be specified with `app_secret`. + // +kubebuilder:validation:Optional + AppSecretSettingName *string `json:"appSecretSettingName,omitempty" tf:"app_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + // Specifies a list of OAuth 2.0 scopes to be requested as part of Facebook Login authentication. + // +kubebuilder:validation:Optional + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type LinuxFunctionAppAuthSettingsGoogleInitParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Google web application. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. 
+ // The app setting name that contains the `client_secret` value used for Google Login. Cannot be specified with `client_secret`. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication. If not specified, "openid", "profile", and "email" are used as default scopes. + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type LinuxFunctionAppAuthSettingsGoogleObservation struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Google web application. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Google Login. Cannot be specified with `client_secret`. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication. If not specified, "openid", "profile", and "email" are used as default scopes. + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type LinuxFunctionAppAuthSettingsGoogleParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. 
+ // The OpenID Connect Client ID for the Google web application. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. + // The client secret associated with the Google web application. Cannot be specified with `client_secret_setting_name`. + // +kubebuilder:validation:Optional + ClientSecretSecretRef *v1.SecretKeySelector `json:"clientSecretSecretRef,omitempty" tf:"-"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Google Login. Cannot be specified with `client_secret`. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication. If not specified, "openid", "profile", and "email" are used as default scopes. + // +kubebuilder:validation:Optional + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type LinuxFunctionAppAuthSettingsInitParameters struct { + + // An active_directory block as defined above. + ActiveDirectory *LinuxFunctionAppAuthSettingsActiveDirectoryInitParameters `json:"activeDirectory,omitempty" tf:"active_directory,omitempty"` + + // Specifies a map of login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + // Specifies a map of Login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. 
+ // +mapType=granular + AdditionalLoginParameters map[string]*string `json:"additionalLoginParameters,omitempty" tf:"additional_login_parameters,omitempty"` + + // Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Linux Web App. + // Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + AllowedExternalRedirectUrls []*string `json:"allowedExternalRedirectUrls,omitempty" tf:"allowed_external_redirect_urls,omitempty"` + + // The default authentication provider to use when multiple providers are configured. Possible values include: AzureActiveDirectory, Facebook, Google, MicrosoftAccount, Twitter, Github + // The default authentication provider to use when multiple providers are configured. Possible values include: `AzureActiveDirectory`, `Facebook`, `Google`, `MicrosoftAccount`, `Twitter`, `Github`. + DefaultProvider *string `json:"defaultProvider,omitempty" tf:"default_provider,omitempty"` + + // Should the Authentication / Authorization feature be enabled for the Linux Web App? + // Should the Authentication / Authorization feature be enabled? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // A facebook block as defined below. + Facebook *LinuxFunctionAppAuthSettingsFacebookInitParameters `json:"facebook,omitempty" tf:"facebook,omitempty"` + + // A github block as defined below. + Github *GithubInitParameters `json:"github,omitempty" tf:"github,omitempty"` + + // A google block as defined below. + Google *LinuxFunctionAppAuthSettingsGoogleInitParameters `json:"google,omitempty" tf:"google,omitempty"` + + // The OpenID Connect Issuer URI that represents the entity which issues access tokens for this Linux Web App. + // The OpenID Connect Issuer URI that represents the entity which issues access tokens. + Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"` + + // A microsoft block as defined below. 
+ Microsoft *LinuxFunctionAppAuthSettingsMicrosoftInitParameters `json:"microsoft,omitempty" tf:"microsoft,omitempty"` + + // The RuntimeVersion of the Authentication / Authorization feature in use for the Linux Web App. + // The RuntimeVersion of the Authentication / Authorization feature in use. + RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` + + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + TokenRefreshExtensionHours *float64 `json:"tokenRefreshExtensionHours,omitempty" tf:"token_refresh_extension_hours,omitempty"` + + // Should the Linux Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to false. + // Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to `false`. + TokenStoreEnabled *bool `json:"tokenStoreEnabled,omitempty" tf:"token_store_enabled,omitempty"` + + // A twitter block as defined below. + Twitter *LinuxFunctionAppAuthSettingsTwitterInitParameters `json:"twitter,omitempty" tf:"twitter,omitempty"` + + // The action to take when an unauthenticated client attempts to access the app. Possible values include: RedirectToLoginPage, AllowAnonymous. + // The action to take when an unauthenticated client attempts to access the app. Possible values include: `RedirectToLoginPage`, `AllowAnonymous`. + UnauthenticatedClientAction *string `json:"unauthenticatedClientAction,omitempty" tf:"unauthenticated_client_action,omitempty"` +} + +type LinuxFunctionAppAuthSettingsMicrosoftInitParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OAuth 2.0 client ID that was created for the app used for authentication. 
+ ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret`. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + // The list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, `wl.basic` is used as the default scope. + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type LinuxFunctionAppAuthSettingsMicrosoftObservation struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OAuth 2.0 client ID that was created for the app used for authentication. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret`. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + // The list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, `wl.basic` is used as the default scope. 
+ OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type LinuxFunctionAppAuthSettingsMicrosoftParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OAuth 2.0 client ID that was created for the app used for authentication. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. + // The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret_setting_name`. + // +kubebuilder:validation:Optional + ClientSecretSecretRef *v1.SecretKeySelector `json:"clientSecretSecretRef,omitempty" tf:"-"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret`. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + // The list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, `wl.basic` is used as the default scope. + // +kubebuilder:validation:Optional + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type LinuxFunctionAppAuthSettingsObservation struct { + + // An active_directory block as defined above. 
+ ActiveDirectory *LinuxFunctionAppAuthSettingsActiveDirectoryObservation `json:"activeDirectory,omitempty" tf:"active_directory,omitempty"` + + // Specifies a map of login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + // Specifies a map of Login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + // +mapType=granular + AdditionalLoginParameters map[string]*string `json:"additionalLoginParameters,omitempty" tf:"additional_login_parameters,omitempty"` + + // Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Linux Web App. + // Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + AllowedExternalRedirectUrls []*string `json:"allowedExternalRedirectUrls,omitempty" tf:"allowed_external_redirect_urls,omitempty"` + + // The default authentication provider to use when multiple providers are configured. Possible values include: AzureActiveDirectory, Facebook, Google, MicrosoftAccount, Twitter, Github + // The default authentication provider to use when multiple providers are configured. Possible values include: `AzureActiveDirectory`, `Facebook`, `Google`, `MicrosoftAccount`, `Twitter`, `Github`. + DefaultProvider *string `json:"defaultProvider,omitempty" tf:"default_provider,omitempty"` + + // Should the Authentication / Authorization feature be enabled for the Linux Web App? + // Should the Authentication / Authorization feature be enabled? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // A facebook block as defined below. + Facebook *LinuxFunctionAppAuthSettingsFacebookObservation `json:"facebook,omitempty" tf:"facebook,omitempty"` + + // A github block as defined below. + Github *GithubObservation `json:"github,omitempty" tf:"github,omitempty"` + + // A google block as defined below. 
+ Google *LinuxFunctionAppAuthSettingsGoogleObservation `json:"google,omitempty" tf:"google,omitempty"` + + // The OpenID Connect Issuer URI that represents the entity which issues access tokens for this Linux Web App. + // The OpenID Connect Issuer URI that represents the entity which issues access tokens. + Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"` + + // A microsoft block as defined below. + Microsoft *LinuxFunctionAppAuthSettingsMicrosoftObservation `json:"microsoft,omitempty" tf:"microsoft,omitempty"` + + // The RuntimeVersion of the Authentication / Authorization feature in use for the Linux Web App. + // The RuntimeVersion of the Authentication / Authorization feature in use. + RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` + + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + TokenRefreshExtensionHours *float64 `json:"tokenRefreshExtensionHours,omitempty" tf:"token_refresh_extension_hours,omitempty"` + + // Should the Linux Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to false. + // Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to `false`. + TokenStoreEnabled *bool `json:"tokenStoreEnabled,omitempty" tf:"token_store_enabled,omitempty"` + + // A twitter block as defined below. + Twitter *LinuxFunctionAppAuthSettingsTwitterObservation `json:"twitter,omitempty" tf:"twitter,omitempty"` + + // The action to take when an unauthenticated client attempts to access the app. Possible values include: RedirectToLoginPage, AllowAnonymous. + // The action to take when an unauthenticated client attempts to access the app. 
Possible values include: `RedirectToLoginPage`, `AllowAnonymous`. + UnauthenticatedClientAction *string `json:"unauthenticatedClientAction,omitempty" tf:"unauthenticated_client_action,omitempty"` +} + +type LinuxFunctionAppAuthSettingsParameters struct { + + // An active_directory block as defined above. + // +kubebuilder:validation:Optional + ActiveDirectory *LinuxFunctionAppAuthSettingsActiveDirectoryParameters `json:"activeDirectory,omitempty" tf:"active_directory,omitempty"` + + // Specifies a map of login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + // Specifies a map of Login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + // +kubebuilder:validation:Optional + // +mapType=granular + AdditionalLoginParameters map[string]*string `json:"additionalLoginParameters,omitempty" tf:"additional_login_parameters,omitempty"` + + // Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Linux Web App. + // Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + // +kubebuilder:validation:Optional + AllowedExternalRedirectUrls []*string `json:"allowedExternalRedirectUrls,omitempty" tf:"allowed_external_redirect_urls,omitempty"` + + // The default authentication provider to use when multiple providers are configured. Possible values include: AzureActiveDirectory, Facebook, Google, MicrosoftAccount, Twitter, Github + // The default authentication provider to use when multiple providers are configured. Possible values include: `AzureActiveDirectory`, `Facebook`, `Google`, `MicrosoftAccount`, `Twitter`, `Github`. + // +kubebuilder:validation:Optional + DefaultProvider *string `json:"defaultProvider,omitempty" tf:"default_provider,omitempty"` + + // Should the Authentication / Authorization feature be enabled for the Linux Web App? 
+ // Should the Authentication / Authorization feature be enabled? + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` + + // A facebook block as defined below. + // +kubebuilder:validation:Optional + Facebook *LinuxFunctionAppAuthSettingsFacebookParameters `json:"facebook,omitempty" tf:"facebook,omitempty"` + + // A github block as defined below. + // +kubebuilder:validation:Optional + Github *GithubParameters `json:"github,omitempty" tf:"github,omitempty"` + + // A google block as defined below. + // +kubebuilder:validation:Optional + Google *LinuxFunctionAppAuthSettingsGoogleParameters `json:"google,omitempty" tf:"google,omitempty"` + + // The OpenID Connect Issuer URI that represents the entity which issues access tokens for this Linux Web App. + // The OpenID Connect Issuer URI that represents the entity which issues access tokens. + // +kubebuilder:validation:Optional + Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"` + + // A microsoft block as defined below. + // +kubebuilder:validation:Optional + Microsoft *LinuxFunctionAppAuthSettingsMicrosoftParameters `json:"microsoft,omitempty" tf:"microsoft,omitempty"` + + // The RuntimeVersion of the Authentication / Authorization feature in use for the Linux Web App. + // The RuntimeVersion of the Authentication / Authorization feature in use. + // +kubebuilder:validation:Optional + RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` + + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. 
+ // +kubebuilder:validation:Optional + TokenRefreshExtensionHours *float64 `json:"tokenRefreshExtensionHours,omitempty" tf:"token_refresh_extension_hours,omitempty"` + + // Should the Linux Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to false. + // Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to `false`. + // +kubebuilder:validation:Optional + TokenStoreEnabled *bool `json:"tokenStoreEnabled,omitempty" tf:"token_store_enabled,omitempty"` + + // A twitter block as defined below. + // +kubebuilder:validation:Optional + Twitter *LinuxFunctionAppAuthSettingsTwitterParameters `json:"twitter,omitempty" tf:"twitter,omitempty"` + + // The action to take when an unauthenticated client attempts to access the app. Possible values include: RedirectToLoginPage, AllowAnonymous. + // The action to take when an unauthenticated client attempts to access the app. Possible values include: `RedirectToLoginPage`, `AllowAnonymous`. + // +kubebuilder:validation:Optional + UnauthenticatedClientAction *string `json:"unauthenticatedClientAction,omitempty" tf:"unauthenticated_client_action,omitempty"` +} + +type LinuxFunctionAppAuthSettingsTwitterInitParameters struct { + + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + ConsumerKey *string `json:"consumerKey,omitempty" tf:"consumer_key,omitempty"` + + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret`. 
+ ConsumerSecretSettingName *string `json:"consumerSecretSettingName,omitempty" tf:"consumer_secret_setting_name,omitempty"` +} + +type LinuxFunctionAppAuthSettingsTwitterObservation struct { + + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + ConsumerKey *string `json:"consumerKey,omitempty" tf:"consumer_key,omitempty"` + + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret`. + ConsumerSecretSettingName *string `json:"consumerSecretSettingName,omitempty" tf:"consumer_secret_setting_name,omitempty"` +} + +type LinuxFunctionAppAuthSettingsTwitterParameters struct { + + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // +kubebuilder:validation:Optional + ConsumerKey *string `json:"consumerKey" tf:"consumer_key,omitempty"` + + // The OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with consumer_secret_setting_name. + // The OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret_setting_name`. + // +kubebuilder:validation:Optional + ConsumerSecretSecretRef *v1.SecretKeySelector `json:"consumerSecretSecretRef,omitempty" tf:"-"` + + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret`. 
+ // +kubebuilder:validation:Optional + ConsumerSecretSettingName *string `json:"consumerSecretSettingName,omitempty" tf:"consumer_secret_setting_name,omitempty"` +} + +type LinuxFunctionAppConnectionStringInitParameters struct { + + // The name which should be used for this Connection. + // The name which should be used for this Connection. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Type of database. Possible values include: MySQL, SQLServer, SQLAzure, Custom, NotificationHub, ServiceBus, EventHub, APIHub, DocDb, RedisCache, and PostgreSQL. + // Type of database. Possible values include: `MySQL`, `SQLServer`, `SQLAzure`, `Custom`, `NotificationHub`, `ServiceBus`, `EventHub`, `APIHub`, `DocDb`, `RedisCache`, and `PostgreSQL`. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type LinuxFunctionAppConnectionStringObservation struct { + + // The name which should be used for this Connection. + // The name which should be used for this Connection. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Type of database. Possible values include: MySQL, SQLServer, SQLAzure, Custom, NotificationHub, ServiceBus, EventHub, APIHub, DocDb, RedisCache, and PostgreSQL. + // Type of database. Possible values include: `MySQL`, `SQLServer`, `SQLAzure`, `Custom`, `NotificationHub`, `ServiceBus`, `EventHub`, `APIHub`, `DocDb`, `RedisCache`, and `PostgreSQL`. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type LinuxFunctionAppConnectionStringParameters struct { + + // The name which should be used for this Connection. + // The name which should be used for this Connection. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Type of database. Possible values include: MySQL, SQLServer, SQLAzure, Custom, NotificationHub, ServiceBus, EventHub, APIHub, DocDb, RedisCache, and PostgreSQL. + // Type of database. 
Possible values include: `MySQL`, `SQLServer`, `SQLAzure`, `Custom`, `NotificationHub`, `ServiceBus`, `EventHub`, `APIHub`, `DocDb`, `RedisCache`, and `PostgreSQL`. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` + + // The connection string value. + // The connection string value. + // +kubebuilder:validation:Required + ValueSecretRef v1.SecretKeySelector `json:"valueSecretRef" tf:"-"` +} + +type LinuxFunctionAppIdentityInitParameters struct { + + // A list of User Assigned Managed Identity IDs to be assigned to this Linux Function App. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Linux Function App. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type LinuxFunctionAppIdentityObservation struct { + + // A list of User Assigned Managed Identity IDs to be assigned to this Linux Function App. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Principal ID associated with this Managed Service Identity. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID associated with this Managed Service Identity. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Linux Function App. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type LinuxFunctionAppIdentityParameters struct { + + // A list of User Assigned Managed Identity IDs to be assigned to this Linux Function App. 
+ // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Linux Function App. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type LinuxFunctionAppInitParameters struct { + + // A map of key-value pairs for App Settings and custom values. + // A map of key-value pairs for [App Settings](https://docs.microsoft.com/en-us/azure/azure-functions/functions-app-settings) and custom values. + // +mapType=granular + AppSettings map[string]*string `json:"appSettings,omitempty" tf:"app_settings,omitempty"` + + // A auth_settings block as defined below. + AuthSettings *LinuxFunctionAppAuthSettingsInitParameters `json:"authSettings,omitempty" tf:"auth_settings,omitempty"` + + // An auth_settings_v2 block as defined below. + AuthSettingsV2 *AuthSettingsV2InitParameters `json:"authSettingsV2,omitempty" tf:"auth_settings_v2,omitempty"` + + // A backup block as defined below. + Backup *BackupInitParameters `json:"backup,omitempty" tf:"backup,omitempty"` + + // Should built in logging be enabled. Configures AzureWebJobsDashboard app setting based on the configured storage setting. Defaults to true. + // Should built in logging be enabled. Configures `AzureWebJobsDashboard` app setting based on the configured storage setting + BuiltinLoggingEnabled *bool `json:"builtinLoggingEnabled,omitempty" tf:"builtin_logging_enabled,omitempty"` + + // Should the function app use Client Certificates. 
+ // Should the function app use Client Certificates + ClientCertificateEnabled *bool `json:"clientCertificateEnabled,omitempty" tf:"client_certificate_enabled,omitempty"` + + // Paths to exclude when using client certificates, separated by ; + // Paths to exclude when using client certificates, separated by ; + ClientCertificateExclusionPaths *string `json:"clientCertificateExclusionPaths,omitempty" tf:"client_certificate_exclusion_paths,omitempty"` + + // The mode of the Function App's client certificates requirement for incoming requests. Possible values are Required, Optional, and OptionalInteractiveUser. Defaults to Optional. + // The mode of the Function App's client certificates requirement for incoming requests. Possible values are `Required`, `Optional`, and `OptionalInteractiveUser` + ClientCertificateMode *string `json:"clientCertificateMode,omitempty" tf:"client_certificate_mode,omitempty"` + + // One or more connection_string blocks as defined below. + ConnectionString []LinuxFunctionAppConnectionStringInitParameters `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // Should the settings for linking the Function App to storage be suppressed. + // Force disable the content share settings. + ContentShareForceDisabled *bool `json:"contentShareForceDisabled,omitempty" tf:"content_share_force_disabled,omitempty"` + + // The amount of memory in gigabyte-seconds that your application is allowed to consume per day. Setting this value only affects function apps under the consumption plan. Defaults to 0. + // The amount of memory in gigabyte-seconds that your application is allowed to consume per day. Setting this value only affects function apps in Consumption Plans. + DailyMemoryTimeQuota *float64 `json:"dailyMemoryTimeQuota,omitempty" tf:"daily_memory_time_quota,omitempty"` + + // Is the Function App enabled? Defaults to true. + // Is the Linux Function App enabled. 
+ Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Should the default FTP Basic Authentication publishing profile be enabled. Defaults to true. + FtpPublishBasicAuthenticationEnabled *bool `json:"ftpPublishBasicAuthenticationEnabled,omitempty" tf:"ftp_publish_basic_authentication_enabled,omitempty"` + + // The runtime version associated with the Function App. Defaults to ~4. + // The runtime version associated with the Function App. + FunctionsExtensionVersion *string `json:"functionsExtensionVersion,omitempty" tf:"functions_extension_version,omitempty"` + + // Can the Function App only be accessed via HTTPS? Defaults to false. + // Can the Function App only be accessed via HTTPS? + HTTPSOnly *bool `json:"httpsOnly,omitempty" tf:"https_only,omitempty"` + + // A identity block as defined below. + Identity *LinuxFunctionAppIdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The User Assigned Identity ID used for accessing KeyVault secrets. The identity must be assigned to the application in the identity block. For more information see - Access vaults with a user-assigned identity + // The User Assigned Identity to use for Key Vault access. + KeyVaultReferenceIdentityID *string `json:"keyVaultReferenceIdentityId,omitempty" tf:"key_vault_reference_identity_id,omitempty"` + + // The Azure Region where the Linux Function App should exist. Changing this forces a new Linux Function App to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name which should be used for this Linux Function App. Changing this forces a new Linux Function App to be created. Limit the function name to 32 characters to avoid naming collisions. For more information about Function App naming rule and Host ID Collisions + // Specifies the name of the Function App. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Should public network access be enabled for the Function App. 
Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The name of the Resource Group where the Linux Function App should exist. Changing this forces a new Linux Function App to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // The ID of the App Service Plan within which to create this Function App. + // The ID of the App Service Plan within which to create this Function App + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/web/v1beta1.ServicePlan + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + ServicePlanID *string `json:"servicePlanId,omitempty" tf:"service_plan_id,omitempty"` + + // Reference to a ServicePlan in web to populate servicePlanId. + // +kubebuilder:validation:Optional + ServicePlanIDRef *v1.Reference `json:"servicePlanIdRef,omitempty" tf:"-"` + + // Selector for a ServicePlan in web to populate servicePlanId. + // +kubebuilder:validation:Optional + ServicePlanIDSelector *v1.Selector `json:"servicePlanIdSelector,omitempty" tf:"-"` + + // A site_config block as defined below. + SiteConfig *LinuxFunctionAppSiteConfigInitParameters `json:"siteConfig,omitempty" tf:"site_config,omitempty"` + + // A sticky_settings block as defined below. 
+ StickySettings *StickySettingsInitParameters `json:"stickySettings,omitempty" tf:"sticky_settings,omitempty"` + + // One or more storage_account blocks as defined below. + StorageAccount []StorageAccountInitParameters `json:"storageAccount,omitempty" tf:"storage_account,omitempty"` + + // The backend storage account name which will be used by this Function App. + // The backend storage account name which will be used by this Function App. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account + StorageAccountName *string `json:"storageAccountName,omitempty" tf:"storage_account_name,omitempty"` + + // Reference to a Account in storage to populate storageAccountName. + // +kubebuilder:validation:Optional + StorageAccountNameRef *v1.Reference `json:"storageAccountNameRef,omitempty" tf:"-"` + + // Selector for a Account in storage to populate storageAccountName. + // +kubebuilder:validation:Optional + StorageAccountNameSelector *v1.Selector `json:"storageAccountNameSelector,omitempty" tf:"-"` + + // The Key Vault Secret ID, optionally including version, that contains the Connection String to connect to the storage account for this Function App. + // The Key Vault Secret ID, including version, that contains the Connection String to connect to the storage account for this Function App. + StorageKeyVaultSecretID *string `json:"storageKeyVaultSecretId,omitempty" tf:"storage_key_vault_secret_id,omitempty"` + + // Should the Function App use Managed Identity to access the storage account. Conflicts with storage_account_access_key. + // Should the Function App use its Managed Identity to access storage? + StorageUsesManagedIdentity *bool `json:"storageUsesManagedIdentity,omitempty" tf:"storage_uses_managed_identity,omitempty"` + + // A mapping of tags which should be assigned to the Linux Function App. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The subnet id which will be used by this Function App for regional virtual network integration. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDRef *v1.Reference `json:"virtualNetworkSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDSelector *v1.Selector `json:"virtualNetworkSubnetIdSelector,omitempty" tf:"-"` + + // Should the default WebDeploy Basic Authentication publishing credentials enabled. Defaults to true. + WebdeployPublishBasicAuthenticationEnabled *bool `json:"webdeployPublishBasicAuthenticationEnabled,omitempty" tf:"webdeploy_publish_basic_authentication_enabled,omitempty"` + + // The local path and filename of the Zip packaged application to deploy to this Linux Function App. + // The local path and filename of the Zip packaged application to deploy to this Linux Function App. **Note:** Using this value requires either `WEBSITE_RUN_FROM_PACKAGE=1` or `SCM_DO_BUILD_DURING_DEPLOYMENT=true` to be set on the App in `app_settings`. + ZipDeployFile *string `json:"zipDeployFile,omitempty" tf:"zip_deploy_file,omitempty"` +} + +type LinuxFunctionAppObservation struct { + + // A map of key-value pairs for App Settings and custom values. + // A map of key-value pairs for [App Settings](https://docs.microsoft.com/en-us/azure/azure-functions/functions-app-settings) and custom values. 
+ // +mapType=granular + AppSettings map[string]*string `json:"appSettings,omitempty" tf:"app_settings,omitempty"` + + // A auth_settings block as defined below. + AuthSettings *LinuxFunctionAppAuthSettingsObservation `json:"authSettings,omitempty" tf:"auth_settings,omitempty"` + + // An auth_settings_v2 block as defined below. + AuthSettingsV2 *AuthSettingsV2Observation `json:"authSettingsV2,omitempty" tf:"auth_settings_v2,omitempty"` + + // A backup block as defined below. + Backup *BackupObservation `json:"backup,omitempty" tf:"backup,omitempty"` + + // Should built in logging be enabled. Configures AzureWebJobsDashboard app setting based on the configured storage setting. Defaults to true. + // Should built in logging be enabled. Configures `AzureWebJobsDashboard` app setting based on the configured storage setting + BuiltinLoggingEnabled *bool `json:"builtinLoggingEnabled,omitempty" tf:"builtin_logging_enabled,omitempty"` + + // Should the function app use Client Certificates. + // Should the function app use Client Certificates + ClientCertificateEnabled *bool `json:"clientCertificateEnabled,omitempty" tf:"client_certificate_enabled,omitempty"` + + // Paths to exclude when using client certificates, separated by ; + // Paths to exclude when using client certificates, separated by ; + ClientCertificateExclusionPaths *string `json:"clientCertificateExclusionPaths,omitempty" tf:"client_certificate_exclusion_paths,omitempty"` + + // The mode of the Function App's client certificates requirement for incoming requests. Possible values are Required, Optional, and OptionalInteractiveUser. Defaults to Optional. + // The mode of the Function App's client certificates requirement for incoming requests. Possible values are `Required`, `Optional`, and `OptionalInteractiveUser` + ClientCertificateMode *string `json:"clientCertificateMode,omitempty" tf:"client_certificate_mode,omitempty"` + + // One or more connection_string blocks as defined below. 
+ ConnectionString []LinuxFunctionAppConnectionStringObservation `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // Should the settings for linking the Function App to storage be suppressed. + // Force disable the content share settings. + ContentShareForceDisabled *bool `json:"contentShareForceDisabled,omitempty" tf:"content_share_force_disabled,omitempty"` + + // The amount of memory in gigabyte-seconds that your application is allowed to consume per day. Setting this value only affects function apps under the consumption plan. Defaults to 0. + // The amount of memory in gigabyte-seconds that your application is allowed to consume per day. Setting this value only affects function apps in Consumption Plans. + DailyMemoryTimeQuota *float64 `json:"dailyMemoryTimeQuota,omitempty" tf:"daily_memory_time_quota,omitempty"` + + // The default hostname of the Linux Function App. + DefaultHostName *string `json:"defaultHostname,omitempty" tf:"default_hostname,omitempty"` + + // Is the Function App enabled? Defaults to true. + // Is the Linux Function App enabled. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Should the default FTP Basic Authentication publishing profile be enabled. Defaults to true. + FtpPublishBasicAuthenticationEnabled *bool `json:"ftpPublishBasicAuthenticationEnabled,omitempty" tf:"ftp_publish_basic_authentication_enabled,omitempty"` + + // The runtime version associated with the Function App. Defaults to ~4. + // The runtime version associated with the Function App. + FunctionsExtensionVersion *string `json:"functionsExtensionVersion,omitempty" tf:"functions_extension_version,omitempty"` + + // Can the Function App only be accessed via HTTPS? Defaults to false. + // Can the Function App only be accessed via HTTPS? + HTTPSOnly *bool `json:"httpsOnly,omitempty" tf:"https_only,omitempty"` + + // The ID of the App Service Environment used by Function App. 
+ HostingEnvironmentID *string `json:"hostingEnvironmentId,omitempty" tf:"hosting_environment_id,omitempty"` + + // The ID of the Linux Function App. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A identity block as defined below. + Identity *LinuxFunctionAppIdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // The User Assigned Identity ID used for accessing KeyVault secrets. The identity must be assigned to the application in the identity block. For more information see - Access vaults with a user-assigned identity + // The User Assigned Identity to use for Key Vault access. + KeyVaultReferenceIdentityID *string `json:"keyVaultReferenceIdentityId,omitempty" tf:"key_vault_reference_identity_id,omitempty"` + + // The Kind value for this Linux Function App. + Kind *string `json:"kind,omitempty" tf:"kind,omitempty"` + + // The Azure Region where the Linux Function App should exist. Changing this forces a new Linux Function App to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name which should be used for this Linux Function App. Changing this forces a new Linux Function App to be created. Limit the function name to 32 characters to avoid naming collisions. For more information about Function App naming rule and Host ID Collisions + // Specifies the name of the Function App. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A list of outbound IP addresses. For example ["52.23.25.3", "52.143.43.12"] + OutboundIPAddressList []*string `json:"outboundIpAddressList,omitempty" tf:"outbound_ip_address_list,omitempty"` + + // A comma separated list of outbound IP addresses as a string. For example 52.23.25.3,52.143.43.12. + OutboundIPAddresses *string `json:"outboundIpAddresses,omitempty" tf:"outbound_ip_addresses,omitempty"` + + // A list of possible outbound IP addresses, not all of which are necessarily in use. This is a superset of outbound_ip_address_list. 
For example ["52.23.25.3", "52.143.43.12"]. + PossibleOutboundIPAddressList []*string `json:"possibleOutboundIpAddressList,omitempty" tf:"possible_outbound_ip_address_list,omitempty"` + + // A comma separated list of possible outbound IP addresses as a string. For example 52.23.25.3,52.143.43.12,52.143.43.17. This is a superset of outbound_ip_addresses. + PossibleOutboundIPAddresses *string `json:"possibleOutboundIpAddresses,omitempty" tf:"possible_outbound_ip_addresses,omitempty"` + + // Should public network access be enabled for the Function App. Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The name of the Resource Group where the Linux Function App should exist. Changing this forces a new Linux Function App to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // The ID of the App Service Plan within which to create this Function App. + // The ID of the App Service Plan within which to create this Function App + ServicePlanID *string `json:"servicePlanId,omitempty" tf:"service_plan_id,omitempty"` + + // A site_config block as defined below. + SiteConfig *LinuxFunctionAppSiteConfigObservation `json:"siteConfig,omitempty" tf:"site_config,omitempty"` + + // A sticky_settings block as defined below. + StickySettings *StickySettingsObservation `json:"stickySettings,omitempty" tf:"sticky_settings,omitempty"` + + // One or more storage_account blocks as defined below. + StorageAccount []StorageAccountObservation `json:"storageAccount,omitempty" tf:"storage_account,omitempty"` + + // The backend storage account name which will be used by this Function App. + // The backend storage account name which will be used by this Function App. 
+ StorageAccountName *string `json:"storageAccountName,omitempty" tf:"storage_account_name,omitempty"` + + // The Key Vault Secret ID, optionally including version, that contains the Connection String to connect to the storage account for this Function App. + // The Key Vault Secret ID, including version, that contains the Connection String to connect to the storage account for this Function App. + StorageKeyVaultSecretID *string `json:"storageKeyVaultSecretId,omitempty" tf:"storage_key_vault_secret_id,omitempty"` + + // Should the Function App use Managed Identity to access the storage account. Conflicts with storage_account_access_key. + // Should the Function App use its Managed Identity to access storage? + StorageUsesManagedIdentity *bool `json:"storageUsesManagedIdentity,omitempty" tf:"storage_uses_managed_identity,omitempty"` + + // A mapping of tags which should be assigned to the Linux Function App. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The subnet id which will be used by this Function App for regional virtual network integration. + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` + + // Should the default WebDeploy Basic Authentication publishing credentials enabled. Defaults to true. + WebdeployPublishBasicAuthenticationEnabled *bool `json:"webdeployPublishBasicAuthenticationEnabled,omitempty" tf:"webdeploy_publish_basic_authentication_enabled,omitempty"` + + // The local path and filename of the Zip packaged application to deploy to this Linux Function App. + // The local path and filename of the Zip packaged application to deploy to this Linux Function App. **Note:** Using this value requires either `WEBSITE_RUN_FROM_PACKAGE=1` or `SCM_DO_BUILD_DURING_DEPLOYMENT=true` to be set on the App in `app_settings`. 
+ ZipDeployFile *string `json:"zipDeployFile,omitempty" tf:"zip_deploy_file,omitempty"` +} + +type LinuxFunctionAppParameters struct { + + // A map of key-value pairs for App Settings and custom values. + // A map of key-value pairs for [App Settings](https://docs.microsoft.com/en-us/azure/azure-functions/functions-app-settings) and custom values. + // +kubebuilder:validation:Optional + // +mapType=granular + AppSettings map[string]*string `json:"appSettings,omitempty" tf:"app_settings,omitempty"` + + // A auth_settings block as defined below. + // +kubebuilder:validation:Optional + AuthSettings *LinuxFunctionAppAuthSettingsParameters `json:"authSettings,omitempty" tf:"auth_settings,omitempty"` + + // An auth_settings_v2 block as defined below. + // +kubebuilder:validation:Optional + AuthSettingsV2 *AuthSettingsV2Parameters `json:"authSettingsV2,omitempty" tf:"auth_settings_v2,omitempty"` + + // A backup block as defined below. + // +kubebuilder:validation:Optional + Backup *BackupParameters `json:"backup,omitempty" tf:"backup,omitempty"` + + // Should built in logging be enabled. Configures AzureWebJobsDashboard app setting based on the configured storage setting. Defaults to true. + // Should built in logging be enabled. Configures `AzureWebJobsDashboard` app setting based on the configured storage setting + // +kubebuilder:validation:Optional + BuiltinLoggingEnabled *bool `json:"builtinLoggingEnabled,omitempty" tf:"builtin_logging_enabled,omitempty"` + + // Should the function app use Client Certificates. 
+ // Should the function app use Client Certificates + // +kubebuilder:validation:Optional + ClientCertificateEnabled *bool `json:"clientCertificateEnabled,omitempty" tf:"client_certificate_enabled,omitempty"` + + // Paths to exclude when using client certificates, separated by ; + // Paths to exclude when using client certificates, separated by ; + // +kubebuilder:validation:Optional + ClientCertificateExclusionPaths *string `json:"clientCertificateExclusionPaths,omitempty" tf:"client_certificate_exclusion_paths,omitempty"` + + // The mode of the Function App's client certificates requirement for incoming requests. Possible values are Required, Optional, and OptionalInteractiveUser. Defaults to Optional. + // The mode of the Function App's client certificates requirement for incoming requests. Possible values are `Required`, `Optional`, and `OptionalInteractiveUser` + // +kubebuilder:validation:Optional + ClientCertificateMode *string `json:"clientCertificateMode,omitempty" tf:"client_certificate_mode,omitempty"` + + // One or more connection_string blocks as defined below. + // +kubebuilder:validation:Optional + ConnectionString []LinuxFunctionAppConnectionStringParameters `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // Should the settings for linking the Function App to storage be suppressed. + // Force disable the content share settings. + // +kubebuilder:validation:Optional + ContentShareForceDisabled *bool `json:"contentShareForceDisabled,omitempty" tf:"content_share_force_disabled,omitempty"` + + // The amount of memory in gigabyte-seconds that your application is allowed to consume per day. Setting this value only affects function apps under the consumption plan. Defaults to 0. + // The amount of memory in gigabyte-seconds that your application is allowed to consume per day. Setting this value only affects function apps in Consumption Plans. 
+ // +kubebuilder:validation:Optional + DailyMemoryTimeQuota *float64 `json:"dailyMemoryTimeQuota,omitempty" tf:"daily_memory_time_quota,omitempty"` + + // Is the Function App enabled? Defaults to true. + // Is the Linux Function App enabled. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Should the default FTP Basic Authentication publishing profile be enabled. Defaults to true. + // +kubebuilder:validation:Optional + FtpPublishBasicAuthenticationEnabled *bool `json:"ftpPublishBasicAuthenticationEnabled,omitempty" tf:"ftp_publish_basic_authentication_enabled,omitempty"` + + // The runtime version associated with the Function App. Defaults to ~4. + // The runtime version associated with the Function App. + // +kubebuilder:validation:Optional + FunctionsExtensionVersion *string `json:"functionsExtensionVersion,omitempty" tf:"functions_extension_version,omitempty"` + + // Can the Function App only be accessed via HTTPS? Defaults to false. + // Can the Function App only be accessed via HTTPS? + // +kubebuilder:validation:Optional + HTTPSOnly *bool `json:"httpsOnly,omitempty" tf:"https_only,omitempty"` + + // A identity block as defined below. + // +kubebuilder:validation:Optional + Identity *LinuxFunctionAppIdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The User Assigned Identity ID used for accessing KeyVault secrets. The identity must be assigned to the application in the identity block. For more information see - Access vaults with a user-assigned identity + // The User Assigned Identity to use for Key Vault access. + // +kubebuilder:validation:Optional + KeyVaultReferenceIdentityID *string `json:"keyVaultReferenceIdentityId,omitempty" tf:"key_vault_reference_identity_id,omitempty"` + + // The Azure Region where the Linux Function App should exist. Changing this forces a new Linux Function App to be created. 
+ // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name which should be used for this Linux Function App. Changing this forces a new Linux Function App to be created. Limit the function name to 32 characters to avoid naming collisions. For more information about Function App naming rule and Host ID Collisions + // Specifies the name of the Function App. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Should public network access be enabled for the Function App. Defaults to true. + // +kubebuilder:validation:Optional + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The name of the Resource Group where the Linux Function App should exist. Changing this forces a new Linux Function App to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // The ID of the App Service Plan within which to create this Function App. 
+ // The ID of the App Service Plan within which to create this Function App + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/web/v1beta1.ServicePlan + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + ServicePlanID *string `json:"servicePlanId,omitempty" tf:"service_plan_id,omitempty"` + + // Reference to a ServicePlan in web to populate servicePlanId. + // +kubebuilder:validation:Optional + ServicePlanIDRef *v1.Reference `json:"servicePlanIdRef,omitempty" tf:"-"` + + // Selector for a ServicePlan in web to populate servicePlanId. + // +kubebuilder:validation:Optional + ServicePlanIDSelector *v1.Selector `json:"servicePlanIdSelector,omitempty" tf:"-"` + + // A site_config block as defined below. + // +kubebuilder:validation:Optional + SiteConfig *LinuxFunctionAppSiteConfigParameters `json:"siteConfig,omitempty" tf:"site_config,omitempty"` + + // A sticky_settings block as defined below. + // +kubebuilder:validation:Optional + StickySettings *StickySettingsParameters `json:"stickySettings,omitempty" tf:"sticky_settings,omitempty"` + + // One or more storage_account blocks as defined below. + // +kubebuilder:validation:Optional + StorageAccount []StorageAccountParameters `json:"storageAccount,omitempty" tf:"storage_account,omitempty"` + + // The access key which will be used to access the backend storage account for the Function App. Conflicts with storage_uses_managed_identity. + // The access key which will be used to access the storage account for the Function App. + // +kubebuilder:validation:Optional + StorageAccountAccessKeySecretRef *v1.SecretKeySelector `json:"storageAccountAccessKeySecretRef,omitempty" tf:"-"` + + // The backend storage account name which will be used by this Function App. + // The backend storage account name which will be used by this Function App. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account + // +kubebuilder:validation:Optional + StorageAccountName *string `json:"storageAccountName,omitempty" tf:"storage_account_name,omitempty"` + + // Reference to a Account in storage to populate storageAccountName. + // +kubebuilder:validation:Optional + StorageAccountNameRef *v1.Reference `json:"storageAccountNameRef,omitempty" tf:"-"` + + // Selector for a Account in storage to populate storageAccountName. + // +kubebuilder:validation:Optional + StorageAccountNameSelector *v1.Selector `json:"storageAccountNameSelector,omitempty" tf:"-"` + + // The Key Vault Secret ID, optionally including version, that contains the Connection String to connect to the storage account for this Function App. + // The Key Vault Secret ID, including version, that contains the Connection String to connect to the storage account for this Function App. + // +kubebuilder:validation:Optional + StorageKeyVaultSecretID *string `json:"storageKeyVaultSecretId,omitempty" tf:"storage_key_vault_secret_id,omitempty"` + + // Should the Function App use Managed Identity to access the storage account. Conflicts with storage_account_access_key. + // Should the Function App use its Managed Identity to access storage? + // +kubebuilder:validation:Optional + StorageUsesManagedIdentity *bool `json:"storageUsesManagedIdentity,omitempty" tf:"storage_uses_managed_identity,omitempty"` + + // A mapping of tags which should be assigned to the Linux Function App. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The subnet id which will be used by this Function App for regional virtual network integration. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDRef *v1.Reference `json:"virtualNetworkSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDSelector *v1.Selector `json:"virtualNetworkSubnetIdSelector,omitempty" tf:"-"` + + // Should the default WebDeploy Basic Authentication publishing credentials enabled. Defaults to true. + // +kubebuilder:validation:Optional + WebdeployPublishBasicAuthenticationEnabled *bool `json:"webdeployPublishBasicAuthenticationEnabled,omitempty" tf:"webdeploy_publish_basic_authentication_enabled,omitempty"` + + // The local path and filename of the Zip packaged application to deploy to this Linux Function App. + // The local path and filename of the Zip packaged application to deploy to this Linux Function App. **Note:** Using this value requires either `WEBSITE_RUN_FROM_PACKAGE=1` or `SCM_DO_BUILD_DURING_DEPLOYMENT=true` to be set on the App in `app_settings`. + // +kubebuilder:validation:Optional + ZipDeployFile *string `json:"zipDeployFile,omitempty" tf:"zip_deploy_file,omitempty"` +} + +type LinuxFunctionAppSiteConfigCorsInitParameters struct { + + // Specifies a list of origins that should be allowed to make cross-origin calls. + // Specifies a list of origins that should be allowed to make cross-origin calls. + // +listType=set + AllowedOrigins []*string `json:"allowedOrigins,omitempty" tf:"allowed_origins,omitempty"` + + // Are credentials allowed in CORS requests? 
Defaults to false. + // Are credentials allowed in CORS requests? Defaults to `false`. + SupportCredentials *bool `json:"supportCredentials,omitempty" tf:"support_credentials,omitempty"` +} + +type LinuxFunctionAppSiteConfigCorsObservation struct { + + // Specifies a list of origins that should be allowed to make cross-origin calls. + // Specifies a list of origins that should be allowed to make cross-origin calls. + // +listType=set + AllowedOrigins []*string `json:"allowedOrigins,omitempty" tf:"allowed_origins,omitempty"` + + // Are credentials allowed in CORS requests? Defaults to false. + // Are credentials allowed in CORS requests? Defaults to `false`. + SupportCredentials *bool `json:"supportCredentials,omitempty" tf:"support_credentials,omitempty"` +} + +type LinuxFunctionAppSiteConfigCorsParameters struct { + + // Specifies a list of origins that should be allowed to make cross-origin calls. + // Specifies a list of origins that should be allowed to make cross-origin calls. + // +kubebuilder:validation:Optional + // +listType=set + AllowedOrigins []*string `json:"allowedOrigins,omitempty" tf:"allowed_origins,omitempty"` + + // Are credentials allowed in CORS requests? Defaults to false. + // Are credentials allowed in CORS requests? Defaults to `false`. + // +kubebuilder:validation:Optional + SupportCredentials *bool `json:"supportCredentials,omitempty" tf:"support_credentials,omitempty"` +} + +type LinuxFunctionAppSiteConfigIPRestrictionInitParameters struct { + + // The action to take. Possible values are Allow or Deny. Defaults to Allow. + // The action to take. Possible values are `Allow` or `Deny`. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The Description of this IP Restriction. + // The description of the IP restriction rule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A headers block as defined above. 
+ Headers []SiteConfigIPRestrictionHeadersInitParameters `json:"headers,omitempty" tf:"headers,omitempty"` + + // The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + // The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // The name which should be used for this Storage Account. + // The name which should be used for this `ip_restriction`. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The priority value of this ip_restriction. Defaults to 65000. + // The priority value of this `ip_restriction`. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Service Tag used for this IP Restriction. + // The Service Tag used for this IP Restriction. + ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag,omitempty"` + + // The subnet id which will be used by this Function App for regional virtual network integration. + // The Virtual Network Subnet ID used for this IP Restriction. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDRef *v1.Reference `json:"virtualNetworkSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate virtualNetworkSubnetId. 
+ // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDSelector *v1.Selector `json:"virtualNetworkSubnetIdSelector,omitempty" tf:"-"` +} + +type LinuxFunctionAppSiteConfigIPRestrictionObservation struct { + + // The action to take. Possible values are Allow or Deny. Defaults to Allow. + // The action to take. Possible values are `Allow` or `Deny`. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The Description of this IP Restriction. + // The description of the IP restriction rule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A headers block as defined above. + Headers []SiteConfigIPRestrictionHeadersObservation `json:"headers,omitempty" tf:"headers,omitempty"` + + // The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + // The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // The name which should be used for this Storage Account. + // The name which should be used for this `ip_restriction`. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The priority value of this ip_restriction. Defaults to 65000. + // The priority value of this `ip_restriction`. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Service Tag used for this IP Restriction. + // The Service Tag used for this IP Restriction. + ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag,omitempty"` + + // The subnet id which will be used by this Function App for regional virtual network integration. + // The Virtual Network Subnet ID used for this IP Restriction. 
+ VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` +} + +type LinuxFunctionAppSiteConfigIPRestrictionParameters struct { + + // The action to take. Possible values are Allow or Deny. Defaults to Allow. + // The action to take. Possible values are `Allow` or `Deny`. + // +kubebuilder:validation:Optional + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The Description of this IP Restriction. + // The description of the IP restriction rule. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A headers block as defined above. + // +kubebuilder:validation:Optional + Headers []SiteConfigIPRestrictionHeadersParameters `json:"headers,omitempty" tf:"headers,omitempty"` + + // The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + // The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + // +kubebuilder:validation:Optional + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // The name which should be used for this Storage Account. + // The name which should be used for this `ip_restriction`. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The priority value of this ip_restriction. Defaults to 65000. + // The priority value of this `ip_restriction`. + // +kubebuilder:validation:Optional + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Service Tag used for this IP Restriction. + // The Service Tag used for this IP Restriction. + // +kubebuilder:validation:Optional + ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag,omitempty"` + + // The subnet id which will be used by this Function App for regional virtual network integration. 
+ // The Virtual Network Subnet ID used for this IP Restriction. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDRef *v1.Reference `json:"virtualNetworkSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDSelector *v1.Selector `json:"virtualNetworkSubnetIdSelector,omitempty" tf:"-"` +} + +type LinuxFunctionAppSiteConfigInitParameters struct { + + // The URL of the API definition that describes this Linux Function App. + // The URL of the API definition that describes this Linux Function App. + APIDefinitionURL *string `json:"apiDefinitionUrl,omitempty" tf:"api_definition_url,omitempty"` + + // The ID of the API Management API for this Linux Function App. + // The ID of the API Management API for this Linux Function App. + APIManagementAPIID *string `json:"apiManagementApiId,omitempty" tf:"api_management_api_id,omitempty"` + + // If this Linux Web App is Always On enabled. Defaults to false. + // If this Linux Web App is Always On enabled. Defaults to `false`. + AlwaysOn *bool `json:"alwaysOn,omitempty" tf:"always_on,omitempty"` + + // The App command line to launch. + // The program and any arguments used to launch this app via the command line. (Example `node myapp.js`). + AppCommandLine *string `json:"appCommandLine,omitempty" tf:"app_command_line,omitempty"` + + // The number of workers this function app can scale out to. Only applicable to apps on the Consumption and Premium plan. 
+ // The number of workers this function app can scale out to. Only applicable to apps on the Consumption and Premium plan. + AppScaleLimit *float64 `json:"appScaleLimit,omitempty" tf:"app_scale_limit,omitempty"` + + // An app_service_logs block as defined above. + AppServiceLogs *AppServiceLogsInitParameters `json:"appServiceLogs,omitempty" tf:"app_service_logs,omitempty"` + + // An application_stack block as defined above. + ApplicationStack *ApplicationStackInitParameters `json:"applicationStack,omitempty" tf:"application_stack,omitempty"` + + // The Client ID of the Managed Service Identity to use for connections to the Azure Container Registry. + // The Client ID of the Managed Service Identity to use for connections to the Azure Container Registry. + ContainerRegistryManagedIdentityClientID *string `json:"containerRegistryManagedIdentityClientId,omitempty" tf:"container_registry_managed_identity_client_id,omitempty"` + + // Should connections for Azure Container Registry use Managed Identity. + // Should connections for Azure Container Registry use Managed Identity. + ContainerRegistryUseManagedIdentity *bool `json:"containerRegistryUseManagedIdentity,omitempty" tf:"container_registry_use_managed_identity,omitempty"` + + // A cors block as defined above. + Cors *LinuxFunctionAppSiteConfigCorsInitParameters `json:"cors,omitempty" tf:"cors,omitempty"` + + // Specifies a list of Default Documents for the Linux Web App. + // Specifies a list of Default Documents for the Linux Web App. + DefaultDocuments []*string `json:"defaultDocuments,omitempty" tf:"default_documents,omitempty"` + + // The number of minimum instances for this Linux Function App. Only affects apps on Elastic Premium plans. + // The number of minimum instances for this Linux Function App. Only affects apps on Elastic Premium plans. 
+ ElasticInstanceMinimum *float64 `json:"elasticInstanceMinimum,omitempty" tf:"elastic_instance_minimum,omitempty"` + + // State of FTP / FTPS service for this function app. Possible values include: AllAllowed, FtpsOnly and Disabled. Defaults to Disabled. + // State of FTP / FTPS service for this function app. Possible values include: `AllAllowed`, `FtpsOnly` and `Disabled`. Defaults to `Disabled`. + FtpsState *string `json:"ftpsState,omitempty" tf:"ftps_state,omitempty"` + + // The amount of time in minutes that a node can be unhealthy before being removed from the load balancer. Possible values are between 2 and 10. Only valid in conjunction with health_check_path. + // The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between `2` and `10`. Defaults to `10`. Only valid in conjunction with `health_check_path` + HealthCheckEvictionTimeInMin *float64 `json:"healthCheckEvictionTimeInMin,omitempty" tf:"health_check_eviction_time_in_min,omitempty"` + + // The path to be checked for this function app health. + // The path to be checked for this function app health. + HealthCheckPath *string `json:"healthCheckPath,omitempty" tf:"health_check_path,omitempty"` + + // Specifies if the HTTP2 protocol should be enabled. Defaults to false. + // Specifies if the http2 protocol should be enabled. Defaults to `false`. + Http2Enabled *bool `json:"http2Enabled,omitempty" tf:"http2_enabled,omitempty"` + + // One or more ip_restriction blocks as defined above. + IPRestriction []LinuxFunctionAppSiteConfigIPRestrictionInitParameters `json:"ipRestriction,omitempty" tf:"ip_restriction,omitempty"` + + // The Default action for traffic that does not match any ip_restriction rule. possible values include Allow and Deny. Defaults to Allow. + IPRestrictionDefaultAction *string `json:"ipRestrictionDefaultAction,omitempty" tf:"ip_restriction_default_action,omitempty"` + + // The Site load balancing mode. 
Possible values include: WeightedRoundRobin, LeastRequests, LeastResponseTime, WeightedTotalTraffic, RequestHash, PerSiteRoundRobin. Defaults to LeastRequests if omitted. + // The Site load balancing mode. Possible values include: `WeightedRoundRobin`, `LeastRequests`, `LeastResponseTime`, `WeightedTotalTraffic`, `RequestHash`, `PerSiteRoundRobin`. Defaults to `LeastRequests` if omitted. + LoadBalancingMode *string `json:"loadBalancingMode,omitempty" tf:"load_balancing_mode,omitempty"` + + // Managed pipeline mode. Possible values include: Integrated, Classic. Defaults to Integrated. + // The Managed Pipeline mode. Possible values include: `Integrated`, `Classic`. Defaults to `Integrated`. + ManagedPipelineMode *string `json:"managedPipelineMode,omitempty" tf:"managed_pipeline_mode,omitempty"` + + // The configures the minimum version of TLS required for SSL requests. Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + // The configures the minimum version of TLS required for SSL requests. Possible values include: `1.0`, `1.1`, and `1.2`. Defaults to `1.2`. + MinimumTLSVersion *string `json:"minimumTlsVersion,omitempty" tf:"minimum_tls_version,omitempty"` + + // The number of pre-warmed instances for this function app. Only affects apps on an Elastic Premium plan. + // The number of pre-warmed instances for this function app. Only affects apps on an Elastic Premium plan. + PreWarmedInstanceCount *float64 `json:"preWarmedInstanceCount,omitempty" tf:"pre_warmed_instance_count,omitempty"` + + // Should Remote Debugging be enabled. Defaults to false. + // Should Remote Debugging be enabled. Defaults to `false`. + RemoteDebuggingEnabled *bool `json:"remoteDebuggingEnabled,omitempty" tf:"remote_debugging_enabled,omitempty"` + + // The Remote Debugging Version. Possible values include VS2017, VS2019, and VS2022. + // The Remote Debugging Version. 
Possible values include `VS2017`, `VS2019`, and `VS2022` + RemoteDebuggingVersion *string `json:"remoteDebuggingVersion,omitempty" tf:"remote_debugging_version,omitempty"` + + // Should Scale Monitoring of the Functions Runtime be enabled? + // Should Functions Runtime Scale Monitoring be enabled. + RuntimeScaleMonitoringEnabled *bool `json:"runtimeScaleMonitoringEnabled,omitempty" tf:"runtime_scale_monitoring_enabled,omitempty"` + + // One or more scm_ip_restriction blocks as defined above. + ScmIPRestriction []LinuxFunctionAppSiteConfigScmIPRestrictionInitParameters `json:"scmIpRestriction,omitempty" tf:"scm_ip_restriction,omitempty"` + + // The Default action for traffic that does not match any scm_ip_restriction rule. possible values include Allow and Deny. Defaults to Allow. + ScmIPRestrictionDefaultAction *string `json:"scmIpRestrictionDefaultAction,omitempty" tf:"scm_ip_restriction_default_action,omitempty"` + + // Configures the minimum version of TLS required for SSL requests to the SCM site Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + // Configures the minimum version of TLS required for SSL requests to the SCM site Possible values include: `1.0`, `1.1`, and `1.2`. Defaults to `1.2`. + ScmMinimumTLSVersion *string `json:"scmMinimumTlsVersion,omitempty" tf:"scm_minimum_tls_version,omitempty"` + + // Should the Linux Function App ip_restriction configuration be used for the SCM also. + // Should the Linux Function App `ip_restriction` configuration be used for the SCM also. + ScmUseMainIPRestriction *bool `json:"scmUseMainIpRestriction,omitempty" tf:"scm_use_main_ip_restriction,omitempty"` + + // Should the Linux Web App use a 32-bit worker process. Defaults to false. + // Should the Linux Web App use a 32-bit worker. + Use32BitWorker *bool `json:"use32BitWorker,omitempty" tf:"use_32_bit_worker,omitempty"` + + // Should all outbound traffic to have NAT Gateways, Network Security Groups and User Defined Routes applied? Defaults to false. 
+ // Should all outbound traffic to have Virtual Network Security Groups and User Defined Routes applied? Defaults to `false`. + VnetRouteAllEnabled *bool `json:"vnetRouteAllEnabled,omitempty" tf:"vnet_route_all_enabled,omitempty"` + + // Should Web Sockets be enabled. Defaults to false. + // Should Web Sockets be enabled. Defaults to `false`. + WebsocketsEnabled *bool `json:"websocketsEnabled,omitempty" tf:"websockets_enabled,omitempty"` + + // The number of Workers for this Linux Function App. + // The number of Workers for this Linux Function App. + WorkerCount *float64 `json:"workerCount,omitempty" tf:"worker_count,omitempty"` +} + +type LinuxFunctionAppSiteConfigObservation struct { + + // The URL of the API definition that describes this Linux Function App. + // The URL of the API definition that describes this Linux Function App. + APIDefinitionURL *string `json:"apiDefinitionUrl,omitempty" tf:"api_definition_url,omitempty"` + + // The ID of the API Management API for this Linux Function App. + // The ID of the API Management API for this Linux Function App. + APIManagementAPIID *string `json:"apiManagementApiId,omitempty" tf:"api_management_api_id,omitempty"` + + // If this Linux Web App is Always On enabled. Defaults to false. + // If this Linux Web App is Always On enabled. Defaults to `false`. + AlwaysOn *bool `json:"alwaysOn,omitempty" tf:"always_on,omitempty"` + + // The App command line to launch. + // The program and any arguments used to launch this app via the command line. (Example `node myapp.js`). + AppCommandLine *string `json:"appCommandLine,omitempty" tf:"app_command_line,omitempty"` + + // The number of workers this function app can scale out to. Only applicable to apps on the Consumption and Premium plan. + // The number of workers this function app can scale out to. Only applicable to apps on the Consumption and Premium plan. 
+ AppScaleLimit *float64 `json:"appScaleLimit,omitempty" tf:"app_scale_limit,omitempty"` + + // An app_service_logs block as defined above. + AppServiceLogs *AppServiceLogsObservation `json:"appServiceLogs,omitempty" tf:"app_service_logs,omitempty"` + + // An application_stack block as defined above. + ApplicationStack *ApplicationStackObservation `json:"applicationStack,omitempty" tf:"application_stack,omitempty"` + + // The Client ID of the Managed Service Identity to use for connections to the Azure Container Registry. + // The Client ID of the Managed Service Identity to use for connections to the Azure Container Registry. + ContainerRegistryManagedIdentityClientID *string `json:"containerRegistryManagedIdentityClientId,omitempty" tf:"container_registry_managed_identity_client_id,omitempty"` + + // Should connections for Azure Container Registry use Managed Identity. + // Should connections for Azure Container Registry use Managed Identity. + ContainerRegistryUseManagedIdentity *bool `json:"containerRegistryUseManagedIdentity,omitempty" tf:"container_registry_use_managed_identity,omitempty"` + + // A cors block as defined above. + Cors *LinuxFunctionAppSiteConfigCorsObservation `json:"cors,omitempty" tf:"cors,omitempty"` + + // Specifies a list of Default Documents for the Linux Web App. + // Specifies a list of Default Documents for the Linux Web App. + DefaultDocuments []*string `json:"defaultDocuments,omitempty" tf:"default_documents,omitempty"` + + // Is the Function App enabled? Defaults to true. + // Is detailed error logging enabled + DetailedErrorLoggingEnabled *bool `json:"detailedErrorLoggingEnabled,omitempty" tf:"detailed_error_logging_enabled,omitempty"` + + // The number of minimum instances for this Linux Function App. Only affects apps on Elastic Premium plans. + // The number of minimum instances for this Linux Function App. Only affects apps on Elastic Premium plans. 
+ ElasticInstanceMinimum *float64 `json:"elasticInstanceMinimum,omitempty" tf:"elastic_instance_minimum,omitempty"` + + // State of FTP / FTPS service for this function app. Possible values include: AllAllowed, FtpsOnly and Disabled. Defaults to Disabled. + // State of FTP / FTPS service for this function app. Possible values include: `AllAllowed`, `FtpsOnly` and `Disabled`. Defaults to `Disabled`. + FtpsState *string `json:"ftpsState,omitempty" tf:"ftps_state,omitempty"` + + // The amount of time in minutes that a node can be unhealthy before being removed from the load balancer. Possible values are between 2 and 10. Only valid in conjunction with health_check_path. + // The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between `2` and `10`. Defaults to `10`. Only valid in conjunction with `health_check_path` + HealthCheckEvictionTimeInMin *float64 `json:"healthCheckEvictionTimeInMin,omitempty" tf:"health_check_eviction_time_in_min,omitempty"` + + // The path to be checked for this function app health. + // The path to be checked for this function app health. + HealthCheckPath *string `json:"healthCheckPath,omitempty" tf:"health_check_path,omitempty"` + + // Specifies if the HTTP2 protocol should be enabled. Defaults to false. + // Specifies if the http2 protocol should be enabled. Defaults to `false`. + Http2Enabled *bool `json:"http2Enabled,omitempty" tf:"http2_enabled,omitempty"` + + // One or more ip_restriction blocks as defined above. + IPRestriction []LinuxFunctionAppSiteConfigIPRestrictionObservation `json:"ipRestriction,omitempty" tf:"ip_restriction,omitempty"` + + // The Default action for traffic that does not match any ip_restriction rule. possible values include Allow and Deny. Defaults to Allow. 
+ IPRestrictionDefaultAction *string `json:"ipRestrictionDefaultAction,omitempty" tf:"ip_restriction_default_action,omitempty"` + + // The Linux FX Version + LinuxFxVersion *string `json:"linuxFxVersion,omitempty" tf:"linux_fx_version,omitempty"` + + // The Site load balancing mode. Possible values include: WeightedRoundRobin, LeastRequests, LeastResponseTime, WeightedTotalTraffic, RequestHash, PerSiteRoundRobin. Defaults to LeastRequests if omitted. + // The Site load balancing mode. Possible values include: `WeightedRoundRobin`, `LeastRequests`, `LeastResponseTime`, `WeightedTotalTraffic`, `RequestHash`, `PerSiteRoundRobin`. Defaults to `LeastRequests` if omitted. + LoadBalancingMode *string `json:"loadBalancingMode,omitempty" tf:"load_balancing_mode,omitempty"` + + // Managed pipeline mode. Possible values include: Integrated, Classic. Defaults to Integrated. + // The Managed Pipeline mode. Possible values include: `Integrated`, `Classic`. Defaults to `Integrated`. + ManagedPipelineMode *string `json:"managedPipelineMode,omitempty" tf:"managed_pipeline_mode,omitempty"` + + // The configures the minimum version of TLS required for SSL requests. Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + // The configures the minimum version of TLS required for SSL requests. Possible values include: `1.0`, `1.1`, and `1.2`. Defaults to `1.2`. + MinimumTLSVersion *string `json:"minimumTlsVersion,omitempty" tf:"minimum_tls_version,omitempty"` + + // The number of pre-warmed instances for this function app. Only affects apps on an Elastic Premium plan. + // The number of pre-warmed instances for this function app. Only affects apps on an Elastic Premium plan. + PreWarmedInstanceCount *float64 `json:"preWarmedInstanceCount,omitempty" tf:"pre_warmed_instance_count,omitempty"` + + // Should Remote Debugging be enabled. Defaults to false. + // Should Remote Debugging be enabled. Defaults to `false`. 
+ RemoteDebuggingEnabled *bool `json:"remoteDebuggingEnabled,omitempty" tf:"remote_debugging_enabled,omitempty"` + + // The Remote Debugging Version. Possible values include VS2017, VS2019, and VS2022. + // The Remote Debugging Version. Possible values include `VS2017`, `VS2019`, and `VS2022`. + RemoteDebuggingVersion *string `json:"remoteDebuggingVersion,omitempty" tf:"remote_debugging_version,omitempty"` + + // Should Scale Monitoring of the Functions Runtime be enabled? + // Should Functions Runtime Scale Monitoring be enabled. + RuntimeScaleMonitoringEnabled *bool `json:"runtimeScaleMonitoringEnabled,omitempty" tf:"runtime_scale_monitoring_enabled,omitempty"` + + // One or more scm_ip_restriction blocks as defined above. + ScmIPRestriction []LinuxFunctionAppSiteConfigScmIPRestrictionObservation `json:"scmIpRestriction,omitempty" tf:"scm_ip_restriction,omitempty"` + + // The Default action for traffic that does not match any scm_ip_restriction rule. possible values include Allow and Deny. Defaults to Allow. + ScmIPRestrictionDefaultAction *string `json:"scmIpRestrictionDefaultAction,omitempty" tf:"scm_ip_restriction_default_action,omitempty"` + + // Configures the minimum version of TLS required for SSL requests to the SCM site Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + // Configures the minimum version of TLS required for SSL requests to the SCM site Possible values include: `1.0`, `1.1`, and `1.2`. Defaults to `1.2`. + ScmMinimumTLSVersion *string `json:"scmMinimumTlsVersion,omitempty" tf:"scm_minimum_tls_version,omitempty"` + + // The SCM Type in use by the Linux Function App. + ScmType *string `json:"scmType,omitempty" tf:"scm_type,omitempty"` + + // Should the Linux Function App ip_restriction configuration be used for the SCM also. + // Should the Linux Function App `ip_restriction` configuration be used for the SCM also. 
+ ScmUseMainIPRestriction *bool `json:"scmUseMainIpRestriction,omitempty" tf:"scm_use_main_ip_restriction,omitempty"` + + // Should the Linux Web App use a 32-bit worker process. Defaults to false. + // Should the Linux Web App use a 32-bit worker. + Use32BitWorker *bool `json:"use32BitWorker,omitempty" tf:"use_32_bit_worker,omitempty"` + + // Should all outbound traffic to have NAT Gateways, Network Security Groups and User Defined Routes applied? Defaults to false. + // Should all outbound traffic to have Virtual Network Security Groups and User Defined Routes applied? Defaults to `false`. + VnetRouteAllEnabled *bool `json:"vnetRouteAllEnabled,omitempty" tf:"vnet_route_all_enabled,omitempty"` + + // Should Web Sockets be enabled. Defaults to false. + // Should Web Sockets be enabled. Defaults to `false`. + WebsocketsEnabled *bool `json:"websocketsEnabled,omitempty" tf:"websockets_enabled,omitempty"` + + // The number of Workers for this Linux Function App. + // The number of Workers for this Linux Function App. + WorkerCount *float64 `json:"workerCount,omitempty" tf:"worker_count,omitempty"` +} + +type LinuxFunctionAppSiteConfigParameters struct { + + // The URL of the API definition that describes this Linux Function App. + // The URL of the API definition that describes this Linux Function App. + // +kubebuilder:validation:Optional + APIDefinitionURL *string `json:"apiDefinitionUrl,omitempty" tf:"api_definition_url,omitempty"` + + // The ID of the API Management API for this Linux Function App. + // The ID of the API Management API for this Linux Function App. + // +kubebuilder:validation:Optional + APIManagementAPIID *string `json:"apiManagementApiId,omitempty" tf:"api_management_api_id,omitempty"` + + // If this Linux Web App is Always On enabled. Defaults to false. + // If this Linux Web App is Always On enabled. Defaults to `false`. 
+ // +kubebuilder:validation:Optional + AlwaysOn *bool `json:"alwaysOn,omitempty" tf:"always_on,omitempty"` + + // The App command line to launch. + // The program and any arguments used to launch this app via the command line. (Example `node myapp.js`). + // +kubebuilder:validation:Optional + AppCommandLine *string `json:"appCommandLine,omitempty" tf:"app_command_line,omitempty"` + + // The number of workers this function app can scale out to. Only applicable to apps on the Consumption and Premium plan. + // The number of workers this function app can scale out to. Only applicable to apps on the Consumption and Premium plan. + // +kubebuilder:validation:Optional + AppScaleLimit *float64 `json:"appScaleLimit,omitempty" tf:"app_scale_limit,omitempty"` + + // An app_service_logs block as defined above. + // +kubebuilder:validation:Optional + AppServiceLogs *AppServiceLogsParameters `json:"appServiceLogs,omitempty" tf:"app_service_logs,omitempty"` + + // The Connection String for linking the Linux Function App to Application Insights. + // The Connection String for linking the Linux Function App to Application Insights. + // +kubebuilder:validation:Optional + ApplicationInsightsConnectionStringSecretRef *v1.SecretKeySelector `json:"applicationInsightsConnectionStringSecretRef,omitempty" tf:"-"` + + // The Instrumentation Key for connecting the Linux Function App to Application Insights. + // The Instrumentation Key for connecting the Linux Function App to Application Insights. + // +kubebuilder:validation:Optional + ApplicationInsightsKeySecretRef *v1.SecretKeySelector `json:"applicationInsightsKeySecretRef,omitempty" tf:"-"` + + // An application_stack block as defined above. + // +kubebuilder:validation:Optional + ApplicationStack *ApplicationStackParameters `json:"applicationStack,omitempty" tf:"application_stack,omitempty"` + + // The Client ID of the Managed Service Identity to use for connections to the Azure Container Registry. 
+ // The Client ID of the Managed Service Identity to use for connections to the Azure Container Registry. + // +kubebuilder:validation:Optional + ContainerRegistryManagedIdentityClientID *string `json:"containerRegistryManagedIdentityClientId,omitempty" tf:"container_registry_managed_identity_client_id,omitempty"` + + // Should connections for Azure Container Registry use Managed Identity. + // Should connections for Azure Container Registry use Managed Identity. + // +kubebuilder:validation:Optional + ContainerRegistryUseManagedIdentity *bool `json:"containerRegistryUseManagedIdentity,omitempty" tf:"container_registry_use_managed_identity,omitempty"` + + // A cors block as defined above. + // +kubebuilder:validation:Optional + Cors *LinuxFunctionAppSiteConfigCorsParameters `json:"cors,omitempty" tf:"cors,omitempty"` + + // Specifies a list of Default Documents for the Linux Web App. + // Specifies a list of Default Documents for the Linux Web App. + // +kubebuilder:validation:Optional + DefaultDocuments []*string `json:"defaultDocuments,omitempty" tf:"default_documents,omitempty"` + + // The number of minimum instances for this Linux Function App. Only affects apps on Elastic Premium plans. + // The number of minimum instances for this Linux Function App. Only affects apps on Elastic Premium plans. + // +kubebuilder:validation:Optional + ElasticInstanceMinimum *float64 `json:"elasticInstanceMinimum,omitempty" tf:"elastic_instance_minimum,omitempty"` + + // State of FTP / FTPS service for this function app. Possible values include: AllAllowed, FtpsOnly and Disabled. Defaults to Disabled. + // State of FTP / FTPS service for this function app. Possible values include: `AllAllowed`, `FtpsOnly` and `Disabled`. Defaults to `Disabled`. + // +kubebuilder:validation:Optional + FtpsState *string `json:"ftpsState,omitempty" tf:"ftps_state,omitempty"` + + // The amount of time in minutes that a node can be unhealthy before being removed from the load balancer. 
Possible values are between 2 and 10. Only valid in conjunction with health_check_path. + // The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between `2` and `10`. Defaults to `10`. Only valid in conjunction with `health_check_path` + // +kubebuilder:validation:Optional + HealthCheckEvictionTimeInMin *float64 `json:"healthCheckEvictionTimeInMin,omitempty" tf:"health_check_eviction_time_in_min,omitempty"` + + // The path to be checked for this function app health. + // The path to be checked for this function app health. + // +kubebuilder:validation:Optional + HealthCheckPath *string `json:"healthCheckPath,omitempty" tf:"health_check_path,omitempty"` + + // Specifies if the HTTP2 protocol should be enabled. Defaults to false. + // Specifies if the http2 protocol should be enabled. Defaults to `false`. + // +kubebuilder:validation:Optional + Http2Enabled *bool `json:"http2Enabled,omitempty" tf:"http2_enabled,omitempty"` + + // One or more ip_restriction blocks as defined above. + // +kubebuilder:validation:Optional + IPRestriction []LinuxFunctionAppSiteConfigIPRestrictionParameters `json:"ipRestriction,omitempty" tf:"ip_restriction,omitempty"` + + // The Default action for traffic that does not match any ip_restriction rule. possible values include Allow and Deny. Defaults to Allow. + // +kubebuilder:validation:Optional + IPRestrictionDefaultAction *string `json:"ipRestrictionDefaultAction,omitempty" tf:"ip_restriction_default_action,omitempty"` + + // The Site load balancing mode. Possible values include: WeightedRoundRobin, LeastRequests, LeastResponseTime, WeightedTotalTraffic, RequestHash, PerSiteRoundRobin. Defaults to LeastRequests if omitted. + // The Site load balancing mode. Possible values include: `WeightedRoundRobin`, `LeastRequests`, `LeastResponseTime`, `WeightedTotalTraffic`, `RequestHash`, `PerSiteRoundRobin`. Defaults to `LeastRequests` if omitted. 
+ // +kubebuilder:validation:Optional + LoadBalancingMode *string `json:"loadBalancingMode,omitempty" tf:"load_balancing_mode,omitempty"` + + // Managed pipeline mode. Possible values include: Integrated, Classic. Defaults to Integrated. + // The Managed Pipeline mode. Possible values include: `Integrated`, `Classic`. Defaults to `Integrated`. + // +kubebuilder:validation:Optional + ManagedPipelineMode *string `json:"managedPipelineMode,omitempty" tf:"managed_pipeline_mode,omitempty"` + + // The configures the minimum version of TLS required for SSL requests. Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + // The configures the minimum version of TLS required for SSL requests. Possible values include: `1.0`, `1.1`, and `1.2`. Defaults to `1.2`. + // +kubebuilder:validation:Optional + MinimumTLSVersion *string `json:"minimumTlsVersion,omitempty" tf:"minimum_tls_version,omitempty"` + + // The number of pre-warmed instances for this function app. Only affects apps on an Elastic Premium plan. + // The number of pre-warmed instances for this function app. Only affects apps on an Elastic Premium plan. + // +kubebuilder:validation:Optional + PreWarmedInstanceCount *float64 `json:"preWarmedInstanceCount,omitempty" tf:"pre_warmed_instance_count,omitempty"` + + // Should Remote Debugging be enabled. Defaults to false. + // Should Remote Debugging be enabled. Defaults to `false`. + // +kubebuilder:validation:Optional + RemoteDebuggingEnabled *bool `json:"remoteDebuggingEnabled,omitempty" tf:"remote_debugging_enabled,omitempty"` + + // The Remote Debugging Version. Possible values include VS2017, VS2019, and VS2022. + // The Remote Debugging Version. Possible values include `VS2017`, `VS2019`, and `VS2022`. + // +kubebuilder:validation:Optional + RemoteDebuggingVersion *string `json:"remoteDebuggingVersion,omitempty" tf:"remote_debugging_version,omitempty"` + + // Should Scale Monitoring of the Functions Runtime be enabled? 
+ // Should Functions Runtime Scale Monitoring be enabled. + // +kubebuilder:validation:Optional + RuntimeScaleMonitoringEnabled *bool `json:"runtimeScaleMonitoringEnabled,omitempty" tf:"runtime_scale_monitoring_enabled,omitempty"` + + // One or more scm_ip_restriction blocks as defined above. + // +kubebuilder:validation:Optional + ScmIPRestriction []LinuxFunctionAppSiteConfigScmIPRestrictionParameters `json:"scmIpRestriction,omitempty" tf:"scm_ip_restriction,omitempty"` + + // The Default action for traffic that does not match any scm_ip_restriction rule. possible values include Allow and Deny. Defaults to Allow. + // +kubebuilder:validation:Optional + ScmIPRestrictionDefaultAction *string `json:"scmIpRestrictionDefaultAction,omitempty" tf:"scm_ip_restriction_default_action,omitempty"` + + // Configures the minimum version of TLS required for SSL requests to the SCM site Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + // Configures the minimum version of TLS required for SSL requests to the SCM site Possible values include: `1.0`, `1.1`, and `1.2`. Defaults to `1.2`. + // +kubebuilder:validation:Optional + ScmMinimumTLSVersion *string `json:"scmMinimumTlsVersion,omitempty" tf:"scm_minimum_tls_version,omitempty"` + + // Should the Linux Function App ip_restriction configuration be used for the SCM also. + // Should the Linux Function App `ip_restriction` configuration be used for the SCM also. + // +kubebuilder:validation:Optional + ScmUseMainIPRestriction *bool `json:"scmUseMainIpRestriction,omitempty" tf:"scm_use_main_ip_restriction,omitempty"` + + // Should the Linux Web App use a 32-bit worker process. Defaults to false. + // Should the Linux Web App use a 32-bit worker. + // +kubebuilder:validation:Optional + Use32BitWorker *bool `json:"use32BitWorker,omitempty" tf:"use_32_bit_worker,omitempty"` + + // Should all outbound traffic to have NAT Gateways, Network Security Groups and User Defined Routes applied? Defaults to false. 
+ // Should all outbound traffic to have Virtual Network Security Groups and User Defined Routes applied? Defaults to `false`. + // +kubebuilder:validation:Optional + VnetRouteAllEnabled *bool `json:"vnetRouteAllEnabled,omitempty" tf:"vnet_route_all_enabled,omitempty"` + + // Should Web Sockets be enabled. Defaults to false. + // Should Web Sockets be enabled. Defaults to `false`. + // +kubebuilder:validation:Optional + WebsocketsEnabled *bool `json:"websocketsEnabled,omitempty" tf:"websockets_enabled,omitempty"` + + // The number of Workers for this Linux Function App. + // The number of Workers for this Linux Function App. + // +kubebuilder:validation:Optional + WorkerCount *float64 `json:"workerCount,omitempty" tf:"worker_count,omitempty"` +} + +type LinuxFunctionAppSiteConfigScmIPRestrictionHeadersInitParameters struct { + + // Specifies a list of Azure Front Door IDs. + XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid"` + + // Specifies if a Front Door Health Probe should be expected. The only possible value is 1. + XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe"` + + // Specifies a list of addresses for which matching should be applied. Omitting this value means allow any. + XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for"` + + // Specifies a list of Hosts for which matching should be applied. + XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host"` +} + +type LinuxFunctionAppSiteConfigScmIPRestrictionHeadersObservation struct { + + // Specifies a list of Azure Front Door IDs. + XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid,omitempty"` + + // Specifies if a Front Door Health Probe should be expected. The only possible value is 1. + XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe,omitempty"` + + // Specifies a list of addresses for which matching should be applied. Omitting this value means allow any. 
+ XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for,omitempty"` + + // Specifies a list of Hosts for which matching should be applied. + XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host,omitempty"` +} + +type LinuxFunctionAppSiteConfigScmIPRestrictionHeadersParameters struct { + + // Specifies a list of Azure Front Door IDs. + // +kubebuilder:validation:Optional + XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid"` + + // Specifies if a Front Door Health Probe should be expected. The only possible value is 1. + // +kubebuilder:validation:Optional + XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe"` + + // Specifies a list of addresses for which matching should be applied. Omitting this value means allow any. + // +kubebuilder:validation:Optional + XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for"` + + // Specifies a list of Hosts for which matching should be applied. + // +kubebuilder:validation:Optional + XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host"` +} + +type LinuxFunctionAppSiteConfigScmIPRestrictionInitParameters struct { + + // The action to take. Possible values are Allow or Deny. Defaults to Allow. + // The action to take. Possible values are `Allow` or `Deny`. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The Description of this IP Restriction. + // The description of the IP restriction rule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A headers block as defined above. + Headers []LinuxFunctionAppSiteConfigScmIPRestrictionHeadersInitParameters `json:"headers,omitempty" tf:"headers,omitempty"` + + // The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + // The CIDR notation of the IP or IP Range to match. 
For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // The name which should be used for this Storage Account. + // The name which should be used for this `ip_restriction`. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The priority value of this ip_restriction. Defaults to 65000. + // The priority value of this `ip_restriction`. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Service Tag used for this IP Restriction. + // The Service Tag used for this IP Restriction. + ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag,omitempty"` + + // The subnet id which will be used by this Function App for regional virtual network integration. + // The Virtual Network Subnet ID used for this IP Restriction. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDRef *v1.Reference `json:"virtualNetworkSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDSelector *v1.Selector `json:"virtualNetworkSubnetIdSelector,omitempty" tf:"-"` +} + +type LinuxFunctionAppSiteConfigScmIPRestrictionObservation struct { + + // The action to take. Possible values are Allow or Deny. Defaults to Allow. + // The action to take. Possible values are `Allow` or `Deny`. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The Description of this IP Restriction. 
+ // The description of the IP restriction rule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A headers block as defined above. + Headers []LinuxFunctionAppSiteConfigScmIPRestrictionHeadersObservation `json:"headers,omitempty" tf:"headers,omitempty"` + + // The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + // The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // The name which should be used for this Storage Account. + // The name which should be used for this `ip_restriction`. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The priority value of this ip_restriction. Defaults to 65000. + // The priority value of this `ip_restriction`. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Service Tag used for this IP Restriction. + // The Service Tag used for this IP Restriction. + ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag,omitempty"` + + // The subnet id which will be used by this Function App for regional virtual network integration. + // The Virtual Network Subnet ID used for this IP Restriction. + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` +} + +type LinuxFunctionAppSiteConfigScmIPRestrictionParameters struct { + + // The action to take. Possible values are Allow or Deny. Defaults to Allow. + // The action to take. Possible values are `Allow` or `Deny`. + // +kubebuilder:validation:Optional + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The Description of this IP Restriction. + // The description of the IP restriction rule. 
+ // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A headers block as defined above. + // +kubebuilder:validation:Optional + Headers []LinuxFunctionAppSiteConfigScmIPRestrictionHeadersParameters `json:"headers,omitempty" tf:"headers,omitempty"` + + // The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + // The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + // +kubebuilder:validation:Optional + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // The name which should be used for this Storage Account. + // The name which should be used for this `ip_restriction`. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The priority value of this ip_restriction. Defaults to 65000. + // The priority value of this `ip_restriction`. + // +kubebuilder:validation:Optional + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Service Tag used for this IP Restriction. + // The Service Tag used for this IP Restriction. + // +kubebuilder:validation:Optional + ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag,omitempty"` + + // The subnet id which will be used by this Function App for regional virtual network integration. + // The Virtual Network Subnet ID used for this IP Restriction. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate virtualNetworkSubnetId. 
+ // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDRef *v1.Reference `json:"virtualNetworkSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDSelector *v1.Selector `json:"virtualNetworkSubnetIdSelector,omitempty" tf:"-"` +} + +type LinuxFunctionAppSiteCredentialInitParameters struct { +} + +type LinuxFunctionAppSiteCredentialObservation struct { + + // The Site Credentials Username used for publishing. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Site Credentials Password used for publishing. + Password *string `json:"password,omitempty" tf:"password,omitempty"` +} + +type LinuxFunctionAppSiteCredentialParameters struct { +} + +type LoginInitParameters struct { + + // External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. + // External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. **Note:** URLs within the current domain are always implicitly allowed. + AllowedExternalRedirectUrls []*string `json:"allowedExternalRedirectUrls,omitempty" tf:"allowed_external_redirect_urls,omitempty"` + + // The method by which cookies expire. Possible values include: FixedTime, and IdentityProviderDerived. Defaults to FixedTime. + // The method by which cookies expire. Possible values include: `FixedTime`, and `IdentityProviderDerived`. Defaults to `FixedTime`. + CookieExpirationConvention *string `json:"cookieExpirationConvention,omitempty" tf:"cookie_expiration_convention,omitempty"` + + // The time after the request is made when the session cookie should expire. Defaults to 08:00:00. + // The time after the request is made when the session cookie should expire. Defaults to `08:00:00`. 
+ CookieExpirationTime *string `json:"cookieExpirationTime,omitempty" tf:"cookie_expiration_time,omitempty"` + + // The endpoint to which logout requests should be made. + // The endpoint to which logout requests should be made. + LogoutEndpoint *string `json:"logoutEndpoint,omitempty" tf:"logout_endpoint,omitempty"` + + // The time after the request is made when the nonce should expire. Defaults to 00:05:00. + // The time after the request is made when the nonce should expire. Defaults to `00:05:00`. + NonceExpirationTime *string `json:"nonceExpirationTime,omitempty" tf:"nonce_expiration_time,omitempty"` + + // Should the fragments from the request be preserved after the login request is made. Defaults to false. + // Should the fragments from the request be preserved after the login request is made. Defaults to `false`. + PreserveURLFragmentsForLogins *bool `json:"preserveUrlFragmentsForLogins,omitempty" tf:"preserve_url_fragments_for_logins,omitempty"` + + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + TokenRefreshExtensionTime *float64 `json:"tokenRefreshExtensionTime,omitempty" tf:"token_refresh_extension_time,omitempty"` + + // Should the Token Store configuration Enabled. Defaults to false + // Should the Token Store configuration Enabled. Defaults to `false` + TokenStoreEnabled *bool `json:"tokenStoreEnabled,omitempty" tf:"token_store_enabled,omitempty"` + + // The directory path in the App Filesystem in which the tokens will be stored. + // The directory path in the App Filesystem in which the tokens will be stored. + TokenStorePath *string `json:"tokenStorePath,omitempty" tf:"token_store_path,omitempty"` + + // The name of the app setting which contains the SAS URL of the blob storage containing the tokens. 
+ // The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + TokenStoreSASSettingName *string `json:"tokenStoreSasSettingName,omitempty" tf:"token_store_sas_setting_name,omitempty"` + + // Should the nonce be validated while completing the login flow. Defaults to true. + // Should the nonce be validated while completing the login flow. Defaults to `true`. + ValidateNonce *bool `json:"validateNonce,omitempty" tf:"validate_nonce,omitempty"` +} + +type LoginObservation struct { + + // External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. + // External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. **Note:** URLs within the current domain are always implicitly allowed. + AllowedExternalRedirectUrls []*string `json:"allowedExternalRedirectUrls,omitempty" tf:"allowed_external_redirect_urls,omitempty"` + + // The method by which cookies expire. Possible values include: FixedTime, and IdentityProviderDerived. Defaults to FixedTime. + // The method by which cookies expire. Possible values include: `FixedTime`, and `IdentityProviderDerived`. Defaults to `FixedTime`. + CookieExpirationConvention *string `json:"cookieExpirationConvention,omitempty" tf:"cookie_expiration_convention,omitempty"` + + // The time after the request is made when the session cookie should expire. Defaults to 08:00:00. + // The time after the request is made when the session cookie should expire. Defaults to `08:00:00`. + CookieExpirationTime *string `json:"cookieExpirationTime,omitempty" tf:"cookie_expiration_time,omitempty"` + + // The endpoint to which logout requests should be made. + // The endpoint to which logout requests should be made. 
+ LogoutEndpoint *string `json:"logoutEndpoint,omitempty" tf:"logout_endpoint,omitempty"` + + // The time after the request is made when the nonce should expire. Defaults to 00:05:00. + // The time after the request is made when the nonce should expire. Defaults to `00:05:00`. + NonceExpirationTime *string `json:"nonceExpirationTime,omitempty" tf:"nonce_expiration_time,omitempty"` + + // Should the fragments from the request be preserved after the login request is made. Defaults to false. + // Should the fragments from the request be preserved after the login request is made. Defaults to `false`. + PreserveURLFragmentsForLogins *bool `json:"preserveUrlFragmentsForLogins,omitempty" tf:"preserve_url_fragments_for_logins,omitempty"` + + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + TokenRefreshExtensionTime *float64 `json:"tokenRefreshExtensionTime,omitempty" tf:"token_refresh_extension_time,omitempty"` + + // Should the Token Store configuration Enabled. Defaults to false + // Should the Token Store configuration Enabled. Defaults to `false` + TokenStoreEnabled *bool `json:"tokenStoreEnabled,omitempty" tf:"token_store_enabled,omitempty"` + + // The directory path in the App Filesystem in which the tokens will be stored. + // The directory path in the App Filesystem in which the tokens will be stored. + TokenStorePath *string `json:"tokenStorePath,omitempty" tf:"token_store_path,omitempty"` + + // The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + // The name of the app setting which contains the SAS URL of the blob storage containing the tokens. 
+ TokenStoreSASSettingName *string `json:"tokenStoreSasSettingName,omitempty" tf:"token_store_sas_setting_name,omitempty"` + + // Should the nonce be validated while completing the login flow. Defaults to true. + // Should the nonce be validated while completing the login flow. Defaults to `true`. + ValidateNonce *bool `json:"validateNonce,omitempty" tf:"validate_nonce,omitempty"` +} + +type LoginParameters struct { + + // External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. + // External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. **Note:** URLs within the current domain are always implicitly allowed. + // +kubebuilder:validation:Optional + AllowedExternalRedirectUrls []*string `json:"allowedExternalRedirectUrls,omitempty" tf:"allowed_external_redirect_urls,omitempty"` + + // The method by which cookies expire. Possible values include: FixedTime, and IdentityProviderDerived. Defaults to FixedTime. + // The method by which cookies expire. Possible values include: `FixedTime`, and `IdentityProviderDerived`. Defaults to `FixedTime`. + // +kubebuilder:validation:Optional + CookieExpirationConvention *string `json:"cookieExpirationConvention,omitempty" tf:"cookie_expiration_convention,omitempty"` + + // The time after the request is made when the session cookie should expire. Defaults to 08:00:00. + // The time after the request is made when the session cookie should expire. Defaults to `08:00:00`. + // +kubebuilder:validation:Optional + CookieExpirationTime *string `json:"cookieExpirationTime,omitempty" tf:"cookie_expiration_time,omitempty"` + + // The endpoint to which logout requests should be made. + // The endpoint to which logout requests should be made. 
+ // +kubebuilder:validation:Optional + LogoutEndpoint *string `json:"logoutEndpoint,omitempty" tf:"logout_endpoint,omitempty"` + + // The time after the request is made when the nonce should expire. Defaults to 00:05:00. + // The time after the request is made when the nonce should expire. Defaults to `00:05:00`. + // +kubebuilder:validation:Optional + NonceExpirationTime *string `json:"nonceExpirationTime,omitempty" tf:"nonce_expiration_time,omitempty"` + + // Should the fragments from the request be preserved after the login request is made. Defaults to false. + // Should the fragments from the request be preserved after the login request is made. Defaults to `false`. + // +kubebuilder:validation:Optional + PreserveURLFragmentsForLogins *bool `json:"preserveUrlFragmentsForLogins,omitempty" tf:"preserve_url_fragments_for_logins,omitempty"` + + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + // +kubebuilder:validation:Optional + TokenRefreshExtensionTime *float64 `json:"tokenRefreshExtensionTime,omitempty" tf:"token_refresh_extension_time,omitempty"` + + // Should the Token Store configuration Enabled. Defaults to false + // Should the Token Store configuration Enabled. Defaults to `false` + // +kubebuilder:validation:Optional + TokenStoreEnabled *bool `json:"tokenStoreEnabled,omitempty" tf:"token_store_enabled,omitempty"` + + // The directory path in the App Filesystem in which the tokens will be stored. + // The directory path in the App Filesystem in which the tokens will be stored. + // +kubebuilder:validation:Optional + TokenStorePath *string `json:"tokenStorePath,omitempty" tf:"token_store_path,omitempty"` + + // The name of the app setting which contains the SAS URL of the blob storage containing the tokens. 
+ // The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + // +kubebuilder:validation:Optional + TokenStoreSASSettingName *string `json:"tokenStoreSasSettingName,omitempty" tf:"token_store_sas_setting_name,omitempty"` + + // Should the nonce be validated while completing the login flow. Defaults to true. + // Should the nonce be validated while completing the login flow. Defaults to `true`. + // +kubebuilder:validation:Optional + ValidateNonce *bool `json:"validateNonce,omitempty" tf:"validate_nonce,omitempty"` +} + +type MicrosoftV2InitParameters struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OAuth 2.0 client ID that was created for the app used for authentication. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // The list of Login scopes that will be requested as part of Microsoft Account authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type MicrosoftV2Observation struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. 
+ // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OAuth 2.0 client ID that was created for the app used for authentication. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // The list of Login scopes that will be requested as part of Microsoft Account authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type MicrosoftV2Parameters struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // +kubebuilder:validation:Optional + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OAuth 2.0 client ID that was created for the app used for authentication. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. 
+ // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // The list of Login scopes that will be requested as part of Microsoft Account authentication. + // +kubebuilder:validation:Optional + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type ScheduleInitParameters struct { + + // How often the backup should be executed (e.g. for weekly backup, this should be set to 7 and frequency_unit should be set to Day). + // How often the backup should be executed (e.g. for weekly backup, this should be set to `7` and `frequency_unit` should be set to `Day`). + FrequencyInterval *float64 `json:"frequencyInterval,omitempty" tf:"frequency_interval,omitempty"` + + // The unit of time for how often the backup should take place. Possible values include: Day and Hour. + // The unit of time for how often the backup should take place. Possible values include: `Day` and `Hour`. + FrequencyUnit *string `json:"frequencyUnit,omitempty" tf:"frequency_unit,omitempty"` + + // Should the service keep at least one backup, regardless of age of backup. Defaults to false. + // Should the service keep at least one backup, regardless of age of backup. Defaults to `false`. + KeepAtLeastOneBackup *bool `json:"keepAtLeastOneBackup,omitempty" tf:"keep_at_least_one_backup,omitempty"` + + // After how many days backups should be deleted. Defaults to 30. + // After how many days backups should be deleted. + RetentionPeriodDays *float64 `json:"retentionPeriodDays,omitempty" tf:"retention_period_days,omitempty"` + + // When the schedule should start working in RFC-3339 format. + // When the schedule should start working in RFC-3339 format. 
+ StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` +} + +type ScheduleObservation struct { + + // How often the backup should be executed (e.g. for weekly backup, this should be set to 7 and frequency_unit should be set to Day). + // How often the backup should be executed (e.g. for weekly backup, this should be set to `7` and `frequency_unit` should be set to `Day`). + FrequencyInterval *float64 `json:"frequencyInterval,omitempty" tf:"frequency_interval,omitempty"` + + // The unit of time for how often the backup should take place. Possible values include: Day and Hour. + // The unit of time for how often the backup should take place. Possible values include: `Day` and `Hour`. + FrequencyUnit *string `json:"frequencyUnit,omitempty" tf:"frequency_unit,omitempty"` + + // Should the service keep at least one backup, regardless of age of backup. Defaults to false. + // Should the service keep at least one backup, regardless of age of backup. Defaults to `false`. + KeepAtLeastOneBackup *bool `json:"keepAtLeastOneBackup,omitempty" tf:"keep_at_least_one_backup,omitempty"` + + // The time the backup was last attempted. + LastExecutionTime *string `json:"lastExecutionTime,omitempty" tf:"last_execution_time,omitempty"` + + // After how many days backups should be deleted. Defaults to 30. + // After how many days backups should be deleted. + RetentionPeriodDays *float64 `json:"retentionPeriodDays,omitempty" tf:"retention_period_days,omitempty"` + + // When the schedule should start working in RFC-3339 format. + // When the schedule should start working in RFC-3339 format. + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` +} + +type ScheduleParameters struct { + + // How often the backup should be executed (e.g. for weekly backup, this should be set to 7 and frequency_unit should be set to Day). + // How often the backup should be executed (e.g. 
for weekly backup, this should be set to `7` and `frequency_unit` should be set to `Day`). + // +kubebuilder:validation:Optional + FrequencyInterval *float64 `json:"frequencyInterval" tf:"frequency_interval,omitempty"` + + // The unit of time for how often the backup should take place. Possible values include: Day and Hour. + // The unit of time for how often the backup should take place. Possible values include: `Day` and `Hour`. + // +kubebuilder:validation:Optional + FrequencyUnit *string `json:"frequencyUnit" tf:"frequency_unit,omitempty"` + + // Should the service keep at least one backup, regardless of age of backup. Defaults to false. + // Should the service keep at least one backup, regardless of age of backup. Defaults to `false`. + // +kubebuilder:validation:Optional + KeepAtLeastOneBackup *bool `json:"keepAtLeastOneBackup,omitempty" tf:"keep_at_least_one_backup,omitempty"` + + // After how many days backups should be deleted. Defaults to 30. + // After how many days backups should be deleted. + // +kubebuilder:validation:Optional + RetentionPeriodDays *float64 `json:"retentionPeriodDays,omitempty" tf:"retention_period_days,omitempty"` + + // When the schedule should start working in RFC-3339 format. + // When the schedule should start working in RFC-3339 format. + // +kubebuilder:validation:Optional + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` +} + +type SiteConfigIPRestrictionHeadersInitParameters struct { + + // Specifies a list of Azure Front Door IDs. + XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid"` + + // Specifies if a Front Door Health Probe should be expected. The only possible value is 1. + XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe"` + + // Specifies a list of addresses for which matching should be applied. Omitting this value means allow any. 
+ XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for"` + + // Specifies a list of Hosts for which matching should be applied. + XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host"` +} + +type SiteConfigIPRestrictionHeadersObservation struct { + + // Specifies a list of Azure Front Door IDs. + XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid,omitempty"` + + // Specifies if a Front Door Health Probe should be expected. The only possible value is 1. + XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe,omitempty"` + + // Specifies a list of addresses for which matching should be applied. Omitting this value means allow any. + XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for,omitempty"` + + // Specifies a list of Hosts for which matching should be applied. + XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host,omitempty"` +} + +type SiteConfigIPRestrictionHeadersParameters struct { + + // Specifies a list of Azure Front Door IDs. + // +kubebuilder:validation:Optional + XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid"` + + // Specifies if a Front Door Health Probe should be expected. The only possible value is 1. + // +kubebuilder:validation:Optional + XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe"` + + // Specifies a list of addresses for which matching should be applied. Omitting this value means allow any. + // +kubebuilder:validation:Optional + XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for"` + + // Specifies a list of Hosts for which matching should be applied. 
+ // +kubebuilder:validation:Optional + XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host"` +} + +type StickySettingsInitParameters struct { + + // A list of app_setting names that the Linux Function App will not swap between Slots when a swap operation is triggered. + AppSettingNames []*string `json:"appSettingNames,omitempty" tf:"app_setting_names,omitempty"` + + // A list of connection_string names that the Linux Function App will not swap between Slots when a swap operation is triggered. + ConnectionStringNames []*string `json:"connectionStringNames,omitempty" tf:"connection_string_names,omitempty"` +} + +type StickySettingsObservation struct { + + // A list of app_setting names that the Linux Function App will not swap between Slots when a swap operation is triggered. + AppSettingNames []*string `json:"appSettingNames,omitempty" tf:"app_setting_names,omitempty"` + + // A list of connection_string names that the Linux Function App will not swap between Slots when a swap operation is triggered. + ConnectionStringNames []*string `json:"connectionStringNames,omitempty" tf:"connection_string_names,omitempty"` +} + +type StickySettingsParameters struct { + + // A list of app_setting names that the Linux Function App will not swap between Slots when a swap operation is triggered. + // +kubebuilder:validation:Optional + AppSettingNames []*string `json:"appSettingNames,omitempty" tf:"app_setting_names,omitempty"` + + // A list of connection_string names that the Linux Function App will not swap between Slots when a swap operation is triggered. + // +kubebuilder:validation:Optional + ConnectionStringNames []*string `json:"connectionStringNames,omitempty" tf:"connection_string_names,omitempty"` +} + +type StorageAccountInitParameters struct { + + // The Name of the Storage Account. + AccountName *string `json:"accountName,omitempty" tf:"account_name,omitempty"` + + // The path at which to mount the storage share. 
+ MountPath *string `json:"mountPath,omitempty" tf:"mount_path,omitempty"` + + // The name which should be used for this Storage Account. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Name of the File Share or Container Name for Blob storage. + ShareName *string `json:"shareName,omitempty" tf:"share_name,omitempty"` + + // The Azure Storage Type. Possible values include AzureFiles and AzureBlob. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type StorageAccountObservation struct { + + // The Name of the Storage Account. + AccountName *string `json:"accountName,omitempty" tf:"account_name,omitempty"` + + // The path at which to mount the storage share. + MountPath *string `json:"mountPath,omitempty" tf:"mount_path,omitempty"` + + // The name which should be used for this Storage Account. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Name of the File Share or Container Name for Blob storage. + ShareName *string `json:"shareName,omitempty" tf:"share_name,omitempty"` + + // The Azure Storage Type. Possible values include AzureFiles and AzureBlob. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type StorageAccountParameters struct { + + // The Access key for the storage account. + // +kubebuilder:validation:Required + AccessKeySecretRef v1.SecretKeySelector `json:"accessKeySecretRef" tf:"-"` + + // The Name of the Storage Account. + // +kubebuilder:validation:Optional + AccountName *string `json:"accountName" tf:"account_name,omitempty"` + + // The path at which to mount the storage share. + // +kubebuilder:validation:Optional + MountPath *string `json:"mountPath,omitempty" tf:"mount_path,omitempty"` + + // The name which should be used for this Storage Account. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The Name of the File Share or Container Name for Blob storage. 
+ // +kubebuilder:validation:Optional + ShareName *string `json:"shareName" tf:"share_name,omitempty"` + + // The Azure Storage Type. Possible values include AzureFiles and AzureBlob. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type TwitterV2InitParameters struct { + + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + ConsumerKey *string `json:"consumerKey,omitempty" tf:"consumer_key,omitempty"` + + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + ConsumerSecretSettingName *string `json:"consumerSecretSettingName,omitempty" tf:"consumer_secret_setting_name,omitempty"` +} + +type TwitterV2Observation struct { + + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + ConsumerKey *string `json:"consumerKey,omitempty" tf:"consumer_key,omitempty"` + + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + ConsumerSecretSettingName *string `json:"consumerSecretSettingName,omitempty" tf:"consumer_secret_setting_name,omitempty"` +} + +type TwitterV2Parameters struct { + + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // +kubebuilder:validation:Optional + ConsumerKey *string `json:"consumerKey" tf:"consumer_key,omitempty"` + + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. 
+ // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + // +kubebuilder:validation:Optional + ConsumerSecretSettingName *string `json:"consumerSecretSettingName" tf:"consumer_secret_setting_name,omitempty"` +} + +// LinuxFunctionAppSpec defines the desired state of LinuxFunctionApp +type LinuxFunctionAppSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider LinuxFunctionAppParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider LinuxFunctionAppInitParameters `json:"initProvider,omitempty"` +} + +// LinuxFunctionAppStatus defines the observed state of LinuxFunctionApp. +type LinuxFunctionAppStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider LinuxFunctionAppObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// LinuxFunctionApp is the Schema for the LinuxFunctionApps API. Manages a Linux Function App. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type LinuxFunctionApp struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.siteConfig) || (has(self.initProvider) && has(self.initProvider.siteConfig))",message="spec.forProvider.siteConfig is a required parameter" + Spec LinuxFunctionAppSpec `json:"spec"` + Status LinuxFunctionAppStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// LinuxFunctionAppList contains a list of LinuxFunctionApps +type LinuxFunctionAppList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []LinuxFunctionApp `json:"items"` +} + +// Repository type metadata. 
+var ( + LinuxFunctionApp_Kind = "LinuxFunctionApp" + LinuxFunctionApp_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: LinuxFunctionApp_Kind}.String() + LinuxFunctionApp_KindAPIVersion = LinuxFunctionApp_Kind + "." + CRDGroupVersion.String() + LinuxFunctionApp_GroupVersionKind = CRDGroupVersion.WithKind(LinuxFunctionApp_Kind) +) + +func init() { + SchemeBuilder.Register(&LinuxFunctionApp{}, &LinuxFunctionAppList{}) +} diff --git a/apis/web/v1beta2/zz_linuxfunctionappslot_terraformed.go b/apis/web/v1beta2/zz_linuxfunctionappslot_terraformed.go new file mode 100755 index 000000000..240103f46 --- /dev/null +++ b/apis/web/v1beta2/zz_linuxfunctionappslot_terraformed.go @@ -0,0 +1,130 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this LinuxFunctionAppSlot +func (mg *LinuxFunctionAppSlot) GetTerraformResourceType() string { + return "azurerm_linux_function_app_slot" +} + +// GetConnectionDetailsMapping for this LinuxFunctionAppSlot +func (tr *LinuxFunctionAppSlot) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"auth_settings[*].active_directory[*].client_secret": "spec.forProvider.authSettings[*].activeDirectory[*].clientSecretSecretRef", "auth_settings[*].facebook[*].app_secret": "spec.forProvider.authSettings[*].facebook[*].appSecretSecretRef", "auth_settings[*].github[*].client_secret": "spec.forProvider.authSettings[*].github[*].clientSecretSecretRef", "auth_settings[*].google[*].client_secret": "spec.forProvider.authSettings[*].google[*].clientSecretSecretRef", "auth_settings[*].microsoft[*].client_secret": "spec.forProvider.authSettings[*].microsoft[*].clientSecretSecretRef", 
"auth_settings[*].twitter[*].consumer_secret": "spec.forProvider.authSettings[*].twitter[*].consumerSecretSecretRef", "backup[*].storage_account_url": "spec.forProvider.backup[*].storageAccountUrlSecretRef", "connection_string[*].value": "spec.forProvider.connectionString[*].valueSecretRef", "custom_domain_verification_id": "status.atProvider.customDomainVerificationId", "site_config[*].application_insights_connection_string": "spec.forProvider.siteConfig[*].applicationInsightsConnectionStringSecretRef", "site_config[*].application_insights_key": "spec.forProvider.siteConfig[*].applicationInsightsKeySecretRef", "site_config[*].application_stack[*].docker[*].registry_password": "spec.forProvider.siteConfig[*].applicationStack[*].docker[*].registryPasswordSecretRef", "site_config[*].application_stack[*].docker[*].registry_username": "spec.forProvider.siteConfig[*].applicationStack[*].docker[*].registryUsernameSecretRef", "site_credential[*]": "status.atProvider.siteCredential[*]", "storage_account[*].access_key": "spec.forProvider.storageAccount[*].accessKeySecretRef", "storage_account_access_key": "spec.forProvider.storageAccountAccessKeySecretRef"} +} + +// GetObservation of this LinuxFunctionAppSlot +func (tr *LinuxFunctionAppSlot) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this LinuxFunctionAppSlot +func (tr *LinuxFunctionAppSlot) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this LinuxFunctionAppSlot +func (tr *LinuxFunctionAppSlot) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this 
LinuxFunctionAppSlot +func (tr *LinuxFunctionAppSlot) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this LinuxFunctionAppSlot +func (tr *LinuxFunctionAppSlot) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this LinuxFunctionAppSlot +func (tr *LinuxFunctionAppSlot) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this LinuxFunctionAppSlot +func (tr *LinuxFunctionAppSlot) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this LinuxFunctionAppSlot using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *LinuxFunctionAppSlot) LateInitialize(attrs []byte) (bool, error) { + params := &LinuxFunctionAppSlotParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + opts = append(opts, resource.WithNameFilter("KeyVaultReferenceIdentityID")) + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *LinuxFunctionAppSlot) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/web/v1beta2/zz_linuxfunctionappslot_types.go b/apis/web/v1beta2/zz_linuxfunctionappslot_types.go new file mode 100755 index 000000000..6b737f3e5 --- /dev/null +++ b/apis/web/v1beta2/zz_linuxfunctionappslot_types.go @@ -0,0 +1,3323 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ApplicationStackDockerInitParameters struct { + + // The name of the Docker image to use. + // The name of the Docker image to use. 
+ ImageName *string `json:"imageName,omitempty" tf:"image_name,omitempty"` + + // The image tag of the image to use. + // The image tag of the image to use. + ImageTag *string `json:"imageTag,omitempty" tf:"image_tag,omitempty"` + + // The URL of the docker registry. + // The URL of the docker registry. + RegistryURL *string `json:"registryUrl,omitempty" tf:"registry_url,omitempty"` +} + +type ApplicationStackDockerObservation struct { + + // The name of the Docker image to use. + // The name of the Docker image to use. + ImageName *string `json:"imageName,omitempty" tf:"image_name,omitempty"` + + // The image tag of the image to use. + // The image tag of the image to use. + ImageTag *string `json:"imageTag,omitempty" tf:"image_tag,omitempty"` + + // The URL of the docker registry. + // The URL of the docker registry. + RegistryURL *string `json:"registryUrl,omitempty" tf:"registry_url,omitempty"` +} + +type ApplicationStackDockerParameters struct { + + // The name of the Docker image to use. + // The name of the Docker image to use. + // +kubebuilder:validation:Optional + ImageName *string `json:"imageName" tf:"image_name,omitempty"` + + // The image tag of the image to use. + // The image tag of the image to use. + // +kubebuilder:validation:Optional + ImageTag *string `json:"imageTag" tf:"image_tag,omitempty"` + + // The password for the account to use to connect to the registry. + // The password for the account to use to connect to the registry. + // +kubebuilder:validation:Optional + RegistryPasswordSecretRef *v1.SecretKeySelector `json:"registryPasswordSecretRef,omitempty" tf:"-"` + + // The URL of the docker registry. + // The URL of the docker registry. + // +kubebuilder:validation:Optional + RegistryURL *string `json:"registryUrl" tf:"registry_url,omitempty"` + + // The username to use for connections to the registry. + // The username to use for connections to the registry. 
+ // +kubebuilder:validation:Optional + RegistryUsernameSecretRef *v1.SecretKeySelector `json:"registryUsernameSecretRef,omitempty" tf:"-"` +} + +type AuthSettingsGithubInitParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the GitHub app used for login. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for GitHub Login. Cannot be specified with `client_secret`. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type AuthSettingsGithubObservation struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the GitHub app used for login. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for GitHub Login. Cannot be specified with `client_secret`. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. 
+ // Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type AuthSettingsGithubParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the GitHub app used for login. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. + // The Client Secret of the GitHub app used for GitHub Login. Cannot be specified with `client_secret_setting_name`. + // +kubebuilder:validation:Optional + ClientSecretSecretRef *v1.SecretKeySelector `json:"clientSecretSecretRef,omitempty" tf:"-"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for GitHub Login. Cannot be specified with `client_secret`. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + // +kubebuilder:validation:Optional + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type AuthSettingsV2ActiveDirectoryV2InitParameters struct { + + // The list of allowed Applications for the Default Authorisation Policy. + // The list of allowed Applications for the Default Authorisation Policy. 
+ AllowedApplications []*string `json:"allowedApplications,omitempty" tf:"allowed_applications,omitempty"` + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The list of allowed Group Names for the Default Authorisation Policy. + // The list of allowed Group Names for the Default Authorisation Policy. + AllowedGroups []*string `json:"allowedGroups,omitempty" tf:"allowed_groups,omitempty"` + + // The list of allowed Identities for the Default Authorisation Policy. + // The list of allowed Identities for the Default Authorisation Policy. + AllowedIdentities []*string `json:"allowedIdentities,omitempty" tf:"allowed_identities,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Active Directory. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The thumbprint of the certificate used for signing purposes. + // The thumbprint of the certificate used for signing purposes. + ClientSecretCertificateThumbprint *string `json:"clientSecretCertificateThumbprint,omitempty" tf:"client_secret_certificate_thumbprint,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The App Setting name that contains the client secret of the Client. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // A list of Allowed Client Applications in the JWT Claim. + // A list of Allowed Client Applications in the JWT Claim. 
+ JwtAllowedClientApplications []*string `json:"jwtAllowedClientApplications,omitempty" tf:"jwt_allowed_client_applications,omitempty"` + + // A list of Allowed Groups in the JWT Claim. + // A list of Allowed Groups in the JWT Claim. + JwtAllowedGroups []*string `json:"jwtAllowedGroups,omitempty" tf:"jwt_allowed_groups,omitempty"` + + // A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + // A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + // +mapType=granular + LoginParameters map[string]*string `json:"loginParameters,omitempty" tf:"login_parameters,omitempty"` + + // The Azure Tenant Endpoint for the Authenticating Tenant. e.g. https://login.microsoftonline.com/v2.0/{tenant-guid}/ + // The Azure Tenant Endpoint for the Authenticating Tenant. e.g. `https://login.microsoftonline.com/v2.0/{tenant-guid}/`. + TenantAuthEndpoint *string `json:"tenantAuthEndpoint,omitempty" tf:"tenant_auth_endpoint,omitempty"` + + // Should the www-authenticate provider should be omitted from the request? Defaults to false. + // Should the www-authenticate provider should be omitted from the request? Defaults to `false` + WwwAuthenticationDisabled *bool `json:"wwwAuthenticationDisabled,omitempty" tf:"www_authentication_disabled,omitempty"` +} + +type AuthSettingsV2ActiveDirectoryV2Observation struct { + + // The list of allowed Applications for the Default Authorisation Policy. + // The list of allowed Applications for the Default Authorisation Policy. + AllowedApplications []*string `json:"allowedApplications,omitempty" tf:"allowed_applications,omitempty"` + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. 
+ AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The list of allowed Group Names for the Default Authorisation Policy. + // The list of allowed Group Names for the Default Authorisation Policy. + AllowedGroups []*string `json:"allowedGroups,omitempty" tf:"allowed_groups,omitempty"` + + // The list of allowed Identities for the Default Authorisation Policy. + // The list of allowed Identities for the Default Authorisation Policy. + AllowedIdentities []*string `json:"allowedIdentities,omitempty" tf:"allowed_identities,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Active Directory. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The thumbprint of the certificate used for signing purposes. + // The thumbprint of the certificate used for signing purposes. + ClientSecretCertificateThumbprint *string `json:"clientSecretCertificateThumbprint,omitempty" tf:"client_secret_certificate_thumbprint,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The App Setting name that contains the client secret of the Client. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // A list of Allowed Client Applications in the JWT Claim. + // A list of Allowed Client Applications in the JWT Claim. + JwtAllowedClientApplications []*string `json:"jwtAllowedClientApplications,omitempty" tf:"jwt_allowed_client_applications,omitempty"` + + // A list of Allowed Groups in the JWT Claim. + // A list of Allowed Groups in the JWT Claim. + JwtAllowedGroups []*string `json:"jwtAllowedGroups,omitempty" tf:"jwt_allowed_groups,omitempty"` + + // A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. 
+ // A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + // +mapType=granular + LoginParameters map[string]*string `json:"loginParameters,omitempty" tf:"login_parameters,omitempty"` + + // The Azure Tenant Endpoint for the Authenticating Tenant. e.g. https://login.microsoftonline.com/v2.0/{tenant-guid}/ + // The Azure Tenant Endpoint for the Authenticating Tenant. e.g. `https://login.microsoftonline.com/v2.0/{tenant-guid}/`. + TenantAuthEndpoint *string `json:"tenantAuthEndpoint,omitempty" tf:"tenant_auth_endpoint,omitempty"` + + // Should the www-authenticate provider should be omitted from the request? Defaults to false. + // Should the www-authenticate provider should be omitted from the request? Defaults to `false` + WwwAuthenticationDisabled *bool `json:"wwwAuthenticationDisabled,omitempty" tf:"www_authentication_disabled,omitempty"` +} + +type AuthSettingsV2ActiveDirectoryV2Parameters struct { + + // The list of allowed Applications for the Default Authorisation Policy. + // The list of allowed Applications for the Default Authorisation Policy. + // +kubebuilder:validation:Optional + AllowedApplications []*string `json:"allowedApplications,omitempty" tf:"allowed_applications,omitempty"` + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + // +kubebuilder:validation:Optional + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The list of allowed Group Names for the Default Authorisation Policy. + // The list of allowed Group Names for the Default Authorisation Policy. + // +kubebuilder:validation:Optional + AllowedGroups []*string `json:"allowedGroups,omitempty" tf:"allowed_groups,omitempty"` + + // The list of allowed Identities for the Default Authorisation Policy. 
+ // The list of allowed Identities for the Default Authorisation Policy. + // +kubebuilder:validation:Optional + AllowedIdentities []*string `json:"allowedIdentities,omitempty" tf:"allowed_identities,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Active Directory. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The thumbprint of the certificate used for signing purposes. + // The thumbprint of the certificate used for signing purposes. + // +kubebuilder:validation:Optional + ClientSecretCertificateThumbprint *string `json:"clientSecretCertificateThumbprint,omitempty" tf:"client_secret_certificate_thumbprint,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The App Setting name that contains the client secret of the Client. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // A list of Allowed Client Applications in the JWT Claim. + // A list of Allowed Client Applications in the JWT Claim. + // +kubebuilder:validation:Optional + JwtAllowedClientApplications []*string `json:"jwtAllowedClientApplications,omitempty" tf:"jwt_allowed_client_applications,omitempty"` + + // A list of Allowed Groups in the JWT Claim. + // A list of Allowed Groups in the JWT Claim. + // +kubebuilder:validation:Optional + JwtAllowedGroups []*string `json:"jwtAllowedGroups,omitempty" tf:"jwt_allowed_groups,omitempty"` + + // A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + // A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + LoginParameters map[string]*string `json:"loginParameters,omitempty" tf:"login_parameters,omitempty"` + + // The Azure Tenant Endpoint for the Authenticating Tenant. e.g. https://login.microsoftonline.com/v2.0/{tenant-guid}/ + // The Azure Tenant Endpoint for the Authenticating Tenant. e.g. `https://login.microsoftonline.com/v2.0/{tenant-guid}/`. + // +kubebuilder:validation:Optional + TenantAuthEndpoint *string `json:"tenantAuthEndpoint" tf:"tenant_auth_endpoint,omitempty"` + + // Should the www-authenticate provider should be omitted from the request? Defaults to false. + // Should the www-authenticate provider should be omitted from the request? Defaults to `false` + // +kubebuilder:validation:Optional + WwwAuthenticationDisabled *bool `json:"wwwAuthenticationDisabled,omitempty" tf:"www_authentication_disabled,omitempty"` +} + +type AuthSettingsV2AppleV2InitParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Apple web application. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Apple Login. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` +} + +type AuthSettingsV2AppleV2Observation struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Apple web application. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Apple Login. 
+ ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type AuthSettingsV2AppleV2Parameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Apple web application. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Apple Login. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName" tf:"client_secret_setting_name,omitempty"` +} + +type AuthSettingsV2AzureStaticWebAppV2InitParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Static Web App Authentication. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` +} + +type AuthSettingsV2AzureStaticWebAppV2Observation struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Static Web App Authentication. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` +} + +type AuthSettingsV2AzureStaticWebAppV2Parameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Static Web App Authentication. 
+ // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` +} + +type AuthSettingsV2CustomOidcV2InitParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with this Custom OIDC. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The name which should be used for this Storage Account. + // The name of the Custom OIDC Authentication Provider. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The name of the claim that contains the users name. + // The name of the claim that contains the users name. + NameClaimType *string `json:"nameClaimType,omitempty" tf:"name_claim_type,omitempty"` + + // The app setting name that contains the client_secret value used for the Custom OIDC Login. + // The endpoint that contains all the configuration endpoints for this Custom OIDC provider. + OpenIDConfigurationEndpoint *string `json:"openidConfigurationEndpoint,omitempty" tf:"openid_configuration_endpoint,omitempty"` + + // The list of the scopes that should be requested while authenticating. + // The list of the scopes that should be requested while authenticating. + Scopes []*string `json:"scopes,omitempty" tf:"scopes,omitempty"` +} + +type AuthSettingsV2CustomOidcV2Observation struct { + + // The endpoint to make the Authorisation Request as supplied by openid_configuration_endpoint response. + // The endpoint to make the Authorisation Request. + AuthorisationEndpoint *string `json:"authorisationEndpoint,omitempty" tf:"authorisation_endpoint,omitempty"` + + // The endpoint that provides the keys necessary to validate the token as supplied by openid_configuration_endpoint response. + // The endpoint that provides the keys necessary to validate the token. + CertificationURI *string `json:"certificationUri,omitempty" tf:"certification_uri,omitempty"` + + // The Client Credential Method used. 
+ // The Client Credential Method used. Currently the only supported value is `ClientSecretPost`. + ClientCredentialMethod *string `json:"clientCredentialMethod,omitempty" tf:"client_credential_method,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with this Custom OIDC. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The App Setting name that contains the secret for this Custom OIDC Client. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The endpoint that issued the Token as supplied by openid_configuration_endpoint response. + // The endpoint that issued the Token. + IssuerEndpoint *string `json:"issuerEndpoint,omitempty" tf:"issuer_endpoint,omitempty"` + + // The name which should be used for this Storage Account. + // The name of the Custom OIDC Authentication Provider. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The name of the claim that contains the users name. + // The name of the claim that contains the users name. + NameClaimType *string `json:"nameClaimType,omitempty" tf:"name_claim_type,omitempty"` + + // The app setting name that contains the client_secret value used for the Custom OIDC Login. + // The endpoint that contains all the configuration endpoints for this Custom OIDC provider. + OpenIDConfigurationEndpoint *string `json:"openidConfigurationEndpoint,omitempty" tf:"openid_configuration_endpoint,omitempty"` + + // The list of the scopes that should be requested while authenticating. + // The list of the scopes that should be requested while authenticating. 
+ Scopes []*string `json:"scopes,omitempty" tf:"scopes,omitempty"` + + // The endpoint used to request a Token as supplied by openid_configuration_endpoint response. + // The endpoint used to request a Token. + TokenEndpoint *string `json:"tokenEndpoint,omitempty" tf:"token_endpoint,omitempty"` +} + +type AuthSettingsV2CustomOidcV2Parameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with this Custom OIDC. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The name which should be used for this Storage Account. + // The name of the Custom OIDC Authentication Provider. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The name of the claim that contains the users name. + // The name of the claim that contains the users name. + // +kubebuilder:validation:Optional + NameClaimType *string `json:"nameClaimType,omitempty" tf:"name_claim_type,omitempty"` + + // The app setting name that contains the client_secret value used for the Custom OIDC Login. + // The endpoint that contains all the configuration endpoints for this Custom OIDC provider. + // +kubebuilder:validation:Optional + OpenIDConfigurationEndpoint *string `json:"openidConfigurationEndpoint" tf:"openid_configuration_endpoint,omitempty"` + + // The list of the scopes that should be requested while authenticating. + // The list of the scopes that should be requested while authenticating. + // +kubebuilder:validation:Optional + Scopes []*string `json:"scopes,omitempty" tf:"scopes,omitempty"` +} + +type AuthSettingsV2FacebookV2InitParameters struct { + + // The App ID of the Facebook app used for login. + // The App ID of the Facebook app used for login. + AppID *string `json:"appId,omitempty" tf:"app_id,omitempty"` + + // The app setting name that contains the app_secret value used for Facebook Login. 
+ // The app setting name that contains the `app_secret` value used for Facebook Login. + AppSecretSettingName *string `json:"appSecretSettingName,omitempty" tf:"app_secret_setting_name,omitempty"` + + // The version of the Facebook API to be used while logging in. + // The version of the Facebook API to be used while logging in. + GraphAPIVersion *string `json:"graphApiVersion,omitempty" tf:"graph_api_version,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of scopes to be requested as part of Facebook Login authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type AuthSettingsV2FacebookV2Observation struct { + + // The App ID of the Facebook app used for login. + // The App ID of the Facebook app used for login. + AppID *string `json:"appId,omitempty" tf:"app_id,omitempty"` + + // The app setting name that contains the app_secret value used for Facebook Login. + // The app setting name that contains the `app_secret` value used for Facebook Login. + AppSecretSettingName *string `json:"appSecretSettingName,omitempty" tf:"app_secret_setting_name,omitempty"` + + // The version of the Facebook API to be used while logging in. + // The version of the Facebook API to be used while logging in. + GraphAPIVersion *string `json:"graphApiVersion,omitempty" tf:"graph_api_version,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of scopes to be requested as part of Facebook Login authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type AuthSettingsV2FacebookV2Parameters struct { + + // The App ID of the Facebook app used for login. + // The App ID of the Facebook app used for login. 
+ // +kubebuilder:validation:Optional + AppID *string `json:"appId" tf:"app_id,omitempty"` + + // The app setting name that contains the app_secret value used for Facebook Login. + // The app setting name that contains the `app_secret` value used for Facebook Login. + // +kubebuilder:validation:Optional + AppSecretSettingName *string `json:"appSecretSettingName" tf:"app_secret_setting_name,omitempty"` + + // The version of the Facebook API to be used while logging in. + // The version of the Facebook API to be used while logging in. + // +kubebuilder:validation:Optional + GraphAPIVersion *string `json:"graphApiVersion,omitempty" tf:"graph_api_version,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of scopes to be requested as part of Facebook Login authentication. + // +kubebuilder:validation:Optional + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type AuthSettingsV2GithubV2InitParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the GitHub app used for login. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for GitHub Login. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. 
+	LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"`
+}
+
+type AuthSettingsV2GithubV2Observation struct {
+
+	// The OAuth 2.0 client ID that was created for the app used for authentication.
+	// The ID of the GitHub app used for login.
+	ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"`
+
+	// The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication.
+	// The app setting name that contains the `client_secret` value used for GitHub Login.
+	ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"`
+
+	// The list of Login scopes that should be requested as part of GitHub Login authentication.
+	// Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication.
+	LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"`
+}
+
+type AuthSettingsV2GithubV2Parameters struct {
+
+	// The OAuth 2.0 client ID that was created for the app used for authentication.
+	// The ID of the GitHub app used for login.
+	// +kubebuilder:validation:Optional
+	ClientID *string `json:"clientId" tf:"client_id,omitempty"`
+
+	// The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication.
+	// The app setting name that contains the `client_secret` value used for GitHub Login.
+	// +kubebuilder:validation:Optional
+	ClientSecretSettingName *string `json:"clientSecretSettingName" tf:"client_secret_setting_name,omitempty"`
+
+	// The list of Login scopes that should be requested as part of GitHub Login authentication.
+	// Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication.
+	// +kubebuilder:validation:Optional
+	LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"`
+}
+
+type AuthSettingsV2GoogleV2InitParameters struct {
+
+	// Specifies a list of Allowed Audiences that will be requested as part of Google Sign-In authentication.
+	// Specifies a list of Allowed Audiences that will be requested as part of Google Sign-In authentication.
+	AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"`
+
+	// The OAuth 2.0 client ID that was created for the app used for authentication.
+	// The OpenID Connect Client ID for the Google web application.
+	ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"`
+
+	// The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication.
+	// The app setting name that contains the `client_secret` value used for Google Login.
+	ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"`
+
+	// The list of Login scopes that should be requested as part of Google Sign-In authentication.
+	// Specifies a list of Login scopes that will be requested as part of Google Sign-In authentication.
+	LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"`
+}
+
+type AuthSettingsV2GoogleV2Observation struct {
+
+	// Specifies a list of Allowed Audiences that will be requested as part of Google Sign-In authentication.
+	// Specifies a list of Allowed Audiences that will be requested as part of Google Sign-In authentication.
+	AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"`
+
+	// The OAuth 2.0 client ID that was created for the app used for authentication.
+	// The OpenID Connect Client ID for the Google web application.
+	ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"`
+
+	// The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication.
+	// The app setting name that contains the `client_secret` value used for Google Login.
+	ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"`
+
+	// The list of Login scopes that should be requested as part of Google Sign-In authentication.
+	// Specifies a list of Login scopes that will be requested as part of Google Sign-In authentication.
+	LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"`
+}
+
+type AuthSettingsV2GoogleV2Parameters struct {
+
+	// Specifies a list of Allowed Audiences that will be requested as part of Google Sign-In authentication.
+	// Specifies a list of Allowed Audiences that will be requested as part of Google Sign-In authentication.
+	// +kubebuilder:validation:Optional
+	AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"`
+
+	// The OAuth 2.0 client ID that was created for the app used for authentication.
+	// The OpenID Connect Client ID for the Google web application.
+	// +kubebuilder:validation:Optional
+	ClientID *string `json:"clientId" tf:"client_id,omitempty"`
+
+	// The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication.
+	// The app setting name that contains the `client_secret` value used for Google Login.
+	// +kubebuilder:validation:Optional
+	ClientSecretSettingName *string `json:"clientSecretSettingName" tf:"client_secret_setting_name,omitempty"`
+
+	// The list of Login scopes that should be requested as part of Google Sign-In authentication.
+	// Specifies a list of Login scopes that will be requested as part of Google Sign-In authentication.
+ // +kubebuilder:validation:Optional + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type AuthSettingsV2LoginInitParameters struct { + + // External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. + // External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. **Note:** URLs within the current domain are always implicitly allowed. + AllowedExternalRedirectUrls []*string `json:"allowedExternalRedirectUrls,omitempty" tf:"allowed_external_redirect_urls,omitempty"` + + // The method by which cookies expire. Possible values include: FixedTime, and IdentityProviderDerived. Defaults to FixedTime. + // The method by which cookies expire. Possible values include: `FixedTime`, and `IdentityProviderDerived`. Defaults to `FixedTime`. + CookieExpirationConvention *string `json:"cookieExpirationConvention,omitempty" tf:"cookie_expiration_convention,omitempty"` + + // The time after the request is made when the session cookie should expire. Defaults to 08:00:00. + // The time after the request is made when the session cookie should expire. Defaults to `08:00:00`. + CookieExpirationTime *string `json:"cookieExpirationTime,omitempty" tf:"cookie_expiration_time,omitempty"` + + // The endpoint to which logout requests should be made. + // The endpoint to which logout requests should be made. + LogoutEndpoint *string `json:"logoutEndpoint,omitempty" tf:"logout_endpoint,omitempty"` + + // The time after the request is made when the nonce should expire. Defaults to 00:05:00. + // The time after the request is made when the nonce should expire. Defaults to `00:05:00`. 
+ NonceExpirationTime *string `json:"nonceExpirationTime,omitempty" tf:"nonce_expiration_time,omitempty"` + + // Should the fragments from the request be preserved after the login request is made. Defaults to false. + // Should the fragments from the request be preserved after the login request is made. Defaults to `false`. + PreserveURLFragmentsForLogins *bool `json:"preserveUrlFragmentsForLogins,omitempty" tf:"preserve_url_fragments_for_logins,omitempty"` + + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + TokenRefreshExtensionTime *float64 `json:"tokenRefreshExtensionTime,omitempty" tf:"token_refresh_extension_time,omitempty"` + + // Should the Token Store configuration Enabled. Defaults to false + // Should the Token Store configuration Enabled. Defaults to `false` + TokenStoreEnabled *bool `json:"tokenStoreEnabled,omitempty" tf:"token_store_enabled,omitempty"` + + // The directory path in the App Filesystem in which the tokens will be stored. + // The directory path in the App Filesystem in which the tokens will be stored. + TokenStorePath *string `json:"tokenStorePath,omitempty" tf:"token_store_path,omitempty"` + + // The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + // The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + TokenStoreSASSettingName *string `json:"tokenStoreSasSettingName,omitempty" tf:"token_store_sas_setting_name,omitempty"` + + // Should the nonce be validated while completing the login flow. Defaults to true. + // Should the nonce be validated while completing the login flow. Defaults to `true`. 
+ ValidateNonce *bool `json:"validateNonce,omitempty" tf:"validate_nonce,omitempty"` +} + +type AuthSettingsV2LoginObservation struct { + + // External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. + // External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. **Note:** URLs within the current domain are always implicitly allowed. + AllowedExternalRedirectUrls []*string `json:"allowedExternalRedirectUrls,omitempty" tf:"allowed_external_redirect_urls,omitempty"` + + // The method by which cookies expire. Possible values include: FixedTime, and IdentityProviderDerived. Defaults to FixedTime. + // The method by which cookies expire. Possible values include: `FixedTime`, and `IdentityProviderDerived`. Defaults to `FixedTime`. + CookieExpirationConvention *string `json:"cookieExpirationConvention,omitempty" tf:"cookie_expiration_convention,omitempty"` + + // The time after the request is made when the session cookie should expire. Defaults to 08:00:00. + // The time after the request is made when the session cookie should expire. Defaults to `08:00:00`. + CookieExpirationTime *string `json:"cookieExpirationTime,omitempty" tf:"cookie_expiration_time,omitempty"` + + // The endpoint to which logout requests should be made. + // The endpoint to which logout requests should be made. + LogoutEndpoint *string `json:"logoutEndpoint,omitempty" tf:"logout_endpoint,omitempty"` + + // The time after the request is made when the nonce should expire. Defaults to 00:05:00. + // The time after the request is made when the nonce should expire. Defaults to `00:05:00`. 
+ NonceExpirationTime *string `json:"nonceExpirationTime,omitempty" tf:"nonce_expiration_time,omitempty"` + + // Should the fragments from the request be preserved after the login request is made. Defaults to false. + // Should the fragments from the request be preserved after the login request is made. Defaults to `false`. + PreserveURLFragmentsForLogins *bool `json:"preserveUrlFragmentsForLogins,omitempty" tf:"preserve_url_fragments_for_logins,omitempty"` + + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + TokenRefreshExtensionTime *float64 `json:"tokenRefreshExtensionTime,omitempty" tf:"token_refresh_extension_time,omitempty"` + + // Should the Token Store configuration Enabled. Defaults to false + // Should the Token Store configuration Enabled. Defaults to `false` + TokenStoreEnabled *bool `json:"tokenStoreEnabled,omitempty" tf:"token_store_enabled,omitempty"` + + // The directory path in the App Filesystem in which the tokens will be stored. + // The directory path in the App Filesystem in which the tokens will be stored. + TokenStorePath *string `json:"tokenStorePath,omitempty" tf:"token_store_path,omitempty"` + + // The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + // The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + TokenStoreSASSettingName *string `json:"tokenStoreSasSettingName,omitempty" tf:"token_store_sas_setting_name,omitempty"` + + // Should the nonce be validated while completing the login flow. Defaults to true. + // Should the nonce be validated while completing the login flow. Defaults to `true`. 
+ ValidateNonce *bool `json:"validateNonce,omitempty" tf:"validate_nonce,omitempty"` +} + +type AuthSettingsV2LoginParameters struct { + + // External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. + // External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. **Note:** URLs within the current domain are always implicitly allowed. + // +kubebuilder:validation:Optional + AllowedExternalRedirectUrls []*string `json:"allowedExternalRedirectUrls,omitempty" tf:"allowed_external_redirect_urls,omitempty"` + + // The method by which cookies expire. Possible values include: FixedTime, and IdentityProviderDerived. Defaults to FixedTime. + // The method by which cookies expire. Possible values include: `FixedTime`, and `IdentityProviderDerived`. Defaults to `FixedTime`. + // +kubebuilder:validation:Optional + CookieExpirationConvention *string `json:"cookieExpirationConvention,omitempty" tf:"cookie_expiration_convention,omitempty"` + + // The time after the request is made when the session cookie should expire. Defaults to 08:00:00. + // The time after the request is made when the session cookie should expire. Defaults to `08:00:00`. + // +kubebuilder:validation:Optional + CookieExpirationTime *string `json:"cookieExpirationTime,omitempty" tf:"cookie_expiration_time,omitempty"` + + // The endpoint to which logout requests should be made. + // The endpoint to which logout requests should be made. + // +kubebuilder:validation:Optional + LogoutEndpoint *string `json:"logoutEndpoint,omitempty" tf:"logout_endpoint,omitempty"` + + // The time after the request is made when the nonce should expire. Defaults to 00:05:00. + // The time after the request is made when the nonce should expire. Defaults to `00:05:00`. 
+ // +kubebuilder:validation:Optional + NonceExpirationTime *string `json:"nonceExpirationTime,omitempty" tf:"nonce_expiration_time,omitempty"` + + // Should the fragments from the request be preserved after the login request is made. Defaults to false. + // Should the fragments from the request be preserved after the login request is made. Defaults to `false`. + // +kubebuilder:validation:Optional + PreserveURLFragmentsForLogins *bool `json:"preserveUrlFragmentsForLogins,omitempty" tf:"preserve_url_fragments_for_logins,omitempty"` + + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + // +kubebuilder:validation:Optional + TokenRefreshExtensionTime *float64 `json:"tokenRefreshExtensionTime,omitempty" tf:"token_refresh_extension_time,omitempty"` + + // Should the Token Store configuration Enabled. Defaults to false + // Should the Token Store configuration Enabled. Defaults to `false` + // +kubebuilder:validation:Optional + TokenStoreEnabled *bool `json:"tokenStoreEnabled,omitempty" tf:"token_store_enabled,omitempty"` + + // The directory path in the App Filesystem in which the tokens will be stored. + // The directory path in the App Filesystem in which the tokens will be stored. + // +kubebuilder:validation:Optional + TokenStorePath *string `json:"tokenStorePath,omitempty" tf:"token_store_path,omitempty"` + + // The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + // The name of the app setting which contains the SAS URL of the blob storage containing the tokens. 
+ // +kubebuilder:validation:Optional + TokenStoreSASSettingName *string `json:"tokenStoreSasSettingName,omitempty" tf:"token_store_sas_setting_name,omitempty"` + + // Should the nonce be validated while completing the login flow. Defaults to true. + // Should the nonce be validated while completing the login flow. Defaults to `true`. + // +kubebuilder:validation:Optional + ValidateNonce *bool `json:"validateNonce,omitempty" tf:"validate_nonce,omitempty"` +} + +type AuthSettingsV2MicrosoftV2InitParameters struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OAuth 2.0 client ID that was created for the app used for authentication. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // The list of Login scopes that will be requested as part of Microsoft Account authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type AuthSettingsV2MicrosoftV2Observation struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. 
+ // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OAuth 2.0 client ID that was created for the app used for authentication. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // The list of Login scopes that will be requested as part of Microsoft Account authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type AuthSettingsV2MicrosoftV2Parameters struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // +kubebuilder:validation:Optional + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OAuth 2.0 client ID that was created for the app used for authentication. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. 
+ // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // The list of Login scopes that will be requested as part of Microsoft Account authentication. + // +kubebuilder:validation:Optional + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type AuthSettingsV2TwitterV2InitParameters struct { + + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + ConsumerKey *string `json:"consumerKey,omitempty" tf:"consumer_key,omitempty"` + + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + ConsumerSecretSettingName *string `json:"consumerSecretSettingName,omitempty" tf:"consumer_secret_setting_name,omitempty"` +} + +type AuthSettingsV2TwitterV2Observation struct { + + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + ConsumerKey *string `json:"consumerKey,omitempty" tf:"consumer_key,omitempty"` + + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. 
+ ConsumerSecretSettingName *string `json:"consumerSecretSettingName,omitempty" tf:"consumer_secret_setting_name,omitempty"` +} + +type AuthSettingsV2TwitterV2Parameters struct { + + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // +kubebuilder:validation:Optional + ConsumerKey *string `json:"consumerKey" tf:"consumer_key,omitempty"` + + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + // +kubebuilder:validation:Optional + ConsumerSecretSettingName *string `json:"consumerSecretSettingName" tf:"consumer_secret_setting_name,omitempty"` +} + +type BackupScheduleInitParameters struct { + + // How often the backup should be executed (e.g. for weekly backup, this should be set to 7 and frequency_unit should be set to Day). + // How often the backup should be executed (e.g. for weekly backup, this should be set to `7` and `frequency_unit` should be set to `Day`). + FrequencyInterval *float64 `json:"frequencyInterval,omitempty" tf:"frequency_interval,omitempty"` + + // The unit of time for how often the backup should take place. Possible values include: Day and Hour. + // The unit of time for how often the backup should take place. Possible values include: `Day` and `Hour`. + FrequencyUnit *string `json:"frequencyUnit,omitempty" tf:"frequency_unit,omitempty"` + + // Should the service keep at least one backup, regardless of age of backup. Defaults to false. + // Should the service keep at least one backup, regardless of age of backup. Defaults to `false`. + KeepAtLeastOneBackup *bool `json:"keepAtLeastOneBackup,omitempty" tf:"keep_at_least_one_backup,omitempty"` + + // After how many days backups should be deleted. Defaults to 30. + // After how many days backups should be deleted. 
+ RetentionPeriodDays *float64 `json:"retentionPeriodDays,omitempty" tf:"retention_period_days,omitempty"` + + // When the schedule should start working in RFC-3339 format. + // When the schedule should start working in RFC-3339 format. + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` +} + +type BackupScheduleObservation struct { + + // How often the backup should be executed (e.g. for weekly backup, this should be set to 7 and frequency_unit should be set to Day). + // How often the backup should be executed (e.g. for weekly backup, this should be set to `7` and `frequency_unit` should be set to `Day`). + FrequencyInterval *float64 `json:"frequencyInterval,omitempty" tf:"frequency_interval,omitempty"` + + // The unit of time for how often the backup should take place. Possible values include: Day and Hour. + // The unit of time for how often the backup should take place. Possible values include: `Day` and `Hour`. + FrequencyUnit *string `json:"frequencyUnit,omitempty" tf:"frequency_unit,omitempty"` + + // Should the service keep at least one backup, regardless of age of backup. Defaults to false. + // Should the service keep at least one backup, regardless of age of backup. Defaults to `false`. + KeepAtLeastOneBackup *bool `json:"keepAtLeastOneBackup,omitempty" tf:"keep_at_least_one_backup,omitempty"` + + // The time the backup was last attempted. + // The time the backup was last attempted. + LastExecutionTime *string `json:"lastExecutionTime,omitempty" tf:"last_execution_time,omitempty"` + + // After how many days backups should be deleted. Defaults to 30. + // After how many days backups should be deleted. + RetentionPeriodDays *float64 `json:"retentionPeriodDays,omitempty" tf:"retention_period_days,omitempty"` + + // When the schedule should start working in RFC-3339 format. + // When the schedule should start working in RFC-3339 format. 
+	StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"`
+}
+
+type BackupScheduleParameters struct {
+
+	// How often the backup should be executed (e.g. for weekly backup, this should be set to 7 and frequency_unit should be set to Day).
+	// How often the backup should be executed (e.g. for weekly backup, this should be set to `7` and `frequency_unit` should be set to `Day`).
+	// +kubebuilder:validation:Optional
+	FrequencyInterval *float64 `json:"frequencyInterval" tf:"frequency_interval,omitempty"`
+
+	// The unit of time for how often the backup should take place. Possible values include: Day and Hour.
+	// The unit of time for how often the backup should take place. Possible values include: `Day` and `Hour`.
+	// +kubebuilder:validation:Optional
+	FrequencyUnit *string `json:"frequencyUnit" tf:"frequency_unit,omitempty"`
+
+	// Should the service keep at least one backup, regardless of age of backup. Defaults to false.
+	// Should the service keep at least one backup, regardless of age of backup. Defaults to `false`.
+	// +kubebuilder:validation:Optional
+	KeepAtLeastOneBackup *bool `json:"keepAtLeastOneBackup,omitempty" tf:"keep_at_least_one_backup,omitempty"`
+
+	// After how many days backups should be deleted. Defaults to 30.
+	// After how many days backups should be deleted.
+	// +kubebuilder:validation:Optional
+	RetentionPeriodDays *float64 `json:"retentionPeriodDays,omitempty" tf:"retention_period_days,omitempty"`
+
+	// When the schedule should start working in RFC-3339 format.
+	// When the schedule should start working in RFC-3339 format.
+	// +kubebuilder:validation:Optional
+	StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"`
+}
+
+type LinuxFunctionAppSlotAuthSettingsActiveDirectoryInitParameters struct {
+
+	// Specifies a list of Allowed Audiences to consider when validating JWTs issued by Azure Active Directory.
+	// Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory.
+	AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"`
+
+	// The OAuth 2.0 client ID that was created for the app used for authentication.
+	// The ID of the Client to use to authenticate with Azure Active Directory.
+	ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"`
+
+	// The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication.
+	// The App Setting name that contains the client secret of the Client. Cannot be used with `client_secret`.
+	ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"`
+}
+
+type LinuxFunctionAppSlotAuthSettingsActiveDirectoryObservation struct {
+
+	// Specifies a list of Allowed Audiences to consider when validating JWTs issued by Azure Active Directory.
+	// Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory.
+	AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"`
+
+	// The OAuth 2.0 client ID that was created for the app used for authentication.
+	// The ID of the Client to use to authenticate with Azure Active Directory.
+	ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"`
+
+	// The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication.
+	// The App Setting name that contains the client secret of the Client. Cannot be used with `client_secret`.
+	ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"`
+}
+
+type LinuxFunctionAppSlotAuthSettingsActiveDirectoryParameters struct {
+
+	// Specifies a list of Allowed Audiences to consider when validating JWTs issued by Azure Active Directory.
+	// Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory.
+	// +kubebuilder:validation:Optional
+	AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"`
+
+	// The OAuth 2.0 client ID that was created for the app used for authentication.
+	// The ID of the Client to use to authenticate with Azure Active Directory.
+	// +kubebuilder:validation:Optional
+	ClientID *string `json:"clientId" tf:"client_id,omitempty"`
+
+	// The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name.
+	// The Client Secret for the Client ID. Cannot be used with `client_secret_setting_name`.
+	// +kubebuilder:validation:Optional
+	ClientSecretSecretRef *v1.SecretKeySelector `json:"clientSecretSecretRef,omitempty" tf:"-"`
+
+	// The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication.
+	// The App Setting name that contains the client secret of the Client. Cannot be used with `client_secret`.
+	// +kubebuilder:validation:Optional
+	ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"`
+}
+
+type LinuxFunctionAppSlotAuthSettingsFacebookInitParameters struct {
+
+	// The App ID of the Facebook app used for login.
+	// The App ID of the Facebook app used for login.
+	AppID *string `json:"appId,omitempty" tf:"app_id,omitempty"`
+
+	// The app setting name that contains the app_secret value used for Facebook Login.
+	// The app setting name that contains the `app_secret` value used for Facebook Login. Cannot be specified with `app_secret`.
+	AppSecretSettingName *string `json:"appSecretSettingName,omitempty" tf:"app_secret_setting_name,omitempty"`
+
+	// Specifies a list of OAuth 2.0 scopes to be requested as part of Facebook Login authentication.
+	// Specifies a list of OAuth 2.0 scopes to be requested as part of Facebook Login authentication.
+	OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"`
+}
+
+type LinuxFunctionAppSlotAuthSettingsFacebookObservation struct {
+
+	// The App ID of the Facebook app used for login.
+	// The App ID of the Facebook app used for login.
+	AppID *string `json:"appId,omitempty" tf:"app_id,omitempty"`
+
+	// The app setting name that contains the app_secret value used for Facebook Login.
+	// The app setting name that contains the `app_secret` value used for Facebook Login. Cannot be specified with `app_secret`.
+	AppSecretSettingName *string `json:"appSecretSettingName,omitempty" tf:"app_secret_setting_name,omitempty"`
+
+	// Specifies a list of OAuth 2.0 scopes to be requested as part of Facebook Login authentication.
+	// Specifies a list of OAuth 2.0 scopes to be requested as part of Facebook Login authentication.
+	OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"`
+}
+
+type LinuxFunctionAppSlotAuthSettingsFacebookParameters struct {
+
+	// The App ID of the Facebook app used for login.
+	// The App ID of the Facebook app used for login.
+	// +kubebuilder:validation:Optional
+	AppID *string `json:"appId" tf:"app_id,omitempty"`
+
+	// The App Secret of the Facebook app used for Facebook login. Cannot be specified with app_secret_setting_name.
+	// The App Secret of the Facebook app used for Facebook Login. Cannot be specified with `app_secret_setting_name`.
+	// +kubebuilder:validation:Optional
+	AppSecretSecretRef *v1.SecretKeySelector `json:"appSecretSecretRef,omitempty" tf:"-"`
+
+	// The app setting name that contains the app_secret value used for Facebook Login.
+	// The app setting name that contains the `app_secret` value used for Facebook Login. Cannot be specified with `app_secret`.
+	// +kubebuilder:validation:Optional
+	AppSecretSettingName *string `json:"appSecretSettingName,omitempty" tf:"app_secret_setting_name,omitempty"`
+
+	// Specifies a list of OAuth 2.0 scopes to be requested as part of Facebook Login authentication.
+	// Specifies a list of OAuth 2.0 scopes to be requested as part of Facebook Login authentication.
+	// +kubebuilder:validation:Optional
+	OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"`
+}
+
+type LinuxFunctionAppSlotAuthSettingsGoogleInitParameters struct {
+
+	// The OAuth 2.0 client ID that was created for the app used for authentication.
+	// The OpenID Connect Client ID for the Google web application.
+	ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"`
+
+	// The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication.
+	// The app setting name that contains the `client_secret` value used for Google Login. Cannot be specified with `client_secret`.
+	ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"`
+
+	// Specifies a list of OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication. If not specified, "openid", "profile", and "email" are used as default scopes.
+	// Specifies a list of OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication. If not specified, "openid", "profile", and "email" are used as default scopes.
+	OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"`
+}
+
+type LinuxFunctionAppSlotAuthSettingsGoogleObservation struct {
+
+	// The OAuth 2.0 client ID that was created for the app used for authentication.
+	// The OpenID Connect Client ID for the Google web application.
+ ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Google Login. Cannot be specified with `client_secret`. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication. If not specified, "openid", "profile", and "email" are used as default scopes. + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type LinuxFunctionAppSlotAuthSettingsGoogleParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Google web application. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. + // The client secret associated with the Google web application. Cannot be specified with `client_secret_setting_name`. + // +kubebuilder:validation:Optional + ClientSecretSecretRef *v1.SecretKeySelector `json:"clientSecretSecretRef,omitempty" tf:"-"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Google Login. Cannot be specified with `client_secret`. 
+ // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication. If not specified, "openid", "profile", and "email" are used as default scopes. + // +kubebuilder:validation:Optional + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type LinuxFunctionAppSlotAuthSettingsInitParameters struct { + + // an active_directory block as detailed below. + ActiveDirectory *LinuxFunctionAppSlotAuthSettingsActiveDirectoryInitParameters `json:"activeDirectory,omitempty" tf:"active_directory,omitempty"` + + // Specifies a map of login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + // Specifies a map of Login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + // +mapType=granular + AdditionalLoginParameters map[string]*string `json:"additionalLoginParameters,omitempty" tf:"additional_login_parameters,omitempty"` + + // Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + // Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + AllowedExternalRedirectUrls []*string `json:"allowedExternalRedirectUrls,omitempty" tf:"allowed_external_redirect_urls,omitempty"` + + // The default authentication provider to use when multiple providers are configured. Possible values include: AzureActiveDirectory, Facebook, Google, MicrosoftAccount, Twitter, Github. + // The default authentication provider to use when multiple providers are configured. 
Possible values include: `AzureActiveDirectory`, `Facebook`, `Google`, `MicrosoftAccount`, `Twitter`, `Github`. + DefaultProvider *string `json:"defaultProvider,omitempty" tf:"default_provider,omitempty"` + + // Should the Authentication / Authorization feature be enabled? + // Should the Authentication / Authorization feature be enabled? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // a facebook block as detailed below. + Facebook *LinuxFunctionAppSlotAuthSettingsFacebookInitParameters `json:"facebook,omitempty" tf:"facebook,omitempty"` + + // a github block as detailed below. + Github *AuthSettingsGithubInitParameters `json:"github,omitempty" tf:"github,omitempty"` + + // a google block as detailed below. + Google *LinuxFunctionAppSlotAuthSettingsGoogleInitParameters `json:"google,omitempty" tf:"google,omitempty"` + + // The OpenID Connect Issuer URI that represents the entity which issues access tokens. + // The OpenID Connect Issuer URI that represents the entity which issues access tokens. + Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"` + + // a microsoft block as detailed below. + Microsoft *LinuxFunctionAppSlotAuthSettingsMicrosoftInitParameters `json:"microsoft,omitempty" tf:"microsoft,omitempty"` + + // The RuntimeVersion of the Authentication / Authorization feature in use. + // The RuntimeVersion of the Authentication / Authorization feature in use. + RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` + + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. 
+ TokenRefreshExtensionHours *float64 `json:"tokenRefreshExtensionHours,omitempty" tf:"token_refresh_extension_hours,omitempty"` + + // Should the Linux Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to false. + // Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to `false`. + TokenStoreEnabled *bool `json:"tokenStoreEnabled,omitempty" tf:"token_store_enabled,omitempty"` + + // a twitter block as detailed below. + Twitter *LinuxFunctionAppSlotAuthSettingsTwitterInitParameters `json:"twitter,omitempty" tf:"twitter,omitempty"` + + // The action to take when an unauthenticated client attempts to access the app. Possible values include: RedirectToLoginPage, AllowAnonymous. + // The action to take when an unauthenticated client attempts to access the app. Possible values include: `RedirectToLoginPage`, `AllowAnonymous`. + UnauthenticatedClientAction *string `json:"unauthenticatedClientAction,omitempty" tf:"unauthenticated_client_action,omitempty"` +} + +type LinuxFunctionAppSlotAuthSettingsMicrosoftInitParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OAuth 2.0 client ID that was created for the app used for authentication. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret`. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. 
+ // The list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, `wl.basic` is used as the default scope. + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type LinuxFunctionAppSlotAuthSettingsMicrosoftObservation struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OAuth 2.0 client ID that was created for the app used for authentication. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret`. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + // The list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, `wl.basic` is used as the default scope. + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type LinuxFunctionAppSlotAuthSettingsMicrosoftParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OAuth 2.0 client ID that was created for the app used for authentication. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. + // The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret_setting_name`. 
+ // +kubebuilder:validation:Optional + ClientSecretSecretRef *v1.SecretKeySelector `json:"clientSecretSecretRef,omitempty" tf:"-"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret`. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + // The list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, `wl.basic` is used as the default scope. + // +kubebuilder:validation:Optional + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type LinuxFunctionAppSlotAuthSettingsObservation struct { + + // an active_directory block as detailed below. + ActiveDirectory *LinuxFunctionAppSlotAuthSettingsActiveDirectoryObservation `json:"activeDirectory,omitempty" tf:"active_directory,omitempty"` + + // Specifies a map of login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + // Specifies a map of Login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + // +mapType=granular + AdditionalLoginParameters map[string]*string `json:"additionalLoginParameters,omitempty" tf:"additional_login_parameters,omitempty"` + + // Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + // Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. 
+ AllowedExternalRedirectUrls []*string `json:"allowedExternalRedirectUrls,omitempty" tf:"allowed_external_redirect_urls,omitempty"` + + // The default authentication provider to use when multiple providers are configured. Possible values include: AzureActiveDirectory, Facebook, Google, MicrosoftAccount, Twitter, Github. + // The default authentication provider to use when multiple providers are configured. Possible values include: `AzureActiveDirectory`, `Facebook`, `Google`, `MicrosoftAccount`, `Twitter`, `Github`. + DefaultProvider *string `json:"defaultProvider,omitempty" tf:"default_provider,omitempty"` + + // Should the Authentication / Authorization feature be enabled? + // Should the Authentication / Authorization feature be enabled? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // a facebook block as detailed below. + Facebook *LinuxFunctionAppSlotAuthSettingsFacebookObservation `json:"facebook,omitempty" tf:"facebook,omitempty"` + + // a github block as detailed below. + Github *AuthSettingsGithubObservation `json:"github,omitempty" tf:"github,omitempty"` + + // a google block as detailed below. + Google *LinuxFunctionAppSlotAuthSettingsGoogleObservation `json:"google,omitempty" tf:"google,omitempty"` + + // The OpenID Connect Issuer URI that represents the entity which issues access tokens. + // The OpenID Connect Issuer URI that represents the entity which issues access tokens. + Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"` + + // a microsoft block as detailed below. + Microsoft *LinuxFunctionAppSlotAuthSettingsMicrosoftObservation `json:"microsoft,omitempty" tf:"microsoft,omitempty"` + + // The RuntimeVersion of the Authentication / Authorization feature in use. + // The RuntimeVersion of the Authentication / Authorization feature in use. 
+ RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` + + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + TokenRefreshExtensionHours *float64 `json:"tokenRefreshExtensionHours,omitempty" tf:"token_refresh_extension_hours,omitempty"` + + // Should the Linux Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to false. + // Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to `false`. + TokenStoreEnabled *bool `json:"tokenStoreEnabled,omitempty" tf:"token_store_enabled,omitempty"` + + // a twitter block as detailed below. + Twitter *LinuxFunctionAppSlotAuthSettingsTwitterObservation `json:"twitter,omitempty" tf:"twitter,omitempty"` + + // The action to take when an unauthenticated client attempts to access the app. Possible values include: RedirectToLoginPage, AllowAnonymous. + // The action to take when an unauthenticated client attempts to access the app. Possible values include: `RedirectToLoginPage`, `AllowAnonymous`. + UnauthenticatedClientAction *string `json:"unauthenticatedClientAction,omitempty" tf:"unauthenticated_client_action,omitempty"` +} + +type LinuxFunctionAppSlotAuthSettingsParameters struct { + + // an active_directory block as detailed below. + // +kubebuilder:validation:Optional + ActiveDirectory *LinuxFunctionAppSlotAuthSettingsActiveDirectoryParameters `json:"activeDirectory,omitempty" tf:"active_directory,omitempty"` + + // Specifies a map of login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + // Specifies a map of Login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + AdditionalLoginParameters map[string]*string `json:"additionalLoginParameters,omitempty" tf:"additional_login_parameters,omitempty"` + + // Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + // Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + // +kubebuilder:validation:Optional + AllowedExternalRedirectUrls []*string `json:"allowedExternalRedirectUrls,omitempty" tf:"allowed_external_redirect_urls,omitempty"` + + // The default authentication provider to use when multiple providers are configured. Possible values include: AzureActiveDirectory, Facebook, Google, MicrosoftAccount, Twitter, Github. + // The default authentication provider to use when multiple providers are configured. Possible values include: `AzureActiveDirectory`, `Facebook`, `Google`, `MicrosoftAccount`, `Twitter`, `Github`. + // +kubebuilder:validation:Optional + DefaultProvider *string `json:"defaultProvider,omitempty" tf:"default_provider,omitempty"` + + // Should the Authentication / Authorization feature be enabled? + // Should the Authentication / Authorization feature be enabled? + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` + + // a facebook block as detailed below. + // +kubebuilder:validation:Optional + Facebook *LinuxFunctionAppSlotAuthSettingsFacebookParameters `json:"facebook,omitempty" tf:"facebook,omitempty"` + + // a github block as detailed below. + // +kubebuilder:validation:Optional + Github *AuthSettingsGithubParameters `json:"github,omitempty" tf:"github,omitempty"` + + // a google block as detailed below. 
+ // +kubebuilder:validation:Optional + Google *LinuxFunctionAppSlotAuthSettingsGoogleParameters `json:"google,omitempty" tf:"google,omitempty"` + + // The OpenID Connect Issuer URI that represents the entity which issues access tokens. + // The OpenID Connect Issuer URI that represents the entity which issues access tokens. + // +kubebuilder:validation:Optional + Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"` + + // a microsoft block as detailed below. + // +kubebuilder:validation:Optional + Microsoft *LinuxFunctionAppSlotAuthSettingsMicrosoftParameters `json:"microsoft,omitempty" tf:"microsoft,omitempty"` + + // The RuntimeVersion of the Authentication / Authorization feature in use. + // The RuntimeVersion of the Authentication / Authorization feature in use. + // +kubebuilder:validation:Optional + RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` + + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + // +kubebuilder:validation:Optional + TokenRefreshExtensionHours *float64 `json:"tokenRefreshExtensionHours,omitempty" tf:"token_refresh_extension_hours,omitempty"` + + // Should the Linux Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to false. + // Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to `false`. + // +kubebuilder:validation:Optional + TokenStoreEnabled *bool `json:"tokenStoreEnabled,omitempty" tf:"token_store_enabled,omitempty"` + + // a twitter block as detailed below. 
+ // +kubebuilder:validation:Optional + Twitter *LinuxFunctionAppSlotAuthSettingsTwitterParameters `json:"twitter,omitempty" tf:"twitter,omitempty"` + + // The action to take when an unauthenticated client attempts to access the app. Possible values include: RedirectToLoginPage, AllowAnonymous. + // The action to take when an unauthenticated client attempts to access the app. Possible values include: `RedirectToLoginPage`, `AllowAnonymous`. + // +kubebuilder:validation:Optional + UnauthenticatedClientAction *string `json:"unauthenticatedClientAction,omitempty" tf:"unauthenticated_client_action,omitempty"` +} + +type LinuxFunctionAppSlotAuthSettingsTwitterInitParameters struct { + + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + ConsumerKey *string `json:"consumerKey,omitempty" tf:"consumer_key,omitempty"` + + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret`. + ConsumerSecretSettingName *string `json:"consumerSecretSettingName,omitempty" tf:"consumer_secret_setting_name,omitempty"` +} + +type LinuxFunctionAppSlotAuthSettingsTwitterObservation struct { + + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + ConsumerKey *string `json:"consumerKey,omitempty" tf:"consumer_key,omitempty"` + + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret`. 
+ ConsumerSecretSettingName *string `json:"consumerSecretSettingName,omitempty" tf:"consumer_secret_setting_name,omitempty"` +} + +type LinuxFunctionAppSlotAuthSettingsTwitterParameters struct { + + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // +kubebuilder:validation:Optional + ConsumerKey *string `json:"consumerKey" tf:"consumer_key,omitempty"` + + // The OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with consumer_secret_setting_name. + // The OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret_setting_name`. + // +kubebuilder:validation:Optional + ConsumerSecretSecretRef *v1.SecretKeySelector `json:"consumerSecretSecretRef,omitempty" tf:"-"` + + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret`. + // +kubebuilder:validation:Optional + ConsumerSecretSettingName *string `json:"consumerSecretSettingName,omitempty" tf:"consumer_secret_setting_name,omitempty"` +} + +type LinuxFunctionAppSlotAuthSettingsV2InitParameters struct { + + // An active_directory_v2 block as defined below. + ActiveDirectoryV2 *AuthSettingsV2ActiveDirectoryV2InitParameters `json:"activeDirectoryV2,omitempty" tf:"active_directory_v2,omitempty"` + + // An apple_v2 block as defined below. + AppleV2 *AuthSettingsV2AppleV2InitParameters `json:"appleV2,omitempty" tf:"apple_v2,omitempty"` + + // Should the AuthV2 Settings be enabled. Defaults to false. + // Should the AuthV2 Settings be enabled. Defaults to `false` + AuthEnabled *bool `json:"authEnabled,omitempty" tf:"auth_enabled,omitempty"` + + // An azure_static_web_app_v2 block as defined below. 
+ AzureStaticWebAppV2 *AuthSettingsV2AzureStaticWebAppV2InitParameters `json:"azureStaticWebAppV2,omitempty" tf:"azure_static_web_app_v2,omitempty"` + + // The path to the App Auth settings. + // The path to the App Auth settings. **Note:** Relative Paths are evaluated from the Site Root directory. + ConfigFilePath *string `json:"configFilePath,omitempty" tf:"config_file_path,omitempty"` + + // Zero or more custom_oidc_v2 blocks as defined below. + CustomOidcV2 []AuthSettingsV2CustomOidcV2InitParameters `json:"customOidcV2,omitempty" tf:"custom_oidc_v2,omitempty"` + + // The Default Authentication Provider to use when the unauthenticated_action is set to RedirectToLoginPage. Possible values include: apple, azureactivedirectory, facebook, github, google, twitter and the name of your custom_oidc_v2 provider. + // The Default Authentication Provider to use when the `unauthenticated_action` is set to `RedirectToLoginPage`. Possible values include: `apple`, `azureactivedirectory`, `facebook`, `github`, `google`, `twitter` and the `name` of your `custom_oidc_v2` provider. + DefaultProvider *string `json:"defaultProvider,omitempty" tf:"default_provider,omitempty"` + + // The paths which should be excluded from the unauthenticated_action when it is set to RedirectToLoginPage. + // The paths which should be excluded from the `unauthenticated_action` when it is set to `RedirectToLoginPage`. + ExcludedPaths []*string `json:"excludedPaths,omitempty" tf:"excluded_paths,omitempty"` + + // A facebook_v2 block as defined below. + FacebookV2 *AuthSettingsV2FacebookV2InitParameters `json:"facebookV2,omitempty" tf:"facebook_v2,omitempty"` + + // The convention used to determine the url of the request made. Possible values include NoProxy, Standard, Custom. Defaults to NoProxy. + // The convention used to determine the url of the request made. Possible values include `ForwardProxyConventionNoProxy`, `ForwardProxyConventionStandard`, `ForwardProxyConventionCustom`. 
Defaults to `ForwardProxyConventionNoProxy` + ForwardProxyConvention *string `json:"forwardProxyConvention,omitempty" tf:"forward_proxy_convention,omitempty"` + + // The name of the custom header containing the host of the request. + // The name of the header containing the host of the request. + ForwardProxyCustomHostHeaderName *string `json:"forwardProxyCustomHostHeaderName,omitempty" tf:"forward_proxy_custom_host_header_name,omitempty"` + + // The name of the custom header containing the scheme of the request. + // The name of the header containing the scheme of the request. + ForwardProxyCustomSchemeHeaderName *string `json:"forwardProxyCustomSchemeHeaderName,omitempty" tf:"forward_proxy_custom_scheme_header_name,omitempty"` + + // A github_v2 block as defined below. + GithubV2 *AuthSettingsV2GithubV2InitParameters `json:"githubV2,omitempty" tf:"github_v2,omitempty"` + + // A google_v2 block as defined below. + GoogleV2 *AuthSettingsV2GoogleV2InitParameters `json:"googleV2,omitempty" tf:"google_v2,omitempty"` + + // The prefix that should precede all the authentication and authorisation paths. Defaults to /.auth. + // The prefix that should precede all the authentication and authorisation paths. Defaults to `/.auth` + HTTPRouteAPIPrefix *string `json:"httpRouteApiPrefix,omitempty" tf:"http_route_api_prefix,omitempty"` + + // A login block as defined below. + Login *AuthSettingsV2LoginInitParameters `json:"login,omitempty" tf:"login,omitempty"` + + // A microsoft_v2 block as defined below. + MicrosoftV2 *AuthSettingsV2MicrosoftV2InitParameters `json:"microsoftV2,omitempty" tf:"microsoft_v2,omitempty"` + + // Should the authentication flow be used for all requests. + // Should the authentication flow be used for all requests. + RequireAuthentication *bool `json:"requireAuthentication,omitempty" tf:"require_authentication,omitempty"` + + // Should HTTPS be required on connections? Defaults to true. + // Should HTTPS be required on connections? Defaults to true. 
+ RequireHTTPS *bool `json:"requireHttps,omitempty" tf:"require_https,omitempty"` + + // The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to ~1. + // The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to `~1` + RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` + + // A twitter_v2 block as defined below. + TwitterV2 *AuthSettingsV2TwitterV2InitParameters `json:"twitterV2,omitempty" tf:"twitter_v2,omitempty"` + + // The action to take for requests made without authentication. Possible values include RedirectToLoginPage, AllowAnonymous, Return401, and Return403. Defaults to RedirectToLoginPage. + // The action to take for requests made without authentication. Possible values include `RedirectToLoginPage`, `AllowAnonymous`, `Return401`, and `Return403`. Defaults to `RedirectToLoginPage`. + UnauthenticatedAction *string `json:"unauthenticatedAction,omitempty" tf:"unauthenticated_action,omitempty"` +} + +type LinuxFunctionAppSlotAuthSettingsV2Observation struct { + + // An active_directory_v2 block as defined below. + ActiveDirectoryV2 *AuthSettingsV2ActiveDirectoryV2Observation `json:"activeDirectoryV2,omitempty" tf:"active_directory_v2,omitempty"` + + // An apple_v2 block as defined below. + AppleV2 *AuthSettingsV2AppleV2Observation `json:"appleV2,omitempty" tf:"apple_v2,omitempty"` + + // Should the AuthV2 Settings be enabled. Defaults to false. + // Should the AuthV2 Settings be enabled. Defaults to `false` + AuthEnabled *bool `json:"authEnabled,omitempty" tf:"auth_enabled,omitempty"` + + // An azure_static_web_app_v2 block as defined below. + AzureStaticWebAppV2 *AuthSettingsV2AzureStaticWebAppV2Observation `json:"azureStaticWebAppV2,omitempty" tf:"azure_static_web_app_v2,omitempty"` + + // The path to the App Auth settings. + // The path to the App Auth settings. **Note:** Relative Paths are evaluated from the Site Root directory. 
+ ConfigFilePath *string `json:"configFilePath,omitempty" tf:"config_file_path,omitempty"` + + // Zero or more custom_oidc_v2 blocks as defined below. + CustomOidcV2 []AuthSettingsV2CustomOidcV2Observation `json:"customOidcV2,omitempty" tf:"custom_oidc_v2,omitempty"` + + // The Default Authentication Provider to use when the unauthenticated_action is set to RedirectToLoginPage. Possible values include: apple, azureactivedirectory, facebook, github, google, twitter and the name of your custom_oidc_v2 provider. + // The Default Authentication Provider to use when the `unauthenticated_action` is set to `RedirectToLoginPage`. Possible values include: `apple`, `azureactivedirectory`, `facebook`, `github`, `google`, `twitter` and the `name` of your `custom_oidc_v2` provider. + DefaultProvider *string `json:"defaultProvider,omitempty" tf:"default_provider,omitempty"` + + // The paths which should be excluded from the unauthenticated_action when it is set to RedirectToLoginPage. + // The paths which should be excluded from the `unauthenticated_action` when it is set to `RedirectToLoginPage`. + ExcludedPaths []*string `json:"excludedPaths,omitempty" tf:"excluded_paths,omitempty"` + + // A facebook_v2 block as defined below. + FacebookV2 *AuthSettingsV2FacebookV2Observation `json:"facebookV2,omitempty" tf:"facebook_v2,omitempty"` + + // The convention used to determine the url of the request made. Possible values include NoProxy, Standard, Custom. Defaults to NoProxy. + // The convention used to determine the url of the request made. Possible values include `ForwardProxyConventionNoProxy`, `ForwardProxyConventionStandard`, `ForwardProxyConventionCustom`. Defaults to `ForwardProxyConventionNoProxy` + ForwardProxyConvention *string `json:"forwardProxyConvention,omitempty" tf:"forward_proxy_convention,omitempty"` + + // The name of the custom header containing the host of the request. + // The name of the header containing the host of the request. 
+ ForwardProxyCustomHostHeaderName *string `json:"forwardProxyCustomHostHeaderName,omitempty" tf:"forward_proxy_custom_host_header_name,omitempty"` + + // The name of the custom header containing the scheme of the request. + // The name of the header containing the scheme of the request. + ForwardProxyCustomSchemeHeaderName *string `json:"forwardProxyCustomSchemeHeaderName,omitempty" tf:"forward_proxy_custom_scheme_header_name,omitempty"` + + // A github_v2 block as defined below. + GithubV2 *AuthSettingsV2GithubV2Observation `json:"githubV2,omitempty" tf:"github_v2,omitempty"` + + // A google_v2 block as defined below. + GoogleV2 *AuthSettingsV2GoogleV2Observation `json:"googleV2,omitempty" tf:"google_v2,omitempty"` + + // The prefix that should precede all the authentication and authorisation paths. Defaults to /.auth. + // The prefix that should precede all the authentication and authorisation paths. Defaults to `/.auth` + HTTPRouteAPIPrefix *string `json:"httpRouteApiPrefix,omitempty" tf:"http_route_api_prefix,omitempty"` + + // A login block as defined below. + Login *AuthSettingsV2LoginObservation `json:"login,omitempty" tf:"login,omitempty"` + + // A microsoft_v2 block as defined below. + MicrosoftV2 *AuthSettingsV2MicrosoftV2Observation `json:"microsoftV2,omitempty" tf:"microsoft_v2,omitempty"` + + // Should the authentication flow be used for all requests. + // Should the authentication flow be used for all requests. + RequireAuthentication *bool `json:"requireAuthentication,omitempty" tf:"require_authentication,omitempty"` + + // Should HTTPS be required on connections? Defaults to true. + // Should HTTPS be required on connections? Defaults to true. + RequireHTTPS *bool `json:"requireHttps,omitempty" tf:"require_https,omitempty"` + + // The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to ~1. + // The Runtime Version of the Authentication and Authorisation feature of this App. 
Defaults to `~1` + RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` + + // A twitter_v2 block as defined below. + TwitterV2 *AuthSettingsV2TwitterV2Observation `json:"twitterV2,omitempty" tf:"twitter_v2,omitempty"` + + // The action to take for requests made without authentication. Possible values include RedirectToLoginPage, AllowAnonymous, Return401, and Return403. Defaults to RedirectToLoginPage. + // The action to take for requests made without authentication. Possible values include `RedirectToLoginPage`, `AllowAnonymous`, `Return401`, and `Return403`. Defaults to `RedirectToLoginPage`. + UnauthenticatedAction *string `json:"unauthenticatedAction,omitempty" tf:"unauthenticated_action,omitempty"` +} + +type LinuxFunctionAppSlotAuthSettingsV2Parameters struct { + + // An active_directory_v2 block as defined below. + // +kubebuilder:validation:Optional + ActiveDirectoryV2 *AuthSettingsV2ActiveDirectoryV2Parameters `json:"activeDirectoryV2,omitempty" tf:"active_directory_v2,omitempty"` + + // An apple_v2 block as defined below. + // +kubebuilder:validation:Optional + AppleV2 *AuthSettingsV2AppleV2Parameters `json:"appleV2,omitempty" tf:"apple_v2,omitempty"` + + // Should the AuthV2 Settings be enabled. Defaults to false. + // Should the AuthV2 Settings be enabled. Defaults to `false` + // +kubebuilder:validation:Optional + AuthEnabled *bool `json:"authEnabled,omitempty" tf:"auth_enabled,omitempty"` + + // An azure_static_web_app_v2 block as defined below. + // +kubebuilder:validation:Optional + AzureStaticWebAppV2 *AuthSettingsV2AzureStaticWebAppV2Parameters `json:"azureStaticWebAppV2,omitempty" tf:"azure_static_web_app_v2,omitempty"` + + // The path to the App Auth settings. + // The path to the App Auth settings. **Note:** Relative Paths are evaluated from the Site Root directory. 
+ // +kubebuilder:validation:Optional + ConfigFilePath *string `json:"configFilePath,omitempty" tf:"config_file_path,omitempty"` + + // Zero or more custom_oidc_v2 blocks as defined below. + // +kubebuilder:validation:Optional + CustomOidcV2 []AuthSettingsV2CustomOidcV2Parameters `json:"customOidcV2,omitempty" tf:"custom_oidc_v2,omitempty"` + + // The Default Authentication Provider to use when the unauthenticated_action is set to RedirectToLoginPage. Possible values include: apple, azureactivedirectory, facebook, github, google, twitter and the name of your custom_oidc_v2 provider. + // The Default Authentication Provider to use when the `unauthenticated_action` is set to `RedirectToLoginPage`. Possible values include: `apple`, `azureactivedirectory`, `facebook`, `github`, `google`, `twitter` and the `name` of your `custom_oidc_v2` provider. + // +kubebuilder:validation:Optional + DefaultProvider *string `json:"defaultProvider,omitempty" tf:"default_provider,omitempty"` + + // The paths which should be excluded from the unauthenticated_action when it is set to RedirectToLoginPage. + // The paths which should be excluded from the `unauthenticated_action` when it is set to `RedirectToLoginPage`. + // +kubebuilder:validation:Optional + ExcludedPaths []*string `json:"excludedPaths,omitempty" tf:"excluded_paths,omitempty"` + + // A facebook_v2 block as defined below. + // +kubebuilder:validation:Optional + FacebookV2 *AuthSettingsV2FacebookV2Parameters `json:"facebookV2,omitempty" tf:"facebook_v2,omitempty"` + + // The convention used to determine the url of the request made. Possible values include NoProxy, Standard, Custom. Defaults to NoProxy. + // The convention used to determine the url of the request made. Possible values include `ForwardProxyConventionNoProxy`, `ForwardProxyConventionStandard`, `ForwardProxyConventionCustom`. 
Defaults to `ForwardProxyConventionNoProxy` + // +kubebuilder:validation:Optional + ForwardProxyConvention *string `json:"forwardProxyConvention,omitempty" tf:"forward_proxy_convention,omitempty"` + + // The name of the custom header containing the host of the request. + // The name of the header containing the host of the request. + // +kubebuilder:validation:Optional + ForwardProxyCustomHostHeaderName *string `json:"forwardProxyCustomHostHeaderName,omitempty" tf:"forward_proxy_custom_host_header_name,omitempty"` + + // The name of the custom header containing the scheme of the request. + // The name of the header containing the scheme of the request. + // +kubebuilder:validation:Optional + ForwardProxyCustomSchemeHeaderName *string `json:"forwardProxyCustomSchemeHeaderName,omitempty" tf:"forward_proxy_custom_scheme_header_name,omitempty"` + + // A github_v2 block as defined below. + // +kubebuilder:validation:Optional + GithubV2 *AuthSettingsV2GithubV2Parameters `json:"githubV2,omitempty" tf:"github_v2,omitempty"` + + // A google_v2 block as defined below. + // +kubebuilder:validation:Optional + GoogleV2 *AuthSettingsV2GoogleV2Parameters `json:"googleV2,omitempty" tf:"google_v2,omitempty"` + + // The prefix that should precede all the authentication and authorisation paths. Defaults to /.auth. + // The prefix that should precede all the authentication and authorisation paths. Defaults to `/.auth` + // +kubebuilder:validation:Optional + HTTPRouteAPIPrefix *string `json:"httpRouteApiPrefix,omitempty" tf:"http_route_api_prefix,omitempty"` + + // A login block as defined below. + // +kubebuilder:validation:Optional + Login *AuthSettingsV2LoginParameters `json:"login" tf:"login,omitempty"` + + // A microsoft_v2 block as defined below. + // +kubebuilder:validation:Optional + MicrosoftV2 *AuthSettingsV2MicrosoftV2Parameters `json:"microsoftV2,omitempty" tf:"microsoft_v2,omitempty"` + + // Should the authentication flow be used for all requests. 
+ // Should the authentication flow be used for all requests. + // +kubebuilder:validation:Optional + RequireAuthentication *bool `json:"requireAuthentication,omitempty" tf:"require_authentication,omitempty"` + + // Should HTTPS be required on connections? Defaults to true. + // Should HTTPS be required on connections? Defaults to true. + // +kubebuilder:validation:Optional + RequireHTTPS *bool `json:"requireHttps,omitempty" tf:"require_https,omitempty"` + + // The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to ~1. + // The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to `~1` + // +kubebuilder:validation:Optional + RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` + + // A twitter_v2 block as defined below. + // +kubebuilder:validation:Optional + TwitterV2 *AuthSettingsV2TwitterV2Parameters `json:"twitterV2,omitempty" tf:"twitter_v2,omitempty"` + + // The action to take for requests made without authentication. Possible values include RedirectToLoginPage, AllowAnonymous, Return401, and Return403. Defaults to RedirectToLoginPage. + // The action to take for requests made without authentication. Possible values include `RedirectToLoginPage`, `AllowAnonymous`, `Return401`, and `Return403`. Defaults to `RedirectToLoginPage`. + // +kubebuilder:validation:Optional + UnauthenticatedAction *string `json:"unauthenticatedAction,omitempty" tf:"unauthenticated_action,omitempty"` +} + +type LinuxFunctionAppSlotBackupInitParameters struct { + + // Should this backup job be enabled? Defaults to true. + // Should this backup job be enabled? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The name which should be used for this Backup. + // The name which should be used for this Backup. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // a schedule block as detailed below. 
+ Schedule *BackupScheduleInitParameters `json:"schedule,omitempty" tf:"schedule,omitempty"` +} + +type LinuxFunctionAppSlotBackupObservation struct { + + // Should this backup job be enabled? Defaults to true. + // Should this backup job be enabled? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The name which should be used for this Backup. + // The name which should be used for this Backup. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // a schedule block as detailed below. + Schedule *BackupScheduleObservation `json:"schedule,omitempty" tf:"schedule,omitempty"` +} + +type LinuxFunctionAppSlotBackupParameters struct { + + // Should this backup job be enabled? Defaults to true. + // Should this backup job be enabled? + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The name which should be used for this Backup. + // The name which should be used for this Backup. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // a schedule block as detailed below. + // +kubebuilder:validation:Optional + Schedule *BackupScheduleParameters `json:"schedule" tf:"schedule,omitempty"` + + // The SAS URL to the container. + // The SAS URL to the container. + // +kubebuilder:validation:Required + StorageAccountURLSecretRef v1.SecretKeySelector `json:"storageAccountUrlSecretRef" tf:"-"` +} + +type LinuxFunctionAppSlotConnectionStringInitParameters struct { + + // The name which should be used for this Connection. + // The name which should be used for this Connection. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Type of database. Possible values include: APIHub, Custom, DocDb, EventHub, MySQL, NotificationHub, PostgreSQL, RedisCache, ServiceBus, SQLAzure, and SQLServer. + // Type of database. 
Possible values include: `MySQL`, `SQLServer`, `SQLAzure`, `Custom`, `NotificationHub`, `ServiceBus`, `EventHub`, `APIHub`, `DocDb`, `RedisCache`, and `PostgreSQL`. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type LinuxFunctionAppSlotConnectionStringObservation struct { + + // The name which should be used for this Connection. + // The name which should be used for this Connection. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Type of database. Possible values include: APIHub, Custom, DocDb, EventHub, MySQL, NotificationHub, PostgreSQL, RedisCache, ServiceBus, SQLAzure, and SQLServer. + // Type of database. Possible values include: `MySQL`, `SQLServer`, `SQLAzure`, `Custom`, `NotificationHub`, `ServiceBus`, `EventHub`, `APIHub`, `DocDb`, `RedisCache`, and `PostgreSQL`. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type LinuxFunctionAppSlotConnectionStringParameters struct { + + // The name which should be used for this Connection. + // The name which should be used for this Connection. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Type of database. Possible values include: APIHub, Custom, DocDb, EventHub, MySQL, NotificationHub, PostgreSQL, RedisCache, ServiceBus, SQLAzure, and SQLServer. + // Type of database. Possible values include: `MySQL`, `SQLServer`, `SQLAzure`, `Custom`, `NotificationHub`, `ServiceBus`, `EventHub`, `APIHub`, `DocDb`, `RedisCache`, and `PostgreSQL`. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` + + // The connection string value. + // The connection string value. + // +kubebuilder:validation:Required + ValueSecretRef v1.SecretKeySelector `json:"valueSecretRef" tf:"-"` +} + +type LinuxFunctionAppSlotIdentityInitParameters struct { + + // A list of User Assigned Managed Identity IDs to be assigned to this Linux Function App Slot. 
+ // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Linux Function App Slot. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type LinuxFunctionAppSlotIdentityObservation struct { + + // A list of User Assigned Managed Identity IDs to be assigned to this Linux Function App Slot. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Principal ID associated with this Managed Service Identity. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID associated with this Managed Service Identity. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Linux Function App Slot. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type LinuxFunctionAppSlotIdentityParameters struct { + + // A list of User Assigned Managed Identity IDs to be assigned to this Linux Function App Slot. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Linux Function App Slot. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type LinuxFunctionAppSlotInitParameters struct { + + // A map of key-value pairs for App Settings and custom values. 
+ // A map of key-value pairs for [App Settings](https://docs.microsoft.com/en-us/azure/azure-functions/functions-app-settings) and custom values. + // +mapType=granular + AppSettings map[string]*string `json:"appSettings,omitempty" tf:"app_settings,omitempty"` + + // an auth_settings block as detailed below. + AuthSettings *LinuxFunctionAppSlotAuthSettingsInitParameters `json:"authSettings,omitempty" tf:"auth_settings,omitempty"` + + // an auth_settings_v2 block as detailed below. + AuthSettingsV2 *LinuxFunctionAppSlotAuthSettingsV2InitParameters `json:"authSettingsV2,omitempty" tf:"auth_settings_v2,omitempty"` + + // a backup block as detailed below. + Backup *LinuxFunctionAppSlotBackupInitParameters `json:"backup,omitempty" tf:"backup,omitempty"` + + // Should built in logging be enabled. Configures AzureWebJobsDashboard app setting based on the configured storage setting. Defaults to true. + // Should built in logging be enabled. Configures `AzureWebJobsDashboard` app setting based on the configured storage setting. + BuiltinLoggingEnabled *bool `json:"builtinLoggingEnabled,omitempty" tf:"builtin_logging_enabled,omitempty"` + + // Should the Function App Slot use Client Certificates. + // Should the Function App Slot use Client Certificates. + ClientCertificateEnabled *bool `json:"clientCertificateEnabled,omitempty" tf:"client_certificate_enabled,omitempty"` + + // Paths to exclude when using client certificates, separated by ; + // Paths to exclude when using client certificates, separated by ; + ClientCertificateExclusionPaths *string `json:"clientCertificateExclusionPaths,omitempty" tf:"client_certificate_exclusion_paths,omitempty"` + + // The mode of the Function App Slot's client certificates requirement for incoming requests. Possible values are Required, Optional, and OptionalInteractiveUser. Defaults to Optional. + // The mode of the Function App Slot's client certificates requirement for incoming requests. 
Possible values are `Required`, `Optional`, and `OptionalInteractiveUser`. + ClientCertificateMode *string `json:"clientCertificateMode,omitempty" tf:"client_certificate_mode,omitempty"` + + // a connection_string block as detailed below. + ConnectionString []LinuxFunctionAppSlotConnectionStringInitParameters `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // Force disable the content share settings. + // Force disable the content share settings. + ContentShareForceDisabled *bool `json:"contentShareForceDisabled,omitempty" tf:"content_share_force_disabled,omitempty"` + + // The amount of memory in gigabyte-seconds that your application is allowed to consume per day. Setting this value only affects function apps in Consumption Plans. Defaults to 0. + // The amount of memory in gigabyte-seconds that your application is allowed to consume per day. Setting this value only affects function apps in Consumption Plans. + DailyMemoryTimeQuota *float64 `json:"dailyMemoryTimeQuota,omitempty" tf:"daily_memory_time_quota,omitempty"` + + // Is the Linux Function App Slot enabled. Defaults to true. + // Is the Linux Function App Slot enabled. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Are the default FTP Basic Authentication publishing credentials enabled. Defaults to true. + FtpPublishBasicAuthenticationEnabled *bool `json:"ftpPublishBasicAuthenticationEnabled,omitempty" tf:"ftp_publish_basic_authentication_enabled,omitempty"` + + // The runtime version associated with the Function App Slot. Defaults to ~4. + // The runtime version associated with the Function App Slot. + FunctionsExtensionVersion *string `json:"functionsExtensionVersion,omitempty" tf:"functions_extension_version,omitempty"` + + // Can the Function App Slot only be accessed via HTTPS?. Defaults to false. + // Can the Function App Slot only be accessed via HTTPS? 
+ HTTPSOnly *bool `json:"httpsOnly,omitempty" tf:"https_only,omitempty"` + + // An identity block as detailed below. + Identity *LinuxFunctionAppSlotIdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The User Assigned Identity ID used for accessing KeyVault secrets. The identity must be assigned to the application in the identity block. For more information see - Access vaults with a user-assigned identity + // The User Assigned Identity to use for Key Vault access. + KeyVaultReferenceIdentityID *string `json:"keyVaultReferenceIdentityId,omitempty" tf:"key_vault_reference_identity_id,omitempty"` + + // Should public network access be enabled for the Function App. Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The ID of the Service Plan in which to run this slot. If not specified the same Service Plan as the Linux Function App will be used. + ServicePlanID *string `json:"servicePlanId,omitempty" tf:"service_plan_id,omitempty"` + + // a site_config block as detailed below. + SiteConfig *LinuxFunctionAppSlotSiteConfigInitParameters `json:"siteConfig,omitempty" tf:"site_config,omitempty"` + + // One or more storage_account blocks as defined below. + StorageAccount []LinuxFunctionAppSlotStorageAccountInitParameters `json:"storageAccount,omitempty" tf:"storage_account,omitempty"` + + // The backend storage account name which will be used by this Function App Slot. + // The backend storage account name which will be used by this Function App Slot. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account + StorageAccountName *string `json:"storageAccountName,omitempty" tf:"storage_account_name,omitempty"` + + // Reference to a Account in storage to populate storageAccountName. 
+ // +kubebuilder:validation:Optional + StorageAccountNameRef *v1.Reference `json:"storageAccountNameRef,omitempty" tf:"-"` + + // Selector for a Account in storage to populate storageAccountName. + // +kubebuilder:validation:Optional + StorageAccountNameSelector *v1.Selector `json:"storageAccountNameSelector,omitempty" tf:"-"` + + // The Key Vault Secret ID, optionally including version, that contains the Connection String to connect to the storage account for this Function App. + // The Key Vault Secret ID, including version, that contains the Connection String to connect to the storage account for this Function App. + StorageKeyVaultSecretID *string `json:"storageKeyVaultSecretId,omitempty" tf:"storage_key_vault_secret_id,omitempty"` + + // Should the Function App Slot use its Managed Identity to access storage. + // Should the Function App Slot use its Managed Identity to access storage? + StorageUsesManagedIdentity *bool `json:"storageUsesManagedIdentity,omitempty" tf:"storage_uses_managed_identity,omitempty"` + + // A mapping of tags which should be assigned to the Linux Function App. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The subnet id which will be used by this Function App Slot for regional virtual network integration. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDRef *v1.Reference `json:"virtualNetworkSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate virtualNetworkSubnetId. 
+ // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDSelector *v1.Selector `json:"virtualNetworkSubnetIdSelector,omitempty" tf:"-"` + + // Should the default WebDeploy Basic Authentication publishing credentials enabled. Defaults to true. + WebdeployPublishBasicAuthenticationEnabled *bool `json:"webdeployPublishBasicAuthenticationEnabled,omitempty" tf:"webdeploy_publish_basic_authentication_enabled,omitempty"` +} + +type LinuxFunctionAppSlotObservation struct { + + // A map of key-value pairs for App Settings and custom values. + // A map of key-value pairs for [App Settings](https://docs.microsoft.com/en-us/azure/azure-functions/functions-app-settings) and custom values. + // +mapType=granular + AppSettings map[string]*string `json:"appSettings,omitempty" tf:"app_settings,omitempty"` + + // an auth_settings block as detailed below. + AuthSettings *LinuxFunctionAppSlotAuthSettingsObservation `json:"authSettings,omitempty" tf:"auth_settings,omitempty"` + + // an auth_settings_v2 block as detailed below. + AuthSettingsV2 *LinuxFunctionAppSlotAuthSettingsV2Observation `json:"authSettingsV2,omitempty" tf:"auth_settings_v2,omitempty"` + + // a backup block as detailed below. + Backup *LinuxFunctionAppSlotBackupObservation `json:"backup,omitempty" tf:"backup,omitempty"` + + // Should built in logging be enabled. Configures AzureWebJobsDashboard app setting based on the configured storage setting. Defaults to true. + // Should built in logging be enabled. Configures `AzureWebJobsDashboard` app setting based on the configured storage setting. + BuiltinLoggingEnabled *bool `json:"builtinLoggingEnabled,omitempty" tf:"builtin_logging_enabled,omitempty"` + + // Should the Function App Slot use Client Certificates. + // Should the Function App Slot use Client Certificates. 
+ ClientCertificateEnabled *bool `json:"clientCertificateEnabled,omitempty" tf:"client_certificate_enabled,omitempty"` + + // Paths to exclude when using client certificates, separated by ; + // Paths to exclude when using client certificates, separated by ; + ClientCertificateExclusionPaths *string `json:"clientCertificateExclusionPaths,omitempty" tf:"client_certificate_exclusion_paths,omitempty"` + + // The mode of the Function App Slot's client certificates requirement for incoming requests. Possible values are Required, Optional, and OptionalInteractiveUser. Defaults to Optional. + // The mode of the Function App Slot's client certificates requirement for incoming requests. Possible values are `Required`, `Optional`, and `OptionalInteractiveUser`. + ClientCertificateMode *string `json:"clientCertificateMode,omitempty" tf:"client_certificate_mode,omitempty"` + + // a connection_string block as detailed below. + ConnectionString []LinuxFunctionAppSlotConnectionStringObservation `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // Force disable the content share settings. + // Force disable the content share settings. + ContentShareForceDisabled *bool `json:"contentShareForceDisabled,omitempty" tf:"content_share_force_disabled,omitempty"` + + // The amount of memory in gigabyte-seconds that your application is allowed to consume per day. Setting this value only affects function apps in Consumption Plans. Defaults to 0. + // The amount of memory in gigabyte-seconds that your application is allowed to consume per day. Setting this value only affects function apps in Consumption Plans. + DailyMemoryTimeQuota *float64 `json:"dailyMemoryTimeQuota,omitempty" tf:"daily_memory_time_quota,omitempty"` + + // The default hostname of the Linux Function App Slot. + DefaultHostName *string `json:"defaultHostname,omitempty" tf:"default_hostname,omitempty"` + + // Is the Linux Function App Slot enabled. Defaults to true. 
+ // Is the Linux Function App Slot enabled. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Are the default FTP Basic Authentication publishing credentials enabled. Defaults to true. + FtpPublishBasicAuthenticationEnabled *bool `json:"ftpPublishBasicAuthenticationEnabled,omitempty" tf:"ftp_publish_basic_authentication_enabled,omitempty"` + + // The ID of the Linux Function App this Slot is a member of. Changing this forces a new resource to be created. + // The ID of the Linux Function App this Slot is a member of. + FunctionAppID *string `json:"functionAppId,omitempty" tf:"function_app_id,omitempty"` + + // The runtime version associated with the Function App Slot. Defaults to ~4. + // The runtime version associated with the Function App Slot. + FunctionsExtensionVersion *string `json:"functionsExtensionVersion,omitempty" tf:"functions_extension_version,omitempty"` + + // Can the Function App Slot only be accessed via HTTPS?. Defaults to false. + // Can the Function App Slot only be accessed via HTTPS? + HTTPSOnly *bool `json:"httpsOnly,omitempty" tf:"https_only,omitempty"` + + // The ID of the App Service Environment used by Function App Slot. + HostingEnvironmentID *string `json:"hostingEnvironmentId,omitempty" tf:"hosting_environment_id,omitempty"` + + // The ID of the Linux Function App Slot + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as detailed below. + Identity *LinuxFunctionAppSlotIdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // The User Assigned Identity ID used for accessing KeyVault secrets. The identity must be assigned to the application in the identity block. For more information see - Access vaults with a user-assigned identity + // The User Assigned Identity to use for Key Vault access. 
+ KeyVaultReferenceIdentityID *string `json:"keyVaultReferenceIdentityId,omitempty" tf:"key_vault_reference_identity_id,omitempty"` + + // The Kind value for this Linux Function App Slot. + Kind *string `json:"kind,omitempty" tf:"kind,omitempty"` + + // A list of outbound IP addresses. For example ["52.23.25.3", "52.143.43.12"] + OutboundIPAddressList []*string `json:"outboundIpAddressList,omitempty" tf:"outbound_ip_address_list,omitempty"` + + // A comma separated list of outbound IP addresses as a string. For example 52.23.25.3,52.143.43.12. + OutboundIPAddresses *string `json:"outboundIpAddresses,omitempty" tf:"outbound_ip_addresses,omitempty"` + + // A list of possible outbound IP addresses, not all of which are necessarily in use. This is a superset of outbound_ip_address_list. For example ["52.23.25.3", "52.143.43.12"]. + PossibleOutboundIPAddressList []*string `json:"possibleOutboundIpAddressList,omitempty" tf:"possible_outbound_ip_address_list,omitempty"` + + // A comma separated list of possible outbound IP addresses as a string. For example 52.23.25.3,52.143.43.12,52.143.43.17. This is a superset of outbound_ip_addresses. For example ["52.23.25.3", "52.143.43.12","52.143.43.17"]. + PossibleOutboundIPAddresses *string `json:"possibleOutboundIpAddresses,omitempty" tf:"possible_outbound_ip_addresses,omitempty"` + + // Should public network access be enabled for the Function App. Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The ID of the Service Plan in which to run this slot. If not specified the same Service Plan as the Linux Function App will be used. + ServicePlanID *string `json:"servicePlanId,omitempty" tf:"service_plan_id,omitempty"` + + // a site_config block as detailed below. + SiteConfig *LinuxFunctionAppSlotSiteConfigObservation `json:"siteConfig,omitempty" tf:"site_config,omitempty"` + + // One or more storage_account blocks as defined below. 
+ StorageAccount []LinuxFunctionAppSlotStorageAccountObservation `json:"storageAccount,omitempty" tf:"storage_account,omitempty"` + + // The backend storage account name which will be used by this Function App Slot. + // The backend storage account name which will be used by this Function App Slot. + StorageAccountName *string `json:"storageAccountName,omitempty" tf:"storage_account_name,omitempty"` + + // The Key Vault Secret ID, optionally including version, that contains the Connection String to connect to the storage account for this Function App. + // The Key Vault Secret ID, including version, that contains the Connection String to connect to the storage account for this Function App. + StorageKeyVaultSecretID *string `json:"storageKeyVaultSecretId,omitempty" tf:"storage_key_vault_secret_id,omitempty"` + + // Should the Function App Slot use its Managed Identity to access storage. + // Should the Function App Slot use its Managed Identity to access storage? + StorageUsesManagedIdentity *bool `json:"storageUsesManagedIdentity,omitempty" tf:"storage_uses_managed_identity,omitempty"` + + // A mapping of tags which should be assigned to the Linux Function App. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The subnet id which will be used by this Function App Slot for regional virtual network integration. + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` + + // Should the default WebDeploy Basic Authentication publishing credentials enabled. Defaults to true. + WebdeployPublishBasicAuthenticationEnabled *bool `json:"webdeployPublishBasicAuthenticationEnabled,omitempty" tf:"webdeploy_publish_basic_authentication_enabled,omitempty"` +} + +type LinuxFunctionAppSlotParameters struct { + + // A map of key-value pairs for App Settings and custom values. 
+ // A map of key-value pairs for [App Settings](https://docs.microsoft.com/en-us/azure/azure-functions/functions-app-settings) and custom values. + // +kubebuilder:validation:Optional + // +mapType=granular + AppSettings map[string]*string `json:"appSettings,omitempty" tf:"app_settings,omitempty"` + + // an auth_settings block as detailed below. + // +kubebuilder:validation:Optional + AuthSettings *LinuxFunctionAppSlotAuthSettingsParameters `json:"authSettings,omitempty" tf:"auth_settings,omitempty"` + + // an auth_settings_v2 block as detailed below. + // +kubebuilder:validation:Optional + AuthSettingsV2 *LinuxFunctionAppSlotAuthSettingsV2Parameters `json:"authSettingsV2,omitempty" tf:"auth_settings_v2,omitempty"` + + // a backup block as detailed below. + // +kubebuilder:validation:Optional + Backup *LinuxFunctionAppSlotBackupParameters `json:"backup,omitempty" tf:"backup,omitempty"` + + // Should built in logging be enabled. Configures AzureWebJobsDashboard app setting based on the configured storage setting. Defaults to true. + // Should built in logging be enabled. Configures `AzureWebJobsDashboard` app setting based on the configured storage setting. + // +kubebuilder:validation:Optional + BuiltinLoggingEnabled *bool `json:"builtinLoggingEnabled,omitempty" tf:"builtin_logging_enabled,omitempty"` + + // Should the Function App Slot use Client Certificates. + // Should the Function App Slot use Client Certificates. 
+ // +kubebuilder:validation:Optional + ClientCertificateEnabled *bool `json:"clientCertificateEnabled,omitempty" tf:"client_certificate_enabled,omitempty"` + + // Paths to exclude when using client certificates, separated by ; + // Paths to exclude when using client certificates, separated by ; + // +kubebuilder:validation:Optional + ClientCertificateExclusionPaths *string `json:"clientCertificateExclusionPaths,omitempty" tf:"client_certificate_exclusion_paths,omitempty"` + + // The mode of the Function App Slot's client certificates requirement for incoming requests. Possible values are Required, Optional, and OptionalInteractiveUser. Defaults to Optional. + // The mode of the Function App Slot's client certificates requirement for incoming requests. Possible values are `Required`, `Optional`, and `OptionalInteractiveUser`. + // +kubebuilder:validation:Optional + ClientCertificateMode *string `json:"clientCertificateMode,omitempty" tf:"client_certificate_mode,omitempty"` + + // a connection_string block as detailed below. + // +kubebuilder:validation:Optional + ConnectionString []LinuxFunctionAppSlotConnectionStringParameters `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // Force disable the content share settings. + // Force disable the content share settings. + // +kubebuilder:validation:Optional + ContentShareForceDisabled *bool `json:"contentShareForceDisabled,omitempty" tf:"content_share_force_disabled,omitempty"` + + // The amount of memory in gigabyte-seconds that your application is allowed to consume per day. Setting this value only affects function apps in Consumption Plans. Defaults to 0. + // The amount of memory in gigabyte-seconds that your application is allowed to consume per day. Setting this value only affects function apps in Consumption Plans. 
+ // +kubebuilder:validation:Optional + DailyMemoryTimeQuota *float64 `json:"dailyMemoryTimeQuota,omitempty" tf:"daily_memory_time_quota,omitempty"` + + // Is the Linux Function App Slot enabled. Defaults to true. + // Is the Linux Function App Slot enabled. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Are the default FTP Basic Authentication publishing credentials enabled. Defaults to true. + // +kubebuilder:validation:Optional + FtpPublishBasicAuthenticationEnabled *bool `json:"ftpPublishBasicAuthenticationEnabled,omitempty" tf:"ftp_publish_basic_authentication_enabled,omitempty"` + + // The ID of the Linux Function App this Slot is a member of. Changing this forces a new resource to be created. + // The ID of the Linux Function App this Slot is a member of. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/web/v1beta2.LinuxFunctionApp + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + FunctionAppID *string `json:"functionAppId,omitempty" tf:"function_app_id,omitempty"` + + // Reference to a LinuxFunctionApp in web to populate functionAppId. + // +kubebuilder:validation:Optional + FunctionAppIDRef *v1.Reference `json:"functionAppIdRef,omitempty" tf:"-"` + + // Selector for a LinuxFunctionApp in web to populate functionAppId. + // +kubebuilder:validation:Optional + FunctionAppIDSelector *v1.Selector `json:"functionAppIdSelector,omitempty" tf:"-"` + + // The runtime version associated with the Function App Slot. Defaults to ~4. + // The runtime version associated with the Function App Slot. + // +kubebuilder:validation:Optional + FunctionsExtensionVersion *string `json:"functionsExtensionVersion,omitempty" tf:"functions_extension_version,omitempty"` + + // Can the Function App Slot only be accessed via HTTPS?. Defaults to false. 
+ // Can the Function App Slot only be accessed via HTTPS? + // +kubebuilder:validation:Optional + HTTPSOnly *bool `json:"httpsOnly,omitempty" tf:"https_only,omitempty"` + + // An identity block as detailed below. + // +kubebuilder:validation:Optional + Identity *LinuxFunctionAppSlotIdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The User Assigned Identity ID used for accessing KeyVault secrets. The identity must be assigned to the application in the identity block. For more information see - Access vaults with a user-assigned identity + // The User Assigned Identity to use for Key Vault access. + // +kubebuilder:validation:Optional + KeyVaultReferenceIdentityID *string `json:"keyVaultReferenceIdentityId,omitempty" tf:"key_vault_reference_identity_id,omitempty"` + + // Should public network access be enabled for the Function App. Defaults to true. + // +kubebuilder:validation:Optional + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The ID of the Service Plan in which to run this slot. If not specified the same Service Plan as the Linux Function App will be used. + // +kubebuilder:validation:Optional + ServicePlanID *string `json:"servicePlanId,omitempty" tf:"service_plan_id,omitempty"` + + // a site_config block as detailed below. + // +kubebuilder:validation:Optional + SiteConfig *LinuxFunctionAppSlotSiteConfigParameters `json:"siteConfig,omitempty" tf:"site_config,omitempty"` + + // One or more storage_account blocks as defined below. + // +kubebuilder:validation:Optional + StorageAccount []LinuxFunctionAppSlotStorageAccountParameters `json:"storageAccount,omitempty" tf:"storage_account,omitempty"` + + // The access key which will be used to access the storage account for the Function App Slot. + // The access key which will be used to access the storage account for the Function App Slot. 
+ // +kubebuilder:validation:Optional + StorageAccountAccessKeySecretRef *v1.SecretKeySelector `json:"storageAccountAccessKeySecretRef,omitempty" tf:"-"` + + // The backend storage account name which will be used by this Function App Slot. + // The backend storage account name which will be used by this Function App Slot. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account + // +kubebuilder:validation:Optional + StorageAccountName *string `json:"storageAccountName,omitempty" tf:"storage_account_name,omitempty"` + + // Reference to a Account in storage to populate storageAccountName. + // +kubebuilder:validation:Optional + StorageAccountNameRef *v1.Reference `json:"storageAccountNameRef,omitempty" tf:"-"` + + // Selector for a Account in storage to populate storageAccountName. + // +kubebuilder:validation:Optional + StorageAccountNameSelector *v1.Selector `json:"storageAccountNameSelector,omitempty" tf:"-"` + + // The Key Vault Secret ID, optionally including version, that contains the Connection String to connect to the storage account for this Function App. + // The Key Vault Secret ID, including version, that contains the Connection String to connect to the storage account for this Function App. + // +kubebuilder:validation:Optional + StorageKeyVaultSecretID *string `json:"storageKeyVaultSecretId,omitempty" tf:"storage_key_vault_secret_id,omitempty"` + + // Should the Function App Slot use its Managed Identity to access storage. + // Should the Function App Slot use its Managed Identity to access storage? + // +kubebuilder:validation:Optional + StorageUsesManagedIdentity *bool `json:"storageUsesManagedIdentity,omitempty" tf:"storage_uses_managed_identity,omitempty"` + + // A mapping of tags which should be assigned to the Linux Function App. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The subnet id which will be used by this Function App Slot for regional virtual network integration. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDRef *v1.Reference `json:"virtualNetworkSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDSelector *v1.Selector `json:"virtualNetworkSubnetIdSelector,omitempty" tf:"-"` + + // Should the default WebDeploy Basic Authentication publishing credentials enabled. Defaults to true. + // +kubebuilder:validation:Optional + WebdeployPublishBasicAuthenticationEnabled *bool `json:"webdeployPublishBasicAuthenticationEnabled,omitempty" tf:"webdeploy_publish_basic_authentication_enabled,omitempty"` +} + +type LinuxFunctionAppSlotSiteConfigCorsInitParameters struct { + + // Specifies a list of origins that should be allowed to make cross-origin calls. + // Specifies a list of origins that should be allowed to make cross-origin calls. + // +listType=set + AllowedOrigins []*string `json:"allowedOrigins,omitempty" tf:"allowed_origins,omitempty"` + + // Are credentials allowed in CORS requests? Defaults to false. + // Are credentials allowed in CORS requests? Defaults to `false`. 
+ SupportCredentials *bool `json:"supportCredentials,omitempty" tf:"support_credentials,omitempty"` +} + +type LinuxFunctionAppSlotSiteConfigCorsObservation struct { + + // Specifies a list of origins that should be allowed to make cross-origin calls. + // Specifies a list of origins that should be allowed to make cross-origin calls. + // +listType=set + AllowedOrigins []*string `json:"allowedOrigins,omitempty" tf:"allowed_origins,omitempty"` + + // Are credentials allowed in CORS requests? Defaults to false. + // Are credentials allowed in CORS requests? Defaults to `false`. + SupportCredentials *bool `json:"supportCredentials,omitempty" tf:"support_credentials,omitempty"` +} + +type LinuxFunctionAppSlotSiteConfigCorsParameters struct { + + // Specifies a list of origins that should be allowed to make cross-origin calls. + // Specifies a list of origins that should be allowed to make cross-origin calls. + // +kubebuilder:validation:Optional + // +listType=set + AllowedOrigins []*string `json:"allowedOrigins,omitempty" tf:"allowed_origins,omitempty"` + + // Are credentials allowed in CORS requests? Defaults to false. + // Are credentials allowed in CORS requests? Defaults to `false`. + // +kubebuilder:validation:Optional + SupportCredentials *bool `json:"supportCredentials,omitempty" tf:"support_credentials,omitempty"` +} + +type LinuxFunctionAppSlotSiteConfigIPRestrictionHeadersInitParameters struct { + + // Specifies a list of Azure Front Door IDs. + XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid"` + + // Specifies if a Front Door Health Probe should be expected. The only possible value is 1. + XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe"` + + // Specifies a list of addresses for which matching should be applied. Omitting this value means allow any. + XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for"` + + // Specifies a list of Hosts for which matching should be applied. 
+ XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host"` +} + +type LinuxFunctionAppSlotSiteConfigIPRestrictionHeadersObservation struct { + + // Specifies a list of Azure Front Door IDs. + XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid,omitempty"` + + // Specifies if a Front Door Health Probe should be expected. The only possible value is 1. + XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe,omitempty"` + + // Specifies a list of addresses for which matching should be applied. Omitting this value means allow any. + XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for,omitempty"` + + // Specifies a list of Hosts for which matching should be applied. + XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host,omitempty"` +} + +type LinuxFunctionAppSlotSiteConfigIPRestrictionHeadersParameters struct { + + // Specifies a list of Azure Front Door IDs. + // +kubebuilder:validation:Optional + XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid"` + + // Specifies if a Front Door Health Probe should be expected. The only possible value is 1. + // +kubebuilder:validation:Optional + XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe"` + + // Specifies a list of addresses for which matching should be applied. Omitting this value means allow any. + // +kubebuilder:validation:Optional + XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for"` + + // Specifies a list of Hosts for which matching should be applied. + // +kubebuilder:validation:Optional + XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host"` +} + +type LinuxFunctionAppSlotSiteConfigIPRestrictionInitParameters struct { + + // The action to take. Possible values are Allow or Deny. Defaults to Allow. + // The action to take. Possible values are `Allow` or `Deny`. 
+ Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The Description of this IP Restriction. + // The description of the IP restriction rule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // a headers block as detailed below. + Headers []LinuxFunctionAppSlotSiteConfigIPRestrictionHeadersInitParameters `json:"headers,omitempty" tf:"headers,omitempty"` + + // The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + // The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // The name which should be used for this Storage Account. + // The name which should be used for this `ip_restriction`. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The priority value of this ip_restriction. Defaults to 65000. + // The priority value of this `ip_restriction`. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Service Tag used for this IP Restriction. + // The Service Tag used for this IP Restriction. + ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag,omitempty"` + + // The subnet id which will be used by this Function App Slot for regional virtual network integration. + // The Virtual Network Subnet ID used for this IP Restriction. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate virtualNetworkSubnetId. 
+ // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDRef *v1.Reference `json:"virtualNetworkSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDSelector *v1.Selector `json:"virtualNetworkSubnetIdSelector,omitempty" tf:"-"` +} + +type LinuxFunctionAppSlotSiteConfigIPRestrictionObservation struct { + + // The action to take. Possible values are Allow or Deny. Defaults to Allow. + // The action to take. Possible values are `Allow` or `Deny`. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The Description of this IP Restriction. + // The description of the IP restriction rule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // a headers block as detailed below. + Headers []LinuxFunctionAppSlotSiteConfigIPRestrictionHeadersObservation `json:"headers,omitempty" tf:"headers,omitempty"` + + // The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + // The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // The name which should be used for this Storage Account. + // The name which should be used for this `ip_restriction`. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The priority value of this ip_restriction. Defaults to 65000. + // The priority value of this `ip_restriction`. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Service Tag used for this IP Restriction. + // The Service Tag used for this IP Restriction. + ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag,omitempty"` + + // The subnet id which will be used by this Function App Slot for regional virtual network integration. 
+ // The Virtual Network Subnet ID used for this IP Restriction. + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` +} + +type LinuxFunctionAppSlotSiteConfigIPRestrictionParameters struct { + + // The action to take. Possible values are Allow or Deny. Defaults to Allow. + // The action to take. Possible values are `Allow` or `Deny`. + // +kubebuilder:validation:Optional + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The Description of this IP Restriction. + // The description of the IP restriction rule. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // a headers block as detailed below. + // +kubebuilder:validation:Optional + Headers []LinuxFunctionAppSlotSiteConfigIPRestrictionHeadersParameters `json:"headers,omitempty" tf:"headers,omitempty"` + + // The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + // The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + // +kubebuilder:validation:Optional + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // The name which should be used for this Storage Account. + // The name which should be used for this `ip_restriction`. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The priority value of this ip_restriction. Defaults to 65000. + // The priority value of this `ip_restriction`. + // +kubebuilder:validation:Optional + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Service Tag used for this IP Restriction. + // The Service Tag used for this IP Restriction. 
+ // +kubebuilder:validation:Optional + ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag,omitempty"` + + // The subnet id which will be used by this Function App Slot for regional virtual network integration. + // The Virtual Network Subnet ID used for this IP Restriction. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDRef *v1.Reference `json:"virtualNetworkSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDSelector *v1.Selector `json:"virtualNetworkSubnetIdSelector,omitempty" tf:"-"` +} + +type LinuxFunctionAppSlotSiteConfigInitParameters struct { + + // The URL of the API definition that describes this Linux Function App. + // The URL of the API definition that describes this Linux Function App. + APIDefinitionURL *string `json:"apiDefinitionUrl,omitempty" tf:"api_definition_url,omitempty"` + + // The ID of the API Management API for this Linux Function App. + // The ID of the API Management API for this Linux Function App. + APIManagementAPIID *string `json:"apiManagementApiId,omitempty" tf:"api_management_api_id,omitempty"` + + // If this Linux Web App is Always On enabled. Defaults to false. + // If this Linux Web App is Always On enabled. Defaults to `false`. + AlwaysOn *bool `json:"alwaysOn,omitempty" tf:"always_on,omitempty"` + + // The program and any arguments used to launch this app via the command line. (Example node myapp.js). 
+ // The program and any arguments used to launch this app via the command line. (Example `node myapp.js`). + AppCommandLine *string `json:"appCommandLine,omitempty" tf:"app_command_line,omitempty"` + + // The number of workers this function app can scale out to. Only applicable to apps on the Consumption and Premium plan. + // The number of workers this function app can scale out to. Only applicable to apps on the Consumption and Premium plan. + AppScaleLimit *float64 `json:"appScaleLimit,omitempty" tf:"app_scale_limit,omitempty"` + + // an app_service_logs block as detailed below. + AppServiceLogs *SiteConfigAppServiceLogsInitParameters `json:"appServiceLogs,omitempty" tf:"app_service_logs,omitempty"` + + // an application_stack block as detailed below. + ApplicationStack *SiteConfigApplicationStackInitParameters `json:"applicationStack,omitempty" tf:"application_stack,omitempty"` + + // The name of the slot to automatically swap with when this slot is successfully deployed. + AutoSwapSlotName *string `json:"autoSwapSlotName,omitempty" tf:"auto_swap_slot_name,omitempty"` + + // The Client ID of the Managed Service Identity to use for connections to the Azure Container Registry. + // The Client ID of the Managed Service Identity to use for connections to the Azure Container Registry. + ContainerRegistryManagedIdentityClientID *string `json:"containerRegistryManagedIdentityClientId,omitempty" tf:"container_registry_managed_identity_client_id,omitempty"` + + // Should connections for Azure Container Registry use Managed Identity. + // Should connections for Azure Container Registry use Managed Identity. + ContainerRegistryUseManagedIdentity *bool `json:"containerRegistryUseManagedIdentity,omitempty" tf:"container_registry_use_managed_identity,omitempty"` + + // a cors block as detailed below. + Cors *LinuxFunctionAppSlotSiteConfigCorsInitParameters `json:"cors,omitempty" tf:"cors,omitempty"` + + // Specifies a list of Default Documents for the Linux Web App. 
+ // Specifies a list of Default Documents for the Linux Web App. + DefaultDocuments []*string `json:"defaultDocuments,omitempty" tf:"default_documents,omitempty"` + + // The number of minimum instances for this Linux Function App. Only affects apps on Elastic Premium plans. + // The number of minimum instances for this Linux Function App. Only affects apps on Elastic Premium plans. + ElasticInstanceMinimum *float64 `json:"elasticInstanceMinimum,omitempty" tf:"elastic_instance_minimum,omitempty"` + + // State of FTP / FTPS service for this function app. Possible values include: AllAllowed, FtpsOnly and Disabled. Defaults to Disabled. + // State of FTP / FTPS service for this function app. Possible values include: `AllAllowed`, `FtpsOnly` and `Disabled`. Defaults to `Disabled`. + FtpsState *string `json:"ftpsState,omitempty" tf:"ftps_state,omitempty"` + + // The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between 2 and 10. Defaults to 0. Only valid in conjunction with health_check_path. + // The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between `2` and `10`. Defaults to `10`. Only valid in conjunction with `health_check_path` + HealthCheckEvictionTimeInMin *float64 `json:"healthCheckEvictionTimeInMin,omitempty" tf:"health_check_eviction_time_in_min,omitempty"` + + // The path to be checked for this function app health. + // The path to be checked for this function app health. + HealthCheckPath *string `json:"healthCheckPath,omitempty" tf:"health_check_path,omitempty"` + + // Specifies if the HTTP2 protocol should be enabled. Defaults to false. + // Specifies if the http2 protocol should be enabled. Defaults to `false`. + Http2Enabled *bool `json:"http2Enabled,omitempty" tf:"http2_enabled,omitempty"` + + // an ip_restriction block as detailed below. 
+ IPRestriction []LinuxFunctionAppSlotSiteConfigIPRestrictionInitParameters `json:"ipRestriction,omitempty" tf:"ip_restriction,omitempty"` + + // The Default action for traffic that does not match any ip_restriction rule. possible values include Allow and Deny. Defaults to Allow. + IPRestrictionDefaultAction *string `json:"ipRestrictionDefaultAction,omitempty" tf:"ip_restriction_default_action,omitempty"` + + // The Site load balancing mode. Possible values include: WeightedRoundRobin, LeastRequests, LeastResponseTime, WeightedTotalTraffic, RequestHash, PerSiteRoundRobin. Defaults to LeastRequests if omitted. + // The Site load balancing mode. Possible values include: `WeightedRoundRobin`, `LeastRequests`, `LeastResponseTime`, `WeightedTotalTraffic`, `RequestHash`, `PerSiteRoundRobin`. Defaults to `LeastRequests` if omitted. + LoadBalancingMode *string `json:"loadBalancingMode,omitempty" tf:"load_balancing_mode,omitempty"` + + // The Managed Pipeline mode. Possible values include: Integrated, Classic. Defaults to Integrated. + // The Managed Pipeline mode. Possible values include: `Integrated`, `Classic`. Defaults to `Integrated`. + ManagedPipelineMode *string `json:"managedPipelineMode,omitempty" tf:"managed_pipeline_mode,omitempty"` + + // The configures the minimum version of TLS required for SSL requests. Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + // The configures the minimum version of TLS required for SSL requests. Possible values include: `1.0`, `1.1`, and `1.2`. Defaults to `1.2`. + MinimumTLSVersion *string `json:"minimumTlsVersion,omitempty" tf:"minimum_tls_version,omitempty"` + + // The number of pre-warmed instances for this function app. Only affects apps on an Elastic Premium plan. + // The number of pre-warmed instances for this function app. Only affects apps on an Elastic Premium plan. 
+ PreWarmedInstanceCount *float64 `json:"preWarmedInstanceCount,omitempty" tf:"pre_warmed_instance_count,omitempty"` + + // Should Remote Debugging be enabled. Defaults to false. + // Should Remote Debugging be enabled. Defaults to `false`. + RemoteDebuggingEnabled *bool `json:"remoteDebuggingEnabled,omitempty" tf:"remote_debugging_enabled,omitempty"` + + // The Remote Debugging Version. Possible values include VS2017, VS2019, and VS2022 + // The Remote Debugging Version. Possible values include `VS2017`, `VS2019`, and `VS2022` + RemoteDebuggingVersion *string `json:"remoteDebuggingVersion,omitempty" tf:"remote_debugging_version,omitempty"` + + // Should Functions Runtime Scale Monitoring be enabled. + // Should Functions Runtime Scale Monitoring be enabled. + RuntimeScaleMonitoringEnabled *bool `json:"runtimeScaleMonitoringEnabled,omitempty" tf:"runtime_scale_monitoring_enabled,omitempty"` + + // a scm_ip_restriction block as detailed below. + ScmIPRestriction []LinuxFunctionAppSlotSiteConfigScmIPRestrictionInitParameters `json:"scmIpRestriction,omitempty" tf:"scm_ip_restriction,omitempty"` + + // The Default action for traffic that does not match any scm_ip_restriction rule. possible values include Allow and Deny. Defaults to Allow. + ScmIPRestrictionDefaultAction *string `json:"scmIpRestrictionDefaultAction,omitempty" tf:"scm_ip_restriction_default_action,omitempty"` + + // Configures the minimum version of TLS required for SSL requests to the SCM site Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + // Configures the minimum version of TLS required for SSL requests to the SCM site Possible values include: `1.0`, `1.1`, and `1.2`. Defaults to `1.2`. + ScmMinimumTLSVersion *string `json:"scmMinimumTlsVersion,omitempty" tf:"scm_minimum_tls_version,omitempty"` + + // Should the Linux Function App ip_restriction configuration be used for the SCM also. + // Should the Linux Function App `ip_restriction` configuration be used for the SCM also. 
+ ScmUseMainIPRestriction *bool `json:"scmUseMainIpRestriction,omitempty" tf:"scm_use_main_ip_restriction,omitempty"` + + // Should the Linux Web App use a 32-bit worker. + // Should the Linux Web App use a 32-bit worker. + Use32BitWorker *bool `json:"use32BitWorker,omitempty" tf:"use_32_bit_worker,omitempty"` + + // Should all outbound traffic to have NAT Gateways, Network Security Groups and User Defined Routes applied? Defaults to false. + // Should all outbound traffic to have Virtual Network Security Groups and User Defined Routes applied? Defaults to `false`. + VnetRouteAllEnabled *bool `json:"vnetRouteAllEnabled,omitempty" tf:"vnet_route_all_enabled,omitempty"` + + // Should Web Sockets be enabled. Defaults to false. + // Should Web Sockets be enabled. Defaults to `false`. + WebsocketsEnabled *bool `json:"websocketsEnabled,omitempty" tf:"websockets_enabled,omitempty"` + + // The number of Workers for this Linux Function App. + // The number of Workers for this Linux Function App. + WorkerCount *float64 `json:"workerCount,omitempty" tf:"worker_count,omitempty"` +} + +type LinuxFunctionAppSlotSiteConfigObservation struct { + + // The URL of the API definition that describes this Linux Function App. + // The URL of the API definition that describes this Linux Function App. + APIDefinitionURL *string `json:"apiDefinitionUrl,omitempty" tf:"api_definition_url,omitempty"` + + // The ID of the API Management API for this Linux Function App. + // The ID of the API Management API for this Linux Function App. + APIManagementAPIID *string `json:"apiManagementApiId,omitempty" tf:"api_management_api_id,omitempty"` + + // If this Linux Web App is Always On enabled. Defaults to false. + // If this Linux Web App is Always On enabled. Defaults to `false`. + AlwaysOn *bool `json:"alwaysOn,omitempty" tf:"always_on,omitempty"` + + // The program and any arguments used to launch this app via the command line. (Example node myapp.js). 
+ // The program and any arguments used to launch this app via the command line. (Example `node myapp.js`). + AppCommandLine *string `json:"appCommandLine,omitempty" tf:"app_command_line,omitempty"` + + // The number of workers this function app can scale out to. Only applicable to apps on the Consumption and Premium plan. + // The number of workers this function app can scale out to. Only applicable to apps on the Consumption and Premium plan. + AppScaleLimit *float64 `json:"appScaleLimit,omitempty" tf:"app_scale_limit,omitempty"` + + // an app_service_logs block as detailed below. + AppServiceLogs *SiteConfigAppServiceLogsObservation `json:"appServiceLogs,omitempty" tf:"app_service_logs,omitempty"` + + // an application_stack block as detailed below. + ApplicationStack *SiteConfigApplicationStackObservation `json:"applicationStack,omitempty" tf:"application_stack,omitempty"` + + // The name of the slot to automatically swap with when this slot is successfully deployed. + AutoSwapSlotName *string `json:"autoSwapSlotName,omitempty" tf:"auto_swap_slot_name,omitempty"` + + // The Client ID of the Managed Service Identity to use for connections to the Azure Container Registry. + // The Client ID of the Managed Service Identity to use for connections to the Azure Container Registry. + ContainerRegistryManagedIdentityClientID *string `json:"containerRegistryManagedIdentityClientId,omitempty" tf:"container_registry_managed_identity_client_id,omitempty"` + + // Should connections for Azure Container Registry use Managed Identity. + // Should connections for Azure Container Registry use Managed Identity. + ContainerRegistryUseManagedIdentity *bool `json:"containerRegistryUseManagedIdentity,omitempty" tf:"container_registry_use_managed_identity,omitempty"` + + // a cors block as detailed below. + Cors *LinuxFunctionAppSlotSiteConfigCorsObservation `json:"cors,omitempty" tf:"cors,omitempty"` + + // Specifies a list of Default Documents for the Linux Web App. 
+ // Specifies a list of Default Documents for the Linux Web App. + DefaultDocuments []*string `json:"defaultDocuments,omitempty" tf:"default_documents,omitempty"` + + // Is detailed error logging enabled + // Is detailed error logging enabled + DetailedErrorLoggingEnabled *bool `json:"detailedErrorLoggingEnabled,omitempty" tf:"detailed_error_logging_enabled,omitempty"` + + // The number of minimum instances for this Linux Function App. Only affects apps on Elastic Premium plans. + // The number of minimum instances for this Linux Function App. Only affects apps on Elastic Premium plans. + ElasticInstanceMinimum *float64 `json:"elasticInstanceMinimum,omitempty" tf:"elastic_instance_minimum,omitempty"` + + // State of FTP / FTPS service for this function app. Possible values include: AllAllowed, FtpsOnly and Disabled. Defaults to Disabled. + // State of FTP / FTPS service for this function app. Possible values include: `AllAllowed`, `FtpsOnly` and `Disabled`. Defaults to `Disabled`. + FtpsState *string `json:"ftpsState,omitempty" tf:"ftps_state,omitempty"` + + // The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between 2 and 10. Defaults to 0. Only valid in conjunction with health_check_path. + // The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between `2` and `10`. Defaults to `10`. Only valid in conjunction with `health_check_path` + HealthCheckEvictionTimeInMin *float64 `json:"healthCheckEvictionTimeInMin,omitempty" tf:"health_check_eviction_time_in_min,omitempty"` + + // The path to be checked for this function app health. + // The path to be checked for this function app health. + HealthCheckPath *string `json:"healthCheckPath,omitempty" tf:"health_check_path,omitempty"` + + // Specifies if the HTTP2 protocol should be enabled. Defaults to false. + // Specifies if the http2 protocol should be enabled. 
Defaults to `false`. + Http2Enabled *bool `json:"http2Enabled,omitempty" tf:"http2_enabled,omitempty"` + + // an ip_restriction block as detailed below. + IPRestriction []LinuxFunctionAppSlotSiteConfigIPRestrictionObservation `json:"ipRestriction,omitempty" tf:"ip_restriction,omitempty"` + + // The Default action for traffic that does not match any ip_restriction rule. possible values include Allow and Deny. Defaults to Allow. + IPRestrictionDefaultAction *string `json:"ipRestrictionDefaultAction,omitempty" tf:"ip_restriction_default_action,omitempty"` + + // The Linux FX Version + // The Linux FX Version + LinuxFxVersion *string `json:"linuxFxVersion,omitempty" tf:"linux_fx_version,omitempty"` + + // The Site load balancing mode. Possible values include: WeightedRoundRobin, LeastRequests, LeastResponseTime, WeightedTotalTraffic, RequestHash, PerSiteRoundRobin. Defaults to LeastRequests if omitted. + // The Site load balancing mode. Possible values include: `WeightedRoundRobin`, `LeastRequests`, `LeastResponseTime`, `WeightedTotalTraffic`, `RequestHash`, `PerSiteRoundRobin`. Defaults to `LeastRequests` if omitted. + LoadBalancingMode *string `json:"loadBalancingMode,omitempty" tf:"load_balancing_mode,omitempty"` + + // The Managed Pipeline mode. Possible values include: Integrated, Classic. Defaults to Integrated. + // The Managed Pipeline mode. Possible values include: `Integrated`, `Classic`. Defaults to `Integrated`. + ManagedPipelineMode *string `json:"managedPipelineMode,omitempty" tf:"managed_pipeline_mode,omitempty"` + + // The configures the minimum version of TLS required for SSL requests. Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + // The configures the minimum version of TLS required for SSL requests. Possible values include: `1.0`, `1.1`, and `1.2`. Defaults to `1.2`. + MinimumTLSVersion *string `json:"minimumTlsVersion,omitempty" tf:"minimum_tls_version,omitempty"` + + // The number of pre-warmed instances for this function app. 
Only affects apps on an Elastic Premium plan. + // The number of pre-warmed instances for this function app. Only affects apps on an Elastic Premium plan. + PreWarmedInstanceCount *float64 `json:"preWarmedInstanceCount,omitempty" tf:"pre_warmed_instance_count,omitempty"` + + // Should Remote Debugging be enabled. Defaults to false. + // Should Remote Debugging be enabled. Defaults to `false`. + RemoteDebuggingEnabled *bool `json:"remoteDebuggingEnabled,omitempty" tf:"remote_debugging_enabled,omitempty"` + + // The Remote Debugging Version. Possible values include VS2017, VS2019, and VS2022 + // The Remote Debugging Version. Possible values include `VS2017`, `VS2019`, and `VS2022` + RemoteDebuggingVersion *string `json:"remoteDebuggingVersion,omitempty" tf:"remote_debugging_version,omitempty"` + + // Should Functions Runtime Scale Monitoring be enabled. + // Should Functions Runtime Scale Monitoring be enabled. + RuntimeScaleMonitoringEnabled *bool `json:"runtimeScaleMonitoringEnabled,omitempty" tf:"runtime_scale_monitoring_enabled,omitempty"` + + // a scm_ip_restriction block as detailed below. + ScmIPRestriction []LinuxFunctionAppSlotSiteConfigScmIPRestrictionObservation `json:"scmIpRestriction,omitempty" tf:"scm_ip_restriction,omitempty"` + + // The Default action for traffic that does not match any scm_ip_restriction rule. possible values include Allow and Deny. Defaults to Allow. + ScmIPRestrictionDefaultAction *string `json:"scmIpRestrictionDefaultAction,omitempty" tf:"scm_ip_restriction_default_action,omitempty"` + + // Configures the minimum version of TLS required for SSL requests to the SCM site Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + // Configures the minimum version of TLS required for SSL requests to the SCM site Possible values include: `1.0`, `1.1`, and `1.2`. Defaults to `1.2`. 
+ ScmMinimumTLSVersion *string `json:"scmMinimumTlsVersion,omitempty" tf:"scm_minimum_tls_version,omitempty"` + + // The SCM Type in use by the Linux Function App. + // The SCM Type in use by the Linux Function App. + ScmType *string `json:"scmType,omitempty" tf:"scm_type,omitempty"` + + // Should the Linux Function App ip_restriction configuration be used for the SCM also. + // Should the Linux Function App `ip_restriction` configuration be used for the SCM also. + ScmUseMainIPRestriction *bool `json:"scmUseMainIpRestriction,omitempty" tf:"scm_use_main_ip_restriction,omitempty"` + + // Should the Linux Web App use a 32-bit worker. + // Should the Linux Web App use a 32-bit worker. + Use32BitWorker *bool `json:"use32BitWorker,omitempty" tf:"use_32_bit_worker,omitempty"` + + // Should all outbound traffic to have NAT Gateways, Network Security Groups and User Defined Routes applied? Defaults to false. + // Should all outbound traffic to have Virtual Network Security Groups and User Defined Routes applied? Defaults to `false`. + VnetRouteAllEnabled *bool `json:"vnetRouteAllEnabled,omitempty" tf:"vnet_route_all_enabled,omitempty"` + + // Should Web Sockets be enabled. Defaults to false. + // Should Web Sockets be enabled. Defaults to `false`. + WebsocketsEnabled *bool `json:"websocketsEnabled,omitempty" tf:"websockets_enabled,omitempty"` + + // The number of Workers for this Linux Function App. + // The number of Workers for this Linux Function App. + WorkerCount *float64 `json:"workerCount,omitempty" tf:"worker_count,omitempty"` +} + +type LinuxFunctionAppSlotSiteConfigParameters struct { + + // The URL of the API definition that describes this Linux Function App. + // The URL of the API definition that describes this Linux Function App. + // +kubebuilder:validation:Optional + APIDefinitionURL *string `json:"apiDefinitionUrl,omitempty" tf:"api_definition_url,omitempty"` + + // The ID of the API Management API for this Linux Function App. 
+ // The ID of the API Management API for this Linux Function App. + // +kubebuilder:validation:Optional + APIManagementAPIID *string `json:"apiManagementApiId,omitempty" tf:"api_management_api_id,omitempty"` + + // If this Linux Web App is Always On enabled. Defaults to false. + // If this Linux Web App is Always On enabled. Defaults to `false`. + // +kubebuilder:validation:Optional + AlwaysOn *bool `json:"alwaysOn,omitempty" tf:"always_on,omitempty"` + + // The program and any arguments used to launch this app via the command line. (Example node myapp.js). + // The program and any arguments used to launch this app via the command line. (Example `node myapp.js`). + // +kubebuilder:validation:Optional + AppCommandLine *string `json:"appCommandLine,omitempty" tf:"app_command_line,omitempty"` + + // The number of workers this function app can scale out to. Only applicable to apps on the Consumption and Premium plan. + // The number of workers this function app can scale out to. Only applicable to apps on the Consumption and Premium plan. + // +kubebuilder:validation:Optional + AppScaleLimit *float64 `json:"appScaleLimit,omitempty" tf:"app_scale_limit,omitempty"` + + // an app_service_logs block as detailed below. + // +kubebuilder:validation:Optional + AppServiceLogs *SiteConfigAppServiceLogsParameters `json:"appServiceLogs,omitempty" tf:"app_service_logs,omitempty"` + + // The Connection String for linking the Linux Function App to Application Insights. + // The Connection String for linking the Linux Function App to Application Insights. + // +kubebuilder:validation:Optional + ApplicationInsightsConnectionStringSecretRef *v1.SecretKeySelector `json:"applicationInsightsConnectionStringSecretRef,omitempty" tf:"-"` + + // The Instrumentation Key for connecting the Linux Function App to Application Insights. + // The Instrumentation Key for connecting the Linux Function App to Application Insights. 
+ // +kubebuilder:validation:Optional + ApplicationInsightsKeySecretRef *v1.SecretKeySelector `json:"applicationInsightsKeySecretRef,omitempty" tf:"-"` + + // an application_stack block as detailed below. + // +kubebuilder:validation:Optional + ApplicationStack *SiteConfigApplicationStackParameters `json:"applicationStack,omitempty" tf:"application_stack,omitempty"` + + // The name of the slot to automatically swap with when this slot is successfully deployed. + // +kubebuilder:validation:Optional + AutoSwapSlotName *string `json:"autoSwapSlotName,omitempty" tf:"auto_swap_slot_name,omitempty"` + + // The Client ID of the Managed Service Identity to use for connections to the Azure Container Registry. + // The Client ID of the Managed Service Identity to use for connections to the Azure Container Registry. + // +kubebuilder:validation:Optional + ContainerRegistryManagedIdentityClientID *string `json:"containerRegistryManagedIdentityClientId,omitempty" tf:"container_registry_managed_identity_client_id,omitempty"` + + // Should connections for Azure Container Registry use Managed Identity. + // Should connections for Azure Container Registry use Managed Identity. + // +kubebuilder:validation:Optional + ContainerRegistryUseManagedIdentity *bool `json:"containerRegistryUseManagedIdentity,omitempty" tf:"container_registry_use_managed_identity,omitempty"` + + // a cors block as detailed below. + // +kubebuilder:validation:Optional + Cors *LinuxFunctionAppSlotSiteConfigCorsParameters `json:"cors,omitempty" tf:"cors,omitempty"` + + // Specifies a list of Default Documents for the Linux Web App. + // Specifies a list of Default Documents for the Linux Web App. + // +kubebuilder:validation:Optional + DefaultDocuments []*string `json:"defaultDocuments,omitempty" tf:"default_documents,omitempty"` + + // The number of minimum instances for this Linux Function App. Only affects apps on Elastic Premium plans. + // The number of minimum instances for this Linux Function App. 
Only affects apps on Elastic Premium plans. + // +kubebuilder:validation:Optional + ElasticInstanceMinimum *float64 `json:"elasticInstanceMinimum,omitempty" tf:"elastic_instance_minimum,omitempty"` + + // State of FTP / FTPS service for this function app. Possible values include: AllAllowed, FtpsOnly and Disabled. Defaults to Disabled. + // State of FTP / FTPS service for this function app. Possible values include: `AllAllowed`, `FtpsOnly` and `Disabled`. Defaults to `Disabled`. + // +kubebuilder:validation:Optional + FtpsState *string `json:"ftpsState,omitempty" tf:"ftps_state,omitempty"` + + // The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between 2 and 10. Defaults to 0. Only valid in conjunction with health_check_path. + // The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between `2` and `10`. Defaults to `10`. Only valid in conjunction with `health_check_path` + // +kubebuilder:validation:Optional + HealthCheckEvictionTimeInMin *float64 `json:"healthCheckEvictionTimeInMin,omitempty" tf:"health_check_eviction_time_in_min,omitempty"` + + // The path to be checked for this function app health. + // The path to be checked for this function app health. + // +kubebuilder:validation:Optional + HealthCheckPath *string `json:"healthCheckPath,omitempty" tf:"health_check_path,omitempty"` + + // Specifies if the HTTP2 protocol should be enabled. Defaults to false. + // Specifies if the http2 protocol should be enabled. Defaults to `false`. + // +kubebuilder:validation:Optional + Http2Enabled *bool `json:"http2Enabled,omitempty" tf:"http2_enabled,omitempty"` + + // an ip_restriction block as detailed below. 
+ // +kubebuilder:validation:Optional + IPRestriction []LinuxFunctionAppSlotSiteConfigIPRestrictionParameters `json:"ipRestriction,omitempty" tf:"ip_restriction,omitempty"` + + // The Default action for traffic that does not match any ip_restriction rule. possible values include Allow and Deny. Defaults to Allow. + // +kubebuilder:validation:Optional + IPRestrictionDefaultAction *string `json:"ipRestrictionDefaultAction,omitempty" tf:"ip_restriction_default_action,omitempty"` + + // The Site load balancing mode. Possible values include: WeightedRoundRobin, LeastRequests, LeastResponseTime, WeightedTotalTraffic, RequestHash, PerSiteRoundRobin. Defaults to LeastRequests if omitted. + // The Site load balancing mode. Possible values include: `WeightedRoundRobin`, `LeastRequests`, `LeastResponseTime`, `WeightedTotalTraffic`, `RequestHash`, `PerSiteRoundRobin`. Defaults to `LeastRequests` if omitted. + // +kubebuilder:validation:Optional + LoadBalancingMode *string `json:"loadBalancingMode,omitempty" tf:"load_balancing_mode,omitempty"` + + // The Managed Pipeline mode. Possible values include: Integrated, Classic. Defaults to Integrated. + // The Managed Pipeline mode. Possible values include: `Integrated`, `Classic`. Defaults to `Integrated`. + // +kubebuilder:validation:Optional + ManagedPipelineMode *string `json:"managedPipelineMode,omitempty" tf:"managed_pipeline_mode,omitempty"` + + // The configures the minimum version of TLS required for SSL requests. Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + // The configures the minimum version of TLS required for SSL requests. Possible values include: `1.0`, `1.1`, and `1.2`. Defaults to `1.2`. + // +kubebuilder:validation:Optional + MinimumTLSVersion *string `json:"minimumTlsVersion,omitempty" tf:"minimum_tls_version,omitempty"` + + // The number of pre-warmed instances for this function app. Only affects apps on an Elastic Premium plan. + // The number of pre-warmed instances for this function app. 
Only affects apps on an Elastic Premium plan. + // +kubebuilder:validation:Optional + PreWarmedInstanceCount *float64 `json:"preWarmedInstanceCount,omitempty" tf:"pre_warmed_instance_count,omitempty"` + + // Should Remote Debugging be enabled. Defaults to false. + // Should Remote Debugging be enabled. Defaults to `false`. + // +kubebuilder:validation:Optional + RemoteDebuggingEnabled *bool `json:"remoteDebuggingEnabled,omitempty" tf:"remote_debugging_enabled,omitempty"` + + // The Remote Debugging Version. Possible values include VS2017, VS2019, and VS2022 + // The Remote Debugging Version. Possible values include `VS2017`, `VS2019`, and `VS2022` + // +kubebuilder:validation:Optional + RemoteDebuggingVersion *string `json:"remoteDebuggingVersion,omitempty" tf:"remote_debugging_version,omitempty"` + + // Should Functions Runtime Scale Monitoring be enabled. + // Should Functions Runtime Scale Monitoring be enabled. + // +kubebuilder:validation:Optional + RuntimeScaleMonitoringEnabled *bool `json:"runtimeScaleMonitoringEnabled,omitempty" tf:"runtime_scale_monitoring_enabled,omitempty"` + + // a scm_ip_restriction block as detailed below. + // +kubebuilder:validation:Optional + ScmIPRestriction []LinuxFunctionAppSlotSiteConfigScmIPRestrictionParameters `json:"scmIpRestriction,omitempty" tf:"scm_ip_restriction,omitempty"` + + // The Default action for traffic that does not match any scm_ip_restriction rule. possible values include Allow and Deny. Defaults to Allow. + // +kubebuilder:validation:Optional + ScmIPRestrictionDefaultAction *string `json:"scmIpRestrictionDefaultAction,omitempty" tf:"scm_ip_restriction_default_action,omitempty"` + + // Configures the minimum version of TLS required for SSL requests to the SCM site Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + // Configures the minimum version of TLS required for SSL requests to the SCM site Possible values include: `1.0`, `1.1`, and `1.2`. Defaults to `1.2`. 
+ // +kubebuilder:validation:Optional + ScmMinimumTLSVersion *string `json:"scmMinimumTlsVersion,omitempty" tf:"scm_minimum_tls_version,omitempty"` + + // Should the Linux Function App ip_restriction configuration be used for the SCM also. + // Should the Linux Function App `ip_restriction` configuration be used for the SCM also. + // +kubebuilder:validation:Optional + ScmUseMainIPRestriction *bool `json:"scmUseMainIpRestriction,omitempty" tf:"scm_use_main_ip_restriction,omitempty"` + + // Should the Linux Web App use a 32-bit worker. + // Should the Linux Web App use a 32-bit worker. + // +kubebuilder:validation:Optional + Use32BitWorker *bool `json:"use32BitWorker,omitempty" tf:"use_32_bit_worker,omitempty"` + + // Should all outbound traffic to have NAT Gateways, Network Security Groups and User Defined Routes applied? Defaults to false. + // Should all outbound traffic to have Virtual Network Security Groups and User Defined Routes applied? Defaults to `false`. + // +kubebuilder:validation:Optional + VnetRouteAllEnabled *bool `json:"vnetRouteAllEnabled,omitempty" tf:"vnet_route_all_enabled,omitempty"` + + // Should Web Sockets be enabled. Defaults to false. + // Should Web Sockets be enabled. Defaults to `false`. + // +kubebuilder:validation:Optional + WebsocketsEnabled *bool `json:"websocketsEnabled,omitempty" tf:"websockets_enabled,omitempty"` + + // The number of Workers for this Linux Function App. + // The number of Workers for this Linux Function App. + // +kubebuilder:validation:Optional + WorkerCount *float64 `json:"workerCount,omitempty" tf:"worker_count,omitempty"` +} + +type LinuxFunctionAppSlotSiteConfigScmIPRestrictionHeadersInitParameters struct { + + // Specifies a list of Azure Front Door IDs. + XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid"` + + // Specifies if a Front Door Health Probe should be expected. The only possible value is 1. 
+ XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe"` + + // Specifies a list of addresses for which matching should be applied. Omitting this value means allow any. + XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for"` + + // Specifies a list of Hosts for which matching should be applied. + XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host"` +} + +type LinuxFunctionAppSlotSiteConfigScmIPRestrictionHeadersObservation struct { + + // Specifies a list of Azure Front Door IDs. + XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid,omitempty"` + + // Specifies if a Front Door Health Probe should be expected. The only possible value is 1. + XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe,omitempty"` + + // Specifies a list of addresses for which matching should be applied. Omitting this value means allow any. + XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for,omitempty"` + + // Specifies a list of Hosts for which matching should be applied. + XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host,omitempty"` +} + +type LinuxFunctionAppSlotSiteConfigScmIPRestrictionHeadersParameters struct { + + // Specifies a list of Azure Front Door IDs. + // +kubebuilder:validation:Optional + XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid"` + + // Specifies if a Front Door Health Probe should be expected. The only possible value is 1. + // +kubebuilder:validation:Optional + XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe"` + + // Specifies a list of addresses for which matching should be applied. Omitting this value means allow any. + // +kubebuilder:validation:Optional + XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for"` + + // Specifies a list of Hosts for which matching should be applied. 
+ // +kubebuilder:validation:Optional + XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host"` +} + +type LinuxFunctionAppSlotSiteConfigScmIPRestrictionInitParameters struct { + + // The action to take. Possible values are Allow or Deny. Defaults to Allow. + // The action to take. Possible values are `Allow` or `Deny`. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The Description of this IP Restriction. + // The description of the IP restriction rule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // a headers block as detailed below. + Headers []LinuxFunctionAppSlotSiteConfigScmIPRestrictionHeadersInitParameters `json:"headers,omitempty" tf:"headers,omitempty"` + + // The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + // The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // The name which should be used for this Storage Account. + // The name which should be used for this `ip_restriction`. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The priority value of this ip_restriction. Defaults to 65000. + // The priority value of this `ip_restriction`. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Service Tag used for this IP Restriction. + // The Service Tag used for this IP Restriction. + ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag,omitempty"` + + // The subnet id which will be used by this Function App Slot for regional virtual network integration. + // The Virtual Network Subnet ID used for this IP Restriction. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDRef *v1.Reference `json:"virtualNetworkSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDSelector *v1.Selector `json:"virtualNetworkSubnetIdSelector,omitempty" tf:"-"` +} + +type LinuxFunctionAppSlotSiteConfigScmIPRestrictionObservation struct { + + // The action to take. Possible values are Allow or Deny. Defaults to Allow. + // The action to take. Possible values are `Allow` or `Deny`. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The Description of this IP Restriction. + // The description of the IP restriction rule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // a headers block as detailed below. + Headers []LinuxFunctionAppSlotSiteConfigScmIPRestrictionHeadersObservation `json:"headers,omitempty" tf:"headers,omitempty"` + + // The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + // The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // The name which should be used for this Storage Account. + // The name which should be used for this `ip_restriction`. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The priority value of this ip_restriction. Defaults to 65000. 
+ // The priority value of this `ip_restriction`. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Service Tag used for this IP Restriction. + // The Service Tag used for this IP Restriction. + ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag,omitempty"` + + // The subnet id which will be used by this Function App Slot for regional virtual network integration. + // The Virtual Network Subnet ID used for this IP Restriction. + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` +} + +type LinuxFunctionAppSlotSiteConfigScmIPRestrictionParameters struct { + + // The action to take. Possible values are Allow or Deny. Defaults to Allow. + // The action to take. Possible values are `Allow` or `Deny`. + // +kubebuilder:validation:Optional + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The Description of this IP Restriction. + // The description of the IP restriction rule. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // a headers block as detailed below. + // +kubebuilder:validation:Optional + Headers []LinuxFunctionAppSlotSiteConfigScmIPRestrictionHeadersParameters `json:"headers,omitempty" tf:"headers,omitempty"` + + // The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + // The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + // +kubebuilder:validation:Optional + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // The name which should be used for this Storage Account. + // The name which should be used for this `ip_restriction`. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The priority value of this ip_restriction. Defaults to 65000. 
+ // The priority value of this `ip_restriction`. + // +kubebuilder:validation:Optional + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Service Tag used for this IP Restriction. + // The Service Tag used for this IP Restriction. + // +kubebuilder:validation:Optional + ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag,omitempty"` + + // The subnet id which will be used by this Function App Slot for regional virtual network integration. + // The Virtual Network Subnet ID used for this IP Restriction. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDRef *v1.Reference `json:"virtualNetworkSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDSelector *v1.Selector `json:"virtualNetworkSubnetIdSelector,omitempty" tf:"-"` +} + +type LinuxFunctionAppSlotSiteCredentialInitParameters struct { +} + +type LinuxFunctionAppSlotSiteCredentialObservation struct { + + // The Site Credentials Username used for publishing. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Site Credentials Password used for publishing. + Password *string `json:"password,omitempty" tf:"password,omitempty"` +} + +type LinuxFunctionAppSlotSiteCredentialParameters struct { +} + +type LinuxFunctionAppSlotStorageAccountInitParameters struct { + + // The Name of the Storage Account. 
+ AccountName *string `json:"accountName,omitempty" tf:"account_name,omitempty"` + + // The path at which to mount the storage share. + MountPath *string `json:"mountPath,omitempty" tf:"mount_path,omitempty"` + + // The name which should be used for this Storage Account. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Name of the File Share or Container Name for Blob storage. + ShareName *string `json:"shareName,omitempty" tf:"share_name,omitempty"` + + // The Azure Storage Type. Possible values include AzureFiles and AzureBlob. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type LinuxFunctionAppSlotStorageAccountObservation struct { + + // The Name of the Storage Account. + AccountName *string `json:"accountName,omitempty" tf:"account_name,omitempty"` + + // The path at which to mount the storage share. + MountPath *string `json:"mountPath,omitempty" tf:"mount_path,omitempty"` + + // The name which should be used for this Storage Account. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Name of the File Share or Container Name for Blob storage. + ShareName *string `json:"shareName,omitempty" tf:"share_name,omitempty"` + + // The Azure Storage Type. Possible values include AzureFiles and AzureBlob. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type LinuxFunctionAppSlotStorageAccountParameters struct { + + // The Access key for the storage account. + // +kubebuilder:validation:Required + AccessKeySecretRef v1.SecretKeySelector `json:"accessKeySecretRef" tf:"-"` + + // The Name of the Storage Account. + // +kubebuilder:validation:Optional + AccountName *string `json:"accountName" tf:"account_name,omitempty"` + + // The path at which to mount the storage share. + // +kubebuilder:validation:Optional + MountPath *string `json:"mountPath,omitempty" tf:"mount_path,omitempty"` + + // The name which should be used for this Storage Account. 
+ // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The Name of the File Share or Container Name for Blob storage. + // +kubebuilder:validation:Optional + ShareName *string `json:"shareName" tf:"share_name,omitempty"` + + // The Azure Storage Type. Possible values include AzureFiles and AzureBlob. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type SiteConfigAppServiceLogsInitParameters struct { + + // The amount of disk space to use for logs. Valid values are between 25 and 100. Defaults to 35. + // The amount of disk space to use for logs. Valid values are between `25` and `100`. + DiskQuotaMb *float64 `json:"diskQuotaMb,omitempty" tf:"disk_quota_mb,omitempty"` + + // After how many days backups should be deleted. Defaults to 30. + // The retention period for logs in days. Valid values are between `0` and `99999`. Defaults to `0` (never delete). + RetentionPeriodDays *float64 `json:"retentionPeriodDays,omitempty" tf:"retention_period_days,omitempty"` +} + +type SiteConfigAppServiceLogsObservation struct { + + // The amount of disk space to use for logs. Valid values are between 25 and 100. Defaults to 35. + // The amount of disk space to use for logs. Valid values are between `25` and `100`. + DiskQuotaMb *float64 `json:"diskQuotaMb,omitempty" tf:"disk_quota_mb,omitempty"` + + // After how many days backups should be deleted. Defaults to 30. + // The retention period for logs in days. Valid values are between `0` and `99999`. Defaults to `0` (never delete). + RetentionPeriodDays *float64 `json:"retentionPeriodDays,omitempty" tf:"retention_period_days,omitempty"` +} + +type SiteConfigAppServiceLogsParameters struct { + + // The amount of disk space to use for logs. Valid values are between 25 and 100. Defaults to 35. + // The amount of disk space to use for logs. Valid values are between `25` and `100`. 
+ // +kubebuilder:validation:Optional + DiskQuotaMb *float64 `json:"diskQuotaMb,omitempty" tf:"disk_quota_mb,omitempty"` + + // After how many days backups should be deleted. Defaults to 30. + // The retention period for logs in days. Valid values are between `0` and `99999`. Defaults to `0` (never delete). + // +kubebuilder:validation:Optional + RetentionPeriodDays *float64 `json:"retentionPeriodDays,omitempty" tf:"retention_period_days,omitempty"` +} + +type SiteConfigApplicationStackInitParameters struct { + + // a docker block as detailed below. + // A docker block + Docker []ApplicationStackDockerInitParameters `json:"docker,omitempty" tf:"docker,omitempty"` + + // The version of .Net. Possible values are 3.1, 6.0, 7.0 and 8.0. + // The version of .Net. Possible values are `3.1`, `6.0` and `7.0` + DotnetVersion *string `json:"dotnetVersion,omitempty" tf:"dotnet_version,omitempty"` + + // The version of Java to use. Possible values are 8, 11 & 17 (In-Preview). + // The version of Java to use. Possible values are `8`, `11`, and `17` + JavaVersion *string `json:"javaVersion,omitempty" tf:"java_version,omitempty"` + + // The version of Node to use. Possible values include 12, 14, 16 and 18 + // The version of Node to use. Possible values include `12`, `14`, `16` and `18` + NodeVersion *string `json:"nodeVersion,omitempty" tf:"node_version,omitempty"` + + // The version of PowerShell Core to use. Possibles values are 7 , and 7.2. + // The version of PowerShell Core to use. Possibles values are `7`, and `7.2` + PowershellCoreVersion *string `json:"powershellCoreVersion,omitempty" tf:"powershell_core_version,omitempty"` + + // The version of Python to use. Possible values are 3.12, 3.11, 3.10, 3.9, 3.8 and 3.7. + // The version of Python to use. Possible values include `3.12`, `3.11`, `3.10`, `3.9`, `3.8`, and `3.7`. + PythonVersion *string `json:"pythonVersion,omitempty" tf:"python_version,omitempty"` + + // Should the Linux Function App use a custom runtime? 
+ UseCustomRuntime *bool `json:"useCustomRuntime,omitempty" tf:"use_custom_runtime,omitempty"` + + // Should the DotNet process use an isolated runtime. Defaults to false. + // Should the DotNet process use an isolated runtime. Defaults to `false`. + UseDotnetIsolatedRuntime *bool `json:"useDotnetIsolatedRuntime,omitempty" tf:"use_dotnet_isolated_runtime,omitempty"` +} + +type SiteConfigApplicationStackObservation struct { + + // a docker block as detailed below. + // A docker block + Docker []ApplicationStackDockerObservation `json:"docker,omitempty" tf:"docker,omitempty"` + + // The version of .Net. Possible values are 3.1, 6.0, 7.0 and 8.0. + // The version of .Net. Possible values are `3.1`, `6.0` and `7.0` + DotnetVersion *string `json:"dotnetVersion,omitempty" tf:"dotnet_version,omitempty"` + + // The version of Java to use. Possible values are 8, 11 & 17 (In-Preview). + // The version of Java to use. Possible values are `8`, `11`, and `17` + JavaVersion *string `json:"javaVersion,omitempty" tf:"java_version,omitempty"` + + // The version of Node to use. Possible values include 12, 14, 16 and 18 + // The version of Node to use. Possible values include `12`, `14`, `16` and `18` + NodeVersion *string `json:"nodeVersion,omitempty" tf:"node_version,omitempty"` + + // The version of PowerShell Core to use. Possibles values are 7 , and 7.2. + // The version of PowerShell Core to use. Possibles values are `7`, and `7.2` + PowershellCoreVersion *string `json:"powershellCoreVersion,omitempty" tf:"powershell_core_version,omitempty"` + + // The version of Python to use. Possible values are 3.12, 3.11, 3.10, 3.9, 3.8 and 3.7. + // The version of Python to use. Possible values include `3.12`, `3.11`, `3.10`, `3.9`, `3.8`, and `3.7`. + PythonVersion *string `json:"pythonVersion,omitempty" tf:"python_version,omitempty"` + + // Should the Linux Function App use a custom runtime? 
+ UseCustomRuntime *bool `json:"useCustomRuntime,omitempty" tf:"use_custom_runtime,omitempty"` + + // Should the DotNet process use an isolated runtime. Defaults to false. + // Should the DotNet process use an isolated runtime. Defaults to `false`. + UseDotnetIsolatedRuntime *bool `json:"useDotnetIsolatedRuntime,omitempty" tf:"use_dotnet_isolated_runtime,omitempty"` +} + +type SiteConfigApplicationStackParameters struct { + + // a docker block as detailed below. + // A docker block + // +kubebuilder:validation:Optional + Docker []ApplicationStackDockerParameters `json:"docker,omitempty" tf:"docker,omitempty"` + + // The version of .Net. Possible values are 3.1, 6.0, 7.0 and 8.0. + // The version of .Net. Possible values are `3.1`, `6.0` and `7.0` + // +kubebuilder:validation:Optional + DotnetVersion *string `json:"dotnetVersion,omitempty" tf:"dotnet_version,omitempty"` + + // The version of Java to use. Possible values are 8, 11 & 17 (In-Preview). + // The version of Java to use. Possible values are `8`, `11`, and `17` + // +kubebuilder:validation:Optional + JavaVersion *string `json:"javaVersion,omitempty" tf:"java_version,omitempty"` + + // The version of Node to use. Possible values include 12, 14, 16 and 18 + // The version of Node to use. Possible values include `12`, `14`, `16` and `18` + // +kubebuilder:validation:Optional + NodeVersion *string `json:"nodeVersion,omitempty" tf:"node_version,omitempty"` + + // The version of PowerShell Core to use. Possibles values are 7 , and 7.2. + // The version of PowerShell Core to use. Possibles values are `7`, and `7.2` + // +kubebuilder:validation:Optional + PowershellCoreVersion *string `json:"powershellCoreVersion,omitempty" tf:"powershell_core_version,omitempty"` + + // The version of Python to use. Possible values are 3.12, 3.11, 3.10, 3.9, 3.8 and 3.7. + // The version of Python to use. Possible values include `3.12`, `3.11`, `3.10`, `3.9`, `3.8`, and `3.7`. 
+ // +kubebuilder:validation:Optional + PythonVersion *string `json:"pythonVersion,omitempty" tf:"python_version,omitempty"` + + // Should the Linux Function App use a custom runtime? + // +kubebuilder:validation:Optional + UseCustomRuntime *bool `json:"useCustomRuntime,omitempty" tf:"use_custom_runtime,omitempty"` + + // Should the DotNet process use an isolated runtime. Defaults to false. + // Should the DotNet process use an isolated runtime. Defaults to `false`. + // +kubebuilder:validation:Optional + UseDotnetIsolatedRuntime *bool `json:"useDotnetIsolatedRuntime,omitempty" tf:"use_dotnet_isolated_runtime,omitempty"` +} + +// LinuxFunctionAppSlotSpec defines the desired state of LinuxFunctionAppSlot +type LinuxFunctionAppSlotSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider LinuxFunctionAppSlotParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider LinuxFunctionAppSlotInitParameters `json:"initProvider,omitempty"` +} + +// LinuxFunctionAppSlotStatus defines the observed state of LinuxFunctionAppSlot. +type LinuxFunctionAppSlotStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider LinuxFunctionAppSlotObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// LinuxFunctionAppSlot is the Schema for the LinuxFunctionAppSlots API. 
Manages a Linux Function App Slot. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type LinuxFunctionAppSlot struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.siteConfig) || (has(self.initProvider) && has(self.initProvider.siteConfig))",message="spec.forProvider.siteConfig is a required parameter" + Spec LinuxFunctionAppSlotSpec `json:"spec"` + Status LinuxFunctionAppSlotStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// LinuxFunctionAppSlotList contains a list of LinuxFunctionAppSlots +type LinuxFunctionAppSlotList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []LinuxFunctionAppSlot `json:"items"` +} + +// Repository type metadata. +var ( + LinuxFunctionAppSlot_Kind = "LinuxFunctionAppSlot" + LinuxFunctionAppSlot_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: LinuxFunctionAppSlot_Kind}.String() + LinuxFunctionAppSlot_KindAPIVersion = LinuxFunctionAppSlot_Kind + "." 
+ CRDGroupVersion.String() + LinuxFunctionAppSlot_GroupVersionKind = CRDGroupVersion.WithKind(LinuxFunctionAppSlot_Kind) +) + +func init() { + SchemeBuilder.Register(&LinuxFunctionAppSlot{}, &LinuxFunctionAppSlotList{}) +} diff --git a/apis/web/v1beta2/zz_linuxwebapp_terraformed.go b/apis/web/v1beta2/zz_linuxwebapp_terraformed.go new file mode 100755 index 000000000..0d8ccd515 --- /dev/null +++ b/apis/web/v1beta2/zz_linuxwebapp_terraformed.go @@ -0,0 +1,130 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this LinuxWebApp +func (mg *LinuxWebApp) GetTerraformResourceType() string { + return "azurerm_linux_web_app" +} + +// GetConnectionDetailsMapping for this LinuxWebApp +func (tr *LinuxWebApp) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"auth_settings[*].active_directory[*].client_secret": "spec.forProvider.authSettings[*].activeDirectory[*].clientSecretSecretRef", "auth_settings[*].facebook[*].app_secret": "spec.forProvider.authSettings[*].facebook[*].appSecretSecretRef", "auth_settings[*].github[*].client_secret": "spec.forProvider.authSettings[*].github[*].clientSecretSecretRef", "auth_settings[*].google[*].client_secret": "spec.forProvider.authSettings[*].google[*].clientSecretSecretRef", "auth_settings[*].microsoft[*].client_secret": "spec.forProvider.authSettings[*].microsoft[*].clientSecretSecretRef", "auth_settings[*].twitter[*].consumer_secret": "spec.forProvider.authSettings[*].twitter[*].consumerSecretSecretRef", "backup[*].storage_account_url": "spec.forProvider.backup[*].storageAccountUrlSecretRef", "connection_string[*].value": "spec.forProvider.connectionString[*].valueSecretRef", 
"custom_domain_verification_id": "status.atProvider.customDomainVerificationId", "logs[*].http_logs[*].azure_blob_storage[*].sas_url": "spec.forProvider.logs[*].httpLogs[*].azureBlobStorage[*].sasUrlSecretRef", "site_config[*].application_stack[*].docker_registry_password": "spec.forProvider.siteConfig[*].applicationStack[*].dockerRegistryPasswordSecretRef", "site_credential[*]": "status.atProvider.siteCredential[*]", "storage_account[*].access_key": "spec.forProvider.storageAccount[*].accessKeySecretRef"} +} + +// GetObservation of this LinuxWebApp +func (tr *LinuxWebApp) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this LinuxWebApp +func (tr *LinuxWebApp) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this LinuxWebApp +func (tr *LinuxWebApp) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this LinuxWebApp +func (tr *LinuxWebApp) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this LinuxWebApp +func (tr *LinuxWebApp) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this LinuxWebApp +func (tr *LinuxWebApp) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := 
map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this LinuxWebApp +func (tr *LinuxWebApp) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this LinuxWebApp using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *LinuxWebApp) LateInitialize(attrs []byte) (bool, error) { + params := &LinuxWebAppParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + opts = append(opts, resource.WithNameFilter("KeyVaultReferenceIdentityID")) + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *LinuxWebApp) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/web/v1beta2/zz_linuxwebapp_types.go b/apis/web/v1beta2/zz_linuxwebapp_types.go new file mode 100755 index 000000000..a38e9767f --- /dev/null +++ b/apis/web/v1beta2/zz_linuxwebapp_types.go @@ -0,0 +1,3557 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ActionInitParameters struct { + + // Predefined action to be taken to an Auto Heal trigger. Possible values include: Recycle. + ActionType *string `json:"actionType,omitempty" tf:"action_type,omitempty"` + + // The minimum amount of time in hh:mm:ss the Linux Web App must have been running before the defined action will be run in the event of a trigger. + MinimumProcessExecutionTime *string `json:"minimumProcessExecutionTime,omitempty" tf:"minimum_process_execution_time,omitempty"` +} + +type ActionObservation struct { + + // Predefined action to be taken to an Auto Heal trigger. Possible values include: Recycle. + ActionType *string `json:"actionType,omitempty" tf:"action_type,omitempty"` + + // The minimum amount of time in hh:mm:ss the Linux Web App must have been running before the defined action will be run in the event of a trigger. + MinimumProcessExecutionTime *string `json:"minimumProcessExecutionTime,omitempty" tf:"minimum_process_execution_time,omitempty"` +} + +type ActionParameters struct { + + // Predefined action to be taken to an Auto Heal trigger. Possible values include: Recycle. 
+ // +kubebuilder:validation:Optional + ActionType *string `json:"actionType" tf:"action_type,omitempty"` + + // The minimum amount of time in hh:mm:ss the Linux Web App must have been running before the defined action will be run in the event of a trigger. + // +kubebuilder:validation:Optional + MinimumProcessExecutionTime *string `json:"minimumProcessExecutionTime,omitempty" tf:"minimum_process_execution_time,omitempty"` +} + +type ApplicationLogsInitParameters struct { + + // A azure_blob_storage_http block as defined below. + AzureBlobStorage *AzureBlobStorageInitParameters `json:"azureBlobStorage,omitempty" tf:"azure_blob_storage,omitempty"` + + // Log level. Possible values include: Off, Verbose, Information, Warning, and Error. + FileSystemLevel *string `json:"fileSystemLevel,omitempty" tf:"file_system_level,omitempty"` +} + +type ApplicationLogsObservation struct { + + // A azure_blob_storage_http block as defined below. + AzureBlobStorage *AzureBlobStorageObservation `json:"azureBlobStorage,omitempty" tf:"azure_blob_storage,omitempty"` + + // Log level. Possible values include: Off, Verbose, Information, Warning, and Error. + FileSystemLevel *string `json:"fileSystemLevel,omitempty" tf:"file_system_level,omitempty"` +} + +type ApplicationLogsParameters struct { + + // A azure_blob_storage_http block as defined below. + // +kubebuilder:validation:Optional + AzureBlobStorage *AzureBlobStorageParameters `json:"azureBlobStorage,omitempty" tf:"azure_blob_storage,omitempty"` + + // Log level. Possible values include: Off, Verbose, Information, Warning, and Error. + // +kubebuilder:validation:Optional + FileSystemLevel *string `json:"fileSystemLevel" tf:"file_system_level,omitempty"` +} + +type AutoHealSettingInitParameters struct { + + // The action to take. Possible values are Allow or Deny. Defaults to Allow. + Action *ActionInitParameters `json:"action,omitempty" tf:"action,omitempty"` + + // A trigger block as defined below. 
+ Trigger *TriggerInitParameters `json:"trigger,omitempty" tf:"trigger,omitempty"` +} + +type AutoHealSettingObservation struct { + + // The action to take. Possible values are Allow or Deny. Defaults to Allow. + Action *ActionObservation `json:"action,omitempty" tf:"action,omitempty"` + + // A trigger block as defined below. + Trigger *TriggerObservation `json:"trigger,omitempty" tf:"trigger,omitempty"` +} + +type AutoHealSettingParameters struct { + + // The action to take. Possible values are Allow or Deny. Defaults to Allow. + // +kubebuilder:validation:Optional + Action *ActionParameters `json:"action,omitempty" tf:"action,omitempty"` + + // A trigger block as defined below. + // +kubebuilder:validation:Optional + Trigger *TriggerParameters `json:"trigger,omitempty" tf:"trigger,omitempty"` +} + +type AzureBlobStorageInitParameters struct { + + // The level at which to log. Possible values include Error, Warning, Information, Verbose and Off. NOTE: this field is not available for http_logs + Level *string `json:"level,omitempty" tf:"level,omitempty"` + + // The retention period in days. A value of 0 means no retention. + RetentionInDays *float64 `json:"retentionInDays,omitempty" tf:"retention_in_days,omitempty"` + + // SAS url to an Azure blob container with read/write/list/delete permissions. + SASURL *string `json:"sasUrl,omitempty" tf:"sas_url,omitempty"` +} + +type AzureBlobStorageObservation struct { + + // The level at which to log. Possible values include Error, Warning, Information, Verbose and Off. NOTE: this field is not available for http_logs + Level *string `json:"level,omitempty" tf:"level,omitempty"` + + // The retention period in days. A value of 0 means no retention. + RetentionInDays *float64 `json:"retentionInDays,omitempty" tf:"retention_in_days,omitempty"` + + // SAS url to an Azure blob container with read/write/list/delete permissions. 
+ SASURL *string `json:"sasUrl,omitempty" tf:"sas_url,omitempty"` +} + +type AzureBlobStorageParameters struct { + + // The level at which to log. Possible values include Error, Warning, Information, Verbose and Off. NOTE: this field is not available for http_logs + // +kubebuilder:validation:Optional + Level *string `json:"level" tf:"level,omitempty"` + + // The retention period in days. A value of 0 means no retention. + // +kubebuilder:validation:Optional + RetentionInDays *float64 `json:"retentionInDays" tf:"retention_in_days,omitempty"` + + // SAS url to an Azure blob container with read/write/list/delete permissions. + // +kubebuilder:validation:Optional + SASURL *string `json:"sasUrl" tf:"sas_url,omitempty"` +} + +type FileSystemInitParameters struct { + + // The retention period in days. A value of 0 means no retention. + RetentionInDays *float64 `json:"retentionInDays,omitempty" tf:"retention_in_days,omitempty"` + + // The maximum size in megabytes that log files can use. + RetentionInMb *float64 `json:"retentionInMb,omitempty" tf:"retention_in_mb,omitempty"` +} + +type FileSystemObservation struct { + + // The retention period in days. A value of 0 means no retention. + RetentionInDays *float64 `json:"retentionInDays,omitempty" tf:"retention_in_days,omitempty"` + + // The maximum size in megabytes that log files can use. + RetentionInMb *float64 `json:"retentionInMb,omitempty" tf:"retention_in_mb,omitempty"` +} + +type FileSystemParameters struct { + + // The retention period in days. A value of 0 means no retention. + // +kubebuilder:validation:Optional + RetentionInDays *float64 `json:"retentionInDays" tf:"retention_in_days,omitempty"` + + // The maximum size in megabytes that log files can use. + // +kubebuilder:validation:Optional + RetentionInMb *float64 `json:"retentionInMb" tf:"retention_in_mb,omitempty"` +} + +type HTTPLogsAzureBlobStorageInitParameters struct { + + // The retention period in days. A value of 0 means no retention. 
+ RetentionInDays *float64 `json:"retentionInDays,omitempty" tf:"retention_in_days,omitempty"` +} + +type HTTPLogsAzureBlobStorageObservation struct { + + // The retention period in days. A value of 0 means no retention. + RetentionInDays *float64 `json:"retentionInDays,omitempty" tf:"retention_in_days,omitempty"` +} + +type HTTPLogsAzureBlobStorageParameters struct { + + // The retention period in days. A value of 0 means no retention. + // +kubebuilder:validation:Optional + RetentionInDays *float64 `json:"retentionInDays,omitempty" tf:"retention_in_days,omitempty"` + + // SAS url to an Azure blob container with read/write/list/delete permissions. + // +kubebuilder:validation:Required + SASURLSecretRef v1.SecretKeySelector `json:"sasurlSecretRef" tf:"-"` +} + +type HTTPLogsInitParameters struct { + + // A azure_blob_storage_http block as defined below. + AzureBlobStorage *HTTPLogsAzureBlobStorageInitParameters `json:"azureBlobStorage,omitempty" tf:"azure_blob_storage,omitempty"` + + // A file_system block as defined above. + FileSystem *FileSystemInitParameters `json:"fileSystem,omitempty" tf:"file_system,omitempty"` +} + +type HTTPLogsObservation struct { + + // A azure_blob_storage_http block as defined below. + AzureBlobStorage *HTTPLogsAzureBlobStorageObservation `json:"azureBlobStorage,omitempty" tf:"azure_blob_storage,omitempty"` + + // A file_system block as defined above. + FileSystem *FileSystemObservation `json:"fileSystem,omitempty" tf:"file_system,omitempty"` +} + +type HTTPLogsParameters struct { + + // A azure_blob_storage_http block as defined below. + // +kubebuilder:validation:Optional + AzureBlobStorage *HTTPLogsAzureBlobStorageParameters `json:"azureBlobStorage,omitempty" tf:"azure_blob_storage,omitempty"` + + // A file_system block as defined above. 
+ // +kubebuilder:validation:Optional + FileSystem *FileSystemParameters `json:"fileSystem,omitempty" tf:"file_system,omitempty"` +} + +type LinuxWebAppAuthSettingsActiveDirectoryInitParameters struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Active Directory. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The App Setting name that contains the client secret of the Client. Cannot be used with `client_secret`. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` +} + +type LinuxWebAppAuthSettingsActiveDirectoryObservation struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Active Directory. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The App Setting name that contains the client secret of the Client. Cannot be used with `client_secret`. 
+ ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` +} + +type LinuxWebAppAuthSettingsActiveDirectoryParameters struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + // +kubebuilder:validation:Optional + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Active Directory. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. + // The Client Secret for the Client ID. Cannot be used with `client_secret_setting_name`. + // +kubebuilder:validation:Optional + ClientSecretSecretRef *v1.SecretKeySelector `json:"clientSecretSecretRef,omitempty" tf:"-"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The App Setting name that contains the client secret of the Client. Cannot be used with `client_secret`. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` +} + +type LinuxWebAppAuthSettingsFacebookInitParameters struct { + + // The App ID of the Facebook app used for login. + // The App ID of the Facebook app used for login. + AppID *string `json:"appId,omitempty" tf:"app_id,omitempty"` + + // The app setting name that contains the app_secret value used for Facebook Login. + // The app setting name that contains the `app_secret` value used for Facebook Login. 
Cannot be specified with `app_secret`. + AppSecretSettingName *string `json:"appSecretSettingName,omitempty" tf:"app_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + // Specifies a list of OAuth 2.0 scopes to be requested as part of Facebook Login authentication. + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type LinuxWebAppAuthSettingsFacebookObservation struct { + + // The App ID of the Facebook app used for login. + // The App ID of the Facebook app used for login. + AppID *string `json:"appId,omitempty" tf:"app_id,omitempty"` + + // The app setting name that contains the app_secret value used for Facebook Login. + // The app setting name that contains the `app_secret` value used for Facebook Login. Cannot be specified with `app_secret`. + AppSecretSettingName *string `json:"appSecretSettingName,omitempty" tf:"app_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + // Specifies a list of OAuth 2.0 scopes to be requested as part of Facebook Login authentication. + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type LinuxWebAppAuthSettingsFacebookParameters struct { + + // The App ID of the Facebook app used for login. + // The App ID of the Facebook app used for login. + // +kubebuilder:validation:Optional + AppID *string `json:"appId" tf:"app_id,omitempty"` + + // The App Secret of the Facebook app used for Facebook login. Cannot be specified with app_secret_setting_name. + // The App Secret of the Facebook app used for Facebook Login. Cannot be specified with `app_secret_setting_name`. 
+ // +kubebuilder:validation:Optional + AppSecretSecretRef *v1.SecretKeySelector `json:"appSecretSecretRef,omitempty" tf:"-"` + + // The app setting name that contains the app_secret value used for Facebook Login. + // The app setting name that contains the `app_secret` value used for Facebook Login. Cannot be specified with `app_secret`. + // +kubebuilder:validation:Optional + AppSecretSettingName *string `json:"appSecretSettingName,omitempty" tf:"app_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + // Specifies a list of OAuth 2.0 scopes to be requested as part of Facebook Login authentication. + // +kubebuilder:validation:Optional + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type LinuxWebAppAuthSettingsGithubInitParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the GitHub app used for login. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for GitHub Login. Cannot be specified with `client_secret`. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. 
+ OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type LinuxWebAppAuthSettingsGithubObservation struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the GitHub app used for login. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for GitHub Login. Cannot be specified with `client_secret`. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type LinuxWebAppAuthSettingsGithubParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the GitHub app used for login. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. + // The Client Secret of the GitHub app used for GitHub Login. Cannot be specified with `client_secret_setting_name`. + // +kubebuilder:validation:Optional + ClientSecretSecretRef *v1.SecretKeySelector `json:"clientSecretSecretRef,omitempty" tf:"-"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for GitHub Login. 
Cannot be specified with `client_secret`. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + // +kubebuilder:validation:Optional + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type LinuxWebAppAuthSettingsGoogleInitParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Google web application. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Google Login. Cannot be specified with `client_secret`. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication. If not specified, "openid", "profile", and "email" are used as default scopes. + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type LinuxWebAppAuthSettingsGoogleObservation struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Google web application. 
+ ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Google Login. Cannot be specified with `client_secret`. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication. If not specified, "openid", "profile", and "email" are used as default scopes. + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type LinuxWebAppAuthSettingsGoogleParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Google web application. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. + // The client secret associated with the Google web application. Cannot be specified with `client_secret_setting_name`. + // +kubebuilder:validation:Optional + ClientSecretSecretRef *v1.SecretKeySelector `json:"clientSecretSecretRef,omitempty" tf:"-"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Google Login. Cannot be specified with `client_secret`. 
+ // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication. If not specified, "openid", "profile", and "email" are used as default scopes. + // +kubebuilder:validation:Optional + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type LinuxWebAppAuthSettingsInitParameters struct { + + // An active_directory block as defined above. + ActiveDirectory *LinuxWebAppAuthSettingsActiveDirectoryInitParameters `json:"activeDirectory,omitempty" tf:"active_directory,omitempty"` + + // Specifies a map of login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + // Specifies a map of Login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + // +mapType=granular + AdditionalLoginParameters map[string]*string `json:"additionalLoginParameters,omitempty" tf:"additional_login_parameters,omitempty"` + + // Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Linux Web App. + // Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + AllowedExternalRedirectUrls []*string `json:"allowedExternalRedirectUrls,omitempty" tf:"allowed_external_redirect_urls,omitempty"` + + // The default authentication provider to use when multiple providers are configured. 
Possible values include: BuiltInAuthenticationProviderAzureActiveDirectory, BuiltInAuthenticationProviderFacebook, BuiltInAuthenticationProviderGoogle, BuiltInAuthenticationProviderMicrosoftAccount, BuiltInAuthenticationProviderTwitter, BuiltInAuthenticationProviderGithub + // The default authentication provider to use when multiple providers are configured. Possible values include: `AzureActiveDirectory`, `Facebook`, `Google`, `MicrosoftAccount`, `Twitter`, `Github`. + DefaultProvider *string `json:"defaultProvider,omitempty" tf:"default_provider,omitempty"` + + // Should the Authentication / Authorization feature be enabled for the Linux Web App? + // Should the Authentication / Authorization feature be enabled? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // A facebook block as defined below. + Facebook *LinuxWebAppAuthSettingsFacebookInitParameters `json:"facebook,omitempty" tf:"facebook,omitempty"` + + // A github block as defined below. + Github *LinuxWebAppAuthSettingsGithubInitParameters `json:"github,omitempty" tf:"github,omitempty"` + + // A google block as defined below. + Google *LinuxWebAppAuthSettingsGoogleInitParameters `json:"google,omitempty" tf:"google,omitempty"` + + // The OpenID Connect Issuer URI that represents the entity that issues access tokens for this Linux Web App. + // The OpenID Connect Issuer URI that represents the entity which issues access tokens. + Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"` + + // A microsoft block as defined below. + Microsoft *LinuxWebAppAuthSettingsMicrosoftInitParameters `json:"microsoft,omitempty" tf:"microsoft,omitempty"` + + // The RuntimeVersion of the Authentication / Authorization feature in use for the Linux Web App. + // The RuntimeVersion of the Authentication / Authorization feature in use. 
+ RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` + + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + TokenRefreshExtensionHours *float64 `json:"tokenRefreshExtensionHours,omitempty" tf:"token_refresh_extension_hours,omitempty"` + + // Should the Linux Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to false. + // Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to `false`. + TokenStoreEnabled *bool `json:"tokenStoreEnabled,omitempty" tf:"token_store_enabled,omitempty"` + + // A twitter block as defined below. + Twitter *LinuxWebAppAuthSettingsTwitterInitParameters `json:"twitter,omitempty" tf:"twitter,omitempty"` + + // The action to take when an unauthenticated client attempts to access the app. Possible values include: RedirectToLoginPage, AllowAnonymous. + // The action to take when an unauthenticated client attempts to access the app. Possible values include: `RedirectToLoginPage`, `AllowAnonymous`. + UnauthenticatedClientAction *string `json:"unauthenticatedClientAction,omitempty" tf:"unauthenticated_client_action,omitempty"` +} + +type LinuxWebAppAuthSettingsMicrosoftInitParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OAuth 2.0 client ID that was created for the app used for authentication. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. 
+ // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret`. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + // The list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, `wl.basic` is used as the default scope. + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type LinuxWebAppAuthSettingsMicrosoftObservation struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OAuth 2.0 client ID that was created for the app used for authentication. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret`. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + // The list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, `wl.basic` is used as the default scope. + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type LinuxWebAppAuthSettingsMicrosoftParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. 
+ // The OAuth 2.0 client ID that was created for the app used for authentication. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. + // The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret_setting_name`. + // +kubebuilder:validation:Optional + ClientSecretSecretRef *v1.SecretKeySelector `json:"clientSecretSecretRef,omitempty" tf:"-"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret`. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + // The list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, `wl.basic` is used as the default scope. + // +kubebuilder:validation:Optional + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type LinuxWebAppAuthSettingsObservation struct { + + // An active_directory block as defined above. + ActiveDirectory *LinuxWebAppAuthSettingsActiveDirectoryObservation `json:"activeDirectory,omitempty" tf:"active_directory,omitempty"` + + // Specifies a map of login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + // Specifies a map of Login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. 
+ // +mapType=granular + AdditionalLoginParameters map[string]*string `json:"additionalLoginParameters,omitempty" tf:"additional_login_parameters,omitempty"` + + // Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Linux Web App. + // Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + AllowedExternalRedirectUrls []*string `json:"allowedExternalRedirectUrls,omitempty" tf:"allowed_external_redirect_urls,omitempty"` + + // The default authentication provider to use when multiple providers are configured. Possible values include: BuiltInAuthenticationProviderAzureActiveDirectory, BuiltInAuthenticationProviderFacebook, BuiltInAuthenticationProviderGoogle, BuiltInAuthenticationProviderMicrosoftAccount, BuiltInAuthenticationProviderTwitter, BuiltInAuthenticationProviderGithub + // The default authentication provider to use when multiple providers are configured. Possible values include: `AzureActiveDirectory`, `Facebook`, `Google`, `MicrosoftAccount`, `Twitter`, `Github`. + DefaultProvider *string `json:"defaultProvider,omitempty" tf:"default_provider,omitempty"` + + // Should the Authentication / Authorization feature be enabled for the Linux Web App? + // Should the Authentication / Authorization feature be enabled? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // A facebook block as defined below. + Facebook *LinuxWebAppAuthSettingsFacebookObservation `json:"facebook,omitempty" tf:"facebook,omitempty"` + + // A github block as defined below. + Github *LinuxWebAppAuthSettingsGithubObservation `json:"github,omitempty" tf:"github,omitempty"` + + // A google block as defined below. + Google *LinuxWebAppAuthSettingsGoogleObservation `json:"google,omitempty" tf:"google,omitempty"` + + // The OpenID Connect Issuer URI that represents the entity that issues access tokens for this Linux Web App. 
+ // The OpenID Connect Issuer URI that represents the entity which issues access tokens. + Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"` + + // A microsoft block as defined below. + Microsoft *LinuxWebAppAuthSettingsMicrosoftObservation `json:"microsoft,omitempty" tf:"microsoft,omitempty"` + + // The RuntimeVersion of the Authentication / Authorization feature in use for the Linux Web App. + // The RuntimeVersion of the Authentication / Authorization feature in use. + RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` + + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + TokenRefreshExtensionHours *float64 `json:"tokenRefreshExtensionHours,omitempty" tf:"token_refresh_extension_hours,omitempty"` + + // Should the Linux Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to false. + // Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to `false`. + TokenStoreEnabled *bool `json:"tokenStoreEnabled,omitempty" tf:"token_store_enabled,omitempty"` + + // A twitter block as defined below. + Twitter *LinuxWebAppAuthSettingsTwitterObservation `json:"twitter,omitempty" tf:"twitter,omitempty"` + + // The action to take when an unauthenticated client attempts to access the app. Possible values include: RedirectToLoginPage, AllowAnonymous. + // The action to take when an unauthenticated client attempts to access the app. Possible values include: `RedirectToLoginPage`, `AllowAnonymous`. 
+ UnauthenticatedClientAction *string `json:"unauthenticatedClientAction,omitempty" tf:"unauthenticated_client_action,omitempty"` +} + +type LinuxWebAppAuthSettingsParameters struct { + + // An active_directory block as defined above. + // +kubebuilder:validation:Optional + ActiveDirectory *LinuxWebAppAuthSettingsActiveDirectoryParameters `json:"activeDirectory,omitempty" tf:"active_directory,omitempty"` + + // Specifies a map of login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + // Specifies a map of Login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + // +kubebuilder:validation:Optional + // +mapType=granular + AdditionalLoginParameters map[string]*string `json:"additionalLoginParameters,omitempty" tf:"additional_login_parameters,omitempty"` + + // Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Linux Web App. + // Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + // +kubebuilder:validation:Optional + AllowedExternalRedirectUrls []*string `json:"allowedExternalRedirectUrls,omitempty" tf:"allowed_external_redirect_urls,omitempty"` + + // The default authentication provider to use when multiple providers are configured. Possible values include: BuiltInAuthenticationProviderAzureActiveDirectory, BuiltInAuthenticationProviderFacebook, BuiltInAuthenticationProviderGoogle, BuiltInAuthenticationProviderMicrosoftAccount, BuiltInAuthenticationProviderTwitter, BuiltInAuthenticationProviderGithub + // The default authentication provider to use when multiple providers are configured. Possible values include: `AzureActiveDirectory`, `Facebook`, `Google`, `MicrosoftAccount`, `Twitter`, `Github`. 
+ // +kubebuilder:validation:Optional + DefaultProvider *string `json:"defaultProvider,omitempty" tf:"default_provider,omitempty"` + + // Should the Authentication / Authorization feature be enabled for the Linux Web App? + // Should the Authentication / Authorization feature be enabled? + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` + + // A facebook block as defined below. + // +kubebuilder:validation:Optional + Facebook *LinuxWebAppAuthSettingsFacebookParameters `json:"facebook,omitempty" tf:"facebook,omitempty"` + + // A github block as defined below. + // +kubebuilder:validation:Optional + Github *LinuxWebAppAuthSettingsGithubParameters `json:"github,omitempty" tf:"github,omitempty"` + + // A google block as defined below. + // +kubebuilder:validation:Optional + Google *LinuxWebAppAuthSettingsGoogleParameters `json:"google,omitempty" tf:"google,omitempty"` + + // The OpenID Connect Issuer URI that represents the entity that issues access tokens for this Linux Web App. + // The OpenID Connect Issuer URI that represents the entity which issues access tokens. + // +kubebuilder:validation:Optional + Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"` + + // A microsoft block as defined below. + // +kubebuilder:validation:Optional + Microsoft *LinuxWebAppAuthSettingsMicrosoftParameters `json:"microsoft,omitempty" tf:"microsoft,omitempty"` + + // The RuntimeVersion of the Authentication / Authorization feature in use for the Linux Web App. + // The RuntimeVersion of the Authentication / Authorization feature in use. + // +kubebuilder:validation:Optional + RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` + + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + // The number of hours after session token expiration that a session token can be used to call the token refresh API. 
Defaults to `72` hours. + // +kubebuilder:validation:Optional + TokenRefreshExtensionHours *float64 `json:"tokenRefreshExtensionHours,omitempty" tf:"token_refresh_extension_hours,omitempty"` + + // Should the Linux Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to false. + // Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to `false`. + // +kubebuilder:validation:Optional + TokenStoreEnabled *bool `json:"tokenStoreEnabled,omitempty" tf:"token_store_enabled,omitempty"` + + // A twitter block as defined below. + // +kubebuilder:validation:Optional + Twitter *LinuxWebAppAuthSettingsTwitterParameters `json:"twitter,omitempty" tf:"twitter,omitempty"` + + // The action to take when an unauthenticated client attempts to access the app. Possible values include: RedirectToLoginPage, AllowAnonymous. + // The action to take when an unauthenticated client attempts to access the app. Possible values include: `RedirectToLoginPage`, `AllowAnonymous`. + // +kubebuilder:validation:Optional + UnauthenticatedClientAction *string `json:"unauthenticatedClientAction,omitempty" tf:"unauthenticated_client_action,omitempty"` +} + +type LinuxWebAppAuthSettingsTwitterInitParameters struct { + + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + ConsumerKey *string `json:"consumerKey,omitempty" tf:"consumer_key,omitempty"` + + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret`. 
+ ConsumerSecretSettingName *string `json:"consumerSecretSettingName,omitempty" tf:"consumer_secret_setting_name,omitempty"` +} + +type LinuxWebAppAuthSettingsTwitterObservation struct { + + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + ConsumerKey *string `json:"consumerKey,omitempty" tf:"consumer_key,omitempty"` + + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret`. + ConsumerSecretSettingName *string `json:"consumerSecretSettingName,omitempty" tf:"consumer_secret_setting_name,omitempty"` +} + +type LinuxWebAppAuthSettingsTwitterParameters struct { + + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // +kubebuilder:validation:Optional + ConsumerKey *string `json:"consumerKey" tf:"consumer_key,omitempty"` + + // The OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with consumer_secret_setting_name. + // The OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret_setting_name`. + // +kubebuilder:validation:Optional + ConsumerSecretSecretRef *v1.SecretKeySelector `json:"consumerSecretSecretRef,omitempty" tf:"-"` + + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret`. 
+ // +kubebuilder:validation:Optional + ConsumerSecretSettingName *string `json:"consumerSecretSettingName,omitempty" tf:"consumer_secret_setting_name,omitempty"` +} + +type LinuxWebAppAuthSettingsV2ActiveDirectoryV2InitParameters struct { + + // The list of allowed Applications for the Default Authorisation Policy. + // The list of allowed Applications for the Default Authorisation Policy. + AllowedApplications []*string `json:"allowedApplications,omitempty" tf:"allowed_applications,omitempty"` + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The list of allowed Group Names for the Default Authorisation Policy. + // The list of allowed Group Names for the Default Authorisation Policy. + AllowedGroups []*string `json:"allowedGroups,omitempty" tf:"allowed_groups,omitempty"` + + // The list of allowed Identities for the Default Authorisation Policy. + // The list of allowed Identities for the Default Authorisation Policy. + AllowedIdentities []*string `json:"allowedIdentities,omitempty" tf:"allowed_identities,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Active Directory. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The thumbprint of the certificate used for signing purposes. + // The thumbprint of the certificate used for signing purposes. + ClientSecretCertificateThumbprint *string `json:"clientSecretCertificateThumbprint,omitempty" tf:"client_secret_certificate_thumbprint,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. 
+ // The App Setting name that contains the client secret of the Client. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // A list of Allowed Client Applications in the JWT Claim. + // A list of Allowed Client Applications in the JWT Claim. + JwtAllowedClientApplications []*string `json:"jwtAllowedClientApplications,omitempty" tf:"jwt_allowed_client_applications,omitempty"` + + // A list of Allowed Groups in the JWT Claim. + // A list of Allowed Groups in the JWT Claim. + JwtAllowedGroups []*string `json:"jwtAllowedGroups,omitempty" tf:"jwt_allowed_groups,omitempty"` + + // A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + // A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + // +mapType=granular + LoginParameters map[string]*string `json:"loginParameters,omitempty" tf:"login_parameters,omitempty"` + + // The Azure Tenant Endpoint for the Authenticating Tenant. e.g. https://login.microsoftonline.com/v2.0/{tenant-guid}/ + // The Azure Tenant Endpoint for the Authenticating Tenant. e.g. `https://login.microsoftonline.com/v2.0/{tenant-guid}/`. + TenantAuthEndpoint *string `json:"tenantAuthEndpoint,omitempty" tf:"tenant_auth_endpoint,omitempty"` + + // Should the www-authenticate provider should be omitted from the request? Defaults to false. + // Should the www-authenticate provider should be omitted from the request? Defaults to `false` + WwwAuthenticationDisabled *bool `json:"wwwAuthenticationDisabled,omitempty" tf:"www_authentication_disabled,omitempty"` +} + +type LinuxWebAppAuthSettingsV2ActiveDirectoryV2Observation struct { + + // The list of allowed Applications for the Default Authorisation Policy. + // The list of allowed Applications for the Default Authorisation Policy. 
+ AllowedApplications []*string `json:"allowedApplications,omitempty" tf:"allowed_applications,omitempty"` + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The list of allowed Group Names for the Default Authorisation Policy. + // The list of allowed Group Names for the Default Authorisation Policy. + AllowedGroups []*string `json:"allowedGroups,omitempty" tf:"allowed_groups,omitempty"` + + // The list of allowed Identities for the Default Authorisation Policy. + // The list of allowed Identities for the Default Authorisation Policy. + AllowedIdentities []*string `json:"allowedIdentities,omitempty" tf:"allowed_identities,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Active Directory. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The thumbprint of the certificate used for signing purposes. + // The thumbprint of the certificate used for signing purposes. + ClientSecretCertificateThumbprint *string `json:"clientSecretCertificateThumbprint,omitempty" tf:"client_secret_certificate_thumbprint,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The App Setting name that contains the client secret of the Client. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // A list of Allowed Client Applications in the JWT Claim. + // A list of Allowed Client Applications in the JWT Claim. 
+ JwtAllowedClientApplications []*string `json:"jwtAllowedClientApplications,omitempty" tf:"jwt_allowed_client_applications,omitempty"` + + // A list of Allowed Groups in the JWT Claim. + // A list of Allowed Groups in the JWT Claim. + JwtAllowedGroups []*string `json:"jwtAllowedGroups,omitempty" tf:"jwt_allowed_groups,omitempty"` + + // A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + // A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + // +mapType=granular + LoginParameters map[string]*string `json:"loginParameters,omitempty" tf:"login_parameters,omitempty"` + + // The Azure Tenant Endpoint for the Authenticating Tenant. e.g. https://login.microsoftonline.com/v2.0/{tenant-guid}/ + // The Azure Tenant Endpoint for the Authenticating Tenant. e.g. `https://login.microsoftonline.com/v2.0/{tenant-guid}/`. + TenantAuthEndpoint *string `json:"tenantAuthEndpoint,omitempty" tf:"tenant_auth_endpoint,omitempty"` + + // Should the www-authenticate provider should be omitted from the request? Defaults to false. + // Should the www-authenticate provider should be omitted from the request? Defaults to `false` + WwwAuthenticationDisabled *bool `json:"wwwAuthenticationDisabled,omitempty" tf:"www_authentication_disabled,omitempty"` +} + +type LinuxWebAppAuthSettingsV2ActiveDirectoryV2Parameters struct { + + // The list of allowed Applications for the Default Authorisation Policy. + // The list of allowed Applications for the Default Authorisation Policy. + // +kubebuilder:validation:Optional + AllowedApplications []*string `json:"allowedApplications,omitempty" tf:"allowed_applications,omitempty"` + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. 
+ // +kubebuilder:validation:Optional + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The list of allowed Group Names for the Default Authorisation Policy. + // The list of allowed Group Names for the Default Authorisation Policy. + // +kubebuilder:validation:Optional + AllowedGroups []*string `json:"allowedGroups,omitempty" tf:"allowed_groups,omitempty"` + + // The list of allowed Identities for the Default Authorisation Policy. + // The list of allowed Identities for the Default Authorisation Policy. + // +kubebuilder:validation:Optional + AllowedIdentities []*string `json:"allowedIdentities,omitempty" tf:"allowed_identities,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Active Directory. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The thumbprint of the certificate used for signing purposes. + // The thumbprint of the certificate used for signing purposes. + // +kubebuilder:validation:Optional + ClientSecretCertificateThumbprint *string `json:"clientSecretCertificateThumbprint,omitempty" tf:"client_secret_certificate_thumbprint,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The App Setting name that contains the client secret of the Client. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // A list of Allowed Client Applications in the JWT Claim. + // A list of Allowed Client Applications in the JWT Claim. + // +kubebuilder:validation:Optional + JwtAllowedClientApplications []*string `json:"jwtAllowedClientApplications,omitempty" tf:"jwt_allowed_client_applications,omitempty"` + + // A list of Allowed Groups in the JWT Claim. 
+ // A list of Allowed Groups in the JWT Claim. + // +kubebuilder:validation:Optional + JwtAllowedGroups []*string `json:"jwtAllowedGroups,omitempty" tf:"jwt_allowed_groups,omitempty"` + + // A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + // A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + // +kubebuilder:validation:Optional + // +mapType=granular + LoginParameters map[string]*string `json:"loginParameters,omitempty" tf:"login_parameters,omitempty"` + + // The Azure Tenant Endpoint for the Authenticating Tenant. e.g. https://login.microsoftonline.com/v2.0/{tenant-guid}/ + // The Azure Tenant Endpoint for the Authenticating Tenant. e.g. `https://login.microsoftonline.com/v2.0/{tenant-guid}/`. + // +kubebuilder:validation:Optional + TenantAuthEndpoint *string `json:"tenantAuthEndpoint" tf:"tenant_auth_endpoint,omitempty"` + + // Should the www-authenticate provider should be omitted from the request? Defaults to false. + // Should the www-authenticate provider should be omitted from the request? Defaults to `false` + // +kubebuilder:validation:Optional + WwwAuthenticationDisabled *bool `json:"wwwAuthenticationDisabled,omitempty" tf:"www_authentication_disabled,omitempty"` +} + +type LinuxWebAppAuthSettingsV2AppleV2InitParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Apple web application. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Apple Login. 
+ ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` +} + +type LinuxWebAppAuthSettingsV2AppleV2Observation struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Apple web application. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Apple Login. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type LinuxWebAppAuthSettingsV2AppleV2Parameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Apple web application. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Apple Login. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName" tf:"client_secret_setting_name,omitempty"` +} + +type LinuxWebAppAuthSettingsV2AzureStaticWebAppV2InitParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Static Web App Authentication. 
+ ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` +} + +type LinuxWebAppAuthSettingsV2AzureStaticWebAppV2Observation struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Static Web App Authentication. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` +} + +type LinuxWebAppAuthSettingsV2AzureStaticWebAppV2Parameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Static Web App Authentication. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` +} + +type LinuxWebAppAuthSettingsV2CustomOidcV2InitParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with this Custom OIDC. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The name which should be used for this Storage Account. + // The name of the Custom OIDC Authentication Provider. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The name of the claim that contains the users name. + // The name of the claim that contains the users name. + NameClaimType *string `json:"nameClaimType,omitempty" tf:"name_claim_type,omitempty"` + + // Specifies the endpoint used for OpenID Connect Discovery. For example https://example.com/.well-known/openid-configuration. + // The endpoint that contains all the configuration endpoints for this Custom OIDC provider. + OpenIDConfigurationEndpoint *string `json:"openidConfigurationEndpoint,omitempty" tf:"openid_configuration_endpoint,omitempty"` + + // The list of the scopes that should be requested while authenticating. + // The list of the scopes that should be requested while authenticating. 
+ Scopes []*string `json:"scopes,omitempty" tf:"scopes,omitempty"` +} + +type LinuxWebAppAuthSettingsV2CustomOidcV2Observation struct { + + // The endpoint to make the Authorisation Request as supplied by openid_configuration_endpoint response. + // The endpoint to make the Authorisation Request. + AuthorisationEndpoint *string `json:"authorisationEndpoint,omitempty" tf:"authorisation_endpoint,omitempty"` + + // The endpoint that provides the keys necessary to validate the token as supplied by openid_configuration_endpoint response. + // The endpoint that provides the keys necessary to validate the token. + CertificationURI *string `json:"certificationUri,omitempty" tf:"certification_uri,omitempty"` + + // The Client Credential Method used. + // The Client Credential Method used. Currently the only supported value is `ClientSecretPost`. + ClientCredentialMethod *string `json:"clientCredentialMethod,omitempty" tf:"client_credential_method,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with this Custom OIDC. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The App Setting name that contains the secret for this Custom OIDC Client. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The endpoint that issued the Token as supplied by openid_configuration_endpoint response. + // The endpoint that issued the Token. + IssuerEndpoint *string `json:"issuerEndpoint,omitempty" tf:"issuer_endpoint,omitempty"` + + // The name which should be used for this Storage Account. + // The name of the Custom OIDC Authentication Provider. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The name of the claim that contains the users name. 
+ // The name of the claim that contains the users name. + NameClaimType *string `json:"nameClaimType,omitempty" tf:"name_claim_type,omitempty"` + + // Specifies the endpoint used for OpenID Connect Discovery. For example https://example.com/.well-known/openid-configuration. + // The endpoint that contains all the configuration endpoints for this Custom OIDC provider. + OpenIDConfigurationEndpoint *string `json:"openidConfigurationEndpoint,omitempty" tf:"openid_configuration_endpoint,omitempty"` + + // The list of the scopes that should be requested while authenticating. + // The list of the scopes that should be requested while authenticating. + Scopes []*string `json:"scopes,omitempty" tf:"scopes,omitempty"` + + // The endpoint used to request a Token as supplied by openid_configuration_endpoint response. + // The endpoint used to request a Token. + TokenEndpoint *string `json:"tokenEndpoint,omitempty" tf:"token_endpoint,omitempty"` +} + +type LinuxWebAppAuthSettingsV2CustomOidcV2Parameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with this Custom OIDC. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The name which should be used for this Storage Account. + // The name of the Custom OIDC Authentication Provider. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The name of the claim that contains the users name. + // The name of the claim that contains the users name. + // +kubebuilder:validation:Optional + NameClaimType *string `json:"nameClaimType,omitempty" tf:"name_claim_type,omitempty"` + + // Specifies the endpoint used for OpenID Connect Discovery. For example https://example.com/.well-known/openid-configuration. + // The endpoint that contains all the configuration endpoints for this Custom OIDC provider. 
+ // +kubebuilder:validation:Optional + OpenIDConfigurationEndpoint *string `json:"openidConfigurationEndpoint" tf:"openid_configuration_endpoint,omitempty"` + + // The list of the scopes that should be requested while authenticating. + // The list of the scopes that should be requested while authenticating. + // +kubebuilder:validation:Optional + Scopes []*string `json:"scopes,omitempty" tf:"scopes,omitempty"` +} + +type LinuxWebAppAuthSettingsV2FacebookV2InitParameters struct { + + // The App ID of the Facebook app used for login. + // The App ID of the Facebook app used for login. + AppID *string `json:"appId,omitempty" tf:"app_id,omitempty"` + + // The app setting name that contains the app_secret value used for Facebook Login. + // The app setting name that contains the `app_secret` value used for Facebook Login. + AppSecretSettingName *string `json:"appSecretSettingName,omitempty" tf:"app_secret_setting_name,omitempty"` + + // The version of the Facebook API to be used while logging in. + // The version of the Facebook API to be used while logging in. + GraphAPIVersion *string `json:"graphApiVersion,omitempty" tf:"graph_api_version,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of scopes to be requested as part of Facebook Login authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type LinuxWebAppAuthSettingsV2FacebookV2Observation struct { + + // The App ID of the Facebook app used for login. + // The App ID of the Facebook app used for login. + AppID *string `json:"appId,omitempty" tf:"app_id,omitempty"` + + // The app setting name that contains the app_secret value used for Facebook Login. + // The app setting name that contains the `app_secret` value used for Facebook Login. 
+ AppSecretSettingName *string `json:"appSecretSettingName,omitempty" tf:"app_secret_setting_name,omitempty"` + + // The version of the Facebook API to be used while logging in. + // The version of the Facebook API to be used while logging in. + GraphAPIVersion *string `json:"graphApiVersion,omitempty" tf:"graph_api_version,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of scopes to be requested as part of Facebook Login authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type LinuxWebAppAuthSettingsV2FacebookV2Parameters struct { + + // The App ID of the Facebook app used for login. + // The App ID of the Facebook app used for login. + // +kubebuilder:validation:Optional + AppID *string `json:"appId" tf:"app_id,omitempty"` + + // The app setting name that contains the app_secret value used for Facebook Login. + // The app setting name that contains the `app_secret` value used for Facebook Login. + // +kubebuilder:validation:Optional + AppSecretSettingName *string `json:"appSecretSettingName" tf:"app_secret_setting_name,omitempty"` + + // The version of the Facebook API to be used while logging in. + // The version of the Facebook API to be used while logging in. + // +kubebuilder:validation:Optional + GraphAPIVersion *string `json:"graphApiVersion,omitempty" tf:"graph_api_version,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of scopes to be requested as part of Facebook Login authentication. + // +kubebuilder:validation:Optional + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type LinuxWebAppAuthSettingsV2GithubV2InitParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the GitHub app used for login. 
+ ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for GitHub Login. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type LinuxWebAppAuthSettingsV2GithubV2Observation struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the GitHub app used for login. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for GitHub Login. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type LinuxWebAppAuthSettingsV2GithubV2Parameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the GitHub app used for login. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. 
+ // The app setting name that contains the `client_secret` value used for GitHub Login. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + // +kubebuilder:validation:Optional + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type LinuxWebAppAuthSettingsV2GoogleV2InitParameters struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed Audiences that will be requested as part of Google Sign-In authentication. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Google web application. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Google Login. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of Login scopes that will be requested as part of Google Sign-In authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type LinuxWebAppAuthSettingsV2GoogleV2Observation struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. 
+ // Specifies a list of Allowed Audiences that will be requested as part of Google Sign-In authentication. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Google web application. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Google Login. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of Login scopes that will be requested as part of Google Sign-In authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type LinuxWebAppAuthSettingsV2GoogleV2Parameters struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed Audiences that will be requested as part of Google Sign-In authentication. + // +kubebuilder:validation:Optional + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Google web application. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Google Login. 
+ // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of Login scopes that will be requested as part of Google Sign-In authentication. + // +kubebuilder:validation:Optional + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type LinuxWebAppAuthSettingsV2InitParameters struct { + + // An active_directory_v2 block as defined below. + ActiveDirectoryV2 *LinuxWebAppAuthSettingsV2ActiveDirectoryV2InitParameters `json:"activeDirectoryV2,omitempty" tf:"active_directory_v2,omitempty"` + + // An apple_v2 block as defined below. + AppleV2 *LinuxWebAppAuthSettingsV2AppleV2InitParameters `json:"appleV2,omitempty" tf:"apple_v2,omitempty"` + + // Should the AuthV2 Settings be enabled. Defaults to false. + // Should the AuthV2 Settings be enabled. Defaults to `false` + AuthEnabled *bool `json:"authEnabled,omitempty" tf:"auth_enabled,omitempty"` + + // An azure_static_web_app_v2 block as defined below. + AzureStaticWebAppV2 *LinuxWebAppAuthSettingsV2AzureStaticWebAppV2InitParameters `json:"azureStaticWebAppV2,omitempty" tf:"azure_static_web_app_v2,omitempty"` + + // The path to the App Auth settings. + // The path to the App Auth settings. **Note:** Relative Paths are evaluated from the Site Root directory. + ConfigFilePath *string `json:"configFilePath,omitempty" tf:"config_file_path,omitempty"` + + // Zero or more custom_oidc_v2 blocks as defined below. + CustomOidcV2 []LinuxWebAppAuthSettingsV2CustomOidcV2InitParameters `json:"customOidcV2,omitempty" tf:"custom_oidc_v2,omitempty"` + + // The Default Authentication Provider to use when the unauthenticated_action is set to RedirectToLoginPage. Possible values include: apple, azureactivedirectory, facebook, github, google, twitter and the name of your custom_oidc_v2 provider. 
+ // The Default Authentication Provider to use when the `unauthenticated_action` is set to `RedirectToLoginPage`. Possible values include: `apple`, `azureactivedirectory`, `facebook`, `github`, `google`, `twitter` and the `name` of your `custom_oidc_v2` provider. + DefaultProvider *string `json:"defaultProvider,omitempty" tf:"default_provider,omitempty"` + + // The paths which should be excluded from the unauthenticated_action when it is set to RedirectToLoginPage. + // The paths which should be excluded from the `unauthenticated_action` when it is set to `RedirectToLoginPage`. + ExcludedPaths []*string `json:"excludedPaths,omitempty" tf:"excluded_paths,omitempty"` + + // A facebook_v2 block as defined below. + FacebookV2 *LinuxWebAppAuthSettingsV2FacebookV2InitParameters `json:"facebookV2,omitempty" tf:"facebook_v2,omitempty"` + + // The convention used to determine the url of the request made. Possible values include NoProxy, Standard, Custom. Defaults to NoProxy. + // The convention used to determine the url of the request made. Possible values include `ForwardProxyConventionNoProxy`, `ForwardProxyConventionStandard`, `ForwardProxyConventionCustom`. Defaults to `ForwardProxyConventionNoProxy` + ForwardProxyConvention *string `json:"forwardProxyConvention,omitempty" tf:"forward_proxy_convention,omitempty"` + + // The name of the custom header containing the host of the request. + // The name of the header containing the host of the request. + ForwardProxyCustomHostHeaderName *string `json:"forwardProxyCustomHostHeaderName,omitempty" tf:"forward_proxy_custom_host_header_name,omitempty"` + + // The name of the custom header containing the scheme of the request. + // The name of the header containing the scheme of the request. + ForwardProxyCustomSchemeHeaderName *string `json:"forwardProxyCustomSchemeHeaderName,omitempty" tf:"forward_proxy_custom_scheme_header_name,omitempty"` + + // A github_v2 block as defined below. 
+ GithubV2 *LinuxWebAppAuthSettingsV2GithubV2InitParameters `json:"githubV2,omitempty" tf:"github_v2,omitempty"` + + // A google_v2 block as defined below. + GoogleV2 *LinuxWebAppAuthSettingsV2GoogleV2InitParameters `json:"googleV2,omitempty" tf:"google_v2,omitempty"` + + // The prefix that should precede all the authentication and authorisation paths. Defaults to /.auth. + // The prefix that should precede all the authentication and authorisation paths. Defaults to `/.auth` + HTTPRouteAPIPrefix *string `json:"httpRouteApiPrefix,omitempty" tf:"http_route_api_prefix,omitempty"` + + // A login block as defined below. + Login *LinuxWebAppAuthSettingsV2LoginInitParameters `json:"login,omitempty" tf:"login,omitempty"` + + // A microsoft_v2 block as defined below. + MicrosoftV2 *LinuxWebAppAuthSettingsV2MicrosoftV2InitParameters `json:"microsoftV2,omitempty" tf:"microsoft_v2,omitempty"` + + // Should the authentication flow be used for all requests. + // Should the authentication flow be used for all requests. + RequireAuthentication *bool `json:"requireAuthentication,omitempty" tf:"require_authentication,omitempty"` + + // Should HTTPS be required on connections? Defaults to true. + // Should HTTPS be required on connections? Defaults to true. + RequireHTTPS *bool `json:"requireHttps,omitempty" tf:"require_https,omitempty"` + + // The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to ~1. + // The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to `~1` + RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` + + // A twitter_v2 block as defined below. + TwitterV2 *LinuxWebAppAuthSettingsV2TwitterV2InitParameters `json:"twitterV2,omitempty" tf:"twitter_v2,omitempty"` + + // The action to take for requests made without authentication. Possible values include RedirectToLoginPage, AllowAnonymous, Return401, and Return403. Defaults to RedirectToLoginPage. 
+ // The action to take for requests made without authentication. Possible values include `RedirectToLoginPage`, `AllowAnonymous`, `Return401`, and `Return403`. Defaults to `RedirectToLoginPage`. + UnauthenticatedAction *string `json:"unauthenticatedAction,omitempty" tf:"unauthenticated_action,omitempty"` +} + +type LinuxWebAppAuthSettingsV2LoginInitParameters struct { + + // External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. + // External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. **Note:** URLs within the current domain are always implicitly allowed. + AllowedExternalRedirectUrls []*string `json:"allowedExternalRedirectUrls,omitempty" tf:"allowed_external_redirect_urls,omitempty"` + + // The method by which cookies expire. Possible values include: FixedTime, and IdentityProviderDerived. Defaults to FixedTime. + // The method by which cookies expire. Possible values include: `FixedTime`, and `IdentityProviderDerived`. Defaults to `FixedTime`. + CookieExpirationConvention *string `json:"cookieExpirationConvention,omitempty" tf:"cookie_expiration_convention,omitempty"` + + // The time after the request is made when the session cookie should expire. Defaults to 08:00:00. + // The time after the request is made when the session cookie should expire. Defaults to `08:00:00`. + CookieExpirationTime *string `json:"cookieExpirationTime,omitempty" tf:"cookie_expiration_time,omitempty"` + + // The endpoint to which logout requests should be made. + // The endpoint to which logout requests should be made. + LogoutEndpoint *string `json:"logoutEndpoint,omitempty" tf:"logout_endpoint,omitempty"` + + // The time after the request is made when the nonce should expire. Defaults to 00:05:00. 
+ // The time after the request is made when the nonce should expire. Defaults to `00:05:00`. + NonceExpirationTime *string `json:"nonceExpirationTime,omitempty" tf:"nonce_expiration_time,omitempty"` + + // Should the fragments from the request be preserved after the login request is made. Defaults to false. + // Should the fragments from the request be preserved after the login request is made. Defaults to `false`. + PreserveURLFragmentsForLogins *bool `json:"preserveUrlFragmentsForLogins,omitempty" tf:"preserve_url_fragments_for_logins,omitempty"` + + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + TokenRefreshExtensionTime *float64 `json:"tokenRefreshExtensionTime,omitempty" tf:"token_refresh_extension_time,omitempty"` + + // Should the Token Store configuration Enabled. Defaults to false + // Should the Token Store configuration Enabled. Defaults to `false` + TokenStoreEnabled *bool `json:"tokenStoreEnabled,omitempty" tf:"token_store_enabled,omitempty"` + + // The directory path in the App Filesystem in which the tokens will be stored. + // The directory path in the App Filesystem in which the tokens will be stored. + TokenStorePath *string `json:"tokenStorePath,omitempty" tf:"token_store_path,omitempty"` + + // The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + // The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + TokenStoreSASSettingName *string `json:"tokenStoreSasSettingName,omitempty" tf:"token_store_sas_setting_name,omitempty"` + + // Should the nonce be validated while completing the login flow. Defaults to true. + // Should the nonce be validated while completing the login flow. Defaults to `true`. 
+ ValidateNonce *bool `json:"validateNonce,omitempty" tf:"validate_nonce,omitempty"` +} + +type LinuxWebAppAuthSettingsV2LoginObservation struct { + + // External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. + // External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. **Note:** URLs within the current domain are always implicitly allowed. + AllowedExternalRedirectUrls []*string `json:"allowedExternalRedirectUrls,omitempty" tf:"allowed_external_redirect_urls,omitempty"` + + // The method by which cookies expire. Possible values include: FixedTime, and IdentityProviderDerived. Defaults to FixedTime. + // The method by which cookies expire. Possible values include: `FixedTime`, and `IdentityProviderDerived`. Defaults to `FixedTime`. + CookieExpirationConvention *string `json:"cookieExpirationConvention,omitempty" tf:"cookie_expiration_convention,omitempty"` + + // The time after the request is made when the session cookie should expire. Defaults to 08:00:00. + // The time after the request is made when the session cookie should expire. Defaults to `08:00:00`. + CookieExpirationTime *string `json:"cookieExpirationTime,omitempty" tf:"cookie_expiration_time,omitempty"` + + // The endpoint to which logout requests should be made. + // The endpoint to which logout requests should be made. + LogoutEndpoint *string `json:"logoutEndpoint,omitempty" tf:"logout_endpoint,omitempty"` + + // The time after the request is made when the nonce should expire. Defaults to 00:05:00. + // The time after the request is made when the nonce should expire. Defaults to `00:05:00`. 
+ NonceExpirationTime *string `json:"nonceExpirationTime,omitempty" tf:"nonce_expiration_time,omitempty"` + + // Should the fragments from the request be preserved after the login request is made. Defaults to false. + // Should the fragments from the request be preserved after the login request is made. Defaults to `false`. + PreserveURLFragmentsForLogins *bool `json:"preserveUrlFragmentsForLogins,omitempty" tf:"preserve_url_fragments_for_logins,omitempty"` + + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + TokenRefreshExtensionTime *float64 `json:"tokenRefreshExtensionTime,omitempty" tf:"token_refresh_extension_time,omitempty"` + + // Should the Token Store configuration Enabled. Defaults to false + // Should the Token Store configuration Enabled. Defaults to `false` + TokenStoreEnabled *bool `json:"tokenStoreEnabled,omitempty" tf:"token_store_enabled,omitempty"` + + // The directory path in the App Filesystem in which the tokens will be stored. + // The directory path in the App Filesystem in which the tokens will be stored. + TokenStorePath *string `json:"tokenStorePath,omitempty" tf:"token_store_path,omitempty"` + + // The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + // The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + TokenStoreSASSettingName *string `json:"tokenStoreSasSettingName,omitempty" tf:"token_store_sas_setting_name,omitempty"` + + // Should the nonce be validated while completing the login flow. Defaults to true. + // Should the nonce be validated while completing the login flow. Defaults to `true`. 
+ ValidateNonce *bool `json:"validateNonce,omitempty" tf:"validate_nonce,omitempty"` +} + +type LinuxWebAppAuthSettingsV2LoginParameters struct { + + // External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. + // External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. **Note:** URLs within the current domain are always implicitly allowed. + // +kubebuilder:validation:Optional + AllowedExternalRedirectUrls []*string `json:"allowedExternalRedirectUrls,omitempty" tf:"allowed_external_redirect_urls,omitempty"` + + // The method by which cookies expire. Possible values include: FixedTime, and IdentityProviderDerived. Defaults to FixedTime. + // The method by which cookies expire. Possible values include: `FixedTime`, and `IdentityProviderDerived`. Defaults to `FixedTime`. + // +kubebuilder:validation:Optional + CookieExpirationConvention *string `json:"cookieExpirationConvention,omitempty" tf:"cookie_expiration_convention,omitempty"` + + // The time after the request is made when the session cookie should expire. Defaults to 08:00:00. + // The time after the request is made when the session cookie should expire. Defaults to `08:00:00`. + // +kubebuilder:validation:Optional + CookieExpirationTime *string `json:"cookieExpirationTime,omitempty" tf:"cookie_expiration_time,omitempty"` + + // The endpoint to which logout requests should be made. + // The endpoint to which logout requests should be made. + // +kubebuilder:validation:Optional + LogoutEndpoint *string `json:"logoutEndpoint,omitempty" tf:"logout_endpoint,omitempty"` + + // The time after the request is made when the nonce should expire. Defaults to 00:05:00. + // The time after the request is made when the nonce should expire. Defaults to `00:05:00`. 
+ // +kubebuilder:validation:Optional + NonceExpirationTime *string `json:"nonceExpirationTime,omitempty" tf:"nonce_expiration_time,omitempty"` + + // Should the fragments from the request be preserved after the login request is made. Defaults to false. + // Should the fragments from the request be preserved after the login request is made. Defaults to `false`. + // +kubebuilder:validation:Optional + PreserveURLFragmentsForLogins *bool `json:"preserveUrlFragmentsForLogins,omitempty" tf:"preserve_url_fragments_for_logins,omitempty"` + + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + // +kubebuilder:validation:Optional + TokenRefreshExtensionTime *float64 `json:"tokenRefreshExtensionTime,omitempty" tf:"token_refresh_extension_time,omitempty"` + + // Should the Token Store configuration Enabled. Defaults to false + // Should the Token Store configuration Enabled. Defaults to `false` + // +kubebuilder:validation:Optional + TokenStoreEnabled *bool `json:"tokenStoreEnabled,omitempty" tf:"token_store_enabled,omitempty"` + + // The directory path in the App Filesystem in which the tokens will be stored. + // The directory path in the App Filesystem in which the tokens will be stored. + // +kubebuilder:validation:Optional + TokenStorePath *string `json:"tokenStorePath,omitempty" tf:"token_store_path,omitempty"` + + // The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + // The name of the app setting which contains the SAS URL of the blob storage containing the tokens. 
+ // +kubebuilder:validation:Optional + TokenStoreSASSettingName *string `json:"tokenStoreSasSettingName,omitempty" tf:"token_store_sas_setting_name,omitempty"` + + // Should the nonce be validated while completing the login flow. Defaults to true. + // Should the nonce be validated while completing the login flow. Defaults to `true`. + // +kubebuilder:validation:Optional + ValidateNonce *bool `json:"validateNonce,omitempty" tf:"validate_nonce,omitempty"` +} + +type LinuxWebAppAuthSettingsV2MicrosoftV2InitParameters struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OAuth 2.0 client ID that was created for the app used for authentication. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // The list of Login scopes that will be requested as part of Microsoft Account authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type LinuxWebAppAuthSettingsV2MicrosoftV2Observation struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. 
+ // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OAuth 2.0 client ID that was created for the app used for authentication. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // The list of Login scopes that will be requested as part of Microsoft Account authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type LinuxWebAppAuthSettingsV2MicrosoftV2Parameters struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // +kubebuilder:validation:Optional + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OAuth 2.0 client ID that was created for the app used for authentication. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. 
+ // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // The list of Login scopes that will be requested as part of Microsoft Account authentication. + // +kubebuilder:validation:Optional + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type LinuxWebAppAuthSettingsV2Observation struct { + + // An active_directory_v2 block as defined below. + ActiveDirectoryV2 *LinuxWebAppAuthSettingsV2ActiveDirectoryV2Observation `json:"activeDirectoryV2,omitempty" tf:"active_directory_v2,omitempty"` + + // An apple_v2 block as defined below. + AppleV2 *LinuxWebAppAuthSettingsV2AppleV2Observation `json:"appleV2,omitempty" tf:"apple_v2,omitempty"` + + // Should the AuthV2 Settings be enabled. Defaults to false. + // Should the AuthV2 Settings be enabled. Defaults to `false` + AuthEnabled *bool `json:"authEnabled,omitempty" tf:"auth_enabled,omitempty"` + + // An azure_static_web_app_v2 block as defined below. + AzureStaticWebAppV2 *LinuxWebAppAuthSettingsV2AzureStaticWebAppV2Observation `json:"azureStaticWebAppV2,omitempty" tf:"azure_static_web_app_v2,omitempty"` + + // The path to the App Auth settings. + // The path to the App Auth settings. **Note:** Relative Paths are evaluated from the Site Root directory. + ConfigFilePath *string `json:"configFilePath,omitempty" tf:"config_file_path,omitempty"` + + // Zero or more custom_oidc_v2 blocks as defined below. + CustomOidcV2 []LinuxWebAppAuthSettingsV2CustomOidcV2Observation `json:"customOidcV2,omitempty" tf:"custom_oidc_v2,omitempty"` + + // The Default Authentication Provider to use when the unauthenticated_action is set to RedirectToLoginPage. 
Possible values include: apple, azureactivedirectory, facebook, github, google, twitter and the name of your custom_oidc_v2 provider. + // The Default Authentication Provider to use when the `unauthenticated_action` is set to `RedirectToLoginPage`. Possible values include: `apple`, `azureactivedirectory`, `facebook`, `github`, `google`, `twitter` and the `name` of your `custom_oidc_v2` provider. + DefaultProvider *string `json:"defaultProvider,omitempty" tf:"default_provider,omitempty"` + + // The paths which should be excluded from the unauthenticated_action when it is set to RedirectToLoginPage. + // The paths which should be excluded from the `unauthenticated_action` when it is set to `RedirectToLoginPage`. + ExcludedPaths []*string `json:"excludedPaths,omitempty" tf:"excluded_paths,omitempty"` + + // A facebook_v2 block as defined below. + FacebookV2 *LinuxWebAppAuthSettingsV2FacebookV2Observation `json:"facebookV2,omitempty" tf:"facebook_v2,omitempty"` + + // The convention used to determine the url of the request made. Possible values include NoProxy, Standard, Custom. Defaults to NoProxy. + // The convention used to determine the url of the request made. Possible values include `ForwardProxyConventionNoProxy`, `ForwardProxyConventionStandard`, `ForwardProxyConventionCustom`. Defaults to `ForwardProxyConventionNoProxy` + ForwardProxyConvention *string `json:"forwardProxyConvention,omitempty" tf:"forward_proxy_convention,omitempty"` + + // The name of the custom header containing the host of the request. + // The name of the header containing the host of the request. + ForwardProxyCustomHostHeaderName *string `json:"forwardProxyCustomHostHeaderName,omitempty" tf:"forward_proxy_custom_host_header_name,omitempty"` + + // The name of the custom header containing the scheme of the request. + // The name of the header containing the scheme of the request. 
+ ForwardProxyCustomSchemeHeaderName *string `json:"forwardProxyCustomSchemeHeaderName,omitempty" tf:"forward_proxy_custom_scheme_header_name,omitempty"` + + // A github_v2 block as defined below. + GithubV2 *LinuxWebAppAuthSettingsV2GithubV2Observation `json:"githubV2,omitempty" tf:"github_v2,omitempty"` + + // A google_v2 block as defined below. + GoogleV2 *LinuxWebAppAuthSettingsV2GoogleV2Observation `json:"googleV2,omitempty" tf:"google_v2,omitempty"` + + // The prefix that should precede all the authentication and authorisation paths. Defaults to /.auth. + // The prefix that should precede all the authentication and authorisation paths. Defaults to `/.auth` + HTTPRouteAPIPrefix *string `json:"httpRouteApiPrefix,omitempty" tf:"http_route_api_prefix,omitempty"` + + // A login block as defined below. + Login *LinuxWebAppAuthSettingsV2LoginObservation `json:"login,omitempty" tf:"login,omitempty"` + + // A microsoft_v2 block as defined below. + MicrosoftV2 *LinuxWebAppAuthSettingsV2MicrosoftV2Observation `json:"microsoftV2,omitempty" tf:"microsoft_v2,omitempty"` + + // Should the authentication flow be used for all requests. + // Should the authentication flow be used for all requests. + RequireAuthentication *bool `json:"requireAuthentication,omitempty" tf:"require_authentication,omitempty"` + + // Should HTTPS be required on connections? Defaults to true. + // Should HTTPS be required on connections? Defaults to true. + RequireHTTPS *bool `json:"requireHttps,omitempty" tf:"require_https,omitempty"` + + // The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to ~1. + // The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to `~1` + RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` + + // A twitter_v2 block as defined below. 
+ TwitterV2 *LinuxWebAppAuthSettingsV2TwitterV2Observation `json:"twitterV2,omitempty" tf:"twitter_v2,omitempty"` + + // The action to take for requests made without authentication. Possible values include RedirectToLoginPage, AllowAnonymous, Return401, and Return403. Defaults to RedirectToLoginPage. + // The action to take for requests made without authentication. Possible values include `RedirectToLoginPage`, `AllowAnonymous`, `Return401`, and `Return403`. Defaults to `RedirectToLoginPage`. + UnauthenticatedAction *string `json:"unauthenticatedAction,omitempty" tf:"unauthenticated_action,omitempty"` +} + +type LinuxWebAppAuthSettingsV2Parameters struct { + + // An active_directory_v2 block as defined below. + // +kubebuilder:validation:Optional + ActiveDirectoryV2 *LinuxWebAppAuthSettingsV2ActiveDirectoryV2Parameters `json:"activeDirectoryV2,omitempty" tf:"active_directory_v2,omitempty"` + + // An apple_v2 block as defined below. + // +kubebuilder:validation:Optional + AppleV2 *LinuxWebAppAuthSettingsV2AppleV2Parameters `json:"appleV2,omitempty" tf:"apple_v2,omitempty"` + + // Should the AuthV2 Settings be enabled. Defaults to false. + // Should the AuthV2 Settings be enabled. Defaults to `false` + // +kubebuilder:validation:Optional + AuthEnabled *bool `json:"authEnabled,omitempty" tf:"auth_enabled,omitempty"` + + // An azure_static_web_app_v2 block as defined below. + // +kubebuilder:validation:Optional + AzureStaticWebAppV2 *LinuxWebAppAuthSettingsV2AzureStaticWebAppV2Parameters `json:"azureStaticWebAppV2,omitempty" tf:"azure_static_web_app_v2,omitempty"` + + // The path to the App Auth settings. + // The path to the App Auth settings. **Note:** Relative Paths are evaluated from the Site Root directory. + // +kubebuilder:validation:Optional + ConfigFilePath *string `json:"configFilePath,omitempty" tf:"config_file_path,omitempty"` + + // Zero or more custom_oidc_v2 blocks as defined below. 
+ // +kubebuilder:validation:Optional + CustomOidcV2 []LinuxWebAppAuthSettingsV2CustomOidcV2Parameters `json:"customOidcV2,omitempty" tf:"custom_oidc_v2,omitempty"` + + // The Default Authentication Provider to use when the unauthenticated_action is set to RedirectToLoginPage. Possible values include: apple, azureactivedirectory, facebook, github, google, twitter and the name of your custom_oidc_v2 provider. + // The Default Authentication Provider to use when the `unauthenticated_action` is set to `RedirectToLoginPage`. Possible values include: `apple`, `azureactivedirectory`, `facebook`, `github`, `google`, `twitter` and the `name` of your `custom_oidc_v2` provider. + // +kubebuilder:validation:Optional + DefaultProvider *string `json:"defaultProvider,omitempty" tf:"default_provider,omitempty"` + + // The paths which should be excluded from the unauthenticated_action when it is set to RedirectToLoginPage. + // The paths which should be excluded from the `unauthenticated_action` when it is set to `RedirectToLoginPage`. + // +kubebuilder:validation:Optional + ExcludedPaths []*string `json:"excludedPaths,omitempty" tf:"excluded_paths,omitempty"` + + // A facebook_v2 block as defined below. + // +kubebuilder:validation:Optional + FacebookV2 *LinuxWebAppAuthSettingsV2FacebookV2Parameters `json:"facebookV2,omitempty" tf:"facebook_v2,omitempty"` + + // The convention used to determine the url of the request made. Possible values include NoProxy, Standard, Custom. Defaults to NoProxy. + // The convention used to determine the url of the request made. Possible values include `ForwardProxyConventionNoProxy`, `ForwardProxyConventionStandard`, `ForwardProxyConventionCustom`. Defaults to `ForwardProxyConventionNoProxy` + // +kubebuilder:validation:Optional + ForwardProxyConvention *string `json:"forwardProxyConvention,omitempty" tf:"forward_proxy_convention,omitempty"` + + // The name of the custom header containing the host of the request. 
+ // The name of the header containing the host of the request. + // +kubebuilder:validation:Optional + ForwardProxyCustomHostHeaderName *string `json:"forwardProxyCustomHostHeaderName,omitempty" tf:"forward_proxy_custom_host_header_name,omitempty"` + + // The name of the custom header containing the scheme of the request. + // The name of the header containing the scheme of the request. + // +kubebuilder:validation:Optional + ForwardProxyCustomSchemeHeaderName *string `json:"forwardProxyCustomSchemeHeaderName,omitempty" tf:"forward_proxy_custom_scheme_header_name,omitempty"` + + // A github_v2 block as defined below. + // +kubebuilder:validation:Optional + GithubV2 *LinuxWebAppAuthSettingsV2GithubV2Parameters `json:"githubV2,omitempty" tf:"github_v2,omitempty"` + + // A google_v2 block as defined below. + // +kubebuilder:validation:Optional + GoogleV2 *LinuxWebAppAuthSettingsV2GoogleV2Parameters `json:"googleV2,omitempty" tf:"google_v2,omitempty"` + + // The prefix that should precede all the authentication and authorisation paths. Defaults to /.auth. + // The prefix that should precede all the authentication and authorisation paths. Defaults to `/.auth` + // +kubebuilder:validation:Optional + HTTPRouteAPIPrefix *string `json:"httpRouteApiPrefix,omitempty" tf:"http_route_api_prefix,omitempty"` + + // A login block as defined below. + // +kubebuilder:validation:Optional + Login *LinuxWebAppAuthSettingsV2LoginParameters `json:"login" tf:"login,omitempty"` + + // A microsoft_v2 block as defined below. + // +kubebuilder:validation:Optional + MicrosoftV2 *LinuxWebAppAuthSettingsV2MicrosoftV2Parameters `json:"microsoftV2,omitempty" tf:"microsoft_v2,omitempty"` + + // Should the authentication flow be used for all requests. + // Should the authentication flow be used for all requests. 
+ // +kubebuilder:validation:Optional + RequireAuthentication *bool `json:"requireAuthentication,omitempty" tf:"require_authentication,omitempty"` + + // Should HTTPS be required on connections? Defaults to true. + // Should HTTPS be required on connections? Defaults to true. + // +kubebuilder:validation:Optional + RequireHTTPS *bool `json:"requireHttps,omitempty" tf:"require_https,omitempty"` + + // The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to ~1. + // The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to `~1` + // +kubebuilder:validation:Optional + RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` + + // A twitter_v2 block as defined below. + // +kubebuilder:validation:Optional + TwitterV2 *LinuxWebAppAuthSettingsV2TwitterV2Parameters `json:"twitterV2,omitempty" tf:"twitter_v2,omitempty"` + + // The action to take for requests made without authentication. Possible values include RedirectToLoginPage, AllowAnonymous, Return401, and Return403. Defaults to RedirectToLoginPage. + // The action to take for requests made without authentication. Possible values include `RedirectToLoginPage`, `AllowAnonymous`, `Return401`, and `Return403`. Defaults to `RedirectToLoginPage`. + // +kubebuilder:validation:Optional + UnauthenticatedAction *string `json:"unauthenticatedAction,omitempty" tf:"unauthenticated_action,omitempty"` +} + +type LinuxWebAppAuthSettingsV2TwitterV2InitParameters struct { + + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + ConsumerKey *string `json:"consumerKey,omitempty" tf:"consumer_key,omitempty"` + + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. 
+ // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + ConsumerSecretSettingName *string `json:"consumerSecretSettingName,omitempty" tf:"consumer_secret_setting_name,omitempty"` +} + +type LinuxWebAppAuthSettingsV2TwitterV2Observation struct { + + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + ConsumerKey *string `json:"consumerKey,omitempty" tf:"consumer_key,omitempty"` + + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + ConsumerSecretSettingName *string `json:"consumerSecretSettingName,omitempty" tf:"consumer_secret_setting_name,omitempty"` +} + +type LinuxWebAppAuthSettingsV2TwitterV2Parameters struct { + + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // +kubebuilder:validation:Optional + ConsumerKey *string `json:"consumerKey" tf:"consumer_key,omitempty"` + + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + // +kubebuilder:validation:Optional + ConsumerSecretSettingName *string `json:"consumerSecretSettingName" tf:"consumer_secret_setting_name,omitempty"` +} + +type LinuxWebAppBackupInitParameters struct { + + // Should this backup job be enabled? Defaults to true. + // Should this backup job be enabled? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The name which should be used for this Backup. + // The name which should be used for this Backup. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A schedule block as defined below. + Schedule *LinuxWebAppBackupScheduleInitParameters `json:"schedule,omitempty" tf:"schedule,omitempty"` +} + +type LinuxWebAppBackupObservation struct { + + // Should this backup job be enabled? Defaults to true. + // Should this backup job be enabled? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The name which should be used for this Backup. + // The name which should be used for this Backup. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A schedule block as defined below. + Schedule *LinuxWebAppBackupScheduleObservation `json:"schedule,omitempty" tf:"schedule,omitempty"` +} + +type LinuxWebAppBackupParameters struct { + + // Should this backup job be enabled? Defaults to true. + // Should this backup job be enabled? + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The name which should be used for this Backup. + // The name which should be used for this Backup. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // A schedule block as defined below. + // +kubebuilder:validation:Optional + Schedule *LinuxWebAppBackupScheduleParameters `json:"schedule" tf:"schedule,omitempty"` + + // The SAS URL to the container. + // The SAS URL to the container. + // +kubebuilder:validation:Required + StorageAccountURLSecretRef v1.SecretKeySelector `json:"storageAccountUrlSecretRef" tf:"-"` +} + +type LinuxWebAppBackupScheduleInitParameters struct { + + // How often the backup should be executed (e.g. for weekly backup, this should be set to 7 and frequency_unit should be set to Day). + // How often the backup should be executed (e.g. for weekly backup, this should be set to `7` and `frequency_unit` should be set to `Day`). 
+ FrequencyInterval *float64 `json:"frequencyInterval,omitempty" tf:"frequency_interval,omitempty"` + + // The unit of time for how often the backup should take place. Possible values include: Day, Hour + // The unit of time for how often the backup should take place. Possible values include: `Day` and `Hour`. + FrequencyUnit *string `json:"frequencyUnit,omitempty" tf:"frequency_unit,omitempty"` + + // Should the service keep at least one backup, regardless of the age of backup? Defaults to false. + // Should the service keep at least one backup, regardless of age of backup. Defaults to `false`. + KeepAtLeastOneBackup *bool `json:"keepAtLeastOneBackup,omitempty" tf:"keep_at_least_one_backup,omitempty"` + + // After how many days backups should be deleted. Defaults to 30. + // After how many days backups should be deleted. + RetentionPeriodDays *float64 `json:"retentionPeriodDays,omitempty" tf:"retention_period_days,omitempty"` + + // When the schedule should start working in RFC-3339 format. + // When the schedule should start working in RFC-3339 format. + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` +} + +type LinuxWebAppBackupScheduleObservation struct { + + // How often the backup should be executed (e.g. for weekly backup, this should be set to 7 and frequency_unit should be set to Day). + // How often the backup should be executed (e.g. for weekly backup, this should be set to `7` and `frequency_unit` should be set to `Day`). + FrequencyInterval *float64 `json:"frequencyInterval,omitempty" tf:"frequency_interval,omitempty"` + + // The unit of time for how often the backup should take place. Possible values include: Day, Hour + // The unit of time for how often the backup should take place. Possible values include: `Day` and `Hour`. + FrequencyUnit *string `json:"frequencyUnit,omitempty" tf:"frequency_unit,omitempty"` + + // Should the service keep at least one backup, regardless of the age of backup? Defaults to false. 
+ // Should the service keep at least one backup, regardless of age of backup. Defaults to `false`. + KeepAtLeastOneBackup *bool `json:"keepAtLeastOneBackup,omitempty" tf:"keep_at_least_one_backup,omitempty"` + + // The time the backup was last attempted. + LastExecutionTime *string `json:"lastExecutionTime,omitempty" tf:"last_execution_time,omitempty"` + + // After how many days backups should be deleted. Defaults to 30. + // After how many days backups should be deleted. + RetentionPeriodDays *float64 `json:"retentionPeriodDays,omitempty" tf:"retention_period_days,omitempty"` + + // When the schedule should start working in RFC-3339 format. + // When the schedule should start working in RFC-3339 format. + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` +} + +type LinuxWebAppBackupScheduleParameters struct { + + // How often the backup should be executed (e.g. for weekly backup, this should be set to 7 and frequency_unit should be set to Day). + // How often the backup should be executed (e.g. for weekly backup, this should be set to `7` and `frequency_unit` should be set to `Day`). + // +kubebuilder:validation:Optional + FrequencyInterval *float64 `json:"frequencyInterval" tf:"frequency_interval,omitempty"` + + // The unit of time for how often the backup should take place. Possible values include: Day, Hour + // The unit of time for how often the backup should take place. Possible values include: `Day` and `Hour`. + // +kubebuilder:validation:Optional + FrequencyUnit *string `json:"frequencyUnit" tf:"frequency_unit,omitempty"` + + // Should the service keep at least one backup, regardless of the age of backup? Defaults to false. + // Should the service keep at least one backup, regardless of age of backup. Defaults to `false`. + // +kubebuilder:validation:Optional + KeepAtLeastOneBackup *bool `json:"keepAtLeastOneBackup,omitempty" tf:"keep_at_least_one_backup,omitempty"` + + // After how many days backups should be deleted. 
Defaults to 30. + // After how many days backups should be deleted. + // +kubebuilder:validation:Optional + RetentionPeriodDays *float64 `json:"retentionPeriodDays,omitempty" tf:"retention_period_days,omitempty"` + + // When the schedule should start working in RFC-3339 format. + // When the schedule should start working in RFC-3339 format. + // +kubebuilder:validation:Optional + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` +} + +type LinuxWebAppConnectionStringInitParameters struct { + + // The name of the Connection String. + // The name which should be used for this Connection. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Type of database. Possible values include: MySQL, SQLServer, SQLAzure, Custom, NotificationHub, ServiceBus, EventHub, APIHub, DocDb, RedisCache, and PostgreSQL. + // Type of database. Possible values include: `MySQL`, `SQLServer`, `SQLAzure`, `Custom`, `NotificationHub`, `ServiceBus`, `EventHub`, `APIHub`, `DocDb`, `RedisCache`, and `PostgreSQL`. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type LinuxWebAppConnectionStringObservation struct { + + // The name of the Connection String. + // The name which should be used for this Connection. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Type of database. Possible values include: MySQL, SQLServer, SQLAzure, Custom, NotificationHub, ServiceBus, EventHub, APIHub, DocDb, RedisCache, and PostgreSQL. + // Type of database. Possible values include: `MySQL`, `SQLServer`, `SQLAzure`, `Custom`, `NotificationHub`, `ServiceBus`, `EventHub`, `APIHub`, `DocDb`, `RedisCache`, and `PostgreSQL`. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type LinuxWebAppConnectionStringParameters struct { + + // The name of the Connection String. + // The name which should be used for this Connection. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Type of database. 
Possible values include: MySQL, SQLServer, SQLAzure, Custom, NotificationHub, ServiceBus, EventHub, APIHub, DocDb, RedisCache, and PostgreSQL. + // Type of database. Possible values include: `MySQL`, `SQLServer`, `SQLAzure`, `Custom`, `NotificationHub`, `ServiceBus`, `EventHub`, `APIHub`, `DocDb`, `RedisCache`, and `PostgreSQL`. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` + + // The connection string value. + // The connection string value. + // +kubebuilder:validation:Required + ValueSecretRef v1.SecretKeySelector `json:"valueSecretRef" tf:"-"` +} + +type LinuxWebAppIdentityInitParameters struct { + + // A list of User Assigned Managed Identity IDs to be assigned to this Linux Web App. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Linux Web App. Possible values are SystemAssigned, UserAssigned, and SystemAssigned, UserAssigned (to enable both). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type LinuxWebAppIdentityObservation struct { + + // A list of User Assigned Managed Identity IDs to be assigned to this Linux Web App. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Principal ID associated with this Managed Service Identity. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID associated with this Managed Service Identity. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Linux Web App. Possible values are SystemAssigned, UserAssigned, and SystemAssigned, UserAssigned (to enable both). 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type LinuxWebAppIdentityParameters struct { + + // A list of User Assigned Managed Identity IDs to be assigned to this Linux Web App. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Linux Web App. Possible values are SystemAssigned, UserAssigned, and SystemAssigned, UserAssigned (to enable both). + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type LinuxWebAppInitParameters struct { + + // A map of key-value pairs of App Settings. + // +mapType=granular + AppSettings map[string]*string `json:"appSettings,omitempty" tf:"app_settings,omitempty"` + + // A auth_settings block as defined below. + AuthSettings *LinuxWebAppAuthSettingsInitParameters `json:"authSettings,omitempty" tf:"auth_settings,omitempty"` + + // An auth_settings_v2 block as defined below. + AuthSettingsV2 *LinuxWebAppAuthSettingsV2InitParameters `json:"authSettingsV2,omitempty" tf:"auth_settings_v2,omitempty"` + + // A backup block as defined below. + Backup *LinuxWebAppBackupInitParameters `json:"backup,omitempty" tf:"backup,omitempty"` + + // Should Client Affinity be enabled? + ClientAffinityEnabled *bool `json:"clientAffinityEnabled,omitempty" tf:"client_affinity_enabled,omitempty"` + + // Should Client Certificates be enabled? + ClientCertificateEnabled *bool `json:"clientCertificateEnabled,omitempty" tf:"client_certificate_enabled,omitempty"` + + // Paths to exclude when using client certificates, separated by ; + // Paths to exclude when using client certificates, separated by ; + ClientCertificateExclusionPaths *string `json:"clientCertificateExclusionPaths,omitempty" tf:"client_certificate_exclusion_paths,omitempty"` + + // The Client Certificate mode. 
Possible values are Required, Optional, and OptionalInteractiveUser. This property has no effect when client_certificate_enabled is false. Defaults to Required. + ClientCertificateMode *string `json:"clientCertificateMode,omitempty" tf:"client_certificate_mode,omitempty"` + + // One or more connection_string blocks as defined below. + ConnectionString []LinuxWebAppConnectionStringInitParameters `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // Should the Linux Web App be enabled? Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Should the default FTP Basic Authentication publishing profile be enabled. Defaults to true. + FtpPublishBasicAuthenticationEnabled *bool `json:"ftpPublishBasicAuthenticationEnabled,omitempty" tf:"ftp_publish_basic_authentication_enabled,omitempty"` + + // Should the Linux Web App require HTTPS connections. Defaults to false. + HTTPSOnly *bool `json:"httpsOnly,omitempty" tf:"https_only,omitempty"` + + // An identity block as defined below. + Identity *LinuxWebAppIdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The User Assigned Identity ID used for accessing KeyVault secrets. The identity must be assigned to the application in the identity block. For more information see - Access vaults with a user-assigned identity. + KeyVaultReferenceIdentityID *string `json:"keyVaultReferenceIdentityId,omitempty" tf:"key_vault_reference_identity_id,omitempty"` + + // The Azure Region where the Linux Web App should exist. Changing this forces a new Linux Web App to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A logs block as defined below. + Logs *LogsInitParameters `json:"logs,omitempty" tf:"logs,omitempty"` + + // Should public network access be enabled for the Web App. Defaults to true. 
+ PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The ID of the Service Plan that this Linux App Service will be created in. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/web/v1beta1.ServicePlan + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + ServicePlanID *string `json:"servicePlanId,omitempty" tf:"service_plan_id,omitempty"` + + // Reference to a ServicePlan in web to populate servicePlanId. + // +kubebuilder:validation:Optional + ServicePlanIDRef *v1.Reference `json:"servicePlanIdRef,omitempty" tf:"-"` + + // Selector for a ServicePlan in web to populate servicePlanId. + // +kubebuilder:validation:Optional + ServicePlanIDSelector *v1.Selector `json:"servicePlanIdSelector,omitempty" tf:"-"` + + // A site_config block as defined below. + SiteConfig *LinuxWebAppSiteConfigInitParameters `json:"siteConfig,omitempty" tf:"site_config,omitempty"` + + // A sticky_settings block as defined below. + StickySettings *LinuxWebAppStickySettingsInitParameters `json:"stickySettings,omitempty" tf:"sticky_settings,omitempty"` + + // One or more storage_account blocks as defined below. + StorageAccount []LinuxWebAppStorageAccountInitParameters `json:"storageAccount,omitempty" tf:"storage_account,omitempty"` + + // A mapping of tags which should be assigned to the Linux Web App. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The subnet id which will be used by this Web App for regional virtual network integration. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDRef *v1.Reference `json:"virtualNetworkSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDSelector *v1.Selector `json:"virtualNetworkSubnetIdSelector,omitempty" tf:"-"` + + // Should the default WebDeploy Basic Authentication publishing credentials enabled. Defaults to true. + WebdeployPublishBasicAuthenticationEnabled *bool `json:"webdeployPublishBasicAuthenticationEnabled,omitempty" tf:"webdeploy_publish_basic_authentication_enabled,omitempty"` + + // The local path and filename of the Zip packaged application to deploy to this Linux Web App. + // The local path and filename of the Zip packaged application to deploy to this Linux Web App. **Note:** Using this value requires either `WEBSITE_RUN_FROM_PACKAGE=1` or `SCM_DO_BUILD_DURING_DEPLOYMENT=true` to be set on the App in `app_settings`. + ZipDeployFile *string `json:"zipDeployFile,omitempty" tf:"zip_deploy_file,omitempty"` +} + +type LinuxWebAppObservation struct { + + // A map of key-value pairs of App Settings. + // +mapType=granular + AppSettings map[string]*string `json:"appSettings,omitempty" tf:"app_settings,omitempty"` + + // A auth_settings block as defined below. + AuthSettings *LinuxWebAppAuthSettingsObservation `json:"authSettings,omitempty" tf:"auth_settings,omitempty"` + + // An auth_settings_v2 block as defined below. 
+ AuthSettingsV2 *LinuxWebAppAuthSettingsV2Observation `json:"authSettingsV2,omitempty" tf:"auth_settings_v2,omitempty"` + + // A backup block as defined below. + Backup *LinuxWebAppBackupObservation `json:"backup,omitempty" tf:"backup,omitempty"` + + // Should Client Affinity be enabled? + ClientAffinityEnabled *bool `json:"clientAffinityEnabled,omitempty" tf:"client_affinity_enabled,omitempty"` + + // Should Client Certificates be enabled? + ClientCertificateEnabled *bool `json:"clientCertificateEnabled,omitempty" tf:"client_certificate_enabled,omitempty"` + + // Paths to exclude when using client certificates, separated by ; + // Paths to exclude when using client certificates, separated by ; + ClientCertificateExclusionPaths *string `json:"clientCertificateExclusionPaths,omitempty" tf:"client_certificate_exclusion_paths,omitempty"` + + // The Client Certificate mode. Possible values are Required, Optional, and OptionalInteractiveUser. This property has no effect when client_certificate_enabled is false. Defaults to Required. + ClientCertificateMode *string `json:"clientCertificateMode,omitempty" tf:"client_certificate_mode,omitempty"` + + // One or more connection_string blocks as defined below. + ConnectionString []LinuxWebAppConnectionStringObservation `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // The default hostname of the Linux Web App. + DefaultHostName *string `json:"defaultHostname,omitempty" tf:"default_hostname,omitempty"` + + // Should the Linux Web App be enabled? Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Should the default FTP Basic Authentication publishing profile be enabled. Defaults to true. + FtpPublishBasicAuthenticationEnabled *bool `json:"ftpPublishBasicAuthenticationEnabled,omitempty" tf:"ftp_publish_basic_authentication_enabled,omitempty"` + + // Should the Linux Web App require HTTPS connections. Defaults to false. 
+ HTTPSOnly *bool `json:"httpsOnly,omitempty" tf:"https_only,omitempty"` + + // The ID of the App Service Environment used by App Service. + HostingEnvironmentID *string `json:"hostingEnvironmentId,omitempty" tf:"hosting_environment_id,omitempty"` + + // The ID of the Linux Web App. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. + Identity *LinuxWebAppIdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // The User Assigned Identity ID used for accessing KeyVault secrets. The identity must be assigned to the application in the identity block. For more information see - Access vaults with a user-assigned identity. + KeyVaultReferenceIdentityID *string `json:"keyVaultReferenceIdentityId,omitempty" tf:"key_vault_reference_identity_id,omitempty"` + + // The Kind value for this Linux Web App. + Kind *string `json:"kind,omitempty" tf:"kind,omitempty"` + + // The Azure Region where the Linux Web App should exist. Changing this forces a new Linux Web App to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A logs block as defined below. + Logs *LogsObservation `json:"logs,omitempty" tf:"logs,omitempty"` + + // A list of outbound IP addresses - such as ["52.23.25.3", "52.143.43.12"] + OutboundIPAddressList []*string `json:"outboundIpAddressList,omitempty" tf:"outbound_ip_address_list,omitempty"` + + // A comma separated list of outbound IP addresses - such as 52.23.25.3,52.143.43.12. + OutboundIPAddresses *string `json:"outboundIpAddresses,omitempty" tf:"outbound_ip_addresses,omitempty"` + + // A list of possible outbound ip address. + PossibleOutboundIPAddressList []*string `json:"possibleOutboundIpAddressList,omitempty" tf:"possible_outbound_ip_address_list,omitempty"` + + // A comma-separated list of outbound IP addresses - such as 52.23.25.3,52.143.43.12,52.143.43.17 - not all of which are necessarily in use. Superset of outbound_ip_addresses. 
+ PossibleOutboundIPAddresses *string `json:"possibleOutboundIpAddresses,omitempty" tf:"possible_outbound_ip_addresses,omitempty"` + + // Should public network access be enabled for the Web App. Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The name of the Resource Group where the Linux Web App should exist. Changing this forces a new Linux Web App to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // The ID of the Service Plan that this Linux App Service will be created in. + ServicePlanID *string `json:"servicePlanId,omitempty" tf:"service_plan_id,omitempty"` + + // A site_config block as defined below. + SiteConfig *LinuxWebAppSiteConfigObservation `json:"siteConfig,omitempty" tf:"site_config,omitempty"` + + // A sticky_settings block as defined below. + StickySettings *LinuxWebAppStickySettingsObservation `json:"stickySettings,omitempty" tf:"sticky_settings,omitempty"` + + // One or more storage_account blocks as defined below. + StorageAccount []LinuxWebAppStorageAccountObservation `json:"storageAccount,omitempty" tf:"storage_account,omitempty"` + + // A mapping of tags which should be assigned to the Linux Web App. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The subnet id which will be used by this Web App for regional virtual network integration. + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` + + // Should the default WebDeploy Basic Authentication publishing credentials enabled. Defaults to true. 
+ WebdeployPublishBasicAuthenticationEnabled *bool `json:"webdeployPublishBasicAuthenticationEnabled,omitempty" tf:"webdeploy_publish_basic_authentication_enabled,omitempty"` + + // The local path and filename of the Zip packaged application to deploy to this Linux Web App. + // The local path and filename of the Zip packaged application to deploy to this Linux Web App. **Note:** Using this value requires either `WEBSITE_RUN_FROM_PACKAGE=1` or `SCM_DO_BUILD_DURING_DEPLOYMENT=true` to be set on the App in `app_settings`. + ZipDeployFile *string `json:"zipDeployFile,omitempty" tf:"zip_deploy_file,omitempty"` +} + +type LinuxWebAppParameters struct { + + // A map of key-value pairs of App Settings. + // +kubebuilder:validation:Optional + // +mapType=granular + AppSettings map[string]*string `json:"appSettings,omitempty" tf:"app_settings,omitempty"` + + // A auth_settings block as defined below. + // +kubebuilder:validation:Optional + AuthSettings *LinuxWebAppAuthSettingsParameters `json:"authSettings,omitempty" tf:"auth_settings,omitempty"` + + // An auth_settings_v2 block as defined below. + // +kubebuilder:validation:Optional + AuthSettingsV2 *LinuxWebAppAuthSettingsV2Parameters `json:"authSettingsV2,omitempty" tf:"auth_settings_v2,omitempty"` + + // A backup block as defined below. + // +kubebuilder:validation:Optional + Backup *LinuxWebAppBackupParameters `json:"backup,omitempty" tf:"backup,omitempty"` + + // Should Client Affinity be enabled? + // +kubebuilder:validation:Optional + ClientAffinityEnabled *bool `json:"clientAffinityEnabled,omitempty" tf:"client_affinity_enabled,omitempty"` + + // Should Client Certificates be enabled? 
+ // +kubebuilder:validation:Optional + ClientCertificateEnabled *bool `json:"clientCertificateEnabled,omitempty" tf:"client_certificate_enabled,omitempty"` + + // Paths to exclude when using client certificates, separated by ; + // Paths to exclude when using client certificates, separated by ; + // +kubebuilder:validation:Optional + ClientCertificateExclusionPaths *string `json:"clientCertificateExclusionPaths,omitempty" tf:"client_certificate_exclusion_paths,omitempty"` + + // The Client Certificate mode. Possible values are Required, Optional, and OptionalInteractiveUser. This property has no effect when client_certificate_enabled is false. Defaults to Required. + // +kubebuilder:validation:Optional + ClientCertificateMode *string `json:"clientCertificateMode,omitempty" tf:"client_certificate_mode,omitempty"` + + // One or more connection_string blocks as defined below. + // +kubebuilder:validation:Optional + ConnectionString []LinuxWebAppConnectionStringParameters `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // Should the Linux Web App be enabled? Defaults to true. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Should the default FTP Basic Authentication publishing profile be enabled. Defaults to true. + // +kubebuilder:validation:Optional + FtpPublishBasicAuthenticationEnabled *bool `json:"ftpPublishBasicAuthenticationEnabled,omitempty" tf:"ftp_publish_basic_authentication_enabled,omitempty"` + + // Should the Linux Web App require HTTPS connections. Defaults to false. + // +kubebuilder:validation:Optional + HTTPSOnly *bool `json:"httpsOnly,omitempty" tf:"https_only,omitempty"` + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *LinuxWebAppIdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The User Assigned Identity ID used for accessing KeyVault secrets. 
The identity must be assigned to the application in the identity block. For more information see - Access vaults with a user-assigned identity. + // +kubebuilder:validation:Optional + KeyVaultReferenceIdentityID *string `json:"keyVaultReferenceIdentityId,omitempty" tf:"key_vault_reference_identity_id,omitempty"` + + // The Azure Region where the Linux Web App should exist. Changing this forces a new Linux Web App to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A logs block as defined below. + // +kubebuilder:validation:Optional + Logs *LogsParameters `json:"logs,omitempty" tf:"logs,omitempty"` + + // Should public network access be enabled for the Web App. Defaults to true. + // +kubebuilder:validation:Optional + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The name of the Resource Group where the Linux Web App should exist. Changing this forces a new Linux Web App to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // The ID of the Service Plan that this Linux App Service will be created in. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/web/v1beta1.ServicePlan + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + ServicePlanID *string `json:"servicePlanId,omitempty" tf:"service_plan_id,omitempty"` + + // Reference to a ServicePlan in web to populate servicePlanId. + // +kubebuilder:validation:Optional + ServicePlanIDRef *v1.Reference `json:"servicePlanIdRef,omitempty" tf:"-"` + + // Selector for a ServicePlan in web to populate servicePlanId. + // +kubebuilder:validation:Optional + ServicePlanIDSelector *v1.Selector `json:"servicePlanIdSelector,omitempty" tf:"-"` + + // A site_config block as defined below. + // +kubebuilder:validation:Optional + SiteConfig *LinuxWebAppSiteConfigParameters `json:"siteConfig,omitempty" tf:"site_config,omitempty"` + + // A sticky_settings block as defined below. + // +kubebuilder:validation:Optional + StickySettings *LinuxWebAppStickySettingsParameters `json:"stickySettings,omitempty" tf:"sticky_settings,omitempty"` + + // One or more storage_account blocks as defined below. + // +kubebuilder:validation:Optional + StorageAccount []LinuxWebAppStorageAccountParameters `json:"storageAccount,omitempty" tf:"storage_account,omitempty"` + + // A mapping of tags which should be assigned to the Linux Web App. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The subnet id which will be used by this Web App for regional virtual network integration. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDRef *v1.Reference `json:"virtualNetworkSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDSelector *v1.Selector `json:"virtualNetworkSubnetIdSelector,omitempty" tf:"-"` + + // Should the default WebDeploy Basic Authentication publishing credentials enabled. Defaults to true. + // +kubebuilder:validation:Optional + WebdeployPublishBasicAuthenticationEnabled *bool `json:"webdeployPublishBasicAuthenticationEnabled,omitempty" tf:"webdeploy_publish_basic_authentication_enabled,omitempty"` + + // The local path and filename of the Zip packaged application to deploy to this Linux Web App. + // The local path and filename of the Zip packaged application to deploy to this Linux Web App. **Note:** Using this value requires either `WEBSITE_RUN_FROM_PACKAGE=1` or `SCM_DO_BUILD_DURING_DEPLOYMENT=true` to be set on the App in `app_settings`. + // +kubebuilder:validation:Optional + ZipDeployFile *string `json:"zipDeployFile,omitempty" tf:"zip_deploy_file,omitempty"` +} + +type LinuxWebAppSiteConfigApplicationStackInitParameters struct { + DockerImage *string `json:"dockerImage,omitempty" tf:"docker_image,omitempty"` + + // The docker image, including tag, to be used. e.g. appsvc/staticsite:latest. 
+ DockerImageName *string `json:"dockerImageName,omitempty" tf:"docker_image_name,omitempty"` + + DockerImageTag *string `json:"dockerImageTag,omitempty" tf:"docker_image_tag,omitempty"` + + // The URL of the container registry where the docker_image_name is located. e.g. https://index.docker.io or https://mcr.microsoft.com. This value is required with docker_image_name. + DockerRegistryURL *string `json:"dockerRegistryUrl,omitempty" tf:"docker_registry_url,omitempty"` + + // The User Name to use for authentication against the registry to pull the image. + DockerRegistryUsername *string `json:"dockerRegistryUsername,omitempty" tf:"docker_registry_username,omitempty"` + + // The version of .NET to use. Possible values include 3.1, 5.0, 6.0, 7.0 and 8.0. + DotnetVersion *string `json:"dotnetVersion,omitempty" tf:"dotnet_version,omitempty"` + + // The version of Go to use. Possible values include 1.18, and 1.19. + GoVersion *string `json:"goVersion,omitempty" tf:"go_version,omitempty"` + + // The Java server type. Possible values include JAVA, TOMCAT, and JBOSSEAP. + JavaServer *string `json:"javaServer,omitempty" tf:"java_server,omitempty"` + + // The Version of the java_server to use. + JavaServerVersion *string `json:"javaServerVersion,omitempty" tf:"java_server_version,omitempty"` + + // The Version of Java to use. Possible values include 8, 11, and 17. + JavaVersion *string `json:"javaVersion,omitempty" tf:"java_version,omitempty"` + + // The version of Node to run. Possible values include 12-lts, 14-lts, 16-lts, 18-lts and 20-lts. This property conflicts with java_version. + NodeVersion *string `json:"nodeVersion,omitempty" tf:"node_version,omitempty"` + + // The version of PHP to run. Possible values are 7.4, 8.0, 8.1 and 8.2. + PHPVersion *string `json:"phpVersion,omitempty" tf:"php_version,omitempty"` + + // The version of Python to run. Possible values include 3.7, 3.8, 3.9, 3.10, 3.11 and 3.12. 
+ PythonVersion *string `json:"pythonVersion,omitempty" tf:"python_version,omitempty"` + + // The version of Ruby to run. Possible values include 2.6 and 2.7. + RubyVersion *string `json:"rubyVersion,omitempty" tf:"ruby_version,omitempty"` +} + +type LinuxWebAppSiteConfigApplicationStackObservation struct { + DockerImage *string `json:"dockerImage,omitempty" tf:"docker_image,omitempty"` + + // The docker image, including tag, to be used. e.g. appsvc/staticsite:latest. + DockerImageName *string `json:"dockerImageName,omitempty" tf:"docker_image_name,omitempty"` + + DockerImageTag *string `json:"dockerImageTag,omitempty" tf:"docker_image_tag,omitempty"` + + // The URL of the container registry where the docker_image_name is located. e.g. https://index.docker.io or https://mcr.microsoft.com. This value is required with docker_image_name. + DockerRegistryURL *string `json:"dockerRegistryUrl,omitempty" tf:"docker_registry_url,omitempty"` + + // The User Name to use for authentication against the registry to pull the image. + DockerRegistryUsername *string `json:"dockerRegistryUsername,omitempty" tf:"docker_registry_username,omitempty"` + + // The version of .NET to use. Possible values include 3.1, 5.0, 6.0, 7.0 and 8.0. + DotnetVersion *string `json:"dotnetVersion,omitempty" tf:"dotnet_version,omitempty"` + + // The version of Go to use. Possible values include 1.18, and 1.19. + GoVersion *string `json:"goVersion,omitempty" tf:"go_version,omitempty"` + + // The Java server type. Possible values include JAVA, TOMCAT, and JBOSSEAP. + JavaServer *string `json:"javaServer,omitempty" tf:"java_server,omitempty"` + + // The Version of the java_server to use. + JavaServerVersion *string `json:"javaServerVersion,omitempty" tf:"java_server_version,omitempty"` + + // The Version of Java to use. Possible values include 8, 11, and 17. + JavaVersion *string `json:"javaVersion,omitempty" tf:"java_version,omitempty"` + + // The version of Node to run. 
Possible values include 12-lts, 14-lts, 16-lts, 18-lts and 20-lts. This property conflicts with java_version. + NodeVersion *string `json:"nodeVersion,omitempty" tf:"node_version,omitempty"` + + // The version of PHP to run. Possible values are 7.4, 8.0, 8.1 and 8.2. + PHPVersion *string `json:"phpVersion,omitempty" tf:"php_version,omitempty"` + + // The version of Python to run. Possible values include 3.7, 3.8, 3.9, 3.10, 3.11 and 3.12. + PythonVersion *string `json:"pythonVersion,omitempty" tf:"python_version,omitempty"` + + // The version of Ruby to run. Possible values include 2.6 and 2.7. + RubyVersion *string `json:"rubyVersion,omitempty" tf:"ruby_version,omitempty"` +} + +type LinuxWebAppSiteConfigApplicationStackParameters struct { + + // +kubebuilder:validation:Optional + DockerImage *string `json:"dockerImage,omitempty" tf:"docker_image,omitempty"` + + // The docker image, including tag, to be used. e.g. appsvc/staticsite:latest. + // +kubebuilder:validation:Optional + DockerImageName *string `json:"dockerImageName,omitempty" tf:"docker_image_name,omitempty"` + + // +kubebuilder:validation:Optional + DockerImageTag *string `json:"dockerImageTag,omitempty" tf:"docker_image_tag,omitempty"` + + // The Password to use for authentication against the registry to pull the image. + // +kubebuilder:validation:Optional + DockerRegistryPasswordSecretRef *v1.SecretKeySelector `json:"dockerRegistryPasswordSecretRef,omitempty" tf:"-"` + + // The URL of the container registry where the docker_image_name is located. e.g. https://index.docker.io or https://mcr.microsoft.com. This value is required with docker_image_name. + // +kubebuilder:validation:Optional + DockerRegistryURL *string `json:"dockerRegistryUrl,omitempty" tf:"docker_registry_url,omitempty"` + + // The User Name to use for authentication against the registry to pull the image. 
+ // +kubebuilder:validation:Optional + DockerRegistryUsername *string `json:"dockerRegistryUsername,omitempty" tf:"docker_registry_username,omitempty"` + + // The version of .NET to use. Possible values include 3.1, 5.0, 6.0, 7.0 and 8.0. + // +kubebuilder:validation:Optional + DotnetVersion *string `json:"dotnetVersion,omitempty" tf:"dotnet_version,omitempty"` + + // The version of Go to use. Possible values include 1.18, and 1.19. + // +kubebuilder:validation:Optional + GoVersion *string `json:"goVersion,omitempty" tf:"go_version,omitempty"` + + // The Java server type. Possible values include JAVA, TOMCAT, and JBOSSEAP. + // +kubebuilder:validation:Optional + JavaServer *string `json:"javaServer,omitempty" tf:"java_server,omitempty"` + + // The Version of the java_server to use. + // +kubebuilder:validation:Optional + JavaServerVersion *string `json:"javaServerVersion,omitempty" tf:"java_server_version,omitempty"` + + // The Version of Java to use. Possible values include 8, 11, and 17. + // +kubebuilder:validation:Optional + JavaVersion *string `json:"javaVersion,omitempty" tf:"java_version,omitempty"` + + // The version of Node to run. Possible values include 12-lts, 14-lts, 16-lts, 18-lts and 20-lts. This property conflicts with java_version. + // +kubebuilder:validation:Optional + NodeVersion *string `json:"nodeVersion,omitempty" tf:"node_version,omitempty"` + + // The version of PHP to run. Possible values are 7.4, 8.0, 8.1 and 8.2. + // +kubebuilder:validation:Optional + PHPVersion *string `json:"phpVersion,omitempty" tf:"php_version,omitempty"` + + // The version of Python to run. Possible values include 3.7, 3.8, 3.9, 3.10, 3.11 and 3.12. + // +kubebuilder:validation:Optional + PythonVersion *string `json:"pythonVersion,omitempty" tf:"python_version,omitempty"` + + // The version of Ruby to run. Possible values include 2.6 and 2.7. 
+ // +kubebuilder:validation:Optional + RubyVersion *string `json:"rubyVersion,omitempty" tf:"ruby_version,omitempty"` +} + +type LinuxWebAppSiteConfigCorsInitParameters struct { + + // Specifies a list of origins that should be allowed to make cross-origin calls. + // Specifies a list of origins that should be allowed to make cross-origin calls. + // +listType=set + AllowedOrigins []*string `json:"allowedOrigins,omitempty" tf:"allowed_origins,omitempty"` + + // Whether CORS requests with credentials are allowed. Defaults to false + // Are credentials allowed in CORS requests? Defaults to `false`. + SupportCredentials *bool `json:"supportCredentials,omitempty" tf:"support_credentials,omitempty"` +} + +type LinuxWebAppSiteConfigCorsObservation struct { + + // Specifies a list of origins that should be allowed to make cross-origin calls. + // Specifies a list of origins that should be allowed to make cross-origin calls. + // +listType=set + AllowedOrigins []*string `json:"allowedOrigins,omitempty" tf:"allowed_origins,omitempty"` + + // Whether CORS requests with credentials are allowed. Defaults to false + // Are credentials allowed in CORS requests? Defaults to `false`. + SupportCredentials *bool `json:"supportCredentials,omitempty" tf:"support_credentials,omitempty"` +} + +type LinuxWebAppSiteConfigCorsParameters struct { + + // Specifies a list of origins that should be allowed to make cross-origin calls. + // Specifies a list of origins that should be allowed to make cross-origin calls. + // +kubebuilder:validation:Optional + // +listType=set + AllowedOrigins []*string `json:"allowedOrigins,omitempty" tf:"allowed_origins,omitempty"` + + // Whether CORS requests with credentials are allowed. Defaults to false + // Are credentials allowed in CORS requests? Defaults to `false`. 
+ // +kubebuilder:validation:Optional + SupportCredentials *bool `json:"supportCredentials,omitempty" tf:"support_credentials,omitempty"` +} + +type LinuxWebAppSiteConfigIPRestrictionHeadersInitParameters struct { + + // Specifies a list of Azure Front Door IDs. + XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid"` + + // Specifies if a Front Door Health Probe should be expected. The only possible value is 1. + XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe"` + + // Specifies a list of addresses for which matching should be applied. Omitting this value means allow any. + XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for"` + + // Specifies a list of Hosts for which matching should be applied. + XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host"` +} + +type LinuxWebAppSiteConfigIPRestrictionHeadersObservation struct { + + // Specifies a list of Azure Front Door IDs. + XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid,omitempty"` + + // Specifies if a Front Door Health Probe should be expected. The only possible value is 1. + XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe,omitempty"` + + // Specifies a list of addresses for which matching should be applied. Omitting this value means allow any. + XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for,omitempty"` + + // Specifies a list of Hosts for which matching should be applied. + XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host,omitempty"` +} + +type LinuxWebAppSiteConfigIPRestrictionHeadersParameters struct { + + // Specifies a list of Azure Front Door IDs. + // +kubebuilder:validation:Optional + XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid"` + + // Specifies if a Front Door Health Probe should be expected. The only possible value is 1. 
+ // +kubebuilder:validation:Optional + XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe"` + + // Specifies a list of addresses for which matching should be applied. Omitting this value means allow any. + // +kubebuilder:validation:Optional + XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for"` + + // Specifies a list of Hosts for which matching should be applied. + // +kubebuilder:validation:Optional + XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host"` +} + +type LinuxWebAppSiteConfigIPRestrictionInitParameters struct { + + // The action to take. Possible values are Allow or Deny. Defaults to Allow. + // The action to take. Possible values are `Allow` or `Deny`. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The Description of this IP Restriction. + // The description of the IP restriction rule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A headers block as defined above. + Headers []LinuxWebAppSiteConfigIPRestrictionHeadersInitParameters `json:"headers,omitempty" tf:"headers,omitempty"` + + // The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + // The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // The name which should be used for this ip_restriction. + // The name which should be used for this `ip_restriction`. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The priority value of this ip_restriction. Defaults to 65000. + // The priority value of this `ip_restriction`. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Service Tag used for this IP Restriction. + // The Service Tag used for this IP Restriction. 
+ ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag,omitempty"` + + // The subnet id which will be used by this Web App for regional virtual network integration. + // The Virtual Network Subnet ID used for this IP Restriction. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDRef *v1.Reference `json:"virtualNetworkSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDSelector *v1.Selector `json:"virtualNetworkSubnetIdSelector,omitempty" tf:"-"` +} + +type LinuxWebAppSiteConfigIPRestrictionObservation struct { + + // The action to take. Possible values are Allow or Deny. Defaults to Allow. + // The action to take. Possible values are `Allow` or `Deny`. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The Description of this IP Restriction. + // The description of the IP restriction rule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A headers block as defined above. + Headers []LinuxWebAppSiteConfigIPRestrictionHeadersObservation `json:"headers,omitempty" tf:"headers,omitempty"` + + // The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + // The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // The name which should be used for this ip_restriction. 
+ // The name which should be used for this `ip_restriction`. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The priority value of this ip_restriction. Defaults to 65000. + // The priority value of this `ip_restriction`. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Service Tag used for this IP Restriction. + // The Service Tag used for this IP Restriction. + ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag,omitempty"` + + // The subnet id which will be used by this Web App for regional virtual network integration. + // The Virtual Network Subnet ID used for this IP Restriction. + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` +} + +type LinuxWebAppSiteConfigIPRestrictionParameters struct { + + // The action to take. Possible values are Allow or Deny. Defaults to Allow. + // The action to take. Possible values are `Allow` or `Deny`. + // +kubebuilder:validation:Optional + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The Description of this IP Restriction. + // The description of the IP restriction rule. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A headers block as defined above. + // +kubebuilder:validation:Optional + Headers []LinuxWebAppSiteConfigIPRestrictionHeadersParameters `json:"headers,omitempty" tf:"headers,omitempty"` + + // The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + // The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + // +kubebuilder:validation:Optional + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // The name which should be used for this ip_restriction. + // The name which should be used for this `ip_restriction`. 
+ // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The priority value of this ip_restriction. Defaults to 65000. + // The priority value of this `ip_restriction`. + // +kubebuilder:validation:Optional + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Service Tag used for this IP Restriction. + // The Service Tag used for this IP Restriction. + // +kubebuilder:validation:Optional + ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag,omitempty"` + + // The subnet id which will be used by this Web App for regional virtual network integration. + // The Virtual Network Subnet ID used for this IP Restriction. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDRef *v1.Reference `json:"virtualNetworkSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDSelector *v1.Selector `json:"virtualNetworkSubnetIdSelector,omitempty" tf:"-"` +} + +type LinuxWebAppSiteConfigInitParameters struct { + + // The URL to the API Definition for this Linux Web App. + APIDefinitionURL *string `json:"apiDefinitionUrl,omitempty" tf:"api_definition_url,omitempty"` + + // The API Management API ID this Linux Web App is associated with. + APIManagementAPIID *string `json:"apiManagementApiId,omitempty" tf:"api_management_api_id,omitempty"` + + // If this Linux Web App is Always On enabled. Defaults to true. 
+ AlwaysOn *bool `json:"alwaysOn,omitempty" tf:"always_on,omitempty"` + + // The App command line to launch. + AppCommandLine *string `json:"appCommandLine,omitempty" tf:"app_command_line,omitempty"` + + // A application_stack block as defined above. + ApplicationStack *LinuxWebAppSiteConfigApplicationStackInitParameters `json:"applicationStack,omitempty" tf:"application_stack,omitempty"` + + // Should Auto heal rules be enabled? Required with auto_heal_setting. + AutoHealEnabled *bool `json:"autoHealEnabled,omitempty" tf:"auto_heal_enabled,omitempty"` + + // A auto_heal_setting block as defined above. Required with auto_heal. + AutoHealSetting *AutoHealSettingInitParameters `json:"autoHealSetting,omitempty" tf:"auto_heal_setting,omitempty"` + + // The Client ID of the Managed Service Identity to use for connections to the Azure Container Registry. + ContainerRegistryManagedIdentityClientID *string `json:"containerRegistryManagedIdentityClientId,omitempty" tf:"container_registry_managed_identity_client_id,omitempty"` + + // Should connections for Azure Container Registry use Managed Identity. + ContainerRegistryUseManagedIdentity *bool `json:"containerRegistryUseManagedIdentity,omitempty" tf:"container_registry_use_managed_identity,omitempty"` + + // A cors block as defined above. + Cors *LinuxWebAppSiteConfigCorsInitParameters `json:"cors,omitempty" tf:"cors,omitempty"` + + // Specifies a list of Default Documents for the Linux Web App. + DefaultDocuments []*string `json:"defaultDocuments,omitempty" tf:"default_documents,omitempty"` + + // The State of FTP / FTPS service. Possible values include AllAllowed, FtpsOnly, and Disabled. Defaults to Disabled. + FtpsState *string `json:"ftpsState,omitempty" tf:"ftps_state,omitempty"` + + // The amount of time in minutes that a node can be unhealthy before being removed from the load balancer. Possible values are between 2 and 10. Only valid in conjunction with health_check_path. 
+ // The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between `2` and `10`. Defaults to `10`. Only valid in conjunction with `health_check_path` + HealthCheckEvictionTimeInMin *float64 `json:"healthCheckEvictionTimeInMin,omitempty" tf:"health_check_eviction_time_in_min,omitempty"` + + // The path to the Health Check. + HealthCheckPath *string `json:"healthCheckPath,omitempty" tf:"health_check_path,omitempty"` + + // Should the HTTP2 be enabled? + Http2Enabled *bool `json:"http2Enabled,omitempty" tf:"http2_enabled,omitempty"` + + // One or more ip_restriction blocks as defined above. + IPRestriction []LinuxWebAppSiteConfigIPRestrictionInitParameters `json:"ipRestriction,omitempty" tf:"ip_restriction,omitempty"` + + // The Default action for traffic that does not match any ip_restriction rule. possible values include Allow and Deny. Defaults to Allow. + IPRestrictionDefaultAction *string `json:"ipRestrictionDefaultAction,omitempty" tf:"ip_restriction_default_action,omitempty"` + + // The Site load balancing. Possible values include: WeightedRoundRobin, LeastRequests, LeastResponseTime, WeightedTotalTraffic, RequestHash, PerSiteRoundRobin. Defaults to LeastRequests if omitted. + LoadBalancingMode *string `json:"loadBalancingMode,omitempty" tf:"load_balancing_mode,omitempty"` + + // Use Local MySQL. Defaults to false. + LocalMySQLEnabled *bool `json:"localMysqlEnabled,omitempty" tf:"local_mysql_enabled,omitempty"` + + // Managed pipeline mode. Possible values include Integrated, and Classic. Defaults to Integrated. + ManagedPipelineMode *string `json:"managedPipelineMode,omitempty" tf:"managed_pipeline_mode,omitempty"` + + // The configures the minimum version of TLS required for SSL requests. Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + MinimumTLSVersion *string `json:"minimumTlsVersion,omitempty" tf:"minimum_tls_version,omitempty"` + + // Should Remote Debugging be enabled? 
Defaults to false. + RemoteDebuggingEnabled *bool `json:"remoteDebuggingEnabled,omitempty" tf:"remote_debugging_enabled,omitempty"` + + // The Remote Debugging Version. Possible values include VS2017, VS2019 and VS2022. + RemoteDebuggingVersion *string `json:"remoteDebuggingVersion,omitempty" tf:"remote_debugging_version,omitempty"` + + // One or more scm_ip_restriction blocks as defined above. + ScmIPRestriction []LinuxWebAppSiteConfigScmIPRestrictionInitParameters `json:"scmIpRestriction,omitempty" tf:"scm_ip_restriction,omitempty"` + + // The Default action for traffic that does not match any scm_ip_restriction rule. possible values include Allow and Deny. Defaults to Allow. + ScmIPRestrictionDefaultAction *string `json:"scmIpRestrictionDefaultAction,omitempty" tf:"scm_ip_restriction_default_action,omitempty"` + + // The configures the minimum version of TLS required for SSL requests to the SCM site Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + ScmMinimumTLSVersion *string `json:"scmMinimumTlsVersion,omitempty" tf:"scm_minimum_tls_version,omitempty"` + + // Should the Linux Web App ip_restriction configuration be used for the SCM also. + ScmUseMainIPRestriction *bool `json:"scmUseMainIpRestriction,omitempty" tf:"scm_use_main_ip_restriction,omitempty"` + + // Should the Linux Web App use a 32-bit worker? Defaults to true. + Use32BitWorker *bool `json:"use32BitWorker,omitempty" tf:"use_32_bit_worker,omitempty"` + + // Should all outbound traffic have NAT Gateways, Network Security Groups and User Defined Routes applied? Defaults to false. + // Should all outbound traffic to have Virtual Network Security Groups and User Defined Routes applied? Defaults to `false`. + VnetRouteAllEnabled *bool `json:"vnetRouteAllEnabled,omitempty" tf:"vnet_route_all_enabled,omitempty"` + + // Should Web Sockets be enabled? Defaults to false. 
+ WebsocketsEnabled *bool `json:"websocketsEnabled,omitempty" tf:"websockets_enabled,omitempty"` + + // The number of Workers for this Linux App Service. + WorkerCount *float64 `json:"workerCount,omitempty" tf:"worker_count,omitempty"` +} + +type LinuxWebAppSiteConfigObservation struct { + + // The URL to the API Definition for this Linux Web App. + APIDefinitionURL *string `json:"apiDefinitionUrl,omitempty" tf:"api_definition_url,omitempty"` + + // The API Management API ID this Linux Web App is associated with. + APIManagementAPIID *string `json:"apiManagementApiId,omitempty" tf:"api_management_api_id,omitempty"` + + // If this Linux Web App is Always On enabled. Defaults to true. + AlwaysOn *bool `json:"alwaysOn,omitempty" tf:"always_on,omitempty"` + + // The App command line to launch. + AppCommandLine *string `json:"appCommandLine,omitempty" tf:"app_command_line,omitempty"` + + // A application_stack block as defined above. + ApplicationStack *LinuxWebAppSiteConfigApplicationStackObservation `json:"applicationStack,omitempty" tf:"application_stack,omitempty"` + + // Should Auto heal rules be enabled? Required with auto_heal_setting. + AutoHealEnabled *bool `json:"autoHealEnabled,omitempty" tf:"auto_heal_enabled,omitempty"` + + // A auto_heal_setting block as defined above. Required with auto_heal. + AutoHealSetting *AutoHealSettingObservation `json:"autoHealSetting,omitempty" tf:"auto_heal_setting,omitempty"` + + // The Client ID of the Managed Service Identity to use for connections to the Azure Container Registry. + ContainerRegistryManagedIdentityClientID *string `json:"containerRegistryManagedIdentityClientId,omitempty" tf:"container_registry_managed_identity_client_id,omitempty"` + + // Should connections for Azure Container Registry use Managed Identity. + ContainerRegistryUseManagedIdentity *bool `json:"containerRegistryUseManagedIdentity,omitempty" tf:"container_registry_use_managed_identity,omitempty"` + + // A cors block as defined above. 
+ Cors *LinuxWebAppSiteConfigCorsObservation `json:"cors,omitempty" tf:"cors,omitempty"` + + // Specifies a list of Default Documents for the Linux Web App. + DefaultDocuments []*string `json:"defaultDocuments,omitempty" tf:"default_documents,omitempty"` + + // Should the Linux Web App be enabled? Defaults to true. + DetailedErrorLoggingEnabled *bool `json:"detailedErrorLoggingEnabled,omitempty" tf:"detailed_error_logging_enabled,omitempty"` + + // The State of FTP / FTPS service. Possible values include AllAllowed, FtpsOnly, and Disabled. Defaults to Disabled. + FtpsState *string `json:"ftpsState,omitempty" tf:"ftps_state,omitempty"` + + // The amount of time in minutes that a node can be unhealthy before being removed from the load balancer. Possible values are between 2 and 10. Only valid in conjunction with health_check_path. + // The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between `2` and `10`. Defaults to `10`. Only valid in conjunction with `health_check_path` + HealthCheckEvictionTimeInMin *float64 `json:"healthCheckEvictionTimeInMin,omitempty" tf:"health_check_eviction_time_in_min,omitempty"` + + // The path to the Health Check. + HealthCheckPath *string `json:"healthCheckPath,omitempty" tf:"health_check_path,omitempty"` + + // Should the HTTP2 be enabled? + Http2Enabled *bool `json:"http2Enabled,omitempty" tf:"http2_enabled,omitempty"` + + // One or more ip_restriction blocks as defined above. + IPRestriction []LinuxWebAppSiteConfigIPRestrictionObservation `json:"ipRestriction,omitempty" tf:"ip_restriction,omitempty"` + + // The Default action for traffic that does not match any ip_restriction rule. possible values include Allow and Deny. Defaults to Allow. 
+ IPRestrictionDefaultAction *string `json:"ipRestrictionDefaultAction,omitempty" tf:"ip_restriction_default_action,omitempty"` + + LinuxFxVersion *string `json:"linuxFxVersion,omitempty" tf:"linux_fx_version,omitempty"` + + // The Site load balancing. Possible values include: WeightedRoundRobin, LeastRequests, LeastResponseTime, WeightedTotalTraffic, RequestHash, PerSiteRoundRobin. Defaults to LeastRequests if omitted. + LoadBalancingMode *string `json:"loadBalancingMode,omitempty" tf:"load_balancing_mode,omitempty"` + + // Use Local MySQL. Defaults to false. + LocalMySQLEnabled *bool `json:"localMysqlEnabled,omitempty" tf:"local_mysql_enabled,omitempty"` + + // Managed pipeline mode. Possible values include Integrated, and Classic. Defaults to Integrated. + ManagedPipelineMode *string `json:"managedPipelineMode,omitempty" tf:"managed_pipeline_mode,omitempty"` + + // The configures the minimum version of TLS required for SSL requests. Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + MinimumTLSVersion *string `json:"minimumTlsVersion,omitempty" tf:"minimum_tls_version,omitempty"` + + // Should Remote Debugging be enabled? Defaults to false. + RemoteDebuggingEnabled *bool `json:"remoteDebuggingEnabled,omitempty" tf:"remote_debugging_enabled,omitempty"` + + // The Remote Debugging Version. Possible values include VS2017, VS2019 and VS2022. + RemoteDebuggingVersion *string `json:"remoteDebuggingVersion,omitempty" tf:"remote_debugging_version,omitempty"` + + // One or more scm_ip_restriction blocks as defined above. + ScmIPRestriction []LinuxWebAppSiteConfigScmIPRestrictionObservation `json:"scmIpRestriction,omitempty" tf:"scm_ip_restriction,omitempty"` + + // The Default action for traffic that does not match any scm_ip_restriction rule. possible values include Allow and Deny. Defaults to Allow. 
+ ScmIPRestrictionDefaultAction *string `json:"scmIpRestrictionDefaultAction,omitempty" tf:"scm_ip_restriction_default_action,omitempty"` + + // The configures the minimum version of TLS required for SSL requests to the SCM site Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + ScmMinimumTLSVersion *string `json:"scmMinimumTlsVersion,omitempty" tf:"scm_minimum_tls_version,omitempty"` + + ScmType *string `json:"scmType,omitempty" tf:"scm_type,omitempty"` + + // Should the Linux Web App ip_restriction configuration be used for the SCM also. + ScmUseMainIPRestriction *bool `json:"scmUseMainIpRestriction,omitempty" tf:"scm_use_main_ip_restriction,omitempty"` + + // Should the Linux Web App use a 32-bit worker? Defaults to true. + Use32BitWorker *bool `json:"use32BitWorker,omitempty" tf:"use_32_bit_worker,omitempty"` + + // Should all outbound traffic have NAT Gateways, Network Security Groups and User Defined Routes applied? Defaults to false. + // Should all outbound traffic to have Virtual Network Security Groups and User Defined Routes applied? Defaults to `false`. + VnetRouteAllEnabled *bool `json:"vnetRouteAllEnabled,omitempty" tf:"vnet_route_all_enabled,omitempty"` + + // Should Web Sockets be enabled? Defaults to false. + WebsocketsEnabled *bool `json:"websocketsEnabled,omitempty" tf:"websockets_enabled,omitempty"` + + // The number of Workers for this Linux App Service. + WorkerCount *float64 `json:"workerCount,omitempty" tf:"worker_count,omitempty"` +} + +type LinuxWebAppSiteConfigParameters struct { + + // The URL to the API Definition for this Linux Web App. + // +kubebuilder:validation:Optional + APIDefinitionURL *string `json:"apiDefinitionUrl,omitempty" tf:"api_definition_url,omitempty"` + + // The API Management API ID this Linux Web App is associated with. 
+ // +kubebuilder:validation:Optional + APIManagementAPIID *string `json:"apiManagementApiId,omitempty" tf:"api_management_api_id,omitempty"` + + // If this Linux Web App is Always On enabled. Defaults to true. + // +kubebuilder:validation:Optional + AlwaysOn *bool `json:"alwaysOn,omitempty" tf:"always_on,omitempty"` + + // The App command line to launch. + // +kubebuilder:validation:Optional + AppCommandLine *string `json:"appCommandLine,omitempty" tf:"app_command_line,omitempty"` + + // A application_stack block as defined above. + // +kubebuilder:validation:Optional + ApplicationStack *LinuxWebAppSiteConfigApplicationStackParameters `json:"applicationStack,omitempty" tf:"application_stack,omitempty"` + + // Should Auto heal rules be enabled? Required with auto_heal_setting. + // +kubebuilder:validation:Optional + AutoHealEnabled *bool `json:"autoHealEnabled,omitempty" tf:"auto_heal_enabled,omitempty"` + + // A auto_heal_setting block as defined above. Required with auto_heal. + // +kubebuilder:validation:Optional + AutoHealSetting *AutoHealSettingParameters `json:"autoHealSetting,omitempty" tf:"auto_heal_setting,omitempty"` + + // The Client ID of the Managed Service Identity to use for connections to the Azure Container Registry. + // +kubebuilder:validation:Optional + ContainerRegistryManagedIdentityClientID *string `json:"containerRegistryManagedIdentityClientId,omitempty" tf:"container_registry_managed_identity_client_id,omitempty"` + + // Should connections for Azure Container Registry use Managed Identity. + // +kubebuilder:validation:Optional + ContainerRegistryUseManagedIdentity *bool `json:"containerRegistryUseManagedIdentity,omitempty" tf:"container_registry_use_managed_identity,omitempty"` + + // A cors block as defined above. + // +kubebuilder:validation:Optional + Cors *LinuxWebAppSiteConfigCorsParameters `json:"cors,omitempty" tf:"cors,omitempty"` + + // Specifies a list of Default Documents for the Linux Web App. 
+ // +kubebuilder:validation:Optional + DefaultDocuments []*string `json:"defaultDocuments,omitempty" tf:"default_documents,omitempty"` + + // The State of FTP / FTPS service. Possible values include AllAllowed, FtpsOnly, and Disabled. Defaults to Disabled. + // +kubebuilder:validation:Optional + FtpsState *string `json:"ftpsState,omitempty" tf:"ftps_state,omitempty"` + + // The amount of time in minutes that a node can be unhealthy before being removed from the load balancer. Possible values are between 2 and 10. Only valid in conjunction with health_check_path. + // The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between `2` and `10`. Defaults to `10`. Only valid in conjunction with `health_check_path` + // +kubebuilder:validation:Optional + HealthCheckEvictionTimeInMin *float64 `json:"healthCheckEvictionTimeInMin,omitempty" tf:"health_check_eviction_time_in_min,omitempty"` + + // The path to the Health Check. + // +kubebuilder:validation:Optional + HealthCheckPath *string `json:"healthCheckPath,omitempty" tf:"health_check_path,omitempty"` + + // Should the HTTP2 be enabled? + // +kubebuilder:validation:Optional + Http2Enabled *bool `json:"http2Enabled,omitempty" tf:"http2_enabled,omitempty"` + + // One or more ip_restriction blocks as defined above. + // +kubebuilder:validation:Optional + IPRestriction []LinuxWebAppSiteConfigIPRestrictionParameters `json:"ipRestriction,omitempty" tf:"ip_restriction,omitempty"` + + // The Default action for traffic that does not match any ip_restriction rule. possible values include Allow and Deny. Defaults to Allow. + // +kubebuilder:validation:Optional + IPRestrictionDefaultAction *string `json:"ipRestrictionDefaultAction,omitempty" tf:"ip_restriction_default_action,omitempty"` + + // The Site load balancing. Possible values include: WeightedRoundRobin, LeastRequests, LeastResponseTime, WeightedTotalTraffic, RequestHash, PerSiteRoundRobin. 
Defaults to LeastRequests if omitted. + // +kubebuilder:validation:Optional + LoadBalancingMode *string `json:"loadBalancingMode,omitempty" tf:"load_balancing_mode,omitempty"` + + // Use Local MySQL. Defaults to false. + // +kubebuilder:validation:Optional + LocalMySQLEnabled *bool `json:"localMysqlEnabled,omitempty" tf:"local_mysql_enabled,omitempty"` + + // Managed pipeline mode. Possible values include Integrated, and Classic. Defaults to Integrated. + // +kubebuilder:validation:Optional + ManagedPipelineMode *string `json:"managedPipelineMode,omitempty" tf:"managed_pipeline_mode,omitempty"` + + // The configures the minimum version of TLS required for SSL requests. Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + // +kubebuilder:validation:Optional + MinimumTLSVersion *string `json:"minimumTlsVersion,omitempty" tf:"minimum_tls_version,omitempty"` + + // Should Remote Debugging be enabled? Defaults to false. + // +kubebuilder:validation:Optional + RemoteDebuggingEnabled *bool `json:"remoteDebuggingEnabled,omitempty" tf:"remote_debugging_enabled,omitempty"` + + // The Remote Debugging Version. Possible values include VS2017, VS2019 and VS2022. + // +kubebuilder:validation:Optional + RemoteDebuggingVersion *string `json:"remoteDebuggingVersion,omitempty" tf:"remote_debugging_version,omitempty"` + + // One or more scm_ip_restriction blocks as defined above. + // +kubebuilder:validation:Optional + ScmIPRestriction []LinuxWebAppSiteConfigScmIPRestrictionParameters `json:"scmIpRestriction,omitempty" tf:"scm_ip_restriction,omitempty"` + + // The Default action for traffic that does not match any scm_ip_restriction rule. possible values include Allow and Deny. Defaults to Allow. 
+ // +kubebuilder:validation:Optional + ScmIPRestrictionDefaultAction *string `json:"scmIpRestrictionDefaultAction,omitempty" tf:"scm_ip_restriction_default_action,omitempty"` + + // The configures the minimum version of TLS required for SSL requests to the SCM site Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + // +kubebuilder:validation:Optional + ScmMinimumTLSVersion *string `json:"scmMinimumTlsVersion,omitempty" tf:"scm_minimum_tls_version,omitempty"` + + // Should the Linux Web App ip_restriction configuration be used for the SCM also. + // +kubebuilder:validation:Optional + ScmUseMainIPRestriction *bool `json:"scmUseMainIpRestriction,omitempty" tf:"scm_use_main_ip_restriction,omitempty"` + + // Should the Linux Web App use a 32-bit worker? Defaults to true. + // +kubebuilder:validation:Optional + Use32BitWorker *bool `json:"use32BitWorker,omitempty" tf:"use_32_bit_worker,omitempty"` + + // Should all outbound traffic have NAT Gateways, Network Security Groups and User Defined Routes applied? Defaults to false. + // Should all outbound traffic to have Virtual Network Security Groups and User Defined Routes applied? Defaults to `false`. + // +kubebuilder:validation:Optional + VnetRouteAllEnabled *bool `json:"vnetRouteAllEnabled,omitempty" tf:"vnet_route_all_enabled,omitempty"` + + // Should Web Sockets be enabled? Defaults to false. + // +kubebuilder:validation:Optional + WebsocketsEnabled *bool `json:"websocketsEnabled,omitempty" tf:"websockets_enabled,omitempty"` + + // The number of Workers for this Linux App Service. + // +kubebuilder:validation:Optional + WorkerCount *float64 `json:"workerCount,omitempty" tf:"worker_count,omitempty"` +} + +type LinuxWebAppSiteConfigScmIPRestrictionHeadersInitParameters struct { + + // Specifies a list of Azure Front Door IDs. + XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid"` + + // Specifies if a Front Door Health Probe should be expected. The only possible value is 1. 
+ XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe"` + + // Specifies a list of addresses for which matching should be applied. Omitting this value means allow any. + XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for"` + + // Specifies a list of Hosts for which matching should be applied. + XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host"` +} + +type LinuxWebAppSiteConfigScmIPRestrictionHeadersObservation struct { + + // Specifies a list of Azure Front Door IDs. + XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid,omitempty"` + + // Specifies if a Front Door Health Probe should be expected. The only possible value is 1. + XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe,omitempty"` + + // Specifies a list of addresses for which matching should be applied. Omitting this value means allow any. + XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for,omitempty"` + + // Specifies a list of Hosts for which matching should be applied. + XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host,omitempty"` +} + +type LinuxWebAppSiteConfigScmIPRestrictionHeadersParameters struct { + + // Specifies a list of Azure Front Door IDs. + // +kubebuilder:validation:Optional + XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid"` + + // Specifies if a Front Door Health Probe should be expected. The only possible value is 1. + // +kubebuilder:validation:Optional + XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe"` + + // Specifies a list of addresses for which matching should be applied. Omitting this value means allow any. + // +kubebuilder:validation:Optional + XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for"` + + // Specifies a list of Hosts for which matching should be applied. 
+ // +kubebuilder:validation:Optional + XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host"` +} + +type LinuxWebAppSiteConfigScmIPRestrictionInitParameters struct { + + // The action to take. Possible values are Allow or Deny. Defaults to Allow. + // The action to take. Possible values are `Allow` or `Deny`. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The Description of this IP Restriction. + // The description of the IP restriction rule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A headers block as defined above. + Headers []LinuxWebAppSiteConfigScmIPRestrictionHeadersInitParameters `json:"headers,omitempty" tf:"headers,omitempty"` + + // The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + // The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // The name which should be used for this Storage Account. + // The name which should be used for this `ip_restriction`. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The priority value of this ip_restriction. Defaults to 65000. + // The priority value of this `ip_restriction`. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Service Tag used for this IP Restriction. + // The Service Tag used for this IP Restriction. + ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag,omitempty"` + + // The subnet id which will be used by this Web App for regional virtual network integration. + // The Virtual Network Subnet ID used for this IP Restriction. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDRef *v1.Reference `json:"virtualNetworkSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDSelector *v1.Selector `json:"virtualNetworkSubnetIdSelector,omitempty" tf:"-"` +} + +type LinuxWebAppSiteConfigScmIPRestrictionObservation struct { + + // The action to take. Possible values are Allow or Deny. Defaults to Allow. + // The action to take. Possible values are `Allow` or `Deny`. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The Description of this IP Restriction. + // The description of the IP restriction rule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A headers block as defined above. + Headers []LinuxWebAppSiteConfigScmIPRestrictionHeadersObservation `json:"headers,omitempty" tf:"headers,omitempty"` + + // The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + // The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // The name which should be used for this Storage Account. + // The name which should be used for this `ip_restriction`. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The priority value of this ip_restriction. Defaults to 65000. 
+ // The priority value of this `ip_restriction`. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Service Tag used for this IP Restriction. + // The Service Tag used for this IP Restriction. + ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag,omitempty"` + + // The subnet id which will be used by this Web App for regional virtual network integration. + // The Virtual Network Subnet ID used for this IP Restriction. + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` +} + +type LinuxWebAppSiteConfigScmIPRestrictionParameters struct { + + // The action to take. Possible values are Allow or Deny. Defaults to Allow. + // The action to take. Possible values are `Allow` or `Deny`. + // +kubebuilder:validation:Optional + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The Description of this IP Restriction. + // The description of the IP restriction rule. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A headers block as defined above. + // +kubebuilder:validation:Optional + Headers []LinuxWebAppSiteConfigScmIPRestrictionHeadersParameters `json:"headers,omitempty" tf:"headers,omitempty"` + + // The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + // The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + // +kubebuilder:validation:Optional + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // The name which should be used for this Storage Account. + // The name which should be used for this `ip_restriction`. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The priority value of this ip_restriction. Defaults to 65000. 
+ // The priority value of this `ip_restriction`. + // +kubebuilder:validation:Optional + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Service Tag used for this IP Restriction. + // The Service Tag used for this IP Restriction. + // +kubebuilder:validation:Optional + ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag,omitempty"` + + // The subnet id which will be used by this Web App for regional virtual network integration. + // The Virtual Network Subnet ID used for this IP Restriction. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDRef *v1.Reference `json:"virtualNetworkSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDSelector *v1.Selector `json:"virtualNetworkSubnetIdSelector,omitempty" tf:"-"` +} + +type LinuxWebAppSiteCredentialInitParameters struct { +} + +type LinuxWebAppSiteCredentialObservation struct { + + // The Site Credentials Username used for publishing. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Site Credentials Password used for publishing. + Password *string `json:"password,omitempty" tf:"password,omitempty"` +} + +type LinuxWebAppSiteCredentialParameters struct { +} + +type LinuxWebAppStickySettingsInitParameters struct { + + // A list of app_setting names that the Linux Web App will not swap between Slots when a swap operation is triggered. 
+ AppSettingNames []*string `json:"appSettingNames,omitempty" tf:"app_setting_names,omitempty"` + + // A list of connection_string names that the Linux Web App will not swap between Slots when a swap operation is triggered. + ConnectionStringNames []*string `json:"connectionStringNames,omitempty" tf:"connection_string_names,omitempty"` +} + +type LinuxWebAppStickySettingsObservation struct { + + // A list of app_setting names that the Linux Web App will not swap between Slots when a swap operation is triggered. + AppSettingNames []*string `json:"appSettingNames,omitempty" tf:"app_setting_names,omitempty"` + + // A list of connection_string names that the Linux Web App will not swap between Slots when a swap operation is triggered. + ConnectionStringNames []*string `json:"connectionStringNames,omitempty" tf:"connection_string_names,omitempty"` +} + +type LinuxWebAppStickySettingsParameters struct { + + // A list of app_setting names that the Linux Web App will not swap between Slots when a swap operation is triggered. + // +kubebuilder:validation:Optional + AppSettingNames []*string `json:"appSettingNames,omitempty" tf:"app_setting_names,omitempty"` + + // A list of connection_string names that the Linux Web App will not swap between Slots when a swap operation is triggered. + // +kubebuilder:validation:Optional + ConnectionStringNames []*string `json:"connectionStringNames,omitempty" tf:"connection_string_names,omitempty"` +} + +type LinuxWebAppStorageAccountInitParameters struct { + + // The Name of the Storage Account. + AccountName *string `json:"accountName,omitempty" tf:"account_name,omitempty"` + + // The path at which to mount the storage share. + MountPath *string `json:"mountPath,omitempty" tf:"mount_path,omitempty"` + + // The name which should be used for this Storage Account. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Name of the File Share or Container Name for Blob storage. 
+ ShareName *string `json:"shareName,omitempty" tf:"share_name,omitempty"` + + // The Azure Storage Type. Possible values include AzureFiles and AzureBlob + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type LinuxWebAppStorageAccountObservation struct { + + // The Name of the Storage Account. + AccountName *string `json:"accountName,omitempty" tf:"account_name,omitempty"` + + // The path at which to mount the storage share. + MountPath *string `json:"mountPath,omitempty" tf:"mount_path,omitempty"` + + // The name which should be used for this Storage Account. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Name of the File Share or Container Name for Blob storage. + ShareName *string `json:"shareName,omitempty" tf:"share_name,omitempty"` + + // The Azure Storage Type. Possible values include AzureFiles and AzureBlob + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type LinuxWebAppStorageAccountParameters struct { + + // The Access key for the storage account. + // +kubebuilder:validation:Required + AccessKeySecretRef v1.SecretKeySelector `json:"accessKeySecretRef" tf:"-"` + + // The Name of the Storage Account. + // +kubebuilder:validation:Optional + AccountName *string `json:"accountName" tf:"account_name,omitempty"` + + // The path at which to mount the storage share. + // +kubebuilder:validation:Optional + MountPath *string `json:"mountPath,omitempty" tf:"mount_path,omitempty"` + + // The name which should be used for this Storage Account. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The Name of the File Share or Container Name for Blob storage. + // +kubebuilder:validation:Optional + ShareName *string `json:"shareName" tf:"share_name,omitempty"` + + // The Azure Storage Type. 
Possible values include AzureFiles and AzureBlob + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type LogsInitParameters struct { + + // A application_logs block as defined above. + ApplicationLogs *ApplicationLogsInitParameters `json:"applicationLogs,omitempty" tf:"application_logs,omitempty"` + + // Should detailed error messages be enabled? + DetailedErrorMessages *bool `json:"detailedErrorMessages,omitempty" tf:"detailed_error_messages,omitempty"` + + // Should the failed request tracing be enabled? + FailedRequestTracing *bool `json:"failedRequestTracing,omitempty" tf:"failed_request_tracing,omitempty"` + + // An http_logs block as defined above. + HTTPLogs *HTTPLogsInitParameters `json:"httpLogs,omitempty" tf:"http_logs,omitempty"` +} + +type LogsObservation struct { + + // A application_logs block as defined above. + ApplicationLogs *ApplicationLogsObservation `json:"applicationLogs,omitempty" tf:"application_logs,omitempty"` + + // Should detailed error messages be enabled? + DetailedErrorMessages *bool `json:"detailedErrorMessages,omitempty" tf:"detailed_error_messages,omitempty"` + + // Should the failed request tracing be enabled? + FailedRequestTracing *bool `json:"failedRequestTracing,omitempty" tf:"failed_request_tracing,omitempty"` + + // An http_logs block as defined above. + HTTPLogs *HTTPLogsObservation `json:"httpLogs,omitempty" tf:"http_logs,omitempty"` +} + +type LogsParameters struct { + + // A application_logs block as defined above. + // +kubebuilder:validation:Optional + ApplicationLogs *ApplicationLogsParameters `json:"applicationLogs,omitempty" tf:"application_logs,omitempty"` + + // Should detailed error messages be enabled? + // +kubebuilder:validation:Optional + DetailedErrorMessages *bool `json:"detailedErrorMessages,omitempty" tf:"detailed_error_messages,omitempty"` + + // Should the failed request tracing be enabled? 
+ // +kubebuilder:validation:Optional + FailedRequestTracing *bool `json:"failedRequestTracing,omitempty" tf:"failed_request_tracing,omitempty"` + + // An http_logs block as defined above. + // +kubebuilder:validation:Optional + HTTPLogs *HTTPLogsParameters `json:"httpLogs,omitempty" tf:"http_logs,omitempty"` +} + +type RequestsInitParameters struct { + + // The number of occurrences of the defined status_code in the specified interval on which to trigger this rule. + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` + + // The time interval in the form hh:mm:ss. + Interval *string `json:"interval,omitempty" tf:"interval,omitempty"` +} + +type RequestsObservation struct { + + // The number of occurrences of the defined status_code in the specified interval on which to trigger this rule. + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` + + // The time interval in the form hh:mm:ss. + Interval *string `json:"interval,omitempty" tf:"interval,omitempty"` +} + +type RequestsParameters struct { + + // The number of occurrences of the defined status_code in the specified interval on which to trigger this rule. + // +kubebuilder:validation:Optional + Count *float64 `json:"count" tf:"count,omitempty"` + + // The time interval in the form hh:mm:ss. + // +kubebuilder:validation:Optional + Interval *string `json:"interval" tf:"interval,omitempty"` +} + +type SlowRequestInitParameters struct { + + // The number of occurrences of the defined status_code in the specified interval on which to trigger this rule. + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` + + // The time interval in the form hh:mm:ss. + Interval *string `json:"interval,omitempty" tf:"interval,omitempty"` + + // The path to which this rule status code applies. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // The threshold of time passed to qualify as a Slow Request in hh:mm:ss. 
+ TimeTaken *string `json:"timeTaken,omitempty" tf:"time_taken,omitempty"` +} + +type SlowRequestObservation struct { + + // The number of occurrences of the defined status_code in the specified interval on which to trigger this rule. + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` + + // The time interval in the form hh:mm:ss. + Interval *string `json:"interval,omitempty" tf:"interval,omitempty"` + + // The path to which this rule status code applies. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // The threshold of time passed to qualify as a Slow Request in hh:mm:ss. + TimeTaken *string `json:"timeTaken,omitempty" tf:"time_taken,omitempty"` +} + +type SlowRequestParameters struct { + + // The number of occurrences of the defined status_code in the specified interval on which to trigger this rule. + // +kubebuilder:validation:Optional + Count *float64 `json:"count" tf:"count,omitempty"` + + // The time interval in the form hh:mm:ss. + // +kubebuilder:validation:Optional + Interval *string `json:"interval" tf:"interval,omitempty"` + + // The path to which this rule status code applies. + // +kubebuilder:validation:Optional + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // The threshold of time passed to qualify as a Slow Request in hh:mm:ss. + // +kubebuilder:validation:Optional + TimeTaken *string `json:"timeTaken" tf:"time_taken,omitempty"` +} + +type StatusCodeInitParameters struct { + + // The number of occurrences of the defined status_code in the specified interval on which to trigger this rule. + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` + + // The time interval in the form hh:mm:ss. + Interval *string `json:"interval,omitempty" tf:"interval,omitempty"` + + // The path to which this rule status code applies. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // The status code for this rule, accepts single status codes and status code ranges. e.g. 500 or 400-499. 
Possible values are integers between 101 and 599 + StatusCodeRange *string `json:"statusCodeRange,omitempty" tf:"status_code_range,omitempty"` + + // The Request Sub Status of the Status Code. + SubStatus *float64 `json:"subStatus,omitempty" tf:"sub_status,omitempty"` + + // The Win32 Status Code of the Request. + Win32StatusCode *float64 `json:"win32StatusCode,omitempty" tf:"win32_status_code,omitempty"` +} + +type StatusCodeObservation struct { + + // The number of occurrences of the defined status_code in the specified interval on which to trigger this rule. + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` + + // The time interval in the form hh:mm:ss. + Interval *string `json:"interval,omitempty" tf:"interval,omitempty"` + + // The path to which this rule status code applies. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // The status code for this rule, accepts single status codes and status code ranges. e.g. 500 or 400-499. Possible values are integers between 101 and 599 + StatusCodeRange *string `json:"statusCodeRange,omitempty" tf:"status_code_range,omitempty"` + + // The Request Sub Status of the Status Code. + SubStatus *float64 `json:"subStatus,omitempty" tf:"sub_status,omitempty"` + + // The Win32 Status Code of the Request. + Win32StatusCode *float64 `json:"win32StatusCode,omitempty" tf:"win32_status_code,omitempty"` +} + +type StatusCodeParameters struct { + + // The number of occurrences of the defined status_code in the specified interval on which to trigger this rule. + // +kubebuilder:validation:Optional + Count *float64 `json:"count" tf:"count,omitempty"` + + // The time interval in the form hh:mm:ss. + // +kubebuilder:validation:Optional + Interval *string `json:"interval" tf:"interval,omitempty"` + + // The path to which this rule status code applies. 
+ // +kubebuilder:validation:Optional + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // The status code for this rule, accepts single status codes and status code ranges. e.g. 500 or 400-499. Possible values are integers between 101 and 599 + // +kubebuilder:validation:Optional + StatusCodeRange *string `json:"statusCodeRange" tf:"status_code_range,omitempty"` + + // The Request Sub Status of the Status Code. + // +kubebuilder:validation:Optional + SubStatus *float64 `json:"subStatus,omitempty" tf:"sub_status,omitempty"` + + // The Win32 Status Code of the Request. + // +kubebuilder:validation:Optional + Win32StatusCode *float64 `json:"win32StatusCode,omitempty" tf:"win32_status_code,omitempty"` +} + +type TriggerInitParameters struct { + + // A requests block as defined above. + Requests *RequestsInitParameters `json:"requests,omitempty" tf:"requests,omitempty"` + + // One or more slow_request blocks as defined above. + SlowRequest *SlowRequestInitParameters `json:"slowRequest,omitempty" tf:"slow_request,omitempty"` + + // One or more status_code blocks as defined above. + StatusCode []StatusCodeInitParameters `json:"statusCode,omitempty" tf:"status_code,omitempty"` +} + +type TriggerObservation struct { + + // A requests block as defined above. + Requests *RequestsObservation `json:"requests,omitempty" tf:"requests,omitempty"` + + // One or more slow_request blocks as defined above. + SlowRequest *SlowRequestObservation `json:"slowRequest,omitempty" tf:"slow_request,omitempty"` + + // One or more status_code blocks as defined above. + StatusCode []StatusCodeObservation `json:"statusCode,omitempty" tf:"status_code,omitempty"` +} + +type TriggerParameters struct { + + // A requests block as defined above. + // +kubebuilder:validation:Optional + Requests *RequestsParameters `json:"requests,omitempty" tf:"requests,omitempty"` + + // One or more slow_request blocks as defined above. 
+ // +kubebuilder:validation:Optional + SlowRequest *SlowRequestParameters `json:"slowRequest,omitempty" tf:"slow_request,omitempty"` + + // One or more status_code blocks as defined above. + // +kubebuilder:validation:Optional + StatusCode []StatusCodeParameters `json:"statusCode,omitempty" tf:"status_code,omitempty"` +} + +// LinuxWebAppSpec defines the desired state of LinuxWebApp +type LinuxWebAppSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider LinuxWebAppParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider LinuxWebAppInitParameters `json:"initProvider,omitempty"` +} + +// LinuxWebAppStatus defines the observed state of LinuxWebApp. +type LinuxWebAppStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider LinuxWebAppObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// LinuxWebApp is the Schema for the LinuxWebApps API. Manages a Linux Web App. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type LinuxWebApp struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.siteConfig) || (has(self.initProvider) && has(self.initProvider.siteConfig))",message="spec.forProvider.siteConfig is a required parameter" + Spec LinuxWebAppSpec `json:"spec"` + Status LinuxWebAppStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// LinuxWebAppList contains a list of LinuxWebApps +type LinuxWebAppList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []LinuxWebApp `json:"items"` +} + +// Repository type metadata. +var ( + LinuxWebApp_Kind = "LinuxWebApp" + LinuxWebApp_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: LinuxWebApp_Kind}.String() + LinuxWebApp_KindAPIVersion = LinuxWebApp_Kind + "." 
+ CRDGroupVersion.String() + LinuxWebApp_GroupVersionKind = CRDGroupVersion.WithKind(LinuxWebApp_Kind) +) + +func init() { + SchemeBuilder.Register(&LinuxWebApp{}, &LinuxWebAppList{}) +} diff --git a/apis/web/v1beta2/zz_linuxwebappslot_terraformed.go b/apis/web/v1beta2/zz_linuxwebappslot_terraformed.go new file mode 100755 index 000000000..4b1d6b7c3 --- /dev/null +++ b/apis/web/v1beta2/zz_linuxwebappslot_terraformed.go @@ -0,0 +1,130 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this LinuxWebAppSlot +func (mg *LinuxWebAppSlot) GetTerraformResourceType() string { + return "azurerm_linux_web_app_slot" +} + +// GetConnectionDetailsMapping for this LinuxWebAppSlot +func (tr *LinuxWebAppSlot) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"auth_settings[*].active_directory[*].client_secret": "spec.forProvider.authSettings[*].activeDirectory[*].clientSecretSecretRef", "auth_settings[*].facebook[*].app_secret": "spec.forProvider.authSettings[*].facebook[*].appSecretSecretRef", "auth_settings[*].github[*].client_secret": "spec.forProvider.authSettings[*].github[*].clientSecretSecretRef", "auth_settings[*].google[*].client_secret": "spec.forProvider.authSettings[*].google[*].clientSecretSecretRef", "auth_settings[*].microsoft[*].client_secret": "spec.forProvider.authSettings[*].microsoft[*].clientSecretSecretRef", "auth_settings[*].twitter[*].consumer_secret": "spec.forProvider.authSettings[*].twitter[*].consumerSecretSecretRef", "backup[*].storage_account_url": "spec.forProvider.backup[*].storageAccountUrlSecretRef", "connection_string[*].value": "spec.forProvider.connectionString[*].valueSecretRef", 
"custom_domain_verification_id": "status.atProvider.customDomainVerificationId", "logs[*].http_logs[*].azure_blob_storage[*].sas_url": "spec.forProvider.logs[*].httpLogs[*].azureBlobStorage[*].sasUrlSecretRef", "site_config[*].application_stack[*].docker_registry_password": "spec.forProvider.siteConfig[*].applicationStack[*].dockerRegistryPasswordSecretRef", "site_credential[*]": "status.atProvider.siteCredential[*]", "storage_account[*].access_key": "spec.forProvider.storageAccount[*].accessKeySecretRef"} +} + +// GetObservation of this LinuxWebAppSlot +func (tr *LinuxWebAppSlot) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this LinuxWebAppSlot +func (tr *LinuxWebAppSlot) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this LinuxWebAppSlot +func (tr *LinuxWebAppSlot) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this LinuxWebAppSlot +func (tr *LinuxWebAppSlot) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this LinuxWebAppSlot +func (tr *LinuxWebAppSlot) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this LinuxWebAppSlot +func (tr *LinuxWebAppSlot) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + 
if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this LinuxWebAppSlot +func (tr *LinuxWebAppSlot) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this LinuxWebAppSlot using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *LinuxWebAppSlot) LateInitialize(attrs []byte) (bool, error) { + params := &LinuxWebAppSlotParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + opts = append(opts, resource.WithNameFilter("KeyVaultReferenceIdentityID")) + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *LinuxWebAppSlot) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/web/v1beta2/zz_linuxwebappslot_types.go b/apis/web/v1beta2/zz_linuxwebappslot_types.go new file mode 100755 index 000000000..6cbdcbb2b --- /dev/null +++ b/apis/web/v1beta2/zz_linuxwebappslot_types.go @@ -0,0 +1,3526 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ApplicationLogsAzureBlobStorageInitParameters struct { + + // The level at which to log. Possible values include Error, Warning, Information, Verbose and Off. NOTE: this field is not available for http_logs + Level *string `json:"level,omitempty" tf:"level,omitempty"` + + // The retention period in days. A values of 0 means no retention. + RetentionInDays *float64 `json:"retentionInDays,omitempty" tf:"retention_in_days,omitempty"` + + // SAS url to an Azure blob container with read/write/list/delete permissions. + SASURL *string `json:"sasUrl,omitempty" tf:"sas_url,omitempty"` +} + +type ApplicationLogsAzureBlobStorageObservation struct { + + // The level at which to log. Possible values include Error, Warning, Information, Verbose and Off. NOTE: this field is not available for http_logs + Level *string `json:"level,omitempty" tf:"level,omitempty"` + + // The retention period in days. A values of 0 means no retention. + RetentionInDays *float64 `json:"retentionInDays,omitempty" tf:"retention_in_days,omitempty"` + + // SAS url to an Azure blob container with read/write/list/delete permissions. 
+ SASURL *string `json:"sasUrl,omitempty" tf:"sas_url,omitempty"` +} + +type ApplicationLogsAzureBlobStorageParameters struct { + + // The level at which to log. Possible values include Error, Warning, Information, Verbose and Off. NOTE: this field is not available for http_logs + // +kubebuilder:validation:Optional + Level *string `json:"level" tf:"level,omitempty"` + + // The retention period in days. A values of 0 means no retention. + // +kubebuilder:validation:Optional + RetentionInDays *float64 `json:"retentionInDays" tf:"retention_in_days,omitempty"` + + // SAS url to an Azure blob container with read/write/list/delete permissions. + // +kubebuilder:validation:Optional + SASURL *string `json:"sasUrl" tf:"sas_url,omitempty"` +} + +type AutoHealSettingActionInitParameters struct { + + // Predefined action to be taken to an Auto Heal trigger. Possible values include: Recycle. + ActionType *string `json:"actionType,omitempty" tf:"action_type,omitempty"` + + // The minimum amount of time in hh:mm:ss the Linux Web App must have been running before the defined action will be run in the event of a trigger. + MinimumProcessExecutionTime *string `json:"minimumProcessExecutionTime,omitempty" tf:"minimum_process_execution_time,omitempty"` +} + +type AutoHealSettingActionObservation struct { + + // Predefined action to be taken to an Auto Heal trigger. Possible values include: Recycle. + ActionType *string `json:"actionType,omitempty" tf:"action_type,omitempty"` + + // The minimum amount of time in hh:mm:ss the Linux Web App must have been running before the defined action will be run in the event of a trigger. + MinimumProcessExecutionTime *string `json:"minimumProcessExecutionTime,omitempty" tf:"minimum_process_execution_time,omitempty"` +} + +type AutoHealSettingActionParameters struct { + + // Predefined action to be taken to an Auto Heal trigger. Possible values include: Recycle. 
+ // +kubebuilder:validation:Optional + ActionType *string `json:"actionType" tf:"action_type,omitempty"` + + // The minimum amount of time in hh:mm:ss the Linux Web App must have been running before the defined action will be run in the event of a trigger. + // +kubebuilder:validation:Optional + MinimumProcessExecutionTime *string `json:"minimumProcessExecutionTime,omitempty" tf:"minimum_process_execution_time,omitempty"` +} + +type AutoHealSettingTriggerInitParameters struct { + + // A requests block as defined above. + Requests *TriggerRequestsInitParameters `json:"requests,omitempty" tf:"requests,omitempty"` + + // One or more slow_request blocks as defined above. + SlowRequest *TriggerSlowRequestInitParameters `json:"slowRequest,omitempty" tf:"slow_request,omitempty"` + + // One or more status_code blocks as defined above. + StatusCode []TriggerStatusCodeInitParameters `json:"statusCode,omitempty" tf:"status_code,omitempty"` +} + +type AutoHealSettingTriggerObservation struct { + + // A requests block as defined above. + Requests *TriggerRequestsObservation `json:"requests,omitempty" tf:"requests,omitempty"` + + // One or more slow_request blocks as defined above. + SlowRequest *TriggerSlowRequestObservation `json:"slowRequest,omitempty" tf:"slow_request,omitempty"` + + // One or more status_code blocks as defined above. + StatusCode []TriggerStatusCodeObservation `json:"statusCode,omitempty" tf:"status_code,omitempty"` +} + +type AutoHealSettingTriggerParameters struct { + + // A requests block as defined above. + // +kubebuilder:validation:Optional + Requests *TriggerRequestsParameters `json:"requests,omitempty" tf:"requests,omitempty"` + + // One or more slow_request blocks as defined above. + // +kubebuilder:validation:Optional + SlowRequest *TriggerSlowRequestParameters `json:"slowRequest,omitempty" tf:"slow_request,omitempty"` + + // One or more status_code blocks as defined above. 
+ // +kubebuilder:validation:Optional + StatusCode []TriggerStatusCodeParameters `json:"statusCode,omitempty" tf:"status_code,omitempty"` +} + +type HTTPLogsFileSystemInitParameters struct { + + // The retention period in days. A values of 0 means no retention. + RetentionInDays *float64 `json:"retentionInDays,omitempty" tf:"retention_in_days,omitempty"` + + // The maximum size in megabytes that log files can use. + RetentionInMb *float64 `json:"retentionInMb,omitempty" tf:"retention_in_mb,omitempty"` +} + +type HTTPLogsFileSystemObservation struct { + + // The retention period in days. A values of 0 means no retention. + RetentionInDays *float64 `json:"retentionInDays,omitempty" tf:"retention_in_days,omitempty"` + + // The maximum size in megabytes that log files can use. + RetentionInMb *float64 `json:"retentionInMb,omitempty" tf:"retention_in_mb,omitempty"` +} + +type HTTPLogsFileSystemParameters struct { + + // The retention period in days. A values of 0 means no retention. + // +kubebuilder:validation:Optional + RetentionInDays *float64 `json:"retentionInDays" tf:"retention_in_days,omitempty"` + + // The maximum size in megabytes that log files can use. + // +kubebuilder:validation:Optional + RetentionInMb *float64 `json:"retentionInMb" tf:"retention_in_mb,omitempty"` +} + +type LinuxWebAppSlotAuthSettingsActiveDirectoryInitParameters struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Active Directory. 
+ ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The App Setting name that contains the client secret of the Client. Cannot be used with `client_secret`. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` +} + +type LinuxWebAppSlotAuthSettingsActiveDirectoryObservation struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Active Directory. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The App Setting name that contains the client secret of the Client. Cannot be used with `client_secret`. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` +} + +type LinuxWebAppSlotAuthSettingsActiveDirectoryParameters struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + // +kubebuilder:validation:Optional + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. 
+ // The ID of the Client to use to authenticate with Azure Active Directory. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. + // The Client Secret for the Client ID. Cannot be used with `client_secret_setting_name`. + // +kubebuilder:validation:Optional + ClientSecretSecretRef *v1.SecretKeySelector `json:"clientSecretSecretRef,omitempty" tf:"-"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The App Setting name that contains the client secret of the Client. Cannot be used with `client_secret`. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` +} + +type LinuxWebAppSlotAuthSettingsFacebookInitParameters struct { + + // The App ID of the Facebook app used for login. + // The App ID of the Facebook app used for login. + AppID *string `json:"appId,omitempty" tf:"app_id,omitempty"` + + // The app setting name that contains the app_secret value used for Facebook Login. + // The app setting name that contains the `app_secret` value used for Facebook Login. Cannot be specified with `app_secret`. + AppSecretSettingName *string `json:"appSecretSettingName,omitempty" tf:"app_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + // Specifies a list of OAuth 2.0 scopes to be requested as part of Facebook Login authentication. + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type LinuxWebAppSlotAuthSettingsFacebookObservation struct { + + // The App ID of the Facebook app used for login. 
+ // The App ID of the Facebook app used for login. + AppID *string `json:"appId,omitempty" tf:"app_id,omitempty"` + + // The app setting name that contains the app_secret value used for Facebook Login. + // The app setting name that contains the `app_secret` value used for Facebook Login. Cannot be specified with `app_secret`. + AppSecretSettingName *string `json:"appSecretSettingName,omitempty" tf:"app_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + // Specifies a list of OAuth 2.0 scopes to be requested as part of Facebook Login authentication. + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type LinuxWebAppSlotAuthSettingsFacebookParameters struct { + + // The App ID of the Facebook app used for login. + // The App ID of the Facebook app used for login. + // +kubebuilder:validation:Optional + AppID *string `json:"appId" tf:"app_id,omitempty"` + + // The App Secret of the Facebook app used for Facebook login. Cannot be specified with app_secret_setting_name. + // The App Secret of the Facebook app used for Facebook Login. Cannot be specified with `app_secret_setting_name`. + // +kubebuilder:validation:Optional + AppSecretSecretRef *v1.SecretKeySelector `json:"appSecretSecretRef,omitempty" tf:"-"` + + // The app setting name that contains the app_secret value used for Facebook Login. + // The app setting name that contains the `app_secret` value used for Facebook Login. Cannot be specified with `app_secret`. + // +kubebuilder:validation:Optional + AppSecretSettingName *string `json:"appSecretSettingName,omitempty" tf:"app_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. 
+ // Specifies a list of OAuth 2.0 scopes to be requested as part of Facebook Login authentication. + // +kubebuilder:validation:Optional + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type LinuxWebAppSlotAuthSettingsGithubInitParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the GitHub app used for login. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for GitHub Login. Cannot be specified with `client_secret`. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type LinuxWebAppSlotAuthSettingsGithubObservation struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the GitHub app used for login. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for GitHub Login. Cannot be specified with `client_secret`. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. 
If not specified, "wl.basic" is used as the default scope. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type LinuxWebAppSlotAuthSettingsGithubParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the GitHub app used for login. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. + // The Client Secret of the GitHub app used for GitHub Login. Cannot be specified with `client_secret_setting_name`. + // +kubebuilder:validation:Optional + ClientSecretSecretRef *v1.SecretKeySelector `json:"clientSecretSecretRef,omitempty" tf:"-"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for GitHub Login. Cannot be specified with `client_secret`. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + // +kubebuilder:validation:Optional + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type LinuxWebAppSlotAuthSettingsGoogleInitParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Google web application. 
+ ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Google Login. Cannot be specified with `client_secret`. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication. If not specified, "openid", "profile", and "email" are used as default scopes. + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type LinuxWebAppSlotAuthSettingsGoogleObservation struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Google web application. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Google Login. Cannot be specified with `client_secret`. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication. If not specified, "openid", "profile", and "email" are used as default scopes. 
+ OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type LinuxWebAppSlotAuthSettingsGoogleParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Google web application. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. + // The client secret associated with the Google web application. Cannot be specified with `client_secret_setting_name`. + // +kubebuilder:validation:Optional + ClientSecretSecretRef *v1.SecretKeySelector `json:"clientSecretSecretRef,omitempty" tf:"-"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Google Login. Cannot be specified with `client_secret`. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication. If not specified, "openid", "profile", and "email" are used as default scopes. + // +kubebuilder:validation:Optional + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type LinuxWebAppSlotAuthSettingsInitParameters struct { + + // An active_directory block as defined above. 
+ ActiveDirectory *LinuxWebAppSlotAuthSettingsActiveDirectoryInitParameters `json:"activeDirectory,omitempty" tf:"active_directory,omitempty"` + + // Specifies a map of login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + // Specifies a map of Login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + // +mapType=granular + AdditionalLoginParameters map[string]*string `json:"additionalLoginParameters,omitempty" tf:"additional_login_parameters,omitempty"` + + // Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Linux Web App. + // Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + AllowedExternalRedirectUrls []*string `json:"allowedExternalRedirectUrls,omitempty" tf:"allowed_external_redirect_urls,omitempty"` + + // The default authentication provider to use when multiple providers are configured. Possible values include: BuiltInAuthenticationProviderAzureActiveDirectory, BuiltInAuthenticationProviderFacebook, BuiltInAuthenticationProviderGoogle, BuiltInAuthenticationProviderMicrosoftAccount, BuiltInAuthenticationProviderTwitter, BuiltInAuthenticationProviderGithub + // The default authentication provider to use when multiple providers are configured. Possible values include: `AzureActiveDirectory`, `Facebook`, `Google`, `MicrosoftAccount`, `Twitter`, `Github`. + DefaultProvider *string `json:"defaultProvider,omitempty" tf:"default_provider,omitempty"` + + // Should the Authentication / Authorization feature be enabled for the Linux Web App? + // Should the Authentication / Authorization feature be enabled? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // A facebook block as defined below. + Facebook *LinuxWebAppSlotAuthSettingsFacebookInitParameters `json:"facebook,omitempty" tf:"facebook,omitempty"` + + // A github block as defined below. 
+ Github *LinuxWebAppSlotAuthSettingsGithubInitParameters `json:"github,omitempty" tf:"github,omitempty"` + + // A google block as defined below. + Google *LinuxWebAppSlotAuthSettingsGoogleInitParameters `json:"google,omitempty" tf:"google,omitempty"` + + // The OpenID Connect Issuer URI that represents the entity that issues access tokens for this Linux Web App. + // The OpenID Connect Issuer URI that represents the entity which issues access tokens. + Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"` + + // A microsoft block as defined below. + Microsoft *LinuxWebAppSlotAuthSettingsMicrosoftInitParameters `json:"microsoft,omitempty" tf:"microsoft,omitempty"` + + // The RuntimeVersion of the Authentication / Authorization feature in use for the Linux Web App. + // The RuntimeVersion of the Authentication / Authorization feature in use. + RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` + + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + TokenRefreshExtensionHours *float64 `json:"tokenRefreshExtensionHours,omitempty" tf:"token_refresh_extension_hours,omitempty"` + + // Should the Linux Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to false. + // Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to `false`. + TokenStoreEnabled *bool `json:"tokenStoreEnabled,omitempty" tf:"token_store_enabled,omitempty"` + + // A twitter block as defined below. + Twitter *LinuxWebAppSlotAuthSettingsTwitterInitParameters `json:"twitter,omitempty" tf:"twitter,omitempty"` + + // The action to take when an unauthenticated client attempts to access the app. 
Possible values include: RedirectToLoginPage, AllowAnonymous. + // The action to take when an unauthenticated client attempts to access the app. Possible values include: `RedirectToLoginPage`, `AllowAnonymous`. + UnauthenticatedClientAction *string `json:"unauthenticatedClientAction,omitempty" tf:"unauthenticated_client_action,omitempty"` +} + +type LinuxWebAppSlotAuthSettingsMicrosoftInitParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OAuth 2.0 client ID that was created for the app used for authentication. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret`. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + // The list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, `wl.basic` is used as the default scope. + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type LinuxWebAppSlotAuthSettingsMicrosoftObservation struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OAuth 2.0 client ID that was created for the app used for authentication. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. 
+ // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret`. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + // The list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, `wl.basic` is used as the default scope. + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type LinuxWebAppSlotAuthSettingsMicrosoftParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OAuth 2.0 client ID that was created for the app used for authentication. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. + // The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret_setting_name`. + // +kubebuilder:validation:Optional + ClientSecretSecretRef *v1.SecretKeySelector `json:"clientSecretSecretRef,omitempty" tf:"-"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret`. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. 
If not specified, "wl.basic" is used as the default scope. + // The list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, `wl.basic` is used as the default scope. + // +kubebuilder:validation:Optional + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type LinuxWebAppSlotAuthSettingsObservation struct { + + // An active_directory block as defined above. + ActiveDirectory *LinuxWebAppSlotAuthSettingsActiveDirectoryObservation `json:"activeDirectory,omitempty" tf:"active_directory,omitempty"` + + // Specifies a map of login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + // Specifies a map of Login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + // +mapType=granular + AdditionalLoginParameters map[string]*string `json:"additionalLoginParameters,omitempty" tf:"additional_login_parameters,omitempty"` + + // Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Linux Web App. + // Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + AllowedExternalRedirectUrls []*string `json:"allowedExternalRedirectUrls,omitempty" tf:"allowed_external_redirect_urls,omitempty"` + + // The default authentication provider to use when multiple providers are configured. Possible values include: BuiltInAuthenticationProviderAzureActiveDirectory, BuiltInAuthenticationProviderFacebook, BuiltInAuthenticationProviderGoogle, BuiltInAuthenticationProviderMicrosoftAccount, BuiltInAuthenticationProviderTwitter, BuiltInAuthenticationProviderGithub + // The default authentication provider to use when multiple providers are configured. Possible values include: `AzureActiveDirectory`, `Facebook`, `Google`, `MicrosoftAccount`, `Twitter`, `Github`. 
+ DefaultProvider *string `json:"defaultProvider,omitempty" tf:"default_provider,omitempty"` + + // Should the Authentication / Authorization feature be enabled for the Linux Web App? + // Should the Authentication / Authorization feature be enabled? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // A facebook block as defined below. + Facebook *LinuxWebAppSlotAuthSettingsFacebookObservation `json:"facebook,omitempty" tf:"facebook,omitempty"` + + // A github block as defined below. + Github *LinuxWebAppSlotAuthSettingsGithubObservation `json:"github,omitempty" tf:"github,omitempty"` + + // A google block as defined below. + Google *LinuxWebAppSlotAuthSettingsGoogleObservation `json:"google,omitempty" tf:"google,omitempty"` + + // The OpenID Connect Issuer URI that represents the entity that issues access tokens for this Linux Web App. + // The OpenID Connect Issuer URI that represents the entity which issues access tokens. + Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"` + + // A microsoft block as defined below. + Microsoft *LinuxWebAppSlotAuthSettingsMicrosoftObservation `json:"microsoft,omitempty" tf:"microsoft,omitempty"` + + // The RuntimeVersion of the Authentication / Authorization feature in use for the Linux Web App. + // The RuntimeVersion of the Authentication / Authorization feature in use. + RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` + + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + TokenRefreshExtensionHours *float64 `json:"tokenRefreshExtensionHours,omitempty" tf:"token_refresh_extension_hours,omitempty"` + + // Should the Linux Web App durably store platform-specific security tokens that are obtained during login flows? 
Defaults to false. + // Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to `false`. + TokenStoreEnabled *bool `json:"tokenStoreEnabled,omitempty" tf:"token_store_enabled,omitempty"` + + // A twitter block as defined below. + Twitter *LinuxWebAppSlotAuthSettingsTwitterObservation `json:"twitter,omitempty" tf:"twitter,omitempty"` + + // The action to take when an unauthenticated client attempts to access the app. Possible values include: RedirectToLoginPage, AllowAnonymous. + // The action to take when an unauthenticated client attempts to access the app. Possible values include: `RedirectToLoginPage`, `AllowAnonymous`. + UnauthenticatedClientAction *string `json:"unauthenticatedClientAction,omitempty" tf:"unauthenticated_client_action,omitempty"` +} + +type LinuxWebAppSlotAuthSettingsParameters struct { + + // An active_directory block as defined above. + // +kubebuilder:validation:Optional + ActiveDirectory *LinuxWebAppSlotAuthSettingsActiveDirectoryParameters `json:"activeDirectory,omitempty" tf:"active_directory,omitempty"` + + // Specifies a map of login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + // Specifies a map of Login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + // +kubebuilder:validation:Optional + // +mapType=granular + AdditionalLoginParameters map[string]*string `json:"additionalLoginParameters,omitempty" tf:"additional_login_parameters,omitempty"` + + // Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Linux Web App. + // Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. 
+ // +kubebuilder:validation:Optional + AllowedExternalRedirectUrls []*string `json:"allowedExternalRedirectUrls,omitempty" tf:"allowed_external_redirect_urls,omitempty"` + + // The default authentication provider to use when multiple providers are configured. Possible values include: BuiltInAuthenticationProviderAzureActiveDirectory, BuiltInAuthenticationProviderFacebook, BuiltInAuthenticationProviderGoogle, BuiltInAuthenticationProviderMicrosoftAccount, BuiltInAuthenticationProviderTwitter, BuiltInAuthenticationProviderGithub + // The default authentication provider to use when multiple providers are configured. Possible values include: `AzureActiveDirectory`, `Facebook`, `Google`, `MicrosoftAccount`, `Twitter`, `Github`. + // +kubebuilder:validation:Optional + DefaultProvider *string `json:"defaultProvider,omitempty" tf:"default_provider,omitempty"` + + // Should the Authentication / Authorization feature be enabled for the Linux Web App? + // Should the Authentication / Authorization feature be enabled? + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` + + // A facebook block as defined below. + // +kubebuilder:validation:Optional + Facebook *LinuxWebAppSlotAuthSettingsFacebookParameters `json:"facebook,omitempty" tf:"facebook,omitempty"` + + // A github block as defined below. + // +kubebuilder:validation:Optional + Github *LinuxWebAppSlotAuthSettingsGithubParameters `json:"github,omitempty" tf:"github,omitempty"` + + // A google block as defined below. + // +kubebuilder:validation:Optional + Google *LinuxWebAppSlotAuthSettingsGoogleParameters `json:"google,omitempty" tf:"google,omitempty"` + + // The OpenID Connect Issuer URI that represents the entity that issues access tokens for this Linux Web App. + // The OpenID Connect Issuer URI that represents the entity which issues access tokens. 
+ // +kubebuilder:validation:Optional + Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"` + + // A microsoft block as defined below. + // +kubebuilder:validation:Optional + Microsoft *LinuxWebAppSlotAuthSettingsMicrosoftParameters `json:"microsoft,omitempty" tf:"microsoft,omitempty"` + + // The RuntimeVersion of the Authentication / Authorization feature in use for the Linux Web App. + // The RuntimeVersion of the Authentication / Authorization feature in use. + // +kubebuilder:validation:Optional + RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` + + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + // +kubebuilder:validation:Optional + TokenRefreshExtensionHours *float64 `json:"tokenRefreshExtensionHours,omitempty" tf:"token_refresh_extension_hours,omitempty"` + + // Should the Linux Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to false. + // Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to `false`. + // +kubebuilder:validation:Optional + TokenStoreEnabled *bool `json:"tokenStoreEnabled,omitempty" tf:"token_store_enabled,omitempty"` + + // A twitter block as defined below. + // +kubebuilder:validation:Optional + Twitter *LinuxWebAppSlotAuthSettingsTwitterParameters `json:"twitter,omitempty" tf:"twitter,omitempty"` + + // The action to take when an unauthenticated client attempts to access the app. Possible values include: RedirectToLoginPage, AllowAnonymous. + // The action to take when an unauthenticated client attempts to access the app. Possible values include: `RedirectToLoginPage`, `AllowAnonymous`. 
+ // +kubebuilder:validation:Optional + UnauthenticatedClientAction *string `json:"unauthenticatedClientAction,omitempty" tf:"unauthenticated_client_action,omitempty"` +} + +type LinuxWebAppSlotAuthSettingsTwitterInitParameters struct { + + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + ConsumerKey *string `json:"consumerKey,omitempty" tf:"consumer_key,omitempty"` + + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret`. + ConsumerSecretSettingName *string `json:"consumerSecretSettingName,omitempty" tf:"consumer_secret_setting_name,omitempty"` +} + +type LinuxWebAppSlotAuthSettingsTwitterObservation struct { + + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + ConsumerKey *string `json:"consumerKey,omitempty" tf:"consumer_key,omitempty"` + + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret`. + ConsumerSecretSettingName *string `json:"consumerSecretSettingName,omitempty" tf:"consumer_secret_setting_name,omitempty"` +} + +type LinuxWebAppSlotAuthSettingsTwitterParameters struct { + + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. 
+ // +kubebuilder:validation:Optional + ConsumerKey *string `json:"consumerKey" tf:"consumer_key,omitempty"` + + // The OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with consumer_secret_setting_name. + // The OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret_setting_name`. + // +kubebuilder:validation:Optional + ConsumerSecretSecretRef *v1.SecretKeySelector `json:"consumerSecretSecretRef,omitempty" tf:"-"` + + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret`. + // +kubebuilder:validation:Optional + ConsumerSecretSettingName *string `json:"consumerSecretSettingName,omitempty" tf:"consumer_secret_setting_name,omitempty"` +} + +type LinuxWebAppSlotAuthSettingsV2ActiveDirectoryV2InitParameters struct { + + // The list of allowed Applications for the Default Authorisation Policy. + // The list of allowed Applications for the Default Authorisation Policy. + AllowedApplications []*string `json:"allowedApplications,omitempty" tf:"allowed_applications,omitempty"` + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The list of allowed Group Names for the Default Authorisation Policy. + // The list of allowed Group Names for the Default Authorisation Policy. + AllowedGroups []*string `json:"allowedGroups,omitempty" tf:"allowed_groups,omitempty"` + + // The list of allowed Identities for the Default Authorisation Policy. 
+ // The list of allowed Identities for the Default Authorisation Policy. + AllowedIdentities []*string `json:"allowedIdentities,omitempty" tf:"allowed_identities,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Active Directory. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The thumbprint of the certificate used for signing purposes. + // The thumbprint of the certificate used for signing purposes. + ClientSecretCertificateThumbprint *string `json:"clientSecretCertificateThumbprint,omitempty" tf:"client_secret_certificate_thumbprint,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The App Setting name that contains the client secret of the Client. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // A list of Allowed Client Applications in the JWT Claim. + // A list of Allowed Client Applications in the JWT Claim. + JwtAllowedClientApplications []*string `json:"jwtAllowedClientApplications,omitempty" tf:"jwt_allowed_client_applications,omitempty"` + + // A list of Allowed Groups in the JWT Claim. + // A list of Allowed Groups in the JWT Claim. + JwtAllowedGroups []*string `json:"jwtAllowedGroups,omitempty" tf:"jwt_allowed_groups,omitempty"` + + // A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + // A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + // +mapType=granular + LoginParameters map[string]*string `json:"loginParameters,omitempty" tf:"login_parameters,omitempty"` + + // The Azure Tenant Endpoint for the Authenticating Tenant. e.g. https://login.microsoftonline.com/v2.0/{tenant-guid}/ + // The Azure Tenant Endpoint for the Authenticating Tenant. e.g. 
`https://login.microsoftonline.com/v2.0/{tenant-guid}/`. + TenantAuthEndpoint *string `json:"tenantAuthEndpoint,omitempty" tf:"tenant_auth_endpoint,omitempty"` + + // Should the www-authenticate provider should be omitted from the request? Defaults to false. + // Should the www-authenticate provider should be omitted from the request? Defaults to `false` + WwwAuthenticationDisabled *bool `json:"wwwAuthenticationDisabled,omitempty" tf:"www_authentication_disabled,omitempty"` +} + +type LinuxWebAppSlotAuthSettingsV2ActiveDirectoryV2Observation struct { + + // The list of allowed Applications for the Default Authorisation Policy. + // The list of allowed Applications for the Default Authorisation Policy. + AllowedApplications []*string `json:"allowedApplications,omitempty" tf:"allowed_applications,omitempty"` + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The list of allowed Group Names for the Default Authorisation Policy. + // The list of allowed Group Names for the Default Authorisation Policy. + AllowedGroups []*string `json:"allowedGroups,omitempty" tf:"allowed_groups,omitempty"` + + // The list of allowed Identities for the Default Authorisation Policy. + // The list of allowed Identities for the Default Authorisation Policy. + AllowedIdentities []*string `json:"allowedIdentities,omitempty" tf:"allowed_identities,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Active Directory. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The thumbprint of the certificate used for signing purposes. 
+ // The thumbprint of the certificate used for signing purposes. + ClientSecretCertificateThumbprint *string `json:"clientSecretCertificateThumbprint,omitempty" tf:"client_secret_certificate_thumbprint,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The App Setting name that contains the client secret of the Client. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // A list of Allowed Client Applications in the JWT Claim. + // A list of Allowed Client Applications in the JWT Claim. + JwtAllowedClientApplications []*string `json:"jwtAllowedClientApplications,omitempty" tf:"jwt_allowed_client_applications,omitempty"` + + // A list of Allowed Groups in the JWT Claim. + // A list of Allowed Groups in the JWT Claim. + JwtAllowedGroups []*string `json:"jwtAllowedGroups,omitempty" tf:"jwt_allowed_groups,omitempty"` + + // A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + // A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + // +mapType=granular + LoginParameters map[string]*string `json:"loginParameters,omitempty" tf:"login_parameters,omitempty"` + + // The Azure Tenant Endpoint for the Authenticating Tenant. e.g. https://login.microsoftonline.com/v2.0/{tenant-guid}/ + // The Azure Tenant Endpoint for the Authenticating Tenant. e.g. `https://login.microsoftonline.com/v2.0/{tenant-guid}/`. + TenantAuthEndpoint *string `json:"tenantAuthEndpoint,omitempty" tf:"tenant_auth_endpoint,omitempty"` + + // Should the www-authenticate provider should be omitted from the request? Defaults to false. + // Should the www-authenticate provider should be omitted from the request? 
Defaults to `false` + WwwAuthenticationDisabled *bool `json:"wwwAuthenticationDisabled,omitempty" tf:"www_authentication_disabled,omitempty"` +} + +type LinuxWebAppSlotAuthSettingsV2ActiveDirectoryV2Parameters struct { + + // The list of allowed Applications for the Default Authorisation Policy. + // The list of allowed Applications for the Default Authorisation Policy. + // +kubebuilder:validation:Optional + AllowedApplications []*string `json:"allowedApplications,omitempty" tf:"allowed_applications,omitempty"` + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + // +kubebuilder:validation:Optional + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The list of allowed Group Names for the Default Authorisation Policy. + // The list of allowed Group Names for the Default Authorisation Policy. + // +kubebuilder:validation:Optional + AllowedGroups []*string `json:"allowedGroups,omitempty" tf:"allowed_groups,omitempty"` + + // The list of allowed Identities for the Default Authorisation Policy. + // The list of allowed Identities for the Default Authorisation Policy. + // +kubebuilder:validation:Optional + AllowedIdentities []*string `json:"allowedIdentities,omitempty" tf:"allowed_identities,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Active Directory. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The thumbprint of the certificate used for signing purposes. + // The thumbprint of the certificate used for signing purposes. 
+ // +kubebuilder:validation:Optional + ClientSecretCertificateThumbprint *string `json:"clientSecretCertificateThumbprint,omitempty" tf:"client_secret_certificate_thumbprint,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The App Setting name that contains the client secret of the Client. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // A list of Allowed Client Applications in the JWT Claim. + // A list of Allowed Client Applications in the JWT Claim. + // +kubebuilder:validation:Optional + JwtAllowedClientApplications []*string `json:"jwtAllowedClientApplications,omitempty" tf:"jwt_allowed_client_applications,omitempty"` + + // A list of Allowed Groups in the JWT Claim. + // A list of Allowed Groups in the JWT Claim. + // +kubebuilder:validation:Optional + JwtAllowedGroups []*string `json:"jwtAllowedGroups,omitempty" tf:"jwt_allowed_groups,omitempty"` + + // A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + // A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + // +kubebuilder:validation:Optional + // +mapType=granular + LoginParameters map[string]*string `json:"loginParameters,omitempty" tf:"login_parameters,omitempty"` + + // The Azure Tenant Endpoint for the Authenticating Tenant. e.g. https://login.microsoftonline.com/v2.0/{tenant-guid}/ + // The Azure Tenant Endpoint for the Authenticating Tenant. e.g. `https://login.microsoftonline.com/v2.0/{tenant-guid}/`. + // +kubebuilder:validation:Optional + TenantAuthEndpoint *string `json:"tenantAuthEndpoint" tf:"tenant_auth_endpoint,omitempty"` + + // Should the www-authenticate provider should be omitted from the request? Defaults to false. + // Should the www-authenticate provider should be omitted from the request? 
Defaults to `false` + // +kubebuilder:validation:Optional + WwwAuthenticationDisabled *bool `json:"wwwAuthenticationDisabled,omitempty" tf:"www_authentication_disabled,omitempty"` +} + +type LinuxWebAppSlotAuthSettingsV2AppleV2InitParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Apple web application. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Apple Login. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` +} + +type LinuxWebAppSlotAuthSettingsV2AppleV2Observation struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Apple web application. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Apple Login. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type LinuxWebAppSlotAuthSettingsV2AppleV2Parameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Apple web application. 
+ // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Apple Login. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName" tf:"client_secret_setting_name,omitempty"` +} + +type LinuxWebAppSlotAuthSettingsV2AzureStaticWebAppV2InitParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Static Web App Authentication. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` +} + +type LinuxWebAppSlotAuthSettingsV2AzureStaticWebAppV2Observation struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Static Web App Authentication. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` +} + +type LinuxWebAppSlotAuthSettingsV2AzureStaticWebAppV2Parameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Static Web App Authentication. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` +} + +type LinuxWebAppSlotAuthSettingsV2CustomOidcV2InitParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with this Custom OIDC. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The name which should be used for this Storage Account. + // The name of the Custom OIDC Authentication Provider. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The name of the claim that contains the users name. + // The name of the claim that contains the users name. + NameClaimType *string `json:"nameClaimType,omitempty" tf:"name_claim_type,omitempty"` + + // The app setting name that contains the client_secret value used for the Custom OIDC Login. + // The endpoint that contains all the configuration endpoints for this Custom OIDC provider. + OpenIDConfigurationEndpoint *string `json:"openidConfigurationEndpoint,omitempty" tf:"openid_configuration_endpoint,omitempty"` + + // The list of the scopes that should be requested while authenticating. + // The list of the scopes that should be requested while authenticating. + Scopes []*string `json:"scopes,omitempty" tf:"scopes,omitempty"` +} + +type LinuxWebAppSlotAuthSettingsV2CustomOidcV2Observation struct { + + // The endpoint to make the Authorisation Request as supplied by openid_configuration_endpoint response. + // The endpoint to make the Authorisation Request. + AuthorisationEndpoint *string `json:"authorisationEndpoint,omitempty" tf:"authorisation_endpoint,omitempty"` + + // The endpoint that provides the keys necessary to validate the token as supplied by openid_configuration_endpoint response. + // The endpoint that provides the keys necessary to validate the token. + CertificationURI *string `json:"certificationUri,omitempty" tf:"certification_uri,omitempty"` + + // The Client Credential Method used. + // The Client Credential Method used. Currently the only supported value is `ClientSecretPost`. + ClientCredentialMethod *string `json:"clientCredentialMethod,omitempty" tf:"client_credential_method,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with this Custom OIDC. 
+ ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The App Setting name that contains the secret for this Custom OIDC Client. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The endpoint that issued the Token as supplied by openid_configuration_endpoint response. + // The endpoint that issued the Token. + IssuerEndpoint *string `json:"issuerEndpoint,omitempty" tf:"issuer_endpoint,omitempty"` + + // The name which should be used for this Storage Account. + // The name of the Custom OIDC Authentication Provider. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The name of the claim that contains the users name. + // The name of the claim that contains the users name. + NameClaimType *string `json:"nameClaimType,omitempty" tf:"name_claim_type,omitempty"` + + // The app setting name that contains the client_secret value used for the Custom OIDC Login. + // The endpoint that contains all the configuration endpoints for this Custom OIDC provider. + OpenIDConfigurationEndpoint *string `json:"openidConfigurationEndpoint,omitempty" tf:"openid_configuration_endpoint,omitempty"` + + // The list of the scopes that should be requested while authenticating. + // The list of the scopes that should be requested while authenticating. + Scopes []*string `json:"scopes,omitempty" tf:"scopes,omitempty"` + + // The endpoint used to request a Token as supplied by openid_configuration_endpoint response. + // The endpoint used to request a Token. + TokenEndpoint *string `json:"tokenEndpoint,omitempty" tf:"token_endpoint,omitempty"` +} + +type LinuxWebAppSlotAuthSettingsV2CustomOidcV2Parameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. 
+ // The ID of the Client to use to authenticate with this Custom OIDC. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The name which should be used for this Storage Account. + // The name of the Custom OIDC Authentication Provider. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The name of the claim that contains the users name. + // The name of the claim that contains the users name. + // +kubebuilder:validation:Optional + NameClaimType *string `json:"nameClaimType,omitempty" tf:"name_claim_type,omitempty"` + + // The app setting name that contains the client_secret value used for the Custom OIDC Login. + // The endpoint that contains all the configuration endpoints for this Custom OIDC provider. + // +kubebuilder:validation:Optional + OpenIDConfigurationEndpoint *string `json:"openidConfigurationEndpoint" tf:"openid_configuration_endpoint,omitempty"` + + // The list of the scopes that should be requested while authenticating. + // The list of the scopes that should be requested while authenticating. + // +kubebuilder:validation:Optional + Scopes []*string `json:"scopes,omitempty" tf:"scopes,omitempty"` +} + +type LinuxWebAppSlotAuthSettingsV2FacebookV2InitParameters struct { + + // The App ID of the Facebook app used for login. + // The App ID of the Facebook app used for login. + AppID *string `json:"appId,omitempty" tf:"app_id,omitempty"` + + // The app setting name that contains the app_secret value used for Facebook Login. + // The app setting name that contains the `app_secret` value used for Facebook Login. + AppSecretSettingName *string `json:"appSecretSettingName,omitempty" tf:"app_secret_setting_name,omitempty"` + + // The version of the Facebook API to be used while logging in. + // The version of the Facebook API to be used while logging in. 
+ GraphAPIVersion *string `json:"graphApiVersion,omitempty" tf:"graph_api_version,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of scopes to be requested as part of Facebook Login authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type LinuxWebAppSlotAuthSettingsV2FacebookV2Observation struct { + + // The App ID of the Facebook app used for login. + // The App ID of the Facebook app used for login. + AppID *string `json:"appId,omitempty" tf:"app_id,omitempty"` + + // The app setting name that contains the app_secret value used for Facebook Login. + // The app setting name that contains the `app_secret` value used for Facebook Login. + AppSecretSettingName *string `json:"appSecretSettingName,omitempty" tf:"app_secret_setting_name,omitempty"` + + // The version of the Facebook API to be used while logging in. + // The version of the Facebook API to be used while logging in. + GraphAPIVersion *string `json:"graphApiVersion,omitempty" tf:"graph_api_version,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of scopes to be requested as part of Facebook Login authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type LinuxWebAppSlotAuthSettingsV2FacebookV2Parameters struct { + + // The App ID of the Facebook app used for login. + // The App ID of the Facebook app used for login. + // +kubebuilder:validation:Optional + AppID *string `json:"appId" tf:"app_id,omitempty"` + + // The app setting name that contains the app_secret value used for Facebook Login. + // The app setting name that contains the `app_secret` value used for Facebook Login. 
+ // +kubebuilder:validation:Optional + AppSecretSettingName *string `json:"appSecretSettingName" tf:"app_secret_setting_name,omitempty"` + + // The version of the Facebook API to be used while logging in. + // The version of the Facebook API to be used while logging in. + // +kubebuilder:validation:Optional + GraphAPIVersion *string `json:"graphApiVersion,omitempty" tf:"graph_api_version,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of scopes to be requested as part of Facebook Login authentication. + // +kubebuilder:validation:Optional + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type LinuxWebAppSlotAuthSettingsV2GithubV2InitParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the GitHub app used for login. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for GitHub Login. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type LinuxWebAppSlotAuthSettingsV2GithubV2Observation struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the GitHub app used for login. 
+ ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for GitHub Login. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type LinuxWebAppSlotAuthSettingsV2GithubV2Parameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the GitHub app used for login. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for GitHub Login. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + // +kubebuilder:validation:Optional + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type LinuxWebAppSlotAuthSettingsV2GoogleV2InitParameters struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed Audiences that will be requested as part of Google Sign-In authentication. 
+ AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Google web application. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Google Login. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of Login scopes that will be requested as part of Google Sign-In authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type LinuxWebAppSlotAuthSettingsV2GoogleV2Observation struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed Audiences that will be requested as part of Google Sign-In authentication. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Google web application. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Google Login. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. 
+ // Specifies a list of Login scopes that will be requested as part of Google Sign-In authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type LinuxWebAppSlotAuthSettingsV2GoogleV2Parameters struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed Audiences that will be requested as part of Google Sign-In authentication. + // +kubebuilder:validation:Optional + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Google web application. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Google Login. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of Login scopes that will be requested as part of Google Sign-In authentication. + // +kubebuilder:validation:Optional + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type LinuxWebAppSlotAuthSettingsV2InitParameters struct { + + // An active_directory_v2 block as defined below. + ActiveDirectoryV2 *LinuxWebAppSlotAuthSettingsV2ActiveDirectoryV2InitParameters `json:"activeDirectoryV2,omitempty" tf:"active_directory_v2,omitempty"` + + // An apple_v2 block as defined below. 
+ AppleV2 *LinuxWebAppSlotAuthSettingsV2AppleV2InitParameters `json:"appleV2,omitempty" tf:"apple_v2,omitempty"` + + // Should the AuthV2 Settings be enabled. Defaults to false. + // Should the AuthV2 Settings be enabled. Defaults to `false` + AuthEnabled *bool `json:"authEnabled,omitempty" tf:"auth_enabled,omitempty"` + + // An azure_static_web_app_v2 block as defined below. + AzureStaticWebAppV2 *LinuxWebAppSlotAuthSettingsV2AzureStaticWebAppV2InitParameters `json:"azureStaticWebAppV2,omitempty" tf:"azure_static_web_app_v2,omitempty"` + + // The path to the App Auth settings. + // The path to the App Auth settings. **Note:** Relative Paths are evaluated from the Site Root directory. + ConfigFilePath *string `json:"configFilePath,omitempty" tf:"config_file_path,omitempty"` + + // Zero or more custom_oidc_v2 blocks as defined below. + CustomOidcV2 []LinuxWebAppSlotAuthSettingsV2CustomOidcV2InitParameters `json:"customOidcV2,omitempty" tf:"custom_oidc_v2,omitempty"` + + // The Default Authentication Provider to use when the unauthenticated_action is set to RedirectToLoginPage. Possible values include: apple, azureactivedirectory, facebook, github, google, twitter and the name of your custom_oidc_v2 provider. + // The Default Authentication Provider to use when the `unauthenticated_action` is set to `RedirectToLoginPage`. Possible values include: `apple`, `azureactivedirectory`, `facebook`, `github`, `google`, `twitter` and the `name` of your `custom_oidc_v2` provider. + DefaultProvider *string `json:"defaultProvider,omitempty" tf:"default_provider,omitempty"` + + // The paths which should be excluded from the unauthenticated_action when it is set to RedirectToLoginPage. + // The paths which should be excluded from the `unauthenticated_action` when it is set to `RedirectToLoginPage`. + ExcludedPaths []*string `json:"excludedPaths,omitempty" tf:"excluded_paths,omitempty"` + + // A facebook_v2 block as defined below. 
+ FacebookV2 *LinuxWebAppSlotAuthSettingsV2FacebookV2InitParameters `json:"facebookV2,omitempty" tf:"facebook_v2,omitempty"` + + // The convention used to determine the url of the request made. Possible values include NoProxy, Standard, Custom. Defaults to NoProxy. + // The convention used to determine the url of the request made. Possible values include `ForwardProxyConventionNoProxy`, `ForwardProxyConventionStandard`, `ForwardProxyConventionCustom`. Defaults to `ForwardProxyConventionNoProxy` + ForwardProxyConvention *string `json:"forwardProxyConvention,omitempty" tf:"forward_proxy_convention,omitempty"` + + // The name of the custom header containing the host of the request. + // The name of the header containing the host of the request. + ForwardProxyCustomHostHeaderName *string `json:"forwardProxyCustomHostHeaderName,omitempty" tf:"forward_proxy_custom_host_header_name,omitempty"` + + // The name of the custom header containing the scheme of the request. + // The name of the header containing the scheme of the request. + ForwardProxyCustomSchemeHeaderName *string `json:"forwardProxyCustomSchemeHeaderName,omitempty" tf:"forward_proxy_custom_scheme_header_name,omitempty"` + + // A github_v2 block as defined below. + GithubV2 *LinuxWebAppSlotAuthSettingsV2GithubV2InitParameters `json:"githubV2,omitempty" tf:"github_v2,omitempty"` + + // A google_v2 block as defined below. + GoogleV2 *LinuxWebAppSlotAuthSettingsV2GoogleV2InitParameters `json:"googleV2,omitempty" tf:"google_v2,omitempty"` + + // The prefix that should precede all the authentication and authorisation paths. Defaults to /.auth. + // The prefix that should precede all the authentication and authorisation paths. Defaults to `/.auth` + HTTPRouteAPIPrefix *string `json:"httpRouteApiPrefix,omitempty" tf:"http_route_api_prefix,omitempty"` + + // A login block as defined below. 
+ Login *LinuxWebAppSlotAuthSettingsV2LoginInitParameters `json:"login,omitempty" tf:"login,omitempty"` + + // A microsoft_v2 block as defined below. + MicrosoftV2 *LinuxWebAppSlotAuthSettingsV2MicrosoftV2InitParameters `json:"microsoftV2,omitempty" tf:"microsoft_v2,omitempty"` + + // Should the authentication flow be used for all requests. + // Should the authentication flow be used for all requests. + RequireAuthentication *bool `json:"requireAuthentication,omitempty" tf:"require_authentication,omitempty"` + + // Should HTTPS be required on connections? Defaults to true. + // Should HTTPS be required on connections? Defaults to true. + RequireHTTPS *bool `json:"requireHttps,omitempty" tf:"require_https,omitempty"` + + // The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to ~1. + // The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to `~1` + RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` + + // A twitter_v2 block as defined below. + TwitterV2 *LinuxWebAppSlotAuthSettingsV2TwitterV2InitParameters `json:"twitterV2,omitempty" tf:"twitter_v2,omitempty"` + + // The action to take for requests made without authentication. Possible values include RedirectToLoginPage, AllowAnonymous, Return401, and Return403. Defaults to RedirectToLoginPage. + // The action to take for requests made without authentication. Possible values include `RedirectToLoginPage`, `AllowAnonymous`, `Return401`, and `Return403`. Defaults to `RedirectToLoginPage`. + UnauthenticatedAction *string `json:"unauthenticatedAction,omitempty" tf:"unauthenticated_action,omitempty"` +} + +type LinuxWebAppSlotAuthSettingsV2LoginInitParameters struct { + + // External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. 
+ // External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. **Note:** URLs within the current domain are always implicitly allowed. + AllowedExternalRedirectUrls []*string `json:"allowedExternalRedirectUrls,omitempty" tf:"allowed_external_redirect_urls,omitempty"` + + // The method by which cookies expire. Possible values include: FixedTime, and IdentityProviderDerived. Defaults to FixedTime. + // The method by which cookies expire. Possible values include: `FixedTime`, and `IdentityProviderDerived`. Defaults to `FixedTime`. + CookieExpirationConvention *string `json:"cookieExpirationConvention,omitempty" tf:"cookie_expiration_convention,omitempty"` + + // The time after the request is made when the session cookie should expire. Defaults to 08:00:00. + // The time after the request is made when the session cookie should expire. Defaults to `08:00:00`. + CookieExpirationTime *string `json:"cookieExpirationTime,omitempty" tf:"cookie_expiration_time,omitempty"` + + // The endpoint to which logout requests should be made. + // The endpoint to which logout requests should be made. + LogoutEndpoint *string `json:"logoutEndpoint,omitempty" tf:"logout_endpoint,omitempty"` + + // The time after the request is made when the nonce should expire. Defaults to 00:05:00. + // The time after the request is made when the nonce should expire. Defaults to `00:05:00`. + NonceExpirationTime *string `json:"nonceExpirationTime,omitempty" tf:"nonce_expiration_time,omitempty"` + + // Should the fragments from the request be preserved after the login request is made. Defaults to false. + // Should the fragments from the request be preserved after the login request is made. Defaults to `false`. 
+ PreserveURLFragmentsForLogins *bool `json:"preserveUrlFragmentsForLogins,omitempty" tf:"preserve_url_fragments_for_logins,omitempty"` + + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + TokenRefreshExtensionTime *float64 `json:"tokenRefreshExtensionTime,omitempty" tf:"token_refresh_extension_time,omitempty"` + + // Should the Token Store configuration Enabled. Defaults to false + // Should the Token Store configuration Enabled. Defaults to `false` + TokenStoreEnabled *bool `json:"tokenStoreEnabled,omitempty" tf:"token_store_enabled,omitempty"` + + // The directory path in the App Filesystem in which the tokens will be stored. + // The directory path in the App Filesystem in which the tokens will be stored. + TokenStorePath *string `json:"tokenStorePath,omitempty" tf:"token_store_path,omitempty"` + + // The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + // The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + TokenStoreSASSettingName *string `json:"tokenStoreSasSettingName,omitempty" tf:"token_store_sas_setting_name,omitempty"` + + // Should the nonce be validated while completing the login flow. Defaults to true. + // Should the nonce be validated while completing the login flow. Defaults to `true`. + ValidateNonce *bool `json:"validateNonce,omitempty" tf:"validate_nonce,omitempty"` +} + +type LinuxWebAppSlotAuthSettingsV2LoginObservation struct { + + // External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. + // External URLs that can be redirected to as part of logging in or logging out of the app. 
This is an advanced setting typically only needed by Windows Store application backends. **Note:** URLs within the current domain are always implicitly allowed. + AllowedExternalRedirectUrls []*string `json:"allowedExternalRedirectUrls,omitempty" tf:"allowed_external_redirect_urls,omitempty"` + + // The method by which cookies expire. Possible values include: FixedTime, and IdentityProviderDerived. Defaults to FixedTime. + // The method by which cookies expire. Possible values include: `FixedTime`, and `IdentityProviderDerived`. Defaults to `FixedTime`. + CookieExpirationConvention *string `json:"cookieExpirationConvention,omitempty" tf:"cookie_expiration_convention,omitempty"` + + // The time after the request is made when the session cookie should expire. Defaults to 08:00:00. + // The time after the request is made when the session cookie should expire. Defaults to `08:00:00`. + CookieExpirationTime *string `json:"cookieExpirationTime,omitempty" tf:"cookie_expiration_time,omitempty"` + + // The endpoint to which logout requests should be made. + // The endpoint to which logout requests should be made. + LogoutEndpoint *string `json:"logoutEndpoint,omitempty" tf:"logout_endpoint,omitempty"` + + // The time after the request is made when the nonce should expire. Defaults to 00:05:00. + // The time after the request is made when the nonce should expire. Defaults to `00:05:00`. + NonceExpirationTime *string `json:"nonceExpirationTime,omitempty" tf:"nonce_expiration_time,omitempty"` + + // Should the fragments from the request be preserved after the login request is made. Defaults to false. + // Should the fragments from the request be preserved after the login request is made. Defaults to `false`. + PreserveURLFragmentsForLogins *bool `json:"preserveUrlFragmentsForLogins,omitempty" tf:"preserve_url_fragments_for_logins,omitempty"` + + // The number of hours after session token expiration that a session token can be used to call the token refresh API. 
Defaults to 72 hours. + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + TokenRefreshExtensionTime *float64 `json:"tokenRefreshExtensionTime,omitempty" tf:"token_refresh_extension_time,omitempty"` + + // Should the Token Store configuration Enabled. Defaults to false + // Should the Token Store configuration Enabled. Defaults to `false` + TokenStoreEnabled *bool `json:"tokenStoreEnabled,omitempty" tf:"token_store_enabled,omitempty"` + + // The directory path in the App Filesystem in which the tokens will be stored. + // The directory path in the App Filesystem in which the tokens will be stored. + TokenStorePath *string `json:"tokenStorePath,omitempty" tf:"token_store_path,omitempty"` + + // The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + // The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + TokenStoreSASSettingName *string `json:"tokenStoreSasSettingName,omitempty" tf:"token_store_sas_setting_name,omitempty"` + + // Should the nonce be validated while completing the login flow. Defaults to true. + // Should the nonce be validated while completing the login flow. Defaults to `true`. + ValidateNonce *bool `json:"validateNonce,omitempty" tf:"validate_nonce,omitempty"` +} + +type LinuxWebAppSlotAuthSettingsV2LoginParameters struct { + + // External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. + // External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. **Note:** URLs within the current domain are always implicitly allowed. 
+ // +kubebuilder:validation:Optional + AllowedExternalRedirectUrls []*string `json:"allowedExternalRedirectUrls,omitempty" tf:"allowed_external_redirect_urls,omitempty"` + + // The method by which cookies expire. Possible values include: FixedTime, and IdentityProviderDerived. Defaults to FixedTime. + // The method by which cookies expire. Possible values include: `FixedTime`, and `IdentityProviderDerived`. Defaults to `FixedTime`. + // +kubebuilder:validation:Optional + CookieExpirationConvention *string `json:"cookieExpirationConvention,omitempty" tf:"cookie_expiration_convention,omitempty"` + + // The time after the request is made when the session cookie should expire. Defaults to 08:00:00. + // The time after the request is made when the session cookie should expire. Defaults to `08:00:00`. + // +kubebuilder:validation:Optional + CookieExpirationTime *string `json:"cookieExpirationTime,omitempty" tf:"cookie_expiration_time,omitempty"` + + // The endpoint to which logout requests should be made. + // The endpoint to which logout requests should be made. + // +kubebuilder:validation:Optional + LogoutEndpoint *string `json:"logoutEndpoint,omitempty" tf:"logout_endpoint,omitempty"` + + // The time after the request is made when the nonce should expire. Defaults to 00:05:00. + // The time after the request is made when the nonce should expire. Defaults to `00:05:00`. + // +kubebuilder:validation:Optional + NonceExpirationTime *string `json:"nonceExpirationTime,omitempty" tf:"nonce_expiration_time,omitempty"` + + // Should the fragments from the request be preserved after the login request is made. Defaults to false. + // Should the fragments from the request be preserved after the login request is made. Defaults to `false`. 
+ // +kubebuilder:validation:Optional + PreserveURLFragmentsForLogins *bool `json:"preserveUrlFragmentsForLogins,omitempty" tf:"preserve_url_fragments_for_logins,omitempty"` + + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + // +kubebuilder:validation:Optional + TokenRefreshExtensionTime *float64 `json:"tokenRefreshExtensionTime,omitempty" tf:"token_refresh_extension_time,omitempty"` + + // Should the Token Store configuration Enabled. Defaults to false + // Should the Token Store configuration Enabled. Defaults to `false` + // +kubebuilder:validation:Optional + TokenStoreEnabled *bool `json:"tokenStoreEnabled,omitempty" tf:"token_store_enabled,omitempty"` + + // The directory path in the App Filesystem in which the tokens will be stored. + // The directory path in the App Filesystem in which the tokens will be stored. + // +kubebuilder:validation:Optional + TokenStorePath *string `json:"tokenStorePath,omitempty" tf:"token_store_path,omitempty"` + + // The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + // The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + // +kubebuilder:validation:Optional + TokenStoreSASSettingName *string `json:"tokenStoreSasSettingName,omitempty" tf:"token_store_sas_setting_name,omitempty"` + + // Should the nonce be validated while completing the login flow. Defaults to true. + // Should the nonce be validated while completing the login flow. Defaults to `true`. 
+ // +kubebuilder:validation:Optional + ValidateNonce *bool `json:"validateNonce,omitempty" tf:"validate_nonce,omitempty"` +} + +type LinuxWebAppSlotAuthSettingsV2MicrosoftV2InitParameters struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OAuth 2.0 client ID that was created for the app used for authentication. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // The list of Login scopes that will be requested as part of Microsoft Account authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type LinuxWebAppSlotAuthSettingsV2MicrosoftV2Observation struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OAuth 2.0 client ID that was created for the app used for authentication. 
+ ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // The list of Login scopes that will be requested as part of Microsoft Account authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type LinuxWebAppSlotAuthSettingsV2MicrosoftV2Parameters struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // +kubebuilder:validation:Optional + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OAuth 2.0 client ID that was created for the app used for authentication. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // The list of Login scopes that will be requested as part of Microsoft Account authentication. 
+ // +kubebuilder:validation:Optional + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type LinuxWebAppSlotAuthSettingsV2Observation struct { + + // An active_directory_v2 block as defined below. + ActiveDirectoryV2 *LinuxWebAppSlotAuthSettingsV2ActiveDirectoryV2Observation `json:"activeDirectoryV2,omitempty" tf:"active_directory_v2,omitempty"` + + // An apple_v2 block as defined below. + AppleV2 *LinuxWebAppSlotAuthSettingsV2AppleV2Observation `json:"appleV2,omitempty" tf:"apple_v2,omitempty"` + + // Should the AuthV2 Settings be enabled. Defaults to false. + // Should the AuthV2 Settings be enabled. Defaults to `false` + AuthEnabled *bool `json:"authEnabled,omitempty" tf:"auth_enabled,omitempty"` + + // An azure_static_web_app_v2 block as defined below. + AzureStaticWebAppV2 *LinuxWebAppSlotAuthSettingsV2AzureStaticWebAppV2Observation `json:"azureStaticWebAppV2,omitempty" tf:"azure_static_web_app_v2,omitempty"` + + // The path to the App Auth settings. + // The path to the App Auth settings. **Note:** Relative Paths are evaluated from the Site Root directory. + ConfigFilePath *string `json:"configFilePath,omitempty" tf:"config_file_path,omitempty"` + + // Zero or more custom_oidc_v2 blocks as defined below. + CustomOidcV2 []LinuxWebAppSlotAuthSettingsV2CustomOidcV2Observation `json:"customOidcV2,omitempty" tf:"custom_oidc_v2,omitempty"` + + // The Default Authentication Provider to use when the unauthenticated_action is set to RedirectToLoginPage. Possible values include: apple, azureactivedirectory, facebook, github, google, twitter and the name of your custom_oidc_v2 provider. + // The Default Authentication Provider to use when the `unauthenticated_action` is set to `RedirectToLoginPage`. Possible values include: `apple`, `azureactivedirectory`, `facebook`, `github`, `google`, `twitter` and the `name` of your `custom_oidc_v2` provider. 
+ DefaultProvider *string `json:"defaultProvider,omitempty" tf:"default_provider,omitempty"` + + // The paths which should be excluded from the unauthenticated_action when it is set to RedirectToLoginPage. + // The paths which should be excluded from the `unauthenticated_action` when it is set to `RedirectToLoginPage`. + ExcludedPaths []*string `json:"excludedPaths,omitempty" tf:"excluded_paths,omitempty"` + + // A facebook_v2 block as defined below. + FacebookV2 *LinuxWebAppSlotAuthSettingsV2FacebookV2Observation `json:"facebookV2,omitempty" tf:"facebook_v2,omitempty"` + + // The convention used to determine the url of the request made. Possible values include NoProxy, Standard, Custom. Defaults to NoProxy. + // The convention used to determine the url of the request made. Possible values include `ForwardProxyConventionNoProxy`, `ForwardProxyConventionStandard`, `ForwardProxyConventionCustom`. Defaults to `ForwardProxyConventionNoProxy` + ForwardProxyConvention *string `json:"forwardProxyConvention,omitempty" tf:"forward_proxy_convention,omitempty"` + + // The name of the custom header containing the host of the request. + // The name of the header containing the host of the request. + ForwardProxyCustomHostHeaderName *string `json:"forwardProxyCustomHostHeaderName,omitempty" tf:"forward_proxy_custom_host_header_name,omitempty"` + + // The name of the custom header containing the scheme of the request. + // The name of the header containing the scheme of the request. + ForwardProxyCustomSchemeHeaderName *string `json:"forwardProxyCustomSchemeHeaderName,omitempty" tf:"forward_proxy_custom_scheme_header_name,omitempty"` + + // A github_v2 block as defined below. + GithubV2 *LinuxWebAppSlotAuthSettingsV2GithubV2Observation `json:"githubV2,omitempty" tf:"github_v2,omitempty"` + + // A google_v2 block as defined below. 
+ GoogleV2 *LinuxWebAppSlotAuthSettingsV2GoogleV2Observation `json:"googleV2,omitempty" tf:"google_v2,omitempty"` + + // The prefix that should precede all the authentication and authorisation paths. Defaults to /.auth. + // The prefix that should precede all the authentication and authorisation paths. Defaults to `/.auth` + HTTPRouteAPIPrefix *string `json:"httpRouteApiPrefix,omitempty" tf:"http_route_api_prefix,omitempty"` + + // A login block as defined below. + Login *LinuxWebAppSlotAuthSettingsV2LoginObservation `json:"login,omitempty" tf:"login,omitempty"` + + // A microsoft_v2 block as defined below. + MicrosoftV2 *LinuxWebAppSlotAuthSettingsV2MicrosoftV2Observation `json:"microsoftV2,omitempty" tf:"microsoft_v2,omitempty"` + + // Should the authentication flow be used for all requests. + // Should the authentication flow be used for all requests. + RequireAuthentication *bool `json:"requireAuthentication,omitempty" tf:"require_authentication,omitempty"` + + // Should HTTPS be required on connections? Defaults to true. + // Should HTTPS be required on connections? Defaults to true. + RequireHTTPS *bool `json:"requireHttps,omitempty" tf:"require_https,omitempty"` + + // The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to ~1. + // The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to `~1` + RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` + + // A twitter_v2 block as defined below. + TwitterV2 *LinuxWebAppSlotAuthSettingsV2TwitterV2Observation `json:"twitterV2,omitempty" tf:"twitter_v2,omitempty"` + + // The action to take for requests made without authentication. Possible values include RedirectToLoginPage, AllowAnonymous, Return401, and Return403. Defaults to RedirectToLoginPage. + // The action to take for requests made without authentication. Possible values include `RedirectToLoginPage`, `AllowAnonymous`, `Return401`, and `Return403`. 
Defaults to `RedirectToLoginPage`. + UnauthenticatedAction *string `json:"unauthenticatedAction,omitempty" tf:"unauthenticated_action,omitempty"` +} + +type LinuxWebAppSlotAuthSettingsV2Parameters struct { + + // An active_directory_v2 block as defined below. + // +kubebuilder:validation:Optional + ActiveDirectoryV2 *LinuxWebAppSlotAuthSettingsV2ActiveDirectoryV2Parameters `json:"activeDirectoryV2,omitempty" tf:"active_directory_v2,omitempty"` + + // An apple_v2 block as defined below. + // +kubebuilder:validation:Optional + AppleV2 *LinuxWebAppSlotAuthSettingsV2AppleV2Parameters `json:"appleV2,omitempty" tf:"apple_v2,omitempty"` + + // Should the AuthV2 Settings be enabled. Defaults to false. + // Should the AuthV2 Settings be enabled. Defaults to `false` + // +kubebuilder:validation:Optional + AuthEnabled *bool `json:"authEnabled,omitempty" tf:"auth_enabled,omitempty"` + + // An azure_static_web_app_v2 block as defined below. + // +kubebuilder:validation:Optional + AzureStaticWebAppV2 *LinuxWebAppSlotAuthSettingsV2AzureStaticWebAppV2Parameters `json:"azureStaticWebAppV2,omitempty" tf:"azure_static_web_app_v2,omitempty"` + + // The path to the App Auth settings. + // The path to the App Auth settings. **Note:** Relative Paths are evaluated from the Site Root directory. + // +kubebuilder:validation:Optional + ConfigFilePath *string `json:"configFilePath,omitempty" tf:"config_file_path,omitempty"` + + // Zero or more custom_oidc_v2 blocks as defined below. + // +kubebuilder:validation:Optional + CustomOidcV2 []LinuxWebAppSlotAuthSettingsV2CustomOidcV2Parameters `json:"customOidcV2,omitempty" tf:"custom_oidc_v2,omitempty"` + + // The Default Authentication Provider to use when the unauthenticated_action is set to RedirectToLoginPage. Possible values include: apple, azureactivedirectory, facebook, github, google, twitter and the name of your custom_oidc_v2 provider. 
+ // The Default Authentication Provider to use when the `unauthenticated_action` is set to `RedirectToLoginPage`. Possible values include: `apple`, `azureactivedirectory`, `facebook`, `github`, `google`, `twitter` and the `name` of your `custom_oidc_v2` provider. + // +kubebuilder:validation:Optional + DefaultProvider *string `json:"defaultProvider,omitempty" tf:"default_provider,omitempty"` + + // The paths which should be excluded from the unauthenticated_action when it is set to RedirectToLoginPage. + // The paths which should be excluded from the `unauthenticated_action` when it is set to `RedirectToLoginPage`. + // +kubebuilder:validation:Optional + ExcludedPaths []*string `json:"excludedPaths,omitempty" tf:"excluded_paths,omitempty"` + + // A facebook_v2 block as defined below. + // +kubebuilder:validation:Optional + FacebookV2 *LinuxWebAppSlotAuthSettingsV2FacebookV2Parameters `json:"facebookV2,omitempty" tf:"facebook_v2,omitempty"` + + // The convention used to determine the url of the request made. Possible values include NoProxy, Standard, Custom. Defaults to NoProxy. + // The convention used to determine the url of the request made. Possible values include `ForwardProxyConventionNoProxy`, `ForwardProxyConventionStandard`, `ForwardProxyConventionCustom`. Defaults to `ForwardProxyConventionNoProxy` + // +kubebuilder:validation:Optional + ForwardProxyConvention *string `json:"forwardProxyConvention,omitempty" tf:"forward_proxy_convention,omitempty"` + + // The name of the custom header containing the host of the request. + // The name of the header containing the host of the request. + // +kubebuilder:validation:Optional + ForwardProxyCustomHostHeaderName *string `json:"forwardProxyCustomHostHeaderName,omitempty" tf:"forward_proxy_custom_host_header_name,omitempty"` + + // The name of the custom header containing the scheme of the request. + // The name of the header containing the scheme of the request. 
+ // +kubebuilder:validation:Optional + ForwardProxyCustomSchemeHeaderName *string `json:"forwardProxyCustomSchemeHeaderName,omitempty" tf:"forward_proxy_custom_scheme_header_name,omitempty"` + + // A github_v2 block as defined below. + // +kubebuilder:validation:Optional + GithubV2 *LinuxWebAppSlotAuthSettingsV2GithubV2Parameters `json:"githubV2,omitempty" tf:"github_v2,omitempty"` + + // A google_v2 block as defined below. + // +kubebuilder:validation:Optional + GoogleV2 *LinuxWebAppSlotAuthSettingsV2GoogleV2Parameters `json:"googleV2,omitempty" tf:"google_v2,omitempty"` + + // The prefix that should precede all the authentication and authorisation paths. Defaults to /.auth. + // The prefix that should precede all the authentication and authorisation paths. Defaults to `/.auth` + // +kubebuilder:validation:Optional + HTTPRouteAPIPrefix *string `json:"httpRouteApiPrefix,omitempty" tf:"http_route_api_prefix,omitempty"` + + // A login block as defined below. + // +kubebuilder:validation:Optional + Login *LinuxWebAppSlotAuthSettingsV2LoginParameters `json:"login" tf:"login,omitempty"` + + // A microsoft_v2 block as defined below. + // +kubebuilder:validation:Optional + MicrosoftV2 *LinuxWebAppSlotAuthSettingsV2MicrosoftV2Parameters `json:"microsoftV2,omitempty" tf:"microsoft_v2,omitempty"` + + // Should the authentication flow be used for all requests. + // Should the authentication flow be used for all requests. + // +kubebuilder:validation:Optional + RequireAuthentication *bool `json:"requireAuthentication,omitempty" tf:"require_authentication,omitempty"` + + // Should HTTPS be required on connections? Defaults to true. + // Should HTTPS be required on connections? Defaults to true. + // +kubebuilder:validation:Optional + RequireHTTPS *bool `json:"requireHttps,omitempty" tf:"require_https,omitempty"` + + // The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to ~1. 
+ // The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to `~1` + // +kubebuilder:validation:Optional + RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` + + // A twitter_v2 block as defined below. + // +kubebuilder:validation:Optional + TwitterV2 *LinuxWebAppSlotAuthSettingsV2TwitterV2Parameters `json:"twitterV2,omitempty" tf:"twitter_v2,omitempty"` + + // The action to take for requests made without authentication. Possible values include RedirectToLoginPage, AllowAnonymous, Return401, and Return403. Defaults to RedirectToLoginPage. + // The action to take for requests made without authentication. Possible values include `RedirectToLoginPage`, `AllowAnonymous`, `Return401`, and `Return403`. Defaults to `RedirectToLoginPage`. + // +kubebuilder:validation:Optional + UnauthenticatedAction *string `json:"unauthenticatedAction,omitempty" tf:"unauthenticated_action,omitempty"` +} + +type LinuxWebAppSlotAuthSettingsV2TwitterV2InitParameters struct { + + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + ConsumerKey *string `json:"consumerKey,omitempty" tf:"consumer_key,omitempty"` + + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + ConsumerSecretSettingName *string `json:"consumerSecretSettingName,omitempty" tf:"consumer_secret_setting_name,omitempty"` +} + +type LinuxWebAppSlotAuthSettingsV2TwitterV2Observation struct { + + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. 
+ ConsumerKey *string `json:"consumerKey,omitempty" tf:"consumer_key,omitempty"` + + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + ConsumerSecretSettingName *string `json:"consumerSecretSettingName,omitempty" tf:"consumer_secret_setting_name,omitempty"` +} + +type LinuxWebAppSlotAuthSettingsV2TwitterV2Parameters struct { + + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // +kubebuilder:validation:Optional + ConsumerKey *string `json:"consumerKey" tf:"consumer_key,omitempty"` + + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + // +kubebuilder:validation:Optional + ConsumerSecretSettingName *string `json:"consumerSecretSettingName" tf:"consumer_secret_setting_name,omitempty"` +} + +type LinuxWebAppSlotBackupInitParameters struct { + + // Should this backup job be enabled? Defaults to true. + // Should this backup job be enabled? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The name which should be used for this Backup. + // The name which should be used for this Backup. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // An schedule block as defined below. + Schedule *LinuxWebAppSlotBackupScheduleInitParameters `json:"schedule,omitempty" tf:"schedule,omitempty"` +} + +type LinuxWebAppSlotBackupObservation struct { + + // Should this backup job be enabled? Defaults to true. + // Should this backup job be enabled? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The name which should be used for this Backup. 
+ // The name which should be used for this Backup. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // An schedule block as defined below. + Schedule *LinuxWebAppSlotBackupScheduleObservation `json:"schedule,omitempty" tf:"schedule,omitempty"` +} + +type LinuxWebAppSlotBackupParameters struct { + + // Should this backup job be enabled? Defaults to true. + // Should this backup job be enabled? + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The name which should be used for this Backup. + // The name which should be used for this Backup. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // An schedule block as defined below. + // +kubebuilder:validation:Optional + Schedule *LinuxWebAppSlotBackupScheduleParameters `json:"schedule" tf:"schedule,omitempty"` + + // The SAS URL to the container. + // The SAS URL to the container. + // +kubebuilder:validation:Required + StorageAccountURLSecretRef v1.SecretKeySelector `json:"storageAccountUrlSecretRef" tf:"-"` +} + +type LinuxWebAppSlotBackupScheduleInitParameters struct { + + // How often the backup should be executed (e.g. for weekly backup, this should be set to 7 and frequency_unit should be set to Day). + // How often the backup should be executed (e.g. for weekly backup, this should be set to `7` and `frequency_unit` should be set to `Day`). + FrequencyInterval *float64 `json:"frequencyInterval,omitempty" tf:"frequency_interval,omitempty"` + + // The unit of time for how often the backup should take place. Possible values include: Day, Hour + // The unit of time for how often the backup should take place. Possible values include: `Day` and `Hour`. + FrequencyUnit *string `json:"frequencyUnit,omitempty" tf:"frequency_unit,omitempty"` + + // Should the service keep at least one backup, regardless of the age of backup? Defaults to false. 
+ // Should the service keep at least one backup, regardless of age of backup. Defaults to `false`. + KeepAtLeastOneBackup *bool `json:"keepAtLeastOneBackup,omitempty" tf:"keep_at_least_one_backup,omitempty"` + + // After how many days backups should be deleted. Defaults to 30. + // After how many days backups should be deleted. + RetentionPeriodDays *float64 `json:"retentionPeriodDays,omitempty" tf:"retention_period_days,omitempty"` + + // When the schedule should start working in RFC-3339 format. + // When the schedule should start working in RFC-3339 format. + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` +} + +type LinuxWebAppSlotBackupScheduleObservation struct { + + // How often the backup should be executed (e.g. for weekly backup, this should be set to 7 and frequency_unit should be set to Day). + // How often the backup should be executed (e.g. for weekly backup, this should be set to `7` and `frequency_unit` should be set to `Day`). + FrequencyInterval *float64 `json:"frequencyInterval,omitempty" tf:"frequency_interval,omitempty"` + + // The unit of time for how often the backup should take place. Possible values include: Day, Hour + // The unit of time for how often the backup should take place. Possible values include: `Day` and `Hour`. + FrequencyUnit *string `json:"frequencyUnit,omitempty" tf:"frequency_unit,omitempty"` + + // Should the service keep at least one backup, regardless of the age of backup? Defaults to false. + // Should the service keep at least one backup, regardless of age of backup. Defaults to `false`. + KeepAtLeastOneBackup *bool `json:"keepAtLeastOneBackup,omitempty" tf:"keep_at_least_one_backup,omitempty"` + + // The time the backup was last attempted. + LastExecutionTime *string `json:"lastExecutionTime,omitempty" tf:"last_execution_time,omitempty"` + + // After how many days backups should be deleted. Defaults to 30. + // After how many days backups should be deleted. 
+ RetentionPeriodDays *float64 `json:"retentionPeriodDays,omitempty" tf:"retention_period_days,omitempty"` + + // When the schedule should start working in RFC-3339 format. + // When the schedule should start working in RFC-3339 format. + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` +} + +type LinuxWebAppSlotBackupScheduleParameters struct { + + // How often the backup should be executed (e.g. for weekly backup, this should be set to 7 and frequency_unit should be set to Day). + // How often the backup should be executed (e.g. for weekly backup, this should be set to `7` and `frequency_unit` should be set to `Day`). + // +kubebuilder:validation:Optional + FrequencyInterval *float64 `json:"frequencyInterval" tf:"frequency_interval,omitempty"` + + // The unit of time for how often the backup should take place. Possible values include: Day, Hour + // The unit of time for how often the backup should take place. Possible values include: `Day` and `Hour`. + // +kubebuilder:validation:Optional + FrequencyUnit *string `json:"frequencyUnit" tf:"frequency_unit,omitempty"` + + // Should the service keep at least one backup, regardless of the age of backup? Defaults to false. + // Should the service keep at least one backup, regardless of age of backup. Defaults to `false`. + // +kubebuilder:validation:Optional + KeepAtLeastOneBackup *bool `json:"keepAtLeastOneBackup,omitempty" tf:"keep_at_least_one_backup,omitempty"` + + // After how many days backups should be deleted. Defaults to 30. + // After how many days backups should be deleted. + // +kubebuilder:validation:Optional + RetentionPeriodDays *float64 `json:"retentionPeriodDays,omitempty" tf:"retention_period_days,omitempty"` + + // When the schedule should start working in RFC-3339 format. + // When the schedule should start working in RFC-3339 format. 
+ // +kubebuilder:validation:Optional + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` +} + +type LinuxWebAppSlotConnectionStringInitParameters struct { + + // The name of the Connection String. + // The name which should be used for this Connection. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Type of database. Possible values include APIHub, Custom, DocDb, EventHub, MySQL, NotificationHub, PostgreSQL, RedisCache, ServiceBus, SQLAzure, and SQLServer. + // Type of database. Possible values include: `MySQL`, `SQLServer`, `SQLAzure`, `Custom`, `NotificationHub`, `ServiceBus`, `EventHub`, `APIHub`, `DocDb`, `RedisCache`, and `PostgreSQL`. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type LinuxWebAppSlotConnectionStringObservation struct { + + // The name of the Connection String. + // The name which should be used for this Connection. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Type of database. Possible values include APIHub, Custom, DocDb, EventHub, MySQL, NotificationHub, PostgreSQL, RedisCache, ServiceBus, SQLAzure, and SQLServer. + // Type of database. Possible values include: `MySQL`, `SQLServer`, `SQLAzure`, `Custom`, `NotificationHub`, `ServiceBus`, `EventHub`, `APIHub`, `DocDb`, `RedisCache`, and `PostgreSQL`. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type LinuxWebAppSlotConnectionStringParameters struct { + + // The name of the Connection String. + // The name which should be used for this Connection. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Type of database. Possible values include APIHub, Custom, DocDb, EventHub, MySQL, NotificationHub, PostgreSQL, RedisCache, ServiceBus, SQLAzure, and SQLServer. + // Type of database. Possible values include: `MySQL`, `SQLServer`, `SQLAzure`, `Custom`, `NotificationHub`, `ServiceBus`, `EventHub`, `APIHub`, `DocDb`, `RedisCache`, and `PostgreSQL`. 
+ // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` + + // The connection string value. + // The connection string value. + // +kubebuilder:validation:Required + ValueSecretRef v1.SecretKeySelector `json:"valueSecretRef" tf:"-"` +} + +type LinuxWebAppSlotIdentityInitParameters struct { + + // A list of User Assigned Managed Identity IDs to be assigned to this Linux Web App Slot. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Linux Web App Slot. Possible values are SystemAssigned, UserAssigned and SystemAssigned, UserAssigned (to enable both). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type LinuxWebAppSlotIdentityObservation struct { + + // A list of User Assigned Managed Identity IDs to be assigned to this Linux Web App Slot. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Principal ID associated with this Managed Service Identity. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID associated with this Managed Service Identity. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Linux Web App Slot. Possible values are SystemAssigned, UserAssigned and SystemAssigned, UserAssigned (to enable both). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type LinuxWebAppSlotIdentityParameters struct { + + // A list of User Assigned Managed Identity IDs to be assigned to this Linux Web App Slot. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Linux Web App Slot. 
Possible values are SystemAssigned, UserAssigned and SystemAssigned, UserAssigned (to enable both). + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type LinuxWebAppSlotInitParameters struct { + + // The ID of the Linux Web App this Deployment Slot will be part of. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/web/v1beta2.LinuxWebApp + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + AppServiceID *string `json:"appServiceId,omitempty" tf:"app_service_id,omitempty"` + + // Reference to a LinuxWebApp in web to populate appServiceId. + // +kubebuilder:validation:Optional + AppServiceIDRef *v1.Reference `json:"appServiceIdRef,omitempty" tf:"-"` + + // Selector for a LinuxWebApp in web to populate appServiceId. + // +kubebuilder:validation:Optional + AppServiceIDSelector *v1.Selector `json:"appServiceIdSelector,omitempty" tf:"-"` + + // A map of key-value pairs of App Settings. + // +mapType=granular + AppSettings map[string]*string `json:"appSettings,omitempty" tf:"app_settings,omitempty"` + + // An auth_settings block as defined below. + AuthSettings *LinuxWebAppSlotAuthSettingsInitParameters `json:"authSettings,omitempty" tf:"auth_settings,omitempty"` + + // An auth_settings_v2 block as defined below. + AuthSettingsV2 *LinuxWebAppSlotAuthSettingsV2InitParameters `json:"authSettingsV2,omitempty" tf:"auth_settings_v2,omitempty"` + + // A backup block as defined below. + Backup *LinuxWebAppSlotBackupInitParameters `json:"backup,omitempty" tf:"backup,omitempty"` + + // Should Client Affinity be enabled? + ClientAffinityEnabled *bool `json:"clientAffinityEnabled,omitempty" tf:"client_affinity_enabled,omitempty"` + + // Should Client Certificates be enabled? 
+ ClientCertificateEnabled *bool `json:"clientCertificateEnabled,omitempty" tf:"client_certificate_enabled,omitempty"` + + // Paths to exclude when using client certificates, separated by ; + // Paths to exclude when using client certificates, separated by ; + ClientCertificateExclusionPaths *string `json:"clientCertificateExclusionPaths,omitempty" tf:"client_certificate_exclusion_paths,omitempty"` + + // The Client Certificate mode. Possible values are Required, Optional, and OptionalInteractiveUser. This property has no effect when client_cert_enabled is false. Defaults to Required. + ClientCertificateMode *string `json:"clientCertificateMode,omitempty" tf:"client_certificate_mode,omitempty"` + + // One or more connection_string blocks as defined below. + ConnectionString []LinuxWebAppSlotConnectionStringInitParameters `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // Should the Linux Web App be enabled? Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Should the default FTP Basic Authentication publishing profile be enabled. Defaults to true. + FtpPublishBasicAuthenticationEnabled *bool `json:"ftpPublishBasicAuthenticationEnabled,omitempty" tf:"ftp_publish_basic_authentication_enabled,omitempty"` + + // Should the Linux Web App require HTTPS connections. Defaults to false. + HTTPSOnly *bool `json:"httpsOnly,omitempty" tf:"https_only,omitempty"` + + // An identity block as defined below. + Identity *LinuxWebAppSlotIdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The User Assigned Identity ID used for accessing KeyVault secrets. The identity must be assigned to the application in the identity block. For more information see - Access vaults with a user-assigned identity. + KeyVaultReferenceIdentityID *string `json:"keyVaultReferenceIdentityId,omitempty" tf:"key_vault_reference_identity_id,omitempty"` + + // A logs block as defined below. 
+ Logs *LinuxWebAppSlotLogsInitParameters `json:"logs,omitempty" tf:"logs,omitempty"` + + // The name which should be used for this Linux Web App Slot. Changing this forces a new Linux Web App Slot to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Should public network access be enabled for the Web App. Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The ID of the Service Plan in which to run this slot. If not specified the same Service Plan as the Linux Web App will be used. + ServicePlanID *string `json:"servicePlanId,omitempty" tf:"service_plan_id,omitempty"` + + // A site_config block as defined below. + SiteConfig *LinuxWebAppSlotSiteConfigInitParameters `json:"siteConfig,omitempty" tf:"site_config,omitempty"` + + // One or more storage_account blocks as defined below. + StorageAccount []LinuxWebAppSlotStorageAccountInitParameters `json:"storageAccount,omitempty" tf:"storage_account,omitempty"` + + // A mapping of tags that should be assigned to the Linux Web App. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The subnet id which will be used by this Web App Slot for regional virtual network integration. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDRef *v1.Reference `json:"virtualNetworkSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate virtualNetworkSubnetId. 
+ // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDSelector *v1.Selector `json:"virtualNetworkSubnetIdSelector,omitempty" tf:"-"` + + // Should the default WebDeploy Basic Authentication publishing credentials enabled. Defaults to true. + WebdeployPublishBasicAuthenticationEnabled *bool `json:"webdeployPublishBasicAuthenticationEnabled,omitempty" tf:"webdeploy_publish_basic_authentication_enabled,omitempty"` + + // The local path and filename of the Zip packaged application to deploy to this Linux Web App. + // The local path and filename of the Zip packaged application to deploy to this Windows Web App. **Note:** Using this value requires `WEBSITE_RUN_FROM_PACKAGE=1` on the App in `app_settings`. + ZipDeployFile *string `json:"zipDeployFile,omitempty" tf:"zip_deploy_file,omitempty"` +} + +type LinuxWebAppSlotLogsInitParameters struct { + + // A application_logs block as defined above. + ApplicationLogs *LogsApplicationLogsInitParameters `json:"applicationLogs,omitempty" tf:"application_logs,omitempty"` + + // Should detailed error messages be enabled? + DetailedErrorMessages *bool `json:"detailedErrorMessages,omitempty" tf:"detailed_error_messages,omitempty"` + + // Should the failed request tracing be enabled? + FailedRequestTracing *bool `json:"failedRequestTracing,omitempty" tf:"failed_request_tracing,omitempty"` + + // An http_logs block as defined above. + HTTPLogs *LogsHTTPLogsInitParameters `json:"httpLogs,omitempty" tf:"http_logs,omitempty"` +} + +type LinuxWebAppSlotLogsObservation struct { + + // A application_logs block as defined above. + ApplicationLogs *LogsApplicationLogsObservation `json:"applicationLogs,omitempty" tf:"application_logs,omitempty"` + + // Should detailed error messages be enabled? + DetailedErrorMessages *bool `json:"detailedErrorMessages,omitempty" tf:"detailed_error_messages,omitempty"` + + // Should the failed request tracing be enabled? 
+ FailedRequestTracing *bool `json:"failedRequestTracing,omitempty" tf:"failed_request_tracing,omitempty"` + + // An http_logs block as defined above. + HTTPLogs *LogsHTTPLogsObservation `json:"httpLogs,omitempty" tf:"http_logs,omitempty"` +} + +type LinuxWebAppSlotLogsParameters struct { + + // A application_logs block as defined above. + // +kubebuilder:validation:Optional + ApplicationLogs *LogsApplicationLogsParameters `json:"applicationLogs,omitempty" tf:"application_logs,omitempty"` + + // Should detailed error messages be enabled? + // +kubebuilder:validation:Optional + DetailedErrorMessages *bool `json:"detailedErrorMessages,omitempty" tf:"detailed_error_messages,omitempty"` + + // Should the failed request tracing be enabled? + // +kubebuilder:validation:Optional + FailedRequestTracing *bool `json:"failedRequestTracing,omitempty" tf:"failed_request_tracing,omitempty"` + + // An http_logs block as defined above. + // +kubebuilder:validation:Optional + HTTPLogs *LogsHTTPLogsParameters `json:"httpLogs,omitempty" tf:"http_logs,omitempty"` +} + +type LinuxWebAppSlotObservation struct { + + // A app_metadata. + // +mapType=granular + AppMetadata map[string]*string `json:"appMetadata,omitempty" tf:"app_metadata,omitempty"` + + // The ID of the Linux Web App this Deployment Slot will be part of. + AppServiceID *string `json:"appServiceId,omitempty" tf:"app_service_id,omitempty"` + + // A map of key-value pairs of App Settings. + // +mapType=granular + AppSettings map[string]*string `json:"appSettings,omitempty" tf:"app_settings,omitempty"` + + // An auth_settings block as defined below. + AuthSettings *LinuxWebAppSlotAuthSettingsObservation `json:"authSettings,omitempty" tf:"auth_settings,omitempty"` + + // An auth_settings_v2 block as defined below. + AuthSettingsV2 *LinuxWebAppSlotAuthSettingsV2Observation `json:"authSettingsV2,omitempty" tf:"auth_settings_v2,omitempty"` + + // A backup block as defined below. 
+ Backup *LinuxWebAppSlotBackupObservation `json:"backup,omitempty" tf:"backup,omitempty"` + + // Should Client Affinity be enabled? + ClientAffinityEnabled *bool `json:"clientAffinityEnabled,omitempty" tf:"client_affinity_enabled,omitempty"` + + // Should Client Certificates be enabled? + ClientCertificateEnabled *bool `json:"clientCertificateEnabled,omitempty" tf:"client_certificate_enabled,omitempty"` + + // Paths to exclude when using client certificates, separated by ; + // Paths to exclude when using client certificates, separated by ; + ClientCertificateExclusionPaths *string `json:"clientCertificateExclusionPaths,omitempty" tf:"client_certificate_exclusion_paths,omitempty"` + + // The Client Certificate mode. Possible values are Required, Optional, and OptionalInteractiveUser. This property has no effect when client_cert_enabled is false. Defaults to Required. + ClientCertificateMode *string `json:"clientCertificateMode,omitempty" tf:"client_certificate_mode,omitempty"` + + // One or more connection_string blocks as defined below. + ConnectionString []LinuxWebAppSlotConnectionStringObservation `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // The default hostname of the Linux Web App. + DefaultHostName *string `json:"defaultHostname,omitempty" tf:"default_hostname,omitempty"` + + // Should the Linux Web App be enabled? Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Should the default FTP Basic Authentication publishing profile be enabled. Defaults to true. + FtpPublishBasicAuthenticationEnabled *bool `json:"ftpPublishBasicAuthenticationEnabled,omitempty" tf:"ftp_publish_basic_authentication_enabled,omitempty"` + + // Should the Linux Web App require HTTPS connections. Defaults to false. + HTTPSOnly *bool `json:"httpsOnly,omitempty" tf:"https_only,omitempty"` + + // The ID of the App Service Environment used by App Service Slot. 
+ HostingEnvironmentID *string `json:"hostingEnvironmentId,omitempty" tf:"hosting_environment_id,omitempty"` + + // The ID of the Linux Web App. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. + Identity *LinuxWebAppSlotIdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // The User Assigned Identity ID used for accessing KeyVault secrets. The identity must be assigned to the application in the identity block. For more information see - Access vaults with a user-assigned identity. + KeyVaultReferenceIdentityID *string `json:"keyVaultReferenceIdentityId,omitempty" tf:"key_vault_reference_identity_id,omitempty"` + + // The Kind value for this Linux Web App. + Kind *string `json:"kind,omitempty" tf:"kind,omitempty"` + + // A logs block as defined below. + Logs *LinuxWebAppSlotLogsObservation `json:"logs,omitempty" tf:"logs,omitempty"` + + // The name which should be used for this Linux Web App Slot. Changing this forces a new Linux Web App Slot to be created. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A list of outbound IP addresses - such as ["52.23.25.3", "52.143.43.12"] + OutboundIPAddressList []*string `json:"outboundIpAddressList,omitempty" tf:"outbound_ip_address_list,omitempty"` + + // A comma-separated list of outbound IP addresses - such as 52.23.25.3,52.143.43.12. + OutboundIPAddresses *string `json:"outboundIpAddresses,omitempty" tf:"outbound_ip_addresses,omitempty"` + + // A possible_outbound_ip_address_list. + PossibleOutboundIPAddressList []*string `json:"possibleOutboundIpAddressList,omitempty" tf:"possible_outbound_ip_address_list,omitempty"` + + // A comma-separated list of outbound IP addresses - such as 52.23.25.3,52.143.43.12,52.143.43.17 - not all of which are necessarily in use. Superset of outbound_ip_addresses. 
+ PossibleOutboundIPAddresses *string `json:"possibleOutboundIpAddresses,omitempty" tf:"possible_outbound_ip_addresses,omitempty"` + + // Should public network access be enabled for the Web App. Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The ID of the Service Plan in which to run this slot. If not specified the same Service Plan as the Linux Web App will be used. + ServicePlanID *string `json:"servicePlanId,omitempty" tf:"service_plan_id,omitempty"` + + // A site_config block as defined below. + SiteConfig *LinuxWebAppSlotSiteConfigObservation `json:"siteConfig,omitempty" tf:"site_config,omitempty"` + + // One or more storage_account blocks as defined below. + StorageAccount []LinuxWebAppSlotStorageAccountObservation `json:"storageAccount,omitempty" tf:"storage_account,omitempty"` + + // A mapping of tags that should be assigned to the Linux Web App. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The subnet id which will be used by this Web App Slot for regional virtual network integration. + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` + + // Should the default WebDeploy Basic Authentication publishing credentials enabled. Defaults to true. + WebdeployPublishBasicAuthenticationEnabled *bool `json:"webdeployPublishBasicAuthenticationEnabled,omitempty" tf:"webdeploy_publish_basic_authentication_enabled,omitempty"` + + // The local path and filename of the Zip packaged application to deploy to this Linux Web App. + // The local path and filename of the Zip packaged application to deploy to this Windows Web App. **Note:** Using this value requires `WEBSITE_RUN_FROM_PACKAGE=1` on the App in `app_settings`. 
+ ZipDeployFile *string `json:"zipDeployFile,omitempty" tf:"zip_deploy_file,omitempty"` +} + +type LinuxWebAppSlotParameters struct { + + // The ID of the Linux Web App this Deployment Slot will be part of. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/web/v1beta2.LinuxWebApp + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + AppServiceID *string `json:"appServiceId,omitempty" tf:"app_service_id,omitempty"` + + // Reference to a LinuxWebApp in web to populate appServiceId. + // +kubebuilder:validation:Optional + AppServiceIDRef *v1.Reference `json:"appServiceIdRef,omitempty" tf:"-"` + + // Selector for a LinuxWebApp in web to populate appServiceId. + // +kubebuilder:validation:Optional + AppServiceIDSelector *v1.Selector `json:"appServiceIdSelector,omitempty" tf:"-"` + + // A map of key-value pairs of App Settings. + // +kubebuilder:validation:Optional + // +mapType=granular + AppSettings map[string]*string `json:"appSettings,omitempty" tf:"app_settings,omitempty"` + + // An auth_settings block as defined below. + // +kubebuilder:validation:Optional + AuthSettings *LinuxWebAppSlotAuthSettingsParameters `json:"authSettings,omitempty" tf:"auth_settings,omitempty"` + + // An auth_settings_v2 block as defined below. + // +kubebuilder:validation:Optional + AuthSettingsV2 *LinuxWebAppSlotAuthSettingsV2Parameters `json:"authSettingsV2,omitempty" tf:"auth_settings_v2,omitempty"` + + // A backup block as defined below. + // +kubebuilder:validation:Optional + Backup *LinuxWebAppSlotBackupParameters `json:"backup,omitempty" tf:"backup,omitempty"` + + // Should Client Affinity be enabled? + // +kubebuilder:validation:Optional + ClientAffinityEnabled *bool `json:"clientAffinityEnabled,omitempty" tf:"client_affinity_enabled,omitempty"` + + // Should Client Certificates be enabled? 
+ // +kubebuilder:validation:Optional + ClientCertificateEnabled *bool `json:"clientCertificateEnabled,omitempty" tf:"client_certificate_enabled,omitempty"` + + // Paths to exclude when using client certificates, separated by ; + // Paths to exclude when using client certificates, separated by ; + // +kubebuilder:validation:Optional + ClientCertificateExclusionPaths *string `json:"clientCertificateExclusionPaths,omitempty" tf:"client_certificate_exclusion_paths,omitempty"` + + // The Client Certificate mode. Possible values are Required, Optional, and OptionalInteractiveUser. This property has no effect when client_cert_enabled is false. Defaults to Required. + // +kubebuilder:validation:Optional + ClientCertificateMode *string `json:"clientCertificateMode,omitempty" tf:"client_certificate_mode,omitempty"` + + // One or more connection_string blocks as defined below. + // +kubebuilder:validation:Optional + ConnectionString []LinuxWebAppSlotConnectionStringParameters `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // Should the Linux Web App be enabled? Defaults to true. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Should the default FTP Basic Authentication publishing profile be enabled. Defaults to true. + // +kubebuilder:validation:Optional + FtpPublishBasicAuthenticationEnabled *bool `json:"ftpPublishBasicAuthenticationEnabled,omitempty" tf:"ftp_publish_basic_authentication_enabled,omitempty"` + + // Should the Linux Web App require HTTPS connections. Defaults to false. + // +kubebuilder:validation:Optional + HTTPSOnly *bool `json:"httpsOnly,omitempty" tf:"https_only,omitempty"` + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *LinuxWebAppSlotIdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The User Assigned Identity ID used for accessing KeyVault secrets. 
The identity must be assigned to the application in the identity block. For more information see - Access vaults with a user-assigned identity. + // +kubebuilder:validation:Optional + KeyVaultReferenceIdentityID *string `json:"keyVaultReferenceIdentityId,omitempty" tf:"key_vault_reference_identity_id,omitempty"` + + // A logs block as defined below. + // +kubebuilder:validation:Optional + Logs *LinuxWebAppSlotLogsParameters `json:"logs,omitempty" tf:"logs,omitempty"` + + // The name which should be used for this Linux Web App Slot. Changing this forces a new Linux Web App Slot to be created. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Should public network access be enabled for the Web App. Defaults to true. + // +kubebuilder:validation:Optional + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The ID of the Service Plan in which to run this slot. If not specified the same Service Plan as the Linux Web App will be used. + // +kubebuilder:validation:Optional + ServicePlanID *string `json:"servicePlanId,omitempty" tf:"service_plan_id,omitempty"` + + // A site_config block as defined below. + // +kubebuilder:validation:Optional + SiteConfig *LinuxWebAppSlotSiteConfigParameters `json:"siteConfig,omitempty" tf:"site_config,omitempty"` + + // One or more storage_account blocks as defined below. + // +kubebuilder:validation:Optional + StorageAccount []LinuxWebAppSlotStorageAccountParameters `json:"storageAccount,omitempty" tf:"storage_account,omitempty"` + + // A mapping of tags that should be assigned to the Linux Web App. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The subnet id which will be used by this Web App Slot for regional virtual network integration. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDRef *v1.Reference `json:"virtualNetworkSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDSelector *v1.Selector `json:"virtualNetworkSubnetIdSelector,omitempty" tf:"-"` + + // Should the default WebDeploy Basic Authentication publishing credentials enabled. Defaults to true. + // +kubebuilder:validation:Optional + WebdeployPublishBasicAuthenticationEnabled *bool `json:"webdeployPublishBasicAuthenticationEnabled,omitempty" tf:"webdeploy_publish_basic_authentication_enabled,omitempty"` + + // The local path and filename of the Zip packaged application to deploy to this Linux Web App. + // The local path and filename of the Zip packaged application to deploy to this Windows Web App. **Note:** Using this value requires `WEBSITE_RUN_FROM_PACKAGE=1` on the App in `app_settings`. + // +kubebuilder:validation:Optional + ZipDeployFile *string `json:"zipDeployFile,omitempty" tf:"zip_deploy_file,omitempty"` +} + +type LinuxWebAppSlotSiteConfigApplicationStackInitParameters struct { + DockerImage *string `json:"dockerImage,omitempty" tf:"docker_image,omitempty"` + + // The docker image, including tag, to be used. e.g. appsvc/staticsite:latest. 
+ DockerImageName *string `json:"dockerImageName,omitempty" tf:"docker_image_name,omitempty"` + + DockerImageTag *string `json:"dockerImageTag,omitempty" tf:"docker_image_tag,omitempty"` + + // The URL of the container registry where the docker_image_name is located. e.g. https://index.docker.io or https://mcr.microsoft.com. This value is required with docker_image_name. + DockerRegistryURL *string `json:"dockerRegistryUrl,omitempty" tf:"docker_registry_url,omitempty"` + + // The User Name to use for authentication against the registry to pull the image. + DockerRegistryUsername *string `json:"dockerRegistryUsername,omitempty" tf:"docker_registry_username,omitempty"` + + // The version of .NET to use. Possible values include 3.1, 5.0, 6.0, 7.0 and 8.0. + DotnetVersion *string `json:"dotnetVersion,omitempty" tf:"dotnet_version,omitempty"` + + // The version of Go to use. Possible values include 1.18, and 1.19. + GoVersion *string `json:"goVersion,omitempty" tf:"go_version,omitempty"` + + // The Java server type. Possible values include JAVA, TOMCAT, and JBOSSEAP. + JavaServer *string `json:"javaServer,omitempty" tf:"java_server,omitempty"` + + // The Version of the java_server to use. + JavaServerVersion *string `json:"javaServerVersion,omitempty" tf:"java_server_version,omitempty"` + + // The Version of Java to use. Possible values include 8, 11, and 17. + JavaVersion *string `json:"javaVersion,omitempty" tf:"java_version,omitempty"` + + // The version of Node to run. Possible values are 12-lts, 14-lts, 16-lts, 18-lts and 20-lts. This property conflicts with java_version. + NodeVersion *string `json:"nodeVersion,omitempty" tf:"node_version,omitempty"` + + // The version of PHP to run. Possible values are 7.4, 8.0, 8.1 and 8.2. + PHPVersion *string `json:"phpVersion,omitempty" tf:"php_version,omitempty"` + + // The version of Python to run. Possible values include 3.7, 3.8, 3.9, 3.10, 3.11 and 3.12. 
+ PythonVersion *string `json:"pythonVersion,omitempty" tf:"python_version,omitempty"` + + // The version of Ruby to run. Possible values include 2.6 and 2.7. + RubyVersion *string `json:"rubyVersion,omitempty" tf:"ruby_version,omitempty"` +} + +type LinuxWebAppSlotSiteConfigApplicationStackObservation struct { + DockerImage *string `json:"dockerImage,omitempty" tf:"docker_image,omitempty"` + + // The docker image, including tag, to be used. e.g. appsvc/staticsite:latest. + DockerImageName *string `json:"dockerImageName,omitempty" tf:"docker_image_name,omitempty"` + + DockerImageTag *string `json:"dockerImageTag,omitempty" tf:"docker_image_tag,omitempty"` + + // The URL of the container registry where the docker_image_name is located. e.g. https://index.docker.io or https://mcr.microsoft.com. This value is required with docker_image_name. + DockerRegistryURL *string `json:"dockerRegistryUrl,omitempty" tf:"docker_registry_url,omitempty"` + + // The User Name to use for authentication against the registry to pull the image. + DockerRegistryUsername *string `json:"dockerRegistryUsername,omitempty" tf:"docker_registry_username,omitempty"` + + // The version of .NET to use. Possible values include 3.1, 5.0, 6.0, 7.0 and 8.0. + DotnetVersion *string `json:"dotnetVersion,omitempty" tf:"dotnet_version,omitempty"` + + // The version of Go to use. Possible values include 1.18, and 1.19. + GoVersion *string `json:"goVersion,omitempty" tf:"go_version,omitempty"` + + // The Java server type. Possible values include JAVA, TOMCAT, and JBOSSEAP. + JavaServer *string `json:"javaServer,omitempty" tf:"java_server,omitempty"` + + // The Version of the java_server to use. + JavaServerVersion *string `json:"javaServerVersion,omitempty" tf:"java_server_version,omitempty"` + + // The Version of Java to use. Possible values include 8, 11, and 17. + JavaVersion *string `json:"javaVersion,omitempty" tf:"java_version,omitempty"` + + // The version of Node to run. 
Possible values are 12-lts, 14-lts, 16-lts, 18-lts and 20-lts. This property conflicts with java_version. + NodeVersion *string `json:"nodeVersion,omitempty" tf:"node_version,omitempty"` + + // The version of PHP to run. Possible values are 7.4, 8.0, 8.1 and 8.2. + PHPVersion *string `json:"phpVersion,omitempty" tf:"php_version,omitempty"` + + // The version of Python to run. Possible values include 3.7, 3.8, 3.9, 3.10, 3.11 and 3.12. + PythonVersion *string `json:"pythonVersion,omitempty" tf:"python_version,omitempty"` + + // The version of Ruby to run. Possible values include 2.6 and 2.7. + RubyVersion *string `json:"rubyVersion,omitempty" tf:"ruby_version,omitempty"` +} + +type LinuxWebAppSlotSiteConfigApplicationStackParameters struct { + + // +kubebuilder:validation:Optional + DockerImage *string `json:"dockerImage,omitempty" tf:"docker_image,omitempty"` + + // The docker image, including tag, to be used. e.g. appsvc/staticsite:latest. + // +kubebuilder:validation:Optional + DockerImageName *string `json:"dockerImageName,omitempty" tf:"docker_image_name,omitempty"` + + // +kubebuilder:validation:Optional + DockerImageTag *string `json:"dockerImageTag,omitempty" tf:"docker_image_tag,omitempty"` + + // The User Name to use for authentication against the registry to pull the image. + // +kubebuilder:validation:Optional + DockerRegistryPasswordSecretRef *v1.SecretKeySelector `json:"dockerRegistryPasswordSecretRef,omitempty" tf:"-"` + + // The URL of the container registry where the docker_image_name is located. e.g. https://index.docker.io or https://mcr.microsoft.com. This value is required with docker_image_name. + // +kubebuilder:validation:Optional + DockerRegistryURL *string `json:"dockerRegistryUrl,omitempty" tf:"docker_registry_url,omitempty"` + + // The User Name to use for authentication against the registry to pull the image. 
+ // +kubebuilder:validation:Optional + DockerRegistryUsername *string `json:"dockerRegistryUsername,omitempty" tf:"docker_registry_username,omitempty"` + + // The version of .NET to use. Possible values include 3.1, 5.0, 6.0, 7.0 and 8.0. + // +kubebuilder:validation:Optional + DotnetVersion *string `json:"dotnetVersion,omitempty" tf:"dotnet_version,omitempty"` + + // The version of Go to use. Possible values include 1.18, and 1.19. + // +kubebuilder:validation:Optional + GoVersion *string `json:"goVersion,omitempty" tf:"go_version,omitempty"` + + // The Java server type. Possible values include JAVA, TOMCAT, and JBOSSEAP. + // +kubebuilder:validation:Optional + JavaServer *string `json:"javaServer,omitempty" tf:"java_server,omitempty"` + + // The Version of the java_server to use. + // +kubebuilder:validation:Optional + JavaServerVersion *string `json:"javaServerVersion,omitempty" tf:"java_server_version,omitempty"` + + // The Version of Java to use. Possible values include 8, 11, and 17. + // +kubebuilder:validation:Optional + JavaVersion *string `json:"javaVersion,omitempty" tf:"java_version,omitempty"` + + // The version of Node to run. Possible values are 12-lts, 14-lts, 16-lts, 18-lts and 20-lts. This property conflicts with java_version. + // +kubebuilder:validation:Optional + NodeVersion *string `json:"nodeVersion,omitempty" tf:"node_version,omitempty"` + + // The version of PHP to run. Possible values are 7.4, 8.0, 8.1 and 8.2. + // +kubebuilder:validation:Optional + PHPVersion *string `json:"phpVersion,omitempty" tf:"php_version,omitempty"` + + // The version of Python to run. Possible values include 3.7, 3.8, 3.9, 3.10, 3.11 and 3.12. + // +kubebuilder:validation:Optional + PythonVersion *string `json:"pythonVersion,omitempty" tf:"python_version,omitempty"` + + // The version of Ruby to run. Possible values include 2.6 and 2.7. 
+ // +kubebuilder:validation:Optional + RubyVersion *string `json:"rubyVersion,omitempty" tf:"ruby_version,omitempty"` +} + +type LinuxWebAppSlotSiteConfigCorsInitParameters struct { + + // Specifies a list of origins that should be allowed to make cross-origin calls. + // Specifies a list of origins that should be allowed to make cross-origin calls. + // +listType=set + AllowedOrigins []*string `json:"allowedOrigins,omitempty" tf:"allowed_origins,omitempty"` + + // Whether CORS requests with credentials are allowed. Defaults to false + // Are credentials allowed in CORS requests? Defaults to `false`. + SupportCredentials *bool `json:"supportCredentials,omitempty" tf:"support_credentials,omitempty"` +} + +type LinuxWebAppSlotSiteConfigCorsObservation struct { + + // Specifies a list of origins that should be allowed to make cross-origin calls. + // Specifies a list of origins that should be allowed to make cross-origin calls. + // +listType=set + AllowedOrigins []*string `json:"allowedOrigins,omitempty" tf:"allowed_origins,omitempty"` + + // Whether CORS requests with credentials are allowed. Defaults to false + // Are credentials allowed in CORS requests? Defaults to `false`. + SupportCredentials *bool `json:"supportCredentials,omitempty" tf:"support_credentials,omitempty"` +} + +type LinuxWebAppSlotSiteConfigCorsParameters struct { + + // Specifies a list of origins that should be allowed to make cross-origin calls. + // Specifies a list of origins that should be allowed to make cross-origin calls. + // +kubebuilder:validation:Optional + // +listType=set + AllowedOrigins []*string `json:"allowedOrigins,omitempty" tf:"allowed_origins,omitempty"` + + // Whether CORS requests with credentials are allowed. Defaults to false + // Are credentials allowed in CORS requests? Defaults to `false`. 
+ // +kubebuilder:validation:Optional + SupportCredentials *bool `json:"supportCredentials,omitempty" tf:"support_credentials,omitempty"` +} + +type LinuxWebAppSlotSiteConfigIPRestrictionHeadersInitParameters struct { + + // Specifies a list of Azure Front Door IDs. + XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid"` + + // Specifies if a Front Door Health Probe should be expected. The only possible value is 1. + XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe"` + + // Specifies a list of addresses for which matching should be applied. Omitting this value means allow any. + XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for"` + + // Specifies a list of Hosts for which matching should be applied. + XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host"` +} + +type LinuxWebAppSlotSiteConfigIPRestrictionHeadersObservation struct { + + // Specifies a list of Azure Front Door IDs. + XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid,omitempty"` + + // Specifies if a Front Door Health Probe should be expected. The only possible value is 1. + XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe,omitempty"` + + // Specifies a list of addresses for which matching should be applied. Omitting this value means allow any. + XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for,omitempty"` + + // Specifies a list of Hosts for which matching should be applied. + XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host,omitempty"` +} + +type LinuxWebAppSlotSiteConfigIPRestrictionHeadersParameters struct { + + // Specifies a list of Azure Front Door IDs. + // +kubebuilder:validation:Optional + XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid"` + + // Specifies if a Front Door Health Probe should be expected. The only possible value is 1. 
+ // +kubebuilder:validation:Optional + XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe"` + + // Specifies a list of addresses for which matching should be applied. Omitting this value means allow any. + // +kubebuilder:validation:Optional + XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for"` + + // Specifies a list of Hosts for which matching should be applied. + // +kubebuilder:validation:Optional + XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host"` +} + +type LinuxWebAppSlotSiteConfigIPRestrictionInitParameters struct { + + // The action to take. Possible values are Allow or Deny. Defaults to Allow. + // The action to take. Possible values are `Allow` or `Deny`. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The Description of this IP Restriction. + // The description of the IP restriction rule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A headers block as defined above. + Headers []LinuxWebAppSlotSiteConfigIPRestrictionHeadersInitParameters `json:"headers,omitempty" tf:"headers,omitempty"` + + // The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + // The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // The name which should be used for this Storage Account. + // The name which should be used for this `ip_restriction`. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The priority value of this ip_restriction. Defaults to 65000. + // The priority value of this `ip_restriction`. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Service Tag used for this IP Restriction. + // The Service Tag used for this IP Restriction. 
+ ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag,omitempty"` + + // The subnet id which will be used by this Web App Slot for regional virtual network integration. + // The Virtual Network Subnet ID used for this IP Restriction. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDRef *v1.Reference `json:"virtualNetworkSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDSelector *v1.Selector `json:"virtualNetworkSubnetIdSelector,omitempty" tf:"-"` +} + +type LinuxWebAppSlotSiteConfigIPRestrictionObservation struct { + + // The action to take. Possible values are Allow or Deny. Defaults to Allow. + // The action to take. Possible values are `Allow` or `Deny`. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The Description of this IP Restriction. + // The description of the IP restriction rule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A headers block as defined above. + Headers []LinuxWebAppSlotSiteConfigIPRestrictionHeadersObservation `json:"headers,omitempty" tf:"headers,omitempty"` + + // The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + // The CIDR notation of the IP or IP Range to match. 
For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // The name which should be used for this Storage Account. + // The name which should be used for this `ip_restriction`. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The priority value of this ip_restriction. Defaults to 65000. + // The priority value of this `ip_restriction`. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Service Tag used for this IP Restriction. + // The Service Tag used for this IP Restriction. + ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag,omitempty"` + + // The subnet id which will be used by this Web App Slot for regional virtual network integration. + // The Virtual Network Subnet ID used for this IP Restriction. + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` +} + +type LinuxWebAppSlotSiteConfigIPRestrictionParameters struct { + + // The action to take. Possible values are Allow or Deny. Defaults to Allow. + // The action to take. Possible values are `Allow` or `Deny`. + // +kubebuilder:validation:Optional + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The Description of this IP Restriction. + // The description of the IP restriction rule. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A headers block as defined above. + // +kubebuilder:validation:Optional + Headers []LinuxWebAppSlotSiteConfigIPRestrictionHeadersParameters `json:"headers,omitempty" tf:"headers,omitempty"` + + // The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + // The CIDR notation of the IP or IP Range to match. 
For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + // +kubebuilder:validation:Optional + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // The name which should be used for this Storage Account. + // The name which should be used for this `ip_restriction`. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The priority value of this ip_restriction. Defaults to 65000. + // The priority value of this `ip_restriction`. + // +kubebuilder:validation:Optional + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Service Tag used for this IP Restriction. + // The Service Tag used for this IP Restriction. + // +kubebuilder:validation:Optional + ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag,omitempty"` + + // The subnet id which will be used by this Web App Slot for regional virtual network integration. + // The Virtual Network Subnet ID used for this IP Restriction. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDRef *v1.Reference `json:"virtualNetworkSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDSelector *v1.Selector `json:"virtualNetworkSubnetIdSelector,omitempty" tf:"-"` +} + +type LinuxWebAppSlotSiteConfigInitParameters struct { + + // The URL to the API Definition for this Linux Web App Slot. 
+ APIDefinitionURL *string `json:"apiDefinitionUrl,omitempty" tf:"api_definition_url,omitempty"` + + // The API Management API ID this Linux Web App Slot is associated with. + APIManagementAPIID *string `json:"apiManagementApiId,omitempty" tf:"api_management_api_id,omitempty"` + + // If this Linux Web App is Always On enabled. Defaults to true. + AlwaysOn *bool `json:"alwaysOn,omitempty" tf:"always_on,omitempty"` + + // The App command line to launch. + AppCommandLine *string `json:"appCommandLine,omitempty" tf:"app_command_line,omitempty"` + + // A application_stack block as defined above. + ApplicationStack *LinuxWebAppSlotSiteConfigApplicationStackInitParameters `json:"applicationStack,omitempty" tf:"application_stack,omitempty"` + + // Should Auto heal rules be enabled? Required with auto_heal_setting. + AutoHealEnabled *bool `json:"autoHealEnabled,omitempty" tf:"auto_heal_enabled,omitempty"` + + // A auto_heal_setting block as defined above. Required with auto_heal. + AutoHealSetting *SiteConfigAutoHealSettingInitParameters `json:"autoHealSetting,omitempty" tf:"auto_heal_setting,omitempty"` + + // The Linux Web App Slot Name to automatically swap to when deployment to that slot is successfully completed. + AutoSwapSlotName *string `json:"autoSwapSlotName,omitempty" tf:"auto_swap_slot_name,omitempty"` + + // The Client ID of the Managed Service Identity to use for connections to the Azure Container Registry. + ContainerRegistryManagedIdentityClientID *string `json:"containerRegistryManagedIdentityClientId,omitempty" tf:"container_registry_managed_identity_client_id,omitempty"` + + // Should connections for Azure Container Registry use Managed Identity. + ContainerRegistryUseManagedIdentity *bool `json:"containerRegistryUseManagedIdentity,omitempty" tf:"container_registry_use_managed_identity,omitempty"` + + // A cors block as defined above. 
+ Cors *LinuxWebAppSlotSiteConfigCorsInitParameters `json:"cors,omitempty" tf:"cors,omitempty"` + + // Specifies a list of Default Documents for the Linux Web App. + DefaultDocuments []*string `json:"defaultDocuments,omitempty" tf:"default_documents,omitempty"` + + // The State of FTP / FTPS service. Possible values include AllAllowed, FtpsOnly, and Disabled. Defaults to Disabled. + FtpsState *string `json:"ftpsState,omitempty" tf:"ftps_state,omitempty"` + + // The amount of time in minutes that a node can be unhealthy before being removed from the load balancer. Possible values are between 2 and 10. Only valid in conjunction with health_check_path. + // The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between `2` and `10`. Defaults to `10`. Only valid in conjunction with `health_check_path` + HealthCheckEvictionTimeInMin *float64 `json:"healthCheckEvictionTimeInMin,omitempty" tf:"health_check_eviction_time_in_min,omitempty"` + + // The path to the Health Check. + HealthCheckPath *string `json:"healthCheckPath,omitempty" tf:"health_check_path,omitempty"` + + // Should the HTTP2 be enabled? + Http2Enabled *bool `json:"http2Enabled,omitempty" tf:"http2_enabled,omitempty"` + + // One or more ip_restriction blocks as defined above. + IPRestriction []LinuxWebAppSlotSiteConfigIPRestrictionInitParameters `json:"ipRestriction,omitempty" tf:"ip_restriction,omitempty"` + + // The Default action for traffic that does not match any ip_restriction rule. possible values include Allow and Deny. Defaults to Allow. + IPRestrictionDefaultAction *string `json:"ipRestrictionDefaultAction,omitempty" tf:"ip_restriction_default_action,omitempty"` + + // The Site load balancing. Possible values include: WeightedRoundRobin, LeastRequests, LeastResponseTime, WeightedTotalTraffic, RequestHash, PerSiteRoundRobin. Defaults to LeastRequests if omitted. 
+ LoadBalancingMode *string `json:"loadBalancingMode,omitempty" tf:"load_balancing_mode,omitempty"` + + // Use Local MySQL. Defaults to false. + LocalMySQLEnabled *bool `json:"localMysqlEnabled,omitempty" tf:"local_mysql_enabled,omitempty"` + + // Managed pipeline mode. Possible values include: Integrated, Classic. Defaults to Integrated. + ManagedPipelineMode *string `json:"managedPipelineMode,omitempty" tf:"managed_pipeline_mode,omitempty"` + + // The configures the minimum version of TLS required for SSL requests. Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + MinimumTLSVersion *string `json:"minimumTlsVersion,omitempty" tf:"minimum_tls_version,omitempty"` + + // Should Remote Debugging be enabled? Defaults to false. + RemoteDebuggingEnabled *bool `json:"remoteDebuggingEnabled,omitempty" tf:"remote_debugging_enabled,omitempty"` + + // The Remote Debugging Version. Possible values include VS2017 and VS2019 + RemoteDebuggingVersion *string `json:"remoteDebuggingVersion,omitempty" tf:"remote_debugging_version,omitempty"` + + // One or more scm_ip_restriction blocks as defined above. + ScmIPRestriction []LinuxWebAppSlotSiteConfigScmIPRestrictionInitParameters `json:"scmIpRestriction,omitempty" tf:"scm_ip_restriction,omitempty"` + + // The Default action for traffic that does not match any scm_ip_restriction rule. possible values include Allow and Deny. Defaults to Allow. + ScmIPRestrictionDefaultAction *string `json:"scmIpRestrictionDefaultAction,omitempty" tf:"scm_ip_restriction_default_action,omitempty"` + + // The configures the minimum version of TLS required for SSL requests to the SCM site Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + ScmMinimumTLSVersion *string `json:"scmMinimumTlsVersion,omitempty" tf:"scm_minimum_tls_version,omitempty"` + + // Should the Linux Web App ip_restriction configuration be used for the SCM also. 
+ ScmUseMainIPRestriction *bool `json:"scmUseMainIpRestriction,omitempty" tf:"scm_use_main_ip_restriction,omitempty"` + + // Should the Linux Web App use a 32-bit worker? Defaults to true. + Use32BitWorker *bool `json:"use32BitWorker,omitempty" tf:"use_32_bit_worker,omitempty"` + + // Should all outbound traffic have NAT Gateways, Network Security Groups and User Defined Routes applied? Defaults to false. + // Should all outbound traffic to have Virtual Network Security Groups and User Defined Routes applied? Defaults to `false`. + VnetRouteAllEnabled *bool `json:"vnetRouteAllEnabled,omitempty" tf:"vnet_route_all_enabled,omitempty"` + + // Should Web Sockets be enabled? Defaults to false. + WebsocketsEnabled *bool `json:"websocketsEnabled,omitempty" tf:"websockets_enabled,omitempty"` + + // The number of Workers for this Linux App Service Slot. + WorkerCount *float64 `json:"workerCount,omitempty" tf:"worker_count,omitempty"` +} + +type LinuxWebAppSlotSiteConfigObservation struct { + + // The URL to the API Definition for this Linux Web App Slot. + APIDefinitionURL *string `json:"apiDefinitionUrl,omitempty" tf:"api_definition_url,omitempty"` + + // The API Management API ID this Linux Web App Slot is associated with. + APIManagementAPIID *string `json:"apiManagementApiId,omitempty" tf:"api_management_api_id,omitempty"` + + // If this Linux Web App is Always On enabled. Defaults to true. + AlwaysOn *bool `json:"alwaysOn,omitempty" tf:"always_on,omitempty"` + + // The App command line to launch. + AppCommandLine *string `json:"appCommandLine,omitempty" tf:"app_command_line,omitempty"` + + // A application_stack block as defined above. + ApplicationStack *LinuxWebAppSlotSiteConfigApplicationStackObservation `json:"applicationStack,omitempty" tf:"application_stack,omitempty"` + + // Should Auto heal rules be enabled? Required with auto_heal_setting. 
+ AutoHealEnabled *bool `json:"autoHealEnabled,omitempty" tf:"auto_heal_enabled,omitempty"` + + // A auto_heal_setting block as defined above. Required with auto_heal. + AutoHealSetting *SiteConfigAutoHealSettingObservation `json:"autoHealSetting,omitempty" tf:"auto_heal_setting,omitempty"` + + // The Linux Web App Slot Name to automatically swap to when deployment to that slot is successfully completed. + AutoSwapSlotName *string `json:"autoSwapSlotName,omitempty" tf:"auto_swap_slot_name,omitempty"` + + // The Client ID of the Managed Service Identity to use for connections to the Azure Container Registry. + ContainerRegistryManagedIdentityClientID *string `json:"containerRegistryManagedIdentityClientId,omitempty" tf:"container_registry_managed_identity_client_id,omitempty"` + + // Should connections for Azure Container Registry use Managed Identity. + ContainerRegistryUseManagedIdentity *bool `json:"containerRegistryUseManagedIdentity,omitempty" tf:"container_registry_use_managed_identity,omitempty"` + + // A cors block as defined above. + Cors *LinuxWebAppSlotSiteConfigCorsObservation `json:"cors,omitempty" tf:"cors,omitempty"` + + // Specifies a list of Default Documents for the Linux Web App. + DefaultDocuments []*string `json:"defaultDocuments,omitempty" tf:"default_documents,omitempty"` + + // Should the Linux Web App be enabled? Defaults to true. + DetailedErrorLoggingEnabled *bool `json:"detailedErrorLoggingEnabled,omitempty" tf:"detailed_error_logging_enabled,omitempty"` + + // The State of FTP / FTPS service. Possible values include AllAllowed, FtpsOnly, and Disabled. Defaults to Disabled. + FtpsState *string `json:"ftpsState,omitempty" tf:"ftps_state,omitempty"` + + // The amount of time in minutes that a node can be unhealthy before being removed from the load balancer. Possible values are between 2 and 10. Only valid in conjunction with health_check_path. 
+ // The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between `2` and `10`. Defaults to `10`. Only valid in conjunction with `health_check_path` + HealthCheckEvictionTimeInMin *float64 `json:"healthCheckEvictionTimeInMin,omitempty" tf:"health_check_eviction_time_in_min,omitempty"` + + // The path to the Health Check. + HealthCheckPath *string `json:"healthCheckPath,omitempty" tf:"health_check_path,omitempty"` + + // Should the HTTP2 be enabled? + Http2Enabled *bool `json:"http2Enabled,omitempty" tf:"http2_enabled,omitempty"` + + // One or more ip_restriction blocks as defined above. + IPRestriction []LinuxWebAppSlotSiteConfigIPRestrictionObservation `json:"ipRestriction,omitempty" tf:"ip_restriction,omitempty"` + + // The Default action for traffic that does not match any ip_restriction rule. possible values include Allow and Deny. Defaults to Allow. + IPRestrictionDefaultAction *string `json:"ipRestrictionDefaultAction,omitempty" tf:"ip_restriction_default_action,omitempty"` + + LinuxFxVersion *string `json:"linuxFxVersion,omitempty" tf:"linux_fx_version,omitempty"` + + // The Site load balancing. Possible values include: WeightedRoundRobin, LeastRequests, LeastResponseTime, WeightedTotalTraffic, RequestHash, PerSiteRoundRobin. Defaults to LeastRequests if omitted. + LoadBalancingMode *string `json:"loadBalancingMode,omitempty" tf:"load_balancing_mode,omitempty"` + + // Use Local MySQL. Defaults to false. + LocalMySQLEnabled *bool `json:"localMysqlEnabled,omitempty" tf:"local_mysql_enabled,omitempty"` + + // Managed pipeline mode. Possible values include: Integrated, Classic. Defaults to Integrated. + ManagedPipelineMode *string `json:"managedPipelineMode,omitempty" tf:"managed_pipeline_mode,omitempty"` + + // The configures the minimum version of TLS required for SSL requests. Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. 
+ MinimumTLSVersion *string `json:"minimumTlsVersion,omitempty" tf:"minimum_tls_version,omitempty"` + + // Should Remote Debugging be enabled? Defaults to false. + RemoteDebuggingEnabled *bool `json:"remoteDebuggingEnabled,omitempty" tf:"remote_debugging_enabled,omitempty"` + + // The Remote Debugging Version. Possible values include VS2017 and VS2019 + RemoteDebuggingVersion *string `json:"remoteDebuggingVersion,omitempty" tf:"remote_debugging_version,omitempty"` + + // One or more scm_ip_restriction blocks as defined above. + ScmIPRestriction []LinuxWebAppSlotSiteConfigScmIPRestrictionObservation `json:"scmIpRestriction,omitempty" tf:"scm_ip_restriction,omitempty"` + + // The Default action for traffic that does not match any scm_ip_restriction rule. possible values include Allow and Deny. Defaults to Allow. + ScmIPRestrictionDefaultAction *string `json:"scmIpRestrictionDefaultAction,omitempty" tf:"scm_ip_restriction_default_action,omitempty"` + + // The configures the minimum version of TLS required for SSL requests to the SCM site Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + ScmMinimumTLSVersion *string `json:"scmMinimumTlsVersion,omitempty" tf:"scm_minimum_tls_version,omitempty"` + + ScmType *string `json:"scmType,omitempty" tf:"scm_type,omitempty"` + + // Should the Linux Web App ip_restriction configuration be used for the SCM also. + ScmUseMainIPRestriction *bool `json:"scmUseMainIpRestriction,omitempty" tf:"scm_use_main_ip_restriction,omitempty"` + + // Should the Linux Web App use a 32-bit worker? Defaults to true. + Use32BitWorker *bool `json:"use32BitWorker,omitempty" tf:"use_32_bit_worker,omitempty"` + + // Should all outbound traffic have NAT Gateways, Network Security Groups and User Defined Routes applied? Defaults to false. + // Should all outbound traffic to have Virtual Network Security Groups and User Defined Routes applied? Defaults to `false`. 
+ VnetRouteAllEnabled *bool `json:"vnetRouteAllEnabled,omitempty" tf:"vnet_route_all_enabled,omitempty"` + + // Should Web Sockets be enabled? Defaults to false. + WebsocketsEnabled *bool `json:"websocketsEnabled,omitempty" tf:"websockets_enabled,omitempty"` + + // The number of Workers for this Linux App Service Slot. + WorkerCount *float64 `json:"workerCount,omitempty" tf:"worker_count,omitempty"` +} + +type LinuxWebAppSlotSiteConfigParameters struct { + + // The URL to the API Definition for this Linux Web App Slot. + // +kubebuilder:validation:Optional + APIDefinitionURL *string `json:"apiDefinitionUrl,omitempty" tf:"api_definition_url,omitempty"` + + // The API Management API ID this Linux Web App Slot is associated with. + // +kubebuilder:validation:Optional + APIManagementAPIID *string `json:"apiManagementApiId,omitempty" tf:"api_management_api_id,omitempty"` + + // If this Linux Web App is Always On enabled. Defaults to true. + // +kubebuilder:validation:Optional + AlwaysOn *bool `json:"alwaysOn,omitempty" tf:"always_on,omitempty"` + + // The App command line to launch. + // +kubebuilder:validation:Optional + AppCommandLine *string `json:"appCommandLine,omitempty" tf:"app_command_line,omitempty"` + + // A application_stack block as defined above. + // +kubebuilder:validation:Optional + ApplicationStack *LinuxWebAppSlotSiteConfigApplicationStackParameters `json:"applicationStack,omitempty" tf:"application_stack,omitempty"` + + // Should Auto heal rules be enabled? Required with auto_heal_setting. + // +kubebuilder:validation:Optional + AutoHealEnabled *bool `json:"autoHealEnabled,omitempty" tf:"auto_heal_enabled,omitempty"` + + // A auto_heal_setting block as defined above. Required with auto_heal. 
+ // +kubebuilder:validation:Optional + AutoHealSetting *SiteConfigAutoHealSettingParameters `json:"autoHealSetting,omitempty" tf:"auto_heal_setting,omitempty"` + + // The Linux Web App Slot Name to automatically swap to when deployment to that slot is successfully completed. + // +kubebuilder:validation:Optional + AutoSwapSlotName *string `json:"autoSwapSlotName,omitempty" tf:"auto_swap_slot_name,omitempty"` + + // The Client ID of the Managed Service Identity to use for connections to the Azure Container Registry. + // +kubebuilder:validation:Optional + ContainerRegistryManagedIdentityClientID *string `json:"containerRegistryManagedIdentityClientId,omitempty" tf:"container_registry_managed_identity_client_id,omitempty"` + + // Should connections for Azure Container Registry use Managed Identity. + // +kubebuilder:validation:Optional + ContainerRegistryUseManagedIdentity *bool `json:"containerRegistryUseManagedIdentity,omitempty" tf:"container_registry_use_managed_identity,omitempty"` + + // A cors block as defined above. + // +kubebuilder:validation:Optional + Cors *LinuxWebAppSlotSiteConfigCorsParameters `json:"cors,omitempty" tf:"cors,omitempty"` + + // Specifies a list of Default Documents for the Linux Web App. + // +kubebuilder:validation:Optional + DefaultDocuments []*string `json:"defaultDocuments,omitempty" tf:"default_documents,omitempty"` + + // The State of FTP / FTPS service. Possible values include AllAllowed, FtpsOnly, and Disabled. Defaults to Disabled. + // +kubebuilder:validation:Optional + FtpsState *string `json:"ftpsState,omitempty" tf:"ftps_state,omitempty"` + + // The amount of time in minutes that a node can be unhealthy before being removed from the load balancer. Possible values are between 2 and 10. Only valid in conjunction with health_check_path. + // The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between `2` and `10`. Defaults to `10`. 
Only valid in conjunction with `health_check_path` + // +kubebuilder:validation:Optional + HealthCheckEvictionTimeInMin *float64 `json:"healthCheckEvictionTimeInMin,omitempty" tf:"health_check_eviction_time_in_min,omitempty"` + + // The path to the Health Check. + // +kubebuilder:validation:Optional + HealthCheckPath *string `json:"healthCheckPath,omitempty" tf:"health_check_path,omitempty"` + + // Should the HTTP2 be enabled? + // +kubebuilder:validation:Optional + Http2Enabled *bool `json:"http2Enabled,omitempty" tf:"http2_enabled,omitempty"` + + // One or more ip_restriction blocks as defined above. + // +kubebuilder:validation:Optional + IPRestriction []LinuxWebAppSlotSiteConfigIPRestrictionParameters `json:"ipRestriction,omitempty" tf:"ip_restriction,omitempty"` + + // The Default action for traffic that does not match any ip_restriction rule. possible values include Allow and Deny. Defaults to Allow. + // +kubebuilder:validation:Optional + IPRestrictionDefaultAction *string `json:"ipRestrictionDefaultAction,omitempty" tf:"ip_restriction_default_action,omitempty"` + + // The Site load balancing. Possible values include: WeightedRoundRobin, LeastRequests, LeastResponseTime, WeightedTotalTraffic, RequestHash, PerSiteRoundRobin. Defaults to LeastRequests if omitted. + // +kubebuilder:validation:Optional + LoadBalancingMode *string `json:"loadBalancingMode,omitempty" tf:"load_balancing_mode,omitempty"` + + // Use Local MySQL. Defaults to false. + // +kubebuilder:validation:Optional + LocalMySQLEnabled *bool `json:"localMysqlEnabled,omitempty" tf:"local_mysql_enabled,omitempty"` + + // Managed pipeline mode. Possible values include: Integrated, Classic. Defaults to Integrated. + // +kubebuilder:validation:Optional + ManagedPipelineMode *string `json:"managedPipelineMode,omitempty" tf:"managed_pipeline_mode,omitempty"` + + // The configures the minimum version of TLS required for SSL requests. Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. 
+ // +kubebuilder:validation:Optional + MinimumTLSVersion *string `json:"minimumTlsVersion,omitempty" tf:"minimum_tls_version,omitempty"` + + // Should Remote Debugging be enabled? Defaults to false. + // +kubebuilder:validation:Optional + RemoteDebuggingEnabled *bool `json:"remoteDebuggingEnabled,omitempty" tf:"remote_debugging_enabled,omitempty"` + + // The Remote Debugging Version. Possible values include VS2017 and VS2019 + // +kubebuilder:validation:Optional + RemoteDebuggingVersion *string `json:"remoteDebuggingVersion,omitempty" tf:"remote_debugging_version,omitempty"` + + // One or more scm_ip_restriction blocks as defined above. + // +kubebuilder:validation:Optional + ScmIPRestriction []LinuxWebAppSlotSiteConfigScmIPRestrictionParameters `json:"scmIpRestriction,omitempty" tf:"scm_ip_restriction,omitempty"` + + // The Default action for traffic that does not match any scm_ip_restriction rule. possible values include Allow and Deny. Defaults to Allow. + // +kubebuilder:validation:Optional + ScmIPRestrictionDefaultAction *string `json:"scmIpRestrictionDefaultAction,omitempty" tf:"scm_ip_restriction_default_action,omitempty"` + + // The configures the minimum version of TLS required for SSL requests to the SCM site Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + // +kubebuilder:validation:Optional + ScmMinimumTLSVersion *string `json:"scmMinimumTlsVersion,omitempty" tf:"scm_minimum_tls_version,omitempty"` + + // Should the Linux Web App ip_restriction configuration be used for the SCM also. + // +kubebuilder:validation:Optional + ScmUseMainIPRestriction *bool `json:"scmUseMainIpRestriction,omitempty" tf:"scm_use_main_ip_restriction,omitempty"` + + // Should the Linux Web App use a 32-bit worker? Defaults to true. 
+ // +kubebuilder:validation:Optional + Use32BitWorker *bool `json:"use32BitWorker,omitempty" tf:"use_32_bit_worker,omitempty"` + + // Should all outbound traffic have NAT Gateways, Network Security Groups and User Defined Routes applied? Defaults to false. + // Should all outbound traffic to have Virtual Network Security Groups and User Defined Routes applied? Defaults to `false`. + // +kubebuilder:validation:Optional + VnetRouteAllEnabled *bool `json:"vnetRouteAllEnabled,omitempty" tf:"vnet_route_all_enabled,omitempty"` + + // Should Web Sockets be enabled? Defaults to false. + // +kubebuilder:validation:Optional + WebsocketsEnabled *bool `json:"websocketsEnabled,omitempty" tf:"websockets_enabled,omitempty"` + + // The number of Workers for this Linux App Service Slot. + // +kubebuilder:validation:Optional + WorkerCount *float64 `json:"workerCount,omitempty" tf:"worker_count,omitempty"` +} + +type LinuxWebAppSlotSiteConfigScmIPRestrictionHeadersInitParameters struct { + + // Specifies a list of Azure Front Door IDs. + XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid"` + + // Specifies if a Front Door Health Probe should be expected. The only possible value is 1. + XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe"` + + // Specifies a list of addresses for which matching should be applied. Omitting this value means allow any. + XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for"` + + // Specifies a list of Hosts for which matching should be applied. + XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host"` +} + +type LinuxWebAppSlotSiteConfigScmIPRestrictionHeadersObservation struct { + + // Specifies a list of Azure Front Door IDs. + XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid,omitempty"` + + // Specifies if a Front Door Health Probe should be expected. The only possible value is 1. 
+ XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe,omitempty"` + + // Specifies a list of addresses for which matching should be applied. Omitting this value means allow any. + XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for,omitempty"` + + // Specifies a list of Hosts for which matching should be applied. + XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host,omitempty"` +} + +type LinuxWebAppSlotSiteConfigScmIPRestrictionHeadersParameters struct { + + // Specifies a list of Azure Front Door IDs. + // +kubebuilder:validation:Optional + XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid"` + + // Specifies if a Front Door Health Probe should be expected. The only possible value is 1. + // +kubebuilder:validation:Optional + XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe"` + + // Specifies a list of addresses for which matching should be applied. Omitting this value means allow any. + // +kubebuilder:validation:Optional + XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for"` + + // Specifies a list of Hosts for which matching should be applied. + // +kubebuilder:validation:Optional + XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host"` +} + +type LinuxWebAppSlotSiteConfigScmIPRestrictionInitParameters struct { + + // The action to take. Possible values are Allow or Deny. Defaults to Allow. + // The action to take. Possible values are `Allow` or `Deny`. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The Description of this IP Restriction. + // The description of the IP restriction rule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A headers block as defined above. 
+ Headers []LinuxWebAppSlotSiteConfigScmIPRestrictionHeadersInitParameters `json:"headers,omitempty" tf:"headers,omitempty"` + + // The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + // The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // The name which should be used for this Storage Account. + // The name which should be used for this `ip_restriction`. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The priority value of this ip_restriction. Defaults to 65000. + // The priority value of this `ip_restriction`. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Service Tag used for this IP Restriction. + // The Service Tag used for this IP Restriction. + ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag,omitempty"` + + // The subnet id which will be used by this Web App Slot for regional virtual network integration. + // The Virtual Network Subnet ID used for this IP Restriction. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDRef *v1.Reference `json:"virtualNetworkSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate virtualNetworkSubnetId. 
+ // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDSelector *v1.Selector `json:"virtualNetworkSubnetIdSelector,omitempty" tf:"-"` +} + +type LinuxWebAppSlotSiteConfigScmIPRestrictionObservation struct { + + // The action to take. Possible values are Allow or Deny. Defaults to Allow. + // The action to take. Possible values are `Allow` or `Deny`. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The Description of this IP Restriction. + // The description of the IP restriction rule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A headers block as defined above. + Headers []LinuxWebAppSlotSiteConfigScmIPRestrictionHeadersObservation `json:"headers,omitempty" tf:"headers,omitempty"` + + // The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + // The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // The name which should be used for this Storage Account. + // The name which should be used for this `ip_restriction`. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The priority value of this ip_restriction. Defaults to 65000. + // The priority value of this `ip_restriction`. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Service Tag used for this IP Restriction. + // The Service Tag used for this IP Restriction. + ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag,omitempty"` + + // The subnet id which will be used by this Web App Slot for regional virtual network integration. + // The Virtual Network Subnet ID used for this IP Restriction. 
+ VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` +} + +type LinuxWebAppSlotSiteConfigScmIPRestrictionParameters struct { + + // The action to take. Possible values are Allow or Deny. Defaults to Allow. + // The action to take. Possible values are `Allow` or `Deny`. + // +kubebuilder:validation:Optional + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The Description of this IP Restriction. + // The description of the IP restriction rule. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A headers block as defined above. + // +kubebuilder:validation:Optional + Headers []LinuxWebAppSlotSiteConfigScmIPRestrictionHeadersParameters `json:"headers,omitempty" tf:"headers,omitempty"` + + // The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + // The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + // +kubebuilder:validation:Optional + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // The name which should be used for this Storage Account. + // The name which should be used for this `ip_restriction`. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The priority value of this ip_restriction. Defaults to 65000. + // The priority value of this `ip_restriction`. + // +kubebuilder:validation:Optional + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Service Tag used for this IP Restriction. + // The Service Tag used for this IP Restriction. + // +kubebuilder:validation:Optional + ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag,omitempty"` + + // The subnet id which will be used by this Web App Slot for regional virtual network integration. 
+ // The Virtual Network Subnet ID used for this IP Restriction. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDRef *v1.Reference `json:"virtualNetworkSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDSelector *v1.Selector `json:"virtualNetworkSubnetIdSelector,omitempty" tf:"-"` +} + +type LinuxWebAppSlotSiteCredentialInitParameters struct { +} + +type LinuxWebAppSlotSiteCredentialObservation struct { + + // The Site Credentials Username used for publishing. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Site Credentials Password used for publishing. + Password *string `json:"password,omitempty" tf:"password,omitempty"` +} + +type LinuxWebAppSlotSiteCredentialParameters struct { +} + +type LinuxWebAppSlotStorageAccountInitParameters struct { + + // The Name of the Storage Account. + AccountName *string `json:"accountName,omitempty" tf:"account_name,omitempty"` + + // The path at which to mount the storage share. + MountPath *string `json:"mountPath,omitempty" tf:"mount_path,omitempty"` + + // The name which should be used for this Storage Account. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Name of the File Share or Container Name for Blob storage. + ShareName *string `json:"shareName,omitempty" tf:"share_name,omitempty"` + + // The Azure Storage Type. 
Possible values include AzureFiles and AzureBlob + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type LinuxWebAppSlotStorageAccountObservation struct { + + // The Name of the Storage Account. + AccountName *string `json:"accountName,omitempty" tf:"account_name,omitempty"` + + // The path at which to mount the storage share. + MountPath *string `json:"mountPath,omitempty" tf:"mount_path,omitempty"` + + // The name which should be used for this Storage Account. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Name of the File Share or Container Name for Blob storage. + ShareName *string `json:"shareName,omitempty" tf:"share_name,omitempty"` + + // The Azure Storage Type. Possible values include AzureFiles and AzureBlob + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type LinuxWebAppSlotStorageAccountParameters struct { + + // The Access key for the storage account. + // +kubebuilder:validation:Required + AccessKeySecretRef v1.SecretKeySelector `json:"accessKeySecretRef" tf:"-"` + + // The Name of the Storage Account. + // +kubebuilder:validation:Optional + AccountName *string `json:"accountName" tf:"account_name,omitempty"` + + // The path at which to mount the storage share. + // +kubebuilder:validation:Optional + MountPath *string `json:"mountPath,omitempty" tf:"mount_path,omitempty"` + + // The name which should be used for this Storage Account. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The Name of the File Share or Container Name for Blob storage. + // +kubebuilder:validation:Optional + ShareName *string `json:"shareName" tf:"share_name,omitempty"` + + // The Azure Storage Type. Possible values include AzureFiles and AzureBlob + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type LogsApplicationLogsInitParameters struct { + + // A azure_blob_storage_http block as defined above. 
+ AzureBlobStorage *ApplicationLogsAzureBlobStorageInitParameters `json:"azureBlobStorage,omitempty" tf:"azure_blob_storage,omitempty"` + + // Log level. Possible values include Off, Verbose, Information, Warning, and Error. + FileSystemLevel *string `json:"fileSystemLevel,omitempty" tf:"file_system_level,omitempty"` +} + +type LogsApplicationLogsObservation struct { + + // A azure_blob_storage_http block as defined above. + AzureBlobStorage *ApplicationLogsAzureBlobStorageObservation `json:"azureBlobStorage,omitempty" tf:"azure_blob_storage,omitempty"` + + // Log level. Possible values include Off, Verbose, Information, Warning, and Error. + FileSystemLevel *string `json:"fileSystemLevel,omitempty" tf:"file_system_level,omitempty"` +} + +type LogsApplicationLogsParameters struct { + + // A azure_blob_storage_http block as defined above. + // +kubebuilder:validation:Optional + AzureBlobStorage *ApplicationLogsAzureBlobStorageParameters `json:"azureBlobStorage,omitempty" tf:"azure_blob_storage,omitempty"` + + // Log level. Possible values include Off, Verbose, Information, Warning, and Error. + // +kubebuilder:validation:Optional + FileSystemLevel *string `json:"fileSystemLevel" tf:"file_system_level,omitempty"` +} + +type LogsHTTPLogsAzureBlobStorageInitParameters struct { + + // The retention period in days. A values of 0 means no retention. + RetentionInDays *float64 `json:"retentionInDays,omitempty" tf:"retention_in_days,omitempty"` +} + +type LogsHTTPLogsAzureBlobStorageObservation struct { + + // The retention period in days. A values of 0 means no retention. + RetentionInDays *float64 `json:"retentionInDays,omitempty" tf:"retention_in_days,omitempty"` +} + +type LogsHTTPLogsAzureBlobStorageParameters struct { + + // The retention period in days. A values of 0 means no retention. 
+ // +kubebuilder:validation:Optional + RetentionInDays *float64 `json:"retentionInDays,omitempty" tf:"retention_in_days,omitempty"` + + // SAS url to an Azure blob container with read/write/list/delete permissions. + // +kubebuilder:validation:Required + SASURLSecretRef v1.SecretKeySelector `json:"sasurlSecretRef" tf:"-"` +} + +type LogsHTTPLogsInitParameters struct { + + // A azure_blob_storage_http block as defined above. + AzureBlobStorage *LogsHTTPLogsAzureBlobStorageInitParameters `json:"azureBlobStorage,omitempty" tf:"azure_blob_storage,omitempty"` + + // A file_system block as defined above. + FileSystem *HTTPLogsFileSystemInitParameters `json:"fileSystem,omitempty" tf:"file_system,omitempty"` +} + +type LogsHTTPLogsObservation struct { + + // A azure_blob_storage_http block as defined above. + AzureBlobStorage *LogsHTTPLogsAzureBlobStorageObservation `json:"azureBlobStorage,omitempty" tf:"azure_blob_storage,omitempty"` + + // A file_system block as defined above. + FileSystem *HTTPLogsFileSystemObservation `json:"fileSystem,omitempty" tf:"file_system,omitempty"` +} + +type LogsHTTPLogsParameters struct { + + // A azure_blob_storage_http block as defined above. + // +kubebuilder:validation:Optional + AzureBlobStorage *LogsHTTPLogsAzureBlobStorageParameters `json:"azureBlobStorage,omitempty" tf:"azure_blob_storage,omitempty"` + + // A file_system block as defined above. + // +kubebuilder:validation:Optional + FileSystem *HTTPLogsFileSystemParameters `json:"fileSystem,omitempty" tf:"file_system,omitempty"` +} + +type SiteConfigAutoHealSettingInitParameters struct { + + // The action to take. Possible values are Allow or Deny. Defaults to Allow. + Action *AutoHealSettingActionInitParameters `json:"action,omitempty" tf:"action,omitempty"` + + // A trigger block as defined below. + Trigger *AutoHealSettingTriggerInitParameters `json:"trigger,omitempty" tf:"trigger,omitempty"` +} + +type SiteConfigAutoHealSettingObservation struct { + + // The action to take. 
Possible values are Allow or Deny. Defaults to Allow. + Action *AutoHealSettingActionObservation `json:"action,omitempty" tf:"action,omitempty"` + + // A trigger block as defined below. + Trigger *AutoHealSettingTriggerObservation `json:"trigger,omitempty" tf:"trigger,omitempty"` +} + +type SiteConfigAutoHealSettingParameters struct { + + // The action to take. Possible values are Allow or Deny. Defaults to Allow. + // +kubebuilder:validation:Optional + Action *AutoHealSettingActionParameters `json:"action,omitempty" tf:"action,omitempty"` + + // A trigger block as defined below. + // +kubebuilder:validation:Optional + Trigger *AutoHealSettingTriggerParameters `json:"trigger,omitempty" tf:"trigger,omitempty"` +} + +type TriggerRequestsInitParameters struct { + + // The number of occurrences of the defined status_code in the specified interval on which to trigger this rule. + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` + + // The time interval in the form hh:mm:ss. + Interval *string `json:"interval,omitempty" tf:"interval,omitempty"` +} + +type TriggerRequestsObservation struct { + + // The number of occurrences of the defined status_code in the specified interval on which to trigger this rule. + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` + + // The time interval in the form hh:mm:ss. + Interval *string `json:"interval,omitempty" tf:"interval,omitempty"` +} + +type TriggerRequestsParameters struct { + + // The number of occurrences of the defined status_code in the specified interval on which to trigger this rule. + // +kubebuilder:validation:Optional + Count *float64 `json:"count" tf:"count,omitempty"` + + // The time interval in the form hh:mm:ss. + // +kubebuilder:validation:Optional + Interval *string `json:"interval" tf:"interval,omitempty"` +} + +type TriggerSlowRequestInitParameters struct { + + // The number of occurrences of the defined status_code in the specified interval on which to trigger this rule. 
+ Count *float64 `json:"count,omitempty" tf:"count,omitempty"` + + // The time interval in the form hh:mm:ss. + Interval *string `json:"interval,omitempty" tf:"interval,omitempty"` + + // The path to which this rule status code applies. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // The threshold of time passed to qualify as a Slow Request in hh:mm:ss. + TimeTaken *string `json:"timeTaken,omitempty" tf:"time_taken,omitempty"` +} + +type TriggerSlowRequestObservation struct { + + // The number of occurrences of the defined status_code in the specified interval on which to trigger this rule. + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` + + // The time interval in the form hh:mm:ss. + Interval *string `json:"interval,omitempty" tf:"interval,omitempty"` + + // The path to which this rule status code applies. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // The threshold of time passed to qualify as a Slow Request in hh:mm:ss. + TimeTaken *string `json:"timeTaken,omitempty" tf:"time_taken,omitempty"` +} + +type TriggerSlowRequestParameters struct { + + // The number of occurrences of the defined status_code in the specified interval on which to trigger this rule. + // +kubebuilder:validation:Optional + Count *float64 `json:"count" tf:"count,omitempty"` + + // The time interval in the form hh:mm:ss. + // +kubebuilder:validation:Optional + Interval *string `json:"interval" tf:"interval,omitempty"` + + // The path to which this rule status code applies. + // +kubebuilder:validation:Optional + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // The threshold of time passed to qualify as a Slow Request in hh:mm:ss. + // +kubebuilder:validation:Optional + TimeTaken *string `json:"timeTaken" tf:"time_taken,omitempty"` +} + +type TriggerStatusCodeInitParameters struct { + + // The number of occurrences of the defined status_code in the specified interval on which to trigger this rule. 
+ Count *float64 `json:"count,omitempty" tf:"count,omitempty"` + + // The time interval in the form hh:mm:ss. + Interval *string `json:"interval,omitempty" tf:"interval,omitempty"` + + // The path to which this rule status code applies. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // The status code for this rule, accepts single status codes and status code ranges. e.g. 500 or 400-499. Possible values are integers between 101 and 599 + StatusCodeRange *string `json:"statusCodeRange,omitempty" tf:"status_code_range,omitempty"` + + // The Request Sub Status of the Status Code. + SubStatus *float64 `json:"subStatus,omitempty" tf:"sub_status,omitempty"` + + // The Win32 Status Code of the Request. + Win32StatusCode *float64 `json:"win32StatusCode,omitempty" tf:"win32_status_code,omitempty"` +} + +type TriggerStatusCodeObservation struct { + + // The number of occurrences of the defined status_code in the specified interval on which to trigger this rule. + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` + + // The time interval in the form hh:mm:ss. + Interval *string `json:"interval,omitempty" tf:"interval,omitempty"` + + // The path to which this rule status code applies. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // The status code for this rule, accepts single status codes and status code ranges. e.g. 500 or 400-499. Possible values are integers between 101 and 599 + StatusCodeRange *string `json:"statusCodeRange,omitempty" tf:"status_code_range,omitempty"` + + // The Request Sub Status of the Status Code. + SubStatus *float64 `json:"subStatus,omitempty" tf:"sub_status,omitempty"` + + // The Win32 Status Code of the Request. + Win32StatusCode *float64 `json:"win32StatusCode,omitempty" tf:"win32_status_code,omitempty"` +} + +type TriggerStatusCodeParameters struct { + + // The number of occurrences of the defined status_code in the specified interval on which to trigger this rule. 
+ // +kubebuilder:validation:Optional + Count *float64 `json:"count" tf:"count,omitempty"` + + // The time interval in the form hh:mm:ss. + // +kubebuilder:validation:Optional + Interval *string `json:"interval" tf:"interval,omitempty"` + + // The path to which this rule status code applies. + // +kubebuilder:validation:Optional + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // The status code for this rule, accepts single status codes and status code ranges. e.g. 500 or 400-499. Possible values are integers between 101 and 599 + // +kubebuilder:validation:Optional + StatusCodeRange *string `json:"statusCodeRange" tf:"status_code_range,omitempty"` + + // The Request Sub Status of the Status Code. + // +kubebuilder:validation:Optional + SubStatus *float64 `json:"subStatus,omitempty" tf:"sub_status,omitempty"` + + // The Win32 Status Code of the Request. + // +kubebuilder:validation:Optional + Win32StatusCode *float64 `json:"win32StatusCode,omitempty" tf:"win32_status_code,omitempty"` +} + +// LinuxWebAppSlotSpec defines the desired state of LinuxWebAppSlot +type LinuxWebAppSlotSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider LinuxWebAppSlotParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. 
+ InitProvider LinuxWebAppSlotInitParameters `json:"initProvider,omitempty"` +} + +// LinuxWebAppSlotStatus defines the observed state of LinuxWebAppSlot. +type LinuxWebAppSlotStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider LinuxWebAppSlotObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// LinuxWebAppSlot is the Schema for the LinuxWebAppSlots API. Manages a Linux Web App Slot. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type LinuxWebAppSlot struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.name) || (has(self.initProvider) && has(self.initProvider.name))",message="spec.forProvider.name is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.siteConfig) || (has(self.initProvider) && has(self.initProvider.siteConfig))",message="spec.forProvider.siteConfig is a required parameter" + Spec LinuxWebAppSlotSpec `json:"spec"` + Status LinuxWebAppSlotStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// LinuxWebAppSlotList contains a list of LinuxWebAppSlots +type LinuxWebAppSlotList struct { + metav1.TypeMeta 
`json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []LinuxWebAppSlot `json:"items"` +} + +// Repository type metadata. +var ( + LinuxWebAppSlot_Kind = "LinuxWebAppSlot" + LinuxWebAppSlot_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: LinuxWebAppSlot_Kind}.String() + LinuxWebAppSlot_KindAPIVersion = LinuxWebAppSlot_Kind + "." + CRDGroupVersion.String() + LinuxWebAppSlot_GroupVersionKind = CRDGroupVersion.WithKind(LinuxWebAppSlot_Kind) +) + +func init() { + SchemeBuilder.Register(&LinuxWebAppSlot{}, &LinuxWebAppSlotList{}) +} diff --git a/apis/web/v1beta2/zz_staticsite_terraformed.go b/apis/web/v1beta2/zz_staticsite_terraformed.go new file mode 100755 index 000000000..3df84d54c --- /dev/null +++ b/apis/web/v1beta2/zz_staticsite_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this StaticSite +func (mg *StaticSite) GetTerraformResourceType() string { + return "azurerm_static_site" +} + +// GetConnectionDetailsMapping for this StaticSite +func (tr *StaticSite) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"api_key": "status.atProvider.apiKey"} +} + +// GetObservation of this StaticSite +func (tr *StaticSite) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this StaticSite +func (tr *StaticSite) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, 
&tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this StaticSite +func (tr *StaticSite) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this StaticSite +func (tr *StaticSite) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this StaticSite +func (tr *StaticSite) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this StaticSite +func (tr *StaticSite) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this StaticSite +func (tr *StaticSite) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. 
+ err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this StaticSite using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *StaticSite) LateInitialize(attrs []byte) (bool, error) { + params := &StaticSiteParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *StaticSite) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/web/v1beta2/zz_staticsite_types.go b/apis/web/v1beta2/zz_staticsite_types.go new file mode 100755 index 000000000..93941c6a0 --- /dev/null +++ b/apis/web/v1beta2/zz_staticsite_types.go @@ -0,0 +1,210 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type StaticSiteIdentityInitParameters struct { + + // A list of Managed Identity IDs which should be assigned to this Static Site resource. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Type of Managed Identity assigned to this Static Site resource. 
Possible values are SystemAssigned, UserAssigned and SystemAssigned, UserAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type StaticSiteIdentityObservation struct { + + // A list of Managed Identity IDs which should be assigned to this Static Site resource. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Principal ID associated with this Managed Service Identity. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The ID of the Static Web App. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // The Type of Managed Identity assigned to this Static Site resource. Possible values are SystemAssigned, UserAssigned and SystemAssigned, UserAssigned. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type StaticSiteIdentityParameters struct { + + // A list of Managed Identity IDs which should be assigned to this Static Site resource. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Type of Managed Identity assigned to this Static Site resource. Possible values are SystemAssigned, UserAssigned and SystemAssigned, UserAssigned. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type StaticSiteInitParameters struct { + + // A key-value pair of App Settings. + // +mapType=granular + AppSettings map[string]*string `json:"appSettings,omitempty" tf:"app_settings,omitempty"` + + // An identity block as defined below. + Identity *StaticSiteIdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The Azure Region where the Static Web App should exist. Changing this forces a new Static Web App to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Specifies the SKU size of the Static Web App. 
Possible values are Free or Standard. Defaults to Free. + SkuSize *string `json:"skuSize,omitempty" tf:"sku_size,omitempty"` + + // Specifies the SKU tier of the Static Web App. Possible values are Free or Standard. Defaults to Free. + SkuTier *string `json:"skuTier,omitempty" tf:"sku_tier,omitempty"` + + // A mapping of tags to assign to the resource. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type StaticSiteObservation struct { + + // A key-value pair of App Settings. + // +mapType=granular + AppSettings map[string]*string `json:"appSettings,omitempty" tf:"app_settings,omitempty"` + + // The default host name of the Static Web App. + DefaultHostName *string `json:"defaultHostName,omitempty" tf:"default_host_name,omitempty"` + + // The ID of the Static Web App. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. + Identity *StaticSiteIdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // The Azure Region where the Static Web App should exist. Changing this forces a new Static Web App to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the Resource Group where the Static Web App should exist. Changing this forces a new Static Web App to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Specifies the SKU size of the Static Web App. Possible values are Free or Standard. Defaults to Free. + SkuSize *string `json:"skuSize,omitempty" tf:"sku_size,omitempty"` + + // Specifies the SKU tier of the Static Web App. Possible values are Free or Standard. Defaults to Free. + SkuTier *string `json:"skuTier,omitempty" tf:"sku_tier,omitempty"` + + // A mapping of tags to assign to the resource. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +type StaticSiteParameters struct { + + // A key-value pair of App Settings. + // +kubebuilder:validation:Optional + // +mapType=granular + AppSettings map[string]*string `json:"appSettings,omitempty" tf:"app_settings,omitempty"` + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *StaticSiteIdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The Azure Region where the Static Web App should exist. Changing this forces a new Static Web App to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The name of the Resource Group where the Static Web App should exist. Changing this forces a new Static Web App to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // Specifies the SKU size of the Static Web App. Possible values are Free or Standard. Defaults to Free. + // +kubebuilder:validation:Optional + SkuSize *string `json:"skuSize,omitempty" tf:"sku_size,omitempty"` + + // Specifies the SKU tier of the Static Web App. Possible values are Free or Standard. Defaults to Free. + // +kubebuilder:validation:Optional + SkuTier *string `json:"skuTier,omitempty" tf:"sku_tier,omitempty"` + + // A mapping of tags to assign to the resource. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` +} + +// StaticSiteSpec defines the desired state of StaticSite +type StaticSiteSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider StaticSiteParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider StaticSiteInitParameters `json:"initProvider,omitempty"` +} + +// StaticSiteStatus defines the observed state of StaticSite. +type StaticSiteStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider StaticSiteObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// StaticSite is the Schema for the StaticSites API. Manages a Static Site. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type StaticSite struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + Spec StaticSiteSpec `json:"spec"` + Status StaticSiteStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// StaticSiteList contains a list of StaticSites +type StaticSiteList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []StaticSite `json:"items"` +} + +// Repository type metadata. +var ( + StaticSite_Kind = "StaticSite" + StaticSite_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: StaticSite_Kind}.String() + StaticSite_KindAPIVersion = StaticSite_Kind + "." 
+ CRDGroupVersion.String() + StaticSite_GroupVersionKind = CRDGroupVersion.WithKind(StaticSite_Kind) +) + +func init() { + SchemeBuilder.Register(&StaticSite{}, &StaticSiteList{}) +} diff --git a/apis/web/v1beta2/zz_windowsfunctionapp_terraformed.go b/apis/web/v1beta2/zz_windowsfunctionapp_terraformed.go new file mode 100755 index 000000000..b7f52c9a1 --- /dev/null +++ b/apis/web/v1beta2/zz_windowsfunctionapp_terraformed.go @@ -0,0 +1,130 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this WindowsFunctionApp +func (mg *WindowsFunctionApp) GetTerraformResourceType() string { + return "azurerm_windows_function_app" +} + +// GetConnectionDetailsMapping for this WindowsFunctionApp +func (tr *WindowsFunctionApp) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"auth_settings[*].active_directory[*].client_secret": "spec.forProvider.authSettings[*].activeDirectory[*].clientSecretSecretRef", "auth_settings[*].facebook[*].app_secret": "spec.forProvider.authSettings[*].facebook[*].appSecretSecretRef", "auth_settings[*].github[*].client_secret": "spec.forProvider.authSettings[*].github[*].clientSecretSecretRef", "auth_settings[*].google[*].client_secret": "spec.forProvider.authSettings[*].google[*].clientSecretSecretRef", "auth_settings[*].microsoft[*].client_secret": "spec.forProvider.authSettings[*].microsoft[*].clientSecretSecretRef", "auth_settings[*].twitter[*].consumer_secret": "spec.forProvider.authSettings[*].twitter[*].consumerSecretSecretRef", "backup[*].storage_account_url": "spec.forProvider.backup[*].storageAccountUrlSecretRef", "connection_string[*].value": 
"spec.forProvider.connectionString[*].valueSecretRef", "custom_domain_verification_id": "status.atProvider.customDomainVerificationId", "site_config[*].application_insights_connection_string": "spec.forProvider.siteConfig[*].applicationInsightsConnectionStringSecretRef", "site_config[*].application_insights_key": "spec.forProvider.siteConfig[*].applicationInsightsKeySecretRef", "site_credential[*]": "status.atProvider.siteCredential[*]", "storage_account[*].access_key": "spec.forProvider.storageAccount[*].accessKeySecretRef", "storage_account_access_key": "spec.forProvider.storageAccountAccessKeySecretRef"} +} + +// GetObservation of this WindowsFunctionApp +func (tr *WindowsFunctionApp) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this WindowsFunctionApp +func (tr *WindowsFunctionApp) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this WindowsFunctionApp +func (tr *WindowsFunctionApp) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this WindowsFunctionApp +func (tr *WindowsFunctionApp) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this WindowsFunctionApp +func (tr *WindowsFunctionApp) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this 
WindowsFunctionApp +func (tr *WindowsFunctionApp) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this WindowsFunctionApp +func (tr *WindowsFunctionApp) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this WindowsFunctionApp using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *WindowsFunctionApp) LateInitialize(attrs []byte) (bool, error) { + params := &WindowsFunctionAppParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + opts = append(opts, resource.WithNameFilter("KeyVaultReferenceIdentityID")) + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *WindowsFunctionApp) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/web/v1beta2/zz_windowsfunctionapp_types.go b/apis/web/v1beta2/zz_windowsfunctionapp_types.go new file mode 100755 index 000000000..e5bfa1e7b --- /dev/null +++ b/apis/web/v1beta2/zz_windowsfunctionapp_types.go @@ -0,0 +1,3286 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type WindowsFunctionAppAuthSettingsActiveDirectoryInitParameters struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Active Directory. 
+ ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The App Setting name that contains the client secret of the Client. Cannot be used with `client_secret`. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` +} + +type WindowsFunctionAppAuthSettingsActiveDirectoryObservation struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Active Directory. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The App Setting name that contains the client secret of the Client. Cannot be used with `client_secret`. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` +} + +type WindowsFunctionAppAuthSettingsActiveDirectoryParameters struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + // +kubebuilder:validation:Optional + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. 
+ // The ID of the Client to use to authenticate with Azure Active Directory. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. + // The Client Secret for the Client ID. Cannot be used with `client_secret_setting_name`. + // +kubebuilder:validation:Optional + ClientSecretSecretRef *v1.SecretKeySelector `json:"clientSecretSecretRef,omitempty" tf:"-"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The App Setting name that contains the client secret of the Client. Cannot be used with `client_secret`. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` +} + +type WindowsFunctionAppAuthSettingsFacebookInitParameters struct { + + // The App ID of the Facebook app used for login. + // The App ID of the Facebook app used for login. + AppID *string `json:"appId,omitempty" tf:"app_id,omitempty"` + + // The app setting name that contains the app_secret value used for Facebook Login. + // The app setting name that contains the `app_secret` value used for Facebook Login. Cannot be specified with `app_secret`. + AppSecretSettingName *string `json:"appSecretSettingName,omitempty" tf:"app_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + // Specifies a list of OAuth 2.0 scopes to be requested as part of Facebook Login authentication. + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type WindowsFunctionAppAuthSettingsFacebookObservation struct { + + // The App ID of the Facebook app used for login. 
+ // The App ID of the Facebook app used for login. + AppID *string `json:"appId,omitempty" tf:"app_id,omitempty"` + + // The app setting name that contains the app_secret value used for Facebook Login. + // The app setting name that contains the `app_secret` value used for Facebook Login. Cannot be specified with `app_secret`. + AppSecretSettingName *string `json:"appSecretSettingName,omitempty" tf:"app_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + // Specifies a list of OAuth 2.0 scopes to be requested as part of Facebook Login authentication. + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type WindowsFunctionAppAuthSettingsFacebookParameters struct { + + // The App ID of the Facebook app used for login. + // The App ID of the Facebook app used for login. + // +kubebuilder:validation:Optional + AppID *string `json:"appId" tf:"app_id,omitempty"` + + // The App Secret of the Facebook app used for Facebook login. Cannot be specified with app_secret_setting_name. + // The App Secret of the Facebook app used for Facebook Login. Cannot be specified with `app_secret_setting_name`. + // +kubebuilder:validation:Optional + AppSecretSecretRef *v1.SecretKeySelector `json:"appSecretSecretRef,omitempty" tf:"-"` + + // The app setting name that contains the app_secret value used for Facebook Login. + // The app setting name that contains the `app_secret` value used for Facebook Login. Cannot be specified with `app_secret`. + // +kubebuilder:validation:Optional + AppSecretSettingName *string `json:"appSecretSettingName,omitempty" tf:"app_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. 
+ // Specifies a list of OAuth 2.0 scopes to be requested as part of Facebook Login authentication. + // +kubebuilder:validation:Optional + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type WindowsFunctionAppAuthSettingsGithubInitParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the GitHub app used for login. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for GitHub Login. Cannot be specified with `client_secret`. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type WindowsFunctionAppAuthSettingsGithubObservation struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the GitHub app used for login. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for GitHub Login. Cannot be specified with `client_secret`. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. 
If not specified, wl.basic is used as the default scope. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type WindowsFunctionAppAuthSettingsGithubParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the GitHub app used for login. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. + // The Client Secret of the GitHub app used for GitHub Login. Cannot be specified with `client_secret_setting_name`. + // +kubebuilder:validation:Optional + ClientSecretSecretRef *v1.SecretKeySelector `json:"clientSecretSecretRef,omitempty" tf:"-"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for GitHub Login. Cannot be specified with `client_secret`. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + // +kubebuilder:validation:Optional + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type WindowsFunctionAppAuthSettingsGoogleInitParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Google web application. 
+ ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Google Login. Cannot be specified with `client_secret`. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication. If not specified, "openid", "profile", and "email" are used as default scopes. + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type WindowsFunctionAppAuthSettingsGoogleObservation struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Google web application. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Google Login. Cannot be specified with `client_secret`. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication. If not specified, "openid", "profile", and "email" are used as default scopes. 
+ OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type WindowsFunctionAppAuthSettingsGoogleParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Google web application. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. + // The client secret associated with the Google web application. Cannot be specified with `client_secret_setting_name`. + // +kubebuilder:validation:Optional + ClientSecretSecretRef *v1.SecretKeySelector `json:"clientSecretSecretRef,omitempty" tf:"-"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Google Login. Cannot be specified with `client_secret`. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication. If not specified, "openid", "profile", and "email" are used as default scopes. + // +kubebuilder:validation:Optional + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type WindowsFunctionAppAuthSettingsInitParameters struct { + + // An active_directory block as defined above. 
+ ActiveDirectory *WindowsFunctionAppAuthSettingsActiveDirectoryInitParameters `json:"activeDirectory,omitempty" tf:"active_directory,omitempty"` + + // Specifies a map of login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + // Specifies a map of Login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + // +mapType=granular + AdditionalLoginParameters map[string]*string `json:"additionalLoginParameters,omitempty" tf:"additional_login_parameters,omitempty"` + + // Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Function App. + // Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + AllowedExternalRedirectUrls []*string `json:"allowedExternalRedirectUrls,omitempty" tf:"allowed_external_redirect_urls,omitempty"` + + // The default authentication provider to use when multiple providers are configured. Possible values include: AzureActiveDirectory, Facebook, Google, MicrosoftAccount, Twitter, Github + // The default authentication provider to use when multiple providers are configured. Possible values include: `AzureActiveDirectory`, `Facebook`, `Google`, `MicrosoftAccount`, `Twitter`, `Github`. + DefaultProvider *string `json:"defaultProvider,omitempty" tf:"default_provider,omitempty"` + + // Should the Authentication / Authorization feature be enabled for the Windows Function App? + // Should the Authentication / Authorization feature be enabled? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // A facebook block as defined below. + Facebook *WindowsFunctionAppAuthSettingsFacebookInitParameters `json:"facebook,omitempty" tf:"facebook,omitempty"` + + // A github block as defined below. + Github *WindowsFunctionAppAuthSettingsGithubInitParameters `json:"github,omitempty" tf:"github,omitempty"` + + // A google block as defined below. 
+ Google *WindowsFunctionAppAuthSettingsGoogleInitParameters `json:"google,omitempty" tf:"google,omitempty"` + + // The OpenID Connect Issuer URI that represents the entity which issues access tokens for this Windows Function App. + // The OpenID Connect Issuer URI that represents the entity which issues access tokens. + Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"` + + // A microsoft block as defined below. + Microsoft *WindowsFunctionAppAuthSettingsMicrosoftInitParameters `json:"microsoft,omitempty" tf:"microsoft,omitempty"` + + // The Runtime Version of the Authentication / Authorization feature in use for the Windows Function App. + // The RuntimeVersion of the Authentication / Authorization feature in use. + RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` + + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + TokenRefreshExtensionHours *float64 `json:"tokenRefreshExtensionHours,omitempty" tf:"token_refresh_extension_hours,omitempty"` + + // Should the Windows Function App durably store platform-specific security tokens that are obtained during login flows? Defaults to false. + // Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to `false`. + TokenStoreEnabled *bool `json:"tokenStoreEnabled,omitempty" tf:"token_store_enabled,omitempty"` + + // A twitter block as defined below. + Twitter *WindowsFunctionAppAuthSettingsTwitterInitParameters `json:"twitter,omitempty" tf:"twitter,omitempty"` + + // The action to take when an unauthenticated client attempts to access the app. Possible values include: RedirectToLoginPage, AllowAnonymous. 
+ // The action to take when an unauthenticated client attempts to access the app. Possible values include: `RedirectToLoginPage`, `AllowAnonymous`. + UnauthenticatedClientAction *string `json:"unauthenticatedClientAction,omitempty" tf:"unauthenticated_client_action,omitempty"` +} + +type WindowsFunctionAppAuthSettingsMicrosoftInitParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OAuth 2.0 client ID that was created for the app used for authentication. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret`. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + // The list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, `wl.basic` is used as the default scope. + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type WindowsFunctionAppAuthSettingsMicrosoftObservation struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OAuth 2.0 client ID that was created for the app used for authentication. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret`. 
+ ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + // The list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, `wl.basic` is used as the default scope. + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type WindowsFunctionAppAuthSettingsMicrosoftParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OAuth 2.0 client ID that was created for the app used for authentication. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. + // The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret_setting_name`. + // +kubebuilder:validation:Optional + ClientSecretSecretRef *v1.SecretKeySelector `json:"clientSecretSecretRef,omitempty" tf:"-"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret`. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + // The list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. 
If not specified, `wl.basic` is used as the default scope. + // +kubebuilder:validation:Optional + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type WindowsFunctionAppAuthSettingsObservation struct { + + // An active_directory block as defined above. + ActiveDirectory *WindowsFunctionAppAuthSettingsActiveDirectoryObservation `json:"activeDirectory,omitempty" tf:"active_directory,omitempty"` + + // Specifies a map of login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + // Specifies a map of Login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + // +mapType=granular + AdditionalLoginParameters map[string]*string `json:"additionalLoginParameters,omitempty" tf:"additional_login_parameters,omitempty"` + + // Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Function App. + // Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + AllowedExternalRedirectUrls []*string `json:"allowedExternalRedirectUrls,omitempty" tf:"allowed_external_redirect_urls,omitempty"` + + // The default authentication provider to use when multiple providers are configured. Possible values include: AzureActiveDirectory, Facebook, Google, MicrosoftAccount, Twitter, Github + // The default authentication provider to use when multiple providers are configured. Possible values include: `AzureActiveDirectory`, `Facebook`, `Google`, `MicrosoftAccount`, `Twitter`, `Github`. + DefaultProvider *string `json:"defaultProvider,omitempty" tf:"default_provider,omitempty"` + + // Should the Authentication / Authorization feature be enabled for the Windows Function App? + // Should the Authentication / Authorization feature be enabled? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // A facebook block as defined below. 
+ Facebook *WindowsFunctionAppAuthSettingsFacebookObservation `json:"facebook,omitempty" tf:"facebook,omitempty"` + + // A github block as defined below. + Github *WindowsFunctionAppAuthSettingsGithubObservation `json:"github,omitempty" tf:"github,omitempty"` + + // A google block as defined below. + Google *WindowsFunctionAppAuthSettingsGoogleObservation `json:"google,omitempty" tf:"google,omitempty"` + + // The OpenID Connect Issuer URI that represents the entity which issues access tokens for this Windows Function App. + // The OpenID Connect Issuer URI that represents the entity which issues access tokens. + Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"` + + // A microsoft block as defined below. + Microsoft *WindowsFunctionAppAuthSettingsMicrosoftObservation `json:"microsoft,omitempty" tf:"microsoft,omitempty"` + + // The Runtime Version of the Authentication / Authorization feature in use for the Windows Function App. + // The RuntimeVersion of the Authentication / Authorization feature in use. + RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` + + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + TokenRefreshExtensionHours *float64 `json:"tokenRefreshExtensionHours,omitempty" tf:"token_refresh_extension_hours,omitempty"` + + // Should the Windows Function App durably store platform-specific security tokens that are obtained during login flows? Defaults to false. + // Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to `false`. + TokenStoreEnabled *bool `json:"tokenStoreEnabled,omitempty" tf:"token_store_enabled,omitempty"` + + // A twitter block as defined below. 
+ Twitter *WindowsFunctionAppAuthSettingsTwitterObservation `json:"twitter,omitempty" tf:"twitter,omitempty"` + + // The action to take when an unauthenticated client attempts to access the app. Possible values include: RedirectToLoginPage, AllowAnonymous. + // The action to take when an unauthenticated client attempts to access the app. Possible values include: `RedirectToLoginPage`, `AllowAnonymous`. + UnauthenticatedClientAction *string `json:"unauthenticatedClientAction,omitempty" tf:"unauthenticated_client_action,omitempty"` +} + +type WindowsFunctionAppAuthSettingsParameters struct { + + // An active_directory block as defined above. + // +kubebuilder:validation:Optional + ActiveDirectory *WindowsFunctionAppAuthSettingsActiveDirectoryParameters `json:"activeDirectory,omitempty" tf:"active_directory,omitempty"` + + // Specifies a map of login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + // Specifies a map of Login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + // +kubebuilder:validation:Optional + // +mapType=granular + AdditionalLoginParameters map[string]*string `json:"additionalLoginParameters,omitempty" tf:"additional_login_parameters,omitempty"` + + // Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Function App. + // Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + // +kubebuilder:validation:Optional + AllowedExternalRedirectUrls []*string `json:"allowedExternalRedirectUrls,omitempty" tf:"allowed_external_redirect_urls,omitempty"` + + // The default authentication provider to use when multiple providers are configured. Possible values include: AzureActiveDirectory, Facebook, Google, MicrosoftAccount, Twitter, Github + // The default authentication provider to use when multiple providers are configured. 
Possible values include: `AzureActiveDirectory`, `Facebook`, `Google`, `MicrosoftAccount`, `Twitter`, `Github`. + // +kubebuilder:validation:Optional + DefaultProvider *string `json:"defaultProvider,omitempty" tf:"default_provider,omitempty"` + + // Should the Authentication / Authorization feature be enabled for the Windows Function App? + // Should the Authentication / Authorization feature be enabled? + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` + + // A facebook block as defined below. + // +kubebuilder:validation:Optional + Facebook *WindowsFunctionAppAuthSettingsFacebookParameters `json:"facebook,omitempty" tf:"facebook,omitempty"` + + // A github block as defined below. + // +kubebuilder:validation:Optional + Github *WindowsFunctionAppAuthSettingsGithubParameters `json:"github,omitempty" tf:"github,omitempty"` + + // A google block as defined below. + // +kubebuilder:validation:Optional + Google *WindowsFunctionAppAuthSettingsGoogleParameters `json:"google,omitempty" tf:"google,omitempty"` + + // The OpenID Connect Issuer URI that represents the entity which issues access tokens for this Windows Function App. + // The OpenID Connect Issuer URI that represents the entity which issues access tokens. + // +kubebuilder:validation:Optional + Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"` + + // A microsoft block as defined below. + // +kubebuilder:validation:Optional + Microsoft *WindowsFunctionAppAuthSettingsMicrosoftParameters `json:"microsoft,omitempty" tf:"microsoft,omitempty"` + + // The Runtime Version of the Authentication / Authorization feature in use for the Windows Function App. + // The RuntimeVersion of the Authentication / Authorization feature in use. 
+ // +kubebuilder:validation:Optional + RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` + + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + // +kubebuilder:validation:Optional + TokenRefreshExtensionHours *float64 `json:"tokenRefreshExtensionHours,omitempty" tf:"token_refresh_extension_hours,omitempty"` + + // Should the Windows Function App durably store platform-specific security tokens that are obtained during login flows? Defaults to false. + // Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to `false`. + // +kubebuilder:validation:Optional + TokenStoreEnabled *bool `json:"tokenStoreEnabled,omitempty" tf:"token_store_enabled,omitempty"` + + // A twitter block as defined below. + // +kubebuilder:validation:Optional + Twitter *WindowsFunctionAppAuthSettingsTwitterParameters `json:"twitter,omitempty" tf:"twitter,omitempty"` + + // The action to take when an unauthenticated client attempts to access the app. Possible values include: RedirectToLoginPage, AllowAnonymous. + // The action to take when an unauthenticated client attempts to access the app. Possible values include: `RedirectToLoginPage`, `AllowAnonymous`. + // +kubebuilder:validation:Optional + UnauthenticatedClientAction *string `json:"unauthenticatedClientAction,omitempty" tf:"unauthenticated_client_action,omitempty"` +} + +type WindowsFunctionAppAuthSettingsTwitterInitParameters struct { + + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. 
+ ConsumerKey *string `json:"consumerKey,omitempty" tf:"consumer_key,omitempty"` + + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret`. + ConsumerSecretSettingName *string `json:"consumerSecretSettingName,omitempty" tf:"consumer_secret_setting_name,omitempty"` +} + +type WindowsFunctionAppAuthSettingsTwitterObservation struct { + + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + ConsumerKey *string `json:"consumerKey,omitempty" tf:"consumer_key,omitempty"` + + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret`. + ConsumerSecretSettingName *string `json:"consumerSecretSettingName,omitempty" tf:"consumer_secret_setting_name,omitempty"` +} + +type WindowsFunctionAppAuthSettingsTwitterParameters struct { + + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // +kubebuilder:validation:Optional + ConsumerKey *string `json:"consumerKey" tf:"consumer_key,omitempty"` + + // The OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with consumer_secret_setting_name. + // The OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret_setting_name`. 
+ // +kubebuilder:validation:Optional + ConsumerSecretSecretRef *v1.SecretKeySelector `json:"consumerSecretSecretRef,omitempty" tf:"-"` + + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret`. + // +kubebuilder:validation:Optional + ConsumerSecretSettingName *string `json:"consumerSecretSettingName,omitempty" tf:"consumer_secret_setting_name,omitempty"` +} + +type WindowsFunctionAppAuthSettingsV2ActiveDirectoryV2InitParameters struct { + + // The list of allowed Applications for the Default Authorisation Policy. + // The list of allowed Applications for the Default Authorisation Policy. + AllowedApplications []*string `json:"allowedApplications,omitempty" tf:"allowed_applications,omitempty"` + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The list of allowed Group Names for the Default Authorisation Policy. + // The list of allowed Group Names for the Default Authorisation Policy. + AllowedGroups []*string `json:"allowedGroups,omitempty" tf:"allowed_groups,omitempty"` + + // The list of allowed Identities for the Default Authorisation Policy. + // The list of allowed Identities for the Default Authorisation Policy. + AllowedIdentities []*string `json:"allowedIdentities,omitempty" tf:"allowed_identities,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Active Directory. 
+ ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The thumbprint of the certificate used for signing purposes. + // The thumbprint of the certificate used for signing purposes. + ClientSecretCertificateThumbprint *string `json:"clientSecretCertificateThumbprint,omitempty" tf:"client_secret_certificate_thumbprint,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The App Setting name that contains the client secret of the Client. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // A list of Allowed Client Applications in the JWT Claim. + // A list of Allowed Client Applications in the JWT Claim. + JwtAllowedClientApplications []*string `json:"jwtAllowedClientApplications,omitempty" tf:"jwt_allowed_client_applications,omitempty"` + + // A list of Allowed Groups in the JWT Claim. + // A list of Allowed Groups in the JWT Claim. + JwtAllowedGroups []*string `json:"jwtAllowedGroups,omitempty" tf:"jwt_allowed_groups,omitempty"` + + // A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + // A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + // +mapType=granular + LoginParameters map[string]*string `json:"loginParameters,omitempty" tf:"login_parameters,omitempty"` + + // The Azure Tenant Endpoint for the Authenticating Tenant. e.g. https://login.microsoftonline.com/v2.0/{tenant-guid}/ + // The Azure Tenant Endpoint for the Authenticating Tenant. e.g. `https://login.microsoftonline.com/v2.0/{tenant-guid}/`. + TenantAuthEndpoint *string `json:"tenantAuthEndpoint,omitempty" tf:"tenant_auth_endpoint,omitempty"` + + // Should the www-authenticate provider should be omitted from the request? Defaults to false. + // Should the www-authenticate provider should be omitted from the request? 
Defaults to `false` + WwwAuthenticationDisabled *bool `json:"wwwAuthenticationDisabled,omitempty" tf:"www_authentication_disabled,omitempty"` +} + +type WindowsFunctionAppAuthSettingsV2ActiveDirectoryV2Observation struct { + + // The list of allowed Applications for the Default Authorisation Policy. + // The list of allowed Applications for the Default Authorisation Policy. + AllowedApplications []*string `json:"allowedApplications,omitempty" tf:"allowed_applications,omitempty"` + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The list of allowed Group Names for the Default Authorisation Policy. + // The list of allowed Group Names for the Default Authorisation Policy. + AllowedGroups []*string `json:"allowedGroups,omitempty" tf:"allowed_groups,omitempty"` + + // The list of allowed Identities for the Default Authorisation Policy. + // The list of allowed Identities for the Default Authorisation Policy. + AllowedIdentities []*string `json:"allowedIdentities,omitempty" tf:"allowed_identities,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Active Directory. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The thumbprint of the certificate used for signing purposes. + // The thumbprint of the certificate used for signing purposes. + ClientSecretCertificateThumbprint *string `json:"clientSecretCertificateThumbprint,omitempty" tf:"client_secret_certificate_thumbprint,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. 
+	// The App Setting name that contains the client secret of the Client.
+	ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"`
+
+	// A list of Allowed Client Applications in the JWT Claim.
+	// A list of Allowed Client Applications in the JWT Claim.
+	JwtAllowedClientApplications []*string `json:"jwtAllowedClientApplications,omitempty" tf:"jwt_allowed_client_applications,omitempty"`
+
+	// A list of Allowed Groups in the JWT Claim.
+	// A list of Allowed Groups in the JWT Claim.
+	JwtAllowedGroups []*string `json:"jwtAllowedGroups,omitempty" tf:"jwt_allowed_groups,omitempty"`
+
+	// A map of key-value pairs to send to the Authorisation Endpoint when a user logs in.
+	// A map of key-value pairs to send to the Authorisation Endpoint when a user logs in.
+	// +mapType=granular
+	LoginParameters map[string]*string `json:"loginParameters,omitempty" tf:"login_parameters,omitempty"`
+
+	// The Azure Tenant Endpoint for the Authenticating Tenant. e.g. https://login.microsoftonline.com/v2.0/{tenant-guid}/
+	// The Azure Tenant Endpoint for the Authenticating Tenant. e.g. `https://login.microsoftonline.com/v2.0/{tenant-guid}/`.
+	TenantAuthEndpoint *string `json:"tenantAuthEndpoint,omitempty" tf:"tenant_auth_endpoint,omitempty"`
+
+	// Should the www-authenticate provider be omitted from the request? Defaults to false.
+	// Should the www-authenticate provider be omitted from the request? Defaults to `false`
+	WwwAuthenticationDisabled *bool `json:"wwwAuthenticationDisabled,omitempty" tf:"www_authentication_disabled,omitempty"`
+}
+
+type WindowsFunctionAppAuthSettingsV2ActiveDirectoryV2Parameters struct {
+
+	// The list of allowed Applications for the Default Authorisation Policy.
+	// The list of allowed Applications for the Default Authorisation Policy.
+ // +kubebuilder:validation:Optional + AllowedApplications []*string `json:"allowedApplications,omitempty" tf:"allowed_applications,omitempty"` + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + // +kubebuilder:validation:Optional + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The list of allowed Group Names for the Default Authorisation Policy. + // The list of allowed Group Names for the Default Authorisation Policy. + // +kubebuilder:validation:Optional + AllowedGroups []*string `json:"allowedGroups,omitempty" tf:"allowed_groups,omitempty"` + + // The list of allowed Identities for the Default Authorisation Policy. + // The list of allowed Identities for the Default Authorisation Policy. + // +kubebuilder:validation:Optional + AllowedIdentities []*string `json:"allowedIdentities,omitempty" tf:"allowed_identities,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Active Directory. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The thumbprint of the certificate used for signing purposes. + // The thumbprint of the certificate used for signing purposes. + // +kubebuilder:validation:Optional + ClientSecretCertificateThumbprint *string `json:"clientSecretCertificateThumbprint,omitempty" tf:"client_secret_certificate_thumbprint,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The App Setting name that contains the client secret of the Client. 
+	// +kubebuilder:validation:Optional
+	ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"`
+
+	// A list of Allowed Client Applications in the JWT Claim.
+	// A list of Allowed Client Applications in the JWT Claim.
+	// +kubebuilder:validation:Optional
+	JwtAllowedClientApplications []*string `json:"jwtAllowedClientApplications,omitempty" tf:"jwt_allowed_client_applications,omitempty"`
+
+	// A list of Allowed Groups in the JWT Claim.
+	// A list of Allowed Groups in the JWT Claim.
+	// +kubebuilder:validation:Optional
+	JwtAllowedGroups []*string `json:"jwtAllowedGroups,omitempty" tf:"jwt_allowed_groups,omitempty"`
+
+	// A map of key-value pairs to send to the Authorisation Endpoint when a user logs in.
+	// A map of key-value pairs to send to the Authorisation Endpoint when a user logs in.
+	// +kubebuilder:validation:Optional
+	// +mapType=granular
+	LoginParameters map[string]*string `json:"loginParameters,omitempty" tf:"login_parameters,omitempty"`
+
+	// The Azure Tenant Endpoint for the Authenticating Tenant. e.g. https://login.microsoftonline.com/v2.0/{tenant-guid}/
+	// The Azure Tenant Endpoint for the Authenticating Tenant. e.g. `https://login.microsoftonline.com/v2.0/{tenant-guid}/`.
+	// +kubebuilder:validation:Optional
+	TenantAuthEndpoint *string `json:"tenantAuthEndpoint" tf:"tenant_auth_endpoint,omitempty"`
+
+	// Should the www-authenticate provider be omitted from the request? Defaults to false.
+	// Should the www-authenticate provider be omitted from the request? Defaults to `false`
+	// +kubebuilder:validation:Optional
+	WwwAuthenticationDisabled *bool `json:"wwwAuthenticationDisabled,omitempty" tf:"www_authentication_disabled,omitempty"`
+}
+
+type WindowsFunctionAppAuthSettingsV2AppleV2InitParameters struct {
+
+	// The OAuth 2.0 client ID that was created for the app used for authentication.
+	// The OpenID Connect Client ID for the Apple web application.
+ ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Apple Login. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` +} + +type WindowsFunctionAppAuthSettingsV2AppleV2Observation struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Apple web application. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Apple Login. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type WindowsFunctionAppAuthSettingsV2AppleV2Parameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Apple web application. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Apple Login. 
+ // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName" tf:"client_secret_setting_name,omitempty"` +} + +type WindowsFunctionAppAuthSettingsV2AzureStaticWebAppV2InitParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Static Web App Authentication. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` +} + +type WindowsFunctionAppAuthSettingsV2AzureStaticWebAppV2Observation struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Static Web App Authentication. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` +} + +type WindowsFunctionAppAuthSettingsV2AzureStaticWebAppV2Parameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Static Web App Authentication. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` +} + +type WindowsFunctionAppAuthSettingsV2CustomOidcV2InitParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with this Custom OIDC. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The name which should be used for this Storage Account. + // The name of the Custom OIDC Authentication Provider. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The name of the claim that contains the users name. + // The name of the claim that contains the users name. + NameClaimType *string `json:"nameClaimType,omitempty" tf:"name_claim_type,omitempty"` + + // The app setting name that contains the client_secret value used for the Custom OIDC Login. 
+ // The endpoint that contains all the configuration endpoints for this Custom OIDC provider. + OpenIDConfigurationEndpoint *string `json:"openidConfigurationEndpoint,omitempty" tf:"openid_configuration_endpoint,omitempty"` + + // The list of the scopes that should be requested while authenticating. + // The list of the scopes that should be requested while authenticating. + Scopes []*string `json:"scopes,omitempty" tf:"scopes,omitempty"` +} + +type WindowsFunctionAppAuthSettingsV2CustomOidcV2Observation struct { + + // The endpoint to make the Authorisation Request as supplied by openid_configuration_endpoint response. + // The endpoint to make the Authorisation Request. + AuthorisationEndpoint *string `json:"authorisationEndpoint,omitempty" tf:"authorisation_endpoint,omitempty"` + + // The endpoint that provides the keys necessary to validate the token as supplied by openid_configuration_endpoint response. + // The endpoint that provides the keys necessary to validate the token. + CertificationURI *string `json:"certificationUri,omitempty" tf:"certification_uri,omitempty"` + + // The Client Credential Method used. + // The Client Credential Method used. Currently the only supported value is `ClientSecretPost`. + ClientCredentialMethod *string `json:"clientCredentialMethod,omitempty" tf:"client_credential_method,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with this Custom OIDC. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The App Setting name that contains the secret for this Custom OIDC Client. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The endpoint that issued the Token as supplied by openid_configuration_endpoint response. 
+	// The endpoint that issued the Token.
+	IssuerEndpoint *string `json:"issuerEndpoint,omitempty" tf:"issuer_endpoint,omitempty"`
+
+	// The name which should be used for this Custom OIDC Authentication Provider.
+	// The name of the Custom OIDC Authentication Provider.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// The name of the claim that contains the users name.
+	// The name of the claim that contains the users name.
+	NameClaimType *string `json:"nameClaimType,omitempty" tf:"name_claim_type,omitempty"`
+
+	// The endpoint that contains all the configuration endpoints for this Custom OIDC provider.
+	// The endpoint that contains all the configuration endpoints for this Custom OIDC provider.
+	OpenIDConfigurationEndpoint *string `json:"openidConfigurationEndpoint,omitempty" tf:"openid_configuration_endpoint,omitempty"`
+
+	// The list of the scopes that should be requested while authenticating.
+	// The list of the scopes that should be requested while authenticating.
+	Scopes []*string `json:"scopes,omitempty" tf:"scopes,omitempty"`
+
+	// The endpoint used to request a Token as supplied by openid_configuration_endpoint response.
+	// The endpoint used to request a Token.
+	TokenEndpoint *string `json:"tokenEndpoint,omitempty" tf:"token_endpoint,omitempty"`
+}
+
+type WindowsFunctionAppAuthSettingsV2CustomOidcV2Parameters struct {
+
+	// The OAuth 2.0 client ID that was created for the app used for authentication.
+	// The ID of the Client to use to authenticate with this Custom OIDC.
+	// +kubebuilder:validation:Optional
+	ClientID *string `json:"clientId" tf:"client_id,omitempty"`
+
+	// The name which should be used for this Custom OIDC Authentication Provider.
+	// The name of the Custom OIDC Authentication Provider.
+	// +kubebuilder:validation:Optional
+	Name *string `json:"name" tf:"name,omitempty"`
+
+	// The name of the claim that contains the users name.
+	// The name of the claim that contains the users name.
+ // +kubebuilder:validation:Optional + NameClaimType *string `json:"nameClaimType,omitempty" tf:"name_claim_type,omitempty"` + + // The app setting name that contains the client_secret value used for the Custom OIDC Login. + // The endpoint that contains all the configuration endpoints for this Custom OIDC provider. + // +kubebuilder:validation:Optional + OpenIDConfigurationEndpoint *string `json:"openidConfigurationEndpoint" tf:"openid_configuration_endpoint,omitempty"` + + // The list of the scopes that should be requested while authenticating. + // The list of the scopes that should be requested while authenticating. + // +kubebuilder:validation:Optional + Scopes []*string `json:"scopes,omitempty" tf:"scopes,omitempty"` +} + +type WindowsFunctionAppAuthSettingsV2FacebookV2InitParameters struct { + + // The App ID of the Facebook app used for login. + // The App ID of the Facebook app used for login. + AppID *string `json:"appId,omitempty" tf:"app_id,omitempty"` + + // The app setting name that contains the app_secret value used for Facebook Login. + // The app setting name that contains the `app_secret` value used for Facebook Login. + AppSecretSettingName *string `json:"appSecretSettingName,omitempty" tf:"app_secret_setting_name,omitempty"` + + // The version of the Facebook API to be used while logging in. + // The version of the Facebook API to be used while logging in. + GraphAPIVersion *string `json:"graphApiVersion,omitempty" tf:"graph_api_version,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of scopes to be requested as part of Facebook Login authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type WindowsFunctionAppAuthSettingsV2FacebookV2Observation struct { + + // The App ID of the Facebook app used for login. + // The App ID of the Facebook app used for login. 
+ AppID *string `json:"appId,omitempty" tf:"app_id,omitempty"` + + // The app setting name that contains the app_secret value used for Facebook Login. + // The app setting name that contains the `app_secret` value used for Facebook Login. + AppSecretSettingName *string `json:"appSecretSettingName,omitempty" tf:"app_secret_setting_name,omitempty"` + + // The version of the Facebook API to be used while logging in. + // The version of the Facebook API to be used while logging in. + GraphAPIVersion *string `json:"graphApiVersion,omitempty" tf:"graph_api_version,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of scopes to be requested as part of Facebook Login authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type WindowsFunctionAppAuthSettingsV2FacebookV2Parameters struct { + + // The App ID of the Facebook app used for login. + // The App ID of the Facebook app used for login. + // +kubebuilder:validation:Optional + AppID *string `json:"appId" tf:"app_id,omitempty"` + + // The app setting name that contains the app_secret value used for Facebook Login. + // The app setting name that contains the `app_secret` value used for Facebook Login. + // +kubebuilder:validation:Optional + AppSecretSettingName *string `json:"appSecretSettingName" tf:"app_secret_setting_name,omitempty"` + + // The version of the Facebook API to be used while logging in. + // The version of the Facebook API to be used while logging in. + // +kubebuilder:validation:Optional + GraphAPIVersion *string `json:"graphApiVersion,omitempty" tf:"graph_api_version,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of scopes to be requested as part of Facebook Login authentication. 
+ // +kubebuilder:validation:Optional + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type WindowsFunctionAppAuthSettingsV2GithubV2InitParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the GitHub app used for login. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for GitHub Login. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type WindowsFunctionAppAuthSettingsV2GithubV2Observation struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the GitHub app used for login. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for GitHub Login. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. 
+ LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type WindowsFunctionAppAuthSettingsV2GithubV2Parameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the GitHub app used for login. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for GitHub Login. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + // +kubebuilder:validation:Optional + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type WindowsFunctionAppAuthSettingsV2GoogleV2InitParameters struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed Audiences that will be requested as part of Google Sign-In authentication. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Google web application. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Google Login. 
+ ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of Login scopes that will be requested as part of Google Sign-In authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type WindowsFunctionAppAuthSettingsV2GoogleV2Observation struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed Audiences that will be requested as part of Google Sign-In authentication. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Google web application. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Google Login. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of Login scopes that will be requested as part of Google Sign-In authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type WindowsFunctionAppAuthSettingsV2GoogleV2Parameters struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed Audiences that will be requested as part of Google Sign-In authentication. 
+ // +kubebuilder:validation:Optional + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Google web application. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Google Login. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of Login scopes that will be requested as part of Google Sign-In authentication. + // +kubebuilder:validation:Optional + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type WindowsFunctionAppAuthSettingsV2InitParameters struct { + + // An active_directory_v2 block as defined below. + ActiveDirectoryV2 *WindowsFunctionAppAuthSettingsV2ActiveDirectoryV2InitParameters `json:"activeDirectoryV2,omitempty" tf:"active_directory_v2,omitempty"` + + // An apple_v2 block as defined below. + AppleV2 *WindowsFunctionAppAuthSettingsV2AppleV2InitParameters `json:"appleV2,omitempty" tf:"apple_v2,omitempty"` + + // Should the AuthV2 Settings be enabled. Defaults to false. + // Should the AuthV2 Settings be enabled. Defaults to `false` + AuthEnabled *bool `json:"authEnabled,omitempty" tf:"auth_enabled,omitempty"` + + // An azure_static_web_app_v2 block as defined below. + AzureStaticWebAppV2 *WindowsFunctionAppAuthSettingsV2AzureStaticWebAppV2InitParameters `json:"azureStaticWebAppV2,omitempty" tf:"azure_static_web_app_v2,omitempty"` + + // The path to the App Auth settings. 
+ // The path to the App Auth settings. **Note:** Relative Paths are evaluated from the Site Root directory. + ConfigFilePath *string `json:"configFilePath,omitempty" tf:"config_file_path,omitempty"` + + // Zero or more custom_oidc_v2 blocks as defined below. + CustomOidcV2 []WindowsFunctionAppAuthSettingsV2CustomOidcV2InitParameters `json:"customOidcV2,omitempty" tf:"custom_oidc_v2,omitempty"` + + // The Default Authentication Provider to use when the unauthenticated_action is set to RedirectToLoginPage. Possible values include: apple, azureactivedirectory, facebook, github, google, twitter and the name of your custom_oidc_v2 provider. + // The Default Authentication Provider to use when the `unauthenticated_action` is set to `RedirectToLoginPage`. Possible values include: `apple`, `azureactivedirectory`, `facebook`, `github`, `google`, `twitter` and the `name` of your `custom_oidc_v2` provider. + DefaultProvider *string `json:"defaultProvider,omitempty" tf:"default_provider,omitempty"` + + // The paths which should be excluded from the unauthenticated_action when it is set to RedirectToLoginPage. + // The paths which should be excluded from the `unauthenticated_action` when it is set to `RedirectToLoginPage`. + ExcludedPaths []*string `json:"excludedPaths,omitempty" tf:"excluded_paths,omitempty"` + + // A facebook_v2 block as defined below. + FacebookV2 *WindowsFunctionAppAuthSettingsV2FacebookV2InitParameters `json:"facebookV2,omitempty" tf:"facebook_v2,omitempty"` + + // The convention used to determine the url of the request made. Possible values include NoProxy, Standard, Custom. Defaults to NoProxy. + // The convention used to determine the url of the request made. Possible values include `ForwardProxyConventionNoProxy`, `ForwardProxyConventionStandard`, `ForwardProxyConventionCustom`. 
Defaults to `ForwardProxyConventionNoProxy` + ForwardProxyConvention *string `json:"forwardProxyConvention,omitempty" tf:"forward_proxy_convention,omitempty"` + + // The name of the custom header containing the host of the request. + // The name of the header containing the host of the request. + ForwardProxyCustomHostHeaderName *string `json:"forwardProxyCustomHostHeaderName,omitempty" tf:"forward_proxy_custom_host_header_name,omitempty"` + + // The name of the custom header containing the scheme of the request. + // The name of the header containing the scheme of the request. + ForwardProxyCustomSchemeHeaderName *string `json:"forwardProxyCustomSchemeHeaderName,omitempty" tf:"forward_proxy_custom_scheme_header_name,omitempty"` + + // A github_v2 block as defined below. + GithubV2 *WindowsFunctionAppAuthSettingsV2GithubV2InitParameters `json:"githubV2,omitempty" tf:"github_v2,omitempty"` + + // A google_v2 block as defined below. + GoogleV2 *WindowsFunctionAppAuthSettingsV2GoogleV2InitParameters `json:"googleV2,omitempty" tf:"google_v2,omitempty"` + + // The prefix that should precede all the authentication and authorisation paths. Defaults to /.auth. + // The prefix that should precede all the authentication and authorisation paths. Defaults to `/.auth` + HTTPRouteAPIPrefix *string `json:"httpRouteApiPrefix,omitempty" tf:"http_route_api_prefix,omitempty"` + + // A login block as defined below. + Login *WindowsFunctionAppAuthSettingsV2LoginInitParameters `json:"login,omitempty" tf:"login,omitempty"` + + // A microsoft_v2 block as defined below. + MicrosoftV2 *WindowsFunctionAppAuthSettingsV2MicrosoftV2InitParameters `json:"microsoftV2,omitempty" tf:"microsoft_v2,omitempty"` + + // Should the authentication flow be used for all requests. + // Should the authentication flow be used for all requests. + RequireAuthentication *bool `json:"requireAuthentication,omitempty" tf:"require_authentication,omitempty"` + + // Should HTTPS be required on connections? 
Defaults to true. + // Should HTTPS be required on connections? Defaults to true. + RequireHTTPS *bool `json:"requireHttps,omitempty" tf:"require_https,omitempty"` + + // The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to ~1. + // The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to `~1` + RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` + + // A twitter_v2 block as defined below. + TwitterV2 *WindowsFunctionAppAuthSettingsV2TwitterV2InitParameters `json:"twitterV2,omitempty" tf:"twitter_v2,omitempty"` + + // The action to take for requests made without authentication. Possible values include RedirectToLoginPage, AllowAnonymous, Return401, and Return403. Defaults to RedirectToLoginPage. + // The action to take for requests made without authentication. Possible values include `RedirectToLoginPage`, `AllowAnonymous`, `Return401`, and `Return403`. Defaults to `RedirectToLoginPage`. + UnauthenticatedAction *string `json:"unauthenticatedAction,omitempty" tf:"unauthenticated_action,omitempty"` +} + +type WindowsFunctionAppAuthSettingsV2LoginInitParameters struct { + + // External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. + // External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. **Note:** URLs within the current domain are always implicitly allowed. + AllowedExternalRedirectUrls []*string `json:"allowedExternalRedirectUrls,omitempty" tf:"allowed_external_redirect_urls,omitempty"` + + // The method by which cookies expire. Possible values include: FixedTime, and IdentityProviderDerived. Defaults to FixedTime. + // The method by which cookies expire. 
Possible values include: `FixedTime`, and `IdentityProviderDerived`. Defaults to `FixedTime`. + CookieExpirationConvention *string `json:"cookieExpirationConvention,omitempty" tf:"cookie_expiration_convention,omitempty"` + + // The time after the request is made when the session cookie should expire. Defaults to 08:00:00. + // The time after the request is made when the session cookie should expire. Defaults to `08:00:00`. + CookieExpirationTime *string `json:"cookieExpirationTime,omitempty" tf:"cookie_expiration_time,omitempty"` + + // The endpoint to which logout requests should be made. + // The endpoint to which logout requests should be made. + LogoutEndpoint *string `json:"logoutEndpoint,omitempty" tf:"logout_endpoint,omitempty"` + + // The time after the request is made when the nonce should expire. Defaults to 00:05:00. + // The time after the request is made when the nonce should expire. Defaults to `00:05:00`. + NonceExpirationTime *string `json:"nonceExpirationTime,omitempty" tf:"nonce_expiration_time,omitempty"` + + // Should the fragments from the request be preserved after the login request is made. Defaults to false. + // Should the fragments from the request be preserved after the login request is made. Defaults to `false`. + PreserveURLFragmentsForLogins *bool `json:"preserveUrlFragmentsForLogins,omitempty" tf:"preserve_url_fragments_for_logins,omitempty"` + + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + TokenRefreshExtensionTime *float64 `json:"tokenRefreshExtensionTime,omitempty" tf:"token_refresh_extension_time,omitempty"` + + // Should the Token Store configuration Enabled. Defaults to false + // Should the Token Store configuration Enabled. 
Defaults to `false` + TokenStoreEnabled *bool `json:"tokenStoreEnabled,omitempty" tf:"token_store_enabled,omitempty"` + + // The directory path in the App Filesystem in which the tokens will be stored. + // The directory path in the App Filesystem in which the tokens will be stored. + TokenStorePath *string `json:"tokenStorePath,omitempty" tf:"token_store_path,omitempty"` + + // The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + // The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + TokenStoreSASSettingName *string `json:"tokenStoreSasSettingName,omitempty" tf:"token_store_sas_setting_name,omitempty"` + + // Should the nonce be validated while completing the login flow. Defaults to true. + // Should the nonce be validated while completing the login flow. Defaults to `true`. + ValidateNonce *bool `json:"validateNonce,omitempty" tf:"validate_nonce,omitempty"` +} + +type WindowsFunctionAppAuthSettingsV2LoginObservation struct { + + // External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. + // External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. **Note:** URLs within the current domain are always implicitly allowed. + AllowedExternalRedirectUrls []*string `json:"allowedExternalRedirectUrls,omitempty" tf:"allowed_external_redirect_urls,omitempty"` + + // The method by which cookies expire. Possible values include: FixedTime, and IdentityProviderDerived. Defaults to FixedTime. + // The method by which cookies expire. Possible values include: `FixedTime`, and `IdentityProviderDerived`. Defaults to `FixedTime`. 
+ CookieExpirationConvention *string `json:"cookieExpirationConvention,omitempty" tf:"cookie_expiration_convention,omitempty"` + + // The time after the request is made when the session cookie should expire. Defaults to 08:00:00. + // The time after the request is made when the session cookie should expire. Defaults to `08:00:00`. + CookieExpirationTime *string `json:"cookieExpirationTime,omitempty" tf:"cookie_expiration_time,omitempty"` + + // The endpoint to which logout requests should be made. + // The endpoint to which logout requests should be made. + LogoutEndpoint *string `json:"logoutEndpoint,omitempty" tf:"logout_endpoint,omitempty"` + + // The time after the request is made when the nonce should expire. Defaults to 00:05:00. + // The time after the request is made when the nonce should expire. Defaults to `00:05:00`. + NonceExpirationTime *string `json:"nonceExpirationTime,omitempty" tf:"nonce_expiration_time,omitempty"` + + // Should the fragments from the request be preserved after the login request is made. Defaults to false. + // Should the fragments from the request be preserved after the login request is made. Defaults to `false`. + PreserveURLFragmentsForLogins *bool `json:"preserveUrlFragmentsForLogins,omitempty" tf:"preserve_url_fragments_for_logins,omitempty"` + + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + TokenRefreshExtensionTime *float64 `json:"tokenRefreshExtensionTime,omitempty" tf:"token_refresh_extension_time,omitempty"` + + // Should the Token Store configuration Enabled. Defaults to false + // Should the Token Store configuration Enabled. 
Defaults to `false` + TokenStoreEnabled *bool `json:"tokenStoreEnabled,omitempty" tf:"token_store_enabled,omitempty"` + + // The directory path in the App Filesystem in which the tokens will be stored. + // The directory path in the App Filesystem in which the tokens will be stored. + TokenStorePath *string `json:"tokenStorePath,omitempty" tf:"token_store_path,omitempty"` + + // The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + // The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + TokenStoreSASSettingName *string `json:"tokenStoreSasSettingName,omitempty" tf:"token_store_sas_setting_name,omitempty"` + + // Should the nonce be validated while completing the login flow. Defaults to true. + // Should the nonce be validated while completing the login flow. Defaults to `true`. + ValidateNonce *bool `json:"validateNonce,omitempty" tf:"validate_nonce,omitempty"` +} + +type WindowsFunctionAppAuthSettingsV2LoginParameters struct { + + // External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. + // External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. **Note:** URLs within the current domain are always implicitly allowed. + // +kubebuilder:validation:Optional + AllowedExternalRedirectUrls []*string `json:"allowedExternalRedirectUrls,omitempty" tf:"allowed_external_redirect_urls,omitempty"` + + // The method by which cookies expire. Possible values include: FixedTime, and IdentityProviderDerived. Defaults to FixedTime. + // The method by which cookies expire. Possible values include: `FixedTime`, and `IdentityProviderDerived`. Defaults to `FixedTime`. 
+ // +kubebuilder:validation:Optional + CookieExpirationConvention *string `json:"cookieExpirationConvention,omitempty" tf:"cookie_expiration_convention,omitempty"` + + // The time after the request is made when the session cookie should expire. Defaults to 08:00:00. + // The time after the request is made when the session cookie should expire. Defaults to `08:00:00`. + // +kubebuilder:validation:Optional + CookieExpirationTime *string `json:"cookieExpirationTime,omitempty" tf:"cookie_expiration_time,omitempty"` + + // The endpoint to which logout requests should be made. + // The endpoint to which logout requests should be made. + // +kubebuilder:validation:Optional + LogoutEndpoint *string `json:"logoutEndpoint,omitempty" tf:"logout_endpoint,omitempty"` + + // The time after the request is made when the nonce should expire. Defaults to 00:05:00. + // The time after the request is made when the nonce should expire. Defaults to `00:05:00`. + // +kubebuilder:validation:Optional + NonceExpirationTime *string `json:"nonceExpirationTime,omitempty" tf:"nonce_expiration_time,omitempty"` + + // Should the fragments from the request be preserved after the login request is made. Defaults to false. + // Should the fragments from the request be preserved after the login request is made. Defaults to `false`. + // +kubebuilder:validation:Optional + PreserveURLFragmentsForLogins *bool `json:"preserveUrlFragmentsForLogins,omitempty" tf:"preserve_url_fragments_for_logins,omitempty"` + + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + // +kubebuilder:validation:Optional + TokenRefreshExtensionTime *float64 `json:"tokenRefreshExtensionTime,omitempty" tf:"token_refresh_extension_time,omitempty"` + + // Should the Token Store configuration Enabled. 
Defaults to false + // Should the Token Store configuration Enabled. Defaults to `false` + // +kubebuilder:validation:Optional + TokenStoreEnabled *bool `json:"tokenStoreEnabled,omitempty" tf:"token_store_enabled,omitempty"` + + // The directory path in the App Filesystem in which the tokens will be stored. + // The directory path in the App Filesystem in which the tokens will be stored. + // +kubebuilder:validation:Optional + TokenStorePath *string `json:"tokenStorePath,omitempty" tf:"token_store_path,omitempty"` + + // The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + // The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + // +kubebuilder:validation:Optional + TokenStoreSASSettingName *string `json:"tokenStoreSasSettingName,omitempty" tf:"token_store_sas_setting_name,omitempty"` + + // Should the nonce be validated while completing the login flow. Defaults to true. + // Should the nonce be validated while completing the login flow. Defaults to `true`. + // +kubebuilder:validation:Optional + ValidateNonce *bool `json:"validateNonce,omitempty" tf:"validate_nonce,omitempty"` +} + +type WindowsFunctionAppAuthSettingsV2MicrosoftV2InitParameters struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OAuth 2.0 client ID that was created for the app used for authentication. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. 
+ // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // The list of Login scopes that will be requested as part of Microsoft Account authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type WindowsFunctionAppAuthSettingsV2MicrosoftV2Observation struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OAuth 2.0 client ID that was created for the app used for authentication. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // The list of Login scopes that will be requested as part of Microsoft Account authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type WindowsFunctionAppAuthSettingsV2MicrosoftV2Parameters struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. 
+ // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // +kubebuilder:validation:Optional + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OAuth 2.0 client ID that was created for the app used for authentication. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // The list of Login scopes that will be requested as part of Microsoft Account authentication. + // +kubebuilder:validation:Optional + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type WindowsFunctionAppAuthSettingsV2Observation struct { + + // An active_directory_v2 block as defined below. + ActiveDirectoryV2 *WindowsFunctionAppAuthSettingsV2ActiveDirectoryV2Observation `json:"activeDirectoryV2,omitempty" tf:"active_directory_v2,omitempty"` + + // An apple_v2 block as defined below. + AppleV2 *WindowsFunctionAppAuthSettingsV2AppleV2Observation `json:"appleV2,omitempty" tf:"apple_v2,omitempty"` + + // Should the AuthV2 Settings be enabled. Defaults to false. + // Should the AuthV2 Settings be enabled. Defaults to `false` + AuthEnabled *bool `json:"authEnabled,omitempty" tf:"auth_enabled,omitempty"` + + // An azure_static_web_app_v2 block as defined below. 
+ AzureStaticWebAppV2 *WindowsFunctionAppAuthSettingsV2AzureStaticWebAppV2Observation `json:"azureStaticWebAppV2,omitempty" tf:"azure_static_web_app_v2,omitempty"` + + // The path to the App Auth settings. + // The path to the App Auth settings. **Note:** Relative Paths are evaluated from the Site Root directory. + ConfigFilePath *string `json:"configFilePath,omitempty" tf:"config_file_path,omitempty"` + + // Zero or more custom_oidc_v2 blocks as defined below. + CustomOidcV2 []WindowsFunctionAppAuthSettingsV2CustomOidcV2Observation `json:"customOidcV2,omitempty" tf:"custom_oidc_v2,omitempty"` + + // The Default Authentication Provider to use when the unauthenticated_action is set to RedirectToLoginPage. Possible values include: apple, azureactivedirectory, facebook, github, google, twitter and the name of your custom_oidc_v2 provider. + // The Default Authentication Provider to use when the `unauthenticated_action` is set to `RedirectToLoginPage`. Possible values include: `apple`, `azureactivedirectory`, `facebook`, `github`, `google`, `twitter` and the `name` of your `custom_oidc_v2` provider. + DefaultProvider *string `json:"defaultProvider,omitempty" tf:"default_provider,omitempty"` + + // The paths which should be excluded from the unauthenticated_action when it is set to RedirectToLoginPage. + // The paths which should be excluded from the `unauthenticated_action` when it is set to `RedirectToLoginPage`. + ExcludedPaths []*string `json:"excludedPaths,omitempty" tf:"excluded_paths,omitempty"` + + // A facebook_v2 block as defined below. + FacebookV2 *WindowsFunctionAppAuthSettingsV2FacebookV2Observation `json:"facebookV2,omitempty" tf:"facebook_v2,omitempty"` + + // The convention used to determine the url of the request made. Possible values include NoProxy, Standard, Custom. Defaults to NoProxy. + // The convention used to determine the url of the request made. 
Possible values include `ForwardProxyConventionNoProxy`, `ForwardProxyConventionStandard`, `ForwardProxyConventionCustom`. Defaults to `ForwardProxyConventionNoProxy` + ForwardProxyConvention *string `json:"forwardProxyConvention,omitempty" tf:"forward_proxy_convention,omitempty"` + + // The name of the custom header containing the host of the request. + // The name of the header containing the host of the request. + ForwardProxyCustomHostHeaderName *string `json:"forwardProxyCustomHostHeaderName,omitempty" tf:"forward_proxy_custom_host_header_name,omitempty"` + + // The name of the custom header containing the scheme of the request. + // The name of the header containing the scheme of the request. + ForwardProxyCustomSchemeHeaderName *string `json:"forwardProxyCustomSchemeHeaderName,omitempty" tf:"forward_proxy_custom_scheme_header_name,omitempty"` + + // A github_v2 block as defined below. + GithubV2 *WindowsFunctionAppAuthSettingsV2GithubV2Observation `json:"githubV2,omitempty" tf:"github_v2,omitempty"` + + // A google_v2 block as defined below. + GoogleV2 *WindowsFunctionAppAuthSettingsV2GoogleV2Observation `json:"googleV2,omitempty" tf:"google_v2,omitempty"` + + // The prefix that should precede all the authentication and authorisation paths. Defaults to /.auth. + // The prefix that should precede all the authentication and authorisation paths. Defaults to `/.auth` + HTTPRouteAPIPrefix *string `json:"httpRouteApiPrefix,omitempty" tf:"http_route_api_prefix,omitempty"` + + // A login block as defined below. + Login *WindowsFunctionAppAuthSettingsV2LoginObservation `json:"login,omitempty" tf:"login,omitempty"` + + // A microsoft_v2 block as defined below. + MicrosoftV2 *WindowsFunctionAppAuthSettingsV2MicrosoftV2Observation `json:"microsoftV2,omitempty" tf:"microsoft_v2,omitempty"` + + // Should the authentication flow be used for all requests. + // Should the authentication flow be used for all requests. 
+ RequireAuthentication *bool `json:"requireAuthentication,omitempty" tf:"require_authentication,omitempty"` + + // Should HTTPS be required on connections? Defaults to true. + // Should HTTPS be required on connections? Defaults to true. + RequireHTTPS *bool `json:"requireHttps,omitempty" tf:"require_https,omitempty"` + + // The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to ~1. + // The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to `~1` + RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` + + // A twitter_v2 block as defined below. + TwitterV2 *WindowsFunctionAppAuthSettingsV2TwitterV2Observation `json:"twitterV2,omitempty" tf:"twitter_v2,omitempty"` + + // The action to take for requests made without authentication. Possible values include RedirectToLoginPage, AllowAnonymous, Return401, and Return403. Defaults to RedirectToLoginPage. + // The action to take for requests made without authentication. Possible values include `RedirectToLoginPage`, `AllowAnonymous`, `Return401`, and `Return403`. Defaults to `RedirectToLoginPage`. + UnauthenticatedAction *string `json:"unauthenticatedAction,omitempty" tf:"unauthenticated_action,omitempty"` +} + +type WindowsFunctionAppAuthSettingsV2Parameters struct { + + // An active_directory_v2 block as defined below. + // +kubebuilder:validation:Optional + ActiveDirectoryV2 *WindowsFunctionAppAuthSettingsV2ActiveDirectoryV2Parameters `json:"activeDirectoryV2,omitempty" tf:"active_directory_v2,omitempty"` + + // An apple_v2 block as defined below. + // +kubebuilder:validation:Optional + AppleV2 *WindowsFunctionAppAuthSettingsV2AppleV2Parameters `json:"appleV2,omitempty" tf:"apple_v2,omitempty"` + + // Should the AuthV2 Settings be enabled. Defaults to false. + // Should the AuthV2 Settings be enabled. 
Defaults to `false` + // +kubebuilder:validation:Optional + AuthEnabled *bool `json:"authEnabled,omitempty" tf:"auth_enabled,omitempty"` + + // An azure_static_web_app_v2 block as defined below. + // +kubebuilder:validation:Optional + AzureStaticWebAppV2 *WindowsFunctionAppAuthSettingsV2AzureStaticWebAppV2Parameters `json:"azureStaticWebAppV2,omitempty" tf:"azure_static_web_app_v2,omitempty"` + + // The path to the App Auth settings. + // The path to the App Auth settings. **Note:** Relative Paths are evaluated from the Site Root directory. + // +kubebuilder:validation:Optional + ConfigFilePath *string `json:"configFilePath,omitempty" tf:"config_file_path,omitempty"` + + // Zero or more custom_oidc_v2 blocks as defined below. + // +kubebuilder:validation:Optional + CustomOidcV2 []WindowsFunctionAppAuthSettingsV2CustomOidcV2Parameters `json:"customOidcV2,omitempty" tf:"custom_oidc_v2,omitempty"` + + // The Default Authentication Provider to use when the unauthenticated_action is set to RedirectToLoginPage. Possible values include: apple, azureactivedirectory, facebook, github, google, twitter and the name of your custom_oidc_v2 provider. + // The Default Authentication Provider to use when the `unauthenticated_action` is set to `RedirectToLoginPage`. Possible values include: `apple`, `azureactivedirectory`, `facebook`, `github`, `google`, `twitter` and the `name` of your `custom_oidc_v2` provider. + // +kubebuilder:validation:Optional + DefaultProvider *string `json:"defaultProvider,omitempty" tf:"default_provider,omitempty"` + + // The paths which should be excluded from the unauthenticated_action when it is set to RedirectToLoginPage. + // The paths which should be excluded from the `unauthenticated_action` when it is set to `RedirectToLoginPage`. + // +kubebuilder:validation:Optional + ExcludedPaths []*string `json:"excludedPaths,omitempty" tf:"excluded_paths,omitempty"` + + // A facebook_v2 block as defined below. 
+ // +kubebuilder:validation:Optional + FacebookV2 *WindowsFunctionAppAuthSettingsV2FacebookV2Parameters `json:"facebookV2,omitempty" tf:"facebook_v2,omitempty"` + + // The convention used to determine the url of the request made. Possible values include NoProxy, Standard, Custom. Defaults to NoProxy. + // The convention used to determine the url of the request made. Possible values include `ForwardProxyConventionNoProxy`, `ForwardProxyConventionStandard`, `ForwardProxyConventionCustom`. Defaults to `ForwardProxyConventionNoProxy` + // +kubebuilder:validation:Optional + ForwardProxyConvention *string `json:"forwardProxyConvention,omitempty" tf:"forward_proxy_convention,omitempty"` + + // The name of the custom header containing the host of the request. + // The name of the header containing the host of the request. + // +kubebuilder:validation:Optional + ForwardProxyCustomHostHeaderName *string `json:"forwardProxyCustomHostHeaderName,omitempty" tf:"forward_proxy_custom_host_header_name,omitempty"` + + // The name of the custom header containing the scheme of the request. + // The name of the header containing the scheme of the request. + // +kubebuilder:validation:Optional + ForwardProxyCustomSchemeHeaderName *string `json:"forwardProxyCustomSchemeHeaderName,omitempty" tf:"forward_proxy_custom_scheme_header_name,omitempty"` + + // A github_v2 block as defined below. + // +kubebuilder:validation:Optional + GithubV2 *WindowsFunctionAppAuthSettingsV2GithubV2Parameters `json:"githubV2,omitempty" tf:"github_v2,omitempty"` + + // A google_v2 block as defined below. + // +kubebuilder:validation:Optional + GoogleV2 *WindowsFunctionAppAuthSettingsV2GoogleV2Parameters `json:"googleV2,omitempty" tf:"google_v2,omitempty"` + + // The prefix that should precede all the authentication and authorisation paths. Defaults to /.auth. + // The prefix that should precede all the authentication and authorisation paths. 
Defaults to `/.auth` + // +kubebuilder:validation:Optional + HTTPRouteAPIPrefix *string `json:"httpRouteApiPrefix,omitempty" tf:"http_route_api_prefix,omitempty"` + + // A login block as defined below. + // +kubebuilder:validation:Optional + Login *WindowsFunctionAppAuthSettingsV2LoginParameters `json:"login" tf:"login,omitempty"` + + // A microsoft_v2 block as defined below. + // +kubebuilder:validation:Optional + MicrosoftV2 *WindowsFunctionAppAuthSettingsV2MicrosoftV2Parameters `json:"microsoftV2,omitempty" tf:"microsoft_v2,omitempty"` + + // Should the authentication flow be used for all requests. + // Should the authentication flow be used for all requests. + // +kubebuilder:validation:Optional + RequireAuthentication *bool `json:"requireAuthentication,omitempty" tf:"require_authentication,omitempty"` + + // Should HTTPS be required on connections? Defaults to true. + // Should HTTPS be required on connections? Defaults to true. + // +kubebuilder:validation:Optional + RequireHTTPS *bool `json:"requireHttps,omitempty" tf:"require_https,omitempty"` + + // The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to ~1. + // The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to `~1` + // +kubebuilder:validation:Optional + RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` + + // A twitter_v2 block as defined below. + // +kubebuilder:validation:Optional + TwitterV2 *WindowsFunctionAppAuthSettingsV2TwitterV2Parameters `json:"twitterV2,omitempty" tf:"twitter_v2,omitempty"` + + // The action to take for requests made without authentication. Possible values include RedirectToLoginPage, AllowAnonymous, Return401, and Return403. Defaults to RedirectToLoginPage. + // The action to take for requests made without authentication. Possible values include `RedirectToLoginPage`, `AllowAnonymous`, `Return401`, and `Return403`. Defaults to `RedirectToLoginPage`. 
+ // +kubebuilder:validation:Optional + UnauthenticatedAction *string `json:"unauthenticatedAction,omitempty" tf:"unauthenticated_action,omitempty"` +} + +type WindowsFunctionAppAuthSettingsV2TwitterV2InitParameters struct { + + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + ConsumerKey *string `json:"consumerKey,omitempty" tf:"consumer_key,omitempty"` + + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + ConsumerSecretSettingName *string `json:"consumerSecretSettingName,omitempty" tf:"consumer_secret_setting_name,omitempty"` +} + +type WindowsFunctionAppAuthSettingsV2TwitterV2Observation struct { + + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + ConsumerKey *string `json:"consumerKey,omitempty" tf:"consumer_key,omitempty"` + + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + ConsumerSecretSettingName *string `json:"consumerSecretSettingName,omitempty" tf:"consumer_secret_setting_name,omitempty"` +} + +type WindowsFunctionAppAuthSettingsV2TwitterV2Parameters struct { + + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // +kubebuilder:validation:Optional + ConsumerKey *string `json:"consumerKey" tf:"consumer_key,omitempty"` + + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. 
+ // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + // +kubebuilder:validation:Optional + ConsumerSecretSettingName *string `json:"consumerSecretSettingName" tf:"consumer_secret_setting_name,omitempty"` +} + +type WindowsFunctionAppBackupInitParameters struct { + + // Should this backup job be enabled? Defaults to true. + // Should this backup job be enabled? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The name which should be used for this Backup. + // The name which should be used for this Backup. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A schedule block as defined below. + Schedule *WindowsFunctionAppBackupScheduleInitParameters `json:"schedule,omitempty" tf:"schedule,omitempty"` +} + +type WindowsFunctionAppBackupObservation struct { + + // Should this backup job be enabled? Defaults to true. + // Should this backup job be enabled? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The name which should be used for this Backup. + // The name which should be used for this Backup. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A schedule block as defined below. + Schedule *WindowsFunctionAppBackupScheduleObservation `json:"schedule,omitempty" tf:"schedule,omitempty"` +} + +type WindowsFunctionAppBackupParameters struct { + + // Should this backup job be enabled? Defaults to true. + // Should this backup job be enabled? + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The name which should be used for this Backup. + // The name which should be used for this Backup. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // A schedule block as defined below. 
+ // +kubebuilder:validation:Optional + Schedule *WindowsFunctionAppBackupScheduleParameters `json:"schedule" tf:"schedule,omitempty"` + + // The SAS URL to the container. + // The SAS URL to the container. + // +kubebuilder:validation:Required + StorageAccountURLSecretRef v1.SecretKeySelector `json:"storageAccountUrlSecretRef" tf:"-"` +} + +type WindowsFunctionAppBackupScheduleInitParameters struct { + + // How often the backup should be executed (e.g. for weekly backup, this should be set to 7 and frequency_unit should be set to Day). + // How often the backup should be executed (e.g. for weekly backup, this should be set to `7` and `frequency_unit` should be set to `Day`). + FrequencyInterval *float64 `json:"frequencyInterval,omitempty" tf:"frequency_interval,omitempty"` + + // The unit of time for how often the backup should take place. Possible values include: Day and Hour. + // The unit of time for how often the backup should take place. Possible values include: `Day` and `Hour`. + FrequencyUnit *string `json:"frequencyUnit,omitempty" tf:"frequency_unit,omitempty"` + + // Should the service keep at least one backup, regardless of age of backup. Defaults to false. + // Should the service keep at least one backup, regardless of age of backup. Defaults to `false`. + KeepAtLeastOneBackup *bool `json:"keepAtLeastOneBackup,omitempty" tf:"keep_at_least_one_backup,omitempty"` + + // After how many days backups should be deleted. Defaults to 30. + // After how many days backups should be deleted. + RetentionPeriodDays *float64 `json:"retentionPeriodDays,omitempty" tf:"retention_period_days,omitempty"` + + // When the schedule should start working in RFC-3339 format. + // When the schedule should start working in RFC-3339 format. + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` +} + +type WindowsFunctionAppBackupScheduleObservation struct { + + // How often the backup should be executed (e.g. 
for weekly backup, this should be set to 7 and frequency_unit should be set to Day). + // How often the backup should be executed (e.g. for weekly backup, this should be set to `7` and `frequency_unit` should be set to `Day`). + FrequencyInterval *float64 `json:"frequencyInterval,omitempty" tf:"frequency_interval,omitempty"` + + // The unit of time for how often the backup should take place. Possible values include: Day and Hour. + // The unit of time for how often the backup should take place. Possible values include: `Day` and `Hour`. + FrequencyUnit *string `json:"frequencyUnit,omitempty" tf:"frequency_unit,omitempty"` + + // Should the service keep at least one backup, regardless of age of backup. Defaults to false. + // Should the service keep at least one backup, regardless of age of backup. Defaults to `false`. + KeepAtLeastOneBackup *bool `json:"keepAtLeastOneBackup,omitempty" tf:"keep_at_least_one_backup,omitempty"` + + // The time the backup was last attempted. + LastExecutionTime *string `json:"lastExecutionTime,omitempty" tf:"last_execution_time,omitempty"` + + // After how many days backups should be deleted. Defaults to 30. + // After how many days backups should be deleted. + RetentionPeriodDays *float64 `json:"retentionPeriodDays,omitempty" tf:"retention_period_days,omitempty"` + + // When the schedule should start working in RFC-3339 format. + // When the schedule should start working in RFC-3339 format. + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` +} + +type WindowsFunctionAppBackupScheduleParameters struct { + + // How often the backup should be executed (e.g. for weekly backup, this should be set to 7 and frequency_unit should be set to Day). + // How often the backup should be executed (e.g. for weekly backup, this should be set to `7` and `frequency_unit` should be set to `Day`). 
+ // +kubebuilder:validation:Optional + FrequencyInterval *float64 `json:"frequencyInterval" tf:"frequency_interval,omitempty"` + + // The unit of time for how often the backup should take place. Possible values include: Day and Hour. + // The unit of time for how often the backup should take place. Possible values include: `Day` and `Hour`. + // +kubebuilder:validation:Optional + FrequencyUnit *string `json:"frequencyUnit" tf:"frequency_unit,omitempty"` + + // Should the service keep at least one backup, regardless of age of backup. Defaults to false. + // Should the service keep at least one backup, regardless of age of backup. Defaults to `false`. + // +kubebuilder:validation:Optional + KeepAtLeastOneBackup *bool `json:"keepAtLeastOneBackup,omitempty" tf:"keep_at_least_one_backup,omitempty"` + + // After how many days backups should be deleted. Defaults to 30. + // After how many days backups should be deleted. + // +kubebuilder:validation:Optional + RetentionPeriodDays *float64 `json:"retentionPeriodDays,omitempty" tf:"retention_period_days,omitempty"` + + // When the schedule should start working in RFC-3339 format. + // When the schedule should start working in RFC-3339 format. + // +kubebuilder:validation:Optional + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` +} + +type WindowsFunctionAppConnectionStringInitParameters struct { + + // The name which should be used for this Connection. + // The name which should be used for this Connection. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Type of database. Possible values include: APIHub, Custom, DocDb, EventHub, MySQL, NotificationHub, PostgreSQL, RedisCache, ServiceBus, SQLAzure, and SQLServer. + // Type of database. Possible values include: `MySQL`, `SQLServer`, `SQLAzure`, `Custom`, `NotificationHub`, `ServiceBus`, `EventHub`, `APIHub`, `DocDb`, `RedisCache`, and `PostgreSQL`. 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type WindowsFunctionAppConnectionStringObservation struct { + + // The name which should be used for this Connection. + // The name which should be used for this Connection. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Type of database. Possible values include: APIHub, Custom, DocDb, EventHub, MySQL, NotificationHub, PostgreSQL, RedisCache, ServiceBus, SQLAzure, and SQLServer. + // Type of database. Possible values include: `MySQL`, `SQLServer`, `SQLAzure`, `Custom`, `NotificationHub`, `ServiceBus`, `EventHub`, `APIHub`, `DocDb`, `RedisCache`, and `PostgreSQL`. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type WindowsFunctionAppConnectionStringParameters struct { + + // The name which should be used for this Connection. + // The name which should be used for this Connection. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Type of database. Possible values include: APIHub, Custom, DocDb, EventHub, MySQL, NotificationHub, PostgreSQL, RedisCache, ServiceBus, SQLAzure, and SQLServer. + // Type of database. Possible values include: `MySQL`, `SQLServer`, `SQLAzure`, `Custom`, `NotificationHub`, `ServiceBus`, `EventHub`, `APIHub`, `DocDb`, `RedisCache`, and `PostgreSQL`. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` + + // The connection string value. + // The connection string value. + // +kubebuilder:validation:Required + ValueSecretRef v1.SecretKeySelector `json:"valueSecretRef" tf:"-"` +} + +type WindowsFunctionAppIdentityInitParameters struct { + + // A list of User Assigned Managed Identity IDs to be assigned to this Windows Function App. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Windows Function App. 
Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type WindowsFunctionAppIdentityObservation struct { + + // A list of User Assigned Managed Identity IDs to be assigned to this Windows Function App. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Principal ID associated with this Managed Service Identity. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID associated with this Managed Service Identity. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Windows Function App. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type WindowsFunctionAppIdentityParameters struct { + + // A list of User Assigned Managed Identity IDs to be assigned to this Windows Function App. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Windows Function App. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type WindowsFunctionAppInitParameters struct { + + // A map of key-value pairs for App Settings and custom values. + // A map of key-value pairs for [App Settings](https://docs.microsoft.com/en-us/azure/azure-functions/functions-app-settings) and custom values. + // +mapType=granular + AppSettings map[string]*string `json:"appSettings,omitempty" tf:"app_settings,omitempty"` + + // A auth_settings block as defined below. 
+ AuthSettings *WindowsFunctionAppAuthSettingsInitParameters `json:"authSettings,omitempty" tf:"auth_settings,omitempty"` + + // A auth_settings_v2 block as defined below. + AuthSettingsV2 *WindowsFunctionAppAuthSettingsV2InitParameters `json:"authSettingsV2,omitempty" tf:"auth_settings_v2,omitempty"` + + // A backup block as defined below. + Backup *WindowsFunctionAppBackupInitParameters `json:"backup,omitempty" tf:"backup,omitempty"` + + // Should built in logging be enabled. Configures AzureWebJobsDashboard app setting based on the configured storage setting. Defaults to true. + // Should built in logging be enabled. Configures `AzureWebJobsDashboard` app setting based on the configured storage setting + BuiltinLoggingEnabled *bool `json:"builtinLoggingEnabled,omitempty" tf:"builtin_logging_enabled,omitempty"` + + // Should the function app use Client Certificates. + // Should the function app use Client Certificates + ClientCertificateEnabled *bool `json:"clientCertificateEnabled,omitempty" tf:"client_certificate_enabled,omitempty"` + + // Paths to exclude when using client certificates, separated by ; + // Paths to exclude when using client certificates, separated by ; + ClientCertificateExclusionPaths *string `json:"clientCertificateExclusionPaths,omitempty" tf:"client_certificate_exclusion_paths,omitempty"` + + // The mode of the Function App's client certificates requirement for incoming requests. Possible values are Required, Optional, and OptionalInteractiveUser. Defaults to Optional. + // The mode of the Function App's client certificates requirement for incoming requests. Possible values are `Required`, `Optional`, and `OptionalInteractiveUser` + ClientCertificateMode *string `json:"clientCertificateMode,omitempty" tf:"client_certificate_mode,omitempty"` + + // One or more connection_string blocks as defined below. 
+ ConnectionString []WindowsFunctionAppConnectionStringInitParameters `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // Should Content Share Settings be disabled. Defaults to false. + // Force disable the content share settings. + ContentShareForceDisabled *bool `json:"contentShareForceDisabled,omitempty" tf:"content_share_force_disabled,omitempty"` + + // The amount of memory in gigabyte-seconds that your application is allowed to consume per day. Setting this value only affects function apps under the consumption plan. Defaults to 0. + // The amount of memory in gigabyte-seconds that your application is allowed to consume per day. Setting this value only affects function apps in Consumption Plans. + DailyMemoryTimeQuota *float64 `json:"dailyMemoryTimeQuota,omitempty" tf:"daily_memory_time_quota,omitempty"` + + // Is the Function App enabled? Defaults to true. + // Is the Windows Function App enabled. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Should the default FTP Basic Authentication publishing profile be enabled. Defaults to true. + FtpPublishBasicAuthenticationEnabled *bool `json:"ftpPublishBasicAuthenticationEnabled,omitempty" tf:"ftp_publish_basic_authentication_enabled,omitempty"` + + // The runtime version associated with the Function App. Defaults to ~4. + // The runtime version associated with the Function App. + FunctionsExtensionVersion *string `json:"functionsExtensionVersion,omitempty" tf:"functions_extension_version,omitempty"` + + // Can the Function App only be accessed via HTTPS?. Defaults to false. + // Can the Function App only be accessed via HTTPS? + HTTPSOnly *bool `json:"httpsOnly,omitempty" tf:"https_only,omitempty"` + + // A identity block as defined below. + Identity *WindowsFunctionAppIdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The User Assigned Identity ID used for accessing KeyVault secrets. 
The identity must be assigned to the application in the identity block. For more information see - Access vaults with a user-assigned identity + // The User Assigned Identity to use for Key Vault access. + KeyVaultReferenceIdentityID *string `json:"keyVaultReferenceIdentityId,omitempty" tf:"key_vault_reference_identity_id,omitempty"` + + // The Azure Region where the Windows Function App should exist. Changing this forces a new Windows Function App to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Should public network access be enabled for the Function App. Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The ID of the App Service Plan within which to create this Function App. + // The ID of the App Service Plan within which to create this Function App + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/web/v1beta1.ServicePlan + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + ServicePlanID *string `json:"servicePlanId,omitempty" tf:"service_plan_id,omitempty"` + + // Reference to a ServicePlan in web to populate servicePlanId. + // +kubebuilder:validation:Optional + ServicePlanIDRef *v1.Reference `json:"servicePlanIdRef,omitempty" tf:"-"` + + // Selector for a ServicePlan in web to populate servicePlanId. + // +kubebuilder:validation:Optional + ServicePlanIDSelector *v1.Selector `json:"servicePlanIdSelector,omitempty" tf:"-"` + + // A site_config block as defined below. + SiteConfig *WindowsFunctionAppSiteConfigInitParameters `json:"siteConfig,omitempty" tf:"site_config,omitempty"` + + // A sticky_settings block as defined below. + StickySettings *WindowsFunctionAppStickySettingsInitParameters `json:"stickySettings,omitempty" tf:"sticky_settings,omitempty"` + + // One or more storage_account blocks as defined below. 
+ StorageAccount []WindowsFunctionAppStorageAccountInitParameters `json:"storageAccount,omitempty" tf:"storage_account,omitempty"` + + // The backend storage account name which will be used by this Function App. + // The backend storage account name which will be used by this Function App. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account + StorageAccountName *string `json:"storageAccountName,omitempty" tf:"storage_account_name,omitempty"` + + // Reference to a Account in storage to populate storageAccountName. + // +kubebuilder:validation:Optional + StorageAccountNameRef *v1.Reference `json:"storageAccountNameRef,omitempty" tf:"-"` + + // Selector for a Account in storage to populate storageAccountName. + // +kubebuilder:validation:Optional + StorageAccountNameSelector *v1.Selector `json:"storageAccountNameSelector,omitempty" tf:"-"` + + // The Key Vault Secret ID, optionally including version, that contains the Connection String to connect to the storage account for this Function App. + // The Key Vault Secret ID, including version, that contains the Connection String to connect to the storage account for this Function App. + StorageKeyVaultSecretID *string `json:"storageKeyVaultSecretId,omitempty" tf:"storage_key_vault_secret_id,omitempty"` + + // Should the Function App use Managed Identity to access the storage account. Conflicts with storage_account_access_key. + // Should the Function App use its Managed Identity to access storage? + StorageUsesManagedIdentity *bool `json:"storageUsesManagedIdentity,omitempty" tf:"storage_uses_managed_identity,omitempty"` + + // A mapping of tags which should be assigned to the Windows Function App. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The subnet id which will be used by this Function App for regional virtual network integration. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDRef *v1.Reference `json:"virtualNetworkSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDSelector *v1.Selector `json:"virtualNetworkSubnetIdSelector,omitempty" tf:"-"` + + // Should the default WebDeploy Basic Authentication publishing credentials enabled. Defaults to true. + WebdeployPublishBasicAuthenticationEnabled *bool `json:"webdeployPublishBasicAuthenticationEnabled,omitempty" tf:"webdeploy_publish_basic_authentication_enabled,omitempty"` + + // The local path and filename of the Zip packaged application to deploy to this Windows Function App. + // The local path and filename of the Zip packaged application to deploy to this Windows Function App. **Note:** Using this value requires `WEBSITE_RUN_FROM_PACKAGE=1` to be set on the App in `app_settings`. + ZipDeployFile *string `json:"zipDeployFile,omitempty" tf:"zip_deploy_file,omitempty"` +} + +type WindowsFunctionAppObservation struct { + + // A map of key-value pairs for App Settings and custom values. + // A map of key-value pairs for [App Settings](https://docs.microsoft.com/en-us/azure/azure-functions/functions-app-settings) and custom values. + // +mapType=granular + AppSettings map[string]*string `json:"appSettings,omitempty" tf:"app_settings,omitempty"` + + // A auth_settings block as defined below. 
+ AuthSettings *WindowsFunctionAppAuthSettingsObservation `json:"authSettings,omitempty" tf:"auth_settings,omitempty"` + + // A auth_settings_v2 block as defined below. + AuthSettingsV2 *WindowsFunctionAppAuthSettingsV2Observation `json:"authSettingsV2,omitempty" tf:"auth_settings_v2,omitempty"` + + // A backup block as defined below. + Backup *WindowsFunctionAppBackupObservation `json:"backup,omitempty" tf:"backup,omitempty"` + + // Should built in logging be enabled. Configures AzureWebJobsDashboard app setting based on the configured storage setting. Defaults to true. + // Should built in logging be enabled. Configures `AzureWebJobsDashboard` app setting based on the configured storage setting + BuiltinLoggingEnabled *bool `json:"builtinLoggingEnabled,omitempty" tf:"builtin_logging_enabled,omitempty"` + + // Should the function app use Client Certificates. + // Should the function app use Client Certificates + ClientCertificateEnabled *bool `json:"clientCertificateEnabled,omitempty" tf:"client_certificate_enabled,omitempty"` + + // Paths to exclude when using client certificates, separated by ; + // Paths to exclude when using client certificates, separated by ; + ClientCertificateExclusionPaths *string `json:"clientCertificateExclusionPaths,omitempty" tf:"client_certificate_exclusion_paths,omitempty"` + + // The mode of the Function App's client certificates requirement for incoming requests. Possible values are Required, Optional, and OptionalInteractiveUser. Defaults to Optional. + // The mode of the Function App's client certificates requirement for incoming requests. Possible values are `Required`, `Optional`, and `OptionalInteractiveUser` + ClientCertificateMode *string `json:"clientCertificateMode,omitempty" tf:"client_certificate_mode,omitempty"` + + // One or more connection_string blocks as defined below. 
+ ConnectionString []WindowsFunctionAppConnectionStringObservation `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // Should Content Share Settings be disabled. Defaults to false. + // Force disable the content share settings. + ContentShareForceDisabled *bool `json:"contentShareForceDisabled,omitempty" tf:"content_share_force_disabled,omitempty"` + + // The amount of memory in gigabyte-seconds that your application is allowed to consume per day. Setting this value only affects function apps under the consumption plan. Defaults to 0. + // The amount of memory in gigabyte-seconds that your application is allowed to consume per day. Setting this value only affects function apps in Consumption Plans. + DailyMemoryTimeQuota *float64 `json:"dailyMemoryTimeQuota,omitempty" tf:"daily_memory_time_quota,omitempty"` + + // The default hostname of the Windows Function App. + DefaultHostName *string `json:"defaultHostname,omitempty" tf:"default_hostname,omitempty"` + + // Is the Function App enabled? Defaults to true. + // Is the Windows Function App enabled. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Should the default FTP Basic Authentication publishing profile be enabled. Defaults to true. + FtpPublishBasicAuthenticationEnabled *bool `json:"ftpPublishBasicAuthenticationEnabled,omitempty" tf:"ftp_publish_basic_authentication_enabled,omitempty"` + + // The runtime version associated with the Function App. Defaults to ~4. + // The runtime version associated with the Function App. + FunctionsExtensionVersion *string `json:"functionsExtensionVersion,omitempty" tf:"functions_extension_version,omitempty"` + + // Can the Function App only be accessed via HTTPS?. Defaults to false. + // Can the Function App only be accessed via HTTPS? + HTTPSOnly *bool `json:"httpsOnly,omitempty" tf:"https_only,omitempty"` + + // The ID of the App Service Environment used by Function App. 
+ HostingEnvironmentID *string `json:"hostingEnvironmentId,omitempty" tf:"hosting_environment_id,omitempty"` + + // The ID of the Windows Function App. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // A identity block as defined below. + Identity *WindowsFunctionAppIdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // The User Assigned Identity ID used for accessing KeyVault secrets. The identity must be assigned to the application in the identity block. For more information see - Access vaults with a user-assigned identity + // The User Assigned Identity to use for Key Vault access. + KeyVaultReferenceIdentityID *string `json:"keyVaultReferenceIdentityId,omitempty" tf:"key_vault_reference_identity_id,omitempty"` + + // The Kind value for this Windows Function App. + Kind *string `json:"kind,omitempty" tf:"kind,omitempty"` + + // The Azure Region where the Windows Function App should exist. Changing this forces a new Windows Function App to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A list of outbound IP addresses. For example ["52.23.25.3", "52.143.43.12"] + OutboundIPAddressList []*string `json:"outboundIpAddressList,omitempty" tf:"outbound_ip_address_list,omitempty"` + + // A comma separated list of outbound IP addresses as a string. For example 52.23.25.3,52.143.43.12. + OutboundIPAddresses *string `json:"outboundIpAddresses,omitempty" tf:"outbound_ip_addresses,omitempty"` + + // A list of possible outbound IP addresses, not all of which are necessarily in use. This is a superset of outbound_ip_address_list. For example ["52.23.25.3", "52.143.43.12"]. + PossibleOutboundIPAddressList []*string `json:"possibleOutboundIpAddressList,omitempty" tf:"possible_outbound_ip_address_list,omitempty"` + + // A comma separated list of possible outbound IP addresses as a string. For example 52.23.25.3,52.143.43.12,52.143.43.17. This is a superset of outbound_ip_addresses. 
+ PossibleOutboundIPAddresses *string `json:"possibleOutboundIpAddresses,omitempty" tf:"possible_outbound_ip_addresses,omitempty"` + + // Should public network access be enabled for the Function App. Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The name of the Resource Group where the Windows Function App should exist. Changing this forces a new Windows Function App to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // The ID of the App Service Plan within which to create this Function App. + // The ID of the App Service Plan within which to create this Function App + ServicePlanID *string `json:"servicePlanId,omitempty" tf:"service_plan_id,omitempty"` + + // A site_config block as defined below. + SiteConfig *WindowsFunctionAppSiteConfigObservation `json:"siteConfig,omitempty" tf:"site_config,omitempty"` + + // A sticky_settings block as defined below. + StickySettings *WindowsFunctionAppStickySettingsObservation `json:"stickySettings,omitempty" tf:"sticky_settings,omitempty"` + + // One or more storage_account blocks as defined below. + StorageAccount []WindowsFunctionAppStorageAccountObservation `json:"storageAccount,omitempty" tf:"storage_account,omitempty"` + + // The backend storage account name which will be used by this Function App. + // The backend storage account name which will be used by this Function App. + StorageAccountName *string `json:"storageAccountName,omitempty" tf:"storage_account_name,omitempty"` + + // The Key Vault Secret ID, optionally including version, that contains the Connection String to connect to the storage account for this Function App. + // The Key Vault Secret ID, including version, that contains the Connection String to connect to the storage account for this Function App. 
+ StorageKeyVaultSecretID *string `json:"storageKeyVaultSecretId,omitempty" tf:"storage_key_vault_secret_id,omitempty"` + + // Should the Function App use Managed Identity to access the storage account. Conflicts with storage_account_access_key. + // Should the Function App use its Managed Identity to access storage? + StorageUsesManagedIdentity *bool `json:"storageUsesManagedIdentity,omitempty" tf:"storage_uses_managed_identity,omitempty"` + + // A mapping of tags which should be assigned to the Windows Function App. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The subnet id which will be used by this Function App for regional virtual network integration. + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` + + // Should the default WebDeploy Basic Authentication publishing credentials enabled. Defaults to true. + WebdeployPublishBasicAuthenticationEnabled *bool `json:"webdeployPublishBasicAuthenticationEnabled,omitempty" tf:"webdeploy_publish_basic_authentication_enabled,omitempty"` + + // The local path and filename of the Zip packaged application to deploy to this Windows Function App. + // The local path and filename of the Zip packaged application to deploy to this Windows Function App. **Note:** Using this value requires `WEBSITE_RUN_FROM_PACKAGE=1` to be set on the App in `app_settings`. + ZipDeployFile *string `json:"zipDeployFile,omitempty" tf:"zip_deploy_file,omitempty"` +} + +type WindowsFunctionAppParameters struct { + + // A map of key-value pairs for App Settings and custom values. + // A map of key-value pairs for [App Settings](https://docs.microsoft.com/en-us/azure/azure-functions/functions-app-settings) and custom values. + // +kubebuilder:validation:Optional + // +mapType=granular + AppSettings map[string]*string `json:"appSettings,omitempty" tf:"app_settings,omitempty"` + + // A auth_settings block as defined below. 
+ // +kubebuilder:validation:Optional + AuthSettings *WindowsFunctionAppAuthSettingsParameters `json:"authSettings,omitempty" tf:"auth_settings,omitempty"` + + // A auth_settings_v2 block as defined below. + // +kubebuilder:validation:Optional + AuthSettingsV2 *WindowsFunctionAppAuthSettingsV2Parameters `json:"authSettingsV2,omitempty" tf:"auth_settings_v2,omitempty"` + + // A backup block as defined below. + // +kubebuilder:validation:Optional + Backup *WindowsFunctionAppBackupParameters `json:"backup,omitempty" tf:"backup,omitempty"` + + // Should built in logging be enabled. Configures AzureWebJobsDashboard app setting based on the configured storage setting. Defaults to true. + // Should built in logging be enabled. Configures `AzureWebJobsDashboard` app setting based on the configured storage setting + // +kubebuilder:validation:Optional + BuiltinLoggingEnabled *bool `json:"builtinLoggingEnabled,omitempty" tf:"builtin_logging_enabled,omitempty"` + + // Should the function app use Client Certificates. + // Should the function app use Client Certificates + // +kubebuilder:validation:Optional + ClientCertificateEnabled *bool `json:"clientCertificateEnabled,omitempty" tf:"client_certificate_enabled,omitempty"` + + // Paths to exclude when using client certificates, separated by ; + // Paths to exclude when using client certificates, separated by ; + // +kubebuilder:validation:Optional + ClientCertificateExclusionPaths *string `json:"clientCertificateExclusionPaths,omitempty" tf:"client_certificate_exclusion_paths,omitempty"` + + // The mode of the Function App's client certificates requirement for incoming requests. Possible values are Required, Optional, and OptionalInteractiveUser. Defaults to Optional. + // The mode of the Function App's client certificates requirement for incoming requests. 
Possible values are `Required`, `Optional`, and `OptionalInteractiveUser` + // +kubebuilder:validation:Optional + ClientCertificateMode *string `json:"clientCertificateMode,omitempty" tf:"client_certificate_mode,omitempty"` + + // One or more connection_string blocks as defined below. + // +kubebuilder:validation:Optional + ConnectionString []WindowsFunctionAppConnectionStringParameters `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // Should Content Share Settings be disabled. Defaults to false. + // Force disable the content share settings. + // +kubebuilder:validation:Optional + ContentShareForceDisabled *bool `json:"contentShareForceDisabled,omitempty" tf:"content_share_force_disabled,omitempty"` + + // The amount of memory in gigabyte-seconds that your application is allowed to consume per day. Setting this value only affects function apps under the consumption plan. Defaults to 0. + // The amount of memory in gigabyte-seconds that your application is allowed to consume per day. Setting this value only affects function apps in Consumption Plans. + // +kubebuilder:validation:Optional + DailyMemoryTimeQuota *float64 `json:"dailyMemoryTimeQuota,omitempty" tf:"daily_memory_time_quota,omitempty"` + + // Is the Function App enabled? Defaults to true. + // Is the Windows Function App enabled. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Should the default FTP Basic Authentication publishing profile be enabled. Defaults to true. + // +kubebuilder:validation:Optional + FtpPublishBasicAuthenticationEnabled *bool `json:"ftpPublishBasicAuthenticationEnabled,omitempty" tf:"ftp_publish_basic_authentication_enabled,omitempty"` + + // The runtime version associated with the Function App. Defaults to ~4. + // The runtime version associated with the Function App. 
+ // +kubebuilder:validation:Optional + FunctionsExtensionVersion *string `json:"functionsExtensionVersion,omitempty" tf:"functions_extension_version,omitempty"` + + // Can the Function App only be accessed via HTTPS?. Defaults to false. + // Can the Function App only be accessed via HTTPS? + // +kubebuilder:validation:Optional + HTTPSOnly *bool `json:"httpsOnly,omitempty" tf:"https_only,omitempty"` + + // A identity block as defined below. + // +kubebuilder:validation:Optional + Identity *WindowsFunctionAppIdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The User Assigned Identity ID used for accessing KeyVault secrets. The identity must be assigned to the application in the identity block. For more information see - Access vaults with a user-assigned identity + // The User Assigned Identity to use for Key Vault access. + // +kubebuilder:validation:Optional + KeyVaultReferenceIdentityID *string `json:"keyVaultReferenceIdentityId,omitempty" tf:"key_vault_reference_identity_id,omitempty"` + + // The Azure Region where the Windows Function App should exist. Changing this forces a new Windows Function App to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Should public network access be enabled for the Function App. Defaults to true. + // +kubebuilder:validation:Optional + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The name of the Resource Group where the Windows Function App should exist. Changing this forces a new Windows Function App to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. 
+ // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // The ID of the App Service Plan within which to create this Function App. + // The ID of the App Service Plan within which to create this Function App + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/web/v1beta1.ServicePlan + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + ServicePlanID *string `json:"servicePlanId,omitempty" tf:"service_plan_id,omitempty"` + + // Reference to a ServicePlan in web to populate servicePlanId. + // +kubebuilder:validation:Optional + ServicePlanIDRef *v1.Reference `json:"servicePlanIdRef,omitempty" tf:"-"` + + // Selector for a ServicePlan in web to populate servicePlanId. + // +kubebuilder:validation:Optional + ServicePlanIDSelector *v1.Selector `json:"servicePlanIdSelector,omitempty" tf:"-"` + + // A site_config block as defined below. + // +kubebuilder:validation:Optional + SiteConfig *WindowsFunctionAppSiteConfigParameters `json:"siteConfig,omitempty" tf:"site_config,omitempty"` + + // A sticky_settings block as defined below. + // +kubebuilder:validation:Optional + StickySettings *WindowsFunctionAppStickySettingsParameters `json:"stickySettings,omitempty" tf:"sticky_settings,omitempty"` + + // One or more storage_account blocks as defined below. + // +kubebuilder:validation:Optional + StorageAccount []WindowsFunctionAppStorageAccountParameters `json:"storageAccount,omitempty" tf:"storage_account,omitempty"` + + // The access key which will be used to access the backend storage account for the Function App. Conflicts with storage_uses_managed_identity. 
+ // The access key which will be used to access the storage account for the Function App. + // +kubebuilder:validation:Optional + StorageAccountAccessKeySecretRef *v1.SecretKeySelector `json:"storageAccountAccessKeySecretRef,omitempty" tf:"-"` + + // The backend storage account name which will be used by this Function App. + // The backend storage account name which will be used by this Function App. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account + // +kubebuilder:validation:Optional + StorageAccountName *string `json:"storageAccountName,omitempty" tf:"storage_account_name,omitempty"` + + // Reference to a Account in storage to populate storageAccountName. + // +kubebuilder:validation:Optional + StorageAccountNameRef *v1.Reference `json:"storageAccountNameRef,omitempty" tf:"-"` + + // Selector for a Account in storage to populate storageAccountName. + // +kubebuilder:validation:Optional + StorageAccountNameSelector *v1.Selector `json:"storageAccountNameSelector,omitempty" tf:"-"` + + // The Key Vault Secret ID, optionally including version, that contains the Connection String to connect to the storage account for this Function App. + // The Key Vault Secret ID, including version, that contains the Connection String to connect to the storage account for this Function App. + // +kubebuilder:validation:Optional + StorageKeyVaultSecretID *string `json:"storageKeyVaultSecretId,omitempty" tf:"storage_key_vault_secret_id,omitempty"` + + // Should the Function App use Managed Identity to access the storage account. Conflicts with storage_account_access_key. + // Should the Function App use its Managed Identity to access storage? + // +kubebuilder:validation:Optional + StorageUsesManagedIdentity *bool `json:"storageUsesManagedIdentity,omitempty" tf:"storage_uses_managed_identity,omitempty"` + + // A mapping of tags which should be assigned to the Windows Function App. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The subnet id which will be used by this Function App for regional virtual network integration. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDRef *v1.Reference `json:"virtualNetworkSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDSelector *v1.Selector `json:"virtualNetworkSubnetIdSelector,omitempty" tf:"-"` + + // Should the default WebDeploy Basic Authentication publishing credentials enabled. Defaults to true. + // +kubebuilder:validation:Optional + WebdeployPublishBasicAuthenticationEnabled *bool `json:"webdeployPublishBasicAuthenticationEnabled,omitempty" tf:"webdeploy_publish_basic_authentication_enabled,omitempty"` + + // The local path and filename of the Zip packaged application to deploy to this Windows Function App. + // The local path and filename of the Zip packaged application to deploy to this Windows Function App. **Note:** Using this value requires `WEBSITE_RUN_FROM_PACKAGE=1` to be set on the App in `app_settings`. + // +kubebuilder:validation:Optional + ZipDeployFile *string `json:"zipDeployFile,omitempty" tf:"zip_deploy_file,omitempty"` +} + +type WindowsFunctionAppSiteConfigAppServiceLogsInitParameters struct { + + // The amount of disk space to use for logs. Valid values are between 25 and 100. Defaults to 35. 
+ // The amount of disk space to use for logs. Valid values are between `25` and `100`. + DiskQuotaMb *float64 `json:"diskQuotaMb,omitempty" tf:"disk_quota_mb,omitempty"` + + // After how many days backups should be deleted. Defaults to 30. + // The retention period for logs in days. Valid values are between `0` and `99999`. Defaults to `0` (never delete). + RetentionPeriodDays *float64 `json:"retentionPeriodDays,omitempty" tf:"retention_period_days,omitempty"` +} + +type WindowsFunctionAppSiteConfigAppServiceLogsObservation struct { + + // The amount of disk space to use for logs. Valid values are between 25 and 100. Defaults to 35. + // The amount of disk space to use for logs. Valid values are between `25` and `100`. + DiskQuotaMb *float64 `json:"diskQuotaMb,omitempty" tf:"disk_quota_mb,omitempty"` + + // After how many days backups should be deleted. Defaults to 30. + // The retention period for logs in days. Valid values are between `0` and `99999`. Defaults to `0` (never delete). + RetentionPeriodDays *float64 `json:"retentionPeriodDays,omitempty" tf:"retention_period_days,omitempty"` +} + +type WindowsFunctionAppSiteConfigAppServiceLogsParameters struct { + + // The amount of disk space to use for logs. Valid values are between 25 and 100. Defaults to 35. + // The amount of disk space to use for logs. Valid values are between `25` and `100`. + // +kubebuilder:validation:Optional + DiskQuotaMb *float64 `json:"diskQuotaMb,omitempty" tf:"disk_quota_mb,omitempty"` + + // After how many days backups should be deleted. Defaults to 30. + // The retention period for logs in days. Valid values are between `0` and `99999`. Defaults to `0` (never delete). + // +kubebuilder:validation:Optional + RetentionPeriodDays *float64 `json:"retentionPeriodDays,omitempty" tf:"retention_period_days,omitempty"` +} + +type WindowsFunctionAppSiteConfigApplicationStackInitParameters struct { + + // The version of .NET to use. Possible values include v3.0, v4.0 v6.0, v7.0 and v8.0. 
Defaults to v4.0. + // The version of .Net. Possible values are `v3.0`, `v4.0`, `v6.0` and `v7.0` + DotnetVersion *string `json:"dotnetVersion,omitempty" tf:"dotnet_version,omitempty"` + + // The Version of Java to use. Supported versions include 1.8, 11 & 17 (In-Preview). + // The version of Java to use. Possible values are `1.8`, `11` and `17` + JavaVersion *string `json:"javaVersion,omitempty" tf:"java_version,omitempty"` + + // The version of Node to run. Possible values include ~12, ~14, ~16 and ~18. + // The version of Node to use. Possible values include `12`, `14`, `16` and `18` + NodeVersion *string `json:"nodeVersion,omitempty" tf:"node_version,omitempty"` + + // The version of PowerShell Core to run. Possible values are 7, and 7.2. + // The PowerShell Core version to use. Possible values are `7`, and `7.2` + PowershellCoreVersion *string `json:"powershellCoreVersion,omitempty" tf:"powershell_core_version,omitempty"` + + // Should the Windows Function App use a custom runtime? + // Does the Function App use a custom Application Stack? + UseCustomRuntime *bool `json:"useCustomRuntime,omitempty" tf:"use_custom_runtime,omitempty"` + + // Should the DotNet process use an isolated runtime. Defaults to false. + // Should the DotNet process use an isolated runtime. Defaults to `false`. + UseDotnetIsolatedRuntime *bool `json:"useDotnetIsolatedRuntime,omitempty" tf:"use_dotnet_isolated_runtime,omitempty"` +} + +type WindowsFunctionAppSiteConfigApplicationStackObservation struct { + + // The version of .NET to use. Possible values include v3.0, v4.0 v6.0, v7.0 and v8.0. Defaults to v4.0. + // The version of .Net. Possible values are `v3.0`, `v4.0`, `v6.0` and `v7.0` + DotnetVersion *string `json:"dotnetVersion,omitempty" tf:"dotnet_version,omitempty"` + + // The Version of Java to use. Supported versions include 1.8, 11 & 17 (In-Preview). + // The version of Java to use. 
Possible values are `1.8`, `11` and `17` + JavaVersion *string `json:"javaVersion,omitempty" tf:"java_version,omitempty"` + + // The version of Node to run. Possible values include ~12, ~14, ~16 and ~18. + // The version of Node to use. Possible values include `12`, `14`, `16` and `18` + NodeVersion *string `json:"nodeVersion,omitempty" tf:"node_version,omitempty"` + + // The version of PowerShell Core to run. Possible values are 7, and 7.2. + // The PowerShell Core version to use. Possible values are `7`, and `7.2` + PowershellCoreVersion *string `json:"powershellCoreVersion,omitempty" tf:"powershell_core_version,omitempty"` + + // Should the Windows Function App use a custom runtime? + // Does the Function App use a custom Application Stack? + UseCustomRuntime *bool `json:"useCustomRuntime,omitempty" tf:"use_custom_runtime,omitempty"` + + // Should the DotNet process use an isolated runtime. Defaults to false. + // Should the DotNet process use an isolated runtime. Defaults to `false`. + UseDotnetIsolatedRuntime *bool `json:"useDotnetIsolatedRuntime,omitempty" tf:"use_dotnet_isolated_runtime,omitempty"` +} + +type WindowsFunctionAppSiteConfigApplicationStackParameters struct { + + // The version of .NET to use. Possible values include v3.0, v4.0 v6.0, v7.0 and v8.0. Defaults to v4.0. + // The version of .Net. Possible values are `v3.0`, `v4.0`, `v6.0` and `v7.0` + // +kubebuilder:validation:Optional + DotnetVersion *string `json:"dotnetVersion,omitempty" tf:"dotnet_version,omitempty"` + + // The Version of Java to use. Supported versions include 1.8, 11 & 17 (In-Preview). + // The version of Java to use. Possible values are `1.8`, `11` and `17` + // +kubebuilder:validation:Optional + JavaVersion *string `json:"javaVersion,omitempty" tf:"java_version,omitempty"` + + // The version of Node to run. Possible values include ~12, ~14, ~16 and ~18. + // The version of Node to use. 
Possible values include `12`, `14`, `16` and `18` + // +kubebuilder:validation:Optional + NodeVersion *string `json:"nodeVersion,omitempty" tf:"node_version,omitempty"` + + // The version of PowerShell Core to run. Possible values are 7, and 7.2. + // The PowerShell Core version to use. Possible values are `7`, and `7.2` + // +kubebuilder:validation:Optional + PowershellCoreVersion *string `json:"powershellCoreVersion,omitempty" tf:"powershell_core_version,omitempty"` + + // Should the Windows Function App use a custom runtime? + // Does the Function App use a custom Application Stack? + // +kubebuilder:validation:Optional + UseCustomRuntime *bool `json:"useCustomRuntime,omitempty" tf:"use_custom_runtime,omitempty"` + + // Should the DotNet process use an isolated runtime. Defaults to false. + // Should the DotNet process use an isolated runtime. Defaults to `false`. + // +kubebuilder:validation:Optional + UseDotnetIsolatedRuntime *bool `json:"useDotnetIsolatedRuntime,omitempty" tf:"use_dotnet_isolated_runtime,omitempty"` +} + +type WindowsFunctionAppSiteConfigCorsInitParameters struct { + + // Specifies a list of origins that should be allowed to make cross-origin calls. + // Specifies a list of origins that should be allowed to make cross-origin calls. + // +listType=set + AllowedOrigins []*string `json:"allowedOrigins,omitempty" tf:"allowed_origins,omitempty"` + + // Are credentials allowed in CORS requests? Defaults to false. + // Are credentials allowed in CORS requests? Defaults to `false`. + SupportCredentials *bool `json:"supportCredentials,omitempty" tf:"support_credentials,omitempty"` +} + +type WindowsFunctionAppSiteConfigCorsObservation struct { + + // Specifies a list of origins that should be allowed to make cross-origin calls. + // Specifies a list of origins that should be allowed to make cross-origin calls. 
+ // +listType=set + AllowedOrigins []*string `json:"allowedOrigins,omitempty" tf:"allowed_origins,omitempty"` + + // Are credentials allowed in CORS requests? Defaults to false. + // Are credentials allowed in CORS requests? Defaults to `false`. + SupportCredentials *bool `json:"supportCredentials,omitempty" tf:"support_credentials,omitempty"` +} + +type WindowsFunctionAppSiteConfigCorsParameters struct { + + // Specifies a list of origins that should be allowed to make cross-origin calls. + // Specifies a list of origins that should be allowed to make cross-origin calls. + // +kubebuilder:validation:Optional + // +listType=set + AllowedOrigins []*string `json:"allowedOrigins,omitempty" tf:"allowed_origins,omitempty"` + + // Are credentials allowed in CORS requests? Defaults to false. + // Are credentials allowed in CORS requests? Defaults to `false`. + // +kubebuilder:validation:Optional + SupportCredentials *bool `json:"supportCredentials,omitempty" tf:"support_credentials,omitempty"` +} + +type WindowsFunctionAppSiteConfigIPRestrictionHeadersInitParameters struct { + + // Specifies a list of Azure Front Door IDs. + XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid"` + + // Specifies if a Front Door Health Probe should be expected. The only possible value is 1. + XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe"` + + // Specifies a list of addresses for which matching should be applied. Omitting this value means allow any. + XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for"` + + // Specifies a list of Hosts for which matching should be applied. + XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host"` +} + +type WindowsFunctionAppSiteConfigIPRestrictionHeadersObservation struct { + + // Specifies a list of Azure Front Door IDs. 
+ XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid,omitempty"` + + // Specifies if a Front Door Health Probe should be expected. The only possible value is 1. + XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe,omitempty"` + + // Specifies a list of addresses for which matching should be applied. Omitting this value means allow any. + XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for,omitempty"` + + // Specifies a list of Hosts for which matching should be applied. + XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host,omitempty"` +} + +type WindowsFunctionAppSiteConfigIPRestrictionHeadersParameters struct { + + // Specifies a list of Azure Front Door IDs. + // +kubebuilder:validation:Optional + XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid"` + + // Specifies if a Front Door Health Probe should be expected. The only possible value is 1. + // +kubebuilder:validation:Optional + XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe"` + + // Specifies a list of addresses for which matching should be applied. Omitting this value means allow any. + // +kubebuilder:validation:Optional + XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for"` + + // Specifies a list of Hosts for which matching should be applied. + // +kubebuilder:validation:Optional + XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host"` +} + +type WindowsFunctionAppSiteConfigIPRestrictionInitParameters struct { + + // The action to take. Possible values are Allow or Deny. Defaults to Allow. + // The action to take. Possible values are `Allow` or `Deny`. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The Description of this IP Restriction. + // The description of the IP restriction rule. 
+ Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A headers block as defined above. + Headers []WindowsFunctionAppSiteConfigIPRestrictionHeadersInitParameters `json:"headers,omitempty" tf:"headers,omitempty"` + + // The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + // The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // The name which should be used for this Storage Account. + // The name which should be used for this `ip_restriction`. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The priority value of this ip_restriction. Defaults to 65000. + // The priority value of this `ip_restriction`. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Service Tag used for this IP Restriction. + // The Service Tag used for this IP Restriction. + ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag,omitempty"` + + // The subnet id which will be used by this Function App for regional virtual network integration. + // The Virtual Network Subnet ID used for this IP Restriction. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDRef *v1.Reference `json:"virtualNetworkSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate virtualNetworkSubnetId. 
+ // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDSelector *v1.Selector `json:"virtualNetworkSubnetIdSelector,omitempty" tf:"-"` +} + +type WindowsFunctionAppSiteConfigIPRestrictionObservation struct { + + // The action to take. Possible values are Allow or Deny. Defaults to Allow. + // The action to take. Possible values are `Allow` or `Deny`. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The Description of this IP Restriction. + // The description of the IP restriction rule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A headers block as defined above. + Headers []WindowsFunctionAppSiteConfigIPRestrictionHeadersObservation `json:"headers,omitempty" tf:"headers,omitempty"` + + // The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + // The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // The name which should be used for this Storage Account. + // The name which should be used for this `ip_restriction`. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The priority value of this ip_restriction. Defaults to 65000. + // The priority value of this `ip_restriction`. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Service Tag used for this IP Restriction. + // The Service Tag used for this IP Restriction. + ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag,omitempty"` + + // The subnet id which will be used by this Function App for regional virtual network integration. + // The Virtual Network Subnet ID used for this IP Restriction. 
+ VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` +} + +type WindowsFunctionAppSiteConfigIPRestrictionParameters struct { + + // The action to take. Possible values are Allow or Deny. Defaults to Allow. + // The action to take. Possible values are `Allow` or `Deny`. + // +kubebuilder:validation:Optional + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The Description of this IP Restriction. + // The description of the IP restriction rule. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A headers block as defined above. + // +kubebuilder:validation:Optional + Headers []WindowsFunctionAppSiteConfigIPRestrictionHeadersParameters `json:"headers,omitempty" tf:"headers,omitempty"` + + // The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + // The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + // +kubebuilder:validation:Optional + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // The name which should be used for this Storage Account. + // The name which should be used for this `ip_restriction`. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The priority value of this ip_restriction. Defaults to 65000. + // The priority value of this `ip_restriction`. + // +kubebuilder:validation:Optional + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Service Tag used for this IP Restriction. + // The Service Tag used for this IP Restriction. + // +kubebuilder:validation:Optional + ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag,omitempty"` + + // The subnet id which will be used by this Function App for regional virtual network integration. 
+ // The Virtual Network Subnet ID used for this IP Restriction. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDRef *v1.Reference `json:"virtualNetworkSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDSelector *v1.Selector `json:"virtualNetworkSubnetIdSelector,omitempty" tf:"-"` +} + +type WindowsFunctionAppSiteConfigInitParameters struct { + + // The URL of the API definition that describes this Windows Function App. + // The URL of the API definition that describes this Windows Function App. + APIDefinitionURL *string `json:"apiDefinitionUrl,omitempty" tf:"api_definition_url,omitempty"` + + // The ID of the API Management API for this Windows Function App. + // The ID of the API Management API for this Windows Function App. + APIManagementAPIID *string `json:"apiManagementApiId,omitempty" tf:"api_management_api_id,omitempty"` + + // If this Windows Function App is Always On enabled. Defaults to false. + // If this Windows Web App is Always On enabled. Defaults to `false`. + AlwaysOn *bool `json:"alwaysOn,omitempty" tf:"always_on,omitempty"` + + // The App command line to launch. + // The program and any arguments used to launch this app via the command line. (Example `node myapp.js`). + AppCommandLine *string `json:"appCommandLine,omitempty" tf:"app_command_line,omitempty"` + + // The number of workers this function app can scale out to. 
Only applicable to apps on the Consumption and Premium plan. + // The number of workers this function app can scale out to. Only applicable to apps on the Consumption and Premium plan. + AppScaleLimit *float64 `json:"appScaleLimit,omitempty" tf:"app_scale_limit,omitempty"` + + // An app_service_logs block as defined above. + AppServiceLogs *WindowsFunctionAppSiteConfigAppServiceLogsInitParameters `json:"appServiceLogs,omitempty" tf:"app_service_logs,omitempty"` + + // An application_stack block as defined above. + ApplicationStack *WindowsFunctionAppSiteConfigApplicationStackInitParameters `json:"applicationStack,omitempty" tf:"application_stack,omitempty"` + + // A cors block as defined above. + Cors *WindowsFunctionAppSiteConfigCorsInitParameters `json:"cors,omitempty" tf:"cors,omitempty"` + + // Specifies a list of Default Documents for the Windows Function App. + // Specifies a list of Default Documents for the Windows Web App. + DefaultDocuments []*string `json:"defaultDocuments,omitempty" tf:"default_documents,omitempty"` + + // The number of minimum instances for this Windows Function App. Only affects apps on Elastic Premium plans. + // The number of minimum instances for this Windows Function App. Only affects apps on Elastic Premium plans. + ElasticInstanceMinimum *float64 `json:"elasticInstanceMinimum,omitempty" tf:"elastic_instance_minimum,omitempty"` + + // State of FTP / FTPS service for this Windows Function App. Possible values include: AllAllowed, FtpsOnly and Disabled. Defaults to Disabled. + // State of FTP / FTPS service for this function app. Possible values include: `AllAllowed`, `FtpsOnly` and `Disabled`. Defaults to `Disabled`. + FtpsState *string `json:"ftpsState,omitempty" tf:"ftps_state,omitempty"` + + // The amount of time in minutes that a node can be unhealthy before being removed from the load balancer. Possible values are between 2 and 10. Only valid in conjunction with health_check_path. 
+ // The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between `2` and `10`. Defaults to `10`. Only valid in conjunction with `health_check_path` + HealthCheckEvictionTimeInMin *float64 `json:"healthCheckEvictionTimeInMin,omitempty" tf:"health_check_eviction_time_in_min,omitempty"` + + // The path to be checked for this Windows Function App health. + // The path to be checked for this function app health. + HealthCheckPath *string `json:"healthCheckPath,omitempty" tf:"health_check_path,omitempty"` + + // Specifies if the HTTP2 protocol should be enabled. Defaults to false. + // Specifies if the http2 protocol should be enabled. Defaults to `false`. + Http2Enabled *bool `json:"http2Enabled,omitempty" tf:"http2_enabled,omitempty"` + + // One or more ip_restriction blocks as defined above. + IPRestriction []WindowsFunctionAppSiteConfigIPRestrictionInitParameters `json:"ipRestriction,omitempty" tf:"ip_restriction,omitempty"` + + // The Default action for traffic that does not match any ip_restriction rule. possible values include Allow and Deny. Defaults to Allow. + IPRestrictionDefaultAction *string `json:"ipRestrictionDefaultAction,omitempty" tf:"ip_restriction_default_action,omitempty"` + + // The Site load balancing mode. Possible values include: WeightedRoundRobin, LeastRequests, LeastResponseTime, WeightedTotalTraffic, RequestHash, PerSiteRoundRobin. Defaults to LeastRequests if omitted. + // The Site load balancing mode. Possible values include: `WeightedRoundRobin`, `LeastRequests`, `LeastResponseTime`, `WeightedTotalTraffic`, `RequestHash`, `PerSiteRoundRobin`. Defaults to `LeastRequests` if omitted. + LoadBalancingMode *string `json:"loadBalancingMode,omitempty" tf:"load_balancing_mode,omitempty"` + + // Managed pipeline mode. Possible values include: Integrated, Classic. Defaults to Integrated. + // The Managed Pipeline mode. Possible values include: `Integrated`, `Classic`. 
Defaults to `Integrated`. + ManagedPipelineMode *string `json:"managedPipelineMode,omitempty" tf:"managed_pipeline_mode,omitempty"` + + // Configures the minimum version of TLS required for SSL requests. Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + // The configures the minimum version of TLS required for SSL requests. Possible values include: `1.0`, `1.1`, and `1.2`. Defaults to `1.2`. + MinimumTLSVersion *string `json:"minimumTlsVersion,omitempty" tf:"minimum_tls_version,omitempty"` + + // The number of pre-warmed instances for this Windows Function App. Only affects apps on an Elastic Premium plan. + // The number of pre-warmed instances for this function app. Only affects apps on an Elastic Premium plan. + PreWarmedInstanceCount *float64 `json:"preWarmedInstanceCount,omitempty" tf:"pre_warmed_instance_count,omitempty"` + + // Should Remote Debugging be enabled. Defaults to false. + // Should Remote Debugging be enabled. Defaults to `false`. + RemoteDebuggingEnabled *bool `json:"remoteDebuggingEnabled,omitempty" tf:"remote_debugging_enabled,omitempty"` + + // The Remote Debugging Version. Possible values include VS2017, VS2019, and VS2022. + // The Remote Debugging Version. Possible values include `VS2017`, `VS2019`, and `VS2022` + RemoteDebuggingVersion *string `json:"remoteDebuggingVersion,omitempty" tf:"remote_debugging_version,omitempty"` + + // Should Scale Monitoring of the Functions Runtime be enabled? + // Should Functions Runtime Scale Monitoring be enabled. + RuntimeScaleMonitoringEnabled *bool `json:"runtimeScaleMonitoringEnabled,omitempty" tf:"runtime_scale_monitoring_enabled,omitempty"` + + // One or more scm_ip_restriction blocks as defined above. + ScmIPRestriction []WindowsFunctionAppSiteConfigScmIPRestrictionInitParameters `json:"scmIpRestriction,omitempty" tf:"scm_ip_restriction,omitempty"` + + // The Default action for traffic that does not match any scm_ip_restriction rule. possible values include Allow and Deny. 
Defaults to Allow. + ScmIPRestrictionDefaultAction *string `json:"scmIpRestrictionDefaultAction,omitempty" tf:"scm_ip_restriction_default_action,omitempty"` + + // Configures the minimum version of TLS required for SSL requests to the SCM site. Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + // Configures the minimum version of TLS required for SSL requests to the SCM site Possible values include: `1.0`, `1.1`, and `1.2`. Defaults to `1.2`. + ScmMinimumTLSVersion *string `json:"scmMinimumTlsVersion,omitempty" tf:"scm_minimum_tls_version,omitempty"` + + // Should the Windows Function App ip_restriction configuration be used for the SCM also. + // Should the Windows Function App `ip_restriction` configuration be used for the SCM also. + ScmUseMainIPRestriction *bool `json:"scmUseMainIpRestriction,omitempty" tf:"scm_use_main_ip_restriction,omitempty"` + + // Should the Windows Function App use a 32-bit worker process. Defaults to true. + // Should the Windows Web App use a 32-bit worker. + Use32BitWorker *bool `json:"use32BitWorker,omitempty" tf:"use_32_bit_worker,omitempty"` + + // Should all outbound traffic to have NAT Gateways, Network Security Groups and User Defined Routes applied? Defaults to false. + // Should all outbound traffic to have Virtual Network Security Groups and User Defined Routes applied? Defaults to `false`. + VnetRouteAllEnabled *bool `json:"vnetRouteAllEnabled,omitempty" tf:"vnet_route_all_enabled,omitempty"` + + // Should Web Sockets be enabled. Defaults to false. + // Should Web Sockets be enabled. Defaults to `false`. + WebsocketsEnabled *bool `json:"websocketsEnabled,omitempty" tf:"websockets_enabled,omitempty"` + + // The number of Workers for this Windows Function App. + // The number of Workers for this Windows Function App. 
+ WorkerCount *float64 `json:"workerCount,omitempty" tf:"worker_count,omitempty"` +} + +type WindowsFunctionAppSiteConfigObservation struct { + + // The URL of the API definition that describes this Windows Function App. + // The URL of the API definition that describes this Windows Function App. + APIDefinitionURL *string `json:"apiDefinitionUrl,omitempty" tf:"api_definition_url,omitempty"` + + // The ID of the API Management API for this Windows Function App. + // The ID of the API Management API for this Windows Function App. + APIManagementAPIID *string `json:"apiManagementApiId,omitempty" tf:"api_management_api_id,omitempty"` + + // If this Windows Function App is Always On enabled. Defaults to false. + // If this Windows Web App is Always On enabled. Defaults to `false`. + AlwaysOn *bool `json:"alwaysOn,omitempty" tf:"always_on,omitempty"` + + // The App command line to launch. + // The program and any arguments used to launch this app via the command line. (Example `node myapp.js`). + AppCommandLine *string `json:"appCommandLine,omitempty" tf:"app_command_line,omitempty"` + + // The number of workers this function app can scale out to. Only applicable to apps on the Consumption and Premium plan. + // The number of workers this function app can scale out to. Only applicable to apps on the Consumption and Premium plan. + AppScaleLimit *float64 `json:"appScaleLimit,omitempty" tf:"app_scale_limit,omitempty"` + + // An app_service_logs block as defined above. + AppServiceLogs *WindowsFunctionAppSiteConfigAppServiceLogsObservation `json:"appServiceLogs,omitempty" tf:"app_service_logs,omitempty"` + + // An application_stack block as defined above. + ApplicationStack *WindowsFunctionAppSiteConfigApplicationStackObservation `json:"applicationStack,omitempty" tf:"application_stack,omitempty"` + + // A cors block as defined above. 
+ Cors *WindowsFunctionAppSiteConfigCorsObservation `json:"cors,omitempty" tf:"cors,omitempty"` + + // Specifies a list of Default Documents for the Windows Function App. + // Specifies a list of Default Documents for the Windows Web App. + DefaultDocuments []*string `json:"defaultDocuments,omitempty" tf:"default_documents,omitempty"` + + // Is the Function App enabled? Defaults to true. + // Is detailed error logging enabled + DetailedErrorLoggingEnabled *bool `json:"detailedErrorLoggingEnabled,omitempty" tf:"detailed_error_logging_enabled,omitempty"` + + // The number of minimum instances for this Windows Function App. Only affects apps on Elastic Premium plans. + // The number of minimum instances for this Windows Function App. Only affects apps on Elastic Premium plans. + ElasticInstanceMinimum *float64 `json:"elasticInstanceMinimum,omitempty" tf:"elastic_instance_minimum,omitempty"` + + // State of FTP / FTPS service for this Windows Function App. Possible values include: AllAllowed, FtpsOnly and Disabled. Defaults to Disabled. + // State of FTP / FTPS service for this function app. Possible values include: `AllAllowed`, `FtpsOnly` and `Disabled`. Defaults to `Disabled`. + FtpsState *string `json:"ftpsState,omitempty" tf:"ftps_state,omitempty"` + + // The amount of time in minutes that a node can be unhealthy before being removed from the load balancer. Possible values are between 2 and 10. Only valid in conjunction with health_check_path. + // The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between `2` and `10`. Defaults to `10`. Only valid in conjunction with `health_check_path` + HealthCheckEvictionTimeInMin *float64 `json:"healthCheckEvictionTimeInMin,omitempty" tf:"health_check_eviction_time_in_min,omitempty"` + + // The path to be checked for this Windows Function App health. + // The path to be checked for this function app health. 
+ HealthCheckPath *string `json:"healthCheckPath,omitempty" tf:"health_check_path,omitempty"` + + // Specifies if the HTTP2 protocol should be enabled. Defaults to false. + // Specifies if the http2 protocol should be enabled. Defaults to `false`. + Http2Enabled *bool `json:"http2Enabled,omitempty" tf:"http2_enabled,omitempty"` + + // One or more ip_restriction blocks as defined above. + IPRestriction []WindowsFunctionAppSiteConfigIPRestrictionObservation `json:"ipRestriction,omitempty" tf:"ip_restriction,omitempty"` + + // The Default action for traffic that does not match any ip_restriction rule. possible values include Allow and Deny. Defaults to Allow. + IPRestrictionDefaultAction *string `json:"ipRestrictionDefaultAction,omitempty" tf:"ip_restriction_default_action,omitempty"` + + // The Site load balancing mode. Possible values include: WeightedRoundRobin, LeastRequests, LeastResponseTime, WeightedTotalTraffic, RequestHash, PerSiteRoundRobin. Defaults to LeastRequests if omitted. + // The Site load balancing mode. Possible values include: `WeightedRoundRobin`, `LeastRequests`, `LeastResponseTime`, `WeightedTotalTraffic`, `RequestHash`, `PerSiteRoundRobin`. Defaults to `LeastRequests` if omitted. + LoadBalancingMode *string `json:"loadBalancingMode,omitempty" tf:"load_balancing_mode,omitempty"` + + // Managed pipeline mode. Possible values include: Integrated, Classic. Defaults to Integrated. + // The Managed Pipeline mode. Possible values include: `Integrated`, `Classic`. Defaults to `Integrated`. + ManagedPipelineMode *string `json:"managedPipelineMode,omitempty" tf:"managed_pipeline_mode,omitempty"` + + // Configures the minimum version of TLS required for SSL requests. Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + // The configures the minimum version of TLS required for SSL requests. Possible values include: `1.0`, `1.1`, and `1.2`. Defaults to `1.2`. 
+ MinimumTLSVersion *string `json:"minimumTlsVersion,omitempty" tf:"minimum_tls_version,omitempty"` + + // The number of pre-warmed instances for this Windows Function App. Only affects apps on an Elastic Premium plan. + // The number of pre-warmed instances for this function app. Only affects apps on an Elastic Premium plan. + PreWarmedInstanceCount *float64 `json:"preWarmedInstanceCount,omitempty" tf:"pre_warmed_instance_count,omitempty"` + + // Should Remote Debugging be enabled. Defaults to false. + // Should Remote Debugging be enabled. Defaults to `false`. + RemoteDebuggingEnabled *bool `json:"remoteDebuggingEnabled,omitempty" tf:"remote_debugging_enabled,omitempty"` + + // The Remote Debugging Version. Possible values include VS2017, VS2019, and VS2022. + // The Remote Debugging Version. Possible values include `VS2017`, `VS2019`, and `VS2022` + RemoteDebuggingVersion *string `json:"remoteDebuggingVersion,omitempty" tf:"remote_debugging_version,omitempty"` + + // Should Scale Monitoring of the Functions Runtime be enabled? + // Should Functions Runtime Scale Monitoring be enabled. + RuntimeScaleMonitoringEnabled *bool `json:"runtimeScaleMonitoringEnabled,omitempty" tf:"runtime_scale_monitoring_enabled,omitempty"` + + // One or more scm_ip_restriction blocks as defined above. + ScmIPRestriction []WindowsFunctionAppSiteConfigScmIPRestrictionObservation `json:"scmIpRestriction,omitempty" tf:"scm_ip_restriction,omitempty"` + + // The Default action for traffic that does not match any scm_ip_restriction rule. possible values include Allow and Deny. Defaults to Allow. + ScmIPRestrictionDefaultAction *string `json:"scmIpRestrictionDefaultAction,omitempty" tf:"scm_ip_restriction_default_action,omitempty"` + + // Configures the minimum version of TLS required for SSL requests to the SCM site. Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. 
+ // Configures the minimum version of TLS required for SSL requests to the SCM site Possible values include: `1.0`, `1.1`, and `1.2`. Defaults to `1.2`. + ScmMinimumTLSVersion *string `json:"scmMinimumTlsVersion,omitempty" tf:"scm_minimum_tls_version,omitempty"` + + // The SCM Type in use by the Windows Function App. + ScmType *string `json:"scmType,omitempty" tf:"scm_type,omitempty"` + + // Should the Windows Function App ip_restriction configuration be used for the SCM also. + // Should the Windows Function App `ip_restriction` configuration be used for the SCM also. + ScmUseMainIPRestriction *bool `json:"scmUseMainIpRestriction,omitempty" tf:"scm_use_main_ip_restriction,omitempty"` + + // Should the Windows Function App use a 32-bit worker process. Defaults to true. + // Should the Windows Web App use a 32-bit worker. + Use32BitWorker *bool `json:"use32BitWorker,omitempty" tf:"use_32_bit_worker,omitempty"` + + // Should all outbound traffic to have NAT Gateways, Network Security Groups and User Defined Routes applied? Defaults to false. + // Should all outbound traffic to have Virtual Network Security Groups and User Defined Routes applied? Defaults to `false`. + VnetRouteAllEnabled *bool `json:"vnetRouteAllEnabled,omitempty" tf:"vnet_route_all_enabled,omitempty"` + + // Should Web Sockets be enabled. Defaults to false. + // Should Web Sockets be enabled. Defaults to `false`. + WebsocketsEnabled *bool `json:"websocketsEnabled,omitempty" tf:"websockets_enabled,omitempty"` + + // The Windows FX Version string. + WindowsFxVersion *string `json:"windowsFxVersion,omitempty" tf:"windows_fx_version,omitempty"` + + // The number of Workers for this Windows Function App. + // The number of Workers for this Windows Function App. + WorkerCount *float64 `json:"workerCount,omitempty" tf:"worker_count,omitempty"` +} + +type WindowsFunctionAppSiteConfigParameters struct { + + // The URL of the API definition that describes this Windows Function App. 
+ // The URL of the API definition that describes this Windows Function App. + // +kubebuilder:validation:Optional + APIDefinitionURL *string `json:"apiDefinitionUrl,omitempty" tf:"api_definition_url,omitempty"` + + // The ID of the API Management API for this Windows Function App. + // The ID of the API Management API for this Windows Function App. + // +kubebuilder:validation:Optional + APIManagementAPIID *string `json:"apiManagementApiId,omitempty" tf:"api_management_api_id,omitempty"` + + // If this Windows Function App is Always On enabled. Defaults to false. + // If this Windows Web App is Always On enabled. Defaults to `false`. + // +kubebuilder:validation:Optional + AlwaysOn *bool `json:"alwaysOn,omitempty" tf:"always_on,omitempty"` + + // The App command line to launch. + // The program and any arguments used to launch this app via the command line. (Example `node myapp.js`). + // +kubebuilder:validation:Optional + AppCommandLine *string `json:"appCommandLine,omitempty" tf:"app_command_line,omitempty"` + + // The number of workers this function app can scale out to. Only applicable to apps on the Consumption and Premium plan. + // The number of workers this function app can scale out to. Only applicable to apps on the Consumption and Premium plan. + // +kubebuilder:validation:Optional + AppScaleLimit *float64 `json:"appScaleLimit,omitempty" tf:"app_scale_limit,omitempty"` + + // An app_service_logs block as defined above. + // +kubebuilder:validation:Optional + AppServiceLogs *WindowsFunctionAppSiteConfigAppServiceLogsParameters `json:"appServiceLogs,omitempty" tf:"app_service_logs,omitempty"` + + // The Connection String for linking the Windows Function App to Application Insights. + // The Connection String for linking the Windows Function App to Application Insights. 
+ // +kubebuilder:validation:Optional + ApplicationInsightsConnectionStringSecretRef *v1.SecretKeySelector `json:"applicationInsightsConnectionStringSecretRef,omitempty" tf:"-"` + + // The Instrumentation Key for connecting the Windows Function App to Application Insights. + // The Instrumentation Key for connecting the Windows Function App to Application Insights. + // +kubebuilder:validation:Optional + ApplicationInsightsKeySecretRef *v1.SecretKeySelector `json:"applicationInsightsKeySecretRef,omitempty" tf:"-"` + + // An application_stack block as defined above. + // +kubebuilder:validation:Optional + ApplicationStack *WindowsFunctionAppSiteConfigApplicationStackParameters `json:"applicationStack,omitempty" tf:"application_stack,omitempty"` + + // A cors block as defined above. + // +kubebuilder:validation:Optional + Cors *WindowsFunctionAppSiteConfigCorsParameters `json:"cors,omitempty" tf:"cors,omitempty"` + + // Specifies a list of Default Documents for the Windows Function App. + // Specifies a list of Default Documents for the Windows Web App. + // +kubebuilder:validation:Optional + DefaultDocuments []*string `json:"defaultDocuments,omitempty" tf:"default_documents,omitempty"` + + // The number of minimum instances for this Windows Function App. Only affects apps on Elastic Premium plans. + // The number of minimum instances for this Windows Function App. Only affects apps on Elastic Premium plans. + // +kubebuilder:validation:Optional + ElasticInstanceMinimum *float64 `json:"elasticInstanceMinimum,omitempty" tf:"elastic_instance_minimum,omitempty"` + + // State of FTP / FTPS service for this Windows Function App. Possible values include: AllAllowed, FtpsOnly and Disabled. Defaults to Disabled. + // State of FTP / FTPS service for this function app. Possible values include: `AllAllowed`, `FtpsOnly` and `Disabled`. Defaults to `Disabled`. 
+ // +kubebuilder:validation:Optional + FtpsState *string `json:"ftpsState,omitempty" tf:"ftps_state,omitempty"` + + // The amount of time in minutes that a node can be unhealthy before being removed from the load balancer. Possible values are between 2 and 10. Only valid in conjunction with health_check_path. + // The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between `2` and `10`. Defaults to `10`. Only valid in conjunction with `health_check_path` + // +kubebuilder:validation:Optional + HealthCheckEvictionTimeInMin *float64 `json:"healthCheckEvictionTimeInMin,omitempty" tf:"health_check_eviction_time_in_min,omitempty"` + + // The path to be checked for this Windows Function App health. + // The path to be checked for this function app health. + // +kubebuilder:validation:Optional + HealthCheckPath *string `json:"healthCheckPath,omitempty" tf:"health_check_path,omitempty"` + + // Specifies if the HTTP2 protocol should be enabled. Defaults to false. + // Specifies if the http2 protocol should be enabled. Defaults to `false`. + // +kubebuilder:validation:Optional + Http2Enabled *bool `json:"http2Enabled,omitempty" tf:"http2_enabled,omitempty"` + + // One or more ip_restriction blocks as defined above. + // +kubebuilder:validation:Optional + IPRestriction []WindowsFunctionAppSiteConfigIPRestrictionParameters `json:"ipRestriction,omitempty" tf:"ip_restriction,omitempty"` + + // The Default action for traffic that does not match any ip_restriction rule. possible values include Allow and Deny. Defaults to Allow. + // +kubebuilder:validation:Optional + IPRestrictionDefaultAction *string `json:"ipRestrictionDefaultAction,omitempty" tf:"ip_restriction_default_action,omitempty"` + + // The Site load balancing mode. Possible values include: WeightedRoundRobin, LeastRequests, LeastResponseTime, WeightedTotalTraffic, RequestHash, PerSiteRoundRobin. Defaults to LeastRequests if omitted. 
+ // The Site load balancing mode. Possible values include: `WeightedRoundRobin`, `LeastRequests`, `LeastResponseTime`, `WeightedTotalTraffic`, `RequestHash`, `PerSiteRoundRobin`. Defaults to `LeastRequests` if omitted. + // +kubebuilder:validation:Optional + LoadBalancingMode *string `json:"loadBalancingMode,omitempty" tf:"load_balancing_mode,omitempty"` + + // Managed pipeline mode. Possible values include: Integrated, Classic. Defaults to Integrated. + // The Managed Pipeline mode. Possible values include: `Integrated`, `Classic`. Defaults to `Integrated`. + // +kubebuilder:validation:Optional + ManagedPipelineMode *string `json:"managedPipelineMode,omitempty" tf:"managed_pipeline_mode,omitempty"` + + // Configures the minimum version of TLS required for SSL requests. Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + // The configures the minimum version of TLS required for SSL requests. Possible values include: `1.0`, `1.1`, and `1.2`. Defaults to `1.2`. + // +kubebuilder:validation:Optional + MinimumTLSVersion *string `json:"minimumTlsVersion,omitempty" tf:"minimum_tls_version,omitempty"` + + // The number of pre-warmed instances for this Windows Function App. Only affects apps on an Elastic Premium plan. + // The number of pre-warmed instances for this function app. Only affects apps on an Elastic Premium plan. + // +kubebuilder:validation:Optional + PreWarmedInstanceCount *float64 `json:"preWarmedInstanceCount,omitempty" tf:"pre_warmed_instance_count,omitempty"` + + // Should Remote Debugging be enabled. Defaults to false. + // Should Remote Debugging be enabled. Defaults to `false`. + // +kubebuilder:validation:Optional + RemoteDebuggingEnabled *bool `json:"remoteDebuggingEnabled,omitempty" tf:"remote_debugging_enabled,omitempty"` + + // The Remote Debugging Version. Possible values include VS2017, VS2019, and VS2022. + // The Remote Debugging Version. 
Possible values include `VS2017`, `VS2019`, and `VS2022` + // +kubebuilder:validation:Optional + RemoteDebuggingVersion *string `json:"remoteDebuggingVersion,omitempty" tf:"remote_debugging_version,omitempty"` + + // Should Scale Monitoring of the Functions Runtime be enabled? + // Should Functions Runtime Scale Monitoring be enabled. + // +kubebuilder:validation:Optional + RuntimeScaleMonitoringEnabled *bool `json:"runtimeScaleMonitoringEnabled,omitempty" tf:"runtime_scale_monitoring_enabled,omitempty"` + + // One or more scm_ip_restriction blocks as defined above. + // +kubebuilder:validation:Optional + ScmIPRestriction []WindowsFunctionAppSiteConfigScmIPRestrictionParameters `json:"scmIpRestriction,omitempty" tf:"scm_ip_restriction,omitempty"` + + // The Default action for traffic that does not match any scm_ip_restriction rule. possible values include Allow and Deny. Defaults to Allow. + // +kubebuilder:validation:Optional + ScmIPRestrictionDefaultAction *string `json:"scmIpRestrictionDefaultAction,omitempty" tf:"scm_ip_restriction_default_action,omitempty"` + + // Configures the minimum version of TLS required for SSL requests to the SCM site. Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + // Configures the minimum version of TLS required for SSL requests to the SCM site Possible values include: `1.0`, `1.1`, and `1.2`. Defaults to `1.2`. + // +kubebuilder:validation:Optional + ScmMinimumTLSVersion *string `json:"scmMinimumTlsVersion,omitempty" tf:"scm_minimum_tls_version,omitempty"` + + // Should the Windows Function App ip_restriction configuration be used for the SCM also. + // Should the Windows Function App `ip_restriction` configuration be used for the SCM also. + // +kubebuilder:validation:Optional + ScmUseMainIPRestriction *bool `json:"scmUseMainIpRestriction,omitempty" tf:"scm_use_main_ip_restriction,omitempty"` + + // Should the Windows Function App use a 32-bit worker process. Defaults to true. 
+ // Should the Windows Web App use a 32-bit worker. + // +kubebuilder:validation:Optional + Use32BitWorker *bool `json:"use32BitWorker,omitempty" tf:"use_32_bit_worker,omitempty"` + + // Should all outbound traffic to have NAT Gateways, Network Security Groups and User Defined Routes applied? Defaults to false. + // Should all outbound traffic to have Virtual Network Security Groups and User Defined Routes applied? Defaults to `false`. + // +kubebuilder:validation:Optional + VnetRouteAllEnabled *bool `json:"vnetRouteAllEnabled,omitempty" tf:"vnet_route_all_enabled,omitempty"` + + // Should Web Sockets be enabled. Defaults to false. + // Should Web Sockets be enabled. Defaults to `false`. + // +kubebuilder:validation:Optional + WebsocketsEnabled *bool `json:"websocketsEnabled,omitempty" tf:"websockets_enabled,omitempty"` + + // The number of Workers for this Windows Function App. + // The number of Workers for this Windows Function App. + // +kubebuilder:validation:Optional + WorkerCount *float64 `json:"workerCount,omitempty" tf:"worker_count,omitempty"` +} + +type WindowsFunctionAppSiteConfigScmIPRestrictionHeadersInitParameters struct { + + // Specifies a list of Azure Front Door IDs. + XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid"` + + // Specifies if a Front Door Health Probe should be expected. The only possible value is 1. + XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe"` + + // Specifies a list of addresses for which matching should be applied. Omitting this value means allow any. + XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for"` + + // Specifies a list of Hosts for which matching should be applied. + XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host"` +} + +type WindowsFunctionAppSiteConfigScmIPRestrictionHeadersObservation struct { + + // Specifies a list of Azure Front Door IDs. 
+ XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid,omitempty"` + + // Specifies if a Front Door Health Probe should be expected. The only possible value is 1. + XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe,omitempty"` + + // Specifies a list of addresses for which matching should be applied. Omitting this value means allow any. + XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for,omitempty"` + + // Specifies a list of Hosts for which matching should be applied. + XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host,omitempty"` +} + +type WindowsFunctionAppSiteConfigScmIPRestrictionHeadersParameters struct { + + // Specifies a list of Azure Front Door IDs. + // +kubebuilder:validation:Optional + XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid"` + + // Specifies if a Front Door Health Probe should be expected. The only possible value is 1. + // +kubebuilder:validation:Optional + XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe"` + + // Specifies a list of addresses for which matching should be applied. Omitting this value means allow any. + // +kubebuilder:validation:Optional + XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for"` + + // Specifies a list of Hosts for which matching should be applied. + // +kubebuilder:validation:Optional + XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host"` +} + +type WindowsFunctionAppSiteConfigScmIPRestrictionInitParameters struct { + + // The action to take. Possible values are Allow or Deny. Defaults to Allow. + // The action to take. Possible values are `Allow` or `Deny`. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The Description of this IP Restriction. + // The description of the IP restriction rule. 
+ Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A headers block as defined above. + Headers []WindowsFunctionAppSiteConfigScmIPRestrictionHeadersInitParameters `json:"headers,omitempty" tf:"headers,omitempty"` + + // The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + // The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // The name which should be used for this Storage Account. + // The name which should be used for this `ip_restriction`. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The priority value of this ip_restriction. Defaults to 65000. + // The priority value of this `ip_restriction`. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Service Tag used for this IP Restriction. + // The Service Tag used for this IP Restriction. + ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag,omitempty"` + + // The subnet id which will be used by this Function App for regional virtual network integration. + // The Virtual Network Subnet ID used for this IP Restriction. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDRef *v1.Reference `json:"virtualNetworkSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate virtualNetworkSubnetId. 
+ // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDSelector *v1.Selector `json:"virtualNetworkSubnetIdSelector,omitempty" tf:"-"` +} + +type WindowsFunctionAppSiteConfigScmIPRestrictionObservation struct { + + // The action to take. Possible values are Allow or Deny. Defaults to Allow. + // The action to take. Possible values are `Allow` or `Deny`. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The Description of this IP Restriction. + // The description of the IP restriction rule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A headers block as defined above. + Headers []WindowsFunctionAppSiteConfigScmIPRestrictionHeadersObservation `json:"headers,omitempty" tf:"headers,omitempty"` + + // The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + // The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // The name which should be used for this Storage Account. + // The name which should be used for this `ip_restriction`. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The priority value of this ip_restriction. Defaults to 65000. + // The priority value of this `ip_restriction`. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Service Tag used for this IP Restriction. + // The Service Tag used for this IP Restriction. + ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag,omitempty"` + + // The subnet id which will be used by this Function App for regional virtual network integration. + // The Virtual Network Subnet ID used for this IP Restriction. 
+ VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` +} + +type WindowsFunctionAppSiteConfigScmIPRestrictionParameters struct { + + // The action to take. Possible values are Allow or Deny. Defaults to Allow. + // The action to take. Possible values are `Allow` or `Deny`. + // +kubebuilder:validation:Optional + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The Description of this IP Restriction. + // The description of the IP restriction rule. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A headers block as defined above. + // +kubebuilder:validation:Optional + Headers []WindowsFunctionAppSiteConfigScmIPRestrictionHeadersParameters `json:"headers,omitempty" tf:"headers,omitempty"` + + // The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + // The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + // +kubebuilder:validation:Optional + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // The name which should be used for this Storage Account. + // The name which should be used for this `ip_restriction`. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The priority value of this ip_restriction. Defaults to 65000. + // The priority value of this `ip_restriction`. + // +kubebuilder:validation:Optional + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Service Tag used for this IP Restriction. + // The Service Tag used for this IP Restriction. + // +kubebuilder:validation:Optional + ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag,omitempty"` + + // The subnet id which will be used by this Function App for regional virtual network integration. 
+ // The Virtual Network Subnet ID used for this IP Restriction. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDRef *v1.Reference `json:"virtualNetworkSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDSelector *v1.Selector `json:"virtualNetworkSubnetIdSelector,omitempty" tf:"-"` +} + +type WindowsFunctionAppSiteCredentialInitParameters struct { +} + +type WindowsFunctionAppSiteCredentialObservation struct { + + // The Site Credentials Username used for publishing. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Site Credentials Password used for publishing. + Password *string `json:"password,omitempty" tf:"password,omitempty"` +} + +type WindowsFunctionAppSiteCredentialParameters struct { +} + +type WindowsFunctionAppStickySettingsInitParameters struct { + + // A list of app_setting names that the Windows Function App will not swap between Slots when a swap operation is triggered. + AppSettingNames []*string `json:"appSettingNames,omitempty" tf:"app_setting_names,omitempty"` + + // A list of connection_string names that the Windows Function App will not swap between Slots when a swap operation is triggered. 
+ ConnectionStringNames []*string `json:"connectionStringNames,omitempty" tf:"connection_string_names,omitempty"` +} + +type WindowsFunctionAppStickySettingsObservation struct { + + // A list of app_setting names that the Windows Function App will not swap between Slots when a swap operation is triggered. + AppSettingNames []*string `json:"appSettingNames,omitempty" tf:"app_setting_names,omitempty"` + + // A list of connection_string names that the Windows Function App will not swap between Slots when a swap operation is triggered. + ConnectionStringNames []*string `json:"connectionStringNames,omitempty" tf:"connection_string_names,omitempty"` +} + +type WindowsFunctionAppStickySettingsParameters struct { + + // A list of app_setting names that the Windows Function App will not swap between Slots when a swap operation is triggered. + // +kubebuilder:validation:Optional + AppSettingNames []*string `json:"appSettingNames,omitempty" tf:"app_setting_names,omitempty"` + + // A list of connection_string names that the Windows Function App will not swap between Slots when a swap operation is triggered. + // +kubebuilder:validation:Optional + ConnectionStringNames []*string `json:"connectionStringNames,omitempty" tf:"connection_string_names,omitempty"` +} + +type WindowsFunctionAppStorageAccountInitParameters struct { + + // The Name of the Storage Account. + AccountName *string `json:"accountName,omitempty" tf:"account_name,omitempty"` + + // The path at which to mount the storage share. + MountPath *string `json:"mountPath,omitempty" tf:"mount_path,omitempty"` + + // The name which should be used for this Storage Account. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Name of the File Share or Container Name for Blob storage. + ShareName *string `json:"shareName,omitempty" tf:"share_name,omitempty"` + + // The Azure Storage Type. Possible values include AzureFiles. 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type WindowsFunctionAppStorageAccountObservation struct { + + // The Name of the Storage Account. + AccountName *string `json:"accountName,omitempty" tf:"account_name,omitempty"` + + // The path at which to mount the storage share. + MountPath *string `json:"mountPath,omitempty" tf:"mount_path,omitempty"` + + // The name which should be used for this Storage Account. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Name of the File Share or Container Name for Blob storage. + ShareName *string `json:"shareName,omitempty" tf:"share_name,omitempty"` + + // The Azure Storage Type. Possible values include AzureFiles. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type WindowsFunctionAppStorageAccountParameters struct { + + // The Access key for the storage account. + // +kubebuilder:validation:Required + AccessKeySecretRef v1.SecretKeySelector `json:"accessKeySecretRef" tf:"-"` + + // The Name of the Storage Account. + // +kubebuilder:validation:Optional + AccountName *string `json:"accountName" tf:"account_name,omitempty"` + + // The path at which to mount the storage share. + // +kubebuilder:validation:Optional + MountPath *string `json:"mountPath,omitempty" tf:"mount_path,omitempty"` + + // The name which should be used for this Storage Account. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The Name of the File Share or Container Name for Blob storage. + // +kubebuilder:validation:Optional + ShareName *string `json:"shareName" tf:"share_name,omitempty"` + + // The Azure Storage Type. Possible values include AzureFiles. 
+ // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +// WindowsFunctionAppSpec defines the desired state of WindowsFunctionApp +type WindowsFunctionAppSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider WindowsFunctionAppParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider WindowsFunctionAppInitParameters `json:"initProvider,omitempty"` +} + +// WindowsFunctionAppStatus defines the observed state of WindowsFunctionApp. +type WindowsFunctionAppStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider WindowsFunctionAppObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// WindowsFunctionApp is the Schema for the WindowsFunctionApps API. Manages a Windows Function App. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type WindowsFunctionApp struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.siteConfig) || (has(self.initProvider) && has(self.initProvider.siteConfig))",message="spec.forProvider.siteConfig is a required parameter" + Spec WindowsFunctionAppSpec `json:"spec"` + Status WindowsFunctionAppStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// WindowsFunctionAppList contains a list of WindowsFunctionApps +type WindowsFunctionAppList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []WindowsFunctionApp `json:"items"` +} + +// Repository type metadata. +var ( + WindowsFunctionApp_Kind = "WindowsFunctionApp" + WindowsFunctionApp_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: WindowsFunctionApp_Kind}.String() + WindowsFunctionApp_KindAPIVersion = WindowsFunctionApp_Kind + "." 
+ CRDGroupVersion.String() + WindowsFunctionApp_GroupVersionKind = CRDGroupVersion.WithKind(WindowsFunctionApp_Kind) +) + +func init() { + SchemeBuilder.Register(&WindowsFunctionApp{}, &WindowsFunctionAppList{}) +} diff --git a/apis/web/v1beta2/zz_windowsfunctionappslot_terraformed.go b/apis/web/v1beta2/zz_windowsfunctionappslot_terraformed.go new file mode 100755 index 000000000..692a2f6a1 --- /dev/null +++ b/apis/web/v1beta2/zz_windowsfunctionappslot_terraformed.go @@ -0,0 +1,130 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this WindowsFunctionAppSlot +func (mg *WindowsFunctionAppSlot) GetTerraformResourceType() string { + return "azurerm_windows_function_app_slot" +} + +// GetConnectionDetailsMapping for this WindowsFunctionAppSlot +func (tr *WindowsFunctionAppSlot) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"auth_settings[*].active_directory[*].client_secret": "spec.forProvider.authSettings[*].activeDirectory[*].clientSecretSecretRef", "auth_settings[*].facebook[*].app_secret": "spec.forProvider.authSettings[*].facebook[*].appSecretSecretRef", "auth_settings[*].github[*].client_secret": "spec.forProvider.authSettings[*].github[*].clientSecretSecretRef", "auth_settings[*].google[*].client_secret": "spec.forProvider.authSettings[*].google[*].clientSecretSecretRef", "auth_settings[*].microsoft[*].client_secret": "spec.forProvider.authSettings[*].microsoft[*].clientSecretSecretRef", "auth_settings[*].twitter[*].consumer_secret": "spec.forProvider.authSettings[*].twitter[*].consumerSecretSecretRef", "backup[*].storage_account_url": "spec.forProvider.backup[*].storageAccountUrlSecretRef", 
"connection_string[*].value": "spec.forProvider.connectionString[*].valueSecretRef", "custom_domain_verification_id": "status.atProvider.customDomainVerificationId", "site_config[*].application_insights_connection_string": "spec.forProvider.siteConfig[*].applicationInsightsConnectionStringSecretRef", "site_config[*].application_insights_key": "spec.forProvider.siteConfig[*].applicationInsightsKeySecretRef", "site_credential[*]": "status.atProvider.siteCredential[*]", "storage_account[*].access_key": "spec.forProvider.storageAccount[*].accessKeySecretRef", "storage_account_access_key": "spec.forProvider.storageAccountAccessKeySecretRef"} +} + +// GetObservation of this WindowsFunctionAppSlot +func (tr *WindowsFunctionAppSlot) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this WindowsFunctionAppSlot +func (tr *WindowsFunctionAppSlot) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this WindowsFunctionAppSlot +func (tr *WindowsFunctionAppSlot) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this WindowsFunctionAppSlot +func (tr *WindowsFunctionAppSlot) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this WindowsFunctionAppSlot +func (tr *WindowsFunctionAppSlot) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, 
&tr.Spec.ForProvider) +} + +// GetInitParameters of this WindowsFunctionAppSlot +func (tr *WindowsFunctionAppSlot) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetMergedParameters of this WindowsFunctionAppSlot +func (tr *WindowsFunctionAppSlot) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(&params, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this WindowsFunctionAppSlot using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *WindowsFunctionAppSlot) LateInitialize(attrs []byte) (bool, error) { + params := &WindowsFunctionAppSlotParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + opts = append(opts, resource.WithNameFilter("KeyVaultReferenceIdentityID")) + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *WindowsFunctionAppSlot) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/web/v1beta2/zz_windowsfunctionappslot_types.go b/apis/web/v1beta2/zz_windowsfunctionappslot_types.go new file mode 100755 index 000000000..7afea809e --- /dev/null +++ b/apis/web/v1beta2/zz_windowsfunctionappslot_types.go @@ -0,0 +1,3222 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type WindowsFunctionAppSlotAuthSettingsActiveDirectoryInitParameters struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Active Directory. 
+ ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The App Setting name that contains the client secret of the Client. Cannot be used with `client_secret`. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` +} + +type WindowsFunctionAppSlotAuthSettingsActiveDirectoryObservation struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Active Directory. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The App Setting name that contains the client secret of the Client. Cannot be used with `client_secret`. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` +} + +type WindowsFunctionAppSlotAuthSettingsActiveDirectoryParameters struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + // +kubebuilder:validation:Optional + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. 
+ // The ID of the Client to use to authenticate with Azure Active Directory. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. + // The Client Secret for the Client ID. Cannot be used with `client_secret_setting_name`. + // +kubebuilder:validation:Optional + ClientSecretSecretRef *v1.SecretKeySelector `json:"clientSecretSecretRef,omitempty" tf:"-"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The App Setting name that contains the client secret of the Client. Cannot be used with `client_secret`. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` +} + +type WindowsFunctionAppSlotAuthSettingsFacebookInitParameters struct { + + // The App ID of the Facebook app used for login. + // The App ID of the Facebook app used for login. + AppID *string `json:"appId,omitempty" tf:"app_id,omitempty"` + + // The app setting name that contains the app_secret value used for Facebook Login. + // The app setting name that contains the `app_secret` value used for Facebook Login. Cannot be specified with `app_secret`. + AppSecretSettingName *string `json:"appSecretSettingName,omitempty" tf:"app_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + // Specifies a list of OAuth 2.0 scopes to be requested as part of Facebook Login authentication. + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type WindowsFunctionAppSlotAuthSettingsFacebookObservation struct { + + // The App ID of the Facebook app used for login. 
+ // The App ID of the Facebook app used for login. + AppID *string `json:"appId,omitempty" tf:"app_id,omitempty"` + + // The app setting name that contains the app_secret value used for Facebook Login. + // The app setting name that contains the `app_secret` value used for Facebook Login. Cannot be specified with `app_secret`. + AppSecretSettingName *string `json:"appSecretSettingName,omitempty" tf:"app_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + // Specifies a list of OAuth 2.0 scopes to be requested as part of Facebook Login authentication. + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type WindowsFunctionAppSlotAuthSettingsFacebookParameters struct { + + // The App ID of the Facebook app used for login. + // The App ID of the Facebook app used for login. + // +kubebuilder:validation:Optional + AppID *string `json:"appId" tf:"app_id,omitempty"` + + // The App Secret of the Facebook app used for Facebook login. Cannot be specified with app_secret_setting_name. + // The App Secret of the Facebook app used for Facebook Login. Cannot be specified with `app_secret_setting_name`. + // +kubebuilder:validation:Optional + AppSecretSecretRef *v1.SecretKeySelector `json:"appSecretSecretRef,omitempty" tf:"-"` + + // The app setting name that contains the app_secret value used for Facebook Login. + // The app setting name that contains the `app_secret` value used for Facebook Login. Cannot be specified with `app_secret`. + // +kubebuilder:validation:Optional + AppSecretSettingName *string `json:"appSecretSettingName,omitempty" tf:"app_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. 
+ // Specifies a list of OAuth 2.0 scopes to be requested as part of Facebook Login authentication. + // +kubebuilder:validation:Optional + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type WindowsFunctionAppSlotAuthSettingsGithubInitParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the GitHub app used for login. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for GitHub Login. Cannot be specified with `client_secret`. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type WindowsFunctionAppSlotAuthSettingsGithubObservation struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the GitHub app used for login. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for GitHub Login. Cannot be specified with `client_secret`. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. 
If not specified, wl.basic is used as the default scope. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type WindowsFunctionAppSlotAuthSettingsGithubParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the GitHub app used for login. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. + // The Client Secret of the GitHub app used for GitHub Login. Cannot be specified with `client_secret_setting_name`. + // +kubebuilder:validation:Optional + ClientSecretSecretRef *v1.SecretKeySelector `json:"clientSecretSecretRef,omitempty" tf:"-"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for GitHub Login. Cannot be specified with `client_secret`. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + // +kubebuilder:validation:Optional + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type WindowsFunctionAppSlotAuthSettingsGoogleInitParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Google web application. 
+ ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Google Login. Cannot be specified with `client_secret`. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication. If not specified, "openid", "profile", and "email" are used as default scopes. + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type WindowsFunctionAppSlotAuthSettingsGoogleObservation struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Google web application. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Google Login. Cannot be specified with `client_secret`. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication. If not specified, "openid", "profile", and "email" are used as default scopes. 
+ OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type WindowsFunctionAppSlotAuthSettingsGoogleParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Google web application. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. + // The client secret associated with the Google web application. Cannot be specified with `client_secret_setting_name`. + // +kubebuilder:validation:Optional + ClientSecretSecretRef *v1.SecretKeySelector `json:"clientSecretSecretRef,omitempty" tf:"-"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Google Login. Cannot be specified with `client_secret`. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication. If not specified, "openid", "profile", and "email" are used as default scopes. + // +kubebuilder:validation:Optional + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type WindowsFunctionAppSlotAuthSettingsInitParameters struct { + + // an active_directory block as detailed below. 
+ ActiveDirectory *WindowsFunctionAppSlotAuthSettingsActiveDirectoryInitParameters `json:"activeDirectory,omitempty" tf:"active_directory,omitempty"` + + // Specifies a map of login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + // Specifies a map of Login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + // +mapType=granular + AdditionalLoginParameters map[string]*string `json:"additionalLoginParameters,omitempty" tf:"additional_login_parameters,omitempty"` + + // Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + // Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + AllowedExternalRedirectUrls []*string `json:"allowedExternalRedirectUrls,omitempty" tf:"allowed_external_redirect_urls,omitempty"` + + // The default authentication provider to use when multiple providers are configured. Possible values include: AzureActiveDirectory, Facebook, Google, MicrosoftAccount, Twitter, Github. + // The default authentication provider to use when multiple providers are configured. Possible values include: `AzureActiveDirectory`, `Facebook`, `Google`, `MicrosoftAccount`, `Twitter`, `Github`. + DefaultProvider *string `json:"defaultProvider,omitempty" tf:"default_provider,omitempty"` + + // Should the Authentication / Authorization feature be enabled? + // Should the Authentication / Authorization feature be enabled? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // a facebook block as detailed below. + Facebook *WindowsFunctionAppSlotAuthSettingsFacebookInitParameters `json:"facebook,omitempty" tf:"facebook,omitempty"` + + // a github block as detailed below. + Github *WindowsFunctionAppSlotAuthSettingsGithubInitParameters `json:"github,omitempty" tf:"github,omitempty"` + + // a google block as detailed below. 
+ Google *WindowsFunctionAppSlotAuthSettingsGoogleInitParameters `json:"google,omitempty" tf:"google,omitempty"` + + // The OpenID Connect Issuer URI that represents the entity which issues access tokens. + // The OpenID Connect Issuer URI that represents the entity which issues access tokens. + Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"` + + // a microsoft block as detailed below. + Microsoft *WindowsFunctionAppSlotAuthSettingsMicrosoftInitParameters `json:"microsoft,omitempty" tf:"microsoft,omitempty"` + + // The RuntimeVersion of the Authentication / Authorization feature in use. + // The RuntimeVersion of the Authentication / Authorization feature in use. + RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` + + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + TokenRefreshExtensionHours *float64 `json:"tokenRefreshExtensionHours,omitempty" tf:"token_refresh_extension_hours,omitempty"` + + // Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to false. + // Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to `false`. + TokenStoreEnabled *bool `json:"tokenStoreEnabled,omitempty" tf:"token_store_enabled,omitempty"` + + // a twitter block as detailed below. + Twitter *WindowsFunctionAppSlotAuthSettingsTwitterInitParameters `json:"twitter,omitempty" tf:"twitter,omitempty"` + + // The action to take when an unauthenticated client attempts to access the app. Possible values include: RedirectToLoginPage, AllowAnonymous. + // The action to take when an unauthenticated client attempts to access the app. 
Possible values include: `RedirectToLoginPage`, `AllowAnonymous`. + UnauthenticatedClientAction *string `json:"unauthenticatedClientAction,omitempty" tf:"unauthenticated_client_action,omitempty"` +} + +type WindowsFunctionAppSlotAuthSettingsMicrosoftInitParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OAuth 2.0 client ID that was created for the app used for authentication. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret`. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + // The list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, `wl.basic` is used as the default scope. + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type WindowsFunctionAppSlotAuthSettingsMicrosoftObservation struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OAuth 2.0 client ID that was created for the app used for authentication. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret`. 
+ ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + // The list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, `wl.basic` is used as the default scope. + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type WindowsFunctionAppSlotAuthSettingsMicrosoftParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OAuth 2.0 client ID that was created for the app used for authentication. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. + // The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret_setting_name`. + // +kubebuilder:validation:Optional + ClientSecretSecretRef *v1.SecretKeySelector `json:"clientSecretSecretRef,omitempty" tf:"-"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret`. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + // The list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. 
If not specified, `wl.basic` is used as the default scope. + // +kubebuilder:validation:Optional + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type WindowsFunctionAppSlotAuthSettingsObservation struct { + + // an active_directory block as detailed below. + ActiveDirectory *WindowsFunctionAppSlotAuthSettingsActiveDirectoryObservation `json:"activeDirectory,omitempty" tf:"active_directory,omitempty"` + + // Specifies a map of login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + // Specifies a map of Login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + // +mapType=granular + AdditionalLoginParameters map[string]*string `json:"additionalLoginParameters,omitempty" tf:"additional_login_parameters,omitempty"` + + // Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + // Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + AllowedExternalRedirectUrls []*string `json:"allowedExternalRedirectUrls,omitempty" tf:"allowed_external_redirect_urls,omitempty"` + + // The default authentication provider to use when multiple providers are configured. Possible values include: AzureActiveDirectory, Facebook, Google, MicrosoftAccount, Twitter, Github. + // The default authentication provider to use when multiple providers are configured. Possible values include: `AzureActiveDirectory`, `Facebook`, `Google`, `MicrosoftAccount`, `Twitter`, `Github`. + DefaultProvider *string `json:"defaultProvider,omitempty" tf:"default_provider,omitempty"` + + // Should the Authentication / Authorization feature be enabled? + // Should the Authentication / Authorization feature be enabled? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // a facebook block as detailed below. 
+ Facebook *WindowsFunctionAppSlotAuthSettingsFacebookObservation `json:"facebook,omitempty" tf:"facebook,omitempty"` + + // a github block as detailed below. + Github *WindowsFunctionAppSlotAuthSettingsGithubObservation `json:"github,omitempty" tf:"github,omitempty"` + + // a google block as detailed below. + Google *WindowsFunctionAppSlotAuthSettingsGoogleObservation `json:"google,omitempty" tf:"google,omitempty"` + + // The OpenID Connect Issuer URI that represents the entity which issues access tokens. + // The OpenID Connect Issuer URI that represents the entity which issues access tokens. + Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"` + + // a microsoft block as detailed below. + Microsoft *WindowsFunctionAppSlotAuthSettingsMicrosoftObservation `json:"microsoft,omitempty" tf:"microsoft,omitempty"` + + // The RuntimeVersion of the Authentication / Authorization feature in use. + // The RuntimeVersion of the Authentication / Authorization feature in use. + RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` + + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + TokenRefreshExtensionHours *float64 `json:"tokenRefreshExtensionHours,omitempty" tf:"token_refresh_extension_hours,omitempty"` + + // Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to false. + // Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to `false`. + TokenStoreEnabled *bool `json:"tokenStoreEnabled,omitempty" tf:"token_store_enabled,omitempty"` + + // a twitter block as detailed below. 
+ Twitter *WindowsFunctionAppSlotAuthSettingsTwitterObservation `json:"twitter,omitempty" tf:"twitter,omitempty"` + + // The action to take when an unauthenticated client attempts to access the app. Possible values include: RedirectToLoginPage, AllowAnonymous. + // The action to take when an unauthenticated client attempts to access the app. Possible values include: `RedirectToLoginPage`, `AllowAnonymous`. + UnauthenticatedClientAction *string `json:"unauthenticatedClientAction,omitempty" tf:"unauthenticated_client_action,omitempty"` +} + +type WindowsFunctionAppSlotAuthSettingsParameters struct { + + // an active_directory block as detailed below. + // +kubebuilder:validation:Optional + ActiveDirectory *WindowsFunctionAppSlotAuthSettingsActiveDirectoryParameters `json:"activeDirectory,omitempty" tf:"active_directory,omitempty"` + + // Specifies a map of login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + // Specifies a map of Login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + // +kubebuilder:validation:Optional + // +mapType=granular + AdditionalLoginParameters map[string]*string `json:"additionalLoginParameters,omitempty" tf:"additional_login_parameters,omitempty"` + + // Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + // Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + // +kubebuilder:validation:Optional + AllowedExternalRedirectUrls []*string `json:"allowedExternalRedirectUrls,omitempty" tf:"allowed_external_redirect_urls,omitempty"` + + // The default authentication provider to use when multiple providers are configured. Possible values include: AzureActiveDirectory, Facebook, Google, MicrosoftAccount, Twitter, Github. + // The default authentication provider to use when multiple providers are configured. 
Possible values include: `AzureActiveDirectory`, `Facebook`, `Google`, `MicrosoftAccount`, `Twitter`, `Github`. + // +kubebuilder:validation:Optional + DefaultProvider *string `json:"defaultProvider,omitempty" tf:"default_provider,omitempty"` + + // Should the Authentication / Authorization feature be enabled? + // Should the Authentication / Authorization feature be enabled? + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` + + // a facebook block as detailed below. + // +kubebuilder:validation:Optional + Facebook *WindowsFunctionAppSlotAuthSettingsFacebookParameters `json:"facebook,omitempty" tf:"facebook,omitempty"` + + // a github block as detailed below. + // +kubebuilder:validation:Optional + Github *WindowsFunctionAppSlotAuthSettingsGithubParameters `json:"github,omitempty" tf:"github,omitempty"` + + // a google block as detailed below. + // +kubebuilder:validation:Optional + Google *WindowsFunctionAppSlotAuthSettingsGoogleParameters `json:"google,omitempty" tf:"google,omitempty"` + + // The OpenID Connect Issuer URI that represents the entity which issues access tokens. + // The OpenID Connect Issuer URI that represents the entity which issues access tokens. + // +kubebuilder:validation:Optional + Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"` + + // a microsoft block as detailed below. + // +kubebuilder:validation:Optional + Microsoft *WindowsFunctionAppSlotAuthSettingsMicrosoftParameters `json:"microsoft,omitempty" tf:"microsoft,omitempty"` + + // The RuntimeVersion of the Authentication / Authorization feature in use. + // The RuntimeVersion of the Authentication / Authorization feature in use. + // +kubebuilder:validation:Optional + RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` + + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. 
+ // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + // +kubebuilder:validation:Optional + TokenRefreshExtensionHours *float64 `json:"tokenRefreshExtensionHours,omitempty" tf:"token_refresh_extension_hours,omitempty"` + + // Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to false. + // Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to `false`. + // +kubebuilder:validation:Optional + TokenStoreEnabled *bool `json:"tokenStoreEnabled,omitempty" tf:"token_store_enabled,omitempty"` + + // a twitter block as detailed below. + // +kubebuilder:validation:Optional + Twitter *WindowsFunctionAppSlotAuthSettingsTwitterParameters `json:"twitter,omitempty" tf:"twitter,omitempty"` + + // The action to take when an unauthenticated client attempts to access the app. Possible values include: RedirectToLoginPage, AllowAnonymous. + // The action to take when an unauthenticated client attempts to access the app. Possible values include: `RedirectToLoginPage`, `AllowAnonymous`. + // +kubebuilder:validation:Optional + UnauthenticatedClientAction *string `json:"unauthenticatedClientAction,omitempty" tf:"unauthenticated_client_action,omitempty"` +} + +type WindowsFunctionAppSlotAuthSettingsTwitterInitParameters struct { + + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + ConsumerKey *string `json:"consumerKey,omitempty" tf:"consumer_key,omitempty"` + + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret`. 
+ ConsumerSecretSettingName *string `json:"consumerSecretSettingName,omitempty" tf:"consumer_secret_setting_name,omitempty"` +} + +type WindowsFunctionAppSlotAuthSettingsTwitterObservation struct { + + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + ConsumerKey *string `json:"consumerKey,omitempty" tf:"consumer_key,omitempty"` + + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret`. + ConsumerSecretSettingName *string `json:"consumerSecretSettingName,omitempty" tf:"consumer_secret_setting_name,omitempty"` +} + +type WindowsFunctionAppSlotAuthSettingsTwitterParameters struct { + + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // +kubebuilder:validation:Optional + ConsumerKey *string `json:"consumerKey" tf:"consumer_key,omitempty"` + + // The OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with consumer_secret_setting_name. + // The OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret_setting_name`. + // +kubebuilder:validation:Optional + ConsumerSecretSecretRef *v1.SecretKeySelector `json:"consumerSecretSecretRef,omitempty" tf:"-"` + + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret`. 
+ // +kubebuilder:validation:Optional + ConsumerSecretSettingName *string `json:"consumerSecretSettingName,omitempty" tf:"consumer_secret_setting_name,omitempty"` +} + +type WindowsFunctionAppSlotAuthSettingsV2ActiveDirectoryV2InitParameters struct { + + // The list of allowed Applications for the Default Authorisation Policy. + // The list of allowed Applications for the Default Authorisation Policy. + AllowedApplications []*string `json:"allowedApplications,omitempty" tf:"allowed_applications,omitempty"` + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The list of allowed Group Names for the Default Authorisation Policy. + // The list of allowed Group Names for the Default Authorisation Policy. + AllowedGroups []*string `json:"allowedGroups,omitempty" tf:"allowed_groups,omitempty"` + + // The list of allowed Identities for the Default Authorisation Policy. + // The list of allowed Identities for the Default Authorisation Policy. + AllowedIdentities []*string `json:"allowedIdentities,omitempty" tf:"allowed_identities,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Active Directory. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The thumbprint of the certificate used for signing purposes. + // The thumbprint of the certificate used for signing purposes. + ClientSecretCertificateThumbprint *string `json:"clientSecretCertificateThumbprint,omitempty" tf:"client_secret_certificate_thumbprint,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. 
+ // The App Setting name that contains the client secret of the Client. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // A list of Allowed Client Applications in the JWT Claim. + // A list of Allowed Client Applications in the JWT Claim. + JwtAllowedClientApplications []*string `json:"jwtAllowedClientApplications,omitempty" tf:"jwt_allowed_client_applications,omitempty"` + + // A list of Allowed Groups in the JWT Claim. + // A list of Allowed Groups in the JWT Claim. + JwtAllowedGroups []*string `json:"jwtAllowedGroups,omitempty" tf:"jwt_allowed_groups,omitempty"` + + // A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + // A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + // +mapType=granular + LoginParameters map[string]*string `json:"loginParameters,omitempty" tf:"login_parameters,omitempty"` + + // The Azure Tenant Endpoint for the Authenticating Tenant. e.g. https://login.microsoftonline.com/v2.0/{tenant-guid}/ + // The Azure Tenant Endpoint for the Authenticating Tenant. e.g. `https://login.microsoftonline.com/v2.0/{tenant-guid}/`. + TenantAuthEndpoint *string `json:"tenantAuthEndpoint,omitempty" tf:"tenant_auth_endpoint,omitempty"` + + // Should the www-authenticate provider should be omitted from the request? Defaults to false. + // Should the www-authenticate provider should be omitted from the request? Defaults to `false` + WwwAuthenticationDisabled *bool `json:"wwwAuthenticationDisabled,omitempty" tf:"www_authentication_disabled,omitempty"` +} + +type WindowsFunctionAppSlotAuthSettingsV2ActiveDirectoryV2Observation struct { + + // The list of allowed Applications for the Default Authorisation Policy. + // The list of allowed Applications for the Default Authorisation Policy. 
+ AllowedApplications []*string `json:"allowedApplications,omitempty" tf:"allowed_applications,omitempty"` + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The list of allowed Group Names for the Default Authorisation Policy. + // The list of allowed Group Names for the Default Authorisation Policy. + AllowedGroups []*string `json:"allowedGroups,omitempty" tf:"allowed_groups,omitempty"` + + // The list of allowed Identities for the Default Authorisation Policy. + // The list of allowed Identities for the Default Authorisation Policy. + AllowedIdentities []*string `json:"allowedIdentities,omitempty" tf:"allowed_identities,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Active Directory. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The thumbprint of the certificate used for signing purposes. + // The thumbprint of the certificate used for signing purposes. + ClientSecretCertificateThumbprint *string `json:"clientSecretCertificateThumbprint,omitempty" tf:"client_secret_certificate_thumbprint,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The App Setting name that contains the client secret of the Client. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // A list of Allowed Client Applications in the JWT Claim. + // A list of Allowed Client Applications in the JWT Claim. 
+ JwtAllowedClientApplications []*string `json:"jwtAllowedClientApplications,omitempty" tf:"jwt_allowed_client_applications,omitempty"` + + // A list of Allowed Groups in the JWT Claim. + // A list of Allowed Groups in the JWT Claim. + JwtAllowedGroups []*string `json:"jwtAllowedGroups,omitempty" tf:"jwt_allowed_groups,omitempty"` + + // A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + // A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + // +mapType=granular + LoginParameters map[string]*string `json:"loginParameters,omitempty" tf:"login_parameters,omitempty"` + + // The Azure Tenant Endpoint for the Authenticating Tenant. e.g. https://login.microsoftonline.com/v2.0/{tenant-guid}/ + // The Azure Tenant Endpoint for the Authenticating Tenant. e.g. `https://login.microsoftonline.com/v2.0/{tenant-guid}/`. + TenantAuthEndpoint *string `json:"tenantAuthEndpoint,omitempty" tf:"tenant_auth_endpoint,omitempty"` + + // Should the www-authenticate provider should be omitted from the request? Defaults to false. + // Should the www-authenticate provider should be omitted from the request? Defaults to `false` + WwwAuthenticationDisabled *bool `json:"wwwAuthenticationDisabled,omitempty" tf:"www_authentication_disabled,omitempty"` +} + +type WindowsFunctionAppSlotAuthSettingsV2ActiveDirectoryV2Parameters struct { + + // The list of allowed Applications for the Default Authorisation Policy. + // The list of allowed Applications for the Default Authorisation Policy. + // +kubebuilder:validation:Optional + AllowedApplications []*string `json:"allowedApplications,omitempty" tf:"allowed_applications,omitempty"` + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. 
+ // +kubebuilder:validation:Optional + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The list of allowed Group Names for the Default Authorisation Policy. + // The list of allowed Group Names for the Default Authorisation Policy. + // +kubebuilder:validation:Optional + AllowedGroups []*string `json:"allowedGroups,omitempty" tf:"allowed_groups,omitempty"` + + // The list of allowed Identities for the Default Authorisation Policy. + // The list of allowed Identities for the Default Authorisation Policy. + // +kubebuilder:validation:Optional + AllowedIdentities []*string `json:"allowedIdentities,omitempty" tf:"allowed_identities,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Active Directory. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The thumbprint of the certificate used for signing purposes. + // The thumbprint of the certificate used for signing purposes. + // +kubebuilder:validation:Optional + ClientSecretCertificateThumbprint *string `json:"clientSecretCertificateThumbprint,omitempty" tf:"client_secret_certificate_thumbprint,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The App Setting name that contains the client secret of the Client. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // A list of Allowed Client Applications in the JWT Claim. + // A list of Allowed Client Applications in the JWT Claim. + // +kubebuilder:validation:Optional + JwtAllowedClientApplications []*string `json:"jwtAllowedClientApplications,omitempty" tf:"jwt_allowed_client_applications,omitempty"` + + // A list of Allowed Groups in the JWT Claim. 
+ // A list of Allowed Groups in the JWT Claim. + // +kubebuilder:validation:Optional + JwtAllowedGroups []*string `json:"jwtAllowedGroups,omitempty" tf:"jwt_allowed_groups,omitempty"` + + // A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + // A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + // +kubebuilder:validation:Optional + // +mapType=granular + LoginParameters map[string]*string `json:"loginParameters,omitempty" tf:"login_parameters,omitempty"` + + // The Azure Tenant Endpoint for the Authenticating Tenant. e.g. https://login.microsoftonline.com/v2.0/{tenant-guid}/ + // The Azure Tenant Endpoint for the Authenticating Tenant. e.g. `https://login.microsoftonline.com/v2.0/{tenant-guid}/`. + // +kubebuilder:validation:Optional + TenantAuthEndpoint *string `json:"tenantAuthEndpoint" tf:"tenant_auth_endpoint,omitempty"` + + // Should the www-authenticate provider should be omitted from the request? Defaults to false. + // Should the www-authenticate provider should be omitted from the request? Defaults to `false` + // +kubebuilder:validation:Optional + WwwAuthenticationDisabled *bool `json:"wwwAuthenticationDisabled,omitempty" tf:"www_authentication_disabled,omitempty"` +} + +type WindowsFunctionAppSlotAuthSettingsV2AppleV2InitParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Apple web application. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Apple Login. 
+ ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` +} + +type WindowsFunctionAppSlotAuthSettingsV2AppleV2Observation struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Apple web application. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Apple Login. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type WindowsFunctionAppSlotAuthSettingsV2AppleV2Parameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Apple web application. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Apple Login. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName" tf:"client_secret_setting_name,omitempty"` +} + +type WindowsFunctionAppSlotAuthSettingsV2AzureStaticWebAppV2InitParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Static Web App Authentication. 
+ ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` +} + +type WindowsFunctionAppSlotAuthSettingsV2AzureStaticWebAppV2Observation struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Static Web App Authentication. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` +} + +type WindowsFunctionAppSlotAuthSettingsV2AzureStaticWebAppV2Parameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Static Web App Authentication. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` +} + +type WindowsFunctionAppSlotAuthSettingsV2CustomOidcV2InitParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with this Custom OIDC. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The name which should be used for this Storage Account. + // The name of the Custom OIDC Authentication Provider. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The name of the claim that contains the users name. + // The name of the claim that contains the users name. + NameClaimType *string `json:"nameClaimType,omitempty" tf:"name_claim_type,omitempty"` + + // The app setting name that contains the client_secret value used for the Custom OIDC Login. + // The endpoint that contains all the configuration endpoints for this Custom OIDC provider. + OpenIDConfigurationEndpoint *string `json:"openidConfigurationEndpoint,omitempty" tf:"openid_configuration_endpoint,omitempty"` + + // The list of the scopes that should be requested while authenticating. + // The list of the scopes that should be requested while authenticating. 
+ Scopes []*string `json:"scopes,omitempty" tf:"scopes,omitempty"` +} + +type WindowsFunctionAppSlotAuthSettingsV2CustomOidcV2Observation struct { + + // The endpoint to make the Authorisation Request as supplied by openid_configuration_endpoint response. + // The endpoint to make the Authorisation Request. + AuthorisationEndpoint *string `json:"authorisationEndpoint,omitempty" tf:"authorisation_endpoint,omitempty"` + + // The endpoint that provides the keys necessary to validate the token as supplied by openid_configuration_endpoint response. + // The endpoint that provides the keys necessary to validate the token. + CertificationURI *string `json:"certificationUri,omitempty" tf:"certification_uri,omitempty"` + + // The Client Credential Method used. + // The Client Credential Method used. Currently the only supported value is `ClientSecretPost`. + ClientCredentialMethod *string `json:"clientCredentialMethod,omitempty" tf:"client_credential_method,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with this Custom OIDC. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The App Setting name that contains the secret for this Custom OIDC Client. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The endpoint that issued the Token as supplied by openid_configuration_endpoint response. + // The endpoint that issued the Token. + IssuerEndpoint *string `json:"issuerEndpoint,omitempty" tf:"issuer_endpoint,omitempty"` + + // The name which should be used for this Storage Account. + // The name of the Custom OIDC Authentication Provider. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The name of the claim that contains the users name. 
+ // The name of the claim that contains the users name. + NameClaimType *string `json:"nameClaimType,omitempty" tf:"name_claim_type,omitempty"` + + // The app setting name that contains the client_secret value used for the Custom OIDC Login. + // The endpoint that contains all the configuration endpoints for this Custom OIDC provider. + OpenIDConfigurationEndpoint *string `json:"openidConfigurationEndpoint,omitempty" tf:"openid_configuration_endpoint,omitempty"` + + // The list of the scopes that should be requested while authenticating. + // The list of the scopes that should be requested while authenticating. + Scopes []*string `json:"scopes,omitempty" tf:"scopes,omitempty"` + + // The endpoint used to request a Token as supplied by openid_configuration_endpoint response. + // The endpoint used to request a Token. + TokenEndpoint *string `json:"tokenEndpoint,omitempty" tf:"token_endpoint,omitempty"` +} + +type WindowsFunctionAppSlotAuthSettingsV2CustomOidcV2Parameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with this Custom OIDC. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The name which should be used for this Storage Account. + // The name of the Custom OIDC Authentication Provider. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The name of the claim that contains the users name. + // The name of the claim that contains the users name. + // +kubebuilder:validation:Optional + NameClaimType *string `json:"nameClaimType,omitempty" tf:"name_claim_type,omitempty"` + + // The app setting name that contains the client_secret value used for the Custom OIDC Login. + // The endpoint that contains all the configuration endpoints for this Custom OIDC provider. 
+ // +kubebuilder:validation:Optional + OpenIDConfigurationEndpoint *string `json:"openidConfigurationEndpoint" tf:"openid_configuration_endpoint,omitempty"` + + // The list of the scopes that should be requested while authenticating. + // The list of the scopes that should be requested while authenticating. + // +kubebuilder:validation:Optional + Scopes []*string `json:"scopes,omitempty" tf:"scopes,omitempty"` +} + +type WindowsFunctionAppSlotAuthSettingsV2FacebookV2InitParameters struct { + + // The App ID of the Facebook app used for login. + // The App ID of the Facebook app used for login. + AppID *string `json:"appId,omitempty" tf:"app_id,omitempty"` + + // The app setting name that contains the app_secret value used for Facebook Login. + // The app setting name that contains the `app_secret` value used for Facebook Login. + AppSecretSettingName *string `json:"appSecretSettingName,omitempty" tf:"app_secret_setting_name,omitempty"` + + // The version of the Facebook API to be used while logging in. + // The version of the Facebook API to be used while logging in. + GraphAPIVersion *string `json:"graphApiVersion,omitempty" tf:"graph_api_version,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of scopes to be requested as part of Facebook Login authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type WindowsFunctionAppSlotAuthSettingsV2FacebookV2Observation struct { + + // The App ID of the Facebook app used for login. + // The App ID of the Facebook app used for login. + AppID *string `json:"appId,omitempty" tf:"app_id,omitempty"` + + // The app setting name that contains the app_secret value used for Facebook Login. + // The app setting name that contains the `app_secret` value used for Facebook Login. 
+ AppSecretSettingName *string `json:"appSecretSettingName,omitempty" tf:"app_secret_setting_name,omitempty"` + + // The version of the Facebook API to be used while logging in. + // The version of the Facebook API to be used while logging in. + GraphAPIVersion *string `json:"graphApiVersion,omitempty" tf:"graph_api_version,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of scopes to be requested as part of Facebook Login authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type WindowsFunctionAppSlotAuthSettingsV2FacebookV2Parameters struct { + + // The App ID of the Facebook app used for login. + // The App ID of the Facebook app used for login. + // +kubebuilder:validation:Optional + AppID *string `json:"appId" tf:"app_id,omitempty"` + + // The app setting name that contains the app_secret value used for Facebook Login. + // The app setting name that contains the `app_secret` value used for Facebook Login. + // +kubebuilder:validation:Optional + AppSecretSettingName *string `json:"appSecretSettingName" tf:"app_secret_setting_name,omitempty"` + + // The version of the Facebook API to be used while logging in. + // The version of the Facebook API to be used while logging in. + // +kubebuilder:validation:Optional + GraphAPIVersion *string `json:"graphApiVersion,omitempty" tf:"graph_api_version,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of scopes to be requested as part of Facebook Login authentication. + // +kubebuilder:validation:Optional + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type WindowsFunctionAppSlotAuthSettingsV2GithubV2InitParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the GitHub app used for login. 
+ ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for GitHub Login. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type WindowsFunctionAppSlotAuthSettingsV2GithubV2Observation struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the GitHub app used for login. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for GitHub Login. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type WindowsFunctionAppSlotAuthSettingsV2GithubV2Parameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the GitHub app used for login. 
+ // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for GitHub Login. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + // +kubebuilder:validation:Optional + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type WindowsFunctionAppSlotAuthSettingsV2GoogleV2InitParameters struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed Audiences that will be requested as part of Google Sign-In authentication. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Google web application. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Google Login. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of Login scopes that will be requested as part of Google Sign-In authentication. 
+ LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type WindowsFunctionAppSlotAuthSettingsV2GoogleV2Observation struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed Audiences that will be requested as part of Google Sign-In authentication. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Google web application. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Google Login. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of Login scopes that will be requested as part of Google Sign-In authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type WindowsFunctionAppSlotAuthSettingsV2GoogleV2Parameters struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed Audiences that will be requested as part of Google Sign-In authentication. + // +kubebuilder:validation:Optional + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Google web application. 
+ // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Google Login. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of Login scopes that will be requested as part of Google Sign-In authentication. + // +kubebuilder:validation:Optional + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type WindowsFunctionAppSlotAuthSettingsV2InitParameters struct { + + // An active_directory_v2 block as defined below. + ActiveDirectoryV2 *WindowsFunctionAppSlotAuthSettingsV2ActiveDirectoryV2InitParameters `json:"activeDirectoryV2,omitempty" tf:"active_directory_v2,omitempty"` + + // An apple_v2 block as defined below. + AppleV2 *WindowsFunctionAppSlotAuthSettingsV2AppleV2InitParameters `json:"appleV2,omitempty" tf:"apple_v2,omitempty"` + + // Should the AuthV2 Settings be enabled. Defaults to false. + // Should the AuthV2 Settings be enabled. Defaults to `false` + AuthEnabled *bool `json:"authEnabled,omitempty" tf:"auth_enabled,omitempty"` + + // An azure_static_web_app_v2 block as defined below. + AzureStaticWebAppV2 *WindowsFunctionAppSlotAuthSettingsV2AzureStaticWebAppV2InitParameters `json:"azureStaticWebAppV2,omitempty" tf:"azure_static_web_app_v2,omitempty"` + + // The path to the App Auth settings. + // The path to the App Auth settings. **Note:** Relative Paths are evaluated from the Site Root directory. + ConfigFilePath *string `json:"configFilePath,omitempty" tf:"config_file_path,omitempty"` + + // Zero or more custom_oidc_v2 blocks as defined below. 
+ CustomOidcV2 []WindowsFunctionAppSlotAuthSettingsV2CustomOidcV2InitParameters `json:"customOidcV2,omitempty" tf:"custom_oidc_v2,omitempty"` + + // The Default Authentication Provider to use when the unauthenticated_action is set to RedirectToLoginPage. Possible values include: apple, azureactivedirectory, facebook, github, google, twitter and the name of your custom_oidc_v2 provider. + // The Default Authentication Provider to use when the `unauthenticated_action` is set to `RedirectToLoginPage`. Possible values include: `apple`, `azureactivedirectory`, `facebook`, `github`, `google`, `twitter` and the `name` of your `custom_oidc_v2` provider. + DefaultProvider *string `json:"defaultProvider,omitempty" tf:"default_provider,omitempty"` + + // The paths which should be excluded from the unauthenticated_action when it is set to RedirectToLoginPage. + // The paths which should be excluded from the `unauthenticated_action` when it is set to `RedirectToLoginPage`. + ExcludedPaths []*string `json:"excludedPaths,omitempty" tf:"excluded_paths,omitempty"` + + // A facebook_v2 block as defined below. + FacebookV2 *WindowsFunctionAppSlotAuthSettingsV2FacebookV2InitParameters `json:"facebookV2,omitempty" tf:"facebook_v2,omitempty"` + + // The convention used to determine the url of the request made. Possible values include NoProxy, Standard, Custom. Defaults to NoProxy. + // The convention used to determine the url of the request made. Possible values include `ForwardProxyConventionNoProxy`, `ForwardProxyConventionStandard`, `ForwardProxyConventionCustom`. Defaults to `ForwardProxyConventionNoProxy` + ForwardProxyConvention *string `json:"forwardProxyConvention,omitempty" tf:"forward_proxy_convention,omitempty"` + + // The name of the custom header containing the host of the request. + // The name of the header containing the host of the request. 
+ ForwardProxyCustomHostHeaderName *string `json:"forwardProxyCustomHostHeaderName,omitempty" tf:"forward_proxy_custom_host_header_name,omitempty"` + + // The name of the custom header containing the scheme of the request. + // The name of the header containing the scheme of the request. + ForwardProxyCustomSchemeHeaderName *string `json:"forwardProxyCustomSchemeHeaderName,omitempty" tf:"forward_proxy_custom_scheme_header_name,omitempty"` + + // A github_v2 block as defined below. + GithubV2 *WindowsFunctionAppSlotAuthSettingsV2GithubV2InitParameters `json:"githubV2,omitempty" tf:"github_v2,omitempty"` + + // A google_v2 block as defined below. + GoogleV2 *WindowsFunctionAppSlotAuthSettingsV2GoogleV2InitParameters `json:"googleV2,omitempty" tf:"google_v2,omitempty"` + + // The prefix that should precede all the authentication and authorisation paths. Defaults to /.auth. + // The prefix that should precede all the authentication and authorisation paths. Defaults to `/.auth` + HTTPRouteAPIPrefix *string `json:"httpRouteApiPrefix,omitempty" tf:"http_route_api_prefix,omitempty"` + + // A login block as defined below. + Login *WindowsFunctionAppSlotAuthSettingsV2LoginInitParameters `json:"login,omitempty" tf:"login,omitempty"` + + // A microsoft_v2 block as defined below. + MicrosoftV2 *WindowsFunctionAppSlotAuthSettingsV2MicrosoftV2InitParameters `json:"microsoftV2,omitempty" tf:"microsoft_v2,omitempty"` + + // Should the authentication flow be used for all requests. + // Should the authentication flow be used for all requests. + RequireAuthentication *bool `json:"requireAuthentication,omitempty" tf:"require_authentication,omitempty"` + + // Should HTTPS be required on connections? Defaults to true. + // Should HTTPS be required on connections? Defaults to true. + RequireHTTPS *bool `json:"requireHttps,omitempty" tf:"require_https,omitempty"` + + // The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to ~1. 
+ // The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to `~1` + RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` + + // A twitter_v2 block as defined below. + TwitterV2 *WindowsFunctionAppSlotAuthSettingsV2TwitterV2InitParameters `json:"twitterV2,omitempty" tf:"twitter_v2,omitempty"` + + // The action to take for requests made without authentication. Possible values include RedirectToLoginPage, AllowAnonymous, Return401, and Return403. Defaults to RedirectToLoginPage. + // The action to take for requests made without authentication. Possible values include `RedirectToLoginPage`, `AllowAnonymous`, `Return401`, and `Return403`. Defaults to `RedirectToLoginPage`. + UnauthenticatedAction *string `json:"unauthenticatedAction,omitempty" tf:"unauthenticated_action,omitempty"` +} + +type WindowsFunctionAppSlotAuthSettingsV2LoginInitParameters struct { + + // External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. + // External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. **Note:** URLs within the current domain are always implicitly allowed. + AllowedExternalRedirectUrls []*string `json:"allowedExternalRedirectUrls,omitempty" tf:"allowed_external_redirect_urls,omitempty"` + + // The method by which cookies expire. Possible values include: FixedTime, and IdentityProviderDerived. Defaults to FixedTime. + // The method by which cookies expire. Possible values include: `FixedTime`, and `IdentityProviderDerived`. Defaults to `FixedTime`. + CookieExpirationConvention *string `json:"cookieExpirationConvention,omitempty" tf:"cookie_expiration_convention,omitempty"` + + // The time after the request is made when the session cookie should expire. 
Defaults to 08:00:00. + // The time after the request is made when the session cookie should expire. Defaults to `08:00:00`. + CookieExpirationTime *string `json:"cookieExpirationTime,omitempty" tf:"cookie_expiration_time,omitempty"` + + // The endpoint to which logout requests should be made. + // The endpoint to which logout requests should be made. + LogoutEndpoint *string `json:"logoutEndpoint,omitempty" tf:"logout_endpoint,omitempty"` + + // The time after the request is made when the nonce should expire. Defaults to 00:05:00. + // The time after the request is made when the nonce should expire. Defaults to `00:05:00`. + NonceExpirationTime *string `json:"nonceExpirationTime,omitempty" tf:"nonce_expiration_time,omitempty"` + + // Should the fragments from the request be preserved after the login request is made. Defaults to false. + // Should the fragments from the request be preserved after the login request is made. Defaults to `false`. + PreserveURLFragmentsForLogins *bool `json:"preserveUrlFragmentsForLogins,omitempty" tf:"preserve_url_fragments_for_logins,omitempty"` + + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + TokenRefreshExtensionTime *float64 `json:"tokenRefreshExtensionTime,omitempty" tf:"token_refresh_extension_time,omitempty"` + + // Should the Token Store configuration Enabled. Defaults to false + // Should the Token Store configuration Enabled. Defaults to `false` + TokenStoreEnabled *bool `json:"tokenStoreEnabled,omitempty" tf:"token_store_enabled,omitempty"` + + // The directory path in the App Filesystem in which the tokens will be stored. + // The directory path in the App Filesystem in which the tokens will be stored. 
+ TokenStorePath *string `json:"tokenStorePath,omitempty" tf:"token_store_path,omitempty"` + + // The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + // The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + TokenStoreSASSettingName *string `json:"tokenStoreSasSettingName,omitempty" tf:"token_store_sas_setting_name,omitempty"` + + // Should the nonce be validated while completing the login flow. Defaults to true. + // Should the nonce be validated while completing the login flow. Defaults to `true`. + ValidateNonce *bool `json:"validateNonce,omitempty" tf:"validate_nonce,omitempty"` +} + +type WindowsFunctionAppSlotAuthSettingsV2LoginObservation struct { + + // External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. + // External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. **Note:** URLs within the current domain are always implicitly allowed. + AllowedExternalRedirectUrls []*string `json:"allowedExternalRedirectUrls,omitempty" tf:"allowed_external_redirect_urls,omitempty"` + + // The method by which cookies expire. Possible values include: FixedTime, and IdentityProviderDerived. Defaults to FixedTime. + // The method by which cookies expire. Possible values include: `FixedTime`, and `IdentityProviderDerived`. Defaults to `FixedTime`. + CookieExpirationConvention *string `json:"cookieExpirationConvention,omitempty" tf:"cookie_expiration_convention,omitempty"` + + // The time after the request is made when the session cookie should expire. Defaults to 08:00:00. + // The time after the request is made when the session cookie should expire. Defaults to `08:00:00`. 
+ CookieExpirationTime *string `json:"cookieExpirationTime,omitempty" tf:"cookie_expiration_time,omitempty"` + + // The endpoint to which logout requests should be made. + // The endpoint to which logout requests should be made. + LogoutEndpoint *string `json:"logoutEndpoint,omitempty" tf:"logout_endpoint,omitempty"` + + // The time after the request is made when the nonce should expire. Defaults to 00:05:00. + // The time after the request is made when the nonce should expire. Defaults to `00:05:00`. + NonceExpirationTime *string `json:"nonceExpirationTime,omitempty" tf:"nonce_expiration_time,omitempty"` + + // Should the fragments from the request be preserved after the login request is made. Defaults to false. + // Should the fragments from the request be preserved after the login request is made. Defaults to `false`. + PreserveURLFragmentsForLogins *bool `json:"preserveUrlFragmentsForLogins,omitempty" tf:"preserve_url_fragments_for_logins,omitempty"` + + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + TokenRefreshExtensionTime *float64 `json:"tokenRefreshExtensionTime,omitempty" tf:"token_refresh_extension_time,omitempty"` + + // Should the Token Store configuration Enabled. Defaults to false + // Should the Token Store configuration Enabled. Defaults to `false` + TokenStoreEnabled *bool `json:"tokenStoreEnabled,omitempty" tf:"token_store_enabled,omitempty"` + + // The directory path in the App Filesystem in which the tokens will be stored. + // The directory path in the App Filesystem in which the tokens will be stored. + TokenStorePath *string `json:"tokenStorePath,omitempty" tf:"token_store_path,omitempty"` + + // The name of the app setting which contains the SAS URL of the blob storage containing the tokens. 
+ // The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + TokenStoreSASSettingName *string `json:"tokenStoreSasSettingName,omitempty" tf:"token_store_sas_setting_name,omitempty"` + + // Should the nonce be validated while completing the login flow. Defaults to true. + // Should the nonce be validated while completing the login flow. Defaults to `true`. + ValidateNonce *bool `json:"validateNonce,omitempty" tf:"validate_nonce,omitempty"` +} + +type WindowsFunctionAppSlotAuthSettingsV2LoginParameters struct { + + // External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. + // External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. **Note:** URLs within the current domain are always implicitly allowed. + // +kubebuilder:validation:Optional + AllowedExternalRedirectUrls []*string `json:"allowedExternalRedirectUrls,omitempty" tf:"allowed_external_redirect_urls,omitempty"` + + // The method by which cookies expire. Possible values include: FixedTime, and IdentityProviderDerived. Defaults to FixedTime. + // The method by which cookies expire. Possible values include: `FixedTime`, and `IdentityProviderDerived`. Defaults to `FixedTime`. + // +kubebuilder:validation:Optional + CookieExpirationConvention *string `json:"cookieExpirationConvention,omitempty" tf:"cookie_expiration_convention,omitempty"` + + // The time after the request is made when the session cookie should expire. Defaults to 08:00:00. + // The time after the request is made when the session cookie should expire. Defaults to `08:00:00`. 
+ // +kubebuilder:validation:Optional + CookieExpirationTime *string `json:"cookieExpirationTime,omitempty" tf:"cookie_expiration_time,omitempty"` + + // The endpoint to which logout requests should be made. + // The endpoint to which logout requests should be made. + // +kubebuilder:validation:Optional + LogoutEndpoint *string `json:"logoutEndpoint,omitempty" tf:"logout_endpoint,omitempty"` + + // The time after the request is made when the nonce should expire. Defaults to 00:05:00. + // The time after the request is made when the nonce should expire. Defaults to `00:05:00`. + // +kubebuilder:validation:Optional + NonceExpirationTime *string `json:"nonceExpirationTime,omitempty" tf:"nonce_expiration_time,omitempty"` + + // Should the fragments from the request be preserved after the login request is made. Defaults to false. + // Should the fragments from the request be preserved after the login request is made. Defaults to `false`. + // +kubebuilder:validation:Optional + PreserveURLFragmentsForLogins *bool `json:"preserveUrlFragmentsForLogins,omitempty" tf:"preserve_url_fragments_for_logins,omitempty"` + + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + // +kubebuilder:validation:Optional + TokenRefreshExtensionTime *float64 `json:"tokenRefreshExtensionTime,omitempty" tf:"token_refresh_extension_time,omitempty"` + + // Should the Token Store configuration Enabled. Defaults to false + // Should the Token Store configuration Enabled. Defaults to `false` + // +kubebuilder:validation:Optional + TokenStoreEnabled *bool `json:"tokenStoreEnabled,omitempty" tf:"token_store_enabled,omitempty"` + + // The directory path in the App Filesystem in which the tokens will be stored. 
+ // The directory path in the App Filesystem in which the tokens will be stored. + // +kubebuilder:validation:Optional + TokenStorePath *string `json:"tokenStorePath,omitempty" tf:"token_store_path,omitempty"` + + // The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + // The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + // +kubebuilder:validation:Optional + TokenStoreSASSettingName *string `json:"tokenStoreSasSettingName,omitempty" tf:"token_store_sas_setting_name,omitempty"` + + // Should the nonce be validated while completing the login flow. Defaults to true. + // Should the nonce be validated while completing the login flow. Defaults to `true`. + // +kubebuilder:validation:Optional + ValidateNonce *bool `json:"validateNonce,omitempty" tf:"validate_nonce,omitempty"` +} + +type WindowsFunctionAppSlotAuthSettingsV2MicrosoftV2InitParameters struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OAuth 2.0 client ID that was created for the app used for authentication. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. 
+ // The list of Login scopes that will be requested as part of Microsoft Account authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type WindowsFunctionAppSlotAuthSettingsV2MicrosoftV2Observation struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OAuth 2.0 client ID that was created for the app used for authentication. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // The list of Login scopes that will be requested as part of Microsoft Account authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type WindowsFunctionAppSlotAuthSettingsV2MicrosoftV2Parameters struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // +kubebuilder:validation:Optional + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. 
+ // The OAuth 2.0 client ID that was created for the app used for authentication. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // The list of Login scopes that will be requested as part of Microsoft Account authentication. + // +kubebuilder:validation:Optional + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type WindowsFunctionAppSlotAuthSettingsV2Observation struct { + + // An active_directory_v2 block as defined below. + ActiveDirectoryV2 *WindowsFunctionAppSlotAuthSettingsV2ActiveDirectoryV2Observation `json:"activeDirectoryV2,omitempty" tf:"active_directory_v2,omitempty"` + + // An apple_v2 block as defined below. + AppleV2 *WindowsFunctionAppSlotAuthSettingsV2AppleV2Observation `json:"appleV2,omitempty" tf:"apple_v2,omitempty"` + + // Should the AuthV2 Settings be enabled. Defaults to false. + // Should the AuthV2 Settings be enabled. Defaults to `false` + AuthEnabled *bool `json:"authEnabled,omitempty" tf:"auth_enabled,omitempty"` + + // An azure_static_web_app_v2 block as defined below. + AzureStaticWebAppV2 *WindowsFunctionAppSlotAuthSettingsV2AzureStaticWebAppV2Observation `json:"azureStaticWebAppV2,omitempty" tf:"azure_static_web_app_v2,omitempty"` + + // The path to the App Auth settings. + // The path to the App Auth settings. **Note:** Relative Paths are evaluated from the Site Root directory. 
+ ConfigFilePath *string `json:"configFilePath,omitempty" tf:"config_file_path,omitempty"` + + // Zero or more custom_oidc_v2 blocks as defined below. + CustomOidcV2 []WindowsFunctionAppSlotAuthSettingsV2CustomOidcV2Observation `json:"customOidcV2,omitempty" tf:"custom_oidc_v2,omitempty"` + + // The Default Authentication Provider to use when the unauthenticated_action is set to RedirectToLoginPage. Possible values include: apple, azureactivedirectory, facebook, github, google, twitter and the name of your custom_oidc_v2 provider. + // The Default Authentication Provider to use when the `unauthenticated_action` is set to `RedirectToLoginPage`. Possible values include: `apple`, `azureactivedirectory`, `facebook`, `github`, `google`, `twitter` and the `name` of your `custom_oidc_v2` provider. + DefaultProvider *string `json:"defaultProvider,omitempty" tf:"default_provider,omitempty"` + + // The paths which should be excluded from the unauthenticated_action when it is set to RedirectToLoginPage. + // The paths which should be excluded from the `unauthenticated_action` when it is set to `RedirectToLoginPage`. + ExcludedPaths []*string `json:"excludedPaths,omitempty" tf:"excluded_paths,omitempty"` + + // A facebook_v2 block as defined below. + FacebookV2 *WindowsFunctionAppSlotAuthSettingsV2FacebookV2Observation `json:"facebookV2,omitempty" tf:"facebook_v2,omitempty"` + + // The convention used to determine the url of the request made. Possible values include NoProxy, Standard, Custom. Defaults to NoProxy. + // The convention used to determine the url of the request made. Possible values include `ForwardProxyConventionNoProxy`, `ForwardProxyConventionStandard`, `ForwardProxyConventionCustom`. Defaults to `ForwardProxyConventionNoProxy` + ForwardProxyConvention *string `json:"forwardProxyConvention,omitempty" tf:"forward_proxy_convention,omitempty"` + + // The name of the custom header containing the host of the request. 
+ // The name of the header containing the host of the request. + ForwardProxyCustomHostHeaderName *string `json:"forwardProxyCustomHostHeaderName,omitempty" tf:"forward_proxy_custom_host_header_name,omitempty"` + + // The name of the custom header containing the scheme of the request. + // The name of the header containing the scheme of the request. + ForwardProxyCustomSchemeHeaderName *string `json:"forwardProxyCustomSchemeHeaderName,omitempty" tf:"forward_proxy_custom_scheme_header_name,omitempty"` + + // A github_v2 block as defined below. + GithubV2 *WindowsFunctionAppSlotAuthSettingsV2GithubV2Observation `json:"githubV2,omitempty" tf:"github_v2,omitempty"` + + // A google_v2 block as defined below. + GoogleV2 *WindowsFunctionAppSlotAuthSettingsV2GoogleV2Observation `json:"googleV2,omitempty" tf:"google_v2,omitempty"` + + // The prefix that should precede all the authentication and authorisation paths. Defaults to /.auth. + // The prefix that should precede all the authentication and authorisation paths. Defaults to `/.auth` + HTTPRouteAPIPrefix *string `json:"httpRouteApiPrefix,omitempty" tf:"http_route_api_prefix,omitempty"` + + // A login block as defined below. + Login *WindowsFunctionAppSlotAuthSettingsV2LoginObservation `json:"login,omitempty" tf:"login,omitempty"` + + // A microsoft_v2 block as defined below. + MicrosoftV2 *WindowsFunctionAppSlotAuthSettingsV2MicrosoftV2Observation `json:"microsoftV2,omitempty" tf:"microsoft_v2,omitempty"` + + // Should the authentication flow be used for all requests. + // Should the authentication flow be used for all requests. + RequireAuthentication *bool `json:"requireAuthentication,omitempty" tf:"require_authentication,omitempty"` + + // Should HTTPS be required on connections? Defaults to true. + // Should HTTPS be required on connections? Defaults to true. 
+ RequireHTTPS *bool `json:"requireHttps,omitempty" tf:"require_https,omitempty"` + + // The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to ~1. + // The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to `~1` + RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` + + // A twitter_v2 block as defined below. + TwitterV2 *WindowsFunctionAppSlotAuthSettingsV2TwitterV2Observation `json:"twitterV2,omitempty" tf:"twitter_v2,omitempty"` + + // The action to take for requests made without authentication. Possible values include RedirectToLoginPage, AllowAnonymous, Return401, and Return403. Defaults to RedirectToLoginPage. + // The action to take for requests made without authentication. Possible values include `RedirectToLoginPage`, `AllowAnonymous`, `Return401`, and `Return403`. Defaults to `RedirectToLoginPage`. + UnauthenticatedAction *string `json:"unauthenticatedAction,omitempty" tf:"unauthenticated_action,omitempty"` +} + +type WindowsFunctionAppSlotAuthSettingsV2Parameters struct { + + // An active_directory_v2 block as defined below. + // +kubebuilder:validation:Optional + ActiveDirectoryV2 *WindowsFunctionAppSlotAuthSettingsV2ActiveDirectoryV2Parameters `json:"activeDirectoryV2,omitempty" tf:"active_directory_v2,omitempty"` + + // An apple_v2 block as defined below. + // +kubebuilder:validation:Optional + AppleV2 *WindowsFunctionAppSlotAuthSettingsV2AppleV2Parameters `json:"appleV2,omitempty" tf:"apple_v2,omitempty"` + + // Should the AuthV2 Settings be enabled. Defaults to false. + // Should the AuthV2 Settings be enabled. Defaults to `false` + // +kubebuilder:validation:Optional + AuthEnabled *bool `json:"authEnabled,omitempty" tf:"auth_enabled,omitempty"` + + // An azure_static_web_app_v2 block as defined below. 
+ // +kubebuilder:validation:Optional + AzureStaticWebAppV2 *WindowsFunctionAppSlotAuthSettingsV2AzureStaticWebAppV2Parameters `json:"azureStaticWebAppV2,omitempty" tf:"azure_static_web_app_v2,omitempty"` + + // The path to the App Auth settings. + // The path to the App Auth settings. **Note:** Relative Paths are evaluated from the Site Root directory. + // +kubebuilder:validation:Optional + ConfigFilePath *string `json:"configFilePath,omitempty" tf:"config_file_path,omitempty"` + + // Zero or more custom_oidc_v2 blocks as defined below. + // +kubebuilder:validation:Optional + CustomOidcV2 []WindowsFunctionAppSlotAuthSettingsV2CustomOidcV2Parameters `json:"customOidcV2,omitempty" tf:"custom_oidc_v2,omitempty"` + + // The Default Authentication Provider to use when the unauthenticated_action is set to RedirectToLoginPage. Possible values include: apple, azureactivedirectory, facebook, github, google, twitter and the name of your custom_oidc_v2 provider. + // The Default Authentication Provider to use when the `unauthenticated_action` is set to `RedirectToLoginPage`. Possible values include: `apple`, `azureactivedirectory`, `facebook`, `github`, `google`, `twitter` and the `name` of your `custom_oidc_v2` provider. + // +kubebuilder:validation:Optional + DefaultProvider *string `json:"defaultProvider,omitempty" tf:"default_provider,omitempty"` + + // The paths which should be excluded from the unauthenticated_action when it is set to RedirectToLoginPage. + // The paths which should be excluded from the `unauthenticated_action` when it is set to `RedirectToLoginPage`. + // +kubebuilder:validation:Optional + ExcludedPaths []*string `json:"excludedPaths,omitempty" tf:"excluded_paths,omitempty"` + + // A facebook_v2 block as defined below. + // +kubebuilder:validation:Optional + FacebookV2 *WindowsFunctionAppSlotAuthSettingsV2FacebookV2Parameters `json:"facebookV2,omitempty" tf:"facebook_v2,omitempty"` + + // The convention used to determine the url of the request made. 
Possible values include NoProxy, Standard, Custom. Defaults to NoProxy. + // The convention used to determine the url of the request made. Possible values include `ForwardProxyConventionNoProxy`, `ForwardProxyConventionStandard`, `ForwardProxyConventionCustom`. Defaults to `ForwardProxyConventionNoProxy` + // +kubebuilder:validation:Optional + ForwardProxyConvention *string `json:"forwardProxyConvention,omitempty" tf:"forward_proxy_convention,omitempty"` + + // The name of the custom header containing the host of the request. + // The name of the header containing the host of the request. + // +kubebuilder:validation:Optional + ForwardProxyCustomHostHeaderName *string `json:"forwardProxyCustomHostHeaderName,omitempty" tf:"forward_proxy_custom_host_header_name,omitempty"` + + // The name of the custom header containing the scheme of the request. + // The name of the header containing the scheme of the request. + // +kubebuilder:validation:Optional + ForwardProxyCustomSchemeHeaderName *string `json:"forwardProxyCustomSchemeHeaderName,omitempty" tf:"forward_proxy_custom_scheme_header_name,omitempty"` + + // A github_v2 block as defined below. + // +kubebuilder:validation:Optional + GithubV2 *WindowsFunctionAppSlotAuthSettingsV2GithubV2Parameters `json:"githubV2,omitempty" tf:"github_v2,omitempty"` + + // A google_v2 block as defined below. + // +kubebuilder:validation:Optional + GoogleV2 *WindowsFunctionAppSlotAuthSettingsV2GoogleV2Parameters `json:"googleV2,omitempty" tf:"google_v2,omitempty"` + + // The prefix that should precede all the authentication and authorisation paths. Defaults to /.auth. + // The prefix that should precede all the authentication and authorisation paths. Defaults to `/.auth` + // +kubebuilder:validation:Optional + HTTPRouteAPIPrefix *string `json:"httpRouteApiPrefix,omitempty" tf:"http_route_api_prefix,omitempty"` + + // A login block as defined below. 
+ // +kubebuilder:validation:Optional + Login *WindowsFunctionAppSlotAuthSettingsV2LoginParameters `json:"login" tf:"login,omitempty"` + + // A microsoft_v2 block as defined below. + // +kubebuilder:validation:Optional + MicrosoftV2 *WindowsFunctionAppSlotAuthSettingsV2MicrosoftV2Parameters `json:"microsoftV2,omitempty" tf:"microsoft_v2,omitempty"` + + // Should the authentication flow be used for all requests. + // Should the authentication flow be used for all requests. + // +kubebuilder:validation:Optional + RequireAuthentication *bool `json:"requireAuthentication,omitempty" tf:"require_authentication,omitempty"` + + // Should HTTPS be required on connections? Defaults to true. + // Should HTTPS be required on connections? Defaults to true. + // +kubebuilder:validation:Optional + RequireHTTPS *bool `json:"requireHttps,omitempty" tf:"require_https,omitempty"` + + // The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to ~1. + // The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to `~1` + // +kubebuilder:validation:Optional + RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` + + // A twitter_v2 block as defined below. + // +kubebuilder:validation:Optional + TwitterV2 *WindowsFunctionAppSlotAuthSettingsV2TwitterV2Parameters `json:"twitterV2,omitempty" tf:"twitter_v2,omitempty"` + + // The action to take for requests made without authentication. Possible values include RedirectToLoginPage, AllowAnonymous, Return401, and Return403. Defaults to RedirectToLoginPage. + // The action to take for requests made without authentication. Possible values include `RedirectToLoginPage`, `AllowAnonymous`, `Return401`, and `Return403`. Defaults to `RedirectToLoginPage`. 
+ // +kubebuilder:validation:Optional + UnauthenticatedAction *string `json:"unauthenticatedAction,omitempty" tf:"unauthenticated_action,omitempty"` +} + +type WindowsFunctionAppSlotAuthSettingsV2TwitterV2InitParameters struct { + + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + ConsumerKey *string `json:"consumerKey,omitempty" tf:"consumer_key,omitempty"` + + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + ConsumerSecretSettingName *string `json:"consumerSecretSettingName,omitempty" tf:"consumer_secret_setting_name,omitempty"` +} + +type WindowsFunctionAppSlotAuthSettingsV2TwitterV2Observation struct { + + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + ConsumerKey *string `json:"consumerKey,omitempty" tf:"consumer_key,omitempty"` + + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + ConsumerSecretSettingName *string `json:"consumerSecretSettingName,omitempty" tf:"consumer_secret_setting_name,omitempty"` +} + +type WindowsFunctionAppSlotAuthSettingsV2TwitterV2Parameters struct { + + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // +kubebuilder:validation:Optional + ConsumerKey *string `json:"consumerKey" tf:"consumer_key,omitempty"` + + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. 
+ // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + // +kubebuilder:validation:Optional + ConsumerSecretSettingName *string `json:"consumerSecretSettingName" tf:"consumer_secret_setting_name,omitempty"` +} + +type WindowsFunctionAppSlotBackupInitParameters struct { + + // Should this backup job be enabled? Defaults to true. + // Should this backup job be enabled? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The name which should be used for this Backup. + // The name which should be used for this Backup. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // a schedule block as detailed below. + Schedule *WindowsFunctionAppSlotBackupScheduleInitParameters `json:"schedule,omitempty" tf:"schedule,omitempty"` +} + +type WindowsFunctionAppSlotBackupObservation struct { + + // Should this backup job be enabled? Defaults to true. + // Should this backup job be enabled? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The name which should be used for this Backup. + // The name which should be used for this Backup. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // a schedule block as detailed below. + Schedule *WindowsFunctionAppSlotBackupScheduleObservation `json:"schedule,omitempty" tf:"schedule,omitempty"` +} + +type WindowsFunctionAppSlotBackupParameters struct { + + // Should this backup job be enabled? Defaults to true. + // Should this backup job be enabled? + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The name which should be used for this Backup. + // The name which should be used for this Backup. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // a schedule block as detailed below. 
+ // +kubebuilder:validation:Optional + Schedule *WindowsFunctionAppSlotBackupScheduleParameters `json:"schedule" tf:"schedule,omitempty"` + + // The SAS URL to the container. + // The SAS URL to the container. + // +kubebuilder:validation:Required + StorageAccountURLSecretRef v1.SecretKeySelector `json:"storageAccountUrlSecretRef" tf:"-"` +} + +type WindowsFunctionAppSlotBackupScheduleInitParameters struct { + + // How often the backup should be executed (e.g. for weekly backup, this should be set to 7 and frequency_unit should be set to Day). + // How often the backup should be executed (e.g. for weekly backup, this should be set to `7` and `frequency_unit` should be set to `Day`). + FrequencyInterval *float64 `json:"frequencyInterval,omitempty" tf:"frequency_interval,omitempty"` + + // The unit of time for how often the backup should take place. Possible values include: Day and Hour. + // The unit of time for how often the backup should take place. Possible values include: `Day` and `Hour`. + FrequencyUnit *string `json:"frequencyUnit,omitempty" tf:"frequency_unit,omitempty"` + + // Should the service keep at least one backup, regardless of age of backup. Defaults to false. + // Should the service keep at least one backup, regardless of age of backup. Defaults to `false`. + KeepAtLeastOneBackup *bool `json:"keepAtLeastOneBackup,omitempty" tf:"keep_at_least_one_backup,omitempty"` + + // After how many days backups should be deleted. Defaults to 30. + // After how many days backups should be deleted. + RetentionPeriodDays *float64 `json:"retentionPeriodDays,omitempty" tf:"retention_period_days,omitempty"` + + // When the schedule should start working in RFC-3339 format. + // When the schedule should start working in RFC-3339 format. + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` +} + +type WindowsFunctionAppSlotBackupScheduleObservation struct { + + // How often the backup should be executed (e.g. 
for weekly backup, this should be set to 7 and frequency_unit should be set to Day). + // How often the backup should be executed (e.g. for weekly backup, this should be set to `7` and `frequency_unit` should be set to `Day`). + FrequencyInterval *float64 `json:"frequencyInterval,omitempty" tf:"frequency_interval,omitempty"` + + // The unit of time for how often the backup should take place. Possible values include: Day and Hour. + // The unit of time for how often the backup should take place. Possible values include: `Day` and `Hour`. + FrequencyUnit *string `json:"frequencyUnit,omitempty" tf:"frequency_unit,omitempty"` + + // Should the service keep at least one backup, regardless of age of backup. Defaults to false. + // Should the service keep at least one backup, regardless of age of backup. Defaults to `false`. + KeepAtLeastOneBackup *bool `json:"keepAtLeastOneBackup,omitempty" tf:"keep_at_least_one_backup,omitempty"` + + // The time the backup was last attempted. + // The time the backup was last attempted. + LastExecutionTime *string `json:"lastExecutionTime,omitempty" tf:"last_execution_time,omitempty"` + + // After how many days backups should be deleted. Defaults to 30. + // After how many days backups should be deleted. + RetentionPeriodDays *float64 `json:"retentionPeriodDays,omitempty" tf:"retention_period_days,omitempty"` + + // When the schedule should start working in RFC-3339 format. + // When the schedule should start working in RFC-3339 format. + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` +} + +type WindowsFunctionAppSlotBackupScheduleParameters struct { + + // How often the backup should be executed (e.g. for weekly backup, this should be set to 7 and frequency_unit should be set to Day). + // How often the backup should be executed (e.g. for weekly backup, this should be set to `7` and `frequency_unit` should be set to `Day`). 
+ // +kubebuilder:validation:Optional + FrequencyInterval *float64 `json:"frequencyInterval" tf:"frequency_interval,omitempty"` + + // The unit of time for how often the backup should take place. Possible values include: Day and Hour. + // The unit of time for how often the backup should take place. Possible values include: `Day` and `Hour`. + // +kubebuilder:validation:Optional + FrequencyUnit *string `json:"frequencyUnit" tf:"frequency_unit,omitempty"` + + // Should the service keep at least one backup, regardless of age of backup. Defaults to false. + // Should the service keep at least one backup, regardless of age of backup. Defaults to `false`. + // +kubebuilder:validation:Optional + KeepAtLeastOneBackup *bool `json:"keepAtLeastOneBackup,omitempty" tf:"keep_at_least_one_backup,omitempty"` + + // After how many days backups should be deleted. Defaults to 30. + // After how many days backups should be deleted. + // +kubebuilder:validation:Optional + RetentionPeriodDays *float64 `json:"retentionPeriodDays,omitempty" tf:"retention_period_days,omitempty"` + + // When the schedule should start working in RFC-3339 format. + // When the schedule should start working in RFC-3339 format. + // +kubebuilder:validation:Optional + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` +} + +type WindowsFunctionAppSlotConnectionStringInitParameters struct { + + // The name which should be used for this Connection. + // The name which should be used for this Connection. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Type of database. Possible values include: APIHub, Custom, DocDb, EventHub, MySQL, NotificationHub, PostgreSQL, RedisCache, ServiceBus, SQLAzure, and SQLServer. + // Type of database. Possible values include: `MySQL`, `SQLServer`, `SQLAzure`, `Custom`, `NotificationHub`, `ServiceBus`, `EventHub`, `APIHub`, `DocDb`, `RedisCache`, and `PostgreSQL`. 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type WindowsFunctionAppSlotConnectionStringObservation struct { + + // The name which should be used for this Connection. + // The name which should be used for this Connection. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Type of database. Possible values include: APIHub, Custom, DocDb, EventHub, MySQL, NotificationHub, PostgreSQL, RedisCache, ServiceBus, SQLAzure, and SQLServer. + // Type of database. Possible values include: `MySQL`, `SQLServer`, `SQLAzure`, `Custom`, `NotificationHub`, `ServiceBus`, `EventHub`, `APIHub`, `DocDb`, `RedisCache`, and `PostgreSQL`. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type WindowsFunctionAppSlotConnectionStringParameters struct { + + // The name which should be used for this Connection. + // The name which should be used for this Connection. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Type of database. Possible values include: APIHub, Custom, DocDb, EventHub, MySQL, NotificationHub, PostgreSQL, RedisCache, ServiceBus, SQLAzure, and SQLServer. + // Type of database. Possible values include: `MySQL`, `SQLServer`, `SQLAzure`, `Custom`, `NotificationHub`, `ServiceBus`, `EventHub`, `APIHub`, `DocDb`, `RedisCache`, and `PostgreSQL`. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` + + // The connection string value. + // The connection string value. + // +kubebuilder:validation:Required + ValueSecretRef v1.SecretKeySelector `json:"valueSecretRef" tf:"-"` +} + +type WindowsFunctionAppSlotIdentityInitParameters struct { + + // A list of User Assigned Managed Identity IDs to be assigned to this Windows Function App Slot. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Windows Function App Slot. 
Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type WindowsFunctionAppSlotIdentityObservation struct { + + // A list of User Assigned Managed Identity IDs to be assigned to this Windows Function App Slot. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Principal ID associated with this Managed Service Identity. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID associated with this Managed Service Identity. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Windows Function App Slot. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type WindowsFunctionAppSlotIdentityParameters struct { + + // A list of User Assigned Managed Identity IDs to be assigned to this Windows Function App Slot. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Windows Function App Slot. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type WindowsFunctionAppSlotInitParameters struct { + + // A map of key-value pairs for App Settings and custom values. + // A map of key-value pairs for [App Settings](https://docs.microsoft.com/en-us/azure/azure-functions/functions-app-settings) and custom values. 
+ // +mapType=granular + AppSettings map[string]*string `json:"appSettings,omitempty" tf:"app_settings,omitempty"` + + // an auth_settings block as detailed below. + AuthSettings *WindowsFunctionAppSlotAuthSettingsInitParameters `json:"authSettings,omitempty" tf:"auth_settings,omitempty"` + + // an auth_settings_v2 block as detailed below. + AuthSettingsV2 *WindowsFunctionAppSlotAuthSettingsV2InitParameters `json:"authSettingsV2,omitempty" tf:"auth_settings_v2,omitempty"` + + // a backup block as detailed below. + Backup *WindowsFunctionAppSlotBackupInitParameters `json:"backup,omitempty" tf:"backup,omitempty"` + + // Should built-in logging be enabled. Configures AzureWebJobsDashboard app setting based on the configured storage setting. Defaults to true. + // Should built in logging be enabled. Configures `AzureWebJobsDashboard` app setting based on the configured storage setting. + BuiltinLoggingEnabled *bool `json:"builtinLoggingEnabled,omitempty" tf:"builtin_logging_enabled,omitempty"` + + // Should the Function App Slot use Client Certificates. + // Should the Function App Slot use Client Certificates. + ClientCertificateEnabled *bool `json:"clientCertificateEnabled,omitempty" tf:"client_certificate_enabled,omitempty"` + + // Paths to exclude when using client certificates, separated by ; + // Paths to exclude when using client certificates, separated by ; + ClientCertificateExclusionPaths *string `json:"clientCertificateExclusionPaths,omitempty" tf:"client_certificate_exclusion_paths,omitempty"` + + // The mode of the Function App Slot's client certificates requirement for incoming requests. Possible values are Required, Optional, and OptionalInteractiveUser. Defaults to Optional. + // The mode of the Function App Slot's client certificates requirement for incoming requests. Possible values are `Required`, `Optional`, and `OptionalInteractiveUser`. 
+ ClientCertificateMode *string `json:"clientCertificateMode,omitempty" tf:"client_certificate_mode,omitempty"` + + // a connection_string block as detailed below. + ConnectionString []WindowsFunctionAppSlotConnectionStringInitParameters `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // Force disable the content share settings. + // Force disable the content share settings. + ContentShareForceDisabled *bool `json:"contentShareForceDisabled,omitempty" tf:"content_share_force_disabled,omitempty"` + + // The amount of memory in gigabyte-seconds that your application is allowed to consume per day. Setting this value only affects function apps in Consumption Plans. Defaults to 0. + // The amount of memory in gigabyte-seconds that your application is allowed to consume per day. Setting this value only affects function apps in Consumption Plans. + DailyMemoryTimeQuota *float64 `json:"dailyMemoryTimeQuota,omitempty" tf:"daily_memory_time_quota,omitempty"` + + // Is the Windows Function App Slot enabled. Defaults to true. + // Is the Windows Function App Slot enabled. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Should the default FTP Basic Authentication publishing profile be enabled. Defaults to true. + FtpPublishBasicAuthenticationEnabled *bool `json:"ftpPublishBasicAuthenticationEnabled,omitempty" tf:"ftp_publish_basic_authentication_enabled,omitempty"` + + // The runtime version associated with the Function App Slot. Defaults to ~4. + // The runtime version associated with the Function App Slot. + FunctionsExtensionVersion *string `json:"functionsExtensionVersion,omitempty" tf:"functions_extension_version,omitempty"` + + // Can the Function App Slot only be accessed via HTTPS?. Defaults to false. + // Can the Function App Slot only be accessed via HTTPS? + HTTPSOnly *bool `json:"httpsOnly,omitempty" tf:"https_only,omitempty"` + + // an identity block as detailed below. 
+ Identity *WindowsFunctionAppSlotIdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The User Assigned Identity ID used for accessing KeyVault secrets. The identity must be assigned to the application in the identity block. For more information see - Access vaults with a user-assigned identity + // The User Assigned Identity to use for Key Vault access. + KeyVaultReferenceIdentityID *string `json:"keyVaultReferenceIdentityId,omitempty" tf:"key_vault_reference_identity_id,omitempty"` + + // Should public network access be enabled for the Function App. Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The ID of the Service Plan in which to run this slot. If not specified the same Service Plan as the Windows Function App will be used. + ServicePlanID *string `json:"servicePlanId,omitempty" tf:"service_plan_id,omitempty"` + + // a site_config block as detailed below. + SiteConfig *WindowsFunctionAppSlotSiteConfigInitParameters `json:"siteConfig,omitempty" tf:"site_config,omitempty"` + + // One or more storage_account blocks as defined below. + StorageAccount []WindowsFunctionAppSlotStorageAccountInitParameters `json:"storageAccount,omitempty" tf:"storage_account,omitempty"` + + // The backend storage account name which will be used by this Function App Slot. + // The backend storage account name which will be used by this Function App Slot. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account + StorageAccountName *string `json:"storageAccountName,omitempty" tf:"storage_account_name,omitempty"` + + // Reference to a Account in storage to populate storageAccountName. + // +kubebuilder:validation:Optional + StorageAccountNameRef *v1.Reference `json:"storageAccountNameRef,omitempty" tf:"-"` + + // Selector for a Account in storage to populate storageAccountName. 
+ // +kubebuilder:validation:Optional + StorageAccountNameSelector *v1.Selector `json:"storageAccountNameSelector,omitempty" tf:"-"` + + // The Key Vault Secret ID, optionally including version, that contains the Connection String to connect to the storage account for this Function App Slot. + // The Key Vault Secret ID, including version, that contains the Connection String to connect to the storage account for this Function App. + StorageKeyVaultSecretID *string `json:"storageKeyVaultSecretId,omitempty" tf:"storage_key_vault_secret_id,omitempty"` + + // Should the Function App Slot use its Managed Identity to access storage. + // Should the Function App Slot use its Managed Identity to access storage? + StorageUsesManagedIdentity *bool `json:"storageUsesManagedIdentity,omitempty" tf:"storage_uses_managed_identity,omitempty"` + + // A mapping of tags which should be assigned to the Windows Function App Slot. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The subnet id which will be used by this Function App Slot for regional virtual network integration. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDRef *v1.Reference `json:"virtualNetworkSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDSelector *v1.Selector `json:"virtualNetworkSubnetIdSelector,omitempty" tf:"-"` + + // Should the default WebDeploy Basic Authentication publishing credentials enabled. Defaults to true. 
+ WebdeployPublishBasicAuthenticationEnabled *bool `json:"webdeployPublishBasicAuthenticationEnabled,omitempty" tf:"webdeploy_publish_basic_authentication_enabled,omitempty"` +} + +type WindowsFunctionAppSlotObservation struct { + + // A map of key-value pairs for App Settings and custom values. + // A map of key-value pairs for [App Settings](https://docs.microsoft.com/en-us/azure/azure-functions/functions-app-settings) and custom values. + // +mapType=granular + AppSettings map[string]*string `json:"appSettings,omitempty" tf:"app_settings,omitempty"` + + // an auth_settings block as detailed below. + AuthSettings *WindowsFunctionAppSlotAuthSettingsObservation `json:"authSettings,omitempty" tf:"auth_settings,omitempty"` + + // an auth_settings_v2 block as detailed below. + AuthSettingsV2 *WindowsFunctionAppSlotAuthSettingsV2Observation `json:"authSettingsV2,omitempty" tf:"auth_settings_v2,omitempty"` + + // a backup block as detailed below. + Backup *WindowsFunctionAppSlotBackupObservation `json:"backup,omitempty" tf:"backup,omitempty"` + + // Should built-in logging be enabled. Configures AzureWebJobsDashboard app setting based on the configured storage setting. Defaults to true. + // Should built in logging be enabled. Configures `AzureWebJobsDashboard` app setting based on the configured storage setting. + BuiltinLoggingEnabled *bool `json:"builtinLoggingEnabled,omitempty" tf:"builtin_logging_enabled,omitempty"` + + // Should the Function App Slot use Client Certificates. + // Should the Function App Slot use Client Certificates. 
+ ClientCertificateEnabled *bool `json:"clientCertificateEnabled,omitempty" tf:"client_certificate_enabled,omitempty"` + + // Paths to exclude when using client certificates, separated by ; + // Paths to exclude when using client certificates, separated by ; + ClientCertificateExclusionPaths *string `json:"clientCertificateExclusionPaths,omitempty" tf:"client_certificate_exclusion_paths,omitempty"` + + // The mode of the Function App Slot's client certificates requirement for incoming requests. Possible values are Required, Optional, and OptionalInteractiveUser. Defaults to Optional. + // The mode of the Function App Slot's client certificates requirement for incoming requests. Possible values are `Required`, `Optional`, and `OptionalInteractiveUser`. + ClientCertificateMode *string `json:"clientCertificateMode,omitempty" tf:"client_certificate_mode,omitempty"` + + // a connection_string block as detailed below. + ConnectionString []WindowsFunctionAppSlotConnectionStringObservation `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // Force disable the content share settings. + // Force disable the content share settings. + ContentShareForceDisabled *bool `json:"contentShareForceDisabled,omitempty" tf:"content_share_force_disabled,omitempty"` + + // The amount of memory in gigabyte-seconds that your application is allowed to consume per day. Setting this value only affects function apps in Consumption Plans. Defaults to 0. + // The amount of memory in gigabyte-seconds that your application is allowed to consume per day. Setting this value only affects function apps in Consumption Plans. + DailyMemoryTimeQuota *float64 `json:"dailyMemoryTimeQuota,omitempty" tf:"daily_memory_time_quota,omitempty"` + + // The default hostname of the Windows Function App Slot. + // The default hostname of the Windows Function App Slot. 
+ DefaultHostName *string `json:"defaultHostname,omitempty" tf:"default_hostname,omitempty"` + + // Is the Windows Function App Slot enabled. Defaults to true. + // Is the Windows Function App Slot enabled. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Should the default FTP Basic Authentication publishing profile be enabled. Defaults to true. + FtpPublishBasicAuthenticationEnabled *bool `json:"ftpPublishBasicAuthenticationEnabled,omitempty" tf:"ftp_publish_basic_authentication_enabled,omitempty"` + + // The name of the Windows Function App this Slot is a member of. Changing this forces a new resource to be created. + // The ID of the Windows Function App this Slot is a member of. + FunctionAppID *string `json:"functionAppId,omitempty" tf:"function_app_id,omitempty"` + + // The runtime version associated with the Function App Slot. Defaults to ~4. + // The runtime version associated with the Function App Slot. + FunctionsExtensionVersion *string `json:"functionsExtensionVersion,omitempty" tf:"functions_extension_version,omitempty"` + + // Can the Function App Slot only be accessed via HTTPS?. Defaults to false. + // Can the Function App Slot only be accessed via HTTPS? + HTTPSOnly *bool `json:"httpsOnly,omitempty" tf:"https_only,omitempty"` + + // The ID of the App Service Environment used by Function App Slot. + HostingEnvironmentID *string `json:"hostingEnvironmentId,omitempty" tf:"hosting_environment_id,omitempty"` + + // The ID of the Windows Function App Slot + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // an identity block as detailed below. + Identity *WindowsFunctionAppSlotIdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // The User Assigned Identity ID used for accessing KeyVault secrets. The identity must be assigned to the application in the identity block. For more information see - Access vaults with a user-assigned identity + // The User Assigned Identity to use for Key Vault access. 
+ KeyVaultReferenceIdentityID *string `json:"keyVaultReferenceIdentityId,omitempty" tf:"key_vault_reference_identity_id,omitempty"` + + // The Kind value for this Windows Function App Slot. + // The Kind value for this Windows Function App Slot. + Kind *string `json:"kind,omitempty" tf:"kind,omitempty"` + + // A list of outbound IP addresses. For example ["52.23.25.3", "52.143.43.12"]. + // A list of outbound IP addresses. For example `["52.23.25.3", "52.143.43.12"]`. + OutboundIPAddressList []*string `json:"outboundIpAddressList,omitempty" tf:"outbound_ip_address_list,omitempty"` + + // A comma separated list of outbound IP addresses as a string. For example 52.23.25.3,52.143.43.12. + // A comma separated list of outbound IP addresses as a string. For example `52.23.25.3,52.143.43.12`. + OutboundIPAddresses *string `json:"outboundIpAddresses,omitempty" tf:"outbound_ip_addresses,omitempty"` + + // A list of possible outbound IP addresses, not all of which are necessarily in use. This is a superset of outbound_ip_address_list. For example ["52.23.25.3", "52.143.43.12"]. + // A list of possible outbound IP addresses, not all of which are necessarily in use. This is a superset of `outbound_ip_address_list`. For example `["52.23.25.3", "52.143.43.12"]`. + PossibleOutboundIPAddressList []*string `json:"possibleOutboundIpAddressList,omitempty" tf:"possible_outbound_ip_address_list,omitempty"` + + // A comma separated list of possible outbound IP addresses as a string. For example 52.23.25.3,52.143.43.12,52.143.43.17. This is a superset of outbound_ip_addresses. For example ["52.23.25.3", "52.143.43.12","52.143.43.17"]. + // A comma separated list of possible outbound IP addresses as a string. For example `52.23.25.3,52.143.43.12,52.143.43.17`. This is a superset of `outbound_ip_addresses`. For example `["52.23.25.3", "52.143.43.12","52.143.43.17"]`. 
+ PossibleOutboundIPAddresses *string `json:"possibleOutboundIpAddresses,omitempty" tf:"possible_outbound_ip_addresses,omitempty"` + + // Should public network access be enabled for the Function App. Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The ID of the Service Plan in which to run this slot. If not specified the same Service Plan as the Windows Function App will be used. + ServicePlanID *string `json:"servicePlanId,omitempty" tf:"service_plan_id,omitempty"` + + // a site_config block as detailed below. + SiteConfig *WindowsFunctionAppSlotSiteConfigObservation `json:"siteConfig,omitempty" tf:"site_config,omitempty"` + + // One or more storage_account blocks as defined below. + StorageAccount []WindowsFunctionAppSlotStorageAccountObservation `json:"storageAccount,omitempty" tf:"storage_account,omitempty"` + + // The backend storage account name which will be used by this Function App Slot. + // The backend storage account name which will be used by this Function App Slot. + StorageAccountName *string `json:"storageAccountName,omitempty" tf:"storage_account_name,omitempty"` + + // The Key Vault Secret ID, optionally including version, that contains the Connection String to connect to the storage account for this Function App Slot. + // The Key Vault Secret ID, including version, that contains the Connection String to connect to the storage account for this Function App. + StorageKeyVaultSecretID *string `json:"storageKeyVaultSecretId,omitempty" tf:"storage_key_vault_secret_id,omitempty"` + + // Should the Function App Slot use its Managed Identity to access storage. + // Should the Function App Slot use its Managed Identity to access storage? + StorageUsesManagedIdentity *bool `json:"storageUsesManagedIdentity,omitempty" tf:"storage_uses_managed_identity,omitempty"` + + // A mapping of tags which should be assigned to the Windows Function App Slot. 
+ // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The subnet id which will be used by this Function App Slot for regional virtual network integration. + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` + + // Should the default WebDeploy Basic Authentication publishing credentials enabled. Defaults to true. + WebdeployPublishBasicAuthenticationEnabled *bool `json:"webdeployPublishBasicAuthenticationEnabled,omitempty" tf:"webdeploy_publish_basic_authentication_enabled,omitempty"` +} + +type WindowsFunctionAppSlotParameters struct { + + // A map of key-value pairs for App Settings and custom values. + // A map of key-value pairs for [App Settings](https://docs.microsoft.com/en-us/azure/azure-functions/functions-app-settings) and custom values. + // +kubebuilder:validation:Optional + // +mapType=granular + AppSettings map[string]*string `json:"appSettings,omitempty" tf:"app_settings,omitempty"` + + // an auth_settings block as detailed below. + // +kubebuilder:validation:Optional + AuthSettings *WindowsFunctionAppSlotAuthSettingsParameters `json:"authSettings,omitempty" tf:"auth_settings,omitempty"` + + // an auth_settings_v2 block as detailed below. + // +kubebuilder:validation:Optional + AuthSettingsV2 *WindowsFunctionAppSlotAuthSettingsV2Parameters `json:"authSettingsV2,omitempty" tf:"auth_settings_v2,omitempty"` + + // a backup block as detailed below. + // +kubebuilder:validation:Optional + Backup *WindowsFunctionAppSlotBackupParameters `json:"backup,omitempty" tf:"backup,omitempty"` + + // Should built-in logging be enabled. Configures AzureWebJobsDashboard app setting based on the configured storage setting. Defaults to true. + // Should built in logging be enabled. Configures `AzureWebJobsDashboard` app setting based on the configured storage setting. 
+ // +kubebuilder:validation:Optional + BuiltinLoggingEnabled *bool `json:"builtinLoggingEnabled,omitempty" tf:"builtin_logging_enabled,omitempty"` + + // Should the Function App Slot use Client Certificates. + // Should the Function App Slot use Client Certificates. + // +kubebuilder:validation:Optional + ClientCertificateEnabled *bool `json:"clientCertificateEnabled,omitempty" tf:"client_certificate_enabled,omitempty"` + + // Paths to exclude when using client certificates, separated by ; + // Paths to exclude when using client certificates, separated by ; + // +kubebuilder:validation:Optional + ClientCertificateExclusionPaths *string `json:"clientCertificateExclusionPaths,omitempty" tf:"client_certificate_exclusion_paths,omitempty"` + + // The mode of the Function App Slot's client certificates requirement for incoming requests. Possible values are Required, Optional, and OptionalInteractiveUser. Defaults to Optional. + // The mode of the Function App Slot's client certificates requirement for incoming requests. Possible values are `Required`, `Optional`, and `OptionalInteractiveUser`. + // +kubebuilder:validation:Optional + ClientCertificateMode *string `json:"clientCertificateMode,omitempty" tf:"client_certificate_mode,omitempty"` + + // a connection_string block as detailed below. + // +kubebuilder:validation:Optional + ConnectionString []WindowsFunctionAppSlotConnectionStringParameters `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // Force disable the content share settings. + // Force disable the content share settings. + // +kubebuilder:validation:Optional + ContentShareForceDisabled *bool `json:"contentShareForceDisabled,omitempty" tf:"content_share_force_disabled,omitempty"` + + // The amount of memory in gigabyte-seconds that your application is allowed to consume per day. Setting this value only affects function apps in Consumption Plans. Defaults to 0. 
+ // The amount of memory in gigabyte-seconds that your application is allowed to consume per day. Setting this value only affects function apps in Consumption Plans. + // +kubebuilder:validation:Optional + DailyMemoryTimeQuota *float64 `json:"dailyMemoryTimeQuota,omitempty" tf:"daily_memory_time_quota,omitempty"` + + // Is the Windows Function App Slot enabled. Defaults to true. + // Is the Windows Function App Slot enabled. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Should the default FTP Basic Authentication publishing profile be enabled. Defaults to true. + // +kubebuilder:validation:Optional + FtpPublishBasicAuthenticationEnabled *bool `json:"ftpPublishBasicAuthenticationEnabled,omitempty" tf:"ftp_publish_basic_authentication_enabled,omitempty"` + + // The name of the Windows Function App this Slot is a member of. Changing this forces a new resource to be created. + // The ID of the Windows Function App this Slot is a member of. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/web/v1beta2.WindowsFunctionApp + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + FunctionAppID *string `json:"functionAppId,omitempty" tf:"function_app_id,omitempty"` + + // Reference to a WindowsFunctionApp in web to populate functionAppId. + // +kubebuilder:validation:Optional + FunctionAppIDRef *v1.Reference `json:"functionAppIdRef,omitempty" tf:"-"` + + // Selector for a WindowsFunctionApp in web to populate functionAppId. + // +kubebuilder:validation:Optional + FunctionAppIDSelector *v1.Selector `json:"functionAppIdSelector,omitempty" tf:"-"` + + // The runtime version associated with the Function App Slot. Defaults to ~4. + // The runtime version associated with the Function App Slot. 
+ // +kubebuilder:validation:Optional + FunctionsExtensionVersion *string `json:"functionsExtensionVersion,omitempty" tf:"functions_extension_version,omitempty"` + + // Can the Function App Slot only be accessed via HTTPS?. Defaults to false. + // Can the Function App Slot only be accessed via HTTPS? + // +kubebuilder:validation:Optional + HTTPSOnly *bool `json:"httpsOnly,omitempty" tf:"https_only,omitempty"` + + // an identity block as detailed below. + // +kubebuilder:validation:Optional + Identity *WindowsFunctionAppSlotIdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The User Assigned Identity ID used for accessing KeyVault secrets. The identity must be assigned to the application in the identity block. For more information see - Access vaults with a user-assigned identity + // The User Assigned Identity to use for Key Vault access. + // +kubebuilder:validation:Optional + KeyVaultReferenceIdentityID *string `json:"keyVaultReferenceIdentityId,omitempty" tf:"key_vault_reference_identity_id,omitempty"` + + // Should public network access be enabled for the Function App. Defaults to true. + // +kubebuilder:validation:Optional + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The ID of the Service Plan in which to run this slot. If not specified the same Service Plan as the Windows Function App will be used. + // +kubebuilder:validation:Optional + ServicePlanID *string `json:"servicePlanId,omitempty" tf:"service_plan_id,omitempty"` + + // a site_config block as detailed below. + // +kubebuilder:validation:Optional + SiteConfig *WindowsFunctionAppSlotSiteConfigParameters `json:"siteConfig,omitempty" tf:"site_config,omitempty"` + + // One or more storage_account blocks as defined below. 
+ // +kubebuilder:validation:Optional + StorageAccount []WindowsFunctionAppSlotStorageAccountParameters `json:"storageAccount,omitempty" tf:"storage_account,omitempty"` + + // The access key which will be used to access the storage account for the Function App Slot. + // The access key which will be used to access the storage account for the Function App Slot. + // +kubebuilder:validation:Optional + StorageAccountAccessKeySecretRef *v1.SecretKeySelector `json:"storageAccountAccessKeySecretRef,omitempty" tf:"-"` + + // The backend storage account name which will be used by this Function App Slot. + // The backend storage account name which will be used by this Function App Slot. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/storage/v1beta2.Account + // +kubebuilder:validation:Optional + StorageAccountName *string `json:"storageAccountName,omitempty" tf:"storage_account_name,omitempty"` + + // Reference to a Account in storage to populate storageAccountName. + // +kubebuilder:validation:Optional + StorageAccountNameRef *v1.Reference `json:"storageAccountNameRef,omitempty" tf:"-"` + + // Selector for a Account in storage to populate storageAccountName. + // +kubebuilder:validation:Optional + StorageAccountNameSelector *v1.Selector `json:"storageAccountNameSelector,omitempty" tf:"-"` + + // The Key Vault Secret ID, optionally including version, that contains the Connection String to connect to the storage account for this Function App Slot. + // The Key Vault Secret ID, including version, that contains the Connection String to connect to the storage account for this Function App. + // +kubebuilder:validation:Optional + StorageKeyVaultSecretID *string `json:"storageKeyVaultSecretId,omitempty" tf:"storage_key_vault_secret_id,omitempty"` + + // Should the Function App Slot use its Managed Identity to access storage. + // Should the Function App Slot use its Managed Identity to access storage? 
+ // +kubebuilder:validation:Optional + StorageUsesManagedIdentity *bool `json:"storageUsesManagedIdentity,omitempty" tf:"storage_uses_managed_identity,omitempty"` + + // A mapping of tags which should be assigned to the Windows Function App Slot. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The subnet id which will be used by this Function App Slot for regional virtual network integration. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDRef *v1.Reference `json:"virtualNetworkSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDSelector *v1.Selector `json:"virtualNetworkSubnetIdSelector,omitempty" tf:"-"` + + // Should the default WebDeploy Basic Authentication publishing credentials enabled. Defaults to true. + // +kubebuilder:validation:Optional + WebdeployPublishBasicAuthenticationEnabled *bool `json:"webdeployPublishBasicAuthenticationEnabled,omitempty" tf:"webdeploy_publish_basic_authentication_enabled,omitempty"` +} + +type WindowsFunctionAppSlotSiteConfigAppServiceLogsInitParameters struct { + + // The amount of disk space to use for logs. Valid values are between 25 and 100. Defaults to 35. + // The amount of disk space to use for logs. Valid values are between `25` and `100`. 
+ DiskQuotaMb *float64 `json:"diskQuotaMb,omitempty" tf:"disk_quota_mb,omitempty"` + + // After how many days backups should be deleted. Defaults to 30. + // The retention period for logs in days. Valid values are between `0` and `99999`. Defaults to `0` (never delete). + RetentionPeriodDays *float64 `json:"retentionPeriodDays,omitempty" tf:"retention_period_days,omitempty"` +} + +type WindowsFunctionAppSlotSiteConfigAppServiceLogsObservation struct { + + // The amount of disk space to use for logs. Valid values are between 25 and 100. Defaults to 35. + // The amount of disk space to use for logs. Valid values are between `25` and `100`. + DiskQuotaMb *float64 `json:"diskQuotaMb,omitempty" tf:"disk_quota_mb,omitempty"` + + // After how many days backups should be deleted. Defaults to 30. + // The retention period for logs in days. Valid values are between `0` and `99999`. Defaults to `0` (never delete). + RetentionPeriodDays *float64 `json:"retentionPeriodDays,omitempty" tf:"retention_period_days,omitempty"` +} + +type WindowsFunctionAppSlotSiteConfigAppServiceLogsParameters struct { + + // The amount of disk space to use for logs. Valid values are between 25 and 100. Defaults to 35. + // The amount of disk space to use for logs. Valid values are between `25` and `100`. + // +kubebuilder:validation:Optional + DiskQuotaMb *float64 `json:"diskQuotaMb,omitempty" tf:"disk_quota_mb,omitempty"` + + // After how many days backups should be deleted. Defaults to 30. + // The retention period for logs in days. Valid values are between `0` and `99999`. Defaults to `0` (never delete). + // +kubebuilder:validation:Optional + RetentionPeriodDays *float64 `json:"retentionPeriodDays,omitempty" tf:"retention_period_days,omitempty"` +} + +type WindowsFunctionAppSlotSiteConfigApplicationStackInitParameters struct { + + // The version of .Net. Possible values are v3.0, v4.0, v6.0, v7.0 and v8.0. Defaults to v4.0. + // The version of .Net. 
Possible values are `v3.0`, `v4.0`, `v6.0` and `v7.0` + DotnetVersion *string `json:"dotnetVersion,omitempty" tf:"dotnet_version,omitempty"` + + // The version of Java to use. Possible values are 1.8, 11 and 17 (In-Preview). + // The version of Java to use. Possible values are `1.8`, `11` and `17` + JavaVersion *string `json:"javaVersion,omitempty" tf:"java_version,omitempty"` + + // The version of Node to use. Possible values are ~12, ~14, ~16 and ~18. + // The version of Node to use. Possible values include `12`, `14`, `16` and `18` + NodeVersion *string `json:"nodeVersion,omitempty" tf:"node_version,omitempty"` + + // The PowerShell Core version to use. Possible values are 7, and 7.2. + // The PowerShell Core version to use. Possible values are `7`, and `7.2` + PowershellCoreVersion *string `json:"powershellCoreVersion,omitempty" tf:"powershell_core_version,omitempty"` + + // Does the Function App use a custom Application Stack? + // Does the Function App use a custom Application Stack? + UseCustomRuntime *bool `json:"useCustomRuntime,omitempty" tf:"use_custom_runtime,omitempty"` + + // Should the DotNet process use an isolated runtime. Defaults to false. + // Should the DotNet process use an isolated runtime. Defaults to `false`. + UseDotnetIsolatedRuntime *bool `json:"useDotnetIsolatedRuntime,omitempty" tf:"use_dotnet_isolated_runtime,omitempty"` +} + +type WindowsFunctionAppSlotSiteConfigApplicationStackObservation struct { + + // The version of .Net. Possible values are v3.0, v4.0, v6.0, v7.0 and v8.0. Defaults to v4.0. + // The version of .Net. Possible values are `v3.0`, `v4.0`, `v6.0` and `v7.0` + DotnetVersion *string `json:"dotnetVersion,omitempty" tf:"dotnet_version,omitempty"` + + // The version of Java to use. Possible values are 1.8, 11 and 17 (In-Preview). + // The version of Java to use. Possible values are `1.8`, `11` and `17` + JavaVersion *string `json:"javaVersion,omitempty" tf:"java_version,omitempty"` + + // The version of Node to use. 
Possible values are ~12, ~14, ~16 and ~18. + // The version of Node to use. Possible values include `12`, `14`, `16` and `18` + NodeVersion *string `json:"nodeVersion,omitempty" tf:"node_version,omitempty"` + + // The PowerShell Core version to use. Possible values are 7, and 7.2. + // The PowerShell Core version to use. Possible values are `7`, and `7.2` + PowershellCoreVersion *string `json:"powershellCoreVersion,omitempty" tf:"powershell_core_version,omitempty"` + + // Does the Function App use a custom Application Stack? + // Does the Function App use a custom Application Stack? + UseCustomRuntime *bool `json:"useCustomRuntime,omitempty" tf:"use_custom_runtime,omitempty"` + + // Should the DotNet process use an isolated runtime. Defaults to false. + // Should the DotNet process use an isolated runtime. Defaults to `false`. + UseDotnetIsolatedRuntime *bool `json:"useDotnetIsolatedRuntime,omitempty" tf:"use_dotnet_isolated_runtime,omitempty"` +} + +type WindowsFunctionAppSlotSiteConfigApplicationStackParameters struct { + + // The version of .Net. Possible values are v3.0, v4.0, v6.0, v7.0 and v8.0. Defaults to v4.0. + // The version of .Net. Possible values are `v3.0`, `v4.0`, `v6.0` and `v7.0` + // +kubebuilder:validation:Optional + DotnetVersion *string `json:"dotnetVersion,omitempty" tf:"dotnet_version,omitempty"` + + // The version of Java to use. Possible values are 1.8, 11 and 17 (In-Preview). + // The version of Java to use. Possible values are `1.8`, `11` and `17` + // +kubebuilder:validation:Optional + JavaVersion *string `json:"javaVersion,omitempty" tf:"java_version,omitempty"` + + // The version of Node to use. Possible values are ~12, ~14, ~16 and ~18. + // The version of Node to use. Possible values include `12`, `14`, `16` and `18` + // +kubebuilder:validation:Optional + NodeVersion *string `json:"nodeVersion,omitempty" tf:"node_version,omitempty"` + + // The PowerShell Core version to use. Possible values are 7, and 7.2. 
+ // The PowerShell Core version to use. Possible values are `7`, and `7.2` + // +kubebuilder:validation:Optional + PowershellCoreVersion *string `json:"powershellCoreVersion,omitempty" tf:"powershell_core_version,omitempty"` + + // Does the Function App use a custom Application Stack? + // Does the Function App use a custom Application Stack? + // +kubebuilder:validation:Optional + UseCustomRuntime *bool `json:"useCustomRuntime,omitempty" tf:"use_custom_runtime,omitempty"` + + // Should the DotNet process use an isolated runtime. Defaults to false. + // Should the DotNet process use an isolated runtime. Defaults to `false`. + // +kubebuilder:validation:Optional + UseDotnetIsolatedRuntime *bool `json:"useDotnetIsolatedRuntime,omitempty" tf:"use_dotnet_isolated_runtime,omitempty"` +} + +type WindowsFunctionAppSlotSiteConfigCorsInitParameters struct { + + // Specifies a list of origins that should be allowed to make cross-origin calls. + // Specifies a list of origins that should be allowed to make cross-origin calls. + // +listType=set + AllowedOrigins []*string `json:"allowedOrigins,omitempty" tf:"allowed_origins,omitempty"` + + // Are credentials allowed in CORS requests? Defaults to false. + // Are credentials allowed in CORS requests? Defaults to `false`. + SupportCredentials *bool `json:"supportCredentials,omitempty" tf:"support_credentials,omitempty"` +} + +type WindowsFunctionAppSlotSiteConfigCorsObservation struct { + + // Specifies a list of origins that should be allowed to make cross-origin calls. + // Specifies a list of origins that should be allowed to make cross-origin calls. + // +listType=set + AllowedOrigins []*string `json:"allowedOrigins,omitempty" tf:"allowed_origins,omitempty"` + + // Are credentials allowed in CORS requests? Defaults to false. + // Are credentials allowed in CORS requests? Defaults to `false`. 
+ SupportCredentials *bool `json:"supportCredentials,omitempty" tf:"support_credentials,omitempty"` +} + +type WindowsFunctionAppSlotSiteConfigCorsParameters struct { + + // Specifies a list of origins that should be allowed to make cross-origin calls. + // Specifies a list of origins that should be allowed to make cross-origin calls. + // +kubebuilder:validation:Optional + // +listType=set + AllowedOrigins []*string `json:"allowedOrigins,omitempty" tf:"allowed_origins,omitempty"` + + // Are credentials allowed in CORS requests? Defaults to false. + // Are credentials allowed in CORS requests? Defaults to `false`. + // +kubebuilder:validation:Optional + SupportCredentials *bool `json:"supportCredentials,omitempty" tf:"support_credentials,omitempty"` +} + +type WindowsFunctionAppSlotSiteConfigIPRestrictionHeadersInitParameters struct { + + // Specifies a list of Azure Front Door IDs. + XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid"` + + // Specifies if a Front Door Health Probe should be expected. The only possible value is 1. + XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe"` + + // Specifies a list of addresses for which matching should be applied. Omitting this value means allow any. + XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for"` + + // Specifies a list of Hosts for which matching should be applied. + XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host"` +} + +type WindowsFunctionAppSlotSiteConfigIPRestrictionHeadersObservation struct { + + // Specifies a list of Azure Front Door IDs. + XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid,omitempty"` + + // Specifies if a Front Door Health Probe should be expected. The only possible value is 1. + XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe,omitempty"` + + // Specifies a list of addresses for which matching should be applied. 
Omitting this value means allow any. + XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for,omitempty"` + + // Specifies a list of Hosts for which matching should be applied. + XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host,omitempty"` +} + +type WindowsFunctionAppSlotSiteConfigIPRestrictionHeadersParameters struct { + + // Specifies a list of Azure Front Door IDs. + // +kubebuilder:validation:Optional + XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid"` + + // Specifies if a Front Door Health Probe should be expected. The only possible value is 1. + // +kubebuilder:validation:Optional + XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe"` + + // Specifies a list of addresses for which matching should be applied. Omitting this value means allow any. + // +kubebuilder:validation:Optional + XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for"` + + // Specifies a list of Hosts for which matching should be applied. + // +kubebuilder:validation:Optional + XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host"` +} + +type WindowsFunctionAppSlotSiteConfigIPRestrictionInitParameters struct { + + // The action to take. Possible values are Allow or Deny. Defaults to Allow. + // The action to take. Possible values are `Allow` or `Deny`. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The Description of this IP Restriction. + // The description of the IP restriction rule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // a headers block as detailed below. + Headers []WindowsFunctionAppSlotSiteConfigIPRestrictionHeadersInitParameters `json:"headers,omitempty" tf:"headers,omitempty"` + + // The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + // The CIDR notation of the IP or IP Range to match. 
For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // The name which should be used for this Storage Account. + // The name which should be used for this `ip_restriction`. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The priority value of this ip_restriction. Defaults to 65000. + // The priority value of this `ip_restriction`. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Service Tag used for this IP Restriction. + // The Service Tag used for this IP Restriction. + ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag,omitempty"` + + // The subnet id which will be used by this Function App Slot for regional virtual network integration. + // The Virtual Network Subnet ID used for this IP Restriction. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDRef *v1.Reference `json:"virtualNetworkSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDSelector *v1.Selector `json:"virtualNetworkSubnetIdSelector,omitempty" tf:"-"` +} + +type WindowsFunctionAppSlotSiteConfigIPRestrictionObservation struct { + + // The action to take. Possible values are Allow or Deny. Defaults to Allow. + // The action to take. Possible values are `Allow` or `Deny`. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The Description of this IP Restriction. 
+ // The description of the IP restriction rule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // a headers block as detailed below. + Headers []WindowsFunctionAppSlotSiteConfigIPRestrictionHeadersObservation `json:"headers,omitempty" tf:"headers,omitempty"` + + // The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + // The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // The name which should be used for this Storage Account. + // The name which should be used for this `ip_restriction`. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The priority value of this ip_restriction. Defaults to 65000. + // The priority value of this `ip_restriction`. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Service Tag used for this IP Restriction. + // The Service Tag used for this IP Restriction. + ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag,omitempty"` + + // The subnet id which will be used by this Function App Slot for regional virtual network integration. + // The Virtual Network Subnet ID used for this IP Restriction. + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` +} + +type WindowsFunctionAppSlotSiteConfigIPRestrictionParameters struct { + + // The action to take. Possible values are Allow or Deny. Defaults to Allow. + // The action to take. Possible values are `Allow` or `Deny`. + // +kubebuilder:validation:Optional + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The Description of this IP Restriction. + // The description of the IP restriction rule. 
+ // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // a headers block as detailed below. + // +kubebuilder:validation:Optional + Headers []WindowsFunctionAppSlotSiteConfigIPRestrictionHeadersParameters `json:"headers,omitempty" tf:"headers,omitempty"` + + // The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + // The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + // +kubebuilder:validation:Optional + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // The name which should be used for this Storage Account. + // The name which should be used for this `ip_restriction`. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The priority value of this ip_restriction. Defaults to 65000. + // The priority value of this `ip_restriction`. + // +kubebuilder:validation:Optional + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Service Tag used for this IP Restriction. + // The Service Tag used for this IP Restriction. + // +kubebuilder:validation:Optional + ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag,omitempty"` + + // The subnet id which will be used by this Function App Slot for regional virtual network integration. + // The Virtual Network Subnet ID used for this IP Restriction. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate virtualNetworkSubnetId. 
+ // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDRef *v1.Reference `json:"virtualNetworkSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDSelector *v1.Selector `json:"virtualNetworkSubnetIdSelector,omitempty" tf:"-"` +} + +type WindowsFunctionAppSlotSiteConfigInitParameters struct { + + // The URL of the API definition that describes this Windows Function App. + // The URL of the API definition that describes this Windows Function App. + APIDefinitionURL *string `json:"apiDefinitionUrl,omitempty" tf:"api_definition_url,omitempty"` + + // The ID of the API Management API for this Windows Function App. + // The ID of the API Management API for this Windows Function App. + APIManagementAPIID *string `json:"apiManagementApiId,omitempty" tf:"api_management_api_id,omitempty"` + + // If this Windows Web App is Always On enabled. Defaults to false. + // If this Windows Web App is Always On enabled. Defaults to `false`. + AlwaysOn *bool `json:"alwaysOn,omitempty" tf:"always_on,omitempty"` + + // The program and any arguments used to launch this app via the command line. (Example node myapp.js). + // The program and any arguments used to launch this app via the command line. (Example `node myapp.js`). + AppCommandLine *string `json:"appCommandLine,omitempty" tf:"app_command_line,omitempty"` + + // The number of workers this function app can scale out to. Only applicable to apps on the Consumption and Premium plan. + // The number of workers this function app can scale out to. Only applicable to apps on the Consumption and Premium plan. + AppScaleLimit *float64 `json:"appScaleLimit,omitempty" tf:"app_scale_limit,omitempty"` + + // an app_service_logs block as detailed below. 
+ AppServiceLogs *WindowsFunctionAppSlotSiteConfigAppServiceLogsInitParameters `json:"appServiceLogs,omitempty" tf:"app_service_logs,omitempty"` + + // an application_stack block as detailed below. + ApplicationStack *WindowsFunctionAppSlotSiteConfigApplicationStackInitParameters `json:"applicationStack,omitempty" tf:"application_stack,omitempty"` + + // The name of the slot to automatically swap with when this slot is successfully deployed. + AutoSwapSlotName *string `json:"autoSwapSlotName,omitempty" tf:"auto_swap_slot_name,omitempty"` + + // a cors block as detailed below. + Cors *WindowsFunctionAppSlotSiteConfigCorsInitParameters `json:"cors,omitempty" tf:"cors,omitempty"` + + // Specifies a list of Default Documents for the Windows Web App. + // Specifies a list of Default Documents for the Windows Web App. + DefaultDocuments []*string `json:"defaultDocuments,omitempty" tf:"default_documents,omitempty"` + + // The number of minimum instances for this Windows Function App. Only affects apps on Elastic Premium plans. + // The number of minimum instances for this Windows Function App. Only affects apps on Elastic Premium plans. + ElasticInstanceMinimum *float64 `json:"elasticInstanceMinimum,omitempty" tf:"elastic_instance_minimum,omitempty"` + + // State of FTP / FTPS service for this function app. Possible values include: AllAllowed, FtpsOnly and Disabled. Defaults to Disabled. + // State of FTP / FTPS service for this function app. Possible values include: `AllAllowed`, `FtpsOnly` and `Disabled`. Defaults to `Disabled`. + FtpsState *string `json:"ftpsState,omitempty" tf:"ftps_state,omitempty"` + + // The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between 2 and 10. Defaults to 0. Only valid in conjunction with health_check_path. + // The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between `2` and `10`. Defaults to `10`. 
Only valid in conjunction with `health_check_path` + HealthCheckEvictionTimeInMin *float64 `json:"healthCheckEvictionTimeInMin,omitempty" tf:"health_check_eviction_time_in_min,omitempty"` + + // The path to be checked for this function app health. + // The path to be checked for this function app health. + HealthCheckPath *string `json:"healthCheckPath,omitempty" tf:"health_check_path,omitempty"` + + // Specifies if the HTTP2 protocol should be enabled. Defaults to false. + // Specifies if the http2 protocol should be enabled. Defaults to `false`. + Http2Enabled *bool `json:"http2Enabled,omitempty" tf:"http2_enabled,omitempty"` + + // an ip_restriction block as detailed below. + IPRestriction []WindowsFunctionAppSlotSiteConfigIPRestrictionInitParameters `json:"ipRestriction,omitempty" tf:"ip_restriction,omitempty"` + + // The Default action for traffic that does not match any ip_restriction rule. possible values include Allow and Deny. Defaults to Allow. + IPRestrictionDefaultAction *string `json:"ipRestrictionDefaultAction,omitempty" tf:"ip_restriction_default_action,omitempty"` + + // The Site load balancing mode. Possible values include: WeightedRoundRobin, LeastRequests, LeastResponseTime, WeightedTotalTraffic, RequestHash, PerSiteRoundRobin. Defaults to LeastRequests if omitted. + // The Site load balancing mode. Possible values include: `WeightedRoundRobin`, `LeastRequests`, `LeastResponseTime`, `WeightedTotalTraffic`, `RequestHash`, `PerSiteRoundRobin`. Defaults to `LeastRequests` if omitted. + LoadBalancingMode *string `json:"loadBalancingMode,omitempty" tf:"load_balancing_mode,omitempty"` + + // The Managed Pipeline mode. Possible values include: Integrated, Classic. Defaults to Integrated. + // The Managed Pipeline mode. Possible values include: `Integrated`, `Classic`. Defaults to `Integrated`. 
+ ManagedPipelineMode *string `json:"managedPipelineMode,omitempty" tf:"managed_pipeline_mode,omitempty"` + + // The configures the minimum version of TLS required for SSL requests. Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + // The configures the minimum version of TLS required for SSL requests. Possible values include: `1.0`, `1.1`, and `1.2`. Defaults to `1.2`. + MinimumTLSVersion *string `json:"minimumTlsVersion,omitempty" tf:"minimum_tls_version,omitempty"` + + // The number of pre-warmed instances for this function app. Only affects apps on an Elastic Premium plan. + // The number of pre-warmed instances for this function app. Only affects apps on an Elastic Premium plan. + PreWarmedInstanceCount *float64 `json:"preWarmedInstanceCount,omitempty" tf:"pre_warmed_instance_count,omitempty"` + + // Should Remote Debugging be enabled. Defaults to false. + // Should Remote Debugging be enabled. Defaults to `false`. + RemoteDebuggingEnabled *bool `json:"remoteDebuggingEnabled,omitempty" tf:"remote_debugging_enabled,omitempty"` + + // The Remote Debugging Version. Possible values include VS2017, VS2019, and VS2022 + // The Remote Debugging Version. Possible values include `VS2017`, `VS2019`, and `VS2022` + RemoteDebuggingVersion *string `json:"remoteDebuggingVersion,omitempty" tf:"remote_debugging_version,omitempty"` + + // Should Scale Monitoring of the Functions Runtime be enabled? + // Should Functions Runtime Scale Monitoring be enabled. + RuntimeScaleMonitoringEnabled *bool `json:"runtimeScaleMonitoringEnabled,omitempty" tf:"runtime_scale_monitoring_enabled,omitempty"` + + // a scm_ip_restriction block as detailed below. + ScmIPRestriction []WindowsFunctionAppSlotSiteConfigScmIPRestrictionInitParameters `json:"scmIpRestriction,omitempty" tf:"scm_ip_restriction,omitempty"` + + // The Default action for traffic that does not match any scm_ip_restriction rule. possible values include Allow and Deny. Defaults to Allow. 
+ ScmIPRestrictionDefaultAction *string `json:"scmIpRestrictionDefaultAction,omitempty" tf:"scm_ip_restriction_default_action,omitempty"` + + // Configures the minimum version of TLS required for SSL requests to the SCM site Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + // Configures the minimum version of TLS required for SSL requests to the SCM site Possible values include: `1.0`, `1.1`, and `1.2`. Defaults to `1.2`. + ScmMinimumTLSVersion *string `json:"scmMinimumTlsVersion,omitempty" tf:"scm_minimum_tls_version,omitempty"` + + // Should the Windows Function App ip_restriction configuration be used for the SCM also. + // Should the Windows Function App `ip_restriction` configuration be used for the SCM also. + ScmUseMainIPRestriction *bool `json:"scmUseMainIpRestriction,omitempty" tf:"scm_use_main_ip_restriction,omitempty"` + + // Should the Windows Web App use a 32-bit worker. Defaults to true. + // Should the Windows Web App use a 32-bit worker. + Use32BitWorker *bool `json:"use32BitWorker,omitempty" tf:"use_32_bit_worker,omitempty"` + + // Should all outbound traffic to have NAT Gateways, Network Security Groups and User Defined Routes applied? Defaults to false. + // Should all outbound traffic to have Virtual Network Security Groups and User Defined Routes applied? Defaults to `false`. + VnetRouteAllEnabled *bool `json:"vnetRouteAllEnabled,omitempty" tf:"vnet_route_all_enabled,omitempty"` + + // Should Web Sockets be enabled. Defaults to false. + // Should Web Sockets be enabled. Defaults to `false`. + WebsocketsEnabled *bool `json:"websocketsEnabled,omitempty" tf:"websockets_enabled,omitempty"` + + // The number of Workers for this Windows Function App. + // The number of Workers for this Windows Function App. + WorkerCount *float64 `json:"workerCount,omitempty" tf:"worker_count,omitempty"` +} + +type WindowsFunctionAppSlotSiteConfigObservation struct { + + // The URL of the API definition that describes this Windows Function App. 
+ // The URL of the API definition that describes this Windows Function App. + APIDefinitionURL *string `json:"apiDefinitionUrl,omitempty" tf:"api_definition_url,omitempty"` + + // The ID of the API Management API for this Windows Function App. + // The ID of the API Management API for this Windows Function App. + APIManagementAPIID *string `json:"apiManagementApiId,omitempty" tf:"api_management_api_id,omitempty"` + + // If this Windows Web App is Always On enabled. Defaults to false. + // If this Windows Web App is Always On enabled. Defaults to `false`. + AlwaysOn *bool `json:"alwaysOn,omitempty" tf:"always_on,omitempty"` + + // The program and any arguments used to launch this app via the command line. (Example node myapp.js). + // The program and any arguments used to launch this app via the command line. (Example `node myapp.js`). + AppCommandLine *string `json:"appCommandLine,omitempty" tf:"app_command_line,omitempty"` + + // The number of workers this function app can scale out to. Only applicable to apps on the Consumption and Premium plan. + // The number of workers this function app can scale out to. Only applicable to apps on the Consumption and Premium plan. + AppScaleLimit *float64 `json:"appScaleLimit,omitempty" tf:"app_scale_limit,omitempty"` + + // an app_service_logs block as detailed below. + AppServiceLogs *WindowsFunctionAppSlotSiteConfigAppServiceLogsObservation `json:"appServiceLogs,omitempty" tf:"app_service_logs,omitempty"` + + // an application_stack block as detailed below. + ApplicationStack *WindowsFunctionAppSlotSiteConfigApplicationStackObservation `json:"applicationStack,omitempty" tf:"application_stack,omitempty"` + + // The name of the slot to automatically swap with when this slot is successfully deployed. + AutoSwapSlotName *string `json:"autoSwapSlotName,omitempty" tf:"auto_swap_slot_name,omitempty"` + + // a cors block as detailed below. 
+ Cors *WindowsFunctionAppSlotSiteConfigCorsObservation `json:"cors,omitempty" tf:"cors,omitempty"` + + // Specifies a list of Default Documents for the Windows Web App. + // Specifies a list of Default Documents for the Windows Web App. + DefaultDocuments []*string `json:"defaultDocuments,omitempty" tf:"default_documents,omitempty"` + + // Is detailed error logging enabled + // Is detailed error logging enabled + DetailedErrorLoggingEnabled *bool `json:"detailedErrorLoggingEnabled,omitempty" tf:"detailed_error_logging_enabled,omitempty"` + + // The number of minimum instances for this Windows Function App. Only affects apps on Elastic Premium plans. + // The number of minimum instances for this Windows Function App. Only affects apps on Elastic Premium plans. + ElasticInstanceMinimum *float64 `json:"elasticInstanceMinimum,omitempty" tf:"elastic_instance_minimum,omitempty"` + + // State of FTP / FTPS service for this function app. Possible values include: AllAllowed, FtpsOnly and Disabled. Defaults to Disabled. + // State of FTP / FTPS service for this function app. Possible values include: `AllAllowed`, `FtpsOnly` and `Disabled`. Defaults to `Disabled`. + FtpsState *string `json:"ftpsState,omitempty" tf:"ftps_state,omitempty"` + + // The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between 2 and 10. Defaults to 0. Only valid in conjunction with health_check_path. + // The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between `2` and `10`. Defaults to `10`. Only valid in conjunction with `health_check_path` + HealthCheckEvictionTimeInMin *float64 `json:"healthCheckEvictionTimeInMin,omitempty" tf:"health_check_eviction_time_in_min,omitempty"` + + // The path to be checked for this function app health. + // The path to be checked for this function app health. 
+ HealthCheckPath *string `json:"healthCheckPath,omitempty" tf:"health_check_path,omitempty"` + + // Specifies if the HTTP2 protocol should be enabled. Defaults to false. + // Specifies if the http2 protocol should be enabled. Defaults to `false`. + Http2Enabled *bool `json:"http2Enabled,omitempty" tf:"http2_enabled,omitempty"` + + // an ip_restriction block as detailed below. + IPRestriction []WindowsFunctionAppSlotSiteConfigIPRestrictionObservation `json:"ipRestriction,omitempty" tf:"ip_restriction,omitempty"` + + // The Default action for traffic that does not match any ip_restriction rule. possible values include Allow and Deny. Defaults to Allow. + IPRestrictionDefaultAction *string `json:"ipRestrictionDefaultAction,omitempty" tf:"ip_restriction_default_action,omitempty"` + + // The Site load balancing mode. Possible values include: WeightedRoundRobin, LeastRequests, LeastResponseTime, WeightedTotalTraffic, RequestHash, PerSiteRoundRobin. Defaults to LeastRequests if omitted. + // The Site load balancing mode. Possible values include: `WeightedRoundRobin`, `LeastRequests`, `LeastResponseTime`, `WeightedTotalTraffic`, `RequestHash`, `PerSiteRoundRobin`. Defaults to `LeastRequests` if omitted. + LoadBalancingMode *string `json:"loadBalancingMode,omitempty" tf:"load_balancing_mode,omitempty"` + + // The Managed Pipeline mode. Possible values include: Integrated, Classic. Defaults to Integrated. + // The Managed Pipeline mode. Possible values include: `Integrated`, `Classic`. Defaults to `Integrated`. + ManagedPipelineMode *string `json:"managedPipelineMode,omitempty" tf:"managed_pipeline_mode,omitempty"` + + // The configures the minimum version of TLS required for SSL requests. Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + // The configures the minimum version of TLS required for SSL requests. Possible values include: `1.0`, `1.1`, and `1.2`. Defaults to `1.2`. 
+ MinimumTLSVersion *string `json:"minimumTlsVersion,omitempty" tf:"minimum_tls_version,omitempty"` + + // The number of pre-warmed instances for this function app. Only affects apps on an Elastic Premium plan. + // The number of pre-warmed instances for this function app. Only affects apps on an Elastic Premium plan. + PreWarmedInstanceCount *float64 `json:"preWarmedInstanceCount,omitempty" tf:"pre_warmed_instance_count,omitempty"` + + // Should Remote Debugging be enabled. Defaults to false. + // Should Remote Debugging be enabled. Defaults to `false`. + RemoteDebuggingEnabled *bool `json:"remoteDebuggingEnabled,omitempty" tf:"remote_debugging_enabled,omitempty"` + + // The Remote Debugging Version. Possible values include VS2017, VS2019, and VS2022 + // The Remote Debugging Version. Possible values include `VS2017`, `VS2019`, and `VS2022` + RemoteDebuggingVersion *string `json:"remoteDebuggingVersion,omitempty" tf:"remote_debugging_version,omitempty"` + + // Should Scale Monitoring of the Functions Runtime be enabled? + // Should Functions Runtime Scale Monitoring be enabled. + RuntimeScaleMonitoringEnabled *bool `json:"runtimeScaleMonitoringEnabled,omitempty" tf:"runtime_scale_monitoring_enabled,omitempty"` + + // a scm_ip_restriction block as detailed below. + ScmIPRestriction []WindowsFunctionAppSlotSiteConfigScmIPRestrictionObservation `json:"scmIpRestriction,omitempty" tf:"scm_ip_restriction,omitempty"` + + // The Default action for traffic that does not match any scm_ip_restriction rule. possible values include Allow and Deny. Defaults to Allow. + ScmIPRestrictionDefaultAction *string `json:"scmIpRestrictionDefaultAction,omitempty" tf:"scm_ip_restriction_default_action,omitempty"` + + // Configures the minimum version of TLS required for SSL requests to the SCM site Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. 
+ // Configures the minimum version of TLS required for SSL requests to the SCM site Possible values include: `1.0`, `1.1`, and `1.2`. Defaults to `1.2`. + ScmMinimumTLSVersion *string `json:"scmMinimumTlsVersion,omitempty" tf:"scm_minimum_tls_version,omitempty"` + + // The SCM Type in use by the Windows Function App. + // The SCM Type in use by the Windows Function App. + ScmType *string `json:"scmType,omitempty" tf:"scm_type,omitempty"` + + // Should the Windows Function App ip_restriction configuration be used for the SCM also. + // Should the Windows Function App `ip_restriction` configuration be used for the SCM also. + ScmUseMainIPRestriction *bool `json:"scmUseMainIpRestriction,omitempty" tf:"scm_use_main_ip_restriction,omitempty"` + + // Should the Windows Web App use a 32-bit worker. Defaults to true. + // Should the Windows Web App use a 32-bit worker. + Use32BitWorker *bool `json:"use32BitWorker,omitempty" tf:"use_32_bit_worker,omitempty"` + + // Should all outbound traffic to have NAT Gateways, Network Security Groups and User Defined Routes applied? Defaults to false. + // Should all outbound traffic to have Virtual Network Security Groups and User Defined Routes applied? Defaults to `false`. + VnetRouteAllEnabled *bool `json:"vnetRouteAllEnabled,omitempty" tf:"vnet_route_all_enabled,omitempty"` + + // Should Web Sockets be enabled. Defaults to false. + // Should Web Sockets be enabled. Defaults to `false`. + WebsocketsEnabled *bool `json:"websocketsEnabled,omitempty" tf:"websockets_enabled,omitempty"` + + // The Windows FX Version string. + // The Windows FX Version string. + WindowsFxVersion *string `json:"windowsFxVersion,omitempty" tf:"windows_fx_version,omitempty"` + + // The number of Workers for this Windows Function App. + // The number of Workers for this Windows Function App. 
+ WorkerCount *float64 `json:"workerCount,omitempty" tf:"worker_count,omitempty"` +} + +type WindowsFunctionAppSlotSiteConfigParameters struct { + + // The URL of the API definition that describes this Windows Function App. + // The URL of the API definition that describes this Windows Function App. + // +kubebuilder:validation:Optional + APIDefinitionURL *string `json:"apiDefinitionUrl,omitempty" tf:"api_definition_url,omitempty"` + + // The ID of the API Management API for this Windows Function App. + // The ID of the API Management API for this Windows Function App. + // +kubebuilder:validation:Optional + APIManagementAPIID *string `json:"apiManagementApiId,omitempty" tf:"api_management_api_id,omitempty"` + + // If this Windows Web App is Always On enabled. Defaults to false. + // If this Windows Web App is Always On enabled. Defaults to `false`. + // +kubebuilder:validation:Optional + AlwaysOn *bool `json:"alwaysOn,omitempty" tf:"always_on,omitempty"` + + // The program and any arguments used to launch this app via the command line. (Example node myapp.js). + // The program and any arguments used to launch this app via the command line. (Example `node myapp.js`). + // +kubebuilder:validation:Optional + AppCommandLine *string `json:"appCommandLine,omitempty" tf:"app_command_line,omitempty"` + + // The number of workers this function app can scale out to. Only applicable to apps on the Consumption and Premium plan. + // The number of workers this function app can scale out to. Only applicable to apps on the Consumption and Premium plan. + // +kubebuilder:validation:Optional + AppScaleLimit *float64 `json:"appScaleLimit,omitempty" tf:"app_scale_limit,omitempty"` + + // an app_service_logs block as detailed below. 
+ // +kubebuilder:validation:Optional + AppServiceLogs *WindowsFunctionAppSlotSiteConfigAppServiceLogsParameters `json:"appServiceLogs,omitempty" tf:"app_service_logs,omitempty"` + + // The Connection String for linking the Windows Function App to Application Insights. + // The Connection String for linking the Windows Function App to Application Insights. + // +kubebuilder:validation:Optional + ApplicationInsightsConnectionStringSecretRef *v1.SecretKeySelector `json:"applicationInsightsConnectionStringSecretRef,omitempty" tf:"-"` + + // The Instrumentation Key for connecting the Windows Function App to Application Insights. + // The Instrumentation Key for connecting the Windows Function App to Application Insights. + // +kubebuilder:validation:Optional + ApplicationInsightsKeySecretRef *v1.SecretKeySelector `json:"applicationInsightsKeySecretRef,omitempty" tf:"-"` + + // an application_stack block as detailed below. + // +kubebuilder:validation:Optional + ApplicationStack *WindowsFunctionAppSlotSiteConfigApplicationStackParameters `json:"applicationStack,omitempty" tf:"application_stack,omitempty"` + + // The name of the slot to automatically swap with when this slot is successfully deployed. + // +kubebuilder:validation:Optional + AutoSwapSlotName *string `json:"autoSwapSlotName,omitempty" tf:"auto_swap_slot_name,omitempty"` + + // a cors block as detailed below. + // +kubebuilder:validation:Optional + Cors *WindowsFunctionAppSlotSiteConfigCorsParameters `json:"cors,omitempty" tf:"cors,omitempty"` + + // Specifies a list of Default Documents for the Windows Web App. + // Specifies a list of Default Documents for the Windows Web App. + // +kubebuilder:validation:Optional + DefaultDocuments []*string `json:"defaultDocuments,omitempty" tf:"default_documents,omitempty"` + + // The number of minimum instances for this Windows Function App. Only affects apps on Elastic Premium plans. + // The number of minimum instances for this Windows Function App. 
Only affects apps on Elastic Premium plans. + // +kubebuilder:validation:Optional + ElasticInstanceMinimum *float64 `json:"elasticInstanceMinimum,omitempty" tf:"elastic_instance_minimum,omitempty"` + + // State of FTP / FTPS service for this function app. Possible values include: AllAllowed, FtpsOnly and Disabled. Defaults to Disabled. + // State of FTP / FTPS service for this function app. Possible values include: `AllAllowed`, `FtpsOnly` and `Disabled`. Defaults to `Disabled`. + // +kubebuilder:validation:Optional + FtpsState *string `json:"ftpsState,omitempty" tf:"ftps_state,omitempty"` + + // The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between 2 and 10. Defaults to 0. Only valid in conjunction with health_check_path. + // The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between `2` and `10`. Defaults to `10`. Only valid in conjunction with `health_check_path` + // +kubebuilder:validation:Optional + HealthCheckEvictionTimeInMin *float64 `json:"healthCheckEvictionTimeInMin,omitempty" tf:"health_check_eviction_time_in_min,omitempty"` + + // The path to be checked for this function app health. + // The path to be checked for this function app health. + // +kubebuilder:validation:Optional + HealthCheckPath *string `json:"healthCheckPath,omitempty" tf:"health_check_path,omitempty"` + + // Specifies if the HTTP2 protocol should be enabled. Defaults to false. + // Specifies if the http2 protocol should be enabled. Defaults to `false`. + // +kubebuilder:validation:Optional + Http2Enabled *bool `json:"http2Enabled,omitempty" tf:"http2_enabled,omitempty"` + + // an ip_restriction block as detailed below. 
+ // +kubebuilder:validation:Optional + IPRestriction []WindowsFunctionAppSlotSiteConfigIPRestrictionParameters `json:"ipRestriction,omitempty" tf:"ip_restriction,omitempty"` + + // The Default action for traffic that does not match any ip_restriction rule. possible values include Allow and Deny. Defaults to Allow. + // +kubebuilder:validation:Optional + IPRestrictionDefaultAction *string `json:"ipRestrictionDefaultAction,omitempty" tf:"ip_restriction_default_action,omitempty"` + + // The Site load balancing mode. Possible values include: WeightedRoundRobin, LeastRequests, LeastResponseTime, WeightedTotalTraffic, RequestHash, PerSiteRoundRobin. Defaults to LeastRequests if omitted. + // The Site load balancing mode. Possible values include: `WeightedRoundRobin`, `LeastRequests`, `LeastResponseTime`, `WeightedTotalTraffic`, `RequestHash`, `PerSiteRoundRobin`. Defaults to `LeastRequests` if omitted. + // +kubebuilder:validation:Optional + LoadBalancingMode *string `json:"loadBalancingMode,omitempty" tf:"load_balancing_mode,omitempty"` + + // The Managed Pipeline mode. Possible values include: Integrated, Classic. Defaults to Integrated. + // The Managed Pipeline mode. Possible values include: `Integrated`, `Classic`. Defaults to `Integrated`. + // +kubebuilder:validation:Optional + ManagedPipelineMode *string `json:"managedPipelineMode,omitempty" tf:"managed_pipeline_mode,omitempty"` + + // The configures the minimum version of TLS required for SSL requests. Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + // The configures the minimum version of TLS required for SSL requests. Possible values include: `1.0`, `1.1`, and `1.2`. Defaults to `1.2`. + // +kubebuilder:validation:Optional + MinimumTLSVersion *string `json:"minimumTlsVersion,omitempty" tf:"minimum_tls_version,omitempty"` + + // The number of pre-warmed instances for this function app. Only affects apps on an Elastic Premium plan. + // The number of pre-warmed instances for this function app. 
Only affects apps on an Elastic Premium plan. + // +kubebuilder:validation:Optional + PreWarmedInstanceCount *float64 `json:"preWarmedInstanceCount,omitempty" tf:"pre_warmed_instance_count,omitempty"` + + // Should Remote Debugging be enabled. Defaults to false. + // Should Remote Debugging be enabled. Defaults to `false`. + // +kubebuilder:validation:Optional + RemoteDebuggingEnabled *bool `json:"remoteDebuggingEnabled,omitempty" tf:"remote_debugging_enabled,omitempty"` + + // The Remote Debugging Version. Possible values include VS2017, VS2019, and VS2022 + // The Remote Debugging Version. Possible values include `VS2017`, `VS2019`, and `VS2022` + // +kubebuilder:validation:Optional + RemoteDebuggingVersion *string `json:"remoteDebuggingVersion,omitempty" tf:"remote_debugging_version,omitempty"` + + // Should Scale Monitoring of the Functions Runtime be enabled? + // Should Functions Runtime Scale Monitoring be enabled. + // +kubebuilder:validation:Optional + RuntimeScaleMonitoringEnabled *bool `json:"runtimeScaleMonitoringEnabled,omitempty" tf:"runtime_scale_monitoring_enabled,omitempty"` + + // a scm_ip_restriction block as detailed below. + // +kubebuilder:validation:Optional + ScmIPRestriction []WindowsFunctionAppSlotSiteConfigScmIPRestrictionParameters `json:"scmIpRestriction,omitempty" tf:"scm_ip_restriction,omitempty"` + + // The Default action for traffic that does not match any scm_ip_restriction rule. possible values include Allow and Deny. Defaults to Allow. + // +kubebuilder:validation:Optional + ScmIPRestrictionDefaultAction *string `json:"scmIpRestrictionDefaultAction,omitempty" tf:"scm_ip_restriction_default_action,omitempty"` + + // Configures the minimum version of TLS required for SSL requests to the SCM site Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + // Configures the minimum version of TLS required for SSL requests to the SCM site Possible values include: `1.0`, `1.1`, and `1.2`. Defaults to `1.2`. 
+ // +kubebuilder:validation:Optional + ScmMinimumTLSVersion *string `json:"scmMinimumTlsVersion,omitempty" tf:"scm_minimum_tls_version,omitempty"` + + // Should the Windows Function App ip_restriction configuration be used for the SCM also. + // Should the Windows Function App `ip_restriction` configuration be used for the SCM also. + // +kubebuilder:validation:Optional + ScmUseMainIPRestriction *bool `json:"scmUseMainIpRestriction,omitempty" tf:"scm_use_main_ip_restriction,omitempty"` + + // Should the Windows Web App use a 32-bit worker. Defaults to true. + // Should the Windows Web App use a 32-bit worker. + // +kubebuilder:validation:Optional + Use32BitWorker *bool `json:"use32BitWorker,omitempty" tf:"use_32_bit_worker,omitempty"` + + // Should all outbound traffic to have NAT Gateways, Network Security Groups and User Defined Routes applied? Defaults to false. + // Should all outbound traffic to have Virtual Network Security Groups and User Defined Routes applied? Defaults to `false`. + // +kubebuilder:validation:Optional + VnetRouteAllEnabled *bool `json:"vnetRouteAllEnabled,omitempty" tf:"vnet_route_all_enabled,omitempty"` + + // Should Web Sockets be enabled. Defaults to false. + // Should Web Sockets be enabled. Defaults to `false`. + // +kubebuilder:validation:Optional + WebsocketsEnabled *bool `json:"websocketsEnabled,omitempty" tf:"websockets_enabled,omitempty"` + + // The number of Workers for this Windows Function App. + // The number of Workers for this Windows Function App. + // +kubebuilder:validation:Optional + WorkerCount *float64 `json:"workerCount,omitempty" tf:"worker_count,omitempty"` +} + +type WindowsFunctionAppSlotSiteConfigScmIPRestrictionHeadersInitParameters struct { + + // Specifies a list of Azure Front Door IDs. + XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid"` + + // Specifies if a Front Door Health Probe should be expected. The only possible value is 1. 
+ XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe"` + + // Specifies a list of addresses for which matching should be applied. Omitting this value means allow any. + XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for"` + + // Specifies a list of Hosts for which matching should be applied. + XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host"` +} + +type WindowsFunctionAppSlotSiteConfigScmIPRestrictionHeadersObservation struct { + + // Specifies a list of Azure Front Door IDs. + XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid,omitempty"` + + // Specifies if a Front Door Health Probe should be expected. The only possible value is 1. + XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe,omitempty"` + + // Specifies a list of addresses for which matching should be applied. Omitting this value means allow any. + XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for,omitempty"` + + // Specifies a list of Hosts for which matching should be applied. + XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host,omitempty"` +} + +type WindowsFunctionAppSlotSiteConfigScmIPRestrictionHeadersParameters struct { + + // Specifies a list of Azure Front Door IDs. + // +kubebuilder:validation:Optional + XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid"` + + // Specifies if a Front Door Health Probe should be expected. The only possible value is 1. + // +kubebuilder:validation:Optional + XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe"` + + // Specifies a list of addresses for which matching should be applied. Omitting this value means allow any. + // +kubebuilder:validation:Optional + XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for"` + + // Specifies a list of Hosts for which matching should be applied. 
+ // +kubebuilder:validation:Optional + XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host"` +} + +type WindowsFunctionAppSlotSiteConfigScmIPRestrictionInitParameters struct { + + // The action to take. Possible values are Allow or Deny. Defaults to Allow. + // The action to take. Possible values are `Allow` or `Deny`. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The Description of this IP Restriction. + // The description of the IP restriction rule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // a headers block as detailed below. + Headers []WindowsFunctionAppSlotSiteConfigScmIPRestrictionHeadersInitParameters `json:"headers,omitempty" tf:"headers,omitempty"` + + // The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + // The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // The name which should be used for this Storage Account. + // The name which should be used for this `ip_restriction`. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The priority value of this ip_restriction. Defaults to 65000. + // The priority value of this `ip_restriction`. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Service Tag used for this IP Restriction. + // The Service Tag used for this IP Restriction. + ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag,omitempty"` + + // The subnet id which will be used by this Function App Slot for regional virtual network integration. + // The Virtual Network Subnet ID used for this IP Restriction. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDRef *v1.Reference `json:"virtualNetworkSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDSelector *v1.Selector `json:"virtualNetworkSubnetIdSelector,omitempty" tf:"-"` +} + +type WindowsFunctionAppSlotSiteConfigScmIPRestrictionObservation struct { + + // The action to take. Possible values are Allow or Deny. Defaults to Allow. + // The action to take. Possible values are `Allow` or `Deny`. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The Description of this IP Restriction. + // The description of the IP restriction rule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // a headers block as detailed below. + Headers []WindowsFunctionAppSlotSiteConfigScmIPRestrictionHeadersObservation `json:"headers,omitempty" tf:"headers,omitempty"` + + // The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + // The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // The name which should be used for this Storage Account. + // The name which should be used for this `ip_restriction`. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The priority value of this ip_restriction. Defaults to 65000. 
+ // The priority value of this `ip_restriction`. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Service Tag used for this IP Restriction. + // The Service Tag used for this IP Restriction. + ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag,omitempty"` + + // The subnet id which will be used by this Function App Slot for regional virtual network integration. + // The Virtual Network Subnet ID used for this IP Restriction. + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` +} + +type WindowsFunctionAppSlotSiteConfigScmIPRestrictionParameters struct { + + // The action to take. Possible values are Allow or Deny. Defaults to Allow. + // The action to take. Possible values are `Allow` or `Deny`. + // +kubebuilder:validation:Optional + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The Description of this IP Restriction. + // The description of the IP restriction rule. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // a headers block as detailed below. + // +kubebuilder:validation:Optional + Headers []WindowsFunctionAppSlotSiteConfigScmIPRestrictionHeadersParameters `json:"headers,omitempty" tf:"headers,omitempty"` + + // The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + // The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + // +kubebuilder:validation:Optional + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // The name which should be used for this Storage Account. + // The name which should be used for this `ip_restriction`. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The priority value of this ip_restriction. 
Defaults to 65000. + // The priority value of this `ip_restriction`. + // +kubebuilder:validation:Optional + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Service Tag used for this IP Restriction. + // The Service Tag used for this IP Restriction. + // +kubebuilder:validation:Optional + ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag,omitempty"` + + // The subnet id which will be used by this Function App Slot for regional virtual network integration. + // The Virtual Network Subnet ID used for this IP Restriction. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDRef *v1.Reference `json:"virtualNetworkSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDSelector *v1.Selector `json:"virtualNetworkSubnetIdSelector,omitempty" tf:"-"` +} + +type WindowsFunctionAppSlotSiteCredentialInitParameters struct { +} + +type WindowsFunctionAppSlotSiteCredentialObservation struct { + + // The Site Credentials Username used for publishing. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Site Credentials Password used for publishing. + Password *string `json:"password,omitempty" tf:"password,omitempty"` +} + +type WindowsFunctionAppSlotSiteCredentialParameters struct { +} + +type WindowsFunctionAppSlotStorageAccountInitParameters struct { + + // The Name of the Storage Account. 
+ AccountName *string `json:"accountName,omitempty" tf:"account_name,omitempty"` + + // The path at which to mount the storage share. + MountPath *string `json:"mountPath,omitempty" tf:"mount_path,omitempty"` + + // The name which should be used for this Storage Account. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Name of the File Share or Container Name for Blob storage. + ShareName *string `json:"shareName,omitempty" tf:"share_name,omitempty"` + + // The Azure Storage Type. Possible values include AzureFiles. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type WindowsFunctionAppSlotStorageAccountObservation struct { + + // The Name of the Storage Account. + AccountName *string `json:"accountName,omitempty" tf:"account_name,omitempty"` + + // The path at which to mount the storage share. + MountPath *string `json:"mountPath,omitempty" tf:"mount_path,omitempty"` + + // The name which should be used for this Storage Account. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Name of the File Share or Container Name for Blob storage. + ShareName *string `json:"shareName,omitempty" tf:"share_name,omitempty"` + + // The Azure Storage Type. Possible values include AzureFiles. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type WindowsFunctionAppSlotStorageAccountParameters struct { + + // The Access key for the storage account. + // +kubebuilder:validation:Required + AccessKeySecretRef v1.SecretKeySelector `json:"accessKeySecretRef" tf:"-"` + + // The Name of the Storage Account. + // +kubebuilder:validation:Optional + AccountName *string `json:"accountName" tf:"account_name,omitempty"` + + // The path at which to mount the storage share. + // +kubebuilder:validation:Optional + MountPath *string `json:"mountPath,omitempty" tf:"mount_path,omitempty"` + + // The name which should be used for this Storage Account. 
+ // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The Name of the File Share or Container Name for Blob storage. + // +kubebuilder:validation:Optional + ShareName *string `json:"shareName" tf:"share_name,omitempty"` + + // The Azure Storage Type. Possible values include AzureFiles. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +// WindowsFunctionAppSlotSpec defines the desired state of WindowsFunctionAppSlot +type WindowsFunctionAppSlotSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider WindowsFunctionAppSlotParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider WindowsFunctionAppSlotInitParameters `json:"initProvider,omitempty"` +} + +// WindowsFunctionAppSlotStatus defines the observed state of WindowsFunctionAppSlot. +type WindowsFunctionAppSlotStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider WindowsFunctionAppSlotObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// WindowsFunctionAppSlot is the Schema for the WindowsFunctionAppSlots API. Manages a Windows Function App Slot. 
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type WindowsFunctionAppSlot struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.siteConfig) || (has(self.initProvider) && has(self.initProvider.siteConfig))",message="spec.forProvider.siteConfig is a required parameter" + Spec WindowsFunctionAppSlotSpec `json:"spec"` + Status WindowsFunctionAppSlotStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// WindowsFunctionAppSlotList contains a list of WindowsFunctionAppSlots +type WindowsFunctionAppSlotList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []WindowsFunctionAppSlot `json:"items"` +} + +// Repository type metadata. +var ( + WindowsFunctionAppSlot_Kind = "WindowsFunctionAppSlot" + WindowsFunctionAppSlot_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: WindowsFunctionAppSlot_Kind}.String() + WindowsFunctionAppSlot_KindAPIVersion = WindowsFunctionAppSlot_Kind + "." 
+ CRDGroupVersion.String() + WindowsFunctionAppSlot_GroupVersionKind = CRDGroupVersion.WithKind(WindowsFunctionAppSlot_Kind) +) + +func init() { + SchemeBuilder.Register(&WindowsFunctionAppSlot{}, &WindowsFunctionAppSlotList{}) +} diff --git a/apis/web/v1beta2/zz_windowswebapp_terraformed.go b/apis/web/v1beta2/zz_windowswebapp_terraformed.go new file mode 100755 index 000000000..7ed52e6f0 --- /dev/null +++ b/apis/web/v1beta2/zz_windowswebapp_terraformed.go @@ -0,0 +1,130 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this WindowsWebApp +func (mg *WindowsWebApp) GetTerraformResourceType() string { + return "azurerm_windows_web_app" +} + +// GetConnectionDetailsMapping for this WindowsWebApp +func (tr *WindowsWebApp) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"auth_settings[*].active_directory[*].client_secret": "spec.forProvider.authSettings[*].activeDirectory[*].clientSecretSecretRef", "auth_settings[*].facebook[*].app_secret": "spec.forProvider.authSettings[*].facebook[*].appSecretSecretRef", "auth_settings[*].github[*].client_secret": "spec.forProvider.authSettings[*].github[*].clientSecretSecretRef", "auth_settings[*].google[*].client_secret": "spec.forProvider.authSettings[*].google[*].clientSecretSecretRef", "auth_settings[*].microsoft[*].client_secret": "spec.forProvider.authSettings[*].microsoft[*].clientSecretSecretRef", "auth_settings[*].twitter[*].consumer_secret": "spec.forProvider.authSettings[*].twitter[*].consumerSecretSecretRef", "backup[*].storage_account_url": "spec.forProvider.backup[*].storageAccountUrlSecretRef", "connection_string[*].value": 
"spec.forProvider.connectionString[*].valueSecretRef", "custom_domain_verification_id": "status.atProvider.customDomainVerificationId", "logs[*].http_logs[*].azure_blob_storage[*].sas_url": "spec.forProvider.logs[*].httpLogs[*].azureBlobStorage[*].sasUrlSecretRef", "site_config[*].application_stack[*].docker_registry_password": "spec.forProvider.siteConfig[*].applicationStack[*].dockerRegistryPasswordSecretRef", "site_credential[*]": "status.atProvider.siteCredential[*]", "storage_account[*].access_key": "spec.forProvider.storageAccount[*].accessKeySecretRef"} +} + +// GetObservation of this WindowsWebApp +func (tr *WindowsWebApp) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this WindowsWebApp +func (tr *WindowsWebApp) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this WindowsWebApp +func (tr *WindowsWebApp) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this WindowsWebApp +func (tr *WindowsWebApp) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this WindowsWebApp +func (tr *WindowsWebApp) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this WindowsWebApp +func (tr *WindowsWebApp) GetInitParameters() (map[string]any, error) { + p, err := 
json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this WindowsWebApp +func (tr *WindowsWebApp) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this WindowsWebApp using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *WindowsWebApp) LateInitialize(attrs []byte) (bool, error) { + params := &WindowsWebAppParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + opts = append(opts, resource.WithNameFilter("KeyVaultReferenceIdentityID")) + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *WindowsWebApp) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/web/v1beta2/zz_windowswebapp_types.go b/apis/web/v1beta2/zz_windowswebapp_types.go new file mode 100755 index 000000000..8dc154bf7 --- /dev/null +++ b/apis/web/v1beta2/zz_windowswebapp_types.go @@ -0,0 +1,3738 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AutoHealSettingTriggerRequestsInitParameters struct { + + // The number of occurrences of the defined status_code in the specified interval on which to trigger this rule. + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` + + // The time interval in the form hh:mm:ss. + Interval *string `json:"interval,omitempty" tf:"interval,omitempty"` +} + +type AutoHealSettingTriggerRequestsObservation struct { + + // The number of occurrences of the defined status_code in the specified interval on which to trigger this rule. + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` + + // The time interval in the form hh:mm:ss. + Interval *string `json:"interval,omitempty" tf:"interval,omitempty"` +} + +type AutoHealSettingTriggerRequestsParameters struct { + + // The number of occurrences of the defined status_code in the specified interval on which to trigger this rule. + // +kubebuilder:validation:Optional + Count *float64 `json:"count" tf:"count,omitempty"` + + // The time interval in the form hh:mm:ss. 
+ // +kubebuilder:validation:Optional + Interval *string `json:"interval" tf:"interval,omitempty"` +} + +type AutoHealSettingTriggerSlowRequestInitParameters struct { + + // The number of occurrences of the defined status_code in the specified interval on which to trigger this rule. + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` + + // The time interval in the form hh:mm:ss. + Interval *string `json:"interval,omitempty" tf:"interval,omitempty"` + + // The path to which this rule status code applies. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // The threshold of time passed to qualify as a Slow Request in hh:mm:ss. + TimeTaken *string `json:"timeTaken,omitempty" tf:"time_taken,omitempty"` +} + +type AutoHealSettingTriggerSlowRequestObservation struct { + + // The number of occurrences of the defined status_code in the specified interval on which to trigger this rule. + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` + + // The time interval in the form hh:mm:ss. + Interval *string `json:"interval,omitempty" tf:"interval,omitempty"` + + // The path to which this rule status code applies. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // The threshold of time passed to qualify as a Slow Request in hh:mm:ss. + TimeTaken *string `json:"timeTaken,omitempty" tf:"time_taken,omitempty"` +} + +type AutoHealSettingTriggerSlowRequestParameters struct { + + // The number of occurrences of the defined status_code in the specified interval on which to trigger this rule. + // +kubebuilder:validation:Optional + Count *float64 `json:"count" tf:"count,omitempty"` + + // The time interval in the form hh:mm:ss. + // +kubebuilder:validation:Optional + Interval *string `json:"interval" tf:"interval,omitempty"` + + // The path to which this rule status code applies. 
+ // +kubebuilder:validation:Optional + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // The threshold of time passed to qualify as a Slow Request in hh:mm:ss. + // +kubebuilder:validation:Optional + TimeTaken *string `json:"timeTaken" tf:"time_taken,omitempty"` +} + +type AutoHealSettingTriggerStatusCodeInitParameters struct { + + // The number of occurrences of the defined status_code in the specified interval on which to trigger this rule. + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` + + // The time interval in the form hh:mm:ss. + Interval *string `json:"interval,omitempty" tf:"interval,omitempty"` + + // The path to which this rule status code applies. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // The status code for this rule, accepts single status codes and status code ranges. e.g. 500 or 400-499. Possible values are integers between 101 and 599 + StatusCodeRange *string `json:"statusCodeRange,omitempty" tf:"status_code_range,omitempty"` + + // The Request Sub Status of the Status Code. + SubStatus *float64 `json:"subStatus,omitempty" tf:"sub_status,omitempty"` + + // The Win32 Status Code of the Request. + Win32StatusCode *float64 `json:"win32StatusCode,omitempty" tf:"win32_status_code,omitempty"` +} + +type AutoHealSettingTriggerStatusCodeObservation struct { + + // The number of occurrences of the defined status_code in the specified interval on which to trigger this rule. + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` + + // The time interval in the form hh:mm:ss. + Interval *string `json:"interval,omitempty" tf:"interval,omitempty"` + + // The path to which this rule status code applies. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // The status code for this rule, accepts single status codes and status code ranges. e.g. 500 or 400-499. 
Possible values are integers between 101 and 599 + StatusCodeRange *string `json:"statusCodeRange,omitempty" tf:"status_code_range,omitempty"` + + // The Request Sub Status of the Status Code. + SubStatus *float64 `json:"subStatus,omitempty" tf:"sub_status,omitempty"` + + // The Win32 Status Code of the Request. + Win32StatusCode *float64 `json:"win32StatusCode,omitempty" tf:"win32_status_code,omitempty"` +} + +type AutoHealSettingTriggerStatusCodeParameters struct { + + // The number of occurrences of the defined status_code in the specified interval on which to trigger this rule. + // +kubebuilder:validation:Optional + Count *float64 `json:"count" tf:"count,omitempty"` + + // The time interval in the form hh:mm:ss. + // +kubebuilder:validation:Optional + Interval *string `json:"interval" tf:"interval,omitempty"` + + // The path to which this rule status code applies. + // +kubebuilder:validation:Optional + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // The status code for this rule, accepts single status codes and status code ranges. e.g. 500 or 400-499. Possible values are integers between 101 and 599 + // +kubebuilder:validation:Optional + StatusCodeRange *string `json:"statusCodeRange" tf:"status_code_range,omitempty"` + + // The Request Sub Status of the Status Code. + // +kubebuilder:validation:Optional + SubStatus *float64 `json:"subStatus,omitempty" tf:"sub_status,omitempty"` + + // The Win32 Status Code of the Request. + // +kubebuilder:validation:Optional + Win32StatusCode *float64 `json:"win32StatusCode,omitempty" tf:"win32_status_code,omitempty"` +} + +type CustomActionInitParameters struct { + + // The executable to run for the custom_action. + Executable *string `json:"executable,omitempty" tf:"executable,omitempty"` + + // The parameters to pass to the specified executable. 
+ Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type CustomActionObservation struct { + + // The executable to run for the custom_action. + Executable *string `json:"executable,omitempty" tf:"executable,omitempty"` + + // The parameters to pass to the specified executable. + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type CustomActionParameters struct { + + // The executable to run for the custom_action. + // +kubebuilder:validation:Optional + Executable *string `json:"executable" tf:"executable,omitempty"` + + // The parameters to pass to the specified executable. + // +kubebuilder:validation:Optional + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type LogsApplicationLogsAzureBlobStorageInitParameters struct { + + // The level at which to log. Possible values include Error, Warning, Information, Verbose and Off. NOTE: this field is not available for http_logs + Level *string `json:"level,omitempty" tf:"level,omitempty"` + + // The retention period in days. A values of 0 means no retention. + RetentionInDays *float64 `json:"retentionInDays,omitempty" tf:"retention_in_days,omitempty"` + + // SAS url to an Azure blob container with read/write/list/delete permissions. + SASURL *string `json:"sasUrl,omitempty" tf:"sas_url,omitempty"` +} + +type LogsApplicationLogsAzureBlobStorageObservation struct { + + // The level at which to log. Possible values include Error, Warning, Information, Verbose and Off. NOTE: this field is not available for http_logs + Level *string `json:"level,omitempty" tf:"level,omitempty"` + + // The retention period in days. A values of 0 means no retention. + RetentionInDays *float64 `json:"retentionInDays,omitempty" tf:"retention_in_days,omitempty"` + + // SAS url to an Azure blob container with read/write/list/delete permissions. 
+ SASURL *string `json:"sasUrl,omitempty" tf:"sas_url,omitempty"` +} + +type LogsApplicationLogsAzureBlobStorageParameters struct { + + // The level at which to log. Possible values include Error, Warning, Information, Verbose and Off. NOTE: this field is not available for http_logs + // +kubebuilder:validation:Optional + Level *string `json:"level" tf:"level,omitempty"` + + // The retention period in days. A values of 0 means no retention. + // +kubebuilder:validation:Optional + RetentionInDays *float64 `json:"retentionInDays" tf:"retention_in_days,omitempty"` + + // SAS url to an Azure blob container with read/write/list/delete permissions. + // +kubebuilder:validation:Optional + SASURL *string `json:"sasUrl" tf:"sas_url,omitempty"` +} + +type LogsHTTPLogsFileSystemInitParameters struct { + + // The retention period in days. A values of 0 means no retention. + RetentionInDays *float64 `json:"retentionInDays,omitempty" tf:"retention_in_days,omitempty"` + + // The maximum size in megabytes that log files can use. + RetentionInMb *float64 `json:"retentionInMb,omitempty" tf:"retention_in_mb,omitempty"` +} + +type LogsHTTPLogsFileSystemObservation struct { + + // The retention period in days. A values of 0 means no retention. + RetentionInDays *float64 `json:"retentionInDays,omitempty" tf:"retention_in_days,omitempty"` + + // The maximum size in megabytes that log files can use. + RetentionInMb *float64 `json:"retentionInMb,omitempty" tf:"retention_in_mb,omitempty"` +} + +type LogsHTTPLogsFileSystemParameters struct { + + // The retention period in days. A values of 0 means no retention. + // +kubebuilder:validation:Optional + RetentionInDays *float64 `json:"retentionInDays" tf:"retention_in_days,omitempty"` + + // The maximum size in megabytes that log files can use. 
+ // +kubebuilder:validation:Optional + RetentionInMb *float64 `json:"retentionInMb" tf:"retention_in_mb,omitempty"` +} + +type SiteConfigAutoHealSettingActionInitParameters struct { + + // Predefined action to be taken to an Auto Heal trigger. Possible values include: Recycle, LogEvent, and CustomAction. + ActionType *string `json:"actionType,omitempty" tf:"action_type,omitempty"` + + // A custom_action block as defined below. + CustomAction *CustomActionInitParameters `json:"customAction,omitempty" tf:"custom_action,omitempty"` + + // The minimum amount of time in hh:mm:ss the Windows Web App must have been running before the defined action will be run in the event of a trigger. + MinimumProcessExecutionTime *string `json:"minimumProcessExecutionTime,omitempty" tf:"minimum_process_execution_time,omitempty"` +} + +type SiteConfigAutoHealSettingActionObservation struct { + + // Predefined action to be taken to an Auto Heal trigger. Possible values include: Recycle, LogEvent, and CustomAction. + ActionType *string `json:"actionType,omitempty" tf:"action_type,omitempty"` + + // A custom_action block as defined below. + CustomAction *CustomActionObservation `json:"customAction,omitempty" tf:"custom_action,omitempty"` + + // The minimum amount of time in hh:mm:ss the Windows Web App must have been running before the defined action will be run in the event of a trigger. + MinimumProcessExecutionTime *string `json:"minimumProcessExecutionTime,omitempty" tf:"minimum_process_execution_time,omitempty"` +} + +type SiteConfigAutoHealSettingActionParameters struct { + + // Predefined action to be taken to an Auto Heal trigger. Possible values include: Recycle, LogEvent, and CustomAction. + // +kubebuilder:validation:Optional + ActionType *string `json:"actionType" tf:"action_type,omitempty"` + + // A custom_action block as defined below. 
+ // +kubebuilder:validation:Optional + CustomAction *CustomActionParameters `json:"customAction,omitempty" tf:"custom_action,omitempty"` + + // The minimum amount of time in hh:mm:ss the Windows Web App must have been running before the defined action will be run in the event of a trigger. + // +kubebuilder:validation:Optional + MinimumProcessExecutionTime *string `json:"minimumProcessExecutionTime,omitempty" tf:"minimum_process_execution_time,omitempty"` +} + +type SiteConfigAutoHealSettingTriggerInitParameters struct { + + // The amount of Private Memory to be consumed for this rule to trigger. Possible values are between 102400 and 13631488. + PrivateMemoryKb *float64 `json:"privateMemoryKb,omitempty" tf:"private_memory_kb,omitempty"` + + // A requests block as defined above. + Requests *AutoHealSettingTriggerRequestsInitParameters `json:"requests,omitempty" tf:"requests,omitempty"` + + // One or more slow_request blocks as defined above. + SlowRequest *AutoHealSettingTriggerSlowRequestInitParameters `json:"slowRequest,omitempty" tf:"slow_request,omitempty"` + + // One or more status_code blocks as defined above. + StatusCode []AutoHealSettingTriggerStatusCodeInitParameters `json:"statusCode,omitempty" tf:"status_code,omitempty"` +} + +type SiteConfigAutoHealSettingTriggerObservation struct { + + // The amount of Private Memory to be consumed for this rule to trigger. Possible values are between 102400 and 13631488. + PrivateMemoryKb *float64 `json:"privateMemoryKb,omitempty" tf:"private_memory_kb,omitempty"` + + // A requests block as defined above. + Requests *AutoHealSettingTriggerRequestsObservation `json:"requests,omitempty" tf:"requests,omitempty"` + + // One or more slow_request blocks as defined above. + SlowRequest *AutoHealSettingTriggerSlowRequestObservation `json:"slowRequest,omitempty" tf:"slow_request,omitempty"` + + // One or more status_code blocks as defined above. 
+ StatusCode []AutoHealSettingTriggerStatusCodeObservation `json:"statusCode,omitempty" tf:"status_code,omitempty"` +} + +type SiteConfigAutoHealSettingTriggerParameters struct { + + // The amount of Private Memory to be consumed for this rule to trigger. Possible values are between 102400 and 13631488. + // +kubebuilder:validation:Optional + PrivateMemoryKb *float64 `json:"privateMemoryKb,omitempty" tf:"private_memory_kb,omitempty"` + + // A requests block as defined above. + // +kubebuilder:validation:Optional + Requests *AutoHealSettingTriggerRequestsParameters `json:"requests,omitempty" tf:"requests,omitempty"` + + // One or more slow_request blocks as defined above. + // +kubebuilder:validation:Optional + SlowRequest *AutoHealSettingTriggerSlowRequestParameters `json:"slowRequest,omitempty" tf:"slow_request,omitempty"` + + // One or more status_code blocks as defined above. + // +kubebuilder:validation:Optional + StatusCode []AutoHealSettingTriggerStatusCodeParameters `json:"statusCode,omitempty" tf:"status_code,omitempty"` +} + +type VirtualApplicationInitParameters struct { + + // The physical path for the Virtual Application. + PhysicalPath *string `json:"physicalPath,omitempty" tf:"physical_path,omitempty"` + + // Should pre-loading be enabled. + Preload *bool `json:"preload,omitempty" tf:"preload,omitempty"` + + // One or more virtual_directory blocks as defined below. + VirtualDirectory []VirtualDirectoryInitParameters `json:"virtualDirectory,omitempty" tf:"virtual_directory,omitempty"` + + // The Virtual Path for the Virtual Application. + VirtualPath *string `json:"virtualPath,omitempty" tf:"virtual_path,omitempty"` +} + +type VirtualApplicationObservation struct { + + // The physical path for the Virtual Application. + PhysicalPath *string `json:"physicalPath,omitempty" tf:"physical_path,omitempty"` + + // Should pre-loading be enabled. 
+ Preload *bool `json:"preload,omitempty" tf:"preload,omitempty"` + + // One or more virtual_directory blocks as defined below. + VirtualDirectory []VirtualDirectoryObservation `json:"virtualDirectory,omitempty" tf:"virtual_directory,omitempty"` + + // The Virtual Path for the Virtual Application. + VirtualPath *string `json:"virtualPath,omitempty" tf:"virtual_path,omitempty"` +} + +type VirtualApplicationParameters struct { + + // The physical path for the Virtual Application. + // +kubebuilder:validation:Optional + PhysicalPath *string `json:"physicalPath" tf:"physical_path,omitempty"` + + // Should pre-loading be enabled. + // +kubebuilder:validation:Optional + Preload *bool `json:"preload" tf:"preload,omitempty"` + + // One or more virtual_directory blocks as defined below. + // +kubebuilder:validation:Optional + VirtualDirectory []VirtualDirectoryParameters `json:"virtualDirectory,omitempty" tf:"virtual_directory,omitempty"` + + // The Virtual Path for the Virtual Application. + // +kubebuilder:validation:Optional + VirtualPath *string `json:"virtualPath" tf:"virtual_path,omitempty"` +} + +type VirtualDirectoryInitParameters struct { + + // The physical path for the Virtual Application. + PhysicalPath *string `json:"physicalPath,omitempty" tf:"physical_path,omitempty"` + + // The Virtual Path for the Virtual Application. + VirtualPath *string `json:"virtualPath,omitempty" tf:"virtual_path,omitempty"` +} + +type VirtualDirectoryObservation struct { + + // The physical path for the Virtual Application. + PhysicalPath *string `json:"physicalPath,omitempty" tf:"physical_path,omitempty"` + + // The Virtual Path for the Virtual Application. + VirtualPath *string `json:"virtualPath,omitempty" tf:"virtual_path,omitempty"` +} + +type VirtualDirectoryParameters struct { + + // The physical path for the Virtual Application. 
+ // +kubebuilder:validation:Optional + PhysicalPath *string `json:"physicalPath,omitempty" tf:"physical_path,omitempty"` + + // The Virtual Path for the Virtual Application. + // +kubebuilder:validation:Optional + VirtualPath *string `json:"virtualPath,omitempty" tf:"virtual_path,omitempty"` +} + +type WindowsWebAppAuthSettingsActiveDirectoryInitParameters struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Active Directory. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The App Setting name that contains the client secret of the Client. Cannot be used with `client_secret`. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` +} + +type WindowsWebAppAuthSettingsActiveDirectoryObservation struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Active Directory. 
+ ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The App Setting name that contains the client secret of the Client. Cannot be used with `client_secret`. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` +} + +type WindowsWebAppAuthSettingsActiveDirectoryParameters struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + // +kubebuilder:validation:Optional + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Active Directory. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. + // The Client Secret for the Client ID. Cannot be used with `client_secret_setting_name`. + // +kubebuilder:validation:Optional + ClientSecretSecretRef *v1.SecretKeySelector `json:"clientSecretSecretRef,omitempty" tf:"-"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The App Setting name that contains the client secret of the Client. Cannot be used with `client_secret`. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` +} + +type WindowsWebAppAuthSettingsFacebookInitParameters struct { + + // The App ID of the Facebook app used for login. 
+ // The App ID of the Facebook app used for login. + AppID *string `json:"appId,omitempty" tf:"app_id,omitempty"` + + // The app setting name that contains the app_secret value used for Facebook Login. + // The app setting name that contains the `app_secret` value used for Facebook Login. Cannot be specified with `app_secret`. + AppSecretSettingName *string `json:"appSecretSettingName,omitempty" tf:"app_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + // Specifies a list of OAuth 2.0 scopes to be requested as part of Facebook Login authentication. + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type WindowsWebAppAuthSettingsFacebookObservation struct { + + // The App ID of the Facebook app used for login. + // The App ID of the Facebook app used for login. + AppID *string `json:"appId,omitempty" tf:"app_id,omitempty"` + + // The app setting name that contains the app_secret value used for Facebook Login. + // The app setting name that contains the `app_secret` value used for Facebook Login. Cannot be specified with `app_secret`. + AppSecretSettingName *string `json:"appSecretSettingName,omitempty" tf:"app_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + // Specifies a list of OAuth 2.0 scopes to be requested as part of Facebook Login authentication. + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type WindowsWebAppAuthSettingsFacebookParameters struct { + + // The App ID of the Facebook app used for login. + // The App ID of the Facebook app used for login. 
+ // +kubebuilder:validation:Optional + AppID *string `json:"appId" tf:"app_id,omitempty"` + + // The App Secret of the Facebook app used for Facebook login. Cannot be specified with app_secret_setting_name. + // The App Secret of the Facebook app used for Facebook Login. Cannot be specified with `app_secret_setting_name`. + // +kubebuilder:validation:Optional + AppSecretSecretRef *v1.SecretKeySelector `json:"appSecretSecretRef,omitempty" tf:"-"` + + // The app setting name that contains the app_secret value used for Facebook Login. + // The app setting name that contains the `app_secret` value used for Facebook Login. Cannot be specified with `app_secret`. + // +kubebuilder:validation:Optional + AppSecretSettingName *string `json:"appSecretSettingName,omitempty" tf:"app_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + // Specifies a list of OAuth 2.0 scopes to be requested as part of Facebook Login authentication. + // +kubebuilder:validation:Optional + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type WindowsWebAppAuthSettingsGithubInitParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the GitHub app used for login. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for GitHub Login. Cannot be specified with `client_secret`. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. 
If not specified, "wl.basic" is used as the default scope. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type WindowsWebAppAuthSettingsGithubObservation struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the GitHub app used for login. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for GitHub Login. Cannot be specified with `client_secret`. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type WindowsWebAppAuthSettingsGithubParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the GitHub app used for login. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. + // The Client Secret of the GitHub app used for GitHub Login. Cannot be specified with `client_secret_setting_name`. 
+ // +kubebuilder:validation:Optional + ClientSecretSecretRef *v1.SecretKeySelector `json:"clientSecretSecretRef,omitempty" tf:"-"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for GitHub Login. Cannot be specified with `client_secret`. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + // +kubebuilder:validation:Optional + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type WindowsWebAppAuthSettingsGoogleInitParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Google web application. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Google Login. Cannot be specified with `client_secret`. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication. If not specified, "openid", "profile", and "email" are used as default scopes. 
+ OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type WindowsWebAppAuthSettingsGoogleObservation struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Google web application. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Google Login. Cannot be specified with `client_secret`. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication. If not specified, "openid", "profile", and "email" are used as default scopes. + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type WindowsWebAppAuthSettingsGoogleParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Google web application. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. + // The client secret associated with the Google web application. Cannot be specified with `client_secret_setting_name`. 
+ // +kubebuilder:validation:Optional + ClientSecretSecretRef *v1.SecretKeySelector `json:"clientSecretSecretRef,omitempty" tf:"-"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Google Login. Cannot be specified with `client_secret`. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication. If not specified, "openid", "profile", and "email" are used as default scopes. + // +kubebuilder:validation:Optional + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type WindowsWebAppAuthSettingsInitParameters struct { + + // An active_directory block as defined above. + ActiveDirectory *WindowsWebAppAuthSettingsActiveDirectoryInitParameters `json:"activeDirectory,omitempty" tf:"active_directory,omitempty"` + + // Specifies a map of login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + // Specifies a map of Login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + // +mapType=granular + AdditionalLoginParameters map[string]*string `json:"additionalLoginParameters,omitempty" tf:"additional_login_parameters,omitempty"` + + // Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + // Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. 
+ AllowedExternalRedirectUrls []*string `json:"allowedExternalRedirectUrls,omitempty" tf:"allowed_external_redirect_urls,omitempty"` + + // The default authentication provider to use when multiple providers are configured. Possible values include: AzureActiveDirectory, Facebook, Google, MicrosoftAccount, Twitter, Github + // The default authentication provider to use when multiple providers are configured. Possible values include: `AzureActiveDirectory`, `Facebook`, `Google`, `MicrosoftAccount`, `Twitter`, `Github`. + DefaultProvider *string `json:"defaultProvider,omitempty" tf:"default_provider,omitempty"` + + // Should the Authentication / Authorization feature is enabled for the Windows Web App be enabled? + // Should the Authentication / Authorization feature be enabled? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // A facebook block as defined below. + Facebook *WindowsWebAppAuthSettingsFacebookInitParameters `json:"facebook,omitempty" tf:"facebook,omitempty"` + + // A github block as defined below. + Github *WindowsWebAppAuthSettingsGithubInitParameters `json:"github,omitempty" tf:"github,omitempty"` + + // A google block as defined below. + Google *WindowsWebAppAuthSettingsGoogleInitParameters `json:"google,omitempty" tf:"google,omitempty"` + + // The OpenID Connect Issuer URI that represents the entity which issues access tokens for this Windows Web App. + // The OpenID Connect Issuer URI that represents the entity which issues access tokens. + Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"` + + // A microsoft block as defined below. + Microsoft *WindowsWebAppAuthSettingsMicrosoftInitParameters `json:"microsoft,omitempty" tf:"microsoft,omitempty"` + + // The RuntimeVersion of the Authentication / Authorization feature in use for the Windows Web App. + // The RuntimeVersion of the Authentication / Authorization feature in use. 
+ RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` + + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + TokenRefreshExtensionHours *float64 `json:"tokenRefreshExtensionHours,omitempty" tf:"token_refresh_extension_hours,omitempty"` + + // Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to false. + // Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to `false`. + TokenStoreEnabled *bool `json:"tokenStoreEnabled,omitempty" tf:"token_store_enabled,omitempty"` + + // A twitter block as defined below. + Twitter *WindowsWebAppAuthSettingsTwitterInitParameters `json:"twitter,omitempty" tf:"twitter,omitempty"` + + // The action to take when an unauthenticated client attempts to access the app. Possible values include: RedirectToLoginPage, AllowAnonymous. + // The action to take when an unauthenticated client attempts to access the app. Possible values include: `RedirectToLoginPage`, `AllowAnonymous`. + UnauthenticatedClientAction *string `json:"unauthenticatedClientAction,omitempty" tf:"unauthenticated_client_action,omitempty"` +} + +type WindowsWebAppAuthSettingsMicrosoftInitParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OAuth 2.0 client ID that was created for the app used for authentication. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. 
+ // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret`. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + // The list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, `wl.basic` is used as the default scope. + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type WindowsWebAppAuthSettingsMicrosoftObservation struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OAuth 2.0 client ID that was created for the app used for authentication. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret`. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + // The list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, `wl.basic` is used as the default scope. + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type WindowsWebAppAuthSettingsMicrosoftParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. 
+ // The OAuth 2.0 client ID that was created for the app used for authentication. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. + // The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret_setting_name`. + // +kubebuilder:validation:Optional + ClientSecretSecretRef *v1.SecretKeySelector `json:"clientSecretSecretRef,omitempty" tf:"-"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret`. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + // The list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, `wl.basic` is used as the default scope. + // +kubebuilder:validation:Optional + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type WindowsWebAppAuthSettingsObservation struct { + + // An active_directory block as defined above. + ActiveDirectory *WindowsWebAppAuthSettingsActiveDirectoryObservation `json:"activeDirectory,omitempty" tf:"active_directory,omitempty"` + + // Specifies a map of login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + // Specifies a map of Login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. 
+ // +mapType=granular + AdditionalLoginParameters map[string]*string `json:"additionalLoginParameters,omitempty" tf:"additional_login_parameters,omitempty"` + + // Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + // Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + AllowedExternalRedirectUrls []*string `json:"allowedExternalRedirectUrls,omitempty" tf:"allowed_external_redirect_urls,omitempty"` + + // The default authentication provider to use when multiple providers are configured. Possible values include: AzureActiveDirectory, Facebook, Google, MicrosoftAccount, Twitter, Github + // The default authentication provider to use when multiple providers are configured. Possible values include: `AzureActiveDirectory`, `Facebook`, `Google`, `MicrosoftAccount`, `Twitter`, `Github`. + DefaultProvider *string `json:"defaultProvider,omitempty" tf:"default_provider,omitempty"` + + // Should the Authentication / Authorization feature is enabled for the Windows Web App be enabled? + // Should the Authentication / Authorization feature be enabled? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // A facebook block as defined below. + Facebook *WindowsWebAppAuthSettingsFacebookObservation `json:"facebook,omitempty" tf:"facebook,omitempty"` + + // A github block as defined below. + Github *WindowsWebAppAuthSettingsGithubObservation `json:"github,omitempty" tf:"github,omitempty"` + + // A google block as defined below. + Google *WindowsWebAppAuthSettingsGoogleObservation `json:"google,omitempty" tf:"google,omitempty"` + + // The OpenID Connect Issuer URI that represents the entity which issues access tokens for this Windows Web App. + // The OpenID Connect Issuer URI that represents the entity which issues access tokens. 
+ Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"` + + // A microsoft block as defined below. + Microsoft *WindowsWebAppAuthSettingsMicrosoftObservation `json:"microsoft,omitempty" tf:"microsoft,omitempty"` + + // The RuntimeVersion of the Authentication / Authorization feature in use for the Windows Web App. + // The RuntimeVersion of the Authentication / Authorization feature in use. + RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` + + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + TokenRefreshExtensionHours *float64 `json:"tokenRefreshExtensionHours,omitempty" tf:"token_refresh_extension_hours,omitempty"` + + // Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to false. + // Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to `false`. + TokenStoreEnabled *bool `json:"tokenStoreEnabled,omitempty" tf:"token_store_enabled,omitempty"` + + // A twitter block as defined below. + Twitter *WindowsWebAppAuthSettingsTwitterObservation `json:"twitter,omitempty" tf:"twitter,omitempty"` + + // The action to take when an unauthenticated client attempts to access the app. Possible values include: RedirectToLoginPage, AllowAnonymous. + // The action to take when an unauthenticated client attempts to access the app. Possible values include: `RedirectToLoginPage`, `AllowAnonymous`. + UnauthenticatedClientAction *string `json:"unauthenticatedClientAction,omitempty" tf:"unauthenticated_client_action,omitempty"` +} + +type WindowsWebAppAuthSettingsParameters struct { + + // An active_directory block as defined above. 
+ // +kubebuilder:validation:Optional + ActiveDirectory *WindowsWebAppAuthSettingsActiveDirectoryParameters `json:"activeDirectory,omitempty" tf:"active_directory,omitempty"` + + // Specifies a map of login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + // Specifies a map of Login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + // +kubebuilder:validation:Optional + // +mapType=granular + AdditionalLoginParameters map[string]*string `json:"additionalLoginParameters,omitempty" tf:"additional_login_parameters,omitempty"` + + // Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + // Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + // +kubebuilder:validation:Optional + AllowedExternalRedirectUrls []*string `json:"allowedExternalRedirectUrls,omitempty" tf:"allowed_external_redirect_urls,omitempty"` + + // The default authentication provider to use when multiple providers are configured. Possible values include: AzureActiveDirectory, Facebook, Google, MicrosoftAccount, Twitter, Github + // The default authentication provider to use when multiple providers are configured. Possible values include: `AzureActiveDirectory`, `Facebook`, `Google`, `MicrosoftAccount`, `Twitter`, `Github`. + // +kubebuilder:validation:Optional + DefaultProvider *string `json:"defaultProvider,omitempty" tf:"default_provider,omitempty"` + + // Should the Authentication / Authorization feature is enabled for the Windows Web App be enabled? + // Should the Authentication / Authorization feature be enabled? + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` + + // A facebook block as defined below. 
+ // +kubebuilder:validation:Optional + Facebook *WindowsWebAppAuthSettingsFacebookParameters `json:"facebook,omitempty" tf:"facebook,omitempty"` + + // A github block as defined below. + // +kubebuilder:validation:Optional + Github *WindowsWebAppAuthSettingsGithubParameters `json:"github,omitempty" tf:"github,omitempty"` + + // A google block as defined below. + // +kubebuilder:validation:Optional + Google *WindowsWebAppAuthSettingsGoogleParameters `json:"google,omitempty" tf:"google,omitempty"` + + // The OpenID Connect Issuer URI that represents the entity which issues access tokens for this Windows Web App. + // The OpenID Connect Issuer URI that represents the entity which issues access tokens. + // +kubebuilder:validation:Optional + Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"` + + // A microsoft block as defined below. + // +kubebuilder:validation:Optional + Microsoft *WindowsWebAppAuthSettingsMicrosoftParameters `json:"microsoft,omitempty" tf:"microsoft,omitempty"` + + // The RuntimeVersion of the Authentication / Authorization feature in use for the Windows Web App. + // The RuntimeVersion of the Authentication / Authorization feature in use. + // +kubebuilder:validation:Optional + RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` + + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + // +kubebuilder:validation:Optional + TokenRefreshExtensionHours *float64 `json:"tokenRefreshExtensionHours,omitempty" tf:"token_refresh_extension_hours,omitempty"` + + // Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to false. 
+ // Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to `false`. + // +kubebuilder:validation:Optional + TokenStoreEnabled *bool `json:"tokenStoreEnabled,omitempty" tf:"token_store_enabled,omitempty"` + + // A twitter block as defined below. + // +kubebuilder:validation:Optional + Twitter *WindowsWebAppAuthSettingsTwitterParameters `json:"twitter,omitempty" tf:"twitter,omitempty"` + + // The action to take when an unauthenticated client attempts to access the app. Possible values include: RedirectToLoginPage, AllowAnonymous. + // The action to take when an unauthenticated client attempts to access the app. Possible values include: `RedirectToLoginPage`, `AllowAnonymous`. + // +kubebuilder:validation:Optional + UnauthenticatedClientAction *string `json:"unauthenticatedClientAction,omitempty" tf:"unauthenticated_client_action,omitempty"` +} + +type WindowsWebAppAuthSettingsTwitterInitParameters struct { + + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + ConsumerKey *string `json:"consumerKey,omitempty" tf:"consumer_key,omitempty"` + + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret`. + ConsumerSecretSettingName *string `json:"consumerSecretSettingName,omitempty" tf:"consumer_secret_setting_name,omitempty"` +} + +type WindowsWebAppAuthSettingsTwitterObservation struct { + + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. 
+ ConsumerKey *string `json:"consumerKey,omitempty" tf:"consumer_key,omitempty"` + + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret`. + ConsumerSecretSettingName *string `json:"consumerSecretSettingName,omitempty" tf:"consumer_secret_setting_name,omitempty"` +} + +type WindowsWebAppAuthSettingsTwitterParameters struct { + + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // +kubebuilder:validation:Optional + ConsumerKey *string `json:"consumerKey" tf:"consumer_key,omitempty"` + + // The OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with consumer_secret_setting_name. + // The OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret_setting_name`. + // +kubebuilder:validation:Optional + ConsumerSecretSecretRef *v1.SecretKeySelector `json:"consumerSecretSecretRef,omitempty" tf:"-"` + + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret`. + // +kubebuilder:validation:Optional + ConsumerSecretSettingName *string `json:"consumerSecretSettingName,omitempty" tf:"consumer_secret_setting_name,omitempty"` +} + +type WindowsWebAppAuthSettingsV2ActiveDirectoryV2InitParameters struct { + + // The list of allowed Applications for the Default Authorisation Policy. + // The list of allowed Applications for the Default Authorisation Policy. 
+ AllowedApplications []*string `json:"allowedApplications,omitempty" tf:"allowed_applications,omitempty"` + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The list of allowed Group Names for the Default Authorisation Policy. + // The list of allowed Group Names for the Default Authorisation Policy. + AllowedGroups []*string `json:"allowedGroups,omitempty" tf:"allowed_groups,omitempty"` + + // The list of allowed Identities for the Default Authorisation Policy. + // The list of allowed Identities for the Default Authorisation Policy. + AllowedIdentities []*string `json:"allowedIdentities,omitempty" tf:"allowed_identities,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Active Directory. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The thumbprint of the certificate used for signing purposes. + // The thumbprint of the certificate used for signing purposes. + ClientSecretCertificateThumbprint *string `json:"clientSecretCertificateThumbprint,omitempty" tf:"client_secret_certificate_thumbprint,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The App Setting name that contains the client secret of the Client. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // A list of Allowed Client Applications in the JWT Claim. + // A list of Allowed Client Applications in the JWT Claim. 
+ JwtAllowedClientApplications []*string `json:"jwtAllowedClientApplications,omitempty" tf:"jwt_allowed_client_applications,omitempty"` + + // A list of Allowed Groups in the JWT Claim. + // A list of Allowed Groups in the JWT Claim. + JwtAllowedGroups []*string `json:"jwtAllowedGroups,omitempty" tf:"jwt_allowed_groups,omitempty"` + + // A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + // A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + // +mapType=granular + LoginParameters map[string]*string `json:"loginParameters,omitempty" tf:"login_parameters,omitempty"` + + // The Azure Tenant Endpoint for the Authenticating Tenant. e.g. https://login.microsoftonline.com/v2.0/{tenant-guid}/ + // The Azure Tenant Endpoint for the Authenticating Tenant. e.g. `https://login.microsoftonline.com/v2.0/{tenant-guid}/`. + TenantAuthEndpoint *string `json:"tenantAuthEndpoint,omitempty" tf:"tenant_auth_endpoint,omitempty"` + + // Should the www-authenticate provider should be omitted from the request? Defaults to false. + // Should the www-authenticate provider should be omitted from the request? Defaults to `false` + WwwAuthenticationDisabled *bool `json:"wwwAuthenticationDisabled,omitempty" tf:"www_authentication_disabled,omitempty"` +} + +type WindowsWebAppAuthSettingsV2ActiveDirectoryV2Observation struct { + + // The list of allowed Applications for the Default Authorisation Policy. + // The list of allowed Applications for the Default Authorisation Policy. + AllowedApplications []*string `json:"allowedApplications,omitempty" tf:"allowed_applications,omitempty"` + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. 
+ AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The list of allowed Group Names for the Default Authorisation Policy. + // The list of allowed Group Names for the Default Authorisation Policy. + AllowedGroups []*string `json:"allowedGroups,omitempty" tf:"allowed_groups,omitempty"` + + // The list of allowed Identities for the Default Authorisation Policy. + // The list of allowed Identities for the Default Authorisation Policy. + AllowedIdentities []*string `json:"allowedIdentities,omitempty" tf:"allowed_identities,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Active Directory. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The thumbprint of the certificate used for signing purposes. + // The thumbprint of the certificate used for signing purposes. + ClientSecretCertificateThumbprint *string `json:"clientSecretCertificateThumbprint,omitempty" tf:"client_secret_certificate_thumbprint,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The App Setting name that contains the client secret of the Client. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // A list of Allowed Client Applications in the JWT Claim. + // A list of Allowed Client Applications in the JWT Claim. + JwtAllowedClientApplications []*string `json:"jwtAllowedClientApplications,omitempty" tf:"jwt_allowed_client_applications,omitempty"` + + // A list of Allowed Groups in the JWT Claim. + // A list of Allowed Groups in the JWT Claim. + JwtAllowedGroups []*string `json:"jwtAllowedGroups,omitempty" tf:"jwt_allowed_groups,omitempty"` + + // A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. 
+ // A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + // +mapType=granular + LoginParameters map[string]*string `json:"loginParameters,omitempty" tf:"login_parameters,omitempty"` + + // The Azure Tenant Endpoint for the Authenticating Tenant. e.g. https://login.microsoftonline.com/v2.0/{tenant-guid}/ + // The Azure Tenant Endpoint for the Authenticating Tenant. e.g. `https://login.microsoftonline.com/v2.0/{tenant-guid}/`. + TenantAuthEndpoint *string `json:"tenantAuthEndpoint,omitempty" tf:"tenant_auth_endpoint,omitempty"` + + // Should the www-authenticate provider should be omitted from the request? Defaults to false. + // Should the www-authenticate provider should be omitted from the request? Defaults to `false` + WwwAuthenticationDisabled *bool `json:"wwwAuthenticationDisabled,omitempty" tf:"www_authentication_disabled,omitempty"` +} + +type WindowsWebAppAuthSettingsV2ActiveDirectoryV2Parameters struct { + + // The list of allowed Applications for the Default Authorisation Policy. + // The list of allowed Applications for the Default Authorisation Policy. + // +kubebuilder:validation:Optional + AllowedApplications []*string `json:"allowedApplications,omitempty" tf:"allowed_applications,omitempty"` + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + // +kubebuilder:validation:Optional + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The list of allowed Group Names for the Default Authorisation Policy. + // The list of allowed Group Names for the Default Authorisation Policy. + // +kubebuilder:validation:Optional + AllowedGroups []*string `json:"allowedGroups,omitempty" tf:"allowed_groups,omitempty"` + + // The list of allowed Identities for the Default Authorisation Policy. 
+ // The list of allowed Identities for the Default Authorisation Policy. + // +kubebuilder:validation:Optional + AllowedIdentities []*string `json:"allowedIdentities,omitempty" tf:"allowed_identities,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Active Directory. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The thumbprint of the certificate used for signing purposes. + // The thumbprint of the certificate used for signing purposes. + // +kubebuilder:validation:Optional + ClientSecretCertificateThumbprint *string `json:"clientSecretCertificateThumbprint,omitempty" tf:"client_secret_certificate_thumbprint,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The App Setting name that contains the client secret of the Client. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // A list of Allowed Client Applications in the JWT Claim. + // A list of Allowed Client Applications in the JWT Claim. + // +kubebuilder:validation:Optional + JwtAllowedClientApplications []*string `json:"jwtAllowedClientApplications,omitempty" tf:"jwt_allowed_client_applications,omitempty"` + + // A list of Allowed Groups in the JWT Claim. + // A list of Allowed Groups in the JWT Claim. + // +kubebuilder:validation:Optional + JwtAllowedGroups []*string `json:"jwtAllowedGroups,omitempty" tf:"jwt_allowed_groups,omitempty"` + + // A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + // A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. 
+ // +kubebuilder:validation:Optional + // +mapType=granular + LoginParameters map[string]*string `json:"loginParameters,omitempty" tf:"login_parameters,omitempty"` + + // The Azure Tenant Endpoint for the Authenticating Tenant. e.g. https://login.microsoftonline.com/v2.0/{tenant-guid}/ + // The Azure Tenant Endpoint for the Authenticating Tenant. e.g. `https://login.microsoftonline.com/v2.0/{tenant-guid}/`. + // +kubebuilder:validation:Optional + TenantAuthEndpoint *string `json:"tenantAuthEndpoint" tf:"tenant_auth_endpoint,omitempty"` + + // Should the www-authenticate provider should be omitted from the request? Defaults to false. + // Should the www-authenticate provider should be omitted from the request? Defaults to `false` + // +kubebuilder:validation:Optional + WwwAuthenticationDisabled *bool `json:"wwwAuthenticationDisabled,omitempty" tf:"www_authentication_disabled,omitempty"` +} + +type WindowsWebAppAuthSettingsV2AppleV2InitParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Apple web application. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Apple Login. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` +} + +type WindowsWebAppAuthSettingsV2AppleV2Observation struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Apple web application. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. 
+ // The app setting name that contains the `client_secret` value used for Apple Login. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type WindowsWebAppAuthSettingsV2AppleV2Parameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Apple web application. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Apple Login. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName" tf:"client_secret_setting_name,omitempty"` +} + +type WindowsWebAppAuthSettingsV2AzureStaticWebAppV2InitParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Static Web App Authentication. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` +} + +type WindowsWebAppAuthSettingsV2AzureStaticWebAppV2Observation struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Static Web App Authentication. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` +} + +type WindowsWebAppAuthSettingsV2AzureStaticWebAppV2Parameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Static Web App Authentication. 
+ // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` +} + +type WindowsWebAppAuthSettingsV2CustomOidcV2InitParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with this Custom OIDC. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The name which should be used for this TODO. + // The name of the Custom OIDC Authentication Provider. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The name of the claim that contains the users name. + // The name of the claim that contains the users name. + NameClaimType *string `json:"nameClaimType,omitempty" tf:"name_claim_type,omitempty"` + + // The app setting name that contains the client_secret value used for the Custom OIDC Login. + // The endpoint that contains all the configuration endpoints for this Custom OIDC provider. + OpenIDConfigurationEndpoint *string `json:"openidConfigurationEndpoint,omitempty" tf:"openid_configuration_endpoint,omitempty"` + + // The list of the scopes that should be requested while authenticating. + // The list of the scopes that should be requested while authenticating. + Scopes []*string `json:"scopes,omitempty" tf:"scopes,omitempty"` +} + +type WindowsWebAppAuthSettingsV2CustomOidcV2Observation struct { + + // The endpoint to make the Authorisation Request as supplied by openid_configuration_endpoint response. + // The endpoint to make the Authorisation Request. + AuthorisationEndpoint *string `json:"authorisationEndpoint,omitempty" tf:"authorisation_endpoint,omitempty"` + + // The endpoint that provides the keys necessary to validate the token as supplied by openid_configuration_endpoint response. + // The endpoint that provides the keys necessary to validate the token. 
+ CertificationURI *string `json:"certificationUri,omitempty" tf:"certification_uri,omitempty"` + + // The Client Credential Method used. + // The Client Credential Method used. Currently the only supported value is `ClientSecretPost`. + ClientCredentialMethod *string `json:"clientCredentialMethod,omitempty" tf:"client_credential_method,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with this Custom OIDC. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The App Setting name that contains the secret for this Custom OIDC Client. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The endpoint that issued the Token as supplied by openid_configuration_endpoint response. + // The endpoint that issued the Token. + IssuerEndpoint *string `json:"issuerEndpoint,omitempty" tf:"issuer_endpoint,omitempty"` + + // The name which should be used for this TODO. + // The name of the Custom OIDC Authentication Provider. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The name of the claim that contains the users name. + // The name of the claim that contains the users name. + NameClaimType *string `json:"nameClaimType,omitempty" tf:"name_claim_type,omitempty"` + + // The app setting name that contains the client_secret value used for the Custom OIDC Login. + // The endpoint that contains all the configuration endpoints for this Custom OIDC provider. + OpenIDConfigurationEndpoint *string `json:"openidConfigurationEndpoint,omitempty" tf:"openid_configuration_endpoint,omitempty"` + + // The list of the scopes that should be requested while authenticating. + // The list of the scopes that should be requested while authenticating. 
+ Scopes []*string `json:"scopes,omitempty" tf:"scopes,omitempty"` + + // The endpoint used to request a Token as supplied by openid_configuration_endpoint response. + // The endpoint used to request a Token. + TokenEndpoint *string `json:"tokenEndpoint,omitempty" tf:"token_endpoint,omitempty"` +} + +type WindowsWebAppAuthSettingsV2CustomOidcV2Parameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with this Custom OIDC. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The name which should be used for this TODO. + // The name of the Custom OIDC Authentication Provider. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The name of the claim that contains the users name. + // The name of the claim that contains the users name. + // +kubebuilder:validation:Optional + NameClaimType *string `json:"nameClaimType,omitempty" tf:"name_claim_type,omitempty"` + + // The app setting name that contains the client_secret value used for the Custom OIDC Login. + // The endpoint that contains all the configuration endpoints for this Custom OIDC provider. + // +kubebuilder:validation:Optional + OpenIDConfigurationEndpoint *string `json:"openidConfigurationEndpoint" tf:"openid_configuration_endpoint,omitempty"` + + // The list of the scopes that should be requested while authenticating. + // The list of the scopes that should be requested while authenticating. + // +kubebuilder:validation:Optional + Scopes []*string `json:"scopes,omitempty" tf:"scopes,omitempty"` +} + +type WindowsWebAppAuthSettingsV2FacebookV2InitParameters struct { + + // The App ID of the Facebook app used for login. + // The App ID of the Facebook app used for login. + AppID *string `json:"appId,omitempty" tf:"app_id,omitempty"` + + // The app setting name that contains the app_secret value used for Facebook Login. 
+ // The app setting name that contains the `app_secret` value used for Facebook Login. + AppSecretSettingName *string `json:"appSecretSettingName,omitempty" tf:"app_secret_setting_name,omitempty"` + + // The version of the Facebook API to be used while logging in. + // The version of the Facebook API to be used while logging in. + GraphAPIVersion *string `json:"graphApiVersion,omitempty" tf:"graph_api_version,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of scopes to be requested as part of Facebook Login authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type WindowsWebAppAuthSettingsV2FacebookV2Observation struct { + + // The App ID of the Facebook app used for login. + // The App ID of the Facebook app used for login. + AppID *string `json:"appId,omitempty" tf:"app_id,omitempty"` + + // The app setting name that contains the app_secret value used for Facebook Login. + // The app setting name that contains the `app_secret` value used for Facebook Login. + AppSecretSettingName *string `json:"appSecretSettingName,omitempty" tf:"app_secret_setting_name,omitempty"` + + // The version of the Facebook API to be used while logging in. + // The version of the Facebook API to be used while logging in. + GraphAPIVersion *string `json:"graphApiVersion,omitempty" tf:"graph_api_version,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of scopes to be requested as part of Facebook Login authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type WindowsWebAppAuthSettingsV2FacebookV2Parameters struct { + + // The App ID of the Facebook app used for login. + // The App ID of the Facebook app used for login. 
+ // +kubebuilder:validation:Optional + AppID *string `json:"appId" tf:"app_id,omitempty"` + + // The app setting name that contains the app_secret value used for Facebook Login. + // The app setting name that contains the `app_secret` value used for Facebook Login. + // +kubebuilder:validation:Optional + AppSecretSettingName *string `json:"appSecretSettingName" tf:"app_secret_setting_name,omitempty"` + + // The version of the Facebook API to be used while logging in. + // The version of the Facebook API to be used while logging in. + // +kubebuilder:validation:Optional + GraphAPIVersion *string `json:"graphApiVersion,omitempty" tf:"graph_api_version,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of scopes to be requested as part of Facebook Login authentication. + // +kubebuilder:validation:Optional + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type WindowsWebAppAuthSettingsV2GithubV2InitParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the GitHub app used for login. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for GitHub Login. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. 
+ LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type WindowsWebAppAuthSettingsV2GithubV2Observation struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the GitHub app used for login. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for GitHub Login. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type WindowsWebAppAuthSettingsV2GithubV2Parameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the GitHub app used for login. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for GitHub Login. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. 
+ // +kubebuilder:validation:Optional + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type WindowsWebAppAuthSettingsV2GoogleV2InitParameters struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed Audiences that will be requested as part of Google Sign-In authentication. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Google web application. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Google Login. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of Login scopes that will be requested as part of Google Sign-In authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type WindowsWebAppAuthSettingsV2GoogleV2Observation struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed Audiences that will be requested as part of Google Sign-In authentication. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Google web application. 
+ ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Google Login. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of Login scopes that will be requested as part of Google Sign-In authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type WindowsWebAppAuthSettingsV2GoogleV2Parameters struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed Audiences that will be requested as part of Google Sign-In authentication. + // +kubebuilder:validation:Optional + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Google web application. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Google Login. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of Login scopes that will be requested as part of Google Sign-In authentication. 
+ // +kubebuilder:validation:Optional + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type WindowsWebAppAuthSettingsV2InitParameters struct { + + // An active_directory_v2 block as defined below. + ActiveDirectoryV2 *WindowsWebAppAuthSettingsV2ActiveDirectoryV2InitParameters `json:"activeDirectoryV2,omitempty" tf:"active_directory_v2,omitempty"` + + // An apple_v2 block as defined below. + AppleV2 *WindowsWebAppAuthSettingsV2AppleV2InitParameters `json:"appleV2,omitempty" tf:"apple_v2,omitempty"` + + // Should the AuthV2 Settings be enabled. Defaults to false. + // Should the AuthV2 Settings be enabled. Defaults to `false` + AuthEnabled *bool `json:"authEnabled,omitempty" tf:"auth_enabled,omitempty"` + + // An azure_static_web_app_v2 block as defined below. + AzureStaticWebAppV2 *WindowsWebAppAuthSettingsV2AzureStaticWebAppV2InitParameters `json:"azureStaticWebAppV2,omitempty" tf:"azure_static_web_app_v2,omitempty"` + + // The path to the App Auth settings. + // The path to the App Auth settings. **Note:** Relative Paths are evaluated from the Site Root directory. + ConfigFilePath *string `json:"configFilePath,omitempty" tf:"config_file_path,omitempty"` + + // Zero or more custom_oidc_v2 blocks as defined below. + CustomOidcV2 []WindowsWebAppAuthSettingsV2CustomOidcV2InitParameters `json:"customOidcV2,omitempty" tf:"custom_oidc_v2,omitempty"` + + // The Default Authentication Provider to use when the unauthenticated_action is set to RedirectToLoginPage. Possible values include: apple, azureactivedirectory, facebook, github, google, twitter and the name of your custom_oidc_v2 provider. + // The Default Authentication Provider to use when the `unauthenticated_action` is set to `RedirectToLoginPage`. Possible values include: `apple`, `azureactivedirectory`, `facebook`, `github`, `google`, `twitter` and the `name` of your `custom_oidc_v2` provider. 
+ DefaultProvider *string `json:"defaultProvider,omitempty" tf:"default_provider,omitempty"` + + // The paths which should be excluded from the unauthenticated_action when it is set to RedirectToLoginPage. + // The paths which should be excluded from the `unauthenticated_action` when it is set to `RedirectToLoginPage`. + ExcludedPaths []*string `json:"excludedPaths,omitempty" tf:"excluded_paths,omitempty"` + + // A facebook_v2 block as defined below. + FacebookV2 *WindowsWebAppAuthSettingsV2FacebookV2InitParameters `json:"facebookV2,omitempty" tf:"facebook_v2,omitempty"` + + // The convention used to determine the url of the request made. Possible values include NoProxy, Standard, Custom. Defaults to NoProxy. + // The convention used to determine the url of the request made. Possible values include `ForwardProxyConventionNoProxy`, `ForwardProxyConventionStandard`, `ForwardProxyConventionCustom`. Defaults to `ForwardProxyConventionNoProxy` + ForwardProxyConvention *string `json:"forwardProxyConvention,omitempty" tf:"forward_proxy_convention,omitempty"` + + // The name of the custom header containing the host of the request. + // The name of the header containing the host of the request. + ForwardProxyCustomHostHeaderName *string `json:"forwardProxyCustomHostHeaderName,omitempty" tf:"forward_proxy_custom_host_header_name,omitempty"` + + // The name of the custom header containing the scheme of the request. + // The name of the header containing the scheme of the request. + ForwardProxyCustomSchemeHeaderName *string `json:"forwardProxyCustomSchemeHeaderName,omitempty" tf:"forward_proxy_custom_scheme_header_name,omitempty"` + + // A github_v2 block as defined below. + GithubV2 *WindowsWebAppAuthSettingsV2GithubV2InitParameters `json:"githubV2,omitempty" tf:"github_v2,omitempty"` + + // A google_v2 block as defined below. 
+ GoogleV2 *WindowsWebAppAuthSettingsV2GoogleV2InitParameters `json:"googleV2,omitempty" tf:"google_v2,omitempty"` + + // The prefix that should precede all the authentication and authorisation paths. Defaults to /.auth. + // The prefix that should precede all the authentication and authorisation paths. Defaults to `/.auth` + HTTPRouteAPIPrefix *string `json:"httpRouteApiPrefix,omitempty" tf:"http_route_api_prefix,omitempty"` + + // A login block as defined below. + Login *WindowsWebAppAuthSettingsV2LoginInitParameters `json:"login,omitempty" tf:"login,omitempty"` + + // A microsoft_v2 block as defined below. + MicrosoftV2 *WindowsWebAppAuthSettingsV2MicrosoftV2InitParameters `json:"microsoftV2,omitempty" tf:"microsoft_v2,omitempty"` + + // Should the authentication flow be used for all requests. + // Should the authentication flow be used for all requests. + RequireAuthentication *bool `json:"requireAuthentication,omitempty" tf:"require_authentication,omitempty"` + + // Should HTTPS be required on connections? Defaults to true. + // Should HTTPS be required on connections? Defaults to true. + RequireHTTPS *bool `json:"requireHttps,omitempty" tf:"require_https,omitempty"` + + // The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to ~1. + // The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to `~1` + RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` + + // A twitter_v2 block as defined below. + TwitterV2 *WindowsWebAppAuthSettingsV2TwitterV2InitParameters `json:"twitterV2,omitempty" tf:"twitter_v2,omitempty"` + + // The action to take for requests made without authentication. Possible values include RedirectToLoginPage, AllowAnonymous, Return401, and Return403. Defaults to RedirectToLoginPage. + // The action to take for requests made without authentication. Possible values include `RedirectToLoginPage`, `AllowAnonymous`, `Return401`, and `Return403`. 
Defaults to `RedirectToLoginPage`. + UnauthenticatedAction *string `json:"unauthenticatedAction,omitempty" tf:"unauthenticated_action,omitempty"` +} + +type WindowsWebAppAuthSettingsV2LoginInitParameters struct { + + // External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. + // External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. **Note:** URLs within the current domain are always implicitly allowed. + AllowedExternalRedirectUrls []*string `json:"allowedExternalRedirectUrls,omitempty" tf:"allowed_external_redirect_urls,omitempty"` + + // The method by which cookies expire. Possible values include: FixedTime, and IdentityProviderDerived. Defaults to FixedTime. + // The method by which cookies expire. Possible values include: `FixedTime`, and `IdentityProviderDerived`. Defaults to `FixedTime`. + CookieExpirationConvention *string `json:"cookieExpirationConvention,omitempty" tf:"cookie_expiration_convention,omitempty"` + + // The time after the request is made when the session cookie should expire. Defaults to 08:00:00. + // The time after the request is made when the session cookie should expire. Defaults to `08:00:00`. + CookieExpirationTime *string `json:"cookieExpirationTime,omitempty" tf:"cookie_expiration_time,omitempty"` + + // The endpoint to which logout requests should be made. + // The endpoint to which logout requests should be made. + LogoutEndpoint *string `json:"logoutEndpoint,omitempty" tf:"logout_endpoint,omitempty"` + + // The time after the request is made when the nonce should expire. Defaults to 00:05:00. + // The time after the request is made when the nonce should expire. Defaults to `00:05:00`. 
+ NonceExpirationTime *string `json:"nonceExpirationTime,omitempty" tf:"nonce_expiration_time,omitempty"` + + // Should the fragments from the request be preserved after the login request is made. Defaults to false. + // Should the fragments from the request be preserved after the login request is made. Defaults to `false`. + PreserveURLFragmentsForLogins *bool `json:"preserveUrlFragmentsForLogins,omitempty" tf:"preserve_url_fragments_for_logins,omitempty"` + + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + TokenRefreshExtensionTime *float64 `json:"tokenRefreshExtensionTime,omitempty" tf:"token_refresh_extension_time,omitempty"` + + // Should the Token Store configuration Enabled. Defaults to false + // Should the Token Store configuration Enabled. Defaults to `false` + TokenStoreEnabled *bool `json:"tokenStoreEnabled,omitempty" tf:"token_store_enabled,omitempty"` + + // The directory path in the App Filesystem in which the tokens will be stored. + // The directory path in the App Filesystem in which the tokens will be stored. + TokenStorePath *string `json:"tokenStorePath,omitempty" tf:"token_store_path,omitempty"` + + // The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + // The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + TokenStoreSASSettingName *string `json:"tokenStoreSasSettingName,omitempty" tf:"token_store_sas_setting_name,omitempty"` + + // Should the nonce be validated while completing the login flow. Defaults to true. + // Should the nonce be validated while completing the login flow. Defaults to `true`. 
+ ValidateNonce *bool `json:"validateNonce,omitempty" tf:"validate_nonce,omitempty"` +} + +type WindowsWebAppAuthSettingsV2LoginObservation struct { + + // External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. + // External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. **Note:** URLs within the current domain are always implicitly allowed. + AllowedExternalRedirectUrls []*string `json:"allowedExternalRedirectUrls,omitempty" tf:"allowed_external_redirect_urls,omitempty"` + + // The method by which cookies expire. Possible values include: FixedTime, and IdentityProviderDerived. Defaults to FixedTime. + // The method by which cookies expire. Possible values include: `FixedTime`, and `IdentityProviderDerived`. Defaults to `FixedTime`. + CookieExpirationConvention *string `json:"cookieExpirationConvention,omitempty" tf:"cookie_expiration_convention,omitempty"` + + // The time after the request is made when the session cookie should expire. Defaults to 08:00:00. + // The time after the request is made when the session cookie should expire. Defaults to `08:00:00`. + CookieExpirationTime *string `json:"cookieExpirationTime,omitempty" tf:"cookie_expiration_time,omitempty"` + + // The endpoint to which logout requests should be made. + // The endpoint to which logout requests should be made. + LogoutEndpoint *string `json:"logoutEndpoint,omitempty" tf:"logout_endpoint,omitempty"` + + // The time after the request is made when the nonce should expire. Defaults to 00:05:00. + // The time after the request is made when the nonce should expire. Defaults to `00:05:00`. 
+ NonceExpirationTime *string `json:"nonceExpirationTime,omitempty" tf:"nonce_expiration_time,omitempty"` + + // Should the fragments from the request be preserved after the login request is made. Defaults to false. + // Should the fragments from the request be preserved after the login request is made. Defaults to `false`. + PreserveURLFragmentsForLogins *bool `json:"preserveUrlFragmentsForLogins,omitempty" tf:"preserve_url_fragments_for_logins,omitempty"` + + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + TokenRefreshExtensionTime *float64 `json:"tokenRefreshExtensionTime,omitempty" tf:"token_refresh_extension_time,omitempty"` + + // Should the Token Store configuration Enabled. Defaults to false + // Should the Token Store configuration Enabled. Defaults to `false` + TokenStoreEnabled *bool `json:"tokenStoreEnabled,omitempty" tf:"token_store_enabled,omitempty"` + + // The directory path in the App Filesystem in which the tokens will be stored. + // The directory path in the App Filesystem in which the tokens will be stored. + TokenStorePath *string `json:"tokenStorePath,omitempty" tf:"token_store_path,omitempty"` + + // The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + // The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + TokenStoreSASSettingName *string `json:"tokenStoreSasSettingName,omitempty" tf:"token_store_sas_setting_name,omitempty"` + + // Should the nonce be validated while completing the login flow. Defaults to true. + // Should the nonce be validated while completing the login flow. Defaults to `true`. 
+ ValidateNonce *bool `json:"validateNonce,omitempty" tf:"validate_nonce,omitempty"` +} + +type WindowsWebAppAuthSettingsV2LoginParameters struct { + + // External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. + // External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. **Note:** URLs within the current domain are always implicitly allowed. + // +kubebuilder:validation:Optional + AllowedExternalRedirectUrls []*string `json:"allowedExternalRedirectUrls,omitempty" tf:"allowed_external_redirect_urls,omitempty"` + + // The method by which cookies expire. Possible values include: FixedTime, and IdentityProviderDerived. Defaults to FixedTime. + // The method by which cookies expire. Possible values include: `FixedTime`, and `IdentityProviderDerived`. Defaults to `FixedTime`. + // +kubebuilder:validation:Optional + CookieExpirationConvention *string `json:"cookieExpirationConvention,omitempty" tf:"cookie_expiration_convention,omitempty"` + + // The time after the request is made when the session cookie should expire. Defaults to 08:00:00. + // The time after the request is made when the session cookie should expire. Defaults to `08:00:00`. + // +kubebuilder:validation:Optional + CookieExpirationTime *string `json:"cookieExpirationTime,omitempty" tf:"cookie_expiration_time,omitempty"` + + // The endpoint to which logout requests should be made. + // The endpoint to which logout requests should be made. + // +kubebuilder:validation:Optional + LogoutEndpoint *string `json:"logoutEndpoint,omitempty" tf:"logout_endpoint,omitempty"` + + // The time after the request is made when the nonce should expire. Defaults to 00:05:00. + // The time after the request is made when the nonce should expire. Defaults to `00:05:00`. 
+ // +kubebuilder:validation:Optional + NonceExpirationTime *string `json:"nonceExpirationTime,omitempty" tf:"nonce_expiration_time,omitempty"` + + // Should the fragments from the request be preserved after the login request is made. Defaults to false. + // Should the fragments from the request be preserved after the login request is made. Defaults to `false`. + // +kubebuilder:validation:Optional + PreserveURLFragmentsForLogins *bool `json:"preserveUrlFragmentsForLogins,omitempty" tf:"preserve_url_fragments_for_logins,omitempty"` + + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + // +kubebuilder:validation:Optional + TokenRefreshExtensionTime *float64 `json:"tokenRefreshExtensionTime,omitempty" tf:"token_refresh_extension_time,omitempty"` + + // Should the Token Store configuration Enabled. Defaults to false + // Should the Token Store configuration Enabled. Defaults to `false` + // +kubebuilder:validation:Optional + TokenStoreEnabled *bool `json:"tokenStoreEnabled,omitempty" tf:"token_store_enabled,omitempty"` + + // The directory path in the App Filesystem in which the tokens will be stored. + // The directory path in the App Filesystem in which the tokens will be stored. + // +kubebuilder:validation:Optional + TokenStorePath *string `json:"tokenStorePath,omitempty" tf:"token_store_path,omitempty"` + + // The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + // The name of the app setting which contains the SAS URL of the blob storage containing the tokens. 
+ // +kubebuilder:validation:Optional + TokenStoreSASSettingName *string `json:"tokenStoreSasSettingName,omitempty" tf:"token_store_sas_setting_name,omitempty"` + + // Should the nonce be validated while completing the login flow. Defaults to true. + // Should the nonce be validated while completing the login flow. Defaults to `true`. + // +kubebuilder:validation:Optional + ValidateNonce *bool `json:"validateNonce,omitempty" tf:"validate_nonce,omitempty"` +} + +type WindowsWebAppAuthSettingsV2MicrosoftV2InitParameters struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OAuth 2.0 client ID that was created for the app used for authentication. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // The list of Login scopes that will be requested as part of Microsoft Account authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type WindowsWebAppAuthSettingsV2MicrosoftV2Observation struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. 
+ // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OAuth 2.0 client ID that was created for the app used for authentication. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // The list of Login scopes that will be requested as part of Microsoft Account authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type WindowsWebAppAuthSettingsV2MicrosoftV2Parameters struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // +kubebuilder:validation:Optional + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OAuth 2.0 client ID that was created for the app used for authentication. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. 
+ // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // The list of Login scopes that will be requested as part of Microsoft Account authentication. + // +kubebuilder:validation:Optional + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type WindowsWebAppAuthSettingsV2Observation struct { + + // An active_directory_v2 block as defined below. + ActiveDirectoryV2 *WindowsWebAppAuthSettingsV2ActiveDirectoryV2Observation `json:"activeDirectoryV2,omitempty" tf:"active_directory_v2,omitempty"` + + // An apple_v2 block as defined below. + AppleV2 *WindowsWebAppAuthSettingsV2AppleV2Observation `json:"appleV2,omitempty" tf:"apple_v2,omitempty"` + + // Should the AuthV2 Settings be enabled. Defaults to false. + // Should the AuthV2 Settings be enabled. Defaults to `false` + AuthEnabled *bool `json:"authEnabled,omitempty" tf:"auth_enabled,omitempty"` + + // An azure_static_web_app_v2 block as defined below. + AzureStaticWebAppV2 *WindowsWebAppAuthSettingsV2AzureStaticWebAppV2Observation `json:"azureStaticWebAppV2,omitempty" tf:"azure_static_web_app_v2,omitempty"` + + // The path to the App Auth settings. + // The path to the App Auth settings. **Note:** Relative Paths are evaluated from the Site Root directory. + ConfigFilePath *string `json:"configFilePath,omitempty" tf:"config_file_path,omitempty"` + + // Zero or more custom_oidc_v2 blocks as defined below. + CustomOidcV2 []WindowsWebAppAuthSettingsV2CustomOidcV2Observation `json:"customOidcV2,omitempty" tf:"custom_oidc_v2,omitempty"` + + // The Default Authentication Provider to use when the unauthenticated_action is set to RedirectToLoginPage. 
Possible values include: apple, azureactivedirectory, facebook, github, google, twitter and the name of your custom_oidc_v2 provider. + // The Default Authentication Provider to use when the `unauthenticated_action` is set to `RedirectToLoginPage`. Possible values include: `apple`, `azureactivedirectory`, `facebook`, `github`, `google`, `twitter` and the `name` of your `custom_oidc_v2` provider. + DefaultProvider *string `json:"defaultProvider,omitempty" tf:"default_provider,omitempty"` + + // The paths which should be excluded from the unauthenticated_action when it is set to RedirectToLoginPage. + // The paths which should be excluded from the `unauthenticated_action` when it is set to `RedirectToLoginPage`. + ExcludedPaths []*string `json:"excludedPaths,omitempty" tf:"excluded_paths,omitempty"` + + // A facebook_v2 block as defined below. + FacebookV2 *WindowsWebAppAuthSettingsV2FacebookV2Observation `json:"facebookV2,omitempty" tf:"facebook_v2,omitempty"` + + // The convention used to determine the url of the request made. Possible values include NoProxy, Standard, Custom. Defaults to NoProxy. + // The convention used to determine the url of the request made. Possible values include `ForwardProxyConventionNoProxy`, `ForwardProxyConventionStandard`, `ForwardProxyConventionCustom`. Defaults to `ForwardProxyConventionNoProxy` + ForwardProxyConvention *string `json:"forwardProxyConvention,omitempty" tf:"forward_proxy_convention,omitempty"` + + // The name of the custom header containing the host of the request. + // The name of the header containing the host of the request. + ForwardProxyCustomHostHeaderName *string `json:"forwardProxyCustomHostHeaderName,omitempty" tf:"forward_proxy_custom_host_header_name,omitempty"` + + // The name of the custom header containing the scheme of the request. + // The name of the header containing the scheme of the request. 
+ ForwardProxyCustomSchemeHeaderName *string `json:"forwardProxyCustomSchemeHeaderName,omitempty" tf:"forward_proxy_custom_scheme_header_name,omitempty"` + + // A github_v2 block as defined below. + GithubV2 *WindowsWebAppAuthSettingsV2GithubV2Observation `json:"githubV2,omitempty" tf:"github_v2,omitempty"` + + // A google_v2 block as defined below. + GoogleV2 *WindowsWebAppAuthSettingsV2GoogleV2Observation `json:"googleV2,omitempty" tf:"google_v2,omitempty"` + + // The prefix that should precede all the authentication and authorisation paths. Defaults to /.auth. + // The prefix that should precede all the authentication and authorisation paths. Defaults to `/.auth` + HTTPRouteAPIPrefix *string `json:"httpRouteApiPrefix,omitempty" tf:"http_route_api_prefix,omitempty"` + + // A login block as defined below. + Login *WindowsWebAppAuthSettingsV2LoginObservation `json:"login,omitempty" tf:"login,omitempty"` + + // A microsoft_v2 block as defined below. + MicrosoftV2 *WindowsWebAppAuthSettingsV2MicrosoftV2Observation `json:"microsoftV2,omitempty" tf:"microsoft_v2,omitempty"` + + // Should the authentication flow be used for all requests. + // Should the authentication flow be used for all requests. + RequireAuthentication *bool `json:"requireAuthentication,omitempty" tf:"require_authentication,omitempty"` + + // Should HTTPS be required on connections? Defaults to true. + // Should HTTPS be required on connections? Defaults to true. + RequireHTTPS *bool `json:"requireHttps,omitempty" tf:"require_https,omitempty"` + + // The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to ~1. + // The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to `~1` + RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` + + // A twitter_v2 block as defined below. 
+ TwitterV2 *WindowsWebAppAuthSettingsV2TwitterV2Observation `json:"twitterV2,omitempty" tf:"twitter_v2,omitempty"` + + // The action to take for requests made without authentication. Possible values include RedirectToLoginPage, AllowAnonymous, Return401, and Return403. Defaults to RedirectToLoginPage. + // The action to take for requests made without authentication. Possible values include `RedirectToLoginPage`, `AllowAnonymous`, `Return401`, and `Return403`. Defaults to `RedirectToLoginPage`. + UnauthenticatedAction *string `json:"unauthenticatedAction,omitempty" tf:"unauthenticated_action,omitempty"` +} + +type WindowsWebAppAuthSettingsV2Parameters struct { + + // An active_directory_v2 block as defined below. + // +kubebuilder:validation:Optional + ActiveDirectoryV2 *WindowsWebAppAuthSettingsV2ActiveDirectoryV2Parameters `json:"activeDirectoryV2,omitempty" tf:"active_directory_v2,omitempty"` + + // An apple_v2 block as defined below. + // +kubebuilder:validation:Optional + AppleV2 *WindowsWebAppAuthSettingsV2AppleV2Parameters `json:"appleV2,omitempty" tf:"apple_v2,omitempty"` + + // Should the AuthV2 Settings be enabled. Defaults to false. + // Should the AuthV2 Settings be enabled. Defaults to `false` + // +kubebuilder:validation:Optional + AuthEnabled *bool `json:"authEnabled,omitempty" tf:"auth_enabled,omitempty"` + + // An azure_static_web_app_v2 block as defined below. + // +kubebuilder:validation:Optional + AzureStaticWebAppV2 *WindowsWebAppAuthSettingsV2AzureStaticWebAppV2Parameters `json:"azureStaticWebAppV2,omitempty" tf:"azure_static_web_app_v2,omitempty"` + + // The path to the App Auth settings. + // The path to the App Auth settings. **Note:** Relative Paths are evaluated from the Site Root directory. + // +kubebuilder:validation:Optional + ConfigFilePath *string `json:"configFilePath,omitempty" tf:"config_file_path,omitempty"` + + // Zero or more custom_oidc_v2 blocks as defined below. 
+ // +kubebuilder:validation:Optional + CustomOidcV2 []WindowsWebAppAuthSettingsV2CustomOidcV2Parameters `json:"customOidcV2,omitempty" tf:"custom_oidc_v2,omitempty"` + + // The Default Authentication Provider to use when the unauthenticated_action is set to RedirectToLoginPage. Possible values include: apple, azureactivedirectory, facebook, github, google, twitter and the name of your custom_oidc_v2 provider. + // The Default Authentication Provider to use when the `unauthenticated_action` is set to `RedirectToLoginPage`. Possible values include: `apple`, `azureactivedirectory`, `facebook`, `github`, `google`, `twitter` and the `name` of your `custom_oidc_v2` provider. + // +kubebuilder:validation:Optional + DefaultProvider *string `json:"defaultProvider,omitempty" tf:"default_provider,omitempty"` + + // The paths which should be excluded from the unauthenticated_action when it is set to RedirectToLoginPage. + // The paths which should be excluded from the `unauthenticated_action` when it is set to `RedirectToLoginPage`. + // +kubebuilder:validation:Optional + ExcludedPaths []*string `json:"excludedPaths,omitempty" tf:"excluded_paths,omitempty"` + + // A facebook_v2 block as defined below. + // +kubebuilder:validation:Optional + FacebookV2 *WindowsWebAppAuthSettingsV2FacebookV2Parameters `json:"facebookV2,omitempty" tf:"facebook_v2,omitempty"` + + // The convention used to determine the url of the request made. Possible values include NoProxy, Standard, Custom. Defaults to NoProxy. + // The convention used to determine the url of the request made. Possible values include `ForwardProxyConventionNoProxy`, `ForwardProxyConventionStandard`, `ForwardProxyConventionCustom`. Defaults to `ForwardProxyConventionNoProxy` + // +kubebuilder:validation:Optional + ForwardProxyConvention *string `json:"forwardProxyConvention,omitempty" tf:"forward_proxy_convention,omitempty"` + + // The name of the custom header containing the host of the request. 
+ // The name of the header containing the host of the request. + // +kubebuilder:validation:Optional + ForwardProxyCustomHostHeaderName *string `json:"forwardProxyCustomHostHeaderName,omitempty" tf:"forward_proxy_custom_host_header_name,omitempty"` + + // The name of the custom header containing the scheme of the request. + // The name of the header containing the scheme of the request. + // +kubebuilder:validation:Optional + ForwardProxyCustomSchemeHeaderName *string `json:"forwardProxyCustomSchemeHeaderName,omitempty" tf:"forward_proxy_custom_scheme_header_name,omitempty"` + + // A github_v2 block as defined below. + // +kubebuilder:validation:Optional + GithubV2 *WindowsWebAppAuthSettingsV2GithubV2Parameters `json:"githubV2,omitempty" tf:"github_v2,omitempty"` + + // A google_v2 block as defined below. + // +kubebuilder:validation:Optional + GoogleV2 *WindowsWebAppAuthSettingsV2GoogleV2Parameters `json:"googleV2,omitempty" tf:"google_v2,omitempty"` + + // The prefix that should precede all the authentication and authorisation paths. Defaults to /.auth. + // The prefix that should precede all the authentication and authorisation paths. Defaults to `/.auth` + // +kubebuilder:validation:Optional + HTTPRouteAPIPrefix *string `json:"httpRouteApiPrefix,omitempty" tf:"http_route_api_prefix,omitempty"` + + // A login block as defined below. + // +kubebuilder:validation:Optional + Login *WindowsWebAppAuthSettingsV2LoginParameters `json:"login" tf:"login,omitempty"` + + // A microsoft_v2 block as defined below. + // +kubebuilder:validation:Optional + MicrosoftV2 *WindowsWebAppAuthSettingsV2MicrosoftV2Parameters `json:"microsoftV2,omitempty" tf:"microsoft_v2,omitempty"` + + // Should the authentication flow be used for all requests. + // Should the authentication flow be used for all requests. 
+ // +kubebuilder:validation:Optional + RequireAuthentication *bool `json:"requireAuthentication,omitempty" tf:"require_authentication,omitempty"` + + // Should HTTPS be required on connections? Defaults to true. + // Should HTTPS be required on connections? Defaults to true. + // +kubebuilder:validation:Optional + RequireHTTPS *bool `json:"requireHttps,omitempty" tf:"require_https,omitempty"` + + // The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to ~1. + // The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to `~1` + // +kubebuilder:validation:Optional + RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` + + // A twitter_v2 block as defined below. + // +kubebuilder:validation:Optional + TwitterV2 *WindowsWebAppAuthSettingsV2TwitterV2Parameters `json:"twitterV2,omitempty" tf:"twitter_v2,omitempty"` + + // The action to take for requests made without authentication. Possible values include RedirectToLoginPage, AllowAnonymous, Return401, and Return403. Defaults to RedirectToLoginPage. + // The action to take for requests made without authentication. Possible values include `RedirectToLoginPage`, `AllowAnonymous`, `Return401`, and `Return403`. Defaults to `RedirectToLoginPage`. + // +kubebuilder:validation:Optional + UnauthenticatedAction *string `json:"unauthenticatedAction,omitempty" tf:"unauthenticated_action,omitempty"` +} + +type WindowsWebAppAuthSettingsV2TwitterV2InitParameters struct { + + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + ConsumerKey *string `json:"consumerKey,omitempty" tf:"consumer_key,omitempty"` + + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. 
+ // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + ConsumerSecretSettingName *string `json:"consumerSecretSettingName,omitempty" tf:"consumer_secret_setting_name,omitempty"` +} + +type WindowsWebAppAuthSettingsV2TwitterV2Observation struct { + + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + ConsumerKey *string `json:"consumerKey,omitempty" tf:"consumer_key,omitempty"` + + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + ConsumerSecretSettingName *string `json:"consumerSecretSettingName,omitempty" tf:"consumer_secret_setting_name,omitempty"` +} + +type WindowsWebAppAuthSettingsV2TwitterV2Parameters struct { + + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // +kubebuilder:validation:Optional + ConsumerKey *string `json:"consumerKey" tf:"consumer_key,omitempty"` + + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + // +kubebuilder:validation:Optional + ConsumerSecretSettingName *string `json:"consumerSecretSettingName" tf:"consumer_secret_setting_name,omitempty"` +} + +type WindowsWebAppBackupInitParameters struct { + + // Should this backup job be enabled? Defaults to true. + // Should this backup job be enabled? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The name which should be used for this Backup. + // The name which should be used for this Backup. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A schedule block as defined below. + Schedule *WindowsWebAppBackupScheduleInitParameters `json:"schedule,omitempty" tf:"schedule,omitempty"` +} + +type WindowsWebAppBackupObservation struct { + + // Should this backup job be enabled? Defaults to true. + // Should this backup job be enabled? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The name which should be used for this Backup. + // The name which should be used for this Backup. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A schedule block as defined below. + Schedule *WindowsWebAppBackupScheduleObservation `json:"schedule,omitempty" tf:"schedule,omitempty"` +} + +type WindowsWebAppBackupParameters struct { + + // Should this backup job be enabled? Defaults to true. + // Should this backup job be enabled? + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The name which should be used for this Backup. + // The name which should be used for this Backup. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // A schedule block as defined below. + // +kubebuilder:validation:Optional + Schedule *WindowsWebAppBackupScheduleParameters `json:"schedule" tf:"schedule,omitempty"` + + // The SAS URL to the container. + // The SAS URL to the container. + // +kubebuilder:validation:Required + StorageAccountURLSecretRef v1.SecretKeySelector `json:"storageAccountUrlSecretRef" tf:"-"` +} + +type WindowsWebAppBackupScheduleInitParameters struct { + + // How often the backup should be executed (e.g. for weekly backup, this should be set to 7 and frequency_unit should be set to Day). + // How often the backup should be executed (e.g. for weekly backup, this should be set to `7` and `frequency_unit` should be set to `Day`). 
+ FrequencyInterval *float64 `json:"frequencyInterval,omitempty" tf:"frequency_interval,omitempty"` + + // The unit of time for how often the backup should take place. Possible values include: Day, Hour + // The unit of time for how often the backup should take place. Possible values include: `Day` and `Hour`. + FrequencyUnit *string `json:"frequencyUnit,omitempty" tf:"frequency_unit,omitempty"` + + // Should the service keep at least one backup, regardless of age of backup. Defaults to false. + // Should the service keep at least one backup, regardless of age of backup. Defaults to `false`. + KeepAtLeastOneBackup *bool `json:"keepAtLeastOneBackup,omitempty" tf:"keep_at_least_one_backup,omitempty"` + + // After how many days backups should be deleted. Defaults to 30. + // After how many days backups should be deleted. + RetentionPeriodDays *float64 `json:"retentionPeriodDays,omitempty" tf:"retention_period_days,omitempty"` + + // When the schedule should start working in RFC-3339 format. + // When the schedule should start working in RFC-3339 format. + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` +} + +type WindowsWebAppBackupScheduleObservation struct { + + // How often the backup should be executed (e.g. for weekly backup, this should be set to 7 and frequency_unit should be set to Day). + // How often the backup should be executed (e.g. for weekly backup, this should be set to `7` and `frequency_unit` should be set to `Day`). + FrequencyInterval *float64 `json:"frequencyInterval,omitempty" tf:"frequency_interval,omitempty"` + + // The unit of time for how often the backup should take place. Possible values include: Day, Hour + // The unit of time for how often the backup should take place. Possible values include: `Day` and `Hour`. + FrequencyUnit *string `json:"frequencyUnit,omitempty" tf:"frequency_unit,omitempty"` + + // Should the service keep at least one backup, regardless of age of backup. Defaults to false. 
+ // Should the service keep at least one backup, regardless of age of backup. Defaults to `false`. + KeepAtLeastOneBackup *bool `json:"keepAtLeastOneBackup,omitempty" tf:"keep_at_least_one_backup,omitempty"` + + // The time the backup was last attempted. + LastExecutionTime *string `json:"lastExecutionTime,omitempty" tf:"last_execution_time,omitempty"` + + // After how many days backups should be deleted. Defaults to 30. + // After how many days backups should be deleted. + RetentionPeriodDays *float64 `json:"retentionPeriodDays,omitempty" tf:"retention_period_days,omitempty"` + + // When the schedule should start working in RFC-3339 format. + // When the schedule should start working in RFC-3339 format. + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` +} + +type WindowsWebAppBackupScheduleParameters struct { + + // How often the backup should be executed (e.g. for weekly backup, this should be set to 7 and frequency_unit should be set to Day). + // How often the backup should be executed (e.g. for weekly backup, this should be set to `7` and `frequency_unit` should be set to `Day`). + // +kubebuilder:validation:Optional + FrequencyInterval *float64 `json:"frequencyInterval" tf:"frequency_interval,omitempty"` + + // The unit of time for how often the backup should take place. Possible values include: Day, Hour + // The unit of time for how often the backup should take place. Possible values include: `Day` and `Hour`. + // +kubebuilder:validation:Optional + FrequencyUnit *string `json:"frequencyUnit" tf:"frequency_unit,omitempty"` + + // Should the service keep at least one backup, regardless of age of backup. Defaults to false. + // Should the service keep at least one backup, regardless of age of backup. Defaults to `false`. + // +kubebuilder:validation:Optional + KeepAtLeastOneBackup *bool `json:"keepAtLeastOneBackup,omitempty" tf:"keep_at_least_one_backup,omitempty"` + + // After how many days backups should be deleted. Defaults to 30. 
+ // After how many days backups should be deleted. + // +kubebuilder:validation:Optional + RetentionPeriodDays *float64 `json:"retentionPeriodDays,omitempty" tf:"retention_period_days,omitempty"` + + // When the schedule should start working in RFC-3339 format. + // When the schedule should start working in RFC-3339 format. + // +kubebuilder:validation:Optional + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` +} + +type WindowsWebAppConnectionStringInitParameters struct { + + // The name of the Connection String. + // The name which should be used for this Connection. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Type of database. Possible values include: APIHub, Custom, DocDb, EventHub, MySQL, NotificationHub, PostgreSQL, RedisCache, ServiceBus, SQLAzure, and SQLServer. + // Type of database. Possible values include: `MySQL`, `SQLServer`, `SQLAzure`, `Custom`, `NotificationHub`, `ServiceBus`, `EventHub`, `APIHub`, `DocDb`, `RedisCache`, and `PostgreSQL`. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type WindowsWebAppConnectionStringObservation struct { + + // The name of the Connection String. + // The name which should be used for this Connection. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Type of database. Possible values include: APIHub, Custom, DocDb, EventHub, MySQL, NotificationHub, PostgreSQL, RedisCache, ServiceBus, SQLAzure, and SQLServer. + // Type of database. Possible values include: `MySQL`, `SQLServer`, `SQLAzure`, `Custom`, `NotificationHub`, `ServiceBus`, `EventHub`, `APIHub`, `DocDb`, `RedisCache`, and `PostgreSQL`. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type WindowsWebAppConnectionStringParameters struct { + + // The name of the Connection String. + // The name which should be used for this Connection. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Type of database. 
Possible values include: APIHub, Custom, DocDb, EventHub, MySQL, NotificationHub, PostgreSQL, RedisCache, ServiceBus, SQLAzure, and SQLServer. + // Type of database. Possible values include: `MySQL`, `SQLServer`, `SQLAzure`, `Custom`, `NotificationHub`, `ServiceBus`, `EventHub`, `APIHub`, `DocDb`, `RedisCache`, and `PostgreSQL`. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` + + // The connection string value. + // The connection string value. + // +kubebuilder:validation:Required + ValueSecretRef v1.SecretKeySelector `json:"valueSecretRef" tf:"-"` +} + +type WindowsWebAppIdentityInitParameters struct { + + // A list of User Assigned Managed Identity IDs to be assigned to this Windows Web App. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Windows Web App. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type WindowsWebAppIdentityObservation struct { + + // A list of User Assigned Managed Identity IDs to be assigned to this Windows Web App. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Principal ID associated with this Managed Service Identity. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID associated with this Managed Service Identity. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Windows Web App. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type WindowsWebAppIdentityParameters struct { + + // A list of User Assigned Managed Identity IDs to be assigned to this Windows Web App. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Windows Web App. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type WindowsWebAppInitParameters struct { + + // A map of key-value pairs of App Settings. + // +mapType=granular + AppSettings map[string]*string `json:"appSettings,omitempty" tf:"app_settings,omitempty"` + + // An auth_settings block as defined below. + AuthSettings *WindowsWebAppAuthSettingsInitParameters `json:"authSettings,omitempty" tf:"auth_settings,omitempty"` + + // An auth_settings_v2 block as defined below. + AuthSettingsV2 *WindowsWebAppAuthSettingsV2InitParameters `json:"authSettingsV2,omitempty" tf:"auth_settings_v2,omitempty"` + + // A backup block as defined below. + Backup *WindowsWebAppBackupInitParameters `json:"backup,omitempty" tf:"backup,omitempty"` + + // Should Client Affinity be enabled? + ClientAffinityEnabled *bool `json:"clientAffinityEnabled,omitempty" tf:"client_affinity_enabled,omitempty"` + + // Should Client Certificates be enabled? + ClientCertificateEnabled *bool `json:"clientCertificateEnabled,omitempty" tf:"client_certificate_enabled,omitempty"` + + // Paths to exclude when using client certificates, separated by ; + // Paths to exclude when using client certificates, separated by ; + ClientCertificateExclusionPaths *string `json:"clientCertificateExclusionPaths,omitempty" tf:"client_certificate_exclusion_paths,omitempty"` + + // The Client Certificate mode. 
Possible values are Required, Optional, and OptionalInteractiveUser. This property has no effect when client_cert_enabled is false. Defaults to Required. + ClientCertificateMode *string `json:"clientCertificateMode,omitempty" tf:"client_certificate_mode,omitempty"` + + // One or more connection_string blocks as defined below. + ConnectionString []WindowsWebAppConnectionStringInitParameters `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // Should the Windows Web App be enabled? Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Should the default FTP Basic Authentication publishing profile be enabled. Defaults to true. + FtpPublishBasicAuthenticationEnabled *bool `json:"ftpPublishBasicAuthenticationEnabled,omitempty" tf:"ftp_publish_basic_authentication_enabled,omitempty"` + + // Should the Windows Web App require HTTPS connections. Defaults to false. + HTTPSOnly *bool `json:"httpsOnly,omitempty" tf:"https_only,omitempty"` + + // An identity block as defined below. + Identity *WindowsWebAppIdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The User Assigned Identity ID used for accessing KeyVault secrets. The identity must be assigned to the application in the identity block. For more information see - Access vaults with a user-assigned identity + KeyVaultReferenceIdentityID *string `json:"keyVaultReferenceIdentityId,omitempty" tf:"key_vault_reference_identity_id,omitempty"` + + // The Azure Region where the Windows Web App should exist. Changing this forces a new Windows Web App to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A logs block as defined below. + Logs *WindowsWebAppLogsInitParameters `json:"logs,omitempty" tf:"logs,omitempty"` + + // Should public network access be enabled for the Web App. Defaults to true. 
+ PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The ID of the Service Plan that this Windows App Service will be created in. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/web/v1beta1.ServicePlan + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + ServicePlanID *string `json:"servicePlanId,omitempty" tf:"service_plan_id,omitempty"` + + // Reference to a ServicePlan in web to populate servicePlanId. + // +kubebuilder:validation:Optional + ServicePlanIDRef *v1.Reference `json:"servicePlanIdRef,omitempty" tf:"-"` + + // Selector for a ServicePlan in web to populate servicePlanId. + // +kubebuilder:validation:Optional + ServicePlanIDSelector *v1.Selector `json:"servicePlanIdSelector,omitempty" tf:"-"` + + // A site_config block as defined below. + SiteConfig *WindowsWebAppSiteConfigInitParameters `json:"siteConfig,omitempty" tf:"site_config,omitempty"` + + // A sticky_settings block as defined below. + StickySettings *WindowsWebAppStickySettingsInitParameters `json:"stickySettings,omitempty" tf:"sticky_settings,omitempty"` + + // One or more storage_account blocks as defined below. + StorageAccount []WindowsWebAppStorageAccountInitParameters `json:"storageAccount,omitempty" tf:"storage_account,omitempty"` + + // A mapping of tags which should be assigned to the Windows Web App. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The subnet id which will be used by this Web App for regional virtual network integration. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDRef *v1.Reference `json:"virtualNetworkSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDSelector *v1.Selector `json:"virtualNetworkSubnetIdSelector,omitempty" tf:"-"` + + // Should the default WebDeploy Basic Authentication publishing credentials enabled. Defaults to true. + WebdeployPublishBasicAuthenticationEnabled *bool `json:"webdeployPublishBasicAuthenticationEnabled,omitempty" tf:"webdeploy_publish_basic_authentication_enabled,omitempty"` + + // The local path and filename of the Zip packaged application to deploy to this Windows Web App. + // The local path and filename of the Zip packaged application to deploy to this Windows Web App. **Note:** Using this value requires either `WEBSITE_RUN_FROM_PACKAGE=1` or `SCM_DO_BUILD_DURING_DEPLOYMENT=true` to be set on the App in `app_settings`. + ZipDeployFile *string `json:"zipDeployFile,omitempty" tf:"zip_deploy_file,omitempty"` +} + +type WindowsWebAppLogsApplicationLogsInitParameters struct { + + // A azure_blob_storage_http block as defined above. + AzureBlobStorage *LogsApplicationLogsAzureBlobStorageInitParameters `json:"azureBlobStorage,omitempty" tf:"azure_blob_storage,omitempty"` + + // Log level. Possible values include: Off, Verbose, Information, Warning, and Error. 
+ FileSystemLevel *string `json:"fileSystemLevel,omitempty" tf:"file_system_level,omitempty"` +} + +type WindowsWebAppLogsApplicationLogsObservation struct { + + // A azure_blob_storage_http block as defined above. + AzureBlobStorage *LogsApplicationLogsAzureBlobStorageObservation `json:"azureBlobStorage,omitempty" tf:"azure_blob_storage,omitempty"` + + // Log level. Possible values include: Off, Verbose, Information, Warning, and Error. + FileSystemLevel *string `json:"fileSystemLevel,omitempty" tf:"file_system_level,omitempty"` +} + +type WindowsWebAppLogsApplicationLogsParameters struct { + + // A azure_blob_storage_http block as defined above. + // +kubebuilder:validation:Optional + AzureBlobStorage *LogsApplicationLogsAzureBlobStorageParameters `json:"azureBlobStorage,omitempty" tf:"azure_blob_storage,omitempty"` + + // Log level. Possible values include: Off, Verbose, Information, Warning, and Error. + // +kubebuilder:validation:Optional + FileSystemLevel *string `json:"fileSystemLevel" tf:"file_system_level,omitempty"` +} + +type WindowsWebAppLogsHTTPLogsAzureBlobStorageInitParameters struct { + + // The retention period in days. A values of 0 means no retention. + RetentionInDays *float64 `json:"retentionInDays,omitempty" tf:"retention_in_days,omitempty"` +} + +type WindowsWebAppLogsHTTPLogsAzureBlobStorageObservation struct { + + // The retention period in days. A values of 0 means no retention. + RetentionInDays *float64 `json:"retentionInDays,omitempty" tf:"retention_in_days,omitempty"` +} + +type WindowsWebAppLogsHTTPLogsAzureBlobStorageParameters struct { + + // The retention period in days. A values of 0 means no retention. + // +kubebuilder:validation:Optional + RetentionInDays *float64 `json:"retentionInDays,omitempty" tf:"retention_in_days,omitempty"` + + // SAS url to an Azure blob container with read/write/list/delete permissions. 
+ // +kubebuilder:validation:Required + SASURLSecretRef v1.SecretKeySelector `json:"sasurlSecretRef" tf:"-"` +} + +type WindowsWebAppLogsHTTPLogsInitParameters struct { + + // A azure_blob_storage_http block as defined above. + AzureBlobStorage *WindowsWebAppLogsHTTPLogsAzureBlobStorageInitParameters `json:"azureBlobStorage,omitempty" tf:"azure_blob_storage,omitempty"` + + // A file_system block as defined above. + FileSystem *LogsHTTPLogsFileSystemInitParameters `json:"fileSystem,omitempty" tf:"file_system,omitempty"` +} + +type WindowsWebAppLogsHTTPLogsObservation struct { + + // A azure_blob_storage_http block as defined above. + AzureBlobStorage *WindowsWebAppLogsHTTPLogsAzureBlobStorageObservation `json:"azureBlobStorage,omitempty" tf:"azure_blob_storage,omitempty"` + + // A file_system block as defined above. + FileSystem *LogsHTTPLogsFileSystemObservation `json:"fileSystem,omitempty" tf:"file_system,omitempty"` +} + +type WindowsWebAppLogsHTTPLogsParameters struct { + + // A azure_blob_storage_http block as defined above. + // +kubebuilder:validation:Optional + AzureBlobStorage *WindowsWebAppLogsHTTPLogsAzureBlobStorageParameters `json:"azureBlobStorage,omitempty" tf:"azure_blob_storage,omitempty"` + + // A file_system block as defined above. + // +kubebuilder:validation:Optional + FileSystem *LogsHTTPLogsFileSystemParameters `json:"fileSystem,omitempty" tf:"file_system,omitempty"` +} + +type WindowsWebAppLogsInitParameters struct { + + // A application_logs block as defined above. + ApplicationLogs *WindowsWebAppLogsApplicationLogsInitParameters `json:"applicationLogs,omitempty" tf:"application_logs,omitempty"` + + // Should detailed error messages be enabled. + DetailedErrorMessages *bool `json:"detailedErrorMessages,omitempty" tf:"detailed_error_messages,omitempty"` + + // Should tracing be enabled for failed requests. 
+ FailedRequestTracing *bool `json:"failedRequestTracing,omitempty" tf:"failed_request_tracing,omitempty"` + + // A http_logs block as defined above. + HTTPLogs *WindowsWebAppLogsHTTPLogsInitParameters `json:"httpLogs,omitempty" tf:"http_logs,omitempty"` +} + +type WindowsWebAppLogsObservation struct { + + // A application_logs block as defined above. + ApplicationLogs *WindowsWebAppLogsApplicationLogsObservation `json:"applicationLogs,omitempty" tf:"application_logs,omitempty"` + + // Should detailed error messages be enabled. + DetailedErrorMessages *bool `json:"detailedErrorMessages,omitempty" tf:"detailed_error_messages,omitempty"` + + // Should tracing be enabled for failed requests. + FailedRequestTracing *bool `json:"failedRequestTracing,omitempty" tf:"failed_request_tracing,omitempty"` + + // A http_logs block as defined above. + HTTPLogs *WindowsWebAppLogsHTTPLogsObservation `json:"httpLogs,omitempty" tf:"http_logs,omitempty"` +} + +type WindowsWebAppLogsParameters struct { + + // A application_logs block as defined above. + // +kubebuilder:validation:Optional + ApplicationLogs *WindowsWebAppLogsApplicationLogsParameters `json:"applicationLogs,omitempty" tf:"application_logs,omitempty"` + + // Should detailed error messages be enabled. + // +kubebuilder:validation:Optional + DetailedErrorMessages *bool `json:"detailedErrorMessages,omitempty" tf:"detailed_error_messages,omitempty"` + + // Should tracing be enabled for failed requests. + // +kubebuilder:validation:Optional + FailedRequestTracing *bool `json:"failedRequestTracing,omitempty" tf:"failed_request_tracing,omitempty"` + + // A http_logs block as defined above. + // +kubebuilder:validation:Optional + HTTPLogs *WindowsWebAppLogsHTTPLogsParameters `json:"httpLogs,omitempty" tf:"http_logs,omitempty"` +} + +type WindowsWebAppObservation struct { + + // A map of key-value pairs of App Settings. 
+ // +mapType=granular + AppSettings map[string]*string `json:"appSettings,omitempty" tf:"app_settings,omitempty"` + + // An auth_settings block as defined below. + AuthSettings *WindowsWebAppAuthSettingsObservation `json:"authSettings,omitempty" tf:"auth_settings,omitempty"` + + // An auth_settings_v2 block as defined below. + AuthSettingsV2 *WindowsWebAppAuthSettingsV2Observation `json:"authSettingsV2,omitempty" tf:"auth_settings_v2,omitempty"` + + // A backup block as defined below. + Backup *WindowsWebAppBackupObservation `json:"backup,omitempty" tf:"backup,omitempty"` + + // Should Client Affinity be enabled? + ClientAffinityEnabled *bool `json:"clientAffinityEnabled,omitempty" tf:"client_affinity_enabled,omitempty"` + + // Should Client Certificates be enabled? + ClientCertificateEnabled *bool `json:"clientCertificateEnabled,omitempty" tf:"client_certificate_enabled,omitempty"` + + // Paths to exclude when using client certificates, separated by ; + // Paths to exclude when using client certificates, separated by ; + ClientCertificateExclusionPaths *string `json:"clientCertificateExclusionPaths,omitempty" tf:"client_certificate_exclusion_paths,omitempty"` + + // The Client Certificate mode. Possible values are Required, Optional, and OptionalInteractiveUser. This property has no effect when client_cert_enabled is false. Defaults to Required. + ClientCertificateMode *string `json:"clientCertificateMode,omitempty" tf:"client_certificate_mode,omitempty"` + + // One or more connection_string blocks as defined below. + ConnectionString []WindowsWebAppConnectionStringObservation `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // The default hostname of the Windows Web App. + DefaultHostName *string `json:"defaultHostname,omitempty" tf:"default_hostname,omitempty"` + + // Should the Windows Web App be enabled? Defaults to true. 
+ Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Should the default FTP Basic Authentication publishing profile be enabled. Defaults to true. + FtpPublishBasicAuthenticationEnabled *bool `json:"ftpPublishBasicAuthenticationEnabled,omitempty" tf:"ftp_publish_basic_authentication_enabled,omitempty"` + + // Should the Windows Web App require HTTPS connections. Defaults to false. + HTTPSOnly *bool `json:"httpsOnly,omitempty" tf:"https_only,omitempty"` + + // The ID of the App Service Environment used by App Service. + HostingEnvironmentID *string `json:"hostingEnvironmentId,omitempty" tf:"hosting_environment_id,omitempty"` + + // The ID of the Windows Web App. + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. + Identity *WindowsWebAppIdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // The User Assigned Identity ID used for accessing KeyVault secrets. The identity must be assigned to the application in the identity block. For more information see - Access vaults with a user-assigned identity + KeyVaultReferenceIdentityID *string `json:"keyVaultReferenceIdentityId,omitempty" tf:"key_vault_reference_identity_id,omitempty"` + + // The Kind value for this Windows Web App. + Kind *string `json:"kind,omitempty" tf:"kind,omitempty"` + + // The Azure Region where the Windows Web App should exist. Changing this forces a new Windows Web App to be created. + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A logs block as defined below. + Logs *WindowsWebAppLogsObservation `json:"logs,omitempty" tf:"logs,omitempty"` + + // A list of outbound IP addresses - such as ["52.23.25.3", "52.143.43.12"] + OutboundIPAddressList []*string `json:"outboundIpAddressList,omitempty" tf:"outbound_ip_address_list,omitempty"` + + // A comma separated list of outbound IP addresses - such as 52.23.25.3,52.143.43.12. 
+ OutboundIPAddresses *string `json:"outboundIpAddresses,omitempty" tf:"outbound_ip_addresses,omitempty"` + + // A list of possible outbound ip address. + PossibleOutboundIPAddressList []*string `json:"possibleOutboundIpAddressList,omitempty" tf:"possible_outbound_ip_address_list,omitempty"` + + // A comma separated list of outbound IP addresses - such as 52.23.25.3,52.143.43.12,52.143.43.17 - not all of which are necessarily in use. Superset of outbound_ip_addresses. + PossibleOutboundIPAddresses *string `json:"possibleOutboundIpAddresses,omitempty" tf:"possible_outbound_ip_addresses,omitempty"` + + // Should public network access be enabled for the Web App. Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The name of the Resource Group where the Windows Web App should exist. Changing this forces a new Windows Web App to be created. + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // The ID of the Service Plan that this Windows App Service will be created in. + ServicePlanID *string `json:"servicePlanId,omitempty" tf:"service_plan_id,omitempty"` + + // A site_config block as defined below. + SiteConfig *WindowsWebAppSiteConfigObservation `json:"siteConfig,omitempty" tf:"site_config,omitempty"` + + // A sticky_settings block as defined below. + StickySettings *WindowsWebAppStickySettingsObservation `json:"stickySettings,omitempty" tf:"sticky_settings,omitempty"` + + // One or more storage_account blocks as defined below. + StorageAccount []WindowsWebAppStorageAccountObservation `json:"storageAccount,omitempty" tf:"storage_account,omitempty"` + + // A mapping of tags which should be assigned to the Windows Web App. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The subnet id which will be used by this Web App for regional virtual network integration. 
+ VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` + + // Should the default WebDeploy Basic Authentication publishing credentials enabled. Defaults to true. + WebdeployPublishBasicAuthenticationEnabled *bool `json:"webdeployPublishBasicAuthenticationEnabled,omitempty" tf:"webdeploy_publish_basic_authentication_enabled,omitempty"` + + // The local path and filename of the Zip packaged application to deploy to this Windows Web App. + // The local path and filename of the Zip packaged application to deploy to this Windows Web App. **Note:** Using this value requires either `WEBSITE_RUN_FROM_PACKAGE=1` or `SCM_DO_BUILD_DURING_DEPLOYMENT=true` to be set on the App in `app_settings`. + ZipDeployFile *string `json:"zipDeployFile,omitempty" tf:"zip_deploy_file,omitempty"` +} + +type WindowsWebAppParameters struct { + + // A map of key-value pairs of App Settings. + // +kubebuilder:validation:Optional + // +mapType=granular + AppSettings map[string]*string `json:"appSettings,omitempty" tf:"app_settings,omitempty"` + + // An auth_settings block as defined below. + // +kubebuilder:validation:Optional + AuthSettings *WindowsWebAppAuthSettingsParameters `json:"authSettings,omitempty" tf:"auth_settings,omitempty"` + + // An auth_settings_v2 block as defined below. + // +kubebuilder:validation:Optional + AuthSettingsV2 *WindowsWebAppAuthSettingsV2Parameters `json:"authSettingsV2,omitempty" tf:"auth_settings_v2,omitempty"` + + // A backup block as defined below. + // +kubebuilder:validation:Optional + Backup *WindowsWebAppBackupParameters `json:"backup,omitempty" tf:"backup,omitempty"` + + // Should Client Affinity be enabled? + // +kubebuilder:validation:Optional + ClientAffinityEnabled *bool `json:"clientAffinityEnabled,omitempty" tf:"client_affinity_enabled,omitempty"` + + // Should Client Certificates be enabled? 
+ // +kubebuilder:validation:Optional + ClientCertificateEnabled *bool `json:"clientCertificateEnabled,omitempty" tf:"client_certificate_enabled,omitempty"` + + // Paths to exclude when using client certificates, separated by ; + // Paths to exclude when using client certificates, separated by ; + // +kubebuilder:validation:Optional + ClientCertificateExclusionPaths *string `json:"clientCertificateExclusionPaths,omitempty" tf:"client_certificate_exclusion_paths,omitempty"` + + // The Client Certificate mode. Possible values are Required, Optional, and OptionalInteractiveUser. This property has no effect when client_cert_enabled is false. Defaults to Required. + // +kubebuilder:validation:Optional + ClientCertificateMode *string `json:"clientCertificateMode,omitempty" tf:"client_certificate_mode,omitempty"` + + // One or more connection_string blocks as defined below. + // +kubebuilder:validation:Optional + ConnectionString []WindowsWebAppConnectionStringParameters `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // Should the Windows Web App be enabled? Defaults to true. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Should the default FTP Basic Authentication publishing profile be enabled. Defaults to true. + // +kubebuilder:validation:Optional + FtpPublishBasicAuthenticationEnabled *bool `json:"ftpPublishBasicAuthenticationEnabled,omitempty" tf:"ftp_publish_basic_authentication_enabled,omitempty"` + + // Should the Windows Web App require HTTPS connections. Defaults to false. + // +kubebuilder:validation:Optional + HTTPSOnly *bool `json:"httpsOnly,omitempty" tf:"https_only,omitempty"` + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *WindowsWebAppIdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The User Assigned Identity ID used for accessing KeyVault secrets. 
The identity must be assigned to the application in the identity block. For more information see - Access vaults with a user-assigned identity + // +kubebuilder:validation:Optional + KeyVaultReferenceIdentityID *string `json:"keyVaultReferenceIdentityId,omitempty" tf:"key_vault_reference_identity_id,omitempty"` + + // The Azure Region where the Windows Web App should exist. Changing this forces a new Windows Web App to be created. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // A logs block as defined below. + // +kubebuilder:validation:Optional + Logs *WindowsWebAppLogsParameters `json:"logs,omitempty" tf:"logs,omitempty"` + + // Should public network access be enabled for the Web App. Defaults to true. + // +kubebuilder:validation:Optional + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The name of the Resource Group where the Windows Web App should exist. Changing this forces a new Windows Web App to be created. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/azure/v1beta1.ResourceGroup + // +kubebuilder:validation:Optional + ResourceGroupName *string `json:"resourceGroupName,omitempty" tf:"resource_group_name,omitempty"` + + // Reference to a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameRef *v1.Reference `json:"resourceGroupNameRef,omitempty" tf:"-"` + + // Selector for a ResourceGroup in azure to populate resourceGroupName. + // +kubebuilder:validation:Optional + ResourceGroupNameSelector *v1.Selector `json:"resourceGroupNameSelector,omitempty" tf:"-"` + + // The ID of the Service Plan that this Windows App Service will be created in. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/web/v1beta1.ServicePlan + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + ServicePlanID *string `json:"servicePlanId,omitempty" tf:"service_plan_id,omitempty"` + + // Reference to a ServicePlan in web to populate servicePlanId. + // +kubebuilder:validation:Optional + ServicePlanIDRef *v1.Reference `json:"servicePlanIdRef,omitempty" tf:"-"` + + // Selector for a ServicePlan in web to populate servicePlanId. + // +kubebuilder:validation:Optional + ServicePlanIDSelector *v1.Selector `json:"servicePlanIdSelector,omitempty" tf:"-"` + + // A site_config block as defined below. + // +kubebuilder:validation:Optional + SiteConfig *WindowsWebAppSiteConfigParameters `json:"siteConfig,omitempty" tf:"site_config,omitempty"` + + // A sticky_settings block as defined below. + // +kubebuilder:validation:Optional + StickySettings *WindowsWebAppStickySettingsParameters `json:"stickySettings,omitempty" tf:"sticky_settings,omitempty"` + + // One or more storage_account blocks as defined below. + // +kubebuilder:validation:Optional + StorageAccount []WindowsWebAppStorageAccountParameters `json:"storageAccount,omitempty" tf:"storage_account,omitempty"` + + // A mapping of tags which should be assigned to the Windows Web App. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The subnet id which will be used by this Web App for regional virtual network integration. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDRef *v1.Reference `json:"virtualNetworkSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDSelector *v1.Selector `json:"virtualNetworkSubnetIdSelector,omitempty" tf:"-"` + + // Should the default WebDeploy Basic Authentication publishing credentials enabled. Defaults to true. + // +kubebuilder:validation:Optional + WebdeployPublishBasicAuthenticationEnabled *bool `json:"webdeployPublishBasicAuthenticationEnabled,omitempty" tf:"webdeploy_publish_basic_authentication_enabled,omitempty"` + + // The local path and filename of the Zip packaged application to deploy to this Windows Web App. + // The local path and filename of the Zip packaged application to deploy to this Windows Web App. **Note:** Using this value requires either `WEBSITE_RUN_FROM_PACKAGE=1` or `SCM_DO_BUILD_DURING_DEPLOYMENT=true` to be set on the App in `app_settings`. + // +kubebuilder:validation:Optional + ZipDeployFile *string `json:"zipDeployFile,omitempty" tf:"zip_deploy_file,omitempty"` +} + +type WindowsWebAppSiteConfigApplicationStackInitParameters struct { + + // The Application Stack for the Windows Web App. Possible values include dotnet, dotnetcore, node, python, php, and java. + CurrentStack *string `json:"currentStack,omitempty" tf:"current_stack,omitempty"` + + // The name of the container to be used. This value is required with docker_container_tag. 
+ DockerContainerName *string `json:"dockerContainerName,omitempty" tf:"docker_container_name,omitempty"` + + DockerContainerRegistry *string `json:"dockerContainerRegistry,omitempty" tf:"docker_container_registry,omitempty"` + + // The tag of the container to be used. This value is required with docker_container_name. + DockerContainerTag *string `json:"dockerContainerTag,omitempty" tf:"docker_container_tag,omitempty"` + + // The docker image, including tag, to be used. e.g. azure-app-service/windows/parkingpage:latest. + DockerImageName *string `json:"dockerImageName,omitempty" tf:"docker_image_name,omitempty"` + + // The URL of the container registry where the docker_image_name is located. e.g. https://index.docker.io or https://mcr.microsoft.com. This value is required with docker_image_name. + DockerRegistryURL *string `json:"dockerRegistryUrl,omitempty" tf:"docker_registry_url,omitempty"` + + // The User Name to use for authentication against the registry to pull the image. + DockerRegistryUsername *string `json:"dockerRegistryUsername,omitempty" tf:"docker_registry_username,omitempty"` + + // The version of .NET to use when current_stack is set to dotnetcore. Possible values include v4.0. + // The version of DotNetCore to use. + DotnetCoreVersion *string `json:"dotnetCoreVersion,omitempty" tf:"dotnet_core_version,omitempty"` + + // The version of .NET to use when current_stack is set to dotnet. Possible values include v2.0,v3.0, v4.0, v5.0, v6.0, v7.0 and v8.0. + DotnetVersion *string `json:"dotnetVersion,omitempty" tf:"dotnet_version,omitempty"` + + JavaContainer *string `json:"javaContainer,omitempty" tf:"java_container,omitempty"` + + JavaContainerVersion *string `json:"javaContainerVersion,omitempty" tf:"java_container_version,omitempty"` + + // Should the Java Embedded Server (Java SE) be used to run the app. + // Should the application use the embedded web server for the version of Java in use. 
+ JavaEmbeddedServerEnabled *bool `json:"javaEmbeddedServerEnabled,omitempty" tf:"java_embedded_server_enabled,omitempty"` + + // The version of Java to use when current_stack is set to java. + JavaVersion *string `json:"javaVersion,omitempty" tf:"java_version,omitempty"` + + // The version of node to use when current_stack is set to node. Possible values are ~12, ~14, ~16, and ~18. + NodeVersion *string `json:"nodeVersion,omitempty" tf:"node_version,omitempty"` + + // The version of PHP to use when current_stack is set to php. Possible values are 7.1, 7.4 and Off. + PHPVersion *string `json:"phpVersion,omitempty" tf:"php_version,omitempty"` + + // Specifies whether this is a Python app. Defaults to false. + Python *bool `json:"python,omitempty" tf:"python,omitempty"` + + PythonVersion *string `json:"pythonVersion,omitempty" tf:"python_version,omitempty"` + + // The version of Tomcat the Java App should use. Conflicts with java_embedded_server_enabled + TomcatVersion *string `json:"tomcatVersion,omitempty" tf:"tomcat_version,omitempty"` +} + +type WindowsWebAppSiteConfigApplicationStackObservation struct { + + // The Application Stack for the Windows Web App. Possible values include dotnet, dotnetcore, node, python, php, and java. + CurrentStack *string `json:"currentStack,omitempty" tf:"current_stack,omitempty"` + + // The name of the container to be used. This value is required with docker_container_tag. + DockerContainerName *string `json:"dockerContainerName,omitempty" tf:"docker_container_name,omitempty"` + + DockerContainerRegistry *string `json:"dockerContainerRegistry,omitempty" tf:"docker_container_registry,omitempty"` + + // The tag of the container to be used. This value is required with docker_container_name. + DockerContainerTag *string `json:"dockerContainerTag,omitempty" tf:"docker_container_tag,omitempty"` + + // The docker image, including tag, to be used. e.g. azure-app-service/windows/parkingpage:latest. 
+ DockerImageName *string `json:"dockerImageName,omitempty" tf:"docker_image_name,omitempty"` + + // The URL of the container registry where the docker_image_name is located. e.g. https://index.docker.io or https://mcr.microsoft.com. This value is required with docker_image_name. + DockerRegistryURL *string `json:"dockerRegistryUrl,omitempty" tf:"docker_registry_url,omitempty"` + + // The User Name to use for authentication against the registry to pull the image. + DockerRegistryUsername *string `json:"dockerRegistryUsername,omitempty" tf:"docker_registry_username,omitempty"` + + // The version of .NET to use when current_stack is set to dotnetcore. Possible values include v4.0. + // The version of DotNetCore to use. + DotnetCoreVersion *string `json:"dotnetCoreVersion,omitempty" tf:"dotnet_core_version,omitempty"` + + // The version of .NET to use when current_stack is set to dotnet. Possible values include v2.0,v3.0, v4.0, v5.0, v6.0, v7.0 and v8.0. + DotnetVersion *string `json:"dotnetVersion,omitempty" tf:"dotnet_version,omitempty"` + + JavaContainer *string `json:"javaContainer,omitempty" tf:"java_container,omitempty"` + + JavaContainerVersion *string `json:"javaContainerVersion,omitempty" tf:"java_container_version,omitempty"` + + // Should the Java Embedded Server (Java SE) be used to run the app. + // Should the application use the embedded web server for the version of Java in use. + JavaEmbeddedServerEnabled *bool `json:"javaEmbeddedServerEnabled,omitempty" tf:"java_embedded_server_enabled,omitempty"` + + // The version of Java to use when current_stack is set to java. + JavaVersion *string `json:"javaVersion,omitempty" tf:"java_version,omitempty"` + + // The version of node to use when current_stack is set to node. Possible values are ~12, ~14, ~16, and ~18. + NodeVersion *string `json:"nodeVersion,omitempty" tf:"node_version,omitempty"` + + // The version of PHP to use when current_stack is set to php. Possible values are 7.1, 7.4 and Off. 
+ PHPVersion *string `json:"phpVersion,omitempty" tf:"php_version,omitempty"` + + // Specifies whether this is a Python app. Defaults to false. + Python *bool `json:"python,omitempty" tf:"python,omitempty"` + + PythonVersion *string `json:"pythonVersion,omitempty" tf:"python_version,omitempty"` + + // The version of Tomcat the Java App should use. Conflicts with java_embedded_server_enabled + TomcatVersion *string `json:"tomcatVersion,omitempty" tf:"tomcat_version,omitempty"` +} + +type WindowsWebAppSiteConfigApplicationStackParameters struct { + + // The Application Stack for the Windows Web App. Possible values include dotnet, dotnetcore, node, python, php, and java. + // +kubebuilder:validation:Optional + CurrentStack *string `json:"currentStack,omitempty" tf:"current_stack,omitempty"` + + // The name of the container to be used. This value is required with docker_container_tag. + // +kubebuilder:validation:Optional + DockerContainerName *string `json:"dockerContainerName,omitempty" tf:"docker_container_name,omitempty"` + + // +kubebuilder:validation:Optional + DockerContainerRegistry *string `json:"dockerContainerRegistry,omitempty" tf:"docker_container_registry,omitempty"` + + // The tag of the container to be used. This value is required with docker_container_name. + // +kubebuilder:validation:Optional + DockerContainerTag *string `json:"dockerContainerTag,omitempty" tf:"docker_container_tag,omitempty"` + + // The docker image, including tag, to be used. e.g. azure-app-service/windows/parkingpage:latest. + // +kubebuilder:validation:Optional + DockerImageName *string `json:"dockerImageName,omitempty" tf:"docker_image_name,omitempty"` + + // The User Name to use for authentication against the registry to pull the image. + // +kubebuilder:validation:Optional + DockerRegistryPasswordSecretRef *v1.SecretKeySelector `json:"dockerRegistryPasswordSecretRef,omitempty" tf:"-"` + + // The URL of the container registry where the docker_image_name is located. e.g. 
https://index.docker.io or https://mcr.microsoft.com. This value is required with docker_image_name. + // +kubebuilder:validation:Optional + DockerRegistryURL *string `json:"dockerRegistryUrl,omitempty" tf:"docker_registry_url,omitempty"` + + // The User Name to use for authentication against the registry to pull the image. + // +kubebuilder:validation:Optional + DockerRegistryUsername *string `json:"dockerRegistryUsername,omitempty" tf:"docker_registry_username,omitempty"` + + // The version of .NET to use when current_stack is set to dotnetcore. Possible values include v4.0. + // The version of DotNetCore to use. + // +kubebuilder:validation:Optional + DotnetCoreVersion *string `json:"dotnetCoreVersion,omitempty" tf:"dotnet_core_version,omitempty"` + + // The version of .NET to use when current_stack is set to dotnet. Possible values include v2.0,v3.0, v4.0, v5.0, v6.0, v7.0 and v8.0. + // +kubebuilder:validation:Optional + DotnetVersion *string `json:"dotnetVersion,omitempty" tf:"dotnet_version,omitempty"` + + // +kubebuilder:validation:Optional + JavaContainer *string `json:"javaContainer,omitempty" tf:"java_container,omitempty"` + + // +kubebuilder:validation:Optional + JavaContainerVersion *string `json:"javaContainerVersion,omitempty" tf:"java_container_version,omitempty"` + + // Should the Java Embedded Server (Java SE) be used to run the app. + // Should the application use the embedded web server for the version of Java in use. + // +kubebuilder:validation:Optional + JavaEmbeddedServerEnabled *bool `json:"javaEmbeddedServerEnabled,omitempty" tf:"java_embedded_server_enabled,omitempty"` + + // The version of Java to use when current_stack is set to java. + // +kubebuilder:validation:Optional + JavaVersion *string `json:"javaVersion,omitempty" tf:"java_version,omitempty"` + + // The version of node to use when current_stack is set to node. Possible values are ~12, ~14, ~16, and ~18. 
+ // +kubebuilder:validation:Optional + NodeVersion *string `json:"nodeVersion,omitempty" tf:"node_version,omitempty"` + + // The version of PHP to use when current_stack is set to php. Possible values are 7.1, 7.4 and Off. + // +kubebuilder:validation:Optional + PHPVersion *string `json:"phpVersion,omitempty" tf:"php_version,omitempty"` + + // Specifies whether this is a Python app. Defaults to false. + // +kubebuilder:validation:Optional + Python *bool `json:"python,omitempty" tf:"python,omitempty"` + + // +kubebuilder:validation:Optional + PythonVersion *string `json:"pythonVersion,omitempty" tf:"python_version,omitempty"` + + // The version of Tomcat the Java App should use. Conflicts with java_embedded_server_enabled + // +kubebuilder:validation:Optional + TomcatVersion *string `json:"tomcatVersion,omitempty" tf:"tomcat_version,omitempty"` +} + +type WindowsWebAppSiteConfigAutoHealSettingInitParameters struct { + + // The action to take. Possible values are Allow or Deny. Defaults to Allow. + Action *SiteConfigAutoHealSettingActionInitParameters `json:"action,omitempty" tf:"action,omitempty"` + + // A trigger block as defined below. + Trigger *SiteConfigAutoHealSettingTriggerInitParameters `json:"trigger,omitempty" tf:"trigger,omitempty"` +} + +type WindowsWebAppSiteConfigAutoHealSettingObservation struct { + + // The action to take. Possible values are Allow or Deny. Defaults to Allow. + Action *SiteConfigAutoHealSettingActionObservation `json:"action,omitempty" tf:"action,omitempty"` + + // A trigger block as defined below. + Trigger *SiteConfigAutoHealSettingTriggerObservation `json:"trigger,omitempty" tf:"trigger,omitempty"` +} + +type WindowsWebAppSiteConfigAutoHealSettingParameters struct { + + // The action to take. Possible values are Allow or Deny. Defaults to Allow. + // +kubebuilder:validation:Optional + Action *SiteConfigAutoHealSettingActionParameters `json:"action" tf:"action,omitempty"` + + // A trigger block as defined below. 
+ // +kubebuilder:validation:Optional + Trigger *SiteConfigAutoHealSettingTriggerParameters `json:"trigger" tf:"trigger,omitempty"` +} + +type WindowsWebAppSiteConfigCorsInitParameters struct { + + // Specifies a list of origins that should be allowed to make cross-origin calls. + // Specifies a list of origins that should be allowed to make cross-origin calls. + // +listType=set + AllowedOrigins []*string `json:"allowedOrigins,omitempty" tf:"allowed_origins,omitempty"` + + // Whether CORS requests with credentials are allowed. Defaults to false + // Are credentials allowed in CORS requests? Defaults to `false`. + SupportCredentials *bool `json:"supportCredentials,omitempty" tf:"support_credentials,omitempty"` +} + +type WindowsWebAppSiteConfigCorsObservation struct { + + // Specifies a list of origins that should be allowed to make cross-origin calls. + // Specifies a list of origins that should be allowed to make cross-origin calls. + // +listType=set + AllowedOrigins []*string `json:"allowedOrigins,omitempty" tf:"allowed_origins,omitempty"` + + // Whether CORS requests with credentials are allowed. Defaults to false + // Are credentials allowed in CORS requests? Defaults to `false`. + SupportCredentials *bool `json:"supportCredentials,omitempty" tf:"support_credentials,omitempty"` +} + +type WindowsWebAppSiteConfigCorsParameters struct { + + // Specifies a list of origins that should be allowed to make cross-origin calls. + // Specifies a list of origins that should be allowed to make cross-origin calls. + // +kubebuilder:validation:Optional + // +listType=set + AllowedOrigins []*string `json:"allowedOrigins,omitempty" tf:"allowed_origins,omitempty"` + + // Whether CORS requests with credentials are allowed. Defaults to false + // Are credentials allowed in CORS requests? Defaults to `false`. 
+ // +kubebuilder:validation:Optional + SupportCredentials *bool `json:"supportCredentials,omitempty" tf:"support_credentials,omitempty"` +} + +type WindowsWebAppSiteConfigIPRestrictionHeadersInitParameters struct { + + // Specifies a list of Azure Front Door IDs. + XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid"` + + // Specifies if a Front Door Health Probe should be expected. The only possible value is 1. + XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe"` + + // Specifies a list of addresses for which matching should be applied. Omitting this value means allow any. + XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for"` + + // Specifies a list of Hosts for which matching should be applied. + XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host"` +} + +type WindowsWebAppSiteConfigIPRestrictionHeadersObservation struct { + + // Specifies a list of Azure Front Door IDs. + XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid,omitempty"` + + // Specifies if a Front Door Health Probe should be expected. The only possible value is 1. + XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe,omitempty"` + + // Specifies a list of addresses for which matching should be applied. Omitting this value means allow any. + XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for,omitempty"` + + // Specifies a list of Hosts for which matching should be applied. + XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host,omitempty"` +} + +type WindowsWebAppSiteConfigIPRestrictionHeadersParameters struct { + + // Specifies a list of Azure Front Door IDs. + // +kubebuilder:validation:Optional + XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid"` + + // Specifies if a Front Door Health Probe should be expected. The only possible value is 1. 
+ // +kubebuilder:validation:Optional + XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe"` + + // Specifies a list of addresses for which matching should be applied. Omitting this value means allow any. + // +kubebuilder:validation:Optional + XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for"` + + // Specifies a list of Hosts for which matching should be applied. + // +kubebuilder:validation:Optional + XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host"` +} + +type WindowsWebAppSiteConfigIPRestrictionInitParameters struct { + + // The action to take. Possible values are Allow or Deny. Defaults to Allow. + // The action to take. Possible values are `Allow` or `Deny`. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The Description of this IP Restriction. + // The description of the IP restriction rule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A headers block as defined above. + Headers []WindowsWebAppSiteConfigIPRestrictionHeadersInitParameters `json:"headers,omitempty" tf:"headers,omitempty"` + + // The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + // The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // The name which should be used for this TODO. + // The name which should be used for this `ip_restriction`. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The priority value of this ip_restriction. Defaults to 65000. + // The priority value of this `ip_restriction`. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Service Tag used for this IP Restriction. + // The Service Tag used for this IP Restriction. 
+ ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag,omitempty"` + + // The subnet id which will be used by this Web App for regional virtual network integration. + // The Virtual Network Subnet ID used for this IP Restriction. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDRef *v1.Reference `json:"virtualNetworkSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDSelector *v1.Selector `json:"virtualNetworkSubnetIdSelector,omitempty" tf:"-"` +} + +type WindowsWebAppSiteConfigIPRestrictionObservation struct { + + // The action to take. Possible values are Allow or Deny. Defaults to Allow. + // The action to take. Possible values are `Allow` or `Deny`. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The Description of this IP Restriction. + // The description of the IP restriction rule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A headers block as defined above. + Headers []WindowsWebAppSiteConfigIPRestrictionHeadersObservation `json:"headers,omitempty" tf:"headers,omitempty"` + + // The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + // The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // The name which should be used for this TODO. 
+ // The name which should be used for this `ip_restriction`. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The priority value of this ip_restriction. Defaults to 65000. + // The priority value of this `ip_restriction`. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Service Tag used for this IP Restriction. + // The Service Tag used for this IP Restriction. + ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag,omitempty"` + + // The subnet id which will be used by this Web App for regional virtual network integration. + // The Virtual Network Subnet ID used for this IP Restriction. + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` +} + +type WindowsWebAppSiteConfigIPRestrictionParameters struct { + + // The action to take. Possible values are Allow or Deny. Defaults to Allow. + // The action to take. Possible values are `Allow` or `Deny`. + // +kubebuilder:validation:Optional + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The Description of this IP Restriction. + // The description of the IP restriction rule. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A headers block as defined above. + // +kubebuilder:validation:Optional + Headers []WindowsWebAppSiteConfigIPRestrictionHeadersParameters `json:"headers,omitempty" tf:"headers,omitempty"` + + // The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + // The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + // +kubebuilder:validation:Optional + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // The name which should be used for this TODO. + // The name which should be used for this `ip_restriction`. 
+ // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The priority value of this ip_restriction. Defaults to 65000. + // The priority value of this `ip_restriction`. + // +kubebuilder:validation:Optional + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Service Tag used for this IP Restriction. + // The Service Tag used for this IP Restriction. + // +kubebuilder:validation:Optional + ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag,omitempty"` + + // The subnet id which will be used by this Web App for regional virtual network integration. + // The Virtual Network Subnet ID used for this IP Restriction. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDRef *v1.Reference `json:"virtualNetworkSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDSelector *v1.Selector `json:"virtualNetworkSubnetIdSelector,omitempty" tf:"-"` +} + +type WindowsWebAppSiteConfigInitParameters struct { + + // The URL to the API Definition for this Windows Web App. + APIDefinitionURL *string `json:"apiDefinitionUrl,omitempty" tf:"api_definition_url,omitempty"` + + // The API Management API ID this Windows Web App Slot is associated with. + APIManagementAPIID *string `json:"apiManagementApiId,omitempty" tf:"api_management_api_id,omitempty"` + + // If this Windows Web App is Always On enabled. Defaults to true. 
+ AlwaysOn *bool `json:"alwaysOn,omitempty" tf:"always_on,omitempty"` + + // The App command line to launch. + AppCommandLine *string `json:"appCommandLine,omitempty" tf:"app_command_line,omitempty"` + + // A application_stack block as defined above. + ApplicationStack *WindowsWebAppSiteConfigApplicationStackInitParameters `json:"applicationStack,omitempty" tf:"application_stack,omitempty"` + + // Should Auto heal rules be enabled. Required with auto_heal_setting. + AutoHealEnabled *bool `json:"autoHealEnabled,omitempty" tf:"auto_heal_enabled,omitempty"` + + // A auto_heal_setting block as defined above. Required with auto_heal. + AutoHealSetting *WindowsWebAppSiteConfigAutoHealSettingInitParameters `json:"autoHealSetting,omitempty" tf:"auto_heal_setting,omitempty"` + + // The Client ID of the Managed Service Identity to use for connections to the Azure Container Registry. + ContainerRegistryManagedIdentityClientID *string `json:"containerRegistryManagedIdentityClientId,omitempty" tf:"container_registry_managed_identity_client_id,omitempty"` + + // Should connections for Azure Container Registry use Managed Identity. + ContainerRegistryUseManagedIdentity *bool `json:"containerRegistryUseManagedIdentity,omitempty" tf:"container_registry_use_managed_identity,omitempty"` + + // A cors block as defined above. + Cors *WindowsWebAppSiteConfigCorsInitParameters `json:"cors,omitempty" tf:"cors,omitempty"` + + // Specifies a list of Default Documents for the Windows Web App. + DefaultDocuments []*string `json:"defaultDocuments,omitempty" tf:"default_documents,omitempty"` + + // The State of FTP / FTPS service. Possible values include: AllAllowed, FtpsOnly, Disabled. Defaults to Disabled. + FtpsState *string `json:"ftpsState,omitempty" tf:"ftps_state,omitempty"` + + // The amount of time in minutes that a node can be unhealthy before being removed from the load balancer. Possible values are between 2 and 10. Only valid in conjunction with health_check_path. 
+ // The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between `2` and `10`. Defaults to `10`. Only valid in conjunction with `health_check_path` + HealthCheckEvictionTimeInMin *float64 `json:"healthCheckEvictionTimeInMin,omitempty" tf:"health_check_eviction_time_in_min,omitempty"` + + // The path to the Health Check. + HealthCheckPath *string `json:"healthCheckPath,omitempty" tf:"health_check_path,omitempty"` + + // Should the HTTP2 be enabled? + Http2Enabled *bool `json:"http2Enabled,omitempty" tf:"http2_enabled,omitempty"` + + // One or more ip_restriction blocks as defined above. + IPRestriction []WindowsWebAppSiteConfigIPRestrictionInitParameters `json:"ipRestriction,omitempty" tf:"ip_restriction,omitempty"` + + // The Default action for traffic that does not match any ip_restriction rule. possible values include Allow and Deny. Defaults to Allow. + IPRestrictionDefaultAction *string `json:"ipRestrictionDefaultAction,omitempty" tf:"ip_restriction_default_action,omitempty"` + + // The Site load balancing. Possible values include: WeightedRoundRobin, LeastRequests, LeastResponseTime, WeightedTotalTraffic, RequestHash, PerSiteRoundRobin. Defaults to LeastRequests if omitted. + LoadBalancingMode *string `json:"loadBalancingMode,omitempty" tf:"load_balancing_mode,omitempty"` + + // Use Local MySQL. Defaults to false. + LocalMySQLEnabled *bool `json:"localMysqlEnabled,omitempty" tf:"local_mysql_enabled,omitempty"` + + // Managed pipeline mode. Possible values include: Integrated, Classic. Defaults to Integrated. + ManagedPipelineMode *string `json:"managedPipelineMode,omitempty" tf:"managed_pipeline_mode,omitempty"` + + // The configures the minimum version of TLS required for SSL requests. Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + MinimumTLSVersion *string `json:"minimumTlsVersion,omitempty" tf:"minimum_tls_version,omitempty"` + + // Should Remote Debugging be enabled. 
Defaults to false. + RemoteDebuggingEnabled *bool `json:"remoteDebuggingEnabled,omitempty" tf:"remote_debugging_enabled,omitempty"` + + // The Remote Debugging Version. Possible values include VS2017, VS2019 and VS2022. + RemoteDebuggingVersion *string `json:"remoteDebuggingVersion,omitempty" tf:"remote_debugging_version,omitempty"` + + // One or more scm_ip_restriction blocks as defined above. + ScmIPRestriction []WindowsWebAppSiteConfigScmIPRestrictionInitParameters `json:"scmIpRestriction,omitempty" tf:"scm_ip_restriction,omitempty"` + + // The Default action for traffic that does not match any scm_ip_restriction rule. possible values include Allow and Deny. Defaults to Allow. + ScmIPRestrictionDefaultAction *string `json:"scmIpRestrictionDefaultAction,omitempty" tf:"scm_ip_restriction_default_action,omitempty"` + + // The configures the minimum version of TLS required for SSL requests to the SCM site Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + ScmMinimumTLSVersion *string `json:"scmMinimumTlsVersion,omitempty" tf:"scm_minimum_tls_version,omitempty"` + + // Should the Windows Web App ip_restriction configuration be used for the SCM also. + ScmUseMainIPRestriction *bool `json:"scmUseMainIpRestriction,omitempty" tf:"scm_use_main_ip_restriction,omitempty"` + + // Should the Windows Web App use a 32-bit worker. Defaults to true. + Use32BitWorker *bool `json:"use32BitWorker,omitempty" tf:"use_32_bit_worker,omitempty"` + + // One or more virtual_application blocks as defined below. + VirtualApplication []VirtualApplicationInitParameters `json:"virtualApplication,omitempty" tf:"virtual_application,omitempty"` + + // Should all outbound traffic to have NAT Gateways, Network Security Groups and User Defined Routes applied? Defaults to false. + // Should all outbound traffic to have Virtual Network Security Groups and User Defined Routes applied? Defaults to `false`. 
+ VnetRouteAllEnabled *bool `json:"vnetRouteAllEnabled,omitempty" tf:"vnet_route_all_enabled,omitempty"` + + // Should Web Sockets be enabled. Defaults to false. + WebsocketsEnabled *bool `json:"websocketsEnabled,omitempty" tf:"websockets_enabled,omitempty"` + + // The number of Workers for this Windows App Service. + WorkerCount *float64 `json:"workerCount,omitempty" tf:"worker_count,omitempty"` +} + +type WindowsWebAppSiteConfigObservation struct { + + // The URL to the API Definition for this Windows Web App. + APIDefinitionURL *string `json:"apiDefinitionUrl,omitempty" tf:"api_definition_url,omitempty"` + + // The API Management API ID this Windows Web App Slot is associated with. + APIManagementAPIID *string `json:"apiManagementApiId,omitempty" tf:"api_management_api_id,omitempty"` + + // If this Windows Web App is Always On enabled. Defaults to true. + AlwaysOn *bool `json:"alwaysOn,omitempty" tf:"always_on,omitempty"` + + // The App command line to launch. + AppCommandLine *string `json:"appCommandLine,omitempty" tf:"app_command_line,omitempty"` + + // A application_stack block as defined above. + ApplicationStack *WindowsWebAppSiteConfigApplicationStackObservation `json:"applicationStack,omitempty" tf:"application_stack,omitempty"` + + // Should Auto heal rules be enabled. Required with auto_heal_setting. + AutoHealEnabled *bool `json:"autoHealEnabled,omitempty" tf:"auto_heal_enabled,omitempty"` + + // A auto_heal_setting block as defined above. Required with auto_heal. + AutoHealSetting *WindowsWebAppSiteConfigAutoHealSettingObservation `json:"autoHealSetting,omitempty" tf:"auto_heal_setting,omitempty"` + + // The Client ID of the Managed Service Identity to use for connections to the Azure Container Registry. + ContainerRegistryManagedIdentityClientID *string `json:"containerRegistryManagedIdentityClientId,omitempty" tf:"container_registry_managed_identity_client_id,omitempty"` + + // Should connections for Azure Container Registry use Managed Identity. 
+ ContainerRegistryUseManagedIdentity *bool `json:"containerRegistryUseManagedIdentity,omitempty" tf:"container_registry_use_managed_identity,omitempty"` + + // A cors block as defined above. + Cors *WindowsWebAppSiteConfigCorsObservation `json:"cors,omitempty" tf:"cors,omitempty"` + + // Specifies a list of Default Documents for the Windows Web App. + DefaultDocuments []*string `json:"defaultDocuments,omitempty" tf:"default_documents,omitempty"` + + // Should the Windows Web App be enabled? Defaults to true. + DetailedErrorLoggingEnabled *bool `json:"detailedErrorLoggingEnabled,omitempty" tf:"detailed_error_logging_enabled,omitempty"` + + // The State of FTP / FTPS service. Possible values include: AllAllowed, FtpsOnly, Disabled. Defaults to Disabled. + FtpsState *string `json:"ftpsState,omitempty" tf:"ftps_state,omitempty"` + + // The amount of time in minutes that a node can be unhealthy before being removed from the load balancer. Possible values are between 2 and 10. Only valid in conjunction with health_check_path. + // The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between `2` and `10`. Defaults to `10`. Only valid in conjunction with `health_check_path` + HealthCheckEvictionTimeInMin *float64 `json:"healthCheckEvictionTimeInMin,omitempty" tf:"health_check_eviction_time_in_min,omitempty"` + + // The path to the Health Check. + HealthCheckPath *string `json:"healthCheckPath,omitempty" tf:"health_check_path,omitempty"` + + // Should the HTTP2 be enabled? + Http2Enabled *bool `json:"http2Enabled,omitempty" tf:"http2_enabled,omitempty"` + + // One or more ip_restriction blocks as defined above. + IPRestriction []WindowsWebAppSiteConfigIPRestrictionObservation `json:"ipRestriction,omitempty" tf:"ip_restriction,omitempty"` + + // The Default action for traffic that does not match any ip_restriction rule. possible values include Allow and Deny. Defaults to Allow. 
+ IPRestrictionDefaultAction *string `json:"ipRestrictionDefaultAction,omitempty" tf:"ip_restriction_default_action,omitempty"` + + LinuxFxVersion *string `json:"linuxFxVersion,omitempty" tf:"linux_fx_version,omitempty"` + + // The Site load balancing. Possible values include: WeightedRoundRobin, LeastRequests, LeastResponseTime, WeightedTotalTraffic, RequestHash, PerSiteRoundRobin. Defaults to LeastRequests if omitted. + LoadBalancingMode *string `json:"loadBalancingMode,omitempty" tf:"load_balancing_mode,omitempty"` + + // Use Local MySQL. Defaults to false. + LocalMySQLEnabled *bool `json:"localMysqlEnabled,omitempty" tf:"local_mysql_enabled,omitempty"` + + // Managed pipeline mode. Possible values include: Integrated, Classic. Defaults to Integrated. + ManagedPipelineMode *string `json:"managedPipelineMode,omitempty" tf:"managed_pipeline_mode,omitempty"` + + // The configures the minimum version of TLS required for SSL requests. Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + MinimumTLSVersion *string `json:"minimumTlsVersion,omitempty" tf:"minimum_tls_version,omitempty"` + + // Should Remote Debugging be enabled. Defaults to false. + RemoteDebuggingEnabled *bool `json:"remoteDebuggingEnabled,omitempty" tf:"remote_debugging_enabled,omitempty"` + + // The Remote Debugging Version. Possible values include VS2017, VS2019 and VS2022. + RemoteDebuggingVersion *string `json:"remoteDebuggingVersion,omitempty" tf:"remote_debugging_version,omitempty"` + + // One or more scm_ip_restriction blocks as defined above. + ScmIPRestriction []WindowsWebAppSiteConfigScmIPRestrictionObservation `json:"scmIpRestriction,omitempty" tf:"scm_ip_restriction,omitempty"` + + // The Default action for traffic that does not match any scm_ip_restriction rule. possible values include Allow and Deny. Defaults to Allow. 
+ ScmIPRestrictionDefaultAction *string `json:"scmIpRestrictionDefaultAction,omitempty" tf:"scm_ip_restriction_default_action,omitempty"` + + // The configures the minimum version of TLS required for SSL requests to the SCM site Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + ScmMinimumTLSVersion *string `json:"scmMinimumTlsVersion,omitempty" tf:"scm_minimum_tls_version,omitempty"` + + ScmType *string `json:"scmType,omitempty" tf:"scm_type,omitempty"` + + // Should the Windows Web App ip_restriction configuration be used for the SCM also. + ScmUseMainIPRestriction *bool `json:"scmUseMainIpRestriction,omitempty" tf:"scm_use_main_ip_restriction,omitempty"` + + // Should the Windows Web App use a 32-bit worker. Defaults to true. + Use32BitWorker *bool `json:"use32BitWorker,omitempty" tf:"use_32_bit_worker,omitempty"` + + // One or more virtual_application blocks as defined below. + VirtualApplication []VirtualApplicationObservation `json:"virtualApplication,omitempty" tf:"virtual_application,omitempty"` + + // Should all outbound traffic to have NAT Gateways, Network Security Groups and User Defined Routes applied? Defaults to false. + // Should all outbound traffic to have Virtual Network Security Groups and User Defined Routes applied? Defaults to `false`. + VnetRouteAllEnabled *bool `json:"vnetRouteAllEnabled,omitempty" tf:"vnet_route_all_enabled,omitempty"` + + // Should Web Sockets be enabled. Defaults to false. + WebsocketsEnabled *bool `json:"websocketsEnabled,omitempty" tf:"websockets_enabled,omitempty"` + + WindowsFxVersion *string `json:"windowsFxVersion,omitempty" tf:"windows_fx_version,omitempty"` + + // The number of Workers for this Windows App Service. + WorkerCount *float64 `json:"workerCount,omitempty" tf:"worker_count,omitempty"` +} + +type WindowsWebAppSiteConfigParameters struct { + + // The URL to the API Definition for this Windows Web App. 
+ // +kubebuilder:validation:Optional + APIDefinitionURL *string `json:"apiDefinitionUrl,omitempty" tf:"api_definition_url,omitempty"` + + // The API Management API ID this Windows Web App Slot is associated with. + // +kubebuilder:validation:Optional + APIManagementAPIID *string `json:"apiManagementApiId,omitempty" tf:"api_management_api_id,omitempty"` + + // If this Windows Web App is Always On enabled. Defaults to true. + // +kubebuilder:validation:Optional + AlwaysOn *bool `json:"alwaysOn,omitempty" tf:"always_on,omitempty"` + + // The App command line to launch. + // +kubebuilder:validation:Optional + AppCommandLine *string `json:"appCommandLine,omitempty" tf:"app_command_line,omitempty"` + + // A application_stack block as defined above. + // +kubebuilder:validation:Optional + ApplicationStack *WindowsWebAppSiteConfigApplicationStackParameters `json:"applicationStack,omitempty" tf:"application_stack,omitempty"` + + // Should Auto heal rules be enabled. Required with auto_heal_setting. + // +kubebuilder:validation:Optional + AutoHealEnabled *bool `json:"autoHealEnabled,omitempty" tf:"auto_heal_enabled,omitempty"` + + // A auto_heal_setting block as defined above. Required with auto_heal. + // +kubebuilder:validation:Optional + AutoHealSetting *WindowsWebAppSiteConfigAutoHealSettingParameters `json:"autoHealSetting,omitempty" tf:"auto_heal_setting,omitempty"` + + // The Client ID of the Managed Service Identity to use for connections to the Azure Container Registry. + // +kubebuilder:validation:Optional + ContainerRegistryManagedIdentityClientID *string `json:"containerRegistryManagedIdentityClientId,omitempty" tf:"container_registry_managed_identity_client_id,omitempty"` + + // Should connections for Azure Container Registry use Managed Identity. 
+ // +kubebuilder:validation:Optional + ContainerRegistryUseManagedIdentity *bool `json:"containerRegistryUseManagedIdentity,omitempty" tf:"container_registry_use_managed_identity,omitempty"` + + // A cors block as defined above. + // +kubebuilder:validation:Optional + Cors *WindowsWebAppSiteConfigCorsParameters `json:"cors,omitempty" tf:"cors,omitempty"` + + // Specifies a list of Default Documents for the Windows Web App. + // +kubebuilder:validation:Optional + DefaultDocuments []*string `json:"defaultDocuments,omitempty" tf:"default_documents,omitempty"` + + // The State of FTP / FTPS service. Possible values include: AllAllowed, FtpsOnly, Disabled. Defaults to Disabled. + // +kubebuilder:validation:Optional + FtpsState *string `json:"ftpsState,omitempty" tf:"ftps_state,omitempty"` + + // The amount of time in minutes that a node can be unhealthy before being removed from the load balancer. Possible values are between 2 and 10. Only valid in conjunction with health_check_path. + // The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between `2` and `10`. Defaults to `10`. Only valid in conjunction with `health_check_path` + // +kubebuilder:validation:Optional + HealthCheckEvictionTimeInMin *float64 `json:"healthCheckEvictionTimeInMin,omitempty" tf:"health_check_eviction_time_in_min,omitempty"` + + // The path to the Health Check. + // +kubebuilder:validation:Optional + HealthCheckPath *string `json:"healthCheckPath,omitempty" tf:"health_check_path,omitempty"` + + // Should the HTTP2 be enabled? + // +kubebuilder:validation:Optional + Http2Enabled *bool `json:"http2Enabled,omitempty" tf:"http2_enabled,omitempty"` + + // One or more ip_restriction blocks as defined above. 
+	// +kubebuilder:validation:Optional
+	IPRestriction []WindowsWebAppSiteConfigIPRestrictionParameters `json:"ipRestriction,omitempty" tf:"ip_restriction,omitempty"`
+
+	// The Default action for traffic that does not match any ip_restriction rule. Possible values include Allow and Deny. Defaults to Allow.
+	// +kubebuilder:validation:Optional
+	IPRestrictionDefaultAction *string `json:"ipRestrictionDefaultAction,omitempty" tf:"ip_restriction_default_action,omitempty"`
+
+	// The Site load balancing. Possible values include: WeightedRoundRobin, LeastRequests, LeastResponseTime, WeightedTotalTraffic, RequestHash, PerSiteRoundRobin. Defaults to LeastRequests if omitted.
+	// +kubebuilder:validation:Optional
+	LoadBalancingMode *string `json:"loadBalancingMode,omitempty" tf:"load_balancing_mode,omitempty"`
+
+	// Use Local MySQL. Defaults to false.
+	// +kubebuilder:validation:Optional
+	LocalMySQLEnabled *bool `json:"localMysqlEnabled,omitempty" tf:"local_mysql_enabled,omitempty"`
+
+	// Managed pipeline mode. Possible values include: Integrated, Classic. Defaults to Integrated.
+	// +kubebuilder:validation:Optional
+	ManagedPipelineMode *string `json:"managedPipelineMode,omitempty" tf:"managed_pipeline_mode,omitempty"`
+
+	// Configures the minimum version of TLS required for SSL requests. Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2.
+	// +kubebuilder:validation:Optional
+	MinimumTLSVersion *string `json:"minimumTlsVersion,omitempty" tf:"minimum_tls_version,omitempty"`
+
+	// Should Remote Debugging be enabled. Defaults to false.
+	// +kubebuilder:validation:Optional
+	RemoteDebuggingEnabled *bool `json:"remoteDebuggingEnabled,omitempty" tf:"remote_debugging_enabled,omitempty"`
+
+	// The Remote Debugging Version. Possible values include VS2017, VS2019 and VS2022.
+	// +kubebuilder:validation:Optional
+	RemoteDebuggingVersion *string `json:"remoteDebuggingVersion,omitempty" tf:"remote_debugging_version,omitempty"`
+
+	// One or more scm_ip_restriction blocks as defined above.
+	// +kubebuilder:validation:Optional
+	ScmIPRestriction []WindowsWebAppSiteConfigScmIPRestrictionParameters `json:"scmIpRestriction,omitempty" tf:"scm_ip_restriction,omitempty"`
+
+	// The Default action for traffic that does not match any scm_ip_restriction rule. Possible values include Allow and Deny. Defaults to Allow.
+	// +kubebuilder:validation:Optional
+	ScmIPRestrictionDefaultAction *string `json:"scmIpRestrictionDefaultAction,omitempty" tf:"scm_ip_restriction_default_action,omitempty"`
+
+	// Configures the minimum version of TLS required for SSL requests to the SCM site. Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2.
+	// +kubebuilder:validation:Optional
+	ScmMinimumTLSVersion *string `json:"scmMinimumTlsVersion,omitempty" tf:"scm_minimum_tls_version,omitempty"`
+
+	// Should the Windows Web App ip_restriction configuration be used for the SCM also.
+	// +kubebuilder:validation:Optional
+	ScmUseMainIPRestriction *bool `json:"scmUseMainIpRestriction,omitempty" tf:"scm_use_main_ip_restriction,omitempty"`
+
+	// Should the Windows Web App use a 32-bit worker. Defaults to true.
+	// +kubebuilder:validation:Optional
+	Use32BitWorker *bool `json:"use32BitWorker,omitempty" tf:"use_32_bit_worker,omitempty"`
+
+	// One or more virtual_application blocks as defined below.
+	// +kubebuilder:validation:Optional
+	VirtualApplication []VirtualApplicationParameters `json:"virtualApplication,omitempty" tf:"virtual_application,omitempty"`
+
+	// Should all outbound traffic to have NAT Gateways, Network Security Groups and User Defined Routes applied? Defaults to false.
+	// Should all outbound traffic to have Virtual Network Security Groups and User Defined Routes applied? Defaults to `false`.
+ // +kubebuilder:validation:Optional + VnetRouteAllEnabled *bool `json:"vnetRouteAllEnabled,omitempty" tf:"vnet_route_all_enabled,omitempty"` + + // Should Web Sockets be enabled. Defaults to false. + // +kubebuilder:validation:Optional + WebsocketsEnabled *bool `json:"websocketsEnabled,omitempty" tf:"websockets_enabled,omitempty"` + + // The number of Workers for this Windows App Service. + // +kubebuilder:validation:Optional + WorkerCount *float64 `json:"workerCount,omitempty" tf:"worker_count,omitempty"` +} + +type WindowsWebAppSiteConfigScmIPRestrictionHeadersInitParameters struct { + + // Specifies a list of Azure Front Door IDs. + XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid"` + + // Specifies if a Front Door Health Probe should be expected. The only possible value is 1. + XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe"` + + // Specifies a list of addresses for which matching should be applied. Omitting this value means allow any. + XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for"` + + // Specifies a list of Hosts for which matching should be applied. + XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host"` +} + +type WindowsWebAppSiteConfigScmIPRestrictionHeadersObservation struct { + + // Specifies a list of Azure Front Door IDs. + XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid,omitempty"` + + // Specifies if a Front Door Health Probe should be expected. The only possible value is 1. + XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe,omitempty"` + + // Specifies a list of addresses for which matching should be applied. Omitting this value means allow any. + XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for,omitempty"` + + // Specifies a list of Hosts for which matching should be applied. 
+ XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host,omitempty"` +} + +type WindowsWebAppSiteConfigScmIPRestrictionHeadersParameters struct { + + // Specifies a list of Azure Front Door IDs. + // +kubebuilder:validation:Optional + XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid"` + + // Specifies if a Front Door Health Probe should be expected. The only possible value is 1. + // +kubebuilder:validation:Optional + XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe"` + + // Specifies a list of addresses for which matching should be applied. Omitting this value means allow any. + // +kubebuilder:validation:Optional + XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for"` + + // Specifies a list of Hosts for which matching should be applied. + // +kubebuilder:validation:Optional + XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host"` +} + +type WindowsWebAppSiteConfigScmIPRestrictionInitParameters struct { + + // The action to take. Possible values are Allow or Deny. Defaults to Allow. + // The action to take. Possible values are `Allow` or `Deny`. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The Description of this IP Restriction. + // The description of the IP restriction rule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A headers block as defined above. + Headers []WindowsWebAppSiteConfigScmIPRestrictionHeadersInitParameters `json:"headers,omitempty" tf:"headers,omitempty"` + + // The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + // The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // The name which should be used for this TODO. 
+ // The name which should be used for this `ip_restriction`. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The priority value of this ip_restriction. Defaults to 65000. + // The priority value of this `ip_restriction`. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Service Tag used for this IP Restriction. + // The Service Tag used for this IP Restriction. + ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag,omitempty"` + + // The subnet id which will be used by this Web App for regional virtual network integration. + // The Virtual Network Subnet ID used for this IP Restriction. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDRef *v1.Reference `json:"virtualNetworkSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDSelector *v1.Selector `json:"virtualNetworkSubnetIdSelector,omitempty" tf:"-"` +} + +type WindowsWebAppSiteConfigScmIPRestrictionObservation struct { + + // The action to take. Possible values are Allow or Deny. Defaults to Allow. + // The action to take. Possible values are `Allow` or `Deny`. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The Description of this IP Restriction. + // The description of the IP restriction rule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A headers block as defined above. 
+ Headers []WindowsWebAppSiteConfigScmIPRestrictionHeadersObservation `json:"headers,omitempty" tf:"headers,omitempty"` + + // The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + // The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // The name which should be used for this TODO. + // The name which should be used for this `ip_restriction`. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The priority value of this ip_restriction. Defaults to 65000. + // The priority value of this `ip_restriction`. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Service Tag used for this IP Restriction. + // The Service Tag used for this IP Restriction. + ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag,omitempty"` + + // The subnet id which will be used by this Web App for regional virtual network integration. + // The Virtual Network Subnet ID used for this IP Restriction. + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` +} + +type WindowsWebAppSiteConfigScmIPRestrictionParameters struct { + + // The action to take. Possible values are Allow or Deny. Defaults to Allow. + // The action to take. Possible values are `Allow` or `Deny`. + // +kubebuilder:validation:Optional + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The Description of this IP Restriction. + // The description of the IP restriction rule. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A headers block as defined above. 
+ // +kubebuilder:validation:Optional + Headers []WindowsWebAppSiteConfigScmIPRestrictionHeadersParameters `json:"headers,omitempty" tf:"headers,omitempty"` + + // The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + // The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + // +kubebuilder:validation:Optional + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // The name which should be used for this TODO. + // The name which should be used for this `ip_restriction`. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The priority value of this ip_restriction. Defaults to 65000. + // The priority value of this `ip_restriction`. + // +kubebuilder:validation:Optional + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Service Tag used for this IP Restriction. + // The Service Tag used for this IP Restriction. + // +kubebuilder:validation:Optional + ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag,omitempty"` + + // The subnet id which will be used by this Web App for regional virtual network integration. + // The Virtual Network Subnet ID used for this IP Restriction. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate virtualNetworkSubnetId. 
+ // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDRef *v1.Reference `json:"virtualNetworkSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDSelector *v1.Selector `json:"virtualNetworkSubnetIdSelector,omitempty" tf:"-"` +} + +type WindowsWebAppSiteCredentialInitParameters struct { +} + +type WindowsWebAppSiteCredentialObservation struct { + + // The Site Credentials Username used for publishing. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Site Credentials Password used for publishing. + Password *string `json:"password,omitempty" tf:"password,omitempty"` +} + +type WindowsWebAppSiteCredentialParameters struct { +} + +type WindowsWebAppStickySettingsInitParameters struct { + + // A list of app_setting names that the Windows Web App will not swap between Slots when a swap operation is triggered. + AppSettingNames []*string `json:"appSettingNames,omitempty" tf:"app_setting_names,omitempty"` + + // A list of connection_string names that the Windows Web App will not swap between Slots when a swap operation is triggered. + ConnectionStringNames []*string `json:"connectionStringNames,omitempty" tf:"connection_string_names,omitempty"` +} + +type WindowsWebAppStickySettingsObservation struct { + + // A list of app_setting names that the Windows Web App will not swap between Slots when a swap operation is triggered. + AppSettingNames []*string `json:"appSettingNames,omitempty" tf:"app_setting_names,omitempty"` + + // A list of connection_string names that the Windows Web App will not swap between Slots when a swap operation is triggered. 
+	ConnectionStringNames []*string `json:"connectionStringNames,omitempty" tf:"connection_string_names,omitempty"`
+}
+
+type WindowsWebAppStickySettingsParameters struct {
+
+	// A list of app_setting names that the Windows Web App will not swap between Slots when a swap operation is triggered.
+	// +kubebuilder:validation:Optional
+	AppSettingNames []*string `json:"appSettingNames,omitempty" tf:"app_setting_names,omitempty"`
+
+	// A list of connection_string names that the Windows Web App will not swap between Slots when a swap operation is triggered.
+	// +kubebuilder:validation:Optional
+	ConnectionStringNames []*string `json:"connectionStringNames,omitempty" tf:"connection_string_names,omitempty"`
+}
+
+type WindowsWebAppStorageAccountInitParameters struct {
+
+	// The Name of the Storage Account.
+	AccountName *string `json:"accountName,omitempty" tf:"account_name,omitempty"`
+
+	// The path at which to mount the storage share.
+	MountPath *string `json:"mountPath,omitempty" tf:"mount_path,omitempty"`
+
+	// The name which should be used for this Storage Account.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// The Name of the File Share or Container Name for Blob storage.
+	ShareName *string `json:"shareName,omitempty" tf:"share_name,omitempty"`
+
+	// The Azure Storage Type. Possible values include AzureFiles and AzureBlob
+	Type *string `json:"type,omitempty" tf:"type,omitempty"`
+}
+
+type WindowsWebAppStorageAccountObservation struct {
+
+	// The Name of the Storage Account.
+	AccountName *string `json:"accountName,omitempty" tf:"account_name,omitempty"`
+
+	// The path at which to mount the storage share.
+	MountPath *string `json:"mountPath,omitempty" tf:"mount_path,omitempty"`
+
+	// The name which should be used for this Storage Account.
+	Name *string `json:"name,omitempty" tf:"name,omitempty"`
+
+	// The Name of the File Share or Container Name for Blob storage.
+	ShareName *string `json:"shareName,omitempty" tf:"share_name,omitempty"`
+
+	// The Azure Storage Type. Possible values include AzureFiles and AzureBlob
+	Type *string `json:"type,omitempty" tf:"type,omitempty"`
+}
+
+type WindowsWebAppStorageAccountParameters struct {
+
+	// The Access key for the storage account.
+	// +kubebuilder:validation:Required
+	AccessKeySecretRef v1.SecretKeySelector `json:"accessKeySecretRef" tf:"-"`
+
+	// The Name of the Storage Account.
+	// +kubebuilder:validation:Optional
+	AccountName *string `json:"accountName" tf:"account_name,omitempty"`
+
+	// The path at which to mount the storage share.
+	// +kubebuilder:validation:Optional
+	MountPath *string `json:"mountPath,omitempty" tf:"mount_path,omitempty"`
+
+	// The name which should be used for this Storage Account.
+	// +kubebuilder:validation:Optional
+	Name *string `json:"name" tf:"name,omitempty"`
+
+	// The Name of the File Share or Container Name for Blob storage.
+	// +kubebuilder:validation:Optional
+	ShareName *string `json:"shareName" tf:"share_name,omitempty"`
+
+	// The Azure Storage Type. Possible values include AzureFiles and AzureBlob
+	// +kubebuilder:validation:Optional
+	Type *string `json:"type" tf:"type,omitempty"`
+}
+
+// WindowsWebAppSpec defines the desired state of WindowsWebApp
+type WindowsWebAppSpec struct {
+	v1.ResourceSpec `json:",inline"`
+	ForProvider     WindowsWebAppParameters `json:"forProvider"`
+	// THIS IS A BETA FIELD. It will be honored
+	// unless the Management Policies feature flag is disabled.
+	// InitProvider holds the same fields as ForProvider, with the exception
+	// of Identifier and other resource reference fields. The fields that are
+	// in InitProvider are merged into ForProvider when the resource is created.
+	// The same fields are also added to the terraform ignore_changes hook, to
+	// avoid updating them after creation.
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider WindowsWebAppInitParameters `json:"initProvider,omitempty"` +} + +// WindowsWebAppStatus defines the observed state of WindowsWebApp. +type WindowsWebAppStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider WindowsWebAppObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// WindowsWebApp is the Schema for the WindowsWebApps API. Manages a Windows Web App. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type WindowsWebApp struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.location) || (has(self.initProvider) && has(self.initProvider.location))",message="spec.forProvider.location is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.siteConfig) || (has(self.initProvider) && has(self.initProvider.siteConfig))",message="spec.forProvider.siteConfig is a required parameter" + Spec WindowsWebAppSpec `json:"spec"` + Status 
WindowsWebAppStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// WindowsWebAppList contains a list of WindowsWebApps +type WindowsWebAppList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []WindowsWebApp `json:"items"` +} + +// Repository type metadata. +var ( + WindowsWebApp_Kind = "WindowsWebApp" + WindowsWebApp_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: WindowsWebApp_Kind}.String() + WindowsWebApp_KindAPIVersion = WindowsWebApp_Kind + "." + CRDGroupVersion.String() + WindowsWebApp_GroupVersionKind = CRDGroupVersion.WithKind(WindowsWebApp_Kind) +) + +func init() { + SchemeBuilder.Register(&WindowsWebApp{}, &WindowsWebAppList{}) +} diff --git a/apis/web/v1beta2/zz_windowswebappslot_terraformed.go b/apis/web/v1beta2/zz_windowswebappslot_terraformed.go new file mode 100755 index 000000000..538ea8ac9 --- /dev/null +++ b/apis/web/v1beta2/zz_windowswebappslot_terraformed.go @@ -0,0 +1,130 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this WindowsWebAppSlot +func (mg *WindowsWebAppSlot) GetTerraformResourceType() string { + return "azurerm_windows_web_app_slot" +} + +// GetConnectionDetailsMapping for this WindowsWebAppSlot +func (tr *WindowsWebAppSlot) GetConnectionDetailsMapping() map[string]string { + return map[string]string{"auth_settings[*].active_directory[*].client_secret": "spec.forProvider.authSettings[*].activeDirectory[*].clientSecretSecretRef", "auth_settings[*].facebook[*].app_secret": "spec.forProvider.authSettings[*].facebook[*].appSecretSecretRef", "auth_settings[*].github[*].client_secret": "spec.forProvider.authSettings[*].github[*].clientSecretSecretRef", "auth_settings[*].google[*].client_secret": "spec.forProvider.authSettings[*].google[*].clientSecretSecretRef", "auth_settings[*].microsoft[*].client_secret": "spec.forProvider.authSettings[*].microsoft[*].clientSecretSecretRef", "auth_settings[*].twitter[*].consumer_secret": "spec.forProvider.authSettings[*].twitter[*].consumerSecretSecretRef", "backup[*].storage_account_url": "spec.forProvider.backup[*].storageAccountUrlSecretRef", "connection_string[*].value": "spec.forProvider.connectionString[*].valueSecretRef", "custom_domain_verification_id": "status.atProvider.customDomainVerificationId", "logs[*].http_logs[*].azure_blob_storage[*].sas_url": "spec.forProvider.logs[*].httpLogs[*].azureBlobStorage[*].sasUrlSecretRef", "site_config[*].application_stack[*].docker_registry_password": "spec.forProvider.siteConfig[*].applicationStack[*].dockerRegistryPasswordSecretRef", "site_credential[*]": "status.atProvider.siteCredential[*]", "storage_account[*].access_key": "spec.forProvider.storageAccount[*].accessKeySecretRef"} +} + +// GetObservation of this WindowsWebAppSlot +func (tr 
*WindowsWebAppSlot) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this WindowsWebAppSlot +func (tr *WindowsWebAppSlot) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this WindowsWebAppSlot +func (tr *WindowsWebAppSlot) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this WindowsWebAppSlot +func (tr *WindowsWebAppSlot) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this WindowsWebAppSlot +func (tr *WindowsWebAppSlot) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this WindowsWebAppSlot +func (tr *WindowsWebAppSlot) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this WindowsWebAppSlot +func (tr *WindowsWebAppSlot) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + 
return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this WindowsWebAppSlot using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *WindowsWebAppSlot) LateInitialize(attrs []byte) (bool, error) { + params := &WindowsWebAppSlotParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + opts = append(opts, resource.WithNameFilter("KeyVaultReferenceIdentityID")) + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *WindowsWebAppSlot) GetTerraformSchemaVersion() int { + return 1 +} diff --git a/apis/web/v1beta2/zz_windowswebappslot_types.go b/apis/web/v1beta2/zz_windowswebappslot_types.go new file mode 100755 index 000000000..4fbf2dd61 --- /dev/null +++ b/apis/web/v1beta2/zz_windowswebappslot_types.go @@ -0,0 +1,3677 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ActionCustomActionInitParameters struct { + + // The executable to run for the custom_action. + Executable *string `json:"executable,omitempty" tf:"executable,omitempty"` + + // The parameters to pass to the specified executable. + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type ActionCustomActionObservation struct { + + // The executable to run for the custom_action. + Executable *string `json:"executable,omitempty" tf:"executable,omitempty"` + + // The parameters to pass to the specified executable. + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type ActionCustomActionParameters struct { + + // The executable to run for the custom_action. + // +kubebuilder:validation:Optional + Executable *string `json:"executable" tf:"executable,omitempty"` + + // The parameters to pass to the specified executable. + // +kubebuilder:validation:Optional + Parameters *string `json:"parameters,omitempty" tf:"parameters,omitempty"` +} + +type SiteConfigAutoHealSettingTriggerRequestsInitParameters struct { + + // The number of occurrences of the defined status_code in the specified interval on which to trigger this rule. + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` + + // The time interval in the form hh:mm:ss. + Interval *string `json:"interval,omitempty" tf:"interval,omitempty"` +} + +type SiteConfigAutoHealSettingTriggerRequestsObservation struct { + + // The number of occurrences of the defined status_code in the specified interval on which to trigger this rule. + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` + + // The time interval in the form hh:mm:ss. 
+ Interval *string `json:"interval,omitempty" tf:"interval,omitempty"` +} + +type SiteConfigAutoHealSettingTriggerRequestsParameters struct { + + // The number of occurrences of the defined status_code in the specified interval on which to trigger this rule. + // +kubebuilder:validation:Optional + Count *float64 `json:"count" tf:"count,omitempty"` + + // The time interval in the form hh:mm:ss. + // +kubebuilder:validation:Optional + Interval *string `json:"interval" tf:"interval,omitempty"` +} + +type SiteConfigAutoHealSettingTriggerSlowRequestInitParameters struct { + + // The number of occurrences of the defined status_code in the specified interval on which to trigger this rule. + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` + + // The time interval in the form hh:mm:ss. + Interval *string `json:"interval,omitempty" tf:"interval,omitempty"` + + // The path to which this rule status code applies. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // The threshold of time passed to qualify as a Slow Request in hh:mm:ss. + TimeTaken *string `json:"timeTaken,omitempty" tf:"time_taken,omitempty"` +} + +type SiteConfigAutoHealSettingTriggerSlowRequestObservation struct { + + // The number of occurrences of the defined status_code in the specified interval on which to trigger this rule. + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` + + // The time interval in the form hh:mm:ss. + Interval *string `json:"interval,omitempty" tf:"interval,omitempty"` + + // The path to which this rule status code applies. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // The threshold of time passed to qualify as a Slow Request in hh:mm:ss. + TimeTaken *string `json:"timeTaken,omitempty" tf:"time_taken,omitempty"` +} + +type SiteConfigAutoHealSettingTriggerSlowRequestParameters struct { + + // The number of occurrences of the defined status_code in the specified interval on which to trigger this rule. 
+ // +kubebuilder:validation:Optional + Count *float64 `json:"count" tf:"count,omitempty"` + + // The time interval in the form hh:mm:ss. + // +kubebuilder:validation:Optional + Interval *string `json:"interval" tf:"interval,omitempty"` + + // The path to which this rule status code applies. + // +kubebuilder:validation:Optional + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // The threshold of time passed to qualify as a Slow Request in hh:mm:ss. + // +kubebuilder:validation:Optional + TimeTaken *string `json:"timeTaken" tf:"time_taken,omitempty"` +} + +type SiteConfigAutoHealSettingTriggerStatusCodeInitParameters struct { + + // The number of occurrences of the defined status_code in the specified interval on which to trigger this rule. + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` + + // The time interval in the form hh:mm:ss. + Interval *string `json:"interval,omitempty" tf:"interval,omitempty"` + + // The path to which this rule status code applies. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // The status code for this rule, accepts single status codes and status code ranges. e.g. 500 or 400-499. Possible values are integers between 101 and 599 + StatusCodeRange *string `json:"statusCodeRange,omitempty" tf:"status_code_range,omitempty"` + + // The Request Sub Status of the Status Code. + SubStatus *float64 `json:"subStatus,omitempty" tf:"sub_status,omitempty"` + + // The Win32 Status Code of the Request. + Win32StatusCode *float64 `json:"win32StatusCode,omitempty" tf:"win32_status_code,omitempty"` +} + +type SiteConfigAutoHealSettingTriggerStatusCodeObservation struct { + + // The number of occurrences of the defined status_code in the specified interval on which to trigger this rule. + Count *float64 `json:"count,omitempty" tf:"count,omitempty"` + + // The time interval in the form hh:mm:ss. 
+ Interval *string `json:"interval,omitempty" tf:"interval,omitempty"` + + // The path to which this rule status code applies. + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // The status code for this rule, accepts single status codes and status code ranges. e.g. 500 or 400-499. Possible values are integers between 101 and 599 + StatusCodeRange *string `json:"statusCodeRange,omitempty" tf:"status_code_range,omitempty"` + + // The Request Sub Status of the Status Code. + SubStatus *float64 `json:"subStatus,omitempty" tf:"sub_status,omitempty"` + + // The Win32 Status Code of the Request. + Win32StatusCode *float64 `json:"win32StatusCode,omitempty" tf:"win32_status_code,omitempty"` +} + +type SiteConfigAutoHealSettingTriggerStatusCodeParameters struct { + + // The number of occurrences of the defined status_code in the specified interval on which to trigger this rule. + // +kubebuilder:validation:Optional + Count *float64 `json:"count" tf:"count,omitempty"` + + // The time interval in the form hh:mm:ss. + // +kubebuilder:validation:Optional + Interval *string `json:"interval" tf:"interval,omitempty"` + + // The path to which this rule status code applies. + // +kubebuilder:validation:Optional + Path *string `json:"path,omitempty" tf:"path,omitempty"` + + // The status code for this rule, accepts single status codes and status code ranges. e.g. 500 or 400-499. Possible values are integers between 101 and 599 + // +kubebuilder:validation:Optional + StatusCodeRange *string `json:"statusCodeRange" tf:"status_code_range,omitempty"` + + // The Request Sub Status of the Status Code. + // +kubebuilder:validation:Optional + SubStatus *float64 `json:"subStatus,omitempty" tf:"sub_status,omitempty"` + + // The Win32 Status Code of the Request. 
+ // +kubebuilder:validation:Optional + Win32StatusCode *float64 `json:"win32StatusCode,omitempty" tf:"win32_status_code,omitempty"` +} + +type SiteConfigVirtualApplicationInitParameters struct { + + // The physical path for the Virtual Application. + PhysicalPath *string `json:"physicalPath,omitempty" tf:"physical_path,omitempty"` + + // Should pre-loading be enabled. + Preload *bool `json:"preload,omitempty" tf:"preload,omitempty"` + + // One or more virtual_directory blocks as defined below. + VirtualDirectory []VirtualApplicationVirtualDirectoryInitParameters `json:"virtualDirectory,omitempty" tf:"virtual_directory,omitempty"` + + // The Virtual Path for the Virtual Application. + VirtualPath *string `json:"virtualPath,omitempty" tf:"virtual_path,omitempty"` +} + +type SiteConfigVirtualApplicationObservation struct { + + // The physical path for the Virtual Application. + PhysicalPath *string `json:"physicalPath,omitempty" tf:"physical_path,omitempty"` + + // Should pre-loading be enabled. + Preload *bool `json:"preload,omitempty" tf:"preload,omitempty"` + + // One or more virtual_directory blocks as defined below. + VirtualDirectory []VirtualApplicationVirtualDirectoryObservation `json:"virtualDirectory,omitempty" tf:"virtual_directory,omitempty"` + + // The Virtual Path for the Virtual Application. + VirtualPath *string `json:"virtualPath,omitempty" tf:"virtual_path,omitempty"` +} + +type SiteConfigVirtualApplicationParameters struct { + + // The physical path for the Virtual Application. + // +kubebuilder:validation:Optional + PhysicalPath *string `json:"physicalPath" tf:"physical_path,omitempty"` + + // Should pre-loading be enabled. + // +kubebuilder:validation:Optional + Preload *bool `json:"preload" tf:"preload,omitempty"` + + // One or more virtual_directory blocks as defined below. 
+ // +kubebuilder:validation:Optional + VirtualDirectory []VirtualApplicationVirtualDirectoryParameters `json:"virtualDirectory,omitempty" tf:"virtual_directory,omitempty"` + + // The Virtual Path for the Virtual Application. + // +kubebuilder:validation:Optional + VirtualPath *string `json:"virtualPath" tf:"virtual_path,omitempty"` +} + +type VirtualApplicationVirtualDirectoryInitParameters struct { + + // The physical path for the Virtual Application. + PhysicalPath *string `json:"physicalPath,omitempty" tf:"physical_path,omitempty"` + + // The Virtual Path for the Virtual Application. + VirtualPath *string `json:"virtualPath,omitempty" tf:"virtual_path,omitempty"` +} + +type VirtualApplicationVirtualDirectoryObservation struct { + + // The physical path for the Virtual Application. + PhysicalPath *string `json:"physicalPath,omitempty" tf:"physical_path,omitempty"` + + // The Virtual Path for the Virtual Application. + VirtualPath *string `json:"virtualPath,omitempty" tf:"virtual_path,omitempty"` +} + +type VirtualApplicationVirtualDirectoryParameters struct { + + // The physical path for the Virtual Application. + // +kubebuilder:validation:Optional + PhysicalPath *string `json:"physicalPath,omitempty" tf:"physical_path,omitempty"` + + // The Virtual Path for the Virtual Application. + // +kubebuilder:validation:Optional + VirtualPath *string `json:"virtualPath,omitempty" tf:"virtual_path,omitempty"` +} + +type WindowsWebAppSlotAuthSettingsActiveDirectoryInitParameters struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Active Directory. 
+ ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The App Setting name that contains the client secret of the Client. Cannot be used with `client_secret`. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` +} + +type WindowsWebAppSlotAuthSettingsActiveDirectoryObservation struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Active Directory. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The App Setting name that contains the client secret of the Client. Cannot be used with `client_secret`. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` +} + +type WindowsWebAppSlotAuthSettingsActiveDirectoryParameters struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + // +kubebuilder:validation:Optional + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. 
+ // The ID of the Client to use to authenticate with Azure Active Directory. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. + // The Client Secret for the Client ID. Cannot be used with `client_secret_setting_name`. + // +kubebuilder:validation:Optional + ClientSecretSecretRef *v1.SecretKeySelector `json:"clientSecretSecretRef,omitempty" tf:"-"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The App Setting name that contains the client secret of the Client. Cannot be used with `client_secret`. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` +} + +type WindowsWebAppSlotAuthSettingsFacebookInitParameters struct { + + // The App ID of the Facebook app used for login. + // The App ID of the Facebook app used for login. + AppID *string `json:"appId,omitempty" tf:"app_id,omitempty"` + + // The app setting name that contains the app_secret value used for Facebook Login. + // The app setting name that contains the `app_secret` value used for Facebook Login. Cannot be specified with `app_secret`. + AppSecretSettingName *string `json:"appSecretSettingName,omitempty" tf:"app_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + // Specifies a list of OAuth 2.0 scopes to be requested as part of Facebook Login authentication. + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type WindowsWebAppSlotAuthSettingsFacebookObservation struct { + + // The App ID of the Facebook app used for login. 
+ // The App ID of the Facebook app used for login. + AppID *string `json:"appId,omitempty" tf:"app_id,omitempty"` + + // The app setting name that contains the app_secret value used for Facebook Login. + // The app setting name that contains the `app_secret` value used for Facebook Login. Cannot be specified with `app_secret`. + AppSecretSettingName *string `json:"appSecretSettingName,omitempty" tf:"app_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + // Specifies a list of OAuth 2.0 scopes to be requested as part of Facebook Login authentication. + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type WindowsWebAppSlotAuthSettingsFacebookParameters struct { + + // The App ID of the Facebook app used for login. + // The App ID of the Facebook app used for login. + // +kubebuilder:validation:Optional + AppID *string `json:"appId" tf:"app_id,omitempty"` + + // The App Secret of the Facebook app used for Facebook login. Cannot be specified with app_secret_setting_name. + // The App Secret of the Facebook app used for Facebook Login. Cannot be specified with `app_secret_setting_name`. + // +kubebuilder:validation:Optional + AppSecretSecretRef *v1.SecretKeySelector `json:"appSecretSecretRef,omitempty" tf:"-"` + + // The app setting name that contains the app_secret value used for Facebook Login. + // The app setting name that contains the `app_secret` value used for Facebook Login. Cannot be specified with `app_secret`. + // +kubebuilder:validation:Optional + AppSecretSettingName *string `json:"appSecretSettingName,omitempty" tf:"app_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. 
+ // Specifies a list of OAuth 2.0 scopes to be requested as part of Facebook Login authentication. + // +kubebuilder:validation:Optional + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type WindowsWebAppSlotAuthSettingsGithubInitParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the GitHub app used for login. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for GitHub Login. Cannot be specified with `client_secret`. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type WindowsWebAppSlotAuthSettingsGithubObservation struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the GitHub app used for login. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for GitHub Login. Cannot be specified with `client_secret`. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. 
If not specified, "wl.basic" is used as the default scope. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type WindowsWebAppSlotAuthSettingsGithubParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the GitHub app used for login. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. + // The Client Secret of the GitHub app used for GitHub Login. Cannot be specified with `client_secret_setting_name`. + // +kubebuilder:validation:Optional + ClientSecretSecretRef *v1.SecretKeySelector `json:"clientSecretSecretRef,omitempty" tf:"-"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for GitHub Login. Cannot be specified with `client_secret`. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + // +kubebuilder:validation:Optional + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type WindowsWebAppSlotAuthSettingsGoogleInitParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Google web application. 
+ ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Google Login. Cannot be specified with `client_secret`. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication. If not specified, "openid", "profile", and "email" are used as default scopes. + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type WindowsWebAppSlotAuthSettingsGoogleObservation struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Google web application. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Google Login. Cannot be specified with `client_secret`. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication. If not specified, "openid", "profile", and "email" are used as default scopes. 
+ OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type WindowsWebAppSlotAuthSettingsGoogleParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Google web application. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. + // The client secret associated with the Google web application. Cannot be specified with `client_secret_setting_name`. + // +kubebuilder:validation:Optional + ClientSecretSecretRef *v1.SecretKeySelector `json:"clientSecretSecretRef,omitempty" tf:"-"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Google Login. Cannot be specified with `client_secret`. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication. If not specified, "openid", "profile", and "email" are used as default scopes. + // +kubebuilder:validation:Optional + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type WindowsWebAppSlotAuthSettingsInitParameters struct { + + // An active_directory block as defined above. 
+ ActiveDirectory *WindowsWebAppSlotAuthSettingsActiveDirectoryInitParameters `json:"activeDirectory,omitempty" tf:"active_directory,omitempty"` + + // Specifies a map of login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + // Specifies a map of Login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + // +mapType=granular + AdditionalLoginParameters map[string]*string `json:"additionalLoginParameters,omitempty" tf:"additional_login_parameters,omitempty"` + + // Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App Slot. + // Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + AllowedExternalRedirectUrls []*string `json:"allowedExternalRedirectUrls,omitempty" tf:"allowed_external_redirect_urls,omitempty"` + + // The default authentication provider to use when multiple providers are configured. Possible values include: AzureActiveDirectory, Facebook, Google, MicrosoftAccount, Twitter, Github. + // The default authentication provider to use when multiple providers are configured. Possible values include: `AzureActiveDirectory`, `Facebook`, `Google`, `MicrosoftAccount`, `Twitter`, `Github`. + DefaultProvider *string `json:"defaultProvider,omitempty" tf:"default_provider,omitempty"` + + // Should the Authentication / Authorization feature be enabled for the Windows Web App? + // Should the Authentication / Authorization feature be enabled? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // A facebook block as defined below. + Facebook *WindowsWebAppSlotAuthSettingsFacebookInitParameters `json:"facebook,omitempty" tf:"facebook,omitempty"` + + // A github block as defined below. + Github *WindowsWebAppSlotAuthSettingsGithubInitParameters `json:"github,omitempty" tf:"github,omitempty"` + + // A google block as defined below. 
+ Google *WindowsWebAppSlotAuthSettingsGoogleInitParameters `json:"google,omitempty" tf:"google,omitempty"` + + // The OpenID Connect Issuer URI that represents the entity which issues access tokens for this Windows Web App Slot. + // The OpenID Connect Issuer URI that represents the entity which issues access tokens. + Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"` + + // A microsoft block as defined below. + Microsoft *WindowsWebAppSlotAuthSettingsMicrosoftInitParameters `json:"microsoft,omitempty" tf:"microsoft,omitempty"` + + // The RuntimeVersion of the Authentication / Authorization feature in use for the Windows Web App Slot. + // The RuntimeVersion of the Authentication / Authorization feature in use. + RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` + + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + TokenRefreshExtensionHours *float64 `json:"tokenRefreshExtensionHours,omitempty" tf:"token_refresh_extension_hours,omitempty"` + + // Should the Windows Web App Slot durably store platform-specific security tokens that are obtained during login flows? Defaults to false. + // Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to `false`. + TokenStoreEnabled *bool `json:"tokenStoreEnabled,omitempty" tf:"token_store_enabled,omitempty"` + + // A twitter block as defined below. + Twitter *WindowsWebAppSlotAuthSettingsTwitterInitParameters `json:"twitter,omitempty" tf:"twitter,omitempty"` + + // The action to take when an unauthenticated client attempts to access the app. Possible values include: RedirectToLoginPage, AllowAnonymous. 
+ // The action to take when an unauthenticated client attempts to access the app. Possible values include: `RedirectToLoginPage`, `AllowAnonymous`. + UnauthenticatedClientAction *string `json:"unauthenticatedClientAction,omitempty" tf:"unauthenticated_client_action,omitempty"` +} + +type WindowsWebAppSlotAuthSettingsMicrosoftInitParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OAuth 2.0 client ID that was created for the app used for authentication. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret`. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + // The list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, `wl.basic` is used as the default scope. + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type WindowsWebAppSlotAuthSettingsMicrosoftObservation struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OAuth 2.0 client ID that was created for the app used for authentication. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret`. 
+ ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + // The list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, `wl.basic` is used as the default scope. + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type WindowsWebAppSlotAuthSettingsMicrosoftParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OAuth 2.0 client ID that was created for the app used for authentication. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. + // The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret_setting_name`. + // +kubebuilder:validation:Optional + ClientSecretSecretRef *v1.SecretKeySelector `json:"clientSecretSecretRef,omitempty" tf:"-"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret`. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + // The list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. 
If not specified, `wl.basic` is used as the default scope. + // +kubebuilder:validation:Optional + OauthScopes []*string `json:"oauthScopes,omitempty" tf:"oauth_scopes,omitempty"` +} + +type WindowsWebAppSlotAuthSettingsObservation struct { + + // An active_directory block as defined above. + ActiveDirectory *WindowsWebAppSlotAuthSettingsActiveDirectoryObservation `json:"activeDirectory,omitempty" tf:"active_directory,omitempty"` + + // Specifies a map of login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + // Specifies a map of Login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + // +mapType=granular + AdditionalLoginParameters map[string]*string `json:"additionalLoginParameters,omitempty" tf:"additional_login_parameters,omitempty"` + + // Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App Slot. + // Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + AllowedExternalRedirectUrls []*string `json:"allowedExternalRedirectUrls,omitempty" tf:"allowed_external_redirect_urls,omitempty"` + + // The default authentication provider to use when multiple providers are configured. Possible values include: AzureActiveDirectory, Facebook, Google, MicrosoftAccount, Twitter, Github. + // The default authentication provider to use when multiple providers are configured. Possible values include: `AzureActiveDirectory`, `Facebook`, `Google`, `MicrosoftAccount`, `Twitter`, `Github`. + DefaultProvider *string `json:"defaultProvider,omitempty" tf:"default_provider,omitempty"` + + // Should the Authentication / Authorization feature be enabled for the Windows Web App? + // Should the Authentication / Authorization feature be enabled? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // A facebook block as defined below. 
+ Facebook *WindowsWebAppSlotAuthSettingsFacebookObservation `json:"facebook,omitempty" tf:"facebook,omitempty"` + + // A github block as defined below. + Github *WindowsWebAppSlotAuthSettingsGithubObservation `json:"github,omitempty" tf:"github,omitempty"` + + // A google block as defined below. + Google *WindowsWebAppSlotAuthSettingsGoogleObservation `json:"google,omitempty" tf:"google,omitempty"` + + // The OpenID Connect Issuer URI that represents the entity which issues access tokens for this Windows Web App Slot. + // The OpenID Connect Issuer URI that represents the entity which issues access tokens. + Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"` + + // A microsoft block as defined below. + Microsoft *WindowsWebAppSlotAuthSettingsMicrosoftObservation `json:"microsoft,omitempty" tf:"microsoft,omitempty"` + + // The RuntimeVersion of the Authentication / Authorization feature in use for the Windows Web App Slot. + // The RuntimeVersion of the Authentication / Authorization feature in use. + RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` + + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + TokenRefreshExtensionHours *float64 `json:"tokenRefreshExtensionHours,omitempty" tf:"token_refresh_extension_hours,omitempty"` + + // Should the Windows Web App Slot durably store platform-specific security tokens that are obtained during login flows? Defaults to false. + // Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to `false`. + TokenStoreEnabled *bool `json:"tokenStoreEnabled,omitempty" tf:"token_store_enabled,omitempty"` + + // A twitter block as defined below. 
+ Twitter *WindowsWebAppSlotAuthSettingsTwitterObservation `json:"twitter,omitempty" tf:"twitter,omitempty"` + + // The action to take when an unauthenticated client attempts to access the app. Possible values include: RedirectToLoginPage, AllowAnonymous. + // The action to take when an unauthenticated client attempts to access the app. Possible values include: `RedirectToLoginPage`, `AllowAnonymous`. + UnauthenticatedClientAction *string `json:"unauthenticatedClientAction,omitempty" tf:"unauthenticated_client_action,omitempty"` +} + +type WindowsWebAppSlotAuthSettingsParameters struct { + + // An active_directory block as defined above. + // +kubebuilder:validation:Optional + ActiveDirectory *WindowsWebAppSlotAuthSettingsActiveDirectoryParameters `json:"activeDirectory,omitempty" tf:"active_directory,omitempty"` + + // Specifies a map of login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + // Specifies a map of Login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + // +kubebuilder:validation:Optional + // +mapType=granular + AdditionalLoginParameters map[string]*string `json:"additionalLoginParameters,omitempty" tf:"additional_login_parameters,omitempty"` + + // Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App Slot. + // Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + // +kubebuilder:validation:Optional + AllowedExternalRedirectUrls []*string `json:"allowedExternalRedirectUrls,omitempty" tf:"allowed_external_redirect_urls,omitempty"` + + // The default authentication provider to use when multiple providers are configured. Possible values include: AzureActiveDirectory, Facebook, Google, MicrosoftAccount, Twitter, Github. + // The default authentication provider to use when multiple providers are configured. 
Possible values include: `AzureActiveDirectory`, `Facebook`, `Google`, `MicrosoftAccount`, `Twitter`, `Github`. + // +kubebuilder:validation:Optional + DefaultProvider *string `json:"defaultProvider,omitempty" tf:"default_provider,omitempty"` + + // Should the Authentication / Authorization feature be enabled for the Windows Web App? + // Should the Authentication / Authorization feature be enabled? + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled" tf:"enabled,omitempty"` + + // A facebook block as defined below. + // +kubebuilder:validation:Optional + Facebook *WindowsWebAppSlotAuthSettingsFacebookParameters `json:"facebook,omitempty" tf:"facebook,omitempty"` + + // A github block as defined below. + // +kubebuilder:validation:Optional + Github *WindowsWebAppSlotAuthSettingsGithubParameters `json:"github,omitempty" tf:"github,omitempty"` + + // A google block as defined below. + // +kubebuilder:validation:Optional + Google *WindowsWebAppSlotAuthSettingsGoogleParameters `json:"google,omitempty" tf:"google,omitempty"` + + // The OpenID Connect Issuer URI that represents the entity which issues access tokens for this Windows Web App Slot. + // The OpenID Connect Issuer URI that represents the entity which issues access tokens. + // +kubebuilder:validation:Optional + Issuer *string `json:"issuer,omitempty" tf:"issuer,omitempty"` + + // A microsoft block as defined below. + // +kubebuilder:validation:Optional + Microsoft *WindowsWebAppSlotAuthSettingsMicrosoftParameters `json:"microsoft,omitempty" tf:"microsoft,omitempty"` + + // The RuntimeVersion of the Authentication / Authorization feature in use for the Windows Web App Slot. + // The RuntimeVersion of the Authentication / Authorization feature in use. 
+ // +kubebuilder:validation:Optional + RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` + + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + // +kubebuilder:validation:Optional + TokenRefreshExtensionHours *float64 `json:"tokenRefreshExtensionHours,omitempty" tf:"token_refresh_extension_hours,omitempty"` + + // Should the Windows Web App Slot durably store platform-specific security tokens that are obtained during login flows? Defaults to false. + // Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to `false`. + // +kubebuilder:validation:Optional + TokenStoreEnabled *bool `json:"tokenStoreEnabled,omitempty" tf:"token_store_enabled,omitempty"` + + // A twitter block as defined below. + // +kubebuilder:validation:Optional + Twitter *WindowsWebAppSlotAuthSettingsTwitterParameters `json:"twitter,omitempty" tf:"twitter,omitempty"` + + // The action to take when an unauthenticated client attempts to access the app. Possible values include: RedirectToLoginPage, AllowAnonymous. + // The action to take when an unauthenticated client attempts to access the app. Possible values include: `RedirectToLoginPage`, `AllowAnonymous`. + // +kubebuilder:validation:Optional + UnauthenticatedClientAction *string `json:"unauthenticatedClientAction,omitempty" tf:"unauthenticated_client_action,omitempty"` +} + +type WindowsWebAppSlotAuthSettingsTwitterInitParameters struct { + + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. 
+ ConsumerKey *string `json:"consumerKey,omitempty" tf:"consumer_key,omitempty"` + + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret`. + ConsumerSecretSettingName *string `json:"consumerSecretSettingName,omitempty" tf:"consumer_secret_setting_name,omitempty"` +} + +type WindowsWebAppSlotAuthSettingsTwitterObservation struct { + + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + ConsumerKey *string `json:"consumerKey,omitempty" tf:"consumer_key,omitempty"` + + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret`. + ConsumerSecretSettingName *string `json:"consumerSecretSettingName,omitempty" tf:"consumer_secret_setting_name,omitempty"` +} + +type WindowsWebAppSlotAuthSettingsTwitterParameters struct { + + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // +kubebuilder:validation:Optional + ConsumerKey *string `json:"consumerKey" tf:"consumer_key,omitempty"` + + // The OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with consumer_secret_setting_name. + // The OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret_setting_name`. 
+ // +kubebuilder:validation:Optional + ConsumerSecretSecretRef *v1.SecretKeySelector `json:"consumerSecretSecretRef,omitempty" tf:"-"` + + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret`. + // +kubebuilder:validation:Optional + ConsumerSecretSettingName *string `json:"consumerSecretSettingName,omitempty" tf:"consumer_secret_setting_name,omitempty"` +} + +type WindowsWebAppSlotAuthSettingsV2ActiveDirectoryV2InitParameters struct { + + // The list of allowed Applications for the Default Authorisation Policy. + // The list of allowed Applications for the Default Authorisation Policy. + AllowedApplications []*string `json:"allowedApplications,omitempty" tf:"allowed_applications,omitempty"` + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The list of allowed Group Names for the Default Authorisation Policy. + // The list of allowed Group Names for the Default Authorisation Policy. + AllowedGroups []*string `json:"allowedGroups,omitempty" tf:"allowed_groups,omitempty"` + + // The list of allowed Identities for the Default Authorisation Policy. + // The list of allowed Identities for the Default Authorisation Policy. + AllowedIdentities []*string `json:"allowedIdentities,omitempty" tf:"allowed_identities,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Active Directory. 
+ ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The thumbprint of the certificate used for signing purposes. + // The thumbprint of the certificate used for signing purposes. + ClientSecretCertificateThumbprint *string `json:"clientSecretCertificateThumbprint,omitempty" tf:"client_secret_certificate_thumbprint,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The App Setting name that contains the client secret of the Client. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // A list of Allowed Client Applications in the JWT Claim. + // A list of Allowed Client Applications in the JWT Claim. + JwtAllowedClientApplications []*string `json:"jwtAllowedClientApplications,omitempty" tf:"jwt_allowed_client_applications,omitempty"` + + // A list of Allowed Groups in the JWT Claim. + // A list of Allowed Groups in the JWT Claim. + JwtAllowedGroups []*string `json:"jwtAllowedGroups,omitempty" tf:"jwt_allowed_groups,omitempty"` + + // A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + // A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + // +mapType=granular + LoginParameters map[string]*string `json:"loginParameters,omitempty" tf:"login_parameters,omitempty"` + + // The Azure Tenant Endpoint for the Authenticating Tenant. e.g. https://login.microsoftonline.com/v2.0/{tenant-guid}/ + // The Azure Tenant Endpoint for the Authenticating Tenant. e.g. `https://login.microsoftonline.com/v2.0/{tenant-guid}/`. + TenantAuthEndpoint *string `json:"tenantAuthEndpoint,omitempty" tf:"tenant_auth_endpoint,omitempty"` + + // Should the www-authenticate provider should be omitted from the request? Defaults to false. + // Should the www-authenticate provider should be omitted from the request? 
Defaults to `false` + WwwAuthenticationDisabled *bool `json:"wwwAuthenticationDisabled,omitempty" tf:"www_authentication_disabled,omitempty"` +} + +type WindowsWebAppSlotAuthSettingsV2ActiveDirectoryV2Observation struct { + + // The list of allowed Applications for the Default Authorisation Policy. + // The list of allowed Applications for the Default Authorisation Policy. + AllowedApplications []*string `json:"allowedApplications,omitempty" tf:"allowed_applications,omitempty"` + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The list of allowed Group Names for the Default Authorisation Policy. + // The list of allowed Group Names for the Default Authorisation Policy. + AllowedGroups []*string `json:"allowedGroups,omitempty" tf:"allowed_groups,omitempty"` + + // The list of allowed Identities for the Default Authorisation Policy. + // The list of allowed Identities for the Default Authorisation Policy. + AllowedIdentities []*string `json:"allowedIdentities,omitempty" tf:"allowed_identities,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Active Directory. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The thumbprint of the certificate used for signing purposes. + // The thumbprint of the certificate used for signing purposes. + ClientSecretCertificateThumbprint *string `json:"clientSecretCertificateThumbprint,omitempty" tf:"client_secret_certificate_thumbprint,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. 
+ // The App Setting name that contains the client secret of the Client. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // A list of Allowed Client Applications in the JWT Claim. + // A list of Allowed Client Applications in the JWT Claim. + JwtAllowedClientApplications []*string `json:"jwtAllowedClientApplications,omitempty" tf:"jwt_allowed_client_applications,omitempty"` + + // A list of Allowed Groups in the JWT Claim. + // A list of Allowed Groups in the JWT Claim. + JwtAllowedGroups []*string `json:"jwtAllowedGroups,omitempty" tf:"jwt_allowed_groups,omitempty"` + + // A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + // A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + // +mapType=granular + LoginParameters map[string]*string `json:"loginParameters,omitempty" tf:"login_parameters,omitempty"` + + // The Azure Tenant Endpoint for the Authenticating Tenant. e.g. https://login.microsoftonline.com/v2.0/{tenant-guid}/ + // The Azure Tenant Endpoint for the Authenticating Tenant. e.g. `https://login.microsoftonline.com/v2.0/{tenant-guid}/`. + TenantAuthEndpoint *string `json:"tenantAuthEndpoint,omitempty" tf:"tenant_auth_endpoint,omitempty"` + + // Should the www-authenticate provider should be omitted from the request? Defaults to false. + // Should the www-authenticate provider should be omitted from the request? Defaults to `false` + WwwAuthenticationDisabled *bool `json:"wwwAuthenticationDisabled,omitempty" tf:"www_authentication_disabled,omitempty"` +} + +type WindowsWebAppSlotAuthSettingsV2ActiveDirectoryV2Parameters struct { + + // The list of allowed Applications for the Default Authorisation Policy. + // The list of allowed Applications for the Default Authorisation Policy. 
+ // +kubebuilder:validation:Optional + AllowedApplications []*string `json:"allowedApplications,omitempty" tf:"allowed_applications,omitempty"` + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + // +kubebuilder:validation:Optional + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The list of allowed Group Names for the Default Authorisation Policy. + // The list of allowed Group Names for the Default Authorisation Policy. + // +kubebuilder:validation:Optional + AllowedGroups []*string `json:"allowedGroups,omitempty" tf:"allowed_groups,omitempty"` + + // The list of allowed Identities for the Default Authorisation Policy. + // The list of allowed Identities for the Default Authorisation Policy. + // +kubebuilder:validation:Optional + AllowedIdentities []*string `json:"allowedIdentities,omitempty" tf:"allowed_identities,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Active Directory. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The thumbprint of the certificate used for signing purposes. + // The thumbprint of the certificate used for signing purposes. + // +kubebuilder:validation:Optional + ClientSecretCertificateThumbprint *string `json:"clientSecretCertificateThumbprint,omitempty" tf:"client_secret_certificate_thumbprint,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The App Setting name that contains the client secret of the Client. 
+ // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // A list of Allowed Client Applications in the JWT Claim. + // A list of Allowed Client Applications in the JWT Claim. + // +kubebuilder:validation:Optional + JwtAllowedClientApplications []*string `json:"jwtAllowedClientApplications,omitempty" tf:"jwt_allowed_client_applications,omitempty"` + + // A list of Allowed Groups in the JWT Claim. + // A list of Allowed Groups in the JWT Claim. + // +kubebuilder:validation:Optional + JwtAllowedGroups []*string `json:"jwtAllowedGroups,omitempty" tf:"jwt_allowed_groups,omitempty"` + + // A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + // A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + // +kubebuilder:validation:Optional + // +mapType=granular + LoginParameters map[string]*string `json:"loginParameters,omitempty" tf:"login_parameters,omitempty"` + + // The Azure Tenant Endpoint for the Authenticating Tenant. e.g. https://login.microsoftonline.com/v2.0/{tenant-guid}/ + // The Azure Tenant Endpoint for the Authenticating Tenant. e.g. `https://login.microsoftonline.com/v2.0/{tenant-guid}/`. + // +kubebuilder:validation:Optional + TenantAuthEndpoint *string `json:"tenantAuthEndpoint" tf:"tenant_auth_endpoint,omitempty"` + + // Should the www-authenticate provider should be omitted from the request? Defaults to false. + // Should the www-authenticate provider should be omitted from the request? Defaults to `false` + // +kubebuilder:validation:Optional + WwwAuthenticationDisabled *bool `json:"wwwAuthenticationDisabled,omitempty" tf:"www_authentication_disabled,omitempty"` +} + +type WindowsWebAppSlotAuthSettingsV2AppleV2InitParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Apple web application. 
+ ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Apple Login. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` +} + +type WindowsWebAppSlotAuthSettingsV2AppleV2Observation struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Apple web application. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Apple Login. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type WindowsWebAppSlotAuthSettingsV2AppleV2Parameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Apple web application. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Apple Login. 
+ // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName" tf:"client_secret_setting_name,omitempty"` +} + +type WindowsWebAppSlotAuthSettingsV2AzureStaticWebAppV2InitParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Static Web App Authentication. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` +} + +type WindowsWebAppSlotAuthSettingsV2AzureStaticWebAppV2Observation struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Static Web App Authentication. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` +} + +type WindowsWebAppSlotAuthSettingsV2AzureStaticWebAppV2Parameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with Azure Static Web App Authentication. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` +} + +type WindowsWebAppSlotAuthSettingsV2CustomOidcV2InitParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with this Custom OIDC. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The name which should be used for this Storage Account. + // The name of the Custom OIDC Authentication Provider. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The name of the claim that contains the users name. + // The name of the claim that contains the users name. + NameClaimType *string `json:"nameClaimType,omitempty" tf:"name_claim_type,omitempty"` + + // The app setting name that contains the client_secret value used for the Custom OIDC Login. 
+ // The endpoint that contains all the configuration endpoints for this Custom OIDC provider. + OpenIDConfigurationEndpoint *string `json:"openidConfigurationEndpoint,omitempty" tf:"openid_configuration_endpoint,omitempty"` + + // The list of the scopes that should be requested while authenticating. + // The list of the scopes that should be requested while authenticating. + Scopes []*string `json:"scopes,omitempty" tf:"scopes,omitempty"` +} + +type WindowsWebAppSlotAuthSettingsV2CustomOidcV2Observation struct { + + // The endpoint to make the Authorisation Request as supplied by openid_configuration_endpoint response. + // The endpoint to make the Authorisation Request. + AuthorisationEndpoint *string `json:"authorisationEndpoint,omitempty" tf:"authorisation_endpoint,omitempty"` + + // The endpoint that provides the keys necessary to validate the token as supplied by openid_configuration_endpoint response. + // The endpoint that provides the keys necessary to validate the token. + CertificationURI *string `json:"certificationUri,omitempty" tf:"certification_uri,omitempty"` + + // The Client Credential Method used. + // The Client Credential Method used. Currently the only supported value is `ClientSecretPost`. + ClientCredentialMethod *string `json:"clientCredentialMethod,omitempty" tf:"client_credential_method,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with this Custom OIDC. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The App Setting name that contains the secret for this Custom OIDC Client. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The endpoint that issued the Token as supplied by openid_configuration_endpoint response. 
+ // The endpoint that issued the Token. + IssuerEndpoint *string `json:"issuerEndpoint,omitempty" tf:"issuer_endpoint,omitempty"` + + // The name which should be used for this Storage Account. + // The name of the Custom OIDC Authentication Provider. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The name of the claim that contains the users name. + // The name of the claim that contains the users name. + NameClaimType *string `json:"nameClaimType,omitempty" tf:"name_claim_type,omitempty"` + + // The app setting name that contains the client_secret value used for the Custom OIDC Login. + // The endpoint that contains all the configuration endpoints for this Custom OIDC provider. + OpenIDConfigurationEndpoint *string `json:"openidConfigurationEndpoint,omitempty" tf:"openid_configuration_endpoint,omitempty"` + + // The list of the scopes that should be requested while authenticating. + // The list of the scopes that should be requested while authenticating. + Scopes []*string `json:"scopes,omitempty" tf:"scopes,omitempty"` + + // The endpoint used to request a Token as supplied by openid_configuration_endpoint response. + // The endpoint used to request a Token. + TokenEndpoint *string `json:"tokenEndpoint,omitempty" tf:"token_endpoint,omitempty"` +} + +type WindowsWebAppSlotAuthSettingsV2CustomOidcV2Parameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the Client to use to authenticate with this Custom OIDC. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The name which should be used for this Storage Account. + // The name of the Custom OIDC Authentication Provider. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The name of the claim that contains the users name. + // The name of the claim that contains the users name. 
+ // +kubebuilder:validation:Optional + NameClaimType *string `json:"nameClaimType,omitempty" tf:"name_claim_type,omitempty"` + + // The app setting name that contains the client_secret value used for the Custom OIDC Login. + // The endpoint that contains all the configuration endpoints for this Custom OIDC provider. + // +kubebuilder:validation:Optional + OpenIDConfigurationEndpoint *string `json:"openidConfigurationEndpoint" tf:"openid_configuration_endpoint,omitempty"` + + // The list of the scopes that should be requested while authenticating. + // The list of the scopes that should be requested while authenticating. + // +kubebuilder:validation:Optional + Scopes []*string `json:"scopes,omitempty" tf:"scopes,omitempty"` +} + +type WindowsWebAppSlotAuthSettingsV2FacebookV2InitParameters struct { + + // The App ID of the Facebook app used for login. + // The App ID of the Facebook app used for login. + AppID *string `json:"appId,omitempty" tf:"app_id,omitempty"` + + // The app setting name that contains the app_secret value used for Facebook Login. + // The app setting name that contains the `app_secret` value used for Facebook Login. + AppSecretSettingName *string `json:"appSecretSettingName,omitempty" tf:"app_secret_setting_name,omitempty"` + + // The version of the Facebook API to be used while logging in. + // The version of the Facebook API to be used while logging in. + GraphAPIVersion *string `json:"graphApiVersion,omitempty" tf:"graph_api_version,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of scopes to be requested as part of Facebook Login authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type WindowsWebAppSlotAuthSettingsV2FacebookV2Observation struct { + + // The App ID of the Facebook app used for login. + // The App ID of the Facebook app used for login. 
+ AppID *string `json:"appId,omitempty" tf:"app_id,omitempty"` + + // The app setting name that contains the app_secret value used for Facebook Login. + // The app setting name that contains the `app_secret` value used for Facebook Login. + AppSecretSettingName *string `json:"appSecretSettingName,omitempty" tf:"app_secret_setting_name,omitempty"` + + // The version of the Facebook API to be used while logging in. + // The version of the Facebook API to be used while logging in. + GraphAPIVersion *string `json:"graphApiVersion,omitempty" tf:"graph_api_version,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of scopes to be requested as part of Facebook Login authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type WindowsWebAppSlotAuthSettingsV2FacebookV2Parameters struct { + + // The App ID of the Facebook app used for login. + // The App ID of the Facebook app used for login. + // +kubebuilder:validation:Optional + AppID *string `json:"appId" tf:"app_id,omitempty"` + + // The app setting name that contains the app_secret value used for Facebook Login. + // The app setting name that contains the `app_secret` value used for Facebook Login. + // +kubebuilder:validation:Optional + AppSecretSettingName *string `json:"appSecretSettingName" tf:"app_secret_setting_name,omitempty"` + + // The version of the Facebook API to be used while logging in. + // The version of the Facebook API to be used while logging in. + // +kubebuilder:validation:Optional + GraphAPIVersion *string `json:"graphApiVersion,omitempty" tf:"graph_api_version,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of scopes to be requested as part of Facebook Login authentication. 
+ // +kubebuilder:validation:Optional + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type WindowsWebAppSlotAuthSettingsV2GithubV2InitParameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the GitHub app used for login. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for GitHub Login. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type WindowsWebAppSlotAuthSettingsV2GithubV2Observation struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the GitHub app used for login. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for GitHub Login. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. 
+ LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type WindowsWebAppSlotAuthSettingsV2GithubV2Parameters struct { + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The ID of the GitHub app used for login. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for GitHub Login. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + // +kubebuilder:validation:Optional + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type WindowsWebAppSlotAuthSettingsV2GoogleV2InitParameters struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed Audiences that will be requested as part of Google Sign-In authentication. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Google web application. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Google Login. 
+ ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of Login scopes that will be requested as part of Google Sign-In authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type WindowsWebAppSlotAuthSettingsV2GoogleV2Observation struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed Audiences that will be requested as part of Google Sign-In authentication. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Google web application. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Google Login. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of Login scopes that will be requested as part of Google Sign-In authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type WindowsWebAppSlotAuthSettingsV2GoogleV2Parameters struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed Audiences that will be requested as part of Google Sign-In authentication. 
+ // +kubebuilder:validation:Optional + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OpenID Connect Client ID for the Google web application. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name that contains the `client_secret` value used for Google Login. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // Specifies a list of Login scopes that will be requested as part of Google Sign-In authentication. + // +kubebuilder:validation:Optional + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type WindowsWebAppSlotAuthSettingsV2InitParameters struct { + + // An active_directory_v2 block as defined below. + ActiveDirectoryV2 *WindowsWebAppSlotAuthSettingsV2ActiveDirectoryV2InitParameters `json:"activeDirectoryV2,omitempty" tf:"active_directory_v2,omitempty"` + + // An apple_v2 block as defined below. + AppleV2 *WindowsWebAppSlotAuthSettingsV2AppleV2InitParameters `json:"appleV2,omitempty" tf:"apple_v2,omitempty"` + + // Should the AuthV2 Settings be enabled. Defaults to false. + // Should the AuthV2 Settings be enabled. Defaults to `false` + AuthEnabled *bool `json:"authEnabled,omitempty" tf:"auth_enabled,omitempty"` + + // An azure_static_web_app_v2 block as defined below. + AzureStaticWebAppV2 *WindowsWebAppSlotAuthSettingsV2AzureStaticWebAppV2InitParameters `json:"azureStaticWebAppV2,omitempty" tf:"azure_static_web_app_v2,omitempty"` + + // The path to the App Auth settings. 
+ // The path to the App Auth settings. **Note:** Relative Paths are evaluated from the Site Root directory. + ConfigFilePath *string `json:"configFilePath,omitempty" tf:"config_file_path,omitempty"` + + // Zero or more custom_oidc_v2 blocks as defined below. + CustomOidcV2 []WindowsWebAppSlotAuthSettingsV2CustomOidcV2InitParameters `json:"customOidcV2,omitempty" tf:"custom_oidc_v2,omitempty"` + + // The Default Authentication Provider to use when the unauthenticated_action is set to RedirectToLoginPage. Possible values include: apple, azureactivedirectory, facebook, github, google, twitter and the name of your custom_oidc_v2 provider. + // The Default Authentication Provider to use when the `unauthenticated_action` is set to `RedirectToLoginPage`. Possible values include: `apple`, `azureactivedirectory`, `facebook`, `github`, `google`, `twitter` and the `name` of your `custom_oidc_v2` provider. + DefaultProvider *string `json:"defaultProvider,omitempty" tf:"default_provider,omitempty"` + + // The paths which should be excluded from the unauthenticated_action when it is set to RedirectToLoginPage. + // The paths which should be excluded from the `unauthenticated_action` when it is set to `RedirectToLoginPage`. + ExcludedPaths []*string `json:"excludedPaths,omitempty" tf:"excluded_paths,omitempty"` + + // A facebook_v2 block as defined below. + FacebookV2 *WindowsWebAppSlotAuthSettingsV2FacebookV2InitParameters `json:"facebookV2,omitempty" tf:"facebook_v2,omitempty"` + + // The convention used to determine the url of the request made. Possible values include NoProxy, Standard, Custom. Defaults to NoProxy. + // The convention used to determine the url of the request made. Possible values include `ForwardProxyConventionNoProxy`, `ForwardProxyConventionStandard`, `ForwardProxyConventionCustom`. 
Defaults to `ForwardProxyConventionNoProxy` + ForwardProxyConvention *string `json:"forwardProxyConvention,omitempty" tf:"forward_proxy_convention,omitempty"` + + // The name of the custom header containing the host of the request. + // The name of the header containing the host of the request. + ForwardProxyCustomHostHeaderName *string `json:"forwardProxyCustomHostHeaderName,omitempty" tf:"forward_proxy_custom_host_header_name,omitempty"` + + // The name of the custom header containing the scheme of the request. + // The name of the header containing the scheme of the request. + ForwardProxyCustomSchemeHeaderName *string `json:"forwardProxyCustomSchemeHeaderName,omitempty" tf:"forward_proxy_custom_scheme_header_name,omitempty"` + + // A github_v2 block as defined below. + GithubV2 *WindowsWebAppSlotAuthSettingsV2GithubV2InitParameters `json:"githubV2,omitempty" tf:"github_v2,omitempty"` + + // A google_v2 block as defined below. + GoogleV2 *WindowsWebAppSlotAuthSettingsV2GoogleV2InitParameters `json:"googleV2,omitempty" tf:"google_v2,omitempty"` + + // The prefix that should precede all the authentication and authorisation paths. Defaults to /.auth. + // The prefix that should precede all the authentication and authorisation paths. Defaults to `/.auth` + HTTPRouteAPIPrefix *string `json:"httpRouteApiPrefix,omitempty" tf:"http_route_api_prefix,omitempty"` + + // A login block as defined below. + Login *WindowsWebAppSlotAuthSettingsV2LoginInitParameters `json:"login,omitempty" tf:"login,omitempty"` + + // A microsoft_v2 block as defined below. + MicrosoftV2 *WindowsWebAppSlotAuthSettingsV2MicrosoftV2InitParameters `json:"microsoftV2,omitempty" tf:"microsoft_v2,omitempty"` + + // Should the authentication flow be used for all requests. + // Should the authentication flow be used for all requests. + RequireAuthentication *bool `json:"requireAuthentication,omitempty" tf:"require_authentication,omitempty"` + + // Should HTTPS be required on connections? 
Defaults to true. + // Should HTTPS be required on connections? Defaults to true. + RequireHTTPS *bool `json:"requireHttps,omitempty" tf:"require_https,omitempty"` + + // The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to ~1. + // The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to `~1` + RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` + + // A twitter_v2 block as defined below. + TwitterV2 *WindowsWebAppSlotAuthSettingsV2TwitterV2InitParameters `json:"twitterV2,omitempty" tf:"twitter_v2,omitempty"` + + // The action to take for requests made without authentication. Possible values include RedirectToLoginPage, AllowAnonymous, Return401, and Return403. Defaults to RedirectToLoginPage. + // The action to take for requests made without authentication. Possible values include `RedirectToLoginPage`, `AllowAnonymous`, `Return401`, and `Return403`. Defaults to `RedirectToLoginPage`. + UnauthenticatedAction *string `json:"unauthenticatedAction,omitempty" tf:"unauthenticated_action,omitempty"` +} + +type WindowsWebAppSlotAuthSettingsV2LoginInitParameters struct { + + // External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. + // External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. **Note:** URLs within the current domain are always implicitly allowed. + AllowedExternalRedirectUrls []*string `json:"allowedExternalRedirectUrls,omitempty" tf:"allowed_external_redirect_urls,omitempty"` + + // The method by which cookies expire. Possible values include: FixedTime, and IdentityProviderDerived. Defaults to FixedTime. + // The method by which cookies expire. 
Possible values include: `FixedTime`, and `IdentityProviderDerived`. Defaults to `FixedTime`. + CookieExpirationConvention *string `json:"cookieExpirationConvention,omitempty" tf:"cookie_expiration_convention,omitempty"` + + // The time after the request is made when the session cookie should expire. Defaults to 08:00:00. + // The time after the request is made when the session cookie should expire. Defaults to `08:00:00`. + CookieExpirationTime *string `json:"cookieExpirationTime,omitempty" tf:"cookie_expiration_time,omitempty"` + + // The endpoint to which logout requests should be made. + // The endpoint to which logout requests should be made. + LogoutEndpoint *string `json:"logoutEndpoint,omitempty" tf:"logout_endpoint,omitempty"` + + // The time after the request is made when the nonce should expire. Defaults to 00:05:00. + // The time after the request is made when the nonce should expire. Defaults to `00:05:00`. + NonceExpirationTime *string `json:"nonceExpirationTime,omitempty" tf:"nonce_expiration_time,omitempty"` + + // Should the fragments from the request be preserved after the login request is made. Defaults to false. + // Should the fragments from the request be preserved after the login request is made. Defaults to `false`. + PreserveURLFragmentsForLogins *bool `json:"preserveUrlFragmentsForLogins,omitempty" tf:"preserve_url_fragments_for_logins,omitempty"` + + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + TokenRefreshExtensionTime *float64 `json:"tokenRefreshExtensionTime,omitempty" tf:"token_refresh_extension_time,omitempty"` + + // Should the Token Store configuration Enabled. Defaults to false + // Should the Token Store configuration Enabled. 
Defaults to `false` + TokenStoreEnabled *bool `json:"tokenStoreEnabled,omitempty" tf:"token_store_enabled,omitempty"` + + // The directory path in the App Filesystem in which the tokens will be stored. + // The directory path in the App Filesystem in which the tokens will be stored. + TokenStorePath *string `json:"tokenStorePath,omitempty" tf:"token_store_path,omitempty"` + + // The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + // The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + TokenStoreSASSettingName *string `json:"tokenStoreSasSettingName,omitempty" tf:"token_store_sas_setting_name,omitempty"` + + // Should the nonce be validated while completing the login flow. Defaults to true. + // Should the nonce be validated while completing the login flow. Defaults to `true`. + ValidateNonce *bool `json:"validateNonce,omitempty" tf:"validate_nonce,omitempty"` +} + +type WindowsWebAppSlotAuthSettingsV2LoginObservation struct { + + // External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. + // External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. **Note:** URLs within the current domain are always implicitly allowed. + AllowedExternalRedirectUrls []*string `json:"allowedExternalRedirectUrls,omitempty" tf:"allowed_external_redirect_urls,omitempty"` + + // The method by which cookies expire. Possible values include: FixedTime, and IdentityProviderDerived. Defaults to FixedTime. + // The method by which cookies expire. Possible values include: `FixedTime`, and `IdentityProviderDerived`. Defaults to `FixedTime`. 
+ CookieExpirationConvention *string `json:"cookieExpirationConvention,omitempty" tf:"cookie_expiration_convention,omitempty"` + + // The time after the request is made when the session cookie should expire. Defaults to 08:00:00. + // The time after the request is made when the session cookie should expire. Defaults to `08:00:00`. + CookieExpirationTime *string `json:"cookieExpirationTime,omitempty" tf:"cookie_expiration_time,omitempty"` + + // The endpoint to which logout requests should be made. + // The endpoint to which logout requests should be made. + LogoutEndpoint *string `json:"logoutEndpoint,omitempty" tf:"logout_endpoint,omitempty"` + + // The time after the request is made when the nonce should expire. Defaults to 00:05:00. + // The time after the request is made when the nonce should expire. Defaults to `00:05:00`. + NonceExpirationTime *string `json:"nonceExpirationTime,omitempty" tf:"nonce_expiration_time,omitempty"` + + // Should the fragments from the request be preserved after the login request is made. Defaults to false. + // Should the fragments from the request be preserved after the login request is made. Defaults to `false`. + PreserveURLFragmentsForLogins *bool `json:"preserveUrlFragmentsForLogins,omitempty" tf:"preserve_url_fragments_for_logins,omitempty"` + + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + TokenRefreshExtensionTime *float64 `json:"tokenRefreshExtensionTime,omitempty" tf:"token_refresh_extension_time,omitempty"` + + // Should the Token Store configuration Enabled. Defaults to false + // Should the Token Store configuration Enabled. 
Defaults to `false` + TokenStoreEnabled *bool `json:"tokenStoreEnabled,omitempty" tf:"token_store_enabled,omitempty"` + + // The directory path in the App Filesystem in which the tokens will be stored. + // The directory path in the App Filesystem in which the tokens will be stored. + TokenStorePath *string `json:"tokenStorePath,omitempty" tf:"token_store_path,omitempty"` + + // The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + // The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + TokenStoreSASSettingName *string `json:"tokenStoreSasSettingName,omitempty" tf:"token_store_sas_setting_name,omitempty"` + + // Should the nonce be validated while completing the login flow. Defaults to true. + // Should the nonce be validated while completing the login flow. Defaults to `true`. + ValidateNonce *bool `json:"validateNonce,omitempty" tf:"validate_nonce,omitempty"` +} + +type WindowsWebAppSlotAuthSettingsV2LoginParameters struct { + + // External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. + // External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. **Note:** URLs within the current domain are always implicitly allowed. + // +kubebuilder:validation:Optional + AllowedExternalRedirectUrls []*string `json:"allowedExternalRedirectUrls,omitempty" tf:"allowed_external_redirect_urls,omitempty"` + + // The method by which cookies expire. Possible values include: FixedTime, and IdentityProviderDerived. Defaults to FixedTime. + // The method by which cookies expire. Possible values include: `FixedTime`, and `IdentityProviderDerived`. Defaults to `FixedTime`. 
+ // +kubebuilder:validation:Optional + CookieExpirationConvention *string `json:"cookieExpirationConvention,omitempty" tf:"cookie_expiration_convention,omitempty"` + + // The time after the request is made when the session cookie should expire. Defaults to 08:00:00. + // The time after the request is made when the session cookie should expire. Defaults to `08:00:00`. + // +kubebuilder:validation:Optional + CookieExpirationTime *string `json:"cookieExpirationTime,omitempty" tf:"cookie_expiration_time,omitempty"` + + // The endpoint to which logout requests should be made. + // The endpoint to which logout requests should be made. + // +kubebuilder:validation:Optional + LogoutEndpoint *string `json:"logoutEndpoint,omitempty" tf:"logout_endpoint,omitempty"` + + // The time after the request is made when the nonce should expire. Defaults to 00:05:00. + // The time after the request is made when the nonce should expire. Defaults to `00:05:00`. + // +kubebuilder:validation:Optional + NonceExpirationTime *string `json:"nonceExpirationTime,omitempty" tf:"nonce_expiration_time,omitempty"` + + // Should the fragments from the request be preserved after the login request is made. Defaults to false. + // Should the fragments from the request be preserved after the login request is made. Defaults to `false`. + // +kubebuilder:validation:Optional + PreserveURLFragmentsForLogins *bool `json:"preserveUrlFragmentsForLogins,omitempty" tf:"preserve_url_fragments_for_logins,omitempty"` + + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + // The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + // +kubebuilder:validation:Optional + TokenRefreshExtensionTime *float64 `json:"tokenRefreshExtensionTime,omitempty" tf:"token_refresh_extension_time,omitempty"` + + // Should the Token Store configuration Enabled. 
Defaults to false + // Should the Token Store configuration Enabled. Defaults to `false` + // +kubebuilder:validation:Optional + TokenStoreEnabled *bool `json:"tokenStoreEnabled,omitempty" tf:"token_store_enabled,omitempty"` + + // The directory path in the App Filesystem in which the tokens will be stored. + // The directory path in the App Filesystem in which the tokens will be stored. + // +kubebuilder:validation:Optional + TokenStorePath *string `json:"tokenStorePath,omitempty" tf:"token_store_path,omitempty"` + + // The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + // The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + // +kubebuilder:validation:Optional + TokenStoreSASSettingName *string `json:"tokenStoreSasSettingName,omitempty" tf:"token_store_sas_setting_name,omitempty"` + + // Should the nonce be validated while completing the login flow. Defaults to true. + // Should the nonce be validated while completing the login flow. Defaults to `true`. + // +kubebuilder:validation:Optional + ValidateNonce *bool `json:"validateNonce,omitempty" tf:"validate_nonce,omitempty"` +} + +type WindowsWebAppSlotAuthSettingsV2MicrosoftV2InitParameters struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OAuth 2.0 client ID that was created for the app used for authentication. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. 
+ // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // The list of Login scopes that will be requested as part of Microsoft Account authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type WindowsWebAppSlotAuthSettingsV2MicrosoftV2Observation struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OAuth 2.0 client ID that was created for the app used for authentication. + ClientID *string `json:"clientId,omitempty" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + ClientSecretSettingName *string `json:"clientSecretSettingName,omitempty" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // The list of Login scopes that will be requested as part of Microsoft Account authentication. + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type WindowsWebAppSlotAuthSettingsV2MicrosoftV2Parameters struct { + + // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. 
+ // Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + // +kubebuilder:validation:Optional + AllowedAudiences []*string `json:"allowedAudiences,omitempty" tf:"allowed_audiences,omitempty"` + + // The OAuth 2.0 client ID that was created for the app used for authentication. + // The OAuth 2.0 client ID that was created for the app used for authentication. + // +kubebuilder:validation:Optional + ClientID *string `json:"clientId" tf:"client_id,omitempty"` + + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + // +kubebuilder:validation:Optional + ClientSecretSettingName *string `json:"clientSecretSettingName" tf:"client_secret_setting_name,omitempty"` + + // The list of Login scopes that should be requested as part of Microsoft Account authentication. + // The list of Login scopes that will be requested as part of Microsoft Account authentication. + // +kubebuilder:validation:Optional + LoginScopes []*string `json:"loginScopes,omitempty" tf:"login_scopes,omitempty"` +} + +type WindowsWebAppSlotAuthSettingsV2Observation struct { + + // An active_directory_v2 block as defined below. + ActiveDirectoryV2 *WindowsWebAppSlotAuthSettingsV2ActiveDirectoryV2Observation `json:"activeDirectoryV2,omitempty" tf:"active_directory_v2,omitempty"` + + // An apple_v2 block as defined below. + AppleV2 *WindowsWebAppSlotAuthSettingsV2AppleV2Observation `json:"appleV2,omitempty" tf:"apple_v2,omitempty"` + + // Should the AuthV2 Settings be enabled. Defaults to false. + // Should the AuthV2 Settings be enabled. Defaults to `false` + AuthEnabled *bool `json:"authEnabled,omitempty" tf:"auth_enabled,omitempty"` + + // An azure_static_web_app_v2 block as defined below. 
+ AzureStaticWebAppV2 *WindowsWebAppSlotAuthSettingsV2AzureStaticWebAppV2Observation `json:"azureStaticWebAppV2,omitempty" tf:"azure_static_web_app_v2,omitempty"` + + // The path to the App Auth settings. + // The path to the App Auth settings. **Note:** Relative Paths are evaluated from the Site Root directory. + ConfigFilePath *string `json:"configFilePath,omitempty" tf:"config_file_path,omitempty"` + + // Zero or more custom_oidc_v2 blocks as defined below. + CustomOidcV2 []WindowsWebAppSlotAuthSettingsV2CustomOidcV2Observation `json:"customOidcV2,omitempty" tf:"custom_oidc_v2,omitempty"` + + // The Default Authentication Provider to use when the unauthenticated_action is set to RedirectToLoginPage. Possible values include: apple, azureactivedirectory, facebook, github, google, twitter and the name of your custom_oidc_v2 provider. + // The Default Authentication Provider to use when the `unauthenticated_action` is set to `RedirectToLoginPage`. Possible values include: `apple`, `azureactivedirectory`, `facebook`, `github`, `google`, `twitter` and the `name` of your `custom_oidc_v2` provider. + DefaultProvider *string `json:"defaultProvider,omitempty" tf:"default_provider,omitempty"` + + // The paths which should be excluded from the unauthenticated_action when it is set to RedirectToLoginPage. + // The paths which should be excluded from the `unauthenticated_action` when it is set to `RedirectToLoginPage`. + ExcludedPaths []*string `json:"excludedPaths,omitempty" tf:"excluded_paths,omitempty"` + + // A facebook_v2 block as defined below. + FacebookV2 *WindowsWebAppSlotAuthSettingsV2FacebookV2Observation `json:"facebookV2,omitempty" tf:"facebook_v2,omitempty"` + + // The convention used to determine the url of the request made. Possible values include NoProxy, Standard, Custom. Defaults to NoProxy. + // The convention used to determine the url of the request made. 
Possible values include `ForwardProxyConventionNoProxy`, `ForwardProxyConventionStandard`, `ForwardProxyConventionCustom`. Defaults to `ForwardProxyConventionNoProxy` + ForwardProxyConvention *string `json:"forwardProxyConvention,omitempty" tf:"forward_proxy_convention,omitempty"` + + // The name of the custom header containing the host of the request. + // The name of the header containing the host of the request. + ForwardProxyCustomHostHeaderName *string `json:"forwardProxyCustomHostHeaderName,omitempty" tf:"forward_proxy_custom_host_header_name,omitempty"` + + // The name of the custom header containing the scheme of the request. + // The name of the header containing the scheme of the request. + ForwardProxyCustomSchemeHeaderName *string `json:"forwardProxyCustomSchemeHeaderName,omitempty" tf:"forward_proxy_custom_scheme_header_name,omitempty"` + + // A github_v2 block as defined below. + GithubV2 *WindowsWebAppSlotAuthSettingsV2GithubV2Observation `json:"githubV2,omitempty" tf:"github_v2,omitempty"` + + // A google_v2 block as defined below. + GoogleV2 *WindowsWebAppSlotAuthSettingsV2GoogleV2Observation `json:"googleV2,omitempty" tf:"google_v2,omitempty"` + + // The prefix that should precede all the authentication and authorisation paths. Defaults to /.auth. + // The prefix that should precede all the authentication and authorisation paths. Defaults to `/.auth` + HTTPRouteAPIPrefix *string `json:"httpRouteApiPrefix,omitempty" tf:"http_route_api_prefix,omitempty"` + + // A login block as defined below. + Login *WindowsWebAppSlotAuthSettingsV2LoginObservation `json:"login,omitempty" tf:"login,omitempty"` + + // A microsoft_v2 block as defined below. + MicrosoftV2 *WindowsWebAppSlotAuthSettingsV2MicrosoftV2Observation `json:"microsoftV2,omitempty" tf:"microsoft_v2,omitempty"` + + // Should the authentication flow be used for all requests. + // Should the authentication flow be used for all requests. 
+ RequireAuthentication *bool `json:"requireAuthentication,omitempty" tf:"require_authentication,omitempty"` + + // Should HTTPS be required on connections? Defaults to true. + // Should HTTPS be required on connections? Defaults to true. + RequireHTTPS *bool `json:"requireHttps,omitempty" tf:"require_https,omitempty"` + + // The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to ~1. + // The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to `~1` + RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` + + // A twitter_v2 block as defined below. + TwitterV2 *WindowsWebAppSlotAuthSettingsV2TwitterV2Observation `json:"twitterV2,omitempty" tf:"twitter_v2,omitempty"` + + // The action to take for requests made without authentication. Possible values include RedirectToLoginPage, AllowAnonymous, Return401, and Return403. Defaults to RedirectToLoginPage. + // The action to take for requests made without authentication. Possible values include `RedirectToLoginPage`, `AllowAnonymous`, `Return401`, and `Return403`. Defaults to `RedirectToLoginPage`. + UnauthenticatedAction *string `json:"unauthenticatedAction,omitempty" tf:"unauthenticated_action,omitempty"` +} + +type WindowsWebAppSlotAuthSettingsV2Parameters struct { + + // An active_directory_v2 block as defined below. + // +kubebuilder:validation:Optional + ActiveDirectoryV2 *WindowsWebAppSlotAuthSettingsV2ActiveDirectoryV2Parameters `json:"activeDirectoryV2,omitempty" tf:"active_directory_v2,omitempty"` + + // An apple_v2 block as defined below. + // +kubebuilder:validation:Optional + AppleV2 *WindowsWebAppSlotAuthSettingsV2AppleV2Parameters `json:"appleV2,omitempty" tf:"apple_v2,omitempty"` + + // Should the AuthV2 Settings be enabled. Defaults to false. + // Should the AuthV2 Settings be enabled. 
Defaults to `false` + // +kubebuilder:validation:Optional + AuthEnabled *bool `json:"authEnabled,omitempty" tf:"auth_enabled,omitempty"` + + // An azure_static_web_app_v2 block as defined below. + // +kubebuilder:validation:Optional + AzureStaticWebAppV2 *WindowsWebAppSlotAuthSettingsV2AzureStaticWebAppV2Parameters `json:"azureStaticWebAppV2,omitempty" tf:"azure_static_web_app_v2,omitempty"` + + // The path to the App Auth settings. + // The path to the App Auth settings. **Note:** Relative Paths are evaluated from the Site Root directory. + // +kubebuilder:validation:Optional + ConfigFilePath *string `json:"configFilePath,omitempty" tf:"config_file_path,omitempty"` + + // Zero or more custom_oidc_v2 blocks as defined below. + // +kubebuilder:validation:Optional + CustomOidcV2 []WindowsWebAppSlotAuthSettingsV2CustomOidcV2Parameters `json:"customOidcV2,omitempty" tf:"custom_oidc_v2,omitempty"` + + // The Default Authentication Provider to use when the unauthenticated_action is set to RedirectToLoginPage. Possible values include: apple, azureactivedirectory, facebook, github, google, twitter and the name of your custom_oidc_v2 provider. + // The Default Authentication Provider to use when the `unauthenticated_action` is set to `RedirectToLoginPage`. Possible values include: `apple`, `azureactivedirectory`, `facebook`, `github`, `google`, `twitter` and the `name` of your `custom_oidc_v2` provider. + // +kubebuilder:validation:Optional + DefaultProvider *string `json:"defaultProvider,omitempty" tf:"default_provider,omitempty"` + + // The paths which should be excluded from the unauthenticated_action when it is set to RedirectToLoginPage. + // The paths which should be excluded from the `unauthenticated_action` when it is set to `RedirectToLoginPage`. + // +kubebuilder:validation:Optional + ExcludedPaths []*string `json:"excludedPaths,omitempty" tf:"excluded_paths,omitempty"` + + // A facebook_v2 block as defined below. 
+ // +kubebuilder:validation:Optional + FacebookV2 *WindowsWebAppSlotAuthSettingsV2FacebookV2Parameters `json:"facebookV2,omitempty" tf:"facebook_v2,omitempty"` + + // The convention used to determine the url of the request made. Possible values include NoProxy, Standard, Custom. Defaults to NoProxy. + // The convention used to determine the url of the request made. Possible values include `ForwardProxyConventionNoProxy`, `ForwardProxyConventionStandard`, `ForwardProxyConventionCustom`. Defaults to `ForwardProxyConventionNoProxy` + // +kubebuilder:validation:Optional + ForwardProxyConvention *string `json:"forwardProxyConvention,omitempty" tf:"forward_proxy_convention,omitempty"` + + // The name of the custom header containing the host of the request. + // The name of the header containing the host of the request. + // +kubebuilder:validation:Optional + ForwardProxyCustomHostHeaderName *string `json:"forwardProxyCustomHostHeaderName,omitempty" tf:"forward_proxy_custom_host_header_name,omitempty"` + + // The name of the custom header containing the scheme of the request. + // The name of the header containing the scheme of the request. + // +kubebuilder:validation:Optional + ForwardProxyCustomSchemeHeaderName *string `json:"forwardProxyCustomSchemeHeaderName,omitempty" tf:"forward_proxy_custom_scheme_header_name,omitempty"` + + // A github_v2 block as defined below. + // +kubebuilder:validation:Optional + GithubV2 *WindowsWebAppSlotAuthSettingsV2GithubV2Parameters `json:"githubV2,omitempty" tf:"github_v2,omitempty"` + + // A google_v2 block as defined below. + // +kubebuilder:validation:Optional + GoogleV2 *WindowsWebAppSlotAuthSettingsV2GoogleV2Parameters `json:"googleV2,omitempty" tf:"google_v2,omitempty"` + + // The prefix that should precede all the authentication and authorisation paths. Defaults to /.auth. + // The prefix that should precede all the authentication and authorisation paths. 
Defaults to `/.auth` + // +kubebuilder:validation:Optional + HTTPRouteAPIPrefix *string `json:"httpRouteApiPrefix,omitempty" tf:"http_route_api_prefix,omitempty"` + + // A login block as defined below. + // +kubebuilder:validation:Optional + Login *WindowsWebAppSlotAuthSettingsV2LoginParameters `json:"login" tf:"login,omitempty"` + + // A microsoft_v2 block as defined below. + // +kubebuilder:validation:Optional + MicrosoftV2 *WindowsWebAppSlotAuthSettingsV2MicrosoftV2Parameters `json:"microsoftV2,omitempty" tf:"microsoft_v2,omitempty"` + + // Should the authentication flow be used for all requests. + // Should the authentication flow be used for all requests. + // +kubebuilder:validation:Optional + RequireAuthentication *bool `json:"requireAuthentication,omitempty" tf:"require_authentication,omitempty"` + + // Should HTTPS be required on connections? Defaults to true. + // Should HTTPS be required on connections? Defaults to true. + // +kubebuilder:validation:Optional + RequireHTTPS *bool `json:"requireHttps,omitempty" tf:"require_https,omitempty"` + + // The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to ~1. + // The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to `~1` + // +kubebuilder:validation:Optional + RuntimeVersion *string `json:"runtimeVersion,omitempty" tf:"runtime_version,omitempty"` + + // A twitter_v2 block as defined below. + // +kubebuilder:validation:Optional + TwitterV2 *WindowsWebAppSlotAuthSettingsV2TwitterV2Parameters `json:"twitterV2,omitempty" tf:"twitter_v2,omitempty"` + + // The action to take for requests made without authentication. Possible values include RedirectToLoginPage, AllowAnonymous, Return401, and Return403. Defaults to RedirectToLoginPage. + // The action to take for requests made without authentication. Possible values include `RedirectToLoginPage`, `AllowAnonymous`, `Return401`, and `Return403`. Defaults to `RedirectToLoginPage`. 
+ // +kubebuilder:validation:Optional + UnauthenticatedAction *string `json:"unauthenticatedAction,omitempty" tf:"unauthenticated_action,omitempty"` +} + +type WindowsWebAppSlotAuthSettingsV2TwitterV2InitParameters struct { + + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + ConsumerKey *string `json:"consumerKey,omitempty" tf:"consumer_key,omitempty"` + + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + ConsumerSecretSettingName *string `json:"consumerSecretSettingName,omitempty" tf:"consumer_secret_setting_name,omitempty"` +} + +type WindowsWebAppSlotAuthSettingsV2TwitterV2Observation struct { + + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + ConsumerKey *string `json:"consumerKey,omitempty" tf:"consumer_key,omitempty"` + + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + ConsumerSecretSettingName *string `json:"consumerSecretSettingName,omitempty" tf:"consumer_secret_setting_name,omitempty"` +} + +type WindowsWebAppSlotAuthSettingsV2TwitterV2Parameters struct { + + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // The OAuth 1.0a consumer key of the Twitter application used for sign-in. + // +kubebuilder:validation:Optional + ConsumerKey *string `json:"consumerKey" tf:"consumer_key,omitempty"` + + // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. 
+ // The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + // +kubebuilder:validation:Optional + ConsumerSecretSettingName *string `json:"consumerSecretSettingName" tf:"consumer_secret_setting_name,omitempty"` +} + +type WindowsWebAppSlotBackupInitParameters struct { + + // Should this backup job be enabled? Defaults to true. + // Should this backup job be enabled? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The name which should be used for this Backup. + // The name which should be used for this Backup. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A schedule block as defined below. + Schedule *WindowsWebAppSlotBackupScheduleInitParameters `json:"schedule,omitempty" tf:"schedule,omitempty"` +} + +type WindowsWebAppSlotBackupObservation struct { + + // Should this backup job be enabled? Defaults to true. + // Should this backup job be enabled? + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The name which should be used for this Backup. + // The name which should be used for this Backup. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // A schedule block as defined below. + Schedule *WindowsWebAppSlotBackupScheduleObservation `json:"schedule,omitempty" tf:"schedule,omitempty"` +} + +type WindowsWebAppSlotBackupParameters struct { + + // Should this backup job be enabled? Defaults to true. + // Should this backup job be enabled? + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // The name which should be used for this Backup. + // The name which should be used for this Backup. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // A schedule block as defined below. 
+ // +kubebuilder:validation:Optional + Schedule *WindowsWebAppSlotBackupScheduleParameters `json:"schedule" tf:"schedule,omitempty"` + + // The SAS URL to the container. + // The SAS URL to the container. + // +kubebuilder:validation:Required + StorageAccountURLSecretRef v1.SecretKeySelector `json:"storageAccountUrlSecretRef" tf:"-"` +} + +type WindowsWebAppSlotBackupScheduleInitParameters struct { + + // How often the backup should be executed (e.g. for weekly backup, this should be set to 7 and frequency_unit should be set to Day). + // How often the backup should be executed (e.g. for weekly backup, this should be set to `7` and `frequency_unit` should be set to `Day`). + FrequencyInterval *float64 `json:"frequencyInterval,omitempty" tf:"frequency_interval,omitempty"` + + // The unit of time for how often the backup should take place. Possible values include: Day, Hour + // The unit of time for how often the backup should take place. Possible values include: `Day` and `Hour`. + FrequencyUnit *string `json:"frequencyUnit,omitempty" tf:"frequency_unit,omitempty"` + + // Should the service keep at least one backup, regardless of age of backup. Defaults to false. + // Should the service keep at least one backup, regardless of age of backup. Defaults to `false`. + KeepAtLeastOneBackup *bool `json:"keepAtLeastOneBackup,omitempty" tf:"keep_at_least_one_backup,omitempty"` + + // After how many days backups should be deleted. Defaults to 30. + // After how many days backups should be deleted. + RetentionPeriodDays *float64 `json:"retentionPeriodDays,omitempty" tf:"retention_period_days,omitempty"` + + // When the schedule should start working in RFC-3339 format. + // When the schedule should start working in RFC-3339 format. + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` +} + +type WindowsWebAppSlotBackupScheduleObservation struct { + + // How often the backup should be executed (e.g. 
for weekly backup, this should be set to 7 and frequency_unit should be set to Day). + // How often the backup should be executed (e.g. for weekly backup, this should be set to `7` and `frequency_unit` should be set to `Day`). + FrequencyInterval *float64 `json:"frequencyInterval,omitempty" tf:"frequency_interval,omitempty"` + + // The unit of time for how often the backup should take place. Possible values include: Day, Hour + // The unit of time for how often the backup should take place. Possible values include: `Day` and `Hour`. + FrequencyUnit *string `json:"frequencyUnit,omitempty" tf:"frequency_unit,omitempty"` + + // Should the service keep at least one backup, regardless of age of backup. Defaults to false. + // Should the service keep at least one backup, regardless of age of backup. Defaults to `false`. + KeepAtLeastOneBackup *bool `json:"keepAtLeastOneBackup,omitempty" tf:"keep_at_least_one_backup,omitempty"` + + // The time the backup was last attempted. + LastExecutionTime *string `json:"lastExecutionTime,omitempty" tf:"last_execution_time,omitempty"` + + // After how many days backups should be deleted. Defaults to 30. + // After how many days backups should be deleted. + RetentionPeriodDays *float64 `json:"retentionPeriodDays,omitempty" tf:"retention_period_days,omitempty"` + + // When the schedule should start working in RFC-3339 format. + // When the schedule should start working in RFC-3339 format. + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` +} + +type WindowsWebAppSlotBackupScheduleParameters struct { + + // How often the backup should be executed (e.g. for weekly backup, this should be set to 7 and frequency_unit should be set to Day). + // How often the backup should be executed (e.g. for weekly backup, this should be set to `7` and `frequency_unit` should be set to `Day`). 
+ // +kubebuilder:validation:Optional + FrequencyInterval *float64 `json:"frequencyInterval" tf:"frequency_interval,omitempty"` + + // The unit of time for how often the backup should take place. Possible values include: Day, Hour + // The unit of time for how often the backup should take place. Possible values include: `Day` and `Hour`. + // +kubebuilder:validation:Optional + FrequencyUnit *string `json:"frequencyUnit" tf:"frequency_unit,omitempty"` + + // Should the service keep at least one backup, regardless of age of backup. Defaults to false. + // Should the service keep at least one backup, regardless of age of backup. Defaults to `false`. + // +kubebuilder:validation:Optional + KeepAtLeastOneBackup *bool `json:"keepAtLeastOneBackup,omitempty" tf:"keep_at_least_one_backup,omitempty"` + + // After how many days backups should be deleted. Defaults to 30. + // After how many days backups should be deleted. + // +kubebuilder:validation:Optional + RetentionPeriodDays *float64 `json:"retentionPeriodDays,omitempty" tf:"retention_period_days,omitempty"` + + // When the schedule should start working in RFC-3339 format. + // When the schedule should start working in RFC-3339 format. + // +kubebuilder:validation:Optional + StartTime *string `json:"startTime,omitempty" tf:"start_time,omitempty"` +} + +type WindowsWebAppSlotConnectionStringInitParameters struct { + + // The name of the connection String. + // The name which should be used for this Connection. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Type of database. Possible values include: APIHub, Custom, DocDb, EventHub, MySQL, NotificationHub, PostgreSQL, RedisCache, ServiceBus, SQLAzure, and SQLServer. + // Type of database. Possible values include: `MySQL`, `SQLServer`, `SQLAzure`, `Custom`, `NotificationHub`, `ServiceBus`, `EventHub`, `APIHub`, `DocDb`, `RedisCache`, and `PostgreSQL`. 
+ Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type WindowsWebAppSlotConnectionStringObservation struct { + + // The name of the connection String. + // The name which should be used for this Connection. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // Type of database. Possible values include: APIHub, Custom, DocDb, EventHub, MySQL, NotificationHub, PostgreSQL, RedisCache, ServiceBus, SQLAzure, and SQLServer. + // Type of database. Possible values include: `MySQL`, `SQLServer`, `SQLAzure`, `Custom`, `NotificationHub`, `ServiceBus`, `EventHub`, `APIHub`, `DocDb`, `RedisCache`, and `PostgreSQL`. + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type WindowsWebAppSlotConnectionStringParameters struct { + + // The name of the connection String. + // The name which should be used for this Connection. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // Type of database. Possible values include: APIHub, Custom, DocDb, EventHub, MySQL, NotificationHub, PostgreSQL, RedisCache, ServiceBus, SQLAzure, and SQLServer. + // Type of database. Possible values include: `MySQL`, `SQLServer`, `SQLAzure`, `Custom`, `NotificationHub`, `ServiceBus`, `EventHub`, `APIHub`, `DocDb`, `RedisCache`, and `PostgreSQL`. + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` + + // The connection string value. + // The connection string value. + // +kubebuilder:validation:Required + ValueSecretRef v1.SecretKeySelector `json:"valueSecretRef" tf:"-"` +} + +type WindowsWebAppSlotIdentityInitParameters struct { + + // A list of User Assigned Managed Identity IDs to be assigned to this Windows Web App Slot. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Windows Web App Slot. 
Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type WindowsWebAppSlotIdentityObservation struct { + + // A list of User Assigned Managed Identity IDs to be assigned to this Windows Web App Slot. + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // The Principal ID associated with this Managed Service Identity. + PrincipalID *string `json:"principalId,omitempty" tf:"principal_id,omitempty"` + + // The Tenant ID associated with this Managed Service Identity. + TenantID *string `json:"tenantId,omitempty" tf:"tenant_id,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Windows Web App Slot. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type WindowsWebAppSlotIdentityParameters struct { + + // A list of User Assigned Managed Identity IDs to be assigned to this Windows Web App Slot. + // +kubebuilder:validation:Optional + // +listType=set + IdentityIds []*string `json:"identityIds,omitempty" tf:"identity_ids,omitempty"` + + // Specifies the type of Managed Service Identity that should be configured on this Windows Web App Slot. Possible values are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned (to enable both). + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +type WindowsWebAppSlotInitParameters struct { + + // A map of key-value pairs of App Settings. + // +mapType=granular + AppSettings map[string]*string `json:"appSettings,omitempty" tf:"app_settings,omitempty"` + + // An auth_settings block as defined below. + AuthSettings *WindowsWebAppSlotAuthSettingsInitParameters `json:"authSettings,omitempty" tf:"auth_settings,omitempty"` + + // An auth_settings_v2 block as defined below. 
+ AuthSettingsV2 *WindowsWebAppSlotAuthSettingsV2InitParameters `json:"authSettingsV2,omitempty" tf:"auth_settings_v2,omitempty"` + + // A backup block as defined below. + Backup *WindowsWebAppSlotBackupInitParameters `json:"backup,omitempty" tf:"backup,omitempty"` + + // Should Client Affinity be enabled? + ClientAffinityEnabled *bool `json:"clientAffinityEnabled,omitempty" tf:"client_affinity_enabled,omitempty"` + + // Should Client Certificates be enabled? + ClientCertificateEnabled *bool `json:"clientCertificateEnabled,omitempty" tf:"client_certificate_enabled,omitempty"` + + // Paths to exclude when using client certificates, separated by ; + // Paths to exclude when using client certificates, separated by ; + ClientCertificateExclusionPaths *string `json:"clientCertificateExclusionPaths,omitempty" tf:"client_certificate_exclusion_paths,omitempty"` + + // The Client Certificate mode. Possible values are Required, Optional, and OptionalInteractiveUser. This property has no effect when client_cert_enabled is false. Defaults to Required. + ClientCertificateMode *string `json:"clientCertificateMode,omitempty" tf:"client_certificate_mode,omitempty"` + + // One or more connection_string blocks as defined below. + ConnectionString []WindowsWebAppSlotConnectionStringInitParameters `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // Should the Windows Web App Slot be enabled? Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Should the default FTP Basic Authentication publishing profile be enabled. Defaults to true. + FtpPublishBasicAuthenticationEnabled *bool `json:"ftpPublishBasicAuthenticationEnabled,omitempty" tf:"ftp_publish_basic_authentication_enabled,omitempty"` + + // Should the Windows Web App Slot require HTTPS connections. Defaults to false. + HTTPSOnly *bool `json:"httpsOnly,omitempty" tf:"https_only,omitempty"` + + // An identity block as defined below. 
+ Identity *WindowsWebAppSlotIdentityInitParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The User Assigned Identity ID used for accessing KeyVault secrets. The identity must be assigned to the application in the identity block. For more information see - Access vaults with a user-assigned identity + KeyVaultReferenceIdentityID *string `json:"keyVaultReferenceIdentityId,omitempty" tf:"key_vault_reference_identity_id,omitempty"` + + // A logs block as defined below. + Logs *WindowsWebAppSlotLogsInitParameters `json:"logs,omitempty" tf:"logs,omitempty"` + + // Should public network access be enabled for the Web App. Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The ID of the Service Plan in which to run this slot. If not specified the same Service Plan as the Windows Web App will be used. + ServicePlanID *string `json:"servicePlanId,omitempty" tf:"service_plan_id,omitempty"` + + // A site_config block as defined below. + SiteConfig *WindowsWebAppSlotSiteConfigInitParameters `json:"siteConfig,omitempty" tf:"site_config,omitempty"` + + // One or more storage_account blocks as defined below. + StorageAccount []WindowsWebAppSlotStorageAccountInitParameters `json:"storageAccount,omitempty" tf:"storage_account,omitempty"` + + // A mapping of tags which should be assigned to the Windows Web App Slot. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The subnet id which will be used by this Web App Slot for regional virtual network integration. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDRef *v1.Reference `json:"virtualNetworkSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDSelector *v1.Selector `json:"virtualNetworkSubnetIdSelector,omitempty" tf:"-"` + + // Should the default WebDeploy Basic Authentication publishing credentials enabled. Defaults to true. + WebdeployPublishBasicAuthenticationEnabled *bool `json:"webdeployPublishBasicAuthenticationEnabled,omitempty" tf:"webdeploy_publish_basic_authentication_enabled,omitempty"` + + // The local path and filename of the Zip packaged application to deploy to this Windows Web App. + // The local path and filename of the Zip packaged application to deploy to this Windows Web App. **Note:** Using this value requires `WEBSITE_RUN_FROM_PACKAGE=1` on the App in `app_settings`. + ZipDeployFile *string `json:"zipDeployFile,omitempty" tf:"zip_deploy_file,omitempty"` +} + +type WindowsWebAppSlotLogsApplicationLogsAzureBlobStorageInitParameters struct { + + // The level at which to log. Possible values include Error, Warning, Information, Verbose and Off. NOTE: this field is not available for http_logs + Level *string `json:"level,omitempty" tf:"level,omitempty"` + + // The retention period in days. A values of 0 means no retention. + RetentionInDays *float64 `json:"retentionInDays,omitempty" tf:"retention_in_days,omitempty"` + + // SAS url to an Azure blob container with read/write/list/delete permissions. 
+ SASURL *string `json:"sasUrl,omitempty" tf:"sas_url,omitempty"` +} + +type WindowsWebAppSlotLogsApplicationLogsAzureBlobStorageObservation struct { + + // The level at which to log. Possible values include Error, Warning, Information, Verbose and Off. NOTE: this field is not available for http_logs + Level *string `json:"level,omitempty" tf:"level,omitempty"` + + // The retention period in days. A values of 0 means no retention. + RetentionInDays *float64 `json:"retentionInDays,omitempty" tf:"retention_in_days,omitempty"` + + // SAS url to an Azure blob container with read/write/list/delete permissions. + SASURL *string `json:"sasUrl,omitempty" tf:"sas_url,omitempty"` +} + +type WindowsWebAppSlotLogsApplicationLogsAzureBlobStorageParameters struct { + + // The level at which to log. Possible values include Error, Warning, Information, Verbose and Off. NOTE: this field is not available for http_logs + // +kubebuilder:validation:Optional + Level *string `json:"level" tf:"level,omitempty"` + + // The retention period in days. A values of 0 means no retention. + // +kubebuilder:validation:Optional + RetentionInDays *float64 `json:"retentionInDays" tf:"retention_in_days,omitempty"` + + // SAS url to an Azure blob container with read/write/list/delete permissions. + // +kubebuilder:validation:Optional + SASURL *string `json:"sasUrl" tf:"sas_url,omitempty"` +} + +type WindowsWebAppSlotLogsApplicationLogsInitParameters struct { + + // A azure_blob_storage_http block as defined above. + AzureBlobStorage *WindowsWebAppSlotLogsApplicationLogsAzureBlobStorageInitParameters `json:"azureBlobStorage,omitempty" tf:"azure_blob_storage,omitempty"` + + // Log level. Possible values include: Off, Verbose, Information, Warning, and Error. + FileSystemLevel *string `json:"fileSystemLevel,omitempty" tf:"file_system_level,omitempty"` +} + +type WindowsWebAppSlotLogsApplicationLogsObservation struct { + + // A azure_blob_storage_http block as defined above. 
+ AzureBlobStorage *WindowsWebAppSlotLogsApplicationLogsAzureBlobStorageObservation `json:"azureBlobStorage,omitempty" tf:"azure_blob_storage,omitempty"` + + // Log level. Possible values include: Off, Verbose, Information, Warning, and Error. + FileSystemLevel *string `json:"fileSystemLevel,omitempty" tf:"file_system_level,omitempty"` +} + +type WindowsWebAppSlotLogsApplicationLogsParameters struct { + + // A azure_blob_storage_http block as defined above. + // +kubebuilder:validation:Optional + AzureBlobStorage *WindowsWebAppSlotLogsApplicationLogsAzureBlobStorageParameters `json:"azureBlobStorage,omitempty" tf:"azure_blob_storage,omitempty"` + + // Log level. Possible values include: Off, Verbose, Information, Warning, and Error. + // +kubebuilder:validation:Optional + FileSystemLevel *string `json:"fileSystemLevel" tf:"file_system_level,omitempty"` +} + +type WindowsWebAppSlotLogsHTTPLogsAzureBlobStorageInitParameters struct { + + // The retention period in days. A values of 0 means no retention. + RetentionInDays *float64 `json:"retentionInDays,omitempty" tf:"retention_in_days,omitempty"` +} + +type WindowsWebAppSlotLogsHTTPLogsAzureBlobStorageObservation struct { + + // The retention period in days. A values of 0 means no retention. + RetentionInDays *float64 `json:"retentionInDays,omitempty" tf:"retention_in_days,omitempty"` +} + +type WindowsWebAppSlotLogsHTTPLogsAzureBlobStorageParameters struct { + + // The retention period in days. A values of 0 means no retention. + // +kubebuilder:validation:Optional + RetentionInDays *float64 `json:"retentionInDays,omitempty" tf:"retention_in_days,omitempty"` + + // SAS url to an Azure blob container with read/write/list/delete permissions. + // +kubebuilder:validation:Required + SASURLSecretRef v1.SecretKeySelector `json:"sasurlSecretRef" tf:"-"` +} + +type WindowsWebAppSlotLogsHTTPLogsFileSystemInitParameters struct { + + // The retention period in days. A values of 0 means no retention. 
+ RetentionInDays *float64 `json:"retentionInDays,omitempty" tf:"retention_in_days,omitempty"` + + // The maximum size in megabytes that log files can use. + RetentionInMb *float64 `json:"retentionInMb,omitempty" tf:"retention_in_mb,omitempty"` +} + +type WindowsWebAppSlotLogsHTTPLogsFileSystemObservation struct { + + // The retention period in days. A values of 0 means no retention. + RetentionInDays *float64 `json:"retentionInDays,omitempty" tf:"retention_in_days,omitempty"` + + // The maximum size in megabytes that log files can use. + RetentionInMb *float64 `json:"retentionInMb,omitempty" tf:"retention_in_mb,omitempty"` +} + +type WindowsWebAppSlotLogsHTTPLogsFileSystemParameters struct { + + // The retention period in days. A values of 0 means no retention. + // +kubebuilder:validation:Optional + RetentionInDays *float64 `json:"retentionInDays" tf:"retention_in_days,omitempty"` + + // The maximum size in megabytes that log files can use. + // +kubebuilder:validation:Optional + RetentionInMb *float64 `json:"retentionInMb" tf:"retention_in_mb,omitempty"` +} + +type WindowsWebAppSlotLogsHTTPLogsInitParameters struct { + + // A azure_blob_storage_http block as defined above. + AzureBlobStorage *WindowsWebAppSlotLogsHTTPLogsAzureBlobStorageInitParameters `json:"azureBlobStorage,omitempty" tf:"azure_blob_storage,omitempty"` + + // A file_system block as defined above. + FileSystem *WindowsWebAppSlotLogsHTTPLogsFileSystemInitParameters `json:"fileSystem,omitempty" tf:"file_system,omitempty"` +} + +type WindowsWebAppSlotLogsHTTPLogsObservation struct { + + // A azure_blob_storage_http block as defined above. + AzureBlobStorage *WindowsWebAppSlotLogsHTTPLogsAzureBlobStorageObservation `json:"azureBlobStorage,omitempty" tf:"azure_blob_storage,omitempty"` + + // A file_system block as defined above. 
+ FileSystem *WindowsWebAppSlotLogsHTTPLogsFileSystemObservation `json:"fileSystem,omitempty" tf:"file_system,omitempty"` +} + +type WindowsWebAppSlotLogsHTTPLogsParameters struct { + + // A azure_blob_storage_http block as defined above. + // +kubebuilder:validation:Optional + AzureBlobStorage *WindowsWebAppSlotLogsHTTPLogsAzureBlobStorageParameters `json:"azureBlobStorage,omitempty" tf:"azure_blob_storage,omitempty"` + + // A file_system block as defined above. + // +kubebuilder:validation:Optional + FileSystem *WindowsWebAppSlotLogsHTTPLogsFileSystemParameters `json:"fileSystem,omitempty" tf:"file_system,omitempty"` +} + +type WindowsWebAppSlotLogsInitParameters struct { + + // A application_logs block as defined above. + ApplicationLogs *WindowsWebAppSlotLogsApplicationLogsInitParameters `json:"applicationLogs,omitempty" tf:"application_logs,omitempty"` + + // Should detailed error messages be enabled. + DetailedErrorMessages *bool `json:"detailedErrorMessages,omitempty" tf:"detailed_error_messages,omitempty"` + + // Should failed request tracing be enabled. + FailedRequestTracing *bool `json:"failedRequestTracing,omitempty" tf:"failed_request_tracing,omitempty"` + + // An http_logs block as defined above. + HTTPLogs *WindowsWebAppSlotLogsHTTPLogsInitParameters `json:"httpLogs,omitempty" tf:"http_logs,omitempty"` +} + +type WindowsWebAppSlotLogsObservation struct { + + // A application_logs block as defined above. + ApplicationLogs *WindowsWebAppSlotLogsApplicationLogsObservation `json:"applicationLogs,omitempty" tf:"application_logs,omitempty"` + + // Should detailed error messages be enabled. + DetailedErrorMessages *bool `json:"detailedErrorMessages,omitempty" tf:"detailed_error_messages,omitempty"` + + // Should failed request tracing be enabled. + FailedRequestTracing *bool `json:"failedRequestTracing,omitempty" tf:"failed_request_tracing,omitempty"` + + // An http_logs block as defined above. 
+ HTTPLogs *WindowsWebAppSlotLogsHTTPLogsObservation `json:"httpLogs,omitempty" tf:"http_logs,omitempty"` +} + +type WindowsWebAppSlotLogsParameters struct { + + // A application_logs block as defined above. + // +kubebuilder:validation:Optional + ApplicationLogs *WindowsWebAppSlotLogsApplicationLogsParameters `json:"applicationLogs,omitempty" tf:"application_logs,omitempty"` + + // Should detailed error messages be enabled. + // +kubebuilder:validation:Optional + DetailedErrorMessages *bool `json:"detailedErrorMessages,omitempty" tf:"detailed_error_messages,omitempty"` + + // Should failed request tracing be enabled. + // +kubebuilder:validation:Optional + FailedRequestTracing *bool `json:"failedRequestTracing,omitempty" tf:"failed_request_tracing,omitempty"` + + // An http_logs block as defined above. + // +kubebuilder:validation:Optional + HTTPLogs *WindowsWebAppSlotLogsHTTPLogsParameters `json:"httpLogs,omitempty" tf:"http_logs,omitempty"` +} + +type WindowsWebAppSlotObservation struct { + + // The ID of the Windows Web App this Deployment Slot will be part of. Changing this forces a new Windows Web App to be created. + AppServiceID *string `json:"appServiceId,omitempty" tf:"app_service_id,omitempty"` + + // A map of key-value pairs of App Settings. + // +mapType=granular + AppSettings map[string]*string `json:"appSettings,omitempty" tf:"app_settings,omitempty"` + + // An auth_settings block as defined below. + AuthSettings *WindowsWebAppSlotAuthSettingsObservation `json:"authSettings,omitempty" tf:"auth_settings,omitempty"` + + // An auth_settings_v2 block as defined below. + AuthSettingsV2 *WindowsWebAppSlotAuthSettingsV2Observation `json:"authSettingsV2,omitempty" tf:"auth_settings_v2,omitempty"` + + // A backup block as defined below. + Backup *WindowsWebAppSlotBackupObservation `json:"backup,omitempty" tf:"backup,omitempty"` + + // Should Client Affinity be enabled? 
+ ClientAffinityEnabled *bool `json:"clientAffinityEnabled,omitempty" tf:"client_affinity_enabled,omitempty"` + + // Should Client Certificates be enabled? + ClientCertificateEnabled *bool `json:"clientCertificateEnabled,omitempty" tf:"client_certificate_enabled,omitempty"` + + // Paths to exclude when using client certificates, separated by ; + // Paths to exclude when using client certificates, separated by ; + ClientCertificateExclusionPaths *string `json:"clientCertificateExclusionPaths,omitempty" tf:"client_certificate_exclusion_paths,omitempty"` + + // The Client Certificate mode. Possible values are Required, Optional, and OptionalInteractiveUser. This property has no effect when client_cert_enabled is false. Defaults to Required. + ClientCertificateMode *string `json:"clientCertificateMode,omitempty" tf:"client_certificate_mode,omitempty"` + + // One or more connection_string blocks as defined below. + ConnectionString []WindowsWebAppSlotConnectionStringObservation `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // The default hostname of the Windows Web App Slot. + DefaultHostName *string `json:"defaultHostname,omitempty" tf:"default_hostname,omitempty"` + + // Should the Windows Web App Slot be enabled? Defaults to true. + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Should the default FTP Basic Authentication publishing profile be enabled. Defaults to true. + FtpPublishBasicAuthenticationEnabled *bool `json:"ftpPublishBasicAuthenticationEnabled,omitempty" tf:"ftp_publish_basic_authentication_enabled,omitempty"` + + // Should the Windows Web App Slot require HTTPS connections. Defaults to false. + HTTPSOnly *bool `json:"httpsOnly,omitempty" tf:"https_only,omitempty"` + + // The ID of the App Service Environment used by App Service Slot. + HostingEnvironmentID *string `json:"hostingEnvironmentId,omitempty" tf:"hosting_environment_id,omitempty"` + + // The ID of the Windows Web App Slot. 
+ ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // An identity block as defined below. + Identity *WindowsWebAppSlotIdentityObservation `json:"identity,omitempty" tf:"identity,omitempty"` + + // The User Assigned Identity ID used for accessing KeyVault secrets. The identity must be assigned to the application in the identity block. For more information see - Access vaults with a user-assigned identity + KeyVaultReferenceIdentityID *string `json:"keyVaultReferenceIdentityId,omitempty" tf:"key_vault_reference_identity_id,omitempty"` + + // The Kind value for this Windows Web App Slot. + Kind *string `json:"kind,omitempty" tf:"kind,omitempty"` + + // A logs block as defined below. + Logs *WindowsWebAppSlotLogsObservation `json:"logs,omitempty" tf:"logs,omitempty"` + + // A list of outbound IP addresses - such as ["52.23.25.3", "52.143.43.12"] + OutboundIPAddressList []*string `json:"outboundIpAddressList,omitempty" tf:"outbound_ip_address_list,omitempty"` + + // A comma separated list of outbound IP addresses - such as 52.23.25.3,52.143.43.12. + OutboundIPAddresses *string `json:"outboundIpAddresses,omitempty" tf:"outbound_ip_addresses,omitempty"` + + // A list of possible outbound ip address. + PossibleOutboundIPAddressList []*string `json:"possibleOutboundIpAddressList,omitempty" tf:"possible_outbound_ip_address_list,omitempty"` + + // A comma separated list of outbound IP addresses - such as 52.23.25.3,52.143.43.12,52.143.43.17 - not all of which are necessarily in use. Superset of outbound_ip_addresses. + PossibleOutboundIPAddresses *string `json:"possibleOutboundIpAddresses,omitempty" tf:"possible_outbound_ip_addresses,omitempty"` + + // Should public network access be enabled for the Web App. Defaults to true. + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The ID of the Service Plan in which to run this slot. 
If not specified the same Service Plan as the Windows Web App will be used. + ServicePlanID *string `json:"servicePlanId,omitempty" tf:"service_plan_id,omitempty"` + + // A site_config block as defined below. + SiteConfig *WindowsWebAppSlotSiteConfigObservation `json:"siteConfig,omitempty" tf:"site_config,omitempty"` + + // One or more storage_account blocks as defined below. + StorageAccount []WindowsWebAppSlotStorageAccountObservation `json:"storageAccount,omitempty" tf:"storage_account,omitempty"` + + // A mapping of tags which should be assigned to the Windows Web App Slot. + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The subnet id which will be used by this Web App Slot for regional virtual network integration. + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` + + // Should the default WebDeploy Basic Authentication publishing credentials enabled. Defaults to true. + WebdeployPublishBasicAuthenticationEnabled *bool `json:"webdeployPublishBasicAuthenticationEnabled,omitempty" tf:"webdeploy_publish_basic_authentication_enabled,omitempty"` + + // The local path and filename of the Zip packaged application to deploy to this Windows Web App. + // The local path and filename of the Zip packaged application to deploy to this Windows Web App. **Note:** Using this value requires `WEBSITE_RUN_FROM_PACKAGE=1` on the App in `app_settings`. + ZipDeployFile *string `json:"zipDeployFile,omitempty" tf:"zip_deploy_file,omitempty"` +} + +type WindowsWebAppSlotParameters struct { + + // The ID of the Windows Web App this Deployment Slot will be part of. Changing this forces a new Windows Web App to be created. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/web/v1beta2.WindowsWebApp + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractResourceID() + // +kubebuilder:validation:Optional + AppServiceID *string `json:"appServiceId,omitempty" tf:"app_service_id,omitempty"` + + // Reference to a WindowsWebApp in web to populate appServiceId. + // +kubebuilder:validation:Optional + AppServiceIDRef *v1.Reference `json:"appServiceIdRef,omitempty" tf:"-"` + + // Selector for a WindowsWebApp in web to populate appServiceId. + // +kubebuilder:validation:Optional + AppServiceIDSelector *v1.Selector `json:"appServiceIdSelector,omitempty" tf:"-"` + + // A map of key-value pairs of App Settings. + // +kubebuilder:validation:Optional + // +mapType=granular + AppSettings map[string]*string `json:"appSettings,omitempty" tf:"app_settings,omitempty"` + + // An auth_settings block as defined below. + // +kubebuilder:validation:Optional + AuthSettings *WindowsWebAppSlotAuthSettingsParameters `json:"authSettings,omitempty" tf:"auth_settings,omitempty"` + + // An auth_settings_v2 block as defined below. + // +kubebuilder:validation:Optional + AuthSettingsV2 *WindowsWebAppSlotAuthSettingsV2Parameters `json:"authSettingsV2,omitempty" tf:"auth_settings_v2,omitempty"` + + // A backup block as defined below. + // +kubebuilder:validation:Optional + Backup *WindowsWebAppSlotBackupParameters `json:"backup,omitempty" tf:"backup,omitempty"` + + // Should Client Affinity be enabled? + // +kubebuilder:validation:Optional + ClientAffinityEnabled *bool `json:"clientAffinityEnabled,omitempty" tf:"client_affinity_enabled,omitempty"` + + // Should Client Certificates be enabled? 
+ // +kubebuilder:validation:Optional + ClientCertificateEnabled *bool `json:"clientCertificateEnabled,omitempty" tf:"client_certificate_enabled,omitempty"` + + // Paths to exclude when using client certificates, separated by ; + // Paths to exclude when using client certificates, separated by ; + // +kubebuilder:validation:Optional + ClientCertificateExclusionPaths *string `json:"clientCertificateExclusionPaths,omitempty" tf:"client_certificate_exclusion_paths,omitempty"` + + // The Client Certificate mode. Possible values are Required, Optional, and OptionalInteractiveUser. This property has no effect when client_cert_enabled is false. Defaults to Required. + // +kubebuilder:validation:Optional + ClientCertificateMode *string `json:"clientCertificateMode,omitempty" tf:"client_certificate_mode,omitempty"` + + // One or more connection_string blocks as defined below. + // +kubebuilder:validation:Optional + ConnectionString []WindowsWebAppSlotConnectionStringParameters `json:"connectionString,omitempty" tf:"connection_string,omitempty"` + + // Should the Windows Web App Slot be enabled? Defaults to true. + // +kubebuilder:validation:Optional + Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` + + // Should the default FTP Basic Authentication publishing profile be enabled. Defaults to true. + // +kubebuilder:validation:Optional + FtpPublishBasicAuthenticationEnabled *bool `json:"ftpPublishBasicAuthenticationEnabled,omitempty" tf:"ftp_publish_basic_authentication_enabled,omitempty"` + + // Should the Windows Web App Slot require HTTPS connections. Defaults to false. + // +kubebuilder:validation:Optional + HTTPSOnly *bool `json:"httpsOnly,omitempty" tf:"https_only,omitempty"` + + // An identity block as defined below. + // +kubebuilder:validation:Optional + Identity *WindowsWebAppSlotIdentityParameters `json:"identity,omitempty" tf:"identity,omitempty"` + + // The User Assigned Identity ID used for accessing KeyVault secrets. 
The identity must be assigned to the application in the identity block. For more information see - Access vaults with a user-assigned identity + // +kubebuilder:validation:Optional + KeyVaultReferenceIdentityID *string `json:"keyVaultReferenceIdentityId,omitempty" tf:"key_vault_reference_identity_id,omitempty"` + + // A logs block as defined below. + // +kubebuilder:validation:Optional + Logs *WindowsWebAppSlotLogsParameters `json:"logs,omitempty" tf:"logs,omitempty"` + + // Should public network access be enabled for the Web App. Defaults to true. + // +kubebuilder:validation:Optional + PublicNetworkAccessEnabled *bool `json:"publicNetworkAccessEnabled,omitempty" tf:"public_network_access_enabled,omitempty"` + + // The ID of the Service Plan in which to run this slot. If not specified the same Service Plan as the Windows Web App will be used. + // +kubebuilder:validation:Optional + ServicePlanID *string `json:"servicePlanId,omitempty" tf:"service_plan_id,omitempty"` + + // A site_config block as defined below. + // +kubebuilder:validation:Optional + SiteConfig *WindowsWebAppSlotSiteConfigParameters `json:"siteConfig,omitempty" tf:"site_config,omitempty"` + + // One or more storage_account blocks as defined below. + // +kubebuilder:validation:Optional + StorageAccount []WindowsWebAppSlotStorageAccountParameters `json:"storageAccount,omitempty" tf:"storage_account,omitempty"` + + // A mapping of tags which should be assigned to the Windows Web App Slot. + // +kubebuilder:validation:Optional + // +mapType=granular + Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The subnet id which will be used by this Web App Slot for regional virtual network integration. 
+ // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDRef *v1.Reference `json:"virtualNetworkSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDSelector *v1.Selector `json:"virtualNetworkSubnetIdSelector,omitempty" tf:"-"` + + // Should the default WebDeploy Basic Authentication publishing credentials enabled. Defaults to true. + // +kubebuilder:validation:Optional + WebdeployPublishBasicAuthenticationEnabled *bool `json:"webdeployPublishBasicAuthenticationEnabled,omitempty" tf:"webdeploy_publish_basic_authentication_enabled,omitempty"` + + // The local path and filename of the Zip packaged application to deploy to this Windows Web App. + // The local path and filename of the Zip packaged application to deploy to this Windows Web App. **Note:** Using this value requires `WEBSITE_RUN_FROM_PACKAGE=1` on the App in `app_settings`. + // +kubebuilder:validation:Optional + ZipDeployFile *string `json:"zipDeployFile,omitempty" tf:"zip_deploy_file,omitempty"` +} + +type WindowsWebAppSlotSiteConfigApplicationStackInitParameters struct { + + // The Application Stack for the Windows Web App. Possible values include dotnet, dotnetcore, node, python, php, and java. + CurrentStack *string `json:"currentStack,omitempty" tf:"current_stack,omitempty"` + + // The name of the container to be used. This value is required with docker_container_tag. 
+ DockerContainerName *string `json:"dockerContainerName,omitempty" tf:"docker_container_name,omitempty"` + + DockerContainerRegistry *string `json:"dockerContainerRegistry,omitempty" tf:"docker_container_registry,omitempty"` + + // The tag of the container to be used. This value is required with docker_container_name. + DockerContainerTag *string `json:"dockerContainerTag,omitempty" tf:"docker_container_tag,omitempty"` + + // The docker image, including tag, to be used. e.g. azure-app-service/windows/parkingpage:latest. + DockerImageName *string `json:"dockerImageName,omitempty" tf:"docker_image_name,omitempty"` + + // The URL of the container registry where the docker_image_name is located. e.g. https://index.docker.io or https://mcr.microsoft.com. This value is required with docker_image_name. + DockerRegistryURL *string `json:"dockerRegistryUrl,omitempty" tf:"docker_registry_url,omitempty"` + + // The User Name to use for authentication against the registry to pull the image. + DockerRegistryUsername *string `json:"dockerRegistryUsername,omitempty" tf:"docker_registry_username,omitempty"` + + // The version of .NET to use when current_stack is set to dotnetcore. Possible values include v4.0. + // The version of DotNetCore to use. + DotnetCoreVersion *string `json:"dotnetCoreVersion,omitempty" tf:"dotnet_core_version,omitempty"` + + // The version of .NET to use when current_stack is set to dotnet. Possible values include v2.0,v3.0, v4.0, v5.0, v6.0, v7.0 and v8.0. + DotnetVersion *string `json:"dotnetVersion,omitempty" tf:"dotnet_version,omitempty"` + + JavaContainer *string `json:"javaContainer,omitempty" tf:"java_container,omitempty"` + + JavaContainerVersion *string `json:"javaContainerVersion,omitempty" tf:"java_container_version,omitempty"` + + // Should the Java Embedded Server (Java SE) be used to run the app. + // Should the application use the embedded web server for the version of Java in use. 
+ JavaEmbeddedServerEnabled *bool `json:"javaEmbeddedServerEnabled,omitempty" tf:"java_embedded_server_enabled,omitempty"` + + // The version of Java to use when current_stack is set to java. Possible values include 1.7, 1.8, 11 and 17. Required with java_container and java_container_version. + JavaVersion *string `json:"javaVersion,omitempty" tf:"java_version,omitempty"` + + // The version of node to use when current_stack is set to node. Possible values include ~12, ~14, ~16, and ~18. + NodeVersion *string `json:"nodeVersion,omitempty" tf:"node_version,omitempty"` + + // The version of PHP to use when current_stack is set to php. Possible values are 7.1, 7.4 and Off. + PHPVersion *string `json:"phpVersion,omitempty" tf:"php_version,omitempty"` + + // The app is a Python app. Defaults to false. + Python *bool `json:"python,omitempty" tf:"python,omitempty"` + + PythonVersion *string `json:"pythonVersion,omitempty" tf:"python_version,omitempty"` + + // The version of Tomcat the Java App should use. + TomcatVersion *string `json:"tomcatVersion,omitempty" tf:"tomcat_version,omitempty"` +} + +type WindowsWebAppSlotSiteConfigApplicationStackObservation struct { + + // The Application Stack for the Windows Web App. Possible values include dotnet, dotnetcore, node, python, php, and java. + CurrentStack *string `json:"currentStack,omitempty" tf:"current_stack,omitempty"` + + // The name of the container to be used. This value is required with docker_container_tag. + DockerContainerName *string `json:"dockerContainerName,omitempty" tf:"docker_container_name,omitempty"` + + DockerContainerRegistry *string `json:"dockerContainerRegistry,omitempty" tf:"docker_container_registry,omitempty"` + + // The tag of the container to be used. This value is required with docker_container_name. + DockerContainerTag *string `json:"dockerContainerTag,omitempty" tf:"docker_container_tag,omitempty"` + + // The docker image, including tag, to be used. e.g. 
azure-app-service/windows/parkingpage:latest. + DockerImageName *string `json:"dockerImageName,omitempty" tf:"docker_image_name,omitempty"` + + // The URL of the container registry where the docker_image_name is located. e.g. https://index.docker.io or https://mcr.microsoft.com. This value is required with docker_image_name. + DockerRegistryURL *string `json:"dockerRegistryUrl,omitempty" tf:"docker_registry_url,omitempty"` + + // The User Name to use for authentication against the registry to pull the image. + DockerRegistryUsername *string `json:"dockerRegistryUsername,omitempty" tf:"docker_registry_username,omitempty"` + + // The version of .NET to use when current_stack is set to dotnetcore. Possible values include v4.0. + // The version of DotNetCore to use. + DotnetCoreVersion *string `json:"dotnetCoreVersion,omitempty" tf:"dotnet_core_version,omitempty"` + + // The version of .NET to use when current_stack is set to dotnet. Possible values include v2.0,v3.0, v4.0, v5.0, v6.0, v7.0 and v8.0. + DotnetVersion *string `json:"dotnetVersion,omitempty" tf:"dotnet_version,omitempty"` + + JavaContainer *string `json:"javaContainer,omitempty" tf:"java_container,omitempty"` + + JavaContainerVersion *string `json:"javaContainerVersion,omitempty" tf:"java_container_version,omitempty"` + + // Should the Java Embedded Server (Java SE) be used to run the app. + // Should the application use the embedded web server for the version of Java in use. + JavaEmbeddedServerEnabled *bool `json:"javaEmbeddedServerEnabled,omitempty" tf:"java_embedded_server_enabled,omitempty"` + + // The version of Java to use when current_stack is set to java. Possible values include 1.7, 1.8, 11 and 17. Required with java_container and java_container_version. + JavaVersion *string `json:"javaVersion,omitempty" tf:"java_version,omitempty"` + + // The version of node to use when current_stack is set to node. Possible values include ~12, ~14, ~16, and ~18. 
+ NodeVersion *string `json:"nodeVersion,omitempty" tf:"node_version,omitempty"` + + // The version of PHP to use when current_stack is set to php. Possible values are 7.1, 7.4 and Off. + PHPVersion *string `json:"phpVersion,omitempty" tf:"php_version,omitempty"` + + // The app is a Python app. Defaults to false. + Python *bool `json:"python,omitempty" tf:"python,omitempty"` + + PythonVersion *string `json:"pythonVersion,omitempty" tf:"python_version,omitempty"` + + // The version of Tomcat the Java App should use. + TomcatVersion *string `json:"tomcatVersion,omitempty" tf:"tomcat_version,omitempty"` +} + +type WindowsWebAppSlotSiteConfigApplicationStackParameters struct { + + // The Application Stack for the Windows Web App. Possible values include dotnet, dotnetcore, node, python, php, and java. + // +kubebuilder:validation:Optional + CurrentStack *string `json:"currentStack,omitempty" tf:"current_stack,omitempty"` + + // The name of the container to be used. This value is required with docker_container_tag. + // +kubebuilder:validation:Optional + DockerContainerName *string `json:"dockerContainerName,omitempty" tf:"docker_container_name,omitempty"` + + // +kubebuilder:validation:Optional + DockerContainerRegistry *string `json:"dockerContainerRegistry,omitempty" tf:"docker_container_registry,omitempty"` + + // The tag of the container to be used. This value is required with docker_container_name. + // +kubebuilder:validation:Optional + DockerContainerTag *string `json:"dockerContainerTag,omitempty" tf:"docker_container_tag,omitempty"` + + // The docker image, including tag, to be used. e.g. azure-app-service/windows/parkingpage:latest. + // +kubebuilder:validation:Optional + DockerImageName *string `json:"dockerImageName,omitempty" tf:"docker_image_name,omitempty"` + + // The Password to use for authentication against the registry to pull the image. 
+ // +kubebuilder:validation:Optional + DockerRegistryPasswordSecretRef *v1.SecretKeySelector `json:"dockerRegistryPasswordSecretRef,omitempty" tf:"-"` + + // The URL of the container registry where the docker_image_name is located. e.g. https://index.docker.io or https://mcr.microsoft.com. This value is required with docker_image_name. + // +kubebuilder:validation:Optional + DockerRegistryURL *string `json:"dockerRegistryUrl,omitempty" tf:"docker_registry_url,omitempty"` + + // The User Name to use for authentication against the registry to pull the image. + // +kubebuilder:validation:Optional + DockerRegistryUsername *string `json:"dockerRegistryUsername,omitempty" tf:"docker_registry_username,omitempty"` + + // The version of .NET to use when current_stack is set to dotnetcore. Possible values include v4.0. + // The version of DotNetCore to use. + // +kubebuilder:validation:Optional + DotnetCoreVersion *string `json:"dotnetCoreVersion,omitempty" tf:"dotnet_core_version,omitempty"` + + // The version of .NET to use when current_stack is set to dotnet. Possible values include v2.0,v3.0, v4.0, v5.0, v6.0, v7.0 and v8.0. + // +kubebuilder:validation:Optional + DotnetVersion *string `json:"dotnetVersion,omitempty" tf:"dotnet_version,omitempty"` + + // +kubebuilder:validation:Optional + JavaContainer *string `json:"javaContainer,omitempty" tf:"java_container,omitempty"` + + // +kubebuilder:validation:Optional + JavaContainerVersion *string `json:"javaContainerVersion,omitempty" tf:"java_container_version,omitempty"` + + // Should the Java Embedded Server (Java SE) be used to run the app. + // Should the application use the embedded web server for the version of Java in use. + // +kubebuilder:validation:Optional + JavaEmbeddedServerEnabled *bool `json:"javaEmbeddedServerEnabled,omitempty" tf:"java_embedded_server_enabled,omitempty"` + + // The version of Java to use when current_stack is set to java. Possible values include 1.7, 1.8, 11 and 17. 
Required with java_container and java_container_version. + // +kubebuilder:validation:Optional + JavaVersion *string `json:"javaVersion,omitempty" tf:"java_version,omitempty"` + + // The version of node to use when current_stack is set to node. Possible values include ~12, ~14, ~16, and ~18. + // +kubebuilder:validation:Optional + NodeVersion *string `json:"nodeVersion,omitempty" tf:"node_version,omitempty"` + + // The version of PHP to use when current_stack is set to php. Possible values are 7.1, 7.4 and Off. + // +kubebuilder:validation:Optional + PHPVersion *string `json:"phpVersion,omitempty" tf:"php_version,omitempty"` + + // The app is a Python app. Defaults to false. + // +kubebuilder:validation:Optional + Python *bool `json:"python,omitempty" tf:"python,omitempty"` + + // +kubebuilder:validation:Optional + PythonVersion *string `json:"pythonVersion,omitempty" tf:"python_version,omitempty"` + + // The version of Tomcat the Java App should use. + // +kubebuilder:validation:Optional + TomcatVersion *string `json:"tomcatVersion,omitempty" tf:"tomcat_version,omitempty"` +} + +type WindowsWebAppSlotSiteConfigAutoHealSettingActionInitParameters struct { + + // Predefined action to be taken to an Auto Heal trigger. Possible values are CustomAction, LogEvent and Recycle. + ActionType *string `json:"actionType,omitempty" tf:"action_type,omitempty"` + + // A custom_action block as defined below. + CustomAction *ActionCustomActionInitParameters `json:"customAction,omitempty" tf:"custom_action,omitempty"` + + // The minimum amount of time in hh:mm:ss the Windows Web App Slot must have been running before the defined action will be run in the event of a trigger. + MinimumProcessExecutionTime *string `json:"minimumProcessExecutionTime,omitempty" tf:"minimum_process_execution_time,omitempty"` +} + +type WindowsWebAppSlotSiteConfigAutoHealSettingActionObservation struct { + + // Predefined action to be taken to an Auto Heal trigger. 
Possible values are CustomAction, LogEvent and Recycle. + ActionType *string `json:"actionType,omitempty" tf:"action_type,omitempty"` + + // A custom_action block as defined below. + CustomAction *ActionCustomActionObservation `json:"customAction,omitempty" tf:"custom_action,omitempty"` + + // The minimum amount of time in hh:mm:ss the Windows Web App Slot must have been running before the defined action will be run in the event of a trigger. + MinimumProcessExecutionTime *string `json:"minimumProcessExecutionTime,omitempty" tf:"minimum_process_execution_time,omitempty"` +} + +type WindowsWebAppSlotSiteConfigAutoHealSettingActionParameters struct { + + // Predefined action to be taken to an Auto Heal trigger. Possible values are CustomAction, LogEvent and Recycle. + // +kubebuilder:validation:Optional + ActionType *string `json:"actionType" tf:"action_type,omitempty"` + + // A custom_action block as defined below. + // +kubebuilder:validation:Optional + CustomAction *ActionCustomActionParameters `json:"customAction,omitempty" tf:"custom_action,omitempty"` + + // The minimum amount of time in hh:mm:ss the Windows Web App Slot must have been running before the defined action will be run in the event of a trigger. + // +kubebuilder:validation:Optional + MinimumProcessExecutionTime *string `json:"minimumProcessExecutionTime,omitempty" tf:"minimum_process_execution_time,omitempty"` +} + +type WindowsWebAppSlotSiteConfigAutoHealSettingInitParameters struct { + + // An action block as defined below. + Action *WindowsWebAppSlotSiteConfigAutoHealSettingActionInitParameters `json:"action,omitempty" tf:"action,omitempty"` + + // A trigger block as defined below. + Trigger *WindowsWebAppSlotSiteConfigAutoHealSettingTriggerInitParameters `json:"trigger,omitempty" tf:"trigger,omitempty"` +} + +type WindowsWebAppSlotSiteConfigAutoHealSettingObservation struct { + + // An action block as defined below. 
+ Action *WindowsWebAppSlotSiteConfigAutoHealSettingActionObservation `json:"action,omitempty" tf:"action,omitempty"` + + // A trigger block as defined below. + Trigger *WindowsWebAppSlotSiteConfigAutoHealSettingTriggerObservation `json:"trigger,omitempty" tf:"trigger,omitempty"` +} + +type WindowsWebAppSlotSiteConfigAutoHealSettingParameters struct { + + // An action block as defined below. + // +kubebuilder:validation:Optional + Action *WindowsWebAppSlotSiteConfigAutoHealSettingActionParameters `json:"action" tf:"action,omitempty"` + + // A trigger block as defined below. + // +kubebuilder:validation:Optional + Trigger *WindowsWebAppSlotSiteConfigAutoHealSettingTriggerParameters `json:"trigger" tf:"trigger,omitempty"` +} + +type WindowsWebAppSlotSiteConfigAutoHealSettingTriggerInitParameters struct { + + // The amount of Private Memory to be consumed for this rule to trigger. Possible values are between 102400 and 13631488. + PrivateMemoryKb *float64 `json:"privateMemoryKb,omitempty" tf:"private_memory_kb,omitempty"` + + // A requests block as defined above. + Requests *SiteConfigAutoHealSettingTriggerRequestsInitParameters `json:"requests,omitempty" tf:"requests,omitempty"` + + // One or more slow_request blocks as defined above. + SlowRequest *SiteConfigAutoHealSettingTriggerSlowRequestInitParameters `json:"slowRequest,omitempty" tf:"slow_request,omitempty"` + + // One or more status_code blocks as defined above. + StatusCode []SiteConfigAutoHealSettingTriggerStatusCodeInitParameters `json:"statusCode,omitempty" tf:"status_code,omitempty"` +} + +type WindowsWebAppSlotSiteConfigAutoHealSettingTriggerObservation struct { + + // The amount of Private Memory to be consumed for this rule to trigger. Possible values are between 102400 and 13631488. + PrivateMemoryKb *float64 `json:"privateMemoryKb,omitempty" tf:"private_memory_kb,omitempty"` + + // A requests block as defined above. 
+ Requests *SiteConfigAutoHealSettingTriggerRequestsObservation `json:"requests,omitempty" tf:"requests,omitempty"` + + // One or more slow_request blocks as defined above. + SlowRequest *SiteConfigAutoHealSettingTriggerSlowRequestObservation `json:"slowRequest,omitempty" tf:"slow_request,omitempty"` + + // One or more status_code blocks as defined above. + StatusCode []SiteConfigAutoHealSettingTriggerStatusCodeObservation `json:"statusCode,omitempty" tf:"status_code,omitempty"` +} + +type WindowsWebAppSlotSiteConfigAutoHealSettingTriggerParameters struct { + + // The amount of Private Memory to be consumed for this rule to trigger. Possible values are between 102400 and 13631488. + // +kubebuilder:validation:Optional + PrivateMemoryKb *float64 `json:"privateMemoryKb,omitempty" tf:"private_memory_kb,omitempty"` + + // A requests block as defined above. + // +kubebuilder:validation:Optional + Requests *SiteConfigAutoHealSettingTriggerRequestsParameters `json:"requests,omitempty" tf:"requests,omitempty"` + + // One or more slow_request blocks as defined above. + // +kubebuilder:validation:Optional + SlowRequest *SiteConfigAutoHealSettingTriggerSlowRequestParameters `json:"slowRequest,omitempty" tf:"slow_request,omitempty"` + + // One or more status_code blocks as defined above. + // +kubebuilder:validation:Optional + StatusCode []SiteConfigAutoHealSettingTriggerStatusCodeParameters `json:"statusCode,omitempty" tf:"status_code,omitempty"` +} + +type WindowsWebAppSlotSiteConfigCorsInitParameters struct { + + // Specifies a list of origins that should be allowed to make cross-origin calls. + // Specifies a list of origins that should be allowed to make cross-origin calls. + // +listType=set + AllowedOrigins []*string `json:"allowedOrigins,omitempty" tf:"allowed_origins,omitempty"` + + // Whether CORS requests with credentials are allowed. Defaults to false + // Are credentials allowed in CORS requests? Defaults to `false`. 
+ SupportCredentials *bool `json:"supportCredentials,omitempty" tf:"support_credentials,omitempty"` +} + +type WindowsWebAppSlotSiteConfigCorsObservation struct { + + // Specifies a list of origins that should be allowed to make cross-origin calls. + // Specifies a list of origins that should be allowed to make cross-origin calls. + // +listType=set + AllowedOrigins []*string `json:"allowedOrigins,omitempty" tf:"allowed_origins,omitempty"` + + // Whether CORS requests with credentials are allowed. Defaults to false + // Are credentials allowed in CORS requests? Defaults to `false`. + SupportCredentials *bool `json:"supportCredentials,omitempty" tf:"support_credentials,omitempty"` +} + +type WindowsWebAppSlotSiteConfigCorsParameters struct { + + // Specifies a list of origins that should be allowed to make cross-origin calls. + // Specifies a list of origins that should be allowed to make cross-origin calls. + // +kubebuilder:validation:Optional + // +listType=set + AllowedOrigins []*string `json:"allowedOrigins,omitempty" tf:"allowed_origins,omitempty"` + + // Whether CORS requests with credentials are allowed. Defaults to false + // Are credentials allowed in CORS requests? Defaults to `false`. + // +kubebuilder:validation:Optional + SupportCredentials *bool `json:"supportCredentials,omitempty" tf:"support_credentials,omitempty"` +} + +type WindowsWebAppSlotSiteConfigIPRestrictionHeadersInitParameters struct { + + // Specifies a list of Azure Front Door IDs. + XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid"` + + // Specifies if a Front Door Health Probe should be expected. The only possible value is 1. + XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe"` + + // Specifies a list of addresses for which matching should be applied. Omitting this value means allow any. + XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for"` + + // Specifies a list of Hosts for which matching should be applied. 
+ XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host"` +} + +type WindowsWebAppSlotSiteConfigIPRestrictionHeadersObservation struct { + + // Specifies a list of Azure Front Door IDs. + XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid,omitempty"` + + // Specifies if a Front Door Health Probe should be expected. The only possible value is 1. + XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe,omitempty"` + + // Specifies a list of addresses for which matching should be applied. Omitting this value means allow any. + XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for,omitempty"` + + // Specifies a list of Hosts for which matching should be applied. + XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host,omitempty"` +} + +type WindowsWebAppSlotSiteConfigIPRestrictionHeadersParameters struct { + + // Specifies a list of Azure Front Door IDs. + // +kubebuilder:validation:Optional + XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid"` + + // Specifies if a Front Door Health Probe should be expected. The only possible value is 1. + // +kubebuilder:validation:Optional + XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe"` + + // Specifies a list of addresses for which matching should be applied. Omitting this value means allow any. + // +kubebuilder:validation:Optional + XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for"` + + // Specifies a list of Hosts for which matching should be applied. + // +kubebuilder:validation:Optional + XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host"` +} + +type WindowsWebAppSlotSiteConfigIPRestrictionInitParameters struct { + + // The action to take. Possible values are Allow or Deny. Defaults to Allow. + // The action to take. Possible values are `Allow` or `Deny`. 
+ Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The Description of this IP Restriction. + // The description of the IP restriction rule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A headers block as defined above. + Headers []WindowsWebAppSlotSiteConfigIPRestrictionHeadersInitParameters `json:"headers,omitempty" tf:"headers,omitempty"` + + // The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + // The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // The name which should be used for this Storage Account. + // The name which should be used for this `ip_restriction`. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The priority value of this ip_restriction. Defaults to 65000. + // The priority value of this `ip_restriction`. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Service Tag used for this IP Restriction. + // The Service Tag used for this IP Restriction. + ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag,omitempty"` + + // The subnet id which will be used by this Web App Slot for regional virtual network integration. + // The Virtual Network Subnet ID used for this IP Restriction. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate virtualNetworkSubnetId. 
+ // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDRef *v1.Reference `json:"virtualNetworkSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDSelector *v1.Selector `json:"virtualNetworkSubnetIdSelector,omitempty" tf:"-"` +} + +type WindowsWebAppSlotSiteConfigIPRestrictionObservation struct { + + // The action to take. Possible values are Allow or Deny. Defaults to Allow. + // The action to take. Possible values are `Allow` or `Deny`. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The Description of this IP Restriction. + // The description of the IP restriction rule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A headers block as defined above. + Headers []WindowsWebAppSlotSiteConfigIPRestrictionHeadersObservation `json:"headers,omitempty" tf:"headers,omitempty"` + + // The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + // The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // The name which should be used for this Storage Account. + // The name which should be used for this `ip_restriction`. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The priority value of this ip_restriction. Defaults to 65000. + // The priority value of this `ip_restriction`. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Service Tag used for this IP Restriction. + // The Service Tag used for this IP Restriction. + ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag,omitempty"` + + // The subnet id which will be used by this Web App Slot for regional virtual network integration. 
+ // The Virtual Network Subnet ID used for this IP Restriction. + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` +} + +type WindowsWebAppSlotSiteConfigIPRestrictionParameters struct { + + // The action to take. Possible values are Allow or Deny. Defaults to Allow. + // The action to take. Possible values are `Allow` or `Deny`. + // +kubebuilder:validation:Optional + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The Description of this IP Restriction. + // The description of the IP restriction rule. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A headers block as defined above. + // +kubebuilder:validation:Optional + Headers []WindowsWebAppSlotSiteConfigIPRestrictionHeadersParameters `json:"headers,omitempty" tf:"headers,omitempty"` + + // The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + // The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + // +kubebuilder:validation:Optional + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // The name which should be used for this Storage Account. + // The name which should be used for this `ip_restriction`. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The priority value of this ip_restriction. Defaults to 65000. + // The priority value of this `ip_restriction`. + // +kubebuilder:validation:Optional + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Service Tag used for this IP Restriction. + // The Service Tag used for this IP Restriction. 
+ // +kubebuilder:validation:Optional + ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag,omitempty"` + + // The subnet id which will be used by this Web App Slot for regional virtual network integration. + // The Virtual Network Subnet ID used for this IP Restriction. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDRef *v1.Reference `json:"virtualNetworkSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDSelector *v1.Selector `json:"virtualNetworkSubnetIdSelector,omitempty" tf:"-"` +} + +type WindowsWebAppSlotSiteConfigInitParameters struct { + + // The URL to the API Definition for this Windows Web App Slot. + APIDefinitionURL *string `json:"apiDefinitionUrl,omitempty" tf:"api_definition_url,omitempty"` + + // The API Management API ID this Windows Web App Slot os associated with. + APIManagementAPIID *string `json:"apiManagementApiId,omitempty" tf:"api_management_api_id,omitempty"` + + // If this Windows Web App Slot is Always On enabled. Defaults to true. + AlwaysOn *bool `json:"alwaysOn,omitempty" tf:"always_on,omitempty"` + + // The App command line to launch. + AppCommandLine *string `json:"appCommandLine,omitempty" tf:"app_command_line,omitempty"` + + // A application_stack block as defined above. 
+ ApplicationStack *WindowsWebAppSlotSiteConfigApplicationStackInitParameters `json:"applicationStack,omitempty" tf:"application_stack,omitempty"` + + // Should Auto heal rules be enabled. Required with auto_heal_setting. + AutoHealEnabled *bool `json:"autoHealEnabled,omitempty" tf:"auto_heal_enabled,omitempty"` + + // A auto_heal_setting block as defined above. Required with auto_heal. + AutoHealSetting *WindowsWebAppSlotSiteConfigAutoHealSettingInitParameters `json:"autoHealSetting,omitempty" tf:"auto_heal_setting,omitempty"` + + // The Windows Web App Slot Name to automatically swap to when deployment to that slot is successfully completed. + AutoSwapSlotName *string `json:"autoSwapSlotName,omitempty" tf:"auto_swap_slot_name,omitempty"` + + // The Client ID of the Managed Service Identity to use for connections to the Azure Container Registry. + ContainerRegistryManagedIdentityClientID *string `json:"containerRegistryManagedIdentityClientId,omitempty" tf:"container_registry_managed_identity_client_id,omitempty"` + + // Should connections for Azure Container Registry use Managed Identity. + ContainerRegistryUseManagedIdentity *bool `json:"containerRegistryUseManagedIdentity,omitempty" tf:"container_registry_use_managed_identity,omitempty"` + + // A cors block as defined above. + Cors *WindowsWebAppSlotSiteConfigCorsInitParameters `json:"cors,omitempty" tf:"cors,omitempty"` + + // Specifies a list of Default Documents for the Windows Web App Slot. + DefaultDocuments []*string `json:"defaultDocuments,omitempty" tf:"default_documents,omitempty"` + + // The State of FTP / FTPS service. Possible values include: AllAllowed, FtpsOnly, Disabled. Defaults to Disabled. + FtpsState *string `json:"ftpsState,omitempty" tf:"ftps_state,omitempty"` + + // The amount of time in minutes that a node can be unhealthy before being removed from the load balancer. Possible values are between 2 and 10. Only valid in conjunction with health_check_path. 
+ // The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between `2` and `10`. Defaults to `10`. Only valid in conjunction with `health_check_path` + HealthCheckEvictionTimeInMin *float64 `json:"healthCheckEvictionTimeInMin,omitempty" tf:"health_check_eviction_time_in_min,omitempty"` + + // The path to the Health Check. + HealthCheckPath *string `json:"healthCheckPath,omitempty" tf:"health_check_path,omitempty"` + + // Should the HTTP2 be enabled? + Http2Enabled *bool `json:"http2Enabled,omitempty" tf:"http2_enabled,omitempty"` + + // One or more ip_restriction blocks as defined above. + IPRestriction []WindowsWebAppSlotSiteConfigIPRestrictionInitParameters `json:"ipRestriction,omitempty" tf:"ip_restriction,omitempty"` + + // The Default action for traffic that does not match any ip_restriction rule. possible values include Allow and Deny. Defaults to Allow. + IPRestrictionDefaultAction *string `json:"ipRestrictionDefaultAction,omitempty" tf:"ip_restriction_default_action,omitempty"` + + // The Site load balancing. Possible values include: WeightedRoundRobin, LeastRequests, LeastResponseTime, WeightedTotalTraffic, RequestHash, PerSiteRoundRobin. Defaults to LeastRequests if omitted. + LoadBalancingMode *string `json:"loadBalancingMode,omitempty" tf:"load_balancing_mode,omitempty"` + + // Use Local MySQL. Defaults to false. + LocalMySQLEnabled *bool `json:"localMysqlEnabled,omitempty" tf:"local_mysql_enabled,omitempty"` + + // Managed pipeline mode. Possible values include: Integrated, Classic. Defaults to Integrated. + ManagedPipelineMode *string `json:"managedPipelineMode,omitempty" tf:"managed_pipeline_mode,omitempty"` + + // The configures the minimum version of TLS required for SSL requests. Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + MinimumTLSVersion *string `json:"minimumTlsVersion,omitempty" tf:"minimum_tls_version,omitempty"` + + // Should Remote Debugging be enabled. 
Defaults to false. + RemoteDebuggingEnabled *bool `json:"remoteDebuggingEnabled,omitempty" tf:"remote_debugging_enabled,omitempty"` + + // The Remote Debugging Version. Possible values include VS2017 and VS2019 + RemoteDebuggingVersion *string `json:"remoteDebuggingVersion,omitempty" tf:"remote_debugging_version,omitempty"` + + // One or more scm_ip_restriction blocks as defined above. + ScmIPRestriction []WindowsWebAppSlotSiteConfigScmIPRestrictionInitParameters `json:"scmIpRestriction,omitempty" tf:"scm_ip_restriction,omitempty"` + + // The Default action for traffic that does not match any scm_ip_restriction rule. possible values include Allow and Deny. Defaults to Allow. + ScmIPRestrictionDefaultAction *string `json:"scmIpRestrictionDefaultAction,omitempty" tf:"scm_ip_restriction_default_action,omitempty"` + + // The configures the minimum version of TLS required for SSL requests to the SCM site Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + ScmMinimumTLSVersion *string `json:"scmMinimumTlsVersion,omitempty" tf:"scm_minimum_tls_version,omitempty"` + + // Should the Windows Web App Slot ip_restriction configuration be used for the SCM also. + ScmUseMainIPRestriction *bool `json:"scmUseMainIpRestriction,omitempty" tf:"scm_use_main_ip_restriction,omitempty"` + + // Should the Windows Web App Slotuse a 32-bit worker. Defaults to true. + Use32BitWorker *bool `json:"use32BitWorker,omitempty" tf:"use_32_bit_worker,omitempty"` + + // One or more virtual_application blocks as defined below. + VirtualApplication []SiteConfigVirtualApplicationInitParameters `json:"virtualApplication,omitempty" tf:"virtual_application,omitempty"` + + // Should all outbound traffic to have NAT Gateways, Network Security Groups and User Defined Routes applied? Defaults to false. + // Should all outbound traffic to have Virtual Network Security Groups and User Defined Routes applied? Defaults to `false`. 
+ VnetRouteAllEnabled *bool `json:"vnetRouteAllEnabled,omitempty" tf:"vnet_route_all_enabled,omitempty"` + + // Should Web Sockets be enabled. Defaults to false. + WebsocketsEnabled *bool `json:"websocketsEnabled,omitempty" tf:"websockets_enabled,omitempty"` + + // The number of Workers for this Windows App Service Slot. + WorkerCount *float64 `json:"workerCount,omitempty" tf:"worker_count,omitempty"` +} + +type WindowsWebAppSlotSiteConfigObservation struct { + + // The URL to the API Definition for this Windows Web App Slot. + APIDefinitionURL *string `json:"apiDefinitionUrl,omitempty" tf:"api_definition_url,omitempty"` + + // The API Management API ID this Windows Web App Slot os associated with. + APIManagementAPIID *string `json:"apiManagementApiId,omitempty" tf:"api_management_api_id,omitempty"` + + // If this Windows Web App Slot is Always On enabled. Defaults to true. + AlwaysOn *bool `json:"alwaysOn,omitempty" tf:"always_on,omitempty"` + + // The App command line to launch. + AppCommandLine *string `json:"appCommandLine,omitempty" tf:"app_command_line,omitempty"` + + // A application_stack block as defined above. + ApplicationStack *WindowsWebAppSlotSiteConfigApplicationStackObservation `json:"applicationStack,omitempty" tf:"application_stack,omitempty"` + + // Should Auto heal rules be enabled. Required with auto_heal_setting. + AutoHealEnabled *bool `json:"autoHealEnabled,omitempty" tf:"auto_heal_enabled,omitempty"` + + // A auto_heal_setting block as defined above. Required with auto_heal. + AutoHealSetting *WindowsWebAppSlotSiteConfigAutoHealSettingObservation `json:"autoHealSetting,omitempty" tf:"auto_heal_setting,omitempty"` + + // The Windows Web App Slot Name to automatically swap to when deployment to that slot is successfully completed. + AutoSwapSlotName *string `json:"autoSwapSlotName,omitempty" tf:"auto_swap_slot_name,omitempty"` + + // The Client ID of the Managed Service Identity to use for connections to the Azure Container Registry. 
+ ContainerRegistryManagedIdentityClientID *string `json:"containerRegistryManagedIdentityClientId,omitempty" tf:"container_registry_managed_identity_client_id,omitempty"` + + // Should connections for Azure Container Registry use Managed Identity. + ContainerRegistryUseManagedIdentity *bool `json:"containerRegistryUseManagedIdentity,omitempty" tf:"container_registry_use_managed_identity,omitempty"` + + // A cors block as defined above. + Cors *WindowsWebAppSlotSiteConfigCorsObservation `json:"cors,omitempty" tf:"cors,omitempty"` + + // Specifies a list of Default Documents for the Windows Web App Slot. + DefaultDocuments []*string `json:"defaultDocuments,omitempty" tf:"default_documents,omitempty"` + + // Should the Windows Web App Slot be enabled? Defaults to true. + DetailedErrorLoggingEnabled *bool `json:"detailedErrorLoggingEnabled,omitempty" tf:"detailed_error_logging_enabled,omitempty"` + + // The State of FTP / FTPS service. Possible values include: AllAllowed, FtpsOnly, Disabled. Defaults to Disabled. + FtpsState *string `json:"ftpsState,omitempty" tf:"ftps_state,omitempty"` + + // The amount of time in minutes that a node can be unhealthy before being removed from the load balancer. Possible values are between 2 and 10. Only valid in conjunction with health_check_path. + // The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between `2` and `10`. Defaults to `10`. Only valid in conjunction with `health_check_path` + HealthCheckEvictionTimeInMin *float64 `json:"healthCheckEvictionTimeInMin,omitempty" tf:"health_check_eviction_time_in_min,omitempty"` + + // The path to the Health Check. + HealthCheckPath *string `json:"healthCheckPath,omitempty" tf:"health_check_path,omitempty"` + + // Should the HTTP2 be enabled? + Http2Enabled *bool `json:"http2Enabled,omitempty" tf:"http2_enabled,omitempty"` + + // One or more ip_restriction blocks as defined above. 
+ IPRestriction []WindowsWebAppSlotSiteConfigIPRestrictionObservation `json:"ipRestriction,omitempty" tf:"ip_restriction,omitempty"` + + // The Default action for traffic that does not match any ip_restriction rule. possible values include Allow and Deny. Defaults to Allow. + IPRestrictionDefaultAction *string `json:"ipRestrictionDefaultAction,omitempty" tf:"ip_restriction_default_action,omitempty"` + + // The Site load balancing. Possible values include: WeightedRoundRobin, LeastRequests, LeastResponseTime, WeightedTotalTraffic, RequestHash, PerSiteRoundRobin. Defaults to LeastRequests if omitted. + LoadBalancingMode *string `json:"loadBalancingMode,omitempty" tf:"load_balancing_mode,omitempty"` + + // Use Local MySQL. Defaults to false. + LocalMySQLEnabled *bool `json:"localMysqlEnabled,omitempty" tf:"local_mysql_enabled,omitempty"` + + // Managed pipeline mode. Possible values include: Integrated, Classic. Defaults to Integrated. + ManagedPipelineMode *string `json:"managedPipelineMode,omitempty" tf:"managed_pipeline_mode,omitempty"` + + // The configures the minimum version of TLS required for SSL requests. Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + MinimumTLSVersion *string `json:"minimumTlsVersion,omitempty" tf:"minimum_tls_version,omitempty"` + + // Should Remote Debugging be enabled. Defaults to false. + RemoteDebuggingEnabled *bool `json:"remoteDebuggingEnabled,omitempty" tf:"remote_debugging_enabled,omitempty"` + + // The Remote Debugging Version. Possible values include VS2017 and VS2019 + RemoteDebuggingVersion *string `json:"remoteDebuggingVersion,omitempty" tf:"remote_debugging_version,omitempty"` + + // One or more scm_ip_restriction blocks as defined above. + ScmIPRestriction []WindowsWebAppSlotSiteConfigScmIPRestrictionObservation `json:"scmIpRestriction,omitempty" tf:"scm_ip_restriction,omitempty"` + + // The Default action for traffic that does not match any scm_ip_restriction rule. possible values include Allow and Deny. 
Defaults to Allow. + ScmIPRestrictionDefaultAction *string `json:"scmIpRestrictionDefaultAction,omitempty" tf:"scm_ip_restriction_default_action,omitempty"` + + // The configures the minimum version of TLS required for SSL requests to the SCM site Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + ScmMinimumTLSVersion *string `json:"scmMinimumTlsVersion,omitempty" tf:"scm_minimum_tls_version,omitempty"` + + ScmType *string `json:"scmType,omitempty" tf:"scm_type,omitempty"` + + // Should the Windows Web App Slot ip_restriction configuration be used for the SCM also. + ScmUseMainIPRestriction *bool `json:"scmUseMainIpRestriction,omitempty" tf:"scm_use_main_ip_restriction,omitempty"` + + // Should the Windows Web App Slotuse a 32-bit worker. Defaults to true. + Use32BitWorker *bool `json:"use32BitWorker,omitempty" tf:"use_32_bit_worker,omitempty"` + + // One or more virtual_application blocks as defined below. + VirtualApplication []SiteConfigVirtualApplicationObservation `json:"virtualApplication,omitempty" tf:"virtual_application,omitempty"` + + // Should all outbound traffic to have NAT Gateways, Network Security Groups and User Defined Routes applied? Defaults to false. + // Should all outbound traffic to have Virtual Network Security Groups and User Defined Routes applied? Defaults to `false`. + VnetRouteAllEnabled *bool `json:"vnetRouteAllEnabled,omitempty" tf:"vnet_route_all_enabled,omitempty"` + + // Should Web Sockets be enabled. Defaults to false. + WebsocketsEnabled *bool `json:"websocketsEnabled,omitempty" tf:"websockets_enabled,omitempty"` + + WindowsFxVersion *string `json:"windowsFxVersion,omitempty" tf:"windows_fx_version,omitempty"` + + // The number of Workers for this Windows App Service Slot. + WorkerCount *float64 `json:"workerCount,omitempty" tf:"worker_count,omitempty"` +} + +type WindowsWebAppSlotSiteConfigParameters struct { + + // The URL to the API Definition for this Windows Web App Slot. 
+ // +kubebuilder:validation:Optional + APIDefinitionURL *string `json:"apiDefinitionUrl,omitempty" tf:"api_definition_url,omitempty"` + + // The API Management API ID this Windows Web App Slot os associated with. + // +kubebuilder:validation:Optional + APIManagementAPIID *string `json:"apiManagementApiId,omitempty" tf:"api_management_api_id,omitempty"` + + // If this Windows Web App Slot is Always On enabled. Defaults to true. + // +kubebuilder:validation:Optional + AlwaysOn *bool `json:"alwaysOn,omitempty" tf:"always_on,omitempty"` + + // The App command line to launch. + // +kubebuilder:validation:Optional + AppCommandLine *string `json:"appCommandLine,omitempty" tf:"app_command_line,omitempty"` + + // A application_stack block as defined above. + // +kubebuilder:validation:Optional + ApplicationStack *WindowsWebAppSlotSiteConfigApplicationStackParameters `json:"applicationStack,omitempty" tf:"application_stack,omitempty"` + + // Should Auto heal rules be enabled. Required with auto_heal_setting. + // +kubebuilder:validation:Optional + AutoHealEnabled *bool `json:"autoHealEnabled,omitempty" tf:"auto_heal_enabled,omitempty"` + + // A auto_heal_setting block as defined above. Required with auto_heal. + // +kubebuilder:validation:Optional + AutoHealSetting *WindowsWebAppSlotSiteConfigAutoHealSettingParameters `json:"autoHealSetting,omitempty" tf:"auto_heal_setting,omitempty"` + + // The Windows Web App Slot Name to automatically swap to when deployment to that slot is successfully completed. + // +kubebuilder:validation:Optional + AutoSwapSlotName *string `json:"autoSwapSlotName,omitempty" tf:"auto_swap_slot_name,omitempty"` + + // The Client ID of the Managed Service Identity to use for connections to the Azure Container Registry. 
+ // +kubebuilder:validation:Optional + ContainerRegistryManagedIdentityClientID *string `json:"containerRegistryManagedIdentityClientId,omitempty" tf:"container_registry_managed_identity_client_id,omitempty"` + + // Should connections for Azure Container Registry use Managed Identity. + // +kubebuilder:validation:Optional + ContainerRegistryUseManagedIdentity *bool `json:"containerRegistryUseManagedIdentity,omitempty" tf:"container_registry_use_managed_identity,omitempty"` + + // A cors block as defined above. + // +kubebuilder:validation:Optional + Cors *WindowsWebAppSlotSiteConfigCorsParameters `json:"cors,omitempty" tf:"cors,omitempty"` + + // Specifies a list of Default Documents for the Windows Web App Slot. + // +kubebuilder:validation:Optional + DefaultDocuments []*string `json:"defaultDocuments,omitempty" tf:"default_documents,omitempty"` + + // The State of FTP / FTPS service. Possible values include: AllAllowed, FtpsOnly, Disabled. Defaults to Disabled. + // +kubebuilder:validation:Optional + FtpsState *string `json:"ftpsState,omitempty" tf:"ftps_state,omitempty"` + + // The amount of time in minutes that a node can be unhealthy before being removed from the load balancer. Possible values are between 2 and 10. Only valid in conjunction with health_check_path. + // The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between `2` and `10`. Defaults to `10`. Only valid in conjunction with `health_check_path` + // +kubebuilder:validation:Optional + HealthCheckEvictionTimeInMin *float64 `json:"healthCheckEvictionTimeInMin,omitempty" tf:"health_check_eviction_time_in_min,omitempty"` + + // The path to the Health Check. + // +kubebuilder:validation:Optional + HealthCheckPath *string `json:"healthCheckPath,omitempty" tf:"health_check_path,omitempty"` + + // Should the HTTP2 be enabled? 
+ // +kubebuilder:validation:Optional + Http2Enabled *bool `json:"http2Enabled,omitempty" tf:"http2_enabled,omitempty"` + + // One or more ip_restriction blocks as defined above. + // +kubebuilder:validation:Optional + IPRestriction []WindowsWebAppSlotSiteConfigIPRestrictionParameters `json:"ipRestriction,omitempty" tf:"ip_restriction,omitempty"` + + // The Default action for traffic that does not match any ip_restriction rule. possible values include Allow and Deny. Defaults to Allow. + // +kubebuilder:validation:Optional + IPRestrictionDefaultAction *string `json:"ipRestrictionDefaultAction,omitempty" tf:"ip_restriction_default_action,omitempty"` + + // The Site load balancing. Possible values include: WeightedRoundRobin, LeastRequests, LeastResponseTime, WeightedTotalTraffic, RequestHash, PerSiteRoundRobin. Defaults to LeastRequests if omitted. + // +kubebuilder:validation:Optional + LoadBalancingMode *string `json:"loadBalancingMode,omitempty" tf:"load_balancing_mode,omitempty"` + + // Use Local MySQL. Defaults to false. + // +kubebuilder:validation:Optional + LocalMySQLEnabled *bool `json:"localMysqlEnabled,omitempty" tf:"local_mysql_enabled,omitempty"` + + // Managed pipeline mode. Possible values include: Integrated, Classic. Defaults to Integrated. + // +kubebuilder:validation:Optional + ManagedPipelineMode *string `json:"managedPipelineMode,omitempty" tf:"managed_pipeline_mode,omitempty"` + + // The configures the minimum version of TLS required for SSL requests. Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + // +kubebuilder:validation:Optional + MinimumTLSVersion *string `json:"minimumTlsVersion,omitempty" tf:"minimum_tls_version,omitempty"` + + // Should Remote Debugging be enabled. Defaults to false. + // +kubebuilder:validation:Optional + RemoteDebuggingEnabled *bool `json:"remoteDebuggingEnabled,omitempty" tf:"remote_debugging_enabled,omitempty"` + + // The Remote Debugging Version. 
Possible values include VS2017 and VS2019 + // +kubebuilder:validation:Optional + RemoteDebuggingVersion *string `json:"remoteDebuggingVersion,omitempty" tf:"remote_debugging_version,omitempty"` + + // One or more scm_ip_restriction blocks as defined above. + // +kubebuilder:validation:Optional + ScmIPRestriction []WindowsWebAppSlotSiteConfigScmIPRestrictionParameters `json:"scmIpRestriction,omitempty" tf:"scm_ip_restriction,omitempty"` + + // The Default action for traffic that does not match any scm_ip_restriction rule. possible values include Allow and Deny. Defaults to Allow. + // +kubebuilder:validation:Optional + ScmIPRestrictionDefaultAction *string `json:"scmIpRestrictionDefaultAction,omitempty" tf:"scm_ip_restriction_default_action,omitempty"` + + // The configures the minimum version of TLS required for SSL requests to the SCM site Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + // +kubebuilder:validation:Optional + ScmMinimumTLSVersion *string `json:"scmMinimumTlsVersion,omitempty" tf:"scm_minimum_tls_version,omitempty"` + + // Should the Windows Web App Slot ip_restriction configuration be used for the SCM also. + // +kubebuilder:validation:Optional + ScmUseMainIPRestriction *bool `json:"scmUseMainIpRestriction,omitempty" tf:"scm_use_main_ip_restriction,omitempty"` + + // Should the Windows Web App Slotuse a 32-bit worker. Defaults to true. + // +kubebuilder:validation:Optional + Use32BitWorker *bool `json:"use32BitWorker,omitempty" tf:"use_32_bit_worker,omitempty"` + + // One or more virtual_application blocks as defined below. + // +kubebuilder:validation:Optional + VirtualApplication []SiteConfigVirtualApplicationParameters `json:"virtualApplication,omitempty" tf:"virtual_application,omitempty"` + + // Should all outbound traffic to have NAT Gateways, Network Security Groups and User Defined Routes applied? Defaults to false. + // Should all outbound traffic to have Virtual Network Security Groups and User Defined Routes applied? 
Defaults to `false`. + // +kubebuilder:validation:Optional + VnetRouteAllEnabled *bool `json:"vnetRouteAllEnabled,omitempty" tf:"vnet_route_all_enabled,omitempty"` + + // Should Web Sockets be enabled. Defaults to false. + // +kubebuilder:validation:Optional + WebsocketsEnabled *bool `json:"websocketsEnabled,omitempty" tf:"websockets_enabled,omitempty"` + + // The number of Workers for this Windows App Service Slot. + // +kubebuilder:validation:Optional + WorkerCount *float64 `json:"workerCount,omitempty" tf:"worker_count,omitempty"` +} + +type WindowsWebAppSlotSiteConfigScmIPRestrictionHeadersInitParameters struct { + + // Specifies a list of Azure Front Door IDs. + XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid"` + + // Specifies if a Front Door Health Probe should be expected. The only possible value is 1. + XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe"` + + // Specifies a list of addresses for which matching should be applied. Omitting this value means allow any. + XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for"` + + // Specifies a list of Hosts for which matching should be applied. + XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host"` +} + +type WindowsWebAppSlotSiteConfigScmIPRestrictionHeadersObservation struct { + + // Specifies a list of Azure Front Door IDs. + XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid,omitempty"` + + // Specifies if a Front Door Health Probe should be expected. The only possible value is 1. + XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe,omitempty"` + + // Specifies a list of addresses for which matching should be applied. Omitting this value means allow any. + XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for,omitempty"` + + // Specifies a list of Hosts for which matching should be applied. 
+ XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host,omitempty"` +} + +type WindowsWebAppSlotSiteConfigScmIPRestrictionHeadersParameters struct { + + // Specifies a list of Azure Front Door IDs. + // +kubebuilder:validation:Optional + XAzureFdid []*string `json:"xAzureFdid,omitempty" tf:"x_azure_fdid"` + + // Specifies if a Front Door Health Probe should be expected. The only possible value is 1. + // +kubebuilder:validation:Optional + XFdHealthProbe []*string `json:"xFdHealthProbe,omitempty" tf:"x_fd_health_probe"` + + // Specifies a list of addresses for which matching should be applied. Omitting this value means allow any. + // +kubebuilder:validation:Optional + XForwardedFor []*string `json:"xForwardedFor,omitempty" tf:"x_forwarded_for"` + + // Specifies a list of Hosts for which matching should be applied. + // +kubebuilder:validation:Optional + XForwardedHost []*string `json:"xForwardedHost,omitempty" tf:"x_forwarded_host"` +} + +type WindowsWebAppSlotSiteConfigScmIPRestrictionInitParameters struct { + + // The action to take. Possible values are Allow or Deny. Defaults to Allow. + // The action to take. Possible values are `Allow` or `Deny`. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The Description of this IP Restriction. + // The description of the IP restriction rule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A headers block as defined above. + Headers []WindowsWebAppSlotSiteConfigScmIPRestrictionHeadersInitParameters `json:"headers,omitempty" tf:"headers,omitempty"` + + // The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + // The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // The name which should be used for this Storage Account. 
+ // The name which should be used for this `ip_restriction`. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The priority value of this ip_restriction. Defaults to 65000. + // The priority value of this `ip_restriction`. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Service Tag used for this IP Restriction. + // The Service Tag used for this IP Restriction. + ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag,omitempty"` + + // The subnet id which will be used by this Web App Slot for regional virtual network integration. + // The Virtual Network Subnet ID used for this IP Restriction. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDRef *v1.Reference `json:"virtualNetworkSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDSelector *v1.Selector `json:"virtualNetworkSubnetIdSelector,omitempty" tf:"-"` +} + +type WindowsWebAppSlotSiteConfigScmIPRestrictionObservation struct { + + // The action to take. Possible values are Allow or Deny. Defaults to Allow. + // The action to take. Possible values are `Allow` or `Deny`. + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The Description of this IP Restriction. + // The description of the IP restriction rule. + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A headers block as defined above. 
+ Headers []WindowsWebAppSlotSiteConfigScmIPRestrictionHeadersObservation `json:"headers,omitempty" tf:"headers,omitempty"` + + // The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + // The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // The name which should be used for this ip_restriction. + // The name which should be used for this `ip_restriction`. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The priority value of this ip_restriction. Defaults to 65000. + // The priority value of this `ip_restriction`. + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Service Tag used for this IP Restriction. + // The Service Tag used for this IP Restriction. + ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag,omitempty"` + + // The subnet id which will be used by this Web App Slot for regional virtual network integration. + // The Virtual Network Subnet ID used for this IP Restriction. + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` +} + +type WindowsWebAppSlotSiteConfigScmIPRestrictionParameters struct { + + // The action to take. Possible values are Allow or Deny. Defaults to Allow. + // The action to take. Possible values are `Allow` or `Deny`. + // +kubebuilder:validation:Optional + Action *string `json:"action,omitempty" tf:"action,omitempty"` + + // The Description of this IP Restriction. + // The description of the IP restriction rule. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // A headers block as defined above.
+ // +kubebuilder:validation:Optional + Headers []WindowsWebAppSlotSiteConfigScmIPRestrictionHeadersParameters `json:"headers,omitempty" tf:"headers,omitempty"` + + // The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + // The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + // +kubebuilder:validation:Optional + IPAddress *string `json:"ipAddress,omitempty" tf:"ip_address,omitempty"` + + // The name which should be used for this Storage Account. + // The name which should be used for this `ip_restriction`. + // +kubebuilder:validation:Optional + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The priority value of this ip_restriction. Defaults to 65000. + // The priority value of this `ip_restriction`. + // +kubebuilder:validation:Optional + Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"` + + // The Service Tag used for this IP Restriction. + // The Service Tag used for this IP Restriction. + // +kubebuilder:validation:Optional + ServiceTag *string `json:"serviceTag,omitempty" tf:"service_tag,omitempty"` + + // The subnet id which will be used by this Web App Slot for regional virtual network integration. + // The Virtual Network Subnet ID used for this IP Restriction. + // +crossplane:generate:reference:type=github.com/upbound/provider-azure/apis/network/v1beta2.Subnet + // +crossplane:generate:reference:extractor=github.com/upbound/provider-azure/apis/rconfig.ExtractResourceID() + // +kubebuilder:validation:Optional + VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty" tf:"virtual_network_subnet_id,omitempty"` + + // Reference to a Subnet in network to populate virtualNetworkSubnetId. 
+ // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDRef *v1.Reference `json:"virtualNetworkSubnetIdRef,omitempty" tf:"-"` + + // Selector for a Subnet in network to populate virtualNetworkSubnetId. + // +kubebuilder:validation:Optional + VirtualNetworkSubnetIDSelector *v1.Selector `json:"virtualNetworkSubnetIdSelector,omitempty" tf:"-"` +} + +type WindowsWebAppSlotSiteCredentialInitParameters struct { +} + +type WindowsWebAppSlotSiteCredentialObservation struct { + + // The Site Credentials Username used for publishing. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Site Credentials Password used for publishing. + Password *string `json:"password,omitempty" tf:"password,omitempty"` +} + +type WindowsWebAppSlotSiteCredentialParameters struct { +} + +type WindowsWebAppSlotStorageAccountInitParameters struct { + + // The Name of the Storage Account. + AccountName *string `json:"accountName,omitempty" tf:"account_name,omitempty"` + + // The path at which to mount the storage share. + MountPath *string `json:"mountPath,omitempty" tf:"mount_path,omitempty"` + + // The name which should be used for this Storage Account. + Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Name of the File Share or Container Name for Blob storage. + ShareName *string `json:"shareName,omitempty" tf:"share_name,omitempty"` + + // The Azure Storage Type. Possible values include AzureFiles and AzureBlob + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type WindowsWebAppSlotStorageAccountObservation struct { + + // The Name of the Storage Account. + AccountName *string `json:"accountName,omitempty" tf:"account_name,omitempty"` + + // The path at which to mount the storage share. + MountPath *string `json:"mountPath,omitempty" tf:"mount_path,omitempty"` + + // The name which should be used for this Storage Account. 
+ Name *string `json:"name,omitempty" tf:"name,omitempty"` + + // The Name of the File Share or Container Name for Blob storage. + ShareName *string `json:"shareName,omitempty" tf:"share_name,omitempty"` + + // The Azure Storage Type. Possible values include AzureFiles and AzureBlob + Type *string `json:"type,omitempty" tf:"type,omitempty"` +} + +type WindowsWebAppSlotStorageAccountParameters struct { + + // The Access key for the storage account. + // +kubebuilder:validation:Required + AccessKeySecretRef v1.SecretKeySelector `json:"accessKeySecretRef" tf:"-"` + + // The Name of the Storage Account. + // +kubebuilder:validation:Optional + AccountName *string `json:"accountName" tf:"account_name,omitempty"` + + // The path at which to mount the storage share. + // +kubebuilder:validation:Optional + MountPath *string `json:"mountPath,omitempty" tf:"mount_path,omitempty"` + + // The name which should be used for this Storage Account. + // +kubebuilder:validation:Optional + Name *string `json:"name" tf:"name,omitempty"` + + // The Name of the File Share or Container Name for Blob storage. + // +kubebuilder:validation:Optional + ShareName *string `json:"shareName" tf:"share_name,omitempty"` + + // The Azure Storage Type. Possible values include AzureFiles and AzureBlob + // +kubebuilder:validation:Optional + Type *string `json:"type" tf:"type,omitempty"` +} + +// WindowsWebAppSlotSpec defines the desired state of WindowsWebAppSlot +type WindowsWebAppSlotSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider WindowsWebAppSlotParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. 
+ // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider WindowsWebAppSlotInitParameters `json:"initProvider,omitempty"` +} + +// WindowsWebAppSlotStatus defines the observed state of WindowsWebAppSlot. +type WindowsWebAppSlotStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider WindowsWebAppSlotObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// WindowsWebAppSlot is the Schema for the WindowsWebAppSlots API. Manages a Windows Web App Slot. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,azure} +type WindowsWebAppSlot struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.siteConfig) || (has(self.initProvider) && has(self.initProvider.siteConfig))",message="spec.forProvider.siteConfig is a required parameter" + Spec WindowsWebAppSlotSpec `json:"spec"` + Status WindowsWebAppSlotStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// WindowsWebAppSlotList contains a list of WindowsWebAppSlots +type 
WindowsWebAppSlotList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []WindowsWebAppSlot `json:"items"` +} + +// Repository type metadata. +var ( + WindowsWebAppSlot_Kind = "WindowsWebAppSlot" + WindowsWebAppSlot_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: WindowsWebAppSlot_Kind}.String() + WindowsWebAppSlot_KindAPIVersion = WindowsWebAppSlot_Kind + "." + CRDGroupVersion.String() + WindowsWebAppSlot_GroupVersionKind = CRDGroupVersion.WithKind(WindowsWebAppSlot_Kind) +) + +func init() { + SchemeBuilder.Register(&WindowsWebAppSlot{}, &WindowsWebAppSlotList{}) +} diff --git a/apis/zz_register.go b/apis/zz_register.go index 1545ddc3e..66eea1ad4 100755 --- a/apis/zz_register.go +++ b/apis/zz_register.go @@ -11,200 +11,330 @@ import ( "k8s.io/apimachinery/pkg/runtime" v1beta1 "github.com/upbound/provider-azure/apis/alertsmanagement/v1beta1" + v1beta2 "github.com/upbound/provider-azure/apis/alertsmanagement/v1beta2" v1beta1analysisservices "github.com/upbound/provider-azure/apis/analysisservices/v1beta1" v1beta1apimanagement "github.com/upbound/provider-azure/apis/apimanagement/v1beta1" + v1beta2apimanagement "github.com/upbound/provider-azure/apis/apimanagement/v1beta2" v1beta1appconfiguration "github.com/upbound/provider-azure/apis/appconfiguration/v1beta1" + v1beta2appconfiguration "github.com/upbound/provider-azure/apis/appconfiguration/v1beta2" v1beta1appplatform "github.com/upbound/provider-azure/apis/appplatform/v1beta1" + v1beta2appplatform "github.com/upbound/provider-azure/apis/appplatform/v1beta2" v1beta1attestation "github.com/upbound/provider-azure/apis/attestation/v1beta1" v1beta1authorization "github.com/upbound/provider-azure/apis/authorization/v1beta1" + v1beta2authorization "github.com/upbound/provider-azure/apis/authorization/v1beta2" v1beta1automation "github.com/upbound/provider-azure/apis/automation/v1beta1" + v1beta2automation "github.com/upbound/provider-azure/apis/automation/v1beta2" 
v1beta1azure "github.com/upbound/provider-azure/apis/azure/v1beta1" v1beta1azurestackhci "github.com/upbound/provider-azure/apis/azurestackhci/v1beta1" v1beta1botservice "github.com/upbound/provider-azure/apis/botservice/v1beta1" v1beta1cache "github.com/upbound/provider-azure/apis/cache/v1beta1" + v1beta2cache "github.com/upbound/provider-azure/apis/cache/v1beta2" v1beta1cdn "github.com/upbound/provider-azure/apis/cdn/v1beta1" + v1beta2cdn "github.com/upbound/provider-azure/apis/cdn/v1beta2" v1beta1certificateregistration "github.com/upbound/provider-azure/apis/certificateregistration/v1beta1" v1beta1cognitiveservices "github.com/upbound/provider-azure/apis/cognitiveservices/v1beta1" + v1beta2cognitiveservices "github.com/upbound/provider-azure/apis/cognitiveservices/v1beta2" v1beta1communication "github.com/upbound/provider-azure/apis/communication/v1beta1" v1beta1compute "github.com/upbound/provider-azure/apis/compute/v1beta1" + v1beta2compute "github.com/upbound/provider-azure/apis/compute/v1beta2" v1beta1confidentialledger "github.com/upbound/provider-azure/apis/confidentialledger/v1beta1" v1beta1consumption "github.com/upbound/provider-azure/apis/consumption/v1beta1" + v1beta2consumption "github.com/upbound/provider-azure/apis/consumption/v1beta2" v1beta1containerapp "github.com/upbound/provider-azure/apis/containerapp/v1beta1" + v1beta2containerapp "github.com/upbound/provider-azure/apis/containerapp/v1beta2" v1beta1containerregistry "github.com/upbound/provider-azure/apis/containerregistry/v1beta1" + v1beta2containerregistry "github.com/upbound/provider-azure/apis/containerregistry/v1beta2" v1beta1containerservice "github.com/upbound/provider-azure/apis/containerservice/v1beta1" + v1beta2containerservice "github.com/upbound/provider-azure/apis/containerservice/v1beta2" v1beta1cosmosdb "github.com/upbound/provider-azure/apis/cosmosdb/v1beta1" + v1beta2cosmosdb "github.com/upbound/provider-azure/apis/cosmosdb/v1beta2" v1beta1costmanagement 
"github.com/upbound/provider-azure/apis/costmanagement/v1beta1" + v1beta2costmanagement "github.com/upbound/provider-azure/apis/costmanagement/v1beta2" v1beta1customproviders "github.com/upbound/provider-azure/apis/customproviders/v1beta1" v1beta1databoxedge "github.com/upbound/provider-azure/apis/databoxedge/v1beta1" v1beta1databricks "github.com/upbound/provider-azure/apis/databricks/v1beta1" + v1beta2databricks "github.com/upbound/provider-azure/apis/databricks/v1beta2" v1beta1datafactory "github.com/upbound/provider-azure/apis/datafactory/v1beta1" + v1beta2datafactory "github.com/upbound/provider-azure/apis/datafactory/v1beta2" v1beta1datamigration "github.com/upbound/provider-azure/apis/datamigration/v1beta1" v1beta1dataprotection "github.com/upbound/provider-azure/apis/dataprotection/v1beta1" + v1beta2dataprotection "github.com/upbound/provider-azure/apis/dataprotection/v1beta2" v1beta1datashare "github.com/upbound/provider-azure/apis/datashare/v1beta1" + v1beta2datashare "github.com/upbound/provider-azure/apis/datashare/v1beta2" v1beta1dbformariadb "github.com/upbound/provider-azure/apis/dbformariadb/v1beta1" v1beta1dbformysql "github.com/upbound/provider-azure/apis/dbformysql/v1beta1" + v1beta2dbformysql "github.com/upbound/provider-azure/apis/dbformysql/v1beta2" v1beta1dbforpostgresql "github.com/upbound/provider-azure/apis/dbforpostgresql/v1beta1" + v1beta2dbforpostgresql "github.com/upbound/provider-azure/apis/dbforpostgresql/v1beta2" v1beta1devices "github.com/upbound/provider-azure/apis/devices/v1beta1" + v1beta2devices "github.com/upbound/provider-azure/apis/devices/v1beta2" v1beta1deviceupdate "github.com/upbound/provider-azure/apis/deviceupdate/v1beta1" + v1beta2deviceupdate "github.com/upbound/provider-azure/apis/deviceupdate/v1beta2" v1beta1devtestlab "github.com/upbound/provider-azure/apis/devtestlab/v1beta1" + v1beta2devtestlab "github.com/upbound/provider-azure/apis/devtestlab/v1beta2" v1beta1digitaltwins 
"github.com/upbound/provider-azure/apis/digitaltwins/v1beta1" + v1beta2digitaltwins "github.com/upbound/provider-azure/apis/digitaltwins/v1beta2" v1beta1elastic "github.com/upbound/provider-azure/apis/elastic/v1beta1" + v1beta2elastic "github.com/upbound/provider-azure/apis/elastic/v1beta2" v1beta1eventgrid "github.com/upbound/provider-azure/apis/eventgrid/v1beta1" + v1beta2eventgrid "github.com/upbound/provider-azure/apis/eventgrid/v1beta2" v1beta1eventhub "github.com/upbound/provider-azure/apis/eventhub/v1beta1" + v1beta2eventhub "github.com/upbound/provider-azure/apis/eventhub/v1beta2" v1beta1fluidrelay "github.com/upbound/provider-azure/apis/fluidrelay/v1beta1" + v1beta2fluidrelay "github.com/upbound/provider-azure/apis/fluidrelay/v1beta2" v1beta1guestconfiguration "github.com/upbound/provider-azure/apis/guestconfiguration/v1beta1" + v1beta2guestconfiguration "github.com/upbound/provider-azure/apis/guestconfiguration/v1beta2" v1beta1hdinsight "github.com/upbound/provider-azure/apis/hdinsight/v1beta1" + v1beta2hdinsight "github.com/upbound/provider-azure/apis/hdinsight/v1beta2" v1beta1healthbot "github.com/upbound/provider-azure/apis/healthbot/v1beta1" v1beta1healthcareapis "github.com/upbound/provider-azure/apis/healthcareapis/v1beta1" + v1beta2healthcareapis "github.com/upbound/provider-azure/apis/healthcareapis/v1beta2" v1beta1insights "github.com/upbound/provider-azure/apis/insights/v1beta1" + v1beta2insights "github.com/upbound/provider-azure/apis/insights/v1beta2" v1beta1iotcentral "github.com/upbound/provider-azure/apis/iotcentral/v1beta1" + v1beta2iotcentral "github.com/upbound/provider-azure/apis/iotcentral/v1beta2" v1beta1keyvault "github.com/upbound/provider-azure/apis/keyvault/v1beta1" + v1beta2keyvault "github.com/upbound/provider-azure/apis/keyvault/v1beta2" v1beta1kusto "github.com/upbound/provider-azure/apis/kusto/v1beta1" + v1beta2kusto "github.com/upbound/provider-azure/apis/kusto/v1beta2" v1beta1labservices 
"github.com/upbound/provider-azure/apis/labservices/v1beta1" + v1beta2labservices "github.com/upbound/provider-azure/apis/labservices/v1beta2" v1beta1loadtestservice "github.com/upbound/provider-azure/apis/loadtestservice/v1beta1" + v1beta2loadtestservice "github.com/upbound/provider-azure/apis/loadtestservice/v1beta2" v1beta1logic "github.com/upbound/provider-azure/apis/logic/v1beta1" + v1beta2logic "github.com/upbound/provider-azure/apis/logic/v1beta2" v1beta1logz "github.com/upbound/provider-azure/apis/logz/v1beta1" + v1beta2logz "github.com/upbound/provider-azure/apis/logz/v1beta2" v1beta1machinelearningservices "github.com/upbound/provider-azure/apis/machinelearningservices/v1beta1" + v1beta2machinelearningservices "github.com/upbound/provider-azure/apis/machinelearningservices/v1beta2" v1beta1maintenance "github.com/upbound/provider-azure/apis/maintenance/v1beta1" + v1beta2maintenance "github.com/upbound/provider-azure/apis/maintenance/v1beta2" v1beta1managedidentity "github.com/upbound/provider-azure/apis/managedidentity/v1beta1" v1beta1management "github.com/upbound/provider-azure/apis/management/v1beta1" v1beta1maps "github.com/upbound/provider-azure/apis/maps/v1beta1" v1beta1marketplaceordering "github.com/upbound/provider-azure/apis/marketplaceordering/v1beta1" v1beta1media "github.com/upbound/provider-azure/apis/media/v1beta1" + v1beta2media "github.com/upbound/provider-azure/apis/media/v1beta2" v1beta1mixedreality "github.com/upbound/provider-azure/apis/mixedreality/v1beta1" v1beta1netapp "github.com/upbound/provider-azure/apis/netapp/v1beta1" + v1beta2netapp "github.com/upbound/provider-azure/apis/netapp/v1beta2" v1beta1network "github.com/upbound/provider-azure/apis/network/v1beta1" + v1beta2network "github.com/upbound/provider-azure/apis/network/v1beta2" v1beta1notificationhubs "github.com/upbound/provider-azure/apis/notificationhubs/v1beta1" + v1beta2notificationhubs "github.com/upbound/provider-azure/apis/notificationhubs/v1beta2" 
v1beta1operationalinsights "github.com/upbound/provider-azure/apis/operationalinsights/v1beta1" + v1beta2operationalinsights "github.com/upbound/provider-azure/apis/operationalinsights/v1beta2" v1beta1operationsmanagement "github.com/upbound/provider-azure/apis/operationsmanagement/v1beta1" + v1beta2operationsmanagement "github.com/upbound/provider-azure/apis/operationsmanagement/v1beta2" v1beta1orbital "github.com/upbound/provider-azure/apis/orbital/v1beta1" v1beta1policyinsights "github.com/upbound/provider-azure/apis/policyinsights/v1beta1" v1beta1portal "github.com/upbound/provider-azure/apis/portal/v1beta1" v1beta1powerbidedicated "github.com/upbound/provider-azure/apis/powerbidedicated/v1beta1" v1beta1purview "github.com/upbound/provider-azure/apis/purview/v1beta1" + v1beta2purview "github.com/upbound/provider-azure/apis/purview/v1beta2" v1beta1recoveryservices "github.com/upbound/provider-azure/apis/recoveryservices/v1beta1" + v1beta2recoveryservices "github.com/upbound/provider-azure/apis/recoveryservices/v1beta2" v1beta1relay "github.com/upbound/provider-azure/apis/relay/v1beta1" v1beta1resources "github.com/upbound/provider-azure/apis/resources/v1beta1" + v1beta2resources "github.com/upbound/provider-azure/apis/resources/v1beta2" v1beta1search "github.com/upbound/provider-azure/apis/search/v1beta1" + v1beta2search "github.com/upbound/provider-azure/apis/search/v1beta2" v1beta1security "github.com/upbound/provider-azure/apis/security/v1beta1" + v1beta2security "github.com/upbound/provider-azure/apis/security/v1beta2" v1beta1securityinsights "github.com/upbound/provider-azure/apis/securityinsights/v1beta1" v1beta1servicebus "github.com/upbound/provider-azure/apis/servicebus/v1beta1" + v1beta2servicebus "github.com/upbound/provider-azure/apis/servicebus/v1beta2" v1beta1servicefabric "github.com/upbound/provider-azure/apis/servicefabric/v1beta1" + v1beta2servicefabric "github.com/upbound/provider-azure/apis/servicefabric/v1beta2" v1beta1servicelinker 
"github.com/upbound/provider-azure/apis/servicelinker/v1beta1" + v1beta2servicelinker "github.com/upbound/provider-azure/apis/servicelinker/v1beta2" v1beta1signalrservice "github.com/upbound/provider-azure/apis/signalrservice/v1beta1" + v1beta2signalrservice "github.com/upbound/provider-azure/apis/signalrservice/v1beta2" v1beta1solutions "github.com/upbound/provider-azure/apis/solutions/v1beta1" v1beta1spring "github.com/upbound/provider-azure/apis/spring/v1beta1" v1beta1sql "github.com/upbound/provider-azure/apis/sql/v1beta1" + v1beta2sql "github.com/upbound/provider-azure/apis/sql/v1beta2" v1beta1storage "github.com/upbound/provider-azure/apis/storage/v1beta1" + v1beta2storage "github.com/upbound/provider-azure/apis/storage/v1beta2" v1beta1storagecache "github.com/upbound/provider-azure/apis/storagecache/v1beta1" + v1beta2storagecache "github.com/upbound/provider-azure/apis/storagecache/v1beta2" v1beta1storagepool "github.com/upbound/provider-azure/apis/storagepool/v1beta1" v1beta1storagesync "github.com/upbound/provider-azure/apis/storagesync/v1beta1" v1beta1streamanalytics "github.com/upbound/provider-azure/apis/streamanalytics/v1beta1" + v1beta2streamanalytics "github.com/upbound/provider-azure/apis/streamanalytics/v1beta2" v1beta1synapse "github.com/upbound/provider-azure/apis/synapse/v1beta1" + v1beta2synapse "github.com/upbound/provider-azure/apis/synapse/v1beta2" v1beta1timeseriesinsights "github.com/upbound/provider-azure/apis/timeseriesinsights/v1beta1" + v1beta2timeseriesinsights "github.com/upbound/provider-azure/apis/timeseriesinsights/v1beta2" v1alpha1 "github.com/upbound/provider-azure/apis/v1alpha1" v1beta1apis "github.com/upbound/provider-azure/apis/v1beta1" v1beta1web "github.com/upbound/provider-azure/apis/web/v1beta1" + v1beta2web "github.com/upbound/provider-azure/apis/web/v1beta2" ) func init() { // Register the types with the Scheme so the components can map objects to GroupVersionKinds and back AddToSchemes = append(AddToSchemes, 
v1beta1.SchemeBuilder.AddToScheme, + v1beta2.SchemeBuilder.AddToScheme, v1beta1analysisservices.SchemeBuilder.AddToScheme, v1beta1apimanagement.SchemeBuilder.AddToScheme, + v1beta2apimanagement.SchemeBuilder.AddToScheme, v1beta1appconfiguration.SchemeBuilder.AddToScheme, + v1beta2appconfiguration.SchemeBuilder.AddToScheme, v1beta1appplatform.SchemeBuilder.AddToScheme, + v1beta2appplatform.SchemeBuilder.AddToScheme, v1beta1attestation.SchemeBuilder.AddToScheme, v1beta1authorization.SchemeBuilder.AddToScheme, + v1beta2authorization.SchemeBuilder.AddToScheme, v1beta1automation.SchemeBuilder.AddToScheme, + v1beta2automation.SchemeBuilder.AddToScheme, v1beta1azure.SchemeBuilder.AddToScheme, v1beta1azurestackhci.SchemeBuilder.AddToScheme, v1beta1botservice.SchemeBuilder.AddToScheme, v1beta1cache.SchemeBuilder.AddToScheme, + v1beta2cache.SchemeBuilder.AddToScheme, v1beta1cdn.SchemeBuilder.AddToScheme, + v1beta2cdn.SchemeBuilder.AddToScheme, v1beta1certificateregistration.SchemeBuilder.AddToScheme, v1beta1cognitiveservices.SchemeBuilder.AddToScheme, + v1beta2cognitiveservices.SchemeBuilder.AddToScheme, v1beta1communication.SchemeBuilder.AddToScheme, v1beta1compute.SchemeBuilder.AddToScheme, + v1beta2compute.SchemeBuilder.AddToScheme, v1beta1confidentialledger.SchemeBuilder.AddToScheme, v1beta1consumption.SchemeBuilder.AddToScheme, + v1beta2consumption.SchemeBuilder.AddToScheme, v1beta1containerapp.SchemeBuilder.AddToScheme, + v1beta2containerapp.SchemeBuilder.AddToScheme, v1beta1containerregistry.SchemeBuilder.AddToScheme, + v1beta2containerregistry.SchemeBuilder.AddToScheme, v1beta1containerservice.SchemeBuilder.AddToScheme, + v1beta2containerservice.SchemeBuilder.AddToScheme, v1beta1cosmosdb.SchemeBuilder.AddToScheme, + v1beta2cosmosdb.SchemeBuilder.AddToScheme, v1beta1costmanagement.SchemeBuilder.AddToScheme, + v1beta2costmanagement.SchemeBuilder.AddToScheme, v1beta1customproviders.SchemeBuilder.AddToScheme, v1beta1databoxedge.SchemeBuilder.AddToScheme, 
v1beta1databricks.SchemeBuilder.AddToScheme, + v1beta2databricks.SchemeBuilder.AddToScheme, v1beta1datafactory.SchemeBuilder.AddToScheme, + v1beta2datafactory.SchemeBuilder.AddToScheme, v1beta1datamigration.SchemeBuilder.AddToScheme, v1beta1dataprotection.SchemeBuilder.AddToScheme, + v1beta2dataprotection.SchemeBuilder.AddToScheme, v1beta1datashare.SchemeBuilder.AddToScheme, + v1beta2datashare.SchemeBuilder.AddToScheme, v1beta1dbformariadb.SchemeBuilder.AddToScheme, v1beta1dbformysql.SchemeBuilder.AddToScheme, + v1beta2dbformysql.SchemeBuilder.AddToScheme, v1beta1dbforpostgresql.SchemeBuilder.AddToScheme, + v1beta2dbforpostgresql.SchemeBuilder.AddToScheme, v1beta1devices.SchemeBuilder.AddToScheme, + v1beta2devices.SchemeBuilder.AddToScheme, v1beta1deviceupdate.SchemeBuilder.AddToScheme, + v1beta2deviceupdate.SchemeBuilder.AddToScheme, v1beta1devtestlab.SchemeBuilder.AddToScheme, + v1beta2devtestlab.SchemeBuilder.AddToScheme, v1beta1digitaltwins.SchemeBuilder.AddToScheme, + v1beta2digitaltwins.SchemeBuilder.AddToScheme, v1beta1elastic.SchemeBuilder.AddToScheme, + v1beta2elastic.SchemeBuilder.AddToScheme, v1beta1eventgrid.SchemeBuilder.AddToScheme, + v1beta2eventgrid.SchemeBuilder.AddToScheme, v1beta1eventhub.SchemeBuilder.AddToScheme, + v1beta2eventhub.SchemeBuilder.AddToScheme, v1beta1fluidrelay.SchemeBuilder.AddToScheme, + v1beta2fluidrelay.SchemeBuilder.AddToScheme, v1beta1guestconfiguration.SchemeBuilder.AddToScheme, + v1beta2guestconfiguration.SchemeBuilder.AddToScheme, v1beta1hdinsight.SchemeBuilder.AddToScheme, + v1beta2hdinsight.SchemeBuilder.AddToScheme, v1beta1healthbot.SchemeBuilder.AddToScheme, v1beta1healthcareapis.SchemeBuilder.AddToScheme, + v1beta2healthcareapis.SchemeBuilder.AddToScheme, v1beta1insights.SchemeBuilder.AddToScheme, + v1beta2insights.SchemeBuilder.AddToScheme, v1beta1iotcentral.SchemeBuilder.AddToScheme, + v1beta2iotcentral.SchemeBuilder.AddToScheme, v1beta1keyvault.SchemeBuilder.AddToScheme, + 
v1beta2keyvault.SchemeBuilder.AddToScheme, v1beta1kusto.SchemeBuilder.AddToScheme, + v1beta2kusto.SchemeBuilder.AddToScheme, v1beta1labservices.SchemeBuilder.AddToScheme, + v1beta2labservices.SchemeBuilder.AddToScheme, v1beta1loadtestservice.SchemeBuilder.AddToScheme, + v1beta2loadtestservice.SchemeBuilder.AddToScheme, v1beta1logic.SchemeBuilder.AddToScheme, + v1beta2logic.SchemeBuilder.AddToScheme, v1beta1logz.SchemeBuilder.AddToScheme, + v1beta2logz.SchemeBuilder.AddToScheme, v1beta1machinelearningservices.SchemeBuilder.AddToScheme, + v1beta2machinelearningservices.SchemeBuilder.AddToScheme, v1beta1maintenance.SchemeBuilder.AddToScheme, + v1beta2maintenance.SchemeBuilder.AddToScheme, v1beta1managedidentity.SchemeBuilder.AddToScheme, v1beta1management.SchemeBuilder.AddToScheme, v1beta1maps.SchemeBuilder.AddToScheme, v1beta1marketplaceordering.SchemeBuilder.AddToScheme, v1beta1media.SchemeBuilder.AddToScheme, + v1beta2media.SchemeBuilder.AddToScheme, v1beta1mixedreality.SchemeBuilder.AddToScheme, v1beta1netapp.SchemeBuilder.AddToScheme, + v1beta2netapp.SchemeBuilder.AddToScheme, v1beta1network.SchemeBuilder.AddToScheme, + v1beta2network.SchemeBuilder.AddToScheme, v1beta1notificationhubs.SchemeBuilder.AddToScheme, + v1beta2notificationhubs.SchemeBuilder.AddToScheme, v1beta1operationalinsights.SchemeBuilder.AddToScheme, + v1beta2operationalinsights.SchemeBuilder.AddToScheme, v1beta1operationsmanagement.SchemeBuilder.AddToScheme, + v1beta2operationsmanagement.SchemeBuilder.AddToScheme, v1beta1orbital.SchemeBuilder.AddToScheme, v1beta1policyinsights.SchemeBuilder.AddToScheme, v1beta1portal.SchemeBuilder.AddToScheme, v1beta1powerbidedicated.SchemeBuilder.AddToScheme, v1beta1purview.SchemeBuilder.AddToScheme, + v1beta2purview.SchemeBuilder.AddToScheme, v1beta1recoveryservices.SchemeBuilder.AddToScheme, + v1beta2recoveryservices.SchemeBuilder.AddToScheme, v1beta1relay.SchemeBuilder.AddToScheme, v1beta1resources.SchemeBuilder.AddToScheme, + 
v1beta2resources.SchemeBuilder.AddToScheme, v1beta1search.SchemeBuilder.AddToScheme, + v1beta2search.SchemeBuilder.AddToScheme, v1beta1security.SchemeBuilder.AddToScheme, + v1beta2security.SchemeBuilder.AddToScheme, v1beta1securityinsights.SchemeBuilder.AddToScheme, v1beta1servicebus.SchemeBuilder.AddToScheme, + v1beta2servicebus.SchemeBuilder.AddToScheme, v1beta1servicefabric.SchemeBuilder.AddToScheme, + v1beta2servicefabric.SchemeBuilder.AddToScheme, v1beta1servicelinker.SchemeBuilder.AddToScheme, + v1beta2servicelinker.SchemeBuilder.AddToScheme, v1beta1signalrservice.SchemeBuilder.AddToScheme, + v1beta2signalrservice.SchemeBuilder.AddToScheme, v1beta1solutions.SchemeBuilder.AddToScheme, v1beta1spring.SchemeBuilder.AddToScheme, v1beta1sql.SchemeBuilder.AddToScheme, + v1beta2sql.SchemeBuilder.AddToScheme, v1beta1storage.SchemeBuilder.AddToScheme, + v1beta2storage.SchemeBuilder.AddToScheme, v1beta1storagecache.SchemeBuilder.AddToScheme, + v1beta2storagecache.SchemeBuilder.AddToScheme, v1beta1storagepool.SchemeBuilder.AddToScheme, v1beta1storagesync.SchemeBuilder.AddToScheme, v1beta1streamanalytics.SchemeBuilder.AddToScheme, + v1beta2streamanalytics.SchemeBuilder.AddToScheme, v1beta1synapse.SchemeBuilder.AddToScheme, + v1beta2synapse.SchemeBuilder.AddToScheme, v1beta1timeseriesinsights.SchemeBuilder.AddToScheme, + v1beta2timeseriesinsights.SchemeBuilder.AddToScheme, v1alpha1.SchemeBuilder.AddToScheme, v1beta1apis.SchemeBuilder.AddToScheme, v1beta1web.SchemeBuilder.AddToScheme, + v1beta2web.SchemeBuilder.AddToScheme, ) } diff --git a/cmd/provider/alertsmanagement/zz_main.go b/cmd/provider/alertsmanagement/zz_main.go index 4692b65d5..dd5690c89 100644 --- a/cmd/provider/alertsmanagement/zz_main.go +++ b/cmd/provider/alertsmanagement/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + 
"github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_alertsmanagement(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/analysisservices/zz_main.go b/cmd/provider/analysisservices/zz_main.go index 61eb99deb..ff4f3959a 100644 --- a/cmd/provider/analysisservices/zz_main.go +++ b/cmd/provider/analysisservices/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_analysisservices(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/apimanagement/zz_main.go b/cmd/provider/apimanagement/zz_main.go index 59fa135c6..17cc26237 100644 --- a/cmd/provider/apimanagement/zz_main.go +++ b/cmd/provider/apimanagement/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + 
"github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_apimanagement(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/appconfiguration/zz_main.go b/cmd/provider/appconfiguration/zz_main.go index c52346f2d..1ec58c1b8 100644 --- a/cmd/provider/appconfiguration/zz_main.go +++ b/cmd/provider/appconfiguration/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_appconfiguration(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/appplatform/zz_main.go b/cmd/provider/appplatform/zz_main.go index 5b44c4cbc..9e68e142e 100644 --- a/cmd/provider/appplatform/zz_main.go +++ b/cmd/provider/appplatform/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + 
"github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_appplatform(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/attestation/zz_main.go b/cmd/provider/attestation/zz_main.go index 34e0fc25d..c0f7b8ae0 100644 --- a/cmd/provider/attestation/zz_main.go +++ b/cmd/provider/attestation/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_attestation(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/authorization/zz_main.go b/cmd/provider/authorization/zz_main.go index 48353d333..17a5bb78b 100644 --- a/cmd/provider/authorization/zz_main.go +++ b/cmd/provider/authorization/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + 
"github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_authorization(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/automation/zz_main.go b/cmd/provider/automation/zz_main.go index 11d2fb189..605abee4f 100644 --- a/cmd/provider/automation/zz_main.go +++ b/cmd/provider/automation/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_automation(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/azure/zz_main.go b/cmd/provider/azure/zz_main.go index 531df2467..31c2a55b8 100644 --- a/cmd/provider/azure/zz_main.go +++ b/cmd/provider/azure/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" 
"gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_azure(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/azurestackhci/zz_main.go b/cmd/provider/azurestackhci/zz_main.go index 0650ecc01..d7bfe44da 100644 --- a/cmd/provider/azurestackhci/zz_main.go +++ b/cmd/provider/azurestackhci/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_azurestackhci(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/botservice/zz_main.go b/cmd/provider/botservice/zz_main.go index b777dbc7d..cbf8793ca 100644 --- a/cmd/provider/botservice/zz_main.go +++ b/cmd/provider/botservice/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors 
"k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_botservice(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/cache/zz_main.go b/cmd/provider/cache/zz_main.go index 3f3ded28c..67a7f0c4e 100644 --- a/cmd/provider/cache/zz_main.go +++ b/cmd/provider/cache/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_cache(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/cdn/zz_main.go b/cmd/provider/cdn/zz_main.go index 741b403d4..ff150e320 100644 --- a/cmd/provider/cdn/zz_main.go +++ b/cmd/provider/cdn/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func 
main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_cdn(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/certificateregistration/zz_main.go b/cmd/provider/certificateregistration/zz_main.go index 0d6fe3a0e..eb8c9377e 100644 --- a/cmd/provider/certificateregistration/zz_main.go +++ b/cmd/provider/certificateregistration/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_certificateregistration(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/cognitiveservices/zz_main.go b/cmd/provider/cognitiveservices/zz_main.go index fdb17de7a..a01dbefa3 100644 --- a/cmd/provider/cognitiveservices/zz_main.go +++ b/cmd/provider/cognitiveservices/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_cognitiveservices(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/communication/zz_main.go b/cmd/provider/communication/zz_main.go index 9a0a15f15..d1af16665 100644 --- a/cmd/provider/communication/zz_main.go +++ b/cmd/provider/communication/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_communication(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/compute/zz_main.go b/cmd/provider/compute/zz_main.go index 9e2949b01..d42557002 100644 --- a/cmd/provider/compute/zz_main.go +++ b/cmd/provider/compute/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ 
-211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_compute(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/confidentialledger/zz_main.go b/cmd/provider/confidentialledger/zz_main.go index 2a9f8f29d..71308fd03 100644 --- a/cmd/provider/confidentialledger/zz_main.go +++ b/cmd/provider/confidentialledger/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_confidentialledger(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/config/zz_main.go b/cmd/provider/config/zz_main.go index 6f8fba1d8..7e43a46bc 100644 --- a/cmd/provider/config/zz_main.go +++ b/cmd/provider/config/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), 
"cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_config(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/consumption/zz_main.go b/cmd/provider/consumption/zz_main.go index db4016c4e..3aa326ad6 100644 --- a/cmd/provider/consumption/zz_main.go +++ b/cmd/provider/consumption/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_consumption(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/containerapp/zz_main.go b/cmd/provider/containerapp/zz_main.go index 68dcb3766..a855069aa 100644 --- a/cmd/provider/containerapp/zz_main.go +++ b/cmd/provider/containerapp/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + 
kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_containerapp(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/containerregistry/zz_main.go b/cmd/provider/containerregistry/zz_main.go index 59ebb50a5..21f6248e5 100644 --- a/cmd/provider/containerregistry/zz_main.go +++ b/cmd/provider/containerregistry/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_containerregistry(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/containerservice/zz_main.go b/cmd/provider/containerservice/zz_main.go index b1f538170..93f6de924 100644 --- a/cmd/provider/containerservice/zz_main.go +++ b/cmd/provider/containerservice/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") 
} + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_containerservice(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/cosmosdb/zz_main.go b/cmd/provider/cosmosdb/zz_main.go index 70633ba62..121d168fc 100644 --- a/cmd/provider/cosmosdb/zz_main.go +++ b/cmd/provider/cosmosdb/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_cosmosdb(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/costmanagement/zz_main.go b/cmd/provider/costmanagement/zz_main.go index 509febd01..16ee9302f 100644 --- a/cmd/provider/costmanagement/zz_main.go +++ b/cmd/provider/costmanagement/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + 
kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_costmanagement(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/customproviders/zz_main.go b/cmd/provider/customproviders/zz_main.go index 84ade0028..e03ab07b3 100644 --- a/cmd/provider/customproviders/zz_main.go +++ b/cmd/provider/customproviders/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_customproviders(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/databoxedge/zz_main.go b/cmd/provider/databoxedge/zz_main.go index 855380820..ff9638d6f 100644 --- a/cmd/provider/databoxedge/zz_main.go +++ b/cmd/provider/databoxedge/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + 
kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_databoxedge(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/databricks/zz_main.go b/cmd/provider/databricks/zz_main.go index 436e248e0..56a4dcaad 100644 --- a/cmd/provider/databricks/zz_main.go +++ b/cmd/provider/databricks/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_databricks(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/datafactory/zz_main.go b/cmd/provider/datafactory/zz_main.go index 27ea1e285..d2c357901 100644 --- a/cmd/provider/datafactory/zz_main.go +++ b/cmd/provider/datafactory/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + 
kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_datafactory(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/datamigration/zz_main.go b/cmd/provider/datamigration/zz_main.go index 9cc006b0b..44ea4fb5c 100644 --- a/cmd/provider/datamigration/zz_main.go +++ b/cmd/provider/datamigration/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_datamigration(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/dataprotection/zz_main.go b/cmd/provider/dataprotection/zz_main.go index 25da4ba37..085e0408a 100644 --- a/cmd/provider/dataprotection/zz_main.go +++ b/cmd/provider/dataprotection/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + 
kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_dataprotection(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/datashare/zz_main.go b/cmd/provider/datashare/zz_main.go index b27018fb8..4d72c2533 100644 --- a/cmd/provider/datashare/zz_main.go +++ b/cmd/provider/datashare/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_datashare(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/dbformariadb/zz_main.go b/cmd/provider/dbformariadb/zz_main.go index 1b9ed6748..a58b84310 100644 --- a/cmd/provider/dbformariadb/zz_main.go +++ b/cmd/provider/dbformariadb/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + 
kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_dbformariadb(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/dbformysql/zz_main.go b/cmd/provider/dbformysql/zz_main.go index 0cb7b468b..a07056585 100644 --- a/cmd/provider/dbformysql/zz_main.go +++ b/cmd/provider/dbformysql/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_dbformysql(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/dbforpostgresql/zz_main.go b/cmd/provider/dbforpostgresql/zz_main.go index e39c2c767..5b06b88c5 100644 --- a/cmd/provider/dbforpostgresql/zz_main.go +++ b/cmd/provider/dbforpostgresql/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + 
kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_dbforpostgresql(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/devices/zz_main.go b/cmd/provider/devices/zz_main.go index 8b37ac2b3..efef64985 100644 --- a/cmd/provider/devices/zz_main.go +++ b/cmd/provider/devices/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_devices(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/deviceupdate/zz_main.go b/cmd/provider/deviceupdate/zz_main.go index 845c8f890..907359685 100644 --- a/cmd/provider/deviceupdate/zz_main.go +++ b/cmd/provider/deviceupdate/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + 
kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_deviceupdate(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/devtestlab/zz_main.go b/cmd/provider/devtestlab/zz_main.go index 770d3de5e..f80b3e90c 100644 --- a/cmd/provider/devtestlab/zz_main.go +++ b/cmd/provider/devtestlab/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_devtestlab(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/digitaltwins/zz_main.go b/cmd/provider/digitaltwins/zz_main.go index f4f894099..7280a9e0c 100644 --- a/cmd/provider/digitaltwins/zz_main.go +++ b/cmd/provider/digitaltwins/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + 
kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_digitaltwins(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/elastic/zz_main.go b/cmd/provider/elastic/zz_main.go index 19e96e835..ee85400f2 100644 --- a/cmd/provider/elastic/zz_main.go +++ b/cmd/provider/elastic/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_elastic(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/eventgrid/zz_main.go b/cmd/provider/eventgrid/zz_main.go index 6a920a207..402d7a4ee 100644 --- a/cmd/provider/eventgrid/zz_main.go +++ b/cmd/provider/eventgrid/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot 
initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_eventgrid(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/eventhub/zz_main.go b/cmd/provider/eventhub/zz_main.go index 527b9ca83..737bbaa46 100644 --- a/cmd/provider/eventhub/zz_main.go +++ b/cmd/provider/eventhub/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_eventhub(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/fluidrelay/zz_main.go b/cmd/provider/fluidrelay/zz_main.go index 93040922d..d0c22beb9 100644 --- a/cmd/provider/fluidrelay/zz_main.go +++ b/cmd/provider/fluidrelay/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") 
kingpin.FatalIfError(controller.Setup_fluidrelay(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/guestconfiguration/zz_main.go b/cmd/provider/guestconfiguration/zz_main.go index 475a69de5..de5bd7f1b 100644 --- a/cmd/provider/guestconfiguration/zz_main.go +++ b/cmd/provider/guestconfiguration/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_guestconfiguration(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/hdinsight/zz_main.go b/cmd/provider/hdinsight/zz_main.go index 38361e37b..9c383ab9c 100644 --- a/cmd/provider/hdinsight/zz_main.go +++ b/cmd/provider/hdinsight/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") 
kingpin.FatalIfError(controller.Setup_hdinsight(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/healthbot/zz_main.go b/cmd/provider/healthbot/zz_main.go index 25f785a67..2cf40e2b6 100644 --- a/cmd/provider/healthbot/zz_main.go +++ b/cmd/provider/healthbot/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_healthbot(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/healthcareapis/zz_main.go b/cmd/provider/healthcareapis/zz_main.go index 9defd74c7..e02f72e5d 100644 --- a/cmd/provider/healthcareapis/zz_main.go +++ b/cmd/provider/healthcareapis/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") 
kingpin.FatalIfError(controller.Setup_healthcareapis(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/insights/zz_main.go b/cmd/provider/insights/zz_main.go index 9ab1fec49..8f1e50a9d 100644 --- a/cmd/provider/insights/zz_main.go +++ b/cmd/provider/insights/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_insights(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/iotcentral/zz_main.go b/cmd/provider/iotcentral/zz_main.go index 9f172f535..9f0e50178 100644 --- a/cmd/provider/iotcentral/zz_main.go +++ b/cmd/provider/iotcentral/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_iotcentral(mgr, o), 
"Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/keyvault/zz_main.go b/cmd/provider/keyvault/zz_main.go index a84195e26..317cbd7a3 100644 --- a/cmd/provider/keyvault/zz_main.go +++ b/cmd/provider/keyvault/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_keyvault(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/kusto/zz_main.go b/cmd/provider/kusto/zz_main.go index 097a6e482..e48d591fd 100644 --- a/cmd/provider/kusto/zz_main.go +++ b/cmd/provider/kusto/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_kusto(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), 
"Cannot start controller manager") } diff --git a/cmd/provider/labservices/zz_main.go b/cmd/provider/labservices/zz_main.go index 907a40f3c..8a54380dc 100644 --- a/cmd/provider/labservices/zz_main.go +++ b/cmd/provider/labservices/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_labservices(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/loadtestservice/zz_main.go b/cmd/provider/loadtestservice/zz_main.go index da30fce50..e1283172a 100644 --- a/cmd/provider/loadtestservice/zz_main.go +++ b/cmd/provider/loadtestservice/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_loadtestservice(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller 
manager") } diff --git a/cmd/provider/logic/zz_main.go b/cmd/provider/logic/zz_main.go index 9f54d2669..3ecba1b18 100644 --- a/cmd/provider/logic/zz_main.go +++ b/cmd/provider/logic/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_logic(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/logz/zz_main.go b/cmd/provider/logz/zz_main.go index 8593e1e02..9c5d6d55c 100644 --- a/cmd/provider/logz/zz_main.go +++ b/cmd/provider/logz/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_logz(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/machinelearningservices/zz_main.go 
b/cmd/provider/machinelearningservices/zz_main.go index 93b7f4b57..5146947b8 100644 --- a/cmd/provider/machinelearningservices/zz_main.go +++ b/cmd/provider/machinelearningservices/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_machinelearningservices(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/maintenance/zz_main.go b/cmd/provider/maintenance/zz_main.go index 429d5fbe8..5f4be6588 100644 --- a/cmd/provider/maintenance/zz_main.go +++ b/cmd/provider/maintenance/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_maintenance(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git 
a/cmd/provider/managedidentity/zz_main.go b/cmd/provider/managedidentity/zz_main.go index 6fbed9f76..f40e8bb70 100644 --- a/cmd/provider/managedidentity/zz_main.go +++ b/cmd/provider/managedidentity/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_managedidentity(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/management/zz_main.go b/cmd/provider/management/zz_main.go index 62e09e291..4dbe04f56 100644 --- a/cmd/provider/management/zz_main.go +++ b/cmd/provider/management/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_management(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/maps/zz_main.go 
b/cmd/provider/maps/zz_main.go index e7a05a804..110e5830c 100644 --- a/cmd/provider/maps/zz_main.go +++ b/cmd/provider/maps/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_maps(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/marketplaceordering/zz_main.go b/cmd/provider/marketplaceordering/zz_main.go index 31082a651..f3259ae62 100644 --- a/cmd/provider/marketplaceordering/zz_main.go +++ b/cmd/provider/marketplaceordering/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_marketplaceordering(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/media/zz_main.go b/cmd/provider/media/zz_main.go index 
4a8ad33e8..b31bb77d5 100644 --- a/cmd/provider/media/zz_main.go +++ b/cmd/provider/media/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_media(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/mixedreality/zz_main.go b/cmd/provider/mixedreality/zz_main.go index ce937b876..b184cf9ba 100644 --- a/cmd/provider/mixedreality/zz_main.go +++ b/cmd/provider/mixedreality/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_mixedreality(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/monolith/zz_main.go b/cmd/provider/monolith/zz_main.go index a92b6d1d2..7cdb983ca 100644 --- a/cmd/provider/monolith/zz_main.go 
+++ b/cmd/provider/monolith/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -216,6 +217,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_monolith(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/netapp/zz_main.go b/cmd/provider/netapp/zz_main.go index 3d5a342f8..7a0a084b5 100644 --- a/cmd/provider/netapp/zz_main.go +++ b/cmd/provider/netapp/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_netapp(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/network/zz_main.go b/cmd/provider/network/zz_main.go index cd1ef7130..8c71249ae 100644 --- a/cmd/provider/network/zz_main.go +++ b/cmd/provider/network/zz_main.go @@ -23,6 +23,7 @@ import ( 
"github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_network(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/notificationhubs/zz_main.go b/cmd/provider/notificationhubs/zz_main.go index c6ec64036..4eefda174 100644 --- a/cmd/provider/notificationhubs/zz_main.go +++ b/cmd/provider/notificationhubs/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_notificationhubs(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/operationalinsights/zz_main.go b/cmd/provider/operationalinsights/zz_main.go index 461cf874c..fa5d93e1a 100644 --- a/cmd/provider/operationalinsights/zz_main.go +++ b/cmd/provider/operationalinsights/zz_main.go @@ -23,6 +23,7 @@ 
import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_operationalinsights(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/operationsmanagement/zz_main.go b/cmd/provider/operationsmanagement/zz_main.go index f3743aaba..a80ce8a93 100644 --- a/cmd/provider/operationsmanagement/zz_main.go +++ b/cmd/provider/operationsmanagement/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_operationsmanagement(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/orbital/zz_main.go b/cmd/provider/orbital/zz_main.go index eaf02aa20..aed4405df 100644 --- a/cmd/provider/orbital/zz_main.go +++ b/cmd/provider/orbital/zz_main.go @@ -23,6 +23,7 @@ import ( 
"github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_orbital(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/policyinsights/zz_main.go b/cmd/provider/policyinsights/zz_main.go index aca8f02c7..5052893e4 100644 --- a/cmd/provider/policyinsights/zz_main.go +++ b/cmd/provider/policyinsights/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_policyinsights(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/portal/zz_main.go b/cmd/provider/portal/zz_main.go index 9105ca6b8..a17a39dcf 100644 --- a/cmd/provider/portal/zz_main.go +++ b/cmd/provider/portal/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" 
"github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_portal(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/powerbidedicated/zz_main.go b/cmd/provider/powerbidedicated/zz_main.go index c552efb57..fcd29950e 100644 --- a/cmd/provider/powerbidedicated/zz_main.go +++ b/cmd/provider/powerbidedicated/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_powerbidedicated(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/purview/zz_main.go b/cmd/provider/purview/zz_main.go index 9636d40a8..c095ca931 100644 --- a/cmd/provider/purview/zz_main.go +++ b/cmd/provider/purview/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" 
"github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_purview(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/recoveryservices/zz_main.go b/cmd/provider/recoveryservices/zz_main.go index 59f691386..9b73c9c58 100644 --- a/cmd/provider/recoveryservices/zz_main.go +++ b/cmd/provider/recoveryservices/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_recoveryservices(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/relay/zz_main.go b/cmd/provider/relay/zz_main.go index 45f7f4219..52b92dfb6 100644 --- a/cmd/provider/relay/zz_main.go +++ b/cmd/provider/relay/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" 
"github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_relay(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/resources/zz_main.go b/cmd/provider/resources/zz_main.go index 486a63a84..601435914 100644 --- a/cmd/provider/resources/zz_main.go +++ b/cmd/provider/resources/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_resources(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/search/zz_main.go b/cmd/provider/search/zz_main.go index 02bf610ce..f038162e4 100644 --- a/cmd/provider/search/zz_main.go +++ b/cmd/provider/search/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller 
"github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_search(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/security/zz_main.go b/cmd/provider/security/zz_main.go index 099e6c779..fa47a6788 100644 --- a/cmd/provider/security/zz_main.go +++ b/cmd/provider/security/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_security(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/securityinsights/zz_main.go b/cmd/provider/securityinsights/zz_main.go index 5d1cef399..ffa0397a4 100644 --- a/cmd/provider/securityinsights/zz_main.go +++ b/cmd/provider/securityinsights/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + 
"github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_securityinsights(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/servicebus/zz_main.go b/cmd/provider/servicebus/zz_main.go index ffb962b31..a5a860a2d 100644 --- a/cmd/provider/servicebus/zz_main.go +++ b/cmd/provider/servicebus/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_servicebus(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/servicefabric/zz_main.go b/cmd/provider/servicefabric/zz_main.go index b13b489ad..6528c94c2 100644 --- a/cmd/provider/servicefabric/zz_main.go +++ b/cmd/provider/servicefabric/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + 
"github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_servicefabric(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/servicelinker/zz_main.go b/cmd/provider/servicelinker/zz_main.go index 52b06617c..7138d3dfb 100644 --- a/cmd/provider/servicelinker/zz_main.go +++ b/cmd/provider/servicelinker/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_servicelinker(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/signalrservice/zz_main.go b/cmd/provider/signalrservice/zz_main.go index 4ab30ed31..5cfb4ee0d 100644 --- a/cmd/provider/signalrservice/zz_main.go +++ b/cmd/provider/signalrservice/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + 
"github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_signalrservice(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/solutions/zz_main.go b/cmd/provider/solutions/zz_main.go index 3e397fb6f..05a9c1914 100644 --- a/cmd/provider/solutions/zz_main.go +++ b/cmd/provider/solutions/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_solutions(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/spring/zz_main.go b/cmd/provider/spring/zz_main.go index dcb7ff1ec..a8dea0f34 100644 --- a/cmd/provider/spring/zz_main.go +++ b/cmd/provider/spring/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" 
"gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_spring(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/sql/zz_main.go b/cmd/provider/sql/zz_main.go index b073f0240..980a9b4ef 100644 --- a/cmd/provider/sql/zz_main.go +++ b/cmd/provider/sql/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_sql(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/storage/zz_main.go b/cmd/provider/storage/zz_main.go index f30fc8608..b9c674eb2 100644 --- a/cmd/provider/storage/zz_main.go +++ b/cmd/provider/storage/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_storage(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/storagecache/zz_main.go b/cmd/provider/storagecache/zz_main.go index 71206556c..0bc3a333a 100644 --- a/cmd/provider/storagecache/zz_main.go +++ b/cmd/provider/storagecache/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_storagecache(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/storagepool/zz_main.go b/cmd/provider/storagepool/zz_main.go index cd178637c..7c4a0569d 100644 --- a/cmd/provider/storagepool/zz_main.go +++ b/cmd/provider/storagepool/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ 
-211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_storagepool(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/storagesync/zz_main.go b/cmd/provider/storagesync/zz_main.go index 529ecb8ec..823c1d5fb 100644 --- a/cmd/provider/storagesync/zz_main.go +++ b/cmd/provider/storagesync/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_storagesync(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/streamanalytics/zz_main.go b/cmd/provider/streamanalytics/zz_main.go index 02557e180..5f637c838 100644 --- a/cmd/provider/streamanalytics/zz_main.go +++ b/cmd/provider/streamanalytics/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { 
})), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_streamanalytics(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/synapse/zz_main.go b/cmd/provider/synapse/zz_main.go index aaadb0ab9..0a3cf7208 100644 --- a/cmd/provider/synapse/zz_main.go +++ b/cmd/provider/synapse/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_synapse(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/timeseriesinsights/zz_main.go b/cmd/provider/timeseriesinsights/zz_main.go index b531c513f..7d050b482 100644 --- a/cmd/provider/timeseriesinsights/zz_main.go +++ b/cmd/provider/timeseriesinsights/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store 
config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_timeseriesinsights(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/cmd/provider/web/zz_main.go b/cmd/provider/web/zz_main.go index 5e3fb5b42..fea2ea293 100644 --- a/cmd/provider/web/zz_main.go +++ b/cmd/provider/web/zz_main.go @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -211,6 +212,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_web(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/config/registry.go b/config/registry.go index 374518cd6..fcd76f511 100644 --- a/config/registry.go +++ b/config/registry.go @@ -9,7 +9,8 @@ import ( _ "embed" "github.com/crossplane/upjet/pkg/config" - tjconfig "github.com/crossplane/upjet/pkg/config" + ujconfig "github.com/crossplane/upjet/pkg/config" + "github.com/crossplane/upjet/pkg/config/conversion" "github.com/crossplane/upjet/pkg/registry/reference" conversiontfjson "github.com/crossplane/upjet/pkg/types/conversion/tfjson" tfjson "github.com/hashicorp/terraform-json" @@ -124,7 +125,7 @@ func getProviderSchema(s string) (*schema.Provider, error) { } // GetProvider returns provider configuration -func GetProvider(ctx context.Context, generationProvider bool) 
(*tjconfig.Provider, error) { +func GetProvider(ctx context.Context, generationProvider bool) (*ujconfig.Provider, error) { var p *schema.Provider var err error if generationProvider { @@ -136,18 +137,21 @@ func GetProvider(ctx context.Context, generationProvider bool) (*tjconfig.Provid return nil, errors.Wrapf(err, "cannot get the Terraform provider schema with generation mode set to %t", generationProvider) } - pc := tjconfig.NewProvider([]byte(providerSchema), resourcePrefix, modulePath, providerMetadata, - tjconfig.WithShortName("azure"), - tjconfig.WithRootGroup("azure.upbound.io"), - tjconfig.WithIncludeList(CLIReconciledResourceList()), - tjconfig.WithTerraformPluginSDKIncludeList(TerraformPluginSDKResourceList()), - tjconfig.WithSkipList(skipList), - tjconfig.WithDefaultResourceOptions(ResourceConfigurator()), - tjconfig.WithReferenceInjectors([]tjconfig.ReferenceInjector{reference.NewInjector(modulePath)}), - tjconfig.WithFeaturesPackage("internal/features"), - tjconfig.WithMainTemplate(hack.MainTemplate), - tjconfig.WithTerraformProvider(p), + pc := ujconfig.NewProvider([]byte(providerSchema), resourcePrefix, modulePath, providerMetadata, + ujconfig.WithShortName("azure"), + ujconfig.WithRootGroup("azure.upbound.io"), + ujconfig.WithIncludeList(CLIReconciledResourceList()), + ujconfig.WithTerraformPluginSDKIncludeList(TerraformPluginSDKResourceList()), + ujconfig.WithSkipList(skipList), + ujconfig.WithDefaultResourceOptions(ResourceConfigurator()), + ujconfig.WithReferenceInjectors([]ujconfig.ReferenceInjector{reference.NewInjector(modulePath)}), + ujconfig.WithFeaturesPackage("internal/features"), + ujconfig.WithMainTemplate(hack.MainTemplate), + ujconfig.WithTerraformProvider(p), + ujconfig.WithSchemaTraversers(&ujconfig.SingletonListEmbedder{}), ) + + bumpVersionsWithEmbeddedLists(pc) // "azure" group contains resources that actually do not have a specific // group, e.g. ResourceGroup with APIVersion "azure.upbound.io/v1beta1". 
// We need to include the controllers for this group into the base packages @@ -180,6 +184,28 @@ func GetProvider(ctx context.Context, generationProvider bool) (*tjconfig.Provid return pc, nil } +func bumpVersionsWithEmbeddedLists(pc *ujconfig.Provider) { + for name, r := range pc.Resources { + r := r + // nothing to do if no singleton list has been converted to + // an embedded object + if len(r.CRDListConversionPaths()) == 0 { + continue + } + r.Version = "v1beta2" + r.PreviousVersions = []string{"v1beta1"} + // we would like to set the storage version to v1beta1 to facilitate + // downgrades. + r.SetCRDStorageVersion("v1beta1") + r.ControllerReconcileVersion = "v1beta1" + r.Conversions = []conversion.Conversion{ + conversion.NewIdentityConversionExpandPaths(conversion.AllVersions, conversion.AllVersions, conversion.DefaultPathPrefixes(), r.CRDListConversionPaths()...), + conversion.NewSingletonListConversion("v1beta1", "v1beta2", conversion.DefaultPathPrefixes(), r.CRDListConversionPaths(), conversion.ToEmbeddedObject), + conversion.NewSingletonListConversion("v1beta2", "v1beta1", conversion.DefaultPathPrefixes(), r.CRDListConversionPaths(), conversion.ToSingletonList)} + pc.Resources[name] = r + } +} + // CLIReconciledResourceList returns the list of resources that have external // name configured in ExternalNameConfigs table and to be reconciled under // the TF CLI based architecture. 
diff --git a/examples-generated/alertsmanagement/v1beta2/monitoractionruleactiongroup.yaml b/examples-generated/alertsmanagement/v1beta2/monitoractionruleactiongroup.yaml new file mode 100644 index 000000000..03dcbdaaf --- /dev/null +++ b/examples-generated/alertsmanagement/v1beta2/monitoractionruleactiongroup.yaml @@ -0,0 +1,53 @@ +apiVersion: alertsmanagement.azure.upbound.io/v1beta2 +kind: MonitorActionRuleActionGroup +metadata: + annotations: + meta.upbound.io/example-id: alertsmanagement/v1beta2/monitoractionruleactiongroup + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + actionGroupIdSelector: + matchLabels: + testing.upbound.io/example-name: example + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + scope: + - resourceIds: + - ${azurerm_resource_group.example.id} + type: ResourceGroup + tags: + foo: bar + +--- + +apiVersion: insights.azure.upbound.io/v1beta2 +kind: MonitorActionGroup +metadata: + annotations: + meta.upbound.io/example-id: alertsmanagement/v1beta2/monitoractionruleactiongroup + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + shortName: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: alertsmanagement/v1beta2/monitoractionruleactiongroup + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/alertsmanagement/v1beta2/monitoractionrulesuppression.yaml b/examples-generated/alertsmanagement/v1beta2/monitoractionrulesuppression.yaml new file mode 100644 index 000000000..966a6f8b5 --- /dev/null +++ b/examples-generated/alertsmanagement/v1beta2/monitoractionrulesuppression.yaml @@ -0,0 +1,43 @@ +apiVersion: alertsmanagement.azure.upbound.io/v1beta2 +kind: 
MonitorActionRuleSuppression +metadata: + annotations: + meta.upbound.io/example-id: alertsmanagement/v1beta2/monitoractionrulesuppression + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + scope: + - resourceIds: + - ${azurerm_resource_group.example.id} + type: ResourceGroup + suppression: + - recurrenceType: Weekly + schedule: + - endDateUtc: "2019-01-03T15:02:07Z" + recurrenceWeekly: + - Sunday + - Monday + - Friday + - Saturday + startDateUtc: "2019-01-01T01:02:03Z" + tags: + foo: bar + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: alertsmanagement/v1beta2/monitoractionrulesuppression + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/alertsmanagement/v1beta2/monitoralertprocessingruleactiongroup.yaml b/examples-generated/alertsmanagement/v1beta2/monitoralertprocessingruleactiongroup.yaml new file mode 100644 index 000000000..7a0c0e7dc --- /dev/null +++ b/examples-generated/alertsmanagement/v1beta2/monitoralertprocessingruleactiongroup.yaml @@ -0,0 +1,73 @@ +apiVersion: alertsmanagement.azure.upbound.io/v1beta2 +kind: MonitorAlertProcessingRuleActionGroup +metadata: + annotations: + meta.upbound.io/example-id: alertsmanagement/v1beta2/monitoralertprocessingruleactiongroup + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addActionGroupIdsRefs: + - name: example + condition: + - severity: + - operator: Equals + values: + - Sev0 + - Sev1 + - Sev2 + targetResourceType: + - operator: Equals + values: + - Microsoft.Compute/VirtualMachines + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + schedule: + - effectiveFrom: 2022-01-01T01:02:03 + effectiveUntil: 2022-02-02T01:02:03 + recurrence: + - 
daily: + - endTime: "09:00:00" + startTime: "17:00:00" + weekly: + - daysOfWeek: + - Saturday + - Sunday + timeZone: Pacific Standard Time + scopesRefs: + - name: example + tags: + foo: bar + +--- + +apiVersion: insights.azure.upbound.io/v1beta2 +kind: MonitorActionGroup +metadata: + annotations: + meta.upbound.io/example-id: alertsmanagement/v1beta2/monitoralertprocessingruleactiongroup + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + shortName: action + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: alertsmanagement/v1beta2/monitoralertprocessingruleactiongroup + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/alertsmanagement/v1beta2/monitoralertprocessingrulesuppression.yaml b/examples-generated/alertsmanagement/v1beta2/monitoralertprocessingrulesuppression.yaml new file mode 100644 index 000000000..4582973d2 --- /dev/null +++ b/examples-generated/alertsmanagement/v1beta2/monitoralertprocessingrulesuppression.yaml @@ -0,0 +1,54 @@ +apiVersion: alertsmanagement.azure.upbound.io/v1beta2 +kind: MonitorAlertProcessingRuleSuppression +metadata: + annotations: + meta.upbound.io/example-id: alertsmanagement/v1beta2/monitoralertprocessingrulesuppression + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + condition: + - severity: + - operator: Equals + values: + - Sev0 + - Sev1 + - Sev2 + targetResourceType: + - operator: Equals + values: + - Microsoft.Compute/VirtualMachines + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + schedule: + - effectiveFrom: 2022-01-01T01:02:03 + effectiveUntil: 2022-02-02T01:02:03 + recurrence: + - daily: + - endTime: "09:00:00" + startTime: "17:00:00" + weekly: + 
- daysOfWeek: + - Saturday + - Sunday + timeZone: Pacific Standard Time + scopesRefs: + - name: example + tags: + foo: bar + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: alertsmanagement/v1beta2/monitoralertprocessingrulesuppression + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/alertsmanagement/v1beta2/monitorsmartdetectoralertrule.yaml b/examples-generated/alertsmanagement/v1beta2/monitorsmartdetectoralertrule.yaml new file mode 100644 index 000000000..75b491beb --- /dev/null +++ b/examples-generated/alertsmanagement/v1beta2/monitorsmartdetectoralertrule.yaml @@ -0,0 +1,71 @@ +apiVersion: alertsmanagement.azure.upbound.io/v1beta2 +kind: MonitorSmartDetectorAlertRule +metadata: + annotations: + meta.upbound.io/example-id: alertsmanagement/v1beta2/monitorsmartdetectoralertrule + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + actionGroup: + - idsRefs: + - name: example + detectorType: FailureAnomaliesDetector + frequency: PT1M + name: example-smart-detector-alert-rule + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + scopeResourceIdsRefs: + - name: example + severity: Sev0 + +--- + +apiVersion: insights.azure.upbound.io/v1beta1 +kind: ApplicationInsights +metadata: + annotations: + meta.upbound.io/example-id: alertsmanagement/v1beta2/monitorsmartdetectoralertrule + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + applicationType: web + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: insights.azure.upbound.io/v1beta2 +kind: MonitorActionGroup +metadata: + annotations: + meta.upbound.io/example-id: alertsmanagement/v1beta2/monitorsmartdetectoralertrule + labels: + 
testing.upbound.io/example-name: example + name: example +spec: + forProvider: + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + shortName: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: alertsmanagement/v1beta2/monitorsmartdetectoralertrule + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/apimanagement/v1beta1/apioperationpolicy.yaml b/examples-generated/apimanagement/v1beta1/apioperationpolicy.yaml index 8f5204f33..a61c270e2 100644 --- a/examples-generated/apimanagement/v1beta1/apioperationpolicy.yaml +++ b/examples-generated/apimanagement/v1beta1/apioperationpolicy.yaml @@ -29,7 +29,7 @@ spec: --- -apiVersion: apimanagement.azure.upbound.io/v1beta1 +apiVersion: apimanagement.azure.upbound.io/v1beta2 kind: Management metadata: annotations: @@ -49,7 +49,7 @@ spec: --- -apiVersion: apimanagement.azure.upbound.io/v1beta1 +apiVersion: apimanagement.azure.upbound.io/v1beta2 kind: API metadata: annotations: @@ -69,7 +69,7 @@ spec: --- -apiVersion: apimanagement.azure.upbound.io/v1beta1 +apiVersion: apimanagement.azure.upbound.io/v1beta2 kind: APIOperation metadata: annotations: diff --git a/examples-generated/apimanagement/v1beta1/apioperationtag.yaml b/examples-generated/apimanagement/v1beta1/apioperationtag.yaml index 2d22b0d9c..330b236fb 100644 --- a/examples-generated/apimanagement/v1beta1/apioperationtag.yaml +++ b/examples-generated/apimanagement/v1beta1/apioperationtag.yaml @@ -15,7 +15,7 @@ spec: --- -apiVersion: apimanagement.azure.upbound.io/v1beta1 +apiVersion: apimanagement.azure.upbound.io/v1beta2 kind: APIOperation metadata: annotations: diff --git a/examples-generated/apimanagement/v1beta1/apirelease.yaml b/examples-generated/apimanagement/v1beta1/apirelease.yaml index 1d8f8de0e..3d7f3ef2c 100644 --- 
a/examples-generated/apimanagement/v1beta1/apirelease.yaml +++ b/examples-generated/apimanagement/v1beta1/apirelease.yaml @@ -14,7 +14,7 @@ spec: --- -apiVersion: apimanagement.azure.upbound.io/v1beta1 +apiVersion: apimanagement.azure.upbound.io/v1beta2 kind: Management metadata: annotations: @@ -34,7 +34,7 @@ spec: --- -apiVersion: apimanagement.azure.upbound.io/v1beta1 +apiVersion: apimanagement.azure.upbound.io/v1beta2 kind: API metadata: annotations: diff --git a/examples-generated/apimanagement/v1beta1/apitag.yaml b/examples-generated/apimanagement/v1beta1/apitag.yaml index 067078083..812861ce9 100644 --- a/examples-generated/apimanagement/v1beta1/apitag.yaml +++ b/examples-generated/apimanagement/v1beta1/apitag.yaml @@ -14,7 +14,7 @@ spec: --- -apiVersion: apimanagement.azure.upbound.io/v1beta1 +apiVersion: apimanagement.azure.upbound.io/v1beta2 kind: API metadata: annotations: diff --git a/examples-generated/apimanagement/v1beta1/apiversionset.yaml b/examples-generated/apimanagement/v1beta1/apiversionset.yaml index ccbda0534..090e5059e 100644 --- a/examples-generated/apimanagement/v1beta1/apiversionset.yaml +++ b/examples-generated/apimanagement/v1beta1/apiversionset.yaml @@ -19,7 +19,7 @@ spec: --- -apiVersion: apimanagement.azure.upbound.io/v1beta1 +apiVersion: apimanagement.azure.upbound.io/v1beta2 kind: Management metadata: annotations: diff --git a/examples-generated/apimanagement/v1beta1/certificate.yaml b/examples-generated/apimanagement/v1beta1/certificate.yaml index 6fd4b3bad..b1bbae5da 100644 --- a/examples-generated/apimanagement/v1beta1/certificate.yaml +++ b/examples-generated/apimanagement/v1beta1/certificate.yaml @@ -21,7 +21,7 @@ spec: --- -apiVersion: apimanagement.azure.upbound.io/v1beta1 +apiVersion: apimanagement.azure.upbound.io/v1beta2 kind: Management metadata: annotations: diff --git a/examples-generated/apimanagement/v1beta1/customdomain.yaml b/examples-generated/apimanagement/v1beta1/customdomain.yaml index cbe4d9335..289bb7cb2 
100644 --- a/examples-generated/apimanagement/v1beta1/customdomain.yaml +++ b/examples-generated/apimanagement/v1beta1/customdomain.yaml @@ -24,7 +24,7 @@ spec: --- -apiVersion: apimanagement.azure.upbound.io/v1beta1 +apiVersion: apimanagement.azure.upbound.io/v1beta2 kind: Management metadata: annotations: @@ -44,7 +44,7 @@ spec: --- -apiVersion: keyvault.azure.upbound.io/v1beta1 +apiVersion: keyvault.azure.upbound.io/v1beta2 kind: Certificate metadata: annotations: diff --git a/examples-generated/apimanagement/v1beta1/emailtemplate.yaml b/examples-generated/apimanagement/v1beta1/emailtemplate.yaml index 35bdec8ae..75b5f6b72 100644 --- a/examples-generated/apimanagement/v1beta1/emailtemplate.yaml +++ b/examples-generated/apimanagement/v1beta1/emailtemplate.yaml @@ -30,7 +30,7 @@ spec: --- -apiVersion: apimanagement.azure.upbound.io/v1beta1 +apiVersion: apimanagement.azure.upbound.io/v1beta2 kind: Management metadata: annotations: diff --git a/examples-generated/apimanagement/v1beta1/globalschema.yaml b/examples-generated/apimanagement/v1beta1/globalschema.yaml index 58183a396..db37c414f 100644 --- a/examples-generated/apimanagement/v1beta1/globalschema.yaml +++ b/examples-generated/apimanagement/v1beta1/globalschema.yaml @@ -19,7 +19,7 @@ spec: --- -apiVersion: apimanagement.azure.upbound.io/v1beta1 +apiVersion: apimanagement.azure.upbound.io/v1beta2 kind: Management metadata: annotations: diff --git a/examples-generated/apimanagement/v1beta1/identityprovideraad.yaml b/examples-generated/apimanagement/v1beta1/identityprovideraad.yaml index 4195c07a4..b3e8e7c36 100644 --- a/examples-generated/apimanagement/v1beta1/identityprovideraad.yaml +++ b/examples-generated/apimanagement/v1beta1/identityprovideraad.yaml @@ -24,7 +24,7 @@ spec: --- -apiVersion: apimanagement.azure.upbound.io/v1beta1 +apiVersion: apimanagement.azure.upbound.io/v1beta2 kind: Management metadata: annotations: diff --git a/examples-generated/apimanagement/v1beta1/identityproviderfacebook.yaml 
b/examples-generated/apimanagement/v1beta1/identityproviderfacebook.yaml index 3d3b42f25..d936c4d0b 100644 --- a/examples-generated/apimanagement/v1beta1/identityproviderfacebook.yaml +++ b/examples-generated/apimanagement/v1beta1/identityproviderfacebook.yaml @@ -22,7 +22,7 @@ spec: --- -apiVersion: apimanagement.azure.upbound.io/v1beta1 +apiVersion: apimanagement.azure.upbound.io/v1beta2 kind: Management metadata: annotations: diff --git a/examples-generated/apimanagement/v1beta1/identityprovidergoogle.yaml b/examples-generated/apimanagement/v1beta1/identityprovidergoogle.yaml index 5406ef5c0..3589b6b13 100644 --- a/examples-generated/apimanagement/v1beta1/identityprovidergoogle.yaml +++ b/examples-generated/apimanagement/v1beta1/identityprovidergoogle.yaml @@ -22,7 +22,7 @@ spec: --- -apiVersion: apimanagement.azure.upbound.io/v1beta1 +apiVersion: apimanagement.azure.upbound.io/v1beta2 kind: Management metadata: annotations: diff --git a/examples-generated/apimanagement/v1beta1/identityprovidermicrosoft.yaml b/examples-generated/apimanagement/v1beta1/identityprovidermicrosoft.yaml index 2111a50f0..852b13f47 100644 --- a/examples-generated/apimanagement/v1beta1/identityprovidermicrosoft.yaml +++ b/examples-generated/apimanagement/v1beta1/identityprovidermicrosoft.yaml @@ -22,7 +22,7 @@ spec: --- -apiVersion: apimanagement.azure.upbound.io/v1beta1 +apiVersion: apimanagement.azure.upbound.io/v1beta2 kind: Management metadata: annotations: diff --git a/examples-generated/apimanagement/v1beta1/identityprovidertwitter.yaml b/examples-generated/apimanagement/v1beta1/identityprovidertwitter.yaml index 5d676d9c7..b68509f76 100644 --- a/examples-generated/apimanagement/v1beta1/identityprovidertwitter.yaml +++ b/examples-generated/apimanagement/v1beta1/identityprovidertwitter.yaml @@ -25,7 +25,7 @@ spec: --- -apiVersion: apimanagement.azure.upbound.io/v1beta1 +apiVersion: apimanagement.azure.upbound.io/v1beta2 kind: Management metadata: annotations: diff --git 
a/examples-generated/apimanagement/v1beta1/notificationrecipientemail.yaml b/examples-generated/apimanagement/v1beta1/notificationrecipientemail.yaml index f16278b4a..97db9399f 100644 --- a/examples-generated/apimanagement/v1beta1/notificationrecipientemail.yaml +++ b/examples-generated/apimanagement/v1beta1/notificationrecipientemail.yaml @@ -16,7 +16,7 @@ spec: --- -apiVersion: apimanagement.azure.upbound.io/v1beta1 +apiVersion: apimanagement.azure.upbound.io/v1beta2 kind: Management metadata: annotations: diff --git a/examples-generated/apimanagement/v1beta1/notificationrecipientuser.yaml b/examples-generated/apimanagement/v1beta1/notificationrecipientuser.yaml index 9ded259e0..b38b0ac4f 100644 --- a/examples-generated/apimanagement/v1beta1/notificationrecipientuser.yaml +++ b/examples-generated/apimanagement/v1beta1/notificationrecipientuser.yaml @@ -18,7 +18,7 @@ spec: --- -apiVersion: apimanagement.azure.upbound.io/v1beta1 +apiVersion: apimanagement.azure.upbound.io/v1beta2 kind: Management metadata: annotations: diff --git a/examples-generated/apimanagement/v1beta1/openidconnectprovider.yaml b/examples-generated/apimanagement/v1beta1/openidconnectprovider.yaml index da7881303..0cf9c6c08 100644 --- a/examples-generated/apimanagement/v1beta1/openidconnectprovider.yaml +++ b/examples-generated/apimanagement/v1beta1/openidconnectprovider.yaml @@ -27,7 +27,7 @@ spec: --- -apiVersion: apimanagement.azure.upbound.io/v1beta1 +apiVersion: apimanagement.azure.upbound.io/v1beta2 kind: Management metadata: annotations: diff --git a/examples-generated/apimanagement/v1beta1/policy.yaml b/examples-generated/apimanagement/v1beta1/policy.yaml index 3e7ae7955..51b6e9ac2 100644 --- a/examples-generated/apimanagement/v1beta1/policy.yaml +++ b/examples-generated/apimanagement/v1beta1/policy.yaml @@ -15,7 +15,7 @@ spec: --- -apiVersion: apimanagement.azure.upbound.io/v1beta1 +apiVersion: apimanagement.azure.upbound.io/v1beta2 kind: Management metadata: annotations: @@ -35,7 +35,7 
@@ spec: --- -apiVersion: apimanagement.azure.upbound.io/v1beta1 +apiVersion: apimanagement.azure.upbound.io/v1beta2 kind: NamedValue metadata: annotations: diff --git a/examples-generated/apimanagement/v1beta1/product.yaml b/examples-generated/apimanagement/v1beta1/product.yaml index 943c99701..aaf7be013 100644 --- a/examples-generated/apimanagement/v1beta1/product.yaml +++ b/examples-generated/apimanagement/v1beta1/product.yaml @@ -21,7 +21,7 @@ spec: --- -apiVersion: apimanagement.azure.upbound.io/v1beta1 +apiVersion: apimanagement.azure.upbound.io/v1beta2 kind: Management metadata: annotations: diff --git a/examples-generated/apimanagement/v1beta1/producttag.yaml b/examples-generated/apimanagement/v1beta1/producttag.yaml index 4ef5bd7b7..4457854cf 100644 --- a/examples-generated/apimanagement/v1beta1/producttag.yaml +++ b/examples-generated/apimanagement/v1beta1/producttag.yaml @@ -23,7 +23,7 @@ spec: --- -apiVersion: apimanagement.azure.upbound.io/v1beta1 +apiVersion: apimanagement.azure.upbound.io/v1beta2 kind: Management metadata: annotations: diff --git a/examples-generated/apimanagement/v1beta1/rediscache.yaml b/examples-generated/apimanagement/v1beta1/rediscache.yaml index c3c334983..1b10b05d4 100644 --- a/examples-generated/apimanagement/v1beta1/rediscache.yaml +++ b/examples-generated/apimanagement/v1beta1/rediscache.yaml @@ -23,7 +23,7 @@ spec: --- -apiVersion: apimanagement.azure.upbound.io/v1beta1 +apiVersion: apimanagement.azure.upbound.io/v1beta2 kind: Management metadata: annotations: @@ -43,7 +43,7 @@ spec: --- -apiVersion: cache.azure.upbound.io/v1beta1 +apiVersion: cache.azure.upbound.io/v1beta2 kind: RedisCache metadata: annotations: diff --git a/examples-generated/apimanagement/v1beta1/tag.yaml b/examples-generated/apimanagement/v1beta1/tag.yaml index 8eb817806..3a52f978e 100644 --- a/examples-generated/apimanagement/v1beta1/tag.yaml +++ b/examples-generated/apimanagement/v1beta1/tag.yaml @@ -14,7 +14,7 @@ spec: --- -apiVersion: 
apimanagement.azure.upbound.io/v1beta1 +apiVersion: apimanagement.azure.upbound.io/v1beta2 kind: Management metadata: annotations: diff --git a/examples-generated/apimanagement/v1beta1/user.yaml b/examples-generated/apimanagement/v1beta1/user.yaml index 55fe437d6..7aeda47e6 100644 --- a/examples-generated/apimanagement/v1beta1/user.yaml +++ b/examples-generated/apimanagement/v1beta1/user.yaml @@ -21,7 +21,7 @@ spec: --- -apiVersion: apimanagement.azure.upbound.io/v1beta1 +apiVersion: apimanagement.azure.upbound.io/v1beta2 kind: Management metadata: annotations: diff --git a/examples-generated/apimanagement/v1beta2/api.yaml b/examples-generated/apimanagement/v1beta2/api.yaml new file mode 100644 index 000000000..dc9125815 --- /dev/null +++ b/examples-generated/apimanagement/v1beta2/api.yaml @@ -0,0 +1,58 @@ +apiVersion: apimanagement.azure.upbound.io/v1beta2 +kind: API +metadata: + annotations: + meta.upbound.io/example-id: apimanagement/v1beta2/api + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + apiManagementNameSelector: + matchLabels: + testing.upbound.io/example-name: example + displayName: Example API + import: + - contentFormat: swagger-link-json + contentValue: http://conferenceapi.azurewebsites.net/?format=json + path: example + protocols: + - https + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + revision: "1" + +--- + +apiVersion: apimanagement.azure.upbound.io/v1beta2 +kind: Management +metadata: + annotations: + meta.upbound.io/example-id: apimanagement/v1beta2/api + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + publisherEmail: company@terraform.io + publisherName: My Company + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: Developer_1 + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + 
meta.upbound.io/example-id: apimanagement/v1beta2/api + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/apimanagement/v1beta2/apidiagnostic.yaml b/examples-generated/apimanagement/v1beta2/apidiagnostic.yaml new file mode 100644 index 000000000..ad7fb009e --- /dev/null +++ b/examples-generated/apimanagement/v1beta2/apidiagnostic.yaml @@ -0,0 +1,154 @@ +apiVersion: apimanagement.azure.upbound.io/v1beta2 +kind: APIDiagnostic +metadata: + annotations: + meta.upbound.io/example-id: apimanagement/v1beta2/apidiagnostic + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + alwaysLogErrors: true + apiManagementLoggerIdSelector: + matchLabels: + testing.upbound.io/example-name: example + apiManagementNameSelector: + matchLabels: + testing.upbound.io/example-name: example + apiNameSelector: + matchLabels: + testing.upbound.io/example-name: example + backendRequest: + - bodyBytes: 32 + headersToLog: + - content-type + - accept + - origin + backendResponse: + - bodyBytes: 32 + headersToLog: + - content-type + - content-length + - origin + frontendRequest: + - bodyBytes: 32 + headersToLog: + - content-type + - accept + - origin + frontendResponse: + - bodyBytes: 32 + headersToLog: + - content-type + - content-length + - origin + httpCorrelationProtocol: W3C + logClientIp: true + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + samplingPercentage: 5 + verbosity: verbose + +--- + +apiVersion: apimanagement.azure.upbound.io/v1beta2 +kind: Management +metadata: + annotations: + meta.upbound.io/example-id: apimanagement/v1beta2/apidiagnostic + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + publisherEmail: company@terraform.io + publisherName: My Company + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + 
skuName: Developer_1 + +--- + +apiVersion: apimanagement.azure.upbound.io/v1beta2 +kind: API +metadata: + annotations: + meta.upbound.io/example-id: apimanagement/v1beta2/apidiagnostic + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + apiManagementNameSelector: + matchLabels: + testing.upbound.io/example-name: example + displayName: Example API + import: + - contentFormat: swagger-link-json + contentValue: http://conferenceapi.azurewebsites.net/?format=json + path: example + protocols: + - https + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + revision: "1" + +--- + +apiVersion: apimanagement.azure.upbound.io/v1beta2 +kind: Logger +metadata: + annotations: + meta.upbound.io/example-id: apimanagement/v1beta2/apidiagnostic + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + apiManagementNameSelector: + matchLabels: + testing.upbound.io/example-name: example + applicationInsights: + - instrumentationKeySecretRef: + key: attribute.instrumentation_key + name: example-application-insights + namespace: upbound-system + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: insights.azure.upbound.io/v1beta1 +kind: ApplicationInsights +metadata: + annotations: + meta.upbound.io/example-id: apimanagement/v1beta2/apidiagnostic + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + applicationType: web + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: apimanagement/v1beta2/apidiagnostic + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/apimanagement/v1beta2/apioperation.yaml 
b/examples-generated/apimanagement/v1beta2/apioperation.yaml new file mode 100644 index 000000000..19e53a955 --- /dev/null +++ b/examples-generated/apimanagement/v1beta2/apioperation.yaml @@ -0,0 +1,29 @@ +apiVersion: apimanagement.azure.upbound.io/v1beta2 +kind: APIOperation +metadata: + annotations: + meta.upbound.io/example-id: apimanagement/v1beta2/apioperation + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + apiManagementNameSelector: + matchLabels: + testing.upbound.io/example-name: azurerm_api_management_api + apiNameSelector: + matchLabels: + testing.upbound.io/example-name: azurerm_api_management_api + description: This can only be done by the logged in user. + displayName: Delete User Operation + method: DELETE + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: azurerm_api_management_api + response: + - statusCode: 200 + templateParameter: + - name: id + required: true + type: number + urlTemplate: /users/{id}/delete diff --git a/examples-generated/apimanagement/v1beta2/backend.yaml b/examples-generated/apimanagement/v1beta2/backend.yaml new file mode 100644 index 000000000..091363ec0 --- /dev/null +++ b/examples-generated/apimanagement/v1beta2/backend.yaml @@ -0,0 +1,52 @@ +apiVersion: apimanagement.azure.upbound.io/v1beta2 +kind: Backend +metadata: + annotations: + meta.upbound.io/example-id: apimanagement/v1beta2/backend + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + apiManagementNameSelector: + matchLabels: + testing.upbound.io/example-name: example + protocol: http + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + url: https://backend + +--- + +apiVersion: apimanagement.azure.upbound.io/v1beta2 +kind: Management +metadata: + annotations: + meta.upbound.io/example-id: apimanagement/v1beta2/backend + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + 
location: West Europe + publisherEmail: company@terraform.io + publisherName: My Company + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: Developer_1 + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: apimanagement/v1beta2/backend + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/apimanagement/v1beta2/diagnostic.yaml b/examples-generated/apimanagement/v1beta2/diagnostic.yaml new file mode 100644 index 000000000..37f2c81ef --- /dev/null +++ b/examples-generated/apimanagement/v1beta2/diagnostic.yaml @@ -0,0 +1,124 @@ +apiVersion: apimanagement.azure.upbound.io/v1beta2 +kind: Diagnostic +metadata: + annotations: + meta.upbound.io/example-id: apimanagement/v1beta2/diagnostic + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + alwaysLogErrors: true + apiManagementLoggerIdSelector: + matchLabels: + testing.upbound.io/example-name: example + apiManagementNameSelector: + matchLabels: + testing.upbound.io/example-name: example + backendRequest: + - bodyBytes: 32 + headersToLog: + - content-type + - accept + - origin + backendResponse: + - bodyBytes: 32 + headersToLog: + - content-type + - content-length + - origin + frontendRequest: + - bodyBytes: 32 + headersToLog: + - content-type + - accept + - origin + frontendResponse: + - bodyBytes: 32 + headersToLog: + - content-type + - content-length + - origin + httpCorrelationProtocol: W3C + logClientIp: true + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + samplingPercentage: 5 + verbosity: verbose + +--- + +apiVersion: apimanagement.azure.upbound.io/v1beta2 +kind: Management +metadata: + annotations: + meta.upbound.io/example-id: apimanagement/v1beta2/diagnostic + labels: + testing.upbound.io/example-name: example + name: example 
+spec: + forProvider: + location: West Europe + publisherEmail: company@terraform.io + publisherName: My Company + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: Developer_1 + +--- + +apiVersion: apimanagement.azure.upbound.io/v1beta2 +kind: Logger +metadata: + annotations: + meta.upbound.io/example-id: apimanagement/v1beta2/diagnostic + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + apiManagementNameSelector: + matchLabels: + testing.upbound.io/example-name: example + applicationInsights: + - instrumentationKeySecretRef: + key: attribute.instrumentation_key + name: example-application-insights + namespace: upbound-system + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: insights.azure.upbound.io/v1beta1 +kind: ApplicationInsights +metadata: + annotations: + meta.upbound.io/example-id: apimanagement/v1beta2/diagnostic + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + applicationType: web + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: apimanagement/v1beta2/diagnostic + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/apimanagement/v1beta2/gateway.yaml b/examples-generated/apimanagement/v1beta2/gateway.yaml new file mode 100644 index 000000000..a66134089 --- /dev/null +++ b/examples-generated/apimanagement/v1beta2/gateway.yaml @@ -0,0 +1,53 @@ +apiVersion: apimanagement.azure.upbound.io/v1beta2 +kind: Gateway +metadata: + annotations: + meta.upbound.io/example-id: apimanagement/v1beta2/gateway + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + 
apiManagementIdSelector: + matchLabels: + testing.upbound.io/example-name: example + description: Example API Management gateway + locationData: + - city: example city + district: example district + name: example name + region: example region + +--- + +apiVersion: apimanagement.azure.upbound.io/v1beta2 +kind: Management +metadata: + annotations: + meta.upbound.io/example-id: apimanagement/v1beta2/gateway + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + publisherEmail: pub1@email.com + publisherName: pub1 + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: Consumption_0 + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: apimanagement/v1beta2/gateway + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/apimanagement/v1beta2/logger.yaml b/examples-generated/apimanagement/v1beta2/logger.yaml new file mode 100644 index 000000000..64e14f678 --- /dev/null +++ b/examples-generated/apimanagement/v1beta2/logger.yaml @@ -0,0 +1,76 @@ +apiVersion: apimanagement.azure.upbound.io/v1beta2 +kind: Logger +metadata: + annotations: + meta.upbound.io/example-id: apimanagement/v1beta2/logger + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + apiManagementNameSelector: + matchLabels: + testing.upbound.io/example-name: example + applicationInsights: + - instrumentationKeySecretRef: + key: attribute.instrumentation_key + name: example-application-insights + namespace: upbound-system + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + resourceIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: apimanagement.azure.upbound.io/v1beta2 +kind: Management +metadata: + annotations: + 
meta.upbound.io/example-id: apimanagement/v1beta2/logger + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + publisherEmail: company@terraform.io + publisherName: My Company + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: Developer_1 + +--- + +apiVersion: insights.azure.upbound.io/v1beta1 +kind: ApplicationInsights +metadata: + annotations: + meta.upbound.io/example-id: apimanagement/v1beta2/logger + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + applicationType: other + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: apimanagement/v1beta2/logger + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/apimanagement/v1beta2/management.yaml b/examples-generated/apimanagement/v1beta2/management.yaml new file mode 100644 index 000000000..134f903da --- /dev/null +++ b/examples-generated/apimanagement/v1beta2/management.yaml @@ -0,0 +1,31 @@ +apiVersion: apimanagement.azure.upbound.io/v1beta2 +kind: Management +metadata: + annotations: + meta.upbound.io/example-id: apimanagement/v1beta2/management + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + publisherEmail: company@terraform.io + publisherName: My Company + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: Developer_1 + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: apimanagement/v1beta2/management + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West 
Europe diff --git a/examples-generated/apimanagement/v1beta2/namedvalue.yaml b/examples-generated/apimanagement/v1beta2/namedvalue.yaml new file mode 100644 index 000000000..30cf6dd44 --- /dev/null +++ b/examples-generated/apimanagement/v1beta2/namedvalue.yaml @@ -0,0 +1,55 @@ +apiVersion: apimanagement.azure.upbound.io/v1beta2 +kind: NamedValue +metadata: + annotations: + meta.upbound.io/example-id: apimanagement/v1beta2/namedvalue + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + apiManagementNameSelector: + matchLabels: + testing.upbound.io/example-name: example + displayName: ExampleProperty + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + valueSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + +--- + +apiVersion: apimanagement.azure.upbound.io/v1beta2 +kind: Management +metadata: + annotations: + meta.upbound.io/example-id: apimanagement/v1beta2/namedvalue + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + publisherEmail: pub1@email.com + publisherName: pub1 + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: Developer_1 + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: apimanagement/v1beta2/namedvalue + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/appconfiguration/v1beta2/configuration.yaml b/examples-generated/appconfiguration/v1beta2/configuration.yaml new file mode 100644 index 000000000..0cd556fc7 --- /dev/null +++ b/examples-generated/appconfiguration/v1beta2/configuration.yaml @@ -0,0 +1,28 @@ +apiVersion: appconfiguration.azure.upbound.io/v1beta2 +kind: Configuration +metadata: + annotations: + meta.upbound.io/example-id: 
appconfiguration/v1beta2/configuration + labels: + testing.upbound.io/example-name: appconf + name: appconf +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: appconfiguration/v1beta2/configuration + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/appplatform/v1beta1/springcloudaccelerator.yaml b/examples-generated/appplatform/v1beta1/springcloudaccelerator.yaml index 67af8faf9..876efb18d 100644 --- a/examples-generated/appplatform/v1beta1/springcloudaccelerator.yaml +++ b/examples-generated/appplatform/v1beta1/springcloudaccelerator.yaml @@ -29,7 +29,7 @@ spec: --- -apiVersion: appplatform.azure.upbound.io/v1beta1 +apiVersion: appplatform.azure.upbound.io/v1beta2 kind: SpringCloudService metadata: annotations: diff --git a/examples-generated/appplatform/v1beta1/springcloudactivedeployment.yaml b/examples-generated/appplatform/v1beta1/springcloudactivedeployment.yaml index 992193330..4c857a9a2 100644 --- a/examples-generated/appplatform/v1beta1/springcloudactivedeployment.yaml +++ b/examples-generated/appplatform/v1beta1/springcloudactivedeployment.yaml @@ -31,7 +31,7 @@ spec: --- -apiVersion: appplatform.azure.upbound.io/v1beta1 +apiVersion: appplatform.azure.upbound.io/v1beta2 kind: SpringCloudApp metadata: annotations: @@ -52,7 +52,7 @@ spec: --- -apiVersion: appplatform.azure.upbound.io/v1beta1 +apiVersion: appplatform.azure.upbound.io/v1beta2 kind: SpringCloudJavaDeployment metadata: annotations: @@ -76,7 +76,7 @@ spec: --- -apiVersion: appplatform.azure.upbound.io/v1beta1 +apiVersion: appplatform.azure.upbound.io/v1beta2 kind: SpringCloudService metadata: annotations: diff --git a/examples-generated/appplatform/v1beta1/springcloudapiportalcustomdomain.yaml 
b/examples-generated/appplatform/v1beta1/springcloudapiportalcustomdomain.yaml index d486de3bb..c49581f14 100644 --- a/examples-generated/appplatform/v1beta1/springcloudapiportalcustomdomain.yaml +++ b/examples-generated/appplatform/v1beta1/springcloudapiportalcustomdomain.yaml @@ -29,7 +29,7 @@ spec: --- -apiVersion: appplatform.azure.upbound.io/v1beta1 +apiVersion: appplatform.azure.upbound.io/v1beta2 kind: SpringCloudAPIPortal metadata: annotations: @@ -47,7 +47,7 @@ spec: --- -apiVersion: appplatform.azure.upbound.io/v1beta1 +apiVersion: appplatform.azure.upbound.io/v1beta2 kind: SpringCloudGateway metadata: annotations: @@ -63,7 +63,7 @@ spec: --- -apiVersion: appplatform.azure.upbound.io/v1beta1 +apiVersion: appplatform.azure.upbound.io/v1beta2 kind: SpringCloudService metadata: annotations: diff --git a/examples-generated/appplatform/v1beta1/springcloudappcosmosdbassociation.yaml b/examples-generated/appplatform/v1beta1/springcloudappcosmosdbassociation.yaml index 6d5c64ceb..aa89c72d8 100644 --- a/examples-generated/appplatform/v1beta1/springcloudappcosmosdbassociation.yaml +++ b/examples-generated/appplatform/v1beta1/springcloudappcosmosdbassociation.yaml @@ -21,7 +21,7 @@ spec: --- -apiVersion: cosmosdb.azure.upbound.io/v1beta1 +apiVersion: cosmosdb.azure.upbound.io/v1beta2 kind: Account metadata: annotations: @@ -59,7 +59,7 @@ spec: --- -apiVersion: appplatform.azure.upbound.io/v1beta1 +apiVersion: appplatform.azure.upbound.io/v1beta2 kind: SpringCloudApp metadata: annotations: @@ -78,7 +78,7 @@ spec: --- -apiVersion: appplatform.azure.upbound.io/v1beta1 +apiVersion: appplatform.azure.upbound.io/v1beta2 kind: SpringCloudService metadata: annotations: diff --git a/examples-generated/appplatform/v1beta1/springcloudappmysqlassociation.yaml b/examples-generated/appplatform/v1beta1/springcloudappmysqlassociation.yaml index 541807714..a015b6f61 100644 --- a/examples-generated/appplatform/v1beta1/springcloudappmysqlassociation.yaml +++ 
b/examples-generated/appplatform/v1beta1/springcloudappmysqlassociation.yaml @@ -48,7 +48,7 @@ spec: --- -apiVersion: dbformysql.azure.upbound.io/v1beta1 +apiVersion: dbformysql.azure.upbound.io/v1beta2 kind: Server metadata: annotations: @@ -90,7 +90,7 @@ spec: --- -apiVersion: appplatform.azure.upbound.io/v1beta1 +apiVersion: appplatform.azure.upbound.io/v1beta2 kind: SpringCloudApp metadata: annotations: @@ -109,7 +109,7 @@ spec: --- -apiVersion: appplatform.azure.upbound.io/v1beta1 +apiVersion: appplatform.azure.upbound.io/v1beta2 kind: SpringCloudService metadata: annotations: diff --git a/examples-generated/appplatform/v1beta1/springcloudappredisassociation.yaml b/examples-generated/appplatform/v1beta1/springcloudappredisassociation.yaml index 1944b8283..a0ed813fa 100644 --- a/examples-generated/appplatform/v1beta1/springcloudappredisassociation.yaml +++ b/examples-generated/appplatform/v1beta1/springcloudappredisassociation.yaml @@ -21,7 +21,7 @@ spec: --- -apiVersion: cache.azure.upbound.io/v1beta1 +apiVersion: cache.azure.upbound.io/v1beta2 kind: RedisCache metadata: annotations: @@ -56,7 +56,7 @@ spec: --- -apiVersion: appplatform.azure.upbound.io/v1beta1 +apiVersion: appplatform.azure.upbound.io/v1beta2 kind: SpringCloudApp metadata: annotations: @@ -75,7 +75,7 @@ spec: --- -apiVersion: appplatform.azure.upbound.io/v1beta1 +apiVersion: appplatform.azure.upbound.io/v1beta2 kind: SpringCloudService metadata: annotations: diff --git a/examples-generated/appplatform/v1beta1/springcloudcertificate.yaml b/examples-generated/appplatform/v1beta1/springcloudcertificate.yaml index b8b0ffde9..0fdeefd80 100644 --- a/examples-generated/appplatform/v1beta1/springcloudcertificate.yaml +++ b/examples-generated/appplatform/v1beta1/springcloudcertificate.yaml @@ -21,7 +21,7 @@ spec: --- -apiVersion: keyvault.azure.upbound.io/v1beta1 +apiVersion: keyvault.azure.upbound.io/v1beta2 kind: Vault metadata: annotations: @@ -40,7 +40,7 @@ spec: --- -apiVersion: 
keyvault.azure.upbound.io/v1beta1 +apiVersion: keyvault.azure.upbound.io/v1beta2 kind: Certificate metadata: annotations: @@ -96,7 +96,7 @@ spec: --- -apiVersion: appplatform.azure.upbound.io/v1beta1 +apiVersion: appplatform.azure.upbound.io/v1beta2 kind: SpringCloudService metadata: annotations: diff --git a/examples-generated/appplatform/v1beta1/springcloudconfigurationservice.yaml b/examples-generated/appplatform/v1beta1/springcloudconfigurationservice.yaml index a00291ab0..e1987baca 100644 --- a/examples-generated/appplatform/v1beta1/springcloudconfigurationservice.yaml +++ b/examples-generated/appplatform/v1beta1/springcloudconfigurationservice.yaml @@ -43,7 +43,7 @@ spec: --- -apiVersion: appplatform.azure.upbound.io/v1beta1 +apiVersion: appplatform.azure.upbound.io/v1beta2 kind: SpringCloudService metadata: annotations: diff --git a/examples-generated/appplatform/v1beta1/springcloudcustomdomain.yaml b/examples-generated/appplatform/v1beta1/springcloudcustomdomain.yaml index 6dd0a0e49..f63cf1083 100644 --- a/examples-generated/appplatform/v1beta1/springcloudcustomdomain.yaml +++ b/examples-generated/appplatform/v1beta1/springcloudcustomdomain.yaml @@ -50,7 +50,7 @@ spec: --- -apiVersion: appplatform.azure.upbound.io/v1beta1 +apiVersion: appplatform.azure.upbound.io/v1beta2 kind: SpringCloudApp metadata: annotations: @@ -69,7 +69,7 @@ spec: --- -apiVersion: appplatform.azure.upbound.io/v1beta1 +apiVersion: appplatform.azure.upbound.io/v1beta2 kind: SpringCloudService metadata: annotations: diff --git a/examples-generated/appplatform/v1beta1/springcloudgatewaycustomdomain.yaml b/examples-generated/appplatform/v1beta1/springcloudgatewaycustomdomain.yaml index 3d6675308..f8020adac 100644 --- a/examples-generated/appplatform/v1beta1/springcloudgatewaycustomdomain.yaml +++ b/examples-generated/appplatform/v1beta1/springcloudgatewaycustomdomain.yaml @@ -29,7 +29,7 @@ spec: --- -apiVersion: appplatform.azure.upbound.io/v1beta1 +apiVersion: 
appplatform.azure.upbound.io/v1beta2 kind: SpringCloudGateway metadata: annotations: @@ -45,7 +45,7 @@ spec: --- -apiVersion: appplatform.azure.upbound.io/v1beta1 +apiVersion: appplatform.azure.upbound.io/v1beta2 kind: SpringCloudService metadata: annotations: diff --git a/examples-generated/appplatform/v1beta1/springcloudstorage.yaml b/examples-generated/appplatform/v1beta1/springcloudstorage.yaml index 1b2ce4b91..08e05c9c2 100644 --- a/examples-generated/appplatform/v1beta1/springcloudstorage.yaml +++ b/examples-generated/appplatform/v1beta1/springcloudstorage.yaml @@ -34,7 +34,7 @@ spec: --- -apiVersion: appplatform.azure.upbound.io/v1beta1 +apiVersion: appplatform.azure.upbound.io/v1beta2 kind: SpringCloudService metadata: annotations: @@ -51,7 +51,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: diff --git a/examples-generated/appplatform/v1beta2/springcloudapiportal.yaml b/examples-generated/appplatform/v1beta2/springcloudapiportal.yaml new file mode 100644 index 000000000..4f8928517 --- /dev/null +++ b/examples-generated/appplatform/v1beta2/springcloudapiportal.yaml @@ -0,0 +1,73 @@ +apiVersion: appplatform.azure.upbound.io/v1beta2 +kind: SpringCloudAPIPortal +metadata: + annotations: + meta.upbound.io/example-id: appplatform/v1beta2/springcloudapiportal + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + apiTryOutEnabled: true + gatewayIdsRefs: + - name: example + httpsOnlyEnabled: false + instanceCount: 1 + publicNetworkAccessEnabled: true + springCloudServiceIdSelector: + matchLabels: + testing.upbound.io/example-name: example + sso: + - clientId: test + clientSecret: secret + issuerUri: https://www.example.com/issueToken + scope: + - read + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: appplatform/v1beta2/springcloudapiportal + labels: + 
testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: appplatform.azure.upbound.io/v1beta2 +kind: SpringCloudGateway +metadata: + annotations: + meta.upbound.io/example-id: appplatform/v1beta2/springcloudapiportal + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + springCloudServiceIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: appplatform.azure.upbound.io/v1beta2 +kind: SpringCloudService +metadata: + annotations: + meta.upbound.io/example-id: appplatform/v1beta2/springcloudapiportal + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: E0 diff --git a/examples-generated/appplatform/v1beta2/springcloudapp.yaml b/examples-generated/appplatform/v1beta2/springcloudapp.yaml new file mode 100644 index 000000000..dd1ef8601 --- /dev/null +++ b/examples-generated/appplatform/v1beta2/springcloudapp.yaml @@ -0,0 +1,49 @@ +apiVersion: appplatform.azure.upbound.io/v1beta2 +kind: SpringCloudApp +metadata: + annotations: + meta.upbound.io/example-id: appplatform/v1beta2/springcloudapp + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + identity: + - type: SystemAssigned + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + serviceNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: appplatform/v1beta2/springcloudapp + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: appplatform.azure.upbound.io/v1beta2 +kind: SpringCloudService +metadata: + annotations: + 
meta.upbound.io/example-id: appplatform/v1beta2/springcloudapp + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/appplatform/v1beta2/springcloudbuilddeployment.yaml b/examples-generated/appplatform/v1beta2/springcloudbuilddeployment.yaml new file mode 100644 index 000000000..97eb1e0fa --- /dev/null +++ b/examples-generated/appplatform/v1beta2/springcloudbuilddeployment.yaml @@ -0,0 +1,72 @@ +apiVersion: appplatform.azure.upbound.io/v1beta2 +kind: SpringCloudBuildDeployment +metadata: + annotations: + meta.upbound.io/example-id: appplatform/v1beta2/springcloudbuilddeployment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + buildResultId: + environmentVariables: + Env: Staging + Foo: Bar + instanceCount: 2 + quota: + - cpu: "2" + memory: 4Gi + springCloudAppIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: appplatform/v1beta2/springcloudbuilddeployment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: appplatform.azure.upbound.io/v1beta2 +kind: SpringCloudApp +metadata: + annotations: + meta.upbound.io/example-id: appplatform/v1beta2/springcloudbuilddeployment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + serviceNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: appplatform.azure.upbound.io/v1beta2 +kind: SpringCloudService +metadata: + annotations: + meta.upbound.io/example-id: appplatform/v1beta2/springcloudbuilddeployment + labels: + 
testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: E0 diff --git a/examples-generated/appplatform/v1beta2/springcloudbuilder.yaml b/examples-generated/appplatform/v1beta2/springcloudbuilder.yaml new file mode 100644 index 000000000..e3dc4db8b --- /dev/null +++ b/examples-generated/appplatform/v1beta2/springcloudbuilder.yaml @@ -0,0 +1,53 @@ +apiVersion: appplatform.azure.upbound.io/v1beta2 +kind: SpringCloudBuilder +metadata: + annotations: + meta.upbound.io/example-id: appplatform/v1beta2/springcloudbuilder + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + buildPackGroup: + - buildPackIds: + - tanzu-buildpacks/java-azure + name: mix + name: example + springCloudServiceIdSelector: + matchLabels: + testing.upbound.io/example-name: example + stack: + - id: io.buildpacks.stacks.bionic + version: base + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: appplatform/v1beta2/springcloudbuilder + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: appplatform.azure.upbound.io/v1beta2 +kind: SpringCloudService +metadata: + annotations: + meta.upbound.io/example-id: appplatform/v1beta2/springcloudbuilder + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: E0 diff --git a/examples-generated/appplatform/v1beta2/springcloudbuildpackbinding.yaml b/examples-generated/appplatform/v1beta2/springcloudbuildpackbinding.yaml new file mode 100644 index 000000000..34d82e4d5 --- /dev/null +++ b/examples-generated/appplatform/v1beta2/springcloudbuildpackbinding.yaml @@ -0,0 +1,77 @@ 
+apiVersion: appplatform.azure.upbound.io/v1beta2 +kind: SpringCloudBuildPackBinding +metadata: + annotations: + meta.upbound.io/example-id: appplatform/v1beta2/springcloudbuildpackbinding + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + bindingType: ApplicationInsights + launch: + - properties: + abc: def + any-string: any-string + sampling-rate: "12.0" + secrets: + connection-string: XXXXXXXXXXXXXXXXX=XXXXXXXXXXXXX-XXXXXXXXXXXXXXXXXXX;XXXXXXXXXXXXXXXXX=XXXXXXXXXXXXXXXXXXX + springCloudBuilderIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: appplatform/v1beta2/springcloudbuildpackbinding + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: appplatform.azure.upbound.io/v1beta2 +kind: SpringCloudBuilder +metadata: + annotations: + meta.upbound.io/example-id: appplatform/v1beta2/springcloudbuildpackbinding + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + buildPackGroup: + - buildPackIds: + - tanzu-Build Packs/java-azure + name: mix + name: example + springCloudServiceIdSelector: + matchLabels: + testing.upbound.io/example-name: example + stack: + - id: io.Build Packs.stacks.bionic + version: base + +--- + +apiVersion: appplatform.azure.upbound.io/v1beta2 +kind: SpringCloudService +metadata: + annotations: + meta.upbound.io/example-id: appplatform/v1beta2/springcloudbuildpackbinding + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: E0 diff --git a/examples-generated/appplatform/v1beta2/springcloudcontainerdeployment.yaml b/examples-generated/appplatform/v1beta2/springcloudcontainerdeployment.yaml 
new file mode 100644 index 000000000..138e2ce97 --- /dev/null +++ b/examples-generated/appplatform/v1beta2/springcloudcontainerdeployment.yaml @@ -0,0 +1,77 @@ +apiVersion: appplatform.azure.upbound.io/v1beta2 +kind: SpringCloudContainerDeployment +metadata: + annotations: + meta.upbound.io/example-id: appplatform/v1beta2/springcloudcontainerdeployment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + arguments: + - -cp + - /app/resources:/app/classes:/app/libs/* + - hello.Application + commands: + - java + environmentVariables: + Env: Staging + Foo: Bar + image: springio/gs-spring-boot-docker + instanceCount: 2 + languageFramework: springboot + server: docker.io + springCloudAppIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: appplatform/v1beta2/springcloudcontainerdeployment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: appplatform.azure.upbound.io/v1beta2 +kind: SpringCloudApp +metadata: + annotations: + meta.upbound.io/example-id: appplatform/v1beta2/springcloudcontainerdeployment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + serviceNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: appplatform.azure.upbound.io/v1beta2 +kind: SpringCloudService +metadata: + annotations: + meta.upbound.io/example-id: appplatform/v1beta2/springcloudcontainerdeployment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: E0 diff --git 
a/examples-generated/appplatform/v1beta2/springcloudcustomizedaccelerator.yaml b/examples-generated/appplatform/v1beta2/springcloudcustomizedaccelerator.yaml new file mode 100644 index 000000000..000df6321 --- /dev/null +++ b/examples-generated/appplatform/v1beta2/springcloudcustomizedaccelerator.yaml @@ -0,0 +1,72 @@ +apiVersion: appplatform.azure.upbound.io/v1beta2 +kind: SpringCloudCustomizedAccelerator +metadata: + annotations: + meta.upbound.io/example-id: appplatform/v1beta2/springcloudcustomizedaccelerator + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + acceleratorTags: + - tag-a + - tag-b + description: example description + displayName: example name + gitRepository: + - gitTag: spring.version.2.0.3 + intervalInSeconds: 100 + url: https://github.com/Azure-Samples/piggymetrics + iconUrl: https://images.freecreatives.com/wp-content/uploads/2015/05/smiley-559124_640.jpg + springCloudAcceleratorIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: appplatform/v1beta2/springcloudcustomizedaccelerator + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: west europe + +--- + +apiVersion: appplatform.azure.upbound.io/v1beta1 +kind: SpringCloudAccelerator +metadata: + annotations: + meta.upbound.io/example-id: appplatform/v1beta2/springcloudcustomizedaccelerator + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + name: default + springCloudServiceIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: appplatform.azure.upbound.io/v1beta2 +kind: SpringCloudService +metadata: + annotations: + meta.upbound.io/example-id: appplatform/v1beta2/springcloudcustomizedaccelerator + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: 
+ location: west europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: E0 diff --git a/examples-generated/appplatform/v1beta2/springclouddevtoolportal.yaml b/examples-generated/appplatform/v1beta2/springclouddevtoolportal.yaml new file mode 100644 index 000000000..47848af00 --- /dev/null +++ b/examples-generated/appplatform/v1beta2/springclouddevtoolportal.yaml @@ -0,0 +1,57 @@ +apiVersion: appplatform.azure.upbound.io/v1beta2 +kind: SpringCloudDevToolPortal +metadata: + annotations: + meta.upbound.io/example-id: appplatform/v1beta2/springclouddevtoolportal + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + applicationAcceleratorEnabled: true + applicationLiveViewEnabled: true + name: default + publicNetworkAccessEnabled: true + springCloudServiceIdSelector: + matchLabels: + testing.upbound.io/example-name: example + sso: + - clientId: example id + clientSecret: example secret + metadataUrl: https://login.microsoftonline.com/${data.azurerm_client_config.current.tenant_id}/v2.0/.well-known/openid-configuration + scope: + - openid + - profile + - email + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: appplatform/v1beta2/springclouddevtoolportal + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: appplatform.azure.upbound.io/v1beta2 +kind: SpringCloudService +metadata: + annotations: + meta.upbound.io/example-id: appplatform/v1beta2/springclouddevtoolportal + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: E0 diff --git a/examples-generated/appplatform/v1beta2/springcloudgateway.yaml b/examples-generated/appplatform/v1beta2/springcloudgateway.yaml new file 
mode 100644 index 000000000..9d8e43d73 --- /dev/null +++ b/examples-generated/appplatform/v1beta2/springcloudgateway.yaml @@ -0,0 +1,77 @@ +apiVersion: appplatform.azure.upbound.io/v1beta2 +kind: SpringCloudGateway +metadata: + annotations: + meta.upbound.io/example-id: appplatform/v1beta2/springcloudgateway + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + apiMetadata: + - description: example description + documentationUrl: https://www.example.com/docs + serverUrl: https://wwww.example.com + title: example title + version: "1.0" + cors: + - allowedHeaders: + - '*' + allowedMethods: + - PUT + allowedOrigins: + - example.com + credentialsAllowed: false + exposedHeaders: + - x-example-header + maxAgeSeconds: 86400 + httpsOnly: false + instanceCount: 2 + localResponseCachePerInstance: + - size: 100MB + timeToLive: 30s + publicNetworkAccessEnabled: true + quota: + - cpu: "1" + memory: 2Gi + springCloudServiceIdSelector: + matchLabels: + testing.upbound.io/example-name: example + sso: + - clientId: example id + clientSecret: example secret + issuerUri: https://www.test.com/issueToken + scope: + - read + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: appplatform/v1beta2/springcloudgateway + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: appplatform.azure.upbound.io/v1beta2 +kind: SpringCloudService +metadata: + annotations: + meta.upbound.io/example-id: appplatform/v1beta2/springcloudgateway + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: E0 diff --git a/examples-generated/appplatform/v1beta2/springcloudjavadeployment.yaml b/examples-generated/appplatform/v1beta2/springcloudjavadeployment.yaml new file 
mode 100644 index 000000000..fdd3ec8da --- /dev/null +++ b/examples-generated/appplatform/v1beta2/springcloudjavadeployment.yaml @@ -0,0 +1,74 @@ +apiVersion: appplatform.azure.upbound.io/v1beta2 +kind: SpringCloudJavaDeployment +metadata: + annotations: + meta.upbound.io/example-id: appplatform/v1beta2/springcloudjavadeployment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + environmentVariables: + Env: Staging + Foo: Bar + instanceCount: 2 + jvmOptions: -XX:+PrintGC + quota: + - cpu: "2" + memory: 4Gi + runtimeVersion: Java_11 + springCloudAppIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: appplatform/v1beta2/springcloudjavadeployment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: appplatform.azure.upbound.io/v1beta2 +kind: SpringCloudApp +metadata: + annotations: + meta.upbound.io/example-id: appplatform/v1beta2/springcloudjavadeployment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + identity: + - type: SystemAssigned + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + serviceNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: appplatform.azure.upbound.io/v1beta2 +kind: SpringCloudService +metadata: + annotations: + meta.upbound.io/example-id: appplatform/v1beta2/springcloudjavadeployment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/appplatform/v1beta2/springcloudservice.yaml b/examples-generated/appplatform/v1beta2/springcloudservice.yaml new file mode 100644 index 
000000000..c9b3f8d4e --- /dev/null +++ b/examples-generated/appplatform/v1beta2/springcloudservice.yaml @@ -0,0 +1,60 @@ +apiVersion: appplatform.azure.upbound.io/v1beta2 +kind: SpringCloudService +metadata: + annotations: + meta.upbound.io/example-id: appplatform/v1beta2/springcloudservice + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + configServerGitSetting: + - label: config + searchPaths: + - dir1 + - dir2 + uri: https://github.com/Azure-Samples/piggymetrics + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: S0 + tags: + Env: staging + trace: + - connectionStringSelector: + matchLabels: + testing.upbound.io/example-name: example + sampleRate: 10 + +--- + +apiVersion: insights.azure.upbound.io/v1beta1 +kind: ApplicationInsights +metadata: + annotations: + meta.upbound.io/example-id: appplatform/v1beta2/springcloudservice + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + applicationType: web + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: appplatform/v1beta2/springcloudservice + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/authorization/v1beta1/resourcepolicyexemption.yaml b/examples-generated/authorization/v1beta1/resourcepolicyexemption.yaml index 2ae17ca61..e23921da3 100644 --- a/examples-generated/authorization/v1beta1/resourcepolicyexemption.yaml +++ b/examples-generated/authorization/v1beta1/resourcepolicyexemption.yaml @@ -33,7 +33,7 @@ spec: --- -apiVersion: authorization.azure.upbound.io/v1beta1 +apiVersion: authorization.azure.upbound.io/v1beta2 kind: ResourcePolicyAssignment metadata: annotations: @@ -54,7 +54,7 
@@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: VirtualNetwork metadata: annotations: diff --git a/examples-generated/authorization/v1beta1/subscriptionpolicyexemption.yaml b/examples-generated/authorization/v1beta1/subscriptionpolicyexemption.yaml index 9647c4303..d9d7af7eb 100644 --- a/examples-generated/authorization/v1beta1/subscriptionpolicyexemption.yaml +++ b/examples-generated/authorization/v1beta1/subscriptionpolicyexemption.yaml @@ -16,7 +16,7 @@ spec: --- -apiVersion: authorization.azure.upbound.io/v1beta1 +apiVersion: authorization.azure.upbound.io/v1beta2 kind: SubscriptionPolicyAssignment metadata: annotations: diff --git a/examples-generated/authorization/v1beta2/resourcegrouppolicyassignment.yaml b/examples-generated/authorization/v1beta2/resourcegrouppolicyassignment.yaml new file mode 100644 index 000000000..f666ef40e --- /dev/null +++ b/examples-generated/authorization/v1beta2/resourcegrouppolicyassignment.yaml @@ -0,0 +1,67 @@ +apiVersion: authorization.azure.upbound.io/v1beta2 +kind: ResourceGroupPolicyAssignment +metadata: + annotations: + meta.upbound.io/example-id: authorization/v1beta2/resourcegrouppolicyassignment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + parameters: |2 + { + "tagName": { + "value": "Business Unit" + }, + "tagValue": { + "value": "BU" + } + } + policyDefinitionIdSelector: + matchLabels: + testing.upbound.io/example-name: example + resourceGroupIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: authorization.azure.upbound.io/v1beta1 +kind: PolicyDefinition +metadata: + annotations: + meta.upbound.io/example-id: authorization/v1beta2/resourcegrouppolicyassignment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + displayName: my-policy-definition + mode: All + policyRule: |2 + { + "if": { + "not": { + "field": "location", + 
"equals": "westeurope" + } + }, + "then": { + "effect": "Deny" + } + } + policyType: Custom + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: authorization/v1beta2/resourcegrouppolicyassignment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/authorization/v1beta2/resourcepolicyassignment.yaml b/examples-generated/authorization/v1beta2/resourcepolicyassignment.yaml new file mode 100644 index 000000000..904b3b389 --- /dev/null +++ b/examples-generated/authorization/v1beta2/resourcepolicyassignment.yaml @@ -0,0 +1,43 @@ +apiVersion: authorization.azure.upbound.io/v1beta2 +kind: ResourcePolicyAssignment +metadata: + annotations: + meta.upbound.io/example-id: authorization/v1beta2/resourcepolicyassignment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + name: example-policy-assignment + policyDefinitionIdSelector: + matchLabels: + testing.upbound.io/example-name: example + resourceId: ${data.azurerm_virtual_network.example.id} + +--- + +apiVersion: authorization.azure.upbound.io/v1beta1 +kind: PolicyDefinition +metadata: + annotations: + meta.upbound.io/example-id: authorization/v1beta2/resourcepolicyassignment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + displayName: my-policy-definition + mode: All + policyRule: |2 + { + "if": { + "not": { + "field": "location", + "equals": "westeurope" + } + }, + "then": { + "effect": "Deny" + } + } + policyType: Custom diff --git a/examples-generated/authorization/v1beta2/subscriptionpolicyassignment.yaml b/examples-generated/authorization/v1beta2/subscriptionpolicyassignment.yaml new file mode 100644 index 000000000..e84a55cd9 --- /dev/null +++ b/examples-generated/authorization/v1beta2/subscriptionpolicyassignment.yaml @@ -0,0 +1,42 @@ +apiVersion: 
authorization.azure.upbound.io/v1beta2 +kind: SubscriptionPolicyAssignment +metadata: + annotations: + meta.upbound.io/example-id: authorization/v1beta2/subscriptionpolicyassignment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + policyDefinitionIdSelector: + matchLabels: + testing.upbound.io/example-name: example + subscriptionId: ${data.azurerm_subscription.current.id} + +--- + +apiVersion: authorization.azure.upbound.io/v1beta1 +kind: PolicyDefinition +metadata: + annotations: + meta.upbound.io/example-id: authorization/v1beta2/subscriptionpolicyassignment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + displayName: Allowed resource types + mode: All + policyRule: |2 + { + "if": { + "not": { + "field": "location", + "equals": "westeurope" + } + }, + "then": { + "effect": "Deny" + } + } + policyType: Custom diff --git a/examples-generated/automation/v1beta1/connection.yaml b/examples-generated/automation/v1beta1/connection.yaml index 4b1bd773b..7677afe9f 100644 --- a/examples-generated/automation/v1beta1/connection.yaml +++ b/examples-generated/automation/v1beta1/connection.yaml @@ -23,7 +23,7 @@ spec: --- -apiVersion: automation.azure.upbound.io/v1beta1 +apiVersion: automation.azure.upbound.io/v1beta2 kind: Account metadata: annotations: diff --git a/examples-generated/automation/v1beta1/connectionclassiccertificate.yaml b/examples-generated/automation/v1beta1/connectionclassiccertificate.yaml index b836237b2..d2378b8ec 100644 --- a/examples-generated/automation/v1beta1/connectionclassiccertificate.yaml +++ b/examples-generated/automation/v1beta1/connectionclassiccertificate.yaml @@ -20,7 +20,7 @@ spec: --- -apiVersion: automation.azure.upbound.io/v1beta1 +apiVersion: automation.azure.upbound.io/v1beta2 kind: Account metadata: annotations: diff --git a/examples-generated/automation/v1beta1/connectiontype.yaml b/examples-generated/automation/v1beta1/connectiontype.yaml index 
60fdb2647..d855caa1e 100644 --- a/examples-generated/automation/v1beta1/connectiontype.yaml +++ b/examples-generated/automation/v1beta1/connectiontype.yaml @@ -21,7 +21,7 @@ spec: --- -apiVersion: automation.azure.upbound.io/v1beta1 +apiVersion: automation.azure.upbound.io/v1beta2 kind: Account metadata: annotations: diff --git a/examples-generated/automation/v1beta1/credential.yaml b/examples-generated/automation/v1beta1/credential.yaml index a86efb4bf..a619ea5ae 100644 --- a/examples-generated/automation/v1beta1/credential.yaml +++ b/examples-generated/automation/v1beta1/credential.yaml @@ -23,7 +23,7 @@ spec: --- -apiVersion: automation.azure.upbound.io/v1beta1 +apiVersion: automation.azure.upbound.io/v1beta2 kind: Account metadata: annotations: diff --git a/examples-generated/automation/v1beta1/hybridrunbookworkergroup.yaml b/examples-generated/automation/v1beta1/hybridrunbookworkergroup.yaml index cacd82048..8a7e3f1bc 100644 --- a/examples-generated/automation/v1beta1/hybridrunbookworkergroup.yaml +++ b/examples-generated/automation/v1beta1/hybridrunbookworkergroup.yaml @@ -18,7 +18,7 @@ spec: --- -apiVersion: automation.azure.upbound.io/v1beta1 +apiVersion: automation.azure.upbound.io/v1beta2 kind: Account metadata: annotations: diff --git a/examples-generated/automation/v1beta1/variablebool.yaml b/examples-generated/automation/v1beta1/variablebool.yaml index 8d90bdc90..ac97d559a 100644 --- a/examples-generated/automation/v1beta1/variablebool.yaml +++ b/examples-generated/automation/v1beta1/variablebool.yaml @@ -18,7 +18,7 @@ spec: --- -apiVersion: automation.azure.upbound.io/v1beta1 +apiVersion: automation.azure.upbound.io/v1beta2 kind: Account metadata: annotations: diff --git a/examples-generated/automation/v1beta1/variabledatetime.yaml b/examples-generated/automation/v1beta1/variabledatetime.yaml index 568f4fdeb..491bd2cb1 100644 --- a/examples-generated/automation/v1beta1/variabledatetime.yaml +++ 
b/examples-generated/automation/v1beta1/variabledatetime.yaml @@ -18,7 +18,7 @@ spec: --- -apiVersion: automation.azure.upbound.io/v1beta1 +apiVersion: automation.azure.upbound.io/v1beta2 kind: Account metadata: annotations: diff --git a/examples-generated/automation/v1beta1/variableint.yaml b/examples-generated/automation/v1beta1/variableint.yaml index 23158510c..da4376142 100644 --- a/examples-generated/automation/v1beta1/variableint.yaml +++ b/examples-generated/automation/v1beta1/variableint.yaml @@ -18,7 +18,7 @@ spec: --- -apiVersion: automation.azure.upbound.io/v1beta1 +apiVersion: automation.azure.upbound.io/v1beta2 kind: Account metadata: annotations: diff --git a/examples-generated/automation/v1beta1/variablestring.yaml b/examples-generated/automation/v1beta1/variablestring.yaml index b072119a9..519cb4582 100644 --- a/examples-generated/automation/v1beta1/variablestring.yaml +++ b/examples-generated/automation/v1beta1/variablestring.yaml @@ -18,7 +18,7 @@ spec: --- -apiVersion: automation.azure.upbound.io/v1beta1 +apiVersion: automation.azure.upbound.io/v1beta2 kind: Account metadata: annotations: diff --git a/examples-generated/automation/v1beta1/webhook.yaml b/examples-generated/automation/v1beta1/webhook.yaml index dffc35525..ea93d324e 100644 --- a/examples-generated/automation/v1beta1/webhook.yaml +++ b/examples-generated/automation/v1beta1/webhook.yaml @@ -25,7 +25,7 @@ spec: --- -apiVersion: automation.azure.upbound.io/v1beta1 +apiVersion: automation.azure.upbound.io/v1beta2 kind: Account metadata: annotations: @@ -43,7 +43,7 @@ spec: --- -apiVersion: automation.azure.upbound.io/v1beta1 +apiVersion: automation.azure.upbound.io/v1beta2 kind: RunBook metadata: annotations: diff --git a/examples-generated/automation/v1beta2/account.yaml b/examples-generated/automation/v1beta2/account.yaml new file mode 100644 index 000000000..8a78f5ef9 --- /dev/null +++ b/examples-generated/automation/v1beta2/account.yaml @@ -0,0 +1,31 @@ +apiVersion: 
automation.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: automation/v1beta2/account + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: Basic + tags: + environment: development + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: automation/v1beta2/account + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/automation/v1beta2/module.yaml b/examples-generated/automation/v1beta2/module.yaml new file mode 100644 index 000000000..f41f6a82f --- /dev/null +++ b/examples-generated/automation/v1beta2/module.yaml @@ -0,0 +1,50 @@ +apiVersion: automation.azure.upbound.io/v1beta2 +kind: Module +metadata: + annotations: + meta.upbound.io/example-id: automation/v1beta2/module + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + automationAccountNameSelector: + matchLabels: + testing.upbound.io/example-name: example + moduleLink: + - uri: https://devopsgallerystorage.blob.core.windows.net/packages/xactivedirectory.2.19.0.nupkg + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: automation.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: automation/v1beta2/module + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: Basic + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: automation/v1beta2/module + labels: + testing.upbound.io/example-name: 
example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/automation/v1beta2/runbook.yaml b/examples-generated/automation/v1beta2/runbook.yaml new file mode 100644 index 000000000..2596e31fc --- /dev/null +++ b/examples-generated/automation/v1beta2/runbook.yaml @@ -0,0 +1,56 @@ +apiVersion: automation.azure.upbound.io/v1beta2 +kind: RunBook +metadata: + annotations: + meta.upbound.io/example-id: automation/v1beta2/runbook + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + automationAccountNameSelector: + matchLabels: + testing.upbound.io/example-name: example + description: This is an example runbook + location: West Europe + logProgress: "true" + logVerbose: "true" + name: Get-AzureVMTutorial + publishContentLink: + - uri: https://raw.githubusercontent.com/Azure/azure-quickstart-templates/c4935ffb69246a6058eb24f54640f53f69d3ac9f/101-automation-runbook-getvms/Runbooks/Get-AzureVMTutorial.ps1 + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + runbookType: PowerShellWorkflow + +--- + +apiVersion: automation.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: automation/v1beta2/runbook + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: Basic + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: automation/v1beta2/runbook + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/automation/v1beta2/schedule.yaml b/examples-generated/automation/v1beta2/schedule.yaml new file mode 100644 index 000000000..bd0b09fc1 --- /dev/null +++ b/examples-generated/automation/v1beta2/schedule.yaml @@ -0,0 
+1,55 @@ +apiVersion: automation.azure.upbound.io/v1beta2 +kind: Schedule +metadata: + annotations: + meta.upbound.io/example-id: automation/v1beta2/schedule + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + automationAccountNameSelector: + matchLabels: + testing.upbound.io/example-name: example + description: This is an example schedule + frequency: Week + interval: 1 + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + startTime: "2014-04-15T18:00:15+02:00" + timezone: Australia/Perth + weekDays: + - Friday + +--- + +apiVersion: automation.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: automation/v1beta2/schedule + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: Basic + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: automation/v1beta2/schedule + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/cache/v1beta1/redisfirewallrule.yaml b/examples-generated/cache/v1beta1/redisfirewallrule.yaml index 18fad14e0..50dcaf3ca 100644 --- a/examples-generated/cache/v1beta1/redisfirewallrule.yaml +++ b/examples-generated/cache/v1beta1/redisfirewallrule.yaml @@ -19,7 +19,7 @@ spec: --- -apiVersion: cache.azure.upbound.io/v1beta1 +apiVersion: cache.azure.upbound.io/v1beta2 kind: RedisCache metadata: annotations: diff --git a/examples-generated/cache/v1beta1/redislinkedserver.yaml b/examples-generated/cache/v1beta1/redislinkedserver.yaml index be6cac0be..3b842b75d 100644 --- a/examples-generated/cache/v1beta1/redislinkedserver.yaml +++ b/examples-generated/cache/v1beta1/redislinkedserver.yaml @@ -22,7 +22,7 @@ spec: --- 
-apiVersion: cache.azure.upbound.io/v1beta1 +apiVersion: cache.azure.upbound.io/v1beta2 kind: RedisCache metadata: annotations: @@ -47,7 +47,7 @@ spec: --- -apiVersion: cache.azure.upbound.io/v1beta1 +apiVersion: cache.azure.upbound.io/v1beta2 kind: RedisCache metadata: annotations: diff --git a/examples-generated/cache/v1beta2/rediscache.yaml b/examples-generated/cache/v1beta2/rediscache.yaml new file mode 100644 index 000000000..d515145e9 --- /dev/null +++ b/examples-generated/cache/v1beta2/rediscache.yaml @@ -0,0 +1,35 @@ +apiVersion: cache.azure.upbound.io/v1beta2 +kind: RedisCache +metadata: + annotations: + meta.upbound.io/example-id: cache/v1beta2/rediscache + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + capacity: 2 + enableNonSslPort: false + family: C + location: West Europe + minimumTlsVersion: "1.2" + redisConfiguration: + - {} + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: Standard + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: cache/v1beta2/rediscache + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/cdn/v1beta1/frontdoorcustomdomainassociation.yaml b/examples-generated/cdn/v1beta1/frontdoorcustomdomainassociation.yaml index 90e1663b6..e66dc9808 100644 --- a/examples-generated/cdn/v1beta1/frontdoorcustomdomainassociation.yaml +++ b/examples-generated/cdn/v1beta1/frontdoorcustomdomainassociation.yaml @@ -16,7 +16,7 @@ spec: --- -apiVersion: cdn.azure.upbound.io/v1beta1 +apiVersion: cdn.azure.upbound.io/v1beta2 kind: FrontdoorCustomDomain metadata: annotations: @@ -55,7 +55,7 @@ spec: --- -apiVersion: cdn.azure.upbound.io/v1beta1 +apiVersion: cdn.azure.upbound.io/v1beta2 kind: FrontdoorOrigin metadata: annotations: @@ -83,7 +83,7 @@ spec: --- -apiVersion: cdn.azure.upbound.io/v1beta1 
+apiVersion: cdn.azure.upbound.io/v1beta2 kind: FrontdoorOriginGroup metadata: annotations: @@ -127,7 +127,7 @@ spec: --- -apiVersion: cdn.azure.upbound.io/v1beta1 +apiVersion: cdn.azure.upbound.io/v1beta2 kind: FrontdoorRoute metadata: annotations: @@ -177,7 +177,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: DNSZone metadata: annotations: diff --git a/examples-generated/cdn/v1beta2/endpoint.yaml b/examples-generated/cdn/v1beta2/endpoint.yaml new file mode 100644 index 000000000..6cddbaed5 --- /dev/null +++ b/examples-generated/cdn/v1beta2/endpoint.yaml @@ -0,0 +1,52 @@ +apiVersion: cdn.azure.upbound.io/v1beta2 +kind: Endpoint +metadata: + annotations: + meta.upbound.io/example-id: cdn/v1beta2/endpoint + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + origin: + - hostName: www.contoso.com + name: example + profileNameSelector: + matchLabels: + testing.upbound.io/example-name: example + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: cdn.azure.upbound.io/v1beta1 +kind: Profile +metadata: + annotations: + meta.upbound.io/example-id: cdn/v1beta2/endpoint + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sku: Standard_Verizon + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: cdn/v1beta2/endpoint + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/cdn/v1beta2/frontdoorcustomdomain.yaml b/examples-generated/cdn/v1beta2/frontdoorcustomdomain.yaml new file mode 100644 index 000000000..d01130a0c --- /dev/null +++ 
b/examples-generated/cdn/v1beta2/frontdoorcustomdomain.yaml @@ -0,0 +1,67 @@ +apiVersion: cdn.azure.upbound.io/v1beta2 +kind: FrontdoorCustomDomain +metadata: + annotations: + meta.upbound.io/example-id: cdn/v1beta2/frontdoorcustomdomain + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + cdnFrontdoorProfileIdSelector: + matchLabels: + testing.upbound.io/example-name: example + dnsZoneIdSelector: + matchLabels: + testing.upbound.io/example-name: example + hostName: contoso.fabrikam.com + tls: + - certificateType: ManagedCertificate + minimumTlsVersion: TLS12 + +--- + +apiVersion: cdn.azure.upbound.io/v1beta1 +kind: FrontdoorProfile +metadata: + annotations: + meta.upbound.io/example-id: cdn/v1beta2/frontdoorcustomdomain + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: Standard_AzureFrontDoor + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: DNSZone +metadata: + annotations: + meta.upbound.io/example-id: cdn/v1beta2/frontdoorcustomdomain + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: cdn/v1beta2/frontdoorcustomdomain + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/cdn/v1beta2/frontdoororigin.yaml b/examples-generated/cdn/v1beta2/frontdoororigin.yaml new file mode 100644 index 000000000..b69a8093a --- /dev/null +++ b/examples-generated/cdn/v1beta2/frontdoororigin.yaml @@ -0,0 +1,74 @@ +apiVersion: cdn.azure.upbound.io/v1beta2 +kind: FrontdoorOrigin +metadata: + annotations: + meta.upbound.io/example-id: cdn/v1beta2/frontdoororigin 
+ labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + cdnFrontdoorOriginGroupIdSelector: + matchLabels: + testing.upbound.io/example-name: example + certificateNameCheckEnabled: false + enabled: true + hostNameSelector: + matchLabels: + testing.upbound.io/example-name: example + httpPort: 80 + httpsPort: 443 + originHostHeaderSelector: + matchLabels: + testing.upbound.io/example-name: example + priority: 1 + weight: 1 + +--- + +apiVersion: cdn.azure.upbound.io/v1beta2 +kind: FrontdoorOriginGroup +metadata: + annotations: + meta.upbound.io/example-id: cdn/v1beta2/frontdoororigin + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + cdnFrontdoorProfileIdSelector: + matchLabels: + testing.upbound.io/example-name: example + loadBalancing: + - {} + +--- + +apiVersion: cdn.azure.upbound.io/v1beta1 +kind: FrontdoorProfile +metadata: + annotations: + meta.upbound.io/example-id: cdn/v1beta2/frontdoororigin + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: Premium_AzureFrontDoor + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: cdn/v1beta2/frontdoororigin + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/cdn/v1beta2/frontdoororigingroup.yaml b/examples-generated/cdn/v1beta2/frontdoororigingroup.yaml new file mode 100644 index 000000000..b924773cc --- /dev/null +++ b/examples-generated/cdn/v1beta2/frontdoororigingroup.yaml @@ -0,0 +1,55 @@ +apiVersion: cdn.azure.upbound.io/v1beta2 +kind: FrontdoorOriginGroup +metadata: + annotations: + meta.upbound.io/example-id: cdn/v1beta2/frontdoororigingroup + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + 
cdnFrontdoorProfileIdSelector: + matchLabels: + testing.upbound.io/example-name: example + healthProbe: + - intervalInSeconds: 240 + path: /healthProbe + protocol: Https + requestType: HEAD + loadBalancing: + - additionalLatencyInMilliseconds: 0 + sampleSize: 16 + successfulSamplesRequired: 3 + restoreTrafficTimeToHealedOrNewEndpointInMinutes: 10 + sessionAffinityEnabled: true + +--- + +apiVersion: cdn.azure.upbound.io/v1beta1 +kind: FrontdoorProfile +metadata: + annotations: + meta.upbound.io/example-id: cdn/v1beta2/frontdoororigingroup + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: Standard_AzureFrontDoor + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: cdn/v1beta2/frontdoororigingroup + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/cdn/v1beta2/frontdoorroute.yaml b/examples-generated/cdn/v1beta2/frontdoorroute.yaml new file mode 100644 index 000000000..6d0bf0fce --- /dev/null +++ b/examples-generated/cdn/v1beta2/frontdoorroute.yaml @@ -0,0 +1,251 @@ +apiVersion: cdn.azure.upbound.io/v1beta2 +kind: FrontdoorRoute +metadata: + annotations: + meta.upbound.io/example-id: cdn/v1beta2/frontdoorroute + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + cache: + - compressionEnabled: true + contentTypesToCompress: + - text/html + - text/javascript + - text/xml + queryStringCachingBehavior: IgnoreSpecifiedQueryStrings + queryStrings: + - account + - settings + cdnFrontdoorCustomDomainIdsRefs: + - name: contoso + - name: fabrikam + cdnFrontdoorEndpointIdSelector: + matchLabels: + testing.upbound.io/example-name: example + cdnFrontdoorOriginGroupIdSelector: + matchLabels: + testing.upbound.io/example-name: example + 
cdnFrontdoorOriginIdsRefs: + - name: example + cdnFrontdoorRuleSetIdsRefs: + - name: example + enabled: true + forwardingProtocol: HttpsOnly + httpsRedirectEnabled: true + linkToDefaultDomain: false + patternsToMatch: + - /* + supportedProtocols: + - Http + - Https + +--- + +apiVersion: cdn.azure.upbound.io/v1beta2 +kind: FrontdoorCustomDomain +metadata: + annotations: + meta.upbound.io/example-id: cdn/v1beta2/frontdoorroute + labels: + testing.upbound.io/example-name: contoso + name: contoso +spec: + forProvider: + cdnFrontdoorProfileIdSelector: + matchLabels: + testing.upbound.io/example-name: example + dnsZoneIdSelector: + matchLabels: + testing.upbound.io/example-name: example + hostName: ${join(".", ["contoso", azurerm_dns_zone.example.name])} + tls: + - certificateType: ManagedCertificate + minimumTlsVersion: TLS12 + +--- + +apiVersion: cdn.azure.upbound.io/v1beta2 +kind: FrontdoorCustomDomain +metadata: + annotations: + meta.upbound.io/example-id: cdn/v1beta2/frontdoorroute + labels: + testing.upbound.io/example-name: fabrikam + name: fabrikam +spec: + forProvider: + cdnFrontdoorProfileIdSelector: + matchLabels: + testing.upbound.io/example-name: example + dnsZoneIdSelector: + matchLabels: + testing.upbound.io/example-name: example + hostName: ${join(".", ["fabrikam", azurerm_dns_zone.example.name])} + tls: + - certificateType: ManagedCertificate + minimumTlsVersion: TLS12 + +--- + +apiVersion: cdn.azure.upbound.io/v1beta1 +kind: FrontdoorCustomDomainAssociation +metadata: + annotations: + meta.upbound.io/example-id: cdn/v1beta2/frontdoorroute + labels: + testing.upbound.io/example-name: contoso + name: contoso +spec: + forProvider: + cdnFrontdoorCustomDomainIdSelector: + matchLabels: + testing.upbound.io/example-name: contoso + cdnFrontdoorRouteIdsRefs: + - name: example + +--- + +apiVersion: cdn.azure.upbound.io/v1beta1 +kind: FrontdoorCustomDomainAssociation +metadata: + annotations: + meta.upbound.io/example-id: cdn/v1beta2/frontdoorroute + labels: + 
testing.upbound.io/example-name: fabrikam + name: fabrikam +spec: + forProvider: + cdnFrontdoorCustomDomainIdSelector: + matchLabels: + testing.upbound.io/example-name: fabrikam + cdnFrontdoorRouteIdsRefs: + - name: example + +--- + +apiVersion: cdn.azure.upbound.io/v1beta1 +kind: FrontdoorEndpoint +metadata: + annotations: + meta.upbound.io/example-id: cdn/v1beta2/frontdoorroute + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + cdnFrontdoorProfileIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: cdn.azure.upbound.io/v1beta2 +kind: FrontdoorOrigin +metadata: + annotations: + meta.upbound.io/example-id: cdn/v1beta2/frontdoorroute + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + cdnFrontdoorOriginGroupIdSelector: + matchLabels: + testing.upbound.io/example-name: example + certificateNameCheckEnabled: false + enabled: true + hostNameSelector: + matchLabels: + testing.upbound.io/example-name: example + httpPort: 80 + httpsPort: 443 + originHostHeaderSelector: + matchLabels: + testing.upbound.io/example-name: example + priority: 1 + weight: 1 + +--- + +apiVersion: cdn.azure.upbound.io/v1beta2 +kind: FrontdoorOriginGroup +metadata: + annotations: + meta.upbound.io/example-id: cdn/v1beta2/frontdoorroute + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + cdnFrontdoorProfileIdSelector: + matchLabels: + testing.upbound.io/example-name: example + loadBalancing: + - additionalLatencyInMilliseconds: 0 + sampleSize: 16 + successfulSamplesRequired: 3 + +--- + +apiVersion: cdn.azure.upbound.io/v1beta1 +kind: FrontdoorProfile +metadata: + annotations: + meta.upbound.io/example-id: cdn/v1beta2/frontdoorroute + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: 
Standard_AzureFrontDoor + +--- + +apiVersion: cdn.azure.upbound.io/v1beta1 +kind: FrontdoorRuleSet +metadata: + annotations: + meta.upbound.io/example-id: cdn/v1beta2/frontdoorroute + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + cdnFrontdoorProfileIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: DNSZone +metadata: + annotations: + meta.upbound.io/example-id: cdn/v1beta2/frontdoorroute + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: cdn/v1beta2/frontdoorroute + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/cdn/v1beta2/frontdoorrule.yaml b/examples-generated/cdn/v1beta2/frontdoorrule.yaml new file mode 100644 index 000000000..029c9c985 --- /dev/null +++ b/examples-generated/cdn/v1beta2/frontdoorrule.yaml @@ -0,0 +1,193 @@ +apiVersion: cdn.azure.upbound.io/v1beta2 +kind: FrontdoorRule +metadata: + annotations: + meta.upbound.io/example-id: cdn/v1beta2/frontdoorrule + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + actions: + - routeConfigurationOverrideAction: + - cacheBehavior: OverrideIfOriginMissing + cacheDuration: 365.23:59:59 + cdnFrontdoorOriginGroupIdSelector: + matchLabels: + testing.upbound.io/example-name: example + compressionEnabled: true + forwardingProtocol: HttpsOnly + queryStringCachingBehavior: IncludeSpecifiedQueryStrings + queryStringParameters: + - foo + - clientIp={client_ip} + urlRedirectAction: + - destinationFragment: UrlRedirect + destinationHostname: contoso.com + destinationPath: /exampleredirection + queryString: 
clientIp={client_ip} + redirectProtocol: MatchRequest + redirectType: PermanentRedirect + behaviorOnMatch: Continue + cdnFrontdoorRuleSetIdSelector: + matchLabels: + testing.upbound.io/example-name: example + conditions: + - hostNameCondition: + - matchValues: + - www.contoso.com + - images.contoso.com + - video.contoso.com + negateCondition: false + operator: Equal + transforms: + - Lowercase + - Trim + isDeviceCondition: + - matchValues: + - Mobile + negateCondition: false + operator: Equal + postArgsCondition: + - matchValues: + - J + - K + operator: BeginsWith + postArgsName: customerName + transforms: + - Uppercase + requestMethodCondition: + - matchValues: + - DELETE + negateCondition: false + operator: Equal + urlFilenameCondition: + - matchValues: + - media.mp4 + negateCondition: false + operator: Equal + transforms: + - Lowercase + - RemoveNulls + - Trim + order: 1 + +--- + +apiVersion: cdn.azure.upbound.io/v1beta1 +kind: FrontdoorEndpoint +metadata: + annotations: + meta.upbound.io/example-id: cdn/v1beta2/frontdoorrule + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + cdnFrontdoorProfileIdSelector: + matchLabels: + testing.upbound.io/example-name: example + tags: + endpoint: contoso.com + +--- + +apiVersion: cdn.azure.upbound.io/v1beta2 +kind: FrontdoorOrigin +metadata: + annotations: + meta.upbound.io/example-id: cdn/v1beta2/frontdoorrule + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + cdnFrontdoorOriginGroupIdSelector: + matchLabels: + testing.upbound.io/example-name: example + certificateNameCheckEnabled: false + enabled: true + hostNameSelector: + matchLabels: + testing.upbound.io/example-name: example + httpPort: 80 + httpsPort: 443 + originHostHeaderSelector: + matchLabels: + testing.upbound.io/example-name: example + priority: 1 + weight: 500 + +--- + +apiVersion: cdn.azure.upbound.io/v1beta2 +kind: FrontdoorOriginGroup +metadata: + annotations: + 
meta.upbound.io/example-id: cdn/v1beta2/frontdoorrule + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + cdnFrontdoorProfileIdSelector: + matchLabels: + testing.upbound.io/example-name: example + healthProbe: + - intervalInSeconds: 240 + path: /healthProbe + protocol: Https + requestType: GET + loadBalancing: + - additionalLatencyInMilliseconds: 0 + sampleSize: 16 + successfulSamplesRequired: 3 + restoreTrafficTimeToHealedOrNewEndpointInMinutes: 10 + sessionAffinityEnabled: true + +--- + +apiVersion: cdn.azure.upbound.io/v1beta1 +kind: FrontdoorProfile +metadata: + annotations: + meta.upbound.io/example-id: cdn/v1beta2/frontdoorrule + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: Premium_AzureFrontDoor + +--- + +apiVersion: cdn.azure.upbound.io/v1beta1 +kind: FrontdoorRuleSet +metadata: + annotations: + meta.upbound.io/example-id: cdn/v1beta2/frontdoorrule + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + cdnFrontdoorProfileIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: cdn/v1beta2/frontdoorrule + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/cdn/v1beta2/frontdoorsecuritypolicy.yaml b/examples-generated/cdn/v1beta2/frontdoorsecuritypolicy.yaml new file mode 100644 index 000000000..1e6c895f4 --- /dev/null +++ b/examples-generated/cdn/v1beta2/frontdoorsecuritypolicy.yaml @@ -0,0 +1,134 @@ +apiVersion: cdn.azure.upbound.io/v1beta2 +kind: FrontdoorSecurityPolicy +metadata: + annotations: + meta.upbound.io/example-id: cdn/v1beta2/frontdoorsecuritypolicy + labels: + testing.upbound.io/example-name: example + 
name: example +spec: + forProvider: + cdnFrontdoorProfileIdSelector: + matchLabels: + testing.upbound.io/example-name: example + securityPolicies: + - firewall: + - association: + - domain: + - cdnFrontdoorDomainIdSelector: + matchLabels: + testing.upbound.io/example-name: example + patternsToMatch: + - /* + cdnFrontdoorFirewallPolicyIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: cdn.azure.upbound.io/v1beta2 +kind: FrontdoorCustomDomain +metadata: + annotations: + meta.upbound.io/example-id: cdn/v1beta2/frontdoorsecuritypolicy + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + cdnFrontdoorProfileIdSelector: + matchLabels: + testing.upbound.io/example-name: example + dnsZoneIdSelector: + matchLabels: + testing.upbound.io/example-name: example + hostName: contoso.fabrikam.com + tls: + - certificateType: ManagedCertificate + minimumTlsVersion: TLS12 + +--- + +apiVersion: cdn.azure.upbound.io/v1beta1 +kind: FrontdoorFirewallPolicy +metadata: + annotations: + meta.upbound.io/example-id: cdn/v1beta2/frontdoorsecuritypolicy + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + customBlockResponseBody: PGh0bWw+CjxoZWFkZXI+PHRpdGxlPkhlbGxvPC90aXRsZT48L2hlYWRlcj4KPGJvZHk+CkhlbGxvIHdvcmxkCjwvYm9keT4KPC9odG1sPg== + customBlockResponseStatusCode: 403 + customRule: + - action: Block + enabled: true + matchCondition: + - matchValues: + - 192.168.1.0/24 + - 10.0.1.0/24 + matchVariable: RemoteAddr + negationCondition: false + operator: IPMatch + name: Rule1 + priority: 1 + rateLimitDurationInMinutes: 1 + rateLimitThreshold: 10 + type: MatchRule + enabled: true + mode: Prevention + redirectUrl: https://www.contoso.com + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: cdn.azure.upbound.io/v1beta1 +kind: FrontdoorProfile 
+metadata: + annotations: + meta.upbound.io/example-id: cdn/v1beta2/frontdoorsecuritypolicy + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: Standard_AzureFrontDoor + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: DNSZone +metadata: + annotations: + meta.upbound.io/example-id: cdn/v1beta2/frontdoorsecuritypolicy + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: cdn/v1beta2/frontdoorsecuritypolicy + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/cognitiveservices/v1beta2/account.yaml b/examples-generated/cognitiveservices/v1beta2/account.yaml new file mode 100644 index 000000000..9c99f412c --- /dev/null +++ b/examples-generated/cognitiveservices/v1beta2/account.yaml @@ -0,0 +1,32 @@ +apiVersion: cognitiveservices.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: cognitiveservices/v1beta2/account + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + kind: Face + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: S0 + tags: + Acceptance: Test + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: cognitiveservices/v1beta2/account + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/cognitiveservices/v1beta2/deployment.yaml 
b/examples-generated/cognitiveservices/v1beta2/deployment.yaml new file mode 100644 index 000000000..f0d4cf0c2 --- /dev/null +++ b/examples-generated/cognitiveservices/v1beta2/deployment.yaml @@ -0,0 +1,52 @@ +apiVersion: cognitiveservices.azure.upbound.io/v1beta2 +kind: Deployment +metadata: + annotations: + meta.upbound.io/example-id: cognitiveservices/v1beta2/deployment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + cognitiveAccountIdSelector: + matchLabels: + testing.upbound.io/example-name: example + model: + - format: OpenAI + name: text-curie-001 + version: "1" + scale: + - type: Standard + +--- + +apiVersion: cognitiveservices.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: cognitiveservices/v1beta2/deployment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + kind: OpenAI + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: S0 + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: cognitiveservices/v1beta2/deployment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/compute/v1beta1/galleryapplication.yaml b/examples-generated/compute/v1beta1/galleryapplication.yaml index 752a60544..36729c18c 100644 --- a/examples-generated/compute/v1beta1/galleryapplication.yaml +++ b/examples-generated/compute/v1beta1/galleryapplication.yaml @@ -30,7 +30,7 @@ spec: --- -apiVersion: compute.azure.upbound.io/v1beta1 +apiVersion: compute.azure.upbound.io/v1beta2 kind: SharedImageGallery metadata: annotations: diff --git a/examples-generated/compute/v1beta1/manageddisksastoken.yaml b/examples-generated/compute/v1beta1/manageddisksastoken.yaml index 3db15d019..41d792734 100644 --- 
a/examples-generated/compute/v1beta1/manageddisksastoken.yaml +++ b/examples-generated/compute/v1beta1/manageddisksastoken.yaml @@ -16,7 +16,7 @@ spec: --- -apiVersion: compute.azure.upbound.io/v1beta1 +apiVersion: compute.azure.upbound.io/v1beta2 kind: ManagedDisk metadata: annotations: diff --git a/examples-generated/compute/v1beta1/virtualmachinedatadiskattachment.yaml b/examples-generated/compute/v1beta1/virtualmachinedatadiskattachment.yaml index b78b890f5..4021f0021 100644 --- a/examples-generated/compute/v1beta1/virtualmachinedatadiskattachment.yaml +++ b/examples-generated/compute/v1beta1/virtualmachinedatadiskattachment.yaml @@ -19,7 +19,7 @@ spec: --- -apiVersion: compute.azure.upbound.io/v1beta1 +apiVersion: compute.azure.upbound.io/v1beta2 kind: ManagedDisk metadata: annotations: @@ -76,7 +76,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: Subnet metadata: annotations: @@ -97,7 +97,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: VirtualNetwork metadata: annotations: diff --git a/examples-generated/compute/v1beta2/capacityreservation.yaml b/examples-generated/compute/v1beta2/capacityreservation.yaml new file mode 100644 index 000000000..d6ce5fee8 --- /dev/null +++ b/examples-generated/compute/v1beta2/capacityreservation.yaml @@ -0,0 +1,47 @@ +apiVersion: compute.azure.upbound.io/v1beta2 +kind: CapacityReservation +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/capacityreservation + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + capacityReservationGroupIdSelector: + matchLabels: + testing.upbound.io/example-name: example + sku: + - capacity: 1 + name: Standard_D2s_v3 + +--- + +apiVersion: compute.azure.upbound.io/v1beta1 +kind: CapacityReservationGroup +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/capacityreservation + labels: + 
testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/capacityreservation + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/compute/v1beta2/diskencryptionset.yaml b/examples-generated/compute/v1beta2/diskencryptionset.yaml new file mode 100644 index 000000000..976c77aea --- /dev/null +++ b/examples-generated/compute/v1beta2/diskencryptionset.yaml @@ -0,0 +1,153 @@ +apiVersion: compute.azure.upbound.io/v1beta2 +kind: DiskEncryptionSet +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/diskencryptionset + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + identity: + - type: SystemAssigned + keyVaultKeyIdSelector: + matchLabels: + testing.upbound.io/example-name: example + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: keyvault.azure.upbound.io/v1beta2 +kind: Vault +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/diskencryptionset + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + enabledForDiskEncryption: true + location: West Europe + purgeProtectionEnabled: true + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: premium + tenantId: ${data.azurerm_client_config.current.tenant_id} + +--- + +apiVersion: keyvault.azure.upbound.io/v1beta1 +kind: AccessPolicy +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/diskencryptionset + labels: + testing.upbound.io/example-name: example-disk + name: example-disk +spec: + forProvider: + 
keyPermissions: + - Create + - Delete + - Get + - Purge + - Recover + - Update + - List + - Decrypt + - Sign + keyVaultIdSelector: + matchLabels: + testing.upbound.io/example-name: example + objectId: ${azurerm_disk_encryption_set.example.identity[0].principal_id} + tenantId: ${azurerm_disk_encryption_set.example.identity[0].tenant_id} + +--- + +apiVersion: keyvault.azure.upbound.io/v1beta1 +kind: AccessPolicy +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/diskencryptionset + labels: + testing.upbound.io/example-name: example-user + name: example-user +spec: + forProvider: + keyPermissions: + - Create + - Delete + - Get + - Purge + - Recover + - Update + - List + - Decrypt + - Sign + - GetRotationPolicy + keyVaultIdSelector: + matchLabels: + testing.upbound.io/example-name: example + objectId: ${data.azurerm_client_config.current.object_id} + tenantId: ${data.azurerm_client_config.current.tenant_id} + +--- + +apiVersion: keyvault.azure.upbound.io/v1beta2 +kind: Key +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/diskencryptionset + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + keyOpts: + - decrypt + - encrypt + - sign + - unwrapKey + - verify + - wrapKey + keySize: 2048 + keyType: RSA + keyVaultIdSelector: + matchLabels: + testing.upbound.io/example-name: example + name: des-example-key + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/diskencryptionset + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: authorization.azure.upbound.io/v1beta1 +kind: RoleAssignment +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/diskencryptionset + labels: + testing.upbound.io/example-name: example-disk + name: example-disk +spec: + forProvider: + principalId: 
${azurerm_disk_encryption_set.example.identity[0].principal_id} + roleDefinitionName: Key Vault Crypto Service Encryption User + scope: ${azurerm_key_vault.example.id} diff --git a/examples-generated/compute/v1beta2/galleryapplicationversion.yaml b/examples-generated/compute/v1beta2/galleryapplicationversion.yaml new file mode 100644 index 000000000..5dc8a69e4 --- /dev/null +++ b/examples-generated/compute/v1beta2/galleryapplicationversion.yaml @@ -0,0 +1,133 @@ +apiVersion: compute.azure.upbound.io/v1beta2 +kind: GalleryApplicationVersion +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/galleryapplicationversion + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + galleryApplicationIdSelector: + matchLabels: + testing.upbound.io/example-name: example + location: West Europe + manageAction: + - install: '[install command]' + remove: '[remove command]' + name: 0.0.1 + source: + - mediaLinkSelector: + matchLabels: + testing.upbound.io/example-name: example + targetRegion: + - nameSelector: + matchLabels: + testing.upbound.io/example-name: example + regionalReplicaCount: 1 + +--- + +apiVersion: compute.azure.upbound.io/v1beta1 +kind: GalleryApplication +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/galleryapplicationversion + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + galleryIdSelector: + matchLabels: + testing.upbound.io/example-name: example + location: West Europe + supportedOsType: Linux + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/galleryapplicationversion + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: compute.azure.upbound.io/v1beta2 +kind: SharedImageGallery +metadata: + annotations: + meta.upbound.io/example-id: 
compute/v1beta2/galleryapplicationversion + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: storage.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/galleryapplicationversion + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountReplicationType: LRS + accountTier: Standard + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: storage.azure.upbound.io/v1beta1 +kind: Blob +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/galleryapplicationversion + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + sourceContent: '[scripts file content]' + storageAccountNameSelector: + matchLabels: + testing.upbound.io/example-name: example + storageContainerNameSelector: + matchLabels: + testing.upbound.io/example-name: example + type: Block + +--- + +apiVersion: storage.azure.upbound.io/v1beta1 +kind: Container +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/galleryapplicationversion + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + containerAccessType: blob + storageAccountNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/compute/v1beta2/image.yaml b/examples-generated/compute/v1beta2/image.yaml new file mode 100644 index 000000000..ffb945b7e --- /dev/null +++ b/examples-generated/compute/v1beta2/image.yaml @@ -0,0 +1,15 @@ +apiVersion: compute.azure.upbound.io/v1beta2 +kind: Image +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/image + labels: + testing.upbound.io/example-name: example + name: example +spec: + 
forProvider: + location: ${data.azurerm_virtual_machine.example.location} + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: azurerm_virtual_machine + sourceVirtualMachineId: ${data.azurerm_virtual_machine.example.id} diff --git a/examples-generated/compute/v1beta2/linuxvirtualmachine.yaml b/examples-generated/compute/v1beta2/linuxvirtualmachine.yaml new file mode 100644 index 000000000..b8b487944 --- /dev/null +++ b/examples-generated/compute/v1beta2/linuxvirtualmachine.yaml @@ -0,0 +1,106 @@ +apiVersion: compute.azure.upbound.io/v1beta2 +kind: LinuxVirtualMachine +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/linuxvirtualmachine + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + adminSshKey: + - publicKey: ${file("~/.ssh/id_rsa.pub")} + username: adminuser + adminUsername: adminuser + location: West Europe + networkInterfaceIdsRefs: + - name: example + osDisk: + - caching: ReadWrite + storageAccountType: Standard_LRS + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + size: Standard_F2 + sourceImageReference: + - offer: 0001-com-ubuntu-server-jammy + publisher: Canonical + sku: 22_04-lts + version: latest + +--- + +apiVersion: network.azure.upbound.io/v1beta1 +kind: NetworkInterface +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/linuxvirtualmachine + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + ipConfiguration: + - name: internal + privateIpAddressAllocation: Dynamic + subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: example + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/linuxvirtualmachine + labels: + testing.upbound.io/example-name: example + 
name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/linuxvirtualmachine + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressPrefixes: + - 10.0.2.0/24 + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + virtualNetworkNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: VirtualNetwork +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/linuxvirtualmachine + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressSpace: + - 10.0.0.0/16 + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/compute/v1beta2/linuxvirtualmachinescaleset.yaml b/examples-generated/compute/v1beta2/linuxvirtualmachinescaleset.yaml new file mode 100644 index 000000000..4e4ebf48d --- /dev/null +++ b/examples-generated/compute/v1beta2/linuxvirtualmachinescaleset.yaml @@ -0,0 +1,91 @@ +apiVersion: compute.azure.upbound.io/v1beta2 +kind: LinuxVirtualMachineScaleSet +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/linuxvirtualmachinescaleset + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + adminSshKey: + - publicKey: ${local.first_public_key} + username: adminuser + adminUsername: adminuser + instances: 1 + location: West Europe + networkInterface: + - ipConfiguration: + - name: internal + primary: true + subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: internal + name: example + primary: true + osDisk: + - caching: ReadWrite + storageAccountType: Standard_LRS + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sku: 
Standard_F2 + sourceImageReference: + - offer: 0001-com-ubuntu-server-jammy + publisher: Canonical + sku: 22_04-lts + version: latest + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/linuxvirtualmachinescaleset + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/linuxvirtualmachinescaleset + labels: + testing.upbound.io/example-name: internal + name: internal +spec: + forProvider: + addressPrefixes: + - 10.0.2.0/24 + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + virtualNetworkNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: VirtualNetwork +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/linuxvirtualmachinescaleset + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressSpace: + - 10.0.0.0/16 + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/compute/v1beta2/manageddisk.yaml b/examples-generated/compute/v1beta2/manageddisk.yaml new file mode 100644 index 000000000..0ab682bdd --- /dev/null +++ b/examples-generated/compute/v1beta2/manageddisk.yaml @@ -0,0 +1,33 @@ +apiVersion: compute.azure.upbound.io/v1beta2 +kind: ManagedDisk +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/manageddisk + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + createOption: Empty + diskSizeGb: "1" + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + storageAccountType: Standard_LRS + tags: + 
environment: staging + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/manageddisk + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/compute/v1beta2/orchestratedvirtualmachinescaleset.yaml b/examples-generated/compute/v1beta2/orchestratedvirtualmachinescaleset.yaml new file mode 100644 index 000000000..e4502a50e --- /dev/null +++ b/examples-generated/compute/v1beta2/orchestratedvirtualmachinescaleset.yaml @@ -0,0 +1,31 @@ +apiVersion: compute.azure.upbound.io/v1beta2 +kind: OrchestratedVirtualMachineScaleSet +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/orchestratedvirtualmachinescaleset + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + platformFaultDomainCount: 1 + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + zones: + - "1" + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/orchestratedvirtualmachinescaleset + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/compute/v1beta2/sharedimage.yaml b/examples-generated/compute/v1beta2/sharedimage.yaml new file mode 100644 index 000000000..024d45671 --- /dev/null +++ b/examples-generated/compute/v1beta2/sharedimage.yaml @@ -0,0 +1,57 @@ +apiVersion: compute.azure.upbound.io/v1beta2 +kind: SharedImage +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/sharedimage + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + galleryNameSelector: + matchLabels: + testing.upbound.io/example-name: example + identifier: + - offer: OfferName + publisher: PublisherName + sku: 
ExampleSku + location: West Europe + osType: Linux + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/sharedimage + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: compute.azure.upbound.io/v1beta2 +kind: SharedImageGallery +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/sharedimage + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + description: Shared images and things. + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + tags: + Hello: There + World: Example diff --git a/examples-generated/compute/v1beta2/sharedimagegallery.yaml b/examples-generated/compute/v1beta2/sharedimagegallery.yaml new file mode 100644 index 000000000..5fada42a0 --- /dev/null +++ b/examples-generated/compute/v1beta2/sharedimagegallery.yaml @@ -0,0 +1,32 @@ +apiVersion: compute.azure.upbound.io/v1beta2 +kind: SharedImageGallery +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/sharedimagegallery + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + description: Shared images and things. 
+ location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + tags: + Hello: There + World: Example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/sharedimagegallery + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/compute/v1beta2/snapshot.yaml b/examples-generated/compute/v1beta2/snapshot.yaml new file mode 100644 index 000000000..89142090b --- /dev/null +++ b/examples-generated/compute/v1beta2/snapshot.yaml @@ -0,0 +1,52 @@ +apiVersion: compute.azure.upbound.io/v1beta2 +kind: Snapshot +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/snapshot + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + createOption: Copy + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sourceUriSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: compute.azure.upbound.io/v1beta2 +kind: ManagedDisk +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/snapshot + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + createOption: Empty + diskSizeGb: "10" + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + storageAccountType: Standard_LRS + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/snapshot + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/compute/v1beta2/virtualmachineextension.yaml b/examples-generated/compute/v1beta2/virtualmachineextension.yaml new file mode 100644 index 
000000000..46e95cc93 --- /dev/null +++ b/examples-generated/compute/v1beta2/virtualmachineextension.yaml @@ -0,0 +1,131 @@ +apiVersion: compute.azure.upbound.io/v1beta2 +kind: VirtualMachineExtension +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/virtualmachineextension + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + publisher: Microsoft.Azure.Extensions + settings: |2 + { + "commandToExecute": "hostname && uptime" + } + tags: + environment: Production + type: CustomScript + typeHandlerVersion: "2.0" + virtualMachineIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: compute.azure.upbound.io/v1beta2 +kind: LinuxVirtualMachine +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/virtualmachineextension + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + adminSshKey: + - publicKey: ${file("~/.ssh/id_rsa.pub")} + username: adminuser + adminUsername: adminuser + location: West Europe + networkInterfaceIdsRefs: + - name: example + osDisk: + - caching: ReadWrite + storageAccountType: Standard_LRS + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + size: Standard_F2 + sourceImageReference: + - offer: 0001-com-ubuntu-server-jammy + publisher: Canonical + sku: 22_04-lts + version: latest + +--- + +apiVersion: network.azure.upbound.io/v1beta1 +kind: NetworkInterface +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/virtualmachineextension + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + ipConfiguration: + - name: testconfiguration1 + privateIpAddressAllocation: Dynamic + subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: example + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: 
ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/virtualmachineextension + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/virtualmachineextension + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressPrefixes: + - 10.0.2.0/24 + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + virtualNetworkNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: VirtualNetwork +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/virtualmachineextension + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressSpace: + - 10.0.0.0/16 + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/compute/v1beta2/virtualmachineruncommand.yaml b/examples-generated/compute/v1beta2/virtualmachineruncommand.yaml new file mode 100644 index 000000000..3a2310c57 --- /dev/null +++ b/examples-generated/compute/v1beta2/virtualmachineruncommand.yaml @@ -0,0 +1,262 @@ +apiVersion: compute.azure.upbound.io/v1beta2 +kind: VirtualMachineRunCommand +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/virtualmachineruncommand + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + source: + - script: echo 'hello world' + virtualMachineIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: compute.azure.upbound.io/v1beta2 +kind: LinuxVirtualMachine +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/virtualmachineruncommand + labels: 
+ testing.upbound.io/example-name: example + name: example +spec: + forProvider: + adminPasswordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + adminUsername: adminuser + disablePasswordAuthentication: false + identity: + - identityIds: + - ${azurerm_user_assigned_identity.example.id} + type: SystemAssigned, UserAssigned + location: West Europe + networkInterfaceIdsRefs: + - name: example + osDisk: + - caching: ReadWrite + storageAccountType: Premium_LRS + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + size: Standard_B2s + sourceImageReference: + - offer: 0001-com-ubuntu-server-jammy + publisher: Canonical + sku: 22_04-lts + version: latest + +--- + +apiVersion: network.azure.upbound.io/v1beta1 +kind: NetworkInterface +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/virtualmachineruncommand + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + ipConfiguration: + - name: internal + privateIpAddressAllocation: Dynamic + subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: example + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/virtualmachineruncommand + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: authorization.azure.upbound.io/v1beta1 +kind: RoleAssignment +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/virtualmachineruncommand + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + principalId: ${azurerm_user_assigned_identity.example.principal_id} + roleDefinitionName: Storage Blob Data Contributor + scope: ${azurerm_storage_account.example.id} + +--- + +apiVersion: 
storage.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/virtualmachineruncommand + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountReplicationType: LRS + accountTier: Standard + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: storage.azure.upbound.io/v1beta1 +kind: Blob +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/virtualmachineruncommand + labels: + testing.upbound.io/example-name: example1 + name: example1 +spec: + forProvider: + sourceContent: echo 'hello world' + storageAccountNameSelector: + matchLabels: + testing.upbound.io/example-name: example + storageContainerNameSelector: + matchLabels: + testing.upbound.io/example-name: example + type: Block + +--- + +apiVersion: storage.azure.upbound.io/v1beta1 +kind: Blob +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/virtualmachineruncommand + labels: + testing.upbound.io/example-name: example2 + name: example2 +spec: + forProvider: + storageAccountNameSelector: + matchLabels: + testing.upbound.io/example-name: example + storageContainerNameSelector: + matchLabels: + testing.upbound.io/example-name: example + type: Append + +--- + +apiVersion: storage.azure.upbound.io/v1beta1 +kind: Blob +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/virtualmachineruncommand + labels: + testing.upbound.io/example-name: example3 + name: example3 +spec: + forProvider: + storageAccountNameSelector: + matchLabels: + testing.upbound.io/example-name: example + storageContainerNameSelector: + matchLabels: + testing.upbound.io/example-name: example + type: Append + +--- + +apiVersion: storage.azure.upbound.io/v1beta1 +kind: Container +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/virtualmachineruncommand + labels: + testing.upbound.io/example-name: 
example + name: example +spec: + forProvider: + containerAccessType: blob + storageAccountNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/virtualmachineruncommand + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressPrefixes: + - 10.0.2.0/24 + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + virtualNetworkNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: managedidentity.azure.upbound.io/v1beta1 +kind: UserAssignedIdentity +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/virtualmachineruncommand + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + name: example-uai + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: VirtualNetwork +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/virtualmachineruncommand + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressSpace: + - 10.0.0.0/16 + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/compute/v1beta2/windowsvirtualmachine.yaml b/examples-generated/compute/v1beta2/windowsvirtualmachine.yaml new file mode 100644 index 000000000..deebc95ca --- /dev/null +++ b/examples-generated/compute/v1beta2/windowsvirtualmachine.yaml @@ -0,0 +1,107 @@ +apiVersion: compute.azure.upbound.io/v1beta2 +kind: WindowsVirtualMachine +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/windowsvirtualmachine + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + 
adminPasswordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + adminUsername: adminuser + location: West Europe + networkInterfaceIdsRefs: + - name: example + osDisk: + - caching: ReadWrite + storageAccountType: Standard_LRS + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + size: Standard_F2 + sourceImageReference: + - offer: WindowsServer + publisher: MicrosoftWindowsServer + sku: 2016-Datacenter + version: latest + +--- + +apiVersion: network.azure.upbound.io/v1beta1 +kind: NetworkInterface +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/windowsvirtualmachine + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + ipConfiguration: + - name: internal + privateIpAddressAllocation: Dynamic + subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: example + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/windowsvirtualmachine + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/windowsvirtualmachine + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressPrefixes: + - 10.0.2.0/24 + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + virtualNetworkNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: VirtualNetwork +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/windowsvirtualmachine + labels: + testing.upbound.io/example-name: example + name: 
example +spec: + forProvider: + addressSpace: + - 10.0.0.0/16 + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/compute/v1beta2/windowsvirtualmachinescaleset.yaml b/examples-generated/compute/v1beta2/windowsvirtualmachinescaleset.yaml new file mode 100644 index 000000000..a9c1ff750 --- /dev/null +++ b/examples-generated/compute/v1beta2/windowsvirtualmachinescaleset.yaml @@ -0,0 +1,93 @@ +apiVersion: compute.azure.upbound.io/v1beta2 +kind: WindowsVirtualMachineScaleSet +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/windowsvirtualmachinescaleset + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + adminPasswordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + adminUsername: adminuser + computerNamePrefix: vm- + instances: 1 + location: West Europe + networkInterface: + - ipConfiguration: + - name: internal + primary: true + subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: internal + name: example + primary: true + osDisk: + - caching: ReadWrite + storageAccountType: Standard_LRS + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sku: Standard_F2 + sourceImageReference: + - offer: WindowsServer + publisher: MicrosoftWindowsServer + sku: 2016-Datacenter-Server-Core + version: latest + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/windowsvirtualmachinescaleset + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/windowsvirtualmachinescaleset + labels: + testing.upbound.io/example-name: internal + name: internal +spec: + forProvider: + 
addressPrefixes: + - 10.0.2.0/24 + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + virtualNetworkNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: VirtualNetwork +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/windowsvirtualmachinescaleset + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressSpace: + - 10.0.0.0/16 + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/consumption/v1beta2/budgetmanagementgroup.yaml b/examples-generated/consumption/v1beta2/budgetmanagementgroup.yaml new file mode 100644 index 000000000..cee0faa89 --- /dev/null +++ b/examples-generated/consumption/v1beta2/budgetmanagementgroup.yaml @@ -0,0 +1,71 @@ +apiVersion: consumption.azure.upbound.io/v1beta2 +kind: BudgetManagementGroup +metadata: + annotations: + meta.upbound.io/example-id: consumption/v1beta2/budgetmanagementgroup + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + amount: 1000 + filter: + - dimension: + - name: ResourceGroupName + values: + - ${azurerm_resource_group.example.name} + tag: + - name: foo + values: + - bar + - baz + managementGroupIdSelector: + matchLabels: + testing.upbound.io/example-name: example + name: example + notification: + - contactEmails: + - foo@example.com + - bar@example.com + enabled: true + operator: EqualTo + threshold: 90 + - contactEmails: + - foo@example.com + - bar@example.com + enabled: false + operator: GreaterThan + threshold: 100 + thresholdType: Forecasted + timeGrain: Monthly + timePeriod: + - endDate: "2022-07-01T00:00:00Z" + startDate: "2022-06-01T00:00:00Z" + +--- + +apiVersion: management.azure.upbound.io/v1beta1 +kind: ManagementGroup +metadata: + annotations: + meta.upbound.io/example-id: 
consumption/v1beta2/budgetmanagementgroup + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + displayName: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: consumption/v1beta2/budgetmanagementgroup + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: eastus diff --git a/examples-generated/consumption/v1beta2/budgetresourcegroup.yaml b/examples-generated/consumption/v1beta2/budgetresourcegroup.yaml new file mode 100644 index 000000000..a246caf13 --- /dev/null +++ b/examples-generated/consumption/v1beta2/budgetresourcegroup.yaml @@ -0,0 +1,78 @@ +apiVersion: consumption.azure.upbound.io/v1beta2 +kind: BudgetResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: consumption/v1beta2/budgetresourcegroup + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + amount: 1000 + filter: + - dimension: + - name: ResourceId + values: + - ${azurerm_monitor_action_group.example.id} + tag: + - name: foo + values: + - bar + - baz + name: example + notification: + - contactEmails: + - foo@example.com + - bar@example.com + contactGroups: + - ${azurerm_monitor_action_group.example.id} + contactRoles: + - Owner + enabled: true + operator: EqualTo + threshold: 90 + thresholdType: Forecasted + - contactEmails: + - foo@example.com + - bar@example.com + enabled: false + operator: GreaterThan + threshold: 100 + resourceGroupIdSelector: + matchLabels: + testing.upbound.io/example-name: example + timeGrain: Monthly + timePeriod: + - endDate: "2022-07-01T00:00:00Z" + startDate: "2022-06-01T00:00:00Z" + +--- + +apiVersion: insights.azure.upbound.io/v1beta2 +kind: MonitorActionGroup +metadata: + annotations: + meta.upbound.io/example-id: consumption/v1beta2/budgetresourcegroup + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + 
resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + shortName: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: consumption/v1beta2/budgetresourcegroup + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: eastus diff --git a/examples-generated/consumption/v1beta2/budgetsubscription.yaml b/examples-generated/consumption/v1beta2/budgetsubscription.yaml new file mode 100644 index 000000000..e73763924 --- /dev/null +++ b/examples-generated/consumption/v1beta2/budgetsubscription.yaml @@ -0,0 +1,75 @@ +apiVersion: consumption.azure.upbound.io/v1beta2 +kind: BudgetSubscription +metadata: + annotations: + meta.upbound.io/example-id: consumption/v1beta2/budgetsubscription + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + amount: 1000 + filter: + - dimension: + - name: ResourceGroupName + values: + - ${azurerm_resource_group.example.name} + tag: + - name: foo + values: + - bar + - baz + notification: + - contactEmails: + - foo@example.com + - bar@example.com + contactGroupsRefs: + - name: example + contactRoles: + - Owner + enabled: true + operator: EqualTo + threshold: 90 + - contactEmails: + - foo@example.com + - bar@example.com + enabled: false + operator: GreaterThan + threshold: 100 + thresholdType: Forecasted + subscriptionId: ${data.azurerm_subscription.current.id} + timeGrain: Monthly + timePeriod: + - endDate: "2022-07-01T00:00:00Z" + startDate: "2022-06-01T00:00:00Z" + +--- + +apiVersion: insights.azure.upbound.io/v1beta2 +kind: MonitorActionGroup +metadata: + annotations: + meta.upbound.io/example-id: consumption/v1beta2/budgetsubscription + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + shortName: example + +--- + +apiVersion: 
azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: consumption/v1beta2/budgetsubscription + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: eastus diff --git a/examples-generated/containerapp/v1beta1/environment.yaml b/examples-generated/containerapp/v1beta1/environment.yaml index 8ec63d0c6..6285e4ede 100644 --- a/examples-generated/containerapp/v1beta1/environment.yaml +++ b/examples-generated/containerapp/v1beta1/environment.yaml @@ -18,7 +18,7 @@ spec: --- -apiVersion: operationalinsights.azure.upbound.io/v1beta1 +apiVersion: operationalinsights.azure.upbound.io/v1beta2 kind: Workspace metadata: annotations: diff --git a/examples-generated/containerapp/v1beta2/containerapp.yaml b/examples-generated/containerapp/v1beta2/containerapp.yaml new file mode 100644 index 000000000..dbc550990 --- /dev/null +++ b/examples-generated/containerapp/v1beta2/containerapp.yaml @@ -0,0 +1,76 @@ +apiVersion: containerapp.azure.upbound.io/v1beta2 +kind: ContainerApp +metadata: + annotations: + meta.upbound.io/example-id: containerapp/v1beta2/containerapp + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + containerAppEnvironmentIdSelector: + matchLabels: + testing.upbound.io/example-name: example + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + revisionMode: Single + template: + - container: + - cpu: 0.25 + image: mcr.microsoft.com/azuredocs/containerapps-helloworld:latest + memory: 0.5Gi + name: examplecontainerapp + +--- + +apiVersion: containerapp.azure.upbound.io/v1beta1 +kind: Environment +metadata: + annotations: + meta.upbound.io/example-id: containerapp/v1beta2/containerapp + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + logAnalyticsWorkspaceIdSelector: + matchLabels: + testing.upbound.io/example-name: example + 
resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: operationalinsights.azure.upbound.io/v1beta2 +kind: Workspace +metadata: + annotations: + meta.upbound.io/example-id: containerapp/v1beta2/containerapp + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + retentionInDays: 30 + sku: PerGB2018 + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: containerapp/v1beta2/containerapp + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/containerregistry/v1beta1/agentpool.yaml b/examples-generated/containerregistry/v1beta1/agentpool.yaml index aa261e53d..316c5e79c 100644 --- a/examples-generated/containerregistry/v1beta1/agentpool.yaml +++ b/examples-generated/containerregistry/v1beta1/agentpool.yaml @@ -18,7 +18,7 @@ spec: --- -apiVersion: containerregistry.azure.upbound.io/v1beta1 +apiVersion: containerregistry.azure.upbound.io/v1beta2 kind: Registry metadata: annotations: diff --git a/examples-generated/containerregistry/v1beta1/containerconnectedregistry.yaml b/examples-generated/containerregistry/v1beta1/containerconnectedregistry.yaml index 3c017bfc2..c7084aab2 100644 --- a/examples-generated/containerregistry/v1beta1/containerconnectedregistry.yaml +++ b/examples-generated/containerregistry/v1beta1/containerconnectedregistry.yaml @@ -17,7 +17,7 @@ spec: --- -apiVersion: containerregistry.azure.upbound.io/v1beta1 +apiVersion: containerregistry.azure.upbound.io/v1beta2 kind: Registry metadata: annotations: diff --git a/examples-generated/containerregistry/v1beta1/scopemap.yaml b/examples-generated/containerregistry/v1beta1/scopemap.yaml index e9b2909a9..d7ecbdfe4 100644 --- 
a/examples-generated/containerregistry/v1beta1/scopemap.yaml +++ b/examples-generated/containerregistry/v1beta1/scopemap.yaml @@ -20,7 +20,7 @@ spec: --- -apiVersion: containerregistry.azure.upbound.io/v1beta1 +apiVersion: containerregistry.azure.upbound.io/v1beta2 kind: Registry metadata: annotations: diff --git a/examples-generated/containerregistry/v1beta1/token.yaml b/examples-generated/containerregistry/v1beta1/token.yaml index 3b721df49..1d1058d38 100644 --- a/examples-generated/containerregistry/v1beta1/token.yaml +++ b/examples-generated/containerregistry/v1beta1/token.yaml @@ -20,7 +20,7 @@ spec: --- -apiVersion: containerregistry.azure.upbound.io/v1beta1 +apiVersion: containerregistry.azure.upbound.io/v1beta2 kind: Registry metadata: annotations: diff --git a/examples-generated/containerregistry/v1beta1/webhook.yaml b/examples-generated/containerregistry/v1beta1/webhook.yaml index a17e8d8df..b32797ea1 100644 --- a/examples-generated/containerregistry/v1beta1/webhook.yaml +++ b/examples-generated/containerregistry/v1beta1/webhook.yaml @@ -25,7 +25,7 @@ spec: --- -apiVersion: containerregistry.azure.upbound.io/v1beta1 +apiVersion: containerregistry.azure.upbound.io/v1beta2 kind: Registry metadata: annotations: diff --git a/examples-generated/containerregistry/v1beta2/registry.yaml b/examples-generated/containerregistry/v1beta2/registry.yaml new file mode 100644 index 000000000..d75d4c06e --- /dev/null +++ b/examples-generated/containerregistry/v1beta2/registry.yaml @@ -0,0 +1,37 @@ +apiVersion: containerregistry.azure.upbound.io/v1beta2 +kind: Registry +metadata: + annotations: + meta.upbound.io/example-id: containerregistry/v1beta2/registry + labels: + testing.upbound.io/example-name: acr + name: acr +spec: + forProvider: + adminEnabled: false + georeplications: + - location: East US + tags: {} + zoneRedundancyEnabled: true + - location: North Europe + tags: {} + zoneRedundancyEnabled: true + location: West Europe + resourceGroupNameSelector: + 
matchLabels: + testing.upbound.io/example-name: example + sku: Premium + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: containerregistry/v1beta2/registry + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/containerregistry/v1beta2/tokenpassword.yaml b/examples-generated/containerregistry/v1beta2/tokenpassword.yaml new file mode 100644 index 000000000..1cadaf5c4 --- /dev/null +++ b/examples-generated/containerregistry/v1beta2/tokenpassword.yaml @@ -0,0 +1,95 @@ +apiVersion: containerregistry.azure.upbound.io/v1beta2 +kind: TokenPassword +metadata: + annotations: + meta.upbound.io/example-id: containerregistry/v1beta2/tokenpassword + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + containerRegistryTokenIdSelector: + matchLabels: + testing.upbound.io/example-name: example + password1: + - expiry: "2023-03-22T17:57:36+08:00" + +--- + +apiVersion: containerregistry.azure.upbound.io/v1beta2 +kind: Registry +metadata: + annotations: + meta.upbound.io/example-id: containerregistry/v1beta2/tokenpassword + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + adminEnabled: false + georeplication_locations: + - East US + - West Europe + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sku: Premium + +--- + +apiVersion: containerregistry.azure.upbound.io/v1beta1 +kind: ScopeMap +metadata: + annotations: + meta.upbound.io/example-id: containerregistry/v1beta2/tokenpassword + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + actions: + - repositories/repo1/content/read + - repositories/repo1/content/write + containerRegistryNameSelector: + matchLabels: + testing.upbound.io/example-name: example + resourceGroupNameSelector: + 
matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: containerregistry.azure.upbound.io/v1beta1 +kind: Token +metadata: + annotations: + meta.upbound.io/example-id: containerregistry/v1beta2/tokenpassword + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + containerRegistryNameSelector: + matchLabels: + testing.upbound.io/example-name: example + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + scopeMapIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: containerregistry/v1beta2/tokenpassword + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/containerservice/v1beta2/kubernetescluster.yaml b/examples-generated/containerservice/v1beta2/kubernetescluster.yaml new file mode 100644 index 000000000..bef2f40fe --- /dev/null +++ b/examples-generated/containerservice/v1beta2/kubernetescluster.yaml @@ -0,0 +1,37 @@ +apiVersion: containerservice.azure.upbound.io/v1beta2 +kind: KubernetesCluster +metadata: + annotations: + meta.upbound.io/example-id: containerservice/v1beta2/kubernetescluster + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + defaultNodePool: + - name: default + nodeCount: 1 + vmSize: Standard_D2_v2 + dnsPrefix: exampleaks1 + identity: + - type: SystemAssigned + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + tags: + Environment: Production + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: containerservice/v1beta2/kubernetescluster + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff 
--git a/examples-generated/containerservice/v1beta2/kubernetesclusternodepool.yaml b/examples-generated/containerservice/v1beta2/kubernetesclusternodepool.yaml new file mode 100644 index 000000000..276feb145 --- /dev/null +++ b/examples-generated/containerservice/v1beta2/kubernetesclusternodepool.yaml @@ -0,0 +1,59 @@ +apiVersion: containerservice.azure.upbound.io/v1beta2 +kind: KubernetesClusterNodePool +metadata: + annotations: + meta.upbound.io/example-id: containerservice/v1beta2/kubernetesclusternodepool + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + kubernetesClusterIdSelector: + matchLabels: + testing.upbound.io/example-name: example + nodeCount: 1 + tags: + Environment: Production + vmSize: Standard_DS2_v2 + +--- + +apiVersion: containerservice.azure.upbound.io/v1beta2 +kind: KubernetesCluster +metadata: + annotations: + meta.upbound.io/example-id: containerservice/v1beta2/kubernetesclusternodepool + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + defaultNodePool: + - name: default + nodeCount: 1 + vmSize: Standard_D2_v2 + dnsPrefix: exampleaks1 + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + servicePrincipal: + - clientId: 00000000-0000-0000-0000-000000000000 + clientSecretSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: containerservice/v1beta2/kubernetesclusternodepool + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/containerservice/v1beta2/kubernetesfleetmanager.yaml b/examples-generated/containerservice/v1beta2/kubernetesfleetmanager.yaml new file mode 100644 index 000000000..41c6cbc04 --- /dev/null +++ 
b/examples-generated/containerservice/v1beta2/kubernetesfleetmanager.yaml @@ -0,0 +1,28 @@ +apiVersion: containerservice.azure.upbound.io/v1beta2 +kind: KubernetesFleetManager +metadata: + annotations: + meta.upbound.io/example-id: containerservice/v1beta2/kubernetesfleetmanager + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: containerservice/v1beta2/kubernetesfleetmanager + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/cosmosdb/v1beta1/cassandradatacenter.yaml b/examples-generated/cosmosdb/v1beta1/cassandradatacenter.yaml index 9b42e8ad7..3781f52b9 100644 --- a/examples-generated/cosmosdb/v1beta1/cassandradatacenter.yaml +++ b/examples-generated/cosmosdb/v1beta1/cassandradatacenter.yaml @@ -22,7 +22,7 @@ spec: --- -apiVersion: cosmosdb.azure.upbound.io/v1beta1 +apiVersion: cosmosdb.azure.upbound.io/v1beta2 kind: CassandraCluster metadata: annotations: @@ -76,7 +76,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: Subnet metadata: annotations: @@ -97,7 +97,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: VirtualNetwork metadata: annotations: diff --git a/examples-generated/cosmosdb/v1beta1/sqldedicatedgateway.yaml b/examples-generated/cosmosdb/v1beta1/sqldedicatedgateway.yaml index 8a14c8213..ec87abe40 100644 --- a/examples-generated/cosmosdb/v1beta1/sqldedicatedgateway.yaml +++ b/examples-generated/cosmosdb/v1beta1/sqldedicatedgateway.yaml @@ -16,7 +16,7 @@ spec: --- -apiVersion: cosmosdb.azure.upbound.io/v1beta1 +apiVersion: cosmosdb.azure.upbound.io/v1beta2 kind: 
Account metadata: annotations: diff --git a/examples-generated/cosmosdb/v1beta1/sqlfunction.yaml b/examples-generated/cosmosdb/v1beta1/sqlfunction.yaml index 802659401..4e68d62c6 100644 --- a/examples-generated/cosmosdb/v1beta1/sqlfunction.yaml +++ b/examples-generated/cosmosdb/v1beta1/sqlfunction.yaml @@ -15,7 +15,7 @@ spec: --- -apiVersion: cosmosdb.azure.upbound.io/v1beta1 +apiVersion: cosmosdb.azure.upbound.io/v1beta2 kind: SQLContainer metadata: annotations: @@ -38,7 +38,7 @@ spec: --- -apiVersion: cosmosdb.azure.upbound.io/v1beta1 +apiVersion: cosmosdb.azure.upbound.io/v1beta2 kind: SQLDatabase metadata: annotations: diff --git a/examples-generated/cosmosdb/v1beta1/sqlroleassignment.yaml b/examples-generated/cosmosdb/v1beta1/sqlroleassignment.yaml index 980e033ee..e7f668a7c 100644 --- a/examples-generated/cosmosdb/v1beta1/sqlroleassignment.yaml +++ b/examples-generated/cosmosdb/v1beta1/sqlroleassignment.yaml @@ -25,7 +25,7 @@ spec: --- -apiVersion: cosmosdb.azure.upbound.io/v1beta1 +apiVersion: cosmosdb.azure.upbound.io/v1beta2 kind: Account metadata: annotations: diff --git a/examples-generated/cosmosdb/v1beta1/sqlroledefinition.yaml b/examples-generated/cosmosdb/v1beta1/sqlroledefinition.yaml index 9222f71d2..2f6aadc67 100644 --- a/examples-generated/cosmosdb/v1beta1/sqlroledefinition.yaml +++ b/examples-generated/cosmosdb/v1beta1/sqlroledefinition.yaml @@ -24,7 +24,7 @@ spec: --- -apiVersion: cosmosdb.azure.upbound.io/v1beta1 +apiVersion: cosmosdb.azure.upbound.io/v1beta2 kind: Account metadata: annotations: diff --git a/examples-generated/cosmosdb/v1beta1/sqlstoredprocedure.yaml b/examples-generated/cosmosdb/v1beta1/sqlstoredprocedure.yaml index 6441b9af0..57e4801af 100644 --- a/examples-generated/cosmosdb/v1beta1/sqlstoredprocedure.yaml +++ b/examples-generated/cosmosdb/v1beta1/sqlstoredprocedure.yaml @@ -25,7 +25,7 @@ spec: --- -apiVersion: cosmosdb.azure.upbound.io/v1beta1 +apiVersion: cosmosdb.azure.upbound.io/v1beta2 kind: SQLContainer metadata: 
annotations: @@ -48,7 +48,7 @@ spec: --- -apiVersion: cosmosdb.azure.upbound.io/v1beta1 +apiVersion: cosmosdb.azure.upbound.io/v1beta2 kind: SQLDatabase metadata: annotations: diff --git a/examples-generated/cosmosdb/v1beta1/sqltrigger.yaml b/examples-generated/cosmosdb/v1beta1/sqltrigger.yaml index e07452074..05e008d7d 100644 --- a/examples-generated/cosmosdb/v1beta1/sqltrigger.yaml +++ b/examples-generated/cosmosdb/v1beta1/sqltrigger.yaml @@ -17,7 +17,7 @@ spec: --- -apiVersion: cosmosdb.azure.upbound.io/v1beta1 +apiVersion: cosmosdb.azure.upbound.io/v1beta2 kind: SQLContainer metadata: annotations: @@ -40,7 +40,7 @@ spec: --- -apiVersion: cosmosdb.azure.upbound.io/v1beta1 +apiVersion: cosmosdb.azure.upbound.io/v1beta2 kind: SQLDatabase metadata: annotations: diff --git a/examples-generated/cosmosdb/v1beta2/account.yaml b/examples-generated/cosmosdb/v1beta2/account.yaml new file mode 100644 index 000000000..fed0949ef --- /dev/null +++ b/examples-generated/cosmosdb/v1beta2/account.yaml @@ -0,0 +1,45 @@ +apiVersion: cosmosdb.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: cosmosdb/v1beta2/account + labels: + testing.upbound.io/example-name: db + name: db +spec: + forProvider: + capabilities: + - name: EnableAggregationPipeline + - name: mongoEnableDocLevelTTL + - name: MongoDBv3.4 + - name: EnableMongo + consistencyPolicy: + - consistencyLevel: BoundedStaleness + maxIntervalInSeconds: 300 + maxStalenessPrefix: 100000 + enableAutomaticFailover: true + geoLocation: + - failoverPriority: 1 + location: eastus + - failoverPriority: 0 + location: westus + kind: MongoDB + location: West Europe + offerType: Standard + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: cosmosdb/v1beta2/account + labels: + testing.upbound.io/example-name: example + name: example +spec: + 
forProvider: + location: West Europe diff --git a/examples-generated/cosmosdb/v1beta2/cassandracluster.yaml b/examples-generated/cosmosdb/v1beta2/cassandracluster.yaml new file mode 100644 index 000000000..917903098 --- /dev/null +++ b/examples-generated/cosmosdb/v1beta2/cassandracluster.yaml @@ -0,0 +1,91 @@ +apiVersion: cosmosdb.azure.upbound.io/v1beta2 +kind: CassandraCluster +metadata: + annotations: + meta.upbound.io/example-id: cosmosdb/v1beta2/cassandracluster + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + defaultAdminPasswordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + delegatedManagementSubnetIdSelector: + matchLabels: + testing.upbound.io/example-name: example + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: cosmosdb/v1beta2/cassandracluster + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: authorization.azure.upbound.io/v1beta1 +kind: RoleAssignment +metadata: + annotations: + meta.upbound.io/example-id: cosmosdb/v1beta2/cassandracluster + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + principalId: ${data.azuread_service_principal.example.object_id} + roleDefinitionName: Network Contributor + scope: ${azurerm_virtual_network.example.id} + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: cosmosdb/v1beta2/cassandracluster + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressPrefixes: + - 10.0.1.0/24 + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + virtualNetworkNameSelector: + matchLabels: + 
testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: VirtualNetwork +metadata: + annotations: + meta.upbound.io/example-id: cosmosdb/v1beta2/cassandracluster + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressSpace: + - 10.0.0.0/16 + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/cosmosdb/v1beta2/cassandrakeyspace.yaml b/examples-generated/cosmosdb/v1beta2/cassandrakeyspace.yaml new file mode 100644 index 000000000..ad1790e18 --- /dev/null +++ b/examples-generated/cosmosdb/v1beta2/cassandrakeyspace.yaml @@ -0,0 +1,56 @@ +apiVersion: cosmosdb.azure.upbound.io/v1beta2 +kind: CassandraKeySpace +metadata: + annotations: + meta.upbound.io/example-id: cosmosdb/v1beta2/cassandrakeyspace + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountNameSelector: + matchLabels: + testing.upbound.io/example-name: example + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + throughput: 400 + +--- + +apiVersion: cosmosdb.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: cosmosdb/v1beta2/cassandrakeyspace + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + capabilities: + - name: EnableCassandra + consistencyPolicy: + - consistencyLevel: Strong + geoLocation: + - failoverPriority: 0 + location: West Europe + location: West Europe + offerType: Standard + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: cosmosdb/v1beta2/cassandrakeyspace + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git 
a/examples-generated/cosmosdb/v1beta2/cassandratable.yaml b/examples-generated/cosmosdb/v1beta2/cassandratable.yaml new file mode 100644 index 000000000..9387722f4 --- /dev/null +++ b/examples-generated/cosmosdb/v1beta2/cassandratable.yaml @@ -0,0 +1,80 @@ +apiVersion: cosmosdb.azure.upbound.io/v1beta2 +kind: CassandraTable +metadata: + annotations: + meta.upbound.io/example-id: cosmosdb/v1beta2/cassandratable + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + cassandraKeyspaceIdSelector: + matchLabels: + testing.upbound.io/example-name: example + schema: + - column: + - name: test1 + type: ascii + - name: test2 + type: int + partitionKey: + - name: test1 + +--- + +apiVersion: cosmosdb.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: cosmosdb/v1beta2/cassandratable + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + capabilities: + - name: EnableCassandra + consistencyPolicy: + - consistencyLevel: Strong + geoLocation: + - failoverPriority: 0 + location: West Europe + location: West Europe + offerType: Standard + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: cosmosdb.azure.upbound.io/v1beta2 +kind: CassandraKeySpace +metadata: + annotations: + meta.upbound.io/example-id: cosmosdb/v1beta2/cassandratable + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountNameSelector: + matchLabels: + testing.upbound.io/example-name: example + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + throughput: 400 + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: cosmosdb/v1beta2/cassandratable + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git 
a/examples-generated/cosmosdb/v1beta2/gremlindatabase.yaml b/examples-generated/cosmosdb/v1beta2/gremlindatabase.yaml new file mode 100644 index 000000000..fd7ce8bc1 --- /dev/null +++ b/examples-generated/cosmosdb/v1beta2/gremlindatabase.yaml @@ -0,0 +1,17 @@ +apiVersion: cosmosdb.azure.upbound.io/v1beta2 +kind: GremlinDatabase +metadata: + annotations: + meta.upbound.io/example-id: cosmosdb/v1beta2/gremlindatabase + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountNameSelector: + matchLabels: + testing.upbound.io/example-name: azurerm_cosmosdb_account + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: azurerm_cosmosdb_account + throughput: 400 diff --git a/examples-generated/cosmosdb/v1beta2/gremlingraph.yaml b/examples-generated/cosmosdb/v1beta2/gremlingraph.yaml new file mode 100644 index 000000000..a73a0db4e --- /dev/null +++ b/examples-generated/cosmosdb/v1beta2/gremlingraph.yaml @@ -0,0 +1,54 @@ +apiVersion: cosmosdb.azure.upbound.io/v1beta2 +kind: GremlinGraph +metadata: + annotations: + meta.upbound.io/example-id: cosmosdb/v1beta2/gremlingraph + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountNameSelector: + matchLabels: + testing.upbound.io/example-name: azurerm_cosmosdb_account + conflictResolutionPolicy: + - conflictResolutionPath: /_ts + mode: LastWriterWins + databaseNameSelector: + matchLabels: + testing.upbound.io/example-name: example + indexPolicy: + - automatic: true + excludedPaths: + - /"_etag"/? 
+ includedPaths: + - /* + indexingMode: consistent + partitionKeyPath: /Example + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: azurerm_cosmosdb_account + throughput: 400 + uniqueKey: + - paths: + - /definition/id1 + - /definition/id2 + +--- + +apiVersion: cosmosdb.azure.upbound.io/v1beta2 +kind: GremlinDatabase +metadata: + annotations: + meta.upbound.io/example-id: cosmosdb/v1beta2/gremlingraph + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountNameSelector: + matchLabels: + testing.upbound.io/example-name: azurerm_cosmosdb_account + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: azurerm_cosmosdb_account diff --git a/examples-generated/cosmosdb/v1beta2/mongocollection.yaml b/examples-generated/cosmosdb/v1beta2/mongocollection.yaml new file mode 100644 index 000000000..15e4c4972 --- /dev/null +++ b/examples-generated/cosmosdb/v1beta2/mongocollection.yaml @@ -0,0 +1,45 @@ +apiVersion: cosmosdb.azure.upbound.io/v1beta2 +kind: MongoCollection +metadata: + annotations: + meta.upbound.io/example-id: cosmosdb/v1beta2/mongocollection + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountNameSelector: + matchLabels: + testing.upbound.io/example-name: azurerm_cosmosdb_account + databaseNameSelector: + matchLabels: + testing.upbound.io/example-name: example + defaultTtlSeconds: "777" + index: + - keys: + - _id + unique: true + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: azurerm_cosmosdb_account + shardKey: uniqueKey + throughput: 400 + +--- + +apiVersion: cosmosdb.azure.upbound.io/v1beta2 +kind: MongoDatabase +metadata: + annotations: + meta.upbound.io/example-id: cosmosdb/v1beta2/mongocollection + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountNameSelector: + matchLabels: + testing.upbound.io/example-name: azurerm_cosmosdb_account + 
resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: azurerm_cosmosdb_account diff --git a/examples-generated/cosmosdb/v1beta2/mongodatabase.yaml b/examples-generated/cosmosdb/v1beta2/mongodatabase.yaml new file mode 100644 index 000000000..a399058ab --- /dev/null +++ b/examples-generated/cosmosdb/v1beta2/mongodatabase.yaml @@ -0,0 +1,17 @@ +apiVersion: cosmosdb.azure.upbound.io/v1beta2 +kind: MongoDatabase +metadata: + annotations: + meta.upbound.io/example-id: cosmosdb/v1beta2/mongodatabase + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountNameSelector: + matchLabels: + testing.upbound.io/example-name: azurerm_cosmosdb_account + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: azurerm_cosmosdb_account + throughput: 400 diff --git a/examples-generated/cosmosdb/v1beta2/sqlcontainer.yaml b/examples-generated/cosmosdb/v1beta2/sqlcontainer.yaml new file mode 100644 index 000000000..e43411fcc --- /dev/null +++ b/examples-generated/cosmosdb/v1beta2/sqlcontainer.yaml @@ -0,0 +1,52 @@ +apiVersion: cosmosdb.azure.upbound.io/v1beta2 +kind: SQLContainer +metadata: + annotations: + meta.upbound.io/example-id: cosmosdb/v1beta2/sqlcontainer + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountNameSelector: + matchLabels: + testing.upbound.io/example-name: azurerm_cosmosdb_account + databaseNameSelector: + matchLabels: + testing.upbound.io/example-name: example + indexingPolicy: + - excludedPath: + - path: /excluded/? + includedPath: + - path: /* + - path: /included/? 
+ indexingMode: consistent + partitionKeyPath: /definition/id + partitionKeyVersion: 1 + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: azurerm_cosmosdb_account + throughput: 400 + uniqueKey: + - paths: + - /definition/idlong + - /definition/idshort + +--- + +apiVersion: cosmosdb.azure.upbound.io/v1beta2 +kind: SQLDatabase +metadata: + annotations: + meta.upbound.io/example-id: cosmosdb/v1beta2/sqlcontainer + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountNameSelector: + matchLabels: + testing.upbound.io/example-name: azurerm_cosmosdb_account + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: azurerm_cosmosdb_account diff --git a/examples-generated/cosmosdb/v1beta2/sqldatabase.yaml b/examples-generated/cosmosdb/v1beta2/sqldatabase.yaml new file mode 100644 index 000000000..ced1c294c --- /dev/null +++ b/examples-generated/cosmosdb/v1beta2/sqldatabase.yaml @@ -0,0 +1,17 @@ +apiVersion: cosmosdb.azure.upbound.io/v1beta2 +kind: SQLDatabase +metadata: + annotations: + meta.upbound.io/example-id: cosmosdb/v1beta2/sqldatabase + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountNameSelector: + matchLabels: + testing.upbound.io/example-name: azurerm_cosmosdb_account + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: azurerm_cosmosdb_account + throughput: 400 diff --git a/examples-generated/cosmosdb/v1beta2/table.yaml b/examples-generated/cosmosdb/v1beta2/table.yaml new file mode 100644 index 000000000..d59d0556b --- /dev/null +++ b/examples-generated/cosmosdb/v1beta2/table.yaml @@ -0,0 +1,17 @@ +apiVersion: cosmosdb.azure.upbound.io/v1beta2 +kind: Table +metadata: + annotations: + meta.upbound.io/example-id: cosmosdb/v1beta2/table + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountNameSelector: + matchLabels: + testing.upbound.io/example-name: 
azurerm_cosmosdb_account + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: azurerm_cosmosdb_account + throughput: 400 diff --git a/examples-generated/costmanagement/v1beta2/resourcegroupcostmanagementexport.yaml b/examples-generated/costmanagement/v1beta2/resourcegroupcostmanagementexport.yaml new file mode 100644 index 000000000..7081c6851 --- /dev/null +++ b/examples-generated/costmanagement/v1beta2/resourcegroupcostmanagementexport.yaml @@ -0,0 +1,73 @@ +apiVersion: costmanagement.azure.upbound.io/v1beta2 +kind: ResourceGroupCostManagementExport +metadata: + annotations: + meta.upbound.io/example-id: costmanagement/v1beta2/resourcegroupcostmanagementexport + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + exportDataOptions: + - timeFrame: WeekToDate + type: Usage + exportDataStorageLocation: + - containerIdSelector: + matchLabels: + testing.upbound.io/example-name: example + rootFolderPath: /root/updated + recurrencePeriodEndDate: "2020-09-18T00:00:00Z" + recurrencePeriodStartDate: "2020-08-18T00:00:00Z" + recurrenceType: Monthly + resourceGroupIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: costmanagement/v1beta2/resourcegroupcostmanagementexport + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: storage.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: costmanagement/v1beta2/resourcegroupcostmanagementexport + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountReplicationType: LRS + accountTier: Standard + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: storage.azure.upbound.io/v1beta1 
+kind: Container +metadata: + annotations: + meta.upbound.io/example-id: costmanagement/v1beta2/resourcegroupcostmanagementexport + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + storageAccountNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/costmanagement/v1beta2/subscriptioncostmanagementexport.yaml b/examples-generated/costmanagement/v1beta2/subscriptioncostmanagementexport.yaml new file mode 100644 index 000000000..b7e9b2ea8 --- /dev/null +++ b/examples-generated/costmanagement/v1beta2/subscriptioncostmanagementexport.yaml @@ -0,0 +1,74 @@ +apiVersion: costmanagement.azure.upbound.io/v1beta2 +kind: SubscriptionCostManagementExport +metadata: + annotations: + meta.upbound.io/example-id: costmanagement/v1beta2/subscriptioncostmanagementexport + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + exportDataOptions: + - timeFrame: WeekToDate + type: Usage + exportDataStorageLocation: + - containerIdSelector: + matchLabels: + testing.upbound.io/example-name: example + rootFolderPath: /root/updated + name: example + recurrencePeriodEndDate: "2020-09-18T00:00:00Z" + recurrencePeriodStartDate: "2020-08-18T00:00:00Z" + recurrenceType: Monthly + subscriptionIdSelector: + matchLabels: + testing.upbound.io/example-name: azurerm_subscription + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: costmanagement/v1beta2/subscriptioncostmanagementexport + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: storage.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: costmanagement/v1beta2/subscriptioncostmanagementexport + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountReplicationType: LRS + 
accountTier: Standard + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: storage.azure.upbound.io/v1beta1 +kind: Container +metadata: + annotations: + meta.upbound.io/example-id: costmanagement/v1beta2/subscriptioncostmanagementexport + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + storageAccountNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/databricks/v1beta1/workspacerootdbfscustomermanagedkey.yaml b/examples-generated/databricks/v1beta1/workspacerootdbfscustomermanagedkey.yaml index 37397617c..d4c5f89fb 100644 --- a/examples-generated/databricks/v1beta1/workspacerootdbfscustomermanagedkey.yaml +++ b/examples-generated/databricks/v1beta1/workspacerootdbfscustomermanagedkey.yaml @@ -17,7 +17,7 @@ spec: --- -apiVersion: databricks.azure.upbound.io/v1beta1 +apiVersion: databricks.azure.upbound.io/v1beta2 kind: Workspace metadata: annotations: @@ -38,7 +38,7 @@ spec: --- -apiVersion: keyvault.azure.upbound.io/v1beta1 +apiVersion: keyvault.azure.upbound.io/v1beta2 kind: Vault metadata: annotations: @@ -116,7 +116,7 @@ spec: --- -apiVersion: keyvault.azure.upbound.io/v1beta1 +apiVersion: keyvault.azure.upbound.io/v1beta2 kind: Key metadata: annotations: diff --git a/examples-generated/databricks/v1beta2/accessconnector.yaml b/examples-generated/databricks/v1beta2/accessconnector.yaml new file mode 100644 index 000000000..78b78a320 --- /dev/null +++ b/examples-generated/databricks/v1beta2/accessconnector.yaml @@ -0,0 +1,32 @@ +apiVersion: databricks.azure.upbound.io/v1beta2 +kind: AccessConnector +metadata: + annotations: + meta.upbound.io/example-id: databricks/v1beta2/accessconnector + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + identity: + - type: SystemAssigned + location: West Europe + resourceGroupNameSelector: + matchLabels: + 
testing.upbound.io/example-name: example + tags: + Environment: Production + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: databricks/v1beta2/accessconnector + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/databricks/v1beta2/workspace.yaml b/examples-generated/databricks/v1beta2/workspace.yaml new file mode 100644 index 000000000..6c337d589 --- /dev/null +++ b/examples-generated/databricks/v1beta2/workspace.yaml @@ -0,0 +1,31 @@ +apiVersion: databricks.azure.upbound.io/v1beta2 +kind: Workspace +metadata: + annotations: + meta.upbound.io/example-id: databricks/v1beta2/workspace + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sku: standard + tags: + Environment: Production + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: databricks/v1beta2/workspace + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/datafactory/v1beta1/datasetazureblob.yaml b/examples-generated/datafactory/v1beta1/datasetazureblob.yaml index d9d22f5dc..13ccf3c9a 100644 --- a/examples-generated/datafactory/v1beta1/datasetazureblob.yaml +++ b/examples-generated/datafactory/v1beta1/datasetazureblob.yaml @@ -19,7 +19,7 @@ spec: --- -apiVersion: datafactory.azure.upbound.io/v1beta1 +apiVersion: datafactory.azure.upbound.io/v1beta2 kind: Factory metadata: annotations: @@ -36,7 +36,7 @@ spec: --- -apiVersion: datafactory.azure.upbound.io/v1beta1 +apiVersion: datafactory.azure.upbound.io/v1beta2 kind: LinkedServiceAzureBlobStorage metadata: annotations: diff --git 
a/examples-generated/datafactory/v1beta1/datasetcosmosdbsqlapi.yaml b/examples-generated/datafactory/v1beta1/datasetcosmosdbsqlapi.yaml index 4a7333e57..7481f1a44 100644 --- a/examples-generated/datafactory/v1beta1/datasetcosmosdbsqlapi.yaml +++ b/examples-generated/datafactory/v1beta1/datasetcosmosdbsqlapi.yaml @@ -18,7 +18,7 @@ spec: --- -apiVersion: datafactory.azure.upbound.io/v1beta1 +apiVersion: datafactory.azure.upbound.io/v1beta2 kind: Factory metadata: annotations: diff --git a/examples-generated/datafactory/v1beta1/datasethttp.yaml b/examples-generated/datafactory/v1beta1/datasethttp.yaml index 9e08f460c..635e8f37b 100644 --- a/examples-generated/datafactory/v1beta1/datasethttp.yaml +++ b/examples-generated/datafactory/v1beta1/datasethttp.yaml @@ -20,7 +20,7 @@ spec: --- -apiVersion: datafactory.azure.upbound.io/v1beta1 +apiVersion: datafactory.azure.upbound.io/v1beta2 kind: Factory metadata: annotations: diff --git a/examples-generated/datafactory/v1beta1/datasetmysql.yaml b/examples-generated/datafactory/v1beta1/datasetmysql.yaml index 8e952b0d0..6176ce341 100644 --- a/examples-generated/datafactory/v1beta1/datasetmysql.yaml +++ b/examples-generated/datafactory/v1beta1/datasetmysql.yaml @@ -17,7 +17,7 @@ spec: --- -apiVersion: datafactory.azure.upbound.io/v1beta1 +apiVersion: datafactory.azure.upbound.io/v1beta2 kind: Factory metadata: annotations: diff --git a/examples-generated/datafactory/v1beta1/datasetpostgresql.yaml b/examples-generated/datafactory/v1beta1/datasetpostgresql.yaml index d881c2994..19e386682 100644 --- a/examples-generated/datafactory/v1beta1/datasetpostgresql.yaml +++ b/examples-generated/datafactory/v1beta1/datasetpostgresql.yaml @@ -17,7 +17,7 @@ spec: --- -apiVersion: datafactory.azure.upbound.io/v1beta1 +apiVersion: datafactory.azure.upbound.io/v1beta2 kind: Factory metadata: annotations: diff --git a/examples-generated/datafactory/v1beta1/datasetsnowflake.yaml b/examples-generated/datafactory/v1beta1/datasetsnowflake.yaml index 
e7680de4f..b908c15e8 100644 --- a/examples-generated/datafactory/v1beta1/datasetsnowflake.yaml +++ b/examples-generated/datafactory/v1beta1/datasetsnowflake.yaml @@ -19,7 +19,7 @@ spec: --- -apiVersion: datafactory.azure.upbound.io/v1beta1 +apiVersion: datafactory.azure.upbound.io/v1beta2 kind: Factory metadata: annotations: @@ -36,7 +36,7 @@ spec: --- -apiVersion: datafactory.azure.upbound.io/v1beta1 +apiVersion: datafactory.azure.upbound.io/v1beta2 kind: LinkedServiceSnowflake metadata: annotations: diff --git a/examples-generated/datafactory/v1beta1/datasetsqlservertable.yaml b/examples-generated/datafactory/v1beta1/datasetsqlservertable.yaml index 7add05736..5202d2410 100644 --- a/examples-generated/datafactory/v1beta1/datasetsqlservertable.yaml +++ b/examples-generated/datafactory/v1beta1/datasetsqlservertable.yaml @@ -17,7 +17,7 @@ spec: --- -apiVersion: datafactory.azure.upbound.io/v1beta1 +apiVersion: datafactory.azure.upbound.io/v1beta2 kind: Factory metadata: annotations: @@ -34,7 +34,7 @@ spec: --- -apiVersion: datafactory.azure.upbound.io/v1beta1 +apiVersion: datafactory.azure.upbound.io/v1beta2 kind: LinkedServiceSQLServer metadata: annotations: diff --git a/examples-generated/datafactory/v1beta1/integrationruntimeazure.yaml b/examples-generated/datafactory/v1beta1/integrationruntimeazure.yaml index 29bb9b956..47d862099 100644 --- a/examples-generated/datafactory/v1beta1/integrationruntimeazure.yaml +++ b/examples-generated/datafactory/v1beta1/integrationruntimeazure.yaml @@ -15,7 +15,7 @@ spec: --- -apiVersion: datafactory.azure.upbound.io/v1beta1 +apiVersion: datafactory.azure.upbound.io/v1beta2 kind: Factory metadata: annotations: diff --git a/examples-generated/datafactory/v1beta1/integrationruntimeselfhosted.yaml b/examples-generated/datafactory/v1beta1/integrationruntimeselfhosted.yaml index acb8f3479..8282ea24f 100644 --- a/examples-generated/datafactory/v1beta1/integrationruntimeselfhosted.yaml +++ 
b/examples-generated/datafactory/v1beta1/integrationruntimeselfhosted.yaml @@ -14,7 +14,7 @@ spec: --- -apiVersion: datafactory.azure.upbound.io/v1beta1 +apiVersion: datafactory.azure.upbound.io/v1beta2 kind: Factory metadata: annotations: diff --git a/examples-generated/datafactory/v1beta1/linkedserviceazuresearch.yaml b/examples-generated/datafactory/v1beta1/linkedserviceazuresearch.yaml index 766cafcd1..18500ab50 100644 --- a/examples-generated/datafactory/v1beta1/linkedserviceazuresearch.yaml +++ b/examples-generated/datafactory/v1beta1/linkedserviceazuresearch.yaml @@ -18,7 +18,7 @@ spec: --- -apiVersion: datafactory.azure.upbound.io/v1beta1 +apiVersion: datafactory.azure.upbound.io/v1beta2 kind: Factory metadata: annotations: @@ -49,7 +49,7 @@ spec: --- -apiVersion: search.azure.upbound.io/v1beta1 +apiVersion: search.azure.upbound.io/v1beta2 kind: Service metadata: annotations: diff --git a/examples-generated/datafactory/v1beta1/linkedserviceazuretablestorage.yaml b/examples-generated/datafactory/v1beta1/linkedserviceazuretablestorage.yaml index 6eba914db..bb889f23d 100644 --- a/examples-generated/datafactory/v1beta1/linkedserviceazuretablestorage.yaml +++ b/examples-generated/datafactory/v1beta1/linkedserviceazuretablestorage.yaml @@ -18,7 +18,7 @@ spec: --- -apiVersion: datafactory.azure.upbound.io/v1beta1 +apiVersion: datafactory.azure.upbound.io/v1beta2 kind: Factory metadata: annotations: diff --git a/examples-generated/datafactory/v1beta1/linkedservicecosmosdb.yaml b/examples-generated/datafactory/v1beta1/linkedservicecosmosdb.yaml index 5e1368e0b..e2d266085 100644 --- a/examples-generated/datafactory/v1beta1/linkedservicecosmosdb.yaml +++ b/examples-generated/datafactory/v1beta1/linkedservicecosmosdb.yaml @@ -20,7 +20,7 @@ spec: --- -apiVersion: datafactory.azure.upbound.io/v1beta1 +apiVersion: datafactory.azure.upbound.io/v1beta2 kind: Factory metadata: annotations: diff --git a/examples-generated/datafactory/v1beta1/linkedservicecosmosdbmongoapi.yaml 
b/examples-generated/datafactory/v1beta1/linkedservicecosmosdbmongoapi.yaml index 7bc37422d..cbb4a5bf0 100644 --- a/examples-generated/datafactory/v1beta1/linkedservicecosmosdbmongoapi.yaml +++ b/examples-generated/datafactory/v1beta1/linkedservicecosmosdbmongoapi.yaml @@ -19,7 +19,7 @@ spec: --- -apiVersion: datafactory.azure.upbound.io/v1beta1 +apiVersion: datafactory.azure.upbound.io/v1beta2 kind: Factory metadata: annotations: diff --git a/examples-generated/datafactory/v1beta1/linkedservicedatalakestoragegen2.yaml b/examples-generated/datafactory/v1beta1/linkedservicedatalakestoragegen2.yaml index 661b10a6e..32916cc03 100644 --- a/examples-generated/datafactory/v1beta1/linkedservicedatalakestoragegen2.yaml +++ b/examples-generated/datafactory/v1beta1/linkedservicedatalakestoragegen2.yaml @@ -18,7 +18,7 @@ spec: --- -apiVersion: datafactory.azure.upbound.io/v1beta1 +apiVersion: datafactory.azure.upbound.io/v1beta2 kind: Factory metadata: annotations: diff --git a/examples-generated/datafactory/v1beta1/linkedservicekeyvault.yaml b/examples-generated/datafactory/v1beta1/linkedservicekeyvault.yaml index 8b6c21b24..654f06035 100644 --- a/examples-generated/datafactory/v1beta1/linkedservicekeyvault.yaml +++ b/examples-generated/datafactory/v1beta1/linkedservicekeyvault.yaml @@ -17,7 +17,7 @@ spec: --- -apiVersion: datafactory.azure.upbound.io/v1beta1 +apiVersion: datafactory.azure.upbound.io/v1beta2 kind: Factory metadata: annotations: @@ -34,7 +34,7 @@ spec: --- -apiVersion: keyvault.azure.upbound.io/v1beta1 +apiVersion: keyvault.azure.upbound.io/v1beta2 kind: Vault metadata: annotations: diff --git a/examples-generated/datafactory/v1beta1/linkedservicekusto.yaml b/examples-generated/datafactory/v1beta1/linkedservicekusto.yaml index f1672a31a..ccb05194e 100644 --- a/examples-generated/datafactory/v1beta1/linkedservicekusto.yaml +++ b/examples-generated/datafactory/v1beta1/linkedservicekusto.yaml @@ -21,7 +21,7 @@ spec: --- -apiVersion: 
datafactory.azure.upbound.io/v1beta1 +apiVersion: datafactory.azure.upbound.io/v1beta2 kind: Factory metadata: annotations: @@ -40,7 +40,7 @@ spec: --- -apiVersion: kusto.azure.upbound.io/v1beta1 +apiVersion: kusto.azure.upbound.io/v1beta2 kind: Cluster metadata: annotations: diff --git a/examples-generated/datafactory/v1beta1/linkedservicemysql.yaml b/examples-generated/datafactory/v1beta1/linkedservicemysql.yaml index 5cc460d88..2ede151ee 100644 --- a/examples-generated/datafactory/v1beta1/linkedservicemysql.yaml +++ b/examples-generated/datafactory/v1beta1/linkedservicemysql.yaml @@ -15,7 +15,7 @@ spec: --- -apiVersion: datafactory.azure.upbound.io/v1beta1 +apiVersion: datafactory.azure.upbound.io/v1beta2 kind: Factory metadata: annotations: diff --git a/examples-generated/datafactory/v1beta1/linkedservicepostgresql.yaml b/examples-generated/datafactory/v1beta1/linkedservicepostgresql.yaml index 8eff98341..5504516d0 100644 --- a/examples-generated/datafactory/v1beta1/linkedservicepostgresql.yaml +++ b/examples-generated/datafactory/v1beta1/linkedservicepostgresql.yaml @@ -15,7 +15,7 @@ spec: --- -apiVersion: datafactory.azure.upbound.io/v1beta1 +apiVersion: datafactory.azure.upbound.io/v1beta2 kind: Factory metadata: annotations: diff --git a/examples-generated/datafactory/v1beta1/linkedservicesftp.yaml b/examples-generated/datafactory/v1beta1/linkedservicesftp.yaml index 00e9d5d3d..eb1555366 100644 --- a/examples-generated/datafactory/v1beta1/linkedservicesftp.yaml +++ b/examples-generated/datafactory/v1beta1/linkedservicesftp.yaml @@ -22,7 +22,7 @@ spec: --- -apiVersion: datafactory.azure.upbound.io/v1beta1 +apiVersion: datafactory.azure.upbound.io/v1beta2 kind: Factory metadata: annotations: diff --git a/examples-generated/datafactory/v1beta1/linkedserviceweb.yaml b/examples-generated/datafactory/v1beta1/linkedserviceweb.yaml index 38b1fd31e..96eae3ab1 100644 --- a/examples-generated/datafactory/v1beta1/linkedserviceweb.yaml +++ 
b/examples-generated/datafactory/v1beta1/linkedserviceweb.yaml @@ -16,7 +16,7 @@ spec: --- -apiVersion: datafactory.azure.upbound.io/v1beta1 +apiVersion: datafactory.azure.upbound.io/v1beta2 kind: Factory metadata: annotations: diff --git a/examples-generated/datafactory/v1beta1/managedprivateendpoint.yaml b/examples-generated/datafactory/v1beta1/managedprivateendpoint.yaml index 93a95daee..c07b78ca7 100644 --- a/examples-generated/datafactory/v1beta1/managedprivateendpoint.yaml +++ b/examples-generated/datafactory/v1beta1/managedprivateendpoint.yaml @@ -19,7 +19,7 @@ spec: --- -apiVersion: datafactory.azure.upbound.io/v1beta1 +apiVersion: datafactory.azure.upbound.io/v1beta2 kind: Factory metadata: annotations: @@ -51,7 +51,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: diff --git a/examples-generated/datafactory/v1beta1/pipeline.yaml b/examples-generated/datafactory/v1beta1/pipeline.yaml index 884eafb02..cafccd325 100644 --- a/examples-generated/datafactory/v1beta1/pipeline.yaml +++ b/examples-generated/datafactory/v1beta1/pipeline.yaml @@ -14,7 +14,7 @@ spec: --- -apiVersion: datafactory.azure.upbound.io/v1beta1 +apiVersion: datafactory.azure.upbound.io/v1beta2 kind: Factory metadata: annotations: diff --git a/examples-generated/datafactory/v1beta1/triggerblobevent.yaml b/examples-generated/datafactory/v1beta1/triggerblobevent.yaml index 95586d438..ac74ff4c3 100644 --- a/examples-generated/datafactory/v1beta1/triggerblobevent.yaml +++ b/examples-generated/datafactory/v1beta1/triggerblobevent.yaml @@ -37,7 +37,7 @@ spec: --- -apiVersion: datafactory.azure.upbound.io/v1beta1 +apiVersion: datafactory.azure.upbound.io/v1beta2 kind: Factory metadata: annotations: @@ -84,7 +84,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: diff --git 
a/examples-generated/datafactory/v1beta1/triggercustomevent.yaml b/examples-generated/datafactory/v1beta1/triggercustomevent.yaml index 0a72b370e..95cb821dc 100644 --- a/examples-generated/datafactory/v1beta1/triggercustomevent.yaml +++ b/examples-generated/datafactory/v1beta1/triggercustomevent.yaml @@ -36,7 +36,7 @@ spec: --- -apiVersion: datafactory.azure.upbound.io/v1beta1 +apiVersion: datafactory.azure.upbound.io/v1beta2 kind: Factory metadata: annotations: @@ -69,7 +69,7 @@ spec: --- -apiVersion: eventgrid.azure.upbound.io/v1beta1 +apiVersion: eventgrid.azure.upbound.io/v1beta2 kind: Topic metadata: annotations: diff --git a/examples-generated/datafactory/v1beta2/customdataset.yaml b/examples-generated/datafactory/v1beta2/customdataset.yaml new file mode 100644 index 000000000..72a58156e --- /dev/null +++ b/examples-generated/datafactory/v1beta2/customdataset.yaml @@ -0,0 +1,153 @@ +apiVersion: datafactory.azure.upbound.io/v1beta2 +kind: CustomDataSet +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/customdataset + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + additionalProperties: + bar: test2 + foo: test1 + annotations: + - test1 + - test2 + - test3 + dataFactoryIdSelector: + matchLabels: + testing.upbound.io/example-name: example + description: test description + folder: testFolder + linkedService: + - nameSelector: + matchLabels: + testing.upbound.io/example-name: example + parameters: + key1: value1 + parameters: + Bar: Test2 + foo: test1 + schemaJson: | + { + "type": "object", + "properties": { + "name": { + "type": "object", + "properties": { + "firstName": { + "type": "string" + }, + "lastName": { + "type": "string" + } + } + }, + "age": { + "type": "integer" + } + } + } + type: Json + typePropertiesJson: | + { + "location": { + "container":"${azurerm_storage_container.example.name}", + "fileName":"foo.txt", + "folderPath": "foo/bar/", + "type":"AzureBlobStorageLocation" + }, + 
"encodingName":"UTF-8" + } + +--- + +apiVersion: datafactory.azure.upbound.io/v1beta2 +kind: Factory +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/customdataset + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + identity: + - type: SystemAssigned + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: datafactory.azure.upbound.io/v1beta2 +kind: LinkedCustomService +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/customdataset + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + dataFactoryIdSelector: + matchLabels: + testing.upbound.io/example-name: example + type: AzureBlobStorage + typePropertiesJson: | + { + "connectionString":"${azurerm_storage_account.example.primary_connection_string}" + } + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/customdataset + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: storage.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/customdataset + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountKind: BlobStorage + accountReplicationType: LRS + accountTier: Standard + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: storage.azure.upbound.io/v1beta1 +kind: Container +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/customdataset + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + containerAccessType: private + storageAccountNameSelector: + matchLabels: + 
testing.upbound.io/example-name: example diff --git a/examples-generated/datafactory/v1beta2/dataflow.yaml b/examples-generated/datafactory/v1beta2/dataflow.yaml new file mode 100644 index 000000000..9827ba119 --- /dev/null +++ b/examples-generated/datafactory/v1beta2/dataflow.yaml @@ -0,0 +1,156 @@ +apiVersion: datafactory.azure.upbound.io/v1beta2 +kind: DataFlow +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/dataflow + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + dataFactoryIdSelector: + matchLabels: + testing.upbound.io/example-name: example + script: "source(\n allowSchemaDrift: true, \n validateSchema: false, \n limit: + 100, \n ignoreNoFilesFound: false, \n documentForm: 'documentPerLine') ~> + source1 \nsource1 sink(\n allowSchemaDrift: true, \n validateSchema: false, + \n skipDuplicateMapInputs: true, \n skipDuplicateMapOutputs: true) ~> sink1\n" + sink: + - dataset: + - nameSelector: + matchLabels: + testing.upbound.io/example-name: example2 + flowlet: + - name: ${azurerm_data_factory_flowlet_data_flow.example2.name} + parameters: + Key1: value1 + name: sink1 + source: + - dataset: + - nameSelector: + matchLabels: + testing.upbound.io/example-name: example1 + flowlet: + - name: ${azurerm_data_factory_flowlet_data_flow.example1.name} + parameters: + Key1: value1 + name: source1 + +--- + +apiVersion: datafactory.azure.upbound.io/v1beta2 +kind: Factory +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/dataflow + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: datafactory.azure.upbound.io/v1beta2 +kind: DataSetJSON +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/dataflow + labels: + testing.upbound.io/example-name: example1 + name: example1 +spec: + forProvider: 
+ azureBlobStorageLocation: + - container: container + filename: foo.txt + path: foo/bar/ + dataFactoryIdSelector: + matchLabels: + testing.upbound.io/example-name: example + encoding: UTF-8 + linkedServiceNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: datafactory.azure.upbound.io/v1beta2 +kind: DataSetJSON +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/dataflow + labels: + testing.upbound.io/example-name: example2 + name: example2 +spec: + forProvider: + azureBlobStorageLocation: + - container: container + filename: bar.txt + path: foo/bar/ + dataFactoryIdSelector: + matchLabels: + testing.upbound.io/example-name: example + encoding: UTF-8 + linkedServiceNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: datafactory.azure.upbound.io/v1beta2 +kind: LinkedCustomService +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/dataflow + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + dataFactoryIdSelector: + matchLabels: + testing.upbound.io/example-name: example + type: AzureBlobStorage + typePropertiesJson: | + { + "connectionString": "${azurerm_storage_account.example.primary_connection_string}" + } + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/dataflow + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: storage.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/dataflow + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountReplicationType: LRS + accountTier: Standard + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git 
a/examples-generated/datafactory/v1beta2/datasetbinary.yaml b/examples-generated/datafactory/v1beta2/datasetbinary.yaml new file mode 100644 index 000000000..df9b75593 --- /dev/null +++ b/examples-generated/datafactory/v1beta2/datasetbinary.yaml @@ -0,0 +1,74 @@ +apiVersion: datafactory.azure.upbound.io/v1beta2 +kind: DataSetBinary +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/datasetbinary + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + dataFactoryIdSelector: + matchLabels: + testing.upbound.io/example-name: example + linkedServiceNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sftpServerLocation: + - filename: '**' + path: /test/ + +--- + +apiVersion: datafactory.azure.upbound.io/v1beta2 +kind: Factory +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/datasetbinary + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: datafactory.azure.upbound.io/v1beta1 +kind: LinkedServiceSFTP +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/datasetbinary + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + authenticationType: Basic + dataFactoryIdSelector: + matchLabels: + testing.upbound.io/example-name: example + host: http://www.bing.com + passwordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + port: 22 + username: foo + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/datasetbinary + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/datafactory/v1beta2/datasetdelimitedtext.yaml 
b/examples-generated/datafactory/v1beta2/datasetdelimitedtext.yaml new file mode 100644 index 000000000..245eec673 --- /dev/null +++ b/examples-generated/datafactory/v1beta2/datasetdelimitedtext.yaml @@ -0,0 +1,76 @@ +apiVersion: datafactory.azure.upbound.io/v1beta2 +kind: DataSetDelimitedText +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/datasetdelimitedtext + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + columnDelimiter: ',' + dataFactoryIdSelector: + matchLabels: + testing.upbound.io/example-name: example + encoding: UTF-8 + escapeCharacter: f + firstRowAsHeader: true + httpServerLocation: + - filename: fizz.txt + path: foo/bar/ + relativeUrl: http://www.bing.com + linkedServiceNameSelector: + matchLabels: + testing.upbound.io/example-name: example + nullValue: "NULL" + quoteCharacter: x + rowDelimiter: NEW + +--- + +apiVersion: datafactory.azure.upbound.io/v1beta2 +kind: Factory +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/datasetdelimitedtext + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: datafactory.azure.upbound.io/v1beta1 +kind: LinkedServiceWeb +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/datasetdelimitedtext + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + authenticationType: Anonymous + dataFactoryIdSelector: + matchLabels: + testing.upbound.io/example-name: example + url: https://www.bing.com + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/datasetdelimitedtext + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git 
a/examples-generated/datafactory/v1beta2/datasetjson.yaml b/examples-generated/datafactory/v1beta2/datasetjson.yaml new file mode 100644 index 000000000..6d22fc208 --- /dev/null +++ b/examples-generated/datafactory/v1beta2/datasetjson.yaml @@ -0,0 +1,70 @@ +apiVersion: datafactory.azure.upbound.io/v1beta2 +kind: DataSetJSON +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/datasetjson + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + dataFactoryIdSelector: + matchLabels: + testing.upbound.io/example-name: example + encoding: UTF-8 + httpServerLocation: + - filename: foo.txt + path: foo/bar/ + relativeUrl: /fizz/buzz/ + linkedServiceNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: datafactory.azure.upbound.io/v1beta2 +kind: Factory +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/datasetjson + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: datafactory.azure.upbound.io/v1beta1 +kind: LinkedServiceWeb +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/datasetjson + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + authenticationType: Anonymous + dataFactoryIdSelector: + matchLabels: + testing.upbound.io/example-name: example + url: https://www.bing.com + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/datasetjson + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/datafactory/v1beta2/datasetparquet.yaml b/examples-generated/datafactory/v1beta2/datasetparquet.yaml new file mode 100644 index 
000000000..3213230cc --- /dev/null +++ b/examples-generated/datafactory/v1beta2/datasetparquet.yaml @@ -0,0 +1,69 @@ +apiVersion: datafactory.azure.upbound.io/v1beta2 +kind: DataSetParquet +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/datasetparquet + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + dataFactoryIdSelector: + matchLabels: + testing.upbound.io/example-name: example + httpServerLocation: + - filename: fizz.txt + path: foo/bar/ + relativeUrl: http://www.bing.com + linkedServiceNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: datafactory.azure.upbound.io/v1beta2 +kind: Factory +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/datasetparquet + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: datafactory.azure.upbound.io/v1beta1 +kind: LinkedServiceWeb +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/datasetparquet + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + authenticationType: Anonymous + dataFactoryIdSelector: + matchLabels: + testing.upbound.io/example-name: example + url: https://www.bing.com + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/datasetparquet + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/datafactory/v1beta2/factory.yaml b/examples-generated/datafactory/v1beta2/factory.yaml new file mode 100644 index 000000000..b439f13d9 --- /dev/null +++ b/examples-generated/datafactory/v1beta2/factory.yaml @@ -0,0 +1,28 @@ +apiVersion: datafactory.azure.upbound.io/v1beta2 +kind: 
Factory +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/factory + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/factory + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/datafactory/v1beta2/integrationruntimeazuressis.yaml b/examples-generated/datafactory/v1beta2/integrationruntimeazuressis.yaml new file mode 100644 index 000000000..6094d4372 --- /dev/null +++ b/examples-generated/datafactory/v1beta2/integrationruntimeazuressis.yaml @@ -0,0 +1,46 @@ +apiVersion: datafactory.azure.upbound.io/v1beta2 +kind: IntegrationRuntimeAzureSSIS +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/integrationruntimeazuressis + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + dataFactoryIdSelector: + matchLabels: + testing.upbound.io/example-name: example + location: West Europe + nodeSize: Standard_D8_v3 + +--- + +apiVersion: datafactory.azure.upbound.io/v1beta2 +kind: Factory +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/integrationruntimeazuressis + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/integrationruntimeazuressis + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git 
a/examples-generated/datafactory/v1beta2/integrationruntimemanaged.yaml b/examples-generated/datafactory/v1beta2/integrationruntimemanaged.yaml new file mode 100644 index 000000000..d6f37ad75 --- /dev/null +++ b/examples-generated/datafactory/v1beta2/integrationruntimemanaged.yaml @@ -0,0 +1,46 @@ +apiVersion: datafactory.azure.upbound.io/v1beta2 +kind: IntegrationRuntimeManaged +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/integrationruntimemanaged + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + dataFactoryIdSelector: + matchLabels: + testing.upbound.io/example-name: example + location: West Europe + nodeSize: Standard_D8_v3 + +--- + +apiVersion: datafactory.azure.upbound.io/v1beta2 +kind: Factory +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/integrationruntimemanaged + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/integrationruntimemanaged + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/datafactory/v1beta2/linkedcustomservice.yaml b/examples-generated/datafactory/v1beta2/linkedcustomservice.yaml new file mode 100644 index 000000000..b67aa0ce2 --- /dev/null +++ b/examples-generated/datafactory/v1beta2/linkedcustomservice.yaml @@ -0,0 +1,79 @@ +apiVersion: datafactory.azure.upbound.io/v1beta2 +kind: LinkedCustomService +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/linkedcustomservice + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + annotations: + - test1 + - test2 + - test3 + dataFactoryIdSelector: + 
matchLabels: + testing.upbound.io/example-name: example + description: test description + parameters: + Env: Test + foo: bar + type: AzureBlobStorage + typePropertiesJson: | + { + "connectionString":"${azurerm_storage_account.example.primary_connection_string}" + } + +--- + +apiVersion: datafactory.azure.upbound.io/v1beta2 +kind: Factory +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/linkedcustomservice + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + identity: + - type: SystemAssigned + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/linkedcustomservice + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: storage.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/linkedcustomservice + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountKind: BlobStorage + accountReplicationType: LRS + accountTier: Standard + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/datafactory/v1beta2/linkedserviceazureblobstorage.yaml b/examples-generated/datafactory/v1beta2/linkedserviceazureblobstorage.yaml new file mode 100644 index 000000000..cb7d3494e --- /dev/null +++ b/examples-generated/datafactory/v1beta2/linkedserviceazureblobstorage.yaml @@ -0,0 +1,48 @@ +apiVersion: datafactory.azure.upbound.io/v1beta2 +kind: LinkedServiceAzureBlobStorage +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/linkedserviceazureblobstorage + labels: + testing.upbound.io/example-name: example + name: example +spec: + 
forProvider: + connectionStringSecretRef: + key: attribute.example.primary_connection_string + name: example- + namespace: upbound-system + dataFactoryIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: datafactory.azure.upbound.io/v1beta2 +kind: Factory +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/linkedserviceazureblobstorage + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/linkedserviceazureblobstorage + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/datafactory/v1beta2/linkedserviceazuredatabricks.yaml b/examples-generated/datafactory/v1beta2/linkedserviceazuredatabricks.yaml new file mode 100644 index 000000000..10c258c8b --- /dev/null +++ b/examples-generated/datafactory/v1beta2/linkedserviceazuredatabricks.yaml @@ -0,0 +1,88 @@ +apiVersion: datafactory.azure.upbound.io/v1beta2 +kind: LinkedServiceAzureDatabricks +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/linkedserviceazuredatabricks + labels: + testing.upbound.io/example-name: msi_linked + name: msi-linked +spec: + forProvider: + adbDomain: https://${azurerm_databricks_workspace.example.workspace_url} + dataFactoryIdSelector: + matchLabels: + testing.upbound.io/example-name: example + description: ADB Linked Service via MSI + msiWorkSpaceResourceIdSelector: + matchLabels: + testing.upbound.io/example-name: example + newClusterConfig: + - clusterVersion: 5.5.x-gpu-scala2.11 + customTags: + custom_tag1: sct_value_1 + custom_tag2: sct_value_2 + driverNodeType: Standard_NC12 + initScripts: + - init.sh + - init2.sh + 
logDestination: dbfs:/logs + maxNumberOfWorkers: 5 + minNumberOfWorkers: 1 + nodeType: Standard_NC12 + sparkConfig: + config1: value1 + config2: value2 + sparkEnvironmentVariables: + envVar1: value1 + envVar2: value2 + +--- + +apiVersion: datafactory.azure.upbound.io/v1beta2 +kind: Factory +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/linkedserviceazuredatabricks + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + identity: + - type: SystemAssigned + location: East US + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: databricks.azure.upbound.io/v1beta2 +kind: Workspace +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/linkedserviceazuredatabricks + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: East US + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sku: standard + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/linkedserviceazuredatabricks + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: East US diff --git a/examples-generated/datafactory/v1beta2/linkedserviceazurefilestorage.yaml b/examples-generated/datafactory/v1beta2/linkedserviceazurefilestorage.yaml new file mode 100644 index 000000000..6546b9731 --- /dev/null +++ b/examples-generated/datafactory/v1beta2/linkedserviceazurefilestorage.yaml @@ -0,0 +1,48 @@ +apiVersion: datafactory.azure.upbound.io/v1beta2 +kind: LinkedServiceAzureFileStorage +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/linkedserviceazurefilestorage + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + connectionStringSecretRef: + key: attribute.example.primary_connection_string + 
name: example- + namespace: upbound-system + dataFactoryIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: datafactory.azure.upbound.io/v1beta2 +kind: Factory +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/linkedserviceazurefilestorage + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/linkedserviceazurefilestorage + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/datafactory/v1beta2/linkedserviceazurefunction.yaml b/examples-generated/datafactory/v1beta2/linkedserviceazurefunction.yaml new file mode 100644 index 000000000..a44bdc26c --- /dev/null +++ b/examples-generated/datafactory/v1beta2/linkedserviceazurefunction.yaml @@ -0,0 +1,49 @@ +apiVersion: datafactory.azure.upbound.io/v1beta2 +kind: LinkedServiceAzureFunction +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/linkedserviceazurefunction + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + dataFactoryIdSelector: + matchLabels: + testing.upbound.io/example-name: example + keySecretRef: + key: example-key + name: example-secret + namespace: upbound-system + url: https://${data.azurerm_function_app.example.default_hostname} + +--- + +apiVersion: datafactory.azure.upbound.io/v1beta2 +kind: Factory +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/linkedserviceazurefunction + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: 
example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/linkedserviceazurefunction + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/datafactory/v1beta2/linkedserviceazuresqldatabase.yaml b/examples-generated/datafactory/v1beta2/linkedserviceazuresqldatabase.yaml new file mode 100644 index 000000000..deadf6568 --- /dev/null +++ b/examples-generated/datafactory/v1beta2/linkedserviceazuresqldatabase.yaml @@ -0,0 +1,46 @@ +apiVersion: datafactory.azure.upbound.io/v1beta2 +kind: LinkedServiceAzureSQLDatabase +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/linkedserviceazuresqldatabase + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + connectionString: data source=serverhostname;initial catalog=master;user id=testUser;Password=test;integrated + security=False;encrypt=True;connection timeout=30 + dataFactoryIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: datafactory.azure.upbound.io/v1beta2 +kind: Factory +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/linkedserviceazuresqldatabase + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/linkedserviceazuresqldatabase + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/datafactory/v1beta2/linkedserviceodata.yaml b/examples-generated/datafactory/v1beta2/linkedserviceodata.yaml new file mode 100644 index 
000000000..51aa26d54 --- /dev/null +++ b/examples-generated/datafactory/v1beta2/linkedserviceodata.yaml @@ -0,0 +1,45 @@ +apiVersion: datafactory.azure.upbound.io/v1beta2 +kind: LinkedServiceOData +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/linkedserviceodata + labels: + testing.upbound.io/example-name: anonymous + name: anonymous +spec: + forProvider: + dataFactoryIdSelector: + matchLabels: + testing.upbound.io/example-name: example + url: https://services.odata.org/v4/TripPinServiceRW/People + +--- + +apiVersion: datafactory.azure.upbound.io/v1beta2 +kind: Factory +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/linkedserviceodata + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/linkedserviceodata + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/datafactory/v1beta2/linkedserviceodbc.yaml b/examples-generated/datafactory/v1beta2/linkedserviceodbc.yaml new file mode 100644 index 000000000..2d01bfff4 --- /dev/null +++ b/examples-generated/datafactory/v1beta2/linkedserviceodbc.yaml @@ -0,0 +1,45 @@ +apiVersion: datafactory.azure.upbound.io/v1beta2 +kind: LinkedServiceOdbc +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/linkedserviceodbc + labels: + testing.upbound.io/example-name: anonymous + name: anonymous +spec: + forProvider: + connectionString: Driver={SQL Server};Server=test;Database=test;Uid=test;Pwd=test; + dataFactoryIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: datafactory.azure.upbound.io/v1beta2 +kind: Factory +metadata: + annotations: + 
meta.upbound.io/example-id: datafactory/v1beta2/linkedserviceodbc + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/linkedserviceodbc + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/datafactory/v1beta2/linkedservicesnowflake.yaml b/examples-generated/datafactory/v1beta2/linkedservicesnowflake.yaml new file mode 100644 index 000000000..e44869d29 --- /dev/null +++ b/examples-generated/datafactory/v1beta2/linkedservicesnowflake.yaml @@ -0,0 +1,45 @@ +apiVersion: datafactory.azure.upbound.io/v1beta2 +kind: LinkedServiceSnowflake +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/linkedservicesnowflake + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + connectionString: jdbc:snowflake://account.region.snowflakecomputing.com/?user=user&db=db&warehouse=wh + dataFactoryIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: datafactory.azure.upbound.io/v1beta2 +kind: Factory +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/linkedservicesnowflake + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/linkedservicesnowflake + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git 
a/examples-generated/datafactory/v1beta2/linkedservicesqlserver.yaml b/examples-generated/datafactory/v1beta2/linkedservicesqlserver.yaml new file mode 100644 index 000000000..72aeecf51 --- /dev/null +++ b/examples-generated/datafactory/v1beta2/linkedservicesqlserver.yaml @@ -0,0 +1,46 @@ +apiVersion: datafactory.azure.upbound.io/v1beta2 +kind: LinkedServiceSQLServer +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/linkedservicesqlserver + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + connectionString: Integrated Security=False;Data Source=test;Initial Catalog=test;User + ID=test;Password=test + dataFactoryIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: datafactory.azure.upbound.io/v1beta2 +kind: Factory +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/linkedservicesqlserver + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/linkedservicesqlserver + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/datafactory/v1beta2/linkedservicesynapse.yaml b/examples-generated/datafactory/v1beta2/linkedservicesynapse.yaml new file mode 100644 index 000000000..8a16dd662 --- /dev/null +++ b/examples-generated/datafactory/v1beta2/linkedservicesynapse.yaml @@ -0,0 +1,46 @@ +apiVersion: datafactory.azure.upbound.io/v1beta2 +kind: LinkedServiceSynapse +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/linkedservicesynapse + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + connectionString: 
Integrated Security=False;Data Source=test;Initial Catalog=test;User + ID=test;Password=test + dataFactoryIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: datafactory.azure.upbound.io/v1beta2 +kind: Factory +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/linkedservicesynapse + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/linkedservicesynapse + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/datafactory/v1beta2/triggerschedule.yaml b/examples-generated/datafactory/v1beta2/triggerschedule.yaml new file mode 100644 index 000000000..e527ba5e4 --- /dev/null +++ b/examples-generated/datafactory/v1beta2/triggerschedule.yaml @@ -0,0 +1,65 @@ +apiVersion: datafactory.azure.upbound.io/v1beta2 +kind: TriggerSchedule +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/triggerschedule + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + dataFactoryIdSelector: + matchLabels: + testing.upbound.io/example-name: example + frequency: Day + interval: 5 + pipelineNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: datafactory.azure.upbound.io/v1beta2 +kind: Factory +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/triggerschedule + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: datafactory.azure.upbound.io/v1beta1 +kind: 
Pipeline +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/triggerschedule + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + dataFactoryIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: datafactory/v1beta2/triggerschedule + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/datamigration/v1beta1/databasemigrationproject.yaml b/examples-generated/datamigration/v1beta1/databasemigrationproject.yaml index b25bea2dd..b219a05d4 100644 --- a/examples-generated/datamigration/v1beta1/databasemigrationproject.yaml +++ b/examples-generated/datamigration/v1beta1/databasemigrationproject.yaml @@ -57,7 +57,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: Subnet metadata: annotations: @@ -78,7 +78,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: VirtualNetwork metadata: annotations: diff --git a/examples-generated/datamigration/v1beta1/databasemigrationservice.yaml b/examples-generated/datamigration/v1beta1/databasemigrationservice.yaml index 85a0de2b4..dbc7980d5 100644 --- a/examples-generated/datamigration/v1beta1/databasemigrationservice.yaml +++ b/examples-generated/datamigration/v1beta1/databasemigrationservice.yaml @@ -34,7 +34,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: Subnet metadata: annotations: @@ -55,7 +55,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: VirtualNetwork metadata: annotations: diff --git a/examples-generated/dataprotection/v1beta1/backupinstanceblobstorage.yaml 
b/examples-generated/dataprotection/v1beta1/backupinstanceblobstorage.yaml index c8d9ff043..76181f1ee 100644 --- a/examples-generated/dataprotection/v1beta1/backupinstanceblobstorage.yaml +++ b/examples-generated/dataprotection/v1beta1/backupinstanceblobstorage.yaml @@ -38,7 +38,7 @@ spec: --- -apiVersion: dataprotection.azure.upbound.io/v1beta1 +apiVersion: dataprotection.azure.upbound.io/v1beta2 kind: BackupVault metadata: annotations: @@ -89,7 +89,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: diff --git a/examples-generated/dataprotection/v1beta1/backupinstancedisk.yaml b/examples-generated/dataprotection/v1beta1/backupinstancedisk.yaml index ce0eaf872..64d93403a 100644 --- a/examples-generated/dataprotection/v1beta1/backupinstancedisk.yaml +++ b/examples-generated/dataprotection/v1beta1/backupinstancedisk.yaml @@ -24,7 +24,7 @@ spec: --- -apiVersion: dataprotection.azure.upbound.io/v1beta1 +apiVersion: dataprotection.azure.upbound.io/v1beta2 kind: BackupPolicyDisk metadata: annotations: @@ -43,7 +43,7 @@ spec: --- -apiVersion: dataprotection.azure.upbound.io/v1beta1 +apiVersion: dataprotection.azure.upbound.io/v1beta2 kind: BackupVault metadata: annotations: @@ -64,7 +64,7 @@ spec: --- -apiVersion: compute.azure.upbound.io/v1beta1 +apiVersion: compute.azure.upbound.io/v1beta2 kind: ManagedDisk metadata: annotations: diff --git a/examples-generated/dataprotection/v1beta1/backupinstancepostgresql.yaml b/examples-generated/dataprotection/v1beta1/backupinstancepostgresql.yaml index 58b45a1ae..ec8523905 100644 --- a/examples-generated/dataprotection/v1beta1/backupinstancepostgresql.yaml +++ b/examples-generated/dataprotection/v1beta1/backupinstancepostgresql.yaml @@ -24,7 +24,7 @@ spec: --- -apiVersion: dataprotection.azure.upbound.io/v1beta1 +apiVersion: dataprotection.azure.upbound.io/v1beta2 kind: BackupPolicyPostgreSQL metadata: annotations: @@ -46,7 +46,7 @@ spec: 
--- -apiVersion: dataprotection.azure.upbound.io/v1beta1 +apiVersion: dataprotection.azure.upbound.io/v1beta2 kind: BackupVault metadata: annotations: @@ -67,7 +67,7 @@ spec: --- -apiVersion: keyvault.azure.upbound.io/v1beta1 +apiVersion: keyvault.azure.upbound.io/v1beta2 kind: Vault metadata: annotations: @@ -150,7 +150,7 @@ spec: --- -apiVersion: dbforpostgresql.azure.upbound.io/v1beta1 +apiVersion: dbforpostgresql.azure.upbound.io/v1beta2 kind: Server metadata: annotations: diff --git a/examples-generated/dataprotection/v1beta1/backuppolicyblobstorage.yaml b/examples-generated/dataprotection/v1beta1/backuppolicyblobstorage.yaml index 54389f60c..76ca32556 100644 --- a/examples-generated/dataprotection/v1beta1/backuppolicyblobstorage.yaml +++ b/examples-generated/dataprotection/v1beta1/backuppolicyblobstorage.yaml @@ -15,7 +15,7 @@ spec: --- -apiVersion: dataprotection.azure.upbound.io/v1beta1 +apiVersion: dataprotection.azure.upbound.io/v1beta2 kind: BackupVault metadata: annotations: diff --git a/examples-generated/dataprotection/v1beta2/backuppolicydisk.yaml b/examples-generated/dataprotection/v1beta2/backuppolicydisk.yaml new file mode 100644 index 000000000..b5b3b7a42 --- /dev/null +++ b/examples-generated/dataprotection/v1beta2/backuppolicydisk.yaml @@ -0,0 +1,61 @@ +apiVersion: dataprotection.azure.upbound.io/v1beta2 +kind: BackupPolicyDisk +metadata: + annotations: + meta.upbound.io/example-id: dataprotection/v1beta2/backuppolicydisk + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + backupRepeatingTimeIntervals: + - R/2021-05-19T06:33:16+00:00/PT4H + defaultRetentionDuration: P7D + retentionRule: + - criteria: + - absoluteCriteria: FirstOfDay + duration: P7D + name: Daily + priority: 25 + - criteria: + - absoluteCriteria: FirstOfWeek + duration: P7D + name: Weekly + priority: 20 + timeZone: W. 
Europe Standard Time + vaultIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: dataprotection.azure.upbound.io/v1beta2 +kind: BackupVault +metadata: + annotations: + meta.upbound.io/example-id: dataprotection/v1beta2/backuppolicydisk + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + datastoreType: VaultStore + location: West Europe + redundancy: LocallyRedundant + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: dataprotection/v1beta2/backuppolicydisk + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/dataprotection/v1beta2/backuppolicypostgresql.yaml b/examples-generated/dataprotection/v1beta2/backuppolicypostgresql.yaml new file mode 100644 index 000000000..054574fc1 --- /dev/null +++ b/examples-generated/dataprotection/v1beta2/backuppolicypostgresql.yaml @@ -0,0 +1,78 @@ +apiVersion: dataprotection.azure.upbound.io/v1beta2 +kind: BackupPolicyPostgreSQL +metadata: + annotations: + meta.upbound.io/example-id: dataprotection/v1beta2/backuppolicypostgresql + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + backupRepeatingTimeIntervals: + - R/2021-05-23T02:30:00+00:00/P1W + defaultRetentionDuration: P4M + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + retentionRule: + - criteria: + - absoluteCriteria: FirstOfWeek + duration: P6M + name: weekly + priority: 20 + - criteria: + - daysOfWeek: + - Thursday + scheduledBackupTimes: + - "2021-05-23T02:30:00Z" + duration: P1W + name: thursday + priority: 25 + - criteria: + - daysOfWeek: + - Tuesday + scheduledBackupTimes: + - "2021-05-23T02:30:00Z" + weeksOfMonth: + - First + - Last + duration: P1D + name: 
monthly + priority: 15 + timeZone: India Standard Time + vaultNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: dataprotection.azure.upbound.io/v1beta2 +kind: BackupVault +metadata: + annotations: + meta.upbound.io/example-id: dataprotection/v1beta2/backuppolicypostgresql + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + datastoreType: VaultStore + location: West Europe + redundancy: LocallyRedundant + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: dataprotection/v1beta2/backuppolicypostgresql + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/dataprotection/v1beta2/backupvault.yaml b/examples-generated/dataprotection/v1beta2/backupvault.yaml new file mode 100644 index 000000000..9bace5bd5 --- /dev/null +++ b/examples-generated/dataprotection/v1beta2/backupvault.yaml @@ -0,0 +1,30 @@ +apiVersion: dataprotection.azure.upbound.io/v1beta2 +kind: BackupVault +metadata: + annotations: + meta.upbound.io/example-id: dataprotection/v1beta2/backupvault + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + datastoreType: VaultStore + location: West Europe + redundancy: LocallyRedundant + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: dataprotection/v1beta2/backupvault + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/datashare/v1beta1/datasetdatalakegen2.yaml b/examples-generated/datashare/v1beta1/datasetdatalakegen2.yaml index 
47ea174cf..dc018ff1a 100644 --- a/examples-generated/datashare/v1beta1/datasetdatalakegen2.yaml +++ b/examples-generated/datashare/v1beta1/datasetdatalakegen2.yaml @@ -21,7 +21,7 @@ spec: --- -apiVersion: datashare.azure.upbound.io/v1beta1 +apiVersion: datashare.azure.upbound.io/v1beta2 kind: DataShare metadata: annotations: @@ -38,7 +38,7 @@ spec: --- -apiVersion: datashare.azure.upbound.io/v1beta1 +apiVersion: datashare.azure.upbound.io/v1beta2 kind: Account metadata: annotations: @@ -87,7 +87,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: diff --git a/examples-generated/datashare/v1beta1/datasetkustocluster.yaml b/examples-generated/datashare/v1beta1/datasetkustocluster.yaml index 0eef28a4a..d00f72eea 100644 --- a/examples-generated/datashare/v1beta1/datasetkustocluster.yaml +++ b/examples-generated/datashare/v1beta1/datasetkustocluster.yaml @@ -17,7 +17,7 @@ spec: --- -apiVersion: datashare.azure.upbound.io/v1beta1 +apiVersion: datashare.azure.upbound.io/v1beta2 kind: DataShare metadata: annotations: @@ -34,7 +34,7 @@ spec: --- -apiVersion: datashare.azure.upbound.io/v1beta1 +apiVersion: datashare.azure.upbound.io/v1beta2 kind: Account metadata: annotations: @@ -53,7 +53,7 @@ spec: --- -apiVersion: kusto.azure.upbound.io/v1beta1 +apiVersion: kusto.azure.upbound.io/v1beta2 kind: Cluster metadata: annotations: diff --git a/examples-generated/datashare/v1beta1/datasetkustodatabase.yaml b/examples-generated/datashare/v1beta1/datasetkustodatabase.yaml index d0a7a7528..48bb3c629 100644 --- a/examples-generated/datashare/v1beta1/datasetkustodatabase.yaml +++ b/examples-generated/datashare/v1beta1/datasetkustodatabase.yaml @@ -17,7 +17,7 @@ spec: --- -apiVersion: datashare.azure.upbound.io/v1beta1 +apiVersion: datashare.azure.upbound.io/v1beta2 kind: DataShare metadata: annotations: @@ -34,7 +34,7 @@ spec: --- -apiVersion: datashare.azure.upbound.io/v1beta1 +apiVersion: 
datashare.azure.upbound.io/v1beta2 kind: Account metadata: annotations: @@ -53,7 +53,7 @@ spec: --- -apiVersion: kusto.azure.upbound.io/v1beta1 +apiVersion: kusto.azure.upbound.io/v1beta2 kind: Cluster metadata: annotations: diff --git a/examples-generated/datashare/v1beta2/account.yaml b/examples-generated/datashare/v1beta2/account.yaml new file mode 100644 index 000000000..d8c849aed --- /dev/null +++ b/examples-generated/datashare/v1beta2/account.yaml @@ -0,0 +1,32 @@ +apiVersion: datashare.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: datashare/v1beta2/account + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + identity: + - type: SystemAssigned + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + tags: + foo: bar + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: datashare/v1beta2/account + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/datashare/v1beta2/datasetblobstorage.yaml b/examples-generated/datashare/v1beta2/datasetblobstorage.yaml new file mode 100644 index 000000000..8ce9be6c8 --- /dev/null +++ b/examples-generated/datashare/v1beta2/datasetblobstorage.yaml @@ -0,0 +1,127 @@ +apiVersion: datashare.azure.upbound.io/v1beta2 +kind: DataSetBlobStorage +metadata: + annotations: + meta.upbound.io/example-id: datashare/v1beta2/datasetblobstorage + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + containerNameSelector: + matchLabels: + testing.upbound.io/example-name: example + dataShareIdSelector: + matchLabels: + testing.upbound.io/example-name: example + filePath: myfile.txt + storageAccount: + - nameSelector: + matchLabels: + testing.upbound.io/example-name: example + resourceGroupNameSelector: + 
matchLabels: + testing.upbound.io/example-name: example + subscriptionId: 00000000-0000-0000-0000-000000000000 + +--- + +apiVersion: datashare.azure.upbound.io/v1beta2 +kind: DataShare +metadata: + annotations: + meta.upbound.io/example-id: datashare/v1beta2/datasetblobstorage + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountIdSelector: + matchLabels: + testing.upbound.io/example-name: example + kind: CopyBased + +--- + +apiVersion: datashare.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: datashare/v1beta2/datasetblobstorage + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + identity: + - type: SystemAssigned + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: datashare/v1beta2/datasetblobstorage + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: authorization.azure.upbound.io/v1beta1 +kind: RoleAssignment +metadata: + annotations: + meta.upbound.io/example-id: datashare/v1beta2/datasetblobstorage + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + principalId: ${data.azuread_service_principal.example.object_id} + roleDefinitionName: Storage Blob Data Reader + scope: ${azurerm_storage_account.example.id} + +--- + +apiVersion: storage.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: datashare/v1beta2/datasetblobstorage + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountReplicationType: RAGRS + accountTier: Standard + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + 
+--- + +apiVersion: storage.azure.upbound.io/v1beta1 +kind: Container +metadata: + annotations: + meta.upbound.io/example-id: datashare/v1beta2/datasetblobstorage + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + containerAccessType: container + storageAccountNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/datashare/v1beta2/datashare.yaml b/examples-generated/datashare/v1beta2/datashare.yaml new file mode 100644 index 000000000..3e61fce4c --- /dev/null +++ b/examples-generated/datashare/v1beta2/datashare.yaml @@ -0,0 +1,55 @@ +apiVersion: datashare.azure.upbound.io/v1beta2 +kind: DataShare +metadata: + annotations: + meta.upbound.io/example-id: datashare/v1beta2/datashare + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountIdSelector: + matchLabels: + testing.upbound.io/example-name: example + description: example desc + kind: CopyBased + snapshotSchedule: + - name: example-ss + recurrence: Day + startTime: "2020-04-17T04:47:52.9614956Z" + terms: example terms + +--- + +apiVersion: datashare.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: datashare/v1beta2/datashare + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + identity: + - type: SystemAssigned + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + tags: + foo: bar + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: datashare/v1beta2/datashare + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/dbformariadb/v1beta1/virtualnetworkrule.yaml b/examples-generated/dbformariadb/v1beta1/virtualnetworkrule.yaml index fbe82fd5f..940f8a5f8 100644 --- 
a/examples-generated/dbformariadb/v1beta1/virtualnetworkrule.yaml +++ b/examples-generated/dbformariadb/v1beta1/virtualnetworkrule.yaml @@ -59,7 +59,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: Subnet metadata: annotations: @@ -82,7 +82,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: VirtualNetwork metadata: annotations: diff --git a/examples-generated/dbformysql/v1beta1/activedirectoryadministrator.yaml b/examples-generated/dbformysql/v1beta1/activedirectoryadministrator.yaml index 6a00e45fc..a293eb849 100644 --- a/examples-generated/dbformysql/v1beta1/activedirectoryadministrator.yaml +++ b/examples-generated/dbformysql/v1beta1/activedirectoryadministrator.yaml @@ -20,7 +20,7 @@ spec: --- -apiVersion: dbformysql.azure.upbound.io/v1beta1 +apiVersion: dbformysql.azure.upbound.io/v1beta2 kind: Server metadata: annotations: diff --git a/examples-generated/dbformysql/v1beta1/configuration.yaml b/examples-generated/dbformysql/v1beta1/configuration.yaml index 82c07b317..a758f0b47 100644 --- a/examples-generated/dbformysql/v1beta1/configuration.yaml +++ b/examples-generated/dbformysql/v1beta1/configuration.yaml @@ -19,7 +19,7 @@ spec: --- -apiVersion: dbformysql.azure.upbound.io/v1beta1 +apiVersion: dbformysql.azure.upbound.io/v1beta2 kind: Server metadata: annotations: diff --git a/examples-generated/dbformysql/v1beta1/database.yaml b/examples-generated/dbformysql/v1beta1/database.yaml index bf266ccc2..51ac4d643 100644 --- a/examples-generated/dbformysql/v1beta1/database.yaml +++ b/examples-generated/dbformysql/v1beta1/database.yaml @@ -19,7 +19,7 @@ spec: --- -apiVersion: dbformysql.azure.upbound.io/v1beta1 +apiVersion: dbformysql.azure.upbound.io/v1beta2 kind: Server metadata: annotations: diff --git a/examples-generated/dbformysql/v1beta1/firewallrule.yaml b/examples-generated/dbformysql/v1beta1/firewallrule.yaml index 
89e7c3923..8bb1a2fd8 100644 --- a/examples-generated/dbformysql/v1beta1/firewallrule.yaml +++ b/examples-generated/dbformysql/v1beta1/firewallrule.yaml @@ -19,7 +19,7 @@ spec: --- -apiVersion: dbformysql.azure.upbound.io/v1beta1 +apiVersion: dbformysql.azure.upbound.io/v1beta2 kind: Server metadata: annotations: diff --git a/examples-generated/dbformysql/v1beta1/flexibledatabase.yaml b/examples-generated/dbformysql/v1beta1/flexibledatabase.yaml index 849db648b..82be4d00f 100644 --- a/examples-generated/dbformysql/v1beta1/flexibledatabase.yaml +++ b/examples-generated/dbformysql/v1beta1/flexibledatabase.yaml @@ -19,7 +19,7 @@ spec: --- -apiVersion: dbformysql.azure.upbound.io/v1beta1 +apiVersion: dbformysql.azure.upbound.io/v1beta2 kind: FlexibleServer metadata: annotations: diff --git a/examples-generated/dbformysql/v1beta1/flexibleserverconfiguration.yaml b/examples-generated/dbformysql/v1beta1/flexibleserverconfiguration.yaml index 7d2d2aa13..afe80a8de 100644 --- a/examples-generated/dbformysql/v1beta1/flexibleserverconfiguration.yaml +++ b/examples-generated/dbformysql/v1beta1/flexibleserverconfiguration.yaml @@ -18,7 +18,7 @@ spec: --- -apiVersion: dbformysql.azure.upbound.io/v1beta1 +apiVersion: dbformysql.azure.upbound.io/v1beta2 kind: FlexibleServer metadata: annotations: diff --git a/examples-generated/dbformysql/v1beta1/flexibleserverfirewallrule.yaml b/examples-generated/dbformysql/v1beta1/flexibleserverfirewallrule.yaml index 7a761e07b..adae9c8b5 100644 --- a/examples-generated/dbformysql/v1beta1/flexibleserverfirewallrule.yaml +++ b/examples-generated/dbformysql/v1beta1/flexibleserverfirewallrule.yaml @@ -19,7 +19,7 @@ spec: --- -apiVersion: dbformysql.azure.upbound.io/v1beta1 +apiVersion: dbformysql.azure.upbound.io/v1beta2 kind: FlexibleServer metadata: annotations: diff --git a/examples-generated/dbformysql/v1beta1/virtualnetworkrule.yaml b/examples-generated/dbformysql/v1beta1/virtualnetworkrule.yaml index 20d4e3251..88f7b0a56 100644 --- 
a/examples-generated/dbformysql/v1beta1/virtualnetworkrule.yaml +++ b/examples-generated/dbformysql/v1beta1/virtualnetworkrule.yaml @@ -20,7 +20,7 @@ spec: --- -apiVersion: dbformysql.azure.upbound.io/v1beta1 +apiVersion: dbformysql.azure.upbound.io/v1beta2 kind: Server metadata: annotations: @@ -63,7 +63,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: Subnet metadata: annotations: @@ -86,7 +86,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: VirtualNetwork metadata: annotations: diff --git a/examples-generated/dbformysql/v1beta2/flexibleserver.yaml b/examples-generated/dbformysql/v1beta2/flexibleserver.yaml new file mode 100644 index 000000000..b14436b8c --- /dev/null +++ b/examples-generated/dbformysql/v1beta2/flexibleserver.yaml @@ -0,0 +1,127 @@ +apiVersion: dbformysql.azure.upbound.io/v1beta2 +kind: FlexibleServer +metadata: + annotations: + meta.upbound.io/example-id: dbformysql/v1beta2/flexibleserver + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + administratorLogin: psqladmin + administratorPasswordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + backupRetentionDays: 7 + delegatedSubnetIdSelector: + matchLabels: + testing.upbound.io/example-name: example + location: West Europe + privateDnsZoneIdSelector: + matchLabels: + testing.upbound.io/example-name: example + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: GP_Standard_D2ds_v4 + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: PrivateDNSZone +metadata: + annotations: + meta.upbound.io/example-id: dbformysql/v1beta2/flexibleserver + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: 
network.azure.upbound.io/v1beta1 +kind: PrivateDNSZoneVirtualNetworkLink +metadata: + annotations: + meta.upbound.io/example-id: dbformysql/v1beta2/flexibleserver + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + privateDnsZoneNameSelector: + matchLabels: + testing.upbound.io/example-name: example + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + virtualNetworkIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: dbformysql/v1beta2/flexibleserver + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: dbformysql/v1beta2/flexibleserver + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressPrefixes: + - 10.0.2.0/24 + delegation: + - name: fs + serviceDelegation: + - actions: + - Microsoft.Network/virtualNetworks/subnets/join/action + name: Microsoft.DBforMySQL/flexibleServers + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + serviceEndpoints: + - Microsoft.Storage + virtualNetworkNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: VirtualNetwork +metadata: + annotations: + meta.upbound.io/example-id: dbformysql/v1beta2/flexibleserver + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressSpace: + - 10.0.0.0/16 + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/dbformysql/v1beta2/server.yaml b/examples-generated/dbformysql/v1beta2/server.yaml new file mode 100644 index 
000000000..9ad1f4970 --- /dev/null +++ b/examples-generated/dbformysql/v1beta2/server.yaml @@ -0,0 +1,44 @@ +apiVersion: dbformysql.azure.upbound.io/v1beta2 +kind: Server +metadata: + annotations: + crossplane.io/external-name: ${Rand.RFC1123Subdomain} + meta.upbound.io/example-id: dbformysql/v1beta2/server + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + administratorLogin: mysqladminun + administratorLoginPasswordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + autoGrowEnabled: true + backupRetentionDays: 7 + geoRedundantBackupEnabled: false + infrastructureEncryptionEnabled: false + location: West Europe + publicNetworkAccessEnabled: true + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: B_Gen5_2 + sslEnforcementEnabled: true + sslMinimalTlsVersionEnforced: TLS1_2 + storageMb: 5120 + version: "5.7" + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: dbformysql/v1beta2/server + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/dbforpostgresql/v1beta1/activedirectoryadministrator.yaml b/examples-generated/dbforpostgresql/v1beta1/activedirectoryadministrator.yaml index 9a45dc6c9..9e3806404 100644 --- a/examples-generated/dbforpostgresql/v1beta1/activedirectoryadministrator.yaml +++ b/examples-generated/dbforpostgresql/v1beta1/activedirectoryadministrator.yaml @@ -20,7 +20,7 @@ spec: --- -apiVersion: dbforpostgresql.azure.upbound.io/v1beta1 +apiVersion: dbforpostgresql.azure.upbound.io/v1beta2 kind: Server metadata: annotations: diff --git a/examples-generated/dbforpostgresql/v1beta1/configuration.yaml b/examples-generated/dbforpostgresql/v1beta1/configuration.yaml index 79ef12bc7..47e6a0f71 100644 --- a/examples-generated/dbforpostgresql/v1beta1/configuration.yaml +++ 
b/examples-generated/dbforpostgresql/v1beta1/configuration.yaml @@ -19,7 +19,7 @@ spec: --- -apiVersion: dbforpostgresql.azure.upbound.io/v1beta1 +apiVersion: dbforpostgresql.azure.upbound.io/v1beta2 kind: Server metadata: annotations: diff --git a/examples-generated/dbforpostgresql/v1beta1/database.yaml b/examples-generated/dbforpostgresql/v1beta1/database.yaml index de34a7fb7..83ee2c91f 100644 --- a/examples-generated/dbforpostgresql/v1beta1/database.yaml +++ b/examples-generated/dbforpostgresql/v1beta1/database.yaml @@ -19,7 +19,7 @@ spec: --- -apiVersion: dbforpostgresql.azure.upbound.io/v1beta1 +apiVersion: dbforpostgresql.azure.upbound.io/v1beta2 kind: Server metadata: annotations: diff --git a/examples-generated/dbforpostgresql/v1beta1/firewallrule.yaml b/examples-generated/dbforpostgresql/v1beta1/firewallrule.yaml index cbfe3244b..25ac5c013 100644 --- a/examples-generated/dbforpostgresql/v1beta1/firewallrule.yaml +++ b/examples-generated/dbforpostgresql/v1beta1/firewallrule.yaml @@ -19,7 +19,7 @@ spec: --- -apiVersion: dbforpostgresql.azure.upbound.io/v1beta1 +apiVersion: dbforpostgresql.azure.upbound.io/v1beta2 kind: Server metadata: annotations: diff --git a/examples-generated/dbforpostgresql/v1beta1/flexibleserveractivedirectoryadministrator.yaml b/examples-generated/dbforpostgresql/v1beta1/flexibleserveractivedirectoryadministrator.yaml index 125020a39..0f2019404 100644 --- a/examples-generated/dbforpostgresql/v1beta1/flexibleserveractivedirectoryadministrator.yaml +++ b/examples-generated/dbforpostgresql/v1beta1/flexibleserveractivedirectoryadministrator.yaml @@ -21,7 +21,7 @@ spec: --- -apiVersion: dbforpostgresql.azure.upbound.io/v1beta1 +apiVersion: dbforpostgresql.azure.upbound.io/v1beta2 kind: FlexibleServer metadata: annotations: diff --git a/examples-generated/dbforpostgresql/v1beta1/flexibleserverconfiguration.yaml b/examples-generated/dbforpostgresql/v1beta1/flexibleserverconfiguration.yaml index 78a5eb352..5ca0ed298 100644 --- 
a/examples-generated/dbforpostgresql/v1beta1/flexibleserverconfiguration.yaml +++ b/examples-generated/dbforpostgresql/v1beta1/flexibleserverconfiguration.yaml @@ -16,7 +16,7 @@ spec: --- -apiVersion: dbforpostgresql.azure.upbound.io/v1beta1 +apiVersion: dbforpostgresql.azure.upbound.io/v1beta2 kind: FlexibleServer metadata: annotations: diff --git a/examples-generated/dbforpostgresql/v1beta1/flexibleserverdatabase.yaml b/examples-generated/dbforpostgresql/v1beta1/flexibleserverdatabase.yaml index 8f175614a..abf83b8e9 100644 --- a/examples-generated/dbforpostgresql/v1beta1/flexibleserverdatabase.yaml +++ b/examples-generated/dbforpostgresql/v1beta1/flexibleserverdatabase.yaml @@ -16,7 +16,7 @@ spec: --- -apiVersion: dbforpostgresql.azure.upbound.io/v1beta1 +apiVersion: dbforpostgresql.azure.upbound.io/v1beta2 kind: FlexibleServer metadata: annotations: diff --git a/examples-generated/dbforpostgresql/v1beta1/flexibleserverfirewallrule.yaml b/examples-generated/dbforpostgresql/v1beta1/flexibleserverfirewallrule.yaml index d8de738e7..c81caf8fd 100644 --- a/examples-generated/dbforpostgresql/v1beta1/flexibleserverfirewallrule.yaml +++ b/examples-generated/dbforpostgresql/v1beta1/flexibleserverfirewallrule.yaml @@ -16,7 +16,7 @@ spec: --- -apiVersion: dbforpostgresql.azure.upbound.io/v1beta1 +apiVersion: dbforpostgresql.azure.upbound.io/v1beta2 kind: FlexibleServer metadata: annotations: diff --git a/examples-generated/dbforpostgresql/v1beta1/serverkey.yaml b/examples-generated/dbforpostgresql/v1beta1/serverkey.yaml index ad2faffa4..95816b64b 100644 --- a/examples-generated/dbforpostgresql/v1beta1/serverkey.yaml +++ b/examples-generated/dbforpostgresql/v1beta1/serverkey.yaml @@ -17,7 +17,7 @@ spec: --- -apiVersion: keyvault.azure.upbound.io/v1beta1 +apiVersion: keyvault.azure.upbound.io/v1beta2 kind: Vault metadata: annotations: @@ -96,7 +96,7 @@ spec: --- -apiVersion: keyvault.azure.upbound.io/v1beta1 +apiVersion: keyvault.azure.upbound.io/v1beta2 kind: Key metadata: 
annotations: @@ -122,7 +122,7 @@ spec: --- -apiVersion: dbforpostgresql.azure.upbound.io/v1beta1 +apiVersion: dbforpostgresql.azure.upbound.io/v1beta2 kind: Server metadata: annotations: diff --git a/examples-generated/dbforpostgresql/v1beta1/virtualnetworkrule.yaml b/examples-generated/dbforpostgresql/v1beta1/virtualnetworkrule.yaml index 16ea7f361..eb58443b1 100644 --- a/examples-generated/dbforpostgresql/v1beta1/virtualnetworkrule.yaml +++ b/examples-generated/dbforpostgresql/v1beta1/virtualnetworkrule.yaml @@ -21,7 +21,7 @@ spec: --- -apiVersion: dbforpostgresql.azure.upbound.io/v1beta1 +apiVersion: dbforpostgresql.azure.upbound.io/v1beta2 kind: Server metadata: annotations: @@ -62,7 +62,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: Subnet metadata: annotations: @@ -85,7 +85,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: VirtualNetwork metadata: annotations: diff --git a/examples-generated/dbforpostgresql/v1beta2/flexibleserver.yaml b/examples-generated/dbforpostgresql/v1beta2/flexibleserver.yaml new file mode 100644 index 000000000..d543f5090 --- /dev/null +++ b/examples-generated/dbforpostgresql/v1beta2/flexibleserver.yaml @@ -0,0 +1,130 @@ +apiVersion: dbforpostgresql.azure.upbound.io/v1beta2 +kind: FlexibleServer +metadata: + annotations: + meta.upbound.io/example-id: dbforpostgresql/v1beta2/flexibleserver + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + administratorLogin: psqladmin + administratorPasswordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + delegatedSubnetIdSelector: + matchLabels: + testing.upbound.io/example-name: example + location: West Europe + privateDnsZoneIdSelector: + matchLabels: + testing.upbound.io/example-name: example + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: 
GP_Standard_D4s_v3 + storageMb: 32768 + storageTier: P30 + version: "12" + zone: "1" + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: PrivateDNSZone +metadata: + annotations: + meta.upbound.io/example-id: dbforpostgresql/v1beta2/flexibleserver + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta1 +kind: PrivateDNSZoneVirtualNetworkLink +metadata: + annotations: + meta.upbound.io/example-id: dbforpostgresql/v1beta2/flexibleserver + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + privateDnsZoneNameSelector: + matchLabels: + testing.upbound.io/example-name: example + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + virtualNetworkIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: dbforpostgresql/v1beta2/flexibleserver + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: dbforpostgresql/v1beta2/flexibleserver + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressPrefixes: + - 10.0.2.0/24 + delegation: + - name: fs + serviceDelegation: + - actions: + - Microsoft.Network/virtualNetworks/subnets/join/action + name: Microsoft.DBforPostgreSQL/flexibleServers + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + serviceEndpoints: + - Microsoft.Storage + virtualNetworkNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: 
VirtualNetwork +metadata: + annotations: + meta.upbound.io/example-id: dbforpostgresql/v1beta2/flexibleserver + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressSpace: + - 10.0.0.0/16 + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/dbforpostgresql/v1beta2/server.yaml b/examples-generated/dbforpostgresql/v1beta2/server.yaml new file mode 100644 index 000000000..8c59f900e --- /dev/null +++ b/examples-generated/dbforpostgresql/v1beta2/server.yaml @@ -0,0 +1,42 @@ +apiVersion: dbforpostgresql.azure.upbound.io/v1beta2 +kind: Server +metadata: + annotations: + meta.upbound.io/example-id: dbforpostgresql/v1beta2/server + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + administratorLogin: psqladmin + administratorLoginPasswordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + autoGrowEnabled: true + backupRetentionDays: 7 + geoRedundantBackupEnabled: true + location: West Europe + publicNetworkAccessEnabled: false + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: GP_Gen5_4 + sslEnforcementEnabled: true + sslMinimalTlsVersionEnforced: TLS1_2 + storageMb: 640000 + version: "11" + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: dbforpostgresql/v1beta2/server + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/devices/v1beta1/iothubcertificate.yaml b/examples-generated/devices/v1beta1/iothubcertificate.yaml index 62a5b679d..a3aeb4cef 100644 --- a/examples-generated/devices/v1beta1/iothubcertificate.yaml +++ b/examples-generated/devices/v1beta1/iothubcertificate.yaml @@ -22,7 +22,7 @@ spec: --- -apiVersion: devices.azure.upbound.io/v1beta1 
+apiVersion: devices.azure.upbound.io/v1beta2 kind: IOTHub metadata: annotations: diff --git a/examples-generated/devices/v1beta1/iothubconsumergroup.yaml b/examples-generated/devices/v1beta1/iothubconsumergroup.yaml index 7fa97d942..6f44d6e6c 100644 --- a/examples-generated/devices/v1beta1/iothubconsumergroup.yaml +++ b/examples-generated/devices/v1beta1/iothubconsumergroup.yaml @@ -18,7 +18,7 @@ spec: --- -apiVersion: devices.azure.upbound.io/v1beta1 +apiVersion: devices.azure.upbound.io/v1beta2 kind: IOTHub metadata: annotations: diff --git a/examples-generated/devices/v1beta1/iothubdpscertificate.yaml b/examples-generated/devices/v1beta1/iothubdpscertificate.yaml index 167eddf0d..359a3c198 100644 --- a/examples-generated/devices/v1beta1/iothubdpscertificate.yaml +++ b/examples-generated/devices/v1beta1/iothubdpscertificate.yaml @@ -21,7 +21,7 @@ spec: --- -apiVersion: devices.azure.upbound.io/v1beta1 +apiVersion: devices.azure.upbound.io/v1beta2 kind: IOTHubDPS metadata: annotations: diff --git a/examples-generated/devices/v1beta1/iothubdpssharedaccesspolicy.yaml b/examples-generated/devices/v1beta1/iothubdpssharedaccesspolicy.yaml index 3b3317d73..1a7f749b2 100644 --- a/examples-generated/devices/v1beta1/iothubdpssharedaccesspolicy.yaml +++ b/examples-generated/devices/v1beta1/iothubdpssharedaccesspolicy.yaml @@ -19,7 +19,7 @@ spec: --- -apiVersion: devices.azure.upbound.io/v1beta1 +apiVersion: devices.azure.upbound.io/v1beta2 kind: IOTHubDPS metadata: annotations: diff --git a/examples-generated/devices/v1beta1/iothubendpointeventhub.yaml b/examples-generated/devices/v1beta1/iothubendpointeventhub.yaml index c762ce6be..0363bb513 100644 --- a/examples-generated/devices/v1beta1/iothubendpointeventhub.yaml +++ b/examples-generated/devices/v1beta1/iothubendpointeventhub.yaml @@ -21,7 +21,7 @@ spec: --- -apiVersion: eventhub.azure.upbound.io/v1beta1 +apiVersion: eventhub.azure.upbound.io/v1beta2 kind: EventHub metadata: annotations: @@ -67,7 +67,7 @@ spec: --- 
-apiVersion: eventhub.azure.upbound.io/v1beta1 +apiVersion: eventhub.azure.upbound.io/v1beta2 kind: EventHubNamespace metadata: annotations: @@ -85,7 +85,7 @@ spec: --- -apiVersion: devices.azure.upbound.io/v1beta1 +apiVersion: devices.azure.upbound.io/v1beta2 kind: IOTHub metadata: annotations: diff --git a/examples-generated/devices/v1beta1/iothubendpointservicebusqueue.yaml b/examples-generated/devices/v1beta1/iothubendpointservicebusqueue.yaml index 1e52bd378..fd05baf86 100644 --- a/examples-generated/devices/v1beta1/iothubendpointservicebusqueue.yaml +++ b/examples-generated/devices/v1beta1/iothubendpointservicebusqueue.yaml @@ -21,7 +21,7 @@ spec: --- -apiVersion: devices.azure.upbound.io/v1beta1 +apiVersion: devices.azure.upbound.io/v1beta2 kind: IOTHub metadata: annotations: @@ -57,7 +57,7 @@ spec: --- -apiVersion: servicebus.azure.upbound.io/v1beta1 +apiVersion: servicebus.azure.upbound.io/v1beta2 kind: ServiceBusNamespace metadata: annotations: diff --git a/examples-generated/devices/v1beta1/iothubendpointservicebustopic.yaml b/examples-generated/devices/v1beta1/iothubendpointservicebustopic.yaml index 104bbcf2e..aab41297d 100644 --- a/examples-generated/devices/v1beta1/iothubendpointservicebustopic.yaml +++ b/examples-generated/devices/v1beta1/iothubendpointservicebustopic.yaml @@ -21,7 +21,7 @@ spec: --- -apiVersion: devices.azure.upbound.io/v1beta1 +apiVersion: devices.azure.upbound.io/v1beta2 kind: IOTHub metadata: annotations: @@ -57,7 +57,7 @@ spec: --- -apiVersion: servicebus.azure.upbound.io/v1beta1 +apiVersion: servicebus.azure.upbound.io/v1beta2 kind: ServiceBusNamespace metadata: annotations: diff --git a/examples-generated/devices/v1beta1/iothubendpointstoragecontainer.yaml b/examples-generated/devices/v1beta1/iothubendpointstoragecontainer.yaml index fe5214968..2773eee79 100644 --- a/examples-generated/devices/v1beta1/iothubendpointstoragecontainer.yaml +++ b/examples-generated/devices/v1beta1/iothubendpointstoragecontainer.yaml @@ -28,7 
+28,7 @@ spec: --- -apiVersion: devices.azure.upbound.io/v1beta1 +apiVersion: devices.azure.upbound.io/v1beta2 kind: IOTHub metadata: annotations: @@ -62,7 +62,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: diff --git a/examples-generated/devices/v1beta1/iothubenrichment.yaml b/examples-generated/devices/v1beta1/iothubenrichment.yaml index d3db2a4b4..cd61871eb 100644 --- a/examples-generated/devices/v1beta1/iothubenrichment.yaml +++ b/examples-generated/devices/v1beta1/iothubenrichment.yaml @@ -21,7 +21,7 @@ spec: --- -apiVersion: devices.azure.upbound.io/v1beta1 +apiVersion: devices.azure.upbound.io/v1beta2 kind: IOTHub metadata: annotations: @@ -111,7 +111,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: diff --git a/examples-generated/devices/v1beta1/iothubfallbackroute.yaml b/examples-generated/devices/v1beta1/iothubfallbackroute.yaml index 212194c6a..2118a89e9 100644 --- a/examples-generated/devices/v1beta1/iothubfallbackroute.yaml +++ b/examples-generated/devices/v1beta1/iothubfallbackroute.yaml @@ -21,7 +21,7 @@ spec: --- -apiVersion: devices.azure.upbound.io/v1beta1 +apiVersion: devices.azure.upbound.io/v1beta2 kind: IOTHub metadata: annotations: @@ -87,7 +87,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: diff --git a/examples-generated/devices/v1beta1/iothubroute.yaml b/examples-generated/devices/v1beta1/iothubroute.yaml index c0dc7c94c..60ea18147 100644 --- a/examples-generated/devices/v1beta1/iothubroute.yaml +++ b/examples-generated/devices/v1beta1/iothubroute.yaml @@ -22,7 +22,7 @@ spec: --- -apiVersion: devices.azure.upbound.io/v1beta1 +apiVersion: devices.azure.upbound.io/v1beta2 kind: IOTHub metadata: annotations: @@ -88,7 +88,7 @@ spec: --- -apiVersion: 
storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: diff --git a/examples-generated/devices/v1beta1/iothubsharedaccesspolicy.yaml b/examples-generated/devices/v1beta1/iothubsharedaccesspolicy.yaml index e5440778f..110bea129 100644 --- a/examples-generated/devices/v1beta1/iothubsharedaccesspolicy.yaml +++ b/examples-generated/devices/v1beta1/iothubsharedaccesspolicy.yaml @@ -19,7 +19,7 @@ spec: --- -apiVersion: devices.azure.upbound.io/v1beta1 +apiVersion: devices.azure.upbound.io/v1beta2 kind: IOTHub metadata: annotations: diff --git a/examples-generated/devices/v1beta2/iothub.yaml b/examples-generated/devices/v1beta2/iothub.yaml new file mode 100644 index 000000000..d179365ed --- /dev/null +++ b/examples-generated/devices/v1beta2/iothub.yaml @@ -0,0 +1,139 @@ +apiVersion: devices.azure.upbound.io/v1beta2 +kind: IOTHub +metadata: + annotations: + meta.upbound.io/example-id: devices/v1beta2/iothub + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + cloudToDevice: + - defaultTtl: PT1H + feedback: + - lockDuration: PT30S + maxDeliveryCount: 15 + timeToLive: PT1H10M + maxDeliveryCount: 30 + localAuthenticationEnabled: false + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sku: + - capacity: "1" + name: S1 + tags: + purpose: testing + +--- + +apiVersion: eventhub.azure.upbound.io/v1beta2 +kind: EventHub +metadata: + annotations: + meta.upbound.io/example-id: devices/v1beta2/iothub + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + messageRetention: 1 + namespaceNameSelector: + matchLabels: + testing.upbound.io/example-name: example + partitionCount: 2 + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: eventhub.azure.upbound.io/v1beta1 +kind: AuthorizationRule +metadata: + annotations: + 
meta.upbound.io/example-id: devices/v1beta2/iothub + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + eventhubNameSelector: + matchLabels: + testing.upbound.io/example-name: example + namespaceNameSelector: + matchLabels: + testing.upbound.io/example-name: example + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + send: true + +--- + +apiVersion: eventhub.azure.upbound.io/v1beta2 +kind: EventHubNamespace +metadata: + annotations: + meta.upbound.io/example-id: devices/v1beta2/iothub + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sku: Basic + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: devices/v1beta2/iothub + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: storage.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: devices/v1beta2/iothub + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountReplicationType: LRS + accountTier: Standard + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: storage.azure.upbound.io/v1beta1 +kind: Container +metadata: + annotations: + meta.upbound.io/example-id: devices/v1beta2/iothub + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + containerAccessType: private + storageAccountNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/devices/v1beta2/iothubdps.yaml b/examples-generated/devices/v1beta2/iothubdps.yaml new file mode 100644 index 000000000..1be7b590a --- /dev/null +++ 
b/examples-generated/devices/v1beta2/iothubdps.yaml @@ -0,0 +1,32 @@ +apiVersion: devices.azure.upbound.io/v1beta2 +kind: IOTHubDPS +metadata: + annotations: + meta.upbound.io/example-id: devices/v1beta2/iothubdps + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + allocationPolicy: Hashed + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sku: + - capacity: "1" + name: S1 + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: devices/v1beta2/iothubdps + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/deviceupdate/v1beta2/iothubdeviceupdateaccount.yaml b/examples-generated/deviceupdate/v1beta2/iothubdeviceupdateaccount.yaml new file mode 100644 index 000000000..b2edf147c --- /dev/null +++ b/examples-generated/deviceupdate/v1beta2/iothubdeviceupdateaccount.yaml @@ -0,0 +1,32 @@ +apiVersion: deviceupdate.azure.upbound.io/v1beta2 +kind: IOTHubDeviceUpdateAccount +metadata: + annotations: + meta.upbound.io/example-id: deviceupdate/v1beta2/iothubdeviceupdateaccount + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + identity: + - type: SystemAssigned + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + tags: + key: value + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: deviceupdate/v1beta2/iothubdeviceupdateaccount + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: East US diff --git a/examples-generated/deviceupdate/v1beta2/iothubdeviceupdateinstance.yaml b/examples-generated/deviceupdate/v1beta2/iothubdeviceupdateinstance.yaml new file mode 100644 index 000000000..6b7d5a0eb --- 
/dev/null +++ b/examples-generated/deviceupdate/v1beta2/iothubdeviceupdateinstance.yaml @@ -0,0 +1,97 @@ +apiVersion: deviceupdate.azure.upbound.io/v1beta2 +kind: IOTHubDeviceUpdateInstance +metadata: + annotations: + meta.upbound.io/example-id: deviceupdate/v1beta2/iothubdeviceupdateinstance + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + deviceUpdateAccountIdSelector: + matchLabels: + testing.upbound.io/example-name: example + diagnosticEnabled: true + diagnosticStorageAccount: + - connectionStringSecretRef: + key: attribute.primary_connection_string + name: example-storage-account + namespace: upbound-system + idSelector: + matchLabels: + testing.upbound.io/example-name: example + iothubIdSelector: + matchLabels: + testing.upbound.io/example-name: example + tags: + key: value + +--- + +apiVersion: devices.azure.upbound.io/v1beta2 +kind: IOTHub +metadata: + annotations: + meta.upbound.io/example-id: deviceupdate/v1beta2/iothubdeviceupdateinstance + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: East US + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sku: + - capacity: "1" + name: S1 + +--- + +apiVersion: deviceupdate.azure.upbound.io/v1beta2 +kind: IOTHubDeviceUpdateAccount +metadata: + annotations: + meta.upbound.io/example-id: deviceupdate/v1beta2/iothubdeviceupdateinstance + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: East US + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: deviceupdate/v1beta2/iothubdeviceupdateinstance + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: East US + +--- + +apiVersion: storage.azure.upbound.io/v1beta2 +kind: Account 
+metadata: + annotations: + meta.upbound.io/example-id: deviceupdate/v1beta2/iothubdeviceupdateinstance + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountReplicationType: LRS + accountTier: Standard + location: East US + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/devtestlab/v1beta2/globalvmshutdownschedule.yaml b/examples-generated/devtestlab/v1beta2/globalvmshutdownschedule.yaml new file mode 100644 index 000000000..e3a61db3e --- /dev/null +++ b/examples-generated/devtestlab/v1beta2/globalvmshutdownschedule.yaml @@ -0,0 +1,133 @@ +apiVersion: devtestlab.azure.upbound.io/v1beta2 +kind: GlobalVMShutdownSchedule +metadata: + annotations: + meta.upbound.io/example-id: devtestlab/v1beta2/globalvmshutdownschedule + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + dailyRecurrenceTime: "1100" + enabled: true + location: West Europe + notificationSettings: + - enabled: true + timeInMinutes: "60" + webhookUrl: https://sample-webhook-url.example.com + timezone: Pacific Standard Time + virtualMachineIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: compute.azure.upbound.io/v1beta2 +kind: LinuxVirtualMachine +metadata: + annotations: + meta.upbound.io/example-id: devtestlab/v1beta2/globalvmshutdownschedule + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + adminPasswordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + adminUsername: testadmin + disablePasswordAuthentication: false + location: West Europe + networkInterfaceIdsRefs: + - name: example + osDisk: + - caching: ReadWrite + name: myosdisk-example + storageAccountType: Standard_LRS + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + size: Standard_B2s + sourceImageReference: + - offer: 
0001-com-ubuntu-server-jammy + publisher: Canonical + sku: 22_04-lts + version: latest + +--- + +apiVersion: network.azure.upbound.io/v1beta1 +kind: NetworkInterface +metadata: + annotations: + meta.upbound.io/example-id: devtestlab/v1beta2/globalvmshutdownschedule + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + ipConfiguration: + - name: testconfiguration1 + privateIpAddressAllocation: Dynamic + subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: example + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: devtestlab/v1beta2/globalvmshutdownschedule + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: devtestlab/v1beta2/globalvmshutdownschedule + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressPrefixes: + - 10.0.2.0/24 + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + virtualNetworkNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: VirtualNetwork +metadata: + annotations: + meta.upbound.io/example-id: devtestlab/v1beta2/globalvmshutdownschedule + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressSpace: + - 10.0.0.0/16 + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/devtestlab/v1beta2/linuxvirtualmachine.yaml b/examples-generated/devtestlab/v1beta2/linuxvirtualmachine.yaml new file mode 100644 index 000000000..c8ac8324d --- /dev/null 
+++ b/examples-generated/devtestlab/v1beta2/linuxvirtualmachine.yaml @@ -0,0 +1,90 @@ +apiVersion: devtestlab.azure.upbound.io/v1beta2 +kind: LinuxVirtualMachine +metadata: + annotations: + meta.upbound.io/example-id: devtestlab/v1beta2/linuxvirtualmachine + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + galleryImageReference: + - offer: 0001-com-ubuntu-server-jammy + publisher: Canonical + sku: 22_04-lts + version: latest + labNameSelector: + matchLabels: + testing.upbound.io/example-name: example + labSubnetNameSelector: + matchLabels: + testing.upbound.io/example-name: example + labVirtualNetworkIdSelector: + matchLabels: + testing.upbound.io/example-name: example + location: West Europe + name: example-vm03 + notes: Some notes about this Virtual Machine. + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + size: Standard_DS2 + sshKey: ${file("~/.ssh/id_rsa.pub")} + storageType: Premium + username: exampleuser99 + +--- + +apiVersion: devtestlab.azure.upbound.io/v1beta1 +kind: Lab +metadata: + annotations: + meta.upbound.io/example-id: devtestlab/v1beta2/linuxvirtualmachine + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + tags: + Sydney: Australia + +--- + +apiVersion: devtestlab.azure.upbound.io/v1beta2 +kind: VirtualNetwork +metadata: + annotations: + meta.upbound.io/example-id: devtestlab/v1beta2/linuxvirtualmachine + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + labNameSelector: + matchLabels: + testing.upbound.io/example-name: example + name: example-network + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + subnet: + - useInVirtualMachineCreation: Allow + usePublicIpAddress: Allow + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: 
ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: devtestlab/v1beta2/linuxvirtualmachine + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/devtestlab/v1beta2/schedule.yaml b/examples-generated/devtestlab/v1beta2/schedule.yaml new file mode 100644 index 000000000..fd7a3c614 --- /dev/null +++ b/examples-generated/devtestlab/v1beta2/schedule.yaml @@ -0,0 +1,59 @@ +apiVersion: devtestlab.azure.upbound.io/v1beta2 +kind: Schedule +metadata: + annotations: + meta.upbound.io/example-id: devtestlab/v1beta2/schedule + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + labNameSelector: + matchLabels: + testing.upbound.io/example-name: example + location: West Europe + notificationSettings: + - {} + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + tags: + environment: Production + taskType: LabVmsStartupTask + timeZoneId: Pacific Standard Time + weeklyRecurrence: + - time: "1100" + weekDays: + - Monday + - Tuesday + +--- + +apiVersion: devtestlab.azure.upbound.io/v1beta1 +kind: Lab +metadata: + annotations: + meta.upbound.io/example-id: devtestlab/v1beta2/schedule + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: devtestlab/v1beta2/schedule + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/devtestlab/v1beta2/virtualnetwork.yaml b/examples-generated/devtestlab/v1beta2/virtualnetwork.yaml new file mode 100644 index 000000000..4e45a4dee --- /dev/null +++ b/examples-generated/devtestlab/v1beta2/virtualnetwork.yaml @@ -0,0 
+1,53 @@ +apiVersion: devtestlab.azure.upbound.io/v1beta2 +kind: VirtualNetwork +metadata: + annotations: + meta.upbound.io/example-id: devtestlab/v1beta2/virtualnetwork + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + labNameSelector: + matchLabels: + testing.upbound.io/example-name: example + name: example-network + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + subnet: + - useInVirtualMachineCreation: Allow + usePublicIpAddress: Allow + +--- + +apiVersion: devtestlab.azure.upbound.io/v1beta1 +kind: Lab +metadata: + annotations: + meta.upbound.io/example-id: devtestlab/v1beta2/virtualnetwork + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + tags: + Sydney: Australia + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: devtestlab/v1beta2/virtualnetwork + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/devtestlab/v1beta2/windowsvirtualmachine.yaml b/examples-generated/devtestlab/v1beta2/windowsvirtualmachine.yaml new file mode 100644 index 000000000..2e3dcbd0a --- /dev/null +++ b/examples-generated/devtestlab/v1beta2/windowsvirtualmachine.yaml @@ -0,0 +1,93 @@ +apiVersion: devtestlab.azure.upbound.io/v1beta2 +kind: WindowsVirtualMachine +metadata: + annotations: + meta.upbound.io/example-id: devtestlab/v1beta2/windowsvirtualmachine + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + galleryImageReference: + - offer: WindowsServer + publisher: MicrosoftWindowsServer + sku: 2019-Datacenter + version: latest + labNameSelector: + matchLabels: + testing.upbound.io/example-name: example + labSubnetNameSelector: + matchLabels: + 
testing.upbound.io/example-name: example + labVirtualNetworkIdSelector: + matchLabels: + testing.upbound.io/example-name: example + location: West Europe + name: example-vm03 + notes: Some notes about this Virtual Machine. + passwordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + size: Standard_DS2 + storageType: Premium + username: exampleuser99 + +--- + +apiVersion: devtestlab.azure.upbound.io/v1beta1 +kind: Lab +metadata: + annotations: + meta.upbound.io/example-id: devtestlab/v1beta2/windowsvirtualmachine + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + tags: + Sydney: Australia + +--- + +apiVersion: devtestlab.azure.upbound.io/v1beta2 +kind: VirtualNetwork +metadata: + annotations: + meta.upbound.io/example-id: devtestlab/v1beta2/windowsvirtualmachine + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + labNameSelector: + matchLabels: + testing.upbound.io/example-name: example + name: example-network + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + subnet: + - useInVirtualMachineCreation: Allow + usePublicIpAddress: Allow + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: devtestlab/v1beta2/windowsvirtualmachine + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/digitaltwins/v1beta2/instance.yaml b/examples-generated/digitaltwins/v1beta2/instance.yaml new file mode 100644 index 000000000..5dca91506 --- /dev/null +++ b/examples-generated/digitaltwins/v1beta2/instance.yaml @@ -0,0 +1,30 @@ +apiVersion: digitaltwins.azure.upbound.io/v1beta2 +kind: 
Instance +metadata: + annotations: + meta.upbound.io/example-id: digitaltwins/v1beta2/instance + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + tags: + foo: bar + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: digitaltwins/v1beta2/instance + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/elastic/v1beta2/cloudelasticsearch.yaml b/examples-generated/elastic/v1beta2/cloudelasticsearch.yaml new file mode 100644 index 000000000..b9a7985a3 --- /dev/null +++ b/examples-generated/elastic/v1beta2/cloudelasticsearch.yaml @@ -0,0 +1,30 @@ +apiVersion: elastic.azure.upbound.io/v1beta2 +kind: CloudElasticsearch +metadata: + annotations: + meta.upbound.io/example-id: elastic/v1beta2/cloudelasticsearch + labels: + testing.upbound.io/example-name: test + name: test +spec: + forProvider: + elasticCloudEmailAddress: user@example.com + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: test + skuName: ess-consumption-2024_Monthly + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: elastic/v1beta2/cloudelasticsearch + labels: + testing.upbound.io/example-name: test + name: test +spec: + forProvider: + location: West Europe diff --git a/examples-generated/eventgrid/v1beta1/domaintopic.yaml b/examples-generated/eventgrid/v1beta1/domaintopic.yaml index de095facc..df98948dc 100644 --- a/examples-generated/eventgrid/v1beta1/domaintopic.yaml +++ b/examples-generated/eventgrid/v1beta1/domaintopic.yaml @@ -17,7 +17,7 @@ spec: --- -apiVersion: eventgrid.azure.upbound.io/v1beta1 +apiVersion: eventgrid.azure.upbound.io/v1beta2 kind: Domain metadata: 
annotations: diff --git a/examples-generated/eventgrid/v1beta2/domain.yaml b/examples-generated/eventgrid/v1beta2/domain.yaml new file mode 100644 index 000000000..1b93a7b64 --- /dev/null +++ b/examples-generated/eventgrid/v1beta2/domain.yaml @@ -0,0 +1,30 @@ +apiVersion: eventgrid.azure.upbound.io/v1beta2 +kind: Domain +metadata: + annotations: + meta.upbound.io/example-id: eventgrid/v1beta2/domain + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + tags: + environment: Production + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: eventgrid/v1beta2/domain + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/eventgrid/v1beta2/eventsubscription.yaml b/examples-generated/eventgrid/v1beta2/eventsubscription.yaml new file mode 100644 index 000000000..ef72a1f27 --- /dev/null +++ b/examples-generated/eventgrid/v1beta2/eventsubscription.yaml @@ -0,0 +1,72 @@ +apiVersion: eventgrid.azure.upbound.io/v1beta2 +kind: EventSubscription +metadata: + annotations: + meta.upbound.io/example-id: eventgrid/v1beta2/eventsubscription + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + name: example-aees + scopeSelector: + matchLabels: + testing.upbound.io/example-name: example + storageQueueEndpoint: + - queueNameSelector: + matchLabels: + testing.upbound.io/example-name: example + storageAccountIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: eventgrid/v1beta2/eventsubscription + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West 
Europe + +--- + +apiVersion: storage.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: eventgrid/v1beta2/eventsubscription + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountReplicationType: LRS + accountTier: Standard + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + tags: + environment: staging + +--- + +apiVersion: storage.azure.upbound.io/v1beta1 +kind: Queue +metadata: + annotations: + meta.upbound.io/example-id: eventgrid/v1beta2/eventsubscription + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + storageAccountNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/eventgrid/v1beta2/systemtopic.yaml b/examples-generated/eventgrid/v1beta2/systemtopic.yaml new file mode 100644 index 000000000..469b31127 --- /dev/null +++ b/examples-generated/eventgrid/v1beta2/systemtopic.yaml @@ -0,0 +1,53 @@ +apiVersion: eventgrid.azure.upbound.io/v1beta2 +kind: SystemTopic +metadata: + annotations: + meta.upbound.io/example-id: eventgrid/v1beta2/systemtopic + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sourceArmResourceIdSelector: + matchLabels: + testing.upbound.io/example-name: example + topicType: Microsoft.Storage.StorageAccounts + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: eventgrid/v1beta2/systemtopic + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: storage.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: eventgrid/v1beta2/systemtopic + labels: + 
testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountReplicationType: LRS + accountTier: Standard + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + tags: + environment: staging diff --git a/examples-generated/eventgrid/v1beta2/topic.yaml b/examples-generated/eventgrid/v1beta2/topic.yaml new file mode 100644 index 000000000..316483700 --- /dev/null +++ b/examples-generated/eventgrid/v1beta2/topic.yaml @@ -0,0 +1,30 @@ +apiVersion: eventgrid.azure.upbound.io/v1beta2 +kind: Topic +metadata: + annotations: + meta.upbound.io/example-id: eventgrid/v1beta2/topic + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + tags: + environment: Production + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: eventgrid/v1beta2/topic + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/eventhub/v1beta1/authorizationrule.yaml b/examples-generated/eventhub/v1beta1/authorizationrule.yaml index c98b58540..99d0cb541 100644 --- a/examples-generated/eventhub/v1beta1/authorizationrule.yaml +++ b/examples-generated/eventhub/v1beta1/authorizationrule.yaml @@ -23,7 +23,7 @@ spec: --- -apiVersion: eventhub.azure.upbound.io/v1beta1 +apiVersion: eventhub.azure.upbound.io/v1beta2 kind: EventHub metadata: annotations: @@ -44,7 +44,7 @@ spec: --- -apiVersion: eventhub.azure.upbound.io/v1beta1 +apiVersion: eventhub.azure.upbound.io/v1beta2 kind: EventHubNamespace metadata: annotations: diff --git a/examples-generated/eventhub/v1beta1/consumergroup.yaml b/examples-generated/eventhub/v1beta1/consumergroup.yaml index c202d0058..7b913034b 100644 --- 
a/examples-generated/eventhub/v1beta1/consumergroup.yaml +++ b/examples-generated/eventhub/v1beta1/consumergroup.yaml @@ -21,7 +21,7 @@ spec: --- -apiVersion: eventhub.azure.upbound.io/v1beta1 +apiVersion: eventhub.azure.upbound.io/v1beta2 kind: EventHub metadata: annotations: @@ -42,7 +42,7 @@ spec: --- -apiVersion: eventhub.azure.upbound.io/v1beta1 +apiVersion: eventhub.azure.upbound.io/v1beta2 kind: EventHubNamespace metadata: annotations: diff --git a/examples-generated/eventhub/v1beta1/namespaceauthorizationrule.yaml b/examples-generated/eventhub/v1beta1/namespaceauthorizationrule.yaml index 2a1b8a1a3..3efd0bd37 100644 --- a/examples-generated/eventhub/v1beta1/namespaceauthorizationrule.yaml +++ b/examples-generated/eventhub/v1beta1/namespaceauthorizationrule.yaml @@ -20,7 +20,7 @@ spec: --- -apiVersion: eventhub.azure.upbound.io/v1beta1 +apiVersion: eventhub.azure.upbound.io/v1beta2 kind: EventHubNamespace metadata: annotations: diff --git a/examples-generated/eventhub/v1beta1/namespacedisasterrecoveryconfig.yaml b/examples-generated/eventhub/v1beta1/namespacedisasterrecoveryconfig.yaml index 66c6ddbe7..6358c9297 100644 --- a/examples-generated/eventhub/v1beta1/namespacedisasterrecoveryconfig.yaml +++ b/examples-generated/eventhub/v1beta1/namespacedisasterrecoveryconfig.yaml @@ -20,7 +20,7 @@ spec: --- -apiVersion: eventhub.azure.upbound.io/v1beta1 +apiVersion: eventhub.azure.upbound.io/v1beta2 kind: EventHubNamespace metadata: annotations: @@ -38,7 +38,7 @@ spec: --- -apiVersion: eventhub.azure.upbound.io/v1beta1 +apiVersion: eventhub.azure.upbound.io/v1beta2 kind: EventHubNamespace metadata: annotations: diff --git a/examples-generated/eventhub/v1beta1/namespaceschemagroup.yaml b/examples-generated/eventhub/v1beta1/namespaceschemagroup.yaml index da0918df7..d8f76a450 100644 --- a/examples-generated/eventhub/v1beta1/namespaceschemagroup.yaml +++ b/examples-generated/eventhub/v1beta1/namespaceschemagroup.yaml @@ -16,7 +16,7 @@ spec: --- -apiVersion: 
eventhub.azure.upbound.io/v1beta1 +apiVersion: eventhub.azure.upbound.io/v1beta2 kind: EventHubNamespace metadata: annotations: diff --git a/examples-generated/eventhub/v1beta2/eventhub.yaml b/examples-generated/eventhub/v1beta2/eventhub.yaml new file mode 100644 index 000000000..99fa6ed59 --- /dev/null +++ b/examples-generated/eventhub/v1beta2/eventhub.yaml @@ -0,0 +1,53 @@ +apiVersion: eventhub.azure.upbound.io/v1beta2 +kind: EventHub +metadata: + annotations: + meta.upbound.io/example-id: eventhub/v1beta2/eventhub + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + messageRetention: 1 + namespaceNameSelector: + matchLabels: + testing.upbound.io/example-name: example + partitionCount: 2 + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: eventhub.azure.upbound.io/v1beta2 +kind: EventHubNamespace +metadata: + annotations: + meta.upbound.io/example-id: eventhub/v1beta2/eventhub + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + capacity: 1 + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sku: Standard + tags: + environment: Production + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: eventhub/v1beta2/eventhub + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/eventhub/v1beta2/eventhubnamespace.yaml b/examples-generated/eventhub/v1beta2/eventhubnamespace.yaml new file mode 100644 index 000000000..77a977089 --- /dev/null +++ b/examples-generated/eventhub/v1beta2/eventhubnamespace.yaml @@ -0,0 +1,32 @@ +apiVersion: eventhub.azure.upbound.io/v1beta2 +kind: EventHubNamespace +metadata: + annotations: + meta.upbound.io/example-id: eventhub/v1beta2/eventhubnamespace + labels: + 
testing.upbound.io/example-name: example + name: example +spec: + forProvider: + capacity: 2 + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sku: Standard + tags: + environment: Production + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: eventhub/v1beta2/eventhubnamespace + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/fluidrelay/v1beta2/server.yaml b/examples-generated/fluidrelay/v1beta2/server.yaml new file mode 100644 index 000000000..16341ca84 --- /dev/null +++ b/examples-generated/fluidrelay/v1beta2/server.yaml @@ -0,0 +1,29 @@ +apiVersion: fluidrelay.azure.upbound.io/v1beta2 +kind: Server +metadata: + annotations: + meta.upbound.io/example-id: fluidrelay/v1beta2/server + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + name: example + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: fluidrelay/v1beta2/server + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/guestconfiguration/v1beta2/policyvirtualmachineconfigurationassignment.yaml b/examples-generated/guestconfiguration/v1beta2/policyvirtualmachineconfigurationassignment.yaml new file mode 100644 index 000000000..67aafd13b --- /dev/null +++ b/examples-generated/guestconfiguration/v1beta2/policyvirtualmachineconfigurationassignment.yaml @@ -0,0 +1,160 @@ +apiVersion: guestconfiguration.azure.upbound.io/v1beta2 +kind: PolicyVirtualMachineConfigurationAssignment +metadata: + annotations: + meta.upbound.io/example-id: 
guestconfiguration/v1beta2/policyvirtualmachineconfigurationassignment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + configuration: + - assignmentType: ApplyAndMonitor + parameter: + - name: Minimum Password Length;ExpectedValue + value: "16" + - name: Minimum Password Age;ExpectedValue + value: "0" + - name: Maximum Password Age;ExpectedValue + value: 30,45 + - name: Enforce Password History;ExpectedValue + value: "10" + - name: Password Must Meet Complexity Requirements;ExpectedValue + value: "1" + version: 1.* + location: West Europe + virtualMachineIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta1 +kind: NetworkInterface +metadata: + annotations: + meta.upbound.io/example-id: guestconfiguration/v1beta2/policyvirtualmachineconfigurationassignment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + ipConfiguration: + - name: internal + privateIpAddressAllocation: Dynamic + subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: example + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: guestconfiguration/v1beta2/policyvirtualmachineconfigurationassignment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: guestconfiguration/v1beta2/policyvirtualmachineconfigurationassignment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressPrefixes: + - 10.0.2.0/24 + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + virtualNetworkNameSelector: + 
matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: compute.azure.upbound.io/v1beta2 +kind: VirtualMachineExtension +metadata: + annotations: + meta.upbound.io/example-id: guestconfiguration/v1beta2/policyvirtualmachineconfigurationassignment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + autoUpgradeMinorVersion: "true" + publisher: Microsoft.GuestConfiguration + type: ConfigurationforWindows + typeHandlerVersion: "1.29" + virtualMachineIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: VirtualNetwork +metadata: + annotations: + meta.upbound.io/example-id: guestconfiguration/v1beta2/policyvirtualmachineconfigurationassignment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressSpace: + - 10.0.0.0/16 + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: compute.azure.upbound.io/v1beta2 +kind: WindowsVirtualMachine +metadata: + annotations: + meta.upbound.io/example-id: guestconfiguration/v1beta2/policyvirtualmachineconfigurationassignment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + adminPasswordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + adminUsername: adminuser + identity: + - type: SystemAssigned + location: West Europe + networkInterfaceIdsRefs: + - name: example + osDisk: + - caching: ReadWrite + storageAccountType: Standard_LRS + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + size: Standard_F2 + sourceImageReference: + - offer: WindowsServer + publisher: MicrosoftWindowsServer + sku: 2019-Datacenter + version: latest diff --git a/examples-generated/hdinsight/v1beta2/hadoopcluster.yaml b/examples-generated/hdinsight/v1beta2/hadoopcluster.yaml new file mode 
100644 index 000000000..c337243c1 --- /dev/null +++ b/examples-generated/hdinsight/v1beta2/hadoopcluster.yaml @@ -0,0 +1,106 @@ +apiVersion: hdinsight.azure.upbound.io/v1beta2 +kind: HadoopCluster +metadata: + annotations: + meta.upbound.io/example-id: hdinsight/v1beta2/hadoopcluster + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + clusterVersion: "3.6" + componentVersion: + - hadoop: "2.7" + gateway: + - passwordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + username: acctestusrgw + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + roles: + - headNode: + - passwordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + username: acctestusrvm + vmSize: Standard_D3_V2 + workerNode: + - passwordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + targetInstanceCount: 3 + username: acctestusrvm + vmSize: Standard_D4_V2 + zookeeperNode: + - passwordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + username: acctestusrvm + vmSize: Standard_D3_V2 + storageAccount: + - isDefault: true + storageAccountKeySecretRef: + key: attribute.primary_access_key + name: example-storage-account + namespace: upbound-system + storageContainerIdSelector: + matchLabels: + testing.upbound.io/example-name: example + tier: Standard + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: hdinsight/v1beta2/hadoopcluster + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: storage.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: hdinsight/v1beta2/hadoopcluster + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + 
accountReplicationType: LRS + accountTier: Standard + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: storage.azure.upbound.io/v1beta1 +kind: Container +metadata: + annotations: + meta.upbound.io/example-id: hdinsight/v1beta2/hadoopcluster + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + containerAccessType: private + storageAccountNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/hdinsight/v1beta2/hbasecluster.yaml b/examples-generated/hdinsight/v1beta2/hbasecluster.yaml new file mode 100644 index 000000000..bb6a651b9 --- /dev/null +++ b/examples-generated/hdinsight/v1beta2/hbasecluster.yaml @@ -0,0 +1,106 @@ +apiVersion: hdinsight.azure.upbound.io/v1beta2 +kind: HBaseCluster +metadata: + annotations: + meta.upbound.io/example-id: hdinsight/v1beta2/hbasecluster + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + clusterVersion: "3.6" + componentVersion: + - hbase: "1.1" + gateway: + - passwordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + username: acctestusrgw + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + roles: + - headNode: + - passwordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + username: acctestusrvm + vmSize: Standard_D3_V2 + workerNode: + - passwordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + targetInstanceCount: 3 + username: acctestusrvm + vmSize: Standard_D3_V2 + zookeeperNode: + - passwordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + username: acctestusrvm + vmSize: Standard_D3_V2 + storageAccount: + - isDefault: true + storageAccountKeySecretRef: + key: attribute.primary_access_key + name: example-storage-account + 
namespace: upbound-system + storageContainerIdSelector: + matchLabels: + testing.upbound.io/example-name: example + tier: Standard + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: hdinsight/v1beta2/hbasecluster + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: storage.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: hdinsight/v1beta2/hbasecluster + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountReplicationType: LRS + accountTier: Standard + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: storage.azure.upbound.io/v1beta1 +kind: Container +metadata: + annotations: + meta.upbound.io/example-id: hdinsight/v1beta2/hbasecluster + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + containerAccessType: private + storageAccountNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/hdinsight/v1beta2/interactivequerycluster.yaml b/examples-generated/hdinsight/v1beta2/interactivequerycluster.yaml new file mode 100644 index 000000000..257c44f37 --- /dev/null +++ b/examples-generated/hdinsight/v1beta2/interactivequerycluster.yaml @@ -0,0 +1,106 @@ +apiVersion: hdinsight.azure.upbound.io/v1beta2 +kind: InteractiveQueryCluster +metadata: + annotations: + meta.upbound.io/example-id: hdinsight/v1beta2/interactivequerycluster + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + clusterVersion: "3.6" + componentVersion: + - interactiveHive: "2.1" + gateway: + - passwordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + username: acctestusrgw + location: West Europe + 
resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + roles: + - headNode: + - passwordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + username: acctestusrvm + vmSize: Standard_D13_V2 + workerNode: + - passwordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + targetInstanceCount: 3 + username: acctestusrvm + vmSize: Standard_D14_V2 + zookeeperNode: + - passwordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + username: acctestusrvm + vmSize: Standard_A4_V2 + storageAccount: + - isDefault: true + storageAccountKeySecretRef: + key: attribute.primary_access_key + name: example-storage-account + namespace: upbound-system + storageContainerIdSelector: + matchLabels: + testing.upbound.io/example-name: example + tier: Standard + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: hdinsight/v1beta2/interactivequerycluster + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: storage.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: hdinsight/v1beta2/interactivequerycluster + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountReplicationType: LRS + accountTier: Standard + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: storage.azure.upbound.io/v1beta1 +kind: Container +metadata: + annotations: + meta.upbound.io/example-id: hdinsight/v1beta2/interactivequerycluster + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + containerAccessType: private + storageAccountNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git 
a/examples-generated/hdinsight/v1beta2/kafkacluster.yaml b/examples-generated/hdinsight/v1beta2/kafkacluster.yaml new file mode 100644 index 000000000..b059f92ce --- /dev/null +++ b/examples-generated/hdinsight/v1beta2/kafkacluster.yaml @@ -0,0 +1,107 @@ +apiVersion: hdinsight.azure.upbound.io/v1beta2 +kind: KafkaCluster +metadata: + annotations: + meta.upbound.io/example-id: hdinsight/v1beta2/kafkacluster + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + clusterVersion: "4.0" + componentVersion: + - kafka: "2.1" + gateway: + - passwordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + username: acctestusrgw + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + roles: + - headNode: + - passwordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + username: acctestusrvm + vmSize: Standard_D3_V2 + workerNode: + - numberOfDisksPerNode: 3 + passwordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + targetInstanceCount: 3 + username: acctestusrvm + vmSize: Standard_D3_V2 + zookeeperNode: + - passwordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + username: acctestusrvm + vmSize: Standard_D3_V2 + storageAccount: + - isDefault: true + storageAccountKeySecretRef: + key: attribute.primary_access_key + name: example-storage-account + namespace: upbound-system + storageContainerIdSelector: + matchLabels: + testing.upbound.io/example-name: example + tier: Standard + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: hdinsight/v1beta2/kafkacluster + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: storage.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + 
meta.upbound.io/example-id: hdinsight/v1beta2/kafkacluster + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountReplicationType: LRS + accountTier: Standard + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: storage.azure.upbound.io/v1beta1 +kind: Container +metadata: + annotations: + meta.upbound.io/example-id: hdinsight/v1beta2/kafkacluster + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + containerAccessType: private + storageAccountNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/hdinsight/v1beta2/sparkcluster.yaml b/examples-generated/hdinsight/v1beta2/sparkcluster.yaml new file mode 100644 index 000000000..c90416044 --- /dev/null +++ b/examples-generated/hdinsight/v1beta2/sparkcluster.yaml @@ -0,0 +1,106 @@ +apiVersion: hdinsight.azure.upbound.io/v1beta2 +kind: SparkCluster +metadata: + annotations: + meta.upbound.io/example-id: hdinsight/v1beta2/sparkcluster + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + clusterVersion: "3.6" + componentVersion: + - spark: "2.3" + gateway: + - passwordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + username: acctestusrgw + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + roles: + - headNode: + - passwordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + username: acctestusrvm + vmSize: Standard_A3 + workerNode: + - passwordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + targetInstanceCount: 3 + username: acctestusrvm + vmSize: Standard_A3 + zookeeperNode: + - passwordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + username: acctestusrvm + vmSize: Medium + 
storageAccount: + - isDefault: true + storageAccountKeySecretRef: + key: attribute.primary_access_key + name: example-storage-account + namespace: upbound-system + storageContainerIdSelector: + matchLabels: + testing.upbound.io/example-name: example + tier: Standard + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: hdinsight/v1beta2/sparkcluster + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: storage.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: hdinsight/v1beta2/sparkcluster + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountReplicationType: LRS + accountTier: Standard + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: storage.azure.upbound.io/v1beta1 +kind: Container +metadata: + annotations: + meta.upbound.io/example-id: hdinsight/v1beta2/sparkcluster + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + containerAccessType: private + storageAccountNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/healthcareapis/v1beta1/healthcaremedtechservicefhirdestination.yaml b/examples-generated/healthcareapis/v1beta1/healthcaremedtechservicefhirdestination.yaml index d190db1f8..14a8c3b3d 100644 --- a/examples-generated/healthcareapis/v1beta1/healthcaremedtechservicefhirdestination.yaml +++ b/examples-generated/healthcareapis/v1beta1/healthcaremedtechservicefhirdestination.yaml @@ -45,7 +45,7 @@ spec: --- -apiVersion: eventhub.azure.upbound.io/v1beta1 +apiVersion: eventhub.azure.upbound.io/v1beta2 kind: EventHub metadata: annotations: @@ -88,7 +88,7 @@ spec: --- -apiVersion: eventhub.azure.upbound.io/v1beta1 +apiVersion: 
eventhub.azure.upbound.io/v1beta2 kind: EventHubNamespace metadata: annotations: @@ -106,7 +106,7 @@ spec: --- -apiVersion: healthcareapis.azure.upbound.io/v1beta1 +apiVersion: healthcareapis.azure.upbound.io/v1beta2 kind: HealthcareFHIRService metadata: annotations: @@ -130,7 +130,7 @@ spec: --- -apiVersion: healthcareapis.azure.upbound.io/v1beta1 +apiVersion: healthcareapis.azure.upbound.io/v1beta2 kind: HealthcareMedtechService metadata: annotations: diff --git a/examples-generated/healthcareapis/v1beta2/healthcaredicomservice.yaml b/examples-generated/healthcareapis/v1beta2/healthcaredicomservice.yaml new file mode 100644 index 000000000..5a1d9480e --- /dev/null +++ b/examples-generated/healthcareapis/v1beta2/healthcaredicomservice.yaml @@ -0,0 +1,35 @@ +apiVersion: healthcareapis.azure.upbound.io/v1beta2 +kind: HealthcareDICOMService +metadata: + annotations: + meta.upbound.io/example-id: healthcareapis/v1beta2/healthcaredicomservice + labels: + testing.upbound.io/example-name: test + name: test +spec: + forProvider: + identity: + - type: SystemAssigned + location: east us + tags: + environment: None + workspaceIdSelector: + matchLabels: + testing.upbound.io/example-name: test + +--- + +apiVersion: healthcareapis.azure.upbound.io/v1beta1 +kind: HealthcareWorkspace +metadata: + annotations: + meta.upbound.io/example-id: healthcareapis/v1beta2/healthcaredicomservice + labels: + testing.upbound.io/example-name: test + name: test +spec: + forProvider: + location: east us + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/healthcareapis/v1beta2/healthcarefhirservice.yaml b/examples-generated/healthcareapis/v1beta2/healthcarefhirservice.yaml new file mode 100644 index 000000000..e76e3b2d0 --- /dev/null +++ b/examples-generated/healthcareapis/v1beta2/healthcarefhirservice.yaml @@ -0,0 +1,71 @@ +apiVersion: healthcareapis.azure.upbound.io/v1beta2 +kind: HealthcareFHIRService +metadata: + annotations: 
+ meta.upbound.io/example-id: healthcareapis/v1beta2/healthcarefhirservice + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accessPolicyObjectIds: + - ${data.azurerm_client_config.current.object_id} + authentication: + - audience: https://tfexfhir.fhir.azurehealthcareapis.com + authority: https://login.microsoftonline.com/tenantId + configurationExportStorageAccountName: storage_account_name + containerRegistryLoginServerUrl: + - tfex-container_registry_login_server + cors: + - allowedHeaders: + - '*' + allowedMethods: + - GET + - DELETE + - PUT + allowedOrigins: + - https://tfex.com:123 + - https://tfex1.com:3389 + credentialsAllowed: true + maxAgeInSeconds: 3600 + identity: + - type: SystemAssigned + kind: fhir-R4 + location: east us + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + workspaceIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: healthcareapis.azure.upbound.io/v1beta1 +kind: HealthcareWorkspace +metadata: + annotations: + meta.upbound.io/example-id: healthcareapis/v1beta2/healthcarefhirservice + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: healthcareapis/v1beta2/healthcarefhirservice + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/healthcareapis/v1beta2/healthcaremedtechservice.yaml b/examples-generated/healthcareapis/v1beta2/healthcaremedtechservice.yaml new file mode 100644 index 000000000..b15ecee40 --- /dev/null +++ b/examples-generated/healthcareapis/v1beta2/healthcaremedtechservice.yaml @@ -0,0 +1,78 @@ +apiVersion: 
healthcareapis.azure.upbound.io/v1beta2 +kind: HealthcareMedtechService +metadata: + annotations: + meta.upbound.io/example-id: healthcareapis/v1beta2/healthcaremedtechservice + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + deviceMappingJson: |- + ${jsonencode({ + "templateType" : "CollectionContent", + "template" : [ + { + "templateType" : "JsonPathContent", + "template" : { + "typeName" : "heartrate", + "typeMatchExpression" : "$..[?(@heartrate)]", + "deviceIdExpression" : "$.deviceid", + "timestampExpression" : "$.measurementdatetime", + "values" : [ + { + "required" : "true", + "valueExpression" : "$.heartrate", + "valueName" : "hr" + } + ] + } + } + ] + })} + eventhubConsumerGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + eventhubNameSelector: + matchLabels: + testing.upbound.io/example-name: example + eventhubNamespaceNameSelector: + matchLabels: + testing.upbound.io/example-name: example + identity: + - type: SystemAssigned + location: east us + workspaceIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: healthcareapis.azure.upbound.io/v1beta1 +kind: HealthcareWorkspace +metadata: + annotations: + meta.upbound.io/example-id: healthcareapis/v1beta2/healthcaremedtechservice + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: east us + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: healthcareapis/v1beta2/healthcaremedtechservice + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: east us diff --git a/examples-generated/healthcareapis/v1beta2/healthcareservice.yaml b/examples-generated/healthcareapis/v1beta2/healthcareservice.yaml new file mode 100644 index 000000000..eca6831eb --- 
/dev/null +++ b/examples-generated/healthcareapis/v1beta2/healthcareservice.yaml @@ -0,0 +1,36 @@ +apiVersion: healthcareapis.azure.upbound.io/v1beta2 +kind: HealthcareService +metadata: + annotations: + meta.upbound.io/example-id: healthcareapis/v1beta2/healthcareservice + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accessPolicyObjectIds: ${data.azurerm_client_config.current.object_id} + authenticationConfiguration: + - audience: https://azurehealthcareapis.com/ + authority: https://login.microsoftonline.com/$%7Bdata.azurerm_client_config.current.tenant_id%7D + smartProxyEnabled: "true" + corsConfiguration: + - allowCredentials: "true" + allowedHeaders: + - x-tempo-* + - x-tempo2-* + allowedMethods: + - GET + - PUT + allowedOrigins: + - http://www.example.com + - http://www.example2.com + maxAgeInSeconds: "500" + cosmosdbThroughput: "2000" + kind: fhir-R4 + location: westus2 + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + tags: + environment: testenv + purpose: AcceptanceTests diff --git a/examples-generated/insights/v1beta1/monitordatacollectionruleassociation.yaml b/examples-generated/insights/v1beta1/monitordatacollectionruleassociation.yaml index 8378152d4..09dd923aa 100644 --- a/examples-generated/insights/v1beta1/monitordatacollectionruleassociation.yaml +++ b/examples-generated/insights/v1beta1/monitordatacollectionruleassociation.yaml @@ -18,7 +18,7 @@ spec: --- -apiVersion: compute.azure.upbound.io/v1beta1 +apiVersion: compute.azure.upbound.io/v1beta2 kind: LinuxVirtualMachine metadata: annotations: @@ -69,7 +69,7 @@ spec: --- -apiVersion: insights.azure.upbound.io/v1beta1 +apiVersion: insights.azure.upbound.io/v1beta2 kind: MonitorDataCollectionRule metadata: annotations: @@ -131,7 +131,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: Subnet metadata: annotations: @@ -152,7 +152,7 @@ spec: --- 
-apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: VirtualNetwork metadata: annotations: diff --git a/examples-generated/insights/v1beta2/applicationinsightsstandardwebtest.yaml b/examples-generated/insights/v1beta2/applicationinsightsstandardwebtest.yaml new file mode 100644 index 000000000..682ca432c --- /dev/null +++ b/examples-generated/insights/v1beta2/applicationinsightsstandardwebtest.yaml @@ -0,0 +1,53 @@ +apiVersion: insights.azure.upbound.io/v1beta2 +kind: ApplicationInsightsStandardWebTest +metadata: + annotations: + meta.upbound.io/example-id: insights/v1beta2/applicationinsightsstandardwebtest + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + applicationInsightsIdSelector: + matchLabels: + testing.upbound.io/example-name: example + geoLocations: + - example + location: West Europe + request: + - url: http://www.example.com + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: insights.azure.upbound.io/v1beta1 +kind: ApplicationInsights +metadata: + annotations: + meta.upbound.io/example-id: insights/v1beta2/applicationinsightsstandardwebtest + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + applicationType: web + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: insights/v1beta2/applicationinsightsstandardwebtest + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/insights/v1beta2/applicationinsightsworkbook.yaml b/examples-generated/insights/v1beta2/applicationinsightsworkbook.yaml new file mode 100644 index 000000000..a00adf336 --- /dev/null +++ 
b/examples-generated/insights/v1beta2/applicationinsightsworkbook.yaml @@ -0,0 +1,49 @@ +apiVersion: insights.azure.upbound.io/v1beta2 +kind: ApplicationInsightsWorkbook +metadata: + annotations: + meta.upbound.io/example-id: insights/v1beta2/applicationinsightsworkbook + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + dataJson: |- + ${jsonencode({ + "version" = "Notebook/1.0", + "items" = [ + { + "type" = 1, + "content" = { + "json" = "Test2022" + }, + "name" = "text - 0" + } + ], + "isLocked" = false, + "fallbackResourceIds" = [ + "Azure Monitor" + ] + })} + displayName: workbook1 + location: West Europe + name: 85b3e8bb-fc93-40be-83f2-98f6bec18ba0 + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + tags: + ENV: Test + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: insights/v1beta2/applicationinsightsworkbook + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/insights/v1beta2/monitoractiongroup.yaml b/examples-generated/insights/v1beta2/monitoractiongroup.yaml new file mode 100644 index 000000000..defe298e3 --- /dev/null +++ b/examples-generated/insights/v1beta2/monitoractiongroup.yaml @@ -0,0 +1,101 @@ +apiVersion: insights.azure.upbound.io/v1beta2 +kind: MonitorActionGroup +metadata: + annotations: + meta.upbound.io/example-id: insights/v1beta2/monitoractiongroup + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + armRoleReceiver: + - name: armroleaction + roleId: de139f84-1756-47ae-9be6-808fbbe84772 + useCommonAlertSchema: true + automationRunbookReceiver: + - automationAccountId: /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg-runbooks/providers/Microsoft.Automation/automationAccounts/aaa001 + isGlobalRunbook: true + name: action_name_1 + runbookName: 
my runbook + serviceUri: https://s13events.azure-automation.net/webhooks?token=randomtoken + useCommonAlertSchema: true + webhookResourceId: /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg-runbooks/providers/Microsoft.Automation/automationAccounts/aaa001/webHooks/webhook_alert + azureAppPushReceiver: + - emailAddress: admin@contoso.com + name: pushtoadmin + azureFunctionReceiver: + - functionAppResourceId: /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg-funcapp/providers/Microsoft.Web/sites/funcapp + functionName: myfunc + httpTriggerUrl: https://example.com/trigger + name: funcaction + useCommonAlertSchema: true + emailReceiver: + - emailAddress: admin@contoso.com + name: sendtoadmin + - emailAddress: devops@contoso.com + name: sendtodevops + useCommonAlertSchema: true + eventHubReceiver: + - eventHubName: eventhub1 + eventHubNamespace: eventhubnamespace + name: sendtoeventhub + subscriptionId: 00000000-0000-0000-0000-000000000000 + useCommonAlertSchema: false + itsmReceiver: + - connectionId: 53de6956-42b4-41ba-be3c-b154cdf17b13 + name: createorupdateticket + region: southcentralus + ticketConfiguration: '{"PayloadRevision":0,"WorkItemType":"Incident","UseTemplate":false,"WorkItemData":"{}","CreateOneWIPerCI":false}' + workspaceId: ${data.azurerm_client_config.current.subscription_id}|${azurerm_log_analytics_workspace.example.workspace_id} + logicAppReceiver: + - callbackUrl: https://logicapptriggerurl/... 
+ name: logicappaction + resourceId: /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg-logicapp/providers/Microsoft.Logic/workflows/logicapp + useCommonAlertSchema: true + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + shortName: p0action + smsReceiver: + - countryCode: "1" + name: oncallmsg + phoneNumber: "1231231234" + voiceReceiver: + - countryCode: "86" + name: remotesupport + phoneNumber: "13888888888" + webhookReceiver: + - name: callmyapiaswell + serviceUri: http://example.com/alert + useCommonAlertSchema: true + +--- + +apiVersion: operationalinsights.azure.upbound.io/v1beta2 +kind: Workspace +metadata: + annotations: + meta.upbound.io/example-id: insights/v1beta2/monitoractiongroup + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: insights/v1beta2/monitoractiongroup + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/insights/v1beta2/monitoractivitylogalert.yaml b/examples-generated/insights/v1beta2/monitoractivitylogalert.yaml new file mode 100644 index 000000000..fbe8e2b8f --- /dev/null +++ b/examples-generated/insights/v1beta2/monitoractivitylogalert.yaml @@ -0,0 +1,82 @@ +apiVersion: insights.azure.upbound.io/v1beta2 +kind: MonitorActivityLogAlert +metadata: + annotations: + meta.upbound.io/example-id: insights/v1beta2/monitoractivitylogalert + labels: + testing.upbound.io/example-name: main + name: main +spec: + forProvider: + action: + - actionGroupIdSelector: + matchLabels: + testing.upbound.io/example-name: main + webhookProperties: + from: terraform + criteria: + - category: Recommendation + operationName: 
Microsoft.Storage/storageAccounts/write + resourceIdSelector: + matchLabels: + testing.upbound.io/example-name: to_monitor + description: This alert will monitor a specific storage account updates. + name: example-activitylogalert + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + scopesRefs: + - name: example + +--- + +apiVersion: insights.azure.upbound.io/v1beta2 +kind: MonitorActionGroup +metadata: + annotations: + meta.upbound.io/example-id: insights/v1beta2/monitoractivitylogalert + labels: + testing.upbound.io/example-name: main + name: main +spec: + forProvider: + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + shortName: p0action + webhookReceiver: + - name: callmyapi + serviceUri: http://example.com/alert + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: insights/v1beta2/monitoractivitylogalert + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: storage.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: insights/v1beta2/monitoractivitylogalert + labels: + testing.upbound.io/example-name: to_monitor + name: to-monitor +spec: + forProvider: + accountReplicationType: GRS + accountTier: Standard + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/insights/v1beta2/monitorautoscalesetting.yaml b/examples-generated/insights/v1beta2/monitorautoscalesetting.yaml new file mode 100644 index 000000000..d6cf75ff1 --- /dev/null +++ b/examples-generated/insights/v1beta2/monitorautoscalesetting.yaml @@ -0,0 +1,168 @@ +apiVersion: insights.azure.upbound.io/v1beta2 +kind: MonitorAutoscaleSetting +metadata: + annotations: + meta.upbound.io/example-id: insights/v1beta2/monitorautoscalesetting + labels: + 
testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + name: myAutoscaleSetting + notification: + - email: + - customEmails: + - admin@contoso.com + sendToSubscriptionAdministrator: true + sendToSubscriptionCoAdministrator: true + predictive: + - lookAheadTime: PT5M + scaleMode: Enabled + profile: + - capacity: + - default: 1 + maximum: 10 + minimum: 1 + name: defaultProfile + rule: + - metricTrigger: + - dimensions: + - name: AppName + operator: Equals + values: + - App1 + metricName: Percentage CPU + metricNamespace: microsoft.compute/virtualmachinescalesets + metricResourceIdSelector: + matchLabels: + testing.upbound.io/example-name: example + operator: GreaterThan + statistic: Average + threshold: 75 + timeAggregation: Average + timeGrain: PT1M + timeWindow: PT5M + scaleAction: + - cooldown: PT1M + direction: Increase + type: ChangeCount + value: "1" + - metricTrigger: + - metricName: Percentage CPU + metricResourceIdSelector: + matchLabels: + testing.upbound.io/example-name: example + operator: LessThan + statistic: Average + threshold: 25 + timeAggregation: Average + timeGrain: PT1M + timeWindow: PT5M + scaleAction: + - cooldown: PT1M + direction: Decrease + type: ChangeCount + value: "1" + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + targetResourceIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: compute.azure.upbound.io/v1beta2 +kind: LinuxVirtualMachineScaleSet +metadata: + annotations: + meta.upbound.io/example-id: insights/v1beta2/monitorautoscalesetting + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + adminSshKey: + - publicKey: ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQDCsTcryUl51Q2VSEHqDRNmceUFo55ZtcIwxl2QITbN1RREti5ml/VTytC0yeBOvnZA4x4CFpdw/lCDPk0yrH9Ei5vVkXmOrExdTlT3qI7YaAzj1tUVlBd4S6LX1F7y6VLActvdHuDDuXZXzCDd/97420jrDfWZqJMlUK/EmCE5ParCeHIRIvmBxcEnGfFIsw8xQZl0HphxWOtJil8qsUWSdMyCiJYYQpMoMliO99X40AUc4/AlsyPyT5ddbKk08YrZ+rKDVHF7o29rh4vi5MmHkVgVQHKiKybWlHq+b71gIAUQk9wrJxD+dqt4igrmDSpIjfjwnd+l5UIn5fJSO5DYV4YT/4hwK7OKmuo7OFHD0WyY5YnkYEMtFgzemnRBdE8ulcT60DQpVgRMXFWHvhyCWy0L6sgj1QWDZlLpvsIvNfHsyhKFMG1frLnMt/nP0+YCcfg+v1JYeCKjeoJxB8DWcRBsjzItY0CGmzP8UYZiYKl/2u+2TgFS5r7NWH11bxoUzjKdaa1NLw+ieA8GlBFfCbfWe6YVB9ggUte4VtYFMZGxOjS2bAiYtfgTKFJv+XqORAwExG6+G2eDxIDyo80/OA9IG7Xv/jwQr7D6KDjDuULFcN/iTxuttoKrHeYz1hf5ZQlBdllwJHYx6fK2g8kha6r2JIQKocvsAXiiONqSfw== + hello@world.com + username: myadmin + adminUsername: myadmin + instances: 2 + location: West Europe + networkInterface: + - ipConfiguration: + - name: TestIPConfiguration + primary: true + subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: example + name: TestNetworkProfile + primary: true + osDisk: + - caching: ReadWrite + storageAccountType: StandardSSD_LRS + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sku: Standard_F2 + sourceImageReference: + - offer: 0001-com-ubuntu-server-jammy + publisher: Canonical + sku: 22_04-lts + version: latest + upgradeMode: Manual + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: insights/v1beta2/monitorautoscalesetting + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: insights/v1beta2/monitorautoscalesetting + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressPrefixes: + - 10.0.2.0/24 + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: 
example + virtualNetworkNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: VirtualNetwork +metadata: + annotations: + meta.upbound.io/example-id: insights/v1beta2/monitorautoscalesetting + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressSpace: + - 10.0.0.0/16 + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/insights/v1beta2/monitordatacollectionrule.yaml b/examples-generated/insights/v1beta2/monitordatacollectionrule.yaml new file mode 100644 index 000000000..cf47d6d0d --- /dev/null +++ b/examples-generated/insights/v1beta2/monitordatacollectionrule.yaml @@ -0,0 +1,291 @@ +apiVersion: insights.azure.upbound.io/v1beta2 +kind: MonitorDataCollectionRule +metadata: + annotations: + meta.upbound.io/example-id: insights/v1beta2/monitordatacollectionrule + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + dataCollectionEndpointIdSelector: + matchLabels: + testing.upbound.io/example-name: example + dataFlow: + - destinations: + - example-destination-metrics + streams: + - Microsoft-InsightsMetrics + - destinations: + - example-destination-log + streams: + - Microsoft-InsightsMetrics + - Microsoft-Syslog + - Microsoft-Perf + - destinations: + - example-destination-log + outputStream: Microsoft-Syslog + streams: + - Custom-MyTableRawData + transformKql: source | project TimeGenerated = Time, Computer, Message = AdditionalContext + dataSources: + - extension: + - extensionJson: |- + ${jsonencode({ + a = 1 + b = "hello" + })} + extensionName: example-extension-name + inputDataSources: + - example-datasource-wineventlog + name: example-datasource-extension + streams: + - Microsoft-WindowsEvent + iisLog: + - logDirectories: + - C:\Logs\W3SVC1 + name: example-datasource-iis + streams: + - Microsoft-W3CIISLog + 
logFile: + - filePatterns: + - C:\JavaLogs\*.log + format: text + name: example-datasource-logfile + settings: + - text: + - recordStartTimestampFormat: ISO 8601 + streams: + - Custom-MyTableRawData + performanceCounter: + - counterSpecifiers: + - Processor(*)\% Processor Time + name: example-datasource-perfcounter + samplingFrequencyInSeconds: 60 + streams: + - Microsoft-Perf + - Microsoft-InsightsMetrics + syslog: + - facilityNames: + - '*' + logLevels: + - '*' + name: example-datasource-syslog + streams: + - Microsoft-Syslog + windowsEventLog: + - name: example-datasource-wineventlog + streams: + - Microsoft-WindowsEvent + xPathQueries: + - '*![System/Level=1]' + description: data collection rule example + destinations: + - azureMonitorMetrics: + - name: example-destination-metrics + eventHub: + - eventHubIdSelector: + matchLabels: + testing.upbound.io/example-name: example + name: example-destination-eventhub + logAnalytics: + - name: example-destination-log + workspaceResourceIdSelector: + matchLabels: + testing.upbound.io/example-name: example + storageBlob: + - containerNameSelector: + matchLabels: + testing.upbound.io/example-name: example + name: example-destination-storage + storageAccountIdSelector: + matchLabels: + testing.upbound.io/example-name: example + identity: + - identityIds: + - ${azurerm_user_assigned_identity.example.id} + type: UserAssigned + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + streamDeclaration: + - column: + - name: Time + type: datetime + - name: Computer + type: string + - name: AdditionalContext + type: string + streamName: Custom-MyTableRawData + tags: + foo: bar + +--- + +apiVersion: eventhub.azure.upbound.io/v1beta2 +kind: EventHub +metadata: + annotations: + meta.upbound.io/example-id: insights/v1beta2/monitordatacollectionrule + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + messageRetention: 1 + 
namespaceNameSelector: + matchLabels: + testing.upbound.io/example-name: example + partitionCount: 2 + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: eventhub.azure.upbound.io/v1beta2 +kind: EventHubNamespace +metadata: + annotations: + meta.upbound.io/example-id: insights/v1beta2/monitordatacollectionrule + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + capacity: 1 + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sku: Standard + +--- + +apiVersion: operationsmanagement.azure.upbound.io/v1beta2 +kind: LogAnalyticsSolution +metadata: + annotations: + meta.upbound.io/example-id: insights/v1beta2/monitordatacollectionrule + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + plan: + - product: OMSGallery/WindowsEventForwarding + publisher: Microsoft + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + solutionName: WindowsEventForwarding + workspaceNameSelector: + matchLabels: + testing.upbound.io/example-name: example + workspaceResourceIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: operationalinsights.azure.upbound.io/v1beta2 +kind: Workspace +metadata: + annotations: + meta.upbound.io/example-id: insights/v1beta2/monitordatacollectionrule + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: insights.azure.upbound.io/v1beta1 +kind: MonitorDataCollectionEndpoint +metadata: + annotations: + meta.upbound.io/example-id: insights/v1beta2/monitordatacollectionrule + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + 
resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: insights/v1beta2/monitordatacollectionrule + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: storage.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: insights/v1beta2/monitordatacollectionrule + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountReplicationType: LRS + accountTier: Standard + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: storage.azure.upbound.io/v1beta1 +kind: Container +metadata: + annotations: + meta.upbound.io/example-id: insights/v1beta2/monitordatacollectionrule + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + containerAccessType: private + storageAccountNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: managedidentity.azure.upbound.io/v1beta1 +kind: UserAssignedIdentity +metadata: + annotations: + meta.upbound.io/example-id: insights/v1beta2/monitordatacollectionrule + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + name: example-uai + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/insights/v1beta2/monitordiagnosticsetting.yaml b/examples-generated/insights/v1beta2/monitordiagnosticsetting.yaml new file mode 100644 index 000000000..620947878 --- /dev/null +++ b/examples-generated/insights/v1beta2/monitordiagnosticsetting.yaml @@ -0,0 +1,77 @@ +apiVersion: insights.azure.upbound.io/v1beta2 +kind: MonitorDiagnosticSetting +metadata: + annotations: + 
meta.upbound.io/example-id: insights/v1beta2/monitordiagnosticsetting + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + enabledLog: + - category: AuditEvent + retentionPolicy: + - enabled: false + metric: + - category: AllMetrics + retentionPolicy: + - enabled: false + name: example + storageAccountIdSelector: + matchLabels: + testing.upbound.io/example-name: example + targetResourceId: ${azurerm_key_vault.example.id} + +--- + +apiVersion: keyvault.azure.upbound.io/v1beta2 +kind: Vault +metadata: + annotations: + meta.upbound.io/example-id: insights/v1beta2/monitordiagnosticsetting + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + purgeProtectionEnabled: false + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: standard + softDeleteRetentionDays: 7 + tenantId: ${data.azurerm_client_config.current.tenant_id} + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: insights/v1beta2/monitordiagnosticsetting + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: storage.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: insights/v1beta2/monitordiagnosticsetting + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountReplicationType: LRS + accountTier: Standard + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/insights/v1beta2/monitormetricalert.yaml b/examples-generated/insights/v1beta2/monitormetricalert.yaml new file mode 100644 index 000000000..e8ed53238 --- /dev/null +++ b/examples-generated/insights/v1beta2/monitormetricalert.yaml @@ -0,0 +1,85 @@ +apiVersion: 
insights.azure.upbound.io/v1beta2 +kind: MonitorMetricAlert +metadata: + annotations: + meta.upbound.io/example-id: insights/v1beta2/monitormetricalert + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + action: + - actionGroupIdSelector: + matchLabels: + testing.upbound.io/example-name: main + criteria: + - aggregation: Total + dimension: + - name: ApiName + operator: Include + values: + - '*' + metricName: Transactions + metricNamespace: Microsoft.Storage/storageAccounts + operator: GreaterThan + threshold: 50 + description: Action will be triggered when Transactions count is greater than + 50. + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + scopesRefs: + - name: to_monitor + +--- + +apiVersion: insights.azure.upbound.io/v1beta2 +kind: MonitorActionGroup +metadata: + annotations: + meta.upbound.io/example-id: insights/v1beta2/monitormetricalert + labels: + testing.upbound.io/example-name: main + name: main +spec: + forProvider: + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + shortName: exampleact + webhookReceiver: + - name: callmyapi + serviceUri: http://example.com/alert + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: insights/v1beta2/monitormetricalert + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: storage.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: insights/v1beta2/monitormetricalert + labels: + testing.upbound.io/example-name: to_monitor + name: to-monitor +spec: + forProvider: + accountReplicationType: LRS + accountTier: Standard + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/insights/v1beta2/monitorscheduledqueryrulesalert.yaml 
b/examples-generated/insights/v1beta2/monitorscheduledqueryrulesalert.yaml new file mode 100644 index 000000000..ba405bd42 --- /dev/null +++ b/examples-generated/insights/v1beta2/monitorscheduledqueryrulesalert.yaml @@ -0,0 +1,86 @@ +apiVersion: insights.azure.upbound.io/v1beta2 +kind: MonitorScheduledQueryRulesAlert +metadata: + annotations: + meta.upbound.io/example-id: insights/v1beta2/monitorscheduledqueryrulesalert + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + action: + - actionGroupRefs: [] + customWebhookPayload: '{}' + emailSubject: Email Header + dataSourceIdSelector: + matchLabels: + testing.upbound.io/example-name: example + description: Alert when total results cross threshold + enabled: true + frequency: 5 + location: West Europe + name: example + query: | + requests + | where tolong(resultCode) >= 500 + | summarize count() by bin(timestamp, 5m) + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + severity: 1 + tags: + foo: bar + timeWindow: 30 + trigger: + - operator: GreaterThan + threshold: 3 + +--- + +apiVersion: insights.azure.upbound.io/v1beta1 +kind: ApplicationInsights +metadata: + annotations: + meta.upbound.io/example-id: insights/v1beta2/monitorscheduledqueryrulesalert + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + applicationType: web + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: insights.azure.upbound.io/v1beta1 +kind: ApplicationInsights +metadata: + annotations: + meta.upbound.io/example-id: insights/v1beta2/monitorscheduledqueryrulesalert + labels: + testing.upbound.io/example-name: example2 + name: example2 +spec: + forProvider: + applicationType: web + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: 
ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: insights/v1beta2/monitorscheduledqueryrulesalert + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/insights/v1beta2/monitorscheduledqueryrulesalertv2.yaml b/examples-generated/insights/v1beta2/monitorscheduledqueryrulesalertv2.yaml new file mode 100644 index 000000000..09040ecbd --- /dev/null +++ b/examples-generated/insights/v1beta2/monitorscheduledqueryrulesalertv2.yaml @@ -0,0 +1,101 @@ +apiVersion: insights.azure.upbound.io/v1beta2 +kind: MonitorScheduledQueryRulesAlertV2 +metadata: + annotations: + meta.upbound.io/example-id: insights/v1beta2/monitorscheduledqueryrulesalertv2 + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + action: + - actionGroups: + - ${azurerm_monitor_action_group.example.id} + customProperties: + key: value + key2: value2 + autoMitigationEnabled: true + criteria: + - dimension: + - name: client_CountryOrRegion + operator: Exclude + values: + - "123" + failingPeriods: + - minimumFailingPeriodsToTriggerAlert: 1 + numberOfEvaluationPeriods: 1 + metricMeasureColumn: CountByCountry + operator: LessThan + query: | + requests + | summarize CountByCountry=count() by client_CountryOrRegion + resourceIdColumn: client_CountryOrRegion + threshold: 17.5 + timeAggregationMethod: Maximum + description: example sqr + displayName: example-sqr + enabled: true + evaluationFrequency: PT10M + location: West Europe + queryTimeRangeOverride: PT1H + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + scopesRefs: + - name: example + severity: 4 + skipQueryValidation: true + tags: + key: value + key2: value2 + windowDuration: PT10M + workspaceAlertsStorageEnabled: false + +--- + +apiVersion: insights.azure.upbound.io/v1beta1 +kind: ApplicationInsights +metadata: + annotations: + meta.upbound.io/example-id: 
insights/v1beta2/monitorscheduledqueryrulesalertv2 + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + applicationType: web + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: insights.azure.upbound.io/v1beta2 +kind: MonitorActionGroup +metadata: + annotations: + meta.upbound.io/example-id: insights/v1beta2/monitorscheduledqueryrulesalertv2 + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + shortName: test mag + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: insights/v1beta2/monitorscheduledqueryrulesalertv2 + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/insights/v1beta2/monitorscheduledqueryruleslog.yaml b/examples-generated/insights/v1beta2/monitorscheduledqueryruleslog.yaml new file mode 100644 index 000000000..fb6545d92 --- /dev/null +++ b/examples-generated/insights/v1beta2/monitorscheduledqueryruleslog.yaml @@ -0,0 +1,114 @@ +apiVersion: insights.azure.upbound.io/v1beta2 +kind: MonitorScheduledQueryRulesLog +metadata: + annotations: + meta.upbound.io/example-id: insights/v1beta2/monitorscheduledqueryruleslog + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + criteria: + - dimension: + - name: Computer + operator: Include + values: + - targetVM + metricName: Average_% Idle Time + dataSourceIdSelector: + matchLabels: + testing.upbound.io/example-name: example + description: Scheduled query rule LogToMetric example + enabled: true + location: West Europe + name: example + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + tags: + foo: bar + +--- + +apiVersion: 
operationalinsights.azure.upbound.io/v1beta2 +kind: Workspace +metadata: + annotations: + meta.upbound.io/example-id: insights/v1beta2/monitorscheduledqueryruleslog + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + retentionInDays: 30 + sku: PerGB2018 + +--- + +apiVersion: insights.azure.upbound.io/v1beta2 +kind: MonitorActionGroup +metadata: + annotations: + meta.upbound.io/example-id: insights/v1beta2/monitorscheduledqueryruleslog + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + shortName: exampleact + webhookReceiver: + - name: callmyapi + serviceUri: http://example.com/alert + +--- + +apiVersion: insights.azure.upbound.io/v1beta2 +kind: MonitorMetricAlert +metadata: + annotations: + meta.upbound.io/example-id: insights/v1beta2/monitorscheduledqueryruleslog + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + action: + - actionGroupIdSelector: + matchLabels: + testing.upbound.io/example-name: example + criteria: + - aggregation: Average + metricName: UsedCapacity + metricNamespace: Microsoft.OperationalInsights/workspaces + operator: LessThan + threshold: 10 + description: Action will be triggered when Average_% Idle Time metric is less + than 10. 
+ frequency: PT1M + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + scopesRefs: + - name: example + windowSize: PT5M + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: insights/v1beta2/monitorscheduledqueryruleslog + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/iotcentral/v1beta1/applicationnetworkruleset.yaml b/examples-generated/iotcentral/v1beta1/applicationnetworkruleset.yaml index bdf0ba7b6..78a1520e8 100644 --- a/examples-generated/iotcentral/v1beta1/applicationnetworkruleset.yaml +++ b/examples-generated/iotcentral/v1beta1/applicationnetworkruleset.yaml @@ -19,7 +19,7 @@ spec: --- -apiVersion: iotcentral.azure.upbound.io/v1beta1 +apiVersion: iotcentral.azure.upbound.io/v1beta2 kind: Application metadata: annotations: diff --git a/examples-generated/iotcentral/v1beta2/application.yaml b/examples-generated/iotcentral/v1beta2/application.yaml new file mode 100644 index 000000000..b6410d2f7 --- /dev/null +++ b/examples-generated/iotcentral/v1beta2/application.yaml @@ -0,0 +1,35 @@ +apiVersion: iotcentral.azure.upbound.io/v1beta2 +kind: Application +metadata: + annotations: + meta.upbound.io/example-id: iotcentral/v1beta2/application + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + displayName: example-iotcentral-app-display-name + location: West Europe + name: example-iotcentral-app + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sku: ST1 + subDomain: example-iotcentral-app-subdomain + tags: + Foo: Bar + template: iotc-default@1.0.0 + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: iotcentral/v1beta2/application + labels: + testing.upbound.io/example-name: example + name: example +spec: + 
forProvider: + location: West Europe diff --git a/examples-generated/keyvault/v1beta1/accesspolicy.yaml b/examples-generated/keyvault/v1beta1/accesspolicy.yaml index 4441f06bd..ad8d18e9a 100644 --- a/examples-generated/keyvault/v1beta1/accesspolicy.yaml +++ b/examples-generated/keyvault/v1beta1/accesspolicy.yaml @@ -20,7 +20,7 @@ spec: --- -apiVersion: keyvault.azure.upbound.io/v1beta1 +apiVersion: keyvault.azure.upbound.io/v1beta2 kind: Vault metadata: annotations: diff --git a/examples-generated/keyvault/v1beta1/certificatecontacts.yaml b/examples-generated/keyvault/v1beta1/certificatecontacts.yaml index f62c3a667..8b4544469 100644 --- a/examples-generated/keyvault/v1beta1/certificatecontacts.yaml +++ b/examples-generated/keyvault/v1beta1/certificatecontacts.yaml @@ -19,7 +19,7 @@ spec: --- -apiVersion: keyvault.azure.upbound.io/v1beta1 +apiVersion: keyvault.azure.upbound.io/v1beta2 kind: Vault metadata: annotations: diff --git a/examples-generated/keyvault/v1beta1/certificateissuer.yaml b/examples-generated/keyvault/v1beta1/certificateissuer.yaml index e8bf49409..1c2ac46e0 100644 --- a/examples-generated/keyvault/v1beta1/certificateissuer.yaml +++ b/examples-generated/keyvault/v1beta1/certificateissuer.yaml @@ -21,7 +21,7 @@ spec: --- -apiVersion: keyvault.azure.upbound.io/v1beta1 +apiVersion: keyvault.azure.upbound.io/v1beta2 kind: Vault metadata: annotations: diff --git a/examples-generated/keyvault/v1beta1/managedstorageaccount.yaml b/examples-generated/keyvault/v1beta1/managedstorageaccount.yaml index 5af39c017..b86ac7240 100644 --- a/examples-generated/keyvault/v1beta1/managedstorageaccount.yaml +++ b/examples-generated/keyvault/v1beta1/managedstorageaccount.yaml @@ -20,7 +20,7 @@ spec: --- -apiVersion: keyvault.azure.upbound.io/v1beta1 +apiVersion: keyvault.azure.upbound.io/v1beta2 kind: Vault metadata: annotations: @@ -53,7 +53,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account 
metadata: annotations: diff --git a/examples-generated/keyvault/v1beta1/managedstorageaccountsastokendefinition.yaml b/examples-generated/keyvault/v1beta1/managedstorageaccountsastokendefinition.yaml index 4525606b8..b5386b57a 100644 --- a/examples-generated/keyvault/v1beta1/managedstorageaccountsastokendefinition.yaml +++ b/examples-generated/keyvault/v1beta1/managedstorageaccountsastokendefinition.yaml @@ -17,7 +17,7 @@ spec: --- -apiVersion: keyvault.azure.upbound.io/v1beta1 +apiVersion: keyvault.azure.upbound.io/v1beta2 kind: Vault metadata: annotations: @@ -72,7 +72,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: diff --git a/examples-generated/keyvault/v1beta1/secret.yaml b/examples-generated/keyvault/v1beta1/secret.yaml index 771aca004..cf4d86071 100644 --- a/examples-generated/keyvault/v1beta1/secret.yaml +++ b/examples-generated/keyvault/v1beta1/secret.yaml @@ -19,7 +19,7 @@ spec: --- -apiVersion: keyvault.azure.upbound.io/v1beta1 +apiVersion: keyvault.azure.upbound.io/v1beta2 kind: Vault metadata: annotations: diff --git a/examples-generated/keyvault/v1beta2/certificate.yaml b/examples-generated/keyvault/v1beta2/certificate.yaml new file mode 100644 index 000000000..2c7297b1d --- /dev/null +++ b/examples-generated/keyvault/v1beta2/certificate.yaml @@ -0,0 +1,56 @@ +apiVersion: keyvault.azure.upbound.io/v1beta2 +kind: Certificate +metadata: + annotations: + meta.upbound.io/example-id: keyvault/v1beta2/certificate + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + certificate: + - contentsSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + passwordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + keyVaultIdSelector: + matchLabels: + testing.upbound.io/example-name: example + name: imported-cert + +--- + +apiVersion: keyvault.azure.upbound.io/v1beta2 +kind: 
Vault +metadata: + annotations: + meta.upbound.io/example-id: keyvault/v1beta2/certificate + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: premium + tenantId: ${data.azurerm_client_config.current.tenant_id} + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: keyvault/v1beta2/certificate + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/keyvault/v1beta2/key.yaml b/examples-generated/keyvault/v1beta2/key.yaml new file mode 100644 index 000000000..f04614d01 --- /dev/null +++ b/examples-generated/keyvault/v1beta2/key.yaml @@ -0,0 +1,62 @@ +apiVersion: keyvault.azure.upbound.io/v1beta2 +kind: Key +metadata: + annotations: + meta.upbound.io/example-id: keyvault/v1beta2/key + labels: + testing.upbound.io/example-name: generated + name: generated +spec: + forProvider: + keyOpts: + - decrypt + - encrypt + - sign + - unwrapKey + - verify + - wrapKey + keySize: 2048 + keyType: RSA + keyVaultIdSelector: + matchLabels: + testing.upbound.io/example-name: example + name: generated-certificate + rotationPolicy: + - automatic: + - timeBeforeExpiry: P30D + expireAfter: P90D + notifyBeforeExpiry: P29D + +--- + +apiVersion: keyvault.azure.upbound.io/v1beta2 +kind: Vault +metadata: + annotations: + meta.upbound.io/example-id: keyvault/v1beta2/key + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: premium + softDeleteRetentionDays: 7 + tenantId: ${data.azurerm_client_config.current.tenant_id} + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + 
meta.upbound.io/example-id: keyvault/v1beta2/key + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/keyvault/v1beta2/managedhardwaresecuritymodule.yaml b/examples-generated/keyvault/v1beta2/managedhardwaresecuritymodule.yaml new file mode 100644 index 000000000..ea8dec062 --- /dev/null +++ b/examples-generated/keyvault/v1beta2/managedhardwaresecuritymodule.yaml @@ -0,0 +1,36 @@ +apiVersion: keyvault.azure.upbound.io/v1beta2 +kind: ManagedHardwareSecurityModule +metadata: + annotations: + meta.upbound.io/example-id: keyvault/v1beta2/managedhardwaresecuritymodule + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + adminObjectIds: + - ${data.azurerm_client_config.current.object_id} + location: West Europe + purgeProtectionEnabled: false + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: Standard_B1 + softDeleteRetentionDays: 90 + tags: + Env: Test + tenantId: ${data.azurerm_client_config.current.tenant_id} + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: keyvault/v1beta2/managedhardwaresecuritymodule + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/keyvault/v1beta2/vault.yaml b/examples-generated/keyvault/v1beta2/vault.yaml new file mode 100644 index 000000000..096d4c44b --- /dev/null +++ b/examples-generated/keyvault/v1beta2/vault.yaml @@ -0,0 +1,33 @@ +apiVersion: keyvault.azure.upbound.io/v1beta2 +kind: Vault +metadata: + annotations: + meta.upbound.io/example-id: keyvault/v1beta2/vault + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + enabledForDiskEncryption: true + location: West Europe + purgeProtectionEnabled: false + resourceGroupNameSelector: + matchLabels: + 
testing.upbound.io/example-name: example + skuName: standard + softDeleteRetentionDays: 7 + tenantId: ${data.azurerm_client_config.current.tenant_id} + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: keyvault/v1beta2/vault + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/kusto/v1beta1/clustermanagedprivateendpoint.yaml b/examples-generated/kusto/v1beta1/clustermanagedprivateendpoint.yaml index 10a026aa3..0a1789279 100644 --- a/examples-generated/kusto/v1beta1/clustermanagedprivateendpoint.yaml +++ b/examples-generated/kusto/v1beta1/clustermanagedprivateendpoint.yaml @@ -25,7 +25,7 @@ spec: --- -apiVersion: kusto.azure.upbound.io/v1beta1 +apiVersion: kusto.azure.upbound.io/v1beta2 kind: Cluster metadata: annotations: @@ -59,7 +59,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: diff --git a/examples-generated/kusto/v1beta1/clusterprincipalassignment.yaml b/examples-generated/kusto/v1beta1/clusterprincipalassignment.yaml index 7413dbf31..5e37c714b 100644 --- a/examples-generated/kusto/v1beta1/clusterprincipalassignment.yaml +++ b/examples-generated/kusto/v1beta1/clusterprincipalassignment.yaml @@ -21,7 +21,7 @@ spec: --- -apiVersion: kusto.azure.upbound.io/v1beta1 +apiVersion: kusto.azure.upbound.io/v1beta2 kind: Cluster metadata: annotations: diff --git a/examples-generated/kusto/v1beta1/database.yaml b/examples-generated/kusto/v1beta1/database.yaml index 4879924c9..7faf15a51 100644 --- a/examples-generated/kusto/v1beta1/database.yaml +++ b/examples-generated/kusto/v1beta1/database.yaml @@ -20,7 +20,7 @@ spec: --- -apiVersion: kusto.azure.upbound.io/v1beta1 +apiVersion: kusto.azure.upbound.io/v1beta2 kind: Cluster metadata: annotations: diff --git 
a/examples-generated/kusto/v1beta1/databaseprincipalassignment.yaml b/examples-generated/kusto/v1beta1/databaseprincipalassignment.yaml index e39404d77..465c079d2 100644 --- a/examples-generated/kusto/v1beta1/databaseprincipalassignment.yaml +++ b/examples-generated/kusto/v1beta1/databaseprincipalassignment.yaml @@ -24,7 +24,7 @@ spec: --- -apiVersion: kusto.azure.upbound.io/v1beta1 +apiVersion: kusto.azure.upbound.io/v1beta2 kind: Cluster metadata: annotations: diff --git a/examples-generated/kusto/v1beta1/eventgriddataconnection.yaml b/examples-generated/kusto/v1beta1/eventgriddataconnection.yaml index da400f9e9..016a3308c 100644 --- a/examples-generated/kusto/v1beta1/eventgriddataconnection.yaml +++ b/examples-generated/kusto/v1beta1/eventgriddataconnection.yaml @@ -33,7 +33,7 @@ spec: --- -apiVersion: eventgrid.azure.upbound.io/v1beta1 +apiVersion: eventgrid.azure.upbound.io/v1beta2 kind: EventSubscription metadata: annotations: @@ -58,7 +58,7 @@ spec: --- -apiVersion: eventhub.azure.upbound.io/v1beta1 +apiVersion: eventhub.azure.upbound.io/v1beta2 kind: EventHub metadata: annotations: @@ -101,7 +101,7 @@ spec: --- -apiVersion: eventhub.azure.upbound.io/v1beta1 +apiVersion: eventhub.azure.upbound.io/v1beta2 kind: EventHubNamespace metadata: annotations: @@ -119,7 +119,7 @@ spec: --- -apiVersion: kusto.azure.upbound.io/v1beta1 +apiVersion: kusto.azure.upbound.io/v1beta2 kind: Cluster metadata: annotations: @@ -175,7 +175,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: diff --git a/examples-generated/kusto/v1beta1/eventhubdataconnection.yaml b/examples-generated/kusto/v1beta1/eventhubdataconnection.yaml index f4547430c..ad01177d5 100644 --- a/examples-generated/kusto/v1beta1/eventhubdataconnection.yaml +++ b/examples-generated/kusto/v1beta1/eventhubdataconnection.yaml @@ -30,7 +30,7 @@ spec: --- -apiVersion: eventhub.azure.upbound.io/v1beta1 +apiVersion: 
eventhub.azure.upbound.io/v1beta2 kind: EventHub metadata: annotations: @@ -73,7 +73,7 @@ spec: --- -apiVersion: eventhub.azure.upbound.io/v1beta1 +apiVersion: eventhub.azure.upbound.io/v1beta2 kind: EventHubNamespace metadata: annotations: @@ -91,7 +91,7 @@ spec: --- -apiVersion: kusto.azure.upbound.io/v1beta1 +apiVersion: kusto.azure.upbound.io/v1beta2 kind: Cluster metadata: annotations: diff --git a/examples-generated/kusto/v1beta1/iothubdataconnection.yaml b/examples-generated/kusto/v1beta1/iothubdataconnection.yaml index a6dea3e75..d181c195a 100644 --- a/examples-generated/kusto/v1beta1/iothubdataconnection.yaml +++ b/examples-generated/kusto/v1beta1/iothubdataconnection.yaml @@ -38,7 +38,7 @@ spec: --- -apiVersion: devices.azure.upbound.io/v1beta1 +apiVersion: devices.azure.upbound.io/v1beta2 kind: IOTHub metadata: annotations: @@ -98,7 +98,7 @@ spec: --- -apiVersion: kusto.azure.upbound.io/v1beta1 +apiVersion: kusto.azure.upbound.io/v1beta2 kind: Cluster metadata: annotations: diff --git a/examples-generated/kusto/v1beta2/attacheddatabaseconfiguration.yaml b/examples-generated/kusto/v1beta2/attacheddatabaseconfiguration.yaml new file mode 100644 index 000000000..54f7f97b9 --- /dev/null +++ b/examples-generated/kusto/v1beta2/attacheddatabaseconfiguration.yaml @@ -0,0 +1,131 @@ +apiVersion: kusto.azure.upbound.io/v1beta2 +kind: AttachedDatabaseConfiguration +metadata: + annotations: + meta.upbound.io/example-id: kusto/v1beta2/attacheddatabaseconfiguration + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + clusterNameSelector: + matchLabels: + testing.upbound.io/example-name: follower_cluster + clusterResourceIdSelector: + matchLabels: + testing.upbound.io/example-name: followed_cluster + databaseNameSelector: + matchLabels: + testing.upbound.io/example-name: example + location: West Europe + name: configuration1 + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sharing: + - 
externalTablesToExclude: + - ExternalTable2 + externalTablesToInclude: + - ExternalTable1 + materializedViewsToExclude: + - MaterializedViewTable2 + materializedViewsToInclude: + - MaterializedViewTable1 + tablesToExclude: + - Table2 + tablesToInclude: + - Table1 + +--- + +apiVersion: kusto.azure.upbound.io/v1beta2 +kind: Cluster +metadata: + annotations: + meta.upbound.io/example-id: kusto/v1beta2/attacheddatabaseconfiguration + labels: + testing.upbound.io/example-name: followed_cluster + name: followed-cluster +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sku: + - capacity: 1 + name: Dev(No SLA)_Standard_D11_v2 + +--- + +apiVersion: kusto.azure.upbound.io/v1beta2 +kind: Cluster +metadata: + annotations: + meta.upbound.io/example-id: kusto/v1beta2/attacheddatabaseconfiguration + labels: + testing.upbound.io/example-name: follower_cluster + name: follower-cluster +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sku: + - capacity: 1 + name: Dev(No SLA)_Standard_D11_v2 + +--- + +apiVersion: kusto.azure.upbound.io/v1beta1 +kind: Database +metadata: + annotations: + meta.upbound.io/example-id: kusto/v1beta2/attacheddatabaseconfiguration + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + clusterNameSelector: + matchLabels: + testing.upbound.io/example-name: example + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: kusto.azure.upbound.io/v1beta1 +kind: Database +metadata: + annotations: + meta.upbound.io/example-id: kusto/v1beta2/attacheddatabaseconfiguration + labels: + testing.upbound.io/example-name: followed_database + name: followed-database +spec: + forProvider: + clusterNameSelector: + matchLabels: + testing.upbound.io/example-name: follower_cluster + location: West 
Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: kusto/v1beta2/attacheddatabaseconfiguration + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/kusto/v1beta2/cluster.yaml b/examples-generated/kusto/v1beta2/cluster.yaml new file mode 100644 index 000000000..471effff2 --- /dev/null +++ b/examples-generated/kusto/v1beta2/cluster.yaml @@ -0,0 +1,33 @@ +apiVersion: kusto.azure.upbound.io/v1beta2 +kind: Cluster +metadata: + annotations: + meta.upbound.io/example-id: kusto/v1beta2/cluster + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sku: + - capacity: 2 + name: Standard_D13_v2 + tags: + Environment: Production + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: kusto/v1beta2/cluster + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/labservices/v1beta2/labservicelab.yaml b/examples-generated/labservices/v1beta2/labservicelab.yaml new file mode 100644 index 000000000..ceb5531fd --- /dev/null +++ b/examples-generated/labservices/v1beta2/labservicelab.yaml @@ -0,0 +1,46 @@ +apiVersion: labservices.azure.upbound.io/v1beta2 +kind: LabServiceLab +metadata: + annotations: + meta.upbound.io/example-id: labservices/v1beta2/labservicelab + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + security: + - openAccessEnabled: false + title: Test Title + 
virtualMachine: + - adminUser: + - passwordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + username: testadmin + imageReference: + - offer: 0001-com-ubuntu-server-jammy + publisher: Canonical + sku: 22_04-lts + version: latest + sku: + - capacity: 0 + name: Classic_Fsv2_2_4GB_128_S_SSD + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: labservices/v1beta2/labservicelab + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/labservices/v1beta2/labserviceplan.yaml b/examples-generated/labservices/v1beta2/labserviceplan.yaml new file mode 100644 index 000000000..260dccf47 --- /dev/null +++ b/examples-generated/labservices/v1beta2/labserviceplan.yaml @@ -0,0 +1,30 @@ +apiVersion: labservices.azure.upbound.io/v1beta2 +kind: LabServicePlan +metadata: + annotations: + meta.upbound.io/example-id: labservices/v1beta2/labserviceplan + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + allowedRegions: + - ${azurerm_resource_group.example.location} + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: labservices/v1beta2/labserviceplan + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/loadtestservice/v1beta2/loadtest.yaml b/examples-generated/loadtestservice/v1beta2/loadtest.yaml new file mode 100644 index 000000000..49cc09bca --- /dev/null +++ b/examples-generated/loadtestservice/v1beta2/loadtest.yaml @@ -0,0 +1,46 @@ +apiVersion: loadtestservice.azure.upbound.io/v1beta2 +kind: LoadTest +metadata: + annotations: + meta.upbound.io/example-id: 
loadtestservice/v1beta2/loadtest + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: loadtestservice/v1beta2/loadtest + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: managedidentity.azure.upbound.io/v1beta1 +kind: UserAssignedIdentity +metadata: + annotations: + meta.upbound.io/example-id: loadtestservice/v1beta2/loadtest + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + name: example + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/logic/v1beta1/appactioncustom.yaml b/examples-generated/logic/v1beta1/appactioncustom.yaml index 4c3e1e645..0305dd0ca 100644 --- a/examples-generated/logic/v1beta1/appactioncustom.yaml +++ b/examples-generated/logic/v1beta1/appactioncustom.yaml @@ -29,7 +29,7 @@ spec: --- -apiVersion: logic.azure.upbound.io/v1beta1 +apiVersion: logic.azure.upbound.io/v1beta2 kind: AppWorkflow metadata: annotations: diff --git a/examples-generated/logic/v1beta1/appactionhttp.yaml b/examples-generated/logic/v1beta1/appactionhttp.yaml index 73e5d2c9b..8a9d9020b 100644 --- a/examples-generated/logic/v1beta1/appactionhttp.yaml +++ b/examples-generated/logic/v1beta1/appactionhttp.yaml @@ -16,7 +16,7 @@ spec: --- -apiVersion: logic.azure.upbound.io/v1beta1 +apiVersion: logic.azure.upbound.io/v1beta2 kind: AppWorkflow metadata: annotations: diff --git a/examples-generated/logic/v1beta1/apptriggercustom.yaml b/examples-generated/logic/v1beta1/apptriggercustom.yaml index ec7ea6d34..4c89b488a 100644 --- a/examples-generated/logic/v1beta1/apptriggercustom.yaml +++ 
b/examples-generated/logic/v1beta1/apptriggercustom.yaml @@ -22,7 +22,7 @@ spec: --- -apiVersion: logic.azure.upbound.io/v1beta1 +apiVersion: logic.azure.upbound.io/v1beta2 kind: AppWorkflow metadata: annotations: diff --git a/examples-generated/logic/v1beta1/apptriggerhttprequest.yaml b/examples-generated/logic/v1beta1/apptriggerhttprequest.yaml index fe6651050..8ead230d7 100644 --- a/examples-generated/logic/v1beta1/apptriggerhttprequest.yaml +++ b/examples-generated/logic/v1beta1/apptriggerhttprequest.yaml @@ -23,7 +23,7 @@ spec: --- -apiVersion: logic.azure.upbound.io/v1beta1 +apiVersion: logic.azure.upbound.io/v1beta2 kind: AppWorkflow metadata: annotations: diff --git a/examples-generated/logic/v1beta1/integrationserviceenvironment.yaml b/examples-generated/logic/v1beta1/integrationserviceenvironment.yaml index 06ed9b077..8942ebca6 100644 --- a/examples-generated/logic/v1beta1/integrationserviceenvironment.yaml +++ b/examples-generated/logic/v1beta1/integrationserviceenvironment.yaml @@ -38,7 +38,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: Subnet metadata: annotations: @@ -63,7 +63,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: Subnet metadata: annotations: @@ -84,7 +84,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: Subnet metadata: annotations: @@ -105,7 +105,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: Subnet metadata: annotations: @@ -126,7 +126,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: VirtualNetwork metadata: annotations: diff --git a/examples-generated/logic/v1beta2/appintegrationaccountbatchconfiguration.yaml b/examples-generated/logic/v1beta2/appintegrationaccountbatchconfiguration.yaml new file mode 100644 index 
000000000..485ffcede --- /dev/null +++ b/examples-generated/logic/v1beta2/appintegrationaccountbatchconfiguration.yaml @@ -0,0 +1,53 @@ +apiVersion: logic.azure.upbound.io/v1beta2 +kind: AppIntegrationAccountBatchConfiguration +metadata: + annotations: + meta.upbound.io/example-id: logic/v1beta2/appintegrationaccountbatchconfiguration + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + batchGroupName: TestBatchGroup + integrationAccountNameSelector: + matchLabels: + testing.upbound.io/example-name: example + name: exampleiabc + releaseCriteria: + - messageCount: 80 + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: logic.azure.upbound.io/v1beta1 +kind: AppIntegrationAccount +metadata: + annotations: + meta.upbound.io/example-id: logic/v1beta2/appintegrationaccountbatchconfiguration + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + name: example-ia + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: Standard + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: logic/v1beta2/appintegrationaccountbatchconfiguration + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/logic/v1beta2/apptriggerrecurrence.yaml b/examples-generated/logic/v1beta2/apptriggerrecurrence.yaml new file mode 100644 index 000000000..a2bed64de --- /dev/null +++ b/examples-generated/logic/v1beta2/apptriggerrecurrence.yaml @@ -0,0 +1,46 @@ +apiVersion: logic.azure.upbound.io/v1beta2 +kind: AppTriggerRecurrence +metadata: + annotations: + meta.upbound.io/example-id: logic/v1beta2/apptriggerrecurrence + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + frequency: Day + interval: 1 + 
logicAppIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: logic.azure.upbound.io/v1beta2 +kind: AppWorkflow +metadata: + annotations: + meta.upbound.io/example-id: logic/v1beta2/apptriggerrecurrence + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: logic/v1beta2/apptriggerrecurrence + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/logic/v1beta2/appworkflow.yaml b/examples-generated/logic/v1beta2/appworkflow.yaml new file mode 100644 index 000000000..034abb85b --- /dev/null +++ b/examples-generated/logic/v1beta2/appworkflow.yaml @@ -0,0 +1,28 @@ +apiVersion: logic.azure.upbound.io/v1beta2 +kind: AppWorkflow +metadata: + annotations: + meta.upbound.io/example-id: logic/v1beta2/appworkflow + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: logic/v1beta2/appworkflow + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/logz/v1beta1/subaccounttagrule.yaml b/examples-generated/logz/v1beta1/subaccounttagrule.yaml index 96e215ef2..9df3c85a1 100644 --- a/examples-generated/logz/v1beta1/subaccounttagrule.yaml +++ b/examples-generated/logz/v1beta1/subaccounttagrule.yaml @@ -24,7 +24,7 @@ spec: --- -apiVersion: logz.azure.upbound.io/v1beta1 +apiVersion: logz.azure.upbound.io/v1beta2 kind: Monitor metadata: 
annotations: @@ -50,7 +50,7 @@ spec: --- -apiVersion: logz.azure.upbound.io/v1beta1 +apiVersion: logz.azure.upbound.io/v1beta2 kind: SubAccount metadata: annotations: diff --git a/examples-generated/logz/v1beta1/tagrule.yaml b/examples-generated/logz/v1beta1/tagrule.yaml index c66806727..d59d49530 100644 --- a/examples-generated/logz/v1beta1/tagrule.yaml +++ b/examples-generated/logz/v1beta1/tagrule.yaml @@ -24,7 +24,7 @@ spec: --- -apiVersion: logz.azure.upbound.io/v1beta1 +apiVersion: logz.azure.upbound.io/v1beta2 kind: Monitor metadata: annotations: diff --git a/examples-generated/logz/v1beta2/monitor.yaml b/examples-generated/logz/v1beta2/monitor.yaml new file mode 100644 index 000000000..a6bcfed71 --- /dev/null +++ b/examples-generated/logz/v1beta2/monitor.yaml @@ -0,0 +1,37 @@ +apiVersion: logz.azure.upbound.io/v1beta2 +kind: Monitor +metadata: + annotations: + meta.upbound.io/example-id: logz/v1beta2/monitor + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + plan: + - billingCycle: MONTHLY + effectiveDate: "2022-06-06T00:00:00Z" + usageType: COMMITTED + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + user: + - email: user@example.com + firstName: Example + lastName: User + phoneNumber: "+12313803556" + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: logz/v1beta2/monitor + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/logz/v1beta2/subaccount.yaml b/examples-generated/logz/v1beta2/subaccount.yaml new file mode 100644 index 000000000..93af0c6c5 --- /dev/null +++ b/examples-generated/logz/v1beta2/subaccount.yaml @@ -0,0 +1,58 @@ +apiVersion: logz.azure.upbound.io/v1beta2 +kind: SubAccount +metadata: + annotations: + meta.upbound.io/example-id: logz/v1beta2/subaccount + labels: + 
testing.upbound.io/example-name: example + name: example +spec: + forProvider: + logzMonitorIdSelector: + matchLabels: + testing.upbound.io/example-name: example + user: + - email: user@example.com + firstName: ${azurerm_logz_monitor.example.user[0].first_name} + lastName: ${azurerm_logz_monitor.example.user[0].last_name} + phoneNumber: ${azurerm_logz_monitor.example.user[0].phone_number} + +--- + +apiVersion: logz.azure.upbound.io/v1beta2 +kind: Monitor +metadata: + annotations: + meta.upbound.io/example-id: logz/v1beta2/subaccount + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + plan: + - billingCycle: MONTHLY + effectiveDate: "2022-06-06T00:00:00Z" + usageType: COMMITTED + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + user: + - email: user@example.com + firstName: Example + lastName: User + phoneNumber: "+12313803556" + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: logz/v1beta2/subaccount + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/machinelearningservices/v1beta2/computecluster.yaml b/examples-generated/machinelearningservices/v1beta2/computecluster.yaml new file mode 100644 index 000000000..f243a5743 --- /dev/null +++ b/examples-generated/machinelearningservices/v1beta2/computecluster.yaml @@ -0,0 +1,167 @@ +apiVersion: machinelearningservices.azure.upbound.io/v1beta2 +kind: ComputeCluster +metadata: + annotations: + meta.upbound.io/example-id: machinelearningservices/v1beta2/computecluster + labels: + testing.upbound.io/example-name: test + name: test +spec: + forProvider: + identity: + - type: SystemAssigned + location: West Europe + machineLearningWorkspaceIdSelector: + matchLabels: + testing.upbound.io/example-name: example + name: example + scaleSettings: + - maxNodeCount: 
1 + minNodeCount: 0 + scaleDownNodesAfterIdleDuration: PT30S + subnetResourceIdSelector: + matchLabels: + testing.upbound.io/example-name: example + vmPriority: LowPriority + vmSize: Standard_DS2_v2 + +--- + +apiVersion: insights.azure.upbound.io/v1beta1 +kind: ApplicationInsights +metadata: + annotations: + meta.upbound.io/example-id: machinelearningservices/v1beta2/computecluster + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + applicationType: web + location: west europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: keyvault.azure.upbound.io/v1beta2 +kind: Vault +metadata: + annotations: + meta.upbound.io/example-id: machinelearningservices/v1beta2/computecluster + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: west europe + purgeProtectionEnabled: true + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: standard + tenantId: ${data.azurerm_client_config.current.tenant_id} + +--- + +apiVersion: machinelearningservices.azure.upbound.io/v1beta2 +kind: Workspace +metadata: + annotations: + meta.upbound.io/example-id: machinelearningservices/v1beta2/computecluster + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + applicationInsightsIdSelector: + matchLabels: + testing.upbound.io/example-name: example + identity: + - type: SystemAssigned + keyVaultIdSelector: + matchLabels: + testing.upbound.io/example-name: example + location: west europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + storageAccountIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: machinelearningservices/v1beta2/computecluster + labels: + testing.upbound.io/example-name: 
example + name: example +spec: + forProvider: + location: west europe + tags: + stage: example + +--- + +apiVersion: storage.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: machinelearningservices/v1beta2/computecluster + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountReplicationType: LRS + accountTier: Standard + location: west europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: machinelearningservices/v1beta2/computecluster + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressPrefixes: + - 10.1.0.0/24 + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + virtualNetworkNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: VirtualNetwork +metadata: + annotations: + meta.upbound.io/example-id: machinelearningservices/v1beta2/computecluster + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressSpace: + - 10.1.0.0/16 + location: west europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/machinelearningservices/v1beta2/computeinstance.yaml b/examples-generated/machinelearningservices/v1beta2/computeinstance.yaml new file mode 100644 index 000000000..6d754adef --- /dev/null +++ b/examples-generated/machinelearningservices/v1beta2/computeinstance.yaml @@ -0,0 +1,165 @@ +apiVersion: machinelearningservices.azure.upbound.io/v1beta2 +kind: ComputeInstance +metadata: + annotations: + meta.upbound.io/example-id: machinelearningservices/v1beta2/computeinstance + labels: + testing.upbound.io/example-name: example + name: example +spec: + 
forProvider: + authorizationType: personal + description: foo + location: West Europe + machineLearningWorkspaceIdSelector: + matchLabels: + testing.upbound.io/example-name: example + ssh: + - publicKey: ${var.ssh_key} + subnetResourceIdSelector: + matchLabels: + testing.upbound.io/example-name: example + tags: + foo: bar + virtualMachineSize: STANDARD_DS2_V2 + +--- + +apiVersion: insights.azure.upbound.io/v1beta1 +kind: ApplicationInsights +metadata: + annotations: + meta.upbound.io/example-id: machinelearningservices/v1beta2/computeinstance + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + applicationType: web + location: west europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: keyvault.azure.upbound.io/v1beta2 +kind: Vault +metadata: + annotations: + meta.upbound.io/example-id: machinelearningservices/v1beta2/computeinstance + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: west europe + purgeProtectionEnabled: true + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: standard + tenantId: ${data.azurerm_client_config.current.tenant_id} + +--- + +apiVersion: machinelearningservices.azure.upbound.io/v1beta2 +kind: Workspace +metadata: + annotations: + meta.upbound.io/example-id: machinelearningservices/v1beta2/computeinstance + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + applicationInsightsIdSelector: + matchLabels: + testing.upbound.io/example-name: example + identity: + - type: SystemAssigned + keyVaultIdSelector: + matchLabels: + testing.upbound.io/example-name: example + location: west europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + storageAccountIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 
+kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: machinelearningservices/v1beta2/computeinstance + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: west europe + tags: + stage: example + +--- + +apiVersion: storage.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: machinelearningservices/v1beta2/computeinstance + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountReplicationType: LRS + accountTier: Standard + location: west europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: machinelearningservices/v1beta2/computeinstance + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressPrefixes: + - 10.1.0.0/24 + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + virtualNetworkNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: VirtualNetwork +metadata: + annotations: + meta.upbound.io/example-id: machinelearningservices/v1beta2/computeinstance + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressSpace: + - 10.1.0.0/16 + location: west europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/machinelearningservices/v1beta2/synapsespark.yaml b/examples-generated/machinelearningservices/v1beta2/synapsespark.yaml new file mode 100644 index 000000000..bbb3f1f4c --- /dev/null +++ b/examples-generated/machinelearningservices/v1beta2/synapsespark.yaml @@ -0,0 +1,182 @@ +apiVersion: machinelearningservices.azure.upbound.io/v1beta2 +kind: SynapseSpark +metadata: + annotations: 
+ meta.upbound.io/example-id: machinelearningservices/v1beta2/synapsespark + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + identity: + - type: SystemAssigned + location: West Europe + machineLearningWorkspaceIdSelector: + matchLabels: + testing.upbound.io/example-name: example + synapseSparkPoolIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: insights.azure.upbound.io/v1beta1 +kind: ApplicationInsights +metadata: + annotations: + meta.upbound.io/example-id: machinelearningservices/v1beta2/synapsespark + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + applicationType: web + location: west europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: keyvault.azure.upbound.io/v1beta2 +kind: Vault +metadata: + annotations: + meta.upbound.io/example-id: machinelearningservices/v1beta2/synapsespark + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: west europe + purgeProtectionEnabled: true + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: standard + tenantId: ${data.azurerm_client_config.current.tenant_id} + +--- + +apiVersion: machinelearningservices.azure.upbound.io/v1beta2 +kind: Workspace +metadata: + annotations: + meta.upbound.io/example-id: machinelearningservices/v1beta2/synapsespark + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + applicationInsightsIdSelector: + matchLabels: + testing.upbound.io/example-name: example + identity: + - type: SystemAssigned + keyVaultIdSelector: + matchLabels: + testing.upbound.io/example-name: example + location: west europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + storageAccountIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + 
+apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: machinelearningservices/v1beta2/synapsespark + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: west europe + tags: + stage: example + +--- + +apiVersion: storage.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: machinelearningservices/v1beta2/synapsespark + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountReplicationType: LRS + accountTier: Standard + location: west europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: storage.azure.upbound.io/v1beta1 +kind: DataLakeGen2FileSystem +metadata: + annotations: + meta.upbound.io/example-id: machinelearningservices/v1beta2/synapsespark + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + storageAccountIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: synapse.azure.upbound.io/v1beta2 +kind: SparkPool +metadata: + annotations: + meta.upbound.io/example-id: machinelearningservices/v1beta2/synapsespark + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + nodeCount: 3 + nodeSize: Small + nodeSizeFamily: MemoryOptimized + synapseWorkspaceIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: synapse.azure.upbound.io/v1beta2 +kind: Workspace +metadata: + annotations: + meta.upbound.io/example-id: machinelearningservices/v1beta2/synapsespark + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + identity: + - type: SystemAssigned + location: west europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sqlAdministratorLogin: sqladminuser + 
sqlAdministratorLoginPasswordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + storageDataLakeGen2FilesystemIdSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/machinelearningservices/v1beta2/workspace.yaml b/examples-generated/machinelearningservices/v1beta2/workspace.yaml new file mode 100644 index 000000000..86237b321 --- /dev/null +++ b/examples-generated/machinelearningservices/v1beta2/workspace.yaml @@ -0,0 +1,95 @@ +apiVersion: machinelearningservices.azure.upbound.io/v1beta2 +kind: Workspace +metadata: + annotations: + meta.upbound.io/example-id: machinelearningservices/v1beta2/workspace + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + applicationInsightsIdSelector: + matchLabels: + testing.upbound.io/example-name: example + identity: + - type: SystemAssigned + keyVaultIdSelector: + matchLabels: + testing.upbound.io/example-name: example + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + storageAccountIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: insights.azure.upbound.io/v1beta1 +kind: ApplicationInsights +metadata: + annotations: + meta.upbound.io/example-id: machinelearningservices/v1beta2/workspace + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + applicationType: web + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: keyvault.azure.upbound.io/v1beta2 +kind: Vault +metadata: + annotations: + meta.upbound.io/example-id: machinelearningservices/v1beta2/workspace + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: premium + tenantId: 
${data.azurerm_client_config.current.tenant_id} + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: machinelearningservices/v1beta2/workspace + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: storage.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: machinelearningservices/v1beta2/workspace + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountReplicationType: GRS + accountTier: Standard + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/maintenance/v1beta1/maintenanceassignmentdedicatedhost.yaml b/examples-generated/maintenance/v1beta1/maintenanceassignmentdedicatedhost.yaml index 0d946daca..2fb199401 100644 --- a/examples-generated/maintenance/v1beta1/maintenanceassignmentdedicatedhost.yaml +++ b/examples-generated/maintenance/v1beta1/maintenanceassignmentdedicatedhost.yaml @@ -35,7 +35,7 @@ spec: --- -apiVersion: maintenance.azure.upbound.io/v1beta1 +apiVersion: maintenance.azure.upbound.io/v1beta2 kind: MaintenanceConfiguration metadata: annotations: diff --git a/examples-generated/maintenance/v1beta1/maintenanceassignmentvirtualmachine.yaml b/examples-generated/maintenance/v1beta1/maintenanceassignmentvirtualmachine.yaml index 9bc4664ab..225182759 100644 --- a/examples-generated/maintenance/v1beta1/maintenanceassignmentvirtualmachine.yaml +++ b/examples-generated/maintenance/v1beta1/maintenanceassignmentvirtualmachine.yaml @@ -18,7 +18,7 @@ spec: --- -apiVersion: compute.azure.upbound.io/v1beta1 +apiVersion: compute.azure.upbound.io/v1beta2 kind: LinuxVirtualMachine metadata: annotations: @@ -50,7 +50,7 @@ spec: --- -apiVersion: maintenance.azure.upbound.io/v1beta1 +apiVersion: maintenance.azure.upbound.io/v1beta2 kind: 
MaintenanceConfiguration metadata: annotations: @@ -105,7 +105,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: Subnet metadata: annotations: @@ -126,7 +126,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: VirtualNetwork metadata: annotations: diff --git a/examples-generated/maintenance/v1beta2/maintenanceconfiguration.yaml b/examples-generated/maintenance/v1beta2/maintenanceconfiguration.yaml new file mode 100644 index 000000000..69c0b0c44 --- /dev/null +++ b/examples-generated/maintenance/v1beta2/maintenanceconfiguration.yaml @@ -0,0 +1,31 @@ +apiVersion: maintenance.azure.upbound.io/v1beta2 +kind: MaintenanceConfiguration +metadata: + annotations: + meta.upbound.io/example-id: maintenance/v1beta2/maintenanceconfiguration + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + scope: SQLDB + tags: + Env: prod + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: maintenance/v1beta2/maintenanceconfiguration + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/media/v1beta1/asset.yaml b/examples-generated/media/v1beta1/asset.yaml index 892c5fe36..6a5427a11 100644 --- a/examples-generated/media/v1beta1/asset.yaml +++ b/examples-generated/media/v1beta1/asset.yaml @@ -18,7 +18,7 @@ spec: --- -apiVersion: media.azure.upbound.io/v1beta1 +apiVersion: media.azure.upbound.io/v1beta2 kind: ServicesAccount metadata: annotations: @@ -54,7 +54,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: diff --git 
a/examples-generated/media/v1beta1/liveeventoutput.yaml b/examples-generated/media/v1beta1/liveeventoutput.yaml index 3caf86dc2..4464e0725 100644 --- a/examples-generated/media/v1beta1/liveeventoutput.yaml +++ b/examples-generated/media/v1beta1/liveeventoutput.yaml @@ -42,7 +42,7 @@ spec: --- -apiVersion: media.azure.upbound.io/v1beta1 +apiVersion: media.azure.upbound.io/v1beta2 kind: LiveEvent metadata: annotations: @@ -70,7 +70,7 @@ spec: --- -apiVersion: media.azure.upbound.io/v1beta1 +apiVersion: media.azure.upbound.io/v1beta2 kind: ServicesAccount metadata: annotations: @@ -106,7 +106,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: diff --git a/examples-generated/media/v1beta1/streaminglocator.yaml b/examples-generated/media/v1beta1/streaminglocator.yaml index bd6cd0d27..581b156ed 100644 --- a/examples-generated/media/v1beta1/streaminglocator.yaml +++ b/examples-generated/media/v1beta1/streaminglocator.yaml @@ -43,7 +43,7 @@ spec: --- -apiVersion: media.azure.upbound.io/v1beta1 +apiVersion: media.azure.upbound.io/v1beta2 kind: ServicesAccount metadata: annotations: @@ -65,7 +65,7 @@ spec: --- -apiVersion: media.azure.upbound.io/v1beta1 +apiVersion: media.azure.upbound.io/v1beta2 kind: ServicesAccountFilter metadata: annotations: @@ -98,7 +98,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: diff --git a/examples-generated/media/v1beta2/assetfilter.yaml b/examples-generated/media/v1beta2/assetfilter.yaml new file mode 100644 index 000000000..0a96f9cec --- /dev/null +++ b/examples-generated/media/v1beta2/assetfilter.yaml @@ -0,0 +1,114 @@ +apiVersion: media.azure.upbound.io/v1beta2 +kind: AssetFilter +metadata: + annotations: + meta.upbound.io/example-id: media/v1beta2/assetfilter + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + 
assetIdSelector: + matchLabels: + testing.upbound.io/example-name: example + firstQualityBitrate: 128000 + presentationTimeRange: + - endInUnits: 15 + forceEnd: false + liveBackoffInUnits: 0 + presentationWindowInUnits: 90 + startInUnits: 0 + unitTimescaleInMiliseconds: 1000 + trackSelection: + - condition: + - operation: Equal + property: Type + value: Audio + - operation: NotEqual + property: Language + value: en + - operation: NotEqual + property: FourCC + value: EC-3 + - condition: + - operation: Equal + property: Type + value: Video + - operation: Equal + property: Bitrate + value: 3000000-5000000 + +--- + +apiVersion: media.azure.upbound.io/v1beta1 +kind: Asset +metadata: + annotations: + meta.upbound.io/example-id: media/v1beta2/assetfilter + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + description: Asset description + mediaServicesAccountNameSelector: + matchLabels: + testing.upbound.io/example-name: example + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: media.azure.upbound.io/v1beta2 +kind: ServicesAccount +metadata: + annotations: + meta.upbound.io/example-id: media/v1beta2/assetfilter + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + storageAccount: + - idSelector: + matchLabels: + testing.upbound.io/example-name: example + isPrimary: true + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: media/v1beta2/assetfilter + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: storage.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: media/v1beta2/assetfilter + labels: + testing.upbound.io/example-name: 
example + name: example +spec: + forProvider: + accountReplicationType: GRS + accountTier: Standard + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/media/v1beta2/contentkeypolicy.yaml b/examples-generated/media/v1beta2/contentkeypolicy.yaml new file mode 100644 index 000000000..1bb072824 --- /dev/null +++ b/examples-generated/media/v1beta2/contentkeypolicy.yaml @@ -0,0 +1,153 @@ +apiVersion: media.azure.upbound.io/v1beta2 +kind: ContentKeyPolicy +metadata: + annotations: + meta.upbound.io/example-id: media/v1beta2/contentkeypolicy + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + mediaServicesAccountNameSelector: + matchLabels: + testing.upbound.io/example-name: example + policyOption: + - fairplayConfiguration: + - askSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + pfxPasswordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + pfxSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + rentalAndLeaseKeyType: PersistentUnlimited + rentalDurationSeconds: 2249 + name: fairPlay + openRestrictionEnabled: true + - name: playReady + openRestrictionEnabled: true + playreadyConfigurationLicense: + - allowTestDevices: true + beginDate: "2017-10-16T18:22:53Z" + contentKeyLocationFromHeaderEnabled: true + contentType: UltraVioletDownload + licenseType: Persistent + playRight: + - allowPassingVideoContentToUnknownOutput: NotAllowed + analogVideoOpl: 150 + compressedDigitalAudioOpl: 250 + compressedDigitalVideoOpl: 400 + digitalVideoOnlyContentRestriction: false + explicitAnalogTelevisionOutputRestriction: + - bestEffortEnforced: true + controlBits: 3 + imageConstraintForAnalogComponentVideoRestriction: false + imageConstraintForAnalogComputerMonitorRestriction: false + scmsRestriction: 2 + uncompressedDigitalAudioOpl: 100 + 
uncompressedDigitalVideoOpl: 100 + securityLevel: SL150 + - clearKeyConfigurationEnabled: true + name: clearKey + tokenRestriction: + - alternateKey: + - rsaTokenKeyExponentSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + rsaTokenKeyModulusSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + - symmetricTokenKeySecretRef: + key: example-key + name: example-secret + namespace: upbound-system + audience: urn:audience + issuer: urn:issuer + primarySymmetricTokenKeySecretRef: + key: example-key + name: example-secret + namespace: upbound-system + tokenType: Swt + - name: widevine + openRestrictionEnabled: true + widevineConfigurationTemplate: |- + ${jsonencode({ + "allowed_track_types" : "SD_HD", + "content_key_specs" : [{ + "track_type" : "SD", + "security_level" : 1, + "required_output_protection" : { + "hdcp" : "HDCP_V2" + }, + }], + "policy_overrides" : { + "can_play" : true, + "can_persist" : true, + "can_renew" : false, + }, + })} + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: media.azure.upbound.io/v1beta2 +kind: ServicesAccount +metadata: + annotations: + meta.upbound.io/example-id: media/v1beta2/contentkeypolicy + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + storageAccount: + - idSelector: + matchLabels: + testing.upbound.io/example-name: example + isPrimary: true + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: media/v1beta2/contentkeypolicy + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: storage.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: media/v1beta2/contentkeypolicy 
+ labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountReplicationType: GRS + accountTier: Standard + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/media/v1beta2/job.yaml b/examples-generated/media/v1beta2/job.yaml new file mode 100644 index 000000000..c67572651 --- /dev/null +++ b/examples-generated/media/v1beta2/job.yaml @@ -0,0 +1,149 @@ +apiVersion: media.azure.upbound.io/v1beta2 +kind: Job +metadata: + annotations: + meta.upbound.io/example-id: media/v1beta2/job + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + description: My Job description + inputAsset: + - nameSelector: + matchLabels: + testing.upbound.io/example-name: input + mediaServicesAccountNameSelector: + matchLabels: + testing.upbound.io/example-name: example + outputAsset: + - nameSelector: + matchLabels: + testing.upbound.io/example-name: output + priority: Normal + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + transformNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: media.azure.upbound.io/v1beta1 +kind: Asset +metadata: + annotations: + meta.upbound.io/example-id: media/v1beta2/job + labels: + testing.upbound.io/example-name: input + name: input +spec: + forProvider: + description: Input Asset description + mediaServicesAccountNameSelector: + matchLabels: + testing.upbound.io/example-name: example + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: media.azure.upbound.io/v1beta1 +kind: Asset +metadata: + annotations: + meta.upbound.io/example-id: media/v1beta2/job + labels: + testing.upbound.io/example-name: output + name: output +spec: + forProvider: + description: Output Asset description + mediaServicesAccountNameSelector: + matchLabels: + testing.upbound.io/example-name: 
example + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: media.azure.upbound.io/v1beta2 +kind: ServicesAccount +metadata: + annotations: + meta.upbound.io/example-id: media/v1beta2/job + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + storageAccount: + - idSelector: + matchLabels: + testing.upbound.io/example-name: example + isPrimary: true + +--- + +apiVersion: media.azure.upbound.io/v1beta2 +kind: Transform +metadata: + annotations: + meta.upbound.io/example-id: media/v1beta2/job + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + description: My transform description + mediaServicesAccountNameSelector: + matchLabels: + testing.upbound.io/example-name: example + output: + - builtinPreset: + - presetName: AACGoodQualityAudio + onErrorAction: ContinueJob + relativePriority: Normal + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: media/v1beta2/job + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: storage.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: media/v1beta2/job + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountReplicationType: GRS + accountTier: Standard + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/media/v1beta2/liveevent.yaml b/examples-generated/media/v1beta2/liveevent.yaml new file mode 100644 index 000000000..2b2b46fac --- /dev/null +++ 
b/examples-generated/media/v1beta2/liveevent.yaml @@ -0,0 +1,95 @@ +apiVersion: media.azure.upbound.io/v1beta2 +kind: LiveEvent +metadata: + annotations: + meta.upbound.io/example-id: media/v1beta2/liveevent + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + description: My Event Description + encoding: + - keyFrameInterval: PT2S + presetName: Default720p + stretchMode: AutoFit + type: Standard + hostnamePrefix: special-event + input: + - ipAccessControlAllow: + - address: 0.0.0.0 + name: AllowAll + subnetPrefixLength: 0 + streamingProtocol: RTMP + location: West Europe + mediaServicesAccountNameSelector: + matchLabels: + testing.upbound.io/example-name: example + preview: + - ipAccessControlAllow: + - address: 0.0.0.0 + name: AllowAll + subnetPrefixLength: 0 + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + streamOptions: + - LowLatency + transcriptionLanguages: + - en-US + useStaticHostname: true + +--- + +apiVersion: media.azure.upbound.io/v1beta2 +kind: ServicesAccount +metadata: + annotations: + meta.upbound.io/example-id: media/v1beta2/liveevent + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + storageAccount: + - idSelector: + matchLabels: + testing.upbound.io/example-name: example + isPrimary: true + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: media/v1beta2/liveevent + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: storage.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: media/v1beta2/liveevent + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountReplicationType: GRS + 
accountTier: Standard + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/media/v1beta2/servicesaccount.yaml b/examples-generated/media/v1beta2/servicesaccount.yaml new file mode 100644 index 000000000..8e99cf141 --- /dev/null +++ b/examples-generated/media/v1beta2/servicesaccount.yaml @@ -0,0 +1,52 @@ +apiVersion: media.azure.upbound.io/v1beta2 +kind: ServicesAccount +metadata: + annotations: + meta.upbound.io/example-id: media/v1beta2/servicesaccount + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + storageAccount: + - idSelector: + matchLabels: + testing.upbound.io/example-name: example + isPrimary: true + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: media/v1beta2/servicesaccount + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: storage.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: media/v1beta2/servicesaccount + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountReplicationType: GRS + accountTier: Standard + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/media/v1beta2/servicesaccountfilter.yaml b/examples-generated/media/v1beta2/servicesaccountfilter.yaml new file mode 100644 index 000000000..001f34980 --- /dev/null +++ b/examples-generated/media/v1beta2/servicesaccountfilter.yaml @@ -0,0 +1,97 @@ +apiVersion: media.azure.upbound.io/v1beta2 +kind: ServicesAccountFilter +metadata: + annotations: + meta.upbound.io/example-id: media/v1beta2/servicesaccountfilter + labels: + 
testing.upbound.io/example-name: example + name: example +spec: + forProvider: + firstQualityBitrate: 128000 + mediaServicesAccountNameSelector: + matchLabels: + testing.upbound.io/example-name: test + presentationTimeRange: + - endInUnits: 15 + forceEnd: false + liveBackoffInUnits: 0 + presentationWindowInUnits: 90 + startInUnits: 0 + unitTimescaleInMilliseconds: 1000 + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: test + trackSelection: + - condition: + - operation: Equal + property: Type + value: Audio + - operation: NotEqual + property: Language + value: en + - operation: NotEqual + property: FourCC + value: EC-3 + - condition: + - operation: Equal + property: Type + value: Video + - operation: Equal + property: Bitrate + value: 3000000-5000000 + +--- + +apiVersion: media.azure.upbound.io/v1beta2 +kind: ServicesAccount +metadata: + annotations: + meta.upbound.io/example-id: media/v1beta2/servicesaccountfilter + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + storageAccount: + - idSelector: + matchLabels: + testing.upbound.io/example-name: example + isPrimary: true + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: media/v1beta2/servicesaccountfilter + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: storage.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: media/v1beta2/servicesaccountfilter + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountReplicationType: GRS + accountTier: Standard + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git 
a/examples-generated/media/v1beta2/streamingendpoint.yaml b/examples-generated/media/v1beta2/streamingendpoint.yaml new file mode 100644 index 000000000..dc9f24446 --- /dev/null +++ b/examples-generated/media/v1beta2/streamingendpoint.yaml @@ -0,0 +1,73 @@ +apiVersion: media.azure.upbound.io/v1beta2 +kind: StreamingEndpoint +metadata: + annotations: + meta.upbound.io/example-id: media/v1beta2/streamingendpoint + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + mediaServicesAccountNameSelector: + matchLabels: + testing.upbound.io/example-name: example + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + scaleUnits: 2 + +--- + +apiVersion: media.azure.upbound.io/v1beta2 +kind: ServicesAccount +metadata: + annotations: + meta.upbound.io/example-id: media/v1beta2/streamingendpoint + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + storageAccount: + - idSelector: + matchLabels: + testing.upbound.io/example-name: example + isPrimary: true + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: media/v1beta2/streamingendpoint + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: storage.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: media/v1beta2/streamingendpoint + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountReplicationType: GRS + accountTier: Standard + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/media/v1beta2/streamingpolicy.yaml 
b/examples-generated/media/v1beta2/streamingpolicy.yaml new file mode 100644 index 000000000..e4b2e8a67 --- /dev/null +++ b/examples-generated/media/v1beta2/streamingpolicy.yaml @@ -0,0 +1,137 @@ +apiVersion: media.azure.upbound.io/v1beta2 +kind: StreamingPolicy +metadata: + annotations: + meta.upbound.io/example-id: media/v1beta2/streamingpolicy + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + commonEncryptionCbcs: + - drmFairplay: + - allowPersistentLicense: true + customLicenseAcquisitionUrlTemplate: https://contoso.com/{AssetAlternativeId}/fairplay/{ContentKeyId} + enabledProtocols: + - dash: true + download: false + hls: false + smoothStreaming: false + commonEncryptionCenc: + - clearTrack: + - condition: + - operation: Equal + property: FourCC + value: hev2 + defaultContentKey: + - label: aesDefaultKey + policyNameSelector: + matchLabels: + testing.upbound.io/example-name: example + drmPlayready: + - customAttributes: PlayReady CustomAttributes + customLicenseAcquisitionUrlTemplate: https://contoso.com/{AssetAlternativeId}/playready/{ContentKeyId} + drmWidevineCustomLicenseAcquisitionUrlTemplate: https://contoso.com/{AssetAlternativeId}/widevine/{ContentKeyId} + enabledProtocols: + - dash: true + download: false + hls: false + smoothStreaming: false + mediaServicesAccountNameSelector: + matchLabels: + testing.upbound.io/example-name: example + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: media.azure.upbound.io/v1beta2 +kind: ContentKeyPolicy +metadata: + annotations: + meta.upbound.io/example-id: media/v1beta2/streamingpolicy + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + mediaServicesAccountNameSelector: + matchLabels: + testing.upbound.io/example-name: example + policyOption: + - fairplayConfiguration: + - askSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + 
pfxPasswordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + pfxSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + rentalAndLeaseKeyType: PersistentUnlimited + rentalDurationSeconds: 2249 + name: fairPlay + openRestrictionEnabled: true + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: media.azure.upbound.io/v1beta2 +kind: ServicesAccount +metadata: + annotations: + meta.upbound.io/example-id: media/v1beta2/streamingpolicy + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + storageAccount: + - idSelector: + matchLabels: + testing.upbound.io/example-name: example + isPrimary: true + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: media/v1beta2/streamingpolicy + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: storage.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: media/v1beta2/streamingpolicy + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountReplicationType: GRS + accountTier: Standard + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/media/v1beta2/transform.yaml b/examples-generated/media/v1beta2/transform.yaml new file mode 100644 index 000000000..43821a2b9 --- /dev/null +++ b/examples-generated/media/v1beta2/transform.yaml @@ -0,0 +1,77 @@ +apiVersion: media.azure.upbound.io/v1beta2 +kind: Transform +metadata: + annotations: + meta.upbound.io/example-id: media/v1beta2/transform + labels: + testing.upbound.io/example-name: example + name: example 
+spec: + forProvider: + description: My transform description + mediaServicesAccountNameSelector: + matchLabels: + testing.upbound.io/example-name: example + output: + - builtinPreset: + - presetName: AACGoodQualityAudio + onErrorAction: ContinueJob + relativePriority: Normal + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: media.azure.upbound.io/v1beta2 +kind: ServicesAccount +metadata: + annotations: + meta.upbound.io/example-id: media/v1beta2/transform + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + storageAccount: + - idSelector: + matchLabels: + testing.upbound.io/example-name: example + isPrimary: true + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: media/v1beta2/transform + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: storage.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: media/v1beta2/transform + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountReplicationType: GRS + accountTier: Standard + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/netapp/v1beta1/pool.yaml b/examples-generated/netapp/v1beta1/pool.yaml index ac2ef9fe0..663d74e58 100644 --- a/examples-generated/netapp/v1beta1/pool.yaml +++ b/examples-generated/netapp/v1beta1/pool.yaml @@ -20,7 +20,7 @@ spec: --- -apiVersion: netapp.azure.upbound.io/v1beta1 +apiVersion: netapp.azure.upbound.io/v1beta2 kind: Account metadata: annotations: diff --git a/examples-generated/netapp/v1beta1/snapshot.yaml 
b/examples-generated/netapp/v1beta1/snapshot.yaml index e814927ba..9952a820c 100644 --- a/examples-generated/netapp/v1beta1/snapshot.yaml +++ b/examples-generated/netapp/v1beta1/snapshot.yaml @@ -24,7 +24,7 @@ spec: --- -apiVersion: netapp.azure.upbound.io/v1beta1 +apiVersion: netapp.azure.upbound.io/v1beta2 kind: Account metadata: annotations: @@ -63,7 +63,7 @@ spec: --- -apiVersion: netapp.azure.upbound.io/v1beta1 +apiVersion: netapp.azure.upbound.io/v1beta2 kind: Volume metadata: annotations: @@ -106,7 +106,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: Subnet metadata: annotations: @@ -134,7 +134,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: VirtualNetwork metadata: annotations: diff --git a/examples-generated/netapp/v1beta2/account.yaml b/examples-generated/netapp/v1beta2/account.yaml new file mode 100644 index 000000000..707ba8713 --- /dev/null +++ b/examples-generated/netapp/v1beta2/account.yaml @@ -0,0 +1,61 @@ +apiVersion: netapp.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: netapp/v1beta2/account + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + activeDirectory: + - dnsServers: + - 1.2.3.4 + domain: westcentralus.com + organizationalUnit: OU=FirstLevel + passwordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + smbServerName: SMBSERVER + username: aduser + identity: + - identityIds: + - ${azurerm_user_assigned_identity.example.id} + type: UserAssigned + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: netapp/v1beta2/account + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + 
location: West Europe + +--- + +apiVersion: managedidentity.azure.upbound.io/v1beta1 +kind: UserAssignedIdentity +metadata: + annotations: + meta.upbound.io/example-id: netapp/v1beta2/account + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + name: anf-user-assigned-identity + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/netapp/v1beta2/snapshotpolicy.yaml b/examples-generated/netapp/v1beta2/snapshotpolicy.yaml new file mode 100644 index 000000000..cc2ed58ad --- /dev/null +++ b/examples-generated/netapp/v1beta2/snapshotpolicy.yaml @@ -0,0 +1,72 @@ +apiVersion: netapp.azure.upbound.io/v1beta2 +kind: SnapshotPolicy +metadata: + annotations: + meta.upbound.io/example-id: netapp/v1beta2/snapshotpolicy + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountNameSelector: + matchLabels: + testing.upbound.io/example-name: example + dailySchedule: + - hour: 20 + minute: 15 + snapshotsToKeep: 2 + enabled: true + hourlySchedule: + - minute: 15 + snapshotsToKeep: 4 + location: West Europe + monthlySchedule: + - daysOfMonth: + - 1 + - 15 + - 20 + - 30 + hour: 5 + minute: 45 + snapshotsToKeep: 1 + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + weeklySchedule: + - daysOfWeek: + - Monday + - Friday + hour: 23 + minute: 0 + snapshotsToKeep: 1 + +--- + +apiVersion: netapp.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: netapp/v1beta2/snapshotpolicy + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: East US + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: netapp/v1beta2/snapshotpolicy + labels: + 
testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: East US diff --git a/examples-generated/netapp/v1beta2/volume.yaml b/examples-generated/netapp/v1beta2/volume.yaml new file mode 100644 index 000000000..b16a178bb --- /dev/null +++ b/examples-generated/netapp/v1beta2/volume.yaml @@ -0,0 +1,146 @@ +apiVersion: netapp.azure.upbound.io/v1beta2 +kind: Volume +metadata: + annotations: + meta.upbound.io/example-id: netapp/v1beta2/volume + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountNameSelector: + matchLabels: + testing.upbound.io/example-name: example + createFromSnapshotResourceIdSelector: + matchLabels: + testing.upbound.io/example-name: example + dataProtectionReplication: + - endpointType: dst + remoteVolumeLocation: West Europe + remoteVolumeResourceIdSelector: + matchLabels: + testing.upbound.io/example-name: example + replicationFrequency: 10minutes + dataProtectionSnapshotPolicy: + - snapshotPolicyIdSelector: + matchLabels: + testing.upbound.io/example-name: example + location: West Europe + networkFeatures: Basic + poolNameSelector: + matchLabels: + testing.upbound.io/example-name: example + protocols: + - NFSv4.1 + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + securityStyle: unix + serviceLevel: Premium + snapshotDirectoryVisible: false + storageQuotaInGb: 100 + subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: example + volumePath: my-unique-file-path + zone: "1" + +--- + +apiVersion: netapp.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: netapp/v1beta2/volume + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: netapp.azure.upbound.io/v1beta1 +kind: Pool +metadata: + annotations: + 
meta.upbound.io/example-id: netapp/v1beta2/volume + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountNameSelector: + matchLabels: + testing.upbound.io/example-name: example + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + serviceLevel: Premium + sizeInTb: 4 + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: netapp/v1beta2/volume + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: netapp/v1beta2/volume + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressPrefixes: + - 10.0.2.0/24 + delegation: + - name: netapp + serviceDelegation: + - actions: + - Microsoft.Network/networkinterfaces/* + - Microsoft.Network/virtualNetworks/subnets/join/action + name: Microsoft.Netapp/volumes + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + virtualNetworkNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: VirtualNetwork +metadata: + annotations: + meta.upbound.io/example-id: netapp/v1beta2/volume + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressSpace: + - 10.0.0.0/16 + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/network/v1beta1/dnsaaaarecord.yaml b/examples-generated/network/v1beta1/dnsaaaarecord.yaml index a28cf1039..3d7fb4ac2 100644 --- a/examples-generated/network/v1beta1/dnsaaaarecord.yaml +++ b/examples-generated/network/v1beta1/dnsaaaarecord.yaml @@ -20,7 +20,7 @@ spec: --- -apiVersion: 
network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: DNSZone metadata: annotations: diff --git a/examples-generated/network/v1beta1/dnsarecord.yaml b/examples-generated/network/v1beta1/dnsarecord.yaml index ea5fc2127..cc50a3d1e 100644 --- a/examples-generated/network/v1beta1/dnsarecord.yaml +++ b/examples-generated/network/v1beta1/dnsarecord.yaml @@ -20,7 +20,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: DNSZone metadata: annotations: diff --git a/examples-generated/network/v1beta1/dnscaarecord.yaml b/examples-generated/network/v1beta1/dnscaarecord.yaml index 2bb9b96ea..16e91f6e0 100644 --- a/examples-generated/network/v1beta1/dnscaarecord.yaml +++ b/examples-generated/network/v1beta1/dnscaarecord.yaml @@ -33,7 +33,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: DNSZone metadata: annotations: diff --git a/examples-generated/network/v1beta1/dnscnamerecord.yaml b/examples-generated/network/v1beta1/dnscnamerecord.yaml index 801d59887..08af9276c 100644 --- a/examples-generated/network/v1beta1/dnscnamerecord.yaml +++ b/examples-generated/network/v1beta1/dnscnamerecord.yaml @@ -19,7 +19,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: DNSZone metadata: annotations: diff --git a/examples-generated/network/v1beta1/dnsmxrecord.yaml b/examples-generated/network/v1beta1/dnsmxrecord.yaml index 9ef9d70e6..60b1ee651 100644 --- a/examples-generated/network/v1beta1/dnsmxrecord.yaml +++ b/examples-generated/network/v1beta1/dnsmxrecord.yaml @@ -25,7 +25,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: DNSZone metadata: annotations: diff --git a/examples-generated/network/v1beta1/dnsnsrecord.yaml b/examples-generated/network/v1beta1/dnsnsrecord.yaml index d479fc835..11df88b1b 100644 --- 
a/examples-generated/network/v1beta1/dnsnsrecord.yaml +++ b/examples-generated/network/v1beta1/dnsnsrecord.yaml @@ -23,7 +23,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: DNSZone metadata: annotations: diff --git a/examples-generated/network/v1beta1/dnsptrrecord.yaml b/examples-generated/network/v1beta1/dnsptrrecord.yaml index 6a020aaed..c713b83a0 100644 --- a/examples-generated/network/v1beta1/dnsptrrecord.yaml +++ b/examples-generated/network/v1beta1/dnsptrrecord.yaml @@ -20,7 +20,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: DNSZone metadata: annotations: diff --git a/examples-generated/network/v1beta1/dnssrvrecord.yaml b/examples-generated/network/v1beta1/dnssrvrecord.yaml index 933c9190b..0fd97e848 100644 --- a/examples-generated/network/v1beta1/dnssrvrecord.yaml +++ b/examples-generated/network/v1beta1/dnssrvrecord.yaml @@ -25,7 +25,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: DNSZone metadata: annotations: diff --git a/examples-generated/network/v1beta1/dnstxtrecord.yaml b/examples-generated/network/v1beta1/dnstxtrecord.yaml index 589d4db6a..de0950fa0 100644 --- a/examples-generated/network/v1beta1/dnstxtrecord.yaml +++ b/examples-generated/network/v1beta1/dnstxtrecord.yaml @@ -23,7 +23,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: DNSZone metadata: annotations: diff --git a/examples-generated/network/v1beta1/expressroutecircuitauthorization.yaml b/examples-generated/network/v1beta1/expressroutecircuitauthorization.yaml index 6c7abdc31..1b9fa1cae 100644 --- a/examples-generated/network/v1beta1/expressroutecircuitauthorization.yaml +++ b/examples-generated/network/v1beta1/expressroutecircuitauthorization.yaml @@ -17,7 +17,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: 
network.azure.upbound.io/v1beta2 kind: ExpressRouteCircuit metadata: annotations: diff --git a/examples-generated/network/v1beta1/expressroutecircuitconnection.yaml b/examples-generated/network/v1beta1/expressroutecircuitconnection.yaml index 921e18282..ca860c0ed 100644 --- a/examples-generated/network/v1beta1/expressroutecircuitconnection.yaml +++ b/examples-generated/network/v1beta1/expressroutecircuitconnection.yaml @@ -22,7 +22,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: ExpressRouteCircuit metadata: annotations: @@ -44,7 +44,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: ExpressRouteCircuit metadata: annotations: @@ -66,7 +66,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: ExpressRouteCircuitPeering metadata: annotations: @@ -93,7 +93,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: ExpressRouteCircuitPeering metadata: annotations: @@ -120,7 +120,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: ExpressRoutePort metadata: annotations: @@ -140,7 +140,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: ExpressRoutePort metadata: annotations: diff --git a/examples-generated/network/v1beta1/firewallapplicationrulecollection.yaml b/examples-generated/network/v1beta1/firewallapplicationrulecollection.yaml index 4dfb298cd..cfd943c19 100644 --- a/examples-generated/network/v1beta1/firewallapplicationrulecollection.yaml +++ b/examples-generated/network/v1beta1/firewallapplicationrulecollection.yaml @@ -28,7 +28,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: Firewall metadata: annotations: @@ -88,7 +88,7 @@ spec: --- 
-apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: Subnet metadata: annotations: @@ -109,7 +109,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: VirtualNetwork metadata: annotations: diff --git a/examples-generated/network/v1beta1/firewallnatrulecollection.yaml b/examples-generated/network/v1beta1/firewallnatrulecollection.yaml index 3966f07d8..ae5b0226d 100644 --- a/examples-generated/network/v1beta1/firewallnatrulecollection.yaml +++ b/examples-generated/network/v1beta1/firewallnatrulecollection.yaml @@ -32,7 +32,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: Firewall metadata: annotations: @@ -92,7 +92,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: Subnet metadata: annotations: @@ -113,7 +113,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: VirtualNetwork metadata: annotations: diff --git a/examples-generated/network/v1beta1/firewallnetworkrulecollection.yaml b/examples-generated/network/v1beta1/firewallnetworkrulecollection.yaml index f572e4835..cf1dd3e4e 100644 --- a/examples-generated/network/v1beta1/firewallnetworkrulecollection.yaml +++ b/examples-generated/network/v1beta1/firewallnetworkrulecollection.yaml @@ -31,7 +31,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: Firewall metadata: annotations: @@ -91,7 +91,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: Subnet metadata: annotations: @@ -112,7 +112,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: VirtualNetwork metadata: annotations: diff --git 
a/examples-generated/network/v1beta1/firewallpolicyrulecollectiongroup.yaml b/examples-generated/network/v1beta1/firewallpolicyrulecollectiongroup.yaml index 9bbf4ec6a..8479559ab 100644 --- a/examples-generated/network/v1beta1/firewallpolicyrulecollectiongroup.yaml +++ b/examples-generated/network/v1beta1/firewallpolicyrulecollectiongroup.yaml @@ -64,7 +64,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: FirewallPolicy metadata: annotations: diff --git a/examples-generated/network/v1beta1/managermanagementgroupconnection.yaml b/examples-generated/network/v1beta1/managermanagementgroupconnection.yaml index 3becd586f..5b2f60caa 100644 --- a/examples-generated/network/v1beta1/managermanagementgroupconnection.yaml +++ b/examples-generated/network/v1beta1/managermanagementgroupconnection.yaml @@ -50,7 +50,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: Manager metadata: annotations: diff --git a/examples-generated/network/v1beta1/managernetworkgroup.yaml b/examples-generated/network/v1beta1/managernetworkgroup.yaml index 352c0d041..0aa2084f9 100644 --- a/examples-generated/network/v1beta1/managernetworkgroup.yaml +++ b/examples-generated/network/v1beta1/managernetworkgroup.yaml @@ -14,7 +14,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: Manager metadata: annotations: diff --git a/examples-generated/network/v1beta1/managerstaticmember.yaml b/examples-generated/network/v1beta1/managerstaticmember.yaml index 43a5442ab..75cff5069 100644 --- a/examples-generated/network/v1beta1/managerstaticmember.yaml +++ b/examples-generated/network/v1beta1/managerstaticmember.yaml @@ -17,7 +17,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: Manager metadata: annotations: @@ -72,7 +72,7 @@ spec: --- -apiVersion: 
network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: VirtualNetwork metadata: annotations: diff --git a/examples-generated/network/v1beta1/managersubscriptionconnection.yaml b/examples-generated/network/v1beta1/managersubscriptionconnection.yaml index 78decb3b4..2d6d68517 100644 --- a/examples-generated/network/v1beta1/managersubscriptionconnection.yaml +++ b/examples-generated/network/v1beta1/managersubscriptionconnection.yaml @@ -16,7 +16,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: Manager metadata: annotations: diff --git a/examples-generated/network/v1beta1/networkinterface.yaml b/examples-generated/network/v1beta1/networkinterface.yaml index 699f61c76..fbd77ec99 100644 --- a/examples-generated/network/v1beta1/networkinterface.yaml +++ b/examples-generated/network/v1beta1/networkinterface.yaml @@ -35,7 +35,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: Subnet metadata: annotations: @@ -56,7 +56,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: VirtualNetwork metadata: annotations: diff --git a/examples-generated/network/v1beta1/networkinterfaceapplicationsecuritygroupassociation.yaml b/examples-generated/network/v1beta1/networkinterfaceapplicationsecuritygroupassociation.yaml index e554931b7..df2b2c59d 100644 --- a/examples-generated/network/v1beta1/networkinterfaceapplicationsecuritygroupassociation.yaml +++ b/examples-generated/network/v1beta1/networkinterfaceapplicationsecuritygroupassociation.yaml @@ -71,7 +71,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: Subnet metadata: annotations: @@ -92,7 +92,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: VirtualNetwork metadata: annotations: diff --git 
a/examples-generated/network/v1beta1/networkinterfacebackendaddresspoolassociation.yaml b/examples-generated/network/v1beta1/networkinterfacebackendaddresspoolassociation.yaml index 91e698849..22bc09fb4 100644 --- a/examples-generated/network/v1beta1/networkinterfacebackendaddresspoolassociation.yaml +++ b/examples-generated/network/v1beta1/networkinterfacebackendaddresspoolassociation.yaml @@ -111,7 +111,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: Subnet metadata: annotations: @@ -132,7 +132,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: VirtualNetwork metadata: annotations: diff --git a/examples-generated/network/v1beta1/networkinterfacenatruleassociation.yaml b/examples-generated/network/v1beta1/networkinterfacenatruleassociation.yaml index f7fb3b7f8..d74dbfc95 100644 --- a/examples-generated/network/v1beta1/networkinterfacenatruleassociation.yaml +++ b/examples-generated/network/v1beta1/networkinterfacenatruleassociation.yaml @@ -118,7 +118,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: Subnet metadata: annotations: @@ -139,7 +139,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: VirtualNetwork metadata: annotations: diff --git a/examples-generated/network/v1beta1/networkinterfacesecuritygroupassociation.yaml b/examples-generated/network/v1beta1/networkinterfacesecuritygroupassociation.yaml index ab3821e01..85aacd748 100644 --- a/examples-generated/network/v1beta1/networkinterfacesecuritygroupassociation.yaml +++ b/examples-generated/network/v1beta1/networkinterfacesecuritygroupassociation.yaml @@ -71,7 +71,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: Subnet metadata: annotations: @@ -92,7 +92,7 @@ spec: --- -apiVersion: 
network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: VirtualNetwork metadata: annotations: diff --git a/examples-generated/network/v1beta1/privatednsaaaarecord.yaml b/examples-generated/network/v1beta1/privatednsaaaarecord.yaml index d22adaa61..c9960e9e6 100644 --- a/examples-generated/network/v1beta1/privatednsaaaarecord.yaml +++ b/examples-generated/network/v1beta1/privatednsaaaarecord.yaml @@ -21,7 +21,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: PrivateDNSZone metadata: annotations: diff --git a/examples-generated/network/v1beta1/privatednsarecord.yaml b/examples-generated/network/v1beta1/privatednsarecord.yaml index 068e6b9a0..3cc4be83c 100644 --- a/examples-generated/network/v1beta1/privatednsarecord.yaml +++ b/examples-generated/network/v1beta1/privatednsarecord.yaml @@ -20,7 +20,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: PrivateDNSZone metadata: annotations: diff --git a/examples-generated/network/v1beta1/privatednscnamerecord.yaml b/examples-generated/network/v1beta1/privatednscnamerecord.yaml index e2b9b972e..3ee744069 100644 --- a/examples-generated/network/v1beta1/privatednscnamerecord.yaml +++ b/examples-generated/network/v1beta1/privatednscnamerecord.yaml @@ -19,7 +19,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: PrivateDNSZone metadata: annotations: diff --git a/examples-generated/network/v1beta1/privatednsmxrecord.yaml b/examples-generated/network/v1beta1/privatednsmxrecord.yaml index ece4d48be..cc8eb92ce 100644 --- a/examples-generated/network/v1beta1/privatednsmxrecord.yaml +++ b/examples-generated/network/v1beta1/privatednsmxrecord.yaml @@ -25,7 +25,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: PrivateDNSZone metadata: annotations: diff --git 
a/examples-generated/network/v1beta1/privatednsptrrecord.yaml b/examples-generated/network/v1beta1/privatednsptrrecord.yaml index 3cb234d6d..e627a2193 100644 --- a/examples-generated/network/v1beta1/privatednsptrrecord.yaml +++ b/examples-generated/network/v1beta1/privatednsptrrecord.yaml @@ -20,7 +20,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: PrivateDNSZone metadata: annotations: diff --git a/examples-generated/network/v1beta1/privatednsresolver.yaml b/examples-generated/network/v1beta1/privatednsresolver.yaml index c816be7da..154c4ab0d 100644 --- a/examples-generated/network/v1beta1/privatednsresolver.yaml +++ b/examples-generated/network/v1beta1/privatednsresolver.yaml @@ -32,7 +32,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: VirtualNetwork metadata: annotations: diff --git a/examples-generated/network/v1beta1/privatednssrvrecord.yaml b/examples-generated/network/v1beta1/privatednssrvrecord.yaml index a1a76408c..4abfdc4e0 100644 --- a/examples-generated/network/v1beta1/privatednssrvrecord.yaml +++ b/examples-generated/network/v1beta1/privatednssrvrecord.yaml @@ -29,7 +29,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: PrivateDNSZone metadata: annotations: diff --git a/examples-generated/network/v1beta1/privatednstxtrecord.yaml b/examples-generated/network/v1beta1/privatednstxtrecord.yaml index 92da671ad..3dc9216ee 100644 --- a/examples-generated/network/v1beta1/privatednstxtrecord.yaml +++ b/examples-generated/network/v1beta1/privatednstxtrecord.yaml @@ -20,7 +20,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: PrivateDNSZone metadata: annotations: diff --git a/examples-generated/network/v1beta1/privatednszonevirtualnetworklink.yaml 
b/examples-generated/network/v1beta1/privatednszonevirtualnetworklink.yaml index dc0e29acf..19871c9f6 100644 --- a/examples-generated/network/v1beta1/privatednszonevirtualnetworklink.yaml +++ b/examples-generated/network/v1beta1/privatednszonevirtualnetworklink.yaml @@ -20,7 +20,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: PrivateDNSZone metadata: annotations: @@ -50,7 +50,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: VirtualNetwork metadata: annotations: diff --git a/examples-generated/network/v1beta1/privateendpointapplicationsecuritygroupassociation.yaml b/examples-generated/network/v1beta1/privateendpointapplicationsecuritygroupassociation.yaml index af9fef20b..3e3ecf2bd 100644 --- a/examples-generated/network/v1beta1/privateendpointapplicationsecuritygroupassociation.yaml +++ b/examples-generated/network/v1beta1/privateendpointapplicationsecuritygroupassociation.yaml @@ -57,7 +57,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: PrivateEndpoint metadata: annotations: @@ -143,7 +143,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: Subnet metadata: annotations: @@ -165,7 +165,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: Subnet metadata: annotations: @@ -187,7 +187,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: VirtualNetwork metadata: annotations: diff --git a/examples-generated/network/v1beta1/privatelinkservice.yaml b/examples-generated/network/v1beta1/privatelinkservice.yaml index fde0210e2..a6cd08c25 100644 --- a/examples-generated/network/v1beta1/privatelinkservice.yaml +++ b/examples-generated/network/v1beta1/privatelinkservice.yaml @@ -92,7 +92,7 @@ spec: --- 
-apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: Subnet metadata: annotations: @@ -114,7 +114,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: VirtualNetwork metadata: annotations: diff --git a/examples-generated/network/v1beta1/routeserver.yaml b/examples-generated/network/v1beta1/routeserver.yaml index 738a5bd7c..af8295f03 100644 --- a/examples-generated/network/v1beta1/routeserver.yaml +++ b/examples-generated/network/v1beta1/routeserver.yaml @@ -56,7 +56,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: Subnet metadata: annotations: @@ -77,7 +77,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: VirtualNetwork metadata: annotations: diff --git a/examples-generated/network/v1beta1/routeserverbgpconnection.yaml b/examples-generated/network/v1beta1/routeserverbgpconnection.yaml index 70b986ab0..89b456ebc 100644 --- a/examples-generated/network/v1beta1/routeserverbgpconnection.yaml +++ b/examples-generated/network/v1beta1/routeserverbgpconnection.yaml @@ -74,7 +74,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: Subnet metadata: annotations: @@ -95,7 +95,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: VirtualNetwork metadata: annotations: diff --git a/examples-generated/network/v1beta1/subnetnatgatewayassociation.yaml b/examples-generated/network/v1beta1/subnetnatgatewayassociation.yaml index 4fa5bcd79..52a0686ce 100644 --- a/examples-generated/network/v1beta1/subnetnatgatewayassociation.yaml +++ b/examples-generated/network/v1beta1/subnetnatgatewayassociation.yaml @@ -48,7 +48,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: Subnet metadata: 
annotations: @@ -69,7 +69,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: VirtualNetwork metadata: annotations: diff --git a/examples-generated/network/v1beta1/subnetnetworksecuritygroupassociation.yaml b/examples-generated/network/v1beta1/subnetnetworksecuritygroupassociation.yaml index 908ad8581..10932dff4 100644 --- a/examples-generated/network/v1beta1/subnetnetworksecuritygroupassociation.yaml +++ b/examples-generated/network/v1beta1/subnetnetworksecuritygroupassociation.yaml @@ -58,7 +58,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: Subnet metadata: annotations: @@ -79,7 +79,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: VirtualNetwork metadata: annotations: diff --git a/examples-generated/network/v1beta1/subnetroutetableassociation.yaml b/examples-generated/network/v1beta1/subnetroutetableassociation.yaml index 28e91fcc9..c54ea62b4 100644 --- a/examples-generated/network/v1beta1/subnetroutetableassociation.yaml +++ b/examples-generated/network/v1beta1/subnetroutetableassociation.yaml @@ -53,7 +53,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: Subnet metadata: annotations: @@ -74,7 +74,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: VirtualNetwork metadata: annotations: diff --git a/examples-generated/network/v1beta1/subnetserviceendpointstoragepolicy.yaml b/examples-generated/network/v1beta1/subnetserviceendpointstoragepolicy.yaml index c68f7c4ac..048254b16 100644 --- a/examples-generated/network/v1beta1/subnetserviceendpointstoragepolicy.yaml +++ b/examples-generated/network/v1beta1/subnetserviceendpointstoragepolicy.yaml @@ -46,7 +46,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: 
storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: diff --git a/examples-generated/network/v1beta1/trafficmanagerazureendpoint.yaml b/examples-generated/network/v1beta1/trafficmanagerazureendpoint.yaml index fa32a82ca..e136d2da8 100644 --- a/examples-generated/network/v1beta1/trafficmanagerazureendpoint.yaml +++ b/examples-generated/network/v1beta1/trafficmanagerazureendpoint.yaml @@ -52,7 +52,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: TrafficManagerProfile metadata: annotations: diff --git a/examples-generated/network/v1beta1/trafficmanagerexternalendpoint.yaml b/examples-generated/network/v1beta1/trafficmanagerexternalendpoint.yaml index 025ce54f1..32a13e42a 100644 --- a/examples-generated/network/v1beta1/trafficmanagerexternalendpoint.yaml +++ b/examples-generated/network/v1beta1/trafficmanagerexternalendpoint.yaml @@ -31,7 +31,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: TrafficManagerProfile metadata: annotations: diff --git a/examples-generated/network/v1beta1/trafficmanagernestedendpoint.yaml b/examples-generated/network/v1beta1/trafficmanagernestedendpoint.yaml index c06c89f2b..9e2614b98 100644 --- a/examples-generated/network/v1beta1/trafficmanagernestedendpoint.yaml +++ b/examples-generated/network/v1beta1/trafficmanagernestedendpoint.yaml @@ -53,7 +53,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: TrafficManagerProfile metadata: annotations: @@ -77,7 +77,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: TrafficManagerProfile metadata: annotations: diff --git a/examples-generated/network/v1beta1/virtualhubip.yaml b/examples-generated/network/v1beta1/virtualhubip.yaml index 2c87e6c10..7b1f49dbc 100644 --- a/examples-generated/network/v1beta1/virtualhubip.yaml +++ 
b/examples-generated/network/v1beta1/virtualhubip.yaml @@ -55,7 +55,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: Subnet metadata: annotations: @@ -94,7 +94,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: VirtualNetwork metadata: annotations: diff --git a/examples-generated/network/v1beta1/virtualhubroutetable.yaml b/examples-generated/network/v1beta1/virtualhubroutetable.yaml index 0c804da52..0d7c09623 100644 --- a/examples-generated/network/v1beta1/virtualhubroutetable.yaml +++ b/examples-generated/network/v1beta1/virtualhubroutetable.yaml @@ -56,7 +56,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: Subnet metadata: annotations: @@ -117,7 +117,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: VirtualHubConnection metadata: annotations: @@ -136,7 +136,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: VirtualNetwork metadata: annotations: diff --git a/examples-generated/network/v1beta1/virtualhubroutetableroute.yaml b/examples-generated/network/v1beta1/virtualhubroutetableroute.yaml index 80f439dc6..24e89e5b9 100644 --- a/examples-generated/network/v1beta1/virtualhubroutetableroute.yaml +++ b/examples-generated/network/v1beta1/virtualhubroutetableroute.yaml @@ -52,7 +52,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: Subnet metadata: annotations: @@ -113,7 +113,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: VirtualHubConnection metadata: annotations: @@ -154,7 +154,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: VirtualNetwork metadata: annotations: diff 
--git a/examples-generated/network/v1beta1/virtualhubsecuritypartnerprovider.yaml b/examples-generated/network/v1beta1/virtualhubsecuritypartnerprovider.yaml index ad044952d..2604d652f 100644 --- a/examples-generated/network/v1beta1/virtualhubsecuritypartnerprovider.yaml +++ b/examples-generated/network/v1beta1/virtualhubsecuritypartnerprovider.yaml @@ -73,7 +73,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: VPNGateway metadata: annotations: diff --git a/examples-generated/network/v1beta1/virtualnetworkpeering.yaml b/examples-generated/network/v1beta1/virtualnetworkpeering.yaml index b5e81ead8..9c2059d48 100644 --- a/examples-generated/network/v1beta1/virtualnetworkpeering.yaml +++ b/examples-generated/network/v1beta1/virtualnetworkpeering.yaml @@ -34,7 +34,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: VirtualNetwork metadata: annotations: @@ -53,7 +53,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: VirtualNetwork metadata: annotations: diff --git a/examples-generated/network/v1beta1/vpnserverconfigurationpolicygroup.yaml b/examples-generated/network/v1beta1/vpnserverconfigurationpolicygroup.yaml index 3590d14d2..be6fa9203 100644 --- a/examples-generated/network/v1beta1/vpnserverconfigurationpolicygroup.yaml +++ b/examples-generated/network/v1beta1/vpnserverconfigurationpolicygroup.yaml @@ -32,7 +32,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: VPNServerConfiguration metadata: annotations: diff --git a/examples-generated/network/v1beta2/applicationgateway.yaml b/examples-generated/network/v1beta2/applicationgateway.yaml new file mode 100644 index 000000000..b98996cc9 --- /dev/null +++ b/examples-generated/network/v1beta2/applicationgateway.yaml @@ -0,0 +1,124 @@ +apiVersion: 
network.azure.upbound.io/v1beta2 +kind: ApplicationGateway +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/applicationgateway + labels: + testing.upbound.io/example-name: network + name: network +spec: + forProvider: + backendAddressPool: + - name: ${local.backend_address_pool_name} + backendHttpSettings: + - cookieBasedAffinity: Disabled + name: ${local.http_setting_name} + path: /path1/ + port: 80 + protocol: Http + requestTimeout: 60 + frontendIpConfiguration: + - name: ${local.frontend_ip_configuration_name} + publicIpAddressIdSelector: + matchLabels: + testing.upbound.io/example-name: example + frontendPort: + - name: ${local.frontend_port_name} + port: 80 + gatewayIpConfiguration: + - name: my-gateway-ip-configuration + subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: example + httpListener: + - frontendIpConfigurationName: ${local.frontend_ip_configuration_name} + frontendPortName: ${local.frontend_port_name} + name: ${local.listener_name} + protocol: Http + location: West Europe + requestRoutingRule: + - backendAddressPoolName: ${local.backend_address_pool_name} + backendHttpSettingsName: ${local.http_setting_name} + httpListenerName: ${local.listener_name} + name: ${local.request_routing_rule_name} + priority: 9 + ruleType: Basic + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sku: + - capacity: 2 + name: Standard_v2 + tier: Standard_v2 + +--- + +apiVersion: network.azure.upbound.io/v1beta1 +kind: PublicIP +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/applicationgateway + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + allocationMethod: Dynamic + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: 
network/v1beta2/applicationgateway + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/applicationgateway + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressPrefixes: + - 10.254.0.0/24 + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + virtualNetworkNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: VirtualNetwork +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/applicationgateway + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressSpace: + - 10.254.0.0/16 + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/network/v1beta2/connectionmonitor.yaml b/examples-generated/network/v1beta2/connectionmonitor.yaml new file mode 100644 index 000000000..5acbc8e10 --- /dev/null +++ b/examples-generated/network/v1beta2/connectionmonitor.yaml @@ -0,0 +1,173 @@ +apiVersion: network.azure.upbound.io/v1beta2 +kind: ConnectionMonitor +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/connectionmonitor + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + endpoint: + - filter: + - item: + - address: ${azurerm_virtual_machine.example.id} + type: AgentAddress + type: Include + name: source + targetResourceId: ${azurerm_virtual_machine.example.id} + - address: terraform.io + name: destination + location: West Europe + networkWatcherIdSelector: + matchLabels: + testing.upbound.io/example-name: example + notes: examplenote + outputWorkspaceResourceIds: + - ${azurerm_log_analytics_workspace.example.id} + 
testConfiguration: + - name: tcpName + protocol: Tcp + tcpConfiguration: + - port: 80 + testFrequencyInSeconds: 60 + testGroup: + - destinationEndpoints: + - destination + name: exampletg + sourceEndpoints: + - source + testConfigurationNames: + - tcpName + +--- + +apiVersion: operationalinsights.azure.upbound.io/v1beta2 +kind: Workspace +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/connectionmonitor + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sku: PerGB2018 + +--- + +apiVersion: network.azure.upbound.io/v1beta1 +kind: NetworkInterface +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/connectionmonitor + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + ipConfiguration: + - name: testconfiguration1 + privateIpAddressAllocation: Dynamic + subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: example + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta1 +kind: Watcher +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/connectionmonitor + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/connectionmonitor + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/connectionmonitor + labels: + 
testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressPrefixes: + - 10.0.2.0/24 + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + virtualNetworkNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: compute.azure.upbound.io/v1beta2 +kind: VirtualMachineExtension +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/connectionmonitor + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + autoUpgradeMinorVersion: true + publisher: Microsoft.Azure.NetworkWatcher + type: NetworkWatcherAgentLinux + typeHandlerVersion: "1.4" + virtualMachineIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: VirtualNetwork +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/connectionmonitor + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressSpace: + - 10.0.0.0/16 + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/network/v1beta2/dnszone.yaml b/examples-generated/network/v1beta2/dnszone.yaml new file mode 100644 index 000000000..125ab0dd2 --- /dev/null +++ b/examples-generated/network/v1beta2/dnszone.yaml @@ -0,0 +1,27 @@ +apiVersion: network.azure.upbound.io/v1beta2 +kind: DNSZone +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/dnszone + labels: + testing.upbound.io/example-name: example-public + name: example-public +spec: + forProvider: + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/dnszone + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + 
location: West Europe diff --git a/examples-generated/network/v1beta2/expressroutecircuit.yaml b/examples-generated/network/v1beta2/expressroutecircuit.yaml new file mode 100644 index 000000000..2e06f5113 --- /dev/null +++ b/examples-generated/network/v1beta2/expressroutecircuit.yaml @@ -0,0 +1,36 @@ +apiVersion: network.azure.upbound.io/v1beta2 +kind: ExpressRouteCircuit +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/expressroutecircuit + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + bandwidthInMbps: 50 + location: West Europe + peeringLocation: Silicon Valley + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + serviceProviderName: Equinix + sku: + - family: MeteredData + tier: Standard + tags: + environment: Production + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/expressroutecircuit + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/network/v1beta2/expressroutecircuitpeering.yaml b/examples-generated/network/v1beta2/expressroutecircuitpeering.yaml new file mode 100644 index 000000000..0f8ebdcd7 --- /dev/null +++ b/examples-generated/network/v1beta2/expressroutecircuitpeering.yaml @@ -0,0 +1,71 @@ +apiVersion: network.azure.upbound.io/v1beta2 +kind: ExpressRouteCircuitPeering +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/expressroutecircuitpeering + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + expressRouteCircuitNameSelector: + matchLabels: + testing.upbound.io/example-name: example + ipv4Enabled: true + ipv6: + - enabled: true + microsoftPeering: + - advertisedPublicPrefixes: + - 2002:db01::/126 + primaryPeerAddressPrefix: 2002:db01::/126 + secondaryPeerAddressPrefix: 2003:db01::/126 + 
microsoftPeeringConfig: + - advertisedPublicPrefixes: + - 123.1.0.0/24 + peerAsn: 100 + primaryPeerAddressPrefix: 123.0.0.0/30 + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + secondaryPeerAddressPrefix: 123.0.0.4/30 + vlanId: 300 + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: ExpressRouteCircuit +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/expressroutecircuitpeering + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + allowClassicOperations: false + bandwidthInMbps: 50 + location: West Europe + peeringLocation: Silicon Valley + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + serviceProviderName: Equinix + sku: + - family: MeteredData + tier: Standard + tags: + environment: Production + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/expressroutecircuitpeering + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/network/v1beta2/expressrouteconnection.yaml b/examples-generated/network/v1beta2/expressrouteconnection.yaml new file mode 100644 index 000000000..33f2ffa23 --- /dev/null +++ b/examples-generated/network/v1beta2/expressrouteconnection.yaml @@ -0,0 +1,158 @@ +apiVersion: network.azure.upbound.io/v1beta2 +kind: ExpressRouteConnection +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/expressrouteconnection + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + expressRouteCircuitPeeringIdSelector: + matchLabels: + testing.upbound.io/example-name: example + expressRouteGatewayIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: ExpressRouteCircuit +metadata: + annotations: + 
meta.upbound.io/example-id: network/v1beta2/expressrouteconnection + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + bandwidthInGbps: 5 + expressRoutePortId: ${azurerm_express_route_port.example.id} + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sku: + - family: MeteredData + tier: Standard + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: ExpressRouteCircuitPeering +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/expressrouteconnection + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + expressRouteCircuitNameSelector: + matchLabels: + testing.upbound.io/example-name: example + peerAsn: 100 + primaryPeerAddressPrefix: 192.168.1.0/30 + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + secondaryPeerAddressPrefix: 192.168.2.0/30 + sharedKeySecretRef: + key: example-key + name: example-secret + namespace: upbound-system + vlanId: 100 + +--- + +apiVersion: network.azure.upbound.io/v1beta1 +kind: ExpressRouteGateway +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/expressrouteconnection + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + scaleUnits: 1 + virtualHubIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: ExpressRoutePort +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/expressrouteconnection + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + bandwidthInGbps: 10 + encapsulation: Dot1Q + location: West Europe + peeringLocation: Equinix-Seattle-SE2 + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: 
example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/expressrouteconnection + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: network.azure.upbound.io/v1beta1 +kind: VirtualHub +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/expressrouteconnection + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressPrefix: 10.0.1.0/24 + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + virtualWanIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta1 +kind: VirtualWAN +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/expressrouteconnection + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/network/v1beta2/expressrouteport.yaml b/examples-generated/network/v1beta2/expressrouteport.yaml new file mode 100644 index 000000000..beba93778 --- /dev/null +++ b/examples-generated/network/v1beta2/expressrouteport.yaml @@ -0,0 +1,31 @@ +apiVersion: network.azure.upbound.io/v1beta2 +kind: ExpressRoutePort +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/expressrouteport + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + bandwidthInGbps: 10 + encapsulation: Dot1Q + location: West Europe + peeringLocation: Airtel-Chennai-CLS + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: 
network/v1beta2/expressrouteport + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West US diff --git a/examples-generated/network/v1beta2/firewall.yaml b/examples-generated/network/v1beta2/firewall.yaml new file mode 100644 index 000000000..242dd44be --- /dev/null +++ b/examples-generated/network/v1beta2/firewall.yaml @@ -0,0 +1,97 @@ +apiVersion: network.azure.upbound.io/v1beta2 +kind: Firewall +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/firewall + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + ipConfiguration: + - name: configuration + publicIpAddressIdSelector: + matchLabels: + testing.upbound.io/example-name: example + subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: example + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: AZFW_VNet + skuTier: Standard + +--- + +apiVersion: network.azure.upbound.io/v1beta1 +kind: PublicIP +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/firewall + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + allocationMethod: Static + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sku: Standard + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/firewall + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/firewall + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressPrefixes: + - 10.0.1.0/24 + resourceGroupNameSelector: + matchLabels: + 
testing.upbound.io/example-name: example + virtualNetworkNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: VirtualNetwork +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/firewall + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressSpace: + - 10.0.0.0/16 + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/network/v1beta2/firewallpolicy.yaml b/examples-generated/network/v1beta2/firewallpolicy.yaml new file mode 100644 index 000000000..a4bbb3594 --- /dev/null +++ b/examples-generated/network/v1beta2/firewallpolicy.yaml @@ -0,0 +1,28 @@ +apiVersion: network.azure.upbound.io/v1beta2 +kind: FirewallPolicy +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/firewallpolicy + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/firewallpolicy + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/network/v1beta2/frontdoor.yaml b/examples-generated/network/v1beta2/frontdoor.yaml new file mode 100644 index 000000000..5cbe48e02 --- /dev/null +++ b/examples-generated/network/v1beta2/frontdoor.yaml @@ -0,0 +1,55 @@ +apiVersion: network.azure.upbound.io/v1beta2 +kind: FrontDoor +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/frontdoor + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + backendPool: + - backend: + - address: www.bing.com + hostHeader: www.bing.com + httpPort: 80 + 
httpsPort: 443 + healthProbeName: exampleHealthProbeSetting1 + loadBalancingName: exampleLoadBalancingSettings1 + name: exampleBackendBing + backendPoolHealthProbe: + - name: exampleHealthProbeSetting1 + backendPoolLoadBalancing: + - name: exampleLoadBalancingSettings1 + frontendEndpoint: + - hostName: example-FrontDoor.azurefd.net + name: exampleFrontendEndpoint1 + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + routingRule: + - acceptedProtocols: + - Http + - Https + forwardingConfiguration: + - backendPoolName: exampleBackendBing + forwardingProtocol: MatchRequest + frontendEndpoints: + - exampleFrontendEndpoint1 + name: exampleRoutingRule1 + patternsToMatch: + - /* + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/frontdoor + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/network/v1beta2/frontdoorcustomhttpsconfiguration.yaml b/examples-generated/network/v1beta2/frontdoorcustomhttpsconfiguration.yaml new file mode 100644 index 000000000..67a327992 --- /dev/null +++ b/examples-generated/network/v1beta2/frontdoorcustomhttpsconfiguration.yaml @@ -0,0 +1,72 @@ +apiVersion: network.azure.upbound.io/v1beta2 +kind: FrontdoorCustomHTTPSConfiguration +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/frontdoorcustomhttpsconfiguration + labels: + testing.upbound.io/example-name: example_custom_https_0 + name: example-custom-https-0 +spec: + forProvider: + customHttpsProvisioningEnabled: false + frontendEndpointId: ${azurerm_frontdoor.example.frontend_endpoints["exampleFrontendEndpoint1"]} + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: FrontDoor +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/frontdoorcustomhttpsconfiguration + labels: + testing.upbound.io/example-name: example + name: 
example +spec: + forProvider: + backendPool: + - backend: + - address: www.bing.com + hostHeader: www.bing.com + httpPort: 80 + httpsPort: 443 + healthProbeName: exampleHealthProbeSetting1 + loadBalancingName: exampleLoadBalancingSettings1 + name: exampleBackendBing + backendPoolHealthProbe: + - name: exampleHealthProbeSetting1 + backendPoolLoadBalancing: + - name: exampleLoadBalancingSettings1 + frontendEndpoint: + - hostName: example-FrontDoor.azurefd.net + name: exampleFrontendEndpoint1 + - hostName: examplefd1.examplefd.net + name: exampleFrontendEndpoint2 + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + routingRule: + - acceptedProtocols: + - Http + - Https + forwardingConfiguration: + - backendPoolName: exampleBackendBing + forwardingProtocol: MatchRequest + frontendEndpoints: + - exampleFrontendEndpoint1 + name: exampleRoutingRule1 + patternsToMatch: + - /* + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/frontdoorcustomhttpsconfiguration + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/network/v1beta2/frontdoorrulesengine.yaml b/examples-generated/network/v1beta2/frontdoorrulesengine.yaml new file mode 100644 index 000000000..6f9663de5 --- /dev/null +++ b/examples-generated/network/v1beta2/frontdoorrulesengine.yaml @@ -0,0 +1,95 @@ +apiVersion: network.azure.upbound.io/v1beta2 +kind: FrontdoorRulesEngine +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/frontdoorrulesengine + labels: + testing.upbound.io/example-name: example_rules_engine + name: example-rules-engine +spec: + forProvider: + frontdoorNameSelector: + matchLabels: + testing.upbound.io/example-name: example + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + rule: + - action: + - responseHeader: + - 
headerActionType: Append + headerName: X-TEST-HEADER + value: Append Header Rule + name: debuggingoutput + priority: 1 + - action: + - responseHeader: + - headerActionType: Overwrite + headerName: Access-Control-Allow-Origin + value: '*' + - headerActionType: Overwrite + headerName: Access-Control-Allow-Credentials + value: "true" + matchCondition: + - operator: Equal + value: + - GET + - POST + variable: RequestMethod + name: overwriteorigin + priority: 2 + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: FrontDoor +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/frontdoorrulesengine + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + backendPool: + - backend: + - address: www.bing.com + hostHeader: www.bing.com + httpPort: 80 + httpsPort: 443 + healthProbeName: exampleHealthProbeSetting1 + loadBalancingName: exampleLoadBalancingSettings1 + name: exampleBackendBing + backendPoolHealthProbe: + - name: exampleHealthProbeSetting1 + backendPoolLoadBalancing: + - name: exampleLoadBalancingSettings1 + frontendEndpoint: + - hostName: example-FrontDoor.azurefd.net + name: exampleFrontendEndpoint1 + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + routingRule: + - acceptedProtocols: + - Http + - Https + frontendEndpoints: + - exampleFrontendEndpoint1 + name: exampleRoutingRule1 + patternsToMatch: + - /* + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/frontdoorrulesengine + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/network/v1beta2/localnetworkgateway.yaml b/examples-generated/network/v1beta2/localnetworkgateway.yaml new file mode 100644 index 000000000..2617f34ad --- /dev/null +++ b/examples-generated/network/v1beta2/localnetworkgateway.yaml @@ -0,0 +1,31 @@ +apiVersion: 
network.azure.upbound.io/v1beta2 +kind: LocalNetworkGateway +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/localnetworkgateway + labels: + testing.upbound.io/example-name: home + name: home +spec: + forProvider: + addressSpace: + - 10.0.0.0/16 + gatewayAddress: 12.13.14.15 + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/localnetworkgateway + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/network/v1beta2/manager.yaml b/examples-generated/network/v1beta2/manager.yaml new file mode 100644 index 000000000..b140137ed --- /dev/null +++ b/examples-generated/network/v1beta2/manager.yaml @@ -0,0 +1,37 @@ +apiVersion: network.azure.upbound.io/v1beta2 +kind: Manager +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/manager + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + description: example network manager + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + scope: + - subscriptionIds: + - ${data.azurerm_subscription.current.id} + scopeAccesses: + - Connectivity + - SecurityAdmin + tags: + foo: bar + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/manager + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/network/v1beta2/packetcapture.yaml b/examples-generated/network/v1beta2/packetcapture.yaml new file mode 100644 index 000000000..3e949c9b2 --- /dev/null +++ b/examples-generated/network/v1beta2/packetcapture.yaml @@ -0,0 +1,154 @@ +apiVersion: 
network.azure.upbound.io/v1beta2 +kind: PacketCapture +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/packetcapture + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + networkWatcherNameSelector: + matchLabels: + testing.upbound.io/example-name: example + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + storageLocation: + - storageAccountIdSelector: + matchLabels: + testing.upbound.io/example-name: example + targetResourceId: ${azurerm_virtual_machine.example.id} + +--- + +apiVersion: network.azure.upbound.io/v1beta1 +kind: NetworkInterface +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/packetcapture + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + ipConfiguration: + - name: testconfiguration1 + privateIpAddressAllocation: Dynamic + subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: example + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta1 +kind: Watcher +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/packetcapture + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/packetcapture + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: storage.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/packetcapture + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + 
accountReplicationType: LRS + accountTier: Standard + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/packetcapture + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressPrefixes: + - 10.0.2.0/24 + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + virtualNetworkNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: compute.azure.upbound.io/v1beta2 +kind: VirtualMachineExtension +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/packetcapture + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + autoUpgradeMinorVersion: true + publisher: Microsoft.Azure.NetworkWatcher + type: NetworkWatcherAgentLinux + typeHandlerVersion: "1.4" + virtualMachineIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: VirtualNetwork +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/packetcapture + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressSpace: + - 10.0.0.0/16 + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/network/v1beta2/pointtositevpngateway.yaml b/examples-generated/network/v1beta2/pointtositevpngateway.yaml new file mode 100644 index 000000000..e13c63cf2 --- /dev/null +++ b/examples-generated/network/v1beta2/pointtositevpngateway.yaml @@ -0,0 +1,120 @@ +apiVersion: network.azure.upbound.io/v1beta2 +kind: PointToSiteVPNGateway +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/pointtositevpngateway + labels: + 
testing.upbound.io/example-name: example + name: example +spec: + forProvider: + connectionConfiguration: + - name: example-gateway-config + vpnClientAddressPool: + - addressPrefixes: + - 10.0.2.0/24 + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + scaleUnit: 1 + virtualHubIdSelector: + matchLabels: + testing.upbound.io/example-name: example + vpnServerConfigurationIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/pointtositevpngateway + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: network.azure.upbound.io/v1beta1 +kind: VirtualHub +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/pointtositevpngateway + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressPrefix: 10.0.0.0/23 + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + virtualWanIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta1 +kind: VirtualWAN +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/pointtositevpngateway + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: VPNServerConfiguration +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/pointtositevpngateway + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + clientRootCertificate: + - name: DigiCert-Federated-ID-Root-CA + publicCertData: | + 
MIIDuzCCAqOgAwIBAgIQCHTZWCM+IlfFIRXIvyKSrjANBgkqhkiG9w0BAQsFADBn + MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 + d3cuZGlnaWNlcnQuY29tMSYwJAYDVQQDEx1EaWdpQ2VydCBGZWRlcmF0ZWQgSUQg + Um9vdCBDQTAeFw0xMzAxMTUxMjAwMDBaFw0zMzAxMTUxMjAwMDBaMGcxCzAJBgNV + BAYTAlVTMRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdp + Y2VydC5jb20xJjAkBgNVBAMTHURpZ2lDZXJ0IEZlZGVyYXRlZCBJRCBSb290IENB + MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvAEB4pcCqnNNOWE6Ur5j + QPUH+1y1F9KdHTRSza6k5iDlXq1kGS1qAkuKtw9JsiNRrjltmFnzMZRBbX8Tlfl8 + zAhBmb6dDduDGED01kBsTkgywYPxXVTKec0WxYEEF0oMn4wSYNl0lt2eJAKHXjNf + GTwiibdP8CUR2ghSM2sUTI8Nt1Omfc4SMHhGhYD64uJMbX98THQ/4LMGuYegou+d + GTiahfHtjn7AboSEknwAMJHCh5RlYZZ6B1O4QbKJ+34Q0eKgnI3X6Vc9u0zf6DH8 + Dk+4zQDYRRTqTnVO3VT8jzqDlCRuNtq6YvryOWN74/dq8LQhUnXHvFyrsdMaE1X2 + DwIDAQABo2MwYTAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAdBgNV + HQ4EFgQUGRdkFnbGt1EWjKwbUne+5OaZvRYwHwYDVR0jBBgwFoAUGRdkFnbGt1EW + jKwbUne+5OaZvRYwDQYJKoZIhvcNAQELBQADggEBAHcqsHkrjpESqfuVTRiptJfP + 9JbdtWqRTmOf6uJi2c8YVqI6XlKXsD8C1dUUaaHKLUJzvKiazibVuBwMIT84AyqR + QELn3e0BtgEymEygMU569b01ZPxoFSnNXc7qDZBDef8WfqAV/sxkTi8L9BkmFYfL + uGLOhRJOFprPdoDIUBB+tmCl3oDcBy3vnUeOEioz8zAkprcb3GHwHAK+vHmmfgcn + WsfMLH4JCLa/tRYL+Rw/N3ybCkDp00s0WUZ+AoDywSl0Q/ZEnNY0MsFiw6LyIdbq + M/s/1JRtO3bDSzD9TazRVzn2oBqzSa8VgIo5C1nOnoAKJTlsClJKvIhnRlaLQqk= + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + vpnAuthenticationTypes: + - Certificate diff --git a/examples-generated/network/v1beta2/privatednszone.yaml b/examples-generated/network/v1beta2/privatednszone.yaml new file mode 100644 index 000000000..cd90a179d --- /dev/null +++ b/examples-generated/network/v1beta2/privatednszone.yaml @@ -0,0 +1,27 @@ +apiVersion: network.azure.upbound.io/v1beta2 +kind: PrivateDNSZone +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/privatednszone + labels: + testing.upbound.io/example-name: example + name: example +spec: + 
forProvider: + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/privatednszone + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/network/v1beta2/privateendpoint.yaml b/examples-generated/network/v1beta2/privateendpoint.yaml new file mode 100644 index 000000000..1bfe4ac13 --- /dev/null +++ b/examples-generated/network/v1beta2/privateendpoint.yaml @@ -0,0 +1,165 @@ +apiVersion: network.azure.upbound.io/v1beta2 +kind: PrivateEndpoint +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/privateendpoint + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + privateServiceConnection: + - isManualConnection: false + name: example-privateserviceconnection + privateConnectionResourceId: ${azurerm_private_link_service.example.id} + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: endpoint + +--- + +apiVersion: network.azure.upbound.io/v1beta1 +kind: LoadBalancer +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/privateendpoint + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + frontendIpConfiguration: + - name: example-pip + publicIpAddressIdSelector: + matchLabels: + testing.upbound.io/example-name: example + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sku: Standard + +--- + +apiVersion: network.azure.upbound.io/v1beta1 +kind: PrivateLinkService +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/privateendpoint + labels: + testing.upbound.io/example-name: example + name: 
example +spec: + forProvider: + loadBalancerFrontendIpConfigurationIds: + - ${azurerm_lb.example.frontend_ip_configuration[0].id} + location: West Europe + natIpConfiguration: + - name: example-pip + primary: true + subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: service + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta1 +kind: PublicIP +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/privateendpoint + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + allocationMethod: Static + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sku: Standard + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/privateendpoint + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/privateendpoint + labels: + testing.upbound.io/example-name: endpoint + name: endpoint +spec: + forProvider: + addressPrefixes: + - 10.0.2.0/24 + enforcePrivateLinkEndpointNetworkPolicies: true + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + virtualNetworkNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/privateendpoint + labels: + testing.upbound.io/example-name: service + name: service +spec: + forProvider: + addressPrefixes: + - 10.0.1.0/24 + enforcePrivateLinkServiceNetworkPolicies: true + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + 
virtualNetworkNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: VirtualNetwork +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/privateendpoint + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressSpace: + - 10.0.0.0/16 + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/network/v1beta2/profile.yaml b/examples-generated/network/v1beta2/profile.yaml new file mode 100644 index 000000000..501724b55 --- /dev/null +++ b/examples-generated/network/v1beta2/profile.yaml @@ -0,0 +1,81 @@ +apiVersion: network.azure.upbound.io/v1beta2 +kind: Profile +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/profile + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + containerNetworkInterface: + - ipConfiguration: + - name: exampleipconfig + subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: example + name: examplecnic + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/profile + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/profile + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressPrefixes: + - 10.1.0.0/24 + delegation: + - name: delegation + serviceDelegation: + - actions: + - Microsoft.Network/virtualNetworks/subnets/action + name: Microsoft.ContainerInstance/containerGroups + resourceGroupNameSelector: + 
matchLabels: + testing.upbound.io/example-name: example + virtualNetworkNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: VirtualNetwork +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/profile + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressSpace: + - 10.1.0.0/16 + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/network/v1beta2/subnet.yaml b/examples-generated/network/v1beta2/subnet.yaml new file mode 100644 index 000000000..6d3eca376 --- /dev/null +++ b/examples-generated/network/v1beta2/subnet.yaml @@ -0,0 +1,58 @@ +apiVersion: network.azure.upbound.io/v1beta2 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/subnet + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressPrefixes: + - 10.0.1.0/24 + delegation: + - name: delegation + serviceDelegation: + - actions: + - Microsoft.Network/virtualNetworks/subnets/join/action + - Microsoft.Network/virtualNetworks/subnets/prepareNetworkPolicies/action + name: Microsoft.ContainerInstance/containerGroups + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + virtualNetworkNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/subnet + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: VirtualNetwork +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/subnet + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressSpace: + - 
10.0.0.0/16 + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/network/v1beta2/trafficmanagerprofile.yaml b/examples-generated/network/v1beta2/trafficmanagerprofile.yaml new file mode 100644 index 000000000..2ded08b3b --- /dev/null +++ b/examples-generated/network/v1beta2/trafficmanagerprofile.yaml @@ -0,0 +1,40 @@ +apiVersion: network.azure.upbound.io/v1beta2 +kind: TrafficManagerProfile +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/trafficmanagerprofile + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + dnsConfig: + - relativeName: ${random_id.server.hex} + ttl: 100 + monitorConfig: + - intervalInSeconds: 30 + path: / + port: 80 + protocol: HTTP + timeoutInSeconds: 9 + toleratedNumberOfFailures: 3 + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + tags: + environment: Production + trafficRoutingMethod: Weighted + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/trafficmanagerprofile + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/network/v1beta2/virtualhubconnection.yaml b/examples-generated/network/v1beta2/virtualhubconnection.yaml new file mode 100644 index 000000000..f393a53fe --- /dev/null +++ b/examples-generated/network/v1beta2/virtualhubconnection.yaml @@ -0,0 +1,87 @@ +apiVersion: network.azure.upbound.io/v1beta2 +kind: VirtualHubConnection +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/virtualhubconnection + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + remoteVirtualNetworkIdSelector: + matchLabels: + testing.upbound.io/example-name: example + virtualHubIdSelector: + matchLabels: + 
testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/virtualhubconnection + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: network.azure.upbound.io/v1beta1 +kind: VirtualHub +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/virtualhubconnection + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressPrefix: 10.0.1.0/24 + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + virtualWanIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: VirtualNetwork +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/virtualhubconnection + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressSpace: + - 172.16.0.0/12 + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta1 +kind: VirtualWAN +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/virtualhubconnection + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/network/v1beta2/virtualnetwork.yaml b/examples-generated/network/v1beta2/virtualnetwork.yaml new file mode 100644 index 000000000..d06209fa1 --- /dev/null +++ b/examples-generated/network/v1beta2/virtualnetwork.yaml @@ -0,0 +1,52 @@ +apiVersion: network.azure.upbound.io/v1beta2 +kind: VirtualNetwork +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/virtualnetwork + labels: 
+ testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressSpace: + - 10.0.0.0/16 + dnsServers: + - 10.0.0.4 + - 10.0.0.5 + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + tags: + environment: Production + +--- + +apiVersion: network.azure.upbound.io/v1beta1 +kind: SecurityGroup +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/virtualnetwork + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/virtualnetwork + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/network/v1beta2/virtualnetworkgateway.yaml b/examples-generated/network/v1beta2/virtualnetworkgateway.yaml new file mode 100644 index 000000000..f43a8ea58 --- /dev/null +++ b/examples-generated/network/v1beta2/virtualnetworkgateway.yaml @@ -0,0 +1,129 @@ +apiVersion: network.azure.upbound.io/v1beta2 +kind: VirtualNetworkGateway +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/virtualnetworkgateway + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + activeActive: false + enableBgp: false + ipConfiguration: + - name: vnetGatewayConfig + privateIpAddressAllocation: Dynamic + publicIpAddressIdSelector: + matchLabels: + testing.upbound.io/example-name: example + subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: example + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sku: Basic + type: Vpn + vpnClientConfiguration: + - addressSpace: + - 10.2.0.0/24 + revokedCertificate: + - name: 
Verizon-Global-Root-CA + thumbprint: 912198EEF23DCAC40939312FEE97DD560BAE49B1 + rootCertificate: + - name: DigiCert-Federated-ID-Root-CA + publicCertData: | + MIIDuzCCAqOgAwIBAgIQCHTZWCM+IlfFIRXIvyKSrjANBgkqhkiG9w0BAQsFADBn + MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 + d3cuZGlnaWNlcnQuY29tMSYwJAYDVQQDEx1EaWdpQ2VydCBGZWRlcmF0ZWQgSUQg + Um9vdCBDQTAeFw0xMzAxMTUxMjAwMDBaFw0zMzAxMTUxMjAwMDBaMGcxCzAJBgNV + BAYTAlVTMRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdp + Y2VydC5jb20xJjAkBgNVBAMTHURpZ2lDZXJ0IEZlZGVyYXRlZCBJRCBSb290IENB + MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvAEB4pcCqnNNOWE6Ur5j + QPUH+1y1F9KdHTRSza6k5iDlXq1kGS1qAkuKtw9JsiNRrjltmFnzMZRBbX8Tlfl8 + zAhBmb6dDduDGED01kBsTkgywYPxXVTKec0WxYEEF0oMn4wSYNl0lt2eJAKHXjNf + GTwiibdP8CUR2ghSM2sUTI8Nt1Omfc4SMHhGhYD64uJMbX98THQ/4LMGuYegou+d + GTiahfHtjn7AboSEknwAMJHCh5RlYZZ6B1O4QbKJ+34Q0eKgnI3X6Vc9u0zf6DH8 + Dk+4zQDYRRTqTnVO3VT8jzqDlCRuNtq6YvryOWN74/dq8LQhUnXHvFyrsdMaE1X2 + DwIDAQABo2MwYTAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAdBgNV + HQ4EFgQUGRdkFnbGt1EWjKwbUne+5OaZvRYwHwYDVR0jBBgwFoAUGRdkFnbGt1EW + jKwbUne+5OaZvRYwDQYJKoZIhvcNAQELBQADggEBAHcqsHkrjpESqfuVTRiptJfP + 9JbdtWqRTmOf6uJi2c8YVqI6XlKXsD8C1dUUaaHKLUJzvKiazibVuBwMIT84AyqR + QELn3e0BtgEymEygMU569b01ZPxoFSnNXc7qDZBDef8WfqAV/sxkTi8L9BkmFYfL + uGLOhRJOFprPdoDIUBB+tmCl3oDcBy3vnUeOEioz8zAkprcb3GHwHAK+vHmmfgcn + WsfMLH4JCLa/tRYL+Rw/N3ybCkDp00s0WUZ+AoDywSl0Q/ZEnNY0MsFiw6LyIdbq + M/s/1JRtO3bDSzD9TazRVzn2oBqzSa8VgIo5C1nOnoAKJTlsClJKvIhnRlaLQqk= + vpnType: RouteBased + +--- + +apiVersion: network.azure.upbound.io/v1beta1 +kind: PublicIP +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/virtualnetworkgateway + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + allocationMethod: Dynamic + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + 
annotations: + meta.upbound.io/example-id: network/v1beta2/virtualnetworkgateway + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/virtualnetworkgateway + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressPrefixes: + - 10.0.1.0/24 + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + virtualNetworkNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: VirtualNetwork +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/virtualnetworkgateway + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressSpace: + - 10.0.0.0/16 + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/network/v1beta2/virtualnetworkgatewayconnection.yaml b/examples-generated/network/v1beta2/virtualnetworkgatewayconnection.yaml new file mode 100644 index 000000000..be3e21e1d --- /dev/null +++ b/examples-generated/network/v1beta2/virtualnetworkgatewayconnection.yaml @@ -0,0 +1,147 @@ +apiVersion: network.azure.upbound.io/v1beta2 +kind: VirtualNetworkGatewayConnection +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/virtualnetworkgatewayconnection + labels: + testing.upbound.io/example-name: onpremise + name: onpremise +spec: + forProvider: + localNetworkGatewayIdSelector: + matchLabels: + testing.upbound.io/example-name: onpremise + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sharedKeySecretRef: + key: example-key + name: example-secret + namespace: upbound-system + type: IPsec + 
virtualNetworkGatewayIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: LocalNetworkGateway +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/virtualnetworkgatewayconnection + labels: + testing.upbound.io/example-name: onpremise + name: onpremise +spec: + forProvider: + addressSpace: + - 10.1.1.0/24 + gatewayAddress: 168.62.225.23 + location: West US + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta1 +kind: PublicIP +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/virtualnetworkgatewayconnection + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + allocationMethod: Dynamic + location: West US + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/virtualnetworkgatewayconnection + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West US + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/virtualnetworkgatewayconnection + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressPrefixes: + - 10.0.1.0/24 + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + virtualNetworkNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: VirtualNetwork +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/virtualnetworkgatewayconnection + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressSpace: + - 
10.0.0.0/16 + location: West US + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: VirtualNetworkGateway +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/virtualnetworkgatewayconnection + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + activeActive: false + enableBgp: false + ipConfiguration: + - privateIpAddressAllocation: Dynamic + publicIpAddressIdSelector: + matchLabels: + testing.upbound.io/example-name: example + subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: example + location: West US + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sku: Basic + type: Vpn + vpnType: RouteBased diff --git a/examples-generated/network/v1beta2/vpngateway.yaml b/examples-generated/network/v1beta2/vpngateway.yaml new file mode 100644 index 000000000..8c3d35ad4 --- /dev/null +++ b/examples-generated/network/v1beta2/vpngateway.yaml @@ -0,0 +1,88 @@ +apiVersion: network.azure.upbound.io/v1beta2 +kind: VPNGateway +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/vpngateway + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + virtualHubIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/vpngateway + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: network.azure.upbound.io/v1beta1 +kind: VirtualHub +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/vpngateway + labels: + testing.upbound.io/example-name: example + name: example +spec: 
+ forProvider: + addressPrefix: 10.0.1.0/24 + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + virtualWanIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: VirtualNetwork +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/vpngateway + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressSpace: + - 10.0.0.0/16 + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta1 +kind: VirtualWAN +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/vpngateway + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/network/v1beta2/vpngatewayconnection.yaml b/examples-generated/network/v1beta2/vpngatewayconnection.yaml new file mode 100644 index 000000000..56e427239 --- /dev/null +++ b/examples-generated/network/v1beta2/vpngatewayconnection.yaml @@ -0,0 +1,118 @@ +apiVersion: network.azure.upbound.io/v1beta2 +kind: VPNGatewayConnection +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/vpngatewayconnection + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + remoteVpnSiteIdSelector: + matchLabels: + testing.upbound.io/example-name: example + vpnGatewayIdSelector: + matchLabels: + testing.upbound.io/example-name: example + vpnLink: + - name: link1 + vpnSiteLinkId: ${azurerm_vpn_site.example.link[0].id} + - name: link2 + vpnSiteLinkId: ${azurerm_vpn_site.example.link[1].id} + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: 
network/v1beta2/vpngatewayconnection + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: network.azure.upbound.io/v1beta1 +kind: VirtualHub +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/vpngatewayconnection + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressPrefix: 10.0.0.0/24 + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + virtualWanIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta1 +kind: VirtualWAN +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/vpngatewayconnection + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: VPNGateway +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/vpngatewayconnection + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + virtualHubIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: VPNSite +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/vpngatewayconnection + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + link: + - ipAddress: 10.1.0.0 + name: link1 + - ipAddress: 10.2.0.0 + name: link2 + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + virtualWanIdSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git 
a/examples-generated/network/v1beta2/vpnserverconfiguration.yaml b/examples-generated/network/v1beta2/vpnserverconfiguration.yaml new file mode 100644 index 000000000..f0596dffc --- /dev/null +++ b/examples-generated/network/v1beta2/vpnserverconfiguration.yaml @@ -0,0 +1,53 @@ +apiVersion: network.azure.upbound.io/v1beta2 +kind: VPNServerConfiguration +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/vpnserverconfiguration + labels: + testing.upbound.io/example-name: test + name: test +spec: + forProvider: + clientRootCertificate: + - name: DigiCert-Federated-ID-Root-CA + publicCertData: | + MIIDuzCCAqOgAwIBAgIQCHTZWCM+IlfFIRXIvyKSrjANBgkqhkiG9w0BAQsFADBn + MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 + d3cuZGlnaWNlcnQuY29tMSYwJAYDVQQDEx1EaWdpQ2VydCBGZWRlcmF0ZWQgSUQg + Um9vdCBDQTAeFw0xMzAxMTUxMjAwMDBaFw0zMzAxMTUxMjAwMDBaMGcxCzAJBgNV + BAYTAlVTMRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdp + Y2VydC5jb20xJjAkBgNVBAMTHURpZ2lDZXJ0IEZlZGVyYXRlZCBJRCBSb290IENB + MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvAEB4pcCqnNNOWE6Ur5j + QPUH+1y1F9KdHTRSza6k5iDlXq1kGS1qAkuKtw9JsiNRrjltmFnzMZRBbX8Tlfl8 + zAhBmb6dDduDGED01kBsTkgywYPxXVTKec0WxYEEF0oMn4wSYNl0lt2eJAKHXjNf + GTwiibdP8CUR2ghSM2sUTI8Nt1Omfc4SMHhGhYD64uJMbX98THQ/4LMGuYegou+d + GTiahfHtjn7AboSEknwAMJHCh5RlYZZ6B1O4QbKJ+34Q0eKgnI3X6Vc9u0zf6DH8 + Dk+4zQDYRRTqTnVO3VT8jzqDlCRuNtq6YvryOWN74/dq8LQhUnXHvFyrsdMaE1X2 + DwIDAQABo2MwYTAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAdBgNV + HQ4EFgQUGRdkFnbGt1EWjKwbUne+5OaZvRYwHwYDVR0jBBgwFoAUGRdkFnbGt1EW + jKwbUne+5OaZvRYwDQYJKoZIhvcNAQELBQADggEBAHcqsHkrjpESqfuVTRiptJfP + 9JbdtWqRTmOf6uJi2c8YVqI6XlKXsD8C1dUUaaHKLUJzvKiazibVuBwMIT84AyqR + QELn3e0BtgEymEygMU569b01ZPxoFSnNXc7qDZBDef8WfqAV/sxkTi8L9BkmFYfL + uGLOhRJOFprPdoDIUBB+tmCl3oDcBy3vnUeOEioz8zAkprcb3GHwHAK+vHmmfgcn + WsfMLH4JCLa/tRYL+Rw/N3ybCkDp00s0WUZ+AoDywSl0Q/ZEnNY0MsFiw6LyIdbq + M/s/1JRtO3bDSzD9TazRVzn2oBqzSa8VgIo5C1nOnoAKJTlsClJKvIhnRlaLQqk= + location: West Europe + 
resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + vpnAuthenticationTypes: + - Certificate + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/vpnserverconfiguration + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/network/v1beta2/vpnsite.yaml b/examples-generated/network/v1beta2/vpnsite.yaml new file mode 100644 index 000000000..2d155ce96 --- /dev/null +++ b/examples-generated/network/v1beta2/vpnsite.yaml @@ -0,0 +1,53 @@ +apiVersion: network.azure.upbound.io/v1beta2 +kind: VPNSite +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/vpnsite + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressCidrs: + - 10.0.0.0/24 + link: + - ipAddress: 10.0.0.1 + name: link1 + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + virtualWanIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/vpnsite + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: network.azure.upbound.io/v1beta1 +kind: VirtualWAN +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/vpnsite + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/network/v1beta2/watcherflowlog.yaml b/examples-generated/network/v1beta2/watcherflowlog.yaml new file mode 100644 index 000000000..eb781f3ef --- /dev/null +++ 
b/examples-generated/network/v1beta2/watcherflowlog.yaml @@ -0,0 +1,123 @@ +apiVersion: network.azure.upbound.io/v1beta2 +kind: WatcherFlowLog +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/watcherflowlog + labels: + testing.upbound.io/example-name: test + name: test +spec: + forProvider: + enabled: true + networkSecurityGroupIdSelector: + matchLabels: + testing.upbound.io/example-name: test + networkWatcherNameSelector: + matchLabels: + testing.upbound.io/example-name: test + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + retentionPolicy: + - days: 7 + enabled: true + storageAccountIdSelector: + matchLabels: + testing.upbound.io/example-name: test + trafficAnalytics: + - enabled: true + intervalInMinutes: 10 + workspaceIdSelector: + matchLabels: + testing.upbound.io/example-name: test + workspaceRegion: West Europe + workspaceResourceIdSelector: + matchLabels: + testing.upbound.io/example-name: test + +--- + +apiVersion: operationalinsights.azure.upbound.io/v1beta2 +kind: Workspace +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/watcherflowlog + labels: + testing.upbound.io/example-name: test + name: test +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sku: PerGB2018 + +--- + +apiVersion: network.azure.upbound.io/v1beta1 +kind: SecurityGroup +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/watcherflowlog + labels: + testing.upbound.io/example-name: test + name: test +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta1 +kind: Watcher +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/watcherflowlog + labels: + testing.upbound.io/example-name: test + name: test +spec: + forProvider: + location: West Europe + 
resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/watcherflowlog + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: storage.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/watcherflowlog + labels: + testing.upbound.io/example-name: test + name: test +spec: + forProvider: + accountKind: StorageV2 + accountReplicationType: LRS + accountTier: Standard + enableHttpsTrafficOnly: true + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/network/v1beta2/webapplicationfirewallpolicy.yaml b/examples-generated/network/v1beta2/webapplicationfirewallpolicy.yaml new file mode 100644 index 000000000..98f2bfdd3 --- /dev/null +++ b/examples-generated/network/v1beta2/webapplicationfirewallpolicy.yaml @@ -0,0 +1,85 @@ +apiVersion: network.azure.upbound.io/v1beta2 +kind: WebApplicationFirewallPolicy +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/webapplicationfirewallpolicy + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + customRules: + - action: Block + matchConditions: + - matchValues: + - 192.168.1.0/24 + - 10.0.0.0/24 + matchVariables: + - variableName: RemoteAddr + negationCondition: false + operator: IPMatch + name: Rule1 + priority: 1 + ruleType: MatchRule + - action: Block + matchConditions: + - matchValues: + - 192.168.1.0/24 + matchVariables: + - variableName: RemoteAddr + negationCondition: false + operator: IPMatch + - matchValues: + - Windows + matchVariables: + - selector: UserAgent + variableName: RequestHeaders + negationCondition: false + operator: Contains + name: Rule2 + priority: 2 + 
ruleType: MatchRule + location: West Europe + managedRules: + - exclusion: + - matchVariable: RequestHeaderNames + selector: x-company-secret-header + selectorMatchOperator: Equals + - matchVariable: RequestCookieNames + selector: too-tasty + selectorMatchOperator: EndsWith + managedRuleSet: + - ruleGroupOverride: + - rule: + - action: Log + enabled: true + id: "920300" + - action: Block + enabled: true + id: "920440" + ruleGroupName: REQUEST-920-PROTOCOL-ENFORCEMENT + type: OWASP + version: "3.2" + policySettings: + - enabled: true + fileUploadLimitInMb: 100 + maxRequestBodySizeInKb: 128 + mode: Prevention + requestBodyCheck: true + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/webapplicationfirewallpolicy + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/notificationhubs/v1beta1/authorizationrule.yaml b/examples-generated/notificationhubs/v1beta1/authorizationrule.yaml index e4c65562c..9e629c97a 100644 --- a/examples-generated/notificationhubs/v1beta1/authorizationrule.yaml +++ b/examples-generated/notificationhubs/v1beta1/authorizationrule.yaml @@ -23,7 +23,7 @@ spec: --- -apiVersion: notificationhubs.azure.upbound.io/v1beta1 +apiVersion: notificationhubs.azure.upbound.io/v1beta2 kind: NotificationHub metadata: annotations: diff --git a/examples-generated/notificationhubs/v1beta2/notificationhub.yaml b/examples-generated/notificationhubs/v1beta2/notificationhub.yaml new file mode 100644 index 000000000..de135d005 --- /dev/null +++ b/examples-generated/notificationhubs/v1beta2/notificationhub.yaml @@ -0,0 +1,50 @@ +apiVersion: notificationhubs.azure.upbound.io/v1beta2 +kind: NotificationHub +metadata: + annotations: + meta.upbound.io/example-id: notificationhubs/v1beta2/notificationhub + 
labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + namespaceNameSelector: + matchLabels: + testing.upbound.io/example-name: example + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: notificationhubs.azure.upbound.io/v1beta1 +kind: NotificationHubNamespace +metadata: + annotations: + meta.upbound.io/example-id: notificationhubs/v1beta2/notificationhub + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + namespaceType: NotificationHub + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: Free + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: notificationhubs/v1beta2/notificationhub + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/operationalinsights/v1beta1/loganalyticsdataexportrule.yaml b/examples-generated/operationalinsights/v1beta1/loganalyticsdataexportrule.yaml index b647c3954..2aec5db20 100644 --- a/examples-generated/operationalinsights/v1beta1/loganalyticsdataexportrule.yaml +++ b/examples-generated/operationalinsights/v1beta1/loganalyticsdataexportrule.yaml @@ -23,7 +23,7 @@ spec: --- -apiVersion: operationalinsights.azure.upbound.io/v1beta1 +apiVersion: operationalinsights.azure.upbound.io/v1beta2 kind: Workspace metadata: annotations: @@ -56,7 +56,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: diff --git a/examples-generated/operationalinsights/v1beta1/loganalyticsdatasourcewindowsevent.yaml b/examples-generated/operationalinsights/v1beta1/loganalyticsdatasourcewindowsevent.yaml index 5b1990036..a24a3a94d 100644 --- 
a/examples-generated/operationalinsights/v1beta1/loganalyticsdatasourcewindowsevent.yaml +++ b/examples-generated/operationalinsights/v1beta1/loganalyticsdatasourcewindowsevent.yaml @@ -20,7 +20,7 @@ spec: --- -apiVersion: operationalinsights.azure.upbound.io/v1beta1 +apiVersion: operationalinsights.azure.upbound.io/v1beta2 kind: Workspace metadata: annotations: diff --git a/examples-generated/operationalinsights/v1beta1/loganalyticsdatasourcewindowsperformancecounter.yaml b/examples-generated/operationalinsights/v1beta1/loganalyticsdatasourcewindowsperformancecounter.yaml index 0d7607368..76f533398 100644 --- a/examples-generated/operationalinsights/v1beta1/loganalyticsdatasourcewindowsperformancecounter.yaml +++ b/examples-generated/operationalinsights/v1beta1/loganalyticsdatasourcewindowsperformancecounter.yaml @@ -21,7 +21,7 @@ spec: --- -apiVersion: operationalinsights.azure.upbound.io/v1beta1 +apiVersion: operationalinsights.azure.upbound.io/v1beta2 kind: Workspace metadata: annotations: diff --git a/examples-generated/operationalinsights/v1beta1/loganalyticslinkedservice.yaml b/examples-generated/operationalinsights/v1beta1/loganalyticslinkedservice.yaml index f5c7a1544..ee036521a 100644 --- a/examples-generated/operationalinsights/v1beta1/loganalyticslinkedservice.yaml +++ b/examples-generated/operationalinsights/v1beta1/loganalyticslinkedservice.yaml @@ -20,7 +20,7 @@ spec: --- -apiVersion: automation.azure.upbound.io/v1beta1 +apiVersion: automation.azure.upbound.io/v1beta2 kind: Account metadata: annotations: @@ -40,7 +40,7 @@ spec: --- -apiVersion: operationalinsights.azure.upbound.io/v1beta1 +apiVersion: operationalinsights.azure.upbound.io/v1beta2 kind: Workspace metadata: annotations: diff --git a/examples-generated/operationalinsights/v1beta1/loganalyticslinkedstorageaccount.yaml b/examples-generated/operationalinsights/v1beta1/loganalyticslinkedstorageaccount.yaml index c48365d7f..720dfaa4b 100644 --- 
a/examples-generated/operationalinsights/v1beta1/loganalyticslinkedstorageaccount.yaml +++ b/examples-generated/operationalinsights/v1beta1/loganalyticslinkedstorageaccount.yaml @@ -20,7 +20,7 @@ spec: --- -apiVersion: operationalinsights.azure.upbound.io/v1beta1 +apiVersion: operationalinsights.azure.upbound.io/v1beta2 kind: Workspace metadata: annotations: @@ -52,7 +52,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: diff --git a/examples-generated/operationalinsights/v1beta1/loganalyticssavedsearch.yaml b/examples-generated/operationalinsights/v1beta1/loganalyticssavedsearch.yaml index f72a7bdd9..8f3e55410 100644 --- a/examples-generated/operationalinsights/v1beta1/loganalyticssavedsearch.yaml +++ b/examples-generated/operationalinsights/v1beta1/loganalyticssavedsearch.yaml @@ -17,7 +17,7 @@ spec: --- -apiVersion: operationalinsights.azure.upbound.io/v1beta1 +apiVersion: operationalinsights.azure.upbound.io/v1beta2 kind: Workspace metadata: annotations: diff --git a/examples-generated/operationalinsights/v1beta2/workspace.yaml b/examples-generated/operationalinsights/v1beta2/workspace.yaml new file mode 100644 index 000000000..91a5a176c --- /dev/null +++ b/examples-generated/operationalinsights/v1beta2/workspace.yaml @@ -0,0 +1,30 @@ +apiVersion: operationalinsights.azure.upbound.io/v1beta2 +kind: Workspace +metadata: + annotations: + meta.upbound.io/example-id: operationalinsights/v1beta2/workspace + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + retentionInDays: 30 + sku: PerGB2018 + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: operationalinsights/v1beta2/workspace + labels: + testing.upbound.io/example-name: example + name: example +spec: 
+ forProvider: + location: West Europe diff --git a/examples-generated/operationsmanagement/v1beta2/loganalyticssolution.yaml b/examples-generated/operationsmanagement/v1beta2/loganalyticssolution.yaml new file mode 100644 index 000000000..70621c10d --- /dev/null +++ b/examples-generated/operationsmanagement/v1beta2/loganalyticssolution.yaml @@ -0,0 +1,56 @@ +apiVersion: operationsmanagement.azure.upbound.io/v1beta2 +kind: LogAnalyticsSolution +metadata: + annotations: + meta.upbound.io/example-id: operationsmanagement/v1beta2/loganalyticssolution + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + plan: + - product: OMSGallery/ContainerInsights + publisher: Microsoft + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + solutionName: ContainerInsights + workspaceNameSelector: + matchLabels: + testing.upbound.io/example-name: example + workspaceResourceIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: operationalinsights.azure.upbound.io/v1beta2 +kind: Workspace +metadata: + annotations: + meta.upbound.io/example-id: operationsmanagement/v1beta2/loganalyticssolution + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sku: PerGB2018 + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: operationsmanagement/v1beta2/loganalyticssolution + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/orbital/v1beta1/contactprofile.yaml b/examples-generated/orbital/v1beta1/contactprofile.yaml index f743bb9d1..81abbaebe 100644 --- a/examples-generated/orbital/v1beta1/contactprofile.yaml +++ 
b/examples-generated/orbital/v1beta1/contactprofile.yaml @@ -47,7 +47,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: Subnet metadata: annotations: @@ -77,7 +77,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: VirtualNetwork metadata: annotations: diff --git a/examples-generated/policyinsights/v1beta1/resourcepolicyremediation.yaml b/examples-generated/policyinsights/v1beta1/resourcepolicyremediation.yaml index e689aed31..4cdb9ea83 100644 --- a/examples-generated/policyinsights/v1beta1/resourcepolicyremediation.yaml +++ b/examples-generated/policyinsights/v1beta1/resourcepolicyremediation.yaml @@ -48,7 +48,7 @@ spec: --- -apiVersion: authorization.azure.upbound.io/v1beta1 +apiVersion: authorization.azure.upbound.io/v1beta2 kind: ResourceGroupPolicyAssignment metadata: annotations: @@ -67,7 +67,7 @@ spec: --- -apiVersion: authorization.azure.upbound.io/v1beta1 +apiVersion: authorization.azure.upbound.io/v1beta2 kind: ResourcePolicyAssignment metadata: annotations: @@ -91,7 +91,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: VirtualNetwork metadata: annotations: diff --git a/examples-generated/policyinsights/v1beta1/subscriptionpolicyremediation.yaml b/examples-generated/policyinsights/v1beta1/subscriptionpolicyremediation.yaml index d947d5870..dff928c6a 100644 --- a/examples-generated/policyinsights/v1beta1/subscriptionpolicyremediation.yaml +++ b/examples-generated/policyinsights/v1beta1/subscriptionpolicyremediation.yaml @@ -15,7 +15,7 @@ spec: --- -apiVersion: authorization.azure.upbound.io/v1beta1 +apiVersion: authorization.azure.upbound.io/v1beta2 kind: SubscriptionPolicyAssignment metadata: annotations: diff --git a/examples-generated/purview/v1beta2/account.yaml b/examples-generated/purview/v1beta2/account.yaml new file mode 100644 index 000000000..0e950cdc6 --- 
/dev/null +++ b/examples-generated/purview/v1beta2/account.yaml @@ -0,0 +1,30 @@ +apiVersion: purview.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: purview/v1beta2/account + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + identity: + - type: SystemAssigned + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: purview/v1beta2/account + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/recoveryservices/v1beta1/backupcontainerstorageaccount.yaml b/examples-generated/recoveryservices/v1beta1/backupcontainerstorageaccount.yaml index aa1e41963..635784139 100644 --- a/examples-generated/recoveryservices/v1beta1/backupcontainerstorageaccount.yaml +++ b/examples-generated/recoveryservices/v1beta1/backupcontainerstorageaccount.yaml @@ -20,7 +20,7 @@ spec: --- -apiVersion: recoveryservices.azure.upbound.io/v1beta1 +apiVersion: recoveryservices.azure.upbound.io/v1beta2 kind: Vault metadata: annotations: @@ -52,7 +52,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: diff --git a/examples-generated/recoveryservices/v1beta1/backupprotectedfileshare.yaml b/examples-generated/recoveryservices/v1beta1/backupprotectedfileshare.yaml index a6318ce0c..ec4dd173c 100644 --- a/examples-generated/recoveryservices/v1beta1/backupprotectedfileshare.yaml +++ b/examples-generated/recoveryservices/v1beta1/backupprotectedfileshare.yaml @@ -48,7 +48,7 @@ spec: --- -apiVersion: recoveryservices.azure.upbound.io/v1beta1 +apiVersion: recoveryservices.azure.upbound.io/v1beta2 kind: BackupPolicyFileShare metadata: annotations: @@ -72,7 +72,7 @@ 
spec: --- -apiVersion: recoveryservices.azure.upbound.io/v1beta1 +apiVersion: recoveryservices.azure.upbound.io/v1beta2 kind: Vault metadata: annotations: @@ -104,7 +104,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: diff --git a/examples-generated/recoveryservices/v1beta1/backupprotectedvm.yaml b/examples-generated/recoveryservices/v1beta1/backupprotectedvm.yaml index 7c7276801..66b2a2eff 100644 --- a/examples-generated/recoveryservices/v1beta1/backupprotectedvm.yaml +++ b/examples-generated/recoveryservices/v1beta1/backupprotectedvm.yaml @@ -21,7 +21,7 @@ spec: --- -apiVersion: recoveryservices.azure.upbound.io/v1beta1 +apiVersion: recoveryservices.azure.upbound.io/v1beta2 kind: BackupPolicyVM metadata: annotations: @@ -45,7 +45,7 @@ spec: --- -apiVersion: recoveryservices.azure.upbound.io/v1beta1 +apiVersion: recoveryservices.azure.upbound.io/v1beta2 kind: Vault metadata: annotations: diff --git a/examples-generated/recoveryservices/v1beta1/siterecoveryfabric.yaml b/examples-generated/recoveryservices/v1beta1/siterecoveryfabric.yaml index be7cd7b92..85d598631 100644 --- a/examples-generated/recoveryservices/v1beta1/siterecoveryfabric.yaml +++ b/examples-generated/recoveryservices/v1beta1/siterecoveryfabric.yaml @@ -18,7 +18,7 @@ spec: --- -apiVersion: recoveryservices.azure.upbound.io/v1beta1 +apiVersion: recoveryservices.azure.upbound.io/v1beta2 kind: Vault metadata: annotations: diff --git a/examples-generated/recoveryservices/v1beta1/siterecoverynetworkmapping.yaml b/examples-generated/recoveryservices/v1beta1/siterecoverynetworkmapping.yaml index e079eb8fe..0b8a381ca 100644 --- a/examples-generated/recoveryservices/v1beta1/siterecoverynetworkmapping.yaml +++ b/examples-generated/recoveryservices/v1beta1/siterecoverynetworkmapping.yaml @@ -26,7 +26,7 @@ spec: --- -apiVersion: recoveryservices.azure.upbound.io/v1beta1 +apiVersion: 
recoveryservices.azure.upbound.io/v1beta2 kind: Vault metadata: annotations: @@ -112,7 +112,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: VirtualNetwork metadata: annotations: @@ -131,7 +131,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: VirtualNetwork metadata: annotations: diff --git a/examples-generated/recoveryservices/v1beta1/siterecoveryprotectioncontainer.yaml b/examples-generated/recoveryservices/v1beta1/siterecoveryprotectioncontainer.yaml index 30702822a..628fee1e9 100644 --- a/examples-generated/recoveryservices/v1beta1/siterecoveryprotectioncontainer.yaml +++ b/examples-generated/recoveryservices/v1beta1/siterecoveryprotectioncontainer.yaml @@ -20,7 +20,7 @@ spec: --- -apiVersion: recoveryservices.azure.upbound.io/v1beta1 +apiVersion: recoveryservices.azure.upbound.io/v1beta2 kind: Vault metadata: annotations: diff --git a/examples-generated/recoveryservices/v1beta1/siterecoveryreplicationpolicy.yaml b/examples-generated/recoveryservices/v1beta1/siterecoveryreplicationpolicy.yaml index d4e3522fe..7709b0e6c 100644 --- a/examples-generated/recoveryservices/v1beta1/siterecoveryreplicationpolicy.yaml +++ b/examples-generated/recoveryservices/v1beta1/siterecoveryreplicationpolicy.yaml @@ -19,7 +19,7 @@ spec: --- -apiVersion: recoveryservices.azure.upbound.io/v1beta1 +apiVersion: recoveryservices.azure.upbound.io/v1beta2 kind: Vault metadata: annotations: diff --git a/examples-generated/recoveryservices/v1beta2/backuppolicyfileshare.yaml b/examples-generated/recoveryservices/v1beta2/backuppolicyfileshare.yaml new file mode 100644 index 000000000..0b84f9408 --- /dev/null +++ b/examples-generated/recoveryservices/v1beta2/backuppolicyfileshare.yaml @@ -0,0 +1,77 @@ +apiVersion: recoveryservices.azure.upbound.io/v1beta2 +kind: BackupPolicyFileShare +metadata: + annotations: + meta.upbound.io/example-id: 
recoveryservices/v1beta2/backuppolicyfileshare + labels: + testing.upbound.io/example-name: policy + name: policy +spec: + forProvider: + backup: + - frequency: Daily + time: "23:00" + recoveryVaultNameSelector: + matchLabels: + testing.upbound.io/example-name: example + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + retentionDaily: + - count: 10 + retentionMonthly: + - count: 7 + weekdays: + - Sunday + - Wednesday + weeks: + - First + - Last + retentionWeekly: + - count: 7 + weekdays: + - Sunday + - Wednesday + - Friday + - Saturday + retentionYearly: + - count: 7 + months: + - January + weekdays: + - Sunday + weeks: + - Last + timezone: UTC + +--- + +apiVersion: recoveryservices.azure.upbound.io/v1beta2 +kind: Vault +metadata: + annotations: + meta.upbound.io/example-id: recoveryservices/v1beta2/backuppolicyfileshare + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sku: Standard + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: recoveryservices/v1beta2/backuppolicyfileshare + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/recoveryservices/v1beta2/backuppolicyvm.yaml b/examples-generated/recoveryservices/v1beta2/backuppolicyvm.yaml new file mode 100644 index 000000000..699f89d8d --- /dev/null +++ b/examples-generated/recoveryservices/v1beta2/backuppolicyvm.yaml @@ -0,0 +1,77 @@ +apiVersion: recoveryservices.azure.upbound.io/v1beta2 +kind: BackupPolicyVM +metadata: + annotations: + meta.upbound.io/example-id: recoveryservices/v1beta2/backuppolicyvm + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + backup: + - frequency: Daily + time: "23:00" + 
recoveryVaultNameSelector: + matchLabels: + testing.upbound.io/example-name: example + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + retentionDaily: + - count: 10 + retentionMonthly: + - count: 7 + weekdays: + - Sunday + - Wednesday + weeks: + - First + - Last + retentionWeekly: + - count: 42 + weekdays: + - Sunday + - Wednesday + - Friday + - Saturday + retentionYearly: + - count: 77 + months: + - January + weekdays: + - Sunday + weeks: + - Last + timezone: UTC + +--- + +apiVersion: recoveryservices.azure.upbound.io/v1beta2 +kind: Vault +metadata: + annotations: + meta.upbound.io/example-id: recoveryservices/v1beta2/backuppolicyvm + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sku: Standard + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: recoveryservices/v1beta2/backuppolicyvm + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/recoveryservices/v1beta2/backuppolicyvmworkload.yaml b/examples-generated/recoveryservices/v1beta2/backuppolicyvmworkload.yaml new file mode 100644 index 000000000..e45b72b29 --- /dev/null +++ b/examples-generated/recoveryservices/v1beta2/backuppolicyvmworkload.yaml @@ -0,0 +1,65 @@ +apiVersion: recoveryservices.azure.upbound.io/v1beta2 +kind: BackupPolicyVMWorkload +metadata: + annotations: + meta.upbound.io/example-id: recoveryservices/v1beta2/backuppolicyvmworkload + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + protectionPolicy: + - backup: + - frequency: Daily + time: "15:00" + policyType: Full + retentionDaily: + - count: 8 + - backup: + - frequencyInMinutes: 15 + policyType: Log + simpleRetention: + - count: 8 + 
recoveryVaultNameSelector: + matchLabels: + testing.upbound.io/example-name: example + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + settings: + - compressionEnabled: false + timeZone: UTC + workloadType: SQLDataBase + +--- + +apiVersion: recoveryservices.azure.upbound.io/v1beta2 +kind: Vault +metadata: + annotations: + meta.upbound.io/example-id: recoveryservices/v1beta2/backuppolicyvmworkload + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sku: Standard + softDeleteEnabled: false + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: recoveryservices/v1beta2/backuppolicyvmworkload + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/recoveryservices/v1beta2/siterecoveryprotectioncontainermapping.yaml b/examples-generated/recoveryservices/v1beta2/siterecoveryprotectioncontainermapping.yaml new file mode 100644 index 000000000..ec7206410 --- /dev/null +++ b/examples-generated/recoveryservices/v1beta2/siterecoveryprotectioncontainermapping.yaml @@ -0,0 +1,179 @@ +apiVersion: recoveryservices.azure.upbound.io/v1beta2 +kind: SiteRecoveryProtectionContainerMapping +metadata: + annotations: + meta.upbound.io/example-id: recoveryservices/v1beta2/siterecoveryprotectioncontainermapping + labels: + testing.upbound.io/example-name: container-mapping + name: container-mapping +spec: + forProvider: + recoveryFabricNameSelector: + matchLabels: + testing.upbound.io/example-name: primary + recoveryReplicationPolicyIdSelector: + matchLabels: + testing.upbound.io/example-name: policy + recoverySourceProtectionContainerNameSelector: + matchLabels: + testing.upbound.io/example-name: primary + 
recoveryTargetProtectionContainerIdSelector: + matchLabels: + testing.upbound.io/example-name: secondary + recoveryVaultNameSelector: + matchLabels: + testing.upbound.io/example-name: vault + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: secondary + +--- + +apiVersion: recoveryservices.azure.upbound.io/v1beta2 +kind: Vault +metadata: + annotations: + meta.upbound.io/example-id: recoveryservices/v1beta2/siterecoveryprotectioncontainermapping + labels: + testing.upbound.io/example-name: vault + name: vault +spec: + forProvider: + location: East US + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sku: Standard + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: recoveryservices/v1beta2/siterecoveryprotectioncontainermapping + labels: + testing.upbound.io/example-name: primary + name: primary +spec: + forProvider: + location: West US + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: recoveryservices/v1beta2/siterecoveryprotectioncontainermapping + labels: + testing.upbound.io/example-name: secondary + name: secondary +spec: + forProvider: + location: East US + +--- + +apiVersion: recoveryservices.azure.upbound.io/v1beta1 +kind: SiteRecoveryFabric +metadata: + annotations: + meta.upbound.io/example-id: recoveryservices/v1beta2/siterecoveryprotectioncontainermapping + labels: + testing.upbound.io/example-name: primary + name: primary +spec: + forProvider: + location: West US + recoveryVaultNameSelector: + matchLabels: + testing.upbound.io/example-name: example + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: recoveryservices.azure.upbound.io/v1beta1 +kind: SiteRecoveryFabric +metadata: + annotations: + meta.upbound.io/example-id: recoveryservices/v1beta2/siterecoveryprotectioncontainermapping + 
labels: + testing.upbound.io/example-name: secondary + name: secondary +spec: + forProvider: + location: East US + recoveryVaultNameSelector: + matchLabels: + testing.upbound.io/example-name: example + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: recoveryservices.azure.upbound.io/v1beta1 +kind: SiteRecoveryProtectionContainer +metadata: + annotations: + meta.upbound.io/example-id: recoveryservices/v1beta2/siterecoveryprotectioncontainermapping + labels: + testing.upbound.io/example-name: primary + name: primary +spec: + forProvider: + recoveryFabricNameSelector: + matchLabels: + testing.upbound.io/example-name: example + recoveryVaultNameSelector: + matchLabels: + testing.upbound.io/example-name: example + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: recoveryservices.azure.upbound.io/v1beta1 +kind: SiteRecoveryProtectionContainer +metadata: + annotations: + meta.upbound.io/example-id: recoveryservices/v1beta2/siterecoveryprotectioncontainermapping + labels: + testing.upbound.io/example-name: secondary + name: secondary +spec: + forProvider: + recoveryFabricNameSelector: + matchLabels: + testing.upbound.io/example-name: example + recoveryVaultNameSelector: + matchLabels: + testing.upbound.io/example-name: example + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: recoveryservices.azure.upbound.io/v1beta1 +kind: SiteRecoveryReplicationPolicy +metadata: + annotations: + meta.upbound.io/example-id: recoveryservices/v1beta2/siterecoveryprotectioncontainermapping + labels: + testing.upbound.io/example-name: policy + name: policy +spec: + forProvider: + applicationConsistentSnapshotFrequencyInMinutes: ${4 * 60} + recoveryPointRetentionInMinutes: ${24 * 60} + recoveryVaultNameSelector: + matchLabels: + testing.upbound.io/example-name: example + resourceGroupNameSelector: + matchLabels: + 
testing.upbound.io/example-name: example diff --git a/examples-generated/recoveryservices/v1beta2/vault.yaml b/examples-generated/recoveryservices/v1beta2/vault.yaml new file mode 100644 index 000000000..14867f268 --- /dev/null +++ b/examples-generated/recoveryservices/v1beta2/vault.yaml @@ -0,0 +1,30 @@ +apiVersion: recoveryservices.azure.upbound.io/v1beta2 +kind: Vault +metadata: + annotations: + meta.upbound.io/example-id: recoveryservices/v1beta2/vault + labels: + testing.upbound.io/example-name: vault + name: vault +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sku: Standard + softDeleteEnabled: true + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: recoveryservices/v1beta2/vault + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/resources/v1beta2/resourcedeploymentscriptazurecli.yaml b/examples-generated/resources/v1beta2/resourcedeploymentscriptazurecli.yaml new file mode 100644 index 000000000..7b672785c --- /dev/null +++ b/examples-generated/resources/v1beta2/resourcedeploymentscriptazurecli.yaml @@ -0,0 +1,61 @@ +apiVersion: resources.azure.upbound.io/v1beta2 +kind: ResourceDeploymentScriptAzureCli +metadata: + annotations: + meta.upbound.io/example-id: resources/v1beta2/resourcedeploymentscriptazurecli + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + cleanupPreference: OnSuccess + commandLine: '''foo'' ''bar''' + forceUpdateTag: "1" + identity: + - identityIdsRefs: + - name: example + type: UserAssigned + location: West Europe + name: example-rdsac + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + retentionInterval: P1D + scriptContent: |2 + echo "{\"name\":{\"displayName\":\"$1 $2\"}}" > $AZ_SCRIPTS_OUTPUT_PATH + tags: 
+ key: value + timeout: PT30M + version: 2.40.0 + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: resources/v1beta2/resourcedeploymentscriptazurecli + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: managedidentity.azure.upbound.io/v1beta1 +kind: UserAssignedIdentity +metadata: + annotations: + meta.upbound.io/example-id: resources/v1beta2/resourcedeploymentscriptazurecli + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + name: example-uai + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/resources/v1beta2/resourcedeploymentscriptazurepowershell.yaml b/examples-generated/resources/v1beta2/resourcedeploymentscriptazurepowershell.yaml new file mode 100644 index 000000000..b2ddd377c --- /dev/null +++ b/examples-generated/resources/v1beta2/resourcedeploymentscriptazurepowershell.yaml @@ -0,0 +1,65 @@ +apiVersion: resources.azure.upbound.io/v1beta2 +kind: ResourceDeploymentScriptAzurePowerShell +metadata: + annotations: + meta.upbound.io/example-id: resources/v1beta2/resourcedeploymentscriptazurepowershell + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + cleanupPreference: OnSuccess + commandLine: -name "John Dole" + forceUpdateTag: "1" + identity: + - identityIdsRefs: + - name: example + type: UserAssigned + location: West Europe + name: example-rdsaps + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + retentionInterval: P1D + scriptContent: |2 + param([string] $name) + $output = 'Hello {0}.' 
-f $name + Write-Output $output + $DeploymentScriptOutputs = @{} + $DeploymentScriptOutputs['text'] = $output + tags: + key: value + timeout: PT30M + version: "8.3" + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: resources/v1beta2/resourcedeploymentscriptazurepowershell + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: managedidentity.azure.upbound.io/v1beta1 +kind: UserAssignedIdentity +metadata: + annotations: + meta.upbound.io/example-id: resources/v1beta2/resourcedeploymentscriptazurepowershell + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + name: example-uai + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/search/v1beta1/sharedprivatelinkservice.yaml b/examples-generated/search/v1beta1/sharedprivatelinkservice.yaml index 20df3a154..cd9933adf 100644 --- a/examples-generated/search/v1beta1/sharedprivatelinkservice.yaml +++ b/examples-generated/search/v1beta1/sharedprivatelinkservice.yaml @@ -33,7 +33,7 @@ spec: --- -apiVersion: search.azure.upbound.io/v1beta1 +apiVersion: search.azure.upbound.io/v1beta2 kind: Service metadata: annotations: @@ -51,7 +51,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: diff --git a/examples-generated/search/v1beta2/service.yaml b/examples-generated/search/v1beta2/service.yaml new file mode 100644 index 000000000..f32d6c7b8 --- /dev/null +++ b/examples-generated/search/v1beta2/service.yaml @@ -0,0 +1,29 @@ +apiVersion: search.azure.upbound.io/v1beta2 +kind: Service +metadata: + annotations: + meta.upbound.io/example-id: search/v1beta2/service + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + 
location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sku: standard + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: search/v1beta2/service + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/security/v1beta1/advancedthreatprotection.yaml b/examples-generated/security/v1beta1/advancedthreatprotection.yaml index 2c7a878e3..7ae9354e9 100644 --- a/examples-generated/security/v1beta1/advancedthreatprotection.yaml +++ b/examples-generated/security/v1beta1/advancedthreatprotection.yaml @@ -27,7 +27,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: diff --git a/examples-generated/security/v1beta1/securitycenterservervulnerabilityassessment.yaml b/examples-generated/security/v1beta1/securitycenterservervulnerabilityassessment.yaml index 330114d30..0873c700a 100644 --- a/examples-generated/security/v1beta1/securitycenterservervulnerabilityassessment.yaml +++ b/examples-generated/security/v1beta1/securitycenterservervulnerabilityassessment.yaml @@ -14,7 +14,7 @@ spec: --- -apiVersion: compute.azure.upbound.io/v1beta1 +apiVersion: compute.azure.upbound.io/v1beta2 kind: LinuxVirtualMachine metadata: annotations: @@ -85,7 +85,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: Subnet metadata: annotations: @@ -106,7 +106,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: VirtualNetwork metadata: annotations: diff --git a/examples-generated/security/v1beta1/securitycenterservervulnerabilityassessmentvirtualmachine.yaml b/examples-generated/security/v1beta1/securitycenterservervulnerabilityassessmentvirtualmachine.yaml index 
467823ce1..555795803 100644 --- a/examples-generated/security/v1beta1/securitycenterservervulnerabilityassessmentvirtualmachine.yaml +++ b/examples-generated/security/v1beta1/securitycenterservervulnerabilityassessmentvirtualmachine.yaml @@ -14,7 +14,7 @@ spec: --- -apiVersion: compute.azure.upbound.io/v1beta1 +apiVersion: compute.azure.upbound.io/v1beta2 kind: LinuxVirtualMachine metadata: annotations: @@ -85,7 +85,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: Subnet metadata: annotations: @@ -106,7 +106,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: VirtualNetwork metadata: annotations: diff --git a/examples-generated/security/v1beta1/securitycenterworkspace.yaml b/examples-generated/security/v1beta1/securitycenterworkspace.yaml index 2773aba0b..788ced3a9 100644 --- a/examples-generated/security/v1beta1/securitycenterworkspace.yaml +++ b/examples-generated/security/v1beta1/securitycenterworkspace.yaml @@ -15,7 +15,7 @@ spec: --- -apiVersion: operationalinsights.azure.upbound.io/v1beta1 +apiVersion: operationalinsights.azure.upbound.io/v1beta2 kind: Workspace metadata: annotations: diff --git a/examples-generated/security/v1beta2/iotsecuritydevicegroup.yaml b/examples-generated/security/v1beta2/iotsecuritydevicegroup.yaml new file mode 100644 index 000000000..f837872fc --- /dev/null +++ b/examples-generated/security/v1beta2/iotsecuritydevicegroup.yaml @@ -0,0 +1,77 @@ +apiVersion: security.azure.upbound.io/v1beta2 +kind: IOTSecurityDeviceGroup +metadata: + annotations: + meta.upbound.io/example-id: security/v1beta2/iotsecuritydevicegroup + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + allowRule: + - connectionToIpsNotAllowed: + - 10.0.0.0/24 + iothubIdSelector: + matchLabels: + testing.upbound.io/example-name: example + name: example-device-security-group + rangeRule: + - duration: PT5M + 
max: 30 + min: 0 + type: ActiveConnectionsNotInAllowedRange + +--- + +apiVersion: security.azure.upbound.io/v1beta2 +kind: IOTSecuritySolution +metadata: + annotations: + meta.upbound.io/example-id: security/v1beta2/iotsecuritydevicegroup + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + displayName: Iot Security Solution + iothubIdsRefs: + - name: example + location: West Europe + name: example-Iot-Security-Solution + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: devices.azure.upbound.io/v1beta2 +kind: IOTHub +metadata: + annotations: + meta.upbound.io/example-id: security/v1beta2/iotsecuritydevicegroup + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sku: + - capacity: "1" + name: S1 + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: security/v1beta2/iotsecuritydevicegroup + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/security/v1beta2/iotsecuritysolution.yaml b/examples-generated/security/v1beta2/iotsecuritysolution.yaml new file mode 100644 index 000000000..56d59b2f0 --- /dev/null +++ b/examples-generated/security/v1beta2/iotsecuritysolution.yaml @@ -0,0 +1,52 @@ +apiVersion: security.azure.upbound.io/v1beta2 +kind: IOTSecuritySolution +metadata: + annotations: + meta.upbound.io/example-id: security/v1beta2/iotsecuritysolution + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + displayName: Iot Security Solution + iothubIdsRefs: + - name: example + location: West Europe + name: example-Iot-Security-Solution + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + 
+--- + +apiVersion: devices.azure.upbound.io/v1beta2 +kind: IOTHub +metadata: + annotations: + meta.upbound.io/example-id: security/v1beta2/iotsecuritysolution + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sku: + - capacity: "1" + name: S1 + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: security/v1beta2/iotsecuritysolution + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/security/v1beta2/securitycenterassessment.yaml b/examples-generated/security/v1beta2/securitycenterassessment.yaml new file mode 100644 index 000000000..08302021c --- /dev/null +++ b/examples-generated/security/v1beta2/securitycenterassessment.yaml @@ -0,0 +1,128 @@ +apiVersion: security.azure.upbound.io/v1beta2 +kind: SecurityCenterAssessment +metadata: + annotations: + meta.upbound.io/example-id: security/v1beta2/securitycenterassessment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + assessmentPolicyIdSelector: + matchLabels: + testing.upbound.io/example-name: example + status: + - code: Healthy + targetResourceIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: compute.azure.upbound.io/v1beta2 +kind: LinuxVirtualMachineScaleSet +metadata: + annotations: + meta.upbound.io/example-id: security/v1beta2/securitycenterassessment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + adminSshKey: + - publicKey: ${file("~/.ssh/id_rsa.pub")} + username: adminuser + adminUsername: adminuser + instances: 1 + location: West Europe + networkInterface: + - ipConfiguration: + - name: internal + primary: true + subnetIdSelector: + matchLabels: + 
testing.upbound.io/example-name: internal + name: example + primary: true + osDisk: + - caching: ReadWrite + storageAccountType: Standard_LRS + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sku: Standard_F2 + sourceImageReference: + - offer: 0001-com-ubuntu-server-jammy + publisher: Canonical + sku: 22_04-lts + version: latest + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: security/v1beta2/securitycenterassessment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: security.azure.upbound.io/v1beta1 +kind: SecurityCenterAssessmentPolicy +metadata: + annotations: + meta.upbound.io/example-id: security/v1beta2/securitycenterassessment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + description: Test Description + displayName: Test Display Name + severity: Medium + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: security/v1beta2/securitycenterassessment + labels: + testing.upbound.io/example-name: internal + name: internal +spec: + forProvider: + addressPrefixes: + - 10.0.2.0/24 + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + virtualNetworkNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: VirtualNetwork +metadata: + annotations: + meta.upbound.io/example-id: security/v1beta2/securitycenterassessment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressSpace: + - 10.0.0.0/16 + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/securityinsights/v1beta1/sentinelalertrulefusion.yaml 
b/examples-generated/securityinsights/v1beta1/sentinelalertrulefusion.yaml index af7c44e23..82d91ceaf 100644 --- a/examples-generated/securityinsights/v1beta1/sentinelalertrulefusion.yaml +++ b/examples-generated/securityinsights/v1beta1/sentinelalertrulefusion.yaml @@ -16,7 +16,7 @@ spec: --- -apiVersion: operationsmanagement.azure.upbound.io/v1beta1 +apiVersion: operationsmanagement.azure.upbound.io/v1beta2 kind: LogAnalyticsSolution metadata: annotations: @@ -43,7 +43,7 @@ spec: --- -apiVersion: operationalinsights.azure.upbound.io/v1beta1 +apiVersion: operationalinsights.azure.upbound.io/v1beta2 kind: Workspace metadata: annotations: diff --git a/examples-generated/securityinsights/v1beta1/sentinelalertrulemachinelearningbehavioranalytics.yaml b/examples-generated/securityinsights/v1beta1/sentinelalertrulemachinelearningbehavioranalytics.yaml index eec0d3b2e..42ac4e3b6 100644 --- a/examples-generated/securityinsights/v1beta1/sentinelalertrulemachinelearningbehavioranalytics.yaml +++ b/examples-generated/securityinsights/v1beta1/sentinelalertrulemachinelearningbehavioranalytics.yaml @@ -16,7 +16,7 @@ spec: --- -apiVersion: operationalinsights.azure.upbound.io/v1beta1 +apiVersion: operationalinsights.azure.upbound.io/v1beta2 kind: Workspace metadata: annotations: diff --git a/examples-generated/securityinsights/v1beta1/sentinelalertrulemssecurityincident.yaml b/examples-generated/securityinsights/v1beta1/sentinelalertrulemssecurityincident.yaml index 7278810fd..47d5d047e 100644 --- a/examples-generated/securityinsights/v1beta1/sentinelalertrulemssecurityincident.yaml +++ b/examples-generated/securityinsights/v1beta1/sentinelalertrulemssecurityincident.yaml @@ -18,7 +18,7 @@ spec: --- -apiVersion: operationalinsights.azure.upbound.io/v1beta1 +apiVersion: operationalinsights.azure.upbound.io/v1beta2 kind: Workspace metadata: annotations: diff --git a/examples-generated/securityinsights/v1beta1/sentinelautomationrule.yaml 
b/examples-generated/securityinsights/v1beta1/sentinelautomationrule.yaml index 8eb24b958..1f309c05c 100644 --- a/examples-generated/securityinsights/v1beta1/sentinelautomationrule.yaml +++ b/examples-generated/securityinsights/v1beta1/sentinelautomationrule.yaml @@ -20,7 +20,7 @@ spec: --- -apiVersion: operationalinsights.azure.upbound.io/v1beta1 +apiVersion: operationalinsights.azure.upbound.io/v1beta2 kind: Workspace metadata: annotations: diff --git a/examples-generated/securityinsights/v1beta1/sentineldataconnectoriot.yaml b/examples-generated/securityinsights/v1beta1/sentineldataconnectoriot.yaml index 2982ec01f..1b049db2f 100644 --- a/examples-generated/securityinsights/v1beta1/sentineldataconnectoriot.yaml +++ b/examples-generated/securityinsights/v1beta1/sentineldataconnectoriot.yaml @@ -14,7 +14,7 @@ spec: --- -apiVersion: operationalinsights.azure.upbound.io/v1beta1 +apiVersion: operationalinsights.azure.upbound.io/v1beta2 kind: Workspace metadata: annotations: diff --git a/examples-generated/securityinsights/v1beta1/sentinelloganalyticsworkspaceonboarding.yaml b/examples-generated/securityinsights/v1beta1/sentinelloganalyticsworkspaceonboarding.yaml index 8c270474a..904ecae9c 100644 --- a/examples-generated/securityinsights/v1beta1/sentinelloganalyticsworkspaceonboarding.yaml +++ b/examples-generated/securityinsights/v1beta1/sentinelloganalyticsworkspaceonboarding.yaml @@ -18,7 +18,7 @@ spec: --- -apiVersion: operationalinsights.azure.upbound.io/v1beta1 +apiVersion: operationalinsights.azure.upbound.io/v1beta2 kind: Workspace metadata: annotations: diff --git a/examples-generated/securityinsights/v1beta1/sentinelwatchlist.yaml b/examples-generated/securityinsights/v1beta1/sentinelwatchlist.yaml index 3be93546a..f96c54dbd 100644 --- a/examples-generated/securityinsights/v1beta1/sentinelwatchlist.yaml +++ b/examples-generated/securityinsights/v1beta1/sentinelwatchlist.yaml @@ -16,7 +16,7 @@ spec: --- -apiVersion: 
operationalinsights.azure.upbound.io/v1beta1 +apiVersion: operationalinsights.azure.upbound.io/v1beta2 kind: Workspace metadata: annotations: diff --git a/examples-generated/servicebus/v1beta1/namespaceauthorizationrule.yaml b/examples-generated/servicebus/v1beta1/namespaceauthorizationrule.yaml index 71710bd01..875907279 100644 --- a/examples-generated/servicebus/v1beta1/namespaceauthorizationrule.yaml +++ b/examples-generated/servicebus/v1beta1/namespaceauthorizationrule.yaml @@ -31,7 +31,7 @@ spec: --- -apiVersion: servicebus.azure.upbound.io/v1beta1 +apiVersion: servicebus.azure.upbound.io/v1beta2 kind: ServiceBusNamespace metadata: annotations: diff --git a/examples-generated/servicebus/v1beta1/namespacedisasterrecoveryconfig.yaml b/examples-generated/servicebus/v1beta1/namespacedisasterrecoveryconfig.yaml index 165920d84..a24f6a287 100644 --- a/examples-generated/servicebus/v1beta1/namespacedisasterrecoveryconfig.yaml +++ b/examples-generated/servicebus/v1beta1/namespacedisasterrecoveryconfig.yaml @@ -34,7 +34,7 @@ spec: --- -apiVersion: servicebus.azure.upbound.io/v1beta1 +apiVersion: servicebus.azure.upbound.io/v1beta2 kind: ServiceBusNamespace metadata: annotations: @@ -53,7 +53,7 @@ spec: --- -apiVersion: servicebus.azure.upbound.io/v1beta1 +apiVersion: servicebus.azure.upbound.io/v1beta2 kind: ServiceBusNamespace metadata: annotations: diff --git a/examples-generated/servicebus/v1beta1/namespacenetworkruleset.yaml b/examples-generated/servicebus/v1beta1/namespacenetworkruleset.yaml index ce2684f84..a13671648 100644 --- a/examples-generated/servicebus/v1beta1/namespacenetworkruleset.yaml +++ b/examples-generated/servicebus/v1beta1/namespacenetworkruleset.yaml @@ -37,7 +37,7 @@ spec: --- -apiVersion: servicebus.azure.upbound.io/v1beta1 +apiVersion: servicebus.azure.upbound.io/v1beta2 kind: ServiceBusNamespace metadata: annotations: @@ -56,7 +56,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: 
Subnet metadata: annotations: @@ -79,7 +79,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: VirtualNetwork metadata: annotations: diff --git a/examples-generated/servicebus/v1beta1/queue.yaml b/examples-generated/servicebus/v1beta1/queue.yaml index db2d3fe73..fea9df95a 100644 --- a/examples-generated/servicebus/v1beta1/queue.yaml +++ b/examples-generated/servicebus/v1beta1/queue.yaml @@ -29,7 +29,7 @@ spec: --- -apiVersion: servicebus.azure.upbound.io/v1beta1 +apiVersion: servicebus.azure.upbound.io/v1beta2 kind: ServiceBusNamespace metadata: annotations: diff --git a/examples-generated/servicebus/v1beta1/queueauthorizationrule.yaml b/examples-generated/servicebus/v1beta1/queueauthorizationrule.yaml index 95b7228ec..c21aaa06f 100644 --- a/examples-generated/servicebus/v1beta1/queueauthorizationrule.yaml +++ b/examples-generated/servicebus/v1beta1/queueauthorizationrule.yaml @@ -31,7 +31,7 @@ spec: --- -apiVersion: servicebus.azure.upbound.io/v1beta1 +apiVersion: servicebus.azure.upbound.io/v1beta2 kind: ServiceBusNamespace metadata: annotations: diff --git a/examples-generated/servicebus/v1beta1/topic.yaml b/examples-generated/servicebus/v1beta1/topic.yaml index 8e62ae8ba..52c3cc24e 100644 --- a/examples-generated/servicebus/v1beta1/topic.yaml +++ b/examples-generated/servicebus/v1beta1/topic.yaml @@ -29,7 +29,7 @@ spec: --- -apiVersion: servicebus.azure.upbound.io/v1beta1 +apiVersion: servicebus.azure.upbound.io/v1beta2 kind: ServiceBusNamespace metadata: annotations: diff --git a/examples-generated/servicebus/v1beta1/topicauthorizationrule.yaml b/examples-generated/servicebus/v1beta1/topicauthorizationrule.yaml index 74e6e1def..525867738 100644 --- a/examples-generated/servicebus/v1beta1/topicauthorizationrule.yaml +++ b/examples-generated/servicebus/v1beta1/topicauthorizationrule.yaml @@ -31,7 +31,7 @@ spec: --- -apiVersion: servicebus.azure.upbound.io/v1beta1 +apiVersion: 
servicebus.azure.upbound.io/v1beta2 kind: ServiceBusNamespace metadata: annotations: diff --git a/examples-generated/servicebus/v1beta2/servicebusnamespace.yaml b/examples-generated/servicebus/v1beta2/servicebusnamespace.yaml new file mode 100644 index 000000000..fe20b50c4 --- /dev/null +++ b/examples-generated/servicebus/v1beta2/servicebusnamespace.yaml @@ -0,0 +1,31 @@ +apiVersion: servicebus.azure.upbound.io/v1beta2 +kind: ServiceBusNamespace +metadata: + annotations: + meta.upbound.io/example-id: servicebus/v1beta2/servicebusnamespace + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sku: Standard + tags: + source: terraform + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: servicebus/v1beta2/servicebusnamespace + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/servicebus/v1beta2/subscription.yaml b/examples-generated/servicebus/v1beta2/subscription.yaml new file mode 100644 index 000000000..072c93cd4 --- /dev/null +++ b/examples-generated/servicebus/v1beta2/subscription.yaml @@ -0,0 +1,65 @@ +apiVersion: servicebus.azure.upbound.io/v1beta2 +kind: Subscription +metadata: + annotations: + meta.upbound.io/example-id: servicebus/v1beta2/subscription + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + maxDeliveryCount: 1 + topicIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: servicebus/v1beta2/subscription + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: 
servicebus.azure.upbound.io/v1beta2 +kind: ServiceBusNamespace +metadata: + annotations: + meta.upbound.io/example-id: servicebus/v1beta2/subscription + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sku: Standard + tags: + source: terraform + +--- + +apiVersion: servicebus.azure.upbound.io/v1beta1 +kind: Topic +metadata: + annotations: + meta.upbound.io/example-id: servicebus/v1beta2/subscription + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + enablePartitioning: true + namespaceIdSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/servicebus/v1beta2/subscriptionrule.yaml b/examples-generated/servicebus/v1beta2/subscriptionrule.yaml new file mode 100644 index 000000000..b0ab9523f --- /dev/null +++ b/examples-generated/servicebus/v1beta2/subscriptionrule.yaml @@ -0,0 +1,83 @@ +apiVersion: servicebus.azure.upbound.io/v1beta2 +kind: SubscriptionRule +metadata: + annotations: + meta.upbound.io/example-id: servicebus/v1beta2/subscriptionrule + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + filterType: SqlFilter + sqlFilter: colour = 'red' + subscriptionIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: servicebus/v1beta2/subscriptionrule + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: servicebus.azure.upbound.io/v1beta2 +kind: ServiceBusNamespace +metadata: + annotations: + meta.upbound.io/example-id: servicebus/v1beta2/subscriptionrule + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + 
resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sku: Standard + tags: + source: terraform + +--- + +apiVersion: servicebus.azure.upbound.io/v1beta2 +kind: Subscription +metadata: + annotations: + meta.upbound.io/example-id: servicebus/v1beta2/subscriptionrule + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + maxDeliveryCount: 1 + topicIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: servicebus.azure.upbound.io/v1beta1 +kind: Topic +metadata: + annotations: + meta.upbound.io/example-id: servicebus/v1beta2/subscriptionrule + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + enablePartitioning: true + namespaceIdSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/servicefabric/v1beta2/cluster.yaml b/examples-generated/servicefabric/v1beta2/cluster.yaml new file mode 100644 index 000000000..7f029092a --- /dev/null +++ b/examples-generated/servicefabric/v1beta2/cluster.yaml @@ -0,0 +1,39 @@ +apiVersion: servicefabric.azure.upbound.io/v1beta2 +kind: Cluster +metadata: + annotations: + meta.upbound.io/example-id: servicefabric/v1beta2/cluster + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + clusterCodeVersion: 7.1.456.959 + location: West Europe + managementEndpoint: https://example:80 + nodeType: + - clientEndpointPort: 2020 + httpEndpointPort: 80 + instanceCount: 3 + isPrimary: true + name: first + reliabilityLevel: Bronze + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + upgradeMode: Manual + vmImage: Windows + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: servicefabric/v1beta2/cluster + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff 
--git a/examples-generated/servicefabric/v1beta2/managedcluster.yaml b/examples-generated/servicefabric/v1beta2/managedcluster.yaml new file mode 100644 index 000000000..46648c1c9 --- /dev/null +++ b/examples-generated/servicefabric/v1beta2/managedcluster.yaml @@ -0,0 +1,34 @@ +apiVersion: servicefabric.azure.upbound.io/v1beta2 +kind: ManagedCluster +metadata: + annotations: + meta.upbound.io/example-id: servicefabric/v1beta2/managedcluster + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + clientConnectionPort: 12345 + httpGatewayPort: 4567 + lbRule: + - backendPort: 38080 + frontendPort: 80 + probeProtocol: http + probeRequestPath: /test + protocol: tcp + location: West Europe + nodeType: + - applicationPortRange: 30000-49000 + dataDiskSizeGb: 130 + ephemeralPortRange: 10000-20000 + name: test1 + primary: true + vmImageOffer: WindowsServer + vmImagePublisher: MicrosoftWindowsServer + vmImageSku: 2019-Datacenter-with-Containers + vmImageVersion: latest + vmInstanceCount: 5 + vmSize: Standard_DS1_v2 + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/servicelinker/v1beta2/springcloudconnection.yaml b/examples-generated/servicelinker/v1beta2/springcloudconnection.yaml new file mode 100644 index 000000000..c623eda56 --- /dev/null +++ b/examples-generated/servicelinker/v1beta2/springcloudconnection.yaml @@ -0,0 +1,156 @@ +apiVersion: servicelinker.azure.upbound.io/v1beta2 +kind: SpringCloudConnection +metadata: + annotations: + meta.upbound.io/example-id: servicelinker/v1beta2/springcloudconnection + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + authentication: + - type: systemAssignedIdentity + name: example-serviceconnector + springCloudIdSelector: + matchLabels: + testing.upbound.io/example-name: example + targetResourceIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: 
cosmosdb.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: servicelinker/v1beta2/springcloudconnection + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + consistencyPolicy: + - consistencyLevel: BoundedStaleness + maxIntervalInSeconds: 10 + maxStalenessPrefix: 200 + geoLocation: + - failoverPriority: 0 + location: West Europe + kind: GlobalDocumentDB + location: West Europe + offerType: Standard + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: cosmosdb.azure.upbound.io/v1beta2 +kind: SQLContainer +metadata: + annotations: + meta.upbound.io/example-id: servicelinker/v1beta2/springcloudconnection + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountNameSelector: + matchLabels: + testing.upbound.io/example-name: example + databaseNameSelector: + matchLabels: + testing.upbound.io/example-name: example + partitionKeyPath: /definition + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: cosmosdb.azure.upbound.io/v1beta2 +kind: SQLDatabase +metadata: + annotations: + meta.upbound.io/example-id: servicelinker/v1beta2/springcloudconnection + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountNameSelector: + matchLabels: + testing.upbound.io/example-name: example + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + throughput: 400 + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: servicelinker/v1beta2/springcloudconnection + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: appplatform.azure.upbound.io/v1beta2 +kind: SpringCloudApp +metadata: + annotations: + meta.upbound.io/example-id: 
servicelinker/v1beta2/springcloudconnection + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + identity: + - type: SystemAssigned + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + serviceNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: appplatform.azure.upbound.io/v1beta2 +kind: SpringCloudJavaDeployment +metadata: + annotations: + meta.upbound.io/example-id: servicelinker/v1beta2/springcloudconnection + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + springCloudAppIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: appplatform.azure.upbound.io/v1beta2 +kind: SpringCloudService +metadata: + annotations: + meta.upbound.io/example-id: servicelinker/v1beta2/springcloudconnection + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/signalrservice/v1beta1/signalrsharedprivatelinkresource.yaml b/examples-generated/signalrservice/v1beta1/signalrsharedprivatelinkresource.yaml index ff0b54b3b..45e7d02c9 100644 --- a/examples-generated/signalrservice/v1beta1/signalrsharedprivatelinkresource.yaml +++ b/examples-generated/signalrservice/v1beta1/signalrsharedprivatelinkresource.yaml @@ -19,7 +19,7 @@ spec: --- -apiVersion: keyvault.azure.upbound.io/v1beta1 +apiVersion: keyvault.azure.upbound.io/v1beta2 kind: Vault metadata: annotations: @@ -53,7 +53,7 @@ spec: --- -apiVersion: signalrservice.azure.upbound.io/v1beta1 +apiVersion: signalrservice.azure.upbound.io/v1beta2 kind: Service metadata: annotations: diff --git a/examples-generated/signalrservice/v1beta2/networkacl.yaml b/examples-generated/signalrservice/v1beta2/networkacl.yaml new file mode 100644 index 000000000..53561f11d --- /dev/null 
+++ b/examples-generated/signalrservice/v1beta2/networkacl.yaml @@ -0,0 +1,124 @@ +apiVersion: signalrservice.azure.upbound.io/v1beta2 +kind: NetworkACL +metadata: + annotations: + meta.upbound.io/example-id: signalrservice/v1beta2/networkacl + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + defaultAction: Deny + privateEndpoint: + - allowedRequestTypes: + - ServerConnection + idSelector: + matchLabels: + testing.upbound.io/example-name: example + publicNetwork: + - allowedRequestTypes: + - ClientConnection + signalrServiceIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: PrivateEndpoint +metadata: + annotations: + meta.upbound.io/example-id: signalrservice/v1beta2/networkacl + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + privateServiceConnection: + - isManualConnection: false + name: psc-sig-test + privateConnectionResourceId: ${azurerm_signalr_service.example.id} + subresourceNames: + - signalr + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: signalrservice/v1beta2/networkacl + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: signalrservice.azure.upbound.io/v1beta2 +kind: Service +metadata: + annotations: + meta.upbound.io/example-id: signalrservice/v1beta2/networkacl + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sku: + - capacity: 1 + name: Standard_S1 + +--- + +apiVersion: 
network.azure.upbound.io/v1beta2 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: signalrservice/v1beta2/networkacl + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressPrefixes: + - 10.5.2.0/24 + enforcePrivateLinkEndpointNetworkPolicies: true + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + virtualNetworkNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: VirtualNetwork +metadata: + annotations: + meta.upbound.io/example-id: signalrservice/v1beta2/networkacl + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressSpace: + - 10.5.0.0/16 + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/signalrservice/v1beta2/service.yaml b/examples-generated/signalrservice/v1beta2/service.yaml new file mode 100644 index 000000000..230ca72c0 --- /dev/null +++ b/examples-generated/signalrservice/v1beta2/service.yaml @@ -0,0 +1,47 @@ +apiVersion: signalrservice.azure.upbound.io/v1beta2 +kind: Service +metadata: + annotations: + meta.upbound.io/example-id: signalrservice/v1beta2/service + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + connectivityLogsEnabled: true + cors: + - allowedOrigins: + - http://www.example.com + location: West Europe + messagingLogsEnabled: true + publicNetworkAccessEnabled: false + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + serviceMode: Default + sku: + - capacity: 1 + name: Free_F1 + upstreamEndpoint: + - categoryPattern: + - connections + - messages + eventPattern: + - '*' + hubPattern: + - hub1 + urlTemplate: http://foo.com + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: 
signalrservice/v1beta2/service + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/signalrservice/v1beta2/webpubsub.yaml b/examples-generated/signalrservice/v1beta2/webpubsub.yaml new file mode 100644 index 000000000..ce944dcd1 --- /dev/null +++ b/examples-generated/signalrservice/v1beta2/webpubsub.yaml @@ -0,0 +1,38 @@ +apiVersion: signalrservice.azure.upbound.io/v1beta2 +kind: WebPubsub +metadata: + annotations: + meta.upbound.io/example-id: signalrservice/v1beta2/webpubsub + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + capacity: 1 + identity: + - type: SystemAssigned + liveTrace: + - connectivityLogsEnabled: false + enabled: true + messagingLogsEnabled: true + location: West Europe + name: tfex-webpubsub + publicNetworkAccessEnabled: false + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sku: Standard_S1 + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: signalrservice/v1beta2/webpubsub + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: east us diff --git a/examples-generated/signalrservice/v1beta2/webpubsubhub.yaml b/examples-generated/signalrservice/v1beta2/webpubsubhub.yaml new file mode 100644 index 000000000..a86b15478 --- /dev/null +++ b/examples-generated/signalrservice/v1beta2/webpubsubhub.yaml @@ -0,0 +1,113 @@ +apiVersion: signalrservice.azure.upbound.io/v1beta2 +kind: WebPubsubHub +metadata: + annotations: + meta.upbound.io/example-id: signalrservice/v1beta2/webpubsubhub + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + anonymousConnectionsEnabled: true + eventHandler: + - systemEvents: + - connect + - connected + urlTemplate: https://test.com/api/{hub}/{event} + userEventPattern: '*' + - auth: + - 
managedIdentityIdSelector: + matchLabels: + testing.upbound.io/example-name: example + systemEvents: + - connected + urlTemplate: https://test.com/api/{hub}/{event} + userEventPattern: event1, event2 + eventListener: + - eventhubNameSelector: + matchLabels: + testing.upbound.io/example-name: test1 + eventhubNamespaceNameSelector: + matchLabels: + testing.upbound.io/example-name: test + systemEventNameFilter: + - connected + userEventNameFilter: + - event1 + - event2 + - eventhubNameSelector: + matchLabels: + testing.upbound.io/example-name: test1 + eventhubNamespaceNameSelector: + matchLabels: + testing.upbound.io/example-name: test + systemEventNameFilter: + - connected + userEventNameFilter: + - '*' + - eventhubNameSelector: + matchLabels: + testing.upbound.io/example-name: test1 + eventhubNamespaceNameSelector: + matchLabels: + testing.upbound.io/example-name: test + systemEventNameFilter: + - connected + userEventNameFilter: + - event1 + name: tfex_wpsh + webPubsubIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: signalrservice/v1beta2/webpubsubhub + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: east us + +--- + +apiVersion: managedidentity.azure.upbound.io/v1beta1 +kind: UserAssignedIdentity +metadata: + annotations: + meta.upbound.io/example-id: signalrservice/v1beta2/webpubsubhub + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: east us + name: tfex-uai + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: signalrservice.azure.upbound.io/v1beta2 +kind: WebPubsub +metadata: + annotations: + meta.upbound.io/example-id: signalrservice/v1beta2/webpubsubhub + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + capacity: 1 + 
location: east us + name: tfex-webpubsub + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sku: Standard_S1 diff --git a/examples-generated/signalrservice/v1beta2/webpubsubnetworkacl.yaml b/examples-generated/signalrservice/v1beta2/webpubsubnetworkacl.yaml new file mode 100644 index 000000000..04269fea7 --- /dev/null +++ b/examples-generated/signalrservice/v1beta2/webpubsubnetworkacl.yaml @@ -0,0 +1,125 @@ +apiVersion: signalrservice.azure.upbound.io/v1beta2 +kind: WebPubsubNetworkACL +metadata: + annotations: + meta.upbound.io/example-id: signalrservice/v1beta2/webpubsubnetworkacl + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + defaultAction: Allow + privateEndpoint: + - deniedRequestTypes: + - RESTAPI + - ClientConnection + idSelector: + matchLabels: + testing.upbound.io/example-name: example + publicNetwork: + - deniedRequestTypes: + - ClientConnection + webPubsubIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: PrivateEndpoint +metadata: + annotations: + meta.upbound.io/example-id: signalrservice/v1beta2/webpubsubnetworkacl + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: east us + privateServiceConnection: + - isManualConnection: false + name: psc-sig-test + privateConnectionResourceId: ${azurerm_web_pubsub.example.id} + subresourceNames: + - webpubsub + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: signalrservice/v1beta2/webpubsubnetworkacl + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: east us + +--- + +apiVersion: network.azure.upbound.io/v1beta2 
+kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: signalrservice/v1beta2/webpubsubnetworkacl + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressPrefixes: + - 10.5.2.0/24 + enforcePrivateLinkEndpointNetworkPolicies: true + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + virtualNetworkNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: VirtualNetwork +metadata: + annotations: + meta.upbound.io/example-id: signalrservice/v1beta2/webpubsubnetworkacl + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressSpace: + - 10.5.0.0/16 + location: east us + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: signalrservice.azure.upbound.io/v1beta2 +kind: WebPubsub +metadata: + annotations: + meta.upbound.io/example-id: signalrservice/v1beta2/webpubsubnetworkacl + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + capacity: 1 + location: east us + name: tfex-webpubsub + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sku: Standard_S1 diff --git a/examples-generated/spring/v1beta1/cloudapplicationliveview.yaml b/examples-generated/spring/v1beta1/cloudapplicationliveview.yaml index dd5e5347a..0bc32cf9e 100644 --- a/examples-generated/spring/v1beta1/cloudapplicationliveview.yaml +++ b/examples-generated/spring/v1beta1/cloudapplicationliveview.yaml @@ -28,7 +28,7 @@ spec: --- -apiVersion: appplatform.azure.upbound.io/v1beta1 +apiVersion: appplatform.azure.upbound.io/v1beta2 kind: SpringCloudService metadata: annotations: diff --git a/examples-generated/sql/v1beta1/mssqldatabaseextendedauditingpolicy.yaml b/examples-generated/sql/v1beta1/mssqldatabaseextendedauditingpolicy.yaml index d956c18ae..b1394bd61 100644 --- 
a/examples-generated/sql/v1beta1/mssqldatabaseextendedauditingpolicy.yaml +++ b/examples-generated/sql/v1beta1/mssqldatabaseextendedauditingpolicy.yaml @@ -23,7 +23,7 @@ spec: --- -apiVersion: sql.azure.upbound.io/v1beta1 +apiVersion: sql.azure.upbound.io/v1beta2 kind: MSSQLDatabase metadata: annotations: @@ -39,7 +39,7 @@ spec: --- -apiVersion: sql.azure.upbound.io/v1beta1 +apiVersion: sql.azure.upbound.io/v1beta2 kind: MSSQLServer metadata: annotations: @@ -76,7 +76,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: diff --git a/examples-generated/sql/v1beta1/mssqldatabasevulnerabilityassessmentrulebaseline.yaml b/examples-generated/sql/v1beta1/mssqldatabasevulnerabilityassessmentrulebaseline.yaml index 2efe9c5a3..9cbb7388a 100644 --- a/examples-generated/sql/v1beta1/mssqldatabasevulnerabilityassessmentrulebaseline.yaml +++ b/examples-generated/sql/v1beta1/mssqldatabasevulnerabilityassessmentrulebaseline.yaml @@ -48,7 +48,7 @@ spec: --- -apiVersion: sql.azure.upbound.io/v1beta1 +apiVersion: sql.azure.upbound.io/v1beta2 kind: MSSQLServerVulnerabilityAssessment metadata: annotations: @@ -83,7 +83,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: diff --git a/examples-generated/sql/v1beta1/mssqlfirewallrule.yaml b/examples-generated/sql/v1beta1/mssqlfirewallrule.yaml index d08a80b29..c7e9d835c 100644 --- a/examples-generated/sql/v1beta1/mssqlfirewallrule.yaml +++ b/examples-generated/sql/v1beta1/mssqlfirewallrule.yaml @@ -16,7 +16,7 @@ spec: --- -apiVersion: sql.azure.upbound.io/v1beta1 +apiVersion: sql.azure.upbound.io/v1beta2 kind: MSSQLServer metadata: annotations: diff --git a/examples-generated/sql/v1beta1/mssqljobagent.yaml b/examples-generated/sql/v1beta1/mssqljobagent.yaml index 34e5b67ac..2683d6aa3 100644 --- a/examples-generated/sql/v1beta1/mssqljobagent.yaml +++ 
b/examples-generated/sql/v1beta1/mssqljobagent.yaml @@ -16,7 +16,7 @@ spec: --- -apiVersion: sql.azure.upbound.io/v1beta1 +apiVersion: sql.azure.upbound.io/v1beta2 kind: MSSQLDatabase metadata: annotations: @@ -34,7 +34,7 @@ spec: --- -apiVersion: sql.azure.upbound.io/v1beta1 +apiVersion: sql.azure.upbound.io/v1beta2 kind: MSSQLServer metadata: annotations: diff --git a/examples-generated/sql/v1beta1/mssqljobcredential.yaml b/examples-generated/sql/v1beta1/mssqljobcredential.yaml index 78aca0e53..1a229e1fc 100644 --- a/examples-generated/sql/v1beta1/mssqljobcredential.yaml +++ b/examples-generated/sql/v1beta1/mssqljobcredential.yaml @@ -19,7 +19,7 @@ spec: --- -apiVersion: sql.azure.upbound.io/v1beta1 +apiVersion: sql.azure.upbound.io/v1beta2 kind: MSSQLDatabase metadata: annotations: @@ -55,7 +55,7 @@ spec: --- -apiVersion: sql.azure.upbound.io/v1beta1 +apiVersion: sql.azure.upbound.io/v1beta2 kind: MSSQLServer metadata: annotations: diff --git a/examples-generated/sql/v1beta1/mssqlmanagedinstanceactivedirectoryadministrator.yaml b/examples-generated/sql/v1beta1/mssqlmanagedinstanceactivedirectoryadministrator.yaml index dbdcfeed4..57fc727c9 100644 --- a/examples-generated/sql/v1beta1/mssqlmanagedinstanceactivedirectoryadministrator.yaml +++ b/examples-generated/sql/v1beta1/mssqlmanagedinstanceactivedirectoryadministrator.yaml @@ -17,7 +17,7 @@ spec: --- -apiVersion: sql.azure.upbound.io/v1beta1 +apiVersion: sql.azure.upbound.io/v1beta2 kind: MSSQLManagedInstance metadata: annotations: @@ -62,7 +62,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: Subnet metadata: annotations: @@ -83,7 +83,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: VirtualNetwork metadata: annotations: diff --git a/examples-generated/sql/v1beta1/mssqloutboundfirewallrule.yaml b/examples-generated/sql/v1beta1/mssqloutboundfirewallrule.yaml index e4b4ca081..6d82d9d74 
100644 --- a/examples-generated/sql/v1beta1/mssqloutboundfirewallrule.yaml +++ b/examples-generated/sql/v1beta1/mssqloutboundfirewallrule.yaml @@ -14,7 +14,7 @@ spec: --- -apiVersion: sql.azure.upbound.io/v1beta1 +apiVersion: sql.azure.upbound.io/v1beta2 kind: MSSQLServer metadata: annotations: diff --git a/examples-generated/sql/v1beta1/mssqlserverdnsalias.yaml b/examples-generated/sql/v1beta1/mssqlserverdnsalias.yaml index 8ccd3e02d..d09967a9c 100644 --- a/examples-generated/sql/v1beta1/mssqlserverdnsalias.yaml +++ b/examples-generated/sql/v1beta1/mssqlserverdnsalias.yaml @@ -14,7 +14,7 @@ spec: --- -apiVersion: sql.azure.upbound.io/v1beta1 +apiVersion: sql.azure.upbound.io/v1beta2 kind: MSSQLServer metadata: annotations: diff --git a/examples-generated/sql/v1beta1/mssqlservermicrosoftsupportauditingpolicy.yaml b/examples-generated/sql/v1beta1/mssqlservermicrosoftsupportauditingpolicy.yaml index e972653d5..19c1df8db 100644 --- a/examples-generated/sql/v1beta1/mssqlservermicrosoftsupportauditingpolicy.yaml +++ b/examples-generated/sql/v1beta1/mssqlservermicrosoftsupportauditingpolicy.yaml @@ -21,7 +21,7 @@ spec: --- -apiVersion: sql.azure.upbound.io/v1beta1 +apiVersion: sql.azure.upbound.io/v1beta2 kind: MSSQLServer metadata: annotations: @@ -58,7 +58,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: diff --git a/examples-generated/sql/v1beta1/mssqlserversecurityalertpolicy.yaml b/examples-generated/sql/v1beta1/mssqlserversecurityalertpolicy.yaml index fa7303a3d..259fef90f 100644 --- a/examples-generated/sql/v1beta1/mssqlserversecurityalertpolicy.yaml +++ b/examples-generated/sql/v1beta1/mssqlserversecurityalertpolicy.yaml @@ -43,7 +43,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: diff --git a/examples-generated/sql/v1beta1/mssqlservertransparentdataencryption.yaml 
b/examples-generated/sql/v1beta1/mssqlservertransparentdataencryption.yaml index 08d048ce2..93d519cb8 100644 --- a/examples-generated/sql/v1beta1/mssqlservertransparentdataencryption.yaml +++ b/examples-generated/sql/v1beta1/mssqlservertransparentdataencryption.yaml @@ -14,7 +14,7 @@ spec: --- -apiVersion: sql.azure.upbound.io/v1beta1 +apiVersion: sql.azure.upbound.io/v1beta2 kind: MSSQLServer metadata: annotations: diff --git a/examples-generated/sql/v1beta1/mssqlvirtualnetworkrule.yaml b/examples-generated/sql/v1beta1/mssqlvirtualnetworkrule.yaml index 6144a9ad9..6047b7a4e 100644 --- a/examples-generated/sql/v1beta1/mssqlvirtualnetworkrule.yaml +++ b/examples-generated/sql/v1beta1/mssqlvirtualnetworkrule.yaml @@ -17,7 +17,7 @@ spec: --- -apiVersion: sql.azure.upbound.io/v1beta1 +apiVersion: sql.azure.upbound.io/v1beta2 kind: MSSQLServer metadata: annotations: @@ -54,7 +54,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: Subnet metadata: annotations: @@ -77,7 +77,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: VirtualNetwork metadata: annotations: diff --git a/examples-generated/sql/v1beta2/mssqldatabase.yaml b/examples-generated/sql/v1beta2/mssqldatabase.yaml new file mode 100644 index 000000000..db45019db --- /dev/null +++ b/examples-generated/sql/v1beta2/mssqldatabase.yaml @@ -0,0 +1,78 @@ +apiVersion: sql.azure.upbound.io/v1beta2 +kind: MSSQLDatabase +metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta2/mssqldatabase + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + collation: SQL_Latin1_General_CP1_CI_AS + enclaveType: VBS + licenseType: LicenseIncluded + maxSizeGb: 4 + readScale: true + serverIdSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: S0 + tags: + foo: bar + zoneRedundant: true + +--- + +apiVersion: sql.azure.upbound.io/v1beta2 +kind: 
MSSQLServer +metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta2/mssqldatabase + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + administratorLogin: 4dm1n157r470r + administratorLoginPasswordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + version: "12.0" + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta2/mssqldatabase + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: storage.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta2/mssqldatabase + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountReplicationType: LRS + accountTier: Standard + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/sql/v1beta2/mssqlelasticpool.yaml b/examples-generated/sql/v1beta2/mssqlelasticpool.yaml new file mode 100644 index 000000000..93b77d486 --- /dev/null +++ b/examples-generated/sql/v1beta2/mssqlelasticpool.yaml @@ -0,0 +1,64 @@ +apiVersion: sql.azure.upbound.io/v1beta2 +kind: MSSQLElasticPool +metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta2/mssqlelasticpool + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + licenseType: LicenseIncluded + location: West Europe + maxSizeGb: 756 + perDatabaseSettings: + - maxCapacity: 4 + minCapacity: 0.25 + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + serverNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sku: + - capacity: 4 + family: Gen4 + name: BasicPool + tier: 
Basic + +--- + +apiVersion: sql.azure.upbound.io/v1beta2 +kind: MSSQLServer +metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta2/mssqlelasticpool + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + administratorLogin: 4dm1n157r470r + administratorLoginPasswordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + version: "12.0" + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta2/mssqlelasticpool + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/sql/v1beta2/mssqlfailovergroup.yaml b/examples-generated/sql/v1beta2/mssqlfailovergroup.yaml new file mode 100644 index 000000000..5d04d3a56 --- /dev/null +++ b/examples-generated/sql/v1beta2/mssqlfailovergroup.yaml @@ -0,0 +1,104 @@ +apiVersion: sql.azure.upbound.io/v1beta2 +kind: MSSQLFailoverGroup +metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta2/mssqlfailovergroup + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + databasesRefs: + - name: example + partnerServer: + - idSelector: + matchLabels: + testing.upbound.io/example-name: secondary + readWriteEndpointFailoverPolicy: + - graceMinutes: 80 + mode: Automatic + serverIdSelector: + matchLabels: + testing.upbound.io/example-name: primary + tags: + database: example + environment: prod + +--- + +apiVersion: sql.azure.upbound.io/v1beta2 +kind: MSSQLDatabase +metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta2/mssqlfailovergroup + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + collation: SQL_Latin1_General_CP1_CI_AS + maxSizeGb: "200" + serverIdSelector: + matchLabels: + 
testing.upbound.io/example-name: primary + skuName: S1 + +--- + +apiVersion: sql.azure.upbound.io/v1beta2 +kind: MSSQLServer +metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta2/mssqlfailovergroup + labels: + testing.upbound.io/example-name: primary + name: primary +spec: + forProvider: + administratorLogin: missadministrator + administratorLoginPasswordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + version: "12.0" + +--- + +apiVersion: sql.azure.upbound.io/v1beta2 +kind: MSSQLServer +metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta2/mssqlfailovergroup + labels: + testing.upbound.io/example-name: secondary + name: secondary +spec: + forProvider: + administratorLogin: missadministrator + administratorLoginPasswordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + location: North Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + version: "12.0" + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta2/mssqlfailovergroup + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/sql/v1beta2/mssqlmanageddatabase.yaml b/examples-generated/sql/v1beta2/mssqlmanageddatabase.yaml new file mode 100644 index 000000000..0b545d072 --- /dev/null +++ b/examples-generated/sql/v1beta2/mssqlmanageddatabase.yaml @@ -0,0 +1,96 @@ +apiVersion: sql.azure.upbound.io/v1beta2 +kind: MSSQLManagedDatabase +metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta2/mssqlmanageddatabase + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + managedInstanceIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + 
+apiVersion: sql.azure.upbound.io/v1beta2 +kind: MSSQLManagedInstance +metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta2/mssqlmanageddatabase + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + administratorLogin: msadministrator + administratorLoginPasswordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + licenseType: BasePrice + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: GP_Gen5 + storageSizeInGb: 32 + subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: example + vcores: 4 + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta2/mssqlmanageddatabase + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta2/mssqlmanageddatabase + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressPrefixes: + - 10.0.2.0/24 + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + virtualNetworkNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: VirtualNetwork +metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta2/mssqlmanageddatabase + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressSpace: + - 10.0.0.0/16 + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/sql/v1beta2/mssqlmanagedinstance.yaml b/examples-generated/sql/v1beta2/mssqlmanagedinstance.yaml new file mode 100644 index 000000000..f82de233f --- /dev/null +++ 
b/examples-generated/sql/v1beta2/mssqlmanagedinstance.yaml @@ -0,0 +1,385 @@ +apiVersion: sql.azure.upbound.io/v1beta2 +kind: MSSQLManagedInstance +metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta2/mssqlmanagedinstance + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + administratorLogin: mradministrator + administratorLoginPasswordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + licenseType: BasePrice + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: GP_Gen5 + storageSizeInGb: 32 + subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: example + vcores: 4 + +--- + +apiVersion: network.azure.upbound.io/v1beta1 +kind: SecurityGroup +metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta2/mssqlmanagedinstance + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta1 +kind: SecurityRule +metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta2/mssqlmanagedinstance + labels: + testing.upbound.io/example-name: allow_health_probe_inbound + name: allow-health-probe-inbound +spec: + forProvider: + access: Allow + destinationAddressPrefix: '*' + destinationPortRange: '*' + direction: Inbound + networkSecurityGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + priority: 300 + protocol: '*' + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sourceAddressPrefix: AzureLoadBalancer + sourcePortRange: '*' + +--- + +apiVersion: network.azure.upbound.io/v1beta1 +kind: SecurityRule +metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta2/mssqlmanagedinstance + labels: + testing.upbound.io/example-name: 
allow_management_inbound + name: allow-management-inbound +spec: + forProvider: + access: Allow + destinationAddressPrefix: '*' + destinationPortRanges: + - "9000" + - "9003" + - "1438" + - "1440" + - "1452" + direction: Inbound + networkSecurityGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + priority: 106 + protocol: Tcp + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sourceAddressPrefix: '*' + sourcePortRange: '*' + +--- + +apiVersion: network.azure.upbound.io/v1beta1 +kind: SecurityRule +metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta2/mssqlmanagedinstance + labels: + testing.upbound.io/example-name: allow_management_outbound + name: allow-management-outbound +spec: + forProvider: + access: Allow + destinationAddressPrefix: '*' + destinationPortRanges: + - "80" + - "443" + - "12000" + direction: Outbound + networkSecurityGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + priority: 102 + protocol: Tcp + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sourceAddressPrefix: '*' + sourcePortRange: '*' + +--- + +apiVersion: network.azure.upbound.io/v1beta1 +kind: SecurityRule +metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta2/mssqlmanagedinstance + labels: + testing.upbound.io/example-name: allow_misubnet_inbound + name: allow-misubnet-inbound +spec: + forProvider: + access: Allow + destinationAddressPrefix: '*' + destinationPortRange: '*' + direction: Inbound + networkSecurityGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + priority: 200 + protocol: '*' + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sourceAddressPrefix: 10.0.0.0/24 + sourcePortRange: '*' + +--- + +apiVersion: network.azure.upbound.io/v1beta1 +kind: SecurityRule +metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta2/mssqlmanagedinstance + labels: 
+ testing.upbound.io/example-name: allow_misubnet_outbound + name: allow-misubnet-outbound +spec: + forProvider: + access: Allow + destinationAddressPrefix: '*' + destinationPortRange: '*' + direction: Outbound + networkSecurityGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + priority: 200 + protocol: '*' + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sourceAddressPrefix: 10.0.0.0/24 + sourcePortRange: '*' + +--- + +apiVersion: network.azure.upbound.io/v1beta1 +kind: SecurityRule +metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta2/mssqlmanagedinstance + labels: + testing.upbound.io/example-name: allow_tds_inbound + name: allow-tds-inbound +spec: + forProvider: + access: Allow + destinationAddressPrefix: '*' + destinationPortRange: "1433" + direction: Inbound + networkSecurityGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + priority: 1000 + protocol: Tcp + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sourceAddressPrefix: VirtualNetwork + sourcePortRange: '*' + +--- + +apiVersion: network.azure.upbound.io/v1beta1 +kind: SecurityRule +metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta2/mssqlmanagedinstance + labels: + testing.upbound.io/example-name: deny_all_inbound + name: deny-all-inbound +spec: + forProvider: + access: Deny + destinationAddressPrefix: '*' + destinationPortRange: '*' + direction: Inbound + networkSecurityGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + priority: 4096 + protocol: '*' + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sourceAddressPrefix: '*' + sourcePortRange: '*' + +--- + +apiVersion: network.azure.upbound.io/v1beta1 +kind: SecurityRule +metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta2/mssqlmanagedinstance + labels: + testing.upbound.io/example-name: deny_all_outbound + name: 
deny-all-outbound +spec: + forProvider: + access: Deny + destinationAddressPrefix: '*' + destinationPortRange: '*' + direction: Outbound + networkSecurityGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + priority: 4096 + protocol: '*' + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sourceAddressPrefix: '*' + sourcePortRange: '*' + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta2/mssqlmanagedinstance + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: network.azure.upbound.io/v1beta1 +kind: RouteTable +metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta2/mssqlmanagedinstance + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + disableBgpRoutePropagation: false + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta2/mssqlmanagedinstance + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressPrefixes: + - 10.0.0.0/24 + delegation: + - name: managedinstancedelegation + serviceDelegation: + - actions: + - Microsoft.Network/virtualNetworks/subnets/join/action + - Microsoft.Network/virtualNetworks/subnets/prepareNetworkPolicies/action + - Microsoft.Network/virtualNetworks/subnets/unprepareNetworkPolicies/action + name: Microsoft.Sql/managedInstances + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + virtualNetworkNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta1 +kind: SubnetNetworkSecurityGroupAssociation +metadata: + annotations: + 
meta.upbound.io/example-id: sql/v1beta2/mssqlmanagedinstance + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + networkSecurityGroupIdSelector: + matchLabels: + testing.upbound.io/example-name: example + subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta1 +kind: SubnetRouteTableAssociation +metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta2/mssqlmanagedinstance + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + routeTableIdSelector: + matchLabels: + testing.upbound.io/example-name: example + subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: VirtualNetwork +metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta2/mssqlmanagedinstance + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressSpace: + - 10.0.0.0/16 + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/sql/v1beta2/mssqlmanagedinstancefailovergroup.yaml b/examples-generated/sql/v1beta2/mssqlmanagedinstancefailovergroup.yaml new file mode 100644 index 000000000..01b4cce4d --- /dev/null +++ b/examples-generated/sql/v1beta2/mssqlmanagedinstancefailovergroup.yaml @@ -0,0 +1,208 @@ +apiVersion: sql.azure.upbound.io/v1beta2 +kind: MSSQLManagedInstanceFailoverGroup +metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta2/mssqlmanagedinstancefailovergroup + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + managedInstanceIdSelector: + matchLabels: + testing.upbound.io/example-name: primary + partnerManagedInstanceIdSelector: + matchLabels: + testing.upbound.io/example-name: secondary + readWriteEndpointFailoverPolicy: + - graceMinutes: 
60 + mode: Automatic + +--- + +apiVersion: sql.azure.upbound.io/v1beta2 +kind: MSSQLManagedInstance +metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta2/mssqlmanagedinstancefailovergroup + labels: + testing.upbound.io/example-name: primary + name: primary +spec: + forProvider: + administratorLogin: mradministrator + administratorLoginPasswordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + licenseType: BasePrice + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: GP_Gen5 + storageSizeInGb: 32 + subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: example + tags: + environment: prod + vcores: 4 + +--- + +apiVersion: sql.azure.upbound.io/v1beta2 +kind: MSSQLManagedInstance +metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta2/mssqlmanagedinstancefailovergroup + labels: + testing.upbound.io/example-name: secondary + name: secondary +spec: + forProvider: + administratorLogin: mradministrator + administratorLoginPasswordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + licenseType: BasePrice + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: GP_Gen5 + storageSizeInGb: 32 + subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: example + tags: + environment: prod + vcores: 4 + +--- + +apiVersion: network.azure.upbound.io/v1beta1 +kind: SecurityGroup +metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta2/mssqlmanagedinstancefailovergroup + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: 
sql/v1beta2/mssqlmanagedinstancefailovergroup + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: network.azure.upbound.io/v1beta1 +kind: RouteTable +metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta2/mssqlmanagedinstancefailovergroup + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta2/mssqlmanagedinstancefailovergroup + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressPrefixes: + - 10.0.2.0/24 + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + virtualNetworkNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta1 +kind: SubnetNetworkSecurityGroupAssociation +metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta2/mssqlmanagedinstancefailovergroup + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + networkSecurityGroupIdSelector: + matchLabels: + testing.upbound.io/example-name: example + subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta1 +kind: SubnetRouteTableAssociation +metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta2/mssqlmanagedinstancefailovergroup + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + routeTableIdSelector: + matchLabels: + testing.upbound.io/example-name: example + subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: VirtualNetwork 
+metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta2/mssqlmanagedinstancefailovergroup + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressSpace: + - 10.0.0.0/16 + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/sql/v1beta2/mssqlmanagedinstancevulnerabilityassessment.yaml b/examples-generated/sql/v1beta2/mssqlmanagedinstancevulnerabilityassessment.yaml new file mode 100644 index 000000000..71ec576d0 --- /dev/null +++ b/examples-generated/sql/v1beta2/mssqlmanagedinstancevulnerabilityassessment.yaml @@ -0,0 +1,143 @@ +apiVersion: sql.azure.upbound.io/v1beta2 +kind: MSSQLManagedInstanceVulnerabilityAssessment +metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta2/mssqlmanagedinstancevulnerabilityassessment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + managedInstanceIdSelector: + matchLabels: + testing.upbound.io/example-name: example + recurringScans: + - emailSubscriptionAdmins: true + emails: + - email@example1.com + - email@example2.com + enabled: true + storageAccountAccessKeySecretRef: + key: attribute.primary_access_key + name: example-storage-account + namespace: upbound-system + storageContainerPath: ${azurerm_storage_account.example.primary_blob_endpoint}${azurerm_storage_container.example.name}/ + +--- + +apiVersion: sql.azure.upbound.io/v1beta2 +kind: MSSQLManagedInstance +metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta2/mssqlmanagedinstancevulnerabilityassessment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + administratorLogin: missadministrator + administratorLoginPasswordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + licenseType: BasePrice + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: 
example + skuName: GP_Gen5 + storageSizeInGb: 32 + subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: example + vcores: 4 + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta2/mssqlmanagedinstancevulnerabilityassessment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: storage.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta2/mssqlmanagedinstancevulnerabilityassessment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountReplicationType: GRS + accountTier: Standard + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: storage.azure.upbound.io/v1beta1 +kind: Container +metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta2/mssqlmanagedinstancevulnerabilityassessment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + containerAccessType: private + storageAccountNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta2/mssqlmanagedinstancevulnerabilityassessment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressPrefixes: + - 10.0.2.0/24 + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + virtualNetworkNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: VirtualNetwork +metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta2/mssqlmanagedinstancevulnerabilityassessment + labels: + testing.upbound.io/example-name: example + name: 
example +spec: + forProvider: + addressSpace: + - 10.0.0.0/16 + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/sql/v1beta2/mssqlserver.yaml b/examples-generated/sql/v1beta2/mssqlserver.yaml new file mode 100644 index 000000000..ebe2795da --- /dev/null +++ b/examples-generated/sql/v1beta2/mssqlserver.yaml @@ -0,0 +1,44 @@ +apiVersion: sql.azure.upbound.io/v1beta2 +kind: MSSQLServer +metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta2/mssqlserver + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + administratorLogin: missadministrator + administratorLoginPasswordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + azureadAdministrator: + - loginUsernameSelector: + matchLabels: + testing.upbound.io/example-name: example + objectIdSelector: + matchLabels: + testing.upbound.io/example-name: example + location: West Europe + minimumTlsVersion: "1.2" + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + tags: + environment: production + version: "12.0" + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta2/mssqlserver + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/sql/v1beta2/mssqlservervulnerabilityassessment.yaml b/examples-generated/sql/v1beta2/mssqlservervulnerabilityassessment.yaml new file mode 100644 index 000000000..5ffec0a1a --- /dev/null +++ b/examples-generated/sql/v1beta2/mssqlservervulnerabilityassessment.yaml @@ -0,0 +1,117 @@ +apiVersion: sql.azure.upbound.io/v1beta2 +kind: MSSQLServerVulnerabilityAssessment +metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta2/mssqlservervulnerabilityassessment + labels: + testing.upbound.io/example-name: example + name: 
example +spec: + forProvider: + recurringScans: + - emailSubscriptionAdmins: true + emails: + - email@example1.com + - email@example2.com + enabled: true + serverSecurityAlertPolicyIdSelector: + matchLabels: + testing.upbound.io/example-name: example + storageAccountAccessKeySecretRef: + key: attribute.primary_access_key + name: example-storage-account + namespace: upbound-system + storageContainerPath: ${azurerm_storage_account.example.primary_blob_endpoint}${azurerm_storage_container.example.name}/ + +--- + +apiVersion: sql.azure.upbound.io/v1beta2 +kind: MSSQLServer +metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta2/mssqlservervulnerabilityassessment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + administratorLogin: 4dm1n157r470r + administratorLoginPasswordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + version: "12.0" + +--- + +apiVersion: sql.azure.upbound.io/v1beta1 +kind: MSSQLServerSecurityAlertPolicy +metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta2/mssqlservervulnerabilityassessment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + serverNameSelector: + matchLabels: + testing.upbound.io/example-name: example + state: Enabled + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta2/mssqlservervulnerabilityassessment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: storage.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta2/mssqlservervulnerabilityassessment + labels: + 
testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountReplicationType: GRS + accountTier: Standard + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: storage.azure.upbound.io/v1beta1 +kind: Container +metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta2/mssqlservervulnerabilityassessment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + containerAccessType: private + storageAccountNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/storage/v1beta1/accountnetworkrules.yaml b/examples-generated/storage/v1beta1/accountnetworkrules.yaml index d46dceab5..af8124893 100644 --- a/examples-generated/storage/v1beta1/accountnetworkrules.yaml +++ b/examples-generated/storage/v1beta1/accountnetworkrules.yaml @@ -35,7 +35,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: @@ -56,7 +56,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: Subnet metadata: annotations: @@ -79,7 +79,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: VirtualNetwork metadata: annotations: diff --git a/examples-generated/storage/v1beta1/blob.yaml b/examples-generated/storage/v1beta1/blob.yaml index 70ac08084..a71311600 100644 --- a/examples-generated/storage/v1beta1/blob.yaml +++ b/examples-generated/storage/v1beta1/blob.yaml @@ -33,7 +33,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: diff --git a/examples-generated/storage/v1beta1/container.yaml b/examples-generated/storage/v1beta1/container.yaml index 78b627e4e..999a0e601 100644 --- 
a/examples-generated/storage/v1beta1/container.yaml +++ b/examples-generated/storage/v1beta1/container.yaml @@ -29,7 +29,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: diff --git a/examples-generated/storage/v1beta1/datalakegen2filesystem.yaml b/examples-generated/storage/v1beta1/datalakegen2filesystem.yaml index dfc9e3067..fd32177d7 100644 --- a/examples-generated/storage/v1beta1/datalakegen2filesystem.yaml +++ b/examples-generated/storage/v1beta1/datalakegen2filesystem.yaml @@ -30,7 +30,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: diff --git a/examples-generated/storage/v1beta1/datalakegen2path.yaml b/examples-generated/storage/v1beta1/datalakegen2path.yaml index 9d8056e03..12c3038dc 100644 --- a/examples-generated/storage/v1beta1/datalakegen2path.yaml +++ b/examples-generated/storage/v1beta1/datalakegen2path.yaml @@ -33,7 +33,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: diff --git a/examples-generated/storage/v1beta1/encryptionscope.yaml b/examples-generated/storage/v1beta1/encryptionscope.yaml index 4cfeba6f1..bb36f1757 100644 --- a/examples-generated/storage/v1beta1/encryptionscope.yaml +++ b/examples-generated/storage/v1beta1/encryptionscope.yaml @@ -29,7 +29,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: diff --git a/examples-generated/storage/v1beta1/objectreplication.yaml b/examples-generated/storage/v1beta1/objectreplication.yaml index 763379594..8f6bc9307 100644 --- a/examples-generated/storage/v1beta1/objectreplication.yaml +++ b/examples-generated/storage/v1beta1/objectreplication.yaml @@ -52,7 +52,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 
+apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: @@ -74,7 +74,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: diff --git a/examples-generated/storage/v1beta1/queue.yaml b/examples-generated/storage/v1beta1/queue.yaml index 4b0453502..abd50354a 100644 --- a/examples-generated/storage/v1beta1/queue.yaml +++ b/examples-generated/storage/v1beta1/queue.yaml @@ -28,7 +28,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: diff --git a/examples-generated/storage/v1beta1/share.yaml b/examples-generated/storage/v1beta1/share.yaml index e6d0be684..a29cd132b 100644 --- a/examples-generated/storage/v1beta1/share.yaml +++ b/examples-generated/storage/v1beta1/share.yaml @@ -35,7 +35,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: diff --git a/examples-generated/storage/v1beta1/sharedirectory.yaml b/examples-generated/storage/v1beta1/sharedirectory.yaml index 59e345896..db9e78347 100644 --- a/examples-generated/storage/v1beta1/sharedirectory.yaml +++ b/examples-generated/storage/v1beta1/sharedirectory.yaml @@ -32,7 +32,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: diff --git a/examples-generated/storage/v1beta1/table.yaml b/examples-generated/storage/v1beta1/table.yaml index 879da753d..b571ad7dc 100644 --- a/examples-generated/storage/v1beta1/table.yaml +++ b/examples-generated/storage/v1beta1/table.yaml @@ -29,7 +29,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: diff --git a/examples-generated/storage/v1beta1/tableentity.yaml 
b/examples-generated/storage/v1beta1/tableentity.yaml index c2c2f18ac..39887b734 100644 --- a/examples-generated/storage/v1beta1/tableentity.yaml +++ b/examples-generated/storage/v1beta1/tableentity.yaml @@ -35,7 +35,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: diff --git a/examples-generated/storage/v1beta2/account.yaml b/examples-generated/storage/v1beta2/account.yaml new file mode 100644 index 000000000..d7c0383b2 --- /dev/null +++ b/examples-generated/storage/v1beta2/account.yaml @@ -0,0 +1,32 @@ +apiVersion: storage.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: storage/v1beta2/account + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountReplicationType: GRS + accountTier: Standard + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + tags: + environment: staging + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: storage/v1beta2/account + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/storage/v1beta2/accountlocaluser.yaml b/examples-generated/storage/v1beta2/accountlocaluser.yaml new file mode 100644 index 000000000..c8b118179 --- /dev/null +++ b/examples-generated/storage/v1beta2/accountlocaluser.yaml @@ -0,0 +1,80 @@ +apiVersion: storage.azure.upbound.io/v1beta2 +kind: AccountLocalUser +metadata: + annotations: + meta.upbound.io/example-id: storage/v1beta2/accountlocaluser + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + homeDirectory: example_path + permissionScope: + - permissions: + - create: true + read: true + resourceNameSelector: + matchLabels: + testing.upbound.io/example-name: example + 
service: blob + sshAuthorizedKey: + - description: key1 + key: ${local.first_public_key} + - description: key2 + key: ${local.second_public_key} + sshKeyEnabled: true + sshPasswordEnabled: true + storageAccountIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: storage/v1beta2/accountlocaluser + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: WestEurope + +--- + +apiVersion: storage.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: storage/v1beta2/accountlocaluser + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountKind: StorageV2 + accountReplicationType: LRS + accountTier: Standard + isHnsEnabled: true + location: WestEurope + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: storage.azure.upbound.io/v1beta1 +kind: Container +metadata: + annotations: + meta.upbound.io/example-id: storage/v1beta2/accountlocaluser + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + storageAccountNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/storage/v1beta2/blobinventorypolicy.yaml b/examples-generated/storage/v1beta2/blobinventorypolicy.yaml new file mode 100644 index 000000000..6109b0b0e --- /dev/null +++ b/examples-generated/storage/v1beta2/blobinventorypolicy.yaml @@ -0,0 +1,76 @@ +apiVersion: storage.azure.upbound.io/v1beta2 +kind: BlobInventoryPolicy +metadata: + annotations: + meta.upbound.io/example-id: storage/v1beta2/blobinventorypolicy + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + rules: + - format: Csv + name: rule1 + schedule: Daily + schemaFields: + - Name + - Last-Modified + scope: 
Container + storageContainerNameSelector: + matchLabels: + testing.upbound.io/example-name: example + storageAccountIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: storage/v1beta2/blobinventorypolicy + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: storage.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: storage/v1beta2/blobinventorypolicy + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountReplicationType: LRS + accountTier: Standard + blobProperties: + - versioningEnabled: true + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: storage.azure.upbound.io/v1beta1 +kind: Container +metadata: + annotations: + meta.upbound.io/example-id: storage/v1beta2/blobinventorypolicy + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + containerAccessType: private + storageAccountNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/storage/v1beta2/managementpolicy.yaml b/examples-generated/storage/v1beta2/managementpolicy.yaml new file mode 100644 index 000000000..17e1856ad --- /dev/null +++ b/examples-generated/storage/v1beta2/managementpolicy.yaml @@ -0,0 +1,87 @@ +apiVersion: storage.azure.upbound.io/v1beta2 +kind: ManagementPolicy +metadata: + annotations: + meta.upbound.io/example-id: storage/v1beta2/managementpolicy + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + rule: + - actions: + - baseBlob: + - deleteAfterDaysSinceModificationGreaterThan: 100 + tierToArchiveAfterDaysSinceModificationGreaterThan: 50 + 
tierToCoolAfterDaysSinceModificationGreaterThan: 10 + snapshot: + - deleteAfterDaysSinceCreationGreaterThan: 30 + enabled: true + filters: + - blobTypes: + - blockBlob + matchBlobIndexTag: + - name: tag1 + operation: == + value: val1 + prefixMatch: + - container1/prefix1 + name: rule1 + - actions: + - baseBlob: + - deleteAfterDaysSinceModificationGreaterThan: 101 + tierToArchiveAfterDaysSinceModificationGreaterThan: 51 + tierToCoolAfterDaysSinceModificationGreaterThan: 11 + snapshot: + - changeTierToArchiveAfterDaysSinceCreation: 90 + changeTierToCoolAfterDaysSinceCreation: 23 + deleteAfterDaysSinceCreationGreaterThan: 31 + version: + - changeTierToArchiveAfterDaysSinceCreation: 9 + changeTierToCoolAfterDaysSinceCreation: 90 + deleteAfterDaysSinceCreation: 3 + enabled: false + filters: + - blobTypes: + - blockBlob + prefixMatch: + - container2/prefix1 + - container2/prefix2 + name: rule2 + storageAccountIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: storage/v1beta2/managementpolicy + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: storage.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: storage/v1beta2/managementpolicy + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountKind: BlobStorage + accountReplicationType: LRS + accountTier: Standard + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/storagecache/v1beta1/hpccacheaccesspolicy.yaml b/examples-generated/storagecache/v1beta1/hpccacheaccesspolicy.yaml index 61d8655cd..c1eb7a7b2 100644 --- a/examples-generated/storagecache/v1beta1/hpccacheaccesspolicy.yaml +++ 
b/examples-generated/storagecache/v1beta1/hpccacheaccesspolicy.yaml @@ -17,7 +17,7 @@ spec: --- -apiVersion: storagecache.azure.upbound.io/v1beta1 +apiVersion: storagecache.azure.upbound.io/v1beta2 kind: HPCCache metadata: annotations: @@ -53,7 +53,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: Subnet metadata: annotations: @@ -74,7 +74,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: VirtualNetwork metadata: annotations: diff --git a/examples-generated/storagecache/v1beta1/hpccacheblobnfstarget.yaml b/examples-generated/storagecache/v1beta1/hpccacheblobnfstarget.yaml index 70d66c7a0..dc11ac68e 100644 --- a/examples-generated/storagecache/v1beta1/hpccacheblobnfstarget.yaml +++ b/examples-generated/storagecache/v1beta1/hpccacheblobnfstarget.yaml @@ -20,7 +20,7 @@ spec: --- -apiVersion: storagecache.azure.upbound.io/v1beta1 +apiVersion: storagecache.azure.upbound.io/v1beta2 kind: HPCCache metadata: annotations: @@ -166,7 +166,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: @@ -193,7 +193,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: Subnet metadata: annotations: @@ -216,7 +216,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: VirtualNetwork metadata: annotations: diff --git a/examples-generated/storagecache/v1beta1/hpccacheblobtarget.yaml b/examples-generated/storagecache/v1beta1/hpccacheblobtarget.yaml index 91e738ca2..003b13131 100644 --- a/examples-generated/storagecache/v1beta1/hpccacheblobtarget.yaml +++ b/examples-generated/storagecache/v1beta1/hpccacheblobtarget.yaml @@ -21,7 +21,7 @@ spec: --- -apiVersion: storagecache.azure.upbound.io/v1beta1 +apiVersion: storagecache.azure.upbound.io/v1beta2 kind: HPCCache 
metadata: annotations: @@ -89,7 +89,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: @@ -124,7 +124,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: Subnet metadata: annotations: @@ -145,7 +145,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: VirtualNetwork metadata: annotations: diff --git a/examples-generated/storagecache/v1beta1/hpccachenfstarget.yaml b/examples-generated/storagecache/v1beta1/hpccachenfstarget.yaml index 6ef421007..b8dce55b6 100644 --- a/examples-generated/storagecache/v1beta1/hpccachenfstarget.yaml +++ b/examples-generated/storagecache/v1beta1/hpccachenfstarget.yaml @@ -25,7 +25,7 @@ spec: --- -apiVersion: storagecache.azure.upbound.io/v1beta1 +apiVersion: storagecache.azure.upbound.io/v1beta2 kind: HPCCache metadata: annotations: @@ -47,7 +47,7 @@ spec: --- -apiVersion: compute.azure.upbound.io/v1beta1 +apiVersion: compute.azure.upbound.io/v1beta2 kind: LinuxVirtualMachine metadata: annotations: @@ -120,7 +120,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: Subnet metadata: annotations: @@ -141,7 +141,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: Subnet metadata: annotations: @@ -162,7 +162,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: VirtualNetwork metadata: annotations: diff --git a/examples-generated/storagecache/v1beta2/hpccache.yaml b/examples-generated/storagecache/v1beta2/hpccache.yaml new file mode 100644 index 000000000..96c3f088a --- /dev/null +++ b/examples-generated/storagecache/v1beta2/hpccache.yaml @@ -0,0 +1,73 @@ +apiVersion: storagecache.azure.upbound.io/v1beta2 +kind: HPCCache +metadata: + annotations: + 
meta.upbound.io/example-id: storagecache/v1beta2/hpccache + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + cacheSizeInGb: 3072 + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: Standard_2G + subnetIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: storagecache/v1beta2/hpccache + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: storagecache/v1beta2/hpccache + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressPrefixes: + - 10.0.1.0/24 + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + virtualNetworkNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: VirtualNetwork +metadata: + annotations: + meta.upbound.io/example-id: storagecache/v1beta2/hpccache + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + addressSpace: + - 10.0.0.0/16 + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/storagepool/v1beta1/diskpool.yaml b/examples-generated/storagepool/v1beta1/diskpool.yaml index f8a2f4676..e1078b169 100644 --- a/examples-generated/storagepool/v1beta1/diskpool.yaml +++ b/examples-generated/storagepool/v1beta1/diskpool.yaml @@ -35,7 +35,7 @@ spec: --- -apiVersion: network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: Subnet metadata: annotations: @@ -62,7 +62,7 @@ spec: --- -apiVersion: 
network.azure.upbound.io/v1beta1 +apiVersion: network.azure.upbound.io/v1beta2 kind: VirtualNetwork metadata: annotations: diff --git a/examples-generated/streamanalytics/v1beta1/managedprivateendpoint.yaml b/examples-generated/streamanalytics/v1beta1/managedprivateendpoint.yaml index de439e5b6..758132c6d 100644 --- a/examples-generated/streamanalytics/v1beta1/managedprivateendpoint.yaml +++ b/examples-generated/streamanalytics/v1beta1/managedprivateendpoint.yaml @@ -35,7 +35,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: diff --git a/examples-generated/streamanalytics/v1beta1/outputfunction.yaml b/examples-generated/streamanalytics/v1beta1/outputfunction.yaml index ab84c8df6..b769a69f5 100644 --- a/examples-generated/streamanalytics/v1beta1/outputfunction.yaml +++ b/examples-generated/streamanalytics/v1beta1/outputfunction.yaml @@ -25,7 +25,7 @@ spec: --- -apiVersion: web.azure.upbound.io/v1beta1 +apiVersion: web.azure.upbound.io/v1beta2 kind: AppServicePlan metadata: annotations: @@ -47,7 +47,7 @@ spec: --- -apiVersion: web.azure.upbound.io/v1beta1 +apiVersion: web.azure.upbound.io/v1beta2 kind: FunctionApp metadata: annotations: @@ -91,7 +91,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: @@ -110,7 +110,7 @@ spec: --- -apiVersion: streamanalytics.azure.upbound.io/v1beta1 +apiVersion: streamanalytics.azure.upbound.io/v1beta2 kind: Job metadata: annotations: diff --git a/examples-generated/streamanalytics/v1beta1/outputsynapse.yaml b/examples-generated/streamanalytics/v1beta1/outputsynapse.yaml index 55409e1bd..46dff56fb 100644 --- a/examples-generated/streamanalytics/v1beta1/outputsynapse.yaml +++ b/examples-generated/streamanalytics/v1beta1/outputsynapse.yaml @@ -41,7 +41,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: 
storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: @@ -78,7 +78,7 @@ spec: --- -apiVersion: synapse.azure.upbound.io/v1beta1 +apiVersion: synapse.azure.upbound.io/v1beta2 kind: Workspace metadata: annotations: diff --git a/examples-generated/streamanalytics/v1beta1/outputtable.yaml b/examples-generated/streamanalytics/v1beta1/outputtable.yaml index bba4104f3..95c5434e9 100644 --- a/examples-generated/streamanalytics/v1beta1/outputtable.yaml +++ b/examples-generated/streamanalytics/v1beta1/outputtable.yaml @@ -42,7 +42,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: diff --git a/examples-generated/streamanalytics/v1beta1/referenceinputmssql.yaml b/examples-generated/streamanalytics/v1beta1/referenceinputmssql.yaml index 84765ec15..616bb0fcc 100644 --- a/examples-generated/streamanalytics/v1beta1/referenceinputmssql.yaml +++ b/examples-generated/streamanalytics/v1beta1/referenceinputmssql.yaml @@ -32,7 +32,7 @@ spec: --- -apiVersion: sql.azure.upbound.io/v1beta1 +apiVersion: sql.azure.upbound.io/v1beta2 kind: MSSQLDatabase metadata: annotations: @@ -48,7 +48,7 @@ spec: --- -apiVersion: sql.azure.upbound.io/v1beta1 +apiVersion: sql.azure.upbound.io/v1beta2 kind: MSSQLServer metadata: annotations: diff --git a/examples-generated/streamanalytics/v1beta2/functionjavascriptuda.yaml b/examples-generated/streamanalytics/v1beta2/functionjavascriptuda.yaml new file mode 100644 index 000000000..6c0f5745d --- /dev/null +++ b/examples-generated/streamanalytics/v1beta2/functionjavascriptuda.yaml @@ -0,0 +1,31 @@ +apiVersion: streamanalytics.azure.upbound.io/v1beta2 +kind: FunctionJavascriptUda +metadata: + annotations: + meta.upbound.io/example-id: streamanalytics/v1beta2/functionjavascriptuda + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + input: + - type: bigint + output: + - type: bigint + script: | + function main() { 
+ this.init = function () { + this.state = 0; + } + + this.accumulate = function (value, timestamp) { + this.state += value; + } + + this.computeResult = function () { + return this.state; + } + } + streamAnalyticsJobIdSelector: + matchLabels: + testing.upbound.io/example-name: azurerm_stream_analytics_job diff --git a/examples-generated/streamanalytics/v1beta2/job.yaml b/examples-generated/streamanalytics/v1beta2/job.yaml new file mode 100644 index 000000000..79285145c --- /dev/null +++ b/examples-generated/streamanalytics/v1beta2/job.yaml @@ -0,0 +1,41 @@ +apiVersion: streamanalytics.azure.upbound.io/v1beta2 +kind: Job +metadata: + annotations: + meta.upbound.io/example-id: streamanalytics/v1beta2/job + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + compatibilityLevel: "1.2" + dataLocale: en-GB + eventsLateArrivalMaxDelayInSeconds: 60 + eventsOutOfOrderMaxDelayInSeconds: 50 + eventsOutOfOrderPolicy: Adjust + location: West Europe + outputErrorPolicy: Drop + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + streamingUnits: 3 + tags: + environment: Example + transformationQuery: |2 + SELECT * + INTO [YourOutputAlias] + FROM [YourInputAlias] + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: streamanalytics/v1beta2/job + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/streamanalytics/v1beta2/outputblob.yaml b/examples-generated/streamanalytics/v1beta2/outputblob.yaml new file mode 100644 index 000000000..1babe844a --- /dev/null +++ b/examples-generated/streamanalytics/v1beta2/outputblob.yaml @@ -0,0 +1,83 @@ +apiVersion: streamanalytics.azure.upbound.io/v1beta2 +kind: OutputBlob +metadata: + annotations: + meta.upbound.io/example-id: streamanalytics/v1beta2/outputblob + labels: + testing.upbound.io/example-name: example 
+ name: example +spec: + forProvider: + dateFormat: yyyy-MM-dd + pathPattern: some-pattern + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: azurerm_stream_analytics_job + serialization: + - encoding: UTF8 + fieldDelimiter: ',' + type: Csv + storageAccountKeySecretRef: + key: attribute.primary_access_key + name: example-storage-account + namespace: upbound-system + storageAccountNameSelector: + matchLabels: + testing.upbound.io/example-name: example + storageContainerNameSelector: + matchLabels: + testing.upbound.io/example-name: example + streamAnalyticsJobNameSelector: + matchLabels: + testing.upbound.io/example-name: azurerm_stream_analytics_job + timeFormat: HH + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: streamanalytics/v1beta2/outputblob + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: storage.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: streamanalytics/v1beta2/outputblob + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountReplicationType: LRS + accountTier: Standard + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: storage.azure.upbound.io/v1beta1 +kind: Container +metadata: + annotations: + meta.upbound.io/example-id: streamanalytics/v1beta2/outputblob + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + containerAccessType: private + storageAccountNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/streamanalytics/v1beta2/outputeventhub.yaml b/examples-generated/streamanalytics/v1beta2/outputeventhub.yaml new file mode 100644 index 000000000..b3e1c9186 --- /dev/null +++ 
b/examples-generated/streamanalytics/v1beta2/outputeventhub.yaml @@ -0,0 +1,81 @@ +apiVersion: streamanalytics.azure.upbound.io/v1beta2 +kind: OutputEventHub +metadata: + annotations: + meta.upbound.io/example-id: streamanalytics/v1beta2/outputeventhub + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + eventhubNameSelector: + matchLabels: + testing.upbound.io/example-name: example + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: azurerm_stream_analytics_job + serialization: + - type: Avro + servicebusNamespaceSelector: + matchLabels: + testing.upbound.io/example-name: example + sharedAccessPolicyKeySecretRef: + key: attribute.default_primary_key + name: example-eventhub-namespace + namespace: upbound-system + sharedAccessPolicyName: RootManageSharedAccessKey + streamAnalyticsJobName: ${data.azurerm_stream_analytics_job.example.name} + +--- + +apiVersion: eventhub.azure.upbound.io/v1beta2 +kind: EventHub +metadata: + annotations: + meta.upbound.io/example-id: streamanalytics/v1beta2/outputeventhub + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + messageRetention: 1 + namespaceNameSelector: + matchLabels: + testing.upbound.io/example-name: example + partitionCount: 2 + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: eventhub.azure.upbound.io/v1beta2 +kind: EventHubNamespace +metadata: + annotations: + meta.upbound.io/example-id: streamanalytics/v1beta2/outputeventhub + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + capacity: 1 + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sku: Standard + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: streamanalytics/v1beta2/outputeventhub + labels: + 
testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/streamanalytics/v1beta2/outputservicebusqueue.yaml b/examples-generated/streamanalytics/v1beta2/outputservicebusqueue.yaml new file mode 100644 index 000000000..c11238593 --- /dev/null +++ b/examples-generated/streamanalytics/v1beta2/outputservicebusqueue.yaml @@ -0,0 +1,80 @@ +apiVersion: streamanalytics.azure.upbound.io/v1beta2 +kind: OutputServiceBusQueue +metadata: + annotations: + meta.upbound.io/example-id: streamanalytics/v1beta2/outputservicebusqueue + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + name: blob-storage-output + queueNameSelector: + matchLabels: + testing.upbound.io/example-name: example + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: azurerm_stream_analytics_job + serialization: + - format: Array + type: Csv + servicebusNamespaceSelector: + matchLabels: + testing.upbound.io/example-name: example + sharedAccessPolicyKeySecretRef: + key: attribute.default_primary_key + name: example-servicebus-namespace + namespace: upbound-system + sharedAccessPolicyName: RootManageSharedAccessKey + streamAnalyticsJobNameSelector: + matchLabels: + testing.upbound.io/example-name: azurerm_stream_analytics_job + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: streamanalytics/v1beta2/outputservicebusqueue + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: servicebus.azure.upbound.io/v1beta2 +kind: ServiceBusNamespace +metadata: + annotations: + meta.upbound.io/example-id: streamanalytics/v1beta2/outputservicebusqueue + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + 
testing.upbound.io/example-name: example + sku: Standard + +--- + +apiVersion: servicebus.azure.upbound.io/v1beta1 +kind: Queue +metadata: + annotations: + meta.upbound.io/example-id: streamanalytics/v1beta2/outputservicebusqueue + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + enablePartitioning: true + namespaceIdSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/streamanalytics/v1beta2/outputservicebustopic.yaml b/examples-generated/streamanalytics/v1beta2/outputservicebustopic.yaml new file mode 100644 index 000000000..40602b3b7 --- /dev/null +++ b/examples-generated/streamanalytics/v1beta2/outputservicebustopic.yaml @@ -0,0 +1,83 @@ +apiVersion: streamanalytics.azure.upbound.io/v1beta2 +kind: OutputServiceBusTopic +metadata: + annotations: + meta.upbound.io/example-id: streamanalytics/v1beta2/outputservicebustopic + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + name: service-bus-topic-output + propertyColumns: + - col1 + - col2 + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: azurerm_stream_analytics_job + serialization: + - format: Array + type: Csv + servicebusNamespaceSelector: + matchLabels: + testing.upbound.io/example-name: example + sharedAccessPolicyKeySecretRef: + key: attribute.default_primary_key + name: example-servicebus-namespace + namespace: upbound-system + sharedAccessPolicyName: RootManageSharedAccessKey + streamAnalyticsJobNameSelector: + matchLabels: + testing.upbound.io/example-name: azurerm_stream_analytics_job + topicNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: streamanalytics/v1beta2/outputservicebustopic + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + 
+--- + +apiVersion: servicebus.azure.upbound.io/v1beta2 +kind: ServiceBusNamespace +metadata: + annotations: + meta.upbound.io/example-id: streamanalytics/v1beta2/outputservicebustopic + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sku: Standard + +--- + +apiVersion: servicebus.azure.upbound.io/v1beta1 +kind: Topic +metadata: + annotations: + meta.upbound.io/example-id: streamanalytics/v1beta2/outputservicebustopic + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + enablePartitioning: true + namespaceIdSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/streamanalytics/v1beta2/referenceinputblob.yaml b/examples-generated/streamanalytics/v1beta2/referenceinputblob.yaml new file mode 100644 index 000000000..c702e4f85 --- /dev/null +++ b/examples-generated/streamanalytics/v1beta2/referenceinputblob.yaml @@ -0,0 +1,83 @@ +apiVersion: streamanalytics.azure.upbound.io/v1beta2 +kind: ReferenceInputBlob +metadata: + annotations: + meta.upbound.io/example-id: streamanalytics/v1beta2/referenceinputblob + labels: + testing.upbound.io/example-name: test + name: test +spec: + forProvider: + dateFormat: yyyy/MM/dd + name: blob-reference-input + pathPattern: some-random-pattern + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: azurerm_stream_analytics_job + serialization: + - encoding: UTF8 + type: Json + storageAccountKeySecretRef: + key: attribute.primary_access_key + name: example-storage-account + namespace: upbound-system + storageAccountNameSelector: + matchLabels: + testing.upbound.io/example-name: example + storageContainerNameSelector: + matchLabels: + testing.upbound.io/example-name: example + streamAnalyticsJobNameSelector: + matchLabels: + testing.upbound.io/example-name: 
azurerm_stream_analytics_job + timeFormat: HH + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: streamanalytics/v1beta2/referenceinputblob + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: storage.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: streamanalytics/v1beta2/referenceinputblob + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountReplicationType: LRS + accountTier: Standard + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: storage.azure.upbound.io/v1beta1 +kind: Container +metadata: + annotations: + meta.upbound.io/example-id: streamanalytics/v1beta2/referenceinputblob + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + containerAccessType: private + storageAccountNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/streamanalytics/v1beta2/streaminputblob.yaml b/examples-generated/streamanalytics/v1beta2/streaminputblob.yaml new file mode 100644 index 000000000..d601fc7d0 --- /dev/null +++ b/examples-generated/streamanalytics/v1beta2/streaminputblob.yaml @@ -0,0 +1,83 @@ +apiVersion: streamanalytics.azure.upbound.io/v1beta2 +kind: StreamInputBlob +metadata: + annotations: + meta.upbound.io/example-id: streamanalytics/v1beta2/streaminputblob + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + dateFormat: yyyy/MM/dd + name: blob-stream-input + pathPattern: some-random-pattern + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: azurerm_stream_analytics_job + serialization: + - encoding: UTF8 + type: Json + storageAccountKeySecretRef: + key: attribute.primary_access_key 
+ name: example-storage-account + namespace: upbound-system + storageAccountNameSelector: + matchLabels: + testing.upbound.io/example-name: example + storageContainerNameSelector: + matchLabels: + testing.upbound.io/example-name: example + streamAnalyticsJobNameSelector: + matchLabels: + testing.upbound.io/example-name: azurerm_stream_analytics_job + timeFormat: HH + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: streamanalytics/v1beta2/streaminputblob + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: storage.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: streamanalytics/v1beta2/streaminputblob + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountReplicationType: LRS + accountTier: Standard + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: storage.azure.upbound.io/v1beta1 +kind: Container +metadata: + annotations: + meta.upbound.io/example-id: streamanalytics/v1beta2/streaminputblob + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + containerAccessType: private + storageAccountNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/streamanalytics/v1beta2/streaminputeventhub.yaml b/examples-generated/streamanalytics/v1beta2/streaminputeventhub.yaml new file mode 100644 index 000000000..68183275e --- /dev/null +++ b/examples-generated/streamanalytics/v1beta2/streaminputeventhub.yaml @@ -0,0 +1,110 @@ +apiVersion: streamanalytics.azure.upbound.io/v1beta2 +kind: StreamInputEventHub +metadata: + annotations: + meta.upbound.io/example-id: streamanalytics/v1beta2/streaminputeventhub + labels: + testing.upbound.io/example-name: example + name: 
example +spec: + forProvider: + eventhubConsumerGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + eventhubNameSelector: + matchLabels: + testing.upbound.io/example-name: example + name: eventhub-stream-input + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: azurerm_stream_analytics_job + serialization: + - encoding: UTF8 + type: Json + servicebusNamespaceSelector: + matchLabels: + testing.upbound.io/example-name: example + sharedAccessPolicyKeySecretRef: + key: attribute.default_primary_key + name: example-eventhub-namespace + namespace: upbound-system + sharedAccessPolicyName: RootManageSharedAccessKey + streamAnalyticsJobNameSelector: + matchLabels: + testing.upbound.io/example-name: azurerm_stream_analytics_job + +--- + +apiVersion: eventhub.azure.upbound.io/v1beta2 +kind: EventHub +metadata: + annotations: + meta.upbound.io/example-id: streamanalytics/v1beta2/streaminputeventhub + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + messageRetention: 1 + namespaceNameSelector: + matchLabels: + testing.upbound.io/example-name: example + partitionCount: 2 + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: eventhub.azure.upbound.io/v1beta1 +kind: ConsumerGroup +metadata: + annotations: + meta.upbound.io/example-id: streamanalytics/v1beta2/streaminputeventhub + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + eventhubNameSelector: + matchLabels: + testing.upbound.io/example-name: example + namespaceNameSelector: + matchLabels: + testing.upbound.io/example-name: example + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: eventhub.azure.upbound.io/v1beta2 +kind: EventHubNamespace +metadata: + annotations: + meta.upbound.io/example-id: streamanalytics/v1beta2/streaminputeventhub + labels: + 
testing.upbound.io/example-name: example + name: example +spec: + forProvider: + capacity: 1 + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sku: Standard + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: streamanalytics/v1beta2/streaminputeventhub + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/streamanalytics/v1beta2/streaminputiothub.yaml b/examples-generated/streamanalytics/v1beta2/streaminputiothub.yaml new file mode 100644 index 000000000..098120037 --- /dev/null +++ b/examples-generated/streamanalytics/v1beta2/streaminputiothub.yaml @@ -0,0 +1,66 @@ +apiVersion: streamanalytics.azure.upbound.io/v1beta2 +kind: StreamInputIOTHub +metadata: + annotations: + meta.upbound.io/example-id: streamanalytics/v1beta2/streaminputiothub + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + endpoint: messages/events + eventhubConsumerGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + iothubNamespaceSelector: + matchLabels: + testing.upbound.io/example-name: example + name: example-iothub-input + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: azurerm_stream_analytics_job + serialization: + - encoding: UTF8 + type: Json + sharedAccessPolicyKeySecretRef: + key: attribute.shared_access_policy[0].primary_key + name: example-iothub + namespace: upbound-system + sharedAccessPolicyName: iothubowner + streamAnalyticsJobNameSelector: + matchLabels: + testing.upbound.io/example-name: azurerm_stream_analytics_job + +--- + +apiVersion: devices.azure.upbound.io/v1beta2 +kind: IOTHub +metadata: + annotations: + meta.upbound.io/example-id: streamanalytics/v1beta2/streaminputiothub + labels: + testing.upbound.io/example-name: example + name: example +spec: + 
forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sku: + - capacity: "1" + name: S1 + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: streamanalytics/v1beta2/streaminputiothub + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/synapse/v1beta1/firewallrule.yaml b/examples-generated/synapse/v1beta1/firewallrule.yaml index 05e5c2916..768a1a45e 100644 --- a/examples-generated/synapse/v1beta1/firewallrule.yaml +++ b/examples-generated/synapse/v1beta1/firewallrule.yaml @@ -30,7 +30,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: @@ -67,7 +67,7 @@ spec: --- -apiVersion: synapse.azure.upbound.io/v1beta1 +apiVersion: synapse.azure.upbound.io/v1beta2 kind: Workspace metadata: annotations: diff --git a/examples-generated/synapse/v1beta1/integrationruntimeazure.yaml b/examples-generated/synapse/v1beta1/integrationruntimeazure.yaml index 18711dcb4..f07a9716c 100644 --- a/examples-generated/synapse/v1beta1/integrationruntimeazure.yaml +++ b/examples-generated/synapse/v1beta1/integrationruntimeazure.yaml @@ -29,7 +29,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: @@ -99,7 +99,7 @@ spec: --- -apiVersion: synapse.azure.upbound.io/v1beta1 +apiVersion: synapse.azure.upbound.io/v1beta2 kind: Workspace metadata: annotations: diff --git a/examples-generated/synapse/v1beta1/integrationruntimeselfhosted.yaml b/examples-generated/synapse/v1beta1/integrationruntimeselfhosted.yaml index 3531b5c15..8658a946b 100644 --- a/examples-generated/synapse/v1beta1/integrationruntimeselfhosted.yaml +++ b/examples-generated/synapse/v1beta1/integrationruntimeselfhosted.yaml 
@@ -28,7 +28,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: @@ -98,7 +98,7 @@ spec: --- -apiVersion: synapse.azure.upbound.io/v1beta1 +apiVersion: synapse.azure.upbound.io/v1beta2 kind: Workspace metadata: annotations: diff --git a/examples-generated/synapse/v1beta1/managedprivateendpoint.yaml b/examples-generated/synapse/v1beta1/managedprivateendpoint.yaml index 3d9af4624..394a3dda5 100644 --- a/examples-generated/synapse/v1beta1/managedprivateendpoint.yaml +++ b/examples-generated/synapse/v1beta1/managedprivateendpoint.yaml @@ -33,7 +33,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: @@ -54,7 +54,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: @@ -108,7 +108,7 @@ spec: --- -apiVersion: synapse.azure.upbound.io/v1beta1 +apiVersion: synapse.azure.upbound.io/v1beta2 kind: Workspace metadata: annotations: diff --git a/examples-generated/synapse/v1beta1/roleassignment.yaml b/examples-generated/synapse/v1beta1/roleassignment.yaml index 8ea240ecd..1824ddca3 100644 --- a/examples-generated/synapse/v1beta1/roleassignment.yaml +++ b/examples-generated/synapse/v1beta1/roleassignment.yaml @@ -30,7 +30,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: @@ -85,7 +85,7 @@ spec: --- -apiVersion: synapse.azure.upbound.io/v1beta1 +apiVersion: synapse.azure.upbound.io/v1beta2 kind: Workspace metadata: annotations: diff --git a/examples-generated/synapse/v1beta1/sqlpoolextendedauditingpolicy.yaml b/examples-generated/synapse/v1beta1/sqlpoolextendedauditingpolicy.yaml index e2b039984..d5784c537 100644 --- a/examples-generated/synapse/v1beta1/sqlpoolextendedauditingpolicy.yaml +++ 
b/examples-generated/synapse/v1beta1/sqlpoolextendedauditingpolicy.yaml @@ -37,7 +37,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: @@ -56,7 +56,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: @@ -92,7 +92,7 @@ spec: --- -apiVersion: synapse.azure.upbound.io/v1beta1 +apiVersion: synapse.azure.upbound.io/v1beta2 kind: SQLPool metadata: annotations: @@ -110,7 +110,7 @@ spec: --- -apiVersion: synapse.azure.upbound.io/v1beta1 +apiVersion: synapse.azure.upbound.io/v1beta2 kind: Workspace metadata: annotations: diff --git a/examples-generated/synapse/v1beta1/sqlpoolsecurityalertpolicy.yaml b/examples-generated/synapse/v1beta1/sqlpoolsecurityalertpolicy.yaml index 5603c0e83..1e330f949 100644 --- a/examples-generated/synapse/v1beta1/sqlpoolsecurityalertpolicy.yaml +++ b/examples-generated/synapse/v1beta1/sqlpoolsecurityalertpolicy.yaml @@ -40,7 +40,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: @@ -59,7 +59,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: @@ -96,7 +96,7 @@ spec: --- -apiVersion: synapse.azure.upbound.io/v1beta1 +apiVersion: synapse.azure.upbound.io/v1beta2 kind: SQLPool metadata: annotations: @@ -114,7 +114,7 @@ spec: --- -apiVersion: synapse.azure.upbound.io/v1beta1 +apiVersion: synapse.azure.upbound.io/v1beta2 kind: Workspace metadata: annotations: diff --git a/examples-generated/synapse/v1beta1/sqlpoolworkloadclassifier.yaml b/examples-generated/synapse/v1beta1/sqlpoolworkloadclassifier.yaml index 0376a831c..0f18dc6ed 100644 --- a/examples-generated/synapse/v1beta1/sqlpoolworkloadclassifier.yaml +++ b/examples-generated/synapse/v1beta1/sqlpoolworkloadclassifier.yaml @@ -34,7 
+34,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: @@ -70,7 +70,7 @@ spec: --- -apiVersion: synapse.azure.upbound.io/v1beta1 +apiVersion: synapse.azure.upbound.io/v1beta2 kind: SQLPool metadata: annotations: @@ -110,7 +110,7 @@ spec: --- -apiVersion: synapse.azure.upbound.io/v1beta1 +apiVersion: synapse.azure.upbound.io/v1beta2 kind: Workspace metadata: annotations: diff --git a/examples-generated/synapse/v1beta1/sqlpoolworkloadgroup.yaml b/examples-generated/synapse/v1beta1/sqlpoolworkloadgroup.yaml index f0cfce57b..9a1ead445 100644 --- a/examples-generated/synapse/v1beta1/sqlpoolworkloadgroup.yaml +++ b/examples-generated/synapse/v1beta1/sqlpoolworkloadgroup.yaml @@ -34,7 +34,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: @@ -70,7 +70,7 @@ spec: --- -apiVersion: synapse.azure.upbound.io/v1beta1 +apiVersion: synapse.azure.upbound.io/v1beta2 kind: SQLPool metadata: annotations: @@ -88,7 +88,7 @@ spec: --- -apiVersion: synapse.azure.upbound.io/v1beta1 +apiVersion: synapse.azure.upbound.io/v1beta2 kind: Workspace metadata: annotations: diff --git a/examples-generated/synapse/v1beta1/workspaceaadadmin.yaml b/examples-generated/synapse/v1beta1/workspaceaadadmin.yaml index d9a9fc97d..eecc9b518 100644 --- a/examples-generated/synapse/v1beta1/workspaceaadadmin.yaml +++ b/examples-generated/synapse/v1beta1/workspaceaadadmin.yaml @@ -17,7 +17,7 @@ spec: --- -apiVersion: keyvault.azure.upbound.io/v1beta1 +apiVersion: keyvault.azure.upbound.io/v1beta2 kind: Vault metadata: annotations: @@ -61,7 +61,7 @@ spec: --- -apiVersion: keyvault.azure.upbound.io/v1beta1 +apiVersion: keyvault.azure.upbound.io/v1beta2 kind: Key metadata: annotations: @@ -97,7 +97,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: 
annotations: @@ -134,7 +134,7 @@ spec: --- -apiVersion: synapse.azure.upbound.io/v1beta1 +apiVersion: synapse.azure.upbound.io/v1beta2 kind: Workspace metadata: annotations: diff --git a/examples-generated/synapse/v1beta1/workspaceextendedauditingpolicy.yaml b/examples-generated/synapse/v1beta1/workspaceextendedauditingpolicy.yaml index fade0c7da..6c82776a7 100644 --- a/examples-generated/synapse/v1beta1/workspaceextendedauditingpolicy.yaml +++ b/examples-generated/synapse/v1beta1/workspaceextendedauditingpolicy.yaml @@ -37,7 +37,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: @@ -56,7 +56,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: @@ -92,7 +92,7 @@ spec: --- -apiVersion: synapse.azure.upbound.io/v1beta1 +apiVersion: synapse.azure.upbound.io/v1beta2 kind: Workspace metadata: annotations: diff --git a/examples-generated/synapse/v1beta1/workspacesecurityalertpolicy.yaml b/examples-generated/synapse/v1beta1/workspacesecurityalertpolicy.yaml index d7e63b7f5..2bca0030a 100644 --- a/examples-generated/synapse/v1beta1/workspacesecurityalertpolicy.yaml +++ b/examples-generated/synapse/v1beta1/workspacesecurityalertpolicy.yaml @@ -40,7 +40,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: @@ -59,7 +59,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: @@ -96,7 +96,7 @@ spec: --- -apiVersion: synapse.azure.upbound.io/v1beta1 +apiVersion: synapse.azure.upbound.io/v1beta2 kind: Workspace metadata: annotations: diff --git a/examples-generated/synapse/v1beta1/workspacesqlaadadmin.yaml b/examples-generated/synapse/v1beta1/workspacesqlaadadmin.yaml index 28af7c20f..8ace39cfa 100644 --- 
a/examples-generated/synapse/v1beta1/workspacesqlaadadmin.yaml +++ b/examples-generated/synapse/v1beta1/workspacesqlaadadmin.yaml @@ -17,7 +17,7 @@ spec: --- -apiVersion: keyvault.azure.upbound.io/v1beta1 +apiVersion: keyvault.azure.upbound.io/v1beta2 kind: Vault metadata: annotations: @@ -61,7 +61,7 @@ spec: --- -apiVersion: keyvault.azure.upbound.io/v1beta1 +apiVersion: keyvault.azure.upbound.io/v1beta2 kind: Key metadata: annotations: @@ -97,7 +97,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: @@ -134,7 +134,7 @@ spec: --- -apiVersion: synapse.azure.upbound.io/v1beta1 +apiVersion: synapse.azure.upbound.io/v1beta2 kind: Workspace metadata: annotations: diff --git a/examples-generated/synapse/v1beta2/linkedservice.yaml b/examples-generated/synapse/v1beta2/linkedservice.yaml new file mode 100644 index 000000000..2ccf8dc2b --- /dev/null +++ b/examples-generated/synapse/v1beta2/linkedservice.yaml @@ -0,0 +1,135 @@ +apiVersion: synapse.azure.upbound.io/v1beta2 +kind: LinkedService +metadata: + annotations: + meta.upbound.io/example-id: synapse/v1beta2/linkedservice + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + integrationRuntime: + - nameSelector: + matchLabels: + testing.upbound.io/example-name: example + synapseWorkspaceIdSelector: + matchLabels: + testing.upbound.io/example-name: example + type: AzureBlobStorage + typePropertiesJson: | + { + "connectionString": "${azurerm_storage_account.example.primary_connection_string}" + } + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: synapse/v1beta2/linkedservice + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: storage.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: 
synapse/v1beta2/linkedservice + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountKind: BlobStorage + accountReplicationType: LRS + accountTier: Standard + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: storage.azure.upbound.io/v1beta1 +kind: DataLakeGen2FileSystem +metadata: + annotations: + meta.upbound.io/example-id: synapse/v1beta2/linkedservice + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + storageAccountIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: synapse.azure.upbound.io/v1beta1 +kind: FirewallRule +metadata: + annotations: + meta.upbound.io/example-id: synapse/v1beta2/linkedservice + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + endIpAddress: 255.255.255.255 + startIpAddress: 0.0.0.0 + synapseWorkspaceIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: synapse.azure.upbound.io/v1beta1 +kind: IntegrationRuntimeAzure +metadata: + annotations: + meta.upbound.io/example-id: synapse/v1beta2/linkedservice + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + synapseWorkspaceIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: synapse.azure.upbound.io/v1beta2 +kind: Workspace +metadata: + annotations: + meta.upbound.io/example-id: synapse/v1beta2/linkedservice + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + identity: + - type: SystemAssigned + location: West Europe + managedVirtualNetworkEnabled: true + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sqlAdministratorLogin: sqladminuser + sqlAdministratorLoginPasswordSecretRef: + key: example-key + name: example-secret + 
namespace: upbound-system + storageDataLakeGen2FilesystemIdSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/synapse/v1beta2/sparkpool.yaml b/examples-generated/synapse/v1beta2/sparkpool.yaml new file mode 100644 index 000000000..dada94897 --- /dev/null +++ b/examples-generated/synapse/v1beta2/sparkpool.yaml @@ -0,0 +1,110 @@ +apiVersion: synapse.azure.upbound.io/v1beta2 +kind: SparkPool +metadata: + annotations: + meta.upbound.io/example-id: synapse/v1beta2/sparkpool + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + autoPause: + - delayInMinutes: 15 + autoScale: + - maxNodeCount: 50 + minNodeCount: 3 + cacheSize: 100 + libraryRequirement: + - content: | + appnope==0.1.0 + beautifulsoup4==4.6.3 + filename: requirements.txt + nodeSize: Small + nodeSizeFamily: MemoryOptimized + sparkConfig: + - content: | + spark.shuffle.spill true + filename: config.txt + synapseWorkspaceIdSelector: + matchLabels: + testing.upbound.io/example-name: example + tags: + ENV: Production + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: synapse/v1beta2/sparkpool + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: storage.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: synapse/v1beta2/sparkpool + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountKind: StorageV2 + accountReplicationType: LRS + accountTier: Standard + isHnsEnabled: "true" + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: storage.azure.upbound.io/v1beta1 +kind: DataLakeGen2FileSystem +metadata: + annotations: + meta.upbound.io/example-id: synapse/v1beta2/sparkpool + labels: + 
testing.upbound.io/example-name: example + name: example +spec: + forProvider: + storageAccountIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: synapse.azure.upbound.io/v1beta2 +kind: Workspace +metadata: + annotations: + meta.upbound.io/example-id: synapse/v1beta2/sparkpool + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + identity: + - type: SystemAssigned + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sqlAdministratorLogin: sqladminuser + sqlAdministratorLoginPasswordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + storageDataLakeGen2FilesystemIdSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/synapse/v1beta2/sqlpool.yaml b/examples-generated/synapse/v1beta2/sqlpool.yaml new file mode 100644 index 000000000..0c7aa16e2 --- /dev/null +++ b/examples-generated/synapse/v1beta2/sqlpool.yaml @@ -0,0 +1,93 @@ +apiVersion: synapse.azure.upbound.io/v1beta2 +kind: SQLPool +metadata: + annotations: + meta.upbound.io/example-id: synapse/v1beta2/sqlpool + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + createMode: Default + skuName: DW100c + storageAccountType: GRS + synapseWorkspaceIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: synapse/v1beta2/sqlpool + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: storage.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: synapse/v1beta2/sqlpool + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountKind: BlobStorage + accountReplicationType: LRS 
+ accountTier: Standard + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: storage.azure.upbound.io/v1beta1 +kind: DataLakeGen2FileSystem +metadata: + annotations: + meta.upbound.io/example-id: synapse/v1beta2/sqlpool + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + storageAccountIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: synapse.azure.upbound.io/v1beta2 +kind: Workspace +metadata: + annotations: + meta.upbound.io/example-id: synapse/v1beta2/sqlpool + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + identity: + - type: SystemAssigned + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sqlAdministratorLogin: sqladminuser + sqlAdministratorLoginPasswordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + storageDataLakeGen2FilesystemIdSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/synapse/v1beta2/workspace.yaml b/examples-generated/synapse/v1beta2/workspace.yaml new file mode 100644 index 000000000..d05eab600 --- /dev/null +++ b/examples-generated/synapse/v1beta2/workspace.yaml @@ -0,0 +1,81 @@ +apiVersion: synapse.azure.upbound.io/v1beta2 +kind: Workspace +metadata: + annotations: + meta.upbound.io/example-id: synapse/v1beta2/workspace + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + aadAdmin: + - login: AzureAD Admin + objectId: 00000000-0000-0000-0000-000000000000 + tenantId: 00000000-0000-0000-0000-000000000000 + identity: + - type: SystemAssigned + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sqlAdministratorLogin: sqladminuser + sqlAdministratorLoginPasswordSecretRef: + key: example-key + name: 
example-secret + namespace: upbound-system + storageDataLakeGen2FilesystemIdSelector: + matchLabels: + testing.upbound.io/example-name: example + tags: + Env: production + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: synapse/v1beta2/workspace + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: storage.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: synapse/v1beta2/workspace + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountKind: StorageV2 + accountReplicationType: LRS + accountTier: Standard + isHnsEnabled: "true" + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: storage.azure.upbound.io/v1beta1 +kind: DataLakeGen2FileSystem +metadata: + annotations: + meta.upbound.io/example-id: synapse/v1beta2/workspace + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + storageAccountIdSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/synapse/v1beta2/workspacevulnerabilityassessment.yaml b/examples-generated/synapse/v1beta2/workspacevulnerabilityassessment.yaml new file mode 100644 index 000000000..87bca5515 --- /dev/null +++ b/examples-generated/synapse/v1beta2/workspacevulnerabilityassessment.yaml @@ -0,0 +1,171 @@ +apiVersion: synapse.azure.upbound.io/v1beta2 +kind: WorkspaceVulnerabilityAssessment +metadata: + annotations: + meta.upbound.io/example-id: synapse/v1beta2/workspacevulnerabilityassessment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + recurringScans: + - emailSubscriptionAdminsEnabled: true + emails: + - email@example1.com + - email@example2.com + enabled: true + 
storageAccountAccessKeySecretRef: + key: attribute.primary_access_key + name: example-storage-account + namespace: upbound-system + storageContainerPath: ${azurerm_storage_account.example.primary_blob_endpoint}${azurerm_storage_container.example.name}/ + workspaceSecurityAlertPolicyIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: synapse/v1beta2/workspacevulnerabilityassessment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: storage.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: synapse/v1beta2/workspacevulnerabilityassessment + labels: + testing.upbound.io/example-name: audit_logs + name: audit-logs +spec: + forProvider: + accountReplicationType: LRS + accountTier: Standard + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: storage.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: synapse/v1beta2/workspacevulnerabilityassessment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountKind: StorageV2 + accountReplicationType: LRS + accountTier: Standard + isHnsEnabled: "true" + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: storage.azure.upbound.io/v1beta1 +kind: Container +metadata: + annotations: + meta.upbound.io/example-id: synapse/v1beta2/workspacevulnerabilityassessment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + storageAccountNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: storage.azure.upbound.io/v1beta1 +kind: DataLakeGen2FileSystem 
+metadata: + annotations: + meta.upbound.io/example-id: synapse/v1beta2/workspacevulnerabilityassessment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + storageAccountIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: synapse.azure.upbound.io/v1beta2 +kind: Workspace +metadata: + annotations: + meta.upbound.io/example-id: synapse/v1beta2/workspacevulnerabilityassessment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + aadAdmin: + - login: AzureAD Admin + objectId: 00000000-0000-0000-0000-000000000000 + tenantId: 00000000-0000-0000-0000-000000000000 + identity: + - type: SystemAssigned + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sqlAdministratorLogin: sqladminuser + sqlAdministratorLoginPasswordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + storageDataLakeGen2FilesystemIdSelector: + matchLabels: + testing.upbound.io/example-name: example + tags: + Env: production + +--- + +apiVersion: synapse.azure.upbound.io/v1beta1 +kind: WorkspaceSecurityAlertPolicy +metadata: + annotations: + meta.upbound.io/example-id: synapse/v1beta2/workspacevulnerabilityassessment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + disabledAlerts: + - Sql_Injection + - Data_Exfiltration + policyState: Enabled + retentionDays: 20 + storageAccountAccessKeySecretRef: + key: attribute.primary_access_key + name: example-storage-account + namespace: upbound-system + storageEndpointSelector: + matchLabels: + testing.upbound.io/example-name: audit_logs + synapseWorkspaceIdSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/timeseriesinsights/v1beta1/eventsourceeventhub.yaml b/examples-generated/timeseriesinsights/v1beta1/eventsourceeventhub.yaml index 897fbe9c1..1b61df5ea 100644 
--- a/examples-generated/timeseriesinsights/v1beta1/eventsourceeventhub.yaml +++ b/examples-generated/timeseriesinsights/v1beta1/eventsourceeventhub.yaml @@ -34,7 +34,7 @@ spec: --- -apiVersion: eventhub.azure.upbound.io/v1beta1 +apiVersion: eventhub.azure.upbound.io/v1beta2 kind: EventHub metadata: annotations: @@ -102,7 +102,7 @@ spec: --- -apiVersion: eventhub.azure.upbound.io/v1beta1 +apiVersion: eventhub.azure.upbound.io/v1beta2 kind: EventHubNamespace metadata: annotations: @@ -120,7 +120,7 @@ spec: --- -apiVersion: timeseriesinsights.azure.upbound.io/v1beta1 +apiVersion: timeseriesinsights.azure.upbound.io/v1beta2 kind: Gen2Environment metadata: annotations: @@ -162,7 +162,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: diff --git a/examples-generated/timeseriesinsights/v1beta1/eventsourceiothub.yaml b/examples-generated/timeseriesinsights/v1beta1/eventsourceiothub.yaml index 921ee8f25..6a324188c 100644 --- a/examples-generated/timeseriesinsights/v1beta1/eventsourceiothub.yaml +++ b/examples-generated/timeseriesinsights/v1beta1/eventsourceiothub.yaml @@ -29,7 +29,7 @@ spec: --- -apiVersion: timeseriesinsights.azure.upbound.io/v1beta1 +apiVersion: timeseriesinsights.azure.upbound.io/v1beta2 kind: Gen2Environment metadata: annotations: @@ -57,7 +57,7 @@ spec: --- -apiVersion: devices.azure.upbound.io/v1beta1 +apiVersion: devices.azure.upbound.io/v1beta2 kind: IOTHub metadata: annotations: @@ -111,7 +111,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: diff --git a/examples-generated/timeseriesinsights/v1beta2/gen2environment.yaml b/examples-generated/timeseriesinsights/v1beta2/gen2environment.yaml new file mode 100644 index 000000000..4dd824e28 --- /dev/null +++ b/examples-generated/timeseriesinsights/v1beta2/gen2environment.yaml @@ -0,0 +1,59 @@ +apiVersion: 
timeseriesinsights.azure.upbound.io/v1beta2 +kind: Gen2Environment +metadata: + annotations: + meta.upbound.io/example-id: timeseriesinsights/v1beta2/gen2environment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + idProperties: + - id + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: L1 + storage: + - keySecretRef: + key: attribute.primary_access_key + name: example-storage-account + namespace: upbound-system + nameSelector: + matchLabels: + testing.upbound.io/example-name: storage + warmStoreDataRetentionTime: P30D + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: timeseriesinsights/v1beta2/gen2environment + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: storage.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: timeseriesinsights/v1beta2/gen2environment + labels: + testing.upbound.io/example-name: storage + name: storage +spec: + forProvider: + accountReplicationType: LRS + accountTier: Standard + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/web/v1beta1/appactiveslot.yaml b/examples-generated/web/v1beta1/appactiveslot.yaml index 6efb40a8d..272b04cc4 100644 --- a/examples-generated/web/v1beta1/appactiveslot.yaml +++ b/examples-generated/web/v1beta1/appactiveslot.yaml @@ -47,7 +47,7 @@ spec: --- -apiVersion: web.azure.upbound.io/v1beta1 +apiVersion: web.azure.upbound.io/v1beta2 kind: WindowsWebApp metadata: annotations: @@ -69,7 +69,7 @@ spec: --- -apiVersion: web.azure.upbound.io/v1beta1 +apiVersion: web.azure.upbound.io/v1beta2 kind: WindowsWebAppSlot metadata: annotations: diff --git a/examples-generated/web/v1beta1/apphybridconnection.yaml 
b/examples-generated/web/v1beta1/apphybridconnection.yaml index c8de30b10..b30cd2f31 100644 --- a/examples-generated/web/v1beta1/apphybridconnection.yaml +++ b/examples-generated/web/v1beta1/apphybridconnection.yaml @@ -90,7 +90,7 @@ spec: --- -apiVersion: web.azure.upbound.io/v1beta1 +apiVersion: web.azure.upbound.io/v1beta2 kind: WindowsWebApp metadata: annotations: diff --git a/examples-generated/web/v1beta1/functionappactiveslot.yaml b/examples-generated/web/v1beta1/functionappactiveslot.yaml index 0c2c57337..ddd7e0165 100644 --- a/examples-generated/web/v1beta1/functionappactiveslot.yaml +++ b/examples-generated/web/v1beta1/functionappactiveslot.yaml @@ -47,7 +47,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: @@ -66,7 +66,7 @@ spec: --- -apiVersion: web.azure.upbound.io/v1beta1 +apiVersion: web.azure.upbound.io/v1beta2 kind: WindowsFunctionApp metadata: annotations: @@ -91,7 +91,7 @@ spec: --- -apiVersion: web.azure.upbound.io/v1beta1 +apiVersion: web.azure.upbound.io/v1beta2 kind: WindowsFunctionAppSlot metadata: annotations: diff --git a/examples-generated/web/v1beta1/functionappfunction.yaml b/examples-generated/web/v1beta1/functionappfunction.yaml index b5ec2b2b7..54b82ddca 100644 --- a/examples-generated/web/v1beta1/functionappfunction.yaml +++ b/examples-generated/web/v1beta1/functionappfunction.yaml @@ -40,7 +40,7 @@ spec: --- -apiVersion: web.azure.upbound.io/v1beta1 +apiVersion: web.azure.upbound.io/v1beta2 kind: LinuxFunctionApp metadata: annotations: @@ -104,7 +104,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: diff --git a/examples-generated/web/v1beta1/functionapphybridconnection.yaml b/examples-generated/web/v1beta1/functionapphybridconnection.yaml index d4c00a5b7..b65cd3736 100644 --- a/examples-generated/web/v1beta1/functionapphybridconnection.yaml +++ 
b/examples-generated/web/v1beta1/functionapphybridconnection.yaml @@ -90,7 +90,7 @@ spec: --- -apiVersion: storage.azure.upbound.io/v1beta1 +apiVersion: storage.azure.upbound.io/v1beta2 kind: Account metadata: annotations: @@ -109,7 +109,7 @@ spec: --- -apiVersion: web.azure.upbound.io/v1beta1 +apiVersion: web.azure.upbound.io/v1beta2 kind: WindowsFunctionApp metadata: annotations: @@ -138,7 +138,7 @@ spec: --- -apiVersion: web.azure.upbound.io/v1beta1 +apiVersion: web.azure.upbound.io/v1beta2 kind: WindowsWebApp metadata: annotations: diff --git a/examples-generated/web/v1beta2/appserviceplan.yaml b/examples-generated/web/v1beta2/appserviceplan.yaml new file mode 100644 index 000000000..82d33a372 --- /dev/null +++ b/examples-generated/web/v1beta2/appserviceplan.yaml @@ -0,0 +1,31 @@ +apiVersion: web.azure.upbound.io/v1beta2 +kind: AppServicePlan +metadata: + annotations: + meta.upbound.io/example-id: web/v1beta2/appserviceplan + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sku: + - size: S1 + tier: Standard + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: web/v1beta2/appserviceplan + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/web/v1beta2/functionapp.yaml b/examples-generated/web/v1beta2/functionapp.yaml new file mode 100644 index 000000000..e0f7b7ac6 --- /dev/null +++ b/examples-generated/web/v1beta2/functionapp.yaml @@ -0,0 +1,78 @@ +apiVersion: web.azure.upbound.io/v1beta2 +kind: FunctionApp +metadata: + annotations: + meta.upbound.io/example-id: web/v1beta2/functionapp + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + appServicePlanIdSelector: + matchLabels: + 
testing.upbound.io/example-name: example + location: West Europe + name: test-azure-functions + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + storageAccountAccessKeySecretRef: + key: attribute.primary_access_key + name: example-storage-account + namespace: upbound-system + storageAccountNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: web.azure.upbound.io/v1beta2 +kind: AppServicePlan +metadata: + annotations: + meta.upbound.io/example-id: web/v1beta2/functionapp + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sku: + - size: S1 + tier: Standard + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: web/v1beta2/functionapp + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: storage.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: web/v1beta2/functionapp + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountReplicationType: LRS + accountTier: Standard + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/web/v1beta2/functionappslot.yaml b/examples-generated/web/v1beta2/functionappslot.yaml new file mode 100644 index 000000000..cae7f77be --- /dev/null +++ b/examples-generated/web/v1beta2/functionappslot.yaml @@ -0,0 +1,108 @@ +apiVersion: web.azure.upbound.io/v1beta2 +kind: FunctionAppSlot +metadata: + annotations: + meta.upbound.io/example-id: web/v1beta2/functionappslot + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + appServicePlanIdSelector: + 
matchLabels: + testing.upbound.io/example-name: example + functionAppNameSelector: + matchLabels: + testing.upbound.io/example-name: example + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + storageAccountAccessKeySecretRef: + key: attribute.primary_access_key + name: example-storage-account + namespace: upbound-system + storageAccountNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: web.azure.upbound.io/v1beta2 +kind: AppServicePlan +metadata: + annotations: + meta.upbound.io/example-id: web/v1beta2/functionappslot + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + sku: + - size: S1 + tier: Standard + +--- + +apiVersion: web.azure.upbound.io/v1beta2 +kind: FunctionApp +metadata: + annotations: + meta.upbound.io/example-id: web/v1beta2/functionappslot + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + appServicePlanIdSelector: + matchLabels: + testing.upbound.io/example-name: example + location: West Europe + name: test-azure-functions + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + storageAccountAccessKeySecretRef: + key: attribute.primary_access_key + name: example-storage-account + namespace: upbound-system + storageAccountNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: web/v1beta2/functionappslot + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: storage.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: web/v1beta2/functionappslot + labels: + 
testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountReplicationType: LRS + accountTier: Standard + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/web/v1beta2/linuxfunctionapp.yaml b/examples-generated/web/v1beta2/linuxfunctionapp.yaml new file mode 100644 index 000000000..a383c85ab --- /dev/null +++ b/examples-generated/web/v1beta2/linuxfunctionapp.yaml @@ -0,0 +1,79 @@ +apiVersion: web.azure.upbound.io/v1beta2 +kind: LinuxFunctionApp +metadata: + annotations: + meta.upbound.io/example-id: web/v1beta2/linuxfunctionapp + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + name: example-linux-function-app + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + servicePlanIdSelector: + matchLabels: + testing.upbound.io/example-name: example + siteConfig: + - {} + storageAccountAccessKeySecretRef: + key: attribute.primary_access_key + name: example-storage-account + namespace: upbound-system + storageAccountNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: web/v1beta2/linuxfunctionapp + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: web.azure.upbound.io/v1beta1 +kind: ServicePlan +metadata: + annotations: + meta.upbound.io/example-id: web/v1beta2/linuxfunctionapp + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + osType: Linux + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: Y1 + +--- + +apiVersion: storage.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: 
web/v1beta2/linuxfunctionapp + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountReplicationType: LRS + accountTier: Standard + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/web/v1beta2/linuxfunctionappslot.yaml b/examples-generated/web/v1beta2/linuxfunctionappslot.yaml new file mode 100644 index 000000000..5fb95c64d --- /dev/null +++ b/examples-generated/web/v1beta2/linuxfunctionappslot.yaml @@ -0,0 +1,96 @@ +apiVersion: web.azure.upbound.io/v1beta2 +kind: LinuxFunctionAppSlot +metadata: + annotations: + meta.upbound.io/example-id: web/v1beta2/linuxfunctionappslot + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + functionAppIdSelector: + matchLabels: + testing.upbound.io/example-name: example + siteConfig: + - {} + storageAccountNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: web.azure.upbound.io/v1beta2 +kind: LinuxFunctionApp +metadata: + annotations: + meta.upbound.io/example-id: web/v1beta2/linuxfunctionappslot + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + name: example-linux-function-app + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + servicePlanIdSelector: + matchLabels: + testing.upbound.io/example-name: example + siteConfig: + - {} + storageAccountNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: web/v1beta2/linuxfunctionappslot + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: web.azure.upbound.io/v1beta1 +kind: ServicePlan +metadata: + annotations: + meta.upbound.io/example-id: 
web/v1beta2/linuxfunctionappslot + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + osType: Linux + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: Y1 + +--- + +apiVersion: storage.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: web/v1beta2/linuxfunctionappslot + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountReplicationType: LRS + accountTier: Standard + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/web/v1beta2/linuxwebapp.yaml b/examples-generated/web/v1beta2/linuxwebapp.yaml new file mode 100644 index 000000000..f9fec8283 --- /dev/null +++ b/examples-generated/web/v1beta2/linuxwebapp.yaml @@ -0,0 +1,52 @@ +apiVersion: web.azure.upbound.io/v1beta2 +kind: LinuxWebApp +metadata: + annotations: + meta.upbound.io/example-id: web/v1beta2/linuxwebapp + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + servicePlanIdSelector: + matchLabels: + testing.upbound.io/example-name: example + siteConfig: + - {} + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: web/v1beta2/linuxwebapp + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: web.azure.upbound.io/v1beta1 +kind: ServicePlan +metadata: + annotations: + meta.upbound.io/example-id: web/v1beta2/linuxwebapp + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + osType: Linux + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: 
example + skuName: P1v2 diff --git a/examples-generated/web/v1beta2/linuxwebappslot.yaml b/examples-generated/web/v1beta2/linuxwebappslot.yaml new file mode 100644 index 000000000..aa8d61db8 --- /dev/null +++ b/examples-generated/web/v1beta2/linuxwebappslot.yaml @@ -0,0 +1,71 @@ +apiVersion: web.azure.upbound.io/v1beta2 +kind: LinuxWebAppSlot +metadata: + annotations: + meta.upbound.io/example-id: web/v1beta2/linuxwebappslot + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + appServiceIdSelector: + matchLabels: + testing.upbound.io/example-name: example + name: example-slot + siteConfig: + - {} + +--- + +apiVersion: web.azure.upbound.io/v1beta2 +kind: LinuxWebApp +metadata: + annotations: + meta.upbound.io/example-id: web/v1beta2/linuxwebappslot + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + servicePlanIdSelector: + matchLabels: + testing.upbound.io/example-name: example + siteConfig: + - {} + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: web/v1beta2/linuxwebappslot + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: web.azure.upbound.io/v1beta1 +kind: ServicePlan +metadata: + annotations: + meta.upbound.io/example-id: web/v1beta2/linuxwebappslot + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + osType: Linux + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: P1v2 diff --git a/examples-generated/web/v1beta2/staticsite.yaml b/examples-generated/web/v1beta2/staticsite.yaml new file mode 100644 index 000000000..778062929 --- /dev/null +++ b/examples-generated/web/v1beta2/staticsite.yaml @@ -0,0 +1,28 @@ 
+apiVersion: web.azure.upbound.io/v1beta2 +kind: StaticSite +metadata: + annotations: + meta.upbound.io/example-id: web/v1beta2/staticsite + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: web/v1beta2/staticsite + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe diff --git a/examples-generated/web/v1beta2/windowsfunctionapp.yaml b/examples-generated/web/v1beta2/windowsfunctionapp.yaml new file mode 100644 index 000000000..c07758a30 --- /dev/null +++ b/examples-generated/web/v1beta2/windowsfunctionapp.yaml @@ -0,0 +1,78 @@ +apiVersion: web.azure.upbound.io/v1beta2 +kind: WindowsFunctionApp +metadata: + annotations: + meta.upbound.io/example-id: web/v1beta2/windowsfunctionapp + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + servicePlanIdSelector: + matchLabels: + testing.upbound.io/example-name: example + siteConfig: + - {} + storageAccountAccessKeySecretRef: + key: attribute.primary_access_key + name: example-storage-account + namespace: upbound-system + storageAccountNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: web/v1beta2/windowsfunctionapp + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: web.azure.upbound.io/v1beta1 +kind: ServicePlan +metadata: + annotations: + meta.upbound.io/example-id: web/v1beta2/windowsfunctionapp + labels: + 
testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + osType: Windows + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: Y1 + +--- + +apiVersion: storage.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: web/v1beta2/windowsfunctionapp + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountReplicationType: LRS + accountTier: Standard + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/web/v1beta2/windowsfunctionappslot.yaml b/examples-generated/web/v1beta2/windowsfunctionappslot.yaml new file mode 100644 index 000000000..b8114c692 --- /dev/null +++ b/examples-generated/web/v1beta2/windowsfunctionappslot.yaml @@ -0,0 +1,95 @@ +apiVersion: web.azure.upbound.io/v1beta2 +kind: WindowsFunctionAppSlot +metadata: + annotations: + meta.upbound.io/example-id: web/v1beta2/windowsfunctionappslot + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + functionAppIdSelector: + matchLabels: + testing.upbound.io/example-name: example + siteConfig: + - {} + storageAccountNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: web/v1beta2/windowsfunctionappslot + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: web.azure.upbound.io/v1beta1 +kind: ServicePlan +metadata: + annotations: + meta.upbound.io/example-id: web/v1beta2/windowsfunctionappslot + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + osType: Windows + resourceGroupNameSelector: + matchLabels: + 
testing.upbound.io/example-name: example + skuName: Y1 + +--- + +apiVersion: storage.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: web/v1beta2/windowsfunctionappslot + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountReplicationType: LRS + accountTier: Standard + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: web.azure.upbound.io/v1beta2 +kind: WindowsFunctionApp +metadata: + annotations: + meta.upbound.io/example-id: web/v1beta2/windowsfunctionappslot + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + servicePlanIdSelector: + matchLabels: + testing.upbound.io/example-name: example + siteConfig: + - {} + storageAccountNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples-generated/web/v1beta2/windowswebapp.yaml b/examples-generated/web/v1beta2/windowswebapp.yaml new file mode 100644 index 000000000..76bc5d4fb --- /dev/null +++ b/examples-generated/web/v1beta2/windowswebapp.yaml @@ -0,0 +1,52 @@ +apiVersion: web.azure.upbound.io/v1beta2 +kind: WindowsWebApp +metadata: + annotations: + meta.upbound.io/example-id: web/v1beta2/windowswebapp + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + servicePlanIdSelector: + matchLabels: + testing.upbound.io/example-name: example + siteConfig: + - {} + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: web/v1beta2/windowswebapp + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe 
+ +--- + +apiVersion: web.azure.upbound.io/v1beta1 +kind: ServicePlan +metadata: + annotations: + meta.upbound.io/example-id: web/v1beta2/windowswebapp + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + osType: Windows + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: P1v2 diff --git a/examples-generated/web/v1beta2/windowswebappslot.yaml b/examples-generated/web/v1beta2/windowswebappslot.yaml new file mode 100644 index 000000000..09f7bd619 --- /dev/null +++ b/examples-generated/web/v1beta2/windowswebappslot.yaml @@ -0,0 +1,70 @@ +apiVersion: web.azure.upbound.io/v1beta2 +kind: WindowsWebAppSlot +metadata: + annotations: + meta.upbound.io/example-id: web/v1beta2/windowswebappslot + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + appServiceIdSelector: + matchLabels: + testing.upbound.io/example-name: example + siteConfig: + - {} + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: web/v1beta2/windowswebappslot + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + +--- + +apiVersion: web.azure.upbound.io/v1beta1 +kind: ServicePlan +metadata: + annotations: + meta.upbound.io/example-id: web/v1beta2/windowswebappslot + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + osType: Windows + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: P1v2 + +--- + +apiVersion: web.azure.upbound.io/v1beta2 +kind: WindowsWebApp +metadata: + annotations: + meta.upbound.io/example-id: web/v1beta2/windowswebappslot + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + 
testing.upbound.io/example-name: example + servicePlanIdSelector: + matchLabels: + testing.upbound.io/example-name: example + siteConfig: + - {} diff --git a/examples/alertsmanagement/monitoractionruleactiongroup.yaml b/examples/alertsmanagement/v1beta1/monitoractionruleactiongroup.yaml similarity index 100% rename from examples/alertsmanagement/monitoractionruleactiongroup.yaml rename to examples/alertsmanagement/v1beta1/monitoractionruleactiongroup.yaml diff --git a/examples/alertsmanagement/monitoractionrulesuppression.yaml b/examples/alertsmanagement/v1beta1/monitoractionrulesuppression.yaml similarity index 100% rename from examples/alertsmanagement/monitoractionrulesuppression.yaml rename to examples/alertsmanagement/v1beta1/monitoractionrulesuppression.yaml diff --git a/examples/alertsmanagement/monitoralertprocessingruleactiongroup.yaml b/examples/alertsmanagement/v1beta1/monitoralertprocessingruleactiongroup.yaml similarity index 100% rename from examples/alertsmanagement/monitoralertprocessingruleactiongroup.yaml rename to examples/alertsmanagement/v1beta1/monitoralertprocessingruleactiongroup.yaml diff --git a/examples/alertsmanagement/monitoralertprocessingrulesuppression.yaml b/examples/alertsmanagement/v1beta1/monitoralertprocessingrulesuppression.yaml similarity index 100% rename from examples/alertsmanagement/monitoralertprocessingrulesuppression.yaml rename to examples/alertsmanagement/v1beta1/monitoralertprocessingrulesuppression.yaml diff --git a/examples/alertsmanagement/monitorsmartdetectoralertrule.yaml b/examples/alertsmanagement/v1beta1/monitorsmartdetectoralertrule.yaml similarity index 100% rename from examples/alertsmanagement/monitorsmartdetectoralertrule.yaml rename to examples/alertsmanagement/v1beta1/monitorsmartdetectoralertrule.yaml diff --git a/examples/analysisservices/server.yaml b/examples/analysisservices/v1beta1/server.yaml similarity index 100% rename from examples/analysisservices/server.yaml rename to 
examples/analysisservices/v1beta1/server.yaml diff --git a/examples/apimanagement/api.yaml b/examples/apimanagement/v1beta1/api.yaml similarity index 100% rename from examples/apimanagement/api.yaml rename to examples/apimanagement/v1beta1/api.yaml diff --git a/examples/apimanagement/apidiagnostic.yaml b/examples/apimanagement/v1beta1/apidiagnostic.yaml similarity index 100% rename from examples/apimanagement/apidiagnostic.yaml rename to examples/apimanagement/v1beta1/apidiagnostic.yaml diff --git a/examples/apimanagement/apioperation.yaml b/examples/apimanagement/v1beta1/apioperation.yaml similarity index 100% rename from examples/apimanagement/apioperation.yaml rename to examples/apimanagement/v1beta1/apioperation.yaml diff --git a/examples/apimanagement/apioperationpolicy.yaml b/examples/apimanagement/v1beta1/apioperationpolicy.yaml similarity index 100% rename from examples/apimanagement/apioperationpolicy.yaml rename to examples/apimanagement/v1beta1/apioperationpolicy.yaml diff --git a/examples/apimanagement/apioperationtag.yaml b/examples/apimanagement/v1beta1/apioperationtag.yaml similarity index 100% rename from examples/apimanagement/apioperationtag.yaml rename to examples/apimanagement/v1beta1/apioperationtag.yaml diff --git a/examples/apimanagement/apipolicy.yaml b/examples/apimanagement/v1beta1/apipolicy.yaml similarity index 100% rename from examples/apimanagement/apipolicy.yaml rename to examples/apimanagement/v1beta1/apipolicy.yaml diff --git a/examples/apimanagement/apirelease.yaml b/examples/apimanagement/v1beta1/apirelease.yaml similarity index 100% rename from examples/apimanagement/apirelease.yaml rename to examples/apimanagement/v1beta1/apirelease.yaml diff --git a/examples/apimanagement/apischema.yaml b/examples/apimanagement/v1beta1/apischema.yaml similarity index 100% rename from examples/apimanagement/apischema.yaml rename to examples/apimanagement/v1beta1/apischema.yaml diff --git a/examples/apimanagement/apitag.yaml 
b/examples/apimanagement/v1beta1/apitag.yaml similarity index 100% rename from examples/apimanagement/apitag.yaml rename to examples/apimanagement/v1beta1/apitag.yaml diff --git a/examples/apimanagement/apiversionset.yaml b/examples/apimanagement/v1beta1/apiversionset.yaml similarity index 100% rename from examples/apimanagement/apiversionset.yaml rename to examples/apimanagement/v1beta1/apiversionset.yaml diff --git a/examples/apimanagement/authorizationserver.yaml b/examples/apimanagement/v1beta1/authorizationserver.yaml similarity index 100% rename from examples/apimanagement/authorizationserver.yaml rename to examples/apimanagement/v1beta1/authorizationserver.yaml diff --git a/examples/apimanagement/backend.yaml b/examples/apimanagement/v1beta1/backend.yaml similarity index 100% rename from examples/apimanagement/backend.yaml rename to examples/apimanagement/v1beta1/backend.yaml diff --git a/examples/apimanagement/certificate.yaml b/examples/apimanagement/v1beta1/certificate.yaml similarity index 100% rename from examples/apimanagement/certificate.yaml rename to examples/apimanagement/v1beta1/certificate.yaml diff --git a/examples/apimanagement/customdomain.yaml b/examples/apimanagement/v1beta1/customdomain.yaml similarity index 100% rename from examples/apimanagement/customdomain.yaml rename to examples/apimanagement/v1beta1/customdomain.yaml diff --git a/examples/apimanagement/diagnostic.yaml b/examples/apimanagement/v1beta1/diagnostic.yaml similarity index 97% rename from examples/apimanagement/diagnostic.yaml rename to examples/apimanagement/v1beta1/diagnostic.yaml index f9843457b..d9846a132 100644 --- a/examples/apimanagement/diagnostic.yaml +++ b/examples/apimanagement/v1beta1/diagnostic.yaml @@ -80,7 +80,7 @@ apiVersion: insights.azure.upbound.io/v1beta1 kind: ApplicationInsights metadata: annotations: - uptest.upbound.io/pre-delete-hook: testhooks/delete-logger.sh + uptest.upbound.io/pre-delete-hook: ../testhooks/delete-logger.sh 
meta.upbound.io/example-id: apimanagement/v1beta1/diagnostic labels: testing.upbound.io/example-name: example diff --git a/examples/apimanagement/emailtemplate.yaml b/examples/apimanagement/v1beta1/emailtemplate.yaml similarity index 100% rename from examples/apimanagement/emailtemplate.yaml rename to examples/apimanagement/v1beta1/emailtemplate.yaml diff --git a/examples/apimanagement/gateway.yaml b/examples/apimanagement/v1beta1/gateway.yaml similarity index 100% rename from examples/apimanagement/gateway.yaml rename to examples/apimanagement/v1beta1/gateway.yaml diff --git a/examples/apimanagement/gatewayapi.yaml b/examples/apimanagement/v1beta1/gatewayapi.yaml similarity index 100% rename from examples/apimanagement/gatewayapi.yaml rename to examples/apimanagement/v1beta1/gatewayapi.yaml diff --git a/examples/apimanagement/globalschema.yaml b/examples/apimanagement/v1beta1/globalschema.yaml similarity index 100% rename from examples/apimanagement/globalschema.yaml rename to examples/apimanagement/v1beta1/globalschema.yaml diff --git a/examples/apimanagement/identityprovideraad.yaml b/examples/apimanagement/v1beta1/identityprovideraad.yaml similarity index 100% rename from examples/apimanagement/identityprovideraad.yaml rename to examples/apimanagement/v1beta1/identityprovideraad.yaml diff --git a/examples/apimanagement/identityproviderfacebook.yaml b/examples/apimanagement/v1beta1/identityproviderfacebook.yaml similarity index 100% rename from examples/apimanagement/identityproviderfacebook.yaml rename to examples/apimanagement/v1beta1/identityproviderfacebook.yaml diff --git a/examples/apimanagement/identityprovidergoogle.yaml b/examples/apimanagement/v1beta1/identityprovidergoogle.yaml similarity index 100% rename from examples/apimanagement/identityprovidergoogle.yaml rename to examples/apimanagement/v1beta1/identityprovidergoogle.yaml diff --git a/examples/apimanagement/identityprovidermicrosoft.yaml 
b/examples/apimanagement/v1beta1/identityprovidermicrosoft.yaml similarity index 100% rename from examples/apimanagement/identityprovidermicrosoft.yaml rename to examples/apimanagement/v1beta1/identityprovidermicrosoft.yaml diff --git a/examples/apimanagement/identityprovidertwitter.yaml b/examples/apimanagement/v1beta1/identityprovidertwitter.yaml similarity index 100% rename from examples/apimanagement/identityprovidertwitter.yaml rename to examples/apimanagement/v1beta1/identityprovidertwitter.yaml diff --git a/examples/apimanagement/logger.yaml b/examples/apimanagement/v1beta1/logger.yaml similarity index 100% rename from examples/apimanagement/logger.yaml rename to examples/apimanagement/v1beta1/logger.yaml diff --git a/examples/apimanagement/management.yaml b/examples/apimanagement/v1beta1/management.yaml similarity index 100% rename from examples/apimanagement/management.yaml rename to examples/apimanagement/v1beta1/management.yaml diff --git a/examples/apimanagement/namedvalue.yaml b/examples/apimanagement/v1beta1/namedvalue.yaml similarity index 100% rename from examples/apimanagement/namedvalue.yaml rename to examples/apimanagement/v1beta1/namedvalue.yaml diff --git a/examples/apimanagement/notificationrecipientemail.yaml b/examples/apimanagement/v1beta1/notificationrecipientemail.yaml similarity index 100% rename from examples/apimanagement/notificationrecipientemail.yaml rename to examples/apimanagement/v1beta1/notificationrecipientemail.yaml diff --git a/examples/apimanagement/notificationrecipientuser.yaml b/examples/apimanagement/v1beta1/notificationrecipientuser.yaml similarity index 100% rename from examples/apimanagement/notificationrecipientuser.yaml rename to examples/apimanagement/v1beta1/notificationrecipientuser.yaml diff --git a/examples/apimanagement/openidconnectprovider.yaml b/examples/apimanagement/v1beta1/openidconnectprovider.yaml similarity index 100% rename from examples/apimanagement/openidconnectprovider.yaml rename to 
examples/apimanagement/v1beta1/openidconnectprovider.yaml diff --git a/examples/apimanagement/policy.yaml b/examples/apimanagement/v1beta1/policy.yaml similarity index 100% rename from examples/apimanagement/policy.yaml rename to examples/apimanagement/v1beta1/policy.yaml diff --git a/examples/apimanagement/product.yaml b/examples/apimanagement/v1beta1/product.yaml similarity index 100% rename from examples/apimanagement/product.yaml rename to examples/apimanagement/v1beta1/product.yaml diff --git a/examples/apimanagement/productapi.yaml b/examples/apimanagement/v1beta1/productapi.yaml similarity index 100% rename from examples/apimanagement/productapi.yaml rename to examples/apimanagement/v1beta1/productapi.yaml diff --git a/examples/apimanagement/productpolicy.yaml b/examples/apimanagement/v1beta1/productpolicy.yaml similarity index 100% rename from examples/apimanagement/productpolicy.yaml rename to examples/apimanagement/v1beta1/productpolicy.yaml diff --git a/examples/apimanagement/producttag.yaml b/examples/apimanagement/v1beta1/producttag.yaml similarity index 100% rename from examples/apimanagement/producttag.yaml rename to examples/apimanagement/v1beta1/producttag.yaml diff --git a/examples/apimanagement/rediscache.yaml b/examples/apimanagement/v1beta1/rediscache.yaml similarity index 100% rename from examples/apimanagement/rediscache.yaml rename to examples/apimanagement/v1beta1/rediscache.yaml diff --git a/examples/apimanagement/subscription.yaml b/examples/apimanagement/v1beta1/subscription.yaml similarity index 100% rename from examples/apimanagement/subscription.yaml rename to examples/apimanagement/v1beta1/subscription.yaml diff --git a/examples/apimanagement/tag.yaml b/examples/apimanagement/v1beta1/tag.yaml similarity index 100% rename from examples/apimanagement/tag.yaml rename to examples/apimanagement/v1beta1/tag.yaml diff --git a/examples/apimanagement/user.yaml b/examples/apimanagement/v1beta1/user.yaml similarity index 100% rename from 
examples/apimanagement/user.yaml rename to examples/apimanagement/v1beta1/user.yaml diff --git a/examples/appconfiguration/configuration.yaml b/examples/appconfiguration/v1beta1/configuration.yaml similarity index 100% rename from examples/appconfiguration/configuration.yaml rename to examples/appconfiguration/v1beta1/configuration.yaml diff --git a/examples/appplatform/springcloudaccelerator.yaml b/examples/appplatform/v1beta1/springcloudaccelerator.yaml similarity index 100% rename from examples/appplatform/springcloudaccelerator.yaml rename to examples/appplatform/v1beta1/springcloudaccelerator.yaml diff --git a/examples/appplatform/springcloudactivedeployment.yaml b/examples/appplatform/v1beta1/springcloudactivedeployment.yaml similarity index 96% rename from examples/appplatform/springcloudactivedeployment.yaml rename to examples/appplatform/v1beta1/springcloudactivedeployment.yaml index 4f233255c..bd2ff88f1 100644 --- a/examples/appplatform/springcloudactivedeployment.yaml +++ b/examples/appplatform/v1beta1/springcloudactivedeployment.yaml @@ -40,7 +40,7 @@ kind: SpringCloudApp metadata: annotations: meta.upbound.io/example-id: appplatform/v1beta1/springcloudactivedeployment - uptest.upbound.io/pre-delete-hook: testhooks/delete-spring-active-deployment.sh + uptest.upbound.io/pre-delete-hook: ../testhooks/delete-spring-active-deployment.sh labels: testing.upbound.io/example-name: example name: example-app diff --git a/examples/appplatform/springcloudapiportal.yaml b/examples/appplatform/v1beta1/springcloudapiportal.yaml similarity index 100% rename from examples/appplatform/springcloudapiportal.yaml rename to examples/appplatform/v1beta1/springcloudapiportal.yaml diff --git a/examples/appplatform/springcloudapiportalcustomdomain.yaml b/examples/appplatform/v1beta1/springcloudapiportalcustomdomain.yaml similarity index 100% rename from examples/appplatform/springcloudapiportalcustomdomain.yaml rename to 
examples/appplatform/v1beta1/springcloudapiportalcustomdomain.yaml diff --git a/examples/appplatform/springcloudapp.yaml b/examples/appplatform/v1beta1/springcloudapp.yaml similarity index 100% rename from examples/appplatform/springcloudapp.yaml rename to examples/appplatform/v1beta1/springcloudapp.yaml diff --git a/examples/appplatform/springcloudappcosmosdbassociation.yaml b/examples/appplatform/v1beta1/springcloudappcosmosdbassociation.yaml similarity index 100% rename from examples/appplatform/springcloudappcosmosdbassociation.yaml rename to examples/appplatform/v1beta1/springcloudappcosmosdbassociation.yaml diff --git a/examples/appplatform/springcloudappmysqlassociation.yaml b/examples/appplatform/v1beta1/springcloudappmysqlassociation.yaml similarity index 97% rename from examples/appplatform/springcloudappmysqlassociation.yaml rename to examples/appplatform/v1beta1/springcloudappmysqlassociation.yaml index 701671aab..316039da3 100644 --- a/examples/appplatform/springcloudappmysqlassociation.yaml +++ b/examples/appplatform/v1beta1/springcloudappmysqlassociation.yaml @@ -125,7 +125,7 @@ metadata: name: example-mysql-server namespace: upbound-system annotations: - uptest.upbound.io/pre-delete-hook: testhooks/delete-mysql-assiciation.sh + uptest.upbound.io/pre-delete-hook: ../testhooks/delete-mysql-assiciation.sh type: Opaque data: password: dGVzdFBhc3N3b3JkITEyMw== diff --git a/examples/appplatform/springcloudappredisassociation.yaml b/examples/appplatform/v1beta1/springcloudappredisassociation.yaml similarity index 100% rename from examples/appplatform/springcloudappredisassociation.yaml rename to examples/appplatform/v1beta1/springcloudappredisassociation.yaml diff --git a/examples/appplatform/springcloudbuilddeployment.yaml b/examples/appplatform/v1beta1/springcloudbuilddeployment.yaml similarity index 100% rename from examples/appplatform/springcloudbuilddeployment.yaml rename to examples/appplatform/v1beta1/springcloudbuilddeployment.yaml diff --git 
a/examples/appplatform/springcloudbuilder.yaml b/examples/appplatform/v1beta1/springcloudbuilder.yaml similarity index 100% rename from examples/appplatform/springcloudbuilder.yaml rename to examples/appplatform/v1beta1/springcloudbuilder.yaml diff --git a/examples/appplatform/springcloudbuildpackbinding.yaml b/examples/appplatform/v1beta1/springcloudbuildpackbinding.yaml similarity index 100% rename from examples/appplatform/springcloudbuildpackbinding.yaml rename to examples/appplatform/v1beta1/springcloudbuildpackbinding.yaml diff --git a/examples/appplatform/springcloudcertificate.yaml b/examples/appplatform/v1beta1/springcloudcertificate.yaml similarity index 100% rename from examples/appplatform/springcloudcertificate.yaml rename to examples/appplatform/v1beta1/springcloudcertificate.yaml diff --git a/examples/appplatform/springcloudconfigurationservice.yaml b/examples/appplatform/v1beta1/springcloudconfigurationservice.yaml similarity index 100% rename from examples/appplatform/springcloudconfigurationservice.yaml rename to examples/appplatform/v1beta1/springcloudconfigurationservice.yaml diff --git a/examples/appplatform/springcloudcontainerdeployment.yaml b/examples/appplatform/v1beta1/springcloudcontainerdeployment.yaml similarity index 100% rename from examples/appplatform/springcloudcontainerdeployment.yaml rename to examples/appplatform/v1beta1/springcloudcontainerdeployment.yaml diff --git a/examples/appplatform/springcloudcustomdomain.yaml b/examples/appplatform/v1beta1/springcloudcustomdomain.yaml similarity index 100% rename from examples/appplatform/springcloudcustomdomain.yaml rename to examples/appplatform/v1beta1/springcloudcustomdomain.yaml diff --git a/examples/appplatform/springcloudcustomizedaccelerator.yaml b/examples/appplatform/v1beta1/springcloudcustomizedaccelerator.yaml similarity index 100% rename from examples/appplatform/springcloudcustomizedaccelerator.yaml rename to 
examples/appplatform/v1beta1/springcloudcustomizedaccelerator.yaml diff --git a/examples/appplatform/springclouddevtoolportal.yaml b/examples/appplatform/v1beta1/springclouddevtoolportal.yaml similarity index 100% rename from examples/appplatform/springclouddevtoolportal.yaml rename to examples/appplatform/v1beta1/springclouddevtoolportal.yaml diff --git a/examples/appplatform/springcloudgateway.yaml b/examples/appplatform/v1beta1/springcloudgateway.yaml similarity index 100% rename from examples/appplatform/springcloudgateway.yaml rename to examples/appplatform/v1beta1/springcloudgateway.yaml diff --git a/examples/appplatform/springcloudgatewaycustomdomain.yaml b/examples/appplatform/v1beta1/springcloudgatewaycustomdomain.yaml similarity index 100% rename from examples/appplatform/springcloudgatewaycustomdomain.yaml rename to examples/appplatform/v1beta1/springcloudgatewaycustomdomain.yaml diff --git a/examples/appplatform/springcloudjavadeployment.yaml b/examples/appplatform/v1beta1/springcloudjavadeployment.yaml similarity index 100% rename from examples/appplatform/springcloudjavadeployment.yaml rename to examples/appplatform/v1beta1/springcloudjavadeployment.yaml diff --git a/examples/appplatform/springcloudservice.yaml b/examples/appplatform/v1beta1/springcloudservice.yaml similarity index 100% rename from examples/appplatform/springcloudservice.yaml rename to examples/appplatform/v1beta1/springcloudservice.yaml diff --git a/examples/appplatform/springcloudstorage.yaml b/examples/appplatform/v1beta1/springcloudstorage.yaml similarity index 100% rename from examples/appplatform/springcloudstorage.yaml rename to examples/appplatform/v1beta1/springcloudstorage.yaml diff --git a/examples/attestation/provider.yaml b/examples/attestation/v1beta1/provider.yaml similarity index 100% rename from examples/attestation/provider.yaml rename to examples/attestation/v1beta1/provider.yaml diff --git a/examples/authorization/managementlock.yaml 
b/examples/authorization/v1beta1/managementlock.yaml similarity index 100% rename from examples/authorization/managementlock.yaml rename to examples/authorization/v1beta1/managementlock.yaml diff --git a/examples/authorization/policydefinition.yaml b/examples/authorization/v1beta1/policydefinition.yaml similarity index 100% rename from examples/authorization/policydefinition.yaml rename to examples/authorization/v1beta1/policydefinition.yaml diff --git a/examples/authorization/resourcegrouppolicyassignment.yaml b/examples/authorization/v1beta1/resourcegrouppolicyassignment.yaml similarity index 100% rename from examples/authorization/resourcegrouppolicyassignment.yaml rename to examples/authorization/v1beta1/resourcegrouppolicyassignment.yaml diff --git a/examples/authorization/resourcepolicyassignment.yaml b/examples/authorization/v1beta1/resourcepolicyassignment.yaml similarity index 100% rename from examples/authorization/resourcepolicyassignment.yaml rename to examples/authorization/v1beta1/resourcepolicyassignment.yaml diff --git a/examples/authorization/resourcepolicyexemption.yaml b/examples/authorization/v1beta1/resourcepolicyexemption.yaml similarity index 100% rename from examples/authorization/resourcepolicyexemption.yaml rename to examples/authorization/v1beta1/resourcepolicyexemption.yaml diff --git a/examples/authorization/roleassignment.yaml b/examples/authorization/v1beta1/roleassignment.yaml similarity index 100% rename from examples/authorization/roleassignment.yaml rename to examples/authorization/v1beta1/roleassignment.yaml diff --git a/examples/authorization/roledefinition.yaml b/examples/authorization/v1beta1/roledefinition.yaml similarity index 100% rename from examples/authorization/roledefinition.yaml rename to examples/authorization/v1beta1/roledefinition.yaml diff --git a/examples/authorization/subscriptionpolicyassignment.yaml b/examples/authorization/v1beta1/subscriptionpolicyassignment.yaml similarity index 100% rename from 
examples/authorization/subscriptionpolicyassignment.yaml rename to examples/authorization/v1beta1/subscriptionpolicyassignment.yaml diff --git a/examples/authorization/subscriptionpolicyexemption.yaml b/examples/authorization/v1beta1/subscriptionpolicyexemption.yaml similarity index 100% rename from examples/authorization/subscriptionpolicyexemption.yaml rename to examples/authorization/v1beta1/subscriptionpolicyexemption.yaml diff --git a/examples/automation/account.yaml b/examples/automation/v1beta1/account.yaml similarity index 100% rename from examples/automation/account.yaml rename to examples/automation/v1beta1/account.yaml diff --git a/examples/automation/connection.yaml b/examples/automation/v1beta1/connection.yaml similarity index 100% rename from examples/automation/connection.yaml rename to examples/automation/v1beta1/connection.yaml diff --git a/examples/automation/connectionclassiccertificate.yaml b/examples/automation/v1beta1/connectionclassiccertificate.yaml similarity index 100% rename from examples/automation/connectionclassiccertificate.yaml rename to examples/automation/v1beta1/connectionclassiccertificate.yaml diff --git a/examples/automation/connectiontype.yaml b/examples/automation/v1beta1/connectiontype.yaml similarity index 100% rename from examples/automation/connectiontype.yaml rename to examples/automation/v1beta1/connectiontype.yaml diff --git a/examples/automation/credential.yaml b/examples/automation/v1beta1/credential.yaml similarity index 100% rename from examples/automation/credential.yaml rename to examples/automation/v1beta1/credential.yaml diff --git a/examples/automation/hybridrunbookworkergroup.yaml b/examples/automation/v1beta1/hybridrunbookworkergroup.yaml similarity index 100% rename from examples/automation/hybridrunbookworkergroup.yaml rename to examples/automation/v1beta1/hybridrunbookworkergroup.yaml diff --git a/examples/automation/module.yaml b/examples/automation/v1beta1/module.yaml similarity index 100% rename from 
examples/automation/module.yaml rename to examples/automation/v1beta1/module.yaml diff --git a/examples/automation/runbook.yaml b/examples/automation/v1beta1/runbook.yaml similarity index 100% rename from examples/automation/runbook.yaml rename to examples/automation/v1beta1/runbook.yaml diff --git a/examples/automation/schedule.yaml b/examples/automation/v1beta1/schedule.yaml similarity index 100% rename from examples/automation/schedule.yaml rename to examples/automation/v1beta1/schedule.yaml diff --git a/examples/automation/variablebool.yaml b/examples/automation/v1beta1/variablebool.yaml similarity index 100% rename from examples/automation/variablebool.yaml rename to examples/automation/v1beta1/variablebool.yaml diff --git a/examples/automation/variabledatetime.yaml b/examples/automation/v1beta1/variabledatetime.yaml similarity index 100% rename from examples/automation/variabledatetime.yaml rename to examples/automation/v1beta1/variabledatetime.yaml diff --git a/examples/automation/variableint.yaml b/examples/automation/v1beta1/variableint.yaml similarity index 100% rename from examples/automation/variableint.yaml rename to examples/automation/v1beta1/variableint.yaml diff --git a/examples/automation/variablestring.yaml b/examples/automation/v1beta1/variablestring.yaml similarity index 100% rename from examples/automation/variablestring.yaml rename to examples/automation/v1beta1/variablestring.yaml diff --git a/examples/automation/webhook.yaml b/examples/automation/v1beta1/webhook.yaml similarity index 100% rename from examples/automation/webhook.yaml rename to examples/automation/v1beta1/webhook.yaml diff --git a/examples/azure/providerconfig.yaml b/examples/azure/v1beta1/providerconfig.yaml similarity index 100% rename from examples/azure/providerconfig.yaml rename to examples/azure/v1beta1/providerconfig.yaml diff --git a/examples/azure/resourcegroup.yaml b/examples/azure/v1beta1/resourcegroup.yaml similarity index 100% rename from 
examples/azure/resourcegroup.yaml rename to examples/azure/v1beta1/resourcegroup.yaml diff --git a/examples/azure/resourceproviderregistration.yaml b/examples/azure/v1beta1/resourceproviderregistration.yaml similarity index 100% rename from examples/azure/resourceproviderregistration.yaml rename to examples/azure/v1beta1/resourceproviderregistration.yaml diff --git a/examples/azure/subscription.yaml b/examples/azure/v1beta1/subscription.yaml similarity index 100% rename from examples/azure/subscription.yaml rename to examples/azure/v1beta1/subscription.yaml diff --git a/examples/azure/system-assigned-managed-identity.yaml b/examples/azure/v1beta1/system-assigned-managed-identity.yaml similarity index 100% rename from examples/azure/system-assigned-managed-identity.yaml rename to examples/azure/v1beta1/system-assigned-managed-identity.yaml diff --git a/examples/azure/user-assigned-managed-identity.yaml b/examples/azure/v1beta1/user-assigned-managed-identity.yaml similarity index 100% rename from examples/azure/user-assigned-managed-identity.yaml rename to examples/azure/v1beta1/user-assigned-managed-identity.yaml diff --git a/examples/azurestackhci/cluster.yaml b/examples/azurestackhci/v1beta1/cluster.yaml similarity index 100% rename from examples/azurestackhci/cluster.yaml rename to examples/azurestackhci/v1beta1/cluster.yaml diff --git a/examples/botservice/botchannelalexa.yaml b/examples/botservice/v1beta1/botchannelalexa.yaml similarity index 100% rename from examples/botservice/botchannelalexa.yaml rename to examples/botservice/v1beta1/botchannelalexa.yaml diff --git a/examples/botservice/botchanneldirectline.yaml b/examples/botservice/v1beta1/botchanneldirectline.yaml similarity index 100% rename from examples/botservice/botchanneldirectline.yaml rename to examples/botservice/v1beta1/botchanneldirectline.yaml diff --git a/examples/botservice/botchannelline.yaml b/examples/botservice/v1beta1/botchannelline.yaml similarity index 100% rename from 
examples/botservice/botchannelline.yaml rename to examples/botservice/v1beta1/botchannelline.yaml diff --git a/examples/botservice/botchannelmsteams.yaml b/examples/botservice/v1beta1/botchannelmsteams.yaml similarity index 100% rename from examples/botservice/botchannelmsteams.yaml rename to examples/botservice/v1beta1/botchannelmsteams.yaml diff --git a/examples/botservice/botchannelslack.yaml b/examples/botservice/v1beta1/botchannelslack.yaml similarity index 100% rename from examples/botservice/botchannelslack.yaml rename to examples/botservice/v1beta1/botchannelslack.yaml diff --git a/examples/botservice/botchannelsms.yaml b/examples/botservice/v1beta1/botchannelsms.yaml similarity index 100% rename from examples/botservice/botchannelsms.yaml rename to examples/botservice/v1beta1/botchannelsms.yaml diff --git a/examples/botservice/botchannelsregistration.yaml b/examples/botservice/v1beta1/botchannelsregistration.yaml similarity index 100% rename from examples/botservice/botchannelsregistration.yaml rename to examples/botservice/v1beta1/botchannelsregistration.yaml diff --git a/examples/botservice/botchannelwebchat.yaml b/examples/botservice/v1beta1/botchannelwebchat.yaml similarity index 100% rename from examples/botservice/botchannelwebchat.yaml rename to examples/botservice/v1beta1/botchannelwebchat.yaml diff --git a/examples/botservice/botconnection.yaml b/examples/botservice/v1beta1/botconnection.yaml similarity index 100% rename from examples/botservice/botconnection.yaml rename to examples/botservice/v1beta1/botconnection.yaml diff --git a/examples/botservice/botwebapp.yaml b/examples/botservice/v1beta1/botwebapp.yaml similarity index 100% rename from examples/botservice/botwebapp.yaml rename to examples/botservice/v1beta1/botwebapp.yaml diff --git a/examples/cache/redis-caches.yaml b/examples/cache/v1beta1/redis-caches.yaml similarity index 100% rename from examples/cache/redis-caches.yaml rename to examples/cache/v1beta1/redis-caches.yaml diff --git 
a/examples/cache/redislinkedserver.yaml b/examples/cache/v1beta1/redislinkedserver.yaml similarity index 100% rename from examples/cache/redislinkedserver.yaml rename to examples/cache/v1beta1/redislinkedserver.yaml diff --git a/examples/cdn/endpoint.yaml b/examples/cdn/v1beta1/endpoint.yaml similarity index 100% rename from examples/cdn/endpoint.yaml rename to examples/cdn/v1beta1/endpoint.yaml diff --git a/examples/cdn/frontdoorcustomdomain.yaml b/examples/cdn/v1beta1/frontdoorcustomdomain.yaml similarity index 100% rename from examples/cdn/frontdoorcustomdomain.yaml rename to examples/cdn/v1beta1/frontdoorcustomdomain.yaml diff --git a/examples/cdn/frontdoorcustomdomainassociation.yaml b/examples/cdn/v1beta1/frontdoorcustomdomainassociation.yaml similarity index 100% rename from examples/cdn/frontdoorcustomdomainassociation.yaml rename to examples/cdn/v1beta1/frontdoorcustomdomainassociation.yaml diff --git a/examples/cdn/frontdoorendpoint.yaml b/examples/cdn/v1beta1/frontdoorendpoint.yaml similarity index 100% rename from examples/cdn/frontdoorendpoint.yaml rename to examples/cdn/v1beta1/frontdoorendpoint.yaml diff --git a/examples/cdn/frontdoorfirewallpolicy.yaml b/examples/cdn/v1beta1/frontdoorfirewallpolicy.yaml similarity index 100% rename from examples/cdn/frontdoorfirewallpolicy.yaml rename to examples/cdn/v1beta1/frontdoorfirewallpolicy.yaml diff --git a/examples/cdn/frontdoororigin.yaml b/examples/cdn/v1beta1/frontdoororigin.yaml similarity index 100% rename from examples/cdn/frontdoororigin.yaml rename to examples/cdn/v1beta1/frontdoororigin.yaml diff --git a/examples/cdn/frontdoororigingroup.yaml b/examples/cdn/v1beta1/frontdoororigingroup.yaml similarity index 100% rename from examples/cdn/frontdoororigingroup.yaml rename to examples/cdn/v1beta1/frontdoororigingroup.yaml diff --git a/examples/cdn/frontdoorprofile.yaml b/examples/cdn/v1beta1/frontdoorprofile.yaml similarity index 100% rename from examples/cdn/frontdoorprofile.yaml rename to 
examples/cdn/v1beta1/frontdoorprofile.yaml diff --git a/examples/cdn/frontdoorroute.yaml b/examples/cdn/v1beta1/frontdoorroute.yaml similarity index 100% rename from examples/cdn/frontdoorroute.yaml rename to examples/cdn/v1beta1/frontdoorroute.yaml diff --git a/examples/cdn/frontdoorrule.yaml b/examples/cdn/v1beta1/frontdoorrule.yaml similarity index 100% rename from examples/cdn/frontdoorrule.yaml rename to examples/cdn/v1beta1/frontdoorrule.yaml diff --git a/examples/cdn/frontdoorruleset.yaml b/examples/cdn/v1beta1/frontdoorruleset.yaml similarity index 100% rename from examples/cdn/frontdoorruleset.yaml rename to examples/cdn/v1beta1/frontdoorruleset.yaml diff --git a/examples/cdn/frontdoorsecuritypolicy.yaml b/examples/cdn/v1beta1/frontdoorsecuritypolicy.yaml similarity index 100% rename from examples/cdn/frontdoorsecuritypolicy.yaml rename to examples/cdn/v1beta1/frontdoorsecuritypolicy.yaml diff --git a/examples/cdn/profile.yaml b/examples/cdn/v1beta1/profile.yaml similarity index 100% rename from examples/cdn/profile.yaml rename to examples/cdn/v1beta1/profile.yaml diff --git a/examples/cdn/v1beta2/endpoint.yaml b/examples/cdn/v1beta2/endpoint.yaml new file mode 100644 index 000000000..8d5f64c83 --- /dev/null +++ b/examples/cdn/v1beta2/endpoint.yaml @@ -0,0 +1,59 @@ +# SPDX-FileCopyrightText: 2024 The Crossplane Authors +# +# SPDX-License-Identifier: Apache-2.0 + +apiVersion: cdn.azure.upbound.io/v1beta2 +kind: Endpoint +metadata: + annotations: + meta.upbound.io/example-id: cdn/v1beta2/endpoint + labels: + testing.upbound.io/example-name: endpointlabel + name: endpointnm +spec: + forProvider: + location: West Europe + origin: + - hostName: www.contoso.com + name: example + profileNameSelector: + matchLabels: + testing.upbound.io/example-name: profilelabel + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: rgroupnamelabel + +--- + +apiVersion: cdn.azure.upbound.io/v1beta1 +kind: Profile +metadata: + annotations: + 
meta.upbound.io/example-id: cdn/v1beta2/endpoint + labels: + testing.upbound.io/example-name: profilelabel + name: profilename +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: rgroupnamelabel + sku: Standard_Verizon + tags: + cost_center: MSFT + environment: Production + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: cdn/v1beta1/endpoint + labels: + testing.upbound.io/example-name: rgroupnamelabel + name: r-group-test-name +spec: + forProvider: + location: West Europe diff --git a/examples/cdn/v1beta2/frontdoorrule.yaml b/examples/cdn/v1beta2/frontdoorrule.yaml new file mode 100644 index 000000000..1ead91107 --- /dev/null +++ b/examples/cdn/v1beta2/frontdoorrule.yaml @@ -0,0 +1,193 @@ +# SPDX-FileCopyrightText: 2024 The Crossplane Authors +# +# SPDX-License-Identifier: Apache-2.0 + +apiVersion: cdn.azure.upbound.io/v1beta2 +kind: FrontdoorRule +metadata: + annotations: + meta.upbound.io/example-id: cdn/v1beta2/frontdoorrule + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + actions: + routeConfigurationOverrideAction: + cacheBehavior: OverrideIfOriginMissing + cacheDuration: 365.23:59:59 + cdnFrontdoorOriginGroupIdSelector: + matchLabels: + testing.upbound.io/example-name: example + compressionEnabled: true + forwardingProtocol: HttpsOnly + queryStringCachingBehavior: IncludeSpecifiedQueryStrings + queryStringParameters: + - foo + - clientIp={client_ip} + urlRedirectAction: + destinationFragment: UrlRedirect + destinationHostname: contoso.com + destinationPath: /exampleredirection + queryString: clientIp={client_ip} + redirectProtocol: MatchRequest + redirectType: PermanentRedirect + behaviorOnMatch: Continue + cdnFrontdoorRuleSetIdSelector: + matchLabels: + testing.upbound.io/example-name: example + conditions: + hostNameCondition: + - matchValues: + - www.contoso.com + - 
images.contoso.com + - video.contoso.com + negateCondition: false + operator: Equal + transforms: + - Lowercase + - Trim + isDeviceCondition: + - matchValues: + - Mobile + negateCondition: false + operator: Equal + postArgsCondition: + - matchValues: + - J + - K + operator: BeginsWith + postArgsName: customerName + transforms: + - Uppercase + requestMethodCondition: + - matchValues: + - DELETE + negateCondition: false + operator: Equal + urlFilenameCondition: + - matchValues: + - media.mp4 + negateCondition: false + operator: Equal + transforms: + - Lowercase + - RemoveNulls + - Trim + order: 1 + +--- + +apiVersion: cdn.azure.upbound.io/v1beta1 +kind: FrontdoorEndpoint +metadata: + annotations: + meta.upbound.io/example-id: cdn/v1beta2/frontdoorrule + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + cdnFrontdoorProfileIdSelector: + matchLabels: + testing.upbound.io/example-name: example + tags: + endpoint: contoso.com + +--- + +apiVersion: cdn.azure.upbound.io/v1beta2 +kind: FrontdoorOrigin +metadata: + annotations: + meta.upbound.io/example-id: cdn/v1beta2/frontdoorrule + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + cdnFrontdoorOriginGroupIdSelector: + matchLabels: + testing.upbound.io/example-name: example + certificateNameCheckEnabled: false + enabled: true + hostName: contoso.com + httpPort: 80 + httpsPort: 443 + originHostHeader: www.contoso.com + priority: 1 + weight: 500 + +--- + +apiVersion: cdn.azure.upbound.io/v1beta2 +kind: FrontdoorOriginGroup +metadata: + annotations: + meta.upbound.io/example-id: cdn/v1beta2/frontdoorrule + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + cdnFrontdoorProfileIdSelector: + matchLabels: + testing.upbound.io/example-name: example + healthProbe: + intervalInSeconds: 240 + path: /healthProbe + protocol: Https + requestType: GET + loadBalancing: + additionalLatencyInMilliseconds: 0 + sampleSize: 
16 + successfulSamplesRequired: 3 + restoreTrafficTimeToHealedOrNewEndpointInMinutes: 10 + sessionAffinityEnabled: true + +--- + +apiVersion: cdn.azure.upbound.io/v1beta1 +kind: FrontdoorProfile +metadata: + annotations: + meta.upbound.io/example-id: cdn/v1beta2/frontdoorrule + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: rgfrontdoorrule + skuName: Standard_AzureFrontDoor + +--- + +apiVersion: cdn.azure.upbound.io/v1beta1 +kind: FrontdoorRuleSet +metadata: + annotations: + meta.upbound.io/example-id: cdn/v1beta2/frontdoorrule + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + cdnFrontdoorProfileIdSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: cdn/v1beta2/frontdoorrule + labels: + testing.upbound.io/example-name: rgfrontdoorrule + name: rgfrontdoorrule +spec: + forProvider: + location: West Europe diff --git a/examples/certificateregistration/appservicecertificateorder.yaml b/examples/certificateregistration/v1beta1/appservicecertificateorder.yaml similarity index 100% rename from examples/certificateregistration/appservicecertificateorder.yaml rename to examples/certificateregistration/v1beta1/appservicecertificateorder.yaml diff --git a/examples/cognitiveservices/account.yaml b/examples/cognitiveservices/v1beta1/account.yaml similarity index 100% rename from examples/cognitiveservices/account.yaml rename to examples/cognitiveservices/v1beta1/account.yaml diff --git a/examples/cognitiveservices/deployment.yaml b/examples/cognitiveservices/v1beta1/deployment.yaml similarity index 100% rename from examples/cognitiveservices/deployment.yaml rename to examples/cognitiveservices/v1beta1/deployment.yaml diff --git a/examples/communication/service.yaml 
b/examples/communication/v1beta1/service.yaml similarity index 100% rename from examples/communication/service.yaml rename to examples/communication/v1beta1/service.yaml diff --git a/examples/compute/availabilityset.yaml b/examples/compute/v1beta1/availabilityset.yaml similarity index 100% rename from examples/compute/availabilityset.yaml rename to examples/compute/v1beta1/availabilityset.yaml diff --git a/examples/compute/capacityreservation.yaml b/examples/compute/v1beta1/capacityreservation.yaml similarity index 100% rename from examples/compute/capacityreservation.yaml rename to examples/compute/v1beta1/capacityreservation.yaml diff --git a/examples/compute/capacityreservationgroup.yaml b/examples/compute/v1beta1/capacityreservationgroup.yaml similarity index 100% rename from examples/compute/capacityreservationgroup.yaml rename to examples/compute/v1beta1/capacityreservationgroup.yaml diff --git a/examples/compute/dedicatedhost.yaml b/examples/compute/v1beta1/dedicatedhost.yaml similarity index 100% rename from examples/compute/dedicatedhost.yaml rename to examples/compute/v1beta1/dedicatedhost.yaml diff --git a/examples/compute/diskaccess.yaml b/examples/compute/v1beta1/diskaccess.yaml similarity index 100% rename from examples/compute/diskaccess.yaml rename to examples/compute/v1beta1/diskaccess.yaml diff --git a/examples/compute/diskencryptionset.yaml b/examples/compute/v1beta1/diskencryptionset.yaml similarity index 100% rename from examples/compute/diskencryptionset.yaml rename to examples/compute/v1beta1/diskencryptionset.yaml diff --git a/examples/compute/galleryapplication.yaml b/examples/compute/v1beta1/galleryapplication.yaml similarity index 100% rename from examples/compute/galleryapplication.yaml rename to examples/compute/v1beta1/galleryapplication.yaml diff --git a/examples/compute/galleryapplicationversion.yaml b/examples/compute/v1beta1/galleryapplicationversion.yaml similarity index 100% rename from 
examples/compute/galleryapplicationversion.yaml rename to examples/compute/v1beta1/galleryapplicationversion.yaml diff --git a/examples/compute/image.yaml b/examples/compute/v1beta1/image.yaml similarity index 100% rename from examples/compute/image.yaml rename to examples/compute/v1beta1/image.yaml diff --git a/examples/compute/linuxvirtualmachine.yaml b/examples/compute/v1beta1/linuxvirtualmachine.yaml similarity index 100% rename from examples/compute/linuxvirtualmachine.yaml rename to examples/compute/v1beta1/linuxvirtualmachine.yaml diff --git a/examples/compute/linuxvirtualmachinescaleset.yaml b/examples/compute/v1beta1/linuxvirtualmachinescaleset.yaml similarity index 100% rename from examples/compute/linuxvirtualmachinescaleset.yaml rename to examples/compute/v1beta1/linuxvirtualmachinescaleset.yaml diff --git a/examples/compute/manageddisk.yaml b/examples/compute/v1beta1/manageddisk.yaml similarity index 100% rename from examples/compute/manageddisk.yaml rename to examples/compute/v1beta1/manageddisk.yaml diff --git a/examples/compute/manageddisksastoken.yaml b/examples/compute/v1beta1/manageddisksastoken.yaml similarity index 100% rename from examples/compute/manageddisksastoken.yaml rename to examples/compute/v1beta1/manageddisksastoken.yaml diff --git a/examples/compute/marketplaceagreement.yaml b/examples/compute/v1beta1/marketplaceagreement.yaml similarity index 100% rename from examples/compute/marketplaceagreement.yaml rename to examples/compute/v1beta1/marketplaceagreement.yaml diff --git a/examples/compute/orchestratedvirtualmachinescaleset.yaml b/examples/compute/v1beta1/orchestratedvirtualmachinescaleset.yaml similarity index 100% rename from examples/compute/orchestratedvirtualmachinescaleset.yaml rename to examples/compute/v1beta1/orchestratedvirtualmachinescaleset.yaml diff --git a/examples/compute/proximityplacementgroup.yaml b/examples/compute/v1beta1/proximityplacementgroup.yaml similarity index 100% rename from 
examples/compute/proximityplacementgroup.yaml rename to examples/compute/v1beta1/proximityplacementgroup.yaml diff --git a/examples/compute/sharedimage.yaml b/examples/compute/v1beta1/sharedimage.yaml similarity index 100% rename from examples/compute/sharedimage.yaml rename to examples/compute/v1beta1/sharedimage.yaml diff --git a/examples/compute/sharedimagegallery.yaml b/examples/compute/v1beta1/sharedimagegallery.yaml similarity index 100% rename from examples/compute/sharedimagegallery.yaml rename to examples/compute/v1beta1/sharedimagegallery.yaml diff --git a/examples/compute/snapshot.yaml b/examples/compute/v1beta1/snapshot.yaml similarity index 100% rename from examples/compute/snapshot.yaml rename to examples/compute/v1beta1/snapshot.yaml diff --git a/examples/compute/sshpublickey.yaml b/examples/compute/v1beta1/sshpublickey.yaml similarity index 100% rename from examples/compute/sshpublickey.yaml rename to examples/compute/v1beta1/sshpublickey.yaml diff --git a/examples/compute/virtualmachinedatadiskattachment.yaml b/examples/compute/v1beta1/virtualmachinedatadiskattachment.yaml similarity index 100% rename from examples/compute/virtualmachinedatadiskattachment.yaml rename to examples/compute/v1beta1/virtualmachinedatadiskattachment.yaml diff --git a/examples/compute/virtualmachineextension.yaml b/examples/compute/v1beta1/virtualmachineextension.yaml similarity index 100% rename from examples/compute/virtualmachineextension.yaml rename to examples/compute/v1beta1/virtualmachineextension.yaml diff --git a/examples/compute/windowsvirtualmachine.yaml b/examples/compute/v1beta1/windowsvirtualmachine.yaml similarity index 100% rename from examples/compute/windowsvirtualmachine.yaml rename to examples/compute/v1beta1/windowsvirtualmachine.yaml diff --git a/examples/compute/windowsvirtualmachinescaleset.yaml b/examples/compute/v1beta1/windowsvirtualmachinescaleset.yaml similarity index 100% rename from examples/compute/windowsvirtualmachinescaleset.yaml rename 
to examples/compute/v1beta1/windowsvirtualmachinescaleset.yaml diff --git a/examples/compute/v1beta2/capacityreservation.yaml b/examples/compute/v1beta2/capacityreservation.yaml new file mode 100644 index 000000000..9c3e1b2f4 --- /dev/null +++ b/examples/compute/v1beta2/capacityreservation.yaml @@ -0,0 +1,51 @@ +# SPDX-FileCopyrightText: 2024 The Crossplane Authors +# +# SPDX-License-Identifier: Apache-2.0 + +apiVersion: compute.azure.upbound.io/v1beta2 +kind: CapacityReservation +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/capacityreservation + labels: + testing.upbound.io/example-name: examplecapacityreserv + name: examplecapacityreserv +spec: + forProvider: + capacityReservationGroupIdSelector: + matchLabels: + testing.upbound.io/example-name: capacityreserv-cg + sku: + capacity: 1 + name: Standard_D2s_v3 + +--- + +apiVersion: compute.azure.upbound.io/v1beta1 +kind: CapacityReservationGroup +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/capacityreservation + labels: + testing.upbound.io/example-name: capacityreserv-cg + name: capacityreserv-cg +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: capacityreserv-rg + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/capacityreservation + labels: + testing.upbound.io/example-name: capacityreserv-rg + name: capacityreserv-rg +spec: + forProvider: + location: West Europe diff --git a/examples/compute/v1beta2/sharedimage.yaml b/examples/compute/v1beta2/sharedimage.yaml new file mode 100644 index 000000000..f9fb1dbef --- /dev/null +++ b/examples/compute/v1beta2/sharedimage.yaml @@ -0,0 +1,61 @@ +# SPDX-FileCopyrightText: 2024 The Crossplane Authors +# +# SPDX-License-Identifier: Apache-2.0 + +apiVersion: compute.azure.upbound.io/v1beta2 +kind: SharedImage +metadata: + annotations: + meta.upbound.io/example-id: 
compute/v1beta2/sharedimage + labels: + testing.upbound.io/example-name: examplesharedimage + name: examplesharedimage +spec: + forProvider: + galleryNameSelector: + matchLabels: + testing.upbound.io/example-name: sharedimageig + identifier: + offer: OfferName + publisher: PublisherName + sku: ExampleSku + location: West Europe + osType: Linux + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: sharedimage-rg + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/sharedimage + labels: + testing.upbound.io/example-name: sharedimage-rg + name: sharedimage-rg +spec: + forProvider: + location: West Europe + +--- + +apiVersion: compute.azure.upbound.io/v1beta2 +kind: SharedImageGallery +metadata: + annotations: + meta.upbound.io/example-id: compute/v1beta2/sharedimage + labels: + testing.upbound.io/example-name: sharedimageig + name: sharedimageig +spec: + forProvider: + description: Shared images and things. 
+ location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: sharedimage-rg + tags: + Hello: There + World: Example diff --git a/examples/confidentialledger/ledger.yaml b/examples/confidentialledger/v1beta1/ledger.yaml similarity index 100% rename from examples/confidentialledger/ledger.yaml rename to examples/confidentialledger/v1beta1/ledger.yaml diff --git a/examples/consumption/budgetmanagementgroup.yaml b/examples/consumption/v1beta1/budgetmanagementgroup.yaml similarity index 100% rename from examples/consumption/budgetmanagementgroup.yaml rename to examples/consumption/v1beta1/budgetmanagementgroup.yaml diff --git a/examples/consumption/budgetresourcegroup.yaml b/examples/consumption/v1beta1/budgetresourcegroup.yaml similarity index 100% rename from examples/consumption/budgetresourcegroup.yaml rename to examples/consumption/v1beta1/budgetresourcegroup.yaml diff --git a/examples/consumption/budgetsubscription.yaml b/examples/consumption/v1beta1/budgetsubscription.yaml similarity index 100% rename from examples/consumption/budgetsubscription.yaml rename to examples/consumption/v1beta1/budgetsubscription.yaml diff --git a/examples/containerapp/containerapp.yaml b/examples/containerapp/v1beta1/containerapp.yaml similarity index 100% rename from examples/containerapp/containerapp.yaml rename to examples/containerapp/v1beta1/containerapp.yaml diff --git a/examples/containerapp/environment.yaml b/examples/containerapp/v1beta1/environment.yaml similarity index 100% rename from examples/containerapp/environment.yaml rename to examples/containerapp/v1beta1/environment.yaml diff --git a/examples/containerregistry/agentpool.yaml b/examples/containerregistry/v1beta1/agentpool.yaml similarity index 100% rename from examples/containerregistry/agentpool.yaml rename to examples/containerregistry/v1beta1/agentpool.yaml diff --git a/examples/containerregistry/containerconnectedregistry.yaml 
b/examples/containerregistry/v1beta1/containerconnectedregistry.yaml similarity index 100% rename from examples/containerregistry/containerconnectedregistry.yaml rename to examples/containerregistry/v1beta1/containerconnectedregistry.yaml diff --git a/examples/containerregistry/registry.yaml b/examples/containerregistry/v1beta1/registry.yaml similarity index 100% rename from examples/containerregistry/registry.yaml rename to examples/containerregistry/v1beta1/registry.yaml diff --git a/examples/containerregistry/scopemap.yaml b/examples/containerregistry/v1beta1/scopemap.yaml similarity index 100% rename from examples/containerregistry/scopemap.yaml rename to examples/containerregistry/v1beta1/scopemap.yaml diff --git a/examples/containerregistry/token.yaml b/examples/containerregistry/v1beta1/token.yaml similarity index 100% rename from examples/containerregistry/token.yaml rename to examples/containerregistry/v1beta1/token.yaml diff --git a/examples/containerregistry/tokenpassword.yaml b/examples/containerregistry/v1beta1/tokenpassword.yaml similarity index 92% rename from examples/containerregistry/tokenpassword.yaml rename to examples/containerregistry/v1beta1/tokenpassword.yaml index 8c989bc18..a3932dfe8 100644 --- a/examples/containerregistry/tokenpassword.yaml +++ b/examples/containerregistry/v1beta1/tokenpassword.yaml @@ -6,7 +6,7 @@ apiVersion: containerregistry.azure.upbound.io/v1beta1 kind: TokenPassword metadata: annotations: - uptest.upbound.io/pre-delete-hook: testhooks/delete-token.sh + uptest.upbound.io/pre-delete-hook: ../testhooks/delete-token.sh meta.upbound.io/example-id: containerregistry/v1beta1/tokenpassword labels: testing.upbound.io/example-name: example @@ -44,7 +44,7 @@ apiVersion: containerregistry.azure.upbound.io/v1beta1 kind: ScopeMap metadata: annotations: - uptest.upbound.io/pre-delete-hook: testhooks/delete-token.sh + uptest.upbound.io/pre-delete-hook: ../testhooks/delete-token.sh meta.upbound.io/example-id: 
containerregistry/v1beta1/tokenpassword labels: testing.upbound.io/example-name: tokenpasswordsm @@ -67,7 +67,7 @@ apiVersion: containerregistry.azure.upbound.io/v1beta1 kind: Token metadata: annotations: - uptest.upbound.io/pre-delete-hook: testhooks/delete-token.sh + uptest.upbound.io/pre-delete-hook: ../testhooks/delete-token.sh meta.upbound.io/example-id: containerregistry/v1beta1/tokenpassword labels: testing.upbound.io/example-name: tokenpasswordtoken diff --git a/examples/containerregistry/webhook.yaml b/examples/containerregistry/v1beta1/webhook.yaml similarity index 100% rename from examples/containerregistry/webhook.yaml rename to examples/containerregistry/v1beta1/webhook.yaml diff --git a/examples/containerservice/kubernetescluster.yaml b/examples/containerservice/v1beta1/kubernetescluster.yaml similarity index 100% rename from examples/containerservice/kubernetescluster.yaml rename to examples/containerservice/v1beta1/kubernetescluster.yaml diff --git a/examples/containerservice/kubernetesfleetmanager.yaml b/examples/containerservice/v1beta1/kubernetesfleetmanager.yaml similarity index 100% rename from examples/containerservice/kubernetesfleetmanager.yaml rename to examples/containerservice/v1beta1/kubernetesfleetmanager.yaml diff --git a/examples/containerservice/v1beta2/kubernetescluster.yaml b/examples/containerservice/v1beta2/kubernetescluster.yaml new file mode 100644 index 000000000..89a519b08 --- /dev/null +++ b/examples/containerservice/v1beta2/kubernetescluster.yaml @@ -0,0 +1,62 @@ +# SPDX-FileCopyrightText: 2024 The Crossplane Authors +# +# SPDX-License-Identifier: Apache-2.0 + +apiVersion: containerservice.azure.upbound.io/v1beta2 +kind: KubernetesClusterNodePool +metadata: + annotations: + meta.upbound.io/example-id: containerservice/v1beta2/kubernetesclusternodepool + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + kubernetesClusterIdSelector: + matchLabels: + testing.upbound.io/example-name: 
example + nodeCount: 1 + tags: + Environment: Production + vmSize: Standard_DS2_v2 + +--- + +apiVersion: containerservice.azure.upbound.io/v1beta2 +kind: KubernetesCluster +metadata: + annotations: + meta.upbound.io/example-id: containerservice/v1beta2/kubernetesclusternodepool + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + apiServerAccessProfile: + authorizedIpRanges: + - 192.168.1.0/24 + defaultNodePool: + name: default + nodeCount: 1 + vmSize: Standard_D2_v2 + dnsPrefix: exampleaks1 + identity: + type: SystemAssigned + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example-containerservice + tags: + Environment: Production + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + labels: + testing.upbound.io/example-name: example-containerservice + name: example-containerservice-${Rand.RFC1123Subdomain} +spec: + forProvider: + location: West Europe diff --git a/examples/cosmosdb/account.yaml b/examples/cosmosdb/v1beta1/account.yaml similarity index 100% rename from examples/cosmosdb/account.yaml rename to examples/cosmosdb/v1beta1/account.yaml diff --git a/examples/cosmosdb/cassandradatacenter.yaml b/examples/cosmosdb/v1beta1/cassandradatacenter.yaml similarity index 100% rename from examples/cosmosdb/cassandradatacenter.yaml rename to examples/cosmosdb/v1beta1/cassandradatacenter.yaml diff --git a/examples/cosmosdb/cassandratable.yaml b/examples/cosmosdb/v1beta1/cassandratable.yaml similarity index 100% rename from examples/cosmosdb/cassandratable.yaml rename to examples/cosmosdb/v1beta1/cassandratable.yaml diff --git a/examples/cosmosdb/gremlingraph.yaml b/examples/cosmosdb/v1beta1/gremlingraph.yaml similarity index 100% rename from examples/cosmosdb/gremlingraph.yaml rename to examples/cosmosdb/v1beta1/gremlingraph.yaml diff --git a/examples/cosmosdb/mongocollection.yaml b/examples/cosmosdb/v1beta1/mongocollection.yaml similarity index 
100% rename from examples/cosmosdb/mongocollection.yaml rename to examples/cosmosdb/v1beta1/mongocollection.yaml diff --git a/examples/cosmosdb/sqldedicatedgateway.yaml b/examples/cosmosdb/v1beta1/sqldedicatedgateway.yaml similarity index 100% rename from examples/cosmosdb/sqldedicatedgateway.yaml rename to examples/cosmosdb/v1beta1/sqldedicatedgateway.yaml diff --git a/examples/cosmosdb/sqlfunction.yaml b/examples/cosmosdb/v1beta1/sqlfunction.yaml similarity index 100% rename from examples/cosmosdb/sqlfunction.yaml rename to examples/cosmosdb/v1beta1/sqlfunction.yaml diff --git a/examples/cosmosdb/sqlroleassignment.yaml b/examples/cosmosdb/v1beta1/sqlroleassignment.yaml similarity index 100% rename from examples/cosmosdb/sqlroleassignment.yaml rename to examples/cosmosdb/v1beta1/sqlroleassignment.yaml diff --git a/examples/cosmosdb/sqlroledefinition.yaml b/examples/cosmosdb/v1beta1/sqlroledefinition.yaml similarity index 100% rename from examples/cosmosdb/sqlroledefinition.yaml rename to examples/cosmosdb/v1beta1/sqlroledefinition.yaml diff --git a/examples/cosmosdb/table.yaml b/examples/cosmosdb/v1beta1/table.yaml similarity index 100% rename from examples/cosmosdb/table.yaml rename to examples/cosmosdb/v1beta1/table.yaml diff --git a/examples/cosmosdb/v1beta2/account.yaml b/examples/cosmosdb/v1beta2/account.yaml new file mode 100644 index 000000000..f7a7e3b7d --- /dev/null +++ b/examples/cosmosdb/v1beta2/account.yaml @@ -0,0 +1,49 @@ +# SPDX-FileCopyrightText: 2024 The Crossplane Authors +# +# SPDX-License-Identifier: Apache-2.0 + +apiVersion: cosmosdb.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: cosmosdb/v1beta2/account + labels: + testing.upbound.io/example-name: db + name: example-cosmosdb-${Rand.RFC1123Subdomain} +spec: + forProvider: + capabilities: + - name: EnableAggregationPipeline + - name: mongoEnableDocLevelTTL + - name: MongoDBv3.4 + - name: EnableMongo + consistencyPolicy: + consistencyLevel: 
BoundedStaleness + maxIntervalInSeconds: 300 + maxStalenessPrefix: 100000 + enableAutomaticFailover: true + geoLocation: + - failoverPriority: 1 + location: eastus + - failoverPriority: 0 + location: westus + kind: MongoDB + location: West Europe + offerType: Standard + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: cosmosdb/v1beta2/account + labels: + testing.upbound.io/example-name: example + name: cosmosdb-rg +spec: + forProvider: + location: West Europe diff --git a/examples/cosmosdb/v1beta2/mongocollection.yaml b/examples/cosmosdb/v1beta2/mongocollection.yaml new file mode 100644 index 000000000..e595a7fad --- /dev/null +++ b/examples/cosmosdb/v1beta2/mongocollection.yaml @@ -0,0 +1,77 @@ +# SPDX-FileCopyrightText: 2024 The Crossplane Authors +# +# SPDX-License-Identifier: Apache-2.0 + +apiVersion: cosmosdb.azure.upbound.io/v1beta2 +kind: MongoCollection +metadata: + annotations: + meta.upbound.io/example-id: cosmosdb/v1beta2/mongocollection + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountNameSelector: + matchLabels: + testing.upbound.io/example-name: mongo + databaseNameSelector: + matchLabels: + testing.upbound.io/example-name: example + defaultTtlSeconds: 777 + index: + - keys: + - _id + unique: true + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: rg1 + shardKey: uniqueKey + throughput: 400 + +--- + +apiVersion: cosmosdb.azure.upbound.io/v1beta2 +kind: MongoDatabase +metadata: + annotations: + meta.upbound.io/example-id: cosmosdb/v1beta2/mongocollection + labels: + testing.upbound.io/example-name: example + name: example +spec: + forProvider: + accountNameSelector: + matchLabels: + testing.upbound.io/example-name: mongo + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: rg1 + +--- + 
+apiVersion: cosmosdb.azure.upbound.io/v1beta2 +kind: Account +metadata: + annotations: + meta.upbound.io/example-id: cosmosdb/v1beta2/mongocollection + labels: + testing.upbound.io/example-name: mongo + name: example-cosmosdb-mongo-${Rand.RFC1123Subdomain} +spec: + forProvider: + capabilities: + - name: EnableMongo + - name: MongoDBv3.4 + - name: mongoEnableDocLevelTTL + consistencyPolicy: + consistencyLevel: Strong + geoLocation: + - failoverPriority: 0 + location: West Europe + kind: MongoDB + location: West Europe + offerType: Standard + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: rg1 diff --git a/examples/costmanagement/costanomalyalert.yaml b/examples/costmanagement/v1beta1/costanomalyalert.yaml similarity index 100% rename from examples/costmanagement/costanomalyalert.yaml rename to examples/costmanagement/v1beta1/costanomalyalert.yaml diff --git a/examples/costmanagement/resourcegroupcostmanagementexport.yaml b/examples/costmanagement/v1beta1/resourcegroupcostmanagementexport.yaml similarity index 100% rename from examples/costmanagement/resourcegroupcostmanagementexport.yaml rename to examples/costmanagement/v1beta1/resourcegroupcostmanagementexport.yaml diff --git a/examples/costmanagement/subscriptioncostmanagementexport.yaml b/examples/costmanagement/v1beta1/subscriptioncostmanagementexport.yaml similarity index 100% rename from examples/costmanagement/subscriptioncostmanagementexport.yaml rename to examples/costmanagement/v1beta1/subscriptioncostmanagementexport.yaml diff --git a/examples/customproviders/customprovider.yaml b/examples/customproviders/v1beta1/customprovider.yaml similarity index 100% rename from examples/customproviders/customprovider.yaml rename to examples/customproviders/v1beta1/customprovider.yaml diff --git a/examples/databoxedge/device.yaml b/examples/databoxedge/v1beta1/device.yaml similarity index 100% rename from examples/databoxedge/device.yaml rename to examples/databoxedge/v1beta1/device.yaml 
diff --git a/examples/databricks/accessconnector.yaml b/examples/databricks/v1beta1/accessconnector.yaml similarity index 100% rename from examples/databricks/accessconnector.yaml rename to examples/databricks/v1beta1/accessconnector.yaml diff --git a/examples/databricks/workspace.yaml b/examples/databricks/v1beta1/workspace.yaml similarity index 100% rename from examples/databricks/workspace.yaml rename to examples/databricks/v1beta1/workspace.yaml diff --git a/examples/databricks/workspacecustomermanagedkey.yaml b/examples/databricks/v1beta1/workspacecustomermanagedkey.yaml similarity index 100% rename from examples/databricks/workspacecustomermanagedkey.yaml rename to examples/databricks/v1beta1/workspacecustomermanagedkey.yaml diff --git a/examples/databricks/workspacerootdbfscustomermanagedkey.yaml b/examples/databricks/v1beta1/workspacerootdbfscustomermanagedkey.yaml similarity index 100% rename from examples/databricks/workspacerootdbfscustomermanagedkey.yaml rename to examples/databricks/v1beta1/workspacerootdbfscustomermanagedkey.yaml diff --git a/examples/datafactory/customdataset.yaml b/examples/datafactory/v1beta1/customdataset.yaml similarity index 100% rename from examples/datafactory/customdataset.yaml rename to examples/datafactory/v1beta1/customdataset.yaml diff --git a/examples/datafactory/dataflow.yaml b/examples/datafactory/v1beta1/dataflow.yaml similarity index 100% rename from examples/datafactory/dataflow.yaml rename to examples/datafactory/v1beta1/dataflow.yaml diff --git a/examples/datafactory/datasetazureblob.yaml b/examples/datafactory/v1beta1/datasetazureblob.yaml similarity index 100% rename from examples/datafactory/datasetazureblob.yaml rename to examples/datafactory/v1beta1/datasetazureblob.yaml diff --git a/examples/datafactory/datasetbinary.yaml b/examples/datafactory/v1beta1/datasetbinary.yaml similarity index 100% rename from examples/datafactory/datasetbinary.yaml rename to examples/datafactory/v1beta1/datasetbinary.yaml diff 
--git a/examples/datafactory/datasetcosmosdbsqlapi.yaml b/examples/datafactory/v1beta1/datasetcosmosdbsqlapi.yaml similarity index 100% rename from examples/datafactory/datasetcosmosdbsqlapi.yaml rename to examples/datafactory/v1beta1/datasetcosmosdbsqlapi.yaml diff --git a/examples/datafactory/datasetdelimitedtext.yaml b/examples/datafactory/v1beta1/datasetdelimitedtext.yaml similarity index 100% rename from examples/datafactory/datasetdelimitedtext.yaml rename to examples/datafactory/v1beta1/datasetdelimitedtext.yaml diff --git a/examples/datafactory/datasethttp.yaml b/examples/datafactory/v1beta1/datasethttp.yaml similarity index 100% rename from examples/datafactory/datasethttp.yaml rename to examples/datafactory/v1beta1/datasethttp.yaml diff --git a/examples/datafactory/datasetjson.yaml b/examples/datafactory/v1beta1/datasetjson.yaml similarity index 100% rename from examples/datafactory/datasetjson.yaml rename to examples/datafactory/v1beta1/datasetjson.yaml diff --git a/examples/datafactory/datasetmysql.yaml b/examples/datafactory/v1beta1/datasetmysql.yaml similarity index 100% rename from examples/datafactory/datasetmysql.yaml rename to examples/datafactory/v1beta1/datasetmysql.yaml diff --git a/examples/datafactory/datasetparquet.yaml b/examples/datafactory/v1beta1/datasetparquet.yaml similarity index 100% rename from examples/datafactory/datasetparquet.yaml rename to examples/datafactory/v1beta1/datasetparquet.yaml diff --git a/examples/datafactory/datasetpostgresql.yaml b/examples/datafactory/v1beta1/datasetpostgresql.yaml similarity index 100% rename from examples/datafactory/datasetpostgresql.yaml rename to examples/datafactory/v1beta1/datasetpostgresql.yaml diff --git a/examples/datafactory/datasetsnowflake.yaml b/examples/datafactory/v1beta1/datasetsnowflake.yaml similarity index 100% rename from examples/datafactory/datasetsnowflake.yaml rename to examples/datafactory/v1beta1/datasetsnowflake.yaml diff --git 
a/examples/datafactory/datasetsqlservertable.yaml b/examples/datafactory/v1beta1/datasetsqlservertable.yaml similarity index 100% rename from examples/datafactory/datasetsqlservertable.yaml rename to examples/datafactory/v1beta1/datasetsqlservertable.yaml diff --git a/examples/datafactory/factory.yaml b/examples/datafactory/v1beta1/factory.yaml similarity index 100% rename from examples/datafactory/factory.yaml rename to examples/datafactory/v1beta1/factory.yaml diff --git a/examples/datafactory/integrationruntimeazure.yaml b/examples/datafactory/v1beta1/integrationruntimeazure.yaml similarity index 100% rename from examples/datafactory/integrationruntimeazure.yaml rename to examples/datafactory/v1beta1/integrationruntimeazure.yaml diff --git a/examples/datafactory/integrationruntimeazuressis.yaml b/examples/datafactory/v1beta1/integrationruntimeazuressis.yaml similarity index 100% rename from examples/datafactory/integrationruntimeazuressis.yaml rename to examples/datafactory/v1beta1/integrationruntimeazuressis.yaml diff --git a/examples/datafactory/integrationruntimemanaged.yaml b/examples/datafactory/v1beta1/integrationruntimemanaged.yaml similarity index 100% rename from examples/datafactory/integrationruntimemanaged.yaml rename to examples/datafactory/v1beta1/integrationruntimemanaged.yaml diff --git a/examples/datafactory/integrationruntimeselfhosted.yaml b/examples/datafactory/v1beta1/integrationruntimeselfhosted.yaml similarity index 100% rename from examples/datafactory/integrationruntimeselfhosted.yaml rename to examples/datafactory/v1beta1/integrationruntimeselfhosted.yaml diff --git a/examples/datafactory/linkedcustomservice.yaml b/examples/datafactory/v1beta1/linkedcustomservice.yaml similarity index 100% rename from examples/datafactory/linkedcustomservice.yaml rename to examples/datafactory/v1beta1/linkedcustomservice.yaml diff --git a/examples/datafactory/linkedserviceazureblobstorage.yaml 
b/examples/datafactory/v1beta1/linkedserviceazureblobstorage.yaml similarity index 96% rename from examples/datafactory/linkedserviceazureblobstorage.yaml rename to examples/datafactory/v1beta1/linkedserviceazureblobstorage.yaml index 8444c5c02..5c902770e 100644 --- a/examples/datafactory/linkedserviceazureblobstorage.yaml +++ b/examples/datafactory/v1beta1/linkedserviceazureblobstorage.yaml @@ -29,7 +29,7 @@ metadata: name: example-secret namespace: upbound-system annotations: - uptest.upbound.io/pre-delete-hook: testhooks/delete-blob-storage.sh + uptest.upbound.io/pre-delete-hook: ../testhooks/delete-blob-storage.sh type: Opaque stringData: primary_connection_string: DefaultEndpointsProtocol=https;AccountName=lsabsexample;AccountKey=a5zs3v5FCnopLrBjP8eLpdQ1HyY3EO42GjdLixC6q289pxcoQCnn22lOicAJYGb+StUvndFe4JmQ+AStY6pi0w==;EndpointSuffix=core.windows.net diff --git a/examples/datafactory/linkedserviceazuredatabricks.yaml b/examples/datafactory/v1beta1/linkedserviceazuredatabricks.yaml similarity index 100% rename from examples/datafactory/linkedserviceazuredatabricks.yaml rename to examples/datafactory/v1beta1/linkedserviceazuredatabricks.yaml diff --git a/examples/datafactory/linkedserviceazurefilestorage.yaml b/examples/datafactory/v1beta1/linkedserviceazurefilestorage.yaml similarity index 100% rename from examples/datafactory/linkedserviceazurefilestorage.yaml rename to examples/datafactory/v1beta1/linkedserviceazurefilestorage.yaml diff --git a/examples/datafactory/linkedserviceazurefunction.yaml b/examples/datafactory/v1beta1/linkedserviceazurefunction.yaml similarity index 100% rename from examples/datafactory/linkedserviceazurefunction.yaml rename to examples/datafactory/v1beta1/linkedserviceazurefunction.yaml diff --git a/examples/datafactory/linkedserviceazuresearch.yaml b/examples/datafactory/v1beta1/linkedserviceazuresearch.yaml similarity index 100% rename from examples/datafactory/linkedserviceazuresearch.yaml rename to 
examples/datafactory/v1beta1/linkedserviceazuresearch.yaml diff --git a/examples/datafactory/linkedserviceazuresqldatabase.yaml b/examples/datafactory/v1beta1/linkedserviceazuresqldatabase.yaml similarity index 100% rename from examples/datafactory/linkedserviceazuresqldatabase.yaml rename to examples/datafactory/v1beta1/linkedserviceazuresqldatabase.yaml diff --git a/examples/datafactory/linkedserviceazuretablestorage.yaml b/examples/datafactory/v1beta1/linkedserviceazuretablestorage.yaml similarity index 100% rename from examples/datafactory/linkedserviceazuretablestorage.yaml rename to examples/datafactory/v1beta1/linkedserviceazuretablestorage.yaml diff --git a/examples/datafactory/linkedservicecosmosdb.yaml b/examples/datafactory/v1beta1/linkedservicecosmosdb.yaml similarity index 100% rename from examples/datafactory/linkedservicecosmosdb.yaml rename to examples/datafactory/v1beta1/linkedservicecosmosdb.yaml diff --git a/examples/datafactory/linkedservicecosmosdbmongoapi.yaml b/examples/datafactory/v1beta1/linkedservicecosmosdbmongoapi.yaml similarity index 100% rename from examples/datafactory/linkedservicecosmosdbmongoapi.yaml rename to examples/datafactory/v1beta1/linkedservicecosmosdbmongoapi.yaml diff --git a/examples/datafactory/linkedservicedatalakestoragegen2.yaml b/examples/datafactory/v1beta1/linkedservicedatalakestoragegen2.yaml similarity index 100% rename from examples/datafactory/linkedservicedatalakestoragegen2.yaml rename to examples/datafactory/v1beta1/linkedservicedatalakestoragegen2.yaml diff --git a/examples/datafactory/linkedservicekeyvault.yaml b/examples/datafactory/v1beta1/linkedservicekeyvault.yaml similarity index 100% rename from examples/datafactory/linkedservicekeyvault.yaml rename to examples/datafactory/v1beta1/linkedservicekeyvault.yaml diff --git a/examples/datafactory/linkedservicekusto.yaml b/examples/datafactory/v1beta1/linkedservicekusto.yaml similarity index 100% rename from examples/datafactory/linkedservicekusto.yaml 
rename to examples/datafactory/v1beta1/linkedservicekusto.yaml diff --git a/examples/datafactory/linkedservicemysql.yaml b/examples/datafactory/v1beta1/linkedservicemysql.yaml similarity index 100% rename from examples/datafactory/linkedservicemysql.yaml rename to examples/datafactory/v1beta1/linkedservicemysql.yaml diff --git a/examples/datafactory/linkedserviceodata.yaml b/examples/datafactory/v1beta1/linkedserviceodata.yaml similarity index 100% rename from examples/datafactory/linkedserviceodata.yaml rename to examples/datafactory/v1beta1/linkedserviceodata.yaml diff --git a/examples/datafactory/linkedserviceodbc.yaml b/examples/datafactory/v1beta1/linkedserviceodbc.yaml similarity index 100% rename from examples/datafactory/linkedserviceodbc.yaml rename to examples/datafactory/v1beta1/linkedserviceodbc.yaml diff --git a/examples/datafactory/linkedservicepostgresql.yaml b/examples/datafactory/v1beta1/linkedservicepostgresql.yaml similarity index 100% rename from examples/datafactory/linkedservicepostgresql.yaml rename to examples/datafactory/v1beta1/linkedservicepostgresql.yaml diff --git a/examples/datafactory/linkedservicesftp.yaml b/examples/datafactory/v1beta1/linkedservicesftp.yaml similarity index 100% rename from examples/datafactory/linkedservicesftp.yaml rename to examples/datafactory/v1beta1/linkedservicesftp.yaml diff --git a/examples/datafactory/linkedservicesnowflake.yaml b/examples/datafactory/v1beta1/linkedservicesnowflake.yaml similarity index 100% rename from examples/datafactory/linkedservicesnowflake.yaml rename to examples/datafactory/v1beta1/linkedservicesnowflake.yaml diff --git a/examples/datafactory/linkedservicesqlserver.yaml b/examples/datafactory/v1beta1/linkedservicesqlserver.yaml similarity index 100% rename from examples/datafactory/linkedservicesqlserver.yaml rename to examples/datafactory/v1beta1/linkedservicesqlserver.yaml diff --git a/examples/datafactory/linkedservicesynapse.yaml 
b/examples/datafactory/v1beta1/linkedservicesynapse.yaml similarity index 100% rename from examples/datafactory/linkedservicesynapse.yaml rename to examples/datafactory/v1beta1/linkedservicesynapse.yaml diff --git a/examples/datafactory/linkedserviceweb.yaml b/examples/datafactory/v1beta1/linkedserviceweb.yaml similarity index 100% rename from examples/datafactory/linkedserviceweb.yaml rename to examples/datafactory/v1beta1/linkedserviceweb.yaml diff --git a/examples/datafactory/managedprivateendpoint.yaml b/examples/datafactory/v1beta1/managedprivateendpoint.yaml similarity index 100% rename from examples/datafactory/managedprivateendpoint.yaml rename to examples/datafactory/v1beta1/managedprivateendpoint.yaml diff --git a/examples/datafactory/pipeline.yaml b/examples/datafactory/v1beta1/pipeline.yaml similarity index 100% rename from examples/datafactory/pipeline.yaml rename to examples/datafactory/v1beta1/pipeline.yaml diff --git a/examples/datafactory/triggerblobevent.yaml b/examples/datafactory/v1beta1/triggerblobevent.yaml similarity index 100% rename from examples/datafactory/triggerblobevent.yaml rename to examples/datafactory/v1beta1/triggerblobevent.yaml diff --git a/examples/datafactory/triggercustomevent.yaml b/examples/datafactory/v1beta1/triggercustomevent.yaml similarity index 100% rename from examples/datafactory/triggercustomevent.yaml rename to examples/datafactory/v1beta1/triggercustomevent.yaml diff --git a/examples/datafactory/triggerschedule.yaml b/examples/datafactory/v1beta1/triggerschedule.yaml similarity index 100% rename from examples/datafactory/triggerschedule.yaml rename to examples/datafactory/v1beta1/triggerschedule.yaml diff --git a/examples/datamigration/databasemigrationproject.yaml b/examples/datamigration/v1beta1/databasemigrationproject.yaml similarity index 100% rename from examples/datamigration/databasemigrationproject.yaml rename to examples/datamigration/v1beta1/databasemigrationproject.yaml diff --git 
a/examples/datamigration/databasemigrationservice.yaml b/examples/datamigration/v1beta1/databasemigrationservice.yaml similarity index 100% rename from examples/datamigration/databasemigrationservice.yaml rename to examples/datamigration/v1beta1/databasemigrationservice.yaml diff --git a/examples/dataprotection/backupinstanceblobstorage.yaml b/examples/dataprotection/v1beta1/backupinstanceblobstorage.yaml similarity index 100% rename from examples/dataprotection/backupinstanceblobstorage.yaml rename to examples/dataprotection/v1beta1/backupinstanceblobstorage.yaml diff --git a/examples/dataprotection/backupinstancedisk.yaml b/examples/dataprotection/v1beta1/backupinstancedisk.yaml similarity index 100% rename from examples/dataprotection/backupinstancedisk.yaml rename to examples/dataprotection/v1beta1/backupinstancedisk.yaml diff --git a/examples/dataprotection/backupinstancepostgresql.yaml b/examples/dataprotection/v1beta1/backupinstancepostgresql.yaml similarity index 100% rename from examples/dataprotection/backupinstancepostgresql.yaml rename to examples/dataprotection/v1beta1/backupinstancepostgresql.yaml diff --git a/examples/dataprotection/backuppolicyblobstorage.yaml b/examples/dataprotection/v1beta1/backuppolicyblobstorage.yaml similarity index 100% rename from examples/dataprotection/backuppolicyblobstorage.yaml rename to examples/dataprotection/v1beta1/backuppolicyblobstorage.yaml diff --git a/examples/dataprotection/backuppolicydisk.yaml b/examples/dataprotection/v1beta1/backuppolicydisk.yaml similarity index 100% rename from examples/dataprotection/backuppolicydisk.yaml rename to examples/dataprotection/v1beta1/backuppolicydisk.yaml diff --git a/examples/dataprotection/backuppolicypostgresql.yaml b/examples/dataprotection/v1beta1/backuppolicypostgresql.yaml similarity index 100% rename from examples/dataprotection/backuppolicypostgresql.yaml rename to examples/dataprotection/v1beta1/backuppolicypostgresql.yaml diff --git 
a/examples/dataprotection/backupvault.yaml b/examples/dataprotection/v1beta1/backupvault.yaml similarity index 100% rename from examples/dataprotection/backupvault.yaml rename to examples/dataprotection/v1beta1/backupvault.yaml diff --git a/examples/dataprotection/resourceguard.yaml b/examples/dataprotection/v1beta1/resourceguard.yaml similarity index 100% rename from examples/dataprotection/resourceguard.yaml rename to examples/dataprotection/v1beta1/resourceguard.yaml diff --git a/examples/datashare/account.yaml b/examples/datashare/v1beta1/account.yaml similarity index 100% rename from examples/datashare/account.yaml rename to examples/datashare/v1beta1/account.yaml diff --git a/examples/datashare/datasetblobstorage.yaml b/examples/datashare/v1beta1/datasetblobstorage.yaml similarity index 100% rename from examples/datashare/datasetblobstorage.yaml rename to examples/datashare/v1beta1/datasetblobstorage.yaml diff --git a/examples/datashare/datasetdatalakegen2.yaml b/examples/datashare/v1beta1/datasetdatalakegen2.yaml similarity index 100% rename from examples/datashare/datasetdatalakegen2.yaml rename to examples/datashare/v1beta1/datasetdatalakegen2.yaml diff --git a/examples/datashare/datasetkustocluster.yaml b/examples/datashare/v1beta1/datasetkustocluster.yaml similarity index 100% rename from examples/datashare/datasetkustocluster.yaml rename to examples/datashare/v1beta1/datasetkustocluster.yaml diff --git a/examples/datashare/datasetkustodatabase.yaml b/examples/datashare/v1beta1/datasetkustodatabase.yaml similarity index 100% rename from examples/datashare/datasetkustodatabase.yaml rename to examples/datashare/v1beta1/datasetkustodatabase.yaml diff --git a/examples/dbformariadb/dbformariadb-all-in-one.yaml b/examples/dbformariadb/v1beta1/dbformariadb-all-in-one.yaml similarity index 100% rename from examples/dbformariadb/dbformariadb-all-in-one.yaml rename to examples/dbformariadb/v1beta1/dbformariadb-all-in-one.yaml diff --git 
a/examples/dbformariadb/virtualnetworkrule.yaml b/examples/dbformariadb/v1beta1/virtualnetworkrule.yaml similarity index 100% rename from examples/dbformariadb/virtualnetworkrule.yaml rename to examples/dbformariadb/v1beta1/virtualnetworkrule.yaml diff --git a/examples/dbformysql/activedirectoryadministrator.yaml b/examples/dbformysql/v1beta1/activedirectoryadministrator.yaml similarity index 100% rename from examples/dbformysql/activedirectoryadministrator.yaml rename to examples/dbformysql/v1beta1/activedirectoryadministrator.yaml diff --git a/examples/dbformysql/configuration.yaml b/examples/dbformysql/v1beta1/configuration.yaml similarity index 100% rename from examples/dbformysql/configuration.yaml rename to examples/dbformysql/v1beta1/configuration.yaml diff --git a/examples/dbformysql/database.yaml b/examples/dbformysql/v1beta1/database.yaml similarity index 100% rename from examples/dbformysql/database.yaml rename to examples/dbformysql/v1beta1/database.yaml diff --git a/examples/dbformysql/firewallrule.yaml b/examples/dbformysql/v1beta1/firewallrule.yaml similarity index 100% rename from examples/dbformysql/firewallrule.yaml rename to examples/dbformysql/v1beta1/firewallrule.yaml diff --git a/examples/dbformysql/flexibledatabase.yaml b/examples/dbformysql/v1beta1/flexibledatabase.yaml similarity index 100% rename from examples/dbformysql/flexibledatabase.yaml rename to examples/dbformysql/v1beta1/flexibledatabase.yaml diff --git a/examples/dbformysql/flexibleserver.yaml b/examples/dbformysql/v1beta1/flexibleserver.yaml similarity index 100% rename from examples/dbformysql/flexibleserver.yaml rename to examples/dbformysql/v1beta1/flexibleserver.yaml diff --git a/examples/dbformysql/flexibleserverconfiguration.yaml b/examples/dbformysql/v1beta1/flexibleserverconfiguration.yaml similarity index 100% rename from examples/dbformysql/flexibleserverconfiguration.yaml rename to examples/dbformysql/v1beta1/flexibleserverconfiguration.yaml diff --git 
a/examples/dbformysql/flexibleserverfirewallrule.yaml b/examples/dbformysql/v1beta1/flexibleserverfirewallrule.yaml similarity index 100% rename from examples/dbformysql/flexibleserverfirewallrule.yaml rename to examples/dbformysql/v1beta1/flexibleserverfirewallrule.yaml diff --git a/examples/dbformysql/server.yaml b/examples/dbformysql/v1beta1/server.yaml similarity index 100% rename from examples/dbformysql/server.yaml rename to examples/dbformysql/v1beta1/server.yaml diff --git a/examples/dbformysql/virtualnetworkrule.yaml b/examples/dbformysql/v1beta1/virtualnetworkrule.yaml similarity index 100% rename from examples/dbformysql/virtualnetworkrule.yaml rename to examples/dbformysql/v1beta1/virtualnetworkrule.yaml diff --git a/examples/dbforpostgresql/activedirectoryadministrator.yaml b/examples/dbforpostgresql/v1beta1/activedirectoryadministrator.yaml similarity index 100% rename from examples/dbforpostgresql/activedirectoryadministrator.yaml rename to examples/dbforpostgresql/v1beta1/activedirectoryadministrator.yaml diff --git a/examples/dbforpostgresql/database.yaml b/examples/dbforpostgresql/v1beta1/database.yaml similarity index 100% rename from examples/dbforpostgresql/database.yaml rename to examples/dbforpostgresql/v1beta1/database.yaml diff --git a/examples/dbforpostgresql/flexibleserver-all-in-one.yaml b/examples/dbforpostgresql/v1beta1/flexibleserver-all-in-one.yaml similarity index 100% rename from examples/dbforpostgresql/flexibleserver-all-in-one.yaml rename to examples/dbforpostgresql/v1beta1/flexibleserver-all-in-one.yaml diff --git a/examples/dbforpostgresql/flexibleserveractivedirectoryadministrator.yaml b/examples/dbforpostgresql/v1beta1/flexibleserveractivedirectoryadministrator.yaml similarity index 100% rename from examples/dbforpostgresql/flexibleserveractivedirectoryadministrator.yaml rename to examples/dbforpostgresql/v1beta1/flexibleserveractivedirectoryadministrator.yaml diff --git a/examples/dbforpostgresql/server-all-in-one.yaml 
b/examples/dbforpostgresql/v1beta1/server-all-in-one.yaml similarity index 100% rename from examples/dbforpostgresql/server-all-in-one.yaml rename to examples/dbforpostgresql/v1beta1/server-all-in-one.yaml diff --git a/examples/dbforpostgresql/server-key.yaml b/examples/dbforpostgresql/v1beta1/server-key.yaml similarity index 100% rename from examples/dbforpostgresql/server-key.yaml rename to examples/dbforpostgresql/v1beta1/server-key.yaml diff --git a/examples/dbforpostgresql/server.yaml b/examples/dbforpostgresql/v1beta1/server.yaml similarity index 100% rename from examples/dbforpostgresql/server.yaml rename to examples/dbforpostgresql/v1beta1/server.yaml diff --git a/examples/dbforpostgresql/virtualnetworkrule.yaml b/examples/dbforpostgresql/v1beta1/virtualnetworkrule.yaml similarity index 100% rename from examples/dbforpostgresql/virtualnetworkrule.yaml rename to examples/dbforpostgresql/v1beta1/virtualnetworkrule.yaml diff --git a/examples/dbforpostgresql/v1beta2/server.yaml b/examples/dbforpostgresql/v1beta2/server.yaml new file mode 100644 index 000000000..de21630ce --- /dev/null +++ b/examples/dbforpostgresql/v1beta2/server.yaml @@ -0,0 +1,57 @@ +# SPDX-FileCopyrightText: 2024 The Crossplane Authors +# +# SPDX-License-Identifier: Apache-2.0 + +apiVersion: dbforpostgresql.azure.upbound.io/v1beta2 +kind: Server +metadata: + annotations: + meta.upbound.io/example-id: dbforpostgresql/v1beta2/server + labels: + testing.upbound.io/example-name: example + name: example-upbound-pg-single-server +spec: + forProvider: + administratorLogin: psqladmin + administratorLoginPasswordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + autoGrowEnabled: true + backupRetentionDays: 7 + geoRedundantBackupEnabled: true + location: West Europe + publicNetworkAccessEnabled: false + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: GP_Gen5_4 + sslEnforcementEnabled: true + sslMinimalTlsVersionEnforced: 
TLS1_2 + storageMb: 640000 + version: "11" + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: dbforpostgresql/v1beta1/server + labels: + testing.upbound.io/example-name: example + name: example-dbforpostgresql-${Rand.RFC1123Subdomain} +spec: + forProvider: + location: West Europe + +--- + +apiVersion: v1 +data: + example-key: dXBiMHVuZHIwY2s1ITMxMzM3 +kind: Secret +metadata: + name: example-secret + namespace: upbound-system +type: Opaque diff --git a/examples/devices/iothub-all-in-one.yaml b/examples/devices/v1beta1/iothub-all-in-one.yaml similarity index 100% rename from examples/devices/iothub-all-in-one.yaml rename to examples/devices/v1beta1/iothub-all-in-one.yaml diff --git a/examples/devices/iothubcertificate.yaml b/examples/devices/v1beta1/iothubcertificate.yaml similarity index 100% rename from examples/devices/iothubcertificate.yaml rename to examples/devices/v1beta1/iothubcertificate.yaml diff --git a/examples/devices/iothubdpscertificate.yaml b/examples/devices/v1beta1/iothubdpscertificate.yaml similarity index 100% rename from examples/devices/iothubdpscertificate.yaml rename to examples/devices/v1beta1/iothubdpscertificate.yaml diff --git a/examples/devices/iothubendpointeventhub.yaml b/examples/devices/v1beta1/iothubendpointeventhub.yaml similarity index 100% rename from examples/devices/iothubendpointeventhub.yaml rename to examples/devices/v1beta1/iothubendpointeventhub.yaml diff --git a/examples/deviceupdate/iothubdeviceupdateaccount.yaml b/examples/deviceupdate/v1beta1/iothubdeviceupdateaccount.yaml similarity index 100% rename from examples/deviceupdate/iothubdeviceupdateaccount.yaml rename to examples/deviceupdate/v1beta1/iothubdeviceupdateaccount.yaml diff --git a/examples/deviceupdate/iothubdeviceupdateinstance.yaml b/examples/deviceupdate/v1beta1/iothubdeviceupdateinstance.yaml similarity index 100% rename from examples/deviceupdate/iothubdeviceupdateinstance.yaml 
rename to examples/deviceupdate/v1beta1/iothubdeviceupdateinstance.yaml diff --git a/examples/devtestlab/globalvmshutdownschedule.yaml b/examples/devtestlab/v1beta1/globalvmshutdownschedule.yaml similarity index 98% rename from examples/devtestlab/globalvmshutdownschedule.yaml rename to examples/devtestlab/v1beta1/globalvmshutdownschedule.yaml index 2942121fe..d6de2cee9 100644 --- a/examples/devtestlab/globalvmshutdownschedule.yaml +++ b/examples/devtestlab/v1beta1/globalvmshutdownschedule.yaml @@ -88,7 +88,7 @@ kind: ResourceGroup metadata: annotations: meta.upbound.io/example-id: devtestlab/v1beta1/globalvmshutdownschedule - uptest.upbound.io/pre-delete-hook: testhooks/delete-network.sh + uptest.upbound.io/pre-delete-hook: ../testhooks/delete-network.sh labels: testing.upbound.io/example-name: example name: gvmssexample diff --git a/examples/devtestlab/lab.yaml b/examples/devtestlab/v1beta1/lab.yaml similarity index 100% rename from examples/devtestlab/lab.yaml rename to examples/devtestlab/v1beta1/lab.yaml diff --git a/examples/devtestlab/linuxvirtualmachine.yaml b/examples/devtestlab/v1beta1/linuxvirtualmachine.yaml similarity index 97% rename from examples/devtestlab/linuxvirtualmachine.yaml rename to examples/devtestlab/v1beta1/linuxvirtualmachine.yaml index fadb9ae19..0af7c0b71 100644 --- a/examples/devtestlab/linuxvirtualmachine.yaml +++ b/examples/devtestlab/v1beta1/linuxvirtualmachine.yaml @@ -104,7 +104,7 @@ metadata: annotations: upjet.upbound.io/manual-intervention: "The dependent resource needs a manual patching and is thus skipped." 
meta.upbound.io/example-id: devtestlab/v1beta1/linuxvirtualmachine - uptest.upbound.io/pre-delete-hook: testhooks/delete-network.sh + uptest.upbound.io/pre-delete-hook: ../testhooks/delete-network.sh labels: testing.upbound.io/example-name: example name: stlvmex diff --git a/examples/devtestlab/policy.yaml b/examples/devtestlab/v1beta1/policy.yaml similarity index 100% rename from examples/devtestlab/policy.yaml rename to examples/devtestlab/v1beta1/policy.yaml diff --git a/examples/devtestlab/schedule.yaml b/examples/devtestlab/v1beta1/schedule.yaml similarity index 100% rename from examples/devtestlab/schedule.yaml rename to examples/devtestlab/v1beta1/schedule.yaml diff --git a/examples/devtestlab/virtualnetwork.yaml b/examples/devtestlab/v1beta1/virtualnetwork.yaml similarity index 100% rename from examples/devtestlab/virtualnetwork.yaml rename to examples/devtestlab/v1beta1/virtualnetwork.yaml diff --git a/examples/devtestlab/windowsvirtualmachine.yaml b/examples/devtestlab/v1beta1/windowsvirtualmachine.yaml similarity index 97% rename from examples/devtestlab/windowsvirtualmachine.yaml rename to examples/devtestlab/v1beta1/windowsvirtualmachine.yaml index 80e980305..3627f4693 100644 --- a/examples/devtestlab/windowsvirtualmachine.yaml +++ b/examples/devtestlab/v1beta1/windowsvirtualmachine.yaml @@ -104,7 +104,7 @@ metadata: annotations: upjet.upbound.io/manual-intervention: "The dependent resource needs a manual patching and is thus skipped." 
meta.upbound.io/example-id: devtestlab/v1beta1/windowsvirtualmachine - uptest.upbound.io/pre-delete-hook: testhooks/delete-network.sh + uptest.upbound.io/pre-delete-hook: ../testhooks/delete-network.sh labels: testing.upbound.io/example-name: example name: stexample diff --git a/examples/digitaltwins/instance.yaml b/examples/digitaltwins/v1beta1/instance.yaml similarity index 100% rename from examples/digitaltwins/instance.yaml rename to examples/digitaltwins/v1beta1/instance.yaml diff --git a/examples/elastic/cloudelasticsearch.yaml b/examples/elastic/v1beta1/cloudelasticsearch.yaml similarity index 100% rename from examples/elastic/cloudelasticsearch.yaml rename to examples/elastic/v1beta1/cloudelasticsearch.yaml diff --git a/examples/eventgrid/domain.yaml b/examples/eventgrid/v1beta1/domain.yaml similarity index 100% rename from examples/eventgrid/domain.yaml rename to examples/eventgrid/v1beta1/domain.yaml diff --git a/examples/eventgrid/domaintopic.yaml b/examples/eventgrid/v1beta1/domaintopic.yaml similarity index 100% rename from examples/eventgrid/domaintopic.yaml rename to examples/eventgrid/v1beta1/domaintopic.yaml diff --git a/examples/eventgrid/eventsubscription.yaml b/examples/eventgrid/v1beta1/eventsubscription.yaml similarity index 100% rename from examples/eventgrid/eventsubscription.yaml rename to examples/eventgrid/v1beta1/eventsubscription.yaml diff --git a/examples/eventgrid/systemtopic.yaml b/examples/eventgrid/v1beta1/systemtopic.yaml similarity index 100% rename from examples/eventgrid/systemtopic.yaml rename to examples/eventgrid/v1beta1/systemtopic.yaml diff --git a/examples/eventgrid/topic.yaml b/examples/eventgrid/v1beta1/topic.yaml similarity index 100% rename from examples/eventgrid/topic.yaml rename to examples/eventgrid/v1beta1/topic.yaml diff --git a/examples/eventhub/eventhub-all-in-one.yaml b/examples/eventhub/v1beta1/eventhub-all-in-one.yaml similarity index 100% rename from examples/eventhub/eventhub-all-in-one.yaml rename to 
examples/eventhub/v1beta1/eventhub-all-in-one.yaml diff --git a/examples/eventhub/namespaceauthorizationrule.yaml b/examples/eventhub/v1beta1/namespaceauthorizationrule.yaml similarity index 100% rename from examples/eventhub/namespaceauthorizationrule.yaml rename to examples/eventhub/v1beta1/namespaceauthorizationrule.yaml diff --git a/examples/eventhub/namespacedisasterrecoveryconfig.yaml b/examples/eventhub/v1beta1/namespacedisasterrecoveryconfig.yaml similarity index 100% rename from examples/eventhub/namespacedisasterrecoveryconfig.yaml rename to examples/eventhub/v1beta1/namespacedisasterrecoveryconfig.yaml diff --git a/examples/eventhub/namespaceschemagroup.yaml b/examples/eventhub/v1beta1/namespaceschemagroup.yaml similarity index 100% rename from examples/eventhub/namespaceschemagroup.yaml rename to examples/eventhub/v1beta1/namespaceschemagroup.yaml diff --git a/examples/fluidrelay/server.yaml b/examples/fluidrelay/v1beta1/server.yaml similarity index 100% rename from examples/fluidrelay/server.yaml rename to examples/fluidrelay/v1beta1/server.yaml diff --git a/examples/guestconfiguration/policyvirtualmachineconfigurationassignment.yaml b/examples/guestconfiguration/v1beta1/policyvirtualmachineconfigurationassignment.yaml similarity index 100% rename from examples/guestconfiguration/policyvirtualmachineconfigurationassignment.yaml rename to examples/guestconfiguration/v1beta1/policyvirtualmachineconfigurationassignment.yaml diff --git a/examples/hdinsight/hadoopcluster.yaml b/examples/hdinsight/v1beta1/hadoopcluster.yaml similarity index 97% rename from examples/hdinsight/hadoopcluster.yaml rename to examples/hdinsight/v1beta1/hadoopcluster.yaml index 2b809d4ed..82e14f853 100644 --- a/examples/hdinsight/hadoopcluster.yaml +++ b/examples/hdinsight/v1beta1/hadoopcluster.yaml @@ -77,7 +77,7 @@ kind: Account metadata: annotations: meta.upbound.io/example-id: hdinsight/v1beta1/hadoopcluster - uptest.upbound.io/pre-delete-hook: testhooks/delete-hadoopcluster.sh 
+ uptest.upbound.io/pre-delete-hook: ../testhooks/delete-hadoopcluster.sh labels: testing.upbound.io/example-name: example0001 name: example0001 diff --git a/examples/hdinsight/hbasecluster.yaml b/examples/hdinsight/v1beta1/hbasecluster.yaml similarity index 97% rename from examples/hdinsight/hbasecluster.yaml rename to examples/hdinsight/v1beta1/hbasecluster.yaml index 5dd3202d5..b8740d5bc 100644 --- a/examples/hdinsight/hbasecluster.yaml +++ b/examples/hdinsight/v1beta1/hbasecluster.yaml @@ -77,7 +77,7 @@ kind: Account metadata: annotations: meta.upbound.io/example-id: hdinsight/v1beta1/hbasecluster - uptest.upbound.io/pre-delete-hook: testhooks/delete-hbasecluster.sh + uptest.upbound.io/pre-delete-hook: ../testhooks/delete-hbasecluster.sh labels: testing.upbound.io/example-name: example0001 name: example0001 diff --git a/examples/hdinsight/interactivequerycluster.yaml b/examples/hdinsight/v1beta1/interactivequerycluster.yaml similarity index 97% rename from examples/hdinsight/interactivequerycluster.yaml rename to examples/hdinsight/v1beta1/interactivequerycluster.yaml index 2e947e394..8ef2ab990 100644 --- a/examples/hdinsight/interactivequerycluster.yaml +++ b/examples/hdinsight/v1beta1/interactivequerycluster.yaml @@ -77,7 +77,7 @@ kind: Account metadata: annotations: meta.upbound.io/example-id: hdinsight/v1beta1/interactivequerycluster - uptest.upbound.io/pre-delete-hook: testhooks/delete-interactivecluster.sh + uptest.upbound.io/pre-delete-hook: ../testhooks/delete-interactivecluster.sh labels: testing.upbound.io/example-name: example0001 name: example0001 diff --git a/examples/hdinsight/kafkacluster.yaml b/examples/hdinsight/v1beta1/kafkacluster.yaml similarity index 97% rename from examples/hdinsight/kafkacluster.yaml rename to examples/hdinsight/v1beta1/kafkacluster.yaml index fc5cc26eb..4f072e598 100644 --- a/examples/hdinsight/kafkacluster.yaml +++ b/examples/hdinsight/v1beta1/kafkacluster.yaml @@ -78,7 +78,7 @@ kind: Account metadata: annotations: 
meta.upbound.io/example-id: hdinsight/v1beta1/kafkacluster - uptest.upbound.io/pre-delete-hook: testhooks/delete-kafka.sh + uptest.upbound.io/pre-delete-hook: ../testhooks/delete-kafka.sh labels: testing.upbound.io/example-name: example0001 name: example0001 diff --git a/examples/hdinsight/sparkcluster.yaml b/examples/hdinsight/v1beta1/sparkcluster.yaml similarity index 97% rename from examples/hdinsight/sparkcluster.yaml rename to examples/hdinsight/v1beta1/sparkcluster.yaml index 16898a201..51779b097 100644 --- a/examples/hdinsight/sparkcluster.yaml +++ b/examples/hdinsight/v1beta1/sparkcluster.yaml @@ -76,7 +76,7 @@ apiVersion: storage.azure.upbound.io/v1beta1 kind: Account metadata: annotations: - uptest.upbound.io/pre-delete-hook: testhooks/delete-sparkcluster.sh + uptest.upbound.io/pre-delete-hook: ../testhooks/delete-sparkcluster.sh meta.upbound.io/example-id: hdinsight/v1beta1/sparkcluster labels: testing.upbound.io/example-name: example0001 diff --git a/examples/healthbot/healthbot.yaml b/examples/healthbot/v1beta1/healthbot.yaml similarity index 100% rename from examples/healthbot/healthbot.yaml rename to examples/healthbot/v1beta1/healthbot.yaml diff --git a/examples/healthcareapis/healthcaredicomservice.yaml b/examples/healthcareapis/v1beta1/healthcaredicomservice.yaml similarity index 100% rename from examples/healthcareapis/healthcaredicomservice.yaml rename to examples/healthcareapis/v1beta1/healthcaredicomservice.yaml diff --git a/examples/healthcareapis/healthcarefhirservice.yaml b/examples/healthcareapis/v1beta1/healthcarefhirservice.yaml similarity index 100% rename from examples/healthcareapis/healthcarefhirservice.yaml rename to examples/healthcareapis/v1beta1/healthcarefhirservice.yaml diff --git a/examples/healthcareapis/healthcaremedtechservice.yaml b/examples/healthcareapis/v1beta1/healthcaremedtechservice.yaml similarity index 100% rename from examples/healthcareapis/healthcaremedtechservice.yaml rename to 
examples/healthcareapis/v1beta1/healthcaremedtechservice.yaml diff --git a/examples/healthcareapis/healthcaremedtechservicefhirdestination.yaml b/examples/healthcareapis/v1beta1/healthcaremedtechservicefhirdestination.yaml similarity index 100% rename from examples/healthcareapis/healthcaremedtechservicefhirdestination.yaml rename to examples/healthcareapis/v1beta1/healthcaremedtechservicefhirdestination.yaml diff --git a/examples/healthcareapis/healthcareservice.yaml b/examples/healthcareapis/v1beta1/healthcareservice.yaml similarity index 100% rename from examples/healthcareapis/healthcareservice.yaml rename to examples/healthcareapis/v1beta1/healthcareservice.yaml diff --git a/examples/healthcareapis/healthcareworkspace.yaml b/examples/healthcareapis/v1beta1/healthcareworkspace.yaml similarity index 100% rename from examples/healthcareapis/healthcareworkspace.yaml rename to examples/healthcareapis/v1beta1/healthcareworkspace.yaml diff --git a/examples/insights/applicationinsightsanalyticsitem.yaml b/examples/insights/v1beta1/applicationinsightsanalyticsitem.yaml similarity index 95% rename from examples/insights/applicationinsightsanalyticsitem.yaml rename to examples/insights/v1beta1/applicationinsightsanalyticsitem.yaml index cb224e632..153e732b3 100644 --- a/examples/insights/applicationinsightsanalyticsitem.yaml +++ b/examples/insights/v1beta1/applicationinsightsanalyticsitem.yaml @@ -45,7 +45,7 @@ apiVersion: azure.upbound.io/v1beta1 kind: ResourceGroup metadata: annotations: - uptest.upbound.io/pre-delete-hook: testhooks/delete-app-insights.sh + uptest.upbound.io/pre-delete-hook: ../testhooks/delete-app-insights.sh meta.upbound.io/example-id: insights/v1beta1/applicationinsightsanalyticsitem labels: testing.upbound.io/example-name: example diff --git a/examples/insights/applicationinsightsapikey.yaml b/examples/insights/v1beta1/applicationinsightsapikey.yaml similarity index 100% rename from examples/insights/applicationinsightsapikey.yaml rename to 
examples/insights/v1beta1/applicationinsightsapikey.yaml diff --git a/examples/insights/applicationinsightssmartdetectionrule.yaml b/examples/insights/v1beta1/applicationinsightssmartdetectionrule.yaml similarity index 100% rename from examples/insights/applicationinsightssmartdetectionrule.yaml rename to examples/insights/v1beta1/applicationinsightssmartdetectionrule.yaml diff --git a/examples/insights/applicationinsightsstandardwebtest.yaml b/examples/insights/v1beta1/applicationinsightsstandardwebtest.yaml similarity index 100% rename from examples/insights/applicationinsightsstandardwebtest.yaml rename to examples/insights/v1beta1/applicationinsightsstandardwebtest.yaml diff --git a/examples/insights/applicationinsightswebtest.yaml b/examples/insights/v1beta1/applicationinsightswebtest.yaml similarity index 100% rename from examples/insights/applicationinsightswebtest.yaml rename to examples/insights/v1beta1/applicationinsightswebtest.yaml diff --git a/examples/insights/applicationinsightsworkbook.yaml b/examples/insights/v1beta1/applicationinsightsworkbook.yaml similarity index 100% rename from examples/insights/applicationinsightsworkbook.yaml rename to examples/insights/v1beta1/applicationinsightsworkbook.yaml diff --git a/examples/insights/applicationinsightsworkbooktemplate.yaml b/examples/insights/v1beta1/applicationinsightsworkbooktemplate.yaml similarity index 100% rename from examples/insights/applicationinsightsworkbooktemplate.yaml rename to examples/insights/v1beta1/applicationinsightsworkbooktemplate.yaml diff --git a/examples/insights/insights-all-in-one.yaml b/examples/insights/v1beta1/insights-all-in-one.yaml similarity index 100% rename from examples/insights/insights-all-in-one.yaml rename to examples/insights/v1beta1/insights-all-in-one.yaml diff --git a/examples/insights/monitoractiongroup.yaml b/examples/insights/v1beta1/monitoractiongroup.yaml similarity index 100% rename from examples/insights/monitoractiongroup.yaml rename to 
examples/insights/v1beta1/monitoractiongroup.yaml diff --git a/examples/insights/monitoractivitylogalert.yaml b/examples/insights/v1beta1/monitoractivitylogalert.yaml similarity index 100% rename from examples/insights/monitoractivitylogalert.yaml rename to examples/insights/v1beta1/monitoractivitylogalert.yaml diff --git a/examples/insights/monitorautoscalesetting.yaml b/examples/insights/v1beta1/monitorautoscalesetting.yaml similarity index 100% rename from examples/insights/monitorautoscalesetting.yaml rename to examples/insights/v1beta1/monitorautoscalesetting.yaml diff --git a/examples/insights/monitordatacollectionendpoint.yaml b/examples/insights/v1beta1/monitordatacollectionendpoint.yaml similarity index 100% rename from examples/insights/monitordatacollectionendpoint.yaml rename to examples/insights/v1beta1/monitordatacollectionendpoint.yaml diff --git a/examples/insights/monitordatacollectionrule.yaml b/examples/insights/v1beta1/monitordatacollectionrule.yaml similarity index 100% rename from examples/insights/monitordatacollectionrule.yaml rename to examples/insights/v1beta1/monitordatacollectionrule.yaml diff --git a/examples/insights/monitordatacollectionruleassociation.yaml b/examples/insights/v1beta1/monitordatacollectionruleassociation.yaml similarity index 100% rename from examples/insights/monitordatacollectionruleassociation.yaml rename to examples/insights/v1beta1/monitordatacollectionruleassociation.yaml diff --git a/examples/insights/monitordiagnosticsetting.yaml b/examples/insights/v1beta1/monitordiagnosticsetting.yaml similarity index 100% rename from examples/insights/monitordiagnosticsetting.yaml rename to examples/insights/v1beta1/monitordiagnosticsetting.yaml diff --git a/examples/insights/monitorscheduledqueryrulesalert.yaml b/examples/insights/v1beta1/monitorscheduledqueryrulesalert.yaml similarity index 100% rename from examples/insights/monitorscheduledqueryrulesalert.yaml rename to 
examples/insights/v1beta1/monitorscheduledqueryrulesalert.yaml diff --git a/examples/insights/monitorscheduledqueryrulesalertv2.yaml b/examples/insights/v1beta1/monitorscheduledqueryrulesalertv2.yaml similarity index 100% rename from examples/insights/monitorscheduledqueryrulesalertv2.yaml rename to examples/insights/v1beta1/monitorscheduledqueryrulesalertv2.yaml diff --git a/examples/insights/monitorscheduledqueryruleslog.yaml b/examples/insights/v1beta1/monitorscheduledqueryruleslog.yaml similarity index 100% rename from examples/insights/monitorscheduledqueryruleslog.yaml rename to examples/insights/v1beta1/monitorscheduledqueryruleslog.yaml diff --git a/examples/iotcentral/application.yaml b/examples/iotcentral/v1beta1/application.yaml similarity index 100% rename from examples/iotcentral/application.yaml rename to examples/iotcentral/v1beta1/application.yaml diff --git a/examples/iotcentral/applicationnetworkruleset.yaml b/examples/iotcentral/v1beta1/applicationnetworkruleset.yaml similarity index 100% rename from examples/iotcentral/applicationnetworkruleset.yaml rename to examples/iotcentral/v1beta1/applicationnetworkruleset.yaml diff --git a/examples/keyvault/certificate.yaml b/examples/keyvault/v1beta1/certificate.yaml similarity index 100% rename from examples/keyvault/certificate.yaml rename to examples/keyvault/v1beta1/certificate.yaml diff --git a/examples/keyvault/certificatecontacts.yaml b/examples/keyvault/v1beta1/certificatecontacts.yaml similarity index 100% rename from examples/keyvault/certificatecontacts.yaml rename to examples/keyvault/v1beta1/certificatecontacts.yaml diff --git a/examples/keyvault/key.yaml b/examples/keyvault/v1beta1/key.yaml similarity index 100% rename from examples/keyvault/key.yaml rename to examples/keyvault/v1beta1/key.yaml diff --git a/examples/keyvault/keyvault-all-in-one.yaml b/examples/keyvault/v1beta1/keyvault-all-in-one.yaml similarity index 100% rename from examples/keyvault/keyvault-all-in-one.yaml rename to 
examples/keyvault/v1beta1/keyvault-all-in-one.yaml diff --git a/examples/keyvault/secret.yaml b/examples/keyvault/v1beta1/secret.yaml similarity index 100% rename from examples/keyvault/secret.yaml rename to examples/keyvault/v1beta1/secret.yaml diff --git a/examples/keyvault/vault.yaml b/examples/keyvault/v1beta1/vault.yaml similarity index 100% rename from examples/keyvault/vault.yaml rename to examples/keyvault/v1beta1/vault.yaml diff --git a/examples/keyvault/v1beta2/key.yaml b/examples/keyvault/v1beta2/key.yaml new file mode 100644 index 000000000..7f9a81494 --- /dev/null +++ b/examples/keyvault/v1beta2/key.yaml @@ -0,0 +1,116 @@ +# SPDX-FileCopyrightText: 2024 The Crossplane Authors +# +# SPDX-License-Identifier: Apache-2.0 + +apiVersion: keyvault.azure.upbound.io/v1beta2 +kind: Key +metadata: + annotations: + meta.upbound.io/example-id: keyvault/v1beta2/key + labels: + testing.upbound.io/example-name: example + name: uptest-${Rand.RFC1123Subdomain} +spec: + forProvider: + keyOpts: + - decrypt + - encrypt + - sign + - unwrapKey + - verify + - wrapKey + keySize: 2048 + keyType: RSA + keyVaultIdSelector: + matchLabels: + testing.upbound.io/example-name: example + name: custom-Non-RFC1123Name + rotationPolicy: + automatic: + timeBeforeExpiry: P30D + expireAfter: P90D + notifyBeforeExpiry: P29D + +--- + +apiVersion: keyvault.azure.upbound.io/v1beta1 +kind: AccessPolicy +metadata: + annotations: + meta.upbound.io/example-id: keyvault/v1beta2/key + labels: + testing.upbound.io/example-name: example + name: example-${Rand.RFC1123Subdomain} +spec: + forProvider: + certificatePermissions: + - Create + - Delete + - DeleteIssuers + - Get + - GetIssuers + - SetIssuers + - Import + - List + - ListIssuers + - ManageContacts + - ManageIssuers + - Purge + - SetIssuers + - Update + - Recover + keyPermissions: + - Get + - UnwrapKey + - WrapKey + - Create + - Delete + - Recover + - List + - Update + - GetRotationPolicy + - SetRotationPolicy + keyVaultIdSelector: + matchLabels: 
+ testing.upbound.io/example-name: example + objectId: ${data.azurerm_client_config.service_principal.object_id} + secretPermissions: + - Get + - Set + - Delete + - Recover + tenantId: ${data.azurerm_client_config.current.tenant_id} + +--- + +apiVersion: keyvault.azure.upbound.io/v1beta2 +kind: Vault +metadata: + annotations: + meta.upbound.io/example-id: keyvault/v1beta2/key + labels: + testing.upbound.io/example-name: example + name: uptest-${Rand.RFC1123Subdomain} +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + skuName: premium + softDeleteRetentionDays: 7 + tenantId: ${data.azurerm_client_config.current.tenant_id} + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: keyvault/v1beta1/key + labels: + testing.upbound.io/example-name: example + name: uptest-${Rand.RFC1123Subdomain} +spec: + forProvider: + location: West Europe diff --git a/examples/kusto/attacheddatabaseconfiguration.yaml b/examples/kusto/v1beta1/attacheddatabaseconfiguration.yaml similarity index 100% rename from examples/kusto/attacheddatabaseconfiguration.yaml rename to examples/kusto/v1beta1/attacheddatabaseconfiguration.yaml diff --git a/examples/kusto/cluster.yaml b/examples/kusto/v1beta1/cluster.yaml similarity index 100% rename from examples/kusto/cluster.yaml rename to examples/kusto/v1beta1/cluster.yaml diff --git a/examples/kusto/clustermanagedprivateendpoint.yaml b/examples/kusto/v1beta1/clustermanagedprivateendpoint.yaml similarity index 100% rename from examples/kusto/clustermanagedprivateendpoint.yaml rename to examples/kusto/v1beta1/clustermanagedprivateendpoint.yaml diff --git a/examples/kusto/clusterprincipalassignment.yaml b/examples/kusto/v1beta1/clusterprincipalassignment.yaml similarity index 100% rename from examples/kusto/clusterprincipalassignment.yaml rename to examples/kusto/v1beta1/clusterprincipalassignment.yaml 
diff --git a/examples/kusto/database.yaml b/examples/kusto/v1beta1/database.yaml similarity index 100% rename from examples/kusto/database.yaml rename to examples/kusto/v1beta1/database.yaml diff --git a/examples/kusto/databaseprincipalassignment.yaml b/examples/kusto/v1beta1/databaseprincipalassignment.yaml similarity index 100% rename from examples/kusto/databaseprincipalassignment.yaml rename to examples/kusto/v1beta1/databaseprincipalassignment.yaml diff --git a/examples/kusto/eventgriddataconnection.yaml b/examples/kusto/v1beta1/eventgriddataconnection.yaml similarity index 100% rename from examples/kusto/eventgriddataconnection.yaml rename to examples/kusto/v1beta1/eventgriddataconnection.yaml diff --git a/examples/kusto/eventhubdataconnection.yaml b/examples/kusto/v1beta1/eventhubdataconnection.yaml similarity index 100% rename from examples/kusto/eventhubdataconnection.yaml rename to examples/kusto/v1beta1/eventhubdataconnection.yaml diff --git a/examples/kusto/iothubdataconnection.yaml b/examples/kusto/v1beta1/iothubdataconnection.yaml similarity index 100% rename from examples/kusto/iothubdataconnection.yaml rename to examples/kusto/v1beta1/iothubdataconnection.yaml diff --git a/examples/labservices/labserviceplan.yaml b/examples/labservices/v1beta1/labserviceplan.yaml similarity index 100% rename from examples/labservices/labserviceplan.yaml rename to examples/labservices/v1beta1/labserviceplan.yaml diff --git a/examples/labservices/labserviceslab.yaml b/examples/labservices/v1beta1/labserviceslab.yaml similarity index 100% rename from examples/labservices/labserviceslab.yaml rename to examples/labservices/v1beta1/labserviceslab.yaml diff --git a/examples/loadtestservice/loadtest.yaml b/examples/loadtestservice/v1beta1/loadtest.yaml similarity index 100% rename from examples/loadtestservice/loadtest.yaml rename to examples/loadtestservice/v1beta1/loadtest.yaml diff --git a/examples/logic/appactioncustom.yaml b/examples/logic/v1beta1/appactioncustom.yaml 
similarity index 100% rename from examples/logic/appactioncustom.yaml rename to examples/logic/v1beta1/appactioncustom.yaml diff --git a/examples/logic/appactionhttp.yaml b/examples/logic/v1beta1/appactionhttp.yaml similarity index 100% rename from examples/logic/appactionhttp.yaml rename to examples/logic/v1beta1/appactionhttp.yaml diff --git a/examples/logic/appintegrationaccount.yaml b/examples/logic/v1beta1/appintegrationaccount.yaml similarity index 100% rename from examples/logic/appintegrationaccount.yaml rename to examples/logic/v1beta1/appintegrationaccount.yaml diff --git a/examples/logic/appintegrationaccountbatchconfiguration.yaml b/examples/logic/v1beta1/appintegrationaccountbatchconfiguration.yaml similarity index 100% rename from examples/logic/appintegrationaccountbatchconfiguration.yaml rename to examples/logic/v1beta1/appintegrationaccountbatchconfiguration.yaml diff --git a/examples/logic/appintegrationaccountpartner.yaml b/examples/logic/v1beta1/appintegrationaccountpartner.yaml similarity index 100% rename from examples/logic/appintegrationaccountpartner.yaml rename to examples/logic/v1beta1/appintegrationaccountpartner.yaml diff --git a/examples/logic/appintegrationaccountschema.yaml b/examples/logic/v1beta1/appintegrationaccountschema.yaml similarity index 100% rename from examples/logic/appintegrationaccountschema.yaml rename to examples/logic/v1beta1/appintegrationaccountschema.yaml diff --git a/examples/logic/appintegrationaccountsession.yaml b/examples/logic/v1beta1/appintegrationaccountsession.yaml similarity index 100% rename from examples/logic/appintegrationaccountsession.yaml rename to examples/logic/v1beta1/appintegrationaccountsession.yaml diff --git a/examples/logic/apptriggercustom.yaml b/examples/logic/v1beta1/apptriggercustom.yaml similarity index 100% rename from examples/logic/apptriggercustom.yaml rename to examples/logic/v1beta1/apptriggercustom.yaml diff --git a/examples/logic/apptriggerhttprequest.yaml 
b/examples/logic/v1beta1/apptriggerhttprequest.yaml similarity index 100% rename from examples/logic/apptriggerhttprequest.yaml rename to examples/logic/v1beta1/apptriggerhttprequest.yaml diff --git a/examples/logic/apptriggerrecurrence.yaml b/examples/logic/v1beta1/apptriggerrecurrence.yaml similarity index 100% rename from examples/logic/apptriggerrecurrence.yaml rename to examples/logic/v1beta1/apptriggerrecurrence.yaml diff --git a/examples/logic/appworkflow.yaml b/examples/logic/v1beta1/appworkflow.yaml similarity index 100% rename from examples/logic/appworkflow.yaml rename to examples/logic/v1beta1/appworkflow.yaml diff --git a/examples/logic/integrationserviceenvironment.yaml b/examples/logic/v1beta1/integrationserviceenvironment.yaml similarity index 100% rename from examples/logic/integrationserviceenvironment.yaml rename to examples/logic/v1beta1/integrationserviceenvironment.yaml diff --git a/examples/logz/monitor.yaml b/examples/logz/v1beta1/monitor.yaml similarity index 100% rename from examples/logz/monitor.yaml rename to examples/logz/v1beta1/monitor.yaml diff --git a/examples/logz/subaccount.yaml b/examples/logz/v1beta1/subaccount.yaml similarity index 100% rename from examples/logz/subaccount.yaml rename to examples/logz/v1beta1/subaccount.yaml diff --git a/examples/logz/subaccounttagrule.yaml b/examples/logz/v1beta1/subaccounttagrule.yaml similarity index 100% rename from examples/logz/subaccounttagrule.yaml rename to examples/logz/v1beta1/subaccounttagrule.yaml diff --git a/examples/logz/tagrule.yaml b/examples/logz/v1beta1/tagrule.yaml similarity index 100% rename from examples/logz/tagrule.yaml rename to examples/logz/v1beta1/tagrule.yaml diff --git a/examples/machinelearningservices/computecluster.yaml b/examples/machinelearningservices/v1beta1/computecluster.yaml similarity index 100% rename from examples/machinelearningservices/computecluster.yaml rename to examples/machinelearningservices/v1beta1/computecluster.yaml diff --git 
a/examples/machinelearningservices/computeinstance.yaml b/examples/machinelearningservices/v1beta1/computeinstance.yaml similarity index 100% rename from examples/machinelearningservices/computeinstance.yaml rename to examples/machinelearningservices/v1beta1/computeinstance.yaml diff --git a/examples/machinelearningservices/synapsespark.yaml b/examples/machinelearningservices/v1beta1/synapsespark.yaml similarity index 100% rename from examples/machinelearningservices/synapsespark.yaml rename to examples/machinelearningservices/v1beta1/synapsespark.yaml diff --git a/examples/machinelearningservices/workspace.yaml b/examples/machinelearningservices/v1beta1/workspace.yaml similarity index 100% rename from examples/machinelearningservices/workspace.yaml rename to examples/machinelearningservices/v1beta1/workspace.yaml diff --git a/examples/maintenance/maintenanceassignmentdedicatedhost.yaml b/examples/maintenance/v1beta1/maintenanceassignmentdedicatedhost.yaml similarity index 100% rename from examples/maintenance/maintenanceassignmentdedicatedhost.yaml rename to examples/maintenance/v1beta1/maintenanceassignmentdedicatedhost.yaml diff --git a/examples/maintenance/maintenanceassignmentvirtualmachine.yaml b/examples/maintenance/v1beta1/maintenanceassignmentvirtualmachine.yaml similarity index 100% rename from examples/maintenance/maintenanceassignmentvirtualmachine.yaml rename to examples/maintenance/v1beta1/maintenanceassignmentvirtualmachine.yaml diff --git a/examples/maintenance/maintenanceconfiguration.yaml b/examples/maintenance/v1beta1/maintenanceconfiguration.yaml similarity index 100% rename from examples/maintenance/maintenanceconfiguration.yaml rename to examples/maintenance/v1beta1/maintenanceconfiguration.yaml diff --git a/examples/managedidentity/federatedidentitycredential.yaml b/examples/managedidentity/v1beta1/federatedidentitycredential.yaml similarity index 100% rename from examples/managedidentity/federatedidentitycredential.yaml rename to 
examples/managedidentity/v1beta1/federatedidentitycredential.yaml diff --git a/examples/managedidentity/userassignedidentity.yaml b/examples/managedidentity/v1beta1/userassignedidentity.yaml similarity index 100% rename from examples/managedidentity/userassignedidentity.yaml rename to examples/managedidentity/v1beta1/userassignedidentity.yaml diff --git a/examples/management/managementgroup.yaml b/examples/management/v1beta1/managementgroup.yaml similarity index 100% rename from examples/management/managementgroup.yaml rename to examples/management/v1beta1/managementgroup.yaml diff --git a/examples/management/managementgroupsubscriptionassociation.yaml b/examples/management/v1beta1/managementgroupsubscriptionassociation.yaml similarity index 91% rename from examples/management/managementgroupsubscriptionassociation.yaml rename to examples/management/v1beta1/managementgroupsubscriptionassociation.yaml index 15c0b15c6..256b34f23 100644 --- a/examples/management/managementgroupsubscriptionassociation.yaml +++ b/examples/management/v1beta1/managementgroupsubscriptionassociation.yaml @@ -22,7 +22,7 @@ kind: ManagementGroup metadata: annotations: meta.upbound.io/example-id: management/v1beta1/managementgroupsubscriptionassociation - uptest.upbound.io/pre-delete-hook: testhooks/delete-management-group-association.sh + uptest.upbound.io/pre-delete-hook: ../testhooks/delete-management-group-association.sh labels: testing.upbound.io/example-name: example_sub name: example-sub diff --git a/examples/maps/account.yaml b/examples/maps/v1beta1/account.yaml similarity index 100% rename from examples/maps/account.yaml rename to examples/maps/v1beta1/account.yaml diff --git a/examples/maps/creator.yaml b/examples/maps/v1beta1/creator.yaml similarity index 100% rename from examples/maps/creator.yaml rename to examples/maps/v1beta1/creator.yaml diff --git a/examples/media/asset.yaml b/examples/media/v1beta1/asset.yaml similarity index 100% rename from examples/media/asset.yaml rename 
to examples/media/v1beta1/asset.yaml diff --git a/examples/media/assetfilter.yaml b/examples/media/v1beta1/assetfilter.yaml similarity index 100% rename from examples/media/assetfilter.yaml rename to examples/media/v1beta1/assetfilter.yaml diff --git a/examples/media/contentkeypolicy.yaml b/examples/media/v1beta1/contentkeypolicy.yaml similarity index 100% rename from examples/media/contentkeypolicy.yaml rename to examples/media/v1beta1/contentkeypolicy.yaml diff --git a/examples/media/job.yaml b/examples/media/v1beta1/job.yaml similarity index 100% rename from examples/media/job.yaml rename to examples/media/v1beta1/job.yaml diff --git a/examples/media/liveevent.yaml b/examples/media/v1beta1/liveevent.yaml similarity index 100% rename from examples/media/liveevent.yaml rename to examples/media/v1beta1/liveevent.yaml diff --git a/examples/media/liveeventoutput.yaml b/examples/media/v1beta1/liveeventoutput.yaml similarity index 100% rename from examples/media/liveeventoutput.yaml rename to examples/media/v1beta1/liveeventoutput.yaml diff --git a/examples/media/serviceaccountfilter.yaml b/examples/media/v1beta1/serviceaccountfilter.yaml similarity index 100% rename from examples/media/serviceaccountfilter.yaml rename to examples/media/v1beta1/serviceaccountfilter.yaml diff --git a/examples/media/servicesaccount.yaml b/examples/media/v1beta1/servicesaccount.yaml similarity index 100% rename from examples/media/servicesaccount.yaml rename to examples/media/v1beta1/servicesaccount.yaml diff --git a/examples/media/streamingendpoint.yaml b/examples/media/v1beta1/streamingendpoint.yaml similarity index 100% rename from examples/media/streamingendpoint.yaml rename to examples/media/v1beta1/streamingendpoint.yaml diff --git a/examples/media/streaminglocator.yaml b/examples/media/v1beta1/streaminglocator.yaml similarity index 100% rename from examples/media/streaminglocator.yaml rename to examples/media/v1beta1/streaminglocator.yaml diff --git 
a/examples/media/streamingpolicy.yaml b/examples/media/v1beta1/streamingpolicy.yaml similarity index 100% rename from examples/media/streamingpolicy.yaml rename to examples/media/v1beta1/streamingpolicy.yaml diff --git a/examples/media/transform.yaml b/examples/media/v1beta1/transform.yaml similarity index 100% rename from examples/media/transform.yaml rename to examples/media/v1beta1/transform.yaml diff --git a/examples/mixedreality/spatialanchorsaccount.yaml b/examples/mixedreality/v1beta1/spatialanchorsaccount.yaml similarity index 100% rename from examples/mixedreality/spatialanchorsaccount.yaml rename to examples/mixedreality/v1beta1/spatialanchorsaccount.yaml diff --git a/examples/netapp/account.yaml b/examples/netapp/v1beta1/account.yaml similarity index 100% rename from examples/netapp/account.yaml rename to examples/netapp/v1beta1/account.yaml diff --git a/examples/netapp/pool.yaml b/examples/netapp/v1beta1/pool.yaml similarity index 100% rename from examples/netapp/pool.yaml rename to examples/netapp/v1beta1/pool.yaml diff --git a/examples/netapp/snapshot.yaml b/examples/netapp/v1beta1/snapshot.yaml similarity index 100% rename from examples/netapp/snapshot.yaml rename to examples/netapp/v1beta1/snapshot.yaml diff --git a/examples/netapp/snapshotpolicy.yaml b/examples/netapp/v1beta1/snapshotpolicy.yaml similarity index 100% rename from examples/netapp/snapshotpolicy.yaml rename to examples/netapp/v1beta1/snapshotpolicy.yaml diff --git a/examples/netapp/volume.yaml b/examples/netapp/v1beta1/volume.yaml similarity index 100% rename from examples/netapp/volume.yaml rename to examples/netapp/v1beta1/volume.yaml diff --git a/examples/network/applicationgateway.yaml b/examples/network/v1beta1/applicationgateway.yaml similarity index 100% rename from examples/network/applicationgateway.yaml rename to examples/network/v1beta1/applicationgateway.yaml diff --git a/examples/network/applicationsecuritygroup.yaml 
b/examples/network/v1beta1/applicationsecuritygroup.yaml similarity index 100% rename from examples/network/applicationsecuritygroup.yaml rename to examples/network/v1beta1/applicationsecuritygroup.yaml diff --git a/examples/network/connectionmonitor.yaml b/examples/network/v1beta1/connectionmonitor.yaml similarity index 100% rename from examples/network/connectionmonitor.yaml rename to examples/network/v1beta1/connectionmonitor.yaml diff --git a/examples/network/ddosprotectionplan.yaml b/examples/network/v1beta1/ddosprotectionplan.yaml similarity index 100% rename from examples/network/ddosprotectionplan.yaml rename to examples/network/v1beta1/ddosprotectionplan.yaml diff --git a/examples/network/dns-all-in-one.yaml b/examples/network/v1beta1/dns-all-in-one.yaml similarity index 100% rename from examples/network/dns-all-in-one.yaml rename to examples/network/v1beta1/dns-all-in-one.yaml diff --git a/examples/network/dnsaaaarecord.yaml b/examples/network/v1beta1/dnsaaaarecord.yaml similarity index 100% rename from examples/network/dnsaaaarecord.yaml rename to examples/network/v1beta1/dnsaaaarecord.yaml diff --git a/examples/network/dnsarecord.yaml b/examples/network/v1beta1/dnsarecord.yaml similarity index 100% rename from examples/network/dnsarecord.yaml rename to examples/network/v1beta1/dnsarecord.yaml diff --git a/examples/network/dnscaarecord.yaml b/examples/network/v1beta1/dnscaarecord.yaml similarity index 100% rename from examples/network/dnscaarecord.yaml rename to examples/network/v1beta1/dnscaarecord.yaml diff --git a/examples/network/dnscnamerecord.yaml b/examples/network/v1beta1/dnscnamerecord.yaml similarity index 100% rename from examples/network/dnscnamerecord.yaml rename to examples/network/v1beta1/dnscnamerecord.yaml diff --git a/examples/network/dnsmxrecord.yaml b/examples/network/v1beta1/dnsmxrecord.yaml similarity index 100% rename from examples/network/dnsmxrecord.yaml rename to examples/network/v1beta1/dnsmxrecord.yaml diff --git 
a/examples/network/dnsnsrecord.yaml b/examples/network/v1beta1/dnsnsrecord.yaml similarity index 100% rename from examples/network/dnsnsrecord.yaml rename to examples/network/v1beta1/dnsnsrecord.yaml diff --git a/examples/network/dnsptrrecord.yaml b/examples/network/v1beta1/dnsptrrecord.yaml similarity index 100% rename from examples/network/dnsptrrecord.yaml rename to examples/network/v1beta1/dnsptrrecord.yaml diff --git a/examples/network/dnssrvrecord.yaml b/examples/network/v1beta1/dnssrvrecord.yaml similarity index 100% rename from examples/network/dnssrvrecord.yaml rename to examples/network/v1beta1/dnssrvrecord.yaml diff --git a/examples/network/dnstxtrecord.yaml b/examples/network/v1beta1/dnstxtrecord.yaml similarity index 100% rename from examples/network/dnstxtrecord.yaml rename to examples/network/v1beta1/dnstxtrecord.yaml diff --git a/examples/network/dnszone.yaml b/examples/network/v1beta1/dnszone.yaml similarity index 100% rename from examples/network/dnszone.yaml rename to examples/network/v1beta1/dnszone.yaml diff --git a/examples/network/expressroute-all-in-one.yaml b/examples/network/v1beta1/expressroute-all-in-one.yaml similarity index 100% rename from examples/network/expressroute-all-in-one.yaml rename to examples/network/v1beta1/expressroute-all-in-one.yaml diff --git a/examples/network/expressroutecircuit.yaml b/examples/network/v1beta1/expressroutecircuit.yaml similarity index 100% rename from examples/network/expressroutecircuit.yaml rename to examples/network/v1beta1/expressroutecircuit.yaml diff --git a/examples/network/expressroutecircuitauthorization.yaml b/examples/network/v1beta1/expressroutecircuitauthorization.yaml similarity index 100% rename from examples/network/expressroutecircuitauthorization.yaml rename to examples/network/v1beta1/expressroutecircuitauthorization.yaml diff --git a/examples/network/expressroutecircuitconnection.yaml b/examples/network/v1beta1/expressroutecircuitconnection.yaml similarity index 100% rename from 
examples/network/expressroutecircuitconnection.yaml rename to examples/network/v1beta1/expressroutecircuitconnection.yaml diff --git a/examples/network/expressroutecircuitpeering.yaml b/examples/network/v1beta1/expressroutecircuitpeering.yaml similarity index 100% rename from examples/network/expressroutecircuitpeering.yaml rename to examples/network/v1beta1/expressroutecircuitpeering.yaml diff --git a/examples/network/expressrouteconnection.yaml b/examples/network/v1beta1/expressrouteconnection.yaml similarity index 100% rename from examples/network/expressrouteconnection.yaml rename to examples/network/v1beta1/expressrouteconnection.yaml diff --git a/examples/network/expressroutegateway.yaml b/examples/network/v1beta1/expressroutegateway.yaml similarity index 100% rename from examples/network/expressroutegateway.yaml rename to examples/network/v1beta1/expressroutegateway.yaml diff --git a/examples/network/expressrouteport.yaml b/examples/network/v1beta1/expressrouteport.yaml similarity index 100% rename from examples/network/expressrouteport.yaml rename to examples/network/v1beta1/expressrouteport.yaml diff --git a/examples/network/firewall-all-in-one.yaml b/examples/network/v1beta1/firewall-all-in-one.yaml similarity index 100% rename from examples/network/firewall-all-in-one.yaml rename to examples/network/v1beta1/firewall-all-in-one.yaml diff --git a/examples/network/firewall.yaml b/examples/network/v1beta1/firewall.yaml similarity index 100% rename from examples/network/firewall.yaml rename to examples/network/v1beta1/firewall.yaml diff --git a/examples/network/firewallapplicationrulecollection.yaml b/examples/network/v1beta1/firewallapplicationrulecollection.yaml similarity index 100% rename from examples/network/firewallapplicationrulecollection.yaml rename to examples/network/v1beta1/firewallapplicationrulecollection.yaml diff --git a/examples/network/firewallnatrulecollection.yaml b/examples/network/v1beta1/firewallnatrulecollection.yaml similarity index 
100% rename from examples/network/firewallnatrulecollection.yaml rename to examples/network/v1beta1/firewallnatrulecollection.yaml diff --git a/examples/network/firewallnetworkrulecollection.yaml b/examples/network/v1beta1/firewallnetworkrulecollection.yaml similarity index 100% rename from examples/network/firewallnetworkrulecollection.yaml rename to examples/network/v1beta1/firewallnetworkrulecollection.yaml diff --git a/examples/network/firewallpolicy.yaml b/examples/network/v1beta1/firewallpolicy.yaml similarity index 100% rename from examples/network/firewallpolicy.yaml rename to examples/network/v1beta1/firewallpolicy.yaml diff --git a/examples/network/firewallpolicyrulecollectiongroup.yaml b/examples/network/v1beta1/firewallpolicyrulecollectiongroup.yaml similarity index 100% rename from examples/network/firewallpolicyrulecollectiongroup.yaml rename to examples/network/v1beta1/firewallpolicyrulecollectiongroup.yaml diff --git a/examples/network/frontdoor-all-in-one.yaml b/examples/network/v1beta1/frontdoor-all-in-one.yaml similarity index 100% rename from examples/network/frontdoor-all-in-one.yaml rename to examples/network/v1beta1/frontdoor-all-in-one.yaml diff --git a/examples/network/ipgroup.yaml b/examples/network/v1beta1/ipgroup.yaml similarity index 100% rename from examples/network/ipgroup.yaml rename to examples/network/v1beta1/ipgroup.yaml diff --git a/examples/network/loadbalancer-all-in-one.yaml b/examples/network/v1beta1/loadbalancer-all-in-one.yaml similarity index 100% rename from examples/network/loadbalancer-all-in-one.yaml rename to examples/network/v1beta1/loadbalancer-all-in-one.yaml diff --git a/examples/network/loadbalancer.yaml b/examples/network/v1beta1/loadbalancer.yaml similarity index 100% rename from examples/network/loadbalancer.yaml rename to examples/network/v1beta1/loadbalancer.yaml diff --git a/examples/network/loadbalancerbackendaddresspool.yaml b/examples/network/v1beta1/loadbalancerbackendaddresspool.yaml similarity index 
100% rename from examples/network/loadbalancerbackendaddresspool.yaml rename to examples/network/v1beta1/loadbalancerbackendaddresspool.yaml diff --git a/examples/network/loadbalancerbackendaddresspooladdress.yaml b/examples/network/v1beta1/loadbalancerbackendaddresspooladdress.yaml similarity index 100% rename from examples/network/loadbalancerbackendaddresspooladdress.yaml rename to examples/network/v1beta1/loadbalancerbackendaddresspooladdress.yaml diff --git a/examples/network/loadbalancernatpool.yaml b/examples/network/v1beta1/loadbalancernatpool.yaml similarity index 100% rename from examples/network/loadbalancernatpool.yaml rename to examples/network/v1beta1/loadbalancernatpool.yaml diff --git a/examples/network/loadbalancernatrule.yaml b/examples/network/v1beta1/loadbalancernatrule.yaml similarity index 100% rename from examples/network/loadbalancernatrule.yaml rename to examples/network/v1beta1/loadbalancernatrule.yaml diff --git a/examples/network/loadbalanceroutboundrule.yaml b/examples/network/v1beta1/loadbalanceroutboundrule.yaml similarity index 100% rename from examples/network/loadbalanceroutboundrule.yaml rename to examples/network/v1beta1/loadbalanceroutboundrule.yaml diff --git a/examples/network/loadbalancerprobe.yaml b/examples/network/v1beta1/loadbalancerprobe.yaml similarity index 100% rename from examples/network/loadbalancerprobe.yaml rename to examples/network/v1beta1/loadbalancerprobe.yaml diff --git a/examples/network/loadbalancerrule.yaml b/examples/network/v1beta1/loadbalancerrule.yaml similarity index 100% rename from examples/network/loadbalancerrule.yaml rename to examples/network/v1beta1/loadbalancerrule.yaml diff --git a/examples/network/localnetworkgateway.yaml b/examples/network/v1beta1/localnetworkgateway.yaml similarity index 100% rename from examples/network/localnetworkgateway.yaml rename to examples/network/v1beta1/localnetworkgateway.yaml diff --git a/examples/network/manager.yaml b/examples/network/v1beta1/manager.yaml 
similarity index 100% rename from examples/network/manager.yaml rename to examples/network/v1beta1/manager.yaml diff --git a/examples/network/managermanagementgroupconnection.yaml b/examples/network/v1beta1/managermanagementgroupconnection.yaml similarity index 100% rename from examples/network/managermanagementgroupconnection.yaml rename to examples/network/v1beta1/managermanagementgroupconnection.yaml diff --git a/examples/network/managernetworkgroup.yaml b/examples/network/v1beta1/managernetworkgroup.yaml similarity index 100% rename from examples/network/managernetworkgroup.yaml rename to examples/network/v1beta1/managernetworkgroup.yaml diff --git a/examples/network/managerstaticmember.yaml b/examples/network/v1beta1/managerstaticmember.yaml similarity index 100% rename from examples/network/managerstaticmember.yaml rename to examples/network/v1beta1/managerstaticmember.yaml diff --git a/examples/network/managersubscriptionconnection.yaml b/examples/network/v1beta1/managersubscriptionconnection.yaml similarity index 100% rename from examples/network/managersubscriptionconnection.yaml rename to examples/network/v1beta1/managersubscriptionconnection.yaml diff --git a/examples/network/natgateway-all-in-one.yaml b/examples/network/v1beta1/natgateway-all-in-one.yaml similarity index 100% rename from examples/network/natgateway-all-in-one.yaml rename to examples/network/v1beta1/natgateway-all-in-one.yaml diff --git a/examples/network/natgateway.yaml b/examples/network/v1beta1/natgateway.yaml similarity index 100% rename from examples/network/natgateway.yaml rename to examples/network/v1beta1/natgateway.yaml diff --git a/examples/network/natgatewaypublicipassociation.yaml b/examples/network/v1beta1/natgatewaypublicipassociation.yaml similarity index 100% rename from examples/network/natgatewaypublicipassociation.yaml rename to examples/network/v1beta1/natgatewaypublicipassociation.yaml diff --git a/examples/network/natgatewaypublicipprefixassociation.yaml 
b/examples/network/v1beta1/natgatewaypublicipprefixassociation.yaml similarity index 100% rename from examples/network/natgatewaypublicipprefixassociation.yaml rename to examples/network/v1beta1/natgatewaypublicipprefixassociation.yaml diff --git a/examples/network/networkinterface-all-in-one.yaml b/examples/network/v1beta1/networkinterface-all-in-one.yaml similarity index 100% rename from examples/network/networkinterface-all-in-one.yaml rename to examples/network/v1beta1/networkinterface-all-in-one.yaml diff --git a/examples/network/networkinterface-publicip.yaml b/examples/network/v1beta1/networkinterface-publicip.yaml similarity index 100% rename from examples/network/networkinterface-publicip.yaml rename to examples/network/v1beta1/networkinterface-publicip.yaml diff --git a/examples/network/networkinterface.yaml b/examples/network/v1beta1/networkinterface.yaml similarity index 100% rename from examples/network/networkinterface.yaml rename to examples/network/v1beta1/networkinterface.yaml diff --git a/examples/network/networkinterfaceapplicationsecuritygroupassociation.yaml b/examples/network/v1beta1/networkinterfaceapplicationsecuritygroupassociation.yaml similarity index 100% rename from examples/network/networkinterfaceapplicationsecuritygroupassociation.yaml rename to examples/network/v1beta1/networkinterfaceapplicationsecuritygroupassociation.yaml diff --git a/examples/network/networkinterfacebackendaddresspoolassociation.yaml b/examples/network/v1beta1/networkinterfacebackendaddresspoolassociation.yaml similarity index 100% rename from examples/network/networkinterfacebackendaddresspoolassociation.yaml rename to examples/network/v1beta1/networkinterfacebackendaddresspoolassociation.yaml diff --git a/examples/network/networkinterfacenatruleassociation.yaml b/examples/network/v1beta1/networkinterfacenatruleassociation.yaml similarity index 100% rename from examples/network/networkinterfacenatruleassociation.yaml rename to 
examples/network/v1beta1/networkinterfacenatruleassociation.yaml diff --git a/examples/network/networkinterfacesecuritygroupassociation.yaml b/examples/network/v1beta1/networkinterfacesecuritygroupassociation.yaml similarity index 100% rename from examples/network/networkinterfacesecuritygroupassociation.yaml rename to examples/network/v1beta1/networkinterfacesecuritygroupassociation.yaml diff --git a/examples/network/packetcapture.yaml b/examples/network/v1beta1/packetcapture.yaml similarity index 100% rename from examples/network/packetcapture.yaml rename to examples/network/v1beta1/packetcapture.yaml diff --git a/examples/network/pointtositevpngateway.yaml b/examples/network/v1beta1/pointtositevpngateway.yaml similarity index 100% rename from examples/network/pointtositevpngateway.yaml rename to examples/network/v1beta1/pointtositevpngateway.yaml diff --git a/examples/network/privatedns-all-in-one.yaml b/examples/network/v1beta1/privatedns-all-in-one.yaml similarity index 100% rename from examples/network/privatedns-all-in-one.yaml rename to examples/network/v1beta1/privatedns-all-in-one.yaml diff --git a/examples/network/privatednsaaaarecord.yaml b/examples/network/v1beta1/privatednsaaaarecord.yaml similarity index 100% rename from examples/network/privatednsaaaarecord.yaml rename to examples/network/v1beta1/privatednsaaaarecord.yaml diff --git a/examples/network/privatednsarecord.yaml b/examples/network/v1beta1/privatednsarecord.yaml similarity index 100% rename from examples/network/privatednsarecord.yaml rename to examples/network/v1beta1/privatednsarecord.yaml diff --git a/examples/network/privatednscnamerecord.yaml b/examples/network/v1beta1/privatednscnamerecord.yaml similarity index 100% rename from examples/network/privatednscnamerecord.yaml rename to examples/network/v1beta1/privatednscnamerecord.yaml diff --git a/examples/network/privatednsmxrecord.yaml b/examples/network/v1beta1/privatednsmxrecord.yaml similarity index 100% rename from 
examples/network/privatednsmxrecord.yaml rename to examples/network/v1beta1/privatednsmxrecord.yaml diff --git a/examples/network/privatednsptrrecord.yaml b/examples/network/v1beta1/privatednsptrrecord.yaml similarity index 100% rename from examples/network/privatednsptrrecord.yaml rename to examples/network/v1beta1/privatednsptrrecord.yaml diff --git a/examples/network/privatednsresolver.yaml b/examples/network/v1beta1/privatednsresolver.yaml similarity index 100% rename from examples/network/privatednsresolver.yaml rename to examples/network/v1beta1/privatednsresolver.yaml diff --git a/examples/network/privatednssrvrecord.yaml b/examples/network/v1beta1/privatednssrvrecord.yaml similarity index 100% rename from examples/network/privatednssrvrecord.yaml rename to examples/network/v1beta1/privatednssrvrecord.yaml diff --git a/examples/network/privatednstxtrecord.yaml b/examples/network/v1beta1/privatednstxtrecord.yaml similarity index 100% rename from examples/network/privatednstxtrecord.yaml rename to examples/network/v1beta1/privatednstxtrecord.yaml diff --git a/examples/network/privatednszone.yaml b/examples/network/v1beta1/privatednszone.yaml similarity index 100% rename from examples/network/privatednszone.yaml rename to examples/network/v1beta1/privatednszone.yaml diff --git a/examples/network/privatednszonevirtualnetworklink.yaml b/examples/network/v1beta1/privatednszonevirtualnetworklink.yaml similarity index 100% rename from examples/network/privatednszonevirtualnetworklink.yaml rename to examples/network/v1beta1/privatednszonevirtualnetworklink.yaml diff --git a/examples/network/privateendpoint.yaml b/examples/network/v1beta1/privateendpoint.yaml similarity index 100% rename from examples/network/privateendpoint.yaml rename to examples/network/v1beta1/privateendpoint.yaml diff --git a/examples/network/privateendpointapplicationsecuritygroupassociation.yaml b/examples/network/v1beta1/privateendpointapplicationsecuritygroupassociation.yaml similarity index 
100% rename from examples/network/privateendpointapplicationsecuritygroupassociation.yaml rename to examples/network/v1beta1/privateendpointapplicationsecuritygroupassociation.yaml diff --git a/examples/network/privatelinkservice.yaml b/examples/network/v1beta1/privatelinkservice.yaml similarity index 100% rename from examples/network/privatelinkservice.yaml rename to examples/network/v1beta1/privatelinkservice.yaml diff --git a/examples/network/profile.yaml b/examples/network/v1beta1/profile.yaml similarity index 100% rename from examples/network/profile.yaml rename to examples/network/v1beta1/profile.yaml diff --git a/examples/network/publicip.yaml b/examples/network/v1beta1/publicip.yaml similarity index 100% rename from examples/network/publicip.yaml rename to examples/network/v1beta1/publicip.yaml diff --git a/examples/network/publicipprefix.yaml b/examples/network/v1beta1/publicipprefix.yaml similarity index 100% rename from examples/network/publicipprefix.yaml rename to examples/network/v1beta1/publicipprefix.yaml diff --git a/examples/network/route.yaml b/examples/network/v1beta1/route.yaml similarity index 100% rename from examples/network/route.yaml rename to examples/network/v1beta1/route.yaml diff --git a/examples/network/routefilter.yaml b/examples/network/v1beta1/routefilter.yaml similarity index 100% rename from examples/network/routefilter.yaml rename to examples/network/v1beta1/routefilter.yaml diff --git a/examples/network/routemap.yaml b/examples/network/v1beta1/routemap.yaml similarity index 100% rename from examples/network/routemap.yaml rename to examples/network/v1beta1/routemap.yaml diff --git a/examples/network/routeserver.yaml b/examples/network/v1beta1/routeserver.yaml similarity index 100% rename from examples/network/routeserver.yaml rename to examples/network/v1beta1/routeserver.yaml diff --git a/examples/network/routeserverbgpconnection.yaml b/examples/network/v1beta1/routeserverbgpconnection.yaml similarity index 100% rename from 
examples/network/routeserverbgpconnection.yaml rename to examples/network/v1beta1/routeserverbgpconnection.yaml diff --git a/examples/network/routetable.yaml b/examples/network/v1beta1/routetable.yaml similarity index 100% rename from examples/network/routetable.yaml rename to examples/network/v1beta1/routetable.yaml diff --git a/examples/network/securitygroup.yaml b/examples/network/v1beta1/securitygroup.yaml similarity index 100% rename from examples/network/securitygroup.yaml rename to examples/network/v1beta1/securitygroup.yaml diff --git a/examples/network/securityrule.yaml b/examples/network/v1beta1/securityrule.yaml similarity index 100% rename from examples/network/securityrule.yaml rename to examples/network/v1beta1/securityrule.yaml diff --git a/examples/network/subnet.yaml b/examples/network/v1beta1/subnet.yaml similarity index 100% rename from examples/network/subnet.yaml rename to examples/network/v1beta1/subnet.yaml diff --git a/examples/network/subnetnatgatewayassociation.yaml b/examples/network/v1beta1/subnetnatgatewayassociation.yaml similarity index 100% rename from examples/network/subnetnatgatewayassociation.yaml rename to examples/network/v1beta1/subnetnatgatewayassociation.yaml diff --git a/examples/network/subnetnetworksecuritygroupassociation.yaml b/examples/network/v1beta1/subnetnetworksecuritygroupassociation.yaml similarity index 100% rename from examples/network/subnetnetworksecuritygroupassociation.yaml rename to examples/network/v1beta1/subnetnetworksecuritygroupassociation.yaml diff --git a/examples/network/subnetroutetableassociation.yaml b/examples/network/v1beta1/subnetroutetableassociation.yaml similarity index 100% rename from examples/network/subnetroutetableassociation.yaml rename to examples/network/v1beta1/subnetroutetableassociation.yaml diff --git a/examples/network/subnetserviceendpointstoragepolicy.yaml b/examples/network/v1beta1/subnetserviceendpointstoragepolicy.yaml similarity index 100% rename from 
examples/network/subnetserviceendpointstoragepolicy.yaml rename to examples/network/v1beta1/subnetserviceendpointstoragepolicy.yaml diff --git a/examples/network/trafficmanagerazureendpoint.yaml b/examples/network/v1beta1/trafficmanagerazureendpoint.yaml similarity index 100% rename from examples/network/trafficmanagerazureendpoint.yaml rename to examples/network/v1beta1/trafficmanagerazureendpoint.yaml diff --git a/examples/network/trafficmanagerexternalendpoint.yaml b/examples/network/v1beta1/trafficmanagerexternalendpoint.yaml similarity index 100% rename from examples/network/trafficmanagerexternalendpoint.yaml rename to examples/network/v1beta1/trafficmanagerexternalendpoint.yaml diff --git a/examples/network/trafficmanagernestedendpoint.yaml b/examples/network/v1beta1/trafficmanagernestedendpoint.yaml similarity index 100% rename from examples/network/trafficmanagernestedendpoint.yaml rename to examples/network/v1beta1/trafficmanagernestedendpoint.yaml diff --git a/examples/network/trafficmanagerprofile.yaml b/examples/network/v1beta1/trafficmanagerprofile.yaml similarity index 100% rename from examples/network/trafficmanagerprofile.yaml rename to examples/network/v1beta1/trafficmanagerprofile.yaml diff --git a/examples/network/virtualhub.yaml b/examples/network/v1beta1/virtualhub.yaml similarity index 100% rename from examples/network/virtualhub.yaml rename to examples/network/v1beta1/virtualhub.yaml diff --git a/examples/network/virtualhubconnection.yaml b/examples/network/v1beta1/virtualhubconnection.yaml similarity index 100% rename from examples/network/virtualhubconnection.yaml rename to examples/network/v1beta1/virtualhubconnection.yaml diff --git a/examples/network/virtualhubip.yaml b/examples/network/v1beta1/virtualhubip.yaml similarity index 100% rename from examples/network/virtualhubip.yaml rename to examples/network/v1beta1/virtualhubip.yaml diff --git a/examples/network/virtualhubroutetable.yaml 
b/examples/network/v1beta1/virtualhubroutetable.yaml similarity index 100% rename from examples/network/virtualhubroutetable.yaml rename to examples/network/v1beta1/virtualhubroutetable.yaml diff --git a/examples/network/virtualhubroutetableroute.yaml b/examples/network/v1beta1/virtualhubroutetableroute.yaml similarity index 100% rename from examples/network/virtualhubroutetableroute.yaml rename to examples/network/v1beta1/virtualhubroutetableroute.yaml diff --git a/examples/network/virtualhubsecuritypartnerprovider.yaml b/examples/network/v1beta1/virtualhubsecuritypartnerprovider.yaml similarity index 100% rename from examples/network/virtualhubsecuritypartnerprovider.yaml rename to examples/network/v1beta1/virtualhubsecuritypartnerprovider.yaml diff --git a/examples/network/virtualnetwork.yaml b/examples/network/v1beta1/virtualnetwork.yaml similarity index 100% rename from examples/network/virtualnetwork.yaml rename to examples/network/v1beta1/virtualnetwork.yaml diff --git a/examples/network/virtualnetworkgateway.yaml b/examples/network/v1beta1/virtualnetworkgateway.yaml similarity index 100% rename from examples/network/virtualnetworkgateway.yaml rename to examples/network/v1beta1/virtualnetworkgateway.yaml diff --git a/examples/network/virtualnetworkgatewayconnection.yaml b/examples/network/v1beta1/virtualnetworkgatewayconnection.yaml similarity index 100% rename from examples/network/virtualnetworkgatewayconnection.yaml rename to examples/network/v1beta1/virtualnetworkgatewayconnection.yaml diff --git a/examples/network/virtualnetworkpeering.yaml b/examples/network/v1beta1/virtualnetworkpeering.yaml similarity index 100% rename from examples/network/virtualnetworkpeering.yaml rename to examples/network/v1beta1/virtualnetworkpeering.yaml diff --git a/examples/network/virtualwan.yaml b/examples/network/v1beta1/virtualwan.yaml similarity index 100% rename from examples/network/virtualwan.yaml rename to examples/network/v1beta1/virtualwan.yaml diff --git 
a/examples/network/vpngateway.yaml b/examples/network/v1beta1/vpngateway.yaml similarity index 100% rename from examples/network/vpngateway.yaml rename to examples/network/v1beta1/vpngateway.yaml diff --git a/examples/network/vpngatewayconnection.yaml b/examples/network/v1beta1/vpngatewayconnection.yaml similarity index 100% rename from examples/network/vpngatewayconnection.yaml rename to examples/network/v1beta1/vpngatewayconnection.yaml diff --git a/examples/network/vpnserverconfiguration.yaml b/examples/network/v1beta1/vpnserverconfiguration.yaml similarity index 100% rename from examples/network/vpnserverconfiguration.yaml rename to examples/network/v1beta1/vpnserverconfiguration.yaml diff --git a/examples/network/vpnserverconfigurationpolicygroup.yaml b/examples/network/v1beta1/vpnserverconfigurationpolicygroup.yaml similarity index 100% rename from examples/network/vpnserverconfigurationpolicygroup.yaml rename to examples/network/v1beta1/vpnserverconfigurationpolicygroup.yaml diff --git a/examples/network/vpnsite.yaml b/examples/network/v1beta1/vpnsite.yaml similarity index 100% rename from examples/network/vpnsite.yaml rename to examples/network/v1beta1/vpnsite.yaml diff --git a/examples/network/watcher.yaml b/examples/network/v1beta1/watcher.yaml similarity index 100% rename from examples/network/watcher.yaml rename to examples/network/v1beta1/watcher.yaml diff --git a/examples/network/watcherflowlog.yaml b/examples/network/v1beta1/watcherflowlog.yaml similarity index 100% rename from examples/network/watcherflowlog.yaml rename to examples/network/v1beta1/watcherflowlog.yaml diff --git a/examples/network/webapplicationfirewallpolicy.yaml b/examples/network/v1beta1/webapplicationfirewallpolicy.yaml similarity index 100% rename from examples/network/webapplicationfirewallpolicy.yaml rename to examples/network/v1beta1/webapplicationfirewallpolicy.yaml diff --git a/examples/network/v1beta2/profile.yaml b/examples/network/v1beta2/profile.yaml new file mode 
100644 index 000000000..c1d8bd761 --- /dev/null +++ b/examples/network/v1beta2/profile.yaml @@ -0,0 +1,69 @@ +# SPDX-FileCopyrightText: 2024 The Crossplane Authors +# +# SPDX-License-Identifier: Apache-2.0 + +apiVersion: network.azure.upbound.io/v1beta2 +kind: Profile +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/profile + name: example +spec: + forProvider: + containerNetworkInterface: + ipConfiguration: + - name: exampleipconfig + subnetIdRef: + name: example-subnet-for-network-profile + name: examplecnic + location: West Europe + resourceGroupNameRef: + name: networkprofile-rg + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + name: networkprofile-rg +spec: + forProvider: + location: West Europe + tags: + provisioner: crossplane + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: VirtualNetwork +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/profile + name: example-vn-for-network-profile +spec: + forProvider: + addressSpace: + - 10.0.0.0/16 + dnsServers: + - 10.0.0.4 + - 10.0.0.5 + location: West Europe + resourceGroupNameRef: + name: networkprofile-rg + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/profile + name: example-subnet-for-network-profile +spec: + forProvider: + addressPrefixes: + - 10.0.1.0/24 + resourceGroupNameRef: + name: networkprofile-rg + virtualNetworkNameRef: + name: example-vn-for-network-profile diff --git a/examples/network/v1beta2/subnet.yaml b/examples/network/v1beta2/subnet.yaml new file mode 100644 index 000000000..c46f327ef --- /dev/null +++ b/examples/network/v1beta2/subnet.yaml @@ -0,0 +1,61 @@ +# SPDX-FileCopyrightText: 2024 The Crossplane Authors +# +# SPDX-License-Identifier: Apache-2.0 + +apiVersion: network.azure.upbound.io/v1beta2 +kind: Subnet +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/subnet + labels: + 
testing.upbound.io/example-name: example + name: example-subnet +spec: + forProvider: + addressPrefixes: + - 10.0.1.0/24 + delegation: + - name: delegation + serviceDelegation: + actions: + - Microsoft.Network/virtualNetworks/subnets/action + name: Microsoft.ContainerInstance/containerGroups + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example + virtualNetworkNameSelector: + matchLabels: + testing.upbound.io/example-name: example + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta1/subnet + labels: + testing.upbound.io/example-name: example + name: example-subnet +spec: + forProvider: + location: West Europe + +--- + +apiVersion: network.azure.upbound.io/v1beta2 +kind: VirtualNetwork +metadata: + annotations: + meta.upbound.io/example-id: network/v1beta2/subnet + labels: + testing.upbound.io/example-name: example + name: example-subnet +spec: + forProvider: + addressSpace: + - 10.0.0.0/16 + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples/notificationhubs/authorizationrule.yaml b/examples/notificationhubs/v1beta1/authorizationrule.yaml similarity index 100% rename from examples/notificationhubs/authorizationrule.yaml rename to examples/notificationhubs/v1beta1/authorizationrule.yaml diff --git a/examples/notificationhubs/notificationhub.yaml b/examples/notificationhubs/v1beta1/notificationhub.yaml similarity index 100% rename from examples/notificationhubs/notificationhub.yaml rename to examples/notificationhubs/v1beta1/notificationhub.yaml diff --git a/examples/notificationhubs/notificationhubnamespace.yaml b/examples/notificationhubs/v1beta1/notificationhubnamespace.yaml similarity index 100% rename from examples/notificationhubs/notificationhubnamespace.yaml rename to examples/notificationhubs/v1beta1/notificationhubnamespace.yaml diff --git 
a/examples/operationalinsights/loganalyticsdataexportrule.yaml b/examples/operationalinsights/v1beta1/loganalyticsdataexportrule.yaml similarity index 100% rename from examples/operationalinsights/loganalyticsdataexportrule.yaml rename to examples/operationalinsights/v1beta1/loganalyticsdataexportrule.yaml diff --git a/examples/operationalinsights/loganalyticsdatasourcewindowsevent.yaml b/examples/operationalinsights/v1beta1/loganalyticsdatasourcewindowsevent.yaml similarity index 100% rename from examples/operationalinsights/loganalyticsdatasourcewindowsevent.yaml rename to examples/operationalinsights/v1beta1/loganalyticsdatasourcewindowsevent.yaml diff --git a/examples/operationalinsights/loganalyticsdatasourcewindowsperformancecounter.yaml b/examples/operationalinsights/v1beta1/loganalyticsdatasourcewindowsperformancecounter.yaml similarity index 100% rename from examples/operationalinsights/loganalyticsdatasourcewindowsperformancecounter.yaml rename to examples/operationalinsights/v1beta1/loganalyticsdatasourcewindowsperformancecounter.yaml diff --git a/examples/operationalinsights/loganalyticslinkedservice.yaml b/examples/operationalinsights/v1beta1/loganalyticslinkedservice.yaml similarity index 100% rename from examples/operationalinsights/loganalyticslinkedservice.yaml rename to examples/operationalinsights/v1beta1/loganalyticslinkedservice.yaml diff --git a/examples/operationalinsights/loganalyticslinkedstorageaccount.yaml b/examples/operationalinsights/v1beta1/loganalyticslinkedstorageaccount.yaml similarity index 100% rename from examples/operationalinsights/loganalyticslinkedstorageaccount.yaml rename to examples/operationalinsights/v1beta1/loganalyticslinkedstorageaccount.yaml diff --git a/examples/operationalinsights/loganalyticsquerypack.yaml b/examples/operationalinsights/v1beta1/loganalyticsquerypack.yaml similarity index 100% rename from examples/operationalinsights/loganalyticsquerypack.yaml rename to 
examples/operationalinsights/v1beta1/loganalyticsquerypack.yaml diff --git a/examples/operationalinsights/loganalyticsquerypackquery.yaml b/examples/operationalinsights/v1beta1/loganalyticsquerypackquery.yaml similarity index 100% rename from examples/operationalinsights/loganalyticsquerypackquery.yaml rename to examples/operationalinsights/v1beta1/loganalyticsquerypackquery.yaml diff --git a/examples/operationalinsights/loganalyticssavedsearch.yaml b/examples/operationalinsights/v1beta1/loganalyticssavedsearch.yaml similarity index 100% rename from examples/operationalinsights/loganalyticssavedsearch.yaml rename to examples/operationalinsights/v1beta1/loganalyticssavedsearch.yaml diff --git a/examples/operationalinsights/workspace.yaml b/examples/operationalinsights/v1beta1/workspace.yaml similarity index 100% rename from examples/operationalinsights/workspace.yaml rename to examples/operationalinsights/v1beta1/workspace.yaml diff --git a/examples/operationsmanagement/loganalyticssolution.yaml b/examples/operationsmanagement/v1beta1/loganalyticssolution.yaml similarity index 95% rename from examples/operationsmanagement/loganalyticssolution.yaml rename to examples/operationsmanagement/v1beta1/loganalyticssolution.yaml index 62b84185a..cfd7fbdb2 100644 --- a/examples/operationsmanagement/loganalyticssolution.yaml +++ b/examples/operationsmanagement/v1beta1/loganalyticssolution.yaml @@ -32,7 +32,7 @@ kind: Workspace metadata: annotations: meta.upbound.io/example-id: operationsmanagement/v1beta1/loganalyticssolution - uptest.upbound.io/pre-delete-hook: testhooks/delete-loganalyticssolution.sh + uptest.upbound.io/pre-delete-hook: ../testhooks/delete-loganalyticssolution.sh labels: testing.upbound.io/example-name: example name: example diff --git a/examples/orbital/contactprofile.yaml b/examples/orbital/v1beta1/contactprofile.yaml similarity index 100% rename from examples/orbital/contactprofile.yaml rename to examples/orbital/v1beta1/contactprofile.yaml diff --git 
a/examples/orbital/spacecraft.yaml b/examples/orbital/v1beta1/spacecraft.yaml similarity index 100% rename from examples/orbital/spacecraft.yaml rename to examples/orbital/v1beta1/spacecraft.yaml diff --git a/examples/policyinsights/resourcepolicyremediation.yaml b/examples/policyinsights/v1beta1/resourcepolicyremediation.yaml similarity index 100% rename from examples/policyinsights/resourcepolicyremediation.yaml rename to examples/policyinsights/v1beta1/resourcepolicyremediation.yaml diff --git a/examples/policyinsights/subscriptionpolicyremediation.yaml b/examples/policyinsights/v1beta1/subscriptionpolicyremediation.yaml similarity index 100% rename from examples/policyinsights/subscriptionpolicyremediation.yaml rename to examples/policyinsights/v1beta1/subscriptionpolicyremediation.yaml diff --git a/examples/portal/dashboard.yaml b/examples/portal/v1beta1/dashboard.yaml similarity index 100% rename from examples/portal/dashboard.yaml rename to examples/portal/v1beta1/dashboard.yaml diff --git a/examples/powerbidedicated/powerbiembedded.yaml b/examples/powerbidedicated/v1beta1/powerbiembedded.yaml similarity index 100% rename from examples/powerbidedicated/powerbiembedded.yaml rename to examples/powerbidedicated/v1beta1/powerbiembedded.yaml diff --git a/examples/provider/config.yaml b/examples/provider/v1beta1/config.yaml similarity index 100% rename from examples/provider/config.yaml rename to examples/provider/v1beta1/config.yaml diff --git a/examples/provider/upbound.yaml b/examples/provider/v1beta1/upbound.yaml similarity index 100% rename from examples/provider/upbound.yaml rename to examples/provider/v1beta1/upbound.yaml diff --git a/examples/purview/account.yaml b/examples/purview/v1beta1/account.yaml similarity index 100% rename from examples/purview/account.yaml rename to examples/purview/v1beta1/account.yaml diff --git a/examples/recoveryservices/backupcontainerstorageaccount.yaml b/examples/recoveryservices/v1beta1/backupcontainerstorageaccount.yaml 
similarity index 100% rename from examples/recoveryservices/backupcontainerstorageaccount.yaml rename to examples/recoveryservices/v1beta1/backupcontainerstorageaccount.yaml diff --git a/examples/recoveryservices/backuppolicyfileshare.yaml b/examples/recoveryservices/v1beta1/backuppolicyfileshare.yaml similarity index 100% rename from examples/recoveryservices/backuppolicyfileshare.yaml rename to examples/recoveryservices/v1beta1/backuppolicyfileshare.yaml diff --git a/examples/recoveryservices/backuppolicyvm.yaml b/examples/recoveryservices/v1beta1/backuppolicyvm.yaml similarity index 100% rename from examples/recoveryservices/backuppolicyvm.yaml rename to examples/recoveryservices/v1beta1/backuppolicyvm.yaml diff --git a/examples/recoveryservices/backuppolicyvmworkload.yaml b/examples/recoveryservices/v1beta1/backuppolicyvmworkload.yaml similarity index 100% rename from examples/recoveryservices/backuppolicyvmworkload.yaml rename to examples/recoveryservices/v1beta1/backuppolicyvmworkload.yaml diff --git a/examples/recoveryservices/backupprotectedfileshare.yaml b/examples/recoveryservices/v1beta1/backupprotectedfileshare.yaml similarity index 100% rename from examples/recoveryservices/backupprotectedfileshare.yaml rename to examples/recoveryservices/v1beta1/backupprotectedfileshare.yaml diff --git a/examples/recoveryservices/backupprotectedvm.yaml b/examples/recoveryservices/v1beta1/backupprotectedvm.yaml similarity index 100% rename from examples/recoveryservices/backupprotectedvm.yaml rename to examples/recoveryservices/v1beta1/backupprotectedvm.yaml diff --git a/examples/recoveryservices/siterecoveryfabric.yaml b/examples/recoveryservices/v1beta1/siterecoveryfabric.yaml similarity index 100% rename from examples/recoveryservices/siterecoveryfabric.yaml rename to examples/recoveryservices/v1beta1/siterecoveryfabric.yaml diff --git a/examples/recoveryservices/siterecoverynetworkmapping.yaml b/examples/recoveryservices/v1beta1/siterecoverynetworkmapping.yaml 
similarity index 100% rename from examples/recoveryservices/siterecoverynetworkmapping.yaml rename to examples/recoveryservices/v1beta1/siterecoverynetworkmapping.yaml diff --git a/examples/recoveryservices/siterecoveryprotectioncontainer.yaml b/examples/recoveryservices/v1beta1/siterecoveryprotectioncontainer.yaml similarity index 100% rename from examples/recoveryservices/siterecoveryprotectioncontainer.yaml rename to examples/recoveryservices/v1beta1/siterecoveryprotectioncontainer.yaml diff --git a/examples/recoveryservices/siterecoveryprotectioncontainermapping.yaml b/examples/recoveryservices/v1beta1/siterecoveryprotectioncontainermapping.yaml similarity index 100% rename from examples/recoveryservices/siterecoveryprotectioncontainermapping.yaml rename to examples/recoveryservices/v1beta1/siterecoveryprotectioncontainermapping.yaml diff --git a/examples/recoveryservices/siterecoveryreplicationpolicy.yaml b/examples/recoveryservices/v1beta1/siterecoveryreplicationpolicy.yaml similarity index 100% rename from examples/recoveryservices/siterecoveryreplicationpolicy.yaml rename to examples/recoveryservices/v1beta1/siterecoveryreplicationpolicy.yaml diff --git a/examples/recoveryservices/vault.yaml b/examples/recoveryservices/v1beta1/vault.yaml similarity index 100% rename from examples/recoveryservices/vault.yaml rename to examples/recoveryservices/v1beta1/vault.yaml diff --git a/examples/relay/eventrelaynamespace.yaml b/examples/relay/v1beta1/eventrelaynamespace.yaml similarity index 100% rename from examples/relay/eventrelaynamespace.yaml rename to examples/relay/v1beta1/eventrelaynamespace.yaml diff --git a/examples/relay/hybridconnection.yaml b/examples/relay/v1beta1/hybridconnection.yaml similarity index 100% rename from examples/relay/hybridconnection.yaml rename to examples/relay/v1beta1/hybridconnection.yaml diff --git a/examples/relay/hybridconnectionauthorizationrule.yaml b/examples/relay/v1beta1/hybridconnectionauthorizationrule.yaml similarity index 
100% rename from examples/relay/hybridconnectionauthorizationrule.yaml rename to examples/relay/v1beta1/hybridconnectionauthorizationrule.yaml diff --git a/examples/relay/namespaceauthorizationrule.yaml b/examples/relay/v1beta1/namespaceauthorizationrule.yaml similarity index 100% rename from examples/relay/namespaceauthorizationrule.yaml rename to examples/relay/v1beta1/namespaceauthorizationrule.yaml diff --git a/examples/resources/resourcedeploymentscriptazurecli.yaml b/examples/resources/v1beta1/resourcedeploymentscriptazurecli.yaml similarity index 100% rename from examples/resources/resourcedeploymentscriptazurecli.yaml rename to examples/resources/v1beta1/resourcedeploymentscriptazurecli.yaml diff --git a/examples/resources/resourcedeploymentscriptazurepowershell.yaml b/examples/resources/v1beta1/resourcedeploymentscriptazurepowershell.yaml similarity index 100% rename from examples/resources/resourcedeploymentscriptazurepowershell.yaml rename to examples/resources/v1beta1/resourcedeploymentscriptazurepowershell.yaml diff --git a/examples/resources/resourcegrouptemplatedeployment.yaml b/examples/resources/v1beta1/resourcegrouptemplatedeployment.yaml similarity index 100% rename from examples/resources/resourcegrouptemplatedeployment.yaml rename to examples/resources/v1beta1/resourcegrouptemplatedeployment.yaml diff --git a/examples/resources/subscriptiontemplatedeployment.yaml b/examples/resources/v1beta1/subscriptiontemplatedeployment.yaml similarity index 100% rename from examples/resources/subscriptiontemplatedeployment.yaml rename to examples/resources/v1beta1/subscriptiontemplatedeployment.yaml diff --git a/examples/search/service.yaml b/examples/search/v1beta1/service.yaml similarity index 100% rename from examples/search/service.yaml rename to examples/search/v1beta1/service.yaml diff --git a/examples/search/sharedprivatelinkservice.yaml b/examples/search/v1beta1/sharedprivatelinkservice.yaml similarity index 100% rename from 
examples/search/sharedprivatelinkservice.yaml rename to examples/search/v1beta1/sharedprivatelinkservice.yaml diff --git a/examples/security/advancedthreatprotection.yaml b/examples/security/v1beta1/advancedthreatprotection.yaml similarity index 100% rename from examples/security/advancedthreatprotection.yaml rename to examples/security/v1beta1/advancedthreatprotection.yaml diff --git a/examples/security/iotsecuritydevicegroup.yaml b/examples/security/v1beta1/iotsecuritydevicegroup.yaml similarity index 100% rename from examples/security/iotsecuritydevicegroup.yaml rename to examples/security/v1beta1/iotsecuritydevicegroup.yaml diff --git a/examples/security/securitycenterassessment.yaml b/examples/security/v1beta1/securitycenterassessment.yaml similarity index 100% rename from examples/security/securitycenterassessment.yaml rename to examples/security/v1beta1/securitycenterassessment.yaml diff --git a/examples/security/securitycenterassessmentpolicy.yaml b/examples/security/v1beta1/securitycenterassessmentpolicy.yaml similarity index 100% rename from examples/security/securitycenterassessmentpolicy.yaml rename to examples/security/v1beta1/securitycenterassessmentpolicy.yaml diff --git a/examples/security/securitycenterautoprovisioning.yaml b/examples/security/v1beta1/securitycenterautoprovisioning.yaml similarity index 100% rename from examples/security/securitycenterautoprovisioning.yaml rename to examples/security/v1beta1/securitycenterautoprovisioning.yaml diff --git a/examples/security/securitycentercontact.yaml b/examples/security/v1beta1/securitycentercontact.yaml similarity index 100% rename from examples/security/securitycentercontact.yaml rename to examples/security/v1beta1/securitycentercontact.yaml diff --git a/examples/security/securitycenterservervulnerabilityassessment.yaml b/examples/security/v1beta1/securitycenterservervulnerabilityassessment.yaml similarity index 100% rename from examples/security/securitycenterservervulnerabilityassessment.yaml 
rename to examples/security/v1beta1/securitycenterservervulnerabilityassessment.yaml diff --git a/examples/security/securitycenterservervulnerabilityassessmentvirtualmachine.yaml b/examples/security/v1beta1/securitycenterservervulnerabilityassessmentvirtualmachine.yaml similarity index 100% rename from examples/security/securitycenterservervulnerabilityassessmentvirtualmachine.yaml rename to examples/security/v1beta1/securitycenterservervulnerabilityassessmentvirtualmachine.yaml diff --git a/examples/security/securitycentersetting.yaml b/examples/security/v1beta1/securitycentersetting.yaml similarity index 100% rename from examples/security/securitycentersetting.yaml rename to examples/security/v1beta1/securitycentersetting.yaml diff --git a/examples/security/securitycentersubscriptionpricing.yaml b/examples/security/v1beta1/securitycentersubscriptionpricing.yaml similarity index 100% rename from examples/security/securitycentersubscriptionpricing.yaml rename to examples/security/v1beta1/securitycentersubscriptionpricing.yaml diff --git a/examples/security/securitycenterworkspace.yaml b/examples/security/v1beta1/securitycenterworkspace.yaml similarity index 100% rename from examples/security/securitycenterworkspace.yaml rename to examples/security/v1beta1/securitycenterworkspace.yaml diff --git a/examples/securityinsights/sentinelalertrulefusion.yaml b/examples/securityinsights/v1beta1/sentinelalertrulefusion.yaml similarity index 100% rename from examples/securityinsights/sentinelalertrulefusion.yaml rename to examples/securityinsights/v1beta1/sentinelalertrulefusion.yaml diff --git a/examples/securityinsights/sentinelalertrulemachinelearningbehavioranalytics.yaml b/examples/securityinsights/v1beta1/sentinelalertrulemachinelearningbehavioranalytics.yaml similarity index 100% rename from examples/securityinsights/sentinelalertrulemachinelearningbehavioranalytics.yaml rename to examples/securityinsights/v1beta1/sentinelalertrulemachinelearningbehavioranalytics.yaml 
diff --git a/examples/securityinsights/sentinelalertrulemssecurityincident.yaml b/examples/securityinsights/v1beta1/sentinelalertrulemssecurityincident.yaml similarity index 100% rename from examples/securityinsights/sentinelalertrulemssecurityincident.yaml rename to examples/securityinsights/v1beta1/sentinelalertrulemssecurityincident.yaml diff --git a/examples/securityinsights/sentinelautomationrule.yaml b/examples/securityinsights/v1beta1/sentinelautomationrule.yaml similarity index 100% rename from examples/securityinsights/sentinelautomationrule.yaml rename to examples/securityinsights/v1beta1/sentinelautomationrule.yaml diff --git a/examples/securityinsights/sentineldataconnectoriot.yaml b/examples/securityinsights/v1beta1/sentineldataconnectoriot.yaml similarity index 100% rename from examples/securityinsights/sentineldataconnectoriot.yaml rename to examples/securityinsights/v1beta1/sentineldataconnectoriot.yaml diff --git a/examples/securityinsights/sentinelloganalyticsworkspaceonboarding.yaml b/examples/securityinsights/v1beta1/sentinelloganalyticsworkspaceonboarding.yaml similarity index 94% rename from examples/securityinsights/sentinelloganalyticsworkspaceonboarding.yaml rename to examples/securityinsights/v1beta1/sentinelloganalyticsworkspaceonboarding.yaml index 614ab61f8..a4671c704 100644 --- a/examples/securityinsights/sentinelloganalyticsworkspaceonboarding.yaml +++ b/examples/securityinsights/v1beta1/sentinelloganalyticsworkspaceonboarding.yaml @@ -25,7 +25,7 @@ apiVersion: operationalinsights.azure.upbound.io/v1beta1 kind: Workspace metadata: annotations: - uptest.upbound.io/pre-delete-hook: testhooks/delete-securityinsights.sh + uptest.upbound.io/pre-delete-hook: ../testhooks/delete-securityinsights.sh meta.upbound.io/example-id: securityinsights/v1beta1/sentinelloganalyticsworkspaceonboarding labels: testing.upbound.io/example-name: example diff --git a/examples/securityinsights/sentinelwatchlist.yaml 
b/examples/securityinsights/v1beta1/sentinelwatchlist.yaml similarity index 100% rename from examples/securityinsights/sentinelwatchlist.yaml rename to examples/securityinsights/v1beta1/sentinelwatchlist.yaml diff --git a/examples/servicebus/namespaceauthorizationrule.yaml b/examples/servicebus/v1beta1/namespaceauthorizationrule.yaml similarity index 100% rename from examples/servicebus/namespaceauthorizationrule.yaml rename to examples/servicebus/v1beta1/namespaceauthorizationrule.yaml diff --git a/examples/servicebus/namespacedisasterrecoveryconfig.yaml b/examples/servicebus/v1beta1/namespacedisasterrecoveryconfig.yaml similarity index 100% rename from examples/servicebus/namespacedisasterrecoveryconfig.yaml rename to examples/servicebus/v1beta1/namespacedisasterrecoveryconfig.yaml diff --git a/examples/servicebus/namespacenetworkruleset.yaml b/examples/servicebus/v1beta1/namespacenetworkruleset.yaml similarity index 100% rename from examples/servicebus/namespacenetworkruleset.yaml rename to examples/servicebus/v1beta1/namespacenetworkruleset.yaml diff --git a/examples/servicebus/queue.yaml b/examples/servicebus/v1beta1/queue.yaml similarity index 100% rename from examples/servicebus/queue.yaml rename to examples/servicebus/v1beta1/queue.yaml diff --git a/examples/servicebus/queueauthorizationrule.yaml b/examples/servicebus/v1beta1/queueauthorizationrule.yaml similarity index 100% rename from examples/servicebus/queueauthorizationrule.yaml rename to examples/servicebus/v1beta1/queueauthorizationrule.yaml diff --git a/examples/servicebus/servicebusnamespace.yaml b/examples/servicebus/v1beta1/servicebusnamespace.yaml similarity index 100% rename from examples/servicebus/servicebusnamespace.yaml rename to examples/servicebus/v1beta1/servicebusnamespace.yaml diff --git a/examples/servicebus/subscription.yaml b/examples/servicebus/v1beta1/subscription.yaml similarity index 100% rename from examples/servicebus/subscription.yaml rename to 
examples/servicebus/v1beta1/subscription.yaml diff --git a/examples/servicebus/subscriptionrule.yaml b/examples/servicebus/v1beta1/subscriptionrule.yaml similarity index 100% rename from examples/servicebus/subscriptionrule.yaml rename to examples/servicebus/v1beta1/subscriptionrule.yaml diff --git a/examples/servicebus/topic.yaml b/examples/servicebus/v1beta1/topic.yaml similarity index 100% rename from examples/servicebus/topic.yaml rename to examples/servicebus/v1beta1/topic.yaml diff --git a/examples/servicebus/topicauthorizationrule.yaml b/examples/servicebus/v1beta1/topicauthorizationrule.yaml similarity index 100% rename from examples/servicebus/topicauthorizationrule.yaml rename to examples/servicebus/v1beta1/topicauthorizationrule.yaml diff --git a/examples/servicefabric/cluster.yaml b/examples/servicefabric/v1beta1/cluster.yaml similarity index 100% rename from examples/servicefabric/cluster.yaml rename to examples/servicefabric/v1beta1/cluster.yaml diff --git a/examples/servicefabric/managedcluster.yaml b/examples/servicefabric/v1beta1/managedcluster.yaml similarity index 100% rename from examples/servicefabric/managedcluster.yaml rename to examples/servicefabric/v1beta1/managedcluster.yaml diff --git a/examples/servicelinker/springcloudconnection.yaml b/examples/servicelinker/v1beta1/springcloudconnection.yaml similarity index 100% rename from examples/servicelinker/springcloudconnection.yaml rename to examples/servicelinker/v1beta1/springcloudconnection.yaml diff --git a/examples/signalrservice/networkacl.yaml b/examples/signalrservice/v1beta1/networkacl.yaml similarity index 100% rename from examples/signalrservice/networkacl.yaml rename to examples/signalrservice/v1beta1/networkacl.yaml diff --git a/examples/signalrservice/service.yaml b/examples/signalrservice/v1beta1/service.yaml similarity index 100% rename from examples/signalrservice/service.yaml rename to examples/signalrservice/v1beta1/service.yaml diff --git 
a/examples/signalrservice/signalrsharedprivatelinkresource.yaml b/examples/signalrservice/v1beta1/signalrsharedprivatelinkresource.yaml similarity index 100% rename from examples/signalrservice/signalrsharedprivatelinkresource.yaml rename to examples/signalrservice/v1beta1/signalrsharedprivatelinkresource.yaml diff --git a/examples/signalrservice/webpubsub.yaml b/examples/signalrservice/v1beta1/webpubsub.yaml similarity index 100% rename from examples/signalrservice/webpubsub.yaml rename to examples/signalrservice/v1beta1/webpubsub.yaml diff --git a/examples/signalrservice/webpubsubhub.yaml b/examples/signalrservice/v1beta1/webpubsubhub.yaml similarity index 100% rename from examples/signalrservice/webpubsubhub.yaml rename to examples/signalrservice/v1beta1/webpubsubhub.yaml diff --git a/examples/signalrservice/webpubsubnetworkacl.yaml b/examples/signalrservice/v1beta1/webpubsubnetworkacl.yaml similarity index 100% rename from examples/signalrservice/webpubsubnetworkacl.yaml rename to examples/signalrservice/v1beta1/webpubsubnetworkacl.yaml diff --git a/examples/solutions/managedapplicationdefinition.yaml b/examples/solutions/v1beta1/managedapplicationdefinition.yaml similarity index 100% rename from examples/solutions/managedapplicationdefinition.yaml rename to examples/solutions/v1beta1/managedapplicationdefinition.yaml diff --git a/examples/spring/cloudapplicationliveview.yaml b/examples/spring/v1beta1/cloudapplicationliveview.yaml similarity index 100% rename from examples/spring/cloudapplicationliveview.yaml rename to examples/spring/v1beta1/cloudapplicationliveview.yaml diff --git a/examples/sql/mssqldatabaseextendedauditingpolicy.yaml b/examples/sql/v1beta1/mssqldatabaseextendedauditingpolicy.yaml similarity index 100% rename from examples/sql/mssqldatabaseextendedauditingpolicy.yaml rename to examples/sql/v1beta1/mssqldatabaseextendedauditingpolicy.yaml diff --git a/examples/sql/mssqldatabasevulnerabilityassessmentrulebaseline.yaml 
b/examples/sql/v1beta1/mssqldatabasevulnerabilityassessmentrulebaseline.yaml similarity index 100% rename from examples/sql/mssqldatabasevulnerabilityassessmentrulebaseline.yaml rename to examples/sql/v1beta1/mssqldatabasevulnerabilityassessmentrulebaseline.yaml diff --git a/examples/sql/mssqlelasticpool.yaml b/examples/sql/v1beta1/mssqlelasticpool.yaml similarity index 100% rename from examples/sql/mssqlelasticpool.yaml rename to examples/sql/v1beta1/mssqlelasticpool.yaml diff --git a/examples/sql/mssqlfailovergroup.yaml b/examples/sql/v1beta1/mssqlfailovergroup.yaml similarity index 100% rename from examples/sql/mssqlfailovergroup.yaml rename to examples/sql/v1beta1/mssqlfailovergroup.yaml diff --git a/examples/sql/mssqlfirewallrule.yaml b/examples/sql/v1beta1/mssqlfirewallrule.yaml similarity index 100% rename from examples/sql/mssqlfirewallrule.yaml rename to examples/sql/v1beta1/mssqlfirewallrule.yaml diff --git a/examples/sql/mssqljobagent.yaml b/examples/sql/v1beta1/mssqljobagent.yaml similarity index 100% rename from examples/sql/mssqljobagent.yaml rename to examples/sql/v1beta1/mssqljobagent.yaml diff --git a/examples/sql/mssqljobcredential.yaml b/examples/sql/v1beta1/mssqljobcredential.yaml similarity index 100% rename from examples/sql/mssqljobcredential.yaml rename to examples/sql/v1beta1/mssqljobcredential.yaml diff --git a/examples/sql/mssqlmanageddatabase.yaml b/examples/sql/v1beta1/mssqlmanageddatabase.yaml similarity index 100% rename from examples/sql/mssqlmanageddatabase.yaml rename to examples/sql/v1beta1/mssqlmanageddatabase.yaml diff --git a/examples/sql/mssqlmanagedinstance.yaml b/examples/sql/v1beta1/mssqlmanagedinstance.yaml similarity index 100% rename from examples/sql/mssqlmanagedinstance.yaml rename to examples/sql/v1beta1/mssqlmanagedinstance.yaml diff --git a/examples/sql/mssqlmanagedinstanceactivedirectoryadministrator.yaml b/examples/sql/v1beta1/mssqlmanagedinstanceactivedirectoryadministrator.yaml similarity index 100% rename from 
examples/sql/mssqlmanagedinstanceactivedirectoryadministrator.yaml rename to examples/sql/v1beta1/mssqlmanagedinstanceactivedirectoryadministrator.yaml diff --git a/examples/sql/mssqlmanagedinstancefailovergroup.yaml b/examples/sql/v1beta1/mssqlmanagedinstancefailovergroup.yaml similarity index 100% rename from examples/sql/mssqlmanagedinstancefailovergroup.yaml rename to examples/sql/v1beta1/mssqlmanagedinstancefailovergroup.yaml diff --git a/examples/sql/mssqlmanagedinstancevulnerabilityassessment.yaml b/examples/sql/v1beta1/mssqlmanagedinstancevulnerabilityassessment.yaml similarity index 100% rename from examples/sql/mssqlmanagedinstancevulnerabilityassessment.yaml rename to examples/sql/v1beta1/mssqlmanagedinstancevulnerabilityassessment.yaml diff --git a/examples/sql/mssqlservermicrosoftsupportauditingpolicy.yaml b/examples/sql/v1beta1/mssqlservermicrosoftsupportauditingpolicy.yaml similarity index 96% rename from examples/sql/mssqlservermicrosoftsupportauditingpolicy.yaml rename to examples/sql/v1beta1/mssqlservermicrosoftsupportauditingpolicy.yaml index 5ca17e330..9dadd9cc7 100644 --- a/examples/sql/mssqlservermicrosoftsupportauditingpolicy.yaml +++ b/examples/sql/v1beta1/mssqlservermicrosoftsupportauditingpolicy.yaml @@ -60,7 +60,7 @@ apiVersion: storage.azure.upbound.io/v1beta1 kind: Account metadata: annotations: - uptest.upbound.io/pre-delete-hook: testhooks/delete-supportauditingpolicy.sh + uptest.upbound.io/pre-delete-hook: ../testhooks/delete-supportauditingpolicy.sh meta.upbound.io/example-id: sql/v1beta1/mssqlservermicrosoftsupportauditingpolicy labels: testing.upbound.io/example-name: staccexample0001 diff --git a/examples/sql/mssqlserversecurityalertpolicy.yaml b/examples/sql/v1beta1/mssqlserversecurityalertpolicy.yaml similarity index 100% rename from examples/sql/mssqlserversecurityalertpolicy.yaml rename to examples/sql/v1beta1/mssqlserversecurityalertpolicy.yaml diff --git a/examples/sql/mssqlservertransparentdataencryption.yaml 
b/examples/sql/v1beta1/mssqlservertransparentdataencryption.yaml similarity index 100% rename from examples/sql/mssqlservertransparentdataencryption.yaml rename to examples/sql/v1beta1/mssqlservertransparentdataencryption.yaml diff --git a/examples/sql/mssqlservervulnerabilityassessment.yaml b/examples/sql/v1beta1/mssqlservervulnerabilityassessment.yaml similarity index 100% rename from examples/sql/mssqlservervulnerabilityassessment.yaml rename to examples/sql/v1beta1/mssqlservervulnerabilityassessment.yaml diff --git a/examples/sql/mssqlvirtualnetworkrule.yaml b/examples/sql/v1beta1/mssqlvirtualnetworkrule.yaml similarity index 100% rename from examples/sql/mssqlvirtualnetworkrule.yaml rename to examples/sql/v1beta1/mssqlvirtualnetworkrule.yaml diff --git a/examples/sql/v1beta2/mssqlelasticpool.yaml b/examples/sql/v1beta2/mssqlelasticpool.yaml new file mode 100644 index 000000000..01441db31 --- /dev/null +++ b/examples/sql/v1beta2/mssqlelasticpool.yaml @@ -0,0 +1,79 @@ +# SPDX-FileCopyrightText: 2024 The Crossplane Authors +# +# SPDX-License-Identifier: Apache-2.0 + +apiVersion: sql.azure.upbound.io/v1beta2 +kind: MSSQLElasticPool +metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta2/mssqlelasticpool + labels: + testing.upbound.io/example-name: examplemssqlelasticpool + name: examplemssqlelasticpool +spec: + forProvider: + location: West Europe + maxSizeGb: 5 + perDatabaseSettings: + maxCapacity: 4 + minCapacity: 1 + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: mssqlelasticpool-rg + serverNameSelector: + matchLabels: + testing.upbound.io/example-name: mssqlelasticpoolsrv + sku: + capacity: 4 + family: Gen5 + name: GP_Gen5 + tier: GeneralPurpose + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta1/mssqlelasticpool + labels: + testing.upbound.io/example-name: mssqlelasticpool-rg + name: mssqlelasticpool-rg +spec: + forProvider: + 
location: West Europe + +--- + +apiVersion: sql.azure.upbound.io/v1beta2 +kind: MSSQLServer +metadata: + annotations: + meta.upbound.io/example-id: sql/v1beta2/mssqlelasticpool + uptest.upbound.io/timeout: "3600" + labels: + testing.upbound.io/example-name: mssqlelasticpoolsrv + name: mssqlelasticpoolsrv +spec: + forProvider: + administratorLogin: 4dm1n157r470r + administratorLoginPasswordSecretRef: + key: example-key + name: example-secret + namespace: upbound-system + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: mssqlelasticpool-rg + version: "12.0" + +--- + +apiVersion: v1 +data: + example-key: dGVzdFBhc3N3b3JkITEyMw== +kind: Secret +metadata: + name: example-secret + namespace: upbound-system +type: Opaque diff --git a/examples/storage/accountcustomermanagedkey.yaml b/examples/storage/v1beta1/accountcustomermanagedkey.yaml similarity index 100% rename from examples/storage/accountcustomermanagedkey.yaml rename to examples/storage/v1beta1/accountcustomermanagedkey.yaml diff --git a/examples/storage/accountlocaluser.yaml b/examples/storage/v1beta1/accountlocaluser.yaml similarity index 100% rename from examples/storage/accountlocaluser.yaml rename to examples/storage/v1beta1/accountlocaluser.yaml diff --git a/examples/storage/accountnetworkrules.yaml b/examples/storage/v1beta1/accountnetworkrules.yaml similarity index 100% rename from examples/storage/accountnetworkrules.yaml rename to examples/storage/v1beta1/accountnetworkrules.yaml diff --git a/examples/storage/blob.yaml b/examples/storage/v1beta1/blob.yaml similarity index 100% rename from examples/storage/blob.yaml rename to examples/storage/v1beta1/blob.yaml diff --git a/examples/storage/blobinventorypolicy.yaml b/examples/storage/v1beta1/blobinventorypolicy.yaml similarity index 100% rename from examples/storage/blobinventorypolicy.yaml rename to examples/storage/v1beta1/blobinventorypolicy.yaml diff --git 
a/examples/storage/datalakegen2filesystem.yaml b/examples/storage/v1beta1/datalakegen2filesystem.yaml similarity index 100% rename from examples/storage/datalakegen2filesystem.yaml rename to examples/storage/v1beta1/datalakegen2filesystem.yaml diff --git a/examples/storage/datalakegen2path.yaml b/examples/storage/v1beta1/datalakegen2path.yaml similarity index 100% rename from examples/storage/datalakegen2path.yaml rename to examples/storage/v1beta1/datalakegen2path.yaml diff --git a/examples/storage/encryptionscope.yaml b/examples/storage/v1beta1/encryptionscope.yaml similarity index 100% rename from examples/storage/encryptionscope.yaml rename to examples/storage/v1beta1/encryptionscope.yaml diff --git a/examples/storage/hpccache.yaml b/examples/storage/v1beta1/hpccache.yaml similarity index 100% rename from examples/storage/hpccache.yaml rename to examples/storage/v1beta1/hpccache.yaml diff --git a/examples/storage/hpccacheaccesspolicy.yaml b/examples/storage/v1beta1/hpccacheaccesspolicy.yaml similarity index 100% rename from examples/storage/hpccacheaccesspolicy.yaml rename to examples/storage/v1beta1/hpccacheaccesspolicy.yaml diff --git a/examples/storage/hpccacheblobnfstarget.yaml b/examples/storage/v1beta1/hpccacheblobnfstarget.yaml similarity index 100% rename from examples/storage/hpccacheblobnfstarget.yaml rename to examples/storage/v1beta1/hpccacheblobnfstarget.yaml diff --git a/examples/storage/hpccacheblobtarget.yaml b/examples/storage/v1beta1/hpccacheblobtarget.yaml similarity index 100% rename from examples/storage/hpccacheblobtarget.yaml rename to examples/storage/v1beta1/hpccacheblobtarget.yaml diff --git a/examples/storage/hpccachenfstarget.yaml b/examples/storage/v1beta1/hpccachenfstarget.yaml similarity index 100% rename from examples/storage/hpccachenfstarget.yaml rename to examples/storage/v1beta1/hpccachenfstarget.yaml diff --git a/examples/storage/managementpolicy.yaml b/examples/storage/v1beta1/managementpolicy.yaml similarity index 100% 
rename from examples/storage/managementpolicy.yaml rename to examples/storage/v1beta1/managementpolicy.yaml diff --git a/examples/storage/objectreplication.yaml b/examples/storage/v1beta1/objectreplication.yaml similarity index 100% rename from examples/storage/objectreplication.yaml rename to examples/storage/v1beta1/objectreplication.yaml diff --git a/examples/storage/queue.yaml b/examples/storage/v1beta1/queue.yaml similarity index 100% rename from examples/storage/queue.yaml rename to examples/storage/v1beta1/queue.yaml diff --git a/examples/storage/share.yaml b/examples/storage/v1beta1/share.yaml similarity index 100% rename from examples/storage/share.yaml rename to examples/storage/v1beta1/share.yaml diff --git a/examples/storage/sharedirectory.yaml b/examples/storage/v1beta1/sharedirectory.yaml similarity index 100% rename from examples/storage/sharedirectory.yaml rename to examples/storage/v1beta1/sharedirectory.yaml diff --git a/examples/storage/sharefile.yaml b/examples/storage/v1beta1/sharefile.yaml similarity index 100% rename from examples/storage/sharefile.yaml rename to examples/storage/v1beta1/sharefile.yaml diff --git a/examples/storage/storagesync.yaml b/examples/storage/v1beta1/storagesync.yaml similarity index 100% rename from examples/storage/storagesync.yaml rename to examples/storage/v1beta1/storagesync.yaml diff --git a/examples/storage/table.yaml b/examples/storage/v1beta1/table.yaml similarity index 100% rename from examples/storage/table.yaml rename to examples/storage/v1beta1/table.yaml diff --git a/examples/storage/tableentity.yaml b/examples/storage/v1beta1/tableentity.yaml similarity index 100% rename from examples/storage/tableentity.yaml rename to examples/storage/v1beta1/tableentity.yaml diff --git a/examples/storagepool/diskpool.yaml b/examples/storagepool/v1beta1/diskpool.yaml similarity index 100% rename from examples/storagepool/diskpool.yaml rename to examples/storagepool/v1beta1/diskpool.yaml diff --git 
a/examples/storeconfig/vault.yaml b/examples/storeconfig/v1beta1/vault.yaml similarity index 100% rename from examples/storeconfig/vault.yaml rename to examples/storeconfig/v1beta1/vault.yaml diff --git a/examples/streamanalytics/analyticscluster.yaml b/examples/streamanalytics/v1beta1/analyticscluster.yaml similarity index 100% rename from examples/streamanalytics/analyticscluster.yaml rename to examples/streamanalytics/v1beta1/analyticscluster.yaml diff --git a/examples/streamanalytics/analyticsfunctionjavascriptuda.yaml b/examples/streamanalytics/v1beta1/analyticsfunctionjavascriptuda.yaml similarity index 100% rename from examples/streamanalytics/analyticsfunctionjavascriptuda.yaml rename to examples/streamanalytics/v1beta1/analyticsfunctionjavascriptuda.yaml diff --git a/examples/streamanalytics/analyticsmanagedprivateendpoint.yaml b/examples/streamanalytics/v1beta1/analyticsmanagedprivateendpoint.yaml similarity index 100% rename from examples/streamanalytics/analyticsmanagedprivateendpoint.yaml rename to examples/streamanalytics/v1beta1/analyticsmanagedprivateendpoint.yaml diff --git a/examples/streamanalytics/analyticsoutputfunction.yaml b/examples/streamanalytics/v1beta1/analyticsoutputfunction.yaml similarity index 100% rename from examples/streamanalytics/analyticsoutputfunction.yaml rename to examples/streamanalytics/v1beta1/analyticsoutputfunction.yaml diff --git a/examples/streamanalytics/analyticsoutputsynapse.yaml b/examples/streamanalytics/v1beta1/analyticsoutputsynapse.yaml similarity index 97% rename from examples/streamanalytics/analyticsoutputsynapse.yaml rename to examples/streamanalytics/v1beta1/analyticsoutputsynapse.yaml index 77a25253f..35d6ca0e7 100644 --- a/examples/streamanalytics/analyticsoutputsynapse.yaml +++ b/examples/streamanalytics/v1beta1/analyticsoutputsynapse.yaml @@ -33,7 +33,7 @@ metadata: name: example-synapse-workspace namespace: upbound-system annotations: - uptest.upbound.io/pre-delete-hook: 
testhooks/delete-outputSynapse.sh + uptest.upbound.io/pre-delete-hook: ../testhooks/delete-outputSynapse.sh type: Opaque stringData: example-key: dXBib3VuZHJvY2tz diff --git a/examples/streamanalytics/job.yaml b/examples/streamanalytics/v1beta1/job.yaml similarity index 100% rename from examples/streamanalytics/job.yaml rename to examples/streamanalytics/v1beta1/job.yaml diff --git a/examples/streamanalytics/outputblob.yaml b/examples/streamanalytics/v1beta1/outputblob.yaml similarity index 100% rename from examples/streamanalytics/outputblob.yaml rename to examples/streamanalytics/v1beta1/outputblob.yaml diff --git a/examples/streamanalytics/outputeventhub.yaml b/examples/streamanalytics/v1beta1/outputeventhub.yaml similarity index 100% rename from examples/streamanalytics/outputeventhub.yaml rename to examples/streamanalytics/v1beta1/outputeventhub.yaml diff --git a/examples/streamanalytics/outputmssql.yaml b/examples/streamanalytics/v1beta1/outputmssql.yaml similarity index 97% rename from examples/streamanalytics/outputmssql.yaml rename to examples/streamanalytics/v1beta1/outputmssql.yaml index d41e83b42..1740243b7 100644 --- a/examples/streamanalytics/outputmssql.yaml +++ b/examples/streamanalytics/v1beta1/outputmssql.yaml @@ -107,7 +107,7 @@ metadata: name: example-sql-server namespace: upbound-system annotations: - uptest.upbound.io/pre-delete-hook: testhooks/delete-mysql-assiciation.sh + uptest.upbound.io/pre-delete-hook: ../testhooks/delete-mysql-assiciation.sh type: Opaque data: password: dGVzdFBhc3N3b3JkITEyMw== diff --git a/examples/streamanalytics/outputpowerbi.yaml b/examples/streamanalytics/v1beta1/outputpowerbi.yaml similarity index 100% rename from examples/streamanalytics/outputpowerbi.yaml rename to examples/streamanalytics/v1beta1/outputpowerbi.yaml diff --git a/examples/streamanalytics/outputservicebusqueue.yaml b/examples/streamanalytics/v1beta1/outputservicebusqueue.yaml similarity index 98% rename from 
examples/streamanalytics/outputservicebusqueue.yaml rename to examples/streamanalytics/v1beta1/outputservicebusqueue.yaml index 8a4a430fd..f69e96a09 100644 --- a/examples/streamanalytics/outputservicebusqueue.yaml +++ b/examples/streamanalytics/v1beta1/outputservicebusqueue.yaml @@ -45,7 +45,7 @@ metadata: namespace: upbound-system annotations: upjet.upbound.io/manual-intervention: "The dependent is a long-running resource and has manual-intervention annotation." - uptest.upbound.io/pre-delete-hook: testhooks/delete-queue.sh + uptest.upbound.io/pre-delete-hook: ../testhooks/delete-queue.sh type: Opaque data: default_primary_key: key1 diff --git a/examples/streamanalytics/outputservicebustopic.yaml b/examples/streamanalytics/v1beta1/outputservicebustopic.yaml similarity index 98% rename from examples/streamanalytics/outputservicebustopic.yaml rename to examples/streamanalytics/v1beta1/outputservicebustopic.yaml index dffcd6e9b..85dbf4032 100644 --- a/examples/streamanalytics/outputservicebustopic.yaml +++ b/examples/streamanalytics/v1beta1/outputservicebustopic.yaml @@ -48,7 +48,7 @@ metadata: namespace: upbound-system annotations: upjet.upbound.io/manual-intervention: "The dependent is a long-running resource and has manual-intervention annotation." 
- uptest.upbound.io/pre-delete-hook: testhooks/delete-topic.sh + uptest.upbound.io/pre-delete-hook: ../testhooks/delete-topic.sh type: Opaque data: default_primary_key: key1 diff --git a/examples/streamanalytics/outputtable.yaml b/examples/streamanalytics/v1beta1/outputtable.yaml similarity index 100% rename from examples/streamanalytics/outputtable.yaml rename to examples/streamanalytics/v1beta1/outputtable.yaml diff --git a/examples/streamanalytics/referenceinputblob.yaml b/examples/streamanalytics/v1beta1/referenceinputblob.yaml similarity index 97% rename from examples/streamanalytics/referenceinputblob.yaml rename to examples/streamanalytics/v1beta1/referenceinputblob.yaml index ec90cf364..24e485353 100644 --- a/examples/streamanalytics/referenceinputblob.yaml +++ b/examples/streamanalytics/v1beta1/referenceinputblob.yaml @@ -44,7 +44,7 @@ metadata: name: example-storage-account namespace: upbound-system annotations: - uptest.upbound.io/pre-delete-hook: testhooks/delete-reference.sh + uptest.upbound.io/pre-delete-hook: ../testhooks/delete-reference.sh type: Opaque data: primary_access_key: key1 diff --git a/examples/streamanalytics/referenceinputmssql.yaml b/examples/streamanalytics/v1beta1/referenceinputmssql.yaml similarity index 100% rename from examples/streamanalytics/referenceinputmssql.yaml rename to examples/streamanalytics/v1beta1/referenceinputmssql.yaml diff --git a/examples/streamanalytics/streaminputblob.yaml b/examples/streamanalytics/v1beta1/streaminputblob.yaml similarity index 97% rename from examples/streamanalytics/streaminputblob.yaml rename to examples/streamanalytics/v1beta1/streaminputblob.yaml index c8df3d8fa..4874ce588 100644 --- a/examples/streamanalytics/streaminputblob.yaml +++ b/examples/streamanalytics/v1beta1/streaminputblob.yaml @@ -94,7 +94,7 @@ metadata: name: example-storage-account namespace: upbound-system annotations: - uptest.upbound.io/pre-delete-hook: testhooks/delete-stream.sh + uptest.upbound.io/pre-delete-hook: 
../testhooks/delete-stream.sh type: Opaque data: primary_access_key: key1 diff --git a/examples/streamanalytics/streaminputeventhub.yaml b/examples/streamanalytics/v1beta1/streaminputeventhub.yaml similarity index 98% rename from examples/streamanalytics/streaminputeventhub.yaml rename to examples/streamanalytics/v1beta1/streaminputeventhub.yaml index 51d158f84..9ca8afa2c 100644 --- a/examples/streamanalytics/streaminputeventhub.yaml +++ b/examples/streamanalytics/v1beta1/streaminputeventhub.yaml @@ -45,7 +45,7 @@ metadata: name: example-eventhub-namespace namespace: upbound-system annotations: - uptest.upbound.io/pre-delete-hook: testhooks/delete-srteam-hub.sh + uptest.upbound.io/pre-delete-hook: ../testhooks/delete-srteam-hub.sh type: Opaque data: default_primary_key: key1 diff --git a/examples/streamanalytics/streaminputiothub.yaml b/examples/streamanalytics/v1beta1/streaminputiothub.yaml similarity index 98% rename from examples/streamanalytics/streaminputiothub.yaml rename to examples/streamanalytics/v1beta1/streaminputiothub.yaml index 657565e40..51bd995c0 100644 --- a/examples/streamanalytics/streaminputiothub.yaml +++ b/examples/streamanalytics/v1beta1/streaminputiothub.yaml @@ -43,7 +43,7 @@ metadata: name: example-iothub namespace: upbound-system annotations: - uptest.upbound.io/pre-delete-hook: testhooks/delete-iot-hub.sh + uptest.upbound.io/pre-delete-hook: ../testhooks/delete-iot-hub.sh type: Opaque data: primary_key: key1 diff --git a/examples/synapse/firewallrule.yaml b/examples/synapse/v1beta1/firewallrule.yaml similarity index 100% rename from examples/synapse/firewallrule.yaml rename to examples/synapse/v1beta1/firewallrule.yaml diff --git a/examples/synapse/integrationruntimeazure.yaml b/examples/synapse/v1beta1/integrationruntimeazure.yaml similarity index 100% rename from examples/synapse/integrationruntimeazure.yaml rename to examples/synapse/v1beta1/integrationruntimeazure.yaml diff --git a/examples/synapse/integrationruntimeselfhosted.yaml 
b/examples/synapse/v1beta1/integrationruntimeselfhosted.yaml similarity index 100% rename from examples/synapse/integrationruntimeselfhosted.yaml rename to examples/synapse/v1beta1/integrationruntimeselfhosted.yaml diff --git a/examples/synapse/linkedservice.yaml b/examples/synapse/v1beta1/linkedservice.yaml similarity index 100% rename from examples/synapse/linkedservice.yaml rename to examples/synapse/v1beta1/linkedservice.yaml diff --git a/examples/synapse/managedprivateendpoint.yaml b/examples/synapse/v1beta1/managedprivateendpoint.yaml similarity index 100% rename from examples/synapse/managedprivateendpoint.yaml rename to examples/synapse/v1beta1/managedprivateendpoint.yaml diff --git a/examples/synapse/privatelinkhub.yaml b/examples/synapse/v1beta1/privatelinkhub.yaml similarity index 100% rename from examples/synapse/privatelinkhub.yaml rename to examples/synapse/v1beta1/privatelinkhub.yaml diff --git a/examples/synapse/roleassignment.yaml b/examples/synapse/v1beta1/roleassignment.yaml similarity index 100% rename from examples/synapse/roleassignment.yaml rename to examples/synapse/v1beta1/roleassignment.yaml diff --git a/examples/synapse/sparkpool.yaml b/examples/synapse/v1beta1/sparkpool.yaml similarity index 100% rename from examples/synapse/sparkpool.yaml rename to examples/synapse/v1beta1/sparkpool.yaml diff --git a/examples/synapse/sqlpool.yaml b/examples/synapse/v1beta1/sqlpool.yaml similarity index 100% rename from examples/synapse/sqlpool.yaml rename to examples/synapse/v1beta1/sqlpool.yaml diff --git a/examples/synapse/sqlpoolextendedauditingpolicy.yaml b/examples/synapse/v1beta1/sqlpoolextendedauditingpolicy.yaml similarity index 100% rename from examples/synapse/sqlpoolextendedauditingpolicy.yaml rename to examples/synapse/v1beta1/sqlpoolextendedauditingpolicy.yaml diff --git a/examples/synapse/sqlpoolsecurityalertpolicy.yaml b/examples/synapse/v1beta1/sqlpoolsecurityalertpolicy.yaml similarity index 100% rename from 
examples/synapse/sqlpoolsecurityalertpolicy.yaml rename to examples/synapse/v1beta1/sqlpoolsecurityalertpolicy.yaml diff --git a/examples/synapse/sqlpoolworkloadclassifier.yaml b/examples/synapse/v1beta1/sqlpoolworkloadclassifier.yaml similarity index 100% rename from examples/synapse/sqlpoolworkloadclassifier.yaml rename to examples/synapse/v1beta1/sqlpoolworkloadclassifier.yaml diff --git a/examples/synapse/sqlpoolworkloadgroup.yaml b/examples/synapse/v1beta1/sqlpoolworkloadgroup.yaml similarity index 100% rename from examples/synapse/sqlpoolworkloadgroup.yaml rename to examples/synapse/v1beta1/sqlpoolworkloadgroup.yaml diff --git a/examples/synapse/workspace.yaml b/examples/synapse/v1beta1/workspace.yaml similarity index 100% rename from examples/synapse/workspace.yaml rename to examples/synapse/v1beta1/workspace.yaml diff --git a/examples/synapse/workspaceaadadmin.yaml b/examples/synapse/v1beta1/workspaceaadadmin.yaml similarity index 100% rename from examples/synapse/workspaceaadadmin.yaml rename to examples/synapse/v1beta1/workspaceaadadmin.yaml diff --git a/examples/synapse/workspaceextendedauditingpolicy.yaml b/examples/synapse/v1beta1/workspaceextendedauditingpolicy.yaml similarity index 100% rename from examples/synapse/workspaceextendedauditingpolicy.yaml rename to examples/synapse/v1beta1/workspaceextendedauditingpolicy.yaml diff --git a/examples/synapse/workspacesecurityalertpolicy.yaml b/examples/synapse/v1beta1/workspacesecurityalertpolicy.yaml similarity index 100% rename from examples/synapse/workspacesecurityalertpolicy.yaml rename to examples/synapse/v1beta1/workspacesecurityalertpolicy.yaml diff --git a/examples/synapse/workspacesqlaadadmin.yaml b/examples/synapse/v1beta1/workspacesqlaadadmin.yaml similarity index 100% rename from examples/synapse/workspacesqlaadadmin.yaml rename to examples/synapse/v1beta1/workspacesqlaadadmin.yaml diff --git a/examples/synapse/workspacevulnerabilityassessment.yaml 
b/examples/synapse/v1beta1/workspacevulnerabilityassessment.yaml similarity index 100% rename from examples/synapse/workspacevulnerabilityassessment.yaml rename to examples/synapse/v1beta1/workspacevulnerabilityassessment.yaml diff --git a/examples/timeseriesinsights/eventsourceeventhub.yaml b/examples/timeseriesinsights/v1beta1/eventsourceeventhub.yaml similarity index 100% rename from examples/timeseriesinsights/eventsourceeventhub.yaml rename to examples/timeseriesinsights/v1beta1/eventsourceeventhub.yaml diff --git a/examples/timeseriesinsights/eventsourceiothub.yaml b/examples/timeseriesinsights/v1beta1/eventsourceiothub.yaml similarity index 100% rename from examples/timeseriesinsights/eventsourceiothub.yaml rename to examples/timeseriesinsights/v1beta1/eventsourceiothub.yaml diff --git a/examples/timeseriesinsights/gen2environment.yaml b/examples/timeseriesinsights/v1beta1/gen2environment.yaml similarity index 100% rename from examples/timeseriesinsights/gen2environment.yaml rename to examples/timeseriesinsights/v1beta1/gen2environment.yaml diff --git a/examples/timeseriesinsights/referencedataset.yaml b/examples/timeseriesinsights/v1beta1/referencedataset.yaml similarity index 100% rename from examples/timeseriesinsights/referencedataset.yaml rename to examples/timeseriesinsights/v1beta1/referencedataset.yaml diff --git a/examples/timeseriesinsights/standardenvironment.yaml b/examples/timeseriesinsights/v1beta1/standardenvironment.yaml similarity index 100% rename from examples/timeseriesinsights/standardenvironment.yaml rename to examples/timeseriesinsights/v1beta1/standardenvironment.yaml diff --git a/examples/web/appactiveslot.yaml b/examples/web/v1beta1/appactiveslot.yaml similarity index 100% rename from examples/web/appactiveslot.yaml rename to examples/web/v1beta1/appactiveslot.yaml diff --git a/examples/web/apphybridconnection.yaml b/examples/web/v1beta1/apphybridconnection.yaml similarity index 100% rename from examples/web/apphybridconnection.yaml 
rename to examples/web/v1beta1/apphybridconnection.yaml diff --git a/examples/web/appserviceplan.yaml b/examples/web/v1beta1/appserviceplan.yaml similarity index 100% rename from examples/web/appserviceplan.yaml rename to examples/web/v1beta1/appserviceplan.yaml diff --git a/examples/web/functionapp.yaml b/examples/web/v1beta1/functionapp.yaml similarity index 96% rename from examples/web/functionapp.yaml rename to examples/web/v1beta1/functionapp.yaml index 54c733ae3..c78b8a4ed 100644 --- a/examples/web/functionapp.yaml +++ b/examples/web/v1beta1/functionapp.yaml @@ -36,7 +36,7 @@ apiVersion: web.azure.upbound.io/v1beta1 kind: AppServicePlan metadata: annotations: - uptest.upbound.io/pre-delete-hook: testhooks/delete-function-app.sh + uptest.upbound.io/pre-delete-hook: ../testhooks/delete-function-app.sh meta.upbound.io/example-id: web/v1beta1/functionapp labels: testing.upbound.io/example-name: functionapp-sp diff --git a/examples/web/functionappactiveslot.yaml b/examples/web/v1beta1/functionappactiveslot.yaml similarity index 97% rename from examples/web/functionappactiveslot.yaml rename to examples/web/v1beta1/functionappactiveslot.yaml index 8c9a62983..56946fb57 100644 --- a/examples/web/functionappactiveslot.yaml +++ b/examples/web/v1beta1/functionappactiveslot.yaml @@ -83,7 +83,7 @@ apiVersion: web.azure.upbound.io/v1beta1 kind: ServicePlan metadata: annotations: - uptest.upbound.io/pre-delete-hook: testhooks/delete-linux-func-app.sh + uptest.upbound.io/pre-delete-hook: ../testhooks/delete-linux-func-app.sh meta.upbound.io/example-id: web/v1beta1/functionappactiveslot labels: testing.upbound.io/example-name: funcappactiveslot-sp diff --git a/examples/web/functionappfunction.yaml b/examples/web/v1beta1/functionappfunction.yaml similarity index 97% rename from examples/web/functionappfunction.yaml rename to examples/web/v1beta1/functionappfunction.yaml index fb986ac24..a8fdba1da 100644 --- a/examples/web/functionappfunction.yaml +++ 
b/examples/web/v1beta1/functionappfunction.yaml @@ -93,7 +93,7 @@ apiVersion: web.azure.upbound.io/v1beta1 kind: ServicePlan metadata: annotations: - uptest.upbound.io/pre-delete-hook: testhooks/delete-linux-func-app.sh + uptest.upbound.io/pre-delete-hook: ../testhooks/delete-linux-func-app.sh meta.upbound.io/example-id: web/v1beta1/functionappfunction labels: testing.upbound.io/example-name: funcappfunction-sp diff --git a/examples/web/functionapphybridconnection.yaml b/examples/web/v1beta1/functionapphybridconnection.yaml similarity index 100% rename from examples/web/functionapphybridconnection.yaml rename to examples/web/v1beta1/functionapphybridconnection.yaml diff --git a/examples/web/functionappslot.yaml b/examples/web/v1beta1/functionappslot.yaml similarity index 97% rename from examples/web/functionappslot.yaml rename to examples/web/v1beta1/functionappslot.yaml index 1bed6ea64..ad798032c 100644 --- a/examples/web/functionappslot.yaml +++ b/examples/web/v1beta1/functionappslot.yaml @@ -38,7 +38,7 @@ apiVersion: web.azure.upbound.io/v1beta1 kind: AppServicePlan metadata: annotations: - uptest.upbound.io/pre-delete-hook: testhooks/delete-function-app.sh + uptest.upbound.io/pre-delete-hook: ../testhooks/delete-function-app.sh meta.upbound.io/example-id: web/v1beta1/functionappslot labels: testing.upbound.io/example-name: funcappslot-sp diff --git a/examples/web/linuxfunctionapp.yaml b/examples/web/v1beta1/linuxfunctionapp.yaml similarity index 96% rename from examples/web/linuxfunctionapp.yaml rename to examples/web/v1beta1/linuxfunctionapp.yaml index 49de0bd75..46f2bb4bf 100644 --- a/examples/web/linuxfunctionapp.yaml +++ b/examples/web/v1beta1/linuxfunctionapp.yaml @@ -50,7 +50,7 @@ apiVersion: web.azure.upbound.io/v1beta1 kind: ServicePlan metadata: annotations: - uptest.upbound.io/pre-delete-hook: testhooks/delete-linux-func-app.sh + uptest.upbound.io/pre-delete-hook: ../testhooks/delete-linux-func-app.sh meta.upbound.io/example-id: 
web/v1beta1/linuxfunctionapp labels: testing.upbound.io/example-name: linuxfuncapp-sp diff --git a/examples/web/linuxfunctionappslot.yaml b/examples/web/v1beta1/linuxfunctionappslot.yaml similarity index 97% rename from examples/web/linuxfunctionappslot.yaml rename to examples/web/v1beta1/linuxfunctionappslot.yaml index 9e1e2dd31..fc96caf73 100644 --- a/examples/web/linuxfunctionappslot.yaml +++ b/examples/web/v1beta1/linuxfunctionappslot.yaml @@ -71,7 +71,7 @@ apiVersion: web.azure.upbound.io/v1beta1 kind: ServicePlan metadata: annotations: - uptest.upbound.io/pre-delete-hook: testhooks/delete-linux-func-app.sh + uptest.upbound.io/pre-delete-hook: ../testhooks/delete-linux-func-app.sh meta.upbound.io/example-id: web/v1beta1/linuxfunctionappslot labels: testing.upbound.io/example-name: linuxfuncnappslot-sp diff --git a/examples/web/linuxwebapp.yaml b/examples/web/v1beta1/linuxwebapp.yaml similarity index 94% rename from examples/web/linuxwebapp.yaml rename to examples/web/v1beta1/linuxwebapp.yaml index 0583b3b99..eedf49048 100644 --- a/examples/web/linuxwebapp.yaml +++ b/examples/web/v1beta1/linuxwebapp.yaml @@ -42,7 +42,7 @@ apiVersion: web.azure.upbound.io/v1beta1 kind: ServicePlan metadata: annotations: - uptest.upbound.io/pre-delete-hook: testhooks/delete-linux-app.sh + uptest.upbound.io/pre-delete-hook: ../testhooks/delete-linux-app.sh meta.upbound.io/example-id: web/v1beta1/linuxwebapp labels: testing.upbound.io/example-name: linuxwebapp-sp diff --git a/examples/web/linuxwebappslot.yaml b/examples/web/v1beta1/linuxwebappslot.yaml similarity index 96% rename from examples/web/linuxwebappslot.yaml rename to examples/web/v1beta1/linuxwebappslot.yaml index 6bb990ffe..fd0835716 100644 --- a/examples/web/linuxwebappslot.yaml +++ b/examples/web/v1beta1/linuxwebappslot.yaml @@ -61,7 +61,7 @@ apiVersion: web.azure.upbound.io/v1beta1 kind: ServicePlan metadata: annotations: - uptest.upbound.io/pre-delete-hook: testhooks/delete-linux-app.sh + 
uptest.upbound.io/pre-delete-hook: ../testhooks/delete-linux-app.sh meta.upbound.io/example-id: web/v1beta1/linuxwebappslot labels: testing.upbound.io/example-name: linuxwebappslot-sp diff --git a/examples/web/serviceplan.yaml b/examples/web/v1beta1/serviceplan.yaml similarity index 100% rename from examples/web/serviceplan.yaml rename to examples/web/v1beta1/serviceplan.yaml diff --git a/examples/web/sourcecontroltoken.yaml b/examples/web/v1beta1/sourcecontroltoken.yaml similarity index 100% rename from examples/web/sourcecontroltoken.yaml rename to examples/web/v1beta1/sourcecontroltoken.yaml diff --git a/examples/web/staticsite.yaml b/examples/web/v1beta1/staticsite.yaml similarity index 100% rename from examples/web/staticsite.yaml rename to examples/web/v1beta1/staticsite.yaml diff --git a/examples/web/windowsfunctionapp.yaml b/examples/web/v1beta1/windowsfunctionapp.yaml similarity index 100% rename from examples/web/windowsfunctionapp.yaml rename to examples/web/v1beta1/windowsfunctionapp.yaml diff --git a/examples/web/windowsfunctionappslot.yaml b/examples/web/v1beta1/windowsfunctionappslot.yaml similarity index 100% rename from examples/web/windowsfunctionappslot.yaml rename to examples/web/v1beta1/windowsfunctionappslot.yaml diff --git a/examples/web/windowswebapp.yaml b/examples/web/v1beta1/windowswebapp.yaml similarity index 100% rename from examples/web/windowswebapp.yaml rename to examples/web/v1beta1/windowswebapp.yaml diff --git a/examples/web/windowswebappslot.yaml b/examples/web/v1beta1/windowswebappslot.yaml similarity index 100% rename from examples/web/windowswebappslot.yaml rename to examples/web/v1beta1/windowswebappslot.yaml diff --git a/examples/web/v1beta2/appserviceplan.yaml b/examples/web/v1beta2/appserviceplan.yaml new file mode 100644 index 000000000..8684ac20f --- /dev/null +++ b/examples/web/v1beta2/appserviceplan.yaml @@ -0,0 +1,35 @@ +# SPDX-FileCopyrightText: 2024 The Crossplane Authors +# +# SPDX-License-Identifier: Apache-2.0 + 
+apiVersion: web.azure.upbound.io/v1beta2 +kind: AppServicePlan +metadata: + annotations: + meta.upbound.io/example-id: web/v1beta2/appserviceplan + labels: + testing.upbound.io/example-name: appserviceplansample + name: appserviceplansample +spec: + forProvider: + location: West Europe + resourceGroupNameSelector: + matchLabels: + testing.upbound.io/example-name: appserviceplan-rg + sku: + size: S1 + tier: Standard + +--- + +apiVersion: azure.upbound.io/v1beta1 +kind: ResourceGroup +metadata: + annotations: + meta.upbound.io/example-id: web/v1beta1/appserviceplan + labels: + testing.upbound.io/example-name: appserviceplan-rg + name: appserviceplan-rg +spec: + forProvider: + location: West Europe diff --git a/go.mod b/go.mod index 19824823f..dfc20b8fd 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( dario.cat/mergo v1.0.0 github.com/crossplane/crossplane-runtime v1.16.0-rc.2.0.20240510094504-3f697876fa57 github.com/crossplane/crossplane-tools v0.0.0-20230925130601-628280f8bf79 - github.com/crossplane/upjet v1.3.0 + github.com/crossplane/upjet v1.4.0-rc.0.0.20240515193317-92d1af84d242 github.com/google/go-cmp v0.6.0 github.com/hashicorp/terraform-json v0.17.1 github.com/hashicorp/terraform-plugin-sdk/v2 v2.30.0 diff --git a/go.sum b/go.sum index 95ce9e43b..23046f96d 100644 --- a/go.sum +++ b/go.sum @@ -79,8 +79,8 @@ github.com/crossplane/crossplane-runtime v1.16.0-rc.2.0.20240510094504-3f697876f github.com/crossplane/crossplane-runtime v1.16.0-rc.2.0.20240510094504-3f697876fa57/go.mod h1:Pz2tdGVMF6KDGzHZOkvKro0nKc8EzK0sb/nSA7pH4Dc= github.com/crossplane/crossplane-tools v0.0.0-20230925130601-628280f8bf79 h1:HigXs5tEQxWz0fcj8hzbU2UAZgEM7wPe0XRFOsrtF8Y= github.com/crossplane/crossplane-tools v0.0.0-20230925130601-628280f8bf79/go.mod h1:+e4OaFlOcmr0JvINHl/yvEYBrZawzTgj6pQumOH1SS0= -github.com/crossplane/upjet v1.3.0 h1:qRgcfqLz4M2v7enUku3xEriY5poc5XVbRl98nbvvu+E= -github.com/crossplane/upjet v1.3.0/go.mod h1:3pDVtCgyBc5f2Zx4K5HEPxxhjndmOc5CHCJNpIivK/g= 
+github.com/crossplane/upjet v1.4.0-rc.0.0.20240515193317-92d1af84d242 h1:ylmj67qVNh+AIDK+CH8BiXu41PlGSKBzAwMZApDEOds= +github.com/crossplane/upjet v1.4.0-rc.0.0.20240515193317-92d1af84d242/go.mod h1:3pDVtCgyBc5f2Zx4K5HEPxxhjndmOc5CHCJNpIivK/g= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/dave/jennifer v1.6.0 h1:MQ/6emI2xM7wt0tJzJzyUik2Q3Tcn2eE0vtYgh4GPVI= diff --git a/hack/main.go.tmpl b/hack/main.go.tmpl index c0c0e3d77..32b913e05 100644 --- a/hack/main.go.tmpl +++ b/hack/main.go.tmpl @@ -23,6 +23,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/statemetrics" tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/conversion" "gopkg.in/alecthomas/kingpin.v2" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -216,6 +217,7 @@ func main() { })), "cannot create default store config") } + kingpin.FatalIfError(conversion.RegisterConversions(o.Provider), "Cannot initialize the webhook conversion registry") kingpin.FatalIfError(controller.Setup_{{ .Group }}(mgr, o), "Cannot setup Azure controllers") kingpin.FatalIfError(mgr.Start(ctrl.SetupSignalHandler()), "Cannot start controller manager") } diff --git a/package/crds/alertsmanagement.azure.upbound.io_monitoractionruleactiongroups.yaml b/package/crds/alertsmanagement.azure.upbound.io_monitoractionruleactiongroups.yaml index b4383bad4..e319ec8f4 100644 --- a/package/crds/alertsmanagement.azure.upbound.io_monitoractionruleactiongroups.yaml +++ b/package/crds/alertsmanagement.azure.upbound.io_monitoractionruleactiongroups.yaml @@ -1036,3 +1036,967 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: 
string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: MonitorActionRuleActionGroup is the Schema for the MonitorActionRuleActionGroups + API. Manages an Monitor Action Rule which type is action group. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: MonitorActionRuleActionGroupSpec defines the desired state + of MonitorActionRuleActionGroup + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + actionGroupId: + description: Specifies the resource id of monitor action group. + type: string + actionGroupIdRef: + description: Reference to a MonitorActionGroup in insights to + populate actionGroupId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + actionGroupIdSelector: + description: Selector for a MonitorActionGroup in insights to + populate actionGroupId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + condition: + description: A condition block as defined below. + properties: + alertContext: + description: A alert_context block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals and NotEquals. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + alertRuleId: + description: A alert_rule_id block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals and NotEquals. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + description: + description: A description block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals and NotEquals. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + monitor: + description: A monitor block as defined below. 
+ properties: + operator: + description: The operator for a given condition. Possible + values are Equals and NotEquals. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + monitorService: + description: A monitor_service block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals and NotEquals. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + severity: + description: A severity block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals and NotEquals. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + targetResourceType: + description: A target_resource_type block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals and NotEquals. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + description: + description: Specifies a description for the Action Rule. + type: string + enabled: + description: Is the Action Rule enabled? Defaults to true. + type: boolean + resourceGroupName: + description: Specifies the name of the resource group in which + the Monitor Action Rule should exist. Changing this forces a + new resource to be created. 
+ type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + scope: + description: A scope block as defined below. + properties: + resourceIds: + description: A list of resource IDs of the given scope type + which will be the target of action rule. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of target scope. Possible + values are ResourceGroup and Resource. + type: string + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + actionGroupId: + description: Specifies the resource id of monitor action group. + type: string + actionGroupIdRef: + description: Reference to a MonitorActionGroup in insights to + populate actionGroupId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + actionGroupIdSelector: + description: Selector for a MonitorActionGroup in insights to + populate actionGroupId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + condition: + description: A condition block as defined below. + properties: + alertContext: + description: A alert_context block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals and NotEquals. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + alertRuleId: + description: A alert_rule_id block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals and NotEquals. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + description: + description: A description block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals and NotEquals. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + monitor: + description: A monitor block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals and NotEquals. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + monitorService: + description: A monitor_service block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals and NotEquals. 
+ type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + severity: + description: A severity block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals and NotEquals. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + targetResourceType: + description: A target_resource_type block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals and NotEquals. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + description: + description: Specifies a description for the Action Rule. + type: string + enabled: + description: Is the Action Rule enabled? Defaults to true. + type: boolean + scope: + description: A scope block as defined below. + properties: + resourceIds: + description: A list of resource IDs of the given scope type + which will be the target of action rule. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of target scope. Possible + values are ResourceGroup and Resource. + type: string + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. 
+ ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: MonitorActionRuleActionGroupStatus defines the observed state + of MonitorActionRuleActionGroup. + properties: + atProvider: + properties: + actionGroupId: + description: Specifies the resource id of monitor action group. + type: string + condition: + description: A condition block as defined below. + properties: + alertContext: + description: A alert_context block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals and NotEquals. + type: string + values: + description: A list of values to match for a given condition. 
+ The values should be valid resource types. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + alertRuleId: + description: A alert_rule_id block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals and NotEquals. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + description: + description: A description block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals and NotEquals. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + monitor: + description: A monitor block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals and NotEquals. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + monitorService: + description: A monitor_service block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals and NotEquals. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + severity: + description: A severity block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals and NotEquals. 
+ type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + targetResourceType: + description: A target_resource_type block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals and NotEquals. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + description: + description: Specifies a description for the Action Rule. + type: string + enabled: + description: Is the Action Rule enabled? Defaults to true. + type: boolean + id: + description: The ID of the Monitor Action Rule. + type: string + resourceGroupName: + description: Specifies the name of the resource group in which + the Monitor Action Rule should exist. Changing this forces a + new resource to be created. + type: string + scope: + description: A scope block as defined below. + properties: + resourceIds: + description: A list of resource IDs of the given scope type + which will be the target of action rule. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of target scope. Possible + values are ResourceGroup and Resource. + type: string + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. 
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/alertsmanagement.azure.upbound.io_monitoractionrulesuppressions.yaml b/package/crds/alertsmanagement.azure.upbound.io_monitoractionrulesuppressions.yaml index 37cfd7a83..170ee3ad3 100644 --- a/package/crds/alertsmanagement.azure.upbound.io_monitoractionrulesuppressions.yaml +++ b/package/crds/alertsmanagement.azure.upbound.io_monitoractionrulesuppressions.yaml @@ -1003,3 +1003,922 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: MonitorActionRuleSuppression is the Schema for the MonitorActionRuleSuppressions + API. Manages an Monitor Action Rule which type is suppression. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: MonitorActionRuleSuppressionSpec defines the desired state + of MonitorActionRuleSuppression + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + condition: + description: A condition block as defined below. + properties: + alertContext: + description: A alert_context block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals and NotEquals. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + alertRuleId: + description: A alert_rule_id block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals and NotEquals. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + description: + description: A description block as defined below. 
+ properties: + operator: + description: The operator for a given condition. Possible + values are Equals and NotEquals. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + monitor: + description: A monitor block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals and NotEquals. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + monitorService: + description: A monitor_service block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals and NotEquals. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + severity: + description: A severity block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals and NotEquals. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + targetResourceType: + description: A target_resource_type block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals and NotEquals. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + description: + description: Specifies a description for the Action Rule. + type: string + enabled: + description: Is the Action Rule enabled? Defaults to true. + type: boolean + resourceGroupName: + description: Specifies the name of the resource group in which + the Monitor Action Rule should exist. Changing this forces a + new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + scope: + description: A scope block as defined below. + properties: + resourceIds: + description: A list of resource IDs of the given scope type + which will be the target of action rule. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of target scope. Possible + values are ResourceGroup and Resource. + type: string + type: object + suppression: + description: A suppression block as defined below. + properties: + recurrenceType: + description: Specifies the type of suppression. Possible values + are Always, Daily, Monthly, Once, and Weekly. + type: string + schedule: + description: A schedule block as defined below. Required if + recurrence_type is Daily, Monthly, Once or Weekly. + properties: + endDateUtc: + description: specifies the recurrence UTC end datetime + (Y-m-d'T'H:M:S'Z'). + type: string + recurrenceMonthly: + description: specifies the list of dayOfMonth to recurrence. + Possible values are between 1 - 31. Required if recurrence_type + is Monthly. + items: + type: number + type: array + x-kubernetes-list-type: set + recurrenceWeekly: + description: specifies the list of dayOfWeek to recurrence. 
+ Possible values are Sunday, Monday, Tuesday, Wednesday, + Thursday, Friday and Saturday. + items: + type: string + type: array + x-kubernetes-list-type: set + startDateUtc: + description: specifies the recurrence UTC start datetime + (Y-m-d'T'H:M:S'Z'). + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + condition: + description: A condition block as defined below. + properties: + alertContext: + description: A alert_context block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals and NotEquals. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + alertRuleId: + description: A alert_rule_id block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals and NotEquals. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: object + description: + description: A description block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals and NotEquals. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + monitor: + description: A monitor block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals and NotEquals. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + monitorService: + description: A monitor_service block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals and NotEquals. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + severity: + description: A severity block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals and NotEquals. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + targetResourceType: + description: A target_resource_type block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals and NotEquals. + type: string + values: + description: A list of values to match for a given condition. 
+ The values should be valid resource types. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + description: + description: Specifies a description for the Action Rule. + type: string + enabled: + description: Is the Action Rule enabled? Defaults to true. + type: boolean + scope: + description: A scope block as defined below. + properties: + resourceIds: + description: A list of resource IDs of the given scope type + which will be the target of action rule. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of target scope. Possible + values are ResourceGroup and Resource. + type: string + type: object + suppression: + description: A suppression block as defined below. + properties: + recurrenceType: + description: Specifies the type of suppression. Possible values + are Always, Daily, Monthly, Once, and Weekly. + type: string + schedule: + description: A schedule block as defined below. Required if + recurrence_type is Daily, Monthly, Once or Weekly. + properties: + endDateUtc: + description: specifies the recurrence UTC end datetime + (Y-m-d'T'H:M:S'Z'). + type: string + recurrenceMonthly: + description: specifies the list of dayOfMonth to recurrence. + Possible values are between 1 - 31. Required if recurrence_type + is Monthly. + items: + type: number + type: array + x-kubernetes-list-type: set + recurrenceWeekly: + description: specifies the list of dayOfWeek to recurrence. + Possible values are Sunday, Monday, Tuesday, Wednesday, + Thursday, Friday and Saturday. + items: + type: string + type: array + x-kubernetes-list-type: set + startDateUtc: + description: specifies the recurrence UTC start datetime + (Y-m-d'T'H:M:S'Z'). + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. 
+ type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. 
+ - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.suppression is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.suppression) + || (has(self.initProvider) && has(self.initProvider.suppression))' + status: + description: MonitorActionRuleSuppressionStatus defines the observed state + of MonitorActionRuleSuppression. 
+ properties: + atProvider: + properties: + condition: + description: A condition block as defined below. + properties: + alertContext: + description: A alert_context block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals and NotEquals. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + alertRuleId: + description: A alert_rule_id block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals and NotEquals. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + description: + description: A description block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals and NotEquals. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + monitor: + description: A monitor block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals and NotEquals. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + monitorService: + description: A monitor_service block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals and NotEquals. 
+ type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + severity: + description: A severity block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals and NotEquals. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + targetResourceType: + description: A target_resource_type block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals and NotEquals. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + description: + description: Specifies a description for the Action Rule. + type: string + enabled: + description: Is the Action Rule enabled? Defaults to true. + type: boolean + id: + description: The ID of the Monitor Action Rule. + type: string + resourceGroupName: + description: Specifies the name of the resource group in which + the Monitor Action Rule should exist. Changing this forces a + new resource to be created. + type: string + scope: + description: A scope block as defined below. + properties: + resourceIds: + description: A list of resource IDs of the given scope type + which will be the target of action rule. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of target scope. Possible + values are ResourceGroup and Resource. + type: string + type: object + suppression: + description: A suppression block as defined below. 
+ properties: + recurrenceType: + description: Specifies the type of suppression. Possible values + are Always, Daily, Monthly, Once, and Weekly. + type: string + schedule: + description: A schedule block as defined below. Required if + recurrence_type is Daily, Monthly, Once or Weekly. + properties: + endDateUtc: + description: specifies the recurrence UTC end datetime + (Y-m-d'T'H:M:S'Z'). + type: string + recurrenceMonthly: + description: specifies the list of dayOfMonth to recurrence. + Possible values are between 1 - 31. Required if recurrence_type + is Monthly. + items: + type: number + type: array + x-kubernetes-list-type: set + recurrenceWeekly: + description: specifies the list of dayOfWeek to recurrence. + Possible values are Sunday, Monday, Tuesday, Wednesday, + Thursday, Friday and Saturday. + items: + type: string + type: array + x-kubernetes-list-type: set + startDateUtc: + description: specifies the recurrence UTC start datetime + (Y-m-d'T'H:M:S'Z'). + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
+ format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/alertsmanagement.azure.upbound.io_monitoralertprocessingruleactiongroups.yaml b/package/crds/alertsmanagement.azure.upbound.io_monitoralertprocessingruleactiongroups.yaml index 2d41def4f..4086a8754 100644 --- a/package/crds/alertsmanagement.azure.upbound.io_monitoralertprocessingruleactiongroups.yaml +++ b/package/crds/alertsmanagement.azure.upbound.io_monitoralertprocessingruleactiongroups.yaml @@ -1638,3 +1638,1485 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: MonitorAlertProcessingRuleActionGroup is the Schema for the MonitorAlertProcessingRuleActionGroups + API. 
Manages an Alert Processing Rule which apply action group. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: MonitorAlertProcessingRuleActionGroupSpec defines the desired + state of MonitorAlertProcessingRuleActionGroup + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + addActionGroupIds: + description: Specifies a list of Action Group IDs. + items: + type: string + type: array + addActionGroupIdsRefs: + description: References to MonitorActionGroup in insights to populate + addActionGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + addActionGroupIdsSelector: + description: Selector for a list of MonitorActionGroup in insights + to populate addActionGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + condition: + description: A condition block as defined below. + properties: + alertContext: + description: A alert_context block as defined above. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + alertRuleId: + description: A alert_rule_id block as defined above. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + alertRuleName: + description: A alert_rule_name block as defined above. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + description: + description: A description block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. 
Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + monitorCondition: + description: A monitor_condition block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + monitorService: + description: A monitor_service block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + severity: + description: A severity block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + signalType: + description: A signal_type block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + targetResource: + description: A target_resource block as defined below. 
+ properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + targetResourceGroup: + description: A target_resource_group block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + targetResourceType: + description: A target_resource_type block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + type: object + description: + description: Specifies a description for the Alert Processing + Rule. + type: string + enabled: + description: Should the Alert Processing Rule be enabled? Defaults + to true. + type: boolean + resourceGroupName: + description: The name of the Resource Group where the Alert Processing + Rule should exist. Changing this forces a new Alert Processing + Rule to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + schedule: + description: A schedule block as defined below. + properties: + effectiveFrom: + description: Specifies the Alert Processing Rule effective + start time (Y-m-d'T'H:M:S). + type: string + effectiveUntil: + description: Specifies the Alert Processing Rule effective + end time (Y-m-d'T'H:M:S). + type: string + recurrence: + description: A recurrence block as defined above. + properties: + daily: + description: One or more daily blocks as defined above. + items: + properties: + endTime: + description: Specifies the recurrence end time (H:M:S). + type: string + startTime: + description: Specifies the recurrence start time + (H:M:S). + type: string + type: object + type: array + monthly: + description: One or more monthly blocks as defined above. + items: + properties: + daysOfMonth: + description: Specifies a list of dayOfMonth to recurrence. + Possible values are integers between 1 - 31. + items: + type: number + type: array + endTime: + description: Specifies the recurrence end time (H:M:S). + type: string + startTime: + description: Specifies the recurrence start time + (H:M:S). + type: string + type: object + type: array + weekly: + description: One or more weekly blocks as defined below. + items: + properties: + daysOfWeek: + description: Specifies a list of dayOfWeek to recurrence. + Possible values are Sunday, Monday, Tuesday, Wednesday, + Thursday, Friday, and Saturday. + items: + type: string + type: array + endTime: + description: Specifies the recurrence end time (H:M:S). + type: string + startTime: + description: Specifies the recurrence start time + (H:M:S). + type: string + type: object + type: array + type: object + timeZone: + description: The time zone (e.g. Pacific Standard time, Eastern + Standard Time). Defaults to UTC. possible values are defined + here. 
+ type: string + type: object + scopes: + description: A list of resource IDs which will be the target of + alert processing rule. + items: + type: string + type: array + scopesRefs: + description: References to ResourceGroup in azure to populate + scopes. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + scopesSelector: + description: Selector for a list of ResourceGroup in azure to + populate scopes. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Alert Processing Rule. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + addActionGroupIds: + description: Specifies a list of Action Group IDs. + items: + type: string + type: array + addActionGroupIdsRefs: + description: References to MonitorActionGroup in insights to populate + addActionGroupIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + addActionGroupIdsSelector: + description: Selector for a list of MonitorActionGroup in insights + to populate addActionGroupIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + condition: + description: A condition block as defined below. 
+ properties: + alertContext: + description: A alert_context block as defined above. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + alertRuleId: + description: A alert_rule_id block as defined above. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + alertRuleName: + description: A alert_rule_name block as defined above. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + description: + description: A description block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + monitorCondition: + description: A monitor_condition block as defined below. + properties: + operator: + description: The operator for a given condition. 
Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + monitorService: + description: A monitor_service block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + severity: + description: A severity block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + signalType: + description: A signal_type block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + targetResource: + description: A target_resource block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. 
Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + targetResourceGroup: + description: A target_resource_group block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + targetResourceType: + description: A target_resource_type block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + type: object + description: + description: Specifies a description for the Alert Processing + Rule. + type: string + enabled: + description: Should the Alert Processing Rule be enabled? Defaults + to true. + type: boolean + schedule: + description: A schedule block as defined below. + properties: + effectiveFrom: + description: Specifies the Alert Processing Rule effective + start time (Y-m-d'T'H:M:S). + type: string + effectiveUntil: + description: Specifies the Alert Processing Rule effective + end time (Y-m-d'T'H:M:S). + type: string + recurrence: + description: A recurrence block as defined above. + properties: + daily: + description: One or more daily blocks as defined above. + items: + properties: + endTime: + description: Specifies the recurrence end time (H:M:S). + type: string + startTime: + description: Specifies the recurrence start time + (H:M:S). + type: string + type: object + type: array + monthly: + description: One or more monthly blocks as defined above. 
+ items: + properties: + daysOfMonth: + description: Specifies a list of dayOfMonth to recurrence. + Possible values are integers between 1 - 31. + items: + type: number + type: array + endTime: + description: Specifies the recurrence end time (H:M:S). + type: string + startTime: + description: Specifies the recurrence start time + (H:M:S). + type: string + type: object + type: array + weekly: + description: One or more weekly blocks as defined below. + items: + properties: + daysOfWeek: + description: Specifies a list of dayOfWeek to recurrence. + Possible values are Sunday, Monday, Tuesday, Wednesday, + Thursday, Friday, and Saturday. + items: + type: string + type: array + endTime: + description: Specifies the recurrence end time (H:M:S). + type: string + startTime: + description: Specifies the recurrence start time + (H:M:S). + type: string + type: object + type: array + type: object + timeZone: + description: The time zone (e.g. Pacific Standard time, Eastern + Standard Time). Defaults to UTC. possible values are defined + here. + type: string + type: object + scopes: + description: A list of resource IDs which will be the target of + alert processing rule. + items: + type: string + type: array + scopesRefs: + description: References to ResourceGroup in azure to populate + scopes. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + scopesSelector: + description: Selector for a list of ResourceGroup in azure to + populate scopes. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Alert Processing Rule. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. 
+ This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: MonitorAlertProcessingRuleActionGroupStatus defines the observed + state of MonitorAlertProcessingRuleActionGroup. + properties: + atProvider: + properties: + addActionGroupIds: + description: Specifies a list of Action Group IDs. + items: + type: string + type: array + condition: + description: A condition block as defined below. + properties: + alertContext: + description: A alert_context block as defined above. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. 
+ type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + alertRuleId: + description: A alert_rule_id block as defined above. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + alertRuleName: + description: A alert_rule_name block as defined above. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + description: + description: A description block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + monitorCondition: + description: A monitor_condition block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. 
Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + monitorService: + description: A monitor_service block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + severity: + description: A severity block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + signalType: + description: A signal_type block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + targetResource: + description: A target_resource block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + targetResourceGroup: + description: A target_resource_group block as defined below. 
+ properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + targetResourceType: + description: A target_resource_type block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + type: object + description: + description: Specifies a description for the Alert Processing + Rule. + type: string + enabled: + description: Should the Alert Processing Rule be enabled? Defaults + to true. + type: boolean + id: + description: The ID of the Alert Processing Rule. + type: string + resourceGroupName: + description: The name of the Resource Group where the Alert Processing + Rule should exist. Changing this forces a new Alert Processing + Rule to be created. + type: string + schedule: + description: A schedule block as defined below. + properties: + effectiveFrom: + description: Specifies the Alert Processing Rule effective + start time (Y-m-d'T'H:M:S). + type: string + effectiveUntil: + description: Specifies the Alert Processing Rule effective + end time (Y-m-d'T'H:M:S). + type: string + recurrence: + description: A recurrence block as defined above. + properties: + daily: + description: One or more daily blocks as defined above. + items: + properties: + endTime: + description: Specifies the recurrence end time (H:M:S). + type: string + startTime: + description: Specifies the recurrence start time + (H:M:S). 
+ type: string + type: object + type: array + monthly: + description: One or more monthly blocks as defined above. + items: + properties: + daysOfMonth: + description: Specifies a list of dayOfMonth to recurrence. + Possible values are integers between 1 - 31. + items: + type: number + type: array + endTime: + description: Specifies the recurrence end time (H:M:S). + type: string + startTime: + description: Specifies the recurrence start time + (H:M:S). + type: string + type: object + type: array + weekly: + description: One or more weekly blocks as defined below. + items: + properties: + daysOfWeek: + description: Specifies a list of dayOfWeek to recurrence. + Possible values are Sunday, Monday, Tuesday, Wednesday, + Thursday, Friday, and Saturday. + items: + type: string + type: array + endTime: + description: Specifies the recurrence end time (H:M:S). + type: string + startTime: + description: Specifies the recurrence start time + (H:M:S). + type: string + type: object + type: array + type: object + timeZone: + description: The time zone (e.g. Pacific Standard time, Eastern + Standard Time). Defaults to UTC. possible values are defined + here. + type: string + type: object + scopes: + description: A list of resource IDs which will be the target of + alert processing rule. + items: + type: string + type: array + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Alert Processing Rule. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/alertsmanagement.azure.upbound.io_monitoralertprocessingrulesuppressions.yaml b/package/crds/alertsmanagement.azure.upbound.io_monitoralertprocessingrulesuppressions.yaml index d628e631c..96ec77ad4 100644 --- a/package/crds/alertsmanagement.azure.upbound.io_monitoralertprocessingrulesuppressions.yaml +++ b/package/crds/alertsmanagement.azure.upbound.io_monitoralertprocessingrulesuppressions.yaml @@ -1465,3 +1465,1312 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: MonitorAlertProcessingRuleSuppression is the Schema for the MonitorAlertProcessingRuleSuppressions + API. Manages an Alert Processing Rule which suppress notifications. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: MonitorAlertProcessingRuleSuppressionSpec defines the desired + state of MonitorAlertProcessingRuleSuppression + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + condition: + description: A condition block as defined below. + properties: + alertContext: + description: A alert_context block as defined above. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + alertRuleId: + description: A alert_rule_id block as defined above. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. 
Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + alertRuleName: + description: A alert_rule_name block as defined above. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + description: + description: A description block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + monitorCondition: + description: A monitor_condition block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + monitorService: + description: A monitor_service block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + severity: + description: A severity block as defined below. 
+ properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + signalType: + description: A signal_type block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + targetResource: + description: A target_resource block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + targetResourceGroup: + description: A target_resource_group block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + targetResourceType: + description: A target_resource_type block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. 
+ type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + type: object + description: + description: Specifies a description for the Alert Processing + Rule. + type: string + enabled: + description: Should the Alert Processing Rule be enabled? Defaults + to true. + type: boolean + resourceGroupName: + description: The name of the Resource Group where the Alert Processing + Rule should exist. Changing this forces a new Alert Processing + Rule to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + schedule: + description: A schedule block as defined below. + properties: + effectiveFrom: + description: Specifies the Alert Processing Rule effective + start time (Y-m-d'T'H:M:S). + type: string + effectiveUntil: + description: Specifies the Alert Processing Rule effective + end time (Y-m-d'T'H:M:S). + type: string + recurrence: + description: A recurrence block as defined above. + properties: + daily: + description: One or more daily blocks as defined above. + items: + properties: + endTime: + description: Specifies the recurrence end time (H:M:S). + type: string + startTime: + description: Specifies the recurrence start time + (H:M:S). + type: string + type: object + type: array + monthly: + description: One or more monthly blocks as defined above. + items: + properties: + daysOfMonth: + description: Specifies a list of dayOfMonth to recurrence. + Possible values are integers between 1 - 31. + items: + type: number + type: array + endTime: + description: Specifies the recurrence end time (H:M:S). 
+ type: string + startTime: + description: Specifies the recurrence start time + (H:M:S). + type: string + type: object + type: array + weekly: + description: One or more weekly blocks as defined below. + items: + properties: + daysOfWeek: + description: Specifies a list of dayOfWeek to recurrence. + Possible values are Sunday, Monday, Tuesday, Wednesday, + Thursday, Friday, and Saturday. + items: + type: string + type: array + endTime: + description: Specifies the recurrence end time (H:M:S). + type: string + startTime: + description: Specifies the recurrence start time + (H:M:S). + type: string + type: object + type: array + type: object + timeZone: + description: The time zone (e.g. Pacific Standard time, Eastern + Standard Time). Defaults to UTC. possible values are defined + here. + type: string + type: object + scopes: + description: A list of resource IDs which will be the target of + Alert Processing Rule. + items: + type: string + type: array + scopesRefs: + description: References to ResourceGroup in azure to populate + scopes. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + scopesSelector: + description: Selector for a list of ResourceGroup in azure to + populate scopes. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Alert Processing Rule. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + condition: + description: A condition block as defined below. + properties: + alertContext: + description: A alert_context block as defined above. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + alertRuleId: + description: A alert_rule_id block as defined above. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + alertRuleName: + description: A alert_rule_name block as defined above. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + description: + description: A description block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. 
Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + monitorCondition: + description: A monitor_condition block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + monitorService: + description: A monitor_service block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + severity: + description: A severity block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + signalType: + description: A signal_type block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + targetResource: + description: A target_resource block as defined below. 
+ properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + targetResourceGroup: + description: A target_resource_group block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + targetResourceType: + description: A target_resource_type block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + type: object + description: + description: Specifies a description for the Alert Processing + Rule. + type: string + enabled: + description: Should the Alert Processing Rule be enabled? Defaults + to true. + type: boolean + schedule: + description: A schedule block as defined below. + properties: + effectiveFrom: + description: Specifies the Alert Processing Rule effective + start time (Y-m-d'T'H:M:S). + type: string + effectiveUntil: + description: Specifies the Alert Processing Rule effective + end time (Y-m-d'T'H:M:S). + type: string + recurrence: + description: A recurrence block as defined above. + properties: + daily: + description: One or more daily blocks as defined above. 
+ items: + properties: + endTime: + description: Specifies the recurrence end time (H:M:S). + type: string + startTime: + description: Specifies the recurrence start time + (H:M:S). + type: string + type: object + type: array + monthly: + description: One or more monthly blocks as defined above. + items: + properties: + daysOfMonth: + description: Specifies a list of dayOfMonth to recurrence. + Possible values are integers between 1 - 31. + items: + type: number + type: array + endTime: + description: Specifies the recurrence end time (H:M:S). + type: string + startTime: + description: Specifies the recurrence start time + (H:M:S). + type: string + type: object + type: array + weekly: + description: One or more weekly blocks as defined below. + items: + properties: + daysOfWeek: + description: Specifies a list of dayOfWeek to recurrence. + Possible values are Sunday, Monday, Tuesday, Wednesday, + Thursday, Friday, and Saturday. + items: + type: string + type: array + endTime: + description: Specifies the recurrence end time (H:M:S). + type: string + startTime: + description: Specifies the recurrence start time + (H:M:S). + type: string + type: object + type: array + type: object + timeZone: + description: The time zone (e.g. Pacific Standard time, Eastern + Standard Time). Defaults to UTC. possible values are defined + here. + type: string + type: object + scopes: + description: A list of resource IDs which will be the target of + Alert Processing Rule. + items: + type: string + type: array + scopesRefs: + description: References to ResourceGroup in azure to populate + scopes. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + scopesSelector: + description: Selector for a list of ResourceGroup in azure to + populate scopes. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Alert Processing Rule. 
+ type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. 
+ - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: MonitorAlertProcessingRuleSuppressionStatus defines the observed + state of MonitorAlertProcessingRuleSuppression. + properties: + atProvider: + properties: + condition: + description: A condition block as defined below. + properties: + alertContext: + description: A alert_context block as defined above. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. 
+ type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + alertRuleId: + description: A alert_rule_id block as defined above. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + alertRuleName: + description: A alert_rule_name block as defined above. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + description: + description: A description block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + monitorCondition: + description: A monitor_condition block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. 
Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + monitorService: + description: A monitor_service block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + severity: + description: A severity block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + signalType: + description: A signal_type block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + targetResource: + description: A target_resource block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + targetResourceGroup: + description: A target_resource_group block as defined below. 
+ properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + targetResourceType: + description: A target_resource_type block as defined below. + properties: + operator: + description: The operator for a given condition. Possible + values are Equals, NotEquals, Contains, and DoesNotContain. + type: string + values: + description: A list of values to match for a given condition. + The values should be valid resource types. (e.g. Microsoft.Compute/VirtualMachines) + items: + type: string + type: array + type: object + type: object + description: + description: Specifies a description for the Alert Processing + Rule. + type: string + enabled: + description: Should the Alert Processing Rule be enabled? Defaults + to true. + type: boolean + id: + description: The ID of the Alert Processing Rule. + type: string + resourceGroupName: + description: The name of the Resource Group where the Alert Processing + Rule should exist. Changing this forces a new Alert Processing + Rule to be created. + type: string + schedule: + description: A schedule block as defined below. + properties: + effectiveFrom: + description: Specifies the Alert Processing Rule effective + start time (Y-m-d'T'H:M:S). + type: string + effectiveUntil: + description: Specifies the Alert Processing Rule effective + end time (Y-m-d'T'H:M:S). + type: string + recurrence: + description: A recurrence block as defined above. + properties: + daily: + description: One or more daily blocks as defined above. + items: + properties: + endTime: + description: Specifies the recurrence end time (H:M:S). + type: string + startTime: + description: Specifies the recurrence start time + (H:M:S). 
+ type: string + type: object + type: array + monthly: + description: One or more monthly blocks as defined above. + items: + properties: + daysOfMonth: + description: Specifies a list of dayOfMonth to recurrence. + Possible values are integers between 1 - 31. + items: + type: number + type: array + endTime: + description: Specifies the recurrence end time (H:M:S). + type: string + startTime: + description: Specifies the recurrence start time + (H:M:S). + type: string + type: object + type: array + weekly: + description: One or more weekly blocks as defined below. + items: + properties: + daysOfWeek: + description: Specifies a list of dayOfWeek to recurrence. + Possible values are Sunday, Monday, Tuesday, Wednesday, + Thursday, Friday, and Saturday. + items: + type: string + type: array + endTime: + description: Specifies the recurrence end time (H:M:S). + type: string + startTime: + description: Specifies the recurrence start time + (H:M:S). + type: string + type: object + type: array + type: object + timeZone: + description: The time zone (e.g. Pacific Standard time, Eastern + Standard Time). Defaults to UTC. possible values are defined + here. + type: string + type: object + scopes: + description: A list of resource IDs which will be the target of + Alert Processing Rule. + items: + type: string + type: array + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Alert Processing Rule. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/alertsmanagement.azure.upbound.io_monitorsmartdetectoralertrules.yaml b/package/crds/alertsmanagement.azure.upbound.io_monitorsmartdetectoralertrules.yaml index ef193722e..9dbf9049b 100644 --- a/package/crds/alertsmanagement.azure.upbound.io_monitorsmartdetectoralertrules.yaml +++ b/package/crds/alertsmanagement.azure.upbound.io_monitorsmartdetectoralertrules.yaml @@ -1026,3 +1026,1005 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: MonitorSmartDetectorAlertRule is the Schema for the MonitorSmartDetectorAlertRules + API. Manages an Monitor Smart Detector Alert Rule. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: MonitorSmartDetectorAlertRuleSpec defines the desired state + of MonitorSmartDetectorAlertRule + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + actionGroup: + description: An action_group block as defined below. + properties: + emailSubject: + description: Specifies a custom email subject if Email Receiver + is specified in Monitor Action Group resource. + type: string + ids: + description: Specifies the action group ids. + items: + type: string + type: array + x-kubernetes-list-type: set + idsRefs: + description: References to MonitorActionGroup in insights + to populate ids. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + idsSelector: + description: Selector for a list of MonitorActionGroup in + insights to populate ids. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + webhookPayload: + description: A JSON String which Specifies the custom webhook + payload if Webhook Receiver is specified in Monitor Action + Group resource. + type: string + type: object + description: + description: Specifies a description for the Smart Detector Alert + Rule. 
+ type: string + detectorType: + description: Specifies the Built-In Smart Detector type that this + alert rule will use. Currently the only possible values are + FailureAnomaliesDetector, RequestPerformanceDegradationDetector, + DependencyPerformanceDegradationDetector, ExceptionVolumeChangedDetector, + TraceSeverityDetector, MemoryLeakDetector. + type: string + enabled: + description: Is the Smart Detector Alert Rule enabled? Defaults + to true. + type: boolean + frequency: + description: Specifies the frequency of this Smart Detector Alert + Rule in ISO8601 format. + type: string + name: + description: Specifies the name of the Monitor Smart Detector + Alert Rule. Changing this forces a new resource to be created. + type: string + resourceGroupName: + description: Specifies the name of the resource group in which + the Monitor Smart Detector Alert Rule should exist. Changing + this forces a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + scopeResourceIds: + description: Specifies the scopes of this Smart Detector Alert + Rule. + items: + type: string + type: array + x-kubernetes-list-type: set + scopeResourceIdsRefs: + description: References to ApplicationInsights in insights to + populate scopeResourceIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + scopeResourceIdsSelector: + description: Selector for a list of ApplicationInsights in insights + to populate scopeResourceIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + severity: + description: Specifies the severity of this Smart Detector Alert + Rule. 
Possible values are Sev0, Sev1, Sev2, Sev3 or Sev4. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + throttlingDuration: + description: Specifies the duration (in ISO8601 format) to wait + before notifying on the alert rule again. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + actionGroup: + description: An action_group block as defined below. + properties: + emailSubject: + description: Specifies a custom email subject if Email Receiver + is specified in Monitor Action Group resource. + type: string + ids: + description: Specifies the action group ids. + items: + type: string + type: array + x-kubernetes-list-type: set + idsRefs: + description: References to MonitorActionGroup in insights + to populate ids. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + idsSelector: + description: Selector for a list of MonitorActionGroup in + insights to populate ids. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + webhookPayload: + description: A JSON String which Specifies the custom webhook + payload if Webhook Receiver is specified in Monitor Action + Group resource. 
+ type: string + type: object + description: + description: Specifies a description for the Smart Detector Alert + Rule. + type: string + detectorType: + description: Specifies the Built-In Smart Detector type that this + alert rule will use. Currently the only possible values are + FailureAnomaliesDetector, RequestPerformanceDegradationDetector, + DependencyPerformanceDegradationDetector, ExceptionVolumeChangedDetector, + TraceSeverityDetector, MemoryLeakDetector. + type: string + enabled: + description: Is the Smart Detector Alert Rule enabled? Defaults + to true. + type: boolean + frequency: + description: Specifies the frequency of this Smart Detector Alert + Rule in ISO8601 format. + type: string + name: + description: Specifies the name of the Monitor Smart Detector + Alert Rule. Changing this forces a new resource to be created. + type: string + resourceGroupName: + description: Specifies the name of the resource group in which + the Monitor Smart Detector Alert Rule should exist. Changing + this forces a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + scopeResourceIds: + description: Specifies the scopes of this Smart Detector Alert + Rule. + items: + type: string + type: array + x-kubernetes-list-type: set + scopeResourceIdsRefs: + description: References to ApplicationInsights in insights to + populate scopeResourceIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + scopeResourceIdsSelector: + description: Selector for a list of ApplicationInsights in insights + to populate scopeResourceIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + severity: + description: Specifies the severity of this Smart Detector Alert + Rule. 
Possible values are Sev0, Sev1, Sev2, Sev3 or Sev4. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + throttlingDuration: + description: Specifies the duration (in ISO8601 format) to wait + before notifying on the alert rule again. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.actionGroup is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.actionGroup) + || (has(self.initProvider) && has(self.initProvider.actionGroup))' + - message: spec.forProvider.detectorType is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.detectorType) + || (has(self.initProvider) && has(self.initProvider.detectorType))' + - message: spec.forProvider.frequency is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.frequency) + || (has(self.initProvider) && has(self.initProvider.frequency))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.severity is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.severity) + || (has(self.initProvider) && has(self.initProvider.severity))' + status: + description: MonitorSmartDetectorAlertRuleStatus defines the observed + state of MonitorSmartDetectorAlertRule. + properties: + atProvider: + properties: + actionGroup: + description: An action_group block as defined below. + properties: + emailSubject: + description: Specifies a custom email subject if Email Receiver + is specified in Monitor Action Group resource. 
+ type: string + ids: + description: Specifies the action group ids. + items: + type: string + type: array + x-kubernetes-list-type: set + webhookPayload: + description: A JSON String which Specifies the custom webhook + payload if Webhook Receiver is specified in Monitor Action + Group resource. + type: string + type: object + description: + description: Specifies a description for the Smart Detector Alert + Rule. + type: string + detectorType: + description: Specifies the Built-In Smart Detector type that this + alert rule will use. Currently the only possible values are + FailureAnomaliesDetector, RequestPerformanceDegradationDetector, + DependencyPerformanceDegradationDetector, ExceptionVolumeChangedDetector, + TraceSeverityDetector, MemoryLeakDetector. + type: string + enabled: + description: Is the Smart Detector Alert Rule enabled? Defaults + to true. + type: boolean + frequency: + description: Specifies the frequency of this Smart Detector Alert + Rule in ISO8601 format. + type: string + id: + description: The ID of the Monitor Smart Detector Alert Rule. + type: string + name: + description: Specifies the name of the Monitor Smart Detector + Alert Rule. Changing this forces a new resource to be created. + type: string + resourceGroupName: + description: Specifies the name of the resource group in which + the Monitor Smart Detector Alert Rule should exist. Changing + this forces a new resource to be created. + type: string + scopeResourceIds: + description: Specifies the scopes of this Smart Detector Alert + Rule. + items: + type: string + type: array + x-kubernetes-list-type: set + severity: + description: Specifies the severity of this Smart Detector Alert + Rule. Possible values are Sev0, Sev1, Sev2, Sev3 or Sev4. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. 
+ type: object + x-kubernetes-map-type: granular + throttlingDuration: + description: Specifies the duration (in ISO8601 format) to wait + before notifying on the alert rule again. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/apimanagement.azure.upbound.io_apidiagnostics.yaml b/package/crds/apimanagement.azure.upbound.io_apidiagnostics.yaml index d46d737fd..2f35147ac 100644 --- a/package/crds/apimanagement.azure.upbound.io_apidiagnostics.yaml +++ b/package/crds/apimanagement.azure.upbound.io_apidiagnostics.yaml @@ -1430,3 +1430,1367 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: APIDiagnostic is the Schema for the APIDiagnostics API. Manages + a API Management Service API Diagnostics Logs. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: APIDiagnosticSpec defines the desired state of APIDiagnostic + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + alwaysLogErrors: + description: Always log errors. Send telemetry if there is an + erroneous condition, regardless of sampling settings. + type: boolean + apiManagementLoggerId: + description: The ID (name) of the Diagnostics Logger. + type: string + apiManagementLoggerIdRef: + description: Reference to a Logger in apimanagement to populate + apiManagementLoggerId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + apiManagementLoggerIdSelector: + description: Selector for a Logger in apimanagement to populate + apiManagementLoggerId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + apiManagementName: + description: The name of the API Management Service instance. + Changing this forces a new API Management Service API Diagnostics + Logs to be created. + type: string + apiManagementNameRef: + description: Reference to a Management in apimanagement to populate + apiManagementName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + apiManagementNameSelector: + description: Selector for a Management in apimanagement to populate + apiManagementName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + apiName: + description: The name of the API on which to configure the Diagnostics + Logs. Changing this forces a new API Management Service API + Diagnostics Logs to be created. + type: string + apiNameRef: + description: Reference to a API in apimanagement to populate apiName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + apiNameSelector: + description: Selector for a API in apimanagement to populate apiName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + backendRequest: + description: A backend_request block as defined below. + properties: + bodyBytes: + description: Number of payload bytes to log (up to 8192). + type: number + dataMasking: + description: A data_masking block as defined below. + properties: + headers: + description: A headers block as defined below. + items: + properties: + mode: + description: The data masking mode. Possible values + are Mask and Hide for query_params. The only possible + value is Mask for headers. + type: string + value: + description: The name of the header or the query + parameter to mask. + type: string + type: object + type: array + queryParams: + description: A query_params block as defined below. + items: + properties: + mode: + description: The data masking mode. Possible values + are Mask and Hide for query_params. The only possible + value is Mask for headers. + type: string + value: + description: The name of the header or the query + parameter to mask. + type: string + type: object + type: array + type: object + headersToLog: + description: Specifies a list of headers to log. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + backendResponse: + description: A backend_response block as defined below. + properties: + bodyBytes: + description: Number of payload bytes to log (up to 8192). + type: number + dataMasking: + description: A data_masking block as defined below. + properties: + headers: + description: A headers block as defined below. 
+ items: + properties: + mode: + description: The data masking mode. Possible values + are Mask and Hide for query_params. The only possible + value is Mask for headers. + type: string + value: + description: The name of the header or the query + parameter to mask. + type: string + type: object + type: array + queryParams: + description: A query_params block as defined below. + items: + properties: + mode: + description: The data masking mode. Possible values + are Mask and Hide for query_params. The only possible + value is Mask for headers. + type: string + value: + description: The name of the header or the query + parameter to mask. + type: string + type: object + type: array + type: object + headersToLog: + description: Specifies a list of headers to log. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + frontendRequest: + description: A frontend_request block as defined below. + properties: + bodyBytes: + description: Number of payload bytes to log (up to 8192). + type: number + dataMasking: + description: A data_masking block as defined below. + properties: + headers: + description: A headers block as defined below. + items: + properties: + mode: + description: The data masking mode. Possible values + are Mask and Hide for query_params. The only possible + value is Mask for headers. + type: string + value: + description: The name of the header or the query + parameter to mask. + type: string + type: object + type: array + queryParams: + description: A query_params block as defined below. + items: + properties: + mode: + description: The data masking mode. Possible values + are Mask and Hide for query_params. The only possible + value is Mask for headers. + type: string + value: + description: The name of the header or the query + parameter to mask. + type: string + type: object + type: array + type: object + headersToLog: + description: Specifies a list of headers to log. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: object + frontendResponse: + description: A frontend_response block as defined below. + properties: + bodyBytes: + description: Number of payload bytes to log (up to 8192). + type: number + dataMasking: + description: A data_masking block as defined below. + properties: + headers: + description: A headers block as defined below. + items: + properties: + mode: + description: The data masking mode. Possible values + are Mask and Hide for query_params. The only possible + value is Mask for headers. + type: string + value: + description: The name of the header or the query + parameter to mask. + type: string + type: object + type: array + queryParams: + description: A query_params block as defined below. + items: + properties: + mode: + description: The data masking mode. Possible values + are Mask and Hide for query_params. The only possible + value is Mask for headers. + type: string + value: + description: The name of the header or the query + parameter to mask. + type: string + type: object + type: array + type: object + headersToLog: + description: Specifies a list of headers to log. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + httpCorrelationProtocol: + description: The HTTP Correlation Protocol to use. Possible values + are None, Legacy or W3C. + type: string + logClientIp: + description: Log client IP address. + type: boolean + operationNameFormat: + description: The format of the Operation Name for Application + Insights telemetries. Possible values are Name, and Url. Defaults + to Name. + type: string + resourceGroupName: + description: The name of the Resource Group where the API Management + Service API Diagnostics Logs should exist. Changing this forces + a new API Management Service API Diagnostics Logs to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + samplingPercentage: + description: Sampling (%). For high traffic APIs, please read + this documentation to understand performance implications and + log sampling. Valid values are between 0.0 and 100.0. + type: number + verbosity: + description: Logging verbosity. Possible values are verbose, information + or error. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + alwaysLogErrors: + description: Always log errors. Send telemetry if there is an + erroneous condition, regardless of sampling settings. + type: boolean + apiManagementLoggerId: + description: The ID (name) of the Diagnostics Logger. + type: string + apiManagementLoggerIdRef: + description: Reference to a Logger in apimanagement to populate + apiManagementLoggerId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + apiManagementLoggerIdSelector: + description: Selector for a Logger in apimanagement to populate + apiManagementLoggerId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + backendRequest: + description: A backend_request block as defined below. + properties: + bodyBytes: + description: Number of payload bytes to log (up to 8192). + type: number + dataMasking: + description: A data_masking block as defined below. + properties: + headers: + description: A headers block as defined below. 
+ items: + properties: + mode: + description: The data masking mode. Possible values + are Mask and Hide for query_params. The only possible + value is Mask for headers. + type: string + value: + description: The name of the header or the query + parameter to mask. + type: string + type: object + type: array + queryParams: + description: A query_params block as defined below. + items: + properties: + mode: + description: The data masking mode. Possible values + are Mask and Hide for query_params. The only possible + value is Mask for headers. + type: string + value: + description: The name of the header or the query + parameter to mask. + type: string + type: object + type: array + type: object + headersToLog: + description: Specifies a list of headers to log. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + backendResponse: + description: A backend_response block as defined below. + properties: + bodyBytes: + description: Number of payload bytes to log (up to 8192). + type: number + dataMasking: + description: A data_masking block as defined below. + properties: + headers: + description: A headers block as defined below. + items: + properties: + mode: + description: The data masking mode. Possible values + are Mask and Hide for query_params. The only possible + value is Mask for headers. + type: string + value: + description: The name of the header or the query + parameter to mask. + type: string + type: object + type: array + queryParams: + description: A query_params block as defined below. + items: + properties: + mode: + description: The data masking mode. Possible values + are Mask and Hide for query_params. The only possible + value is Mask for headers. + type: string + value: + description: The name of the header or the query + parameter to mask. + type: string + type: object + type: array + type: object + headersToLog: + description: Specifies a list of headers to log. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: object + frontendRequest: + description: A frontend_request block as defined below. + properties: + bodyBytes: + description: Number of payload bytes to log (up to 8192). + type: number + dataMasking: + description: A data_masking block as defined below. + properties: + headers: + description: A headers block as defined below. + items: + properties: + mode: + description: The data masking mode. Possible values + are Mask and Hide for query_params. The only possible + value is Mask for headers. + type: string + value: + description: The name of the header or the query + parameter to mask. + type: string + type: object + type: array + queryParams: + description: A query_params block as defined below. + items: + properties: + mode: + description: The data masking mode. Possible values + are Mask and Hide for query_params. The only possible + value is Mask for headers. + type: string + value: + description: The name of the header or the query + parameter to mask. + type: string + type: object + type: array + type: object + headersToLog: + description: Specifies a list of headers to log. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + frontendResponse: + description: A frontend_response block as defined below. + properties: + bodyBytes: + description: Number of payload bytes to log (up to 8192). + type: number + dataMasking: + description: A data_masking block as defined below. + properties: + headers: + description: A headers block as defined below. + items: + properties: + mode: + description: The data masking mode. Possible values + are Mask and Hide for query_params. The only possible + value is Mask for headers. + type: string + value: + description: The name of the header or the query + parameter to mask. + type: string + type: object + type: array + queryParams: + description: A query_params block as defined below. 
+ items: + properties: + mode: + description: The data masking mode. Possible values + are Mask and Hide for query_params. The only possible + value is Mask for headers. + type: string + value: + description: The name of the header or the query + parameter to mask. + type: string + type: object + type: array + type: object + headersToLog: + description: Specifies a list of headers to log. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + httpCorrelationProtocol: + description: The HTTP Correlation Protocol to use. Possible values + are None, Legacy or W3C. + type: string + logClientIp: + description: Log client IP address. + type: boolean + operationNameFormat: + description: The format of the Operation Name for Application + Insights telemetries. Possible values are Name, and Url. Defaults + to Name. + type: string + samplingPercentage: + description: Sampling (%). For high traffic APIs, please read + this documentation to understand performance implications and + log sampling. Valid values are between 0.0 and 100.0. + type: number + verbosity: + description: Logging verbosity. Possible values are verbose, information + or error. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: APIDiagnosticStatus defines the observed state of APIDiagnostic. + properties: + atProvider: + properties: + alwaysLogErrors: + description: Always log errors. Send telemetry if there is an + erroneous condition, regardless of sampling settings. + type: boolean + apiManagementLoggerId: + description: The ID (name) of the Diagnostics Logger. + type: string + apiManagementName: + description: The name of the API Management Service instance. + Changing this forces a new API Management Service API Diagnostics + Logs to be created. + type: string + apiName: + description: The name of the API on which to configure the Diagnostics + Logs. Changing this forces a new API Management Service API + Diagnostics Logs to be created. + type: string + backendRequest: + description: A backend_request block as defined below. + properties: + bodyBytes: + description: Number of payload bytes to log (up to 8192). + type: number + dataMasking: + description: A data_masking block as defined below. 
+ properties: + headers: + description: A headers block as defined below. + items: + properties: + mode: + description: The data masking mode. Possible values + are Mask and Hide for query_params. The only possible + value is Mask for headers. + type: string + value: + description: The name of the header or the query + parameter to mask. + type: string + type: object + type: array + queryParams: + description: A query_params block as defined below. + items: + properties: + mode: + description: The data masking mode. Possible values + are Mask and Hide for query_params. The only possible + value is Mask for headers. + type: string + value: + description: The name of the header or the query + parameter to mask. + type: string + type: object + type: array + type: object + headersToLog: + description: Specifies a list of headers to log. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + backendResponse: + description: A backend_response block as defined below. + properties: + bodyBytes: + description: Number of payload bytes to log (up to 8192). + type: number + dataMasking: + description: A data_masking block as defined below. + properties: + headers: + description: A headers block as defined below. + items: + properties: + mode: + description: The data masking mode. Possible values + are Mask and Hide for query_params. The only possible + value is Mask for headers. + type: string + value: + description: The name of the header or the query + parameter to mask. + type: string + type: object + type: array + queryParams: + description: A query_params block as defined below. + items: + properties: + mode: + description: The data masking mode. Possible values + are Mask and Hide for query_params. The only possible + value is Mask for headers. + type: string + value: + description: The name of the header or the query + parameter to mask. 
+ type: string + type: object + type: array + type: object + headersToLog: + description: Specifies a list of headers to log. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + frontendRequest: + description: A frontend_request block as defined below. + properties: + bodyBytes: + description: Number of payload bytes to log (up to 8192). + type: number + dataMasking: + description: A data_masking block as defined below. + properties: + headers: + description: A headers block as defined below. + items: + properties: + mode: + description: The data masking mode. Possible values + are Mask and Hide for query_params. The only possible + value is Mask for headers. + type: string + value: + description: The name of the header or the query + parameter to mask. + type: string + type: object + type: array + queryParams: + description: A query_params block as defined below. + items: + properties: + mode: + description: The data masking mode. Possible values + are Mask and Hide for query_params. The only possible + value is Mask for headers. + type: string + value: + description: The name of the header or the query + parameter to mask. + type: string + type: object + type: array + type: object + headersToLog: + description: Specifies a list of headers to log. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + frontendResponse: + description: A frontend_response block as defined below. + properties: + bodyBytes: + description: Number of payload bytes to log (up to 8192). + type: number + dataMasking: + description: A data_masking block as defined below. + properties: + headers: + description: A headers block as defined below. + items: + properties: + mode: + description: The data masking mode. Possible values + are Mask and Hide for query_params. The only possible + value is Mask for headers. + type: string + value: + description: The name of the header or the query + parameter to mask. 
+ type: string + type: object + type: array + queryParams: + description: A query_params block as defined below. + items: + properties: + mode: + description: The data masking mode. Possible values + are Mask and Hide for query_params. The only possible + value is Mask for headers. + type: string + value: + description: The name of the header or the query + parameter to mask. + type: string + type: object + type: array + type: object + headersToLog: + description: Specifies a list of headers to log. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + httpCorrelationProtocol: + description: The HTTP Correlation Protocol to use. Possible values + are None, Legacy or W3C. + type: string + id: + description: The ID of the API Management Service API Diagnostics + Logs. + type: string + logClientIp: + description: Log client IP address. + type: boolean + operationNameFormat: + description: The format of the Operation Name for Application + Insights telemetries. Possible values are Name, and Url. Defaults + to Name. + type: string + resourceGroupName: + description: The name of the Resource Group where the API Management + Service API Diagnostics Logs should exist. Changing this forces + a new API Management Service API Diagnostics Logs to be created. + type: string + samplingPercentage: + description: Sampling (%). For high traffic APIs, please read + this documentation to understand performance implications and + log sampling. Valid values are between 0.0 and 100.0. + type: number + verbosity: + description: Logging verbosity. Possible values are verbose, information + or error. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. 
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/apimanagement.azure.upbound.io_apioperations.yaml b/package/crds/apimanagement.azure.upbound.io_apioperations.yaml index e3132114c..3a3072471 100644 --- a/package/crds/apimanagement.azure.upbound.io_apioperations.yaml +++ b/package/crds/apimanagement.azure.upbound.io_apioperations.yaml @@ -2039,3 +2039,1994 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: APIOperation is the Schema for the APIOperations API. Manages + an API Operation within an API Management Service. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: APIOperationSpec defines the desired state of APIOperation + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + apiManagementName: + description: The Name of the API Management Service where the + API exists. Changing this forces a new resource to be created. + type: string + apiManagementNameRef: + description: Reference to a Management in apimanagement to populate + apiManagementName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + apiManagementNameSelector: + description: Selector for a Management in apimanagement to populate + apiManagementName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + apiName: + description: The name of the API within the API Management Service + where this API Operation should be created. Changing this forces + a new resource to be created. + type: string + apiNameRef: + description: Reference to a API in apimanagement to populate apiName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + apiNameSelector: + description: Selector for a API in apimanagement to populate apiName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + description: + description: A description for this API Operation, which may include + HTML formatting tags. 
+ type: string + displayName: + description: The Display Name for this API Management Operation. + type: string + method: + description: The HTTP Method used for this API Management Operation, + like GET, DELETE, PUT or POST - but not limited to these values. + type: string + request: + description: A request block as defined below. + properties: + description: + description: A description of the HTTP Request, which may + include HTML tags. + type: string + header: + description: One or more header blocks as defined above. + items: + properties: + defaultValue: + description: The default value for this Template Parameter. + type: string + description: + description: A description of this Template Parameter. + type: string + example: + description: One or more example blocks as defined above. + items: + properties: + description: + description: A description of this Template Parameter. + type: string + externalValue: + description: A URL that points to the literal + example. + type: string + name: + description: The Name of this Template Parameter. + type: string + summary: + description: A short description for this example. + type: string + value: + description: The example of the representation. + type: string + type: object + type: array + name: + description: The Name of this Template Parameter. + type: string + required: + description: Is this Template Parameter Required? + type: boolean + schemaId: + description: The name of the Schema. + type: string + type: + description: The Type of this Template Parameter, such + as a string. + type: string + typeName: + description: The type name defined by the Schema. + type: string + values: + description: One or more acceptable values for this + Template Parameter. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + queryParameter: + description: One or more query_parameter blocks as defined + above. 
+ items: + properties: + defaultValue: + description: The default value for this Template Parameter. + type: string + description: + description: A description of this Template Parameter. + type: string + example: + description: One or more example blocks as defined above. + items: + properties: + description: + description: A description of this Template Parameter. + type: string + externalValue: + description: A URL that points to the literal + example. + type: string + name: + description: The Name of this Template Parameter. + type: string + summary: + description: A short description for this example. + type: string + value: + description: The example of the representation. + type: string + type: object + type: array + name: + description: The Name of this Template Parameter. + type: string + required: + description: Is this Template Parameter Required? + type: boolean + schemaId: + description: The name of the Schema. + type: string + type: + description: The Type of this Template Parameter, such + as a string. + type: string + typeName: + description: The type name defined by the Schema. + type: string + values: + description: One or more acceptable values for this + Template Parameter. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + representation: + description: One or more representation blocks as defined + below. + items: + properties: + contentType: + description: The Content Type of this representation, + such as application/json. + type: string + example: + description: One or more example blocks as defined above. + items: + properties: + description: + description: A description of this Template Parameter. + type: string + externalValue: + description: A URL that points to the literal + example. + type: string + name: + description: The Name of this Template Parameter. + type: string + summary: + description: A short description for this example. 
+ type: string + value: + description: The example of the representation. + type: string + type: object + type: array + formParameter: + description: One or more form_parameter block as defined + above. + items: + properties: + defaultValue: + description: The default value for this Template + Parameter. + type: string + description: + description: A description of this Template Parameter. + type: string + example: + description: One or more example blocks as defined + above. + items: + properties: + description: + description: A description of this Template + Parameter. + type: string + externalValue: + description: A URL that points to the literal + example. + type: string + name: + description: The Name of this Template Parameter. + type: string + summary: + description: A short description for this + example. + type: string + value: + description: The example of the representation. + type: string + type: object + type: array + name: + description: The Name of this Template Parameter. + type: string + required: + description: Is this Template Parameter Required? + type: boolean + schemaId: + description: The name of the Schema. + type: string + type: + description: The Type of this Template Parameter, + such as a string. + type: string + typeName: + description: The type name defined by the Schema. + type: string + values: + description: One or more acceptable values for + this Template Parameter. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + schemaId: + description: The name of the Schema. + type: string + typeName: + description: The type name defined by the Schema. + type: string + type: object + type: array + type: object + resourceGroupName: + description: The Name of the Resource Group in which the API Management + Service exists. Changing this forces a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + response: + description: One or more response blocks as defined below. + items: + properties: + description: + description: A description of the HTTP Response, which may + include HTML tags. + type: string + header: + description: One or more header blocks as defined above. + items: + properties: + defaultValue: + description: The default value for this Template Parameter. + type: string + description: + description: A description of this Template Parameter. + type: string + example: + description: One or more example blocks as defined + above. + items: + properties: + description: + description: A description of this Template + Parameter. + type: string + externalValue: + description: A URL that points to the literal + example. + type: string + name: + description: The Name of this Template Parameter. + type: string + summary: + description: A short description for this example. + type: string + value: + description: The example of the representation. + type: string + type: object + type: array + name: + description: The Name of this Template Parameter. + type: string + required: + description: Is this Template Parameter Required? + type: boolean + schemaId: + description: The name of the Schema. + type: string + type: + description: The Type of this Template Parameter, + such as a string. + type: string + typeName: + description: The type name defined by the Schema. + type: string + values: + description: One or more acceptable values for this + Template Parameter. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + representation: + description: One or more representation blocks as defined + below. + items: + properties: + contentType: + description: The Content Type of this representation, + such as application/json. 
+ type: string + example: + description: One or more example blocks as defined + above. + items: + properties: + description: + description: A description of this Template + Parameter. + type: string + externalValue: + description: A URL that points to the literal + example. + type: string + name: + description: The Name of this Template Parameter. + type: string + summary: + description: A short description for this example. + type: string + value: + description: The example of the representation. + type: string + type: object + type: array + formParameter: + description: One or more form_parameter block as defined + above. + items: + properties: + defaultValue: + description: The default value for this Template + Parameter. + type: string + description: + description: A description of this Template + Parameter. + type: string + example: + description: One or more example blocks as defined + above. + items: + properties: + description: + description: A description of this Template + Parameter. + type: string + externalValue: + description: A URL that points to the + literal example. + type: string + name: + description: The Name of this Template + Parameter. + type: string + summary: + description: A short description for this + example. + type: string + value: + description: The example of the representation. + type: string + type: object + type: array + name: + description: The Name of this Template Parameter. + type: string + required: + description: Is this Template Parameter Required? + type: boolean + schemaId: + description: The name of the Schema. + type: string + type: + description: The Type of this Template Parameter, + such as a string. + type: string + typeName: + description: The type name defined by the Schema. + type: string + values: + description: One or more acceptable values for + this Template Parameter. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + schemaId: + description: The name of the Schema. + type: string + typeName: + description: The type name defined by the Schema. + type: string + type: object + type: array + statusCode: + description: The HTTP Status Code. + type: number + type: object + type: array + templateParameter: + description: One or more template_parameter blocks as defined + below. Required if url_template contains one or more parameters. + items: + properties: + defaultValue: + description: The default value for this Template Parameter. + type: string + description: + description: A description of this Template Parameter. + type: string + example: + description: One or more example blocks as defined above. + items: + properties: + description: + description: A description of this Template Parameter. + type: string + externalValue: + description: A URL that points to the literal example. + type: string + name: + description: The Name of this Template Parameter. + type: string + summary: + description: A short description for this example. + type: string + value: + description: The example of the representation. + type: string + type: object + type: array + name: + description: The Name of this Template Parameter. + type: string + required: + description: Is this Template Parameter Required? + type: boolean + schemaId: + description: The name of the Schema. + type: string + type: + description: The Type of this Template Parameter, such as + a string. + type: string + typeName: + description: The type name defined by the Schema. + type: string + values: + description: One or more acceptable values for this Template + Parameter. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + urlTemplate: + description: The relative URL Template identifying the target + resource for this operation, which may include parameters. 
+ type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: A description for this API Operation, which may include + HTML formatting tags. + type: string + displayName: + description: The Display Name for this API Management Operation. + type: string + method: + description: The HTTP Method used for this API Management Operation, + like GET, DELETE, PUT or POST - but not limited to these values. + type: string + request: + description: A request block as defined below. + properties: + description: + description: A description of the HTTP Request, which may + include HTML tags. + type: string + header: + description: One or more header blocks as defined above. + items: + properties: + defaultValue: + description: The default value for this Template Parameter. + type: string + description: + description: A description of this Template Parameter. + type: string + example: + description: One or more example blocks as defined above. + items: + properties: + description: + description: A description of this Template Parameter. + type: string + externalValue: + description: A URL that points to the literal + example. + type: string + name: + description: The Name of this Template Parameter. + type: string + summary: + description: A short description for this example. 
+ type: string + value: + description: The example of the representation. + type: string + type: object + type: array + name: + description: The Name of this Template Parameter. + type: string + required: + description: Is this Template Parameter Required? + type: boolean + schemaId: + description: The name of the Schema. + type: string + type: + description: The Type of this Template Parameter, such + as a string. + type: string + typeName: + description: The type name defined by the Schema. + type: string + values: + description: One or more acceptable values for this + Template Parameter. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + queryParameter: + description: One or more query_parameter blocks as defined + above. + items: + properties: + defaultValue: + description: The default value for this Template Parameter. + type: string + description: + description: A description of this Template Parameter. + type: string + example: + description: One or more example blocks as defined above. + items: + properties: + description: + description: A description of this Template Parameter. + type: string + externalValue: + description: A URL that points to the literal + example. + type: string + name: + description: The Name of this Template Parameter. + type: string + summary: + description: A short description for this example. + type: string + value: + description: The example of the representation. + type: string + type: object + type: array + name: + description: The Name of this Template Parameter. + type: string + required: + description: Is this Template Parameter Required? + type: boolean + schemaId: + description: The name of the Schema. + type: string + type: + description: The Type of this Template Parameter, such + as a string. + type: string + typeName: + description: The type name defined by the Schema. + type: string + values: + description: One or more acceptable values for this + Template Parameter. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + representation: + description: One or more representation blocks as defined + below. + items: + properties: + contentType: + description: The Content Type of this representation, + such as application/json. + type: string + example: + description: One or more example blocks as defined above. + items: + properties: + description: + description: A description of this Template Parameter. + type: string + externalValue: + description: A URL that points to the literal + example. + type: string + name: + description: The Name of this Template Parameter. + type: string + summary: + description: A short description for this example. + type: string + value: + description: The example of the representation. + type: string + type: object + type: array + formParameter: + description: One or more form_parameter block as defined + above. + items: + properties: + defaultValue: + description: The default value for this Template + Parameter. + type: string + description: + description: A description of this Template Parameter. + type: string + example: + description: One or more example blocks as defined + above. + items: + properties: + description: + description: A description of this Template + Parameter. + type: string + externalValue: + description: A URL that points to the literal + example. + type: string + name: + description: The Name of this Template Parameter. + type: string + summary: + description: A short description for this + example. + type: string + value: + description: The example of the representation. + type: string + type: object + type: array + name: + description: The Name of this Template Parameter. + type: string + required: + description: Is this Template Parameter Required? + type: boolean + schemaId: + description: The name of the Schema. + type: string + type: + description: The Type of this Template Parameter, + such as a string. 
+ type: string + typeName: + description: The type name defined by the Schema. + type: string + values: + description: One or more acceptable values for + this Template Parameter. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + schemaId: + description: The name of the Schema. + type: string + typeName: + description: The type name defined by the Schema. + type: string + type: object + type: array + type: object + response: + description: One or more response blocks as defined below. + items: + properties: + description: + description: A description of the HTTP Response, which may + include HTML tags. + type: string + header: + description: One or more header blocks as defined above. + items: + properties: + defaultValue: + description: The default value for this Template Parameter. + type: string + description: + description: A description of this Template Parameter. + type: string + example: + description: One or more example blocks as defined + above. + items: + properties: + description: + description: A description of this Template + Parameter. + type: string + externalValue: + description: A URL that points to the literal + example. + type: string + name: + description: The Name of this Template Parameter. + type: string + summary: + description: A short description for this example. + type: string + value: + description: The example of the representation. + type: string + type: object + type: array + name: + description: The Name of this Template Parameter. + type: string + required: + description: Is this Template Parameter Required? + type: boolean + schemaId: + description: The name of the Schema. + type: string + type: + description: The Type of this Template Parameter, + such as a string. + type: string + typeName: + description: The type name defined by the Schema. + type: string + values: + description: One or more acceptable values for this + Template Parameter. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + representation: + description: One or more representation blocks as defined + below. + items: + properties: + contentType: + description: The Content Type of this representation, + such as application/json. + type: string + example: + description: One or more example blocks as defined + above. + items: + properties: + description: + description: A description of this Template + Parameter. + type: string + externalValue: + description: A URL that points to the literal + example. + type: string + name: + description: The Name of this Template Parameter. + type: string + summary: + description: A short description for this example. + type: string + value: + description: The example of the representation. + type: string + type: object + type: array + formParameter: + description: One or more form_parameter block as defined + above. + items: + properties: + defaultValue: + description: The default value for this Template + Parameter. + type: string + description: + description: A description of this Template + Parameter. + type: string + example: + description: One or more example blocks as defined + above. + items: + properties: + description: + description: A description of this Template + Parameter. + type: string + externalValue: + description: A URL that points to the + literal example. + type: string + name: + description: The Name of this Template + Parameter. + type: string + summary: + description: A short description for this + example. + type: string + value: + description: The example of the representation. + type: string + type: object + type: array + name: + description: The Name of this Template Parameter. + type: string + required: + description: Is this Template Parameter Required? + type: boolean + schemaId: + description: The name of the Schema. + type: string + type: + description: The Type of this Template Parameter, + such as a string. 
+ type: string + typeName: + description: The type name defined by the Schema. + type: string + values: + description: One or more acceptable values for + this Template Parameter. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + schemaId: + description: The name of the Schema. + type: string + typeName: + description: The type name defined by the Schema. + type: string + type: object + type: array + statusCode: + description: The HTTP Status Code. + type: number + type: object + type: array + templateParameter: + description: One or more template_parameter blocks as defined + below. Required if url_template contains one or more parameters. + items: + properties: + defaultValue: + description: The default value for this Template Parameter. + type: string + description: + description: A description of this Template Parameter. + type: string + example: + description: One or more example blocks as defined above. + items: + properties: + description: + description: A description of this Template Parameter. + type: string + externalValue: + description: A URL that points to the literal example. + type: string + name: + description: The Name of this Template Parameter. + type: string + summary: + description: A short description for this example. + type: string + value: + description: The example of the representation. + type: string + type: object + type: array + name: + description: The Name of this Template Parameter. + type: string + required: + description: Is this Template Parameter Required? + type: boolean + schemaId: + description: The name of the Schema. + type: string + type: + description: The Type of this Template Parameter, such as + a string. + type: string + typeName: + description: The type name defined by the Schema. + type: string + values: + description: One or more acceptable values for this Template + Parameter. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + urlTemplate: + description: The relative URL Template identifying the target + resource for this operation, which may include parameters. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.displayName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.displayName) + || (has(self.initProvider) && has(self.initProvider.displayName))' + - message: spec.forProvider.method is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.method) + || (has(self.initProvider) && has(self.initProvider.method))' + - message: spec.forProvider.urlTemplate is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.urlTemplate) + || (has(self.initProvider) && has(self.initProvider.urlTemplate))' + status: + description: APIOperationStatus defines the observed state of APIOperation. + properties: + atProvider: + properties: + apiManagementName: + description: The Name of the API Management Service where the + API exists. Changing this forces a new resource to be created. + type: string + apiName: + description: The name of the API within the API Management Service + where this API Operation should be created. Changing this forces + a new resource to be created. + type: string + description: + description: A description for this API Operation, which may include + HTML formatting tags. + type: string + displayName: + description: The Display Name for this API Management Operation. + type: string + id: + description: The ID of the API Management API Operation. + type: string + method: + description: The HTTP Method used for this API Management Operation, + like GET, DELETE, PUT or POST - but not limited to these values. 
+ type: string + request: + description: A request block as defined below. + properties: + description: + description: A description of the HTTP Request, which may + include HTML tags. + type: string + header: + description: One or more header blocks as defined above. + items: + properties: + defaultValue: + description: The default value for this Template Parameter. + type: string + description: + description: A description of this Template Parameter. + type: string + example: + description: One or more example blocks as defined above. + items: + properties: + description: + description: A description of this Template Parameter. + type: string + externalValue: + description: A URL that points to the literal + example. + type: string + name: + description: The Name of this Template Parameter. + type: string + summary: + description: A short description for this example. + type: string + value: + description: The example of the representation. + type: string + type: object + type: array + name: + description: The Name of this Template Parameter. + type: string + required: + description: Is this Template Parameter Required? + type: boolean + schemaId: + description: The name of the Schema. + type: string + type: + description: The Type of this Template Parameter, such + as a string. + type: string + typeName: + description: The type name defined by the Schema. + type: string + values: + description: One or more acceptable values for this + Template Parameter. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + queryParameter: + description: One or more query_parameter blocks as defined + above. + items: + properties: + defaultValue: + description: The default value for this Template Parameter. + type: string + description: + description: A description of this Template Parameter. + type: string + example: + description: One or more example blocks as defined above. 
+ items: + properties: + description: + description: A description of this Template Parameter. + type: string + externalValue: + description: A URL that points to the literal + example. + type: string + name: + description: The Name of this Template Parameter. + type: string + summary: + description: A short description for this example. + type: string + value: + description: The example of the representation. + type: string + type: object + type: array + name: + description: The Name of this Template Parameter. + type: string + required: + description: Is this Template Parameter Required? + type: boolean + schemaId: + description: The name of the Schema. + type: string + type: + description: The Type of this Template Parameter, such + as a string. + type: string + typeName: + description: The type name defined by the Schema. + type: string + values: + description: One or more acceptable values for this + Template Parameter. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + representation: + description: One or more representation blocks as defined + below. + items: + properties: + contentType: + description: The Content Type of this representation, + such as application/json. + type: string + example: + description: One or more example blocks as defined above. + items: + properties: + description: + description: A description of this Template Parameter. + type: string + externalValue: + description: A URL that points to the literal + example. + type: string + name: + description: The Name of this Template Parameter. + type: string + summary: + description: A short description for this example. + type: string + value: + description: The example of the representation. + type: string + type: object + type: array + formParameter: + description: One or more form_parameter block as defined + above. + items: + properties: + defaultValue: + description: The default value for this Template + Parameter. 
+ type: string + description: + description: A description of this Template Parameter. + type: string + example: + description: One or more example blocks as defined + above. + items: + properties: + description: + description: A description of this Template + Parameter. + type: string + externalValue: + description: A URL that points to the literal + example. + type: string + name: + description: The Name of this Template Parameter. + type: string + summary: + description: A short description for this + example. + type: string + value: + description: The example of the representation. + type: string + type: object + type: array + name: + description: The Name of this Template Parameter. + type: string + required: + description: Is this Template Parameter Required? + type: boolean + schemaId: + description: The name of the Schema. + type: string + type: + description: The Type of this Template Parameter, + such as a string. + type: string + typeName: + description: The type name defined by the Schema. + type: string + values: + description: One or more acceptable values for + this Template Parameter. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + schemaId: + description: The name of the Schema. + type: string + typeName: + description: The type name defined by the Schema. + type: string + type: object + type: array + type: object + resourceGroupName: + description: The Name of the Resource Group in which the API Management + Service exists. Changing this forces a new resource to be created. + type: string + response: + description: One or more response blocks as defined below. + items: + properties: + description: + description: A description of the HTTP Response, which may + include HTML tags. + type: string + header: + description: One or more header blocks as defined above. + items: + properties: + defaultValue: + description: The default value for this Template Parameter. 
+ type: string + description: + description: A description of this Template Parameter. + type: string + example: + description: One or more example blocks as defined + above. + items: + properties: + description: + description: A description of this Template + Parameter. + type: string + externalValue: + description: A URL that points to the literal + example. + type: string + name: + description: The Name of this Template Parameter. + type: string + summary: + description: A short description for this example. + type: string + value: + description: The example of the representation. + type: string + type: object + type: array + name: + description: The Name of this Template Parameter. + type: string + required: + description: Is this Template Parameter Required? + type: boolean + schemaId: + description: The name of the Schema. + type: string + type: + description: The Type of this Template Parameter, + such as a string. + type: string + typeName: + description: The type name defined by the Schema. + type: string + values: + description: One or more acceptable values for this + Template Parameter. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + representation: + description: One or more representation blocks as defined + below. + items: + properties: + contentType: + description: The Content Type of this representation, + such as application/json. + type: string + example: + description: One or more example blocks as defined + above. + items: + properties: + description: + description: A description of this Template + Parameter. + type: string + externalValue: + description: A URL that points to the literal + example. + type: string + name: + description: The Name of this Template Parameter. + type: string + summary: + description: A short description for this example. + type: string + value: + description: The example of the representation. 
+ type: string + type: object + type: array + formParameter: + description: One or more form_parameter block as defined + above. + items: + properties: + defaultValue: + description: The default value for this Template + Parameter. + type: string + description: + description: A description of this Template + Parameter. + type: string + example: + description: One or more example blocks as defined + above. + items: + properties: + description: + description: A description of this Template + Parameter. + type: string + externalValue: + description: A URL that points to the + literal example. + type: string + name: + description: The Name of this Template + Parameter. + type: string + summary: + description: A short description for this + example. + type: string + value: + description: The example of the representation. + type: string + type: object + type: array + name: + description: The Name of this Template Parameter. + type: string + required: + description: Is this Template Parameter Required? + type: boolean + schemaId: + description: The name of the Schema. + type: string + type: + description: The Type of this Template Parameter, + such as a string. + type: string + typeName: + description: The type name defined by the Schema. + type: string + values: + description: One or more acceptable values for + this Template Parameter. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + schemaId: + description: The name of the Schema. + type: string + typeName: + description: The type name defined by the Schema. + type: string + type: object + type: array + statusCode: + description: The HTTP Status Code. + type: number + type: object + type: array + templateParameter: + description: One or more template_parameter blocks as defined + below. Required if url_template contains one or more parameters. + items: + properties: + defaultValue: + description: The default value for this Template Parameter. 
+ type: string + description: + description: A description of this Template Parameter. + type: string + example: + description: One or more example blocks as defined above. + items: + properties: + description: + description: A description of this Template Parameter. + type: string + externalValue: + description: A URL that points to the literal example. + type: string + name: + description: The Name of this Template Parameter. + type: string + summary: + description: A short description for this example. + type: string + value: + description: The example of the representation. + type: string + type: object + type: array + name: + description: The Name of this Template Parameter. + type: string + required: + description: Is this Template Parameter Required? + type: boolean + schemaId: + description: The name of the Schema. + type: string + type: + description: The Type of this Template Parameter, such as + a string. + type: string + typeName: + description: The type name defined by the Schema. + type: string + values: + description: One or more acceptable values for this Template + Parameter. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + urlTemplate: + description: The relative URL Template identifying the target + resource for this operation, which may include parameters. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/apimanagement.azure.upbound.io_apis.yaml b/package/crds/apimanagement.azure.upbound.io_apis.yaml index 2fb6cf631..0224c96d8 100644 --- a/package/crds/apimanagement.azure.upbound.io_apis.yaml +++ b/package/crds/apimanagement.azure.upbound.io_apis.yaml @@ -1003,3 +1003,943 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: API is the Schema for the APIs API. Manages an API within an + API Management Service. 
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: APISpec defines the desired state of API + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + apiManagementName: + description: The Name of the API Management Service where this + API should be created. Changing this forces a new resource to + be created. + type: string + apiManagementNameRef: + description: Reference to a Management in apimanagement to populate + apiManagementName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + apiManagementNameSelector: + description: Selector for a Management in apimanagement to populate + apiManagementName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + apiType: + description: Type of API. Possible values are graphql, http, soap, + and websocket. Defaults to http. + type: string + contact: + description: A contact block as documented below. + properties: + email: + description: The email address of the contact person/organization. + type: string + name: + description: The name of the contact person/organization. + type: string + url: + description: Absolute URL of the contact information. + type: string + type: object + description: + description: A description of the API Management API, which may + include HTML formatting tags. + type: string + displayName: + description: The display name of the API. + type: string + import: + description: A import block as documented below. + properties: + contentFormat: + description: 'The format of the content from which the API + Definition should be imported. Possible values are: openapi, + openapi+json, openapi+json-link, openapi-link, swagger-json, + swagger-link-json, wadl-link-json, wadl-xml, wsdl and wsdl-link.' + type: string + contentValue: + description: The Content from which the API Definition should + be imported. When a content_format of *-link-* is specified + this must be a URL, otherwise this must be defined inline. + type: string + wsdlSelector: + description: A wsdl_selector block as defined below, which + allows you to limit the import of a WSDL to only a subset + of the document. This can only be specified when content_format + is wsdl or wsdl-link. + properties: + endpointName: + description: The name of endpoint (port) to import from + WSDL. + type: string + serviceName: + description: The name of service to import from WSDL. + type: string + type: object + type: object + license: + description: A license block as documented below. + properties: + name: + description: The name of the license . + type: string + url: + description: Absolute URL of the license. 
+ type: string + type: object + oauth2Authorization: + description: An oauth2_authorization block as documented below. + properties: + authorizationServerName: + description: OAuth authorization server identifier. The name + of an OAuth2 Authorization Server. + type: string + scope: + description: Operations scope. + type: string + type: object + openidAuthentication: + description: An openid_authentication block as documented below. + properties: + bearerTokenSendingMethods: + description: How to send token to the server. A list of zero + or more methods. Valid values are authorizationHeader and + query. + items: + type: string + type: array + x-kubernetes-list-type: set + openidProviderName: + description: OpenID Connect provider identifier. The name + of an OpenID Connect Provider. + type: string + type: object + path: + description: The Path for this API Management API, which is a + relative URL which uniquely identifies this API and all of its + resource paths within the API Management Service. + type: string + protocols: + description: A list of protocols the operations in this API can + be invoked. Possible values are http, https, ws, and wss. + items: + type: string + type: array + x-kubernetes-list-type: set + resourceGroupName: + description: The Name of the Resource Group where the API Management + API exists. Changing this forces a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + revision: + description: The Revision which used for this API. Changing this + forces a new resource to be created. + type: string + revisionDescription: + description: The description of the API Revision of the API Management + API. + type: string + serviceUrl: + description: Absolute URL of the backend service implementing + this API. 
+ type: string + soapPassThrough: + description: Should this API expose a SOAP frontend, rather than + a HTTP frontend? Defaults to false. + type: boolean + sourceApiId: + description: The API id of the source API, which could be in format + azurerm_api_management_api.example.id or in format azurerm_api_management_api.example.id;rev=1 + type: string + subscriptionKeyParameterNames: + description: A subscription_key_parameter_names block as documented + below. + properties: + header: + description: The name of the HTTP Header which should be used + for the Subscription Key. + type: string + query: + description: The name of the QueryString parameter which should + be used for the Subscription Key. + type: string + type: object + subscriptionRequired: + description: Should this API require a subscription key? Defaults + to true. + type: boolean + termsOfServiceUrl: + description: Absolute URL of the Terms of Service for the API. + type: string + version: + description: The Version number of this API, if this API is versioned. + type: string + versionDescription: + description: The description of the API Version of the API Management + API. + type: string + versionSetId: + description: The ID of the Version Set which this API is associated + with. + type: string + required: + - revision + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. 
+ properties: + apiType: + description: Type of API. Possible values are graphql, http, soap, + and websocket. Defaults to http. + type: string + contact: + description: A contact block as documented below. + properties: + email: + description: The email address of the contact person/organization. + type: string + name: + description: The name of the contact person/organization. + type: string + url: + description: Absolute URL of the contact information. + type: string + type: object + description: + description: A description of the API Management API, which may + include HTML formatting tags. + type: string + displayName: + description: The display name of the API. + type: string + import: + description: A import block as documented below. + properties: + contentFormat: + description: 'The format of the content from which the API + Definition should be imported. Possible values are: openapi, + openapi+json, openapi+json-link, openapi-link, swagger-json, + swagger-link-json, wadl-link-json, wadl-xml, wsdl and wsdl-link.' + type: string + contentValue: + description: The Content from which the API Definition should + be imported. When a content_format of *-link-* is specified + this must be a URL, otherwise this must be defined inline. + type: string + wsdlSelector: + description: A wsdl_selector block as defined below, which + allows you to limit the import of a WSDL to only a subset + of the document. This can only be specified when content_format + is wsdl or wsdl-link. + properties: + endpointName: + description: The name of endpoint (port) to import from + WSDL. + type: string + serviceName: + description: The name of service to import from WSDL. + type: string + type: object + type: object + license: + description: A license block as documented below. + properties: + name: + description: The name of the license . + type: string + url: + description: Absolute URL of the license. 
+ type: string + type: object + oauth2Authorization: + description: An oauth2_authorization block as documented below. + properties: + authorizationServerName: + description: OAuth authorization server identifier. The name + of an OAuth2 Authorization Server. + type: string + scope: + description: Operations scope. + type: string + type: object + openidAuthentication: + description: An openid_authentication block as documented below. + properties: + bearerTokenSendingMethods: + description: How to send token to the server. A list of zero + or more methods. Valid values are authorizationHeader and + query. + items: + type: string + type: array + x-kubernetes-list-type: set + openidProviderName: + description: OpenID Connect provider identifier. The name + of an OpenID Connect Provider. + type: string + type: object + path: + description: The Path for this API Management API, which is a + relative URL which uniquely identifies this API and all of its + resource paths within the API Management Service. + type: string + protocols: + description: A list of protocols the operations in this API can + be invoked. Possible values are http, https, ws, and wss. + items: + type: string + type: array + x-kubernetes-list-type: set + revisionDescription: + description: The description of the API Revision of the API Management + API. + type: string + serviceUrl: + description: Absolute URL of the backend service implementing + this API. + type: string + soapPassThrough: + description: Should this API expose a SOAP frontend, rather than + a HTTP frontend? Defaults to false. + type: boolean + sourceApiId: + description: The API id of the source API, which could be in format + azurerm_api_management_api.example.id or in format azurerm_api_management_api.example.id;rev=1 + type: string + subscriptionKeyParameterNames: + description: A subscription_key_parameter_names block as documented + below. 
+ properties: + header: + description: The name of the HTTP Header which should be used + for the Subscription Key. + type: string + query: + description: The name of the QueryString parameter which should + be used for the Subscription Key. + type: string + type: object + subscriptionRequired: + description: Should this API require a subscription key? Defaults + to true. + type: boolean + termsOfServiceUrl: + description: Absolute URL of the Terms of Service for the API. + type: string + version: + description: The Version number of this API, if this API is versioned. + type: string + versionDescription: + description: The description of the API Version of the API Management + API. + type: string + versionSetId: + description: The ID of the Version Set which this API is associated + with. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: APIStatus defines the observed state of API. + properties: + atProvider: + properties: + apiManagementName: + description: The Name of the API Management Service where this + API should be created. Changing this forces a new resource to + be created. + type: string + apiType: + description: Type of API. Possible values are graphql, http, soap, + and websocket. Defaults to http. + type: string + contact: + description: A contact block as documented below. + properties: + email: + description: The email address of the contact person/organization. + type: string + name: + description: The name of the contact person/organization. + type: string + url: + description: Absolute URL of the contact information. + type: string + type: object + description: + description: A description of the API Management API, which may + include HTML formatting tags. + type: string + displayName: + description: The display name of the API. + type: string + id: + description: The ID of the API Management API. + type: string + import: + description: A import block as documented below. + properties: + contentFormat: + description: 'The format of the content from which the API + Definition should be imported. Possible values are: openapi, + openapi+json, openapi+json-link, openapi-link, swagger-json, + swagger-link-json, wadl-link-json, wadl-xml, wsdl and wsdl-link.' 
+ type: string + contentValue: + description: The Content from which the API Definition should + be imported. When a content_format of *-link-* is specified + this must be a URL, otherwise this must be defined inline. + type: string + wsdlSelector: + description: A wsdl_selector block as defined below, which + allows you to limit the import of a WSDL to only a subset + of the document. This can only be specified when content_format + is wsdl or wsdl-link. + properties: + endpointName: + description: The name of endpoint (port) to import from + WSDL. + type: string + serviceName: + description: The name of service to import from WSDL. + type: string + type: object + type: object + isCurrent: + description: Is this the current API Revision? + type: boolean + isOnline: + description: Is this API Revision online/accessible via the Gateway? + type: boolean + license: + description: A license block as documented below. + properties: + name: + description: The name of the license . + type: string + url: + description: Absolute URL of the license. + type: string + type: object + oauth2Authorization: + description: An oauth2_authorization block as documented below. + properties: + authorizationServerName: + description: OAuth authorization server identifier. The name + of an OAuth2 Authorization Server. + type: string + scope: + description: Operations scope. + type: string + type: object + openidAuthentication: + description: An openid_authentication block as documented below. + properties: + bearerTokenSendingMethods: + description: How to send token to the server. A list of zero + or more methods. Valid values are authorizationHeader and + query. + items: + type: string + type: array + x-kubernetes-list-type: set + openidProviderName: + description: OpenID Connect provider identifier. The name + of an OpenID Connect Provider. 
+ type: string + type: object + path: + description: The Path for this API Management API, which is a + relative URL which uniquely identifies this API and all of its + resource paths within the API Management Service. + type: string + protocols: + description: A list of protocols the operations in this API can + be invoked. Possible values are http, https, ws, and wss. + items: + type: string + type: array + x-kubernetes-list-type: set + resourceGroupName: + description: The Name of the Resource Group where the API Management + API exists. Changing this forces a new resource to be created. + type: string + revision: + description: The Revision which used for this API. Changing this + forces a new resource to be created. + type: string + revisionDescription: + description: The description of the API Revision of the API Management + API. + type: string + serviceUrl: + description: Absolute URL of the backend service implementing + this API. + type: string + soapPassThrough: + description: Should this API expose a SOAP frontend, rather than + a HTTP frontend? Defaults to false. + type: boolean + sourceApiId: + description: The API id of the source API, which could be in format + azurerm_api_management_api.example.id or in format azurerm_api_management_api.example.id;rev=1 + type: string + subscriptionKeyParameterNames: + description: A subscription_key_parameter_names block as documented + below. + properties: + header: + description: The name of the HTTP Header which should be used + for the Subscription Key. + type: string + query: + description: The name of the QueryString parameter which should + be used for the Subscription Key. + type: string + type: object + subscriptionRequired: + description: Should this API require a subscription key? Defaults + to true. + type: boolean + termsOfServiceUrl: + description: Absolute URL of the Terms of Service for the API. + type: string + version: + description: The Version number of this API, if this API is versioned. 
+ type: string + versionDescription: + description: The description of the API Version of the API Management + API. + type: string + versionSetId: + description: The ID of the Version Set which this API is associated + with. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/apimanagement.azure.upbound.io_backends.yaml b/package/crds/apimanagement.azure.upbound.io_backends.yaml index 4dc0479c0..c7c4f5fd2 100644 --- a/package/crds/apimanagement.azure.upbound.io_backends.yaml +++ b/package/crds/apimanagement.azure.upbound.io_backends.yaml @@ -926,3 +926,878 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Backend is the Schema for the Backends API. Manages a backend + within an API Management Service. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: BackendSpec defines the desired state of Backend + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + apiManagementName: + description: The Name of the API Management Service where this + backend should be created. Changing this forces a new resource + to be created. + type: string + apiManagementNameRef: + description: Reference to a Management in apimanagement to populate + apiManagementName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + apiManagementNameSelector: + description: Selector for a Management in apimanagement to populate + apiManagementName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + credentials: + description: A credentials block as documented below. + properties: + authorization: + description: An authorization block as defined below. + properties: + parameter: + description: The authentication Parameter value. + type: string + scheme: + description: The authentication Scheme name. + type: string + type: object + certificate: + description: A list of client certificate thumbprints to present + to the backend host. The certificates must exist within + the API Management Service. 
+ items: + type: string + type: array + header: + additionalProperties: + type: string + description: A mapping of header parameters to pass to the + backend host. The keys are the header names and the values + are a comma separated string of header values. This is converted + to a list before being passed to the API. + type: object + x-kubernetes-map-type: granular + query: + additionalProperties: + type: string + description: A mapping of query parameters to pass to the + backend host. The keys are the query names and the values + are a comma separated string of query values. This is converted + to a list before being passed to the API. + type: object + x-kubernetes-map-type: granular + type: object + description: + description: The description of the backend. + type: string + protocol: + description: The protocol used by the backend host. Possible values + are http or soap. + type: string + proxy: + description: A proxy block as documented below. + properties: + passwordSecretRef: + description: The password to connect to the proxy server. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + url: + description: The URL of the proxy server. + type: string + username: + description: The username to connect to the proxy server. + type: string + type: object + resourceGroupName: + description: The Name of the Resource Group where the API Management + Service exists. Changing this forces a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + resourceId: + description: The management URI of the backend host in an external + system. This URI can be the ARM Resource ID of Logic Apps, Function + Apps or API Apps, or the management endpoint of a Service Fabric + cluster. + type: string + serviceFabricCluster: + description: A service_fabric_cluster block as documented below. + properties: + clientCertificateId: + description: The client certificate resource id for the management + endpoint. + type: string + clientCertificateThumbprint: + description: The client certificate thumbprint for the management + endpoint. + type: string + managementEndpoints: + description: A list of cluster management endpoints. + items: + type: string + type: array + x-kubernetes-list-type: set + maxPartitionResolutionRetries: + description: The maximum number of retries when attempting + resolve the partition. + type: number + serverCertificateThumbprints: + description: A list of thumbprints of the server certificates + of the Service Fabric cluster. + items: + type: string + type: array + x-kubernetes-list-type: set + serverX509Name: + description: One or more server_x509_name blocks as documented + below. + items: + properties: + issuerCertificateThumbprint: + description: The thumbprint for the issuer of the certificate. + type: string + name: + description: The common name of the certificate. + type: string + type: object + type: array + type: object + title: + description: The title of the backend. + type: string + tls: + description: A tls block as documented below. + properties: + validateCertificateChain: + description: Flag indicating whether SSL certificate chain + validation should be done when using self-signed certificates + for the backend host. + type: boolean + validateCertificateName: + description: Flag indicating whether SSL certificate name + validation should be done when using self-signed certificates + for the backend host. 
+ type: boolean + type: object + url: + description: The URL of the backend host. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + credentials: + description: A credentials block as documented below. + properties: + authorization: + description: An authorization block as defined below. + properties: + parameter: + description: The authentication Parameter value. + type: string + scheme: + description: The authentication Scheme name. + type: string + type: object + certificate: + description: A list of client certificate thumbprints to present + to the backend host. The certificates must exist within + the API Management Service. + items: + type: string + type: array + header: + additionalProperties: + type: string + description: A mapping of header parameters to pass to the + backend host. The keys are the header names and the values + are a comma separated string of header values. This is converted + to a list before being passed to the API. + type: object + x-kubernetes-map-type: granular + query: + additionalProperties: + type: string + description: A mapping of query parameters to pass to the + backend host. The keys are the query names and the values + are a comma separated string of query values. This is converted + to a list before being passed to the API. 
+ type: object + x-kubernetes-map-type: granular + type: object + description: + description: The description of the backend. + type: string + protocol: + description: The protocol used by the backend host. Possible values + are http or soap. + type: string + proxy: + description: A proxy block as documented below. + properties: + url: + description: The URL of the proxy server. + type: string + username: + description: The username to connect to the proxy server. + type: string + type: object + resourceId: + description: The management URI of the backend host in an external + system. This URI can be the ARM Resource ID of Logic Apps, Function + Apps or API Apps, or the management endpoint of a Service Fabric + cluster. + type: string + serviceFabricCluster: + description: A service_fabric_cluster block as documented below. + properties: + clientCertificateId: + description: The client certificate resource id for the management + endpoint. + type: string + clientCertificateThumbprint: + description: The client certificate thumbprint for the management + endpoint. + type: string + managementEndpoints: + description: A list of cluster management endpoints. + items: + type: string + type: array + x-kubernetes-list-type: set + maxPartitionResolutionRetries: + description: The maximum number of retries when attempting + resolve the partition. + type: number + serverCertificateThumbprints: + description: A list of thumbprints of the server certificates + of the Service Fabric cluster. + items: + type: string + type: array + x-kubernetes-list-type: set + serverX509Name: + description: One or more server_x509_name blocks as documented + below. + items: + properties: + issuerCertificateThumbprint: + description: The thumbprint for the issuer of the certificate. + type: string + name: + description: The common name of the certificate. + type: string + type: object + type: array + type: object + title: + description: The title of the backend. 
+ type: string + tls: + description: A tls block as documented below. + properties: + validateCertificateChain: + description: Flag indicating whether SSL certificate chain + validation should be done when using self-signed certificates + for the backend host. + type: boolean + validateCertificateName: + description: Flag indicating whether SSL certificate name + validation should be done when using self-signed certificates + for the backend host. + type: boolean + type: object + url: + description: The URL of the backend host. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.protocol is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.protocol) + || (has(self.initProvider) && has(self.initProvider.protocol))' + - message: spec.forProvider.url is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.url) + || (has(self.initProvider) && has(self.initProvider.url))' + status: + description: BackendStatus defines the observed state of Backend. + properties: + atProvider: + properties: + apiManagementName: + description: The Name of the API Management Service where this + backend should be created. Changing this forces a new resource + to be created. + type: string + credentials: + description: A credentials block as documented below. + properties: + authorization: + description: An authorization block as defined below. + properties: + parameter: + description: The authentication Parameter value. + type: string + scheme: + description: The authentication Scheme name. + type: string + type: object + certificate: + description: A list of client certificate thumbprints to present + to the backend host. The certificates must exist within + the API Management Service. + items: + type: string + type: array + header: + additionalProperties: + type: string + description: A mapping of header parameters to pass to the + backend host. The keys are the header names and the values + are a comma separated string of header values. This is converted + to a list before being passed to the API. 
+ type: object + x-kubernetes-map-type: granular + query: + additionalProperties: + type: string + description: A mapping of query parameters to pass to the + backend host. The keys are the query names and the values + are a comma separated string of query values. This is converted + to a list before being passed to the API. + type: object + x-kubernetes-map-type: granular + type: object + description: + description: The description of the backend. + type: string + id: + description: The ID of the API Management API. + type: string + protocol: + description: The protocol used by the backend host. Possible values + are http or soap. + type: string + proxy: + description: A proxy block as documented below. + properties: + url: + description: The URL of the proxy server. + type: string + username: + description: The username to connect to the proxy server. + type: string + type: object + resourceGroupName: + description: The Name of the Resource Group where the API Management + Service exists. Changing this forces a new resource to be created. + type: string + resourceId: + description: The management URI of the backend host in an external + system. This URI can be the ARM Resource ID of Logic Apps, Function + Apps or API Apps, or the management endpoint of a Service Fabric + cluster. + type: string + serviceFabricCluster: + description: A service_fabric_cluster block as documented below. + properties: + clientCertificateId: + description: The client certificate resource id for the management + endpoint. + type: string + clientCertificateThumbprint: + description: The client certificate thumbprint for the management + endpoint. + type: string + managementEndpoints: + description: A list of cluster management endpoints. + items: + type: string + type: array + x-kubernetes-list-type: set + maxPartitionResolutionRetries: + description: The maximum number of retries when attempting + resolve the partition. 
+ type: number + serverCertificateThumbprints: + description: A list of thumbprints of the server certificates + of the Service Fabric cluster. + items: + type: string + type: array + x-kubernetes-list-type: set + serverX509Name: + description: One or more server_x509_name blocks as documented + below. + items: + properties: + issuerCertificateThumbprint: + description: The thumbprint for the issuer of the certificate. + type: string + name: + description: The common name of the certificate. + type: string + type: object + type: array + type: object + title: + description: The title of the backend. + type: string + tls: + description: A tls block as documented below. + properties: + validateCertificateChain: + description: Flag indicating whether SSL certificate chain + validation should be done when using self-signed certificates + for the backend host. + type: boolean + validateCertificateName: + description: Flag indicating whether SSL certificate name + validation should be done when using self-signed certificates + for the backend host. + type: boolean + type: object + url: + description: The URL of the backend host. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
+ format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/apimanagement.azure.upbound.io_diagnostics.yaml b/package/crds/apimanagement.azure.upbound.io_diagnostics.yaml index 1eed77cf1..fb00c51e3 100644 --- a/package/crds/apimanagement.azure.upbound.io_diagnostics.yaml +++ b/package/crds/apimanagement.azure.upbound.io_diagnostics.yaml @@ -1346,3 +1346,1283 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Diagnostic is the Schema for the Diagnostics API. Manages an + API Management Service Diagnostic. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DiagnosticSpec defines the desired state of Diagnostic + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + alwaysLogErrors: + description: Always log errors. Send telemetry if there is an + erroneous condition, regardless of sampling settings. + type: boolean + apiManagementLoggerId: + description: The id of the target API Management Logger where + the API Management Diagnostic should be saved. + type: string + apiManagementLoggerIdRef: + description: Reference to a Logger in apimanagement to populate + apiManagementLoggerId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + apiManagementLoggerIdSelector: + description: Selector for a Logger in apimanagement to populate + apiManagementLoggerId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + apiManagementName: + description: The Name of the API Management Service where this + Diagnostic should be created. Changing this forces a new resource + to be created. + type: string + apiManagementNameRef: + description: Reference to a Management in apimanagement to populate + apiManagementName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + apiManagementNameSelector: + description: Selector for a Management in apimanagement to populate + apiManagementName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + backendRequest: + description: A backend_request block as defined below. + properties: + bodyBytes: + description: Number of payload bytes to log (up to 8192). + type: number + dataMasking: + description: A data_masking block as defined below. + properties: + headers: + description: A headers block as defined below. + items: + properties: + mode: + description: The data masking mode. Possible values + are Mask and Hide for query_params. The only possible + value is Mask for headers. + type: string + value: + description: The name of the header or the query + parameter to mask. + type: string + type: object + type: array + queryParams: + description: A query_params block as defined below. + items: + properties: + mode: + description: The data masking mode. Possible values + are Mask and Hide for query_params. The only possible + value is Mask for headers. + type: string + value: + description: The name of the header or the query + parameter to mask. + type: string + type: object + type: array + type: object + headersToLog: + description: Specifies a list of headers to log. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + backendResponse: + description: A backend_response block as defined below. + properties: + bodyBytes: + description: Number of payload bytes to log (up to 8192). 
+ type: number + dataMasking: + description: A data_masking block as defined below. + properties: + headers: + description: A headers block as defined below. + items: + properties: + mode: + description: The data masking mode. Possible values + are Mask and Hide for query_params. The only possible + value is Mask for headers. + type: string + value: + description: The name of the header or the query + parameter to mask. + type: string + type: object + type: array + queryParams: + description: A query_params block as defined below. + items: + properties: + mode: + description: The data masking mode. Possible values + are Mask and Hide for query_params. The only possible + value is Mask for headers. + type: string + value: + description: The name of the header or the query + parameter to mask. + type: string + type: object + type: array + type: object + headersToLog: + description: Specifies a list of headers to log. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + frontendRequest: + description: A frontend_request block as defined below. + properties: + bodyBytes: + description: Number of payload bytes to log (up to 8192). + type: number + dataMasking: + description: A data_masking block as defined below. + properties: + headers: + description: A headers block as defined below. + items: + properties: + mode: + description: The data masking mode. Possible values + are Mask and Hide for query_params. The only possible + value is Mask for headers. + type: string + value: + description: The name of the header or the query + parameter to mask. + type: string + type: object + type: array + queryParams: + description: A query_params block as defined below. + items: + properties: + mode: + description: The data masking mode. Possible values + are Mask and Hide for query_params. The only possible + value is Mask for headers. + type: string + value: + description: The name of the header or the query + parameter to mask. 
+ type: string + type: object + type: array + type: object + headersToLog: + description: Specifies a list of headers to log. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + frontendResponse: + description: A frontend_response block as defined below. + properties: + bodyBytes: + description: Number of payload bytes to log (up to 8192). + type: number + dataMasking: + description: A data_masking block as defined below. + properties: + headers: + description: A headers block as defined below. + items: + properties: + mode: + description: The data masking mode. Possible values + are Mask and Hide for query_params. The only possible + value is Mask for headers. + type: string + value: + description: The name of the header or the query + parameter to mask. + type: string + type: object + type: array + queryParams: + description: A query_params block as defined below. + items: + properties: + mode: + description: The data masking mode. Possible values + are Mask and Hide for query_params. The only possible + value is Mask for headers. + type: string + value: + description: The name of the header or the query + parameter to mask. + type: string + type: object + type: array + type: object + headersToLog: + description: Specifies a list of headers to log. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + httpCorrelationProtocol: + description: The HTTP Correlation Protocol to use. Possible values + are None, Legacy or W3C. + type: string + logClientIp: + description: Log client IP address. + type: boolean + operationNameFormat: + description: The format of the Operation Name for Application + Insights telemetries. Possible values are Name, and Url. Defaults + to Name. + type: string + resourceGroupName: + description: The Name of the Resource Group where the API Management + Service exists. Changing this forces a new resource to be created. 
+ type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + samplingPercentage: + description: Sampling (%). For high traffic APIs, please read + this documentation to understand performance implications and + log sampling. Valid values are between 0.0 and 100.0. + type: number + verbosity: + description: Logging verbosity. Possible values are verbose, information + or error. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + alwaysLogErrors: + description: Always log errors. Send telemetry if there is an + erroneous condition, regardless of sampling settings. + type: boolean + apiManagementLoggerId: + description: The id of the target API Management Logger where + the API Management Diagnostic should be saved. + type: string + apiManagementLoggerIdRef: + description: Reference to a Logger in apimanagement to populate + apiManagementLoggerId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + apiManagementLoggerIdSelector: + description: Selector for a Logger in apimanagement to populate + apiManagementLoggerId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + backendRequest: + description: A backend_request block as defined below. + properties: + bodyBytes: + description: Number of payload bytes to log (up to 8192). + type: number + dataMasking: + description: A data_masking block as defined below. + properties: + headers: + description: A headers block as defined below. + items: + properties: + mode: + description: The data masking mode. Possible values + are Mask and Hide for query_params. The only possible + value is Mask for headers. + type: string + value: + description: The name of the header or the query + parameter to mask. + type: string + type: object + type: array + queryParams: + description: A query_params block as defined below. + items: + properties: + mode: + description: The data masking mode. Possible values + are Mask and Hide for query_params. The only possible + value is Mask for headers. + type: string + value: + description: The name of the header or the query + parameter to mask. + type: string + type: object + type: array + type: object + headersToLog: + description: Specifies a list of headers to log. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + backendResponse: + description: A backend_response block as defined below. + properties: + bodyBytes: + description: Number of payload bytes to log (up to 8192). + type: number + dataMasking: + description: A data_masking block as defined below. + properties: + headers: + description: A headers block as defined below. + items: + properties: + mode: + description: The data masking mode. Possible values + are Mask and Hide for query_params. The only possible + value is Mask for headers. + type: string + value: + description: The name of the header or the query + parameter to mask. + type: string + type: object + type: array + queryParams: + description: A query_params block as defined below. 
+ items: + properties: + mode: + description: The data masking mode. Possible values + are Mask and Hide for query_params. The only possible + value is Mask for headers. + type: string + value: + description: The name of the header or the query + parameter to mask. + type: string + type: object + type: array + type: object + headersToLog: + description: Specifies a list of headers to log. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + frontendRequest: + description: A frontend_request block as defined below. + properties: + bodyBytes: + description: Number of payload bytes to log (up to 8192). + type: number + dataMasking: + description: A data_masking block as defined below. + properties: + headers: + description: A headers block as defined below. + items: + properties: + mode: + description: The data masking mode. Possible values + are Mask and Hide for query_params. The only possible + value is Mask for headers. + type: string + value: + description: The name of the header or the query + parameter to mask. + type: string + type: object + type: array + queryParams: + description: A query_params block as defined below. + items: + properties: + mode: + description: The data masking mode. Possible values + are Mask and Hide for query_params. The only possible + value is Mask for headers. + type: string + value: + description: The name of the header or the query + parameter to mask. + type: string + type: object + type: array + type: object + headersToLog: + description: Specifies a list of headers to log. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + frontendResponse: + description: A frontend_response block as defined below. + properties: + bodyBytes: + description: Number of payload bytes to log (up to 8192). + type: number + dataMasking: + description: A data_masking block as defined below. + properties: + headers: + description: A headers block as defined below. 
+ items: + properties: + mode: + description: The data masking mode. Possible values + are Mask and Hide for query_params. The only possible + value is Mask for headers. + type: string + value: + description: The name of the header or the query + parameter to mask. + type: string + type: object + type: array + queryParams: + description: A query_params block as defined below. + items: + properties: + mode: + description: The data masking mode. Possible values + are Mask and Hide for query_params. The only possible + value is Mask for headers. + type: string + value: + description: The name of the header or the query + parameter to mask. + type: string + type: object + type: array + type: object + headersToLog: + description: Specifies a list of headers to log. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + httpCorrelationProtocol: + description: The HTTP Correlation Protocol to use. Possible values + are None, Legacy or W3C. + type: string + logClientIp: + description: Log client IP address. + type: boolean + operationNameFormat: + description: The format of the Operation Name for Application + Insights telemetries. Possible values are Name, and Url. Defaults + to Name. + type: string + samplingPercentage: + description: Sampling (%). For high traffic APIs, please read + this documentation to understand performance implications and + log sampling. Valid values are between 0.0 and 100.0. + type: number + verbosity: + description: Logging verbosity. Possible values are verbose, information + or error. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. 
Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: DiagnosticStatus defines the observed state of Diagnostic. + properties: + atProvider: + properties: + alwaysLogErrors: + description: Always log errors. Send telemetry if there is an + erroneous condition, regardless of sampling settings. + type: boolean + apiManagementLoggerId: + description: The id of the target API Management Logger where + the API Management Diagnostic should be saved. + type: string + apiManagementName: + description: The Name of the API Management Service where this + Diagnostic should be created. Changing this forces a new resource + to be created. 
+ type: string + backendRequest: + description: A backend_request block as defined below. + properties: + bodyBytes: + description: Number of payload bytes to log (up to 8192). + type: number + dataMasking: + description: A data_masking block as defined below. + properties: + headers: + description: A headers block as defined below. + items: + properties: + mode: + description: The data masking mode. Possible values + are Mask and Hide for query_params. The only possible + value is Mask for headers. + type: string + value: + description: The name of the header or the query + parameter to mask. + type: string + type: object + type: array + queryParams: + description: A query_params block as defined below. + items: + properties: + mode: + description: The data masking mode. Possible values + are Mask and Hide for query_params. The only possible + value is Mask for headers. + type: string + value: + description: The name of the header or the query + parameter to mask. + type: string + type: object + type: array + type: object + headersToLog: + description: Specifies a list of headers to log. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + backendResponse: + description: A backend_response block as defined below. + properties: + bodyBytes: + description: Number of payload bytes to log (up to 8192). + type: number + dataMasking: + description: A data_masking block as defined below. + properties: + headers: + description: A headers block as defined below. + items: + properties: + mode: + description: The data masking mode. Possible values + are Mask and Hide for query_params. The only possible + value is Mask for headers. + type: string + value: + description: The name of the header or the query + parameter to mask. + type: string + type: object + type: array + queryParams: + description: A query_params block as defined below. + items: + properties: + mode: + description: The data masking mode. 
Possible values + are Mask and Hide for query_params. The only possible + value is Mask for headers. + type: string + value: + description: The name of the header or the query + parameter to mask. + type: string + type: object + type: array + type: object + headersToLog: + description: Specifies a list of headers to log. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + frontendRequest: + description: A frontend_request block as defined below. + properties: + bodyBytes: + description: Number of payload bytes to log (up to 8192). + type: number + dataMasking: + description: A data_masking block as defined below. + properties: + headers: + description: A headers block as defined below. + items: + properties: + mode: + description: The data masking mode. Possible values + are Mask and Hide for query_params. The only possible + value is Mask for headers. + type: string + value: + description: The name of the header or the query + parameter to mask. + type: string + type: object + type: array + queryParams: + description: A query_params block as defined below. + items: + properties: + mode: + description: The data masking mode. Possible values + are Mask and Hide for query_params. The only possible + value is Mask for headers. + type: string + value: + description: The name of the header or the query + parameter to mask. + type: string + type: object + type: array + type: object + headersToLog: + description: Specifies a list of headers to log. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + frontendResponse: + description: A frontend_response block as defined below. + properties: + bodyBytes: + description: Number of payload bytes to log (up to 8192). + type: number + dataMasking: + description: A data_masking block as defined below. + properties: + headers: + description: A headers block as defined below. + items: + properties: + mode: + description: The data masking mode. 
Possible values + are Mask and Hide for query_params. The only possible + value is Mask for headers. + type: string + value: + description: The name of the header or the query + parameter to mask. + type: string + type: object + type: array + queryParams: + description: A query_params block as defined below. + items: + properties: + mode: + description: The data masking mode. Possible values + are Mask and Hide for query_params. The only possible + value is Mask for headers. + type: string + value: + description: The name of the header or the query + parameter to mask. + type: string + type: object + type: array + type: object + headersToLog: + description: Specifies a list of headers to log. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + httpCorrelationProtocol: + description: The HTTP Correlation Protocol to use. Possible values + are None, Legacy or W3C. + type: string + id: + description: The ID of the API Management Diagnostic. + type: string + logClientIp: + description: Log client IP address. + type: boolean + operationNameFormat: + description: The format of the Operation Name for Application + Insights telemetries. Possible values are Name, and Url. Defaults + to Name. + type: string + resourceGroupName: + description: The Name of the Resource Group where the API Management + Service exists. Changing this forces a new resource to be created. + type: string + samplingPercentage: + description: Sampling (%). For high traffic APIs, please read + this documentation to understand performance implications and + log sampling. Valid values are between 0.0 and 100.0. + type: number + verbosity: + description: Logging verbosity. Possible values are verbose, information + or error. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. 
+ properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/apimanagement.azure.upbound.io_gateways.yaml b/package/crds/apimanagement.azure.upbound.io_gateways.yaml index 1d2947c3e..df0dbf345 100644 --- a/package/crds/apimanagement.azure.upbound.io_gateways.yaml +++ b/package/crds/apimanagement.azure.upbound.io_gateways.yaml @@ -572,3 +572,545 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Gateway is the Schema for the Gateways API. Manages an API Management + Gateway. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: GatewaySpec defines the desired state of Gateway + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + apiManagementId: + description: The ID of the API Management Resource in which the + gateway will be created. Changing this forces a new API Management + Gateway resource to be created. + type: string + apiManagementIdRef: + description: Reference to a Management in apimanagement to populate + apiManagementId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + apiManagementIdSelector: + description: Selector for a Management in apimanagement to populate + apiManagementId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + description: + description: The description of the API Management Gateway. + type: string + locationData: + description: A location_data block as documented below. + properties: + city: + description: The city or locality where the resource is located. + type: string + district: + description: The district, state, or province where the resource + is located. + type: string + name: + description: A canonical name for the geographic or physical + location. + type: string + region: + description: The country or region where the resource is located. 
+ type: string + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + apiManagementId: + description: The ID of the API Management Resource in which the + gateway will be created. Changing this forces a new API Management + Gateway resource to be created. + type: string + apiManagementIdRef: + description: Reference to a Management in apimanagement to populate + apiManagementId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + apiManagementIdSelector: + description: Selector for a Management in apimanagement to populate + apiManagementId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + description: + description: The description of the API Management Gateway. + type: string + locationData: + description: A location_data block as documented below. + properties: + city: + description: The city or locality where the resource is located. + type: string + district: + description: The district, state, or province where the resource + is located. + type: string + name: + description: A canonical name for the geographic or physical + location. + type: string + region: + description: The country or region where the resource is located. 
+ type: string + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. 
+ - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.locationData is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.locationData) + || (has(self.initProvider) && has(self.initProvider.locationData))' + status: + description: GatewayStatus defines the observed state of Gateway. 
+ properties: + atProvider: + properties: + apiManagementId: + description: The ID of the API Management Resource in which the + gateway will be created. Changing this forces a new API Management + Gateway resource to be created. + type: string + description: + description: The description of the API Management Gateway. + type: string + id: + description: The ID of the API Management Gateway. + type: string + locationData: + description: A location_data block as documented below. + properties: + city: + description: The city or locality where the resource is located. + type: string + district: + description: The district, state, or province where the resource + is located. + type: string + name: + description: A canonical name for the geographic or physical + location. + type: string + region: + description: The country or region where the resource is located. + type: string + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? 
+ type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/apimanagement.azure.upbound.io_loggers.yaml b/package/crds/apimanagement.azure.upbound.io_loggers.yaml index 3dc95f2a1..430914136 100644 --- a/package/crds/apimanagement.azure.upbound.io_loggers.yaml +++ b/package/crds/apimanagement.azure.upbound.io_loggers.yaml @@ -821,3 +821,791 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Logger is the Schema for the Loggers API. Manages a Logger within + an API Management Service. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: LoggerSpec defines the desired state of Logger + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + apiManagementName: + description: The name of the API Management Service. Changing + this forces a new resource to be created. + type: string + apiManagementNameRef: + description: Reference to a Management in apimanagement to populate + apiManagementName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + apiManagementNameSelector: + description: Selector for a Management in apimanagement to populate + apiManagementName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + applicationInsights: + description: An application_insights block as documented below. + Changing this forces a new resource to be created. + properties: + instrumentationKeySecretRef: + description: The instrumentation key used to push data to + Application Insights. + properties: + key: + description: The key to select. 
+ type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + required: + - instrumentationKeySecretRef + type: object + buffered: + description: Specifies whether records should be buffered in the + Logger prior to publishing. Defaults to true. + type: boolean + description: + description: A description of this Logger. + type: string + eventhub: + description: An eventhub block as documented below. Changing this + forces a new resource to be created. + properties: + connectionStringSecretRef: + description: The connection string of an EventHub Namespace. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + endpointUri: + description: The endpoint address of an EventHub Namespace. + Required when client_id is set. + type: string + name: + description: The name of an EventHub. + type: string + userAssignedIdentityClientId: + description: The Client Id of the User Assigned Identity with + the "Azure Event Hubs Data Sender" role to the target EventHub + Namespace. Required when endpoint_uri is set. If not specified + the System Assigned Identity will be used. + type: string + type: object + resourceGroupName: + description: The name of the Resource Group in which the API Management + Service exists. Changing this forces a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + resourceId: + description: The target resource id which will be linked in the + API-Management portal page. Changing this forces a new resource + to be created. + type: string + resourceIdRef: + description: Reference to a ApplicationInsights in insights to + populate resourceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceIdSelector: + description: Selector for a ApplicationInsights in insights to + populate resourceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + applicationInsights: + description: An application_insights block as documented below. + Changing this forces a new resource to be created. + type: object + buffered: + description: Specifies whether records should be buffered in the + Logger prior to publishing. Defaults to true. + type: boolean + description: + description: A description of this Logger. + type: string + eventhub: + description: An eventhub block as documented below. Changing this + forces a new resource to be created. + properties: + endpointUri: + description: The endpoint address of an EventHub Namespace. + Required when client_id is set. + type: string + name: + description: The name of an EventHub. 
+ type: string + userAssignedIdentityClientId: + description: The Client Id of the User Assigned Identity with + the "Azure Event Hubs Data Sender" role to the target EventHub + Namespace. Required when endpoint_uri is set. If not specified + the System Assigned Identity will be used. + type: string + type: object + resourceId: + description: The target resource id which will be linked in the + API-Management portal page. Changing this forces a new resource + to be created. + type: string + resourceIdRef: + description: Reference to a ApplicationInsights in insights to + populate resourceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceIdSelector: + description: Selector for a ApplicationInsights in insights to + populate resourceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: LoggerStatus defines the observed state of Logger. + properties: + atProvider: + properties: + apiManagementName: + description: The name of the API Management Service. Changing + this forces a new resource to be created. + type: string + applicationInsights: + description: An application_insights block as documented below. + Changing this forces a new resource to be created. + properties: + instrumentationKeySecretRef: + description: The instrumentation key used to push data to + Application Insights. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + required: + - instrumentationKeySecretRef + type: object + buffered: + description: Specifies whether records should be buffered in the + Logger prior to publishing. Defaults to true. + type: boolean + description: + description: A description of this Logger. + type: string + eventhub: + description: An eventhub block as documented below. Changing this + forces a new resource to be created. + properties: + endpointUri: + description: The endpoint address of an EventHub Namespace. + Required when client_id is set. + type: string + name: + description: The name of an EventHub. 
+ type: string + userAssignedIdentityClientId: + description: The Client Id of the User Assigned Identity with + the "Azure Event Hubs Data Sender" role to the target EventHub + Namespace. Required when endpoint_uri is set. If not specified + the System Assigned Identity will be used. + type: string + type: object + id: + description: The ID of the API Management Logger. + type: string + resourceGroupName: + description: The name of the Resource Group in which the API Management + Service exists. Changing this forces a new resource to be created. + type: string + resourceId: + description: The target resource id which will be linked in the + API-Management portal page. Changing this forces a new resource + to be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/apimanagement.azure.upbound.io_managements.yaml b/package/crds/apimanagement.azure.upbound.io_managements.yaml index bb9604cfb..35526eaa9 100644 --- a/package/crds/apimanagement.azure.upbound.io_managements.yaml +++ b/package/crds/apimanagement.azure.upbound.io_managements.yaml @@ -1986,3 +1986,1905 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Management is the Schema for the Managements API. Manages an + API Management Service. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. 
+ In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ManagementSpec defines the desired state of Management + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + additionalLocation: + description: One or more additional_location blocks as defined + below. + items: + properties: + capacity: + description: The number of compute units in this region. + Defaults to the capacity of the main region. + type: number + gatewayDisabled: + description: Only valid for an Api Management service deployed + in multiple locations. This can be used to disable the + gateway in this additional location. + type: boolean + location: + description: The name of the Azure Region in which the API + Management Service should be expanded to. + type: string + publicIpAddressId: + description: ID of a standard SKU IPv4 Public IP. + type: string + virtualNetworkConfiguration: + description: A virtual_network_configuration block as defined + below. Required when virtual_network_type is External + or Internal. + properties: + subnetId: + description: The id of the subnet that will be used + for the API Management. 
+ type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + zones: + description: A list of availability zones. Changing this + forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + certificate: + description: One or more certificate blocks (up to 10) as defined + below. + items: + properties: + certificatePasswordSecretRef: + description: The password for the certificate. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + encodedCertificateSecretRef: + description: The Base64 Encoded PFX or Base64 Encoded X.509 + Certificate. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + storeName: + description: The name of the Certificate Store where this + certificate should be stored. Possible values are CertificateAuthority + and Root. + type: string + required: + - encodedCertificateSecretRef + type: object + type: array + clientCertificateEnabled: + description: Enforce a client certificate to be presented on each + request to the gateway? This is only supported when SKU type + is Consumption. + type: boolean + delegation: + description: A delegation block as defined below. + properties: + subscriptionsEnabled: + description: Should subscription requests be delegated to + an external url? Defaults to false. + type: boolean + url: + description: The delegation URL. 
+ type: string + userRegistrationEnabled: + description: Should user registration requests be delegated + to an external url? Defaults to false. + type: boolean + validationKeySecretRef: + description: A base64-encoded validation key to validate, + that a request is coming from Azure API Management. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + gatewayDisabled: + description: Disable the gateway in main region? This is only + supported when additional_location is set. + type: boolean + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of User Assigned Managed Identity IDs + to be assigned to this API Management Service. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this API Management Service. + Possible values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + location: + description: The Azure location where the API Management Service + exists. Changing this forces a new resource to be created. + type: string + minApiVersion: + description: The version which the control plane API calls to + API Management service are limited with version equal to or + newer than. + type: string + notificationSenderEmail: + description: Email address from which the notification will be + sent. + type: string + policy: + description: A policy block as defined below. + items: + properties: + xmlContent: + description: The XML Content for this Policy. + type: string + xmlLink: + description: A link to an API Management Policy XML Document, + which must be publicly available. 
+ type: string + type: object + type: array + protocols: + description: A protocols block as defined below. + properties: + enableHttp2: + description: Should HTTP/2 be supported by the API Management + Service? Defaults to false. + type: boolean + type: object + publicIpAddressId: + description: ID of a standard SKU IPv4 Public IP. + type: string + publicNetworkAccessEnabled: + description: Is public access to the service allowed? Defaults + to true. + type: boolean + publisherEmail: + description: The email of publisher/company. + type: string + publisherName: + description: The name of publisher/company. + type: string + resourceGroupName: + description: The name of the Resource Group in which the API Management + Service should be exist. Changing this forces a new resource + to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + security: + description: A security block as defined below. + properties: + enableBackendSsl30: + description: Should SSL 3.0 be enabled on the backend of the + gateway? Defaults to false. + type: boolean + enableBackendTls10: + description: Should TLS 1.0 be enabled on the backend of the + gateway? Defaults to false. + type: boolean + enableBackendTls11: + description: Should TLS 1.1 be enabled on the backend of the + gateway? Defaults to false. + type: boolean + enableFrontendSsl30: + description: Should SSL 3.0 be enabled on the frontend of + the gateway? Defaults to false. + type: boolean + enableFrontendTls10: + description: Should TLS 1.0 be enabled on the frontend of + the gateway? Defaults to false. + type: boolean + enableFrontendTls11: + description: Should TLS 1.1 be enabled on the frontend of + the gateway? Defaults to false. 
+ type: boolean + tlsEcdheEcdsaWithAes128CbcShaCiphersEnabled: + description: Should the TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA + cipher be enabled? Defaults to false. + type: boolean + tlsEcdheEcdsaWithAes256CbcShaCiphersEnabled: + description: Should the TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA + cipher be enabled? Defaults to false. + type: boolean + tlsEcdheRsaWithAes128CbcShaCiphersEnabled: + description: Should the TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA + cipher be enabled? Defaults to false. + type: boolean + tlsEcdheRsaWithAes256CbcShaCiphersEnabled: + description: Should the TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA + cipher be enabled? Defaults to false. + type: boolean + tlsRsaWithAes128CbcSha256CiphersEnabled: + description: Should the TLS_RSA_WITH_AES_128_CBC_SHA256 cipher + be enabled? Defaults to false. + type: boolean + tlsRsaWithAes128CbcShaCiphersEnabled: + description: Should the TLS_RSA_WITH_AES_128_CBC_SHA cipher + be enabled? Defaults to false. + type: boolean + tlsRsaWithAes128GcmSha256CiphersEnabled: + description: Should the TLS_RSA_WITH_AES_128_GCM_SHA256 cipher + be enabled? Defaults to false. + type: boolean + tlsRsaWithAes256CbcSha256CiphersEnabled: + description: Should the TLS_RSA_WITH_AES_256_CBC_SHA256 cipher + be enabled? Defaults to false. + type: boolean + tlsRsaWithAes256CbcShaCiphersEnabled: + description: Should the TLS_RSA_WITH_AES_256_CBC_SHA cipher + be enabled? Defaults to false. + type: boolean + tlsRsaWithAes256GcmSha384CiphersEnabled: + description: Should the TLS_RSA_WITH_AES_256_GCM_SHA384 cipher + be enabled? Defaults to false. + type: boolean + tripleDesCiphersEnabled: + description: Should the TLS_RSA_WITH_3DES_EDE_CBC_SHA cipher + be enabled for alL TLS versions (1.0, 1.1 and 1.2)? + type: boolean + type: object + signIn: + description: A sign_in block as defined below. + properties: + enabled: + description: Should anonymous users be redirected to the sign + in page? 
+ type: boolean + type: object + signUp: + description: A sign_up block as defined below. + properties: + enabled: + description: Can users sign up on the development portal? + type: boolean + termsOfService: + description: A terms_of_service block as defined below. + properties: + consentRequired: + description: Should the user be asked for consent during + sign up? + type: boolean + enabled: + description: Should Terms of Service be displayed during + sign up?. + type: boolean + text: + description: The Terms of Service which users are required + to agree to in order to sign up. + type: string + type: object + type: object + skuName: + description: 'sku_name is a string consisting of two parts separated + by an underscore(_). The first part is the name, valid values + include: Consumption, Developer, Basic, Standard and Premium. + The second part is the capacity (e.g. the number of deployed + units of the sku), which must be a positive integer (e.g. Developer_1).' + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags assigned to the resource. + type: object + x-kubernetes-map-type: granular + tenantAccess: + description: A tenant_access block as defined below. + properties: + enabled: + description: Should the access to the management API be enabled? + type: boolean + type: object + virtualNetworkConfiguration: + description: A virtual_network_configuration block as defined + below. Required when virtual_network_type is External or Internal. + properties: + subnetId: + description: The id of the subnet that will be used for the + API Management. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + virtualNetworkType: + description: 'The type of virtual network you want to use, valid + values include: None, External, Internal. Defaults to None.' 
+ type: string + zones: + description: Specifies a list of Availability Zones in which this + API Management service should be located. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + additionalLocation: + description: One or more additional_location blocks as defined + below. + items: + properties: + capacity: + description: The number of compute units in this region. + Defaults to the capacity of the main region. + type: number + gatewayDisabled: + description: Only valid for an Api Management service deployed + in multiple locations. This can be used to disable the + gateway in this additional location. + type: boolean + location: + description: The name of the Azure Region in which the API + Management Service should be expanded to. + type: string + publicIpAddressId: + description: ID of a standard SKU IPv4 Public IP. + type: string + virtualNetworkConfiguration: + description: A virtual_network_configuration block as defined + below. Required when virtual_network_type is External + or Internal. + properties: + subnetId: + description: The id of the subnet that will be used + for the API Management. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + zones: + description: A list of availability zones. Changing this + forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + certificate: + description: One or more certificate blocks (up to 10) as defined + below. + items: + properties: + storeName: + description: The name of the Certificate Store where this + certificate should be stored. Possible values are CertificateAuthority + and Root. + type: string + type: object + type: array + clientCertificateEnabled: + description: Enforce a client certificate to be presented on each + request to the gateway? This is only supported when SKU type + is Consumption. + type: boolean + delegation: + description: A delegation block as defined below. + properties: + subscriptionsEnabled: + description: Should subscription requests be delegated to + an external url? Defaults to false. + type: boolean + url: + description: The delegation URL. + type: string + userRegistrationEnabled: + description: Should user registration requests be delegated + to an external url? Defaults to false. + type: boolean + type: object + gatewayDisabled: + description: Disable the gateway in main region? This is only + supported when additional_location is set. + type: boolean + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of User Assigned Managed Identity IDs + to be assigned to this API Management Service. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this API Management Service. + Possible values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). 
+ type: string + type: object + location: + description: The Azure location where the API Management Service + exists. Changing this forces a new resource to be created. + type: string + minApiVersion: + description: The version which the control plane API calls to + API Management service are limited with version equal to or + newer than. + type: string + notificationSenderEmail: + description: Email address from which the notification will be + sent. + type: string + policy: + description: A policy block as defined below. + items: + properties: + xmlContent: + description: The XML Content for this Policy. + type: string + xmlLink: + description: A link to an API Management Policy XML Document, + which must be publicly available. + type: string + type: object + type: array + protocols: + description: A protocols block as defined below. + properties: + enableHttp2: + description: Should HTTP/2 be supported by the API Management + Service? Defaults to false. + type: boolean + type: object + publicIpAddressId: + description: ID of a standard SKU IPv4 Public IP. + type: string + publicNetworkAccessEnabled: + description: Is public access to the service allowed? Defaults + to true. + type: boolean + publisherEmail: + description: The email of publisher/company. + type: string + publisherName: + description: The name of publisher/company. + type: string + security: + description: A security block as defined below. + properties: + enableBackendSsl30: + description: Should SSL 3.0 be enabled on the backend of the + gateway? Defaults to false. + type: boolean + enableBackendTls10: + description: Should TLS 1.0 be enabled on the backend of the + gateway? Defaults to false. + type: boolean + enableBackendTls11: + description: Should TLS 1.1 be enabled on the backend of the + gateway? Defaults to false. + type: boolean + enableFrontendSsl30: + description: Should SSL 3.0 be enabled on the frontend of + the gateway? Defaults to false. 
+ type: boolean + enableFrontendTls10: + description: Should TLS 1.0 be enabled on the frontend of + the gateway? Defaults to false. + type: boolean + enableFrontendTls11: + description: Should TLS 1.1 be enabled on the frontend of + the gateway? Defaults to false. + type: boolean + tlsEcdheEcdsaWithAes128CbcShaCiphersEnabled: + description: Should the TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA + cipher be enabled? Defaults to false. + type: boolean + tlsEcdheEcdsaWithAes256CbcShaCiphersEnabled: + description: Should the TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA + cipher be enabled? Defaults to false. + type: boolean + tlsEcdheRsaWithAes128CbcShaCiphersEnabled: + description: Should the TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA + cipher be enabled? Defaults to false. + type: boolean + tlsEcdheRsaWithAes256CbcShaCiphersEnabled: + description: Should the TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA + cipher be enabled? Defaults to false. + type: boolean + tlsRsaWithAes128CbcSha256CiphersEnabled: + description: Should the TLS_RSA_WITH_AES_128_CBC_SHA256 cipher + be enabled? Defaults to false. + type: boolean + tlsRsaWithAes128CbcShaCiphersEnabled: + description: Should the TLS_RSA_WITH_AES_128_CBC_SHA cipher + be enabled? Defaults to false. + type: boolean + tlsRsaWithAes128GcmSha256CiphersEnabled: + description: Should the TLS_RSA_WITH_AES_128_GCM_SHA256 cipher + be enabled? Defaults to false. + type: boolean + tlsRsaWithAes256CbcSha256CiphersEnabled: + description: Should the TLS_RSA_WITH_AES_256_CBC_SHA256 cipher + be enabled? Defaults to false. + type: boolean + tlsRsaWithAes256CbcShaCiphersEnabled: + description: Should the TLS_RSA_WITH_AES_256_CBC_SHA cipher + be enabled? Defaults to false. + type: boolean + tlsRsaWithAes256GcmSha384CiphersEnabled: + description: Should the TLS_RSA_WITH_AES_256_GCM_SHA384 cipher + be enabled? Defaults to false. 
+ type: boolean + tripleDesCiphersEnabled: + description: Should the TLS_RSA_WITH_3DES_EDE_CBC_SHA cipher + be enabled for alL TLS versions (1.0, 1.1 and 1.2)? + type: boolean + type: object + signIn: + description: A sign_in block as defined below. + properties: + enabled: + description: Should anonymous users be redirected to the sign + in page? + type: boolean + type: object + signUp: + description: A sign_up block as defined below. + properties: + enabled: + description: Can users sign up on the development portal? + type: boolean + termsOfService: + description: A terms_of_service block as defined below. + properties: + consentRequired: + description: Should the user be asked for consent during + sign up? + type: boolean + enabled: + description: Should Terms of Service be displayed during + sign up?. + type: boolean + text: + description: The Terms of Service which users are required + to agree to in order to sign up. + type: string + type: object + type: object + skuName: + description: 'sku_name is a string consisting of two parts separated + by an underscore(_). The first part is the name, valid values + include: Consumption, Developer, Basic, Standard and Premium. + The second part is the capacity (e.g. the number of deployed + units of the sku), which must be a positive integer (e.g. Developer_1).' + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags assigned to the resource. + type: object + x-kubernetes-map-type: granular + tenantAccess: + description: A tenant_access block as defined below. + properties: + enabled: + description: Should the access to the management API be enabled? + type: boolean + type: object + virtualNetworkConfiguration: + description: A virtual_network_configuration block as defined + below. Required when virtual_network_type is External or Internal. + properties: + subnetId: + description: The id of the subnet that will be used for the + API Management. 
+ type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + virtualNetworkType: + description: 'The type of virtual network you want to use, valid + values include: None, External, Internal. Defaults to None.' + type: string + zones: + description: Specifies a list of Availability Zones in which this + API Management service should be located. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. 
+ type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.publisherEmail is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.publisherEmail) + || (has(self.initProvider) && has(self.initProvider.publisherEmail))' + - message: spec.forProvider.publisherName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.publisherName) + || (has(self.initProvider) && has(self.initProvider.publisherName))' + - message: spec.forProvider.skuName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.skuName) + || (has(self.initProvider) && has(self.initProvider.skuName))' + status: + description: ManagementStatus defines the observed state of Management. + properties: + atProvider: + properties: + additionalLocation: + description: One or more additional_location blocks as defined + below. + items: + properties: + capacity: + description: The number of compute units in this region. + Defaults to the capacity of the main region. + type: number + gatewayDisabled: + description: Only valid for an Api Management service deployed + in multiple locations. This can be used to disable the + gateway in this additional location. 
+ type: boolean + gatewayRegionalUrl: + description: The URL of the Regional Gateway for the API + Management Service in the specified region. + type: string + location: + description: The name of the Azure Region in which the API + Management Service should be expanded to. + type: string + privateIpAddresses: + description: The Private IP addresses of the API Management + Service. Available only when the API Manager instance + is using Virtual Network mode. + items: + type: string + type: array + publicIpAddressId: + description: ID of a standard SKU IPv4 Public IP. + type: string + publicIpAddresses: + description: Public Static Load Balanced IP addresses of + the API Management service in the additional location. + Available only for Basic, Standard and Premium SKU. + items: + type: string + type: array + virtualNetworkConfiguration: + description: A virtual_network_configuration block as defined + below. Required when virtual_network_type is External + or Internal. + properties: + subnetId: + description: The id of the subnet that will be used + for the API Management. + type: string + type: object + zones: + description: A list of availability zones. Changing this + forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + certificate: + description: One or more certificate blocks (up to 10) as defined + below. + items: + properties: + expiry: + description: 'The expiration date of the certificate in + RFC3339 format: 2000-01-02T03:04:05Z.' + type: string + storeName: + description: The name of the Certificate Store where this + certificate should be stored. Possible values are CertificateAuthority + and Root. + type: string + subject: + description: The subject of the certificate. + type: string + thumbprint: + description: The thumbprint of the certificate. 
+ type: string + type: object + type: array + clientCertificateEnabled: + description: Enforce a client certificate to be presented on each + request to the gateway? This is only supported when SKU type + is Consumption. + type: boolean + delegation: + description: A delegation block as defined below. + properties: + subscriptionsEnabled: + description: Should subscription requests be delegated to + an external url? Defaults to false. + type: boolean + url: + description: The delegation URL. + type: string + userRegistrationEnabled: + description: Should user registration requests be delegated + to an external url? Defaults to false. + type: boolean + type: object + developerPortalUrl: + description: The URL for the Developer Portal associated with + this API Management service. + type: string + gatewayDisabled: + description: Disable the gateway in main region? This is only + supported when additional_location is set. + type: boolean + gatewayRegionalUrl: + description: The Region URL for the Gateway of the API Management + Service. + type: string + gatewayUrl: + description: The URL of the Gateway for the API Management Service. + type: string + hostnameConfiguration: + description: A hostname_configuration block as defined below. + properties: + developerPortal: + description: One or more developer_portal blocks as documented + below. + items: + properties: + certificateSource: + description: The source of the certificate. + type: string + certificateStatus: + description: The status of the certificate. + type: string + expiry: + description: 'The expiration date of the certificate + in RFC3339 format: 2000-01-02T03:04:05Z.' + type: string + hostName: + description: The Hostname to use for the Management + API. + type: string + keyVaultId: + description: The ID of the Key Vault Secret containing + the SSL Certificate, which must be should be of the + type application/x-pkcs12. 
+ type: string + negotiateClientCertificate: + description: Should Client Certificate Negotiation be + enabled for this Hostname? Defaults to false. + type: boolean + sslKeyvaultIdentityClientId: + description: System or User Assigned Managed identity + clientId as generated by Azure AD, which has GET access + to the keyVault containing the SSL certificate. + type: string + subject: + description: The subject of the certificate. + type: string + thumbprint: + description: The thumbprint of the certificate. + type: string + type: object + type: array + management: + description: One or more management blocks as documented below. + items: + properties: + certificateSource: + description: The source of the certificate. + type: string + certificateStatus: + description: The status of the certificate. + type: string + expiry: + description: 'The expiration date of the certificate + in RFC3339 format: 2000-01-02T03:04:05Z.' + type: string + hostName: + description: The Hostname to use for the Management + API. + type: string + keyVaultId: + description: The ID of the Key Vault Secret containing + the SSL Certificate, which must be should be of the + type application/x-pkcs12. + type: string + negotiateClientCertificate: + description: Should Client Certificate Negotiation be + enabled for this Hostname? Defaults to false. + type: boolean + sslKeyvaultIdentityClientId: + description: System or User Assigned Managed identity + clientId as generated by Azure AD, which has GET access + to the keyVault containing the SSL certificate. + type: string + subject: + description: The subject of the certificate. + type: string + thumbprint: + description: The thumbprint of the certificate. + type: string + type: object + type: array + portal: + description: One or more portal blocks as documented below. + items: + properties: + certificateSource: + description: The source of the certificate. + type: string + certificateStatus: + description: The status of the certificate. 
+ type: string + expiry: + description: 'The expiration date of the certificate + in RFC3339 format: 2000-01-02T03:04:05Z.' + type: string + hostName: + description: The Hostname to use for the Management + API. + type: string + keyVaultId: + description: The ID of the Key Vault Secret containing + the SSL Certificate, which must be should be of the + type application/x-pkcs12. + type: string + negotiateClientCertificate: + description: Should Client Certificate Negotiation be + enabled for this Hostname? Defaults to false. + type: boolean + sslKeyvaultIdentityClientId: + description: System or User Assigned Managed identity + clientId as generated by Azure AD, which has GET access + to the keyVault containing the SSL certificate. + type: string + subject: + description: The subject of the certificate. + type: string + thumbprint: + description: The thumbprint of the certificate. + type: string + type: object + type: array + proxy: + description: One or more proxy blocks as documented below. + items: + properties: + certificateSource: + description: The source of the certificate. + type: string + certificateStatus: + description: The status of the certificate. + type: string + defaultSslBinding: + description: Is the certificate associated with this + Hostname the Default SSL Certificate? This is used + when an SNI header isn't specified by a client. Defaults + to false. + type: boolean + expiry: + description: 'The expiration date of the certificate + in RFC3339 format: 2000-01-02T03:04:05Z.' + type: string + hostName: + description: The Hostname to use for the Management + API. + type: string + keyVaultId: + description: The ID of the Key Vault Secret containing + the SSL Certificate, which must be should be of the + type application/x-pkcs12. + type: string + negotiateClientCertificate: + description: Should Client Certificate Negotiation be + enabled for this Hostname? Defaults to false. 
+ type: boolean + sslKeyvaultIdentityClientId: + description: System or User Assigned Managed identity + clientId as generated by Azure AD, which has GET access + to the keyVault containing the SSL certificate. + type: string + subject: + description: The subject of the certificate. + type: string + thumbprint: + description: The thumbprint of the certificate. + type: string + type: object + type: array + scm: + description: One or more scm blocks as documented below. + items: + properties: + certificateSource: + description: The source of the certificate. + type: string + certificateStatus: + description: The status of the certificate. + type: string + expiry: + description: 'The expiration date of the certificate + in RFC3339 format: 2000-01-02T03:04:05Z.' + type: string + hostName: + description: The Hostname to use for the Management + API. + type: string + keyVaultId: + description: The ID of the Key Vault Secret containing + the SSL Certificate, which must be should be of the + type application/x-pkcs12. + type: string + negotiateClientCertificate: + description: Should Client Certificate Negotiation be + enabled for this Hostname? Defaults to false. + type: boolean + sslKeyvaultIdentityClientId: + description: System or User Assigned Managed identity + clientId as generated by Azure AD, which has GET access + to the keyVault containing the SSL certificate. + type: string + subject: + description: The subject of the certificate. + type: string + thumbprint: + description: The thumbprint of the certificate. + type: string + type: object + type: array + type: object + id: + description: The ID of the API Management Service. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of User Assigned Managed Identity IDs + to be assigned to this API Management Service. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Principal ID associated with this Managed + Service Identity. + type: string + tenantId: + description: The Tenant ID associated with this Managed Service + Identity. + type: string + type: + description: Specifies the type of Managed Service Identity + that should be configured on this API Management Service. + Possible values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + location: + description: The Azure location where the API Management Service + exists. Changing this forces a new resource to be created. + type: string + managementApiUrl: + description: The URL for the Management API associated with this + API Management service. + type: string + minApiVersion: + description: The version which the control plane API calls to + API Management service are limited with version equal to or + newer than. + type: string + notificationSenderEmail: + description: Email address from which the notification will be + sent. + type: string + policy: + description: A policy block as defined below. + items: + properties: + xmlContent: + description: The XML Content for this Policy. + type: string + xmlLink: + description: A link to an API Management Policy XML Document, + which must be publicly available. + type: string + type: object + type: array + portalUrl: + description: The URL for the Publisher Portal associated with + this API Management service. + type: string + privateIpAddresses: + description: The Private IP addresses of the API Management Service. + items: + type: string + type: array + protocols: + description: A protocols block as defined below. + properties: + enableHttp2: + description: Should HTTP/2 be supported by the API Management + Service? Defaults to false. + type: boolean + type: object + publicIpAddressId: + description: ID of a standard SKU IPv4 Public IP. 
+ type: string + publicIpAddresses: + description: The Public IP addresses of the API Management Service. + items: + type: string + type: array + publicNetworkAccessEnabled: + description: Is public access to the service allowed? Defaults + to true. + type: boolean + publisherEmail: + description: The email of publisher/company. + type: string + publisherName: + description: The name of publisher/company. + type: string + resourceGroupName: + description: The name of the Resource Group in which the API Management + Service should be exist. Changing this forces a new resource + to be created. + type: string + scmUrl: + description: The URL for the SCM (Source Code Management) Endpoint + associated with this API Management service. + type: string + security: + description: A security block as defined below. + properties: + enableBackendSsl30: + description: Should SSL 3.0 be enabled on the backend of the + gateway? Defaults to false. + type: boolean + enableBackendTls10: + description: Should TLS 1.0 be enabled on the backend of the + gateway? Defaults to false. + type: boolean + enableBackendTls11: + description: Should TLS 1.1 be enabled on the backend of the + gateway? Defaults to false. + type: boolean + enableFrontendSsl30: + description: Should SSL 3.0 be enabled on the frontend of + the gateway? Defaults to false. + type: boolean + enableFrontendTls10: + description: Should TLS 1.0 be enabled on the frontend of + the gateway? Defaults to false. + type: boolean + enableFrontendTls11: + description: Should TLS 1.1 be enabled on the frontend of + the gateway? Defaults to false. + type: boolean + tlsEcdheEcdsaWithAes128CbcShaCiphersEnabled: + description: Should the TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA + cipher be enabled? Defaults to false. + type: boolean + tlsEcdheEcdsaWithAes256CbcShaCiphersEnabled: + description: Should the TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA + cipher be enabled? Defaults to false. 
+ type: boolean + tlsEcdheRsaWithAes128CbcShaCiphersEnabled: + description: Should the TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA + cipher be enabled? Defaults to false. + type: boolean + tlsEcdheRsaWithAes256CbcShaCiphersEnabled: + description: Should the TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA + cipher be enabled? Defaults to false. + type: boolean + tlsRsaWithAes128CbcSha256CiphersEnabled: + description: Should the TLS_RSA_WITH_AES_128_CBC_SHA256 cipher + be enabled? Defaults to false. + type: boolean + tlsRsaWithAes128CbcShaCiphersEnabled: + description: Should the TLS_RSA_WITH_AES_128_CBC_SHA cipher + be enabled? Defaults to false. + type: boolean + tlsRsaWithAes128GcmSha256CiphersEnabled: + description: Should the TLS_RSA_WITH_AES_128_GCM_SHA256 cipher + be enabled? Defaults to false. + type: boolean + tlsRsaWithAes256CbcSha256CiphersEnabled: + description: Should the TLS_RSA_WITH_AES_256_CBC_SHA256 cipher + be enabled? Defaults to false. + type: boolean + tlsRsaWithAes256CbcShaCiphersEnabled: + description: Should the TLS_RSA_WITH_AES_256_CBC_SHA cipher + be enabled? Defaults to false. + type: boolean + tlsRsaWithAes256GcmSha384CiphersEnabled: + description: Should the TLS_RSA_WITH_AES_256_GCM_SHA384 cipher + be enabled? Defaults to false. + type: boolean + tripleDesCiphersEnabled: + description: Should the TLS_RSA_WITH_3DES_EDE_CBC_SHA cipher + be enabled for alL TLS versions (1.0, 1.1 and 1.2)? + type: boolean + type: object + signIn: + description: A sign_in block as defined below. + properties: + enabled: + description: Should anonymous users be redirected to the sign + in page? + type: boolean + type: object + signUp: + description: A sign_up block as defined below. + properties: + enabled: + description: Can users sign up on the development portal? + type: boolean + termsOfService: + description: A terms_of_service block as defined below. + properties: + consentRequired: + description: Should the user be asked for consent during + sign up? 
+ type: boolean + enabled: + description: Should Terms of Service be displayed during + sign up?. + type: boolean + text: + description: The Terms of Service which users are required + to agree to in order to sign up. + type: string + type: object + type: object + skuName: + description: 'sku_name is a string consisting of two parts separated + by an underscore(_). The first part is the name, valid values + include: Consumption, Developer, Basic, Standard and Premium. + The second part is the capacity (e.g. the number of deployed + units of the sku), which must be a positive integer (e.g. Developer_1).' + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags assigned to the resource. + type: object + x-kubernetes-map-type: granular + tenantAccess: + description: A tenant_access block as defined below. + properties: + enabled: + description: Should the access to the management API be enabled? + type: boolean + tenantId: + description: The identifier for the tenant access information + contract. + type: string + type: object + virtualNetworkConfiguration: + description: A virtual_network_configuration block as defined + below. Required when virtual_network_type is External or Internal. + properties: + subnetId: + description: The id of the subnet that will be used for the + API Management. + type: string + type: object + virtualNetworkType: + description: 'The type of virtual network you want to use, valid + values include: None, External, Internal. Defaults to None.' + type: string + zones: + description: Specifies a list of Availability Zones in which this + API Management service should be located. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. 
+ properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/apimanagement.azure.upbound.io_namedvalues.yaml b/package/crds/apimanagement.azure.upbound.io_namedvalues.yaml index fc2060d38..77efbcb0b 100644 --- a/package/crds/apimanagement.azure.upbound.io_namedvalues.yaml +++ b/package/crds/apimanagement.azure.upbound.io_namedvalues.yaml @@ -606,3 +606,585 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: NamedValue is the Schema for the NamedValues API. Manages an + API Management Named Value. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: NamedValueSpec defines the desired state of NamedValue + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + apiManagementName: + description: The name of the API Management Service in which the + API Management Named Value should exist. Changing this forces + a new resource to be created. + type: string + apiManagementNameRef: + description: Reference to a Management in apimanagement to populate + apiManagementName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + apiManagementNameSelector: + description: Selector for a Management in apimanagement to populate + apiManagementName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + displayName: + description: The display name of this API Management Named Value. + type: string + resourceGroupName: + description: The name of the Resource Group in which the API Management + Named Value should exist. Changing this forces a new resource + to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + secret: + description: Specifies whether the API Management Named Value + is secret. Valid values are true or false. The default value + is false. + type: boolean + tags: + description: A list of tags to be applied to the API Management + Named Value. + items: + type: string + type: array + valueFromKeyVault: + description: A value_from_key_vault block as defined below. + properties: + identityClientId: + description: The client ID of User Assigned Identity, for + the API Management Service, which will be used to access + the key vault secret. The System Assigned Identity will + be used in absence. + type: string + secretId: + description: The resource ID of the Key Vault Secret. + type: string + type: object + valueSecretRef: + description: The value of this API Management Named Value. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + displayName: + description: The display name of this API Management Named Value. 
+ type: string + secret: + description: Specifies whether the API Management Named Value + is secret. Valid values are true or false. The default value + is false. + type: boolean + tags: + description: A list of tags to be applied to the API Management + Named Value. + items: + type: string + type: array + valueFromKeyVault: + description: A value_from_key_vault block as defined below. + properties: + identityClientId: + description: The client ID of User Assigned Identity, for + the API Management Service, which will be used to access + the key vault secret. The System Assigned Identity will + be used in absence. + type: string + secretId: + description: The resource ID of the Key Vault Secret. + type: string + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.displayName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.displayName) + || (has(self.initProvider) && has(self.initProvider.displayName))' + status: + description: NamedValueStatus defines the observed state of NamedValue. + properties: + atProvider: + properties: + apiManagementName: + description: The name of the API Management Service in which the + API Management Named Value should exist. Changing this forces + a new resource to be created. + type: string + displayName: + description: The display name of this API Management Named Value. + type: string + id: + description: The ID of the API Management Named Value. + type: string + resourceGroupName: + description: The name of the Resource Group in which the API Management + Named Value should exist. Changing this forces a new resource + to be created. + type: string + secret: + description: Specifies whether the API Management Named Value + is secret. Valid values are true or false. The default value + is false. + type: boolean + tags: + description: A list of tags to be applied to the API Management + Named Value. + items: + type: string + type: array + valueFromKeyVault: + description: A value_from_key_vault block as defined below. 
+ properties: + identityClientId: + description: The client ID of User Assigned Identity, for + the API Management Service, which will be used to access + the key vault secret. The System Assigned Identity will + be used in absence. + type: string + secretId: + description: The resource ID of the Key Vault Secret. + type: string + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/appconfiguration.azure.upbound.io_configurations.yaml b/package/crds/appconfiguration.azure.upbound.io_configurations.yaml index b2b5bfc31..5786db628 100644 --- a/package/crds/appconfiguration.azure.upbound.io_configurations.yaml +++ b/package/crds/appconfiguration.azure.upbound.io_configurations.yaml @@ -1039,3 +1039,1008 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Configuration is the Schema for the Configurations API. Manages + an Azure App Configuration. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ConfigurationSpec defines the desired state of Configuration + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + encryption: + description: An encryption block as defined below. + properties: + identityClientId: + description: Specifies the client id of the identity which + will be used to access key vault. + type: string + identityClientIdRef: + description: Reference to a UserAssignedIdentity in managedidentity + to populate identityClientId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + identityClientIdSelector: + description: Selector for a UserAssignedIdentity in managedidentity + to populate identityClientId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + keyVaultKeyIdentifier: + description: Specifies the URI of the key vault key used to + encrypt data. + type: string + keyVaultKeyIdentifierRef: + description: Reference to a Key in keyvault to populate keyVaultKeyIdentifier. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + keyVaultKeyIdentifierSelector: + description: Selector for a Key in keyvault to populate keyVaultKeyIdentifier. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of User Assigned Managed Identity IDs + to be assigned to this App Configuration. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this App Configuration. Possible + values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + localAuthEnabled: + description: Whether local authentication methods is enabled. + Defaults to true. + type: boolean + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + publicNetworkAccess: + description: The Public Network Access setting of the App Configuration. + Possible values are Enabled and Disabled. + type: string + purgeProtectionEnabled: + description: Whether Purge Protection is enabled. This field only + works for standard sku. Defaults to false. + type: boolean + replica: + description: One or more replica blocks as defined below. + items: + properties: + location: + description: Specifies the supported Azure location where + the replica exists. + type: string + name: + description: Specifies the name of the replica. + type: string + type: object + type: array + resourceGroupName: + description: The name of the resource group in which to create + the App Configuration. Changing this forces a new resource to + be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sku: + description: The SKU name of the App Configuration. Possible values + are free and standard. Defaults to free. + type: string + softDeleteRetentionDays: + description: The number of days that items should be retained + for once soft-deleted. This field only works for standard sku. + This value can be between 1 and 7 days. Defaults to 7. Changing + this forces a new resource to be created. + type: number + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + encryption: + description: An encryption block as defined below. + properties: + identityClientId: + description: Specifies the client id of the identity which + will be used to access key vault. + type: string + identityClientIdRef: + description: Reference to a UserAssignedIdentity in managedidentity + to populate identityClientId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + identityClientIdSelector: + description: Selector for a UserAssignedIdentity in managedidentity + to populate identityClientId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + keyVaultKeyIdentifier: + description: Specifies the URI of the key vault key used to + encrypt data. 
+ type: string + keyVaultKeyIdentifierRef: + description: Reference to a Key in keyvault to populate keyVaultKeyIdentifier. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + keyVaultKeyIdentifierSelector: + description: Selector for a Key in keyvault to populate keyVaultKeyIdentifier. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of User Assigned Managed Identity IDs + to be assigned to this App Configuration. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this App Configuration. Possible + values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + localAuthEnabled: + description: Whether local authentication methods is enabled. + Defaults to true. + type: boolean + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + publicNetworkAccess: + description: The Public Network Access setting of the App Configuration. + Possible values are Enabled and Disabled. + type: string + purgeProtectionEnabled: + description: Whether Purge Protection is enabled. This field only + works for standard sku. Defaults to false. + type: boolean + replica: + description: One or more replica blocks as defined below. + items: + properties: + location: + description: Specifies the supported Azure location where + the replica exists. + type: string + name: + description: Specifies the name of the replica. + type: string + type: object + type: array + sku: + description: The SKU name of the App Configuration. Possible values + are free and standard. Defaults to free. + type: string + softDeleteRetentionDays: + description: The number of days that items should be retained + for once soft-deleted. This field only works for standard sku. 
+ This value can be between 1 and 7 days. Defaults to 7. Changing + this forces a new resource to be created. + type: number + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + status: + description: ConfigurationStatus defines the observed state of Configuration. + properties: + atProvider: + properties: + encryption: + description: An encryption block as defined below. + properties: + identityClientId: + description: Specifies the client id of the identity which + will be used to access key vault. + type: string + keyVaultKeyIdentifier: + description: Specifies the URI of the key vault key used to + encrypt data. + type: string + type: object + endpoint: + description: The URL of the App Configuration. + type: string + id: + description: The App Configuration ID. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of User Assigned Managed Identity IDs + to be assigned to this App Configuration. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Principal ID associated with this Managed + Service Identity. + type: string + tenantId: + description: The Tenant ID associated with this Managed Service + Identity. + type: string + type: + description: Specifies the type of Managed Service Identity + that should be configured on this App Configuration. Possible + values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + localAuthEnabled: + description: Whether local authentication methods is enabled. + Defaults to true. + type: boolean + location: + description: Specifies the supported Azure location where the + resource exists. 
Changing this forces a new resource to be created. + type: string + primaryReadKey: + description: A primary_read_key block as defined below containing + the primary read access key. + items: + properties: + connectionString: + description: The Connection String for this Access Key - + comprising of the Endpoint, ID and Secret. + type: string + id: + description: The ID of the Access Key. + type: string + secret: + description: The Secret of the Access Key. + type: string + type: object + type: array + primaryWriteKey: + description: A primary_write_key block as defined below containing + the primary write access key. + items: + properties: + connectionString: + description: The Connection String for this Access Key - + comprising of the Endpoint, ID and Secret. + type: string + id: + description: The ID of the Access Key. + type: string + secret: + description: The Secret of the Access Key. + type: string + type: object + type: array + publicNetworkAccess: + description: The Public Network Access setting of the App Configuration. + Possible values are Enabled and Disabled. + type: string + purgeProtectionEnabled: + description: Whether Purge Protection is enabled. This field only + works for standard sku. Defaults to false. + type: boolean + replica: + description: One or more replica blocks as defined below. + items: + properties: + endpoint: + description: The URL of the App Configuration Replica. + type: string + id: + description: The ID of the App Configuration Replica. + type: string + location: + description: Specifies the supported Azure location where + the replica exists. + type: string + name: + description: Specifies the name of the replica. + type: string + type: object + type: array + resourceGroupName: + description: The name of the resource group in which to create + the App Configuration. Changing this forces a new resource to + be created. 
+ type: string + secondaryReadKey: + description: A secondary_read_key block as defined below containing + the secondary read access key. + items: + properties: + connectionString: + description: The Connection String for this Access Key - + comprising of the Endpoint, ID and Secret. + type: string + id: + description: The ID of the Access Key. + type: string + secret: + description: The Secret of the Access Key. + type: string + type: object + type: array + secondaryWriteKey: + description: A secondary_write_key block as defined below containing + the secondary write access key. + items: + properties: + connectionString: + description: The Connection String for this Access Key - + comprising of the Endpoint, ID and Secret. + type: string + id: + description: The ID of the Access Key. + type: string + secret: + description: The Secret of the Access Key. + type: string + type: object + type: array + sku: + description: The SKU name of the App Configuration. Possible values + are free and standard. Defaults to free. + type: string + softDeleteRetentionDays: + description: The number of days that items should be retained + for once soft-deleted. This field only works for standard sku. + This value can be between 1 and 7 days. Defaults to 7. Changing + this forces a new resource to be created. + type: number + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/appplatform.azure.upbound.io_springcloudapiportals.yaml b/package/crds/appplatform.azure.upbound.io_springcloudapiportals.yaml index d156c7b8f..9456d3465 100644 --- a/package/crds/appplatform.azure.upbound.io_springcloudapiportals.yaml +++ b/package/crds/appplatform.azure.upbound.io_springcloudapiportals.yaml @@ -706,3 +706,685 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: SpringCloudAPIPortal is the Schema for the SpringCloudAPIPortals + API. Manages a Spring Cloud API Portal. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SpringCloudAPIPortalSpec defines the desired state of SpringCloudAPIPortal + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + apiTryOutEnabled: + description: Specifies whether the API try-out feature is enabled. + When enabled, users can try out the API by sending requests + and viewing responses in API portal. + type: boolean + gatewayIds: + description: Specifies a list of Spring Cloud Gateway. + items: + type: string + type: array + x-kubernetes-list-type: set + gatewayIdsRefs: + description: References to SpringCloudGateway in appplatform to + populate gatewayIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + gatewayIdsSelector: + description: Selector for a list of SpringCloudGateway in appplatform + to populate gatewayIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + httpsOnlyEnabled: + description: is only https is allowed? + type: boolean + instanceCount: + description: Specifies the required instance count of the Spring + Cloud API Portal. Possible Values are between 1 and 500. Defaults + to 1 if not specified. 
+ type: number + publicNetworkAccessEnabled: + description: Is the public network access enabled? + type: boolean + springCloudServiceId: + description: The ID of the Spring Cloud Service. Changing this + forces a new Spring Cloud API Portal to be created. + type: string + springCloudServiceIdRef: + description: Reference to a SpringCloudService in appplatform + to populate springCloudServiceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + springCloudServiceIdSelector: + description: Selector for a SpringCloudService in appplatform + to populate springCloudServiceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sso: + description: A sso block as defined below. + properties: + clientId: + description: The public identifier for the application. + type: string + clientSecret: + description: The secret known only to the application and + the authorization server. + type: string + issuerUri: + description: The URI of Issuer Identifier. + type: string + scope: + description: It defines the specific actions applications + can be allowed to do on a user's behalf. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + apiTryOutEnabled: + description: Specifies whether the API try-out feature is enabled. 
+ When enabled, users can try out the API by sending requests + and viewing responses in API portal. + type: boolean + gatewayIds: + description: Specifies a list of Spring Cloud Gateway. + items: + type: string + type: array + x-kubernetes-list-type: set + gatewayIdsRefs: + description: References to SpringCloudGateway in appplatform to + populate gatewayIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + gatewayIdsSelector: + description: Selector for a list of SpringCloudGateway in appplatform + to populate gatewayIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + httpsOnlyEnabled: + description: is only https is allowed? + type: boolean + instanceCount: + description: Specifies the required instance count of the Spring + Cloud API Portal. Possible Values are between 1 and 500. Defaults + to 1 if not specified. + type: number + publicNetworkAccessEnabled: + description: Is the public network access enabled? + type: boolean + sso: + description: A sso block as defined below. + properties: + clientId: + description: The public identifier for the application. + type: string + clientSecret: + description: The secret known only to the application and + the authorization server. + type: string + issuerUri: + description: The URI of Issuer Identifier. + type: string + scope: + description: It defines the specific actions applications + can be allowed to do on a user's behalf. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. 
If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: SpringCloudAPIPortalStatus defines the observed state of + SpringCloudAPIPortal. + properties: + atProvider: + properties: + apiTryOutEnabled: + description: Specifies whether the API try-out feature is enabled. + When enabled, users can try out the API by sending requests + and viewing responses in API portal. + type: boolean + gatewayIds: + description: Specifies a list of Spring Cloud Gateway. + items: + type: string + type: array + x-kubernetes-list-type: set + httpsOnlyEnabled: + description: is only https is allowed? + type: boolean + id: + description: The ID of the Spring Cloud API Portal. 
+ type: string + instanceCount: + description: Specifies the required instance count of the Spring + Cloud API Portal. Possible Values are between 1 and 500. Defaults + to 1 if not specified. + type: number + publicNetworkAccessEnabled: + description: Is the public network access enabled? + type: boolean + springCloudServiceId: + description: The ID of the Spring Cloud Service. Changing this + forces a new Spring Cloud API Portal to be created. + type: string + sso: + description: A sso block as defined below. + properties: + clientId: + description: The public identifier for the application. + type: string + clientSecret: + description: The secret known only to the application and + the authorization server. + type: string + issuerUri: + description: The URI of Issuer Identifier. + type: string + scope: + description: It defines the specific actions applications + can be allowed to do on a user's behalf. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + url: + description: TODO. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. 
+ type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/appplatform.azure.upbound.io_springcloudapps.yaml b/package/crds/appplatform.azure.upbound.io_springcloudapps.yaml index dab3513f0..dceb07b52 100644 --- a/package/crds/appplatform.azure.upbound.io_springcloudapps.yaml +++ b/package/crds/appplatform.azure.upbound.io_springcloudapps.yaml @@ -827,3 +827,792 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: SpringCloudApp is the Schema for the SpringCloudApps API. Manage + an Azure Spring Cloud Application. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SpringCloudAppSpec defines the desired state of SpringCloudApp + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + addonJson: + description: A JSON object that contains the addon configurations + of the Spring Cloud Service. + type: string + customPersistentDisk: + description: A custom_persistent_disk block as defined below. + items: + properties: + mountOptions: + description: These are the mount options for a persistent + disk. + items: + type: string + type: array + x-kubernetes-list-type: set + mountPath: + description: The mount path of the persistent disk. + type: string + readOnlyEnabled: + description: Indicates whether the persistent disk is a + readOnly one. + type: boolean + shareName: + description: The share name of the Azure File share. 
+ type: string + storageName: + description: The name of the Spring Cloud Storage. + type: string + type: object + type: array + httpsOnly: + description: Is only HTTPS allowed? Defaults to false. + type: boolean + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of User Assigned Managed Identity IDs + to be assigned to this Spring Cloud Application. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Spring Cloud Application. + Possible values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + ingressSettings: + description: An ingress_settings block as defined below. + properties: + backendProtocol: + description: Specifies how ingress should communicate with + this app backend service. Allowed values are GRPC and Default. + Defaults to Default. + type: string + readTimeoutInSeconds: + description: Specifies the ingress read time out in seconds. + Defaults to 300. + type: number + sendTimeoutInSeconds: + description: Specifies the ingress send time out in seconds. + Defaults to 60. + type: number + sessionAffinity: + description: Specifies the type of the affinity, set this + to Cookie to enable session affinity. Allowed values are + Cookie and None. Defaults to None. + type: string + sessionCookieMaxAge: + description: Specifies the time in seconds until the cookie + expires. + type: number + type: object + isPublic: + description: Does the Spring Cloud Application have public endpoint? + Defaults to false. + type: boolean + persistentDisk: + description: An persistent_disk block as defined below. + properties: + mountPath: + description: Specifies the mount path of the persistent disk. + Defaults to /persistent. + type: string + sizeInGb: + description: Specifies the size of the persistent disk in + GB. 
Possible values are between 0 and 50. + type: number + type: object + publicEndpointEnabled: + description: Should the App in vnet injection instance exposes + endpoint which could be accessed from Internet? + type: boolean + resourceGroupName: + description: Specifies the name of the resource group in which + to create the Spring Cloud Application. Changing this forces + a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + serviceName: + description: Specifies the name of the Spring Cloud Service resource. + Changing this forces a new resource to be created. + type: string + serviceNameRef: + description: Reference to a SpringCloudService in appplatform + to populate serviceName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceNameSelector: + description: Selector for a SpringCloudService in appplatform + to populate serviceName. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tlsEnabled: + description: Is End to End TLS Enabled? Defaults to false. + type: boolean + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. 
+ properties: + addonJson: + description: A JSON object that contains the addon configurations + of the Spring Cloud Service. + type: string + customPersistentDisk: + description: A custom_persistent_disk block as defined below. + items: + properties: + mountOptions: + description: These are the mount options for a persistent + disk. + items: + type: string + type: array + x-kubernetes-list-type: set + mountPath: + description: The mount path of the persistent disk. + type: string + readOnlyEnabled: + description: Indicates whether the persistent disk is a + readOnly one. + type: boolean + shareName: + description: The share name of the Azure File share. + type: string + storageName: + description: The name of the Spring Cloud Storage. + type: string + type: object + type: array + httpsOnly: + description: Is only HTTPS allowed? Defaults to false. + type: boolean + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of User Assigned Managed Identity IDs + to be assigned to this Spring Cloud Application. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Spring Cloud Application. + Possible values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + ingressSettings: + description: An ingress_settings block as defined below. + properties: + backendProtocol: + description: Specifies how ingress should communicate with + this app backend service. Allowed values are GRPC and Default. + Defaults to Default. + type: string + readTimeoutInSeconds: + description: Specifies the ingress read time out in seconds. + Defaults to 300. + type: number + sendTimeoutInSeconds: + description: Specifies the ingress send time out in seconds. + Defaults to 60. 
+ type: number + sessionAffinity: + description: Specifies the type of the affinity, set this + to Cookie to enable session affinity. Allowed values are + Cookie and None. Defaults to None. + type: string + sessionCookieMaxAge: + description: Specifies the time in seconds until the cookie + expires. + type: number + type: object + isPublic: + description: Does the Spring Cloud Application have public endpoint? + Defaults to false. + type: boolean + persistentDisk: + description: An persistent_disk block as defined below. + properties: + mountPath: + description: Specifies the mount path of the persistent disk. + Defaults to /persistent. + type: string + sizeInGb: + description: Specifies the size of the persistent disk in + GB. Possible values are between 0 and 50. + type: number + type: object + publicEndpointEnabled: + description: Should the App in vnet injection instance exposes + endpoint which could be accessed from Internet? + type: boolean + tlsEnabled: + description: Is End to End TLS Enabled? Defaults to false. + type: boolean + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: SpringCloudAppStatus defines the observed state of SpringCloudApp. + properties: + atProvider: + properties: + addonJson: + description: A JSON object that contains the addon configurations + of the Spring Cloud Service. + type: string + customPersistentDisk: + description: A custom_persistent_disk block as defined below. + items: + properties: + mountOptions: + description: These are the mount options for a persistent + disk. + items: + type: string + type: array + x-kubernetes-list-type: set + mountPath: + description: The mount path of the persistent disk. + type: string + readOnlyEnabled: + description: Indicates whether the persistent disk is a + readOnly one. + type: boolean + shareName: + description: The share name of the Azure File share. + type: string + storageName: + description: The name of the Spring Cloud Storage. + type: string + type: object + type: array + fqdn: + description: The Fully Qualified DNS Name of the Spring Application + in the service. 
+ type: string + httpsOnly: + description: Is only HTTPS allowed? Defaults to false. + type: boolean + id: + description: The ID of the Spring Cloud Application. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of User Assigned Managed Identity IDs + to be assigned to this Spring Cloud Application. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Principal ID for the Service Principal associated + with the Managed Service Identity of this Spring Cloud Application. + type: string + tenantId: + description: The Tenant ID for the Service Principal associated + with the Managed Service Identity of this Spring Cloud Application. + type: string + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Spring Cloud Application. + Possible values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + ingressSettings: + description: An ingress_settings block as defined below. + properties: + backendProtocol: + description: Specifies how ingress should communicate with + this app backend service. Allowed values are GRPC and Default. + Defaults to Default. + type: string + readTimeoutInSeconds: + description: Specifies the ingress read time out in seconds. + Defaults to 300. + type: number + sendTimeoutInSeconds: + description: Specifies the ingress send time out in seconds. + Defaults to 60. + type: number + sessionAffinity: + description: Specifies the type of the affinity, set this + to Cookie to enable session affinity. Allowed values are + Cookie and None. Defaults to None. + type: string + sessionCookieMaxAge: + description: Specifies the time in seconds until the cookie + expires. + type: number + type: object + isPublic: + description: Does the Spring Cloud Application have public endpoint? + Defaults to false. 
+ type: boolean + persistentDisk: + description: An persistent_disk block as defined below. + properties: + mountPath: + description: Specifies the mount path of the persistent disk. + Defaults to /persistent. + type: string + sizeInGb: + description: Specifies the size of the persistent disk in + GB. Possible values are between 0 and 50. + type: number + type: object + publicEndpointEnabled: + description: Should the App in vnet injection instance exposes + endpoint which could be accessed from Internet? + type: boolean + resourceGroupName: + description: Specifies the name of the resource group in which + to create the Spring Cloud Application. Changing this forces + a new resource to be created. + type: string + serviceName: + description: Specifies the name of the Spring Cloud Service resource. + Changing this forces a new resource to be created. + type: string + tlsEnabled: + description: Is End to End TLS Enabled? Defaults to false. + type: boolean + url: + description: The public endpoint of the Spring Cloud Application. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. 
+ type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/appplatform.azure.upbound.io_springcloudbuilddeployments.yaml b/package/crds/appplatform.azure.upbound.io_springcloudbuilddeployments.yaml index ab148d79e..e2859ac74 100644 --- a/package/crds/appplatform.azure.upbound.io_springcloudbuilddeployments.yaml +++ b/package/crds/appplatform.azure.upbound.io_springcloudbuilddeployments.yaml @@ -542,3 +542,518 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: SpringCloudBuildDeployment is the Schema for the SpringCloudBuildDeployments + API. Manages a Spring Cloud Build Deployment. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SpringCloudBuildDeploymentSpec defines the desired state + of SpringCloudBuildDeployment + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + addonJson: + description: A JSON object that contains the addon configurations + of the Spring Cloud Build Deployment. + type: string + applicationPerformanceMonitoringIds: + description: Specifies a list of Spring Cloud Application Performance + Monitoring IDs. + items: + type: string + type: array + buildResultId: + description: The ID of the Spring Cloud Build Result. + type: string + environmentVariables: + additionalProperties: + type: string + description: Specifies the environment variables of the Spring + Cloud Deployment as a map of key-value pairs. 
+ type: object + x-kubernetes-map-type: granular + instanceCount: + description: Specifies the required instance count of the Spring + Cloud Deployment. Possible Values are between 1 and 500. Defaults + to 1 if not specified. + type: number + quota: + description: A quota block as defined below. + properties: + cpu: + description: Specifies the required cpu of the Spring Cloud + Deployment. Possible Values are 500m, 1, 2, 3 and 4. Defaults + to 1 if not specified. + type: string + memory: + description: Specifies the required memory size of the Spring + Cloud Deployment. Possible Values are 512Mi, 1Gi, 2Gi, 3Gi, + 4Gi, 5Gi, 6Gi, 7Gi, and 8Gi. Defaults to 1Gi if not specified. + type: string + type: object + springCloudAppId: + description: The ID of the Spring Cloud Service. Changing this + forces a new Spring Cloud Build Deployment to be created. + type: string + springCloudAppIdRef: + description: Reference to a SpringCloudApp in appplatform to populate + springCloudAppId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + springCloudAppIdSelector: + description: Selector for a SpringCloudApp in appplatform to populate + springCloudAppId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + addonJson: + description: A JSON object that contains the addon configurations + of the Spring Cloud Build Deployment. 
+ type: string + applicationPerformanceMonitoringIds: + description: Specifies a list of Spring Cloud Application Performance + Monitoring IDs. + items: + type: string + type: array + buildResultId: + description: The ID of the Spring Cloud Build Result. + type: string + environmentVariables: + additionalProperties: + type: string + description: Specifies the environment variables of the Spring + Cloud Deployment as a map of key-value pairs. + type: object + x-kubernetes-map-type: granular + instanceCount: + description: Specifies the required instance count of the Spring + Cloud Deployment. Possible Values are between 1 and 500. Defaults + to 1 if not specified. + type: number + quota: + description: A quota block as defined below. + properties: + cpu: + description: Specifies the required cpu of the Spring Cloud + Deployment. Possible Values are 500m, 1, 2, 3 and 4. Defaults + to 1 if not specified. + type: string + memory: + description: Specifies the required memory size of the Spring + Cloud Deployment. Possible Values are 512Mi, 1Gi, 2Gi, 3Gi, + 4Gi, 5Gi, 6Gi, 7Gi, and 8Gi. Defaults to 1Gi if not specified. + type: string + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.buildResultId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.buildResultId) + || (has(self.initProvider) && has(self.initProvider.buildResultId))' + status: + description: SpringCloudBuildDeploymentStatus defines the observed state + of SpringCloudBuildDeployment. + properties: + atProvider: + properties: + addonJson: + description: A JSON object that contains the addon configurations + of the Spring Cloud Build Deployment. + type: string + applicationPerformanceMonitoringIds: + description: Specifies a list of Spring Cloud Application Performance + Monitoring IDs. + items: + type: string + type: array + buildResultId: + description: The ID of the Spring Cloud Build Result. 
+ type: string + environmentVariables: + additionalProperties: + type: string + description: Specifies the environment variables of the Spring + Cloud Deployment as a map of key-value pairs. + type: object + x-kubernetes-map-type: granular + id: + description: The ID of the Spring Cloud Build Deployment. + type: string + instanceCount: + description: Specifies the required instance count of the Spring + Cloud Deployment. Possible Values are between 1 and 500. Defaults + to 1 if not specified. + type: number + quota: + description: A quota block as defined below. + properties: + cpu: + description: Specifies the required cpu of the Spring Cloud + Deployment. Possible Values are 500m, 1, 2, 3 and 4. Defaults + to 1 if not specified. + type: string + memory: + description: Specifies the required memory size of the Spring + Cloud Deployment. Possible Values are 512Mi, 1Gi, 2Gi, 3Gi, + 4Gi, 5Gi, 6Gi, 7Gi, and 8Gi. Defaults to 1Gi if not specified. + type: string + type: object + springCloudAppId: + description: The ID of the Spring Cloud Service. Changing this + forces a new Spring Cloud Build Deployment to be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
+ format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/appplatform.azure.upbound.io_springcloudbuilders.yaml b/package/crds/appplatform.azure.upbound.io_springcloudbuilders.yaml index 4b33942ca..895056eec 100644 --- a/package/crds/appplatform.azure.upbound.io_springcloudbuilders.yaml +++ b/package/crds/appplatform.azure.upbound.io_springcloudbuilders.yaml @@ -598,3 +598,577 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: SpringCloudBuilder is the Schema for the SpringCloudBuilders + API. Manages a Spring Cloud Builder. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SpringCloudBuilderSpec defines the desired state of SpringCloudBuilder + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + buildPackGroup: + description: One or more build_pack_group blocks as defined below. + items: + properties: + buildPackIds: + description: Specifies a list of the build pack's ID. + items: + type: string + type: array + name: + description: The name which should be used for this build + pack group. + type: string + type: object + type: array + name: + description: The name which should be used for this Spring Cloud + Builder. Changing this forces a new Spring Cloud Builder to + be created. 
+ type: string + springCloudServiceId: + description: The ID of the Spring Cloud Service. Changing this + forces a new Spring Cloud Builder to be created. + type: string + springCloudServiceIdRef: + description: Reference to a SpringCloudService in appplatform + to populate springCloudServiceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + springCloudServiceIdSelector: + description: Selector for a SpringCloudService in appplatform + to populate springCloudServiceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + stack: + description: A stack block as defined below. + properties: + id: + description: Specifies the ID of the ClusterStack. + type: string + version: + description: Specifies the version of the ClusterStack + type: string + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + buildPackGroup: + description: One or more build_pack_group blocks as defined below. + items: + properties: + buildPackIds: + description: Specifies a list of the build pack's ID. + items: + type: string + type: array + name: + description: The name which should be used for this build + pack group. + type: string + type: object + type: array + name: + description: The name which should be used for this Spring Cloud + Builder. Changing this forces a new Spring Cloud Builder to + be created. 
+ type: string + springCloudServiceId: + description: The ID of the Spring Cloud Service. Changing this + forces a new Spring Cloud Builder to be created. + type: string + springCloudServiceIdRef: + description: Reference to a SpringCloudService in appplatform + to populate springCloudServiceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + springCloudServiceIdSelector: + description: Selector for a SpringCloudService in appplatform + to populate springCloudServiceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + stack: + description: A stack block as defined below. + properties: + id: + description: Specifies the ID of the ClusterStack. + type: string + version: + description: Specifies the version of the ClusterStack + type: string + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.buildPackGroup is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.buildPackGroup) + || (has(self.initProvider) && has(self.initProvider.buildPackGroup))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.stack is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.stack) + || (has(self.initProvider) && has(self.initProvider.stack))' + status: + description: SpringCloudBuilderStatus defines the observed state of SpringCloudBuilder. + properties: + atProvider: + properties: + buildPackGroup: + description: One or more build_pack_group blocks as defined below. + items: + properties: + buildPackIds: + description: Specifies a list of the build pack's ID. + items: + type: string + type: array + name: + description: The name which should be used for this build + pack group. 
+ type: string + type: object + type: array + id: + description: The ID of the Spring Cloud Builder. + type: string + name: + description: The name which should be used for this Spring Cloud + Builder. Changing this forces a new Spring Cloud Builder to + be created. + type: string + springCloudServiceId: + description: The ID of the Spring Cloud Service. Changing this + forces a new Spring Cloud Builder to be created. + type: string + stack: + description: A stack block as defined below. + properties: + id: + description: Specifies the ID of the ClusterStack. + type: string + version: + description: Specifies the version of the ClusterStack + type: string + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/appplatform.azure.upbound.io_springcloudbuildpackbindings.yaml b/package/crds/appplatform.azure.upbound.io_springcloudbuildpackbindings.yaml index 8b96c3b4d..d2508a92b 100644 --- a/package/crds/appplatform.azure.upbound.io_springcloudbuildpackbindings.yaml +++ b/package/crds/appplatform.azure.upbound.io_springcloudbuildpackbindings.yaml @@ -486,3 +486,462 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: SpringCloudBuildPackBinding is the Schema for the SpringCloudBuildPackBindings + API. Manages a Spring Cloud Build Pack Binding. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SpringCloudBuildPackBindingSpec defines the desired state + of SpringCloudBuildPackBinding + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + bindingType: + description: Specifies the Build Pack Binding Type. Allowed values + are ApacheSkyWalking, AppDynamics, ApplicationInsights, Dynatrace, + ElasticAPM and NewRelic. + type: string + launch: + description: A launch block as defined below. + properties: + properties: + additionalProperties: + type: string + description: Specifies a map of non-sensitive properties for + launchProperties. + type: object + x-kubernetes-map-type: granular + secrets: + additionalProperties: + type: string + description: Specifies a map of sensitive properties for launchProperties. 
+ type: object + x-kubernetes-map-type: granular + type: object + springCloudBuilderId: + description: The ID of the Spring Cloud Builder. Changing this + forces a new Spring Cloud Build Pack Binding to be created. + type: string + springCloudBuilderIdRef: + description: Reference to a SpringCloudBuilder in appplatform + to populate springCloudBuilderId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + springCloudBuilderIdSelector: + description: Selector for a SpringCloudBuilder in appplatform + to populate springCloudBuilderId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + bindingType: + description: Specifies the Build Pack Binding Type. Allowed values + are ApacheSkyWalking, AppDynamics, ApplicationInsights, Dynatrace, + ElasticAPM and NewRelic. + type: string + launch: + description: A launch block as defined below. + properties: + properties: + additionalProperties: + type: string + description: Specifies a map of non-sensitive properties for + launchProperties. + type: object + x-kubernetes-map-type: granular + secrets: + additionalProperties: + type: string + description: Specifies a map of sensitive properties for launchProperties. + type: object + x-kubernetes-map-type: granular + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. 
+ ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: SpringCloudBuildPackBindingStatus defines the observed state + of SpringCloudBuildPackBinding. + properties: + atProvider: + properties: + bindingType: + description: Specifies the Build Pack Binding Type. Allowed values + are ApacheSkyWalking, AppDynamics, ApplicationInsights, Dynatrace, + ElasticAPM and NewRelic. + type: string + id: + description: The ID of the Spring Cloud Build Pack Binding. + type: string + launch: + description: A launch block as defined below. 
+ properties: + properties: + additionalProperties: + type: string + description: Specifies a map of non-sensitive properties for + launchProperties. + type: object + x-kubernetes-map-type: granular + secrets: + additionalProperties: + type: string + description: Specifies a map of sensitive properties for launchProperties. + type: object + x-kubernetes-map-type: granular + type: object + springCloudBuilderId: + description: The ID of the Spring Cloud Builder. Changing this + forces a new Spring Cloud Build Pack Binding to be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/appplatform.azure.upbound.io_springcloudcontainerdeployments.yaml b/package/crds/appplatform.azure.upbound.io_springcloudcontainerdeployments.yaml index db1d343d2..1383c6340 100644 --- a/package/crds/appplatform.azure.upbound.io_springcloudcontainerdeployments.yaml +++ b/package/crds/appplatform.azure.upbound.io_springcloudcontainerdeployments.yaml @@ -615,3 +615,591 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: SpringCloudContainerDeployment is the Schema for the SpringCloudContainerDeployments + API. Manages a Spring Cloud Container Deployment. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SpringCloudContainerDeploymentSpec defines the desired state + of SpringCloudContainerDeployment + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + addonJson: + description: A JSON object that contains the addon configurations + of the Spring Cloud Container Deployment. + type: string + applicationPerformanceMonitoringIds: + description: Specifies a list of Spring Cloud Application Performance + Monitoring IDs. + items: + type: string + type: array + arguments: + description: Specifies the arguments to the entrypoint. The docker + image's CMD is used if not specified. + items: + type: string + type: array + commands: + description: Specifies the entrypoint array. It will not be executed + within a shell. The docker image's ENTRYPOINT is used if not + specified. 
+ items: + type: string + type: array + environmentVariables: + additionalProperties: + type: string + description: Specifies the environment variables of the Spring + Cloud Deployment as a map of key-value pairs. + type: object + x-kubernetes-map-type: granular + image: + description: Container image of the custom container. This should + be in the form of : without the server name + of the registry. + type: string + instanceCount: + description: Specifies the required instance count of the Spring + Cloud Deployment. Possible Values are between 1 and 500. Defaults + to 1 if not specified. + type: number + languageFramework: + description: Specifies the language framework of the container + image. The only possible value is springboot. + type: string + quota: + description: A quota block as defined below. + properties: + cpu: + description: Specifies the required cpu of the Spring Cloud + Deployment. Possible Values are 500m, 1, 2, 3 and 4. Defaults + to 1 if not specified. + type: string + memory: + description: Specifies the required memory size of the Spring + Cloud Deployment. Possible Values are 512Mi, 1Gi, 2Gi, 3Gi, + 4Gi, 5Gi, 6Gi, 7Gi, and 8Gi. Defaults to 1Gi if not specified. + type: string + type: object + server: + description: The name of the registry that contains the container + image. + type: string + springCloudAppId: + description: The ID of the Spring Cloud Service. Changing this + forces a new Spring Cloud Container Deployment to be created. + type: string + springCloudAppIdRef: + description: Reference to a SpringCloudApp in appplatform to populate + springCloudAppId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + springCloudAppIdSelector: + description: Selector for a SpringCloudApp in appplatform to populate + springCloudAppId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. 
+ InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + addonJson: + description: A JSON object that contains the addon configurations + of the Spring Cloud Container Deployment. + type: string + applicationPerformanceMonitoringIds: + description: Specifies a list of Spring Cloud Application Performance + Monitoring IDs. + items: + type: string + type: array + arguments: + description: Specifies the arguments to the entrypoint. The docker + image's CMD is used if not specified. + items: + type: string + type: array + commands: + description: Specifies the entrypoint array. It will not be executed + within a shell. The docker image's ENTRYPOINT is used if not + specified. + items: + type: string + type: array + environmentVariables: + additionalProperties: + type: string + description: Specifies the environment variables of the Spring + Cloud Deployment as a map of key-value pairs. + type: object + x-kubernetes-map-type: granular + image: + description: Container image of the custom container. This should + be in the form of : without the server name + of the registry. + type: string + instanceCount: + description: Specifies the required instance count of the Spring + Cloud Deployment. Possible Values are between 1 and 500. Defaults + to 1 if not specified. + type: number + languageFramework: + description: Specifies the language framework of the container + image. The only possible value is springboot. + type: string + quota: + description: A quota block as defined below. 
+ properties: + cpu: + description: Specifies the required cpu of the Spring Cloud + Deployment. Possible Values are 500m, 1, 2, 3 and 4. Defaults + to 1 if not specified. + type: string + memory: + description: Specifies the required memory size of the Spring + Cloud Deployment. Possible Values are 512Mi, 1Gi, 2Gi, 3Gi, + 4Gi, 5Gi, 6Gi, 7Gi, and 8Gi. Defaults to 1Gi if not specified. + type: string + type: object + server: + description: The name of the registry that contains the container + image. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.image is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.image) + || (has(self.initProvider) && has(self.initProvider.image))' + - message: spec.forProvider.server is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.server) + || (has(self.initProvider) && has(self.initProvider.server))' + status: + description: SpringCloudContainerDeploymentStatus defines the observed + state of SpringCloudContainerDeployment. + properties: + atProvider: + properties: + addonJson: + description: A JSON object that contains the addon configurations + of the Spring Cloud Container Deployment. + type: string + applicationPerformanceMonitoringIds: + description: Specifies a list of Spring Cloud Application Performance + Monitoring IDs. + items: + type: string + type: array + arguments: + description: Specifies the arguments to the entrypoint. The docker + image's CMD is used if not specified. + items: + type: string + type: array + commands: + description: Specifies the entrypoint array. It will not be executed + within a shell. The docker image's ENTRYPOINT is used if not + specified. + items: + type: string + type: array + environmentVariables: + additionalProperties: + type: string + description: Specifies the environment variables of the Spring + Cloud Deployment as a map of key-value pairs. + type: object + x-kubernetes-map-type: granular + id: + description: The ID of the Spring Cloud Container Deployment. + type: string + image: + description: Container image of the custom container. This should + be in the form of : without the server name + of the registry. 
+ type: string + instanceCount: + description: Specifies the required instance count of the Spring + Cloud Deployment. Possible Values are between 1 and 500. Defaults + to 1 if not specified. + type: number + languageFramework: + description: Specifies the language framework of the container + image. The only possible value is springboot. + type: string + quota: + description: A quota block as defined below. + properties: + cpu: + description: Specifies the required cpu of the Spring Cloud + Deployment. Possible Values are 500m, 1, 2, 3 and 4. Defaults + to 1 if not specified. + type: string + memory: + description: Specifies the required memory size of the Spring + Cloud Deployment. Possible Values are 512Mi, 1Gi, 2Gi, 3Gi, + 4Gi, 5Gi, 6Gi, 7Gi, and 8Gi. Defaults to 1Gi if not specified. + type: string + type: object + server: + description: The name of the registry that contains the container + image. + type: string + springCloudAppId: + description: The ID of the Spring Cloud Service. Changing this + forces a new Spring Cloud Container Deployment to be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
+ format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/appplatform.azure.upbound.io_springcloudcustomizedaccelerators.yaml b/package/crds/appplatform.azure.upbound.io_springcloudcustomizedaccelerators.yaml index 574aec704..2b5570dd3 100644 --- a/package/crds/appplatform.azure.upbound.io_springcloudcustomizedaccelerators.yaml +++ b/package/crds/appplatform.azure.upbound.io_springcloudcustomizedaccelerators.yaml @@ -710,3 +710,674 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: SpringCloudCustomizedAccelerator is the Schema for the SpringCloudCustomizedAccelerators + API. Manages a Spring Cloud Customized Accelerator. 
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SpringCloudCustomizedAcceleratorSpec defines the desired + state of SpringCloudCustomizedAccelerator + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + acceleratorTags: + description: Specifies a list of accelerator tags. + items: + type: string + type: array + acceleratorType: + description: Specifies the type of the Spring Cloud Customized + Accelerator. Possible values are Accelerator and Fragment. Defaults + to Accelerator. + type: string + description: + description: Specifies the description of the Spring Cloud Customized + Accelerator. 
+ type: string + displayName: + description: Specifies the display name of the Spring Cloud Customized + Accelerator.. + type: string + gitRepository: + description: A git_repository block as defined below. + properties: + basicAuth: + description: A basic_auth block as defined below. Conflicts + with git_repository[0].ssh_auth. Changing this forces a + new Spring Cloud Customized Accelerator to be created. + properties: + passwordSecretRef: + description: Specifies the password of git repository + basic auth. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + username: + description: Specifies the username of git repository + basic auth. + type: string + required: + - passwordSecretRef + type: object + branch: + description: Specifies the Git repository branch to be used. + type: string + caCertificateId: + description: Specifies the ID of the CA Spring Cloud Certificate + for https URL of Git repository. + type: string + commit: + description: Specifies the Git repository commit to be used. + type: string + gitTag: + description: Specifies the Git repository tag to be used. + type: string + intervalInSeconds: + description: Specifies the interval for checking for updates + to Git or image repository. It should be greater than 10. + type: number + path: + description: Specifies the path under the git repository to + be treated as the root directory of the accelerator or the + fragment (depending on accelerator_type). + type: string + sshAuth: + description: A ssh_auth block as defined below. Conflicts + with git_repository[0].basic_auth. Changing this forces + a new Spring Cloud Customized Accelerator to be created. + properties: + hostKeyAlgorithm: + description: Specifies the SSH Key algorithm of git repository + basic auth. 
+ type: string + hostKeySecretRef: + description: Specifies the Public SSH Key of git repository + basic auth. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + privateKeySecretRef: + description: Specifies the Private SSH Key of git repository + basic auth. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + required: + - privateKeySecretRef + type: object + url: + description: Specifies Git repository URL for the accelerator. + type: string + type: object + iconUrl: + description: Specifies the icon URL of the Spring Cloud Customized + Accelerator.. + type: string + springCloudAcceleratorId: + description: The ID of the Spring Cloud Accelerator. Changing + this forces a new Spring Cloud Customized Accelerator to be + created. + type: string + springCloudAcceleratorIdRef: + description: Reference to a SpringCloudAccelerator in appplatform + to populate springCloudAcceleratorId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + springCloudAcceleratorIdSelector: + description: Selector for a SpringCloudAccelerator in appplatform + to populate springCloudAcceleratorId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. 
+ The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + acceleratorTags: + description: Specifies a list of accelerator tags. + items: + type: string + type: array + acceleratorType: + description: Specifies the type of the Spring Cloud Customized + Accelerator. Possible values are Accelerator and Fragment. Defaults + to Accelerator. + type: string + description: + description: Specifies the description of the Spring Cloud Customized + Accelerator. + type: string + displayName: + description: Specifies the display name of the Spring Cloud Customized + Accelerator.. + type: string + gitRepository: + description: A git_repository block as defined below. + properties: + basicAuth: + description: A basic_auth block as defined below. Conflicts + with git_repository[0].ssh_auth. Changing this forces a + new Spring Cloud Customized Accelerator to be created. + properties: + username: + description: Specifies the username of git repository + basic auth. + type: string + type: object + branch: + description: Specifies the Git repository branch to be used. + type: string + caCertificateId: + description: Specifies the ID of the CA Spring Cloud Certificate + for https URL of Git repository. + type: string + commit: + description: Specifies the Git repository commit to be used. + type: string + gitTag: + description: Specifies the Git repository tag to be used. + type: string + intervalInSeconds: + description: Specifies the interval for checking for updates + to Git or image repository. It should be greater than 10. + type: number + path: + description: Specifies the path under the git repository to + be treated as the root directory of the accelerator or the + fragment (depending on accelerator_type). 
+ type: string + sshAuth: + description: A ssh_auth block as defined below. Conflicts + with git_repository[0].basic_auth. Changing this forces + a new Spring Cloud Customized Accelerator to be created. + properties: + hostKeyAlgorithm: + description: Specifies the SSH Key algorithm of git repository + basic auth. + type: string + type: object + url: + description: Specifies Git repository URL for the accelerator. + type: string + type: object + iconUrl: + description: Specifies the icon URL of the Spring Cloud Customized + Accelerator.. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.gitRepository is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.gitRepository) + || (has(self.initProvider) && has(self.initProvider.gitRepository))' + status: + description: SpringCloudCustomizedAcceleratorStatus defines the observed + state of SpringCloudCustomizedAccelerator. + properties: + atProvider: + properties: + acceleratorTags: + description: Specifies a list of accelerator tags. + items: + type: string + type: array + acceleratorType: + description: Specifies the type of the Spring Cloud Customized + Accelerator. Possible values are Accelerator and Fragment. Defaults + to Accelerator. + type: string + description: + description: Specifies the description of the Spring Cloud Customized + Accelerator. + type: string + displayName: + description: Specifies the display name of the Spring Cloud Customized + Accelerator.. + type: string + gitRepository: + description: A git_repository block as defined below. + properties: + basicAuth: + description: A basic_auth block as defined below. Conflicts + with git_repository[0].ssh_auth. Changing this forces a + new Spring Cloud Customized Accelerator to be created. + properties: + username: + description: Specifies the username of git repository + basic auth. + type: string + type: object + branch: + description: Specifies the Git repository branch to be used. + type: string + caCertificateId: + description: Specifies the ID of the CA Spring Cloud Certificate + for https URL of Git repository. + type: string + commit: + description: Specifies the Git repository commit to be used. + type: string + gitTag: + description: Specifies the Git repository tag to be used. 
+ type: string + intervalInSeconds: + description: Specifies the interval for checking for updates + to Git or image repository. It should be greater than 10. + type: number + path: + description: Specifies the path under the git repository to + be treated as the root directory of the accelerator or the + fragment (depending on accelerator_type). + type: string + sshAuth: + description: A ssh_auth block as defined below. Conflicts + with git_repository[0].basic_auth. Changing this forces + a new Spring Cloud Customized Accelerator to be created. + properties: + hostKeyAlgorithm: + description: Specifies the SSH Key algorithm of git repository + basic auth. + type: string + type: object + url: + description: Specifies Git repository URL for the accelerator. + type: string + type: object + iconUrl: + description: Specifies the icon URL of the Spring Cloud Customized + Accelerator.. + type: string + id: + description: The ID of the Spring Cloud Customized Accelerator. + type: string + springCloudAcceleratorId: + description: The ID of the Spring Cloud Accelerator. Changing + this forces a new Spring Cloud Customized Accelerator to be + created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
+ format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/appplatform.azure.upbound.io_springclouddevtoolportals.yaml b/package/crds/appplatform.azure.upbound.io_springclouddevtoolportals.yaml index b7f114820..7b8b33cec 100644 --- a/package/crds/appplatform.azure.upbound.io_springclouddevtoolportals.yaml +++ b/package/crds/appplatform.azure.upbound.io_springclouddevtoolportals.yaml @@ -610,3 +610,589 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: SpringCloudDevToolPortal is the Schema for the SpringCloudDevToolPortals + API. Manages a Spring Cloud Dev Tool Portal. 
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SpringCloudDevToolPortalSpec defines the desired state of + SpringCloudDevToolPortal + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + applicationAcceleratorEnabled: + description: Should the Accelerator plugin be enabled? + type: boolean + applicationLiveViewEnabled: + description: Should the Application Live View be enabled? + type: boolean + name: + description: The name which should be used for this Spring Cloud + Dev Tool Portal. The only possible value is default. Changing + this forces a new Spring Cloud Dev Tool Portal to be created. 
+ type: string + publicNetworkAccessEnabled: + description: Is public network access enabled? + type: boolean + springCloudServiceId: + description: The ID of the Spring Cloud Service. Changing this + forces a new Spring Cloud Dev Tool Portal to be created. + type: string + springCloudServiceIdRef: + description: Reference to a SpringCloudService in appplatform + to populate springCloudServiceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + springCloudServiceIdSelector: + description: Selector for a SpringCloudService in appplatform + to populate springCloudServiceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sso: + description: A sso block as defined below. + properties: + clientId: + description: Specifies the public identifier for the application. + type: string + clientSecret: + description: Specifies the secret known only to the application + and the authorization server. + type: string + metadataUrl: + description: Specifies the URI of a JSON file with generic + OIDC provider configuration. + type: string + scope: + description: Specifies a list of specific actions applications + can be allowed to do on a user's behalf. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + applicationAcceleratorEnabled: + description: Should the Accelerator plugin be enabled? 
+ type: boolean + applicationLiveViewEnabled: + description: Should the Application Live View be enabled? + type: boolean + name: + description: The name which should be used for this Spring Cloud + Dev Tool Portal. The only possible value is default. Changing + this forces a new Spring Cloud Dev Tool Portal to be created. + type: string + publicNetworkAccessEnabled: + description: Is public network access enabled? + type: boolean + springCloudServiceId: + description: The ID of the Spring Cloud Service. Changing this + forces a new Spring Cloud Dev Tool Portal to be created. + type: string + springCloudServiceIdRef: + description: Reference to a SpringCloudService in appplatform + to populate springCloudServiceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + springCloudServiceIdSelector: + description: Selector for a SpringCloudService in appplatform + to populate springCloudServiceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sso: + description: A sso block as defined below. + properties: + clientId: + description: Specifies the public identifier for the application. + type: string + clientSecret: + description: Specifies the secret known only to the application + and the authorization server. + type: string + metadataUrl: + description: Specifies the URI of a JSON file with generic + OIDC provider configuration. + type: string + scope: + description: Specifies a list of specific actions applications + can be allowed to do on a user's behalf. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. 
Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: SpringCloudDevToolPortalStatus defines the observed state + of SpringCloudDevToolPortal. + properties: + atProvider: + properties: + applicationAcceleratorEnabled: + description: Should the Accelerator plugin be enabled? 
+ type: boolean + applicationLiveViewEnabled: + description: Should the Application Live View be enabled? + type: boolean + id: + description: The ID of the Spring Cloud Dev Tool Portal. + type: string + name: + description: The name which should be used for this Spring Cloud + Dev Tool Portal. The only possible value is default. Changing + this forces a new Spring Cloud Dev Tool Portal to be created. + type: string + publicNetworkAccessEnabled: + description: Is public network access enabled? + type: boolean + springCloudServiceId: + description: The ID of the Spring Cloud Service. Changing this + forces a new Spring Cloud Dev Tool Portal to be created. + type: string + sso: + description: A sso block as defined below. + properties: + clientId: + description: Specifies the public identifier for the application. + type: string + clientSecret: + description: Specifies the secret known only to the application + and the authorization server. + type: string + metadataUrl: + description: Specifies the URI of a JSON file with generic + OIDC provider configuration. + type: string + scope: + description: Specifies a list of specific actions applications + can be allowed to do on a user's behalf. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/appplatform.azure.upbound.io_springcloudgateways.yaml b/package/crds/appplatform.azure.upbound.io_springcloudgateways.yaml index 2851fd7a6..7758e0da4 100644 --- a/package/crds/appplatform.azure.upbound.io_springcloudgateways.yaml +++ b/package/crds/appplatform.azure.upbound.io_springcloudgateways.yaml @@ -1031,3 +1031,962 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: SpringCloudGateway is the Schema for 
the SpringCloudGateways + API. Manages a Spring Cloud Gateway. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SpringCloudGatewaySpec defines the desired state of SpringCloudGateway + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + apiMetadata: + description: A api_metadata block as defined below. + properties: + description: + description: Detailed description of the APIs available on + the Gateway instance. + type: string + documentationUrl: + description: Location of additional documentation for the + APIs available on the Gateway instance. 
+ type: string + serverUrl: + description: Base URL that API consumers will use to access + APIs on the Gateway instance. + type: string + title: + description: Specifies the title describing the context of + the APIs available on the Gateway instance. + type: string + version: + description: Specifies the version of APIs available on this + Gateway instance. + type: string + type: object + applicationPerformanceMonitoringIds: + description: Specifies a list of Spring Cloud Application Performance + Monitoring IDs. + items: + type: string + type: array + applicationPerformanceMonitoringTypes: + description: Specifies a list of application performance monitoring + types used in the Spring Cloud Gateway. The allowed values are + AppDynamics, ApplicationInsights, Dynatrace, ElasticAPM and + NewRelic. + items: + type: string + type: array + clientAuthorization: + description: A client_authorization block as defined below. + properties: + certificateIds: + description: Specifies the Spring Cloud Certificate IDs of + the Spring Cloud Gateway. + items: + type: string + type: array + verificationEnabled: + description: Specifies whether the client certificate verification + is enabled. + type: boolean + type: object + cors: + description: A cors block as defined below. + properties: + allowedHeaders: + description: Allowed headers in cross-site requests. The special + value * allows actual requests to send any header. + items: + type: string + type: array + x-kubernetes-list-type: set + allowedMethods: + description: Allowed HTTP methods on cross-site requests. + The special value * allows all methods. If not set, GET + and HEAD are allowed by default. Possible values are DELETE, + GET, HEAD, MERGE, POST, OPTIONS and PUT. + items: + type: string + type: array + x-kubernetes-list-type: set + allowedOriginPatterns: + description: Allowed origin patterns to make cross-site requests. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + allowedOrigins: + description: Allowed origins to make cross-site requests. + The special value * allows all domains. + items: + type: string + type: array + x-kubernetes-list-type: set + credentialsAllowed: + description: is user credentials are supported on cross-site + requests? + type: boolean + exposedHeaders: + description: HTTP response headers to expose for cross-site + requests. + items: + type: string + type: array + x-kubernetes-list-type: set + maxAgeSeconds: + description: How long, in seconds, the response from a pre-flight + request can be cached by clients. + type: number + type: object + environmentVariables: + additionalProperties: + type: string + description: Specifies the environment variables of the Spring + Cloud Gateway as a map of key-value pairs. Changing this forces + a new resource to be created. + type: object + x-kubernetes-map-type: granular + httpsOnly: + description: is only https is allowed? + type: boolean + instanceCount: + description: Specifies the required instance count of the Spring + Cloud Gateway. Possible Values are between 1 and 500. Defaults + to 1 if not specified. + type: number + localResponseCachePerInstance: + description: A local_response_cache_per_instance block as defined + below. Only one of local_response_cache_per_instance or local_response_cache_per_route + can be specified. + properties: + size: + description: Specifies the maximum size of cache (10MB, 900KB, + 1GB...) to determine if the cache needs to evict some entries. + type: string + timeToLive: + description: Specifies the time before a cached entry is expired + (300s, 5m, 1h...). + type: string + type: object + localResponseCachePerRoute: + description: A local_response_cache_per_route block as defined + below. Only one of local_response_cache_per_instance or local_response_cache_per_route + can be specified. 
+ properties: + size: + description: Specifies the maximum size of cache (10MB, 900KB, + 1GB...) to determine if the cache needs to evict some entries. + type: string + timeToLive: + description: Specifies the time before a cached entry is expired + (300s, 5m, 1h...). + type: string + type: object + publicNetworkAccessEnabled: + description: Indicates whether the Spring Cloud Gateway exposes + endpoint. + type: boolean + quota: + description: A quota block as defined below. + properties: + cpu: + description: Specifies the required cpu of the Spring Cloud + Deployment. Possible Values are 500m, 1, 2, 3 and 4. Defaults + to 1 if not specified. + type: string + memory: + description: Specifies the required memory size of the Spring + Cloud Deployment. Possible Values are 512Mi, 1Gi, 2Gi, 3Gi, + 4Gi, 5Gi, 6Gi, 7Gi, and 8Gi. Defaults to 2Gi if not specified. + type: string + type: object + sensitiveEnvironmentVariablesSecretRef: + description: Specifies the sensitive environment variables of + the Spring Cloud Gateway as a map of key-value pairs. Changing + this forces a new resource to be created. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + springCloudServiceId: + description: The ID of the Spring Cloud Service. Changing this + forces a new Spring Cloud Gateway to be created. + type: string + springCloudServiceIdRef: + description: Reference to a SpringCloudService in appplatform + to populate springCloudServiceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + springCloudServiceIdSelector: + description: Selector for a SpringCloudService in appplatform + to populate springCloudServiceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sso: + description: A sso block as defined below. + properties: + clientId: + description: The public identifier for the application. 
+ type: string + clientSecret: + description: The secret known only to the application and + the authorization server. + type: string + issuerUri: + description: The URI of Issuer Identifier. + type: string + scope: + description: It defines the specific actions applications + can be allowed to do on a user's behalf. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + apiMetadata: + description: A api_metadata block as defined below. + properties: + description: + description: Detailed description of the APIs available on + the Gateway instance. + type: string + documentationUrl: + description: Location of additional documentation for the + APIs available on the Gateway instance. + type: string + serverUrl: + description: Base URL that API consumers will use to access + APIs on the Gateway instance. + type: string + title: + description: Specifies the title describing the context of + the APIs available on the Gateway instance. + type: string + version: + description: Specifies the version of APIs available on this + Gateway instance. + type: string + type: object + applicationPerformanceMonitoringIds: + description: Specifies a list of Spring Cloud Application Performance + Monitoring IDs. 
+ items: + type: string + type: array + applicationPerformanceMonitoringTypes: + description: Specifies a list of application performance monitoring + types used in the Spring Cloud Gateway. The allowed values are + AppDynamics, ApplicationInsights, Dynatrace, ElasticAPM and + NewRelic. + items: + type: string + type: array + clientAuthorization: + description: A client_authorization block as defined below. + properties: + certificateIds: + description: Specifies the Spring Cloud Certificate IDs of + the Spring Cloud Gateway. + items: + type: string + type: array + verificationEnabled: + description: Specifies whether the client certificate verification + is enabled. + type: boolean + type: object + cors: + description: A cors block as defined below. + properties: + allowedHeaders: + description: Allowed headers in cross-site requests. The special + value * allows actual requests to send any header. + items: + type: string + type: array + x-kubernetes-list-type: set + allowedMethods: + description: Allowed HTTP methods on cross-site requests. + The special value * allows all methods. If not set, GET + and HEAD are allowed by default. Possible values are DELETE, + GET, HEAD, MERGE, POST, OPTIONS and PUT. + items: + type: string + type: array + x-kubernetes-list-type: set + allowedOriginPatterns: + description: Allowed origin patterns to make cross-site requests. + items: + type: string + type: array + x-kubernetes-list-type: set + allowedOrigins: + description: Allowed origins to make cross-site requests. + The special value * allows all domains. + items: + type: string + type: array + x-kubernetes-list-type: set + credentialsAllowed: + description: is user credentials are supported on cross-site + requests? + type: boolean + exposedHeaders: + description: HTTP response headers to expose for cross-site + requests. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + maxAgeSeconds: + description: How long, in seconds, the response from a pre-flight + request can be cached by clients. + type: number + type: object + environmentVariables: + additionalProperties: + type: string + description: Specifies the environment variables of the Spring + Cloud Gateway as a map of key-value pairs. Changing this forces + a new resource to be created. + type: object + x-kubernetes-map-type: granular + httpsOnly: + description: is only https is allowed? + type: boolean + instanceCount: + description: Specifies the required instance count of the Spring + Cloud Gateway. Possible Values are between 1 and 500. Defaults + to 1 if not specified. + type: number + localResponseCachePerInstance: + description: A local_response_cache_per_instance block as defined + below. Only one of local_response_cache_per_instance or local_response_cache_per_route + can be specified. + properties: + size: + description: Specifies the maximum size of cache (10MB, 900KB, + 1GB...) to determine if the cache needs to evict some entries. + type: string + timeToLive: + description: Specifies the time before a cached entry is expired + (300s, 5m, 1h...). + type: string + type: object + localResponseCachePerRoute: + description: A local_response_cache_per_route block as defined + below. Only one of local_response_cache_per_instance or local_response_cache_per_route + can be specified. + properties: + size: + description: Specifies the maximum size of cache (10MB, 900KB, + 1GB...) to determine if the cache needs to evict some entries. + type: string + timeToLive: + description: Specifies the time before a cached entry is expired + (300s, 5m, 1h...). + type: string + type: object + publicNetworkAccessEnabled: + description: Indicates whether the Spring Cloud Gateway exposes + endpoint. + type: boolean + quota: + description: A quota block as defined below. 
+ properties: + cpu: + description: Specifies the required cpu of the Spring Cloud + Deployment. Possible Values are 500m, 1, 2, 3 and 4. Defaults + to 1 if not specified. + type: string + memory: + description: Specifies the required memory size of the Spring + Cloud Deployment. Possible Values are 512Mi, 1Gi, 2Gi, 3Gi, + 4Gi, 5Gi, 6Gi, 7Gi, and 8Gi. Defaults to 2Gi if not specified. + type: string + type: object + sso: + description: A sso block as defined below. + properties: + clientId: + description: The public identifier for the application. + type: string + clientSecret: + description: The secret known only to the application and + the authorization server. + type: string + issuerUri: + description: The URI of Issuer Identifier. + type: string + scope: + description: It defines the specific actions applications + can be allowed to do on a user's behalf. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: SpringCloudGatewayStatus defines the observed state of SpringCloudGateway. + properties: + atProvider: + properties: + apiMetadata: + description: A api_metadata block as defined below. + properties: + description: + description: Detailed description of the APIs available on + the Gateway instance. + type: string + documentationUrl: + description: Location of additional documentation for the + APIs available on the Gateway instance. + type: string + serverUrl: + description: Base URL that API consumers will use to access + APIs on the Gateway instance. + type: string + title: + description: Specifies the title describing the context of + the APIs available on the Gateway instance. + type: string + version: + description: Specifies the version of APIs available on this + Gateway instance. + type: string + type: object + applicationPerformanceMonitoringIds: + description: Specifies a list of Spring Cloud Application Performance + Monitoring IDs. + items: + type: string + type: array + applicationPerformanceMonitoringTypes: + description: Specifies a list of application performance monitoring + types used in the Spring Cloud Gateway. The allowed values are + AppDynamics, ApplicationInsights, Dynatrace, ElasticAPM and + NewRelic. + items: + type: string + type: array + clientAuthorization: + description: A client_authorization block as defined below. 
+ properties: + certificateIds: + description: Specifies the Spring Cloud Certificate IDs of + the Spring Cloud Gateway. + items: + type: string + type: array + verificationEnabled: + description: Specifies whether the client certificate verification + is enabled. + type: boolean + type: object + cors: + description: A cors block as defined below. + properties: + allowedHeaders: + description: Allowed headers in cross-site requests. The special + value * allows actual requests to send any header. + items: + type: string + type: array + x-kubernetes-list-type: set + allowedMethods: + description: Allowed HTTP methods on cross-site requests. + The special value * allows all methods. If not set, GET + and HEAD are allowed by default. Possible values are DELETE, + GET, HEAD, MERGE, POST, OPTIONS and PUT. + items: + type: string + type: array + x-kubernetes-list-type: set + allowedOriginPatterns: + description: Allowed origin patterns to make cross-site requests. + items: + type: string + type: array + x-kubernetes-list-type: set + allowedOrigins: + description: Allowed origins to make cross-site requests. + The special value * allows all domains. + items: + type: string + type: array + x-kubernetes-list-type: set + credentialsAllowed: + description: is user credentials are supported on cross-site + requests? + type: boolean + exposedHeaders: + description: HTTP response headers to expose for cross-site + requests. + items: + type: string + type: array + x-kubernetes-list-type: set + maxAgeSeconds: + description: How long, in seconds, the response from a pre-flight + request can be cached by clients. + type: number + type: object + environmentVariables: + additionalProperties: + type: string + description: Specifies the environment variables of the Spring + Cloud Gateway as a map of key-value pairs. Changing this forces + a new resource to be created. + type: object + x-kubernetes-map-type: granular + httpsOnly: + description: is only https is allowed? 
+ type: boolean + id: + description: The ID of the Spring Cloud Gateway. + type: string + instanceCount: + description: Specifies the required instance count of the Spring + Cloud Gateway. Possible Values are between 1 and 500. Defaults + to 1 if not specified. + type: number + localResponseCachePerInstance: + description: A local_response_cache_per_instance block as defined + below. Only one of local_response_cache_per_instance or local_response_cache_per_route + can be specified. + properties: + size: + description: Specifies the maximum size of cache (10MB, 900KB, + 1GB...) to determine if the cache needs to evict some entries. + type: string + timeToLive: + description: Specifies the time before a cached entry is expired + (300s, 5m, 1h...). + type: string + type: object + localResponseCachePerRoute: + description: A local_response_cache_per_route block as defined + below. Only one of local_response_cache_per_instance or local_response_cache_per_route + can be specified. + properties: + size: + description: Specifies the maximum size of cache (10MB, 900KB, + 1GB...) to determine if the cache needs to evict some entries. + type: string + timeToLive: + description: Specifies the time before a cached entry is expired + (300s, 5m, 1h...). + type: string + type: object + publicNetworkAccessEnabled: + description: Indicates whether the Spring Cloud Gateway exposes + endpoint. + type: boolean + quota: + description: A quota block as defined below. + properties: + cpu: + description: Specifies the required cpu of the Spring Cloud + Deployment. Possible Values are 500m, 1, 2, 3 and 4. Defaults + to 1 if not specified. + type: string + memory: + description: Specifies the required memory size of the Spring + Cloud Deployment. Possible Values are 512Mi, 1Gi, 2Gi, 3Gi, + 4Gi, 5Gi, 6Gi, 7Gi, and 8Gi. Defaults to 2Gi if not specified. + type: string + type: object + springCloudServiceId: + description: The ID of the Spring Cloud Service. 
Changing this + forces a new Spring Cloud Gateway to be created. + type: string + sso: + description: A sso block as defined below. + properties: + clientId: + description: The public identifier for the application. + type: string + clientSecret: + description: The secret known only to the application and + the authorization server. + type: string + issuerUri: + description: The URI of Issuer Identifier. + type: string + scope: + description: It defines the specific actions applications + can be allowed to do on a user's behalf. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + url: + description: URL of the Spring Cloud Gateway, exposed when 'public_network_access_enabled' + is true. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/appplatform.azure.upbound.io_springcloudjavadeployments.yaml b/package/crds/appplatform.azure.upbound.io_springcloudjavadeployments.yaml index c470e7169..811743649 100644 --- a/package/crds/appplatform.azure.upbound.io_springcloudjavadeployments.yaml +++ b/package/crds/appplatform.azure.upbound.io_springcloudjavadeployments.yaml @@ -524,3 +524,500 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: SpringCloudJavaDeployment is the Schema for the SpringCloudJavaDeployments + API. Manages an Azure Spring Cloud Deployment with a Java runtime. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SpringCloudJavaDeploymentSpec defines the desired state of + SpringCloudJavaDeployment + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + environmentVariables: + additionalProperties: + type: string + description: Specifies the environment variables of the Spring + Cloud Deployment as a map of key-value pairs. + type: object + x-kubernetes-map-type: granular + instanceCount: + description: Specifies the required instance count of the Spring + Cloud Deployment. Possible Values are between 1 and 500. Defaults + to 1 if not specified. + type: number + jvmOptions: + description: Specifies the jvm option of the Spring Cloud Deployment. + type: string + quota: + description: A quota block as defined below. + properties: + cpu: + description: Specifies the required cpu of the Spring Cloud + Deployment. 
Possible Values are 500m, 1, 2, 3 and 4. Defaults + to 1 if not specified. + type: string + memory: + description: Specifies the required memory size of the Spring + Cloud Deployment. Possible Values are 512Mi, 1Gi, 2Gi, 3Gi, + 4Gi, 5Gi, 6Gi, 7Gi, and 8Gi. Defaults to 1Gi if not specified. + type: string + type: object + runtimeVersion: + description: Specifies the runtime version of the Spring Cloud + Deployment. Possible Values are Java_8, Java_11 and Java_17. + Defaults to Java_8. + type: string + springCloudAppId: + description: Specifies the id of the Spring Cloud Application + in which to create the Deployment. Changing this forces a new + resource to be created. + type: string + springCloudAppIdRef: + description: Reference to a SpringCloudApp in appplatform to populate + springCloudAppId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + springCloudAppIdSelector: + description: Selector for a SpringCloudApp in appplatform to populate + springCloudAppId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + environmentVariables: + additionalProperties: + type: string + description: Specifies the environment variables of the Spring + Cloud Deployment as a map of key-value pairs. + type: object + x-kubernetes-map-type: granular + instanceCount: + description: Specifies the required instance count of the Spring + Cloud Deployment. 
Possible Values are between 1 and 500. Defaults + to 1 if not specified. + type: number + jvmOptions: + description: Specifies the jvm option of the Spring Cloud Deployment. + type: string + quota: + description: A quota block as defined below. + properties: + cpu: + description: Specifies the required cpu of the Spring Cloud + Deployment. Possible Values are 500m, 1, 2, 3 and 4. Defaults + to 1 if not specified. + type: string + memory: + description: Specifies the required memory size of the Spring + Cloud Deployment. Possible Values are 512Mi, 1Gi, 2Gi, 3Gi, + 4Gi, 5Gi, 6Gi, 7Gi, and 8Gi. Defaults to 1Gi if not specified. + type: string + type: object + runtimeVersion: + description: Specifies the runtime version of the Spring Cloud + Deployment. Possible Values are Java_8, Java_11 and Java_17. + Defaults to Java_8. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: SpringCloudJavaDeploymentStatus defines the observed state + of SpringCloudJavaDeployment. + properties: + atProvider: + properties: + environmentVariables: + additionalProperties: + type: string + description: Specifies the environment variables of the Spring + Cloud Deployment as a map of key-value pairs. + type: object + x-kubernetes-map-type: granular + id: + description: The ID of the Spring Cloud Deployment. + type: string + instanceCount: + description: Specifies the required instance count of the Spring + Cloud Deployment. Possible Values are between 1 and 500. Defaults + to 1 if not specified. + type: number + jvmOptions: + description: Specifies the jvm option of the Spring Cloud Deployment. + type: string + quota: + description: A quota block as defined below. + properties: + cpu: + description: Specifies the required cpu of the Spring Cloud + Deployment. Possible Values are 500m, 1, 2, 3 and 4. Defaults + to 1 if not specified. + type: string + memory: + description: Specifies the required memory size of the Spring + Cloud Deployment. Possible Values are 512Mi, 1Gi, 2Gi, 3Gi, + 4Gi, 5Gi, 6Gi, 7Gi, and 8Gi. Defaults to 1Gi if not specified. + type: string + type: object + runtimeVersion: + description: Specifies the runtime version of the Spring Cloud + Deployment. Possible Values are Java_8, Java_11 and Java_17. + Defaults to Java_8. 
+ type: string + springCloudAppId: + description: Specifies the id of the Spring Cloud Application + in which to create the Deployment. Changing this forces a new + resource to be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/appplatform.azure.upbound.io_springcloudservices.yaml b/package/crds/appplatform.azure.upbound.io_springcloudservices.yaml index d8669d8a1..439a08d85 100644 --- a/package/crds/appplatform.azure.upbound.io_springcloudservices.yaml +++ b/package/crds/appplatform.azure.upbound.io_springcloudservices.yaml @@ -1853,3 +1853,1768 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: SpringCloudService is the Schema for the SpringCloudServices + API. Manages an Azure Spring Cloud Service. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SpringCloudServiceSpec defines the desired state of SpringCloudService + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + buildAgentPoolSize: + description: Specifies the size for this Spring Cloud Service's + default build agent pool. Possible values are S1, S2, S3, S4 + and S5. This field is applicable only for Spring Cloud Service + with enterprise tier. + type: string + configServerGitSetting: + description: A config_server_git_setting block as defined below. + This field is applicable only for Spring Cloud Service with + basic and standard tier. + properties: + httpBasicAuth: + description: A http_basic_auth block as defined below. + properties: + passwordSecretRef: + description: The password used to access the Git repository + server, required when the Git repository server supports + HTTP Basic Authentication. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - key + - name + - namespace + type: object + username: + description: The username that's used to access the Git + repository server, required when the Git repository + server supports HTTP Basic Authentication. + type: string + required: + - passwordSecretRef + type: object + label: + description: The default label of the Git repository, should + be the branch name, tag name, or commit-id of the repository. + type: string + repository: + description: One or more repository blocks as defined below. + items: + properties: + httpBasicAuth: + description: A http_basic_auth block as defined below. + properties: + passwordSecretRef: + description: The password used to access the Git + repository server, required when the Git repository + server supports HTTP Basic Authentication. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + username: + description: The username that's used to access + the Git repository server, required when the Git + repository server supports HTTP Basic Authentication. + type: string + required: + - passwordSecretRef + type: object + label: + description: The default label of the Git repository, + should be the branch name, tag name, or commit-id + of the repository. + type: string + name: + description: A name to identify on the Git repository, + required only if repos exists. + type: string + pattern: + description: An array of strings used to match an application + name. For each pattern, use the {application}/{profile} + format with wildcards. + items: + type: string + type: array + searchPaths: + description: An array of strings used to search subdirectories + of the Git repository. + items: + type: string + type: array + sshAuth: + description: A ssh_auth block as defined below. 
+ properties: + hostKeyAlgorithm: + description: The host key algorithm, should be ssh-dss, + ssh-rsa, ecdsa-sha2-nistp256, ecdsa-sha2-nistp384, + or ecdsa-sha2-nistp521. Required only if host-key + exists. + type: string + hostKeySecretRef: + description: The host key of the Git repository + server, should not include the algorithm prefix + as covered by host-key-algorithm. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + privateKeySecretRef: + description: The SSH private key to access the Git + repository, required when the URI starts with + git@ or ssh://. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + strictHostKeyCheckingEnabled: + description: Indicates whether the Config Server + instance will fail to start if the host_key does + not match. Defaults to true. + type: boolean + required: + - privateKeySecretRef + type: object + uri: + description: The URI of the Git repository that's used + as the Config Server back end should be started with + http://, https://, git@, or ssh://. + type: string + type: object + type: array + searchPaths: + description: An array of strings used to search subdirectories + of the Git repository. + items: + type: string + type: array + sshAuth: + description: A ssh_auth block as defined below. + properties: + hostKeyAlgorithm: + description: The host key algorithm, should be ssh-dss, + ssh-rsa, ecdsa-sha2-nistp256, ecdsa-sha2-nistp384, or + ecdsa-sha2-nistp521. Required only if host-key exists. 
+ type: string + hostKeySecretRef: + description: The host key of the Git repository server, + should not include the algorithm prefix as covered by + host-key-algorithm. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + privateKeySecretRef: + description: The SSH private key to access the Git repository, + required when the URI starts with git@ or ssh://. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + strictHostKeyCheckingEnabled: + description: Indicates whether the Config Server instance + will fail to start if the host_key does not match. Defaults + to true. + type: boolean + required: + - privateKeySecretRef + type: object + uri: + description: The URI of the default Git repository used as + the Config Server back end, should be started with http://, + https://, git@, or ssh://. + type: string + type: object + containerRegistry: + description: One or more container_registry block as defined below. + This field is applicable only for Spring Cloud Service with + enterprise tier. + items: + properties: + name: + description: Specifies the name of the container registry. + type: string + passwordSecretRef: + description: Specifies the password of the container registry. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + server: + description: Specifies the login server of the container + registry. 
+ type: string + username: + description: Specifies the username of the container registry. + type: string + required: + - passwordSecretRef + type: object + type: array + defaultBuildService: + description: A default_build_service block as defined below. This + field is applicable only for Spring Cloud Service with enterprise + tier. + properties: + containerRegistryName: + description: Specifies the name of the container registry + used in the default build service. + type: string + type: object + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + logStreamPublicEndpointEnabled: + description: Should the log stream in vnet injection instance + could be accessed from Internet? + type: boolean + managedEnvironmentId: + description: The resource Id of the Managed Environment that the + Spring Apps instance builds on. Can only be specified when sku_tier + is set to StandardGen2. + type: string + marketplace: + description: A marketplace block as defined below. Can only be + specified when sku is set to E0. + properties: + plan: + description: Specifies the plan ID of the 3rd Party Artifact + that is being procured. + type: string + product: + description: Specifies the 3rd Party artifact that is being + procured. + type: string + publisher: + description: Specifies the publisher ID of the 3rd Party Artifact + that is being procured. + type: string + type: object + network: + description: A network block as defined below. Changing this forces + a new resource to be created. + properties: + appNetworkResourceGroup: + description: Specifies the Name of the resource group containing + network resources of Azure Spring Cloud Apps. Changing this + forces a new resource to be created. + type: string + appSubnetId: + description: Specifies the ID of the Subnet which should host + the Spring Boot Applications deployed in this Spring Cloud + Service. 
Changing this forces a new resource to be created. + type: string + appSubnetIdRef: + description: Reference to a Subnet in network to populate + appSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + appSubnetIdSelector: + description: Selector for a Subnet in network to populate + appSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + cidrRanges: + description: A list of (at least 3) CIDR ranges (at least + /16) which are used to host the Spring Cloud infrastructure, + which must not overlap with any existing CIDR ranges in + the Subnet. Changing this forces a new resource to be created. + items: + type: string + type: array + outboundType: + description: Specifies the egress traffic type of the Spring + Cloud Service. Possible values are loadBalancer and userDefinedRouting. + Defaults to loadBalancer. Changing this forces a new resource + to be created. + type: string + readTimeoutSeconds: + description: Ingress read time out in seconds. + type: number + serviceRuntimeNetworkResourceGroup: + description: Specifies the Name of the resource group containing + network resources of Azure Spring Cloud Service Runtime. + Changing this forces a new resource to be created. + type: string + serviceRuntimeSubnetId: + description: Specifies the ID of the Subnet where the Service + Runtime components of the Spring Cloud Service will exist. + Changing this forces a new resource to be created. + type: string + serviceRuntimeSubnetIdRef: + description: Reference to a Subnet in network to populate + serviceRuntimeSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceRuntimeSubnetIdSelector: + description: Selector for a Subnet in network to populate + serviceRuntimeSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + resourceGroupName: + description: Specifies The name of the resource group in which + to create the Spring Cloud Service. Changing this forces a new + resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + serviceRegistryEnabled: + description: Whether enable the default Service Registry. This + field is applicable only for Spring Cloud Service with enterprise + tier. + type: boolean + skuName: + description: Specifies the SKU Name for this Spring Cloud Service. + Possible values are B0, S0 and E0. Defaults to S0. Changing + this forces a new resource to be created. + type: string + skuTier: + description: Specifies the SKU Tier for this Spring Cloud Service. + Possible values are Basic, Enterprise, Standard and StandardGen2. + The attribute is automatically computed from API response except + when managed_environment_id is defined. Changing this forces + a new resource to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + trace: + description: A trace block as defined below. + properties: + connectionString: + description: The connection string used for Application Insights. + type: string + connectionStringRef: + description: Reference to a ApplicationInsights in insights + to populate connectionString. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + connectionStringSelector: + description: Selector for a ApplicationInsights in insights + to populate connectionString. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sampleRate: + description: The sampling rate of Application Insights Agent. + Must be between 0.0 and 100.0. Defaults to 10.0. + type: number + type: object + zoneRedundant: + description: Whether zone redundancy is enabled for this Spring + Cloud Service. Defaults to false. + type: boolean + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. 
+ InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + buildAgentPoolSize: + description: Specifies the size for this Spring Cloud Service's + default build agent pool. Possible values are S1, S2, S3, S4 + and S5. This field is applicable only for Spring Cloud Service + with enterprise tier. + type: string + configServerGitSetting: + description: A config_server_git_setting block as defined below. + This field is applicable only for Spring Cloud Service with + basic and standard tier. + properties: + httpBasicAuth: + description: A http_basic_auth block as defined below. + properties: + username: + description: The username that's used to access the Git + repository server, required when the Git repository + server supports HTTP Basic Authentication. + type: string + type: object + label: + description: The default label of the Git repository, should + be the branch name, tag name, or commit-id of the repository. + type: string + repository: + description: One or more repository blocks as defined below. + items: + properties: + httpBasicAuth: + description: A http_basic_auth block as defined below. + properties: + username: + description: The username that's used to access + the Git repository server, required when the Git + repository server supports HTTP Basic Authentication. + type: string + type: object + label: + description: The default label of the Git repository, + should be the branch name, tag name, or commit-id + of the repository. 
+ type: string + name: + description: A name to identify on the Git repository, + required only if repos exists. + type: string + pattern: + description: An array of strings used to match an application + name. For each pattern, use the {application}/{profile} + format with wildcards. + items: + type: string + type: array + searchPaths: + description: An array of strings used to search subdirectories + of the Git repository. + items: + type: string + type: array + sshAuth: + description: A ssh_auth block as defined below. + properties: + hostKeyAlgorithm: + description: The host key algorithm, should be ssh-dss, + ssh-rsa, ecdsa-sha2-nistp256, ecdsa-sha2-nistp384, + or ecdsa-sha2-nistp521. Required only if host-key + exists. + type: string + strictHostKeyCheckingEnabled: + description: Indicates whether the Config Server + instance will fail to start if the host_key does + not match. Defaults to true. + type: boolean + type: object + uri: + description: The URI of the Git repository that's used + as the Config Server back end should be started with + http://, https://, git@, or ssh://. + type: string + type: object + type: array + searchPaths: + description: An array of strings used to search subdirectories + of the Git repository. + items: + type: string + type: array + sshAuth: + description: A ssh_auth block as defined below. + properties: + hostKeyAlgorithm: + description: The host key algorithm, should be ssh-dss, + ssh-rsa, ecdsa-sha2-nistp256, ecdsa-sha2-nistp384, or + ecdsa-sha2-nistp521. Required only if host-key exists. + type: string + strictHostKeyCheckingEnabled: + description: Indicates whether the Config Server instance + will fail to start if the host_key does not match. Defaults + to true. + type: boolean + type: object + uri: + description: The URI of the default Git repository used as + the Config Server back end, should be started with http://, + https://, git@, or ssh://. 
+ type: string + type: object + containerRegistry: + description: One or more container_registry block as defined below. + This field is applicable only for Spring Cloud Service with + enterprise tier. + items: + properties: + name: + description: Specifies the name of the container registry. + type: string + server: + description: Specifies the login server of the container + registry. + type: string + username: + description: Specifies the username of the container registry. + type: string + type: object + type: array + defaultBuildService: + description: A default_build_service block as defined below. This + field is applicable only for Spring Cloud Service with enterprise + tier. + properties: + containerRegistryName: + description: Specifies the name of the container registry + used in the default build service. + type: string + type: object + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + logStreamPublicEndpointEnabled: + description: Should the log stream in vnet injection instance + could be accessed from Internet? + type: boolean + managedEnvironmentId: + description: The resource Id of the Managed Environment that the + Spring Apps instance builds on. Can only be specified when sku_tier + is set to StandardGen2. + type: string + marketplace: + description: A marketplace block as defined below. Can only be + specified when sku is set to E0. + properties: + plan: + description: Specifies the plan ID of the 3rd Party Artifact + that is being procured. + type: string + product: + description: Specifies the 3rd Party artifact that is being + procured. + type: string + publisher: + description: Specifies the publisher ID of the 3rd Party Artifact + that is being procured. + type: string + type: object + network: + description: A network block as defined below. Changing this forces + a new resource to be created. 
+ properties: + appNetworkResourceGroup: + description: Specifies the Name of the resource group containing + network resources of Azure Spring Cloud Apps. Changing this + forces a new resource to be created. + type: string + appSubnetId: + description: Specifies the ID of the Subnet which should host + the Spring Boot Applications deployed in this Spring Cloud + Service. Changing this forces a new resource to be created. + type: string + appSubnetIdRef: + description: Reference to a Subnet in network to populate + appSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + appSubnetIdSelector: + description: Selector for a Subnet in network to populate + appSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + cidrRanges: + description: A list of (at least 3) CIDR ranges (at least + /16) which are used to host the Spring Cloud infrastructure, + which must not overlap with any existing CIDR ranges in + the Subnet. Changing this forces a new resource to be created. + items: + type: string + type: array + outboundType: + description: Specifies the egress traffic type of the Spring + Cloud Service. Possible values are loadBalancer and userDefinedRouting. + Defaults to loadBalancer. Changing this forces a new resource + to be created. + type: string + readTimeoutSeconds: + description: Ingress read time out in seconds. + type: number + serviceRuntimeNetworkResourceGroup: + description: Specifies the Name of the resource group containing + network resources of Azure Spring Cloud Service Runtime. + Changing this forces a new resource to be created. + type: string + serviceRuntimeSubnetId: + description: Specifies the ID of the Subnet where the Service + Runtime components of the Spring Cloud Service will exist. + Changing this forces a new resource to be created. + type: string + serviceRuntimeSubnetIdRef: + description: Reference to a Subnet in network to populate + serviceRuntimeSubnetId. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceRuntimeSubnetIdSelector: + description: Selector for a Subnet in network to populate + serviceRuntimeSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + serviceRegistryEnabled: + description: Whether enable the default Service Registry. This + field is applicable only for Spring Cloud Service with enterprise + tier. + type: boolean + skuName: + description: Specifies the SKU Name for this Spring Cloud Service. + Possible values are B0, S0 and E0. Defaults to S0. Changing + this forces a new resource to be created. + type: string + skuTier: + description: Specifies the SKU Tier for this Spring Cloud Service. + Possible values are Basic, Enterprise, Standard and StandardGen2. + The attribute is automatically computed from API response except + when managed_environment_id is defined. Changing this forces + a new resource to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + trace: + description: A trace block as defined below. + properties: + connectionString: + description: The connection string used for Application Insights. + type: string + connectionStringRef: + description: Reference to a ApplicationInsights in insights + to populate connectionString. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + connectionStringSelector: + description: Selector for a ApplicationInsights in insights + to populate connectionString. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sampleRate: + description: The sampling rate of Application Insights Agent. + Must be between 0.0 and 100.0. Defaults to 10.0. + type: number + type: object + zoneRedundant: + description: Whether zone redundancy is enabled for this Spring + Cloud Service. Defaults to false. + type: boolean + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. 
+ This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + status: + description: SpringCloudServiceStatus defines the observed state of SpringCloudService. + properties: + atProvider: + properties: + buildAgentPoolSize: + description: Specifies the size for this Spring Cloud Service's + default build agent pool. Possible values are S1, S2, S3, S4 + and S5. 
This field is applicable only for Spring Cloud Service + with enterprise tier. + type: string + configServerGitSetting: + description: A config_server_git_setting block as defined below. + This field is applicable only for Spring Cloud Service with + basic and standard tier. + properties: + httpBasicAuth: + description: A http_basic_auth block as defined below. + properties: + username: + description: The username that's used to access the Git + repository server, required when the Git repository + server supports HTTP Basic Authentication. + type: string + type: object + label: + description: The default label of the Git repository, should + be the branch name, tag name, or commit-id of the repository. + type: string + repository: + description: One or more repository blocks as defined below. + items: + properties: + httpBasicAuth: + description: A http_basic_auth block as defined below. + properties: + username: + description: The username that's used to access + the Git repository server, required when the Git + repository server supports HTTP Basic Authentication. + type: string + type: object + label: + description: The default label of the Git repository, + should be the branch name, tag name, or commit-id + of the repository. + type: string + name: + description: A name to identify on the Git repository, + required only if repos exists. + type: string + pattern: + description: An array of strings used to match an application + name. For each pattern, use the {application}/{profile} + format with wildcards. + items: + type: string + type: array + searchPaths: + description: An array of strings used to search subdirectories + of the Git repository. + items: + type: string + type: array + sshAuth: + description: A ssh_auth block as defined below. + properties: + hostKeyAlgorithm: + description: The host key algorithm, should be ssh-dss, + ssh-rsa, ecdsa-sha2-nistp256, ecdsa-sha2-nistp384, + or ecdsa-sha2-nistp521. Required only if host-key + exists. 
+ type: string + strictHostKeyCheckingEnabled: + description: Indicates whether the Config Server + instance will fail to start if the host_key does + not match. Defaults to true. + type: boolean + type: object + uri: + description: The URI of the Git repository that's used + as the Config Server back end should be started with + http://, https://, git@, or ssh://. + type: string + type: object + type: array + searchPaths: + description: An array of strings used to search subdirectories + of the Git repository. + items: + type: string + type: array + sshAuth: + description: A ssh_auth block as defined below. + properties: + hostKeyAlgorithm: + description: The host key algorithm, should be ssh-dss, + ssh-rsa, ecdsa-sha2-nistp256, ecdsa-sha2-nistp384, or + ecdsa-sha2-nistp521. Required only if host-key exists. + type: string + strictHostKeyCheckingEnabled: + description: Indicates whether the Config Server instance + will fail to start if the host_key does not match. Defaults + to true. + type: boolean + type: object + uri: + description: The URI of the default Git repository used as + the Config Server back end, should be started with http://, + https://, git@, or ssh://. + type: string + type: object + containerRegistry: + description: One or more container_registry block as defined below. + This field is applicable only for Spring Cloud Service with + enterprise tier. + items: + properties: + name: + description: Specifies the name of the container registry. + type: string + server: + description: Specifies the login server of the container + registry. + type: string + username: + description: Specifies the username of the container registry. + type: string + type: object + type: array + defaultBuildService: + description: A default_build_service block as defined below. This + field is applicable only for Spring Cloud Service with enterprise + tier. 
+ properties: + containerRegistryName: + description: Specifies the name of the container registry + used in the default build service. + type: string + type: object + id: + description: The ID of the Spring Cloud Service. + type: string + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + logStreamPublicEndpointEnabled: + description: Should the log stream in vnet injection instance + could be accessed from Internet? + type: boolean + managedEnvironmentId: + description: The resource Id of the Managed Environment that the + Spring Apps instance builds on. Can only be specified when sku_tier + is set to StandardGen2. + type: string + marketplace: + description: A marketplace block as defined below. Can only be + specified when sku is set to E0. + properties: + plan: + description: Specifies the plan ID of the 3rd Party Artifact + that is being procured. + type: string + product: + description: Specifies the 3rd Party artifact that is being + procured. + type: string + publisher: + description: Specifies the publisher ID of the 3rd Party Artifact + that is being procured. + type: string + type: object + network: + description: A network block as defined below. Changing this forces + a new resource to be created. + properties: + appNetworkResourceGroup: + description: Specifies the Name of the resource group containing + network resources of Azure Spring Cloud Apps. Changing this + forces a new resource to be created. + type: string + appSubnetId: + description: Specifies the ID of the Subnet which should host + the Spring Boot Applications deployed in this Spring Cloud + Service. Changing this forces a new resource to be created. + type: string + cidrRanges: + description: A list of (at least 3) CIDR ranges (at least + /16) which are used to host the Spring Cloud infrastructure, + which must not overlap with any existing CIDR ranges in + the Subnet. 
Changing this forces a new resource to be created. + items: + type: string + type: array + outboundType: + description: Specifies the egress traffic type of the Spring + Cloud Service. Possible values are loadBalancer and userDefinedRouting. + Defaults to loadBalancer. Changing this forces a new resource + to be created. + type: string + readTimeoutSeconds: + description: Ingress read time out in seconds. + type: number + serviceRuntimeNetworkResourceGroup: + description: Specifies the Name of the resource group containing + network resources of Azure Spring Cloud Service Runtime. + Changing this forces a new resource to be created. + type: string + serviceRuntimeSubnetId: + description: Specifies the ID of the Subnet where the Service + Runtime components of the Spring Cloud Service will exist. + Changing this forces a new resource to be created. + type: string + type: object + outboundPublicIpAddresses: + description: A list of the outbound Public IP Addresses used by + this Spring Cloud Service. + items: + type: string + type: array + requiredNetworkTrafficRules: + description: A list of required_network_traffic_rules blocks as + defined below. + items: + properties: + direction: + description: The direction of required traffic. Possible + values are Inbound, Outbound. + type: string + fqdns: + description: The FQDN list of required traffic. + items: + type: string + type: array + ipAddresses: + description: The IP list of required traffic. + items: + type: string + type: array + port: + description: The port of required traffic. + type: number + protocol: + description: The protocol of required traffic. + type: string + type: object + type: array + resourceGroupName: + description: Specifies The name of the resource group in which + to create the Spring Cloud Service. Changing this forces a new + resource to be created. + type: string + serviceRegistryEnabled: + description: Whether enable the default Service Registry. 
This + field is applicable only for Spring Cloud Service with enterprise + tier. + type: boolean + serviceRegistryId: + description: The ID of the Spring Cloud Service Registry. + type: string + skuName: + description: Specifies the SKU Name for this Spring Cloud Service. + Possible values are B0, S0 and E0. Defaults to S0. Changing + this forces a new resource to be created. + type: string + skuTier: + description: Specifies the SKU Tier for this Spring Cloud Service. + Possible values are Basic, Enterprise, Standard and StandardGen2. + The attribute is automatically computed from API response except + when managed_environment_id is defined. Changing this forces + a new resource to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + trace: + description: A trace block as defined below. + properties: + connectionString: + description: The connection string used for Application Insights. + type: string + sampleRate: + description: The sampling rate of Application Insights Agent. + Must be between 0.0 and 100.0. Defaults to 10.0. + type: number + type: object + zoneRedundant: + description: Whether zone redundancy is enabled for this Spring + Cloud Service. Defaults to false. + type: boolean + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/authorization.azure.upbound.io_resourcegrouppolicyassignments.yaml b/package/crds/authorization.azure.upbound.io_resourcegrouppolicyassignments.yaml index b414c9346..cc7b35ad1 100644 --- a/package/crds/authorization.azure.upbound.io_resourcegrouppolicyassignments.yaml +++ b/package/crds/authorization.azure.upbound.io_resourcegrouppolicyassignments.yaml @@ -1073,3 +1073,1052 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: 
+ description: ResourceGroupPolicyAssignment is the Schema for the ResourceGroupPolicyAssignments + API. Manages a Resource Group Policy Assignment. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ResourceGroupPolicyAssignmentSpec defines the desired state + of ResourceGroupPolicyAssignment + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: A description which should be used for this Policy + Assignment. + type: string + displayName: + description: The Display Name for this Policy Assignment. + type: string + enforce: + description: Specifies if this Policy should be enforced or not? 
+ Defaults to true. + type: boolean + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of User Managed Identity IDs which should + be assigned to the Policy Definition. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: The Type of Managed Identity which should be + added to this Policy Definition. Possible values are SystemAssigned + and UserAssigned. + type: string + type: object + location: + description: The Azure Region where the Policy Assignment should + exist. Changing this forces a new Policy Assignment to be created. + type: string + metadata: + description: A JSON mapping of any Metadata for this Policy. + type: string + nonComplianceMessage: + description: One or more non_compliance_message blocks as defined + below. + items: + properties: + content: + description: The non-compliance message text. When assigning + policy sets (initiatives), unless policy_definition_reference_id + is specified then this message will be the default for + all policies. + type: string + policyDefinitionReferenceId: + description: When assigning policy sets (initiatives), this + is the ID of the policy definition that the non-compliance + message applies to. + type: string + type: object + type: array + notScopes: + description: Specifies a list of Resource Scopes (for example + a Subscription, or a Resource Group) within this Management + Group which are excluded from this Policy. + items: + type: string + type: array + overrides: + description: One or more overrides blocks as defined below. More + detail about overrides and resource_selectors see policy assignment + structure + items: + properties: + selectors: + description: One or more override_selector block as defined + below. + items: + properties: + in: + description: The list of allowed values for the specified + kind. Cannot be used with not_in. Can contain up + to 50 values. 
+ items: + type: string + type: array + notIn: + description: The list of not-allowed values for the + specified kind. Cannot be used with in. Can contain + up to 50 values. + items: + type: string + type: array + type: object + type: array + value: + description: Specifies the value to override the policy + property. Possible values for policyEffect override listed + policy effects. + type: string + type: object + type: array + parameters: + description: A JSON mapping of any Parameters for this Policy. + type: string + policyDefinitionId: + description: The ID of the Policy Definition or Policy Definition + Set. Changing this forces a new Policy Assignment to be created. + type: string + policyDefinitionIdRef: + description: Reference to a PolicyDefinition in authorization + to populate policyDefinitionId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + policyDefinitionIdSelector: + description: Selector for a PolicyDefinition in authorization + to populate policyDefinitionId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + resourceGroupId: + description: The ID of the Resource Group where this Policy Assignment + should be created. Changing this forces a new Policy Assignment + to be created. + type: string + resourceGroupIdRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupIdSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + resourceSelectors: + description: One or more resource_selectors blocks as defined + below to filter polices by resource properties. + items: + properties: + name: + description: Specifies a name for the resource selector. + type: string + selectors: + description: One or more resource_selector block as defined + below. + items: + properties: + in: + description: The list of allowed values for the specified + kind. Cannot be used with not_in. Can contain up + to 50 values. 
+ items: + type: string + type: array + kind: + description: Specifies which characteristic will narrow + down the set of evaluated resources. Possible values + are resourceLocation, resourceType and resourceWithoutLocation. + type: string + notIn: + description: The list of not-allowed values for the + specified kind. Cannot be used with in. Can contain + up to 50 values. + items: + type: string + type: array + type: object + type: array + type: object + type: array + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: A description which should be used for this Policy + Assignment. + type: string + displayName: + description: The Display Name for this Policy Assignment. + type: string + enforce: + description: Specifies if this Policy should be enforced or not? + Defaults to true. + type: boolean + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of User Managed Identity IDs which should + be assigned to the Policy Definition. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: The Type of Managed Identity which should be + added to this Policy Definition. Possible values are SystemAssigned + and UserAssigned. 
+ type: string + type: object + location: + description: The Azure Region where the Policy Assignment should + exist. Changing this forces a new Policy Assignment to be created. + type: string + metadata: + description: A JSON mapping of any Metadata for this Policy. + type: string + nonComplianceMessage: + description: One or more non_compliance_message blocks as defined + below. + items: + properties: + content: + description: The non-compliance message text. When assigning + policy sets (initiatives), unless policy_definition_reference_id + is specified then this message will be the default for + all policies. + type: string + policyDefinitionReferenceId: + description: When assigning policy sets (initiatives), this + is the ID of the policy definition that the non-compliance + message applies to. + type: string + type: object + type: array + notScopes: + description: Specifies a list of Resource Scopes (for example + a Subscription, or a Resource Group) within this Management + Group which are excluded from this Policy. + items: + type: string + type: array + overrides: + description: One or more overrides blocks as defined below. More + detail about overrides and resource_selectors see policy assignment + structure + items: + properties: + selectors: + description: One or more override_selector block as defined + below. + items: + properties: + in: + description: The list of allowed values for the specified + kind. Cannot be used with not_in. Can contain up + to 50 values. + items: + type: string + type: array + notIn: + description: The list of not-allowed values for the + specified kind. Cannot be used with in. Can contain + up to 50 values. + items: + type: string + type: array + type: object + type: array + value: + description: Specifies the value to override the policy + property. Possible values for policyEffect override listed + policy effects. 
+ type: string + type: object + type: array + parameters: + description: A JSON mapping of any Parameters for this Policy. + type: string + policyDefinitionId: + description: The ID of the Policy Definition or Policy Definition + Set. Changing this forces a new Policy Assignment to be created. + type: string + policyDefinitionIdRef: + description: Reference to a PolicyDefinition in authorization + to populate policyDefinitionId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + policyDefinitionIdSelector: + description: Selector for a PolicyDefinition in authorization + to populate policyDefinitionId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + resourceGroupId: + description: The ID of the Resource Group where this Policy Assignment + should be created. Changing this forces a new Policy Assignment + to be created. + type: string + resourceGroupIdRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupIdSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + resourceSelectors: + description: One or more resource_selectors blocks as defined + below to filter polices by resource properties. + items: + properties: + name: + description: Specifies a name for the resource selector. + type: string + selectors: + description: One or more resource_selector block as defined + below. + items: + properties: + in: + description: The list of allowed values for the specified + kind. Cannot be used with not_in. Can contain up + to 50 values. + items: + type: string + type: array + kind: + description: Specifies which characteristic will narrow + down the set of evaluated resources. Possible values + are resourceLocation, resourceType and resourceWithoutLocation. + type: string + notIn: + description: The list of not-allowed values for the + specified kind. Cannot be used with in. Can contain + up to 50 values. 
+ items: + type: string + type: array + type: object + type: array + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. 
+ properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: ResourceGroupPolicyAssignmentStatus defines the observed + state of ResourceGroupPolicyAssignment. + properties: + atProvider: + properties: + description: + description: A description which should be used for this Policy + Assignment. 
+ type: string + displayName: + description: The Display Name for this Policy Assignment. + type: string + enforce: + description: Specifies if this Policy should be enforced or not? + Defaults to true. + type: boolean + id: + description: The ID of the Resource Group Policy Assignment. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of User Managed Identity IDs which should + be assigned to the Policy Definition. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Principal ID of the Policy Assignment for + this Resource Group. + type: string + tenantId: + description: The Tenant ID of the Policy Assignment for this + Resource Group. + type: string + type: + description: The Type of Managed Identity which should be + added to this Policy Definition. Possible values are SystemAssigned + and UserAssigned. + type: string + type: object + location: + description: The Azure Region where the Policy Assignment should + exist. Changing this forces a new Policy Assignment to be created. + type: string + metadata: + description: A JSON mapping of any Metadata for this Policy. + type: string + nonComplianceMessage: + description: One or more non_compliance_message blocks as defined + below. + items: + properties: + content: + description: The non-compliance message text. When assigning + policy sets (initiatives), unless policy_definition_reference_id + is specified then this message will be the default for + all policies. + type: string + policyDefinitionReferenceId: + description: When assigning policy sets (initiatives), this + is the ID of the policy definition that the non-compliance + message applies to. + type: string + type: object + type: array + notScopes: + description: Specifies a list of Resource Scopes (for example + a Subscription, or a Resource Group) within this Management + Group which are excluded from this Policy. 
+ items: + type: string + type: array + overrides: + description: One or more overrides blocks as defined below. More + detail about overrides and resource_selectors see policy assignment + structure + items: + properties: + selectors: + description: One or more override_selector block as defined + below. + items: + properties: + in: + description: The list of allowed values for the specified + kind. Cannot be used with not_in. Can contain up + to 50 values. + items: + type: string + type: array + kind: + description: Specifies which characteristic will narrow + down the set of evaluated resources. Possible values + are resourceLocation, resourceType and resourceWithoutLocation. + type: string + notIn: + description: The list of not-allowed values for the + specified kind. Cannot be used with in. Can contain + up to 50 values. + items: + type: string + type: array + type: object + type: array + value: + description: Specifies the value to override the policy + property. Possible values for policyEffect override listed + policy effects. + type: string + type: object + type: array + parameters: + description: A JSON mapping of any Parameters for this Policy. + type: string + policyDefinitionId: + description: The ID of the Policy Definition or Policy Definition + Set. Changing this forces a new Policy Assignment to be created. + type: string + resourceGroupId: + description: The ID of the Resource Group where this Policy Assignment + should be created. Changing this forces a new Policy Assignment + to be created. + type: string + resourceSelectors: + description: One or more resource_selectors blocks as defined + below to filter polices by resource properties. + items: + properties: + name: + description: Specifies a name for the resource selector. + type: string + selectors: + description: One or more resource_selector block as defined + below. + items: + properties: + in: + description: The list of allowed values for the specified + kind. 
Cannot be used with not_in. Can contain up + to 50 values. + items: + type: string + type: array + kind: + description: Specifies which characteristic will narrow + down the set of evaluated resources. Possible values + are resourceLocation, resourceType and resourceWithoutLocation. + type: string + notIn: + description: The list of not-allowed values for the + specified kind. Cannot be used with in. Can contain + up to 50 values. + items: + type: string + type: array + type: object + type: array + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/authorization.azure.upbound.io_resourcepolicyassignments.yaml b/package/crds/authorization.azure.upbound.io_resourcepolicyassignments.yaml index a6359137b..b75976479 100644 --- a/package/crds/authorization.azure.upbound.io_resourcepolicyassignments.yaml +++ b/package/crds/authorization.azure.upbound.io_resourcepolicyassignments.yaml @@ -945,3 +945,924 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ResourcePolicyAssignment is the Schema for the ResourcePolicyAssignments + API. Manages a Policy Assignment to a Resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ResourcePolicyAssignmentSpec defines the desired state of + ResourcePolicyAssignment + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: A description which should be used for this Policy + Assignment. + type: string + displayName: + description: The Display Name for this Policy Assignment. + type: string + enforce: + description: Specifies if this Policy should be enforced or not? + Defaults to true. + type: boolean + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of User Managed Identity IDs which should + be assigned to the Policy Definition. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: The Type of Managed Identity which should be + added to this Policy Definition. Possible values are SystemAssigned + and UserAssigned. + type: string + type: object + location: + description: The Azure Region where the Policy Assignment should + exist. 
Changing this forces a new Policy Assignment to be created. + type: string + metadata: + description: A JSON mapping of any Metadata for this Policy. + type: string + name: + description: The name which should be used for this Policy Assignment. + Changing this forces a new Resource Policy Assignment to be + created. Cannot exceed 64 characters in length. + type: string + nonComplianceMessage: + description: One or more non_compliance_message blocks as defined + below. + items: + properties: + content: + description: The non-compliance message text. When assigning + policy sets (initiatives), unless policy_definition_reference_id + is specified then this message will be the default for + all policies. + type: string + policyDefinitionReferenceId: + description: When assigning policy sets (initiatives), this + is the ID of the policy definition that the non-compliance + message applies to. + type: string + type: object + type: array + notScopes: + description: Specifies a list of Resource Scopes (for example + a Subscription, or a Resource Group) within this Management + Group which are excluded from this Policy. + items: + type: string + type: array + overrides: + description: One or more overrides blocks as defined below. More + detail about overrides and resource_selectors see policy assignment + structure + items: + properties: + selectors: + description: One or more override_selector block as defined + below. + items: + properties: + in: + description: The list of allowed values for the specified + kind. Cannot be used with not_in. Can contain up + to 50 values. + items: + type: string + type: array + notIn: + description: The list of not-allowed values for the + specified kind. Cannot be used with in. Can contain + up to 50 values. + items: + type: string + type: array + type: object + type: array + value: + description: Specifies the value to override the policy + property. Possible values for policyEffect override listed + policy effects. 
+ type: string + type: object + type: array + parameters: + description: A JSON mapping of any Parameters for this Policy. + type: string + policyDefinitionId: + description: The ID of the Policy Definition or Policy Definition + Set. Changing this forces a new Policy Assignment to be created. + type: string + policyDefinitionIdRef: + description: Reference to a PolicyDefinition in authorization + to populate policyDefinitionId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + policyDefinitionIdSelector: + description: Selector for a PolicyDefinition in authorization + to populate policyDefinitionId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + resourceId: + description: The ID of the Resource (or Resource Scope) where + this should be applied. Changing this forces a new Resource + Policy Assignment to be created. + type: string + resourceSelectors: + description: One or more resource_selectors blocks as defined + below to filter polices by resource properties. + items: + properties: + name: + description: Specifies a name for the resource selector. + type: string + selectors: + description: One or more resource_selector block as defined + below. + items: + properties: + in: + description: The list of allowed values for the specified + kind. Cannot be used with not_in. Can contain up + to 50 values. + items: + type: string + type: array + kind: + description: Specifies which characteristic will narrow + down the set of evaluated resources. Possible values + are resourceLocation, resourceType and resourceWithoutLocation. + type: string + notIn: + description: The list of not-allowed values for the + specified kind. Cannot be used with in. Can contain + up to 50 values. + items: + type: string + type: array + type: object + type: array + type: object + type: array + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. 
+ InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: A description which should be used for this Policy + Assignment. + type: string + displayName: + description: The Display Name for this Policy Assignment. + type: string + enforce: + description: Specifies if this Policy should be enforced or not? + Defaults to true. + type: boolean + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of User Managed Identity IDs which should + be assigned to the Policy Definition. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: The Type of Managed Identity which should be + added to this Policy Definition. Possible values are SystemAssigned + and UserAssigned. + type: string + type: object + location: + description: The Azure Region where the Policy Assignment should + exist. Changing this forces a new Policy Assignment to be created. + type: string + metadata: + description: A JSON mapping of any Metadata for this Policy. + type: string + name: + description: The name which should be used for this Policy Assignment. + Changing this forces a new Resource Policy Assignment to be + created. Cannot exceed 64 characters in length. + type: string + nonComplianceMessage: + description: One or more non_compliance_message blocks as defined + below. + items: + properties: + content: + description: The non-compliance message text. 
When assigning + policy sets (initiatives), unless policy_definition_reference_id + is specified then this message will be the default for + all policies. + type: string + policyDefinitionReferenceId: + description: When assigning policy sets (initiatives), this + is the ID of the policy definition that the non-compliance + message applies to. + type: string + type: object + type: array + notScopes: + description: Specifies a list of Resource Scopes (for example + a Subscription, or a Resource Group) within this Management + Group which are excluded from this Policy. + items: + type: string + type: array + overrides: + description: One or more overrides blocks as defined below. More + detail about overrides and resource_selectors see policy assignment + structure + items: + properties: + selectors: + description: One or more override_selector block as defined + below. + items: + properties: + in: + description: The list of allowed values for the specified + kind. Cannot be used with not_in. Can contain up + to 50 values. + items: + type: string + type: array + notIn: + description: The list of not-allowed values for the + specified kind. Cannot be used with in. Can contain + up to 50 values. + items: + type: string + type: array + type: object + type: array + value: + description: Specifies the value to override the policy + property. Possible values for policyEffect override listed + policy effects. + type: string + type: object + type: array + parameters: + description: A JSON mapping of any Parameters for this Policy. + type: string + policyDefinitionId: + description: The ID of the Policy Definition or Policy Definition + Set. Changing this forces a new Policy Assignment to be created. + type: string + policyDefinitionIdRef: + description: Reference to a PolicyDefinition in authorization + to populate policyDefinitionId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + policyDefinitionIdSelector: + description: Selector for a PolicyDefinition in authorization + to populate policyDefinitionId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + resourceId: + description: The ID of the Resource (or Resource Scope) where + this should be applied. Changing this forces a new Resource + Policy Assignment to be created. + type: string + resourceSelectors: + description: One or more resource_selectors blocks as defined + below to filter polices by resource properties. + items: + properties: + name: + description: Specifies a name for the resource selector. + type: string + selectors: + description: One or more resource_selector block as defined + below. + items: + properties: + in: + description: The list of allowed values for the specified + kind. Cannot be used with not_in. Can contain up + to 50 values. + items: + type: string + type: array + kind: + description: Specifies which characteristic will narrow + down the set of evaluated resources. Possible values + are resourceLocation, resourceType and resourceWithoutLocation. + type: string + notIn: + description: The list of not-allowed values for the + specified kind. Cannot be used with in. Can contain + up to 50 values. + items: + type: string + type: array + type: object + type: array + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.resourceId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.resourceId) + || (has(self.initProvider) && has(self.initProvider.resourceId))' + status: + description: ResourcePolicyAssignmentStatus defines the observed state + of ResourcePolicyAssignment. + properties: + atProvider: + properties: + description: + description: A description which should be used for this Policy + Assignment. + type: string + displayName: + description: The Display Name for this Policy Assignment. + type: string + enforce: + description: Specifies if this Policy should be enforced or not? + Defaults to true. 
+ type: boolean + id: + description: The ID of the Resource Policy Assignment. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of User Managed Identity IDs which should + be assigned to the Policy Definition. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Principal ID of the Policy Assignment for + this Resource. + type: string + tenantId: + description: The Tenant ID of the Policy Assignment for this + Resource. + type: string + type: + description: The Type of Managed Identity which should be + added to this Policy Definition. Possible values are SystemAssigned + and UserAssigned. + type: string + type: object + location: + description: The Azure Region where the Policy Assignment should + exist. Changing this forces a new Policy Assignment to be created. + type: string + metadata: + description: A JSON mapping of any Metadata for this Policy. + type: string + name: + description: The name which should be used for this Policy Assignment. + Changing this forces a new Resource Policy Assignment to be + created. Cannot exceed 64 characters in length. + type: string + nonComplianceMessage: + description: One or more non_compliance_message blocks as defined + below. + items: + properties: + content: + description: The non-compliance message text. When assigning + policy sets (initiatives), unless policy_definition_reference_id + is specified then this message will be the default for + all policies. + type: string + policyDefinitionReferenceId: + description: When assigning policy sets (initiatives), this + is the ID of the policy definition that the non-compliance + message applies to. + type: string + type: object + type: array + notScopes: + description: Specifies a list of Resource Scopes (for example + a Subscription, or a Resource Group) within this Management + Group which are excluded from this Policy. 
+ items: + type: string + type: array + overrides: + description: One or more overrides blocks as defined below. More + detail about overrides and resource_selectors see policy assignment + structure + items: + properties: + selectors: + description: One or more override_selector block as defined + below. + items: + properties: + in: + description: The list of allowed values for the specified + kind. Cannot be used with not_in. Can contain up + to 50 values. + items: + type: string + type: array + kind: + description: Specifies which characteristic will narrow + down the set of evaluated resources. Possible values + are resourceLocation, resourceType and resourceWithoutLocation. + type: string + notIn: + description: The list of not-allowed values for the + specified kind. Cannot be used with in. Can contain + up to 50 values. + items: + type: string + type: array + type: object + type: array + value: + description: Specifies the value to override the policy + property. Possible values for policyEffect override listed + policy effects. + type: string + type: object + type: array + parameters: + description: A JSON mapping of any Parameters for this Policy. + type: string + policyDefinitionId: + description: The ID of the Policy Definition or Policy Definition + Set. Changing this forces a new Policy Assignment to be created. + type: string + resourceId: + description: The ID of the Resource (or Resource Scope) where + this should be applied. Changing this forces a new Resource + Policy Assignment to be created. + type: string + resourceSelectors: + description: One or more resource_selectors blocks as defined + below to filter polices by resource properties. + items: + properties: + name: + description: Specifies a name for the resource selector. + type: string + selectors: + description: One or more resource_selector block as defined + below. + items: + properties: + in: + description: The list of allowed values for the specified + kind. 
Cannot be used with not_in. Can contain up + to 50 values. + items: + type: string + type: array + kind: + description: Specifies which characteristic will narrow + down the set of evaluated resources. Possible values + are resourceLocation, resourceType and resourceWithoutLocation. + type: string + notIn: + description: The list of not-allowed values for the + specified kind. Cannot be used with in. Can contain + up to 50 values. + items: + type: string + type: array + type: object + type: array + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/authorization.azure.upbound.io_subscriptionpolicyassignments.yaml b/package/crds/authorization.azure.upbound.io_subscriptionpolicyassignments.yaml index ff4f0f109..ba1edbb91 100644 --- a/package/crds/authorization.azure.upbound.io_subscriptionpolicyassignments.yaml +++ b/package/crds/authorization.azure.upbound.io_subscriptionpolicyassignments.yaml @@ -926,3 +926,905 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: SubscriptionPolicyAssignment is the Schema for the SubscriptionPolicyAssignments + API. Manages a Subscription Policy Assignment. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SubscriptionPolicyAssignmentSpec defines the desired state + of SubscriptionPolicyAssignment + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: A description which should be used for this Policy + Assignment. + type: string + displayName: + description: The Display Name for this Policy Assignment. + type: string + enforce: + description: Specifies if this Policy should be enforced or not? + Defaults to true. + type: boolean + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of User Managed Identity IDs which should + be assigned to the Policy Definition. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: The Type of Managed Identity which should be + added to this Policy Definition. Possible values are SystemAssigned + or UserAssigned. + type: string + type: object + location: + description: The Azure Region where the Policy Assignment should + exist. Changing this forces a new Policy Assignment to be created. + type: string + metadata: + description: A JSON mapping of any Metadata for this Policy. + type: string + nonComplianceMessage: + description: One or more non_compliance_message blocks as defined + below. + items: + properties: + content: + description: The non-compliance message text. When assigning + policy sets (initiatives), unless policy_definition_reference_id + is specified then this message will be the default for + all policies. + type: string + policyDefinitionReferenceId: + description: When assigning policy sets (initiatives), this + is the ID of the policy definition that the non-compliance + message applies to. + type: string + type: object + type: array + notScopes: + description: Specifies a list of Resource Scopes (for example + a Subscription, or a Resource Group) within this Management + Group which are excluded from this Policy. + items: + type: string + type: array + overrides: + description: One or more overrides blocks as defined below. More + detail about overrides and resource_selectors see policy assignment + structure + items: + properties: + selectors: + description: One or more override_selector block as defined + below. + items: + properties: + in: + description: The list of allowed values for the specified + kind. Cannot be used with not_in. Can contain up + to 50 values. + items: + type: string + type: array + notIn: + description: The list of not-allowed values for the + specified kind. Cannot be used with in. Can contain + up to 50 values. 
+ items: + type: string + type: array + type: object + type: array + value: + description: Specifies the value to override the policy + property. Possible values for policyEffect override listed + policy effects. + type: string + type: object + type: array + parameters: + description: A JSON mapping of any Parameters for this Policy. + type: string + policyDefinitionId: + description: The ID of the Policy Definition or Policy Definition + Set. Changing this forces a new Policy Assignment to be created. + type: string + policyDefinitionIdRef: + description: Reference to a PolicyDefinition in authorization + to populate policyDefinitionId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + policyDefinitionIdSelector: + description: Selector for a PolicyDefinition in authorization + to populate policyDefinitionId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + resourceSelectors: + description: One or more resource_selectors blocks as defined + below to filter polices by resource properties. + items: + properties: + name: + description: Specifies a name for the resource selector. + type: string + selectors: + description: One or more resource_selector block as defined + below. + items: + properties: + in: + description: The list of allowed values for the specified + kind. Cannot be used with not_in. Can contain up + to 50 values. + items: + type: string + type: array + kind: + description: Specifies which characteristic will narrow + down the set of evaluated resources. Possible values + are resourceLocation, resourceType and resourceWithoutLocation. + type: string + notIn: + description: The list of not-allowed values for the + specified kind. Cannot be used with in. Can contain + up to 50 values. + items: + type: string + type: array + type: object + type: array + type: object + type: array + subscriptionId: + description: The ID of the Subscription where this Policy Assignment + should be created. Changing this forces a new Policy Assignment + to be created. 
+ type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: A description which should be used for this Policy + Assignment. + type: string + displayName: + description: The Display Name for this Policy Assignment. + type: string + enforce: + description: Specifies if this Policy should be enforced or not? + Defaults to true. + type: boolean + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of User Managed Identity IDs which should + be assigned to the Policy Definition. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: The Type of Managed Identity which should be + added to this Policy Definition. Possible values are SystemAssigned + or UserAssigned. + type: string + type: object + location: + description: The Azure Region where the Policy Assignment should + exist. Changing this forces a new Policy Assignment to be created. + type: string + metadata: + description: A JSON mapping of any Metadata for this Policy. + type: string + nonComplianceMessage: + description: One or more non_compliance_message blocks as defined + below. + items: + properties: + content: + description: The non-compliance message text. 
When assigning + policy sets (initiatives), unless policy_definition_reference_id + is specified then this message will be the default for + all policies. + type: string + policyDefinitionReferenceId: + description: When assigning policy sets (initiatives), this + is the ID of the policy definition that the non-compliance + message applies to. + type: string + type: object + type: array + notScopes: + description: Specifies a list of Resource Scopes (for example + a Subscription, or a Resource Group) within this Management + Group which are excluded from this Policy. + items: + type: string + type: array + overrides: + description: One or more overrides blocks as defined below. More + detail about overrides and resource_selectors see policy assignment + structure + items: + properties: + selectors: + description: One or more override_selector block as defined + below. + items: + properties: + in: + description: The list of allowed values for the specified + kind. Cannot be used with not_in. Can contain up + to 50 values. + items: + type: string + type: array + notIn: + description: The list of not-allowed values for the + specified kind. Cannot be used with in. Can contain + up to 50 values. + items: + type: string + type: array + type: object + type: array + value: + description: Specifies the value to override the policy + property. Possible values for policyEffect override listed + policy effects. + type: string + type: object + type: array + parameters: + description: A JSON mapping of any Parameters for this Policy. + type: string + policyDefinitionId: + description: The ID of the Policy Definition or Policy Definition + Set. Changing this forces a new Policy Assignment to be created. + type: string + policyDefinitionIdRef: + description: Reference to a PolicyDefinition in authorization + to populate policyDefinitionId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + policyDefinitionIdSelector: + description: Selector for a PolicyDefinition in authorization + to populate policyDefinitionId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + resourceSelectors: + description: One or more resource_selectors blocks as defined + below to filter polices by resource properties. + items: + properties: + name: + description: Specifies a name for the resource selector. + type: string + selectors: + description: One or more resource_selector block as defined + below. + items: + properties: + in: + description: The list of allowed values for the specified + kind. Cannot be used with not_in. Can contain up + to 50 values. + items: + type: string + type: array + kind: + description: Specifies which characteristic will narrow + down the set of evaluated resources. Possible values + are resourceLocation, resourceType and resourceWithoutLocation. + type: string + notIn: + description: The list of not-allowed values for the + specified kind. Cannot be used with in. Can contain + up to 50 values. + items: + type: string + type: array + type: object + type: array + type: object + type: array + subscriptionId: + description: The ID of the Subscription where this Policy Assignment + should be created. Changing this forces a new Policy Assignment + to be created. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.subscriptionId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.subscriptionId) + || (has(self.initProvider) && has(self.initProvider.subscriptionId))' + status: + description: SubscriptionPolicyAssignmentStatus defines the observed state + of SubscriptionPolicyAssignment. + properties: + atProvider: + properties: + description: + description: A description which should be used for this Policy + Assignment. + type: string + displayName: + description: The Display Name for this Policy Assignment. + type: string + enforce: + description: Specifies if this Policy should be enforced or not? + Defaults to true. + type: boolean + id: + description: The ID of the Subscription Policy Assignment. + type: string + identity: + description: An identity block as defined below. 
+ properties: + identityIds: + description: A list of User Managed Identity IDs which should + be assigned to the Policy Definition. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Principal ID of the Policy Assignment for + this Subscription. + type: string + tenantId: + description: The Tenant ID of the Policy Assignment for this + Subscription. + type: string + type: + description: The Type of Managed Identity which should be + added to this Policy Definition. Possible values are SystemAssigned + or UserAssigned. + type: string + type: object + location: + description: The Azure Region where the Policy Assignment should + exist. Changing this forces a new Policy Assignment to be created. + type: string + metadata: + description: A JSON mapping of any Metadata for this Policy. + type: string + nonComplianceMessage: + description: One or more non_compliance_message blocks as defined + below. + items: + properties: + content: + description: The non-compliance message text. When assigning + policy sets (initiatives), unless policy_definition_reference_id + is specified then this message will be the default for + all policies. + type: string + policyDefinitionReferenceId: + description: When assigning policy sets (initiatives), this + is the ID of the policy definition that the non-compliance + message applies to. + type: string + type: object + type: array + notScopes: + description: Specifies a list of Resource Scopes (for example + a Subscription, or a Resource Group) within this Management + Group which are excluded from this Policy. + items: + type: string + type: array + overrides: + description: One or more overrides blocks as defined below. More + detail about overrides and resource_selectors see policy assignment + structure + items: + properties: + selectors: + description: One or more override_selector block as defined + below. 
+ items: + properties: + in: + description: The list of allowed values for the specified + kind. Cannot be used with not_in. Can contain up + to 50 values. + items: + type: string + type: array + kind: + description: Specifies which characteristic will narrow + down the set of evaluated resources. Possible values + are resourceLocation, resourceType and resourceWithoutLocation. + type: string + notIn: + description: The list of not-allowed values for the + specified kind. Cannot be used with in. Can contain + up to 50 values. + items: + type: string + type: array + type: object + type: array + value: + description: Specifies the value to override the policy + property. Possible values for policyEffect override listed + policy effects. + type: string + type: object + type: array + parameters: + description: A JSON mapping of any Parameters for this Policy. + type: string + policyDefinitionId: + description: The ID of the Policy Definition or Policy Definition + Set. Changing this forces a new Policy Assignment to be created. + type: string + resourceSelectors: + description: One or more resource_selectors blocks as defined + below to filter polices by resource properties. + items: + properties: + name: + description: Specifies a name for the resource selector. + type: string + selectors: + description: One or more resource_selector block as defined + below. + items: + properties: + in: + description: The list of allowed values for the specified + kind. Cannot be used with not_in. Can contain up + to 50 values. + items: + type: string + type: array + kind: + description: Specifies which characteristic will narrow + down the set of evaluated resources. Possible values + are resourceLocation, resourceType and resourceWithoutLocation. + type: string + notIn: + description: The list of not-allowed values for the + specified kind. Cannot be used with in. Can contain + up to 50 values. 
+ items: + type: string + type: array + type: object + type: array + type: object + type: array + subscriptionId: + description: The ID of the Subscription where this Policy Assignment + should be created. Changing this forces a new Policy Assignment + to be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/automation.azure.upbound.io_accounts.yaml b/package/crds/automation.azure.upbound.io_accounts.yaml index 1a46e9bae..e6e518873 100644 --- a/package/crds/automation.azure.upbound.io_accounts.yaml +++ b/package/crds/automation.azure.upbound.io_accounts.yaml @@ -616,3 +616,595 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Account is the Schema for the Accounts API. Manages a Automation + Account. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: AccountSpec defines the desired state of Account + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + encryption: + description: An encryption block as defined below. + items: + properties: + keySource: + type: string + keyVaultKeyId: + description: The ID of the Key Vault Key which should be + used to Encrypt the data in this Automation Account. + type: string + userAssignedIdentityId: + description: The User Assigned Managed Identity ID to be + used for accessing the Customer Managed Key for encryption. + type: string + type: object + type: array + identity: + description: An identity block as defined below. + properties: + identityIds: + description: The ID of the User Assigned Identity which should + be assigned to this Automation Account. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: The type of identity used for this Automation + Account. Possible values are SystemAssigned, UserAssigned + and SystemAssigned, UserAssigned. + type: string + type: object + localAuthenticationEnabled: + description: Whether requests using non-AAD authentication are + blocked. Defaults to true. 
+ type: boolean + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + publicNetworkAccessEnabled: + description: Whether public network access is allowed for the + automation account. Defaults to true. + type: boolean + resourceGroupName: + description: The name of the resource group in which the Automation + Account is created. Changing this forces a new resource to be + created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + skuName: + description: The SKU of the account. Possible values are Basic + and Free. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + encryption: + description: An encryption block as defined below. + items: + properties: + keySource: + type: string + keyVaultKeyId: + description: The ID of the Key Vault Key which should be + used to Encrypt the data in this Automation Account. 
+ type: string + userAssignedIdentityId: + description: The User Assigned Managed Identity ID to be + used for accessing the Customer Managed Key for encryption. + type: string + type: object + type: array + identity: + description: An identity block as defined below. + properties: + identityIds: + description: The ID of the User Assigned Identity which should + be assigned to this Automation Account. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: The type of identity used for this Automation + Account. Possible values are SystemAssigned, UserAssigned + and SystemAssigned, UserAssigned. + type: string + type: object + localAuthenticationEnabled: + description: Whether requests using non-AAD authentication are + blocked. Defaults to true. + type: boolean + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + publicNetworkAccessEnabled: + description: Whether public network access is allowed for the + automation account. Defaults to true. + type: boolean + skuName: + description: The SKU of the account. Possible values are Basic + and Free. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.skuName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.skuName) + || (has(self.initProvider) && has(self.initProvider.skuName))' + status: + description: AccountStatus defines the observed state of Account. + properties: + atProvider: + properties: + dscServerEndpoint: + description: The DSC Server Endpoint associated with this Automation + Account. + type: string + encryption: + description: An encryption block as defined below. 
+ items: + properties: + keySource: + type: string + keyVaultKeyId: + description: The ID of the Key Vault Key which should be + used to Encrypt the data in this Automation Account. + type: string + userAssignedIdentityId: + description: The User Assigned Managed Identity ID to be + used for accessing the Customer Managed Key for encryption. + type: string + type: object + type: array + hybridServiceUrl: + description: The URL of automation hybrid service which is used + for hybrid worker on-boarding With this Automation Account. + type: string + id: + description: The ID of the Automation Account. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: The ID of the User Assigned Identity which should + be assigned to this Automation Account. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Principal ID associated with this Managed + Service Identity. + type: string + tenantId: + description: The Tenant ID associated with this Managed Service + Identity. + type: string + type: + description: The type of identity used for this Automation + Account. Possible values are SystemAssigned, UserAssigned + and SystemAssigned, UserAssigned. + type: string + type: object + localAuthenticationEnabled: + description: Whether requests using non-AAD authentication are + blocked. Defaults to true. + type: boolean + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + privateEndpointConnection: + items: + properties: + id: + description: The ID of the Automation Account. + type: string + name: + description: Specifies the name of the Automation Account. + Changing this forces a new resource to be created. 
+ type: string + type: object + type: array + publicNetworkAccessEnabled: + description: Whether public network access is allowed for the + automation account. Defaults to true. + type: boolean + resourceGroupName: + description: The name of the resource group in which the Automation + Account is created. Changing this forces a new resource to be + created. + type: string + skuName: + description: The SKU of the account. Possible values are Basic + and Free. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/automation.azure.upbound.io_modules.yaml b/package/crds/automation.azure.upbound.io_modules.yaml index 525a6e2e9..f35e4dcef 100644 --- a/package/crds/automation.azure.upbound.io_modules.yaml +++ b/package/crds/automation.azure.upbound.io_modules.yaml @@ -564,3 +564,537 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Module is the Schema for the Modules API. Manages a Automation + Module. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ModuleSpec defines the desired state of Module + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + automationAccountName: + description: The name of the automation account in which the Module + is created. Changing this forces a new resource to be created. + type: string + automationAccountNameRef: + description: Reference to a Account in automation to populate + automationAccountName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + automationAccountNameSelector: + description: Selector for a Account in automation to populate + automationAccountName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + moduleLink: + description: A module_link block as defined below. + properties: + hash: + description: A hash block as defined below. + properties: + algorithm: + description: Specifies the algorithm used for the hash + content. + type: string + value: + description: The hash value of the content. + type: string + type: object + uri: + description: The URI of the module content (zip or nupkg). + type: string + type: object + resourceGroupName: + description: The name of the resource group in which the Module + is created. Changing this forces a new resource to be created. 
+ type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + moduleLink: + description: A module_link block as defined below. + properties: + hash: + description: A hash block as defined below. + properties: + algorithm: + description: Specifies the algorithm used for the hash + content. + type: string + value: + description: The hash value of the content. + type: string + type: object + uri: + description: The URI of the module content (zip or nupkg). + type: string + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.moduleLink is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.moduleLink) + || (has(self.initProvider) && has(self.initProvider.moduleLink))' + status: + description: ModuleStatus defines the observed state of Module. + properties: + atProvider: + properties: + automationAccountName: + description: The name of the automation account in which the Module + is created. Changing this forces a new resource to be created. + type: string + id: + description: The Automation Module ID. + type: string + moduleLink: + description: A module_link block as defined below. + properties: + hash: + description: A hash block as defined below. + properties: + algorithm: + description: Specifies the algorithm used for the hash + content. + type: string + value: + description: The hash value of the content. 
+ type: string + type: object + uri: + description: The URI of the module content (zip or nupkg). + type: string + type: object + resourceGroupName: + description: The name of the resource group in which the Module + is created. Changing this forces a new resource to be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/automation.azure.upbound.io_runbooks.yaml b/package/crds/automation.azure.upbound.io_runbooks.yaml index d88faa5e1..02cb6a032 100644 --- a/package/crds/automation.azure.upbound.io_runbooks.yaml +++ b/package/crds/automation.azure.upbound.io_runbooks.yaml @@ -1113,3 +1113,1068 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: RunBook is the Schema for the RunBooks API. Manages a Automation + Runbook. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: RunBookSpec defines the desired state of RunBook + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + automationAccountName: + description: The name of the automation account in which the Runbook + is created. Changing this forces a new resource to be created. + type: string + automationAccountNameRef: + description: Reference to a Account in automation to populate + automationAccountName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + automationAccountNameSelector: + description: Selector for a Account in automation to populate + automationAccountName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + content: + description: The desired content of the runbook. + type: string + description: + description: A description for this credential. + type: string + draft: + description: A draft block as defined below . + properties: + contentLink: + description: A publish_content_link block as defined above. + properties: + hash: + description: A hash block as defined below. + properties: + algorithm: + description: Specifies the hash algorithm used to + hash the content. + type: string + value: + description: Specifies the expected hash value of + the content. 
+ type: string + type: object + uri: + description: The URI of the runbook content. + type: string + version: + description: Specifies the version of the content + type: string + type: object + editModeEnabled: + description: Whether the draft in edit mode. + type: boolean + outputTypes: + description: Specifies the output types of the runbook. + items: + type: string + type: array + parameters: + description: A list of parameters block as defined below. + items: + properties: + defaultValue: + description: Specifies the default value of the parameter. + type: string + key: + description: The name of the parameter. + type: string + mandatory: + description: Whether this parameter is mandatory. + type: boolean + position: + description: Specifies the position of the parameter. + type: number + type: + description: Specifies the type of this parameter. + type: string + type: object + type: array + type: object + jobSchedule: + items: + properties: + jobScheduleId: + description: The Automation Runbook ID. + type: string + parameters: + additionalProperties: + type: string + description: A list of parameters block as defined below. + type: object + x-kubernetes-map-type: granular + runOn: + type: string + scheduleName: + description: Specifies the name of the Runbook. Changing + this forces a new resource to be created. + type: string + type: object + type: array + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + logActivityTraceLevel: + description: Specifies the activity-level tracing options of the + runbook, available only for Graphical runbooks. Possible values + are 0 for None, 9 for Basic, and 15 for Detailed. Must turn + on Verbose logging in order to see the tracing. + type: number + logProgress: + description: Progress log option. + type: boolean + logVerbose: + description: Verbose log option. 
+ type: boolean + name: + description: Specifies the name of the Runbook. Changing this + forces a new resource to be created. + type: string + publishContentLink: + description: One publish_content_link block as defined below. + properties: + hash: + description: A hash block as defined below. + properties: + algorithm: + description: Specifies the hash algorithm used to hash + the content. + type: string + value: + description: Specifies the expected hash value of the + content. + type: string + type: object + uri: + description: The URI of the runbook content. + type: string + version: + description: Specifies the version of the content + type: string + type: object + resourceGroupName: + description: The name of the resource group in which the Runbook + is created. Changing this forces a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + runbookType: + description: The type of the runbook - can be either Graph, GraphPowerShell, + GraphPowerShellWorkflow, PowerShellWorkflow, PowerShell, PowerShell72, + Python3, Python2 or Script. Changing this forces a new resource + to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. 
+ The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + automationAccountName: + description: The name of the automation account in which the Runbook + is created. Changing this forces a new resource to be created. + type: string + automationAccountNameRef: + description: Reference to a Account in automation to populate + automationAccountName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + automationAccountNameSelector: + description: Selector for a Account in automation to populate + automationAccountName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + content: + description: The desired content of the runbook. + type: string + description: + description: A description for this credential. + type: string + draft: + description: A draft block as defined below . + properties: + contentLink: + description: A publish_content_link block as defined above. + properties: + hash: + description: A hash block as defined below. + properties: + algorithm: + description: Specifies the hash algorithm used to + hash the content. + type: string + value: + description: Specifies the expected hash value of + the content. + type: string + type: object + uri: + description: The URI of the runbook content. + type: string + version: + description: Specifies the version of the content + type: string + type: object + editModeEnabled: + description: Whether the draft in edit mode. + type: boolean + outputTypes: + description: Specifies the output types of the runbook. + items: + type: string + type: array + parameters: + description: A list of parameters block as defined below. + items: + properties: + defaultValue: + description: Specifies the default value of the parameter. + type: string + key: + description: The name of the parameter. 
+ type: string + mandatory: + description: Whether this parameter is mandatory. + type: boolean + position: + description: Specifies the position of the parameter. + type: number + type: + description: Specifies the type of this parameter. + type: string + type: object + type: array + type: object + jobSchedule: + items: + properties: + jobScheduleId: + description: The Automation Runbook ID. + type: string + parameters: + additionalProperties: + type: string + description: A list of parameters block as defined below. + type: object + x-kubernetes-map-type: granular + runOn: + type: string + scheduleName: + description: Specifies the name of the Runbook. Changing + this forces a new resource to be created. + type: string + type: object + type: array + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + logActivityTraceLevel: + description: Specifies the activity-level tracing options of the + runbook, available only for Graphical runbooks. Possible values + are 0 for None, 9 for Basic, and 15 for Detailed. Must turn + on Verbose logging in order to see the tracing. + type: number + logProgress: + description: Progress log option. + type: boolean + logVerbose: + description: Verbose log option. + type: boolean + name: + description: Specifies the name of the Runbook. Changing this + forces a new resource to be created. + type: string + publishContentLink: + description: One publish_content_link block as defined below. + properties: + hash: + description: A hash block as defined below. + properties: + algorithm: + description: Specifies the hash algorithm used to hash + the content. + type: string + value: + description: Specifies the expected hash value of the + content. + type: string + type: object + uri: + description: The URI of the runbook content. 
+ type: string + version: + description: Specifies the version of the content + type: string + type: object + resourceGroupName: + description: The name of the resource group in which the Runbook + is created. Changing this forces a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + runbookType: + description: The type of the runbook - can be either Graph, GraphPowerShell, + GraphPowerShellWorkflow, PowerShellWorkflow, PowerShell, PowerShell72, + Python3, Python2 or Script. Changing this forces a new resource + to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.logProgress is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.logProgress) + || (has(self.initProvider) && has(self.initProvider.logProgress))' + - message: spec.forProvider.logVerbose is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.logVerbose) + || (has(self.initProvider) && has(self.initProvider.logVerbose))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in 
self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.runbookType is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.runbookType) + || (has(self.initProvider) && has(self.initProvider.runbookType))' + status: + description: RunBookStatus defines the observed state of RunBook. + properties: + atProvider: + properties: + automationAccountName: + description: The name of the automation account in which the Runbook + is created. Changing this forces a new resource to be created. + type: string + content: + description: The desired content of the runbook. + type: string + description: + description: A description for this credential. + type: string + draft: + description: A draft block as defined below . + properties: + contentLink: + description: A publish_content_link block as defined above. + properties: + hash: + description: A hash block as defined below. + properties: + algorithm: + description: Specifies the hash algorithm used to + hash the content. + type: string + value: + description: Specifies the expected hash value of + the content. + type: string + type: object + uri: + description: The URI of the runbook content. + type: string + version: + description: Specifies the version of the content + type: string + type: object + creationTime: + type: string + editModeEnabled: + description: Whether the draft in edit mode. + type: boolean + lastModifiedTime: + type: string + outputTypes: + description: Specifies the output types of the runbook. + items: + type: string + type: array + parameters: + description: A list of parameters block as defined below. + items: + properties: + defaultValue: + description: Specifies the default value of the parameter. 
+ type: string + key: + description: The name of the parameter. + type: string + mandatory: + description: Whether this parameter is mandatory. + type: boolean + position: + description: Specifies the position of the parameter. + type: number + type: + description: Specifies the type of this parameter. + type: string + type: object + type: array + type: object + id: + description: The Automation Runbook ID. + type: string + jobSchedule: + items: + properties: + jobScheduleId: + description: The Automation Runbook ID. + type: string + parameters: + additionalProperties: + type: string + description: A list of parameters block as defined below. + type: object + x-kubernetes-map-type: granular + runOn: + type: string + scheduleName: + description: Specifies the name of the Runbook. Changing + this forces a new resource to be created. + type: string + type: object + type: array + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + logActivityTraceLevel: + description: Specifies the activity-level tracing options of the + runbook, available only for Graphical runbooks. Possible values + are 0 for None, 9 for Basic, and 15 for Detailed. Must turn + on Verbose logging in order to see the tracing. + type: number + logProgress: + description: Progress log option. + type: boolean + logVerbose: + description: Verbose log option. + type: boolean + name: + description: Specifies the name of the Runbook. Changing this + forces a new resource to be created. + type: string + publishContentLink: + description: One publish_content_link block as defined below. + properties: + hash: + description: A hash block as defined below. + properties: + algorithm: + description: Specifies the hash algorithm used to hash + the content. + type: string + value: + description: Specifies the expected hash value of the + content. 
+ type: string + type: object + uri: + description: The URI of the runbook content. + type: string + version: + description: Specifies the version of the content + type: string + type: object + resourceGroupName: + description: The name of the resource group in which the Runbook + is created. Changing this forces a new resource to be created. + type: string + runbookType: + description: The type of the runbook - can be either Graph, GraphPowerShell, + GraphPowerShellWorkflow, PowerShellWorkflow, PowerShell, PowerShell72, + Python3, Python2 or Script. Changing this forces a new resource + to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/automation.azure.upbound.io_schedules.yaml b/package/crds/automation.azure.upbound.io_schedules.yaml index d88a723f8..4431cb276 100644 --- a/package/crds/automation.azure.upbound.io_schedules.yaml +++ b/package/crds/automation.azure.upbound.io_schedules.yaml @@ -669,3 +669,648 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Schedule is the Schema for the Schedules API. Manages a Automation + Schedule. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ScheduleSpec defines the desired state of Schedule + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + automationAccountName: + description: The name of the automation account in which the Schedule + is created. Changing this forces a new resource to be created. + type: string + automationAccountNameRef: + description: Reference to a Account in automation to populate + automationAccountName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + automationAccountNameSelector: + description: Selector for a Account in automation to populate + automationAccountName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + description: + description: A description for this Schedule. + type: string + expiryTime: + description: The end time of the schedule. + type: string + frequency: + description: The frequency of the schedule. - can be either OneTime, + Day, Hour, Week, or Month. + type: string + interval: + description: The number of frequencys between runs. Only valid + when frequency is Day, Hour, Week, or Month and defaults to + 1. + type: number + monthDays: + description: List of days of the month that the job should execute + on. Must be between 1 and 31. -1 for last day of the month. 
+ Only valid when frequency is Month. + items: + type: number + type: array + x-kubernetes-list-type: set + monthlyOccurrence: + description: One monthly_occurrence blocks as defined below to + specifies occurrences of days within a month. Only valid when + frequency is Month. The monthly_occurrence block supports fields + documented below. + properties: + day: + description: Day of the occurrence. Must be one of Monday, + Tuesday, Wednesday, Thursday, Friday, Saturday, Sunday. + type: string + occurrence: + description: Occurrence of the week within the month. Must + be between 1 and 5. -1 for last week within the month. + type: number + type: object + resourceGroupName: + description: The name of the resource group in which the Schedule + is created. Changing this forces a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + startTime: + description: Start time of the schedule. Must be at least five + minutes in the future. Defaults to seven minutes in the future + from the time the resource is created. + type: string + timezone: + description: 'The timezone of the start time. Defaults to Etc/UTC. + For possible values see: https://docs.microsoft.com/en-us/rest/api/maps/timezone/gettimezoneenumwindows' + type: string + weekDays: + description: List of days of the week that the job should execute + on. Only valid when frequency is Week. Possible values are Monday, + Tuesday, Wednesday, Thursday, Friday, Saturday and Sunday. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. 
+ InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: A description for this Schedule. + type: string + expiryTime: + description: The end time of the schedule. + type: string + frequency: + description: The frequency of the schedule. - can be either OneTime, + Day, Hour, Week, or Month. + type: string + interval: + description: The number of frequencys between runs. Only valid + when frequency is Day, Hour, Week, or Month and defaults to + 1. + type: number + monthDays: + description: List of days of the month that the job should execute + on. Must be between 1 and 31. -1 for last day of the month. + Only valid when frequency is Month. + items: + type: number + type: array + x-kubernetes-list-type: set + monthlyOccurrence: + description: One monthly_occurrence blocks as defined below to + specifies occurrences of days within a month. Only valid when + frequency is Month. The monthly_occurrence block supports fields + documented below. + properties: + day: + description: Day of the occurrence. Must be one of Monday, + Tuesday, Wednesday, Thursday, Friday, Saturday, Sunday. + type: string + occurrence: + description: Occurrence of the week within the month. Must + be between 1 and 5. -1 for last week within the month. + type: number + type: object + startTime: + description: Start time of the schedule. Must be at least five + minutes in the future. Defaults to seven minutes in the future + from the time the resource is created. 
+ type: string + timezone: + description: 'The timezone of the start time. Defaults to Etc/UTC. + For possible values see: https://docs.microsoft.com/en-us/rest/api/maps/timezone/gettimezoneenumwindows' + type: string + weekDays: + description: List of days of the week that the job should execute + on. Only valid when frequency is Week. Possible values are Monday, + Tuesday, Wednesday, Thursday, Friday, Saturday and Sunday. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.frequency is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.frequency) + || (has(self.initProvider) && has(self.initProvider.frequency))' + status: + description: ScheduleStatus defines the observed state of Schedule. + properties: + atProvider: + properties: + automationAccountName: + description: The name of the automation account in which the Schedule + is created. Changing this forces a new resource to be created. + type: string + description: + description: A description for this Schedule. + type: string + expiryTime: + description: The end time of the schedule. + type: string + frequency: + description: The frequency of the schedule. - can be either OneTime, + Day, Hour, Week, or Month. + type: string + id: + description: The Automation Schedule ID. + type: string + interval: + description: The number of frequencys between runs. Only valid + when frequency is Day, Hour, Week, or Month and defaults to + 1. + type: number + monthDays: + description: List of days of the month that the job should execute + on. Must be between 1 and 31. -1 for last day of the month. + Only valid when frequency is Month. + items: + type: number + type: array + x-kubernetes-list-type: set + monthlyOccurrence: + description: One monthly_occurrence blocks as defined below to + specifies occurrences of days within a month. Only valid when + frequency is Month. The monthly_occurrence block supports fields + documented below. + properties: + day: + description: Day of the occurrence. Must be one of Monday, + Tuesday, Wednesday, Thursday, Friday, Saturday, Sunday. + type: string + occurrence: + description: Occurrence of the week within the month. Must + be between 1 and 5. 
-1 for last week within the month. + type: number + type: object + resourceGroupName: + description: The name of the resource group in which the Schedule + is created. Changing this forces a new resource to be created. + type: string + startTime: + description: Start time of the schedule. Must be at least five + minutes in the future. Defaults to seven minutes in the future + from the time the resource is created. + type: string + timezone: + description: 'The timezone of the start time. Defaults to Etc/UTC. + For possible values see: https://docs.microsoft.com/en-us/rest/api/maps/timezone/gettimezoneenumwindows' + type: string + weekDays: + description: List of days of the week that the job should execute + on. Only valid when frequency is Week. Possible values are Monday, + Tuesday, Wednesday, Thursday, Friday, Saturday and Sunday. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? 
+ type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/cache.azure.upbound.io_rediscaches.yaml b/package/crds/cache.azure.upbound.io_rediscaches.yaml index 54fd3e334..0ee1abd08 100644 --- a/package/crds/cache.azure.upbound.io_rediscaches.yaml +++ b/package/crds/cache.azure.upbound.io_rediscaches.yaml @@ -1190,3 +1190,1163 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: RedisCache is the Schema for the RedisCaches API. Manages a Redis + Cache + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: RedisCacheSpec defines the desired state of RedisCache + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + capacity: + description: The size of the Redis cache to deploy. Valid values + for a SKU family of C (Basic/Standard) are 0, 1, 2, 3, 4, 5, + 6, and for P (Premium) family are 1, 2, 3, 4, 5. + type: number + enableNonSslPort: + description: Enable the non-SSL port (6379) - disabled by default. + type: boolean + family: + description: The SKU family/pricing group to use. Valid values + are C (for Basic/Standard SKU family) and P (for Premium) + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of User Assigned Managed Identity IDs + to be assigned to this Redis Cluster. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Redis Cluster. Possible + values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + location: + description: The location of the resource group. Changing this + forces a new resource to be created. + type: string + minimumTlsVersion: + description: The minimum TLS version. Possible values are 1.0, + 1.1 and 1.2. Defaults to 1.0. + type: string + patchSchedule: + description: A list of patch_schedule blocks as defined below. + items: + properties: + dayOfWeek: + description: the Weekday name - possible values include + Monday, Tuesday, Wednesday etc. + type: string + maintenanceWindow: + description: The ISO 8601 timespan which specifies the amount + of time the Redis Cache can be updated. Defaults to PT5H. + type: string + startHourUtc: + description: the Start Hour for maintenance in UTC - possible + values range from 0 - 23. + type: number + type: object + type: array + privateStaticIpAddress: + description: The Static IP Address to assign to the Redis Cache + when hosted inside the Virtual Network. This argument implies + the use of subnet_id. Changing this forces a new resource to + be created. + type: string + publicNetworkAccessEnabled: + description: Whether or not public network access is allowed for + this Redis Cache. true means this resource could be accessed + by both public and private endpoint. false means only private + endpoint access is allowed. Defaults to true. + type: boolean + redisConfiguration: + description: A redis_configuration block as defined below - with + some limitations by SKU - defaults/details are shown below. + properties: + activeDirectoryAuthenticationEnabled: + description: Enable Microsoft Entra (AAD) authentication. + Defaults to false. 
+ type: boolean + aofBackupEnabled: + description: Enable or disable AOF persistence for this Redis + Cache. Defaults to false. + type: boolean + aofStorageConnectionString0SecretRef: + description: First Storage Account connection string for AOF + persistence. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + aofStorageConnectionString1SecretRef: + description: Second Storage Account connection string for + AOF persistence. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + dataPersistenceAuthenticationMethod: + description: Preferred auth method to communicate to storage + account used for data persistence. Possible values are SAS + and ManagedIdentity. Defaults to SAS. + type: string + enableAuthentication: + description: If set to false, the Redis instance will be accessible + without authentication. Defaults to true. + type: boolean + maxfragmentationmemoryReserved: + description: Value in megabytes reserved to accommodate for + memory fragmentation. Defaults are shown below. + type: number + maxmemoryDelta: + description: The max-memory delta for this Redis instance. + Defaults are shown below. + type: number + maxmemoryPolicy: + description: How Redis will select what to remove when maxmemory + is reached. Defaults to volatile-lru. + type: string + maxmemoryReserved: + description: Value in megabytes reserved for non-cache usage + e.g. failover. Defaults are shown below. 
+ type: number + notifyKeyspaceEvents: + description: Keyspace notifications allows clients to subscribe + to Pub/Sub channels in order to receive events affecting + the Redis data set in some way. Reference + type: string + rdbBackupEnabled: + description: Is Backup Enabled? Only supported on Premium + SKUs. Defaults to false. + type: boolean + rdbBackupFrequency: + description: 'The Backup Frequency in Minutes. Only supported + on Premium SKUs. Possible values are: 15, 30, 60, 360, 720 + and 1440.' + type: number + rdbBackupMaxSnapshotCount: + description: The maximum number of snapshots to create as + a backup. Only supported for Premium SKUs. + type: number + rdbStorageConnectionStringSecretRef: + description: 'The Connection String to the Storage Account. + Only supported for Premium SKUs. In the format: DefaultEndpointsProtocol=https;BlobEndpoint=${azurerm_storage_account.example.primary_blob_endpoint};AccountName=${azurerm_storage_account.example.name};AccountKey=${azurerm_storage_account.example.primary_access_key}.' + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + storageAccountSubscriptionId: + description: The ID of the Subscription containing the Storage + Account. + type: string + type: object + redisVersion: + description: 'Redis version. Only major version needed. Valid + values: 4, 6.' + type: string + replicasPerMaster: + description: Amount of replicas to create per master for this + Redis Cache. + type: number + replicasPerPrimary: + description: Amount of replicas to create per primary for this + Redis Cache. If both replicas_per_primary and replicas_per_master + are set, they need to be equal. + type: number + resourceGroupName: + description: The name of the resource group in which to create + the Redis instance. 
Changing this forces a new resource to be + created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + shardCount: + description: Only available when using the Premium SKU The number + of Shards to create on the Redis Cluster. + type: number + skuName: + description: The SKU of Redis to use. Possible values are Basic, + Standard and Premium. + type: string + subnetId: + description: Only available when using the Premium SKU The ID + of the Subnet within which the Redis Cache should be deployed. + This Subnet must only contain Azure Cache for Redis instances + without any other type of resources. Changing this forces a + new resource to be created. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + tenantSettings: + additionalProperties: + type: string + description: A mapping of tenant settings to assign to the resource. + type: object + x-kubernetes-map-type: granular + zones: + description: Specifies a list of Availability Zones in which this + Redis Cache should be located. Changing this forces a new Redis + Cache to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. 
It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + capacity: + description: The size of the Redis cache to deploy. Valid values + for a SKU family of C (Basic/Standard) are 0, 1, 2, 3, 4, 5, + 6, and for P (Premium) family are 1, 2, 3, 4, 5. + type: number + enableNonSslPort: + description: Enable the non-SSL port (6379) - disabled by default. + type: boolean + family: + description: The SKU family/pricing group to use. Valid values + are C (for Basic/Standard SKU family) and P (for Premium) + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of User Assigned Managed Identity IDs + to be assigned to this Redis Cluster. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Redis Cluster. Possible + values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + location: + description: The location of the resource group. Changing this + forces a new resource to be created. + type: string + minimumTlsVersion: + description: The minimum TLS version. Possible values are 1.0, + 1.1 and 1.2. Defaults to 1.0. + type: string + patchSchedule: + description: A list of patch_schedule blocks as defined below. 
+ items: + properties: + dayOfWeek: + description: the Weekday name - possible values include + Monday, Tuesday, Wednesday etc. + type: string + maintenanceWindow: + description: The ISO 8601 timespan which specifies the amount + of time the Redis Cache can be updated. Defaults to PT5H. + type: string + startHourUtc: + description: the Start Hour for maintenance in UTC - possible + values range from 0 - 23. + type: number + type: object + type: array + privateStaticIpAddress: + description: The Static IP Address to assign to the Redis Cache + when hosted inside the Virtual Network. This argument implies + the use of subnet_id. Changing this forces a new resource to + be created. + type: string + publicNetworkAccessEnabled: + description: Whether or not public network access is allowed for + this Redis Cache. true means this resource could be accessed + by both public and private endpoint. false means only private + endpoint access is allowed. Defaults to true. + type: boolean + redisConfiguration: + description: A redis_configuration block as defined below - with + some limitations by SKU - defaults/details are shown below. + properties: + activeDirectoryAuthenticationEnabled: + description: Enable Microsoft Entra (AAD) authentication. + Defaults to false. + type: boolean + aofBackupEnabled: + description: Enable or disable AOF persistence for this Redis + Cache. Defaults to false. + type: boolean + dataPersistenceAuthenticationMethod: + description: Preferred auth method to communicate to storage + account used for data persistence. Possible values are SAS + and ManagedIdentity. Defaults to SAS. + type: string + enableAuthentication: + description: If set to false, the Redis instance will be accessible + without authentication. Defaults to true. + type: boolean + maxfragmentationmemoryReserved: + description: Value in megabytes reserved to accommodate for + memory fragmentation. Defaults are shown below. 
+ type: number + maxmemoryDelta: + description: The max-memory delta for this Redis instance. + Defaults are shown below. + type: number + maxmemoryPolicy: + description: How Redis will select what to remove when maxmemory + is reached. Defaults to volatile-lru. + type: string + maxmemoryReserved: + description: Value in megabytes reserved for non-cache usage + e.g. failover. Defaults are shown below. + type: number + notifyKeyspaceEvents: + description: Keyspace notifications allows clients to subscribe + to Pub/Sub channels in order to receive events affecting + the Redis data set in some way. Reference + type: string + rdbBackupEnabled: + description: Is Backup Enabled? Only supported on Premium + SKUs. Defaults to false. + type: boolean + rdbBackupFrequency: + description: 'The Backup Frequency in Minutes. Only supported + on Premium SKUs. Possible values are: 15, 30, 60, 360, 720 + and 1440.' + type: number + rdbBackupMaxSnapshotCount: + description: The maximum number of snapshots to create as + a backup. Only supported for Premium SKUs. + type: number + storageAccountSubscriptionId: + description: The ID of the Subscription containing the Storage + Account. + type: string + type: object + redisVersion: + description: 'Redis version. Only major version needed. Valid + values: 4, 6.' + type: string + replicasPerMaster: + description: Amount of replicas to create per master for this + Redis Cache. + type: number + replicasPerPrimary: + description: Amount of replicas to create per primary for this + Redis Cache. If both replicas_per_primary and replicas_per_master + are set, they need to be equal. + type: number + shardCount: + description: Only available when using the Premium SKU The number + of Shards to create on the Redis Cluster. + type: number + skuName: + description: The SKU of Redis to use. Possible values are Basic, + Standard and Premium. 
+ type: string + subnetId: + description: Only available when using the Premium SKU The ID + of the Subnet within which the Redis Cache should be deployed. + This Subnet must only contain Azure Cache for Redis instances + without any other type of resources. Changing this forces a + new resource to be created. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + tenantSettings: + additionalProperties: + type: string + description: A mapping of tenant settings to assign to the resource. + type: object + x-kubernetes-map-type: granular + zones: + description: Specifies a list of Availability Zones in which this + Redis Cache should be located. Changing this forces a new Redis + Cache to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.capacity is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.capacity) + || (has(self.initProvider) && has(self.initProvider.capacity))' + - message: spec.forProvider.family is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.family) + || (has(self.initProvider) && has(self.initProvider.family))' + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.redisVersion is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || 
''Update'' in self.managementPolicies) || has(self.forProvider.redisVersion) + || (has(self.initProvider) && has(self.initProvider.redisVersion))' + - message: spec.forProvider.skuName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.skuName) + || (has(self.initProvider) && has(self.initProvider.skuName))' + status: + description: RedisCacheStatus defines the observed state of RedisCache. + properties: + atProvider: + properties: + capacity: + description: The size of the Redis cache to deploy. Valid values + for a SKU family of C (Basic/Standard) are 0, 1, 2, 3, 4, 5, + 6, and for P (Premium) family are 1, 2, 3, 4, 5. + type: number + enableNonSslPort: + description: Enable the non-SSL port (6379) - disabled by default. + type: boolean + family: + description: The SKU family/pricing group to use. Valid values + are C (for Basic/Standard SKU family) and P (for Premium) + type: string + hostname: + description: The Hostname of the Redis Instance + type: string + id: + description: The Route ID. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of User Assigned Managed Identity IDs + to be assigned to this Redis Cluster. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Route ID. + type: string + tenantId: + description: The Route ID. + type: string + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Redis Cluster. Possible + values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + location: + description: The location of the resource group. Changing this + forces a new resource to be created. + type: string + minimumTlsVersion: + description: The minimum TLS version. 
Possible values are 1.0, + 1.1 and 1.2. Defaults to 1.0. + type: string + patchSchedule: + description: A list of patch_schedule blocks as defined below. + items: + properties: + dayOfWeek: + description: the Weekday name - possible values include + Monday, Tuesday, Wednesday etc. + type: string + maintenanceWindow: + description: The ISO 8601 timespan which specifies the amount + of time the Redis Cache can be updated. Defaults to PT5H. + type: string + startHourUtc: + description: the Start Hour for maintenance in UTC - possible + values range from 0 - 23. + type: number + type: object + type: array + port: + description: The non-SSL Port of the Redis Instance + type: number + privateStaticIpAddress: + description: The Static IP Address to assign to the Redis Cache + when hosted inside the Virtual Network. This argument implies + the use of subnet_id. Changing this forces a new resource to + be created. + type: string + publicNetworkAccessEnabled: + description: Whether or not public network access is allowed for + this Redis Cache. true means this resource could be accessed + by both public and private endpoint. false means only private + endpoint access is allowed. Defaults to true. + type: boolean + redisConfiguration: + description: A redis_configuration block as defined below - with + some limitations by SKU - defaults/details are shown below. + properties: + activeDirectoryAuthenticationEnabled: + description: Enable Microsoft Entra (AAD) authentication. + Defaults to false. + type: boolean + aofBackupEnabled: + description: Enable or disable AOF persistence for this Redis + Cache. Defaults to false. + type: boolean + dataPersistenceAuthenticationMethod: + description: Preferred auth method to communicate to storage + account used for data persistence. Possible values are SAS + and ManagedIdentity. Defaults to SAS. + type: string + enableAuthentication: + description: If set to false, the Redis instance will be accessible + without authentication. 
Defaults to true. + type: boolean + maxclients: + description: Returns the max number of connected clients at + the same time. + type: number + maxfragmentationmemoryReserved: + description: Value in megabytes reserved to accommodate for + memory fragmentation. Defaults are shown below. + type: number + maxmemoryDelta: + description: The max-memory delta for this Redis instance. + Defaults are shown below. + type: number + maxmemoryPolicy: + description: How Redis will select what to remove when maxmemory + is reached. Defaults to volatile-lru. + type: string + maxmemoryReserved: + description: Value in megabytes reserved for non-cache usage + e.g. failover. Defaults are shown below. + type: number + notifyKeyspaceEvents: + description: Keyspace notifications allows clients to subscribe + to Pub/Sub channels in order to receive events affecting + the Redis data set in some way. Reference + type: string + rdbBackupEnabled: + description: Is Backup Enabled? Only supported on Premium + SKUs. Defaults to false. + type: boolean + rdbBackupFrequency: + description: 'The Backup Frequency in Minutes. Only supported + on Premium SKUs. Possible values are: 15, 30, 60, 360, 720 + and 1440.' + type: number + rdbBackupMaxSnapshotCount: + description: The maximum number of snapshots to create as + a backup. Only supported for Premium SKUs. + type: number + storageAccountSubscriptionId: + description: The ID of the Subscription containing the Storage + Account. + type: string + type: object + redisVersion: + description: 'Redis version. Only major version needed. Valid + values: 4, 6.' + type: string + replicasPerMaster: + description: Amount of replicas to create per master for this + Redis Cache. + type: number + replicasPerPrimary: + description: Amount of replicas to create per primary for this + Redis Cache. If both replicas_per_primary and replicas_per_master + are set, they need to be equal. 
+ type: number + resourceGroupName: + description: The name of the resource group in which to create + the Redis instance. Changing this forces a new resource to be + created. + type: string + shardCount: + description: Only available when using the Premium SKU The number + of Shards to create on the Redis Cluster. + type: number + skuName: + description: The SKU of Redis to use. Possible values are Basic, + Standard and Premium. + type: string + sslPort: + description: The SSL Port of the Redis Instance + type: number + subnetId: + description: Only available when using the Premium SKU The ID + of the Subnet within which the Redis Cache should be deployed. + This Subnet must only contain Azure Cache for Redis instances + without any other type of resources. Changing this forces a + new resource to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + tenantSettings: + additionalProperties: + type: string + description: A mapping of tenant settings to assign to the resource. + type: object + x-kubernetes-map-type: granular + zones: + description: Specifies a list of Availability Zones in which this + Redis Cache should be located. Changing this forces a new Redis + Cache to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/cdn.azure.upbound.io_endpoints.yaml b/package/crds/cdn.azure.upbound.io_endpoints.yaml index 3e337ca2f..6fbe9aef0 100644 --- a/package/crds/cdn.azure.upbound.io_endpoints.yaml +++ b/package/crds/cdn.azure.upbound.io_endpoints.yaml @@ -2703,3 +2703,2607 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Endpoint is the Schema for the Endpoints API. Manages a CDN Endpoint. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: EndpointSpec defines the desired state of Endpoint + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + contentTypesToCompress: + description: An array of strings that indicates a content types + on which compression will be applied. The value for the elements + should be MIME types. + items: + type: string + type: array + x-kubernetes-list-type: set + deliveryRule: + description: Rules for the rules engine. An endpoint can contain + up until 4 of those rules that consist of conditions and actions. + A delivery_rule blocks as defined below. + items: + properties: + cacheExpirationAction: + description: A cache_expiration_action block as defined + above. + properties: + behavior: + description: The behavior of the cache key for query + strings. Valid values are Exclude, ExcludeAll, Include + and IncludeAll. + type: string + duration: + description: 'Duration of the cache. Only allowed when + behavior is set to Override or SetIfMissing. Format: + [d.]hh:mm:ss' + type: string + type: object + cacheKeyQueryStringAction: + description: A cache_key_query_string_action block as defined + above. 
+ properties: + behavior: + description: The behavior of the cache key for query + strings. Valid values are Exclude, ExcludeAll, Include + and IncludeAll. + type: string + parameters: + description: Comma separated list of parameter values. + type: string + type: object + cookiesCondition: + description: A cookies_condition block as defined above. + items: + properties: + matchValues: + description: List of string values. This is required + if operator is not Any. + items: + type: string + type: array + x-kubernetes-list-type: set + negateCondition: + description: Defaults to false. + type: boolean + operator: + description: Valid values are Any, BeginsWith, Contains, + EndsWith, Equal, GreaterThan, GreaterThanOrEqual, + LessThan, LessThanOrEqual, RegEx and Wildcard. + type: string + selector: + description: Header name. + type: string + transforms: + description: A list of transforms. Valid values are + Lowercase and Uppercase. + items: + type: string + type: array + type: object + type: array + deviceCondition: + description: A device_condition block as defined below. + properties: + matchValues: + description: List of string values. This is required + if operator is not Any. + items: + type: string + type: array + x-kubernetes-list-type: set + negateCondition: + description: Defaults to false. + type: boolean + operator: + description: Valid values are Any, BeginsWith, Contains, + EndsWith, Equal, GreaterThan, GreaterThanOrEqual, + LessThan, LessThanOrEqual, RegEx and Wildcard. + type: string + type: object + httpVersionCondition: + description: A http_version_condition block as defined below. + items: + properties: + matchValues: + description: List of string values. This is required + if operator is not Any. + items: + type: string + type: array + x-kubernetes-list-type: set + negateCondition: + description: Defaults to false. 
+ type: boolean + operator: + description: Valid values are Any, BeginsWith, Contains, + EndsWith, Equal, GreaterThan, GreaterThanOrEqual, + LessThan, LessThanOrEqual, RegEx and Wildcard. + type: string + type: object + type: array + modifyRequestHeaderAction: + description: A modify_request_header_action block as defined + below. + items: + properties: + action: + description: Action to be executed on a header value. + Valid values are Append, Delete and Overwrite. + type: string + name: + description: The name of the origin. This is an arbitrary + value. However, this value needs to be unique under + the endpoint. Changing this forces a new resource + to be created. + type: string + value: + description: The value of the header. Only needed + when action is set to Append or overwrite. + type: string + type: object + type: array + modifyResponseHeaderAction: + description: A modify_response_header_action block as defined + below. + items: + properties: + action: + description: Action to be executed on a header value. + Valid values are Append, Delete and Overwrite. + type: string + name: + description: The name of the origin. This is an arbitrary + value. However, this value needs to be unique under + the endpoint. Changing this forces a new resource + to be created. + type: string + value: + description: The value of the header. Only needed + when action is set to Append or overwrite. + type: string + type: object + type: array + name: + description: The Name which should be used for this Delivery + Rule. + type: string + order: + description: The order used for this rule. The order values + should be sequential and begin at 1. + type: number + postArgCondition: + description: A post_arg_condition block as defined below. + items: + properties: + matchValues: + description: List of string values. This is required + if operator is not Any. + items: + type: string + type: array + x-kubernetes-list-type: set + negateCondition: + description: Defaults to false. 
+ type: boolean + operator: + description: Valid values are Any, BeginsWith, Contains, + EndsWith, Equal, GreaterThan, GreaterThanOrEqual, + LessThan, LessThanOrEqual, RegEx and Wildcard. + type: string + selector: + description: Header name. + type: string + transforms: + description: A list of transforms. Valid values are + Lowercase and Uppercase. + items: + type: string + type: array + type: object + type: array + queryStringCondition: + description: A query_string_condition block as defined below. + items: + properties: + matchValues: + description: List of string values. This is required + if operator is not Any. + items: + type: string + type: array + x-kubernetes-list-type: set + negateCondition: + description: Defaults to false. + type: boolean + operator: + description: Valid values are Any, BeginsWith, Contains, + EndsWith, Equal, GreaterThan, GreaterThanOrEqual, + LessThan, LessThanOrEqual, RegEx and Wildcard. + type: string + transforms: + description: A list of transforms. Valid values are + Lowercase and Uppercase. + items: + type: string + type: array + type: object + type: array + remoteAddressCondition: + description: A remote_address_condition block as defined + below. + items: + properties: + matchValues: + description: List of string values. This is required + if operator is not Any. + items: + type: string + type: array + x-kubernetes-list-type: set + negateCondition: + description: Defaults to false. + type: boolean + operator: + description: Valid values are Any, BeginsWith, Contains, + EndsWith, Equal, GreaterThan, GreaterThanOrEqual, + LessThan, LessThanOrEqual, RegEx and Wildcard. + type: string + type: object + type: array + requestBodyCondition: + description: A request_body_condition block as defined below. + items: + properties: + matchValues: + description: List of string values. This is required + if operator is not Any. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + negateCondition: + description: Defaults to false. + type: boolean + operator: + description: Valid values are Any, BeginsWith, Contains, + EndsWith, Equal, GreaterThan, GreaterThanOrEqual, + LessThan, LessThanOrEqual, RegEx and Wildcard. + type: string + transforms: + description: A list of transforms. Valid values are + Lowercase and Uppercase. + items: + type: string + type: array + type: object + type: array + requestHeaderCondition: + description: A request_header_condition block as defined + below. + items: + properties: + matchValues: + description: List of string values. This is required + if operator is not Any. + items: + type: string + type: array + x-kubernetes-list-type: set + negateCondition: + description: Defaults to false. + type: boolean + operator: + description: Valid values are Any, BeginsWith, Contains, + EndsWith, Equal, GreaterThan, GreaterThanOrEqual, + LessThan, LessThanOrEqual, RegEx and Wildcard. + type: string + selector: + description: Header name. + type: string + transforms: + description: A list of transforms. Valid values are + Lowercase and Uppercase. + items: + type: string + type: array + type: object + type: array + requestMethodCondition: + description: A request_method_condition block as defined + below. + properties: + matchValues: + description: List of string values. This is required + if operator is not Any. + items: + type: string + type: array + x-kubernetes-list-type: set + negateCondition: + description: Defaults to false. + type: boolean + operator: + description: Valid values are Any, BeginsWith, Contains, + EndsWith, Equal, GreaterThan, GreaterThanOrEqual, + LessThan, LessThanOrEqual, RegEx and Wildcard. + type: string + type: object + requestSchemeCondition: + description: A request_scheme_condition block as defined + below. + properties: + matchValues: + description: List of string values. This is required + if operator is not Any. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + negateCondition: + description: Defaults to false. + type: boolean + operator: + description: Valid values are Any, BeginsWith, Contains, + EndsWith, Equal, GreaterThan, GreaterThanOrEqual, + LessThan, LessThanOrEqual, RegEx and Wildcard. + type: string + type: object + requestUriCondition: + description: A request_uri_condition block as defined below. + items: + properties: + matchValues: + description: List of string values. This is required + if operator is not Any. + items: + type: string + type: array + x-kubernetes-list-type: set + negateCondition: + description: Defaults to false. + type: boolean + operator: + description: Valid values are Any, BeginsWith, Contains, + EndsWith, Equal, GreaterThan, GreaterThanOrEqual, + LessThan, LessThanOrEqual, RegEx and Wildcard. + type: string + transforms: + description: A list of transforms. Valid values are + Lowercase and Uppercase. + items: + type: string + type: array + type: object + type: array + urlFileExtensionCondition: + description: A url_file_extension_condition block as defined + below. + items: + properties: + matchValues: + description: List of string values. This is required + if operator is not Any. + items: + type: string + type: array + x-kubernetes-list-type: set + negateCondition: + description: Defaults to false. + type: boolean + operator: + description: Valid values are Any, BeginsWith, Contains, + EndsWith, Equal, GreaterThan, GreaterThanOrEqual, + LessThan, LessThanOrEqual, RegEx and Wildcard. + type: string + transforms: + description: A list of transforms. Valid values are + Lowercase and Uppercase. + items: + type: string + type: array + type: object + type: array + urlFileNameCondition: + description: A url_file_name_condition block as defined + below. + items: + properties: + matchValues: + description: List of string values. This is required + if operator is not Any. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + negateCondition: + description: Defaults to false. + type: boolean + operator: + description: Valid values are Any, BeginsWith, Contains, + EndsWith, Equal, GreaterThan, GreaterThanOrEqual, + LessThan, LessThanOrEqual, RegEx and Wildcard. + type: string + transforms: + description: A list of transforms. Valid values are + Lowercase and Uppercase. + items: + type: string + type: array + type: object + type: array + urlPathCondition: + description: A url_path_condition block as defined below. + items: + properties: + matchValues: + description: List of string values. This is required + if operator is not Any. + items: + type: string + type: array + x-kubernetes-list-type: set + negateCondition: + description: Defaults to false. + type: boolean + operator: + description: Valid values are Any, BeginsWith, Contains, + EndsWith, Equal, GreaterThan, GreaterThanOrEqual, + LessThan, LessThanOrEqual, RegEx and Wildcard. + type: string + transforms: + description: A list of transforms. Valid values are + Lowercase and Uppercase. + items: + type: string + type: array + type: object + type: array + urlRedirectAction: + description: A url_redirect_action block as defined below. + properties: + fragment: + description: 'Specifies the fragment part of the URL. + This value must not start with a #.' + type: string + hostname: + description: Specifies the hostname part of the URL. + type: string + path: + description: Specifies the path part of the URL. This + value must begin with a /. + type: string + protocol: + description: Specifies the protocol part of the URL. + Valid values are MatchRequest, Http and Https. Defaults + to MatchRequest. + type: string + queryString: + description: Specifies the query string part of the + URL. This value must not start with a ? or & and must + be in = format separated by &. + type: string + redirectType: + description: Type of the redirect. 
Valid values are + Found, Moved, PermanentRedirect and TemporaryRedirect. + type: string + type: object + urlRewriteAction: + description: A url_rewrite_action block as defined below. + properties: + destination: + description: This value must start with a / and can't + be longer than 260 characters. + type: string + preserveUnmatchedPath: + description: Whether preserve an unmatched path. Defaults + to true. + type: boolean + sourcePattern: + description: This value must start with a / and can't + be longer than 260 characters. + type: string + type: object + type: object + type: array + geoFilter: + description: A set of Geo Filters for this CDN Endpoint. Each + geo_filter block supports fields documented below. + items: + properties: + action: + description: The Action of the Geo Filter. Possible values + include Allow and Block. + type: string + countryCodes: + description: A List of two letter country codes (e.g. US, + GB) to be associated with this Geo Filter. + items: + type: string + type: array + relativePath: + description: The relative path applicable to geo filter. + type: string + type: object + type: array + globalDeliveryRule: + description: Actions that are valid for all resources regardless + of any conditions. A global_delivery_rule block as defined below. + properties: + cacheExpirationAction: + description: A cache_expiration_action block as defined above. + properties: + behavior: + description: The behavior of the cache key for query strings. + Valid values are Exclude, ExcludeAll, Include and IncludeAll. + type: string + duration: + description: 'Duration of the cache. Only allowed when + behavior is set to Override or SetIfMissing. Format: + [d.]hh:mm:ss' + type: string + type: object + cacheKeyQueryStringAction: + description: A cache_key_query_string_action block as defined + above. + properties: + behavior: + description: The behavior of the cache key for query strings. + Valid values are Exclude, ExcludeAll, Include and IncludeAll. 
+ type: string + parameters: + description: Comma separated list of parameter values. + type: string + type: object + modifyRequestHeaderAction: + description: A modify_request_header_action block as defined + below. + items: + properties: + action: + description: Action to be executed on a header value. + Valid values are Append, Delete and Overwrite. + type: string + name: + description: The name of the origin. This is an arbitrary + value. However, this value needs to be unique under + the endpoint. Changing this forces a new resource + to be created. + type: string + value: + description: The value of the header. Only needed when + action is set to Append or overwrite. + type: string + type: object + type: array + modifyResponseHeaderAction: + description: A modify_response_header_action block as defined + below. + items: + properties: + action: + description: Action to be executed on a header value. + Valid values are Append, Delete and Overwrite. + type: string + name: + description: The name of the origin. This is an arbitrary + value. However, this value needs to be unique under + the endpoint. Changing this forces a new resource + to be created. + type: string + value: + description: The value of the header. Only needed when + action is set to Append or overwrite. + type: string + type: object + type: array + urlRedirectAction: + description: A url_redirect_action block as defined below. + properties: + fragment: + description: 'Specifies the fragment part of the URL. + This value must not start with a #.' + type: string + hostname: + description: Specifies the hostname part of the URL. + type: string + path: + description: Specifies the path part of the URL. This + value must begin with a /. + type: string + protocol: + description: Specifies the protocol part of the URL. Valid + values are MatchRequest, Http and Https. Defaults to + MatchRequest. + type: string + queryString: + description: Specifies the query string part of the URL. 
+ This value must not start with a ? or & and must be + in = format separated by &. + type: string + redirectType: + description: Type of the redirect. Valid values are Found, + Moved, PermanentRedirect and TemporaryRedirect. + type: string + type: object + urlRewriteAction: + description: A url_rewrite_action block as defined below. + properties: + destination: + description: This value must start with a / and can't + be longer than 260 characters. + type: string + preserveUnmatchedPath: + description: Whether preserve an unmatched path. Defaults + to true. + type: boolean + sourcePattern: + description: This value must start with a / and can't + be longer than 260 characters. + type: string + type: object + type: object + isCompressionEnabled: + description: Indicates whether compression is to be enabled. + type: boolean + isHttpAllowed: + description: Specifies if http allowed. Defaults to true. + type: boolean + isHttpsAllowed: + description: Specifies if https allowed. Defaults to true. + type: boolean + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + optimizationType: + description: What types of optimization should this CDN Endpoint + optimize for? Possible values include DynamicSiteAcceleration, + GeneralMediaStreaming, GeneralWebDelivery, LargeFileDownload + and VideoOnDemandMediaStreaming. + type: string + origin: + description: The set of origins of the CDN endpoint. When multiple + origins exist, the first origin will be used as primary and + rest will be used as failover options. Each origin block supports + fields documented below. Changing this forces a new resource + to be created. + items: + properties: + hostName: + description: A string that determines the hostname/IP address + of the origin server. This string can be a domain name, + Storage Account endpoint, Web App endpoint, IPv4 address + or IPv6 address. 
Changing this forces a new resource to + be created. + type: string + httpPort: + description: The HTTP port of the origin. Defaults to 80. + Changing this forces a new resource to be created. + type: number + httpsPort: + description: The HTTPS port of the origin. Defaults to 443. + Changing this forces a new resource to be created. + type: number + name: + description: The name of the origin. This is an arbitrary + value. However, this value needs to be unique under the + endpoint. Changing this forces a new resource to be created. + type: string + type: object + type: array + originHostHeader: + description: The host header CDN provider will send along with + content requests to origins. + type: string + originPath: + description: The path used at for origin requests. + type: string + probePath: + description: the path to a file hosted on the origin which helps + accelerate delivery of the dynamic content and calculate the + most optimal routes for the CDN. This is relative to the origin_path. + type: string + profileName: + description: The CDN Profile to which to attach the CDN Endpoint. + Changing this forces a new resource to be created. + type: string + profileNameRef: + description: Reference to a Profile in cdn to populate profileName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + profileNameSelector: + description: Selector for a Profile in cdn to populate profileName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + querystringCachingBehaviour: + description: Sets query string caching behavior. Allowed values + are IgnoreQueryString, BypassCaching and UseQueryString. NotSet + value can be used for Premium Verizon CDN profile. Defaults + to IgnoreQueryString. + type: string + resourceGroupName: + description: The name of the resource group in which to create + the CDN Endpoint. Changing this forces a new resource to be + created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + contentTypesToCompress: + description: An array of strings that indicates a content types + on which compression will be applied. The value for the elements + should be MIME types. + items: + type: string + type: array + x-kubernetes-list-type: set + deliveryRule: + description: Rules for the rules engine. An endpoint can contain + up until 4 of those rules that consist of conditions and actions. + A delivery_rule blocks as defined below. + items: + properties: + cacheExpirationAction: + description: A cache_expiration_action block as defined + above. + properties: + behavior: + description: The behavior of the cache key for query + strings. Valid values are Exclude, ExcludeAll, Include + and IncludeAll. + type: string + duration: + description: 'Duration of the cache. Only allowed when + behavior is set to Override or SetIfMissing. Format: + [d.]hh:mm:ss' + type: string + type: object + cacheKeyQueryStringAction: + description: A cache_key_query_string_action block as defined + above. 
+ properties: + behavior: + description: The behavior of the cache key for query + strings. Valid values are Exclude, ExcludeAll, Include + and IncludeAll. + type: string + parameters: + description: Comma separated list of parameter values. + type: string + type: object + cookiesCondition: + description: A cookies_condition block as defined above. + items: + properties: + matchValues: + description: List of string values. This is required + if operator is not Any. + items: + type: string + type: array + x-kubernetes-list-type: set + negateCondition: + description: Defaults to false. + type: boolean + operator: + description: Valid values are Any, BeginsWith, Contains, + EndsWith, Equal, GreaterThan, GreaterThanOrEqual, + LessThan, LessThanOrEqual, RegEx and Wildcard. + type: string + selector: + description: Header name. + type: string + transforms: + description: A list of transforms. Valid values are + Lowercase and Uppercase. + items: + type: string + type: array + type: object + type: array + deviceCondition: + description: A device_condition block as defined below. + properties: + matchValues: + description: List of string values. This is required + if operator is not Any. + items: + type: string + type: array + x-kubernetes-list-type: set + negateCondition: + description: Defaults to false. + type: boolean + operator: + description: Valid values are Any, BeginsWith, Contains, + EndsWith, Equal, GreaterThan, GreaterThanOrEqual, + LessThan, LessThanOrEqual, RegEx and Wildcard. + type: string + type: object + httpVersionCondition: + description: A http_version_condition block as defined below. + items: + properties: + matchValues: + description: List of string values. This is required + if operator is not Any. + items: + type: string + type: array + x-kubernetes-list-type: set + negateCondition: + description: Defaults to false. 
+ type: boolean + operator: + description: Valid values are Any, BeginsWith, Contains, + EndsWith, Equal, GreaterThan, GreaterThanOrEqual, + LessThan, LessThanOrEqual, RegEx and Wildcard. + type: string + type: object + type: array + modifyRequestHeaderAction: + description: A modify_request_header_action block as defined + below. + items: + properties: + action: + description: Action to be executed on a header value. + Valid values are Append, Delete and Overwrite. + type: string + name: + description: The name of the origin. This is an arbitrary + value. However, this value needs to be unique under + the endpoint. Changing this forces a new resource + to be created. + type: string + value: + description: The value of the header. Only needed + when action is set to Append or overwrite. + type: string + type: object + type: array + modifyResponseHeaderAction: + description: A modify_response_header_action block as defined + below. + items: + properties: + action: + description: Action to be executed on a header value. + Valid values are Append, Delete and Overwrite. + type: string + name: + description: The name of the origin. This is an arbitrary + value. However, this value needs to be unique under + the endpoint. Changing this forces a new resource + to be created. + type: string + value: + description: The value of the header. Only needed + when action is set to Append or overwrite. + type: string + type: object + type: array + name: + description: The Name which should be used for this Delivery + Rule. + type: string + order: + description: The order used for this rule. The order values + should be sequential and begin at 1. + type: number + postArgCondition: + description: A post_arg_condition block as defined below. + items: + properties: + matchValues: + description: List of string values. This is required + if operator is not Any. + items: + type: string + type: array + x-kubernetes-list-type: set + negateCondition: + description: Defaults to false. 
+ type: boolean + operator: + description: Valid values are Any, BeginsWith, Contains, + EndsWith, Equal, GreaterThan, GreaterThanOrEqual, + LessThan, LessThanOrEqual, RegEx and Wildcard. + type: string + selector: + description: Header name. + type: string + transforms: + description: A list of transforms. Valid values are + Lowercase and Uppercase. + items: + type: string + type: array + type: object + type: array + queryStringCondition: + description: A query_string_condition block as defined below. + items: + properties: + matchValues: + description: List of string values. This is required + if operator is not Any. + items: + type: string + type: array + x-kubernetes-list-type: set + negateCondition: + description: Defaults to false. + type: boolean + operator: + description: Valid values are Any, BeginsWith, Contains, + EndsWith, Equal, GreaterThan, GreaterThanOrEqual, + LessThan, LessThanOrEqual, RegEx and Wildcard. + type: string + transforms: + description: A list of transforms. Valid values are + Lowercase and Uppercase. + items: + type: string + type: array + type: object + type: array + remoteAddressCondition: + description: A remote_address_condition block as defined + below. + items: + properties: + matchValues: + description: List of string values. This is required + if operator is not Any. + items: + type: string + type: array + x-kubernetes-list-type: set + negateCondition: + description: Defaults to false. + type: boolean + operator: + description: Valid values are Any, BeginsWith, Contains, + EndsWith, Equal, GreaterThan, GreaterThanOrEqual, + LessThan, LessThanOrEqual, RegEx and Wildcard. + type: string + type: object + type: array + requestBodyCondition: + description: A request_body_condition block as defined below. + items: + properties: + matchValues: + description: List of string values. This is required + if operator is not Any. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + negateCondition: + description: Defaults to false. + type: boolean + operator: + description: Valid values are Any, BeginsWith, Contains, + EndsWith, Equal, GreaterThan, GreaterThanOrEqual, + LessThan, LessThanOrEqual, RegEx and Wildcard. + type: string + transforms: + description: A list of transforms. Valid values are + Lowercase and Uppercase. + items: + type: string + type: array + type: object + type: array + requestHeaderCondition: + description: A request_header_condition block as defined + below. + items: + properties: + matchValues: + description: List of string values. This is required + if operator is not Any. + items: + type: string + type: array + x-kubernetes-list-type: set + negateCondition: + description: Defaults to false. + type: boolean + operator: + description: Valid values are Any, BeginsWith, Contains, + EndsWith, Equal, GreaterThan, GreaterThanOrEqual, + LessThan, LessThanOrEqual, RegEx and Wildcard. + type: string + selector: + description: Header name. + type: string + transforms: + description: A list of transforms. Valid values are + Lowercase and Uppercase. + items: + type: string + type: array + type: object + type: array + requestMethodCondition: + description: A request_method_condition block as defined + below. + properties: + matchValues: + description: List of string values. This is required + if operator is not Any. + items: + type: string + type: array + x-kubernetes-list-type: set + negateCondition: + description: Defaults to false. + type: boolean + operator: + description: Valid values are Any, BeginsWith, Contains, + EndsWith, Equal, GreaterThan, GreaterThanOrEqual, + LessThan, LessThanOrEqual, RegEx and Wildcard. + type: string + type: object + requestSchemeCondition: + description: A request_scheme_condition block as defined + below. + properties: + matchValues: + description: List of string values. This is required + if operator is not Any. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + negateCondition: + description: Defaults to false. + type: boolean + operator: + description: Valid values are Any, BeginsWith, Contains, + EndsWith, Equal, GreaterThan, GreaterThanOrEqual, + LessThan, LessThanOrEqual, RegEx and Wildcard. + type: string + type: object + requestUriCondition: + description: A request_uri_condition block as defined below. + items: + properties: + matchValues: + description: List of string values. This is required + if operator is not Any. + items: + type: string + type: array + x-kubernetes-list-type: set + negateCondition: + description: Defaults to false. + type: boolean + operator: + description: Valid values are Any, BeginsWith, Contains, + EndsWith, Equal, GreaterThan, GreaterThanOrEqual, + LessThan, LessThanOrEqual, RegEx and Wildcard. + type: string + transforms: + description: A list of transforms. Valid values are + Lowercase and Uppercase. + items: + type: string + type: array + type: object + type: array + urlFileExtensionCondition: + description: A url_file_extension_condition block as defined + below. + items: + properties: + matchValues: + description: List of string values. This is required + if operator is not Any. + items: + type: string + type: array + x-kubernetes-list-type: set + negateCondition: + description: Defaults to false. + type: boolean + operator: + description: Valid values are Any, BeginsWith, Contains, + EndsWith, Equal, GreaterThan, GreaterThanOrEqual, + LessThan, LessThanOrEqual, RegEx and Wildcard. + type: string + transforms: + description: A list of transforms. Valid values are + Lowercase and Uppercase. + items: + type: string + type: array + type: object + type: array + urlFileNameCondition: + description: A url_file_name_condition block as defined + below. + items: + properties: + matchValues: + description: List of string values. This is required + if operator is not Any. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + negateCondition: + description: Defaults to false. + type: boolean + operator: + description: Valid values are Any, BeginsWith, Contains, + EndsWith, Equal, GreaterThan, GreaterThanOrEqual, + LessThan, LessThanOrEqual, RegEx and Wildcard. + type: string + transforms: + description: A list of transforms. Valid values are + Lowercase and Uppercase. + items: + type: string + type: array + type: object + type: array + urlPathCondition: + description: A url_path_condition block as defined below. + items: + properties: + matchValues: + description: List of string values. This is required + if operator is not Any. + items: + type: string + type: array + x-kubernetes-list-type: set + negateCondition: + description: Defaults to false. + type: boolean + operator: + description: Valid values are Any, BeginsWith, Contains, + EndsWith, Equal, GreaterThan, GreaterThanOrEqual, + LessThan, LessThanOrEqual, RegEx and Wildcard. + type: string + transforms: + description: A list of transforms. Valid values are + Lowercase and Uppercase. + items: + type: string + type: array + type: object + type: array + urlRedirectAction: + description: A url_redirect_action block as defined below. + properties: + fragment: + description: 'Specifies the fragment part of the URL. + This value must not start with a #.' + type: string + hostname: + description: Specifies the hostname part of the URL. + type: string + path: + description: Specifies the path part of the URL. This + value must begin with a /. + type: string + protocol: + description: Specifies the protocol part of the URL. + Valid values are MatchRequest, Http and Https. Defaults + to MatchRequest. + type: string + queryString: + description: Specifies the query string part of the + URL. This value must not start with a ? or & and must + be in = format separated by &. + type: string + redirectType: + description: Type of the redirect. 
Valid values are + Found, Moved, PermanentRedirect and TemporaryRedirect. + type: string + type: object + urlRewriteAction: + description: A url_rewrite_action block as defined below. + properties: + destination: + description: This value must start with a / and can't + be longer than 260 characters. + type: string + preserveUnmatchedPath: + description: Whether preserve an unmatched path. Defaults + to true. + type: boolean + sourcePattern: + description: This value must start with a / and can't + be longer than 260 characters. + type: string + type: object + type: object + type: array + geoFilter: + description: A set of Geo Filters for this CDN Endpoint. Each + geo_filter block supports fields documented below. + items: + properties: + action: + description: The Action of the Geo Filter. Possible values + include Allow and Block. + type: string + countryCodes: + description: A List of two letter country codes (e.g. US, + GB) to be associated with this Geo Filter. + items: + type: string + type: array + relativePath: + description: The relative path applicable to geo filter. + type: string + type: object + type: array + globalDeliveryRule: + description: Actions that are valid for all resources regardless + of any conditions. A global_delivery_rule block as defined below. + properties: + cacheExpirationAction: + description: A cache_expiration_action block as defined above. + properties: + behavior: + description: The behavior of the cache key for query strings. + Valid values are Exclude, ExcludeAll, Include and IncludeAll. + type: string + duration: + description: 'Duration of the cache. Only allowed when + behavior is set to Override or SetIfMissing. Format: + [d.]hh:mm:ss' + type: string + type: object + cacheKeyQueryStringAction: + description: A cache_key_query_string_action block as defined + above. + properties: + behavior: + description: The behavior of the cache key for query strings. + Valid values are Exclude, ExcludeAll, Include and IncludeAll. 
+ type: string + parameters: + description: Comma separated list of parameter values. + type: string + type: object + modifyRequestHeaderAction: + description: A modify_request_header_action block as defined + below. + items: + properties: + action: + description: Action to be executed on a header value. + Valid values are Append, Delete and Overwrite. + type: string + name: + description: The name of the origin. This is an arbitrary + value. However, this value needs to be unique under + the endpoint. Changing this forces a new resource + to be created. + type: string + value: + description: The value of the header. Only needed when + action is set to Append or overwrite. + type: string + type: object + type: array + modifyResponseHeaderAction: + description: A modify_response_header_action block as defined + below. + items: + properties: + action: + description: Action to be executed on a header value. + Valid values are Append, Delete and Overwrite. + type: string + name: + description: The name of the origin. This is an arbitrary + value. However, this value needs to be unique under + the endpoint. Changing this forces a new resource + to be created. + type: string + value: + description: The value of the header. Only needed when + action is set to Append or overwrite. + type: string + type: object + type: array + urlRedirectAction: + description: A url_redirect_action block as defined below. + properties: + fragment: + description: 'Specifies the fragment part of the URL. + This value must not start with a #.' + type: string + hostname: + description: Specifies the hostname part of the URL. + type: string + path: + description: Specifies the path part of the URL. This + value must begin with a /. + type: string + protocol: + description: Specifies the protocol part of the URL. Valid + values are MatchRequest, Http and Https. Defaults to + MatchRequest. + type: string + queryString: + description: Specifies the query string part of the URL. 
+ This value must not start with a ? or & and must be + in = format separated by &. + type: string + redirectType: + description: Type of the redirect. Valid values are Found, + Moved, PermanentRedirect and TemporaryRedirect. + type: string + type: object + urlRewriteAction: + description: A url_rewrite_action block as defined below. + properties: + destination: + description: This value must start with a / and can't + be longer than 260 characters. + type: string + preserveUnmatchedPath: + description: Whether preserve an unmatched path. Defaults + to true. + type: boolean + sourcePattern: + description: This value must start with a / and can't + be longer than 260 characters. + type: string + type: object + type: object + isCompressionEnabled: + description: Indicates whether compression is to be enabled. + type: boolean + isHttpAllowed: + description: Specifies if http allowed. Defaults to true. + type: boolean + isHttpsAllowed: + description: Specifies if https allowed. Defaults to true. + type: boolean + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + optimizationType: + description: What types of optimization should this CDN Endpoint + optimize for? Possible values include DynamicSiteAcceleration, + GeneralMediaStreaming, GeneralWebDelivery, LargeFileDownload + and VideoOnDemandMediaStreaming. + type: string + origin: + description: The set of origins of the CDN endpoint. When multiple + origins exist, the first origin will be used as primary and + rest will be used as failover options. Each origin block supports + fields documented below. Changing this forces a new resource + to be created. + items: + properties: + hostName: + description: A string that determines the hostname/IP address + of the origin server. This string can be a domain name, + Storage Account endpoint, Web App endpoint, IPv4 address + or IPv6 address. 
Changing this forces a new resource to + be created. + type: string + httpPort: + description: The HTTP port of the origin. Defaults to 80. + Changing this forces a new resource to be created. + type: number + httpsPort: + description: The HTTPS port of the origin. Defaults to 443. + Changing this forces a new resource to be created. + type: number + name: + description: The name of the origin. This is an arbitrary + value. However, this value needs to be unique under the + endpoint. Changing this forces a new resource to be created. + type: string + type: object + type: array + originHostHeader: + description: The host header CDN provider will send along with + content requests to origins. + type: string + originPath: + description: The path used at for origin requests. + type: string + probePath: + description: the path to a file hosted on the origin which helps + accelerate delivery of the dynamic content and calculate the + most optimal routes for the CDN. This is relative to the origin_path. + type: string + querystringCachingBehaviour: + description: Sets query string caching behavior. Allowed values + are IgnoreQueryString, BypassCaching and UseQueryString. NotSet + value can be used for Premium Verizon CDN profile. Defaults + to IgnoreQueryString. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. 
If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.origin is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.origin) + || (has(self.initProvider) && has(self.initProvider.origin))' + status: + description: EndpointStatus defines the observed state of Endpoint. + properties: + atProvider: + properties: + contentTypesToCompress: + description: An array of strings that indicates a content types + on which compression will be applied. The value for the elements + should be MIME types. + items: + type: string + type: array + x-kubernetes-list-type: set + deliveryRule: + description: Rules for the rules engine. An endpoint can contain + up until 4 of those rules that consist of conditions and actions. + A delivery_rule blocks as defined below. + items: + properties: + cacheExpirationAction: + description: A cache_expiration_action block as defined + above. + properties: + behavior: + description: The behavior of the cache key for query + strings. Valid values are Exclude, ExcludeAll, Include + and IncludeAll. + type: string + duration: + description: 'Duration of the cache. Only allowed when + behavior is set to Override or SetIfMissing. Format: + [d.]hh:mm:ss' + type: string + type: object + cacheKeyQueryStringAction: + description: A cache_key_query_string_action block as defined + above. + properties: + behavior: + description: The behavior of the cache key for query + strings. Valid values are Exclude, ExcludeAll, Include + and IncludeAll. 
+ type: string + parameters: + description: Comma separated list of parameter values. + type: string + type: object + cookiesCondition: + description: A cookies_condition block as defined above. + items: + properties: + matchValues: + description: List of string values. This is required + if operator is not Any. + items: + type: string + type: array + x-kubernetes-list-type: set + negateCondition: + description: Defaults to false. + type: boolean + operator: + description: Valid values are Any, BeginsWith, Contains, + EndsWith, Equal, GreaterThan, GreaterThanOrEqual, + LessThan, LessThanOrEqual, RegEx and Wildcard. + type: string + selector: + description: Header name. + type: string + transforms: + description: A list of transforms. Valid values are + Lowercase and Uppercase. + items: + type: string + type: array + type: object + type: array + deviceCondition: + description: A device_condition block as defined below. + properties: + matchValues: + description: List of string values. This is required + if operator is not Any. + items: + type: string + type: array + x-kubernetes-list-type: set + negateCondition: + description: Defaults to false. + type: boolean + operator: + description: Valid values are Any, BeginsWith, Contains, + EndsWith, Equal, GreaterThan, GreaterThanOrEqual, + LessThan, LessThanOrEqual, RegEx and Wildcard. + type: string + type: object + httpVersionCondition: + description: A http_version_condition block as defined below. + items: + properties: + matchValues: + description: List of string values. This is required + if operator is not Any. + items: + type: string + type: array + x-kubernetes-list-type: set + negateCondition: + description: Defaults to false. + type: boolean + operator: + description: Valid values are Any, BeginsWith, Contains, + EndsWith, Equal, GreaterThan, GreaterThanOrEqual, + LessThan, LessThanOrEqual, RegEx and Wildcard. 
+ type: string + type: object + type: array + modifyRequestHeaderAction: + description: A modify_request_header_action block as defined + below. + items: + properties: + action: + description: Action to be executed on a header value. + Valid values are Append, Delete and Overwrite. + type: string + name: + description: The name of the origin. This is an arbitrary + value. However, this value needs to be unique under + the endpoint. Changing this forces a new resource + to be created. + type: string + value: + description: The value of the header. Only needed + when action is set to Append or overwrite. + type: string + type: object + type: array + modifyResponseHeaderAction: + description: A modify_response_header_action block as defined + below. + items: + properties: + action: + description: Action to be executed on a header value. + Valid values are Append, Delete and Overwrite. + type: string + name: + description: The name of the origin. This is an arbitrary + value. However, this value needs to be unique under + the endpoint. Changing this forces a new resource + to be created. + type: string + value: + description: The value of the header. Only needed + when action is set to Append or overwrite. + type: string + type: object + type: array + name: + description: The Name which should be used for this Delivery + Rule. + type: string + order: + description: The order used for this rule. The order values + should be sequential and begin at 1. + type: number + postArgCondition: + description: A post_arg_condition block as defined below. + items: + properties: + matchValues: + description: List of string values. This is required + if operator is not Any. + items: + type: string + type: array + x-kubernetes-list-type: set + negateCondition: + description: Defaults to false. + type: boolean + operator: + description: Valid values are Any, BeginsWith, Contains, + EndsWith, Equal, GreaterThan, GreaterThanOrEqual, + LessThan, LessThanOrEqual, RegEx and Wildcard. 
+ type: string + selector: + description: Header name. + type: string + transforms: + description: A list of transforms. Valid values are + Lowercase and Uppercase. + items: + type: string + type: array + type: object + type: array + queryStringCondition: + description: A query_string_condition block as defined below. + items: + properties: + matchValues: + description: List of string values. This is required + if operator is not Any. + items: + type: string + type: array + x-kubernetes-list-type: set + negateCondition: + description: Defaults to false. + type: boolean + operator: + description: Valid values are Any, BeginsWith, Contains, + EndsWith, Equal, GreaterThan, GreaterThanOrEqual, + LessThan, LessThanOrEqual, RegEx and Wildcard. + type: string + transforms: + description: A list of transforms. Valid values are + Lowercase and Uppercase. + items: + type: string + type: array + type: object + type: array + remoteAddressCondition: + description: A remote_address_condition block as defined + below. + items: + properties: + matchValues: + description: List of string values. This is required + if operator is not Any. + items: + type: string + type: array + x-kubernetes-list-type: set + negateCondition: + description: Defaults to false. + type: boolean + operator: + description: Valid values are Any, BeginsWith, Contains, + EndsWith, Equal, GreaterThan, GreaterThanOrEqual, + LessThan, LessThanOrEqual, RegEx and Wildcard. + type: string + type: object + type: array + requestBodyCondition: + description: A request_body_condition block as defined below. + items: + properties: + matchValues: + description: List of string values. This is required + if operator is not Any. + items: + type: string + type: array + x-kubernetes-list-type: set + negateCondition: + description: Defaults to false. 
+ type: boolean + operator: + description: Valid values are Any, BeginsWith, Contains, + EndsWith, Equal, GreaterThan, GreaterThanOrEqual, + LessThan, LessThanOrEqual, RegEx and Wildcard. + type: string + transforms: + description: A list of transforms. Valid values are + Lowercase and Uppercase. + items: + type: string + type: array + type: object + type: array + requestHeaderCondition: + description: A request_header_condition block as defined + below. + items: + properties: + matchValues: + description: List of string values. This is required + if operator is not Any. + items: + type: string + type: array + x-kubernetes-list-type: set + negateCondition: + description: Defaults to false. + type: boolean + operator: + description: Valid values are Any, BeginsWith, Contains, + EndsWith, Equal, GreaterThan, GreaterThanOrEqual, + LessThan, LessThanOrEqual, RegEx and Wildcard. + type: string + selector: + description: Header name. + type: string + transforms: + description: A list of transforms. Valid values are + Lowercase and Uppercase. + items: + type: string + type: array + type: object + type: array + requestMethodCondition: + description: A request_method_condition block as defined + below. + properties: + matchValues: + description: List of string values. This is required + if operator is not Any. + items: + type: string + type: array + x-kubernetes-list-type: set + negateCondition: + description: Defaults to false. + type: boolean + operator: + description: Valid values are Any, BeginsWith, Contains, + EndsWith, Equal, GreaterThan, GreaterThanOrEqual, + LessThan, LessThanOrEqual, RegEx and Wildcard. + type: string + type: object + requestSchemeCondition: + description: A request_scheme_condition block as defined + below. + properties: + matchValues: + description: List of string values. This is required + if operator is not Any. + items: + type: string + type: array + x-kubernetes-list-type: set + negateCondition: + description: Defaults to false. 
+ type: boolean + operator: + description: Valid values are Any, BeginsWith, Contains, + EndsWith, Equal, GreaterThan, GreaterThanOrEqual, + LessThan, LessThanOrEqual, RegEx and Wildcard. + type: string + type: object + requestUriCondition: + description: A request_uri_condition block as defined below. + items: + properties: + matchValues: + description: List of string values. This is required + if operator is not Any. + items: + type: string + type: array + x-kubernetes-list-type: set + negateCondition: + description: Defaults to false. + type: boolean + operator: + description: Valid values are Any, BeginsWith, Contains, + EndsWith, Equal, GreaterThan, GreaterThanOrEqual, + LessThan, LessThanOrEqual, RegEx and Wildcard. + type: string + transforms: + description: A list of transforms. Valid values are + Lowercase and Uppercase. + items: + type: string + type: array + type: object + type: array + urlFileExtensionCondition: + description: A url_file_extension_condition block as defined + below. + items: + properties: + matchValues: + description: List of string values. This is required + if operator is not Any. + items: + type: string + type: array + x-kubernetes-list-type: set + negateCondition: + description: Defaults to false. + type: boolean + operator: + description: Valid values are Any, BeginsWith, Contains, + EndsWith, Equal, GreaterThan, GreaterThanOrEqual, + LessThan, LessThanOrEqual, RegEx and Wildcard. + type: string + transforms: + description: A list of transforms. Valid values are + Lowercase and Uppercase. + items: + type: string + type: array + type: object + type: array + urlFileNameCondition: + description: A url_file_name_condition block as defined + below. + items: + properties: + matchValues: + description: List of string values. This is required + if operator is not Any. + items: + type: string + type: array + x-kubernetes-list-type: set + negateCondition: + description: Defaults to false. 
+ type: boolean + operator: + description: Valid values are Any, BeginsWith, Contains, + EndsWith, Equal, GreaterThan, GreaterThanOrEqual, + LessThan, LessThanOrEqual, RegEx and Wildcard. + type: string + transforms: + description: A list of transforms. Valid values are + Lowercase and Uppercase. + items: + type: string + type: array + type: object + type: array + urlPathCondition: + description: A url_path_condition block as defined below. + items: + properties: + matchValues: + description: List of string values. This is required + if operator is not Any. + items: + type: string + type: array + x-kubernetes-list-type: set + negateCondition: + description: Defaults to false. + type: boolean + operator: + description: Valid values are Any, BeginsWith, Contains, + EndsWith, Equal, GreaterThan, GreaterThanOrEqual, + LessThan, LessThanOrEqual, RegEx and Wildcard. + type: string + transforms: + description: A list of transforms. Valid values are + Lowercase and Uppercase. + items: + type: string + type: array + type: object + type: array + urlRedirectAction: + description: A url_redirect_action block as defined below. + properties: + fragment: + description: 'Specifies the fragment part of the URL. + This value must not start with a #.' + type: string + hostname: + description: Specifies the hostname part of the URL. + type: string + path: + description: Specifies the path part of the URL. This + value must begin with a /. + type: string + protocol: + description: Specifies the protocol part of the URL. + Valid values are MatchRequest, Http and Https. Defaults + to MatchRequest. + type: string + queryString: + description: Specifies the query string part of the + URL. This value must not start with a ? or & and must + be in = format separated by &. + type: string + redirectType: + description: Type of the redirect. Valid values are + Found, Moved, PermanentRedirect and TemporaryRedirect. 
+ type: string + type: object + urlRewriteAction: + description: A url_rewrite_action block as defined below. + properties: + destination: + description: This value must start with a / and can't + be longer than 260 characters. + type: string + preserveUnmatchedPath: + description: Whether preserve an unmatched path. Defaults + to true. + type: boolean + sourcePattern: + description: This value must start with a / and can't + be longer than 260 characters. + type: string + type: object + type: object + type: array + fqdn: + description: The Fully Qualified Domain Name of the CDN Endpoint. + type: string + geoFilter: + description: A set of Geo Filters for this CDN Endpoint. Each + geo_filter block supports fields documented below. + items: + properties: + action: + description: The Action of the Geo Filter. Possible values + include Allow and Block. + type: string + countryCodes: + description: A List of two letter country codes (e.g. US, + GB) to be associated with this Geo Filter. + items: + type: string + type: array + relativePath: + description: The relative path applicable to geo filter. + type: string + type: object + type: array + globalDeliveryRule: + description: Actions that are valid for all resources regardless + of any conditions. A global_delivery_rule block as defined below. + properties: + cacheExpirationAction: + description: A cache_expiration_action block as defined above. + properties: + behavior: + description: The behavior of the cache key for query strings. + Valid values are Exclude, ExcludeAll, Include and IncludeAll. + type: string + duration: + description: 'Duration of the cache. Only allowed when + behavior is set to Override or SetIfMissing. Format: + [d.]hh:mm:ss' + type: string + type: object + cacheKeyQueryStringAction: + description: A cache_key_query_string_action block as defined + above. + properties: + behavior: + description: The behavior of the cache key for query strings. 
+ Valid values are Exclude, ExcludeAll, Include and IncludeAll. + type: string + parameters: + description: Comma separated list of parameter values. + type: string + type: object + modifyRequestHeaderAction: + description: A modify_request_header_action block as defined + below. + items: + properties: + action: + description: Action to be executed on a header value. + Valid values are Append, Delete and Overwrite. + type: string + name: + description: The name of the origin. This is an arbitrary + value. However, this value needs to be unique under + the endpoint. Changing this forces a new resource + to be created. + type: string + value: + description: The value of the header. Only needed when + action is set to Append or overwrite. + type: string + type: object + type: array + modifyResponseHeaderAction: + description: A modify_response_header_action block as defined + below. + items: + properties: + action: + description: Action to be executed on a header value. + Valid values are Append, Delete and Overwrite. + type: string + name: + description: The name of the origin. This is an arbitrary + value. However, this value needs to be unique under + the endpoint. Changing this forces a new resource + to be created. + type: string + value: + description: The value of the header. Only needed when + action is set to Append or overwrite. + type: string + type: object + type: array + urlRedirectAction: + description: A url_redirect_action block as defined below. + properties: + fragment: + description: 'Specifies the fragment part of the URL. + This value must not start with a #.' + type: string + hostname: + description: Specifies the hostname part of the URL. + type: string + path: + description: Specifies the path part of the URL. This + value must begin with a /. + type: string + protocol: + description: Specifies the protocol part of the URL. Valid + values are MatchRequest, Http and Https. Defaults to + MatchRequest. 
+ type: string + queryString: + description: Specifies the query string part of the URL. + This value must not start with a ? or & and must be + in = format separated by &. + type: string + redirectType: + description: Type of the redirect. Valid values are Found, + Moved, PermanentRedirect and TemporaryRedirect. + type: string + type: object + urlRewriteAction: + description: A url_rewrite_action block as defined below. + properties: + destination: + description: This value must start with a / and can't + be longer than 260 characters. + type: string + preserveUnmatchedPath: + description: Whether preserve an unmatched path. Defaults + to true. + type: boolean + sourcePattern: + description: This value must start with a / and can't + be longer than 260 characters. + type: string + type: object + type: object + id: + description: The ID of the CDN Endpoint. + type: string + isCompressionEnabled: + description: Indicates whether compression is to be enabled. + type: boolean + isHttpAllowed: + description: Specifies if http allowed. Defaults to true. + type: boolean + isHttpsAllowed: + description: Specifies if https allowed. Defaults to true. + type: boolean + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + optimizationType: + description: What types of optimization should this CDN Endpoint + optimize for? Possible values include DynamicSiteAcceleration, + GeneralMediaStreaming, GeneralWebDelivery, LargeFileDownload + and VideoOnDemandMediaStreaming. + type: string + origin: + description: The set of origins of the CDN endpoint. When multiple + origins exist, the first origin will be used as primary and + rest will be used as failover options. Each origin block supports + fields documented below. Changing this forces a new resource + to be created. 
+ items: + properties: + hostName: + description: A string that determines the hostname/IP address + of the origin server. This string can be a domain name, + Storage Account endpoint, Web App endpoint, IPv4 address + or IPv6 address. Changing this forces a new resource to + be created. + type: string + httpPort: + description: The HTTP port of the origin. Defaults to 80. + Changing this forces a new resource to be created. + type: number + httpsPort: + description: The HTTPS port of the origin. Defaults to 443. + Changing this forces a new resource to be created. + type: number + name: + description: The name of the origin. This is an arbitrary + value. However, this value needs to be unique under the + endpoint. Changing this forces a new resource to be created. + type: string + type: object + type: array + originHostHeader: + description: The host header CDN provider will send along with + content requests to origins. + type: string + originPath: + description: The path used at for origin requests. + type: string + probePath: + description: the path to a file hosted on the origin which helps + accelerate delivery of the dynamic content and calculate the + most optimal routes for the CDN. This is relative to the origin_path. + type: string + profileName: + description: The CDN Profile to which to attach the CDN Endpoint. + Changing this forces a new resource to be created. + type: string + querystringCachingBehaviour: + description: Sets query string caching behavior. Allowed values + are IgnoreQueryString, BypassCaching and UseQueryString. NotSet + value can be used for Premium Verizon CDN profile. Defaults + to IgnoreQueryString. + type: string + resourceGroupName: + description: The name of the resource group in which to create + the CDN Endpoint. Changing this forces a new resource to be + created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. 
+ type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/cdn.azure.upbound.io_frontdoorcustomdomains.yaml b/package/crds/cdn.azure.upbound.io_frontdoorcustomdomains.yaml index 61d32e6db..2207ec20f 100644 --- a/package/crds/cdn.azure.upbound.io_frontdoorcustomdomains.yaml +++ b/package/crds/cdn.azure.upbound.io_frontdoorcustomdomains.yaml @@ -675,3 +675,651 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: FrontdoorCustomDomain is the Schema for the FrontdoorCustomDomains + API. Manages a Front Door (standard/premium) Custom Domain. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: FrontdoorCustomDomainSpec defines the desired state of FrontdoorCustomDomain + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + cdnFrontdoorProfileId: + description: The ID of the Front Door Profile. Changing this forces + a new Front Door Custom Domain to be created. + type: string + cdnFrontdoorProfileIdRef: + description: Reference to a FrontdoorProfile in cdn to populate + cdnFrontdoorProfileId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + cdnFrontdoorProfileIdSelector: + description: Selector for a FrontdoorProfile in cdn to populate + cdnFrontdoorProfileId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + dnsZoneId: + description: The ID of the Azure DNS Zone which should be used + for this Front Door Custom Domain. If you are using Azure to + host your DNS domains, you must delegate the domain provider's + domain name system (DNS) to an Azure DNS Zone. For more information, + see Delegate a domain to Azure DNS. Otherwise, if you're using + your own domain provider to handle your DNS, you must validate + the Front Door Custom Domain by creating the DNS TXT records + manually. + type: string + dnsZoneIdRef: + description: Reference to a DNSZone in network to populate dnsZoneId. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + dnsZoneIdSelector: + description: Selector for a DNSZone in network to populate dnsZoneId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + hostName: + description: The host name of the domain. The host_name field + must be the FQDN of your domain(e.g. contoso.fabrikam.com). + Changing this forces a new Front Door Custom Domain to be created. + type: string + tls: + description: A tls block as defined below. + properties: + cdnFrontdoorSecretId: + description: Resource ID of the Front Door Secret. + type: string + certificateType: + description: Defines the source of the SSL certificate. Possible + values include CustomerCertificate and ManagedCertificate. + Defaults to ManagedCertificate. + type: string + minimumTlsVersion: + description: TLS protocol version that will be used for Https. + Possible values include TLS10 and TLS12. Defaults to TLS12. + type: string + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + dnsZoneId: + description: The ID of the Azure DNS Zone which should be used + for this Front Door Custom Domain. If you are using Azure to + host your DNS domains, you must delegate the domain provider's + domain name system (DNS) to an Azure DNS Zone. For more information, + see Delegate a domain to Azure DNS. 
Otherwise, if you're using + your own domain provider to handle your DNS, you must validate + the Front Door Custom Domain by creating the DNS TXT records + manually. + type: string + dnsZoneIdRef: + description: Reference to a DNSZone in network to populate dnsZoneId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + dnsZoneIdSelector: + description: Selector for a DNSZone in network to populate dnsZoneId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + hostName: + description: The host name of the domain. The host_name field + must be the FQDN of your domain(e.g. contoso.fabrikam.com). + Changing this forces a new Front Door Custom Domain to be created. + type: string + tls: + description: A tls block as defined below. + properties: + cdnFrontdoorSecretId: + description: Resource ID of the Front Door Secret. + type: string + certificateType: + description: Defines the source of the SSL certificate. Possible + values include CustomerCertificate and ManagedCertificate. + Defaults to ManagedCertificate. + type: string + minimumTlsVersion: + description: TLS protocol version that will be used for Https. + Possible values include TLS10 and TLS12. Defaults to TLS12. + type: string + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.hostName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.hostName) + || (has(self.initProvider) && has(self.initProvider.hostName))' + - message: spec.forProvider.tls is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.tls) + || (has(self.initProvider) && has(self.initProvider.tls))' + status: + description: FrontdoorCustomDomainStatus defines the observed state of + FrontdoorCustomDomain. + properties: + atProvider: + properties: + cdnFrontdoorProfileId: + description: The ID of the Front Door Profile. Changing this forces + a new Front Door Custom Domain to be created. + type: string + dnsZoneId: + description: The ID of the Azure DNS Zone which should be used + for this Front Door Custom Domain. 
If you are using Azure to + host your DNS domains, you must delegate the domain provider's + domain name system (DNS) to an Azure DNS Zone. For more information, + see Delegate a domain to Azure DNS. Otherwise, if you're using + your own domain provider to handle your DNS, you must validate + the Front Door Custom Domain by creating the DNS TXT records + manually. + type: string + expirationDate: + description: The date time that the token expires. + type: string + hostName: + description: The host name of the domain. The host_name field + must be the FQDN of your domain(e.g. contoso.fabrikam.com). + Changing this forces a new Front Door Custom Domain to be created. + type: string + id: + description: The ID of the Front Door Custom Domain. + type: string + tls: + description: A tls block as defined below. + properties: + cdnFrontdoorSecretId: + description: Resource ID of the Front Door Secret. + type: string + certificateType: + description: Defines the source of the SSL certificate. Possible + values include CustomerCertificate and ManagedCertificate. + Defaults to ManagedCertificate. + type: string + minimumTlsVersion: + description: TLS protocol version that will be used for Https. + Possible values include TLS10 and TLS12. Defaults to TLS12. + type: string + type: object + validationToken: + description: Challenge used for DNS TXT record or file based validation. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/cdn.azure.upbound.io_frontdoororigingroups.yaml b/package/crds/cdn.azure.upbound.io_frontdoororigingroups.yaml index 97d9e0a28..17067b269 100644 --- a/package/crds/cdn.azure.upbound.io_frontdoororigingroups.yaml +++ b/package/crds/cdn.azure.upbound.io_frontdoororigingroups.yaml @@ -588,3 +588,558 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: FrontdoorOriginGroup is the Schema for the FrontdoorOriginGroups + API. Manages a Front Door (standard/premium) Origin Group. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: FrontdoorOriginGroupSpec defines the desired state of FrontdoorOriginGroup + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + cdnFrontdoorProfileId: + description: The ID of the Front Door Profile within which this + Front Door Origin Group should exist. Changing this forces a + new Front Door Origin Group to be created. + type: string + cdnFrontdoorProfileIdRef: + description: Reference to a FrontdoorProfile in cdn to populate + cdnFrontdoorProfileId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + cdnFrontdoorProfileIdSelector: + description: Selector for a FrontdoorProfile in cdn to populate + cdnFrontdoorProfileId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + healthProbe: + description: A health_probe block as defined below. + properties: + intervalInSeconds: + description: Specifies the number of seconds between health + probes. Possible values are between 5 and 31536000 seconds + (inclusive). + type: number + path: + description: Specifies the path relative to the origin that + is used to determine the health of the origin. Defaults + to /. 
+ type: string + protocol: + description: Specifies the protocol to use for health probe. + Possible values are Http and Https. + type: string + requestType: + description: Specifies the type of health probe request that + is made. Possible values are GET and HEAD. Defaults to HEAD. + type: string + type: object + loadBalancing: + description: A load_balancing block as defined below. + properties: + additionalLatencyInMilliseconds: + description: Specifies the additional latency in milliseconds + for probes to fall into the lowest latency bucket. Possible + values are between 0 and 1000 milliseconds (inclusive). + Defaults to 50. + type: number + sampleSize: + description: Specifies the number of samples to consider for + load balancing decisions. Possible values are between 0 + and 255 (inclusive). Defaults to 4. + type: number + successfulSamplesRequired: + description: Specifies the number of samples within the sample + period that must succeed. Possible values are between 0 + and 255 (inclusive). Defaults to 3. + type: number + type: object + restoreTrafficTimeToHealedOrNewEndpointInMinutes: + description: Specifies the amount of time which should elapse + before shifting traffic to another endpoint when a healthy endpoint + becomes unhealthy or a new endpoint is added. Possible values + are between 0 and 50 minutes (inclusive). Default is 10 minutes. + type: number + sessionAffinityEnabled: + description: Specifies whether session affinity should be enabled + on this host. Defaults to true. + type: boolean + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. 
+ The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + healthProbe: + description: A health_probe block as defined below. + properties: + intervalInSeconds: + description: Specifies the number of seconds between health + probes. Possible values are between 5 and 31536000 seconds + (inclusive). + type: number + path: + description: Specifies the path relative to the origin that + is used to determine the health of the origin. Defaults + to /. + type: string + protocol: + description: Specifies the protocol to use for health probe. + Possible values are Http and Https. + type: string + requestType: + description: Specifies the type of health probe request that + is made. Possible values are GET and HEAD. Defaults to HEAD. + type: string + type: object + loadBalancing: + description: A load_balancing block as defined below. + properties: + additionalLatencyInMilliseconds: + description: Specifies the additional latency in milliseconds + for probes to fall into the lowest latency bucket. Possible + values are between 0 and 1000 milliseconds (inclusive). + Defaults to 50. + type: number + sampleSize: + description: Specifies the number of samples to consider for + load balancing decisions. Possible values are between 0 + and 255 (inclusive). Defaults to 4. + type: number + successfulSamplesRequired: + description: Specifies the number of samples within the sample + period that must succeed. Possible values are between 0 + and 255 (inclusive). Defaults to 3. 
+ type: number + type: object + restoreTrafficTimeToHealedOrNewEndpointInMinutes: + description: Specifies the amount of time which should elapse + before shifting traffic to another endpoint when a healthy endpoint + becomes unhealthy or a new endpoint is added. Possible values + are between 0 and 50 minutes (inclusive). Default is 10 minutes. + type: number + sessionAffinityEnabled: + description: Specifies whether session affinity should be enabled + on this host. Defaults to true. + type: boolean + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.loadBalancing is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.loadBalancing) + || (has(self.initProvider) && has(self.initProvider.loadBalancing))' + status: + description: FrontdoorOriginGroupStatus defines the observed state of + FrontdoorOriginGroup. + properties: + atProvider: + properties: + cdnFrontdoorProfileId: + description: The ID of the Front Door Profile within which this + Front Door Origin Group should exist. Changing this forces a + new Front Door Origin Group to be created. + type: string + healthProbe: + description: A health_probe block as defined below. + properties: + intervalInSeconds: + description: Specifies the number of seconds between health + probes. Possible values are between 5 and 31536000 seconds + (inclusive). + type: number + path: + description: Specifies the path relative to the origin that + is used to determine the health of the origin. Defaults + to /. + type: string + protocol: + description: Specifies the protocol to use for health probe. + Possible values are Http and Https. + type: string + requestType: + description: Specifies the type of health probe request that + is made. Possible values are GET and HEAD. Defaults to HEAD. + type: string + type: object + id: + description: The ID of the Front Door Origin Group. + type: string + loadBalancing: + description: A load_balancing block as defined below. + properties: + additionalLatencyInMilliseconds: + description: Specifies the additional latency in milliseconds + for probes to fall into the lowest latency bucket. Possible + values are between 0 and 1000 milliseconds (inclusive). + Defaults to 50. 
+ type: number + sampleSize: + description: Specifies the number of samples to consider for + load balancing decisions. Possible values are between 0 + and 255 (inclusive). Defaults to 4. + type: number + successfulSamplesRequired: + description: Specifies the number of samples within the sample + period that must succeed. Possible values are between 0 + and 255 (inclusive). Defaults to 3. + type: number + type: object + restoreTrafficTimeToHealedOrNewEndpointInMinutes: + description: Specifies the amount of time which should elapse + before shifting traffic to another endpoint when a healthy endpoint + becomes unhealthy or a new endpoint is added. Possible values + are between 0 and 50 minutes (inclusive). Default is 10 minutes. + type: number + sessionAffinityEnabled: + description: Specifies whether session affinity should be enabled + on this host. Defaults to true. + type: boolean + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? 
+ type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/cdn.azure.upbound.io_frontdoororigins.yaml b/package/crds/cdn.azure.upbound.io_frontdoororigins.yaml index fb16c1262..e08af8462 100644 --- a/package/crds/cdn.azure.upbound.io_frontdoororigins.yaml +++ b/package/crds/cdn.azure.upbound.io_frontdoororigins.yaml @@ -1218,3 +1218,1197 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: FrontdoorOrigin is the Schema for the FrontdoorOrigins API. Manages + a Front Door (standard/premium) Origin. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: FrontdoorOriginSpec defines the desired state of FrontdoorOrigin + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + cdnFrontdoorOriginGroupId: + description: The ID of the Front Door Origin Group within which + this Front Door Origin should exist. Changing this forces a + new Front Door Origin to be created. + type: string + cdnFrontdoorOriginGroupIdRef: + description: Reference to a FrontdoorOriginGroup in cdn to populate + cdnFrontdoorOriginGroupId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + cdnFrontdoorOriginGroupIdSelector: + description: Selector for a FrontdoorOriginGroup in cdn to populate + cdnFrontdoorOriginGroupId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + certificateNameCheckEnabled: + description: Specifies whether certificate name checks are enabled + for this origin. 
+ type: boolean + enabled: + description: Should the origin be enabled? Possible values are + true or false. Defaults to true. + type: boolean + healthProbesEnabled: + description: Should the origin be enabled? Possible values are + true or false. Defaults to true. + type: boolean + hostName: + description: The IPv4 address, IPv6 address or Domain name of + the Origin. + type: string + hostNameRef: + description: Reference to a Account in storage to populate hostName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + hostNameSelector: + description: Selector for a Account in storage to populate hostName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + httpPort: + description: The value of the HTTP port. Must be between 1 and + 65535. Defaults to 80. + type: number + httpsPort: + description: The value of the HTTPS port. Must be between 1 and + 65535. Defaults to 443. + type: number + originHostHeader: + description: The host header value (an IPv4 address, IPv6 address + or Domain name) which is sent to the origin with each request. + If unspecified the hostname from the request will be used. + type: string + originHostHeaderRef: + description: Reference to a Account in storage to populate originHostHeader. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + originHostHeaderSelector: + description: Selector for a Account in storage to populate originHostHeader. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + priority: + description: Priority of origin in given origin group for load + balancing. Higher priorities will not be used for load balancing + if any lower priority origin is healthy. Must be between 1 and + 5 (inclusive). Defaults to 1. + type: number + privateLink: + description: A private_link block as defined below. + properties: + location: + description: Specifies the location where the Private Link + resource should exist. Changing this forces a new resource + to be created. + type: string + locationRef: + description: Reference to a Account in storage to populate + location. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + locationSelector: + description: Selector for a Account in storage to populate + location. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + privateLinkTargetId: + description: The ID of the Azure Resource to connect to via + the Private Link. + type: string + privateLinkTargetIdRef: + description: Reference to a Account in storage to populate + privateLinkTargetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + privateLinkTargetIdSelector: + description: Selector for a Account in storage to populate + privateLinkTargetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + requestMessage: + description: Specifies the request message that will be submitted + to the private_link_target_id when requesting the private + link endpoint connection. Values must be between 1 and 140 + characters in length. Defaults to Access request for CDN + FrontDoor Private Link Origin. + type: string + targetType: + description: Specifies the type of target for this Private + Link Endpoint. Possible values are blob, blob_secondary, + web and sites. + type: string + type: object + weight: + description: The weight of the origin in a given origin group + for load balancing. Must be between 1 and 1000. Defaults to + 500. + type: number + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + certificateNameCheckEnabled: + description: Specifies whether certificate name checks are enabled + for this origin. 
+ type: boolean + enabled: + description: Should the origin be enabled? Possible values are + true or false. Defaults to true. + type: boolean + healthProbesEnabled: + description: Should the origin be enabled? Possible values are + true or false. Defaults to true. + type: boolean + hostName: + description: The IPv4 address, IPv6 address or Domain name of + the Origin. + type: string + hostNameRef: + description: Reference to a Account in storage to populate hostName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + hostNameSelector: + description: Selector for a Account in storage to populate hostName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + httpPort: + description: The value of the HTTP port. Must be between 1 and + 65535. Defaults to 80. + type: number + httpsPort: + description: The value of the HTTPS port. Must be between 1 and + 65535. Defaults to 443. + type: number + originHostHeader: + description: The host header value (an IPv4 address, IPv6 address + or Domain name) which is sent to the origin with each request. + If unspecified the hostname from the request will be used. + type: string + originHostHeaderRef: + description: Reference to a Account in storage to populate originHostHeader. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + originHostHeaderSelector: + description: Selector for a Account in storage to populate originHostHeader. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + priority: + description: Priority of origin in given origin group for load + balancing. Higher priorities will not be used for load balancing + if any lower priority origin is healthy. Must be between 1 and + 5 (inclusive). Defaults to 1. + type: number + privateLink: + description: A private_link block as defined below. + properties: + location: + description: Specifies the location where the Private Link + resource should exist. Changing this forces a new resource + to be created. + type: string + locationRef: + description: Reference to a Account in storage to populate + location. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + locationSelector: + description: Selector for a Account in storage to populate + location. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + privateLinkTargetId: + description: The ID of the Azure Resource to connect to via + the Private Link. + type: string + privateLinkTargetIdRef: + description: Reference to a Account in storage to populate + privateLinkTargetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + privateLinkTargetIdSelector: + description: Selector for a Account in storage to populate + privateLinkTargetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + requestMessage: + description: Specifies the request message that will be submitted + to the private_link_target_id when requesting the private + link endpoint connection. Values must be between 1 and 140 + characters in length. Defaults to Access request for CDN + FrontDoor Private Link Origin. + type: string + targetType: + description: Specifies the type of target for this Private + Link Endpoint. Possible values are blob, blob_secondary, + web and sites. + type: string + type: object + weight: + description: The weight of the origin in a given origin group + for load balancing. Must be between 1 and 1000. Defaults to + 500. + type: number + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.certificateNameCheckEnabled is a required + parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.certificateNameCheckEnabled) + || (has(self.initProvider) && has(self.initProvider.certificateNameCheckEnabled))' + status: + description: FrontdoorOriginStatus defines the observed state of FrontdoorOrigin. + properties: + atProvider: + properties: + cdnFrontdoorOriginGroupId: + description: The ID of the Front Door Origin Group within which + this Front Door Origin should exist. Changing this forces a + new Front Door Origin to be created. + type: string + certificateNameCheckEnabled: + description: Specifies whether certificate name checks are enabled + for this origin. + type: boolean + enabled: + description: Should the origin be enabled? Possible values are + true or false. Defaults to true. + type: boolean + healthProbesEnabled: + description: Should the origin be enabled? 
Possible values are + true or false. Defaults to true. + type: boolean + hostName: + description: The IPv4 address, IPv6 address or Domain name of + the Origin. + type: string + httpPort: + description: The value of the HTTP port. Must be between 1 and + 65535. Defaults to 80. + type: number + httpsPort: + description: The value of the HTTPS port. Must be between 1 and + 65535. Defaults to 443. + type: number + id: + description: The ID of the Front Door Origin. + type: string + originHostHeader: + description: The host header value (an IPv4 address, IPv6 address + or Domain name) which is sent to the origin with each request. + If unspecified the hostname from the request will be used. + type: string + priority: + description: Priority of origin in given origin group for load + balancing. Higher priorities will not be used for load balancing + if any lower priority origin is healthy. Must be between 1 and + 5 (inclusive). Defaults to 1. + type: number + privateLink: + description: A private_link block as defined below. + properties: + location: + description: Specifies the location where the Private Link + resource should exist. Changing this forces a new resource + to be created. + type: string + privateLinkTargetId: + description: The ID of the Azure Resource to connect to via + the Private Link. + type: string + requestMessage: + description: Specifies the request message that will be submitted + to the private_link_target_id when requesting the private + link endpoint connection. Values must be between 1 and 140 + characters in length. Defaults to Access request for CDN + FrontDoor Private Link Origin. + type: string + targetType: + description: Specifies the type of target for this Private + Link Endpoint. Possible values are blob, blob_secondary, + web and sites. + type: string + type: object + weight: + description: The weight of the origin in a given origin group + for load balancing. Must be between 1 and 1000. Defaults to + 500. 
+ type: number + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/cdn.azure.upbound.io_frontdoorroutes.yaml b/package/crds/cdn.azure.upbound.io_frontdoorroutes.yaml index cb90cbfee..9fbd50456 100644 --- a/package/crds/cdn.azure.upbound.io_frontdoorroutes.yaml +++ b/package/crds/cdn.azure.upbound.io_frontdoorroutes.yaml @@ -1337,3 +1337,1316 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: FrontdoorRoute is the Schema for the FrontdoorRoutes API. Manages + a Front Door (standard/premium) Route. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: FrontdoorRouteSpec defines the desired state of FrontdoorRoute + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + cache: + description: A cache block as defined below. + properties: + compressionEnabled: + description: Is content compression enabled? Possible values + are true or false. Defaults to false. + type: boolean + contentTypesToCompress: + description: A list of one or more Content types (formerly + known as MIME types) to compress. 
Possible values include + application/eot, application/font, application/font-sfnt, + application/javascript, application/json, application/opentype, + application/otf, application/pkcs7-mime, application/truetype, + application/ttf, application/vnd.ms-fontobject, application/xhtml+xml, + application/xml, application/xml+rss, application/x-font-opentype, + application/x-font-truetype, application/x-font-ttf, application/x-httpd-cgi, + application/x-mpegurl, application/x-opentype, application/x-otf, + application/x-perl, application/x-ttf, application/x-javascript, + font/eot, font/ttf, font/otf, font/opentype, image/svg+xml, + text/css, text/csv, text/html, text/javascript, text/js, + text/plain, text/richtext, text/tab-separated-values, text/xml, + text/x-script, text/x-component or text/x-java-source. + items: + type: string + type: array + queryStringCachingBehavior: + description: Defines how the Front Door Route will cache requests + that include query strings. Possible values include IgnoreQueryString, + IgnoreSpecifiedQueryStrings, IncludeSpecifiedQueryStrings + or UseQueryString. Defaults to IgnoreQueryString. + type: string + queryStrings: + description: Query strings to include or ignore. + items: + type: string + type: array + type: object + cdnFrontdoorCustomDomainIds: + description: The IDs of the Front Door Custom Domains which are + associated with this Front Door Route. + items: + type: string + type: array + x-kubernetes-list-type: set + cdnFrontdoorCustomDomainIdsRefs: + description: References to FrontdoorCustomDomain in cdn to populate + cdnFrontdoorCustomDomainIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + cdnFrontdoorCustomDomainIdsSelector: + description: Selector for a list of FrontdoorCustomDomain in cdn + to populate cdnFrontdoorCustomDomainIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + cdnFrontdoorEndpointId: + description: The resource ID of the Front Door Endpoint where + this Front Door Route should exist. Changing this forces a new + Front Door Route to be created. + type: string + cdnFrontdoorEndpointIdRef: + description: Reference to a FrontdoorEndpoint in cdn to populate + cdnFrontdoorEndpointId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + cdnFrontdoorEndpointIdSelector: + description: Selector for a FrontdoorEndpoint in cdn to populate + cdnFrontdoorEndpointId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + cdnFrontdoorOriginGroupId: + description: The resource ID of the Front Door Origin Group where + this Front Door Route should be created. + type: string + cdnFrontdoorOriginGroupIdRef: + description: Reference to a FrontdoorOriginGroup in cdn to populate + cdnFrontdoorOriginGroupId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + cdnFrontdoorOriginGroupIdSelector: + description: Selector for a FrontdoorOriginGroup in cdn to populate + cdnFrontdoorOriginGroupId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + cdnFrontdoorOriginIds: + description: One or more Front Door Origin resource IDs that this + Front Door Route will link to. + items: + type: string + type: array + cdnFrontdoorOriginIdsRefs: + description: References to FrontdoorOrigin in cdn to populate + cdnFrontdoorOriginIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + cdnFrontdoorOriginIdsSelector: + description: Selector for a list of FrontdoorOrigin in cdn to + populate cdnFrontdoorOriginIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + cdnFrontdoorOriginPath: + description: A directory path on the Front Door Origin that can + be used to retrieve content (e.g. contoso.cloudapp.net/originpath). + type: string + cdnFrontdoorRuleSetIds: + description: A list of the Front Door Rule Set IDs which should + be assigned to this Front Door Route. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + cdnFrontdoorRuleSetIdsRefs: + description: References to FrontdoorRuleSet in cdn to populate + cdnFrontdoorRuleSetIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + cdnFrontdoorRuleSetIdsSelector: + description: Selector for a list of FrontdoorRuleSet in cdn to + populate cdnFrontdoorRuleSetIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + enabled: + description: Is this Front Door Route enabled? Possible values + are true or false. Defaults to true. + type: boolean + forwardingProtocol: + description: The Protocol that will be use when forwarding traffic + to backends. Possible values are HttpOnly, HttpsOnly or MatchRequest. + Defaults to MatchRequest. + type: string + httpsRedirectEnabled: + description: Automatically redirect HTTP traffic to HTTPS traffic? + Possible values are true or false. Defaults to true. + type: boolean + linkToDefaultDomain: + description: Should this Front Door Route be linked to the default + endpoint? Possible values include true or false. Defaults to + true. + type: boolean + patternsToMatch: + description: The route patterns of the rule. + items: + type: string + type: array + supportedProtocols: + description: One or more Protocols supported by this Front Door + Route. Possible values are Http or Https. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + cache: + description: A cache block as defined below. + properties: + compressionEnabled: + description: Is content compression enabled? Possible values + are true or false. Defaults to false. + type: boolean + contentTypesToCompress: + description: A list of one or more Content types (formerly + known as MIME types) to compress. Possible values include + application/eot, application/font, application/font-sfnt, + application/javascript, application/json, application/opentype, + application/otf, application/pkcs7-mime, application/truetype, + application/ttf, application/vnd.ms-fontobject, application/xhtml+xml, + application/xml, application/xml+rss, application/x-font-opentype, + application/x-font-truetype, application/x-font-ttf, application/x-httpd-cgi, + application/x-mpegurl, application/x-opentype, application/x-otf, + application/x-perl, application/x-ttf, application/x-javascript, + font/eot, font/ttf, font/otf, font/opentype, image/svg+xml, + text/css, text/csv, text/html, text/javascript, text/js, + text/plain, text/richtext, text/tab-separated-values, text/xml, + text/x-script, text/x-component or text/x-java-source. + items: + type: string + type: array + queryStringCachingBehavior: + description: Defines how the Front Door Route will cache requests + that include query strings. Possible values include IgnoreQueryString, + IgnoreSpecifiedQueryStrings, IncludeSpecifiedQueryStrings + or UseQueryString. Defaults to IgnoreQueryString. + type: string + queryStrings: + description: Query strings to include or ignore. + items: + type: string + type: array + type: object + cdnFrontdoorCustomDomainIds: + description: The IDs of the Front Door Custom Domains which are + associated with this Front Door Route. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + cdnFrontdoorCustomDomainIdsRefs: + description: References to FrontdoorCustomDomain in cdn to populate + cdnFrontdoorCustomDomainIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + cdnFrontdoorCustomDomainIdsSelector: + description: Selector for a list of FrontdoorCustomDomain in cdn + to populate cdnFrontdoorCustomDomainIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + cdnFrontdoorOriginGroupId: + description: The resource ID of the Front Door Origin Group where + this Front Door Route should be created. + type: string + cdnFrontdoorOriginGroupIdRef: + description: Reference to a FrontdoorOriginGroup in cdn to populate + cdnFrontdoorOriginGroupId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + cdnFrontdoorOriginGroupIdSelector: + description: Selector for a FrontdoorOriginGroup in cdn to populate + cdnFrontdoorOriginGroupId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + cdnFrontdoorOriginIds: + description: One or more Front Door Origin resource IDs that this + Front Door Route will link to. + items: + type: string + type: array + cdnFrontdoorOriginIdsRefs: + description: References to FrontdoorOrigin in cdn to populate + cdnFrontdoorOriginIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + cdnFrontdoorOriginIdsSelector: + description: Selector for a list of FrontdoorOrigin in cdn to + populate cdnFrontdoorOriginIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + cdnFrontdoorOriginPath: + description: A directory path on the Front Door Origin that can + be used to retrieve content (e.g. contoso.cloudapp.net/originpath). + type: string + cdnFrontdoorRuleSetIds: + description: A list of the Front Door Rule Set IDs which should + be assigned to this Front Door Route. + items: + type: string + type: array + x-kubernetes-list-type: set + cdnFrontdoorRuleSetIdsRefs: + description: References to FrontdoorRuleSet in cdn to populate + cdnFrontdoorRuleSetIds. + items: + description: A Reference to a named object. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + cdnFrontdoorRuleSetIdsSelector: + description: Selector for a list of FrontdoorRuleSet in cdn to + populate cdnFrontdoorRuleSetIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + enabled: + description: Is this Front Door Route enabled? Possible values + are true or false. Defaults to true. + type: boolean + forwardingProtocol: + description: The Protocol that will be use when forwarding traffic + to backends. Possible values are HttpOnly, HttpsOnly or MatchRequest. + Defaults to MatchRequest. + type: string + httpsRedirectEnabled: + description: Automatically redirect HTTP traffic to HTTPS traffic? + Possible values are true or false. Defaults to true. + type: boolean + linkToDefaultDomain: + description: Should this Front Door Route be linked to the default + endpoint? Possible values include true or false. Defaults to + true. + type: boolean + patternsToMatch: + description: The route patterns of the rule. + items: + type: string + type: array + supportedProtocols: + description: One or more Protocols supported by this Front Door + Route. Possible values are Http or Https. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.patternsToMatch is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.patternsToMatch) + || (has(self.initProvider) && has(self.initProvider.patternsToMatch))' + - message: spec.forProvider.supportedProtocols is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.supportedProtocols) + || (has(self.initProvider) && has(self.initProvider.supportedProtocols))' + status: + description: FrontdoorRouteStatus defines the observed state of FrontdoorRoute. + properties: + atProvider: + properties: + cache: + description: A cache block as defined below. + properties: + compressionEnabled: + description: Is content compression enabled? Possible values + are true or false. Defaults to false. 
+ type: boolean + contentTypesToCompress: + description: A list of one or more Content types (formerly + known as MIME types) to compress. Possible values include + application/eot, application/font, application/font-sfnt, + application/javascript, application/json, application/opentype, + application/otf, application/pkcs7-mime, application/truetype, + application/ttf, application/vnd.ms-fontobject, application/xhtml+xml, + application/xml, application/xml+rss, application/x-font-opentype, + application/x-font-truetype, application/x-font-ttf, application/x-httpd-cgi, + application/x-mpegurl, application/x-opentype, application/x-otf, + application/x-perl, application/x-ttf, application/x-javascript, + font/eot, font/ttf, font/otf, font/opentype, image/svg+xml, + text/css, text/csv, text/html, text/javascript, text/js, + text/plain, text/richtext, text/tab-separated-values, text/xml, + text/x-script, text/x-component or text/x-java-source. + items: + type: string + type: array + queryStringCachingBehavior: + description: Defines how the Front Door Route will cache requests + that include query strings. Possible values include IgnoreQueryString, + IgnoreSpecifiedQueryStrings, IncludeSpecifiedQueryStrings + or UseQueryString. Defaults to IgnoreQueryString. + type: string + queryStrings: + description: Query strings to include or ignore. + items: + type: string + type: array + type: object + cdnFrontdoorCustomDomainIds: + description: The IDs of the Front Door Custom Domains which are + associated with this Front Door Route. + items: + type: string + type: array + x-kubernetes-list-type: set + cdnFrontdoorEndpointId: + description: The resource ID of the Front Door Endpoint where + this Front Door Route should exist. Changing this forces a new + Front Door Route to be created. + type: string + cdnFrontdoorOriginGroupId: + description: The resource ID of the Front Door Origin Group where + this Front Door Route should be created. 
+ type: string + cdnFrontdoorOriginIds: + description: One or more Front Door Origin resource IDs that this + Front Door Route will link to. + items: + type: string + type: array + cdnFrontdoorOriginPath: + description: A directory path on the Front Door Origin that can + be used to retrieve content (e.g. contoso.cloudapp.net/originpath). + type: string + cdnFrontdoorRuleSetIds: + description: A list of the Front Door Rule Set IDs which should + be assigned to this Front Door Route. + items: + type: string + type: array + x-kubernetes-list-type: set + enabled: + description: Is this Front Door Route enabled? Possible values + are true or false. Defaults to true. + type: boolean + forwardingProtocol: + description: The Protocol that will be use when forwarding traffic + to backends. Possible values are HttpOnly, HttpsOnly or MatchRequest. + Defaults to MatchRequest. + type: string + httpsRedirectEnabled: + description: Automatically redirect HTTP traffic to HTTPS traffic? + Possible values are true or false. Defaults to true. + type: boolean + id: + description: The ID of the Front Door Route. + type: string + linkToDefaultDomain: + description: Should this Front Door Route be linked to the default + endpoint? Possible values include true or false. Defaults to + true. + type: boolean + patternsToMatch: + description: The route patterns of the rule. + items: + type: string + type: array + supportedProtocols: + description: One or more Protocols supported by this Front Door + Route. Possible values are Http or Https. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. 
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/cdn.azure.upbound.io_frontdoorrules.yaml b/package/crds/cdn.azure.upbound.io_frontdoorrules.yaml index 324f7f69b..39e717911 100644 --- a/package/crds/cdn.azure.upbound.io_frontdoorrules.yaml +++ b/package/crds/cdn.azure.upbound.io_frontdoorrules.yaml @@ -3081,3 +3081,2952 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: FrontdoorRule is the Schema for the FrontdoorRules API. Manages + a Front Door (standard/premium) Rule. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: FrontdoorRuleSpec defines the desired state of FrontdoorRule + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + actions: + description: An actions block as defined below. + properties: + requestHeaderAction: + description: A request_header_action block as defined below. + items: + properties: + headerAction: + description: The action to be taken on the specified + header_name. Possible values include Append, Overwrite + or Delete. + type: string + headerName: + description: The name of the header to modify. + type: string + value: + description: The value to append or overwrite. + type: string + type: object + type: array + responseHeaderAction: + description: A response_header_action block as defined below. + items: + properties: + headerAction: + description: The action to be taken on the specified + header_name. Possible values include Append, Overwrite + or Delete. + type: string + headerName: + description: The name of the header to modify. + type: string + value: + description: The value to append or overwrite. 
+ type: string + type: object + type: array + routeConfigurationOverrideAction: + description: A route_configuration_override_action block as + defined below. + properties: + cacheBehavior: + description: HonorOrigin the Front Door will always honor + origin response header directive. If the origin directive + is missing, Front Door will cache contents anywhere + from 1 to 3 days. OverrideAlways the TTL value returned + from your Front Door Origin is overwritten with the + value specified in the action. This behavior will only + be applied if the response is cacheable. OverrideIfOriginMissing + if no TTL value gets returned from your Front Door Origin, + the rule sets the TTL to the value specified in the + action. This behavior will only be applied if the response + is cacheable. Disabled the Front Door will not cache + the response contents, irrespective of Front Door Origin + response directives. Possible values include HonorOrigin, + OverrideAlways, OverrideIfOriginMissing or Disabled. + type: string + cacheDuration: + description: When Cache behavior is set to Override or + SetIfMissing, this field specifies the cache duration + to use. The maximum duration is 366 days specified in + the d.HH:MM:SS format(e.g. 365.23:59:59). If the desired + maximum cache duration is less than 1 day then the maximum + cache duration should be specified in the HH:MM:SS format(e.g. + 23:59:59). + type: string + cdnFrontdoorOriginGroupId: + description: The Front Door Origin Group resource ID that + the request should be routed to. This overrides the + configuration specified in the Front Door Endpoint route. + type: string + cdnFrontdoorOriginGroupIdRef: + description: Reference to a FrontdoorOriginGroup in cdn + to populate cdnFrontdoorOriginGroupId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + cdnFrontdoorOriginGroupIdSelector: + description: Selector for a FrontdoorOriginGroup in cdn + to populate cdnFrontdoorOriginGroupId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + compressionEnabled: + description: Should the Front Door dynamically compress + the content? Possible values include true or false. + type: boolean + forwardingProtocol: + description: The forwarding protocol the request will + be redirected as. This overrides the configuration specified + in the route to be associated with. Possible values + include MatchRequest, HttpOnly or HttpsOnly. + type: string + queryStringCachingBehavior: + description: IncludeSpecifiedQueryStrings query strings + specified in the query_string_parameters field get included + when the cache key gets generated. UseQueryString cache + every unique URL, each unique URL will have its own + cache key. IgnoreSpecifiedQueryStrings query strings + specified in the query_string_parameters field get excluded + when the cache key gets generated. IgnoreQueryString + query strings aren't considered when the cache key gets + generated. Possible values include IgnoreQueryString, + UseQueryString, IgnoreSpecifiedQueryStrings or IncludeSpecifiedQueryStrings. + type: string + queryStringParameters: + description: A list of query string parameter names. + items: + type: string + type: array + type: object + urlRedirectAction: + description: A url_redirect_action block as defined below. + You may not have a url_redirect_action and a url_rewrite_action + defined in the same actions block. + properties: + destinationFragment: + description: The fragment to use in the redirect. The + value must be a string between 0 and 1024 characters + in length, leave blank to preserve the incoming fragment. + Defaults to "". + type: string + destinationHostname: + description: The host name you want the request to be + redirected to. The value must be a string between 0 + and 2048 characters in length, leave blank to preserve + the incoming host. + type: string + destinationPath: + description: The path to use in the redirect. 
The value + must be a string and include the leading /, leave blank + to preserve the incoming path. Defaults to "". + type: string + queryString: + description: The query string used in the redirect URL. + The value must be in the = or ={action_server_variable} + format and must not include the leading ?, leave blank + to preserve the incoming query string. Maximum allowed + length for this field is 2048 characters. Defaults to + "". + type: string + redirectProtocol: + description: The protocol the request will be redirected + as. Possible values include MatchRequest, Http or Https. + Defaults to MatchRequest. + type: string + redirectType: + description: The response type to return to the requestor. + Possible values include Moved, Found , TemporaryRedirect + or PermanentRedirect. + type: string + type: object + urlRewriteAction: + description: A url_rewrite_action block as defined below. + You may not have a url_rewrite_action and a url_redirect_action + defined in the same actions block. + properties: + destination: + description: The destination path to use in the rewrite. + The destination path overwrites the source pattern. + type: string + preserveUnmatchedPath: + description: Append the remaining path after the source + pattern to the new destination path? Possible values + true or false. Defaults to false. + type: boolean + sourcePattern: + description: The source pattern in the URL path to replace. + This uses prefix-based matching. For example, to match + all URL paths use a forward slash "/" as the source + pattern value. + type: string + type: object + type: object + behaviorOnMatch: + description: If this rule is a match should the rules engine continue + processing the remaining rules or stop? Possible values are + Continue and Stop. Defaults to Continue. + type: string + cdnFrontdoorRuleSetId: + description: The resource ID of the Front Door Rule Set for this + Front Door Rule. Changing this forces a new Front Door Rule + to be created. 
+ type: string + cdnFrontdoorRuleSetIdRef: + description: Reference to a FrontdoorRuleSet in cdn to populate + cdnFrontdoorRuleSetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + cdnFrontdoorRuleSetIdSelector: + description: Selector for a FrontdoorRuleSet in cdn to populate + cdnFrontdoorRuleSetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + conditions: + description: A conditions block as defined below. + properties: + clientPortCondition: + description: A client_port_condition block as defined below. + items: + properties: + matchValues: + description: One or more string or integer values(e.g. + "1") representing the value of the request path to + match. Don't include the leading slash (/). If multiple + values are specified, they're evaluated using OR logic. + items: + type: string + type: array + negateCondition: + description: If true operator becomes the opposite of + its value. Possible values true or false. Defaults + to false. Details can be found in the Condition Operator + List below. + type: boolean + operator: + description: A Conditional operator. Possible values + include Any, Equal, Contains, BeginsWith, EndsWith, + LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual + or RegEx. Details can be found in the Condition Operator + List below. + type: string + type: object + type: array + cookiesCondition: + description: A cookies_condition block as defined below. + items: + properties: + cookieName: + description: A string value representing the name of + the cookie. + type: string + matchValues: + description: One or more string or integer values(e.g. + "1") representing the value of the request path to + match. Don't include the leading slash (/). If multiple + values are specified, they're evaluated using OR logic. + items: + type: string + type: array + negateCondition: + description: If true operator becomes the opposite of + its value. Possible values true or false. Defaults + to false. Details can be found in the Condition Operator + List below. 
+ type: boolean + operator: + description: A Conditional operator. Possible values + include Any, Equal, Contains, BeginsWith, EndsWith, + LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual + or RegEx. Details can be found in the Condition Operator + List below. + type: string + transforms: + description: A Conditional operator. Possible values + include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode + or UrlEncode. Details can be found in the Condition + Transform List below. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + hostNameCondition: + description: A host_name_condition block as defined below. + items: + properties: + matchValues: + description: One or more string or integer values(e.g. + "1") representing the value of the request path to + match. Don't include the leading slash (/). If multiple + values are specified, they're evaluated using OR logic. + items: + type: string + type: array + negateCondition: + description: If true operator becomes the opposite of + its value. Possible values true or false. Defaults + to false. Details can be found in the Condition Operator + List below. + type: boolean + operator: + description: A Conditional operator. Possible values + include Any, Equal, Contains, BeginsWith, EndsWith, + LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual + or RegEx. Details can be found in the Condition Operator + List below. + type: string + transforms: + description: A Conditional operator. Possible values + include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode + or UrlEncode. Details can be found in the Condition + Transform List below. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + httpVersionCondition: + description: A http_version_condition block as defined below. + items: + properties: + matchValues: + description: One or more string or integer values(e.g. 
+ "1") representing the value of the request path to + match. Don't include the leading slash (/). If multiple + values are specified, they're evaluated using OR logic. + items: + type: string + type: array + x-kubernetes-list-type: set + negateCondition: + description: If true operator becomes the opposite of + its value. Possible values true or false. Defaults + to false. Details can be found in the Condition Operator + List below. + type: boolean + operator: + description: A Conditional operator. Possible values + include Any, Equal, Contains, BeginsWith, EndsWith, + LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual + or RegEx. Details can be found in the Condition Operator + List below. + type: string + type: object + type: array + isDeviceCondition: + description: A is_device_condition block as defined below. + items: + properties: + matchValues: + description: One or more string or integer values(e.g. + "1") representing the value of the request path to + match. Don't include the leading slash (/). If multiple + values are specified, they're evaluated using OR logic. + items: + type: string + type: array + negateCondition: + description: If true operator becomes the opposite of + its value. Possible values true or false. Defaults + to false. Details can be found in the Condition Operator + List below. + type: boolean + operator: + description: A Conditional operator. Possible values + include Any, Equal, Contains, BeginsWith, EndsWith, + LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual + or RegEx. Details can be found in the Condition Operator + List below. + type: string + type: object + type: array + postArgsCondition: + description: A post_args_condition block as defined below. + items: + properties: + matchValues: + description: One or more string or integer values(e.g. + "1") representing the value of the request path to + match. Don't include the leading slash (/). If multiple + values are specified, they're evaluated using OR logic. 
+ items: + type: string + type: array + negateCondition: + description: If true operator becomes the opposite of + its value. Possible values true or false. Defaults + to false. Details can be found in the Condition Operator + List below. + type: boolean + operator: + description: A Conditional operator. Possible values + include Any, Equal, Contains, BeginsWith, EndsWith, + LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual + or RegEx. Details can be found in the Condition Operator + List below. + type: string + postArgsName: + description: A string value representing the name of + the POST argument. + type: string + transforms: + description: A Conditional operator. Possible values + include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode + or UrlEncode. Details can be found in the Condition + Transform List below. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + queryStringCondition: + description: A query_string_condition block as defined below. + items: + properties: + matchValues: + description: One or more string or integer values(e.g. + "1") representing the value of the request path to + match. Don't include the leading slash (/). If multiple + values are specified, they're evaluated using OR logic. + items: + type: string + type: array + negateCondition: + description: If true operator becomes the opposite of + its value. Possible values true or false. Defaults + to false. Details can be found in the Condition Operator + List below. + type: boolean + operator: + description: A Conditional operator. Possible values + include Any, Equal, Contains, BeginsWith, EndsWith, + LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual + or RegEx. Details can be found in the Condition Operator + List below. + type: string + transforms: + description: A Conditional operator. Possible values + include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode + or UrlEncode. 
Details can be found in the Condition + Transform List below. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + remoteAddressCondition: + description: A remote_address_condition block as defined below. + items: + properties: + matchValues: + description: One or more string or integer values(e.g. + "1") representing the value of the request path to + match. Don't include the leading slash (/). If multiple + values are specified, they're evaluated using OR logic. + items: + type: string + type: array + negateCondition: + description: If true operator becomes the opposite of + its value. Possible values true or false. Defaults + to false. Details can be found in the Condition Operator + List below. + type: boolean + operator: + description: A Conditional operator. Possible values + include Any, Equal, Contains, BeginsWith, EndsWith, + LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual + or RegEx. Details can be found in the Condition Operator + List below. + type: string + type: object + type: array + requestBodyCondition: + description: A request_body_condition block as defined below. + items: + properties: + matchValues: + description: One or more string or integer values(e.g. + "1") representing the value of the request path to + match. Don't include the leading slash (/). If multiple + values are specified, they're evaluated using OR logic. + items: + type: string + type: array + negateCondition: + description: If true operator becomes the opposite of + its value. Possible values true or false. Defaults + to false. Details can be found in the Condition Operator + List below. + type: boolean + operator: + description: A Conditional operator. Possible values + include Any, Equal, Contains, BeginsWith, EndsWith, + LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual + or RegEx. Details can be found in the Condition Operator + List below. + type: string + transforms: + description: A Conditional operator. 
Possible values + include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode + or UrlEncode. Details can be found in the Condition + Transform List below. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + requestHeaderCondition: + description: A request_header_condition block as defined below. + items: + properties: + headerName: + description: The name of the header to modify. + type: string + matchValues: + description: One or more string or integer values(e.g. + "1") representing the value of the request path to + match. Don't include the leading slash (/). If multiple + values are specified, they're evaluated using OR logic. + items: + type: string + type: array + negateCondition: + description: If true operator becomes the opposite of + its value. Possible values true or false. Defaults + to false. Details can be found in the Condition Operator + List below. + type: boolean + operator: + description: A Conditional operator. Possible values + include Any, Equal, Contains, BeginsWith, EndsWith, + LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual + or RegEx. Details can be found in the Condition Operator + List below. + type: string + transforms: + description: A Conditional operator. Possible values + include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode + or UrlEncode. Details can be found in the Condition + Transform List below. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + requestMethodCondition: + description: A request_method_condition block as defined below. + items: + properties: + matchValues: + description: One or more string or integer values(e.g. + "1") representing the value of the request path to + match. Don't include the leading slash (/). If multiple + values are specified, they're evaluated using OR logic. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + negateCondition: + description: If true operator becomes the opposite of + its value. Possible values true or false. Defaults + to false. Details can be found in the Condition Operator + List below. + type: boolean + operator: + description: A Conditional operator. Possible values + include Any, Equal, Contains, BeginsWith, EndsWith, + LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual + or RegEx. Details can be found in the Condition Operator + List below. + type: string + type: object + type: array + requestSchemeCondition: + description: A request_scheme_condition block as defined below. + items: + properties: + matchValues: + description: One or more string or integer values(e.g. + "1") representing the value of the request path to + match. Don't include the leading slash (/). If multiple + values are specified, they're evaluated using OR logic. + items: + type: string + type: array + negateCondition: + description: If true operator becomes the opposite of + its value. Possible values true or false. Defaults + to false. Details can be found in the Condition Operator + List below. + type: boolean + operator: + description: A Conditional operator. Possible values + include Any, Equal, Contains, BeginsWith, EndsWith, + LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual + or RegEx. Details can be found in the Condition Operator + List below. + type: string + type: object + type: array + requestUriCondition: + description: A request_uri_condition block as defined below. + items: + properties: + matchValues: + description: One or more string or integer values(e.g. + "1") representing the value of the request path to + match. Don't include the leading slash (/). If multiple + values are specified, they're evaluated using OR logic. + items: + type: string + type: array + negateCondition: + description: If true operator becomes the opposite of + its value. 
Possible values true or false. Defaults + to false. Details can be found in the Condition Operator + List below. + type: boolean + operator: + description: A Conditional operator. Possible values + include Any, Equal, Contains, BeginsWith, EndsWith, + LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual + or RegEx. Details can be found in the Condition Operator + List below. + type: string + transforms: + description: A Conditional operator. Possible values + include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode + or UrlEncode. Details can be found in the Condition + Transform List below. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + serverPortCondition: + description: A server_port_condition block as defined below. + items: + properties: + matchValues: + description: One or more string or integer values(e.g. + "1") representing the value of the request path to + match. Don't include the leading slash (/). If multiple + values are specified, they're evaluated using OR logic. + items: + type: string + type: array + x-kubernetes-list-type: set + negateCondition: + description: If true operator becomes the opposite of + its value. Possible values true or false. Defaults + to false. Details can be found in the Condition Operator + List below. + type: boolean + operator: + description: A Conditional operator. Possible values + include Any, Equal, Contains, BeginsWith, EndsWith, + LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual + or RegEx. Details can be found in the Condition Operator + List below. + type: string + type: object + type: array + socketAddressCondition: + description: A socket_address_condition block as defined below. + items: + properties: + matchValues: + description: One or more string or integer values(e.g. + "1") representing the value of the request path to + match. Don't include the leading slash (/). If multiple + values are specified, they're evaluated using OR logic. 
+ items: + type: string + type: array + negateCondition: + description: If true operator becomes the opposite of + its value. Possible values true or false. Defaults + to false. Details can be found in the Condition Operator + List below. + type: boolean + operator: + description: A Conditional operator. Possible values + include Any, Equal, Contains, BeginsWith, EndsWith, + LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual + or RegEx. Details can be found in the Condition Operator + List below. + type: string + type: object + type: array + sslProtocolCondition: + description: A ssl_protocol_condition block as defined below. + items: + properties: + matchValues: + description: One or more string or integer values(e.g. + "1") representing the value of the request path to + match. Don't include the leading slash (/). If multiple + values are specified, they're evaluated using OR logic. + items: + type: string + type: array + x-kubernetes-list-type: set + negateCondition: + description: If true operator becomes the opposite of + its value. Possible values true or false. Defaults + to false. Details can be found in the Condition Operator + List below. + type: boolean + operator: + description: A Conditional operator. Possible values + include Any, Equal, Contains, BeginsWith, EndsWith, + LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual + or RegEx. Details can be found in the Condition Operator + List below. + type: string + type: object + type: array + urlFileExtensionCondition: + description: A url_file_extension_condition block as defined + below. + items: + properties: + matchValues: + description: One or more string or integer values(e.g. + "1") representing the value of the request path to + match. Don't include the leading slash (/). If multiple + values are specified, they're evaluated using OR logic. + items: + type: string + type: array + negateCondition: + description: If true operator becomes the opposite of + its value. 
Possible values true or false. Defaults + to false. Details can be found in the Condition Operator + List below. + type: boolean + operator: + description: A Conditional operator. Possible values + include Any, Equal, Contains, BeginsWith, EndsWith, + LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual + or RegEx. Details can be found in the Condition Operator + List below. + type: string + transforms: + description: A Conditional operator. Possible values + include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode + or UrlEncode. Details can be found in the Condition + Transform List below. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + urlFilenameCondition: + description: A url_filename_condition block as defined below. + items: + properties: + matchValues: + description: One or more string or integer values(e.g. + "1") representing the value of the request path to + match. Don't include the leading slash (/). If multiple + values are specified, they're evaluated using OR logic. + items: + type: string + type: array + negateCondition: + description: If true operator becomes the opposite of + its value. Possible values true or false. Defaults + to false. Details can be found in the Condition Operator + List below. + type: boolean + operator: + description: A Conditional operator. Possible values + include Any, Equal, Contains, BeginsWith, EndsWith, + LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual + or RegEx. Details can be found in the Condition Operator + List below. + type: string + transforms: + description: A Conditional operator. Possible values + include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode + or UrlEncode. Details can be found in the Condition + Transform List below. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + urlPathCondition: + description: A url_path_condition block as defined below. 
+ items: + properties: + matchValues: + description: One or more string or integer values(e.g. + "1") representing the value of the request path to + match. Don't include the leading slash (/). If multiple + values are specified, they're evaluated using OR logic. + items: + type: string + type: array + negateCondition: + description: If true operator becomes the opposite of + its value. Possible values true or false. Defaults + to false. Details can be found in the Condition Operator + List below. + type: boolean + operator: + description: A Conditional operator. Possible values + include Any, Equal, Contains, BeginsWith, EndsWith, + LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual + or RegEx. Details can be found in the Condition Operator + List below. + type: string + transforms: + description: A Conditional operator. Possible values + include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode + or UrlEncode. Details can be found in the Condition + Transform List below. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + type: object + order: + description: The order in which the rules will be applied for + the Front Door Endpoint. The order value should be sequential + and begin at 1(e.g. 1, 2, 3...). A Front Door Rule with a lesser + order value will be applied before a rule with a greater order + value. + type: number + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + actions: + description: An actions block as defined below. + properties: + requestHeaderAction: + description: A request_header_action block as defined below. + items: + properties: + headerAction: + description: The action to be taken on the specified + header_name. Possible values include Append, Overwrite + or Delete. + type: string + headerName: + description: The name of the header to modify. + type: string + value: + description: The value to append or overwrite. + type: string + type: object + type: array + responseHeaderAction: + description: A response_header_action block as defined below. + items: + properties: + headerAction: + description: The action to be taken on the specified + header_name. Possible values include Append, Overwrite + or Delete. + type: string + headerName: + description: The name of the header to modify. + type: string + value: + description: The value to append or overwrite. + type: string + type: object + type: array + routeConfigurationOverrideAction: + description: A route_configuration_override_action block as + defined below. + properties: + cacheBehavior: + description: HonorOrigin the Front Door will always honor + origin response header directive. If the origin directive + is missing, Front Door will cache contents anywhere + from 1 to 3 days. OverrideAlways the TTL value returned + from your Front Door Origin is overwritten with the + value specified in the action. This behavior will only + be applied if the response is cacheable. OverrideIfOriginMissing + if no TTL value gets returned from your Front Door Origin, + the rule sets the TTL to the value specified in the + action. This behavior will only be applied if the response + is cacheable. 
Disabled the Front Door will not cache + the response contents, irrespective of Front Door Origin + response directives. Possible values include HonorOrigin, + OverrideAlways, OverrideIfOriginMissing or Disabled. + type: string + cacheDuration: + description: When Cache behavior is set to Override or + SetIfMissing, this field specifies the cache duration + to use. The maximum duration is 366 days specified in + the d.HH:MM:SS format(e.g. 365.23:59:59). If the desired + maximum cache duration is less than 1 day then the maximum + cache duration should be specified in the HH:MM:SS format(e.g. + 23:59:59). + type: string + cdnFrontdoorOriginGroupId: + description: The Front Door Origin Group resource ID that + the request should be routed to. This overrides the + configuration specified in the Front Door Endpoint route. + type: string + cdnFrontdoorOriginGroupIdRef: + description: Reference to a FrontdoorOriginGroup in cdn + to populate cdnFrontdoorOriginGroupId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + cdnFrontdoorOriginGroupIdSelector: + description: Selector for a FrontdoorOriginGroup in cdn + to populate cdnFrontdoorOriginGroupId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + compressionEnabled: + description: Should the Front Door dynamically compress + the content? Possible values include true or false. + type: boolean + forwardingProtocol: + description: The forwarding protocol the request will + be redirected as. This overrides the configuration specified + in the route to be associated with. Possible values + include MatchRequest, HttpOnly or HttpsOnly. + type: string + queryStringCachingBehavior: + description: IncludeSpecifiedQueryStrings query strings + specified in the query_string_parameters field get included + when the cache key gets generated. UseQueryString cache + every unique URL, each unique URL will have its own + cache key. IgnoreSpecifiedQueryStrings query strings + specified in the query_string_parameters field get excluded + when the cache key gets generated. 
IgnoreQueryString + query strings aren't considered when the cache key gets + generated. Possible values include IgnoreQueryString, + UseQueryString, IgnoreSpecifiedQueryStrings or IncludeSpecifiedQueryStrings. + type: string + queryStringParameters: + description: A list of query string parameter names. + items: + type: string + type: array + type: object + urlRedirectAction: + description: A url_redirect_action block as defined below. + You may not have a url_redirect_action and a url_rewrite_action + defined in the same actions block. + properties: + destinationFragment: + description: The fragment to use in the redirect. The + value must be a string between 0 and 1024 characters + in length, leave blank to preserve the incoming fragment. + Defaults to "". + type: string + destinationHostname: + description: The host name you want the request to be + redirected to. The value must be a string between 0 + and 2048 characters in length, leave blank to preserve + the incoming host. + type: string + destinationPath: + description: The path to use in the redirect. The value + must be a string and include the leading /, leave blank + to preserve the incoming path. Defaults to "". + type: string + queryString: + description: The query string used in the redirect URL. + The value must be in the = or ={action_server_variable} + format and must not include the leading ?, leave blank + to preserve the incoming query string. Maximum allowed + length for this field is 2048 characters. Defaults to + "". + type: string + redirectProtocol: + description: The protocol the request will be redirected + as. Possible values include MatchRequest, Http or Https. + Defaults to MatchRequest. + type: string + redirectType: + description: The response type to return to the requestor. + Possible values include Moved, Found , TemporaryRedirect + or PermanentRedirect. + type: string + type: object + urlRewriteAction: + description: A url_rewrite_action block as defined below. 
+ You may not have a url_rewrite_action and a url_redirect_action + defined in the same actions block. + properties: + destination: + description: The destination path to use in the rewrite. + The destination path overwrites the source pattern. + type: string + preserveUnmatchedPath: + description: Append the remaining path after the source + pattern to the new destination path? Possible values + true or false. Defaults to false. + type: boolean + sourcePattern: + description: The source pattern in the URL path to replace. + This uses prefix-based matching. For example, to match + all URL paths use a forward slash "/" as the source + pattern value. + type: string + type: object + type: object + behaviorOnMatch: + description: If this rule is a match should the rules engine continue + processing the remaining rules or stop? Possible values are + Continue and Stop. Defaults to Continue. + type: string + conditions: + description: A conditions block as defined below. + properties: + clientPortCondition: + description: A client_port_condition block as defined below. + items: + properties: + matchValues: + description: One or more string or integer values(e.g. + "1") representing the value of the request path to + match. Don't include the leading slash (/). If multiple + values are specified, they're evaluated using OR logic. + items: + type: string + type: array + negateCondition: + description: If true operator becomes the opposite of + its value. Possible values true or false. Defaults + to false. Details can be found in the Condition Operator + List below. + type: boolean + operator: + description: A Conditional operator. Possible values + include Any, Equal, Contains, BeginsWith, EndsWith, + LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual + or RegEx. Details can be found in the Condition Operator + List below. + type: string + type: object + type: array + cookiesCondition: + description: A cookies_condition block as defined below. 
+ items: + properties: + cookieName: + description: A string value representing the name of + the cookie. + type: string + matchValues: + description: One or more string or integer values(e.g. + "1") representing the value of the request path to + match. Don't include the leading slash (/). If multiple + values are specified, they're evaluated using OR logic. + items: + type: string + type: array + negateCondition: + description: If true operator becomes the opposite of + its value. Possible values true or false. Defaults + to false. Details can be found in the Condition Operator + List below. + type: boolean + operator: + description: A Conditional operator. Possible values + include Any, Equal, Contains, BeginsWith, EndsWith, + LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual + or RegEx. Details can be found in the Condition Operator + List below. + type: string + transforms: + description: A Conditional operator. Possible values + include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode + or UrlEncode. Details can be found in the Condition + Transform List below. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + hostNameCondition: + description: A host_name_condition block as defined below. + items: + properties: + matchValues: + description: One or more string or integer values(e.g. + "1") representing the value of the request path to + match. Don't include the leading slash (/). If multiple + values are specified, they're evaluated using OR logic. + items: + type: string + type: array + negateCondition: + description: If true operator becomes the opposite of + its value. Possible values true or false. Defaults + to false. Details can be found in the Condition Operator + List below. + type: boolean + operator: + description: A Conditional operator. Possible values + include Any, Equal, Contains, BeginsWith, EndsWith, + LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual + or RegEx. 
Details can be found in the Condition Operator + List below. + type: string + transforms: + description: A Conditional operator. Possible values + include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode + or UrlEncode. Details can be found in the Condition + Transform List below. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + httpVersionCondition: + description: A http_version_condition block as defined below. + items: + properties: + matchValues: + description: One or more string or integer values(e.g. + "1") representing the value of the request path to + match. Don't include the leading slash (/). If multiple + values are specified, they're evaluated using OR logic. + items: + type: string + type: array + x-kubernetes-list-type: set + negateCondition: + description: If true operator becomes the opposite of + its value. Possible values true or false. Defaults + to false. Details can be found in the Condition Operator + List below. + type: boolean + operator: + description: A Conditional operator. Possible values + include Any, Equal, Contains, BeginsWith, EndsWith, + LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual + or RegEx. Details can be found in the Condition Operator + List below. + type: string + type: object + type: array + isDeviceCondition: + description: A is_device_condition block as defined below. + items: + properties: + matchValues: + description: One or more string or integer values(e.g. + "1") representing the value of the request path to + match. Don't include the leading slash (/). If multiple + values are specified, they're evaluated using OR logic. + items: + type: string + type: array + negateCondition: + description: If true operator becomes the opposite of + its value. Possible values true or false. Defaults + to false. Details can be found in the Condition Operator + List below. + type: boolean + operator: + description: A Conditional operator. 
Possible values + include Any, Equal, Contains, BeginsWith, EndsWith, + LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual + or RegEx. Details can be found in the Condition Operator + List below. + type: string + type: object + type: array + postArgsCondition: + description: A post_args_condition block as defined below. + items: + properties: + matchValues: + description: One or more string or integer values(e.g. + "1") representing the value of the request path to + match. Don't include the leading slash (/). If multiple + values are specified, they're evaluated using OR logic. + items: + type: string + type: array + negateCondition: + description: If true operator becomes the opposite of + its value. Possible values true or false. Defaults + to false. Details can be found in the Condition Operator + List below. + type: boolean + operator: + description: A Conditional operator. Possible values + include Any, Equal, Contains, BeginsWith, EndsWith, + LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual + or RegEx. Details can be found in the Condition Operator + List below. + type: string + postArgsName: + description: A string value representing the name of + the POST argument. + type: string + transforms: + description: A Conditional operator. Possible values + include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode + or UrlEncode. Details can be found in the Condition + Transform List below. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + queryStringCondition: + description: A query_string_condition block as defined below. + items: + properties: + matchValues: + description: One or more string or integer values(e.g. + "1") representing the value of the request path to + match. Don't include the leading slash (/). If multiple + values are specified, they're evaluated using OR logic. 
+ items: + type: string + type: array + negateCondition: + description: If true operator becomes the opposite of + its value. Possible values true or false. Defaults + to false. Details can be found in the Condition Operator + List below. + type: boolean + operator: + description: A Conditional operator. Possible values + include Any, Equal, Contains, BeginsWith, EndsWith, + LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual + or RegEx. Details can be found in the Condition Operator + List below. + type: string + transforms: + description: A Conditional operator. Possible values + include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode + or UrlEncode. Details can be found in the Condition + Transform List below. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + remoteAddressCondition: + description: A remote_address_condition block as defined below. + items: + properties: + matchValues: + description: One or more string or integer values(e.g. + "1") representing the value of the request path to + match. Don't include the leading slash (/). If multiple + values are specified, they're evaluated using OR logic. + items: + type: string + type: array + negateCondition: + description: If true operator becomes the opposite of + its value. Possible values true or false. Defaults + to false. Details can be found in the Condition Operator + List below. + type: boolean + operator: + description: A Conditional operator. Possible values + include Any, Equal, Contains, BeginsWith, EndsWith, + LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual + or RegEx. Details can be found in the Condition Operator + List below. + type: string + type: object + type: array + requestBodyCondition: + description: A request_body_condition block as defined below. + items: + properties: + matchValues: + description: One or more string or integer values(e.g. + "1") representing the value of the request path to + match. 
Don't include the leading slash (/). If multiple + values are specified, they're evaluated using OR logic. + items: + type: string + type: array + negateCondition: + description: If true operator becomes the opposite of + its value. Possible values true or false. Defaults + to false. Details can be found in the Condition Operator + List below. + type: boolean + operator: + description: A Conditional operator. Possible values + include Any, Equal, Contains, BeginsWith, EndsWith, + LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual + or RegEx. Details can be found in the Condition Operator + List below. + type: string + transforms: + description: A Conditional operator. Possible values + include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode + or UrlEncode. Details can be found in the Condition + Transform List below. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + requestHeaderCondition: + description: A request_header_condition block as defined below. + items: + properties: + headerName: + description: The name of the header to modify. + type: string + matchValues: + description: One or more string or integer values(e.g. + "1") representing the value of the request path to + match. Don't include the leading slash (/). If multiple + values are specified, they're evaluated using OR logic. + items: + type: string + type: array + negateCondition: + description: If true operator becomes the opposite of + its value. Possible values true or false. Defaults + to false. Details can be found in the Condition Operator + List below. + type: boolean + operator: + description: A Conditional operator. Possible values + include Any, Equal, Contains, BeginsWith, EndsWith, + LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual + or RegEx. Details can be found in the Condition Operator + List below. + type: string + transforms: + description: A Conditional operator. 
Possible values + include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode + or UrlEncode. Details can be found in the Condition + Transform List below. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + requestMethodCondition: + description: A request_method_condition block as defined below. + items: + properties: + matchValues: + description: One or more string or integer values(e.g. + "1") representing the value of the request path to + match. Don't include the leading slash (/). If multiple + values are specified, they're evaluated using OR logic. + items: + type: string + type: array + x-kubernetes-list-type: set + negateCondition: + description: If true operator becomes the opposite of + its value. Possible values true or false. Defaults + to false. Details can be found in the Condition Operator + List below. + type: boolean + operator: + description: A Conditional operator. Possible values + include Any, Equal, Contains, BeginsWith, EndsWith, + LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual + or RegEx. Details can be found in the Condition Operator + List below. + type: string + type: object + type: array + requestSchemeCondition: + description: A request_scheme_condition block as defined below. + items: + properties: + matchValues: + description: One or more string or integer values(e.g. + "1") representing the value of the request path to + match. Don't include the leading slash (/). If multiple + values are specified, they're evaluated using OR logic. + items: + type: string + type: array + negateCondition: + description: If true operator becomes the opposite of + its value. Possible values true or false. Defaults + to false. Details can be found in the Condition Operator + List below. + type: boolean + operator: + description: A Conditional operator. Possible values + include Any, Equal, Contains, BeginsWith, EndsWith, + LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual + or RegEx. 
Details can be found in the Condition Operator + List below. + type: string + type: object + type: array + requestUriCondition: + description: A request_uri_condition block as defined below. + items: + properties: + matchValues: + description: One or more string or integer values(e.g. + "1") representing the value of the request path to + match. Don't include the leading slash (/). If multiple + values are specified, they're evaluated using OR logic. + items: + type: string + type: array + negateCondition: + description: If true operator becomes the opposite of + its value. Possible values true or false. Defaults + to false. Details can be found in the Condition Operator + List below. + type: boolean + operator: + description: A Conditional operator. Possible values + include Any, Equal, Contains, BeginsWith, EndsWith, + LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual + or RegEx. Details can be found in the Condition Operator + List below. + type: string + transforms: + description: A Conditional operator. Possible values + include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode + or UrlEncode. Details can be found in the Condition + Transform List below. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + serverPortCondition: + description: A server_port_condition block as defined below. + items: + properties: + matchValues: + description: One or more string or integer values(e.g. + "1") representing the value of the request path to + match. Don't include the leading slash (/). If multiple + values are specified, they're evaluated using OR logic. + items: + type: string + type: array + x-kubernetes-list-type: set + negateCondition: + description: If true operator becomes the opposite of + its value. Possible values true or false. Defaults + to false. Details can be found in the Condition Operator + List below. + type: boolean + operator: + description: A Conditional operator. 
Possible values + include Any, Equal, Contains, BeginsWith, EndsWith, + LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual + or RegEx. Details can be found in the Condition Operator + List below. + type: string + type: object + type: array + socketAddressCondition: + description: A socket_address_condition block as defined below. + items: + properties: + matchValues: + description: One or more string or integer values(e.g. + "1") representing the value of the request path to + match. Don't include the leading slash (/). If multiple + values are specified, they're evaluated using OR logic. + items: + type: string + type: array + negateCondition: + description: If true operator becomes the opposite of + its value. Possible values true or false. Defaults + to false. Details can be found in the Condition Operator + List below. + type: boolean + operator: + description: A Conditional operator. Possible values + include Any, Equal, Contains, BeginsWith, EndsWith, + LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual + or RegEx. Details can be found in the Condition Operator + List below. + type: string + type: object + type: array + sslProtocolCondition: + description: A ssl_protocol_condition block as defined below. + items: + properties: + matchValues: + description: One or more string or integer values(e.g. + "1") representing the value of the request path to + match. Don't include the leading slash (/). If multiple + values are specified, they're evaluated using OR logic. + items: + type: string + type: array + x-kubernetes-list-type: set + negateCondition: + description: If true operator becomes the opposite of + its value. Possible values true or false. Defaults + to false. Details can be found in the Condition Operator + List below. + type: boolean + operator: + description: A Conditional operator. Possible values + include Any, Equal, Contains, BeginsWith, EndsWith, + LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual + or RegEx. 
Details can be found in the Condition Operator + List below. + type: string + type: object + type: array + urlFileExtensionCondition: + description: A url_file_extension_condition block as defined + below. + items: + properties: + matchValues: + description: One or more string or integer values(e.g. + "1") representing the value of the request path to + match. Don't include the leading slash (/). If multiple + values are specified, they're evaluated using OR logic. + items: + type: string + type: array + negateCondition: + description: If true operator becomes the opposite of + its value. Possible values true or false. Defaults + to false. Details can be found in the Condition Operator + List below. + type: boolean + operator: + description: A Conditional operator. Possible values + include Any, Equal, Contains, BeginsWith, EndsWith, + LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual + or RegEx. Details can be found in the Condition Operator + List below. + type: string + transforms: + description: A Conditional operator. Possible values + include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode + or UrlEncode. Details can be found in the Condition + Transform List below. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + urlFilenameCondition: + description: A url_filename_condition block as defined below. + items: + properties: + matchValues: + description: One or more string or integer values(e.g. + "1") representing the value of the request path to + match. Don't include the leading slash (/). If multiple + values are specified, they're evaluated using OR logic. + items: + type: string + type: array + negateCondition: + description: If true operator becomes the opposite of + its value. Possible values true or false. Defaults + to false. Details can be found in the Condition Operator + List below. + type: boolean + operator: + description: A Conditional operator. 
Possible values + include Any, Equal, Contains, BeginsWith, EndsWith, + LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual + or RegEx. Details can be found in the Condition Operator + List below. + type: string + transforms: + description: A Conditional operator. Possible values + include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode + or UrlEncode. Details can be found in the Condition + Transform List below. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + urlPathCondition: + description: A url_path_condition block as defined below. + items: + properties: + matchValues: + description: One or more string or integer values(e.g. + "1") representing the value of the request path to + match. Don't include the leading slash (/). If multiple + values are specified, they're evaluated using OR logic. + items: + type: string + type: array + negateCondition: + description: If true operator becomes the opposite of + its value. Possible values true or false. Defaults + to false. Details can be found in the Condition Operator + List below. + type: boolean + operator: + description: A Conditional operator. Possible values + include Any, Equal, Contains, BeginsWith, EndsWith, + LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual + or RegEx. Details can be found in the Condition Operator + List below. + type: string + transforms: + description: A Conditional operator. Possible values + include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode + or UrlEncode. Details can be found in the Condition + Transform List below. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + type: object + order: + description: The order in which the rules will be applied for + the Front Door Endpoint. The order value should be sequential + and begin at 1(e.g. 1, 2, 3...). A Front Door Rule with a lesser + order value will be applied before a rule with a greater order + value. 
+ type: number + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. 
+ - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.actions is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.actions) + || (has(self.initProvider) && has(self.initProvider.actions))' + - message: spec.forProvider.order is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.order) + || (has(self.initProvider) && has(self.initProvider.order))' + status: + description: FrontdoorRuleStatus defines the observed state of FrontdoorRule. + properties: + atProvider: + properties: + actions: + description: An actions block as defined below. + properties: + requestHeaderAction: + description: A request_header_action block as defined below. + items: + properties: + headerAction: + description: The action to be taken on the specified + header_name. Possible values include Append, Overwrite + or Delete. + type: string + headerName: + description: The name of the header to modify. + type: string + value: + description: The value to append or overwrite. + type: string + type: object + type: array + responseHeaderAction: + description: A response_header_action block as defined below. + items: + properties: + headerAction: + description: The action to be taken on the specified + header_name. Possible values include Append, Overwrite + or Delete. + type: string + headerName: + description: The name of the header to modify. + type: string + value: + description: The value to append or overwrite. + type: string + type: object + type: array + routeConfigurationOverrideAction: + description: A route_configuration_override_action block as + defined below. 
+ properties: + cacheBehavior: + description: HonorOrigin the Front Door will always honor + origin response header directive. If the origin directive + is missing, Front Door will cache contents anywhere + from 1 to 3 days. OverrideAlways the TTL value returned + from your Front Door Origin is overwritten with the + value specified in the action. This behavior will only + be applied if the response is cacheable. OverrideIfOriginMissing + if no TTL value gets returned from your Front Door Origin, + the rule sets the TTL to the value specified in the + action. This behavior will only be applied if the response + is cacheable. Disabled the Front Door will not cache + the response contents, irrespective of Front Door Origin + response directives. Possible values include HonorOrigin, + OverrideAlways, OverrideIfOriginMissing or Disabled. + type: string + cacheDuration: + description: When Cache behavior is set to Override or + SetIfMissing, this field specifies the cache duration + to use. The maximum duration is 366 days specified in + the d.HH:MM:SS format(e.g. 365.23:59:59). If the desired + maximum cache duration is less than 1 day then the maximum + cache duration should be specified in the HH:MM:SS format(e.g. + 23:59:59). + type: string + cdnFrontdoorOriginGroupId: + description: The Front Door Origin Group resource ID that + the request should be routed to. This overrides the + configuration specified in the Front Door Endpoint route. + type: string + compressionEnabled: + description: Should the Front Door dynamically compress + the content? Possible values include true or false. + type: boolean + forwardingProtocol: + description: The forwarding protocol the request will + be redirected as. This overrides the configuration specified + in the route to be associated with. Possible values + include MatchRequest, HttpOnly or HttpsOnly. 
+ type: string + queryStringCachingBehavior: + description: IncludeSpecifiedQueryStrings query strings + specified in the query_string_parameters field get included + when the cache key gets generated. UseQueryString cache + every unique URL, each unique URL will have its own + cache key. IgnoreSpecifiedQueryStrings query strings + specified in the query_string_parameters field get excluded + when the cache key gets generated. IgnoreQueryString + query strings aren't considered when the cache key gets + generated. Possible values include IgnoreQueryString, + UseQueryString, IgnoreSpecifiedQueryStrings or IncludeSpecifiedQueryStrings. + type: string + queryStringParameters: + description: A list of query string parameter names. + items: + type: string + type: array + type: object + urlRedirectAction: + description: A url_redirect_action block as defined below. + You may not have a url_redirect_action and a url_rewrite_action + defined in the same actions block. + properties: + destinationFragment: + description: The fragment to use in the redirect. The + value must be a string between 0 and 1024 characters + in length, leave blank to preserve the incoming fragment. + Defaults to "". + type: string + destinationHostname: + description: The host name you want the request to be + redirected to. The value must be a string between 0 + and 2048 characters in length, leave blank to preserve + the incoming host. + type: string + destinationPath: + description: The path to use in the redirect. The value + must be a string and include the leading /, leave blank + to preserve the incoming path. Defaults to "". + type: string + queryString: + description: The query string used in the redirect URL. + The value must be in the = or ={action_server_variable} + format and must not include the leading ?, leave blank + to preserve the incoming query string. Maximum allowed + length for this field is 2048 characters. Defaults to + "". 
+ type: string + redirectProtocol: + description: The protocol the request will be redirected + as. Possible values include MatchRequest, Http or Https. + Defaults to MatchRequest. + type: string + redirectType: + description: The response type to return to the requestor. + Possible values include Moved, Found , TemporaryRedirect + or PermanentRedirect. + type: string + type: object + urlRewriteAction: + description: A url_rewrite_action block as defined below. + You may not have a url_rewrite_action and a url_redirect_action + defined in the same actions block. + properties: + destination: + description: The destination path to use in the rewrite. + The destination path overwrites the source pattern. + type: string + preserveUnmatchedPath: + description: Append the remaining path after the source + pattern to the new destination path? Possible values + true or false. Defaults to false. + type: boolean + sourcePattern: + description: The source pattern in the URL path to replace. + This uses prefix-based matching. For example, to match + all URL paths use a forward slash "/" as the source + pattern value. + type: string + type: object + type: object + behaviorOnMatch: + description: If this rule is a match should the rules engine continue + processing the remaining rules or stop? Possible values are + Continue and Stop. Defaults to Continue. + type: string + cdnFrontdoorRuleSetId: + description: The resource ID of the Front Door Rule Set for this + Front Door Rule. Changing this forces a new Front Door Rule + to be created. + type: string + cdnFrontdoorRuleSetName: + description: The name of the Front Door Rule Set containing this + Front Door Rule. + type: string + conditions: + description: A conditions block as defined below. + properties: + clientPortCondition: + description: A client_port_condition block as defined below. + items: + properties: + matchValues: + description: One or more string or integer values(e.g. 
+ "1") representing the value of the request path to + match. Don't include the leading slash (/). If multiple + values are specified, they're evaluated using OR logic. + items: + type: string + type: array + negateCondition: + description: If true operator becomes the opposite of + its value. Possible values true or false. Defaults + to false. Details can be found in the Condition Operator + List below. + type: boolean + operator: + description: A Conditional operator. Possible values + include Any, Equal, Contains, BeginsWith, EndsWith, + LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual + or RegEx. Details can be found in the Condition Operator + List below. + type: string + type: object + type: array + cookiesCondition: + description: A cookies_condition block as defined below. + items: + properties: + cookieName: + description: A string value representing the name of + the cookie. + type: string + matchValues: + description: One or more string or integer values(e.g. + "1") representing the value of the request path to + match. Don't include the leading slash (/). If multiple + values are specified, they're evaluated using OR logic. + items: + type: string + type: array + negateCondition: + description: If true operator becomes the opposite of + its value. Possible values true or false. Defaults + to false. Details can be found in the Condition Operator + List below. + type: boolean + operator: + description: A Conditional operator. Possible values + include Any, Equal, Contains, BeginsWith, EndsWith, + LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual + or RegEx. Details can be found in the Condition Operator + List below. + type: string + transforms: + description: A Conditional operator. Possible values + include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode + or UrlEncode. Details can be found in the Condition + Transform List below. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + hostNameCondition: + description: A host_name_condition block as defined below. + items: + properties: + matchValues: + description: One or more string or integer values(e.g. + "1") representing the value of the request path to + match. Don't include the leading slash (/). If multiple + values are specified, they're evaluated using OR logic. + items: + type: string + type: array + negateCondition: + description: If true operator becomes the opposite of + its value. Possible values true or false. Defaults + to false. Details can be found in the Condition Operator + List below. + type: boolean + operator: + description: A Conditional operator. Possible values + include Any, Equal, Contains, BeginsWith, EndsWith, + LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual + or RegEx. Details can be found in the Condition Operator + List below. + type: string + transforms: + description: A Conditional operator. Possible values + include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode + or UrlEncode. Details can be found in the Condition + Transform List below. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + httpVersionCondition: + description: A http_version_condition block as defined below. + items: + properties: + matchValues: + description: One or more string or integer values(e.g. + "1") representing the value of the request path to + match. Don't include the leading slash (/). If multiple + values are specified, they're evaluated using OR logic. + items: + type: string + type: array + x-kubernetes-list-type: set + negateCondition: + description: If true operator becomes the opposite of + its value. Possible values true or false. Defaults + to false. Details can be found in the Condition Operator + List below. + type: boolean + operator: + description: A Conditional operator. 
Possible values + include Any, Equal, Contains, BeginsWith, EndsWith, + LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual + or RegEx. Details can be found in the Condition Operator + List below. + type: string + type: object + type: array + isDeviceCondition: + description: A is_device_condition block as defined below. + items: + properties: + matchValues: + description: One or more string or integer values(e.g. + "1") representing the value of the request path to + match. Don't include the leading slash (/). If multiple + values are specified, they're evaluated using OR logic. + items: + type: string + type: array + negateCondition: + description: If true operator becomes the opposite of + its value. Possible values true or false. Defaults + to false. Details can be found in the Condition Operator + List below. + type: boolean + operator: + description: A Conditional operator. Possible values + include Any, Equal, Contains, BeginsWith, EndsWith, + LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual + or RegEx. Details can be found in the Condition Operator + List below. + type: string + type: object + type: array + postArgsCondition: + description: A post_args_condition block as defined below. + items: + properties: + matchValues: + description: One or more string or integer values(e.g. + "1") representing the value of the request path to + match. Don't include the leading slash (/). If multiple + values are specified, they're evaluated using OR logic. + items: + type: string + type: array + negateCondition: + description: If true operator becomes the opposite of + its value. Possible values true or false. Defaults + to false. Details can be found in the Condition Operator + List below. + type: boolean + operator: + description: A Conditional operator. Possible values + include Any, Equal, Contains, BeginsWith, EndsWith, + LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual + or RegEx. 
Details can be found in the Condition Operator + List below. + type: string + postArgsName: + description: A string value representing the name of + the POST argument. + type: string + transforms: + description: A Conditional operator. Possible values + include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode + or UrlEncode. Details can be found in the Condition + Transform List below. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + queryStringCondition: + description: A query_string_condition block as defined below. + items: + properties: + matchValues: + description: One or more string or integer values(e.g. + "1") representing the value of the request path to + match. Don't include the leading slash (/). If multiple + values are specified, they're evaluated using OR logic. + items: + type: string + type: array + negateCondition: + description: If true operator becomes the opposite of + its value. Possible values true or false. Defaults + to false. Details can be found in the Condition Operator + List below. + type: boolean + operator: + description: A Conditional operator. Possible values + include Any, Equal, Contains, BeginsWith, EndsWith, + LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual + or RegEx. Details can be found in the Condition Operator + List below. + type: string + transforms: + description: A Conditional operator. Possible values + include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode + or UrlEncode. Details can be found in the Condition + Transform List below. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + remoteAddressCondition: + description: A remote_address_condition block as defined below. + items: + properties: + matchValues: + description: One or more string or integer values(e.g. + "1") representing the value of the request path to + match. Don't include the leading slash (/). 
If multiple + values are specified, they're evaluated using OR logic. + items: + type: string + type: array + negateCondition: + description: If true operator becomes the opposite of + its value. Possible values true or false. Defaults + to false. Details can be found in the Condition Operator + List below. + type: boolean + operator: + description: A Conditional operator. Possible values + include Any, Equal, Contains, BeginsWith, EndsWith, + LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual + or RegEx. Details can be found in the Condition Operator + List below. + type: string + type: object + type: array + requestBodyCondition: + description: A request_body_condition block as defined below. + items: + properties: + matchValues: + description: One or more string or integer values(e.g. + "1") representing the value of the request path to + match. Don't include the leading slash (/). If multiple + values are specified, they're evaluated using OR logic. + items: + type: string + type: array + negateCondition: + description: If true operator becomes the opposite of + its value. Possible values true or false. Defaults + to false. Details can be found in the Condition Operator + List below. + type: boolean + operator: + description: A Conditional operator. Possible values + include Any, Equal, Contains, BeginsWith, EndsWith, + LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual + or RegEx. Details can be found in the Condition Operator + List below. + type: string + transforms: + description: A Conditional operator. Possible values + include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode + or UrlEncode. Details can be found in the Condition + Transform List below. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + requestHeaderCondition: + description: A request_header_condition block as defined below. + items: + properties: + headerName: + description: The name of the header to modify. 
+ type: string + matchValues: + description: One or more string or integer values(e.g. + "1") representing the value of the request path to + match. Don't include the leading slash (/). If multiple + values are specified, they're evaluated using OR logic. + items: + type: string + type: array + negateCondition: + description: If true operator becomes the opposite of + its value. Possible values true or false. Defaults + to false. Details can be found in the Condition Operator + List below. + type: boolean + operator: + description: A Conditional operator. Possible values + include Any, Equal, Contains, BeginsWith, EndsWith, + LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual + or RegEx. Details can be found in the Condition Operator + List below. + type: string + transforms: + description: A Conditional operator. Possible values + include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode + or UrlEncode. Details can be found in the Condition + Transform List below. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + requestMethodCondition: + description: A request_method_condition block as defined below. + items: + properties: + matchValues: + description: One or more string or integer values(e.g. + "1") representing the value of the request path to + match. Don't include the leading slash (/). If multiple + values are specified, they're evaluated using OR logic. + items: + type: string + type: array + x-kubernetes-list-type: set + negateCondition: + description: If true operator becomes the opposite of + its value. Possible values true or false. Defaults + to false. Details can be found in the Condition Operator + List below. + type: boolean + operator: + description: A Conditional operator. Possible values + include Any, Equal, Contains, BeginsWith, EndsWith, + LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual + or RegEx. Details can be found in the Condition Operator + List below. 
+ type: string + type: object + type: array + requestSchemeCondition: + description: A request_scheme_condition block as defined below. + items: + properties: + matchValues: + description: One or more string or integer values(e.g. + "1") representing the value of the request path to + match. Don't include the leading slash (/). If multiple + values are specified, they're evaluated using OR logic. + items: + type: string + type: array + negateCondition: + description: If true operator becomes the opposite of + its value. Possible values true or false. Defaults + to false. Details can be found in the Condition Operator + List below. + type: boolean + operator: + description: A Conditional operator. Possible values + include Any, Equal, Contains, BeginsWith, EndsWith, + LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual + or RegEx. Details can be found in the Condition Operator + List below. + type: string + type: object + type: array + requestUriCondition: + description: A request_uri_condition block as defined below. + items: + properties: + matchValues: + description: One or more string or integer values(e.g. + "1") representing the value of the request path to + match. Don't include the leading slash (/). If multiple + values are specified, they're evaluated using OR logic. + items: + type: string + type: array + negateCondition: + description: If true operator becomes the opposite of + its value. Possible values true or false. Defaults + to false. Details can be found in the Condition Operator + List below. + type: boolean + operator: + description: A Conditional operator. Possible values + include Any, Equal, Contains, BeginsWith, EndsWith, + LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual + or RegEx. Details can be found in the Condition Operator + List below. + type: string + transforms: + description: A Conditional operator. Possible values + include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode + or UrlEncode. 
Details can be found in the Condition + Transform List below. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + serverPortCondition: + description: A server_port_condition block as defined below. + items: + properties: + matchValues: + description: One or more string or integer values(e.g. + "1") representing the value of the request path to + match. Don't include the leading slash (/). If multiple + values are specified, they're evaluated using OR logic. + items: + type: string + type: array + x-kubernetes-list-type: set + negateCondition: + description: If true operator becomes the opposite of + its value. Possible values true or false. Defaults + to false. Details can be found in the Condition Operator + List below. + type: boolean + operator: + description: A Conditional operator. Possible values + include Any, Equal, Contains, BeginsWith, EndsWith, + LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual + or RegEx. Details can be found in the Condition Operator + List below. + type: string + type: object + type: array + socketAddressCondition: + description: A socket_address_condition block as defined below. + items: + properties: + matchValues: + description: One or more string or integer values(e.g. + "1") representing the value of the request path to + match. Don't include the leading slash (/). If multiple + values are specified, they're evaluated using OR logic. + items: + type: string + type: array + negateCondition: + description: If true operator becomes the opposite of + its value. Possible values true or false. Defaults + to false. Details can be found in the Condition Operator + List below. + type: boolean + operator: + description: A Conditional operator. Possible values + include Any, Equal, Contains, BeginsWith, EndsWith, + LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual + or RegEx. Details can be found in the Condition Operator + List below. 
+ type: string + type: object + type: array + sslProtocolCondition: + description: A ssl_protocol_condition block as defined below. + items: + properties: + matchValues: + description: One or more string or integer values(e.g. + "1") representing the value of the request path to + match. Don't include the leading slash (/). If multiple + values are specified, they're evaluated using OR logic. + items: + type: string + type: array + x-kubernetes-list-type: set + negateCondition: + description: If true operator becomes the opposite of + its value. Possible values true or false. Defaults + to false. Details can be found in the Condition Operator + List below. + type: boolean + operator: + description: A Conditional operator. Possible values + include Any, Equal, Contains, BeginsWith, EndsWith, + LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual + or RegEx. Details can be found in the Condition Operator + List below. + type: string + type: object + type: array + urlFileExtensionCondition: + description: A url_file_extension_condition block as defined + below. + items: + properties: + matchValues: + description: One or more string or integer values(e.g. + "1") representing the value of the request path to + match. Don't include the leading slash (/). If multiple + values are specified, they're evaluated using OR logic. + items: + type: string + type: array + negateCondition: + description: If true operator becomes the opposite of + its value. Possible values true or false. Defaults + to false. Details can be found in the Condition Operator + List below. + type: boolean + operator: + description: A Conditional operator. Possible values + include Any, Equal, Contains, BeginsWith, EndsWith, + LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual + or RegEx. Details can be found in the Condition Operator + List below. + type: string + transforms: + description: A Conditional operator. 
Possible values + include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode + or UrlEncode. Details can be found in the Condition + Transform List below. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + urlFilenameCondition: + description: A url_filename_condition block as defined below. + items: + properties: + matchValues: + description: One or more string or integer values(e.g. + "1") representing the value of the request path to + match. Don't include the leading slash (/). If multiple + values are specified, they're evaluated using OR logic. + items: + type: string + type: array + negateCondition: + description: If true operator becomes the opposite of + its value. Possible values true or false. Defaults + to false. Details can be found in the Condition Operator + List below. + type: boolean + operator: + description: A Conditional operator. Possible values + include Any, Equal, Contains, BeginsWith, EndsWith, + LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual + or RegEx. Details can be found in the Condition Operator + List below. + type: string + transforms: + description: A Conditional operator. Possible values + include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode + or UrlEncode. Details can be found in the Condition + Transform List below. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + urlPathCondition: + description: A url_path_condition block as defined below. + items: + properties: + matchValues: + description: One or more string or integer values(e.g. + "1") representing the value of the request path to + match. Don't include the leading slash (/). If multiple + values are specified, they're evaluated using OR logic. + items: + type: string + type: array + negateCondition: + description: If true operator becomes the opposite of + its value. Possible values true or false. Defaults + to false. 
Details can be found in the Condition Operator + List below. + type: boolean + operator: + description: A Conditional operator. Possible values + include Any, Equal, Contains, BeginsWith, EndsWith, + LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual + or RegEx. Details can be found in the Condition Operator + List below. + type: string + transforms: + description: A Conditional operator. Possible values + include Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode + or UrlEncode. Details can be found in the Condition + Transform List below. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + type: object + id: + description: The ID of the Front Door Rule. + type: string + order: + description: The order in which the rules will be applied for + the Front Door Endpoint. The order value should be sequential + and begin at 1(e.g. 1, 2, 3...). A Front Door Rule with a lesser + order value will be applied before a rule with a greater order + value. + type: number + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. 
+ type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/cdn.azure.upbound.io_frontdoorsecuritypolicies.yaml b/package/crds/cdn.azure.upbound.io_frontdoorsecuritypolicies.yaml index 6a57ebafc..7fc846b91 100644 --- a/package/crds/cdn.azure.upbound.io_frontdoorsecuritypolicies.yaml +++ b/package/crds/cdn.azure.upbound.io_frontdoorsecuritypolicies.yaml @@ -884,3 +884,840 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: FrontdoorSecurityPolicy is the Schema for the FrontdoorSecurityPolicys + API. Manages a Front Door (standard/premium) Security Policy. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: FrontdoorSecurityPolicySpec defines the desired state of + FrontdoorSecurityPolicy + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + cdnFrontdoorProfileId: + description: The Front Door Profile Resource Id that is linked + to this Front Door Security Policy. Changing this forces a new + Front Door Security Policy to be created. + type: string + cdnFrontdoorProfileIdRef: + description: Reference to a FrontdoorProfile in cdn to populate + cdnFrontdoorProfileId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + cdnFrontdoorProfileIdSelector: + description: Selector for a FrontdoorProfile in cdn to populate + cdnFrontdoorProfileId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + securityPolicies: + description: An security_policies block as defined below. Changing + this forces a new Front Door Security Policy to be created. 
+ properties: + firewall: + description: An firewall block as defined below. Changing + this forces a new Front Door Security Policy to be created. + properties: + association: + description: An association block as defined below. Changing + this forces a new Front Door Security Policy to be created. + properties: + domain: + description: One or more domain blocks as defined + below. Changing this forces a new Front Door Security + Policy to be created. + items: + properties: + cdnFrontdoorDomainId: + description: The Resource Id of the Front Door + Custom Domain or Front Door Endpoint that + should be bound to this Front Door Security + Policy. Changing this forces a new Front Door + Security Policy to be created. + type: string + cdnFrontdoorDomainIdRef: + description: Reference to a FrontdoorCustomDomain + in cdn to populate cdnFrontdoorDomainId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + cdnFrontdoorDomainIdSelector: + description: Selector for a FrontdoorCustomDomain + in cdn to populate cdnFrontdoorDomainId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + patternsToMatch: + description: The list of paths to match for this firewall + policy. Possible value includes /*. Changing this + forces a new Front Door Security Policy to be created. + items: + type: string + type: array + type: object + cdnFrontdoorFirewallPolicyId: + description: The Resource Id of the Front Door Firewall + Policy that should be linked to this Front Door Security + Policy. Changing this forces a new Front Door Security + Policy to be created. + type: string + cdnFrontdoorFirewallPolicyIdRef: + description: Reference to a FrontdoorFirewallPolicy in + cdn to populate cdnFrontdoorFirewallPolicyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + cdnFrontdoorFirewallPolicyIdSelector: + description: Selector for a FrontdoorFirewallPolicy in + cdn to populate cdnFrontdoorFirewallPolicyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + securityPolicies: + description: An security_policies block as defined below. Changing + this forces a new Front Door Security Policy to be created. + properties: + firewall: + description: An firewall block as defined below. Changing + this forces a new Front Door Security Policy to be created. + properties: + association: + description: An association block as defined below. Changing + this forces a new Front Door Security Policy to be created. + properties: + domain: + description: One or more domain blocks as defined + below. Changing this forces a new Front Door Security + Policy to be created. + items: + properties: + cdnFrontdoorDomainId: + description: The Resource Id of the Front Door + Custom Domain or Front Door Endpoint that + should be bound to this Front Door Security + Policy. Changing this forces a new Front Door + Security Policy to be created. + type: string + cdnFrontdoorDomainIdRef: + description: Reference to a FrontdoorCustomDomain + in cdn to populate cdnFrontdoorDomainId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + cdnFrontdoorDomainIdSelector: + description: Selector for a FrontdoorCustomDomain + in cdn to populate cdnFrontdoorDomainId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + patternsToMatch: + description: The list of paths to match for this firewall + policy. Possible value includes /*. Changing this + forces a new Front Door Security Policy to be created. + items: + type: string + type: array + type: object + cdnFrontdoorFirewallPolicyId: + description: The Resource Id of the Front Door Firewall + Policy that should be linked to this Front Door Security + Policy. Changing this forces a new Front Door Security + Policy to be created. + type: string + cdnFrontdoorFirewallPolicyIdRef: + description: Reference to a FrontdoorFirewallPolicy in + cdn to populate cdnFrontdoorFirewallPolicyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + cdnFrontdoorFirewallPolicyIdSelector: + description: Selector for a FrontdoorFirewallPolicy in + cdn to populate cdnFrontdoorFirewallPolicyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.securityPolicies is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.securityPolicies) + || (has(self.initProvider) && has(self.initProvider.securityPolicies))' + status: + description: FrontdoorSecurityPolicyStatus defines the observed state + of FrontdoorSecurityPolicy. + properties: + atProvider: + properties: + cdnFrontdoorProfileId: + description: The Front Door Profile Resource Id that is linked + to this Front Door Security Policy. Changing this forces a new + Front Door Security Policy to be created. + type: string + id: + description: The ID of the Front Door Security Policy. + type: string + securityPolicies: + description: An security_policies block as defined below. Changing + this forces a new Front Door Security Policy to be created. + properties: + firewall: + description: An firewall block as defined below. Changing + this forces a new Front Door Security Policy to be created. + properties: + association: + description: An association block as defined below. Changing + this forces a new Front Door Security Policy to be created. + properties: + domain: + description: One or more domain blocks as defined + below. Changing this forces a new Front Door Security + Policy to be created. 
+ items: + properties: + active: + description: (Computed) Is the Front Door Custom + Domain/Endpoint activated? + type: boolean + cdnFrontdoorDomainId: + description: The Resource Id of the Front Door + Custom Domain or Front Door Endpoint that + should be bound to this Front Door Security + Policy. Changing this forces a new Front Door + Security Policy to be created. + type: string + type: object + type: array + patternsToMatch: + description: The list of paths to match for this firewall + policy. Possible value includes /*. Changing this + forces a new Front Door Security Policy to be created. + items: + type: string + type: array + type: object + cdnFrontdoorFirewallPolicyId: + description: The Resource Id of the Front Door Firewall + Policy that should be linked to this Front Door Security + Policy. Changing this forces a new Front Door Security + Policy to be created. + type: string + type: object + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? 
+ type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/cognitiveservices.azure.upbound.io_accounts.yaml b/package/crds/cognitiveservices.azure.upbound.io_accounts.yaml index 7c6935437..2883fd429 100644 --- a/package/crds/cognitiveservices.azure.upbound.io_accounts.yaml +++ b/package/crds/cognitiveservices.azure.upbound.io_accounts.yaml @@ -1098,3 +1098,1065 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Account is the Schema for the Accounts API. Manages a Cognitive + Services Account. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: AccountSpec defines the desired state of Account + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + customQuestionAnsweringSearchServiceId: + description: If kind is TextAnalytics this specifies the ID of + the Search service. + type: string + customQuestionAnsweringSearchServiceKeySecretRef: + description: If kind is TextAnalytics this specifies the key of + the Search service. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + customSubdomainName: + description: The subdomain name used for token-based authentication. + This property is required when network_acls is specified. 
Changing + this forces a new resource to be created. + type: string + customerManagedKey: + description: A customer_managed_key block as documented below. + properties: + identityClientId: + description: The Client ID of the User Assigned Identity that + has access to the key. This property only needs to be specified + when there're multiple identities attached to the Cognitive + Account. + type: string + keyVaultKeyId: + description: The ID of the Key Vault Key which should be used + to Encrypt the data in this Cognitive Account. + type: string + type: object + dynamicThrottlingEnabled: + description: Whether to enable the dynamic throttling for this + Cognitive Service Account. + type: boolean + fqdns: + description: List of FQDNs allowed for the Cognitive Account. + items: + type: string + type: array + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Cognitive Account. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Cognitive Account. Possible + values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + kind: + description: Specifies the type of Cognitive Service Account that + should be created. 
Possible values are Academic, AnomalyDetector, + Bing.Autosuggest, Bing.Autosuggest.v7, Bing.CustomSearch, Bing.Search, + Bing.Search.v7, Bing.Speech, Bing.SpellCheck, Bing.SpellCheck.v7, + CognitiveServices, ComputerVision, ContentModerator, ContentSafety, + CustomSpeech, CustomVision.Prediction, CustomVision.Training, + Emotion, Face, FormRecognizer, ImmersiveReader, LUIS, LUIS.Authoring, + MetricsAdvisor, OpenAI, Personalizer, QnAMaker, Recommendations, + SpeakerRecognition, Speech, SpeechServices, SpeechTranslation, + TextAnalytics, TextTranslation and WebLM. Changing this forces + a new resource to be created. + type: string + localAuthEnabled: + description: Whether local authentication methods is enabled for + the Cognitive Account. Defaults to true. + type: boolean + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + metricsAdvisorAadClientId: + description: The Azure AD Client ID (Application ID). This attribute + is only set when kind is MetricsAdvisor. Changing this forces + a new resource to be created. + type: string + metricsAdvisorAadTenantId: + description: The Azure AD Tenant ID. This attribute is only set + when kind is MetricsAdvisor. Changing this forces a new resource + to be created. + type: string + metricsAdvisorSuperUserName: + description: The super user of Metrics Advisor. This attribute + is only set when kind is MetricsAdvisor. Changing this forces + a new resource to be created. + type: string + metricsAdvisorWebsiteName: + description: The website name of Metrics Advisor. This attribute + is only set when kind is MetricsAdvisor. Changing this forces + a new resource to be created. + type: string + networkAcls: + description: A network_acls block as defined below. When this + property is specified, custom_subdomain_name is also required + to be set. 
+ properties: + defaultAction: + description: The Default Action to use when no rules match + from ip_rules / virtual_network_rules. Possible values are + Allow and Deny. + type: string + ipRules: + description: One or more IP Addresses, or CIDR Blocks which + should be able to access the Cognitive Account. + items: + type: string + type: array + x-kubernetes-list-type: set + virtualNetworkRules: + description: A virtual_network_rules block as defined below. + items: + properties: + ignoreMissingVnetServiceEndpoint: + description: Whether ignore missing vnet service endpoint + or not. Default to false. + type: boolean + subnetId: + description: The ID of the subnet which should be able + to access this Cognitive Account. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + type: object + outboundNetworkAccessRestricted: + description: Whether outbound network access is restricted for + the Cognitive Account. Defaults to false. + type: boolean + publicNetworkAccessEnabled: + description: Whether public network access is allowed for the + Cognitive Account. Defaults to true. + type: boolean + qnaRuntimeEndpoint: + description: A URL to link a QnAMaker cognitive account to a QnA + runtime. + type: string + resourceGroupName: + description: The name of the resource group in which the Cognitive + Service Account is created. Changing this forces a new resource + to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + skuName: + description: Specifies the SKU Name for this Cognitive Service + Account. 
Possible values are F0, F1, S0, S, S1, S2, S3, S4, + S5, S6, P0, P1, P2, E0 and DC0. + type: string + storage: + description: A storage block as defined below. + items: + properties: + identityClientId: + description: The client ID of the managed identity associated + with the storage resource. + type: string + storageAccountId: + description: Full resource id of a Microsoft.Storage resource. + type: string + type: object + type: array + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + customQuestionAnsweringSearchServiceId: + description: If kind is TextAnalytics this specifies the ID of + the Search service. + type: string + customSubdomainName: + description: The subdomain name used for token-based authentication. + This property is required when network_acls is specified. Changing + this forces a new resource to be created. + type: string + customerManagedKey: + description: A customer_managed_key block as documented below. + properties: + identityClientId: + description: The Client ID of the User Assigned Identity that + has access to the key. This property only needs to be specified + when there're multiple identities attached to the Cognitive + Account. 
+ type: string + keyVaultKeyId: + description: The ID of the Key Vault Key which should be used + to Encrypt the data in this Cognitive Account. + type: string + type: object + dynamicThrottlingEnabled: + description: Whether to enable the dynamic throttling for this + Cognitive Service Account. + type: boolean + fqdns: + description: List of FQDNs allowed for the Cognitive Account. + items: + type: string + type: array + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Cognitive Account. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Cognitive Account. Possible + values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + kind: + description: Specifies the type of Cognitive Service Account that + should be created. Possible values are Academic, AnomalyDetector, + Bing.Autosuggest, Bing.Autosuggest.v7, Bing.CustomSearch, Bing.Search, + Bing.Search.v7, Bing.Speech, Bing.SpellCheck, Bing.SpellCheck.v7, + CognitiveServices, ComputerVision, ContentModerator, ContentSafety, + CustomSpeech, CustomVision.Prediction, CustomVision.Training, + Emotion, Face, FormRecognizer, ImmersiveReader, LUIS, LUIS.Authoring, + MetricsAdvisor, OpenAI, Personalizer, QnAMaker, Recommendations, + SpeakerRecognition, Speech, SpeechServices, SpeechTranslation, + TextAnalytics, TextTranslation and WebLM. Changing this forces + a new resource to be created. + type: string + localAuthEnabled: + description: Whether local authentication methods is enabled for + the Cognitive Account. Defaults to true. + type: boolean + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. 
+ type: string + metricsAdvisorAadClientId: + description: The Azure AD Client ID (Application ID). This attribute + is only set when kind is MetricsAdvisor. Changing this forces + a new resource to be created. + type: string + metricsAdvisorAadTenantId: + description: The Azure AD Tenant ID. This attribute is only set + when kind is MetricsAdvisor. Changing this forces a new resource + to be created. + type: string + metricsAdvisorSuperUserName: + description: The super user of Metrics Advisor. This attribute + is only set when kind is MetricsAdvisor. Changing this forces + a new resource to be created. + type: string + metricsAdvisorWebsiteName: + description: The website name of Metrics Advisor. This attribute + is only set when kind is MetricsAdvisor. Changing this forces + a new resource to be created. + type: string + networkAcls: + description: A network_acls block as defined below. When this + property is specified, custom_subdomain_name is also required + to be set. + properties: + defaultAction: + description: The Default Action to use when no rules match + from ip_rules / virtual_network_rules. Possible values are + Allow and Deny. + type: string + ipRules: + description: One or more IP Addresses, or CIDR Blocks which + should be able to access the Cognitive Account. + items: + type: string + type: array + x-kubernetes-list-type: set + virtualNetworkRules: + description: A virtual_network_rules block as defined below. + items: + properties: + ignoreMissingVnetServiceEndpoint: + description: Whether ignore missing vnet service endpoint + or not. Default to false. + type: boolean + subnetId: + description: The ID of the subnet which should be able + to access this Cognitive Account. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + type: object + outboundNetworkAccessRestricted: + description: Whether outbound network access is restricted for + the Cognitive Account. Defaults to false. + type: boolean + publicNetworkAccessEnabled: + description: Whether public network access is allowed for the + Cognitive Account. Defaults to true. + type: boolean + qnaRuntimeEndpoint: + description: A URL to link a QnAMaker cognitive account to a QnA + runtime. + type: string + skuName: + description: Specifies the SKU Name for this Cognitive Service + Account. Possible values are F0, F1, S0, S, S1, S2, S3, S4, + S5, S6, P0, P1, P2, E0 and DC0. + type: string + storage: + description: A storage block as defined below. + items: + properties: + identityClientId: + description: The client ID of the managed identity associated + with the storage resource. + type: string + storageAccountId: + description: Full resource id of a Microsoft.Storage resource. + type: string + type: object + type: array + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.kind is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.kind) + || (has(self.initProvider) && has(self.initProvider.kind))' + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.skuName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.skuName) + || (has(self.initProvider) && has(self.initProvider.skuName))' + status: + description: AccountStatus defines the observed state of Account. 
+ properties: + atProvider: + properties: + customQuestionAnsweringSearchServiceId: + description: If kind is TextAnalytics this specifies the ID of + the Search service. + type: string + customSubdomainName: + description: The subdomain name used for token-based authentication. + This property is required when network_acls is specified. Changing + this forces a new resource to be created. + type: string + customerManagedKey: + description: A customer_managed_key block as documented below. + properties: + identityClientId: + description: The Client ID of the User Assigned Identity that + has access to the key. This property only needs to be specified + when there're multiple identities attached to the Cognitive + Account. + type: string + keyVaultKeyId: + description: The ID of the Key Vault Key which should be used + to Encrypt the data in this Cognitive Account. + type: string + type: object + dynamicThrottlingEnabled: + description: Whether to enable the dynamic throttling for this + Cognitive Service Account. + type: boolean + endpoint: + description: The endpoint used to connect to the Cognitive Service + Account. + type: string + fqdns: + description: List of FQDNs allowed for the Cognitive Account. + items: + type: string + type: array + id: + description: The ID of the Cognitive Service Account. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Cognitive Account. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Principal ID associated with this Managed + Service Identity. + type: string + tenantId: + description: The Tenant ID associated with this Managed Service + Identity. + type: string + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Cognitive Account. 
Possible + values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + kind: + description: Specifies the type of Cognitive Service Account that + should be created. Possible values are Academic, AnomalyDetector, + Bing.Autosuggest, Bing.Autosuggest.v7, Bing.CustomSearch, Bing.Search, + Bing.Search.v7, Bing.Speech, Bing.SpellCheck, Bing.SpellCheck.v7, + CognitiveServices, ComputerVision, ContentModerator, ContentSafety, + CustomSpeech, CustomVision.Prediction, CustomVision.Training, + Emotion, Face, FormRecognizer, ImmersiveReader, LUIS, LUIS.Authoring, + MetricsAdvisor, OpenAI, Personalizer, QnAMaker, Recommendations, + SpeakerRecognition, Speech, SpeechServices, SpeechTranslation, + TextAnalytics, TextTranslation and WebLM. Changing this forces + a new resource to be created. + type: string + localAuthEnabled: + description: Whether local authentication methods is enabled for + the Cognitive Account. Defaults to true. + type: boolean + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + metricsAdvisorAadClientId: + description: The Azure AD Client ID (Application ID). This attribute + is only set when kind is MetricsAdvisor. Changing this forces + a new resource to be created. + type: string + metricsAdvisorAadTenantId: + description: The Azure AD Tenant ID. This attribute is only set + when kind is MetricsAdvisor. Changing this forces a new resource + to be created. + type: string + metricsAdvisorSuperUserName: + description: The super user of Metrics Advisor. This attribute + is only set when kind is MetricsAdvisor. Changing this forces + a new resource to be created. + type: string + metricsAdvisorWebsiteName: + description: The website name of Metrics Advisor. This attribute + is only set when kind is MetricsAdvisor. Changing this forces + a new resource to be created. 
+ type: string + networkAcls: + description: A network_acls block as defined below. When this + property is specified, custom_subdomain_name is also required + to be set. + properties: + defaultAction: + description: The Default Action to use when no rules match + from ip_rules / virtual_network_rules. Possible values are + Allow and Deny. + type: string + ipRules: + description: One or more IP Addresses, or CIDR Blocks which + should be able to access the Cognitive Account. + items: + type: string + type: array + x-kubernetes-list-type: set + virtualNetworkRules: + description: A virtual_network_rules block as defined below. + items: + properties: + ignoreMissingVnetServiceEndpoint: + description: Whether ignore missing vnet service endpoint + or not. Default to false. + type: boolean + subnetId: + description: The ID of the subnet which should be able + to access this Cognitive Account. + type: string + type: object + type: array + type: object + outboundNetworkAccessRestricted: + description: Whether outbound network access is restricted for + the Cognitive Account. Defaults to false. + type: boolean + publicNetworkAccessEnabled: + description: Whether public network access is allowed for the + Cognitive Account. Defaults to true. + type: boolean + qnaRuntimeEndpoint: + description: A URL to link a QnAMaker cognitive account to a QnA + runtime. + type: string + resourceGroupName: + description: The name of the resource group in which the Cognitive + Service Account is created. Changing this forces a new resource + to be created. + type: string + skuName: + description: Specifies the SKU Name for this Cognitive Service + Account. Possible values are F0, F1, S0, S, S1, S2, S3, S4, + S5, S6, P0, P1, P2, E0 and DC0. + type: string + storage: + description: A storage block as defined below. + items: + properties: + identityClientId: + description: The client ID of the managed identity associated + with the storage resource. 
+ type: string + storageAccountId: + description: Full resource id of a Microsoft.Storage resource. + type: string + type: object + type: array + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/cognitiveservices.azure.upbound.io_deployments.yaml b/package/crds/cognitiveservices.azure.upbound.io_deployments.yaml index 302a9e9f5..251bb5003 100644 --- a/package/crds/cognitiveservices.azure.upbound.io_deployments.yaml +++ b/package/crds/cognitiveservices.azure.upbound.io_deployments.yaml @@ -620,3 +620,584 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Deployment is the Schema for the Deployments API. Manages a Cognitive + Services Account Deployment. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DeploymentSpec defines the desired state of Deployment + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + cognitiveAccountId: + description: The ID of the Cognitive Services Account. Changing + this forces a new resource to be created. + type: string + cognitiveAccountIdRef: + description: Reference to a Account in cognitiveservices to populate + cognitiveAccountId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + cognitiveAccountIdSelector: + description: Selector for a Account in cognitiveservices to populate + cognitiveAccountId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + model: + description: A model block as defined below. Changing this forces + a new resource to be created. + properties: + format: + description: The format of the Cognitive Services Account + Deployment model. Changing this forces a new resource to + be created. Possible value is OpenAI. + type: string + name: + description: The name of the Cognitive Services Account Deployment + model. Changing this forces a new resource to be created. + type: string + version: + description: The version of Cognitive Services Account Deployment + model. If version is not specified, the default version + of the model at the time will be assigned. 
+ type: string + type: object + raiPolicyName: + description: The name of RAI policy. + type: string + scale: + description: A scale block as defined below. + properties: + capacity: + description: Tokens-per-Minute (TPM). The unit of measure + for this field is in the thousands of Tokens-per-Minute. + Defaults to 1 which means that the limitation is 1000 tokens + per minute. If the resources SKU supports scale in/out then + the capacity field should be included in the resources' + configuration. If the scale in/out is not supported by the + resources SKU then this field can be safely omitted. For + more information about TPM please see the product documentation. + type: number + family: + description: If the service has different generations of hardware, + for the same SKU, then that can be captured here. Changing + this forces a new resource to be created. + type: string + size: + description: The SKU size. When the name field is the combination + of tier and some other value, this would be the standalone + code. Changing this forces a new resource to be created. + type: string + tier: + description: Possible values are Free, Basic, Standard, Premium, + Enterprise. Changing this forces a new resource to be created. + type: string + type: + description: The name of the SKU. Ex - Standard or P3. It + is typically a letter+number code. Changing this forces + a new resource to be created. + type: string + type: object + versionUpgradeOption: + description: Deployment model version upgrade option. Possible + values are OnceNewDefaultVersionAvailable, OnceCurrentVersionExpired, + and NoAutoUpgrade. Defaults to OnceNewDefaultVersionAvailable. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. 
The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + model: + description: A model block as defined below. Changing this forces + a new resource to be created. + properties: + format: + description: The format of the Cognitive Services Account + Deployment model. Changing this forces a new resource to + be created. Possible value is OpenAI. + type: string + name: + description: The name of the Cognitive Services Account Deployment + model. Changing this forces a new resource to be created. + type: string + version: + description: The version of Cognitive Services Account Deployment + model. If version is not specified, the default version + of the model at the time will be assigned. + type: string + type: object + raiPolicyName: + description: The name of RAI policy. + type: string + scale: + description: A scale block as defined below. + properties: + capacity: + description: Tokens-per-Minute (TPM). The unit of measure + for this field is in the thousands of Tokens-per-Minute. + Defaults to 1 which means that the limitation is 1000 tokens + per minute. If the resources SKU supports scale in/out then + the capacity field should be included in the resources' + configuration. If the scale in/out is not supported by the + resources SKU then this field can be safely omitted. For + more information about TPM please see the product documentation. + type: number + family: + description: If the service has different generations of hardware, + for the same SKU, then that can be captured here. Changing + this forces a new resource to be created. + type: string + size: + description: The SKU size. 
When the name field is the combination + of tier and some other value, this would be the standalone + code. Changing this forces a new resource to be created. + type: string + tier: + description: Possible values are Free, Basic, Standard, Premium, + Enterprise. Changing this forces a new resource to be created. + type: string + type: + description: The name of the SKU. Ex - Standard or P3. It + is typically a letter+number code. Changing this forces + a new resource to be created. + type: string + type: object + versionUpgradeOption: + description: Deployment model version upgrade option. Possible + values are OnceNewDefaultVersionAvailable, OnceCurrentVersionExpired, + and NoAutoUpgrade. Defaults to OnceNewDefaultVersionAvailable. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.model is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.model) + || (has(self.initProvider) && has(self.initProvider.model))' + - message: spec.forProvider.scale is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.scale) + || (has(self.initProvider) && has(self.initProvider.scale))' + status: + description: DeploymentStatus defines the observed state of Deployment. + properties: + atProvider: + properties: + cognitiveAccountId: + description: The ID of the Cognitive Services Account. Changing + this forces a new resource to be created. + type: string + id: + description: The ID of the Deployment for Azure Cognitive Services + Account. + type: string + model: + description: A model block as defined below. Changing this forces + a new resource to be created. + properties: + format: + description: The format of the Cognitive Services Account + Deployment model. Changing this forces a new resource to + be created. Possible value is OpenAI. + type: string + name: + description: The name of the Cognitive Services Account Deployment + model. Changing this forces a new resource to be created. 
+ type: string + version: + description: The version of Cognitive Services Account Deployment + model. If version is not specified, the default version + of the model at the time will be assigned. + type: string + type: object + raiPolicyName: + description: The name of RAI policy. + type: string + scale: + description: A scale block as defined below. + properties: + capacity: + description: Tokens-per-Minute (TPM). The unit of measure + for this field is in the thousands of Tokens-per-Minute. + Defaults to 1 which means that the limitation is 1000 tokens + per minute. If the resources SKU supports scale in/out then + the capacity field should be included in the resources' + configuration. If the scale in/out is not supported by the + resources SKU then this field can be safely omitted. For + more information about TPM please see the product documentation. + type: number + family: + description: If the service has different generations of hardware, + for the same SKU, then that can be captured here. Changing + this forces a new resource to be created. + type: string + size: + description: The SKU size. When the name field is the combination + of tier and some other value, this would be the standalone + code. Changing this forces a new resource to be created. + type: string + tier: + description: Possible values are Free, Basic, Standard, Premium, + Enterprise. Changing this forces a new resource to be created. + type: string + type: + description: The name of the SKU. Ex - Standard or P3. It + is typically a letter+number code. Changing this forces + a new resource to be created. + type: string + type: object + versionUpgradeOption: + description: Deployment model version upgrade option. Possible + values are OnceNewDefaultVersionAvailable, OnceCurrentVersionExpired, + and NoAutoUpgrade. Defaults to OnceNewDefaultVersionAvailable. + type: string + type: object + conditions: + description: Conditions of the resource. 
+ items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/compute.azure.upbound.io_capacityreservations.yaml b/package/crds/compute.azure.upbound.io_capacityreservations.yaml index 825710e03..1be14862f 100644 --- a/package/crds/compute.azure.upbound.io_capacityreservations.yaml +++ b/package/crds/compute.azure.upbound.io_capacityreservations.yaml @@ -491,3 +491,470 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: CapacityReservation is the Schema for the CapacityReservations + API. Manages a Capacity Reservation within a Capacity Reservation Group. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: CapacityReservationSpec defines the desired state of CapacityReservation + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + capacityReservationGroupId: + description: The ID of the Capacity Reservation Group where the + Capacity Reservation exists. Changing this forces a new resource + to be created. + type: string + capacityReservationGroupIdRef: + description: Reference to a CapacityReservationGroup in compute + to populate capacityReservationGroupId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + capacityReservationGroupIdSelector: + description: Selector for a CapacityReservationGroup in compute + to populate capacityReservationGroupId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sku: + description: A sku block as defined below. + properties: + capacity: + description: Specifies the number of instances to be reserved. + It must be a positive integer and not exceed the quota in + the subscription. + type: number + name: + description: Name of the sku, such as Standard_F2. Changing + this forces a new resource to be created. 
+ type: string + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + zone: + description: Specifies the Availability Zone for this Capacity + Reservation. Changing this forces a new resource to be created. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + sku: + description: A sku block as defined below. + properties: + capacity: + description: Specifies the number of instances to be reserved. + It must be a positive integer and not exceed the quota in + the subscription. + type: number + name: + description: Name of the sku, such as Standard_F2. Changing + this forces a new resource to be created. + type: string + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + zone: + description: Specifies the Availability Zone for this Capacity + Reservation. Changing this forces a new resource to be created. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. 
+ ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.sku is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.sku) + || (has(self.initProvider) && has(self.initProvider.sku))' + status: + description: CapacityReservationStatus defines the observed state of CapacityReservation. + properties: + atProvider: + properties: + capacityReservationGroupId: + description: The ID of the Capacity Reservation Group where the + Capacity Reservation exists. Changing this forces a new resource + to be created. 
+ type: string + id: + description: The ID of the Capacity Reservation. + type: string + sku: + description: A sku block as defined below. + properties: + capacity: + description: Specifies the number of instances to be reserved. + It must be a positive integer and not exceed the quota in + the subscription. + type: number + name: + description: Name of the sku, such as Standard_F2. Changing + this forces a new resource to be created. + type: string + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + zone: + description: Specifies the Availability Zone for this Capacity + Reservation. Changing this forces a new resource to be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/compute.azure.upbound.io_diskencryptionsets.yaml b/package/crds/compute.azure.upbound.io_diskencryptionsets.yaml index a31076fd3..76d73bf16 100644 --- a/package/crds/compute.azure.upbound.io_diskencryptionsets.yaml +++ b/package/crds/compute.azure.upbound.io_diskencryptionsets.yaml @@ -726,3 +726,705 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: DiskEncryptionSet is the Schema for the DiskEncryptionSets API. + Manages a Disk Encryption Set. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. 
+ Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DiskEncryptionSetSpec defines the desired state of DiskEncryptionSet + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + autoKeyRotationEnabled: + description: Boolean flag to specify whether Azure Disk Encryption + Set automatically rotates the encryption Key to latest version + or not. Possible values are true or false. Defaults to false. + type: boolean + encryptionType: + description: The type of key used to encrypt the data of the disk. + Possible values are EncryptionAtRestWithCustomerKey, EncryptionAtRestWithPlatformAndCustomerKeys + and ConfidentialVmEncryptedWithCustomerKey. Defaults to EncryptionAtRestWithCustomerKey. + Changing this forces a new resource to be created. + type: string + federatedClientId: + description: Multi-tenant application client id to access key + vault in a different tenant. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of User Assigned Managed Identity IDs + to be assigned to this Disk Encryption Set. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: The type of Managed Service Identity that is + configured on this Disk Encryption Set. Possible values + are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned + (to enable both). + type: string + type: object + keyVaultKeyId: + description: Specifies the URL to a Key Vault Key (either from + a Key Vault Key, or the Key URL for the Key Vault Secret). + type: string + keyVaultKeyIdRef: + description: Reference to a Key in keyvault to populate keyVaultKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + keyVaultKeyIdSelector: + description: Selector for a Key in keyvault to populate keyVaultKeyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + location: + description: Specifies the Azure Region where the Disk Encryption + Set exists. Changing this forces a new resource to be created. + type: string + resourceGroupName: + description: Specifies the name of the Resource Group where the + Disk Encryption Set should exist. Changing this forces a new + resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the Disk Encryption + Set. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + autoKeyRotationEnabled: + description: Boolean flag to specify whether Azure Disk Encryption + Set automatically rotates the encryption Key to latest version + or not. Possible values are true or false. Defaults to false. + type: boolean + encryptionType: + description: The type of key used to encrypt the data of the disk. + Possible values are EncryptionAtRestWithCustomerKey, EncryptionAtRestWithPlatformAndCustomerKeys + and ConfidentialVmEncryptedWithCustomerKey. Defaults to EncryptionAtRestWithCustomerKey. + Changing this forces a new resource to be created. + type: string + federatedClientId: + description: Multi-tenant application client id to access key + vault in a different tenant. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of User Assigned Managed Identity IDs + to be assigned to this Disk Encryption Set. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: The type of Managed Service Identity that is + configured on this Disk Encryption Set. Possible values + are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned + (to enable both). + type: string + type: object + keyVaultKeyId: + description: Specifies the URL to a Key Vault Key (either from + a Key Vault Key, or the Key URL for the Key Vault Secret). + type: string + keyVaultKeyIdRef: + description: Reference to a Key in keyvault to populate keyVaultKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + keyVaultKeyIdSelector: + description: Selector for a Key in keyvault to populate keyVaultKeyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + location: + description: Specifies the Azure Region where the Disk Encryption + Set exists. Changing this forces a new resource to be created. 
+ type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the Disk Encryption + Set. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. 
+ properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.identity is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.identity) + || (has(self.initProvider) && has(self.initProvider.identity))' + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + status: + description: DiskEncryptionSetStatus defines the observed state of DiskEncryptionSet. + properties: + atProvider: + properties: + autoKeyRotationEnabled: + description: Boolean flag to specify whether Azure Disk Encryption + Set automatically rotates the encryption Key to latest version + or not. Possible values are true or false. Defaults to false. + type: boolean + encryptionType: + description: The type of key used to encrypt the data of the disk. + Possible values are EncryptionAtRestWithCustomerKey, EncryptionAtRestWithPlatformAndCustomerKeys + and ConfidentialVmEncryptedWithCustomerKey. Defaults to EncryptionAtRestWithCustomerKey. + Changing this forces a new resource to be created. + type: string + federatedClientId: + description: Multi-tenant application client id to access key + vault in a different tenant. + type: string + id: + description: The ID of the Disk Encryption Set. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of User Assigned Managed Identity IDs + to be assigned to this Disk Encryption Set. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The (Client) ID of the Service Principal. 
+ type: string + tenantId: + description: The ID of the Tenant the Service Principal is + assigned in. + type: string + type: + description: The type of Managed Service Identity that is + configured on this Disk Encryption Set. Possible values + are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned + (to enable both). + type: string + type: object + keyVaultKeyId: + description: Specifies the URL to a Key Vault Key (either from + a Key Vault Key, or the Key URL for the Key Vault Secret). + type: string + keyVaultKeyUrl: + description: The URL for the Key Vault Key or Key Vault Secret + that is currently being used by the service. + type: string + location: + description: Specifies the Azure Region where the Disk Encryption + Set exists. Changing this forces a new resource to be created. + type: string + resourceGroupName: + description: Specifies the name of the Resource Group where the + Disk Encryption Set should exist. Changing this forces a new + resource to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the Disk Encryption + Set. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
+ format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/compute.azure.upbound.io_galleryapplicationversions.yaml b/package/crds/compute.azure.upbound.io_galleryapplicationversions.yaml index d76e0c22d..0e5304be8 100644 --- a/package/crds/compute.azure.upbound.io_galleryapplicationversions.yaml +++ b/package/crds/compute.azure.upbound.io_galleryapplicationversions.yaml @@ -1101,3 +1101,1070 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: GalleryApplicationVersion is the Schema for the GalleryApplicationVersions + API. Manages a Gallery Application Version. 
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: GalleryApplicationVersionSpec defines the desired state of + GalleryApplicationVersion + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + configFile: + description: Specifies the name of the config file on the VM. + Changing this forces a new resource to be created. + type: string + enableHealthCheck: + description: Should the Gallery Application reports health. Defaults + to false. + type: boolean + endOfLifeDate: + description: The end of life date in RFC3339 format of the Gallery + Application Version. 
+ type: string + excludeFromLatest: + description: Should the Gallery Application Version be excluded + from the latest filter? If set to true this Gallery Application + Version won't be returned for the latest version. Defaults to + false. + type: boolean + galleryApplicationId: + description: The ID of the Gallery Application. Changing this + forces a new resource to be created. + type: string + galleryApplicationIdRef: + description: Reference to a GalleryApplication in compute to populate + galleryApplicationId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + galleryApplicationIdSelector: + description: Selector for a GalleryApplication in compute to populate + galleryApplicationId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + location: + description: The Azure Region where the Gallery Application Version + exists. Changing this forces a new resource to be created. + type: string + manageAction: + description: A manage_action block as defined below. + properties: + install: + description: The command to install the Gallery Application. + Changing this forces a new resource to be created. + type: string + remove: + description: The command to remove the Gallery Application. + Changing this forces a new resource to be created. + type: string + update: + description: The command to update the Gallery Application. + Changing this forces a new resource to be created. + type: string + type: object + name: + description: The version name of the Gallery Application Version, + such as 1.0.0. Changing this forces a new resource to be created. + type: string + packageFile: + description: Specifies the name of the package file on the VM. + Changing this forces a new resource to be created. + type: string + source: + description: A source block as defined below. + properties: + defaultConfigurationLink: + description: The Storage Blob URI of the default configuration. + Changing this forces a new resource to be created. 
+ type: string + mediaLink: + description: The Storage Blob URI of the source application + package. Changing this forces a new resource to be created. + type: string + mediaLinkRef: + description: Reference to a Blob in storage to populate mediaLink. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + mediaLinkSelector: + description: Selector for a Blob in storage to populate mediaLink. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the Gallery Application + Version. + type: object + x-kubernetes-map-type: granular + targetRegion: + description: One or more target_region blocks as defined below. + items: + properties: + excludeFromLatest: + description: Specifies whether this Gallery Application + Version should be excluded from the latest filter. If + set to true, this Gallery Application Version won't be + returned for the latest version. Defaults to false. + type: boolean + name: + description: The Azure Region in which the Gallery Application + Version exists. + type: string + nameRef: + description: Reference to a GalleryApplication in compute + to populate name. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + nameSelector: + description: Selector for a GalleryApplication in compute + to populate name. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + regionalReplicaCount: + description: The number of replicas of the Gallery Application + Version to be created per region. Possible values are + between 1 and 10. + type: number + storageAccountType: + description: The storage account type for the Gallery Application + Version. Possible values are Standard_LRS, Premium_LRS + and Standard_ZRS. Defaults to Standard_LRS. + type: string + type: object + type: array + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. 
The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + configFile: + description: Specifies the name of the config file on the VM. + Changing this forces a new resource to be created. + type: string + enableHealthCheck: + description: Should the Gallery Application reports health. Defaults + to false. + type: boolean + endOfLifeDate: + description: The end of life date in RFC3339 format of the Gallery + Application Version. + type: string + excludeFromLatest: + description: Should the Gallery Application Version be excluded + from the latest filter? If set to true this Gallery Application + Version won't be returned for the latest version. Defaults to + false. + type: boolean + galleryApplicationId: + description: The ID of the Gallery Application. Changing this + forces a new resource to be created. + type: string + galleryApplicationIdRef: + description: Reference to a GalleryApplication in compute to populate + galleryApplicationId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + galleryApplicationIdSelector: + description: Selector for a GalleryApplication in compute to populate + galleryApplicationId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + location: + description: The Azure Region where the Gallery Application Version + exists. Changing this forces a new resource to be created. + type: string + manageAction: + description: A manage_action block as defined below. + properties: + install: + description: The command to install the Gallery Application. + Changing this forces a new resource to be created. + type: string + remove: + description: The command to remove the Gallery Application. 
+ Changing this forces a new resource to be created. + type: string + update: + description: The command to update the Gallery Application. + Changing this forces a new resource to be created. + type: string + type: object + name: + description: The version name of the Gallery Application Version, + such as 1.0.0. Changing this forces a new resource to be created. + type: string + packageFile: + description: Specifies the name of the package file on the VM. + Changing this forces a new resource to be created. + type: string + source: + description: A source block as defined below. + properties: + defaultConfigurationLink: + description: The Storage Blob URI of the default configuration. + Changing this forces a new resource to be created. + type: string + mediaLink: + description: The Storage Blob URI of the source application + package. Changing this forces a new resource to be created. + type: string + mediaLinkRef: + description: Reference to a Blob in storage to populate mediaLink. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + mediaLinkSelector: + description: Selector for a Blob in storage to populate mediaLink. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the Gallery Application + Version. + type: object + x-kubernetes-map-type: granular + targetRegion: + description: One or more target_region blocks as defined below. + items: + properties: + excludeFromLatest: + description: Specifies whether this Gallery Application + Version should be excluded from the latest filter. If + set to true, this Gallery Application Version won't be + returned for the latest version. Defaults to false. + type: boolean + name: + description: The Azure Region in which the Gallery Application + Version exists. + type: string + nameRef: + description: Reference to a GalleryApplication in compute + to populate name. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + nameSelector: + description: Selector for a GalleryApplication in compute + to populate name. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + regionalReplicaCount: + description: The number of replicas of the Gallery Application + Version to be created per region. Possible values are + between 1 and 10. + type: number + storageAccountType: + description: The storage account type for the Gallery Application + Version. Possible values are Standard_LRS, Premium_LRS + and Standard_ZRS. Defaults to Standard_LRS. + type: string + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.manageAction is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.manageAction) + || (has(self.initProvider) && has(self.initProvider.manageAction))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.source is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.source) + || (has(self.initProvider) && has(self.initProvider.source))' + - message: spec.forProvider.targetRegion is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.targetRegion) + || (has(self.initProvider) && has(self.initProvider.targetRegion))' + status: + description: GalleryApplicationVersionStatus defines the observed state + of GalleryApplicationVersion. + properties: + atProvider: + properties: + configFile: + description: Specifies the name of the config file on the VM. + Changing this forces a new resource to be created. + type: string + enableHealthCheck: + description: Should the Gallery Application reports health. 
Defaults + to false. + type: boolean + endOfLifeDate: + description: The end of life date in RFC3339 format of the Gallery + Application Version. + type: string + excludeFromLatest: + description: Should the Gallery Application Version be excluded + from the latest filter? If set to true this Gallery Application + Version won't be returned for the latest version. Defaults to + false. + type: boolean + galleryApplicationId: + description: The ID of the Gallery Application. Changing this + forces a new resource to be created. + type: string + id: + description: The ID of the Gallery Application Version. + type: string + location: + description: The Azure Region where the Gallery Application Version + exists. Changing this forces a new resource to be created. + type: string + manageAction: + description: A manage_action block as defined below. + properties: + install: + description: The command to install the Gallery Application. + Changing this forces a new resource to be created. + type: string + remove: + description: The command to remove the Gallery Application. + Changing this forces a new resource to be created. + type: string + update: + description: The command to update the Gallery Application. + Changing this forces a new resource to be created. + type: string + type: object + name: + description: The version name of the Gallery Application Version, + such as 1.0.0. Changing this forces a new resource to be created. + type: string + packageFile: + description: Specifies the name of the package file on the VM. + Changing this forces a new resource to be created. + type: string + source: + description: A source block as defined below. + properties: + defaultConfigurationLink: + description: The Storage Blob URI of the default configuration. + Changing this forces a new resource to be created. + type: string + mediaLink: + description: The Storage Blob URI of the source application + package. Changing this forces a new resource to be created. 
+ type: string + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the Gallery Application + Version. + type: object + x-kubernetes-map-type: granular + targetRegion: + description: One or more target_region blocks as defined below. + items: + properties: + excludeFromLatest: + description: Specifies whether this Gallery Application + Version should be excluded from the latest filter. If + set to true, this Gallery Application Version won't be + returned for the latest version. Defaults to false. + type: boolean + name: + description: The Azure Region in which the Gallery Application + Version exists. + type: string + regionalReplicaCount: + description: The number of replicas of the Gallery Application + Version to be created per region. Possible values are + between 1 and 10. + type: number + storageAccountType: + description: The storage account type for the Gallery Application + Version. Possible values are Standard_LRS, Premium_LRS + and Standard_ZRS. Defaults to Standard_LRS. + type: string + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
+ format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/compute.azure.upbound.io_images.yaml b/package/crds/compute.azure.upbound.io_images.yaml index 09b69bc44..2776085ac 100644 --- a/package/crds/compute.azure.upbound.io_images.yaml +++ b/package/crds/compute.azure.upbound.io_images.yaml @@ -678,3 +678,657 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Image is the Schema for the Images API. Manages a custom virtual + machine image that can be used to create virtual machines. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ImageSpec defines the desired state of Image + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + dataDisk: + description: One or more data_disk blocks as defined below. + items: + properties: + blobUri: + description: Specifies the URI in Azure storage of the blob + that you want to use to create the image. + type: string + caching: + description: Specifies the caching mode as ReadWrite, ReadOnly, + or None. Defaults to None. + type: string + lun: + description: Specifies the logical unit number of the data + disk. + type: number + managedDiskId: + description: Specifies the ID of the managed disk resource + that you want to use to create the image. 
Changing this + forces a new resource to be created. + type: string + sizeGb: + description: Specifies the size of the image to be created. + The target size can't be smaller than the source size. + type: number + type: object + type: array + hyperVGeneration: + description: The HyperVGenerationType of the VirtualMachine created + from the image as V1, V2. Defaults to V1. Changing this forces + a new resource to be created. + type: string + location: + description: Specified the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + osDisk: + description: One or more os_disk blocks as defined below. Changing + this forces a new resource to be created. + properties: + blobUri: + description: Specifies the URI in Azure storage of the blob + that you want to use to create the image. Changing this + forces a new resource to be created. + type: string + caching: + description: Specifies the caching mode as ReadWrite, ReadOnly, + or None. The default is None. + type: string + diskEncryptionSetId: + description: The ID of the Disk Encryption Set which should + be used to encrypt this image. Changing this forces a new + resource to be created. + type: string + managedDiskId: + description: Specifies the ID of the managed disk resource + that you want to use to create the image. + type: string + osState: + description: Specifies the state of the operating system contained + in the blob. Currently, the only value is Generalized. Possible + values are Generalized and Specialized. + type: string + osType: + description: 'Specifies the type of operating system contained + in the virtual machine image. Possible values are: Windows + or Linux.' + type: string + sizeGb: + description: Specifies the size of the image to be created. + Changing this forces a new resource to be created. + type: number + type: object + resourceGroupName: + description: The name of the resource group in which to create + the image. 
Changing this forces a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sourceVirtualMachineId: + description: The Virtual Machine ID from which to create the image. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + zoneResilient: + description: Is zone resiliency enabled? Defaults to false. Changing + this forces a new resource to be created. + type: boolean + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + dataDisk: + description: One or more data_disk blocks as defined below. + items: + properties: + blobUri: + description: Specifies the URI in Azure storage of the blob + that you want to use to create the image. + type: string + caching: + description: Specifies the caching mode as ReadWrite, ReadOnly, + or None. Defaults to None. + type: string + lun: + description: Specifies the logical unit number of the data + disk. 
+ type: number + managedDiskId: + description: Specifies the ID of the managed disk resource + that you want to use to create the image. Changing this + forces a new resource to be created. + type: string + sizeGb: + description: Specifies the size of the image to be created. + The target size can't be smaller than the source size. + type: number + type: object + type: array + hyperVGeneration: + description: The HyperVGenerationType of the VirtualMachine created + from the image as V1, V2. Defaults to V1. Changing this forces + a new resource to be created. + type: string + location: + description: Specified the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + osDisk: + description: One or more os_disk blocks as defined below. Changing + this forces a new resource to be created. + properties: + blobUri: + description: Specifies the URI in Azure storage of the blob + that you want to use to create the image. Changing this + forces a new resource to be created. + type: string + caching: + description: Specifies the caching mode as ReadWrite, ReadOnly, + or None. The default is None. + type: string + diskEncryptionSetId: + description: The ID of the Disk Encryption Set which should + be used to encrypt this image. Changing this forces a new + resource to be created. + type: string + managedDiskId: + description: Specifies the ID of the managed disk resource + that you want to use to create the image. + type: string + osState: + description: Specifies the state of the operating system contained + in the blob. Currently, the only value is Generalized. Possible + values are Generalized and Specialized. + type: string + osType: + description: 'Specifies the type of operating system contained + in the virtual machine image. Possible values are: Windows + or Linux.' + type: string + sizeGb: + description: Specifies the size of the image to be created. 
+ Changing this forces a new resource to be created. + type: number + type: object + sourceVirtualMachineId: + description: The Virtual Machine ID from which to create the image. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + zoneResilient: + description: Is zone resiliency enabled? Defaults to false. Changing + this forces a new resource to be created. + type: boolean + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + status: + description: ImageStatus defines the observed state of Image. + properties: + atProvider: + properties: + dataDisk: + description: One or more data_disk blocks as defined below. + items: + properties: + blobUri: + description: Specifies the URI in Azure storage of the blob + that you want to use to create the image. + type: string + caching: + description: Specifies the caching mode as ReadWrite, ReadOnly, + or None. Defaults to None. + type: string + lun: + description: Specifies the logical unit number of the data + disk. + type: number + managedDiskId: + description: Specifies the ID of the managed disk resource + that you want to use to create the image. Changing this + forces a new resource to be created. + type: string + sizeGb: + description: Specifies the size of the image to be created. + The target size can't be smaller than the source size. + type: number + type: object + type: array + hyperVGeneration: + description: The HyperVGenerationType of the VirtualMachine created + from the image as V1, V2. Defaults to V1. Changing this forces + a new resource to be created. + type: string + id: + description: The ID of the Image. + type: string + location: + description: Specified the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + osDisk: + description: One or more os_disk blocks as defined below. Changing + this forces a new resource to be created. 
+ properties: + blobUri: + description: Specifies the URI in Azure storage of the blob + that you want to use to create the image. Changing this + forces a new resource to be created. + type: string + caching: + description: Specifies the caching mode as ReadWrite, ReadOnly, + or None. The default is None. + type: string + diskEncryptionSetId: + description: The ID of the Disk Encryption Set which should + be used to encrypt this image. Changing this forces a new + resource to be created. + type: string + managedDiskId: + description: Specifies the ID of the managed disk resource + that you want to use to create the image. + type: string + osState: + description: Specifies the state of the operating system contained + in the blob. Currently, the only value is Generalized. Possible + values are Generalized and Specialized. + type: string + osType: + description: 'Specifies the type of operating system contained + in the virtual machine image. Possible values are: Windows + or Linux.' + type: string + sizeGb: + description: Specifies the size of the image to be created. + Changing this forces a new resource to be created. + type: number + type: object + resourceGroupName: + description: The name of the resource group in which to create + the image. Changing this forces a new resource to be created. + type: string + sourceVirtualMachineId: + description: The Virtual Machine ID from which to create the image. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + zoneResilient: + description: Is zone resiliency enabled? Defaults to false. Changing + this forces a new resource to be created. + type: boolean + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. 
+ properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/compute.azure.upbound.io_linuxvirtualmachines.yaml b/package/crds/compute.azure.upbound.io_linuxvirtualmachines.yaml index 9ea7d00ac..f167282b6 100644 --- a/package/crds/compute.azure.upbound.io_linuxvirtualmachines.yaml +++ b/package/crds/compute.azure.upbound.io_linuxvirtualmachines.yaml @@ -1934,3 +1934,1859 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: LinuxVirtualMachine is the Schema for the LinuxVirtualMachines + API. Manages a Linux Virtual Machine. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: LinuxVirtualMachineSpec defines the desired state of LinuxVirtualMachine + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + additionalCapabilities: + description: A additional_capabilities block as defined below. + properties: + ultraSsdEnabled: + description: Should the capacity to enable Data Disks of the + UltraSSD_LRS storage account type be supported on this Virtual + Machine? Defaults to false. + type: boolean + type: object + adminPasswordSecretRef: + description: The Password which should be used for the local-administrator + on this Virtual Machine. Changing this forces a new resource + to be created. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + adminSshKey: + description: One or more admin_ssh_key blocks as defined below. + Changing this forces a new resource to be created. 
+ items: + properties: + publicKey: + description: The Public Key which should be used for authentication, + which needs to be at least 2048-bit and in ssh-rsa format. + Changing this forces a new resource to be created. + type: string + username: + description: The Username for which this Public SSH Key + should be configured. Changing this forces a new resource + to be created. + type: string + type: object + type: array + adminUsername: + description: The username of the local administrator used for + the Virtual Machine. Changing this forces a new resource to + be created. + type: string + allowExtensionOperations: + description: Should Extension Operations be allowed on this Virtual + Machine? Defaults to true. + type: boolean + availabilitySetId: + description: Specifies the ID of the Availability Set in which + the Virtual Machine should exist. Changing this forces a new + resource to be created. + type: string + bootDiagnostics: + description: A boot_diagnostics block as defined below. + properties: + storageAccountUri: + description: The Primary/Secondary Endpoint for the Azure + Storage Account which should be used to store Boot Diagnostics, + including Console Output and Screenshots from the Hypervisor. + type: string + type: object + bypassPlatformSafetyChecksOnUserScheduleEnabled: + description: Specifies whether to skip platform scheduled patching + when a user schedule is associated with the VM. Defaults to + false. + type: boolean + capacityReservationGroupId: + description: Specifies the ID of the Capacity Reservation Group + which the Virtual Machine should be allocated to. + type: string + computerName: + description: Specifies the Hostname which should be used for this + Virtual Machine. If unspecified this defaults to the value for + the name field. If the value of the name field is not a valid + computer_name, then you must specify computer_name. Changing + this forces a new resource to be created. 
+ type: string + customDataSecretRef: + description: The Base64-Encoded Custom Data which should be used + for this Virtual Machine. Changing this forces a new resource + to be created. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + dedicatedHostGroupId: + description: The ID of a Dedicated Host Group that this Linux + Virtual Machine should be run within. Conflicts with dedicated_host_id. + type: string + dedicatedHostId: + description: The ID of a Dedicated Host where this machine should + be run on. Conflicts with dedicated_host_group_id. + type: string + disablePasswordAuthentication: + description: Should Password Authentication be disabled on this + Virtual Machine? Defaults to true. Changing this forces a new + resource to be created. + type: boolean + diskControllerType: + description: Specifies the Disk Controller Type used for this + Virtual Machine. Possible values are SCSI and NVMe. + type: string + edgeZone: + description: Specifies the Edge Zone within the Azure Region where + this Linux Virtual Machine should exist. Changing this forces + a new Linux Virtual Machine to be created. + type: string + encryptionAtHostEnabled: + description: Should all of the disks (including the temp disk) + attached to this Virtual Machine be encrypted by enabling Encryption + at Host? + type: boolean + evictionPolicy: + description: Specifies what should happen when the Virtual Machine + is evicted for price reasons when using a Spot instance. Possible + values are Deallocate and Delete. Changing this forces a new + resource to be created. + type: string + extensionsTimeBudget: + description: Specifies the duration allocated for all extensions + to start. 
The time duration should be between 15 minutes and + 120 minutes (inclusive) and should be specified in ISO 8601 + format. Defaults to PT1H30M. + type: string + galleryApplication: + description: One or more gallery_application blocks as defined + below. + items: + properties: + automaticUpgradeEnabled: + description: Specifies whether the version will be automatically + updated for the VM when a new Gallery Application version + is available in PIR/SIG. Defaults to false. + type: boolean + configurationBlobUri: + description: Specifies the URI to an Azure Blob that will + replace the default configuration for the package if provided. + type: string + order: + description: Specifies the order in which the packages have + to be installed. Possible values are between 0 and 2,147,483,647. + type: number + tag: + description: Specifies a passthrough value for more generic + context. This field can be any valid string value. + type: string + treatFailureAsDeploymentFailureEnabled: + description: Specifies whether any failure for any operation + in the VmApplication will fail the deployment of the VM. + Defaults to false. + type: boolean + versionId: + description: Specifies the Gallery Application Version resource + ID. + type: string + type: object + type: array + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Linux Virtual Machine. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Linux Virtual Machine. + Possible values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + licenseType: + description: Specifies the BYOL Type for this Virtual Machine. + Possible values are RHEL_BYOS and SLES_BYOS. 
+ type: string + location: + description: The Azure location where the Linux Virtual Machine + should exist. Changing this forces a new resource to be created. + type: string + maxBidPrice: + description: The maximum price you're willing to pay for this + Virtual Machine, in US Dollars; which must be greater than the + current spot price. If this bid price falls below the current + spot price the Virtual Machine will be evicted using the eviction_policy. + Defaults to -1, which means that the Virtual Machine should + not be evicted for price reasons. + type: number + networkInterfaceIds: + description: . A list of Network Interface IDs which should be + attached to this Virtual Machine. The first Network Interface + ID in this list will be the Primary Network Interface on the + Virtual Machine. + items: + type: string + type: array + networkInterfaceIdsRefs: + description: References to NetworkInterface in network to populate + networkInterfaceIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + networkInterfaceIdsSelector: + description: Selector for a list of NetworkInterface in network + to populate networkInterfaceIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + osDisk: + description: A os_disk block as defined below. + properties: + caching: + description: The Type of Caching which should be used for + the Internal OS Disk. Possible values are None, ReadOnly + and ReadWrite. + type: string + diffDiskSettings: + description: A diff_disk_settings block as defined above. + Changing this forces a new resource to be created. + properties: + option: + description: Specifies the Ephemeral Disk Settings for + the OS Disk. At this time the only possible value is + Local. Changing this forces a new resource to be created. 
+ type: string + placement: + description: Specifies where to store the Ephemeral Disk. + Possible values are CacheDisk and ResourceDisk. Defaults + to CacheDisk. Changing this forces a new resource to + be created. + type: string + type: object + diskEncryptionSetId: + description: The ID of the Disk Encryption Set which should + be used to Encrypt this OS Disk. Conflicts with secure_vm_disk_encryption_set_id. + type: string + diskSizeGb: + description: The Size of the Internal OS Disk in GB, if you + wish to vary from the size used in the image this Virtual + Machine is sourced from. + type: number + name: + description: The name which should be used for the Internal + OS Disk. Changing this forces a new resource to be created. + type: string + secureVmDiskEncryptionSetId: + description: The ID of the Disk Encryption Set which should + be used to Encrypt this OS Disk when the Virtual Machine + is a Confidential VM. Conflicts with disk_encryption_set_id. + Changing this forces a new resource to be created. + type: string + securityEncryptionType: + description: Encryption Type when the Virtual Machine is a + Confidential VM. Possible values are VMGuestStateOnly and + DiskWithVMGuestState. Changing this forces a new resource + to be created. + type: string + storageAccountType: + description: The Type of Storage Account which should back + this the Internal OS Disk. Possible values are Standard_LRS, + StandardSSD_LRS, Premium_LRS, StandardSSD_ZRS and Premium_ZRS. + Changing this forces a new resource to be created. + type: string + writeAcceleratorEnabled: + description: Should Write Accelerator be Enabled for this + OS Disk? Defaults to false. + type: boolean + type: object + osImageNotification: + description: A os_image_notification block as defined below. + properties: + timeout: + description: Length of time a notification to be sent to the + VM on the instance metadata server till the VM gets OS upgraded. + The only possible value is PT15M. Defaults to PT15M. 
+ type: string + type: object + patchAssessmentMode: + description: Specifies the mode of VM Guest Patching for the Virtual + Machine. Possible values are AutomaticByPlatform or ImageDefault. + Defaults to ImageDefault. + type: string + patchMode: + description: Specifies the mode of in-guest patching to this Linux + Virtual Machine. Possible values are AutomaticByPlatform and + ImageDefault. Defaults to ImageDefault. For more information + on patch modes please see the product documentation. + type: string + plan: + description: A plan block as defined below. Changing this forces + a new resource to be created. + properties: + name: + description: Specifies the Name of the Marketplace Image this + Virtual Machine should be created from. Changing this forces + a new resource to be created. + type: string + product: + description: Specifies the Product of the Marketplace Image + this Virtual Machine should be created from. Changing this + forces a new resource to be created. + type: string + publisher: + description: Specifies the Publisher of the Marketplace Image + this Virtual Machine should be created from. Changing this + forces a new resource to be created. + type: string + type: object + platformFaultDomain: + description: Specifies the Platform Fault Domain in which this + Linux Virtual Machine should be created. Defaults to -1, which + means this will be automatically assigned to a fault domain + that best maintains balance across the available fault domains. + Changing this forces a new Linux Virtual Machine to be created. + type: number + priority: + description: Specifies the priority of this Virtual Machine. Possible + values are Regular and Spot. Defaults to Regular. Changing this + forces a new resource to be created. + type: string + provisionVmAgent: + description: Should the Azure VM Agent be provisioned on this + Virtual Machine? Defaults to true. Changing this forces a new + resource to be created. 
+ type: boolean + proximityPlacementGroupId: + description: The ID of the Proximity Placement Group which the + Virtual Machine should be assigned to. + type: string + rebootSetting: + description: Specifies the reboot setting for platform scheduled + patching. Possible values are Always, IfRequired and Never. + type: string + resourceGroupName: + description: The name of the Resource Group in which the Linux + Virtual Machine should be exist. Changing this forces a new + resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + secret: + description: One or more secret blocks as defined below. + items: + properties: + certificate: + description: One or more certificate blocks as defined above. + items: + properties: + url: + description: The Secret URL of a Key Vault Certificate. + type: string + type: object + type: array + keyVaultId: + description: The ID of the Key Vault from which all Secrets + should be sourced. + type: string + type: object + type: array + secureBootEnabled: + description: Specifies whether secure boot should be enabled on + the virtual machine. Changing this forces a new resource to + be created. + type: boolean + size: + description: The SKU which should be used for this Virtual Machine, + such as Standard_F2. + type: string + sourceImageId: + description: The ID of the Image which this Virtual Machine should + be created from. Changing this forces a new resource to be created. + Possible Image ID types include Image IDs, Shared Image IDs, + Shared Image Version IDs, Community Gallery Image IDs, Community + Gallery Image Version IDs, Shared Gallery Image IDs and Shared + Gallery Image Version IDs. 
+ type: string + sourceImageReference: + description: A source_image_reference block as defined below. + Changing this forces a new resource to be created. + properties: + offer: + description: Specifies the offer of the image used to create + the virtual machines. Changing this forces a new resource + to be created. + type: string + publisher: + description: Specifies the publisher of the image used to + create the virtual machines. Changing this forces a new + resource to be created. + type: string + sku: + description: Specifies the SKU of the image used to create + the virtual machines. Changing this forces a new resource + to be created. + type: string + version: + description: Specifies the version of the image used to create + the virtual machines. Changing this forces a new resource + to be created. + type: string + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to this + Virtual Machine. + type: object + x-kubernetes-map-type: granular + terminationNotification: + description: A termination_notification block as defined below. + properties: + enabled: + description: Should the termination notification be enabled + on this Virtual Machine? + type: boolean + timeout: + description: Length of time (in minutes, between 5 and 15) + a notification to be sent to the VM on the instance metadata + server till the VM gets deleted. The time duration should + be specified in ISO 8601 format. Defaults to PT5M. + type: string + type: object + userData: + description: The Base64-Encoded User Data which should be used + for this Virtual Machine. + type: string + virtualMachineScaleSetId: + description: Specifies the Orchestrated Virtual Machine Scale + Set that this Virtual Machine should be created within. + type: string + vmAgentPlatformUpdatesEnabled: + description: Specifies whether VMAgent Platform Updates is enabled. + Defaults to false. 
+ type: boolean + vtpmEnabled: + description: Specifies whether vTPM should be enabled on the virtual + machine. Changing this forces a new resource to be created. + type: boolean + zone: + description: Specifies the Availability Zones in which this Linux + Virtual Machine should be located. Changing this forces a new + Linux Virtual Machine to be created. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + additionalCapabilities: + description: A additional_capabilities block as defined below. + properties: + ultraSsdEnabled: + description: Should the capacity to enable Data Disks of the + UltraSSD_LRS storage account type be supported on this Virtual + Machine? Defaults to false. + type: boolean + type: object + adminSshKey: + description: One or more admin_ssh_key blocks as defined below. + Changing this forces a new resource to be created. + items: + properties: + publicKey: + description: The Public Key which should be used for authentication, + which needs to be at least 2048-bit and in ssh-rsa format. + Changing this forces a new resource to be created. + type: string + username: + description: The Username for which this Public SSH Key + should be configured. Changing this forces a new resource + to be created. 
+ type: string + type: object + type: array + adminUsername: + description: The username of the local administrator used for + the Virtual Machine. Changing this forces a new resource to + be created. + type: string + allowExtensionOperations: + description: Should Extension Operations be allowed on this Virtual + Machine? Defaults to true. + type: boolean + availabilitySetId: + description: Specifies the ID of the Availability Set in which + the Virtual Machine should exist. Changing this forces a new + resource to be created. + type: string + bootDiagnostics: + description: A boot_diagnostics block as defined below. + properties: + storageAccountUri: + description: The Primary/Secondary Endpoint for the Azure + Storage Account which should be used to store Boot Diagnostics, + including Console Output and Screenshots from the Hypervisor. + type: string + type: object + bypassPlatformSafetyChecksOnUserScheduleEnabled: + description: Specifies whether to skip platform scheduled patching + when a user schedule is associated with the VM. Defaults to + false. + type: boolean + capacityReservationGroupId: + description: Specifies the ID of the Capacity Reservation Group + which the Virtual Machine should be allocated to. + type: string + computerName: + description: Specifies the Hostname which should be used for this + Virtual Machine. If unspecified this defaults to the value for + the name field. If the value of the name field is not a valid + computer_name, then you must specify computer_name. Changing + this forces a new resource to be created. + type: string + dedicatedHostGroupId: + description: The ID of a Dedicated Host Group that this Linux + Virtual Machine should be run within. Conflicts with dedicated_host_id. + type: string + dedicatedHostId: + description: The ID of a Dedicated Host where this machine should + be run on. Conflicts with dedicated_host_group_id. 
+ type: string + disablePasswordAuthentication: + description: Should Password Authentication be disabled on this + Virtual Machine? Defaults to true. Changing this forces a new + resource to be created. + type: boolean + diskControllerType: + description: Specifies the Disk Controller Type used for this + Virtual Machine. Possible values are SCSI and NVMe. + type: string + edgeZone: + description: Specifies the Edge Zone within the Azure Region where + this Linux Virtual Machine should exist. Changing this forces + a new Linux Virtual Machine to be created. + type: string + encryptionAtHostEnabled: + description: Should all of the disks (including the temp disk) + attached to this Virtual Machine be encrypted by enabling Encryption + at Host? + type: boolean + evictionPolicy: + description: Specifies what should happen when the Virtual Machine + is evicted for price reasons when using a Spot instance. Possible + values are Deallocate and Delete. Changing this forces a new + resource to be created. + type: string + extensionsTimeBudget: + description: Specifies the duration allocated for all extensions + to start. The time duration should be between 15 minutes and + 120 minutes (inclusive) and should be specified in ISO 8601 + format. Defaults to PT1H30M. + type: string + galleryApplication: + description: One or more gallery_application blocks as defined + below. + items: + properties: + automaticUpgradeEnabled: + description: Specifies whether the version will be automatically + updated for the VM when a new Gallery Application version + is available in PIR/SIG. Defaults to false. + type: boolean + configurationBlobUri: + description: Specifies the URI to an Azure Blob that will + replace the default configuration for the package if provided. + type: string + order: + description: Specifies the order in which the packages have + to be installed. Possible values are between 0 and 2,147,483,647. 
+ type: number + tag: + description: Specifies a passthrough value for more generic + context. This field can be any valid string value. + type: string + treatFailureAsDeploymentFailureEnabled: + description: Specifies whether any failure for any operation + in the VmApplication will fail the deployment of the VM. + Defaults to false. + type: boolean + versionId: + description: Specifies the Gallery Application Version resource + ID. + type: string + type: object + type: array + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Linux Virtual Machine. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Linux Virtual Machine. + Possible values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + licenseType: + description: Specifies the BYOL Type for this Virtual Machine. + Possible values are RHEL_BYOS and SLES_BYOS. + type: string + location: + description: The Azure location where the Linux Virtual Machine + should exist. Changing this forces a new resource to be created. + type: string + maxBidPrice: + description: The maximum price you're willing to pay for this + Virtual Machine, in US Dollars; which must be greater than the + current spot price. If this bid price falls below the current + spot price the Virtual Machine will be evicted using the eviction_policy. + Defaults to -1, which means that the Virtual Machine should + not be evicted for price reasons. + type: number + networkInterfaceIds: + description: . A list of Network Interface IDs which should be + attached to this Virtual Machine. The first Network Interface + ID in this list will be the Primary Network Interface on the + Virtual Machine. 
+ items: + type: string + type: array + networkInterfaceIdsRefs: + description: References to NetworkInterface in network to populate + networkInterfaceIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + networkInterfaceIdsSelector: + description: Selector for a list of NetworkInterface in network + to populate networkInterfaceIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + osDisk: + description: A os_disk block as defined below. + properties: + caching: + description: The Type of Caching which should be used for + the Internal OS Disk. Possible values are None, ReadOnly + and ReadWrite. + type: string + diffDiskSettings: + description: A diff_disk_settings block as defined above. + Changing this forces a new resource to be created. + properties: + option: + description: Specifies the Ephemeral Disk Settings for + the OS Disk. At this time the only possible value is + Local. Changing this forces a new resource to be created. + type: string + placement: + description: Specifies where to store the Ephemeral Disk. + Possible values are CacheDisk and ResourceDisk. Defaults + to CacheDisk. Changing this forces a new resource to + be created. + type: string + type: object + diskEncryptionSetId: + description: The ID of the Disk Encryption Set which should + be used to Encrypt this OS Disk. Conflicts with secure_vm_disk_encryption_set_id. + type: string + diskSizeGb: + description: The Size of the Internal OS Disk in GB, if you + wish to vary from the size used in the image this Virtual + Machine is sourced from. + type: number + name: + description: The name which should be used for the Internal + OS Disk. Changing this forces a new resource to be created. + type: string + secureVmDiskEncryptionSetId: + description: The ID of the Disk Encryption Set which should + be used to Encrypt this OS Disk when the Virtual Machine + is a Confidential VM. Conflicts with disk_encryption_set_id. + Changing this forces a new resource to be created. 
+ type: string + securityEncryptionType: + description: Encryption Type when the Virtual Machine is a + Confidential VM. Possible values are VMGuestStateOnly and + DiskWithVMGuestState. Changing this forces a new resource + to be created. + type: string + storageAccountType: + description: The Type of Storage Account which should back + this the Internal OS Disk. Possible values are Standard_LRS, + StandardSSD_LRS, Premium_LRS, StandardSSD_ZRS and Premium_ZRS. + Changing this forces a new resource to be created. + type: string + writeAcceleratorEnabled: + description: Should Write Accelerator be Enabled for this + OS Disk? Defaults to false. + type: boolean + type: object + osImageNotification: + description: A os_image_notification block as defined below. + properties: + timeout: + description: Length of time a notification to be sent to the + VM on the instance metadata server till the VM gets OS upgraded. + The only possible value is PT15M. Defaults to PT15M. + type: string + type: object + patchAssessmentMode: + description: Specifies the mode of VM Guest Patching for the Virtual + Machine. Possible values are AutomaticByPlatform or ImageDefault. + Defaults to ImageDefault. + type: string + patchMode: + description: Specifies the mode of in-guest patching to this Linux + Virtual Machine. Possible values are AutomaticByPlatform and + ImageDefault. Defaults to ImageDefault. For more information + on patch modes please see the product documentation. + type: string + plan: + description: A plan block as defined below. Changing this forces + a new resource to be created. + properties: + name: + description: Specifies the Name of the Marketplace Image this + Virtual Machine should be created from. Changing this forces + a new resource to be created. + type: string + product: + description: Specifies the Product of the Marketplace Image + this Virtual Machine should be created from. Changing this + forces a new resource to be created. 
+ type: string + publisher: + description: Specifies the Publisher of the Marketplace Image + this Virtual Machine should be created from. Changing this + forces a new resource to be created. + type: string + type: object + platformFaultDomain: + description: Specifies the Platform Fault Domain in which this + Linux Virtual Machine should be created. Defaults to -1, which + means this will be automatically assigned to a fault domain + that best maintains balance across the available fault domains. + Changing this forces a new Linux Virtual Machine to be created. + type: number + priority: + description: Specifies the priority of this Virtual Machine. Possible + values are Regular and Spot. Defaults to Regular. Changing this + forces a new resource to be created. + type: string + provisionVmAgent: + description: Should the Azure VM Agent be provisioned on this + Virtual Machine? Defaults to true. Changing this forces a new + resource to be created. + type: boolean + proximityPlacementGroupId: + description: The ID of the Proximity Placement Group which the + Virtual Machine should be assigned to. + type: string + rebootSetting: + description: Specifies the reboot setting for platform scheduled + patching. Possible values are Always, IfRequired and Never. + type: string + secret: + description: One or more secret blocks as defined below. + items: + properties: + certificate: + description: One or more certificate blocks as defined above. + items: + properties: + url: + description: The Secret URL of a Key Vault Certificate. + type: string + type: object + type: array + keyVaultId: + description: The ID of the Key Vault from which all Secrets + should be sourced. + type: string + type: object + type: array + secureBootEnabled: + description: Specifies whether secure boot should be enabled on + the virtual machine. Changing this forces a new resource to + be created. 
+ type: boolean + size: + description: The SKU which should be used for this Virtual Machine, + such as Standard_F2. + type: string + sourceImageId: + description: The ID of the Image which this Virtual Machine should + be created from. Changing this forces a new resource to be created. + Possible Image ID types include Image IDs, Shared Image IDs, + Shared Image Version IDs, Community Gallery Image IDs, Community + Gallery Image Version IDs, Shared Gallery Image IDs and Shared + Gallery Image Version IDs. + type: string + sourceImageReference: + description: A source_image_reference block as defined below. + Changing this forces a new resource to be created. + properties: + offer: + description: Specifies the offer of the image used to create + the virtual machines. Changing this forces a new resource + to be created. + type: string + publisher: + description: Specifies the publisher of the image used to + create the virtual machines. Changing this forces a new + resource to be created. + type: string + sku: + description: Specifies the SKU of the image used to create + the virtual machines. Changing this forces a new resource + to be created. + type: string + version: + description: Specifies the version of the image used to create + the virtual machines. Changing this forces a new resource + to be created. + type: string + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to this + Virtual Machine. + type: object + x-kubernetes-map-type: granular + terminationNotification: + description: A termination_notification block as defined below. + properties: + enabled: + description: Should the termination notification be enabled + on this Virtual Machine? + type: boolean + timeout: + description: Length of time (in minutes, between 5 and 15) + a notification to be sent to the VM on the instance metadata + server till the VM gets deleted. The time duration should + be specified in ISO 8601 format. 
Defaults to PT5M. + type: string + type: object + userData: + description: The Base64-Encoded User Data which should be used + for this Virtual Machine. + type: string + virtualMachineScaleSetId: + description: Specifies the Orchestrated Virtual Machine Scale + Set that this Virtual Machine should be created within. + type: string + vmAgentPlatformUpdatesEnabled: + description: Specifies whether VMAgent Platform Updates is enabled. + Defaults to false. + type: boolean + vtpmEnabled: + description: Specifies whether vTPM should be enabled on the virtual + machine. Changing this forces a new resource to be created. + type: boolean + zone: + description: Specifies the Availability Zones in which this Linux + Virtual Machine should be located. Changing this forces a new + Linux Virtual Machine to be created. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.adminUsername is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.adminUsername) + || (has(self.initProvider) && has(self.initProvider.adminUsername))' + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.osDisk is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.osDisk) + || (has(self.initProvider) && has(self.initProvider.osDisk))' + - message: spec.forProvider.size is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.size) + || (has(self.initProvider) && has(self.initProvider.size))' + status: + description: LinuxVirtualMachineStatus defines the observed state of LinuxVirtualMachine. 
+ properties: + atProvider: + properties: + additionalCapabilities: + description: A additional_capabilities block as defined below. + properties: + ultraSsdEnabled: + description: Should the capacity to enable Data Disks of the + UltraSSD_LRS storage account type be supported on this Virtual + Machine? Defaults to false. + type: boolean + type: object + adminSshKey: + description: One or more admin_ssh_key blocks as defined below. + Changing this forces a new resource to be created. + items: + properties: + publicKey: + description: The Public Key which should be used for authentication, + which needs to be at least 2048-bit and in ssh-rsa format. + Changing this forces a new resource to be created. + type: string + username: + description: The Username for which this Public SSH Key + should be configured. Changing this forces a new resource + to be created. + type: string + type: object + type: array + adminUsername: + description: The username of the local administrator used for + the Virtual Machine. Changing this forces a new resource to + be created. + type: string + allowExtensionOperations: + description: Should Extension Operations be allowed on this Virtual + Machine? Defaults to true. + type: boolean + availabilitySetId: + description: Specifies the ID of the Availability Set in which + the Virtual Machine should exist. Changing this forces a new + resource to be created. + type: string + bootDiagnostics: + description: A boot_diagnostics block as defined below. + properties: + storageAccountUri: + description: The Primary/Secondary Endpoint for the Azure + Storage Account which should be used to store Boot Diagnostics, + including Console Output and Screenshots from the Hypervisor. + type: string + type: object + bypassPlatformSafetyChecksOnUserScheduleEnabled: + description: Specifies whether to skip platform scheduled patching + when a user schedule is associated with the VM. Defaults to + false. 
+ type: boolean + capacityReservationGroupId: + description: Specifies the ID of the Capacity Reservation Group + which the Virtual Machine should be allocated to. + type: string + computerName: + description: Specifies the Hostname which should be used for this + Virtual Machine. If unspecified this defaults to the value for + the name field. If the value of the name field is not a valid + computer_name, then you must specify computer_name. Changing + this forces a new resource to be created. + type: string + dedicatedHostGroupId: + description: The ID of a Dedicated Host Group that this Linux + Virtual Machine should be run within. Conflicts with dedicated_host_id. + type: string + dedicatedHostId: + description: The ID of a Dedicated Host where this machine should + be run on. Conflicts with dedicated_host_group_id. + type: string + disablePasswordAuthentication: + description: Should Password Authentication be disabled on this + Virtual Machine? Defaults to true. Changing this forces a new + resource to be created. + type: boolean + diskControllerType: + description: Specifies the Disk Controller Type used for this + Virtual Machine. Possible values are SCSI and NVMe. + type: string + edgeZone: + description: Specifies the Edge Zone within the Azure Region where + this Linux Virtual Machine should exist. Changing this forces + a new Linux Virtual Machine to be created. + type: string + encryptionAtHostEnabled: + description: Should all of the disks (including the temp disk) + attached to this Virtual Machine be encrypted by enabling Encryption + at Host? + type: boolean + evictionPolicy: + description: Specifies what should happen when the Virtual Machine + is evicted for price reasons when using a Spot instance. Possible + values are Deallocate and Delete. Changing this forces a new + resource to be created. + type: string + extensionsTimeBudget: + description: Specifies the duration allocated for all extensions + to start. 
The time duration should be between 15 minutes and + 120 minutes (inclusive) and should be specified in ISO 8601 + format. Defaults to PT1H30M. + type: string + galleryApplication: + description: One or more gallery_application blocks as defined + below. + items: + properties: + automaticUpgradeEnabled: + description: Specifies whether the version will be automatically + updated for the VM when a new Gallery Application version + is available in PIR/SIG. Defaults to false. + type: boolean + configurationBlobUri: + description: Specifies the URI to an Azure Blob that will + replace the default configuration for the package if provided. + type: string + order: + description: Specifies the order in which the packages have + to be installed. Possible values are between 0 and 2,147,483,647. + type: number + tag: + description: Specifies a passthrough value for more generic + context. This field can be any valid string value. + type: string + treatFailureAsDeploymentFailureEnabled: + description: Specifies whether any failure for any operation + in the VmApplication will fail the deployment of the VM. + Defaults to false. + type: boolean + versionId: + description: Specifies the Gallery Application Version resource + ID. + type: string + type: object + type: array + id: + description: The ID of the Linux Virtual Machine. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Linux Virtual Machine. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Principal ID associated with this Managed + Service Identity. + type: string + tenantId: + description: The Tenant ID associated with this Managed Service + Identity. + type: string + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Linux Virtual Machine. 
+ Possible values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + licenseType: + description: Specifies the BYOL Type for this Virtual Machine. + Possible values are RHEL_BYOS and SLES_BYOS. + type: string + location: + description: The Azure location where the Linux Virtual Machine + should exist. Changing this forces a new resource to be created. + type: string + maxBidPrice: + description: The maximum price you're willing to pay for this + Virtual Machine, in US Dollars; which must be greater than the + current spot price. If this bid price falls below the current + spot price the Virtual Machine will be evicted using the eviction_policy. + Defaults to -1, which means that the Virtual Machine should + not be evicted for price reasons. + type: number + networkInterfaceIds: + description: . A list of Network Interface IDs which should be + attached to this Virtual Machine. The first Network Interface + ID in this list will be the Primary Network Interface on the + Virtual Machine. + items: + type: string + type: array + osDisk: + description: A os_disk block as defined below. + properties: + caching: + description: The Type of Caching which should be used for + the Internal OS Disk. Possible values are None, ReadOnly + and ReadWrite. + type: string + diffDiskSettings: + description: A diff_disk_settings block as defined above. + Changing this forces a new resource to be created. + properties: + option: + description: Specifies the Ephemeral Disk Settings for + the OS Disk. At this time the only possible value is + Local. Changing this forces a new resource to be created. + type: string + placement: + description: Specifies where to store the Ephemeral Disk. + Possible values are CacheDisk and ResourceDisk. Defaults + to CacheDisk. Changing this forces a new resource to + be created. 
+ type: string + type: object + diskEncryptionSetId: + description: The ID of the Disk Encryption Set which should + be used to Encrypt this OS Disk. Conflicts with secure_vm_disk_encryption_set_id. + type: string + diskSizeGb: + description: The Size of the Internal OS Disk in GB, if you + wish to vary from the size used in the image this Virtual + Machine is sourced from. + type: number + name: + description: The name which should be used for the Internal + OS Disk. Changing this forces a new resource to be created. + type: string + secureVmDiskEncryptionSetId: + description: The ID of the Disk Encryption Set which should + be used to Encrypt this OS Disk when the Virtual Machine + is a Confidential VM. Conflicts with disk_encryption_set_id. + Changing this forces a new resource to be created. + type: string + securityEncryptionType: + description: Encryption Type when the Virtual Machine is a + Confidential VM. Possible values are VMGuestStateOnly and + DiskWithVMGuestState. Changing this forces a new resource + to be created. + type: string + storageAccountType: + description: The Type of Storage Account which should back + this the Internal OS Disk. Possible values are Standard_LRS, + StandardSSD_LRS, Premium_LRS, StandardSSD_ZRS and Premium_ZRS. + Changing this forces a new resource to be created. + type: string + writeAcceleratorEnabled: + description: Should Write Accelerator be Enabled for this + OS Disk? Defaults to false. + type: boolean + type: object + osImageNotification: + description: A os_image_notification block as defined below. + properties: + timeout: + description: Length of time a notification to be sent to the + VM on the instance metadata server till the VM gets OS upgraded. + The only possible value is PT15M. Defaults to PT15M. + type: string + type: object + patchAssessmentMode: + description: Specifies the mode of VM Guest Patching for the Virtual + Machine. Possible values are AutomaticByPlatform or ImageDefault. 
+ Defaults to ImageDefault. + type: string + patchMode: + description: Specifies the mode of in-guest patching to this Linux + Virtual Machine. Possible values are AutomaticByPlatform and + ImageDefault. Defaults to ImageDefault. For more information + on patch modes please see the product documentation. + type: string + plan: + description: A plan block as defined below. Changing this forces + a new resource to be created. + properties: + name: + description: Specifies the Name of the Marketplace Image this + Virtual Machine should be created from. Changing this forces + a new resource to be created. + type: string + product: + description: Specifies the Product of the Marketplace Image + this Virtual Machine should be created from. Changing this + forces a new resource to be created. + type: string + publisher: + description: Specifies the Publisher of the Marketplace Image + this Virtual Machine should be created from. Changing this + forces a new resource to be created. + type: string + type: object + platformFaultDomain: + description: Specifies the Platform Fault Domain in which this + Linux Virtual Machine should be created. Defaults to -1, which + means this will be automatically assigned to a fault domain + that best maintains balance across the available fault domains. + Changing this forces a new Linux Virtual Machine to be created. + type: number + priority: + description: Specifies the priority of this Virtual Machine. Possible + values are Regular and Spot. Defaults to Regular. Changing this + forces a new resource to be created. + type: string + privateIpAddress: + description: The Primary Private IP Address assigned to this Virtual + Machine. + type: string + privateIpAddresses: + description: A list of Private IP Addresses assigned to this Virtual + Machine. + items: + type: string + type: array + provisionVmAgent: + description: Should the Azure VM Agent be provisioned on this + Virtual Machine? Defaults to true. 
Changing this forces a new + resource to be created. + type: boolean + proximityPlacementGroupId: + description: The ID of the Proximity Placement Group which the + Virtual Machine should be assigned to. + type: string + publicIpAddress: + description: The Primary Public IP Address assigned to this Virtual + Machine. + type: string + publicIpAddresses: + description: A list of the Public IP Addresses assigned to this + Virtual Machine. + items: + type: string + type: array + rebootSetting: + description: Specifies the reboot setting for platform scheduled + patching. Possible values are Always, IfRequired and Never. + type: string + resourceGroupName: + description: The name of the Resource Group in which the Linux + Virtual Machine should be exist. Changing this forces a new + resource to be created. + type: string + secret: + description: One or more secret blocks as defined below. + items: + properties: + certificate: + description: One or more certificate blocks as defined above. + items: + properties: + url: + description: The Secret URL of a Key Vault Certificate. + type: string + type: object + type: array + keyVaultId: + description: The ID of the Key Vault from which all Secrets + should be sourced. + type: string + type: object + type: array + secureBootEnabled: + description: Specifies whether secure boot should be enabled on + the virtual machine. Changing this forces a new resource to + be created. + type: boolean + size: + description: The SKU which should be used for this Virtual Machine, + such as Standard_F2. + type: string + sourceImageId: + description: The ID of the Image which this Virtual Machine should + be created from. Changing this forces a new resource to be created. + Possible Image ID types include Image IDs, Shared Image IDs, + Shared Image Version IDs, Community Gallery Image IDs, Community + Gallery Image Version IDs, Shared Gallery Image IDs and Shared + Gallery Image Version IDs. 
+ type: string + sourceImageReference: + description: A source_image_reference block as defined below. + Changing this forces a new resource to be created. + properties: + offer: + description: Specifies the offer of the image used to create + the virtual machines. Changing this forces a new resource + to be created. + type: string + publisher: + description: Specifies the publisher of the image used to + create the virtual machines. Changing this forces a new + resource to be created. + type: string + sku: + description: Specifies the SKU of the image used to create + the virtual machines. Changing this forces a new resource + to be created. + type: string + version: + description: Specifies the version of the image used to create + the virtual machines. Changing this forces a new resource + to be created. + type: string + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to this + Virtual Machine. + type: object + x-kubernetes-map-type: granular + terminationNotification: + description: A termination_notification block as defined below. + properties: + enabled: + description: Should the termination notification be enabled + on this Virtual Machine? + type: boolean + timeout: + description: Length of time (in minutes, between 5 and 15) + a notification to be sent to the VM on the instance metadata + server till the VM gets deleted. The time duration should + be specified in ISO 8601 format. Defaults to PT5M. + type: string + type: object + userData: + description: The Base64-Encoded User Data which should be used + for this Virtual Machine. + type: string + virtualMachineId: + description: A 128-bit identifier which uniquely identifies this + Virtual Machine. + type: string + virtualMachineScaleSetId: + description: Specifies the Orchestrated Virtual Machine Scale + Set that this Virtual Machine should be created within. 
+ type: string + vmAgentPlatformUpdatesEnabled: + description: Specifies whether VMAgent Platform Updates is enabled. + Defaults to false. + type: boolean + vtpmEnabled: + description: Specifies whether vTPM should be enabled on the virtual + machine. Changing this forces a new resource to be created. + type: boolean + zone: + description: Specifies the Availability Zones in which this Linux + Virtual Machine should be located. Changing this forces a new + Linux Virtual Machine to be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/compute.azure.upbound.io_linuxvirtualmachinescalesets.yaml b/package/crds/compute.azure.upbound.io_linuxvirtualmachinescalesets.yaml index dc68b04a9..ba5debfe0 100644 --- a/package/crds/compute.azure.upbound.io_linuxvirtualmachinescalesets.yaml +++ b/package/crds/compute.azure.upbound.io_linuxvirtualmachinescalesets.yaml @@ -3061,3 +3061,2932 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: LinuxVirtualMachineScaleSet is the Schema for the LinuxVirtualMachineScaleSets + API. Manages a Linux Virtual Machine Scale Set. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: LinuxVirtualMachineScaleSetSpec defines the desired state + of LinuxVirtualMachineScaleSet + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + additionalCapabilities: + description: An additional_capabilities block as defined below. + properties: + ultraSsdEnabled: + description: Should the capacity to enable Data Disks of the + UltraSSD_LRS storage account type be supported on this Virtual + Machine Scale Set? Possible values are true or false. Defaults + to false. Changing this forces a new resource to be created. + type: boolean + type: object + adminPasswordSecretRef: + description: The Password which should be used for the local-administrator + on this Virtual Machine. Changing this forces a new resource + to be created. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - key + - name + - namespace + type: object + adminSshKey: + description: One or more admin_ssh_key blocks as defined below. + items: + properties: + publicKey: + description: The Public Key which should be used for authentication, + which needs to be at least 2048-bit and in ssh-rsa format. + type: string + username: + description: The Username for which this Public SSH Key + should be configured. + type: string + type: object + type: array + adminUsername: + description: The username of the local administrator on each Virtual + Machine Scale Set instance. Changing this forces a new resource + to be created. + type: string + automaticInstanceRepair: + description: An automatic_instance_repair block as defined below. + To enable the automatic instance repair, this Virtual Machine + Scale Set must have a valid health_probe_id or an Application + Health Extension. + properties: + enabled: + description: Should the automatic instance repair be enabled + on this Virtual Machine Scale Set? + type: boolean + gracePeriod: + description: Amount of time (in minutes, between 30 and 90) + for which automatic repairs will be delayed. The grace period + starts right after the VM is found unhealthy. The time duration + should be specified in ISO 8601 format. Defaults to PT30M. + type: string + type: object + automaticOsUpgradePolicy: + description: An automatic_os_upgrade_policy block as defined below. + This can only be specified when upgrade_mode is set to either + Automatic or Rolling. + properties: + disableAutomaticRollback: + description: Should automatic rollbacks be disabled? + type: boolean + enableAutomaticOsUpgrade: + description: Should OS Upgrades automatically be applied to + Scale Set instances in a rolling fashion when a newer version + of the OS Image becomes available? + type: boolean + type: object + bootDiagnostics: + description: A boot_diagnostics block as defined below. 
+ properties: + storageAccountUri: + description: The Primary/Secondary Endpoint for the Azure + Storage Account which should be used to store Boot Diagnostics, + including Console Output and Screenshots from the Hypervisor. + type: string + type: object + capacityReservationGroupId: + description: Specifies the ID of the Capacity Reservation Group + which the Virtual Machine Scale Set should be allocated to. + Changing this forces a new resource to be created. + type: string + computerNamePrefix: + description: The prefix which should be used for the name of the + Virtual Machines in this Scale Set. If unspecified this defaults + to the value for the name field. If the value of the name field + is not a valid computer_name_prefix, then you must specify computer_name_prefix. + Changing this forces a new resource to be created. + type: string + customDataSecretRef: + description: The Base64-Encoded Custom Data which should be used + for this Virtual Machine Scale Set. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + dataDisk: + description: One or more data_disk blocks as defined below. + items: + properties: + caching: + description: The type of Caching which should be used for + this Data Disk. Possible values are None, ReadOnly and + ReadWrite. + type: string + createOption: + description: The create option which should be used for + this Data Disk. Possible values are Empty and FromImage. + Defaults to Empty. (FromImage should only be used if the + source image includes data disks). + type: string + diskEncryptionSetId: + description: The ID of the Disk Encryption Set which should + be used to encrypt this OS Disk. Conflicts with secure_vm_disk_encryption_set_id. + Changing this forces a new resource to be created. 
+ type: string + diskSizeGb: + description: The size of the Data Disk which should be created. + type: number + lun: + description: The Logical Unit Number of the Data Disk, which + must be unique within the Virtual Machine. + type: number + name: + description: The name of the Data Disk. + type: string + storageAccountType: + description: The Type of Storage Account which should back + this Data Disk. Possible values include Standard_LRS, + StandardSSD_LRS, StandardSSD_ZRS, Premium_LRS, PremiumV2_LRS, + Premium_ZRS and UltraSSD_LRS. + type: string + ultraSsdDiskIopsReadWrite: + description: Specifies the Read-Write IOPS for this Data + Disk. Only settable when storage_account_type is PremiumV2_LRS + or UltraSSD_LRS. + type: number + ultraSsdDiskMbpsReadWrite: + description: Specifies the bandwidth in MB per second for + this Data Disk. Only settable when storage_account_type + is PremiumV2_LRS or UltraSSD_LRS. + type: number + writeAcceleratorEnabled: + description: Should Write Accelerator be Enabled for this + OS Disk? Defaults to false. + type: boolean + type: object + type: array + disablePasswordAuthentication: + description: Should Password Authentication be disabled on this + Virtual Machine Scale Set? Defaults to true. + type: boolean + doNotRunExtensionsOnOverprovisionedMachines: + description: Should Virtual Machine Extensions be run on Overprovisioned + Virtual Machines in the Scale Set? Defaults to false. + type: boolean + edgeZone: + description: Specifies the Edge Zone within the Azure Region where + this Linux Virtual Machine Scale Set should exist. Changing + this forces a new Linux Virtual Machine Scale Set to be created. + type: string + encryptionAtHostEnabled: + description: Should all of the disks (including the temp disk) + attached to this Virtual Machine be encrypted by enabling Encryption + at Host? + type: boolean + evictionPolicy: + description: Specifies the eviction policy for Virtual Machines + in this Scale Set. 
Possible values are Deallocate and Delete. + Changing this forces a new resource to be created. + type: string + extension: + description: One or more extension blocks as defined below + items: + properties: + autoUpgradeMinorVersion: + description: Should the latest version of the Extension + be used at Deployment Time, if one is available? This + won't auto-update the extension on existing installation. + Defaults to true. + type: boolean + automaticUpgradeEnabled: + description: Should the Extension be automatically updated + whenever the Publisher releases a new version of this + VM Extension? + type: boolean + forceUpdateTag: + description: A value which, when different to the previous + value can be used to force-run the Extension even if the + Extension Configuration hasn't changed. + type: string + name: + description: The name for the Virtual Machine Scale Set + Extension. + type: string + protectedSettingsFromKeyVault: + description: A protected_settings_from_key_vault block as + defined below. + properties: + secretUrl: + description: The URL to the Key Vault Secret which stores + the protected settings. + type: string + sourceVaultId: + description: The ID of the source Key Vault. + type: string + type: object + protectedSettingsSecretRef: + description: A JSON String which specifies Sensitive Settings + (such as Passwords) for the Extension. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + provisionAfterExtensions: + description: An ordered list of Extension names which this + should be provisioned after. + items: + type: string + type: array + publisher: + description: Specifies the Publisher of the Extension. + type: string + settings: + description: A JSON String which specifies Settings for + the Extension. 
+ type: string + type: + description: Specifies the Type of the Extension. + type: string + typeHandlerVersion: + description: Specifies the version of the extension to use, + available versions can be found using the Azure CLI. + type: string + type: object + type: array + extensionOperationsEnabled: + description: Should extension operations be allowed on the Virtual + Machine Scale Set? Possible values are true or false. Defaults + to true. Changing this forces a new Linux Virtual Machine Scale + Set to be created. + type: boolean + extensionsTimeBudget: + description: Specifies the duration allocated for all extensions + to start. The time duration should be between 15 minutes and + 120 minutes (inclusive) and should be specified in ISO 8601 + format. Defaults to PT1H30M. + type: string + galleryApplication: + description: One or more gallery_application blocks as defined + below. + items: + properties: + configurationBlobUri: + description: Specifies the URI to an Azure Blob that will + replace the default configuration for the package if provided. + Changing this forces a new resource to be created. + type: string + order: + description: Specifies the order in which the packages have + to be installed. Possible values are between 0 and 2,147,483,647. + Changing this forces a new resource to be created. + type: number + tag: + description: Specifies a passthrough value for more generic + context. This field can be any valid string value. Changing + this forces a new resource to be created. + type: string + versionId: + description: Specifies the Gallery Application Version resource + ID. Changing this forces a new resource to be created. + type: string + type: object + type: array + galleryApplications: + items: + properties: + configurationReferenceBlobUri: + type: string + order: + description: Specifies the order in which the packages have + to be installed. Possible values are between 0 and 2,147,483,647. 
+ Changing this forces a new resource to be created. + type: number + packageReferenceId: + description: The ID of the Linux Virtual Machine Scale Set. + type: string + tag: + description: The IP Tag associated with the Public IP, such + as SQL or Storage. Changing this forces a new resource + to be created. + type: string + type: object + type: array + healthProbeId: + description: The ID of a Load Balancer Probe which should be used + to determine the health of an instance. This is Required and + can only be specified when upgrade_mode is set to Automatic + or Rolling. + type: string + hostGroupId: + description: Specifies the ID of the dedicated host group that + the virtual machine scale set resides in. Changing this forces + a new resource to be created. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Linux Virtual Machine Scale Set. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Linux Virtual Machine + Scale Set. Possible values are SystemAssigned, UserAssigned, + SystemAssigned, UserAssigned (to enable both). + type: string + type: object + instances: + description: The number of Virtual Machines in the Scale Set. + Defaults to 0. + type: number + location: + description: The Azure location where the Linux Virtual Machine + Scale Set should exist. Changing this forces a new resource + to be created. + type: string + maxBidPrice: + description: The maximum price you're willing to pay for each + Virtual Machine in this Scale Set, in US Dollars; which must + be greater than the current spot price. If this bid price falls + below the current spot price the Virtual Machines in the Scale + Set will be evicted using the eviction_policy. 
Defaults to -1, + which means that each Virtual Machine in this Scale Set should + not be evicted for price reasons. + type: number + networkInterface: + description: One or more network_interface blocks as defined below. + items: + properties: + dnsServers: + description: A list of IP Addresses of DNS Servers which + should be assigned to the Network Interface. + items: + type: string + type: array + enableAcceleratedNetworking: + description: Does this Network Interface support Accelerated + Networking? Defaults to false. + type: boolean + enableIpForwarding: + description: Does this Network Interface support IP Forwarding? + Defaults to false. + type: boolean + ipConfiguration: + description: One or more ip_configuration blocks as defined + above. + items: + properties: + applicationGatewayBackendAddressPoolIds: + description: A list of Backend Address Pools ID's + from a Application Gateway which this Virtual Machine + Scale Set should be connected to. + items: + type: string + type: array + x-kubernetes-list-type: set + applicationSecurityGroupIds: + description: A list of Application Security Group + ID's which this Virtual Machine Scale Set should + be connected to. + items: + type: string + type: array + x-kubernetes-list-type: set + loadBalancerBackendAddressPoolIds: + description: A list of Backend Address Pools ID's + from a Load Balancer which this Virtual Machine + Scale Set should be connected to. + items: + type: string + type: array + x-kubernetes-list-type: set + loadBalancerInboundNatRulesIds: + description: A list of NAT Rule ID's from a Load Balancer + which this Virtual Machine Scale Set should be connected + to. + items: + type: string + type: array + x-kubernetes-list-type: set + name: + description: The Name of the Public IP Address Configuration. + type: string + primary: + description: Is this the Primary IP Configuration? + type: boolean + publicIpAddress: + description: A public_ip_address block as defined + below. 
+ items: + properties: + domainNameLabel: + description: The Prefix which should be used + for the Domain Name Label for each Virtual + Machine Instance. Azure concatenates the Domain + Name Label and Virtual Machine Index to create + a unique Domain Name Label for each Virtual + Machine. + type: string + idleTimeoutInMinutes: + description: The Idle Timeout in Minutes for + the Public IP Address. Possible values are + in the range 4 to 32. + type: number + ipTag: + description: One or more ip_tag blocks as defined + above. Changing this forces a new resource + to be created. + items: + properties: + tag: + description: The IP Tag associated with + the Public IP, such as SQL or Storage. + Changing this forces a new resource + to be created. + type: string + type: + description: The Type of IP Tag, such + as FirstPartyUsage. Changing this forces + a new resource to be created. + type: string + type: object + type: array + name: + description: The Name of the Public IP Address + Configuration. + type: string + publicIpPrefixId: + description: The ID of the Public IP Address + Prefix from where Public IP Addresses should + be allocated. Changing this forces a new resource + to be created. + type: string + version: + description: Specifies the version of the image + used to create the virtual machines. + type: string + type: object + type: array + subnetId: + description: The ID of the Subnet which this IP Configuration + should be connected to. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + version: + description: Specifies the version of the image used + to create the virtual machines. + type: string + type: object + type: array + name: + description: The Name which should be used for this Network + Interface. Changing this forces a new resource to be created. 
+ type: string + networkSecurityGroupId: + description: The ID of a Network Security Group which should + be assigned to this Network Interface. + type: string + primary: + description: Is this the Primary IP Configuration? + type: boolean + type: object + type: array + osDisk: + description: An os_disk block as defined below. + properties: + caching: + description: The Type of Caching which should be used for + the Internal OS Disk. Possible values are None, ReadOnly + and ReadWrite. + type: string + diffDiskSettings: + description: A diff_disk_settings block as defined above. + Changing this forces a new resource to be created. + properties: + option: + description: Specifies the Ephemeral Disk Settings for + the OS Disk. At this time the only possible value is + Local. Changing this forces a new resource to be created. + type: string + placement: + description: Specifies where to store the Ephemeral Disk. + Possible values are CacheDisk and ResourceDisk. Defaults + to CacheDisk. Changing this forces a new resource to + be created. + type: string + type: object + diskEncryptionSetId: + description: The ID of the Disk Encryption Set which should + be used to encrypt this OS Disk. Conflicts with secure_vm_disk_encryption_set_id. + Changing this forces a new resource to be created. + type: string + diskSizeGb: + description: The Size of the Internal OS Disk in GB, if you + wish to vary from the size used in the image this Virtual + Machine Scale Set is sourced from. + type: number + secureVmDiskEncryptionSetId: + description: The ID of the Disk Encryption Set which should + be used to Encrypt the OS Disk when the Virtual Machine + Scale Set is Confidential VMSS. Conflicts with disk_encryption_set_id. + Changing this forces a new resource to be created. + type: string + securityEncryptionType: + description: Encryption Type when the Virtual Machine Scale + Set is Confidential VMSS. Possible values are VMGuestStateOnly + and DiskWithVMGuestState. 
Changing this forces a new resource + to be created. + type: string + storageAccountType: + description: The Type of Storage Account which should back + this the Internal OS Disk. Possible values include Standard_LRS, + StandardSSD_LRS, StandardSSD_ZRS, Premium_LRS and Premium_ZRS. + Changing this forces a new resource to be created. + type: string + writeAcceleratorEnabled: + description: Should Write Accelerator be Enabled for this + OS Disk? Defaults to false. + type: boolean + type: object + overprovision: + description: Should Azure over-provision Virtual Machines in this + Scale Set? This means that multiple Virtual Machines will be + provisioned and Azure will keep the instances which become available + first - which improves provisioning success rates and improves + deployment time. You're not billed for these over-provisioned + VM's and they don't count towards the Subscription Quota. Defaults + to true. + type: boolean + plan: + description: A plan block as defined below. Changing this forces + a new resource to be created. + properties: + name: + description: Specifies the name of the image from the marketplace. + Changing this forces a new resource to be created. + type: string + product: + description: Specifies the product of the image from the marketplace. + Changing this forces a new resource to be created. + type: string + publisher: + description: Specifies the publisher of the image. Changing + this forces a new resource to be created. + type: string + type: object + platformFaultDomainCount: + description: Specifies the number of fault domains that are used + by this Linux Virtual Machine Scale Set. Changing this forces + a new resource to be created. + type: number + priority: + description: The Priority of this Virtual Machine Scale Set. Possible + values are Regular and Spot. Defaults to Regular. Changing this + value forces a new resource. 
+ type: string + provisionVmAgent: + description: Should the Azure VM Agent be provisioned on each + Virtual Machine in the Scale Set? Defaults to true. Changing + this value forces a new resource to be created. + type: boolean + proximityPlacementGroupId: + description: The ID of the Proximity Placement Group in which + the Virtual Machine Scale Set should be assigned to. Changing + this forces a new resource to be created. + type: string + resourceGroupName: + description: The name of the Resource Group in which the Linux + Virtual Machine Scale Set should be exist. Changing this forces + a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + rollingUpgradePolicy: + description: A rolling_upgrade_policy block as defined below. + This is Required and can only be specified when upgrade_mode + is set to Automatic or Rolling. Changing this forces a new resource + to be created. + properties: + crossZoneUpgradesEnabled: + description: Should the Virtual Machine Scale Set ignore the + Azure Zone boundaries when constructing upgrade batches? + Possible values are true or false. + type: boolean + maxBatchInstancePercent: + description: The maximum percent of total virtual machine + instances that will be upgraded simultaneously by the rolling + upgrade in one batch. As this is a maximum, unhealthy instances + in previous or future batches can cause the percentage of + instances in a batch to decrease to ensure higher reliability. 
+ type: number + maxUnhealthyInstancePercent: + description: The maximum percentage of the total virtual machine + instances in the scale set that can be simultaneously unhealthy, + either as a result of being upgraded, or by being found + in an unhealthy state by the virtual machine health checks + before the rolling upgrade aborts. This constraint will + be checked prior to starting any batch. + type: number + maxUnhealthyUpgradedInstancePercent: + description: The maximum percentage of upgraded virtual machine + instances that can be found to be in an unhealthy state. + This check will happen after each batch is upgraded. If + this percentage is ever exceeded, the rolling update aborts. + type: number + pauseTimeBetweenBatches: + description: The wait time between completing the update for + all virtual machines in one batch and starting the next + batch. The time duration should be specified in ISO 8601 + format. + type: string + prioritizeUnhealthyInstancesEnabled: + description: Upgrade all unhealthy instances in a scale set + before any healthy instances. Possible values are true or + false. + type: boolean + type: object + scaleIn: + description: A scale_in block as defined below. + properties: + forceDeletionEnabled: + description: Should the virtual machines chosen for removal + be force deleted when the virtual machine scale set is being + scaled-in? Possible values are true or false. Defaults to + false. + type: boolean + rule: + description: The scale-in policy rule that decides which virtual + machines are chosen for removal when a Virtual Machine Scale + Set is scaled in. Possible values for the scale-in policy + rules are Default, NewestVM and OldestVM, defaults to Default. + For more information about scale in policy, please refer + to this doc. + type: string + type: object + secret: + description: One or more secret blocks as defined below. + items: + properties: + certificate: + description: One or more certificate blocks as defined above. 
+ items: + properties: + url: + description: The Secret URL of a Key Vault Certificate. + type: string + type: object + type: array + keyVaultId: + description: The ID of the Key Vault from which all Secrets + should be sourced. + type: string + type: object + type: array + secureBootEnabled: + description: Specifies whether secure boot should be enabled on + the virtual machine. Changing this forces a new resource to + be created. + type: boolean + singlePlacementGroup: + description: Should this Virtual Machine Scale Set be limited + to a Single Placement Group, which means the number of instances + will be capped at 100 Virtual Machines. Defaults to true. + type: boolean + sku: + description: The Virtual Machine SKU for the Scale Set, such as + Standard_F2. + type: string + sourceImageId: + description: The ID of an Image which each Virtual Machine in + this Scale Set should be based on. Possible Image ID types include + Image ID, Shared Image ID, Shared Image Version ID, Community + Gallery Image ID, Community Gallery Image Version ID, Shared + Gallery Image ID and Shared Gallery Image Version ID. + type: string + sourceImageReference: + description: A source_image_reference block as defined below. + properties: + offer: + description: Specifies the offer of the image used to create + the virtual machines. Changing this forces a new resource + to be created. + type: string + publisher: + description: Specifies the publisher of the image used to + create the virtual machines. Changing this forces a new + resource to be created. + type: string + sku: + description: Specifies the SKU of the image used to create + the virtual machines. + type: string + version: + description: Specifies the version of the image used to create + the virtual machines. + type: string + type: object + spotRestore: + description: A spot_restore block as defined below. + properties: + enabled: + description: Should the Spot-Try-Restore feature be enabled? 
+ The Spot-Try-Restore feature will attempt to automatically + restore the evicted Spot Virtual Machine Scale Set VM instances + opportunistically based on capacity availability and pricing + constraints. Possible values are true or false. Defaults + to false. Changing this forces a new resource to be created. + type: boolean + timeout: + description: The length of time that the Virtual Machine Scale + Set should attempt to restore the Spot VM instances which + have been evicted. The time duration should be between 15 + minutes and 120 minutes (inclusive). The time duration should + be specified in the ISO 8601 format. Defaults to PT1H. Changing + this forces a new resource to be created. + type: string + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to this + Virtual Machine Scale Set. + type: object + x-kubernetes-map-type: granular + terminateNotification: + description: A terminate_notification block as defined below. + properties: + enabled: + description: Should the terminate notification be enabled + on this Virtual Machine Scale Set? + type: boolean + timeout: + description: Length of time (in minutes, between 5 and 15) + a notification to be sent to the VM on the instance metadata + server till the VM gets deleted. The time duration should + be specified in ISO 8601 format. Defaults to PT5M. + type: string + type: object + terminationNotification: + description: A termination_notification block as defined below. + properties: + enabled: + description: Should the termination notification be enabled + on this Virtual Machine Scale Set? + type: boolean + timeout: + description: Length of time (in minutes, between 5 and 15) + a notification to be sent to the VM on the instance metadata + server till the VM gets deleted. The time duration should + be specified in ISO 8601 format. Defaults to PT5M. + type: string + type: object + upgradeMode: + description: Specifies how Upgrades (e.g. 
changing the Image/SKU) + should be performed to Virtual Machine Instances. Possible values + are Automatic, Manual and Rolling. Defaults to Manual. Changing + this forces a new resource to be created. + type: string + userData: + description: The Base64-Encoded User Data which should be used + for this Virtual Machine Scale Set. + type: string + vtpmEnabled: + description: Specifies whether vTPM should be enabled on the virtual + machine. Changing this forces a new resource to be created. + type: boolean + zoneBalance: + description: Should the Virtual Machines in this Scale Set be + strictly evenly distributed across Availability Zones? Defaults + to false. Changing this forces a new resource to be created. + type: boolean + zones: + description: Specifies a list of Availability Zones in which this + Linux Virtual Machine Scale Set should be located. Changing + this forces a new Linux Virtual Machine Scale Set to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + additionalCapabilities: + description: An additional_capabilities block as defined below. + properties: + ultraSsdEnabled: + description: Should the capacity to enable Data Disks of the + UltraSSD_LRS storage account type be supported on this Virtual + Machine Scale Set? 
Possible values are true or false. Defaults + to false. Changing this forces a new resource to be created. + type: boolean + type: object + adminSshKey: + description: One or more admin_ssh_key blocks as defined below. + items: + properties: + publicKey: + description: The Public Key which should be used for authentication, + which needs to be at least 2048-bit and in ssh-rsa format. + type: string + username: + description: The Username for which this Public SSH Key + should be configured. + type: string + type: object + type: array + adminUsername: + description: The username of the local administrator on each Virtual + Machine Scale Set instance. Changing this forces a new resource + to be created. + type: string + automaticInstanceRepair: + description: An automatic_instance_repair block as defined below. + To enable the automatic instance repair, this Virtual Machine + Scale Set must have a valid health_probe_id or an Application + Health Extension. + properties: + enabled: + description: Should the automatic instance repair be enabled + on this Virtual Machine Scale Set? + type: boolean + gracePeriod: + description: Amount of time (in minutes, between 30 and 90) + for which automatic repairs will be delayed. The grace period + starts right after the VM is found unhealthy. The time duration + should be specified in ISO 8601 format. Defaults to PT30M. + type: string + type: object + automaticOsUpgradePolicy: + description: An automatic_os_upgrade_policy block as defined below. + This can only be specified when upgrade_mode is set to either + Automatic or Rolling. + properties: + disableAutomaticRollback: + description: Should automatic rollbacks be disabled? + type: boolean + enableAutomaticOsUpgrade: + description: Should OS Upgrades automatically be applied to + Scale Set instances in a rolling fashion when a newer version + of the OS Image becomes available? 
+ type: boolean + type: object + bootDiagnostics: + description: A boot_diagnostics block as defined below. + properties: + storageAccountUri: + description: The Primary/Secondary Endpoint for the Azure + Storage Account which should be used to store Boot Diagnostics, + including Console Output and Screenshots from the Hypervisor. + type: string + type: object + capacityReservationGroupId: + description: Specifies the ID of the Capacity Reservation Group + which the Virtual Machine Scale Set should be allocated to. + Changing this forces a new resource to be created. + type: string + computerNamePrefix: + description: The prefix which should be used for the name of the + Virtual Machines in this Scale Set. If unspecified this defaults + to the value for the name field. If the value of the name field + is not a valid computer_name_prefix, then you must specify computer_name_prefix. + Changing this forces a new resource to be created. + type: string + dataDisk: + description: One or more data_disk blocks as defined below. + items: + properties: + caching: + description: The type of Caching which should be used for + this Data Disk. Possible values are None, ReadOnly and + ReadWrite. + type: string + createOption: + description: The create option which should be used for + this Data Disk. Possible values are Empty and FromImage. + Defaults to Empty. (FromImage should only be used if the + source image includes data disks). + type: string + diskEncryptionSetId: + description: The ID of the Disk Encryption Set which should + be used to encrypt this OS Disk. Conflicts with secure_vm_disk_encryption_set_id. + Changing this forces a new resource to be created. + type: string + diskSizeGb: + description: The size of the Data Disk which should be created. + type: number + lun: + description: The Logical Unit Number of the Data Disk, which + must be unique within the Virtual Machine. + type: number + name: + description: The name of the Data Disk. 
+ type: string + storageAccountType: + description: The Type of Storage Account which should back + this Data Disk. Possible values include Standard_LRS, + StandardSSD_LRS, StandardSSD_ZRS, Premium_LRS, PremiumV2_LRS, + Premium_ZRS and UltraSSD_LRS. + type: string + ultraSsdDiskIopsReadWrite: + description: Specifies the Read-Write IOPS for this Data + Disk. Only settable when storage_account_type is PremiumV2_LRS + or UltraSSD_LRS. + type: number + ultraSsdDiskMbpsReadWrite: + description: Specifies the bandwidth in MB per second for + this Data Disk. Only settable when storage_account_type + is PremiumV2_LRS or UltraSSD_LRS. + type: number + writeAcceleratorEnabled: + description: Should Write Accelerator be Enabled for this + OS Disk? Defaults to false. + type: boolean + type: object + type: array + disablePasswordAuthentication: + description: Should Password Authentication be disabled on this + Virtual Machine Scale Set? Defaults to true. + type: boolean + doNotRunExtensionsOnOverprovisionedMachines: + description: Should Virtual Machine Extensions be run on Overprovisioned + Virtual Machines in the Scale Set? Defaults to false. + type: boolean + edgeZone: + description: Specifies the Edge Zone within the Azure Region where + this Linux Virtual Machine Scale Set should exist. Changing + this forces a new Linux Virtual Machine Scale Set to be created. + type: string + encryptionAtHostEnabled: + description: Should all of the disks (including the temp disk) + attached to this Virtual Machine be encrypted by enabling Encryption + at Host? + type: boolean + evictionPolicy: + description: Specifies the eviction policy for Virtual Machines + in this Scale Set. Possible values are Deallocate and Delete. + Changing this forces a new resource to be created. 
+ type: string + extension: + description: One or more extension blocks as defined below + items: + properties: + autoUpgradeMinorVersion: + description: Should the latest version of the Extension + be used at Deployment Time, if one is available? This + won't auto-update the extension on existing installation. + Defaults to true. + type: boolean + automaticUpgradeEnabled: + description: Should the Extension be automatically updated + whenever the Publisher releases a new version of this + VM Extension? + type: boolean + forceUpdateTag: + description: A value which, when different to the previous + value can be used to force-run the Extension even if the + Extension Configuration hasn't changed. + type: string + name: + description: The name for the Virtual Machine Scale Set + Extension. + type: string + protectedSettingsFromKeyVault: + description: A protected_settings_from_key_vault block as + defined below. + properties: + secretUrl: + description: The URL to the Key Vault Secret which stores + the protected settings. + type: string + sourceVaultId: + description: The ID of the source Key Vault. + type: string + type: object + provisionAfterExtensions: + description: An ordered list of Extension names which this + should be provisioned after. + items: + type: string + type: array + publisher: + description: Specifies the Publisher of the Extension. + type: string + settings: + description: A JSON String which specifies Settings for + the Extension. + type: string + type: + description: Specifies the Type of the Extension. + type: string + typeHandlerVersion: + description: Specifies the version of the extension to use, + available versions can be found using the Azure CLI. + type: string + type: object + type: array + extensionOperationsEnabled: + description: Should extension operations be allowed on the Virtual + Machine Scale Set? Possible values are true or false. Defaults + to true. Changing this forces a new Linux Virtual Machine Scale + Set to be created. 
+ type: boolean + extensionsTimeBudget: + description: Specifies the duration allocated for all extensions + to start. The time duration should be between 15 minutes and + 120 minutes (inclusive) and should be specified in ISO 8601 + format. Defaults to PT1H30M. + type: string + galleryApplication: + description: One or more gallery_application blocks as defined + below. + items: + properties: + configurationBlobUri: + description: Specifies the URI to an Azure Blob that will + replace the default configuration for the package if provided. + Changing this forces a new resource to be created. + type: string + order: + description: Specifies the order in which the packages have + to be installed. Possible values are between 0 and 2,147,483,647. + Changing this forces a new resource to be created. + type: number + tag: + description: Specifies a passthrough value for more generic + context. This field can be any valid string value. Changing + this forces a new resource to be created. + type: string + versionId: + description: Specifies the Gallery Application Version resource + ID. Changing this forces a new resource to be created. + type: string + type: object + type: array + galleryApplications: + items: + properties: + configurationReferenceBlobUri: + type: string + order: + description: Specifies the order in which the packages have + to be installed. Possible values are between 0 and 2,147,483,647. + Changing this forces a new resource to be created. + type: number + packageReferenceId: + description: The ID of the Linux Virtual Machine Scale Set. + type: string + tag: + description: The IP Tag associated with the Public IP, such + as SQL or Storage. Changing this forces a new resource + to be created. + type: string + type: object + type: array + healthProbeId: + description: The ID of a Load Balancer Probe which should be used + to determine the health of an instance. 
This is Required and + can only be specified when upgrade_mode is set to Automatic + or Rolling. + type: string + hostGroupId: + description: Specifies the ID of the dedicated host group that + the virtual machine scale set resides in. Changing this forces + a new resource to be created. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Linux Virtual Machine Scale Set. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Linux Virtual Machine + Scale Set. Possible values are SystemAssigned, UserAssigned, + SystemAssigned, UserAssigned (to enable both). + type: string + type: object + instances: + description: The number of Virtual Machines in the Scale Set. + Defaults to 0. + type: number + location: + description: The Azure location where the Linux Virtual Machine + Scale Set should exist. Changing this forces a new resource + to be created. + type: string + maxBidPrice: + description: The maximum price you're willing to pay for each + Virtual Machine in this Scale Set, in US Dollars; which must + be greater than the current spot price. If this bid price falls + below the current spot price the Virtual Machines in the Scale + Set will be evicted using the eviction_policy. Defaults to -1, + which means that each Virtual Machine in this Scale Set should + not be evicted for price reasons. + type: number + networkInterface: + description: One or more network_interface blocks as defined below. + items: + properties: + dnsServers: + description: A list of IP Addresses of DNS Servers which + should be assigned to the Network Interface. + items: + type: string + type: array + enableAcceleratedNetworking: + description: Does this Network Interface support Accelerated + Networking? Defaults to false. 
+ type: boolean + enableIpForwarding: + description: Does this Network Interface support IP Forwarding? + Defaults to false. + type: boolean + ipConfiguration: + description: One or more ip_configuration blocks as defined + above. + items: + properties: + applicationGatewayBackendAddressPoolIds: + description: A list of Backend Address Pools ID's + from a Application Gateway which this Virtual Machine + Scale Set should be connected to. + items: + type: string + type: array + x-kubernetes-list-type: set + applicationSecurityGroupIds: + description: A list of Application Security Group + ID's which this Virtual Machine Scale Set should + be connected to. + items: + type: string + type: array + x-kubernetes-list-type: set + loadBalancerBackendAddressPoolIds: + description: A list of Backend Address Pools ID's + from a Load Balancer which this Virtual Machine + Scale Set should be connected to. + items: + type: string + type: array + x-kubernetes-list-type: set + loadBalancerInboundNatRulesIds: + description: A list of NAT Rule ID's from a Load Balancer + which this Virtual Machine Scale Set should be connected + to. + items: + type: string + type: array + x-kubernetes-list-type: set + name: + description: The Name of the Public IP Address Configuration. + type: string + primary: + description: Is this the Primary IP Configuration? + type: boolean + publicIpAddress: + description: A public_ip_address block as defined + below. + items: + properties: + domainNameLabel: + description: The Prefix which should be used + for the Domain Name Label for each Virtual + Machine Instance. Azure concatenates the Domain + Name Label and Virtual Machine Index to create + a unique Domain Name Label for each Virtual + Machine. + type: string + idleTimeoutInMinutes: + description: The Idle Timeout in Minutes for + the Public IP Address. Possible values are + in the range 4 to 32. + type: number + ipTag: + description: One or more ip_tag blocks as defined + above. 
Changing this forces a new resource + to be created. + items: + properties: + tag: + description: The IP Tag associated with + the Public IP, such as SQL or Storage. + Changing this forces a new resource + to be created. + type: string + type: + description: The Type of IP Tag, such + as FirstPartyUsage. Changing this forces + a new resource to be created. + type: string + type: object + type: array + name: + description: The Name of the Public IP Address + Configuration. + type: string + publicIpPrefixId: + description: The ID of the Public IP Address + Prefix from where Public IP Addresses should + be allocated. Changing this forces a new resource + to be created. + type: string + version: + description: Specifies the version of the image + used to create the virtual machines. + type: string + type: object + type: array + subnetId: + description: The ID of the Subnet which this IP Configuration + should be connected to. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + version: + description: Specifies the version of the image used + to create the virtual machines. + type: string + type: object + type: array + name: + description: The Name which should be used for this Network + Interface. Changing this forces a new resource to be created. + type: string + networkSecurityGroupId: + description: The ID of a Network Security Group which should + be assigned to this Network Interface. + type: string + primary: + description: Is this the Primary IP Configuration? + type: boolean + type: object + type: array + osDisk: + description: An os_disk block as defined below. 
+ properties: + caching: + description: The Type of Caching which should be used for + the Internal OS Disk. Possible values are None, ReadOnly + and ReadWrite. + type: string + diffDiskSettings: + description: A diff_disk_settings block as defined above. + Changing this forces a new resource to be created. + properties: + option: + description: Specifies the Ephemeral Disk Settings for + the OS Disk. At this time the only possible value is + Local. Changing this forces a new resource to be created. + type: string + placement: + description: Specifies where to store the Ephemeral Disk. + Possible values are CacheDisk and ResourceDisk. Defaults + to CacheDisk. Changing this forces a new resource to + be created. + type: string + type: object + diskEncryptionSetId: + description: The ID of the Disk Encryption Set which should + be used to encrypt this OS Disk. Conflicts with secure_vm_disk_encryption_set_id. + Changing this forces a new resource to be created. + type: string + diskSizeGb: + description: The Size of the Internal OS Disk in GB, if you + wish to vary from the size used in the image this Virtual + Machine Scale Set is sourced from. + type: number + secureVmDiskEncryptionSetId: + description: The ID of the Disk Encryption Set which should + be used to Encrypt the OS Disk when the Virtual Machine + Scale Set is Confidential VMSS. Conflicts with disk_encryption_set_id. + Changing this forces a new resource to be created. + type: string + securityEncryptionType: + description: Encryption Type when the Virtual Machine Scale + Set is Confidential VMSS. Possible values are VMGuestStateOnly + and DiskWithVMGuestState. Changing this forces a new resource + to be created. + type: string + storageAccountType: + description: The Type of Storage Account which should back + this the Internal OS Disk. Possible values include Standard_LRS, + StandardSSD_LRS, StandardSSD_ZRS, Premium_LRS and Premium_ZRS. + Changing this forces a new resource to be created. 
+ type: string + writeAcceleratorEnabled: + description: Should Write Accelerator be Enabled for this + OS Disk? Defaults to false. + type: boolean + type: object + overprovision: + description: Should Azure over-provision Virtual Machines in this + Scale Set? This means that multiple Virtual Machines will be + provisioned and Azure will keep the instances which become available + first - which improves provisioning success rates and improves + deployment time. You're not billed for these over-provisioned + VM's and they don't count towards the Subscription Quota. Defaults + to true. + type: boolean + plan: + description: A plan block as defined below. Changing this forces + a new resource to be created. + properties: + name: + description: Specifies the name of the image from the marketplace. + Changing this forces a new resource to be created. + type: string + product: + description: Specifies the product of the image from the marketplace. + Changing this forces a new resource to be created. + type: string + publisher: + description: Specifies the publisher of the image. Changing + this forces a new resource to be created. + type: string + type: object + platformFaultDomainCount: + description: Specifies the number of fault domains that are used + by this Linux Virtual Machine Scale Set. Changing this forces + a new resource to be created. + type: number + priority: + description: The Priority of this Virtual Machine Scale Set. Possible + values are Regular and Spot. Defaults to Regular. Changing this + value forces a new resource. + type: string + provisionVmAgent: + description: Should the Azure VM Agent be provisioned on each + Virtual Machine in the Scale Set? Defaults to true. Changing + this value forces a new resource to be created. + type: boolean + proximityPlacementGroupId: + description: The ID of the Proximity Placement Group in which + the Virtual Machine Scale Set should be assigned to. Changing + this forces a new resource to be created. 
+ type: string + rollingUpgradePolicy: + description: A rolling_upgrade_policy block as defined below. + This is Required and can only be specified when upgrade_mode + is set to Automatic or Rolling. Changing this forces a new resource + to be created. + properties: + crossZoneUpgradesEnabled: + description: Should the Virtual Machine Scale Set ignore the + Azure Zone boundaries when constructing upgrade batches? + Possible values are true or false. + type: boolean + maxBatchInstancePercent: + description: The maximum percent of total virtual machine + instances that will be upgraded simultaneously by the rolling + upgrade in one batch. As this is a maximum, unhealthy instances + in previous or future batches can cause the percentage of + instances in a batch to decrease to ensure higher reliability. + type: number + maxUnhealthyInstancePercent: + description: The maximum percentage of the total virtual machine + instances in the scale set that can be simultaneously unhealthy, + either as a result of being upgraded, or by being found + in an unhealthy state by the virtual machine health checks + before the rolling upgrade aborts. This constraint will + be checked prior to starting any batch. + type: number + maxUnhealthyUpgradedInstancePercent: + description: The maximum percentage of upgraded virtual machine + instances that can be found to be in an unhealthy state. + This check will happen after each batch is upgraded. If + this percentage is ever exceeded, the rolling update aborts. + type: number + pauseTimeBetweenBatches: + description: The wait time between completing the update for + all virtual machines in one batch and starting the next + batch. The time duration should be specified in ISO 8601 + format. + type: string + prioritizeUnhealthyInstancesEnabled: + description: Upgrade all unhealthy instances in a scale set + before any healthy instances. Possible values are true or + false. 
+ type: boolean + type: object + scaleIn: + description: A scale_in block as defined below. + properties: + forceDeletionEnabled: + description: Should the virtual machines chosen for removal + be force deleted when the virtual machine scale set is being + scaled-in? Possible values are true or false. Defaults to + false. + type: boolean + rule: + description: The scale-in policy rule that decides which virtual + machines are chosen for removal when a Virtual Machine Scale + Set is scaled in. Possible values for the scale-in policy + rules are Default, NewestVM and OldestVM, defaults to Default. + For more information about scale in policy, please refer + to this doc. + type: string + type: object + secret: + description: One or more secret blocks as defined below. + items: + properties: + certificate: + description: One or more certificate blocks as defined above. + items: + properties: + url: + description: The Secret URL of a Key Vault Certificate. + type: string + type: object + type: array + keyVaultId: + description: The ID of the Key Vault from which all Secrets + should be sourced. + type: string + type: object + type: array + secureBootEnabled: + description: Specifies whether secure boot should be enabled on + the virtual machine. Changing this forces a new resource to + be created. + type: boolean + singlePlacementGroup: + description: Should this Virtual Machine Scale Set be limited + to a Single Placement Group, which means the number of instances + will be capped at 100 Virtual Machines. Defaults to true. + type: boolean + sku: + description: The Virtual Machine SKU for the Scale Set, such as + Standard_F2. + type: string + sourceImageId: + description: The ID of an Image which each Virtual Machine in + this Scale Set should be based on. Possible Image ID types include + Image ID, Shared Image ID, Shared Image Version ID, Community + Gallery Image ID, Community Gallery Image Version ID, Shared + Gallery Image ID and Shared Gallery Image Version ID. 
+ type: string + sourceImageReference: + description: A source_image_reference block as defined below. + properties: + offer: + description: Specifies the offer of the image used to create + the virtual machines. Changing this forces a new resource + to be created. + type: string + publisher: + description: Specifies the publisher of the image used to + create the virtual machines. Changing this forces a new + resource to be created. + type: string + sku: + description: Specifies the SKU of the image used to create + the virtual machines. + type: string + version: + description: Specifies the version of the image used to create + the virtual machines. + type: string + type: object + spotRestore: + description: A spot_restore block as defined below. + properties: + enabled: + description: Should the Spot-Try-Restore feature be enabled? + The Spot-Try-Restore feature will attempt to automatically + restore the evicted Spot Virtual Machine Scale Set VM instances + opportunistically based on capacity availability and pricing + constraints. Possible values are true or false. Defaults + to false. Changing this forces a new resource to be created. + type: boolean + timeout: + description: The length of time that the Virtual Machine Scale + Set should attempt to restore the Spot VM instances which + have been evicted. The time duration should be between 15 + minutes and 120 minutes (inclusive). The time duration should + be specified in the ISO 8601 format. Defaults to PT1H. Changing + this forces a new resource to be created. + type: string + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to this + Virtual Machine Scale Set. + type: object + x-kubernetes-map-type: granular + terminateNotification: + description: A terminate_notification block as defined below. + properties: + enabled: + description: Should the terminate notification be enabled + on this Virtual Machine Scale Set? 
+ type: boolean + timeout: + description: Length of time (in minutes, between 5 and 15) + a notification to be sent to the VM on the instance metadata + server till the VM gets deleted. The time duration should + be specified in ISO 8601 format. Defaults to PT5M. + type: string + type: object + terminationNotification: + description: A termination_notification block as defined below. + properties: + enabled: + description: Should the termination notification be enabled + on this Virtual Machine Scale Set? + type: boolean + timeout: + description: Length of time (in minutes, between 5 and 15) + a notification to be sent to the VM on the instance metadata + server till the VM gets deleted. The time duration should + be specified in ISO 8601 format. Defaults to PT5M. + type: string + type: object + upgradeMode: + description: Specifies how Upgrades (e.g. changing the Image/SKU) + should be performed to Virtual Machine Instances. Possible values + are Automatic, Manual and Rolling. Defaults to Manual. Changing + this forces a new resource to be created. + type: string + userData: + description: The Base64-Encoded User Data which should be used + for this Virtual Machine Scale Set. + type: string + vtpmEnabled: + description: Specifies whether vTPM should be enabled on the virtual + machine. Changing this forces a new resource to be created. + type: boolean + zoneBalance: + description: Should the Virtual Machines in this Scale Set be + strictly evenly distributed across Availability Zones? Defaults + to false. Changing this forces a new resource to be created. + type: boolean + zones: + description: Specifies a list of Availability Zones in which this + Linux Virtual Machine Scale Set should be located. Changing + this forces a new Linux Virtual Machine Scale Set to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. 
It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.adminUsername is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.adminUsername) + || (has(self.initProvider) && has(self.initProvider.adminUsername))' + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.networkInterface is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.networkInterface) + || (has(self.initProvider) && has(self.initProvider.networkInterface))' + - message: spec.forProvider.osDisk is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.osDisk) + || (has(self.initProvider) && has(self.initProvider.osDisk))' + - message: spec.forProvider.sku is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.sku) + || (has(self.initProvider) && has(self.initProvider.sku))' + status: + description: LinuxVirtualMachineScaleSetStatus defines the observed state + of LinuxVirtualMachineScaleSet. + properties: + atProvider: + properties: + additionalCapabilities: + description: An additional_capabilities block as defined below. 
+ properties: + ultraSsdEnabled: + description: Should the capacity to enable Data Disks of the + UltraSSD_LRS storage account type be supported on this Virtual + Machine Scale Set? Possible values are true or false. Defaults + to false. Changing this forces a new resource to be created. + type: boolean + type: object + adminSshKey: + description: One or more admin_ssh_key blocks as defined below. + items: + properties: + publicKey: + description: The Public Key which should be used for authentication, + which needs to be at least 2048-bit and in ssh-rsa format. + type: string + username: + description: The Username for which this Public SSH Key + should be configured. + type: string + type: object + type: array + adminUsername: + description: The username of the local administrator on each Virtual + Machine Scale Set instance. Changing this forces a new resource + to be created. + type: string + automaticInstanceRepair: + description: An automatic_instance_repair block as defined below. + To enable the automatic instance repair, this Virtual Machine + Scale Set must have a valid health_probe_id or an Application + Health Extension. + properties: + enabled: + description: Should the automatic instance repair be enabled + on this Virtual Machine Scale Set? + type: boolean + gracePeriod: + description: Amount of time (in minutes, between 30 and 90) + for which automatic repairs will be delayed. The grace period + starts right after the VM is found unhealthy. The time duration + should be specified in ISO 8601 format. Defaults to PT30M. + type: string + type: object + automaticOsUpgradePolicy: + description: An automatic_os_upgrade_policy block as defined below. + This can only be specified when upgrade_mode is set to either + Automatic or Rolling. + properties: + disableAutomaticRollback: + description: Should automatic rollbacks be disabled? 
+ type: boolean + enableAutomaticOsUpgrade: + description: Should OS Upgrades automatically be applied to + Scale Set instances in a rolling fashion when a newer version + of the OS Image becomes available? + type: boolean + type: object + bootDiagnostics: + description: A boot_diagnostics block as defined below. + properties: + storageAccountUri: + description: The Primary/Secondary Endpoint for the Azure + Storage Account which should be used to store Boot Diagnostics, + including Console Output and Screenshots from the Hypervisor. + type: string + type: object + capacityReservationGroupId: + description: Specifies the ID of the Capacity Reservation Group + which the Virtual Machine Scale Set should be allocated to. + Changing this forces a new resource to be created. + type: string + computerNamePrefix: + description: The prefix which should be used for the name of the + Virtual Machines in this Scale Set. If unspecified this defaults + to the value for the name field. If the value of the name field + is not a valid computer_name_prefix, then you must specify computer_name_prefix. + Changing this forces a new resource to be created. + type: string + dataDisk: + description: One or more data_disk blocks as defined below. + items: + properties: + caching: + description: The type of Caching which should be used for + this Data Disk. Possible values are None, ReadOnly and + ReadWrite. + type: string + createOption: + description: The create option which should be used for + this Data Disk. Possible values are Empty and FromImage. + Defaults to Empty. (FromImage should only be used if the + source image includes data disks). + type: string + diskEncryptionSetId: + description: The ID of the Disk Encryption Set which should + be used to encrypt this OS Disk. Conflicts with secure_vm_disk_encryption_set_id. + Changing this forces a new resource to be created. + type: string + diskSizeGb: + description: The size of the Data Disk which should be created. 
+ type: number + lun: + description: The Logical Unit Number of the Data Disk, which + must be unique within the Virtual Machine. + type: number + name: + description: The name of the Data Disk. + type: string + storageAccountType: + description: The Type of Storage Account which should back + this Data Disk. Possible values include Standard_LRS, + StandardSSD_LRS, StandardSSD_ZRS, Premium_LRS, PremiumV2_LRS, + Premium_ZRS and UltraSSD_LRS. + type: string + ultraSsdDiskIopsReadWrite: + description: Specifies the Read-Write IOPS for this Data + Disk. Only settable when storage_account_type is PremiumV2_LRS + or UltraSSD_LRS. + type: number + ultraSsdDiskMbpsReadWrite: + description: Specifies the bandwidth in MB per second for + this Data Disk. Only settable when storage_account_type + is PremiumV2_LRS or UltraSSD_LRS. + type: number + writeAcceleratorEnabled: + description: Should Write Accelerator be Enabled for this + OS Disk? Defaults to false. + type: boolean + type: object + type: array + disablePasswordAuthentication: + description: Should Password Authentication be disabled on this + Virtual Machine Scale Set? Defaults to true. + type: boolean + doNotRunExtensionsOnOverprovisionedMachines: + description: Should Virtual Machine Extensions be run on Overprovisioned + Virtual Machines in the Scale Set? Defaults to false. + type: boolean + edgeZone: + description: Specifies the Edge Zone within the Azure Region where + this Linux Virtual Machine Scale Set should exist. Changing + this forces a new Linux Virtual Machine Scale Set to be created. + type: string + encryptionAtHostEnabled: + description: Should all of the disks (including the temp disk) + attached to this Virtual Machine be encrypted by enabling Encryption + at Host? + type: boolean + evictionPolicy: + description: Specifies the eviction policy for Virtual Machines + in this Scale Set. Possible values are Deallocate and Delete. + Changing this forces a new resource to be created. 
+ type: string + extension: + description: One or more extension blocks as defined below + items: + properties: + autoUpgradeMinorVersion: + description: Should the latest version of the Extension + be used at Deployment Time, if one is available? This + won't auto-update the extension on existing installation. + Defaults to true. + type: boolean + automaticUpgradeEnabled: + description: Should the Extension be automatically updated + whenever the Publisher releases a new version of this + VM Extension? + type: boolean + forceUpdateTag: + description: A value which, when different to the previous + value can be used to force-run the Extension even if the + Extension Configuration hasn't changed. + type: string + name: + description: The name for the Virtual Machine Scale Set + Extension. + type: string + protectedSettingsFromKeyVault: + description: A protected_settings_from_key_vault block as + defined below. + properties: + secretUrl: + description: The URL to the Key Vault Secret which stores + the protected settings. + type: string + sourceVaultId: + description: The ID of the source Key Vault. + type: string + type: object + provisionAfterExtensions: + description: An ordered list of Extension names which this + should be provisioned after. + items: + type: string + type: array + publisher: + description: Specifies the Publisher of the Extension. + type: string + settings: + description: A JSON String which specifies Settings for + the Extension. + type: string + type: + description: Specifies the Type of the Extension. + type: string + typeHandlerVersion: + description: Specifies the version of the extension to use, + available versions can be found using the Azure CLI. + type: string + type: object + type: array + extensionOperationsEnabled: + description: Should extension operations be allowed on the Virtual + Machine Scale Set? Possible values are true or false. Defaults + to true. Changing this forces a new Linux Virtual Machine Scale + Set to be created. 
+ type: boolean + extensionsTimeBudget: + description: Specifies the duration allocated for all extensions + to start. The time duration should be between 15 minutes and + 120 minutes (inclusive) and should be specified in ISO 8601 + format. Defaults to PT1H30M. + type: string + galleryApplication: + description: One or more gallery_application blocks as defined + below. + items: + properties: + configurationBlobUri: + description: Specifies the URI to an Azure Blob that will + replace the default configuration for the package if provided. + Changing this forces a new resource to be created. + type: string + order: + description: Specifies the order in which the packages have + to be installed. Possible values are between 0 and 2,147,483,647. + Changing this forces a new resource to be created. + type: number + tag: + description: Specifies a passthrough value for more generic + context. This field can be any valid string value. Changing + this forces a new resource to be created. + type: string + versionId: + description: Specifies the Gallery Application Version resource + ID. Changing this forces a new resource to be created. + type: string + type: object + type: array + galleryApplications: + items: + properties: + configurationReferenceBlobUri: + type: string + order: + description: Specifies the order in which the packages have + to be installed. Possible values are between 0 and 2,147,483,647. + Changing this forces a new resource to be created. + type: number + packageReferenceId: + description: The ID of the Linux Virtual Machine Scale Set. + type: string + tag: + description: The IP Tag associated with the Public IP, such + as SQL or Storage. Changing this forces a new resource + to be created. + type: string + type: object + type: array + healthProbeId: + description: The ID of a Load Balancer Probe which should be used + to determine the health of an instance. 
This is Required and + can only be specified when upgrade_mode is set to Automatic + or Rolling. + type: string + hostGroupId: + description: Specifies the ID of the dedicated host group that + the virtual machine scale set resides in. Changing this forces + a new resource to be created. + type: string + id: + description: The ID of the Linux Virtual Machine Scale Set. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Linux Virtual Machine Scale Set. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Principal ID associated with this Managed + Service Identity. + type: string + tenantId: + description: The Tenant ID associated with this Managed Service + Identity. + type: string + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Linux Virtual Machine + Scale Set. Possible values are SystemAssigned, UserAssigned, + SystemAssigned, UserAssigned (to enable both). + type: string + type: object + instances: + description: The number of Virtual Machines in the Scale Set. + Defaults to 0. + type: number + location: + description: The Azure location where the Linux Virtual Machine + Scale Set should exist. Changing this forces a new resource + to be created. + type: string + maxBidPrice: + description: The maximum price you're willing to pay for each + Virtual Machine in this Scale Set, in US Dollars; which must + be greater than the current spot price. If this bid price falls + below the current spot price the Virtual Machines in the Scale + Set will be evicted using the eviction_policy. Defaults to -1, + which means that each Virtual Machine in this Scale Set should + not be evicted for price reasons. + type: number + networkInterface: + description: One or more network_interface blocks as defined below. 
+ items: + properties: + dnsServers: + description: A list of IP Addresses of DNS Servers which + should be assigned to the Network Interface. + items: + type: string + type: array + enableAcceleratedNetworking: + description: Does this Network Interface support Accelerated + Networking? Defaults to false. + type: boolean + enableIpForwarding: + description: Does this Network Interface support IP Forwarding? + Defaults to false. + type: boolean + ipConfiguration: + description: One or more ip_configuration blocks as defined + above. + items: + properties: + applicationGatewayBackendAddressPoolIds: + description: A list of Backend Address Pools ID's + from a Application Gateway which this Virtual Machine + Scale Set should be connected to. + items: + type: string + type: array + x-kubernetes-list-type: set + applicationSecurityGroupIds: + description: A list of Application Security Group + ID's which this Virtual Machine Scale Set should + be connected to. + items: + type: string + type: array + x-kubernetes-list-type: set + loadBalancerBackendAddressPoolIds: + description: A list of Backend Address Pools ID's + from a Load Balancer which this Virtual Machine + Scale Set should be connected to. + items: + type: string + type: array + x-kubernetes-list-type: set + loadBalancerInboundNatRulesIds: + description: A list of NAT Rule ID's from a Load Balancer + which this Virtual Machine Scale Set should be connected + to. + items: + type: string + type: array + x-kubernetes-list-type: set + name: + description: The Name of the Public IP Address Configuration. + type: string + primary: + description: Is this the Primary IP Configuration? + type: boolean + publicIpAddress: + description: A public_ip_address block as defined + below. + items: + properties: + domainNameLabel: + description: The Prefix which should be used + for the Domain Name Label for each Virtual + Machine Instance. 
Azure concatenates the Domain + Name Label and Virtual Machine Index to create + a unique Domain Name Label for each Virtual + Machine. + type: string + idleTimeoutInMinutes: + description: The Idle Timeout in Minutes for + the Public IP Address. Possible values are + in the range 4 to 32. + type: number + ipTag: + description: One or more ip_tag blocks as defined + above. Changing this forces a new resource + to be created. + items: + properties: + tag: + description: The IP Tag associated with + the Public IP, such as SQL or Storage. + Changing this forces a new resource + to be created. + type: string + type: + description: The Type of IP Tag, such + as FirstPartyUsage. Changing this forces + a new resource to be created. + type: string + type: object + type: array + name: + description: The Name of the Public IP Address + Configuration. + type: string + publicIpPrefixId: + description: The ID of the Public IP Address + Prefix from where Public IP Addresses should + be allocated. Changing this forces a new resource + to be created. + type: string + version: + description: Specifies the version of the image + used to create the virtual machines. + type: string + type: object + type: array + subnetId: + description: The ID of the Subnet which this IP Configuration + should be connected to. + type: string + version: + description: Specifies the version of the image used + to create the virtual machines. + type: string + type: object + type: array + name: + description: The Name which should be used for this Network + Interface. Changing this forces a new resource to be created. + type: string + networkSecurityGroupId: + description: The ID of a Network Security Group which should + be assigned to this Network Interface. + type: string + primary: + description: Is this the Primary IP Configuration? + type: boolean + type: object + type: array + osDisk: + description: An os_disk block as defined below. 
+ properties: + caching: + description: The Type of Caching which should be used for + the Internal OS Disk. Possible values are None, ReadOnly + and ReadWrite. + type: string + diffDiskSettings: + description: A diff_disk_settings block as defined above. + Changing this forces a new resource to be created. + properties: + option: + description: Specifies the Ephemeral Disk Settings for + the OS Disk. At this time the only possible value is + Local. Changing this forces a new resource to be created. + type: string + placement: + description: Specifies where to store the Ephemeral Disk. + Possible values are CacheDisk and ResourceDisk. Defaults + to CacheDisk. Changing this forces a new resource to + be created. + type: string + type: object + diskEncryptionSetId: + description: The ID of the Disk Encryption Set which should + be used to encrypt this OS Disk. Conflicts with secure_vm_disk_encryption_set_id. + Changing this forces a new resource to be created. + type: string + diskSizeGb: + description: The Size of the Internal OS Disk in GB, if you + wish to vary from the size used in the image this Virtual + Machine Scale Set is sourced from. + type: number + secureVmDiskEncryptionSetId: + description: The ID of the Disk Encryption Set which should + be used to Encrypt the OS Disk when the Virtual Machine + Scale Set is Confidential VMSS. Conflicts with disk_encryption_set_id. + Changing this forces a new resource to be created. + type: string + securityEncryptionType: + description: Encryption Type when the Virtual Machine Scale + Set is Confidential VMSS. Possible values are VMGuestStateOnly + and DiskWithVMGuestState. Changing this forces a new resource + to be created. + type: string + storageAccountType: + description: The Type of Storage Account which should back + this the Internal OS Disk. Possible values include Standard_LRS, + StandardSSD_LRS, StandardSSD_ZRS, Premium_LRS and Premium_ZRS. + Changing this forces a new resource to be created. 
+ type: string + writeAcceleratorEnabled: + description: Should Write Accelerator be Enabled for this + OS Disk? Defaults to false. + type: boolean + type: object + overprovision: + description: Should Azure over-provision Virtual Machines in this + Scale Set? This means that multiple Virtual Machines will be + provisioned and Azure will keep the instances which become available + first - which improves provisioning success rates and improves + deployment time. You're not billed for these over-provisioned + VM's and they don't count towards the Subscription Quota. Defaults + to true. + type: boolean + plan: + description: A plan block as defined below. Changing this forces + a new resource to be created. + properties: + name: + description: Specifies the name of the image from the marketplace. + Changing this forces a new resource to be created. + type: string + product: + description: Specifies the product of the image from the marketplace. + Changing this forces a new resource to be created. + type: string + publisher: + description: Specifies the publisher of the image. Changing + this forces a new resource to be created. + type: string + type: object + platformFaultDomainCount: + description: Specifies the number of fault domains that are used + by this Linux Virtual Machine Scale Set. Changing this forces + a new resource to be created. + type: number + priority: + description: The Priority of this Virtual Machine Scale Set. Possible + values are Regular and Spot. Defaults to Regular. Changing this + value forces a new resource. + type: string + provisionVmAgent: + description: Should the Azure VM Agent be provisioned on each + Virtual Machine in the Scale Set? Defaults to true. Changing + this value forces a new resource to be created. + type: boolean + proximityPlacementGroupId: + description: The ID of the Proximity Placement Group in which + the Virtual Machine Scale Set should be assigned to. Changing + this forces a new resource to be created. 
+ type: string + resourceGroupName: + description: The name of the Resource Group in which the Linux + Virtual Machine Scale Set should be exist. Changing this forces + a new resource to be created. + type: string + rollingUpgradePolicy: + description: A rolling_upgrade_policy block as defined below. + This is Required and can only be specified when upgrade_mode + is set to Automatic or Rolling. Changing this forces a new resource + to be created. + properties: + crossZoneUpgradesEnabled: + description: Should the Virtual Machine Scale Set ignore the + Azure Zone boundaries when constructing upgrade batches? + Possible values are true or false. + type: boolean + maxBatchInstancePercent: + description: The maximum percent of total virtual machine + instances that will be upgraded simultaneously by the rolling + upgrade in one batch. As this is a maximum, unhealthy instances + in previous or future batches can cause the percentage of + instances in a batch to decrease to ensure higher reliability. + type: number + maxUnhealthyInstancePercent: + description: The maximum percentage of the total virtual machine + instances in the scale set that can be simultaneously unhealthy, + either as a result of being upgraded, or by being found + in an unhealthy state by the virtual machine health checks + before the rolling upgrade aborts. This constraint will + be checked prior to starting any batch. + type: number + maxUnhealthyUpgradedInstancePercent: + description: The maximum percentage of upgraded virtual machine + instances that can be found to be in an unhealthy state. + This check will happen after each batch is upgraded. If + this percentage is ever exceeded, the rolling update aborts. + type: number + pauseTimeBetweenBatches: + description: The wait time between completing the update for + all virtual machines in one batch and starting the next + batch. The time duration should be specified in ISO 8601 + format. 
+ type: string + prioritizeUnhealthyInstancesEnabled: + description: Upgrade all unhealthy instances in a scale set + before any healthy instances. Possible values are true or + false. + type: boolean + type: object + scaleIn: + description: A scale_in block as defined below. + properties: + forceDeletionEnabled: + description: Should the virtual machines chosen for removal + be force deleted when the virtual machine scale set is being + scaled-in? Possible values are true or false. Defaults to + false. + type: boolean + rule: + description: The scale-in policy rule that decides which virtual + machines are chosen for removal when a Virtual Machine Scale + Set is scaled in. Possible values for the scale-in policy + rules are Default, NewestVM and OldestVM, defaults to Default. + For more information about scale in policy, please refer + to this doc. + type: string + type: object + scaleInPolicy: + type: string + secret: + description: One or more secret blocks as defined below. + items: + properties: + certificate: + description: One or more certificate blocks as defined above. + items: + properties: + url: + description: The Secret URL of a Key Vault Certificate. + type: string + type: object + type: array + keyVaultId: + description: The ID of the Key Vault from which all Secrets + should be sourced. + type: string + type: object + type: array + secureBootEnabled: + description: Specifies whether secure boot should be enabled on + the virtual machine. Changing this forces a new resource to + be created. + type: boolean + singlePlacementGroup: + description: Should this Virtual Machine Scale Set be limited + to a Single Placement Group, which means the number of instances + will be capped at 100 Virtual Machines. Defaults to true. + type: boolean + sku: + description: The Virtual Machine SKU for the Scale Set, such as + Standard_F2. + type: string + sourceImageId: + description: The ID of an Image which each Virtual Machine in + this Scale Set should be based on. 
Possible Image ID types include + Image ID, Shared Image ID, Shared Image Version ID, Community + Gallery Image ID, Community Gallery Image Version ID, Shared + Gallery Image ID and Shared Gallery Image Version ID. + type: string + sourceImageReference: + description: A source_image_reference block as defined below. + properties: + offer: + description: Specifies the offer of the image used to create + the virtual machines. Changing this forces a new resource + to be created. + type: string + publisher: + description: Specifies the publisher of the image used to + create the virtual machines. Changing this forces a new + resource to be created. + type: string + sku: + description: Specifies the SKU of the image used to create + the virtual machines. + type: string + version: + description: Specifies the version of the image used to create + the virtual machines. + type: string + type: object + spotRestore: + description: A spot_restore block as defined below. + properties: + enabled: + description: Should the Spot-Try-Restore feature be enabled? + The Spot-Try-Restore feature will attempt to automatically + restore the evicted Spot Virtual Machine Scale Set VM instances + opportunistically based on capacity availability and pricing + constraints. Possible values are true or false. Defaults + to false. Changing this forces a new resource to be created. + type: boolean + timeout: + description: The length of time that the Virtual Machine Scale + Set should attempt to restore the Spot VM instances which + have been evicted. The time duration should be between 15 + minutes and 120 minutes (inclusive). The time duration should + be specified in the ISO 8601 format. Defaults to PT1H. Changing + this forces a new resource to be created. + type: string + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to this + Virtual Machine Scale Set. 
+ type: object + x-kubernetes-map-type: granular + terminateNotification: + description: A terminate_notification block as defined below. + properties: + enabled: + description: Should the terminate notification be enabled + on this Virtual Machine Scale Set? + type: boolean + timeout: + description: Length of time (in minutes, between 5 and 15) + a notification to be sent to the VM on the instance metadata + server till the VM gets deleted. The time duration should + be specified in ISO 8601 format. Defaults to PT5M. + type: string + type: object + terminationNotification: + description: A termination_notification block as defined below. + properties: + enabled: + description: Should the termination notification be enabled + on this Virtual Machine Scale Set? + type: boolean + timeout: + description: Length of time (in minutes, between 5 and 15) + a notification to be sent to the VM on the instance metadata + server till the VM gets deleted. The time duration should + be specified in ISO 8601 format. Defaults to PT5M. + type: string + type: object + uniqueId: + description: The Unique ID for this Linux Virtual Machine Scale + Set. + type: string + upgradeMode: + description: Specifies how Upgrades (e.g. changing the Image/SKU) + should be performed to Virtual Machine Instances. Possible values + are Automatic, Manual and Rolling. Defaults to Manual. Changing + this forces a new resource to be created. + type: string + userData: + description: The Base64-Encoded User Data which should be used + for this Virtual Machine Scale Set. + type: string + vtpmEnabled: + description: Specifies whether vTPM should be enabled on the virtual + machine. Changing this forces a new resource to be created. + type: boolean + zoneBalance: + description: Should the Virtual Machines in this Scale Set be + strictly evenly distributed across Availability Zones? Defaults + to false. Changing this forces a new resource to be created. 
+ type: boolean + zones: + description: Specifies a list of Availability Zones in which this + Linux Virtual Machine Scale Set should be located. Changing + this forces a new Linux Virtual Machine Scale Set to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/compute.azure.upbound.io_manageddisks.yaml b/package/crds/compute.azure.upbound.io_manageddisks.yaml index 96dd06b67..752390624 100644 --- a/package/crds/compute.azure.upbound.io_manageddisks.yaml +++ b/package/crds/compute.azure.upbound.io_manageddisks.yaml @@ -1215,3 +1215,1182 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ManagedDisk is the Schema for the ManagedDisks API. Manages a + Managed Disk. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ManagedDiskSpec defines the desired state of ManagedDisk + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + createOption: + description: 'The method to use when creating the managed disk. + Changing this forces a new resource to be created. Possible + values include: * Import - Import a VHD file in to the managed + disk (VHD specified with source_uri). * ImportSecure - Securely + import a VHD file in to the managed disk (VHD specified with + source_uri). * Empty - Create an empty managed disk. * Copy + - Copy an existing managed disk or snapshot (specified with + source_resource_id). * FromImage - Copy a Platform Image (specified + with image_reference_id) * Restore - Set by Azure Backup or + Site Recovery on a restored disk (specified with source_resource_id). + * Upload - Upload a VHD disk with the help of SAS URL (to be + used with upload_size_bytes).' + type: string + diskAccessId: + description: The ID of the disk access resource for using private + endpoints on disks. + type: string + diskEncryptionSetId: + description: The ID of a Disk Encryption Set which should be used + to encrypt this Managed Disk. 
Conflicts with secure_vm_disk_encryption_set_id. + type: string + diskIopsReadOnly: + description: The number of IOPS allowed across all VMs mounting + the shared disk as read-only; only settable for UltraSSD disks + and PremiumV2 disks with shared disk enabled. One operation + can transfer between 4k and 256k bytes. + type: number + diskIopsReadWrite: + description: The number of IOPS allowed for this disk; only settable + for UltraSSD disks and PremiumV2 disks. One operation can transfer + between 4k and 256k bytes. + type: number + diskMbpsReadOnly: + description: The bandwidth allowed across all VMs mounting the + shared disk as read-only; only settable for UltraSSD disks and + PremiumV2 disks with shared disk enabled. MBps means millions + of bytes per second. + type: number + diskMbpsReadWrite: + description: The bandwidth allowed for this disk; only settable + for UltraSSD disks and PremiumV2 disks. MBps means millions + of bytes per second. + type: number + diskSizeGb: + description: Specifies the size of the managed disk to create + in gigabytes. If create_option is Copy or FromImage, then the + value must be equal to or greater than the source's size. The + size can only be increased. + type: number + edgeZone: + description: Specifies the Edge Zone within the Azure Region where + this Managed Disk should exist. Changing this forces a new Managed + Disk to be created. + type: string + encryptionSettings: + description: A encryption_settings block as defined below. + properties: + diskEncryptionKey: + description: A disk_encryption_key block as defined above. + properties: + secretUrl: + description: The URL to the Key Vault Secret used as the + Disk Encryption Key. This can be found as id on the + azurerm_key_vault_secret resource. + type: string + sourceVaultId: + description: The ID of the source Key Vault. This can + be found as id on the azurerm_key_vault resource. 
+ type: string + type: object + enabled: + type: boolean + keyEncryptionKey: + description: A key_encryption_key block as defined below. + properties: + keyUrl: + description: The URL to the Key Vault Key used as the + Key Encryption Key. This can be found as id on the azurerm_key_vault_key + resource. + type: string + sourceVaultId: + description: The ID of the source Key Vault. This can + be found as id on the azurerm_key_vault resource. + type: string + type: object + type: object + galleryImageReferenceId: + description: ID of a Gallery Image Version to copy when create_option + is FromImage. This field cannot be specified if image_reference_id + is specified. Changing this forces a new resource to be created. + type: string + hyperVGeneration: + description: The HyperV Generation of the Disk when the source + of an Import or Copy operation targets a source that contains + an operating system. Possible values are V1 and V2. For ImportSecure + it must be set to V2. Changing this forces a new resource to + be created. + type: string + imageReferenceId: + description: ID of an existing platform/marketplace disk image + to copy when create_option is FromImage. This field cannot be + specified if gallery_image_reference_id is specified. Changing + this forces a new resource to be created. + type: string + location: + description: Specified the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + logicalSectorSize: + description: 'Logical Sector Size. Possible values are: 512 and + 4096. Defaults to 4096. Changing this forces a new resource + to be created.' + type: number + maxShares: + description: The maximum number of VMs that can attach to the + disk at the same time. Value greater than one indicates a disk + that can be mounted on multiple VMs at the same time. + type: number + networkAccessPolicy: + description: Policy for accessing the disk via network. 
Allowed + values are AllowAll, AllowPrivate, and DenyAll. + type: string + onDemandBurstingEnabled: + description: Specifies if On-Demand Bursting is enabled for the + Managed Disk. + type: boolean + optimizedFrequentAttachEnabled: + description: Specifies whether this Managed Disk should be optimized + for frequent disk attachments (where a disk is attached/detached + more than 5 times in a day). Defaults to false. + type: boolean + osType: + description: Specify a value when the source of an Import, ImportSecure + or Copy operation targets a source that contains an operating + system. Valid values are Linux or Windows. + type: string + performancePlusEnabled: + description: Specifies whether Performance Plus is enabled for + this Managed Disk. Defaults to false. Changing this forces a + new resource to be created. + type: boolean + publicNetworkAccessEnabled: + description: Whether it is allowed to access the disk via public + network. Defaults to true. + type: boolean + resourceGroupName: + description: The name of the Resource Group where the Managed + Disk should exist. Changing this forces a new resource to be + created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + secureVmDiskEncryptionSetId: + description: The ID of the Disk Encryption Set which should be + used to Encrypt this OS Disk when the Virtual Machine is a Confidential + VM. Conflicts with disk_encryption_set_id. Changing this forces + a new resource to be created. + type: string + securityType: + description: Security Type of the Managed Disk when it is used + for a Confidential VM. Possible values are ConfidentialVM_VMGuestStateOnlyEncryptedWithPlatformKey, + ConfidentialVM_DiskEncryptedWithPlatformKey and ConfidentialVM_DiskEncryptedWithCustomerKey. + Changing this forces a new resource to be created. 
+ type: string + sourceResourceId: + description: The ID of an existing Managed Disk or Snapshot to + copy when create_option is Copy or the recovery point to restore + when create_option is Restore. Changing this forces a new resource + to be created. + type: string + sourceResourceIdRef: + description: Reference to a ManagedDisk in compute to populate + sourceResourceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + sourceResourceIdSelector: + description: Selector for a ManagedDisk in compute to populate + sourceResourceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sourceUri: + description: URI to a valid VHD file to be used when create_option + is Import or ImportSecure. Changing this forces a new resource + to be created. + type: string + storageAccountId: + description: The ID of the Storage Account where the source_uri + is located. Required when create_option is set to Import or + ImportSecure. Changing this forces a new resource to be created. + type: string + storageAccountType: + description: The type of storage to use for the managed disk. + Possible values are Standard_LRS, StandardSSD_ZRS, Premium_LRS, + PremiumV2_LRS, Premium_ZRS, StandardSSD_LRS or UltraSSD_LRS. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + tier: + description: The disk performance tier to use. Possible values + are documented here. This feature is currently supported only + for premium SSDs. + type: string + trustedLaunchEnabled: + description: Specifies if Trusted Launch is enabled for the Managed + Disk. Changing this forces a new resource to be created. + type: boolean + uploadSizeBytes: + description: Specifies the size of the managed disk to create + in bytes. Required when create_option is Upload. The value must + be equal to the source disk to be copied in bytes. Source disk + size could be calculated with ls -l or wc -c. More information + can be found at Copy a managed disk. Changing this forces a + new resource to be created. 
+ type: number + zone: + description: Specifies the Availability Zone in which this Managed + Disk should be located. Changing this property forces a new + resource to be created. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + createOption: + description: 'The method to use when creating the managed disk. + Changing this forces a new resource to be created. Possible + values include: * Import - Import a VHD file in to the managed + disk (VHD specified with source_uri). * ImportSecure - Securely + import a VHD file in to the managed disk (VHD specified with + source_uri). * Empty - Create an empty managed disk. * Copy + - Copy an existing managed disk or snapshot (specified with + source_resource_id). * FromImage - Copy a Platform Image (specified + with image_reference_id) * Restore - Set by Azure Backup or + Site Recovery on a restored disk (specified with source_resource_id). + * Upload - Upload a VHD disk with the help of SAS URL (to be + used with upload_size_bytes).' + type: string + diskAccessId: + description: The ID of the disk access resource for using private + endpoints on disks. + type: string + diskEncryptionSetId: + description: The ID of a Disk Encryption Set which should be used + to encrypt this Managed Disk. Conflicts with secure_vm_disk_encryption_set_id. 
+ type: string + diskIopsReadOnly: + description: The number of IOPS allowed across all VMs mounting + the shared disk as read-only; only settable for UltraSSD disks + and PremiumV2 disks with shared disk enabled. One operation + can transfer between 4k and 256k bytes. + type: number + diskIopsReadWrite: + description: The number of IOPS allowed for this disk; only settable + for UltraSSD disks and PremiumV2 disks. One operation can transfer + between 4k and 256k bytes. + type: number + diskMbpsReadOnly: + description: The bandwidth allowed across all VMs mounting the + shared disk as read-only; only settable for UltraSSD disks and + PremiumV2 disks with shared disk enabled. MBps means millions + of bytes per second. + type: number + diskMbpsReadWrite: + description: The bandwidth allowed for this disk; only settable + for UltraSSD disks and PremiumV2 disks. MBps means millions + of bytes per second. + type: number + diskSizeGb: + description: Specifies the size of the managed disk to create + in gigabytes. If create_option is Copy or FromImage, then the + value must be equal to or greater than the source's size. The + size can only be increased. + type: number + edgeZone: + description: Specifies the Edge Zone within the Azure Region where + this Managed Disk should exist. Changing this forces a new Managed + Disk to be created. + type: string + encryptionSettings: + description: A encryption_settings block as defined below. + properties: + diskEncryptionKey: + description: A disk_encryption_key block as defined above. + properties: + secretUrl: + description: The URL to the Key Vault Secret used as the + Disk Encryption Key. This can be found as id on the + azurerm_key_vault_secret resource. + type: string + sourceVaultId: + description: The ID of the source Key Vault. This can + be found as id on the azurerm_key_vault resource. + type: string + type: object + enabled: + type: boolean + keyEncryptionKey: + description: A key_encryption_key block as defined below. 
+ properties: + keyUrl: + description: The URL to the Key Vault Key used as the + Key Encryption Key. This can be found as id on the azurerm_key_vault_key + resource. + type: string + sourceVaultId: + description: The ID of the source Key Vault. This can + be found as id on the azurerm_key_vault resource. + type: string + type: object + type: object + galleryImageReferenceId: + description: ID of a Gallery Image Version to copy when create_option + is FromImage. This field cannot be specified if image_reference_id + is specified. Changing this forces a new resource to be created. + type: string + hyperVGeneration: + description: The HyperV Generation of the Disk when the source + of an Import or Copy operation targets a source that contains + an operating system. Possible values are V1 and V2. For ImportSecure + it must be set to V2. Changing this forces a new resource to + be created. + type: string + imageReferenceId: + description: ID of an existing platform/marketplace disk image + to copy when create_option is FromImage. This field cannot be + specified if gallery_image_reference_id is specified. Changing + this forces a new resource to be created. + type: string + location: + description: Specified the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + logicalSectorSize: + description: 'Logical Sector Size. Possible values are: 512 and + 4096. Defaults to 4096. Changing this forces a new resource + to be created.' + type: number + maxShares: + description: The maximum number of VMs that can attach to the + disk at the same time. Value greater than one indicates a disk + that can be mounted on multiple VMs at the same time. + type: number + networkAccessPolicy: + description: Policy for accessing the disk via network. Allowed + values are AllowAll, AllowPrivate, and DenyAll. 
+ type: string + onDemandBurstingEnabled: + description: Specifies if On-Demand Bursting is enabled for the + Managed Disk. + type: boolean + optimizedFrequentAttachEnabled: + description: Specifies whether this Managed Disk should be optimized + for frequent disk attachments (where a disk is attached/detached + more than 5 times in a day). Defaults to false. + type: boolean + osType: + description: Specify a value when the source of an Import, ImportSecure + or Copy operation targets a source that contains an operating + system. Valid values are Linux or Windows. + type: string + performancePlusEnabled: + description: Specifies whether Performance Plus is enabled for + this Managed Disk. Defaults to false. Changing this forces a + new resource to be created. + type: boolean + publicNetworkAccessEnabled: + description: Whether it is allowed to access the disk via public + network. Defaults to true. + type: boolean + secureVmDiskEncryptionSetId: + description: The ID of the Disk Encryption Set which should be + used to Encrypt this OS Disk when the Virtual Machine is a Confidential + VM. Conflicts with disk_encryption_set_id. Changing this forces + a new resource to be created. + type: string + securityType: + description: Security Type of the Managed Disk when it is used + for a Confidential VM. Possible values are ConfidentialVM_VMGuestStateOnlyEncryptedWithPlatformKey, + ConfidentialVM_DiskEncryptedWithPlatformKey and ConfidentialVM_DiskEncryptedWithCustomerKey. + Changing this forces a new resource to be created. + type: string + sourceResourceId: + description: The ID of an existing Managed Disk or Snapshot to + copy when create_option is Copy or the recovery point to restore + when create_option is Restore. Changing this forces a new resource + to be created. + type: string + sourceResourceIdRef: + description: Reference to a ManagedDisk in compute to populate + sourceResourceId. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + sourceResourceIdSelector: + description: Selector for a ManagedDisk in compute to populate + sourceResourceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sourceUri: + description: URI to a valid VHD file to be used when create_option + is Import or ImportSecure. Changing this forces a new resource + to be created. + type: string + storageAccountId: + description: The ID of the Storage Account where the source_uri + is located. Required when create_option is set to Import or + ImportSecure. Changing this forces a new resource to be created. + type: string + storageAccountType: + description: The type of storage to use for the managed disk. + Possible values are Standard_LRS, StandardSSD_ZRS, Premium_LRS, + PremiumV2_LRS, Premium_ZRS, StandardSSD_LRS or UltraSSD_LRS. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + tier: + description: The disk performance tier to use. Possible values + are documented here. This feature is currently supported only + for premium SSDs. + type: string + trustedLaunchEnabled: + description: Specifies if Trusted Launch is enabled for the Managed + Disk. Changing this forces a new resource to be created. + type: boolean + uploadSizeBytes: + description: Specifies the size of the managed disk to create + in bytes. Required when create_option is Upload. The value must + be equal to the source disk to be copied in bytes. Source disk + size could be calculated with ls -l or wc -c. More information + can be found at Copy a managed disk. Changing this forces a + new resource to be created. + type: number + zone: + description: Specifies the Availability Zone in which this Managed + Disk should be located. Changing this property forces a new + resource to be created. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. 
+ ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.createOption is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.createOption) + || (has(self.initProvider) && has(self.initProvider.createOption))' + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.storageAccountType is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.storageAccountType) + || (has(self.initProvider) && has(self.initProvider.storageAccountType))' + status: + description: ManagedDiskStatus defines the observed state of ManagedDisk. + properties: + atProvider: + properties: + createOption: + description: 'The method to use when creating the managed disk. + Changing this forces a new resource to be created. Possible + values include: * Import - Import a VHD file in to the managed + disk (VHD specified with source_uri). * ImportSecure - Securely + import a VHD file in to the managed disk (VHD specified with + source_uri). * Empty - Create an empty managed disk. * Copy + - Copy an existing managed disk or snapshot (specified with + source_resource_id). * FromImage - Copy a Platform Image (specified + with image_reference_id) * Restore - Set by Azure Backup or + Site Recovery on a restored disk (specified with source_resource_id). + * Upload - Upload a VHD disk with the help of SAS URL (to be + used with upload_size_bytes).' 
+ type: string + diskAccessId: + description: The ID of the disk access resource for using private + endpoints on disks. + type: string + diskEncryptionSetId: + description: The ID of a Disk Encryption Set which should be used + to encrypt this Managed Disk. Conflicts with secure_vm_disk_encryption_set_id. + type: string + diskIopsReadOnly: + description: The number of IOPS allowed across all VMs mounting + the shared disk as read-only; only settable for UltraSSD disks + and PremiumV2 disks with shared disk enabled. One operation + can transfer between 4k and 256k bytes. + type: number + diskIopsReadWrite: + description: The number of IOPS allowed for this disk; only settable + for UltraSSD disks and PremiumV2 disks. One operation can transfer + between 4k and 256k bytes. + type: number + diskMbpsReadOnly: + description: The bandwidth allowed across all VMs mounting the + shared disk as read-only; only settable for UltraSSD disks and + PremiumV2 disks with shared disk enabled. MBps means millions + of bytes per second. + type: number + diskMbpsReadWrite: + description: The bandwidth allowed for this disk; only settable + for UltraSSD disks and PremiumV2 disks. MBps means millions + of bytes per second. + type: number + diskSizeGb: + description: Specifies the size of the managed disk to create + in gigabytes. If create_option is Copy or FromImage, then the + value must be equal to or greater than the source's size. The + size can only be increased. + type: number + edgeZone: + description: Specifies the Edge Zone within the Azure Region where + this Managed Disk should exist. Changing this forces a new Managed + Disk to be created. + type: string + encryptionSettings: + description: A encryption_settings block as defined below. + properties: + diskEncryptionKey: + description: A disk_encryption_key block as defined above. + properties: + secretUrl: + description: The URL to the Key Vault Secret used as the + Disk Encryption Key. 
This can be found as id on the + azurerm_key_vault_secret resource. + type: string + sourceVaultId: + description: The ID of the source Key Vault. This can + be found as id on the azurerm_key_vault resource. + type: string + type: object + enabled: + type: boolean + keyEncryptionKey: + description: A key_encryption_key block as defined below. + properties: + keyUrl: + description: The URL to the Key Vault Key used as the + Key Encryption Key. This can be found as id on the azurerm_key_vault_key + resource. + type: string + sourceVaultId: + description: The ID of the source Key Vault. This can + be found as id on the azurerm_key_vault resource. + type: string + type: object + type: object + galleryImageReferenceId: + description: ID of a Gallery Image Version to copy when create_option + is FromImage. This field cannot be specified if image_reference_id + is specified. Changing this forces a new resource to be created. + type: string + hyperVGeneration: + description: The HyperV Generation of the Disk when the source + of an Import or Copy operation targets a source that contains + an operating system. Possible values are V1 and V2. For ImportSecure + it must be set to V2. Changing this forces a new resource to + be created. + type: string + id: + description: The ID of the Managed Disk. + type: string + imageReferenceId: + description: ID of an existing platform/marketplace disk image + to copy when create_option is FromImage. This field cannot be + specified if gallery_image_reference_id is specified. Changing + this forces a new resource to be created. + type: string + location: + description: Specified the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + logicalSectorSize: + description: 'Logical Sector Size. Possible values are: 512 and + 4096. Defaults to 4096. Changing this forces a new resource + to be created.' 
+ type: number + maxShares: + description: The maximum number of VMs that can attach to the + disk at the same time. Value greater than one indicates a disk + that can be mounted on multiple VMs at the same time. + type: number + networkAccessPolicy: + description: Policy for accessing the disk via network. Allowed + values are AllowAll, AllowPrivate, and DenyAll. + type: string + onDemandBurstingEnabled: + description: Specifies if On-Demand Bursting is enabled for the + Managed Disk. + type: boolean + optimizedFrequentAttachEnabled: + description: Specifies whether this Managed Disk should be optimized + for frequent disk attachments (where a disk is attached/detached + more than 5 times in a day). Defaults to false. + type: boolean + osType: + description: Specify a value when the source of an Import, ImportSecure + or Copy operation targets a source that contains an operating + system. Valid values are Linux or Windows. + type: string + performancePlusEnabled: + description: Specifies whether Performance Plus is enabled for + this Managed Disk. Defaults to false. Changing this forces a + new resource to be created. + type: boolean + publicNetworkAccessEnabled: + description: Whether it is allowed to access the disk via public + network. Defaults to true. + type: boolean + resourceGroupName: + description: The name of the Resource Group where the Managed + Disk should exist. Changing this forces a new resource to be + created. + type: string + secureVmDiskEncryptionSetId: + description: The ID of the Disk Encryption Set which should be + used to Encrypt this OS Disk when the Virtual Machine is a Confidential + VM. Conflicts with disk_encryption_set_id. Changing this forces + a new resource to be created. + type: string + securityType: + description: Security Type of the Managed Disk when it is used + for a Confidential VM. 
Possible values are ConfidentialVM_VMGuestStateOnlyEncryptedWithPlatformKey, + ConfidentialVM_DiskEncryptedWithPlatformKey and ConfidentialVM_DiskEncryptedWithCustomerKey. + Changing this forces a new resource to be created. + type: string + sourceResourceId: + description: The ID of an existing Managed Disk or Snapshot to + copy when create_option is Copy or the recovery point to restore + when create_option is Restore. Changing this forces a new resource + to be created. + type: string + sourceUri: + description: URI to a valid VHD file to be used when create_option + is Import or ImportSecure. Changing this forces a new resource + to be created. + type: string + storageAccountId: + description: The ID of the Storage Account where the source_uri + is located. Required when create_option is set to Import or + ImportSecure. Changing this forces a new resource to be created. + type: string + storageAccountType: + description: The type of storage to use for the managed disk. + Possible values are Standard_LRS, StandardSSD_ZRS, Premium_LRS, + PremiumV2_LRS, Premium_ZRS, StandardSSD_LRS or UltraSSD_LRS. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + tier: + description: The disk performance tier to use. Possible values + are documented here. This feature is currently supported only + for premium SSDs. + type: string + trustedLaunchEnabled: + description: Specifies if Trusted Launch is enabled for the Managed + Disk. Changing this forces a new resource to be created. + type: boolean + uploadSizeBytes: + description: Specifies the size of the managed disk to create + in bytes. Required when create_option is Upload. The value must + be equal to the source disk to be copied in bytes. Source disk + size could be calculated with ls -l or wc -c. More information + can be found at Copy a managed disk. Changing this forces a + new resource to be created. 
+ type: number + zone: + description: Specifies the Availability Zone in which this Managed + Disk should be located. Changing this property forces a new + resource to be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/compute.azure.upbound.io_orchestratedvirtualmachinescalesets.yaml b/package/crds/compute.azure.upbound.io_orchestratedvirtualmachinescalesets.yaml index cd0f9fa74..0484a7df9 100644 --- a/package/crds/compute.azure.upbound.io_orchestratedvirtualmachinescalesets.yaml +++ b/package/crds/compute.azure.upbound.io_orchestratedvirtualmachinescalesets.yaml @@ -2955,3 +2955,2823 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: OrchestratedVirtualMachineScaleSet is the Schema for the OrchestratedVirtualMachineScaleSets + API. Manages an Virtual Machine Scale Set in Flexible Orchestration Mode. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: OrchestratedVirtualMachineScaleSetSpec defines the desired + state of OrchestratedVirtualMachineScaleSet + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + additionalCapabilities: + description: An additional_capabilities block as defined below. + properties: + ultraSsdEnabled: + description: Should the capacity to enable Data Disks of the + UltraSSD_LRS storage account type be supported on this Virtual + Machine Scale Set? Defaults to false. Changing this forces + a new resource to be created. + type: boolean + type: object + automaticInstanceRepair: + description: An automatic_instance_repair block as defined below. + properties: + enabled: + description: Should the automatic instance repair be enabled + on this Virtual Machine Scale Set? Possible values are true + and false. + type: boolean + gracePeriod: + description: Amount of time for which automatic repairs will + be delayed. The grace period starts right after the VM is + found unhealthy. Possible values are between 30 and 90 minutes. + The time duration should be specified in ISO 8601 format + (e.g. PT30M to PT90M). Defaults to PT30M. 
+ type: string + type: object + bootDiagnostics: + description: A boot_diagnostics block as defined below. + properties: + storageAccountUri: + description: The Primary/Secondary Endpoint for the Azure + Storage Account which should be used to store Boot Diagnostics, + including Console Output and Screenshots from the Hypervisor. + By including a boot_diagnostics block without passing the + storage_account_uri field will cause the API to utilize + a Managed Storage Account to store the Boot Diagnostics + output. + type: string + type: object + capacityReservationGroupId: + description: Specifies the ID of the Capacity Reservation Group + which the Virtual Machine Scale Set should be allocated to. + Changing this forces a new resource to be created. + type: string + dataDisk: + description: One or more data_disk blocks as defined below. + items: + properties: + caching: + description: The type of Caching which should be used for + this Data Disk. Possible values are None, ReadOnly and + ReadWrite. + type: string + createOption: + description: The create option which should be used for + this Data Disk. Possible values are Empty and FromImage. + Defaults to Empty. (FromImage should only be used if the + source image includes data disks). + type: string + diskEncryptionSetId: + description: The ID of the Disk Encryption Set which should + be used to encrypt the Data Disk. Changing this forces + a new resource to be created. + type: string + diskSizeGb: + description: The size of the Data Disk which should be created. + Required if create_option is specified as Empty. + type: number + lun: + description: The Logical Unit Number of the Data Disk, which + must be unique within the Virtual Machine. Required if + create_option is specified as Empty. + type: number + storageAccountType: + description: The Type of Storage Account which should back + this Data Disk. 
Possible values include Standard_LRS, + StandardSSD_LRS, StandardSSD_ZRS, Premium_LRS, PremiumV2_LRS, + Premium_ZRS and UltraSSD_LRS. + type: string + ultraSsdDiskIopsReadWrite: + description: Specifies the Read-Write IOPS for this Data + Disk. Only settable when storage_account_type is PremiumV2_LRS + or UltraSSD_LRS. + type: number + ultraSsdDiskMbpsReadWrite: + description: Specifies the bandwidth in MB per second for + this Data Disk. Only settable when storage_account_type + is PremiumV2_LRS or UltraSSD_LRS. + type: number + writeAcceleratorEnabled: + description: Specifies if Write Accelerator is enabled on + the Data Disk. Defaults to false. + type: boolean + type: object + type: array + encryptionAtHostEnabled: + description: Should disks attached to this Virtual Machine Scale + Set be encrypted by enabling Encryption at Host? + type: boolean + evictionPolicy: + description: The Policy which should be used by Spot Virtual Machines + that are Evicted from the Scale Set. Possible values are Deallocate + and Delete. Changing this forces a new resource to be created. + type: string + extension: + description: One or more extension blocks as defined below + items: + properties: + autoUpgradeMinorVersionEnabled: + description: Should the latest version of the Extension + be used at Deployment Time, if one is available? This + won't auto-update the extension on existing installation. + Defaults to true. + type: boolean + extensionsToProvisionAfterVmCreation: + description: An ordered list of Extension names which Virtual + Machine Scale Set should provision after VM creation. + items: + type: string + type: array + failureSuppressionEnabled: + description: Should failures from the extension be suppressed? + Possible values are true or false. + type: boolean + forceExtensionExecutionOnChange: + description: A value which, when different to the previous + value can be used to force-run the Extension even if the + Extension Configuration hasn't changed. 
+ type: string + name: + description: The name for the Virtual Machine Scale Set + Extension. + type: string + protectedSettingsFromKeyVault: + description: A protected_settings_from_key_vault block as + defined below. + properties: + secretUrl: + description: The URL to the Key Vault Secret which stores + the protected settings. + type: string + sourceVaultId: + description: The ID of the source Key Vault. + type: string + type: object + protectedSettingsSecretRef: + description: A JSON String which specifies Sensitive Settings + (such as Passwords) for the Extension. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + publisher: + description: Specifies the Publisher of the Extension. + type: string + settings: + description: A JSON String which specifies Settings for + the Extension. + type: string + type: + description: Specifies the Type of the Extension. + type: string + typeHandlerVersion: + description: Specifies the version of the extension to use, + available versions can be found using the Azure CLI. + type: string + type: object + type: array + extensionOperationsEnabled: + description: Should extension operations be allowed on the Virtual + Machine Scale Set? Possible values are true or false. Defaults + to true. Changing this forces a new Virtual Machine Scale Set + to be created. + type: boolean + extensionsTimeBudget: + description: Specifies the time alloted for all extensions to + start. The time duration should be between 15 minutes and 120 + minutes (inclusive) and should be specified in ISO 8601 format. + Defaults to PT1H30M. + type: string + identity: + description: An identity block as defined below. 
+ properties: + identityIds: + description: Specifies a list of User Managed Identity IDs + to be assigned to this Windows Virtual Machine Scale Set. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: The type of Managed Identity that should be configured + on this Windows Virtual Machine Scale Set. Only possible + value is UserAssigned. + type: string + type: object + instances: + description: The number of Virtual Machines in the Virtual Machine + Scale Set. + type: number + licenseType: + description: Specifies the type of on-premise license (also known + as Azure Hybrid Use Benefit) which should be used for this Virtual + Machine Scale Set. Possible values are None, Windows_Client + and Windows_Server. + type: string + location: + description: The Azure location where the Virtual Machine Scale + Set should exist. Changing this forces a new resource to be + created. + type: string + maxBidPrice: + description: The maximum price you're willing to pay for each + Virtual Machine in this Scale Set, in US Dollars; which must + be greater than the current spot price. If this bid price falls + below the current spot price the Virtual Machines in the Scale + Set will be evicted using the eviction_policy. Defaults to -1, + which means that each Virtual Machine in the Scale Set should + not be evicted for price reasons. + type: number + networkInterface: + description: One or more network_interface blocks as defined below. + items: + properties: + dnsServers: + description: A list of IP Addresses of DNS Servers which + should be assigned to the Network Interface. + items: + type: string + type: array + enableAcceleratedNetworking: + description: Does this Network Interface support Accelerated + Networking? Possible values are true and false. Defaults + to false. + type: boolean + enableIpForwarding: + description: Does this Network Interface support IP Forwarding? + Possible values are true and false. Defaults to false. 
+ type: boolean + ipConfiguration: + description: One or more ip_configuration blocks as defined + above. + items: + properties: + applicationGatewayBackendAddressPoolIds: + description: A list of Backend Address Pools IDs from + a Application Gateway which this Virtual Machine + Scale Set should be connected to. + items: + type: string + type: array + x-kubernetes-list-type: set + applicationSecurityGroupIds: + description: A list of Application Security Group + IDs which this Virtual Machine Scale Set should + be connected to. + items: + type: string + type: array + x-kubernetes-list-type: set + loadBalancerBackendAddressPoolIds: + description: A list of Backend Address Pools IDs from + a Load Balancer which this Virtual Machine Scale + Set should be connected to. + items: + type: string + type: array + x-kubernetes-list-type: set + name: + description: The Name of the Public IP Address Configuration. + type: string + primary: + description: Is this the Primary IP Configuration? + Possible values are true and false. Defaults to + false. + type: boolean + publicIpAddress: + description: A public_ip_address block as defined + below. + items: + properties: + domainNameLabel: + description: The Prefix which should be used + for the Domain Name Label for each Virtual + Machine Instance. Azure concatenates the Domain + Name Label and Virtual Machine Index to create + a unique Domain Name Label for each Virtual + Machine. Valid values must be between 1 and + 26 characters long, start with a lower case + letter, end with a lower case letter or number + and contains only a-z, 0-9 and hyphens. + type: string + idleTimeoutInMinutes: + description: The Idle Timeout in Minutes for + the Public IP Address. Possible values are + in the range 4 to 32. + type: number + ipTag: + description: One or more ip_tag blocks as defined + above. Changing this forces a new resource + to be created. 
+ items: + properties: + tag: + description: The IP Tag associated with + the Public IP, such as SQL or Storage. + Changing this forces a new resource + to be created. + type: string + type: + description: The Type of IP Tag, such + as FirstPartyUsage. Changing this forces + a new resource to be created. + type: string + type: object + type: array + name: + description: The Name of the Public IP Address + Configuration. + type: string + publicIpPrefixId: + description: The ID of the Public IP Address + Prefix from where Public IP Addresses should + be allocated. Changing this forces a new resource + to be created. + type: string + skuName: + description: 'The name of the SKU to be used + by this Virtual Machine Scale Set. Valid values + include: any of the General purpose, Compute + optimized, Memory optimized, Storage optimized, + GPU optimized, FPGA optimized, High performance, + or Previous generation virtual machine SKUs.' + type: string + version: + description: Specifies the version of the image + used to create the virtual machines. + type: string + type: object + type: array + subnetId: + description: The ID of the Subnet which this IP Configuration + should be connected to. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + version: + description: Specifies the version of the image used + to create the virtual machines. + type: string + type: object + type: array + name: + description: The Name which should be used for this Network + Interface. Changing this forces a new resource to be created. + type: string + networkSecurityGroupId: + description: The ID of a Network Security Group which should + be assigned to this Network Interface. + type: string + primary: + description: Is this the Primary IP Configuration? 
Possible + values are true and false. Defaults to false. + type: boolean + type: object + type: array + osDisk: + description: An os_disk block as defined below. + properties: + caching: + description: The Type of Caching which should be used for + the Internal OS Disk. Possible values are None, ReadOnly + and ReadWrite. + type: string + diffDiskSettings: + description: A diff_disk_settings block as defined above. + Changing this forces a new resource to be created. + properties: + option: + description: Specifies the Ephemeral Disk Settings for + the OS Disk. At this time the only possible value is + Local. Changing this forces a new resource to be created. + type: string + placement: + description: Specifies where to store the Ephemeral Disk. + Possible values are CacheDisk and ResourceDisk. Defaults + to CacheDisk. Changing this forces a new resource to + be created. + type: string + type: object + diskEncryptionSetId: + description: The ID of the Disk Encryption Set which should + be used to encrypt this OS Disk. Changing this forces a + new resource to be created. + type: string + diskSizeGb: + description: The Size of the Internal OS Disk in GB, if you + wish to vary from the size used in the image this Virtual + Machine Scale Set is sourced from. + type: number + storageAccountType: + description: The Type of Storage Account which should back + this the Internal OS Disk. Possible values include Standard_LRS, + StandardSSD_LRS, StandardSSD_ZRS, Premium_LRS and Premium_ZRS. + Changing this forces a new resource to be created. + type: string + writeAcceleratorEnabled: + description: Specifies if Write Accelerator is enabled on + the OS Disk. Defaults to false. + type: boolean + type: object + osProfile: + description: An os_profile block as defined below. + properties: + customDataSecretRef: + description: The Base64-Encoded Custom Data which should be + used for this Virtual Machine Scale Set. + properties: + key: + description: The key to select. 
+ type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + linuxConfiguration: + description: A linux_configuration block as documented below. + properties: + adminPasswordSecretRef: + description: The Password which should be used for the + local-administrator on this Virtual Machine. Changing + this forces a new resource to be created. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + adminSshKey: + description: A admin_ssh_key block as documented below. + items: + properties: + publicKey: + description: The Public Key which should be used + for authentication, which needs to be at least + 2048-bit and in ssh-rsa format. + type: string + username: + description: The Username for which this Public + SSH Key should be configured. + type: string + type: object + type: array + adminUsername: + description: The username of the local administrator on + each Virtual Machine Scale Set instance. Changing this + forces a new resource to be created. + type: string + computerNamePrefix: + description: The prefix which should be used for the name + of the Virtual Machines in this Scale Set. If unspecified + this defaults to the value for the name field. If the + value of the name field is not a valid computer_name_prefix, + then you must specify computer_name_prefix. Changing + this forces a new resource to be created. + type: string + disablePasswordAuthentication: + description: When an admin_password is specified disable_password_authentication + must be set to false. Defaults to true. 
+ type: boolean + patchAssessmentMode: + description: Specifies the mode of VM Guest Patching for + the virtual machines that are associated to the Virtual + Machine Scale Set. Possible values are AutomaticByPlatform + or ImageDefault. Defaults to ImageDefault. + type: string + patchMode: + description: Specifies the mode of in-guest patching of + this Windows Virtual Machine. Possible values are Manual, + AutomaticByOS and AutomaticByPlatform. Defaults to AutomaticByOS. + For more information on patch modes please see the product + documentation. + type: string + provisionVmAgent: + description: Should the Azure VM Agent be provisioned + on each Virtual Machine in the Scale Set? Defaults to + true. Changing this value forces a new resource to be + created. + type: boolean + secret: + description: One or more secret blocks as defined below. + items: + properties: + certificate: + description: One or more certificate blocks as defined + below. + items: + properties: + url: + description: The Secret URL of a Key Vault + Certificate. + type: string + type: object + type: array + keyVaultId: + description: The ID of the Key Vault from which + all Secrets should be sourced. + type: string + type: object + type: array + type: object + windowsConfiguration: + description: A windows_configuration block as documented below. + properties: + additionalUnattendContent: + description: One or more additional_unattend_content blocks + as defined below. Changing this forces a new resource + to be created. + items: + properties: + contentSecretRef: + description: The XML formatted content that is added + to the unattend.xml file for the specified path + and component. Changing this forces a new resource + to be created. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - key + - name + - namespace + type: object + setting: + description: The name of the setting to which the + content applies. Possible values are AutoLogon + and FirstLogonCommands. Changing this forces a + new resource to be created. + type: string + required: + - contentSecretRef + type: object + type: array + adminPasswordSecretRef: + description: The Password which should be used for the + local-administrator on this Virtual Machine. Changing + this forces a new resource to be created. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + adminUsername: + description: The username of the local administrator on + each Virtual Machine Scale Set instance. Changing this + forces a new resource to be created. + type: string + computerNamePrefix: + description: The prefix which should be used for the name + of the Virtual Machines in this Scale Set. If unspecified + this defaults to the value for the name field. If the + value of the name field is not a valid computer_name_prefix, + then you must specify computer_name_prefix. Changing + this forces a new resource to be created. + type: string + enableAutomaticUpdates: + description: Are automatic updates enabled for this Virtual + Machine? Defaults to true. + type: boolean + hotpatchingEnabled: + description: Should the VM be patched without requiring + a reboot? Possible values are true or false. Defaults + to false. For more information about hot patching please + see the product documentation. + type: boolean + patchAssessmentMode: + description: Specifies the mode of VM Guest Patching for + the virtual machines that are associated to the Virtual + Machine Scale Set. Possible values are AutomaticByPlatform + or ImageDefault. Defaults to ImageDefault. 
+ type: string + patchMode: + description: Specifies the mode of in-guest patching of + this Windows Virtual Machine. Possible values are Manual, + AutomaticByOS and AutomaticByPlatform. Defaults to AutomaticByOS. + For more information on patch modes please see the product + documentation. + type: string + provisionVmAgent: + description: Should the Azure VM Agent be provisioned + on each Virtual Machine in the Scale Set? Defaults to + true. Changing this value forces a new resource to be + created. + type: boolean + secret: + description: One or more secret blocks as defined below. + items: + properties: + certificate: + description: One or more certificate blocks as defined + below. + items: + properties: + store: + description: The certificate store on the + Virtual Machine where the certificate should + be added. + type: string + url: + description: The Secret URL of a Key Vault + Certificate. + type: string + type: object + type: array + keyVaultId: + description: The ID of the Key Vault from which + all Secrets should be sourced. + type: string + type: object + type: array + timezone: + description: Specifies the time zone of the virtual machine, + the possible values are defined here. + type: string + winrmListener: + description: One or more winrm_listener blocks as defined + below. Changing this forces a new resource to be created. + items: + properties: + certificateUrl: + description: The Secret URL of a Key Vault Certificate, + which must be specified when protocol is set to + Https. Changing this forces a new resource to + be created. + type: string + protocol: + description: Specifies the protocol of listener. + Possible values are Http or Https. Changing this + forces a new resource to be created. + type: string + type: object + type: array + required: + - adminPasswordSecretRef + type: object + type: object + plan: + description: A plan block as documented below. Changing this forces + a new resource to be created. 
+ properties: + name: + description: Specifies the name of the image from the marketplace. + Changing this forces a new resource to be created. + type: string + product: + description: Specifies the product of the image from the marketplace. + Changing this forces a new resource to be created. + type: string + publisher: + description: Specifies the publisher of the image. Changing + this forces a new resource to be created. + type: string + type: object + platformFaultDomainCount: + description: Specifies the number of fault domains that are used + by this Virtual Machine Scale Set. Changing this forces a new + resource to be created. + type: number + priority: + description: The Priority of this Virtual Machine Scale Set. Possible + values are Regular and Spot. Defaults to Regular. Changing this + value forces a new resource. + type: string + priorityMix: + description: a priority_mix block as defined below + properties: + baseRegularCount: + description: Specifies the base number of VMs of Regular priority + that will be created before any VMs of priority Spot are + created. Possible values are integers between 0 and 1000. + Defaults to 0. + type: number + regularPercentageAboveBase: + description: Specifies the desired percentage of VM instances + that are of Regular priority after the base count has been + reached. Possible values are integers between 0 and 100. + Defaults to 0. + type: number + type: object + proximityPlacementGroupId: + description: The ID of the Proximity Placement Group which the + Virtual Machine should be assigned to. Changing this forces + a new resource to be created. + type: string + resourceGroupName: + description: The name of the Resource Group in which the Virtual + Machine Scale Set should exist. Changing this forces a new resource + to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + singlePlacementGroup: + description: Should this Virtual Machine Scale Set be limited + to a Single Placement Group, which means the number of instances + will be capped at 100 Virtual Machines. Possible values are + true or false. + type: boolean + skuName: + description: 'The name of the SKU to be used by this Virtual Machine + Scale Set. Valid values include: any of the General purpose, + Compute optimized, Memory optimized, Storage optimized, GPU + optimized, FPGA optimized, High performance, or Previous generation + virtual machine SKUs.' + type: string + sourceImageId: + description: The ID of an Image which each Virtual Machine in + this Scale Set should be based on. Possible Image ID types include + Image IDs, Shared Image IDs, Shared Image Version IDs, Community + Gallery Image IDs, Community Gallery Image Version IDs, Shared + Gallery Image IDs and Shared Gallery Image Version IDs. + type: string + sourceImageReference: + description: A source_image_reference block as defined below. + properties: + offer: + description: Specifies the offer of the image used to create + the virtual machines. Changing this forces a new resource + to be created. + type: string + publisher: + description: Specifies the publisher of the image used to + create the virtual machines. Changing this forces a new + resource to be created. + type: string + sku: + description: Specifies the SKU of the image used to create + the virtual machines. + type: string + version: + description: Specifies the version of the image used to create + the virtual machines. + type: string + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to this + Virtual Machine Scale Set. 
+ type: object + x-kubernetes-map-type: granular + terminationNotification: + description: A termination_notification block as defined below. + properties: + enabled: + description: Should the termination notification be enabled + on this Virtual Machine Scale Set? Possible values true + or false + type: boolean + timeout: + description: Length of time (in minutes, between 5 and 15) + a notification to be sent to the VM on the instance metadata + server till the VM gets deleted. The time duration should + be specified in ISO 8601 format. Defaults to PT5M. + type: string + type: object + userDataBase64SecretRef: + description: The Base64-Encoded User Data which should be used + for this Virtual Machine Scale Set. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + zoneBalance: + description: Should the Virtual Machines in this Scale Set be + strictly evenly distributed across Availability Zones? Defaults + to false. Changing this forces a new resource to be created. + type: boolean + zones: + description: Specifies a list of Availability Zones across which + the Virtual Machine Scale Set will create instances. Changing + this forces a new Virtual Machine Scale Set to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + additionalCapabilities: + description: An additional_capabilities block as defined below. + properties: + ultraSsdEnabled: + description: Should the capacity to enable Data Disks of the + UltraSSD_LRS storage account type be supported on this Virtual + Machine Scale Set? Defaults to false. Changing this forces + a new resource to be created. + type: boolean + type: object + automaticInstanceRepair: + description: An automatic_instance_repair block as defined below. + properties: + enabled: + description: Should the automatic instance repair be enabled + on this Virtual Machine Scale Set? Possible values are true + and false. + type: boolean + gracePeriod: + description: Amount of time for which automatic repairs will + be delayed. The grace period starts right after the VM is + found unhealthy. Possible values are between 30 and 90 minutes. + The time duration should be specified in ISO 8601 format + (e.g. PT30M to PT90M). Defaults to PT30M. + type: string + type: object + bootDiagnostics: + description: A boot_diagnostics block as defined below. + properties: + storageAccountUri: + description: The Primary/Secondary Endpoint for the Azure + Storage Account which should be used to store Boot Diagnostics, + including Console Output and Screenshots from the Hypervisor. + By including a boot_diagnostics block without passing the + storage_account_uri field will cause the API to utilize + a Managed Storage Account to store the Boot Diagnostics + output. + type: string + type: object + capacityReservationGroupId: + description: Specifies the ID of the Capacity Reservation Group + which the Virtual Machine Scale Set should be allocated to. + Changing this forces a new resource to be created. 
+ type: string + dataDisk: + description: One or more data_disk blocks as defined below. + items: + properties: + caching: + description: The type of Caching which should be used for + this Data Disk. Possible values are None, ReadOnly and + ReadWrite. + type: string + createOption: + description: The create option which should be used for + this Data Disk. Possible values are Empty and FromImage. + Defaults to Empty. (FromImage should only be used if the + source image includes data disks). + type: string + diskEncryptionSetId: + description: The ID of the Disk Encryption Set which should + be used to encrypt the Data Disk. Changing this forces + a new resource to be created. + type: string + diskSizeGb: + description: The size of the Data Disk which should be created. + Required if create_option is specified as Empty. + type: number + lun: + description: The Logical Unit Number of the Data Disk, which + must be unique within the Virtual Machine. Required if + create_option is specified as Empty. + type: number + storageAccountType: + description: The Type of Storage Account which should back + this Data Disk. Possible values include Standard_LRS, + StandardSSD_LRS, StandardSSD_ZRS, Premium_LRS, PremiumV2_LRS, + Premium_ZRS and UltraSSD_LRS. + type: string + ultraSsdDiskIopsReadWrite: + description: Specifies the Read-Write IOPS for this Data + Disk. Only settable when storage_account_type is PremiumV2_LRS + or UltraSSD_LRS. + type: number + ultraSsdDiskMbpsReadWrite: + description: Specifies the bandwidth in MB per second for + this Data Disk. Only settable when storage_account_type + is PremiumV2_LRS or UltraSSD_LRS. + type: number + writeAcceleratorEnabled: + description: Specifies if Write Accelerator is enabled on + the Data Disk. Defaults to false. + type: boolean + type: object + type: array + encryptionAtHostEnabled: + description: Should disks attached to this Virtual Machine Scale + Set be encrypted by enabling Encryption at Host? 
+ type: boolean + evictionPolicy: + description: The Policy which should be used by Spot Virtual Machines + that are Evicted from the Scale Set. Possible values are Deallocate + and Delete. Changing this forces a new resource to be created. + type: string + extension: + description: One or more extension blocks as defined below + items: + properties: + autoUpgradeMinorVersionEnabled: + description: Should the latest version of the Extension + be used at Deployment Time, if one is available? This + won't auto-update the extension on existing installation. + Defaults to true. + type: boolean + extensionsToProvisionAfterVmCreation: + description: An ordered list of Extension names which Virtual + Machine Scale Set should provision after VM creation. + items: + type: string + type: array + failureSuppressionEnabled: + description: Should failures from the extension be suppressed? + Possible values are true or false. + type: boolean + forceExtensionExecutionOnChange: + description: A value which, when different to the previous + value can be used to force-run the Extension even if the + Extension Configuration hasn't changed. + type: string + name: + description: The name for the Virtual Machine Scale Set + Extension. + type: string + protectedSettingsFromKeyVault: + description: A protected_settings_from_key_vault block as + defined below. + properties: + secretUrl: + description: The URL to the Key Vault Secret which stores + the protected settings. + type: string + sourceVaultId: + description: The ID of the source Key Vault. + type: string + type: object + publisher: + description: Specifies the Publisher of the Extension. + type: string + settings: + description: A JSON String which specifies Settings for + the Extension. + type: string + type: + description: Specifies the Type of the Extension. + type: string + typeHandlerVersion: + description: Specifies the version of the extension to use, + available versions can be found using the Azure CLI. 
+ type: string + type: object + type: array + extensionOperationsEnabled: + description: Should extension operations be allowed on the Virtual + Machine Scale Set? Possible values are true or false. Defaults + to true. Changing this forces a new Virtual Machine Scale Set + to be created. + type: boolean + extensionsTimeBudget: + description: Specifies the time alloted for all extensions to + start. The time duration should be between 15 minutes and 120 + minutes (inclusive) and should be specified in ISO 8601 format. + Defaults to PT1H30M. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Managed Identity IDs + to be assigned to this Windows Virtual Machine Scale Set. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: The type of Managed Identity that should be configured + on this Windows Virtual Machine Scale Set. Only possible + value is UserAssigned. + type: string + type: object + instances: + description: The number of Virtual Machines in the Virtual Machine + Scale Set. + type: number + licenseType: + description: Specifies the type of on-premise license (also known + as Azure Hybrid Use Benefit) which should be used for this Virtual + Machine Scale Set. Possible values are None, Windows_Client + and Windows_Server. + type: string + location: + description: The Azure location where the Virtual Machine Scale + Set should exist. Changing this forces a new resource to be + created. + type: string + maxBidPrice: + description: The maximum price you're willing to pay for each + Virtual Machine in this Scale Set, in US Dollars; which must + be greater than the current spot price. If this bid price falls + below the current spot price the Virtual Machines in the Scale + Set will be evicted using the eviction_policy. Defaults to -1, + which means that each Virtual Machine in the Scale Set should + not be evicted for price reasons. 
+ type: number + networkInterface: + description: One or more network_interface blocks as defined below. + items: + properties: + dnsServers: + description: A list of IP Addresses of DNS Servers which + should be assigned to the Network Interface. + items: + type: string + type: array + enableAcceleratedNetworking: + description: Does this Network Interface support Accelerated + Networking? Possible values are true and false. Defaults + to false. + type: boolean + enableIpForwarding: + description: Does this Network Interface support IP Forwarding? + Possible values are true and false. Defaults to false. + type: boolean + ipConfiguration: + description: One or more ip_configuration blocks as defined + above. + items: + properties: + applicationGatewayBackendAddressPoolIds: + description: A list of Backend Address Pools IDs from + a Application Gateway which this Virtual Machine + Scale Set should be connected to. + items: + type: string + type: array + x-kubernetes-list-type: set + applicationSecurityGroupIds: + description: A list of Application Security Group + IDs which this Virtual Machine Scale Set should + be connected to. + items: + type: string + type: array + x-kubernetes-list-type: set + loadBalancerBackendAddressPoolIds: + description: A list of Backend Address Pools IDs from + a Load Balancer which this Virtual Machine Scale + Set should be connected to. + items: + type: string + type: array + x-kubernetes-list-type: set + name: + description: The Name of the Public IP Address Configuration. + type: string + primary: + description: Is this the Primary IP Configuration? + Possible values are true and false. Defaults to + false. + type: boolean + publicIpAddress: + description: A public_ip_address block as defined + below. + items: + properties: + domainNameLabel: + description: The Prefix which should be used + for the Domain Name Label for each Virtual + Machine Instance. 
Azure concatenates the Domain + Name Label and Virtual Machine Index to create + a unique Domain Name Label for each Virtual + Machine. Valid values must be between 1 and + 26 characters long, start with a lower case + letter, end with a lower case letter or number + and contains only a-z, 0-9 and hyphens. + type: string + idleTimeoutInMinutes: + description: The Idle Timeout in Minutes for + the Public IP Address. Possible values are + in the range 4 to 32. + type: number + ipTag: + description: One or more ip_tag blocks as defined + above. Changing this forces a new resource + to be created. + items: + properties: + tag: + description: The IP Tag associated with + the Public IP, such as SQL or Storage. + Changing this forces a new resource + to be created. + type: string + type: + description: The Type of IP Tag, such + as FirstPartyUsage. Changing this forces + a new resource to be created. + type: string + type: object + type: array + name: + description: The Name of the Public IP Address + Configuration. + type: string + publicIpPrefixId: + description: The ID of the Public IP Address + Prefix from where Public IP Addresses should + be allocated. Changing this forces a new resource + to be created. + type: string + skuName: + description: 'The name of the SKU to be used + by this Virtual Machine Scale Set. Valid values + include: any of the General purpose, Compute + optimized, Memory optimized, Storage optimized, + GPU optimized, FPGA optimized, High performance, + or Previous generation virtual machine SKUs.' + type: string + version: + description: Specifies the version of the image + used to create the virtual machines. + type: string + type: object + type: array + subnetId: + description: The ID of the Subnet which this IP Configuration + should be connected to. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + version: + description: Specifies the version of the image used + to create the virtual machines. + type: string + type: object + type: array + name: + description: The Name which should be used for this Network + Interface. Changing this forces a new resource to be created. + type: string + networkSecurityGroupId: + description: The ID of a Network Security Group which should + be assigned to this Network Interface. + type: string + primary: + description: Is this the Primary IP Configuration? Possible + values are true and false. Defaults to false. + type: boolean + type: object + type: array + osDisk: + description: An os_disk block as defined below. + properties: + caching: + description: The Type of Caching which should be used for + the Internal OS Disk. Possible values are None, ReadOnly + and ReadWrite. + type: string + diffDiskSettings: + description: A diff_disk_settings block as defined above. + Changing this forces a new resource to be created. + properties: + option: + description: Specifies the Ephemeral Disk Settings for + the OS Disk. At this time the only possible value is + Local. Changing this forces a new resource to be created. + type: string + placement: + description: Specifies where to store the Ephemeral Disk. + Possible values are CacheDisk and ResourceDisk. Defaults + to CacheDisk. Changing this forces a new resource to + be created. + type: string + type: object + diskEncryptionSetId: + description: The ID of the Disk Encryption Set which should + be used to encrypt this OS Disk. Changing this forces a + new resource to be created. + type: string + diskSizeGb: + description: The Size of the Internal OS Disk in GB, if you + wish to vary from the size used in the image this Virtual + Machine Scale Set is sourced from. + type: number + storageAccountType: + description: The Type of Storage Account which should back + this the Internal OS Disk. 
Possible values include Standard_LRS, + StandardSSD_LRS, StandardSSD_ZRS, Premium_LRS and Premium_ZRS. + Changing this forces a new resource to be created. + type: string + writeAcceleratorEnabled: + description: Specifies if Write Accelerator is enabled on + the OS Disk. Defaults to false. + type: boolean + type: object + osProfile: + description: An os_profile block as defined below. + properties: + linuxConfiguration: + description: A linux_configuration block as documented below. + properties: + adminSshKey: + description: A admin_ssh_key block as documented below. + items: + properties: + publicKey: + description: The Public Key which should be used + for authentication, which needs to be at least + 2048-bit and in ssh-rsa format. + type: string + username: + description: The Username for which this Public + SSH Key should be configured. + type: string + type: object + type: array + adminUsername: + description: The username of the local administrator on + each Virtual Machine Scale Set instance. Changing this + forces a new resource to be created. + type: string + computerNamePrefix: + description: The prefix which should be used for the name + of the Virtual Machines in this Scale Set. If unspecified + this defaults to the value for the name field. If the + value of the name field is not a valid computer_name_prefix, + then you must specify computer_name_prefix. Changing + this forces a new resource to be created. + type: string + disablePasswordAuthentication: + description: When an admin_password is specified disable_password_authentication + must be set to false. Defaults to true. + type: boolean + patchAssessmentMode: + description: Specifies the mode of VM Guest Patching for + the virtual machines that are associated to the Virtual + Machine Scale Set. Possible values are AutomaticByPlatform + or ImageDefault. Defaults to ImageDefault. + type: string + patchMode: + description: Specifies the mode of in-guest patching of + this Windows Virtual Machine. 
Possible values are Manual, + AutomaticByOS and AutomaticByPlatform. Defaults to AutomaticByOS. + For more information on patch modes please see the product + documentation. + type: string + provisionVmAgent: + description: Should the Azure VM Agent be provisioned + on each Virtual Machine in the Scale Set? Defaults to + true. Changing this value forces a new resource to be + created. + type: boolean + secret: + description: One or more secret blocks as defined below. + items: + properties: + certificate: + description: One or more certificate blocks as defined + below. + items: + properties: + url: + description: The Secret URL of a Key Vault + Certificate. + type: string + type: object + type: array + keyVaultId: + description: The ID of the Key Vault from which + all Secrets should be sourced. + type: string + type: object + type: array + type: object + windowsConfiguration: + description: A windows_configuration block as documented below. + properties: + additionalUnattendContent: + description: One or more additional_unattend_content blocks + as defined below. Changing this forces a new resource + to be created. + items: + properties: + setting: + description: The name of the setting to which the + content applies. Possible values are AutoLogon + and FirstLogonCommands. Changing this forces a + new resource to be created. + type: string + type: object + type: array + adminUsername: + description: The username of the local administrator on + each Virtual Machine Scale Set instance. Changing this + forces a new resource to be created. + type: string + computerNamePrefix: + description: The prefix which should be used for the name + of the Virtual Machines in this Scale Set. If unspecified + this defaults to the value for the name field. If the + value of the name field is not a valid computer_name_prefix, + then you must specify computer_name_prefix. Changing + this forces a new resource to be created. 
+ type: string + enableAutomaticUpdates: + description: Are automatic updates enabled for this Virtual + Machine? Defaults to true. + type: boolean + hotpatchingEnabled: + description: Should the VM be patched without requiring + a reboot? Possible values are true or false. Defaults + to false. For more information about hot patching please + see the product documentation. + type: boolean + patchAssessmentMode: + description: Specifies the mode of VM Guest Patching for + the virtual machines that are associated to the Virtual + Machine Scale Set. Possible values are AutomaticByPlatform + or ImageDefault. Defaults to ImageDefault. + type: string + patchMode: + description: Specifies the mode of in-guest patching of + this Windows Virtual Machine. Possible values are Manual, + AutomaticByOS and AutomaticByPlatform. Defaults to AutomaticByOS. + For more information on patch modes please see the product + documentation. + type: string + provisionVmAgent: + description: Should the Azure VM Agent be provisioned + on each Virtual Machine in the Scale Set? Defaults to + true. Changing this value forces a new resource to be + created. + type: boolean + secret: + description: One or more secret blocks as defined below. + items: + properties: + certificate: + description: One or more certificate blocks as defined + below. + items: + properties: + store: + description: The certificate store on the + Virtual Machine where the certificate should + be added. + type: string + url: + description: The Secret URL of a Key Vault + Certificate. + type: string + type: object + type: array + keyVaultId: + description: The ID of the Key Vault from which + all Secrets should be sourced. + type: string + type: object + type: array + timezone: + description: Specifies the time zone of the virtual machine, + the possible values are defined here. + type: string + winrmListener: + description: One or more winrm_listener blocks as defined + below. 
Changing this forces a new resource to be created. + items: + properties: + certificateUrl: + description: The Secret URL of a Key Vault Certificate, + which must be specified when protocol is set to + Https. Changing this forces a new resource to + be created. + type: string + protocol: + description: Specifies the protocol of listener. + Possible values are Http or Https. Changing this + forces a new resource to be created. + type: string + type: object + type: array + type: object + type: object + plan: + description: A plan block as documented below. Changing this forces + a new resource to be created. + properties: + name: + description: Specifies the name of the image from the marketplace. + Changing this forces a new resource to be created. + type: string + product: + description: Specifies the product of the image from the marketplace. + Changing this forces a new resource to be created. + type: string + publisher: + description: Specifies the publisher of the image. Changing + this forces a new resource to be created. + type: string + type: object + platformFaultDomainCount: + description: Specifies the number of fault domains that are used + by this Virtual Machine Scale Set. Changing this forces a new + resource to be created. + type: number + priority: + description: The Priority of this Virtual Machine Scale Set. Possible + values are Regular and Spot. Defaults to Regular. Changing this + value forces a new resource. + type: string + priorityMix: + description: a priority_mix block as defined below + properties: + baseRegularCount: + description: Specifies the base number of VMs of Regular priority + that will be created before any VMs of priority Spot are + created. Possible values are integers between 0 and 1000. + Defaults to 0. + type: number + regularPercentageAboveBase: + description: Specifies the desired percentage of VM instances + that are of Regular priority after the base count has been + reached. 
Possible values are integers between 0 and 100. + Defaults to 0. + type: number + type: object + proximityPlacementGroupId: + description: The ID of the Proximity Placement Group which the + Virtual Machine should be assigned to. Changing this forces + a new resource to be created. + type: string + singlePlacementGroup: + description: Should this Virtual Machine Scale Set be limited + to a Single Placement Group, which means the number of instances + will be capped at 100 Virtual Machines. Possible values are + true or false. + type: boolean + skuName: + description: 'The name of the SKU to be used by this Virtual Machine + Scale Set. Valid values include: any of the General purpose, + Compute optimized, Memory optimized, Storage optimized, GPU + optimized, FPGA optimized, High performance, or Previous generation + virtual machine SKUs.' + type: string + sourceImageId: + description: The ID of an Image which each Virtual Machine in + this Scale Set should be based on. Possible Image ID types include + Image IDs, Shared Image IDs, Shared Image Version IDs, Community + Gallery Image IDs, Community Gallery Image Version IDs, Shared + Gallery Image IDs and Shared Gallery Image Version IDs. + type: string + sourceImageReference: + description: A source_image_reference block as defined below. + properties: + offer: + description: Specifies the offer of the image used to create + the virtual machines. Changing this forces a new resource + to be created. + type: string + publisher: + description: Specifies the publisher of the image used to + create the virtual machines. Changing this forces a new + resource to be created. + type: string + sku: + description: Specifies the SKU of the image used to create + the virtual machines. + type: string + version: + description: Specifies the version of the image used to create + the virtual machines. 
+ type: string + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to this + Virtual Machine Scale Set. + type: object + x-kubernetes-map-type: granular + terminationNotification: + description: A termination_notification block as defined below. + properties: + enabled: + description: Should the termination notification be enabled + on this Virtual Machine Scale Set? Possible values true + or false + type: boolean + timeout: + description: Length of time (in minutes, between 5 and 15) + a notification to be sent to the VM on the instance metadata + server till the VM gets deleted. The time duration should + be specified in ISO 8601 format. Defaults to PT5M. + type: string + type: object + zoneBalance: + description: Should the Virtual Machines in this Scale Set be + strictly evenly distributed across Availability Zones? Defaults + to false. Changing this forces a new resource to be created. + type: boolean + zones: + description: Specifies a list of Availability Zones across which + the Virtual Machine Scale Set will create instances. Changing + this forces a new Virtual Machine Scale Set to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.platformFaultDomainCount is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.platformFaultDomainCount) + || (has(self.initProvider) && has(self.initProvider.platformFaultDomainCount))' + status: + description: OrchestratedVirtualMachineScaleSetStatus defines the observed + state of OrchestratedVirtualMachineScaleSet. + properties: + atProvider: + properties: + additionalCapabilities: + description: An additional_capabilities block as defined below. 
+ properties: + ultraSsdEnabled: + description: Should the capacity to enable Data Disks of the + UltraSSD_LRS storage account type be supported on this Virtual + Machine Scale Set? Defaults to false. Changing this forces + a new resource to be created. + type: boolean + type: object + automaticInstanceRepair: + description: An automatic_instance_repair block as defined below. + properties: + enabled: + description: Should the automatic instance repair be enabled + on this Virtual Machine Scale Set? Possible values are true + and false. + type: boolean + gracePeriod: + description: Amount of time for which automatic repairs will + be delayed. The grace period starts right after the VM is + found unhealthy. Possible values are between 30 and 90 minutes. + The time duration should be specified in ISO 8601 format + (e.g. PT30M to PT90M). Defaults to PT30M. + type: string + type: object + bootDiagnostics: + description: A boot_diagnostics block as defined below. + properties: + storageAccountUri: + description: The Primary/Secondary Endpoint for the Azure + Storage Account which should be used to store Boot Diagnostics, + including Console Output and Screenshots from the Hypervisor. + By including a boot_diagnostics block without passing the + storage_account_uri field will cause the API to utilize + a Managed Storage Account to store the Boot Diagnostics + output. + type: string + type: object + capacityReservationGroupId: + description: Specifies the ID of the Capacity Reservation Group + which the Virtual Machine Scale Set should be allocated to. + Changing this forces a new resource to be created. + type: string + dataDisk: + description: One or more data_disk blocks as defined below. + items: + properties: + caching: + description: The type of Caching which should be used for + this Data Disk. Possible values are None, ReadOnly and + ReadWrite. + type: string + createOption: + description: The create option which should be used for + this Data Disk. 
Possible values are Empty and FromImage. + Defaults to Empty. (FromImage should only be used if the + source image includes data disks). + type: string + diskEncryptionSetId: + description: The ID of the Disk Encryption Set which should + be used to encrypt the Data Disk. Changing this forces + a new resource to be created. + type: string + diskSizeGb: + description: The size of the Data Disk which should be created. + Required if create_option is specified as Empty. + type: number + lun: + description: The Logical Unit Number of the Data Disk, which + must be unique within the Virtual Machine. Required if + create_option is specified as Empty. + type: number + storageAccountType: + description: The Type of Storage Account which should back + this Data Disk. Possible values include Standard_LRS, + StandardSSD_LRS, StandardSSD_ZRS, Premium_LRS, PremiumV2_LRS, + Premium_ZRS and UltraSSD_LRS. + type: string + ultraSsdDiskIopsReadWrite: + description: Specifies the Read-Write IOPS for this Data + Disk. Only settable when storage_account_type is PremiumV2_LRS + or UltraSSD_LRS. + type: number + ultraSsdDiskMbpsReadWrite: + description: Specifies the bandwidth in MB per second for + this Data Disk. Only settable when storage_account_type + is PremiumV2_LRS or UltraSSD_LRS. + type: number + writeAcceleratorEnabled: + description: Specifies if Write Accelerator is enabled on + the Data Disk. Defaults to false. + type: boolean + type: object + type: array + encryptionAtHostEnabled: + description: Should disks attached to this Virtual Machine Scale + Set be encrypted by enabling Encryption at Host? + type: boolean + evictionPolicy: + description: The Policy which should be used by Spot Virtual Machines + that are Evicted from the Scale Set. Possible values are Deallocate + and Delete. Changing this forces a new resource to be created. 
+ type: string + extension: + description: One or more extension blocks as defined below + items: + properties: + autoUpgradeMinorVersionEnabled: + description: Should the latest version of the Extension + be used at Deployment Time, if one is available? This + won't auto-update the extension on existing installation. + Defaults to true. + type: boolean + extensionsToProvisionAfterVmCreation: + description: An ordered list of Extension names which Virtual + Machine Scale Set should provision after VM creation. + items: + type: string + type: array + failureSuppressionEnabled: + description: Should failures from the extension be suppressed? + Possible values are true or false. + type: boolean + forceExtensionExecutionOnChange: + description: A value which, when different to the previous + value can be used to force-run the Extension even if the + Extension Configuration hasn't changed. + type: string + name: + description: The name for the Virtual Machine Scale Set + Extension. + type: string + protectedSettingsFromKeyVault: + description: A protected_settings_from_key_vault block as + defined below. + properties: + secretUrl: + description: The URL to the Key Vault Secret which stores + the protected settings. + type: string + sourceVaultId: + description: The ID of the source Key Vault. + type: string + type: object + publisher: + description: Specifies the Publisher of the Extension. + type: string + settings: + description: A JSON String which specifies Settings for + the Extension. + type: string + type: + description: Specifies the Type of the Extension. + type: string + typeHandlerVersion: + description: Specifies the version of the extension to use, + available versions can be found using the Azure CLI. + type: string + type: object + type: array + extensionOperationsEnabled: + description: Should extension operations be allowed on the Virtual + Machine Scale Set? Possible values are true or false. Defaults + to true. 
Changing this forces a new Virtual Machine Scale Set + to be created. + type: boolean + extensionsTimeBudget: + description: Specifies the time alloted for all extensions to + start. The time duration should be between 15 minutes and 120 + minutes (inclusive) and should be specified in ISO 8601 format. + Defaults to PT1H30M. + type: string + id: + description: The ID of the Virtual Machine Scale Set. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Managed Identity IDs + to be assigned to this Windows Virtual Machine Scale Set. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: The type of Managed Identity that should be configured + on this Windows Virtual Machine Scale Set. Only possible + value is UserAssigned. + type: string + type: object + instances: + description: The number of Virtual Machines in the Virtual Machine + Scale Set. + type: number + licenseType: + description: Specifies the type of on-premise license (also known + as Azure Hybrid Use Benefit) which should be used for this Virtual + Machine Scale Set. Possible values are None, Windows_Client + and Windows_Server. + type: string + location: + description: The Azure location where the Virtual Machine Scale + Set should exist. Changing this forces a new resource to be + created. + type: string + maxBidPrice: + description: The maximum price you're willing to pay for each + Virtual Machine in this Scale Set, in US Dollars; which must + be greater than the current spot price. If this bid price falls + below the current spot price the Virtual Machines in the Scale + Set will be evicted using the eviction_policy. Defaults to -1, + which means that each Virtual Machine in the Scale Set should + not be evicted for price reasons. + type: number + networkInterface: + description: One or more network_interface blocks as defined below. 
+ items: + properties: + dnsServers: + description: A list of IP Addresses of DNS Servers which + should be assigned to the Network Interface. + items: + type: string + type: array + enableAcceleratedNetworking: + description: Does this Network Interface support Accelerated + Networking? Possible values are true and false. Defaults + to false. + type: boolean + enableIpForwarding: + description: Does this Network Interface support IP Forwarding? + Possible values are true and false. Defaults to false. + type: boolean + ipConfiguration: + description: One or more ip_configuration blocks as defined + above. + items: + properties: + applicationGatewayBackendAddressPoolIds: + description: A list of Backend Address Pools IDs from + a Application Gateway which this Virtual Machine + Scale Set should be connected to. + items: + type: string + type: array + x-kubernetes-list-type: set + applicationSecurityGroupIds: + description: A list of Application Security Group + IDs which this Virtual Machine Scale Set should + be connected to. + items: + type: string + type: array + x-kubernetes-list-type: set + loadBalancerBackendAddressPoolIds: + description: A list of Backend Address Pools IDs from + a Load Balancer which this Virtual Machine Scale + Set should be connected to. + items: + type: string + type: array + x-kubernetes-list-type: set + name: + description: The Name of the Public IP Address Configuration. + type: string + primary: + description: Is this the Primary IP Configuration? + Possible values are true and false. Defaults to + false. + type: boolean + publicIpAddress: + description: A public_ip_address block as defined + below. + items: + properties: + domainNameLabel: + description: The Prefix which should be used + for the Domain Name Label for each Virtual + Machine Instance. Azure concatenates the Domain + Name Label and Virtual Machine Index to create + a unique Domain Name Label for each Virtual + Machine. 
Valid values must be between 1 and + 26 characters long, start with a lower case + letter, end with a lower case letter or number + and contains only a-z, 0-9 and hyphens. + type: string + idleTimeoutInMinutes: + description: The Idle Timeout in Minutes for + the Public IP Address. Possible values are + in the range 4 to 32. + type: number + ipTag: + description: One or more ip_tag blocks as defined + above. Changing this forces a new resource + to be created. + items: + properties: + tag: + description: The IP Tag associated with + the Public IP, such as SQL or Storage. + Changing this forces a new resource + to be created. + type: string + type: + description: The Type of IP Tag, such + as FirstPartyUsage. Changing this forces + a new resource to be created. + type: string + type: object + type: array + name: + description: The Name of the Public IP Address + Configuration. + type: string + publicIpPrefixId: + description: The ID of the Public IP Address + Prefix from where Public IP Addresses should + be allocated. Changing this forces a new resource + to be created. + type: string + skuName: + description: 'The name of the SKU to be used + by this Virtual Machine Scale Set. Valid values + include: any of the General purpose, Compute + optimized, Memory optimized, Storage optimized, + GPU optimized, FPGA optimized, High performance, + or Previous generation virtual machine SKUs.' + type: string + version: + description: Specifies the version of the image + used to create the virtual machines. + type: string + type: object + type: array + subnetId: + description: The ID of the Subnet which this IP Configuration + should be connected to. + type: string + version: + description: Specifies the version of the image used + to create the virtual machines. + type: string + type: object + type: array + name: + description: The Name which should be used for this Network + Interface. Changing this forces a new resource to be created. 
+ type: string + networkSecurityGroupId: + description: The ID of a Network Security Group which should + be assigned to this Network Interface. + type: string + primary: + description: Is this the Primary IP Configuration? Possible + values are true and false. Defaults to false. + type: boolean + type: object + type: array + osDisk: + description: An os_disk block as defined below. + properties: + caching: + description: The Type of Caching which should be used for + the Internal OS Disk. Possible values are None, ReadOnly + and ReadWrite. + type: string + diffDiskSettings: + description: A diff_disk_settings block as defined above. + Changing this forces a new resource to be created. + properties: + option: + description: Specifies the Ephemeral Disk Settings for + the OS Disk. At this time the only possible value is + Local. Changing this forces a new resource to be created. + type: string + placement: + description: Specifies where to store the Ephemeral Disk. + Possible values are CacheDisk and ResourceDisk. Defaults + to CacheDisk. Changing this forces a new resource to + be created. + type: string + type: object + diskEncryptionSetId: + description: The ID of the Disk Encryption Set which should + be used to encrypt this OS Disk. Changing this forces a + new resource to be created. + type: string + diskSizeGb: + description: The Size of the Internal OS Disk in GB, if you + wish to vary from the size used in the image this Virtual + Machine Scale Set is sourced from. + type: number + storageAccountType: + description: The Type of Storage Account which should back + this the Internal OS Disk. Possible values include Standard_LRS, + StandardSSD_LRS, StandardSSD_ZRS, Premium_LRS and Premium_ZRS. + Changing this forces a new resource to be created. + type: string + writeAcceleratorEnabled: + description: Specifies if Write Accelerator is enabled on + the OS Disk. Defaults to false. 
+ type: boolean + type: object + osProfile: + description: An os_profile block as defined below. + properties: + linuxConfiguration: + description: A linux_configuration block as documented below. + properties: + adminSshKey: + description: A admin_ssh_key block as documented below. + items: + properties: + publicKey: + description: The Public Key which should be used + for authentication, which needs to be at least + 2048-bit and in ssh-rsa format. + type: string + username: + description: The Username for which this Public + SSH Key should be configured. + type: string + type: object + type: array + adminUsername: + description: The username of the local administrator on + each Virtual Machine Scale Set instance. Changing this + forces a new resource to be created. + type: string + computerNamePrefix: + description: The prefix which should be used for the name + of the Virtual Machines in this Scale Set. If unspecified + this defaults to the value for the name field. If the + value of the name field is not a valid computer_name_prefix, + then you must specify computer_name_prefix. Changing + this forces a new resource to be created. + type: string + disablePasswordAuthentication: + description: When an admin_password is specified disable_password_authentication + must be set to false. Defaults to true. + type: boolean + patchAssessmentMode: + description: Specifies the mode of VM Guest Patching for + the virtual machines that are associated to the Virtual + Machine Scale Set. Possible values are AutomaticByPlatform + or ImageDefault. Defaults to ImageDefault. + type: string + patchMode: + description: Specifies the mode of in-guest patching of + this Windows Virtual Machine. Possible values are Manual, + AutomaticByOS and AutomaticByPlatform. Defaults to AutomaticByOS. + For more information on patch modes please see the product + documentation. 
+ type: string + provisionVmAgent: + description: Should the Azure VM Agent be provisioned + on each Virtual Machine in the Scale Set? Defaults to + true. Changing this value forces a new resource to be + created. + type: boolean + secret: + description: One or more secret blocks as defined below. + items: + properties: + certificate: + description: One or more certificate blocks as defined + below. + items: + properties: + url: + description: The Secret URL of a Key Vault + Certificate. + type: string + type: object + type: array + keyVaultId: + description: The ID of the Key Vault from which + all Secrets should be sourced. + type: string + type: object + type: array + type: object + windowsConfiguration: + description: A windows_configuration block as documented below. + properties: + additionalUnattendContent: + description: One or more additional_unattend_content blocks + as defined below. Changing this forces a new resource + to be created. + items: + properties: + setting: + description: The name of the setting to which the + content applies. Possible values are AutoLogon + and FirstLogonCommands. Changing this forces a + new resource to be created. + type: string + type: object + type: array + adminUsername: + description: The username of the local administrator on + each Virtual Machine Scale Set instance. Changing this + forces a new resource to be created. + type: string + computerNamePrefix: + description: The prefix which should be used for the name + of the Virtual Machines in this Scale Set. If unspecified + this defaults to the value for the name field. If the + value of the name field is not a valid computer_name_prefix, + then you must specify computer_name_prefix. Changing + this forces a new resource to be created. + type: string + enableAutomaticUpdates: + description: Are automatic updates enabled for this Virtual + Machine? Defaults to true. 
+ type: boolean + hotpatchingEnabled: + description: Should the VM be patched without requiring + a reboot? Possible values are true or false. Defaults + to false. For more information about hot patching please + see the product documentation. + type: boolean + patchAssessmentMode: + description: Specifies the mode of VM Guest Patching for + the virtual machines that are associated to the Virtual + Machine Scale Set. Possible values are AutomaticByPlatform + or ImageDefault. Defaults to ImageDefault. + type: string + patchMode: + description: Specifies the mode of in-guest patching of + this Windows Virtual Machine. Possible values are Manual, + AutomaticByOS and AutomaticByPlatform. Defaults to AutomaticByOS. + For more information on patch modes please see the product + documentation. + type: string + provisionVmAgent: + description: Should the Azure VM Agent be provisioned + on each Virtual Machine in the Scale Set? Defaults to + true. Changing this value forces a new resource to be + created. + type: boolean + secret: + description: One or more secret blocks as defined below. + items: + properties: + certificate: + description: One or more certificate blocks as defined + below. + items: + properties: + store: + description: The certificate store on the + Virtual Machine where the certificate should + be added. + type: string + url: + description: The Secret URL of a Key Vault + Certificate. + type: string + type: object + type: array + keyVaultId: + description: The ID of the Key Vault from which + all Secrets should be sourced. + type: string + type: object + type: array + timezone: + description: Specifies the time zone of the virtual machine, + the possible values are defined here. + type: string + winrmListener: + description: One or more winrm_listener blocks as defined + below. Changing this forces a new resource to be created. 
+ items: + properties: + certificateUrl: + description: The Secret URL of a Key Vault Certificate, + which must be specified when protocol is set to + Https. Changing this forces a new resource to + be created. + type: string + protocol: + description: Specifies the protocol of listener. + Possible values are Http or Https. Changing this + forces a new resource to be created. + type: string + type: object + type: array + type: object + type: object + plan: + description: A plan block as documented below. Changing this forces + a new resource to be created. + properties: + name: + description: Specifies the name of the image from the marketplace. + Changing this forces a new resource to be created. + type: string + product: + description: Specifies the product of the image from the marketplace. + Changing this forces a new resource to be created. + type: string + publisher: + description: Specifies the publisher of the image. Changing + this forces a new resource to be created. + type: string + type: object + platformFaultDomainCount: + description: Specifies the number of fault domains that are used + by this Virtual Machine Scale Set. Changing this forces a new + resource to be created. + type: number + priority: + description: The Priority of this Virtual Machine Scale Set. Possible + values are Regular and Spot. Defaults to Regular. Changing this + value forces a new resource. + type: string + priorityMix: + description: a priority_mix block as defined below + properties: + baseRegularCount: + description: Specifies the base number of VMs of Regular priority + that will be created before any VMs of priority Spot are + created. Possible values are integers between 0 and 1000. + Defaults to 0. + type: number + regularPercentageAboveBase: + description: Specifies the desired percentage of VM instances + that are of Regular priority after the base count has been + reached. Possible values are integers between 0 and 100. + Defaults to 0. 
+ type: number + type: object + proximityPlacementGroupId: + description: The ID of the Proximity Placement Group which the + Virtual Machine should be assigned to. Changing this forces + a new resource to be created. + type: string + resourceGroupName: + description: The name of the Resource Group in which the Virtual + Machine Scale Set should exist. Changing this forces a new resource + to be created. + type: string + singlePlacementGroup: + description: Should this Virtual Machine Scale Set be limited + to a Single Placement Group, which means the number of instances + will be capped at 100 Virtual Machines. Possible values are + true or false. + type: boolean + skuName: + description: 'The name of the SKU to be used by this Virtual Machine + Scale Set. Valid values include: any of the General purpose, + Compute optimized, Memory optimized, Storage optimized, GPU + optimized, FPGA optimized, High performance, or Previous generation + virtual machine SKUs.' + type: string + sourceImageId: + description: The ID of an Image which each Virtual Machine in + this Scale Set should be based on. Possible Image ID types include + Image IDs, Shared Image IDs, Shared Image Version IDs, Community + Gallery Image IDs, Community Gallery Image Version IDs, Shared + Gallery Image IDs and Shared Gallery Image Version IDs. + type: string + sourceImageReference: + description: A source_image_reference block as defined below. + properties: + offer: + description: Specifies the offer of the image used to create + the virtual machines. Changing this forces a new resource + to be created. + type: string + publisher: + description: Specifies the publisher of the image used to + create the virtual machines. Changing this forces a new + resource to be created. + type: string + sku: + description: Specifies the SKU of the image used to create + the virtual machines. + type: string + version: + description: Specifies the version of the image used to create + the virtual machines. 
+ type: string + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to this + Virtual Machine Scale Set. + type: object + x-kubernetes-map-type: granular + terminationNotification: + description: A termination_notification block as defined below. + properties: + enabled: + description: Should the termination notification be enabled + on this Virtual Machine Scale Set? Possible values true + or false + type: boolean + timeout: + description: Length of time (in minutes, between 5 and 15) + a notification to be sent to the VM on the instance metadata + server till the VM gets deleted. The time duration should + be specified in ISO 8601 format. Defaults to PT5M. + type: string + type: object + uniqueId: + description: The Unique ID for the Virtual Machine Scale Set. + type: string + zoneBalance: + description: Should the Virtual Machines in this Scale Set be + strictly evenly distributed across Availability Zones? Defaults + to false. Changing this forces a new resource to be created. + type: boolean + zones: + description: Specifies a list of Availability Zones across which + the Virtual Machine Scale Set will create instances. Changing + this forces a new Virtual Machine Scale Set to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/compute.azure.upbound.io_sharedimagegalleries.yaml b/package/crds/compute.azure.upbound.io_sharedimagegalleries.yaml index d722a4aa8..cacbce5ef 100644 --- a/package/crds/compute.azure.upbound.io_sharedimagegalleries.yaml +++ b/package/crds/compute.azure.upbound.io_sharedimagegalleries.yaml @@ -580,3 +580,546 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: SharedImageGallery is the Schema for the 
SharedImageGallerys + API. Manages a Shared Image Gallery. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SharedImageGallerySpec defines the desired state of SharedImageGallery + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: A description for this Shared Image Gallery. + type: string + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + resourceGroupName: + description: The name of the resource group in which to create + the Shared Image Gallery. 
Changing this forces a new resource + to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sharing: + description: A sharing block as defined below. Changing this forces + a new resource to be created. + properties: + communityGallery: + description: A community_gallery block as defined below. Changing + this forces a new resource to be created. + properties: + eula: + description: The End User Licence Agreement for the Shared + Image Gallery. Changing this forces a new resource to + be created. + type: string + prefix: + description: Prefix of the community public name for the + Shared Image Gallery. Changing this forces a new resource + to be created. + type: string + publisherEmail: + description: Email of the publisher for the Shared Image + Gallery. Changing this forces a new resource to be created. + type: string + publisherUri: + description: URI of the publisher for the Shared Image + Gallery. Changing this forces a new resource to be created. + type: string + type: object + permission: + description: The permission of the Shared Image Gallery when + sharing. Possible values are Community, Groups and Private. + Changing this forces a new resource to be created. + type: string + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the Shared Image Gallery. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. 
The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: A description for this Shared Image Gallery. + type: string + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + sharing: + description: A sharing block as defined below. Changing this forces + a new resource to be created. + properties: + communityGallery: + description: A community_gallery block as defined below. Changing + this forces a new resource to be created. + properties: + eula: + description: The End User Licence Agreement for the Shared + Image Gallery. Changing this forces a new resource to + be created. + type: string + prefix: + description: Prefix of the community public name for the + Shared Image Gallery. Changing this forces a new resource + to be created. + type: string + publisherEmail: + description: Email of the publisher for the Shared Image + Gallery. Changing this forces a new resource to be created. + type: string + publisherUri: + description: URI of the publisher for the Shared Image + Gallery. Changing this forces a new resource to be created. + type: string + type: object + permission: + description: The permission of the Shared Image Gallery when + sharing. Possible values are Community, Groups and Private. + Changing this forces a new resource to be created. + type: string + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the Shared Image Gallery. 
+ type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. 
+ - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + status: + description: SharedImageGalleryStatus defines the observed state of SharedImageGallery. 
+ properties: + atProvider: + properties: + description: + description: A description for this Shared Image Gallery. + type: string + id: + description: The ID of the Shared Image Gallery. + type: string + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + resourceGroupName: + description: The name of the resource group in which to create + the Shared Image Gallery. Changing this forces a new resource + to be created. + type: string + sharing: + description: A sharing block as defined below. Changing this forces + a new resource to be created. + properties: + communityGallery: + description: A community_gallery block as defined below. Changing + this forces a new resource to be created. + properties: + eula: + description: The End User Licence Agreement for the Shared + Image Gallery. Changing this forces a new resource to + be created. + type: string + name: + description: Specifies the name of the Shared Image Gallery. + Changing this forces a new resource to be created. + type: string + prefix: + description: Prefix of the community public name for the + Shared Image Gallery. Changing this forces a new resource + to be created. + type: string + publisherEmail: + description: Email of the publisher for the Shared Image + Gallery. Changing this forces a new resource to be created. + type: string + publisherUri: + description: URI of the publisher for the Shared Image + Gallery. Changing this forces a new resource to be created. + type: string + type: object + permission: + description: The permission of the Shared Image Gallery when + sharing. Possible values are Community, Groups and Private. + Changing this forces a new resource to be created. + type: string + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the Shared Image Gallery. 
+ type: object + x-kubernetes-map-type: granular + uniqueName: + description: The Unique Name for this Shared Image Gallery. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/compute.azure.upbound.io_sharedimages.yaml b/package/crds/compute.azure.upbound.io_sharedimages.yaml index aedd057bb..97dbb82a0 100644 --- a/package/crds/compute.azure.upbound.io_sharedimages.yaml +++ b/package/crds/compute.azure.upbound.io_sharedimages.yaml @@ -912,3 +912,885 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: SharedImage is the Schema for the SharedImages API. Manages a + Shared Image within a Shared Image Gallery. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SharedImageSpec defines the desired state of SharedImage + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + acceleratedNetworkSupportEnabled: + description: Specifies if the Shared Image supports Accelerated + Network. Changing this forces a new resource to be created. + type: boolean + architecture: + description: CPU architecture supported by an OS. Possible values + are x64 and Arm64. Defaults to x64. Changing this forces a new + resource to be created. + type: string + confidentialVmEnabled: + description: Specifies if Confidential Virtual Machines enabled. + It will enable all the features of trusted, with higher confidentiality + features for isolate machines or encrypted data. Available for + Gen2 machines. Changing this forces a new resource to be created. + type: boolean + confidentialVmSupported: + description: Specifies if supports creation of both Confidential + virtual machines and Gen2 virtual machines with standard security + from a compatible Gen2 OS disk VHD or Gen2 Managed image. Changing + this forces a new resource to be created. + type: boolean + description: + description: A description of this Shared Image. 
+ type: string + diskTypesNotAllowed: + description: One or more Disk Types not allowed for the Image. + Possible values include Standard_LRS and Premium_LRS. + items: + type: string + type: array + x-kubernetes-list-type: set + endOfLifeDate: + description: The end of life date in RFC3339 format of the Image. + type: string + eula: + description: The End User Licence Agreement for the Shared Image. + Changing this forces a new resource to be created. + type: string + galleryName: + description: Specifies the name of the Shared Image Gallery in + which this Shared Image should exist. Changing this forces a + new resource to be created. + type: string + galleryNameRef: + description: Reference to a SharedImageGallery in compute to populate + galleryName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + galleryNameSelector: + description: Selector for a SharedImageGallery in compute to populate + galleryName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + hyperVGeneration: + description: The generation of HyperV that the Virtual Machine + used to create the Shared Image is based on. Possible values + are V1 and V2. Defaults to V1. Changing this forces a new resource + to be created. + type: string + identifier: + description: An identifier block as defined below. + properties: + offer: + description: The Offer Name for this Shared Image. Changing + this forces a new resource to be created. + type: string + publisher: + description: The Publisher Name for this Gallery Image. Changing + this forces a new resource to be created. + type: string + sku: + description: The Name of the SKU for this Gallery Image. Changing + this forces a new resource to be created. + type: string + type: object + location: + description: Specifies the supported Azure location where the + Shared Image Gallery exists. Changing this forces a new resource + to be created. + type: string + maxRecommendedMemoryInGb: + description: Maximum memory in GB recommended for the Image. 
+ type: number + maxRecommendedVcpuCount: + description: Maximum count of vCPUs recommended for the Image. + type: number + minRecommendedMemoryInGb: + description: Minimum memory in GB recommended for the Image. + type: number + minRecommendedVcpuCount: + description: Minimum count of vCPUs recommended for the Image. + type: number + osType: + description: The type of Operating System present in this Shared + Image. Possible values are Linux and Windows. Changing this + forces a new resource to be created. + type: string + privacyStatementUri: + description: The URI containing the Privacy Statement associated + with this Shared Image. Changing this forces a new resource + to be created. + type: string + purchasePlan: + description: A purchase_plan block as defined below. + properties: + name: + description: The Purchase Plan Name for this Shared Image. + Changing this forces a new resource to be created. + type: string + product: + description: The Purchase Plan Product for this Gallery Image. + Changing this forces a new resource to be created. + type: string + publisher: + description: The Purchase Plan Publisher for this Gallery + Image. Changing this forces a new resource to be created. + type: string + type: object + releaseNoteUri: + description: The URI containing the Release Notes associated with + this Shared Image. + type: string + resourceGroupName: + description: The name of the resource group in which the Shared + Image Gallery exists. Changing this forces a new resource to + be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + specialized: + description: Specifies that the Operating System used inside this + Image has not been Generalized (for example, sysprep on Windows + has not been run). Changing this forces a new resource to be + created. + type: boolean + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the Shared Image. + type: object + x-kubernetes-map-type: granular + trustedLaunchEnabled: + description: Specifies if Trusted Launch has to be enabled for + the Virtual Machine created from the Shared Image. Changing + this forces a new resource to be created. + type: boolean + trustedLaunchSupported: + description: Specifies if supports creation of both Trusted Launch + virtual machines and Gen2 virtual machines with standard security + created from the Shared Image. Changing this forces a new resource + to be created. + type: boolean + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + acceleratedNetworkSupportEnabled: + description: Specifies if the Shared Image supports Accelerated + Network. Changing this forces a new resource to be created. + type: boolean + architecture: + description: CPU architecture supported by an OS. Possible values + are x64 and Arm64. Defaults to x64. 
Changing this forces a new + resource to be created. + type: string + confidentialVmEnabled: + description: Specifies if Confidential Virtual Machines enabled. + It will enable all the features of trusted, with higher confidentiality + features for isolate machines or encrypted data. Available for + Gen2 machines. Changing this forces a new resource to be created. + type: boolean + confidentialVmSupported: + description: Specifies if supports creation of both Confidential + virtual machines and Gen2 virtual machines with standard security + from a compatible Gen2 OS disk VHD or Gen2 Managed image. Changing + this forces a new resource to be created. + type: boolean + description: + description: A description of this Shared Image. + type: string + diskTypesNotAllowed: + description: One or more Disk Types not allowed for the Image. + Possible values include Standard_LRS and Premium_LRS. + items: + type: string + type: array + x-kubernetes-list-type: set + endOfLifeDate: + description: The end of life date in RFC3339 format of the Image. + type: string + eula: + description: The End User Licence Agreement for the Shared Image. + Changing this forces a new resource to be created. + type: string + hyperVGeneration: + description: The generation of HyperV that the Virtual Machine + used to create the Shared Image is based on. Possible values + are V1 and V2. Defaults to V1. Changing this forces a new resource + to be created. + type: string + identifier: + description: An identifier block as defined below. + properties: + offer: + description: The Offer Name for this Shared Image. Changing + this forces a new resource to be created. + type: string + publisher: + description: The Publisher Name for this Gallery Image. Changing + this forces a new resource to be created. + type: string + sku: + description: The Name of the SKU for this Gallery Image. Changing + this forces a new resource to be created. 
+ type: string + type: object + location: + description: Specifies the supported Azure location where the + Shared Image Gallery exists. Changing this forces a new resource + to be created. + type: string + maxRecommendedMemoryInGb: + description: Maximum memory in GB recommended for the Image. + type: number + maxRecommendedVcpuCount: + description: Maximum count of vCPUs recommended for the Image. + type: number + minRecommendedMemoryInGb: + description: Minimum memory in GB recommended for the Image. + type: number + minRecommendedVcpuCount: + description: Minimum count of vCPUs recommended for the Image. + type: number + osType: + description: The type of Operating System present in this Shared + Image. Possible values are Linux and Windows. Changing this + forces a new resource to be created. + type: string + privacyStatementUri: + description: The URI containing the Privacy Statement associated + with this Shared Image. Changing this forces a new resource + to be created. + type: string + purchasePlan: + description: A purchase_plan block as defined below. + properties: + name: + description: The Purchase Plan Name for this Shared Image. + Changing this forces a new resource to be created. + type: string + product: + description: The Purchase Plan Product for this Gallery Image. + Changing this forces a new resource to be created. + type: string + publisher: + description: The Purchase Plan Publisher for this Gallery + Image. Changing this forces a new resource to be created. + type: string + type: object + releaseNoteUri: + description: The URI containing the Release Notes associated with + this Shared Image. + type: string + specialized: + description: Specifies that the Operating System used inside this + Image has not been Generalized (for example, sysprep on Windows + has not been run). Changing this forces a new resource to be + created. 
+ type: boolean + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the Shared Image. + type: object + x-kubernetes-map-type: granular + trustedLaunchEnabled: + description: Specifies if Trusted Launch has to be enabled for + the Virtual Machine created from the Shared Image. Changing + this forces a new resource to be created. + type: boolean + trustedLaunchSupported: + description: Specifies if supports creation of both Trusted Launch + virtual machines and Gen2 virtual machines with standard security + created from the Shared Image. Changing this forces a new resource + to be created. + type: boolean + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.identifier is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.identifier) + || (has(self.initProvider) && has(self.initProvider.identifier))' + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.osType is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.osType) + || (has(self.initProvider) && has(self.initProvider.osType))' + status: + description: SharedImageStatus defines the observed state of SharedImage. + properties: + atProvider: + properties: + acceleratedNetworkSupportEnabled: + description: Specifies if the Shared Image supports Accelerated + Network. Changing this forces a new resource to be created. + type: boolean + architecture: + description: CPU architecture supported by an OS. Possible values + are x64 and Arm64. Defaults to x64. Changing this forces a new + resource to be created. 
+ type: string + confidentialVmEnabled: + description: Specifies if Confidential Virtual Machines enabled. + It will enable all the features of trusted, with higher confidentiality + features for isolate machines or encrypted data. Available for + Gen2 machines. Changing this forces a new resource to be created. + type: boolean + confidentialVmSupported: + description: Specifies if supports creation of both Confidential + virtual machines and Gen2 virtual machines with standard security + from a compatible Gen2 OS disk VHD or Gen2 Managed image. Changing + this forces a new resource to be created. + type: boolean + description: + description: A description of this Shared Image. + type: string + diskTypesNotAllowed: + description: One or more Disk Types not allowed for the Image. + Possible values include Standard_LRS and Premium_LRS. + items: + type: string + type: array + x-kubernetes-list-type: set + endOfLifeDate: + description: The end of life date in RFC3339 format of the Image. + type: string + eula: + description: The End User Licence Agreement for the Shared Image. + Changing this forces a new resource to be created. + type: string + galleryName: + description: Specifies the name of the Shared Image Gallery in + which this Shared Image should exist. Changing this forces a + new resource to be created. + type: string + hyperVGeneration: + description: The generation of HyperV that the Virtual Machine + used to create the Shared Image is based on. Possible values + are V1 and V2. Defaults to V1. Changing this forces a new resource + to be created. + type: string + id: + description: The ID of the Shared Image. + type: string + identifier: + description: An identifier block as defined below. + properties: + offer: + description: The Offer Name for this Shared Image. Changing + this forces a new resource to be created. + type: string + publisher: + description: The Publisher Name for this Gallery Image. Changing + this forces a new resource to be created. 
+ type: string + sku: + description: The Name of the SKU for this Gallery Image. Changing + this forces a new resource to be created. + type: string + type: object + location: + description: Specifies the supported Azure location where the + Shared Image Gallery exists. Changing this forces a new resource + to be created. + type: string + maxRecommendedMemoryInGb: + description: Maximum memory in GB recommended for the Image. + type: number + maxRecommendedVcpuCount: + description: Maximum count of vCPUs recommended for the Image. + type: number + minRecommendedMemoryInGb: + description: Minimum memory in GB recommended for the Image. + type: number + minRecommendedVcpuCount: + description: Minimum count of vCPUs recommended for the Image. + type: number + osType: + description: The type of Operating System present in this Shared + Image. Possible values are Linux and Windows. Changing this + forces a new resource to be created. + type: string + privacyStatementUri: + description: The URI containing the Privacy Statement associated + with this Shared Image. Changing this forces a new resource + to be created. + type: string + purchasePlan: + description: A purchase_plan block as defined below. + properties: + name: + description: The Purchase Plan Name for this Shared Image. + Changing this forces a new resource to be created. + type: string + product: + description: The Purchase Plan Product for this Gallery Image. + Changing this forces a new resource to be created. + type: string + publisher: + description: The Purchase Plan Publisher for this Gallery + Image. Changing this forces a new resource to be created. + type: string + type: object + releaseNoteUri: + description: The URI containing the Release Notes associated with + this Shared Image. + type: string + resourceGroupName: + description: The name of the resource group in which the Shared + Image Gallery exists. Changing this forces a new resource to + be created. 
+ type: string + specialized: + description: Specifies that the Operating System used inside this + Image has not been Generalized (for example, sysprep on Windows + has not been run). Changing this forces a new resource to be + created. + type: boolean + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the Shared Image. + type: object + x-kubernetes-map-type: granular + trustedLaunchEnabled: + description: Specifies if Trusted Launch has to be enabled for + the Virtual Machine created from the Shared Image. Changing + this forces a new resource to be created. + type: boolean + trustedLaunchSupported: + description: Specifies if supports creation of both Trusted Launch + virtual machines and Gen2 virtual machines with standard security + created from the Shared Image. Changing this forces a new resource + to be created. + type: boolean + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? 
+ type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/compute.azure.upbound.io_snapshots.yaml b/package/crds/compute.azure.upbound.io_snapshots.yaml index 7314eba12..ac76b0d48 100644 --- a/package/crds/compute.azure.upbound.io_snapshots.yaml +++ b/package/crds/compute.azure.upbound.io_snapshots.yaml @@ -795,3 +795,762 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Snapshot is the Schema for the Snapshots API. Manages a Disk + Snapshot. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SnapshotSpec defines the desired state of Snapshot + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + createOption: + description: Indicates how the snapshot is to be created. Possible + values are Copy or Import. + type: string + diskSizeGb: + description: The size of the Snapshotted Disk in GB. + type: number + encryptionSettings: + description: A encryption_settings block as defined below. + properties: + diskEncryptionKey: + description: A disk_encryption_key block as defined below. + properties: + secretUrl: + description: The URL to the Key Vault Secret used as the + Disk Encryption Key. This can be found as id on the + azurerm_key_vault_secret resource. + type: string + sourceVaultId: + description: The ID of the source Key Vault. This can + be found as id on the azurerm_key_vault resource. 
+ type: string + type: object + enabled: + type: boolean + keyEncryptionKey: + description: A key_encryption_key block as defined below. + properties: + keyUrl: + description: The URL to the Key Vault Key used as the + Key Encryption Key. This can be found as id on the azurerm_key_vault_key + resource. + type: string + sourceVaultId: + description: The ID of the source Key Vault. This can + be found as id on the azurerm_key_vault resource. + type: string + type: object + type: object + incrementalEnabled: + description: Specifies if the Snapshot is incremental. Changing + this forces a new resource to be created. + type: boolean + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + resourceGroupName: + description: The name of the resource group in which to create + the Snapshot. Changing this forces a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sourceResourceId: + description: Specifies a reference to an existing snapshot, when + create_option is Copy. Changing this forces a new resource to + be created. + type: string + sourceUri: + description: Specifies the URI to a Managed or Unmanaged Disk. + Changing this forces a new resource to be created. + type: string + sourceUriRef: + description: Reference to a ManagedDisk in compute to populate + sourceUri. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + sourceUriSelector: + description: Selector for a ManagedDisk in compute to populate + sourceUri. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + storageAccountId: + description: Specifies the ID of an storage account. Used with + source_uri to allow authorization during import of unmanaged + blobs from a different subscription. Changing this forces a + new resource to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + createOption: + description: Indicates how the snapshot is to be created. Possible + values are Copy or Import. + type: string + diskSizeGb: + description: The size of the Snapshotted Disk in GB. + type: number + encryptionSettings: + description: A encryption_settings block as defined below. + properties: + diskEncryptionKey: + description: A disk_encryption_key block as defined below. + properties: + secretUrl: + description: The URL to the Key Vault Secret used as the + Disk Encryption Key. This can be found as id on the + azurerm_key_vault_secret resource. + type: string + sourceVaultId: + description: The ID of the source Key Vault. This can + be found as id on the azurerm_key_vault resource. 
+ type: string + type: object + enabled: + type: boolean + keyEncryptionKey: + description: A key_encryption_key block as defined below. + properties: + keyUrl: + description: The URL to the Key Vault Key used as the + Key Encryption Key. This can be found as id on the azurerm_key_vault_key + resource. + type: string + sourceVaultId: + description: The ID of the source Key Vault. This can + be found as id on the azurerm_key_vault resource. + type: string + type: object + type: object + incrementalEnabled: + description: Specifies if the Snapshot is incremental. Changing + this forces a new resource to be created. + type: boolean + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + sourceResourceId: + description: Specifies a reference to an existing snapshot, when + create_option is Copy. Changing this forces a new resource to + be created. + type: string + sourceUri: + description: Specifies the URI to a Managed or Unmanaged Disk. + Changing this forces a new resource to be created. + type: string + sourceUriRef: + description: Reference to a ManagedDisk in compute to populate + sourceUri. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + sourceUriSelector: + description: Selector for a ManagedDisk in compute to populate + sourceUri. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + storageAccountId: + description: Specifies the ID of an storage account. Used with + source_uri to allow authorization during import of unmanaged + blobs from a different subscription. Changing this forces a + new resource to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. 
+ ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.createOption is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.createOption) + || (has(self.initProvider) && has(self.initProvider.createOption))' + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + status: + description: SnapshotStatus defines the observed state of Snapshot. + properties: + atProvider: + properties: + createOption: + description: Indicates how the snapshot is to be created. Possible + values are Copy or Import. + type: string + diskSizeGb: + description: The size of the Snapshotted Disk in GB. + type: number + encryptionSettings: + description: A encryption_settings block as defined below. + properties: + diskEncryptionKey: + description: A disk_encryption_key block as defined below. + properties: + secretUrl: + description: The URL to the Key Vault Secret used as the + Disk Encryption Key. This can be found as id on the + azurerm_key_vault_secret resource. + type: string + sourceVaultId: + description: The ID of the source Key Vault. This can + be found as id on the azurerm_key_vault resource. + type: string + type: object + enabled: + type: boolean + keyEncryptionKey: + description: A key_encryption_key block as defined below. + properties: + keyUrl: + description: The URL to the Key Vault Key used as the + Key Encryption Key. This can be found as id on the azurerm_key_vault_key + resource. + type: string + sourceVaultId: + description: The ID of the source Key Vault. 
This can + be found as id on the azurerm_key_vault resource. + type: string + type: object + type: object + id: + description: The Snapshot ID. + type: string + incrementalEnabled: + description: Specifies if the Snapshot is incremental. Changing + this forces a new resource to be created. + type: boolean + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + resourceGroupName: + description: The name of the resource group in which to create + the Snapshot. Changing this forces a new resource to be created. + type: string + sourceResourceId: + description: Specifies a reference to an existing snapshot, when + create_option is Copy. Changing this forces a new resource to + be created. + type: string + sourceUri: + description: Specifies the URI to a Managed or Unmanaged Disk. + Changing this forces a new resource to be created. + type: string + storageAccountId: + description: Specifies the ID of an storage account. Used with + source_uri to allow authorization during import of unmanaged + blobs from a different subscription. Changing this forces a + new resource to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + trustedLaunchEnabled: + description: Whether Trusted Launch is enabled for the Snapshot. + type: boolean + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/compute.azure.upbound.io_virtualmachineextensions.yaml b/package/crds/compute.azure.upbound.io_virtualmachineextensions.yaml index 135a1eb66..13845e501 100644 --- a/package/crds/compute.azure.upbound.io_virtualmachineextensions.yaml +++ b/package/crds/compute.azure.upbound.io_virtualmachineextensions.yaml @@ -608,3 +608,587 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: VirtualMachineExtension is the Schema for the VirtualMachineExtensions + API. Manages a Virtual Machine Extension to provide post deployment configuration + and run automated tasks. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: VirtualMachineExtensionSpec defines the desired state of + VirtualMachineExtension + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + autoUpgradeMinorVersion: + description: Specifies if the platform deploys the latest minor + version update to the type_handler_version specified. + type: boolean + automaticUpgradeEnabled: + description: Should the Extension be automatically updated whenever + the Publisher releases a new version of this VM Extension? + type: boolean + failureSuppressionEnabled: + description: Should failures from the extension be suppressed? + Possible values are true or false. Defaults to false. + type: boolean + protectedSettingsFromKeyVault: + description: A protected_settings_from_key_vault block as defined + below. + properties: + secretUrl: + description: The URL to the Key Vault Secret which stores + the protected settings. + type: string + sourceVaultId: + description: The ID of the source Key Vault. + type: string + type: object + protectedSettingsSecretRef: + description: The protected_settings passed to the extension, like + settings, these are specified as a JSON object in a string. 
+ properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + provisionAfterExtensions: + description: Specifies the collection of extension names after + which this extension needs to be provisioned. + items: + type: string + type: array + publisher: + description: The publisher of the extension, available publishers + can be found by using the Azure CLI. Changing this forces a + new resource to be created. + type: string + settings: + description: The settings passed to the extension, these are specified + as a JSON object in a string. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: + description: The type of extension, available types for a publisher + can be found using the Azure CLI. + type: string + typeHandlerVersion: + description: Specifies the version of the extension to use, available + versions can be found using the Azure CLI. + type: string + virtualMachineId: + description: The ID of the Virtual Machine. Changing this forces + a new resource to be created + type: string + virtualMachineIdRef: + description: Reference to a LinuxVirtualMachine in compute to + populate virtualMachineId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualMachineIdSelector: + description: Selector for a LinuxVirtualMachine in compute to + populate virtualMachineId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. 
The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + autoUpgradeMinorVersion: + description: Specifies if the platform deploys the latest minor + version update to the type_handler_version specified. + type: boolean + automaticUpgradeEnabled: + description: Should the Extension be automatically updated whenever + the Publisher releases a new version of this VM Extension? + type: boolean + failureSuppressionEnabled: + description: Should failures from the extension be suppressed? + Possible values are true or false. Defaults to false. + type: boolean + protectedSettingsFromKeyVault: + description: A protected_settings_from_key_vault block as defined + below. + properties: + secretUrl: + description: The URL to the Key Vault Secret which stores + the protected settings. + type: string + sourceVaultId: + description: The ID of the source Key Vault. + type: string + type: object + provisionAfterExtensions: + description: Specifies the collection of extension names after + which this extension needs to be provisioned. + items: + type: string + type: array + publisher: + description: The publisher of the extension, available publishers + can be found by using the Azure CLI. Changing this forces a + new resource to be created. + type: string + settings: + description: The settings passed to the extension, these are specified + as a JSON object in a string. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. 
+ type: object + x-kubernetes-map-type: granular + type: + description: The type of extension, available types for a publisher + can be found using the Azure CLI. + type: string + typeHandlerVersion: + description: Specifies the version of the extension to use, available + versions can be found using the Azure CLI. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.publisher is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.publisher) + || (has(self.initProvider) && has(self.initProvider.publisher))' + - message: spec.forProvider.type is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.type) + || (has(self.initProvider) && has(self.initProvider.type))' + - message: spec.forProvider.typeHandlerVersion is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.typeHandlerVersion) + || (has(self.initProvider) && has(self.initProvider.typeHandlerVersion))' + status: + description: VirtualMachineExtensionStatus defines the observed state + of VirtualMachineExtension. + properties: + atProvider: + properties: + autoUpgradeMinorVersion: + description: Specifies if the platform deploys the latest minor + version update to the type_handler_version specified. + type: boolean + automaticUpgradeEnabled: + description: Should the Extension be automatically updated whenever + the Publisher releases a new version of this VM Extension? + type: boolean + failureSuppressionEnabled: + description: Should failures from the extension be suppressed? + Possible values are true or false. Defaults to false. + type: boolean + id: + description: The ID of the Virtual Machine Extension. + type: string + protectedSettingsFromKeyVault: + description: A protected_settings_from_key_vault block as defined + below. + properties: + secretUrl: + description: The URL to the Key Vault Secret which stores + the protected settings. 
+ type: string + sourceVaultId: + description: The ID of the source Key Vault. + type: string + type: object + provisionAfterExtensions: + description: Specifies the collection of extension names after + which this extension needs to be provisioned. + items: + type: string + type: array + publisher: + description: The publisher of the extension, available publishers + can be found by using the Azure CLI. Changing this forces a + new resource to be created. + type: string + settings: + description: The settings passed to the extension, these are specified + as a JSON object in a string. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: + description: The type of extension, available types for a publisher + can be found using the Azure CLI. + type: string + typeHandlerVersion: + description: Specifies the version of the extension to use, available + versions can be found using the Azure CLI. + type: string + virtualMachineId: + description: The ID of the Virtual Machine. Changing this forces + a new resource to be created + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
+ format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/compute.azure.upbound.io_virtualmachineruncommands.yaml b/package/crds/compute.azure.upbound.io_virtualmachineruncommands.yaml index 4dbc9ceda..6c71431e8 100644 --- a/package/crds/compute.azure.upbound.io_virtualmachineruncommands.yaml +++ b/package/crds/compute.azure.upbound.io_virtualmachineruncommands.yaml @@ -1421,3 +1421,1378 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: VirtualMachineRunCommand is the Schema for the VirtualMachineRunCommands + API. Manages a Virtual Machine Run Command. 
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: VirtualMachineRunCommandSpec defines the desired state of + VirtualMachineRunCommand + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + errorBlobManagedIdentity: + description: An error_blob_managed_identity block as defined below. + User-assigned managed Identity that has access to errorBlobUri + storage blob. + properties: + clientIdSecretRef: + description: The client ID of the managed identity. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. 
+ type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + objectIdSecretRef: + description: The object ID of the managed identity. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + errorBlobUri: + description: Specifies the Azure storage blob where script error + stream will be uploaded. + type: string + errorBlobUriRef: + description: Reference to a Blob in storage to populate errorBlobUri. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + errorBlobUriSelector: + description: Selector for a Blob in storage to populate errorBlobUri. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + location: + description: The Azure Region where the Virtual Machine Run Command + should exist. Changing this forces a new Virtual Machine Run + Command to be created. + type: string + outputBlobManagedIdentity: + description: An output_blob_managed_identity block as defined + below. User-assigned managed Identity that has access to outputBlobUri + storage blob. + properties: + clientIdSecretRef: + description: The client ID of the managed identity. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + objectIdSecretRef: + description: The object ID of the managed identity. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + outputBlobUri: + description: Specifies the Azure storage blob where script output + stream will be uploaded. It can be basic blob URI with SAS token. 
+ type: string + outputBlobUriRef: + description: Reference to a Blob in storage to populate outputBlobUri. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + outputBlobUriSelector: + description: Selector for a Blob in storage to populate outputBlobUri. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + parameter: + description: A list of parameter blocks as defined below. The + parameters used by the script. + items: + properties: + name: + description: The run parameter name. + type: string + value: + description: The run parameter value. + type: string + type: object + type: array + protectedParameter: + description: A list of protected_parameter blocks as defined below. + The protected parameters used by the script. + items: + properties: + nameSecretRef: + description: The run parameter name. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + valueSecretRef: + description: The run parameter value. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + required: + - nameSecretRef + - valueSecretRef + type: object + type: array + runAsPasswordSecretRef: + description: Specifies the user account password on the VM when + executing the Virtual Machine Run Command. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + runAsUser: + description: Specifies the user account on the VM when executing + the Virtual Machine Run Command. + type: string + source: + description: A source block as defined below. 
The source of the + run command script. + properties: + commandId: + type: string + script: + type: string + scriptUri: + type: string + scriptUriManagedIdentity: + description: A script_uri_managed_identity block as defined + above. + properties: + clientIdSecretRef: + description: The client ID of the managed identity. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + objectIdSecretRef: + description: The object ID of the managed identity. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + scriptUriRef: + description: Reference to a Blob in storage to populate scriptUri. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + scriptUriSelector: + description: Selector for a Blob in storage to populate scriptUri. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Virtual Machine Run Command. + type: object + x-kubernetes-map-type: granular + virtualMachineId: + description: Specifies the Virtual Machine ID within which this + Virtual Machine Run Command should exist. Changing this forces + a new Virtual Machine Run Command to be created. + type: string + virtualMachineIdRef: + description: Reference to a LinuxVirtualMachine in compute to + populate virtualMachineId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualMachineIdSelector: + description: Selector for a LinuxVirtualMachine in compute to + populate virtualMachineId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. 
+ InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + errorBlobManagedIdentity: + description: An error_blob_managed_identity block as defined below. + User-assigned managed Identity that has access to errorBlobUri + storage blob. + type: object + errorBlobUri: + description: Specifies the Azure storage blob where script error + stream will be uploaded. + type: string + errorBlobUriRef: + description: Reference to a Blob in storage to populate errorBlobUri. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + errorBlobUriSelector: + description: Selector for a Blob in storage to populate errorBlobUri. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + location: + description: The Azure Region where the Virtual Machine Run Command + should exist. Changing this forces a new Virtual Machine Run + Command to be created. + type: string + outputBlobManagedIdentity: + description: An output_blob_managed_identity block as defined + below. User-assigned managed Identity that has access to outputBlobUri + storage blob. + type: object + outputBlobUri: + description: Specifies the Azure storage blob where script output + stream will be uploaded. It can be basic blob URI with SAS token. + type: string + outputBlobUriRef: + description: Reference to a Blob in storage to populate outputBlobUri. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + outputBlobUriSelector: + description: Selector for a Blob in storage to populate outputBlobUri. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + parameter: + description: A list of parameter blocks as defined below. The + parameters used by the script. + items: + properties: + name: + description: The run parameter name. + type: string + value: + description: The run parameter value. + type: string + type: object + type: array + protectedParameter: + description: A list of protected_parameter blocks as defined below. + The protected parameters used by the script. + items: + type: object + type: array + runAsUser: + description: Specifies the user account on the VM when executing + the Virtual Machine Run Command. + type: string + source: + description: A source block as defined below. The source of the + run command script. + properties: + commandId: + type: string + script: + type: string + scriptUri: + type: string + scriptUriManagedIdentity: + description: A script_uri_managed_identity block as defined + above. + type: object + scriptUriRef: + description: Reference to a Blob in storage to populate scriptUri. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + scriptUriSelector: + description: Selector for a Blob in storage to populate scriptUri. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Virtual Machine Run Command. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. 
Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.source is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.source) + || (has(self.initProvider) && has(self.initProvider.source))' + status: + description: VirtualMachineRunCommandStatus defines the observed state + of VirtualMachineRunCommand. + properties: + atProvider: + properties: + errorBlobManagedIdentity: + description: An error_blob_managed_identity block as defined below. + User-assigned managed Identity that has access to errorBlobUri + storage blob. + properties: + clientIdSecretRef: + description: The client ID of the managed identity. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + objectIdSecretRef: + description: The object ID of the managed identity. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + errorBlobUri: + description: Specifies the Azure storage blob where script error + stream will be uploaded. + type: string + id: + description: The ID of the Virtual Machine Run Command. 
+ type: string + instanceView: + items: + properties: + endTime: + type: string + errorMessage: + type: string + executionMessage: + type: string + executionState: + type: string + exitCode: + type: number + output: + type: string + startTime: + type: string + type: object + type: array + location: + description: The Azure Region where the Virtual Machine Run Command + should exist. Changing this forces a new Virtual Machine Run + Command to be created. + type: string + outputBlobManagedIdentity: + description: An output_blob_managed_identity block as defined + below. User-assigned managed Identity that has access to outputBlobUri + storage blob. + properties: + clientIdSecretRef: + description: The client ID of the managed identity. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + objectIdSecretRef: + description: The object ID of the managed identity. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + outputBlobUri: + description: Specifies the Azure storage blob where script output + stream will be uploaded. It can be basic blob URI with SAS token. + type: string + parameter: + description: A list of parameter blocks as defined below. The + parameters used by the script. + items: + properties: + name: + description: The run parameter name. + type: string + value: + description: The run parameter value. + type: string + type: object + type: array + protectedParameter: + description: A list of protected_parameter blocks as defined below. + The protected parameters used by the script. 
+ items: + properties: + nameSecretRef: + description: The run parameter name. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + valueSecretRef: + description: The run parameter value. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + required: + - nameSecretRef + - valueSecretRef + type: object + type: array + runAsUser: + description: Specifies the user account on the VM when executing + the Virtual Machine Run Command. + type: string + source: + description: A source block as defined below. The source of the + run command script. + properties: + commandId: + type: string + script: + type: string + scriptUri: + type: string + scriptUriManagedIdentity: + description: A script_uri_managed_identity block as defined + above. + properties: + clientIdSecretRef: + description: The client ID of the managed identity. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + objectIdSecretRef: + description: The object ID of the managed identity. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Virtual Machine Run Command. 
+ type: object + x-kubernetes-map-type: granular + virtualMachineId: + description: Specifies the Virtual Machine ID within which this + Virtual Machine Run Command should exist. Changing this forces + a new Virtual Machine Run Command to be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/compute.azure.upbound.io_windowsvirtualmachines.yaml b/package/crds/compute.azure.upbound.io_windowsvirtualmachines.yaml index c70c7034e..995d66146 100644 --- a/package/crds/compute.azure.upbound.io_windowsvirtualmachines.yaml +++ b/package/crds/compute.azure.upbound.io_windowsvirtualmachines.yaml @@ -2043,3 +2043,1968 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: WindowsVirtualMachine is the Schema for the WindowsVirtualMachines + API. Manages a Windows Virtual Machine. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: WindowsVirtualMachineSpec defines the desired state of WindowsVirtualMachine + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + additionalCapabilities: + description: A additional_capabilities block as defined below. + properties: + ultraSsdEnabled: + description: Should the capacity to enable Data Disks of the + UltraSSD_LRS storage account type be supported on this Virtual + Machine? Defaults to false. + type: boolean + type: object + additionalUnattendContent: + description: One or more additional_unattend_content blocks as + defined below. Changing this forces a new resource to be created. + items: + properties: + contentSecretRef: + description: The XML formatted content that is added to + the unattend.xml file for the specified path and component. + Changing this forces a new resource to be created. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + setting: + description: The name of the setting to which the content + applies. 
Possible values are AutoLogon and FirstLogonCommands. + Changing this forces a new resource to be created. + type: string + required: + - contentSecretRef + type: object + type: array + adminPasswordSecretRef: + description: The Password which should be used for the local-administrator + on this Virtual Machine. Changing this forces a new resource + to be created. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + adminUsername: + description: The username of the local administrator used for + the Virtual Machine. Changing this forces a new resource to + be created. + type: string + allowExtensionOperations: + description: Should Extension Operations be allowed on this Virtual + Machine? Defaults to true. + type: boolean + availabilitySetId: + description: Specifies the ID of the Availability Set in which + the Virtual Machine should exist. Changing this forces a new + resource to be created. + type: string + bootDiagnostics: + description: A boot_diagnostics block as defined below. + properties: + storageAccountUri: + description: The Primary/Secondary Endpoint for the Azure + Storage Account which should be used to store Boot Diagnostics, + including Console Output and Screenshots from the Hypervisor. + type: string + type: object + bypassPlatformSafetyChecksOnUserScheduleEnabled: + description: Specifies whether to skip platform scheduled patching + when a user schedule is associated with the VM. Defaults to + false. + type: boolean + capacityReservationGroupId: + description: Specifies the ID of the Capacity Reservation Group + which the Virtual Machine should be allocated to. + type: string + computerName: + description: Specifies the Hostname which should be used for this + Virtual Machine. If unspecified this defaults to the value for + the name field. 
If the value of the name field is not a valid + computer_name, then you must specify computer_name. Changing + this forces a new resource to be created. + type: string + customDataSecretRef: + description: The Base64-Encoded Custom Data which should be used + for this Virtual Machine. Changing this forces a new resource + to be created. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + dedicatedHostGroupId: + description: The ID of a Dedicated Host Group that this Windows + Virtual Machine should be run within. Conflicts with dedicated_host_id. + type: string + dedicatedHostId: + description: The ID of a Dedicated Host where this machine should + be run on. Conflicts with dedicated_host_group_id. + type: string + diskControllerType: + description: Specifies the Disk Controller Type used for this + Virtual Machine. Possible values are SCSI and NVMe. + type: string + edgeZone: + description: Specifies the Edge Zone within the Azure Region where + this Windows Virtual Machine should exist. Changing this forces + a new Windows Virtual Machine to be created. + type: string + enableAutomaticUpdates: + description: Specifies if Automatic Updates are Enabled for the + Windows Virtual Machine. Changing this forces a new resource + to be created. Defaults to true. + type: boolean + encryptionAtHostEnabled: + description: Should all of the disks (including the temp disk) + attached to this Virtual Machine be encrypted by enabling Encryption + at Host? + type: boolean + evictionPolicy: + description: Specifies what should happen when the Virtual Machine + is evicted for price reasons when using a Spot instance. Possible + values are Deallocate and Delete. Changing this forces a new + resource to be created. 
+ type: string + extensionsTimeBudget: + description: Specifies the duration allocated for all extensions + to start. The time duration should be between 15 minutes and + 120 minutes (inclusive) and should be specified in ISO 8601 + format. Defaults to PT1H30M. + type: string + galleryApplication: + description: One or more gallery_application blocks as defined + below. + items: + properties: + automaticUpgradeEnabled: + description: Specifies whether the version will be automatically + updated for the VM when a new Gallery Application version + is available in PIR/SIG. Defaults to false. + type: boolean + configurationBlobUri: + description: Specifies the URI to an Azure Blob that will + replace the default configuration for the package if provided. + type: string + order: + description: Specifies the order in which the packages have + to be installed. Possible values are between 0 and 2,147,483,647. + type: number + tag: + description: Specifies a passthrough value for more generic + context. This field can be any valid string value. + type: string + treatFailureAsDeploymentFailureEnabled: + description: Specifies whether any failure for any operation + in the VmApplication will fail the deployment of the VM. + Defaults to false. + type: boolean + versionId: + description: Specifies the Gallery Application Version resource + ID. + type: string + type: object + type: array + hotpatchingEnabled: + description: Should the VM be patched without requiring a reboot? + Possible values are true or false. Defaults to false. For more + information about hot patching please see the product documentation. + type: boolean + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Windows Virtual Machine. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Windows Virtual Machine. + Possible values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + licenseType: + description: Specifies the type of on-premise license (also known + as Azure Hybrid Use Benefit) which should be used for this Virtual + Machine. Possible values are None, Windows_Client and Windows_Server. + type: string + location: + description: The Azure location where the Windows Virtual Machine + should exist. Changing this forces a new resource to be created. + type: string + maxBidPrice: + description: The maximum price you're willing to pay for this + Virtual Machine, in US Dollars; which must be greater than the + current spot price. If this bid price falls below the current + spot price the Virtual Machine will be evicted using the eviction_policy. + Defaults to -1, which means that the Virtual Machine should + not be evicted for price reasons. + type: number + networkInterfaceIds: + description: . A list of Network Interface IDs which should be + attached to this Virtual Machine. The first Network Interface + ID in this list will be the Primary Network Interface on the + Virtual Machine. + items: + type: string + type: array + networkInterfaceIdsRefs: + description: References to NetworkInterface in network to populate + networkInterfaceIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + networkInterfaceIdsSelector: + description: Selector for a list of NetworkInterface in network + to populate networkInterfaceIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + osDisk: + description: A os_disk block as defined below. + properties: + caching: + description: The Type of Caching which should be used for + the Internal OS Disk. Possible values are None, ReadOnly + and ReadWrite. 
+ type: string + diffDiskSettings: + description: A diff_disk_settings block as defined above. + Changing this forces a new resource to be created. + properties: + option: + description: Specifies the Ephemeral Disk Settings for + the OS Disk. At this time the only possible value is + Local. Changing this forces a new resource to be created. + type: string + placement: + description: Specifies where to store the Ephemeral Disk. + Possible values are CacheDisk and ResourceDisk. Defaults + to CacheDisk. Changing this forces a new resource to + be created. + type: string + type: object + diskEncryptionSetId: + description: The ID of the Disk Encryption Set which should + be used to Encrypt this OS Disk. Conflicts with secure_vm_disk_encryption_set_id. + type: string + diskSizeGb: + description: The Size of the Internal OS Disk in GB, if you + wish to vary from the size used in the image this Virtual + Machine is sourced from. + type: number + name: + description: The name which should be used for the Internal + OS Disk. Changing this forces a new resource to be created. + type: string + secureVmDiskEncryptionSetId: + description: The ID of the Disk Encryption Set which should + be used to Encrypt this OS Disk when the Virtual Machine + is a Confidential VM. Conflicts with disk_encryption_set_id. + Changing this forces a new resource to be created. + type: string + securityEncryptionType: + description: Encryption Type when the Virtual Machine is a + Confidential VM. Possible values are VMGuestStateOnly and + DiskWithVMGuestState. Changing this forces a new resource + to be created. + type: string + storageAccountType: + description: The Type of Storage Account which should back + this the Internal OS Disk. Possible values are Standard_LRS, + StandardSSD_LRS, Premium_LRS, StandardSSD_ZRS and Premium_ZRS. + Changing this forces a new resource to be created. + type: string + writeAcceleratorEnabled: + description: Should Write Accelerator be Enabled for this + OS Disk? 
Defaults to false. + type: boolean + type: object + osImageNotification: + description: A os_image_notification block as defined below. + properties: + timeout: + description: Length of time a notification to be sent to the + VM on the instance metadata server till the VM gets OS upgraded. + The only possible value is PT15M. Defaults to PT15M. + type: string + type: object + patchAssessmentMode: + description: Specifies the mode of VM Guest Patching for the Virtual + Machine. Possible values are AutomaticByPlatform or ImageDefault. + Defaults to ImageDefault. + type: string + patchMode: + description: Specifies the mode of in-guest patching to this Windows + Virtual Machine. Possible values are Manual, AutomaticByOS and + AutomaticByPlatform. Defaults to AutomaticByOS. For more information + on patch modes please see the product documentation. + type: string + plan: + description: A plan block as defined below. Changing this forces + a new resource to be created. + properties: + name: + description: Specifies the Name of the Marketplace Image this + Virtual Machine should be created from. Changing this forces + a new resource to be created. + type: string + product: + description: Specifies the Product of the Marketplace Image + this Virtual Machine should be created from. Changing this + forces a new resource to be created. + type: string + publisher: + description: Specifies the Publisher of the Marketplace Image + this Virtual Machine should be created from. Changing this + forces a new resource to be created. + type: string + type: object + platformFaultDomain: + description: Specifies the Platform Fault Domain in which this + Windows Virtual Machine should be created. Defaults to -1, which + means this will be automatically assigned to a fault domain + that best maintains balance across the available fault domains. + Changing this forces a new Windows Virtual Machine to be created. 
+ type: number + priority: + description: Specifies the priority of this Virtual Machine. Possible + values are Regular and Spot. Defaults to Regular. Changing this + forces a new resource to be created. + type: string + provisionVmAgent: + description: Should the Azure VM Agent be provisioned on this + Virtual Machine? Defaults to true. Changing this forces a new + resource to be created. + type: boolean + proximityPlacementGroupId: + description: The ID of the Proximity Placement Group which the + Virtual Machine should be assigned to. + type: string + rebootSetting: + description: Specifies the reboot setting for platform scheduled + patching. Possible values are Always, IfRequired and Never. + type: string + resourceGroupName: + description: The name of the Resource Group in which the Windows + Virtual Machine should be exist. Changing this forces a new + resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + secret: + description: One or more secret blocks as defined below. + items: + properties: + certificate: + description: One or more certificate blocks as defined above. + items: + properties: + store: + description: The certificate store on the Virtual + Machine where the certificate should be added. + type: string + url: + description: The Secret URL of a Key Vault Certificate. + type: string + type: object + type: array + keyVaultId: + description: The ID of the Key Vault from which all Secrets + should be sourced. 
+ type: string + type: object + type: array + secureBootEnabled: + description: Specifies if Secure Boot and Trusted Launch is enabled + for the Virtual Machine. Changing this forces a new resource + to be created. + type: boolean + size: + description: The SKU which should be used for this Virtual Machine, + such as Standard_F2. + type: string + sourceImageId: + description: The ID of the Image which this Virtual Machine should + be created from. Changing this forces a new resource to be created. + Possible Image ID types include Image IDs, Shared Image IDs, + Shared Image Version IDs, Community Gallery Image IDs, Community + Gallery Image Version IDs, Shared Gallery Image IDs and Shared + Gallery Image Version IDs. + type: string + sourceImageReference: + description: A source_image_reference block as defined below. + Changing this forces a new resource to be created. + properties: + offer: + description: Specifies the offer of the image used to create + the virtual machines. Changing this forces a new resource + to be created. + type: string + publisher: + description: Specifies the publisher of the image used to + create the virtual machines. Changing this forces a new + resource to be created. + type: string + sku: + description: Specifies the SKU of the image used to create + the virtual machines. Changing this forces a new resource + to be created. + type: string + version: + description: Specifies the version of the image used to create + the virtual machines. Changing this forces a new resource + to be created. + type: string + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to this + Virtual Machine. + type: object + x-kubernetes-map-type: granular + terminationNotification: + description: A termination_notification block as defined below. + properties: + enabled: + description: Should the termination notification be enabled + on this Virtual Machine? 
+ type: boolean + timeout: + description: Length of time (in minutes, between 5 and 15) + a notification to be sent to the VM on the instance metadata + server till the VM gets deleted. The time duration should + be specified in ISO 8601 format. Defaults to PT5M. + type: string + type: object + timezone: + description: Specifies the Time Zone which should be used by the + Virtual Machine, the possible values are defined here. Changing + this forces a new resource to be created. + type: string + userData: + description: The Base64-Encoded User Data which should be used + for this Virtual Machine. + type: string + virtualMachineScaleSetId: + description: Specifies the Orchestrated Virtual Machine Scale + Set that this Virtual Machine should be created within. + type: string + vmAgentPlatformUpdatesEnabled: + description: Specifies whether VMAgent Platform Updates is enabled. + Defaults to false. + type: boolean + vtpmEnabled: + description: Specifies if vTPM (virtual Trusted Platform Module) + and Trusted Launch is enabled for the Virtual Machine. Changing + this forces a new resource to be created. + type: boolean + winrmListener: + description: One or more winrm_listener blocks as defined below. + Changing this forces a new resource to be created. + items: + properties: + certificateUrl: + description: The Secret URL of a Key Vault Certificate, + which must be specified when protocol is set to Https. + Changing this forces a new resource to be created. + type: string + protocol: + description: Specifies the protocol of listener. Possible + values are Http or Https. Changing this forces a new resource + to be created. + type: string + type: object + type: array + zone: + description: '* zones - Specifies the Availability Zone in which + this Windows Virtual Machine should be located. Changing this + forces a new Windows Virtual Machine to be created.' + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. 
It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + additionalCapabilities: + description: A additional_capabilities block as defined below. + properties: + ultraSsdEnabled: + description: Should the capacity to enable Data Disks of the + UltraSSD_LRS storage account type be supported on this Virtual + Machine? Defaults to false. + type: boolean + type: object + additionalUnattendContent: + description: One or more additional_unattend_content blocks as + defined below. Changing this forces a new resource to be created. + items: + properties: + setting: + description: The name of the setting to which the content + applies. Possible values are AutoLogon and FirstLogonCommands. + Changing this forces a new resource to be created. + type: string + type: object + type: array + adminUsername: + description: The username of the local administrator used for + the Virtual Machine. Changing this forces a new resource to + be created. + type: string + allowExtensionOperations: + description: Should Extension Operations be allowed on this Virtual + Machine? Defaults to true. + type: boolean + availabilitySetId: + description: Specifies the ID of the Availability Set in which + the Virtual Machine should exist. Changing this forces a new + resource to be created. + type: string + bootDiagnostics: + description: A boot_diagnostics block as defined below. 
+ properties: + storageAccountUri: + description: The Primary/Secondary Endpoint for the Azure + Storage Account which should be used to store Boot Diagnostics, + including Console Output and Screenshots from the Hypervisor. + type: string + type: object + bypassPlatformSafetyChecksOnUserScheduleEnabled: + description: Specifies whether to skip platform scheduled patching + when a user schedule is associated with the VM. Defaults to + false. + type: boolean + capacityReservationGroupId: + description: Specifies the ID of the Capacity Reservation Group + which the Virtual Machine should be allocated to. + type: string + computerName: + description: Specifies the Hostname which should be used for this + Virtual Machine. If unspecified this defaults to the value for + the name field. If the value of the name field is not a valid + computer_name, then you must specify computer_name. Changing + this forces a new resource to be created. + type: string + dedicatedHostGroupId: + description: The ID of a Dedicated Host Group that this Windows + Virtual Machine should be run within. Conflicts with dedicated_host_id. + type: string + dedicatedHostId: + description: The ID of a Dedicated Host where this machine should + be run on. Conflicts with dedicated_host_group_id. + type: string + diskControllerType: + description: Specifies the Disk Controller Type used for this + Virtual Machine. Possible values are SCSI and NVMe. + type: string + edgeZone: + description: Specifies the Edge Zone within the Azure Region where + this Windows Virtual Machine should exist. Changing this forces + a new Windows Virtual Machine to be created. + type: string + enableAutomaticUpdates: + description: Specifies if Automatic Updates are Enabled for the + Windows Virtual Machine. Changing this forces a new resource + to be created. Defaults to true. 
+ type: boolean + encryptionAtHostEnabled: + description: Should all of the disks (including the temp disk) + attached to this Virtual Machine be encrypted by enabling Encryption + at Host? + type: boolean + evictionPolicy: + description: Specifies what should happen when the Virtual Machine + is evicted for price reasons when using a Spot instance. Possible + values are Deallocate and Delete. Changing this forces a new + resource to be created. + type: string + extensionsTimeBudget: + description: Specifies the duration allocated for all extensions + to start. The time duration should be between 15 minutes and + 120 minutes (inclusive) and should be specified in ISO 8601 + format. Defaults to PT1H30M. + type: string + galleryApplication: + description: One or more gallery_application blocks as defined + below. + items: + properties: + automaticUpgradeEnabled: + description: Specifies whether the version will be automatically + updated for the VM when a new Gallery Application version + is available in PIR/SIG. Defaults to false. + type: boolean + configurationBlobUri: + description: Specifies the URI to an Azure Blob that will + replace the default configuration for the package if provided. + type: string + order: + description: Specifies the order in which the packages have + to be installed. Possible values are between 0 and 2,147,483,647. + type: number + tag: + description: Specifies a passthrough value for more generic + context. This field can be any valid string value. + type: string + treatFailureAsDeploymentFailureEnabled: + description: Specifies whether any failure for any operation + in the VmApplication will fail the deployment of the VM. + Defaults to false. + type: boolean + versionId: + description: Specifies the Gallery Application Version resource + ID. + type: string + type: object + type: array + hotpatchingEnabled: + description: Should the VM be patched without requiring a reboot? + Possible values are true or false. Defaults to false. 
For more + information about hot patching please see the product documentation. + type: boolean + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Windows Virtual Machine. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Windows Virtual Machine. + Possible values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + licenseType: + description: Specifies the type of on-premise license (also known + as Azure Hybrid Use Benefit) which should be used for this Virtual + Machine. Possible values are None, Windows_Client and Windows_Server. + type: string + location: + description: The Azure location where the Windows Virtual Machine + should exist. Changing this forces a new resource to be created. + type: string + maxBidPrice: + description: The maximum price you're willing to pay for this + Virtual Machine, in US Dollars; which must be greater than the + current spot price. If this bid price falls below the current + spot price the Virtual Machine will be evicted using the eviction_policy. + Defaults to -1, which means that the Virtual Machine should + not be evicted for price reasons. + type: number + networkInterfaceIds: + description: . A list of Network Interface IDs which should be + attached to this Virtual Machine. The first Network Interface + ID in this list will be the Primary Network Interface on the + Virtual Machine. + items: + type: string + type: array + networkInterfaceIdsRefs: + description: References to NetworkInterface in network to populate + networkInterfaceIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + networkInterfaceIdsSelector: + description: Selector for a list of NetworkInterface in network + to populate networkInterfaceIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + osDisk: + description: A os_disk block as defined below. + properties: + caching: + description: The Type of Caching which should be used for + the Internal OS Disk. Possible values are None, ReadOnly + and ReadWrite. + type: string + diffDiskSettings: + description: A diff_disk_settings block as defined above. + Changing this forces a new resource to be created. + properties: + option: + description: Specifies the Ephemeral Disk Settings for + the OS Disk. At this time the only possible value is + Local. Changing this forces a new resource to be created. + type: string + placement: + description: Specifies where to store the Ephemeral Disk. + Possible values are CacheDisk and ResourceDisk. Defaults + to CacheDisk. Changing this forces a new resource to + be created. + type: string + type: object + diskEncryptionSetId: + description: The ID of the Disk Encryption Set which should + be used to Encrypt this OS Disk. Conflicts with secure_vm_disk_encryption_set_id. + type: string + diskSizeGb: + description: The Size of the Internal OS Disk in GB, if you + wish to vary from the size used in the image this Virtual + Machine is sourced from. + type: number + name: + description: The name which should be used for the Internal + OS Disk. Changing this forces a new resource to be created. + type: string + secureVmDiskEncryptionSetId: + description: The ID of the Disk Encryption Set which should + be used to Encrypt this OS Disk when the Virtual Machine + is a Confidential VM. Conflicts with disk_encryption_set_id. + Changing this forces a new resource to be created. + type: string + securityEncryptionType: + description: Encryption Type when the Virtual Machine is a + Confidential VM. Possible values are VMGuestStateOnly and + DiskWithVMGuestState. Changing this forces a new resource + to be created. 
+ type: string + storageAccountType: + description: The Type of Storage Account which should back + this the Internal OS Disk. Possible values are Standard_LRS, + StandardSSD_LRS, Premium_LRS, StandardSSD_ZRS and Premium_ZRS. + Changing this forces a new resource to be created. + type: string + writeAcceleratorEnabled: + description: Should Write Accelerator be Enabled for this + OS Disk? Defaults to false. + type: boolean + type: object + osImageNotification: + description: A os_image_notification block as defined below. + properties: + timeout: + description: Length of time a notification to be sent to the + VM on the instance metadata server till the VM gets OS upgraded. + The only possible value is PT15M. Defaults to PT15M. + type: string + type: object + patchAssessmentMode: + description: Specifies the mode of VM Guest Patching for the Virtual + Machine. Possible values are AutomaticByPlatform or ImageDefault. + Defaults to ImageDefault. + type: string + patchMode: + description: Specifies the mode of in-guest patching to this Windows + Virtual Machine. Possible values are Manual, AutomaticByOS and + AutomaticByPlatform. Defaults to AutomaticByOS. For more information + on patch modes please see the product documentation. + type: string + plan: + description: A plan block as defined below. Changing this forces + a new resource to be created. + properties: + name: + description: Specifies the Name of the Marketplace Image this + Virtual Machine should be created from. Changing this forces + a new resource to be created. + type: string + product: + description: Specifies the Product of the Marketplace Image + this Virtual Machine should be created from. Changing this + forces a new resource to be created. + type: string + publisher: + description: Specifies the Publisher of the Marketplace Image + this Virtual Machine should be created from. Changing this + forces a new resource to be created. 
+ type: string + type: object + platformFaultDomain: + description: Specifies the Platform Fault Domain in which this + Windows Virtual Machine should be created. Defaults to -1, which + means this will be automatically assigned to a fault domain + that best maintains balance across the available fault domains. + Changing this forces a new Windows Virtual Machine to be created. + type: number + priority: + description: Specifies the priority of this Virtual Machine. Possible + values are Regular and Spot. Defaults to Regular. Changing this + forces a new resource to be created. + type: string + provisionVmAgent: + description: Should the Azure VM Agent be provisioned on this + Virtual Machine? Defaults to true. Changing this forces a new + resource to be created. + type: boolean + proximityPlacementGroupId: + description: The ID of the Proximity Placement Group which the + Virtual Machine should be assigned to. + type: string + rebootSetting: + description: Specifies the reboot setting for platform scheduled + patching. Possible values are Always, IfRequired and Never. + type: string + secret: + description: One or more secret blocks as defined below. + items: + properties: + certificate: + description: One or more certificate blocks as defined above. + items: + properties: + store: + description: The certificate store on the Virtual + Machine where the certificate should be added. + type: string + url: + description: The Secret URL of a Key Vault Certificate. + type: string + type: object + type: array + keyVaultId: + description: The ID of the Key Vault from which all Secrets + should be sourced. + type: string + type: object + type: array + secureBootEnabled: + description: Specifies if Secure Boot and Trusted Launch is enabled + for the Virtual Machine. Changing this forces a new resource + to be created. + type: boolean + size: + description: The SKU which should be used for this Virtual Machine, + such as Standard_F2. 
+ type: string + sourceImageId: + description: The ID of the Image which this Virtual Machine should + be created from. Changing this forces a new resource to be created. + Possible Image ID types include Image IDs, Shared Image IDs, + Shared Image Version IDs, Community Gallery Image IDs, Community + Gallery Image Version IDs, Shared Gallery Image IDs and Shared + Gallery Image Version IDs. + type: string + sourceImageReference: + description: A source_image_reference block as defined below. + Changing this forces a new resource to be created. + properties: + offer: + description: Specifies the offer of the image used to create + the virtual machines. Changing this forces a new resource + to be created. + type: string + publisher: + description: Specifies the publisher of the image used to + create the virtual machines. Changing this forces a new + resource to be created. + type: string + sku: + description: Specifies the SKU of the image used to create + the virtual machines. Changing this forces a new resource + to be created. + type: string + version: + description: Specifies the version of the image used to create + the virtual machines. Changing this forces a new resource + to be created. + type: string + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to this + Virtual Machine. + type: object + x-kubernetes-map-type: granular + terminationNotification: + description: A termination_notification block as defined below. + properties: + enabled: + description: Should the termination notification be enabled + on this Virtual Machine? + type: boolean + timeout: + description: Length of time (in minutes, between 5 and 15) + a notification to be sent to the VM on the instance metadata + server till the VM gets deleted. The time duration should + be specified in ISO 8601 format. Defaults to PT5M. 
+ type: string + type: object + timezone: + description: Specifies the Time Zone which should be used by the + Virtual Machine, the possible values are defined here. Changing + this forces a new resource to be created. + type: string + userData: + description: The Base64-Encoded User Data which should be used + for this Virtual Machine. + type: string + virtualMachineScaleSetId: + description: Specifies the Orchestrated Virtual Machine Scale + Set that this Virtual Machine should be created within. + type: string + vmAgentPlatformUpdatesEnabled: + description: Specifies whether VMAgent Platform Updates is enabled. + Defaults to false. + type: boolean + vtpmEnabled: + description: Specifies if vTPM (virtual Trusted Platform Module) + and Trusted Launch is enabled for the Virtual Machine. Changing + this forces a new resource to be created. + type: boolean + winrmListener: + description: One or more winrm_listener blocks as defined below. + Changing this forces a new resource to be created. + items: + properties: + certificateUrl: + description: The Secret URL of a Key Vault Certificate, + which must be specified when protocol is set to Https. + Changing this forces a new resource to be created. + type: string + protocol: + description: Specifies the protocol of listener. Possible + values are Http or Https. Changing this forces a new resource + to be created. + type: string + type: object + type: array + zone: + description: '* zones - Specifies the Availability Zone in which + this Windows Virtual Machine should be located. Changing this + forces a new Windows Virtual Machine to be created.' + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. 
+ This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.adminPasswordSecretRef is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.adminPasswordSecretRef)' + - message: spec.forProvider.adminUsername is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.adminUsername) + || (has(self.initProvider) && has(self.initProvider.adminUsername))' + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.osDisk is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.osDisk) + || (has(self.initProvider) && has(self.initProvider.osDisk))' + - message: spec.forProvider.size is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.size) + || (has(self.initProvider) && has(self.initProvider.size))' + status: + description: WindowsVirtualMachineStatus defines the observed state of + WindowsVirtualMachine. + properties: + atProvider: + properties: + additionalCapabilities: + description: A additional_capabilities block as defined below. + properties: + ultraSsdEnabled: + description: Should the capacity to enable Data Disks of the + UltraSSD_LRS storage account type be supported on this Virtual + Machine? Defaults to false. 
+ type: boolean + type: object + additionalUnattendContent: + description: One or more additional_unattend_content blocks as + defined below. Changing this forces a new resource to be created. + items: + properties: + setting: + description: The name of the setting to which the content + applies. Possible values are AutoLogon and FirstLogonCommands. + Changing this forces a new resource to be created. + type: string + type: object + type: array + adminUsername: + description: The username of the local administrator used for + the Virtual Machine. Changing this forces a new resource to + be created. + type: string + allowExtensionOperations: + description: Should Extension Operations be allowed on this Virtual + Machine? Defaults to true. + type: boolean + availabilitySetId: + description: Specifies the ID of the Availability Set in which + the Virtual Machine should exist. Changing this forces a new + resource to be created. + type: string + bootDiagnostics: + description: A boot_diagnostics block as defined below. + properties: + storageAccountUri: + description: The Primary/Secondary Endpoint for the Azure + Storage Account which should be used to store Boot Diagnostics, + including Console Output and Screenshots from the Hypervisor. + type: string + type: object + bypassPlatformSafetyChecksOnUserScheduleEnabled: + description: Specifies whether to skip platform scheduled patching + when a user schedule is associated with the VM. Defaults to + false. + type: boolean + capacityReservationGroupId: + description: Specifies the ID of the Capacity Reservation Group + which the Virtual Machine should be allocated to. + type: string + computerName: + description: Specifies the Hostname which should be used for this + Virtual Machine. If unspecified this defaults to the value for + the name field. If the value of the name field is not a valid + computer_name, then you must specify computer_name. Changing + this forces a new resource to be created. 
+ type: string + dedicatedHostGroupId: + description: The ID of a Dedicated Host Group that this Windows + Virtual Machine should be run within. Conflicts with dedicated_host_id. + type: string + dedicatedHostId: + description: The ID of a Dedicated Host where this machine should + be run on. Conflicts with dedicated_host_group_id. + type: string + diskControllerType: + description: Specifies the Disk Controller Type used for this + Virtual Machine. Possible values are SCSI and NVMe. + type: string + edgeZone: + description: Specifies the Edge Zone within the Azure Region where + this Windows Virtual Machine should exist. Changing this forces + a new Windows Virtual Machine to be created. + type: string + enableAutomaticUpdates: + description: Specifies if Automatic Updates are Enabled for the + Windows Virtual Machine. Changing this forces a new resource + to be created. Defaults to true. + type: boolean + encryptionAtHostEnabled: + description: Should all of the disks (including the temp disk) + attached to this Virtual Machine be encrypted by enabling Encryption + at Host? + type: boolean + evictionPolicy: + description: Specifies what should happen when the Virtual Machine + is evicted for price reasons when using a Spot instance. Possible + values are Deallocate and Delete. Changing this forces a new + resource to be created. + type: string + extensionsTimeBudget: + description: Specifies the duration allocated for all extensions + to start. The time duration should be between 15 minutes and + 120 minutes (inclusive) and should be specified in ISO 8601 + format. Defaults to PT1H30M. + type: string + galleryApplication: + description: One or more gallery_application blocks as defined + below. + items: + properties: + automaticUpgradeEnabled: + description: Specifies whether the version will be automatically + updated for the VM when a new Gallery Application version + is available in PIR/SIG. Defaults to false. 
+ type: boolean + configurationBlobUri: + description: Specifies the URI to an Azure Blob that will + replace the default configuration for the package if provided. + type: string + order: + description: Specifies the order in which the packages have + to be installed. Possible values are between 0 and 2,147,483,647. + type: number + tag: + description: Specifies a passthrough value for more generic + context. This field can be any valid string value. + type: string + treatFailureAsDeploymentFailureEnabled: + description: Specifies whether any failure for any operation + in the VmApplication will fail the deployment of the VM. + Defaults to false. + type: boolean + versionId: + description: Specifies the Gallery Application Version resource + ID. + type: string + type: object + type: array + hotpatchingEnabled: + description: Should the VM be patched without requiring a reboot? + Possible values are true or false. Defaults to false. For more + information about hot patching please see the product documentation. + type: boolean + id: + description: The ID of the Windows Virtual Machine. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Windows Virtual Machine. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Principal ID associated with this Managed + Service Identity. + type: string + tenantId: + description: The Tenant ID associated with this Managed Service + Identity. + type: string + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Windows Virtual Machine. + Possible values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). 
+ type: string + type: object + licenseType: + description: Specifies the type of on-premise license (also known + as Azure Hybrid Use Benefit) which should be used for this Virtual + Machine. Possible values are None, Windows_Client and Windows_Server. + type: string + location: + description: The Azure location where the Windows Virtual Machine + should exist. Changing this forces a new resource to be created. + type: string + maxBidPrice: + description: The maximum price you're willing to pay for this + Virtual Machine, in US Dollars; which must be greater than the + current spot price. If this bid price falls below the current + spot price the Virtual Machine will be evicted using the eviction_policy. + Defaults to -1, which means that the Virtual Machine should + not be evicted for price reasons. + type: number + networkInterfaceIds: + description: . A list of Network Interface IDs which should be + attached to this Virtual Machine. The first Network Interface + ID in this list will be the Primary Network Interface on the + Virtual Machine. + items: + type: string + type: array + osDisk: + description: A os_disk block as defined below. + properties: + caching: + description: The Type of Caching which should be used for + the Internal OS Disk. Possible values are None, ReadOnly + and ReadWrite. + type: string + diffDiskSettings: + description: A diff_disk_settings block as defined above. + Changing this forces a new resource to be created. + properties: + option: + description: Specifies the Ephemeral Disk Settings for + the OS Disk. At this time the only possible value is + Local. Changing this forces a new resource to be created. + type: string + placement: + description: Specifies where to store the Ephemeral Disk. + Possible values are CacheDisk and ResourceDisk. Defaults + to CacheDisk. Changing this forces a new resource to + be created. 
+ type: string + type: object + diskEncryptionSetId: + description: The ID of the Disk Encryption Set which should + be used to Encrypt this OS Disk. Conflicts with secure_vm_disk_encryption_set_id. + type: string + diskSizeGb: + description: The Size of the Internal OS Disk in GB, if you + wish to vary from the size used in the image this Virtual + Machine is sourced from. + type: number + name: + description: The name which should be used for the Internal + OS Disk. Changing this forces a new resource to be created. + type: string + secureVmDiskEncryptionSetId: + description: The ID of the Disk Encryption Set which should + be used to Encrypt this OS Disk when the Virtual Machine + is a Confidential VM. Conflicts with disk_encryption_set_id. + Changing this forces a new resource to be created. + type: string + securityEncryptionType: + description: Encryption Type when the Virtual Machine is a + Confidential VM. Possible values are VMGuestStateOnly and + DiskWithVMGuestState. Changing this forces a new resource + to be created. + type: string + storageAccountType: + description: The Type of Storage Account which should back + this the Internal OS Disk. Possible values are Standard_LRS, + StandardSSD_LRS, Premium_LRS, StandardSSD_ZRS and Premium_ZRS. + Changing this forces a new resource to be created. + type: string + writeAcceleratorEnabled: + description: Should Write Accelerator be Enabled for this + OS Disk? Defaults to false. + type: boolean + type: object + osImageNotification: + description: A os_image_notification block as defined below. + properties: + timeout: + description: Length of time a notification to be sent to the + VM on the instance metadata server till the VM gets OS upgraded. + The only possible value is PT15M. Defaults to PT15M. + type: string + type: object + patchAssessmentMode: + description: Specifies the mode of VM Guest Patching for the Virtual + Machine. Possible values are AutomaticByPlatform or ImageDefault. 
+ Defaults to ImageDefault. + type: string + patchMode: + description: Specifies the mode of in-guest patching to this Windows + Virtual Machine. Possible values are Manual, AutomaticByOS and + AutomaticByPlatform. Defaults to AutomaticByOS. For more information + on patch modes please see the product documentation. + type: string + plan: + description: A plan block as defined below. Changing this forces + a new resource to be created. + properties: + name: + description: Specifies the Name of the Marketplace Image this + Virtual Machine should be created from. Changing this forces + a new resource to be created. + type: string + product: + description: Specifies the Product of the Marketplace Image + this Virtual Machine should be created from. Changing this + forces a new resource to be created. + type: string + publisher: + description: Specifies the Publisher of the Marketplace Image + this Virtual Machine should be created from. Changing this + forces a new resource to be created. + type: string + type: object + platformFaultDomain: + description: Specifies the Platform Fault Domain in which this + Windows Virtual Machine should be created. Defaults to -1, which + means this will be automatically assigned to a fault domain + that best maintains balance across the available fault domains. + Changing this forces a new Windows Virtual Machine to be created. + type: number + priority: + description: Specifies the priority of this Virtual Machine. Possible + values are Regular and Spot. Defaults to Regular. Changing this + forces a new resource to be created. + type: string + privateIpAddress: + description: The Primary Private IP Address assigned to this Virtual + Machine. + type: string + privateIpAddresses: + description: A list of Private IP Addresses assigned to this Virtual + Machine. + items: + type: string + type: array + provisionVmAgent: + description: Should the Azure VM Agent be provisioned on this + Virtual Machine? Defaults to true. 
Changing this forces a new + resource to be created. + type: boolean + proximityPlacementGroupId: + description: The ID of the Proximity Placement Group which the + Virtual Machine should be assigned to. + type: string + publicIpAddress: + description: The Primary Public IP Address assigned to this Virtual + Machine. + type: string + publicIpAddresses: + description: A list of the Public IP Addresses assigned to this + Virtual Machine. + items: + type: string + type: array + rebootSetting: + description: Specifies the reboot setting for platform scheduled + patching. Possible values are Always, IfRequired and Never. + type: string + resourceGroupName: + description: The name of the Resource Group in which the Windows + Virtual Machine should be exist. Changing this forces a new + resource to be created. + type: string + secret: + description: One or more secret blocks as defined below. + items: + properties: + certificate: + description: One or more certificate blocks as defined above. + items: + properties: + store: + description: The certificate store on the Virtual + Machine where the certificate should be added. + type: string + url: + description: The Secret URL of a Key Vault Certificate. + type: string + type: object + type: array + keyVaultId: + description: The ID of the Key Vault from which all Secrets + should be sourced. + type: string + type: object + type: array + secureBootEnabled: + description: Specifies if Secure Boot and Trusted Launch is enabled + for the Virtual Machine. Changing this forces a new resource + to be created. + type: boolean + size: + description: The SKU which should be used for this Virtual Machine, + such as Standard_F2. + type: string + sourceImageId: + description: The ID of the Image which this Virtual Machine should + be created from. Changing this forces a new resource to be created. 
+ Possible Image ID types include Image IDs, Shared Image IDs, + Shared Image Version IDs, Community Gallery Image IDs, Community + Gallery Image Version IDs, Shared Gallery Image IDs and Shared + Gallery Image Version IDs. + type: string + sourceImageReference: + description: A source_image_reference block as defined below. + Changing this forces a new resource to be created. + properties: + offer: + description: Specifies the offer of the image used to create + the virtual machines. Changing this forces a new resource + to be created. + type: string + publisher: + description: Specifies the publisher of the image used to + create the virtual machines. Changing this forces a new + resource to be created. + type: string + sku: + description: Specifies the SKU of the image used to create + the virtual machines. Changing this forces a new resource + to be created. + type: string + version: + description: Specifies the version of the image used to create + the virtual machines. Changing this forces a new resource + to be created. + type: string + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to this + Virtual Machine. + type: object + x-kubernetes-map-type: granular + terminationNotification: + description: A termination_notification block as defined below. + properties: + enabled: + description: Should the termination notification be enabled + on this Virtual Machine? + type: boolean + timeout: + description: Length of time (in minutes, between 5 and 15) + a notification to be sent to the VM on the instance metadata + server till the VM gets deleted. The time duration should + be specified in ISO 8601 format. Defaults to PT5M. + type: string + type: object + timezone: + description: Specifies the Time Zone which should be used by the + Virtual Machine, the possible values are defined here. Changing + this forces a new resource to be created. 
+ type: string + userData: + description: The Base64-Encoded User Data which should be used + for this Virtual Machine. + type: string + virtualMachineId: + description: A 128-bit identifier which uniquely identifies this + Virtual Machine. + type: string + virtualMachineScaleSetId: + description: Specifies the Orchestrated Virtual Machine Scale + Set that this Virtual Machine should be created within. + type: string + vmAgentPlatformUpdatesEnabled: + description: Specifies whether VMAgent Platform Updates is enabled. + Defaults to false. + type: boolean + vtpmEnabled: + description: Specifies if vTPM (virtual Trusted Platform Module) + and Trusted Launch is enabled for the Virtual Machine. Changing + this forces a new resource to be created. + type: boolean + winrmListener: + description: One or more winrm_listener blocks as defined below. + Changing this forces a new resource to be created. + items: + properties: + certificateUrl: + description: The Secret URL of a Key Vault Certificate, + which must be specified when protocol is set to Https. + Changing this forces a new resource to be created. + type: string + protocol: + description: Specifies the protocol of listener. Possible + values are Http or Https. Changing this forces a new resource + to be created. + type: string + type: object + type: array + zone: + description: '* zones - Specifies the Availability Zone in which + this Windows Virtual Machine should be located. Changing this + forces a new Windows Virtual Machine to be created.' + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/compute.azure.upbound.io_windowsvirtualmachinescalesets.yaml b/package/crds/compute.azure.upbound.io_windowsvirtualmachinescalesets.yaml index 009724174..25f20afeb 100644 --- a/package/crds/compute.azure.upbound.io_windowsvirtualmachinescalesets.yaml +++ b/package/crds/compute.azure.upbound.io_windowsvirtualmachinescalesets.yaml @@ -3189,3 +3189,3063 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: WindowsVirtualMachineScaleSet is the Schema for the WindowsVirtualMachineScaleSets + API. Manages a Windows Virtual Machine Scale Set. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: WindowsVirtualMachineScaleSetSpec defines the desired state + of WindowsVirtualMachineScaleSet + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + additionalCapabilities: + description: An additional_capabilities block as defined below. + properties: + ultraSsdEnabled: + description: Should the capacity to enable Data Disks of the + UltraSSD_LRS storage account type be supported on this Virtual + Machine Scale Set? Possible values are true or false. Defaults + to false. Changing this forces a new resource to be created. + type: boolean + type: object + additionalUnattendContent: + description: One or more additional_unattend_content blocks as + defined below. Changing this forces a new resource to be created. + items: + properties: + contentSecretRef: + description: The XML formatted content that is added to + the unattend.xml file for the specified path and component. + Changing this forces a new resource to be created. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - key + - name + - namespace + type: object + setting: + description: The name of the setting to which the content + applies. Possible values are AutoLogon and FirstLogonCommands. + Changing this forces a new resource to be created. + type: string + required: + - contentSecretRef + type: object + type: array + adminPasswordSecretRef: + description: The Password which should be used for the local-administrator + on this Virtual Machine. Changing this forces a new resource + to be created. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + adminUsername: + description: The username of the local administrator on each Virtual + Machine Scale Set instance. Changing this forces a new resource + to be created. + type: string + automaticInstanceRepair: + description: An automatic_instance_repair block as defined below. + To enable the automatic instance repair, this Virtual Machine + Scale Set must have a valid health_probe_id or an Application + Health Extension. + properties: + enabled: + description: Should the automatic instance repair be enabled + on this Virtual Machine Scale Set? + type: boolean + gracePeriod: + description: Amount of time (in minutes, between 30 and 90) + for which automatic repairs will be delayed. The grace period + starts right after the VM is found unhealthy. The time duration + should be specified in ISO 8601 format. Defaults to PT30M. + type: string + type: object + automaticOsUpgradePolicy: + description: An automatic_os_upgrade_policy block as defined below. + This can only be specified when upgrade_mode is set to either + Automatic or Rolling. + properties: + disableAutomaticRollback: + description: Should automatic rollbacks be disabled? 
+ type: boolean + enableAutomaticOsUpgrade: + description: Should OS Upgrades automatically be applied to + Scale Set instances in a rolling fashion when a newer version + of the OS Image becomes available? + type: boolean + type: object + bootDiagnostics: + description: A boot_diagnostics block as defined below. + properties: + storageAccountUri: + description: The Primary/Secondary Endpoint for the Azure + Storage Account which should be used to store Boot Diagnostics, + including Console Output and Screenshots from the Hypervisor. + type: string + type: object + capacityReservationGroupId: + description: Specifies the ID of the Capacity Reservation Group + which the Virtual Machine Scale Set should be allocated to. + Changing this forces a new resource to be created. + type: string + computerNamePrefix: + description: The prefix which should be used for the name of the + Virtual Machines in this Scale Set. If unspecified this defaults + to the value for the name field. If the value of the name field + is not a valid computer_name_prefix, then you must specify computer_name_prefix. + Changing this forces a new resource to be created. + type: string + customDataSecretRef: + description: The Base64-Encoded Custom Data which should be used + for this Virtual Machine Scale Set. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + dataDisk: + description: One or more data_disk blocks as defined below. + items: + properties: + caching: + description: The type of Caching which should be used for + this Data Disk. Possible values are None, ReadOnly and + ReadWrite. + type: string + createOption: + description: The create option which should be used for + this Data Disk. Possible values are Empty and FromImage. + Defaults to Empty. 
(FromImage should only be used if the + source image includes data disks). + type: string + diskEncryptionSetId: + description: The ID of the Disk Encryption Set which should + be used to encrypt this OS Disk. Conflicts with secure_vm_disk_encryption_set_id. + Changing this forces a new resource to be created. + type: string + diskSizeGb: + description: The size of the Data Disk which should be created. + type: number + lun: + description: The Logical Unit Number of the Data Disk, which + must be unique within the Virtual Machine. + type: number + name: + description: The name of the Data Disk. + type: string + storageAccountType: + description: The Type of Storage Account which should back + this Data Disk. Possible values include Standard_LRS, + StandardSSD_LRS, StandardSSD_ZRS, Premium_LRS, PremiumV2_LRS, + Premium_ZRS and UltraSSD_LRS. + type: string + ultraSsdDiskIopsReadWrite: + description: Specifies the Read-Write IOPS for this Data + Disk. Only settable when storage_account_type is PremiumV2_LRS + or UltraSSD_LRS. + type: number + ultraSsdDiskMbpsReadWrite: + description: Specifies the bandwidth in MB per second for + this Data Disk. Only settable when storage_account_type + is PremiumV2_LRS or UltraSSD_LRS. + type: number + writeAcceleratorEnabled: + description: Should Write Accelerator be Enabled for this + OS Disk? Defaults to false. + type: boolean + type: object + type: array + doNotRunExtensionsOnOverprovisionedMachines: + description: Should Virtual Machine Extensions be run on Overprovisioned + Virtual Machines in the Scale Set? Defaults to false. + type: boolean + edgeZone: + description: Specifies the Edge Zone within the Azure Region where + this Windows Virtual Machine Scale Set should exist. Changing + this forces a new Windows Virtual Machine Scale Set to be created. + type: string + enableAutomaticUpdates: + description: Are automatic updates enabled for this Virtual Machine? + Defaults to true. 
+ type: boolean + encryptionAtHostEnabled: + description: Should all of the disks (including the temp disk) + attached to this Virtual Machine be encrypted by enabling Encryption + at Host? + type: boolean + evictionPolicy: + description: Specifies the eviction policy for Virtual Machines + in this Scale Set. Possible values are Deallocate and Delete. + Changing this forces a new resource to be created. + type: string + extension: + description: One or more extension blocks as defined below + items: + properties: + autoUpgradeMinorVersion: + description: Should the latest version of the Extension + be used at Deployment Time, if one is available? This + won't auto-update the extension on existing installation. + Defaults to true. + type: boolean + automaticUpgradeEnabled: + description: Should the Extension be automatically updated + whenever the Publisher releases a new version of this + VM Extension? + type: boolean + forceUpdateTag: + description: A value which, when different to the previous + value can be used to force-run the Extension even if the + Extension Configuration hasn't changed. + type: string + name: + description: The name for the Virtual Machine Scale Set + Extension. + type: string + protectedSettingsFromKeyVault: + description: A protected_settings_from_key_vault block as + defined below. + properties: + secretUrl: + description: The URL to the Key Vault Secret which stores + the protected settings. + type: string + sourceVaultId: + description: The ID of the source Key Vault. + type: string + type: object + protectedSettingsSecretRef: + description: A JSON String which specifies Sensitive Settings + (such as Passwords) for the Extension. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - key + - name + - namespace + type: object + provisionAfterExtensions: + description: An ordered list of Extension names which this + should be provisioned after. + items: + type: string + type: array + publisher: + description: Specifies the Publisher of the Extension. + type: string + settings: + description: A JSON String which specifies Settings for + the Extension. + type: string + type: + description: Specifies the Type of the Extension. + type: string + typeHandlerVersion: + description: Specifies the version of the extension to use, + available versions can be found using the Azure CLI. + type: string + type: object + type: array + extensionOperationsEnabled: + description: Should extension operations be allowed on the Virtual + Machine Scale Set? Possible values are true or false. Defaults + to true. Changing this forces a new Windows Virtual Machine + Scale Set to be created. + type: boolean + extensionsTimeBudget: + description: Specifies the duration allocated for all extensions + to start. The time duration should be between 15 minutes and + 120 minutes (inclusive) and should be specified in ISO 8601 + format. Defaults to PT1H30M. + type: string + galleryApplication: + description: One or more gallery_application blocks as defined + below. + items: + properties: + configurationBlobUri: + description: Specifies the URI to an Azure Blob that will + replace the default configuration for the package if provided. + Changing this forces a new resource to be created. + type: string + order: + description: Specifies the order in which the packages have + to be installed. Possible values are between 0 and 2,147,483,647. + Changing this forces a new resource to be created. + type: number + tag: + description: Specifies a passthrough value for more generic + context. This field can be any valid string value. Changing + this forces a new resource to be created. 
+ type: string + versionId: + description: Specifies the Gallery Application Version resource + ID. Changing this forces a new resource to be created. + type: string + type: object + type: array + galleryApplications: + items: + properties: + configurationReferenceBlobUri: + type: string + order: + description: Specifies the order in which the packages have + to be installed. Possible values are between 0 and 2,147,483,647. + Changing this forces a new resource to be created. + type: number + packageReferenceId: + description: The ID of the Windows Virtual Machine Scale + Set. + type: string + tag: + description: The IP Tag associated with the Public IP, such + as SQL or Storage. Changing this forces a new resource + to be created. + type: string + type: object + type: array + healthProbeId: + description: The ID of a Load Balancer Probe which should be used + to determine the health of an instance. This is Required and + can only be specified when upgrade_mode is set to Automatic + or Rolling. + type: string + hostGroupId: + description: Specifies the ID of the dedicated host group that + the virtual machine scale set resides in. Changing this forces + a new resource to be created. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Windows Virtual Machine Scale + Set. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Windows Virtual Machine + Scale Set. Possible values are SystemAssigned, UserAssigned, + SystemAssigned, UserAssigned (to enable both). + type: string + type: object + instances: + description: The number of Virtual Machines in the Scale Set. 
+ type: number + licenseType: + description: Specifies the type of on-premise license (also known + as Azure Hybrid Use Benefit) which should be used for this Virtual + Machine Scale Set. Possible values are None, Windows_Client + and Windows_Server. + type: string + location: + description: The Azure location where the Windows Virtual Machine + Scale Set should exist. Changing this forces a new resource + to be created. + type: string + maxBidPrice: + description: The maximum price you're willing to pay for each + Virtual Machine in this Scale Set, in US Dollars; which must + be greater than the current spot price. If this bid price falls + below the current spot price the Virtual Machines in the Scale + Set will be evicted using the eviction_policy. Defaults to -1, + which means that each Virtual Machine in the Scale Set should + not be evicted for price reasons. + type: number + networkInterface: + description: One or more network_interface blocks as defined below. + items: + properties: + dnsServers: + description: A list of IP Addresses of DNS Servers which + should be assigned to the Network Interface. + items: + type: string + type: array + enableAcceleratedNetworking: + description: Does this Network Interface support Accelerated + Networking? Defaults to false. + type: boolean + enableIpForwarding: + description: Does this Network Interface support IP Forwarding? + Defaults to false. + type: boolean + ipConfiguration: + description: One or more ip_configuration blocks as defined + above. + items: + properties: + applicationGatewayBackendAddressPoolIds: + description: A list of Backend Address Pools ID's + from a Application Gateway which this Virtual Machine + Scale Set should be connected to. + items: + type: string + type: array + x-kubernetes-list-type: set + applicationSecurityGroupIds: + description: A list of Application Security Group + ID's which this Virtual Machine Scale Set should + be connected to. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + loadBalancerBackendAddressPoolIds: + description: A list of Backend Address Pools ID's + from a Load Balancer which this Virtual Machine + Scale Set should be connected to. + items: + type: string + type: array + x-kubernetes-list-type: set + loadBalancerInboundNatRulesIds: + description: A list of NAT Rule ID's from a Load Balancer + which this Virtual Machine Scale Set should be connected + to. + items: + type: string + type: array + x-kubernetes-list-type: set + name: + description: The Name of the Public IP Address Configuration. + type: string + primary: + description: Is this the Primary IP Configuration? + type: boolean + publicIpAddress: + description: A public_ip_address block as defined + below. + items: + properties: + domainNameLabel: + description: The Prefix which should be used + for the Domain Name Label for each Virtual + Machine Instance. Azure concatenates the Domain + Name Label and Virtual Machine Index to create + a unique Domain Name Label for each Virtual + Machine. + type: string + idleTimeoutInMinutes: + description: The Idle Timeout in Minutes for + the Public IP Address. Possible values are + in the range 4 to 32. + type: number + ipTag: + description: One or more ip_tag blocks as defined + above. Changing this forces a new resource + to be created. + items: + properties: + tag: + description: The IP Tag associated with + the Public IP, such as SQL or Storage. + Changing this forces a new resource + to be created. + type: string + type: + description: The Type of IP Tag, such + as FirstPartyUsage. Changing this forces + a new resource to be created. + type: string + type: object + type: array + name: + description: The Name of the Public IP Address + Configuration. + type: string + publicIpPrefixId: + description: The ID of the Public IP Address + Prefix from where Public IP Addresses should + be allocated. Changing this forces a new resource + to be created. 
+ type: string + version: + description: Specifies the version of the image + used to create the virtual machines. + type: string + type: object + type: array + subnetId: + description: The ID of the Subnet which this IP Configuration + should be connected to. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + version: + description: Specifies the version of the image used + to create the virtual machines. + type: string + type: object + type: array + name: + description: The Name which should be used for this Network + Interface. Changing this forces a new resource to be created. + type: string + networkSecurityGroupId: + description: The ID of a Network Security Group which should + be assigned to this Network Interface. + type: string + primary: + description: Is this the Primary IP Configuration? + type: boolean + type: object + type: array + osDisk: + description: An os_disk block as defined below. + properties: + caching: + description: The Type of Caching which should be used for + the Internal OS Disk. Possible values are None, ReadOnly + and ReadWrite. + type: string + diffDiskSettings: + description: A diff_disk_settings block as defined above. + Changing this forces a new resource to be created. + properties: + option: + description: Specifies the Ephemeral Disk Settings for + the OS Disk. At this time the only possible value is + Local. Changing this forces a new resource to be created. + type: string + placement: + description: Specifies where to store the Ephemeral Disk. + Possible values are CacheDisk and ResourceDisk. Defaults + to CacheDisk. Changing this forces a new resource to + be created. + type: string + type: object + diskEncryptionSetId: + description: The ID of the Disk Encryption Set which should + be used to encrypt this OS Disk. 
Conflicts with secure_vm_disk_encryption_set_id. + Changing this forces a new resource to be created. + type: string + diskSizeGb: + description: The Size of the Internal OS Disk in GB, if you + wish to vary from the size used in the image this Virtual + Machine Scale Set is sourced from. + type: number + secureVmDiskEncryptionSetId: + description: The ID of the Disk Encryption Set which should + be used to Encrypt the OS Disk when the Virtual Machine + Scale Set is Confidential VMSS. Conflicts with disk_encryption_set_id. + Changing this forces a new resource to be created. + type: string + securityEncryptionType: + description: Encryption Type when the Virtual Machine Scale + Set is Confidential VMSS. Possible values are VMGuestStateOnly + and DiskWithVMGuestState. Changing this forces a new resource + to be created. + type: string + storageAccountType: + description: The Type of Storage Account which should back + this the Internal OS Disk. Possible values include Standard_LRS, + StandardSSD_LRS, StandardSSD_ZRS, Premium_LRS and Premium_ZRS. + Changing this forces a new resource to be created. + type: string + writeAcceleratorEnabled: + description: Should Write Accelerator be Enabled for this + OS Disk? Defaults to false. + type: boolean + type: object + overprovision: + description: Should Azure over-provision Virtual Machines in this + Scale Set? This means that multiple Virtual Machines will be + provisioned and Azure will keep the instances which become available + first - which improves provisioning success rates and improves + deployment time. You're not billed for these over-provisioned + VM's and they don't count towards the Subscription Quota. Defaults + to true. + type: boolean + plan: + description: A plan block as defined below. Changing this forces + a new resource to be created. + properties: + name: + description: Specifies the name of the image from the marketplace. + Changing this forces a new resource to be created. 
+ type: string + product: + description: Specifies the product of the image from the marketplace. + Changing this forces a new resource to be created. + type: string + publisher: + description: Specifies the publisher of the image. Changing + this forces a new resource to be created. + type: string + type: object + platformFaultDomainCount: + description: Specifies the number of fault domains that are used + by this Linux Virtual Machine Scale Set. Changing this forces + a new resource to be created. + type: number + priority: + description: The Priority of this Virtual Machine Scale Set. Possible + values are Regular and Spot. Defaults to Regular. Changing this + value forces a new resource. + type: string + provisionVmAgent: + description: Should the Azure VM Agent be provisioned on each + Virtual Machine in the Scale Set? Defaults to true. Changing + this value forces a new resource to be created. + type: boolean + proximityPlacementGroupId: + description: The ID of the Proximity Placement Group in which + the Virtual Machine Scale Set should be assigned to. Changing + this forces a new resource to be created. + type: string + resourceGroupName: + description: The name of the Resource Group in which the Windows + Virtual Machine Scale Set should be exist. Changing this forces + a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + rollingUpgradePolicy: + description: A rolling_upgrade_policy block as defined below. + This is Required and can only be specified when upgrade_mode + is set to Automatic or Rolling. Changing this forces a new resource + to be created. 
+ properties: + crossZoneUpgradesEnabled: + description: Should the Virtual Machine Scale Set ignore the + Azure Zone boundaries when constructing upgrade batches? + Possible values are true or false. + type: boolean + maxBatchInstancePercent: + description: The maximum percent of total virtual machine + instances that will be upgraded simultaneously by the rolling + upgrade in one batch. As this is a maximum, unhealthy instances + in previous or future batches can cause the percentage of + instances in a batch to decrease to ensure higher reliability. + type: number + maxUnhealthyInstancePercent: + description: The maximum percentage of the total virtual machine + instances in the scale set that can be simultaneously unhealthy, + either as a result of being upgraded, or by being found + in an unhealthy state by the virtual machine health checks + before the rolling upgrade aborts. This constraint will + be checked prior to starting any batch. + type: number + maxUnhealthyUpgradedInstancePercent: + description: The maximum percentage of upgraded virtual machine + instances that can be found to be in an unhealthy state. + This check will happen after each batch is upgraded. If + this percentage is ever exceeded, the rolling update aborts. + type: number + pauseTimeBetweenBatches: + description: The wait time between completing the update for + all virtual machines in one batch and starting the next + batch. The time duration should be specified in ISO 8601 + format. + type: string + prioritizeUnhealthyInstancesEnabled: + description: Upgrade all unhealthy instances in a scale set + before any healthy instances. Possible values are true or + false. + type: boolean + type: object + scaleIn: + description: A scale_in block as defined below. + properties: + forceDeletionEnabled: + description: Should the virtual machines chosen for removal + be force deleted when the virtual machine scale set is being + scaled-in? Possible values are true or false. Defaults to + false. 
+ type: boolean + rule: + description: The scale-in policy rule that decides which virtual + machines are chosen for removal when a Virtual Machine Scale + Set is scaled in. Possible values for the scale-in policy + rules are Default, NewestVM and OldestVM, defaults to Default. + For more information about scale in policy, please refer + to this doc. + type: string + type: object + scaleInPolicy: + description: 'Deprecated: scaleInPolicy will be removed in favour + of the scaleIn code block.' + type: string + secret: + description: One or more secret blocks as defined below. + items: + properties: + certificate: + description: One or more certificate blocks as defined above. + items: + properties: + store: + description: The certificate store on the Virtual + Machine where the certificate should be added. + type: string + url: + description: The Secret URL of a Key Vault Certificate. + type: string + type: object + type: array + keyVaultId: + description: The ID of the Key Vault from which all Secrets + should be sourced. + type: string + type: object + type: array + secureBootEnabled: + description: Specifies if Secure Boot and Trusted Launch is enabled + for the Virtual Machine. Changing this forces a new resource + to be created. + type: boolean + singlePlacementGroup: + description: Should this Virtual Machine Scale Set be limited + to a Single Placement Group, which means the number of instances + will be capped at 100 Virtual Machines. Defaults to true. + type: boolean + sku: + description: The Virtual Machine SKU for the Scale Set, such as + Standard_F2. + type: string + sourceImageId: + description: The ID of an Image which each Virtual Machine in + this Scale Set should be based on. Possible Image ID types include + Image ID, Shared Image ID, Shared Image Version ID, Community + Gallery Image ID, Community Gallery Image Version ID, Shared + Gallery Image ID and Shared Gallery Image Version ID. 
+ type: string + sourceImageReference: + description: A source_image_reference block as defined below. + properties: + offer: + description: Specifies the offer of the image used to create + the virtual machines. Changing this forces a new resource + to be created. + type: string + publisher: + description: Specifies the publisher of the image used to + create the virtual machines. Changing this forces a new + resource to be created. + type: string + sku: + description: Specifies the SKU of the image used to create + the virtual machines. + type: string + version: + description: Specifies the version of the image used to create + the virtual machines. + type: string + type: object + spotRestore: + description: A spot_restore block as defined below. + properties: + enabled: + description: Should the Spot-Try-Restore feature be enabled? + The Spot-Try-Restore feature will attempt to automatically + restore the evicted Spot Virtual Machine Scale Set VM instances + opportunistically based on capacity availability and pricing + constraints. Possible values are true or false. Defaults + to false. Changing this forces a new resource to be created. + type: boolean + timeout: + description: The length of time that the Virtual Machine Scale + Set should attempt to restore the Spot VM instances which + have been evicted. The time duration should be between 15 + minutes and 120 minutes (inclusive). The time duration should + be specified in the ISO 8601 format. Defaults to PT1H. Changing + this forces a new resource to be created. + type: string + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to this + Virtual Machine Scale Set. + type: object + x-kubernetes-map-type: granular + terminateNotification: + description: A terminate_notification block as defined below. + properties: + enabled: + description: Should the terminate notification be enabled + on this Virtual Machine Scale Set? 
+ type: boolean + timeout: + description: Length of time (in minutes, between 5 and 15) + a notification to be sent to the VM on the instance metadata + server till the VM gets deleted. The time duration should + be specified in ISO 8601 format. Defaults to PT5M. + type: string + type: object + terminationNotification: + description: A termination_notification block as defined below. + properties: + enabled: + description: Should the termination notification be enabled + on this Virtual Machine Scale Set? + type: boolean + timeout: + description: Length of time (in minutes, between 5 and 15) + a notification to be sent to the VM on the instance metadata + server till the VM gets deleted. The time duration should + be specified in ISO 8601 format. Defaults to PT5M. + type: string + type: object + timezone: + description: Specifies the time zone of the virtual machine, the + possible values are defined here. + type: string + upgradeMode: + description: Specifies how Upgrades (e.g. changing the Image/SKU) + should be performed to Virtual Machine Instances. Possible values + are Automatic, Manual and Rolling. Defaults to Manual. Changing + this forces a new resource to be created. + type: string + userData: + description: The Base64-Encoded User Data which should be used + for this Virtual Machine Scale Set. + type: string + vtpmEnabled: + description: Specifies if vTPM (Virtual Trusted Platform Module) + and Trusted Launch is enabled for the Virtual Machine. Changing + this forces a new resource to be created. + type: boolean + winrmListener: + description: One or more winrm_listener blocks as defined below. + Changing this forces a new resource to be created. + items: + properties: + certificateUrl: + description: The Secret URL of a Key Vault Certificate, + which must be specified when protocol is set to Https. + Changing this forces a new resource to be created. + type: string + protocol: + description: The Protocol of the WinRM Listener. 
Possible + values are Http and Https. Changing this forces a new + resource to be created. + type: string + type: object + type: array + zoneBalance: + description: Should the Virtual Machines in this Scale Set be + strictly evenly distributed across Availability Zones? Defaults + to false. Changing this forces a new resource to be created. + type: boolean + zones: + description: Specifies a list of Availability Zones in which this + Windows Virtual Machine Scale Set should be located. Changing + this forces a new Windows Virtual Machine Scale Set to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + additionalCapabilities: + description: An additional_capabilities block as defined below. + properties: + ultraSsdEnabled: + description: Should the capacity to enable Data Disks of the + UltraSSD_LRS storage account type be supported on this Virtual + Machine Scale Set? Possible values are true or false. Defaults + to false. Changing this forces a new resource to be created. + type: boolean + type: object + additionalUnattendContent: + description: One or more additional_unattend_content blocks as + defined below. Changing this forces a new resource to be created. 
+ items: + properties: + setting: + description: The name of the setting to which the content + applies. Possible values are AutoLogon and FirstLogonCommands. + Changing this forces a new resource to be created. + type: string + type: object + type: array + adminUsername: + description: The username of the local administrator on each Virtual + Machine Scale Set instance. Changing this forces a new resource + to be created. + type: string + automaticInstanceRepair: + description: An automatic_instance_repair block as defined below. + To enable the automatic instance repair, this Virtual Machine + Scale Set must have a valid health_probe_id or an Application + Health Extension. + properties: + enabled: + description: Should the automatic instance repair be enabled + on this Virtual Machine Scale Set? + type: boolean + gracePeriod: + description: Amount of time (in minutes, between 30 and 90) + for which automatic repairs will be delayed. The grace period + starts right after the VM is found unhealthy. The time duration + should be specified in ISO 8601 format. Defaults to PT30M. + type: string + type: object + automaticOsUpgradePolicy: + description: An automatic_os_upgrade_policy block as defined below. + This can only be specified when upgrade_mode is set to either + Automatic or Rolling. + properties: + disableAutomaticRollback: + description: Should automatic rollbacks be disabled? + type: boolean + enableAutomaticOsUpgrade: + description: Should OS Upgrades automatically be applied to + Scale Set instances in a rolling fashion when a newer version + of the OS Image becomes available? + type: boolean + type: object + bootDiagnostics: + description: A boot_diagnostics block as defined below. + properties: + storageAccountUri: + description: The Primary/Secondary Endpoint for the Azure + Storage Account which should be used to store Boot Diagnostics, + including Console Output and Screenshots from the Hypervisor. 
+ type: string + type: object + capacityReservationGroupId: + description: Specifies the ID of the Capacity Reservation Group + which the Virtual Machine Scale Set should be allocated to. + Changing this forces a new resource to be created. + type: string + computerNamePrefix: + description: The prefix which should be used for the name of the + Virtual Machines in this Scale Set. If unspecified this defaults + to the value for the name field. If the value of the name field + is not a valid computer_name_prefix, then you must specify computer_name_prefix. + Changing this forces a new resource to be created. + type: string + dataDisk: + description: One or more data_disk blocks as defined below. + items: + properties: + caching: + description: The type of Caching which should be used for + this Data Disk. Possible values are None, ReadOnly and + ReadWrite. + type: string + createOption: + description: The create option which should be used for + this Data Disk. Possible values are Empty and FromImage. + Defaults to Empty. (FromImage should only be used if the + source image includes data disks). + type: string + diskEncryptionSetId: + description: The ID of the Disk Encryption Set which should + be used to encrypt this OS Disk. Conflicts with secure_vm_disk_encryption_set_id. + Changing this forces a new resource to be created. + type: string + diskSizeGb: + description: The size of the Data Disk which should be created. + type: number + lun: + description: The Logical Unit Number of the Data Disk, which + must be unique within the Virtual Machine. + type: number + name: + description: The name of the Data Disk. + type: string + storageAccountType: + description: The Type of Storage Account which should back + this Data Disk. Possible values include Standard_LRS, + StandardSSD_LRS, StandardSSD_ZRS, Premium_LRS, PremiumV2_LRS, + Premium_ZRS and UltraSSD_LRS. + type: string + ultraSsdDiskIopsReadWrite: + description: Specifies the Read-Write IOPS for this Data + Disk. 
Only settable when storage_account_type is PremiumV2_LRS + or UltraSSD_LRS. + type: number + ultraSsdDiskMbpsReadWrite: + description: Specifies the bandwidth in MB per second for + this Data Disk. Only settable when storage_account_type + is PremiumV2_LRS or UltraSSD_LRS. + type: number + writeAcceleratorEnabled: + description: Should Write Accelerator be Enabled for this + OS Disk? Defaults to false. + type: boolean + type: object + type: array + doNotRunExtensionsOnOverprovisionedMachines: + description: Should Virtual Machine Extensions be run on Overprovisioned + Virtual Machines in the Scale Set? Defaults to false. + type: boolean + edgeZone: + description: Specifies the Edge Zone within the Azure Region where + this Windows Virtual Machine Scale Set should exist. Changing + this forces a new Windows Virtual Machine Scale Set to be created. + type: string + enableAutomaticUpdates: + description: Are automatic updates enabled for this Virtual Machine? + Defaults to true. + type: boolean + encryptionAtHostEnabled: + description: Should all of the disks (including the temp disk) + attached to this Virtual Machine be encrypted by enabling Encryption + at Host? + type: boolean + evictionPolicy: + description: Specifies the eviction policy for Virtual Machines + in this Scale Set. Possible values are Deallocate and Delete. + Changing this forces a new resource to be created. + type: string + extension: + description: One or more extension blocks as defined below + items: + properties: + autoUpgradeMinorVersion: + description: Should the latest version of the Extension + be used at Deployment Time, if one is available? This + won't auto-update the extension on existing installation. + Defaults to true. + type: boolean + automaticUpgradeEnabled: + description: Should the Extension be automatically updated + whenever the Publisher releases a new version of this + VM Extension? 
+ type: boolean + forceUpdateTag: + description: A value which, when different to the previous + value can be used to force-run the Extension even if the + Extension Configuration hasn't changed. + type: string + name: + description: The name for the Virtual Machine Scale Set + Extension. + type: string + protectedSettingsFromKeyVault: + description: A protected_settings_from_key_vault block as + defined below. + properties: + secretUrl: + description: The URL to the Key Vault Secret which stores + the protected settings. + type: string + sourceVaultId: + description: The ID of the source Key Vault. + type: string + type: object + provisionAfterExtensions: + description: An ordered list of Extension names which this + should be provisioned after. + items: + type: string + type: array + publisher: + description: Specifies the Publisher of the Extension. + type: string + settings: + description: A JSON String which specifies Settings for + the Extension. + type: string + type: + description: Specifies the Type of the Extension. + type: string + typeHandlerVersion: + description: Specifies the version of the extension to use, + available versions can be found using the Azure CLI. + type: string + type: object + type: array + extensionOperationsEnabled: + description: Should extension operations be allowed on the Virtual + Machine Scale Set? Possible values are true or false. Defaults + to true. Changing this forces a new Windows Virtual Machine + Scale Set to be created. + type: boolean + extensionsTimeBudget: + description: Specifies the duration allocated for all extensions + to start. The time duration should be between 15 minutes and + 120 minutes (inclusive) and should be specified in ISO 8601 + format. Defaults to PT1H30M. + type: string + galleryApplication: + description: One or more gallery_application blocks as defined + below. 
+ items: + properties: + configurationBlobUri: + description: Specifies the URI to an Azure Blob that will + replace the default configuration for the package if provided. + Changing this forces a new resource to be created. + type: string + order: + description: Specifies the order in which the packages have + to be installed. Possible values are between 0 and 2,147,483,647. + Changing this forces a new resource to be created. + type: number + tag: + description: Specifies a passthrough value for more generic + context. This field can be any valid string value. Changing + this forces a new resource to be created. + type: string + versionId: + description: Specifies the Gallery Application Version resource + ID. Changing this forces a new resource to be created. + type: string + type: object + type: array + galleryApplications: + items: + properties: + configurationReferenceBlobUri: + type: string + order: + description: Specifies the order in which the packages have + to be installed. Possible values are between 0 and 2,147,483,647. + Changing this forces a new resource to be created. + type: number + packageReferenceId: + description: The ID of the Windows Virtual Machine Scale + Set. + type: string + tag: + description: The IP Tag associated with the Public IP, such + as SQL or Storage. Changing this forces a new resource + to be created. + type: string + type: object + type: array + healthProbeId: + description: The ID of a Load Balancer Probe which should be used + to determine the health of an instance. This is Required and + can only be specified when upgrade_mode is set to Automatic + or Rolling. + type: string + hostGroupId: + description: Specifies the ID of the dedicated host group that + the virtual machine scale set resides in. Changing this forces + a new resource to be created. + type: string + identity: + description: An identity block as defined below. 
+ properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Windows Virtual Machine Scale + Set. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Windows Virtual Machine + Scale Set. Possible values are SystemAssigned, UserAssigned, + SystemAssigned, UserAssigned (to enable both). + type: string + type: object + instances: + description: The number of Virtual Machines in the Scale Set. + type: number + licenseType: + description: Specifies the type of on-premise license (also known + as Azure Hybrid Use Benefit) which should be used for this Virtual + Machine Scale Set. Possible values are None, Windows_Client + and Windows_Server. + type: string + location: + description: The Azure location where the Windows Virtual Machine + Scale Set should exist. Changing this forces a new resource + to be created. + type: string + maxBidPrice: + description: The maximum price you're willing to pay for each + Virtual Machine in this Scale Set, in US Dollars; which must + be greater than the current spot price. If this bid price falls + below the current spot price the Virtual Machines in the Scale + Set will be evicted using the eviction_policy. Defaults to -1, + which means that each Virtual Machine in the Scale Set should + not be evicted for price reasons. + type: number + networkInterface: + description: One or more network_interface blocks as defined below. + items: + properties: + dnsServers: + description: A list of IP Addresses of DNS Servers which + should be assigned to the Network Interface. + items: + type: string + type: array + enableAcceleratedNetworking: + description: Does this Network Interface support Accelerated + Networking? Defaults to false. + type: boolean + enableIpForwarding: + description: Does this Network Interface support IP Forwarding? + Defaults to false. 
+ type: boolean + ipConfiguration: + description: One or more ip_configuration blocks as defined + above. + items: + properties: + applicationGatewayBackendAddressPoolIds: + description: A list of Backend Address Pools ID's + from a Application Gateway which this Virtual Machine + Scale Set should be connected to. + items: + type: string + type: array + x-kubernetes-list-type: set + applicationSecurityGroupIds: + description: A list of Application Security Group + ID's which this Virtual Machine Scale Set should + be connected to. + items: + type: string + type: array + x-kubernetes-list-type: set + loadBalancerBackendAddressPoolIds: + description: A list of Backend Address Pools ID's + from a Load Balancer which this Virtual Machine + Scale Set should be connected to. + items: + type: string + type: array + x-kubernetes-list-type: set + loadBalancerInboundNatRulesIds: + description: A list of NAT Rule ID's from a Load Balancer + which this Virtual Machine Scale Set should be connected + to. + items: + type: string + type: array + x-kubernetes-list-type: set + name: + description: The Name of the Public IP Address Configuration. + type: string + primary: + description: Is this the Primary IP Configuration? + type: boolean + publicIpAddress: + description: A public_ip_address block as defined + below. + items: + properties: + domainNameLabel: + description: The Prefix which should be used + for the Domain Name Label for each Virtual + Machine Instance. Azure concatenates the Domain + Name Label and Virtual Machine Index to create + a unique Domain Name Label for each Virtual + Machine. + type: string + idleTimeoutInMinutes: + description: The Idle Timeout in Minutes for + the Public IP Address. Possible values are + in the range 4 to 32. + type: number + ipTag: + description: One or more ip_tag blocks as defined + above. Changing this forces a new resource + to be created. 
+ items: + properties: + tag: + description: The IP Tag associated with + the Public IP, such as SQL or Storage. + Changing this forces a new resource + to be created. + type: string + type: + description: The Type of IP Tag, such + as FirstPartyUsage. Changing this forces + a new resource to be created. + type: string + type: object + type: array + name: + description: The Name of the Public IP Address + Configuration. + type: string + publicIpPrefixId: + description: The ID of the Public IP Address + Prefix from where Public IP Addresses should + be allocated. Changing this forces a new resource + to be created. + type: string + version: + description: Specifies the version of the image + used to create the virtual machines. + type: string + type: object + type: array + subnetId: + description: The ID of the Subnet which this IP Configuration + should be connected to. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + version: + description: Specifies the version of the image used + to create the virtual machines. + type: string + type: object + type: array + name: + description: The Name which should be used for this Network + Interface. Changing this forces a new resource to be created. + type: string + networkSecurityGroupId: + description: The ID of a Network Security Group which should + be assigned to this Network Interface. + type: string + primary: + description: Is this the Primary IP Configuration? + type: boolean + type: object + type: array + osDisk: + description: An os_disk block as defined below. + properties: + caching: + description: The Type of Caching which should be used for + the Internal OS Disk. Possible values are None, ReadOnly + and ReadWrite. + type: string + diffDiskSettings: + description: A diff_disk_settings block as defined above. 
+ Changing this forces a new resource to be created. + properties: + option: + description: Specifies the Ephemeral Disk Settings for + the OS Disk. At this time the only possible value is + Local. Changing this forces a new resource to be created. + type: string + placement: + description: Specifies where to store the Ephemeral Disk. + Possible values are CacheDisk and ResourceDisk. Defaults + to CacheDisk. Changing this forces a new resource to + be created. + type: string + type: object + diskEncryptionSetId: + description: The ID of the Disk Encryption Set which should + be used to encrypt this OS Disk. Conflicts with secure_vm_disk_encryption_set_id. + Changing this forces a new resource to be created. + type: string + diskSizeGb: + description: The Size of the Internal OS Disk in GB, if you + wish to vary from the size used in the image this Virtual + Machine Scale Set is sourced from. + type: number + secureVmDiskEncryptionSetId: + description: The ID of the Disk Encryption Set which should + be used to Encrypt the OS Disk when the Virtual Machine + Scale Set is Confidential VMSS. Conflicts with disk_encryption_set_id. + Changing this forces a new resource to be created. + type: string + securityEncryptionType: + description: Encryption Type when the Virtual Machine Scale + Set is Confidential VMSS. Possible values are VMGuestStateOnly + and DiskWithVMGuestState. Changing this forces a new resource + to be created. + type: string + storageAccountType: + description: The Type of Storage Account which should back + this the Internal OS Disk. Possible values include Standard_LRS, + StandardSSD_LRS, StandardSSD_ZRS, Premium_LRS and Premium_ZRS. + Changing this forces a new resource to be created. + type: string + writeAcceleratorEnabled: + description: Should Write Accelerator be Enabled for this + OS Disk? Defaults to false. + type: boolean + type: object + overprovision: + description: Should Azure over-provision Virtual Machines in this + Scale Set? 
This means that multiple Virtual Machines will be + provisioned and Azure will keep the instances which become available + first - which improves provisioning success rates and improves + deployment time. You're not billed for these over-provisioned + VM's and they don't count towards the Subscription Quota. Defaults + to true. + type: boolean + plan: + description: A plan block as defined below. Changing this forces + a new resource to be created. + properties: + name: + description: Specifies the name of the image from the marketplace. + Changing this forces a new resource to be created. + type: string + product: + description: Specifies the product of the image from the marketplace. + Changing this forces a new resource to be created. + type: string + publisher: + description: Specifies the publisher of the image. Changing + this forces a new resource to be created. + type: string + type: object + platformFaultDomainCount: + description: Specifies the number of fault domains that are used + by this Linux Virtual Machine Scale Set. Changing this forces + a new resource to be created. + type: number + priority: + description: The Priority of this Virtual Machine Scale Set. Possible + values are Regular and Spot. Defaults to Regular. Changing this + value forces a new resource. + type: string + provisionVmAgent: + description: Should the Azure VM Agent be provisioned on each + Virtual Machine in the Scale Set? Defaults to true. Changing + this value forces a new resource to be created. + type: boolean + proximityPlacementGroupId: + description: The ID of the Proximity Placement Group in which + the Virtual Machine Scale Set should be assigned to. Changing + this forces a new resource to be created. + type: string + rollingUpgradePolicy: + description: A rolling_upgrade_policy block as defined below. + This is Required and can only be specified when upgrade_mode + is set to Automatic or Rolling. Changing this forces a new resource + to be created. 
+ properties: + crossZoneUpgradesEnabled: + description: Should the Virtual Machine Scale Set ignore the + Azure Zone boundaries when constructing upgrade batches? + Possible values are true or false. + type: boolean + maxBatchInstancePercent: + description: The maximum percent of total virtual machine + instances that will be upgraded simultaneously by the rolling + upgrade in one batch. As this is a maximum, unhealthy instances + in previous or future batches can cause the percentage of + instances in a batch to decrease to ensure higher reliability. + type: number + maxUnhealthyInstancePercent: + description: The maximum percentage of the total virtual machine + instances in the scale set that can be simultaneously unhealthy, + either as a result of being upgraded, or by being found + in an unhealthy state by the virtual machine health checks + before the rolling upgrade aborts. This constraint will + be checked prior to starting any batch. + type: number + maxUnhealthyUpgradedInstancePercent: + description: The maximum percentage of upgraded virtual machine + instances that can be found to be in an unhealthy state. + This check will happen after each batch is upgraded. If + this percentage is ever exceeded, the rolling update aborts. + type: number + pauseTimeBetweenBatches: + description: The wait time between completing the update for + all virtual machines in one batch and starting the next + batch. The time duration should be specified in ISO 8601 + format. + type: string + prioritizeUnhealthyInstancesEnabled: + description: Upgrade all unhealthy instances in a scale set + before any healthy instances. Possible values are true or + false. + type: boolean + type: object + scaleIn: + description: A scale_in block as defined below. + properties: + forceDeletionEnabled: + description: Should the virtual machines chosen for removal + be force deleted when the virtual machine scale set is being + scaled-in? Possible values are true or false. Defaults to + false. 
+ type: boolean + rule: + description: The scale-in policy rule that decides which virtual + machines are chosen for removal when a Virtual Machine Scale + Set is scaled in. Possible values for the scale-in policy + rules are Default, NewestVM and OldestVM, defaults to Default. + For more information about scale in policy, please refer + to this doc. + type: string + type: object + scaleInPolicy: + description: 'Deprecated: scaleInPolicy will be removed in favour + of the scaleIn code block.' + type: string + secret: + description: One or more secret blocks as defined below. + items: + properties: + certificate: + description: One or more certificate blocks as defined above. + items: + properties: + store: + description: The certificate store on the Virtual + Machine where the certificate should be added. + type: string + url: + description: The Secret URL of a Key Vault Certificate. + type: string + type: object + type: array + keyVaultId: + description: The ID of the Key Vault from which all Secrets + should be sourced. + type: string + type: object + type: array + secureBootEnabled: + description: Specifies if Secure Boot and Trusted Launch is enabled + for the Virtual Machine. Changing this forces a new resource + to be created. + type: boolean + singlePlacementGroup: + description: Should this Virtual Machine Scale Set be limited + to a Single Placement Group, which means the number of instances + will be capped at 100 Virtual Machines. Defaults to true. + type: boolean + sku: + description: The Virtual Machine SKU for the Scale Set, such as + Standard_F2. + type: string + sourceImageId: + description: The ID of an Image which each Virtual Machine in + this Scale Set should be based on. Possible Image ID types include + Image ID, Shared Image ID, Shared Image Version ID, Community + Gallery Image ID, Community Gallery Image Version ID, Shared + Gallery Image ID and Shared Gallery Image Version ID. 
+ type: string + sourceImageReference: + description: A source_image_reference block as defined below. + properties: + offer: + description: Specifies the offer of the image used to create + the virtual machines. Changing this forces a new resource + to be created. + type: string + publisher: + description: Specifies the publisher of the image used to + create the virtual machines. Changing this forces a new + resource to be created. + type: string + sku: + description: Specifies the SKU of the image used to create + the virtual machines. + type: string + version: + description: Specifies the version of the image used to create + the virtual machines. + type: string + type: object + spotRestore: + description: A spot_restore block as defined below. + properties: + enabled: + description: Should the Spot-Try-Restore feature be enabled? + The Spot-Try-Restore feature will attempt to automatically + restore the evicted Spot Virtual Machine Scale Set VM instances + opportunistically based on capacity availability and pricing + constraints. Possible values are true or false. Defaults + to false. Changing this forces a new resource to be created. + type: boolean + timeout: + description: The length of time that the Virtual Machine Scale + Set should attempt to restore the Spot VM instances which + have been evicted. The time duration should be between 15 + minutes and 120 minutes (inclusive). The time duration should + be specified in the ISO 8601 format. Defaults to PT1H. Changing + this forces a new resource to be created. + type: string + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to this + Virtual Machine Scale Set. + type: object + x-kubernetes-map-type: granular + terminateNotification: + description: A terminate_notification block as defined below. + properties: + enabled: + description: Should the terminate notification be enabled + on this Virtual Machine Scale Set? 
+ type: boolean + timeout: + description: Length of time (in minutes, between 5 and 15) + a notification to be sent to the VM on the instance metadata + server till the VM gets deleted. The time duration should + be specified in ISO 8601 format. Defaults to PT5M. + type: string + type: object + terminationNotification: + description: A termination_notification block as defined below. + properties: + enabled: + description: Should the termination notification be enabled + on this Virtual Machine Scale Set? + type: boolean + timeout: + description: Length of time (in minutes, between 5 and 15) + a notification to be sent to the VM on the instance metadata + server till the VM gets deleted. The time duration should + be specified in ISO 8601 format. Defaults to PT5M. + type: string + type: object + timezone: + description: Specifies the time zone of the virtual machine, the + possible values are defined here. + type: string + upgradeMode: + description: Specifies how Upgrades (e.g. changing the Image/SKU) + should be performed to Virtual Machine Instances. Possible values + are Automatic, Manual and Rolling. Defaults to Manual. Changing + this forces a new resource to be created. + type: string + userData: + description: The Base64-Encoded User Data which should be used + for this Virtual Machine Scale Set. + type: string + vtpmEnabled: + description: Specifies if vTPM (Virtual Trusted Platform Module) + and Trusted Launch is enabled for the Virtual Machine. Changing + this forces a new resource to be created. + type: boolean + winrmListener: + description: One or more winrm_listener blocks as defined below. + Changing this forces a new resource to be created. + items: + properties: + certificateUrl: + description: The Secret URL of a Key Vault Certificate, + which must be specified when protocol is set to Https. + Changing this forces a new resource to be created. + type: string + protocol: + description: The Protocol of the WinRM Listener. 
Possible + values are Http and Https. Changing this forces a new + resource to be created. + type: string + type: object + type: array + zoneBalance: + description: Should the Virtual Machines in this Scale Set be + strictly evenly distributed across Availability Zones? Defaults + to false. Changing this forces a new resource to be created. + type: boolean + zones: + description: Specifies a list of Availability Zones in which this + Windows Virtual Machine Scale Set should be located. Changing + this forces a new Windows Virtual Machine Scale Set to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. 
+ type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.adminPasswordSecretRef is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.adminPasswordSecretRef)' + - message: spec.forProvider.adminUsername is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.adminUsername) + || (has(self.initProvider) && has(self.initProvider.adminUsername))' + - message: spec.forProvider.instances is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.instances) + || (has(self.initProvider) && has(self.initProvider.instances))' + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.networkInterface is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.networkInterface) + || (has(self.initProvider) && has(self.initProvider.networkInterface))' + - message: spec.forProvider.osDisk is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.osDisk) + || (has(self.initProvider) && has(self.initProvider.osDisk))' + - message: spec.forProvider.sku is a 
required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.sku) + || (has(self.initProvider) && has(self.initProvider.sku))' + status: + description: WindowsVirtualMachineScaleSetStatus defines the observed + state of WindowsVirtualMachineScaleSet. + properties: + atProvider: + properties: + additionalCapabilities: + description: An additional_capabilities block as defined below. + properties: + ultraSsdEnabled: + description: Should the capacity to enable Data Disks of the + UltraSSD_LRS storage account type be supported on this Virtual + Machine Scale Set? Possible values are true or false. Defaults + to false. Changing this forces a new resource to be created. + type: boolean + type: object + additionalUnattendContent: + description: One or more additional_unattend_content blocks as + defined below. Changing this forces a new resource to be created. + items: + properties: + setting: + description: The name of the setting to which the content + applies. Possible values are AutoLogon and FirstLogonCommands. + Changing this forces a new resource to be created. + type: string + type: object + type: array + adminUsername: + description: The username of the local administrator on each Virtual + Machine Scale Set instance. Changing this forces a new resource + to be created. + type: string + automaticInstanceRepair: + description: An automatic_instance_repair block as defined below. + To enable the automatic instance repair, this Virtual Machine + Scale Set must have a valid health_probe_id or an Application + Health Extension. + properties: + enabled: + description: Should the automatic instance repair be enabled + on this Virtual Machine Scale Set? + type: boolean + gracePeriod: + description: Amount of time (in minutes, between 30 and 90) + for which automatic repairs will be delayed. The grace period + starts right after the VM is found unhealthy. 
The time duration + should be specified in ISO 8601 format. Defaults to PT30M. + type: string + type: object + automaticOsUpgradePolicy: + description: An automatic_os_upgrade_policy block as defined below. + This can only be specified when upgrade_mode is set to either + Automatic or Rolling. + properties: + disableAutomaticRollback: + description: Should automatic rollbacks be disabled? + type: boolean + enableAutomaticOsUpgrade: + description: Should OS Upgrades automatically be applied to + Scale Set instances in a rolling fashion when a newer version + of the OS Image becomes available? + type: boolean + type: object + bootDiagnostics: + description: A boot_diagnostics block as defined below. + properties: + storageAccountUri: + description: The Primary/Secondary Endpoint for the Azure + Storage Account which should be used to store Boot Diagnostics, + including Console Output and Screenshots from the Hypervisor. + type: string + type: object + capacityReservationGroupId: + description: Specifies the ID of the Capacity Reservation Group + which the Virtual Machine Scale Set should be allocated to. + Changing this forces a new resource to be created. + type: string + computerNamePrefix: + description: The prefix which should be used for the name of the + Virtual Machines in this Scale Set. If unspecified this defaults + to the value for the name field. If the value of the name field + is not a valid computer_name_prefix, then you must specify computer_name_prefix. + Changing this forces a new resource to be created. + type: string + dataDisk: + description: One or more data_disk blocks as defined below. + items: + properties: + caching: + description: The type of Caching which should be used for + this Data Disk. Possible values are None, ReadOnly and + ReadWrite. + type: string + createOption: + description: The create option which should be used for + this Data Disk. Possible values are Empty and FromImage. + Defaults to Empty. 
(FromImage should only be used if the + source image includes data disks). + type: string + diskEncryptionSetId: + description: The ID of the Disk Encryption Set which should + be used to encrypt this OS Disk. Conflicts with secure_vm_disk_encryption_set_id. + Changing this forces a new resource to be created. + type: string + diskSizeGb: + description: The size of the Data Disk which should be created. + type: number + lun: + description: The Logical Unit Number of the Data Disk, which + must be unique within the Virtual Machine. + type: number + name: + description: The name of the Data Disk. + type: string + storageAccountType: + description: The Type of Storage Account which should back + this Data Disk. Possible values include Standard_LRS, + StandardSSD_LRS, StandardSSD_ZRS, Premium_LRS, PremiumV2_LRS, + Premium_ZRS and UltraSSD_LRS. + type: string + ultraSsdDiskIopsReadWrite: + description: Specifies the Read-Write IOPS for this Data + Disk. Only settable when storage_account_type is PremiumV2_LRS + or UltraSSD_LRS. + type: number + ultraSsdDiskMbpsReadWrite: + description: Specifies the bandwidth in MB per second for + this Data Disk. Only settable when storage_account_type + is PremiumV2_LRS or UltraSSD_LRS. + type: number + writeAcceleratorEnabled: + description: Should Write Accelerator be Enabled for this + OS Disk? Defaults to false. + type: boolean + type: object + type: array + doNotRunExtensionsOnOverprovisionedMachines: + description: Should Virtual Machine Extensions be run on Overprovisioned + Virtual Machines in the Scale Set? Defaults to false. + type: boolean + edgeZone: + description: Specifies the Edge Zone within the Azure Region where + this Windows Virtual Machine Scale Set should exist. Changing + this forces a new Windows Virtual Machine Scale Set to be created. + type: string + enableAutomaticUpdates: + description: Are automatic updates enabled for this Virtual Machine? + Defaults to true. 
+ type: boolean + encryptionAtHostEnabled: + description: Should all of the disks (including the temp disk) + attached to this Virtual Machine be encrypted by enabling Encryption + at Host? + type: boolean + evictionPolicy: + description: Specifies the eviction policy for Virtual Machines + in this Scale Set. Possible values are Deallocate and Delete. + Changing this forces a new resource to be created. + type: string + extension: + description: One or more extension blocks as defined below + items: + properties: + autoUpgradeMinorVersion: + description: Should the latest version of the Extension + be used at Deployment Time, if one is available? This + won't auto-update the extension on existing installation. + Defaults to true. + type: boolean + automaticUpgradeEnabled: + description: Should the Extension be automatically updated + whenever the Publisher releases a new version of this + VM Extension? + type: boolean + forceUpdateTag: + description: A value which, when different to the previous + value can be used to force-run the Extension even if the + Extension Configuration hasn't changed. + type: string + name: + description: The name for the Virtual Machine Scale Set + Extension. + type: string + protectedSettingsFromKeyVault: + description: A protected_settings_from_key_vault block as + defined below. + properties: + secretUrl: + description: The URL to the Key Vault Secret which stores + the protected settings. + type: string + sourceVaultId: + description: The ID of the source Key Vault. + type: string + type: object + provisionAfterExtensions: + description: An ordered list of Extension names which this + should be provisioned after. + items: + type: string + type: array + publisher: + description: Specifies the Publisher of the Extension. + type: string + settings: + description: A JSON String which specifies Settings for + the Extension. + type: string + type: + description: Specifies the Type of the Extension. 
+ type: string + typeHandlerVersion: + description: Specifies the version of the extension to use, + available versions can be found using the Azure CLI. + type: string + type: object + type: array + extensionOperationsEnabled: + description: Should extension operations be allowed on the Virtual + Machine Scale Set? Possible values are true or false. Defaults + to true. Changing this forces a new Windows Virtual Machine + Scale Set to be created. + type: boolean + extensionsTimeBudget: + description: Specifies the duration allocated for all extensions + to start. The time duration should be between 15 minutes and + 120 minutes (inclusive) and should be specified in ISO 8601 + format. Defaults to PT1H30M. + type: string + galleryApplication: + description: One or more gallery_application blocks as defined + below. + items: + properties: + configurationBlobUri: + description: Specifies the URI to an Azure Blob that will + replace the default configuration for the package if provided. + Changing this forces a new resource to be created. + type: string + order: + description: Specifies the order in which the packages have + to be installed. Possible values are between 0 and 2,147,483,647. + Changing this forces a new resource to be created. + type: number + tag: + description: Specifies a passthrough value for more generic + context. This field can be any valid string value. Changing + this forces a new resource to be created. + type: string + versionId: + description: Specifies the Gallery Application Version resource + ID. Changing this forces a new resource to be created. + type: string + type: object + type: array + galleryApplications: + items: + properties: + configurationReferenceBlobUri: + type: string + order: + description: Specifies the order in which the packages have + to be installed. Possible values are between 0 and 2,147,483,647. + Changing this forces a new resource to be created. 
+ type: number + packageReferenceId: + description: The ID of the Windows Virtual Machine Scale + Set. + type: string + tag: + description: The IP Tag associated with the Public IP, such + as SQL or Storage. Changing this forces a new resource + to be created. + type: string + type: object + type: array + healthProbeId: + description: The ID of a Load Balancer Probe which should be used + to determine the health of an instance. This is Required and + can only be specified when upgrade_mode is set to Automatic + or Rolling. + type: string + hostGroupId: + description: Specifies the ID of the dedicated host group that + the virtual machine scale set resides in. Changing this forces + a new resource to be created. + type: string + id: + description: The ID of the Windows Virtual Machine Scale Set. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Windows Virtual Machine Scale + Set. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Principal ID associated with this Managed + Service Identity. + type: string + tenantId: + description: The Tenant ID associated with this Managed Service + Identity. + type: string + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Windows Virtual Machine + Scale Set. Possible values are SystemAssigned, UserAssigned, + SystemAssigned, UserAssigned (to enable both). + type: string + type: object + instances: + description: The number of Virtual Machines in the Scale Set. + type: number + licenseType: + description: Specifies the type of on-premise license (also known + as Azure Hybrid Use Benefit) which should be used for this Virtual + Machine Scale Set. Possible values are None, Windows_Client + and Windows_Server. 
+ type: string + location: + description: The Azure location where the Windows Virtual Machine + Scale Set should exist. Changing this forces a new resource + to be created. + type: string + maxBidPrice: + description: The maximum price you're willing to pay for each + Virtual Machine in this Scale Set, in US Dollars; which must + be greater than the current spot price. If this bid price falls + below the current spot price the Virtual Machines in the Scale + Set will be evicted using the eviction_policy. Defaults to -1, + which means that each Virtual Machine in the Scale Set should + not be evicted for price reasons. + type: number + networkInterface: + description: One or more network_interface blocks as defined below. + items: + properties: + dnsServers: + description: A list of IP Addresses of DNS Servers which + should be assigned to the Network Interface. + items: + type: string + type: array + enableAcceleratedNetworking: + description: Does this Network Interface support Accelerated + Networking? Defaults to false. + type: boolean + enableIpForwarding: + description: Does this Network Interface support IP Forwarding? + Defaults to false. + type: boolean + ipConfiguration: + description: One or more ip_configuration blocks as defined + above. + items: + properties: + applicationGatewayBackendAddressPoolIds: + description: A list of Backend Address Pools ID's + from a Application Gateway which this Virtual Machine + Scale Set should be connected to. + items: + type: string + type: array + x-kubernetes-list-type: set + applicationSecurityGroupIds: + description: A list of Application Security Group + ID's which this Virtual Machine Scale Set should + be connected to. + items: + type: string + type: array + x-kubernetes-list-type: set + loadBalancerBackendAddressPoolIds: + description: A list of Backend Address Pools ID's + from a Load Balancer which this Virtual Machine + Scale Set should be connected to. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + loadBalancerInboundNatRulesIds: + description: A list of NAT Rule ID's from a Load Balancer + which this Virtual Machine Scale Set should be connected + to. + items: + type: string + type: array + x-kubernetes-list-type: set + name: + description: The Name of the Public IP Address Configuration. + type: string + primary: + description: Is this the Primary IP Configuration? + type: boolean + publicIpAddress: + description: A public_ip_address block as defined + below. + items: + properties: + domainNameLabel: + description: The Prefix which should be used + for the Domain Name Label for each Virtual + Machine Instance. Azure concatenates the Domain + Name Label and Virtual Machine Index to create + a unique Domain Name Label for each Virtual + Machine. + type: string + idleTimeoutInMinutes: + description: The Idle Timeout in Minutes for + the Public IP Address. Possible values are + in the range 4 to 32. + type: number + ipTag: + description: One or more ip_tag blocks as defined + above. Changing this forces a new resource + to be created. + items: + properties: + tag: + description: The IP Tag associated with + the Public IP, such as SQL or Storage. + Changing this forces a new resource + to be created. + type: string + type: + description: The Type of IP Tag, such + as FirstPartyUsage. Changing this forces + a new resource to be created. + type: string + type: object + type: array + name: + description: The Name of the Public IP Address + Configuration. + type: string + publicIpPrefixId: + description: The ID of the Public IP Address + Prefix from where Public IP Addresses should + be allocated. Changing this forces a new resource + to be created. + type: string + version: + description: Specifies the version of the image + used to create the virtual machines. 
+ type: string + type: object + type: array + subnetId: + description: The ID of the Subnet which this IP Configuration + should be connected to. + type: string + version: + description: Specifies the version of the image used + to create the virtual machines. + type: string + type: object + type: array + name: + description: The Name which should be used for this Network + Interface. Changing this forces a new resource to be created. + type: string + networkSecurityGroupId: + description: The ID of a Network Security Group which should + be assigned to this Network Interface. + type: string + primary: + description: Is this the Primary IP Configuration? + type: boolean + type: object + type: array + osDisk: + description: An os_disk block as defined below. + properties: + caching: + description: The Type of Caching which should be used for + the Internal OS Disk. Possible values are None, ReadOnly + and ReadWrite. + type: string + diffDiskSettings: + description: A diff_disk_settings block as defined above. + Changing this forces a new resource to be created. + properties: + option: + description: Specifies the Ephemeral Disk Settings for + the OS Disk. At this time the only possible value is + Local. Changing this forces a new resource to be created. + type: string + placement: + description: Specifies where to store the Ephemeral Disk. + Possible values are CacheDisk and ResourceDisk. Defaults + to CacheDisk. Changing this forces a new resource to + be created. + type: string + type: object + diskEncryptionSetId: + description: The ID of the Disk Encryption Set which should + be used to encrypt this OS Disk. Conflicts with secure_vm_disk_encryption_set_id. + Changing this forces a new resource to be created. + type: string + diskSizeGb: + description: The Size of the Internal OS Disk in GB, if you + wish to vary from the size used in the image this Virtual + Machine Scale Set is sourced from. 
+ type: number + secureVmDiskEncryptionSetId: + description: The ID of the Disk Encryption Set which should + be used to Encrypt the OS Disk when the Virtual Machine + Scale Set is Confidential VMSS. Conflicts with disk_encryption_set_id. + Changing this forces a new resource to be created. + type: string + securityEncryptionType: + description: Encryption Type when the Virtual Machine Scale + Set is Confidential VMSS. Possible values are VMGuestStateOnly + and DiskWithVMGuestState. Changing this forces a new resource + to be created. + type: string + storageAccountType: + description: The Type of Storage Account which should back + this the Internal OS Disk. Possible values include Standard_LRS, + StandardSSD_LRS, StandardSSD_ZRS, Premium_LRS and Premium_ZRS. + Changing this forces a new resource to be created. + type: string + writeAcceleratorEnabled: + description: Should Write Accelerator be Enabled for this + OS Disk? Defaults to false. + type: boolean + type: object + overprovision: + description: Should Azure over-provision Virtual Machines in this + Scale Set? This means that multiple Virtual Machines will be + provisioned and Azure will keep the instances which become available + first - which improves provisioning success rates and improves + deployment time. You're not billed for these over-provisioned + VM's and they don't count towards the Subscription Quota. Defaults + to true. + type: boolean + plan: + description: A plan block as defined below. Changing this forces + a new resource to be created. + properties: + name: + description: Specifies the name of the image from the marketplace. + Changing this forces a new resource to be created. + type: string + product: + description: Specifies the product of the image from the marketplace. + Changing this forces a new resource to be created. + type: string + publisher: + description: Specifies the publisher of the image. Changing + this forces a new resource to be created. 
+ type: string + type: object + platformFaultDomainCount: + description: Specifies the number of fault domains that are used + by this Linux Virtual Machine Scale Set. Changing this forces + a new resource to be created. + type: number + priority: + description: The Priority of this Virtual Machine Scale Set. Possible + values are Regular and Spot. Defaults to Regular. Changing this + value forces a new resource. + type: string + provisionVmAgent: + description: Should the Azure VM Agent be provisioned on each + Virtual Machine in the Scale Set? Defaults to true. Changing + this value forces a new resource to be created. + type: boolean + proximityPlacementGroupId: + description: The ID of the Proximity Placement Group in which + the Virtual Machine Scale Set should be assigned to. Changing + this forces a new resource to be created. + type: string + resourceGroupName: + description: The name of the Resource Group in which the Windows + Virtual Machine Scale Set should be exist. Changing this forces + a new resource to be created. + type: string + rollingUpgradePolicy: + description: A rolling_upgrade_policy block as defined below. + This is Required and can only be specified when upgrade_mode + is set to Automatic or Rolling. Changing this forces a new resource + to be created. + properties: + crossZoneUpgradesEnabled: + description: Should the Virtual Machine Scale Set ignore the + Azure Zone boundaries when constructing upgrade batches? + Possible values are true or false. + type: boolean + maxBatchInstancePercent: + description: The maximum percent of total virtual machine + instances that will be upgraded simultaneously by the rolling + upgrade in one batch. As this is a maximum, unhealthy instances + in previous or future batches can cause the percentage of + instances in a batch to decrease to ensure higher reliability. 
+ type: number + maxUnhealthyInstancePercent: + description: The maximum percentage of the total virtual machine + instances in the scale set that can be simultaneously unhealthy, + either as a result of being upgraded, or by being found + in an unhealthy state by the virtual machine health checks + before the rolling upgrade aborts. This constraint will + be checked prior to starting any batch. + type: number + maxUnhealthyUpgradedInstancePercent: + description: The maximum percentage of upgraded virtual machine + instances that can be found to be in an unhealthy state. + This check will happen after each batch is upgraded. If + this percentage is ever exceeded, the rolling update aborts. + type: number + pauseTimeBetweenBatches: + description: The wait time between completing the update for + all virtual machines in one batch and starting the next + batch. The time duration should be specified in ISO 8601 + format. + type: string + prioritizeUnhealthyInstancesEnabled: + description: Upgrade all unhealthy instances in a scale set + before any healthy instances. Possible values are true or + false. + type: boolean + type: object + scaleIn: + description: A scale_in block as defined below. + properties: + forceDeletionEnabled: + description: Should the virtual machines chosen for removal + be force deleted when the virtual machine scale set is being + scaled-in? Possible values are true or false. Defaults to + false. + type: boolean + rule: + description: The scale-in policy rule that decides which virtual + machines are chosen for removal when a Virtual Machine Scale + Set is scaled in. Possible values for the scale-in policy + rules are Default, NewestVM and OldestVM, defaults to Default. + For more information about scale in policy, please refer + to this doc. + type: string + type: object + scaleInPolicy: + description: 'Deprecated: scaleInPolicy will be removed in favour + of the scaleIn code block.' 
+ type: string + secret: + description: One or more secret blocks as defined below. + items: + properties: + certificate: + description: One or more certificate blocks as defined above. + items: + properties: + store: + description: The certificate store on the Virtual + Machine where the certificate should be added. + type: string + url: + description: The Secret URL of a Key Vault Certificate. + type: string + type: object + type: array + keyVaultId: + description: The ID of the Key Vault from which all Secrets + should be sourced. + type: string + type: object + type: array + secureBootEnabled: + description: Specifies if Secure Boot and Trusted Launch is enabled + for the Virtual Machine. Changing this forces a new resource + to be created. + type: boolean + singlePlacementGroup: + description: Should this Virtual Machine Scale Set be limited + to a Single Placement Group, which means the number of instances + will be capped at 100 Virtual Machines. Defaults to true. + type: boolean + sku: + description: The Virtual Machine SKU for the Scale Set, such as + Standard_F2. + type: string + sourceImageId: + description: The ID of an Image which each Virtual Machine in + this Scale Set should be based on. Possible Image ID types include + Image ID, Shared Image ID, Shared Image Version ID, Community + Gallery Image ID, Community Gallery Image Version ID, Shared + Gallery Image ID and Shared Gallery Image Version ID. + type: string + sourceImageReference: + description: A source_image_reference block as defined below. + properties: + offer: + description: Specifies the offer of the image used to create + the virtual machines. Changing this forces a new resource + to be created. + type: string + publisher: + description: Specifies the publisher of the image used to + create the virtual machines. Changing this forces a new + resource to be created. + type: string + sku: + description: Specifies the SKU of the image used to create + the virtual machines. 
+ type: string + version: + description: Specifies the version of the image used to create + the virtual machines. + type: string + type: object + spotRestore: + description: A spot_restore block as defined below. + properties: + enabled: + description: Should the Spot-Try-Restore feature be enabled? + The Spot-Try-Restore feature will attempt to automatically + restore the evicted Spot Virtual Machine Scale Set VM instances + opportunistically based on capacity availability and pricing + constraints. Possible values are true or false. Defaults + to false. Changing this forces a new resource to be created. + type: boolean + timeout: + description: The length of time that the Virtual Machine Scale + Set should attempt to restore the Spot VM instances which + have been evicted. The time duration should be between 15 + minutes and 120 minutes (inclusive). The time duration should + be specified in the ISO 8601 format. Defaults to PT1H. Changing + this forces a new resource to be created. + type: string + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to this + Virtual Machine Scale Set. + type: object + x-kubernetes-map-type: granular + terminateNotification: + description: A terminate_notification block as defined below. + properties: + enabled: + description: Should the terminate notification be enabled + on this Virtual Machine Scale Set? + type: boolean + timeout: + description: Length of time (in minutes, between 5 and 15) + a notification to be sent to the VM on the instance metadata + server till the VM gets deleted. The time duration should + be specified in ISO 8601 format. Defaults to PT5M. + type: string + type: object + terminationNotification: + description: A termination_notification block as defined below. + properties: + enabled: + description: Should the termination notification be enabled + on this Virtual Machine Scale Set? 
+ type: boolean + timeout: + description: Length of time (in minutes, between 5 and 15) + a notification to be sent to the VM on the instance metadata + server till the VM gets deleted. The time duration should + be specified in ISO 8601 format. Defaults to PT5M. + type: string + type: object + timezone: + description: Specifies the time zone of the virtual machine, the + possible values are defined here. + type: string + uniqueId: + description: The Unique ID for this Windows Virtual Machine Scale + Set. + type: string + upgradeMode: + description: Specifies how Upgrades (e.g. changing the Image/SKU) + should be performed to Virtual Machine Instances. Possible values + are Automatic, Manual and Rolling. Defaults to Manual. Changing + this forces a new resource to be created. + type: string + userData: + description: The Base64-Encoded User Data which should be used + for this Virtual Machine Scale Set. + type: string + vtpmEnabled: + description: Specifies if vTPM (Virtual Trusted Platform Module) + and Trusted Launch is enabled for the Virtual Machine. Changing + this forces a new resource to be created. + type: boolean + winrmListener: + description: One or more winrm_listener blocks as defined below. + Changing this forces a new resource to be created. + items: + properties: + certificateUrl: + description: The Secret URL of a Key Vault Certificate, + which must be specified when protocol is set to Https. + Changing this forces a new resource to be created. + type: string + protocol: + description: The Protocol of the WinRM Listener. Possible + values are Http and Https. Changing this forces a new + resource to be created. + type: string + type: object + type: array + zoneBalance: + description: Should the Virtual Machines in this Scale Set be + strictly evenly distributed across Availability Zones? Defaults + to false. Changing this forces a new resource to be created. 
+ type: boolean + zones: + description: Specifies a list of Availability Zones in which this + Windows Virtual Machine Scale Set should be located. Changing + this forces a new Windows Virtual Machine Scale Set to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/consumption.azure.upbound.io_budgetmanagementgroups.yaml b/package/crds/consumption.azure.upbound.io_budgetmanagementgroups.yaml index 0ac16364e..05377f62d 100644 --- a/package/crds/consumption.azure.upbound.io_budgetmanagementgroups.yaml +++ b/package/crds/consumption.azure.upbound.io_budgetmanagementgroups.yaml @@ -996,3 +996,936 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: BudgetManagementGroup is the Schema for the BudgetManagementGroups + API. Manages a Consumption Budget for a Management Group. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: BudgetManagementGroupSpec defines the desired state of BudgetManagementGroup + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + amount: + description: The total amount of cost to track with the budget. + type: number + etag: + description: The ETag of the Management Group Consumption Budget. + type: string + filter: + description: A filter block as defined below. + properties: + dimension: + description: One or more dimension blocks as defined below + to filter the budget on. + items: + properties: + name: + description: The name of the tag to use for the filter. + type: string + operator: + description: The operator to use for comparison. The + allowed values are In. Defaults to In. + type: string + values: + description: Specifies a list of values for the tag. + items: + type: string + type: array + type: object + type: array + not: + description: A not block as defined below to filter the budget + on. This is deprecated as the API no longer supports it + and will be removed in version 4.0 of the provider. + properties: + dimension: + description: One dimension block as defined below to filter + the budget on. Conflicts with tag. 
+ properties: + name: + description: The name of the tag to use for the filter. + type: string + operator: + description: The operator to use for comparison. The + allowed values are In. Defaults to In. + type: string + values: + description: Specifies a list of values for the tag. + items: + type: string + type: array + type: object + tag: + description: One tag block as defined below to filter + the budget on. Conflicts with dimension. + properties: + name: + description: The name of the tag to use for the filter. + type: string + operator: + description: The operator to use for comparison. The + allowed values are In. Defaults to In. + type: string + values: + description: Specifies a list of values for the tag. + items: + type: string + type: array + type: object + type: object + tag: + description: One or more tag blocks as defined below to filter + the budget on. + items: + properties: + name: + description: The name of the tag to use for the filter. + type: string + operator: + description: The operator to use for comparison. The + allowed values are In. Defaults to In. + type: string + values: + description: Specifies a list of values for the tag. + items: + type: string + type: array + type: object + type: array + type: object + managementGroupId: + description: The ID of the Management Group. Changing this forces + a new resource to be created. + type: string + managementGroupIdRef: + description: Reference to a ManagementGroup in management to populate + managementGroupId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + managementGroupIdSelector: + description: Selector for a ManagementGroup in management to populate + managementGroupId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: The name which should be used for this Management + Group Consumption Budget. Changing this forces a new resource + to be created. + type: string + notification: + description: One or more notification blocks as defined below. 
+ items: + properties: + contactEmails: + description: Specifies a list of email addresses to send + the budget notification to when the threshold is exceeded. + items: + type: string + type: array + enabled: + description: Should the notification be enabled? Defaults + to true. + type: boolean + operator: + description: The comparison operator for the notification. + Must be one of EqualTo, GreaterThan, or GreaterThanOrEqualTo. + type: string + threshold: + description: Threshold value associated with a notification. + Notification is sent when the cost exceeded the threshold. + It is always percent and has to be between 0 and 1000. + type: number + thresholdType: + description: The type of threshold for the notification. + This determines whether the notification is triggered + by forecasted costs or actual costs. The allowed values + are Actual and Forecasted. Default is Actual. Changing + this forces a new resource to be created. + type: string + type: object + type: array + timeGrain: + description: The time covered by a budget. Tracking of the amount + will be reset based on the time grain. Must be one of BillingAnnual, + BillingMonth, BillingQuarter, Annually, Monthly and Quarterly. + Defaults to Monthly. Changing this forces a new resource to + be created. + type: string + timePeriod: + description: A time_period block as defined below. + properties: + endDate: + description: The end date for the budget. If not set this + will be 10 years after the start date. + type: string + startDate: + description: The start date for the budget. The start date + must be first of the month and should be less than the end + date. Budget start date must be on or after June 1, 2017. + Future start date should not be more than twelve months. + Past start date should be selected within the timegrain + period. Changing this forces a new resource to be created. + type: string + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. 
It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + amount: + description: The total amount of cost to track with the budget. + type: number + etag: + description: The ETag of the Management Group Consumption Budget. + type: string + filter: + description: A filter block as defined below. + properties: + dimension: + description: One or more dimension blocks as defined below + to filter the budget on. + items: + properties: + name: + description: The name of the tag to use for the filter. + type: string + operator: + description: The operator to use for comparison. The + allowed values are In. Defaults to In. + type: string + values: + description: Specifies a list of values for the tag. + items: + type: string + type: array + type: object + type: array + not: + description: A not block as defined below to filter the budget + on. This is deprecated as the API no longer supports it + and will be removed in version 4.0 of the provider. + properties: + dimension: + description: One dimension block as defined below to filter + the budget on. Conflicts with tag. + properties: + name: + description: The name of the tag to use for the filter. + type: string + operator: + description: The operator to use for comparison. The + allowed values are In. Defaults to In. + type: string + values: + description: Specifies a list of values for the tag. 
+ items: + type: string + type: array + type: object + tag: + description: One tag block as defined below to filter + the budget on. Conflicts with dimension. + properties: + name: + description: The name of the tag to use for the filter. + type: string + operator: + description: The operator to use for comparison. The + allowed values are In. Defaults to In. + type: string + values: + description: Specifies a list of values for the tag. + items: + type: string + type: array + type: object + type: object + tag: + description: One or more tag blocks as defined below to filter + the budget on. + items: + properties: + name: + description: The name of the tag to use for the filter. + type: string + operator: + description: The operator to use for comparison. The + allowed values are In. Defaults to In. + type: string + values: + description: Specifies a list of values for the tag. + items: + type: string + type: array + type: object + type: array + type: object + managementGroupId: + description: The ID of the Management Group. Changing this forces + a new resource to be created. + type: string + managementGroupIdRef: + description: Reference to a ManagementGroup in management to populate + managementGroupId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + managementGroupIdSelector: + description: Selector for a ManagementGroup in management to populate + managementGroupId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: The name which should be used for this Management + Group Consumption Budget. Changing this forces a new resource + to be created. + type: string + notification: + description: One or more notification blocks as defined below. + items: + properties: + contactEmails: + description: Specifies a list of email addresses to send + the budget notification to when the threshold is exceeded. + items: + type: string + type: array + enabled: + description: Should the notification be enabled? Defaults + to true. 
+ type: boolean + operator: + description: The comparison operator for the notification. + Must be one of EqualTo, GreaterThan, or GreaterThanOrEqualTo. + type: string + threshold: + description: Threshold value associated with a notification. + Notification is sent when the cost exceeded the threshold. + It is always percent and has to be between 0 and 1000. + type: number + thresholdType: + description: The type of threshold for the notification. + This determines whether the notification is triggered + by forecasted costs or actual costs. The allowed values + are Actual and Forecasted. Default is Actual. Changing + this forces a new resource to be created. + type: string + type: object + type: array + timeGrain: + description: The time covered by a budget. Tracking of the amount + will be reset based on the time grain. Must be one of BillingAnnual, + BillingMonth, BillingQuarter, Annually, Monthly and Quarterly. + Defaults to Monthly. Changing this forces a new resource to + be created. + type: string + timePeriod: + description: A time_period block as defined below. + properties: + endDate: + description: The end date for the budget. If not set this + will be 10 years after the start date. + type: string + startDate: + description: The start date for the budget. The start date + must be first of the month and should be less than the end + date. Budget start date must be on or after June 1, 2017. + Future start date should not be more than twelve months. + Past start date should be selected within the timegrain + period. Changing this forces a new resource to be created. + type: string + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. 
+ This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.amount is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.amount) + || (has(self.initProvider) && has(self.initProvider.amount))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.notification is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.notification) + || (has(self.initProvider) && has(self.initProvider.notification))' + - message: spec.forProvider.timePeriod is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.timePeriod) + || (has(self.initProvider) && has(self.initProvider.timePeriod))' + status: + description: BudgetManagementGroupStatus defines the observed state of + BudgetManagementGroup. + properties: + atProvider: + properties: + amount: + description: The total amount of cost to track with the budget. + type: number + etag: + description: The ETag of the Management Group Consumption Budget. + type: string + filter: + description: A filter block as defined below. + properties: + dimension: + description: One or more dimension blocks as defined below + to filter the budget on. + items: + properties: + name: + description: The name of the tag to use for the filter. + type: string + operator: + description: The operator to use for comparison. 
The + allowed values are In. Defaults to In. + type: string + values: + description: Specifies a list of values for the tag. + items: + type: string + type: array + type: object + type: array + not: + description: A not block as defined below to filter the budget + on. This is deprecated as the API no longer supports it + and will be removed in version 4.0 of the provider. + properties: + dimension: + description: One dimension block as defined below to filter + the budget on. Conflicts with tag. + properties: + name: + description: The name of the tag to use for the filter. + type: string + operator: + description: The operator to use for comparison. The + allowed values are In. Defaults to In. + type: string + values: + description: Specifies a list of values for the tag. + items: + type: string + type: array + type: object + tag: + description: One tag block as defined below to filter + the budget on. Conflicts with dimension. + properties: + name: + description: The name of the tag to use for the filter. + type: string + operator: + description: The operator to use for comparison. The + allowed values are In. Defaults to In. + type: string + values: + description: Specifies a list of values for the tag. + items: + type: string + type: array + type: object + type: object + tag: + description: One or more tag blocks as defined below to filter + the budget on. + items: + properties: + name: + description: The name of the tag to use for the filter. + type: string + operator: + description: The operator to use for comparison. The + allowed values are In. Defaults to In. + type: string + values: + description: Specifies a list of values for the tag. + items: + type: string + type: array + type: object + type: array + type: object + id: + description: The ID of the Management Group Consumption Budget. + type: string + managementGroupId: + description: The ID of the Management Group. Changing this forces + a new resource to be created. 
+ type: string + name: + description: The name which should be used for this Management + Group Consumption Budget. Changing this forces a new resource + to be created. + type: string + notification: + description: One or more notification blocks as defined below. + items: + properties: + contactEmails: + description: Specifies a list of email addresses to send + the budget notification to when the threshold is exceeded. + items: + type: string + type: array + enabled: + description: Should the notification be enabled? Defaults + to true. + type: boolean + operator: + description: The comparison operator for the notification. + Must be one of EqualTo, GreaterThan, or GreaterThanOrEqualTo. + type: string + threshold: + description: Threshold value associated with a notification. + Notification is sent when the cost exceeded the threshold. + It is always percent and has to be between 0 and 1000. + type: number + thresholdType: + description: The type of threshold for the notification. + This determines whether the notification is triggered + by forecasted costs or actual costs. The allowed values + are Actual and Forecasted. Default is Actual. Changing + this forces a new resource to be created. + type: string + type: object + type: array + timeGrain: + description: The time covered by a budget. Tracking of the amount + will be reset based on the time grain. Must be one of BillingAnnual, + BillingMonth, BillingQuarter, Annually, Monthly and Quarterly. + Defaults to Monthly. Changing this forces a new resource to + be created. + type: string + timePeriod: + description: A time_period block as defined below. + properties: + endDate: + description: The end date for the budget. If not set this + will be 10 years after the start date. + type: string + startDate: + description: The start date for the budget. The start date + must be first of the month and should be less than the end + date. Budget start date must be on or after June 1, 2017. 
+ Future start date should not be more than twelve months. + Past start date should be selected within the timegrain + period. Changing this forces a new resource to be created. + type: string + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/consumption.azure.upbound.io_budgetresourcegroups.yaml b/package/crds/consumption.azure.upbound.io_budgetresourcegroups.yaml index 7356b61e3..3ac0cef38 100644 --- a/package/crds/consumption.azure.upbound.io_budgetresourcegroups.yaml +++ b/package/crds/consumption.azure.upbound.io_budgetresourcegroups.yaml @@ -1037,3 +1037,980 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: BudgetResourceGroup is the Schema for the BudgetResourceGroups + API. Manages a Resource Group Consumption Budget. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: BudgetResourceGroupSpec defines the desired state of BudgetResourceGroup + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + amount: + description: The total amount of cost to track with the budget. + type: number + etag: + description: The ETag of the Resource Group Consumption Budget + type: string + filter: + description: A filter block as defined below. + properties: + dimension: + description: One or more dimension blocks as defined below + to filter the budget on. + items: + properties: + name: + description: The name of the tag to use for the filter. + type: string + operator: + description: The operator to use for comparison. The + allowed values are In. Defaults to In. + type: string + values: + description: Specifies a list of values for the tag. + items: + type: string + type: array + type: object + type: array + not: + description: A not block as defined below to filter the budget + on. This is deprecated as the API no longer supports it + and will be removed in version 4.0 of the provider. + properties: + dimension: + description: One dimension block as defined below to filter + the budget on. Conflicts with tag. 
+ properties: + name: + description: The name of the tag to use for the filter. + type: string + operator: + description: The operator to use for comparison. The + allowed values are In. Defaults to In. + type: string + values: + description: Specifies a list of values for the tag. + items: + type: string + type: array + type: object + tag: + description: One tag block as defined below to filter + the budget on. Conflicts with dimension. + properties: + name: + description: The name of the tag to use for the filter. + type: string + operator: + description: The operator to use for comparison. The + allowed values are In. Defaults to In. + type: string + values: + description: Specifies a list of values for the tag. + items: + type: string + type: array + type: object + type: object + tag: + description: One or more tag blocks as defined below to filter + the budget on. + items: + properties: + name: + description: The name of the tag to use for the filter. + type: string + operator: + description: The operator to use for comparison. The + allowed values are In. Defaults to In. + type: string + values: + description: Specifies a list of values for the tag. + items: + type: string + type: array + type: object + type: array + type: object + name: + description: The name which should be used for this Resource Group + Consumption Budget. Changing this forces a new Resource Group + Consumption Budget to be created. + type: string + notification: + description: One or more notification blocks as defined below. + items: + properties: + contactEmails: + description: Specifies a list of email addresses to send + the budget notification to when the threshold is exceeded. + items: + type: string + type: array + contactGroups: + description: Specifies a list of Action Group IDs to send + the budget notification to when the threshold is exceeded. 
+ items: + type: string + type: array + contactRoles: + description: Specifies a list of contact roles to send the + budget notification to when the threshold is exceeded. + items: + type: string + type: array + enabled: + description: Should the notification be enabled? Defaults + to true. + type: boolean + operator: + description: The comparison operator for the notification. + Must be one of EqualTo, GreaterThan, or GreaterThanOrEqualTo. + type: string + threshold: + description: Threshold value associated with a notification. + Notification is sent when the cost exceeded the threshold. + It is always percent and has to be between 0 and 1000. + type: number + thresholdType: + description: The type of threshold for the notification. + This determines whether the notification is triggered + by forecasted costs or actual costs. The allowed values + are Actual and Forecasted. Default is Actual. Changing + this forces a new resource to be created. + type: string + type: object + type: array + resourceGroupId: + description: The ID of the Resource Group to create the consumption + budget for in the form of /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/resourceGroup1. + Changing this forces a new Resource Group Consumption Budget + to be created. + type: string + resourceGroupIdRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupIdSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + timeGrain: + description: The time covered by a budget. Tracking of the amount + will be reset based on the time grain. Must be one of BillingAnnual, + BillingMonth, BillingQuarter, Annually, Monthly and Quarterly. + Defaults to Monthly. Changing this forces a new resource to + be created. + type: string + timePeriod: + description: A time_period block as defined below. + properties: + endDate: + description: The end date for the budget. 
If not set this + will be 10 years after the start date. + type: string + startDate: + description: The start date for the budget. The start date + must be first of the month and should be less than the end + date. Budget start date must be on or after June 1, 2017. + Future start date should not be more than twelve months. + Past start date should be selected within the timegrain + period. Changing this forces a new Resource Group Consumption + Budget to be created. + type: string + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + amount: + description: The total amount of cost to track with the budget. + type: number + etag: + description: The ETag of the Resource Group Consumption Budget + type: string + filter: + description: A filter block as defined below. + properties: + dimension: + description: One or more dimension blocks as defined below + to filter the budget on. + items: + properties: + name: + description: The name of the tag to use for the filter. + type: string + operator: + description: The operator to use for comparison. The + allowed values are In. Defaults to In. + type: string + values: + description: Specifies a list of values for the tag. + items: + type: string + type: array + type: object + type: array + not: + description: A not block as defined below to filter the budget + on. 
This is deprecated as the API no longer supports it + and will be removed in version 4.0 of the provider. + properties: + dimension: + description: One dimension block as defined below to filter + the budget on. Conflicts with tag. + properties: + name: + description: The name of the tag to use for the filter. + type: string + operator: + description: The operator to use for comparison. The + allowed values are In. Defaults to In. + type: string + values: + description: Specifies a list of values for the tag. + items: + type: string + type: array + type: object + tag: + description: One tag block as defined below to filter + the budget on. Conflicts with dimension. + properties: + name: + description: The name of the tag to use for the filter. + type: string + operator: + description: The operator to use for comparison. The + allowed values are In. Defaults to In. + type: string + values: + description: Specifies a list of values for the tag. + items: + type: string + type: array + type: object + type: object + tag: + description: One or more tag blocks as defined below to filter + the budget on. + items: + properties: + name: + description: The name of the tag to use for the filter. + type: string + operator: + description: The operator to use for comparison. The + allowed values are In. Defaults to In. + type: string + values: + description: Specifies a list of values for the tag. + items: + type: string + type: array + type: object + type: array + type: object + name: + description: The name which should be used for this Resource Group + Consumption Budget. Changing this forces a new Resource Group + Consumption Budget to be created. + type: string + notification: + description: One or more notification blocks as defined below. + items: + properties: + contactEmails: + description: Specifies a list of email addresses to send + the budget notification to when the threshold is exceeded. 
+ items: + type: string + type: array + contactGroups: + description: Specifies a list of Action Group IDs to send + the budget notification to when the threshold is exceeded. + items: + type: string + type: array + contactRoles: + description: Specifies a list of contact roles to send the + budget notification to when the threshold is exceeded. + items: + type: string + type: array + enabled: + description: Should the notification be enabled? Defaults + to true. + type: boolean + operator: + description: The comparison operator for the notification. + Must be one of EqualTo, GreaterThan, or GreaterThanOrEqualTo. + type: string + threshold: + description: Threshold value associated with a notification. + Notification is sent when the cost exceeded the threshold. + It is always percent and has to be between 0 and 1000. + type: number + thresholdType: + description: The type of threshold for the notification. + This determines whether the notification is triggered + by forecasted costs or actual costs. The allowed values + are Actual and Forecasted. Default is Actual. Changing + this forces a new resource to be created. + type: string + type: object + type: array + resourceGroupId: + description: The ID of the Resource Group to create the consumption + budget for in the form of /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/resourceGroup1. + Changing this forces a new Resource Group Consumption Budget + to be created. + type: string + resourceGroupIdRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupIdSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + timeGrain: + description: The time covered by a budget. Tracking of the amount + will be reset based on the time grain. Must be one of BillingAnnual, + BillingMonth, BillingQuarter, Annually, Monthly and Quarterly. + Defaults to Monthly. 
Changing this forces a new resource to + be created. + type: string + timePeriod: + description: A time_period block as defined below. + properties: + endDate: + description: The end date for the budget. If not set this + will be 10 years after the start date. + type: string + startDate: + description: The start date for the budget. The start date + must be first of the month and should be less than the end + date. Budget start date must be on or after June 1, 2017. + Future start date should not be more than twelve months. + Past start date should be selected within the timegrain + period. Changing this forces a new Resource Group Consumption + Budget to be created. + type: string + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.amount is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.amount) + || (has(self.initProvider) && has(self.initProvider.amount))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.notification is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.notification) + || (has(self.initProvider) && has(self.initProvider.notification))' + - message: spec.forProvider.timePeriod is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.timePeriod) + || (has(self.initProvider) && has(self.initProvider.timePeriod))' + status: + description: BudgetResourceGroupStatus defines the observed state of BudgetResourceGroup. + properties: + atProvider: + properties: + amount: + description: The total amount of cost to track with the budget. 
+ type: number + etag: + description: The ETag of the Resource Group Consumption Budget + type: string + filter: + description: A filter block as defined below. + properties: + dimension: + description: One or more dimension blocks as defined below + to filter the budget on. + items: + properties: + name: + description: The name of the tag to use for the filter. + type: string + operator: + description: The operator to use for comparison. The + allowed values are In. Defaults to In. + type: string + values: + description: Specifies a list of values for the tag. + items: + type: string + type: array + type: object + type: array + not: + description: A not block as defined below to filter the budget + on. This is deprecated as the API no longer supports it + and will be removed in version 4.0 of the provider. + properties: + dimension: + description: One dimension block as defined below to filter + the budget on. Conflicts with tag. + properties: + name: + description: The name of the tag to use for the filter. + type: string + operator: + description: The operator to use for comparison. The + allowed values are In. Defaults to In. + type: string + values: + description: Specifies a list of values for the tag. + items: + type: string + type: array + type: object + tag: + description: One tag block as defined below to filter + the budget on. Conflicts with dimension. + properties: + name: + description: The name of the tag to use for the filter. + type: string + operator: + description: The operator to use for comparison. The + allowed values are In. Defaults to In. + type: string + values: + description: Specifies a list of values for the tag. + items: + type: string + type: array + type: object + type: object + tag: + description: One or more tag blocks as defined below to filter + the budget on. + items: + properties: + name: + description: The name of the tag to use for the filter. + type: string + operator: + description: The operator to use for comparison. 
The + allowed values are In. Defaults to In. + type: string + values: + description: Specifies a list of values for the tag. + items: + type: string + type: array + type: object + type: array + type: object + id: + description: The ID of the Resource Group Consumption Budget. + type: string + name: + description: The name which should be used for this Resource Group + Consumption Budget. Changing this forces a new Resource Group + Consumption Budget to be created. + type: string + notification: + description: One or more notification blocks as defined below. + items: + properties: + contactEmails: + description: Specifies a list of email addresses to send + the budget notification to when the threshold is exceeded. + items: + type: string + type: array + contactGroups: + description: Specifies a list of Action Group IDs to send + the budget notification to when the threshold is exceeded. + items: + type: string + type: array + contactRoles: + description: Specifies a list of contact roles to send the + budget notification to when the threshold is exceeded. + items: + type: string + type: array + enabled: + description: Should the notification be enabled? Defaults + to true. + type: boolean + operator: + description: The comparison operator for the notification. + Must be one of EqualTo, GreaterThan, or GreaterThanOrEqualTo. + type: string + threshold: + description: Threshold value associated with a notification. + Notification is sent when the cost exceeded the threshold. + It is always percent and has to be between 0 and 1000. + type: number + thresholdType: + description: The type of threshold for the notification. + This determines whether the notification is triggered + by forecasted costs or actual costs. The allowed values + are Actual and Forecasted. Default is Actual. Changing + this forces a new resource to be created. 
+ type: string + type: object + type: array + resourceGroupId: + description: The ID of the Resource Group to create the consumption + budget for in the form of /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/resourceGroup1. + Changing this forces a new Resource Group Consumption Budget + to be created. + type: string + timeGrain: + description: The time covered by a budget. Tracking of the amount + will be reset based on the time grain. Must be one of BillingAnnual, + BillingMonth, BillingQuarter, Annually, Monthly and Quarterly. + Defaults to Monthly. Changing this forces a new resource to + be created. + type: string + timePeriod: + description: A time_period block as defined below. + properties: + endDate: + description: The end date for the budget. If not set this + will be 10 years after the start date. + type: string + startDate: + description: The start date for the budget. The start date + must be first of the month and should be less than the end + date. Budget start date must be on or after June 1, 2017. + Future start date should not be more than twelve months. + Past start date should be selected within the timegrain + period. Changing this forces a new Resource Group Consumption + Budget to be created. + type: string + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/consumption.azure.upbound.io_budgetsubscriptions.yaml b/package/crds/consumption.azure.upbound.io_budgetsubscriptions.yaml index 9c329fdfd..31270232b 100644 --- a/package/crds/consumption.azure.upbound.io_budgetsubscriptions.yaml +++ b/package/crds/consumption.azure.upbound.io_budgetsubscriptions.yaml @@ -1025,3 +1025,968 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: BudgetSubscription is the Schema for 
the BudgetSubscriptions + API. Manages a Subscription Consumption Budget. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: BudgetSubscriptionSpec defines the desired state of BudgetSubscription + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + amount: + description: The total amount of cost to track with the budget. + type: number + etag: + description: The ETag of the Subscription Consumption Budget. + type: string + filter: + description: A filter block as defined below. + properties: + dimension: + description: One or more dimension blocks as defined below + to filter the budget on. 
+ items: + properties: + name: + description: The name of the tag to use for the filter. + type: string + operator: + description: The operator to use for comparison. The + allowed values are In. Defaults to In. + type: string + values: + description: Specifies a list of values for the tag. + items: + type: string + type: array + type: object + type: array + not: + description: A not block as defined below to filter the budget + on. This is deprecated as the API no longer supports it + and will be removed in version 4.0 of the provider. + properties: + dimension: + description: One dimension block as defined below to filter + the budget on. Conflicts with tag. + properties: + name: + description: The name of the tag to use for the filter. + type: string + operator: + description: The operator to use for comparison. The + allowed values are In. Defaults to In. + type: string + values: + description: Specifies a list of values for the tag. + items: + type: string + type: array + type: object + tag: + description: One tag block as defined below to filter + the budget on. Conflicts with dimension. + properties: + name: + description: The name of the tag to use for the filter. + type: string + operator: + description: The operator to use for comparison. The + allowed values are In. Defaults to In. + type: string + values: + description: Specifies a list of values for the tag. + items: + type: string + type: array + type: object + type: object + tag: + description: One or more tag blocks as defined below to filter + the budget on. + items: + properties: + name: + description: The name of the tag to use for the filter. + type: string + operator: + description: The operator to use for comparison. The + allowed values are In. Defaults to In. + type: string + values: + description: Specifies a list of values for the tag. 
+ items: + type: string + type: array + type: object + type: array + type: object + notification: + description: One or more notification blocks as defined below. + items: + properties: + contactEmails: + description: Specifies a list of email addresses to send + the budget notification to when the threshold is exceeded. + items: + type: string + type: array + contactGroups: + description: Specifies a list of Action Group IDs to send + the budget notification to when the threshold is exceeded. + items: + type: string + type: array + contactGroupsRefs: + description: References to MonitorActionGroup in insights + to populate contactGroups. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + contactGroupsSelector: + description: Selector for a list of MonitorActionGroup in + insights to populate contactGroups. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + contactRoles: + description: Specifies a list of contact roles to send the + budget notification to when the threshold is exceeded. + items: + type: string + type: array + enabled: + description: Should the notification be enabled? Defaults + to true. + type: boolean + operator: + description: The comparison operator for the notification. + Must be one of EqualTo, GreaterThan, or GreaterThanOrEqualTo. + type: string + threshold: + description: Threshold value associated with a notification. + Notification is sent when the cost exceeded the threshold. + It is always percent and has to be between 0 and 1000. + type: number + thresholdType: + description: The type of threshold for the notification. + This determines whether the notification is triggered + by forecasted costs or actual costs. The allowed values + are Actual and Forecasted. Default is Actual. Changing + this forces a new resource to be created. 
+ type: string + type: object + type: array + subscriptionId: + description: The ID of the Subscription for which to create a + Consumption Budget. Changing this forces a new resource to be + created. + type: string + timeGrain: + description: The time covered by a budget. Tracking of the amount + will be reset based on the time grain. Must be one of BillingAnnual, + BillingMonth, BillingQuarter, Annually, Monthly and Quarterly. + Defaults to Monthly. Changing this forces a new resource to + be created. + type: string + timePeriod: + description: A time_period block as defined below. + properties: + endDate: + description: The end date for the budget. If not set this + will be 10 years after the start date. + type: string + startDate: + description: The start date for the budget. The start date + must be first of the month and should be less than the end + date. Budget start date must be on or after June 1, 2017. + Future start date should not be more than twelve months. + Past start date should be selected within the timegrain + period. Changing this forces a new Subscription Consumption + Budget to be created. + type: string + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + amount: + description: The total amount of cost to track with the budget. 
+ type: number + etag: + description: The ETag of the Subscription Consumption Budget. + type: string + filter: + description: A filter block as defined below. + properties: + dimension: + description: One or more dimension blocks as defined below + to filter the budget on. + items: + properties: + name: + description: The name of the tag to use for the filter. + type: string + operator: + description: The operator to use for comparison. The + allowed values are In. Defaults to In. + type: string + values: + description: Specifies a list of values for the tag. + items: + type: string + type: array + type: object + type: array + not: + description: A not block as defined below to filter the budget + on. This is deprecated as the API no longer supports it + and will be removed in version 4.0 of the provider. + properties: + dimension: + description: One dimension block as defined below to filter + the budget on. Conflicts with tag. + properties: + name: + description: The name of the tag to use for the filter. + type: string + operator: + description: The operator to use for comparison. The + allowed values are In. Defaults to In. + type: string + values: + description: Specifies a list of values for the tag. + items: + type: string + type: array + type: object + tag: + description: One tag block as defined below to filter + the budget on. Conflicts with dimension. + properties: + name: + description: The name of the tag to use for the filter. + type: string + operator: + description: The operator to use for comparison. The + allowed values are In. Defaults to In. + type: string + values: + description: Specifies a list of values for the tag. + items: + type: string + type: array + type: object + type: object + tag: + description: One or more tag blocks as defined below to filter + the budget on. + items: + properties: + name: + description: The name of the tag to use for the filter. + type: string + operator: + description: The operator to use for comparison. 
The + allowed values are In. Defaults to In. + type: string + values: + description: Specifies a list of values for the tag. + items: + type: string + type: array + type: object + type: array + type: object + notification: + description: One or more notification blocks as defined below. + items: + properties: + contactEmails: + description: Specifies a list of email addresses to send + the budget notification to when the threshold is exceeded. + items: + type: string + type: array + contactGroups: + description: Specifies a list of Action Group IDs to send + the budget notification to when the threshold is exceeded. + items: + type: string + type: array + contactGroupsRefs: + description: References to MonitorActionGroup in insights + to populate contactGroups. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + contactGroupsSelector: + description: Selector for a list of MonitorActionGroup in + insights to populate contactGroups. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + contactRoles: + description: Specifies a list of contact roles to send the + budget notification to when the threshold is exceeded. + items: + type: string + type: array + enabled: + description: Should the notification be enabled? Defaults + to true. + type: boolean + operator: + description: The comparison operator for the notification. + Must be one of EqualTo, GreaterThan, or GreaterThanOrEqualTo. + type: string + threshold: + description: Threshold value associated with a notification. + Notification is sent when the cost exceeded the threshold. + It is always percent and has to be between 0 and 1000. + type: number + thresholdType: + description: The type of threshold for the notification. + This determines whether the notification is triggered + by forecasted costs or actual costs. The allowed values + are Actual and Forecasted. Default is Actual. 
Changing + this forces a new resource to be created. + type: string + type: object + type: array + subscriptionId: + description: The ID of the Subscription for which to create a + Consumption Budget. Changing this forces a new resource to be + created. + type: string + timeGrain: + description: The time covered by a budget. Tracking of the amount + will be reset based on the time grain. Must be one of BillingAnnual, + BillingMonth, BillingQuarter, Annually, Monthly and Quarterly. + Defaults to Monthly. Changing this forces a new resource to + be created. + type: string + timePeriod: + description: A time_period block as defined below. + properties: + endDate: + description: The end date for the budget. If not set this + will be 10 years after the start date. + type: string + startDate: + description: The start date for the budget. The start date + must be first of the month and should be less than the end + date. Budget start date must be on or after June 1, 2017. + Future start date should not be more than twelve months. + Past start date should be selected within the timegrain + period. Changing this forces a new Subscription Consumption + Budget to be created. + type: string + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.amount is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.amount) + || (has(self.initProvider) && has(self.initProvider.amount))' + - message: spec.forProvider.notification is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.notification) + || (has(self.initProvider) && has(self.initProvider.notification))' + - message: spec.forProvider.subscriptionId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.subscriptionId) + || (has(self.initProvider) && has(self.initProvider.subscriptionId))' + - message: spec.forProvider.timePeriod is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in 
self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.timePeriod) + || (has(self.initProvider) && has(self.initProvider.timePeriod))' + status: + description: BudgetSubscriptionStatus defines the observed state of BudgetSubscription. + properties: + atProvider: + properties: + amount: + description: The total amount of cost to track with the budget. + type: number + etag: + description: The ETag of the Subscription Consumption Budget. + type: string + filter: + description: A filter block as defined below. + properties: + dimension: + description: One or more dimension blocks as defined below + to filter the budget on. + items: + properties: + name: + description: The name of the tag to use for the filter. + type: string + operator: + description: The operator to use for comparison. The + allowed values are In. Defaults to In. + type: string + values: + description: Specifies a list of values for the tag. + items: + type: string + type: array + type: object + type: array + not: + description: A not block as defined below to filter the budget + on. This is deprecated as the API no longer supports it + and will be removed in version 4.0 of the provider. + properties: + dimension: + description: One dimension block as defined below to filter + the budget on. Conflicts with tag. + properties: + name: + description: The name of the tag to use for the filter. + type: string + operator: + description: The operator to use for comparison. The + allowed values are In. Defaults to In. + type: string + values: + description: Specifies a list of values for the tag. + items: + type: string + type: array + type: object + tag: + description: One tag block as defined below to filter + the budget on. Conflicts with dimension. + properties: + name: + description: The name of the tag to use for the filter. + type: string + operator: + description: The operator to use for comparison. The + allowed values are In. Defaults to In. 
+ type: string + values: + description: Specifies a list of values for the tag. + items: + type: string + type: array + type: object + type: object + tag: + description: One or more tag blocks as defined below to filter + the budget on. + items: + properties: + name: + description: The name of the tag to use for the filter. + type: string + operator: + description: The operator to use for comparison. The + allowed values are In. Defaults to In. + type: string + values: + description: Specifies a list of values for the tag. + items: + type: string + type: array + type: object + type: array + type: object + id: + description: The ID of the Subscription Consumption Budget. + type: string + notification: + description: One or more notification blocks as defined below. + items: + properties: + contactEmails: + description: Specifies a list of email addresses to send + the budget notification to when the threshold is exceeded. + items: + type: string + type: array + contactGroups: + description: Specifies a list of Action Group IDs to send + the budget notification to when the threshold is exceeded. + items: + type: string + type: array + contactRoles: + description: Specifies a list of contact roles to send the + budget notification to when the threshold is exceeded. + items: + type: string + type: array + enabled: + description: Should the notification be enabled? Defaults + to true. + type: boolean + operator: + description: The comparison operator for the notification. + Must be one of EqualTo, GreaterThan, or GreaterThanOrEqualTo. + type: string + threshold: + description: Threshold value associated with a notification. + Notification is sent when the cost exceeded the threshold. + It is always percent and has to be between 0 and 1000. + type: number + thresholdType: + description: The type of threshold for the notification. + This determines whether the notification is triggered + by forecasted costs or actual costs. The allowed values + are Actual and Forecasted. 
Default is Actual. Changing + this forces a new resource to be created. + type: string + type: object + type: array + subscriptionId: + description: The ID of the Subscription for which to create a + Consumption Budget. Changing this forces a new resource to be + created. + type: string + timeGrain: + description: The time covered by a budget. Tracking of the amount + will be reset based on the time grain. Must be one of BillingAnnual, + BillingMonth, BillingQuarter, Annually, Monthly and Quarterly. + Defaults to Monthly. Changing this forces a new resource to + be created. + type: string + timePeriod: + description: A time_period block as defined below. + properties: + endDate: + description: The end date for the budget. If not set this + will be 10 years after the start date. + type: string + startDate: + description: The start date for the budget. The start date + must be first of the month and should be less than the end + date. Budget start date must be on or after June 1, 2017. + Future start date should not be more than twelve months. + Past start date should be selected within the timegrain + period. Changing this forces a new Subscription Consumption + Budget to be created. + type: string + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/containerapp.azure.upbound.io_containerapps.yaml b/package/crds/containerapp.azure.upbound.io_containerapps.yaml index 17f189d9d..e1cd3ed68 100644 --- a/package/crds/containerapp.azure.upbound.io_containerapps.yaml +++ b/package/crds/containerapp.azure.upbound.io_containerapps.yaml @@ -2876,3 +2876,2819 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ContainerApp is the Schema for the ContainerApps API. 
Manages + a Container App. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ContainerAppSpec defines the desired state of ContainerApp + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + containerAppEnvironmentId: + description: |- + The ID of the Container App Environment within which this Container App should exist. Changing this forces a new resource to be created. + The ID of the Container App Environment to host this Container App. + type: string + containerAppEnvironmentIdRef: + description: Reference to a Environment in containerapp to populate + containerAppEnvironmentId. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + containerAppEnvironmentIdSelector: + description: Selector for a Environment in containerapp to populate + containerAppEnvironmentId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + dapr: + description: A dapr block as detailed below. + properties: + appId: + description: |- + The Dapr Application Identifier. + The Dapr Application Identifier. + type: string + appPort: + description: |- + The port which the application is listening on. This is the same as the ingress port. + The port which the application is listening on. This is the same as the `ingress` port. + type: number + appProtocol: + description: |- + The protocol for the app. Possible values include http and grpc. Defaults to http. + The protocol for the app. Possible values include `http` and `grpc`. Defaults to `http`. + type: string + type: object + identity: + description: An identity block as detailed below. + properties: + identityIds: + description: '- A list of one or more Resource IDs for User + Assigned Managed identities to assign. Required when type + is set to UserAssigned or SystemAssigned, UserAssigned.' + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: The type of managed identity to assign. Possible + values are SystemAssigned, UserAssigned, and SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + ingress: + description: An ingress block as detailed below. + properties: + allowInsecureConnections: + description: |- + Should this ingress allow insecure connections? + Should this ingress allow insecure connections? + type: boolean + customDomain: + description: One or more custom_domain block as detailed below. + properties: + certificateBindingType: + description: |- + The Binding type. Possible values include Disabled and SniEnabled. Defaults to Disabled. + The Binding type. Possible values include `Disabled` and `SniEnabled`. Defaults to `Disabled` + type: string + certificateId: + description: The ID of the Container App Environment Certificate. 
+ type: string + name: + description: |- + The name of the Volume to be mounted in the container. + The hostname of the Certificate. Must be the CN or a named SAN in the certificate. + type: string + type: object + exposedPort: + description: |- + The exposed port on the container for the Ingress traffic. + The exposed port on the container for the Ingress traffic. + type: number + externalEnabled: + description: |- + Are connections to this Ingress from outside the Container App Environment enabled? Defaults to false. + Is this an external Ingress. + type: boolean + ipSecurityRestriction: + description: One or more ip_security_restriction blocks for + IP-filtering rules as defined below. + items: + properties: + action: + description: |- + The IP-filter action. Allow or Deny. + The action. Allow or Deny. + type: string + description: + description: |- + Describe the IP restriction rule that is being sent to the container-app. + Describe the IP restriction rule that is being sent to the container-app. + type: string + ipAddressRange: + description: |- + CIDR notation to match incoming IP address. + CIDR notation to match incoming IP address. + type: string + name: + description: |- + The name of the Volume to be mounted in the container. + Name for the IP restriction rule. + type: string + type: object + type: array + targetPort: + description: |- + The target port on the container for the Ingress traffic. + The target port on the container for the Ingress traffic. + type: number + trafficWeight: + description: One or more traffic_weight blocks as detailed + below. + items: + properties: + label: + description: |- + The label to apply to the revision as a name prefix for routing traffic. + The label to apply to the revision as a name prefix for routing traffic. + type: string + latestRevision: + description: |- + This traffic Weight applies to the latest stable Container Revision. At most only one traffic_weight block can have the latest_revision set to true. 
+ This traffic Weight relates to the latest stable Container Revision. + type: boolean + percentage: + description: |- + The percentage of traffic which should be sent this revision. + The percentage of traffic to send to this revision. + type: number + revisionSuffix: + description: |- + The suffix for the revision. This value must be unique for the lifetime of the Resource. If omitted the service will use a hash function to create one. + The suffix string to append to the revision. This must be unique for the Container App's lifetime. A default hash created by the service will be used if this value is omitted. + type: string + type: object + type: array + transport: + description: |- + The transport method for the Ingress. Possible values are auto, http, http2 and tcp. Defaults to auto. + The transport method for the Ingress. Possible values include `auto`, `http`, and `http2`, `tcp`. Defaults to `auto` + type: string + type: object + registry: + description: A registry block as detailed below. + items: + properties: + identity: + description: |- + Resource ID for the User Assigned Managed identity to use when pulling from the Container Registry. + ID of the System or User Managed Identity used to pull images from the Container Registry + type: string + passwordSecretName: + description: |- + The name of the Secret Reference containing the password value for this user on the Container Registry, username must also be supplied. + The name of the Secret Reference containing the password value for this user on the Container Registry. + type: string + server: + description: |- + The hostname for the Container Registry. + The hostname for the Container Registry. + type: string + username: + description: |- + The username to use for this Container Registry, password_secret_name must also be supplied.. + The username to use for this Container Registry. 
+ type: string + type: object + type: array + resourceGroupName: + description: The name of the resource group in which the Container + App Environment is to be created. Changing this forces a new + resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + revisionMode: + description: The revisions operational mode for the Container + App. Possible values include Single and Multiple. In Single + mode, a single revision is in operation at any given time. In + Multiple mode, more than one revision can be active at a time + and can be configured with load distribution via the traffic_weight + block in the ingress configuration. + type: string + secret: + description: One or more secret block as detailed below. + items: + properties: + nameSecretRef: + description: |- + The Secret name. + The Secret name. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + valueSecretRef: + description: |- + The value for this secret. + The value for this secret. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + required: + - nameSecretRef + - valueSecretRef + type: object + type: array + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the Container App. + type: object + x-kubernetes-map-type: granular + template: + description: A template block as detailed below. 
+ properties: + azureQueueScaleRule: + description: One or more azure_queue_scale_rule blocks as + defined below. + items: + properties: + authentication: + description: Zero or more authentication blocks as defined + below. + items: + properties: + secretName: + description: The name of the secret that contains + the value for this environment variable. + type: string + triggerParameter: + description: The Trigger Parameter name to use + the supply the value retrieved from the secret_name. + type: string + type: object + type: array + name: + description: The name of the Volume to be mounted in + the container. + type: string + queueLength: + description: The value of the length of the queue to + trigger scaling actions. + type: number + queueName: + description: The name of the Azure Queue + type: string + type: object + type: array + container: + description: One or more container blocks as detailed below. + items: + properties: + args: + description: |- + A list of extra arguments to pass to the container. + A list of args to pass to the container. + items: + type: string + type: array + command: + description: |- + A command to pass to the container to override the default. This is provided as a list of command line elements without spaces. + A command to pass to the container to override the default. This is provided as a list of command line elements without spaces. + items: + type: string + type: array + cpu: + description: |- + The amount of vCPU to allocate to the container. Possible values include 0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 1.75, and 2.0. When there's a workload profile specified, there's no such constraint. + The amount of vCPU to allocate to the container. Possible values include `0.25`, `0.5`, `0.75`, `1.0`, `1.25`, `1.5`, `1.75`, and `2.0`. **NOTE:** `cpu` and `memory` must be specified in `0.25'/'0.5Gi` combination increments. e.g. `1.0` / `2.0` or `0.5` / `1.0`. When there's a workload profile specified, there's no such constraint. 
+ type: number + env: + description: One or more env blocks as detailed below. + items: + properties: + name: + description: |- + The name of the Volume to be mounted in the container. + The name of the environment variable for the container. + type: string + secretName: + description: |- + The name of the secret that contains the value for this environment variable. + The name of the secret that contains the value for this environment variable. + type: string + value: + description: |- + The value for this secret. + The value for this environment variable. **NOTE:** This value is ignored if `secret_name` is used + type: string + type: object + type: array + image: + description: |- + The image to use to create the container. + The image to use to create the container. + type: string + livenessProbe: + description: A liveness_probe block as detailed below. + items: + properties: + failureCountThreshold: + description: |- + The number of consecutive failures required to consider this probe as failed. Possible values are between 1 and 10. Defaults to 3. + The number of consecutive failures required to consider this probe as failed. Possible values are between `1` and `10`. Defaults to `3`. + type: number + header: + description: A header block as detailed below. + items: + properties: + name: + description: |- + The name of the Volume to be mounted in the container. + The HTTP Header Name. + type: string + value: + description: |- + The value for this secret. + The HTTP Header value. + type: string + type: object + type: array + host: + description: |- + The value for the host header which should be sent with this probe. If unspecified, the IP Address of the Pod is used as the host header. Setting a value for Host in headers can be used to override this for HTTP and HTTPS type probes. + The probe hostname. Defaults to the pod IP address. Setting a value for `Host` in `headers` can be used to override this for `http` and `https` type probes. 
+ type: string + initialDelay: + description: |- + The time in seconds to wait after the container has started before the probe is started. + The time in seconds to wait after the container has started before the probe is started. + type: number + intervalSeconds: + description: |- + How often, in seconds, the probe should run. Possible values are between 1 and 240. Defaults to 10 + How often, in seconds, the probe should run. Possible values are between `1` and `240`. Defaults to `10` + type: number + path: + description: |- + The path in the container at which to mount this volume. + The URI to use with the `host` for http type probes. Not valid for `TCP` type probes. Defaults to `/`. + type: string + port: + description: |- + The port number on which to connect. Possible values are between 1 and 65535. + The port number on which to connect. Possible values are between `1` and `65535`. + type: number + timeout: + description: |- + Time in seconds after which the probe times out. Possible values are in the range 1 - 240. Defaults to 1. + Time in seconds after which the probe times out. Possible values are between `1` an `240`. Defaults to `1`. + type: number + transport: + description: |- + Type of probe. Possible values are TCP, HTTP, and HTTPS. + Type of probe. Possible values are `TCP`, `HTTP`, and `HTTPS`. + type: string + type: object + type: array + memory: + description: |- + The amount of memory to allocate to the container. Possible values are 0.5Gi, 1Gi, 1.5Gi, 2Gi, 2.5Gi, 3Gi, 3.5Gi and 4Gi. When there's a workload profile specified, there's no such constraint. + The amount of memory to allocate to the container. Possible values include `0.5Gi`, `1.0Gi`, `1.5Gi`, `2.0Gi`, `2.5Gi`, `3.0Gi`, `3.5Gi`, and `4.0Gi`. **NOTE:** `cpu` and `memory` must be specified in `0.25'/'0.5Gi` combination increments. e.g. `1.25` / `2.5Gi` or `0.75` / `1.5Gi`. When there's a workload profile specified, there's no such constraint. 
+ type: string + name: + description: |- + The name of the Volume to be mounted in the container. + The name of the container. + type: string + readinessProbe: + description: A readiness_probe block as detailed below. + items: + properties: + failureCountThreshold: + description: |- + The number of consecutive failures required to consider this probe as failed. Possible values are between 1 and 10. Defaults to 3. + The number of consecutive failures required to consider this probe as failed. Possible values are between `1` and `10`. Defaults to `3`. + type: number + header: + description: A header block as detailed below. + items: + properties: + name: + description: |- + The name of the Volume to be mounted in the container. + The HTTP Header Name. + type: string + value: + description: |- + The value for this secret. + The HTTP Header value. + type: string + type: object + type: array + host: + description: |- + The value for the host header which should be sent with this probe. If unspecified, the IP Address of the Pod is used as the host header. Setting a value for Host in headers can be used to override this for HTTP and HTTPS type probes. + The probe hostname. Defaults to the pod IP address. Setting a value for `Host` in `headers` can be used to override this for `http` and `https` type probes. + type: string + intervalSeconds: + description: |- + How often, in seconds, the probe should run. Possible values are between 1 and 240. Defaults to 10 + How often, in seconds, the probe should run. Possible values are between `1` and `240`. Defaults to `10` + type: number + path: + description: |- + The path in the container at which to mount this volume. + The URI to use for http type probes. Not valid for `TCP` type probes. Defaults to `/`. + type: string + port: + description: |- + The port number on which to connect. Possible values are between 1 and 65535. + The port number on which to connect. Possible values are between `1` and `65535`. 
+ type: number + successCountThreshold: + description: |- + The number of consecutive successful responses required to consider this probe as successful. Possible values are between 1 and 10. Defaults to 3. + The number of consecutive successful responses required to consider this probe as successful. Possible values are between `1` and `10`. Defaults to `3`. + type: number + timeout: + description: |- + Time in seconds after which the probe times out. Possible values are in the range 1 - 240. Defaults to 1. + Time in seconds after which the probe times out. Possible values are between `1` an `240`. Defaults to `1`. + type: number + transport: + description: |- + Type of probe. Possible values are TCP, HTTP, and HTTPS. + Type of probe. Possible values are `TCP`, `HTTP`, and `HTTPS`. + type: string + type: object + type: array + startupProbe: + description: A startup_probe block as detailed below. + items: + properties: + failureCountThreshold: + description: |- + The number of consecutive failures required to consider this probe as failed. Possible values are between 1 and 10. Defaults to 3. + The number of consecutive failures required to consider this probe as failed. Possible values are between `1` and `10`. Defaults to `3`. + type: number + header: + description: A header block as detailed below. + items: + properties: + name: + description: |- + The name of the Volume to be mounted in the container. + The HTTP Header Name. + type: string + value: + description: |- + The value for this secret. + The HTTP Header value. + type: string + type: object + type: array + host: + description: |- + The value for the host header which should be sent with this probe. If unspecified, the IP Address of the Pod is used as the host header. Setting a value for Host in headers can be used to override this for HTTP and HTTPS type probes. + The probe hostname. Defaults to the pod IP address. 
Setting a value for `Host` in `headers` can be used to override this for `http` and `https` type probes. + type: string + intervalSeconds: + description: |- + How often, in seconds, the probe should run. Possible values are between 1 and 240. Defaults to 10 + How often, in seconds, the probe should run. Possible values are between `1` and `240`. Defaults to `10` + type: number + path: + description: |- + The path in the container at which to mount this volume. + The URI to use with the `host` for http type probes. Not valid for `TCP` type probes. Defaults to `/`. + type: string + port: + description: |- + The port number on which to connect. Possible values are between 1 and 65535. + The port number on which to connect. Possible values are between `1` and `65535`. + type: number + timeout: + description: |- + Time in seconds after which the probe times out. Possible values are in the range 1 - 240. Defaults to 1. + Time in seconds after which the probe times out. Possible values are between `1` an `240`. Defaults to `1`. + type: number + transport: + description: |- + Type of probe. Possible values are TCP, HTTP, and HTTPS. + Type of probe. Possible values are `TCP`, `HTTP`, and `HTTPS`. + type: string + type: object + type: array + volumeMounts: + description: A volume_mounts block as detailed below. + items: + properties: + name: + description: |- + The name of the Volume to be mounted in the container. + The name of the Volume to be mounted in the container. + type: string + path: + description: |- + The path in the container at which to mount this volume. + The path in the container at which to mount this volume. + type: string + type: object + type: array + type: object + type: array + customScaleRule: + description: One or more custom_scale_rule blocks as defined + below. + items: + properties: + authentication: + description: Zero or more authentication blocks as defined + below. 
+ items: + properties: + secretName: + description: The name of the secret that contains + the value for this environment variable. + type: string + triggerParameter: + description: The Trigger Parameter name to use + the supply the value retrieved from the secret_name. + type: string + type: object + type: array + customRuleType: + description: 'The Custom rule type. Possible values + include: activemq, artemis-queue, kafka, pulsar, aws-cloudwatch, + aws-dynamodb, aws-dynamodb-streams, aws-kinesis-stream, + aws-sqs-queue, azure-app-insights, azure-blob, azure-data-explorer, + azure-eventhub, azure-log-analytics, azure-monitor, + azure-pipelines, azure-servicebus, azure-queue, cassandra, + cpu, cron, datadog, elasticsearch, external, external-push, + gcp-stackdriver, gcp-storage, gcp-pubsub, graphite, + http, huawei-cloudeye, ibmmq, influxdb, kubernetes-workload, + liiklus, memory, metrics-api, mongodb, mssql, mysql, + nats-jetstream, stan, tcp, new-relic, openstack-metric, + openstack-swift, postgresql, predictkube, prometheus, + rabbitmq, redis, redis-cluster, redis-sentinel, redis-streams, + redis-cluster-streams, redis-sentinel-streams, selenium-grid,solace-event-queue, + and github-runner.' + type: string + metadata: + additionalProperties: + type: string + description: '- A map of string key-value pairs to configure + the Custom Scale Rule.' + type: object + x-kubernetes-map-type: granular + name: + description: The name of the Volume to be mounted in + the container. + type: string + type: object + type: array + httpScaleRule: + description: One or more http_scale_rule blocks as defined + below. + items: + properties: + authentication: + description: Zero or more authentication blocks as defined + below. + items: + properties: + secretName: + description: The name of the secret that contains + the value for this environment variable. 
+ type: string + triggerParameter: + description: The Trigger Parameter name to use + the supply the value retrieved from the secret_name. + type: string + type: object + type: array + concurrentRequests: + description: '- The number of concurrent requests to + trigger scaling.' + type: string + name: + description: The name of the Volume to be mounted in + the container. + type: string + type: object + type: array + initContainer: + description: The definition of an init container that is part + of the group as documented in the init_container block below. + items: + properties: + args: + description: |- + A list of extra arguments to pass to the container. + A list of args to pass to the container. + items: + type: string + type: array + command: + description: |- + A command to pass to the container to override the default. This is provided as a list of command line elements without spaces. + A command to pass to the container to override the default. This is provided as a list of command line elements without spaces. + items: + type: string + type: array + cpu: + description: |- + The amount of vCPU to allocate to the container. Possible values include 0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 1.75, and 2.0. When there's a workload profile specified, there's no such constraint. + The amount of vCPU to allocate to the container. Possible values include `0.25`, `0.5`, `0.75`, `1.0`, `1.25`, `1.5`, `1.75`, and `2.0`. **NOTE:** `cpu` and `memory` must be specified in `0.25'/'0.5Gi` combination increments. e.g. `1.0` / `2.0` or `0.5` / `1.0`. When there's a workload profile specified, there's no such constraint. + type: number + env: + description: One or more env blocks as detailed below. + items: + properties: + name: + description: |- + The name of the Volume to be mounted in the container. + The name of the environment variable for the container. + type: string + secretName: + description: |- + The name of the secret that contains the value for this environment variable. 
+ The name of the secret that contains the value for this environment variable. + type: string + value: + description: |- + The value for this secret. + The value for this environment variable. **NOTE:** This value is ignored if `secret_name` is used + type: string + type: object + type: array + image: + description: |- + The image to use to create the container. + The image to use to create the container. + type: string + memory: + description: |- + The amount of memory to allocate to the container. Possible values are 0.5Gi, 1Gi, 1.5Gi, 2Gi, 2.5Gi, 3Gi, 3.5Gi and 4Gi. When there's a workload profile specified, there's no such constraint. + The amount of memory to allocate to the container. Possible values include `0.5Gi`, `1.0Gi`, `1.5Gi`, `2.0Gi`, `2.5Gi`, `3.0Gi`, `3.5Gi`, and `4.0Gi`. **NOTE:** `cpu` and `memory` must be specified in `0.25'/'0.5Gi` combination increments. e.g. `1.25` / `2.5Gi` or `0.75` / `1.5Gi`. When there's a workload profile specified, there's no such constraint. + type: string + name: + description: |- + The name of the Volume to be mounted in the container. + The name of the container. + type: string + volumeMounts: + description: A volume_mounts block as detailed below. + items: + properties: + name: + description: |- + The name of the Volume to be mounted in the container. + The name of the Volume to be mounted in the container. + type: string + path: + description: |- + The path in the container at which to mount this volume. + The path in the container at which to mount this volume. + type: string + type: object + type: array + type: object + type: array + maxReplicas: + description: |- + The maximum number of replicas for this container. + The maximum number of replicas for this container. + type: number + minReplicas: + description: |- + The minimum number of replicas for this container. + The minimum number of replicas for this container. + type: number + revisionSuffix: + description: |- + The suffix for the revision. 
This value must be unique for the lifetime of the Resource. If omitted the service will use a hash function to create one. + The suffix for the revision. This value must be unique for the lifetime of the Resource. If omitted the service will use a hash function to create one. + type: string + tcpScaleRule: + description: One or more tcp_scale_rule blocks as defined + below. + items: + properties: + authentication: + description: Zero or more authentication blocks as defined + below. + items: + properties: + secretName: + description: The name of the secret that contains + the value for this environment variable. + type: string + triggerParameter: + description: The Trigger Parameter name to use + the supply the value retrieved from the secret_name. + type: string + type: object + type: array + concurrentRequests: + description: '- The number of concurrent requests to + trigger scaling.' + type: string + name: + description: The name of the Volume to be mounted in + the container. + type: string + type: object + type: array + volume: + description: A volume block as detailed below. + items: + properties: + name: + description: |- + The name of the Volume to be mounted in the container. + The name of the volume. + type: string + storageName: + description: |- + The name of the AzureFile storage. + The name of the `AzureFile` storage. Required when `storage_type` is `AzureFile` + type: string + storageType: + description: |- + The type of storage volume. Possible values are AzureFile, EmptyDir and Secret. Defaults to EmptyDir. + The type of storage volume. Possible values include `AzureFile` and `EmptyDir`. Defaults to `EmptyDir`. + type: string + type: object + type: array + type: object + workloadProfileName: + description: The name of the Workload Profile in the Container + App Environment to place this Container App. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. 
It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + containerAppEnvironmentId: + description: |- + The ID of the Container App Environment within which this Container App should exist. Changing this forces a new resource to be created. + The ID of the Container App Environment to host this Container App. + type: string + containerAppEnvironmentIdRef: + description: Reference to a Environment in containerapp to populate + containerAppEnvironmentId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + containerAppEnvironmentIdSelector: + description: Selector for a Environment in containerapp to populate + containerAppEnvironmentId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + dapr: + description: A dapr block as detailed below. + properties: + appId: + description: |- + The Dapr Application Identifier. + The Dapr Application Identifier. + type: string + appPort: + description: |- + The port which the application is listening on. This is the same as the ingress port. + The port which the application is listening on. This is the same as the `ingress` port. + type: number + appProtocol: + description: |- + The protocol for the app. Possible values include http and grpc. Defaults to http. + The protocol for the app. Possible values include `http` and `grpc`. Defaults to `http`. 
+ type: string + type: object + identity: + description: An identity block as detailed below. + properties: + identityIds: + description: '- A list of one or more Resource IDs for User + Assigned Managed identities to assign. Required when type + is set to UserAssigned or SystemAssigned, UserAssigned.' + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: The type of managed identity to assign. Possible + values are SystemAssigned, UserAssigned, and SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + ingress: + description: An ingress block as detailed below. + properties: + allowInsecureConnections: + description: |- + Should this ingress allow insecure connections? + Should this ingress allow insecure connections? + type: boolean + customDomain: + description: One or more custom_domain block as detailed below. + properties: + certificateBindingType: + description: |- + The Binding type. Possible values include Disabled and SniEnabled. Defaults to Disabled. + The Binding type. Possible values include `Disabled` and `SniEnabled`. Defaults to `Disabled` + type: string + certificateId: + description: The ID of the Container App Environment Certificate. + type: string + name: + description: |- + The name of the Volume to be mounted in the container. + The hostname of the Certificate. Must be the CN or a named SAN in the certificate. + type: string + type: object + exposedPort: + description: |- + The exposed port on the container for the Ingress traffic. + The exposed port on the container for the Ingress traffic. + type: number + externalEnabled: + description: |- + Are connections to this Ingress from outside the Container App Environment enabled? Defaults to false. + Is this an external Ingress. + type: boolean + ipSecurityRestriction: + description: One or more ip_security_restriction blocks for + IP-filtering rules as defined below. 
+ items: + properties: + action: + description: |- + The IP-filter action. Allow or Deny. + The action. Allow or Deny. + type: string + description: + description: |- + Describe the IP restriction rule that is being sent to the container-app. + Describe the IP restriction rule that is being sent to the container-app. + type: string + ipAddressRange: + description: |- + CIDR notation to match incoming IP address. + CIDR notation to match incoming IP address. + type: string + name: + description: |- + The name of the Volume to be mounted in the container. + Name for the IP restriction rule. + type: string + type: object + type: array + targetPort: + description: |- + The target port on the container for the Ingress traffic. + The target port on the container for the Ingress traffic. + type: number + trafficWeight: + description: One or more traffic_weight blocks as detailed + below. + items: + properties: + label: + description: |- + The label to apply to the revision as a name prefix for routing traffic. + The label to apply to the revision as a name prefix for routing traffic. + type: string + latestRevision: + description: |- + This traffic Weight applies to the latest stable Container Revision. At most only one traffic_weight block can have the latest_revision set to true. + This traffic Weight relates to the latest stable Container Revision. + type: boolean + percentage: + description: |- + The percentage of traffic which should be sent this revision. + The percentage of traffic to send to this revision. + type: number + revisionSuffix: + description: |- + The suffix for the revision. This value must be unique for the lifetime of the Resource. If omitted the service will use a hash function to create one. + The suffix string to append to the revision. This must be unique for the Container App's lifetime. A default hash created by the service will be used if this value is omitted. 
+ type: string + type: object + type: array + transport: + description: |- + The transport method for the Ingress. Possible values are auto, http, http2 and tcp. Defaults to auto. + The transport method for the Ingress. Possible values include `auto`, `http`, and `http2`, `tcp`. Defaults to `auto` + type: string + type: object + registry: + description: A registry block as detailed below. + items: + properties: + identity: + description: |- + Resource ID for the User Assigned Managed identity to use when pulling from the Container Registry. + ID of the System or User Managed Identity used to pull images from the Container Registry + type: string + passwordSecretName: + description: |- + The name of the Secret Reference containing the password value for this user on the Container Registry, username must also be supplied. + The name of the Secret Reference containing the password value for this user on the Container Registry. + type: string + server: + description: |- + The hostname for the Container Registry. + The hostname for the Container Registry. + type: string + username: + description: |- + The username to use for this Container Registry, password_secret_name must also be supplied.. + The username to use for this Container Registry. + type: string + type: object + type: array + revisionMode: + description: The revisions operational mode for the Container + App. Possible values include Single and Multiple. In Single + mode, a single revision is in operation at any given time. In + Multiple mode, more than one revision can be active at a time + and can be configured with load distribution via the traffic_weight + block in the ingress configuration. + type: string + secret: + description: One or more secret block as detailed below. + items: + type: object + type: array + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the Container App. 
+ type: object + x-kubernetes-map-type: granular + template: + description: A template block as detailed below. + properties: + azureQueueScaleRule: + description: One or more azure_queue_scale_rule blocks as + defined below. + items: + properties: + authentication: + description: Zero or more authentication blocks as defined + below. + items: + properties: + secretName: + description: The name of the secret that contains + the value for this environment variable. + type: string + triggerParameter: + description: The Trigger Parameter name to use + the supply the value retrieved from the secret_name. + type: string + type: object + type: array + name: + description: The name of the Volume to be mounted in + the container. + type: string + queueLength: + description: The value of the length of the queue to + trigger scaling actions. + type: number + queueName: + description: The name of the Azure Queue + type: string + type: object + type: array + container: + description: One or more container blocks as detailed below. + items: + properties: + args: + description: |- + A list of extra arguments to pass to the container. + A list of args to pass to the container. + items: + type: string + type: array + command: + description: |- + A command to pass to the container to override the default. This is provided as a list of command line elements without spaces. + A command to pass to the container to override the default. This is provided as a list of command line elements without spaces. + items: + type: string + type: array + cpu: + description: |- + The amount of vCPU to allocate to the container. Possible values include 0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 1.75, and 2.0. When there's a workload profile specified, there's no such constraint. + The amount of vCPU to allocate to the container. Possible values include `0.25`, `0.5`, `0.75`, `1.0`, `1.25`, `1.5`, `1.75`, and `2.0`. **NOTE:** `cpu` and `memory` must be specified in `0.25'/'0.5Gi` combination increments. e.g. 
`1.0` / `2.0` or `0.5` / `1.0`. When there's a workload profile specified, there's no such constraint. + type: number + env: + description: One or more env blocks as detailed below. + items: + properties: + name: + description: |- + The name of the Volume to be mounted in the container. + The name of the environment variable for the container. + type: string + secretName: + description: |- + The name of the secret that contains the value for this environment variable. + The name of the secret that contains the value for this environment variable. + type: string + value: + description: |- + The value for this secret. + The value for this environment variable. **NOTE:** This value is ignored if `secret_name` is used + type: string + type: object + type: array + image: + description: |- + The image to use to create the container. + The image to use to create the container. + type: string + livenessProbe: + description: A liveness_probe block as detailed below. + items: + properties: + failureCountThreshold: + description: |- + The number of consecutive failures required to consider this probe as failed. Possible values are between 1 and 10. Defaults to 3. + The number of consecutive failures required to consider this probe as failed. Possible values are between `1` and `10`. Defaults to `3`. + type: number + header: + description: A header block as detailed below. + items: + properties: + name: + description: |- + The name of the Volume to be mounted in the container. + The HTTP Header Name. + type: string + value: + description: |- + The value for this secret. + The HTTP Header value. + type: string + type: object + type: array + host: + description: |- + The value for the host header which should be sent with this probe. If unspecified, the IP Address of the Pod is used as the host header. Setting a value for Host in headers can be used to override this for HTTP and HTTPS type probes. + The probe hostname. Defaults to the pod IP address. 
Setting a value for `Host` in `headers` can be used to override this for `http` and `https` type probes. + type: string + initialDelay: + description: |- + The time in seconds to wait after the container has started before the probe is started. + The time in seconds to wait after the container has started before the probe is started. + type: number + intervalSeconds: + description: |- + How often, in seconds, the probe should run. Possible values are between 1 and 240. Defaults to 10 + How often, in seconds, the probe should run. Possible values are between `1` and `240`. Defaults to `10` + type: number + path: + description: |- + The path in the container at which to mount this volume. + The URI to use with the `host` for http type probes. Not valid for `TCP` type probes. Defaults to `/`. + type: string + port: + description: |- + The port number on which to connect. Possible values are between 1 and 65535. + The port number on which to connect. Possible values are between `1` and `65535`. + type: number + timeout: + description: |- + Time in seconds after which the probe times out. Possible values are in the range 1 - 240. Defaults to 1. + Time in seconds after which the probe times out. Possible values are between `1` an `240`. Defaults to `1`. + type: number + transport: + description: |- + Type of probe. Possible values are TCP, HTTP, and HTTPS. + Type of probe. Possible values are `TCP`, `HTTP`, and `HTTPS`. + type: string + type: object + type: array + memory: + description: |- + The amount of memory to allocate to the container. Possible values are 0.5Gi, 1Gi, 1.5Gi, 2Gi, 2.5Gi, 3Gi, 3.5Gi and 4Gi. When there's a workload profile specified, there's no such constraint. + The amount of memory to allocate to the container. Possible values include `0.5Gi`, `1.0Gi`, `1.5Gi`, `2.0Gi`, `2.5Gi`, `3.0Gi`, `3.5Gi`, and `4.0Gi`. **NOTE:** `cpu` and `memory` must be specified in `0.25'/'0.5Gi` combination increments. e.g. `1.25` / `2.5Gi` or `0.75` / `1.5Gi`. 
When there's a workload profile specified, there's no such constraint. + type: string + name: + description: |- + The name of the Volume to be mounted in the container. + The name of the container. + type: string + readinessProbe: + description: A readiness_probe block as detailed below. + items: + properties: + failureCountThreshold: + description: |- + The number of consecutive failures required to consider this probe as failed. Possible values are between 1 and 10. Defaults to 3. + The number of consecutive failures required to consider this probe as failed. Possible values are between `1` and `10`. Defaults to `3`. + type: number + header: + description: A header block as detailed below. + items: + properties: + name: + description: |- + The name of the Volume to be mounted in the container. + The HTTP Header Name. + type: string + value: + description: |- + The value for this secret. + The HTTP Header value. + type: string + type: object + type: array + host: + description: |- + The value for the host header which should be sent with this probe. If unspecified, the IP Address of the Pod is used as the host header. Setting a value for Host in headers can be used to override this for HTTP and HTTPS type probes. + The probe hostname. Defaults to the pod IP address. Setting a value for `Host` in `headers` can be used to override this for `http` and `https` type probes. + type: string + intervalSeconds: + description: |- + How often, in seconds, the probe should run. Possible values are between 1 and 240. Defaults to 10 + How often, in seconds, the probe should run. Possible values are between `1` and `240`. Defaults to `10` + type: number + path: + description: |- + The path in the container at which to mount this volume. + The URI to use for http type probes. Not valid for `TCP` type probes. Defaults to `/`. + type: string + port: + description: |- + The port number on which to connect. Possible values are between 1 and 65535. 
+ The port number on which to connect. Possible values are between `1` and `65535`. + type: number + successCountThreshold: + description: |- + The number of consecutive successful responses required to consider this probe as successful. Possible values are between 1 and 10. Defaults to 3. + The number of consecutive successful responses required to consider this probe as successful. Possible values are between `1` and `10`. Defaults to `3`. + type: number + timeout: + description: |- + Time in seconds after which the probe times out. Possible values are in the range 1 - 240. Defaults to 1. + Time in seconds after which the probe times out. Possible values are between `1` an `240`. Defaults to `1`. + type: number + transport: + description: |- + Type of probe. Possible values are TCP, HTTP, and HTTPS. + Type of probe. Possible values are `TCP`, `HTTP`, and `HTTPS`. + type: string + type: object + type: array + startupProbe: + description: A startup_probe block as detailed below. + items: + properties: + failureCountThreshold: + description: |- + The number of consecutive failures required to consider this probe as failed. Possible values are between 1 and 10. Defaults to 3. + The number of consecutive failures required to consider this probe as failed. Possible values are between `1` and `10`. Defaults to `3`. + type: number + header: + description: A header block as detailed below. + items: + properties: + name: + description: |- + The name of the Volume to be mounted in the container. + The HTTP Header Name. + type: string + value: + description: |- + The value for this secret. + The HTTP Header value. + type: string + type: object + type: array + host: + description: |- + The value for the host header which should be sent with this probe. If unspecified, the IP Address of the Pod is used as the host header. Setting a value for Host in headers can be used to override this for HTTP and HTTPS type probes. + The probe hostname. Defaults to the pod IP address. 
Setting a value for `Host` in `headers` can be used to override this for `http` and `https` type probes. + type: string + intervalSeconds: + description: |- + How often, in seconds, the probe should run. Possible values are between 1 and 240. Defaults to 10 + How often, in seconds, the probe should run. Possible values are between `1` and `240`. Defaults to `10` + type: number + path: + description: |- + The path in the container at which to mount this volume. + The URI to use with the `host` for http type probes. Not valid for `TCP` type probes. Defaults to `/`. + type: string + port: + description: |- + The port number on which to connect. Possible values are between 1 and 65535. + The port number on which to connect. Possible values are between `1` and `65535`. + type: number + timeout: + description: |- + Time in seconds after which the probe times out. Possible values are in the range 1 - 240. Defaults to 1. + Time in seconds after which the probe times out. Possible values are between `1` an `240`. Defaults to `1`. + type: number + transport: + description: |- + Type of probe. Possible values are TCP, HTTP, and HTTPS. + Type of probe. Possible values are `TCP`, `HTTP`, and `HTTPS`. + type: string + type: object + type: array + volumeMounts: + description: A volume_mounts block as detailed below. + items: + properties: + name: + description: |- + The name of the Volume to be mounted in the container. + The name of the Volume to be mounted in the container. + type: string + path: + description: |- + The path in the container at which to mount this volume. + The path in the container at which to mount this volume. + type: string + type: object + type: array + type: object + type: array + customScaleRule: + description: One or more custom_scale_rule blocks as defined + below. + items: + properties: + authentication: + description: Zero or more authentication blocks as defined + below. 
+ items: + properties: + secretName: + description: The name of the secret that contains + the value for this environment variable. + type: string + triggerParameter: + description: The Trigger Parameter name to use + the supply the value retrieved from the secret_name. + type: string + type: object + type: array + customRuleType: + description: 'The Custom rule type. Possible values + include: activemq, artemis-queue, kafka, pulsar, aws-cloudwatch, + aws-dynamodb, aws-dynamodb-streams, aws-kinesis-stream, + aws-sqs-queue, azure-app-insights, azure-blob, azure-data-explorer, + azure-eventhub, azure-log-analytics, azure-monitor, + azure-pipelines, azure-servicebus, azure-queue, cassandra, + cpu, cron, datadog, elasticsearch, external, external-push, + gcp-stackdriver, gcp-storage, gcp-pubsub, graphite, + http, huawei-cloudeye, ibmmq, influxdb, kubernetes-workload, + liiklus, memory, metrics-api, mongodb, mssql, mysql, + nats-jetstream, stan, tcp, new-relic, openstack-metric, + openstack-swift, postgresql, predictkube, prometheus, + rabbitmq, redis, redis-cluster, redis-sentinel, redis-streams, + redis-cluster-streams, redis-sentinel-streams, selenium-grid,solace-event-queue, + and github-runner.' + type: string + metadata: + additionalProperties: + type: string + description: '- A map of string key-value pairs to configure + the Custom Scale Rule.' + type: object + x-kubernetes-map-type: granular + name: + description: The name of the Volume to be mounted in + the container. + type: string + type: object + type: array + httpScaleRule: + description: One or more http_scale_rule blocks as defined + below. + items: + properties: + authentication: + description: Zero or more authentication blocks as defined + below. + items: + properties: + secretName: + description: The name of the secret that contains + the value for this environment variable. 
+ type: string + triggerParameter: + description: The Trigger Parameter name to use + the supply the value retrieved from the secret_name. + type: string + type: object + type: array + concurrentRequests: + description: '- The number of concurrent requests to + trigger scaling.' + type: string + name: + description: The name of the Volume to be mounted in + the container. + type: string + type: object + type: array + initContainer: + description: The definition of an init container that is part + of the group as documented in the init_container block below. + items: + properties: + args: + description: |- + A list of extra arguments to pass to the container. + A list of args to pass to the container. + items: + type: string + type: array + command: + description: |- + A command to pass to the container to override the default. This is provided as a list of command line elements without spaces. + A command to pass to the container to override the default. This is provided as a list of command line elements without spaces. + items: + type: string + type: array + cpu: + description: |- + The amount of vCPU to allocate to the container. Possible values include 0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 1.75, and 2.0. When there's a workload profile specified, there's no such constraint. + The amount of vCPU to allocate to the container. Possible values include `0.25`, `0.5`, `0.75`, `1.0`, `1.25`, `1.5`, `1.75`, and `2.0`. **NOTE:** `cpu` and `memory` must be specified in `0.25'/'0.5Gi` combination increments. e.g. `1.0` / `2.0` or `0.5` / `1.0`. When there's a workload profile specified, there's no such constraint. + type: number + env: + description: One or more env blocks as detailed below. + items: + properties: + name: + description: |- + The name of the Volume to be mounted in the container. + The name of the environment variable for the container. + type: string + secretName: + description: |- + The name of the secret that contains the value for this environment variable. 
+ The name of the secret that contains the value for this environment variable. + type: string + value: + description: |- + The value for this secret. + The value for this environment variable. **NOTE:** This value is ignored if `secret_name` is used + type: string + type: object + type: array + image: + description: |- + The image to use to create the container. + The image to use to create the container. + type: string + memory: + description: |- + The amount of memory to allocate to the container. Possible values are 0.5Gi, 1Gi, 1.5Gi, 2Gi, 2.5Gi, 3Gi, 3.5Gi and 4Gi. When there's a workload profile specified, there's no such constraint. + The amount of memory to allocate to the container. Possible values include `0.5Gi`, `1.0Gi`, `1.5Gi`, `2.0Gi`, `2.5Gi`, `3.0Gi`, `3.5Gi`, and `4.0Gi`. **NOTE:** `cpu` and `memory` must be specified in `0.25'/'0.5Gi` combination increments. e.g. `1.25` / `2.5Gi` or `0.75` / `1.5Gi`. When there's a workload profile specified, there's no such constraint. + type: string + name: + description: |- + The name of the Volume to be mounted in the container. + The name of the container. + type: string + volumeMounts: + description: A volume_mounts block as detailed below. + items: + properties: + name: + description: |- + The name of the Volume to be mounted in the container. + The name of the Volume to be mounted in the container. + type: string + path: + description: |- + The path in the container at which to mount this volume. + The path in the container at which to mount this volume. + type: string + type: object + type: array + type: object + type: array + maxReplicas: + description: |- + The maximum number of replicas for this container. + The maximum number of replicas for this container. + type: number + minReplicas: + description: |- + The minimum number of replicas for this container. + The minimum number of replicas for this container. + type: number + revisionSuffix: + description: |- + The suffix for the revision. 
This value must be unique for the lifetime of the Resource. If omitted the service will use a hash function to create one. + The suffix for the revision. This value must be unique for the lifetime of the Resource. If omitted the service will use a hash function to create one. + type: string + tcpScaleRule: + description: One or more tcp_scale_rule blocks as defined + below. + items: + properties: + authentication: + description: Zero or more authentication blocks as defined + below. + items: + properties: + secretName: + description: The name of the secret that contains + the value for this environment variable. + type: string + triggerParameter: + description: The Trigger Parameter name to use + the supply the value retrieved from the secret_name. + type: string + type: object + type: array + concurrentRequests: + description: '- The number of concurrent requests to + trigger scaling.' + type: string + name: + description: The name of the Volume to be mounted in + the container. + type: string + type: object + type: array + volume: + description: A volume block as detailed below. + items: + properties: + name: + description: |- + The name of the Volume to be mounted in the container. + The name of the volume. + type: string + storageName: + description: |- + The name of the AzureFile storage. + The name of the `AzureFile` storage. Required when `storage_type` is `AzureFile` + type: string + storageType: + description: |- + The type of storage volume. Possible values are AzureFile, EmptyDir and Secret. Defaults to EmptyDir. + The type of storage volume. Possible values include `AzureFile` and `EmptyDir`. Defaults to `EmptyDir`. + type: string + type: object + type: array + type: object + workloadProfileName: + description: The name of the Workload Profile in the Container + App Environment to place this Container App. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. 
It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.revisionMode is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.revisionMode) + || (has(self.initProvider) && has(self.initProvider.revisionMode))' + - message: spec.forProvider.template is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.template) + || (has(self.initProvider) && has(self.initProvider.template))' + status: + description: ContainerAppStatus defines the observed state of ContainerApp. + properties: + atProvider: + properties: + containerAppEnvironmentId: + description: |- + The ID of the Container App Environment within which this Container App should exist. Changing this forces a new resource to be created. + The ID of the Container App Environment to host this Container App. + type: string + dapr: + description: A dapr block as detailed below. + properties: + appId: + description: |- + The Dapr Application Identifier. + The Dapr Application Identifier. + type: string + appPort: + description: |- + The port which the application is listening on. This is the same as the ingress port. + The port which the application is listening on. This is the same as the `ingress` port. + type: number + appProtocol: + description: |- + The protocol for the app. Possible values include http and grpc. Defaults to http. + The protocol for the app. Possible values include `http` and `grpc`. Defaults to `http`. + type: string + type: object + id: + description: The ID of the Container App. + type: string + identity: + description: An identity block as detailed below. 
+ properties: + identityIds: + description: '- A list of one or more Resource IDs for User + Assigned Managed identities to assign. Required when type + is set to UserAssigned or SystemAssigned, UserAssigned.' + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The ID of the Container App. + type: string + tenantId: + description: The ID of the Container App. + type: string + type: + description: The type of managed identity to assign. Possible + values are SystemAssigned, UserAssigned, and SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + ingress: + description: An ingress block as detailed below. + properties: + allowInsecureConnections: + description: |- + Should this ingress allow insecure connections? + Should this ingress allow insecure connections? + type: boolean + customDomain: + description: One or more custom_domain block as detailed below. + properties: + certificateBindingType: + description: |- + The Binding type. Possible values include Disabled and SniEnabled. Defaults to Disabled. + The Binding type. Possible values include `Disabled` and `SniEnabled`. Defaults to `Disabled` + type: string + certificateId: + description: The ID of the Container App Environment Certificate. + type: string + name: + description: |- + The name of the Volume to be mounted in the container. + The hostname of the Certificate. Must be the CN or a named SAN in the certificate. + type: string + type: object + exposedPort: + description: |- + The exposed port on the container for the Ingress traffic. + The exposed port on the container for the Ingress traffic. + type: number + externalEnabled: + description: |- + Are connections to this Ingress from outside the Container App Environment enabled? Defaults to false. + Is this an external Ingress. + type: boolean + fqdn: + description: |- + The FQDN of the ingress. + The FQDN of the ingress. 
+ type: string + ipSecurityRestriction: + description: One or more ip_security_restriction blocks for + IP-filtering rules as defined below. + items: + properties: + action: + description: |- + The IP-filter action. Allow or Deny. + The action. Allow or Deny. + type: string + description: + description: |- + Describe the IP restriction rule that is being sent to the container-app. + Describe the IP restriction rule that is being sent to the container-app. + type: string + ipAddressRange: + description: |- + CIDR notation to match incoming IP address. + CIDR notation to match incoming IP address. + type: string + name: + description: |- + The name of the Volume to be mounted in the container. + Name for the IP restriction rule. + type: string + type: object + type: array + targetPort: + description: |- + The target port on the container for the Ingress traffic. + The target port on the container for the Ingress traffic. + type: number + trafficWeight: + description: One or more traffic_weight blocks as detailed + below. + items: + properties: + label: + description: |- + The label to apply to the revision as a name prefix for routing traffic. + The label to apply to the revision as a name prefix for routing traffic. + type: string + latestRevision: + description: |- + This traffic Weight applies to the latest stable Container Revision. At most only one traffic_weight block can have the latest_revision set to true. + This traffic Weight relates to the latest stable Container Revision. + type: boolean + percentage: + description: |- + The percentage of traffic which should be sent this revision. + The percentage of traffic to send to this revision. + type: number + revisionSuffix: + description: |- + The suffix for the revision. This value must be unique for the lifetime of the Resource. If omitted the service will use a hash function to create one. + The suffix string to append to the revision. This must be unique for the Container App's lifetime. 
A default hash created by the service will be used if this value is omitted. + type: string + type: object + type: array + transport: + description: |- + The transport method for the Ingress. Possible values are auto, http, http2 and tcp. Defaults to auto. + The transport method for the Ingress. Possible values include `auto`, `http`, and `http2`, `tcp`. Defaults to `auto` + type: string + type: object + latestRevisionFqdn: + description: |- + The FQDN of the Latest Revision of the Container App. + The FQDN of the Latest Revision of the Container App. + type: string + latestRevisionName: + description: |- + The name of the latest Container Revision. + The name of the latest Container Revision. + type: string + location: + description: The location this Container App is deployed in. This + is the same as the Environment in which it is deployed. + type: string + outboundIpAddresses: + description: A list of the Public IP Addresses which the Container + App uses for outbound network access. + items: + type: string + type: array + registry: + description: A registry block as detailed below. + items: + properties: + identity: + description: |- + Resource ID for the User Assigned Managed identity to use when pulling from the Container Registry. + ID of the System or User Managed Identity used to pull images from the Container Registry + type: string + passwordSecretName: + description: |- + The name of the Secret Reference containing the password value for this user on the Container Registry, username must also be supplied. + The name of the Secret Reference containing the password value for this user on the Container Registry. + type: string + server: + description: |- + The hostname for the Container Registry. + The hostname for the Container Registry. + type: string + username: + description: |- + The username to use for this Container Registry, password_secret_name must also be supplied.. + The username to use for this Container Registry. 
+ type: string + type: object + type: array + resourceGroupName: + description: The name of the resource group in which the Container + App Environment is to be created. Changing this forces a new + resource to be created. + type: string + revisionMode: + description: The revisions operational mode for the Container + App. Possible values include Single and Multiple. In Single + mode, a single revision is in operation at any given time. In + Multiple mode, more than one revision can be active at a time + and can be configured with load distribution via the traffic_weight + block in the ingress configuration. + type: string + secret: + description: One or more secret block as detailed below. + items: + properties: + nameSecretRef: + description: |- + The Secret name. + The Secret name. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + valueSecretRef: + description: |- + The value for this secret. + The value for this secret. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + required: + - nameSecretRef + - valueSecretRef + type: object + type: array + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the Container App. + type: object + x-kubernetes-map-type: granular + template: + description: A template block as detailed below. + properties: + azureQueueScaleRule: + description: One or more azure_queue_scale_rule blocks as + defined below. + items: + properties: + authentication: + description: Zero or more authentication blocks as defined + below. 
+ items: + properties: + secretName: + description: The name of the secret that contains + the value for this environment variable. + type: string + triggerParameter: + description: The Trigger Parameter name to use + the supply the value retrieved from the secret_name. + type: string + type: object + type: array + name: + description: The name of the Volume to be mounted in + the container. + type: string + queueLength: + description: The value of the length of the queue to + trigger scaling actions. + type: number + queueName: + description: The name of the Azure Queue + type: string + type: object + type: array + container: + description: One or more container blocks as detailed below. + items: + properties: + args: + description: |- + A list of extra arguments to pass to the container. + A list of args to pass to the container. + items: + type: string + type: array + command: + description: |- + A command to pass to the container to override the default. This is provided as a list of command line elements without spaces. + A command to pass to the container to override the default. This is provided as a list of command line elements without spaces. + items: + type: string + type: array + cpu: + description: |- + The amount of vCPU to allocate to the container. Possible values include 0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 1.75, and 2.0. When there's a workload profile specified, there's no such constraint. + The amount of vCPU to allocate to the container. Possible values include `0.25`, `0.5`, `0.75`, `1.0`, `1.25`, `1.5`, `1.75`, and `2.0`. **NOTE:** `cpu` and `memory` must be specified in `0.25'/'0.5Gi` combination increments. e.g. `1.0` / `2.0` or `0.5` / `1.0`. When there's a workload profile specified, there's no such constraint. + type: number + env: + description: One or more env blocks as detailed below. + items: + properties: + name: + description: |- + The name of the Volume to be mounted in the container. 
+ The name of the environment variable for the container. + type: string + secretName: + description: |- + The name of the secret that contains the value for this environment variable. + The name of the secret that contains the value for this environment variable. + type: string + value: + description: |- + The value for this secret. + The value for this environment variable. **NOTE:** This value is ignored if `secret_name` is used + type: string + type: object + type: array + ephemeralStorage: + description: |- + The amount of ephemeral storage available to the Container App. + The amount of ephemeral storage available to the Container App. + type: string + image: + description: |- + The image to use to create the container. + The image to use to create the container. + type: string + livenessProbe: + description: A liveness_probe block as detailed below. + items: + properties: + failureCountThreshold: + description: |- + The number of consecutive failures required to consider this probe as failed. Possible values are between 1 and 10. Defaults to 3. + The number of consecutive failures required to consider this probe as failed. Possible values are between `1` and `10`. Defaults to `3`. + type: number + header: + description: A header block as detailed below. + items: + properties: + name: + description: |- + The name of the Volume to be mounted in the container. + The HTTP Header Name. + type: string + value: + description: |- + The value for this secret. + The HTTP Header value. + type: string + type: object + type: array + host: + description: |- + The value for the host header which should be sent with this probe. If unspecified, the IP Address of the Pod is used as the host header. Setting a value for Host in headers can be used to override this for HTTP and HTTPS type probes. + The probe hostname. Defaults to the pod IP address. Setting a value for `Host` in `headers` can be used to override this for `http` and `https` type probes. 
+ type: string + initialDelay: + description: |- + The time in seconds to wait after the container has started before the probe is started. + The time in seconds to wait after the container has started before the probe is started. + type: number + intervalSeconds: + description: |- + How often, in seconds, the probe should run. Possible values are between 1 and 240. Defaults to 10 + How often, in seconds, the probe should run. Possible values are between `1` and `240`. Defaults to `10` + type: number + path: + description: |- + The path in the container at which to mount this volume. + The URI to use with the `host` for http type probes. Not valid for `TCP` type probes. Defaults to `/`. + type: string + port: + description: |- + The port number on which to connect. Possible values are between 1 and 65535. + The port number on which to connect. Possible values are between `1` and `65535`. + type: number + terminationGracePeriodSeconds: + description: |- + The time in seconds after the container is sent the termination signal before the process if forcibly killed. + The time in seconds after the container is sent the termination signal before the process if forcibly killed. + type: number + timeout: + description: |- + Time in seconds after which the probe times out. Possible values are in the range 1 - 240. Defaults to 1. + Time in seconds after which the probe times out. Possible values are between `1` an `240`. Defaults to `1`. + type: number + transport: + description: |- + Type of probe. Possible values are TCP, HTTP, and HTTPS. + Type of probe. Possible values are `TCP`, `HTTP`, and `HTTPS`. + type: string + type: object + type: array + memory: + description: |- + The amount of memory to allocate to the container. Possible values are 0.5Gi, 1Gi, 1.5Gi, 2Gi, 2.5Gi, 3Gi, 3.5Gi and 4Gi. When there's a workload profile specified, there's no such constraint. + The amount of memory to allocate to the container. 
Possible values include `0.5Gi`, `1.0Gi`, `1.5Gi`, `2.0Gi`, `2.5Gi`, `3.0Gi`, `3.5Gi`, and `4.0Gi`. **NOTE:** `cpu` and `memory` must be specified in `0.25'/'0.5Gi` combination increments. e.g. `1.25` / `2.5Gi` or `0.75` / `1.5Gi`. When there's a workload profile specified, there's no such constraint. + type: string + name: + description: |- + The name of the Volume to be mounted in the container. + The name of the container. + type: string + readinessProbe: + description: A readiness_probe block as detailed below. + items: + properties: + failureCountThreshold: + description: |- + The number of consecutive failures required to consider this probe as failed. Possible values are between 1 and 10. Defaults to 3. + The number of consecutive failures required to consider this probe as failed. Possible values are between `1` and `10`. Defaults to `3`. + type: number + header: + description: A header block as detailed below. + items: + properties: + name: + description: |- + The name of the Volume to be mounted in the container. + The HTTP Header Name. + type: string + value: + description: |- + The value for this secret. + The HTTP Header value. + type: string + type: object + type: array + host: + description: |- + The value for the host header which should be sent with this probe. If unspecified, the IP Address of the Pod is used as the host header. Setting a value for Host in headers can be used to override this for HTTP and HTTPS type probes. + The probe hostname. Defaults to the pod IP address. Setting a value for `Host` in `headers` can be used to override this for `http` and `https` type probes. + type: string + intervalSeconds: + description: |- + How often, in seconds, the probe should run. Possible values are between 1 and 240. Defaults to 10 + How often, in seconds, the probe should run. Possible values are between `1` and `240`. Defaults to `10` + type: number + path: + description: |- + The path in the container at which to mount this volume. 
+ The URI to use for http type probes. Not valid for `TCP` type probes. Defaults to `/`. + type: string + port: + description: |- + The port number on which to connect. Possible values are between 1 and 65535. + The port number on which to connect. Possible values are between `1` and `65535`. + type: number + successCountThreshold: + description: |- + The number of consecutive successful responses required to consider this probe as successful. Possible values are between 1 and 10. Defaults to 3. + The number of consecutive successful responses required to consider this probe as successful. Possible values are between `1` and `10`. Defaults to `3`. + type: number + timeout: + description: |- + Time in seconds after which the probe times out. Possible values are in the range 1 - 240. Defaults to 1. + Time in seconds after which the probe times out. Possible values are between `1` an `240`. Defaults to `1`. + type: number + transport: + description: |- + Type of probe. Possible values are TCP, HTTP, and HTTPS. + Type of probe. Possible values are `TCP`, `HTTP`, and `HTTPS`. + type: string + type: object + type: array + startupProbe: + description: A startup_probe block as detailed below. + items: + properties: + failureCountThreshold: + description: |- + The number of consecutive failures required to consider this probe as failed. Possible values are between 1 and 10. Defaults to 3. + The number of consecutive failures required to consider this probe as failed. Possible values are between `1` and `10`. Defaults to `3`. + type: number + header: + description: A header block as detailed below. + items: + properties: + name: + description: |- + The name of the Volume to be mounted in the container. + The HTTP Header Name. + type: string + value: + description: |- + The value for this secret. + The HTTP Header value. + type: string + type: object + type: array + host: + description: |- + The value for the host header which should be sent with this probe. 
If unspecified, the IP Address of the Pod is used as the host header. Setting a value for Host in headers can be used to override this for HTTP and HTTPS type probes. + The probe hostname. Defaults to the pod IP address. Setting a value for `Host` in `headers` can be used to override this for `http` and `https` type probes. + type: string + intervalSeconds: + description: |- + How often, in seconds, the probe should run. Possible values are between 1 and 240. Defaults to 10 + How often, in seconds, the probe should run. Possible values are between `1` and `240`. Defaults to `10` + type: number + path: + description: |- + The path in the container at which to mount this volume. + The URI to use with the `host` for http type probes. Not valid for `TCP` type probes. Defaults to `/`. + type: string + port: + description: |- + The port number on which to connect. Possible values are between 1 and 65535. + The port number on which to connect. Possible values are between `1` and `65535`. + type: number + terminationGracePeriodSeconds: + description: |- + The time in seconds after the container is sent the termination signal before the process if forcibly killed. + The time in seconds after the container is sent the termination signal before the process if forcibly killed. + type: number + timeout: + description: |- + Time in seconds after which the probe times out. Possible values are in the range 1 - 240. Defaults to 1. + Time in seconds after which the probe times out. Possible values are between `1` an `240`. Defaults to `1`. + type: number + transport: + description: |- + Type of probe. Possible values are TCP, HTTP, and HTTPS. + Type of probe. Possible values are `TCP`, `HTTP`, and `HTTPS`. + type: string + type: object + type: array + volumeMounts: + description: A volume_mounts block as detailed below. + items: + properties: + name: + description: |- + The name of the Volume to be mounted in the container. + The name of the Volume to be mounted in the container. 
+ type: string + path: + description: |- + The path in the container at which to mount this volume. + The path in the container at which to mount this volume. + type: string + type: object + type: array + type: object + type: array + customScaleRule: + description: One or more custom_scale_rule blocks as defined + below. + items: + properties: + authentication: + description: Zero or more authentication blocks as defined + below. + items: + properties: + secretName: + description: The name of the secret that contains + the value for this environment variable. + type: string + triggerParameter: + description: The Trigger Parameter name to use + the supply the value retrieved from the secret_name. + type: string + type: object + type: array + customRuleType: + description: 'The Custom rule type. Possible values + include: activemq, artemis-queue, kafka, pulsar, aws-cloudwatch, + aws-dynamodb, aws-dynamodb-streams, aws-kinesis-stream, + aws-sqs-queue, azure-app-insights, azure-blob, azure-data-explorer, + azure-eventhub, azure-log-analytics, azure-monitor, + azure-pipelines, azure-servicebus, azure-queue, cassandra, + cpu, cron, datadog, elasticsearch, external, external-push, + gcp-stackdriver, gcp-storage, gcp-pubsub, graphite, + http, huawei-cloudeye, ibmmq, influxdb, kubernetes-workload, + liiklus, memory, metrics-api, mongodb, mssql, mysql, + nats-jetstream, stan, tcp, new-relic, openstack-metric, + openstack-swift, postgresql, predictkube, prometheus, + rabbitmq, redis, redis-cluster, redis-sentinel, redis-streams, + redis-cluster-streams, redis-sentinel-streams, selenium-grid,solace-event-queue, + and github-runner.' + type: string + metadata: + additionalProperties: + type: string + description: '- A map of string key-value pairs to configure + the Custom Scale Rule.' + type: object + x-kubernetes-map-type: granular + name: + description: The name of the Volume to be mounted in + the container. 
+ type: string + type: object + type: array + httpScaleRule: + description: One or more http_scale_rule blocks as defined + below. + items: + properties: + authentication: + description: Zero or more authentication blocks as defined + below. + items: + properties: + secretName: + description: The name of the secret that contains + the value for this environment variable. + type: string + triggerParameter: + description: The Trigger Parameter name to use + the supply the value retrieved from the secret_name. + type: string + type: object + type: array + concurrentRequests: + description: '- The number of concurrent requests to + trigger scaling.' + type: string + name: + description: The name of the Volume to be mounted in + the container. + type: string + type: object + type: array + initContainer: + description: The definition of an init container that is part + of the group as documented in the init_container block below. + items: + properties: + args: + description: |- + A list of extra arguments to pass to the container. + A list of args to pass to the container. + items: + type: string + type: array + command: + description: |- + A command to pass to the container to override the default. This is provided as a list of command line elements without spaces. + A command to pass to the container to override the default. This is provided as a list of command line elements without spaces. + items: + type: string + type: array + cpu: + description: |- + The amount of vCPU to allocate to the container. Possible values include 0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 1.75, and 2.0. When there's a workload profile specified, there's no such constraint. + The amount of vCPU to allocate to the container. Possible values include `0.25`, `0.5`, `0.75`, `1.0`, `1.25`, `1.5`, `1.75`, and `2.0`. **NOTE:** `cpu` and `memory` must be specified in `0.25'/'0.5Gi` combination increments. e.g. `1.0` / `2.0` or `0.5` / `1.0`. 
When there's a workload profile specified, there's no such constraint. + type: number + env: + description: One or more env blocks as detailed below. + items: + properties: + name: + description: |- + The name of the Volume to be mounted in the container. + The name of the environment variable for the container. + type: string + secretName: + description: |- + The name of the secret that contains the value for this environment variable. + The name of the secret that contains the value for this environment variable. + type: string + value: + description: |- + The value for this secret. + The value for this environment variable. **NOTE:** This value is ignored if `secret_name` is used + type: string + type: object + type: array + ephemeralStorage: + description: |- + The amount of ephemeral storage available to the Container App. + The amount of ephemeral storage available to the Container App. + type: string + image: + description: |- + The image to use to create the container. + The image to use to create the container. + type: string + memory: + description: |- + The amount of memory to allocate to the container. Possible values are 0.5Gi, 1Gi, 1.5Gi, 2Gi, 2.5Gi, 3Gi, 3.5Gi and 4Gi. When there's a workload profile specified, there's no such constraint. + The amount of memory to allocate to the container. Possible values include `0.5Gi`, `1.0Gi`, `1.5Gi`, `2.0Gi`, `2.5Gi`, `3.0Gi`, `3.5Gi`, and `4.0Gi`. **NOTE:** `cpu` and `memory` must be specified in `0.25'/'0.5Gi` combination increments. e.g. `1.25` / `2.5Gi` or `0.75` / `1.5Gi`. When there's a workload profile specified, there's no such constraint. + type: string + name: + description: |- + The name of the Volume to be mounted in the container. + The name of the container. + type: string + volumeMounts: + description: A volume_mounts block as detailed below. + items: + properties: + name: + description: |- + The name of the Volume to be mounted in the container. 
+ The name of the Volume to be mounted in the container. + type: string + path: + description: |- + The path in the container at which to mount this volume. + The path in the container at which to mount this volume. + type: string + type: object + type: array + type: object + type: array + maxReplicas: + description: |- + The maximum number of replicas for this container. + The maximum number of replicas for this container. + type: number + minReplicas: + description: |- + The minimum number of replicas for this container. + The minimum number of replicas for this container. + type: number + revisionSuffix: + description: |- + The suffix for the revision. This value must be unique for the lifetime of the Resource. If omitted the service will use a hash function to create one. + The suffix for the revision. This value must be unique for the lifetime of the Resource. If omitted the service will use a hash function to create one. + type: string + tcpScaleRule: + description: One or more tcp_scale_rule blocks as defined + below. + items: + properties: + authentication: + description: Zero or more authentication blocks as defined + below. + items: + properties: + secretName: + description: The name of the secret that contains + the value for this environment variable. + type: string + triggerParameter: + description: The Trigger Parameter name to use + the supply the value retrieved from the secret_name. + type: string + type: object + type: array + concurrentRequests: + description: '- The number of concurrent requests to + trigger scaling.' + type: string + name: + description: The name of the Volume to be mounted in + the container. + type: string + type: object + type: array + volume: + description: A volume block as detailed below. + items: + properties: + name: + description: |- + The name of the Volume to be mounted in the container. + The name of the volume. + type: string + storageName: + description: |- + The name of the AzureFile storage. 
+ The name of the `AzureFile` storage. Required when `storage_type` is `AzureFile` + type: string + storageType: + description: |- + The type of storage volume. Possible values are AzureFile, EmptyDir and Secret. Defaults to EmptyDir. + The type of storage volume. Possible values include `AzureFile` and `EmptyDir`. Defaults to `EmptyDir`. + type: string + type: object + type: array + type: object + workloadProfileName: + description: The name of the Workload Profile in the Container + App Environment to place this Container App. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/containerregistry.azure.upbound.io_registries.yaml b/package/crds/containerregistry.azure.upbound.io_registries.yaml index 09480bdc1..815b99856 100644 --- a/package/crds/containerregistry.azure.upbound.io_registries.yaml +++ b/package/crds/containerregistry.azure.upbound.io_registries.yaml @@ -1259,3 +1259,1238 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Registry is the Schema for the Registrys API. Manages an Azure + Container Registry. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. 
+ Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: RegistrySpec defines the desired state of Registry + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + adminEnabled: + description: Specifies whether the admin user is enabled. Defaults + to false. + type: boolean + anonymousPullEnabled: + description: Whether allows anonymous (unauthenticated) pull access + to this Container Registry? This is only supported on resources + with the Standard or Premium SKU. + type: boolean + dataEndpointEnabled: + description: Whether to enable dedicated data endpoints for this + Container Registry? This is only supported on resources with + the Premium SKU. + type: boolean + encryption: + description: An encryption block as documented below. + items: + properties: + enabled: + description: Boolean value that indicates whether encryption + is enabled. + type: boolean + identityClientId: + description: The client ID of the managed identity associated + with the encryption key. + type: string + identityClientIdRef: + description: Reference to a UserAssignedIdentity in managedidentity + to populate identityClientId. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + identityClientIdSelector: + description: Selector for a UserAssignedIdentity in managedidentity + to populate identityClientId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + keyVaultKeyId: + description: The ID of the Key Vault Key. + type: string + type: object + type: array + exportPolicyEnabled: + description: Boolean value that indicates whether export policy + is enabled. Defaults to true. In order to set it to false, make + sure the public_network_access_enabled is also set to false. + type: boolean + georeplications: + description: A georeplications block as documented below. + items: + properties: + location: + description: A location where the container registry should + be geo-replicated. + type: string + regionalEndpointEnabled: + description: Whether regional endpoint is enabled for this + Container Registry? + type: boolean + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to this replication + location. + type: object + x-kubernetes-map-type: granular + zoneRedundancyEnabled: + description: Whether zone redundancy is enabled for this + Container Registry? Changing this forces a new resource + to be created. Defaults to false. + type: boolean + type: object + type: array + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Container Registry. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Container Registry. Possible + values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. 
+ type: string + networkRuleBypassOption: + description: Whether to allow trusted Azure services to access + a network restricted Container Registry? Possible values are + None and AzureServices. Defaults to AzureServices. + type: string + networkRuleSet: + description: A network_rule_set block as documented below. + items: + properties: + defaultAction: + description: The behaviour for requests matching no rules. + Either Allow or Deny. Defaults to Allow + type: string + ipRule: + description: One or more ip_rule blocks as defined below. + items: + properties: + action: + description: The behaviour for requests matching this + rule. At this time the only supported value is Allow + type: string + ipRange: + description: The CIDR block from which requests will + match the rule. + type: string + type: object + type: array + virtualNetwork: + items: + properties: + action: + description: The behaviour for requests matching this + rule. At this time the only supported value is Allow + type: string + subnetId: + description: The ID of the Container Registry. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + type: object + type: array + publicNetworkAccessEnabled: + description: Whether public network access is allowed for the + container registry. Defaults to true. + type: boolean + quarantinePolicyEnabled: + description: Boolean value that indicates whether quarantine policy + is enabled. + type: boolean + resourceGroupName: + description: The name of the resource group in which to create + the Container Registry. Changing this forces a new resource + to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + retentionPolicy: + description: A retention_policy block as documented below. + items: + properties: + days: + description: The number of days to retain an untagged manifest + after which it gets purged. Default is 7. + type: number + enabled: + description: Boolean value that indicates whether the policy + is enabled. + type: boolean + type: object + type: array + sku: + description: The SKU name of the container registry. Possible + values are Basic, Standard and Premium. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + trustPolicy: + description: A trust_policy block as documented below. + items: + properties: + enabled: + description: Boolean value that indicates whether the policy + is enabled. + type: boolean + type: object + type: array + zoneRedundancyEnabled: + description: Whether zone redundancy is enabled for this Container + Registry? Changing this forces a new resource to be created. + Defaults to false. + type: boolean + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + adminEnabled: + description: Specifies whether the admin user is enabled. Defaults + to false. 
+ type: boolean + anonymousPullEnabled: + description: Whether allows anonymous (unauthenticated) pull access + to this Container Registry? This is only supported on resources + with the Standard or Premium SKU. + type: boolean + dataEndpointEnabled: + description: Whether to enable dedicated data endpoints for this + Container Registry? This is only supported on resources with + the Premium SKU. + type: boolean + encryption: + description: An encryption block as documented below. + items: + properties: + enabled: + description: Boolean value that indicates whether encryption + is enabled. + type: boolean + identityClientId: + description: The client ID of the managed identity associated + with the encryption key. + type: string + identityClientIdRef: + description: Reference to a UserAssignedIdentity in managedidentity + to populate identityClientId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + identityClientIdSelector: + description: Selector for a UserAssignedIdentity in managedidentity + to populate identityClientId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + keyVaultKeyId: + description: The ID of the Key Vault Key. + type: string + type: object + type: array + exportPolicyEnabled: + description: Boolean value that indicates whether export policy + is enabled. Defaults to true. In order to set it to false, make + sure the public_network_access_enabled is also set to false. + type: boolean + georeplications: + description: A georeplications block as documented below. + items: + properties: + location: + description: A location where the container registry should + be geo-replicated. + type: string + regionalEndpointEnabled: + description: Whether regional endpoint is enabled for this + Container Registry? + type: boolean + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to this replication + location. 
+ type: object + x-kubernetes-map-type: granular + zoneRedundancyEnabled: + description: Whether zone redundancy is enabled for this + Container Registry? Changing this forces a new resource + to be created. Defaults to false. + type: boolean + type: object + type: array + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Container Registry. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Container Registry. Possible + values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + networkRuleBypassOption: + description: Whether to allow trusted Azure services to access + a network restricted Container Registry? Possible values are + None and AzureServices. Defaults to AzureServices. + type: string + networkRuleSet: + description: A network_rule_set block as documented below. + items: + properties: + defaultAction: + description: The behaviour for requests matching no rules. + Either Allow or Deny. Defaults to Allow + type: string + ipRule: + description: One or more ip_rule blocks as defined below. + items: + properties: + action: + description: The behaviour for requests matching this + rule. At this time the only supported value is Allow + type: string + ipRange: + description: The CIDR block from which requests will + match the rule. + type: string + type: object + type: array + virtualNetwork: + items: + properties: + action: + description: The behaviour for requests matching this + rule. 
At this time the only supported value is Allow + type: string + subnetId: + description: The ID of the Container Registry. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + type: object + type: array + publicNetworkAccessEnabled: + description: Whether public network access is allowed for the + container registry. Defaults to true. + type: boolean + quarantinePolicyEnabled: + description: Boolean value that indicates whether quarantine policy + is enabled. + type: boolean + retentionPolicy: + description: A retention_policy block as documented below. + items: + properties: + days: + description: The number of days to retain an untagged manifest + after which it gets purged. Default is 7. + type: number + enabled: + description: Boolean value that indicates whether the policy + is enabled. + type: boolean + type: object + type: array + sku: + description: The SKU name of the container registry. Possible + values are Basic, Standard and Premium. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + trustPolicy: + description: A trust_policy block as documented below. + items: + properties: + enabled: + description: Boolean value that indicates whether the policy + is enabled. + type: boolean + type: object + type: array + zoneRedundancyEnabled: + description: Whether zone redundancy is enabled for this Container + Registry? Changing this forces a new resource to be created. + Defaults to false. + type: boolean + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. 
+ ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.sku is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.sku) + || (has(self.initProvider) && has(self.initProvider.sku))' + status: + description: RegistryStatus defines the observed state of Registry. + properties: + atProvider: + properties: + adminEnabled: + description: Specifies whether the admin user is enabled. Defaults + to false. + type: boolean + adminUsername: + description: The Username associated with the Container Registry + Admin account - if the admin account is enabled. + type: string + anonymousPullEnabled: + description: Whether allows anonymous (unauthenticated) pull access + to this Container Registry? This is only supported on resources + with the Standard or Premium SKU. + type: boolean + dataEndpointEnabled: + description: Whether to enable dedicated data endpoints for this + Container Registry? This is only supported on resources with + the Premium SKU. + type: boolean + encryption: + description: An encryption block as documented below. + items: + properties: + enabled: + description: Boolean value that indicates whether encryption + is enabled. + type: boolean + identityClientId: + description: The client ID of the managed identity associated + with the encryption key. + type: string + keyVaultKeyId: + description: The ID of the Key Vault Key. 
+ type: string + type: object + type: array + exportPolicyEnabled: + description: Boolean value that indicates whether export policy + is enabled. Defaults to true. In order to set it to false, make + sure the public_network_access_enabled is also set to false. + type: boolean + georeplications: + description: A georeplications block as documented below. + items: + properties: + location: + description: A location where the container registry should + be geo-replicated. + type: string + regionalEndpointEnabled: + description: Whether regional endpoint is enabled for this + Container Registry? + type: boolean + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to this replication + location. + type: object + x-kubernetes-map-type: granular + zoneRedundancyEnabled: + description: Whether zone redundancy is enabled for this + Container Registry? Changing this forces a new resource + to be created. Defaults to false. + type: boolean + type: object + type: array + id: + description: The ID of the Container Registry. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Container Registry. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Principal ID associated with this Managed + Service Identity. + type: string + tenantId: + description: The Tenant ID associated with this Managed Service + Identity. + type: string + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Container Registry. Possible + values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. 
+ type: string + loginServer: + description: The URL that can be used to log into the container + registry. + type: string + networkRuleBypassOption: + description: Whether to allow trusted Azure services to access + a network restricted Container Registry? Possible values are + None and AzureServices. Defaults to AzureServices. + type: string + networkRuleSet: + description: A network_rule_set block as documented below. + items: + properties: + defaultAction: + description: The behaviour for requests matching no rules. + Either Allow or Deny. Defaults to Allow + type: string + ipRule: + description: One or more ip_rule blocks as defined below. + items: + properties: + action: + description: The behaviour for requests matching this + rule. At this time the only supported value is Allow + type: string + ipRange: + description: The CIDR block from which requests will + match the rule. + type: string + type: object + type: array + virtualNetwork: + items: + properties: + action: + description: The behaviour for requests matching this + rule. At this time the only supported value is Allow + type: string + subnetId: + description: The ID of the Container Registry. + type: string + type: object + type: array + type: object + type: array + publicNetworkAccessEnabled: + description: Whether public network access is allowed for the + container registry. Defaults to true. + type: boolean + quarantinePolicyEnabled: + description: Boolean value that indicates whether quarantine policy + is enabled. + type: boolean + resourceGroupName: + description: The name of the resource group in which to create + the Container Registry. Changing this forces a new resource + to be created. + type: string + retentionPolicy: + description: A retention_policy block as documented below. + items: + properties: + days: + description: The number of days to retain an untagged manifest + after which it gets purged. Default is 7. 
+ type: number + enabled: + description: Boolean value that indicates whether the policy + is enabled. + type: boolean + type: object + type: array + sku: + description: The SKU name of the container registry. Possible + values are Basic, Standard and Premium. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + trustPolicy: + description: A trust_policy block as documented below. + items: + properties: + enabled: + description: Boolean value that indicates whether the policy + is enabled. + type: boolean + type: object + type: array + zoneRedundancyEnabled: + description: Whether zone redundancy is enabled for this Container + Registry? Changing this forces a new resource to be created. + Defaults to false. + type: boolean + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. 
At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/containerregistry.azure.upbound.io_tokenpasswords.yaml b/package/crds/containerregistry.azure.upbound.io_tokenpasswords.yaml index e7f887f4f..07c58b733 100644 --- a/package/crds/containerregistry.azure.upbound.io_tokenpasswords.yaml +++ b/package/crds/containerregistry.azure.upbound.io_tokenpasswords.yaml @@ -563,3 +563,536 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: TokenPassword is the Schema for the TokenPasswords API. Manages + a Container Registry Token Password. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: TokenPasswordSpec defines the desired state of TokenPassword + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + containerRegistryTokenId: + description: The ID of the Container Registry Token that this + Container Registry Token Password resides in. Changing this + forces a new Container Registry Token Password to be created. + type: string + containerRegistryTokenIdRef: + description: Reference to a Token in containerregistry to populate + containerRegistryTokenId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + containerRegistryTokenIdSelector: + description: Selector for a Token in containerregistry to populate + containerRegistryTokenId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + password1: + description: One password block as defined below. 
+ properties: + expiry: + description: The expiration date of the password in RFC3339 + format. If not specified, the password never expires. Changing + this forces a new resource to be created. + type: string + type: object + password2: + description: One password block as defined below. + properties: + expiry: + description: The expiration date of the password in RFC3339 + format. If not specified, the password never expires. Changing + this forces a new resource to be created. + type: string + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + containerRegistryTokenId: + description: The ID of the Container Registry Token that this + Container Registry Token Password resides in. Changing this + forces a new Container Registry Token Password to be created. + type: string + containerRegistryTokenIdRef: + description: Reference to a Token in containerregistry to populate + containerRegistryTokenId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + containerRegistryTokenIdSelector: + description: Selector for a Token in containerregistry to populate + containerRegistryTokenId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + password1: + description: One password block as defined below. + properties: + expiry: + description: The expiration date of the password in RFC3339 + format. If not specified, the password never expires. 
Changing + this forces a new resource to be created. + type: string + type: object + password2: + description: One password block as defined below. + properties: + expiry: + description: The expiration date of the password in RFC3339 + format. If not specified, the password never expires. Changing + this forces a new resource to be created. + type: string + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.password1 is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.password1) + || (has(self.initProvider) && has(self.initProvider.password1))' + status: + description: TokenPasswordStatus defines the observed state of TokenPassword. + properties: + atProvider: + properties: + containerRegistryTokenId: + description: The ID of the Container Registry Token that this + Container Registry Token Password resides in. Changing this + forces a new Container Registry Token Password to be created. + type: string + id: + description: The ID of the Container Registry Token Password. + type: string + password1: + description: One password block as defined below. + properties: + expiry: + description: The expiration date of the password in RFC3339 + format. If not specified, the password never expires. Changing + this forces a new resource to be created. + type: string + type: object + password2: + description: One password block as defined below. + properties: + expiry: + description: The expiration date of the password in RFC3339 + format. If not specified, the password never expires. Changing + this forces a new resource to be created. + type: string + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/containerservice.azure.upbound.io_kubernetesclusternodepools.yaml b/package/crds/containerservice.azure.upbound.io_kubernetesclusternodepools.yaml index 1aa097f85..493a50ab7 100644 --- a/package/crds/containerservice.azure.upbound.io_kubernetesclusternodepools.yaml +++ b/package/crds/containerservice.azure.upbound.io_kubernetesclusternodepools.yaml @@ -2154,3 +2154,2097 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: KubernetesClusterNodePool is the Schema for the KubernetesClusterNodePools + API. Manages a Node Pool within a Kubernetes Cluster + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: KubernetesClusterNodePoolSpec defines the desired state of + KubernetesClusterNodePool + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + capacityReservationGroupId: + description: Specifies the ID of the Capacity Reservation Group + where this Node Pool should exist. Changing this forces a new + resource to be created. + type: string + customCaTrustEnabled: + description: Specifies whether to trust a Custom CA. + type: boolean + enableAutoScaling: + description: Whether to enable auto-scaler. + type: boolean + enableHostEncryption: + description: Should the nodes in this Node Pool have host encryption + enabled? Changing this forces a new resource to be created. + type: boolean + enableNodePublicIp: + description: Should each node have a Public IP Address? Changing + this forces a new resource to be created. + type: boolean + evictionPolicy: + description: The Eviction Policy which should be used for Virtual + Machines within the Virtual Machine Scale Set powering this + Node Pool. Possible values are Deallocate and Delete. Changing + this forces a new resource to be created. 
+ type: string + fipsEnabled: + description: Should the nodes in this Node Pool have Federal Information + Processing Standard enabled? Changing this forces a new resource + to be created. + type: boolean + gpuInstance: + description: Specifies the GPU MIG instance profile for supported + GPU VM SKU. The allowed values are MIG1g, MIG2g, MIG3g, MIG4g + and MIG7g. Changing this forces a new resource to be created. + type: string + hostGroupId: + description: The fully qualified resource ID of the Dedicated + Host Group to provision virtual machines from. Changing this + forces a new resource to be created. + type: string + kubeletConfig: + description: A kubelet_config block as defined below. Changing + this forces a new resource to be created. + properties: + allowedUnsafeSysctls: + description: Specifies the allow list of unsafe sysctls command + or patterns (ending in *). Changing this forces a new resource + to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + containerLogMaxLine: + description: Specifies the maximum number of container log + files that can be present for a container. must be at least + 2. Changing this forces a new resource to be created. + type: number + containerLogMaxSizeMb: + description: Specifies the maximum size (e.g. 10MB) of container + log file before it is rotated. Changing this forces a new + resource to be created. + type: number + cpuCfsQuotaEnabled: + description: Is CPU CFS quota enforcement for containers enabled? + Changing this forces a new resource to be created. + type: boolean + cpuCfsQuotaPeriod: + description: Specifies the CPU CFS quota period value. Changing + this forces a new resource to be created. + type: string + cpuManagerPolicy: + description: Specifies the CPU Manager policy to use. Possible + values are none and static, Changing this forces a new resource + to be created. 
+ type: string + imageGcHighThreshold: + description: Specifies the percent of disk usage above which + image garbage collection is always run. Must be between + 0 and 100. Changing this forces a new resource to be created. + type: number + imageGcLowThreshold: + description: Specifies the percent of disk usage lower than + which image garbage collection is never run. Must be between + 0 and 100. Changing this forces a new resource to be created. + type: number + podMaxPid: + description: Specifies the maximum number of processes per + pod. Changing this forces a new resource to be created. + type: number + topologyManagerPolicy: + description: Specifies the Topology Manager policy to use. + Possible values are none, best-effort, restricted or single-numa-node. + Changing this forces a new resource to be created. + type: string + type: object + kubeletDiskType: + description: The type of disk used by kubelet. Possible values + are OS and Temporary. + type: string + kubernetesClusterId: + description: The ID of the Kubernetes Cluster where this Node + Pool should exist. Changing this forces a new resource to be + created. + type: string + kubernetesClusterIdRef: + description: Reference to a KubernetesCluster in containerservice + to populate kubernetesClusterId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + kubernetesClusterIdSelector: + description: Selector for a KubernetesCluster in containerservice + to populate kubernetesClusterId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + linuxOsConfig: + description: A linux_os_config block as defined below. Changing + this forces a new resource to be created. + properties: + swapFileSizeMb: + description: Specifies the size of swap file on each node + in MB. Changing this forces a new resource to be created. + type: number + sysctlConfig: + description: A sysctl_config block as defined below. Changing + this forces a new resource to be created. 
+ properties: + fsAioMaxNr: + description: The sysctl setting fs.aio-max-nr. Must be + between 65536 and 6553500. Changing this forces a new + resource to be created. + type: number + fsFileMax: + description: The sysctl setting fs.file-max. Must be between + 8192 and 12000500. Changing this forces a new resource + to be created. + type: number + fsInotifyMaxUserWatches: + description: The sysctl setting fs.inotify.max_user_watches. + Must be between 781250 and 2097152. Changing this forces + a new resource to be created. + type: number + fsNrOpen: + description: The sysctl setting fs.nr_open. Must be between + 8192 and 20000500. Changing this forces a new resource + to be created. + type: number + kernelThreadsMax: + description: The sysctl setting kernel.threads-max. Must + be between 20 and 513785. Changing this forces a new + resource to be created. + type: number + netCoreNetdevMaxBacklog: + description: The sysctl setting net.core.netdev_max_backlog. + Must be between 1000 and 3240000. Changing this forces + a new resource to be created. + type: number + netCoreOptmemMax: + description: The sysctl setting net.core.optmem_max. Must + be between 20480 and 4194304. Changing this forces a + new resource to be created. + type: number + netCoreRmemDefault: + description: The sysctl setting net.core.rmem_default. + Must be between 212992 and 134217728. Changing this + forces a new resource to be created. + type: number + netCoreRmemMax: + description: The sysctl setting net.core.rmem_max. Must + be between 212992 and 134217728. Changing this forces + a new resource to be created. + type: number + netCoreSomaxconn: + description: The sysctl setting net.core.somaxconn. Must + be between 4096 and 3240000. Changing this forces a + new resource to be created. + type: number + netCoreWmemDefault: + description: The sysctl setting net.core.wmem_default. + Must be between 212992 and 134217728. Changing this + forces a new resource to be created. 
+ type: number + netCoreWmemMax: + description: The sysctl setting net.core.wmem_max. Must + be between 212992 and 134217728. Changing this forces + a new resource to be created. + type: number + netIpv4IpLocalPortRangeMax: + description: The sysctl setting net.ipv4.ip_local_port_range + max value. Must be between 32768 and 65535. Changing + this forces a new resource to be created. + type: number + netIpv4IpLocalPortRangeMin: + description: The sysctl setting net.ipv4.ip_local_port_range + min value. Must be between 1024 and 60999. Changing + this forces a new resource to be created. + type: number + netIpv4NeighDefaultGcThresh1: + description: The sysctl setting net.ipv4.neigh.default.gc_thresh1. + Must be between 128 and 80000. Changing this forces + a new resource to be created. + type: number + netIpv4NeighDefaultGcThresh2: + description: The sysctl setting net.ipv4.neigh.default.gc_thresh2. + Must be between 512 and 90000. Changing this forces + a new resource to be created. + type: number + netIpv4NeighDefaultGcThresh3: + description: The sysctl setting net.ipv4.neigh.default.gc_thresh3. + Must be between 1024 and 100000. Changing this forces + a new resource to be created. + type: number + netIpv4TcpFinTimeout: + description: The sysctl setting net.ipv4.tcp_fin_timeout. + Must be between 5 and 120. Changing this forces a new + resource to be created. + type: number + netIpv4TcpKeepaliveIntvl: + description: The sysctl setting net.ipv4.tcp_keepalive_intvl. + Must be between 10 and 90. Changing this forces a new + resource to be created. + type: number + netIpv4TcpKeepaliveProbes: + description: The sysctl setting net.ipv4.tcp_keepalive_probes. + Must be between 1 and 15. Changing this forces a new + resource to be created. + type: number + netIpv4TcpKeepaliveTime: + description: The sysctl setting net.ipv4.tcp_keepalive_time. + Must be between 30 and 432000. Changing this forces + a new resource to be created. 
+ type: number + netIpv4TcpMaxSynBacklog: + description: The sysctl setting net.ipv4.tcp_max_syn_backlog. + Must be between 128 and 3240000. Changing this forces + a new resource to be created. + type: number + netIpv4TcpMaxTwBuckets: + description: The sysctl setting net.ipv4.tcp_max_tw_buckets. + Must be between 8000 and 1440000. Changing this forces + a new resource to be created. + type: number + netIpv4TcpTwReuse: + description: Is sysctl setting net.ipv4.tcp_tw_reuse enabled? + Changing this forces a new resource to be created. + type: boolean + netNetfilterNfConntrackBuckets: + description: The sysctl setting net.netfilter.nf_conntrack_buckets. + Must be between 65536 and 524288. Changing this forces + a new resource to be created. + type: number + netNetfilterNfConntrackMax: + description: The sysctl setting net.netfilter.nf_conntrack_max. + Must be between 131072 and 2097152. Changing this forces + a new resource to be created. + type: number + vmMaxMapCount: + description: The sysctl setting vm.max_map_count. Must + be between 65530 and 262144. Changing this forces a + new resource to be created. + type: number + vmSwappiness: + description: The sysctl setting vm.swappiness. Must be + between 0 and 100. Changing this forces a new resource + to be created. + type: number + vmVfsCachePressure: + description: The sysctl setting vm.vfs_cache_pressure. + Must be between 0 and 100. Changing this forces a new + resource to be created. + type: number + type: object + transparentHugePageDefrag: + description: specifies the defrag configuration for Transparent + Huge Page. Possible values are always, defer, defer+madvise, + madvise and never. Changing this forces a new resource to + be created. + type: string + transparentHugePageEnabled: + description: Specifies the Transparent Huge Page enabled configuration. + Possible values are always, madvise and never. Changing + this forces a new resource to be created. 
+ type: string + type: object + maxCount: + description: The maximum number of nodes which should exist within + this Node Pool. Valid values are between 0 and 1000 and must + be greater than or equal to min_count. + type: number + maxPods: + description: The maximum number of pods that can run on each agent. + Changing this forces a new resource to be created. + type: number + messageOfTheDay: + description: A base64-encoded string which will be written to + /etc/motd after decoding. This allows customization of the message + of the day for Linux nodes. It cannot be specified for Windows + nodes and must be a static string (i.e. will be printed raw + and not executed as a script). Changing this forces a new resource + to be created. + type: string + minCount: + description: The minimum number of nodes which should exist within + this Node Pool. Valid values are between 0 and 1000 and must + be less than or equal to max_count. + type: number + mode: + description: Should this Node Pool be used for System or User + resources? Possible values are System and User. Defaults to + User. + type: string + nodeCount: + description: The initial number of nodes which should exist within + this Node Pool. Valid values are between 0 and 1000 (inclusive) + for user pools and between 1 and 1000 (inclusive) for system + pools and must be a value in the range min_count - max_count. + type: number + nodeLabels: + additionalProperties: + type: string + description: A map of Kubernetes labels which should be applied + to nodes in this Node Pool. + type: object + x-kubernetes-map-type: granular + nodeNetworkProfile: + description: A node_network_profile block as documented below. + properties: + allowedHostPorts: + description: One or more allowed_host_ports blocks as defined + below. + items: + properties: + portEnd: + description: Specifies the end of the port range. + type: number + portStart: + description: Specifies the start of the port range. 
+ type: number + protocol: + description: Specifies the protocol of the port range. + Possible values are TCP and UDP. + type: string + type: object + type: array + applicationSecurityGroupIds: + description: A list of Application Security Group IDs which + should be associated with this Node Pool. + items: + type: string + type: array + nodePublicIpTags: + additionalProperties: + type: string + description: Specifies a mapping of tags to the instance-level + public IPs. Changing this forces a new resource to be created. + type: object + x-kubernetes-map-type: granular + type: object + nodePublicIpPrefixId: + description: Resource ID for the Public IP Addresses Prefix for + the nodes in this Node Pool. enable_node_public_ip should be + true. Changing this forces a new resource to be created. + type: string + nodeTaints: + description: A list of Kubernetes taints which should be applied + to nodes in the agent pool (e.g key=value:NoSchedule). + items: + type: string + type: array + orchestratorVersion: + description: Version of Kubernetes used for the Agents. If not + specified, the latest recommended version will be used at provisioning + time (but won't auto-upgrade). AKS does not require an exact + patch version to be specified, minor version aliases such as + 1.22 are also supported. - The minor version's latest GA patch + is automatically chosen in that case. More details can be found + in the documentation. + type: string + osDiskSizeGb: + description: The Agent Operating System disk size in GB. Changing + this forces a new resource to be created. + type: number + osDiskType: + description: The type of disk which should be used for the Operating + System. Possible values are Ephemeral and Managed. Defaults + to Managed. Changing this forces a new resource to be created. + type: string + osSku: + description: Specifies the OS SKU used by the agent pool. Possible + values are AzureLinux, Ubuntu, Windows2019 and Windows2022. 
+ If not specified, the default is Ubuntu if OSType=Linux or Windows2019 + if OSType=Windows. And the default Windows OSSKU will be changed + to Windows2022 after Windows2019 is deprecated. Changing this + forces a new resource to be created. + type: string + osType: + description: The Operating System which should be used for this + Node Pool. Changing this forces a new resource to be created. + Possible values are Linux and Windows. Defaults to Linux. + type: string + podSubnetId: + description: The ID of the Subnet where the pods in the Node Pool + should exist. Changing this forces a new resource to be created. + type: string + podSubnetIdRef: + description: Reference to a Subnet in network to populate podSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + podSubnetIdSelector: + description: Selector for a Subnet in network to populate podSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + priority: + description: The Priority for Virtual Machines within the Virtual + Machine Scale Set that powers this Node Pool. Possible values + are Regular and Spot. Defaults to Regular. Changing this forces + a new resource to be created. + type: string + proximityPlacementGroupId: + description: The ID of the Proximity Placement Group where the + Virtual Machine Scale Set that powers this Node Pool will be + placed. Changing this forces a new resource to be created. + type: string + scaleDownMode: + description: Specifies how the node pool should deal with scaled-down + nodes. Allowed values are Delete and Deallocate. Defaults to + Delete. + type: string + snapshotId: + description: The ID of the Snapshot which should be used to create + this Node Pool. Changing this forces a new resource to be created. + type: string + spotMaxPrice: + description: The maximum price you're willing to pay in USD per + Virtual Machine. 
Valid values are -1 (the current on-demand + price for a Virtual Machine) or a positive value with up to + five decimal places. Changing this forces a new resource to + be created. + type: number + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + ultraSsdEnabled: + description: Used to specify whether the UltraSSD is enabled in + the Node Pool. Defaults to false. See the documentation for + more information. Changing this forces a new resource to be + created. + type: boolean + upgradeSettings: + description: A upgrade_settings block as documented below. + properties: + maxSurge: + description: The maximum number or percentage of nodes which + will be added to the Node Pool size during an upgrade. + type: string + type: object + vmSize: + description: The SKU which should be used for the Virtual Machines + used in this Node Pool. Changing this forces a new resource + to be created. + type: string + vnetSubnetId: + description: The ID of the Subnet where this Node Pool should + exist. Changing this forces a new resource to be created. + type: string + vnetSubnetIdRef: + description: Reference to a Subnet in network to populate vnetSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + vnetSubnetIdSelector: + description: Selector for a Subnet in network to populate vnetSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + windowsProfile: + description: A windows_profile block as documented below. Changing + this forces a new resource to be created. + properties: + outboundNatEnabled: + description: Should the Windows nodes in this Node Pool have + outbound NAT enabled? Defaults to true. Changing this forces + a new resource to be created. + type: boolean + type: object + workloadRuntime: + description: Used to specify the workload runtime. Allowed values + are OCIContainer, WasmWasi and KataMshvVmIsolation. 
+ type: string + zones: + description: Specifies a list of Availability Zones in which this + Kubernetes Cluster Node Pool should be located. Changing this + forces a new Kubernetes Cluster Node Pool to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + capacityReservationGroupId: + description: Specifies the ID of the Capacity Reservation Group + where this Node Pool should exist. Changing this forces a new + resource to be created. + type: string + customCaTrustEnabled: + description: Specifies whether to trust a Custom CA. + type: boolean + enableAutoScaling: + description: Whether to enable auto-scaler. + type: boolean + enableHostEncryption: + description: Should the nodes in this Node Pool have host encryption + enabled? Changing this forces a new resource to be created. + type: boolean + enableNodePublicIp: + description: Should each node have a Public IP Address? Changing + this forces a new resource to be created. + type: boolean + evictionPolicy: + description: The Eviction Policy which should be used for Virtual + Machines within the Virtual Machine Scale Set powering this + Node Pool. Possible values are Deallocate and Delete. Changing + this forces a new resource to be created. 
+ type: string + fipsEnabled: + description: Should the nodes in this Node Pool have Federal Information + Processing Standard enabled? Changing this forces a new resource + to be created. + type: boolean + gpuInstance: + description: Specifies the GPU MIG instance profile for supported + GPU VM SKU. The allowed values are MIG1g, MIG2g, MIG3g, MIG4g + and MIG7g. Changing this forces a new resource to be created. + type: string + hostGroupId: + description: The fully qualified resource ID of the Dedicated + Host Group to provision virtual machines from. Changing this + forces a new resource to be created. + type: string + kubeletConfig: + description: A kubelet_config block as defined below. Changing + this forces a new resource to be created. + properties: + allowedUnsafeSysctls: + description: Specifies the allow list of unsafe sysctls command + or patterns (ending in *). Changing this forces a new resource + to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + containerLogMaxLine: + description: Specifies the maximum number of container log + files that can be present for a container. must be at least + 2. Changing this forces a new resource to be created. + type: number + containerLogMaxSizeMb: + description: Specifies the maximum size (e.g. 10MB) of container + log file before it is rotated. Changing this forces a new + resource to be created. + type: number + cpuCfsQuotaEnabled: + description: Is CPU CFS quota enforcement for containers enabled? + Changing this forces a new resource to be created. + type: boolean + cpuCfsQuotaPeriod: + description: Specifies the CPU CFS quota period value. Changing + this forces a new resource to be created. + type: string + cpuManagerPolicy: + description: Specifies the CPU Manager policy to use. Possible + values are none and static, Changing this forces a new resource + to be created. 
+ type: string + imageGcHighThreshold: + description: Specifies the percent of disk usage above which + image garbage collection is always run. Must be between + 0 and 100. Changing this forces a new resource to be created. + type: number + imageGcLowThreshold: + description: Specifies the percent of disk usage lower than + which image garbage collection is never run. Must be between + 0 and 100. Changing this forces a new resource to be created. + type: number + podMaxPid: + description: Specifies the maximum number of processes per + pod. Changing this forces a new resource to be created. + type: number + topologyManagerPolicy: + description: Specifies the Topology Manager policy to use. + Possible values are none, best-effort, restricted or single-numa-node. + Changing this forces a new resource to be created. + type: string + type: object + kubeletDiskType: + description: The type of disk used by kubelet. Possible values + are OS and Temporary. + type: string + linuxOsConfig: + description: A linux_os_config block as defined below. Changing + this forces a new resource to be created. + properties: + swapFileSizeMb: + description: Specifies the size of swap file on each node + in MB. Changing this forces a new resource to be created. + type: number + sysctlConfig: + description: A sysctl_config block as defined below. Changing + this forces a new resource to be created. + properties: + fsAioMaxNr: + description: The sysctl setting fs.aio-max-nr. Must be + between 65536 and 6553500. Changing this forces a new + resource to be created. + type: number + fsFileMax: + description: The sysctl setting fs.file-max. Must be between + 8192 and 12000500. Changing this forces a new resource + to be created. + type: number + fsInotifyMaxUserWatches: + description: The sysctl setting fs.inotify.max_user_watches. + Must be between 781250 and 2097152. Changing this forces + a new resource to be created. + type: number + fsNrOpen: + description: The sysctl setting fs.nr_open. 
Must be between + 8192 and 20000500. Changing this forces a new resource + to be created. + type: number + kernelThreadsMax: + description: The sysctl setting kernel.threads-max. Must + be between 20 and 513785. Changing this forces a new + resource to be created. + type: number + netCoreNetdevMaxBacklog: + description: The sysctl setting net.core.netdev_max_backlog. + Must be between 1000 and 3240000. Changing this forces + a new resource to be created. + type: number + netCoreOptmemMax: + description: The sysctl setting net.core.optmem_max. Must + be between 20480 and 4194304. Changing this forces a + new resource to be created. + type: number + netCoreRmemDefault: + description: The sysctl setting net.core.rmem_default. + Must be between 212992 and 134217728. Changing this + forces a new resource to be created. + type: number + netCoreRmemMax: + description: The sysctl setting net.core.rmem_max. Must + be between 212992 and 134217728. Changing this forces + a new resource to be created. + type: number + netCoreSomaxconn: + description: The sysctl setting net.core.somaxconn. Must + be between 4096 and 3240000. Changing this forces a + new resource to be created. + type: number + netCoreWmemDefault: + description: The sysctl setting net.core.wmem_default. + Must be between 212992 and 134217728. Changing this + forces a new resource to be created. + type: number + netCoreWmemMax: + description: The sysctl setting net.core.wmem_max. Must + be between 212992 and 134217728. Changing this forces + a new resource to be created. + type: number + netIpv4IpLocalPortRangeMax: + description: The sysctl setting net.ipv4.ip_local_port_range + max value. Must be between 32768 and 65535. Changing + this forces a new resource to be created. + type: number + netIpv4IpLocalPortRangeMin: + description: The sysctl setting net.ipv4.ip_local_port_range + min value. Must be between 1024 and 60999. Changing + this forces a new resource to be created. 
+ type: number + netIpv4NeighDefaultGcThresh1: + description: The sysctl setting net.ipv4.neigh.default.gc_thresh1. + Must be between 128 and 80000. Changing this forces + a new resource to be created. + type: number + netIpv4NeighDefaultGcThresh2: + description: The sysctl setting net.ipv4.neigh.default.gc_thresh2. + Must be between 512 and 90000. Changing this forces + a new resource to be created. + type: number + netIpv4NeighDefaultGcThresh3: + description: The sysctl setting net.ipv4.neigh.default.gc_thresh3. + Must be between 1024 and 100000. Changing this forces + a new resource to be created. + type: number + netIpv4TcpFinTimeout: + description: The sysctl setting net.ipv4.tcp_fin_timeout. + Must be between 5 and 120. Changing this forces a new + resource to be created. + type: number + netIpv4TcpKeepaliveIntvl: + description: The sysctl setting net.ipv4.tcp_keepalive_intvl. + Must be between 10 and 90. Changing this forces a new + resource to be created. + type: number + netIpv4TcpKeepaliveProbes: + description: The sysctl setting net.ipv4.tcp_keepalive_probes. + Must be between 1 and 15. Changing this forces a new + resource to be created. + type: number + netIpv4TcpKeepaliveTime: + description: The sysctl setting net.ipv4.tcp_keepalive_time. + Must be between 30 and 432000. Changing this forces + a new resource to be created. + type: number + netIpv4TcpMaxSynBacklog: + description: The sysctl setting net.ipv4.tcp_max_syn_backlog. + Must be between 128 and 3240000. Changing this forces + a new resource to be created. + type: number + netIpv4TcpMaxTwBuckets: + description: The sysctl setting net.ipv4.tcp_max_tw_buckets. + Must be between 8000 and 1440000. Changing this forces + a new resource to be created. + type: number + netIpv4TcpTwReuse: + description: Is sysctl setting net.ipv4.tcp_tw_reuse enabled? + Changing this forces a new resource to be created. 
+ type: boolean + netNetfilterNfConntrackBuckets: + description: The sysctl setting net.netfilter.nf_conntrack_buckets. + Must be between 65536 and 524288. Changing this forces + a new resource to be created. + type: number + netNetfilterNfConntrackMax: + description: The sysctl setting net.netfilter.nf_conntrack_max. + Must be between 131072 and 2097152. Changing this forces + a new resource to be created. + type: number + vmMaxMapCount: + description: The sysctl setting vm.max_map_count. Must + be between 65530 and 262144. Changing this forces a + new resource to be created. + type: number + vmSwappiness: + description: The sysctl setting vm.swappiness. Must be + between 0 and 100. Changing this forces a new resource + to be created. + type: number + vmVfsCachePressure: + description: The sysctl setting vm.vfs_cache_pressure. + Must be between 0 and 100. Changing this forces a new + resource to be created. + type: number + type: object + transparentHugePageDefrag: + description: specifies the defrag configuration for Transparent + Huge Page. Possible values are always, defer, defer+madvise, + madvise and never. Changing this forces a new resource to + be created. + type: string + transparentHugePageEnabled: + description: Specifies the Transparent Huge Page enabled configuration. + Possible values are always, madvise and never. Changing + this forces a new resource to be created. + type: string + type: object + maxCount: + description: The maximum number of nodes which should exist within + this Node Pool. Valid values are between 0 and 1000 and must + be greater than or equal to min_count. + type: number + maxPods: + description: The maximum number of pods that can run on each agent. + Changing this forces a new resource to be created. + type: number + messageOfTheDay: + description: A base64-encoded string which will be written to + /etc/motd after decoding. This allows customization of the message + of the day for Linux nodes. 
It cannot be specified for Windows + nodes and must be a static string (i.e. will be printed raw + and not executed as a script). Changing this forces a new resource + to be created. + type: string + minCount: + description: The minimum number of nodes which should exist within + this Node Pool. Valid values are between 0 and 1000 and must + be less than or equal to max_count. + type: number + mode: + description: Should this Node Pool be used for System or User + resources? Possible values are System and User. Defaults to + User. + type: string + nodeCount: + description: The initial number of nodes which should exist within + this Node Pool. Valid values are between 0 and 1000 (inclusive) + for user pools and between 1 and 1000 (inclusive) for system + pools and must be a value in the range min_count - max_count. + type: number + nodeLabels: + additionalProperties: + type: string + description: A map of Kubernetes labels which should be applied + to nodes in this Node Pool. + type: object + x-kubernetes-map-type: granular + nodeNetworkProfile: + description: A node_network_profile block as documented below. + properties: + allowedHostPorts: + description: One or more allowed_host_ports blocks as defined + below. + items: + properties: + portEnd: + description: Specifies the end of the port range. + type: number + portStart: + description: Specifies the start of the port range. + type: number + protocol: + description: Specifies the protocol of the port range. + Possible values are TCP and UDP. + type: string + type: object + type: array + applicationSecurityGroupIds: + description: A list of Application Security Group IDs which + should be associated with this Node Pool. + items: + type: string + type: array + nodePublicIpTags: + additionalProperties: + type: string + description: Specifies a mapping of tags to the instance-level + public IPs. Changing this forces a new resource to be created. 
+ type: object + x-kubernetes-map-type: granular + type: object + nodePublicIpPrefixId: + description: Resource ID for the Public IP Addresses Prefix for + the nodes in this Node Pool. enable_node_public_ip should be + true. Changing this forces a new resource to be created. + type: string + nodeTaints: + description: A list of Kubernetes taints which should be applied + to nodes in the agent pool (e.g key=value:NoSchedule). + items: + type: string + type: array + orchestratorVersion: + description: Version of Kubernetes used for the Agents. If not + specified, the latest recommended version will be used at provisioning + time (but won't auto-upgrade). AKS does not require an exact + patch version to be specified, minor version aliases such as + 1.22 are also supported. - The minor version's latest GA patch + is automatically chosen in that case. More details can be found + in the documentation. + type: string + osDiskSizeGb: + description: The Agent Operating System disk size in GB. Changing + this forces a new resource to be created. + type: number + osDiskType: + description: The type of disk which should be used for the Operating + System. Possible values are Ephemeral and Managed. Defaults + to Managed. Changing this forces a new resource to be created. + type: string + osSku: + description: Specifies the OS SKU used by the agent pool. Possible + values are AzureLinux, Ubuntu, Windows2019 and Windows2022. + If not specified, the default is Ubuntu if OSType=Linux or Windows2019 + if OSType=Windows. And the default Windows OSSKU will be changed + to Windows2022 after Windows2019 is deprecated. Changing this + forces a new resource to be created. + type: string + osType: + description: The Operating System which should be used for this + Node Pool. Changing this forces a new resource to be created. + Possible values are Linux and Windows. Defaults to Linux. 
+ type: string + podSubnetId: + description: The ID of the Subnet where the pods in the Node Pool + should exist. Changing this forces a new resource to be created. + type: string + podSubnetIdRef: + description: Reference to a Subnet in network to populate podSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + podSubnetIdSelector: + description: Selector for a Subnet in network to populate podSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + priority: + description: The Priority for Virtual Machines within the Virtual + Machine Scale Set that powers this Node Pool. Possible values + are Regular and Spot. Defaults to Regular. Changing this forces + a new resource to be created. + type: string + proximityPlacementGroupId: + description: The ID of the Proximity Placement Group where the + Virtual Machine Scale Set that powers this Node Pool will be + placed. Changing this forces a new resource to be created. + type: string + scaleDownMode: + description: Specifies how the node pool should deal with scaled-down + nodes. Allowed values are Delete and Deallocate. Defaults to + Delete. + type: string + snapshotId: + description: The ID of the Snapshot which should be used to create + this Node Pool. Changing this forces a new resource to be created. + type: string + spotMaxPrice: + description: The maximum price you're willing to pay in USD per + Virtual Machine. Valid values are -1 (the current on-demand + price for a Virtual Machine) or a positive value with up to + five decimal places. Changing this forces a new resource to + be created. + type: number + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + ultraSsdEnabled: + description: Used to specify whether the UltraSSD is enabled in + the Node Pool. Defaults to false. See the documentation for + more information. Changing this forces a new resource to be + created. 
+ type: boolean + upgradeSettings: + description: A upgrade_settings block as documented below. + properties: + maxSurge: + description: The maximum number or percentage of nodes which + will be added to the Node Pool size during an upgrade. + type: string + type: object + vmSize: + description: The SKU which should be used for the Virtual Machines + used in this Node Pool. Changing this forces a new resource + to be created. + type: string + vnetSubnetId: + description: The ID of the Subnet where this Node Pool should + exist. Changing this forces a new resource to be created. + type: string + vnetSubnetIdRef: + description: Reference to a Subnet in network to populate vnetSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + vnetSubnetIdSelector: + description: Selector for a Subnet in network to populate vnetSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + windowsProfile: + description: A windows_profile block as documented below. Changing + this forces a new resource to be created. + properties: + outboundNatEnabled: + description: Should the Windows nodes in this Node Pool have + outbound NAT enabled? Defaults to true. Changing this forces + a new resource to be created. + type: boolean + type: object + workloadRuntime: + description: Used to specify the workload runtime. Allowed values + are OCIContainer, WasmWasi and KataMshvVmIsolation. + type: string + zones: + description: Specifies a list of Availability Zones in which this + Kubernetes Cluster Node Pool should be located. Changing this + forces a new Kubernetes Cluster Node Pool to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. 
Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.vmSize is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.vmSize) + || (has(self.initProvider) && has(self.initProvider.vmSize))' + status: + description: KubernetesClusterNodePoolStatus defines the observed state + of KubernetesClusterNodePool. + properties: + atProvider: + properties: + capacityReservationGroupId: + description: Specifies the ID of the Capacity Reservation Group + where this Node Pool should exist. 
Changing this forces a new + resource to be created. + type: string + customCaTrustEnabled: + description: Specifies whether to trust a Custom CA. + type: boolean + enableAutoScaling: + description: Whether to enable auto-scaler. + type: boolean + enableHostEncryption: + description: Should the nodes in this Node Pool have host encryption + enabled? Changing this forces a new resource to be created. + type: boolean + enableNodePublicIp: + description: Should each node have a Public IP Address? Changing + this forces a new resource to be created. + type: boolean + evictionPolicy: + description: The Eviction Policy which should be used for Virtual + Machines within the Virtual Machine Scale Set powering this + Node Pool. Possible values are Deallocate and Delete. Changing + this forces a new resource to be created. + type: string + fipsEnabled: + description: Should the nodes in this Node Pool have Federal Information + Processing Standard enabled? Changing this forces a new resource + to be created. + type: boolean + gpuInstance: + description: Specifies the GPU MIG instance profile for supported + GPU VM SKU. The allowed values are MIG1g, MIG2g, MIG3g, MIG4g + and MIG7g. Changing this forces a new resource to be created. + type: string + hostGroupId: + description: The fully qualified resource ID of the Dedicated + Host Group to provision virtual machines from. Changing this + forces a new resource to be created. + type: string + id: + description: The ID of the Kubernetes Cluster Node Pool. + type: string + kubeletConfig: + description: A kubelet_config block as defined below. Changing + this forces a new resource to be created. + properties: + allowedUnsafeSysctls: + description: Specifies the allow list of unsafe sysctls command + or patterns (ending in *). Changing this forces a new resource + to be created. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + containerLogMaxLine: + description: Specifies the maximum number of container log + files that can be present for a container. must be at least + 2. Changing this forces a new resource to be created. + type: number + containerLogMaxSizeMb: + description: Specifies the maximum size (e.g. 10MB) of container + log file before it is rotated. Changing this forces a new + resource to be created. + type: number + cpuCfsQuotaEnabled: + description: Is CPU CFS quota enforcement for containers enabled? + Changing this forces a new resource to be created. + type: boolean + cpuCfsQuotaPeriod: + description: Specifies the CPU CFS quota period value. Changing + this forces a new resource to be created. + type: string + cpuManagerPolicy: + description: Specifies the CPU Manager policy to use. Possible + values are none and static, Changing this forces a new resource + to be created. + type: string + imageGcHighThreshold: + description: Specifies the percent of disk usage above which + image garbage collection is always run. Must be between + 0 and 100. Changing this forces a new resource to be created. + type: number + imageGcLowThreshold: + description: Specifies the percent of disk usage lower than + which image garbage collection is never run. Must be between + 0 and 100. Changing this forces a new resource to be created. + type: number + podMaxPid: + description: Specifies the maximum number of processes per + pod. Changing this forces a new resource to be created. + type: number + topologyManagerPolicy: + description: Specifies the Topology Manager policy to use. + Possible values are none, best-effort, restricted or single-numa-node. + Changing this forces a new resource to be created. + type: string + type: object + kubeletDiskType: + description: The type of disk used by kubelet. Possible values + are OS and Temporary. 
+ type: string + kubernetesClusterId: + description: The ID of the Kubernetes Cluster where this Node + Pool should exist. Changing this forces a new resource to be + created. + type: string + linuxOsConfig: + description: A linux_os_config block as defined below. Changing + this forces a new resource to be created. + properties: + swapFileSizeMb: + description: Specifies the size of swap file on each node + in MB. Changing this forces a new resource to be created. + type: number + sysctlConfig: + description: A sysctl_config block as defined below. Changing + this forces a new resource to be created. + properties: + fsAioMaxNr: + description: The sysctl setting fs.aio-max-nr. Must be + between 65536 and 6553500. Changing this forces a new + resource to be created. + type: number + fsFileMax: + description: The sysctl setting fs.file-max. Must be between + 8192 and 12000500. Changing this forces a new resource + to be created. + type: number + fsInotifyMaxUserWatches: + description: The sysctl setting fs.inotify.max_user_watches. + Must be between 781250 and 2097152. Changing this forces + a new resource to be created. + type: number + fsNrOpen: + description: The sysctl setting fs.nr_open. Must be between + 8192 and 20000500. Changing this forces a new resource + to be created. + type: number + kernelThreadsMax: + description: The sysctl setting kernel.threads-max. Must + be between 20 and 513785. Changing this forces a new + resource to be created. + type: number + netCoreNetdevMaxBacklog: + description: The sysctl setting net.core.netdev_max_backlog. + Must be between 1000 and 3240000. Changing this forces + a new resource to be created. + type: number + netCoreOptmemMax: + description: The sysctl setting net.core.optmem_max. Must + be between 20480 and 4194304. Changing this forces a + new resource to be created. + type: number + netCoreRmemDefault: + description: The sysctl setting net.core.rmem_default. + Must be between 212992 and 134217728. 
Changing this + forces a new resource to be created. + type: number + netCoreRmemMax: + description: The sysctl setting net.core.rmem_max. Must + be between 212992 and 134217728. Changing this forces + a new resource to be created. + type: number + netCoreSomaxconn: + description: The sysctl setting net.core.somaxconn. Must + be between 4096 and 3240000. Changing this forces a + new resource to be created. + type: number + netCoreWmemDefault: + description: The sysctl setting net.core.wmem_default. + Must be between 212992 and 134217728. Changing this + forces a new resource to be created. + type: number + netCoreWmemMax: + description: The sysctl setting net.core.wmem_max. Must + be between 212992 and 134217728. Changing this forces + a new resource to be created. + type: number + netIpv4IpLocalPortRangeMax: + description: The sysctl setting net.ipv4.ip_local_port_range + max value. Must be between 32768 and 65535. Changing + this forces a new resource to be created. + type: number + netIpv4IpLocalPortRangeMin: + description: The sysctl setting net.ipv4.ip_local_port_range + min value. Must be between 1024 and 60999. Changing + this forces a new resource to be created. + type: number + netIpv4NeighDefaultGcThresh1: + description: The sysctl setting net.ipv4.neigh.default.gc_thresh1. + Must be between 128 and 80000. Changing this forces + a new resource to be created. + type: number + netIpv4NeighDefaultGcThresh2: + description: The sysctl setting net.ipv4.neigh.default.gc_thresh2. + Must be between 512 and 90000. Changing this forces + a new resource to be created. + type: number + netIpv4NeighDefaultGcThresh3: + description: The sysctl setting net.ipv4.neigh.default.gc_thresh3. + Must be between 1024 and 100000. Changing this forces + a new resource to be created. + type: number + netIpv4TcpFinTimeout: + description: The sysctl setting net.ipv4.tcp_fin_timeout. + Must be between 5 and 120. Changing this forces a new + resource to be created. 
+ type: number + netIpv4TcpKeepaliveIntvl: + description: The sysctl setting net.ipv4.tcp_keepalive_intvl. + Must be between 10 and 90. Changing this forces a new + resource to be created. + type: number + netIpv4TcpKeepaliveProbes: + description: The sysctl setting net.ipv4.tcp_keepalive_probes. + Must be between 1 and 15. Changing this forces a new + resource to be created. + type: number + netIpv4TcpKeepaliveTime: + description: The sysctl setting net.ipv4.tcp_keepalive_time. + Must be between 30 and 432000. Changing this forces + a new resource to be created. + type: number + netIpv4TcpMaxSynBacklog: + description: The sysctl setting net.ipv4.tcp_max_syn_backlog. + Must be between 128 and 3240000. Changing this forces + a new resource to be created. + type: number + netIpv4TcpMaxTwBuckets: + description: The sysctl setting net.ipv4.tcp_max_tw_buckets. + Must be between 8000 and 1440000. Changing this forces + a new resource to be created. + type: number + netIpv4TcpTwReuse: + description: Is sysctl setting net.ipv4.tcp_tw_reuse enabled? + Changing this forces a new resource to be created. + type: boolean + netNetfilterNfConntrackBuckets: + description: The sysctl setting net.netfilter.nf_conntrack_buckets. + Must be between 65536 and 524288. Changing this forces + a new resource to be created. + type: number + netNetfilterNfConntrackMax: + description: The sysctl setting net.netfilter.nf_conntrack_max. + Must be between 131072 and 2097152. Changing this forces + a new resource to be created. + type: number + vmMaxMapCount: + description: The sysctl setting vm.max_map_count. Must + be between 65530 and 262144. Changing this forces a + new resource to be created. + type: number + vmSwappiness: + description: The sysctl setting vm.swappiness. Must be + between 0 and 100. Changing this forces a new resource + to be created. + type: number + vmVfsCachePressure: + description: The sysctl setting vm.vfs_cache_pressure. + Must be between 0 and 100. 
Changing this forces a new + resource to be created. + type: number + type: object + transparentHugePageDefrag: + description: specifies the defrag configuration for Transparent + Huge Page. Possible values are always, defer, defer+madvise, + madvise and never. Changing this forces a new resource to + be created. + type: string + transparentHugePageEnabled: + description: Specifies the Transparent Huge Page enabled configuration. + Possible values are always, madvise and never. Changing + this forces a new resource to be created. + type: string + type: object + maxCount: + description: The maximum number of nodes which should exist within + this Node Pool. Valid values are between 0 and 1000 and must + be greater than or equal to min_count. + type: number + maxPods: + description: The maximum number of pods that can run on each agent. + Changing this forces a new resource to be created. + type: number + messageOfTheDay: + description: A base64-encoded string which will be written to + /etc/motd after decoding. This allows customization of the message + of the day for Linux nodes. It cannot be specified for Windows + nodes and must be a static string (i.e. will be printed raw + and not executed as a script). Changing this forces a new resource + to be created. + type: string + minCount: + description: The minimum number of nodes which should exist within + this Node Pool. Valid values are between 0 and 1000 and must + be less than or equal to max_count. + type: number + mode: + description: Should this Node Pool be used for System or User + resources? Possible values are System and User. Defaults to + User. + type: string + nodeCount: + description: The initial number of nodes which should exist within + this Node Pool. Valid values are between 0 and 1000 (inclusive) + for user pools and between 1 and 1000 (inclusive) for system + pools and must be a value in the range min_count - max_count. 
+ type: number + nodeLabels: + additionalProperties: + type: string + description: A map of Kubernetes labels which should be applied + to nodes in this Node Pool. + type: object + x-kubernetes-map-type: granular + nodeNetworkProfile: + description: A node_network_profile block as documented below. + properties: + allowedHostPorts: + description: One or more allowed_host_ports blocks as defined + below. + items: + properties: + portEnd: + description: Specifies the end of the port range. + type: number + portStart: + description: Specifies the start of the port range. + type: number + protocol: + description: Specifies the protocol of the port range. + Possible values are TCP and UDP. + type: string + type: object + type: array + applicationSecurityGroupIds: + description: A list of Application Security Group IDs which + should be associated with this Node Pool. + items: + type: string + type: array + nodePublicIpTags: + additionalProperties: + type: string + description: Specifies a mapping of tags to the instance-level + public IPs. Changing this forces a new resource to be created. + type: object + x-kubernetes-map-type: granular + type: object + nodePublicIpPrefixId: + description: Resource ID for the Public IP Addresses Prefix for + the nodes in this Node Pool. enable_node_public_ip should be + true. Changing this forces a new resource to be created. + type: string + nodeTaints: + description: A list of Kubernetes taints which should be applied + to nodes in the agent pool (e.g key=value:NoSchedule). + items: + type: string + type: array + orchestratorVersion: + description: Version of Kubernetes used for the Agents. If not + specified, the latest recommended version will be used at provisioning + time (but won't auto-upgrade). AKS does not require an exact + patch version to be specified, minor version aliases such as + 1.22 are also supported. - The minor version's latest GA patch + is automatically chosen in that case. 
More details can be found + in the documentation. + type: string + osDiskSizeGb: + description: The Agent Operating System disk size in GB. Changing + this forces a new resource to be created. + type: number + osDiskType: + description: The type of disk which should be used for the Operating + System. Possible values are Ephemeral and Managed. Defaults + to Managed. Changing this forces a new resource to be created. + type: string + osSku: + description: Specifies the OS SKU used by the agent pool. Possible + values are AzureLinux, Ubuntu, Windows2019 and Windows2022. + If not specified, the default is Ubuntu if OSType=Linux or Windows2019 + if OSType=Windows. And the default Windows OSSKU will be changed + to Windows2022 after Windows2019 is deprecated. Changing this + forces a new resource to be created. + type: string + osType: + description: The Operating System which should be used for this + Node Pool. Changing this forces a new resource to be created. + Possible values are Linux and Windows. Defaults to Linux. + type: string + podSubnetId: + description: The ID of the Subnet where the pods in the Node Pool + should exist. Changing this forces a new resource to be created. + type: string + priority: + description: The Priority for Virtual Machines within the Virtual + Machine Scale Set that powers this Node Pool. Possible values + are Regular and Spot. Defaults to Regular. Changing this forces + a new resource to be created. + type: string + proximityPlacementGroupId: + description: The ID of the Proximity Placement Group where the + Virtual Machine Scale Set that powers this Node Pool will be + placed. Changing this forces a new resource to be created. + type: string + scaleDownMode: + description: Specifies how the node pool should deal with scaled-down + nodes. Allowed values are Delete and Deallocate. Defaults to + Delete. + type: string + snapshotId: + description: The ID of the Snapshot which should be used to create + this Node Pool. 
Changing this forces a new resource to be created. + type: string + spotMaxPrice: + description: The maximum price you're willing to pay in USD per + Virtual Machine. Valid values are -1 (the current on-demand + price for a Virtual Machine) or a positive value with up to + five decimal places. Changing this forces a new resource to + be created. + type: number + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + ultraSsdEnabled: + description: Used to specify whether the UltraSSD is enabled in + the Node Pool. Defaults to false. See the documentation for + more information. Changing this forces a new resource to be + created. + type: boolean + upgradeSettings: + description: A upgrade_settings block as documented below. + properties: + maxSurge: + description: The maximum number or percentage of nodes which + will be added to the Node Pool size during an upgrade. + type: string + type: object + vmSize: + description: The SKU which should be used for the Virtual Machines + used in this Node Pool. Changing this forces a new resource + to be created. + type: string + vnetSubnetId: + description: The ID of the Subnet where this Node Pool should + exist. Changing this forces a new resource to be created. + type: string + windowsProfile: + description: A windows_profile block as documented below. Changing + this forces a new resource to be created. + properties: + outboundNatEnabled: + description: Should the Windows nodes in this Node Pool have + outbound NAT enabled? Defaults to true. Changing this forces + a new resource to be created. + type: boolean + type: object + workloadRuntime: + description: Used to specify the workload runtime. Allowed values + are OCIContainer, WasmWasi and KataMshvVmIsolation. + type: string + zones: + description: Specifies a list of Availability Zones in which this + Kubernetes Cluster Node Pool should be located. 
Changing this + forces a new Kubernetes Cluster Node Pool to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/containerservice.azure.upbound.io_kubernetesclusters.yaml b/package/crds/containerservice.azure.upbound.io_kubernetesclusters.yaml index 09ed1bae2..565ae63e0 100644 --- a/package/crds/containerservice.azure.upbound.io_kubernetesclusters.yaml +++ b/package/crds/containerservice.azure.upbound.io_kubernetesclusters.yaml @@ -5650,3 +5650,5371 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: KubernetesCluster is the Schema for the KubernetesClusters API. + Manages a managed Kubernetes Cluster (also known as AKS / Azure Kubernetes + Service) + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: KubernetesClusterSpec defines the desired state of KubernetesCluster + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + aciConnectorLinux: + description: A aci_connector_linux block as defined below. For + more details, please visit Create and configure an AKS cluster + to use virtual nodes. + properties: + subnetName: + description: The subnet name for the virtual nodes to run. + type: string + subnetNameRef: + description: Reference to a Subnet in network to populate + subnetName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetNameSelector: + description: Selector for a Subnet in network to populate + subnetName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + apiServerAccessProfile: + description: An api_server_access_profile block as defined below. + properties: + authorizedIpRanges: + description: Set of authorized IP ranges to allow access to + API server, e.g. ["198.51.100.0/24"]. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The ID of the Subnet where the API server endpoint + is delegated to. 
+ type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + vnetIntegrationEnabled: + description: Should API Server VNet Integration be enabled? + For more details please visit Use API Server VNet Integration. + type: boolean + type: object + apiServerAuthorizedIpRanges: + description: Deprecated in favor of `spec.forProvider.apiServerAccessProfile[0].authorizedIpRanges` + items: + type: string + type: array + x-kubernetes-list-type: set + autoScalerProfile: + description: A auto_scaler_profile block as defined below. + properties: + balanceSimilarNodeGroups: + description: Detect similar node groups and balance the number + of nodes between them. Defaults to false. + type: boolean + emptyBulkDeleteMax: + description: Maximum number of empty nodes that can be deleted + at the same time. Defaults to 10. + type: string + expander: + description: Expander to use. Possible values are least-waste, + priority, most-pods and random. Defaults to random. + type: string + maxGracefulTerminationSec: + description: Maximum number of seconds the cluster autoscaler + waits for pod termination when trying to scale down a node. + Defaults to 600. + type: string + maxNodeProvisioningTime: + description: Maximum time the autoscaler waits for a node + to be provisioned. Defaults to 15m. + type: string + maxUnreadyNodes: + description: Maximum Number of allowed unready nodes. Defaults + to 3. + type: number + maxUnreadyPercentage: + description: Maximum percentage of unready nodes the cluster + autoscaler will stop if the percentage is exceeded. Defaults + to 45. 
+ type: number + newPodScaleUpDelay: + description: For scenarios like burst/batch scale where you + don't want CA to act before the kubernetes scheduler could + schedule all the pods, you can tell CA to ignore unscheduled + pods before they're a certain age. Defaults to 10s. + type: string + scaleDownDelayAfterAdd: + description: How long after the scale up of AKS nodes the + scale down evaluation resumes. Defaults to 10m. + type: string + scaleDownDelayAfterDelete: + description: How long after node deletion that scale down + evaluation resumes. Defaults to the value used for scan_interval. + type: string + scaleDownDelayAfterFailure: + description: How long after scale down failure that scale + down evaluation resumes. Defaults to 3m. + type: string + scaleDownUnneeded: + description: How long a node should be unneeded before it + is eligible for scale down. Defaults to 10m. + type: string + scaleDownUnready: + description: How long an unready node should be unneeded before + it is eligible for scale down. Defaults to 20m. + type: string + scaleDownUtilizationThreshold: + description: Node utilization level, defined as sum of requested + resources divided by capacity, below which a node can be + considered for scale down. Defaults to 0.5. + type: string + scanInterval: + description: How often the AKS Cluster should be re-evaluated + for scale up/down. Defaults to 10s. + type: string + skipNodesWithLocalStorage: + description: If true cluster autoscaler will never delete + nodes with pods with local storage, for example, EmptyDir + or HostPath. Defaults to true. + type: boolean + skipNodesWithSystemPods: + description: If true cluster autoscaler will never delete + nodes with pods from kube-system (except for DaemonSet or + mirror pods). Defaults to true. + type: boolean + type: object + automaticChannelUpgrade: + description: The upgrade channel for this Kubernetes Cluster. + Possible values are patch, rapid, node-image and stable. 
Omitting + this field sets this value to none. + type: string + azureActiveDirectoryRoleBasedAccessControl: + description: A azure_active_directory_role_based_access_control + block as defined below. + properties: + adminGroupObjectIds: + description: A list of Object IDs of Azure Active Directory + Groups which should have Admin Role on the Cluster. + items: + type: string + type: array + azureRbacEnabled: + description: Is Role Based Access Control based on Azure AD + enabled? + type: boolean + clientAppId: + description: The Client ID of an Azure Active Directory Application. + type: string + managed: + description: Is the Azure Active Directory integration Managed, + meaning that Azure will create/manage the Service Principal + used for integration. + type: boolean + serverAppId: + description: The Server ID of an Azure Active Directory Application. + type: string + serverAppSecretSecretRef: + description: The Server Secret of an Azure Active Directory + Application. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + tenantId: + description: The Tenant ID used for Azure Active Directory + Application. If this isn't specified the Tenant ID of the + current Subscription is used. + type: string + type: object + azurePolicyEnabled: + description: Should the Azure Policy Add-On be enabled? For more + details please visit Understand Azure Policy for Azure Kubernetes + Service + type: boolean + confidentialComputing: + description: A confidential_computing block as defined below. + For more details please the documentation + properties: + sgxQuoteHelperEnabled: + description: Should the SGX quote helper be enabled? 
+ type: boolean + type: object + customCaTrustCertificatesBase64: + description: A list of up to 10 base64 encoded CAs that will be + added to the trust store on nodes with the custom_ca_trust_enabled + feature enabled. + items: + type: string + type: array + defaultNodePool: + description: A default_node_pool block as defined below. + properties: + capacityReservationGroupId: + description: Specifies the ID of the Capacity Reservation + Group within which this AKS Cluster should be created. Changing + this forces a new resource to be created. + type: string + customCaTrustEnabled: + description: Specifies whether to trust a Custom CA. + type: boolean + enableAutoScaling: + description: Should the Kubernetes Auto Scaler be enabled + for this Node Pool? + type: boolean + enableHostEncryption: + description: Should the nodes in the Default Node Pool have + host encryption enabled? temporary_name_for_rotation must + be specified when changing this property. + type: boolean + enableNodePublicIp: + description: Should nodes in this Node Pool have a Public + IP Address? temporary_name_for_rotation must be specified + when changing this property. + type: boolean + fipsEnabled: + description: Should the nodes in this Node Pool have Federal + Information Processing Standard enabled? temporary_name_for_rotation + must be specified when changing this block. Changing this + forces a new resource to be created. + type: boolean + gpuInstance: + description: Specifies the GPU MIG instance profile for supported + GPU VM SKU. The allowed values are MIG1g, MIG2g, MIG3g, + MIG4g and MIG7g. Changing this forces a new resource to + be created. + type: string + hostGroupId: + description: Specifies the ID of the Host Group within which + this AKS Cluster should be created. Changing this forces + a new resource to be created. + type: string + kubeletConfig: + description: A kubelet_config block as defined below. temporary_name_for_rotation + must be specified when changing this block. 
+ properties: + allowedUnsafeSysctls: + description: Specifies the allow list of unsafe sysctls + command or patterns (ending in *). + items: + type: string + type: array + x-kubernetes-list-type: set + containerLogMaxLine: + description: Specifies the maximum number of container + log files that can be present for a container. must + be at least 2. + type: number + containerLogMaxSizeMb: + description: Specifies the maximum size (e.g. 10MB) of + container log file before it is rotated. + type: number + cpuCfsQuotaEnabled: + description: Is CPU CFS quota enforcement for containers + enabled? + type: boolean + cpuCfsQuotaPeriod: + description: Specifies the CPU CFS quota period value. + type: string + cpuManagerPolicy: + description: Specifies the CPU Manager policy to use. + Possible values are none and static,. + type: string + imageGcHighThreshold: + description: Specifies the percent of disk usage above + which image garbage collection is always run. Must be + between 0 and 100. + type: number + imageGcLowThreshold: + description: Specifies the percent of disk usage lower + than which image garbage collection is never run. Must + be between 0 and 100. + type: number + podMaxPid: + description: Specifies the maximum number of processes + per pod. + type: number + topologyManagerPolicy: + description: Specifies the Topology Manager policy to + use. Possible values are none, best-effort, restricted + or single-numa-node. + type: string + type: object + kubeletDiskType: + description: The type of disk used by kubelet. Possible values + are OS and Temporary. + type: string + linuxOsConfig: + description: A linux_os_config block as defined below. temporary_name_for_rotation + must be specified when changing this block. + properties: + swapFileSizeMb: + description: Specifies the size of the swap file on each + node in MB. + type: number + sysctlConfig: + description: A sysctl_config block as defined below. 
+ properties: + fsAioMaxNr: + description: The sysctl setting fs.aio-max-nr. Must + be between 65536 and 6553500. + type: number + fsFileMax: + description: The sysctl setting fs.file-max. Must + be between 8192 and 12000500. + type: number + fsInotifyMaxUserWatches: + description: The sysctl setting fs.inotify.max_user_watches. + Must be between 781250 and 2097152. + type: number + fsNrOpen: + description: The sysctl setting fs.nr_open. Must be + between 8192 and 20000500. + type: number + kernelThreadsMax: + description: The sysctl setting kernel.threads-max. + Must be between 20 and 513785. + type: number + netCoreNetdevMaxBacklog: + description: The sysctl setting net.core.netdev_max_backlog. + Must be between 1000 and 3240000. + type: number + netCoreOptmemMax: + description: The sysctl setting net.core.optmem_max. + Must be between 20480 and 4194304. + type: number + netCoreRmemDefault: + description: The sysctl setting net.core.rmem_default. + Must be between 212992 and 134217728. + type: number + netCoreRmemMax: + description: The sysctl setting net.core.rmem_max. + Must be between 212992 and 134217728. + type: number + netCoreSomaxconn: + description: The sysctl setting net.core.somaxconn. + Must be between 4096 and 3240000. + type: number + netCoreWmemDefault: + description: The sysctl setting net.core.wmem_default. + Must be between 212992 and 134217728. + type: number + netCoreWmemMax: + description: The sysctl setting net.core.wmem_max. + Must be between 212992 and 134217728. + type: number + netIpv4IpLocalPortRangeMax: + description: The sysctl setting net.ipv4.ip_local_port_range + max value. Must be between 32768 and 65535. + type: number + netIpv4IpLocalPortRangeMin: + description: The sysctl setting net.ipv4.ip_local_port_range + min value. Must be between 1024 and 60999. + type: number + netIpv4NeighDefaultGcThresh1: + description: The sysctl setting net.ipv4.neigh.default.gc_thresh1. + Must be between 128 and 80000. 
+ type: number + netIpv4NeighDefaultGcThresh2: + description: The sysctl setting net.ipv4.neigh.default.gc_thresh2. + Must be between 512 and 90000. + type: number + netIpv4NeighDefaultGcThresh3: + description: The sysctl setting net.ipv4.neigh.default.gc_thresh3. + Must be between 1024 and 100000. + type: number + netIpv4TcpFinTimeout: + description: The sysctl setting net.ipv4.tcp_fin_timeout. + Must be between 5 and 120. + type: number + netIpv4TcpKeepaliveIntvl: + description: The sysctl setting net.ipv4.tcp_keepalive_intvl. + Must be between 10 and 90. + type: number + netIpv4TcpKeepaliveProbes: + description: The sysctl setting net.ipv4.tcp_keepalive_probes. + Must be between 1 and 15. + type: number + netIpv4TcpKeepaliveTime: + description: The sysctl setting net.ipv4.tcp_keepalive_time. + Must be between 30 and 432000. + type: number + netIpv4TcpMaxSynBacklog: + description: The sysctl setting net.ipv4.tcp_max_syn_backlog. + Must be between 128 and 3240000. + type: number + netIpv4TcpMaxTwBuckets: + description: The sysctl setting net.ipv4.tcp_max_tw_buckets. + Must be between 8000 and 1440000. + type: number + netIpv4TcpTwReuse: + description: The sysctl setting net.ipv4.tcp_tw_reuse. + type: boolean + netNetfilterNfConntrackBuckets: + description: The sysctl setting net.netfilter.nf_conntrack_buckets. + Must be between 65536 and 524288. + type: number + netNetfilterNfConntrackMax: + description: The sysctl setting net.netfilter.nf_conntrack_max. + Must be between 131072 and 2097152. + type: number + vmMaxMapCount: + description: The sysctl setting vm.max_map_count. + Must be between 65530 and 262144. + type: number + vmSwappiness: + description: The sysctl setting vm.swappiness. Must + be between 0 and 100. + type: number + vmVfsCachePressure: + description: The sysctl setting vm.vfs_cache_pressure. + Must be between 0 and 100. 
+ type: number + type: object + transparentHugePageDefrag: + description: specifies the defrag configuration for Transparent + Huge Page. Possible values are always, defer, defer+madvise, + madvise and never. + type: string + transparentHugePageEnabled: + description: Specifies the Transparent Huge Page enabled + configuration. Possible values are always, madvise and + never. + type: string + type: object + maxCount: + description: The maximum number of nodes which should exist + in this Node Pool. If specified this must be between 1 and + 1000. + type: number + maxPods: + description: The maximum number of pods that can run on each + agent. temporary_name_for_rotation must be specified when + changing this property. + type: number + messageOfTheDay: + description: A base64-encoded string which will be written + to /etc/motd after decoding. This allows customization of + the message of the day for Linux nodes. It cannot be specified + for Windows nodes and must be a static string (i.e. will + be printed raw and not executed as a script). Changing this + forces a new resource to be created. + type: string + minCount: + description: The minimum number of nodes which should exist + in this Node Pool. If specified this must be between 1 and + 1000. + type: number + name: + description: The name which should be used for the default + Kubernetes Node Pool. + type: string + nodeCount: + description: The initial number of nodes which should exist + in this Node Pool. If specified this must be between 1 and + 1000 and between min_count and max_count. + type: number + nodeLabels: + additionalProperties: + type: string + description: A map of Kubernetes labels which should be applied + to nodes in the Default Node Pool. + type: object + x-kubernetes-map-type: granular + nodeNetworkProfile: + description: A node_network_profile block as documented below. + properties: + allowedHostPorts: + description: One or more allowed_host_ports blocks as + defined below. 
+ items: + properties: + portEnd: + description: Specifies the end of the port range. + type: number + portStart: + description: Specifies the start of the port range. + type: number + protocol: + description: Specifies the protocol of the port + range. Possible values are TCP and UDP. + type: string + type: object + type: array + applicationSecurityGroupIds: + description: A list of Application Security Group IDs + which should be associated with this Node Pool. + items: + type: string + type: array + nodePublicIpTags: + additionalProperties: + type: string + description: Specifies a mapping of tags to the instance-level + public IPs. Changing this forces a new resource to be + created. + type: object + x-kubernetes-map-type: granular + type: object + nodePublicIpPrefixId: + description: Resource ID for the Public IP Addresses Prefix + for the nodes in this Node Pool. enable_node_public_ip should + be true. Changing this forces a new resource to be created. + type: string + nodeTaints: + items: + type: string + type: array + onlyCriticalAddonsEnabled: + description: Enabling this option will taint default node + pool with CriticalAddonsOnly=true:NoSchedule taint. temporary_name_for_rotation + must be specified when changing this property. + type: boolean + orchestratorVersion: + description: Version of Kubernetes used for the Agents. If + not specified, the default node pool will be created with + the version specified by kubernetes_version. If both are + unspecified, the latest recommended version will be used + at provisioning time (but won't auto-upgrade). AKS does + not require an exact patch version to be specified, minor + version aliases such as 1.22 are also supported. - The minor + version's latest GA patch is automatically chosen in that + case. More details can be found in the documentation. + type: string + osDiskSizeGb: + description: The size of the OS Disk which should be used + for each agent in the Node Pool. 
temporary_name_for_rotation + must be specified when attempting a change. + type: number + osDiskType: + description: The type of disk which should be used for the + Operating System. Possible values are Ephemeral and Managed. + Defaults to Managed. temporary_name_for_rotation must be + specified when attempting a change. + type: string + osSku: + description: Specifies the OS SKU used by the agent pool. + Possible values are AzureLinux, Ubuntu, Windows2019 and + Windows2022. If not specified, the default is Ubuntu if + OSType=Linux or Windows2019 if OSType=Windows. And the default + Windows OSSKU will be changed to Windows2022 after Windows2019 + is deprecated. temporary_name_for_rotation must be specified + when attempting a change. + type: string + podSubnetId: + description: The ID of the Subnet where the pods in the default + Node Pool should exist. + type: string + podSubnetIdRef: + description: Reference to a Subnet in network to populate + podSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + podSubnetIdSelector: + description: Selector for a Subnet in network to populate + podSubnetId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + proximityPlacementGroupId: + description: The ID of the Proximity Placement Group. Changing + this forces a new resource to be created. + type: string + scaleDownMode: + description: Specifies the autoscaling behaviour of the Kubernetes + Cluster. Allowed values are Delete and Deallocate. Defaults + to Delete. + type: string + snapshotId: + description: The ID of the Snapshot which should be used to + create this default Node Pool. temporary_name_for_rotation + must be specified when changing this property. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the Node Pool. + type: object + x-kubernetes-map-type: granular + temporaryNameForRotation: + description: Specifies the name of the temporary node pool + used to cycle the default node pool for VM resizing. 
+ type: string + type: + description: The type of Node Pool which should be created. + Possible values are AvailabilitySet and VirtualMachineScaleSets. + Defaults to VirtualMachineScaleSets. Changing this forces + a new resource to be created. + type: string + ultraSsdEnabled: + description: Used to specify whether the UltraSSD is enabled + in the Default Node Pool. Defaults to false. See the documentation + for more information. temporary_name_for_rotation must be + specified when attempting a change. + type: boolean + upgradeSettings: + description: A upgrade_settings block as documented below. + properties: + maxSurge: + description: The maximum number or percentage of nodes + which will be added to the Node Pool size during an + upgrade. + type: string + type: object + vmSize: + description: The size of the Virtual Machine, such as Standard_DS2_v2. + temporary_name_for_rotation must be specified when attempting + a resize. + type: string + vnetSubnetId: + description: The ID of a Subnet where the Kubernetes Node + Pool should exist. + type: string + vnetSubnetIdRef: + description: Reference to a Subnet in network to populate + vnetSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + vnetSubnetIdSelector: + description: Selector for a Subnet in network to populate + vnetSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + workloadRuntime: + description: Specifies the workload runtime used by the node + pool. Possible values are OCIContainer and KataMshvVmIsolation. + type: string + zones: + description: Specifies a list of Availability Zones in which + this Kubernetes Cluster should be located. temporary_name_for_rotation + must be specified when changing this property. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + diskEncryptionSetId: + description: The ID of the Disk Encryption Set which should be + used for the Nodes and Volumes. More information can be found + in the documentation. Changing this forces a new resource to + be created. 
+ type: string + dnsPrefix: + description: DNS prefix specified when creating the managed cluster. + Possible values must begin and end with a letter or number, + contain only letters, numbers, and hyphens and be between 1 + and 54 characters in length. Changing this forces a new resource + to be created. + type: string + dnsPrefixPrivateCluster: + description: Specifies the DNS prefix to use with private clusters. + Changing this forces a new resource to be created. + type: string + edgeZone: + description: Specifies the Edge Zone within the Azure Region where + this Managed Kubernetes Cluster should exist. Changing this + forces a new resource to be created. + type: string + enablePodSecurityPolicy: + type: boolean + httpApplicationRoutingEnabled: + description: Should HTTP Application Routing be enabled? + type: boolean + httpProxyConfig: + description: A http_proxy_config block as defined below. + properties: + httpProxy: + description: The proxy address to be used when communicating + over HTTP. + type: string + httpsProxy: + description: The proxy address to be used when communicating + over HTTPS. + type: string + noProxy: + description: The list of domains that will not use the proxy + for communication. + items: + type: string + type: array + x-kubernetes-list-type: set + trustedCaSecretRef: + description: The base64 encoded alternative CA certificate + content in PEM format. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + identity: + description: An identity block as defined below. One of either + identity or service_principal must be specified. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Kubernetes Cluster. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Kubernetes Cluster. Possible + values are SystemAssigned or UserAssigned. + type: string + type: object + imageCleanerEnabled: + description: Specifies whether Image Cleaner is enabled. + type: boolean + imageCleanerIntervalHours: + description: Specifies the interval in hours when images should + be cleaned up. Defaults to 48. + type: number + ingressApplicationGateway: + description: An ingress_application_gateway block as defined below. + properties: + gatewayId: + description: The ID of the Application Gateway to integrate + with the ingress controller of this Kubernetes Cluster. + See this page for further details. + type: string + gatewayName: + description: The name of the Application Gateway to be used + or created in the Nodepool Resource Group, which in turn + will be integrated with the ingress controller of this Kubernetes + Cluster. See this page for further details. + type: string + subnetCidr: + description: The subnet CIDR to be used to create an Application + Gateway, which in turn will be integrated with the ingress + controller of this Kubernetes Cluster. See this page for + further details. + type: string + subnetId: + description: The ID of the subnet on which to create an Application + Gateway, which in turn will be integrated with the ingress + controller of this Kubernetes Cluster. See this page for + further details. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + keyManagementService: + description: A key_management_service block as defined below. 
+ For more details, please visit Key Management Service (KMS) + etcd encryption to an AKS cluster. + properties: + keyVaultKeyId: + description: Identifier of Azure Key Vault key. See key identifier + format for more details. + type: string + keyVaultNetworkAccess: + description: Network access of the key vault Network access + of key vault. The possible values are Public and Private. + Public means the key vault allows public access from all + networks. Private means the key vault disables public access + and enables private link. Defaults to Public. + type: string + type: object + keyVaultSecretsProvider: + description: A key_vault_secrets_provider block as defined below. + properties: + secretRotationEnabled: + description: Should the secret store CSI driver on the AKS + cluster be enabled? + type: boolean + secretRotationInterval: + description: The interval to poll for secret rotation. This + attribute is only set when secret_rotation is true. Defaults + to 2m. + type: string + type: object + kubeletIdentity: + description: A kubelet_identity block as defined below. + properties: + clientId: + description: The Client ID of the user-defined Managed Identity + to be assigned to the Kubelets. If not specified a Managed + Identity is created automatically. Changing this forces + a new resource to be created. + type: string + objectId: + description: The Object ID of the user-defined Managed Identity + assigned to the Kubelets.If not specified a Managed Identity + is created automatically. Changing this forces a new resource + to be created. + type: string + userAssignedIdentityId: + description: The ID of the User Assigned Identity assigned + to the Kubelets. If not specified a Managed Identity is + created automatically. Changing this forces a new resource + to be created. + type: string + type: object + kubernetesVersion: + description: Version of Kubernetes specified when creating the + AKS managed cluster. 
If not specified, the latest recommended + version will be used at provisioning time (but won't auto-upgrade). + AKS does not require an exact patch version to be specified, + minor version aliases such as 1.22 are also supported. - The + minor version's latest GA patch is automatically chosen in that + case. More details can be found in the documentation. + type: string + linuxProfile: + description: A linux_profile block as defined below. + properties: + adminUsername: + description: The Admin Username for the Cluster. Changing + this forces a new resource to be created. + type: string + sshKey: + description: An ssh_key block as defined below. Only one is + currently allowed. Changing this will update the key on + all node pools. More information can be found in the documentation. + properties: + keyData: + description: The Public SSH Key used to access the cluster. + type: string + type: object + type: object + localAccountDisabled: + description: If true local accounts will be disabled. See the + documentation for more information. + type: boolean + location: + description: The location where the Managed Kubernetes Cluster + should be created. Changing this forces a new resource to be + created. + type: string + maintenanceWindow: + description: A maintenance_window block as defined below. + properties: + allowed: + description: One or more allowed blocks as defined below. + items: + properties: + day: + description: A day in a week. Possible values are Sunday, + Monday, Tuesday, Wednesday, Thursday, Friday and Saturday. + type: string + hours: + description: An array of hour slots in a day. For example, + specifying 1 will allow maintenance from 1:00am to + 2:00am. Specifying 1, 2 will allow maintenance from + 1:00am to 3:00m. Possible values are between 0 and + 23. + items: + type: number + type: array + x-kubernetes-list-type: set + type: object + type: array + notAllowed: + description: One or more not_allowed block as defined below. 
+ items: + properties: + end: + description: The end of a time span, formatted as an + RFC3339 string. + type: string + start: + description: The start of a time span, formatted as + an RFC3339 string. + type: string + type: object + type: array + type: object + maintenanceWindowAutoUpgrade: + description: A maintenance_window_auto_upgrade block as defined + below. + properties: + dayOfMonth: + description: The day of the month for the maintenance run. + Required in combination with RelativeMonthly frequency. + Value between 0 and 31 (inclusive). + type: number + dayOfWeek: + description: The day of the week for the maintenance run. + Required in combination with weekly frequency. Possible + values are Friday, Monday, Saturday, Sunday, Thursday, Tuesday + and Wednesday. + type: string + duration: + description: The duration of the window for maintenance to + run in hours. + type: number + frequency: + description: Frequency of maintenance. Possible options are + Weekly, AbsoluteMonthly and RelativeMonthly. + type: string + interval: + description: The interval for maintenance runs. Depending + on the frequency this interval is week or month based. + type: number + notAllowed: + description: One or more not_allowed block as defined below. + items: + properties: + end: + description: The end of a time span, formatted as an + RFC3339 string. + type: string + start: + description: The start of a time span, formatted as + an RFC3339 string. + type: string + type: object + type: array + startDate: + description: The date on which the maintenance window begins + to take effect. + type: string + startTime: + description: The time for maintenance to begin, based on the + timezone determined by utc_offset. Format is HH:mm. + type: string + utcOffset: + description: Used to determine the timezone for cluster maintenance. + type: string + weekIndex: + description: |- + Specifies on which instance of the allowed days specified in day_of_week the maintenance occurs. 
Options are First, Second, Third, Fourth, and Last. + Required in combination with relative monthly frequency. + type: string + type: object + maintenanceWindowNodeOs: + description: A maintenance_window_node_os block as defined below. + properties: + dayOfMonth: + description: The day of the month for the maintenance run. + Required in combination with RelativeMonthly frequency. + Value between 0 and 31 (inclusive). + type: number + dayOfWeek: + description: The day of the week for the maintenance run. + Required in combination with weekly frequency. Possible + values are Friday, Monday, Saturday, Sunday, Thursday, Tuesday + and Wednesday. + type: string + duration: + description: The duration of the window for maintenance to + run in hours. + type: number + frequency: + description: Frequency of maintenance. Possible options are + Daily, Weekly, AbsoluteMonthly and RelativeMonthly. + type: string + interval: + description: The interval for maintenance runs. Depending + on the frequency this interval is week or month based. + type: number + notAllowed: + description: One or more not_allowed block as defined below. + items: + properties: + end: + description: The end of a time span, formatted as an + RFC3339 string. + type: string + start: + description: The start of a time span, formatted as + an RFC3339 string. + type: string + type: object + type: array + startDate: + description: The date on which the maintenance window begins + to take effect. + type: string + startTime: + description: The time for maintenance to begin, based on the + timezone determined by utc_offset. Format is HH:mm. + type: string + utcOffset: + description: Used to determine the timezone for cluster maintenance. + type: string + weekIndex: + description: The week in the month used for the maintenance + run. Options are First, Second, Third, Fourth, and Last. + type: string + type: object + microsoftDefender: + description: A microsoft_defender block as defined below. 
+ properties: + logAnalyticsWorkspaceId: + description: Specifies the ID of the Log Analytics Workspace + where the audit logs collected by Microsoft Defender should + be sent to. + type: string + type: object + monitorMetrics: + description: Specifies a Prometheus add-on profile for the Kubernetes + Cluster. A monitor_metrics block as defined below. + properties: + annotationsAllowed: + description: Specifies a comma-separated list of Kubernetes + annotation keys that will be used in the resource's labels + metric. + type: string + labelsAllowed: + description: Specifies a Comma-separated list of additional + Kubernetes label keys that will be used in the resource's + labels metric. + type: string + type: object + networkProfile: + description: A network_profile block as defined below. + properties: + dnsServiceIp: + description: IP address within the Kubernetes service address + range that will be used by cluster service discovery (kube-dns). + Changing this forces a new resource to be created. + type: string + dockerBridgeCidr: + description: IP address (in CIDR notation) used as the Docker + bridge IP address on nodes. Changing this forces a new resource + to be created. + type: string + ebpfDataPlane: + description: Specifies the eBPF data plane used for building + the Kubernetes network. Possible value is cilium. Disabling + this forces a new resource to be created. + type: string + ipVersions: + description: Specifies a list of IP versions the Kubernetes + Cluster will use to assign IP addresses to its nodes and + pods. Possible values are IPv4 and/or IPv6. IPv4 must always + be specified. Changing this forces a new resource to be + created. + items: + type: string + type: array + loadBalancerProfile: + description: A load_balancer_profile block as defined below. + This can only be specified when load_balancer_sku is set + to standard. Changing this forces a new resource to be created. 
+ properties: + idleTimeoutInMinutes: + description: Desired outbound flow idle timeout in minutes + for the cluster load balancer. Must be between 4 and + 120 inclusive. Defaults to 4. + type: number + managedOutboundIpCount: + description: Count of desired managed outbound IPs for + the cluster load balancer. Must be between 1 and 100 + inclusive. + type: number + managedOutboundIpv6Count: + description: The desired number of IPv6 outbound IPs created + and managed by Azure for the cluster load balancer. + Must be in the range of 1 to 100 (inclusive). The default + value is 0 for single-stack and 1 for dual-stack. + type: number + outboundIpAddressIds: + description: The ID of the Public IP Addresses which should + be used for outbound communication for the cluster load + balancer. + items: + type: string + type: array + x-kubernetes-list-type: set + outboundIpPrefixIds: + description: The ID of the outbound Public IP Address + Prefixes which should be used for the cluster load balancer. + items: + type: string + type: array + x-kubernetes-list-type: set + outboundPortsAllocated: + description: Number of desired SNAT port for each VM in + the clusters load balancer. Must be between 0 and 64000 + inclusive. Defaults to 0. + type: number + type: object + loadBalancerSku: + description: Specifies the SKU of the Load Balancer used for + this Kubernetes Cluster. Possible values are basic and standard. + Defaults to standard. Changing this forces a new resource + to be created. + type: string + natGatewayProfile: + description: A nat_gateway_profile block as defined below. + This can only be specified when load_balancer_sku is set + to standard and outbound_type is set to managedNATGateway + or userAssignedNATGateway. Changing this forces a new resource + to be created. + properties: + idleTimeoutInMinutes: + description: Desired outbound flow idle timeout in minutes + for the cluster load balancer. Must be between 4 and + 120 inclusive. Defaults to 4. 
+ type: number + managedOutboundIpCount: + description: Count of desired managed outbound IPs for + the cluster load balancer. Must be between 1 and 100 + inclusive. + type: number + type: object + networkMode: + description: Network mode to be used with Azure CNI. Possible + values are bridge and transparent. Changing this forces + a new resource to be created. + type: string + networkPlugin: + description: Network plugin to use for networking. Currently + supported values are azure, kubenet and none. Changing this + forces a new resource to be created. + type: string + networkPluginMode: + description: Specifies the network plugin mode used for building + the Kubernetes network. Possible value is overlay. + type: string + networkPolicy: + description: Sets up network policy to be used with Azure + CNI. Network policy allows us to control the traffic flow + between pods. Currently supported values are calico, azure + and cilium. + type: string + outboundType: + description: The outbound (egress) routing method which should + be used for this Kubernetes Cluster. Possible values are + loadBalancer, userDefinedRouting, managedNATGateway and + userAssignedNATGateway. Defaults to loadBalancer. More information + on supported migration paths for outbound_type can be found + in this documentation. + type: string + podCidr: + description: The CIDR to use for pod IP addresses. This field + can only be set when network_plugin is set to kubenet. Changing + this forces a new resource to be created. + type: string + podCidrs: + description: A list of CIDRs to use for pod IP addresses. + For single-stack networking a single IPv4 CIDR is expected. + For dual-stack networking an IPv4 and IPv6 CIDR are expected. + Changing this forces a new resource to be created. + items: + type: string + type: array + serviceCidr: + description: The Network Range used by the Kubernetes service. + Changing this forces a new resource to be created. 
+ type: string + serviceCidrs: + description: A list of CIDRs to use for Kubernetes services. + For single-stack networking a single IPv4 CIDR is expected. + For dual-stack networking an IPv4 and IPv6 CIDR are expected. + Changing this forces a new resource to be created. + items: + type: string + type: array + type: object + nodeOsChannelUpgrade: + description: The upgrade channel for this Kubernetes Cluster Nodes' + OS Image. Possible values are Unmanaged, SecurityPatch, NodeImage + and None. + type: string + nodeResourceGroup: + description: The auto-generated Resource Group which contains + the resources for this Managed Kubernetes Cluster. + type: string + oidcIssuerEnabled: + description: Enable or Disable the OIDC issuer URL + type: boolean + omsAgent: + description: An oms_agent block as defined below. + properties: + logAnalyticsWorkspaceId: + description: The ID of the Log Analytics Workspace which the + OMS Agent should send data to. + type: string + msiAuthForMonitoringEnabled: + description: Is managed identity authentication for monitoring + enabled? + type: boolean + type: object + openServiceMeshEnabled: + description: Is Open Service Mesh enabled? For more details, please + visit Open Service Mesh for AKS. + type: boolean + privateClusterEnabled: + description: Should this Kubernetes Cluster have its API server + only exposed on internal IP addresses? This provides a Private + IP Address for the Kubernetes API on the Virtual Network where + the Kubernetes Cluster is located. Defaults to false. Changing + this forces a new resource to be created. + type: boolean + privateClusterPublicFqdnEnabled: + description: Specifies whether a Public FQDN for this Private + Cluster should be added. Defaults to false. + type: boolean + privateDnsZoneId: + description: Either the ID of Private DNS Zone which should be + delegated to this Cluster, System to have AKS manage this or + None. 
In case of None you will need to bring your own DNS server + and set up resolving, otherwise, the cluster will have issues + after provisioning. Changing this forces a new resource to be + created. + type: string + privateDnsZoneIdRef: + description: Reference to a PrivateDNSZone in network to populate + privateDnsZoneId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + privateDnsZoneIdSelector: + description: Selector for a PrivateDNSZone in network to populate + privateDnsZoneId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + publicNetworkAccessEnabled: + description: Whether public network access is allowed for this + Kubernetes Cluster. Defaults to true. + type: boolean + resourceGroupName: + description: Specifies the Resource Group where the Managed Kubernetes + Cluster should exist. Changing this forces a new resource to + be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + roleBasedAccessControlEnabled: + description: Whether Role Based Access Control for the Kubernetes + Cluster should be enabled. Defaults to true. Changing this forces + a new resource to be created. + type: boolean + runCommandEnabled: + description: Whether to enable run command for the cluster or + not. Defaults to true. + type: boolean + serviceMeshProfile: + description: A service_mesh_profile block as defined below. + properties: + externalIngressGatewayEnabled: + description: Is Istio External Ingress Gateway enabled? + type: boolean + internalIngressGatewayEnabled: + description: Is Istio Internal Ingress Gateway enabled? + type: boolean + mode: + description: The mode of the service mesh. Possible value + is Istio. + type: string + type: object + servicePrincipal: + description: A service_principal block as documented below. 
One + of either identity or service_principal must be specified. + properties: + clientId: + description: The Client ID for the Service Principal. + type: string + clientSecretSecretRef: + description: The Client Secret for the Service Principal. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + required: + - clientSecretSecretRef + type: object + skuTier: + description: The SKU Tier that should be used for this Kubernetes + Cluster. Possible values are Free, Standard (which includes + the Uptime SLA) and Premium. Defaults to Free. + type: string + storageProfile: + description: A storage_profile block as defined below. + properties: + blobDriverEnabled: + description: Is the Blob CSI driver enabled? Defaults to false. + type: boolean + diskDriverEnabled: + description: Is the Disk CSI driver enabled? Defaults to true. + type: boolean + diskDriverVersion: + description: Disk CSI Driver version to be used. Possible + values are v1 and v2. Defaults to v1. + type: string + fileDriverEnabled: + description: Is the File CSI driver enabled? Defaults to true. + type: boolean + snapshotControllerEnabled: + description: Is the Snapshot Controller enabled? Defaults + to true. + type: boolean + type: object + supportPlan: + description: Specifies the support plan which should be used for + this Kubernetes Cluster. Possible values are KubernetesOfficial + and AKSLongTermSupport. Defaults to KubernetesOfficial. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + webAppRouting: + description: A web_app_routing block as defined below. 
+ properties: + dnsZoneId: + description: Specifies the ID of the DNS Zone in which DNS + entries are created for applications deployed to the cluster + when Web App Routing is enabled. For Bring-Your-Own DNS + zones this property should be set to an empty string "". + type: string + type: object + windowsProfile: + description: A windows_profile block as defined below. + properties: + adminPasswordSecretRef: + description: The Admin Password for Windows VMs. Length must + be between 14 and 123 characters. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + adminUsername: + description: The Admin Username for Windows VMs. Changing + this forces a new resource to be created. + type: string + gmsa: + description: A gmsa block as defined below. + properties: + dnsServer: + description: Specifies the DNS server for Windows gMSA. + Set this to an empty string if you have configured the + DNS server in the VNet which was used to create the + managed cluster. + type: string + rootDomain: + description: Specifies the root domain name for Windows + gMSA. Set this to an empty string if you have configured + the DNS server in the VNet which was used to create + the managed cluster. + type: string + type: object + license: + description: Specifies the type of on-premise license which + should be used for Node Pool Windows Virtual Machine. At + this time the only possible value is Windows_Server. + type: string + type: object + workloadAutoscalerProfile: + description: A workload_autoscaler_profile block defined below. + properties: + kedaEnabled: + description: Specifies whether KEDA Autoscaler can be used + for workloads. + type: boolean + verticalPodAutoscalerEnabled: + description: Specifies whether Vertical Pod Autoscaler should + be enabled. 
+ type: boolean + type: object + workloadIdentityEnabled: + description: Specifies whether Azure AD Workload Identity should + be enabled for the Cluster. Defaults to false. + type: boolean + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + aciConnectorLinux: + description: A aci_connector_linux block as defined below. For + more details, please visit Create and configure an AKS cluster + to use virtual nodes. + properties: + subnetName: + description: The subnet name for the virtual nodes to run. + type: string + subnetNameRef: + description: Reference to a Subnet in network to populate + subnetName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetNameSelector: + description: Selector for a Subnet in network to populate + subnetName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + apiServerAccessProfile: + description: An api_server_access_profile block as defined below. + properties: + authorizedIpRanges: + description: Set of authorized IP ranges to allow access to + API server, e.g. ["198.51.100.0/24"]. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The ID of the Subnet where the API server endpoint + is delegated to. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + vnetIntegrationEnabled: + description: Should API Server VNet Integration be enabled? + For more details please visit Use API Server VNet Integration. + type: boolean + type: object + apiServerAuthorizedIpRanges: + description: Deprecated in favor of `spec.forProvider.apiServerAccessProfile[0].authorizedIpRanges` + items: + type: string + type: array + x-kubernetes-list-type: set + autoScalerProfile: + description: A auto_scaler_profile block as defined below. + properties: + balanceSimilarNodeGroups: + description: Detect similar node groups and balance the number + of nodes between them. Defaults to false. + type: boolean + emptyBulkDeleteMax: + description: Maximum number of empty nodes that can be deleted + at the same time. Defaults to 10. + type: string + expander: + description: Expander to use. Possible values are least-waste, + priority, most-pods and random. Defaults to random. + type: string + maxGracefulTerminationSec: + description: Maximum number of seconds the cluster autoscaler + waits for pod termination when trying to scale down a node. + Defaults to 600. + type: string + maxNodeProvisioningTime: + description: Maximum time the autoscaler waits for a node + to be provisioned. Defaults to 15m. + type: string + maxUnreadyNodes: + description: Maximum Number of allowed unready nodes. Defaults + to 3. + type: number + maxUnreadyPercentage: + description: Maximum percentage of unready nodes the cluster + autoscaler will stop if the percentage is exceeded. Defaults + to 45. + type: number + newPodScaleUpDelay: + description: For scenarios like burst/batch scale where you + don't want CA to act before the kubernetes scheduler could + schedule all the pods, you can tell CA to ignore unscheduled + pods before they're a certain age. Defaults to 10s. 
+ type: string + scaleDownDelayAfterAdd: + description: How long after the scale up of AKS nodes the + scale down evaluation resumes. Defaults to 10m. + type: string + scaleDownDelayAfterDelete: + description: How long after node deletion that scale down + evaluation resumes. Defaults to the value used for scan_interval. + type: string + scaleDownDelayAfterFailure: + description: How long after scale down failure that scale + down evaluation resumes. Defaults to 3m. + type: string + scaleDownUnneeded: + description: How long a node should be unneeded before it + is eligible for scale down. Defaults to 10m. + type: string + scaleDownUnready: + description: How long an unready node should be unneeded before + it is eligible for scale down. Defaults to 20m. + type: string + scaleDownUtilizationThreshold: + description: Node utilization level, defined as sum of requested + resources divided by capacity, below which a node can be + considered for scale down. Defaults to 0.5. + type: string + scanInterval: + description: How often the AKS Cluster should be re-evaluated + for scale up/down. Defaults to 10s. + type: string + skipNodesWithLocalStorage: + description: If true cluster autoscaler will never delete + nodes with pods with local storage, for example, EmptyDir + or HostPath. Defaults to true. + type: boolean + skipNodesWithSystemPods: + description: If true cluster autoscaler will never delete + nodes with pods from kube-system (except for DaemonSet or + mirror pods). Defaults to true. + type: boolean + type: object + automaticChannelUpgrade: + description: The upgrade channel for this Kubernetes Cluster. + Possible values are patch, rapid, node-image and stable. Omitting + this field sets this value to none. + type: string + azureActiveDirectoryRoleBasedAccessControl: + description: A azure_active_directory_role_based_access_control + block as defined below. 
+ properties: + adminGroupObjectIds: + description: A list of Object IDs of Azure Active Directory + Groups which should have Admin Role on the Cluster. + items: + type: string + type: array + azureRbacEnabled: + description: Is Role Based Access Control based on Azure AD + enabled? + type: boolean + clientAppId: + description: The Client ID of an Azure Active Directory Application. + type: string + managed: + description: Is the Azure Active Directory integration Managed, + meaning that Azure will create/manage the Service Principal + used for integration. + type: boolean + serverAppId: + description: The Server ID of an Azure Active Directory Application. + type: string + tenantId: + description: The Tenant ID used for Azure Active Directory + Application. If this isn't specified the Tenant ID of the + current Subscription is used. + type: string + type: object + azurePolicyEnabled: + description: Should the Azure Policy Add-On be enabled? For more + details please visit Understand Azure Policy for Azure Kubernetes + Service + type: boolean + confidentialComputing: + description: A confidential_computing block as defined below. + For more details please see the documentation + properties: + sgxQuoteHelperEnabled: + description: Should the SGX quote helper be enabled? + type: boolean + type: object + customCaTrustCertificatesBase64: + description: A list of up to 10 base64 encoded CAs that will be + added to the trust store on nodes with the custom_ca_trust_enabled + feature enabled. + items: + type: string + type: array + defaultNodePool: + description: A default_node_pool block as defined below. + properties: + capacityReservationGroupId: + description: Specifies the ID of the Capacity Reservation + Group within which this AKS Cluster should be created. Changing + this forces a new resource to be created. + type: string + customCaTrustEnabled: + description: Specifies whether to trust a Custom CA. 
+ type: boolean + enableAutoScaling: + description: Should the Kubernetes Auto Scaler be enabled + for this Node Pool? + type: boolean + enableHostEncryption: + description: Should the nodes in the Default Node Pool have + host encryption enabled? temporary_name_for_rotation must + be specified when changing this property. + type: boolean + enableNodePublicIp: + description: Should nodes in this Node Pool have a Public + IP Address? temporary_name_for_rotation must be specified + when changing this property. + type: boolean + fipsEnabled: + description: Should the nodes in this Node Pool have Federal + Information Processing Standard enabled? temporary_name_for_rotation + must be specified when changing this block. Changing this + forces a new resource to be created. + type: boolean + gpuInstance: + description: Specifies the GPU MIG instance profile for supported + GPU VM SKU. The allowed values are MIG1g, MIG2g, MIG3g, + MIG4g and MIG7g. Changing this forces a new resource to + be created. + type: string + hostGroupId: + description: Specifies the ID of the Host Group within which + this AKS Cluster should be created. Changing this forces + a new resource to be created. + type: string + kubeletConfig: + description: A kubelet_config block as defined below. temporary_name_for_rotation + must be specified when changing this block. + properties: + allowedUnsafeSysctls: + description: Specifies the allow list of unsafe sysctls + command or patterns (ending in *). + items: + type: string + type: array + x-kubernetes-list-type: set + containerLogMaxLine: + description: Specifies the maximum number of container + log files that can be present for a container. must + be at least 2. + type: number + containerLogMaxSizeMb: + description: Specifies the maximum size (e.g. 10MB) of + container log file before it is rotated. + type: number + cpuCfsQuotaEnabled: + description: Is CPU CFS quota enforcement for containers + enabled? 
+ type: boolean + cpuCfsQuotaPeriod: + description: Specifies the CPU CFS quota period value. + type: string + cpuManagerPolicy: + description: Specifies the CPU Manager policy to use. + Possible values are none and static,. + type: string + imageGcHighThreshold: + description: Specifies the percent of disk usage above + which image garbage collection is always run. Must be + between 0 and 100. + type: number + imageGcLowThreshold: + description: Specifies the percent of disk usage lower + than which image garbage collection is never run. Must + be between 0 and 100. + type: number + podMaxPid: + description: Specifies the maximum number of processes + per pod. + type: number + topologyManagerPolicy: + description: Specifies the Topology Manager policy to + use. Possible values are none, best-effort, restricted + or single-numa-node. + type: string + type: object + kubeletDiskType: + description: The type of disk used by kubelet. Possible values + are OS and Temporary. + type: string + linuxOsConfig: + description: A linux_os_config block as defined below. temporary_name_for_rotation + must be specified when changing this block. + properties: + swapFileSizeMb: + description: Specifies the size of the swap file on each + node in MB. + type: number + sysctlConfig: + description: A sysctl_config block as defined below. + properties: + fsAioMaxNr: + description: The sysctl setting fs.aio-max-nr. Must + be between 65536 and 6553500. + type: number + fsFileMax: + description: The sysctl setting fs.file-max. Must + be between 8192 and 12000500. + type: number + fsInotifyMaxUserWatches: + description: The sysctl setting fs.inotify.max_user_watches. + Must be between 781250 and 2097152. + type: number + fsNrOpen: + description: The sysctl setting fs.nr_open. Must be + between 8192 and 20000500. + type: number + kernelThreadsMax: + description: The sysctl setting kernel.threads-max. + Must be between 20 and 513785. 
+ type: number + netCoreNetdevMaxBacklog: + description: The sysctl setting net.core.netdev_max_backlog. + Must be between 1000 and 3240000. + type: number + netCoreOptmemMax: + description: The sysctl setting net.core.optmem_max. + Must be between 20480 and 4194304. + type: number + netCoreRmemDefault: + description: The sysctl setting net.core.rmem_default. + Must be between 212992 and 134217728. + type: number + netCoreRmemMax: + description: The sysctl setting net.core.rmem_max. + Must be between 212992 and 134217728. + type: number + netCoreSomaxconn: + description: The sysctl setting net.core.somaxconn. + Must be between 4096 and 3240000. + type: number + netCoreWmemDefault: + description: The sysctl setting net.core.wmem_default. + Must be between 212992 and 134217728. + type: number + netCoreWmemMax: + description: The sysctl setting net.core.wmem_max. + Must be between 212992 and 134217728. + type: number + netIpv4IpLocalPortRangeMax: + description: The sysctl setting net.ipv4.ip_local_port_range + max value. Must be between 32768 and 65535. + type: number + netIpv4IpLocalPortRangeMin: + description: The sysctl setting net.ipv4.ip_local_port_range + min value. Must be between 1024 and 60999. + type: number + netIpv4NeighDefaultGcThresh1: + description: The sysctl setting net.ipv4.neigh.default.gc_thresh1. + Must be between 128 and 80000. + type: number + netIpv4NeighDefaultGcThresh2: + description: The sysctl setting net.ipv4.neigh.default.gc_thresh2. + Must be between 512 and 90000. + type: number + netIpv4NeighDefaultGcThresh3: + description: The sysctl setting net.ipv4.neigh.default.gc_thresh3. + Must be between 1024 and 100000. + type: number + netIpv4TcpFinTimeout: + description: The sysctl setting net.ipv4.tcp_fin_timeout. + Must be between 5 and 120. + type: number + netIpv4TcpKeepaliveIntvl: + description: The sysctl setting net.ipv4.tcp_keepalive_intvl. + Must be between 10 and 90. 
+ type: number + netIpv4TcpKeepaliveProbes: + description: The sysctl setting net.ipv4.tcp_keepalive_probes. + Must be between 1 and 15. + type: number + netIpv4TcpKeepaliveTime: + description: The sysctl setting net.ipv4.tcp_keepalive_time. + Must be between 30 and 432000. + type: number + netIpv4TcpMaxSynBacklog: + description: The sysctl setting net.ipv4.tcp_max_syn_backlog. + Must be between 128 and 3240000. + type: number + netIpv4TcpMaxTwBuckets: + description: The sysctl setting net.ipv4.tcp_max_tw_buckets. + Must be between 8000 and 1440000. + type: number + netIpv4TcpTwReuse: + description: The sysctl setting net.ipv4.tcp_tw_reuse. + type: boolean + netNetfilterNfConntrackBuckets: + description: The sysctl setting net.netfilter.nf_conntrack_buckets. + Must be between 65536 and 524288. + type: number + netNetfilterNfConntrackMax: + description: The sysctl setting net.netfilter.nf_conntrack_max. + Must be between 131072 and 2097152. + type: number + vmMaxMapCount: + description: The sysctl setting vm.max_map_count. + Must be between 65530 and 262144. + type: number + vmSwappiness: + description: The sysctl setting vm.swappiness. Must + be between 0 and 100. + type: number + vmVfsCachePressure: + description: The sysctl setting vm.vfs_cache_pressure. + Must be between 0 and 100. + type: number + type: object + transparentHugePageDefrag: + description: specifies the defrag configuration for Transparent + Huge Page. Possible values are always, defer, defer+madvise, + madvise and never. + type: string + transparentHugePageEnabled: + description: Specifies the Transparent Huge Page enabled + configuration. Possible values are always, madvise and + never. + type: string + type: object + maxCount: + description: The maximum number of nodes which should exist + in this Node Pool. If specified this must be between 1 and + 1000. + type: number + maxPods: + description: The maximum number of pods that can run on each + agent. 
temporary_name_for_rotation must be specified when + changing this property. + type: number + messageOfTheDay: + description: A base64-encoded string which will be written + to /etc/motd after decoding. This allows customization of + the message of the day for Linux nodes. It cannot be specified + for Windows nodes and must be a static string (i.e. will + be printed raw and not executed as a script). Changing this + forces a new resource to be created. + type: string + minCount: + description: The minimum number of nodes which should exist + in this Node Pool. If specified this must be between 1 and + 1000. + type: number + name: + description: The name which should be used for the default + Kubernetes Node Pool. + type: string + nodeCount: + description: The initial number of nodes which should exist + in this Node Pool. If specified this must be between 1 and + 1000 and between min_count and max_count. + type: number + nodeLabels: + additionalProperties: + type: string + description: A map of Kubernetes labels which should be applied + to nodes in the Default Node Pool. + type: object + x-kubernetes-map-type: granular + nodeNetworkProfile: + description: A node_network_profile block as documented below. + properties: + allowedHostPorts: + description: One or more allowed_host_ports blocks as + defined below. + items: + properties: + portEnd: + description: Specifies the end of the port range. + type: number + portStart: + description: Specifies the start of the port range. + type: number + protocol: + description: Specifies the protocol of the port + range. Possible values are TCP and UDP. + type: string + type: object + type: array + applicationSecurityGroupIds: + description: A list of Application Security Group IDs + which should be associated with this Node Pool. + items: + type: string + type: array + nodePublicIpTags: + additionalProperties: + type: string + description: Specifies a mapping of tags to the instance-level + public IPs. 
Changing this forces a new resource to be + created. + type: object + x-kubernetes-map-type: granular + type: object + nodePublicIpPrefixId: + description: Resource ID for the Public IP Addresses Prefix + for the nodes in this Node Pool. enable_node_public_ip should + be true. Changing this forces a new resource to be created. + type: string + nodeTaints: + items: + type: string + type: array + onlyCriticalAddonsEnabled: + description: Enabling this option will taint default node + pool with CriticalAddonsOnly=true:NoSchedule taint. temporary_name_for_rotation + must be specified when changing this property. + type: boolean + orchestratorVersion: + description: Version of Kubernetes used for the Agents. If + not specified, the default node pool will be created with + the version specified by kubernetes_version. If both are + unspecified, the latest recommended version will be used + at provisioning time (but won't auto-upgrade). AKS does + not require an exact patch version to be specified, minor + version aliases such as 1.22 are also supported. - The minor + version's latest GA patch is automatically chosen in that + case. More details can be found in the documentation. + type: string + osDiskSizeGb: + description: The size of the OS Disk which should be used + for each agent in the Node Pool. temporary_name_for_rotation + must be specified when attempting a change. + type: number + osDiskType: + description: The type of disk which should be used for the + Operating System. Possible values are Ephemeral and Managed. + Defaults to Managed. temporary_name_for_rotation must be + specified when attempting a change. + type: string + osSku: + description: Specifies the OS SKU used by the agent pool. + Possible values are AzureLinux, Ubuntu, Windows2019 and + Windows2022. If not specified, the default is Ubuntu if + OSType=Linux or Windows2019 if OSType=Windows. And the default + Windows OSSKU will be changed to Windows2022 after Windows2019 + is deprecated. 
temporary_name_for_rotation must be specified + when attempting a change. + type: string + podSubnetId: + description: The ID of the Subnet where the pods in the default + Node Pool should exist. + type: string + podSubnetIdRef: + description: Reference to a Subnet in network to populate + podSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + podSubnetIdSelector: + description: Selector for a Subnet in network to populate + podSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + proximityPlacementGroupId: + description: The ID of the Proximity Placement Group. Changing + this forces a new resource to be created. + type: string + scaleDownMode: + description: Specifies the autoscaling behaviour of the Kubernetes + Cluster. Allowed values are Delete and Deallocate. Defaults + to Delete. + type: string + snapshotId: + description: The ID of the Snapshot which should be used to + create this default Node Pool. temporary_name_for_rotation + must be specified when changing this property. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the Node Pool. + type: object + x-kubernetes-map-type: granular + temporaryNameForRotation: + description: Specifies the name of the temporary node pool + used to cycle the default node pool for VM resizing. + type: string + type: + description: The type of Node Pool which should be created. + Possible values are AvailabilitySet and VirtualMachineScaleSets. + Defaults to VirtualMachineScaleSets. Changing this forces + a new resource to be created. + type: string + ultraSsdEnabled: + description: Used to specify whether the UltraSSD is enabled + in the Default Node Pool. Defaults to false. See the documentation + for more information. temporary_name_for_rotation must be + specified when attempting a change. + type: boolean + upgradeSettings: + description: A upgrade_settings block as documented below. + properties: + maxSurge: + description: The maximum number or percentage of nodes + which will be added to the Node Pool size during an + upgrade. 
+ type: string + type: object + vmSize: + description: The size of the Virtual Machine, such as Standard_DS2_v2. + temporary_name_for_rotation must be specified when attempting + a resize. + type: string + vnetSubnetId: + description: The ID of a Subnet where the Kubernetes Node + Pool should exist. + type: string + vnetSubnetIdRef: + description: Reference to a Subnet in network to populate + vnetSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + vnetSubnetIdSelector: + description: Selector for a Subnet in network to populate + vnetSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + workloadRuntime: + description: Specifies the workload runtime used by the node + pool. Possible values are OCIContainer and KataMshvVmIsolation. + type: string + zones: + description: Specifies a list of Availability Zones in which + this Kubernetes Cluster should be located. temporary_name_for_rotation + must be specified when changing this property. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + diskEncryptionSetId: + description: The ID of the Disk Encryption Set which should be + used for the Nodes and Volumes. More information can be found + in the documentation. Changing this forces a new resource to + be created. + type: string + dnsPrefix: + description: DNS prefix specified when creating the managed cluster. + Possible values must begin and end with a letter or number, + contain only letters, numbers, and hyphens and be between 1 + and 54 characters in length. Changing this forces a new resource + to be created. + type: string + dnsPrefixPrivateCluster: + description: Specifies the DNS prefix to use with private clusters. + Changing this forces a new resource to be created. + type: string + edgeZone: + description: Specifies the Edge Zone within the Azure Region where + this Managed Kubernetes Cluster should exist. Changing this + forces a new resource to be created. 
+ type: string + enablePodSecurityPolicy: + type: boolean + httpApplicationRoutingEnabled: + description: Should HTTP Application Routing be enabled? + type: boolean + httpProxyConfig: + description: A http_proxy_config block as defined below. + properties: + httpProxy: + description: The proxy address to be used when communicating + over HTTP. + type: string + httpsProxy: + description: The proxy address to be used when communicating + over HTTPS. + type: string + noProxy: + description: The list of domains that will not use the proxy + for communication. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + identity: + description: An identity block as defined below. One of either + identity or service_principal must be specified. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Kubernetes Cluster. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Kubernetes Cluster. Possible + values are SystemAssigned or UserAssigned. + type: string + type: object + imageCleanerEnabled: + description: Specifies whether Image Cleaner is enabled. + type: boolean + imageCleanerIntervalHours: + description: Specifies the interval in hours when images should + be cleaned up. Defaults to 48. + type: number + ingressApplicationGateway: + description: An ingress_application_gateway block as defined below. + properties: + gatewayId: + description: The ID of the Application Gateway to integrate + with the ingress controller of this Kubernetes Cluster. + See this page for further details. + type: string + gatewayName: + description: The name of the Application Gateway to be used + or created in the Nodepool Resource Group, which in turn + will be integrated with the ingress controller of this Kubernetes + Cluster. See this page for further details. 
+ type: string + subnetCidr: + description: The subnet CIDR to be used to create an Application + Gateway, which in turn will be integrated with the ingress + controller of this Kubernetes Cluster. See this page for + further details. + type: string + subnetId: + description: The ID of the subnet on which to create an Application + Gateway, which in turn will be integrated with the ingress + controller of this Kubernetes Cluster. See this page for + further details. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + keyManagementService: + description: A key_management_service block as defined below. + For more details, please visit Key Management Service (KMS) + etcd encryption to an AKS cluster. + properties: + keyVaultKeyId: + description: Identifier of Azure Key Vault key. See key identifier + format for more details. + type: string + keyVaultNetworkAccess: + description: Network access of the key vault Network access + of key vault. The possible values are Public and Private. + Public means the key vault allows public access from all + networks. Private means the key vault disables public access + and enables private link. Defaults to Public. + type: string + type: object + keyVaultSecretsProvider: + description: A key_vault_secrets_provider block as defined below. + properties: + secretRotationEnabled: + description: Should the secret store CSI driver on the AKS + cluster be enabled? + type: boolean + secretRotationInterval: + description: The interval to poll for secret rotation. This + attribute is only set when secret_rotation is true. Defaults + to 2m. + type: string + type: object + kubeletIdentity: + description: A kubelet_identity block as defined below. 
+ properties: + clientId: + description: The Client ID of the user-defined Managed Identity + to be assigned to the Kubelets. If not specified a Managed + Identity is created automatically. Changing this forces + a new resource to be created. + type: string + objectId: + description: The Object ID of the user-defined Managed Identity + assigned to the Kubelets.If not specified a Managed Identity + is created automatically. Changing this forces a new resource + to be created. + type: string + userAssignedIdentityId: + description: The ID of the User Assigned Identity assigned + to the Kubelets. If not specified a Managed Identity is + created automatically. Changing this forces a new resource + to be created. + type: string + type: object + kubernetesVersion: + description: Version of Kubernetes specified when creating the + AKS managed cluster. If not specified, the latest recommended + version will be used at provisioning time (but won't auto-upgrade). + AKS does not require an exact patch version to be specified, + minor version aliases such as 1.22 are also supported. - The + minor version's latest GA patch is automatically chosen in that + case. More details can be found in the documentation. + type: string + linuxProfile: + description: A linux_profile block as defined below. + properties: + adminUsername: + description: The Admin Username for the Cluster. Changing + this forces a new resource to be created. + type: string + sshKey: + description: An ssh_key block as defined below. Only one is + currently allowed. Changing this will update the key on + all node pools. More information can be found in the documentation. + properties: + keyData: + description: The Public SSH Key used to access the cluster. + type: string + type: object + type: object + localAccountDisabled: + description: If true local accounts will be disabled. See the + documentation for more information. 
+ type: boolean + location: + description: The location where the Managed Kubernetes Cluster + should be created. Changing this forces a new resource to be + created. + type: string + maintenanceWindow: + description: A maintenance_window block as defined below. + properties: + allowed: + description: One or more allowed blocks as defined below. + items: + properties: + day: + description: A day in a week. Possible values are Sunday, + Monday, Tuesday, Wednesday, Thursday, Friday and Saturday. + type: string + hours: + description: An array of hour slots in a day. For example, + specifying 1 will allow maintenance from 1:00am to + 2:00am. Specifying 1, 2 will allow maintenance from + 1:00am to 3:00m. Possible values are between 0 and + 23. + items: + type: number + type: array + x-kubernetes-list-type: set + type: object + type: array + notAllowed: + description: One or more not_allowed block as defined below. + items: + properties: + end: + description: The end of a time span, formatted as an + RFC3339 string. + type: string + start: + description: The start of a time span, formatted as + an RFC3339 string. + type: string + type: object + type: array + type: object + maintenanceWindowAutoUpgrade: + description: A maintenance_window_auto_upgrade block as defined + below. + properties: + dayOfMonth: + description: The day of the month for the maintenance run. + Required in combination with RelativeMonthly frequency. + Value between 0 and 31 (inclusive). + type: number + dayOfWeek: + description: The day of the week for the maintenance run. + Required in combination with weekly frequency. Possible + values are Friday, Monday, Saturday, Sunday, Thursday, Tuesday + and Wednesday. + type: string + duration: + description: The duration of the window for maintenance to + run in hours. + type: number + frequency: + description: Frequency of maintenance. Possible options are + Weekly, AbsoluteMonthly and RelativeMonthly. 
+ type: string + interval: + description: The interval for maintenance runs. Depending + on the frequency this interval is week or month based. + type: number + notAllowed: + description: One or more not_allowed block as defined below. + items: + properties: + end: + description: The end of a time span, formatted as an + RFC3339 string. + type: string + start: + description: The start of a time span, formatted as + an RFC3339 string. + type: string + type: object + type: array + startDate: + description: The date on which the maintenance window begins + to take effect. + type: string + startTime: + description: The time for maintenance to begin, based on the + timezone determined by utc_offset. Format is HH:mm. + type: string + utcOffset: + description: Used to determine the timezone for cluster maintenance. + type: string + weekIndex: + description: |- + Specifies on which instance of the allowed days specified in day_of_week the maintenance occurs. Options are First, Second, Third, Fourth, and Last. + Required in combination with relative monthly frequency. + type: string + type: object + maintenanceWindowNodeOs: + description: A maintenance_window_node_os block as defined below. + properties: + dayOfMonth: + description: The day of the month for the maintenance run. + Required in combination with RelativeMonthly frequency. + Value between 0 and 31 (inclusive). + type: number + dayOfWeek: + description: The day of the week for the maintenance run. + Required in combination with weekly frequency. Possible + values are Friday, Monday, Saturday, Sunday, Thursday, Tuesday + and Wednesday. + type: string + duration: + description: The duration of the window for maintenance to + run in hours. + type: number + frequency: + description: Frequency of maintenance. Possible options are + Daily, Weekly, AbsoluteMonthly and RelativeMonthly. + type: string + interval: + description: The interval for maintenance runs. 
Depending + on the frequency this interval is week or month based. + type: number + notAllowed: + description: One or more not_allowed block as defined below. + items: + properties: + end: + description: The end of a time span, formatted as an + RFC3339 string. + type: string + start: + description: The start of a time span, formatted as + an RFC3339 string. + type: string + type: object + type: array + startDate: + description: The date on which the maintenance window begins + to take effect. + type: string + startTime: + description: The time for maintenance to begin, based on the + timezone determined by utc_offset. Format is HH:mm. + type: string + utcOffset: + description: Used to determine the timezone for cluster maintenance. + type: string + weekIndex: + description: The week in the month used for the maintenance + run. Options are First, Second, Third, Fourth, and Last. + type: string + type: object + microsoftDefender: + description: A microsoft_defender block as defined below. + properties: + logAnalyticsWorkspaceId: + description: Specifies the ID of the Log Analytics Workspace + where the audit logs collected by Microsoft Defender should + be sent to. + type: string + type: object + monitorMetrics: + description: Specifies a Prometheus add-on profile for the Kubernetes + Cluster. A monitor_metrics block as defined below. + properties: + annotationsAllowed: + description: Specifies a comma-separated list of Kubernetes + annotation keys that will be used in the resource's labels + metric. + type: string + labelsAllowed: + description: Specifies a Comma-separated list of additional + Kubernetes label keys that will be used in the resource's + labels metric. + type: string + type: object + networkProfile: + description: A network_profile block as defined below. + properties: + dnsServiceIp: + description: IP address within the Kubernetes service address + range that will be used by cluster service discovery (kube-dns). 
+ Changing this forces a new resource to be created. + type: string + dockerBridgeCidr: + description: IP address (in CIDR notation) used as the Docker + bridge IP address on nodes. Changing this forces a new resource + to be created. + type: string + ebpfDataPlane: + description: Specifies the eBPF data plane used for building + the Kubernetes network. Possible value is cilium. Disabling + this forces a new resource to be created. + type: string + ipVersions: + description: Specifies a list of IP versions the Kubernetes + Cluster will use to assign IP addresses to its nodes and + pods. Possible values are IPv4 and/or IPv6. IPv4 must always + be specified. Changing this forces a new resource to be + created. + items: + type: string + type: array + loadBalancerProfile: + description: A load_balancer_profile block as defined below. + This can only be specified when load_balancer_sku is set + to standard. Changing this forces a new resource to be created. + properties: + idleTimeoutInMinutes: + description: Desired outbound flow idle timeout in minutes + for the cluster load balancer. Must be between 4 and + 120 inclusive. Defaults to 4. + type: number + managedOutboundIpCount: + description: Count of desired managed outbound IPs for + the cluster load balancer. Must be between 1 and 100 + inclusive. + type: number + managedOutboundIpv6Count: + description: The desired number of IPv6 outbound IPs created + and managed by Azure for the cluster load balancer. + Must be in the range of 1 to 100 (inclusive). The default + value is 0 for single-stack and 1 for dual-stack. + type: number + outboundIpAddressIds: + description: The ID of the Public IP Addresses which should + be used for outbound communication for the cluster load + balancer. + items: + type: string + type: array + x-kubernetes-list-type: set + outboundIpPrefixIds: + description: The ID of the outbound Public IP Address + Prefixes which should be used for the cluster load balancer. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + outboundPortsAllocated: + description: Number of desired SNAT port for each VM in + the clusters load balancer. Must be between 0 and 64000 + inclusive. Defaults to 0. + type: number + type: object + loadBalancerSku: + description: Specifies the SKU of the Load Balancer used for + this Kubernetes Cluster. Possible values are basic and standard. + Defaults to standard. Changing this forces a new resource + to be created. + type: string + natGatewayProfile: + description: A nat_gateway_profile block as defined below. + This can only be specified when load_balancer_sku is set + to standard and outbound_type is set to managedNATGateway + or userAssignedNATGateway. Changing this forces a new resource + to be created. + properties: + idleTimeoutInMinutes: + description: Desired outbound flow idle timeout in minutes + for the cluster load balancer. Must be between 4 and + 120 inclusive. Defaults to 4. + type: number + managedOutboundIpCount: + description: Count of desired managed outbound IPs for + the cluster load balancer. Must be between 1 and 100 + inclusive. + type: number + type: object + networkMode: + description: Network mode to be used with Azure CNI. Possible + values are bridge and transparent. Changing this forces + a new resource to be created. + type: string + networkPlugin: + description: Network plugin to use for networking. Currently + supported values are azure, kubenet and none. Changing this + forces a new resource to be created. + type: string + networkPluginMode: + description: Specifies the network plugin mode used for building + the Kubernetes network. Possible value is overlay. + type: string + networkPolicy: + description: Sets up network policy to be used with Azure + CNI. Network policy allows us to control the traffic flow + between pods. Currently supported values are calico, azure + and cilium. 
+ type: string + outboundType: + description: The outbound (egress) routing method which should + be used for this Kubernetes Cluster. Possible values are + loadBalancer, userDefinedRouting, managedNATGateway and + userAssignedNATGateway. Defaults to loadBalancer. More information + on supported migration paths for outbound_type can be found + in this documentation. + type: string + podCidr: + description: The CIDR to use for pod IP addresses. This field + can only be set when network_plugin is set to kubenet. Changing + this forces a new resource to be created. + type: string + podCidrs: + description: A list of CIDRs to use for pod IP addresses. + For single-stack networking a single IPv4 CIDR is expected. + For dual-stack networking an IPv4 and IPv6 CIDR are expected. + Changing this forces a new resource to be created. + items: + type: string + type: array + serviceCidr: + description: The Network Range used by the Kubernetes service. + Changing this forces a new resource to be created. + type: string + serviceCidrs: + description: A list of CIDRs to use for Kubernetes services. + For single-stack networking a single IPv4 CIDR is expected. + For dual-stack networking an IPv4 and IPv6 CIDR are expected. + Changing this forces a new resource to be created. + items: + type: string + type: array + type: object + nodeOsChannelUpgrade: + description: The upgrade channel for this Kubernetes Cluster Nodes' + OS Image. Possible values are Unmanaged, SecurityPatch, NodeImage + and None. + type: string + nodeResourceGroup: + description: The auto-generated Resource Group which contains + the resources for this Managed Kubernetes Cluster. + type: string + oidcIssuerEnabled: + description: Enable or Disable the OIDC issuer URL + type: boolean + omsAgent: + description: An oms_agent block as defined below. + properties: + logAnalyticsWorkspaceId: + description: The ID of the Log Analytics Workspace which the + OMS Agent should send data to. 
+ type: string + msiAuthForMonitoringEnabled: + description: Is managed identity authentication for monitoring + enabled? + type: boolean + type: object + openServiceMeshEnabled: + description: Is Open Service Mesh enabled? For more details, please + visit Open Service Mesh for AKS. + type: boolean + privateClusterEnabled: + description: Should this Kubernetes Cluster have its API server + only exposed on internal IP addresses? This provides a Private + IP Address for the Kubernetes API on the Virtual Network where + the Kubernetes Cluster is located. Defaults to false. Changing + this forces a new resource to be created. + type: boolean + privateClusterPublicFqdnEnabled: + description: Specifies whether a Public FQDN for this Private + Cluster should be added. Defaults to false. + type: boolean + privateDnsZoneId: + description: Either the ID of Private DNS Zone which should be + delegated to this Cluster, System to have AKS manage this or + None. In case of None you will need to bring your own DNS server + and set up resolving, otherwise, the cluster will have issues + after provisioning. Changing this forces a new resource to be + created. + type: string + privateDnsZoneIdRef: + description: Reference to a PrivateDNSZone in network to populate + privateDnsZoneId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + privateDnsZoneIdSelector: + description: Selector for a PrivateDNSZone in network to populate + privateDnsZoneId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + publicNetworkAccessEnabled: + description: Whether public network access is allowed for this + Kubernetes Cluster. Defaults to true. + type: boolean + roleBasedAccessControlEnabled: + description: Whether Role Based Access Control for the Kubernetes + Cluster should be enabled. Defaults to true. Changing this forces + a new resource to be created. + type: boolean + runCommandEnabled: + description: Whether to enable run command for the cluster or + not. Defaults to true. 
+ type: boolean + serviceMeshProfile: + description: A service_mesh_profile block as defined below. + properties: + externalIngressGatewayEnabled: + description: Is Istio External Ingress Gateway enabled? + type: boolean + internalIngressGatewayEnabled: + description: Is Istio Internal Ingress Gateway enabled? + type: boolean + mode: + description: The mode of the service mesh. Possible value + is Istio. + type: string + type: object + servicePrincipal: + description: A service_principal block as documented below. One + of either identity or service_principal must be specified. + properties: + clientId: + description: The Client ID for the Service Principal. + type: string + type: object + skuTier: + description: The SKU Tier that should be used for this Kubernetes + Cluster. Possible values are Free, Standard (which includes + the Uptime SLA) and Premium. Defaults to Free. + type: string + storageProfile: + description: A storage_profile block as defined below. + properties: + blobDriverEnabled: + description: Is the Blob CSI driver enabled? Defaults to false. + type: boolean + diskDriverEnabled: + description: Is the Disk CSI driver enabled? Defaults to true. + type: boolean + diskDriverVersion: + description: Disk CSI Driver version to be used. Possible + values are v1 and v2. Defaults to v1. + type: string + fileDriverEnabled: + description: Is the File CSI driver enabled? Defaults to true. + type: boolean + snapshotControllerEnabled: + description: Is the Snapshot Controller enabled? Defaults + to true. + type: boolean + type: object + supportPlan: + description: Specifies the support plan which should be used for + this Kubernetes Cluster. Possible values are KubernetesOfficial + and AKSLongTermSupport. Defaults to KubernetesOfficial. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. 
+ type: object + x-kubernetes-map-type: granular + webAppRouting: + description: A web_app_routing block as defined below. + properties: + dnsZoneId: + description: Specifies the ID of the DNS Zone in which DNS + entries are created for applications deployed to the cluster + when Web App Routing is enabled. For Bring-Your-Own DNS + zones this property should be set to an empty string "". + type: string + type: object + windowsProfile: + description: A windows_profile block as defined below. + properties: + adminUsername: + description: The Admin Username for Windows VMs. Changing + this forces a new resource to be created. + type: string + gmsa: + description: A gmsa block as defined below. + properties: + dnsServer: + description: Specifies the DNS server for Windows gMSA. + Set this to an empty string if you have configured the + DNS server in the VNet which was used to create the + managed cluster. + type: string + rootDomain: + description: Specifies the root domain name for Windows + gMSA. Set this to an empty string if you have configured + the DNS server in the VNet which was used to create + the managed cluster. + type: string + type: object + license: + description: Specifies the type of on-premise license which + should be used for Node Pool Windows Virtual Machine. At + this time the only possible value is Windows_Server. + type: string + type: object + workloadAutoscalerProfile: + description: A workload_autoscaler_profile block defined below. + properties: + kedaEnabled: + description: Specifies whether KEDA Autoscaler can be used + for workloads. + type: boolean + verticalPodAutoscalerEnabled: + description: Specifies whether Vertical Pod Autoscaler should + be enabled. + type: boolean + type: object + workloadIdentityEnabled: + description: Specifies whether Azure AD Workload Identity should + be enabled for the Cluster. Defaults to false. 
+ type: boolean + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. 
+ - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.defaultNodePool is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.defaultNodePool) + || (has(self.initProvider) && has(self.initProvider.defaultNodePool))' + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + status: + description: KubernetesClusterStatus defines the observed state of KubernetesCluster. + properties: + atProvider: + properties: + aciConnectorLinux: + description: A aci_connector_linux block as defined below. For + more details, please visit Create and configure an AKS cluster + to use virtual nodes. + properties: + connectorIdentity: + description: A connector_identity block is exported. The exported + attributes are defined below. + items: + properties: + clientId: + description: The Client ID of the user-defined Managed + Identity used by the ACI Connector. + type: string + objectId: + description: The Object ID of the user-defined Managed + Identity used by the ACI Connector. + type: string + userAssignedIdentityId: + description: The ID of the User Assigned Identity used + by the ACI Connector. + type: string + type: object + type: array + subnetName: + description: The subnet name for the virtual nodes to run. + type: string + type: object + apiServerAccessProfile: + description: An api_server_access_profile block as defined below. + properties: + authorizedIpRanges: + description: Set of authorized IP ranges to allow access to + API server, e.g. ["198.51.100.0/24"]. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The ID of the Subnet where the API server endpoint + is delegated to. + type: string + vnetIntegrationEnabled: + description: Should API Server VNet Integration be enabled? + For more details please visit Use API Server VNet Integration. + type: boolean + type: object + apiServerAuthorizedIpRanges: + description: Deprecated in favor of `spec.forProvider.apiServerAccessProfile[0].authorizedIpRanges` + items: + type: string + type: array + x-kubernetes-list-type: set + autoScalerProfile: + description: A auto_scaler_profile block as defined below. + properties: + balanceSimilarNodeGroups: + description: Detect similar node groups and balance the number + of nodes between them. Defaults to false. + type: boolean + emptyBulkDeleteMax: + description: Maximum number of empty nodes that can be deleted + at the same time. Defaults to 10. + type: string + expander: + description: Expander to use. Possible values are least-waste, + priority, most-pods and random. Defaults to random. + type: string + maxGracefulTerminationSec: + description: Maximum number of seconds the cluster autoscaler + waits for pod termination when trying to scale down a node. + Defaults to 600. + type: string + maxNodeProvisioningTime: + description: Maximum time the autoscaler waits for a node + to be provisioned. Defaults to 15m. + type: string + maxUnreadyNodes: + description: Maximum Number of allowed unready nodes. Defaults + to 3. + type: number + maxUnreadyPercentage: + description: Maximum percentage of unready nodes the cluster + autoscaler will stop if the percentage is exceeded. Defaults + to 45. + type: number + newPodScaleUpDelay: + description: For scenarios like burst/batch scale where you + don't want CA to act before the kubernetes scheduler could + schedule all the pods, you can tell CA to ignore unscheduled + pods before they're a certain age. Defaults to 10s. 
+ type: string + scaleDownDelayAfterAdd: + description: How long after the scale up of AKS nodes the + scale down evaluation resumes. Defaults to 10m. + type: string + scaleDownDelayAfterDelete: + description: How long after node deletion that scale down + evaluation resumes. Defaults to the value used for scan_interval. + type: string + scaleDownDelayAfterFailure: + description: How long after scale down failure that scale + down evaluation resumes. Defaults to 3m. + type: string + scaleDownUnneeded: + description: How long a node should be unneeded before it + is eligible for scale down. Defaults to 10m. + type: string + scaleDownUnready: + description: How long an unready node should be unneeded before + it is eligible for scale down. Defaults to 20m. + type: string + scaleDownUtilizationThreshold: + description: Node utilization level, defined as sum of requested + resources divided by capacity, below which a node can be + considered for scale down. Defaults to 0.5. + type: string + scanInterval: + description: How often the AKS Cluster should be re-evaluated + for scale up/down. Defaults to 10s. + type: string + skipNodesWithLocalStorage: + description: If true cluster autoscaler will never delete + nodes with pods with local storage, for example, EmptyDir + or HostPath. Defaults to true. + type: boolean + skipNodesWithSystemPods: + description: If true cluster autoscaler will never delete + nodes with pods from kube-system (except for DaemonSet or + mirror pods). Defaults to true. + type: boolean + type: object + automaticChannelUpgrade: + description: The upgrade channel for this Kubernetes Cluster. + Possible values are patch, rapid, node-image and stable. Omitting + this field sets this value to none. + type: string + azureActiveDirectoryRoleBasedAccessControl: + description: A azure_active_directory_role_based_access_control + block as defined below. 
+ properties: + adminGroupObjectIds: + description: A list of Object IDs of Azure Active Directory + Groups which should have Admin Role on the Cluster. + items: + type: string + type: array + azureRbacEnabled: + description: Is Role Based Access Control based on Azure AD + enabled? + type: boolean + clientAppId: + description: The Client ID of an Azure Active Directory Application. + type: string + managed: + description: Is the Azure Active Directory integration Managed, + meaning that Azure will create/manage the Service Principal + used for integration. + type: boolean + serverAppId: + description: The Server ID of an Azure Active Directory Application. + type: string + tenantId: + description: The Tenant ID used for Azure Active Directory + Application. If this isn't specified the Tenant ID of the + current Subscription is used. + type: string + type: object + azurePolicyEnabled: + description: Should the Azure Policy Add-On be enabled? For more + details please visit Understand Azure Policy for Azure Kubernetes + Service + type: boolean + confidentialComputing: + description: A confidential_computing block as defined below. + For more details please the documentation + properties: + sgxQuoteHelperEnabled: + description: Should the SGX quote helper be enabled? + type: boolean + type: object + currentKubernetesVersion: + description: The current version running on the Azure Kubernetes + Managed Cluster. + type: string + customCaTrustCertificatesBase64: + description: A list of up to 10 base64 encoded CAs that will be + added to the trust store on nodes with the custom_ca_trust_enabled + feature enabled. + items: + type: string + type: array + defaultNodePool: + description: A default_node_pool block as defined below. + properties: + capacityReservationGroupId: + description: Specifies the ID of the Capacity Reservation + Group within which this AKS Cluster should be created. Changing + this forces a new resource to be created. 
+ type: string + customCaTrustEnabled: + description: Specifies whether to trust a Custom CA. + type: boolean + enableAutoScaling: + description: Should the Kubernetes Auto Scaler be enabled + for this Node Pool? + type: boolean + enableHostEncryption: + description: Should the nodes in the Default Node Pool have + host encryption enabled? temporary_name_for_rotation must + be specified when changing this property. + type: boolean + enableNodePublicIp: + description: Should nodes in this Node Pool have a Public + IP Address? temporary_name_for_rotation must be specified + when changing this property. + type: boolean + fipsEnabled: + description: Should the nodes in this Node Pool have Federal + Information Processing Standard enabled? temporary_name_for_rotation + must be specified when changing this block. Changing this + forces a new resource to be created. + type: boolean + gpuInstance: + description: Specifies the GPU MIG instance profile for supported + GPU VM SKU. The allowed values are MIG1g, MIG2g, MIG3g, + MIG4g and MIG7g. Changing this forces a new resource to + be created. + type: string + hostGroupId: + description: Specifies the ID of the Host Group within which + this AKS Cluster should be created. Changing this forces + a new resource to be created. + type: string + kubeletConfig: + description: A kubelet_config block as defined below. temporary_name_for_rotation + must be specified when changing this block. + properties: + allowedUnsafeSysctls: + description: Specifies the allow list of unsafe sysctls + command or patterns (ending in *). + items: + type: string + type: array + x-kubernetes-list-type: set + containerLogMaxLine: + description: Specifies the maximum number of container + log files that can be present for a container. must + be at least 2. + type: number + containerLogMaxSizeMb: + description: Specifies the maximum size (e.g. 10MB) of + container log file before it is rotated. 
+ type: number + cpuCfsQuotaEnabled: + description: Is CPU CFS quota enforcement for containers + enabled? + type: boolean + cpuCfsQuotaPeriod: + description: Specifies the CPU CFS quota period value. + type: string + cpuManagerPolicy: + description: Specifies the CPU Manager policy to use. + Possible values are none and static,. + type: string + imageGcHighThreshold: + description: Specifies the percent of disk usage above + which image garbage collection is always run. Must be + between 0 and 100. + type: number + imageGcLowThreshold: + description: Specifies the percent of disk usage lower + than which image garbage collection is never run. Must + be between 0 and 100. + type: number + podMaxPid: + description: Specifies the maximum number of processes + per pod. + type: number + topologyManagerPolicy: + description: Specifies the Topology Manager policy to + use. Possible values are none, best-effort, restricted + or single-numa-node. + type: string + type: object + kubeletDiskType: + description: The type of disk used by kubelet. Possible values + are OS and Temporary. + type: string + linuxOsConfig: + description: A linux_os_config block as defined below. temporary_name_for_rotation + must be specified when changing this block. + properties: + swapFileSizeMb: + description: Specifies the size of the swap file on each + node in MB. + type: number + sysctlConfig: + description: A sysctl_config block as defined below. + properties: + fsAioMaxNr: + description: The sysctl setting fs.aio-max-nr. Must + be between 65536 and 6553500. + type: number + fsFileMax: + description: The sysctl setting fs.file-max. Must + be between 8192 and 12000500. + type: number + fsInotifyMaxUserWatches: + description: The sysctl setting fs.inotify.max_user_watches. + Must be between 781250 and 2097152. + type: number + fsNrOpen: + description: The sysctl setting fs.nr_open. Must be + between 8192 and 20000500. 
+ type: number + kernelThreadsMax: + description: The sysctl setting kernel.threads-max. + Must be between 20 and 513785. + type: number + netCoreNetdevMaxBacklog: + description: The sysctl setting net.core.netdev_max_backlog. + Must be between 1000 and 3240000. + type: number + netCoreOptmemMax: + description: The sysctl setting net.core.optmem_max. + Must be between 20480 and 4194304. + type: number + netCoreRmemDefault: + description: The sysctl setting net.core.rmem_default. + Must be between 212992 and 134217728. + type: number + netCoreRmemMax: + description: The sysctl setting net.core.rmem_max. + Must be between 212992 and 134217728. + type: number + netCoreSomaxconn: + description: The sysctl setting net.core.somaxconn. + Must be between 4096 and 3240000. + type: number + netCoreWmemDefault: + description: The sysctl setting net.core.wmem_default. + Must be between 212992 and 134217728. + type: number + netCoreWmemMax: + description: The sysctl setting net.core.wmem_max. + Must be between 212992 and 134217728. + type: number + netIpv4IpLocalPortRangeMax: + description: The sysctl setting net.ipv4.ip_local_port_range + max value. Must be between 32768 and 65535. + type: number + netIpv4IpLocalPortRangeMin: + description: The sysctl setting net.ipv4.ip_local_port_range + min value. Must be between 1024 and 60999. + type: number + netIpv4NeighDefaultGcThresh1: + description: The sysctl setting net.ipv4.neigh.default.gc_thresh1. + Must be between 128 and 80000. + type: number + netIpv4NeighDefaultGcThresh2: + description: The sysctl setting net.ipv4.neigh.default.gc_thresh2. + Must be between 512 and 90000. + type: number + netIpv4NeighDefaultGcThresh3: + description: The sysctl setting net.ipv4.neigh.default.gc_thresh3. + Must be between 1024 and 100000. + type: number + netIpv4TcpFinTimeout: + description: The sysctl setting net.ipv4.tcp_fin_timeout. + Must be between 5 and 120. 
+ type: number + netIpv4TcpKeepaliveIntvl: + description: The sysctl setting net.ipv4.tcp_keepalive_intvl. + Must be between 10 and 90. + type: number + netIpv4TcpKeepaliveProbes: + description: The sysctl setting net.ipv4.tcp_keepalive_probes. + Must be between 1 and 15. + type: number + netIpv4TcpKeepaliveTime: + description: The sysctl setting net.ipv4.tcp_keepalive_time. + Must be between 30 and 432000. + type: number + netIpv4TcpMaxSynBacklog: + description: The sysctl setting net.ipv4.tcp_max_syn_backlog. + Must be between 128 and 3240000. + type: number + netIpv4TcpMaxTwBuckets: + description: The sysctl setting net.ipv4.tcp_max_tw_buckets. + Must be between 8000 and 1440000. + type: number + netIpv4TcpTwReuse: + description: The sysctl setting net.ipv4.tcp_tw_reuse. + type: boolean + netNetfilterNfConntrackBuckets: + description: The sysctl setting net.netfilter.nf_conntrack_buckets. + Must be between 65536 and 524288. + type: number + netNetfilterNfConntrackMax: + description: The sysctl setting net.netfilter.nf_conntrack_max. + Must be between 131072 and 2097152. + type: number + vmMaxMapCount: + description: The sysctl setting vm.max_map_count. + Must be between 65530 and 262144. + type: number + vmSwappiness: + description: The sysctl setting vm.swappiness. Must + be between 0 and 100. + type: number + vmVfsCachePressure: + description: The sysctl setting vm.vfs_cache_pressure. + Must be between 0 and 100. + type: number + type: object + transparentHugePageDefrag: + description: specifies the defrag configuration for Transparent + Huge Page. Possible values are always, defer, defer+madvise, + madvise and never. + type: string + transparentHugePageEnabled: + description: Specifies the Transparent Huge Page enabled + configuration. Possible values are always, madvise and + never. + type: string + type: object + maxCount: + description: The maximum number of nodes which should exist + in this Node Pool. If specified this must be between 1 and + 1000. 
+ type: number + maxPods: + description: The maximum number of pods that can run on each + agent. temporary_name_for_rotation must be specified when + changing this property. + type: number + messageOfTheDay: + description: A base64-encoded string which will be written + to /etc/motd after decoding. This allows customization of + the message of the day for Linux nodes. It cannot be specified + for Windows nodes and must be a static string (i.e. will + be printed raw and not executed as a script). Changing this + forces a new resource to be created. + type: string + minCount: + description: The minimum number of nodes which should exist + in this Node Pool. If specified this must be between 1 and + 1000. + type: number + name: + description: The name which should be used for the default + Kubernetes Node Pool. + type: string + nodeCount: + description: The initial number of nodes which should exist + in this Node Pool. If specified this must be between 1 and + 1000 and between min_count and max_count. + type: number + nodeLabels: + additionalProperties: + type: string + description: A map of Kubernetes labels which should be applied + to nodes in the Default Node Pool. + type: object + x-kubernetes-map-type: granular + nodeNetworkProfile: + description: A node_network_profile block as documented below. + properties: + allowedHostPorts: + description: One or more allowed_host_ports blocks as + defined below. + items: + properties: + portEnd: + description: Specifies the end of the port range. + type: number + portStart: + description: Specifies the start of the port range. + type: number + protocol: + description: Specifies the protocol of the port + range. Possible values are TCP and UDP. + type: string + type: object + type: array + applicationSecurityGroupIds: + description: A list of Application Security Group IDs + which should be associated with this Node Pool. 
+ items: + type: string + type: array + nodePublicIpTags: + additionalProperties: + type: string + description: Specifies a mapping of tags to the instance-level + public IPs. Changing this forces a new resource to be + created. + type: object + x-kubernetes-map-type: granular + type: object + nodePublicIpPrefixId: + description: Resource ID for the Public IP Addresses Prefix + for the nodes in this Node Pool. enable_node_public_ip should + be true. Changing this forces a new resource to be created. + type: string + nodeTaints: + items: + type: string + type: array + onlyCriticalAddonsEnabled: + description: Enabling this option will taint default node + pool with CriticalAddonsOnly=true:NoSchedule taint. temporary_name_for_rotation + must be specified when changing this property. + type: boolean + orchestratorVersion: + description: Version of Kubernetes used for the Agents. If + not specified, the default node pool will be created with + the version specified by kubernetes_version. If both are + unspecified, the latest recommended version will be used + at provisioning time (but won't auto-upgrade). AKS does + not require an exact patch version to be specified, minor + version aliases such as 1.22 are also supported. - The minor + version's latest GA patch is automatically chosen in that + case. More details can be found in the documentation. + type: string + osDiskSizeGb: + description: The size of the OS Disk which should be used + for each agent in the Node Pool. temporary_name_for_rotation + must be specified when attempting a change. + type: number + osDiskType: + description: The type of disk which should be used for the + Operating System. Possible values are Ephemeral and Managed. + Defaults to Managed. temporary_name_for_rotation must be + specified when attempting a change. + type: string + osSku: + description: Specifies the OS SKU used by the agent pool. + Possible values are AzureLinux, Ubuntu, Windows2019 and + Windows2022. 
If not specified, the default is Ubuntu if + OSType=Linux or Windows2019 if OSType=Windows. And the default + Windows OSSKU will be changed to Windows2022 after Windows2019 + is deprecated. temporary_name_for_rotation must be specified + when attempting a change. + type: string + podSubnetId: + description: The ID of the Subnet where the pods in the default + Node Pool should exist. + type: string + proximityPlacementGroupId: + description: The ID of the Proximity Placement Group. Changing + this forces a new resource to be created. + type: string + scaleDownMode: + description: Specifies the autoscaling behaviour of the Kubernetes + Cluster. Allowed values are Delete and Deallocate. Defaults + to Delete. + type: string + snapshotId: + description: The ID of the Snapshot which should be used to + create this default Node Pool. temporary_name_for_rotation + must be specified when changing this property. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the Node Pool. + type: object + x-kubernetes-map-type: granular + temporaryNameForRotation: + description: Specifies the name of the temporary node pool + used to cycle the default node pool for VM resizing. + type: string + type: + description: The type of Node Pool which should be created. + Possible values are AvailabilitySet and VirtualMachineScaleSets. + Defaults to VirtualMachineScaleSets. Changing this forces + a new resource to be created. + type: string + ultraSsdEnabled: + description: Used to specify whether the UltraSSD is enabled + in the Default Node Pool. Defaults to false. See the documentation + for more information. temporary_name_for_rotation must be + specified when attempting a change. + type: boolean + upgradeSettings: + description: A upgrade_settings block as documented below. + properties: + maxSurge: + description: The maximum number or percentage of nodes + which will be added to the Node Pool size during an + upgrade. 
+ type: string + type: object + vmSize: + description: The size of the Virtual Machine, such as Standard_DS2_v2. + temporary_name_for_rotation must be specified when attempting + a resize. + type: string + vnetSubnetId: + description: The ID of a Subnet where the Kubernetes Node + Pool should exist. + type: string + workloadRuntime: + description: Specifies the workload runtime used by the node + pool. Possible values are OCIContainer and KataMshvVmIsolation. + type: string + zones: + description: Specifies a list of Availability Zones in which + this Kubernetes Cluster should be located. temporary_name_for_rotation + must be specified when changing this property. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + diskEncryptionSetId: + description: The ID of the Disk Encryption Set which should be + used for the Nodes and Volumes. More information can be found + in the documentation. Changing this forces a new resource to + be created. + type: string + dnsPrefix: + description: DNS prefix specified when creating the managed cluster. + Possible values must begin and end with a letter or number, + contain only letters, numbers, and hyphens and be between 1 + and 54 characters in length. Changing this forces a new resource + to be created. + type: string + dnsPrefixPrivateCluster: + description: Specifies the DNS prefix to use with private clusters. + Changing this forces a new resource to be created. + type: string + edgeZone: + description: Specifies the Edge Zone within the Azure Region where + this Managed Kubernetes Cluster should exist. Changing this + forces a new resource to be created. + type: string + enablePodSecurityPolicy: + type: boolean + fqdn: + description: The FQDN of the Azure Kubernetes Managed Cluster. + type: string + httpApplicationRoutingEnabled: + description: Should HTTP Application Routing be enabled? 
+ type: boolean + httpApplicationRoutingZoneName: + description: The Zone Name of the HTTP Application Routing. + type: string + httpProxyConfig: + description: A http_proxy_config block as defined below. + properties: + httpProxy: + description: The proxy address to be used when communicating + over HTTP. + type: string + httpsProxy: + description: The proxy address to be used when communicating + over HTTPS. + type: string + noProxy: + description: The list of domains that will not use the proxy + for communication. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + id: + description: The Kubernetes Managed Cluster ID. + type: string + identity: + description: An identity block as defined below. One of either + identity or service_principal must be specified. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Kubernetes Cluster. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Principal ID associated with this Managed + Service Identity. + type: string + tenantId: + description: The Tenant ID associated with this Managed Service + Identity. + type: string + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Kubernetes Cluster. Possible + values are SystemAssigned or UserAssigned. + type: string + type: object + imageCleanerEnabled: + description: Specifies whether Image Cleaner is enabled. + type: boolean + imageCleanerIntervalHours: + description: Specifies the interval in hours when images should + be cleaned up. Defaults to 48. + type: number + ingressApplicationGateway: + description: An ingress_application_gateway block as defined below. + properties: + effectiveGatewayId: + description: The ID of the Application Gateway associated + with the ingress controller deployed to this Kubernetes + Cluster. 
+ type: string + gatewayId: + description: The ID of the Application Gateway to integrate + with the ingress controller of this Kubernetes Cluster. + See this page for further details. + type: string + gatewayName: + description: The name of the Application Gateway to be used + or created in the Nodepool Resource Group, which in turn + will be integrated with the ingress controller of this Kubernetes + Cluster. See this page for further details. + type: string + ingressApplicationGatewayIdentity: + description: An ingress_application_gateway_identity block + is exported. The exported attributes are defined below. + items: + properties: + clientId: + description: The Client ID of the user-defined Managed + Identity used for Web App Routing. + type: string + objectId: + description: The Object ID of the user-defined Managed + Identity used for Web App Routing + type: string + userAssignedIdentityId: + description: The ID of the User Assigned Identity used + for Web App Routing. + type: string + type: object + type: array + subnetCidr: + description: The subnet CIDR to be used to create an Application + Gateway, which in turn will be integrated with the ingress + controller of this Kubernetes Cluster. See this page for + further details. + type: string + subnetId: + description: The ID of the subnet on which to create an Application + Gateway, which in turn will be integrated with the ingress + controller of this Kubernetes Cluster. See this page for + further details. + type: string + type: object + keyManagementService: + description: A key_management_service block as defined below. + For more details, please visit Key Management Service (KMS) + etcd encryption to an AKS cluster. + properties: + keyVaultKeyId: + description: Identifier of Azure Key Vault key. See key identifier + format for more details. + type: string + keyVaultNetworkAccess: + description: Network access of the key vault Network access + of key vault. The possible values are Public and Private. 
+ Public means the key vault allows public access from all + networks. Private means the key vault disables public access + and enables private link. Defaults to Public. + type: string + type: object + keyVaultSecretsProvider: + description: A key_vault_secrets_provider block as defined below. + properties: + secretIdentity: + description: An secret_identity block is exported. The exported + attributes are defined below. + items: + properties: + clientId: + description: The Client ID of the user-defined Managed + Identity used by the Secret Provider. + type: string + objectId: + description: The Object ID of the user-defined Managed + Identity used by the Secret Provider. + type: string + userAssignedIdentityId: + description: The ID of the User Assigned Identity used + by the Secret Provider. + type: string + type: object + type: array + secretRotationEnabled: + description: Should the secret store CSI driver on the AKS + cluster be enabled? + type: boolean + secretRotationInterval: + description: The interval to poll for secret rotation. This + attribute is only set when secret_rotation is true. Defaults + to 2m. + type: string + type: object + kubeletIdentity: + description: A kubelet_identity block as defined below. + properties: + clientId: + description: The Client ID of the user-defined Managed Identity + to be assigned to the Kubelets. If not specified a Managed + Identity is created automatically. Changing this forces + a new resource to be created. + type: string + objectId: + description: The Object ID of the user-defined Managed Identity + assigned to the Kubelets.If not specified a Managed Identity + is created automatically. Changing this forces a new resource + to be created. + type: string + userAssignedIdentityId: + description: The ID of the User Assigned Identity assigned + to the Kubelets. If not specified a Managed Identity is + created automatically. Changing this forces a new resource + to be created. 
+ type: string + type: object + kubernetesVersion: + description: Version of Kubernetes specified when creating the + AKS managed cluster. If not specified, the latest recommended + version will be used at provisioning time (but won't auto-upgrade). + AKS does not require an exact patch version to be specified, + minor version aliases such as 1.22 are also supported. - The + minor version's latest GA patch is automatically chosen in that + case. More details can be found in the documentation. + type: string + linuxProfile: + description: A linux_profile block as defined below. + properties: + adminUsername: + description: The Admin Username for the Cluster. Changing + this forces a new resource to be created. + type: string + sshKey: + description: An ssh_key block as defined below. Only one is + currently allowed. Changing this will update the key on + all node pools. More information can be found in the documentation. + properties: + keyData: + description: The Public SSH Key used to access the cluster. + type: string + type: object + type: object + localAccountDisabled: + description: If true local accounts will be disabled. See the + documentation for more information. + type: boolean + location: + description: The location where the Managed Kubernetes Cluster + should be created. Changing this forces a new resource to be + created. + type: string + maintenanceWindow: + description: A maintenance_window block as defined below. + properties: + allowed: + description: One or more allowed blocks as defined below. + items: + properties: + day: + description: A day in a week. Possible values are Sunday, + Monday, Tuesday, Wednesday, Thursday, Friday and Saturday. + type: string + hours: + description: An array of hour slots in a day. For example, + specifying 1 will allow maintenance from 1:00am to + 2:00am. Specifying 1, 2 will allow maintenance from + 1:00am to 3:00m. Possible values are between 0 and + 23. 
+ items: + type: number + type: array + x-kubernetes-list-type: set + type: object + type: array + notAllowed: + description: One or more not_allowed block as defined below. + items: + properties: + end: + description: The end of a time span, formatted as an + RFC3339 string. + type: string + start: + description: The start of a time span, formatted as + an RFC3339 string. + type: string + type: object + type: array + type: object + maintenanceWindowAutoUpgrade: + description: A maintenance_window_auto_upgrade block as defined + below. + properties: + dayOfMonth: + description: The day of the month for the maintenance run. + Required in combination with RelativeMonthly frequency. + Value between 0 and 31 (inclusive). + type: number + dayOfWeek: + description: The day of the week for the maintenance run. + Required in combination with weekly frequency. Possible + values are Friday, Monday, Saturday, Sunday, Thursday, Tuesday + and Wednesday. + type: string + duration: + description: The duration of the window for maintenance to + run in hours. + type: number + frequency: + description: Frequency of maintenance. Possible options are + Weekly, AbsoluteMonthly and RelativeMonthly. + type: string + interval: + description: The interval for maintenance runs. Depending + on the frequency this interval is week or month based. + type: number + notAllowed: + description: One or more not_allowed block as defined below. + items: + properties: + end: + description: The end of a time span, formatted as an + RFC3339 string. + type: string + start: + description: The start of a time span, formatted as + an RFC3339 string. + type: string + type: object + type: array + startDate: + description: The date on which the maintenance window begins + to take effect. + type: string + startTime: + description: The time for maintenance to begin, based on the + timezone determined by utc_offset. Format is HH:mm. 
+ type: string + utcOffset: + description: Used to determine the timezone for cluster maintenance. + type: string + weekIndex: + description: |- + Specifies on which instance of the allowed days specified in day_of_week the maintenance occurs. Options are First, Second, Third, Fourth, and Last. + Required in combination with relative monthly frequency. + type: string + type: object + maintenanceWindowNodeOs: + description: A maintenance_window_node_os block as defined below. + properties: + dayOfMonth: + description: The day of the month for the maintenance run. + Required in combination with RelativeMonthly frequency. + Value between 0 and 31 (inclusive). + type: number + dayOfWeek: + description: The day of the week for the maintenance run. + Required in combination with weekly frequency. Possible + values are Friday, Monday, Saturday, Sunday, Thursday, Tuesday + and Wednesday. + type: string + duration: + description: The duration of the window for maintenance to + run in hours. + type: number + frequency: + description: Frequency of maintenance. Possible options are + Daily, Weekly, AbsoluteMonthly and RelativeMonthly. + type: string + interval: + description: The interval for maintenance runs. Depending + on the frequency this interval is week or month based. + type: number + notAllowed: + description: One or more not_allowed block as defined below. + items: + properties: + end: + description: The end of a time span, formatted as an + RFC3339 string. + type: string + start: + description: The start of a time span, formatted as + an RFC3339 string. + type: string + type: object + type: array + startDate: + description: The date on which the maintenance window begins + to take effect. + type: string + startTime: + description: The time for maintenance to begin, based on the + timezone determined by utc_offset. Format is HH:mm. + type: string + utcOffset: + description: Used to determine the timezone for cluster maintenance. 
+ type: string + weekIndex: + description: The week in the month used for the maintenance + run. Options are First, Second, Third, Fourth, and Last. + type: string + type: object + microsoftDefender: + description: A microsoft_defender block as defined below. + properties: + logAnalyticsWorkspaceId: + description: Specifies the ID of the Log Analytics Workspace + where the audit logs collected by Microsoft Defender should + be sent to. + type: string + type: object + monitorMetrics: + description: Specifies a Prometheus add-on profile for the Kubernetes + Cluster. A monitor_metrics block as defined below. + properties: + annotationsAllowed: + description: Specifies a comma-separated list of Kubernetes + annotation keys that will be used in the resource's labels + metric. + type: string + labelsAllowed: + description: Specifies a Comma-separated list of additional + Kubernetes label keys that will be used in the resource's + labels metric. + type: string + type: object + networkProfile: + description: A network_profile block as defined below. + properties: + dnsServiceIp: + description: IP address within the Kubernetes service address + range that will be used by cluster service discovery (kube-dns). + Changing this forces a new resource to be created. + type: string + dockerBridgeCidr: + description: IP address (in CIDR notation) used as the Docker + bridge IP address on nodes. Changing this forces a new resource + to be created. + type: string + ebpfDataPlane: + description: Specifies the eBPF data plane used for building + the Kubernetes network. Possible value is cilium. Disabling + this forces a new resource to be created. + type: string + ipVersions: + description: Specifies a list of IP versions the Kubernetes + Cluster will use to assign IP addresses to its nodes and + pods. Possible values are IPv4 and/or IPv6. IPv4 must always + be specified. Changing this forces a new resource to be + created. 
+ items: + type: string + type: array + loadBalancerProfile: + description: A load_balancer_profile block as defined below. + This can only be specified when load_balancer_sku is set + to standard. Changing this forces a new resource to be created. + properties: + effectiveOutboundIps: + description: The outcome (resource IDs) of the specified + arguments. + items: + type: string + type: array + x-kubernetes-list-type: set + idleTimeoutInMinutes: + description: Desired outbound flow idle timeout in minutes + for the cluster load balancer. Must be between 4 and + 120 inclusive. Defaults to 4. + type: number + managedOutboundIpCount: + description: Count of desired managed outbound IPs for + the cluster load balancer. Must be between 1 and 100 + inclusive. + type: number + managedOutboundIpv6Count: + description: The desired number of IPv6 outbound IPs created + and managed by Azure for the cluster load balancer. + Must be in the range of 1 to 100 (inclusive). The default + value is 0 for single-stack and 1 for dual-stack. + type: number + outboundIpAddressIds: + description: The ID of the Public IP Addresses which should + be used for outbound communication for the cluster load + balancer. + items: + type: string + type: array + x-kubernetes-list-type: set + outboundIpPrefixIds: + description: The ID of the outbound Public IP Address + Prefixes which should be used for the cluster load balancer. + items: + type: string + type: array + x-kubernetes-list-type: set + outboundPortsAllocated: + description: Number of desired SNAT port for each VM in + the clusters load balancer. Must be between 0 and 64000 + inclusive. Defaults to 0. + type: number + type: object + loadBalancerSku: + description: Specifies the SKU of the Load Balancer used for + this Kubernetes Cluster. Possible values are basic and standard. + Defaults to standard. Changing this forces a new resource + to be created. 
+ type: string + natGatewayProfile: + description: A nat_gateway_profile block as defined below. + This can only be specified when load_balancer_sku is set + to standard and outbound_type is set to managedNATGateway + or userAssignedNATGateway. Changing this forces a new resource + to be created. + properties: + effectiveOutboundIps: + description: The outcome (resource IDs) of the specified + arguments. + items: + type: string + type: array + x-kubernetes-list-type: set + idleTimeoutInMinutes: + description: Desired outbound flow idle timeout in minutes + for the cluster load balancer. Must be between 4 and + 120 inclusive. Defaults to 4. + type: number + managedOutboundIpCount: + description: Count of desired managed outbound IPs for + the cluster load balancer. Must be between 1 and 100 + inclusive. + type: number + type: object + networkMode: + description: Network mode to be used with Azure CNI. Possible + values are bridge and transparent. Changing this forces + a new resource to be created. + type: string + networkPlugin: + description: Network plugin to use for networking. Currently + supported values are azure, kubenet and none. Changing this + forces a new resource to be created. + type: string + networkPluginMode: + description: Specifies the network plugin mode used for building + the Kubernetes network. Possible value is overlay. + type: string + networkPolicy: + description: Sets up network policy to be used with Azure + CNI. Network policy allows us to control the traffic flow + between pods. Currently supported values are calico, azure + and cilium. + type: string + outboundType: + description: The outbound (egress) routing method which should + be used for this Kubernetes Cluster. Possible values are + loadBalancer, userDefinedRouting, managedNATGateway and + userAssignedNATGateway. Defaults to loadBalancer. More information + on supported migration paths for outbound_type can be found + in this documentation. 
+ type: string + podCidr: + description: The CIDR to use for pod IP addresses. This field + can only be set when network_plugin is set to kubenet. Changing + this forces a new resource to be created. + type: string + podCidrs: + description: A list of CIDRs to use for pod IP addresses. + For single-stack networking a single IPv4 CIDR is expected. + For dual-stack networking an IPv4 and IPv6 CIDR are expected. + Changing this forces a new resource to be created. + items: + type: string + type: array + serviceCidr: + description: The Network Range used by the Kubernetes service. + Changing this forces a new resource to be created. + type: string + serviceCidrs: + description: A list of CIDRs to use for Kubernetes services. + For single-stack networking a single IPv4 CIDR is expected. + For dual-stack networking an IPv4 and IPv6 CIDR are expected. + Changing this forces a new resource to be created. + items: + type: string + type: array + type: object + nodeOsChannelUpgrade: + description: The upgrade channel for this Kubernetes Cluster Nodes' + OS Image. Possible values are Unmanaged, SecurityPatch, NodeImage + and None. + type: string + nodeResourceGroup: + description: The auto-generated Resource Group which contains + the resources for this Managed Kubernetes Cluster. + type: string + nodeResourceGroupId: + description: The ID of the Resource Group containing the resources + for this Managed Kubernetes Cluster. + type: string + oidcIssuerEnabled: + description: Enable or Disable the OIDC issuer URL + type: boolean + oidcIssuerUrl: + description: The OIDC issuer URL that is associated with the cluster. + type: string + omsAgent: + description: An oms_agent block as defined below. + properties: + logAnalyticsWorkspaceId: + description: The ID of the Log Analytics Workspace which the + OMS Agent should send data to. + type: string + msiAuthForMonitoringEnabled: + description: Is managed identity authentication for monitoring + enabled? 
+ type: boolean + omsAgentIdentity: + description: An oms_agent_identity block is exported. The + exported attributes are defined below. + items: + properties: + clientId: + description: The Client ID of the user-defined Managed + Identity used by the OMS Agents. + type: string + objectId: + description: The Object ID of the user-defined Managed + Identity used by the OMS Agents. + type: string + userAssignedIdentityId: + description: The ID of the User Assigned Identity used + by the OMS Agents. + type: string + type: object + type: array + type: object + openServiceMeshEnabled: + description: Is Open Service Mesh enabled? For more details, please + visit Open Service Mesh for AKS. + type: boolean + portalFqdn: + description: The FQDN for the Azure Portal resources when private + link has been enabled, which is only resolvable inside the Virtual + Network used by the Kubernetes Cluster. + type: string + privateClusterEnabled: + description: Should this Kubernetes Cluster have its API server + only exposed on internal IP addresses? This provides a Private + IP Address for the Kubernetes API on the Virtual Network where + the Kubernetes Cluster is located. Defaults to false. Changing + this forces a new resource to be created. + type: boolean + privateClusterPublicFqdnEnabled: + description: Specifies whether a Public FQDN for this Private + Cluster should be added. Defaults to false. + type: boolean + privateDnsZoneId: + description: Either the ID of Private DNS Zone which should be + delegated to this Cluster, System to have AKS manage this or + None. In case of None you will need to bring your own DNS server + and set up resolving, otherwise, the cluster will have issues + after provisioning. Changing this forces a new resource to be + created. + type: string + privateFqdn: + description: The FQDN for the Kubernetes Cluster when private + link has been enabled, which is only resolvable inside the Virtual + Network used by the Kubernetes Cluster. 
+ type: string + publicNetworkAccessEnabled: + description: Whether public network access is allowed for this + Kubernetes Cluster. Defaults to true. + type: boolean + resourceGroupName: + description: Specifies the Resource Group where the Managed Kubernetes + Cluster should exist. Changing this forces a new resource to + be created. + type: string + roleBasedAccessControlEnabled: + description: Whether Role Based Access Control for the Kubernetes + Cluster should be enabled. Defaults to true. Changing this forces + a new resource to be created. + type: boolean + runCommandEnabled: + description: Whether to enable run command for the cluster or + not. Defaults to true. + type: boolean + serviceMeshProfile: + description: A service_mesh_profile block as defined below. + properties: + externalIngressGatewayEnabled: + description: Is Istio External Ingress Gateway enabled? + type: boolean + internalIngressGatewayEnabled: + description: Is Istio Internal Ingress Gateway enabled? + type: boolean + mode: + description: The mode of the service mesh. Possible value + is Istio. + type: string + type: object + servicePrincipal: + description: A service_principal block as documented below. One + of either identity or service_principal must be specified. + properties: + clientId: + description: The Client ID for the Service Principal. + type: string + type: object + skuTier: + description: The SKU Tier that should be used for this Kubernetes + Cluster. Possible values are Free, Standard (which includes + the Uptime SLA) and Premium. Defaults to Free. + type: string + storageProfile: + description: A storage_profile block as defined below. + properties: + blobDriverEnabled: + description: Is the Blob CSI driver enabled? Defaults to false. + type: boolean + diskDriverEnabled: + description: Is the Disk CSI driver enabled? Defaults to true. + type: boolean + diskDriverVersion: + description: Disk CSI Driver version to be used. Possible + values are v1 and v2. Defaults to v1. 
+ type: string + fileDriverEnabled: + description: Is the File CSI driver enabled? Defaults to true. + type: boolean + snapshotControllerEnabled: + description: Is the Snapshot Controller enabled? Defaults + to true. + type: boolean + type: object + supportPlan: + description: Specifies the support plan which should be used for + this Kubernetes Cluster. Possible values are KubernetesOfficial + and AKSLongTermSupport. Defaults to KubernetesOfficial. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + webAppRouting: + description: A web_app_routing block as defined below. + properties: + dnsZoneId: + description: Specifies the ID of the DNS Zone in which DNS + entries are created for applications deployed to the cluster + when Web App Routing is enabled. For Bring-Your-Own DNS + zones this property should be set to an empty string "". + type: string + webAppRoutingIdentity: + description: A web_app_routing_identity block is exported. + The exported attributes are defined below. + items: + properties: + clientId: + description: The Client ID of the user-defined Managed + Identity used for Web App Routing. + type: string + objectId: + description: The Object ID of the user-defined Managed + Identity used for Web App Routing + type: string + userAssignedIdentityId: + description: The ID of the User Assigned Identity used + for Web App Routing. + type: string + type: object + type: array + type: object + windowsProfile: + description: A windows_profile block as defined below. + properties: + adminUsername: + description: The Admin Username for Windows VMs. Changing + this forces a new resource to be created. + type: string + gmsa: + description: A gmsa block as defined below. + properties: + dnsServer: + description: Specifies the DNS server for Windows gMSA. 
+ Set this to an empty string if you have configured the + DNS server in the VNet which was used to create the + managed cluster. + type: string + rootDomain: + description: Specifies the root domain name for Windows + gMSA. Set this to an empty string if you have configured + the DNS server in the VNet which was used to create + the managed cluster. + type: string + type: object + license: + description: Specifies the type of on-premise license which + should be used for Node Pool Windows Virtual Machine. At + this time the only possible value is Windows_Server. + type: string + type: object + workloadAutoscalerProfile: + description: A workload_autoscaler_profile block defined below. + properties: + kedaEnabled: + description: Specifies whether KEDA Autoscaler can be used + for workloads. + type: boolean + verticalPodAutoscalerControlledValues: + description: Which resources values should be controlled. + type: string + verticalPodAutoscalerEnabled: + description: Specifies whether Vertical Pod Autoscaler should + be enabled. + type: boolean + verticalPodAutoscalerUpdateMode: + description: How the autoscaler applies changes to pod resources. + type: string + type: object + workloadIdentityEnabled: + description: Specifies whether Azure AD Workload Identity should + be enabled for the Cluster. Defaults to false. + type: boolean + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/containerservice.azure.upbound.io_kubernetesfleetmanagers.yaml b/package/crds/containerservice.azure.upbound.io_kubernetesfleetmanagers.yaml index 1ed2eee0c..98591529d 100644 --- a/package/crds/containerservice.azure.upbound.io_kubernetesfleetmanagers.yaml +++ b/package/crds/containerservice.azure.upbound.io_kubernetesfleetmanagers.yaml @@ -478,3 +478,457 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: 
KubernetesFleetManager is the Schema for the KubernetesFleetManagers + API. Manages a Kubernetes Fleet Manager. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: KubernetesFleetManagerSpec defines the desired state of KubernetesFleetManager + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + hubProfile: + properties: + dnsPrefix: + type: string + type: object + location: + description: The Azure Region where the Kubernetes Fleet Manager + should exist. Changing this forces a new Kubernetes Fleet Manager + to be created. 
+ type: string + resourceGroupName: + description: Specifies the name of the Resource Group within which + this Kubernetes Fleet Manager should exist. Changing this forces + a new Kubernetes Fleet Manager to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Kubernetes Fleet Manager. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + hubProfile: + properties: + dnsPrefix: + type: string + type: object + location: + description: The Azure Region where the Kubernetes Fleet Manager + should exist. Changing this forces a new Kubernetes Fleet Manager + to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Kubernetes Fleet Manager. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. 
+ ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + status: + description: KubernetesFleetManagerStatus defines the observed state of + KubernetesFleetManager. 
+ properties: + atProvider: + properties: + hubProfile: + properties: + dnsPrefix: + type: string + fqdn: + type: string + kubernetesVersion: + type: string + type: object + id: + description: The ID of the Kubernetes Fleet Manager. + type: string + location: + description: The Azure Region where the Kubernetes Fleet Manager + should exist. Changing this forces a new Kubernetes Fleet Manager + to be created. + type: string + resourceGroupName: + description: Specifies the name of the Resource Group within which + this Kubernetes Fleet Manager should exist. Changing this forces + a new Kubernetes Fleet Manager to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Kubernetes Fleet Manager. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. 
At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/cosmosdb.azure.upbound.io_accounts.yaml b/package/crds/cosmosdb.azure.upbound.io_accounts.yaml index 99e56f287..87dd06793 100644 --- a/package/crds/cosmosdb.azure.upbound.io_accounts.yaml +++ b/package/crds/cosmosdb.azure.upbound.io_accounts.yaml @@ -1647,3 +1647,1590 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Account is the Schema for the Accounts API. Manages a CosmosDB + (formally DocumentDB) Account. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: AccountSpec defines the desired state of Account + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + accessKeyMetadataWritesEnabled: + description: Is write operations on metadata resources (databases, + containers, throughput) via account keys enabled? Defaults to + true. + type: boolean + analyticalStorage: + description: An analytical_storage block as defined below. + properties: + schemaType: + description: The schema type of the Analytical Storage for + this Cosmos DB account. Possible values are FullFidelity + and WellDefined. + type: string + type: object + analyticalStorageEnabled: + description: Enable Analytical Storage option for this Cosmos + DB account. Defaults to false. Enabling and then disabling analytical + storage forces a new resource to be created. + type: boolean + backup: + description: A backup block as defined below. + properties: + intervalInMinutes: + description: The interval in minutes between two backups. + Possible values are between 60 and 1440. Defaults to 240. 
+ type: number + retentionInHours: + description: The time in hours that each backup is retained. + Possible values are between 8 and 720. Defaults to 8. + type: number + storageRedundancy: + description: The storage redundancy is used to indicate the + type of backup residency. Possible values are Geo, Local + and Zone. Defaults to Geo. + type: string + tier: + description: The continuous backup tier. Possible values are + Continuous7Days and Continuous30Days. + type: string + type: + description: The type of the backup. Possible values are Continuous + and Periodic. + type: string + type: object + capabilities: + description: The capabilities which should be enabled for this + Cosmos DB account. Value is a capabilities block as defined + below. + items: + properties: + name: + description: The capability to enable - Possible values + are AllowSelfServeUpgradeToMongo36, DisableRateLimitingResponses, + EnableAggregationPipeline, EnableCassandra, EnableGremlin, + EnableMongo, EnableMongo16MBDocumentSupport, EnableMongoRetryableWrites, + EnableMongoRoleBasedAccessControl, EnablePartialUniqueIndex, + EnableServerless, EnableTable, EnableTtlOnCustomPath, + EnableUniqueCompoundNestedDocs, MongoDBv3.4 and mongoEnableDocLevelTTL. + type: string + type: object + type: array + capacity: + description: A capacity block as defined below. + properties: + totalThroughputLimit: + description: The total throughput limit imposed on this Cosmos + DB account (RU/s). Possible values are at least -1. -1 means + no limit. + type: number + type: object + consistencyPolicy: + description: Specifies one consistency_policy block as defined + below, used to define the consistency policy for this CosmosDB + account. + properties: + consistencyLevel: + description: The Consistency Level to use for this CosmosDB + Account - can be either BoundedStaleness, Eventual, Session, + Strong or ConsistentPrefix. 
+ type: string + maxIntervalInSeconds: + description: When used with the Bounded Staleness consistency + level, this value represents the time amount of staleness + (in seconds) tolerated. The accepted range for this value + is 5 - 86400 (1 day). Defaults to 5. Required when consistency_level + is set to BoundedStaleness. + type: number + maxStalenessPrefix: + description: When used with the Bounded Staleness consistency + level, this value represents the number of stale requests + tolerated. The accepted range for this value is 10 – 2147483647. + Defaults to 100. Required when consistency_level is set + to BoundedStaleness. + type: number + type: object + corsRule: + description: A cors_rule block as defined below. + properties: + allowedHeaders: + description: A list of headers that are allowed to be a part + of the cross-origin request. + items: + type: string + type: array + allowedMethods: + description: A list of HTTP headers that are allowed to be + executed by the origin. Valid options are DELETE, GET, HEAD, + MERGE, POST, OPTIONS, PUT or PATCH. + items: + type: string + type: array + allowedOrigins: + description: A list of origin domains that will be allowed + by CORS. + items: + type: string + type: array + exposedHeaders: + description: A list of response headers that are exposed to + CORS clients. + items: + type: string + type: array + maxAgeInSeconds: + description: The number of seconds the client should cache + a preflight response. Possible values are between 1 and + 2147483647. + type: number + type: object + createMode: + description: The creation mode for the CosmosDB Account. Possible + values are Default and Restore. Changing this forces a new resource + to be created. + type: string + defaultIdentityType: + description: The default identity for accessing Key Vault. Possible + values are FirstPartyIdentity, SystemAssignedIdentity or UserAssignedIdentity. + Defaults to FirstPartyIdentity. 
+ type: string + enableAutomaticFailover: + description: Enable automatic failover for this Cosmos DB account. + type: boolean + enableFreeTier: + description: Enable the Free Tier pricing option for this Cosmos + DB account. Defaults to false. Changing this forces a new resource + to be created. + type: boolean + enableMultipleWriteLocations: + description: Enable multiple write locations for this Cosmos DB + account. + type: boolean + geoLocation: + description: Specifies a geo_location resource, used to define + where data should be replicated with the failover_priority 0 + specifying the primary location. Value is a geo_location block + as defined below. + items: + properties: + failoverPriority: + description: The failover priority of the region. A failover + priority of 0 indicates a write region. The maximum value + for a failover priority = (total number of regions - 1). + Failover priority values must be unique for each of the + regions in which the database account exists. Changing + this causes the location to be re-provisioned and cannot + be changed for the location with failover priority 0. + type: number + location: + description: The name of the Azure region to host replicated + data. + type: string + zoneRedundant: + description: Should zone redundancy be enabled for this + region? Defaults to false. + type: boolean + type: object + type: array + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Cosmos Account. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: The Type of Managed Identity assigned to this + Cosmos account. Possible values are SystemAssigned, UserAssigned + and SystemAssigned, UserAssigned. 
+ type: string + type: object + ipRangeFilter: + description: 'CosmosDB Firewall Support: This value specifies + the set of IP addresses or IP address ranges in CIDR form to + be included as the allowed list of client IPs for a given database + account. IP addresses/ranges must be comma separated and must + not contain any spaces.' + type: string + isVirtualNetworkFilterEnabled: + description: Enables virtual network filtering for this Cosmos + DB account. + type: boolean + keyVaultKeyId: + description: A versionless Key Vault Key ID for CMK encryption. + Changing this forces a new resource to be created. + type: string + kind: + description: Specifies the Kind of CosmosDB to create - possible + values are GlobalDocumentDB, MongoDB and Parse. Defaults to + GlobalDocumentDB. Changing this forces a new resource to be + created. + type: string + localAuthenticationDisabled: + description: Disable local authentication and ensure only MSI + and AAD can be used exclusively for authentication. Defaults + to false. Can be set only when using the SQL API. + type: boolean + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + minimalTlsVersion: + description: 'Specifies the minimal TLS version for the CosmosDB + account. Possible values are: Tls, Tls11, and Tls12. Defaults + to Tls12.' + type: string + mongoServerVersion: + description: The Server Version of a MongoDB account. Possible + values are 4.2, 4.0, 3.6, and 3.2. + type: string + networkAclBypassForAzureServices: + description: If Azure services can bypass ACLs. Defaults to false. + type: boolean + networkAclBypassIds: + description: The list of resource Ids for Network Acl Bypass for + this Cosmos DB account. + items: + type: string + type: array + offerType: + description: Specifies the Offer Type to use for this CosmosDB + Account; currently, this can only be set to Standard. 
+ type: string + partitionMergeEnabled: + description: Is partition merge on the Cosmos DB account enabled? + Defaults to false. + type: boolean + publicNetworkAccessEnabled: + description: Whether or not public network access is allowed for + this CosmosDB account. Defaults to true. + type: boolean + resourceGroupName: + description: The name of the resource group in which the CosmosDB + Account is created. Changing this forces a new resource to be + created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + restore: + description: A restore block as defined below. + properties: + database: + description: A database block as defined below. Changing this + forces a new resource to be created. + items: + properties: + collectionNames: + description: A list of the collection names for the + restore request. Changing this forces a new resource + to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + name: + description: Specifies the name of the CosmosDB Account. + Changing this forces a new resource to be created. + type: string + type: object + type: array + gremlinDatabase: + description: One or more gremlin_database blocks as defined + below. Changing this forces a new resource to be created. + items: + properties: + graphNames: + description: A list of the Graph names for the restore + request. Changing this forces a new resource to be + created. + items: + type: string + type: array + name: + description: Specifies the name of the CosmosDB Account. + Changing this forces a new resource to be created. + type: string + type: object + type: array + restoreTimestampInUtc: + description: The creation time of the database or the collection + (Datetime Format RFC 3339). Changing this forces a new resource + to be created. 
+ type: string + sourceCosmosdbAccountId: + description: The resource ID of the restorable database account + from which the restore has to be initiated. The example + is /subscriptions/{subscriptionId}/providers/Microsoft.DocumentDB/locations/{location}/restorableDatabaseAccounts/{restorableDatabaseAccountName}. + Changing this forces a new resource to be created. + type: string + sourceCosmosdbAccountIdRef: + description: Reference to a Account in cosmosdb to populate + sourceCosmosdbAccountId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + sourceCosmosdbAccountIdSelector: + description: Selector for a Account in cosmosdb to populate + sourceCosmosdbAccountId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tablesToRestore: + description: A list of specific tables available for restore. + Changing this forces a new resource to be created. + items: + type: string + type: array + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + virtualNetworkRule: + description: Specifies a virtual_network_rule block as defined + below, used to define which subnets are allowed to access this + CosmosDB account. + items: + properties: + id: + description: The ID of the virtual network subnet. + type: string + ignoreMissingVnetServiceEndpoint: + description: If set to true, the specified subnet will be + added as a virtual network rule even if its CosmosDB service + endpoint is not active. Defaults to false. + type: boolean + type: object + type: array + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. 
+ The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + accessKeyMetadataWritesEnabled: + description: Is write operations on metadata resources (databases, + containers, throughput) via account keys enabled? Defaults to + true. + type: boolean + analyticalStorage: + description: An analytical_storage block as defined below. + properties: + schemaType: + description: The schema type of the Analytical Storage for + this Cosmos DB account. Possible values are FullFidelity + and WellDefined. + type: string + type: object + analyticalStorageEnabled: + description: Enable Analytical Storage option for this Cosmos + DB account. Defaults to false. Enabling and then disabling analytical + storage forces a new resource to be created. + type: boolean + backup: + description: A backup block as defined below. + properties: + intervalInMinutes: + description: The interval in minutes between two backups. + Possible values are between 60 and 1440. Defaults to 240. + type: number + retentionInHours: + description: The time in hours that each backup is retained. + Possible values are between 8 and 720. Defaults to 8. + type: number + storageRedundancy: + description: The storage redundancy is used to indicate the + type of backup residency. Possible values are Geo, Local + and Zone. Defaults to Geo. + type: string + tier: + description: The continuous backup tier. Possible values are + Continuous7Days and Continuous30Days. + type: string + type: + description: The type of the backup. Possible values are Continuous + and Periodic. + type: string + type: object + capabilities: + description: The capabilities which should be enabled for this + Cosmos DB account. Value is a capabilities block as defined + below. 
+ items: + properties: + name: + description: The capability to enable - Possible values + are AllowSelfServeUpgradeToMongo36, DisableRateLimitingResponses, + EnableAggregationPipeline, EnableCassandra, EnableGremlin, + EnableMongo, EnableMongo16MBDocumentSupport, EnableMongoRetryableWrites, + EnableMongoRoleBasedAccessControl, EnablePartialUniqueIndex, + EnableServerless, EnableTable, EnableTtlOnCustomPath, + EnableUniqueCompoundNestedDocs, MongoDBv3.4 and mongoEnableDocLevelTTL. + type: string + type: object + type: array + capacity: + description: A capacity block as defined below. + properties: + totalThroughputLimit: + description: The total throughput limit imposed on this Cosmos + DB account (RU/s). Possible values are at least -1. -1 means + no limit. + type: number + type: object + consistencyPolicy: + description: Specifies one consistency_policy block as defined + below, used to define the consistency policy for this CosmosDB + account. + properties: + consistencyLevel: + description: The Consistency Level to use for this CosmosDB + Account - can be either BoundedStaleness, Eventual, Session, + Strong or ConsistentPrefix. + type: string + maxIntervalInSeconds: + description: When used with the Bounded Staleness consistency + level, this value represents the time amount of staleness + (in seconds) tolerated. The accepted range for this value + is 5 - 86400 (1 day). Defaults to 5. Required when consistency_level + is set to BoundedStaleness. + type: number + maxStalenessPrefix: + description: When used with the Bounded Staleness consistency + level, this value represents the number of stale requests + tolerated. The accepted range for this value is 10 – 2147483647. + Defaults to 100. Required when consistency_level is set + to BoundedStaleness. + type: number + type: object + corsRule: + description: A cors_rule block as defined below. 
+ properties: + allowedHeaders: + description: A list of headers that are allowed to be a part + of the cross-origin request. + items: + type: string + type: array + allowedMethods: + description: A list of HTTP headers that are allowed to be + executed by the origin. Valid options are DELETE, GET, HEAD, + MERGE, POST, OPTIONS, PUT or PATCH. + items: + type: string + type: array + allowedOrigins: + description: A list of origin domains that will be allowed + by CORS. + items: + type: string + type: array + exposedHeaders: + description: A list of response headers that are exposed to + CORS clients. + items: + type: string + type: array + maxAgeInSeconds: + description: The number of seconds the client should cache + a preflight response. Possible values are between 1 and + 2147483647. + type: number + type: object + createMode: + description: The creation mode for the CosmosDB Account. Possible + values are Default and Restore. Changing this forces a new resource + to be created. + type: string + defaultIdentityType: + description: The default identity for accessing Key Vault. Possible + values are FirstPartyIdentity, SystemAssignedIdentity or UserAssignedIdentity. + Defaults to FirstPartyIdentity. + type: string + enableAutomaticFailover: + description: Enable automatic failover for this Cosmos DB account. + type: boolean + enableFreeTier: + description: Enable the Free Tier pricing option for this Cosmos + DB account. Defaults to false. Changing this forces a new resource + to be created. + type: boolean + enableMultipleWriteLocations: + description: Enable multiple write locations for this Cosmos DB + account. + type: boolean + geoLocation: + description: Specifies a geo_location resource, used to define + where data should be replicated with the failover_priority 0 + specifying the primary location. Value is a geo_location block + as defined below. + items: + properties: + failoverPriority: + description: The failover priority of the region. 
A failover + priority of 0 indicates a write region. The maximum value + for a failover priority = (total number of regions - 1). + Failover priority values must be unique for each of the + regions in which the database account exists. Changing + this causes the location to be re-provisioned and cannot + be changed for the location with failover priority 0. + type: number + location: + description: The name of the Azure region to host replicated + data. + type: string + zoneRedundant: + description: Should zone redundancy be enabled for this + region? Defaults to false. + type: boolean + type: object + type: array + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Cosmos Account. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: The Type of Managed Identity assigned to this + Cosmos account. Possible values are SystemAssigned, UserAssigned + and SystemAssigned, UserAssigned. + type: string + type: object + ipRangeFilter: + description: 'CosmosDB Firewall Support: This value specifies + the set of IP addresses or IP address ranges in CIDR form to + be included as the allowed list of client IPs for a given database + account. IP addresses/ranges must be comma separated and must + not contain any spaces.' + type: string + isVirtualNetworkFilterEnabled: + description: Enables virtual network filtering for this Cosmos + DB account. + type: boolean + keyVaultKeyId: + description: A versionless Key Vault Key ID for CMK encryption. + Changing this forces a new resource to be created. + type: string + kind: + description: Specifies the Kind of CosmosDB to create - possible + values are GlobalDocumentDB, MongoDB and Parse. Defaults to + GlobalDocumentDB. Changing this forces a new resource to be + created. 
+ type: string + localAuthenticationDisabled: + description: Disable local authentication and ensure only MSI + and AAD can be used exclusively for authentication. Defaults + to false. Can be set only when using the SQL API. + type: boolean + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + minimalTlsVersion: + description: 'Specifies the minimal TLS version for the CosmosDB + account. Possible values are: Tls, Tls11, and Tls12. Defaults + to Tls12.' + type: string + mongoServerVersion: + description: The Server Version of a MongoDB account. Possible + values are 4.2, 4.0, 3.6, and 3.2. + type: string + networkAclBypassForAzureServices: + description: If Azure services can bypass ACLs. Defaults to false. + type: boolean + networkAclBypassIds: + description: The list of resource Ids for Network Acl Bypass for + this Cosmos DB account. + items: + type: string + type: array + offerType: + description: Specifies the Offer Type to use for this CosmosDB + Account; currently, this can only be set to Standard. + type: string + partitionMergeEnabled: + description: Is partition merge on the Cosmos DB account enabled? + Defaults to false. + type: boolean + publicNetworkAccessEnabled: + description: Whether or not public network access is allowed for + this CosmosDB account. Defaults to true. + type: boolean + restore: + description: A restore block as defined below. + properties: + database: + description: A database block as defined below. Changing this + forces a new resource to be created. + items: + properties: + collectionNames: + description: A list of the collection names for the + restore request. Changing this forces a new resource + to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + name: + description: Specifies the name of the CosmosDB Account. + Changing this forces a new resource to be created. 
+ type: string + type: object + type: array + gremlinDatabase: + description: One or more gremlin_database blocks as defined + below. Changing this forces a new resource to be created. + items: + properties: + graphNames: + description: A list of the Graph names for the restore + request. Changing this forces a new resource to be + created. + items: + type: string + type: array + name: + description: Specifies the name of the CosmosDB Account. + Changing this forces a new resource to be created. + type: string + type: object + type: array + restoreTimestampInUtc: + description: The creation time of the database or the collection + (Datetime Format RFC 3339). Changing this forces a new resource + to be created. + type: string + sourceCosmosdbAccountId: + description: The resource ID of the restorable database account + from which the restore has to be initiated. The example + is /subscriptions/{subscriptionId}/providers/Microsoft.DocumentDB/locations/{location}/restorableDatabaseAccounts/{restorableDatabaseAccountName}. + Changing this forces a new resource to be created. + type: string + sourceCosmosdbAccountIdRef: + description: Reference to a Account in cosmosdb to populate + sourceCosmosdbAccountId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + sourceCosmosdbAccountIdSelector: + description: Selector for a Account in cosmosdb to populate + sourceCosmosdbAccountId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tablesToRestore: + description: A list of specific tables available for restore. + Changing this forces a new resource to be created. + items: + type: string + type: array + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + virtualNetworkRule: + description: Specifies a virtual_network_rule block as defined + below, used to define which subnets are allowed to access this + CosmosDB account. + items: + properties: + id: + description: The ID of the virtual network subnet. 
+ type: string + ignoreMissingVnetServiceEndpoint: + description: If set to true, the specified subnet will be + added as a virtual network rule even if its CosmosDB service + endpoint is not active. Defaults to false. + type: boolean + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.consistencyPolicy is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.consistencyPolicy) + || (has(self.initProvider) && has(self.initProvider.consistencyPolicy))' + - message: spec.forProvider.geoLocation is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.geoLocation) + || (has(self.initProvider) && has(self.initProvider.geoLocation))' + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.offerType is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.offerType) + || (has(self.initProvider) && has(self.initProvider.offerType))' + status: + description: AccountStatus defines the observed state of Account. + properties: + atProvider: + properties: + accessKeyMetadataWritesEnabled: + description: Is write operations on metadata resources (databases, + containers, throughput) via account keys enabled? Defaults to + true. + type: boolean + analyticalStorage: + description: An analytical_storage block as defined below. + properties: + schemaType: + description: The schema type of the Analytical Storage for + this Cosmos DB account. Possible values are FullFidelity + and WellDefined. 
+ type: string + type: object + analyticalStorageEnabled: + description: Enable Analytical Storage option for this Cosmos + DB account. Defaults to false. Enabling and then disabling analytical + storage forces a new resource to be created. + type: boolean + backup: + description: A backup block as defined below. + properties: + intervalInMinutes: + description: The interval in minutes between two backups. + Possible values are between 60 and 1440. Defaults to 240. + type: number + retentionInHours: + description: The time in hours that each backup is retained. + Possible values are between 8 and 720. Defaults to 8. + type: number + storageRedundancy: + description: The storage redundancy is used to indicate the + type of backup residency. Possible values are Geo, Local + and Zone. Defaults to Geo. + type: string + tier: + description: The continuous backup tier. Possible values are + Continuous7Days and Continuous30Days. + type: string + type: + description: The type of the backup. Possible values are Continuous + and Periodic. + type: string + type: object + capabilities: + description: The capabilities which should be enabled for this + Cosmos DB account. Value is a capabilities block as defined + below. + items: + properties: + name: + description: The capability to enable - Possible values + are AllowSelfServeUpgradeToMongo36, DisableRateLimitingResponses, + EnableAggregationPipeline, EnableCassandra, EnableGremlin, + EnableMongo, EnableMongo16MBDocumentSupport, EnableMongoRetryableWrites, + EnableMongoRoleBasedAccessControl, EnablePartialUniqueIndex, + EnableServerless, EnableTable, EnableTtlOnCustomPath, + EnableUniqueCompoundNestedDocs, MongoDBv3.4 and mongoEnableDocLevelTTL. + type: string + type: object + type: array + capacity: + description: A capacity block as defined below. + properties: + totalThroughputLimit: + description: The total throughput limit imposed on this Cosmos + DB account (RU/s). Possible values are at least -1. -1 means + no limit. 
+ type: number + type: object + consistencyPolicy: + description: Specifies one consistency_policy block as defined + below, used to define the consistency policy for this CosmosDB + account. + properties: + consistencyLevel: + description: The Consistency Level to use for this CosmosDB + Account - can be either BoundedStaleness, Eventual, Session, + Strong or ConsistentPrefix. + type: string + maxIntervalInSeconds: + description: When used with the Bounded Staleness consistency + level, this value represents the time amount of staleness + (in seconds) tolerated. The accepted range for this value + is 5 - 86400 (1 day). Defaults to 5. Required when consistency_level + is set to BoundedStaleness. + type: number + maxStalenessPrefix: + description: When used with the Bounded Staleness consistency + level, this value represents the number of stale requests + tolerated. The accepted range for this value is 10 – 2147483647. + Defaults to 100. Required when consistency_level is set + to BoundedStaleness. + type: number + type: object + corsRule: + description: A cors_rule block as defined below. + properties: + allowedHeaders: + description: A list of headers that are allowed to be a part + of the cross-origin request. + items: + type: string + type: array + allowedMethods: + description: A list of HTTP headers that are allowed to be + executed by the origin. Valid options are DELETE, GET, HEAD, + MERGE, POST, OPTIONS, PUT or PATCH. + items: + type: string + type: array + allowedOrigins: + description: A list of origin domains that will be allowed + by CORS. + items: + type: string + type: array + exposedHeaders: + description: A list of response headers that are exposed to + CORS clients. + items: + type: string + type: array + maxAgeInSeconds: + description: The number of seconds the client should cache + a preflight response. Possible values are between 1 and + 2147483647. 
+ type: number + type: object + createMode: + description: The creation mode for the CosmosDB Account. Possible + values are Default and Restore. Changing this forces a new resource + to be created. + type: string + defaultIdentityType: + description: The default identity for accessing Key Vault. Possible + values are FirstPartyIdentity, SystemAssignedIdentity or UserAssignedIdentity. + Defaults to FirstPartyIdentity. + type: string + enableAutomaticFailover: + description: Enable automatic failover for this Cosmos DB account. + type: boolean + enableFreeTier: + description: Enable the Free Tier pricing option for this Cosmos + DB account. Defaults to false. Changing this forces a new resource + to be created. + type: boolean + enableMultipleWriteLocations: + description: Enable multiple write locations for this Cosmos DB + account. + type: boolean + endpoint: + description: The endpoint used to connect to the CosmosDB account. + type: string + geoLocation: + description: Specifies a geo_location resource, used to define + where data should be replicated with the failover_priority 0 + specifying the primary location. Value is a geo_location block + as defined below. + items: + properties: + failoverPriority: + description: The failover priority of the region. A failover + priority of 0 indicates a write region. The maximum value + for a failover priority = (total number of regions - 1). + Failover priority values must be unique for each of the + regions in which the database account exists. Changing + this causes the location to be re-provisioned and cannot + be changed for the location with failover priority 0. + type: number + id: + description: The ID of the virtual network subnet. + type: string + location: + description: The name of the Azure region to host replicated + data. + type: string + zoneRedundant: + description: Should zone redundancy be enabled for this + region? Defaults to false. 
+ type: boolean + type: object + type: array + id: + description: The CosmosDB Account ID. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Cosmos Account. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Principal ID associated with this Managed + Service Identity. + type: string + tenantId: + description: The Tenant ID associated with this Managed Service + Identity. + type: string + type: + description: The Type of Managed Identity assigned to this + Cosmos account. Possible values are SystemAssigned, UserAssigned + and SystemAssigned, UserAssigned. + type: string + type: object + ipRangeFilter: + description: 'CosmosDB Firewall Support: This value specifies + the set of IP addresses or IP address ranges in CIDR form to + be included as the allowed list of client IPs for a given database + account. IP addresses/ranges must be comma separated and must + not contain any spaces.' + type: string + isVirtualNetworkFilterEnabled: + description: Enables virtual network filtering for this Cosmos + DB account. + type: boolean + keyVaultKeyId: + description: A versionless Key Vault Key ID for CMK encryption. + Changing this forces a new resource to be created. + type: string + kind: + description: Specifies the Kind of CosmosDB to create - possible + values are GlobalDocumentDB, MongoDB and Parse. Defaults to + GlobalDocumentDB. Changing this forces a new resource to be + created. + type: string + localAuthenticationDisabled: + description: Disable local authentication and ensure only MSI + and AAD can be used exclusively for authentication. Defaults + to false. Can be set only when using the SQL API. + type: boolean + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. 
+ type: string + minimalTlsVersion: + description: 'Specifies the minimal TLS version for the CosmosDB + account. Possible values are: Tls, Tls11, and Tls12. Defaults + to Tls12.' + type: string + mongoServerVersion: + description: The Server Version of a MongoDB account. Possible + values are 4.2, 4.0, 3.6, and 3.2. + type: string + networkAclBypassForAzureServices: + description: If Azure services can bypass ACLs. Defaults to false. + type: boolean + networkAclBypassIds: + description: The list of resource Ids for Network Acl Bypass for + this Cosmos DB account. + items: + type: string + type: array + offerType: + description: Specifies the Offer Type to use for this CosmosDB + Account; currently, this can only be set to Standard. + type: string + partitionMergeEnabled: + description: Is partition merge on the Cosmos DB account enabled? + Defaults to false. + type: boolean + publicNetworkAccessEnabled: + description: Whether or not public network access is allowed for + this CosmosDB account. Defaults to true. + type: boolean + readEndpoints: + description: A list of read endpoints available for this CosmosDB + account. + items: + type: string + type: array + resourceGroupName: + description: The name of the resource group in which the CosmosDB + Account is created. Changing this forces a new resource to be + created. + type: string + restore: + description: A restore block as defined below. + properties: + database: + description: A database block as defined below. Changing this + forces a new resource to be created. + items: + properties: + collectionNames: + description: A list of the collection names for the + restore request. Changing this forces a new resource + to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + name: + description: Specifies the name of the CosmosDB Account. + Changing this forces a new resource to be created. 
+ type: string + type: object + type: array + gremlinDatabase: + description: One or more gremlin_database blocks as defined + below. Changing this forces a new resource to be created. + items: + properties: + graphNames: + description: A list of the Graph names for the restore + request. Changing this forces a new resource to be + created. + items: + type: string + type: array + name: + description: Specifies the name of the CosmosDB Account. + Changing this forces a new resource to be created. + type: string + type: object + type: array + restoreTimestampInUtc: + description: The creation time of the database or the collection + (Datetime Format RFC 3339). Changing this forces a new resource + to be created. + type: string + sourceCosmosdbAccountId: + description: The resource ID of the restorable database account + from which the restore has to be initiated. The example + is /subscriptions/{subscriptionId}/providers/Microsoft.DocumentDB/locations/{location}/restorableDatabaseAccounts/{restorableDatabaseAccountName}. + Changing this forces a new resource to be created. + type: string + tablesToRestore: + description: A list of specific tables available for restore. + Changing this forces a new resource to be created. + items: + type: string + type: array + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + virtualNetworkRule: + description: Specifies a virtual_network_rule block as defined + below, used to define which subnets are allowed to access this + CosmosDB account. + items: + properties: + id: + description: The ID of the virtual network subnet. + type: string + ignoreMissingVnetServiceEndpoint: + description: If set to true, the specified subnet will be + added as a virtual network rule even if its CosmosDB service + endpoint is not active. Defaults to false. 
+ type: boolean + type: object + type: array + writeEndpoints: + description: A list of write endpoints available for this CosmosDB + account. + items: + type: string + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/cosmosdb.azure.upbound.io_cassandraclusters.yaml b/package/crds/cosmosdb.azure.upbound.io_cassandraclusters.yaml index 7b83c2080..153a73c1f 100644 --- a/package/crds/cosmosdb.azure.upbound.io_cassandraclusters.yaml +++ b/package/crds/cosmosdb.azure.upbound.io_cassandraclusters.yaml @@ -781,3 +781,760 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: CassandraCluster is the Schema for the CassandraClusters API. + Manages a Cassandra Cluster. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: CassandraClusterSpec defines the desired state of CassandraCluster + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + authenticationMethod: + description: The authentication method that is used to authenticate + clients. Possible values are None and Cassandra. Defaults to + Cassandra. + type: string + clientCertificatePems: + description: A list of TLS certificates that is used to authorize + client connecting to the Cassandra Cluster. + items: + type: string + type: array + defaultAdminPasswordSecretRef: + description: The initial admin password for this Cassandra Cluster. + Changing this forces a new resource to be created. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + delegatedManagementSubnetId: + description: The ID of the delegated management subnet for this + Cassandra Cluster. Changing this forces a new Cassandra Cluster + to be created. 
+ type: string + delegatedManagementSubnetIdRef: + description: Reference to a Subnet in network to populate delegatedManagementSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + delegatedManagementSubnetIdSelector: + description: Selector for a Subnet in network to populate delegatedManagementSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + externalGossipCertificatePems: + description: A list of TLS certificates that is used to authorize + gossip from unmanaged Cassandra Data Center. + items: + type: string + type: array + externalSeedNodeIpAddresses: + description: A list of IP Addresses of the seed nodes in unmanaged + the Cassandra Data Center which will be added to the seed node + lists of all managed nodes. + items: + type: string + type: array + hoursBetweenBackups: + description: The number of hours to wait between taking a backup + of the Cassandra Cluster. Defaults to 24. + type: number + identity: + description: An identity block as defined below. + properties: + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Cassandra Cluster. The + only possible value is SystemAssigned. + type: string + type: object + location: + description: The Azure Region where the Cassandra Cluster should + exist. Changing this forces a new Cassandra Cluster to be created. + type: string + repairEnabled: + description: Is the automatic repair enabled on the Cassandra + Cluster? Defaults to true. + type: boolean + resourceGroupName: + description: The name of the Resource Group where the Cassandra + Cluster should exist. Changing this forces a new Cassandra Cluster + to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags assigned to the resource. 
+ type: object + x-kubernetes-map-type: granular + version: + description: The version of Cassandra what the Cluster converges + to run. Possible values are 3.11 and 4.0. Defaults to 3.11. + Changing this forces a new Cassandra Cluster to be created. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + authenticationMethod: + description: The authentication method that is used to authenticate + clients. Possible values are None and Cassandra. Defaults to + Cassandra. + type: string + clientCertificatePems: + description: A list of TLS certificates that is used to authorize + client connecting to the Cassandra Cluster. + items: + type: string + type: array + delegatedManagementSubnetId: + description: The ID of the delegated management subnet for this + Cassandra Cluster. Changing this forces a new Cassandra Cluster + to be created. + type: string + delegatedManagementSubnetIdRef: + description: Reference to a Subnet in network to populate delegatedManagementSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + delegatedManagementSubnetIdSelector: + description: Selector for a Subnet in network to populate delegatedManagementSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + externalGossipCertificatePems: + description: A list of TLS certificates that is used to authorize + gossip from unmanaged Cassandra Data Center. 
+ items: + type: string + type: array + externalSeedNodeIpAddresses: + description: A list of IP Addresses of the seed nodes in unmanaged + the Cassandra Data Center which will be added to the seed node + lists of all managed nodes. + items: + type: string + type: array + hoursBetweenBackups: + description: The number of hours to wait between taking a backup + of the Cassandra Cluster. Defaults to 24. + type: number + identity: + description: An identity block as defined below. + properties: + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Cassandra Cluster. The + only possible value is SystemAssigned. + type: string + type: object + location: + description: The Azure Region where the Cassandra Cluster should + exist. Changing this forces a new Cassandra Cluster to be created. + type: string + repairEnabled: + description: Is the automatic repair enabled on the Cassandra + Cluster? Defaults to true. + type: boolean + tags: + additionalProperties: + type: string + description: A mapping of tags assigned to the resource. + type: object + x-kubernetes-map-type: granular + version: + description: The version of Cassandra what the Cluster converges + to run. Possible values are 3.11 and 4.0. Defaults to 3.11. + Changing this forces a new Cassandra Cluster to be created. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.defaultAdminPasswordSecretRef is a required + parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.defaultAdminPasswordSecretRef)' + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + status: + description: CassandraClusterStatus defines the observed state of CassandraCluster. + properties: + atProvider: + properties: + authenticationMethod: + description: The authentication method that is used to authenticate + clients. Possible values are None and Cassandra. Defaults to + Cassandra. + type: string + clientCertificatePems: + description: A list of TLS certificates that is used to authorize + client connecting to the Cassandra Cluster. 
+ items: + type: string + type: array + delegatedManagementSubnetId: + description: The ID of the delegated management subnet for this + Cassandra Cluster. Changing this forces a new Cassandra Cluster + to be created. + type: string + externalGossipCertificatePems: + description: A list of TLS certificates that is used to authorize + gossip from unmanaged Cassandra Data Center. + items: + type: string + type: array + externalSeedNodeIpAddresses: + description: A list of IP Addresses of the seed nodes in unmanaged + the Cassandra Data Center which will be added to the seed node + lists of all managed nodes. + items: + type: string + type: array + hoursBetweenBackups: + description: The number of hours to wait between taking a backup + of the Cassandra Cluster. Defaults to 24. + type: number + id: + description: The ID of the Cassandra Cluster. + type: string + identity: + description: An identity block as defined below. + properties: + principalId: + description: The ID of the Cassandra Cluster. + type: string + tenantId: + description: The ID of the Cassandra Cluster. + type: string + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Cassandra Cluster. The + only possible value is SystemAssigned. + type: string + type: object + location: + description: The Azure Region where the Cassandra Cluster should + exist. Changing this forces a new Cassandra Cluster to be created. + type: string + repairEnabled: + description: Is the automatic repair enabled on the Cassandra + Cluster? Defaults to true. + type: boolean + resourceGroupName: + description: The name of the Resource Group where the Cassandra + Cluster should exist. Changing this forces a new Cassandra Cluster + to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags assigned to the resource. 
+ type: object + x-kubernetes-map-type: granular + version: + description: The version of Cassandra what the Cluster converges + to run. Possible values are 3.11 and 4.0. Defaults to 3.11. + Changing this forces a new Cassandra Cluster to be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/cosmosdb.azure.upbound.io_cassandrakeyspaces.yaml b/package/crds/cosmosdb.azure.upbound.io_cassandrakeyspaces.yaml index d8dbb07d4..ff92702aa 100644 --- a/package/crds/cosmosdb.azure.upbound.io_cassandrakeyspaces.yaml +++ b/package/crds/cosmosdb.azure.upbound.io_cassandrakeyspaces.yaml @@ -540,3 +540,519 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: CassandraKeySpace is the Schema for the CassandraKeySpaces API. + Manages a Cassandra KeySpace within a Cosmos DB Account. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: CassandraKeySpaceSpec defines the desired state of CassandraKeySpace + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + accountName: + description: The name of the Cosmos DB Cassandra KeySpace to create + the table within. Changing this forces a new resource to be + created. + type: string + accountNameRef: + description: Reference to a Account in cosmosdb to populate accountName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + accountNameSelector: + description: Selector for a Account in cosmosdb to populate accountName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + autoscaleSettings: + description: An autoscale_settings block as defined below. + properties: + maxThroughput: + description: The maximum throughput of the Cassandra KeySpace + (RU/s). Must be between 1,000 and 1,000,000. Must be set + in increments of 1,000. Conflicts with throughput. + type: number + type: object + resourceGroupName: + description: The name of the resource group in which the Cosmos + DB Cassandra KeySpace is created. Changing this forces a new + resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + throughput: + description: The throughput of Cassandra KeySpace (RU/s). Must + be set in increments of 100. The minimum value is 400. + type: number + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + autoscaleSettings: + description: An autoscale_settings block as defined below. + properties: + maxThroughput: + description: The maximum throughput of the Cassandra KeySpace + (RU/s). Must be between 1,000 and 1,000,000. Must be set + in increments of 1,000. Conflicts with throughput. + type: number + type: object + throughput: + description: The throughput of Cassandra KeySpace (RU/s). Must + be set in increments of 100. The minimum value is 400. + type: number + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. 
If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: CassandraKeySpaceStatus defines the observed state of CassandraKeySpace. + properties: + atProvider: + properties: + accountName: + description: The name of the Cosmos DB Cassandra KeySpace to create + the table within. Changing this forces a new resource to be + created. + type: string + autoscaleSettings: + description: An autoscale_settings block as defined below. + properties: + maxThroughput: + description: The maximum throughput of the Cassandra KeySpace + (RU/s). Must be between 1,000 and 1,000,000. Must be set + in increments of 1,000. Conflicts with throughput. 
+ type: number + type: object + id: + description: the ID of the CosmosDB Cassandra KeySpace. + type: string + resourceGroupName: + description: The name of the resource group in which the Cosmos + DB Cassandra KeySpace is created. Changing this forces a new + resource to be created. + type: string + throughput: + description: The throughput of Cassandra KeySpace (RU/s). Must + be set in increments of 100. The minimum value is 400. + type: number + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/cosmosdb.azure.upbound.io_cassandratables.yaml b/package/crds/cosmosdb.azure.upbound.io_cassandratables.yaml index 8722d070c..1aedcc1bf 100644 --- a/package/crds/cosmosdb.azure.upbound.io_cassandratables.yaml +++ b/package/crds/cosmosdb.azure.upbound.io_cassandratables.yaml @@ -614,3 +614,584 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: CassandraTable is the Schema for the CassandraTables API. Manages + a Cassandra Table within a Cosmos DB Cassandra Keyspace. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: CassandraTableSpec defines the desired state of CassandraTable + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + analyticalStorageTtl: + description: Time to live of the Analytical Storage. Possible + values are between -1 and 2147483647 except 0. -1 means the + Analytical Storage never expires. Changing this forces a new + resource to be created. + type: number + autoscaleSettings: + description: An autoscale_settings block as defined below. + properties: + maxThroughput: + description: The maximum throughput of the Cassandra Table + (RU/s). Must be between 1,000 and 1,000,000. Must be set + in increments of 1,000. Conflicts with throughput. + type: number + type: object + cassandraKeyspaceId: + description: The ID of the Cosmos DB Cassandra Keyspace to create + the table within. Changing this forces a new resource to be + created. + type: string + cassandraKeyspaceIdRef: + description: Reference to a CassandraKeySpace in cosmosdb to populate + cassandraKeyspaceId. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + cassandraKeyspaceIdSelector: + description: Selector for a CassandraKeySpace in cosmosdb to populate + cassandraKeyspaceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + defaultTtl: + description: Time to live of the Cosmos DB Cassandra table. Possible + values are at least -1. -1 means the Cassandra table never expires. + type: number + schema: + description: A schema block as defined below. + properties: + clusterKey: + description: One or more cluster_key blocks as defined below. + items: + properties: + name: + description: Name of the column to be created. + type: string + orderBy: + description: Order of the key. Currently supported values + are Asc and Desc. + type: string + type: object + type: array + column: + description: One or more column blocks as defined below. + items: + properties: + name: + description: Name of the column to be created. + type: string + type: + description: Type of the column to be created. + type: string + type: object + type: array + partitionKey: + description: One or more partition_key blocks as defined below. + items: + properties: + name: + description: Name of the column to be created. + type: string + type: object + type: array + type: object + throughput: + description: The throughput of Cassandra KeySpace (RU/s). Must + be set in increments of 100. The minimum value is 400. + type: number + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + analyticalStorageTtl: + description: Time to live of the Analytical Storage. Possible + values are between -1 and 2147483647 except 0. -1 means the + Analytical Storage never expires. Changing this forces a new + resource to be created. + type: number + autoscaleSettings: + description: An autoscale_settings block as defined below. + properties: + maxThroughput: + description: The maximum throughput of the Cassandra Table + (RU/s). Must be between 1,000 and 1,000,000. Must be set + in increments of 1,000. Conflicts with throughput. + type: number + type: object + defaultTtl: + description: Time to live of the Cosmos DB Cassandra table. Possible + values are at least -1. -1 means the Cassandra table never expires. + type: number + schema: + description: A schema block as defined below. + properties: + clusterKey: + description: One or more cluster_key blocks as defined below. + items: + properties: + name: + description: Name of the column to be created. + type: string + orderBy: + description: Order of the key. Currently supported values + are Asc and Desc. + type: string + type: object + type: array + column: + description: One or more column blocks as defined below. + items: + properties: + name: + description: Name of the column to be created. + type: string + type: + description: Type of the column to be created. + type: string + type: object + type: array + partitionKey: + description: One or more partition_key blocks as defined below. + items: + properties: + name: + description: Name of the column to be created. + type: string + type: object + type: array + type: object + throughput: + description: The throughput of Cassandra KeySpace (RU/s). Must + be set in increments of 100. The minimum value is 400. 
+ type: number + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. 
+ - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.schema is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.schema) + || (has(self.initProvider) && has(self.initProvider.schema))' + status: + description: CassandraTableStatus defines the observed state of CassandraTable. 
+ properties: + atProvider: + properties: + analyticalStorageTtl: + description: Time to live of the Analytical Storage. Possible + values are between -1 and 2147483647 except 0. -1 means the + Analytical Storage never expires. Changing this forces a new + resource to be created. + type: number + autoscaleSettings: + description: An autoscale_settings block as defined below. + properties: + maxThroughput: + description: The maximum throughput of the Cassandra Table + (RU/s). Must be between 1,000 and 1,000,000. Must be set + in increments of 1,000. Conflicts with throughput. + type: number + type: object + cassandraKeyspaceId: + description: The ID of the Cosmos DB Cassandra Keyspace to create + the table within. Changing this forces a new resource to be + created. + type: string + defaultTtl: + description: Time to live of the Cosmos DB Cassandra table. Possible + values are at least -1. -1 means the Cassandra table never expires. + type: number + id: + description: the ID of the CosmosDB Cassandra Table. + type: string + schema: + description: A schema block as defined below. + properties: + clusterKey: + description: One or more cluster_key blocks as defined below. + items: + properties: + name: + description: Name of the column to be created. + type: string + orderBy: + description: Order of the key. Currently supported values + are Asc and Desc. + type: string + type: object + type: array + column: + description: One or more column blocks as defined below. + items: + properties: + name: + description: Name of the column to be created. + type: string + type: + description: Type of the column to be created. + type: string + type: object + type: array + partitionKey: + description: One or more partition_key blocks as defined below. + items: + properties: + name: + description: Name of the column to be created. + type: string + type: object + type: array + type: object + throughput: + description: The throughput of Cassandra KeySpace (RU/s). 
Must + be set in increments of 100. The minimum value is 400. + type: number + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/cosmosdb.azure.upbound.io_gremlindatabases.yaml b/package/crds/cosmosdb.azure.upbound.io_gremlindatabases.yaml index 38a920787..3a4d5f52c 100644 --- a/package/crds/cosmosdb.azure.upbound.io_gremlindatabases.yaml +++ b/package/crds/cosmosdb.azure.upbound.io_gremlindatabases.yaml @@ -538,3 +538,517 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: GremlinDatabase is the Schema for the GremlinDatabases API. Manages + a Gremlin Database within a Cosmos DB Account. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: GremlinDatabaseSpec defines the desired state of GremlinDatabase + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + accountName: + description: The name of the CosmosDB Account to create the Gremlin + Database within. Changing this forces a new resource to be created. + type: string + accountNameRef: + description: Reference to a Account in cosmosdb to populate accountName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + accountNameSelector: + description: Selector for a Account in cosmosdb to populate accountName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + autoscaleSettings: + description: An autoscale_settings block as defined below. + properties: + maxThroughput: + description: The maximum throughput of the Gremlin database + (RU/s). Must be between 1,000 and 1,000,000. Must be set + in increments of 1,000. Conflicts with throughput. + type: number + type: object + resourceGroupName: + description: The name of the resource group in which the Cosmos + DB Gremlin Database is created. Changing this forces a new resource + to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + throughput: + description: The throughput of the Gremlin database (RU/s). Must + be set in increments of 100. The minimum value is 400. + type: number + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + autoscaleSettings: + description: An autoscale_settings block as defined below. + properties: + maxThroughput: + description: The maximum throughput of the Gremlin database + (RU/s). Must be between 1,000 and 1,000,000. Must be set + in increments of 1,000. Conflicts with throughput. + type: number + type: object + throughput: + description: The throughput of the Gremlin database (RU/s). Must + be set in increments of 100. The minimum value is 400. + type: number + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: GremlinDatabaseStatus defines the observed state of GremlinDatabase. + properties: + atProvider: + properties: + accountName: + description: The name of the CosmosDB Account to create the Gremlin + Database within. Changing this forces a new resource to be created. + type: string + autoscaleSettings: + description: An autoscale_settings block as defined below. + properties: + maxThroughput: + description: The maximum throughput of the Gremlin database + (RU/s). Must be between 1,000 and 1,000,000. Must be set + in increments of 1,000. Conflicts with throughput. + type: number + type: object + id: + description: The ID of the CosmosDB Gremlin Database. + type: string + resourceGroupName: + description: The name of the resource group in which the Cosmos + DB Gremlin Database is created. Changing this forces a new resource + to be created. + type: string + throughput: + description: The throughput of the Gremlin database (RU/s). Must + be set in increments of 100. The minimum value is 400. 
+ type: number + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/cosmosdb.azure.upbound.io_gremlingraphs.yaml b/package/crds/cosmosdb.azure.upbound.io_gremlingraphs.yaml index 631b7ea49..ec89676c2 100644 --- a/package/crds/cosmosdb.azure.upbound.io_gremlingraphs.yaml +++ b/package/crds/cosmosdb.azure.upbound.io_gremlingraphs.yaml @@ -991,3 +991,952 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: GremlinGraph is the Schema for the GremlinGraphs API. Manages + a Gremlin Graph within a Cosmos DB Account. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: GremlinGraphSpec defines the desired state of GremlinGraph + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + accountName: + description: The name of the CosmosDB Account to create the Gremlin + Graph within. Changing this forces a new resource to be created. + type: string + accountNameRef: + description: Reference to a Account in cosmosdb to populate accountName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + accountNameSelector: + description: Selector for a Account in cosmosdb to populate accountName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + analyticalStorageTtl: + description: The time to live of Analytical Storage for this Cosmos + DB Gremlin Graph. Possible values are between -1 to 2147483647 + not including 0. If present and the value is set to -1, it means + never expire. + type: number + autoscaleSettings: + description: An autoscale_settings block as defined below. Requires + partition_key_path to be set. + properties: + maxThroughput: + description: The maximum throughput of the Gremlin graph (RU/s). + Must be between 1,000 and 1,000,000. Must be set in increments + of 1,000. Conflicts with throughput. 
+ type: number + type: object + conflictResolutionPolicy: + description: A conflict_resolution_policy blocks as defined below. + Changing this forces a new resource to be created. + properties: + conflictResolutionPath: + description: The conflict resolution path in the case of LastWriterWins + mode. + type: string + conflictResolutionProcedure: + description: The procedure to resolve conflicts in the case + of custom mode. + type: string + mode: + description: 'Indicates the conflict resolution mode. Possible + values include: LastWriterWins, Custom.' + type: string + type: object + databaseName: + description: The name of the Cosmos DB Graph Database in which + the Cosmos DB Gremlin Graph is created. Changing this forces + a new resource to be created. + type: string + databaseNameRef: + description: Reference to a GremlinDatabase in cosmosdb to populate + databaseName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + databaseNameSelector: + description: Selector for a GremlinDatabase in cosmosdb to populate + databaseName. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + defaultTtl: + description: The default time to live (TTL) of the Gremlin graph. + If the value is missing or set to "-1", items don’t expire. + type: number + indexPolicy: + description: The configuration of the indexing policy. One or + more index_policy blocks as defined below. + properties: + automatic: + description: Indicates if the indexing policy is automatic. + Defaults to true. + type: boolean + compositeIndex: + description: One or more composite_index blocks as defined + below. + items: + properties: + index: + description: One or more index blocks as defined below. + items: + properties: + order: + description: Order of the index. Possible values + are Ascending or Descending. + type: string + path: + description: Path for which the indexing behaviour + applies to. 
According to the service design, + all spatial types including LineString, MultiPolygon, + Point, and Polygon will be applied to the path. + type: string + type: object + type: array + type: object + type: array + excludedPaths: + description: List of paths to exclude from indexing. Required + if indexing_mode is Consistent or Lazy. + items: + type: string + type: array + x-kubernetes-list-type: set + includedPaths: + description: List of paths to include in the indexing. Required + if indexing_mode is Consistent or Lazy. + items: + type: string + type: array + x-kubernetes-list-type: set + indexingMode: + description: 'Indicates the indexing mode. Possible values + include: Consistent, Lazy, None.' + type: string + spatialIndex: + description: One or more spatial_index blocks as defined below. + items: + properties: + path: + description: Path for which the indexing behaviour applies + to. According to the service design, all spatial types + including LineString, MultiPolygon, Point, and Polygon + will be applied to the path. + type: string + type: object + type: array + type: object + partitionKeyPath: + description: Define a partition key. Changing this forces a new + resource to be created. + type: string + partitionKeyVersion: + description: Define a partition key version. Changing this forces + a new resource to be created. Possible values are 1and 2. This + should be set to 2 in order to use large partition keys. + type: number + resourceGroupName: + description: The name of the resource group in which the Cosmos + DB Gremlin Graph is created. Changing this forces a new resource + to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + throughput: + description: The throughput of the Gremlin graph (RU/s). Must + be set in increments of 100. The minimum value is 400. + type: number + uniqueKey: + description: One or more unique_key blocks as defined below. Changing + this forces a new resource to be created. + items: + properties: + paths: + description: A list of paths to use for this unique key. + Changing this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + analyticalStorageTtl: + description: The time to live of Analytical Storage for this Cosmos + DB Gremlin Graph. Possible values are between -1 to 2147483647 + not including 0. If present and the value is set to -1, it means + never expire. + type: number + autoscaleSettings: + description: An autoscale_settings block as defined below. Requires + partition_key_path to be set. + properties: + maxThroughput: + description: The maximum throughput of the Gremlin graph (RU/s). + Must be between 1,000 and 1,000,000. Must be set in increments + of 1,000. Conflicts with throughput. 
+ type: number + type: object + conflictResolutionPolicy: + description: A conflict_resolution_policy blocks as defined below. + Changing this forces a new resource to be created. + properties: + conflictResolutionPath: + description: The conflict resolution path in the case of LastWriterWins + mode. + type: string + conflictResolutionProcedure: + description: The procedure to resolve conflicts in the case + of custom mode. + type: string + mode: + description: 'Indicates the conflict resolution mode. Possible + values include: LastWriterWins, Custom.' + type: string + type: object + defaultTtl: + description: The default time to live (TTL) of the Gremlin graph. + If the value is missing or set to "-1", items don’t expire. + type: number + indexPolicy: + description: The configuration of the indexing policy. One or + more index_policy blocks as defined below. + properties: + automatic: + description: Indicates if the indexing policy is automatic. + Defaults to true. + type: boolean + compositeIndex: + description: One or more composite_index blocks as defined + below. + items: + properties: + index: + description: One or more index blocks as defined below. + items: + properties: + order: + description: Order of the index. Possible values + are Ascending or Descending. + type: string + path: + description: Path for which the indexing behaviour + applies to. According to the service design, + all spatial types including LineString, MultiPolygon, + Point, and Polygon will be applied to the path. + type: string + type: object + type: array + type: object + type: array + excludedPaths: + description: List of paths to exclude from indexing. Required + if indexing_mode is Consistent or Lazy. + items: + type: string + type: array + x-kubernetes-list-type: set + includedPaths: + description: List of paths to include in the indexing. Required + if indexing_mode is Consistent or Lazy. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + indexingMode: + description: 'Indicates the indexing mode. Possible values + include: Consistent, Lazy, None.' + type: string + spatialIndex: + description: One or more spatial_index blocks as defined below. + items: + properties: + path: + description: Path for which the indexing behaviour applies + to. According to the service design, all spatial types + including LineString, MultiPolygon, Point, and Polygon + will be applied to the path. + type: string + type: object + type: array + type: object + partitionKeyPath: + description: Define a partition key. Changing this forces a new + resource to be created. + type: string + partitionKeyVersion: + description: Define a partition key version. Changing this forces + a new resource to be created. Possible values are 1and 2. This + should be set to 2 in order to use large partition keys. + type: number + throughput: + description: The throughput of the Gremlin graph (RU/s). Must + be set in increments of 100. The minimum value is 400. + type: number + uniqueKey: + description: One or more unique_key blocks as defined below. Changing + this forces a new resource to be created. + items: + properties: + paths: + description: A list of paths to use for this unique key. + Changing this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. 
If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.partitionKeyPath is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.partitionKeyPath) + || (has(self.initProvider) && has(self.initProvider.partitionKeyPath))' + status: + description: GremlinGraphStatus defines the observed state of GremlinGraph. + properties: + atProvider: + properties: + accountName: + description: The name of the CosmosDB Account to create the Gremlin + Graph within. Changing this forces a new resource to be created. 
+ type: string + analyticalStorageTtl: + description: The time to live of Analytical Storage for this Cosmos + DB Gremlin Graph. Possible values are between -1 to 2147483647 + not including 0. If present and the value is set to -1, it means + never expire. + type: number + autoscaleSettings: + description: An autoscale_settings block as defined below. Requires + partition_key_path to be set. + properties: + maxThroughput: + description: The maximum throughput of the Gremlin graph (RU/s). + Must be between 1,000 and 1,000,000. Must be set in increments + of 1,000. Conflicts with throughput. + type: number + type: object + conflictResolutionPolicy: + description: A conflict_resolution_policy blocks as defined below. + Changing this forces a new resource to be created. + properties: + conflictResolutionPath: + description: The conflict resolution path in the case of LastWriterWins + mode. + type: string + conflictResolutionProcedure: + description: The procedure to resolve conflicts in the case + of custom mode. + type: string + mode: + description: 'Indicates the conflict resolution mode. Possible + values include: LastWriterWins, Custom.' + type: string + type: object + databaseName: + description: The name of the Cosmos DB Graph Database in which + the Cosmos DB Gremlin Graph is created. Changing this forces + a new resource to be created. + type: string + defaultTtl: + description: The default time to live (TTL) of the Gremlin graph. + If the value is missing or set to "-1", items don’t expire. + type: number + id: + description: The ID of the CosmosDB Gremlin Graph. + type: string + indexPolicy: + description: The configuration of the indexing policy. One or + more index_policy blocks as defined below. + properties: + automatic: + description: Indicates if the indexing policy is automatic. + Defaults to true. + type: boolean + compositeIndex: + description: One or more composite_index blocks as defined + below. 
+ items: + properties: + index: + description: One or more index blocks as defined below. + items: + properties: + order: + description: Order of the index. Possible values + are Ascending or Descending. + type: string + path: + description: Path for which the indexing behaviour + applies to. According to the service design, + all spatial types including LineString, MultiPolygon, + Point, and Polygon will be applied to the path. + type: string + type: object + type: array + type: object + type: array + excludedPaths: + description: List of paths to exclude from indexing. Required + if indexing_mode is Consistent or Lazy. + items: + type: string + type: array + x-kubernetes-list-type: set + includedPaths: + description: List of paths to include in the indexing. Required + if indexing_mode is Consistent or Lazy. + items: + type: string + type: array + x-kubernetes-list-type: set + indexingMode: + description: 'Indicates the indexing mode. Possible values + include: Consistent, Lazy, None.' + type: string + spatialIndex: + description: One or more spatial_index blocks as defined below. + items: + properties: + path: + description: Path for which the indexing behaviour applies + to. According to the service design, all spatial types + including LineString, MultiPolygon, Point, and Polygon + will be applied to the path. + type: string + types: + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + type: object + partitionKeyPath: + description: Define a partition key. Changing this forces a new + resource to be created. + type: string + partitionKeyVersion: + description: Define a partition key version. Changing this forces + a new resource to be created. Possible values are 1and 2. This + should be set to 2 in order to use large partition keys. + type: number + resourceGroupName: + description: The name of the resource group in which the Cosmos + DB Gremlin Graph is created. 
Changing this forces a new resource + to be created. + type: string + throughput: + description: The throughput of the Gremlin graph (RU/s). Must + be set in increments of 100. The minimum value is 400. + type: number + uniqueKey: + description: One or more unique_key blocks as defined below. Changing + this forces a new resource to be created. + items: + properties: + paths: + description: A list of paths to use for this unique key. + Changing this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/cosmosdb.azure.upbound.io_mongocollections.yaml b/package/crds/cosmosdb.azure.upbound.io_mongocollections.yaml index 48e04f8fc..4b61df02d 100644 --- a/package/crds/cosmosdb.azure.upbound.io_mongocollections.yaml +++ b/package/crds/cosmosdb.azure.upbound.io_mongocollections.yaml @@ -735,3 +735,714 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: MongoCollection is the Schema for the MongoCollections API. Manages + a Mongo Collection within a Cosmos DB Account. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: MongoCollectionSpec defines the desired state of MongoCollection + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + accountName: + description: The name of the Cosmos DB Account in which the Cosmos + DB Mongo Collection is created. Changing this forces a new resource + to be created. + type: string + accountNameRef: + description: Reference to a Account in cosmosdb to populate accountName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + accountNameSelector: + description: Selector for a Account in cosmosdb to populate accountName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + analyticalStorageTtl: + description: The default time to live of Analytical Storage for + this Mongo Collection. If present and the value is set to -1, + it is equal to infinity, and items don’t expire by default. + If present and the value is set to some number n – items will + expire n seconds after their last modified time. + type: number + autoscaleSettings: + description: An autoscale_settings block as defined below. 
+ properties: + maxThroughput: + description: The maximum throughput of the MongoDB collection + (RU/s). Must be between 1,000 and 1,000,000. Must be set + in increments of 1,000. Conflicts with throughput. + type: number + type: object + databaseName: + description: The name of the Cosmos DB Mongo Database in which + the Cosmos DB Mongo Collection is created. Changing this forces + a new resource to be created. + type: string + databaseNameRef: + description: Reference to a MongoDatabase in cosmosdb to populate + databaseName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + databaseNameSelector: + description: Selector for a MongoDatabase in cosmosdb to populate + databaseName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + defaultTtlSeconds: + description: The default Time To Live in seconds. If the value + is -1, items are not automatically expired. + type: number + index: + description: One or more index blocks as defined below. + items: + properties: + keys: + description: Specifies the list of user settable keys for + each Cosmos DB Mongo Collection. + items: + type: string + type: array + unique: + description: Is the index unique or not? Defaults to false. + type: boolean + type: object + type: array + resourceGroupName: + description: The name of the resource group in which the Cosmos + DB Mongo Collection is created. Changing this forces a new resource + to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + shardKey: + description: The name of the key to partition on for sharding. + There must not be any other unique index keys. Changing this + forces a new resource to be created. + type: string + throughput: + description: The throughput of the MongoDB collection (RU/s). + Must be set in increments of 100. The minimum value is 400. 
+ type: number + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + analyticalStorageTtl: + description: The default time to live of Analytical Storage for + this Mongo Collection. If present and the value is set to -1, + it is equal to infinity, and items don’t expire by default. + If present and the value is set to some number n – items will + expire n seconds after their last modified time. + type: number + autoscaleSettings: + description: An autoscale_settings block as defined below. + properties: + maxThroughput: + description: The maximum throughput of the MongoDB collection + (RU/s). Must be between 1,000 and 1,000,000. Must be set + in increments of 1,000. Conflicts with throughput. + type: number + type: object + defaultTtlSeconds: + description: The default Time To Live in seconds. If the value + is -1, items are not automatically expired. + type: number + index: + description: One or more index blocks as defined below. + items: + properties: + keys: + description: Specifies the list of user settable keys for + each Cosmos DB Mongo Collection. + items: + type: string + type: array + unique: + description: Is the index unique or not? Defaults to false. + type: boolean + type: object + type: array + shardKey: + description: The name of the key to partition on for sharding. + There must not be any other unique index keys. 
Changing this + forces a new resource to be created. + type: string + throughput: + description: The throughput of the MongoDB collection (RU/s). + Must be set in increments of 100. The minimum value is 400. + type: number + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: MongoCollectionStatus defines the observed state of MongoCollection. 
+ properties: + atProvider: + properties: + accountName: + description: The name of the Cosmos DB Account in which the Cosmos + DB Mongo Collection is created. Changing this forces a new resource + to be created. + type: string + analyticalStorageTtl: + description: The default time to live of Analytical Storage for + this Mongo Collection. If present and the value is set to -1, + it is equal to infinity, and items don’t expire by default. + If present and the value is set to some number n – items will + expire n seconds after their last modified time. + type: number + autoscaleSettings: + description: An autoscale_settings block as defined below. + properties: + maxThroughput: + description: The maximum throughput of the MongoDB collection + (RU/s). Must be between 1,000 and 1,000,000. Must be set + in increments of 1,000. Conflicts with throughput. + type: number + type: object + databaseName: + description: The name of the Cosmos DB Mongo Database in which + the Cosmos DB Mongo Collection is created. Changing this forces + a new resource to be created. + type: string + defaultTtlSeconds: + description: The default Time To Live in seconds. If the value + is -1, items are not automatically expired. + type: number + id: + description: The ID of the Cosmos DB Mongo Collection. + type: string + index: + description: One or more index blocks as defined below. + items: + properties: + keys: + description: Specifies the list of user settable keys for + each Cosmos DB Mongo Collection. + items: + type: string + type: array + unique: + description: Is the index unique or not? Defaults to false. + type: boolean + type: object + type: array + resourceGroupName: + description: The name of the resource group in which the Cosmos + DB Mongo Collection is created. Changing this forces a new resource + to be created. + type: string + shardKey: + description: The name of the key to partition on for sharding. + There must not be any other unique index keys. 
Changing this + forces a new resource to be created. + type: string + systemIndexes: + description: One or more system_indexes blocks as defined below. + items: + properties: + keys: + description: The list of system keys which are not settable + for each Cosmos DB Mongo Collection. + items: + type: string + type: array + unique: + description: Identifies whether the table contains no duplicate + values. + type: boolean + type: object + type: array + throughput: + description: The throughput of the MongoDB collection (RU/s). + Must be set in increments of 100. The minimum value is 400. + type: number + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/cosmosdb.azure.upbound.io_mongodatabases.yaml b/package/crds/cosmosdb.azure.upbound.io_mongodatabases.yaml index 17a7b0a54..a19de8421 100644 --- a/package/crds/cosmosdb.azure.upbound.io_mongodatabases.yaml +++ b/package/crds/cosmosdb.azure.upbound.io_mongodatabases.yaml @@ -540,3 +540,519 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: MongoDatabase is the Schema for the MongoDatabases API. Manages + a Mongo Database within a Cosmos DB Account. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. 
+ Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: MongoDatabaseSpec defines the desired state of MongoDatabase + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + accountName: + description: The name of the Cosmos DB Mongo Database to create + the table within. Changing this forces a new resource to be + created. + type: string + accountNameRef: + description: Reference to a Account in cosmosdb to populate accountName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + accountNameSelector: + description: Selector for a Account in cosmosdb to populate accountName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + autoscaleSettings: + description: An autoscale_settings block as defined below. + properties: + maxThroughput: + description: The maximum throughput of the MongoDB database + (RU/s). Must be between 1,000 and 1,000,000. Must be set + in increments of 1,000. Conflicts with throughput. + type: number + type: object + resourceGroupName: + description: The name of the resource group in which the Cosmos + DB Mongo Database is created. Changing this forces a new resource + to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + throughput: + description: The throughput of the MongoDB database (RU/s). Must + be set in increments of 100. The minimum value is 400. + type: number + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + autoscaleSettings: + description: An autoscale_settings block as defined below. + properties: + maxThroughput: + description: The maximum throughput of the MongoDB database + (RU/s). Must be between 1,000 and 1,000,000. Must be set + in increments of 1,000. Conflicts with throughput. + type: number + type: object + throughput: + description: The throughput of the MongoDB database (RU/s). Must + be set in increments of 100. The minimum value is 400. + type: number + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. 
If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: MongoDatabaseStatus defines the observed state of MongoDatabase. + properties: + atProvider: + properties: + accountName: + description: The name of the Cosmos DB Mongo Database to create + the table within. Changing this forces a new resource to be + created. + type: string + autoscaleSettings: + description: An autoscale_settings block as defined below. + properties: + maxThroughput: + description: The maximum throughput of the MongoDB database + (RU/s). Must be between 1,000 and 1,000,000. Must be set + in increments of 1,000. Conflicts with throughput. 
+ type: number + type: object + id: + description: The ID of the Cosmos DB Mongo Database. + type: string + resourceGroupName: + description: The name of the resource group in which the Cosmos + DB Mongo Database is created. Changing this forces a new resource + to be created. + type: string + throughput: + description: The throughput of the MongoDB database (RU/s). Must + be set in increments of 100. The minimum value is 400. + type: number + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/cosmosdb.azure.upbound.io_sqlcontainers.yaml b/package/crds/cosmosdb.azure.upbound.io_sqlcontainers.yaml index 00c912bc3..482282e82 100644 --- a/package/crds/cosmosdb.azure.upbound.io_sqlcontainers.yaml +++ b/package/crds/cosmosdb.azure.upbound.io_sqlcontainers.yaml @@ -1031,3 +1031,992 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: SQLContainer is the Schema for the SQLContainers API. Manages + a SQL Container within a Cosmos DB Account. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. 
+ Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SQLContainerSpec defines the desired state of SQLContainer + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + accountName: + description: The name of the Cosmos DB Account to create the container + within. Changing this forces a new resource to be created. + type: string + accountNameRef: + description: Reference to a Account in cosmosdb to populate accountName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + accountNameSelector: + description: Selector for a Account in cosmosdb to populate accountName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + analyticalStorageTtl: + description: The default time to live of Analytical Storage for + this SQL container. If present and the value is set to -1, it + is equal to infinity, and items don’t expire by default. If + present and the value is set to some number n – items will expire + n seconds after their last modified time. + type: number + autoscaleSettings: + description: An autoscale_settings block as defined below. Requires + partition_key_path to be set. + properties: + maxThroughput: + description: The maximum throughput of the SQL container (RU/s). + Must be between 1,000 and 1,000,000. 
Must be set in increments + of 1,000. Conflicts with throughput. + type: number + type: object + conflictResolutionPolicy: + description: A conflict_resolution_policy blocks as defined below. + Changing this forces a new resource to be created. + properties: + conflictResolutionPath: + description: The conflict resolution path in the case of LastWriterWins + mode. + type: string + conflictResolutionProcedure: + description: The procedure to resolve conflicts in the case + of Custom mode. + type: string + mode: + description: 'Indicates the conflict resolution mode. Possible + values include: LastWriterWins, Custom.' + type: string + type: object + databaseName: + description: The name of the Cosmos DB SQL Database to create + the container within. Changing this forces a new resource to + be created. + type: string + databaseNameRef: + description: Reference to a SQLDatabase in cosmosdb to populate + databaseName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + databaseNameSelector: + description: Selector for a SQLDatabase in cosmosdb to populate + databaseName. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + defaultTtl: + description: The default time to live of SQL container. If missing, + items are not expired automatically. If present and the value + is set to -1, it is equal to infinity, and items don’t expire + by default. If present and the value is set to some number n + – items will expire n seconds after their last modified time. + type: number + indexingPolicy: + description: An indexing_policy block as defined below. + properties: + compositeIndex: + description: One or more composite_index blocks as defined + below. + items: + properties: + index: + description: One or more index blocks as defined below. + items: + properties: + order: + description: Order of the index. Possible values + are Ascending or Descending. + type: string + path: + description: Path for which the indexing behaviour + applies to. 
According to the service design, + all spatial types including LineString, MultiPolygon, + Point, and Polygon will be applied to the path. + type: string + type: object + type: array + type: object + type: array + excludedPath: + description: One or more excluded_path blocks as defined below. + Either included_path or excluded_path must contain the path + /* + items: + properties: + path: + description: Path for which the indexing behaviour applies + to. According to the service design, all spatial types + including LineString, MultiPolygon, Point, and Polygon + will be applied to the path. + type: string + type: object + type: array + includedPath: + description: One or more included_path blocks as defined below. + Either included_path or excluded_path must contain the path + /* + items: + properties: + path: + description: Path for which the indexing behaviour applies + to. According to the service design, all spatial types + including LineString, MultiPolygon, Point, and Polygon + will be applied to the path. + type: string + type: object + type: array + indexingMode: + description: 'Indicates the indexing mode. Possible values + include: consistent and none. Defaults to consistent.' + type: string + spatialIndex: + description: One or more spatial_index blocks as defined below. + items: + properties: + path: + description: Path for which the indexing behaviour applies + to. According to the service design, all spatial types + including LineString, MultiPolygon, Point, and Polygon + will be applied to the path. + type: string + type: object + type: array + type: object + partitionKeyPath: + description: Define a partition key. Changing this forces a new + resource to be created. + type: string + partitionKeyVersion: + description: Define a partition key version. Changing this forces + a new resource to be created. Possible values are 1and 2. This + should be set to 2 in order to use large partition keys. 
+ type: number + resourceGroupName: + description: The name of the resource group in which the Cosmos + DB SQL Container is created. Changing this forces a new resource + to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + throughput: + description: The throughput of SQL container (RU/s). Must be set + in increments of 100. The minimum value is 400. + type: number + uniqueKey: + description: One or more unique_key blocks as defined below. Changing + this forces a new resource to be created. + items: + properties: + paths: + description: A list of paths to use for this unique key. + Changing this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + analyticalStorageTtl: + description: The default time to live of Analytical Storage for + this SQL container. If present and the value is set to -1, it + is equal to infinity, and items don’t expire by default. 
If + present and the value is set to some number n – items will expire + n seconds after their last modified time. + type: number + autoscaleSettings: + description: An autoscale_settings block as defined below. Requires + partition_key_path to be set. + properties: + maxThroughput: + description: The maximum throughput of the SQL container (RU/s). + Must be between 1,000 and 1,000,000. Must be set in increments + of 1,000. Conflicts with throughput. + type: number + type: object + conflictResolutionPolicy: + description: A conflict_resolution_policy blocks as defined below. + Changing this forces a new resource to be created. + properties: + conflictResolutionPath: + description: The conflict resolution path in the case of LastWriterWins + mode. + type: string + conflictResolutionProcedure: + description: The procedure to resolve conflicts in the case + of Custom mode. + type: string + mode: + description: 'Indicates the conflict resolution mode. Possible + values include: LastWriterWins, Custom.' + type: string + type: object + defaultTtl: + description: The default time to live of SQL container. If missing, + items are not expired automatically. If present and the value + is set to -1, it is equal to infinity, and items don’t expire + by default. If present and the value is set to some number n + – items will expire n seconds after their last modified time. + type: number + indexingPolicy: + description: An indexing_policy block as defined below. + properties: + compositeIndex: + description: One or more composite_index blocks as defined + below. + items: + properties: + index: + description: One or more index blocks as defined below. + items: + properties: + order: + description: Order of the index. Possible values + are Ascending or Descending. + type: string + path: + description: Path for which the indexing behaviour + applies to. 
According to the service design, + all spatial types including LineString, MultiPolygon, + Point, and Polygon will be applied to the path. + type: string + type: object + type: array + type: object + type: array + excludedPath: + description: One or more excluded_path blocks as defined below. + Either included_path or excluded_path must contain the path + /* + items: + properties: + path: + description: Path for which the indexing behaviour applies + to. According to the service design, all spatial types + including LineString, MultiPolygon, Point, and Polygon + will be applied to the path. + type: string + type: object + type: array + includedPath: + description: One or more included_path blocks as defined below. + Either included_path or excluded_path must contain the path + /* + items: + properties: + path: + description: Path for which the indexing behaviour applies + to. According to the service design, all spatial types + including LineString, MultiPolygon, Point, and Polygon + will be applied to the path. + type: string + type: object + type: array + indexingMode: + description: 'Indicates the indexing mode. Possible values + include: consistent and none. Defaults to consistent.' + type: string + spatialIndex: + description: One or more spatial_index blocks as defined below. + items: + properties: + path: + description: Path for which the indexing behaviour applies + to. According to the service design, all spatial types + including LineString, MultiPolygon, Point, and Polygon + will be applied to the path. + type: string + type: object + type: array + type: object + partitionKeyPath: + description: Define a partition key. Changing this forces a new + resource to be created. + type: string + partitionKeyVersion: + description: Define a partition key version. Changing this forces + a new resource to be created. Possible values are 1and 2. This + should be set to 2 in order to use large partition keys. 
+ type: number + throughput: + description: The throughput of SQL container (RU/s). Must be set + in increments of 100. The minimum value is 400. + type: number + uniqueKey: + description: One or more unique_key blocks as defined below. Changing + this forces a new resource to be created. + items: + properties: + paths: + description: A list of paths to use for this unique key. + Changing this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.partitionKeyPath is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.partitionKeyPath) + || (has(self.initProvider) && has(self.initProvider.partitionKeyPath))' + status: + description: SQLContainerStatus defines the observed state of SQLContainer. + properties: + atProvider: + properties: + accountName: + description: The name of the Cosmos DB Account to create the container + within. Changing this forces a new resource to be created. + type: string + analyticalStorageTtl: + description: The default time to live of Analytical Storage for + this SQL container. If present and the value is set to -1, it + is equal to infinity, and items don’t expire by default. If + present and the value is set to some number n – items will expire + n seconds after their last modified time. + type: number + autoscaleSettings: + description: An autoscale_settings block as defined below. Requires + partition_key_path to be set. + properties: + maxThroughput: + description: The maximum throughput of the SQL container (RU/s). + Must be between 1,000 and 1,000,000. Must be set in increments + of 1,000. Conflicts with throughput. + type: number + type: object + conflictResolutionPolicy: + description: A conflict_resolution_policy blocks as defined below. + Changing this forces a new resource to be created. + properties: + conflictResolutionPath: + description: The conflict resolution path in the case of LastWriterWins + mode. + type: string + conflictResolutionProcedure: + description: The procedure to resolve conflicts in the case + of Custom mode. + type: string + mode: + description: 'Indicates the conflict resolution mode. Possible + values include: LastWriterWins, Custom.' 
+ type: string + type: object + databaseName: + description: The name of the Cosmos DB SQL Database to create + the container within. Changing this forces a new resource to + be created. + type: string + defaultTtl: + description: The default time to live of SQL container. If missing, + items are not expired automatically. If present and the value + is set to -1, it is equal to infinity, and items don’t expire + by default. If present and the value is set to some number n + – items will expire n seconds after their last modified time. + type: number + id: + description: The ID of the CosmosDB SQL Container. + type: string + indexingPolicy: + description: An indexing_policy block as defined below. + properties: + compositeIndex: + description: One or more composite_index blocks as defined + below. + items: + properties: + index: + description: One or more index blocks as defined below. + items: + properties: + order: + description: Order of the index. Possible values + are Ascending or Descending. + type: string + path: + description: Path for which the indexing behaviour + applies to. According to the service design, + all spatial types including LineString, MultiPolygon, + Point, and Polygon will be applied to the path. + type: string + type: object + type: array + type: object + type: array + excludedPath: + description: One or more excluded_path blocks as defined below. + Either included_path or excluded_path must contain the path + /* + items: + properties: + path: + description: Path for which the indexing behaviour applies + to. According to the service design, all spatial types + including LineString, MultiPolygon, Point, and Polygon + will be applied to the path. + type: string + type: object + type: array + includedPath: + description: One or more included_path blocks as defined below. + Either included_path or excluded_path must contain the path + /* + items: + properties: + path: + description: Path for which the indexing behaviour applies + to. 
According to the service design, all spatial types + including LineString, MultiPolygon, Point, and Polygon + will be applied to the path. + type: string + type: object + type: array + indexingMode: + description: 'Indicates the indexing mode. Possible values + include: consistent and none. Defaults to consistent.' + type: string + spatialIndex: + description: One or more spatial_index blocks as defined below. + items: + properties: + path: + description: Path for which the indexing behaviour applies + to. According to the service design, all spatial types + including LineString, MultiPolygon, Point, and Polygon + will be applied to the path. + type: string + types: + description: A set of spatial types of the path. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + type: object + partitionKeyPath: + description: Define a partition key. Changing this forces a new + resource to be created. + type: string + partitionKeyVersion: + description: Define a partition key version. Changing this forces + a new resource to be created. Possible values are 1and 2. This + should be set to 2 in order to use large partition keys. + type: number + resourceGroupName: + description: The name of the resource group in which the Cosmos + DB SQL Container is created. Changing this forces a new resource + to be created. + type: string + throughput: + description: The throughput of SQL container (RU/s). Must be set + in increments of 100. The minimum value is 400. + type: number + uniqueKey: + description: One or more unique_key blocks as defined below. Changing + this forces a new resource to be created. + items: + properties: + paths: + description: A list of paths to use for this unique key. + Changing this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + type: object + conditions: + description: Conditions of the resource. 
+ items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/cosmosdb.azure.upbound.io_sqldatabases.yaml b/package/crds/cosmosdb.azure.upbound.io_sqldatabases.yaml index 60a1f8ac0..7676e3a36 100644 --- a/package/crds/cosmosdb.azure.upbound.io_sqldatabases.yaml +++ b/package/crds/cosmosdb.azure.upbound.io_sqldatabases.yaml @@ -546,3 +546,525 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: SQLDatabase is the Schema for the SQLDatabases API. Manages a + SQL Database within a Cosmos DB Account. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SQLDatabaseSpec defines the desired state of SQLDatabase + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + accountName: + description: The name of the Cosmos DB SQL Database to create + the table within. Changing this forces a new resource to be + created. + type: string + accountNameRef: + description: Reference to a Account in cosmosdb to populate accountName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + accountNameSelector: + description: Selector for a Account in cosmosdb to populate accountName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + autoscaleSettings: + description: An autoscale_settings block as defined below. + properties: + maxThroughput: + description: The maximum throughput of the SQL database (RU/s). + Must be between 1,000 and 1,000,000. Must be set in increments + of 1,000. Conflicts with throughput. + type: number + type: object + resourceGroupName: + description: The name of the resource group in which the Cosmos + DB SQL Database is created. Changing this forces a new resource + to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + throughput: + description: The throughput of SQL database (RU/s). Must be set + in increments of 100. The minimum value is 400. Do not set when + azurerm_cosmosdb_account is configured with EnableServerless + capability. + type: number + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + autoscaleSettings: + description: An autoscale_settings block as defined below. + properties: + maxThroughput: + description: The maximum throughput of the SQL database (RU/s). + Must be between 1,000 and 1,000,000. Must be set in increments + of 1,000. Conflicts with throughput. + type: number + type: object + throughput: + description: The throughput of SQL database (RU/s). Must be set + in increments of 100. The minimum value is 400. Do not set when + azurerm_cosmosdb_account is configured with EnableServerless + capability. + type: number + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. 
Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: SQLDatabaseStatus defines the observed state of SQLDatabase. + properties: + atProvider: + properties: + accountName: + description: The name of the Cosmos DB SQL Database to create + the table within. Changing this forces a new resource to be + created. + type: string + autoscaleSettings: + description: An autoscale_settings block as defined below. + properties: + maxThroughput: + description: The maximum throughput of the SQL database (RU/s). + Must be between 1,000 and 1,000,000. Must be set in increments + of 1,000. Conflicts with throughput. 
+ type: number + type: object + id: + description: The ID of the CosmosDB SQL Database. + type: string + resourceGroupName: + description: The name of the resource group in which the Cosmos + DB SQL Database is created. Changing this forces a new resource + to be created. + type: string + throughput: + description: The throughput of SQL database (RU/s). Must be set + in increments of 100. The minimum value is 400. Do not set when + azurerm_cosmosdb_account is configured with EnableServerless + capability. + type: number + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/cosmosdb.azure.upbound.io_tables.yaml b/package/crds/cosmosdb.azure.upbound.io_tables.yaml index c7179e8e1..afb4b8dad 100644 --- a/package/crds/cosmosdb.azure.upbound.io_tables.yaml +++ b/package/crds/cosmosdb.azure.upbound.io_tables.yaml @@ -538,3 +538,517 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Table is the Schema for the Tables API. Manages a Table within + a Cosmos DB Account. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: TableSpec defines the desired state of Table + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + accountName: + description: The name of the Cosmos DB Table to create the table + within. Changing this forces a new resource to be created. + type: string + accountNameRef: + description: Reference to a Account in cosmosdb to populate accountName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + accountNameSelector: + description: Selector for a Account in cosmosdb to populate accountName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + autoscaleSettings: + description: An autoscale_settings block as defined below. + properties: + maxThroughput: + description: The maximum throughput of the Table (RU/s). Must + be between 1,000 and 1,000,000. Must be set in increments + of 1,000. Conflicts with throughput. + type: number + type: object + resourceGroupName: + description: The name of the resource group in which the Cosmos + DB Table is created. Changing this forces a new resource to + be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + throughput: + description: The throughput of Table (RU/s). Must be set in increments + of 100. The minimum value is 400. + type: number + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + autoscaleSettings: + description: An autoscale_settings block as defined below. + properties: + maxThroughput: + description: The maximum throughput of the Table (RU/s). Must + be between 1,000 and 1,000,000. Must be set in increments + of 1,000. Conflicts with throughput. + type: number + type: object + throughput: + description: The throughput of Table (RU/s). Must be set in increments + of 100. The minimum value is 400. + type: number + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: TableStatus defines the observed state of Table. + properties: + atProvider: + properties: + accountName: + description: The name of the Cosmos DB Table to create the table + within. Changing this forces a new resource to be created. + type: string + autoscaleSettings: + description: An autoscale_settings block as defined below. + properties: + maxThroughput: + description: The maximum throughput of the Table (RU/s). Must + be between 1,000 and 1,000,000. Must be set in increments + of 1,000. Conflicts with throughput. + type: number + type: object + id: + description: The ID of the CosmosDB Table. + type: string + resourceGroupName: + description: The name of the resource group in which the Cosmos + DB Table is created. Changing this forces a new resource to + be created. + type: string + throughput: + description: The throughput of Table (RU/s). Must be set in increments + of 100. The minimum value is 400. + type: number + type: object + conditions: + description: Conditions of the resource. 
+ items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/costmanagement.azure.upbound.io_resourcegroupcostmanagementexports.yaml b/package/crds/costmanagement.azure.upbound.io_resourcegroupcostmanagementexports.yaml index 30e19bf75..7b604e07e 100644 --- a/package/crds/costmanagement.azure.upbound.io_resourcegroupcostmanagementexports.yaml +++ b/package/crds/costmanagement.azure.upbound.io_resourcegroupcostmanagementexports.yaml @@ -803,3 +803,773 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ResourceGroupCostManagementExport is the Schema for the ResourceGroupCostManagementExports + API. Manages an Azure Cost Management Export for a Resource Group. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ResourceGroupCostManagementExportSpec defines the desired + state of ResourceGroupCostManagementExport + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + active: + description: Is the cost management export active? Default is + true. + type: boolean + exportDataOptions: + description: A export_data_options block as defined below. + properties: + timeFrame: + description: 'The time frame for pulling data for the query. + If custom, then a specific time period must be provided. + Possible values include: WeekToDate, MonthToDate, BillingMonthToDate, + TheLast7Days, TheLastMonth, TheLastBillingMonth, Custom.' + type: string + type: + description: The type of the query. Possible values are ActualCost, + AmortizedCost and Usage. + type: string + type: object + exportDataStorageLocation: + description: A export_data_storage_location block as defined below. + properties: + containerId: + description: The Resource Manager ID of the container where + exports will be uploaded. Changing this forces a new resource + to be created. + type: string + containerIdRef: + description: Reference to a Container in storage to populate + containerId. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + containerIdSelector: + description: Selector for a Container in storage to populate + containerId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + rootFolderPath: + description: The path of the directory where exports will + be uploaded. Changing this forces a new resource to be created. + type: string + type: object + recurrencePeriodEndDate: + description: The date the export will stop capturing information. + type: string + recurrencePeriodStartDate: + description: The date the export will start capturing information. + type: string + recurrenceType: + description: How often the requested information will be exported. + Valid values include Annually, Daily, Monthly, Weekly. + type: string + resourceGroupId: + description: The id of the resource group on which to create an + export. Changing this forces a new resource to be created. + type: string + resourceGroupIdRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupIdSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + active: + description: Is the cost management export active? Default is + true. + type: boolean + exportDataOptions: + description: A export_data_options block as defined below. 
+ properties: + timeFrame: + description: 'The time frame for pulling data for the query. + If custom, then a specific time period must be provided. + Possible values include: WeekToDate, MonthToDate, BillingMonthToDate, + TheLast7Days, TheLastMonth, TheLastBillingMonth, Custom.' + type: string + type: + description: The type of the query. Possible values are ActualCost, + AmortizedCost and Usage. + type: string + type: object + exportDataStorageLocation: + description: A export_data_storage_location block as defined below. + properties: + containerId: + description: The Resource Manager ID of the container where + exports will be uploaded. Changing this forces a new resource + to be created. + type: string + containerIdRef: + description: Reference to a Container in storage to populate + containerId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + containerIdSelector: + description: Selector for a Container in storage to populate + containerId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + rootFolderPath: + description: The path of the directory where exports will + be uploaded. Changing this forces a new resource to be created. + type: string + type: object + recurrencePeriodEndDate: + description: The date the export will stop capturing information. + type: string + recurrencePeriodStartDate: + description: The date the export will start capturing information. + type: string + recurrenceType: + description: How often the requested information will be exported. + Valid values include Annually, Daily, Monthly, Weekly. + type: string + resourceGroupId: + description: The id of the resource group on which to create an + export. Changing this forces a new resource to be created. + type: string + resourceGroupIdRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupIdSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. 
+ properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.exportDataOptions is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.exportDataOptions) + || (has(self.initProvider) && has(self.initProvider.exportDataOptions))' + - message: spec.forProvider.exportDataStorageLocation is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.exportDataStorageLocation) + || (has(self.initProvider) && has(self.initProvider.exportDataStorageLocation))' + - message: spec.forProvider.recurrencePeriodEndDate is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.recurrencePeriodEndDate) + || (has(self.initProvider) && has(self.initProvider.recurrencePeriodEndDate))' + - message: spec.forProvider.recurrencePeriodStartDate is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.recurrencePeriodStartDate) + || (has(self.initProvider) && has(self.initProvider.recurrencePeriodStartDate))' + - message: spec.forProvider.recurrenceType is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.recurrenceType) + || (has(self.initProvider) && has(self.initProvider.recurrenceType))' + status: + description: ResourceGroupCostManagementExportStatus defines the observed + state of ResourceGroupCostManagementExport. 
+ properties: + atProvider: + properties: + active: + description: Is the cost management export active? Default is + true. + type: boolean + exportDataOptions: + description: A export_data_options block as defined below. + properties: + timeFrame: + description: 'The time frame for pulling data for the query. + If custom, then a specific time period must be provided. + Possible values include: WeekToDate, MonthToDate, BillingMonthToDate, + TheLast7Days, TheLastMonth, TheLastBillingMonth, Custom.' + type: string + type: + description: The type of the query. Possible values are ActualCost, + AmortizedCost and Usage. + type: string + type: object + exportDataStorageLocation: + description: A export_data_storage_location block as defined below. + properties: + containerId: + description: The Resource Manager ID of the container where + exports will be uploaded. Changing this forces a new resource + to be created. + type: string + rootFolderPath: + description: The path of the directory where exports will + be uploaded. Changing this forces a new resource to be created. + type: string + type: object + id: + description: The ID of the Cost Management Export for this Resource + Group. + type: string + recurrencePeriodEndDate: + description: The date the export will stop capturing information. + type: string + recurrencePeriodStartDate: + description: The date the export will start capturing information. + type: string + recurrenceType: + description: How often the requested information will be exported. + Valid values include Annually, Daily, Monthly, Weekly. + type: string + resourceGroupId: + description: The id of the resource group on which to create an + export. Changing this forces a new resource to be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. 
+ properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/costmanagement.azure.upbound.io_subscriptioncostmanagementexports.yaml b/package/crds/costmanagement.azure.upbound.io_subscriptioncostmanagementexports.yaml index 77cbf4248..44e2f5a6e 100644 --- a/package/crds/costmanagement.azure.upbound.io_subscriptioncostmanagementexports.yaml +++ b/package/crds/costmanagement.azure.upbound.io_subscriptioncostmanagementexports.yaml @@ -818,3 +818,788 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: SubscriptionCostManagementExport is the Schema for the SubscriptionCostManagementExports + API. Manages an Azure Cost Management Export for a Subscription. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SubscriptionCostManagementExportSpec defines the desired + state of SubscriptionCostManagementExport + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + active: + description: Is the cost management export active? Default is + true. + type: boolean + exportDataOptions: + description: A export_data_options block as defined below. + properties: + timeFrame: + description: 'The time frame for pulling data for the query. + If custom, then a specific time period must be provided. + Possible values include: WeekToDate, MonthToDate, BillingMonthToDate, + TheLast7Days, TheLastMonth, TheLastBillingMonth, Custom.' + type: string + type: + description: The type of the query. Possible values are ActualCost, + AmortizedCost and Usage. + type: string + type: object + exportDataStorageLocation: + description: A export_data_storage_location block as defined below. + properties: + containerId: + description: The Resource Manager ID of the container where + exports will be uploaded. Changing this forces a new resource + to be created. + type: string + containerIdRef: + description: Reference to a Container in storage to populate + containerId. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + containerIdSelector: + description: Selector for a Container in storage to populate + containerId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + rootFolderPath: + description: The path of the directory where exports will + be uploaded. Changing this forces a new resource to be created. + type: string + type: object + name: + description: Specifies the name of the Cost Management Export. + Changing this forces a new resource to be created. + type: string + recurrencePeriodEndDate: + description: The date the export will stop capturing information. + type: string + recurrencePeriodStartDate: + description: The date the export will start capturing information. + type: string + recurrenceType: + description: How often the requested information will be exported. + Valid values include Annually, Daily, Monthly, Weekly. + type: string + subscriptionId: + description: The id of the subscription on which to create an + export. Changing this forces a new resource to be created. + type: string + subscriptionIdRef: + description: Reference to a Subscription in azure to populate + subscriptionId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subscriptionIdSelector: + description: Selector for a Subscription in azure to populate + subscriptionId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + active: + description: Is the cost management export active? Default is + true. + type: boolean + exportDataOptions: + description: A export_data_options block as defined below. + properties: + timeFrame: + description: 'The time frame for pulling data for the query. + If custom, then a specific time period must be provided. + Possible values include: WeekToDate, MonthToDate, BillingMonthToDate, + TheLast7Days, TheLastMonth, TheLastBillingMonth, Custom.' + type: string + type: + description: The type of the query. Possible values are ActualCost, + AmortizedCost and Usage. + type: string + type: object + exportDataStorageLocation: + description: A export_data_storage_location block as defined below. + properties: + containerId: + description: The Resource Manager ID of the container where + exports will be uploaded. Changing this forces a new resource + to be created. + type: string + containerIdRef: + description: Reference to a Container in storage to populate + containerId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + containerIdSelector: + description: Selector for a Container in storage to populate + containerId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + rootFolderPath: + description: The path of the directory where exports will + be uploaded. Changing this forces a new resource to be created. + type: string + type: object + name: + description: Specifies the name of the Cost Management Export. + Changing this forces a new resource to be created. + type: string + recurrencePeriodEndDate: + description: The date the export will stop capturing information. + type: string + recurrencePeriodStartDate: + description: The date the export will start capturing information. + type: string + recurrenceType: + description: How often the requested information will be exported. + Valid values include Annually, Daily, Monthly, Weekly. 
+ type: string + subscriptionId: + description: The id of the subscription on which to create an + export. Changing this forces a new resource to be created. + type: string + subscriptionIdRef: + description: Reference to a Subscription in azure to populate + subscriptionId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subscriptionIdSelector: + description: Selector for a Subscription in azure to populate + subscriptionId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.exportDataOptions is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.exportDataOptions) + || (has(self.initProvider) && has(self.initProvider.exportDataOptions))' + - message: spec.forProvider.exportDataStorageLocation is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.exportDataStorageLocation) + || (has(self.initProvider) && has(self.initProvider.exportDataStorageLocation))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.recurrencePeriodEndDate is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.recurrencePeriodEndDate) + || (has(self.initProvider) && has(self.initProvider.recurrencePeriodEndDate))' + - message: spec.forProvider.recurrencePeriodStartDate is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.recurrencePeriodStartDate) + || (has(self.initProvider) && has(self.initProvider.recurrencePeriodStartDate))' + - message: spec.forProvider.recurrenceType is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.recurrenceType) + 
|| (has(self.initProvider) && has(self.initProvider.recurrenceType))' + status: + description: SubscriptionCostManagementExportStatus defines the observed + state of SubscriptionCostManagementExport. + properties: + atProvider: + properties: + active: + description: Is the cost management export active? Default is + true. + type: boolean + exportDataOptions: + description: A export_data_options block as defined below. + properties: + timeFrame: + description: 'The time frame for pulling data for the query. + If custom, then a specific time period must be provided. + Possible values include: WeekToDate, MonthToDate, BillingMonthToDate, + TheLast7Days, TheLastMonth, TheLastBillingMonth, Custom.' + type: string + type: + description: The type of the query. Possible values are ActualCost, + AmortizedCost and Usage. + type: string + type: object + exportDataStorageLocation: + description: A export_data_storage_location block as defined below. + properties: + containerId: + description: The Resource Manager ID of the container where + exports will be uploaded. Changing this forces a new resource + to be created. + type: string + rootFolderPath: + description: The path of the directory where exports will + be uploaded. Changing this forces a new resource to be created. + type: string + type: object + id: + description: The ID of the Cost Management Export for this Subscription. + type: string + name: + description: Specifies the name of the Cost Management Export. + Changing this forces a new resource to be created. + type: string + recurrencePeriodEndDate: + description: The date the export will stop capturing information. + type: string + recurrencePeriodStartDate: + description: The date the export will start capturing information. + type: string + recurrenceType: + description: How often the requested information will be exported. + Valid values include Annually, Daily, Monthly, Weekly. 
+ type: string + subscriptionId: + description: The id of the subscription on which to create an + export. Changing this forces a new resource to be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/databricks.azure.upbound.io_accessconnectors.yaml b/package/crds/databricks.azure.upbound.io_accessconnectors.yaml index 00bad07ec..d2254db3c 100644 --- a/package/crds/databricks.azure.upbound.io_accessconnectors.yaml +++ b/package/crds/databricks.azure.upbound.io_accessconnectors.yaml @@ -518,3 +518,497 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: AccessConnector is the Schema for the AccessConnectors API. Manages + a Databricks Access Connector + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: AccessConnectorSpec defines the desired state of AccessConnector + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to the Databricks Access Connector. Only + one User Assigned Managed Identity ID is supported per Databricks + Access Connector resource. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on the Databricks Access Connector. + Possible values include SystemAssigned or UserAssigned. + type: string + type: object + location: + description: Specifies the supported Azure location where the + resource has to be created. Changing this forces a new resource + to be created. + type: string + resourceGroupName: + description: The name of the Resource Group in which the Databricks + Access Connector should exist. Changing this forces a new resource + to be created. 
+ type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to the Databricks Access Connector. Only + one User Assigned Managed Identity ID is supported per Databricks + Access Connector resource. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on the Databricks Access Connector. + Possible values include SystemAssigned or UserAssigned. + type: string + type: object + location: + description: Specifies the supported Azure location where the + resource has to be created. Changing this forces a new resource + to be created. 
+ type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. 
+ properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + status: + description: AccessConnectorStatus defines the observed state of AccessConnector. + properties: + atProvider: + properties: + id: + description: The ID of the Databricks Access Connector in the + Azure management plane. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to the Databricks Access Connector. Only + one User Assigned Managed Identity ID is supported per Databricks + Access Connector resource. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Principal ID of the System Assigned Managed + Service Identity that is configured on this Access Connector. + type: string + tenantId: + description: The Tenant ID of the System Assigned Managed + Service Identity that is configured on this Access Connector. + type: string + type: + description: Specifies the type of Managed Service Identity + that should be configured on the Databricks Access Connector. + Possible values include SystemAssigned or UserAssigned. + type: string + type: object + location: + description: Specifies the supported Azure location where the + resource has to be created. Changing this forces a new resource + to be created. + type: string + resourceGroupName: + description: The name of the Resource Group in which the Databricks + Access Connector should exist. Changing this forces a new resource + to be created. 
+ type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/databricks.azure.upbound.io_workspaces.yaml b/package/crds/databricks.azure.upbound.io_workspaces.yaml index a1a80e216..0d3efe67b 100644 --- a/package/crds/databricks.azure.upbound.io_workspaces.yaml +++ b/package/crds/databricks.azure.upbound.io_workspaces.yaml @@ -1331,3 +1331,1304 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Workspace is the Schema for the Workspaces API. Manages a Databricks + Workspace + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: WorkspaceSpec defines the desired state of Workspace + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + customParameters: + description: A custom_parameters block as documented below. + properties: + machineLearningWorkspaceId: + description: The ID of a Azure Machine Learning workspace + to link with Databricks workspace. Changing this forces + a new resource to be created. + type: string + natGatewayName: + description: Name of the NAT gateway for Secure Cluster Connectivity + (No Public IP) workspace subnets. Defaults to nat-gateway. + Changing this forces a new resource to be created. + type: string + noPublicIp: + description: Are public IP Addresses not allowed? Possible + values are true or false. Defaults to false. + type: boolean + privateSubnetName: + description: The name of the Private Subnet within the Virtual + Network. Required if virtual_network_id is set. Changing + this forces a new resource to be created. + type: string + privateSubnetNameRef: + description: Reference to a Subnet in network to populate + privateSubnetName. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + privateSubnetNameSelector: + description: Selector for a Subnet in network to populate + privateSubnetName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + privateSubnetNetworkSecurityGroupAssociationId: + description: The resource ID of the azurerm_subnet_network_security_group_association + resource which is referred to by the private_subnet_name + field. This is the same as the ID of the subnet referred + to by the private_subnet_name field. Required if virtual_network_id + is set. + type: string + publicIpName: + description: Name of the Public IP for No Public IP workspace + with managed vNet. Defaults to nat-gw-public-ip. Changing + this forces a new resource to be created. + type: string + publicSubnetName: + description: The name of the Public Subnet within the Virtual + Network. Required if virtual_network_id is set. Changing + this forces a new resource to be created. + type: string + publicSubnetNameRef: + description: Reference to a Subnet in network to populate + publicSubnetName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publicSubnetNameSelector: + description: Selector for a Subnet in network to populate + publicSubnetName. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + publicSubnetNetworkSecurityGroupAssociationId: + description: The resource ID of the azurerm_subnet_network_security_group_association + resource which is referred to by the public_subnet_name + field. This is the same as the ID of the subnet referred + to by the public_subnet_name field. Required if virtual_network_id + is set. + type: string + storageAccountName: + description: Default Databricks File Storage account name. + Defaults to a randomized name(e.g. dbstoragel6mfeghoe5kxu). + Changing this forces a new resource to be created. + type: string + storageAccountSkuName: + description: Storage account SKU name. Possible values include + Standard_LRS, Standard_GRS, Standard_RAGRS, Standard_GZRS, + Standard_RAGZRS, Standard_ZRS, Premium_LRS or Premium_ZRS. + Defaults to Standard_GRS. Changing this forces a new resource + to be created. 
+ type: string + virtualNetworkId: + description: The ID of a Virtual Network where this Databricks + Cluster should be created. Changing this forces a new resource + to be created. + type: string + vnetAddressPrefix: + description: Address prefix for Managed virtual network. Defaults + to 10.139. Changing this forces a new resource to be created. + type: string + type: object + customerManagedKeyEnabled: + description: Is the workspace enabled for customer managed key + encryption? If true this enables the Managed Identity for the + managed storage account. Possible values are true or false. + Defaults to false. This field is only valid if the Databricks + Workspace sku is set to premium. + type: boolean + infrastructureEncryptionEnabled: + description: Is the Databricks File System root file system enabled + with a secondary layer of encryption with platform managed keys? + Possible values are true or false. Defaults to false. This field + is only valid if the Databricks Workspace sku is set to premium. + Changing this forces a new resource to be created. + type: boolean + loadBalancerBackendAddressPoolId: + description: Resource ID of the Outbound Load balancer Backend + Address Pool for Secure Cluster Connectivity (No Public IP) + workspace. Changing this forces a new resource to be created. + type: string + location: + description: Specifies the supported Azure location where the + resource has to be created. Changing this forces a new resource + to be created. + type: string + managedDiskCmkKeyVaultKeyId: + description: Customer managed encryption properties for the Databricks + Workspace managed disks. + type: string + managedDiskCmkRotationToLatestVersionEnabled: + description: Whether customer managed keys for disk encryption + will automatically be rotated to the latest version. + type: boolean + managedResourceGroupName: + description: The name of the resource group where Azure should + place the managed Databricks resources. 
Changing this forces + a new resource to be created. + type: string + managedResourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + managedResourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + managedResourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + managedResourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + managedServicesCmkKeyVaultKeyId: + description: Customer managed encryption properties for the Databricks + Workspace managed resources(e.g. Notebooks and Artifacts). + type: string + networkSecurityGroupRulesRequired: + description: Does the data plane (clusters) to control plane communication + happen over private link endpoint only or publicly? Possible + values AllRules, NoAzureDatabricksRules or NoAzureServiceRules. + Required when public_network_access_enabled is set to false. + type: string + publicNetworkAccessEnabled: + description: Allow public access for accessing workspace. Set + value to false to access workspace only via private link endpoint. + Possible values include true or false. Defaults to true. + type: boolean + resourceGroupName: + description: The name of the Resource Group in which the Databricks + Workspace should exist. Changing this forces a new resource + to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sku: + description: The sku to use for the Databricks Workspace. Possible + values are standard, premium, or trial. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. 
+ type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + customParameters: + description: A custom_parameters block as documented below. + properties: + machineLearningWorkspaceId: + description: The ID of a Azure Machine Learning workspace + to link with Databricks workspace. Changing this forces + a new resource to be created. + type: string + natGatewayName: + description: Name of the NAT gateway for Secure Cluster Connectivity + (No Public IP) workspace subnets. Defaults to nat-gateway. + Changing this forces a new resource to be created. + type: string + noPublicIp: + description: Are public IP Addresses not allowed? Possible + values are true or false. Defaults to false. + type: boolean + privateSubnetName: + description: The name of the Private Subnet within the Virtual + Network. Required if virtual_network_id is set. Changing + this forces a new resource to be created. + type: string + privateSubnetNameRef: + description: Reference to a Subnet in network to populate + privateSubnetName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + privateSubnetNameSelector: + description: Selector for a Subnet in network to populate + privateSubnetName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + privateSubnetNetworkSecurityGroupAssociationId: + description: The resource ID of the azurerm_subnet_network_security_group_association + resource which is referred to by the private_subnet_name + field. This is the same as the ID of the subnet referred + to by the private_subnet_name field. Required if virtual_network_id + is set. + type: string + publicIpName: + description: Name of the Public IP for No Public IP workspace + with managed vNet. Defaults to nat-gw-public-ip. Changing + this forces a new resource to be created. + type: string + publicSubnetName: + description: The name of the Public Subnet within the Virtual + Network. Required if virtual_network_id is set. Changing + this forces a new resource to be created. + type: string + publicSubnetNameRef: + description: Reference to a Subnet in network to populate + publicSubnetName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publicSubnetNameSelector: + description: Selector for a Subnet in network to populate + publicSubnetName. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + publicSubnetNetworkSecurityGroupAssociationId: + description: The resource ID of the azurerm_subnet_network_security_group_association + resource which is referred to by the public_subnet_name + field. This is the same as the ID of the subnet referred + to by the public_subnet_name field. Required if virtual_network_id + is set. + type: string + storageAccountName: + description: Default Databricks File Storage account name. + Defaults to a randomized name(e.g. dbstoragel6mfeghoe5kxu). + Changing this forces a new resource to be created. + type: string + storageAccountSkuName: + description: Storage account SKU name. Possible values include + Standard_LRS, Standard_GRS, Standard_RAGRS, Standard_GZRS, + Standard_RAGZRS, Standard_ZRS, Premium_LRS or Premium_ZRS. + Defaults to Standard_GRS. Changing this forces a new resource + to be created. 
+ type: string + virtualNetworkId: + description: The ID of a Virtual Network where this Databricks + Cluster should be created. Changing this forces a new resource + to be created. + type: string + vnetAddressPrefix: + description: Address prefix for Managed virtual network. Defaults + to 10.139. Changing this forces a new resource to be created. + type: string + type: object + customerManagedKeyEnabled: + description: Is the workspace enabled for customer managed key + encryption? If true this enables the Managed Identity for the + managed storage account. Possible values are true or false. + Defaults to false. This field is only valid if the Databricks + Workspace sku is set to premium. + type: boolean + infrastructureEncryptionEnabled: + description: Is the Databricks File System root file system enabled + with a secondary layer of encryption with platform managed keys? + Possible values are true or false. Defaults to false. This field + is only valid if the Databricks Workspace sku is set to premium. + Changing this forces a new resource to be created. + type: boolean + loadBalancerBackendAddressPoolId: + description: Resource ID of the Outbound Load balancer Backend + Address Pool for Secure Cluster Connectivity (No Public IP) + workspace. Changing this forces a new resource to be created. + type: string + location: + description: Specifies the supported Azure location where the + resource has to be created. Changing this forces a new resource + to be created. + type: string + managedDiskCmkKeyVaultKeyId: + description: Customer managed encryption properties for the Databricks + Workspace managed disks. + type: string + managedDiskCmkRotationToLatestVersionEnabled: + description: Whether customer managed keys for disk encryption + will automatically be rotated to the latest version. + type: boolean + managedResourceGroupName: + description: The name of the resource group where Azure should + place the managed Databricks resources. 
Changing this forces + a new resource to be created. + type: string + managedResourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + managedResourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + managedResourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + managedResourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + managedServicesCmkKeyVaultKeyId: + description: Customer managed encryption properties for the Databricks + Workspace managed resources(e.g. Notebooks and Artifacts). + type: string + networkSecurityGroupRulesRequired: + description: Does the data plane (clusters) to control plane communication + happen over private link endpoint only or publicly? Possible + values AllRules, NoAzureDatabricksRules or NoAzureServiceRules. + Required when public_network_access_enabled is set to false. + type: string + publicNetworkAccessEnabled: + description: Allow public access for accessing workspace. Set + value to false to access workspace only via private link endpoint. + Possible values include true or false. Defaults to true. + type: boolean + sku: + description: The sku to use for the Databricks Workspace. Possible + values are standard, premium, or trial. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. 
If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.sku is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.sku) + || (has(self.initProvider) && has(self.initProvider.sku))' + status: + description: WorkspaceStatus defines the observed state of Workspace. + properties: + atProvider: + properties: + customParameters: + description: A custom_parameters block as documented below. + properties: + machineLearningWorkspaceId: + description: The ID of a Azure Machine Learning workspace + to link with Databricks workspace. Changing this forces + a new resource to be created. + type: string + natGatewayName: + description: Name of the NAT gateway for Secure Cluster Connectivity + (No Public IP) workspace subnets. Defaults to nat-gateway. + Changing this forces a new resource to be created. + type: string + noPublicIp: + description: Are public IP Addresses not allowed? Possible + values are true or false. Defaults to false. + type: boolean + privateSubnetName: + description: The name of the Private Subnet within the Virtual + Network. Required if virtual_network_id is set. Changing + this forces a new resource to be created. + type: string + privateSubnetNetworkSecurityGroupAssociationId: + description: The resource ID of the azurerm_subnet_network_security_group_association + resource which is referred to by the private_subnet_name + field. This is the same as the ID of the subnet referred + to by the private_subnet_name field. 
Required if virtual_network_id + is set. + type: string + publicIpName: + description: Name of the Public IP for No Public IP workspace + with managed vNet. Defaults to nat-gw-public-ip. Changing + this forces a new resource to be created. + type: string + publicSubnetName: + description: The name of the Public Subnet within the Virtual + Network. Required if virtual_network_id is set. Changing + this forces a new resource to be created. + type: string + publicSubnetNetworkSecurityGroupAssociationId: + description: The resource ID of the azurerm_subnet_network_security_group_association + resource which is referred to by the public_subnet_name + field. This is the same as the ID of the subnet referred + to by the public_subnet_name field. Required if virtual_network_id + is set. + type: string + storageAccountName: + description: Default Databricks File Storage account name. + Defaults to a randomized name(e.g. dbstoragel6mfeghoe5kxu). + Changing this forces a new resource to be created. + type: string + storageAccountSkuName: + description: Storage account SKU name. Possible values include + Standard_LRS, Standard_GRS, Standard_RAGRS, Standard_GZRS, + Standard_RAGZRS, Standard_ZRS, Premium_LRS or Premium_ZRS. + Defaults to Standard_GRS. Changing this forces a new resource + to be created. + type: string + virtualNetworkId: + description: The ID of a Virtual Network where this Databricks + Cluster should be created. Changing this forces a new resource + to be created. + type: string + vnetAddressPrefix: + description: Address prefix for Managed virtual network. Defaults + to 10.139. Changing this forces a new resource to be created. + type: string + type: object + customerManagedKeyEnabled: + description: Is the workspace enabled for customer managed key + encryption? If true this enables the Managed Identity for the + managed storage account. Possible values are true or false. + Defaults to false. 
This field is only valid if the Databricks + Workspace sku is set to premium. + type: boolean + diskEncryptionSetId: + description: The ID of Managed Disk Encryption Set created by + the Databricks Workspace. + type: string + id: + description: The ID of the Databricks Workspace in the Azure management + plane. + type: string + infrastructureEncryptionEnabled: + description: Is the Databricks File System root file system enabled + with a secondary layer of encryption with platform managed keys? + Possible values are true or false. Defaults to false. This field + is only valid if the Databricks Workspace sku is set to premium. + Changing this forces a new resource to be created. + type: boolean + loadBalancerBackendAddressPoolId: + description: Resource ID of the Outbound Load balancer Backend + Address Pool for Secure Cluster Connectivity (No Public IP) + workspace. Changing this forces a new resource to be created. + type: string + location: + description: Specifies the supported Azure location where the + resource has to be created. Changing this forces a new resource + to be created. + type: string + managedDiskCmkKeyVaultKeyId: + description: Customer managed encryption properties for the Databricks + Workspace managed disks. + type: string + managedDiskCmkRotationToLatestVersionEnabled: + description: Whether customer managed keys for disk encryption + will automatically be rotated to the latest version. + type: boolean + managedDiskIdentity: + description: A managed_disk_identity block as documented below. + items: + properties: + principalId: + description: The principal UUID for the internal databricks + disks identity needed to provide access to the workspace + for enabling Customer Managed Keys. + type: string + tenantId: + description: The UUID of the tenant where the internal databricks + disks identity was created. + type: string + type: + description: The type of the internal databricks disks identity. 
+ type: string + type: object + type: array + managedResourceGroupId: + description: The ID of the Managed Resource Group created by the + Databricks Workspace. + type: string + managedResourceGroupName: + description: The name of the resource group where Azure should + place the managed Databricks resources. Changing this forces + a new resource to be created. + type: string + managedServicesCmkKeyVaultKeyId: + description: Customer managed encryption properties for the Databricks + Workspace managed resources(e.g. Notebooks and Artifacts). + type: string + networkSecurityGroupRulesRequired: + description: Does the data plane (clusters) to control plane communication + happen over private link endpoint only or publicly? Possible + values AllRules, NoAzureDatabricksRules or NoAzureServiceRules. + Required when public_network_access_enabled is set to false. + type: string + publicNetworkAccessEnabled: + description: Allow public access for accessing workspace. Set + value to false to access workspace only via private link endpoint. + Possible values include true or false. Defaults to true. + type: boolean + resourceGroupName: + description: The name of the Resource Group in which the Databricks + Workspace should exist. Changing this forces a new resource + to be created. + type: string + sku: + description: The sku to use for the Databricks Workspace. Possible + values are standard, premium, or trial. + type: string + storageAccountIdentity: + description: A storage_account_identity block as documented below. + items: + properties: + principalId: + description: The principal UUID for the internal databricks + storage account needed to provide access to the workspace + for enabling Customer Managed Keys. + type: string + tenantId: + description: The UUID of the tenant where the internal databricks + storage account was created. + type: string + type: + description: The type of the internal databricks storage + account. 
+ type: string + type: object + type: array + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + workspaceId: + description: The unique identifier of the databricks workspace + in Databricks control plane. + type: string + workspaceUrl: + description: The workspace URL which is of the format 'adb-{workspaceId}.{random}.azuredatabricks.net' + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/datafactory.azure.upbound.io_customdatasets.yaml b/package/crds/datafactory.azure.upbound.io_customdatasets.yaml index 7617cba03..24d96875b 100644 --- a/package/crds/datafactory.azure.upbound.io_customdatasets.yaml +++ b/package/crds/datafactory.azure.upbound.io_customdatasets.yaml @@ -740,3 +740,719 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: CustomDataSet is the Schema for the CustomDataSets API. Manages + a Dataset inside an Azure Data Factory. This is a generic resource that + supports all different Dataset Types. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: CustomDataSetSpec defines the desired state of CustomDataSet + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + additionalProperties: + additionalProperties: + type: string + description: A map of additional properties to associate with + the Data Factory Dataset. + type: object + x-kubernetes-map-type: granular + annotations: + description: List of tags that can be used for describing the + Data Factory Dataset. + items: + type: string + type: array + dataFactoryId: + description: The Data Factory ID in which to associate the Dataset + with. Changing this forces a new resource. + type: string + dataFactoryIdRef: + description: Reference to a Factory in datafactory to populate + dataFactoryId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + dataFactoryIdSelector: + description: Selector for a Factory in datafactory to populate + dataFactoryId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + description: + description: The description for the Data Factory Dataset. + type: string + folder: + description: The folder that this Dataset is in. 
If not specified, + the Dataset will appear at the root level. + type: string + linkedService: + description: A linked_service block as defined below. + properties: + name: + description: The name of the Data Factory Linked Service. + type: string + nameRef: + description: Reference to a LinkedCustomService in datafactory + to populate name. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + nameSelector: + description: Selector for a LinkedCustomService in datafactory + to populate name. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the Data + Factory Linked Service. + type: object + x-kubernetes-map-type: granular + type: object + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the Data Factory + Dataset. + type: object + x-kubernetes-map-type: granular + schemaJson: + description: A JSON object that contains the schema of the Data + Factory Dataset. + type: string + type: + description: The type of dataset that will be associated with + Data Factory. Changing this forces a new resource to be created. + type: string + typePropertiesJson: + description: A JSON object that contains the properties of the + Data Factory Dataset. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. 
+ properties: + additionalProperties: + additionalProperties: + type: string + description: A map of additional properties to associate with + the Data Factory Dataset. + type: object + x-kubernetes-map-type: granular + annotations: + description: List of tags that can be used for describing the + Data Factory Dataset. + items: + type: string + type: array + description: + description: The description for the Data Factory Dataset. + type: string + folder: + description: The folder that this Dataset is in. If not specified, + the Dataset will appear at the root level. + type: string + linkedService: + description: A linked_service block as defined below. + properties: + name: + description: The name of the Data Factory Linked Service. + type: string + nameRef: + description: Reference to a LinkedCustomService in datafactory + to populate name. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + nameSelector: + description: Selector for a LinkedCustomService in datafactory + to populate name. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the Data + Factory Linked Service. + type: object + x-kubernetes-map-type: granular + type: object + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the Data Factory + Dataset. + type: object + x-kubernetes-map-type: granular + schemaJson: + description: A JSON object that contains the schema of the Data + Factory Dataset. + type: string + type: + description: The type of dataset that will be associated with + Data Factory. Changing this forces a new resource to be created. + type: string + typePropertiesJson: + description: A JSON object that contains the properties of the + Data Factory Dataset. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. 
It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.linkedService is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.linkedService) + || (has(self.initProvider) && has(self.initProvider.linkedService))' + - message: spec.forProvider.type is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.type) + || (has(self.initProvider) && has(self.initProvider.type))' + - message: spec.forProvider.typePropertiesJson is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.typePropertiesJson) + || (has(self.initProvider) && has(self.initProvider.typePropertiesJson))' + status: + description: CustomDataSetStatus defines the observed state of CustomDataSet. + properties: + atProvider: + properties: + additionalProperties: + additionalProperties: + type: string + description: A map of additional properties to associate with + the Data Factory Dataset. + type: object + x-kubernetes-map-type: granular + annotations: + description: List of tags that can be used for describing the + Data Factory Dataset. + items: + type: string + type: array + dataFactoryId: + description: The Data Factory ID in which to associate the Dataset + with. Changing this forces a new resource. + type: string + description: + description: The description for the Data Factory Dataset. + type: string + folder: + description: The folder that this Dataset is in. If not specified, + the Dataset will appear at the root level. + type: string + id: + description: The ID of the Data Factory Dataset. 
+ type: string + linkedService: + description: A linked_service block as defined below. + properties: + name: + description: The name of the Data Factory Linked Service. + type: string + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the Data + Factory Linked Service. + type: object + x-kubernetes-map-type: granular + type: object + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the Data Factory + Dataset. + type: object + x-kubernetes-map-type: granular + schemaJson: + description: A JSON object that contains the schema of the Data + Factory Dataset. + type: string + type: + description: The type of dataset that will be associated with + Data Factory. Changing this forces a new resource to be created. + type: string + typePropertiesJson: + description: A JSON object that contains the properties of the + Data Factory Dataset. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. 
+ type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/datafactory.azure.upbound.io_dataflows.yaml b/package/crds/datafactory.azure.upbound.io_dataflows.yaml index e47e46fbe..93a806d7e 100644 --- a/package/crds/datafactory.azure.upbound.io_dataflows.yaml +++ b/package/crds/datafactory.azure.upbound.io_dataflows.yaml @@ -1559,3 +1559,1466 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: DataFlow is the Schema for the DataFlows API. Manages a Data + Flow inside an Azure Data Factory. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DataFlowSpec defines the desired state of DataFlow + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + annotations: + description: List of tags that can be used for describing the + Data Factory Data Flow. + items: + type: string + type: array + dataFactoryId: + description: The ID of Data Factory in which to associate the + Data Flow with. Changing this forces a new resource. + type: string + dataFactoryIdRef: + description: Reference to a Factory in datafactory to populate + dataFactoryId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + dataFactoryIdSelector: + description: Selector for a Factory in datafactory to populate + dataFactoryId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + description: + description: The description for the Data Factory Data Flow. + type: string + folder: + description: The folder that this Data Flow is in. 
If not specified, + the Data Flow will appear at the root level. + type: string + script: + description: The script for the Data Factory Data Flow. + type: string + scriptLines: + description: The script lines for the Data Factory Data Flow. + items: + type: string + type: array + sink: + description: One or more sink blocks as defined below. + items: + properties: + dataset: + description: A dataset block as defined below. + properties: + name: + description: The name for the Data Flow transformation. + type: string + nameRef: + description: Reference to a DataSetJSON in datafactory + to populate name. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + nameSelector: + description: Selector for a DataSetJSON in datafactory + to populate name. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the + Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + type: object + description: + description: The description for the Data Flow Source. + type: string + flowlet: + description: A flowlet block as defined below. + properties: + datasetParameters: + description: Specifies the reference data flow parameters + from dataset. + type: string + name: + description: The name for the Data Flow transformation. + type: string + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the + Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + type: object + linkedService: + description: A linked_service block as defined below. + properties: + name: + description: The name for the Data Flow transformation. + type: string + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the + Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + type: object + name: + description: The name for the Data Flow Source. 
+ type: string + rejectedLinkedService: + description: A rejected_linked_service block as defined + below. + properties: + name: + description: The name for the Data Flow transformation. + type: string + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the + Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + type: object + schemaLinkedService: + description: A schema_linked_service block as defined below. + properties: + name: + description: The name for the Data Flow transformation. + type: string + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the + Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + type: object + type: object + type: array + source: + description: One or more source blocks as defined below. + items: + properties: + dataset: + description: A dataset block as defined below. + properties: + name: + description: The name for the Data Flow transformation. + type: string + nameRef: + description: Reference to a DataSetJSON in datafactory + to populate name. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + nameSelector: + description: Selector for a DataSetJSON in datafactory + to populate name. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the + Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + type: object + description: + description: The description for the Data Flow Source. + type: string + flowlet: + description: A flowlet block as defined below. + properties: + datasetParameters: + description: Specifies the reference data flow parameters + from dataset. + type: string + name: + description: The name for the Data Flow transformation. 
+ type: string + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the + Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + type: object + linkedService: + description: A linked_service block as defined below. + properties: + name: + description: The name for the Data Flow transformation. + type: string + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the + Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + type: object + name: + description: The name for the Data Flow Source. + type: string + rejectedLinkedService: + description: A rejected_linked_service block as defined + below. + properties: + name: + description: The name for the Data Flow transformation. + type: string + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the + Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + type: object + schemaLinkedService: + description: A schema_linked_service block as defined below. + properties: + name: + description: The name for the Data Flow transformation. + type: string + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the + Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + type: object + type: object + type: array + transformation: + description: One or more transformation blocks as defined below. + items: + properties: + dataset: + description: A dataset block as defined below. + properties: + name: + description: The name for the Data Flow transformation. + type: string + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the + Data Factory Linked Service. 
+ type: object + x-kubernetes-map-type: granular + type: object + description: + description: The description for the Data Flow transformation. + type: string + flowlet: + description: A flowlet block as defined below. + properties: + datasetParameters: + description: Specifies the reference data flow parameters + from dataset. + type: string + name: + description: The name for the Data Flow transformation. + type: string + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the + Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + type: object + linkedService: + description: A linked_service block as defined below. + properties: + name: + description: The name for the Data Flow transformation. + type: string + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the + Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + type: object + name: + description: The name for the Data Flow transformation. + type: string + type: object + type: array + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + annotations: + description: List of tags that can be used for describing the + Data Factory Data Flow. 
+ items: + type: string + type: array + description: + description: The description for the Data Factory Data Flow. + type: string + folder: + description: The folder that this Data Flow is in. If not specified, + the Data Flow will appear at the root level. + type: string + script: + description: The script for the Data Factory Data Flow. + type: string + scriptLines: + description: The script lines for the Data Factory Data Flow. + items: + type: string + type: array + sink: + description: One or more sink blocks as defined below. + items: + properties: + dataset: + description: A dataset block as defined below. + properties: + name: + description: The name for the Data Flow transformation. + type: string + nameRef: + description: Reference to a DataSetJSON in datafactory + to populate name. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + nameSelector: + description: Selector for a DataSetJSON in datafactory + to populate name. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the + Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + type: object + description: + description: The description for the Data Flow Source. + type: string + flowlet: + description: A flowlet block as defined below. + properties: + datasetParameters: + description: Specifies the reference data flow parameters + from dataset. + type: string + name: + description: The name for the Data Flow transformation. + type: string + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the + Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + type: object + linkedService: + description: A linked_service block as defined below. + properties: + name: + description: The name for the Data Flow transformation. 
+ type: string + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the + Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + type: object + name: + description: The name for the Data Flow Source. + type: string + rejectedLinkedService: + description: A rejected_linked_service block as defined + below. + properties: + name: + description: The name for the Data Flow transformation. + type: string + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the + Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + type: object + schemaLinkedService: + description: A schema_linked_service block as defined below. + properties: + name: + description: The name for the Data Flow transformation. + type: string + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the + Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + type: object + type: object + type: array + source: + description: One or more source blocks as defined below. + items: + properties: + dataset: + description: A dataset block as defined below. + properties: + name: + description: The name for the Data Flow transformation. + type: string + nameRef: + description: Reference to a DataSetJSON in datafactory + to populate name. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + nameSelector: + description: Selector for a DataSetJSON in datafactory + to populate name. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the + Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + type: object + description: + description: The description for the Data Flow Source. + type: string + flowlet: + description: A flowlet block as defined below. 
+ properties: + datasetParameters: + description: Specifies the reference data flow parameters + from dataset. + type: string + name: + description: The name for the Data Flow transformation. + type: string + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the + Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + type: object + linkedService: + description: A linked_service block as defined below. + properties: + name: + description: The name for the Data Flow transformation. + type: string + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the + Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + type: object + name: + description: The name for the Data Flow Source. + type: string + rejectedLinkedService: + description: A rejected_linked_service block as defined + below. + properties: + name: + description: The name for the Data Flow transformation. + type: string + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the + Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + type: object + schemaLinkedService: + description: A schema_linked_service block as defined below. + properties: + name: + description: The name for the Data Flow transformation. + type: string + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the + Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + type: object + type: object + type: array + transformation: + description: One or more transformation blocks as defined below. + items: + properties: + dataset: + description: A dataset block as defined below. + properties: + name: + description: The name for the Data Flow transformation. 
+ type: string + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the + Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + type: object + description: + description: The description for the Data Flow transformation. + type: string + flowlet: + description: A flowlet block as defined below. + properties: + datasetParameters: + description: Specifies the reference data flow parameters + from dataset. + type: string + name: + description: The name for the Data Flow transformation. + type: string + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the + Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + type: object + linkedService: + description: A linked_service block as defined below. + properties: + name: + description: The name for the Data Flow transformation. + type: string + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the + Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + type: object + name: + description: The name for the Data Flow transformation. + type: string + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.sink is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.sink) + || (has(self.initProvider) && has(self.initProvider.sink))' + - message: spec.forProvider.source is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.source) + || (has(self.initProvider) && has(self.initProvider.source))' + status: + description: DataFlowStatus defines the observed state of DataFlow. + properties: + atProvider: + properties: + annotations: + description: List of tags that can be used for describing the + Data Factory Data Flow. + items: + type: string + type: array + dataFactoryId: + description: The ID of Data Factory in which to associate the + Data Flow with. Changing this forces a new resource. 
+ type: string + description: + description: The description for the Data Factory Data Flow. + type: string + folder: + description: The folder that this Data Flow is in. If not specified, + the Data Flow will appear at the root level. + type: string + id: + description: The ID of the Data Factory Data Flow. + type: string + script: + description: The script for the Data Factory Data Flow. + type: string + scriptLines: + description: The script lines for the Data Factory Data Flow. + items: + type: string + type: array + sink: + description: One or more sink blocks as defined below. + items: + properties: + dataset: + description: A dataset block as defined below. + properties: + name: + description: The name for the Data Flow transformation. + type: string + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the + Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + type: object + description: + description: The description for the Data Flow Source. + type: string + flowlet: + description: A flowlet block as defined below. + properties: + datasetParameters: + description: Specifies the reference data flow parameters + from dataset. + type: string + name: + description: The name for the Data Flow transformation. + type: string + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the + Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + type: object + linkedService: + description: A linked_service block as defined below. + properties: + name: + description: The name for the Data Flow transformation. + type: string + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the + Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + type: object + name: + description: The name for the Data Flow Source. 
+ type: string + rejectedLinkedService: + description: A rejected_linked_service block as defined + below. + properties: + name: + description: The name for the Data Flow transformation. + type: string + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the + Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + type: object + schemaLinkedService: + description: A schema_linked_service block as defined below. + properties: + name: + description: The name for the Data Flow transformation. + type: string + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the + Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + type: object + type: object + type: array + source: + description: One or more source blocks as defined below. + items: + properties: + dataset: + description: A dataset block as defined below. + properties: + name: + description: The name for the Data Flow transformation. + type: string + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the + Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + type: object + description: + description: The description for the Data Flow Source. + type: string + flowlet: + description: A flowlet block as defined below. + properties: + datasetParameters: + description: Specifies the reference data flow parameters + from dataset. + type: string + name: + description: The name for the Data Flow transformation. + type: string + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the + Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + type: object + linkedService: + description: A linked_service block as defined below. + properties: + name: + description: The name for the Data Flow transformation. 
+ type: string + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the + Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + type: object + name: + description: The name for the Data Flow Source. + type: string + rejectedLinkedService: + description: A rejected_linked_service block as defined + below. + properties: + name: + description: The name for the Data Flow transformation. + type: string + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the + Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + type: object + schemaLinkedService: + description: A schema_linked_service block as defined below. + properties: + name: + description: The name for the Data Flow transformation. + type: string + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the + Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + type: object + type: object + type: array + transformation: + description: One or more transformation blocks as defined below. + items: + properties: + dataset: + description: A dataset block as defined below. + properties: + name: + description: The name for the Data Flow transformation. + type: string + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the + Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + type: object + description: + description: The description for the Data Flow transformation. + type: string + flowlet: + description: A flowlet block as defined below. + properties: + datasetParameters: + description: Specifies the reference data flow parameters + from dataset. + type: string + name: + description: The name for the Data Flow transformation. 
+ type: string + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the + Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + type: object + linkedService: + description: A linked_service block as defined below. + properties: + name: + description: The name for the Data Flow transformation. + type: string + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the + Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + type: object + name: + description: The name for the Data Flow transformation. + type: string + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/datafactory.azure.upbound.io_datasetbinaries.yaml b/package/crds/datafactory.azure.upbound.io_datasetbinaries.yaml index 1b37a42b2..f82bae33e 100644 --- a/package/crds/datafactory.azure.upbound.io_datasetbinaries.yaml +++ b/package/crds/datafactory.azure.upbound.io_datasetbinaries.yaml @@ -915,3 +915,873 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: DataSetBinary is the Schema for the DataSetBinarys API. Manages + a Data Factory Binary Dataset inside an Azure Data Factory. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DataSetBinarySpec defines the desired state of DataSetBinary + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + additionalProperties: + additionalProperties: + type: string + description: A map of additional properties to associate with + the Data Factory Binary Dataset. + type: object + x-kubernetes-map-type: granular + annotations: + description: List of tags that can be used for describing the + Data Factory Binary Dataset. + items: + type: string + type: array + azureBlobStorageLocation: + description: A azure_blob_storage_location block as defined below. + properties: + container: + description: The container on the Azure Blob Storage Account + hosting the file. + type: string + dynamicContainerEnabled: + description: Is the container using dynamic expression, function + or system variables? Defaults to false. + type: boolean + dynamicFilenameEnabled: + description: Is the filename using dynamic expression, function + or system variables? Defaults to false. 
+ type: boolean + dynamicPathEnabled: + description: Is the path using dynamic expression, function + or system variables? Defaults to false. + type: boolean + filename: + description: The filename of the file in the blob container. + type: string + path: + description: The folder path to the file in the blob container. + type: string + type: object + compression: + description: A compression block as defined below. + properties: + level: + description: The level of compression. Possible values are + Fastest and Optimal. + type: string + type: + description: The type of compression used during transport. + Possible values are BZip2, Deflate, GZip, Tar, TarGZip and + ZipDeflate. + type: string + type: object + dataFactoryId: + description: The Data Factory ID in which to associate the Linked + Service with. Changing this forces a new resource. + type: string + dataFactoryIdRef: + description: Reference to a Factory in datafactory to populate + dataFactoryId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + dataFactoryIdSelector: + description: Selector for a Factory in datafactory to populate + dataFactoryId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + description: + description: The description for the Data Factory Dataset. + type: string + folder: + description: The folder that this Dataset is in. If not specified, + the Dataset will appear at the root level. + type: string + httpServerLocation: + description: A http_server_location block as defined below. + properties: + dynamicFilenameEnabled: + description: Is the filename using dynamic expression, function + or system variables? Defaults to false. + type: boolean + dynamicPathEnabled: + description: Is the path using dynamic expression, function + or system variables? Defaults to false. + type: boolean + filename: + description: The filename of the file on the web server. + type: string + path: + description: The folder path to the file on the web server. + type: string + relativeUrl: + description: The base URL to the web server hosting the file. 
+ type: string + type: object + linkedServiceName: + description: The Data Factory Linked Service name in which to + associate the Binary Dataset with. + type: string + linkedServiceNameRef: + description: Reference to a LinkedServiceSFTP in datafactory to + populate linkedServiceName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + linkedServiceNameSelector: + description: Selector for a LinkedServiceSFTP in datafactory to + populate linkedServiceName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + parameters: + additionalProperties: + type: string + description: Specifies a list of parameters to associate with + the Data Factory Binary Dataset. + type: object + x-kubernetes-map-type: granular + sftpServerLocation: + description: A sftp_server_location block as defined below. + properties: + dynamicFilenameEnabled: + description: Is the filename using dynamic expression, function + or system variables? Defaults to false. + type: boolean + dynamicPathEnabled: + description: Is the path using dynamic expression, function + or system variables? Defaults to false. + type: boolean + filename: + description: The filename of the file on the SFTP server. + type: string + path: + description: The folder path to the file on the SFTP server. + type: string + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. 
+ properties: + additionalProperties: + additionalProperties: + type: string + description: A map of additional properties to associate with + the Data Factory Binary Dataset. + type: object + x-kubernetes-map-type: granular + annotations: + description: List of tags that can be used for describing the + Data Factory Binary Dataset. + items: + type: string + type: array + azureBlobStorageLocation: + description: A azure_blob_storage_location block as defined below. + properties: + container: + description: The container on the Azure Blob Storage Account + hosting the file. + type: string + dynamicContainerEnabled: + description: Is the container using dynamic expression, function + or system variables? Defaults to false. + type: boolean + dynamicFilenameEnabled: + description: Is the filename using dynamic expression, function + or system variables? Defaults to false. + type: boolean + dynamicPathEnabled: + description: Is the path using dynamic expression, function + or system variables? Defaults to false. + type: boolean + filename: + description: The filename of the file in the blob container. + type: string + path: + description: The folder path to the file in the blob container. + type: string + type: object + compression: + description: A compression block as defined below. + properties: + level: + description: The level of compression. Possible values are + Fastest and Optimal. + type: string + type: + description: The type of compression used during transport. + Possible values are BZip2, Deflate, GZip, Tar, TarGZip and + ZipDeflate. + type: string + type: object + description: + description: The description for the Data Factory Dataset. + type: string + folder: + description: The folder that this Dataset is in. If not specified, + the Dataset will appear at the root level. + type: string + httpServerLocation: + description: A http_server_location block as defined below. 
+ properties: + dynamicFilenameEnabled: + description: Is the filename using dynamic expression, function + or system variables? Defaults to false. + type: boolean + dynamicPathEnabled: + description: Is the path using dynamic expression, function + or system variables? Defaults to false. + type: boolean + filename: + description: The filename of the file on the web server. + type: string + path: + description: The folder path to the file on the web server. + type: string + relativeUrl: + description: The base URL to the web server hosting the file. + type: string + type: object + linkedServiceName: + description: The Data Factory Linked Service name in which to + associate the Binary Dataset with. + type: string + linkedServiceNameRef: + description: Reference to a LinkedServiceSFTP in datafactory to + populate linkedServiceName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + linkedServiceNameSelector: + description: Selector for a LinkedServiceSFTP in datafactory to + populate linkedServiceName. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + parameters: + additionalProperties: + type: string + description: Specifies a list of parameters to associate with + the Data Factory Binary Dataset. + type: object + x-kubernetes-map-type: granular + sftpServerLocation: + description: A sftp_server_location block as defined below. + properties: + dynamicFilenameEnabled: + description: Is the filename using dynamic expression, function + or system variables? Defaults to false. + type: boolean + dynamicPathEnabled: + description: Is the path using dynamic expression, function + or system variables? Defaults to false. + type: boolean + filename: + description: The filename of the file on the SFTP server. + type: string + path: + description: The folder path to the file on the SFTP server. + type: string + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. 
It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: DataSetBinaryStatus defines the observed state of DataSetBinary. + properties: + atProvider: + properties: + additionalProperties: + additionalProperties: + type: string + description: A map of additional properties to associate with + the Data Factory Binary Dataset. + type: object + x-kubernetes-map-type: granular + annotations: + description: List of tags that can be used for describing the + Data Factory Binary Dataset. + items: + type: string + type: array + azureBlobStorageLocation: + description: A azure_blob_storage_location block as defined below. 
+ properties: + container: + description: The container on the Azure Blob Storage Account + hosting the file. + type: string + dynamicContainerEnabled: + description: Is the container using dynamic expression, function + or system variables? Defaults to false. + type: boolean + dynamicFilenameEnabled: + description: Is the filename using dynamic expression, function + or system variables? Defaults to false. + type: boolean + dynamicPathEnabled: + description: Is the path using dynamic expression, function + or system variables? Defaults to false. + type: boolean + filename: + description: The filename of the file in the blob container. + type: string + path: + description: The folder path to the file in the blob container. + type: string + type: object + compression: + description: A compression block as defined below. + properties: + level: + description: The level of compression. Possible values are + Fastest and Optimal. + type: string + type: + description: The type of compression used during transport. + Possible values are BZip2, Deflate, GZip, Tar, TarGZip and + ZipDeflate. + type: string + type: object + dataFactoryId: + description: The Data Factory ID in which to associate the Linked + Service with. Changing this forces a new resource. + type: string + description: + description: The description for the Data Factory Dataset. + type: string + folder: + description: The folder that this Dataset is in. If not specified, + the Dataset will appear at the root level. + type: string + httpServerLocation: + description: A http_server_location block as defined below. + properties: + dynamicFilenameEnabled: + description: Is the filename using dynamic expression, function + or system variables? Defaults to false. + type: boolean + dynamicPathEnabled: + description: Is the path using dynamic expression, function + or system variables? Defaults to false. + type: boolean + filename: + description: The filename of the file on the web server. 
+ type: string + path: + description: The folder path to the file on the web server. + type: string + relativeUrl: + description: The base URL to the web server hosting the file. + type: string + type: object + id: + description: The ID of the Data Factory Dataset. + type: string + linkedServiceName: + description: The Data Factory Linked Service name in which to + associate the Binary Dataset with. + type: string + parameters: + additionalProperties: + type: string + description: Specifies a list of parameters to associate with + the Data Factory Binary Dataset. + type: object + x-kubernetes-map-type: granular + sftpServerLocation: + description: A sftp_server_location block as defined below. + properties: + dynamicFilenameEnabled: + description: Is the filename using dynamic expression, function + or system variables? Defaults to false. + type: boolean + dynamicPathEnabled: + description: Is the path using dynamic expression, function + or system variables? Defaults to false. + type: boolean + filename: + description: The filename of the file on the SFTP server. + type: string + path: + description: The folder path to the file on the SFTP server. + type: string + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/datafactory.azure.upbound.io_datasetdelimitedtexts.yaml b/package/crds/datafactory.azure.upbound.io_datasetdelimitedtexts.yaml index 2eb502c5d..ac1b7a76c 100644 --- a/package/crds/datafactory.azure.upbound.io_datasetdelimitedtexts.yaml +++ b/package/crds/datafactory.azure.upbound.io_datasetdelimitedtexts.yaml @@ -1057,3 +1057,1021 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: DataSetDelimitedText is the 
Schema for the DataSetDelimitedTexts + API. Manages an Azure Delimited Text Dataset inside an Azure Data Factory. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DataSetDelimitedTextSpec defines the desired state of DataSetDelimitedText + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + additionalProperties: + additionalProperties: + type: string + description: A map of additional properties to associate with + the Data Factory Dataset. + type: object + x-kubernetes-map-type: granular + annotations: + description: List of tags that can be used for describing the + Data Factory Dataset. 
+ items: + type: string + type: array + azureBlobFsLocation: + description: An azure_blob_fs_location block as defined below. + properties: + dynamicFileSystemEnabled: + description: Is the file_system using dynamic expression, + function or system variables? Defaults to false. + type: boolean + dynamicFilenameEnabled: + description: Is the filename using dynamic expression, function + or system variables? Defaults to false. + type: boolean + dynamicPathEnabled: + description: Is the path using dynamic expression, function + or system variables? Defaults to false. + type: boolean + fileSystem: + description: The storage data lake gen2 file system on the + Azure Blob Storage Account hosting the file. + type: string + filename: + description: The filename of the file. + type: string + path: + description: The folder path to the file. + type: string + type: object + azureBlobStorageLocation: + description: An azure_blob_storage_location block as defined below. + properties: + container: + description: The container on the Azure Blob Storage Account + hosting the file. + type: string + dynamicContainerEnabled: + description: Is the container using dynamic expression, function + or system variables? Defaults to false. + type: boolean + dynamicFilenameEnabled: + description: Is the filename using dynamic expression, function + or system variables? Defaults to false. + type: boolean + dynamicPathEnabled: + description: Is the path using dynamic expression, function + or system variables? Defaults to false. + type: boolean + filename: + description: The filename of the file. + type: string + path: + description: The folder path to the file. This can be an empty + string. + type: string + type: object + columnDelimiter: + description: The column delimiter. Defaults to ,. + type: string + compressionCodec: + description: The compression codec used to read/write text files. + Valid values are None, bzip2, gzip, deflate, ZipDeflate, TarGzip, + Tar, snappy and lz4. 
Please note these values are case sensitive. + type: string + compressionLevel: + description: The compression ratio for the Data Factory Dataset. + Valid values are Fastest or Optimal. Please note these values + are case sensitive. + type: string + dataFactoryId: + description: The Data Factory ID in which to associate the Linked + Service with. Changing this forces a new resource. + type: string + dataFactoryIdRef: + description: Reference to a Factory in datafactory to populate + dataFactoryId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + dataFactoryIdSelector: + description: Selector for a Factory in datafactory to populate + dataFactoryId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + description: + description: The description for the Data Factory Dataset. + type: string + encoding: + description: The encoding format for the file. + type: string + escapeCharacter: + description: The escape character. Defaults to \. + type: string + firstRowAsHeader: + description: When used as input, treat the first row of data as + headers. When used as output, write the headers into the output + as the first row of data. Defaults to false. + type: boolean + folder: + description: The folder that this Dataset is in. If not specified, + the Dataset will appear at the root level. + type: string + httpServerLocation: + description: A http_server_location block as defined below. + properties: + dynamicFilenameEnabled: + description: Is the filename using dynamic expression, function + or system variables? Defaults to false. + type: boolean + dynamicPathEnabled: + description: Is the path using dynamic expression, function + or system variables? Defaults to false. + type: boolean + filename: + description: The filename of the file on the web server. + type: string + path: + description: The folder path to the file on the web server. + type: string + relativeUrl: + description: The base URL to the web server hosting the file. 
+ type: string + type: object + linkedServiceName: + description: The Data Factory Linked Service name in which to + associate the Dataset with. + type: string + linkedServiceNameRef: + description: Reference to a LinkedServiceWeb in datafactory to + populate linkedServiceName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + linkedServiceNameSelector: + description: Selector for a LinkedServiceWeb in datafactory to + populate linkedServiceName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + nullValue: + description: The null value string. Defaults to "". + type: string + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the Data Factory + Dataset. + type: object + x-kubernetes-map-type: granular + quoteCharacter: + description: The quote character. Defaults to ". + type: string + rowDelimiter: + description: 'The row delimiter. Defaults to any of the following + values on read: \r\n, \r, \n, and \n or \r\n on write by mapping + data flow and Copy activity respectively.' + type: string + schemaColumn: + description: A schema_column block as defined below. + items: + properties: + description: + description: The description of the column. + type: string + name: + description: The name of the column. + type: string + type: + description: Type of the column. Valid values are Byte, + Byte[], Boolean, Date, DateTime,DateTimeOffset, Decimal, + Double, Guid, Int16, Int32, Int64, Single, String, TimeSpan. + Please note these values are case sensitive. + type: string + type: object + type: array + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + additionalProperties: + additionalProperties: + type: string + description: A map of additional properties to associate with + the Data Factory Dataset. + type: object + x-kubernetes-map-type: granular + annotations: + description: List of tags that can be used for describing the + Data Factory Dataset. + items: + type: string + type: array + azureBlobFsLocation: + description: An azure_blob_fs_location block as defined below. + properties: + dynamicFileSystemEnabled: + description: Is the file_system using dynamic expression, + function or system variables? Defaults to false. + type: boolean + dynamicFilenameEnabled: + description: Is the filename using dynamic expression, function + or system variables? Defaults to false. + type: boolean + dynamicPathEnabled: + description: Is the path using dynamic expression, function + or system variables? Defaults to false. + type: boolean + fileSystem: + description: The storage data lake gen2 file system on the + Azure Blob Storage Account hosting the file. + type: string + filename: + description: The filename of the file. + type: string + path: + description: The folder path to the file. + type: string + type: object + azureBlobStorageLocation: + description: An azure_blob_storage_location block as defined below. + properties: + container: + description: The container on the Azure Blob Storage Account + hosting the file. + type: string + dynamicContainerEnabled: + description: Is the container using dynamic expression, function + or system variables? Defaults to false. + type: boolean + dynamicFilenameEnabled: + description: Is the filename using dynamic expression, function + or system variables? Defaults to false. 
+ type: boolean + dynamicPathEnabled: + description: Is the path using dynamic expression, function + or system variables? Defaults to false. + type: boolean + filename: + description: The filename of the file. + type: string + path: + description: The folder path to the file. This can be an empty + string. + type: string + type: object + columnDelimiter: + description: The column delimiter. Defaults to ,. + type: string + compressionCodec: + description: The compression codec used to read/write text files. + Valid values are None, bzip2, gzip, deflate, ZipDeflate, TarGzip, + Tar, snappy and lz4. Please note these values are case sensitive. + type: string + compressionLevel: + description: The compression ratio for the Data Factory Dataset. + Valid values are Fastest or Optimal. Please note these values + are case sensitive. + type: string + description: + description: The description for the Data Factory Dataset. + type: string + encoding: + description: The encoding format for the file. + type: string + escapeCharacter: + description: The escape character. Defaults to \. + type: string + firstRowAsHeader: + description: When used as input, treat the first row of data as + headers. When used as output, write the headers into the output + as the first row of data. Defaults to false. + type: boolean + folder: + description: The folder that this Dataset is in. If not specified, + the Dataset will appear at the root level. + type: string + httpServerLocation: + description: A http_server_location block as defined below. + properties: + dynamicFilenameEnabled: + description: Is the filename using dynamic expression, function + or system variables? Defaults to false. + type: boolean + dynamicPathEnabled: + description: Is the path using dynamic expression, function + or system variables? Defaults to false. + type: boolean + filename: + description: The filename of the file on the web server. 
+ type: string + path: + description: The folder path to the file on the web server. + type: string + relativeUrl: + description: The base URL to the web server hosting the file. + type: string + type: object + linkedServiceName: + description: The Data Factory Linked Service name in which to + associate the Dataset with. + type: string + linkedServiceNameRef: + description: Reference to a LinkedServiceWeb in datafactory to + populate linkedServiceName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + linkedServiceNameSelector: + description: Selector for a LinkedServiceWeb in datafactory to + populate linkedServiceName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + nullValue: + description: The null value string. Defaults to "". + type: string + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the Data Factory + Dataset. + type: object + x-kubernetes-map-type: granular + quoteCharacter: + description: The quote character. Defaults to ". + type: string + rowDelimiter: + description: 'The row delimiter. Defaults to any of the following + values on read: \r\n, \r, \n, and \n or \r\n on write by mapping + data flow and Copy activity respectively.' + type: string + schemaColumn: + description: A schema_column block as defined below. + items: + properties: + description: + description: The description of the column. + type: string + name: + description: The name of the column. + type: string + type: + description: Type of the column. Valid values are Byte, + Byte[], Boolean, Date, DateTime,DateTimeOffset, Decimal, + Double, Guid, Int16, Int32, Int64, Single, String, TimeSpan. + Please note these values are case sensitive. + type: string + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. 
+ This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: DataSetDelimitedTextStatus defines the observed state of + DataSetDelimitedText. + properties: + atProvider: + properties: + additionalProperties: + additionalProperties: + type: string + description: A map of additional properties to associate with + the Data Factory Dataset. + type: object + x-kubernetes-map-type: granular + annotations: + description: List of tags that can be used for describing the + Data Factory Dataset. + items: + type: string + type: array + azureBlobFsLocation: + description: An azure_blob_fs_location block as defined below. 
+ properties: + dynamicFileSystemEnabled: + description: Is the file_system using dynamic expression, + function or system variables? Defaults to false. + type: boolean + dynamicFilenameEnabled: + description: Is the filename using dynamic expression, function + or system variables? Defaults to false. + type: boolean + dynamicPathEnabled: + description: Is the path using dynamic expression, function + or system variables? Defaults to false. + type: boolean + fileSystem: + description: The storage data lake gen2 file system on the + Azure Blob Storage Account hosting the file. + type: string + filename: + description: The filename of the file. + type: string + path: + description: The folder path to the file. + type: string + type: object + azureBlobStorageLocation: + description: An azure_blob_storage_location block as defined below. + properties: + container: + description: The container on the Azure Blob Storage Account + hosting the file. + type: string + dynamicContainerEnabled: + description: Is the container using dynamic expression, function + or system variables? Defaults to false. + type: boolean + dynamicFilenameEnabled: + description: Is the filename using dynamic expression, function + or system variables? Defaults to false. + type: boolean + dynamicPathEnabled: + description: Is the path using dynamic expression, function + or system variables? Defaults to false. + type: boolean + filename: + description: The filename of the file. + type: string + path: + description: The folder path to the file. This can be an empty + string. + type: string + type: object + columnDelimiter: + description: The column delimiter. Defaults to ,. + type: string + compressionCodec: + description: The compression codec used to read/write text files. + Valid values are None, bzip2, gzip, deflate, ZipDeflate, TarGzip, + Tar, snappy and lz4. Please note these values are case sensitive. 
+ type: string + compressionLevel: + description: The compression ratio for the Data Factory Dataset. + Valid values are Fastest or Optimal. Please note these values + are case sensitive. + type: string + dataFactoryId: + description: The Data Factory ID in which to associate the Linked + Service with. Changing this forces a new resource. + type: string + description: + description: The description for the Data Factory Dataset. + type: string + encoding: + description: The encoding format for the file. + type: string + escapeCharacter: + description: The escape character. Defaults to \. + type: string + firstRowAsHeader: + description: When used as input, treat the first row of data as + headers. When used as output, write the headers into the output + as the first row of data. Defaults to false. + type: boolean + folder: + description: The folder that this Dataset is in. If not specified, + the Dataset will appear at the root level. + type: string + httpServerLocation: + description: A http_server_location block as defined below. + properties: + dynamicFilenameEnabled: + description: Is the filename using dynamic expression, function + or system variables? Defaults to false. + type: boolean + dynamicPathEnabled: + description: Is the path using dynamic expression, function + or system variables? Defaults to false. + type: boolean + filename: + description: The filename of the file on the web server. + type: string + path: + description: The folder path to the file on the web server. + type: string + relativeUrl: + description: The base URL to the web server hosting the file. + type: string + type: object + id: + description: The ID of the Data Factory Dataset. + type: string + linkedServiceName: + description: The Data Factory Linked Service name in which to + associate the Dataset with. + type: string + nullValue: + description: The null value string. Defaults to "". 
+ type: string + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the Data Factory + Dataset. + type: object + x-kubernetes-map-type: granular + quoteCharacter: + description: The quote character. Defaults to ". + type: string + rowDelimiter: + description: 'The row delimiter. Defaults to any of the following + values on read: \r\n, \r, \n, and \n or \r\n on write by mapping + data flow and Copy activity respectively.' + type: string + schemaColumn: + description: A schema_column block as defined below. + items: + properties: + description: + description: The description of the column. + type: string + name: + description: The name of the column. + type: string + type: + description: Type of the column. Valid values are Byte, + Byte[], Boolean, Date, DateTime,DateTimeOffset, Decimal, + Double, Guid, Int16, Int32, Int64, Single, String, TimeSpan. + Please note these values are case sensitive. + type: string + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. 
+ type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/datafactory.azure.upbound.io_datasetjsons.yaml b/package/crds/datafactory.azure.upbound.io_datasetjsons.yaml index b54124bc7..13cd6f73e 100644 --- a/package/crds/datafactory.azure.upbound.io_datasetjsons.yaml +++ b/package/crds/datafactory.azure.upbound.io_datasetjsons.yaml @@ -873,3 +873,843 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: DataSetJSON is the Schema for the DataSetJSONs API. Manages an + Azure JSON Dataset inside an Azure Data Factory. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DataSetJSONSpec defines the desired state of DataSetJSON + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + additionalProperties: + additionalProperties: + type: string + description: A map of additional properties to associate with + the Data Factory Dataset. + type: object + x-kubernetes-map-type: granular + annotations: + description: List of tags that can be used for describing the + Data Factory Dataset. + items: + type: string + type: array + azureBlobStorageLocation: + description: A azure_blob_storage_location block as defined below. + properties: + container: + description: The container on the Azure Blob Storage Account + hosting the file. + type: string + dynamicContainerEnabled: + description: Is the container using dynamic expression, function + or system variables? Defaults to false. 
+ type: boolean + dynamicFilenameEnabled: + description: Is the filename using dynamic expression, function + or system variables? Defaults to false. + type: boolean + dynamicPathEnabled: + description: Is the path using dynamic expression, function + or system variables? Defaults to false. + type: boolean + filename: + description: The filename of the file on the web server. + type: string + path: + description: The folder path to the file on the web server. + type: string + type: object + dataFactoryId: + description: The Data Factory ID in which to associate the Linked + Service with. Changing this forces a new resource. + type: string + dataFactoryIdRef: + description: Reference to a Factory in datafactory to populate + dataFactoryId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + dataFactoryIdSelector: + description: Selector for a Factory in datafactory to populate + dataFactoryId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + description: + description: The description for the Data Factory Dataset. + type: string + encoding: + description: The encoding format for the file. + type: string + folder: + description: The folder that this Dataset is in. If not specified, + the Dataset will appear at the root level. + type: string + httpServerLocation: + description: A http_server_location block as defined below. + properties: + dynamicFilenameEnabled: + description: Is the filename using dynamic expression, function + or system variables? Defaults to false. + type: boolean + dynamicPathEnabled: + description: Is the path using dynamic expression, function + or system variables? Defaults to false. + type: boolean + filename: + description: The filename of the file on the web server. + type: string + path: + description: The folder path to the file on the web server. + type: string + relativeUrl: + description: The base URL to the web server hosting the file. 
+ type: string + type: object + linkedServiceName: + description: The Data Factory Linked Service name in which to + associate the Dataset with. + type: string + linkedServiceNameRef: + description: Reference to a LinkedServiceWeb in datafactory to + populate linkedServiceName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + linkedServiceNameSelector: + description: Selector for a LinkedServiceWeb in datafactory to + populate linkedServiceName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the Data Factory + Dataset. + type: object + x-kubernetes-map-type: granular + schemaColumn: + description: A schema_column block as defined below. + items: + properties: + description: + description: The description of the column. + type: string + name: + description: The name of the column. + type: string + type: + description: Type of the column. Valid values are Byte, + Byte[], Boolean, Date, DateTime,DateTimeOffset, Decimal, + Double, Guid, Int16, Int32, Int64, Single, String, TimeSpan. + Please note these values are case sensitive. + type: string + type: object + type: array + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + additionalProperties: + additionalProperties: + type: string + description: A map of additional properties to associate with + the Data Factory Dataset. 
+ type: object + x-kubernetes-map-type: granular + annotations: + description: List of tags that can be used for describing the + Data Factory Dataset. + items: + type: string + type: array + azureBlobStorageLocation: + description: A azure_blob_storage_location block as defined below. + properties: + container: + description: The container on the Azure Blob Storage Account + hosting the file. + type: string + dynamicContainerEnabled: + description: Is the container using dynamic expression, function + or system variables? Defaults to false. + type: boolean + dynamicFilenameEnabled: + description: Is the filename using dynamic expression, function + or system variables? Defaults to false. + type: boolean + dynamicPathEnabled: + description: Is the path using dynamic expression, function + or system variables? Defaults to false. + type: boolean + filename: + description: The filename of the file on the web server. + type: string + path: + description: The folder path to the file on the web server. + type: string + type: object + description: + description: The description for the Data Factory Dataset. + type: string + encoding: + description: The encoding format for the file. + type: string + folder: + description: The folder that this Dataset is in. If not specified, + the Dataset will appear at the root level. + type: string + httpServerLocation: + description: A http_server_location block as defined below. + properties: + dynamicFilenameEnabled: + description: Is the filename using dynamic expression, function + or system variables? Defaults to false. + type: boolean + dynamicPathEnabled: + description: Is the path using dynamic expression, function + or system variables? Defaults to false. + type: boolean + filename: + description: The filename of the file on the web server. + type: string + path: + description: The folder path to the file on the web server. + type: string + relativeUrl: + description: The base URL to the web server hosting the file. 
+ type: string + type: object + linkedServiceName: + description: The Data Factory Linked Service name in which to + associate the Dataset with. + type: string + linkedServiceNameRef: + description: Reference to a LinkedServiceWeb in datafactory to + populate linkedServiceName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + linkedServiceNameSelector: + description: Selector for a LinkedServiceWeb in datafactory to + populate linkedServiceName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the Data Factory + Dataset. + type: object + x-kubernetes-map-type: granular + schemaColumn: + description: A schema_column block as defined below. + items: + properties: + description: + description: The description of the column. + type: string + name: + description: The name of the column. + type: string + type: + description: Type of the column. Valid values are Byte, + Byte[], Boolean, Date, DateTime,DateTimeOffset, Decimal, + Double, Guid, Int16, Int32, Int64, Single, String, TimeSpan. + Please note these values are case sensitive. + type: string + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: DataSetJSONStatus defines the observed state of DataSetJSON. + properties: + atProvider: + properties: + additionalProperties: + additionalProperties: + type: string + description: A map of additional properties to associate with + the Data Factory Dataset. + type: object + x-kubernetes-map-type: granular + annotations: + description: List of tags that can be used for describing the + Data Factory Dataset. + items: + type: string + type: array + azureBlobStorageLocation: + description: A azure_blob_storage_location block as defined below. + properties: + container: + description: The container on the Azure Blob Storage Account + hosting the file. + type: string + dynamicContainerEnabled: + description: Is the container using dynamic expression, function + or system variables? Defaults to false. + type: boolean + dynamicFilenameEnabled: + description: Is the filename using dynamic expression, function + or system variables? Defaults to false. 
+ type: boolean + dynamicPathEnabled: + description: Is the path using dynamic expression, function + or system variables? Defaults to false. + type: boolean + filename: + description: The filename of the file on the web server. + type: string + path: + description: The folder path to the file on the web server. + type: string + type: object + dataFactoryId: + description: The Data Factory ID in which to associate the Linked + Service with. Changing this forces a new resource. + type: string + description: + description: The description for the Data Factory Dataset. + type: string + encoding: + description: The encoding format for the file. + type: string + folder: + description: The folder that this Dataset is in. If not specified, + the Dataset will appear at the root level. + type: string + httpServerLocation: + description: A http_server_location block as defined below. + properties: + dynamicFilenameEnabled: + description: Is the filename using dynamic expression, function + or system variables? Defaults to false. + type: boolean + dynamicPathEnabled: + description: Is the path using dynamic expression, function + or system variables? Defaults to false. + type: boolean + filename: + description: The filename of the file on the web server. + type: string + path: + description: The folder path to the file on the web server. + type: string + relativeUrl: + description: The base URL to the web server hosting the file. + type: string + type: object + id: + description: The ID of the Data Factory Dataset. + type: string + linkedServiceName: + description: The Data Factory Linked Service name in which to + associate the Dataset with. + type: string + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the Data Factory + Dataset. + type: object + x-kubernetes-map-type: granular + schemaColumn: + description: A schema_column block as defined below. 
+ items: + properties: + description: + description: The description of the column. + type: string + name: + description: The name of the column. + type: string + type: + description: Type of the column. Valid values are Byte, + Byte[], Boolean, Date, DateTime,DateTimeOffset, Decimal, + Double, Guid, Int16, Int32, Int64, Single, String, TimeSpan. + Please note these values are case sensitive. + type: string + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/datafactory.azure.upbound.io_datasetparquets.yaml b/package/crds/datafactory.azure.upbound.io_datasetparquets.yaml index 4258b784c..388b2eaf5 100644 --- a/package/crds/datafactory.azure.upbound.io_datasetparquets.yaml +++ b/package/crds/datafactory.azure.upbound.io_datasetparquets.yaml @@ -987,3 +987,951 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: DataSetParquet is the Schema for the DataSetParquets API. Manages + an Azure Parquet Dataset inside an Azure Data Factory. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DataSetParquetSpec defines the desired state of DataSetParquet + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + additionalProperties: + additionalProperties: + type: string + description: A map of additional properties to associate with + the Data Factory Dataset. + type: object + x-kubernetes-map-type: granular + annotations: + description: List of tags that can be used for describing the + Data Factory Dataset. + items: + type: string + type: array + azureBlobFsLocation: + description: A azure_blob_fs_location block as defined below. + properties: + dynamicFileSystemEnabled: + description: Is the file_system using dynamic expression, + function or system variables? Defaults to false. + type: boolean + dynamicFilenameEnabled: + description: Is the filename using dynamic expression, function + or system variables? Defaults to false. + type: boolean + dynamicPathEnabled: + description: Is the path using dynamic expression, function + or system variables? Defaults to false. 
+ type: boolean + fileSystem: + description: The container on the Azure Data Lake Storage + Account hosting the file. + type: string + filename: + description: The filename of the file on the Azure Data Lake + Storage Account. + type: string + path: + description: The folder path to the file on the Azure Data + Lake Storage Account. + type: string + type: object + azureBlobStorageLocation: + description: A azure_blob_storage_location block as defined below. + properties: + container: + description: The container on the Azure Blob Storage Account + hosting the file. + type: string + dynamicContainerEnabled: + description: Is the container using dynamic expression, function + or system variables? Defaults to false. + type: boolean + dynamicFilenameEnabled: + description: Is the filename using dynamic expression, function + or system variables? Defaults to false. + type: boolean + dynamicPathEnabled: + description: Is the path using dynamic expression, function + or system variables? Defaults to false. + type: boolean + filename: + description: The filename of the file on the Azure Blob Storage + Account. + type: string + path: + description: The folder path to the file on the Azure Blob + Storage Account. + type: string + type: object + compressionCodec: + description: The compression codec used to read/write text files. + Valid values are bzip2, gzip, deflate, ZipDeflate, TarGzip, + Tar, snappy, or lz4. Please note these values are case-sensitive. + type: string + compressionLevel: + description: Specifies the compression level. Possible values + are Optimal and Fastest, + type: string + dataFactoryId: + description: The Data Factory ID in which to associate the Dataset + with. Changing this forces a new resource. + type: string + dataFactoryIdRef: + description: Reference to a Factory in datafactory to populate + dataFactoryId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + dataFactoryIdSelector: + description: Selector for a Factory in datafactory to populate + dataFactoryId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + description: + description: The description for the Data Factory Dataset. + type: string + folder: + description: The folder that this Dataset is in. If not specified, + the Dataset will appear at the root level. + type: string + httpServerLocation: + description: A http_server_location block as defined below. + properties: + dynamicFilenameEnabled: + description: Is the filename using dynamic expression, function + or system variables? Defaults to false. + type: boolean + dynamicPathEnabled: + description: Is the path using dynamic expression, function + or system variables? Defaults to false. + type: boolean + filename: + description: The filename of the file on the web server. + type: string + path: + description: The folder path to the file on the web server. + type: string + relativeUrl: + description: The base URL to the web server hosting the file. + type: string + type: object + linkedServiceName: + description: The Data Factory Linked Service name in which to + associate the Dataset with. + type: string + linkedServiceNameRef: + description: Reference to a LinkedServiceWeb in datafactory to + populate linkedServiceName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + linkedServiceNameSelector: + description: Selector for a LinkedServiceWeb in datafactory to + populate linkedServiceName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the Data Factory + Dataset. + type: object + x-kubernetes-map-type: granular + schemaColumn: + description: A schema_column block as defined below. + items: + properties: + description: + description: The description of the column. + type: string + name: + description: The name of the column. + type: string + type: + description: Type of the column. Valid values are Byte, + Byte[], Boolean, Date, DateTime,DateTimeOffset, Decimal, + Double, Guid, Int16, Int32, Int64, Single, String, TimeSpan. 
+ Please note these values are case sensitive. + type: string + type: object + type: array + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + additionalProperties: + additionalProperties: + type: string + description: A map of additional properties to associate with + the Data Factory Dataset. + type: object + x-kubernetes-map-type: granular + annotations: + description: List of tags that can be used for describing the + Data Factory Dataset. + items: + type: string + type: array + azureBlobFsLocation: + description: A azure_blob_fs_location block as defined below. + properties: + dynamicFileSystemEnabled: + description: Is the file_system using dynamic expression, + function or system variables? Defaults to false. + type: boolean + dynamicFilenameEnabled: + description: Is the filename using dynamic expression, function + or system variables? Defaults to false. + type: boolean + dynamicPathEnabled: + description: Is the path using dynamic expression, function + or system variables? Defaults to false. + type: boolean + fileSystem: + description: The container on the Azure Data Lake Storage + Account hosting the file. + type: string + filename: + description: The filename of the file on the Azure Data Lake + Storage Account. + type: string + path: + description: The folder path to the file on the Azure Data + Lake Storage Account. 
+ type: string + type: object + azureBlobStorageLocation: + description: A azure_blob_storage_location block as defined below. + properties: + container: + description: The container on the Azure Blob Storage Account + hosting the file. + type: string + dynamicContainerEnabled: + description: Is the container using dynamic expression, function + or system variables? Defaults to false. + type: boolean + dynamicFilenameEnabled: + description: Is the filename using dynamic expression, function + or system variables? Defaults to false. + type: boolean + dynamicPathEnabled: + description: Is the path using dynamic expression, function + or system variables? Defaults to false. + type: boolean + filename: + description: The filename of the file on the Azure Blob Storage + Account. + type: string + path: + description: The folder path to the file on the Azure Blob + Storage Account. + type: string + type: object + compressionCodec: + description: The compression codec used to read/write text files. + Valid values are bzip2, gzip, deflate, ZipDeflate, TarGzip, + Tar, snappy, or lz4. Please note these values are case-sensitive. + type: string + compressionLevel: + description: Specifies the compression level. Possible values + are Optimal and Fastest, + type: string + description: + description: The description for the Data Factory Dataset. + type: string + folder: + description: The folder that this Dataset is in. If not specified, + the Dataset will appear at the root level. + type: string + httpServerLocation: + description: A http_server_location block as defined below. + properties: + dynamicFilenameEnabled: + description: Is the filename using dynamic expression, function + or system variables? Defaults to false. + type: boolean + dynamicPathEnabled: + description: Is the path using dynamic expression, function + or system variables? Defaults to false. + type: boolean + filename: + description: The filename of the file on the web server. 
+ type: string + path: + description: The folder path to the file on the web server. + type: string + relativeUrl: + description: The base URL to the web server hosting the file. + type: string + type: object + linkedServiceName: + description: The Data Factory Linked Service name in which to + associate the Dataset with. + type: string + linkedServiceNameRef: + description: Reference to a LinkedServiceWeb in datafactory to + populate linkedServiceName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + linkedServiceNameSelector: + description: Selector for a LinkedServiceWeb in datafactory to + populate linkedServiceName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the Data Factory + Dataset. + type: object + x-kubernetes-map-type: granular + schemaColumn: + description: A schema_column block as defined below. + items: + properties: + description: + description: The description of the column. + type: string + name: + description: The name of the column. + type: string + type: + description: Type of the column. Valid values are Byte, + Byte[], Boolean, Date, DateTime,DateTimeOffset, Decimal, + Double, Guid, Int16, Int32, Int64, Single, String, TimeSpan. + Please note these values are case sensitive. + type: string + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: DataSetParquetStatus defines the observed state of DataSetParquet. + properties: + atProvider: + properties: + additionalProperties: + additionalProperties: + type: string + description: A map of additional properties to associate with + the Data Factory Dataset. + type: object + x-kubernetes-map-type: granular + annotations: + description: List of tags that can be used for describing the + Data Factory Dataset. + items: + type: string + type: array + azureBlobFsLocation: + description: A azure_blob_fs_location block as defined below. + properties: + dynamicFileSystemEnabled: + description: Is the file_system using dynamic expression, + function or system variables? Defaults to false. + type: boolean + dynamicFilenameEnabled: + description: Is the filename using dynamic expression, function + or system variables? Defaults to false. + type: boolean + dynamicPathEnabled: + description: Is the path using dynamic expression, function + or system variables? Defaults to false. 
+ type: boolean + fileSystem: + description: The container on the Azure Data Lake Storage + Account hosting the file. + type: string + filename: + description: The filename of the file on the Azure Data Lake + Storage Account. + type: string + path: + description: The folder path to the file on the Azure Data + Lake Storage Account. + type: string + type: object + azureBlobStorageLocation: + description: A azure_blob_storage_location block as defined below. + properties: + container: + description: The container on the Azure Blob Storage Account + hosting the file. + type: string + dynamicContainerEnabled: + description: Is the container using dynamic expression, function + or system variables? Defaults to false. + type: boolean + dynamicFilenameEnabled: + description: Is the filename using dynamic expression, function + or system variables? Defaults to false. + type: boolean + dynamicPathEnabled: + description: Is the path using dynamic expression, function + or system variables? Defaults to false. + type: boolean + filename: + description: The filename of the file on the Azure Blob Storage + Account. + type: string + path: + description: The folder path to the file on the Azure Blob + Storage Account. + type: string + type: object + compressionCodec: + description: The compression codec used to read/write text files. + Valid values are bzip2, gzip, deflate, ZipDeflate, TarGzip, + Tar, snappy, or lz4. Please note these values are case-sensitive. + type: string + compressionLevel: + description: Specifies the compression level. Possible values + are Optimal and Fastest, + type: string + dataFactoryId: + description: The Data Factory ID in which to associate the Dataset + with. Changing this forces a new resource. + type: string + description: + description: The description for the Data Factory Dataset. + type: string + folder: + description: The folder that this Dataset is in. If not specified, + the Dataset will appear at the root level. 
+ type: string + httpServerLocation: + description: A http_server_location block as defined below. + properties: + dynamicFilenameEnabled: + description: Is the filename using dynamic expression, function + or system variables? Defaults to false. + type: boolean + dynamicPathEnabled: + description: Is the path using dynamic expression, function + or system variables? Defaults to false. + type: boolean + filename: + description: The filename of the file on the web server. + type: string + path: + description: The folder path to the file on the web server. + type: string + relativeUrl: + description: The base URL to the web server hosting the file. + type: string + type: object + id: + description: The ID of the Data Factory Dataset. + type: string + linkedServiceName: + description: The Data Factory Linked Service name in which to + associate the Dataset with. + type: string + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the Data Factory + Dataset. + type: object + x-kubernetes-map-type: granular + schemaColumn: + description: A schema_column block as defined below. + items: + properties: + description: + description: The description of the column. + type: string + name: + description: The name of the column. + type: string + type: + description: Type of the column. Valid values are Byte, + Byte[], Boolean, Date, DateTime,DateTimeOffset, Decimal, + Double, Guid, Int16, Int32, Int64, Single, String, TimeSpan. + Please note these values are case sensitive. + type: string + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. 
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/datafactory.azure.upbound.io_factories.yaml b/package/crds/datafactory.azure.upbound.io_factories.yaml index ce8d2fcf3..9200caadb 100644 --- a/package/crds/datafactory.azure.upbound.io_factories.yaml +++ b/package/crds/datafactory.azure.upbound.io_factories.yaml @@ -802,3 +802,769 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Factory is the Schema for the Factorys API. Manages an Azure + Data Factory (Version 2). + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: FactorySpec defines the desired state of Factory + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + customerManagedKeyId: + description: Specifies the Azure Key Vault Key ID to be used as + the Customer Managed Key (CMK) for double encryption. Required + with user assigned identity. + type: string + customerManagedKeyIdentityId: + description: Specifies the ID of the user assigned identity associated + with the Customer Managed Key. Must be supplied if customer_managed_key_id + is set. + type: string + githubConfiguration: + description: A github_configuration block as defined below. + properties: + accountName: + description: Specifies the GitHub account name. + type: string + branchName: + description: Specifies the branch of the repository to get + code from. + type: string + gitUrl: + description: 'Specifies the GitHub Enterprise host name. For + example: https://github.mydomain.com. Use https://github.com + for open source repositories.' + type: string + publishingEnabled: + description: Is automated publishing enabled? Defaults to + true. + type: boolean + repositoryName: + description: Specifies the name of the git repository. 
+ type: string + rootFolder: + description: Specifies the root folder within the repository. + Set to / for the top level. + type: string + type: object + globalParameter: + description: A list of global_parameter blocks as defined above. + items: + properties: + name: + description: Specifies the global parameter name. + type: string + type: + description: Specifies the global parameter type. Possible + Values are Array, Bool, Float, Int, Object or String. + type: string + value: + description: Specifies the global parameter value. + type: string + type: object + type: array + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Data Factory. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Data Factory. Possible + values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + managedVirtualNetworkEnabled: + description: Is Managed Virtual Network enabled? + type: boolean + publicNetworkEnabled: + description: Is the Data Factory visible to the public network? + Defaults to true. + type: boolean + purviewId: + description: Specifies the ID of the purview account resource + associated with the Data Factory. + type: string + resourceGroupName: + description: The name of the resource group in which to create + the Data Factory. Changing this forces a new resource to be + created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + vstsConfiguration: + description: A vsts_configuration block as defined below. + properties: + accountName: + description: Specifies the VSTS account name. + type: string + branchName: + description: Specifies the branch of the repository to get + code from. + type: string + projectName: + description: Specifies the name of the VSTS project. + type: string + publishingEnabled: + description: Is automated publishing enabled? Defaults to + true. + type: boolean + repositoryName: + description: Specifies the name of the git repository. + type: string + rootFolder: + description: Specifies the root folder within the repository. + Set to / for the top level. + type: string + tenantId: + description: Specifies the Tenant ID associated with the VSTS + account. + type: string + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + customerManagedKeyId: + description: Specifies the Azure Key Vault Key ID to be used as + the Customer Managed Key (CMK) for double encryption. Required + with user assigned identity. 
+ type: string + customerManagedKeyIdentityId: + description: Specifies the ID of the user assigned identity associated + with the Customer Managed Key. Must be supplied if customer_managed_key_id + is set. + type: string + githubConfiguration: + description: A github_configuration block as defined below. + properties: + accountName: + description: Specifies the GitHub account name. + type: string + branchName: + description: Specifies the branch of the repository to get + code from. + type: string + gitUrl: + description: 'Specifies the GitHub Enterprise host name. For + example: https://github.mydomain.com. Use https://github.com + for open source repositories.' + type: string + publishingEnabled: + description: Is automated publishing enabled? Defaults to + true. + type: boolean + repositoryName: + description: Specifies the name of the git repository. + type: string + rootFolder: + description: Specifies the root folder within the repository. + Set to / for the top level. + type: string + type: object + globalParameter: + description: A list of global_parameter blocks as defined above. + items: + properties: + name: + description: Specifies the global parameter name. + type: string + type: + description: Specifies the global parameter type. Possible + Values are Array, Bool, Float, Int, Object or String. + type: string + value: + description: Specifies the global parameter value. + type: string + type: object + type: array + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Data Factory. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Data Factory. Possible + values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). 
+ type: string + type: object + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + managedVirtualNetworkEnabled: + description: Is Managed Virtual Network enabled? + type: boolean + publicNetworkEnabled: + description: Is the Data Factory visible to the public network? + Defaults to true. + type: boolean + purviewId: + description: Specifies the ID of the purview account resource + associated with the Data Factory. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + vstsConfiguration: + description: A vsts_configuration block as defined below. + properties: + accountName: + description: Specifies the VSTS account name. + type: string + branchName: + description: Specifies the branch of the repository to get + code from. + type: string + projectName: + description: Specifies the name of the VSTS project. + type: string + publishingEnabled: + description: Is automated publishing enabled? Defaults to + true. + type: boolean + repositoryName: + description: Specifies the name of the git repository. + type: string + rootFolder: + description: Specifies the root folder within the repository. + Set to / for the top level. + type: string + tenantId: + description: Specifies the Tenant ID associated with the VSTS + account. + type: string + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. 
If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + status: + description: FactoryStatus defines the observed state of Factory. + properties: + atProvider: + properties: + customerManagedKeyId: + description: Specifies the Azure Key Vault Key ID to be used as + the Customer Managed Key (CMK) for double encryption. Required + with user assigned identity. 
+ type: string + customerManagedKeyIdentityId: + description: Specifies the ID of the user assigned identity associated + with the Customer Managed Key. Must be supplied if customer_managed_key_id + is set. + type: string + githubConfiguration: + description: A github_configuration block as defined below. + properties: + accountName: + description: Specifies the GitHub account name. + type: string + branchName: + description: Specifies the branch of the repository to get + code from. + type: string + gitUrl: + description: 'Specifies the GitHub Enterprise host name. For + example: https://github.mydomain.com. Use https://github.com + for open source repositories.' + type: string + publishingEnabled: + description: Is automated publishing enabled? Defaults to + true. + type: boolean + repositoryName: + description: Specifies the name of the git repository. + type: string + rootFolder: + description: Specifies the root folder within the repository. + Set to / for the top level. + type: string + type: object + globalParameter: + description: A list of global_parameter blocks as defined above. + items: + properties: + name: + description: Specifies the global parameter name. + type: string + type: + description: Specifies the global parameter type. Possible + Values are Array, Bool, Float, Int, Object or String. + type: string + value: + description: Specifies the global parameter value. + type: string + type: object + type: array + id: + description: The ID of the Data Factory. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Data Factory. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Principal ID associated with this Managed + Service Identity. + type: string + tenantId: + description: The Tenant ID associated with this Managed Service + Identity. 
+ type: string + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Data Factory. Possible + values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + managedVirtualNetworkEnabled: + description: Is Managed Virtual Network enabled? + type: boolean + publicNetworkEnabled: + description: Is the Data Factory visible to the public network? + Defaults to true. + type: boolean + purviewId: + description: Specifies the ID of the purview account resource + associated with the Data Factory. + type: string + resourceGroupName: + description: The name of the resource group in which to create + the Data Factory. Changing this forces a new resource to be + created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + vstsConfiguration: + description: A vsts_configuration block as defined below. + properties: + accountName: + description: Specifies the VSTS account name. + type: string + branchName: + description: Specifies the branch of the repository to get + code from. + type: string + projectName: + description: Specifies the name of the VSTS project. + type: string + publishingEnabled: + description: Is automated publishing enabled? Defaults to + true. + type: boolean + repositoryName: + description: Specifies the name of the git repository. + type: string + rootFolder: + description: Specifies the root folder within the repository. + Set to / for the top level. + type: string + tenantId: + description: Specifies the Tenant ID associated with the VSTS + account. + type: string + type: object + type: object + conditions: + description: Conditions of the resource. 
+ items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/datafactory.azure.upbound.io_integrationruntimeazuressis.yaml b/package/crds/datafactory.azure.upbound.io_integrationruntimeazuressis.yaml index 7c53afdf8..e99b678da 100644 --- a/package/crds/datafactory.azure.upbound.io_integrationruntimeazuressis.yaml +++ b/package/crds/datafactory.azure.upbound.io_integrationruntimeazuressis.yaml @@ -1681,3 +1681,1612 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: IntegrationRuntimeAzureSSIS is the Schema for the IntegrationRuntimeAzureSSISs + API. Manages a Data Factory Azure-SSIS Integration Runtime. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: IntegrationRuntimeAzureSSISSpec defines the desired state + of IntegrationRuntimeAzureSSIS + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + catalogInfo: + description: A catalog_info block as defined below. + properties: + administratorLogin: + description: Administrator login name for the SQL Server. + type: string + administratorPasswordSecretRef: + description: Administrator login password for the SQL Server. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + dualStandbyPairName: + description: The dual standby Azure-SSIS Integration Runtime + pair with SSISDB failover. + type: string + elasticPoolName: + description: The name of SQL elastic pool where the database + will be created for the SSIS catalog. Mutually exclusive + with pricing_tier. + type: string + pricingTier: + description: 'Pricing tier for the database that will be created + for the SSIS catalog. 
Valid values are: Basic, S0, S1, S2, + S3, S4, S6, S7, S9, S12, P1, P2, P4, P6, P11, P15, GP_S_Gen5_1, + GP_S_Gen5_2, GP_S_Gen5_4, GP_S_Gen5_6, GP_S_Gen5_8, GP_S_Gen5_10, + GP_S_Gen5_12, GP_S_Gen5_14, GP_S_Gen5_16, GP_S_Gen5_18, + GP_S_Gen5_20, GP_S_Gen5_24, GP_S_Gen5_32, GP_S_Gen5_40, + GP_Gen5_2, GP_Gen5_4, GP_Gen5_6, GP_Gen5_8, GP_Gen5_10, + GP_Gen5_12, GP_Gen5_14, GP_Gen5_16, GP_Gen5_18, GP_Gen5_20, + GP_Gen5_24, GP_Gen5_32, GP_Gen5_40, GP_Gen5_80, BC_Gen5_2, + BC_Gen5_4, BC_Gen5_6, BC_Gen5_8, BC_Gen5_10, BC_Gen5_12, + BC_Gen5_14, BC_Gen5_16, BC_Gen5_18, BC_Gen5_20, BC_Gen5_24, + BC_Gen5_32, BC_Gen5_40, BC_Gen5_80, HS_Gen5_2, HS_Gen5_4, + HS_Gen5_6, HS_Gen5_8, HS_Gen5_10, HS_Gen5_12, HS_Gen5_14, + HS_Gen5_16, HS_Gen5_18, HS_Gen5_20, HS_Gen5_24, HS_Gen5_32, + HS_Gen5_40 and HS_Gen5_80. Mutually exclusive with elastic_pool_name.' + type: string + serverEndpoint: + description: The endpoint of an Azure SQL Server that will + be used to host the SSIS catalog. + type: string + type: object + credentialName: + description: The name of a Data Factory Credential that the SSIS + integration will use to access data sources. For example, azurerm_data_factory_credential_user_managed_identity + type: string + customSetupScript: + description: A custom_setup_script block as defined below. + properties: + blobContainerUri: + description: The blob endpoint for the container which contains + a custom setup script that will be run on every node on + startup. See https://docs.microsoft.com/azure/data-factory/how-to-configure-azure-ssis-ir-custom-setup + for more information. + type: string + sasTokenSecretRef: + description: A container SAS token that gives access to the + files. See https://docs.microsoft.com/azure/data-factory/how-to-configure-azure-ssis-ir-custom-setup + for more information. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - key + - name + - namespace + type: object + required: + - sasTokenSecretRef + type: object + dataFactoryId: + description: The Data Factory ID in which to associate the Linked + Service with. Changing this forces a new resource. + type: string + dataFactoryIdRef: + description: Reference to a Factory in datafactory to populate + dataFactoryId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + dataFactoryIdSelector: + description: Selector for a Factory in datafactory to populate + dataFactoryId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + description: + description: Integration runtime description. + type: string + edition: + description: The Azure-SSIS Integration Runtime edition. Valid + values are Standard and Enterprise. Defaults to Standard. + type: string + expressCustomSetup: + description: An express_custom_setup block as defined below. + properties: + commandKey: + description: One or more command_key blocks as defined below. + items: + properties: + keyVaultPassword: + description: A key_vault_secret_reference block as defined + below. + properties: + linkedServiceName: + description: Name of the Linked Service to associate + with the packages. + type: string + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with + the Key Vault Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + secretName: + description: Specifies the secret name in Azure + Key Vault. + type: string + secretVersion: + description: Specifies the secret version in Azure + Key Vault. + type: string + type: object + passwordSecretRef: + description: The password for the target device. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + targetName: + description: The target computer or domain name. 
+ type: string + userName: + description: The username for the target device. + type: string + type: object + type: array + component: + description: One or more component blocks as defined below. + items: + properties: + keyVaultLicense: + description: A key_vault_secret_reference block as defined + below. + properties: + linkedServiceName: + description: Name of the Linked Service to associate + with the packages. + type: string + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with + the Key Vault Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + secretName: + description: Specifies the secret name in Azure + Key Vault. + type: string + secretVersion: + description: Specifies the secret version in Azure + Key Vault. + type: string + type: object + licenseSecretRef: + description: The license used for the Component. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + name: + description: Name of the package store. + type: string + type: object + type: array + environment: + additionalProperties: + type: string + description: The Environment Variables for the Azure-SSIS + Integration Runtime. + type: object + x-kubernetes-map-type: granular + powershellVersion: + description: The version of Azure Powershell installed for + the Azure-SSIS Integration Runtime. + type: string + type: object + expressVnetIntegration: + description: A express_vnet_integration block as defined below. + properties: + subnetId: + description: id of the subnet to which the nodes of the Azure-SSIS + Integration Runtime will be added. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + licenseType: + description: The type of the license that is used. Valid values + are LicenseIncluded and BasePrice. Defaults to LicenseIncluded. + type: string + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + maxParallelExecutionsPerNode: + description: Defines the maximum parallel executions per node. + Defaults to 1. Max is 1. + type: number + nodeSize: + description: 'The size of the nodes on which the Azure-SSIS Integration + Runtime runs. Valid values are: Standard_D2_v3, Standard_D4_v3, + Standard_D8_v3, Standard_D16_v3, Standard_D32_v3, Standard_D64_v3, + Standard_E2_v3, Standard_E4_v3, Standard_E8_v3, Standard_E16_v3, + Standard_E32_v3, Standard_E64_v3, Standard_D1_v2, Standard_D2_v2, + Standard_D3_v2, Standard_D4_v2, Standard_A4_v2 and Standard_A8_v2' + type: string + numberOfNodes: + description: Number of nodes for the Azure-SSIS Integration Runtime. + Max is 10. Defaults to 1. + type: number + packageStore: + description: One or more package_store block as defined below. + items: + properties: + linkedServiceName: + description: Name of the Linked Service to associate with + the packages. + type: string + name: + description: Name of the package store. + type: string + type: object + type: array + proxy: + description: A proxy block as defined below. + properties: + path: + description: The path in the data store to be used when moving + data between Self-Hosted and Azure-SSIS Integration Runtimes. + type: string + selfHostedIntegrationRuntimeName: + description: Name of Self Hosted Integration Runtime as a + proxy. + type: string + stagingStorageLinkedServiceName: + description: Name of Azure Blob Storage linked service to + reference the staging data store to be used when moving + data between self-hosted and Azure-SSIS integration runtimes. 
+ type: string + type: object + vnetIntegration: + description: A vnet_integration block as defined below. + properties: + publicIps: + description: Static public IP addresses for the Azure-SSIS + Integration Runtime. The size must be 2. + items: + type: string + type: array + subnetId: + description: id of the subnet to which the nodes of the Azure-SSIS + Integration Runtime will be added. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + subnetName: + description: Name of the subnet to which the nodes of the + Azure-SSIS Integration Runtime will be added. + type: string + subnetNameRef: + description: Reference to a Subnet in network to populate + subnetName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetNameSelector: + description: Selector for a Subnet in network to populate + subnetName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + vnetId: + description: ID of the virtual network to which the nodes + of the Azure-SSIS Integration Runtime will be added. + type: string + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + catalogInfo: + description: A catalog_info block as defined below. + properties: + administratorLogin: + description: Administrator login name for the SQL Server. 
+ type: string + dualStandbyPairName: + description: The dual standby Azure-SSIS Integration Runtime + pair with SSISDB failover. + type: string + elasticPoolName: + description: The name of SQL elastic pool where the database + will be created for the SSIS catalog. Mutually exclusive + with pricing_tier. + type: string + pricingTier: + description: 'Pricing tier for the database that will be created + for the SSIS catalog. Valid values are: Basic, S0, S1, S2, + S3, S4, S6, S7, S9, S12, P1, P2, P4, P6, P11, P15, GP_S_Gen5_1, + GP_S_Gen5_2, GP_S_Gen5_4, GP_S_Gen5_6, GP_S_Gen5_8, GP_S_Gen5_10, + GP_S_Gen5_12, GP_S_Gen5_14, GP_S_Gen5_16, GP_S_Gen5_18, + GP_S_Gen5_20, GP_S_Gen5_24, GP_S_Gen5_32, GP_S_Gen5_40, + GP_Gen5_2, GP_Gen5_4, GP_Gen5_6, GP_Gen5_8, GP_Gen5_10, + GP_Gen5_12, GP_Gen5_14, GP_Gen5_16, GP_Gen5_18, GP_Gen5_20, + GP_Gen5_24, GP_Gen5_32, GP_Gen5_40, GP_Gen5_80, BC_Gen5_2, + BC_Gen5_4, BC_Gen5_6, BC_Gen5_8, BC_Gen5_10, BC_Gen5_12, + BC_Gen5_14, BC_Gen5_16, BC_Gen5_18, BC_Gen5_20, BC_Gen5_24, + BC_Gen5_32, BC_Gen5_40, BC_Gen5_80, HS_Gen5_2, HS_Gen5_4, + HS_Gen5_6, HS_Gen5_8, HS_Gen5_10, HS_Gen5_12, HS_Gen5_14, + HS_Gen5_16, HS_Gen5_18, HS_Gen5_20, HS_Gen5_24, HS_Gen5_32, + HS_Gen5_40 and HS_Gen5_80. Mutually exclusive with elastic_pool_name.' + type: string + serverEndpoint: + description: The endpoint of an Azure SQL Server that will + be used to host the SSIS catalog. + type: string + type: object + credentialName: + description: The name of a Data Factory Credential that the SSIS + integration will use to access data sources. For example, azurerm_data_factory_credential_user_managed_identity + type: string + customSetupScript: + description: A custom_setup_script block as defined below. + properties: + blobContainerUri: + description: The blob endpoint for the container which contains + a custom setup script that will be run on every node on + startup. 
See https://docs.microsoft.com/azure/data-factory/how-to-configure-azure-ssis-ir-custom-setup + for more information. + type: string + type: object + description: + description: Integration runtime description. + type: string + edition: + description: The Azure-SSIS Integration Runtime edition. Valid + values are Standard and Enterprise. Defaults to Standard. + type: string + expressCustomSetup: + description: An express_custom_setup block as defined below. + properties: + commandKey: + description: One or more command_key blocks as defined below. + items: + properties: + keyVaultPassword: + description: A key_vault_secret_reference block as defined + below. + properties: + linkedServiceName: + description: Name of the Linked Service to associate + with the packages. + type: string + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with + the Key Vault Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + secretName: + description: Specifies the secret name in Azure + Key Vault. + type: string + secretVersion: + description: Specifies the secret version in Azure + Key Vault. + type: string + type: object + targetName: + description: The target computer or domain name. + type: string + userName: + description: The username for the target device. + type: string + type: object + type: array + component: + description: One or more component blocks as defined below. + items: + properties: + keyVaultLicense: + description: A key_vault_secret_reference block as defined + below. + properties: + linkedServiceName: + description: Name of the Linked Service to associate + with the packages. + type: string + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with + the Key Vault Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + secretName: + description: Specifies the secret name in Azure + Key Vault. 
+ type: string + secretVersion: + description: Specifies the secret version in Azure + Key Vault. + type: string + type: object + name: + description: Name of the package store. + type: string + type: object + type: array + environment: + additionalProperties: + type: string + description: The Environment Variables for the Azure-SSIS + Integration Runtime. + type: object + x-kubernetes-map-type: granular + powershellVersion: + description: The version of Azure Powershell installed for + the Azure-SSIS Integration Runtime. + type: string + type: object + expressVnetIntegration: + description: A express_vnet_integration block as defined below. + properties: + subnetId: + description: id of the subnet to which the nodes of the Azure-SSIS + Integration Runtime will be added. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + licenseType: + description: The type of the license that is used. Valid values + are LicenseIncluded and BasePrice. Defaults to LicenseIncluded. + type: string + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + maxParallelExecutionsPerNode: + description: Defines the maximum parallel executions per node. + Defaults to 1. Max is 1. + type: number + nodeSize: + description: 'The size of the nodes on which the Azure-SSIS Integration + Runtime runs. 
Valid values are: Standard_D2_v3, Standard_D4_v3, + Standard_D8_v3, Standard_D16_v3, Standard_D32_v3, Standard_D64_v3, + Standard_E2_v3, Standard_E4_v3, Standard_E8_v3, Standard_E16_v3, + Standard_E32_v3, Standard_E64_v3, Standard_D1_v2, Standard_D2_v2, + Standard_D3_v2, Standard_D4_v2, Standard_A4_v2 and Standard_A8_v2' + type: string + numberOfNodes: + description: Number of nodes for the Azure-SSIS Integration Runtime. + Max is 10. Defaults to 1. + type: number + packageStore: + description: One or more package_store block as defined below. + items: + properties: + linkedServiceName: + description: Name of the Linked Service to associate with + the packages. + type: string + name: + description: Name of the package store. + type: string + type: object + type: array + proxy: + description: A proxy block as defined below. + properties: + path: + description: The path in the data store to be used when moving + data between Self-Hosted and Azure-SSIS Integration Runtimes. + type: string + selfHostedIntegrationRuntimeName: + description: Name of Self Hosted Integration Runtime as a + proxy. + type: string + stagingStorageLinkedServiceName: + description: Name of Azure Blob Storage linked service to + reference the staging data store to be used when moving + data between self-hosted and Azure-SSIS integration runtimes. + type: string + type: object + vnetIntegration: + description: A vnet_integration block as defined below. + properties: + publicIps: + description: Static public IP addresses for the Azure-SSIS + Integration Runtime. The size must be 2. + items: + type: string + type: array + subnetId: + description: id of the subnet to which the nodes of the Azure-SSIS + Integration Runtime will be added. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + subnetName: + description: Name of the subnet to which the nodes of the + Azure-SSIS Integration Runtime will be added. + type: string + subnetNameRef: + description: Reference to a Subnet in network to populate + subnetName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetNameSelector: + description: Selector for a Subnet in network to populate + subnetName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + vnetId: + description: ID of the virtual network to which the nodes + of the Azure-SSIS Integration Runtime will be added. + type: string + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.nodeSize is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.nodeSize) + || (has(self.initProvider) && has(self.initProvider.nodeSize))' + status: + description: IntegrationRuntimeAzureSSISStatus defines the observed state + of IntegrationRuntimeAzureSSIS. + properties: + atProvider: + properties: + catalogInfo: + description: A catalog_info block as defined below. + properties: + administratorLogin: + description: Administrator login name for the SQL Server. + type: string + dualStandbyPairName: + description: The dual standby Azure-SSIS Integration Runtime + pair with SSISDB failover. + type: string + elasticPoolName: + description: The name of SQL elastic pool where the database + will be created for the SSIS catalog. Mutually exclusive + with pricing_tier. + type: string + pricingTier: + description: 'Pricing tier for the database that will be created + for the SSIS catalog. 
Valid values are: Basic, S0, S1, S2, + S3, S4, S6, S7, S9, S12, P1, P2, P4, P6, P11, P15, GP_S_Gen5_1, + GP_S_Gen5_2, GP_S_Gen5_4, GP_S_Gen5_6, GP_S_Gen5_8, GP_S_Gen5_10, + GP_S_Gen5_12, GP_S_Gen5_14, GP_S_Gen5_16, GP_S_Gen5_18, + GP_S_Gen5_20, GP_S_Gen5_24, GP_S_Gen5_32, GP_S_Gen5_40, + GP_Gen5_2, GP_Gen5_4, GP_Gen5_6, GP_Gen5_8, GP_Gen5_10, + GP_Gen5_12, GP_Gen5_14, GP_Gen5_16, GP_Gen5_18, GP_Gen5_20, + GP_Gen5_24, GP_Gen5_32, GP_Gen5_40, GP_Gen5_80, BC_Gen5_2, + BC_Gen5_4, BC_Gen5_6, BC_Gen5_8, BC_Gen5_10, BC_Gen5_12, + BC_Gen5_14, BC_Gen5_16, BC_Gen5_18, BC_Gen5_20, BC_Gen5_24, + BC_Gen5_32, BC_Gen5_40, BC_Gen5_80, HS_Gen5_2, HS_Gen5_4, + HS_Gen5_6, HS_Gen5_8, HS_Gen5_10, HS_Gen5_12, HS_Gen5_14, + HS_Gen5_16, HS_Gen5_18, HS_Gen5_20, HS_Gen5_24, HS_Gen5_32, + HS_Gen5_40 and HS_Gen5_80. Mutually exclusive with elastic_pool_name.' + type: string + serverEndpoint: + description: The endpoint of an Azure SQL Server that will + be used to host the SSIS catalog. + type: string + type: object + credentialName: + description: The name of a Data Factory Credential that the SSIS + integration will use to access data sources. For example, azurerm_data_factory_credential_user_managed_identity + type: string + customSetupScript: + description: A custom_setup_script block as defined below. + properties: + blobContainerUri: + description: The blob endpoint for the container which contains + a custom setup script that will be run on every node on + startup. See https://docs.microsoft.com/azure/data-factory/how-to-configure-azure-ssis-ir-custom-setup + for more information. + type: string + type: object + dataFactoryId: + description: The Data Factory ID in which to associate the Linked + Service with. Changing this forces a new resource. + type: string + description: + description: Integration runtime description. + type: string + edition: + description: The Azure-SSIS Integration Runtime edition. Valid + values are Standard and Enterprise. Defaults to Standard. 
+ type: string + expressCustomSetup: + description: An express_custom_setup block as defined below. + properties: + commandKey: + description: One or more command_key blocks as defined below. + items: + properties: + keyVaultPassword: + description: A key_vault_secret_reference block as defined + below. + properties: + linkedServiceName: + description: Name of the Linked Service to associate + with the packages. + type: string + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with + the Key Vault Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + secretName: + description: Specifies the secret name in Azure + Key Vault. + type: string + secretVersion: + description: Specifies the secret version in Azure + Key Vault. + type: string + type: object + targetName: + description: The target computer or domain name. + type: string + userName: + description: The username for the target device. + type: string + type: object + type: array + component: + description: One or more component blocks as defined below. + items: + properties: + keyVaultLicense: + description: A key_vault_secret_reference block as defined + below. + properties: + linkedServiceName: + description: Name of the Linked Service to associate + with the packages. + type: string + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with + the Key Vault Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + secretName: + description: Specifies the secret name in Azure + Key Vault. + type: string + secretVersion: + description: Specifies the secret version in Azure + Key Vault. + type: string + type: object + name: + description: Name of the package store. + type: string + type: object + type: array + environment: + additionalProperties: + type: string + description: The Environment Variables for the Azure-SSIS + Integration Runtime. 
+ type: object + x-kubernetes-map-type: granular + powershellVersion: + description: The version of Azure Powershell installed for + the Azure-SSIS Integration Runtime. + type: string + type: object + expressVnetIntegration: + description: A express_vnet_integration block as defined below. + properties: + subnetId: + description: id of the subnet to which the nodes of the Azure-SSIS + Integration Runtime will be added. + type: string + type: object + id: + description: The ID of the Data Factory Azure-SSIS Integration + Runtime. + type: string + licenseType: + description: The type of the license that is used. Valid values + are LicenseIncluded and BasePrice. Defaults to LicenseIncluded. + type: string + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + maxParallelExecutionsPerNode: + description: Defines the maximum parallel executions per node. + Defaults to 1. Max is 1. + type: number + nodeSize: + description: 'The size of the nodes on which the Azure-SSIS Integration + Runtime runs. Valid values are: Standard_D2_v3, Standard_D4_v3, + Standard_D8_v3, Standard_D16_v3, Standard_D32_v3, Standard_D64_v3, + Standard_E2_v3, Standard_E4_v3, Standard_E8_v3, Standard_E16_v3, + Standard_E32_v3, Standard_E64_v3, Standard_D1_v2, Standard_D2_v2, + Standard_D3_v2, Standard_D4_v2, Standard_A4_v2 and Standard_A8_v2' + type: string + numberOfNodes: + description: Number of nodes for the Azure-SSIS Integration Runtime. + Max is 10. Defaults to 1. + type: number + packageStore: + description: One or more package_store block as defined below. + items: + properties: + linkedServiceName: + description: Name of the Linked Service to associate with + the packages. + type: string + name: + description: Name of the package store. + type: string + type: object + type: array + proxy: + description: A proxy block as defined below. 
+ properties: + path: + description: The path in the data store to be used when moving + data between Self-Hosted and Azure-SSIS Integration Runtimes. + type: string + selfHostedIntegrationRuntimeName: + description: Name of Self Hosted Integration Runtime as a + proxy. + type: string + stagingStorageLinkedServiceName: + description: Name of Azure Blob Storage linked service to + reference the staging data store to be used when moving + data between self-hosted and Azure-SSIS integration runtimes. + type: string + type: object + vnetIntegration: + description: A vnet_integration block as defined below. + properties: + publicIps: + description: Static public IP addresses for the Azure-SSIS + Integration Runtime. The size must be 2. + items: + type: string + type: array + subnetId: + description: id of the subnet to which the nodes of the Azure-SSIS + Integration Runtime will be added. + type: string + subnetName: + description: Name of the subnet to which the nodes of the + Azure-SSIS Integration Runtime will be added. + type: string + vnetId: + description: ID of the virtual network to which the nodes + of the Azure-SSIS Integration Runtime will be added. + type: string + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/datafactory.azure.upbound.io_integrationruntimemanageds.yaml b/package/crds/datafactory.azure.upbound.io_integrationruntimemanageds.yaml index 78a5777fb..6fcbc7e3d 100644 --- a/package/crds/datafactory.azure.upbound.io_integrationruntimemanageds.yaml +++ b/package/crds/datafactory.azure.upbound.io_integrationruntimemanageds.yaml @@ -847,3 +847,814 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: 
IntegrationRuntimeManaged is the Schema for the IntegrationRuntimeManageds + API. Manages an Azure Data Factory Managed Integration Runtime. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: IntegrationRuntimeManagedSpec defines the desired state of + IntegrationRuntimeManaged + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + catalogInfo: + description: A catalog_info block as defined below. + properties: + administratorLogin: + description: Administrator login name for the SQL Server. + type: string + administratorPasswordSecretRef: + description: Administrator login password for the SQL Server. 
+ properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + pricingTier: + description: 'Pricing tier for the database that will be created + for the SSIS catalog. Valid values are: Basic, Standard, + Premium and PremiumRS. Defaults to Basic.' + type: string + serverEndpoint: + description: The endpoint of an Azure SQL Server that will + be used to host the SSIS catalog. + type: string + type: object + credentialName: + description: The name of the credential to use for the Managed + Integration Runtime. + type: string + customSetupScript: + description: A custom_setup_script block as defined below. + properties: + blobContainerUri: + description: The blob endpoint for the container which contains + a custom setup script that will be run on every node on + startup. See https://docs.microsoft.com/azure/data-factory/how-to-configure-azure-ssis-ir-custom-setup + for more information. + type: string + sasTokenSecretRef: + description: A container SAS token that gives access to the + files. See https://docs.microsoft.com/azure/data-factory/how-to-configure-azure-ssis-ir-custom-setup + for more information. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + required: + - sasTokenSecretRef + type: object + dataFactoryId: + description: The Data Factory ID in which to associate the Linked + Service with. Changing this forces a new resource. + type: string + dataFactoryIdRef: + description: Reference to a Factory in datafactory to populate + dataFactoryId. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + dataFactoryIdSelector: + description: Selector for a Factory in datafactory to populate + dataFactoryId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + description: + description: Integration runtime description. + type: string + edition: + description: The Managed Integration Runtime edition. Valid values + are Standard and Enterprise. Defaults to Standard. + type: string + licenseType: + description: The type of the license that is used. Valid values + are LicenseIncluded and BasePrice. Defaults to LicenseIncluded. + type: string + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + maxParallelExecutionsPerNode: + description: Defines the maximum parallel executions per node. + Defaults to 1. Max is 1. + type: number + nodeSize: + description: 'The size of the nodes on which the Managed Integration + Runtime runs. Valid values are: Standard_D2_v3, Standard_D4_v3, + Standard_D8_v3, Standard_D16_v3, Standard_D32_v3, Standard_D64_v3, + Standard_E2_v3, Standard_E4_v3, Standard_E8_v3, Standard_E16_v3, + Standard_E32_v3, Standard_E64_v3, Standard_D1_v2, Standard_D2_v2, + Standard_D3_v2, Standard_D4_v2, Standard_A4_v2 and Standard_A8_v2' + type: string + numberOfNodes: + description: Number of nodes for the Managed Integration Runtime. + Max is 10. Defaults to 1. + type: number + vnetIntegration: + description: A vnet_integration block as defined below. + properties: + subnetName: + description: Name of the subnet to which the nodes of the + Managed Integration Runtime will be added. + type: string + subnetNameRef: + description: Reference to a Subnet in network to populate + subnetName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetNameSelector: + description: Selector for a Subnet in network to populate + subnetName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + vnetId: + description: ID of the virtual network to which the nodes + of the Managed Integration Runtime will be added. 
+ type: string + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + catalogInfo: + description: A catalog_info block as defined below. + properties: + administratorLogin: + description: Administrator login name for the SQL Server. + type: string + pricingTier: + description: 'Pricing tier for the database that will be created + for the SSIS catalog. Valid values are: Basic, Standard, + Premium and PremiumRS. Defaults to Basic.' + type: string + serverEndpoint: + description: The endpoint of an Azure SQL Server that will + be used to host the SSIS catalog. + type: string + type: object + credentialName: + description: The name of the credential to use for the Managed + Integration Runtime. + type: string + customSetupScript: + description: A custom_setup_script block as defined below. + properties: + blobContainerUri: + description: The blob endpoint for the container which contains + a custom setup script that will be run on every node on + startup. See https://docs.microsoft.com/azure/data-factory/how-to-configure-azure-ssis-ir-custom-setup + for more information. + type: string + type: object + description: + description: Integration runtime description. + type: string + edition: + description: The Managed Integration Runtime edition. Valid values + are Standard and Enterprise. Defaults to Standard. 
+ type: string + licenseType: + description: The type of the license that is used. Valid values + are LicenseIncluded and BasePrice. Defaults to LicenseIncluded. + type: string + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + maxParallelExecutionsPerNode: + description: Defines the maximum parallel executions per node. + Defaults to 1. Max is 1. + type: number + nodeSize: + description: 'The size of the nodes on which the Managed Integration + Runtime runs. Valid values are: Standard_D2_v3, Standard_D4_v3, + Standard_D8_v3, Standard_D16_v3, Standard_D32_v3, Standard_D64_v3, + Standard_E2_v3, Standard_E4_v3, Standard_E8_v3, Standard_E16_v3, + Standard_E32_v3, Standard_E64_v3, Standard_D1_v2, Standard_D2_v2, + Standard_D3_v2, Standard_D4_v2, Standard_A4_v2 and Standard_A8_v2' + type: string + numberOfNodes: + description: Number of nodes for the Managed Integration Runtime. + Max is 10. Defaults to 1. + type: number + vnetIntegration: + description: A vnet_integration block as defined below. + properties: + subnetName: + description: Name of the subnet to which the nodes of the + Managed Integration Runtime will be added. + type: string + subnetNameRef: + description: Reference to a Subnet in network to populate + subnetName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetNameSelector: + description: Selector for a Subnet in network to populate + subnetName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + vnetId: + description: ID of the virtual network to which the nodes + of the Managed Integration Runtime will be added. + type: string + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. 
+ This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.nodeSize is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.nodeSize) + || (has(self.initProvider) && has(self.initProvider.nodeSize))' + status: + description: IntegrationRuntimeManagedStatus defines the observed state + of IntegrationRuntimeManaged. + properties: + atProvider: + properties: + catalogInfo: + description: A catalog_info block as defined below. + properties: + administratorLogin: + description: Administrator login name for the SQL Server. + type: string + pricingTier: + description: 'Pricing tier for the database that will be created + for the SSIS catalog. Valid values are: Basic, Standard, + Premium and PremiumRS. Defaults to Basic.' + type: string + serverEndpoint: + description: The endpoint of an Azure SQL Server that will + be used to host the SSIS catalog. + type: string + type: object + credentialName: + description: The name of the credential to use for the Managed + Integration Runtime. + type: string + customSetupScript: + description: A custom_setup_script block as defined below. + properties: + blobContainerUri: + description: The blob endpoint for the container which contains + a custom setup script that will be run on every node on + startup. See https://docs.microsoft.com/azure/data-factory/how-to-configure-azure-ssis-ir-custom-setup + for more information. 
+ type: string + type: object + dataFactoryId: + description: The Data Factory ID in which to associate the Linked + Service with. Changing this forces a new resource. + type: string + description: + description: Integration runtime description. + type: string + edition: + description: The Managed Integration Runtime edition. Valid values + are Standard and Enterprise. Defaults to Standard. + type: string + id: + description: The ID of the Data Factory Integration Managed Runtime. + type: string + licenseType: + description: The type of the license that is used. Valid values + are LicenseIncluded and BasePrice. Defaults to LicenseIncluded. + type: string + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + maxParallelExecutionsPerNode: + description: Defines the maximum parallel executions per node. + Defaults to 1. Max is 1. + type: number + nodeSize: + description: 'The size of the nodes on which the Managed Integration + Runtime runs. Valid values are: Standard_D2_v3, Standard_D4_v3, + Standard_D8_v3, Standard_D16_v3, Standard_D32_v3, Standard_D64_v3, + Standard_E2_v3, Standard_E4_v3, Standard_E8_v3, Standard_E16_v3, + Standard_E32_v3, Standard_E64_v3, Standard_D1_v2, Standard_D2_v2, + Standard_D3_v2, Standard_D4_v2, Standard_A4_v2 and Standard_A8_v2' + type: string + numberOfNodes: + description: Number of nodes for the Managed Integration Runtime. + Max is 10. Defaults to 1. + type: number + vnetIntegration: + description: A vnet_integration block as defined below. + properties: + subnetName: + description: Name of the subnet to which the nodes of the + Managed Integration Runtime will be added. + type: string + vnetId: + description: ID of the virtual network to which the nodes + of the Managed Integration Runtime will be added. + type: string + type: object + type: object + conditions: + description: Conditions of the resource. 
+ items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/datafactory.azure.upbound.io_linkedcustomservices.yaml b/package/crds/datafactory.azure.upbound.io_linkedcustomservices.yaml index 4a47c5407..a53a9cbb6 100644 --- a/package/crds/datafactory.azure.upbound.io_linkedcustomservices.yaml +++ b/package/crds/datafactory.azure.upbound.io_linkedcustomservices.yaml @@ -570,3 +570,549 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: LinkedCustomService is the Schema for the LinkedCustomServices + API. Manages a Linked Service (connection) between a resource and Azure + Data Factory. This is a generic resource that supports all different Linked + Service Types. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: LinkedCustomServiceSpec defines the desired state of LinkedCustomService + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + additionalProperties: + additionalProperties: + type: string + description: A map of additional properties to associate with + the Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + annotations: + description: List of tags that can be used for describing the + Data Factory Linked Service. + items: + type: string + type: array + dataFactoryId: + description: The Data Factory ID in which to associate the Linked + Service with. Changing this forces a new resource. + type: string + dataFactoryIdRef: + description: Reference to a Factory in datafactory to populate + dataFactoryId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + dataFactoryIdSelector: + description: Selector for a Factory in datafactory to populate + dataFactoryId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + description: + description: The description for the Data Factory Linked Service. + type: string + integrationRuntime: + description: An integration_runtime block as defined below. 
+ properties: + name: + description: The integration runtime reference to associate + with the Data Factory Linked Service. + type: string + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the integration + runtime. + type: object + x-kubernetes-map-type: granular + type: object + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the Data Factory + Linked Service. + type: object + x-kubernetes-map-type: granular + type: + description: The type of data stores that will be connected to + Data Factory. For full list of supported data stores, please + refer to Azure Data Factory connector. Changing this forces + a new resource to be created. + type: string + typePropertiesJson: + description: A JSON object that contains the properties of the + Data Factory Linked Service. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + additionalProperties: + additionalProperties: + type: string + description: A map of additional properties to associate with + the Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + annotations: + description: List of tags that can be used for describing the + Data Factory Linked Service. 
+ items: + type: string + type: array + description: + description: The description for the Data Factory Linked Service. + type: string + integrationRuntime: + description: An integration_runtime block as defined below. + properties: + name: + description: The integration runtime reference to associate + with the Data Factory Linked Service. + type: string + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the integration + runtime. + type: object + x-kubernetes-map-type: granular + type: object + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the Data Factory + Linked Service. + type: object + x-kubernetes-map-type: granular + type: + description: The type of data stores that will be connected to + Data Factory. For full list of supported data stores, please + refer to Azure Data Factory connector. Changing this forces + a new resource to be created. + type: string + typePropertiesJson: + description: A JSON object that contains the properties of the + Data Factory Linked Service. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.type is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.type) + || (has(self.initProvider) && has(self.initProvider.type))' + - message: spec.forProvider.typePropertiesJson is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.typePropertiesJson) + || (has(self.initProvider) && has(self.initProvider.typePropertiesJson))' + status: + description: LinkedCustomServiceStatus defines the observed state of LinkedCustomService. + properties: + atProvider: + properties: + additionalProperties: + additionalProperties: + type: string + description: A map of additional properties to associate with + the Data Factory Linked Service. 
+ type: object + x-kubernetes-map-type: granular + annotations: + description: List of tags that can be used for describing the + Data Factory Linked Service. + items: + type: string + type: array + dataFactoryId: + description: The Data Factory ID in which to associate the Linked + Service with. Changing this forces a new resource. + type: string + description: + description: The description for the Data Factory Linked Service. + type: string + id: + description: The ID of the Data Factory Linked Service. + type: string + integrationRuntime: + description: An integration_runtime block as defined below. + properties: + name: + description: The integration runtime reference to associate + with the Data Factory Linked Service. + type: string + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the integration + runtime. + type: object + x-kubernetes-map-type: granular + type: object + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the Data Factory + Linked Service. + type: object + x-kubernetes-map-type: granular + type: + description: The type of data stores that will be connected to + Data Factory. For full list of supported data stores, please + refer to Azure Data Factory connector. Changing this forces + a new resource to be created. + type: string + typePropertiesJson: + description: A JSON object that contains the properties of the + Data Factory Linked Service. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/datafactory.azure.upbound.io_linkedserviceazureblobstorages.yaml b/package/crds/datafactory.azure.upbound.io_linkedserviceazureblobstorages.yaml index 6b91ddbcd..c1af830ce 100644 --- a/package/crds/datafactory.azure.upbound.io_linkedserviceazureblobstorages.yaml +++ b/package/crds/datafactory.azure.upbound.io_linkedserviceazureblobstorages.yaml @@ -1028,3 +1028,1001 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: LinkedServiceAzureBlobStorage is the Schema for the LinkedServiceAzureBlobStorages + API. Manages a Linked Service (connection) between an Azure Blob Storage + Account and Azure Data Factory. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: LinkedServiceAzureBlobStorageSpec defines the desired state + of LinkedServiceAzureBlobStorage + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + additionalProperties: + additionalProperties: + type: string + description: A map of additional properties to associate with + the Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + annotations: + description: List of tags that can be used for describing the + Data Factory Linked Service. + items: + type: string + type: array + connectionStringInsecure: + description: The connection string sent insecurely. Conflicts + with connection_string, sas_uri and service_endpoint. + type: string + connectionStringSecretRef: + description: The connection string. Conflicts with connection_string_insecure, + sas_uri and service_endpoint. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - key + - name + - namespace + type: object + dataFactoryId: + description: The Data Factory ID in which to associate the Linked + Service with. Changing this forces a new resource. + type: string + dataFactoryIdRef: + description: Reference to a Factory in datafactory to populate + dataFactoryId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + dataFactoryIdSelector: + description: Selector for a Factory in datafactory to populate + dataFactoryId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + description: + description: The description for the Data Factory Linked Service. + type: string + integrationRuntimeName: + description: The integration runtime reference to associate with + the Data Factory Linked Service. + type: string + keyVaultSasToken: + description: A key_vault_sas_token block as defined below. Use + this argument to store SAS Token in an existing Key Vault. It + needs an existing Key Vault Data Factory Linked Service. A sas_uri + is required. + properties: + linkedServiceName: + description: Specifies the name of an existing Key Vault Data + Factory Linked Service. + type: string + linkedServiceNameRef: + description: Reference to a LinkedServiceKeyVault in datafactory + to populate linkedServiceName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + linkedServiceNameSelector: + description: Selector for a LinkedServiceKeyVault in datafactory + to populate linkedServiceName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + secretName: + description: Specifies the secret name in Azure Key Vault + that stores the SAS token. + type: string + type: object + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the Data Factory + Linked Service. + type: object + x-kubernetes-map-type: granular + sasuriSecretRef: + description: The SAS URI. Conflicts with connection_string_insecure, + connection_string and service_endpoint. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. 
+                  type: string
+                namespace:
+                  description: Namespace of the secret.
+                  type: string
+              required:
+              - key
+              - name
+              - namespace
+              type: object
+            serviceEndpointSecretRef:
+              description: The Service Endpoint. Conflicts with connection_string,
+                connection_string_insecure and sas_uri.
+              properties:
+                key:
+                  description: The key to select.
+                  type: string
+                name:
+                  description: Name of the secret.
+                  type: string
+                namespace:
+                  description: Namespace of the secret.
+                  type: string
+              required:
+              - key
+              - name
+              - namespace
+              type: object
+            servicePrincipalId:
+              description: The service principal id in which to authenticate
+                against the Azure Blob Storage account.
+              type: string
+            servicePrincipalKey:
+              description: The service principal key in which to authenticate
+                against the Azure Blob Storage account.
+              type: string
+            servicePrincipalLinkedKeyVaultKey:
+              description: A service_principal_linked_key_vault_key block as
+                defined below. Use this argument to store Service Principal
+                key in an existing Key Vault. It needs an existing Key Vault
+                Data Factory Linked Service.
+              properties:
+                linkedServiceName:
+                  description: Specifies the name of an existing Key Vault Data
+                    Factory Linked Service.
+                  type: string
+                linkedServiceNameRef:
+                  description: Reference to a LinkedServiceKeyVault in datafactory
+                    to populate linkedServiceName.
+                  properties:
+                    name:
+                      description: Name of the referenced object.
+                      type: string
+                    policy:
+                      description: Policies for referencing.
+                      properties:
+                        resolution:
+                          default: Required
+                          description: |-
+                            Resolution specifies whether resolution of this reference is required.
+                            The default is 'Required', which means the reconcile will fail if the
+                            reference cannot be resolved. 'Optional' means this reference will be
+                            a no-op if it cannot be resolved.
+                          enum:
+                          - Required
+                          - Optional
+                          type: string
+                        resolve:
+                          description: |-
+                            Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + linkedServiceNameSelector: + description: Selector for a LinkedServiceKeyVault in datafactory + to populate linkedServiceName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + secretName: + description: Specifies the secret name in Azure Key Vault + that stores the Service Principal key. + type: string + type: object + storageKind: + description: Specify the kind of the storage account. Allowed + values are Storage, StorageV2, BlobStorage and BlockBlobStorage. + type: string + tenantId: + description: The tenant id or name in which to authenticate against + the Azure Blob Storage account. 
+ type: string + useManagedIdentity: + description: Whether to use the Data Factory's managed identity + to authenticate against the Azure Blob Storage account. Incompatible + with service_principal_id and service_principal_key. + type: boolean + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + additionalProperties: + additionalProperties: + type: string + description: A map of additional properties to associate with + the Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + annotations: + description: List of tags that can be used for describing the + Data Factory Linked Service. + items: + type: string + type: array + connectionStringInsecure: + description: The connection string sent insecurely. Conflicts + with connection_string, sas_uri and service_endpoint. + type: string + description: + description: The description for the Data Factory Linked Service. + type: string + integrationRuntimeName: + description: The integration runtime reference to associate with + the Data Factory Linked Service. + type: string + keyVaultSasToken: + description: A key_vault_sas_token block as defined below. Use + this argument to store SAS Token in an existing Key Vault. It + needs an existing Key Vault Data Factory Linked Service. A sas_uri + is required. 
+ properties: + linkedServiceName: + description: Specifies the name of an existing Key Vault Data + Factory Linked Service. + type: string + linkedServiceNameRef: + description: Reference to a LinkedServiceKeyVault in datafactory + to populate linkedServiceName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + linkedServiceNameSelector: + description: Selector for a LinkedServiceKeyVault in datafactory + to populate linkedServiceName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + secretName: + description: Specifies the secret name in Azure Key Vault + that stores the SAS token. + type: string + type: object + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the Data Factory + Linked Service. + type: object + x-kubernetes-map-type: granular + servicePrincipalId: + description: The service principal id in which to authenticate + against the Azure Blob Storage account. + type: string + servicePrincipalKey: + description: The service principal key in which to authenticate + against the AAzure Blob Storage account. + type: string + servicePrincipalLinkedKeyVaultKey: + description: A service_principal_linked_key_vault_key block as + defined below. Use this argument to store Service Principal + key in an existing Key Vault. It needs an existing Key Vault + Data Factory Linked Service. + properties: + linkedServiceName: + description: Specifies the name of an existing Key Vault Data + Factory Linked Service. + type: string + linkedServiceNameRef: + description: Reference to a LinkedServiceKeyVault in datafactory + to populate linkedServiceName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + linkedServiceNameSelector: + description: Selector for a LinkedServiceKeyVault in datafactory + to populate linkedServiceName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + secretName: + description: Specifies the secret name in Azure Key Vault + that stores the Service Principal key. + type: string + type: object + storageKind: + description: Specify the kind of the storage account. Allowed + values are Storage, StorageV2, BlobStorage and BlockBlobStorage. 
+ type: string + tenantId: + description: The tenant id or name in which to authenticate against + the Azure Blob Storage account. + type: string + useManagedIdentity: + description: Whether to use the Data Factory's managed identity + to authenticate against the Azure Blob Storage account. Incompatible + with service_principal_id and service_principal_key. + type: boolean + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: LinkedServiceAzureBlobStorageStatus defines the observed + state of LinkedServiceAzureBlobStorage. + properties: + atProvider: + properties: + additionalProperties: + additionalProperties: + type: string + description: A map of additional properties to associate with + the Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + annotations: + description: List of tags that can be used for describing the + Data Factory Linked Service. + items: + type: string + type: array + connectionStringInsecure: + description: The connection string sent insecurely. Conflicts + with connection_string, sas_uri and service_endpoint. + type: string + dataFactoryId: + description: The Data Factory ID in which to associate the Linked + Service with. Changing this forces a new resource. + type: string + description: + description: The description for the Data Factory Linked Service. + type: string + id: + description: The ID of the Data Factory Linked Service. + type: string + integrationRuntimeName: + description: The integration runtime reference to associate with + the Data Factory Linked Service. + type: string + keyVaultSasToken: + description: A key_vault_sas_token block as defined below. Use + this argument to store SAS Token in an existing Key Vault. It + needs an existing Key Vault Data Factory Linked Service. A sas_uri + is required. + properties: + linkedServiceName: + description: Specifies the name of an existing Key Vault Data + Factory Linked Service. + type: string + secretName: + description: Specifies the secret name in Azure Key Vault + that stores the SAS token. + type: string + type: object + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the Data Factory + Linked Service. 
+ type: object + x-kubernetes-map-type: granular + servicePrincipalId: + description: The service principal id in which to authenticate + against the Azure Blob Storage account. + type: string + servicePrincipalKey: + description: The service principal key in which to authenticate + against the AAzure Blob Storage account. + type: string + servicePrincipalLinkedKeyVaultKey: + description: A service_principal_linked_key_vault_key block as + defined below. Use this argument to store Service Principal + key in an existing Key Vault. It needs an existing Key Vault + Data Factory Linked Service. + properties: + linkedServiceName: + description: Specifies the name of an existing Key Vault Data + Factory Linked Service. + type: string + secretName: + description: Specifies the secret name in Azure Key Vault + that stores the Service Principal key. + type: string + type: object + storageKind: + description: Specify the kind of the storage account. Allowed + values are Storage, StorageV2, BlobStorage and BlockBlobStorage. + type: string + tenantId: + description: The tenant id or name in which to authenticate against + the Azure Blob Storage account. + type: string + useManagedIdentity: + description: Whether to use the Data Factory's managed identity + to authenticate against the Azure Blob Storage account. Incompatible + with service_principal_id and service_principal_key. + type: boolean + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/datafactory.azure.upbound.io_linkedserviceazuredatabricks.yaml b/package/crds/datafactory.azure.upbound.io_linkedserviceazuredatabricks.yaml index 5eb0eada3..798189803 100644 --- a/package/crds/datafactory.azure.upbound.io_linkedserviceazuredatabricks.yaml +++ b/package/crds/datafactory.azure.upbound.io_linkedserviceazuredatabricks.yaml @@ -976,3 +976,940 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: LinkedServiceAzureDatabricks is the Schema for the LinkedServiceAzureDatabrickss + API. Manages a Linked Service (connection) between Azure Databricks and + Azure Data Factory. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: LinkedServiceAzureDatabricksSpec defines the desired state + of LinkedServiceAzureDatabricks + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + accessTokenSecretRef: + description: Authenticate to ADB via an access token. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + adbDomain: + description: The domain URL of the databricks instance. + type: string + additionalProperties: + additionalProperties: + type: string + description: A map of additional properties to associate with + the Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + annotations: + description: List of tags that can be used for describing the + Data Factory Linked Service. + items: + type: string + type: array + dataFactoryId: + description: The Data Factory ID in which to associate the Linked + Service with. Changing this forces a new resource. 
+ type: string + dataFactoryIdRef: + description: Reference to a Factory in datafactory to populate + dataFactoryId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + dataFactoryIdSelector: + description: Selector for a Factory in datafactory to populate + dataFactoryId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + description: + description: The description for the Data Factory Linked Service. + type: string + existingClusterId: + description: The cluster_id of an existing cluster within the + linked ADB instance. + type: string + instancePool: + description: Leverages an instance pool within the linked ADB + instance as one instance_pool block defined below. + properties: + clusterVersion: + description: Spark version of a the cluster. + type: string + instancePoolId: + description: Identifier of the instance pool within the linked + ADB instance. + type: string + maxNumberOfWorkers: + description: The max number of worker nodes. Set this value + if you want to enable autoscaling between the min_number_of_workers + and this value. Omit this value to use a fixed number of + workers defined in the min_number_of_workers property. + type: number + minNumberOfWorkers: + description: The minimum number of worker nodes. Defaults + to 1. + type: number + type: object + integrationRuntimeName: + description: The integration runtime reference to associate with + the Data Factory Linked Service. + type: string + keyVaultPassword: + description: Authenticate to ADB via Azure Key Vault Linked Service + as defined in the key_vault_password block below. + properties: + linkedServiceName: + description: Specifies the name of an existing Key Vault Data + Factory Linked Service. + type: string + secretName: + description: Specifies the secret name in Azure Key Vault + that stores ADB access token. + type: string + type: object + msiWorkSpaceResourceId: + description: Authenticate to ADB via managed service identity. 
+ type: string + msiWorkSpaceResourceIdRef: + description: Reference to a Workspace in databricks to populate + msiWorkSpaceResourceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + msiWorkSpaceResourceIdSelector: + description: Selector for a Workspace in databricks to populate + msiWorkSpaceResourceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + newClusterConfig: + description: Creates new clusters within the linked ADB instance + as defined in the new_cluster_config block below. + properties: + clusterVersion: + description: Spark version of a the cluster. + type: string + customTags: + additionalProperties: + type: string + description: Tags for the cluster resource. + type: object + x-kubernetes-map-type: granular + driverNodeType: + description: Driver node type for the cluster. + type: string + initScripts: + description: User defined initialization scripts for the cluster. + items: + type: string + type: array + logDestination: + description: Location to deliver Spark driver, worker, and + event logs. + type: string + maxNumberOfWorkers: + description: Specifies the maximum number of worker nodes. + It should be between 1 and 25000. + type: number + minNumberOfWorkers: + description: Specifies the minimum number of worker nodes. + It should be between 1 and 25000. It defaults to 1. + type: number + nodeType: + description: Node type for the new cluster. + type: string + sparkConfig: + additionalProperties: + type: string + description: User-specified Spark configuration variables + key-value pairs. + type: object + x-kubernetes-map-type: granular + sparkEnvironmentVariables: + additionalProperties: + type: string + description: User-specified Spark environment variables key-value + pairs. + type: object + x-kubernetes-map-type: granular + type: object + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the Data Factory + Linked Service. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. 
It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + adbDomain: + description: The domain URL of the databricks instance. + type: string + additionalProperties: + additionalProperties: + type: string + description: A map of additional properties to associate with + the Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + annotations: + description: List of tags that can be used for describing the + Data Factory Linked Service. + items: + type: string + type: array + description: + description: The description for the Data Factory Linked Service. + type: string + existingClusterId: + description: The cluster_id of an existing cluster within the + linked ADB instance. + type: string + instancePool: + description: Leverages an instance pool within the linked ADB + instance as one instance_pool block defined below. + properties: + clusterVersion: + description: Spark version of a the cluster. + type: string + instancePoolId: + description: Identifier of the instance pool within the linked + ADB instance. + type: string + maxNumberOfWorkers: + description: The max number of worker nodes. Set this value + if you want to enable autoscaling between the min_number_of_workers + and this value. Omit this value to use a fixed number of + workers defined in the min_number_of_workers property. + type: number + minNumberOfWorkers: + description: The minimum number of worker nodes. 
Defaults + to 1. + type: number + type: object + integrationRuntimeName: + description: The integration runtime reference to associate with + the Data Factory Linked Service. + type: string + keyVaultPassword: + description: Authenticate to ADB via Azure Key Vault Linked Service + as defined in the key_vault_password block below. + properties: + linkedServiceName: + description: Specifies the name of an existing Key Vault Data + Factory Linked Service. + type: string + secretName: + description: Specifies the secret name in Azure Key Vault + that stores ADB access token. + type: string + type: object + msiWorkSpaceResourceId: + description: Authenticate to ADB via managed service identity. + type: string + msiWorkSpaceResourceIdRef: + description: Reference to a Workspace in databricks to populate + msiWorkSpaceResourceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + msiWorkSpaceResourceIdSelector: + description: Selector for a Workspace in databricks to populate + msiWorkSpaceResourceId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + newClusterConfig: + description: Creates new clusters within the linked ADB instance + as defined in the new_cluster_config block below. + properties: + clusterVersion: + description: Spark version of a the cluster. + type: string + customTags: + additionalProperties: + type: string + description: Tags for the cluster resource. + type: object + x-kubernetes-map-type: granular + driverNodeType: + description: Driver node type for the cluster. + type: string + initScripts: + description: User defined initialization scripts for the cluster. + items: + type: string + type: array + logDestination: + description: Location to deliver Spark driver, worker, and + event logs. + type: string + maxNumberOfWorkers: + description: Specifies the maximum number of worker nodes. + It should be between 1 and 25000. 
+ type: number + minNumberOfWorkers: + description: Specifies the minimum number of worker nodes. + It should be between 1 and 25000. It defaults to 1. + type: number + nodeType: + description: Node type for the new cluster. + type: string + sparkConfig: + additionalProperties: + type: string + description: User-specified Spark configuration variables + key-value pairs. + type: object + x-kubernetes-map-type: granular + sparkEnvironmentVariables: + additionalProperties: + type: string + description: User-specified Spark environment variables key-value + pairs. + type: object + x-kubernetes-map-type: granular + type: object + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the Data Factory + Linked Service. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.adbDomain is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.adbDomain) + || (has(self.initProvider) && has(self.initProvider.adbDomain))' + status: + description: LinkedServiceAzureDatabricksStatus defines the observed state + of LinkedServiceAzureDatabricks. + properties: + atProvider: + properties: + adbDomain: + description: The domain URL of the databricks instance. + type: string + additionalProperties: + additionalProperties: + type: string + description: A map of additional properties to associate with + the Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + annotations: + description: List of tags that can be used for describing the + Data Factory Linked Service. + items: + type: string + type: array + dataFactoryId: + description: The Data Factory ID in which to associate the Linked + Service with. Changing this forces a new resource. + type: string + description: + description: The description for the Data Factory Linked Service. + type: string + existingClusterId: + description: The cluster_id of an existing cluster within the + linked ADB instance. + type: string + id: + description: The ID of the Data Factory Linked Service. 
+ type: string + instancePool: + description: Leverages an instance pool within the linked ADB + instance as one instance_pool block defined below. + properties: + clusterVersion: + description: Spark version of a the cluster. + type: string + instancePoolId: + description: Identifier of the instance pool within the linked + ADB instance. + type: string + maxNumberOfWorkers: + description: The max number of worker nodes. Set this value + if you want to enable autoscaling between the min_number_of_workers + and this value. Omit this value to use a fixed number of + workers defined in the min_number_of_workers property. + type: number + minNumberOfWorkers: + description: The minimum number of worker nodes. Defaults + to 1. + type: number + type: object + integrationRuntimeName: + description: The integration runtime reference to associate with + the Data Factory Linked Service. + type: string + keyVaultPassword: + description: Authenticate to ADB via Azure Key Vault Linked Service + as defined in the key_vault_password block below. + properties: + linkedServiceName: + description: Specifies the name of an existing Key Vault Data + Factory Linked Service. + type: string + secretName: + description: Specifies the secret name in Azure Key Vault + that stores ADB access token. + type: string + type: object + msiWorkSpaceResourceId: + description: Authenticate to ADB via managed service identity. + type: string + newClusterConfig: + description: Creates new clusters within the linked ADB instance + as defined in the new_cluster_config block below. + properties: + clusterVersion: + description: Spark version of a the cluster. + type: string + customTags: + additionalProperties: + type: string + description: Tags for the cluster resource. + type: object + x-kubernetes-map-type: granular + driverNodeType: + description: Driver node type for the cluster. + type: string + initScripts: + description: User defined initialization scripts for the cluster. 
+ items: + type: string + type: array + logDestination: + description: Location to deliver Spark driver, worker, and + event logs. + type: string + maxNumberOfWorkers: + description: Specifies the maximum number of worker nodes. + It should be between 1 and 25000. + type: number + minNumberOfWorkers: + description: Specifies the minimum number of worker nodes. + It should be between 1 and 25000. It defaults to 1. + type: number + nodeType: + description: Node type for the new cluster. + type: string + sparkConfig: + additionalProperties: + type: string + description: User-specified Spark configuration variables + key-value pairs. + type: object + x-kubernetes-map-type: granular + sparkEnvironmentVariables: + additionalProperties: + type: string + description: User-specified Spark environment variables key-value + pairs. + type: object + x-kubernetes-map-type: granular + type: object + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the Data Factory + Linked Service. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
+ format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/datafactory.azure.upbound.io_linkedserviceazurefilestorages.yaml b/package/crds/datafactory.azure.upbound.io_linkedserviceazurefilestorages.yaml index 351dbcd59..bfbd60b5f 100644 --- a/package/crds/datafactory.azure.upbound.io_linkedserviceazurefilestorages.yaml +++ b/package/crds/datafactory.azure.upbound.io_linkedserviceazurefilestorages.yaml @@ -609,3 +609,588 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: LinkedServiceAzureFileStorage is the Schema for the LinkedServiceAzureFileStorages + API. 
Manages a Linked Service (connection) between an Azure File Storage + Account and Azure Data Factory. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: LinkedServiceAzureFileStorageSpec defines the desired state + of LinkedServiceAzureFileStorage + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + additionalProperties: + additionalProperties: + type: string + description: A map of additional properties to associate with + the Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + annotations: + description: List of tags that can be used for describing the + Data Factory Linked Service. 
+ items: + type: string + type: array + connectionStringSecretRef: + description: The connection string. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + dataFactoryId: + description: The Data Factory ID in which to associate the Linked + Service with. Changing this forces a new resource. + type: string + dataFactoryIdRef: + description: Reference to a Factory in datafactory to populate + dataFactoryId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + dataFactoryIdSelector: + description: Selector for a Factory in datafactory to populate + dataFactoryId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + description: + description: The description for the Data Factory Linked Service. + type: string + fileShare: + description: The name of the file share. + type: string + host: + description: The Host name of the server. + type: string + integrationRuntimeName: + description: The integration runtime reference to associate with + the Data Factory Linked Service. + type: string + keyVaultPassword: + description: A key_vault_password block as defined below. Use + this argument to store Azure File Storage password in an existing + Key Vault. It needs an existing Key Vault Data Factory Linked + Service. + properties: + linkedServiceName: + description: Specifies the name of an existing Key Vault Data + Factory Linked Service. + type: string + secretName: + description: Specifies the secret name in Azure Key Vault + that stores Azure File Storage password. + type: string + type: object + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the Data Factory + Linked Service. + type: object + x-kubernetes-map-type: granular + passwordSecretRef: + description: The password to log in the server. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. 
+ type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + userId: + description: The user ID to log in the server. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + additionalProperties: + additionalProperties: + type: string + description: A map of additional properties to associate with + the Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + annotations: + description: List of tags that can be used for describing the + Data Factory Linked Service. + items: + type: string + type: array + description: + description: The description for the Data Factory Linked Service. + type: string + fileShare: + description: The name of the file share. + type: string + host: + description: The Host name of the server. + type: string + integrationRuntimeName: + description: The integration runtime reference to associate with + the Data Factory Linked Service. + type: string + keyVaultPassword: + description: A key_vault_password block as defined below. Use + this argument to store Azure File Storage password in an existing + Key Vault. It needs an existing Key Vault Data Factory Linked + Service. 
+ properties: + linkedServiceName: + description: Specifies the name of an existing Key Vault Data + Factory Linked Service. + type: string + secretName: + description: Specifies the secret name in Azure Key Vault + that stores Azure File Storage password. + type: string + type: object + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the Data Factory + Linked Service. + type: object + x-kubernetes-map-type: granular + userId: + description: The user ID to log in the server. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.connectionStringSecretRef is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.connectionStringSecretRef)' + status: + description: LinkedServiceAzureFileStorageStatus defines the observed + state of LinkedServiceAzureFileStorage. + properties: + atProvider: + properties: + additionalProperties: + additionalProperties: + type: string + description: A map of additional properties to associate with + the Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + annotations: + description: List of tags that can be used for describing the + Data Factory Linked Service. + items: + type: string + type: array + dataFactoryId: + description: The Data Factory ID in which to associate the Linked + Service with. Changing this forces a new resource. + type: string + description: + description: The description for the Data Factory Linked Service. + type: string + fileShare: + description: The name of the file share. + type: string + host: + description: The Host name of the server. + type: string + id: + description: The ID of the Data Factory Linked Service. + type: string + integrationRuntimeName: + description: The integration runtime reference to associate with + the Data Factory Linked Service. + type: string + keyVaultPassword: + description: A key_vault_password block as defined below. Use + this argument to store Azure File Storage password in an existing + Key Vault. It needs an existing Key Vault Data Factory Linked + Service. + properties: + linkedServiceName: + description: Specifies the name of an existing Key Vault Data + Factory Linked Service. 
+ type: string + secretName: + description: Specifies the secret name in Azure Key Vault + that stores Azure File Storage password. + type: string + type: object + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the Data Factory + Linked Service. + type: object + x-kubernetes-map-type: granular + userId: + description: The user ID to log in the server. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/datafactory.azure.upbound.io_linkedserviceazurefunctions.yaml b/package/crds/datafactory.azure.upbound.io_linkedserviceazurefunctions.yaml index b8013303f..1fa899c28 100644 --- a/package/crds/datafactory.azure.upbound.io_linkedserviceazurefunctions.yaml +++ b/package/crds/datafactory.azure.upbound.io_linkedserviceazurefunctions.yaml @@ -579,3 +579,558 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: LinkedServiceAzureFunction is the Schema for the LinkedServiceAzureFunctions + API. Manages a Linked Service (connection) between an Azure Function Account + and Azure Data Factory. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: LinkedServiceAzureFunctionSpec defines the desired state + of LinkedServiceAzureFunction + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + additionalProperties: + additionalProperties: + type: string + description: A map of additional properties to associate with + the Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + annotations: + description: List of tags that can be used for describing the + Data Factory Linked Service. + items: + type: string + type: array + dataFactoryId: + description: The Data Factory ID in which to associate the Linked + Service with. Changing this forces a new resource. + type: string + dataFactoryIdRef: + description: Reference to a Factory in datafactory to populate + dataFactoryId. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + dataFactoryIdSelector: + description: Selector for a Factory in datafactory to populate + dataFactoryId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + description: + description: The description for the Data Factory Linked Service. + type: string + integrationRuntimeName: + description: The integration runtime reference to associate with + the Data Factory Linked Service. + type: string + keySecretRef: + description: The system key of the Azure Function. Exactly one + of either key or key_vault_key is required + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + keyVaultKey: + description: A key_vault_key block as defined below. Use this + Argument to store the system key of the Azure Function in an + existing Key Vault. It needs an existing Key Vault Data Factory + Linked Service. Exactly one of either key or key_vault_key is + required. + properties: + linkedServiceName: + description: Specifies the name of an existing Key Vault Data + Factory Linked Service. + type: string + secretName: + description: Specifies the secret name in Azure Key Vault + that stores the system key of the Azure Function. + type: string + type: object + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the Data Factory + Linked Service. + type: object + x-kubernetes-map-type: granular + url: + description: The url of the Azure Function. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. 
+ The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + additionalProperties: + additionalProperties: + type: string + description: A map of additional properties to associate with + the Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + annotations: + description: List of tags that can be used for describing the + Data Factory Linked Service. + items: + type: string + type: array + description: + description: The description for the Data Factory Linked Service. + type: string + integrationRuntimeName: + description: The integration runtime reference to associate with + the Data Factory Linked Service. + type: string + keyVaultKey: + description: A key_vault_key block as defined below. Use this + Argument to store the system key of the Azure Function in an + existing Key Vault. It needs an existing Key Vault Data Factory + Linked Service. Exactly one of either key or key_vault_key is + required. + properties: + linkedServiceName: + description: Specifies the name of an existing Key Vault Data + Factory Linked Service. + type: string + secretName: + description: Specifies the secret name in Azure Key Vault + that stores the system key of the Azure Function. + type: string + type: object + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the Data Factory + Linked Service. + type: object + x-kubernetes-map-type: granular + url: + description: The url of the Azure Function. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. 
+ ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.url is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.url) + || (has(self.initProvider) && has(self.initProvider.url))' + status: + description: LinkedServiceAzureFunctionStatus defines the observed state + of LinkedServiceAzureFunction. + properties: + atProvider: + properties: + additionalProperties: + additionalProperties: + type: string + description: A map of additional properties to associate with + the Data Factory Linked Service. 
+ type: object + x-kubernetes-map-type: granular + annotations: + description: List of tags that can be used for describing the + Data Factory Linked Service. + items: + type: string + type: array + dataFactoryId: + description: The Data Factory ID in which to associate the Linked + Service with. Changing this forces a new resource. + type: string + description: + description: The description for the Data Factory Linked Service. + type: string + id: + description: The ID of the Data Factory Linked Service. + type: string + integrationRuntimeName: + description: The integration runtime reference to associate with + the Data Factory Linked Service. + type: string + keyVaultKey: + description: A key_vault_key block as defined below. Use this + Argument to store the system key of the Azure Function in an + existing Key Vault. It needs an existing Key Vault Data Factory + Linked Service. Exactly one of either key or key_vault_key is + required. + properties: + linkedServiceName: + description: Specifies the name of an existing Key Vault Data + Factory Linked Service. + type: string + secretName: + description: Specifies the secret name in Azure Key Vault + that stores the system key of the Azure Function. + type: string + type: object + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the Data Factory + Linked Service. + type: object + x-kubernetes-map-type: granular + url: + description: The url of the Azure Function. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/datafactory.azure.upbound.io_linkedserviceazuresqldatabases.yaml b/package/crds/datafactory.azure.upbound.io_linkedserviceazuresqldatabases.yaml index 7df2dc24a..ccd0f659e 100644 --- a/package/crds/datafactory.azure.upbound.io_linkedserviceazuresqldatabases.yaml +++ b/package/crds/datafactory.azure.upbound.io_linkedserviceazuresqldatabases.yaml @@ -671,3 +671,644 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: LinkedServiceAzureSQLDatabase is the Schema for the LinkedServiceAzureSQLDatabases + API. Manages a Linked Service (connection) between Azure SQL Database and + Azure Data Factory. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: LinkedServiceAzureSQLDatabaseSpec defines the desired state + of LinkedServiceAzureSQLDatabase + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + additionalProperties: + additionalProperties: + type: string + description: A map of additional properties to associate with + the Data Factory Linked Service Azure SQL Database. + type: object + x-kubernetes-map-type: granular + annotations: + description: List of tags that can be used for describing the + Data Factory Linked Service Azure SQL Database. + items: + type: string + type: array + connectionString: + description: The connection string in which to authenticate with + Azure SQL Database. Exactly one of either connection_string + or key_vault_connection_string is required. + type: string + dataFactoryId: + description: The Data Factory ID in which to associate the Linked + Service with. Changing this forces a new resource. + type: string + dataFactoryIdRef: + description: Reference to a Factory in datafactory to populate + dataFactoryId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + dataFactoryIdSelector: + description: Selector for a Factory in datafactory to populate + dataFactoryId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + description: + description: The description for the Data Factory Linked Service + Azure SQL Database. + type: string + integrationRuntimeName: + description: The integration runtime reference to associate with + the Data Factory Linked Service Azure SQL Database. + type: string + keyVaultConnectionString: + description: A key_vault_connection_string block as defined below. + Use this argument to store Azure SQL Database connection string + in an existing Key Vault. It needs an existing Key Vault Data + Factory Linked Service. Exactly one of either connection_string + or key_vault_connection_string is required. + properties: + linkedServiceName: + description: Specifies the name of an existing Key Vault Data + Factory Linked Service. + type: string + secretName: + description: Specifies the secret name in Azure Key Vault + that stores SQL Server connection string. + type: string + type: object + keyVaultPassword: + description: A key_vault_password block as defined below. Use + this argument to store SQL Server password in an existing Key + Vault. It needs an existing Key Vault Data Factory Linked Service. + properties: + linkedServiceName: + description: Specifies the name of an existing Key Vault Data + Factory Linked Service. + type: string + secretName: + description: Specifies the secret name in Azure Key Vault + that stores SQL Server password. + type: string + type: object + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the Data Factory + Linked Service Azure SQL Database. + type: object + x-kubernetes-map-type: granular + servicePrincipalId: + description: The service principal id in which to authenticate + against the Azure SQL Database. Required if service_principal_key + is set. + type: string + servicePrincipalKey: + description: The service principal key in which to authenticate + against the Azure SQL Database. 
Required if service_principal_id + is set. + type: string + tenantId: + description: The tenant id or name in which to authenticate against + the Azure SQL Database. + type: string + useManagedIdentity: + description: Whether to use the Data Factory's managed identity + to authenticate against the Azure SQL Database. Incompatible + with service_principal_id and service_principal_key + type: boolean + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + additionalProperties: + additionalProperties: + type: string + description: A map of additional properties to associate with + the Data Factory Linked Service Azure SQL Database. + type: object + x-kubernetes-map-type: granular + annotations: + description: List of tags that can be used for describing the + Data Factory Linked Service Azure SQL Database. + items: + type: string + type: array + connectionString: + description: The connection string in which to authenticate with + Azure SQL Database. Exactly one of either connection_string + or key_vault_connection_string is required. + type: string + description: + description: The description for the Data Factory Linked Service + Azure SQL Database. + type: string + integrationRuntimeName: + description: The integration runtime reference to associate with + the Data Factory Linked Service Azure SQL Database. 
+ type: string + keyVaultConnectionString: + description: A key_vault_connection_string block as defined below. + Use this argument to store Azure SQL Database connection string + in an existing Key Vault. It needs an existing Key Vault Data + Factory Linked Service. Exactly one of either connection_string + or key_vault_connection_string is required. + properties: + linkedServiceName: + description: Specifies the name of an existing Key Vault Data + Factory Linked Service. + type: string + secretName: + description: Specifies the secret name in Azure Key Vault + that stores SQL Server connection string. + type: string + type: object + keyVaultPassword: + description: A key_vault_password block as defined below. Use + this argument to store SQL Server password in an existing Key + Vault. It needs an existing Key Vault Data Factory Linked Service. + properties: + linkedServiceName: + description: Specifies the name of an existing Key Vault Data + Factory Linked Service. + type: string + secretName: + description: Specifies the secret name in Azure Key Vault + that stores SQL Server password. + type: string + type: object + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the Data Factory + Linked Service Azure SQL Database. + type: object + x-kubernetes-map-type: granular + servicePrincipalId: + description: The service principal id in which to authenticate + against the Azure SQL Database. Required if service_principal_key + is set. + type: string + servicePrincipalKey: + description: The service principal key in which to authenticate + against the Azure SQL Database. Required if service_principal_id + is set. + type: string + tenantId: + description: The tenant id or name in which to authenticate against + the Azure SQL Database. + type: string + useManagedIdentity: + description: Whether to use the Data Factory's managed identity + to authenticate against the Azure SQL Database. 
Incompatible + with service_principal_id and service_principal_key + type: boolean + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. 
+ properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: LinkedServiceAzureSQLDatabaseStatus defines the observed + state of LinkedServiceAzureSQLDatabase. + properties: + atProvider: + properties: + additionalProperties: + additionalProperties: + type: string + description: A map of additional properties to associate with + the Data Factory Linked Service Azure SQL Database. 
+ type: object + x-kubernetes-map-type: granular + annotations: + description: List of tags that can be used for describing the + Data Factory Linked Service Azure SQL Database. + items: + type: string + type: array + connectionString: + description: The connection string in which to authenticate with + Azure SQL Database. Exactly one of either connection_string + or key_vault_connection_string is required. + type: string + dataFactoryId: + description: The Data Factory ID in which to associate the Linked + Service with. Changing this forces a new resource. + type: string + description: + description: The description for the Data Factory Linked Service + Azure SQL Database. + type: string + id: + description: The ID of the Data Factory Azure SQL Database Linked + Service. + type: string + integrationRuntimeName: + description: The integration runtime reference to associate with + the Data Factory Linked Service Azure SQL Database. + type: string + keyVaultConnectionString: + description: A key_vault_connection_string block as defined below. + Use this argument to store Azure SQL Database connection string + in an existing Key Vault. It needs an existing Key Vault Data + Factory Linked Service. Exactly one of either connection_string + or key_vault_connection_string is required. + properties: + linkedServiceName: + description: Specifies the name of an existing Key Vault Data + Factory Linked Service. + type: string + secretName: + description: Specifies the secret name in Azure Key Vault + that stores SQL Server connection string. + type: string + type: object + keyVaultPassword: + description: A key_vault_password block as defined below. Use + this argument to store SQL Server password in an existing Key + Vault. It needs an existing Key Vault Data Factory Linked Service. + properties: + linkedServiceName: + description: Specifies the name of an existing Key Vault Data + Factory Linked Service. 
+ type: string + secretName: + description: Specifies the secret name in Azure Key Vault + that stores SQL Server password. + type: string + type: object + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the Data Factory + Linked Service Azure SQL Database. + type: object + x-kubernetes-map-type: granular + servicePrincipalId: + description: The service principal id in which to authenticate + against the Azure SQL Database. Required if service_principal_key + is set. + type: string + servicePrincipalKey: + description: The service principal key in which to authenticate + against the Azure SQL Database. Required if service_principal_id + is set. + type: string + tenantId: + description: The tenant id or name in which to authenticate against + the Azure SQL Database. + type: string + useManagedIdentity: + description: Whether to use the Data Factory's managed identity + to authenticate against the Azure SQL Database. Incompatible + with service_principal_id and service_principal_key + type: boolean + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
+ format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/datafactory.azure.upbound.io_linkedserviceodata.yaml b/package/crds/datafactory.azure.upbound.io_linkedserviceodata.yaml index 0b396f0cb..6fcc9f3fc 100644 --- a/package/crds/datafactory.azure.upbound.io_linkedserviceodata.yaml +++ b/package/crds/datafactory.azure.upbound.io_linkedserviceodata.yaml @@ -558,3 +558,537 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: LinkedServiceOData is the Schema for the LinkedServiceODatas + API. Manages a Linked Service (connection) between a Database and Azure + Data Factory through OData protocol. 
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: LinkedServiceODataSpec defines the desired state of LinkedServiceOData + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + additionalProperties: + additionalProperties: + type: string + description: A map of additional properties to associate with + the Data Factory Linked Service OData. + type: object + x-kubernetes-map-type: granular + annotations: + description: List of tags that can be used for describing the + Data Factory Linked Service OData. + items: + type: string + type: array + basicAuthentication: + description: A basic_authentication block as defined below. 
+ properties: + passwordSecretRef: + description: The password associated with the username, which + can be used to authenticate to the OData endpoint. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + username: + description: The username which can be used to authenticate + to the OData endpoint. + type: string + required: + - passwordSecretRef + type: object + dataFactoryId: + description: The Data Factory ID in which to associate the Linked + Service with. Changing this forces a new resource. + type: string + dataFactoryIdRef: + description: Reference to a Factory in datafactory to populate + dataFactoryId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + dataFactoryIdSelector: + description: Selector for a Factory in datafactory to populate + dataFactoryId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + description: + description: The description for the Data Factory Linked Service + OData. + type: string + integrationRuntimeName: + description: The integration runtime reference to associate with + the Data Factory Linked Service OData. + type: string + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the Data Factory + Linked Service OData. + type: object + x-kubernetes-map-type: granular + url: + description: The URL of the OData service endpoint. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + additionalProperties: + additionalProperties: + type: string + description: A map of additional properties to associate with + the Data Factory Linked Service OData. + type: object + x-kubernetes-map-type: granular + annotations: + description: List of tags that can be used for describing the + Data Factory Linked Service OData. + items: + type: string + type: array + basicAuthentication: + description: A basic_authentication block as defined below. + properties: + username: + description: The username which can be used to authenticate + to the OData endpoint. + type: string + type: object + description: + description: The description for the Data Factory Linked Service + OData. + type: string + integrationRuntimeName: + description: The integration runtime reference to associate with + the Data Factory Linked Service OData. + type: string + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the Data Factory + Linked Service OData. + type: object + x-kubernetes-map-type: granular + url: + description: The URL of the OData service endpoint. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.url is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.url) + || (has(self.initProvider) && has(self.initProvider.url))' + status: + description: LinkedServiceODataStatus defines the observed state of LinkedServiceOData. + properties: + atProvider: + properties: + additionalProperties: + additionalProperties: + type: string + description: A map of additional properties to associate with + the Data Factory Linked Service OData. + type: object + x-kubernetes-map-type: granular + annotations: + description: List of tags that can be used for describing the + Data Factory Linked Service OData. + items: + type: string + type: array + basicAuthentication: + description: A basic_authentication block as defined below. + properties: + username: + description: The username which can be used to authenticate + to the OData endpoint. 
+ type: string + type: object + dataFactoryId: + description: The Data Factory ID in which to associate the Linked + Service with. Changing this forces a new resource. + type: string + description: + description: The description for the Data Factory Linked Service + OData. + type: string + id: + description: The ID of the Data Factory OData Linked Service. + type: string + integrationRuntimeName: + description: The integration runtime reference to associate with + the Data Factory Linked Service OData. + type: string + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the Data Factory + Linked Service OData. + type: object + x-kubernetes-map-type: granular + url: + description: The URL of the OData service endpoint. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/datafactory.azure.upbound.io_linkedserviceodbcs.yaml b/package/crds/datafactory.azure.upbound.io_linkedserviceodbcs.yaml index 1f2f7aa38..021cc5404 100644 --- a/package/crds/datafactory.azure.upbound.io_linkedserviceodbcs.yaml +++ b/package/crds/datafactory.azure.upbound.io_linkedserviceodbcs.yaml @@ -561,3 +561,540 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: LinkedServiceOdbc is the Schema for the LinkedServiceOdbcs API. + Manages a Linked Service (connection) between a Database and Azure Data + Factory through ODBC protocol. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: LinkedServiceOdbcSpec defines the desired state of LinkedServiceOdbc + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + additionalProperties: + additionalProperties: + type: string + description: A map of additional properties to associate with + the Data Factory Linked Service ODBC. + type: object + x-kubernetes-map-type: granular + annotations: + description: List of tags that can be used for describing the + Data Factory Linked Service ODBC. + items: + type: string + type: array + basicAuthentication: + description: A basic_authentication block as defined below. + properties: + passwordSecretRef: + description: The password associated with the username, which + can be used to authenticate to the ODBC endpoint. + properties: + key: + description: The key to select. 
+ type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + username: + description: The username which can be used to authenticate + to the ODBC endpoint. + type: string + required: + - passwordSecretRef + type: object + connectionString: + description: The connection string in which to authenticate with + ODBC. + type: string + dataFactoryId: + description: The Data Factory ID in which to associate the Linked + Service with. Changing this forces a new resource. + type: string + dataFactoryIdRef: + description: Reference to a Factory in datafactory to populate + dataFactoryId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + dataFactoryIdSelector: + description: Selector for a Factory in datafactory to populate + dataFactoryId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + description: + description: The description for the Data Factory Linked Service + ODBC. + type: string + integrationRuntimeName: + description: The integration runtime reference to associate with + the Data Factory Linked Service ODBC. + type: string + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the Data Factory + Linked Service ODBC. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + additionalProperties: + additionalProperties: + type: string + description: A map of additional properties to associate with + the Data Factory Linked Service ODBC. + type: object + x-kubernetes-map-type: granular + annotations: + description: List of tags that can be used for describing the + Data Factory Linked Service ODBC. + items: + type: string + type: array + basicAuthentication: + description: A basic_authentication block as defined below. + properties: + username: + description: The username which can be used to authenticate + to the ODBC endpoint. + type: string + type: object + connectionString: + description: The connection string in which to authenticate with + ODBC. + type: string + description: + description: The description for the Data Factory Linked Service + ODBC. + type: string + integrationRuntimeName: + description: The integration runtime reference to associate with + the Data Factory Linked Service ODBC. + type: string + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the Data Factory + Linked Service ODBC. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.connectionString is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.connectionString) + || (has(self.initProvider) && has(self.initProvider.connectionString))' + status: + description: LinkedServiceOdbcStatus defines the observed state of LinkedServiceOdbc. + properties: + atProvider: + properties: + additionalProperties: + additionalProperties: + type: string + description: A map of additional properties to associate with + the Data Factory Linked Service ODBC. + type: object + x-kubernetes-map-type: granular + annotations: + description: List of tags that can be used for describing the + Data Factory Linked Service ODBC. + items: + type: string + type: array + basicAuthentication: + description: A basic_authentication block as defined below. + properties: + username: + description: The username which can be used to authenticate + to the ODBC endpoint. 
+ type: string + type: object + connectionString: + description: The connection string in which to authenticate with + ODBC. + type: string + dataFactoryId: + description: The Data Factory ID in which to associate the Linked + Service with. Changing this forces a new resource. + type: string + description: + description: The description for the Data Factory Linked Service + ODBC. + type: string + id: + description: The ID of the Data Factory ODBC Linked Service. + type: string + integrationRuntimeName: + description: The integration runtime reference to associate with + the Data Factory Linked Service ODBC. + type: string + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the Data Factory + Linked Service ODBC. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. 
At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/datafactory.azure.upbound.io_linkedservicesnowflakes.yaml b/package/crds/datafactory.azure.upbound.io_linkedservicesnowflakes.yaml index 2474a3f03..2e218f4b2 100644 --- a/package/crds/datafactory.azure.upbound.io_linkedservicesnowflakes.yaml +++ b/package/crds/datafactory.azure.upbound.io_linkedservicesnowflakes.yaml @@ -709,3 +709,688 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: LinkedServiceSnowflake is the Schema for the LinkedServiceSnowflakes + API. Manages a Linked Service (connection) between Snowflake and Azure Data + Factory. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: LinkedServiceSnowflakeSpec defines the desired state of LinkedServiceSnowflake + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + additionalProperties: + additionalProperties: + type: string + description: A map of additional properties to associate with + the Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + annotations: + description: List of tags that can be used for describing the + Data Factory Linked Service. + items: + type: string + type: array + connectionString: + description: The connection string in which to authenticate with + Snowflake. + type: string + dataFactoryId: + description: The Data Factory ID in which to associate the Linked + Service with. Changing this forces a new resource. 
+ type: string + dataFactoryIdRef: + description: Reference to a Factory in datafactory to populate + dataFactoryId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + dataFactoryIdSelector: + description: Selector for a Factory in datafactory to populate + dataFactoryId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + description: + description: The description for the Data Factory Linked Service. + type: string + integrationRuntimeName: + description: The integration runtime reference to associate with + the Data Factory Linked Service. + type: string + keyVaultPassword: + description: A key_vault_password block as defined below. Use + this argument to store Snowflake password in an existing Key + Vault. It needs an existing Key Vault Data Factory Linked Service. + properties: + linkedServiceName: + description: Specifies the name of an existing Key Vault Data + Factory Linked Service. + type: string + linkedServiceNameRef: + description: Reference to a LinkedServiceKeyVault in datafactory + to populate linkedServiceName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + linkedServiceNameSelector: + description: Selector for a LinkedServiceKeyVault in datafactory + to populate linkedServiceName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + secretName: + description: Specifies the secret name in Azure Key Vault + that stores Snowflake password. + type: string + type: object + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the Data Factory + Linked Service. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. 
The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + additionalProperties: + additionalProperties: + type: string + description: A map of additional properties to associate with + the Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + annotations: + description: List of tags that can be used for describing the + Data Factory Linked Service. + items: + type: string + type: array + connectionString: + description: The connection string in which to authenticate with + Snowflake. + type: string + description: + description: The description for the Data Factory Linked Service. + type: string + integrationRuntimeName: + description: The integration runtime reference to associate with + the Data Factory Linked Service. + type: string + keyVaultPassword: + description: A key_vault_password block as defined below. Use + this argument to store Snowflake password in an existing Key + Vault. It needs an existing Key Vault Data Factory Linked Service. + properties: + linkedServiceName: + description: Specifies the name of an existing Key Vault Data + Factory Linked Service. + type: string + linkedServiceNameRef: + description: Reference to a LinkedServiceKeyVault in datafactory + to populate linkedServiceName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + linkedServiceNameSelector: + description: Selector for a LinkedServiceKeyVault in datafactory + to populate linkedServiceName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + secretName: + description: Specifies the secret name in Azure Key Vault + that stores Snowflake password. 
+ type: string + type: object + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the Data Factory + Linked Service. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.connectionString is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.connectionString) + || (has(self.initProvider) && has(self.initProvider.connectionString))' + status: + description: LinkedServiceSnowflakeStatus defines the observed state of + LinkedServiceSnowflake. + properties: + atProvider: + properties: + additionalProperties: + additionalProperties: + type: string + description: A map of additional properties to associate with + the Data Factory Linked Service. + type: object + x-kubernetes-map-type: granular + annotations: + description: List of tags that can be used for describing the + Data Factory Linked Service. + items: + type: string + type: array + connectionString: + description: The connection string in which to authenticate with + Snowflake. + type: string + dataFactoryId: + description: The Data Factory ID in which to associate the Linked + Service with. Changing this forces a new resource. + type: string + description: + description: The description for the Data Factory Linked Service. + type: string + id: + description: The ID of the Data Factory Snowflake Linked Service. + type: string + integrationRuntimeName: + description: The integration runtime reference to associate with + the Data Factory Linked Service. + type: string + keyVaultPassword: + description: A key_vault_password block as defined below. Use + this argument to store Snowflake password in an existing Key + Vault. It needs an existing Key Vault Data Factory Linked Service. + properties: + linkedServiceName: + description: Specifies the name of an existing Key Vault Data + Factory Linked Service. 
+ type: string + secretName: + description: Specifies the secret name in Azure Key Vault + that stores Snowflake password. + type: string + type: object + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the Data Factory + Linked Service. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/datafactory.azure.upbound.io_linkedservicesqlservers.yaml b/package/crds/datafactory.azure.upbound.io_linkedservicesqlservers.yaml index d1049d2eb..319435d86 100644 --- a/package/crds/datafactory.azure.upbound.io_linkedservicesqlservers.yaml +++ b/package/crds/datafactory.azure.upbound.io_linkedservicesqlservers.yaml @@ -773,3 +773,746 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: LinkedServiceSQLServer is the Schema for the LinkedServiceSQLServers + API. Manages a Linked Service (connection) between a SQL Server and Azure + Data Factory. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: LinkedServiceSQLServerSpec defines the desired state of LinkedServiceSQLServer + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + additionalProperties: + additionalProperties: + type: string + description: A map of additional properties to associate with + the Data Factory Linked Service SQL Server. + type: object + x-kubernetes-map-type: granular + annotations: + description: List of tags that can be used for describing the + Data Factory Linked Service SQL Server. + items: + type: string + type: array + connectionString: + description: The connection string in which to authenticate with + the SQL Server. Exactly one of either connection_string or key_vault_connection_string + is required. + type: string + dataFactoryId: + description: The Data Factory ID in which to associate the Linked + Service with. Changing this forces a new resource. + type: string + dataFactoryIdRef: + description: Reference to a Factory in datafactory to populate + dataFactoryId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + dataFactoryIdSelector: + description: Selector for a Factory in datafactory to populate + dataFactoryId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + description: + description: The description for the Data Factory Linked Service + SQL Server. + type: string + integrationRuntimeName: + description: The integration runtime reference to associate with + the Data Factory Linked Service SQL Server. + type: string + keyVaultConnectionString: + description: A key_vault_connection_string block as defined below. + Use this argument to store SQL Server connection string in an + existing Key Vault. It needs an existing Key Vault Data Factory + Linked Service. Exactly one of either connection_string or key_vault_connection_string + is required. + properties: + linkedServiceName: + description: Specifies the name of an existing Key Vault Data + Factory Linked Service. + type: string + secretName: + description: Specifies the secret name in Azure Key Vault + that stores SQL Server connection string. + type: string + type: object + keyVaultPassword: + description: A key_vault_password block as defined below. Use + this argument to store SQL Server password in an existing Key + Vault. It needs an existing Key Vault Data Factory Linked Service. + properties: + linkedServiceName: + description: Specifies the name of an existing Key Vault Data + Factory Linked Service. + type: string + linkedServiceNameRef: + description: Reference to a LinkedServiceKeyVault in datafactory + to populate linkedServiceName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + linkedServiceNameSelector: + description: Selector for a LinkedServiceKeyVault in datafactory + to populate linkedServiceName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + secretName: + description: Specifies the secret name in Azure Key Vault + that stores SQL Server password. + type: string + type: object + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the Data Factory + Linked Service SQL Server. 
+ type: object + x-kubernetes-map-type: granular + userName: + description: The on-premises Windows authentication user name. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + additionalProperties: + additionalProperties: + type: string + description: A map of additional properties to associate with + the Data Factory Linked Service SQL Server. + type: object + x-kubernetes-map-type: granular + annotations: + description: List of tags that can be used for describing the + Data Factory Linked Service SQL Server. + items: + type: string + type: array + connectionString: + description: The connection string in which to authenticate with + the SQL Server. Exactly one of either connection_string or key_vault_connection_string + is required. + type: string + description: + description: The description for the Data Factory Linked Service + SQL Server. + type: string + integrationRuntimeName: + description: The integration runtime reference to associate with + the Data Factory Linked Service SQL Server. + type: string + keyVaultConnectionString: + description: A key_vault_connection_string block as defined below. + Use this argument to store SQL Server connection string in an + existing Key Vault. It needs an existing Key Vault Data Factory + Linked Service. 
Exactly one of either connection_string or key_vault_connection_string + is required. + properties: + linkedServiceName: + description: Specifies the name of an existing Key Vault Data + Factory Linked Service. + type: string + secretName: + description: Specifies the secret name in Azure Key Vault + that stores SQL Server connection string. + type: string + type: object + keyVaultPassword: + description: A key_vault_password block as defined below. Use + this argument to store SQL Server password in an existing Key + Vault. It needs an existing Key Vault Data Factory Linked Service. + properties: + linkedServiceName: + description: Specifies the name of an existing Key Vault Data + Factory Linked Service. + type: string + linkedServiceNameRef: + description: Reference to a LinkedServiceKeyVault in datafactory + to populate linkedServiceName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + linkedServiceNameSelector: + description: Selector for a LinkedServiceKeyVault in datafactory + to populate linkedServiceName. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + secretName: + description: Specifies the secret name in Azure Key Vault + that stores SQL Server password. + type: string + type: object + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the Data Factory + Linked Service SQL Server. + type: object + x-kubernetes-map-type: granular + userName: + description: The on-premises Windows authentication user name. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. 
Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: LinkedServiceSQLServerStatus defines the observed state of + LinkedServiceSQLServer. + properties: + atProvider: + properties: + additionalProperties: + additionalProperties: + type: string + description: A map of additional properties to associate with + the Data Factory Linked Service SQL Server. + type: object + x-kubernetes-map-type: granular + annotations: + description: List of tags that can be used for describing the + Data Factory Linked Service SQL Server. + items: + type: string + type: array + connectionString: + description: The connection string in which to authenticate with + the SQL Server. 
Exactly one of either connection_string or key_vault_connection_string + is required. + type: string + dataFactoryId: + description: The Data Factory ID in which to associate the Linked + Service with. Changing this forces a new resource. + type: string + description: + description: The description for the Data Factory Linked Service + SQL Server. + type: string + id: + description: The ID of the Data Factory SQL Server Linked Service. + type: string + integrationRuntimeName: + description: The integration runtime reference to associate with + the Data Factory Linked Service SQL Server. + type: string + keyVaultConnectionString: + description: A key_vault_connection_string block as defined below. + Use this argument to store SQL Server connection string in an + existing Key Vault. It needs an existing Key Vault Data Factory + Linked Service. Exactly one of either connection_string or key_vault_connection_string + is required. + properties: + linkedServiceName: + description: Specifies the name of an existing Key Vault Data + Factory Linked Service. + type: string + secretName: + description: Specifies the secret name in Azure Key Vault + that stores SQL Server connection string. + type: string + type: object + keyVaultPassword: + description: A key_vault_password block as defined below. Use + this argument to store SQL Server password in an existing Key + Vault. It needs an existing Key Vault Data Factory Linked Service. + properties: + linkedServiceName: + description: Specifies the name of an existing Key Vault Data + Factory Linked Service. + type: string + secretName: + description: Specifies the secret name in Azure Key Vault + that stores SQL Server password. + type: string + type: object + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the Data Factory + Linked Service SQL Server. 
+ type: object + x-kubernetes-map-type: granular + userName: + description: The on-premises Windows authentication user name. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/datafactory.azure.upbound.io_linkedservicesynapses.yaml b/package/crds/datafactory.azure.upbound.io_linkedservicesynapses.yaml index 905bdfed3..919e49f45 100644 --- a/package/crds/datafactory.azure.upbound.io_linkedservicesynapses.yaml +++ b/package/crds/datafactory.azure.upbound.io_linkedservicesynapses.yaml @@ -712,3 +712,691 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: LinkedServiceSynapse is the Schema for the LinkedServiceSynapses + API. Manages a Linked Service (connection) between Synapse and Azure Data + Factory. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: LinkedServiceSynapseSpec defines the desired state of LinkedServiceSynapse + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + additionalProperties: + additionalProperties: + type: string + description: A map of additional properties to associate with + the Data Factory Linked Service Synapse. + type: object + x-kubernetes-map-type: granular + annotations: + description: List of tags that can be used for describing the + Data Factory Linked Service Synapse. + items: + type: string + type: array + connectionString: + description: The connection string in which to authenticate with + the Synapse. + type: string + dataFactoryId: + description: The Data Factory ID in which to associate the Linked + Service with. Changing this forces a new resource. + type: string + dataFactoryIdRef: + description: Reference to a Factory in datafactory to populate + dataFactoryId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + dataFactoryIdSelector: + description: Selector for a Factory in datafactory to populate + dataFactoryId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + description: + description: The description for the Data Factory Linked Service + Synapse. 
+ type: string + integrationRuntimeName: + description: The integration runtime reference to associate with + the Data Factory Linked Service Synapse. + type: string + keyVaultPassword: + description: A key_vault_password block as defined below. Use + this argument to store Synapse password in an existing Key Vault. + It needs an existing Key Vault Data Factory Linked Service. + properties: + linkedServiceName: + description: Specifies the name of an existing Key Vault Data + Factory Linked Service. + type: string + linkedServiceNameRef: + description: Reference to a LinkedServiceKeyVault in datafactory + to populate linkedServiceName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + linkedServiceNameSelector: + description: Selector for a LinkedServiceKeyVault in datafactory + to populate linkedServiceName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + secretName: + description: Specifies the secret name in Azure Key Vault + that stores Synapse password. + type: string + type: object + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the Data Factory + Linked Service Synapse. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + additionalProperties: + additionalProperties: + type: string + description: A map of additional properties to associate with + the Data Factory Linked Service Synapse. 
+ type: object + x-kubernetes-map-type: granular + annotations: + description: List of tags that can be used for describing the + Data Factory Linked Service Synapse. + items: + type: string + type: array + connectionString: + description: The connection string in which to authenticate with + the Synapse. + type: string + description: + description: The description for the Data Factory Linked Service + Synapse. + type: string + integrationRuntimeName: + description: The integration runtime reference to associate with + the Data Factory Linked Service Synapse. + type: string + keyVaultPassword: + description: A key_vault_password block as defined below. Use + this argument to store Synapse password in an existing Key Vault. + It needs an existing Key Vault Data Factory Linked Service. + properties: + linkedServiceName: + description: Specifies the name of an existing Key Vault Data + Factory Linked Service. + type: string + linkedServiceNameRef: + description: Reference to a LinkedServiceKeyVault in datafactory + to populate linkedServiceName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + linkedServiceNameSelector: + description: Selector for a LinkedServiceKeyVault in datafactory + to populate linkedServiceName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + secretName: + description: Specifies the secret name in Azure Key Vault + that stores Synapse password. + type: string + type: object + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the Data Factory + Linked Service Synapse. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. 
+ This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.connectionString is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.connectionString) + || (has(self.initProvider) && has(self.initProvider.connectionString))' + status: + description: LinkedServiceSynapseStatus defines the observed state of + LinkedServiceSynapse. 
+ properties: + atProvider: + properties: + additionalProperties: + additionalProperties: + type: string + description: A map of additional properties to associate with + the Data Factory Linked Service Synapse. + type: object + x-kubernetes-map-type: granular + annotations: + description: List of tags that can be used for describing the + Data Factory Linked Service Synapse. + items: + type: string + type: array + connectionString: + description: The connection string in which to authenticate with + the Synapse. + type: string + dataFactoryId: + description: The Data Factory ID in which to associate the Linked + Service with. Changing this forces a new resource. + type: string + description: + description: The description for the Data Factory Linked Service + Synapse. + type: string + id: + description: The ID of the Data Factory Synapse Linked Service. + type: string + integrationRuntimeName: + description: The integration runtime reference to associate with + the Data Factory Linked Service Synapse. + type: string + keyVaultPassword: + description: A key_vault_password block as defined below. Use + this argument to store Synapse password in an existing Key Vault. + It needs an existing Key Vault Data Factory Linked Service. + properties: + linkedServiceName: + description: Specifies the name of an existing Key Vault Data + Factory Linked Service. + type: string + secretName: + description: Specifies the secret name in Azure Key Vault + that stores Synapse password. + type: string + type: object + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the Data Factory + Linked Service Synapse. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. 
+ properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/datafactory.azure.upbound.io_triggerschedules.yaml b/package/crds/datafactory.azure.upbound.io_triggerschedules.yaml index aea18fabf..fdfcef739 100644 --- a/package/crds/datafactory.azure.upbound.io_triggerschedules.yaml +++ b/package/crds/datafactory.azure.upbound.io_triggerschedules.yaml @@ -897,3 +897,867 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: TriggerSchedule is the Schema for the TriggerSchedules API. Manages + a Trigger Schedule inside a Azure Data Factory. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: TriggerScheduleSpec defines the desired state of TriggerSchedule + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + activated: + description: Specifies if the Data Factory Schedule Trigger is + activated. Defaults to true. + type: boolean + annotations: + description: List of tags that can be used for describing the + Data Factory Schedule Trigger. + items: + type: string + type: array + dataFactoryId: + description: The Data Factory ID in which to associate the Linked + Service with. Changing this forces a new resource. + type: string + dataFactoryIdRef: + description: Reference to a Factory in datafactory to populate + dataFactoryId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + dataFactoryIdSelector: + description: Selector for a Factory in datafactory to populate + dataFactoryId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + description: + description: The Schedule Trigger's description. + type: string + endTime: + description: The time the Schedule Trigger should end. The time + will be represented in UTC. + type: string + frequency: + description: The trigger frequency. Valid values include Minute, + Hour, Day, Week, Month. Defaults to Minute. 
+ type: string + interval: + description: The interval for how often the trigger occurs. This + defaults to 1. + type: number + pipeline: + description: A pipeline block as defined below. + items: + properties: + name: + description: Reference pipeline name. + type: string + parameters: + additionalProperties: + type: string + description: The pipeline parameters that the trigger will + act upon. + type: object + x-kubernetes-map-type: granular + type: object + type: array + pipelineName: + description: The Data Factory Pipeline name that the trigger will + act on. + type: string + pipelineNameRef: + description: Reference to a Pipeline in datafactory to populate + pipelineName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + pipelineNameSelector: + description: Selector for a Pipeline in datafactory to populate + pipelineName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + pipelineParameters: + additionalProperties: + type: string + description: The pipeline parameters that the trigger will act + upon. + type: object + x-kubernetes-map-type: granular + schedule: + description: A schedule block as defined below, which further + specifies the recurrence schedule for the trigger. A schedule + is capable of limiting or increasing the number of trigger executions + specified by the frequency and interval properties. + properties: + daysOfMonth: + description: Day(s) of the month on which the trigger is scheduled. + This value can be specified with a monthly frequency only. + items: + type: number + type: array + daysOfWeek: + description: Days of the week on which the trigger is scheduled. + This value can be specified only with a weekly frequency. + items: + type: string + type: array + hours: + description: Hours of the day on which the trigger is scheduled. + items: + type: number + type: array + minutes: + description: Minutes of the hour on which the trigger is scheduled. + items: + type: number + type: array + monthly: + description: A monthly block as documented below, which specifies + the days of the month on which the trigger is scheduled. 
+ The value can be specified only with a monthly frequency. + items: + properties: + week: + description: The occurrence of the specified day during + the month. For example, a monthly property with weekday + and week values of Sunday, -1 means the last Sunday + of the month. + type: number + weekday: + description: The day of the week on which the trigger + runs. For example, a monthly property with a weekday + value of Sunday means every Sunday of the month. + type: string + type: object + type: array + type: object + startTime: + description: The time the Schedule Trigger will start. This defaults + to the current time. The time will be represented in UTC. + type: string + timeZone: + description: The timezone of the start/end time. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + activated: + description: Specifies if the Data Factory Schedule Trigger is + activated. Defaults to true. + type: boolean + annotations: + description: List of tags that can be used for describing the + Data Factory Schedule Trigger. + items: + type: string + type: array + description: + description: The Schedule Trigger's description. + type: string + endTime: + description: The time the Schedule Trigger should end. The time + will be represented in UTC. + type: string + frequency: + description: The trigger frequency. 
Valid values include Minute, + Hour, Day, Week, Month. Defaults to Minute. + type: string + interval: + description: The interval for how often the trigger occurs. This + defaults to 1. + type: number + pipeline: + description: A pipeline block as defined below. + items: + properties: + name: + description: Reference pipeline name. + type: string + parameters: + additionalProperties: + type: string + description: The pipeline parameters that the trigger will + act upon. + type: object + x-kubernetes-map-type: granular + type: object + type: array + pipelineName: + description: The Data Factory Pipeline name that the trigger will + act on. + type: string + pipelineNameRef: + description: Reference to a Pipeline in datafactory to populate + pipelineName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + pipelineNameSelector: + description: Selector for a Pipeline in datafactory to populate + pipelineName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + pipelineParameters: + additionalProperties: + type: string + description: The pipeline parameters that the trigger will act + upon. + type: object + x-kubernetes-map-type: granular + schedule: + description: A schedule block as defined below, which further + specifies the recurrence schedule for the trigger. A schedule + is capable of limiting or increasing the number of trigger executions + specified by the frequency and interval properties. + properties: + daysOfMonth: + description: Day(s) of the month on which the trigger is scheduled. + This value can be specified with a monthly frequency only. + items: + type: number + type: array + daysOfWeek: + description: Days of the week on which the trigger is scheduled. + This value can be specified only with a weekly frequency. + items: + type: string + type: array + hours: + description: Hours of the day on which the trigger is scheduled. + items: + type: number + type: array + minutes: + description: Minutes of the hour on which the trigger is scheduled. 
+ items: + type: number + type: array + monthly: + description: A monthly block as documented below, which specifies + the days of the month on which the trigger is scheduled. + The value can be specified only with a monthly frequency. + items: + properties: + week: + description: The occurrence of the specified day during + the month. For example, a monthly property with weekday + and week values of Sunday, -1 means the last Sunday + of the month. + type: number + weekday: + description: The day of the week on which the trigger + runs. For example, a monthly property with a weekday + value of Sunday means every Sunday of the month. + type: string + type: object + type: array + type: object + startTime: + description: The time the Schedule Trigger will start. This defaults + to the current time. The time will be represented in UTC. + type: string + timeZone: + description: The timezone of the start/end time. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: TriggerScheduleStatus defines the observed state of TriggerSchedule. + properties: + atProvider: + properties: + activated: + description: Specifies if the Data Factory Schedule Trigger is + activated. Defaults to true. + type: boolean + annotations: + description: List of tags that can be used for describing the + Data Factory Schedule Trigger. + items: + type: string + type: array + dataFactoryId: + description: The Data Factory ID in which to associate the Linked + Service with. Changing this forces a new resource. + type: string + description: + description: The Schedule Trigger's description. + type: string + endTime: + description: The time the Schedule Trigger should end. The time + will be represented in UTC. + type: string + frequency: + description: The trigger frequency. Valid values include Minute, + Hour, Day, Week, Month. Defaults to Minute. + type: string + id: + description: The ID of the Data Factory Schedule Trigger. + type: string + interval: + description: The interval for how often the trigger occurs. This + defaults to 1. + type: number + pipeline: + description: A pipeline block as defined below. + items: + properties: + name: + description: Reference pipeline name. + type: string + parameters: + additionalProperties: + type: string + description: The pipeline parameters that the trigger will + act upon. 
+ type: object + x-kubernetes-map-type: granular + type: object + type: array + pipelineName: + description: The Data Factory Pipeline name that the trigger will + act on. + type: string + pipelineParameters: + additionalProperties: + type: string + description: The pipeline parameters that the trigger will act + upon. + type: object + x-kubernetes-map-type: granular + schedule: + description: A schedule block as defined below, which further + specifies the recurrence schedule for the trigger. A schedule + is capable of limiting or increasing the number of trigger executions + specified by the frequency and interval properties. + properties: + daysOfMonth: + description: Day(s) of the month on which the trigger is scheduled. + This value can be specified with a monthly frequency only. + items: + type: number + type: array + daysOfWeek: + description: Days of the week on which the trigger is scheduled. + This value can be specified only with a weekly frequency. + items: + type: string + type: array + hours: + description: Hours of the day on which the trigger is scheduled. + items: + type: number + type: array + minutes: + description: Minutes of the hour on which the trigger is scheduled. + items: + type: number + type: array + monthly: + description: A monthly block as documented below, which specifies + the days of the month on which the trigger is scheduled. + The value can be specified only with a monthly frequency. + items: + properties: + week: + description: The occurrence of the specified day during + the month. For example, a monthly property with weekday + and week values of Sunday, -1 means the last Sunday + of the month. + type: number + weekday: + description: The day of the week on which the trigger + runs. For example, a monthly property with a weekday + value of Sunday means every Sunday of the month. + type: string + type: object + type: array + type: object + startTime: + description: The time the Schedule Trigger will start. 
This defaults + to the current time. The time will be represented in UTC. + type: string + timeZone: + description: The timezone of the start/end time. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/dataprotection.azure.upbound.io_backuppolicydisks.yaml b/package/crds/dataprotection.azure.upbound.io_backuppolicydisks.yaml index 0a8de48c8..8d4f7b636 100644 --- a/package/crds/dataprotection.azure.upbound.io_backuppolicydisks.yaml +++ b/package/crds/dataprotection.azure.upbound.io_backuppolicydisks.yaml @@ -571,3 +571,550 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: BackupPolicyDisk is the Schema for the BackupPolicyDisks API. + Manages a Backup Policy Disk. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: BackupPolicyDiskSpec defines the desired state of BackupPolicyDisk + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + backupRepeatingTimeIntervals: + description: Specifies a list of repeating time interval. It should + follow ISO 8601 repeating time interval . Changing this forces + a new Backup Policy Disk to be created. + items: + type: string + type: array + defaultRetentionDuration: + description: The duration of default retention rule. It should + follow ISO 8601 duration format. Changing this forces a new + Backup Policy Disk to be created. + type: string + retentionRule: + description: One or more retention_rule blocks as defined below. + Changing this forces a new Backup Policy Disk to be created. + items: + properties: + criteria: + description: A criteria block as defined below. Changing + this forces a new Backup Policy Disk to be created. + properties: + absoluteCriteria: + description: Possible values are FirstOfDay and FirstOfWeek. + Changing this forces a new Backup Policy Disk to be + created. + type: string + type: object + duration: + description: Duration of deletion after given timespan. 
+ It should follow ISO 8601 duration format. Changing this + forces a new Backup Policy Disk to be created. + type: string + name: + description: The name which should be used for this retention + rule. Changing this forces a new Backup Policy Disk to + be created. + type: string + priority: + description: Retention Tag priority. Changing this forces + a new Backup Policy Disk to be created. + type: number + type: object + type: array + timeZone: + description: Specifies the Time Zone which should be used by the + backup schedule. Changing this forces a new Backup Policy Disk + to be created. + type: string + vaultId: + description: The ID of the Backup Vault within which the Backup + Policy Disk should exist. Changing this forces a new Backup + Policy Disk to be created. + type: string + vaultIdRef: + description: Reference to a BackupVault in dataprotection to populate + vaultId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + vaultIdSelector: + description: Selector for a BackupVault in dataprotection to populate + vaultId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + backupRepeatingTimeIntervals: + description: Specifies a list of repeating time interval. It should + follow ISO 8601 repeating time interval . 
Changing this forces + a new Backup Policy Disk to be created. + items: + type: string + type: array + defaultRetentionDuration: + description: The duration of default retention rule. It should + follow ISO 8601 duration format. Changing this forces a new + Backup Policy Disk to be created. + type: string + retentionRule: + description: One or more retention_rule blocks as defined below. + Changing this forces a new Backup Policy Disk to be created. + items: + properties: + criteria: + description: A criteria block as defined below. Changing + this forces a new Backup Policy Disk to be created. + properties: + absoluteCriteria: + description: Possible values are FirstOfDay and FirstOfWeek. + Changing this forces a new Backup Policy Disk to be + created. + type: string + type: object + duration: + description: Duration of deletion after given timespan. + It should follow ISO 8601 duration format. Changing this + forces a new Backup Policy Disk to be created. + type: string + name: + description: The name which should be used for this retention + rule. Changing this forces a new Backup Policy Disk to + be created. + type: string + priority: + description: Retention Tag priority. Changing this forces + a new Backup Policy Disk to be created. + type: number + type: object + type: array + timeZone: + description: Specifies the Time Zone which should be used by the + backup schedule. Changing this forces a new Backup Policy Disk + to be created. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. 
If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.backupRepeatingTimeIntervals is a required + parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.backupRepeatingTimeIntervals) + || (has(self.initProvider) && has(self.initProvider.backupRepeatingTimeIntervals))' + - message: spec.forProvider.defaultRetentionDuration is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.defaultRetentionDuration) + || (has(self.initProvider) && has(self.initProvider.defaultRetentionDuration))' + status: + description: BackupPolicyDiskStatus defines the observed state of BackupPolicyDisk. + properties: + atProvider: + properties: + backupRepeatingTimeIntervals: + description: Specifies a list of repeating time interval. It should + follow ISO 8601 repeating time interval . Changing this forces + a new Backup Policy Disk to be created. + items: + type: string + type: array + defaultRetentionDuration: + description: The duration of default retention rule. It should + follow ISO 8601 duration format. Changing this forces a new + Backup Policy Disk to be created. + type: string + id: + description: The ID of the Backup Policy Disk. + type: string + retentionRule: + description: One or more retention_rule blocks as defined below. + Changing this forces a new Backup Policy Disk to be created. + items: + properties: + criteria: + description: A criteria block as defined below. Changing + this forces a new Backup Policy Disk to be created. + properties: + absoluteCriteria: + description: Possible values are FirstOfDay and FirstOfWeek. + Changing this forces a new Backup Policy Disk to be + created. 
+ type: string + type: object + duration: + description: Duration of deletion after given timespan. + It should follow ISO 8601 duration format. Changing this + forces a new Backup Policy Disk to be created. + type: string + name: + description: The name which should be used for this retention + rule. Changing this forces a new Backup Policy Disk to + be created. + type: string + priority: + description: Retention Tag priority. Changing this forces + a new Backup Policy Disk to be created. + type: number + type: object + type: array + timeZone: + description: Specifies the Time Zone which should be used by the + backup schedule. Changing this forces a new Backup Policy Disk + to be created. + type: string + vaultId: + description: The ID of the Backup Vault within which the Backup + Policy Disk should exist. Changing this forces a new Backup + Policy Disk to be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? 
+ type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/dataprotection.azure.upbound.io_backuppolicypostgresqls.yaml b/package/crds/dataprotection.azure.upbound.io_backuppolicypostgresqls.yaml index f3dd4bbf0..46c3bad13 100644 --- a/package/crds/dataprotection.azure.upbound.io_backuppolicypostgresqls.yaml +++ b/package/crds/dataprotection.azure.upbound.io_backuppolicypostgresqls.yaml @@ -772,3 +772,748 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: BackupPolicyPostgreSQL is the Schema for the BackupPolicyPostgreSQLs + API. Manages a Backup Policy to back up PostgreSQL. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: BackupPolicyPostgreSQLSpec defines the desired state of BackupPolicyPostgreSQL + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + backupRepeatingTimeIntervals: + description: Specifies a list of repeating time interval. It supports + weekly back. It should follow ISO 8601 repeating time interval. + Changing this forces a new Backup Policy PostgreSQL to be created. + items: + type: string + type: array + defaultRetentionDuration: + description: The duration of default retention rule. It should + follow ISO 8601 duration format. Changing this forces a new + Backup Policy PostgreSQL to be created. + type: string + resourceGroupName: + description: The name of the Resource Group where the Backup Policy + PostgreSQL should exist. Changing this forces a new Backup Policy + PostgreSQL to be created. 
+ type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + retentionRule: + description: One or more retention_rule blocks as defined below. + Changing this forces a new Backup Policy PostgreSQL to be created. + items: + properties: + criteria: + description: A criteria block as defined below. Changing + this forces a new Backup Policy PostgreSQL to be created. + properties: + absoluteCriteria: + description: Possible values are AllBackup, FirstOfDay, + FirstOfWeek, FirstOfMonth and FirstOfYear. These values + mean the first successful backup of the day/week/month/year. + Changing this forces a new Backup Policy PostgreSQL + to be created. + type: string + daysOfWeek: + description: Possible values are Monday, Tuesday, Thursday, + Friday, Saturday and Sunday. Changing this forces + a new Backup Policy PostgreSQL to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + monthsOfYear: + description: Possible values are January, February, + March, April, May, June, July, August, September, + October, November and December. Changing this forces + a new Backup Policy PostgreSQL to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + scheduledBackupTimes: + description: Specifies a list of backup times for backup + in the RFC3339 format. Changing this forces a new + Backup Policy PostgreSQL to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + weeksOfMonth: + description: Possible values are First, Second, Third, + Fourth and Last. Changing this forces a new Backup + Policy PostgreSQL to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + duration: + description: Duration after which the backup is deleted. 
+ It should follow ISO 8601 duration format. Changing this + forces a new Backup Policy PostgreSQL to be created. + type: string + name: + description: The name which should be used for this retention + rule. Changing this forces a new Backup Policy PostgreSQL + to be created. + type: string + priority: + description: Specifies the priority of the rule. The priority + number must be unique for each rule. The lower the priority + number, the higher the priority of the rule. Changing + this forces a new Backup Policy PostgreSQL to be created. + type: number + type: object + type: array + timeZone: + description: Specifies the Time Zone which should be used by the + backup schedule. Changing this forces a new Backup Policy PostgreSQL + to be created. + type: string + vaultName: + description: The name of the Backup Vault where the Backup Policy + PostgreSQL should exist. Changing this forces a new Backup Policy + PostgreSQL to be created. + type: string + vaultNameRef: + description: Reference to a BackupVault in dataprotection to populate + vaultName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + vaultNameSelector: + description: Selector for a BackupVault in dataprotection to populate + vaultName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. 
+ properties: + backupRepeatingTimeIntervals: + description: Specifies a list of repeating time interval. It supports + weekly back. It should follow ISO 8601 repeating time interval. + Changing this forces a new Backup Policy PostgreSQL to be created. + items: + type: string + type: array + defaultRetentionDuration: + description: The duration of default retention rule. It should + follow ISO 8601 duration format. Changing this forces a new + Backup Policy PostgreSQL to be created. + type: string + retentionRule: + description: One or more retention_rule blocks as defined below. + Changing this forces a new Backup Policy PostgreSQL to be created. + items: + properties: + criteria: + description: A criteria block as defined below. Changing + this forces a new Backup Policy PostgreSQL to be created. + properties: + absoluteCriteria: + description: Possible values are AllBackup, FirstOfDay, + FirstOfWeek, FirstOfMonth and FirstOfYear. These values + mean the first successful backup of the day/week/month/year. + Changing this forces a new Backup Policy PostgreSQL + to be created. + type: string + daysOfWeek: + description: Possible values are Monday, Tuesday, Thursday, + Friday, Saturday and Sunday. Changing this forces + a new Backup Policy PostgreSQL to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + monthsOfYear: + description: Possible values are January, February, + March, April, May, June, July, August, September, + October, November and December. Changing this forces + a new Backup Policy PostgreSQL to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + scheduledBackupTimes: + description: Specifies a list of backup times for backup + in the RFC3339 format. Changing this forces a new + Backup Policy PostgreSQL to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + weeksOfMonth: + description: Possible values are First, Second, Third, + Fourth and Last. 
Changing this forces a new Backup + Policy PostgreSQL to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + duration: + description: Duration after which the backup is deleted. + It should follow ISO 8601 duration format. Changing this + forces a new Backup Policy PostgreSQL to be created. + type: string + name: + description: The name which should be used for this retention + rule. Changing this forces a new Backup Policy PostgreSQL + to be created. + type: string + priority: + description: Specifies the priority of the rule. The priority + number must be unique for each rule. The lower the priority + number, the higher the priority of the rule. Changing + this forces a new Backup Policy PostgreSQL to be created. + type: number + type: object + type: array + timeZone: + description: Specifies the Time Zone which should be used by the + backup schedule. Changing this forces a new Backup Policy PostgreSQL + to be created. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.backupRepeatingTimeIntervals is a required + parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.backupRepeatingTimeIntervals) + || (has(self.initProvider) && has(self.initProvider.backupRepeatingTimeIntervals))' + - message: spec.forProvider.defaultRetentionDuration is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.defaultRetentionDuration) + || (has(self.initProvider) && has(self.initProvider.defaultRetentionDuration))' + status: + description: BackupPolicyPostgreSQLStatus defines the observed state of + BackupPolicyPostgreSQL. + properties: + atProvider: + properties: + backupRepeatingTimeIntervals: + description: Specifies a list of repeating time interval. It supports + weekly back. It should follow ISO 8601 repeating time interval. 
+ Changing this forces a new Backup Policy PostgreSQL to be created. + items: + type: string + type: array + defaultRetentionDuration: + description: The duration of default retention rule. It should + follow ISO 8601 duration format. Changing this forces a new + Backup Policy PostgreSQL to be created. + type: string + id: + description: The ID of the Backup Policy PostgreSQL. + type: string + resourceGroupName: + description: The name of the Resource Group where the Backup Policy + PostgreSQL should exist. Changing this forces a new Backup Policy + PostgreSQL to be created. + type: string + retentionRule: + description: One or more retention_rule blocks as defined below. + Changing this forces a new Backup Policy PostgreSQL to be created. + items: + properties: + criteria: + description: A criteria block as defined below. Changing + this forces a new Backup Policy PostgreSQL to be created. + properties: + absoluteCriteria: + description: Possible values are AllBackup, FirstOfDay, + FirstOfWeek, FirstOfMonth and FirstOfYear. These values + mean the first successful backup of the day/week/month/year. + Changing this forces a new Backup Policy PostgreSQL + to be created. + type: string + daysOfWeek: + description: Possible values are Monday, Tuesday, Thursday, + Friday, Saturday and Sunday. Changing this forces + a new Backup Policy PostgreSQL to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + monthsOfYear: + description: Possible values are January, February, + March, April, May, June, July, August, September, + October, November and December. Changing this forces + a new Backup Policy PostgreSQL to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + scheduledBackupTimes: + description: Specifies a list of backup times for backup + in the RFC3339 format. Changing this forces a new + Backup Policy PostgreSQL to be created. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + weeksOfMonth: + description: Possible values are First, Second, Third, + Fourth and Last. Changing this forces a new Backup + Policy PostgreSQL to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + duration: + description: Duration after which the backup is deleted. + It should follow ISO 8601 duration format. Changing this + forces a new Backup Policy PostgreSQL to be created. + type: string + name: + description: The name which should be used for this retention + rule. Changing this forces a new Backup Policy PostgreSQL + to be created. + type: string + priority: + description: Specifies the priority of the rule. The priority + number must be unique for each rule. The lower the priority + number, the higher the priority of the rule. Changing + this forces a new Backup Policy PostgreSQL to be created. + type: number + type: object + type: array + timeZone: + description: Specifies the Time Zone which should be used by the + backup schedule. Changing this forces a new Backup Policy PostgreSQL + to be created. + type: string + vaultName: + description: The name of the Backup Vault where the Backup Policy + PostgreSQL should exist. Changing this forces a new Backup Policy + PostgreSQL to be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/dataprotection.azure.upbound.io_backupvaults.yaml b/package/crds/dataprotection.azure.upbound.io_backupvaults.yaml index c076e5f9c..556d8c59d 100644 --- a/package/crds/dataprotection.azure.upbound.io_backupvaults.yaml +++ b/package/crds/dataprotection.azure.upbound.io_backupvaults.yaml @@ -552,3 +552,531 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: BackupVault is the Schema for the BackupVaults API. 
Manages a + Backup Vault. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: BackupVaultSpec defines the desired state of BackupVault + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + datastoreType: + description: Specifies the type of the data store. Possible values + are ArchiveStore, OperationalStore, SnapshotStore and VaultStore. + Changing this forces a new resource to be created. + type: string + identity: + description: An identity block as defined below. + properties: + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Backup Vault. 
The only + possible value is SystemAssigned. + type: string + type: object + location: + description: The Azure Region where the Backup Vault should exist. + Changing this forces a new Backup Vault to be created. + type: string + redundancy: + description: Specifies the backup storage redundancy. Possible + values are GeoRedundant, LocallyRedundant and ZoneRedundant. + Changing this forces a new Backup Vault to be created. + type: string + resourceGroupName: + description: The name of the Resource Group where the Backup Vault + should exist. Changing this forces a new Backup Vault to be + created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + retentionDurationInDays: + description: The soft delete retention duration for this Backup + Vault. Possible values are between 14 and 180. Defaults to 14. + type: number + softDelete: + description: The state of soft delete for this Backup Vault. Possible + values are AlwaysOn, Off and On. Defaults to On. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Backup Vault. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + datastoreType: + description: Specifies the type of the data store. Possible values + are ArchiveStore, OperationalStore, SnapshotStore and VaultStore. + Changing this forces a new resource to be created. + type: string + identity: + description: An identity block as defined below. + properties: + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Backup Vault. The only + possible value is SystemAssigned. + type: string + type: object + location: + description: The Azure Region where the Backup Vault should exist. + Changing this forces a new Backup Vault to be created. + type: string + redundancy: + description: Specifies the backup storage redundancy. Possible + values are GeoRedundant, LocallyRedundant and ZoneRedundant. + Changing this forces a new Backup Vault to be created. + type: string + retentionDurationInDays: + description: The soft delete retention duration for this Backup + Vault. Possible values are between 14 and 180. Defaults to 14. + type: number + softDelete: + description: The state of soft delete for this Backup Vault. Possible + values are AlwaysOn, Off and On. Defaults to On. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Backup Vault. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. 
Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.datastoreType is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.datastoreType) + || (has(self.initProvider) && has(self.initProvider.datastoreType))' + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.redundancy is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.redundancy) + || (has(self.initProvider) && has(self.initProvider.redundancy))' + status: + description: BackupVaultStatus defines the observed state of BackupVault. + properties: + atProvider: + properties: + datastoreType: + description: Specifies the type of the data store. Possible values + are ArchiveStore, OperationalStore, SnapshotStore and VaultStore. + Changing this forces a new resource to be created. + type: string + id: + description: The ID of the Backup Vault. + type: string + identity: + description: An identity block as defined below. + properties: + principalId: + description: The Principal ID for the Service Principal associated + with the Identity of this Backup Vault. + type: string + tenantId: + description: The Tenant ID for the Service Principal associated + with the Identity of this Backup Vault. + type: string + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Backup Vault. The only + possible value is SystemAssigned. 
+ type: string + type: object + location: + description: The Azure Region where the Backup Vault should exist. + Changing this forces a new Backup Vault to be created. + type: string + redundancy: + description: Specifies the backup storage redundancy. Possible + values are GeoRedundant, LocallyRedundant and ZoneRedundant. + Changing this forces a new Backup Vault to be created. + type: string + resourceGroupName: + description: The name of the Resource Group where the Backup Vault + should exist. Changing this forces a new Backup Vault to be + created. + type: string + retentionDurationInDays: + description: The soft delete retention duration for this Backup + Vault. Possible values are between 14 and 180. Defaults to 14. + type: number + softDelete: + description: The state of soft delete for this Backup Vault. Possible + values are AlwaysOn, Off and On. Defaults to On. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Backup Vault. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
+ format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/datashare.azure.upbound.io_accounts.yaml b/package/crds/datashare.azure.upbound.io_accounts.yaml index 183702e08..adbc0fabf 100644 --- a/package/crds/datashare.azure.upbound.io_accounts.yaml +++ b/package/crds/datashare.azure.upbound.io_accounts.yaml @@ -500,3 +500,479 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Account is the Schema for the Accounts API. Manages a Data Share + Account. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: AccountSpec defines the desired state of Account + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + identity: + description: An identity block as defined below. Changing this + forces a new resource to be created. + properties: + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Data Share Account. The + only possible value is SystemAssigned. Changing this forces + a new resource to be created. + type: string + type: object + location: + description: The Azure Region where the Data Share Account should + exist. Changing this forces a new Data Share Account to be created. 
+ type: string + resourceGroupName: + description: The name of the Resource Group where the Data Share + Account should exist. Changing this forces a new Data Share + Account to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Data Share Account. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + identity: + description: An identity block as defined below. Changing this + forces a new resource to be created. + properties: + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Data Share Account. The + only possible value is SystemAssigned. Changing this forces + a new resource to be created. + type: string + type: object + location: + description: The Azure Region where the Data Share Account should + exist. Changing this forces a new Data Share Account to be created. 
+ type: string + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Data Share Account. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.identity is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.identity) + || (has(self.initProvider) && has(self.initProvider.identity))' + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + status: + description: AccountStatus defines the observed state of Account. + properties: + atProvider: + properties: + id: + description: The ID of the Data Share Account. + type: string + identity: + description: An identity block as defined below. Changing this + forces a new resource to be created. + properties: + principalId: + description: The Principal ID for the Service Principal associated + with the Identity of this Data Share Account. + type: string + tenantId: + description: The Tenant ID for the Service Principal associated + with the Identity of this Data Share Account. + type: string + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Data Share Account. The + only possible value is SystemAssigned. Changing this forces + a new resource to be created. + type: string + type: object + location: + description: The Azure Region where the Data Share Account should + exist. Changing this forces a new Data Share Account to be created. + type: string + resourceGroupName: + description: The name of the Resource Group where the Data Share + Account should exist. Changing this forces a new Data Share + Account to be created. 
+ type: string + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Data Share Account. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/datashare.azure.upbound.io_datasetblobstorages.yaml b/package/crds/datashare.azure.upbound.io_datasetblobstorages.yaml index 6fa35387b..1fe47c2a8 100644 --- a/package/crds/datashare.azure.upbound.io_datasetblobstorages.yaml +++ b/package/crds/datashare.azure.upbound.io_datasetblobstorages.yaml @@ -982,3 +982,961 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: DataSetBlobStorage is the Schema for the DataSetBlobStorages + API. Manages a Data Share Blob Storage Dataset. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DataSetBlobStorageSpec defines the desired state of DataSetBlobStorage + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + containerName: + description: The name of the storage account container to be shared + with the receiver. Changing this forces a new Data Share Blob + Storage Dataset to be created. + type: string + containerNameRef: + description: Reference to a Container in storage to populate containerName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + containerNameSelector: + description: Selector for a Container in storage to populate containerName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + dataShareId: + description: The ID of the Data Share in which this Data Share + Blob Storage Dataset should be created. Changing this forces + a new Data Share Blob Storage Dataset to be created. + type: string + dataShareIdRef: + description: Reference to a DataShare in datashare to populate + dataShareId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + dataShareIdSelector: + description: Selector for a DataShare in datashare to populate + dataShareId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + filePath: + description: The path of the file in the storage container to + be shared with the receiver. 
Changing this forces a new Data + Share Blob Storage Dataset to be created. + type: string + folderPath: + description: The path of the folder in the storage container to + be shared with the receiver. Changing this forces a new Data + Share Blob Storage Dataset to be created. + type: string + storageAccount: + description: A storage_account block as defined below. Changing + this forces a new resource to be created. + properties: + name: + description: The name of the storage account to be shared + with the receiver. Changing this forces a new Data Share + Blob Storage Dataset to be created. + type: string + nameRef: + description: Reference to a Account in storage to populate + name. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + nameSelector: + description: Selector for a Account in storage to populate + name. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + resourceGroupName: + description: The resource group name of the storage account + to be shared with the receiver. Changing this forces a new + Data Share Blob Storage Dataset to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + subscriptionId: + description: The subscription id of the storage account to + be shared with the receiver. Changing this forces a new + Data Share Blob Storage Dataset to be created. + type: string + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. 
+ The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + containerName: + description: The name of the storage account container to be shared + with the receiver. Changing this forces a new Data Share Blob + Storage Dataset to be created. + type: string + containerNameRef: + description: Reference to a Container in storage to populate containerName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + containerNameSelector: + description: Selector for a Container in storage to populate containerName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + filePath: + description: The path of the file in the storage container to + be shared with the receiver. Changing this forces a new Data + Share Blob Storage Dataset to be created. + type: string + folderPath: + description: The path of the folder in the storage container to + be shared with the receiver. Changing this forces a new Data + Share Blob Storage Dataset to be created. + type: string + storageAccount: + description: A storage_account block as defined below. Changing + this forces a new resource to be created. + properties: + name: + description: The name of the storage account to be shared + with the receiver. Changing this forces a new Data Share + Blob Storage Dataset to be created. + type: string + nameRef: + description: Reference to a Account in storage to populate + name. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + nameSelector: + description: Selector for a Account in storage to populate + name. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + resourceGroupName: + description: The resource group name of the storage account + to be shared with the receiver. Changing this forces a new + Data Share Blob Storage Dataset to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + subscriptionId: + description: The subscription id of the storage account to + be shared with the receiver. Changing this forces a new + Data Share Blob Storage Dataset to be created. + type: string + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.storageAccount is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.storageAccount) + || (has(self.initProvider) && has(self.initProvider.storageAccount))' + status: + description: DataSetBlobStorageStatus defines the observed state of DataSetBlobStorage. + properties: + atProvider: + properties: + containerName: + description: The name of the storage account container to be shared + with the receiver. Changing this forces a new Data Share Blob + Storage Dataset to be created. + type: string + dataShareId: + description: The ID of the Data Share in which this Data Share + Blob Storage Dataset should be created. Changing this forces + a new Data Share Blob Storage Dataset to be created. + type: string + displayName: + description: The name of the Data Share Dataset. + type: string + filePath: + description: The path of the file in the storage container to + be shared with the receiver. Changing this forces a new Data + Share Blob Storage Dataset to be created. + type: string + folderPath: + description: The path of the folder in the storage container to + be shared with the receiver. Changing this forces a new Data + Share Blob Storage Dataset to be created. + type: string + id: + description: The ID of the Data Share Blob Storage Dataset. + type: string + storageAccount: + description: A storage_account block as defined below. Changing + this forces a new resource to be created. + properties: + name: + description: The name of the storage account to be shared + with the receiver. Changing this forces a new Data Share + Blob Storage Dataset to be created. 
+ type: string + resourceGroupName: + description: The resource group name of the storage account + to be shared with the receiver. Changing this forces a new + Data Share Blob Storage Dataset to be created. + type: string + subscriptionId: + description: The subscription id of the storage account to + be shared with the receiver. Changing this forces a new + Data Share Blob Storage Dataset to be created. + type: string + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/datashare.azure.upbound.io_datashares.yaml b/package/crds/datashare.azure.upbound.io_datashares.yaml index b8ad66be1..35124b9e8 100644 --- a/package/crds/datashare.azure.upbound.io_datashares.yaml +++ b/package/crds/datashare.azure.upbound.io_datashares.yaml @@ -495,3 +495,474 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: DataShare is the Schema for the DataShares API. Manages a Data + Share. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DataShareSpec defines the desired state of DataShare + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + accountId: + description: The ID of the Data Share account in which the Data + Share is created. Changing this forces a new Data Share to be + created. + type: string + accountIdRef: + description: Reference to a Account in datashare to populate accountId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + accountIdSelector: + description: Selector for a Account in datashare to populate accountId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + description: + description: The Data Share's description. + type: string + kind: + description: The kind of the Data Share. Possible values are CopyBased + and InPlace. Changing this forces a new Data Share to be created. + type: string + snapshotSchedule: + description: A snapshot_schedule block as defined below. + properties: + name: + description: The name of the snapshot schedule. + type: string + recurrence: + description: The interval of the synchronization with the + source data. Possible values are Hour and Day. + type: string + startTime: + description: The synchronization with the source data's start + time. 
+ type: string + type: object + terms: + description: The terms of the Data Share. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: The Data Share's description. + type: string + kind: + description: The kind of the Data Share. Possible values are CopyBased + and InPlace. Changing this forces a new Data Share to be created. + type: string + snapshotSchedule: + description: A snapshot_schedule block as defined below. + properties: + name: + description: The name of the snapshot schedule. + type: string + recurrence: + description: The interval of the synchronization with the + source data. Possible values are Hour and Day. + type: string + startTime: + description: The synchronization with the source data's start + time. + type: string + type: object + terms: + description: The terms of the Data Share. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. 
Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.kind is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.kind) + || (has(self.initProvider) && has(self.initProvider.kind))' + status: + description: DataShareStatus defines the observed state of DataShare. + properties: + atProvider: + properties: + accountId: + description: The ID of the Data Share account in which the Data + Share is created. Changing this forces a new Data Share to be + created. 
+ type: string + description: + description: The Data Share's description. + type: string + id: + description: The ID of the Data Share. + type: string + kind: + description: The kind of the Data Share. Possible values are CopyBased + and InPlace. Changing this forces a new Data Share to be created. + type: string + snapshotSchedule: + description: A snapshot_schedule block as defined below. + properties: + name: + description: The name of the snapshot schedule. + type: string + recurrence: + description: The interval of the synchronization with the + source data. Possible values are Hour and Day. + type: string + startTime: + description: The synchronization with the source data's start + time. + type: string + type: object + terms: + description: The terms of the Data Share. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/dbformysql.azure.upbound.io_flexibleservers.yaml b/package/crds/dbformysql.azure.upbound.io_flexibleservers.yaml index 4cc973cf7..adbc52066 100644 --- a/package/crds/dbformysql.azure.upbound.io_flexibleservers.yaml +++ b/package/crds/dbformysql.azure.upbound.io_flexibleservers.yaml @@ -1247,3 +1247,1202 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: FlexibleServer is the Schema for the FlexibleServers API. Manages + a MySQL Flexible Server. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. 
+ Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: FlexibleServerSpec defines the desired state of FlexibleServer + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + administratorLogin: + description: The Administrator login for the MySQL Flexible Server. + Required when create_mode is Default. Changing this forces a + new MySQL Flexible Server to be created. + type: string + administratorPasswordSecretRef: + description: The Password associated with the administrator_login + for the MySQL Flexible Server. Required when create_mode is + Default. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + backupRetentionDays: + description: The backup retention days for the MySQL Flexible + Server. Possible values are between 1 and 35 days. Defaults + to 7. + type: number + createMode: + description: The creation mode which can be used to restore or + replicate existing servers. Possible values are Default, PointInTimeRestore, + GeoRestore, and Replica. 
Changing this forces a new MySQL Flexible + Server to be created. + type: string + customerManagedKey: + description: A customer_managed_key block as defined below. + properties: + geoBackupKeyVaultKeyId: + description: The ID of the geo backup Key Vault Key. It can't + cross region and need Customer Managed Key in same region + as geo backup. + type: string + geoBackupUserAssignedIdentityId: + description: The geo backup user managed identity id for a + Customer Managed Key. Should be added with identity_ids. + It can't cross region and need identity in same region as + geo backup. + type: string + keyVaultKeyId: + description: The ID of the Key Vault Key. + type: string + primaryUserAssignedIdentityId: + description: Specifies the primary user managed identity id + for a Customer Managed Key. Should be added with identity_ids. + type: string + type: object + delegatedSubnetId: + description: The ID of the virtual network subnet to create the + MySQL Flexible Server. Changing this forces a new MySQL Flexible + Server to be created. + type: string + delegatedSubnetIdRef: + description: Reference to a Subnet in network to populate delegatedSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + delegatedSubnetIdSelector: + description: Selector for a Subnet in network to populate delegatedSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + geoRedundantBackupEnabled: + description: Should geo redundant backup enabled? Defaults to + false. Changing this forces a new MySQL Flexible Server to be + created. + type: boolean + highAvailability: + description: A high_availability block as defined below. + properties: + mode: + description: The high availability mode for the MySQL Flexible + Server. Possibles values are SameZone and ZoneRedundant. + type: string + standbyAvailabilityZone: + description: Specifies the Availability Zone in which the + standby Flexible Server should be located. Possible values + are 1, 2 and 3. 
+ type: string + type: object + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of User Assigned Managed Identity IDs + to be assigned to this MySQL Flexible Server. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this MySQL Flexible Server. + The only possible value is UserAssigned. + type: string + type: object + location: + description: The Azure Region where the MySQL Flexible Server + should exist. Changing this forces a new MySQL Flexible Server + to be created. + type: string + maintenanceWindow: + description: A maintenance_window block as defined below. + properties: + dayOfWeek: + description: The day of week for maintenance window. Defaults + to 0. + type: number + startHour: + description: The start hour for maintenance window. Defaults + to 0. + type: number + startMinute: + description: The start minute for maintenance window. Defaults + to 0. + type: number + type: object + pointInTimeRestoreTimeInUtc: + description: The point in time to restore from creation_source_server_id + when create_mode is PointInTimeRestore. Changing this forces + a new MySQL Flexible Server to be created. + type: string + privateDnsZoneId: + description: The ID of the private DNS zone to create the MySQL + Flexible Server. Changing this forces a new MySQL Flexible Server + to be created. + type: string + privateDnsZoneIdRef: + description: Reference to a PrivateDNSZone in network to populate + privateDnsZoneId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + privateDnsZoneIdSelector: + description: Selector for a PrivateDNSZone in network to populate + privateDnsZoneId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + replicationRole: + description: The replication role. Possible value is None. + type: string + resourceGroupName: + description: The name of the Resource Group where the MySQL Flexible + Server should exist. 
Changing this forces a new MySQL Flexible + Server to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + skuName: + description: The SKU Name for the MySQL Flexible Server. + type: string + sourceServerId: + description: The resource ID of the source MySQL Flexible Server + to be restored. Required when create_mode is PointInTimeRestore, + GeoRestore, and Replica. Changing this forces a new MySQL Flexible + Server to be created. + type: string + storage: + description: A storage block as defined below. + properties: + autoGrowEnabled: + description: Should Storage Auto Grow be enabled? Defaults + to true. + type: boolean + ioScalingEnabled: + description: Should IOPS be scaled automatically? If true, + iops can not be set. Defaults to false. + type: boolean + iops: + description: The storage IOPS for the MySQL Flexible Server. + Possible values are between 360 and 20000. + type: number + sizeGb: + description: The max storage allowed for the MySQL Flexible + Server. Possible values are between 20 and 16384. + type: number + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + MySQL Flexible Server. + type: object + x-kubernetes-map-type: granular + version: + description: The version of the MySQL Flexible Server to use. + Possible values are 5.7, and 8.0.21. Changing this forces a + new MySQL Flexible Server to be created. + type: string + zone: + description: Specifies the Availability Zone in which this MySQL + Flexible Server should be located. Possible values are 1, 2 + and 3. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. 
It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + administratorLogin: + description: The Administrator login for the MySQL Flexible Server. + Required when create_mode is Default. Changing this forces a + new MySQL Flexible Server to be created. + type: string + backupRetentionDays: + description: The backup retention days for the MySQL Flexible + Server. Possible values are between 1 and 35 days. Defaults + to 7. + type: number + createMode: + description: The creation mode which can be used to restore or + replicate existing servers. Possible values are Default, PointInTimeRestore, + GeoRestore, and Replica. Changing this forces a new MySQL Flexible + Server to be created. + type: string + customerManagedKey: + description: A customer_managed_key block as defined below. + properties: + geoBackupKeyVaultKeyId: + description: The ID of the geo backup Key Vault Key. It can't + cross region and need Customer Managed Key in same region + as geo backup. + type: string + geoBackupUserAssignedIdentityId: + description: The geo backup user managed identity id for a + Customer Managed Key. Should be added with identity_ids. + It can't cross region and need identity in same region as + geo backup. + type: string + keyVaultKeyId: + description: The ID of the Key Vault Key. + type: string + primaryUserAssignedIdentityId: + description: Specifies the primary user managed identity id + for a Customer Managed Key. 
Should be added with identity_ids. + type: string + type: object + delegatedSubnetId: + description: The ID of the virtual network subnet to create the + MySQL Flexible Server. Changing this forces a new MySQL Flexible + Server to be created. + type: string + delegatedSubnetIdRef: + description: Reference to a Subnet in network to populate delegatedSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + delegatedSubnetIdSelector: + description: Selector for a Subnet in network to populate delegatedSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + geoRedundantBackupEnabled: + description: Should geo redundant backup enabled? Defaults to + false. Changing this forces a new MySQL Flexible Server to be + created. + type: boolean + highAvailability: + description: A high_availability block as defined below. + properties: + mode: + description: The high availability mode for the MySQL Flexible + Server. Possibles values are SameZone and ZoneRedundant. + type: string + standbyAvailabilityZone: + description: Specifies the Availability Zone in which the + standby Flexible Server should be located. Possible values + are 1, 2 and 3. + type: string + type: object + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of User Assigned Managed Identity IDs + to be assigned to this MySQL Flexible Server. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this MySQL Flexible Server. + The only possible value is UserAssigned. + type: string + type: object + location: + description: The Azure Region where the MySQL Flexible Server + should exist. Changing this forces a new MySQL Flexible Server + to be created. + type: string + maintenanceWindow: + description: A maintenance_window block as defined below. + properties: + dayOfWeek: + description: The day of week for maintenance window. Defaults + to 0. 
+ type: number + startHour: + description: The start hour for maintenance window. Defaults + to 0. + type: number + startMinute: + description: The start minute for maintenance window. Defaults + to 0. + type: number + type: object + pointInTimeRestoreTimeInUtc: + description: The point in time to restore from creation_source_server_id + when create_mode is PointInTimeRestore. Changing this forces + a new MySQL Flexible Server to be created. + type: string + privateDnsZoneId: + description: The ID of the private DNS zone to create the MySQL + Flexible Server. Changing this forces a new MySQL Flexible Server + to be created. + type: string + privateDnsZoneIdRef: + description: Reference to a PrivateDNSZone in network to populate + privateDnsZoneId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + privateDnsZoneIdSelector: + description: Selector for a PrivateDNSZone in network to populate + privateDnsZoneId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + replicationRole: + description: The replication role. Possible value is None. + type: string + skuName: + description: The SKU Name for the MySQL Flexible Server. + type: string + sourceServerId: + description: The resource ID of the source MySQL Flexible Server + to be restored. Required when create_mode is PointInTimeRestore, + GeoRestore, and Replica. Changing this forces a new MySQL Flexible + Server to be created. + type: string + storage: + description: A storage block as defined below. + properties: + autoGrowEnabled: + description: Should Storage Auto Grow be enabled? Defaults + to true. + type: boolean + ioScalingEnabled: + description: Should IOPS be scaled automatically? If true, + iops can not be set. Defaults to false. + type: boolean + iops: + description: The storage IOPS for the MySQL Flexible Server. + Possible values are between 360 and 20000. + type: number + sizeGb: + description: The max storage allowed for the MySQL Flexible + Server. Possible values are between 20 and 16384. 
+ type: number + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + MySQL Flexible Server. + type: object + x-kubernetes-map-type: granular + version: + description: The version of the MySQL Flexible Server to use. + Possible values are 5.7, and 8.0.21. Changing this forces a + new MySQL Flexible Server to be created. + type: string + zone: + description: Specifies the Availability Zone in which this MySQL + Flexible Server should be located. Possible values are 1, 2 + and 3. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + status: + description: FlexibleServerStatus defines the observed state of FlexibleServer. + properties: + atProvider: + properties: + administratorLogin: + description: The Administrator login for the MySQL Flexible Server. + Required when create_mode is Default. Changing this forces a + new MySQL Flexible Server to be created. + type: string + backupRetentionDays: + description: The backup retention days for the MySQL Flexible + Server. Possible values are between 1 and 35 days. Defaults + to 7. + type: number + createMode: + description: The creation mode which can be used to restore or + replicate existing servers. Possible values are Default, PointInTimeRestore, + GeoRestore, and Replica. Changing this forces a new MySQL Flexible + Server to be created. + type: string + customerManagedKey: + description: A customer_managed_key block as defined below. + properties: + geoBackupKeyVaultKeyId: + description: The ID of the geo backup Key Vault Key. It can't + cross region and need Customer Managed Key in same region + as geo backup. + type: string + geoBackupUserAssignedIdentityId: + description: The geo backup user managed identity id for a + Customer Managed Key. Should be added with identity_ids. + It can't cross region and need identity in same region as + geo backup. + type: string + keyVaultKeyId: + description: The ID of the Key Vault Key. + type: string + primaryUserAssignedIdentityId: + description: Specifies the primary user managed identity id + for a Customer Managed Key. Should be added with identity_ids. 
+ type: string + type: object + delegatedSubnetId: + description: The ID of the virtual network subnet to create the + MySQL Flexible Server. Changing this forces a new MySQL Flexible + Server to be created. + type: string + fqdn: + description: The fully qualified domain name of the MySQL Flexible + Server. + type: string + geoRedundantBackupEnabled: + description: Should geo redundant backup enabled? Defaults to + false. Changing this forces a new MySQL Flexible Server to be + created. + type: boolean + highAvailability: + description: A high_availability block as defined below. + properties: + mode: + description: The high availability mode for the MySQL Flexible + Server. Possibles values are SameZone and ZoneRedundant. + type: string + standbyAvailabilityZone: + description: Specifies the Availability Zone in which the + standby Flexible Server should be located. Possible values + are 1, 2 and 3. + type: string + type: object + id: + description: The ID of the MySQL Flexible Server. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of User Assigned Managed Identity IDs + to be assigned to this MySQL Flexible Server. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this MySQL Flexible Server. + The only possible value is UserAssigned. + type: string + type: object + location: + description: The Azure Region where the MySQL Flexible Server + should exist. Changing this forces a new MySQL Flexible Server + to be created. + type: string + maintenanceWindow: + description: A maintenance_window block as defined below. + properties: + dayOfWeek: + description: The day of week for maintenance window. Defaults + to 0. + type: number + startHour: + description: The start hour for maintenance window. Defaults + to 0. 
+ type: number + startMinute: + description: The start minute for maintenance window. Defaults + to 0. + type: number + type: object + pointInTimeRestoreTimeInUtc: + description: The point in time to restore from creation_source_server_id + when create_mode is PointInTimeRestore. Changing this forces + a new MySQL Flexible Server to be created. + type: string + privateDnsZoneId: + description: The ID of the private DNS zone to create the MySQL + Flexible Server. Changing this forces a new MySQL Flexible Server + to be created. + type: string + publicNetworkAccessEnabled: + description: Is the public network access enabled? + type: boolean + replicaCapacity: + description: The maximum number of replicas that a primary MySQL + Flexible Server can have. + type: number + replicationRole: + description: The replication role. Possible value is None. + type: string + resourceGroupName: + description: The name of the Resource Group where the MySQL Flexible + Server should exist. Changing this forces a new MySQL Flexible + Server to be created. + type: string + skuName: + description: The SKU Name for the MySQL Flexible Server. + type: string + sourceServerId: + description: The resource ID of the source MySQL Flexible Server + to be restored. Required when create_mode is PointInTimeRestore, + GeoRestore, and Replica. Changing this forces a new MySQL Flexible + Server to be created. + type: string + storage: + description: A storage block as defined below. + properties: + autoGrowEnabled: + description: Should Storage Auto Grow be enabled? Defaults + to true. + type: boolean + ioScalingEnabled: + description: Should IOPS be scaled automatically? If true, + iops can not be set. Defaults to false. + type: boolean + iops: + description: The storage IOPS for the MySQL Flexible Server. + Possible values are between 360 and 20000. + type: number + sizeGb: + description: The max storage allowed for the MySQL Flexible + Server. Possible values are between 20 and 16384. 
+ type: number + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + MySQL Flexible Server. + type: object + x-kubernetes-map-type: granular + version: + description: The version of the MySQL Flexible Server to use. + Possible values are 5.7, and 8.0.21. Changing this forces a + new MySQL Flexible Server to be created. + type: string + zone: + description: Specifies the Availability Zone in which this MySQL + Flexible Server should be located. Possible values are 1, 2 + and 3. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/dbformysql.azure.upbound.io_servers.yaml b/package/crds/dbformysql.azure.upbound.io_servers.yaml index 34079ccce..e3db5d279 100644 --- a/package/crds/dbformysql.azure.upbound.io_servers.yaml +++ b/package/crds/dbformysql.azure.upbound.io_servers.yaml @@ -885,3 +885,855 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Server is the Schema for the Servers API. Manages a MySQL Server. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ServerSpec defines the desired state of Server + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + administratorLogin: + description: The Administrator login for the MySQL Server. Required + when create_mode is Default. Changing this forces a new resource + to be created. + type: string + administratorLoginPasswordSecretRef: + description: The Password associated with the administrator_login + for the MySQL Server. Required when create_mode is Default. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + autoGrowEnabled: + description: Enable/Disable auto-growing of the storage. Storage + auto-grow prevents your server from running out of storage and + becoming read-only. If storage auto grow is enabled, the storage + automatically grows without impacting the workload. Defaults + to true. + type: boolean + backupRetentionDays: + description: Backup retention days for the server, supported values + are between 7 and 35 days. 
+ type: number + createMode: + description: The creation mode. Can be used to restore or replicate + existing servers. Possible values are Default, Replica, GeoRestore, + and PointInTimeRestore. Defaults to Default. + type: string + creationSourceServerId: + description: For creation modes other than Default, the source + server ID to use. + type: string + geoRedundantBackupEnabled: + description: Turn Geo-redundant server backups on/off. This allows + you to choose between locally redundant or geo-redundant backup + storage in the General Purpose and Memory Optimized tiers. When + the backups are stored in geo-redundant backup storage, they + are not only stored within the region in which your server is + hosted, but are also replicated to a paired data center. This + provides better protection and ability to restore your server + in a different region in the event of a disaster. This is not + supported for the Basic tier. + type: boolean + identity: + description: An identity block as defined below. + properties: + type: + description: Specifies the type of Managed Service Identity + that should be configured on this MySQL Server. The only + possible value is SystemAssigned. + type: string + type: object + infrastructureEncryptionEnabled: + description: Whether or not infrastructure is encrypted for this + server. Changing this forces a new resource to be created. + type: boolean + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + publicNetworkAccessEnabled: + description: Whether or not public network access is allowed for + this server. Defaults to true. + type: boolean + resourceGroupName: + description: The name of the resource group in which to create + the MySQL Server. Changing this forces a new resource to be + created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + restorePointInTime: + description: When create_mode is PointInTimeRestore, specifies + the point in time to restore from creation_source_server_id. + It should be provided in RFC3339 format, e.g. 2013-11-08T22:00:40Z. + type: string + skuName: + description: Specifies the SKU Name for this MySQL Server. The + name of the SKU, follows the tier + family + cores pattern (e.g. + B_Gen4_1, GP_Gen5_8). For more information see the product documentation. + Possible values are B_Gen4_1, B_Gen4_2, B_Gen5_1, B_Gen5_2, + GP_Gen4_2, GP_Gen4_4, GP_Gen4_8, GP_Gen4_16, GP_Gen4_32, GP_Gen5_2, + GP_Gen5_4, GP_Gen5_8, GP_Gen5_16, GP_Gen5_32, GP_Gen5_64, MO_Gen5_2, + MO_Gen5_4, MO_Gen5_8, MO_Gen5_16 and MO_Gen5_32. + type: string + sslEnforcementEnabled: + description: Specifies if SSL should be enforced on connections. + Possible values are true and false. + type: boolean + sslMinimalTlsVersionEnforced: + description: The minimum TLS version to support on the sever. + Possible values are TLSEnforcementDisabled, TLS1_0, TLS1_1, + and TLS1_2. Defaults to TLS1_2. + type: string + storageMb: + description: Max storage allowed for a server. Possible values + are between 5120 MB(5GB) and 1048576 MB(1TB) for the Basic SKU + and between 5120 MB(5GB) and 16777216 MB(16TB) for General Purpose/Memory + Optimized SKUs. For more information see the product documentation. + type: number + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + threatDetectionPolicy: + description: Threat detection policy configuration, known in the + API as Server Security Alerts Policy. The threat_detection_policy + block supports fields documented below. + properties: + disabledAlerts: + description: Specifies a list of alerts which should be disabled. 
+ Possible values are Sql_Injection, Sql_Injection_Vulnerability, + Access_Anomaly, Data_Exfiltration and Unsafe_Action. + items: + type: string + type: array + x-kubernetes-list-type: set + emailAccountAdmins: + description: Should the account administrators be emailed + when this alert is triggered? + type: boolean + emailAddresses: + description: A list of email addresses which alerts should + be sent to. + items: + type: string + type: array + x-kubernetes-list-type: set + enabled: + description: Is the policy enabled? + type: boolean + retentionDays: + description: Specifies the number of days to keep in the Threat + Detection audit logs. + type: number + storageAccountAccessKeySecretRef: + description: Specifies the identifier key of the Threat Detection + audit storage account. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + storageEndpoint: + description: Specifies the blob storage endpoint (e.g. https://example.blob.core.windows.net). + This blob storage will hold all Threat Detection audit logs. + type: string + type: object + version: + description: Specifies the version of MySQL to use. Valid values + are 5.7, or 8.0. Changing this forces a new resource to be created. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + administratorLogin: + description: The Administrator login for the MySQL Server. Required + when create_mode is Default. Changing this forces a new resource + to be created. + type: string + autoGrowEnabled: + description: Enable/Disable auto-growing of the storage. Storage + auto-grow prevents your server from running out of storage and + becoming read-only. If storage auto grow is enabled, the storage + automatically grows without impacting the workload. Defaults + to true. + type: boolean + backupRetentionDays: + description: Backup retention days for the server, supported values + are between 7 and 35 days. + type: number + createMode: + description: The creation mode. Can be used to restore or replicate + existing servers. Possible values are Default, Replica, GeoRestore, + and PointInTimeRestore. Defaults to Default. + type: string + creationSourceServerId: + description: For creation modes other than Default, the source + server ID to use. + type: string + geoRedundantBackupEnabled: + description: Turn Geo-redundant server backups on/off. This allows + you to choose between locally redundant or geo-redundant backup + storage in the General Purpose and Memory Optimized tiers. When + the backups are stored in geo-redundant backup storage, they + are not only stored within the region in which your server is + hosted, but are also replicated to a paired data center. This + provides better protection and ability to restore your server + in a different region in the event of a disaster. This is not + supported for the Basic tier. + type: boolean + identity: + description: An identity block as defined below. + properties: + type: + description: Specifies the type of Managed Service Identity + that should be configured on this MySQL Server. 
The only + possible value is SystemAssigned. + type: string + type: object + infrastructureEncryptionEnabled: + description: Whether or not infrastructure is encrypted for this + server. Changing this forces a new resource to be created. + type: boolean + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + publicNetworkAccessEnabled: + description: Whether or not public network access is allowed for + this server. Defaults to true. + type: boolean + restorePointInTime: + description: When create_mode is PointInTimeRestore, specifies + the point in time to restore from creation_source_server_id. + It should be provided in RFC3339 format, e.g. 2013-11-08T22:00:40Z. + type: string + skuName: + description: Specifies the SKU Name for this MySQL Server. The + name of the SKU, follows the tier + family + cores pattern (e.g. + B_Gen4_1, GP_Gen5_8). For more information see the product documentation. + Possible values are B_Gen4_1, B_Gen4_2, B_Gen5_1, B_Gen5_2, + GP_Gen4_2, GP_Gen4_4, GP_Gen4_8, GP_Gen4_16, GP_Gen4_32, GP_Gen5_2, + GP_Gen5_4, GP_Gen5_8, GP_Gen5_16, GP_Gen5_32, GP_Gen5_64, MO_Gen5_2, + MO_Gen5_4, MO_Gen5_8, MO_Gen5_16 and MO_Gen5_32. + type: string + sslEnforcementEnabled: + description: Specifies if SSL should be enforced on connections. + Possible values are true and false. + type: boolean + sslMinimalTlsVersionEnforced: + description: The minimum TLS version to support on the sever. + Possible values are TLSEnforcementDisabled, TLS1_0, TLS1_1, + and TLS1_2. Defaults to TLS1_2. + type: string + storageMb: + description: Max storage allowed for a server. Possible values + are between 5120 MB(5GB) and 1048576 MB(1TB) for the Basic SKU + and between 5120 MB(5GB) and 16777216 MB(16TB) for General Purpose/Memory + Optimized SKUs. For more information see the product documentation. 
+ type: number + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + threatDetectionPolicy: + description: Threat detection policy configuration, known in the + API as Server Security Alerts Policy. The threat_detection_policy + block supports fields documented below. + properties: + disabledAlerts: + description: Specifies a list of alerts which should be disabled. + Possible values are Sql_Injection, Sql_Injection_Vulnerability, + Access_Anomaly, Data_Exfiltration and Unsafe_Action. + items: + type: string + type: array + x-kubernetes-list-type: set + emailAccountAdmins: + description: Should the account administrators be emailed + when this alert is triggered? + type: boolean + emailAddresses: + description: A list of email addresses which alerts should + be sent to. + items: + type: string + type: array + x-kubernetes-list-type: set + enabled: + description: Is the policy enabled? + type: boolean + retentionDays: + description: Specifies the number of days to keep in the Threat + Detection audit logs. + type: number + storageEndpoint: + description: Specifies the blob storage endpoint (e.g. https://example.blob.core.windows.net). + This blob storage will hold all Threat Detection audit logs. + type: string + type: object + version: + description: Specifies the version of MySQL to use. Valid values + are 5.7, or 8.0. Changing this forces a new resource to be created. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. 
Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.skuName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.skuName) + || (has(self.initProvider) && has(self.initProvider.skuName))' + - message: spec.forProvider.sslEnforcementEnabled is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.sslEnforcementEnabled) + || (has(self.initProvider) && has(self.initProvider.sslEnforcementEnabled))' + - message: spec.forProvider.version is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.version) + || (has(self.initProvider) && has(self.initProvider.version))' + status: + description: ServerStatus defines the observed state of Server. + properties: + atProvider: + properties: + administratorLogin: + description: The Administrator login for the MySQL Server. Required + when create_mode is Default. Changing this forces a new resource + to be created. + type: string + autoGrowEnabled: + description: Enable/Disable auto-growing of the storage. Storage + auto-grow prevents your server from running out of storage and + becoming read-only. If storage auto grow is enabled, the storage + automatically grows without impacting the workload. Defaults + to true. 
+ type: boolean + backupRetentionDays: + description: Backup retention days for the server, supported values + are between 7 and 35 days. + type: number + createMode: + description: The creation mode. Can be used to restore or replicate + existing servers. Possible values are Default, Replica, GeoRestore, + and PointInTimeRestore. Defaults to Default. + type: string + creationSourceServerId: + description: For creation modes other than Default, the source + server ID to use. + type: string + fqdn: + description: The FQDN of the MySQL Server. + type: string + geoRedundantBackupEnabled: + description: Turn Geo-redundant server backups on/off. This allows + you to choose between locally redundant or geo-redundant backup + storage in the General Purpose and Memory Optimized tiers. When + the backups are stored in geo-redundant backup storage, they + are not only stored within the region in which your server is + hosted, but are also replicated to a paired data center. This + provides better protection and ability to restore your server + in a different region in the event of a disaster. This is not + supported for the Basic tier. + type: boolean + id: + description: The ID of the MySQL Server. + type: string + identity: + description: An identity block as defined below. + properties: + principalId: + description: The Principal ID associated with this Managed + Service Identity. + type: string + tenantId: + description: The Tenant ID associated with this Managed Service + Identity. + type: string + type: + description: Specifies the type of Managed Service Identity + that should be configured on this MySQL Server. The only + possible value is SystemAssigned. + type: string + type: object + infrastructureEncryptionEnabled: + description: Whether or not infrastructure is encrypted for this + server. Changing this forces a new resource to be created. + type: boolean + location: + description: Specifies the supported Azure location where the + resource exists. 
Changing this forces a new resource to be created. + type: string + publicNetworkAccessEnabled: + description: Whether or not public network access is allowed for + this server. Defaults to true. + type: boolean + resourceGroupName: + description: The name of the resource group in which to create + the MySQL Server. Changing this forces a new resource to be + created. + type: string + restorePointInTime: + description: When create_mode is PointInTimeRestore, specifies + the point in time to restore from creation_source_server_id. + It should be provided in RFC3339 format, e.g. 2013-11-08T22:00:40Z. + type: string + skuName: + description: Specifies the SKU Name for this MySQL Server. The + name of the SKU, follows the tier + family + cores pattern (e.g. + B_Gen4_1, GP_Gen5_8). For more information see the product documentation. + Possible values are B_Gen4_1, B_Gen4_2, B_Gen5_1, B_Gen5_2, + GP_Gen4_2, GP_Gen4_4, GP_Gen4_8, GP_Gen4_16, GP_Gen4_32, GP_Gen5_2, + GP_Gen5_4, GP_Gen5_8, GP_Gen5_16, GP_Gen5_32, GP_Gen5_64, MO_Gen5_2, + MO_Gen5_4, MO_Gen5_8, MO_Gen5_16 and MO_Gen5_32. + type: string + sslEnforcementEnabled: + description: Specifies if SSL should be enforced on connections. + Possible values are true and false. + type: boolean + sslMinimalTlsVersionEnforced: + description: The minimum TLS version to support on the sever. + Possible values are TLSEnforcementDisabled, TLS1_0, TLS1_1, + and TLS1_2. Defaults to TLS1_2. + type: string + storageMb: + description: Max storage allowed for a server. Possible values + are between 5120 MB(5GB) and 1048576 MB(1TB) for the Basic SKU + and between 5120 MB(5GB) and 16777216 MB(16TB) for General Purpose/Memory + Optimized SKUs. For more information see the product documentation. + type: number + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. 
+ type: object + x-kubernetes-map-type: granular + threatDetectionPolicy: + description: Threat detection policy configuration, known in the + API as Server Security Alerts Policy. The threat_detection_policy + block supports fields documented below. + properties: + disabledAlerts: + description: Specifies a list of alerts which should be disabled. + Possible values are Sql_Injection, Sql_Injection_Vulnerability, + Access_Anomaly, Data_Exfiltration and Unsafe_Action. + items: + type: string + type: array + x-kubernetes-list-type: set + emailAccountAdmins: + description: Should the account administrators be emailed + when this alert is triggered? + type: boolean + emailAddresses: + description: A list of email addresses which alerts should + be sent to. + items: + type: string + type: array + x-kubernetes-list-type: set + enabled: + description: Is the policy enabled? + type: boolean + retentionDays: + description: Specifies the number of days to keep in the Threat + Detection audit logs. + type: number + storageEndpoint: + description: Specifies the blob storage endpoint (e.g. https://example.blob.core.windows.net). + This blob storage will hold all Threat Detection audit logs. + type: string + type: object + version: + description: Specifies the version of MySQL to use. Valid values + are 5.7, or 8.0. Changing this forces a new resource to be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/dbforpostgresql.azure.upbound.io_flexibleservers.yaml b/package/crds/dbforpostgresql.azure.upbound.io_flexibleservers.yaml index d2eb0c5e4..5c9158f9e 100644 --- a/package/crds/dbforpostgresql.azure.upbound.io_flexibleservers.yaml +++ b/package/crds/dbforpostgresql.azure.upbound.io_flexibleservers.yaml @@ -1304,3 +1304,1256 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: FlexibleServer is the Schema for the FlexibleServers API. Manages + a PostgreSQL Flexible Server. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: FlexibleServerSpec defines the desired state of FlexibleServer + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + administratorLogin: + description: The Administrator login for the PostgreSQL Flexible + Server. Required when create_mode is Default and authentication.password_auth_enabled + is true. + type: string + administratorPasswordSecretRef: + description: |- + The Password associated with the administrator_login for the PostgreSQL Flexible Server. Required when create_mode is Default and authentication.password_auth_enabled is true. + Password for the master DB user. If you set autoGeneratePassword to true, the Secret referenced here will be created or updated with generated password if it does not already contain one. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + authentication: + description: An authentication block as defined below. 
+ properties: + activeDirectoryAuthEnabled: + description: Whether or not Active Directory authentication + is allowed to access the PostgreSQL Flexible Server. Defaults + to false. + type: boolean + passwordAuthEnabled: + description: Whether or not password authentication is allowed + to access the PostgreSQL Flexible Server. Defaults to true. + type: boolean + tenantId: + description: The Tenant ID of the Azure Active Directory which + is used by the Active Directory authentication. active_directory_auth_enabled + must be set to true. + type: string + type: object + autoGeneratePassword: + description: If true, the password will be auto-generated and + stored in the Secret referenced by the administratorPasswordSecretRef + field. + type: boolean + autoGrowEnabled: + description: Is the storage auto grow for PostgreSQL Flexible + Server enabled? Defaults to false. + type: boolean + backupRetentionDays: + description: The backup retention days for the PostgreSQL Flexible + Server. Possible values are between 7 and 35 days. + type: number + createMode: + description: The creation mode which can be used to restore or + replicate existing servers. Possible values are Default, PointInTimeRestore, + Replica and Update. + type: string + customerManagedKey: + description: A customer_managed_key block as defined below. Changing + this forces a new resource to be created. + properties: + geoBackupKeyVaultKeyId: + description: The ID of the geo backup Key Vault Key. It can't + cross region and need Customer Managed Key in same region + as geo backup. + type: string + geoBackupUserAssignedIdentityId: + description: The geo backup user managed identity id for a + Customer Managed Key. Should be added with identity_ids. + It can't cross region and need identity in same region as + geo backup. + type: string + keyVaultKeyId: + description: The ID of the Key Vault Key. 
+ type: string + primaryUserAssignedIdentityId: + description: Specifies the primary user managed identity id + for a Customer Managed Key. Should be added with identity_ids. + type: string + type: object + delegatedSubnetId: + description: The ID of the virtual network subnet to create the + PostgreSQL Flexible Server. The provided subnet should not have + any other resource deployed in it and this subnet will be delegated + to the PostgreSQL Flexible Server, if not already delegated. + Changing this forces a new PostgreSQL Flexible Server to be + created. + type: string + delegatedSubnetIdRef: + description: Reference to a Subnet in network to populate delegatedSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + delegatedSubnetIdSelector: + description: Selector for a Subnet in network to populate delegatedSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + geoRedundantBackupEnabled: + description: Is Geo-Redundant backup enabled on the PostgreSQL + Flexible Server. Defaults to false. Changing this forces a new + PostgreSQL Flexible Server to be created. + type: boolean + highAvailability: + description: A high_availability block as defined below. + properties: + mode: + description: The high availability mode for the PostgreSQL + Flexible Server. Possible value are SameZone or ZoneRedundant. + type: string + standbyAvailabilityZone: + description: Specifies the Availability Zone in which the + standby Flexible Server should be located. + type: string + type: object + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of User Assigned Managed Identity IDs + to be assigned to this PostgreSQL Flexible Server. Required + if used together with customer_managed_key block. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this PostgreSQL Flexible Server. + The only possible value is UserAssigned. 
+ type: string + type: object + location: + description: The Azure Region where the PostgreSQL Flexible Server + should exist. Changing this forces a new PostgreSQL Flexible + Server to be created. + type: string + maintenanceWindow: + description: A maintenance_window block as defined below. + properties: + dayOfWeek: + description: The day of week for maintenance window, where + the week starts on a Sunday, i.e. Sunday = 0, Monday = 1. + Defaults to 0. + type: number + startHour: + description: The start hour for maintenance window. Defaults + to 0. + type: number + startMinute: + description: The start minute for maintenance window. Defaults + to 0. + type: number + type: object + pointInTimeRestoreTimeInUtc: + description: The point in time to restore from source_server_id + when create_mode is PointInTimeRestore. Changing this forces + a new PostgreSQL Flexible Server to be created. + type: string + privateDnsZoneId: + description: The ID of the private DNS zone to create the PostgreSQL + Flexible Server. + type: string + privateDnsZoneIdRef: + description: Reference to a PrivateDNSZone in network to populate + privateDnsZoneId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + privateDnsZoneIdSelector: + description: Selector for a PrivateDNSZone in network to populate + privateDnsZoneId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + replicationRole: + description: The replication role for the PostgreSQL Flexible + Server. Possible value is None. + type: string + resourceGroupName: + description: The name of the Resource Group where the PostgreSQL + Flexible Server should exist. Changing this forces a new PostgreSQL + Flexible Server to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + skuName: + description: The SKU Name for the PostgreSQL Flexible Server. + The name of the SKU, follows the tier + name pattern (e.g. B_Standard_B1ms, + GP_Standard_D2s_v3, MO_Standard_E4s_v3). + type: string + sourceServerId: + description: The resource ID of the source PostgreSQL Flexible + Server to be restored. Required when create_mode is PointInTimeRestore + or Replica. Changing this forces a new PostgreSQL Flexible Server + to be created. + type: string + storageMb: + description: The max storage allowed for the PostgreSQL Flexible + Server. Possible values are 32768, 65536, 131072, 262144, 524288, + 1048576, 2097152, 4193280, 4194304, 8388608, 16777216 and 33553408. + type: number + storageTier: + description: The name of storage performance tier for IOPS of + the PostgreSQL Flexible Server. Possible values are P4, P6, + P10, P15,P20, P30,P40, P50,P60, P70 or P80. Default value is + dependant on the storage_mb value. Please see the storage_tier + defaults based on storage_mb table below. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + PostgreSQL Flexible Server. + type: object + x-kubernetes-map-type: granular + version: + description: The version of PostgreSQL Flexible Server to use. + Possible values are 11,12, 13, 14, 15 and 16. Required when + create_mode is Default. + type: string + zone: + description: Specifies the Availability Zone in which the PostgreSQL + Flexible Server should be located. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. 
+ The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + administratorLogin: + description: The Administrator login for the PostgreSQL Flexible + Server. Required when create_mode is Default and authentication.password_auth_enabled + is true. + type: string + authentication: + description: An authentication block as defined below. + properties: + activeDirectoryAuthEnabled: + description: Whether or not Active Directory authentication + is allowed to access the PostgreSQL Flexible Server. Defaults + to false. + type: boolean + passwordAuthEnabled: + description: Whether or not password authentication is allowed + to access the PostgreSQL Flexible Server. Defaults to true. + type: boolean + tenantId: + description: The Tenant ID of the Azure Active Directory which + is used by the Active Directory authentication. active_directory_auth_enabled + must be set to true. + type: string + type: object + autoGrowEnabled: + description: Is the storage auto grow for PostgreSQL Flexible + Server enabled? Defaults to false. + type: boolean + backupRetentionDays: + description: The backup retention days for the PostgreSQL Flexible + Server. Possible values are between 7 and 35 days. + type: number + createMode: + description: The creation mode which can be used to restore or + replicate existing servers. Possible values are Default, PointInTimeRestore, + Replica and Update. + type: string + customerManagedKey: + description: A customer_managed_key block as defined below. Changing + this forces a new resource to be created. + properties: + geoBackupKeyVaultKeyId: + description: The ID of the geo backup Key Vault Key. It can't + cross region and need Customer Managed Key in same region + as geo backup. 
+ type: string + geoBackupUserAssignedIdentityId: + description: The geo backup user managed identity id for a + Customer Managed Key. Should be added with identity_ids. + It can't cross region and need identity in same region as + geo backup. + type: string + keyVaultKeyId: + description: The ID of the Key Vault Key. + type: string + primaryUserAssignedIdentityId: + description: Specifies the primary user managed identity id + for a Customer Managed Key. Should be added with identity_ids. + type: string + type: object + delegatedSubnetId: + description: The ID of the virtual network subnet to create the + PostgreSQL Flexible Server. The provided subnet should not have + any other resource deployed in it and this subnet will be delegated + to the PostgreSQL Flexible Server, if not already delegated. + Changing this forces a new PostgreSQL Flexible Server to be + created. + type: string + delegatedSubnetIdRef: + description: Reference to a Subnet in network to populate delegatedSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + delegatedSubnetIdSelector: + description: Selector for a Subnet in network to populate delegatedSubnetId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + geoRedundantBackupEnabled: + description: Is Geo-Redundant backup enabled on the PostgreSQL + Flexible Server. Defaults to false. Changing this forces a new + PostgreSQL Flexible Server to be created. + type: boolean + highAvailability: + description: A high_availability block as defined below. + properties: + mode: + description: The high availability mode for the PostgreSQL + Flexible Server. Possible value are SameZone or ZoneRedundant. + type: string + standbyAvailabilityZone: + description: Specifies the Availability Zone in which the + standby Flexible Server should be located. + type: string + type: object + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of User Assigned Managed Identity IDs + to be assigned to this PostgreSQL Flexible Server. Required + if used together with customer_managed_key block. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this PostgreSQL Flexible Server. + The only possible value is UserAssigned. + type: string + type: object + location: + description: The Azure Region where the PostgreSQL Flexible Server + should exist. Changing this forces a new PostgreSQL Flexible + Server to be created. + type: string + maintenanceWindow: + description: A maintenance_window block as defined below. + properties: + dayOfWeek: + description: The day of week for maintenance window, where + the week starts on a Sunday, i.e. Sunday = 0, Monday = 1. + Defaults to 0. + type: number + startHour: + description: The start hour for maintenance window. Defaults + to 0. + type: number + startMinute: + description: The start minute for maintenance window. Defaults + to 0. + type: number + type: object + pointInTimeRestoreTimeInUtc: + description: The point in time to restore from source_server_id + when create_mode is PointInTimeRestore. Changing this forces + a new PostgreSQL Flexible Server to be created. + type: string + privateDnsZoneId: + description: The ID of the private DNS zone to create the PostgreSQL + Flexible Server. + type: string + privateDnsZoneIdRef: + description: Reference to a PrivateDNSZone in network to populate + privateDnsZoneId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + privateDnsZoneIdSelector: + description: Selector for a PrivateDNSZone in network to populate + privateDnsZoneId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + replicationRole: + description: The replication role for the PostgreSQL Flexible + Server. Possible value is None. + type: string + skuName: + description: The SKU Name for the PostgreSQL Flexible Server. + The name of the SKU, follows the tier + name pattern (e.g. B_Standard_B1ms, + GP_Standard_D2s_v3, MO_Standard_E4s_v3). + type: string + sourceServerId: + description: The resource ID of the source PostgreSQL Flexible + Server to be restored. 
Required when create_mode is PointInTimeRestore + or Replica. Changing this forces a new PostgreSQL Flexible Server + to be created. + type: string + storageMb: + description: The max storage allowed for the PostgreSQL Flexible + Server. Possible values are 32768, 65536, 131072, 262144, 524288, + 1048576, 2097152, 4193280, 4194304, 8388608, 16777216 and 33553408. + type: number + storageTier: + description: The name of storage performance tier for IOPS of + the PostgreSQL Flexible Server. Possible values are P4, P6, + P10, P15,P20, P30,P40, P50,P60, P70 or P80. Default value is + dependant on the storage_mb value. Please see the storage_tier + defaults based on storage_mb table below. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + PostgreSQL Flexible Server. + type: object + x-kubernetes-map-type: granular + version: + description: The version of PostgreSQL Flexible Server to use. + Possible values are 11,12, 13, 14, 15 and 16. Required when + create_mode is Default. + type: string + zone: + description: Specifies the Availability Zone in which the PostgreSQL + Flexible Server should be located. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + status: + description: FlexibleServerStatus defines the observed state of FlexibleServer. + properties: + atProvider: + properties: + administratorLogin: + description: The Administrator login for the PostgreSQL Flexible + Server. Required when create_mode is Default and authentication.password_auth_enabled + is true. + type: string + authentication: + description: An authentication block as defined below. + properties: + activeDirectoryAuthEnabled: + description: Whether or not Active Directory authentication + is allowed to access the PostgreSQL Flexible Server. Defaults + to false. 
+ type: boolean + passwordAuthEnabled: + description: Whether or not password authentication is allowed + to access the PostgreSQL Flexible Server. Defaults to true. + type: boolean + tenantId: + description: The Tenant ID of the Azure Active Directory which + is used by the Active Directory authentication. active_directory_auth_enabled + must be set to true. + type: string + type: object + autoGrowEnabled: + description: Is the storage auto grow for PostgreSQL Flexible + Server enabled? Defaults to false. + type: boolean + backupRetentionDays: + description: The backup retention days for the PostgreSQL Flexible + Server. Possible values are between 7 and 35 days. + type: number + createMode: + description: The creation mode which can be used to restore or + replicate existing servers. Possible values are Default, PointInTimeRestore, + Replica and Update. + type: string + customerManagedKey: + description: A customer_managed_key block as defined below. Changing + this forces a new resource to be created. + properties: + geoBackupKeyVaultKeyId: + description: The ID of the geo backup Key Vault Key. It can't + cross region and need Customer Managed Key in same region + as geo backup. + type: string + geoBackupUserAssignedIdentityId: + description: The geo backup user managed identity id for a + Customer Managed Key. Should be added with identity_ids. + It can't cross region and need identity in same region as + geo backup. + type: string + keyVaultKeyId: + description: The ID of the Key Vault Key. + type: string + primaryUserAssignedIdentityId: + description: Specifies the primary user managed identity id + for a Customer Managed Key. Should be added with identity_ids. + type: string + type: object + delegatedSubnetId: + description: The ID of the virtual network subnet to create the + PostgreSQL Flexible Server. 
The provided subnet should not have + any other resource deployed in it and this subnet will be delegated + to the PostgreSQL Flexible Server, if not already delegated. + Changing this forces a new PostgreSQL Flexible Server to be + created. + type: string + fqdn: + description: The FQDN of the PostgreSQL Flexible Server. + type: string + geoRedundantBackupEnabled: + description: Is Geo-Redundant backup enabled on the PostgreSQL + Flexible Server. Defaults to false. Changing this forces a new + PostgreSQL Flexible Server to be created. + type: boolean + highAvailability: + description: A high_availability block as defined below. + properties: + mode: + description: The high availability mode for the PostgreSQL + Flexible Server. Possible value are SameZone or ZoneRedundant. + type: string + standbyAvailabilityZone: + description: Specifies the Availability Zone in which the + standby Flexible Server should be located. + type: string + type: object + id: + description: The ID of the PostgreSQL Flexible Server. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of User Assigned Managed Identity IDs + to be assigned to this PostgreSQL Flexible Server. Required + if used together with customer_managed_key block. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this PostgreSQL Flexible Server. + The only possible value is UserAssigned. + type: string + type: object + location: + description: The Azure Region where the PostgreSQL Flexible Server + should exist. Changing this forces a new PostgreSQL Flexible + Server to be created. + type: string + maintenanceWindow: + description: A maintenance_window block as defined below. + properties: + dayOfWeek: + description: The day of week for maintenance window, where + the week starts on a Sunday, i.e. Sunday = 0, Monday = 1. 
+ Defaults to 0. + type: number + startHour: + description: The start hour for maintenance window. Defaults + to 0. + type: number + startMinute: + description: The start minute for maintenance window. Defaults + to 0. + type: number + type: object + pointInTimeRestoreTimeInUtc: + description: The point in time to restore from source_server_id + when create_mode is PointInTimeRestore. Changing this forces + a new PostgreSQL Flexible Server to be created. + type: string + privateDnsZoneId: + description: The ID of the private DNS zone to create the PostgreSQL + Flexible Server. + type: string + publicNetworkAccessEnabled: + description: Is public network access enabled? + type: boolean + replicationRole: + description: The replication role for the PostgreSQL Flexible + Server. Possible value is None. + type: string + resourceGroupName: + description: The name of the Resource Group where the PostgreSQL + Flexible Server should exist. Changing this forces a new PostgreSQL + Flexible Server to be created. + type: string + skuName: + description: The SKU Name for the PostgreSQL Flexible Server. + The name of the SKU, follows the tier + name pattern (e.g. B_Standard_B1ms, + GP_Standard_D2s_v3, MO_Standard_E4s_v3). + type: string + sourceServerId: + description: The resource ID of the source PostgreSQL Flexible + Server to be restored. Required when create_mode is PointInTimeRestore + or Replica. Changing this forces a new PostgreSQL Flexible Server + to be created. + type: string + storageMb: + description: The max storage allowed for the PostgreSQL Flexible + Server. Possible values are 32768, 65536, 131072, 262144, 524288, + 1048576, 2097152, 4193280, 4194304, 8388608, 16777216 and 33553408. + type: number + storageTier: + description: The name of storage performance tier for IOPS of + the PostgreSQL Flexible Server. Possible values are P4, P6, + P10, P15,P20, P30,P40, P50,P60, P70 or P80. Default value is + dependant on the storage_mb value. 
Please see the storage_tier + defaults based on storage_mb table below. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + PostgreSQL Flexible Server. + type: object + x-kubernetes-map-type: granular + version: + description: The version of PostgreSQL Flexible Server to use. + Possible values are 11,12, 13, 14, 15 and 16. Required when + create_mode is Default. + type: string + zone: + description: Specifies the Availability Zone in which the PostgreSQL + Flexible Server should be located. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/dbforpostgresql.azure.upbound.io_servers.yaml b/package/crds/dbforpostgresql.azure.upbound.io_servers.yaml index 80128d8d7..e180d8356 100644 --- a/package/crds/dbforpostgresql.azure.upbound.io_servers.yaml +++ b/package/crds/dbforpostgresql.azure.upbound.io_servers.yaml @@ -892,3 +892,862 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Server is the Schema for the Servers API. Manages a PostgreSQL + Server. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ServerSpec defines the desired state of Server + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + administratorLogin: + description: The Administrator login for the PostgreSQL Server. + Required when create_mode is Default. Changing this forces a + new resource to be created. + type: string + administratorLoginPasswordSecretRef: + description: The Password associated with the administrator_login + for the PostgreSQL Server. Required when create_mode is Default. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + autoGrowEnabled: + description: Enable/Disable auto-growing of the storage. Storage + auto-grow prevents your server from running out of storage and + becoming read-only. If storage auto grow is enabled, the storage + automatically grows without impacting the workload. Defaults + to true. + type: boolean + backupRetentionDays: + description: Backup retention days for the server, supported values + are between 7 and 35 days. 
+ type: number + createMode: + description: The creation mode. Can be used to restore or replicate + existing servers. Possible values are Default, Replica, GeoRestore, + and PointInTimeRestore. Defaults to Default. + type: string + creationSourceServerId: + description: For creation modes other than Default, the source + server ID to use. + type: string + geoRedundantBackupEnabled: + description: Turn Geo-redundant server backups on/off. This allows + you to choose between locally redundant or geo-redundant backup + storage in the General Purpose and Memory Optimized tiers. When + the backups are stored in geo-redundant backup storage, they + are not only stored within the region in which your server is + hosted, but are also replicated to a paired data center. This + provides better protection and ability to restore your server + in a different region in the event of a disaster. This is not + support for the Basic tier. Changing this forces a new resource + to be created. + type: boolean + identity: + description: An identity block as defined below. + properties: + type: + description: Specifies the type of Managed Service Identity + that should be configured on this PostgreSQL Server. The + only possible value is SystemAssigned. + type: string + type: object + infrastructureEncryptionEnabled: + description: Whether or not infrastructure is encrypted for this + server. Changing this forces a new resource to be created. + type: boolean + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + publicNetworkAccessEnabled: + description: Whether or not public network access is allowed for + this server. Defaults to true. + type: boolean + resourceGroupName: + description: The name of the resource group in which to create + the PostgreSQL Server. Changing this forces a new resource to + be created. 
+ type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + restorePointInTime: + description: When create_mode is PointInTimeRestore the point + in time to restore from creation_source_server_id. It should + be provided in RFC3339 format, e.g. 2013-11-08T22:00:40Z. + type: string + skuName: + description: Specifies the SKU Name for this PostgreSQL Server. + The name of the SKU, follows the tier + family + cores pattern + (e.g. B_Gen4_1, GP_Gen5_8). For more information see the product + documentation. Possible values are B_Gen4_1, B_Gen4_2, B_Gen5_1, + B_Gen5_2, GP_Gen4_2, GP_Gen4_4, GP_Gen4_8, GP_Gen4_16, GP_Gen4_32, + GP_Gen5_2, GP_Gen5_4, GP_Gen5_8, GP_Gen5_16, GP_Gen5_32, GP_Gen5_64, + MO_Gen5_2, MO_Gen5_4, MO_Gen5_8, MO_Gen5_16 and MO_Gen5_32. + type: string + sslEnforcementEnabled: + description: Specifies if SSL should be enforced on connections. + Possible values are true and false. + type: boolean + sslMinimalTlsVersionEnforced: + description: The minimum TLS version to support on the sever. + Possible values are TLSEnforcementDisabled, TLS1_0, TLS1_1, + and TLS1_2. Defaults to TLS1_2. + type: string + storageMb: + description: Max storage allowed for a server. Possible values + are between 5120 MB(5GB) and 1048576 MB(1TB) for the Basic SKU + and between 5120 MB(5GB) and 16777216 MB(16TB) for General Purpose/Memory + Optimized SKUs. For more information see the product documentation. + type: number + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + threatDetectionPolicy: + description: Threat detection policy configuration, known in the + API as Server Security Alerts Policy. 
The threat_detection_policy + block supports fields documented below. + properties: + disabledAlerts: + description: Specifies a list of alerts which should be disabled. + Possible values are Sql_Injection, Sql_Injection_Vulnerability, + Access_Anomaly, Data_Exfiltration and Unsafe_Action. + items: + type: string + type: array + x-kubernetes-list-type: set + emailAccountAdmins: + description: Should the account administrators be emailed + when this alert is triggered? + type: boolean + emailAddresses: + description: A list of email addresses which alerts should + be sent to. + items: + type: string + type: array + x-kubernetes-list-type: set + enabled: + description: Is the policy enabled? + type: boolean + retentionDays: + description: Specifies the number of days to keep in the Threat + Detection audit logs. + type: number + storageAccountAccessKeySecretRef: + description: Specifies the identifier key of the Threat Detection + audit storage account. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + storageEndpoint: + description: Specifies the blob storage endpoint (e.g. https://example.blob.core.windows.net). + This blob storage will hold all Threat Detection audit logs. + type: string + type: object + version: + description: Specifies the version of PostgreSQL to use. Valid + values are 9.5, 9.6, 10, 10.0, 10.2 and 11. Changing this forces + a new resource to be created. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. 
+ The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + administratorLogin: + description: The Administrator login for the PostgreSQL Server. + Required when create_mode is Default. Changing this forces a + new resource to be created. + type: string + autoGrowEnabled: + description: Enable/Disable auto-growing of the storage. Storage + auto-grow prevents your server from running out of storage and + becoming read-only. If storage auto grow is enabled, the storage + automatically grows without impacting the workload. Defaults + to true. + type: boolean + backupRetentionDays: + description: Backup retention days for the server, supported values + are between 7 and 35 days. + type: number + createMode: + description: The creation mode. Can be used to restore or replicate + existing servers. Possible values are Default, Replica, GeoRestore, + and PointInTimeRestore. Defaults to Default. + type: string + creationSourceServerId: + description: For creation modes other than Default, the source + server ID to use. + type: string + geoRedundantBackupEnabled: + description: Turn Geo-redundant server backups on/off. This allows + you to choose between locally redundant or geo-redundant backup + storage in the General Purpose and Memory Optimized tiers. When + the backups are stored in geo-redundant backup storage, they + are not only stored within the region in which your server is + hosted, but are also replicated to a paired data center. This + provides better protection and ability to restore your server + in a different region in the event of a disaster. This is not + support for the Basic tier. Changing this forces a new resource + to be created. 
+ type: boolean + identity: + description: An identity block as defined below. + properties: + type: + description: Specifies the type of Managed Service Identity + that should be configured on this PostgreSQL Server. The + only possible value is SystemAssigned. + type: string + type: object + infrastructureEncryptionEnabled: + description: Whether or not infrastructure is encrypted for this + server. Changing this forces a new resource to be created. + type: boolean + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + publicNetworkAccessEnabled: + description: Whether or not public network access is allowed for + this server. Defaults to true. + type: boolean + restorePointInTime: + description: When create_mode is PointInTimeRestore the point + in time to restore from creation_source_server_id. It should + be provided in RFC3339 format, e.g. 2013-11-08T22:00:40Z. + type: string + skuName: + description: Specifies the SKU Name for this PostgreSQL Server. + The name of the SKU, follows the tier + family + cores pattern + (e.g. B_Gen4_1, GP_Gen5_8). For more information see the product + documentation. Possible values are B_Gen4_1, B_Gen4_2, B_Gen5_1, + B_Gen5_2, GP_Gen4_2, GP_Gen4_4, GP_Gen4_8, GP_Gen4_16, GP_Gen4_32, + GP_Gen5_2, GP_Gen5_4, GP_Gen5_8, GP_Gen5_16, GP_Gen5_32, GP_Gen5_64, + MO_Gen5_2, MO_Gen5_4, MO_Gen5_8, MO_Gen5_16 and MO_Gen5_32. + type: string + sslEnforcementEnabled: + description: Specifies if SSL should be enforced on connections. + Possible values are true and false. + type: boolean + sslMinimalTlsVersionEnforced: + description: The minimum TLS version to support on the sever. + Possible values are TLSEnforcementDisabled, TLS1_0, TLS1_1, + and TLS1_2. Defaults to TLS1_2. + type: string + storageMb: + description: Max storage allowed for a server. 
Possible values + are between 5120 MB(5GB) and 1048576 MB(1TB) for the Basic SKU + and between 5120 MB(5GB) and 16777216 MB(16TB) for General Purpose/Memory + Optimized SKUs. For more information see the product documentation. + type: number + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + threatDetectionPolicy: + description: Threat detection policy configuration, known in the + API as Server Security Alerts Policy. The threat_detection_policy + block supports fields documented below. + properties: + disabledAlerts: + description: Specifies a list of alerts which should be disabled. + Possible values are Sql_Injection, Sql_Injection_Vulnerability, + Access_Anomaly, Data_Exfiltration and Unsafe_Action. + items: + type: string + type: array + x-kubernetes-list-type: set + emailAccountAdmins: + description: Should the account administrators be emailed + when this alert is triggered? + type: boolean + emailAddresses: + description: A list of email addresses which alerts should + be sent to. + items: + type: string + type: array + x-kubernetes-list-type: set + enabled: + description: Is the policy enabled? + type: boolean + retentionDays: + description: Specifies the number of days to keep in the Threat + Detection audit logs. + type: number + storageEndpoint: + description: Specifies the blob storage endpoint (e.g. https://example.blob.core.windows.net). + This blob storage will hold all Threat Detection audit logs. + type: string + type: object + version: + description: Specifies the version of PostgreSQL to use. Valid + values are 9.5, 9.6, 10, 10.0, 10.2 and 11. Changing this forces + a new resource to be created. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. 
+ ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.skuName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.skuName) + || (has(self.initProvider) && has(self.initProvider.skuName))' + - message: spec.forProvider.sslEnforcementEnabled is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.sslEnforcementEnabled) + || (has(self.initProvider) && has(self.initProvider.sslEnforcementEnabled))' + - message: spec.forProvider.version is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.version) + || (has(self.initProvider) && has(self.initProvider.version))' + status: + description: ServerStatus defines the observed state of Server. + properties: + atProvider: + properties: + administratorLogin: + description: The Administrator login for the PostgreSQL Server. + Required when create_mode is Default. Changing this forces a + new resource to be created. + type: string + autoGrowEnabled: + description: Enable/Disable auto-growing of the storage. Storage + auto-grow prevents your server from running out of storage and + becoming read-only. If storage auto grow is enabled, the storage + automatically grows without impacting the workload. Defaults + to true. 
+ type: boolean + backupRetentionDays: + description: Backup retention days for the server, supported values + are between 7 and 35 days. + type: number + createMode: + description: The creation mode. Can be used to restore or replicate + existing servers. Possible values are Default, Replica, GeoRestore, + and PointInTimeRestore. Defaults to Default. + type: string + creationSourceServerId: + description: For creation modes other than Default, the source + server ID to use. + type: string + fqdn: + description: The FQDN of the PostgreSQL Server. + type: string + geoRedundantBackupEnabled: + description: Turn Geo-redundant server backups on/off. This allows + you to choose between locally redundant or geo-redundant backup + storage in the General Purpose and Memory Optimized tiers. When + the backups are stored in geo-redundant backup storage, they + are not only stored within the region in which your server is + hosted, but are also replicated to a paired data center. This + provides better protection and ability to restore your server + in a different region in the event of a disaster. This is not + support for the Basic tier. Changing this forces a new resource + to be created. + type: boolean + id: + description: The ID of the PostgreSQL Server. + type: string + identity: + description: An identity block as defined below. + properties: + principalId: + description: The Principal ID associated with this Managed + Service Identity. + type: string + tenantId: + description: The Tenant ID associated with this Managed Service + Identity. + type: string + type: + description: Specifies the type of Managed Service Identity + that should be configured on this PostgreSQL Server. The + only possible value is SystemAssigned. + type: string + type: object + infrastructureEncryptionEnabled: + description: Whether or not infrastructure is encrypted for this + server. Changing this forces a new resource to be created. 
+ type: boolean + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + publicNetworkAccessEnabled: + description: Whether or not public network access is allowed for + this server. Defaults to true. + type: boolean + resourceGroupName: + description: The name of the resource group in which to create + the PostgreSQL Server. Changing this forces a new resource to + be created. + type: string + restorePointInTime: + description: When create_mode is PointInTimeRestore the point + in time to restore from creation_source_server_id. It should + be provided in RFC3339 format, e.g. 2013-11-08T22:00:40Z. + type: string + skuName: + description: Specifies the SKU Name for this PostgreSQL Server. + The name of the SKU, follows the tier + family + cores pattern + (e.g. B_Gen4_1, GP_Gen5_8). For more information see the product + documentation. Possible values are B_Gen4_1, B_Gen4_2, B_Gen5_1, + B_Gen5_2, GP_Gen4_2, GP_Gen4_4, GP_Gen4_8, GP_Gen4_16, GP_Gen4_32, + GP_Gen5_2, GP_Gen5_4, GP_Gen5_8, GP_Gen5_16, GP_Gen5_32, GP_Gen5_64, + MO_Gen5_2, MO_Gen5_4, MO_Gen5_8, MO_Gen5_16 and MO_Gen5_32. + type: string + sslEnforcementEnabled: + description: Specifies if SSL should be enforced on connections. + Possible values are true and false. + type: boolean + sslMinimalTlsVersionEnforced: + description: The minimum TLS version to support on the sever. + Possible values are TLSEnforcementDisabled, TLS1_0, TLS1_1, + and TLS1_2. Defaults to TLS1_2. + type: string + storageMb: + description: Max storage allowed for a server. Possible values + are between 5120 MB(5GB) and 1048576 MB(1TB) for the Basic SKU + and between 5120 MB(5GB) and 16777216 MB(16TB) for General Purpose/Memory + Optimized SKUs. For more information see the product documentation. + type: number + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. 
+ type: object + x-kubernetes-map-type: granular + threatDetectionPolicy: + description: Threat detection policy configuration, known in the + API as Server Security Alerts Policy. The threat_detection_policy + block supports fields documented below. + properties: + disabledAlerts: + description: Specifies a list of alerts which should be disabled. + Possible values are Sql_Injection, Sql_Injection_Vulnerability, + Access_Anomaly, Data_Exfiltration and Unsafe_Action. + items: + type: string + type: array + x-kubernetes-list-type: set + emailAccountAdmins: + description: Should the account administrators be emailed + when this alert is triggered? + type: boolean + emailAddresses: + description: A list of email addresses which alerts should + be sent to. + items: + type: string + type: array + x-kubernetes-list-type: set + enabled: + description: Is the policy enabled? + type: boolean + retentionDays: + description: Specifies the number of days to keep in the Threat + Detection audit logs. + type: number + storageEndpoint: + description: Specifies the blob storage endpoint (e.g. https://example.blob.core.windows.net). + This blob storage will hold all Threat Detection audit logs. + type: string + type: object + version: + description: Specifies the version of PostgreSQL to use. Valid + values are 9.5, 9.6, 10, 10.0, 10.2 and 11. Changing this forces + a new resource to be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/devices.azure.upbound.io_iothubdps.yaml b/package/crds/devices.azure.upbound.io_iothubdps.yaml index e9bd25bd8..ffe7f489e 100644 --- a/package/crds/devices.azure.upbound.io_iothubdps.yaml +++ b/package/crds/devices.azure.upbound.io_iothubdps.yaml @@ -683,3 +683,662 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: IOTHubDPS is the Schema for the IOTHubDPSs API. Manages an IoT + Device Provisioning Service. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: IOTHubDPSSpec defines the desired state of IOTHubDPS + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + allocationPolicy: + description: The allocation policy of the IoT Device Provisioning + Service (Hashed, GeoLatency or Static). Defaults to Hashed. + type: string + dataResidencyEnabled: + description: Specifies if the IoT Device Provisioning Service + has data residency and disaster recovery enabled. Defaults to + false. Changing this forces a new resource to be created. + type: boolean + ipFilterRule: + description: An ip_filter_rule block as defined below. + items: + properties: + action: + description: The desired action for requests captured by + this rule. Possible values are Accept, Reject + type: string + ipMask: + description: The IP address range in CIDR notation for the + rule. + type: string + name: + description: The name of the filter. + type: string + target: + description: Target for requests captured by this rule. + Possible values are all, deviceApi and serviceApi. + type: string + type: object + type: array + linkedHub: + description: A linked_hub block as defined below. 
+ items: + properties: + allocationWeight: + description: The weight applied to the IoT Hub. Defaults + to 1. + type: number + applyAllocationPolicy: + description: Determines whether to apply allocation policies + to the IoT Hub. Defaults to true. + type: boolean + connectionStringSecretRef: + description: The connection string to connect to the IoT + Hub. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + location: + description: The location of the IoT hub. + type: string + required: + - connectionStringSecretRef + type: object + type: array + location: + description: Specifies the supported Azure location where the + resource has to be created. Changing this forces a new resource + to be created. + type: string + publicNetworkAccessEnabled: + description: Whether requests from Public Network are allowed. + Defaults to true. + type: boolean + resourceGroupName: + description: The name of the resource group under which the Iot + Device Provisioning Service resource has to be created. Changing + this forces a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sku: + description: A sku block as defined below. + properties: + capacity: + description: The number of provisioned IoT Device Provisioning + Service units. + type: number + name: + description: The name of the sku. Currently can only be set + to S1. + type: string + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. 
+ type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + allocationPolicy: + description: The allocation policy of the IoT Device Provisioning + Service (Hashed, GeoLatency or Static). Defaults to Hashed. + type: string + dataResidencyEnabled: + description: Specifies if the IoT Device Provisioning Service + has data residency and disaster recovery enabled. Defaults to + false. Changing this forces a new resource to be created. + type: boolean + ipFilterRule: + description: An ip_filter_rule block as defined below. + items: + properties: + action: + description: The desired action for requests captured by + this rule. Possible values are Accept, Reject + type: string + ipMask: + description: The IP address range in CIDR notation for the + rule. + type: string + name: + description: The name of the filter. + type: string + target: + description: Target for requests captured by this rule. + Possible values are all, deviceApi and serviceApi. + type: string + type: object + type: array + linkedHub: + description: A linked_hub block as defined below. + items: + properties: + allocationWeight: + description: The weight applied to the IoT Hub. Defaults + to 1. + type: number + applyAllocationPolicy: + description: Determines whether to apply allocation policies + to the IoT Hub. 
Defaults to true. + type: boolean + location: + description: The location of the IoT hub. + type: string + type: object + type: array + location: + description: Specifies the supported Azure location where the + resource has to be created. Changing this forces a new resource + to be created. + type: string + publicNetworkAccessEnabled: + description: Whether requests from Public Network are allowed. + Defaults to true. + type: boolean + sku: + description: A sku block as defined below. + properties: + capacity: + description: The number of provisioned IoT Device Provisioning + Service units. + type: number + name: + description: The name of the sku. Currently can only be set + to S1. + type: string + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.sku is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.sku) + || (has(self.initProvider) && has(self.initProvider.sku))' + status: + description: IOTHubDPSStatus defines the observed state of IOTHubDPS. + properties: + atProvider: + properties: + allocationPolicy: + description: The allocation policy of the IoT Device Provisioning + Service (Hashed, GeoLatency or Static). Defaults to Hashed. + type: string + dataResidencyEnabled: + description: Specifies if the IoT Device Provisioning Service + has data residency and disaster recovery enabled. Defaults to + false. Changing this forces a new resource to be created. + type: boolean + deviceProvisioningHostName: + description: The device endpoint of the IoT Device Provisioning + Service. + type: string + id: + description: The ID of the IoT Device Provisioning Service. + type: string + idScope: + description: The unique identifier of the IoT Device Provisioning + Service. 
+ type: string + ipFilterRule: + description: An ip_filter_rule block as defined below. + items: + properties: + action: + description: The desired action for requests captured by + this rule. Possible values are Accept, Reject + type: string + ipMask: + description: The IP address range in CIDR notation for the + rule. + type: string + name: + description: The name of the filter. + type: string + target: + description: Target for requests captured by this rule. + Possible values are all, deviceApi and serviceApi. + type: string + type: object + type: array + linkedHub: + description: A linked_hub block as defined below. + items: + properties: + allocationWeight: + description: The weight applied to the IoT Hub. Defaults + to 1. + type: number + applyAllocationPolicy: + description: Determines whether to apply allocation policies + to the IoT Hub. Defaults to true. + type: boolean + hostname: + description: (Computed) The IoT Hub hostname. + type: string + location: + description: The location of the IoT hub. + type: string + type: object + type: array + location: + description: Specifies the supported Azure location where the + resource has to be created. Changing this forces a new resource + to be created. + type: string + publicNetworkAccessEnabled: + description: Whether requests from Public Network are allowed. + Defaults to true. + type: boolean + resourceGroupName: + description: The name of the resource group under which the Iot + Device Provisioning Service resource has to be created. Changing + this forces a new resource to be created. + type: string + serviceOperationsHostName: + description: The service endpoint of the IoT Device Provisioning + Service. + type: string + sku: + description: A sku block as defined below. + properties: + capacity: + description: The number of provisioned IoT Device Provisioning + Service units. + type: number + name: + description: The name of the sku. Currently can only be set + to S1. 
+ type: string + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/devices.azure.upbound.io_iothubs.yaml b/package/crds/devices.azure.upbound.io_iothubs.yaml index b5a2a43fb..08bd43a0f 100644 --- a/package/crds/devices.azure.upbound.io_iothubs.yaml +++ b/package/crds/devices.azure.upbound.io_iothubs.yaml @@ -1190,3 +1190,1142 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: IOTHub is the Schema for the IOTHubs API. Manages an IotHub + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: IOTHubSpec defines the desired state of IOTHub + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + cloudToDevice: + description: A cloud_to_device block as defined below. + properties: + defaultTtl: + description: The default time to live for cloud-to-device + messages, specified as an ISO 8601 timespan duration. This + value must be between 1 minute and 48 hours. Defaults to + PT1H. + type: string + feedback: + description: A feedback block as defined below. + items: + properties: + lockDuration: + description: The lock duration for the file upload notifications + queue, specified as an ISO 8601 timespan duration. + This value must be between 5 and 300 seconds. Defaults + to PT1M. + type: string + maxDeliveryCount: + description: The number of times the IoT Hub attempts + to deliver a file upload notification message. Defaults + to 10. + type: number + timeToLive: + description: The retention time for service-bound feedback + messages, specified as an ISO 8601 timespan duration. + This value must be between 1 minute and 48 hours. + Defaults to PT1H. 
+ type: string + type: object + type: array + maxDeliveryCount: + description: The maximum delivery count for cloud-to-device + per-device queues. This value must be between 1 and 100. + Defaults to 10. + type: number + type: object + eventHubPartitionCount: + description: The number of device-to-cloud partitions used by + backing event hubs. Must be between 2 and 128. + type: number + eventHubRetentionInDays: + description: The event hub retention to use in days. Must be between + 1 and 7. + type: number + fileUpload: + description: A file_upload block as defined below. + properties: + authenticationType: + description: The type used to authenticate against the storage + account. Possible values are keyBased and identityBased. + Defaults to keyBased. + type: string + connectionStringSecretRef: + description: The connection string for the Azure Storage account + to which files are uploaded. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + containerName: + description: The name of the root container where the files + should be uploaded to. The container need not exist but + should be creatable using the connection_string specified. + type: string + defaultTtl: + description: The period of time for which a file upload notification + message is available to consume before it expires, specified + as an ISO 8601 timespan duration. This value must be between + 1 minute and 48 hours. Defaults to PT1H. + type: string + identityId: + description: The ID of the User Managed Identity used to authenticate + against the storage account. + type: string + lockDuration: + description: The lock duration for the file upload notifications + queue, specified as an ISO 8601 timespan duration. This + value must be between 5 and 300 seconds. Defaults to PT1M. 
+ type: string + maxDeliveryCount: + description: The number of times the IoT Hub attempts to deliver + a file upload notification message. Defaults to 10. + type: number + notifications: + description: Used to specify whether file notifications are + sent to IoT Hub on upload. Defaults to false. + type: boolean + sasTtl: + description: The period of time for which the SAS URI generated + by IoT Hub for file upload is valid, specified as an ISO + 8601 timespan duration. This value must be between 1 minute + and 24 hours. Defaults to PT1H. + type: string + required: + - connectionStringSecretRef + type: object + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this IoT Hub. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this IoT Hub. Possible values + are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned + (to enable both). + type: string + type: object + localAuthenticationEnabled: + description: If false, SAS tokens with Iot hub scoped SAS keys + cannot be used for authentication. Defaults to true. + type: boolean + location: + description: Specifies the supported Azure location where the + resource has to be created. Changing this forces a new resource + to be created. + type: string + minTlsVersion: + description: Specifies the minimum TLS version to support for + this hub. The only valid value is 1.2. Changing this forces + a new resource to be created. + type: string + networkRuleSet: + description: A network_rule_set block as defined below. + items: + properties: + applyToBuiltinEventhubEndpoint: + description: Determines if Network Rule Set is also applied + to the BuiltIn EventHub EndPoint of the IotHub. Defaults + to false. 
+ type: boolean + defaultAction: + description: Default Action for Network Rule Set. Possible + values are Deny, Allow. Defaults to Deny. + type: string + ipRule: + description: One or more ip_rule blocks as defined below. + items: + properties: + action: + description: The desired action for requests captured + by this rule. Possible values are Allow. Defaults + to Allow. + type: string + ipMask: + description: The IP address range in CIDR notation + for the IP rule. + type: string + name: + description: The name of the sku. Possible values + are B1, B2, B3, F1, S1, S2, and S3. + type: string + type: object + type: array + type: object + type: array + publicNetworkAccessEnabled: + description: Is the IotHub resource accessible from a public network? + type: boolean + resourceGroupName: + description: The name of the resource group under which the IotHub + resource has to be created. Changing this forces a new resource + to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sku: + description: A sku block as defined below. + properties: + capacity: + description: The number of provisioned IoT Hub units. + type: number + name: + description: The name of the sku. Possible values are B1, + B2, B3, F1, S1, S2, and S3. + type: string + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. 
+ InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + cloudToDevice: + description: A cloud_to_device block as defined below. + properties: + defaultTtl: + description: The default time to live for cloud-to-device + messages, specified as an ISO 8601 timespan duration. This + value must be between 1 minute and 48 hours. Defaults to + PT1H. + type: string + feedback: + description: A feedback block as defined below. + items: + properties: + lockDuration: + description: The lock duration for the file upload notifications + queue, specified as an ISO 8601 timespan duration. + This value must be between 5 and 300 seconds. Defaults + to PT1M. + type: string + maxDeliveryCount: + description: The number of times the IoT Hub attempts + to deliver a file upload notification message. Defaults + to 10. + type: number + timeToLive: + description: The retention time for service-bound feedback + messages, specified as an ISO 8601 timespan duration. + This value must be between 1 minute and 48 hours. + Defaults to PT1H. + type: string + type: object + type: array + maxDeliveryCount: + description: The maximum delivery count for cloud-to-device + per-device queues. This value must be between 1 and 100. + Defaults to 10. + type: number + type: object + eventHubPartitionCount: + description: The number of device-to-cloud partitions used by + backing event hubs. Must be between 2 and 128. + type: number + eventHubRetentionInDays: + description: The event hub retention to use in days. 
Must be between + 1 and 7. + type: number + fileUpload: + description: A file_upload block as defined below. + properties: + authenticationType: + description: The type used to authenticate against the storage + account. Possible values are keyBased and identityBased. + Defaults to keyBased. + type: string + containerName: + description: The name of the root container where the files + should be uploaded to. The container need not exist but + should be creatable using the connection_string specified. + type: string + defaultTtl: + description: The period of time for which a file upload notification + message is available to consume before it expires, specified + as an ISO 8601 timespan duration. This value must be between + 1 minute and 48 hours. Defaults to PT1H. + type: string + identityId: + description: The ID of the User Managed Identity used to authenticate + against the storage account. + type: string + lockDuration: + description: The lock duration for the file upload notifications + queue, specified as an ISO 8601 timespan duration. This + value must be between 5 and 300 seconds. Defaults to PT1M. + type: string + maxDeliveryCount: + description: The number of times the IoT Hub attempts to deliver + a file upload notification message. Defaults to 10. + type: number + notifications: + description: Used to specify whether file notifications are + sent to IoT Hub on upload. Defaults to false. + type: boolean + sasTtl: + description: The period of time for which the SAS URI generated + by IoT Hub for file upload is valid, specified as an ISO + 8601 timespan duration. This value must be between 1 minute + and 24 hours. Defaults to PT1H. + type: string + type: object + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this IoT Hub. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this IoT Hub. Possible values + are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned + (to enable both). + type: string + type: object + localAuthenticationEnabled: + description: If false, SAS tokens with Iot hub scoped SAS keys + cannot be used for authentication. Defaults to true. + type: boolean + location: + description: Specifies the supported Azure location where the + resource has to be created. Changing this forces a new resource + to be created. + type: string + minTlsVersion: + description: Specifies the minimum TLS version to support for + this hub. The only valid value is 1.2. Changing this forces + a new resource to be created. + type: string + networkRuleSet: + description: A network_rule_set block as defined below. + items: + properties: + applyToBuiltinEventhubEndpoint: + description: Determines if Network Rule Set is also applied + to the BuiltIn EventHub EndPoint of the IotHub. Defaults + to false. + type: boolean + defaultAction: + description: Default Action for Network Rule Set. Possible + values are Deny, Allow. Defaults to Deny. + type: string + ipRule: + description: One or more ip_rule blocks as defined below. + items: + properties: + action: + description: The desired action for requests captured + by this rule. Possible values are Allow. Defaults + to Allow. + type: string + ipMask: + description: The IP address range in CIDR notation + for the IP rule. + type: string + name: + description: The name of the sku. Possible values + are B1, B2, B3, F1, S1, S2, and S3. + type: string + type: object + type: array + type: object + type: array + publicNetworkAccessEnabled: + description: Is the IotHub resource accessible from a public network? + type: boolean + sku: + description: A sku block as defined below. 
+ properties: + capacity: + description: The number of provisioned IoT Hub units. + type: number + name: + description: The name of the sku. Possible values are B1, + B2, B3, F1, S1, S2, and S3. + type: string + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.sku is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.sku) + || (has(self.initProvider) && has(self.initProvider.sku))' + status: + description: IOTHubStatus defines the observed state of IOTHub. + properties: + atProvider: + properties: + cloudToDevice: + description: A cloud_to_device block as defined below. + properties: + defaultTtl: + description: The default time to live for cloud-to-device + messages, specified as an ISO 8601 timespan duration. This + value must be between 1 minute and 48 hours. Defaults to + PT1H. + type: string + feedback: + description: A feedback block as defined below. + items: + properties: + lockDuration: + description: The lock duration for the file upload notifications + queue, specified as an ISO 8601 timespan duration. + This value must be between 5 and 300 seconds. Defaults + to PT1M. + type: string + maxDeliveryCount: + description: The number of times the IoT Hub attempts + to deliver a file upload notification message. Defaults + to 10. + type: number + timeToLive: + description: The retention time for service-bound feedback + messages, specified as an ISO 8601 timespan duration. + This value must be between 1 minute and 48 hours. + Defaults to PT1H. + type: string + type: object + type: array + maxDeliveryCount: + description: The maximum delivery count for cloud-to-device + per-device queues. This value must be between 1 and 100. + Defaults to 10. 
+ type: number + type: object + endpoint: + description: An endpoint block as defined below. + items: + properties: + authenticationType: + description: The type used to authenticate against the endpoint. + Possible values are keyBased and identityBased. Defaults + to keyBased. + type: string + batchFrequencyInSeconds: + description: Time interval at which blobs are written to + storage. Value should be between 60 and 720 seconds. Default + value is 300 seconds. This attribute is applicable for + endpoint type AzureIotHub.StorageContainer. + type: number + containerName: + description: The name of storage container in the storage + account. This attribute is mandatory for endpoint type + AzureIotHub.StorageContainer. + type: string + encoding: + description: Encoding that is used to serialize messages + to blobs. Supported values are Avro, AvroDeflate and JSON. + Default value is Avro. This attribute is applicable for + endpoint type AzureIotHub.StorageContainer. Changing this + forces a new resource to be created. + type: string + endpointUri: + description: URI of the Service Bus or Event Hubs Namespace + endpoint. This attribute can only be specified and is + mandatory when authentication_type is identityBased for + endpoint type AzureIotHub.ServiceBusQueue, AzureIotHub.ServiceBusTopic + or AzureIotHub.EventHub. + type: string + entityPath: + description: Name of the Service Bus Queue/Topic or Event + Hub. This attribute can only be specified and is mandatory + when authentication_type is identityBased for endpoint + type AzureIotHub.ServiceBusQueue, AzureIotHub.ServiceBusTopic + or AzureIotHub.EventHub. + type: string + fileNameFormat: + description: File name format for the blob. All parameters + are mandatory but can be reordered. This attribute is + applicable for endpoint type AzureIotHub.StorageContainer. + Defaults to {iothub}/{partition}/{YYYY}/{MM}/{DD}/{HH}/{mm}. 
+ type: string + identityId: + description: The ID of the User Managed Identity used to + authenticate against the endpoint. + type: string + maxChunkSizeInBytes: + description: Maximum number of bytes for each blob written + to storage. Value should be between 10485760(10MB) and + 524288000(500MB). Default value is 314572800(300MB). This + attribute is applicable for endpoint type AzureIotHub.StorageContainer. + type: number + name: + description: 'The name of the endpoint. The name must be + unique across endpoint types. The following names are + reserved: events, operationsMonitoringEvents, fileNotifications + and $default.' + type: string + resourceGroupName: + description: The resource group in which the endpoint will + be created. + type: string + type: + description: The type of the endpoint. Possible values are + AzureIotHub.StorageContainer, AzureIotHub.ServiceBusQueue, + AzureIotHub.ServiceBusTopic or AzureIotHub.EventHub. + type: string + type: object + type: array + enrichment: + description: A enrichment block as defined below. + items: + properties: + endpointNames: + description: The list of endpoints which will be enriched. + items: + type: string + type: array + key: + description: The key of the enrichment. + type: string + value: + description: 'The value of the enrichment. 
Value can be + any static string, the name of the IoT Hub sending the + message (use $iothubname) or information from the device + twin (ex: $twin.tags.latitude)' + type: string + type: object + type: array + eventHubEventsEndpoint: + description: The EventHub compatible endpoint for events data + type: string + eventHubEventsNamespace: + description: The EventHub namespace for events data + type: string + eventHubEventsPath: + description: The EventHub compatible path for events data + type: string + eventHubOperationsEndpoint: + description: The EventHub compatible endpoint for operational + data + type: string + eventHubOperationsPath: + description: The EventHub compatible path for operational data + type: string + eventHubPartitionCount: + description: The number of device-to-cloud partitions used by + backing event hubs. Must be between 2 and 128. + type: number + eventHubRetentionInDays: + description: The event hub retention to use in days. Must be between + 1 and 7. + type: number + fallbackRoute: + description: A fallback_route block as defined below. If the fallback + route is enabled, messages that don't match any of the supplied + routes are automatically sent to this route. Defaults to messages/events. + properties: + condition: + description: 'The condition that is evaluated to apply the + routing rule. Defaults to true. For grammar, see: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-query-language.' + type: string + enabled: + description: Used to specify whether the fallback route is + enabled. + type: boolean + endpointNames: + description: The endpoints to which messages that satisfy + the condition are routed. Currently only 1 endpoint is allowed. + items: + type: string + type: array + source: + description: 'The source that the routing rule is to be applied + to, such as DeviceMessages. 
Possible values include: Invalid, + DeviceMessages, TwinChangeEvents, DeviceLifecycleEvents, + DeviceConnectionStateEvents, DeviceJobLifecycleEvents and + DigitalTwinChangeEvents. Defaults to DeviceMessages.' + type: string + type: object + fileUpload: + description: A file_upload block as defined below. + properties: + authenticationType: + description: The type used to authenticate against the storage + account. Possible values are keyBased and identityBased. + Defaults to keyBased. + type: string + containerName: + description: The name of the root container where the files + should be uploaded to. The container need not exist but + should be creatable using the connection_string specified. + type: string + defaultTtl: + description: The period of time for which a file upload notification + message is available to consume before it expires, specified + as an ISO 8601 timespan duration. This value must be between + 1 minute and 48 hours. Defaults to PT1H. + type: string + identityId: + description: The ID of the User Managed Identity used to authenticate + against the storage account. + type: string + lockDuration: + description: The lock duration for the file upload notifications + queue, specified as an ISO 8601 timespan duration. This + value must be between 5 and 300 seconds. Defaults to PT1M. + type: string + maxDeliveryCount: + description: The number of times the IoT Hub attempts to deliver + a file upload notification message. Defaults to 10. + type: number + notifications: + description: Used to specify whether file notifications are + sent to IoT Hub on upload. Defaults to false. + type: boolean + sasTtl: + description: The period of time for which the SAS URI generated + by IoT Hub for file upload is valid, specified as an ISO + 8601 timespan duration. This value must be between 1 minute + and 24 hours. Defaults to PT1H. + type: string + type: object + hostname: + description: The hostname of the IotHub Resource. 
+ type: string + id: + description: The ID of the IoTHub. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this IoT Hub. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Principal ID associated with this Managed + Service Identity. + type: string + tenantId: + description: The Tenant ID associated with this Managed Service + Identity. + type: string + type: + description: Specifies the type of Managed Service Identity + that should be configured on this IoT Hub. Possible values + are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned + (to enable both). + type: string + type: object + localAuthenticationEnabled: + description: If false, SAS tokens with Iot hub scoped SAS keys + cannot be used for authentication. Defaults to true. + type: boolean + location: + description: Specifies the supported Azure location where the + resource has to be created. Changing this forces a new resource + to be created. + type: string + minTlsVersion: + description: Specifies the minimum TLS version to support for + this hub. The only valid value is 1.2. Changing this forces + a new resource to be created. + type: string + networkRuleSet: + description: A network_rule_set block as defined below. + items: + properties: + applyToBuiltinEventhubEndpoint: + description: Determines if Network Rule Set is also applied + to the BuiltIn EventHub EndPoint of the IotHub. Defaults + to false. + type: boolean + defaultAction: + description: Default Action for Network Rule Set. Possible + values are Deny, Allow. Defaults to Deny. + type: string + ipRule: + description: One or more ip_rule blocks as defined below. + items: + properties: + action: + description: The desired action for requests captured + by this rule. Possible values are Allow. Defaults + to Allow. 
+ type: string + ipMask: + description: The IP address range in CIDR notation + for the IP rule. + type: string + name: + description: The name of the sku. Possible values + are B1, B2, B3, F1, S1, S2, and S3. + type: string + type: object + type: array + type: object + type: array + publicNetworkAccessEnabled: + description: Is the IotHub resource accessible from a public network? + type: boolean + resourceGroupName: + description: The name of the resource group under which the IotHub + resource has to be created. Changing this forces a new resource + to be created. + type: string + route: + description: A route block as defined below. + items: + properties: + condition: + description: 'The condition that is evaluated to apply the + routing rule. Defaults to true. For grammar, see: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-query-language.' + type: string + enabled: + description: Used to specify whether a route is enabled. + type: boolean + endpointNames: + description: The list of endpoints to which messages that + satisfy the condition are routed. + items: + type: string + type: array + name: + description: The name of the route. + type: string + source: + description: 'The source that the routing rule is to be + applied to, such as DeviceMessages. Possible values include: + Invalid, DeviceMessages, TwinChangeEvents, DeviceLifecycleEvents, + DeviceConnectionStateEvents, DeviceJobLifecycleEvents + and DigitalTwinChangeEvents.' + type: string + type: object + type: array + sharedAccessPolicy: + description: One or more shared_access_policy blocks as defined + below. + items: + properties: + keyName: + description: The name of the shared access policy. + type: string + permissions: + description: The permissions assigned to the shared access + policy. + type: string + type: object + type: array + sku: + description: A sku block as defined below. + properties: + capacity: + description: The number of provisioned IoT Hub units. 
+ type: number + name: + description: The name of the sku. Possible values are B1, + B2, B3, F1, S1, S2, and S3. + type: string + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: + description: Specifies the type of Managed Service Identity that + should be configured on this IoT Hub. Possible values are SystemAssigned, + UserAssigned, SystemAssigned, UserAssigned (to enable both). + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/deviceupdate.azure.upbound.io_iothubdeviceupdateaccounts.yaml b/package/crds/deviceupdate.azure.upbound.io_iothubdeviceupdateaccounts.yaml index 5a7be47f1..b39a40d4c 100644 --- a/package/crds/deviceupdate.azure.upbound.io_iothubdeviceupdateaccounts.yaml +++ b/package/crds/deviceupdate.azure.upbound.io_iothubdeviceupdateaccounts.yaml @@ -554,3 +554,533 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: IOTHubDeviceUpdateAccount is the Schema for the IOTHubDeviceUpdateAccounts + API. Manages an IoT Hub Device Update Account. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: IOTHubDeviceUpdateAccountSpec defines the desired state of + IOTHubDeviceUpdateAccount + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of User Assigned Managed Identity IDs + to be assigned to this IoT Hub Device Update Account. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this IoT Hub Device Update + Account. Possible values are SystemAssigned, UserAssigned + and SystemAssigned, UserAssigned (to enable both). + type: string + type: object + location: + description: Specifies the Azure Region where the IoT Hub Device + Update Account should exist. Changing this forces a new resource + to be created. + type: string + publicNetworkAccessEnabled: + description: Specifies whether the public network access is enabled + for the IoT Hub Device Update Account. Possible values are true + and false. 
Defaults to true. + type: boolean + resourceGroupName: + description: Specifies the name of the Resource Group where the + IoT Hub Device Update Account should exist. Changing this forces + a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sku: + description: Sku of the IoT Hub Device Update Account. Possible + values are Free and Standard. Defaults to Standard. Changing + this forces a new resource to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + IoT Hub Device Update Account. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of User Assigned Managed Identity IDs + to be assigned to this IoT Hub Device Update Account. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this IoT Hub Device Update + Account. 
Possible values are SystemAssigned, UserAssigned + and SystemAssigned, UserAssigned (to enable both). + type: string + type: object + location: + description: Specifies the Azure Region where the IoT Hub Device + Update Account should exist. Changing this forces a new resource + to be created. + type: string + publicNetworkAccessEnabled: + description: Specifies whether the public network access is enabled + for the IoT Hub Device Update Account. Possible values are true + and false. Defaults to true. + type: boolean + sku: + description: Sku of the IoT Hub Device Update Account. Possible + values are Free and Standard. Defaults to Standard. Changing + this forces a new resource to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + IoT Hub Device Update Account. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + status: + description: IOTHubDeviceUpdateAccountStatus defines the observed state + of IOTHubDeviceUpdateAccount. + properties: + atProvider: + properties: + hostName: + description: The API host name of the IoT Hub Device Update Account. + type: string + id: + description: The ID of the IoT Hub Device Update Account. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of User Assigned Managed Identity IDs + to be assigned to this IoT Hub Device Update Account. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Principal ID for the Service Principal associated + with the Managed Service Identity of this IoT Hub Device + Update Account. + type: string + tenantId: + description: The Tenant ID for the Service Principal associated + with the Managed Service Identity of this IoT Hub Device + Update Account. 
+ type: string + type: + description: Specifies the type of Managed Service Identity + that should be configured on this IoT Hub Device Update + Account. Possible values are SystemAssigned, UserAssigned + and SystemAssigned, UserAssigned (to enable both). + type: string + type: object + location: + description: Specifies the Azure Region where the IoT Hub Device + Update Account should exist. Changing this forces a new resource + to be created. + type: string + publicNetworkAccessEnabled: + description: Specifies whether the public network access is enabled + for the IoT Hub Device Update Account. Possible values are true + and false. Defaults to true. + type: boolean + resourceGroupName: + description: Specifies the name of the Resource Group where the + IoT Hub Device Update Account should exist. Changing this forces + a new resource to be created. + type: string + sku: + description: Sku of the IoT Hub Device Update Account. Possible + values are Free and Standard. Defaults to Standard. Changing + this forces a new resource to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + IoT Hub Device Update Account. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/deviceupdate.azure.upbound.io_iothubdeviceupdateinstances.yaml b/package/crds/deviceupdate.azure.upbound.io_iothubdeviceupdateinstances.yaml index 8093f2b2e..3c8f741f4 100644 --- a/package/crds/deviceupdate.azure.upbound.io_iothubdeviceupdateinstances.yaml +++ b/package/crds/deviceupdate.azure.upbound.io_iothubdeviceupdateinstances.yaml @@ -808,3 +808,786 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: 
IOTHubDeviceUpdateInstance is the Schema for the IOTHubDeviceUpdateInstances + API. Manages an IoT Hub Device Update Instance. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: IOTHubDeviceUpdateInstanceSpec defines the desired state + of IOTHubDeviceUpdateInstance + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + deviceUpdateAccountId: + description: Specifies the ID of the IoT Hub Device Update Account + where the IoT Hub Device Update Instance exists. Changing this + forces a new resource to be created. 
+ type: string + deviceUpdateAccountIdRef: + description: Reference to a IOTHubDeviceUpdateAccount in deviceupdate + to populate deviceUpdateAccountId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + deviceUpdateAccountIdSelector: + description: Selector for a IOTHubDeviceUpdateAccount in deviceupdate + to populate deviceUpdateAccountId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + diagnosticEnabled: + description: Whether the diagnostic log collection is enabled. + Possible values are true and false. Defaults to false. + type: boolean + diagnosticStorageAccount: + description: A diagnostic_storage_account block as defined below. + properties: + connectionStringSecretRef: + description: Connection String of the Diagnostic Storage Account. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + id: + description: Resource ID of the Diagnostic Storage Account. + type: string + idRef: + description: Reference to a Account in storage to populate + id. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + idSelector: + description: Selector for a Account in storage to populate + id. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + required: + - connectionStringSecretRef + type: object + iothubId: + description: Specifies the ID of the IoT Hub associated with the + IoT Hub Device Update Instance. Changing this forces a new resource + to be created. + type: string + iothubIdRef: + description: Reference to a IOTHub in devices to populate iothubId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + iothubIdSelector: + description: Selector for a IOTHub in devices to populate iothubId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + IoT Hub Device Update Instance. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. 
It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + diagnosticEnabled: + description: Whether the diagnostic log collection is enabled. + Possible values are true and false. Defaults to false. + type: boolean + diagnosticStorageAccount: + description: A diagnostic_storage_account block as defined below. + properties: + id: + description: Resource ID of the Diagnostic Storage Account. + type: string + idRef: + description: Reference to a Account in storage to populate + id. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + idSelector: + description: Selector for a Account in storage to populate + id. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + iothubId: + description: Specifies the ID of the IoT Hub associated with the + IoT Hub Device Update Instance. Changing this forces a new resource + to be created. + type: string + iothubIdRef: + description: Reference to a IOTHub in devices to populate iothubId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + iothubIdSelector: + description: Selector for a IOTHub in devices to populate iothubId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + IoT Hub Device Update Instance. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. 
It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: IOTHubDeviceUpdateInstanceStatus defines the observed state + of IOTHubDeviceUpdateInstance. + properties: + atProvider: + properties: + deviceUpdateAccountId: + description: Specifies the ID of the IoT Hub Device Update Account + where the IoT Hub Device Update Instance exists. Changing this + forces a new resource to be created. + type: string + diagnosticEnabled: + description: Whether the diagnostic log collection is enabled. + Possible values are true and false. Defaults to false. + type: boolean + diagnosticStorageAccount: + description: A diagnostic_storage_account block as defined below. 
+ properties: + id: + description: Resource ID of the Diagnostic Storage Account. + type: string + type: object + id: + description: The ID of the IoT Hub Device Update Instance. + type: string + iothubId: + description: Specifies the ID of the IoT Hub associated with the + IoT Hub Device Update Instance. Changing this forces a new resource + to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + IoT Hub Device Update Instance. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/devtestlab.azure.upbound.io_globalvmshutdownschedules.yaml b/package/crds/devtestlab.azure.upbound.io_globalvmshutdownschedules.yaml index a8c67adbc..bf7ecb14a 100644 --- a/package/crds/devtestlab.azure.upbound.io_globalvmshutdownschedules.yaml +++ b/package/crds/devtestlab.azure.upbound.io_globalvmshutdownschedules.yaml @@ -650,3 +650,629 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: GlobalVMShutdownSchedule is the Schema for the GlobalVMShutdownSchedules + API. Manages automated shutdown schedules for Azure Resource Manager VMs + outside of Dev Test Labs. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: GlobalVMShutdownScheduleSpec defines the desired state of + GlobalVMShutdownSchedule + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + dailyRecurrenceTime: + description: The time each day when the schedule takes effect. + Must match the format HHmm where HH is 00-23 and mm is 00-59 + (e.g. 0930, 2300, etc.) + type: string + enabled: + description: Whether to enable the schedule. Possible values are + true and false. Defaults to true. + type: boolean + location: + description: The location where the schedule is created. Changing + this forces a new resource to be created. + type: string + notificationSettings: + description: The notification setting of a schedule. A notification_settings + block as defined below. 
+ properties: + email: + description: E-mail address to which the notification will + be sent. + type: string + enabled: + description: Whether to enable pre-shutdown notifications. + Possible values are true and false. + type: boolean + timeInMinutes: + description: Time in minutes between 15 and 120 before a shutdown + event at which a notification will be sent. Defaults to + 30. + type: number + webhookUrl: + description: The webhook URL to which the notification will + be sent. + type: string + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + timezone: + description: The time zone ID (e.g. Pacific Standard time). Refer + to this guide for a full list of accepted time zone names. + type: string + virtualMachineId: + description: The resource ID of the target ARM-based Virtual Machine. + Changing this forces a new resource to be created. + type: string + virtualMachineIdRef: + description: Reference to a LinuxVirtualMachine in compute to + populate virtualMachineId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualMachineIdSelector: + description: Selector for a LinuxVirtualMachine in compute to + populate virtualMachineId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + dailyRecurrenceTime: + description: The time each day when the schedule takes effect. + Must match the format HHmm where HH is 00-23 and mm is 00-59 + (e.g. 0930, 2300, etc.) + type: string + enabled: + description: Whether to enable the schedule. Possible values are + true and false. Defaults to true. + type: boolean + location: + description: The location where the schedule is created. Changing + this forces a new resource to be created. + type: string + notificationSettings: + description: The notification setting of a schedule. A notification_settings + block as defined below. + properties: + email: + description: E-mail address to which the notification will + be sent. + type: string + enabled: + description: Whether to enable pre-shutdown notifications. + Possible values are true and false. + type: boolean + timeInMinutes: + description: Time in minutes between 15 and 120 before a shutdown + event at which a notification will be sent. Defaults to + 30. + type: number + webhookUrl: + description: The webhook URL to which the notification will + be sent. + type: string + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + timezone: + description: The time zone ID (e.g. Pacific Standard time). Refer + to this guide for a full list of accepted time zone names. + type: string + virtualMachineId: + description: The resource ID of the target ARM-based Virtual Machine. + Changing this forces a new resource to be created. + type: string + virtualMachineIdRef: + description: Reference to a LinuxVirtualMachine in compute to + populate virtualMachineId. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualMachineIdSelector: + description: Selector for a LinuxVirtualMachine in compute to + populate virtualMachineId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. 
+ properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.dailyRecurrenceTime is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.dailyRecurrenceTime) + || (has(self.initProvider) && has(self.initProvider.dailyRecurrenceTime))' + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.notificationSettings is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.notificationSettings) + || (has(self.initProvider) && has(self.initProvider.notificationSettings))' + - message: spec.forProvider.timezone is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.timezone) + || (has(self.initProvider) && has(self.initProvider.timezone))' + status: + description: GlobalVMShutdownScheduleStatus defines the observed state + of GlobalVMShutdownSchedule. + properties: + atProvider: + properties: + dailyRecurrenceTime: + description: The time each day when the schedule takes effect. + Must match the format HHmm where HH is 00-23 and mm is 00-59 + (e.g. 0930, 2300, etc.) + type: string + enabled: + description: Whether to enable the schedule. Possible values are + true and false. Defaults to true. + type: boolean + id: + description: The Dev Test Global Schedule ID. + type: string + location: + description: The location where the schedule is created. 
Changing + this forces a new resource to be created. + type: string + notificationSettings: + description: The notification setting of a schedule. A notification_settings + block as defined below. + properties: + email: + description: E-mail address to which the notification will + be sent. + type: string + enabled: + description: Whether to enable pre-shutdown notifications. + Possible values are true and false. + type: boolean + timeInMinutes: + description: Time in minutes between 15 and 120 before a shutdown + event at which a notification will be sent. Defaults to + 30. + type: number + webhookUrl: + description: The webhook URL to which the notification will + be sent. + type: string + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + timezone: + description: The time zone ID (e.g. Pacific Standard time). Refer + to this guide for a full list of accepted time zone names. + type: string + virtualMachineId: + description: The resource ID of the target ARM-based Virtual Machine. + Changing this forces a new resource to be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
+ format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/devtestlab.azure.upbound.io_linuxvirtualmachines.yaml b/package/crds/devtestlab.azure.upbound.io_linuxvirtualmachines.yaml index 80c693517..716bf7e1c 100644 --- a/package/crds/devtestlab.azure.upbound.io_linuxvirtualmachines.yaml +++ b/package/crds/devtestlab.azure.upbound.io_linuxvirtualmachines.yaml @@ -1288,3 +1288,1267 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: LinuxVirtualMachine is the Schema for the LinuxVirtualMachines + API. Manages a Linux Virtual Machine within a Dev Test Lab. 
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: LinuxVirtualMachineSpec defines the desired state of LinuxVirtualMachine + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + allowClaim: + description: Can this Virtual Machine be claimed by users? Defaults + to true. + type: boolean + disallowPublicIpAddress: + description: Should the Virtual Machine be created without a Public + IP Address? Changing this forces a new resource to be created. + type: boolean + galleryImageReference: + description: A gallery_image_reference block as defined below. + properties: + offer: + description: The Offer of the Gallery Image. 
Changing this + forces a new resource to be created. + type: string + publisher: + description: The Publisher of the Gallery Image. Changing + this forces a new resource to be created. + type: string + sku: + description: The SKU of the Gallery Image. Changing this forces + a new resource to be created. + type: string + version: + description: The Version of the Gallery Image. Changing this + forces a new resource to be created. + type: string + type: object + inboundNatRule: + description: One or more inbound_nat_rule blocks as defined below. + Changing this forces a new resource to be created. + items: + properties: + backendPort: + description: The Backend Port associated with this NAT Rule. + Changing this forces a new resource to be created. + type: number + protocol: + description: The Protocol used for this NAT Rule. Possible + values are Tcp and Udp. + type: string + type: object + type: array + labName: + description: Specifies the name of the Dev Test Lab in which the + Virtual Machine should be created. Changing this forces a new + resource to be created. + type: string + labNameRef: + description: Reference to a Lab in devtestlab to populate labName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + labNameSelector: + description: Selector for a Lab in devtestlab to populate labName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labSubnetName: + description: The name of a Subnet within the Dev Test Virtual + Network where this machine should exist. Changing this forces + a new resource to be created. + type: string + labSubnetNameRef: + description: Reference to a Subnet in network to populate labSubnetName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + labSubnetNameSelector: + description: Selector for a Subnet in network to populate labSubnetName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labVirtualNetworkId: + description: The ID of the Dev Test Virtual Network where this + Virtual Machine should be created. Changing this forces a new + resource to be created. 
+ type: string + labVirtualNetworkIdRef: + description: Reference to a VirtualNetwork in devtestlab to populate + labVirtualNetworkId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + labVirtualNetworkIdSelector: + description: Selector for a VirtualNetwork in devtestlab to populate + labVirtualNetworkId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + location: + description: Specifies the supported Azure location where the + Dev Test Lab exists. Changing this forces a new resource to + be created. + type: string + name: + description: Specifies the name of the Dev Test Machine. Changing + this forces a new resource to be created. + type: string + notes: + description: Any notes about the Virtual Machine. + type: string + passwordSecretRef: + description: The Password associated with the username used to + login to this Virtual Machine. Changing this forces a new resource + to be created. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + resourceGroupName: + description: The name of the resource group in which the Dev Test + Lab resource exists. Changing this forces a new resource to + be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + size: + description: The Machine Size to use for this Virtual Machine, + such as Standard_F2. Changing this forces a new resource to + be created. + type: string + sshKey: + description: The SSH Key associated with the username used to + login to this Virtual Machine. Changing this forces a new resource + to be created. + type: string + storageType: + description: The type of Storage to use on this Virtual Machine. + Possible values are Standard and Premium. 
Changing this forces + a new resource to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + username: + description: The Username associated with the local administrator + on this Virtual Machine. Changing this forces a new resource + to be created. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + allowClaim: + description: Can this Virtual Machine be claimed by users? Defaults + to true. + type: boolean + disallowPublicIpAddress: + description: Should the Virtual Machine be created without a Public + IP Address? Changing this forces a new resource to be created. + type: boolean + galleryImageReference: + description: A gallery_image_reference block as defined below. + properties: + offer: + description: The Offer of the Gallery Image. Changing this + forces a new resource to be created. + type: string + publisher: + description: The Publisher of the Gallery Image. Changing + this forces a new resource to be created. + type: string + sku: + description: The SKU of the Gallery Image. Changing this forces + a new resource to be created. + type: string + version: + description: The Version of the Gallery Image. Changing this + forces a new resource to be created. 
+ type: string + type: object + inboundNatRule: + description: One or more inbound_nat_rule blocks as defined below. + Changing this forces a new resource to be created. + items: + properties: + backendPort: + description: The Backend Port associated with this NAT Rule. + Changing this forces a new resource to be created. + type: number + protocol: + description: The Protocol used for this NAT Rule. Possible + values are Tcp and Udp. + type: string + type: object + type: array + labName: + description: Specifies the name of the Dev Test Lab in which the + Virtual Machine should be created. Changing this forces a new + resource to be created. + type: string + labNameRef: + description: Reference to a Lab in devtestlab to populate labName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + labNameSelector: + description: Selector for a Lab in devtestlab to populate labName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labSubnetName: + description: The name of a Subnet within the Dev Test Virtual + Network where this machine should exist. Changing this forces + a new resource to be created. + type: string + labSubnetNameRef: + description: Reference to a Subnet in network to populate labSubnetName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + labSubnetNameSelector: + description: Selector for a Subnet in network to populate labSubnetName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labVirtualNetworkId: + description: The ID of the Dev Test Virtual Network where this + Virtual Machine should be created. Changing this forces a new + resource to be created. + type: string + labVirtualNetworkIdRef: + description: Reference to a VirtualNetwork in devtestlab to populate + labVirtualNetworkId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + labVirtualNetworkIdSelector: + description: Selector for a VirtualNetwork in devtestlab to populate + labVirtualNetworkId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + location: + description: Specifies the supported Azure location where the + Dev Test Lab exists. Changing this forces a new resource to + be created. 
+ type: string + name: + description: Specifies the name of the Dev Test Machine. Changing + this forces a new resource to be created. + type: string + notes: + description: Any notes about the Virtual Machine. + type: string + resourceGroupName: + description: The name of the resource group in which the Dev Test + Lab resource exists. Changing this forces a new resource to + be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + size: + description: The Machine Size to use for this Virtual Machine, + such as Standard_F2. Changing this forces a new resource to + be created. + type: string + sshKey: + description: The SSH Key associated with the username used to + login to this Virtual Machine. Changing this forces a new resource + to be created. + type: string + storageType: + description: The type of Storage to use on this Virtual Machine. + Possible values are Standard and Premium. Changing this forces + a new resource to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + username: + description: The Username associated with the local administrator + on this Virtual Machine. Changing this forces a new resource + to be created. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. 
Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.galleryImageReference is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.galleryImageReference) + || (has(self.initProvider) && has(self.initProvider.galleryImageReference))' + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.size is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.size) + || (has(self.initProvider) && has(self.initProvider.size))' + - message: spec.forProvider.storageType is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.storageType) + || (has(self.initProvider) && has(self.initProvider.storageType))' + - message: spec.forProvider.username is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.username) + || (has(self.initProvider) && has(self.initProvider.username))' + status: + description: LinuxVirtualMachineStatus defines the observed state of 
LinuxVirtualMachine. + properties: + atProvider: + properties: + allowClaim: + description: Can this Virtual Machine be claimed by users? Defaults + to true. + type: boolean + disallowPublicIpAddress: + description: Should the Virtual Machine be created without a Public + IP Address? Changing this forces a new resource to be created. + type: boolean + fqdn: + description: The FQDN of the Virtual Machine. + type: string + galleryImageReference: + description: A gallery_image_reference block as defined below. + properties: + offer: + description: The Offer of the Gallery Image. Changing this + forces a new resource to be created. + type: string + publisher: + description: The Publisher of the Gallery Image. Changing + this forces a new resource to be created. + type: string + sku: + description: The SKU of the Gallery Image. Changing this forces + a new resource to be created. + type: string + version: + description: The Version of the Gallery Image. Changing this + forces a new resource to be created. + type: string + type: object + id: + description: The ID of the Virtual Machine. + type: string + inboundNatRule: + description: One or more inbound_nat_rule blocks as defined below. + Changing this forces a new resource to be created. + items: + properties: + backendPort: + description: The Backend Port associated with this NAT Rule. + Changing this forces a new resource to be created. + type: number + frontendPort: + description: The frontend port associated with this Inbound + NAT Rule. + type: number + protocol: + description: The Protocol used for this NAT Rule. Possible + values are Tcp and Udp. + type: string + type: object + type: array + labName: + description: Specifies the name of the Dev Test Lab in which the + Virtual Machine should be created. Changing this forces a new + resource to be created. + type: string + labSubnetName: + description: The name of a Subnet within the Dev Test Virtual + Network where this machine should exist. 
Changing this forces + a new resource to be created. + type: string + labVirtualNetworkId: + description: The ID of the Dev Test Virtual Network where this + Virtual Machine should be created. Changing this forces a new + resource to be created. + type: string + location: + description: Specifies the supported Azure location where the + Dev Test Lab exists. Changing this forces a new resource to + be created. + type: string + name: + description: Specifies the name of the Dev Test Machine. Changing + this forces a new resource to be created. + type: string + notes: + description: Any notes about the Virtual Machine. + type: string + resourceGroupName: + description: The name of the resource group in which the Dev Test + Lab resource exists. Changing this forces a new resource to + be created. + type: string + size: + description: The Machine Size to use for this Virtual Machine, + such as Standard_F2. Changing this forces a new resource to + be created. + type: string + sshKey: + description: The SSH Key associated with the username used to + login to this Virtual Machine. Changing this forces a new resource + to be created. + type: string + storageType: + description: The type of Storage to use on this Virtual Machine. + Possible values are Standard and Premium. Changing this forces + a new resource to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + uniqueIdentifier: + description: The unique immutable identifier of the Virtual Machine. + type: string + username: + description: The Username associated with the local administrator + on this Virtual Machine. Changing this forces a new resource + to be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. 
+ properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/devtestlab.azure.upbound.io_schedules.yaml b/package/crds/devtestlab.azure.upbound.io_schedules.yaml index 51e3b4c4d..250bf88f7 100644 --- a/package/crds/devtestlab.azure.upbound.io_schedules.yaml +++ b/package/crds/devtestlab.azure.upbound.io_schedules.yaml @@ -750,3 +750,711 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Schedule is the Schema for the Schedules API. Manages automated + startup and shutdown schedules for Azure Dev Test Lab. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ScheduleSpec defines the desired state of Schedule + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + dailyRecurrence: + description: The properties of a daily schedule. If the schedule + occurs once each day of the week, specify the daily recurrence. + A daily_recurrence block as defined below. + properties: + time: + description: The time each day when the schedule takes effect. + type: string + type: object + hourlyRecurrence: + description: The properties of an hourly schedule. If the schedule + occurs multiple times a day, specify the hourly recurrence. + A hourly_recurrence block as defined below. + properties: + minute: + description: Minutes of the hour the schedule will run. + type: number + type: object + labName: + description: The name of the dev test lab. Changing this forces + a new resource to be created. + type: string + labNameRef: + description: Reference to a Lab in devtestlab to populate labName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + labNameSelector: + description: Selector for a Lab in devtestlab to populate labName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + location: + description: The location where the schedule is created. Changing + this forces a new resource to be created. + type: string + notificationSettings: + description: The notification setting of a schedule. A notification_settings + block as defined below. + properties: + status: + description: The status of the notification. Possible values + are Enabled and Disabled. Defaults to Disabled + type: string + timeInMinutes: + description: Time in minutes before event at which notification + will be sent. + type: number + webhookUrl: + description: The webhook URL to which the notification will + be sent. + type: string + type: object + resourceGroupName: + description: The name of the resource group in which to create + the dev test lab schedule. Changing this forces a new resource + to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + status: + description: The status of this schedule. Possible values are + Enabled and Disabled. Defaults to Disabled. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + taskType: + description: The task type of the schedule. Possible values include + LabVmsShutdownTask and LabVmAutoStart. + type: string + timeZoneId: + description: The time zone ID (e.g. Pacific Standard time). + type: string + weeklyRecurrence: + description: The properties of a weekly schedule. 
If the schedule + occurs only some days of the week, specify the weekly recurrence. + A weekly_recurrence block as defined below. + properties: + time: + description: The time when the schedule takes effect. + type: string + weekDays: + description: A list of days that this schedule takes effect + . Possible values include Monday, Tuesday, Wednesday, Thursday, + Friday, Saturday and Sunday. + items: + type: string + type: array + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + dailyRecurrence: + description: The properties of a daily schedule. If the schedule + occurs once each day of the week, specify the daily recurrence. + A daily_recurrence block as defined below. + properties: + time: + description: The time each day when the schedule takes effect. + type: string + type: object + hourlyRecurrence: + description: The properties of an hourly schedule. If the schedule + occurs multiple times a day, specify the hourly recurrence. + A hourly_recurrence block as defined below. + properties: + minute: + description: Minutes of the hour the schedule will run. + type: number + type: object + location: + description: The location where the schedule is created. Changing + this forces a new resource to be created. + type: string + notificationSettings: + description: The notification setting of a schedule. 
A notification_settings + block as defined below. + properties: + status: + description: The status of the notification. Possible values + are Enabled and Disabled. Defaults to Disabled + type: string + timeInMinutes: + description: Time in minutes before event at which notification + will be sent. + type: number + webhookUrl: + description: The webhook URL to which the notification will + be sent. + type: string + type: object + status: + description: The status of this schedule. Possible values are + Enabled and Disabled. Defaults to Disabled. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + taskType: + description: The task type of the schedule. Possible values include + LabVmsShutdownTask and LabVmAutoStart. + type: string + timeZoneId: + description: The time zone ID (e.g. Pacific Standard time). + type: string + weeklyRecurrence: + description: The properties of a weekly schedule. If the schedule + occurs only some days of the week, specify the weekly recurrence. + A weekly_recurrence block as defined below. + properties: + time: + description: The time when the schedule takes effect. + type: string + weekDays: + description: A list of days that this schedule takes effect + . Possible values include Monday, Tuesday, Wednesday, Thursday, + Friday, Saturday and Sunday. + items: + type: string + type: array + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. 
If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.notificationSettings is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.notificationSettings) + || (has(self.initProvider) && has(self.initProvider.notificationSettings))' + - message: spec.forProvider.taskType is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.taskType) + || (has(self.initProvider) && has(self.initProvider.taskType))' + - message: spec.forProvider.timeZoneId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.timeZoneId) + || (has(self.initProvider) && has(self.initProvider.timeZoneId))' + status: + description: ScheduleStatus defines the observed state of Schedule. + properties: + atProvider: + properties: + dailyRecurrence: + description: The properties of a daily schedule. If the schedule + occurs once each day of the week, specify the daily recurrence. + A daily_recurrence block as defined below. + properties: + time: + description: The time each day when the schedule takes effect. + type: string + type: object + hourlyRecurrence: + description: The properties of an hourly schedule. If the schedule + occurs multiple times a day, specify the hourly recurrence. + A hourly_recurrence block as defined below. 
+ properties: + minute: + description: Minutes of the hour the schedule will run. + type: number + type: object + id: + description: The ID of the DevTest Schedule. + type: string + labName: + description: The name of the dev test lab. Changing this forces + a new resource to be created. + type: string + location: + description: The location where the schedule is created. Changing + this forces a new resource to be created. + type: string + notificationSettings: + description: The notification setting of a schedule. A notification_settings + block as defined below. + properties: + status: + description: The status of the notification. Possible values + are Enabled and Disabled. Defaults to Disabled + type: string + timeInMinutes: + description: Time in minutes before event at which notification + will be sent. + type: number + webhookUrl: + description: The webhook URL to which the notification will + be sent. + type: string + type: object + resourceGroupName: + description: The name of the resource group in which to create + the dev test lab schedule. Changing this forces a new resource + to be created. + type: string + status: + description: The status of this schedule. Possible values are + Enabled and Disabled. Defaults to Disabled. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + taskType: + description: The task type of the schedule. Possible values include + LabVmsShutdownTask and LabVmAutoStart. + type: string + timeZoneId: + description: The time zone ID (e.g. Pacific Standard time). + type: string + weeklyRecurrence: + description: The properties of a weekly schedule. If the schedule + occurs only some days of the week, specify the weekly recurrence. + A weekly_recurrence block as defined below. + properties: + time: + description: The time when the schedule takes effect. 
+ type: string + weekDays: + description: A list of days that this schedule takes effect + . Possible values include Monday, Tuesday, Wednesday, Thursday, + Friday, Saturday and Sunday. + items: + type: string + type: array + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/devtestlab.azure.upbound.io_virtualnetworks.yaml b/package/crds/devtestlab.azure.upbound.io_virtualnetworks.yaml index 8fdf2cab6..bce65a97f 100644 --- a/package/crds/devtestlab.azure.upbound.io_virtualnetworks.yaml +++ b/package/crds/devtestlab.azure.upbound.io_virtualnetworks.yaml @@ -754,3 +754,733 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: VirtualNetwork is the Schema for the VirtualNetworks API. Manages + a Virtual Network within a DevTest Lab. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: VirtualNetworkSpec defines the desired state of VirtualNetwork + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: A description for the Virtual Network. + type: string + labName: + description: Specifies the name of the Dev Test Lab in which the + Virtual Network should be created. Changing this forces a new + resource to be created. + type: string + labNameRef: + description: Reference to a Lab in devtestlab to populate labName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + labNameSelector: + description: Selector for a Lab in devtestlab to populate labName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: Specifies the name of the Dev Test Virtual Network. + Changing this forces a new resource to be created. + type: string + resourceGroupName: + description: The name of the resource group in which the Dev Test + Lab resource exists. Changing this forces a new resource to + be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + subnet: + description: A subnet block as defined below. + properties: + useInVirtualMachineCreation: + description: Can this subnet be used for creating Virtual + Machines? Possible values are Allow, Default and Deny. Defaults + to Allow. + type: string + usePublicIpAddress: + description: Can Virtual Machines in this Subnet use Public + IP Addresses? Possible values are Allow, Default and Deny. + Defaults to Allow. + type: string + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: A description for the Virtual Network. + type: string + labName: + description: Specifies the name of the Dev Test Lab in which the + Virtual Network should be created. Changing this forces a new + resource to be created. + type: string + labNameRef: + description: Reference to a Lab in devtestlab to populate labName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + labNameSelector: + description: Selector for a Lab in devtestlab to populate labName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: Specifies the name of the Dev Test Virtual Network. + Changing this forces a new resource to be created. + type: string + resourceGroupName: + description: The name of the resource group in which the Dev Test + Lab resource exists. Changing this forces a new resource to + be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + subnet: + description: A subnet block as defined below. + properties: + useInVirtualMachineCreation: + description: Can this subnet be used for creating Virtual + Machines? Possible values are Allow, Default and Deny. Defaults + to Allow. + type: string + usePublicIpAddress: + description: Can Virtual Machines in this Subnet use Public + IP Addresses? Possible values are Allow, Default and Deny. + Defaults to Allow. + type: string + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: VirtualNetworkStatus defines the observed state of VirtualNetwork. + properties: + atProvider: + properties: + description: + description: A description for the Virtual Network. + type: string + id: + description: The ID of the Dev Test Virtual Network. + type: string + labName: + description: Specifies the name of the Dev Test Lab in which the + Virtual Network should be created. Changing this forces a new + resource to be created. + type: string + name: + description: Specifies the name of the Dev Test Virtual Network. + Changing this forces a new resource to be created. + type: string + resourceGroupName: + description: The name of the resource group in which the Dev Test + Lab resource exists. 
Changing this forces a new resource to + be created. + type: string + subnet: + description: A subnet block as defined below. + properties: + name: + description: The name of the Subnet for this Virtual Network. + type: string + useInVirtualMachineCreation: + description: Can this subnet be used for creating Virtual + Machines? Possible values are Allow, Default and Deny. Defaults + to Allow. + type: string + usePublicIpAddress: + description: Can Virtual Machines in this Subnet use Public + IP Addresses? Possible values are Allow, Default and Deny. + Defaults to Allow. + type: string + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + uniqueIdentifier: + description: The unique immutable identifier of the Dev Test Virtual + Network. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. 
At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/devtestlab.azure.upbound.io_windowsvirtualmachines.yaml b/package/crds/devtestlab.azure.upbound.io_windowsvirtualmachines.yaml index a48e88b9e..0d1c28c12 100644 --- a/package/crds/devtestlab.azure.upbound.io_windowsvirtualmachines.yaml +++ b/package/crds/devtestlab.azure.upbound.io_windowsvirtualmachines.yaml @@ -1277,3 +1277,1256 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: WindowsVirtualMachine is the Schema for the WindowsVirtualMachines + API. Manages a Windows Virtual Machine within a Dev Test Lab. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: WindowsVirtualMachineSpec defines the desired state of WindowsVirtualMachine + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + allowClaim: + description: Can this Virtual Machine be claimed by users? Defaults + to true. + type: boolean + disallowPublicIpAddress: + description: Should the Virtual Machine be created without a Public + IP Address? Changing this forces a new resource to be created. + type: boolean + galleryImageReference: + description: A gallery_image_reference block as defined below. + properties: + offer: + description: The Offer of the Gallery Image. Changing this + forces a new resource to be created. + type: string + publisher: + description: The Publisher of the Gallery Image. Changing + this forces a new resource to be created. 
+ type: string + sku: + description: The SKU of the Gallery Image. Changing this forces + a new resource to be created. + type: string + version: + description: The Version of the Gallery Image. Changing this + forces a new resource to be created. + type: string + type: object + inboundNatRule: + description: One or more inbound_nat_rule blocks as defined below. + Changing this forces a new resource to be created. + items: + properties: + backendPort: + description: The Backend Port associated with this NAT Rule. + Changing this forces a new resource to be created. + type: number + protocol: + description: The Protocol used for this NAT Rule. Possible + values are Tcp and Udp. + type: string + type: object + type: array + labName: + description: Specifies the name of the Dev Test Lab in which the + Virtual Machine should be created. Changing this forces a new + resource to be created. + type: string + labNameRef: + description: Reference to a Lab in devtestlab to populate labName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + labNameSelector: + description: Selector for a Lab in devtestlab to populate labName. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labSubnetName: + description: The name of a Subnet within the Dev Test Virtual + Network where this machine should exist. Changing this forces + a new resource to be created. + type: string + labSubnetNameRef: + description: Reference to a Subnet in network to populate labSubnetName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + labSubnetNameSelector: + description: Selector for a Subnet in network to populate labSubnetName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labVirtualNetworkId: + description: The ID of the Dev Test Virtual Network where this + Virtual Machine should be created. Changing this forces a new + resource to be created. + type: string + labVirtualNetworkIdRef: + description: Reference to a VirtualNetwork in devtestlab to populate + labVirtualNetworkId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + labVirtualNetworkIdSelector: + description: Selector for a VirtualNetwork in devtestlab to populate + labVirtualNetworkId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + location: + description: Specifies the supported Azure location where the + Dev Test Lab exists. Changing this forces a new resource to + be created. + type: string + name: + description: Specifies the name of the Dev Test Machine. Changing + this forces a new resource to be created. + type: string + notes: + description: Any notes about the Virtual Machine. + type: string + passwordSecretRef: + description: The Password associated with the username used to + login to this Virtual Machine. Changing this forces a new resource + to be created. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + resourceGroupName: + description: The name of the resource group in which the Dev Test + Lab resource exists. Changing this forces a new resource to + be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + size: + description: The Machine Size to use for this Virtual Machine, + such as Standard_F2. Changing this forces a new resource to + be created. + type: string + storageType: + description: The type of Storage to use on this Virtual Machine. + Possible values are Standard and Premium. Changing this forces + a new resource to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + username: + description: The Username associated with the local administrator + on this Virtual Machine. Changing this forces a new resource + to be created. 
+ type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + allowClaim: + description: Can this Virtual Machine be claimed by users? Defaults + to true. + type: boolean + disallowPublicIpAddress: + description: Should the Virtual Machine be created without a Public + IP Address? Changing this forces a new resource to be created. + type: boolean + galleryImageReference: + description: A gallery_image_reference block as defined below. + properties: + offer: + description: The Offer of the Gallery Image. Changing this + forces a new resource to be created. + type: string + publisher: + description: The Publisher of the Gallery Image. Changing + this forces a new resource to be created. + type: string + sku: + description: The SKU of the Gallery Image. Changing this forces + a new resource to be created. + type: string + version: + description: The Version of the Gallery Image. Changing this + forces a new resource to be created. + type: string + type: object + inboundNatRule: + description: One or more inbound_nat_rule blocks as defined below. + Changing this forces a new resource to be created. + items: + properties: + backendPort: + description: The Backend Port associated with this NAT Rule. + Changing this forces a new resource to be created. + type: number + protocol: + description: The Protocol used for this NAT Rule. 
Possible + values are Tcp and Udp. + type: string + type: object + type: array + labName: + description: Specifies the name of the Dev Test Lab in which the + Virtual Machine should be created. Changing this forces a new + resource to be created. + type: string + labNameRef: + description: Reference to a Lab in devtestlab to populate labName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + labNameSelector: + description: Selector for a Lab in devtestlab to populate labName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labSubnetName: + description: The name of a Subnet within the Dev Test Virtual + Network where this machine should exist. Changing this forces + a new resource to be created. + type: string + labSubnetNameRef: + description: Reference to a Subnet in network to populate labSubnetName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + labSubnetNameSelector: + description: Selector for a Subnet in network to populate labSubnetName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + labVirtualNetworkId: + description: The ID of the Dev Test Virtual Network where this + Virtual Machine should be created. Changing this forces a new + resource to be created. + type: string + labVirtualNetworkIdRef: + description: Reference to a VirtualNetwork in devtestlab to populate + labVirtualNetworkId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + labVirtualNetworkIdSelector: + description: Selector for a VirtualNetwork in devtestlab to populate + labVirtualNetworkId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + location: + description: Specifies the supported Azure location where the + Dev Test Lab exists. Changing this forces a new resource to + be created. + type: string + name: + description: Specifies the name of the Dev Test Machine. Changing + this forces a new resource to be created. + type: string + notes: + description: Any notes about the Virtual Machine. + type: string + resourceGroupName: + description: The name of the resource group in which the Dev Test + Lab resource exists. Changing this forces a new resource to + be created. 
+ type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + size: + description: The Machine Size to use for this Virtual Machine, + such as Standard_F2. Changing this forces a new resource to + be created. + type: string + storageType: + description: The type of Storage to use on this Virtual Machine. + Possible values are Standard and Premium. Changing this forces + a new resource to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + username: + description: The Username associated with the local administrator + on this Virtual Machine. Changing this forces a new resource + to be created. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.galleryImageReference is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.galleryImageReference) + || (has(self.initProvider) && has(self.initProvider.galleryImageReference))' + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.passwordSecretRef is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.passwordSecretRef)' + - message: spec.forProvider.size is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.size) + || 
(has(self.initProvider) && has(self.initProvider.size))' + - message: spec.forProvider.storageType is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.storageType) + || (has(self.initProvider) && has(self.initProvider.storageType))' + - message: spec.forProvider.username is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.username) + || (has(self.initProvider) && has(self.initProvider.username))' + status: + description: WindowsVirtualMachineStatus defines the observed state of + WindowsVirtualMachine. + properties: + atProvider: + properties: + allowClaim: + description: Can this Virtual Machine be claimed by users? Defaults + to true. + type: boolean + disallowPublicIpAddress: + description: Should the Virtual Machine be created without a Public + IP Address? Changing this forces a new resource to be created. + type: boolean + fqdn: + description: The FQDN of the Virtual Machine. + type: string + galleryImageReference: + description: A gallery_image_reference block as defined below. + properties: + offer: + description: The Offer of the Gallery Image. Changing this + forces a new resource to be created. + type: string + publisher: + description: The Publisher of the Gallery Image. Changing + this forces a new resource to be created. + type: string + sku: + description: The SKU of the Gallery Image. Changing this forces + a new resource to be created. + type: string + version: + description: The Version of the Gallery Image. Changing this + forces a new resource to be created. + type: string + type: object + id: + description: The ID of the Virtual Machine. + type: string + inboundNatRule: + description: One or more inbound_nat_rule blocks as defined below. + Changing this forces a new resource to be created. 
+ items: + properties: + backendPort: + description: The Backend Port associated with this NAT Rule. + Changing this forces a new resource to be created. + type: number + frontendPort: + description: The frontend port associated with this Inbound + NAT Rule. + type: number + protocol: + description: The Protocol used for this NAT Rule. Possible + values are Tcp and Udp. + type: string + type: object + type: array + labName: + description: Specifies the name of the Dev Test Lab in which the + Virtual Machine should be created. Changing this forces a new + resource to be created. + type: string + labSubnetName: + description: The name of a Subnet within the Dev Test Virtual + Network where this machine should exist. Changing this forces + a new resource to be created. + type: string + labVirtualNetworkId: + description: The ID of the Dev Test Virtual Network where this + Virtual Machine should be created. Changing this forces a new + resource to be created. + type: string + location: + description: Specifies the supported Azure location where the + Dev Test Lab exists. Changing this forces a new resource to + be created. + type: string + name: + description: Specifies the name of the Dev Test Machine. Changing + this forces a new resource to be created. + type: string + notes: + description: Any notes about the Virtual Machine. + type: string + resourceGroupName: + description: The name of the resource group in which the Dev Test + Lab resource exists. Changing this forces a new resource to + be created. + type: string + size: + description: The Machine Size to use for this Virtual Machine, + such as Standard_F2. Changing this forces a new resource to + be created. + type: string + storageType: + description: The type of Storage to use on this Virtual Machine. + Possible values are Standard and Premium. Changing this forces + a new resource to be created. 
+ type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + uniqueIdentifier: + description: The unique immutable identifier of the Virtual Machine. + type: string + username: + description: The Username associated with the local administrator + on this Virtual Machine. Changing this forces a new resource + to be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/digitaltwins.azure.upbound.io_instances.yaml b/package/crds/digitaltwins.azure.upbound.io_instances.yaml index 5f3a7a41c..96df77c9e 100644 --- a/package/crds/digitaltwins.azure.upbound.io_instances.yaml +++ b/package/crds/digitaltwins.azure.upbound.io_instances.yaml @@ -521,3 +521,500 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Instance is the Schema for the Instances API. Manages a Digital + Twins instance. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: InstanceSpec defines the desired state of Instance + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of User Assigned Managed Identity IDs + to be assigned to this Digital Twins instance. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Digital Twins instance. + Possible values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + location: + description: The Azure Region where the Digital Twins instance + should exist. Changing this forces a new Digital Twins instance + to be created. + type: string + resourceGroupName: + description: The name of the Resource Group where the Digital + Twins instance should exist. Changing this forces a new Digital + Twins instance to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Digital Twins instance. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of User Assigned Managed Identity IDs + to be assigned to this Digital Twins instance. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Digital Twins instance. + Possible values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + location: + description: The Azure Region where the Digital Twins instance + should exist. Changing this forces a new Digital Twins instance + to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Digital Twins instance. 
+ type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. 
+ - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + status: + description: InstanceStatus defines the observed state of Instance. 
+ properties: + atProvider: + properties: + hostName: + description: The API endpoint to work with this Digital Twins + instance. + type: string + id: + description: The ID of the Digital Twins instance. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of User Assigned Managed Identity IDs + to be assigned to this Digital Twins instance. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Principal ID associated with this Managed + Service Identity. + type: string + tenantId: + description: The Tenant ID associated with this Managed Service + Identity. + type: string + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Digital Twins instance. + Possible values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + location: + description: The Azure Region where the Digital Twins instance + should exist. Changing this forces a new Digital Twins instance + to be created. + type: string + resourceGroupName: + description: The name of the Resource Group where the Digital + Twins instance should exist. Changing this forces a new Digital + Twins instance to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Digital Twins instance. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/elastic.azure.upbound.io_cloudelasticsearches.yaml b/package/crds/elastic.azure.upbound.io_cloudelasticsearches.yaml index f3f7368bb..bb6768a6c 100644 --- a/package/crds/elastic.azure.upbound.io_cloudelasticsearches.yaml +++ b/package/crds/elastic.azure.upbound.io_cloudelasticsearches.yaml @@ -638,3 +638,617 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: CloudElasticsearch is the Schema for the CloudElasticsearchs + API. Manages an Elasticsearch cluster in Elastic Cloud. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: CloudElasticsearchSpec defines the desired state of CloudElasticsearch + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + elasticCloudEmailAddress: + description: Specifies the Email Address which should be associated + with this Elasticsearch account. Changing this forces a new + Elasticsearch to be created. + type: string + location: + description: The Azure Region where the Elasticsearch resource + should exist. Changing this forces a new Elasticsearch to be + created. + type: string + logs: + description: A logs block as defined below. + properties: + filteringTag: + description: A list of filtering_tag blocks as defined above. + items: + properties: + action: + description: Specifies the type of action which should + be taken when the Tag matches the name and value. + Possible values are Exclude and Include. + type: string + name: + description: The name which should be used for this + Elasticsearch resource. Changing this forces a new + Elasticsearch to be created. + type: string + value: + description: Specifies the value of the Tag which should + be filtered. 
+ type: string + type: object + type: array + sendActivityLogs: + description: Specifies if the Azure Activity Logs should be + sent to the Elasticsearch cluster. Defaults to false. + type: boolean + sendAzureadLogs: + description: Specifies if the AzureAD Logs should be sent + to the Elasticsearch cluster. Defaults to false. + type: boolean + sendSubscriptionLogs: + description: Specifies if the Azure Subscription Logs should + be sent to the Elasticsearch cluster. Defaults to false. + type: boolean + type: object + monitoringEnabled: + description: Specifies if the Elasticsearch should have monitoring + configured? Defaults to true. Changing this forces a new Elasticsearch + to be created. + type: boolean + resourceGroupName: + description: The name of the Resource Group where the Elasticsearch + resource should exist. Changing this forces a new Elasticsearch + to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + skuName: + description: Specifies the name of the SKU for this Elasticsearch. + Changing this forces a new Elasticsearch to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Elasticsearch resource. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. 
The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + elasticCloudEmailAddress: + description: Specifies the Email Address which should be associated + with this Elasticsearch account. Changing this forces a new + Elasticsearch to be created. + type: string + location: + description: The Azure Region where the Elasticsearch resource + should exist. Changing this forces a new Elasticsearch to be + created. + type: string + logs: + description: A logs block as defined below. + properties: + filteringTag: + description: A list of filtering_tag blocks as defined above. + items: + properties: + action: + description: Specifies the type of action which should + be taken when the Tag matches the name and value. + Possible values are Exclude and Include. + type: string + name: + description: The name which should be used for this + Elasticsearch resource. Changing this forces a new + Elasticsearch to be created. + type: string + value: + description: Specifies the value of the Tag which should + be filtered. + type: string + type: object + type: array + sendActivityLogs: + description: Specifies if the Azure Activity Logs should be + sent to the Elasticsearch cluster. Defaults to false. + type: boolean + sendAzureadLogs: + description: Specifies if the AzureAD Logs should be sent + to the Elasticsearch cluster. Defaults to false. + type: boolean + sendSubscriptionLogs: + description: Specifies if the Azure Subscription Logs should + be sent to the Elasticsearch cluster. Defaults to false. 
+ type: boolean + type: object + monitoringEnabled: + description: Specifies if the Elasticsearch should have monitoring + configured? Defaults to true. Changing this forces a new Elasticsearch + to be created. + type: boolean + skuName: + description: Specifies the name of the SKU for this Elasticsearch. + Changing this forces a new Elasticsearch to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Elasticsearch resource. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.elasticCloudEmailAddress is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.elasticCloudEmailAddress) + || (has(self.initProvider) && has(self.initProvider.elasticCloudEmailAddress))' + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.skuName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.skuName) + || (has(self.initProvider) && has(self.initProvider.skuName))' + status: + description: CloudElasticsearchStatus defines the observed state of CloudElasticsearch. + properties: + atProvider: + properties: + elasticCloudDeploymentId: + description: The ID of the Deployment within Elastic Cloud. + type: string + elasticCloudEmailAddress: + description: Specifies the Email Address which should be associated + with this Elasticsearch account. Changing this forces a new + Elasticsearch to be created. + type: string + elasticCloudSsoDefaultUrl: + description: The Default URL used for Single Sign On (SSO) to + Elastic Cloud. + type: string + elasticCloudUserId: + description: The ID of the User Account within Elastic Cloud. + type: string + elasticsearchServiceUrl: + description: The URL to the Elasticsearch Service associated with + this Elasticsearch. + type: string + id: + description: The ID of the Elasticsearch. 
+ type: string + kibanaServiceUrl: + description: The URL to the Kibana Dashboard associated with this + Elasticsearch. + type: string + kibanaSsoUri: + description: The URI used for SSO to the Kibana Dashboard associated + with this Elasticsearch. + type: string + location: + description: The Azure Region where the Elasticsearch resource + should exist. Changing this forces a new Elasticsearch to be + created. + type: string + logs: + description: A logs block as defined below. + properties: + filteringTag: + description: A list of filtering_tag blocks as defined above. + items: + properties: + action: + description: Specifies the type of action which should + be taken when the Tag matches the name and value. + Possible values are Exclude and Include. + type: string + name: + description: The name which should be used for this + Elasticsearch resource. Changing this forces a new + Elasticsearch to be created. + type: string + value: + description: Specifies the value of the Tag which should + be filtered. + type: string + type: object + type: array + sendActivityLogs: + description: Specifies if the Azure Activity Logs should be + sent to the Elasticsearch cluster. Defaults to false. + type: boolean + sendAzureadLogs: + description: Specifies if the AzureAD Logs should be sent + to the Elasticsearch cluster. Defaults to false. + type: boolean + sendSubscriptionLogs: + description: Specifies if the Azure Subscription Logs should + be sent to the Elasticsearch cluster. Defaults to false. + type: boolean + type: object + monitoringEnabled: + description: Specifies if the Elasticsearch should have monitoring + configured? Defaults to true. Changing this forces a new Elasticsearch + to be created. + type: boolean + resourceGroupName: + description: The name of the Resource Group where the Elasticsearch + resource should exist. Changing this forces a new Elasticsearch + to be created. 
+ type: string + skuName: + description: Specifies the name of the SKU for this Elasticsearch. + Changing this forces a new Elasticsearch to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Elasticsearch resource. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/eventgrid.azure.upbound.io_domains.yaml b/package/crds/eventgrid.azure.upbound.io_domains.yaml index 0b4a0c44c..a33f58e4e 100644 --- a/package/crds/eventgrid.azure.upbound.io_domains.yaml +++ b/package/crds/eventgrid.azure.upbound.io_domains.yaml @@ -797,3 +797,764 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Domain is the Schema for the Domains API. Manages an EventGrid + Domain + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DomainSpec defines the desired state of Domain + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + autoCreateTopicWithFirstSubscription: + description: Whether to create the domain topic when the first + event subscription at the scope of the domain topic is created. + Defaults to true. + type: boolean + autoDeleteTopicWithLastSubscription: + description: Whether to delete the domain topic when the last + event subscription at the scope of the domain topic is deleted. + Defaults to true. + type: boolean + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Event Grid Domain. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Event Grid Domain. Possible + values are SystemAssigned, UserAssigned. + type: string + type: object + inboundIpRule: + description: One or more inbound_ip_rule blocks as defined below. 
+ items: + properties: + action: + description: The action to take when the rule is matched. + Possible values are Allow. Defaults to Allow. + type: string + ipMask: + description: The IP mask (CIDR) to match on. + type: string + type: object + type: array + inputMappingDefaultValues: + description: A input_mapping_default_values block as defined below. + Changing this forces a new resource to be created. + properties: + dataVersion: + description: Specifies the default data version of the EventGrid + Event to associate with the domain. Changing this forces + a new resource to be created. + type: string + eventType: + description: Specifies the default event type of the EventGrid + Event to associate with the domain. Changing this forces + a new resource to be created. + type: string + subject: + description: Specifies the default subject of the EventGrid + Event to associate with the domain. Changing this forces + a new resource to be created. + type: string + type: object + inputMappingFields: + description: A input_mapping_fields block as defined below. Changing + this forces a new resource to be created. + properties: + dataVersion: + description: Specifies the data version of the EventGrid Event + to associate with the domain. Changing this forces a new + resource to be created. + type: string + eventTime: + description: Specifies the event time of the EventGrid Event + to associate with the domain. Changing this forces a new + resource to be created. + type: string + eventType: + description: Specifies the event type of the EventGrid Event + to associate with the domain. Changing this forces a new + resource to be created. + type: string + id: + description: Specifies the id of the EventGrid Event to associate + with the domain. Changing this forces a new resource to + be created. + type: string + subject: + description: Specifies the subject of the EventGrid Event + to associate with the domain. Changing this forces a new + resource to be created. 
+ type: string + topic: + description: Specifies the topic of the EventGrid Event to + associate with the domain. Changing this forces a new resource + to be created. + type: string + type: object + inputSchema: + description: Specifies the schema in which incoming events will + be published to this domain. Allowed values are CloudEventSchemaV1_0, + CustomEventSchema, or EventGridSchema. Defaults to EventGridSchema. + Changing this forces a new resource to be created. + type: string + localAuthEnabled: + description: Whether local authentication methods is enabled for + the EventGrid Domain. Defaults to true. + type: boolean + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + publicNetworkAccessEnabled: + description: Whether or not public network access is allowed for + this server. Defaults to true. + type: boolean + resourceGroupName: + description: The name of the resource group in which the EventGrid + Domain exists. Changing this forces a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. 
+ The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + autoCreateTopicWithFirstSubscription: + description: Whether to create the domain topic when the first + event subscription at the scope of the domain topic is created. + Defaults to true. + type: boolean + autoDeleteTopicWithLastSubscription: + description: Whether to delete the domain topic when the last + event subscription at the scope of the domain topic is deleted. + Defaults to true. + type: boolean + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Event Grid Domain. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Event Grid Domain. Possible + values are SystemAssigned, UserAssigned. + type: string + type: object + inboundIpRule: + description: One or more inbound_ip_rule blocks as defined below. + items: + properties: + action: + description: The action to take when the rule is matched. + Possible values are Allow. Defaults to Allow. + type: string + ipMask: + description: The IP mask (CIDR) to match on. + type: string + type: object + type: array + inputMappingDefaultValues: + description: A input_mapping_default_values block as defined below. + Changing this forces a new resource to be created. + properties: + dataVersion: + description: Specifies the default data version of the EventGrid + Event to associate with the domain. Changing this forces + a new resource to be created. 
+ type: string + eventType: + description: Specifies the default event type of the EventGrid + Event to associate with the domain. Changing this forces + a new resource to be created. + type: string + subject: + description: Specifies the default subject of the EventGrid + Event to associate with the domain. Changing this forces + a new resource to be created. + type: string + type: object + inputMappingFields: + description: A input_mapping_fields block as defined below. Changing + this forces a new resource to be created. + properties: + dataVersion: + description: Specifies the data version of the EventGrid Event + to associate with the domain. Changing this forces a new + resource to be created. + type: string + eventTime: + description: Specifies the event time of the EventGrid Event + to associate with the domain. Changing this forces a new + resource to be created. + type: string + eventType: + description: Specifies the event type of the EventGrid Event + to associate with the domain. Changing this forces a new + resource to be created. + type: string + id: + description: Specifies the id of the EventGrid Event to associate + with the domain. Changing this forces a new resource to + be created. + type: string + subject: + description: Specifies the subject of the EventGrid Event + to associate with the domain. Changing this forces a new + resource to be created. + type: string + topic: + description: Specifies the topic of the EventGrid Event to + associate with the domain. Changing this forces a new resource + to be created. + type: string + type: object + inputSchema: + description: Specifies the schema in which incoming events will + be published to this domain. Allowed values are CloudEventSchemaV1_0, + CustomEventSchema, or EventGridSchema. Defaults to EventGridSchema. + Changing this forces a new resource to be created. + type: string + localAuthEnabled: + description: Whether local authentication methods is enabled for + the EventGrid Domain. 
Defaults to true. + type: boolean + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + publicNetworkAccessEnabled: + description: Whether or not public network access is allowed for + this server. Defaults to true. + type: boolean + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + status: + description: DomainStatus defines the observed state of Domain. + properties: + atProvider: + properties: + autoCreateTopicWithFirstSubscription: + description: Whether to create the domain topic when the first + event subscription at the scope of the domain topic is created. + Defaults to true. + type: boolean + autoDeleteTopicWithLastSubscription: + description: Whether to delete the domain topic when the last + event subscription at the scope of the domain topic is deleted. + Defaults to true. + type: boolean + endpoint: + description: The Endpoint associated with the EventGrid Domain. + type: string + id: + description: The ID of the EventGrid Domain. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Event Grid Domain. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Principal ID associated with this Managed + Service Identity. + type: string + tenantId: + description: The Tenant ID associated with this Managed Service + Identity. + type: string + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Event Grid Domain. Possible + values are SystemAssigned, UserAssigned. + type: string + type: object + inboundIpRule: + description: One or more inbound_ip_rule blocks as defined below. + items: + properties: + action: + description: The action to take when the rule is matched. 
+ Possible values are Allow. Defaults to Allow. + type: string + ipMask: + description: The IP mask (CIDR) to match on. + type: string + type: object + type: array + inputMappingDefaultValues: + description: A input_mapping_default_values block as defined below. + Changing this forces a new resource to be created. + properties: + dataVersion: + description: Specifies the default data version of the EventGrid + Event to associate with the domain. Changing this forces + a new resource to be created. + type: string + eventType: + description: Specifies the default event type of the EventGrid + Event to associate with the domain. Changing this forces + a new resource to be created. + type: string + subject: + description: Specifies the default subject of the EventGrid + Event to associate with the domain. Changing this forces + a new resource to be created. + type: string + type: object + inputMappingFields: + description: A input_mapping_fields block as defined below. Changing + this forces a new resource to be created. + properties: + dataVersion: + description: Specifies the data version of the EventGrid Event + to associate with the domain. Changing this forces a new + resource to be created. + type: string + eventTime: + description: Specifies the event time of the EventGrid Event + to associate with the domain. Changing this forces a new + resource to be created. + type: string + eventType: + description: Specifies the event type of the EventGrid Event + to associate with the domain. Changing this forces a new + resource to be created. + type: string + id: + description: Specifies the id of the EventGrid Event to associate + with the domain. Changing this forces a new resource to + be created. + type: string + subject: + description: Specifies the subject of the EventGrid Event + to associate with the domain. Changing this forces a new + resource to be created. 
+ type: string + topic: + description: Specifies the topic of the EventGrid Event to + associate with the domain. Changing this forces a new resource + to be created. + type: string + type: object + inputSchema: + description: Specifies the schema in which incoming events will + be published to this domain. Allowed values are CloudEventSchemaV1_0, + CustomEventSchema, or EventGridSchema. Defaults to EventGridSchema. + Changing this forces a new resource to be created. + type: string + localAuthEnabled: + description: Whether local authentication methods is enabled for + the EventGrid Domain. Defaults to true. + type: boolean + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + publicNetworkAccessEnabled: + description: Whether or not public network access is allowed for + this server. Defaults to true. + type: boolean + resourceGroupName: + description: The name of the resource group in which the EventGrid + Domain exists. Changing this forces a new resource to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/eventgrid.azure.upbound.io_eventsubscriptions.yaml b/package/crds/eventgrid.azure.upbound.io_eventsubscriptions.yaml index 227d3c989..eb27071c7 100644 --- a/package/crds/eventgrid.azure.upbound.io_eventsubscriptions.yaml +++ b/package/crds/eventgrid.azure.upbound.io_eventsubscriptions.yaml @@ -2413,3 +2413,2337 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: EventSubscription is the Schema for the 
EventSubscriptions API. + Manages an EventGrid Event Subscription + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: EventSubscriptionSpec defines the desired state of EventSubscription + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + advancedFilter: + description: A advanced_filter block as defined below. + properties: + boolEquals: + description: Compares a value of an event using a single boolean + value. + items: + properties: + key: + description: Specifies the field within the event data + that you want to use for filtering. Type of the field + can be a number, boolean, or string. 
+ type: string + value: + description: Specifies a single value to compare to + when using a single value operator. + type: boolean + type: object + type: array + isNotNull: + description: Evaluates if a value of an event isn't NULL or + undefined. + items: + properties: + key: + description: Specifies the field within the event data + that you want to use for filtering. Type of the field + can be a number, boolean, or string. + type: string + type: object + type: array + isNullOrUndefined: + description: Evaluates if a value of an event is NULL or undefined. + items: + properties: + key: + description: Specifies the field within the event data + that you want to use for filtering. Type of the field + can be a number, boolean, or string. + type: string + type: object + type: array + numberGreaterThan: + description: Compares a value of an event using a single floating + point number. + items: + properties: + key: + description: Specifies the field within the event data + that you want to use for filtering. Type of the field + can be a number, boolean, or string. + type: string + value: + description: Specifies a single value to compare to + when using a single value operator. + type: number + type: object + type: array + numberGreaterThanOrEquals: + description: Compares a value of an event using a single floating + point number. + items: + properties: + key: + description: Specifies the field within the event data + that you want to use for filtering. Type of the field + can be a number, boolean, or string. + type: string + value: + description: Specifies a single value to compare to + when using a single value operator. + type: number + type: object + type: array + numberIn: + description: Compares a value of an event using multiple floating + point numbers. + items: + properties: + key: + description: Specifies the field within the event data + that you want to use for filtering. Type of the field + can be a number, boolean, or string. 
+ type: string + values: + description: Specifies an array of values to compare + to when using a multiple values operator. + items: + type: number + type: array + type: object + type: array + numberInRange: + description: Compares a value of an event using multiple floating + point number ranges. + items: + properties: + key: + description: Specifies the field within the event data + that you want to use for filtering. Type of the field + can be a number, boolean, or string. + type: string + values: + description: Specifies an array of values to compare + to when using a multiple values operator. + items: + items: + type: number + type: array + type: array + type: object + type: array + numberLessThan: + description: Compares a value of an event using a single floating + point number. + items: + properties: + key: + description: Specifies the field within the event data + that you want to use for filtering. Type of the field + can be a number, boolean, or string. + type: string + value: + description: Specifies a single value to compare to + when using a single value operator. + type: number + type: object + type: array + numberLessThanOrEquals: + description: Compares a value of an event using a single floating + point number. + items: + properties: + key: + description: Specifies the field within the event data + that you want to use for filtering. Type of the field + can be a number, boolean, or string. + type: string + value: + description: Specifies a single value to compare to + when using a single value operator. + type: number + type: object + type: array + numberNotIn: + description: Compares a value of an event using multiple floating + point numbers. + items: + properties: + key: + description: Specifies the field within the event data + that you want to use for filtering. Type of the field + can be a number, boolean, or string. + type: string + values: + description: Specifies an array of values to compare + to when using a multiple values operator. 
+ items: + type: number + type: array + type: object + type: array + numberNotInRange: + description: Compares a value of an event using multiple floating + point number ranges. + items: + properties: + key: + description: Specifies the field within the event data + that you want to use for filtering. Type of the field + can be a number, boolean, or string. + type: string + values: + description: Specifies an array of values to compare + to when using a multiple values operator. + items: + items: + type: number + type: array + type: array + type: object + type: array + stringBeginsWith: + description: Compares a value of an event using multiple string + values. + items: + properties: + key: + description: Specifies the field within the event data + that you want to use for filtering. Type of the field + can be a number, boolean, or string. + type: string + values: + description: Specifies an array of values to compare + to when using a multiple values operator. + items: + type: string + type: array + type: object + type: array + stringContains: + description: Compares a value of an event using multiple string + values. + items: + properties: + key: + description: Specifies the field within the event data + that you want to use for filtering. Type of the field + can be a number, boolean, or string. + type: string + values: + description: Specifies an array of values to compare + to when using a multiple values operator. + items: + type: string + type: array + type: object + type: array + stringEndsWith: + description: Compares a value of an event using multiple string + values. + items: + properties: + key: + description: Specifies the field within the event data + that you want to use for filtering. Type of the field + can be a number, boolean, or string. + type: string + values: + description: Specifies an array of values to compare + to when using a multiple values operator. 
+ items: + type: string + type: array + type: object + type: array + stringIn: + description: Compares a value of an event using multiple string + values. + items: + properties: + key: + description: Specifies the field within the event data + that you want to use for filtering. Type of the field + can be a number, boolean, or string. + type: string + values: + description: Specifies an array of values to compare + to when using a multiple values operator. + items: + type: string + type: array + type: object + type: array + stringNotBeginsWith: + description: Compares a value of an event using multiple string + values. + items: + properties: + key: + description: Specifies the field within the event data + that you want to use for filtering. Type of the field + can be a number, boolean, or string. + type: string + values: + description: Specifies an array of values to compare + to when using a multiple values operator. + items: + type: string + type: array + type: object + type: array + stringNotContains: + description: Compares a value of an event using multiple string + values. + items: + properties: + key: + description: Specifies the field within the event data + that you want to use for filtering. Type of the field + can be a number, boolean, or string. + type: string + values: + description: Specifies an array of values to compare + to when using a multiple values operator. + items: + type: string + type: array + type: object + type: array + stringNotEndsWith: + description: Compares a value of an event using multiple string + values. + items: + properties: + key: + description: Specifies the field within the event data + that you want to use for filtering. Type of the field + can be a number, boolean, or string. + type: string + values: + description: Specifies an array of values to compare + to when using a multiple values operator. 
+ items: + type: string + type: array + type: object + type: array + stringNotIn: + description: Compares a value of an event using multiple string + values. + items: + properties: + key: + description: Specifies the field within the event data + that you want to use for filtering. Type of the field + can be a number, boolean, or string. + type: string + values: + description: Specifies an array of values to compare + to when using a multiple values operator. + items: + type: string + type: array + type: object + type: array + type: object + advancedFilteringOnArraysEnabled: + description: Specifies whether advanced filters should be evaluated + against an array of values instead of expecting a singular value. + Defaults to false. + type: boolean + azureFunctionEndpoint: + description: An azure_function_endpoint block as defined below. + properties: + functionId: + description: Specifies the ID of the Function where the Event + Subscription will receive events. This must be the functions + ID in format {function_app.id}/functions/{name}. + type: string + maxEventsPerBatch: + description: Maximum number of events per batch. + type: number + preferredBatchSizeInKilobytes: + description: Preferred batch size in Kilobytes. + type: number + type: object + deadLetterIdentity: + description: A dead_letter_identity block as defined below. + properties: + type: + description: Specifies the type of Managed Service Identity + that is used for dead lettering. Allowed value is SystemAssigned, + UserAssigned. + type: string + userAssignedIdentity: + description: The user identity associated with the resource. + type: string + type: object + deliveryIdentity: + description: A delivery_identity block as defined below. + properties: + type: + description: Specifies the type of Managed Service Identity + that is used for event delivery. Allowed value is SystemAssigned, + UserAssigned. 
+ type: string + userAssignedIdentity: + description: The user identity associated with the resource. + type: string + type: object + deliveryProperty: + description: One or more delivery_property blocks as defined below. + items: + properties: + headerName: + description: The name of the header to send on to the destination + type: string + secret: + description: True if the value is a secret and should be + protected, otherwise false. If True, then this value won't + be returned from Azure API calls + type: boolean + sourceField: + description: If the type is Dynamic, then provide the payload + field to be used as the value. Valid source fields differ + by subscription type. + type: string + type: + description: Either Static or Dynamic + type: string + valueSecretRef: + description: If the type is Static, then provide the value + to use + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + eventDeliverySchema: + description: 'Specifies the event delivery schema for the event + subscription. Possible values include: EventGridSchema, CloudEventSchemaV1_0, + CustomInputSchema. Defaults to EventGridSchema. Changing this + forces a new resource to be created.' + type: string + eventhubEndpointId: + description: Specifies the id where the Event Hub is located. + type: string + expirationTimeUtc: + description: Specifies the expiration time of the event subscription + (Datetime Format RFC 3339). + type: string + hybridConnectionEndpointId: + description: Specifies the id where the Hybrid Connection is located. + type: string + includedEventTypes: + description: A list of applicable event types that need to be + part of the event subscription. 
+ items: + type: string + type: array + labels: + description: A list of labels to assign to the event subscription. + items: + type: string + type: array + name: + description: Specifies the name of the EventGrid Event Subscription + resource. Changing this forces a new resource to be created. + type: string + retryPolicy: + description: A retry_policy block as defined below. + properties: + eventTimeToLive: + description: Specifies the time to live (in minutes) for events. + Supported range is 1 to 1440. See official documentation + for more details. + type: number + maxDeliveryAttempts: + description: Specifies the maximum number of delivery retry + attempts for events. + type: number + type: object + scope: + description: Specifies the scope at which the EventGrid Event + Subscription should be created. Changing this forces a new resource + to be created. + type: string + scopeRef: + description: Reference to a ResourceGroup in azure to populate + scope. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + scopeSelector: + description: Selector for a ResourceGroup in azure to populate + scope. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + serviceBusQueueEndpointId: + description: Specifies the id where the Service Bus Queue is located. + type: string + serviceBusTopicEndpointId: + description: Specifies the id where the Service Bus Topic is located. + type: string + storageBlobDeadLetterDestination: + description: A storage_blob_dead_letter_destination block as defined + below. + properties: + storageAccountId: + description: Specifies the id of the storage account id where + the storage blob is located. + type: string + storageBlobContainerName: + description: Specifies the name of the Storage blob container + that is the destination of the deadletter events. + type: string + type: object + storageQueueEndpoint: + description: A storage_queue_endpoint block as defined below. + properties: + queueMessageTimeToLiveInSeconds: + description: Storage queue message time to live in seconds. 
+ type: number + queueName: + description: Specifies the name of the storage queue where + the Event Subscription will receive events. + type: string + queueNameRef: + description: Reference to a Queue in storage to populate queueName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + queueNameSelector: + description: Selector for a Queue in storage to populate queueName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + storageAccountId: + description: Specifies the id of the storage account id where + the storage queue is located. + type: string + storageAccountIdRef: + description: Reference to a Account in storage to populate + storageAccountId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + storageAccountIdSelector: + description: Selector for a Account in storage to populate + storageAccountId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + subjectFilter: + description: A subject_filter block as defined below. + properties: + caseSensitive: + description: Specifies if subject_begins_with and subject_ends_with + case sensitive. This value + type: boolean + subjectBeginsWith: + description: A string to filter events for an event subscription + based on a resource path prefix. + type: string + subjectEndsWith: + description: A string to filter events for an event subscription + based on a resource path suffix. + type: string + type: object + webhookEndpoint: + description: A webhook_endpoint block as defined below. + properties: + activeDirectoryAppIdOrUri: + description: The Azure Active Directory Application ID or + URI to get the access token that will be included as the + bearer token in delivery requests. + type: string + activeDirectoryTenantId: + description: The Azure Active Directory Tenant ID to get the + access token that will be included as the bearer token in + delivery requests. + type: string + maxEventsPerBatch: + description: Maximum number of events per batch. + type: number + preferredBatchSizeInKilobytes: + description: Preferred batch size in Kilobytes. 
+ type: number + url: + description: Specifies the url of the webhook where the Event + Subscription will receive events. + type: string + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + advancedFilter: + description: A advanced_filter block as defined below. + properties: + boolEquals: + description: Compares a value of an event using a single boolean + value. + items: + properties: + key: + description: Specifies the field within the event data + that you want to use for filtering. Type of the field + can be a number, boolean, or string. + type: string + value: + description: Specifies a single value to compare to + when using a single value operator. + type: boolean + type: object + type: array + isNotNull: + description: Evaluates if a value of an event isn't NULL or + undefined. + items: + properties: + key: + description: Specifies the field within the event data + that you want to use for filtering. Type of the field + can be a number, boolean, or string. + type: string + type: object + type: array + isNullOrUndefined: + description: Evaluates if a value of an event is NULL or undefined. + items: + properties: + key: + description: Specifies the field within the event data + that you want to use for filtering. Type of the field + can be a number, boolean, or string. 
+ type: string + type: object + type: array + numberGreaterThan: + description: Compares a value of an event using a single floating + point number. + items: + properties: + key: + description: Specifies the field within the event data + that you want to use for filtering. Type of the field + can be a number, boolean, or string. + type: string + value: + description: Specifies a single value to compare to + when using a single value operator. + type: number + type: object + type: array + numberGreaterThanOrEquals: + description: Compares a value of an event using a single floating + point number. + items: + properties: + key: + description: Specifies the field within the event data + that you want to use for filtering. Type of the field + can be a number, boolean, or string. + type: string + value: + description: Specifies a single value to compare to + when using a single value operator. + type: number + type: object + type: array + numberIn: + description: Compares a value of an event using multiple floating + point numbers. + items: + properties: + key: + description: Specifies the field within the event data + that you want to use for filtering. Type of the field + can be a number, boolean, or string. + type: string + values: + description: Specifies an array of values to compare + to when using a multiple values operator. + items: + type: number + type: array + type: object + type: array + numberInRange: + description: Compares a value of an event using multiple floating + point number ranges. + items: + properties: + key: + description: Specifies the field within the event data + that you want to use for filtering. Type of the field + can be a number, boolean, or string. + type: string + values: + description: Specifies an array of values to compare + to when using a multiple values operator. 
+ items: + items: + type: number + type: array + type: array + type: object + type: array + numberLessThan: + description: Compares a value of an event using a single floating + point number. + items: + properties: + key: + description: Specifies the field within the event data + that you want to use for filtering. Type of the field + can be a number, boolean, or string. + type: string + value: + description: Specifies a single value to compare to + when using a single value operator. + type: number + type: object + type: array + numberLessThanOrEquals: + description: Compares a value of an event using a single floating + point number. + items: + properties: + key: + description: Specifies the field within the event data + that you want to use for filtering. Type of the field + can be a number, boolean, or string. + type: string + value: + description: Specifies a single value to compare to + when using a single value operator. + type: number + type: object + type: array + numberNotIn: + description: Compares a value of an event using multiple floating + point numbers. + items: + properties: + key: + description: Specifies the field within the event data + that you want to use for filtering. Type of the field + can be a number, boolean, or string. + type: string + values: + description: Specifies an array of values to compare + to when using a multiple values operator. + items: + type: number + type: array + type: object + type: array + numberNotInRange: + description: Compares a value of an event using multiple floating + point number ranges. + items: + properties: + key: + description: Specifies the field within the event data + that you want to use for filtering. Type of the field + can be a number, boolean, or string. + type: string + values: + description: Specifies an array of values to compare + to when using a multiple values operator. 
+ items: + items: + type: number + type: array + type: array + type: object + type: array + stringBeginsWith: + description: Compares a value of an event using multiple string + values. + items: + properties: + key: + description: Specifies the field within the event data + that you want to use for filtering. Type of the field + can be a number, boolean, or string. + type: string + values: + description: Specifies an array of values to compare + to when using a multiple values operator. + items: + type: string + type: array + type: object + type: array + stringContains: + description: Compares a value of an event using multiple string + values. + items: + properties: + key: + description: Specifies the field within the event data + that you want to use for filtering. Type of the field + can be a number, boolean, or string. + type: string + values: + description: Specifies an array of values to compare + to when using a multiple values operator. + items: + type: string + type: array + type: object + type: array + stringEndsWith: + description: Compares a value of an event using multiple string + values. + items: + properties: + key: + description: Specifies the field within the event data + that you want to use for filtering. Type of the field + can be a number, boolean, or string. + type: string + values: + description: Specifies an array of values to compare + to when using a multiple values operator. + items: + type: string + type: array + type: object + type: array + stringIn: + description: Compares a value of an event using multiple string + values. + items: + properties: + key: + description: Specifies the field within the event data + that you want to use for filtering. Type of the field + can be a number, boolean, or string. + type: string + values: + description: Specifies an array of values to compare + to when using a multiple values operator. 
+ items: + type: string + type: array + type: object + type: array + stringNotBeginsWith: + description: Compares a value of an event using multiple string + values. + items: + properties: + key: + description: Specifies the field within the event data + that you want to use for filtering. Type of the field + can be a number, boolean, or string. + type: string + values: + description: Specifies an array of values to compare + to when using a multiple values operator. + items: + type: string + type: array + type: object + type: array + stringNotContains: + description: Compares a value of an event using multiple string + values. + items: + properties: + key: + description: Specifies the field within the event data + that you want to use for filtering. Type of the field + can be a number, boolean, or string. + type: string + values: + description: Specifies an array of values to compare + to when using a multiple values operator. + items: + type: string + type: array + type: object + type: array + stringNotEndsWith: + description: Compares a value of an event using multiple string + values. + items: + properties: + key: + description: Specifies the field within the event data + that you want to use for filtering. Type of the field + can be a number, boolean, or string. + type: string + values: + description: Specifies an array of values to compare + to when using a multiple values operator. + items: + type: string + type: array + type: object + type: array + stringNotIn: + description: Compares a value of an event using multiple string + values. + items: + properties: + key: + description: Specifies the field within the event data + that you want to use for filtering. Type of the field + can be a number, boolean, or string. + type: string + values: + description: Specifies an array of values to compare + to when using a multiple values operator. 
+ items: + type: string + type: array + type: object + type: array + type: object + advancedFilteringOnArraysEnabled: + description: Specifies whether advanced filters should be evaluated + against an array of values instead of expecting a singular value. + Defaults to false. + type: boolean + azureFunctionEndpoint: + description: An azure_function_endpoint block as defined below. + properties: + functionId: + description: Specifies the ID of the Function where the Event + Subscription will receive events. This must be the functions + ID in format {function_app.id}/functions/{name}. + type: string + maxEventsPerBatch: + description: Maximum number of events per batch. + type: number + preferredBatchSizeInKilobytes: + description: Preferred batch size in Kilobytes. + type: number + type: object + deadLetterIdentity: + description: A dead_letter_identity block as defined below. + properties: + type: + description: Specifies the type of Managed Service Identity + that is used for dead lettering. Allowed value is SystemAssigned, + UserAssigned. + type: string + userAssignedIdentity: + description: The user identity associated with the resource. + type: string + type: object + deliveryIdentity: + description: A delivery_identity block as defined below. + properties: + type: + description: Specifies the type of Managed Service Identity + that is used for event delivery. Allowed value is SystemAssigned, + UserAssigned. + type: string + userAssignedIdentity: + description: The user identity associated with the resource. + type: string + type: object + deliveryProperty: + description: One or more delivery_property blocks as defined below. + items: + properties: + headerName: + description: The name of the header to send on to the destination + type: string + secret: + description: True if the value is a secret and should be + protected, otherwise false. 
If True, then this value won't + be returned from Azure API calls + type: boolean + sourceField: + description: If the type is Dynamic, then provide the payload + field to be used as the value. Valid source fields differ + by subscription type. + type: string + type: + description: Either Static or Dynamic + type: string + type: object + type: array + eventDeliverySchema: + description: 'Specifies the event delivery schema for the event + subscription. Possible values include: EventGridSchema, CloudEventSchemaV1_0, + CustomInputSchema. Defaults to EventGridSchema. Changing this + forces a new resource to be created.' + type: string + eventhubEndpointId: + description: Specifies the id where the Event Hub is located. + type: string + expirationTimeUtc: + description: Specifies the expiration time of the event subscription + (Datetime Format RFC 3339). + type: string + hybridConnectionEndpointId: + description: Specifies the id where the Hybrid Connection is located. + type: string + includedEventTypes: + description: A list of applicable event types that need to be + part of the event subscription. + items: + type: string + type: array + labels: + description: A list of labels to assign to the event subscription. + items: + type: string + type: array + name: + description: Specifies the name of the EventGrid Event Subscription + resource. Changing this forces a new resource to be created. + type: string + retryPolicy: + description: A retry_policy block as defined below. + properties: + eventTimeToLive: + description: Specifies the time to live (in minutes) for events. + Supported range is 1 to 1440. See official documentation + for more details. + type: number + maxDeliveryAttempts: + description: Specifies the maximum number of delivery retry + attempts for events. + type: number + type: object + scope: + description: Specifies the scope at which the EventGrid Event + Subscription should be created. Changing this forces a new resource + to be created. 
+ type: string + scopeRef: + description: Reference to a ResourceGroup in azure to populate + scope. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + scopeSelector: + description: Selector for a ResourceGroup in azure to populate + scope. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + serviceBusQueueEndpointId: + description: Specifies the id where the Service Bus Queue is located. + type: string + serviceBusTopicEndpointId: + description: Specifies the id where the Service Bus Topic is located. + type: string + storageBlobDeadLetterDestination: + description: A storage_blob_dead_letter_destination block as defined + below. + properties: + storageAccountId: + description: Specifies the id of the storage account id where + the storage blob is located. + type: string + storageBlobContainerName: + description: Specifies the name of the Storage blob container + that is the destination of the deadletter events. + type: string + type: object + storageQueueEndpoint: + description: A storage_queue_endpoint block as defined below. + properties: + queueMessageTimeToLiveInSeconds: + description: Storage queue message time to live in seconds. + type: number + queueName: + description: Specifies the name of the storage queue where + the Event Subscription will receive events. + type: string + queueNameRef: + description: Reference to a Queue in storage to populate queueName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + queueNameSelector: + description: Selector for a Queue in storage to populate queueName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + storageAccountId: + description: Specifies the id of the storage account id where + the storage queue is located. + type: string + storageAccountIdRef: + description: Reference to a Account in storage to populate + storageAccountId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + storageAccountIdSelector: + description: Selector for a Account in storage to populate + storageAccountId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + subjectFilter: + description: A subject_filter block as defined below. 
+ properties: + caseSensitive: + description: Specifies if subject_begins_with and subject_ends_with + case sensitive. This value + type: boolean + subjectBeginsWith: + description: A string to filter events for an event subscription + based on a resource path prefix. + type: string + subjectEndsWith: + description: A string to filter events for an event subscription + based on a resource path suffix. + type: string + type: object + webhookEndpoint: + description: A webhook_endpoint block as defined below. + properties: + activeDirectoryAppIdOrUri: + description: The Azure Active Directory Application ID or + URI to get the access token that will be included as the + bearer token in delivery requests. + type: string + activeDirectoryTenantId: + description: The Azure Active Directory Tenant ID to get the + access token that will be included as the bearer token in + delivery requests. + type: string + maxEventsPerBatch: + description: Maximum number of events per batch. + type: number + preferredBatchSizeInKilobytes: + description: Preferred batch size in Kilobytes. + type: number + url: + description: Specifies the url of the webhook where the Event + Subscription will receive events. + type: string + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: EventSubscriptionStatus defines the observed state of EventSubscription. + properties: + atProvider: + properties: + advancedFilter: + description: A advanced_filter block as defined below. + properties: + boolEquals: + description: Compares a value of an event using a single boolean + value. + items: + properties: + key: + description: Specifies the field within the event data + that you want to use for filtering. Type of the field + can be a number, boolean, or string. + type: string + value: + description: Specifies a single value to compare to + when using a single value operator. + type: boolean + type: object + type: array + isNotNull: + description: Evaluates if a value of an event isn't NULL or + undefined. 
+ items: + properties: + key: + description: Specifies the field within the event data + that you want to use for filtering. Type of the field + can be a number, boolean, or string. + type: string + type: object + type: array + isNullOrUndefined: + description: Evaluates if a value of an event is NULL or undefined. + items: + properties: + key: + description: Specifies the field within the event data + that you want to use for filtering. Type of the field + can be a number, boolean, or string. + type: string + type: object + type: array + numberGreaterThan: + description: Compares a value of an event using a single floating + point number. + items: + properties: + key: + description: Specifies the field within the event data + that you want to use for filtering. Type of the field + can be a number, boolean, or string. + type: string + value: + description: Specifies a single value to compare to + when using a single value operator. + type: number + type: object + type: array + numberGreaterThanOrEquals: + description: Compares a value of an event using a single floating + point number. + items: + properties: + key: + description: Specifies the field within the event data + that you want to use for filtering. Type of the field + can be a number, boolean, or string. + type: string + value: + description: Specifies a single value to compare to + when using a single value operator. + type: number + type: object + type: array + numberIn: + description: Compares a value of an event using multiple floating + point numbers. + items: + properties: + key: + description: Specifies the field within the event data + that you want to use for filtering. Type of the field + can be a number, boolean, or string. + type: string + values: + description: Specifies an array of values to compare + to when using a multiple values operator. 
+ items: + type: number + type: array + type: object + type: array + numberInRange: + description: Compares a value of an event using multiple floating + point number ranges. + items: + properties: + key: + description: Specifies the field within the event data + that you want to use for filtering. Type of the field + can be a number, boolean, or string. + type: string + values: + description: Specifies an array of values to compare + to when using a multiple values operator. + items: + items: + type: number + type: array + type: array + type: object + type: array + numberLessThan: + description: Compares a value of an event using a single floating + point number. + items: + properties: + key: + description: Specifies the field within the event data + that you want to use for filtering. Type of the field + can be a number, boolean, or string. + type: string + value: + description: Specifies a single value to compare to + when using a single value operator. + type: number + type: object + type: array + numberLessThanOrEquals: + description: Compares a value of an event using a single floating + point number. + items: + properties: + key: + description: Specifies the field within the event data + that you want to use for filtering. Type of the field + can be a number, boolean, or string. + type: string + value: + description: Specifies a single value to compare to + when using a single value operator. + type: number + type: object + type: array + numberNotIn: + description: Compares a value of an event using multiple floating + point numbers. + items: + properties: + key: + description: Specifies the field within the event data + that you want to use for filtering. Type of the field + can be a number, boolean, or string. + type: string + values: + description: Specifies an array of values to compare + to when using a multiple values operator. 
+ items: + type: number + type: array + type: object + type: array + numberNotInRange: + description: Compares a value of an event using multiple floating + point number ranges. + items: + properties: + key: + description: Specifies the field within the event data + that you want to use for filtering. Type of the field + can be a number, boolean, or string. + type: string + values: + description: Specifies an array of values to compare + to when using a multiple values operator. + items: + items: + type: number + type: array + type: array + type: object + type: array + stringBeginsWith: + description: Compares a value of an event using multiple string + values. + items: + properties: + key: + description: Specifies the field within the event data + that you want to use for filtering. Type of the field + can be a number, boolean, or string. + type: string + values: + description: Specifies an array of values to compare + to when using a multiple values operator. + items: + type: string + type: array + type: object + type: array + stringContains: + description: Compares a value of an event using multiple string + values. + items: + properties: + key: + description: Specifies the field within the event data + that you want to use for filtering. Type of the field + can be a number, boolean, or string. + type: string + values: + description: Specifies an array of values to compare + to when using a multiple values operator. + items: + type: string + type: array + type: object + type: array + stringEndsWith: + description: Compares a value of an event using multiple string + values. + items: + properties: + key: + description: Specifies the field within the event data + that you want to use for filtering. Type of the field + can be a number, boolean, or string. + type: string + values: + description: Specifies an array of values to compare + to when using a multiple values operator. 
+ items: + type: string + type: array + type: object + type: array + stringIn: + description: Compares a value of an event using multiple string + values. + items: + properties: + key: + description: Specifies the field within the event data + that you want to use for filtering. Type of the field + can be a number, boolean, or string. + type: string + values: + description: Specifies an array of values to compare + to when using a multiple values operator. + items: + type: string + type: array + type: object + type: array + stringNotBeginsWith: + description: Compares a value of an event using multiple string + values. + items: + properties: + key: + description: Specifies the field within the event data + that you want to use for filtering. Type of the field + can be a number, boolean, or string. + type: string + values: + description: Specifies an array of values to compare + to when using a multiple values operator. + items: + type: string + type: array + type: object + type: array + stringNotContains: + description: Compares a value of an event using multiple string + values. + items: + properties: + key: + description: Specifies the field within the event data + that you want to use for filtering. Type of the field + can be a number, boolean, or string. + type: string + values: + description: Specifies an array of values to compare + to when using a multiple values operator. + items: + type: string + type: array + type: object + type: array + stringNotEndsWith: + description: Compares a value of an event using multiple string + values. + items: + properties: + key: + description: Specifies the field within the event data + that you want to use for filtering. Type of the field + can be a number, boolean, or string. + type: string + values: + description: Specifies an array of values to compare + to when using a multiple values operator. 
+ items: + type: string + type: array + type: object + type: array + stringNotIn: + description: Compares a value of an event using multiple string + values. + items: + properties: + key: + description: Specifies the field within the event data + that you want to use for filtering. Type of the field + can be a number, boolean, or string. + type: string + values: + description: Specifies an array of values to compare + to when using a multiple values operator. + items: + type: string + type: array + type: object + type: array + type: object + advancedFilteringOnArraysEnabled: + description: Specifies whether advanced filters should be evaluated + against an array of values instead of expecting a singular value. + Defaults to false. + type: boolean + azureFunctionEndpoint: + description: An azure_function_endpoint block as defined below. + properties: + functionId: + description: Specifies the ID of the Function where the Event + Subscription will receive events. This must be the functions + ID in format {function_app.id}/functions/{name}. + type: string + maxEventsPerBatch: + description: Maximum number of events per batch. + type: number + preferredBatchSizeInKilobytes: + description: Preferred batch size in Kilobytes. + type: number + type: object + deadLetterIdentity: + description: A dead_letter_identity block as defined below. + properties: + type: + description: Specifies the type of Managed Service Identity + that is used for dead lettering. Allowed value is SystemAssigned, + UserAssigned. + type: string + userAssignedIdentity: + description: The user identity associated with the resource. + type: string + type: object + deliveryIdentity: + description: A delivery_identity block as defined below. + properties: + type: + description: Specifies the type of Managed Service Identity + that is used for event delivery. Allowed value is SystemAssigned, + UserAssigned. 
+ type: string + userAssignedIdentity: + description: The user identity associated with the resource. + type: string + type: object + deliveryProperty: + description: One or more delivery_property blocks as defined below. + items: + properties: + headerName: + description: The name of the header to send on to the destination + type: string + secret: + description: True if the value is a secret and should be + protected, otherwise false. If True, then this value won't + be returned from Azure API calls + type: boolean + sourceField: + description: If the type is Dynamic, then provide the payload + field to be used as the value. Valid source fields differ + by subscription type. + type: string + type: + description: Either Static or Dynamic + type: string + type: object + type: array + eventDeliverySchema: + description: 'Specifies the event delivery schema for the event + subscription. Possible values include: EventGridSchema, CloudEventSchemaV1_0, + CustomInputSchema. Defaults to EventGridSchema. Changing this + forces a new resource to be created.' + type: string + eventhubEndpointId: + description: Specifies the id where the Event Hub is located. + type: string + expirationTimeUtc: + description: Specifies the expiration time of the event subscription + (Datetime Format RFC 3339). + type: string + hybridConnectionEndpointId: + description: Specifies the id where the Hybrid Connection is located. + type: string + id: + description: The ID of the EventGrid Event Subscription. + type: string + includedEventTypes: + description: A list of applicable event types that need to be + part of the event subscription. + items: + type: string + type: array + labels: + description: A list of labels to assign to the event subscription. + items: + type: string + type: array + name: + description: Specifies the name of the EventGrid Event Subscription + resource. Changing this forces a new resource to be created. 
+ type: string + retryPolicy: + description: A retry_policy block as defined below. + properties: + eventTimeToLive: + description: Specifies the time to live (in minutes) for events. + Supported range is 1 to 1440. See official documentation + for more details. + type: number + maxDeliveryAttempts: + description: Specifies the maximum number of delivery retry + attempts for events. + type: number + type: object + scope: + description: Specifies the scope at which the EventGrid Event + Subscription should be created. Changing this forces a new resource + to be created. + type: string + serviceBusQueueEndpointId: + description: Specifies the id where the Service Bus Queue is located. + type: string + serviceBusTopicEndpointId: + description: Specifies the id where the Service Bus Topic is located. + type: string + storageBlobDeadLetterDestination: + description: A storage_blob_dead_letter_destination block as defined + below. + properties: + storageAccountId: + description: Specifies the id of the storage account id where + the storage blob is located. + type: string + storageBlobContainerName: + description: Specifies the name of the Storage blob container + that is the destination of the deadletter events. + type: string + type: object + storageQueueEndpoint: + description: A storage_queue_endpoint block as defined below. + properties: + queueMessageTimeToLiveInSeconds: + description: Storage queue message time to live in seconds. + type: number + queueName: + description: Specifies the name of the storage queue where + the Event Subscription will receive events. + type: string + storageAccountId: + description: Specifies the id of the storage account id where + the storage queue is located. + type: string + type: object + subjectFilter: + description: A subject_filter block as defined below. + properties: + caseSensitive: + description: Specifies if subject_begins_with and subject_ends_with + case sensitive. 
This value + type: boolean + subjectBeginsWith: + description: A string to filter events for an event subscription + based on a resource path prefix. + type: string + subjectEndsWith: + description: A string to filter events for an event subscription + based on a resource path suffix. + type: string + type: object + webhookEndpoint: + description: A webhook_endpoint block as defined below. + properties: + activeDirectoryAppIdOrUri: + description: The Azure Active Directory Application ID or + URI to get the access token that will be included as the + bearer token in delivery requests. + type: string + activeDirectoryTenantId: + description: The Azure Active Directory Tenant ID to get the + access token that will be included as the bearer token in + delivery requests. + type: string + baseUrl: + description: (Computed) The base url of the webhook where + the Event Subscription will receive events. + type: string + maxEventsPerBatch: + description: Maximum number of events per batch. + type: number + preferredBatchSizeInKilobytes: + description: Preferred batch size in Kilobytes. + type: number + url: + description: Specifies the url of the webhook where the Event + Subscription will receive events. + type: string + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/eventgrid.azure.upbound.io_systemtopics.yaml b/package/crds/eventgrid.azure.upbound.io_systemtopics.yaml index 4d7a253b3..e37f1c74b 100644 --- a/package/crds/eventgrid.azure.upbound.io_systemtopics.yaml +++ b/package/crds/eventgrid.azure.upbound.io_systemtopics.yaml @@ -721,3 +721,700 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: SystemTopic is the Schema for the SystemTopics API. 
Manages an + Event Grid System Topic + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SystemTopicSpec defines the desired state of SystemTopic + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Event Grid System Topic. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Event Grid System Topic. 
+ Possible values are SystemAssigned, UserAssigned. + type: string + type: object + location: + description: The Azure Region where the Event Grid System Topic + should exist. Changing this forces a new Event Grid System Topic + to be created. + type: string + resourceGroupName: + description: The name of the Resource Group where the Event Grid + System Topic should exist. Changing this forces a new Event + Grid System Topic to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sourceArmResourceId: + description: The ID of the Event Grid System Topic ARM Source. + Changing this forces a new Event Grid System Topic to be created. + type: string + sourceArmResourceIdRef: + description: Reference to a Account in storage to populate sourceArmResourceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + sourceArmResourceIdSelector: + description: Selector for a Account in storage to populate sourceArmResourceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Event Grid System Topic. + type: object + x-kubernetes-map-type: granular + topicType: + description: 'The Topic Type of the Event Grid System Topic. 
The + topic type is validated by Azure and there may be additional + topic types beyond the following: Microsoft.AppConfiguration.ConfigurationStores, + Microsoft.Communication.CommunicationServices, Microsoft.ContainerRegistry.Registries, + Microsoft.Devices.IoTHubs, Microsoft.EventGrid.Domains, Microsoft.EventGrid.Topics, + Microsoft.Eventhub.Namespaces, Microsoft.KeyVault.vaults, Microsoft.MachineLearningServices.Workspaces, + Microsoft.Maps.Accounts, Microsoft.Media.MediaServices, Microsoft.Resources.ResourceGroups, + Microsoft.Resources.Subscriptions, Microsoft.ServiceBus.Namespaces, + Microsoft.SignalRService.SignalR, Microsoft.Storage.StorageAccounts, + Microsoft.Web.ServerFarms and Microsoft.Web.Sites. Changing + this forces a new Event Grid System Topic to be created.' + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Event Grid System Topic. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Event Grid System Topic. + Possible values are SystemAssigned, UserAssigned. 
+ type: string + type: object + location: + description: The Azure Region where the Event Grid System Topic + should exist. Changing this forces a new Event Grid System Topic + to be created. + type: string + sourceArmResourceId: + description: The ID of the Event Grid System Topic ARM Source. + Changing this forces a new Event Grid System Topic to be created. + type: string + sourceArmResourceIdRef: + description: Reference to a Account in storage to populate sourceArmResourceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + sourceArmResourceIdSelector: + description: Selector for a Account in storage to populate sourceArmResourceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Event Grid System Topic. + type: object + x-kubernetes-map-type: granular + topicType: + description: 'The Topic Type of the Event Grid System Topic. The + topic type is validated by Azure and there may be additional + topic types beyond the following: Microsoft.AppConfiguration.ConfigurationStores, + Microsoft.Communication.CommunicationServices, Microsoft.ContainerRegistry.Registries, + Microsoft.Devices.IoTHubs, Microsoft.EventGrid.Domains, Microsoft.EventGrid.Topics, + Microsoft.Eventhub.Namespaces, Microsoft.KeyVault.vaults, Microsoft.MachineLearningServices.Workspaces, + Microsoft.Maps.Accounts, Microsoft.Media.MediaServices, Microsoft.Resources.ResourceGroups, + Microsoft.Resources.Subscriptions, Microsoft.ServiceBus.Namespaces, + Microsoft.SignalRService.SignalR, Microsoft.Storage.StorageAccounts, + Microsoft.Web.ServerFarms and Microsoft.Web.Sites. Changing + this forces a new Event Grid System Topic to be created.' + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. 
+ ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.topicType is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.topicType) + || (has(self.initProvider) && has(self.initProvider.topicType))' + status: + description: SystemTopicStatus defines the observed state of SystemTopic. + properties: + atProvider: + properties: + id: + description: The ID of the Event Grid System Topic. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Event Grid System Topic. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Principal ID associated with this Managed + Service Identity. + type: string + tenantId: + description: The Tenant ID associated with this Managed Service + Identity. + type: string + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Event Grid System Topic. + Possible values are SystemAssigned, UserAssigned. + type: string + type: object + location: + description: The Azure Region where the Event Grid System Topic + should exist. Changing this forces a new Event Grid System Topic + to be created. + type: string + metricArmResourceId: + description: The Metric ARM Resource ID of the Event Grid System + Topic. 
+ type: string + resourceGroupName: + description: The name of the Resource Group where the Event Grid + System Topic should exist. Changing this forces a new Event + Grid System Topic to be created. + type: string + sourceArmResourceId: + description: The ID of the Event Grid System Topic ARM Source. + Changing this forces a new Event Grid System Topic to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Event Grid System Topic. + type: object + x-kubernetes-map-type: granular + topicType: + description: 'The Topic Type of the Event Grid System Topic. The + topic type is validated by Azure and there may be additional + topic types beyond the following: Microsoft.AppConfiguration.ConfigurationStores, + Microsoft.Communication.CommunicationServices, Microsoft.ContainerRegistry.Registries, + Microsoft.Devices.IoTHubs, Microsoft.EventGrid.Domains, Microsoft.EventGrid.Topics, + Microsoft.Eventhub.Namespaces, Microsoft.KeyVault.vaults, Microsoft.MachineLearningServices.Workspaces, + Microsoft.Maps.Accounts, Microsoft.Media.MediaServices, Microsoft.Resources.ResourceGroups, + Microsoft.Resources.Subscriptions, Microsoft.ServiceBus.Namespaces, + Microsoft.SignalRService.SignalR, Microsoft.Storage.StorageAccounts, + Microsoft.Web.ServerFarms and Microsoft.Web.Sites. Changing + this forces a new Event Grid System Topic to be created.' + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/eventgrid.azure.upbound.io_topics.yaml b/package/crds/eventgrid.azure.upbound.io_topics.yaml index 075d2a9a7..fecbfbc32 100644 --- a/package/crds/eventgrid.azure.upbound.io_topics.yaml +++ b/package/crds/eventgrid.azure.upbound.io_topics.yaml @@ -767,3 +767,734 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Topic is the Schema for the Topics API. Manages an EventGrid + Topic + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: TopicSpec defines the desired state of Topic + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Event Grid Topic. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Event Grid Topic. Possible + values are SystemAssigned, UserAssigned. + type: string + type: object + inboundIpRule: + description: One or more inbound_ip_rule blocks as defined below. + items: + properties: + action: + description: The action to take when the rule is matched. + Possible values are Allow. Defaults to Allow. + type: string + ipMask: + description: The IP mask (CIDR) to match on. + type: string + type: object + type: array + inputMappingDefaultValues: + description: A input_mapping_default_values block as defined below. + Changing this forces a new resource to be created. 
+ properties: + dataVersion: + description: Specifies the default data version of the EventGrid + Event to associate with the domain. Changing this forces + a new resource to be created. + type: string + eventType: + description: Specifies the default event type of the EventGrid + Event to associate with the domain. Changing this forces + a new resource to be created. + type: string + subject: + description: Specifies the default subject of the EventGrid + Event to associate with the domain. Changing this forces + a new resource to be created. + type: string + type: object + inputMappingFields: + description: A input_mapping_fields block as defined below. Changing + this forces a new resource to be created. + properties: + dataVersion: + description: Specifies the data version of the EventGrid Event + to associate with the domain. Changing this forces a new + resource to be created. + type: string + eventTime: + description: Specifies the event time of the EventGrid Event + to associate with the domain. Changing this forces a new + resource to be created. + type: string + eventType: + description: Specifies the event type of the EventGrid Event + to associate with the domain. Changing this forces a new + resource to be created. + type: string + id: + description: Specifies the id of the EventGrid Event to associate + with the domain. Changing this forces a new resource to + be created. + type: string + subject: + description: Specifies the subject of the EventGrid Event + to associate with the domain. Changing this forces a new + resource to be created. + type: string + topic: + description: Specifies the topic of the EventGrid Event to + associate with the domain. Changing this forces a new resource + to be created. + type: string + type: object + inputSchema: + description: Specifies the schema in which incoming events will + be published to this domain. Allowed values are CloudEventSchemaV1_0, + CustomEventSchema, or EventGridSchema. Defaults to EventGridSchema. 
+ Changing this forces a new resource to be created. + type: string + localAuthEnabled: + description: Whether local authentication methods is enabled for + the EventGrid Topic. Defaults to true. + type: boolean + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + publicNetworkAccessEnabled: + description: Whether or not public network access is allowed for + this server. Defaults to true. + type: boolean + resourceGroupName: + description: The name of the resource group in which the EventGrid + Topic exists. Changing this forces a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Event Grid Topic. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Event Grid Topic. Possible + values are SystemAssigned, UserAssigned. + type: string + type: object + inboundIpRule: + description: One or more inbound_ip_rule blocks as defined below. + items: + properties: + action: + description: The action to take when the rule is matched. + Possible values are Allow. Defaults to Allow. + type: string + ipMask: + description: The IP mask (CIDR) to match on. + type: string + type: object + type: array + inputMappingDefaultValues: + description: A input_mapping_default_values block as defined below. + Changing this forces a new resource to be created. + properties: + dataVersion: + description: Specifies the default data version of the EventGrid + Event to associate with the domain. Changing this forces + a new resource to be created. + type: string + eventType: + description: Specifies the default event type of the EventGrid + Event to associate with the domain. Changing this forces + a new resource to be created. + type: string + subject: + description: Specifies the default subject of the EventGrid + Event to associate with the domain. Changing this forces + a new resource to be created. + type: string + type: object + inputMappingFields: + description: A input_mapping_fields block as defined below. Changing + this forces a new resource to be created. + properties: + dataVersion: + description: Specifies the data version of the EventGrid Event + to associate with the domain. Changing this forces a new + resource to be created. + type: string + eventTime: + description: Specifies the event time of the EventGrid Event + to associate with the domain. Changing this forces a new + resource to be created. + type: string + eventType: + description: Specifies the event type of the EventGrid Event + to associate with the domain. 
Changing this forces a new + resource to be created. + type: string + id: + description: Specifies the id of the EventGrid Event to associate + with the domain. Changing this forces a new resource to + be created. + type: string + subject: + description: Specifies the subject of the EventGrid Event + to associate with the domain. Changing this forces a new + resource to be created. + type: string + topic: + description: Specifies the topic of the EventGrid Event to + associate with the domain. Changing this forces a new resource + to be created. + type: string + type: object + inputSchema: + description: Specifies the schema in which incoming events will + be published to this domain. Allowed values are CloudEventSchemaV1_0, + CustomEventSchema, or EventGridSchema. Defaults to EventGridSchema. + Changing this forces a new resource to be created. + type: string + localAuthEnabled: + description: Whether local authentication methods is enabled for + the EventGrid Topic. Defaults to true. + type: boolean + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + publicNetworkAccessEnabled: + description: Whether or not public network access is allowed for + this server. Defaults to true. + type: boolean + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. 
If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + status: + description: TopicStatus defines the observed state of Topic. + properties: + atProvider: + properties: + endpoint: + description: The Endpoint associated with the EventGrid Topic. + type: string + id: + description: The EventGrid Topic ID. 
+ type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Event Grid Topic. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Principal ID associated with this Managed + Service Identity. + type: string + tenantId: + description: The Tenant ID associated with this Managed Service + Identity. + type: string + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Event Grid Topic. Possible + values are SystemAssigned, UserAssigned. + type: string + type: object + inboundIpRule: + description: One or more inbound_ip_rule blocks as defined below. + items: + properties: + action: + description: The action to take when the rule is matched. + Possible values are Allow. Defaults to Allow. + type: string + ipMask: + description: The IP mask (CIDR) to match on. + type: string + type: object + type: array + inputMappingDefaultValues: + description: A input_mapping_default_values block as defined below. + Changing this forces a new resource to be created. + properties: + dataVersion: + description: Specifies the default data version of the EventGrid + Event to associate with the domain. Changing this forces + a new resource to be created. + type: string + eventType: + description: Specifies the default event type of the EventGrid + Event to associate with the domain. Changing this forces + a new resource to be created. + type: string + subject: + description: Specifies the default subject of the EventGrid + Event to associate with the domain. Changing this forces + a new resource to be created. + type: string + type: object + inputMappingFields: + description: A input_mapping_fields block as defined below. Changing + this forces a new resource to be created. 
+ properties: + dataVersion: + description: Specifies the data version of the EventGrid Event + to associate with the domain. Changing this forces a new + resource to be created. + type: string + eventTime: + description: Specifies the event time of the EventGrid Event + to associate with the domain. Changing this forces a new + resource to be created. + type: string + eventType: + description: Specifies the event type of the EventGrid Event + to associate with the domain. Changing this forces a new + resource to be created. + type: string + id: + description: Specifies the id of the EventGrid Event to associate + with the domain. Changing this forces a new resource to + be created. + type: string + subject: + description: Specifies the subject of the EventGrid Event + to associate with the domain. Changing this forces a new + resource to be created. + type: string + topic: + description: Specifies the topic of the EventGrid Event to + associate with the domain. Changing this forces a new resource + to be created. + type: string + type: object + inputSchema: + description: Specifies the schema in which incoming events will + be published to this domain. Allowed values are CloudEventSchemaV1_0, + CustomEventSchema, or EventGridSchema. Defaults to EventGridSchema. + Changing this forces a new resource to be created. + type: string + localAuthEnabled: + description: Whether local authentication methods is enabled for + the EventGrid Topic. Defaults to true. + type: boolean + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + publicNetworkAccessEnabled: + description: Whether or not public network access is allowed for + this server. Defaults to true. + type: boolean + resourceGroupName: + description: The name of the resource group in which the EventGrid + Topic exists. Changing this forces a new resource to be created. 
+ type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/eventhub.azure.upbound.io_eventhubnamespaces.yaml b/package/crds/eventhub.azure.upbound.io_eventhubnamespaces.yaml index 9a509149b..d8a5d63cb 100644 --- a/package/crds/eventhub.azure.upbound.io_eventhubnamespaces.yaml +++ b/package/crds/eventhub.azure.upbound.io_eventhubnamespaces.yaml @@ -920,3 +920,899 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: EventHubNamespace is the Schema for the EventHubNamespaces API. + Manages an EventHub Namespace. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: EventHubNamespaceSpec defines the desired state of EventHubNamespace + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + autoInflateEnabled: + description: Is Auto Inflate enabled for the EventHub Namespace? + type: boolean + capacity: + description: Specifies the Capacity / Throughput Units for a Standard + SKU namespace. Default capacity has a maximum of 2, but can + be increased in blocks of 2 on a committed purchase basis. Defaults + to 1. + type: number + dedicatedClusterId: + description: Specifies the ID of the EventHub Dedicated Cluster + where this Namespace should created. Changing this forces a + new resource to be created. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this EventHub namespace. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Event Hub Namespace. Possible + values are SystemAssigned or UserAssigned. 
+ type: string + type: object + localAuthenticationEnabled: + description: Is SAS authentication enabled for the EventHub Namespace? + Defaults to true. + type: boolean + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + maximumThroughputUnits: + description: Specifies the maximum number of throughput units + when Auto Inflate is Enabled. Valid values range from 1 - 20. + type: number + minimumTlsVersion: + description: 'The minimum supported TLS version for this EventHub + Namespace. Valid values are: 1.0, 1.1 and 1.2. The current default + minimum TLS version is 1.2.' + type: string + networkRulesets: + description: A network_rulesets block as defined below. + items: + properties: + defaultAction: + description: The default action to take when a rule is not + matched. Possible values are Allow and Deny. + type: string + ipRule: + description: One or more ip_rule blocks as defined below. + items: + properties: + action: + description: The action to take when the rule is matched. + Possible values are Allow. Defaults to Allow. + type: string + ipMask: + description: The IP mask to match on. + type: string + type: object + type: array + publicNetworkAccessEnabled: + description: Is public network access enabled for the EventHub + Namespace? Defaults to true. + type: boolean + trustedServiceAccessEnabled: + description: Whether Trusted Microsoft Services are allowed + to bypass firewall. + type: boolean + virtualNetworkRule: + description: One or more virtual_network_rule blocks as + defined below. + items: + properties: + ignoreMissingVirtualNetworkServiceEndpoint: + description: Are missing virtual network service endpoints + ignored? + type: boolean + subnetId: + description: The id of the subnet to match on. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + type: object + type: array + publicNetworkAccessEnabled: + description: Is public network access enabled for the EventHub + Namespace? Defaults to true. + type: boolean + resourceGroupName: + description: The name of the resource group in which to create + the namespace. Changing this forces a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sku: + description: Defines which tier to use. Valid options are Basic, + Standard, and Premium. Please note that setting this field to + Premium will force the creation of a new resource. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + zoneRedundant: + description: Specifies if the EventHub Namespace should be Zone + Redundant (created across Availability Zones). Changing this + forces a new resource to be created. Defaults to false. + type: boolean + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. 
+ properties: + autoInflateEnabled: + description: Is Auto Inflate enabled for the EventHub Namespace? + type: boolean + capacity: + description: Specifies the Capacity / Throughput Units for a Standard + SKU namespace. Default capacity has a maximum of 2, but can + be increased in blocks of 2 on a committed purchase basis. Defaults + to 1. + type: number + dedicatedClusterId: + description: Specifies the ID of the EventHub Dedicated Cluster + where this Namespace should created. Changing this forces a + new resource to be created. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this EventHub namespace. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Event Hub Namespace. Possible + values are SystemAssigned or UserAssigned. + type: string + type: object + localAuthenticationEnabled: + description: Is SAS authentication enabled for the EventHub Namespace? + Defaults to true. + type: boolean + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + maximumThroughputUnits: + description: Specifies the maximum number of throughput units + when Auto Inflate is Enabled. Valid values range from 1 - 20. + type: number + minimumTlsVersion: + description: 'The minimum supported TLS version for this EventHub + Namespace. Valid values are: 1.0, 1.1 and 1.2. The current default + minimum TLS version is 1.2.' + type: string + networkRulesets: + description: A network_rulesets block as defined below. + items: + properties: + defaultAction: + description: The default action to take when a rule is not + matched. Possible values are Allow and Deny. 
+ type: string + ipRule: + description: One or more ip_rule blocks as defined below. + items: + properties: + action: + description: The action to take when the rule is matched. + Possible values are Allow. Defaults to Allow. + type: string + ipMask: + description: The IP mask to match on. + type: string + type: object + type: array + publicNetworkAccessEnabled: + description: Is public network access enabled for the EventHub + Namespace? Defaults to true. + type: boolean + trustedServiceAccessEnabled: + description: Whether Trusted Microsoft Services are allowed + to bypass firewall. + type: boolean + virtualNetworkRule: + description: One or more virtual_network_rule blocks as + defined below. + items: + properties: + ignoreMissingVirtualNetworkServiceEndpoint: + description: Are missing virtual network service endpoints + ignored? + type: boolean + subnetId: + description: The id of the subnet to match on. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + type: object + type: array + publicNetworkAccessEnabled: + description: Is public network access enabled for the EventHub + Namespace? Defaults to true. + type: boolean + sku: + description: Defines which tier to use. Valid options are Basic, + Standard, and Premium. Please note that setting this field to + Premium will force the creation of a new resource. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + zoneRedundant: + description: Specifies if the EventHub Namespace should be Zone + Redundant (created across Availability Zones). Changing this + forces a new resource to be created. Defaults to false. + type: boolean + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. 
It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.sku is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.sku) + || (has(self.initProvider) && has(self.initProvider.sku))' + status: + description: EventHubNamespaceStatus defines the observed state of EventHubNamespace. + properties: + atProvider: + properties: + autoInflateEnabled: + description: Is Auto Inflate enabled for the EventHub Namespace? + type: boolean + capacity: + description: Specifies the Capacity / Throughput Units for a Standard + SKU namespace. Default capacity has a maximum of 2, but can + be increased in blocks of 2 on a committed purchase basis. Defaults + to 1. + type: number + dedicatedClusterId: + description: Specifies the ID of the EventHub Dedicated Cluster + where this Namespace should created. Changing this forces a + new resource to be created. + type: string + id: + description: The EventHub Namespace ID. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this EventHub namespace. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Principal ID associated with this Managed + Service Identity. + type: string + tenantId: + description: The Tenant ID associated with this Managed Service + Identity. 
+ type: string + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Event Hub Namespace. Possible + values are SystemAssigned or UserAssigned. + type: string + type: object + localAuthenticationEnabled: + description: Is SAS authentication enabled for the EventHub Namespace? + Defaults to true. + type: boolean + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + maximumThroughputUnits: + description: Specifies the maximum number of throughput units + when Auto Inflate is Enabled. Valid values range from 1 - 20. + type: number + minimumTlsVersion: + description: 'The minimum supported TLS version for this EventHub + Namespace. Valid values are: 1.0, 1.1 and 1.2. The current default + minimum TLS version is 1.2.' + type: string + networkRulesets: + description: A network_rulesets block as defined below. + items: + properties: + defaultAction: + description: The default action to take when a rule is not + matched. Possible values are Allow and Deny. + type: string + ipRule: + description: One or more ip_rule blocks as defined below. + items: + properties: + action: + description: The action to take when the rule is matched. + Possible values are Allow. Defaults to Allow. + type: string + ipMask: + description: The IP mask to match on. + type: string + type: object + type: array + publicNetworkAccessEnabled: + description: Is public network access enabled for the EventHub + Namespace? Defaults to true. + type: boolean + trustedServiceAccessEnabled: + description: Whether Trusted Microsoft Services are allowed + to bypass firewall. + type: boolean + virtualNetworkRule: + description: One or more virtual_network_rule blocks as + defined below. + items: + properties: + ignoreMissingVirtualNetworkServiceEndpoint: + description: Are missing virtual network service endpoints + ignored? 
+ type: boolean + subnetId: + description: The id of the subnet to match on. + type: string + type: object + type: array + type: object + type: array + publicNetworkAccessEnabled: + description: Is public network access enabled for the EventHub + Namespace? Defaults to true. + type: boolean + resourceGroupName: + description: The name of the resource group in which to create + the namespace. Changing this forces a new resource to be created. + type: string + sku: + description: Defines which tier to use. Valid options are Basic, + Standard, and Premium. Please note that setting this field to + Premium will force the creation of a new resource. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + zoneRedundant: + description: Specifies if the EventHub Namespace should be Zone + Redundant (created across Availability Zones). Changing this + forces a new resource to be created. Defaults to false. + type: boolean + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. 
+ type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/eventhub.azure.upbound.io_eventhubs.yaml b/package/crds/eventhub.azure.upbound.io_eventhubs.yaml index 4e0305b8e..68a40f1b7 100644 --- a/package/crds/eventhub.azure.upbound.io_eventhubs.yaml +++ b/package/crds/eventhub.azure.upbound.io_eventhubs.yaml @@ -706,3 +706,679 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: EventHub is the Schema for the EventHubs API. Manages a Event + Hubs as a nested resource within an Event Hubs namespace. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: EventHubSpec defines the desired state of EventHub + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + captureDescription: + description: A capture_description block as defined below. + properties: + destination: + description: A destination block as defined below. + properties: + archiveNameFormat: + description: The Blob naming convention for archiving. + e.g. {Namespace}/{EventHub}/{PartitionId}/{Year}/{Month}/{Day}/{Hour}/{Minute}/{Second}. + Here all the parameters (Namespace,EventHub .. etc) + are mandatory irrespective of order + type: string + blobContainerName: + description: The name of the Container within the Blob + Storage Account where messages should be archived. + type: string + name: + description: Specifies the name of the EventHub resource. 
+ Changing this forces a new resource to be created. + type: string + storageAccountId: + description: The ID of the Blob Storage Account where + messages should be archived. + type: string + type: object + enabled: + description: Specifies if the Capture Description is Enabled. + type: boolean + encoding: + description: Specifies the Encoding used for the Capture Description. + Possible values are Avro and AvroDeflate. + type: string + intervalInSeconds: + description: Specifies the time interval in seconds at which + the capture will happen. Values can be between 60 and 900 + seconds. Defaults to 300 seconds. + type: number + sizeLimitInBytes: + description: Specifies the amount of data built up in your + EventHub before a Capture Operation occurs. Value should + be between 10485760 and 524288000 bytes. Defaults to 314572800 + bytes. + type: number + skipEmptyArchives: + description: Specifies if empty files should not be emitted + if no events occur during the Capture time window. Defaults + to false. + type: boolean + type: object + messageRetention: + description: Specifies the number of days to retain the events + for this Event Hub. + type: number + namespaceName: + description: Specifies the name of the EventHub Namespace. Changing + this forces a new resource to be created. + type: string + namespaceNameRef: + description: Reference to a EventHubNamespace in eventhub to populate + namespaceName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + namespaceNameSelector: + description: Selector for a EventHubNamespace in eventhub to populate + namespaceName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + partitionCount: + description: Specifies the current number of shards on the Event + Hub. + type: number + resourceGroupName: + description: The name of the resource group in which the EventHub's + parent Namespace exists. Changing this forces a new resource + to be created. 
+ type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + status: + description: Specifies the status of the Event Hub resource. Possible + values are Active, Disabled and SendDisabled. Defaults to Active. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + captureDescription: + description: A capture_description block as defined below. + properties: + destination: + description: A destination block as defined below. + properties: + archiveNameFormat: + description: The Blob naming convention for archiving. + e.g. {Namespace}/{EventHub}/{PartitionId}/{Year}/{Month}/{Day}/{Hour}/{Minute}/{Second}. + Here all the parameters (Namespace,EventHub .. etc) + are mandatory irrespective of order + type: string + blobContainerName: + description: The name of the Container within the Blob + Storage Account where messages should be archived. + type: string + name: + description: Specifies the name of the EventHub resource. + Changing this forces a new resource to be created. + type: string + storageAccountId: + description: The ID of the Blob Storage Account where + messages should be archived. 
+ type: string + type: object + enabled: + description: Specifies if the Capture Description is Enabled. + type: boolean + encoding: + description: Specifies the Encoding used for the Capture Description. + Possible values are Avro and AvroDeflate. + type: string + intervalInSeconds: + description: Specifies the time interval in seconds at which + the capture will happen. Values can be between 60 and 900 + seconds. Defaults to 300 seconds. + type: number + sizeLimitInBytes: + description: Specifies the amount of data built up in your + EventHub before a Capture Operation occurs. Value should + be between 10485760 and 524288000 bytes. Defaults to 314572800 + bytes. + type: number + skipEmptyArchives: + description: Specifies if empty files should not be emitted + if no events occur during the Capture time window. Defaults + to false. + type: boolean + type: object + messageRetention: + description: Specifies the number of days to retain the events + for this Event Hub. + type: number + partitionCount: + description: Specifies the current number of shards on the Event + Hub. + type: number + status: + description: Specifies the status of the Event Hub resource. Possible + values are Active, Disabled and SendDisabled. Defaults to Active. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.messageRetention is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.messageRetention) + || (has(self.initProvider) && has(self.initProvider.messageRetention))' + - message: spec.forProvider.partitionCount is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.partitionCount) + || (has(self.initProvider) && has(self.initProvider.partitionCount))' + status: + description: EventHubStatus defines the observed state of EventHub. + properties: + atProvider: + properties: + captureDescription: + description: A capture_description block as defined below. + properties: + destination: + description: A destination block as defined below. + properties: + archiveNameFormat: + description: The Blob naming convention for archiving. + e.g. 
{Namespace}/{EventHub}/{PartitionId}/{Year}/{Month}/{Day}/{Hour}/{Minute}/{Second}. + Here all the parameters (Namespace,EventHub .. etc) + are mandatory irrespective of order + type: string + blobContainerName: + description: The name of the Container within the Blob + Storage Account where messages should be archived. + type: string + name: + description: Specifies the name of the EventHub resource. + Changing this forces a new resource to be created. + type: string + storageAccountId: + description: The ID of the Blob Storage Account where + messages should be archived. + type: string + type: object + enabled: + description: Specifies if the Capture Description is Enabled. + type: boolean + encoding: + description: Specifies the Encoding used for the Capture Description. + Possible values are Avro and AvroDeflate. + type: string + intervalInSeconds: + description: Specifies the time interval in seconds at which + the capture will happen. Values can be between 60 and 900 + seconds. Defaults to 300 seconds. + type: number + sizeLimitInBytes: + description: Specifies the amount of data built up in your + EventHub before a Capture Operation occurs. Value should + be between 10485760 and 524288000 bytes. Defaults to 314572800 + bytes. + type: number + skipEmptyArchives: + description: Specifies if empty files should not be emitted + if no events occur during the Capture time window. Defaults + to false. + type: boolean + type: object + id: + description: The ID of the EventHub. + type: string + messageRetention: + description: Specifies the number of days to retain the events + for this Event Hub. + type: number + namespaceName: + description: Specifies the name of the EventHub Namespace. Changing + this forces a new resource to be created. + type: string + partitionCount: + description: Specifies the current number of shards on the Event + Hub. + type: number + partitionIds: + description: The identifiers for partitions created for Event + Hubs. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + resourceGroupName: + description: The name of the resource group in which the EventHub's + parent Namespace exists. Changing this forces a new resource + to be created. + type: string + status: + description: Specifies the status of the Event Hub resource. Possible + values are Active, Disabled and SendDisabled. Defaults to Active. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/fluidrelay.azure.upbound.io_servers.yaml b/package/crds/fluidrelay.azure.upbound.io_servers.yaml index 69950d5c6..f03f822f6 100644 --- a/package/crds/fluidrelay.azure.upbound.io_servers.yaml +++ b/package/crds/fluidrelay.azure.upbound.io_servers.yaml @@ -652,3 +652,631 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Server is the Schema for the Servers API. Manages a Fluid Relay + Server. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ServerSpec defines the desired state of Server + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Fluid Relay Service. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Fluid Relay Service. Possible + values are SystemAssigned,UserAssigned and SystemAssigned, + UserAssigned. + type: string + type: object + location: + description: The Azure Region where the Fluid Relay Server should + exist. Changing this forces a new Fluid Relay Server to be created. + type: string + name: + description: The name which should be used for this Fluid Relay + Server. Changing this forces a new Fluid Relay Server to be + created. + type: string + resourceGroupName: + description: The name of the Resource Group where the Fluid Relay + Server should exist. Changing this forces a new Fluid Relay + Server to be created. 
+ type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + storageSku: + description: Sku of the storage associated with the resource, + Possible values are standard and basic. Changing this forces + a new Fluid Relay Server to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Fluid Relay Server. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Fluid Relay Service. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Fluid Relay Service. Possible + values are SystemAssigned,UserAssigned and SystemAssigned, + UserAssigned. + type: string + type: object + location: + description: The Azure Region where the Fluid Relay Server should + exist. 
Changing this forces a new Fluid Relay Server to be created. + type: string + name: + description: The name which should be used for this Fluid Relay + Server. Changing this forces a new Fluid Relay Server to be + created. + type: string + resourceGroupName: + description: The name of the Resource Group where the Fluid Relay + Server should exist. Changing this forces a new Fluid Relay + Server to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + storageSku: + description: Sku of the storage associated with the resource, + Possible values are standard and basic. Changing this forces + a new Fluid Relay Server to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Fluid Relay Server. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: ServerStatus defines the observed state of Server. + properties: + atProvider: + properties: + frsTenantId: + description: The Fluid tenantId for this server. + type: string + id: + description: The ID of the Fluid Relay Server. + type: string + identity: + description: An identity block as defined below. 
+ properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Fluid Relay Service. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Principal ID for the Service Principal associated + with the Identity of this Fluid Relay Server. + type: string + tenantId: + description: The Tenant ID for the Service Principal associated + with the Identity of this Fluid Relay Server. + type: string + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Fluid Relay Service. Possible + values are SystemAssigned,UserAssigned and SystemAssigned, + UserAssigned. + type: string + type: object + location: + description: The Azure Region where the Fluid Relay Server should + exist. Changing this forces a new Fluid Relay Server to be created. + type: string + name: + description: The name which should be used for this Fluid Relay + Server. Changing this forces a new Fluid Relay Server to be + created. + type: string + ordererEndpoints: + description: An array of the Fluid Relay Orderer endpoints. This + will be deprecated in future version of fluid relay server and + will always be empty, more details. + items: + type: string + type: array + resourceGroupName: + description: The name of the Resource Group where the Fluid Relay + Server should exist. Changing this forces a new Fluid Relay + Server to be created. + type: string + serviceEndpoints: + description: An array of service endpoints for this Fluid Relay + Server. + items: + type: string + type: array + storageEndpoints: + description: An array of storage endpoints for this Fluid Relay + Server. This will be deprecated in future version of fluid relay + server and will always be empty, more details. + items: + type: string + type: array + storageSku: + description: Sku of the storage associated with the resource, + Possible values are standard and basic. 
Changing this forces + a new Fluid Relay Server to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Fluid Relay Server. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/guestconfiguration.azure.upbound.io_policyvirtualmachineconfigurationassignments.yaml b/package/crds/guestconfiguration.azure.upbound.io_policyvirtualmachineconfigurationassignments.yaml index 90ab1de1b..21e77f0f1 100644 --- a/package/crds/guestconfiguration.azure.upbound.io_policyvirtualmachineconfigurationassignments.yaml +++ b/package/crds/guestconfiguration.azure.upbound.io_policyvirtualmachineconfigurationassignments.yaml @@ -556,3 +556,532 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: PolicyVirtualMachineConfigurationAssignment is the Schema for + the PolicyVirtualMachineConfigurationAssignments API. Applies a Guest Configuration + Policy to a Virtual Machine. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PolicyVirtualMachineConfigurationAssignmentSpec defines the + desired state of PolicyVirtualMachineConfigurationAssignment + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + configuration: + description: A configuration block as defined below. + properties: + assignmentType: + description: The assignment type for the Guest Configuration + Assignment. Possible values are Audit, ApplyAndAutoCorrect, + ApplyAndMonitor and DeployAndAutoCorrect. + type: string + contentHash: + description: The content hash for the Guest Configuration + package. + type: string + contentUri: + description: The content URI where the Guest Configuration + package is stored. + type: string + parameter: + description: One or more parameter blocks as defined below + which define what configuration parameters and values against. + items: + properties: + name: + description: The name of the configuration parameter + to check. + type: string + value: + description: The value to check the configuration parameter + with. 
+ type: string + type: object + type: array + version: + description: The version of the Guest Configuration that will + be assigned in this Guest Configuration Assignment. + type: string + type: object + location: + description: The Azure location where the Policy Virtual Machine + Configuration Assignment should exist. Changing this forces + a new resource to be created. + type: string + virtualMachineId: + description: The resource ID of the Policy Virtual Machine which + this Guest Configuration Assignment should apply to. Changing + this forces a new resource to be created. + type: string + virtualMachineIdRef: + description: Reference to a WindowsVirtualMachine in compute to + populate virtualMachineId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualMachineIdSelector: + description: Selector for a WindowsVirtualMachine in compute to + populate virtualMachineId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + configuration: + description: A configuration block as defined below. + properties: + assignmentType: + description: The assignment type for the Guest Configuration + Assignment. Possible values are Audit, ApplyAndAutoCorrect, + ApplyAndMonitor and DeployAndAutoCorrect. 
+ type: string + contentHash: + description: The content hash for the Guest Configuration + package. + type: string + contentUri: + description: The content URI where the Guest Configuration + package is stored. + type: string + parameter: + description: One or more parameter blocks as defined below + which define what configuration parameters and values against. + items: + properties: + name: + description: The name of the configuration parameter + to check. + type: string + value: + description: The value to check the configuration parameter + with. + type: string + type: object + type: array + version: + description: The version of the Guest Configuration that will + be assigned in this Guest Configuration Assignment. + type: string + type: object + location: + description: The Azure location where the Policy Virtual Machine + Configuration Assignment should exist. Changing this forces + a new resource to be created. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.configuration is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.configuration) + || (has(self.initProvider) && has(self.initProvider.configuration))' + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + status: + description: PolicyVirtualMachineConfigurationAssignmentStatus defines + the observed state of PolicyVirtualMachineConfigurationAssignment. + properties: + atProvider: + properties: + configuration: + description: A configuration block as defined below. + properties: + assignmentType: + description: The assignment type for the Guest Configuration + Assignment. Possible values are Audit, ApplyAndAutoCorrect, + ApplyAndMonitor and DeployAndAutoCorrect. + type: string + contentHash: + description: The content hash for the Guest Configuration + package. + type: string + contentUri: + description: The content URI where the Guest Configuration + package is stored. 
+ type: string + parameter: + description: One or more parameter blocks as defined below + which define what configuration parameters and values against. + items: + properties: + name: + description: The name of the configuration parameter + to check. + type: string + value: + description: The value to check the configuration parameter + with. + type: string + type: object + type: array + version: + description: The version of the Guest Configuration that will + be assigned in this Guest Configuration Assignment. + type: string + type: object + id: + description: The ID of the Policy Virtual Machine Configuration + Assignment. + type: string + location: + description: The Azure location where the Policy Virtual Machine + Configuration Assignment should exist. Changing this forces + a new resource to be created. + type: string + virtualMachineId: + description: The resource ID of the Policy Virtual Machine which + this Guest Configuration Assignment should apply to. Changing + this forces a new resource to be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. 
+ type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/hdinsight.azure.upbound.io_hadoopclusters.yaml b/package/crds/hdinsight.azure.upbound.io_hadoopclusters.yaml index fb2d7bded..070b5e303 100644 --- a/package/crds/hdinsight.azure.upbound.io_hadoopclusters.yaml +++ b/package/crds/hdinsight.azure.upbound.io_hadoopclusters.yaml @@ -3368,3 +3368,3113 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: HadoopCluster is the Schema for the HadoopClusters API. Manages + a HDInsight Hadoop Cluster. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: HadoopClusterSpec defines the desired state of HadoopCluster + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + clusterVersion: + description: Specifies the Version of HDInsights which should + be used for this Cluster. Changing this forces a new resource + to be created. + type: string + componentVersion: + description: A component_version block as defined below. + properties: + hadoop: + description: The version of Hadoop which should be used for + this HDInsight Hadoop Cluster. Changing this forces a new + resource to be created. + type: string + type: object + computeIsolation: + description: A compute_isolation block as defined below. + properties: + computeIsolationEnabled: + description: This field indicates whether enable compute isolation + or not. Possible values are true or false. 
+ type: boolean + hostSku: + description: The name of the host SKU. + type: string + type: object + diskEncryption: + description: One or more disk_encryption block as defined below. + items: + properties: + encryptionAlgorithm: + description: This is an algorithm identifier for encryption. + Possible values are RSA1_5, RSA-OAEP, RSA-OAEP-256. + type: string + encryptionAtHostEnabled: + description: This is indicator to show whether resource + disk encryption is enabled. + type: boolean + keyVaultKeyId: + description: The ID of the key vault key. + type: string + keyVaultManagedIdentityId: + description: This is the resource ID of Managed Identity + used to access the key vault. + type: string + type: object + type: array + extension: + description: An extension block as defined below. + properties: + logAnalyticsWorkspaceId: + description: The workspace ID of the log analytics extension. + type: string + primaryKeySecretRef: + description: The workspace key of the log analytics extension. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + required: + - primaryKeySecretRef + type: object + gateway: + description: A gateway block as defined below. + properties: + passwordSecretRef: + description: The password used for the Ambari Portal. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + username: + description: The username used for the Ambari Portal. Changing + this forces a new resource to be created. 
+ type: string + required: + - passwordSecretRef + type: object + location: + description: Specifies the Azure Region which this HDInsight Hadoop + Cluster should exist. Changing this forces a new resource to + be created. + type: string + metastores: + description: A metastores block as defined below. + properties: + ambari: + description: An ambari block as defined below. + properties: + databaseName: + description: The external Oozie metastore's existing SQL + database. Changing this forces a new resource to be + created. + type: string + passwordSecretRef: + description: The Password associated with the local administrator + for the Zookeeper Nodes. Changing this forces a new + resource to be created. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + server: + description: The fully-qualified domain name (FQDN) of + the SQL server to use for the external Oozie metastore. + Changing this forces a new resource to be created. + type: string + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + required: + - passwordSecretRef + type: object + hive: + description: A hive block as defined below. + properties: + databaseName: + description: The external Oozie metastore's existing SQL + database. Changing this forces a new resource to be + created. + type: string + passwordSecretRef: + description: The Password associated with the local administrator + for the Zookeeper Nodes. Changing this forces a new + resource to be created. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - key + - name + - namespace + type: object + server: + description: The fully-qualified domain name (FQDN) of + the SQL server to use for the external Oozie metastore. + Changing this forces a new resource to be created. + type: string + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + required: + - passwordSecretRef + type: object + oozie: + description: An oozie block as defined below. + properties: + databaseName: + description: The external Oozie metastore's existing SQL + database. Changing this forces a new resource to be + created. + type: string + passwordSecretRef: + description: The Password associated with the local administrator + for the Zookeeper Nodes. Changing this forces a new + resource to be created. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + server: + description: The fully-qualified domain name (FQDN) of + the SQL server to use for the external Oozie metastore. + Changing this forces a new resource to be created. + type: string + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + required: + - passwordSecretRef + type: object + type: object + monitor: + description: A monitor block as defined below. + properties: + logAnalyticsWorkspaceId: + description: The Operations Management Suite (OMS) workspace + ID. + type: string + primaryKeySecretRef: + description: The Operations Management Suite (OMS) workspace + key. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. 
+ type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + required: + - primaryKeySecretRef + type: object + network: + description: A network block as defined below. + properties: + connectionDirection: + description: The direction of the resource provider connection. + Possible values include Inbound or Outbound. Defaults to + Inbound. Changing this forces a new resource to be created. + type: string + privateLinkEnabled: + description: Is the private link enabled? Possible values + include true or false. Defaults to false. Changing this + forces a new resource to be created. + type: boolean + type: object + resourceGroupName: + description: Specifies the name of the Resource Group in which + this HDInsight Hadoop Cluster should exist. Changing this forces + a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + roles: + description: A roles block as defined below. + properties: + edgeNode: + description: A edge_node block as defined below. + properties: + httpsEndpoints: + description: The HTTPS Connectivity Endpoint for this + HDInsight Hadoop Cluster. One or more https_endpoints + blocks as defined below. + items: + properties: + accessModes: + description: A list of access modes for the application. + items: + type: string + type: array + destinationPort: + description: The destination port to connect to. + type: number + disableGatewayAuth: + description: The value indicates whether the gateway + authentication is enabled or not. 
+ type: boolean + privateIpAddress: + description: The private ip address of the endpoint. + type: string + subDomainSuffix: + description: The application's subdomain suffix. + type: string + type: object + type: array + installScriptAction: + description: A install_script_action block as defined + below. + items: + properties: + name: + description: The name of the uninstall script action. + type: string + parameters: + description: The parameters for the script. + type: string + uri: + description: The URI pointing to the script to run + during the installation of the edge node. + type: string + type: object + type: array + targetInstanceCount: + description: The number of instances which should be run + for the Worker Nodes. + type: number + uninstallScriptActions: + description: A uninstall_script_actions block as defined + below. + items: + properties: + name: + description: The name of the uninstall script action. + type: string + parameters: + description: The parameters for the script. + type: string + uri: + description: The URI pointing to the script to run + during the installation of the edge node. + type: string + type: object + type: array + vmSize: + description: The Size of the Virtual Machine which should + be used as the Zookeeper Nodes. 
Possible values are + ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, + A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, + Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, + Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, + Standard_D3, Standard_D4, Standard_D11, Standard_D12, + Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, + Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, + Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, + Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, + Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, + Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, + Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, + Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, + Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, + Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, + Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, + Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, + Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, + Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, + Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, + Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, + Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, + Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, + Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, + Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. + Changing this forces a new resource to be created. + type: string + type: object + headNode: + description: A head_node block as defined above. + properties: + passwordSecretRef: + description: The Password associated with the local administrator + for the Zookeeper Nodes. Changing this forces a new + resource to be created. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. 
+ type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + scriptActions: + description: The script action which will run on the cluster. + One or more script_actions blocks as defined above. + items: + properties: + name: + description: The name of the uninstall script action. + type: string + parameters: + description: The parameters for the script. + type: string + uri: + description: The URI pointing to the script to run + during the installation of the edge node. + type: string + type: object + type: array + sshKeys: + description: A list of SSH Keys which should be used for + the local administrator on the Zookeeper Nodes. Changing + this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The ID of the Subnet within the Virtual Network + where the Zookeeper Nodes should be provisioned within. + Changing this forces a new resource to be created. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + virtualNetworkId: + description: The ID of the Virtual Network where the Zookeeper + Nodes should be provisioned within. Changing this forces + a new resource to be created. + type: string + vmSize: + description: The Size of the Virtual Machine which should + be used as the Zookeeper Nodes. 
Possible values are + ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, + A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, + Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, + Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, + Standard_D3, Standard_D4, Standard_D11, Standard_D12, + Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, + Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, + Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, + Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, + Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, + Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, + Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, + Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, + Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, + Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, + Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, + Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, + Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, + Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, + Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, + Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, + Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, + Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, + Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, + Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. + Changing this forces a new resource to be created. + type: string + type: object + workerNode: + description: A worker_node block as defined below. + properties: + autoscale: + description: A autoscale block as defined below. + properties: + capacity: + description: A capacity block as defined below. + properties: + maxInstanceCount: + description: The maximum number of worker nodes + to autoscale to based on the cluster's activity. 
+ type: number + minInstanceCount: + description: The minimum number of worker nodes + to autoscale to based on the cluster's activity. + type: number + type: object + recurrence: + description: A recurrence block as defined below. + properties: + schedule: + description: A list of schedule blocks as defined + below. + items: + properties: + days: + description: The days of the week to perform + autoscale. Possible values are Monday, + Tuesday, Wednesday, Thursday, Friday, + Saturday and Sunday. + items: + type: string + type: array + targetInstanceCount: + description: The number of instances which + should be run for the Worker Nodes. + type: number + time: + description: The time of day to perform + the autoscale in 24hour format. + type: string + type: object + type: array + timezone: + description: The time zone for the autoscale schedule + times. + type: string + type: object + type: object + passwordSecretRef: + description: The Password associated with the local administrator + for the Zookeeper Nodes. Changing this forces a new + resource to be created. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + scriptActions: + description: The script action which will run on the cluster. + One or more script_actions blocks as defined above. + items: + properties: + name: + description: The name of the uninstall script action. + type: string + parameters: + description: The parameters for the script. + type: string + uri: + description: The URI pointing to the script to run + during the installation of the edge node. + type: string + type: object + type: array + sshKeys: + description: A list of SSH Keys which should be used for + the local administrator on the Zookeeper Nodes. Changing + this forces a new resource to be created. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The ID of the Subnet within the Virtual Network + where the Zookeeper Nodes should be provisioned within. + Changing this forces a new resource to be created. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + targetInstanceCount: + description: The number of instances which should be run + for the Worker Nodes. + type: number + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + virtualNetworkId: + description: The ID of the Virtual Network where the Zookeeper + Nodes should be provisioned within. Changing this forces + a new resource to be created. + type: string + vmSize: + description: The Size of the Virtual Machine which should + be used as the Zookeeper Nodes. 
Possible values are + ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, + A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, + Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, + Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, + Standard_D3, Standard_D4, Standard_D11, Standard_D12, + Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, + Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, + Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, + Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, + Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, + Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, + Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, + Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, + Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, + Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, + Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, + Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, + Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, + Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, + Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, + Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, + Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, + Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, + Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, + Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. + Changing this forces a new resource to be created. + type: string + type: object + zookeeperNode: + description: A zookeeper_node block as defined below. + properties: + passwordSecretRef: + description: The Password associated with the local administrator + for the Zookeeper Nodes. Changing this forces a new + resource to be created. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. 
+ type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + scriptActions: + description: The script action which will run on the cluster. + One or more script_actions blocks as defined above. + items: + properties: + name: + description: The name of the uninstall script action. + type: string + parameters: + description: The parameters for the script. + type: string + uri: + description: The URI pointing to the script to run + during the installation of the edge node. + type: string + type: object + type: array + sshKeys: + description: A list of SSH Keys which should be used for + the local administrator on the Zookeeper Nodes. Changing + this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The ID of the Subnet within the Virtual Network + where the Zookeeper Nodes should be provisioned within. + Changing this forces a new resource to be created. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + virtualNetworkId: + description: The ID of the Virtual Network where the Zookeeper + Nodes should be provisioned within. Changing this forces + a new resource to be created. + type: string + vmSize: + description: The Size of the Virtual Machine which should + be used as the Zookeeper Nodes. 
Possible values are + ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, + A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, + Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, + Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, + Standard_D3, Standard_D4, Standard_D11, Standard_D12, + Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, + Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, + Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, + Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, + Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, + Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, + Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, + Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, + Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, + Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, + Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, + Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, + Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, + Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, + Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, + Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, + Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, + Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, + Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, + Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. + Changing this forces a new resource to be created. + type: string + type: object + type: object + securityProfile: + description: A security_profile block as defined below. Changing + this forces a new resource to be created. + properties: + aaddsResourceId: + description: The resource ID of the Azure Active Directory + Domain Service. Changing this forces a new resource to be + created. 
+ type: string + clusterUsersGroupDns: + description: A list of the distinguished names for the cluster + user groups. Changing this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + domainName: + description: The name of the Azure Active Directory Domain. + Changing this forces a new resource to be created. + type: string + domainUserPasswordSecretRef: + description: The user password of the Azure Active Directory + Domain. Changing this forces a new resource to be created. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + domainUsername: + description: The username of the Azure Active Directory Domain. + Changing this forces a new resource to be created. + type: string + ldapsUrls: + description: A list of the LDAPS URLs to communicate with + the Azure Active Directory. Changing this forces a new resource + to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + msiResourceId: + description: The User Assigned Identity for the HDInsight + Cluster. Changing this forces a new resource to be created. + type: string + required: + - domainUserPasswordSecretRef + type: object + storageAccount: + description: One or more storage_account block as defined below. + items: + properties: + isDefault: + description: Is this the Default Storage Account for the + HDInsight Hadoop Cluster? Changing this forces a new resource + to be created. + type: boolean + storageAccountKeySecretRef: + description: The Access Key which should be used to connect + to the Storage Account. Changing this forces a new resource + to be created. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. 
+ type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + storageContainerId: + description: The ID of the Storage Container. Changing this + forces a new resource to be created. + type: string + storageContainerIdRef: + description: Reference to a Container in storage to populate + storageContainerId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + storageContainerIdSelector: + description: Selector for a Container in storage to populate + storageContainerId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + storageResourceId: + description: The ID of the Storage Account. Changing this + forces a new resource to be created. + type: string + required: + - storageAccountKeySecretRef + type: object + type: array + storageAccountGen2: + description: A storage_account_gen2 block as defined below. + properties: + filesystemId: + description: The ID of the Gen2 Filesystem. Changing this + forces a new resource to be created. + type: string + isDefault: + description: Is this the Default Storage Account for the HDInsight + Hadoop Cluster? Changing this forces a new resource to be + created. + type: boolean + managedIdentityResourceId: + description: The ID of Managed Identity to use for accessing + the Gen2 filesystem. Changing this forces a new resource + to be created. + type: string + storageResourceId: + description: The ID of the Storage Account. Changing this + forces a new resource to be created. + type: string + type: object + tags: + additionalProperties: + type: string + description: A map of Tags which should be assigned to this HDInsight + Hadoop Cluster. + type: object + x-kubernetes-map-type: granular + tier: + description: Specifies the Tier which should be used for this + HDInsight Hadoop Cluster. Possible values are Standard or Premium. + Changing this forces a new resource to be created. + type: string + tlsMinVersion: + description: The minimal supported TLS version. 
Possible values + are 1.0, 1.1 or 1.2. Changing this forces a new resource to + be created. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + clusterVersion: + description: Specifies the Version of HDInsights which should + be used for this Cluster. Changing this forces a new resource + to be created. + type: string + componentVersion: + description: A component_version block as defined below. + properties: + hadoop: + description: The version of Hadoop which should be used for + this HDInsight Hadoop Cluster. Changing this forces a new + resource to be created. + type: string + type: object + computeIsolation: + description: A compute_isolation block as defined below. + properties: + computeIsolationEnabled: + description: This field indicates whether enable compute isolation + or not. Possible values are true or false. + type: boolean + hostSku: + description: The name of the host SKU. + type: string + type: object + diskEncryption: + description: One or more disk_encryption block as defined below. + items: + properties: + encryptionAlgorithm: + description: This is an algorithm identifier for encryption. + Possible values are RSA1_5, RSA-OAEP, RSA-OAEP-256. + type: string + encryptionAtHostEnabled: + description: This is indicator to show whether resource + disk encryption is enabled. 
+ type: boolean + keyVaultKeyId: + description: The ID of the key vault key. + type: string + keyVaultManagedIdentityId: + description: This is the resource ID of Managed Identity + used to access the key vault. + type: string + type: object + type: array + extension: + description: An extension block as defined below. + properties: + logAnalyticsWorkspaceId: + description: The workspace ID of the log analytics extension. + type: string + type: object + gateway: + description: A gateway block as defined below. + properties: + username: + description: The username used for the Ambari Portal. Changing + this forces a new resource to be created. + type: string + type: object + location: + description: Specifies the Azure Region which this HDInsight Hadoop + Cluster should exist. Changing this forces a new resource to + be created. + type: string + metastores: + description: A metastores block as defined below. + properties: + ambari: + description: An ambari block as defined below. + properties: + databaseName: + description: The external Oozie metastore's existing SQL + database. Changing this forces a new resource to be + created. + type: string + server: + description: The fully-qualified domain name (FQDN) of + the SQL server to use for the external Oozie metastore. + Changing this forces a new resource to be created. + type: string + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + type: object + hive: + description: A hive block as defined below. + properties: + databaseName: + description: The external Oozie metastore's existing SQL + database. Changing this forces a new resource to be + created. + type: string + server: + description: The fully-qualified domain name (FQDN) of + the SQL server to use for the external Oozie metastore. + Changing this forces a new resource to be created. 
+ type: string + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + type: object + oozie: + description: An oozie block as defined below. + properties: + databaseName: + description: The external Oozie metastore's existing SQL + database. Changing this forces a new resource to be + created. + type: string + server: + description: The fully-qualified domain name (FQDN) of + the SQL server to use for the external Oozie metastore. + Changing this forces a new resource to be created. + type: string + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + type: object + type: object + monitor: + description: A monitor block as defined below. + properties: + logAnalyticsWorkspaceId: + description: The Operations Management Suite (OMS) workspace + ID. + type: string + type: object + network: + description: A network block as defined below. + properties: + connectionDirection: + description: The direction of the resource provider connection. + Possible values include Inbound or Outbound. Defaults to + Inbound. Changing this forces a new resource to be created. + type: string + privateLinkEnabled: + description: Is the private link enabled? Possible values + include true or false. Defaults to false. Changing this + forces a new resource to be created. + type: boolean + type: object + roles: + description: A roles block as defined below. + properties: + edgeNode: + description: A edge_node block as defined below. + properties: + httpsEndpoints: + description: The HTTPS Connectivity Endpoint for this + HDInsight Hadoop Cluster. One or more https_endpoints + blocks as defined below. + items: + properties: + accessModes: + description: A list of access modes for the application. 
+ items: + type: string + type: array + destinationPort: + description: The destination port to connect to. + type: number + disableGatewayAuth: + description: The value indicates whether the gateway + authentication is enabled or not. + type: boolean + privateIpAddress: + description: The private ip address of the endpoint. + type: string + subDomainSuffix: + description: The application's subdomain suffix. + type: string + type: object + type: array + installScriptAction: + description: A install_script_action block as defined + below. + items: + properties: + name: + description: The name of the uninstall script action. + type: string + parameters: + description: The parameters for the script. + type: string + uri: + description: The URI pointing to the script to run + during the installation of the edge node. + type: string + type: object + type: array + targetInstanceCount: + description: The number of instances which should be run + for the Worker Nodes. + type: number + uninstallScriptActions: + description: A uninstall_script_actions block as defined + below. + items: + properties: + name: + description: The name of the uninstall script action. + type: string + parameters: + description: The parameters for the script. + type: string + uri: + description: The URI pointing to the script to run + during the installation of the edge node. + type: string + type: object + type: array + vmSize: + description: The Size of the Virtual Machine which should + be used as the Zookeeper Nodes. 
Possible values are + ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, + A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, + Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, + Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, + Standard_D3, Standard_D4, Standard_D11, Standard_D12, + Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, + Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, + Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, + Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, + Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, + Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, + Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, + Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, + Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, + Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, + Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, + Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, + Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, + Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, + Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, + Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, + Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, + Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, + Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, + Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. + Changing this forces a new resource to be created. + type: string + type: object + headNode: + description: A head_node block as defined above. + properties: + scriptActions: + description: The script action which will run on the cluster. + One or more script_actions blocks as defined above. + items: + properties: + name: + description: The name of the uninstall script action. + type: string + parameters: + description: The parameters for the script. 
+ type: string + uri: + description: The URI pointing to the script to run + during the installation of the edge node. + type: string + type: object + type: array + sshKeys: + description: A list of SSH Keys which should be used for + the local administrator on the Zookeeper Nodes. Changing + this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The ID of the Subnet within the Virtual Network + where the Zookeeper Nodes should be provisioned within. + Changing this forces a new resource to be created. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + virtualNetworkId: + description: The ID of the Virtual Network where the Zookeeper + Nodes should be provisioned within. Changing this forces + a new resource to be created. + type: string + vmSize: + description: The Size of the Virtual Machine which should + be used as the Zookeeper Nodes. 
Possible values are + ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, + A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, + Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, + Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, + Standard_D3, Standard_D4, Standard_D11, Standard_D12, + Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, + Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, + Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, + Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, + Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, + Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, + Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, + Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, + Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, + Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, + Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, + Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, + Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, + Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, + Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, + Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, + Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, + Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, + Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, + Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. + Changing this forces a new resource to be created. + type: string + type: object + workerNode: + description: A worker_node block as defined below. + properties: + autoscale: + description: A autoscale block as defined below. + properties: + capacity: + description: A capacity block as defined below. + properties: + maxInstanceCount: + description: The maximum number of worker nodes + to autoscale to based on the cluster's activity. 
+ type: number + minInstanceCount: + description: The minimum number of worker nodes + to autoscale to based on the cluster's activity. + type: number + type: object + recurrence: + description: A recurrence block as defined below. + properties: + schedule: + description: A list of schedule blocks as defined + below. + items: + properties: + days: + description: The days of the week to perform + autoscale. Possible values are Monday, + Tuesday, Wednesday, Thursday, Friday, + Saturday and Sunday. + items: + type: string + type: array + targetInstanceCount: + description: The number of instances which + should be run for the Worker Nodes. + type: number + time: + description: The time of day to perform + the autoscale in 24hour format. + type: string + type: object + type: array + timezone: + description: The time zone for the autoscale schedule + times. + type: string + type: object + type: object + scriptActions: + description: The script action which will run on the cluster. + One or more script_actions blocks as defined above. + items: + properties: + name: + description: The name of the uninstall script action. + type: string + parameters: + description: The parameters for the script. + type: string + uri: + description: The URI pointing to the script to run + during the installation of the edge node. + type: string + type: object + type: array + sshKeys: + description: A list of SSH Keys which should be used for + the local administrator on the Zookeeper Nodes. Changing + this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The ID of the Subnet within the Virtual Network + where the Zookeeper Nodes should be provisioned within. + Changing this forces a new resource to be created. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + targetInstanceCount: + description: The number of instances which should be run + for the Worker Nodes. + type: number + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + virtualNetworkId: + description: The ID of the Virtual Network where the Zookeeper + Nodes should be provisioned within. Changing this forces + a new resource to be created. + type: string + vmSize: + description: The Size of the Virtual Machine which should + be used as the Zookeeper Nodes. Possible values are + ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, + A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, + Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, + Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, + Standard_D3, Standard_D4, Standard_D11, Standard_D12, + Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, + Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, + Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, + Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, + Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, + Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, + Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, + Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, + Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, + Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, + Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, + Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, + Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, + Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, + Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, + Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, + Standard_G4, Standard_G5, Standard_F2s_V2, 
Standard_F4s_V2, + Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, + Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, + Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. + Changing this forces a new resource to be created. + type: string + type: object + zookeeperNode: + description: A zookeeper_node block as defined below. + properties: + scriptActions: + description: The script action which will run on the cluster. + One or more script_actions blocks as defined above. + items: + properties: + name: + description: The name of the uninstall script action. + type: string + parameters: + description: The parameters for the script. + type: string + uri: + description: The URI pointing to the script to run + during the installation of the edge node. + type: string + type: object + type: array + sshKeys: + description: A list of SSH Keys which should be used for + the local administrator on the Zookeeper Nodes. Changing + this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The ID of the Subnet within the Virtual Network + where the Zookeeper Nodes should be provisioned within. + Changing this forces a new resource to be created. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + virtualNetworkId: + description: The ID of the Virtual Network where the Zookeeper + Nodes should be provisioned within. Changing this forces + a new resource to be created. + type: string + vmSize: + description: The Size of the Virtual Machine which should + be used as the Zookeeper Nodes. 
Possible values are + ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, + A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, + Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, + Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, + Standard_D3, Standard_D4, Standard_D11, Standard_D12, + Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, + Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, + Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, + Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, + Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, + Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, + Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, + Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, + Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, + Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, + Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, + Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, + Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, + Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, + Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, + Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, + Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, + Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, + Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, + Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. + Changing this forces a new resource to be created. + type: string + type: object + type: object + securityProfile: + description: A security_profile block as defined below. Changing + this forces a new resource to be created. + properties: + aaddsResourceId: + description: The resource ID of the Azure Active Directory + Domain Service. Changing this forces a new resource to be + created. 
+ type: string + clusterUsersGroupDns: + description: A list of the distinguished names for the cluster + user groups. Changing this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + domainName: + description: The name of the Azure Active Directory Domain. + Changing this forces a new resource to be created. + type: string + domainUsername: + description: The username of the Azure Active Directory Domain. + Changing this forces a new resource to be created. + type: string + ldapsUrls: + description: A list of the LDAPS URLs to communicate with + the Azure Active Directory. Changing this forces a new resource + to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + msiResourceId: + description: The User Assigned Identity for the HDInsight + Cluster. Changing this forces a new resource to be created. + type: string + type: object + storageAccount: + description: One or more storage_account block as defined below. + items: + properties: + isDefault: + description: Is this the Default Storage Account for the + HDInsight Hadoop Cluster? Changing this forces a new resource + to be created. + type: boolean + storageContainerId: + description: The ID of the Storage Container. Changing this + forces a new resource to be created. + type: string + storageContainerIdRef: + description: Reference to a Container in storage to populate + storageContainerId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + storageContainerIdSelector: + description: Selector for a Container in storage to populate + storageContainerId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + storageResourceId: + description: The ID of the Storage Account. Changing this + forces a new resource to be created. + type: string + type: object + type: array + storageAccountGen2: + description: A storage_account_gen2 block as defined below. + properties: + filesystemId: + description: The ID of the Gen2 Filesystem. 
Changing this + forces a new resource to be created. + type: string + isDefault: + description: Is this the Default Storage Account for the HDInsight + Hadoop Cluster? Changing this forces a new resource to be + created. + type: boolean + managedIdentityResourceId: + description: The ID of Managed Identity to use for accessing + the Gen2 filesystem. Changing this forces a new resource + to be created. + type: string + storageResourceId: + description: The ID of the Storage Account. Changing this + forces a new resource to be created. + type: string + type: object + tags: + additionalProperties: + type: string + description: A map of Tags which should be assigned to this HDInsight + Hadoop Cluster. + type: object + x-kubernetes-map-type: granular + tier: + description: Specifies the Tier which should be used for this + HDInsight Hadoop Cluster. Possible values are Standard or Premium. + Changing this forces a new resource to be created. + type: string + tlsMinVersion: + description: The minimal supported TLS version. Possible values + are 1.0, 1.1 or 1.2. Changing this forces a new resource to + be created. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.clusterVersion is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.clusterVersion) + || (has(self.initProvider) && has(self.initProvider.clusterVersion))' + - message: spec.forProvider.componentVersion is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.componentVersion) + || (has(self.initProvider) && has(self.initProvider.componentVersion))' + - message: spec.forProvider.gateway is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.gateway) + || (has(self.initProvider) && has(self.initProvider.gateway))' + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || 
''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.roles is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.roles) + || (has(self.initProvider) && has(self.initProvider.roles))' + - message: spec.forProvider.tier is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.tier) + || (has(self.initProvider) && has(self.initProvider.tier))' + status: + description: HadoopClusterStatus defines the observed state of HadoopCluster. + properties: + atProvider: + properties: + clusterVersion: + description: Specifies the Version of HDInsights which should + be used for this Cluster. Changing this forces a new resource + to be created. + type: string + componentVersion: + description: A component_version block as defined below. + properties: + hadoop: + description: The version of Hadoop which should be used for + this HDInsight Hadoop Cluster. Changing this forces a new + resource to be created. + type: string + type: object + computeIsolation: + description: A compute_isolation block as defined below. + properties: + computeIsolationEnabled: + description: This field indicates whether enable compute isolation + or not. Possible values are true or false. + type: boolean + hostSku: + description: The name of the host SKU. + type: string + type: object + diskEncryption: + description: One or more disk_encryption block as defined below. + items: + properties: + encryptionAlgorithm: + description: This is an algorithm identifier for encryption. + Possible values are RSA1_5, RSA-OAEP, RSA-OAEP-256. 
+ type: string + encryptionAtHostEnabled: + description: This is indicator to show whether resource + disk encryption is enabled. + type: boolean + keyVaultKeyId: + description: The ID of the key vault key. + type: string + keyVaultManagedIdentityId: + description: This is the resource ID of Managed Identity + used to access the key vault. + type: string + type: object + type: array + extension: + description: An extension block as defined below. + properties: + logAnalyticsWorkspaceId: + description: The workspace ID of the log analytics extension. + type: string + type: object + gateway: + description: A gateway block as defined below. + properties: + username: + description: The username used for the Ambari Portal. Changing + this forces a new resource to be created. + type: string + type: object + httpsEndpoint: + description: The HTTPS Connectivity Endpoint for this HDInsight + Hadoop Cluster. + type: string + id: + description: The ID of the HDInsight Hadoop Cluster. + type: string + location: + description: Specifies the Azure Region which this HDInsight Hadoop + Cluster should exist. Changing this forces a new resource to + be created. + type: string + metastores: + description: A metastores block as defined below. + properties: + ambari: + description: An ambari block as defined below. + properties: + databaseName: + description: The external Oozie metastore's existing SQL + database. Changing this forces a new resource to be + created. + type: string + server: + description: The fully-qualified domain name (FQDN) of + the SQL server to use for the external Oozie metastore. + Changing this forces a new resource to be created. + type: string + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + type: object + hive: + description: A hive block as defined below. 
+ properties: + databaseName: + description: The external Oozie metastore's existing SQL + database. Changing this forces a new resource to be + created. + type: string + server: + description: The fully-qualified domain name (FQDN) of + the SQL server to use for the external Oozie metastore. + Changing this forces a new resource to be created. + type: string + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + type: object + oozie: + description: An oozie block as defined below. + properties: + databaseName: + description: The external Oozie metastore's existing SQL + database. Changing this forces a new resource to be + created. + type: string + server: + description: The fully-qualified domain name (FQDN) of + the SQL server to use for the external Oozie metastore. + Changing this forces a new resource to be created. + type: string + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + type: object + type: object + monitor: + description: A monitor block as defined below. + properties: + logAnalyticsWorkspaceId: + description: The Operations Management Suite (OMS) workspace + ID. + type: string + type: object + network: + description: A network block as defined below. + properties: + connectionDirection: + description: The direction of the resource provider connection. + Possible values include Inbound or Outbound. Defaults to + Inbound. Changing this forces a new resource to be created. + type: string + privateLinkEnabled: + description: Is the private link enabled? Possible values + include true or false. Defaults to false. Changing this + forces a new resource to be created. + type: boolean + type: object + resourceGroupName: + description: Specifies the name of the Resource Group in which + this HDInsight Hadoop Cluster should exist. 
Changing this forces + a new resource to be created. + type: string + roles: + description: A roles block as defined below. + properties: + edgeNode: + description: A edge_node block as defined below. + properties: + httpsEndpoints: + description: The HTTPS Connectivity Endpoint for this + HDInsight Hadoop Cluster. One or more https_endpoints + blocks as defined below. + items: + properties: + accessModes: + description: A list of access modes for the application. + items: + type: string + type: array + destinationPort: + description: The destination port to connect to. + type: number + disableGatewayAuth: + description: The value indicates whether the gateway + authentication is enabled or not. + type: boolean + privateIpAddress: + description: The private ip address of the endpoint. + type: string + subDomainSuffix: + description: The application's subdomain suffix. + type: string + type: object + type: array + installScriptAction: + description: A install_script_action block as defined + below. + items: + properties: + name: + description: The name of the uninstall script action. + type: string + parameters: + description: The parameters for the script. + type: string + uri: + description: The URI pointing to the script to run + during the installation of the edge node. + type: string + type: object + type: array + targetInstanceCount: + description: The number of instances which should be run + for the Worker Nodes. + type: number + uninstallScriptActions: + description: A uninstall_script_actions block as defined + below. + items: + properties: + name: + description: The name of the uninstall script action. + type: string + parameters: + description: The parameters for the script. + type: string + uri: + description: The URI pointing to the script to run + during the installation of the edge node. + type: string + type: object + type: array + vmSize: + description: The Size of the Virtual Machine which should + be used as the Zookeeper Nodes. 
Possible values are + ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, + A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, + Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, + Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, + Standard_D3, Standard_D4, Standard_D11, Standard_D12, + Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, + Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, + Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, + Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, + Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, + Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, + Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, + Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, + Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, + Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, + Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, + Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, + Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, + Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, + Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, + Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, + Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, + Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, + Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, + Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. + Changing this forces a new resource to be created. + type: string + type: object + headNode: + description: A head_node block as defined above. + properties: + scriptActions: + description: The script action which will run on the cluster. + One or more script_actions blocks as defined above. + items: + properties: + name: + description: The name of the uninstall script action. + type: string + parameters: + description: The parameters for the script. 
+ type: string + uri: + description: The URI pointing to the script to run + during the installation of the edge node. + type: string + type: object + type: array + sshKeys: + description: A list of SSH Keys which should be used for + the local administrator on the Zookeeper Nodes. Changing + this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The ID of the Subnet within the Virtual Network + where the Zookeeper Nodes should be provisioned within. + Changing this forces a new resource to be created. + type: string + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + virtualNetworkId: + description: The ID of the Virtual Network where the Zookeeper + Nodes should be provisioned within. Changing this forces + a new resource to be created. + type: string + vmSize: + description: The Size of the Virtual Machine which should + be used as the Zookeeper Nodes. 
Possible values are + ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, + A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, + Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, + Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, + Standard_D3, Standard_D4, Standard_D11, Standard_D12, + Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, + Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, + Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, + Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, + Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, + Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, + Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, + Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, + Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, + Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, + Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, + Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, + Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, + Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, + Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, + Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, + Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, + Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, + Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, + Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. + Changing this forces a new resource to be created. + type: string + type: object + workerNode: + description: A worker_node block as defined below. + properties: + autoscale: + description: A autoscale block as defined below. + properties: + capacity: + description: A capacity block as defined below. + properties: + maxInstanceCount: + description: The maximum number of worker nodes + to autoscale to based on the cluster's activity. 
+ type: number + minInstanceCount: + description: The minimum number of worker nodes + to autoscale to based on the cluster's activity. + type: number + type: object + recurrence: + description: A recurrence block as defined below. + properties: + schedule: + description: A list of schedule blocks as defined + below. + items: + properties: + days: + description: The days of the week to perform + autoscale. Possible values are Monday, + Tuesday, Wednesday, Thursday, Friday, + Saturday and Sunday. + items: + type: string + type: array + targetInstanceCount: + description: The number of instances which + should be run for the Worker Nodes. + type: number + time: + description: The time of day to perform + the autoscale in 24hour format. + type: string + type: object + type: array + timezone: + description: The time zone for the autoscale schedule + times. + type: string + type: object + type: object + scriptActions: + description: The script action which will run on the cluster. + One or more script_actions blocks as defined above. + items: + properties: + name: + description: The name of the uninstall script action. + type: string + parameters: + description: The parameters for the script. + type: string + uri: + description: The URI pointing to the script to run + during the installation of the edge node. + type: string + type: object + type: array + sshKeys: + description: A list of SSH Keys which should be used for + the local administrator on the Zookeeper Nodes. Changing + this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The ID of the Subnet within the Virtual Network + where the Zookeeper Nodes should be provisioned within. + Changing this forces a new resource to be created. + type: string + targetInstanceCount: + description: The number of instances which should be run + for the Worker Nodes. 
+ type: number + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + virtualNetworkId: + description: The ID of the Virtual Network where the Zookeeper + Nodes should be provisioned within. Changing this forces + a new resource to be created. + type: string + vmSize: + description: The Size of the Virtual Machine which should + be used as the Zookeeper Nodes. Possible values are + ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, + A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, + Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, + Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, + Standard_D3, Standard_D4, Standard_D11, Standard_D12, + Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, + Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, + Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, + Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, + Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, + Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, + Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, + Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, + Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, + Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, + Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, + Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, + Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, + Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, + Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, + Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, + Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, + Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, + Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, + Standard_GS3, Standard_GS4, Standard_GS5 and 
Standard_NC24. + Changing this forces a new resource to be created. + type: string + type: object + zookeeperNode: + description: A zookeeper_node block as defined below. + properties: + scriptActions: + description: The script action which will run on the cluster. + One or more script_actions blocks as defined above. + items: + properties: + name: + description: The name of the uninstall script action. + type: string + parameters: + description: The parameters for the script. + type: string + uri: + description: The URI pointing to the script to run + during the installation of the edge node. + type: string + type: object + type: array + sshKeys: + description: A list of SSH Keys which should be used for + the local administrator on the Zookeeper Nodes. Changing + this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The ID of the Subnet within the Virtual Network + where the Zookeeper Nodes should be provisioned within. + Changing this forces a new resource to be created. + type: string + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + virtualNetworkId: + description: The ID of the Virtual Network where the Zookeeper + Nodes should be provisioned within. Changing this forces + a new resource to be created. + type: string + vmSize: + description: The Size of the Virtual Machine which should + be used as the Zookeeper Nodes. 
Possible values are + ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, + A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, + Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, + Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, + Standard_D3, Standard_D4, Standard_D11, Standard_D12, + Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, + Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, + Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, + Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, + Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, + Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, + Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, + Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, + Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, + Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, + Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, + Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, + Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, + Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, + Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, + Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, + Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, + Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, + Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, + Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. + Changing this forces a new resource to be created. + type: string + type: object + type: object + securityProfile: + description: A security_profile block as defined below. Changing + this forces a new resource to be created. + properties: + aaddsResourceId: + description: The resource ID of the Azure Active Directory + Domain Service. Changing this forces a new resource to be + created. 
+ type: string + clusterUsersGroupDns: + description: A list of the distinguished names for the cluster + user groups. Changing this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + domainName: + description: The name of the Azure Active Directory Domain. + Changing this forces a new resource to be created. + type: string + domainUsername: + description: The username of the Azure Active Directory Domain. + Changing this forces a new resource to be created. + type: string + ldapsUrls: + description: A list of the LDAPS URLs to communicate with + the Azure Active Directory. Changing this forces a new resource + to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + msiResourceId: + description: The User Assigned Identity for the HDInsight + Cluster. Changing this forces a new resource to be created. + type: string + type: object + sshEndpoint: + description: The SSH Connectivity Endpoint for this HDInsight + Hadoop Cluster. + type: string + storageAccount: + description: One or more storage_account block as defined below. + items: + properties: + isDefault: + description: Is this the Default Storage Account for the + HDInsight Hadoop Cluster? Changing this forces a new resource + to be created. + type: boolean + storageContainerId: + description: The ID of the Storage Container. Changing this + forces a new resource to be created. + type: string + storageResourceId: + description: The ID of the Storage Account. Changing this + forces a new resource to be created. + type: string + type: object + type: array + storageAccountGen2: + description: A storage_account_gen2 block as defined below. + properties: + filesystemId: + description: The ID of the Gen2 Filesystem. Changing this + forces a new resource to be created. + type: string + isDefault: + description: Is this the Default Storage Account for the HDInsight + Hadoop Cluster? Changing this forces a new resource to be + created. 
+ type: boolean + managedIdentityResourceId: + description: The ID of Managed Identity to use for accessing + the Gen2 filesystem. Changing this forces a new resource + to be created. + type: string + storageResourceId: + description: The ID of the Storage Account. Changing this + forces a new resource to be created. + type: string + type: object + tags: + additionalProperties: + type: string + description: A map of Tags which should be assigned to this HDInsight + Hadoop Cluster. + type: object + x-kubernetes-map-type: granular + tier: + description: Specifies the Tier which should be used for this + HDInsight Hadoop Cluster. Possible values are Standard or Premium. + Changing this forces a new resource to be created. + type: string + tlsMinVersion: + description: The minimal supported TLS version. Possible values + are 1.0, 1.1 or 1.2. Changing this forces a new resource to + be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? 
+ type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/hdinsight.azure.upbound.io_hbaseclusters.yaml b/package/crds/hdinsight.azure.upbound.io_hbaseclusters.yaml index 7ce2cec0a..2b1c56233 100644 --- a/package/crds/hdinsight.azure.upbound.io_hbaseclusters.yaml +++ b/package/crds/hdinsight.azure.upbound.io_hbaseclusters.yaml @@ -2966,3 +2966,2774 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: HBaseCluster is the Schema for the HBaseClusters API. Manages + a HDInsight HBase Cluster. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: HBaseClusterSpec defines the desired state of HBaseCluster + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + clusterVersion: + description: Specifies the Version of HDInsights which should + be used for this Cluster. Changing this forces a new resource + to be created. + type: string + componentVersion: + description: A component_version block as defined below. + properties: + hbase: + description: The version of HBase which should be used for + this HDInsight HBase Cluster. Changing this forces a new + resource to be created. + type: string + type: object + computeIsolation: + description: A compute_isolation block as defined below. + properties: + computeIsolationEnabled: + description: This field indicates whether enable compute isolation + or not. Possible values are true or false. 
+ type: boolean + hostSku: + description: The name of the host SKU. + type: string + type: object + diskEncryption: + description: One or more disk_encryption block as defined below. + items: + properties: + encryptionAlgorithm: + description: This is an algorithm identifier for encryption. + Possible values are RSA1_5, RSA-OAEP, RSA-OAEP-256. + type: string + encryptionAtHostEnabled: + description: This is indicator to show whether resource + disk encryption is enabled. + type: boolean + keyVaultKeyId: + description: The ID of the key vault key. + type: string + keyVaultManagedIdentityId: + description: This is the resource ID of Managed Identity + used to access the key vault. + type: string + type: object + type: array + extension: + description: An extension block as defined below. + properties: + logAnalyticsWorkspaceId: + description: The workspace ID of the log analytics extension. + type: string + primaryKeySecretRef: + description: The workspace key of the log analytics extension. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + required: + - primaryKeySecretRef + type: object + gateway: + description: A gateway block as defined below. + properties: + passwordSecretRef: + description: The password used for the Ambari Portal. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + username: + description: The username used for the Ambari Portal. Changing + this forces a new resource to be created. 
+ type: string + required: + - passwordSecretRef + type: object + location: + description: Specifies the Azure Region which this HDInsight HBase + Cluster should exist. Changing this forces a new resource to + be created. + type: string + metastores: + description: A metastores block as defined below. + properties: + ambari: + description: An ambari block as defined below. + properties: + databaseName: + description: The external Oozie metastore's existing SQL + database. Changing this forces a new resource to be + created. + type: string + passwordSecretRef: + description: The Password associated with the local administrator + for the Zookeeper Nodes. Changing this forces a new + resource to be created. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + server: + description: The fully-qualified domain name (FQDN) of + the SQL server to use for the external Oozie metastore. + Changing this forces a new resource to be created. + type: string + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + required: + - passwordSecretRef + type: object + hive: + description: A hive block as defined below. + properties: + databaseName: + description: The external Oozie metastore's existing SQL + database. Changing this forces a new resource to be + created. + type: string + passwordSecretRef: + description: The Password associated with the local administrator + for the Zookeeper Nodes. Changing this forces a new + resource to be created. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - key + - name + - namespace + type: object + server: + description: The fully-qualified domain name (FQDN) of + the SQL server to use for the external Oozie metastore. + Changing this forces a new resource to be created. + type: string + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + required: + - passwordSecretRef + type: object + oozie: + description: An oozie block as defined below. + properties: + databaseName: + description: The external Oozie metastore's existing SQL + database. Changing this forces a new resource to be + created. + type: string + passwordSecretRef: + description: The Password associated with the local administrator + for the Zookeeper Nodes. Changing this forces a new + resource to be created. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + server: + description: The fully-qualified domain name (FQDN) of + the SQL server to use for the external Oozie metastore. + Changing this forces a new resource to be created. + type: string + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + required: + - passwordSecretRef + type: object + type: object + monitor: + description: A monitor block as defined below. + properties: + logAnalyticsWorkspaceId: + description: The Operations Management Suite (OMS) workspace + ID. + type: string + primaryKeySecretRef: + description: The Operations Management Suite (OMS) workspace + key. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. 
+ type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + required: + - primaryKeySecretRef + type: object + network: + description: A network block as defined below. + properties: + connectionDirection: + description: The direction of the resource provider connection. + Possible values include Inbound or Outbound. Defaults to + Inbound. Changing this forces a new resource to be created. + type: string + privateLinkEnabled: + description: Is the private link enabled? Possible values + include true or false. Defaults to false. Changing this + forces a new resource to be created. + type: boolean + type: object + resourceGroupName: + description: Specifies the name of the Resource Group in which + this HDInsight HBase Cluster should exist. Changing this forces + a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + roles: + description: A roles block as defined below. + properties: + headNode: + description: A head_node block as defined above. + properties: + passwordSecretRef: + description: The Password associated with the local administrator + for the Zookeeper Nodes. Changing this forces a new + resource to be created. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + scriptActions: + description: The script action which will run on the cluster. 
+ One or more script_actions blocks as defined above. + items: + properties: + name: + description: The name of the script action. + type: string + parameters: + description: The parameters for the script provided. + type: string + uri: + description: The URI to the script. + type: string + type: object + type: array + sshKeys: + description: A list of SSH Keys which should be used for + the local administrator on the Zookeeper Nodes. Changing + this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The ID of the Subnet within the Virtual Network + where the Zookeeper Nodes should be provisioned within. + Changing this forces a new resource to be created. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + virtualNetworkId: + description: The ID of the Virtual Network where the Zookeeper + Nodes should be provisioned within. Changing this forces + a new resource to be created. + type: string + vmSize: + description: The Size of the Virtual Machine which should + be used as the Zookeeper Nodes. 
Possible values are + ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, + A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, + Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, + Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, + Standard_D3, Standard_D4, Standard_D11, Standard_D12, + Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, + Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, + Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, + Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, + Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, + Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, + Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, + Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, + Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, + Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, + Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, + Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, + Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, + Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, + Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, + Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, + Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, + Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, + Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, + Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. + Changing this forces a new resource to be created. + type: string + type: object + workerNode: + description: A worker_node block as defined below. + properties: + autoscale: + description: A autoscale block as defined below. + properties: + recurrence: + description: A recurrence block as defined below. + properties: + schedule: + description: A list of schedule blocks as defined + below. 
+ items: + properties: + days: + description: The days of the week to perform + autoscale. Possible values are Monday, + Tuesday, Wednesday, Thursday, Friday, + Saturday and Sunday. + items: + type: string + type: array + targetInstanceCount: + description: The number of instances which + should be run for the Worker Nodes. + type: number + time: + description: The time of day to perform + the autoscale in 24hour format. + type: string + type: object + type: array + timezone: + description: The time zone for the autoscale schedule + times. + type: string + type: object + type: object + passwordSecretRef: + description: The Password associated with the local administrator + for the Zookeeper Nodes. Changing this forces a new + resource to be created. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + scriptActions: + description: The script action which will run on the cluster. + One or more script_actions blocks as defined above. + items: + properties: + name: + description: The name of the script action. + type: string + parameters: + description: The parameters for the script provided. + type: string + uri: + description: The URI to the script. + type: string + type: object + type: array + sshKeys: + description: A list of SSH Keys which should be used for + the local administrator on the Zookeeper Nodes. Changing + this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The ID of the Subnet within the Virtual Network + where the Zookeeper Nodes should be provisioned within. + Changing this forces a new resource to be created. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + targetInstanceCount: + description: The number of instances which should be run + for the Worker Nodes. + type: number + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + virtualNetworkId: + description: The ID of the Virtual Network where the Zookeeper + Nodes should be provisioned within. Changing this forces + a new resource to be created. + type: string + vmSize: + description: The Size of the Virtual Machine which should + be used as the Zookeeper Nodes. Possible values are + ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, + A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, + Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, + Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, + Standard_D3, Standard_D4, Standard_D11, Standard_D12, + Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, + Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, + Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, + Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, + Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, + Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, + Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, + Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, + Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, + Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, + Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, + Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, + Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, + Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, + Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, + Standard_E96a_V4, Standard_G1, Standard_G2, 
Standard_G3, + Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, + Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, + Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, + Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. + Changing this forces a new resource to be created. + type: string + type: object + zookeeperNode: + description: A zookeeper_node block as defined below. + properties: + passwordSecretRef: + description: The Password associated with the local administrator + for the Zookeeper Nodes. Changing this forces a new + resource to be created. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + scriptActions: + description: The script action which will run on the cluster. + One or more script_actions blocks as defined above. + items: + properties: + name: + description: The name of the script action. + type: string + parameters: + description: The parameters for the script provided. + type: string + uri: + description: The URI to the script. + type: string + type: object + type: array + sshKeys: + description: A list of SSH Keys which should be used for + the local administrator on the Zookeeper Nodes. Changing + this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The ID of the Subnet within the Virtual Network + where the Zookeeper Nodes should be provisioned within. + Changing this forces a new resource to be created. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + virtualNetworkId: + description: The ID of the Virtual Network where the Zookeeper + Nodes should be provisioned within. Changing this forces + a new resource to be created. + type: string + vmSize: + description: The Size of the Virtual Machine which should + be used as the Zookeeper Nodes. Possible values are + ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, + A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, + Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, + Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, + Standard_D3, Standard_D4, Standard_D11, Standard_D12, + Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, + Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, + Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, + Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, + Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, + Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, + Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, + Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, + Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, + Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, + Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, + Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, + Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, + Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, + Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, + Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, + Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, + Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, + Standard_F64s_V2, Standard_F72s_V2, 
Standard_GS1, Standard_GS2, + Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. + Changing this forces a new resource to be created. + type: string + type: object + type: object + securityProfile: + description: A security_profile block as defined below. Changing + this forces a new resource to be created. + properties: + aaddsResourceId: + description: The resource ID of the Azure Active Directory + Domain Service. Changing this forces a new resource to be + created. + type: string + clusterUsersGroupDns: + description: A list of the distinguished names for the cluster + user groups. Changing this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + domainName: + description: The name of the Azure Active Directory Domain. + Changing this forces a new resource to be created. + type: string + domainUserPasswordSecretRef: + description: The user password of the Azure Active Directory + Domain. Changing this forces a new resource to be created. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + domainUsername: + description: The username of the Azure Active Directory Domain. + Changing this forces a new resource to be created. + type: string + ldapsUrls: + description: A list of the LDAPS URLs to communicate with + the Azure Active Directory. Changing this forces a new resource + to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + msiResourceId: + description: The User Assigned Identity for the HDInsight + Cluster. Changing this forces a new resource to be created. + type: string + required: + - domainUserPasswordSecretRef + type: object + storageAccount: + description: One or more storage_account block as defined below. 
+ items: + properties: + isDefault: + description: Is this the Default Storage Account for the + HDInsight Hadoop Cluster? Changing this forces a new resource + to be created. + type: boolean + storageAccountKeySecretRef: + description: The Access Key which should be used to connect + to the Storage Account. Changing this forces a new resource + to be created. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + storageContainerId: + description: The ID of the Storage Container. Changing this + forces a new resource to be created. + type: string + storageContainerIdRef: + description: Reference to a Container in storage to populate + storageContainerId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + storageContainerIdSelector: + description: Selector for a Container in storage to populate + storageContainerId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + storageResourceId: + description: The ID of the Storage Account. Changing this + forces a new resource to be created. + type: string + required: + - storageAccountKeySecretRef + type: object + type: array + storageAccountGen2: + description: A storage_account_gen2 block as defined below. + properties: + filesystemId: + description: The ID of the Gen2 Filesystem. Changing this + forces a new resource to be created. + type: string + isDefault: + description: Is this the Default Storage Account for the HDInsight + Hadoop Cluster? Changing this forces a new resource to be + created. + type: boolean + managedIdentityResourceId: + description: The ID of Managed Identity to use for accessing + the Gen2 filesystem. Changing this forces a new resource + to be created. + type: string + storageResourceId: + description: The ID of the Storage Account. 
Changing this + forces a new resource to be created. + type: string + type: object + tags: + additionalProperties: + type: string + description: A map of Tags which should be assigned to this HDInsight + HBase Cluster. + type: object + x-kubernetes-map-type: granular + tier: + description: Specifies the Tier which should be used for this + HDInsight HBase Cluster. Possible values are Standard or Premium. + Changing this forces a new resource to be created. + type: string + tlsMinVersion: + description: The minimal supported TLS version. Possible values + are 1.0, 1.1 or 1.2. Changing this forces a new resource to + be created. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + clusterVersion: + description: Specifies the Version of HDInsights which should + be used for this Cluster. Changing this forces a new resource + to be created. + type: string + componentVersion: + description: A component_version block as defined below. + properties: + hbase: + description: The version of HBase which should be used for + this HDInsight HBase Cluster. Changing this forces a new + resource to be created. + type: string + type: object + computeIsolation: + description: A compute_isolation block as defined below. 
+ properties: + computeIsolationEnabled: + description: This field indicates whether enable compute isolation + or not. Possible values are true or false. + type: boolean + hostSku: + description: The name of the host SKU. + type: string + type: object + diskEncryption: + description: One or more disk_encryption block as defined below. + items: + properties: + encryptionAlgorithm: + description: This is an algorithm identifier for encryption. + Possible values are RSA1_5, RSA-OAEP, RSA-OAEP-256. + type: string + encryptionAtHostEnabled: + description: This is indicator to show whether resource + disk encryption is enabled. + type: boolean + keyVaultKeyId: + description: The ID of the key vault key. + type: string + keyVaultManagedIdentityId: + description: This is the resource ID of Managed Identity + used to access the key vault. + type: string + type: object + type: array + extension: + description: An extension block as defined below. + properties: + logAnalyticsWorkspaceId: + description: The workspace ID of the log analytics extension. + type: string + type: object + gateway: + description: A gateway block as defined below. + properties: + username: + description: The username used for the Ambari Portal. Changing + this forces a new resource to be created. + type: string + type: object + location: + description: Specifies the Azure Region which this HDInsight HBase + Cluster should exist. Changing this forces a new resource to + be created. + type: string + metastores: + description: A metastores block as defined below. + properties: + ambari: + description: An ambari block as defined below. + properties: + databaseName: + description: The external Oozie metastore's existing SQL + database. Changing this forces a new resource to be + created. + type: string + server: + description: The fully-qualified domain name (FQDN) of + the SQL server to use for the external Oozie metastore. + Changing this forces a new resource to be created. 
+ type: string + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + type: object + hive: + description: A hive block as defined below. + properties: + databaseName: + description: The external Oozie metastore's existing SQL + database. Changing this forces a new resource to be + created. + type: string + server: + description: The fully-qualified domain name (FQDN) of + the SQL server to use for the external Oozie metastore. + Changing this forces a new resource to be created. + type: string + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + type: object + oozie: + description: An oozie block as defined below. + properties: + databaseName: + description: The external Oozie metastore's existing SQL + database. Changing this forces a new resource to be + created. + type: string + server: + description: The fully-qualified domain name (FQDN) of + the SQL server to use for the external Oozie metastore. + Changing this forces a new resource to be created. + type: string + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + type: object + type: object + monitor: + description: A monitor block as defined below. + properties: + logAnalyticsWorkspaceId: + description: The Operations Management Suite (OMS) workspace + ID. + type: string + type: object + network: + description: A network block as defined below. + properties: + connectionDirection: + description: The direction of the resource provider connection. + Possible values include Inbound or Outbound. Defaults to + Inbound. Changing this forces a new resource to be created. + type: string + privateLinkEnabled: + description: Is the private link enabled? 
Possible values + include true or false. Defaults to false. Changing this + forces a new resource to be created. + type: boolean + type: object + roles: + description: A roles block as defined below. + properties: + headNode: + description: A head_node block as defined above. + properties: + scriptActions: + description: The script action which will run on the cluster. + One or more script_actions blocks as defined above. + items: + properties: + name: + description: The name of the script action. + type: string + parameters: + description: The parameters for the script provided. + type: string + uri: + description: The URI to the script. + type: string + type: object + type: array + sshKeys: + description: A list of SSH Keys which should be used for + the local administrator on the Zookeeper Nodes. Changing + this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The ID of the Subnet within the Virtual Network + where the Zookeeper Nodes should be provisioned within. + Changing this forces a new resource to be created. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + virtualNetworkId: + description: The ID of the Virtual Network where the Zookeeper + Nodes should be provisioned within. Changing this forces + a new resource to be created. + type: string + vmSize: + description: The Size of the Virtual Machine which should + be used as the Zookeeper Nodes. 
Possible values are + ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, + A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, + Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, + Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, + Standard_D3, Standard_D4, Standard_D11, Standard_D12, + Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, + Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, + Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, + Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, + Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, + Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, + Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, + Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, + Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, + Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, + Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, + Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, + Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, + Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, + Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, + Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, + Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, + Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, + Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, + Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. + Changing this forces a new resource to be created. + type: string + type: object + workerNode: + description: A worker_node block as defined below. + properties: + autoscale: + description: A autoscale block as defined below. + properties: + recurrence: + description: A recurrence block as defined below. + properties: + schedule: + description: A list of schedule blocks as defined + below. 
+ items: + properties: + days: + description: The days of the week to perform + autoscale. Possible values are Monday, + Tuesday, Wednesday, Thursday, Friday, + Saturday and Sunday. + items: + type: string + type: array + targetInstanceCount: + description: The number of instances which + should be run for the Worker Nodes. + type: number + time: + description: The time of day to perform + the autoscale in 24hour format. + type: string + type: object + type: array + timezone: + description: The time zone for the autoscale schedule + times. + type: string + type: object + type: object + scriptActions: + description: The script action which will run on the cluster. + One or more script_actions blocks as defined above. + items: + properties: + name: + description: The name of the script action. + type: string + parameters: + description: The parameters for the script provided. + type: string + uri: + description: The URI to the script. + type: string + type: object + type: array + sshKeys: + description: A list of SSH Keys which should be used for + the local administrator on the Zookeeper Nodes. Changing + this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The ID of the Subnet within the Virtual Network + where the Zookeeper Nodes should be provisioned within. + Changing this forces a new resource to be created. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + targetInstanceCount: + description: The number of instances which should be run + for the Worker Nodes. + type: number + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. 
+ type: string + virtualNetworkId: + description: The ID of the Virtual Network where the Zookeeper + Nodes should be provisioned within. Changing this forces + a new resource to be created. + type: string + vmSize: + description: The Size of the Virtual Machine which should + be used as the Zookeeper Nodes. Possible values are + ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, + A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, + Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, + Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, + Standard_D3, Standard_D4, Standard_D11, Standard_D12, + Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, + Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, + Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, + Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, + Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, + Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, + Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, + Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, + Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, + Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, + Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, + Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, + Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, + Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, + Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, + Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, + Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, + Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, + Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, + Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. + Changing this forces a new resource to be created. 
+ type: string + type: object + zookeeperNode: + description: A zookeeper_node block as defined below. + properties: + scriptActions: + description: The script action which will run on the cluster. + One or more script_actions blocks as defined above. + items: + properties: + name: + description: The name of the script action. + type: string + parameters: + description: The parameters for the script provided. + type: string + uri: + description: The URI to the script. + type: string + type: object + type: array + sshKeys: + description: A list of SSH Keys which should be used for + the local administrator on the Zookeeper Nodes. Changing + this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The ID of the Subnet within the Virtual Network + where the Zookeeper Nodes should be provisioned within. + Changing this forces a new resource to be created. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + virtualNetworkId: + description: The ID of the Virtual Network where the Zookeeper + Nodes should be provisioned within. Changing this forces + a new resource to be created. + type: string + vmSize: + description: The Size of the Virtual Machine which should + be used as the Zookeeper Nodes. 
Possible values are + ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, + A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, + Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, + Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, + Standard_D3, Standard_D4, Standard_D11, Standard_D12, + Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, + Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, + Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, + Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, + Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, + Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, + Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, + Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, + Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, + Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, + Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, + Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, + Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, + Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, + Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, + Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, + Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, + Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, + Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, + Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. + Changing this forces a new resource to be created. + type: string + type: object + type: object + securityProfile: + description: A security_profile block as defined below. Changing + this forces a new resource to be created. + properties: + aaddsResourceId: + description: The resource ID of the Azure Active Directory + Domain Service. Changing this forces a new resource to be + created. 
+ type: string + clusterUsersGroupDns: + description: A list of the distinguished names for the cluster + user groups. Changing this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + domainName: + description: The name of the Azure Active Directory Domain. + Changing this forces a new resource to be created. + type: string + domainUsername: + description: The username of the Azure Active Directory Domain. + Changing this forces a new resource to be created. + type: string + ldapsUrls: + description: A list of the LDAPS URLs to communicate with + the Azure Active Directory. Changing this forces a new resource + to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + msiResourceId: + description: The User Assigned Identity for the HDInsight + Cluster. Changing this forces a new resource to be created. + type: string + type: object + storageAccount: + description: One or more storage_account block as defined below. + items: + properties: + isDefault: + description: Is this the Default Storage Account for the + HDInsight Hadoop Cluster? Changing this forces a new resource + to be created. + type: boolean + storageContainerId: + description: The ID of the Storage Container. Changing this + forces a new resource to be created. + type: string + storageContainerIdRef: + description: Reference to a Container in storage to populate + storageContainerId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + storageContainerIdSelector: + description: Selector for a Container in storage to populate + storageContainerId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + storageResourceId: + description: The ID of the Storage Account. Changing this + forces a new resource to be created. + type: string + type: object + type: array + storageAccountGen2: + description: A storage_account_gen2 block as defined below. + properties: + filesystemId: + description: The ID of the Gen2 Filesystem. 
Changing this + forces a new resource to be created. + type: string + isDefault: + description: Is this the Default Storage Account for the HDInsight + Hadoop Cluster? Changing this forces a new resource to be + created. + type: boolean + managedIdentityResourceId: + description: The ID of Managed Identity to use for accessing + the Gen2 filesystem. Changing this forces a new resource + to be created. + type: string + storageResourceId: + description: The ID of the Storage Account. Changing this + forces a new resource to be created. + type: string + type: object + tags: + additionalProperties: + type: string + description: A map of Tags which should be assigned to this HDInsight + HBase Cluster. + type: object + x-kubernetes-map-type: granular + tier: + description: Specifies the Tier which should be used for this + HDInsight HBase Cluster. Possible values are Standard or Premium. + Changing this forces a new resource to be created. + type: string + tlsMinVersion: + description: The minimal supported TLS version. Possible values + are 1.0, 1.1 or 1.2. Changing this forces a new resource to + be created. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.clusterVersion is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.clusterVersion) + || (has(self.initProvider) && has(self.initProvider.clusterVersion))' + - message: spec.forProvider.componentVersion is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.componentVersion) + || (has(self.initProvider) && has(self.initProvider.componentVersion))' + - message: spec.forProvider.gateway is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.gateway) + || (has(self.initProvider) && has(self.initProvider.gateway))' + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || 
''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.roles is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.roles) + || (has(self.initProvider) && has(self.initProvider.roles))' + - message: spec.forProvider.tier is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.tier) + || (has(self.initProvider) && has(self.initProvider.tier))' + status: + description: HBaseClusterStatus defines the observed state of HBaseCluster. + properties: + atProvider: + properties: + clusterVersion: + description: Specifies the Version of HDInsights which should + be used for this Cluster. Changing this forces a new resource + to be created. + type: string + componentVersion: + description: A component_version block as defined below. + properties: + hbase: + description: The version of HBase which should be used for + this HDInsight HBase Cluster. Changing this forces a new + resource to be created. + type: string + type: object + computeIsolation: + description: A compute_isolation block as defined below. + properties: + computeIsolationEnabled: + description: This field indicates whether enable compute isolation + or not. Possible values are true or false. + type: boolean + hostSku: + description: The name of the host SKU. + type: string + type: object + diskEncryption: + description: One or more disk_encryption block as defined below. + items: + properties: + encryptionAlgorithm: + description: This is an algorithm identifier for encryption. + Possible values are RSA1_5, RSA-OAEP, RSA-OAEP-256. 
+ type: string + encryptionAtHostEnabled: + description: This is indicator to show whether resource + disk encryption is enabled. + type: boolean + keyVaultKeyId: + description: The ID of the key vault key. + type: string + keyVaultManagedIdentityId: + description: This is the resource ID of Managed Identity + used to access the key vault. + type: string + type: object + type: array + extension: + description: An extension block as defined below. + properties: + logAnalyticsWorkspaceId: + description: The workspace ID of the log analytics extension. + type: string + type: object + gateway: + description: A gateway block as defined below. + properties: + username: + description: The username used for the Ambari Portal. Changing + this forces a new resource to be created. + type: string + type: object + httpsEndpoint: + description: The HTTPS Connectivity Endpoint for this HDInsight + HBase Cluster. + type: string + id: + description: The ID of the HDInsight HBase Cluster. + type: string + location: + description: Specifies the Azure Region which this HDInsight HBase + Cluster should exist. Changing this forces a new resource to + be created. + type: string + metastores: + description: A metastores block as defined below. + properties: + ambari: + description: An ambari block as defined below. + properties: + databaseName: + description: The external Oozie metastore's existing SQL + database. Changing this forces a new resource to be + created. + type: string + server: + description: The fully-qualified domain name (FQDN) of + the SQL server to use for the external Oozie metastore. + Changing this forces a new resource to be created. + type: string + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + type: object + hive: + description: A hive block as defined below. 
+ properties: + databaseName: + description: The external Oozie metastore's existing SQL + database. Changing this forces a new resource to be + created. + type: string + server: + description: The fully-qualified domain name (FQDN) of + the SQL server to use for the external Oozie metastore. + Changing this forces a new resource to be created. + type: string + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + type: object + oozie: + description: An oozie block as defined below. + properties: + databaseName: + description: The external Oozie metastore's existing SQL + database. Changing this forces a new resource to be + created. + type: string + server: + description: The fully-qualified domain name (FQDN) of + the SQL server to use for the external Oozie metastore. + Changing this forces a new resource to be created. + type: string + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + type: object + type: object + monitor: + description: A monitor block as defined below. + properties: + logAnalyticsWorkspaceId: + description: The Operations Management Suite (OMS) workspace + ID. + type: string + type: object + network: + description: A network block as defined below. + properties: + connectionDirection: + description: The direction of the resource provider connection. + Possible values include Inbound or Outbound. Defaults to + Inbound. Changing this forces a new resource to be created. + type: string + privateLinkEnabled: + description: Is the private link enabled? Possible values + include true or false. Defaults to false. Changing this + forces a new resource to be created. + type: boolean + type: object + resourceGroupName: + description: Specifies the name of the Resource Group in which + this HDInsight HBase Cluster should exist. 
Changing this forces + a new resource to be created. + type: string + roles: + description: A roles block as defined below. + properties: + headNode: + description: A head_node block as defined above. + properties: + scriptActions: + description: The script action which will run on the cluster. + One or more script_actions blocks as defined above. + items: + properties: + name: + description: The name of the script action. + type: string + parameters: + description: The parameters for the script provided. + type: string + uri: + description: The URI to the script. + type: string + type: object + type: array + sshKeys: + description: A list of SSH Keys which should be used for + the local administrator on the Zookeeper Nodes. Changing + this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The ID of the Subnet within the Virtual Network + where the Zookeeper Nodes should be provisioned within. + Changing this forces a new resource to be created. + type: string + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + virtualNetworkId: + description: The ID of the Virtual Network where the Zookeeper + Nodes should be provisioned within. Changing this forces + a new resource to be created. + type: string + vmSize: + description: The Size of the Virtual Machine which should + be used as the Zookeeper Nodes. 
Possible values are + ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, + A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, + Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, + Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, + Standard_D3, Standard_D4, Standard_D11, Standard_D12, + Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, + Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, + Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, + Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, + Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, + Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, + Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, + Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, + Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, + Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, + Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, + Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, + Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, + Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, + Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, + Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, + Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, + Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, + Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, + Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. + Changing this forces a new resource to be created. + type: string + type: object + workerNode: + description: A worker_node block as defined below. + properties: + autoscale: + description: A autoscale block as defined below. + properties: + recurrence: + description: A recurrence block as defined below. + properties: + schedule: + description: A list of schedule blocks as defined + below. 
+ items: + properties: + days: + description: The days of the week to perform + autoscale. Possible values are Monday, + Tuesday, Wednesday, Thursday, Friday, + Saturday and Sunday. + items: + type: string + type: array + targetInstanceCount: + description: The number of instances which + should be run for the Worker Nodes. + type: number + time: + description: The time of day to perform + the autoscale in 24hour format. + type: string + type: object + type: array + timezone: + description: The time zone for the autoscale schedule + times. + type: string + type: object + type: object + scriptActions: + description: The script action which will run on the cluster. + One or more script_actions blocks as defined above. + items: + properties: + name: + description: The name of the script action. + type: string + parameters: + description: The parameters for the script provided. + type: string + uri: + description: The URI to the script. + type: string + type: object + type: array + sshKeys: + description: A list of SSH Keys which should be used for + the local administrator on the Zookeeper Nodes. Changing + this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The ID of the Subnet within the Virtual Network + where the Zookeeper Nodes should be provisioned within. + Changing this forces a new resource to be created. + type: string + targetInstanceCount: + description: The number of instances which should be run + for the Worker Nodes. + type: number + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + virtualNetworkId: + description: The ID of the Virtual Network where the Zookeeper + Nodes should be provisioned within. Changing this forces + a new resource to be created. 
+ type: string + vmSize: + description: The Size of the Virtual Machine which should + be used as the Zookeeper Nodes. Possible values are + ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, + A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, + Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, + Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, + Standard_D3, Standard_D4, Standard_D11, Standard_D12, + Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, + Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, + Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, + Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, + Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, + Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, + Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, + Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, + Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, + Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, + Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, + Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, + Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, + Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, + Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, + Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, + Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, + Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, + Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, + Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. + Changing this forces a new resource to be created. + type: string + type: object + zookeeperNode: + description: A zookeeper_node block as defined below. + properties: + scriptActions: + description: The script action which will run on the cluster. + One or more script_actions blocks as defined above. 
+ items: + properties: + name: + description: The name of the script action. + type: string + parameters: + description: The parameters for the script provided. + type: string + uri: + description: The URI to the script. + type: string + type: object + type: array + sshKeys: + description: A list of SSH Keys which should be used for + the local administrator on the Zookeeper Nodes. Changing + this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The ID of the Subnet within the Virtual Network + where the Zookeeper Nodes should be provisioned within. + Changing this forces a new resource to be created. + type: string + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + virtualNetworkId: + description: The ID of the Virtual Network where the Zookeeper + Nodes should be provisioned within. Changing this forces + a new resource to be created. + type: string + vmSize: + description: The Size of the Virtual Machine which should + be used as the Zookeeper Nodes. 
Possible values are + ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, + A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, + Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, + Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, + Standard_D3, Standard_D4, Standard_D11, Standard_D12, + Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, + Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, + Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, + Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, + Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, + Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, + Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, + Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, + Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, + Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, + Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, + Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, + Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, + Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, + Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, + Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, + Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, + Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, + Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, + Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. + Changing this forces a new resource to be created. + type: string + type: object + type: object + securityProfile: + description: A security_profile block as defined below. Changing + this forces a new resource to be created. + properties: + aaddsResourceId: + description: The resource ID of the Azure Active Directory + Domain Service. Changing this forces a new resource to be + created. 
+ type: string + clusterUsersGroupDns: + description: A list of the distinguished names for the cluster + user groups. Changing this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + domainName: + description: The name of the Azure Active Directory Domain. + Changing this forces a new resource to be created. + type: string + domainUsername: + description: The username of the Azure Active Directory Domain. + Changing this forces a new resource to be created. + type: string + ldapsUrls: + description: A list of the LDAPS URLs to communicate with + the Azure Active Directory. Changing this forces a new resource + to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + msiResourceId: + description: The User Assigned Identity for the HDInsight + Cluster. Changing this forces a new resource to be created. + type: string + type: object + sshEndpoint: + description: The SSH Connectivity Endpoint for this HDInsight + HBase Cluster. + type: string + storageAccount: + description: One or more storage_account block as defined below. + items: + properties: + isDefault: + description: Is this the Default Storage Account for the + HDInsight Hadoop Cluster? Changing this forces a new resource + to be created. + type: boolean + storageContainerId: + description: The ID of the Storage Container. Changing this + forces a new resource to be created. + type: string + storageResourceId: + description: The ID of the Storage Account. Changing this + forces a new resource to be created. + type: string + type: object + type: array + storageAccountGen2: + description: A storage_account_gen2 block as defined below. + properties: + filesystemId: + description: The ID of the Gen2 Filesystem. Changing this + forces a new resource to be created. + type: string + isDefault: + description: Is this the Default Storage Account for the HDInsight + Hadoop Cluster? Changing this forces a new resource to be + created. 
+ type: boolean + managedIdentityResourceId: + description: The ID of Managed Identity to use for accessing + the Gen2 filesystem. Changing this forces a new resource + to be created. + type: string + storageResourceId: + description: The ID of the Storage Account. Changing this + forces a new resource to be created. + type: string + type: object + tags: + additionalProperties: + type: string + description: A map of Tags which should be assigned to this HDInsight + HBase Cluster. + type: object + x-kubernetes-map-type: granular + tier: + description: Specifies the Tier which should be used for this + HDInsight HBase Cluster. Possible values are Standard or Premium. + Changing this forces a new resource to be created. + type: string + tlsMinVersion: + description: The minimal supported TLS version. Possible values + are 1.0, 1.1 or 1.2. Changing this forces a new resource to + be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? 
+ type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/hdinsight.azure.upbound.io_interactivequeryclusters.yaml b/package/crds/hdinsight.azure.upbound.io_interactivequeryclusters.yaml index 1aac2a493..b7982bdb5 100644 --- a/package/crds/hdinsight.azure.upbound.io_interactivequeryclusters.yaml +++ b/package/crds/hdinsight.azure.upbound.io_interactivequeryclusters.yaml @@ -3007,3 +3007,2809 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: InteractiveQueryCluster is the Schema for the InteractiveQueryClusters + API. Manages a HDInsight Interactive Query Cluster. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: InteractiveQueryClusterSpec defines the desired state of + InteractiveQueryCluster + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + clusterVersion: + description: Specifies the Version of HDInsights which should + be used for this Cluster. Changing this forces a new resource + to be created. + type: string + componentVersion: + description: A component_version block as defined below. + properties: + interactiveHive: + description: The version of Interactive Query which should + be used for this HDInsight Interactive Query Cluster. Changing + this forces a new resource to be created. + type: string + type: object + computeIsolation: + description: A compute_isolation block as defined below. 
+ properties: + computeIsolationEnabled: + description: This field indicates whether enable compute isolation + or not. Possible values are true or false. + type: boolean + hostSku: + description: The name of the host SKU. + type: string + type: object + diskEncryption: + description: A disk_encryption block as defined below. + items: + properties: + encryptionAlgorithm: + description: This is an algorithm identifier for encryption. + Possible values are RSA1_5, RSA-OAEP, RSA-OAEP-256. + type: string + encryptionAtHostEnabled: + description: This is indicator to show whether resource + disk encryption is enabled. + type: boolean + keyVaultKeyId: + description: The ID of the key vault key. + type: string + keyVaultManagedIdentityId: + description: This is the resource ID of Managed Identity + used to access the key vault. + type: string + type: object + type: array + encryptionInTransitEnabled: + description: Whether encryption in transit is enabled for this + Cluster. Changing this forces a new resource to be created. + type: boolean + extension: + description: An extension block as defined below. + properties: + logAnalyticsWorkspaceId: + description: The workspace ID of the log analytics extension. + type: string + primaryKeySecretRef: + description: The workspace key of the log analytics extension. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + required: + - primaryKeySecretRef + type: object + gateway: + description: A gateway block as defined below. + properties: + passwordSecretRef: + description: The password used for the Ambari Portal. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - key + - name + - namespace + type: object + username: + description: The username used for the Ambari Portal. Changing + this forces a new resource to be created. + type: string + required: + - passwordSecretRef + type: object + location: + description: Specifies the Azure Region which this HDInsight Interactive + Query Cluster should exist. Changing this forces a new resource + to be created. + type: string + metastores: + description: A metastores block as defined below. + properties: + ambari: + description: An ambari block as defined below. + properties: + databaseName: + description: The external Oozie metastore's existing SQL + database. Changing this forces a new resource to be + created. + type: string + passwordSecretRef: + description: The Password associated with the local administrator + for the Zookeeper Nodes. Changing this forces a new + resource to be created. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + server: + description: The fully-qualified domain name (FQDN) of + the SQL server to use for the external Oozie metastore. + Changing this forces a new resource to be created. + type: string + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + required: + - passwordSecretRef + type: object + hive: + description: A hive block as defined below. + properties: + databaseName: + description: The external Oozie metastore's existing SQL + database. Changing this forces a new resource to be + created. + type: string + passwordSecretRef: + description: The Password associated with the local administrator + for the Zookeeper Nodes. Changing this forces a new + resource to be created. 
+ properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + server: + description: The fully-qualified domain name (FQDN) of + the SQL server to use for the external Oozie metastore. + Changing this forces a new resource to be created. + type: string + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + required: + - passwordSecretRef + type: object + oozie: + description: An oozie block as defined below. + properties: + databaseName: + description: The external Oozie metastore's existing SQL + database. Changing this forces a new resource to be + created. + type: string + passwordSecretRef: + description: The Password associated with the local administrator + for the Zookeeper Nodes. Changing this forces a new + resource to be created. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + server: + description: The fully-qualified domain name (FQDN) of + the SQL server to use for the external Oozie metastore. + Changing this forces a new resource to be created. + type: string + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + required: + - passwordSecretRef + type: object + type: object + monitor: + description: A monitor block as defined below. + properties: + logAnalyticsWorkspaceId: + description: The Operations Management Suite (OMS) workspace + ID. 
+ type: string + primaryKeySecretRef: + description: The Operations Management Suite (OMS) workspace + key. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + required: + - primaryKeySecretRef + type: object + network: + description: A network block as defined below. + properties: + connectionDirection: + description: The direction of the resource provider connection. + Possible values include Inbound or Outbound. Defaults to + Inbound. Changing this forces a new resource to be created. + type: string + privateLinkEnabled: + description: Is the private link enabled? Possible values + include true or false. Defaults to false. Changing this + forces a new resource to be created. + type: boolean + type: object + resourceGroupName: + description: Specifies the name of the Resource Group in which + this HDInsight Interactive Query Cluster should exist. Changing + this forces a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + roles: + description: A roles block as defined below. + properties: + headNode: + description: A head_node block as defined above. + properties: + passwordSecretRef: + description: The Password associated with the local administrator + for the Zookeeper Nodes. Changing this forces a new + resource to be created. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - key + - name + - namespace + type: object + scriptActions: + description: The script action which will run on the cluster. + One or more script_actions blocks as defined above. + items: + properties: + name: + description: The name of the script action. + type: string + parameters: + description: The parameters for the script provided. + type: string + uri: + description: The URI to the script. + type: string + type: object + type: array + sshKeys: + description: A list of SSH Keys which should be used for + the local administrator on the Zookeeper Nodes. Changing + this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The ID of the Subnet within the Virtual Network + where the Zookeeper Nodes should be provisioned within. + Changing this forces a new resource to be created. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + virtualNetworkId: + description: The ID of the Virtual Network where the Zookeeper + Nodes should be provisioned within. Changing this forces + a new resource to be created. + type: string + vmSize: + description: The Size of the Virtual Machine which should + be used as the Zookeeper Nodes. 
Possible values are + ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, + A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, + Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, + Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, + Standard_D3, Standard_D4, Standard_D11, Standard_D12, + Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, + Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, + Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, + Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, + Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, + Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, + Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, + Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, + Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, + Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, + Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, + Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, + Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, + Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, + Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, + Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, + Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, + Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, + Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, + Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. + Changing this forces a new resource to be created. + type: string + type: object + workerNode: + description: A worker_node block as defined below. + properties: + autoscale: + description: A autoscale block as defined below. + properties: + capacity: + properties: + maxInstanceCount: + type: number + minInstanceCount: + type: number + type: object + recurrence: + description: A recurrence block as defined below. 
+ properties: + schedule: + description: A list of schedule blocks as defined + below. + items: + properties: + days: + description: The days of the week to perform + autoscale. Possible values are Monday, + Tuesday, Wednesday, Thursday, Friday, + Saturday and Sunday. + items: + type: string + type: array + targetInstanceCount: + description: The number of instances which + should be run for the Worker Nodes. + type: number + time: + description: The time of day to perform + the autoscale in 24hour format. + type: string + type: object + type: array + timezone: + description: The time zone for the autoscale schedule + times. + type: string + type: object + type: object + passwordSecretRef: + description: The Password associated with the local administrator + for the Zookeeper Nodes. Changing this forces a new + resource to be created. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + scriptActions: + description: The script action which will run on the cluster. + One or more script_actions blocks as defined above. + items: + properties: + name: + description: The name of the script action. + type: string + parameters: + description: The parameters for the script provided. + type: string + uri: + description: The URI to the script. + type: string + type: object + type: array + sshKeys: + description: A list of SSH Keys which should be used for + the local administrator on the Zookeeper Nodes. Changing + this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The ID of the Subnet within the Virtual Network + where the Zookeeper Nodes should be provisioned within. + Changing this forces a new resource to be created. 
+ type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + targetInstanceCount: + description: The number of instances which should be run + for the Worker Nodes. + type: number + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + virtualNetworkId: + description: The ID of the Virtual Network where the Zookeeper + Nodes should be provisioned within. Changing this forces + a new resource to be created. + type: string + vmSize: + description: The Size of the Virtual Machine which should + be used as the Zookeeper Nodes. Possible values are + ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, + A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, + Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, + Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, + Standard_D3, Standard_D4, Standard_D11, Standard_D12, + Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, + Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, + Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, + Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, + Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, + Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, + Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, + Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, + Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, + Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, + Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, + Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, + Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, + Standard_E8a_V4, 
Standard_E16a_V4, Standard_E20a_V4, + Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, + Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, + Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, + Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, + Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, + Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. + Changing this forces a new resource to be created. + type: string + type: object + zookeeperNode: + description: A zookeeper_node block as defined below. + properties: + passwordSecretRef: + description: The Password associated with the local administrator + for the Zookeeper Nodes. Changing this forces a new + resource to be created. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + scriptActions: + description: The script action which will run on the cluster. + One or more script_actions blocks as defined above. + items: + properties: + name: + description: The name of the script action. + type: string + parameters: + description: The parameters for the script provided. + type: string + uri: + description: The URI to the script. + type: string + type: object + type: array + sshKeys: + description: A list of SSH Keys which should be used for + the local administrator on the Zookeeper Nodes. Changing + this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The ID of the Subnet within the Virtual Network + where the Zookeeper Nodes should be provisioned within. + Changing this forces a new resource to be created. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + virtualNetworkId: + description: The ID of the Virtual Network where the Zookeeper + Nodes should be provisioned within. Changing this forces + a new resource to be created. + type: string + vmSize: + description: The Size of the Virtual Machine which should + be used as the Zookeeper Nodes. Possible values are + ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, + A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, + Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, + Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, + Standard_D3, Standard_D4, Standard_D11, Standard_D12, + Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, + Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, + Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, + Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, + Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, + Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, + Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, + Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, + Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, + Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, + Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, + Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, + Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, + Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, + Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, + Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, + Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, + Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, + Standard_F64s_V2, Standard_F72s_V2, 
Standard_GS1, Standard_GS2, + Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. + Changing this forces a new resource to be created. + type: string + type: object + type: object + securityProfile: + description: A security_profile block as defined below. Changing + this forces a new resource to be created. + properties: + aaddsResourceId: + description: The resource ID of the Azure Active Directory + Domain Service. Changing this forces a new resource to be + created. + type: string + clusterUsersGroupDns: + description: A list of the distinguished names for the cluster + user groups. Changing this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + domainName: + description: The name of the Azure Active Directory Domain. + Changing this forces a new resource to be created. + type: string + domainUserPasswordSecretRef: + description: The user password of the Azure Active Directory + Domain. Changing this forces a new resource to be created. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + domainUsername: + description: The username of the Azure Active Directory Domain. + Changing this forces a new resource to be created. + type: string + ldapsUrls: + description: A list of the LDAPS URLs to communicate with + the Azure Active Directory. Changing this forces a new resource + to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + msiResourceId: + description: The User Assigned Identity for the HDInsight + Cluster. Changing this forces a new resource to be created. + type: string + required: + - domainUserPasswordSecretRef + type: object + storageAccount: + description: One or more storage_account block as defined below. 
+ items: + properties: + isDefault: + description: Is this the Default Storage Account for the + HDInsight Hadoop Cluster? Changing this forces a new resource + to be created. + type: boolean + storageAccountKeySecretRef: + description: The Access Key which should be used to connect + to the Storage Account. Changing this forces a new resource + to be created. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + storageContainerId: + description: The ID of the Storage Container. Changing this + forces a new resource to be created. + type: string + storageContainerIdRef: + description: Reference to a Container in storage to populate + storageContainerId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + storageContainerIdSelector: + description: Selector for a Container in storage to populate + storageContainerId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + storageResourceId: + description: The ID of the Storage Account. Changing this + forces a new resource to be created. + type: string + required: + - storageAccountKeySecretRef + type: object + type: array + storageAccountGen2: + description: A storage_account_gen2 block as defined below. + properties: + filesystemId: + description: The ID of the Gen2 Filesystem. Changing this + forces a new resource to be created. + type: string + isDefault: + description: Is this the Default Storage Account for the HDInsight + Hadoop Cluster? Changing this forces a new resource to be + created. + type: boolean + managedIdentityResourceId: + description: The ID of Managed Identity to use for accessing + the Gen2 filesystem. Changing this forces a new resource + to be created. + type: string + storageResourceId: + description: The ID of the Storage Account. 
Changing this + forces a new resource to be created. + type: string + type: object + tags: + additionalProperties: + type: string + description: A map of Tags which should be assigned to this HDInsight + Interactive Query Cluster. + type: object + x-kubernetes-map-type: granular + tier: + description: Specifies the Tier which should be used for this + HDInsight Interactive Query Cluster. Possible values are Standard + or Premium. Changing this forces a new resource to be created. + type: string + tlsMinVersion: + description: The minimal supported TLS version. Possible values + are 1.0, 1.1 or 1.2. Changing this forces a new resource to + be created. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + clusterVersion: + description: Specifies the Version of HDInsights which should + be used for this Cluster. Changing this forces a new resource + to be created. + type: string + componentVersion: + description: A component_version block as defined below. + properties: + interactiveHive: + description: The version of Interactive Query which should + be used for this HDInsight Interactive Query Cluster. Changing + this forces a new resource to be created. + type: string + type: object + computeIsolation: + description: A compute_isolation block as defined below. 
+ properties: + computeIsolationEnabled: + description: This field indicates whether enable compute isolation + or not. Possible values are true or false. + type: boolean + hostSku: + description: The name of the host SKU. + type: string + type: object + diskEncryption: + description: A disk_encryption block as defined below. + items: + properties: + encryptionAlgorithm: + description: This is an algorithm identifier for encryption. + Possible values are RSA1_5, RSA-OAEP, RSA-OAEP-256. + type: string + encryptionAtHostEnabled: + description: This is indicator to show whether resource + disk encryption is enabled. + type: boolean + keyVaultKeyId: + description: The ID of the key vault key. + type: string + keyVaultManagedIdentityId: + description: This is the resource ID of Managed Identity + used to access the key vault. + type: string + type: object + type: array + encryptionInTransitEnabled: + description: Whether encryption in transit is enabled for this + Cluster. Changing this forces a new resource to be created. + type: boolean + extension: + description: An extension block as defined below. + properties: + logAnalyticsWorkspaceId: + description: The workspace ID of the log analytics extension. + type: string + type: object + gateway: + description: A gateway block as defined below. + properties: + username: + description: The username used for the Ambari Portal. Changing + this forces a new resource to be created. + type: string + type: object + location: + description: Specifies the Azure Region which this HDInsight Interactive + Query Cluster should exist. Changing this forces a new resource + to be created. + type: string + metastores: + description: A metastores block as defined below. + properties: + ambari: + description: An ambari block as defined below. + properties: + databaseName: + description: The external Oozie metastore's existing SQL + database. Changing this forces a new resource to be + created. 
+ type: string + server: + description: The fully-qualified domain name (FQDN) of + the SQL server to use for the external Oozie metastore. + Changing this forces a new resource to be created. + type: string + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + type: object + hive: + description: A hive block as defined below. + properties: + databaseName: + description: The external Oozie metastore's existing SQL + database. Changing this forces a new resource to be + created. + type: string + server: + description: The fully-qualified domain name (FQDN) of + the SQL server to use for the external Oozie metastore. + Changing this forces a new resource to be created. + type: string + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + type: object + oozie: + description: An oozie block as defined below. + properties: + databaseName: + description: The external Oozie metastore's existing SQL + database. Changing this forces a new resource to be + created. + type: string + server: + description: The fully-qualified domain name (FQDN) of + the SQL server to use for the external Oozie metastore. + Changing this forces a new resource to be created. + type: string + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + type: object + type: object + monitor: + description: A monitor block as defined below. + properties: + logAnalyticsWorkspaceId: + description: The Operations Management Suite (OMS) workspace + ID. + type: string + type: object + network: + description: A network block as defined below. + properties: + connectionDirection: + description: The direction of the resource provider connection. + Possible values include Inbound or Outbound. 
Defaults to + Inbound. Changing this forces a new resource to be created. + type: string + privateLinkEnabled: + description: Is the private link enabled? Possible values + include true or false. Defaults to false. Changing this + forces a new resource to be created. + type: boolean + type: object + roles: + description: A roles block as defined below. + properties: + headNode: + description: A head_node block as defined above. + properties: + scriptActions: + description: The script action which will run on the cluster. + One or more script_actions blocks as defined above. + items: + properties: + name: + description: The name of the script action. + type: string + parameters: + description: The parameters for the script provided. + type: string + uri: + description: The URI to the script. + type: string + type: object + type: array + sshKeys: + description: A list of SSH Keys which should be used for + the local administrator on the Zookeeper Nodes. Changing + this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The ID of the Subnet within the Virtual Network + where the Zookeeper Nodes should be provisioned within. + Changing this forces a new resource to be created. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + virtualNetworkId: + description: The ID of the Virtual Network where the Zookeeper + Nodes should be provisioned within. Changing this forces + a new resource to be created. + type: string + vmSize: + description: The Size of the Virtual Machine which should + be used as the Zookeeper Nodes. 
Possible values are + ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, + A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, + Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, + Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, + Standard_D3, Standard_D4, Standard_D11, Standard_D12, + Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, + Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, + Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, + Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, + Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, + Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, + Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, + Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, + Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, + Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, + Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, + Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, + Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, + Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, + Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, + Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, + Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, + Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, + Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, + Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. + Changing this forces a new resource to be created. + type: string + type: object + workerNode: + description: A worker_node block as defined below. + properties: + autoscale: + description: A autoscale block as defined below. + properties: + capacity: + properties: + maxInstanceCount: + type: number + minInstanceCount: + type: number + type: object + recurrence: + description: A recurrence block as defined below. 
+ properties: + schedule: + description: A list of schedule blocks as defined + below. + items: + properties: + days: + description: The days of the week to perform + autoscale. Possible values are Monday, + Tuesday, Wednesday, Thursday, Friday, + Saturday and Sunday. + items: + type: string + type: array + targetInstanceCount: + description: The number of instances which + should be run for the Worker Nodes. + type: number + time: + description: The time of day to perform + the autoscale in 24hour format. + type: string + type: object + type: array + timezone: + description: The time zone for the autoscale schedule + times. + type: string + type: object + type: object + scriptActions: + description: The script action which will run on the cluster. + One or more script_actions blocks as defined above. + items: + properties: + name: + description: The name of the script action. + type: string + parameters: + description: The parameters for the script provided. + type: string + uri: + description: The URI to the script. + type: string + type: object + type: array + sshKeys: + description: A list of SSH Keys which should be used for + the local administrator on the Zookeeper Nodes. Changing + this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The ID of the Subnet within the Virtual Network + where the Zookeeper Nodes should be provisioned within. + Changing this forces a new resource to be created. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + targetInstanceCount: + description: The number of instances which should be run + for the Worker Nodes. + type: number + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. 
+ type: string + virtualNetworkId: + description: The ID of the Virtual Network where the Zookeeper + Nodes should be provisioned within. Changing this forces + a new resource to be created. + type: string + vmSize: + description: The Size of the Virtual Machine which should + be used as the Zookeeper Nodes. Possible values are + ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, + A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, + Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, + Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, + Standard_D3, Standard_D4, Standard_D11, Standard_D12, + Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, + Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, + Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, + Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, + Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, + Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, + Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, + Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, + Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, + Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, + Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, + Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, + Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, + Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, + Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, + Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, + Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, + Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, + Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, + Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. + Changing this forces a new resource to be created. 
+ type: string + type: object + zookeeperNode: + description: A zookeeper_node block as defined below. + properties: + scriptActions: + description: The script action which will run on the cluster. + One or more script_actions blocks as defined above. + items: + properties: + name: + description: The name of the script action. + type: string + parameters: + description: The parameters for the script provided. + type: string + uri: + description: The URI to the script. + type: string + type: object + type: array + sshKeys: + description: A list of SSH Keys which should be used for + the local administrator on the Zookeeper Nodes. Changing + this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The ID of the Subnet within the Virtual Network + where the Zookeeper Nodes should be provisioned within. + Changing this forces a new resource to be created. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + virtualNetworkId: + description: The ID of the Virtual Network where the Zookeeper + Nodes should be provisioned within. Changing this forces + a new resource to be created. + type: string + vmSize: + description: The Size of the Virtual Machine which should + be used as the Zookeeper Nodes. 
Possible values are + ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, + A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, + Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, + Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, + Standard_D3, Standard_D4, Standard_D11, Standard_D12, + Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, + Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, + Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, + Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, + Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, + Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, + Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, + Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, + Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, + Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, + Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, + Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, + Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, + Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, + Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, + Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, + Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, + Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, + Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, + Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. + Changing this forces a new resource to be created. + type: string + type: object + type: object + securityProfile: + description: A security_profile block as defined below. Changing + this forces a new resource to be created. + properties: + aaddsResourceId: + description: The resource ID of the Azure Active Directory + Domain Service. Changing this forces a new resource to be + created. 
+ type: string + clusterUsersGroupDns: + description: A list of the distinguished names for the cluster + user groups. Changing this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + domainName: + description: The name of the Azure Active Directory Domain. + Changing this forces a new resource to be created. + type: string + domainUsername: + description: The username of the Azure Active Directory Domain. + Changing this forces a new resource to be created. + type: string + ldapsUrls: + description: A list of the LDAPS URLs to communicate with + the Azure Active Directory. Changing this forces a new resource + to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + msiResourceId: + description: The User Assigned Identity for the HDInsight + Cluster. Changing this forces a new resource to be created. + type: string + type: object + storageAccount: + description: One or more storage_account block as defined below. + items: + properties: + isDefault: + description: Is this the Default Storage Account for the + HDInsight Hadoop Cluster? Changing this forces a new resource + to be created. + type: boolean + storageContainerId: + description: The ID of the Storage Container. Changing this + forces a new resource to be created. + type: string + storageContainerIdRef: + description: Reference to a Container in storage to populate + storageContainerId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + storageContainerIdSelector: + description: Selector for a Container in storage to populate + storageContainerId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + storageResourceId: + description: The ID of the Storage Account. Changing this + forces a new resource to be created. + type: string + type: object + type: array + storageAccountGen2: + description: A storage_account_gen2 block as defined below. + properties: + filesystemId: + description: The ID of the Gen2 Filesystem. 
Changing this + forces a new resource to be created. + type: string + isDefault: + description: Is this the Default Storage Account for the HDInsight + Hadoop Cluster? Changing this forces a new resource to be + created. + type: boolean + managedIdentityResourceId: + description: The ID of Managed Identity to use for accessing + the Gen2 filesystem. Changing this forces a new resource + to be created. + type: string + storageResourceId: + description: The ID of the Storage Account. Changing this + forces a new resource to be created. + type: string + type: object + tags: + additionalProperties: + type: string + description: A map of Tags which should be assigned to this HDInsight + Interactive Query Cluster. + type: object + x-kubernetes-map-type: granular + tier: + description: Specifies the Tier which should be used for this + HDInsight Interactive Query Cluster. Possible values are Standard + or Premium. Changing this forces a new resource to be created. + type: string + tlsMinVersion: + description: The minimal supported TLS version. Possible values + are 1.0, 1.1 or 1.2. Changing this forces a new resource to + be created. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.clusterVersion is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.clusterVersion) + || (has(self.initProvider) && has(self.initProvider.clusterVersion))' + - message: spec.forProvider.componentVersion is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.componentVersion) + || (has(self.initProvider) && has(self.initProvider.componentVersion))' + - message: spec.forProvider.gateway is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.gateway) + || (has(self.initProvider) && has(self.initProvider.gateway))' + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || 
''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.roles is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.roles) + || (has(self.initProvider) && has(self.initProvider.roles))' + - message: spec.forProvider.tier is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.tier) + || (has(self.initProvider) && has(self.initProvider.tier))' + status: + description: InteractiveQueryClusterStatus defines the observed state + of InteractiveQueryCluster. + properties: + atProvider: + properties: + clusterVersion: + description: Specifies the Version of HDInsights which should + be used for this Cluster. Changing this forces a new resource + to be created. + type: string + componentVersion: + description: A component_version block as defined below. + properties: + interactiveHive: + description: The version of Interactive Query which should + be used for this HDInsight Interactive Query Cluster. Changing + this forces a new resource to be created. + type: string + type: object + computeIsolation: + description: A compute_isolation block as defined below. + properties: + computeIsolationEnabled: + description: This field indicates whether enable compute isolation + or not. Possible values are true or false. + type: boolean + hostSku: + description: The name of the host SKU. + type: string + type: object + diskEncryption: + description: A disk_encryption block as defined below. + items: + properties: + encryptionAlgorithm: + description: This is an algorithm identifier for encryption. + Possible values are RSA1_5, RSA-OAEP, RSA-OAEP-256. 
+ type: string + encryptionAtHostEnabled: + description: This is indicator to show whether resource + disk encryption is enabled. + type: boolean + keyVaultKeyId: + description: The ID of the key vault key. + type: string + keyVaultManagedIdentityId: + description: This is the resource ID of Managed Identity + used to access the key vault. + type: string + type: object + type: array + encryptionInTransitEnabled: + description: Whether encryption in transit is enabled for this + Cluster. Changing this forces a new resource to be created. + type: boolean + extension: + description: An extension block as defined below. + properties: + logAnalyticsWorkspaceId: + description: The workspace ID of the log analytics extension. + type: string + type: object + gateway: + description: A gateway block as defined below. + properties: + username: + description: The username used for the Ambari Portal. Changing + this forces a new resource to be created. + type: string + type: object + httpsEndpoint: + description: The HTTPS Connectivity Endpoint for this HDInsight + Interactive Query Cluster. + type: string + id: + description: The ID of the HDInsight Interactive Query Cluster. + type: string + location: + description: Specifies the Azure Region which this HDInsight Interactive + Query Cluster should exist. Changing this forces a new resource + to be created. + type: string + metastores: + description: A metastores block as defined below. + properties: + ambari: + description: An ambari block as defined below. + properties: + databaseName: + description: The external Oozie metastore's existing SQL + database. Changing this forces a new resource to be + created. + type: string + server: + description: The fully-qualified domain name (FQDN) of + the SQL server to use for the external Oozie metastore. + Changing this forces a new resource to be created. + type: string + username: + description: The Username of the local administrator for + the Zookeeper Nodes. 
Changing this forces a new resource + to be created. + type: string + type: object + hive: + description: A hive block as defined below. + properties: + databaseName: + description: The external Oozie metastore's existing SQL + database. Changing this forces a new resource to be + created. + type: string + server: + description: The fully-qualified domain name (FQDN) of + the SQL server to use for the external Oozie metastore. + Changing this forces a new resource to be created. + type: string + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + type: object + oozie: + description: An oozie block as defined below. + properties: + databaseName: + description: The external Oozie metastore's existing SQL + database. Changing this forces a new resource to be + created. + type: string + server: + description: The fully-qualified domain name (FQDN) of + the SQL server to use for the external Oozie metastore. + Changing this forces a new resource to be created. + type: string + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + type: object + type: object + monitor: + description: A monitor block as defined below. + properties: + logAnalyticsWorkspaceId: + description: The Operations Management Suite (OMS) workspace + ID. + type: string + type: object + network: + description: A network block as defined below. + properties: + connectionDirection: + description: The direction of the resource provider connection. + Possible values include Inbound or Outbound. Defaults to + Inbound. Changing this forces a new resource to be created. + type: string + privateLinkEnabled: + description: Is the private link enabled? Possible values + include true or false. Defaults to false. Changing this + forces a new resource to be created. 
+ type: boolean + type: object + resourceGroupName: + description: Specifies the name of the Resource Group in which + this HDInsight Interactive Query Cluster should exist. Changing + this forces a new resource to be created. + type: string + roles: + description: A roles block as defined below. + properties: + headNode: + description: A head_node block as defined above. + properties: + scriptActions: + description: The script action which will run on the cluster. + One or more script_actions blocks as defined above. + items: + properties: + name: + description: The name of the script action. + type: string + parameters: + description: The parameters for the script provided. + type: string + uri: + description: The URI to the script. + type: string + type: object + type: array + sshKeys: + description: A list of SSH Keys which should be used for + the local administrator on the Zookeeper Nodes. Changing + this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The ID of the Subnet within the Virtual Network + where the Zookeeper Nodes should be provisioned within. + Changing this forces a new resource to be created. + type: string + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + virtualNetworkId: + description: The ID of the Virtual Network where the Zookeeper + Nodes should be provisioned within. Changing this forces + a new resource to be created. + type: string + vmSize: + description: The Size of the Virtual Machine which should + be used as the Zookeeper Nodes. 
Possible values are + ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, + A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, + Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, + Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, + Standard_D3, Standard_D4, Standard_D11, Standard_D12, + Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, + Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, + Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, + Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, + Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, + Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, + Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, + Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, + Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, + Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, + Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, + Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, + Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, + Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, + Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, + Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, + Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, + Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, + Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, + Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. + Changing this forces a new resource to be created. + type: string + type: object + workerNode: + description: A worker_node block as defined below. + properties: + autoscale: + description: A autoscale block as defined below. + properties: + capacity: + properties: + maxInstanceCount: + type: number + minInstanceCount: + type: number + type: object + recurrence: + description: A recurrence block as defined below. 
+ properties: + schedule: + description: A list of schedule blocks as defined + below. + items: + properties: + days: + description: The days of the week to perform + autoscale. Possible values are Monday, + Tuesday, Wednesday, Thursday, Friday, + Saturday and Sunday. + items: + type: string + type: array + targetInstanceCount: + description: The number of instances which + should be run for the Worker Nodes. + type: number + time: + description: The time of day to perform + the autoscale in 24hour format. + type: string + type: object + type: array + timezone: + description: The time zone for the autoscale schedule + times. + type: string + type: object + type: object + scriptActions: + description: The script action which will run on the cluster. + One or more script_actions blocks as defined above. + items: + properties: + name: + description: The name of the script action. + type: string + parameters: + description: The parameters for the script provided. + type: string + uri: + description: The URI to the script. + type: string + type: object + type: array + sshKeys: + description: A list of SSH Keys which should be used for + the local administrator on the Zookeeper Nodes. Changing + this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The ID of the Subnet within the Virtual Network + where the Zookeeper Nodes should be provisioned within. + Changing this forces a new resource to be created. + type: string + targetInstanceCount: + description: The number of instances which should be run + for the Worker Nodes. + type: number + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + virtualNetworkId: + description: The ID of the Virtual Network where the Zookeeper + Nodes should be provisioned within. Changing this forces + a new resource to be created. 
+ type: string + vmSize: + description: The Size of the Virtual Machine which should + be used as the Zookeeper Nodes. Possible values are + ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, + A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, + Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, + Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, + Standard_D3, Standard_D4, Standard_D11, Standard_D12, + Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, + Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, + Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, + Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, + Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, + Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, + Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, + Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, + Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, + Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, + Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, + Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, + Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, + Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, + Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, + Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, + Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, + Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, + Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, + Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. + Changing this forces a new resource to be created. + type: string + type: object + zookeeperNode: + description: A zookeeper_node block as defined below. + properties: + scriptActions: + description: The script action which will run on the cluster. + One or more script_actions blocks as defined above. 
+ items: + properties: + name: + description: The name of the script action. + type: string + parameters: + description: The parameters for the script provided. + type: string + uri: + description: The URI to the script. + type: string + type: object + type: array + sshKeys: + description: A list of SSH Keys which should be used for + the local administrator on the Zookeeper Nodes. Changing + this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The ID of the Subnet within the Virtual Network + where the Zookeeper Nodes should be provisioned within. + Changing this forces a new resource to be created. + type: string + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + virtualNetworkId: + description: The ID of the Virtual Network where the Zookeeper + Nodes should be provisioned within. Changing this forces + a new resource to be created. + type: string + vmSize: + description: The Size of the Virtual Machine which should + be used as the Zookeeper Nodes. 
Possible values are + ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, + A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, + Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, + Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, + Standard_D3, Standard_D4, Standard_D11, Standard_D12, + Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, + Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, + Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, + Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, + Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, + Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, + Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, + Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, + Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, + Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, + Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, + Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, + Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, + Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, + Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, + Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, + Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, + Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, + Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, + Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. + Changing this forces a new resource to be created. + type: string + type: object + type: object + securityProfile: + description: A security_profile block as defined below. Changing + this forces a new resource to be created. + properties: + aaddsResourceId: + description: The resource ID of the Azure Active Directory + Domain Service. Changing this forces a new resource to be + created. 
+ type: string + clusterUsersGroupDns: + description: A list of the distinguished names for the cluster + user groups. Changing this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + domainName: + description: The name of the Azure Active Directory Domain. + Changing this forces a new resource to be created. + type: string + domainUsername: + description: The username of the Azure Active Directory Domain. + Changing this forces a new resource to be created. + type: string + ldapsUrls: + description: A list of the LDAPS URLs to communicate with + the Azure Active Directory. Changing this forces a new resource + to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + msiResourceId: + description: The User Assigned Identity for the HDInsight + Cluster. Changing this forces a new resource to be created. + type: string + type: object + sshEndpoint: + description: The SSH Connectivity Endpoint for this HDInsight + Interactive Query Cluster. + type: string + storageAccount: + description: One or more storage_account block as defined below. + items: + properties: + isDefault: + description: Is this the Default Storage Account for the + HDInsight Hadoop Cluster? Changing this forces a new resource + to be created. + type: boolean + storageContainerId: + description: The ID of the Storage Container. Changing this + forces a new resource to be created. + type: string + storageResourceId: + description: The ID of the Storage Account. Changing this + forces a new resource to be created. + type: string + type: object + type: array + storageAccountGen2: + description: A storage_account_gen2 block as defined below. + properties: + filesystemId: + description: The ID of the Gen2 Filesystem. Changing this + forces a new resource to be created. + type: string + isDefault: + description: Is this the Default Storage Account for the HDInsight + Hadoop Cluster? 
Changing this forces a new resource to be + created. + type: boolean + managedIdentityResourceId: + description: The ID of Managed Identity to use for accessing + the Gen2 filesystem. Changing this forces a new resource + to be created. + type: string + storageResourceId: + description: The ID of the Storage Account. Changing this + forces a new resource to be created. + type: string + type: object + tags: + additionalProperties: + type: string + description: A map of Tags which should be assigned to this HDInsight + Interactive Query Cluster. + type: object + x-kubernetes-map-type: granular + tier: + description: Specifies the Tier which should be used for this + HDInsight Interactive Query Cluster. Possible values are Standard + or Premium. Changing this forces a new resource to be created. + type: string + tlsMinVersion: + description: The minimal supported TLS version. Possible values + are 1.0, 1.1 or 1.2. Changing this forces a new resource to + be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. 
+ type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/hdinsight.azure.upbound.io_kafkaclusters.yaml b/package/crds/hdinsight.azure.upbound.io_kafkaclusters.yaml index 583454604..4cfab61a3 100644 --- a/package/crds/hdinsight.azure.upbound.io_kafkaclusters.yaml +++ b/package/crds/hdinsight.azure.upbound.io_kafkaclusters.yaml @@ -3330,3 +3330,3129 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: KafkaCluster is the Schema for the KafkaClusters API. Manages + a HDInsight Kafka Cluster. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: KafkaClusterSpec defines the desired state of KafkaCluster + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + clusterVersion: + description: Specifies the Version of HDInsights which should + be used for this Cluster. Changing this forces a new resource + to be created. + type: string + componentVersion: + description: A component_version block as defined below. + properties: + kafka: + description: The version of Kafka which should be used for + this HDInsight Kafka Cluster. Changing this forces a new + resource to be created. + type: string + type: object + computeIsolation: + description: A compute_isolation block as defined below. + properties: + computeIsolationEnabled: + description: This field indicates whether enable compute isolation + or not. Possible values are true or false. 
+ type: boolean + hostSku: + description: The name of the host SKU. + type: string + type: object + diskEncryption: + description: One or more disk_encryption block as defined below. + items: + properties: + encryptionAlgorithm: + description: This is an algorithm identifier for encryption. + Possible values are RSA1_5, RSA-OAEP, RSA-OAEP-256. + type: string + encryptionAtHostEnabled: + description: This is indicator to show whether resource + disk encryption is enabled. + type: boolean + keyVaultKeyId: + description: The ID of the key vault key. + type: string + keyVaultManagedIdentityId: + description: This is the resource ID of Managed Identity + used to access the key vault. + type: string + type: object + type: array + encryptionInTransitEnabled: + description: Whether encryption in transit is enabled for this + HDInsight Kafka Cluster. Changing this forces a new resource + to be created. + type: boolean + extension: + description: An extension block as defined below. + properties: + logAnalyticsWorkspaceId: + description: The workspace ID of the log analytics extension. + type: string + primaryKeySecretRef: + description: The workspace key of the log analytics extension. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + required: + - primaryKeySecretRef + type: object + gateway: + description: A gateway block as defined below. + properties: + passwordSecretRef: + description: The password used for the Ambari Portal. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + username: + description: The username used for the Ambari Portal. 
Changing + this forces a new resource to be created. + type: string + required: + - passwordSecretRef + type: object + location: + description: Specifies the Azure Region which this HDInsight Kafka + Cluster should exist. Changing this forces a new resource to + be created. + type: string + metastores: + description: A metastores block as defined below. + properties: + ambari: + description: An ambari block as defined below. + properties: + databaseName: + description: The external Oozie metastore's existing SQL + database. Changing this forces a new resource to be + created. + type: string + passwordSecretRef: + description: The Password associated with the local administrator + for the Zookeeper Nodes. Changing this forces a new + resource to be created. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + server: + description: The fully-qualified domain name (FQDN) of + the SQL server to use for the external Oozie metastore. + Changing this forces a new resource to be created. + type: string + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + required: + - passwordSecretRef + type: object + hive: + description: A hive block as defined below. + properties: + databaseName: + description: The external Oozie metastore's existing SQL + database. Changing this forces a new resource to be + created. + type: string + passwordSecretRef: + description: The Password associated with the local administrator + for the Zookeeper Nodes. Changing this forces a new + resource to be created. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - key + - name + - namespace + type: object + server: + description: The fully-qualified domain name (FQDN) of + the SQL server to use for the external Oozie metastore. + Changing this forces a new resource to be created. + type: string + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + required: + - passwordSecretRef + type: object + oozie: + description: An oozie block as defined below. + properties: + databaseName: + description: The external Oozie metastore's existing SQL + database. Changing this forces a new resource to be + created. + type: string + passwordSecretRef: + description: The Password associated with the local administrator + for the Zookeeper Nodes. Changing this forces a new + resource to be created. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + server: + description: The fully-qualified domain name (FQDN) of + the SQL server to use for the external Oozie metastore. + Changing this forces a new resource to be created. + type: string + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + required: + - passwordSecretRef + type: object + type: object + monitor: + description: A monitor block as defined below. + properties: + logAnalyticsWorkspaceId: + description: The Operations Management Suite (OMS) workspace + ID. + type: string + primaryKeySecretRef: + description: The Operations Management Suite (OMS) workspace + key. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. 
+ type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + required: + - primaryKeySecretRef + type: object + network: + description: A network block as defined below. + properties: + connectionDirection: + description: The direction of the resource provider connection. + Possible values include Inbound or Outbound. Defaults to + Inbound. Changing this forces a new resource to be created. + type: string + privateLinkEnabled: + description: Is the private link enabled? Possible values + include true or false. Defaults to false. Changing this + forces a new resource to be created. + type: boolean + type: object + resourceGroupName: + description: Specifies the name of the Resource Group in which + this HDInsight Kafka Cluster should exist. Changing this forces + a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + restProxy: + description: A rest_proxy block as defined below. + properties: + securityGroupId: + description: The Azure Active Directory Security Group ID. + Changing this forces a new resource to be created. + type: string + securityGroupName: + description: The Azure Active Directory Security Group name. + Changing this forces a new resource to be created. + type: string + type: object + roles: + description: A roles block as defined below. + properties: + headNode: + description: A head_node block as defined above. + properties: + passwordSecretRef: + description: The Password associated with the local administrator + for the Zookeeper Nodes. 
Changing this forces a new + resource to be created. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + scriptActions: + description: The script action which will run on the cluster. + One or more script_actions blocks as defined below. + items: + properties: + name: + description: The name of the script action. + type: string + parameters: + description: The parameters for the script provided. + type: string + uri: + description: The URI to the script. + type: string + type: object + type: array + sshKeys: + description: A list of SSH Keys which should be used for + the local administrator on the Zookeeper Nodes. Changing + this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The ID of the Subnet within the Virtual Network + where the Zookeeper Nodes should be provisioned within. + Changing this forces a new resource to be created. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + virtualNetworkId: + description: The ID of the Virtual Network where the Zookeeper + Nodes should be provisioned within. Changing this forces + a new resource to be created. + type: string + vmSize: + description: The Size of the Virtual Machine which should + be used as the Zookeeper Nodes. 
Possible values are + ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, + A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, + Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, + Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, + Standard_D3, Standard_D4, Standard_D11, Standard_D12, + Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, + Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, + Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, + Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, + Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, + Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, + Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, + Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, + Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, + Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, + Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, + Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, + Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, + Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, + Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, + Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, + Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, + Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, + Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, + Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. + Changing this forces a new resource to be created. + type: string + type: object + kafkaManagementNode: + description: A kafka_management_node block as defined below. + properties: + passwordSecretRef: + description: The Password associated with the local administrator + for the Zookeeper Nodes. Changing this forces a new + resource to be created. + properties: + key: + description: The key to select. 
+ type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + scriptActions: + description: The script action which will run on the cluster. + One or more script_actions blocks as defined below. + items: + properties: + name: + description: The name of the script action. + type: string + parameters: + description: The parameters for the script provided. + type: string + uri: + description: The URI to the script. + type: string + type: object + type: array + sshKeys: + description: A list of SSH Keys which should be used for + the local administrator on the Zookeeper Nodes. Changing + this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The ID of the Subnet within the Virtual Network + where the Zookeeper Nodes should be provisioned within. + Changing this forces a new resource to be created. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + virtualNetworkId: + description: The ID of the Virtual Network where the Zookeeper + Nodes should be provisioned within. Changing this forces + a new resource to be created. + type: string + vmSize: + description: The Size of the Virtual Machine which should + be used as the Zookeeper Nodes. 
Possible values are + ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, + A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, + Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, + Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, + Standard_D3, Standard_D4, Standard_D11, Standard_D12, + Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, + Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, + Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, + Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, + Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, + Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, + Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, + Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, + Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, + Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, + Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, + Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, + Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, + Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, + Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, + Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, + Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, + Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, + Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, + Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. + Changing this forces a new resource to be created. + type: string + type: object + workerNode: + description: A worker_node block as defined below. + properties: + numberOfDisksPerNode: + description: The number of Data Disks which should be + assigned to each Worker Node, which can be between 1 + and 8. Changing this forces a new resource to be created. 
+ type: number + passwordSecretRef: + description: The Password associated with the local administrator + for the Zookeeper Nodes. Changing this forces a new + resource to be created. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + scriptActions: + description: The script action which will run on the cluster. + One or more script_actions blocks as defined below. + items: + properties: + name: + description: The name of the script action. + type: string + parameters: + description: The parameters for the script provided. + type: string + uri: + description: The URI to the script. + type: string + type: object + type: array + sshKeys: + description: A list of SSH Keys which should be used for + the local administrator on the Zookeeper Nodes. Changing + this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The ID of the Subnet within the Virtual Network + where the Zookeeper Nodes should be provisioned within. + Changing this forces a new resource to be created. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + targetInstanceCount: + description: The number of instances which should be run + for the Worker Nodes. + type: number + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + virtualNetworkId: + description: The ID of the Virtual Network where the Zookeeper + Nodes should be provisioned within. Changing this forces + a new resource to be created. 
+ type: string + vmSize: + description: The Size of the Virtual Machine which should + be used as the Zookeeper Nodes. Possible values are + ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, + A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, + Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, + Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, + Standard_D3, Standard_D4, Standard_D11, Standard_D12, + Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, + Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, + Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, + Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, + Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, + Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, + Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, + Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, + Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, + Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, + Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, + Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, + Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, + Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, + Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, + Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, + Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, + Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, + Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, + Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. + Changing this forces a new resource to be created. + type: string + type: object + zookeeperNode: + description: A zookeeper_node block as defined below. + properties: + passwordSecretRef: + description: The Password associated with the local administrator + for the Zookeeper Nodes. Changing this forces a new + resource to be created. 
+ properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + scriptActions: + description: The script action which will run on the cluster. + One or more script_actions blocks as defined below. + items: + properties: + name: + description: The name of the script action. + type: string + parameters: + description: The parameters for the script provided. + type: string + uri: + description: The URI to the script. + type: string + type: object + type: array + sshKeys: + description: A list of SSH Keys which should be used for + the local administrator on the Zookeeper Nodes. Changing + this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The ID of the Subnet within the Virtual Network + where the Zookeeper Nodes should be provisioned within. + Changing this forces a new resource to be created. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + virtualNetworkId: + description: The ID of the Virtual Network where the Zookeeper + Nodes should be provisioned within. Changing this forces + a new resource to be created. + type: string + vmSize: + description: The Size of the Virtual Machine which should + be used as the Zookeeper Nodes. 
Possible values are + ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, + A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, + Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, + Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, + Standard_D3, Standard_D4, Standard_D11, Standard_D12, + Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, + Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, + Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, + Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, + Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, + Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, + Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, + Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, + Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, + Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, + Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, + Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, + Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, + Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, + Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, + Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, + Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, + Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, + Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, + Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. + Changing this forces a new resource to be created. + type: string + type: object + type: object + securityProfile: + description: A security_profile block as defined below. Changing + this forces a new resource to be created. + properties: + aaddsResourceId: + description: The resource ID of the Azure Active Directory + Domain Service. Changing this forces a new resource to be + created. 
+ type: string + clusterUsersGroupDns: + description: A list of the distinguished names for the cluster + user groups. Changing this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + domainName: + description: The name of the Azure Active Directory Domain. + Changing this forces a new resource to be created. + type: string + domainUserPasswordSecretRef: + description: The user password of the Azure Active Directory + Domain. Changing this forces a new resource to be created. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + domainUsername: + description: The username of the Azure Active Directory Domain. + Changing this forces a new resource to be created. + type: string + ldapsUrls: + description: A list of the LDAPS URLs to communicate with + the Azure Active Directory. Changing this forces a new resource + to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + msiResourceId: + description: The User Assigned Identity for the HDInsight + Cluster. Changing this forces a new resource to be created. + type: string + required: + - domainUserPasswordSecretRef + type: object + storageAccount: + description: One or more storage_account block as defined below. + items: + properties: + isDefault: + description: Is this the Default Storage Account for the + HDInsight Hadoop Cluster? Changing this forces a new resource + to be created. + type: boolean + storageAccountKeySecretRef: + description: The Access Key which should be used to connect + to the Storage Account. Changing this forces a new resource + to be created. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. 
+ type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + storageContainerId: + description: The ID of the Storage Container. Changing this + forces a new resource to be created. + type: string + storageContainerIdRef: + description: Reference to a Container in storage to populate + storageContainerId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + storageContainerIdSelector: + description: Selector for a Container in storage to populate + storageContainerId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + storageResourceId: + description: The ID of the Storage Account. Changing this + forces a new resource to be created. + type: string + required: + - storageAccountKeySecretRef + type: object + type: array + storageAccountGen2: + description: A storage_account_gen2 block as defined below. + properties: + filesystemId: + description: The ID of the Gen2 Filesystem. Changing this + forces a new resource to be created. + type: string + isDefault: + description: Is this the Default Storage Account for the HDInsight + Hadoop Cluster? Changing this forces a new resource to be + created. + type: boolean + managedIdentityResourceId: + description: The ID of Managed Identity to use for accessing + the Gen2 filesystem. Changing this forces a new resource + to be created. + type: string + storageResourceId: + description: The ID of the Storage Account. Changing this + forces a new resource to be created. + type: string + type: object + tags: + additionalProperties: + type: string + description: A map of Tags which should be assigned to this HDInsight + Kafka Cluster. + type: object + x-kubernetes-map-type: granular + tier: + description: Specifies the Tier which should be used for this + HDInsight Kafka Cluster. Possible values are Standard or Premium. + Changing this forces a new resource to be created. + type: string + tlsMinVersion: + description: The minimal supported TLS version. 
Possible values + are 1.0, 1.1 or 1.2. Changing this forces a new resource to + be created. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + clusterVersion: + description: Specifies the Version of HDInsights which should + be used for this Cluster. Changing this forces a new resource + to be created. + type: string + componentVersion: + description: A component_version block as defined below. + properties: + kafka: + description: The version of Kafka which should be used for + this HDInsight Kafka Cluster. Changing this forces a new + resource to be created. + type: string + type: object + computeIsolation: + description: A compute_isolation block as defined below. + properties: + computeIsolationEnabled: + description: This field indicates whether enable compute isolation + or not. Possible values are true or false. + type: boolean + hostSku: + description: The name of the host SKU. + type: string + type: object + diskEncryption: + description: One or more disk_encryption block as defined below. + items: + properties: + encryptionAlgorithm: + description: This is an algorithm identifier for encryption. + Possible values are RSA1_5, RSA-OAEP, RSA-OAEP-256. + type: string + encryptionAtHostEnabled: + description: This is indicator to show whether resource + disk encryption is enabled. 
+ type: boolean + keyVaultKeyId: + description: The ID of the key vault key. + type: string + keyVaultManagedIdentityId: + description: This is the resource ID of Managed Identity + used to access the key vault. + type: string + type: object + type: array + encryptionInTransitEnabled: + description: Whether encryption in transit is enabled for this + HDInsight Kafka Cluster. Changing this forces a new resource + to be created. + type: boolean + extension: + description: An extension block as defined below. + properties: + logAnalyticsWorkspaceId: + description: The workspace ID of the log analytics extension. + type: string + type: object + gateway: + description: A gateway block as defined below. + properties: + username: + description: The username used for the Ambari Portal. Changing + this forces a new resource to be created. + type: string + type: object + location: + description: Specifies the Azure Region which this HDInsight Kafka + Cluster should exist. Changing this forces a new resource to + be created. + type: string + metastores: + description: A metastores block as defined below. + properties: + ambari: + description: An ambari block as defined below. + properties: + databaseName: + description: The external Oozie metastore's existing SQL + database. Changing this forces a new resource to be + created. + type: string + server: + description: The fully-qualified domain name (FQDN) of + the SQL server to use for the external Oozie metastore. + Changing this forces a new resource to be created. + type: string + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + type: object + hive: + description: A hive block as defined below. + properties: + databaseName: + description: The external Oozie metastore's existing SQL + database. Changing this forces a new resource to be + created. 
+ type: string + server: + description: The fully-qualified domain name (FQDN) of + the SQL server to use for the external Oozie metastore. + Changing this forces a new resource to be created. + type: string + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + type: object + oozie: + description: An oozie block as defined below. + properties: + databaseName: + description: The external Oozie metastore's existing SQL + database. Changing this forces a new resource to be + created. + type: string + server: + description: The fully-qualified domain name (FQDN) of + the SQL server to use for the external Oozie metastore. + Changing this forces a new resource to be created. + type: string + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + type: object + type: object + monitor: + description: A monitor block as defined below. + properties: + logAnalyticsWorkspaceId: + description: The Operations Management Suite (OMS) workspace + ID. + type: string + type: object + network: + description: A network block as defined below. + properties: + connectionDirection: + description: The direction of the resource provider connection. + Possible values include Inbound or Outbound. Defaults to + Inbound. Changing this forces a new resource to be created. + type: string + privateLinkEnabled: + description: Is the private link enabled? Possible values + include true or false. Defaults to false. Changing this + forces a new resource to be created. + type: boolean + type: object + restProxy: + description: A rest_proxy block as defined below. + properties: + securityGroupId: + description: The Azure Active Directory Security Group ID. + Changing this forces a new resource to be created. 
+ type: string + securityGroupName: + description: The Azure Active Directory Security Group name. + Changing this forces a new resource to be created. + type: string + type: object + roles: + description: A roles block as defined below. + properties: + headNode: + description: A head_node block as defined above. + properties: + scriptActions: + description: The script action which will run on the cluster. + One or more script_actions blocks as defined below. + items: + properties: + name: + description: The name of the script action. + type: string + parameters: + description: The parameters for the script provided. + type: string + uri: + description: The URI to the script. + type: string + type: object + type: array + sshKeys: + description: A list of SSH Keys which should be used for + the local administrator on the Zookeeper Nodes. Changing + this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The ID of the Subnet within the Virtual Network + where the Zookeeper Nodes should be provisioned within. + Changing this forces a new resource to be created. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + virtualNetworkId: + description: The ID of the Virtual Network where the Zookeeper + Nodes should be provisioned within. Changing this forces + a new resource to be created. + type: string + vmSize: + description: The Size of the Virtual Machine which should + be used as the Zookeeper Nodes. 
Possible values are + ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, + A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, + Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, + Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, + Standard_D3, Standard_D4, Standard_D11, Standard_D12, + Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, + Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, + Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, + Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, + Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, + Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, + Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, + Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, + Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, + Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, + Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, + Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, + Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, + Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, + Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, + Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, + Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, + Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, + Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, + Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. + Changing this forces a new resource to be created. + type: string + type: object + kafkaManagementNode: + description: A kafka_management_node block as defined below. + properties: + scriptActions: + description: The script action which will run on the cluster. + One or more script_actions blocks as defined below. + items: + properties: + name: + description: The name of the script action. 
+ type: string + parameters: + description: The parameters for the script provided. + type: string + uri: + description: The URI to the script. + type: string + type: object + type: array + sshKeys: + description: A list of SSH Keys which should be used for + the local administrator on the Zookeeper Nodes. Changing + this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The ID of the Subnet within the Virtual Network + where the Zookeeper Nodes should be provisioned within. + Changing this forces a new resource to be created. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + virtualNetworkId: + description: The ID of the Virtual Network where the Zookeeper + Nodes should be provisioned within. Changing this forces + a new resource to be created. + type: string + vmSize: + description: The Size of the Virtual Machine which should + be used as the Zookeeper Nodes. 
Possible values are + ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, + A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, + Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, + Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, + Standard_D3, Standard_D4, Standard_D11, Standard_D12, + Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, + Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, + Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, + Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, + Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, + Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, + Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, + Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, + Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, + Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, + Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, + Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, + Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, + Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, + Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, + Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, + Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, + Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, + Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, + Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. + Changing this forces a new resource to be created. + type: string + type: object + workerNode: + description: A worker_node block as defined below. + properties: + numberOfDisksPerNode: + description: The number of Data Disks which should be + assigned to each Worker Node, which can be between 1 + and 8. Changing this forces a new resource to be created. + type: number + scriptActions: + description: The script action which will run on the cluster. 
+ One or more script_actions blocks as defined below. + items: + properties: + name: + description: The name of the script action. + type: string + parameters: + description: The parameters for the script provided. + type: string + uri: + description: The URI to the script. + type: string + type: object + type: array + sshKeys: + description: A list of SSH Keys which should be used for + the local administrator on the Zookeeper Nodes. Changing + this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The ID of the Subnet within the Virtual Network + where the Zookeeper Nodes should be provisioned within. + Changing this forces a new resource to be created. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + targetInstanceCount: + description: The number of instances which should be run + for the Worker Nodes. + type: number + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + virtualNetworkId: + description: The ID of the Virtual Network where the Zookeeper + Nodes should be provisioned within. Changing this forces + a new resource to be created. + type: string + vmSize: + description: The Size of the Virtual Machine which should + be used as the Zookeeper Nodes. 
Possible values are + ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, + A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, + Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, + Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, + Standard_D3, Standard_D4, Standard_D11, Standard_D12, + Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, + Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, + Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, + Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, + Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, + Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, + Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, + Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, + Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, + Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, + Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, + Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, + Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, + Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, + Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, + Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, + Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, + Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, + Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, + Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. + Changing this forces a new resource to be created. + type: string + type: object + zookeeperNode: + description: A zookeeper_node block as defined below. + properties: + scriptActions: + description: The script action which will run on the cluster. + One or more script_actions blocks as defined below. + items: + properties: + name: + description: The name of the script action. 
+ type: string + parameters: + description: The parameters for the script provided. + type: string + uri: + description: The URI to the script. + type: string + type: object + type: array + sshKeys: + description: A list of SSH Keys which should be used for + the local administrator on the Zookeeper Nodes. Changing + this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The ID of the Subnet within the Virtual Network + where the Zookeeper Nodes should be provisioned within. + Changing this forces a new resource to be created. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + virtualNetworkId: + description: The ID of the Virtual Network where the Zookeeper + Nodes should be provisioned within. Changing this forces + a new resource to be created. + type: string + vmSize: + description: The Size of the Virtual Machine which should + be used as the Zookeeper Nodes. 
Possible values are + ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, + A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, + Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, + Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, + Standard_D3, Standard_D4, Standard_D11, Standard_D12, + Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, + Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, + Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, + Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, + Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, + Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, + Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, + Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, + Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, + Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, + Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, + Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, + Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, + Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, + Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, + Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, + Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, + Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, + Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, + Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. + Changing this forces a new resource to be created. + type: string + type: object + type: object + securityProfile: + description: A security_profile block as defined below. Changing + this forces a new resource to be created. + properties: + aaddsResourceId: + description: The resource ID of the Azure Active Directory + Domain Service. Changing this forces a new resource to be + created. 
+ type: string + clusterUsersGroupDns: + description: A list of the distinguished names for the cluster + user groups. Changing this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + domainName: + description: The name of the Azure Active Directory Domain. + Changing this forces a new resource to be created. + type: string + domainUsername: + description: The username of the Azure Active Directory Domain. + Changing this forces a new resource to be created. + type: string + ldapsUrls: + description: A list of the LDAPS URLs to communicate with + the Azure Active Directory. Changing this forces a new resource + to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + msiResourceId: + description: The User Assigned Identity for the HDInsight + Cluster. Changing this forces a new resource to be created. + type: string + type: object + storageAccount: + description: One or more storage_account block as defined below. + items: + properties: + isDefault: + description: Is this the Default Storage Account for the + HDInsight Hadoop Cluster? Changing this forces a new resource + to be created. + type: boolean + storageContainerId: + description: The ID of the Storage Container. Changing this + forces a new resource to be created. + type: string + storageContainerIdRef: + description: Reference to a Container in storage to populate + storageContainerId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + storageContainerIdSelector: + description: Selector for a Container in storage to populate + storageContainerId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + storageResourceId: + description: The ID of the Storage Account. Changing this + forces a new resource to be created. + type: string + type: object + type: array + storageAccountGen2: + description: A storage_account_gen2 block as defined below. + properties: + filesystemId: + description: The ID of the Gen2 Filesystem. 
Changing this + forces a new resource to be created. + type: string + isDefault: + description: Is this the Default Storage Account for the HDInsight + Hadoop Cluster? Changing this forces a new resource to be + created. + type: boolean + managedIdentityResourceId: + description: The ID of Managed Identity to use for accessing + the Gen2 filesystem. Changing this forces a new resource + to be created. + type: string + storageResourceId: + description: The ID of the Storage Account. Changing this + forces a new resource to be created. + type: string + type: object + tags: + additionalProperties: + type: string + description: A map of Tags which should be assigned to this HDInsight + Kafka Cluster. + type: object + x-kubernetes-map-type: granular + tier: + description: Specifies the Tier which should be used for this + HDInsight Kafka Cluster. Possible values are Standard or Premium. + Changing this forces a new resource to be created. + type: string + tlsMinVersion: + description: The minimal supported TLS version. Possible values + are 1.0, 1.1 or 1.2. Changing this forces a new resource to + be created. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.clusterVersion is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.clusterVersion) + || (has(self.initProvider) && has(self.initProvider.clusterVersion))' + - message: spec.forProvider.componentVersion is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.componentVersion) + || (has(self.initProvider) && has(self.initProvider.componentVersion))' + - message: spec.forProvider.gateway is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.gateway) + || (has(self.initProvider) && has(self.initProvider.gateway))' + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || 
''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.roles is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.roles) + || (has(self.initProvider) && has(self.initProvider.roles))' + - message: spec.forProvider.tier is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.tier) + || (has(self.initProvider) && has(self.initProvider.tier))' + status: + description: KafkaClusterStatus defines the observed state of KafkaCluster. + properties: + atProvider: + properties: + clusterVersion: + description: Specifies the Version of HDInsights which should + be used for this Cluster. Changing this forces a new resource + to be created. + type: string + componentVersion: + description: A component_version block as defined below. + properties: + kafka: + description: The version of Kafka which should be used for + this HDInsight Kafka Cluster. Changing this forces a new + resource to be created. + type: string + type: object + computeIsolation: + description: A compute_isolation block as defined below. + properties: + computeIsolationEnabled: + description: This field indicates whether enable compute isolation + or not. Possible values are true or false. + type: boolean + hostSku: + description: The name of the host SKU. + type: string + type: object + diskEncryption: + description: One or more disk_encryption block as defined below. + items: + properties: + encryptionAlgorithm: + description: This is an algorithm identifier for encryption. + Possible values are RSA1_5, RSA-OAEP, RSA-OAEP-256. 
+ type: string + encryptionAtHostEnabled: + description: This is indicator to show whether resource + disk encryption is enabled. + type: boolean + keyVaultKeyId: + description: The ID of the key vault key. + type: string + keyVaultManagedIdentityId: + description: This is the resource ID of Managed Identity + used to access the key vault. + type: string + type: object + type: array + encryptionInTransitEnabled: + description: Whether encryption in transit is enabled for this + HDInsight Kafka Cluster. Changing this forces a new resource + to be created. + type: boolean + extension: + description: An extension block as defined below. + properties: + logAnalyticsWorkspaceId: + description: The workspace ID of the log analytics extension. + type: string + type: object + gateway: + description: A gateway block as defined below. + properties: + username: + description: The username used for the Ambari Portal. Changing + this forces a new resource to be created. + type: string + type: object + httpsEndpoint: + description: The HTTPS Connectivity Endpoint for this HDInsight + Kafka Cluster. + type: string + id: + description: The ID of the HDInsight Kafka Cluster. + type: string + kafkaRestProxyEndpoint: + description: The Kafka Rest Proxy Endpoint for this HDInsight + Kafka Cluster. + type: string + location: + description: Specifies the Azure Region which this HDInsight Kafka + Cluster should exist. Changing this forces a new resource to + be created. + type: string + metastores: + description: A metastores block as defined below. + properties: + ambari: + description: An ambari block as defined below. + properties: + databaseName: + description: The external Oozie metastore's existing SQL + database. Changing this forces a new resource to be + created. + type: string + server: + description: The fully-qualified domain name (FQDN) of + the SQL server to use for the external Oozie metastore. + Changing this forces a new resource to be created. 
+ type: string + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + type: object + hive: + description: A hive block as defined below. + properties: + databaseName: + description: The external Oozie metastore's existing SQL + database. Changing this forces a new resource to be + created. + type: string + server: + description: The fully-qualified domain name (FQDN) of + the SQL server to use for the external Oozie metastore. + Changing this forces a new resource to be created. + type: string + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + type: object + oozie: + description: An oozie block as defined below. + properties: + databaseName: + description: The external Oozie metastore's existing SQL + database. Changing this forces a new resource to be + created. + type: string + server: + description: The fully-qualified domain name (FQDN) of + the SQL server to use for the external Oozie metastore. + Changing this forces a new resource to be created. + type: string + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + type: object + type: object + monitor: + description: A monitor block as defined below. + properties: + logAnalyticsWorkspaceId: + description: The Operations Management Suite (OMS) workspace + ID. + type: string + type: object + network: + description: A network block as defined below. + properties: + connectionDirection: + description: The direction of the resource provider connection. + Possible values include Inbound or Outbound. Defaults to + Inbound. Changing this forces a new resource to be created. + type: string + privateLinkEnabled: + description: Is the private link enabled? 
Possible values + include true or false. Defaults to false. Changing this + forces a new resource to be created. + type: boolean + type: object + resourceGroupName: + description: Specifies the name of the Resource Group in which + this HDInsight Kafka Cluster should exist. Changing this forces + a new resource to be created. + type: string + restProxy: + description: A rest_proxy block as defined below. + properties: + securityGroupId: + description: The Azure Active Directory Security Group ID. + Changing this forces a new resource to be created. + type: string + securityGroupName: + description: The Azure Active Directory Security Group name. + Changing this forces a new resource to be created. + type: string + type: object + roles: + description: A roles block as defined below. + properties: + headNode: + description: A head_node block as defined above. + properties: + scriptActions: + description: The script action which will run on the cluster. + One or more script_actions blocks as defined below. + items: + properties: + name: + description: The name of the script action. + type: string + parameters: + description: The parameters for the script provided. + type: string + uri: + description: The URI to the script. + type: string + type: object + type: array + sshKeys: + description: A list of SSH Keys which should be used for + the local administrator on the Zookeeper Nodes. Changing + this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The ID of the Subnet within the Virtual Network + where the Zookeeper Nodes should be provisioned within. + Changing this forces a new resource to be created. + type: string + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. 
+ type: string + virtualNetworkId: + description: The ID of the Virtual Network where the Zookeeper + Nodes should be provisioned within. Changing this forces + a new resource to be created. + type: string + vmSize: + description: The Size of the Virtual Machine which should + be used as the Zookeeper Nodes. Possible values are + ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, + A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, + Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, + Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, + Standard_D3, Standard_D4, Standard_D11, Standard_D12, + Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, + Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, + Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, + Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, + Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, + Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, + Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, + Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, + Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, + Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, + Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, + Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, + Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, + Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, + Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, + Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, + Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, + Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, + Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, + Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. + Changing this forces a new resource to be created. 
+ type: string + type: object + kafkaManagementNode: + description: A kafka_management_node block as defined below. + properties: + scriptActions: + description: The script action which will run on the cluster. + One or more script_actions blocks as defined below. + items: + properties: + name: + description: The name of the script action. + type: string + parameters: + description: The parameters for the script provided. + type: string + uri: + description: The URI to the script. + type: string + type: object + type: array + sshKeys: + description: A list of SSH Keys which should be used for + the local administrator on the Zookeeper Nodes. Changing + this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The ID of the Subnet within the Virtual Network + where the Zookeeper Nodes should be provisioned within. + Changing this forces a new resource to be created. + type: string + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + virtualNetworkId: + description: The ID of the Virtual Network where the Zookeeper + Nodes should be provisioned within. Changing this forces + a new resource to be created. + type: string + vmSize: + description: The Size of the Virtual Machine which should + be used as the Zookeeper Nodes. 
Possible values are + ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, + A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, + Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, + Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, + Standard_D3, Standard_D4, Standard_D11, Standard_D12, + Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, + Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, + Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, + Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, + Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, + Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, + Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, + Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, + Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, + Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, + Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, + Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, + Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, + Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, + Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, + Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, + Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, + Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, + Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, + Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. + Changing this forces a new resource to be created. + type: string + type: object + workerNode: + description: A worker_node block as defined below. + properties: + numberOfDisksPerNode: + description: The number of Data Disks which should be + assigned to each Worker Node, which can be between 1 + and 8. Changing this forces a new resource to be created. + type: number + scriptActions: + description: The script action which will run on the cluster. 
+ One or more script_actions blocks as defined below. + items: + properties: + name: + description: The name of the script action. + type: string + parameters: + description: The parameters for the script provided. + type: string + uri: + description: The URI to the script. + type: string + type: object + type: array + sshKeys: + description: A list of SSH Keys which should be used for + the local administrator on the Zookeeper Nodes. Changing + this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The ID of the Subnet within the Virtual Network + where the Zookeeper Nodes should be provisioned within. + Changing this forces a new resource to be created. + type: string + targetInstanceCount: + description: The number of instances which should be run + for the Worker Nodes. + type: number + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + virtualNetworkId: + description: The ID of the Virtual Network where the Zookeeper + Nodes should be provisioned within. Changing this forces + a new resource to be created. + type: string + vmSize: + description: The Size of the Virtual Machine which should + be used as the Zookeeper Nodes. 
Possible values are + ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, + A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, + Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, + Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, + Standard_D3, Standard_D4, Standard_D11, Standard_D12, + Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, + Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, + Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, + Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, + Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, + Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, + Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, + Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, + Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, + Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, + Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, + Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, + Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, + Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, + Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, + Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, + Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, + Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, + Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, + Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. + Changing this forces a new resource to be created. + type: string + type: object + zookeeperNode: + description: A zookeeper_node block as defined below. + properties: + scriptActions: + description: The script action which will run on the cluster. + One or more script_actions blocks as defined below. + items: + properties: + name: + description: The name of the script action. 
+ type: string + parameters: + description: The parameters for the script provided. + type: string + uri: + description: The URI to the script. + type: string + type: object + type: array + sshKeys: + description: A list of SSH Keys which should be used for + the local administrator on the Zookeeper Nodes. Changing + this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The ID of the Subnet within the Virtual Network + where the Zookeeper Nodes should be provisioned within. + Changing this forces a new resource to be created. + type: string + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + virtualNetworkId: + description: The ID of the Virtual Network where the Zookeeper + Nodes should be provisioned within. Changing this forces + a new resource to be created. + type: string + vmSize: + description: The Size of the Virtual Machine which should + be used as the Zookeeper Nodes. 
Possible values are + ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, + A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, + Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, + Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, + Standard_D3, Standard_D4, Standard_D11, Standard_D12, + Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, + Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, + Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, + Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, + Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, + Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, + Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, + Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, + Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, + Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, + Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, + Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, + Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, + Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, + Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, + Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, + Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, + Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, + Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, + Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. + Changing this forces a new resource to be created. + type: string + type: object + type: object + securityProfile: + description: A security_profile block as defined below. Changing + this forces a new resource to be created. + properties: + aaddsResourceId: + description: The resource ID of the Azure Active Directory + Domain Service. Changing this forces a new resource to be + created. 
+ type: string + clusterUsersGroupDns: + description: A list of the distinguished names for the cluster + user groups. Changing this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + domainName: + description: The name of the Azure Active Directory Domain. + Changing this forces a new resource to be created. + type: string + domainUsername: + description: The username of the Azure Active Directory Domain. + Changing this forces a new resource to be created. + type: string + ldapsUrls: + description: A list of the LDAPS URLs to communicate with + the Azure Active Directory. Changing this forces a new resource + to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + msiResourceId: + description: The User Assigned Identity for the HDInsight + Cluster. Changing this forces a new resource to be created. + type: string + type: object + sshEndpoint: + description: The SSH Connectivity Endpoint for this HDInsight + Kafka Cluster. + type: string + storageAccount: + description: One or more storage_account block as defined below. + items: + properties: + isDefault: + description: Is this the Default Storage Account for the + HDInsight Hadoop Cluster? Changing this forces a new resource + to be created. + type: boolean + storageContainerId: + description: The ID of the Storage Container. Changing this + forces a new resource to be created. + type: string + storageResourceId: + description: The ID of the Storage Account. Changing this + forces a new resource to be created. + type: string + type: object + type: array + storageAccountGen2: + description: A storage_account_gen2 block as defined below. + properties: + filesystemId: + description: The ID of the Gen2 Filesystem. Changing this + forces a new resource to be created. + type: string + isDefault: + description: Is this the Default Storage Account for the HDInsight + Hadoop Cluster? Changing this forces a new resource to be + created. 
+ type: boolean + managedIdentityResourceId: + description: The ID of Managed Identity to use for accessing + the Gen2 filesystem. Changing this forces a new resource + to be created. + type: string + storageResourceId: + description: The ID of the Storage Account. Changing this + forces a new resource to be created. + type: string + type: object + tags: + additionalProperties: + type: string + description: A map of Tags which should be assigned to this HDInsight + Kafka Cluster. + type: object + x-kubernetes-map-type: granular + tier: + description: Specifies the Tier which should be used for this + HDInsight Kafka Cluster. Possible values are Standard or Premium. + Changing this forces a new resource to be created. + type: string + tlsMinVersion: + description: The minimal supported TLS version. Possible values + are 1.0, 1.1 or 1.2. Changing this forces a new resource to + be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? 
+ type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/hdinsight.azure.upbound.io_sparkclusters.yaml b/package/crds/hdinsight.azure.upbound.io_sparkclusters.yaml index e11df70a7..308e1ad03 100644 --- a/package/crds/hdinsight.azure.upbound.io_sparkclusters.yaml +++ b/package/crds/hdinsight.azure.upbound.io_sparkclusters.yaml @@ -3026,3 +3026,2822 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: SparkCluster is the Schema for the SparkClusters API. Manages + a HDInsight Spark Cluster. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SparkClusterSpec defines the desired state of SparkCluster + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + clusterVersion: + description: Specifies the Version of HDInsights which should + be used for this Cluster. Changing this forces a new resource + to be created. + type: string + componentVersion: + description: A component_version block as defined below. + properties: + spark: + description: The version of Spark which should be used for + this HDInsight Spark Cluster. Changing this forces a new + resource to be created. + type: string + type: object + computeIsolation: + description: A compute_isolation block as defined below. + properties: + computeIsolationEnabled: + description: This field indicates whether enable compute isolation + or not. Possible values are true or false. 
+ type: boolean + hostSku: + description: The name of the host SKU. + type: string + type: object + diskEncryption: + description: One or more disk_encryption block as defined below. + items: + properties: + encryptionAlgorithm: + description: This is an algorithm identifier for encryption. + Possible values are RSA1_5, RSA-OAEP, RSA-OAEP-256. + type: string + encryptionAtHostEnabled: + description: This is indicator to show whether resource + disk encryption is enabled. + type: boolean + keyVaultKeyId: + description: The ID of the key vault key. + type: string + keyVaultManagedIdentityId: + description: This is the resource ID of Managed Identity + used to access the key vault. + type: string + type: object + type: array + encryptionInTransitEnabled: + description: Whether encryption in transit is enabled for this + Cluster. Changing this forces a new resource to be created. + type: boolean + extension: + description: An extension block as defined below. + properties: + logAnalyticsWorkspaceId: + description: The workspace ID of the log analytics extension. + type: string + primaryKeySecretRef: + description: The workspace key of the log analytics extension. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + required: + - primaryKeySecretRef + type: object + gateway: + description: A gateway block as defined below. + properties: + passwordSecretRef: + description: The password used for the Ambari Portal. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + username: + description: The username used for the Ambari Portal. 
Changing + this forces a new resource to be created. + type: string + required: + - passwordSecretRef + type: object + location: + description: Specifies the Azure Region which this HDInsight Spark + Cluster should exist. Changing this forces a new resource to + be created. + type: string + metastores: + description: A metastores block as defined below. + properties: + ambari: + description: An ambari block as defined below. + properties: + databaseName: + description: The external Oozie metastore's existing SQL + database. Changing this forces a new resource to be + created. + type: string + passwordSecretRef: + description: The Password associated with the local administrator + for the Zookeeper Nodes. Changing this forces a new + resource to be created. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + server: + description: The fully-qualified domain name (FQDN) of + the SQL server to use for the external Oozie metastore. + Changing this forces a new resource to be created. + type: string + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + required: + - passwordSecretRef + type: object + hive: + description: A hive block as defined below. + properties: + databaseName: + description: The external Oozie metastore's existing SQL + database. Changing this forces a new resource to be + created. + type: string + passwordSecretRef: + description: The Password associated with the local administrator + for the Zookeeper Nodes. Changing this forces a new + resource to be created. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - key + - name + - namespace + type: object + server: + description: The fully-qualified domain name (FQDN) of + the SQL server to use for the external Oozie metastore. + Changing this forces a new resource to be created. + type: string + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + required: + - passwordSecretRef + type: object + oozie: + description: An oozie block as defined below. + properties: + databaseName: + description: The external Oozie metastore's existing SQL + database. Changing this forces a new resource to be + created. + type: string + passwordSecretRef: + description: The Password associated with the local administrator + for the Zookeeper Nodes. Changing this forces a new + resource to be created. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + server: + description: The fully-qualified domain name (FQDN) of + the SQL server to use for the external Oozie metastore. + Changing this forces a new resource to be created. + type: string + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + required: + - passwordSecretRef + type: object + type: object + monitor: + description: A monitor block as defined below. + properties: + logAnalyticsWorkspaceId: + description: The Operations Management Suite (OMS) workspace + ID. + type: string + primaryKeySecretRef: + description: The Operations Management Suite (OMS) workspace + key. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. 
+ type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + required: + - primaryKeySecretRef + type: object + network: + description: A network block as defined below. + properties: + connectionDirection: + description: The direction of the resource provider connection. + Possible values include Inbound or Outbound. Defaults to + Inbound. Changing this forces a new resource to be created. + type: string + privateLinkEnabled: + description: Is the private link enabled? Possible values + include true or false. Defaults to false. Changing this + forces a new resource to be created. + type: boolean + type: object + resourceGroupName: + description: Specifies the name of the Resource Group in which + this HDInsight Spark Cluster should exist. Changing this forces + a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + roles: + description: A roles block as defined below. + properties: + headNode: + description: A head_node block as defined above. + properties: + passwordSecretRef: + description: The Password associated with the local administrator + for the Zookeeper Nodes. Changing this forces a new + resource to be created. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + scriptActions: + description: The script action which will run on the cluster. 
+ One or more script_actions blocks as defined above. + items: + properties: + name: + description: The name of the script action. + type: string + parameters: + description: The parameters for the script provided. + type: string + uri: + description: The URI to the script. + type: string + type: object + type: array + sshKeys: + description: A list of SSH Keys which should be used for + the local administrator on the Zookeeper Nodes. Changing + this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The ID of the Subnet within the Virtual Network + where the Zookeeper Nodes should be provisioned within. + Changing this forces a new resource to be created. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + virtualNetworkId: + description: The ID of the Virtual Network where the Zookeeper + Nodes should be provisioned within. Changing this forces + a new resource to be created. + type: string + vmSize: + description: The Size of the Virtual Machine which should + be used as the Zookeeper Nodes. 
Possible values are + ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, + A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, + Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, + Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, + Standard_D3, Standard_D4, Standard_D11, Standard_D12, + Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, + Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, + Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, + Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, + Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, + Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, + Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, + Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, + Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, + Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, + Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, + Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, + Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, + Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, + Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, + Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, + Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, + Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, + Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, + Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. + Changing this forces a new resource to be created. + type: string + type: object + workerNode: + description: A worker_node block as defined below. + properties: + autoscale: + description: A autoscale block as defined below. + properties: + capacity: + description: A capacity block as defined below. + properties: + maxInstanceCount: + description: The maximum number of worker nodes + to autoscale to based on the cluster's activity. 
+ type: number + minInstanceCount: + description: The minimum number of worker nodes + to autoscale to based on the cluster's activity. + type: number + type: object + recurrence: + description: A recurrence block as defined below. + properties: + schedule: + description: A list of schedule blocks as defined + below. + items: + properties: + days: + description: The days of the week to perform + autoscale. Possible values are Monday, + Tuesday, Wednesday, Thursday, Friday, + Saturday and Sunday. + items: + type: string + type: array + targetInstanceCount: + description: The number of instances which + should be run for the Worker Nodes. + type: number + time: + description: The time of day to perform + the autoscale in 24hour format. + type: string + type: object + type: array + timezone: + description: The time zone for the autoscale schedule + times. + type: string + type: object + type: object + passwordSecretRef: + description: The Password associated with the local administrator + for the Zookeeper Nodes. Changing this forces a new + resource to be created. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + scriptActions: + description: The script action which will run on the cluster. + One or more script_actions blocks as defined above. + items: + properties: + name: + description: The name of the script action. + type: string + parameters: + description: The parameters for the script provided. + type: string + uri: + description: The URI to the script. + type: string + type: object + type: array + sshKeys: + description: A list of SSH Keys which should be used for + the local administrator on the Zookeeper Nodes. Changing + this forces a new resource to be created. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The ID of the Subnet within the Virtual Network + where the Zookeeper Nodes should be provisioned within. + Changing this forces a new resource to be created. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + targetInstanceCount: + description: The number of instances which should be run + for the Worker Nodes. + type: number + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + virtualNetworkId: + description: The ID of the Virtual Network where the Zookeeper + Nodes should be provisioned within. Changing this forces + a new resource to be created. + type: string + vmSize: + description: The Size of the Virtual Machine which should + be used as the Zookeeper Nodes. 
Possible values are + ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, + A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, + Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, + Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, + Standard_D3, Standard_D4, Standard_D11, Standard_D12, + Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, + Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, + Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, + Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, + Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, + Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, + Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, + Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, + Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, + Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, + Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, + Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, + Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, + Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, + Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, + Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, + Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, + Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, + Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, + Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. + Changing this forces a new resource to be created. + type: string + type: object + zookeeperNode: + description: A zookeeper_node block as defined below. + properties: + passwordSecretRef: + description: The Password associated with the local administrator + for the Zookeeper Nodes. Changing this forces a new + resource to be created. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. 
+ type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + scriptActions: + description: The script action which will run on the cluster. + One or more script_actions blocks as defined above. + items: + properties: + name: + description: The name of the script action. + type: string + parameters: + description: The parameters for the script provided. + type: string + uri: + description: The URI to the script. + type: string + type: object + type: array + sshKeys: + description: A list of SSH Keys which should be used for + the local administrator on the Zookeeper Nodes. Changing + this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The ID of the Subnet within the Virtual Network + where the Zookeeper Nodes should be provisioned within. + Changing this forces a new resource to be created. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + virtualNetworkId: + description: The ID of the Virtual Network where the Zookeeper + Nodes should be provisioned within. Changing this forces + a new resource to be created. + type: string + vmSize: + description: The Size of the Virtual Machine which should + be used as the Zookeeper Nodes. 
Possible values are + ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, + A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, + Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, + Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, + Standard_D3, Standard_D4, Standard_D11, Standard_D12, + Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, + Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, + Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, + Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, + Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, + Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, + Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, + Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, + Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, + Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, + Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, + Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, + Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, + Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, + Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, + Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, + Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, + Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, + Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, + Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. + Changing this forces a new resource to be created. + type: string + type: object + type: object + securityProfile: + description: A security_profile block as defined below. Changing + this forces a new resource to be created. + properties: + aaddsResourceId: + description: The resource ID of the Azure Active Directory + Domain Service. Changing this forces a new resource to be + created. 
+ type: string + clusterUsersGroupDns: + description: A list of the distinguished names for the cluster + user groups. Changing this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + domainName: + description: The name of the Azure Active Directory Domain. + Changing this forces a new resource to be created. + type: string + domainUserPasswordSecretRef: + description: The user password of the Azure Active Directory + Domain. Changing this forces a new resource to be created. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + domainUsername: + description: The username of the Azure Active Directory Domain. + Changing this forces a new resource to be created. + type: string + ldapsUrls: + description: A list of the LDAPS URLs to communicate with + the Azure Active Directory. Changing this forces a new resource + to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + msiResourceId: + description: The User Assigned Identity for the HDInsight + Cluster. Changing this forces a new resource to be created. + type: string + required: + - domainUserPasswordSecretRef + type: object + storageAccount: + description: One or more storage_account block as defined below. + items: + properties: + isDefault: + description: Is this the Default Storage Account for the + HDInsight Hadoop Cluster? Changing this forces a new resource + to be created. + type: boolean + storageAccountKeySecretRef: + description: The Access Key which should be used to connect + to the Storage Account. Changing this forces a new resource + to be created. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. 
+ type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + storageContainerId: + description: The ID of the Storage Container. Changing this + forces a new resource to be created. + type: string + storageContainerIdRef: + description: Reference to a Container in storage to populate + storageContainerId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + storageContainerIdSelector: + description: Selector for a Container in storage to populate + storageContainerId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + storageResourceId: + description: The ID of the Storage Account. Changing this + forces a new resource to be created. + type: string + required: + - storageAccountKeySecretRef + type: object + type: array + storageAccountGen2: + description: A storage_account_gen2 block as defined below. + properties: + filesystemId: + description: The ID of the Gen2 Filesystem. Changing this + forces a new resource to be created. + type: string + isDefault: + description: Is this the Default Storage Account for the HDInsight + Hadoop Cluster? Changing this forces a new resource to be + created. + type: boolean + managedIdentityResourceId: + description: The ID of Managed Identity to use for accessing + the Gen2 filesystem. Changing this forces a new resource + to be created. + type: string + storageResourceId: + description: The ID of the Storage Account. Changing this + forces a new resource to be created. + type: string + type: object + tags: + additionalProperties: + type: string + description: A map of Tags which should be assigned to this HDInsight + Spark Cluster. + type: object + x-kubernetes-map-type: granular + tier: + description: Specifies the Tier which should be used for this + HDInsight Spark Cluster. Possible values are Standard or Premium. + Changing this forces a new resource to be created. + type: string + tlsMinVersion: + description: The minimal supported TLS version. 
Possible values + are 1.0, 1.1 or 1.2. Changing this forces a new resource to + be created. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + clusterVersion: + description: Specifies the Version of HDInsights which should + be used for this Cluster. Changing this forces a new resource + to be created. + type: string + componentVersion: + description: A component_version block as defined below. + properties: + spark: + description: The version of Spark which should be used for + this HDInsight Spark Cluster. Changing this forces a new + resource to be created. + type: string + type: object + computeIsolation: + description: A compute_isolation block as defined below. + properties: + computeIsolationEnabled: + description: This field indicates whether enable compute isolation + or not. Possible values are true or false. + type: boolean + hostSku: + description: The name of the host SKU. + type: string + type: object + diskEncryption: + description: One or more disk_encryption block as defined below. + items: + properties: + encryptionAlgorithm: + description: This is an algorithm identifier for encryption. + Possible values are RSA1_5, RSA-OAEP, RSA-OAEP-256. + type: string + encryptionAtHostEnabled: + description: This is indicator to show whether resource + disk encryption is enabled. 
+ type: boolean + keyVaultKeyId: + description: The ID of the key vault key. + type: string + keyVaultManagedIdentityId: + description: This is the resource ID of Managed Identity + used to access the key vault. + type: string + type: object + type: array + encryptionInTransitEnabled: + description: Whether encryption in transit is enabled for this + Cluster. Changing this forces a new resource to be created. + type: boolean + extension: + description: An extension block as defined below. + properties: + logAnalyticsWorkspaceId: + description: The workspace ID of the log analytics extension. + type: string + type: object + gateway: + description: A gateway block as defined below. + properties: + username: + description: The username used for the Ambari Portal. Changing + this forces a new resource to be created. + type: string + type: object + location: + description: Specifies the Azure Region which this HDInsight Spark + Cluster should exist. Changing this forces a new resource to + be created. + type: string + metastores: + description: A metastores block as defined below. + properties: + ambari: + description: An ambari block as defined below. + properties: + databaseName: + description: The external Oozie metastore's existing SQL + database. Changing this forces a new resource to be + created. + type: string + server: + description: The fully-qualified domain name (FQDN) of + the SQL server to use for the external Oozie metastore. + Changing this forces a new resource to be created. + type: string + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + type: object + hive: + description: A hive block as defined below. + properties: + databaseName: + description: The external Oozie metastore's existing SQL + database. Changing this forces a new resource to be + created. 
+ type: string + server: + description: The fully-qualified domain name (FQDN) of + the SQL server to use for the external Oozie metastore. + Changing this forces a new resource to be created. + type: string + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + type: object + oozie: + description: An oozie block as defined below. + properties: + databaseName: + description: The external Oozie metastore's existing SQL + database. Changing this forces a new resource to be + created. + type: string + server: + description: The fully-qualified domain name (FQDN) of + the SQL server to use for the external Oozie metastore. + Changing this forces a new resource to be created. + type: string + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + type: object + type: object + monitor: + description: A monitor block as defined below. + properties: + logAnalyticsWorkspaceId: + description: The Operations Management Suite (OMS) workspace + ID. + type: string + type: object + network: + description: A network block as defined below. + properties: + connectionDirection: + description: The direction of the resource provider connection. + Possible values include Inbound or Outbound. Defaults to + Inbound. Changing this forces a new resource to be created. + type: string + privateLinkEnabled: + description: Is the private link enabled? Possible values + include true or false. Defaults to false. Changing this + forces a new resource to be created. + type: boolean + type: object + roles: + description: A roles block as defined below. + properties: + headNode: + description: A head_node block as defined above. + properties: + scriptActions: + description: The script action which will run on the cluster. + One or more script_actions blocks as defined above. 
+ items: + properties: + name: + description: The name of the script action. + type: string + parameters: + description: The parameters for the script provided. + type: string + uri: + description: The URI to the script. + type: string + type: object + type: array + sshKeys: + description: A list of SSH Keys which should be used for + the local administrator on the Zookeeper Nodes. Changing + this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The ID of the Subnet within the Virtual Network + where the Zookeeper Nodes should be provisioned within. + Changing this forces a new resource to be created. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + virtualNetworkId: + description: The ID of the Virtual Network where the Zookeeper + Nodes should be provisioned within. Changing this forces + a new resource to be created. + type: string + vmSize: + description: The Size of the Virtual Machine which should + be used as the Zookeeper Nodes. 
Possible values are + ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, + A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, + Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, + Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, + Standard_D3, Standard_D4, Standard_D11, Standard_D12, + Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, + Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, + Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, + Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, + Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, + Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, + Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, + Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, + Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, + Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, + Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, + Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, + Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, + Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, + Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, + Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, + Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, + Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, + Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, + Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. + Changing this forces a new resource to be created. + type: string + type: object + workerNode: + description: A worker_node block as defined below. + properties: + autoscale: + description: A autoscale block as defined below. + properties: + capacity: + description: A capacity block as defined below. + properties: + maxInstanceCount: + description: The maximum number of worker nodes + to autoscale to based on the cluster's activity. 
+ type: number + minInstanceCount: + description: The minimum number of worker nodes + to autoscale to based on the cluster's activity. + type: number + type: object + recurrence: + description: A recurrence block as defined below. + properties: + schedule: + description: A list of schedule blocks as defined + below. + items: + properties: + days: + description: The days of the week to perform + autoscale. Possible values are Monday, + Tuesday, Wednesday, Thursday, Friday, + Saturday and Sunday. + items: + type: string + type: array + targetInstanceCount: + description: The number of instances which + should be run for the Worker Nodes. + type: number + time: + description: The time of day to perform + the autoscale in 24hour format. + type: string + type: object + type: array + timezone: + description: The time zone for the autoscale schedule + times. + type: string + type: object + type: object + scriptActions: + description: The script action which will run on the cluster. + One or more script_actions blocks as defined above. + items: + properties: + name: + description: The name of the script action. + type: string + parameters: + description: The parameters for the script provided. + type: string + uri: + description: The URI to the script. + type: string + type: object + type: array + sshKeys: + description: A list of SSH Keys which should be used for + the local administrator on the Zookeeper Nodes. Changing + this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The ID of the Subnet within the Virtual Network + where the Zookeeper Nodes should be provisioned within. + Changing this forces a new resource to be created. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + targetInstanceCount: + description: The number of instances which should be run + for the Worker Nodes. + type: number + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + virtualNetworkId: + description: The ID of the Virtual Network where the Zookeeper + Nodes should be provisioned within. Changing this forces + a new resource to be created. + type: string + vmSize: + description: The Size of the Virtual Machine which should + be used as the Zookeeper Nodes. Possible values are + ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, + A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, + Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, + Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, + Standard_D3, Standard_D4, Standard_D11, Standard_D12, + Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, + Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, + Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, + Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, + Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, + Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, + Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, + Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, + Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, + Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, + Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, + Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, + Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, + Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, + Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, + Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, + Standard_G4, Standard_G5, Standard_F2s_V2, 
Standard_F4s_V2, + Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, + Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, + Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. + Changing this forces a new resource to be created. + type: string + type: object + zookeeperNode: + description: A zookeeper_node block as defined below. + properties: + scriptActions: + description: The script action which will run on the cluster. + One or more script_actions blocks as defined above. + items: + properties: + name: + description: The name of the script action. + type: string + parameters: + description: The parameters for the script provided. + type: string + uri: + description: The URI to the script. + type: string + type: object + type: array + sshKeys: + description: A list of SSH Keys which should be used for + the local administrator on the Zookeeper Nodes. Changing + this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The ID of the Subnet within the Virtual Network + where the Zookeeper Nodes should be provisioned within. + Changing this forces a new resource to be created. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + virtualNetworkId: + description: The ID of the Virtual Network where the Zookeeper + Nodes should be provisioned within. Changing this forces + a new resource to be created. + type: string + vmSize: + description: The Size of the Virtual Machine which should + be used as the Zookeeper Nodes. 
Possible values are + ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, + A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, + Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, + Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, + Standard_D3, Standard_D4, Standard_D11, Standard_D12, + Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, + Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, + Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, + Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, + Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, + Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, + Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, + Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, + Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, + Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, + Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, + Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, + Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, + Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, + Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, + Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, + Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, + Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, + Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, + Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. + Changing this forces a new resource to be created. + type: string + type: object + type: object + securityProfile: + description: A security_profile block as defined below. Changing + this forces a new resource to be created. + properties: + aaddsResourceId: + description: The resource ID of the Azure Active Directory + Domain Service. Changing this forces a new resource to be + created. 
+ type: string + clusterUsersGroupDns: + description: A list of the distinguished names for the cluster + user groups. Changing this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + domainName: + description: The name of the Azure Active Directory Domain. + Changing this forces a new resource to be created. + type: string + domainUsername: + description: The username of the Azure Active Directory Domain. + Changing this forces a new resource to be created. + type: string + ldapsUrls: + description: A list of the LDAPS URLs to communicate with + the Azure Active Directory. Changing this forces a new resource + to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + msiResourceId: + description: The User Assigned Identity for the HDInsight + Cluster. Changing this forces a new resource to be created. + type: string + type: object + storageAccount: + description: One or more storage_account block as defined below. + items: + properties: + isDefault: + description: Is this the Default Storage Account for the + HDInsight Hadoop Cluster? Changing this forces a new resource + to be created. + type: boolean + storageContainerId: + description: The ID of the Storage Container. Changing this + forces a new resource to be created. + type: string + storageContainerIdRef: + description: Reference to a Container in storage to populate + storageContainerId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + storageContainerIdSelector: + description: Selector for a Container in storage to populate + storageContainerId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + storageResourceId: + description: The ID of the Storage Account. Changing this + forces a new resource to be created. + type: string + type: object + type: array + storageAccountGen2: + description: A storage_account_gen2 block as defined below. + properties: + filesystemId: + description: The ID of the Gen2 Filesystem. 
Changing this + forces a new resource to be created. + type: string + isDefault: + description: Is this the Default Storage Account for the HDInsight + Hadoop Cluster? Changing this forces a new resource to be + created. + type: boolean + managedIdentityResourceId: + description: The ID of Managed Identity to use for accessing + the Gen2 filesystem. Changing this forces a new resource + to be created. + type: string + storageResourceId: + description: The ID of the Storage Account. Changing this + forces a new resource to be created. + type: string + type: object + tags: + additionalProperties: + type: string + description: A map of Tags which should be assigned to this HDInsight + Spark Cluster. + type: object + x-kubernetes-map-type: granular + tier: + description: Specifies the Tier which should be used for this + HDInsight Spark Cluster. Possible values are Standard or Premium. + Changing this forces a new resource to be created. + type: string + tlsMinVersion: + description: The minimal supported TLS version. Possible values + are 1.0, 1.1 or 1.2. Changing this forces a new resource to + be created. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.clusterVersion is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.clusterVersion) + || (has(self.initProvider) && has(self.initProvider.clusterVersion))' + - message: spec.forProvider.componentVersion is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.componentVersion) + || (has(self.initProvider) && has(self.initProvider.componentVersion))' + - message: spec.forProvider.gateway is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.gateway) + || (has(self.initProvider) && has(self.initProvider.gateway))' + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || 
''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.roles is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.roles) + || (has(self.initProvider) && has(self.initProvider.roles))' + - message: spec.forProvider.tier is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.tier) + || (has(self.initProvider) && has(self.initProvider.tier))' + status: + description: SparkClusterStatus defines the observed state of SparkCluster. + properties: + atProvider: + properties: + clusterVersion: + description: Specifies the Version of HDInsights which should + be used for this Cluster. Changing this forces a new resource + to be created. + type: string + componentVersion: + description: A component_version block as defined below. + properties: + spark: + description: The version of Spark which should be used for + this HDInsight Spark Cluster. Changing this forces a new + resource to be created. + type: string + type: object + computeIsolation: + description: A compute_isolation block as defined below. + properties: + computeIsolationEnabled: + description: This field indicates whether enable compute isolation + or not. Possible values are true or false. + type: boolean + hostSku: + description: The name of the host SKU. + type: string + type: object + diskEncryption: + description: One or more disk_encryption block as defined below. + items: + properties: + encryptionAlgorithm: + description: This is an algorithm identifier for encryption. + Possible values are RSA1_5, RSA-OAEP, RSA-OAEP-256. 
+ type: string + encryptionAtHostEnabled: + description: This is indicator to show whether resource + disk encryption is enabled. + type: boolean + keyVaultKeyId: + description: The ID of the key vault key. + type: string + keyVaultManagedIdentityId: + description: This is the resource ID of Managed Identity + used to access the key vault. + type: string + type: object + type: array + encryptionInTransitEnabled: + description: Whether encryption in transit is enabled for this + Cluster. Changing this forces a new resource to be created. + type: boolean + extension: + description: An extension block as defined below. + properties: + logAnalyticsWorkspaceId: + description: The workspace ID of the log analytics extension. + type: string + type: object + gateway: + description: A gateway block as defined below. + properties: + username: + description: The username used for the Ambari Portal. Changing + this forces a new resource to be created. + type: string + type: object + httpsEndpoint: + description: The HTTPS Connectivity Endpoint for this HDInsight + Spark Cluster. + type: string + id: + description: The ID of the HDInsight Spark Cluster. + type: string + location: + description: Specifies the Azure Region which this HDInsight Spark + Cluster should exist. Changing this forces a new resource to + be created. + type: string + metastores: + description: A metastores block as defined below. + properties: + ambari: + description: An ambari block as defined below. + properties: + databaseName: + description: The external Oozie metastore's existing SQL + database. Changing this forces a new resource to be + created. + type: string + server: + description: The fully-qualified domain name (FQDN) of + the SQL server to use for the external Oozie metastore. + Changing this forces a new resource to be created. + type: string + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. 
+ type: string + type: object + hive: + description: A hive block as defined below. + properties: + databaseName: + description: The external Oozie metastore's existing SQL + database. Changing this forces a new resource to be + created. + type: string + server: + description: The fully-qualified domain name (FQDN) of + the SQL server to use for the external Oozie metastore. + Changing this forces a new resource to be created. + type: string + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + type: object + oozie: + description: An oozie block as defined below. + properties: + databaseName: + description: The external Oozie metastore's existing SQL + database. Changing this forces a new resource to be + created. + type: string + server: + description: The fully-qualified domain name (FQDN) of + the SQL server to use for the external Oozie metastore. + Changing this forces a new resource to be created. + type: string + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + type: object + type: object + monitor: + description: A monitor block as defined below. + properties: + logAnalyticsWorkspaceId: + description: The Operations Management Suite (OMS) workspace + ID. + type: string + type: object + network: + description: A network block as defined below. + properties: + connectionDirection: + description: The direction of the resource provider connection. + Possible values include Inbound or Outbound. Defaults to + Inbound. Changing this forces a new resource to be created. + type: string + privateLinkEnabled: + description: Is the private link enabled? Possible values + include true or false. Defaults to false. Changing this + forces a new resource to be created. 
+ type: boolean + type: object + resourceGroupName: + description: Specifies the name of the Resource Group in which + this HDInsight Spark Cluster should exist. Changing this forces + a new resource to be created. + type: string + roles: + description: A roles block as defined below. + properties: + headNode: + description: A head_node block as defined above. + properties: + scriptActions: + description: The script action which will run on the cluster. + One or more script_actions blocks as defined above. + items: + properties: + name: + description: The name of the script action. + type: string + parameters: + description: The parameters for the script provided. + type: string + uri: + description: The URI to the script. + type: string + type: object + type: array + sshKeys: + description: A list of SSH Keys which should be used for + the local administrator on the Zookeeper Nodes. Changing + this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The ID of the Subnet within the Virtual Network + where the Zookeeper Nodes should be provisioned within. + Changing this forces a new resource to be created. + type: string + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + virtualNetworkId: + description: The ID of the Virtual Network where the Zookeeper + Nodes should be provisioned within. Changing this forces + a new resource to be created. + type: string + vmSize: + description: The Size of the Virtual Machine which should + be used as the Zookeeper Nodes. 
Possible values are + ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, + A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, + Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, + Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, + Standard_D3, Standard_D4, Standard_D11, Standard_D12, + Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, + Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, + Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, + Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, + Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, + Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, + Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, + Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, + Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, + Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, + Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, + Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, + Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, + Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, + Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, + Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, + Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, + Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, + Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, + Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. + Changing this forces a new resource to be created. + type: string + type: object + workerNode: + description: A worker_node block as defined below. + properties: + autoscale: + description: A autoscale block as defined below. + properties: + capacity: + description: A capacity block as defined below. + properties: + maxInstanceCount: + description: The maximum number of worker nodes + to autoscale to based on the cluster's activity. 
+ type: number + minInstanceCount: + description: The minimum number of worker nodes + to autoscale to based on the cluster's activity. + type: number + type: object + recurrence: + description: A recurrence block as defined below. + properties: + schedule: + description: A list of schedule blocks as defined + below. + items: + properties: + days: + description: The days of the week to perform + autoscale. Possible values are Monday, + Tuesday, Wednesday, Thursday, Friday, + Saturday and Sunday. + items: + type: string + type: array + targetInstanceCount: + description: The number of instances which + should be run for the Worker Nodes. + type: number + time: + description: The time of day to perform + the autoscale in 24hour format. + type: string + type: object + type: array + timezone: + description: The time zone for the autoscale schedule + times. + type: string + type: object + type: object + scriptActions: + description: The script action which will run on the cluster. + One or more script_actions blocks as defined above. + items: + properties: + name: + description: The name of the script action. + type: string + parameters: + description: The parameters for the script provided. + type: string + uri: + description: The URI to the script. + type: string + type: object + type: array + sshKeys: + description: A list of SSH Keys which should be used for + the local administrator on the Zookeeper Nodes. Changing + this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The ID of the Subnet within the Virtual Network + where the Zookeeper Nodes should be provisioned within. + Changing this forces a new resource to be created. + type: string + targetInstanceCount: + description: The number of instances which should be run + for the Worker Nodes. + type: number + username: + description: The Username of the local administrator for + the Zookeeper Nodes. 
Changing this forces a new resource + to be created. + type: string + virtualNetworkId: + description: The ID of the Virtual Network where the Zookeeper + Nodes should be provisioned within. Changing this forces + a new resource to be created. + type: string + vmSize: + description: The Size of the Virtual Machine which should + be used as the Zookeeper Nodes. Possible values are + ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, + A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, + Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, + Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, + Standard_D3, Standard_D4, Standard_D11, Standard_D12, + Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, + Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, + Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, + Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, + Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, + Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, + Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, + Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, + Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, + Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, + Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, + Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, + Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, + Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, + Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, + Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, + Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, + Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, + Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, + Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. + Changing this forces a new resource to be created. 
+ type: string + type: object + zookeeperNode: + description: A zookeeper_node block as defined below. + properties: + scriptActions: + description: The script action which will run on the cluster. + One or more script_actions blocks as defined above. + items: + properties: + name: + description: The name of the script action. + type: string + parameters: + description: The parameters for the script provided. + type: string + uri: + description: The URI to the script. + type: string + type: object + type: array + sshKeys: + description: A list of SSH Keys which should be used for + the local administrator on the Zookeeper Nodes. Changing + this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + subnetId: + description: The ID of the Subnet within the Virtual Network + where the Zookeeper Nodes should be provisioned within. + Changing this forces a new resource to be created. + type: string + username: + description: The Username of the local administrator for + the Zookeeper Nodes. Changing this forces a new resource + to be created. + type: string + virtualNetworkId: + description: The ID of the Virtual Network where the Zookeeper + Nodes should be provisioned within. Changing this forces + a new resource to be created. + type: string + vmSize: + description: The Size of the Virtual Machine which should + be used as the Zookeeper Nodes. 
Possible values are + ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, + A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, + Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, + Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, + Standard_D3, Standard_D4, Standard_D11, Standard_D12, + Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, + Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, + Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, + Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, + Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, + Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, + Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, + Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, + Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, + Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, + Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, + Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, + Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, + Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, + Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, + Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, + Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, + Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, + Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, + Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. + Changing this forces a new resource to be created. + type: string + type: object + type: object + securityProfile: + description: A security_profile block as defined below. Changing + this forces a new resource to be created. + properties: + aaddsResourceId: + description: The resource ID of the Azure Active Directory + Domain Service. Changing this forces a new resource to be + created. 
+ type: string + clusterUsersGroupDns: + description: A list of the distinguished names for the cluster + user groups. Changing this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + domainName: + description: The name of the Azure Active Directory Domain. + Changing this forces a new resource to be created. + type: string + domainUsername: + description: The username of the Azure Active Directory Domain. + Changing this forces a new resource to be created. + type: string + ldapsUrls: + description: A list of the LDAPS URLs to communicate with + the Azure Active Directory. Changing this forces a new resource + to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + msiResourceId: + description: The User Assigned Identity for the HDInsight + Cluster. Changing this forces a new resource to be created. + type: string + type: object + sshEndpoint: + description: The SSH Connectivity Endpoint for this HDInsight + Spark Cluster. + type: string + storageAccount: + description: One or more storage_account block as defined below. + items: + properties: + isDefault: + description: Is this the Default Storage Account for the + HDInsight Hadoop Cluster? Changing this forces a new resource + to be created. + type: boolean + storageContainerId: + description: The ID of the Storage Container. Changing this + forces a new resource to be created. + type: string + storageResourceId: + description: The ID of the Storage Account. Changing this + forces a new resource to be created. + type: string + type: object + type: array + storageAccountGen2: + description: A storage_account_gen2 block as defined below. + properties: + filesystemId: + description: The ID of the Gen2 Filesystem. Changing this + forces a new resource to be created. + type: string + isDefault: + description: Is this the Default Storage Account for the HDInsight + Hadoop Cluster? Changing this forces a new resource to be + created. 
+ type: boolean + managedIdentityResourceId: + description: The ID of Managed Identity to use for accessing + the Gen2 filesystem. Changing this forces a new resource + to be created. + type: string + storageResourceId: + description: The ID of the Storage Account. Changing this + forces a new resource to be created. + type: string + type: object + tags: + additionalProperties: + type: string + description: A map of Tags which should be assigned to this HDInsight + Spark Cluster. + type: object + x-kubernetes-map-type: granular + tier: + description: Specifies the Tier which should be used for this + HDInsight Spark Cluster. Possible values are Standard or Premium. + Changing this forces a new resource to be created. + type: string + tlsMinVersion: + description: The minimal supported TLS version. Possible values + are 1.0, 1.1 or 1.2. Changing this forces a new resource to + be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? 
+ type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/healthcareapis.azure.upbound.io_healthcaredicomservices.yaml b/package/crds/healthcareapis.azure.upbound.io_healthcaredicomservices.yaml index ef4809df6..96b86a622 100644 --- a/package/crds/healthcareapis.azure.upbound.io_healthcaredicomservices.yaml +++ b/package/crds/healthcareapis.azure.upbound.io_healthcaredicomservices.yaml @@ -565,3 +565,544 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: HealthcareDICOMService is the Schema for the HealthcareDICOMServices + API. Manages a Healthcare DICOM (Digital Imaging and Communications in Medicine) + Service. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: HealthcareDICOMServiceSpec defines the desired state of HealthcareDICOMService + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of User Assigned Identity IDs which should + be assigned to this Healthcare DICOM service. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: The type of identity used for the Healthcare + DICOM service. Possible values are UserAssigned, SystemAssigned + and SystemAssigned, UserAssigned. If UserAssigned is set, + an identity_ids must be set as well. + type: string + type: object + location: + description: Specifies the Azure Region where the Healthcare DICOM + Service should be created. 
Changing this forces a new Healthcare + DICOM Service to be created. + type: string + publicNetworkAccessEnabled: + description: Whether to enabled public networks when data plane + traffic coming from public networks while private endpoint is + enabled. Defaults to true. + type: boolean + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the Healthcare DICOM + Service. + type: object + x-kubernetes-map-type: granular + workspaceId: + description: Specifies the id of the Healthcare Workspace where + the Healthcare DICOM Service should exist. Changing this forces + a new Healthcare DICOM Service to be created. + type: string + workspaceIdRef: + description: Reference to a HealthcareWorkspace in healthcareapis + to populate workspaceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + workspaceIdSelector: + description: Selector for a HealthcareWorkspace in healthcareapis + to populate workspaceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of User Assigned Identity IDs which should + be assigned to this Healthcare DICOM service. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: The type of identity used for the Healthcare + DICOM service. 
Possible values are UserAssigned, SystemAssigned + and SystemAssigned, UserAssigned. If UserAssigned is set, + an identity_ids must be set as well. + type: string + type: object + location: + description: Specifies the Azure Region where the Healthcare DICOM + Service should be created. Changing this forces a new Healthcare + DICOM Service to be created. + type: string + publicNetworkAccessEnabled: + description: Whether to enabled public networks when data plane + traffic coming from public networks while private endpoint is + enabled. Defaults to true. + type: boolean + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the Healthcare DICOM + Service. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + status: + description: HealthcareDICOMServiceStatus defines the observed state of + HealthcareDICOMService. + properties: + atProvider: + properties: + authentication: + description: The authentication block as defined below. + items: + properties: + audience: + description: The intended audience to receive authentication + tokens for the service. The default value is https://dicom.azurehealthcareapis.azure.com + items: + type: string + type: array + authority: + description: |- + The Azure Active Directory (tenant) that serves as the authentication authority to access the service. + Authority must be registered to Azure AD and in the following format: https://{Azure-AD-endpoint}/{tenant-id}. + type: string + type: object + type: array + id: + description: The ID of the Healthcare DICOM Service. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of User Assigned Identity IDs which should + be assigned to this Healthcare DICOM service. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The ID of the Healthcare DICOM Service. + type: string + tenantId: + description: The ID of the Healthcare DICOM Service. + type: string + type: + description: The type of identity used for the Healthcare + DICOM service. Possible values are UserAssigned, SystemAssigned + and SystemAssigned, UserAssigned. If UserAssigned is set, + an identity_ids must be set as well. + type: string + type: object + location: + description: Specifies the Azure Region where the Healthcare DICOM + Service should be created. Changing this forces a new Healthcare + DICOM Service to be created. + type: string + privateEndpoint: + items: + properties: + id: + description: The ID of the Healthcare DICOM Service. + type: string + name: + description: Specifies the name of the Healthcare DICOM + Service. Changing this forces a new Healthcare DICOM Service + to be created. + type: string + type: object + type: array + publicNetworkAccessEnabled: + description: Whether to enabled public networks when data plane + traffic coming from public networks while private endpoint is + enabled. Defaults to true. + type: boolean + serviceUrl: + description: The url of the Healthcare DICOM Services. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the Healthcare DICOM + Service. + type: object + x-kubernetes-map-type: granular + workspaceId: + description: Specifies the id of the Healthcare Workspace where + the Healthcare DICOM Service should exist. Changing this forces + a new Healthcare DICOM Service to be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. 
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/healthcareapis.azure.upbound.io_healthcarefhirservices.yaml b/package/crds/healthcareapis.azure.upbound.io_healthcarefhirservices.yaml index adae47ba6..df6bafacd 100644 --- a/package/crds/healthcareapis.azure.upbound.io_healthcarefhirservices.yaml +++ b/package/crds/healthcareapis.azure.upbound.io_healthcarefhirservices.yaml @@ -964,3 +964,931 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: HealthcareFHIRService is the Schema for the HealthcareFHIRServices + API. Manages a Healthcare FHIR (Fast Healthcare Interoperability Resources) + Service. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: HealthcareFHIRServiceSpec defines the desired state of HealthcareFHIRService + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + accessPolicyObjectIds: + description: A list of the access policies of the service instance. + items: + type: string + type: array + x-kubernetes-list-type: set + authentication: + description: An authentication block as defined below. + properties: + audience: + description: The intended audience to receive authentication + tokens for the service. + type: string + authority: + description: |- + The Azure Active Directory (tenant) that serves as the authentication authority to access the service. + Authority must be registered to Azure AD and in the following format: https://{Azure-AD-endpoint}/{tenant-id}. + type: string + smartProxyEnabled: + description: Whether smart proxy is enabled. + type: boolean + type: object + configurationExportStorageAccountName: + description: Specifies the name of the storage account which the + operation configuration information is exported to. 
+ type: string + containerRegistryLoginServerUrl: + description: A list of azure container registry settings used + for convert data operation of the service instance. + items: + type: string + type: array + x-kubernetes-list-type: set + cors: + description: A cors block as defined below. + properties: + allowedHeaders: + description: A set of headers to be allowed via CORS. + items: + type: string + type: array + x-kubernetes-list-type: set + allowedMethods: + description: The methods to be allowed via CORS. Possible + values are DELETE, GET, HEAD, MERGE, POST, OPTIONS, PATCH + and PUT. + items: + type: string + type: array + x-kubernetes-list-type: set + allowedOrigins: + description: A set of origins to be allowed via CORS. + items: + type: string + type: array + x-kubernetes-list-type: set + credentialsAllowed: + description: If credentials are allowed via CORS. + type: boolean + maxAgeInSeconds: + description: The max age to be allowed via CORS. + type: number + type: object + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of one or more Resource IDs for User Assigned + Managed identities to assign. Required when type is set + to UserAssigned. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: The type of managed identity to assign. Possible + values are UserAssigned and SystemAssigned. + type: string + type: object + kind: + description: 'Specifies the kind of the Healthcare FHIR Service. + Possible values are: fhir-Stu3 and fhir-R4. Defaults to fhir-R4. + Changing this forces a new Healthcare FHIR Service to be created.' + type: string + location: + description: Specifies the Azure Region where the Healthcare FHIR + Service should be created. Changing this forces a new Healthcare + FHIR Service to be created. + type: string + ociArtifact: + description: A list of oci_artifact objects as defined below to + describe OCI artifacts for export. 
+ items: + properties: + digest: + description: A digest of an image within Azure container + registry used for export operations of the service instance + to narrow the artifacts down. + type: string + imageName: + description: An image within Azure container registry used + for export operations of the service instance. + type: string + loginServer: + description: An Azure container registry used for export + operations of the service instance. + type: string + type: object + type: array + resourceGroupName: + description: Specifies the name of the Resource Group in which + to create the Healthcare FHIR Service. Changing this forces + a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the Healthcare FHIR + Service. + type: object + x-kubernetes-map-type: granular + workspaceId: + description: Specifies the id of the Healthcare Workspace where + the Healthcare FHIR Service should exist. Changing this forces + a new Healthcare FHIR Service to be created. + type: string + workspaceIdRef: + description: Reference to a HealthcareWorkspace in healthcareapis + to populate workspaceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + workspaceIdSelector: + description: Selector for a HealthcareWorkspace in healthcareapis + to populate workspaceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. 
The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + accessPolicyObjectIds: + description: A list of the access policies of the service instance. + items: + type: string + type: array + x-kubernetes-list-type: set + authentication: + description: An authentication block as defined below. + properties: + audience: + description: The intended audience to receive authentication + tokens for the service. + type: string + authority: + description: |- + The Azure Active Directory (tenant) that serves as the authentication authority to access the service. + Authority must be registered to Azure AD and in the following format: https://{Azure-AD-endpoint}/{tenant-id}. + type: string + smartProxyEnabled: + description: Whether smart proxy is enabled. + type: boolean + type: object + configurationExportStorageAccountName: + description: Specifies the name of the storage account which the + operation configuration information is exported to. + type: string + containerRegistryLoginServerUrl: + description: A list of azure container registry settings used + for convert data operation of the service instance. + items: + type: string + type: array + x-kubernetes-list-type: set + cors: + description: A cors block as defined below. + properties: + allowedHeaders: + description: A set of headers to be allowed via CORS. + items: + type: string + type: array + x-kubernetes-list-type: set + allowedMethods: + description: The methods to be allowed via CORS. Possible + values are DELETE, GET, HEAD, MERGE, POST, OPTIONS, PATCH + and PUT. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + allowedOrigins: + description: A set of origins to be allowed via CORS. + items: + type: string + type: array + x-kubernetes-list-type: set + credentialsAllowed: + description: If credentials are allowed via CORS. + type: boolean + maxAgeInSeconds: + description: The max age to be allowed via CORS. + type: number + type: object + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of one or more Resource IDs for User Assigned + Managed identities to assign. Required when type is set + to UserAssigned. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: The type of managed identity to assign. Possible + values are UserAssigned and SystemAssigned. + type: string + type: object + kind: + description: 'Specifies the kind of the Healthcare FHIR Service. + Possible values are: fhir-Stu3 and fhir-R4. Defaults to fhir-R4. + Changing this forces a new Healthcare FHIR Service to be created.' + type: string + location: + description: Specifies the Azure Region where the Healthcare FHIR + Service should be created. Changing this forces a new Healthcare + FHIR Service to be created. + type: string + ociArtifact: + description: A list of oci_artifact objects as defined below to + describe OCI artifacts for export. + items: + properties: + digest: + description: A digest of an image within Azure container + registry used for export operations of the service instance + to narrow the artifacts down. + type: string + imageName: + description: An image within Azure container registry used + for export operations of the service instance. + type: string + loginServer: + description: An Azure container registry used for export + operations of the service instance. 
+ type: string + type: object + type: array + resourceGroupName: + description: Specifies the name of the Resource Group in which + to create the Healthcare FHIR Service. Changing this forces + a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the Healthcare FHIR + Service. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.authentication is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.authentication) + || (has(self.initProvider) && has(self.initProvider.authentication))' + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + status: + description: HealthcareFHIRServiceStatus defines the observed state of + HealthcareFHIRService. + properties: + atProvider: + properties: + accessPolicyObjectIds: + description: A list of the access policies of the service instance. + items: + type: string + type: array + x-kubernetes-list-type: set + authentication: + description: An authentication block as defined below. + properties: + audience: + description: The intended audience to receive authentication + tokens for the service. + type: string + authority: + description: |- + The Azure Active Directory (tenant) that serves as the authentication authority to access the service. + Authority must be registered to Azure AD and in the following format: https://{Azure-AD-endpoint}/{tenant-id}. 
+ type: string + smartProxyEnabled: + description: Whether smart proxy is enabled. + type: boolean + type: object + configurationExportStorageAccountName: + description: Specifies the name of the storage account which the + operation configuration information is exported to. + type: string + containerRegistryLoginServerUrl: + description: A list of azure container registry settings used + for convert data operation of the service instance. + items: + type: string + type: array + x-kubernetes-list-type: set + cors: + description: A cors block as defined below. + properties: + allowedHeaders: + description: A set of headers to be allowed via CORS. + items: + type: string + type: array + x-kubernetes-list-type: set + allowedMethods: + description: The methods to be allowed via CORS. Possible + values are DELETE, GET, HEAD, MERGE, POST, OPTIONS, PATCH + and PUT. + items: + type: string + type: array + x-kubernetes-list-type: set + allowedOrigins: + description: A set of origins to be allowed via CORS. + items: + type: string + type: array + x-kubernetes-list-type: set + credentialsAllowed: + description: If credentials are allowed via CORS. + type: boolean + maxAgeInSeconds: + description: The max age to be allowed via CORS. + type: number + type: object + id: + description: The ID of the Healthcare FHIR Service. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of one or more Resource IDs for User Assigned + Managed identities to assign. Required when type is set + to UserAssigned. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The ID of the Healthcare FHIR Service. + type: string + tenantId: + description: The ID of the Healthcare FHIR Service. + type: string + type: + description: The type of managed identity to assign. Possible + values are UserAssigned and SystemAssigned. 
+ type: string + type: object + kind: + description: 'Specifies the kind of the Healthcare FHIR Service. + Possible values are: fhir-Stu3 and fhir-R4. Defaults to fhir-R4. + Changing this forces a new Healthcare FHIR Service to be created.' + type: string + location: + description: Specifies the Azure Region where the Healthcare FHIR + Service should be created. Changing this forces a new Healthcare + FHIR Service to be created. + type: string + ociArtifact: + description: A list of oci_artifact objects as defined below to + describe OCI artifacts for export. + items: + properties: + digest: + description: A digest of an image within Azure container + registry used for export operations of the service instance + to narrow the artifacts down. + type: string + imageName: + description: An image within Azure container registry used + for export operations of the service instance. + type: string + loginServer: + description: An Azure container registry used for export + operations of the service instance. + type: string + type: object + type: array + publicNetworkAccessEnabled: + description: Whether public networks access is enabled. + type: boolean + resourceGroupName: + description: Specifies the name of the Resource Group in which + to create the Healthcare FHIR Service. Changing this forces + a new resource to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the Healthcare FHIR + Service. + type: object + x-kubernetes-map-type: granular + workspaceId: + description: Specifies the id of the Healthcare Workspace where + the Healthcare FHIR Service should exist. Changing this forces + a new Healthcare FHIR Service to be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. 
+ properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/healthcareapis.azure.upbound.io_healthcaremedtechservices.yaml b/package/crds/healthcareapis.azure.upbound.io_healthcaremedtechservices.yaml index 2891fc8d3..1cfec8d8f 100644 --- a/package/crds/healthcareapis.azure.upbound.io_healthcaremedtechservices.yaml +++ b/package/crds/healthcareapis.azure.upbound.io_healthcaremedtechservices.yaml @@ -1014,3 +1014,993 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: HealthcareMedtechService is the Schema for the HealthcareMedtechServices + API. Manages a Healthcare MedTech (Internet of Medical Things) devices Service. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: HealthcareMedtechServiceSpec defines the desired state of + HealthcareMedtechService + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + deviceMappingJson: + description: Specifies the Device Mappings of the Med Tech Service. + type: string + eventhubConsumerGroupName: + description: Specifies the Consumer Group of the Event Hub to + connect to. + type: string + eventhubConsumerGroupNameRef: + description: Reference to a ConsumerGroup in eventhub to populate + eventhubConsumerGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + eventhubConsumerGroupNameSelector: + description: Selector for a ConsumerGroup in eventhub to populate + eventhubConsumerGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + eventhubName: + description: Specifies the name of the Event Hub to connect to. + type: string + eventhubNameRef: + description: Reference to a EventHub in eventhub to populate eventhubName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + eventhubNameSelector: + description: Selector for a EventHub in eventhub to populate eventhubName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + eventhubNamespaceName: + description: Specifies the namespace name of the Event Hub to + connect to. 
+ type: string + eventhubNamespaceNameRef: + description: Reference to a EventHubNamespace in eventhub to populate + eventhubNamespaceName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + eventhubNamespaceNameSelector: + description: Selector for a EventHubNamespace in eventhub to populate + eventhubNamespaceName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Healthcare Med Tech Service. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Healthcare Med Tech Service. + Possible values are SystemAssigned. + type: string + type: object + location: + description: Specifies the Azure Region where the Healthcare Med + Tech Service should be created. Changing this forces a new Healthcare + Med Tech Service to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the Healthcare Med + Tech Service. + type: object + x-kubernetes-map-type: granular + workspaceId: + description: Specifies the id of the Healthcare Workspace where + the Healthcare Med Tech Service should exist. Changing this + forces a new Healthcare Med Tech Service to be created. + type: string + workspaceIdRef: + description: Reference to a HealthcareWorkspace in healthcareapis + to populate workspaceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + workspaceIdSelector: + description: Selector for a HealthcareWorkspace in healthcareapis + to populate workspaceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. 
The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + deviceMappingJson: + description: Specifies the Device Mappings of the Med Tech Service. + type: string + eventhubConsumerGroupName: + description: Specifies the Consumer Group of the Event Hub to + connect to. + type: string + eventhubConsumerGroupNameRef: + description: Reference to a ConsumerGroup in eventhub to populate + eventhubConsumerGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + eventhubConsumerGroupNameSelector: + description: Selector for a ConsumerGroup in eventhub to populate + eventhubConsumerGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + eventhubName: + description: Specifies the name of the Event Hub to connect to. + type: string + eventhubNameRef: + description: Reference to a EventHub in eventhub to populate eventhubName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + eventhubNameSelector: + description: Selector for a EventHub in eventhub to populate eventhubName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + eventhubNamespaceName: + description: Specifies the namespace name of the Event Hub to + connect to. + type: string + eventhubNamespaceNameRef: + description: Reference to a EventHubNamespace in eventhub to populate + eventhubNamespaceName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + eventhubNamespaceNameSelector: + description: Selector for a EventHubNamespace in eventhub to populate + eventhubNamespaceName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Healthcare Med Tech Service. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Healthcare Med Tech Service. + Possible values are SystemAssigned. + type: string + type: object + location: + description: Specifies the Azure Region where the Healthcare Med + Tech Service should be created. Changing this forces a new Healthcare + Med Tech Service to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the Healthcare Med + Tech Service. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.deviceMappingJson is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.deviceMappingJson) + || (has(self.initProvider) && has(self.initProvider.deviceMappingJson))' + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + status: + description: HealthcareMedtechServiceStatus defines the observed state + of HealthcareMedtechService. + properties: + atProvider: + properties: + deviceMappingJson: + description: Specifies the Device Mappings of the Med Tech Service. + type: string + eventhubConsumerGroupName: + description: Specifies the Consumer Group of the Event Hub to + connect to. + type: string + eventhubName: + description: Specifies the name of the Event Hub to connect to. + type: string + eventhubNamespaceName: + description: Specifies the namespace name of the Event Hub to + connect to. + type: string + id: + description: The ID of the Healthcare Med Tech Service. + type: string + identity: + description: An identity block as defined below. 
+ properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Healthcare Med Tech Service. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Principal ID associated with this System + Assigned Managed Service Identity. + type: string + tenantId: + description: The Tenant ID associated with this System Assigned + Managed Service Identity. + type: string + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Healthcare Med Tech Service. + Possible values are SystemAssigned. + type: string + type: object + location: + description: Specifies the Azure Region where the Healthcare Med + Tech Service should be created. Changing this forces a new Healthcare + Med Tech Service to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the Healthcare Med + Tech Service. + type: object + x-kubernetes-map-type: granular + workspaceId: + description: Specifies the id of the Healthcare Workspace where + the Healthcare Med Tech Service should exist. Changing this + forces a new Healthcare Med Tech Service to be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/healthcareapis.azure.upbound.io_healthcareservices.yaml b/package/crds/healthcareapis.azure.upbound.io_healthcareservices.yaml index 53aacc5f1..667589daf 100644 --- a/package/crds/healthcareapis.azure.upbound.io_healthcareservices.yaml +++ b/package/crds/healthcareapis.azure.upbound.io_healthcareservices.yaml @@ -672,3 +672,645 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: HealthcareService is the Schema 
for the HealthcareServices API. + Manages a Healthcare Service. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: HealthcareServiceSpec defines the desired state of HealthcareService + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + accessPolicyObjectIds: + description: A set of Azure object IDs that are allowed to access + the Service. + items: + type: string + type: array + x-kubernetes-list-type: set + authenticationConfiguration: + description: An authentication_configuration block as defined + below. + properties: + audience: + description: The intended audience to receive authentication + tokens for the service. 
The default value is https://azurehealthcareapis.com + type: string + authority: + description: |- + The Azure Active Directory (tenant) that serves as the authentication authority to access the service. + Authority must be registered to Azure AD and in the following format: https://{Azure-AD-endpoint}/{tenant-id}. + type: string + smartProxyEnabled: + description: (Boolean) Enables the 'SMART on FHIR' option + for mobile and web implementations. + type: boolean + type: object + corsConfiguration: + description: A cors_configuration block as defined below. + properties: + allowCredentials: + description: (Boolean) If credentials are allowed via CORS. + type: boolean + allowedHeaders: + description: A set of headers to be allowed via CORS. + items: + type: string + type: array + x-kubernetes-list-type: set + allowedMethods: + description: The methods to be allowed via CORS. Possible + values are DELETE, GET, HEAD, MERGE, POST, OPTIONS, PATCH + and PUT. + items: + type: string + type: array + allowedOrigins: + description: A set of origins to be allowed via CORS. + items: + type: string + type: array + x-kubernetes-list-type: set + maxAgeInSeconds: + description: The max age to be allowed via CORS. + type: number + type: object + cosmosdbKeyVaultKeyVersionlessId: + description: A versionless Key Vault Key ID for CMK encryption + of the backing database. Changing this forces a new resource + to be created. + type: string + cosmosdbThroughput: + description: The provisioned throughput for the backing database. + Range of 400-100000. Defaults to 1000. + type: number + kind: + description: 'The type of the service. Values at time of publication + are: fhir, fhir-Stu3 and fhir-R4. Default value is fhir.' + type: string + location: + description: Specifies the supported Azure Region where the Service + should be created. Changing this forces a new resource to be + created. 
+ type: string + publicNetworkAccessEnabled: + description: Whether public network access is enabled or disabled + for this service instance. Defaults to true. + type: boolean + resourceGroupName: + description: The name of the Resource Group in which to create + the Service. Changing this forces a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + accessPolicyObjectIds: + description: A set of Azure object IDs that are allowed to access + the Service. + items: + type: string + type: array + x-kubernetes-list-type: set + authenticationConfiguration: + description: An authentication_configuration block as defined + below. + properties: + audience: + description: The intended audience to receive authentication + tokens for the service. 
The default value is https://azurehealthcareapis.com + type: string + authority: + description: |- + The Azure Active Directory (tenant) that serves as the authentication authority to access the service. + Authority must be registered to Azure AD and in the following format: https://{Azure-AD-endpoint}/{tenant-id}. + type: string + smartProxyEnabled: + description: (Boolean) Enables the 'SMART on FHIR' option + for mobile and web implementations. + type: boolean + type: object + corsConfiguration: + description: A cors_configuration block as defined below. + properties: + allowCredentials: + description: (Boolean) If credentials are allowed via CORS. + type: boolean + allowedHeaders: + description: A set of headers to be allowed via CORS. + items: + type: string + type: array + x-kubernetes-list-type: set + allowedMethods: + description: The methods to be allowed via CORS. Possible + values are DELETE, GET, HEAD, MERGE, POST, OPTIONS, PATCH + and PUT. + items: + type: string + type: array + allowedOrigins: + description: A set of origins to be allowed via CORS. + items: + type: string + type: array + x-kubernetes-list-type: set + maxAgeInSeconds: + description: The max age to be allowed via CORS. + type: number + type: object + cosmosdbKeyVaultKeyVersionlessId: + description: A versionless Key Vault Key ID for CMK encryption + of the backing database. Changing this forces a new resource + to be created. + type: string + cosmosdbThroughput: + description: The provisioned throughput for the backing database. + Range of 400-100000. Defaults to 1000. + type: number + kind: + description: 'The type of the service. Values at time of publication + are: fhir, fhir-Stu3 and fhir-R4. Default value is fhir.' + type: string + location: + description: Specifies the supported Azure Region where the Service + should be created. Changing this forces a new resource to be + created. 
+ type: string + publicNetworkAccessEnabled: + description: Whether public network access is enabled or disabled + for this service instance. Defaults to true. + type: boolean + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + status: + description: HealthcareServiceStatus defines the observed state of HealthcareService. + properties: + atProvider: + properties: + accessPolicyObjectIds: + description: A set of Azure object IDs that are allowed to access + the Service. + items: + type: string + type: array + x-kubernetes-list-type: set + authenticationConfiguration: + description: An authentication_configuration block as defined + below. + properties: + audience: + description: The intended audience to receive authentication + tokens for the service. The default value is https://azurehealthcareapis.com + type: string + authority: + description: |- + The Azure Active Directory (tenant) that serves as the authentication authority to access the service. + Authority must be registered to Azure AD and in the following format: https://{Azure-AD-endpoint}/{tenant-id}. + type: string + smartProxyEnabled: + description: (Boolean) Enables the 'SMART on FHIR' option + for mobile and web implementations. + type: boolean + type: object + corsConfiguration: + description: A cors_configuration block as defined below. + properties: + allowCredentials: + description: (Boolean) If credentials are allowed via CORS. + type: boolean + allowedHeaders: + description: A set of headers to be allowed via CORS. + items: + type: string + type: array + x-kubernetes-list-type: set + allowedMethods: + description: The methods to be allowed via CORS. Possible + values are DELETE, GET, HEAD, MERGE, POST, OPTIONS, PATCH + and PUT. 
+ items: + type: string + type: array + allowedOrigins: + description: A set of origins to be allowed via CORS. + items: + type: string + type: array + x-kubernetes-list-type: set + maxAgeInSeconds: + description: The max age to be allowed via CORS. + type: number + type: object + cosmosdbKeyVaultKeyVersionlessId: + description: A versionless Key Vault Key ID for CMK encryption + of the backing database. Changing this forces a new resource + to be created. + type: string + cosmosdbThroughput: + description: The provisioned throughput for the backing database. + Range of 400-100000. Defaults to 1000. + type: number + id: + description: The ID of the Healthcare Service. + type: string + kind: + description: 'The type of the service. Values at time of publication + are: fhir, fhir-Stu3 and fhir-R4. Default value is fhir.' + type: string + location: + description: Specifies the supported Azure Region where the Service + should be created. Changing this forces a new resource to be + created. + type: string + publicNetworkAccessEnabled: + description: Whether public network access is enabled or disabled + for this service instance. Defaults to true. + type: boolean + resourceGroupName: + description: The name of the Resource Group in which to create + the Service. Changing this forces a new resource to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/insights.azure.upbound.io_applicationinsightsstandardwebtests.yaml b/package/crds/insights.azure.upbound.io_applicationinsightsstandardwebtests.yaml index b29025842..d51912a29 100644 --- a/package/crds/insights.azure.upbound.io_applicationinsightsstandardwebtests.yaml +++ b/package/crds/insights.azure.upbound.io_applicationinsightsstandardwebtests.yaml @@ -942,3 +942,906 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ApplicationInsightsStandardWebTest is the Schema for the ApplicationInsightsStandardWebTests + API. Manages a Application Insights Standard WebTest. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ApplicationInsightsStandardWebTestSpec defines the desired + state of ApplicationInsightsStandardWebTest + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + applicationInsightsId: + description: The ID of the Application Insights instance on which + the WebTest operates. Changing this forces a new Application + Insights Standard WebTest to be created. + type: string + applicationInsightsIdRef: + description: Reference to a ApplicationInsights in insights to + populate applicationInsightsId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + applicationInsightsIdSelector: + description: Selector for a ApplicationInsights in insights to + populate applicationInsightsId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + description: + description: Purpose/user defined descriptive test for this WebTest. + type: string + enabled: + description: Should the WebTest be enabled? + type: boolean + frequency: + description: Interval in seconds between test runs for this WebTest. + Valid options are 300, 600 and 900. Defaults to 300. 
+ type: number + geoLocations: + description: Specifies a list of where to physically run the tests + from to give global coverage for accessibility of your application. + items: + type: string + type: array + location: + description: The Azure Region where the Application Insights Standard + WebTest should exist. Changing this forces a new Application + Insights Standard WebTest to be created. It needs to correlate + with location of the parent resource (azurerm_application_insights) + type: string + request: + description: A request block as defined below. + properties: + body: + description: The WebTest request body. + type: string + followRedirectsEnabled: + description: Should the following of redirects be enabled? + Defaults to true. + type: boolean + header: + description: One or more header blocks as defined above. + items: + properties: + name: + description: The name which should be used for this + Application Insights Standard WebTest. Changing this + forces a new Application Insights Standard WebTest + to be created. + type: string + value: + description: The value which should be used for a header + in the request. + type: string + type: object + type: array + httpVerb: + description: Which HTTP verb to use for the call. Options + are 'GET', 'POST', 'PUT', 'PATCH', and 'DELETE'. Defaults + to GET. + type: string + parseDependentRequestsEnabled: + description: Should the parsing of dependend requests be enabled? + Defaults to true. + type: boolean + url: + description: The WebTest request URL. + type: string + type: object + resourceGroupName: + description: The name of the Resource Group where the Application + Insights Standard WebTest should exist. Changing this forces + a new Application Insights Standard WebTest to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + retryEnabled: + description: Should the retry on WebTest failure be enabled? + type: boolean + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Application Insights Standard WebTest. + type: object + x-kubernetes-map-type: granular + timeout: + description: Seconds until this WebTest will timeout and fail. + Default is 30. + type: number + validationRules: + description: A validation_rules block as defined below. + properties: + content: + description: A content block as defined above. + properties: + contentMatch: + description: A string value containing the content to + match on. + type: string + ignoreCase: + description: Ignore the casing in the content_match value. + type: boolean + passIfTextFound: + description: If the content of content_match is found, + pass the test. If set to false, the WebTest is failing + if the content of content_match is found. + type: boolean + type: object + expectedStatusCode: + description: The expected status code of the response. Default + is '200', '0' means 'response code < 400' + type: number + sslCertRemainingLifetime: + description: The number of days of SSL certificate validity + remaining for the checked endpoint. If the certificate has + a shorter remaining lifetime left, the test will fail. This + number should be between 1 and 365. + type: number + sslCheckEnabled: + description: Should the SSL check be enabled? + type: boolean + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. 
+ The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + applicationInsightsId: + description: The ID of the Application Insights instance on which + the WebTest operates. Changing this forces a new Application + Insights Standard WebTest to be created. + type: string + applicationInsightsIdRef: + description: Reference to a ApplicationInsights in insights to + populate applicationInsightsId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + applicationInsightsIdSelector: + description: Selector for a ApplicationInsights in insights to + populate applicationInsightsId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + description: + description: Purpose/user defined descriptive test for this WebTest. + type: string + enabled: + description: Should the WebTest be enabled? + type: boolean + frequency: + description: Interval in seconds between test runs for this WebTest. + Valid options are 300, 600 and 900. Defaults to 300. + type: number + geoLocations: + description: Specifies a list of where to physically run the tests + from to give global coverage for accessibility of your application. + items: + type: string + type: array + location: + description: The Azure Region where the Application Insights Standard + WebTest should exist. Changing this forces a new Application + Insights Standard WebTest to be created. It needs to correlate + with location of the parent resource (azurerm_application_insights) + type: string + request: + description: A request block as defined below. + properties: + body: + description: The WebTest request body. + type: string + followRedirectsEnabled: + description: Should the following of redirects be enabled? + Defaults to true. + type: boolean + header: + description: One or more header blocks as defined above. 
+ items: + properties: + name: + description: The name which should be used for this + Application Insights Standard WebTest. Changing this + forces a new Application Insights Standard WebTest + to be created. + type: string + value: + description: The value which should be used for a header + in the request. + type: string + type: object + type: array + httpVerb: + description: Which HTTP verb to use for the call. Options + are 'GET', 'POST', 'PUT', 'PATCH', and 'DELETE'. Defaults + to GET. + type: string + parseDependentRequestsEnabled: + description: Should the parsing of dependend requests be enabled? + Defaults to true. + type: boolean + url: + description: The WebTest request URL. + type: string + type: object + retryEnabled: + description: Should the retry on WebTest failure be enabled? + type: boolean + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Application Insights Standard WebTest. + type: object + x-kubernetes-map-type: granular + timeout: + description: Seconds until this WebTest will timeout and fail. + Default is 30. + type: number + validationRules: + description: A validation_rules block as defined below. + properties: + content: + description: A content block as defined above. + properties: + contentMatch: + description: A string value containing the content to + match on. + type: string + ignoreCase: + description: Ignore the casing in the content_match value. + type: boolean + passIfTextFound: + description: If the content of content_match is found, + pass the test. If set to false, the WebTest is failing + if the content of content_match is found. + type: boolean + type: object + expectedStatusCode: + description: The expected status code of the response. Default + is '200', '0' means 'response code < 400' + type: number + sslCertRemainingLifetime: + description: The number of days of SSL certificate validity + remaining for the checked endpoint. 
If the certificate has + a shorter remaining lifetime left, the test will fail. This + number should be between 1 and 365. + type: number + sslCheckEnabled: + description: Should the SSL check be enabled? + type: boolean + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.geoLocations is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.geoLocations) + || (has(self.initProvider) && has(self.initProvider.geoLocations))' + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.request is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.request) + || (has(self.initProvider) && has(self.initProvider.request))' + status: + description: ApplicationInsightsStandardWebTestStatus defines the observed + state of ApplicationInsightsStandardWebTest. + properties: + atProvider: + properties: + applicationInsightsId: + description: The ID of the Application Insights instance on which + the WebTest operates. Changing this forces a new Application + Insights Standard WebTest to be created. + type: string + description: + description: Purpose/user defined descriptive test for this WebTest. + type: string + enabled: + description: Should the WebTest be enabled? + type: boolean + frequency: + description: Interval in seconds between test runs for this WebTest. + Valid options are 300, 600 and 900. Defaults to 300. + type: number + geoLocations: + description: Specifies a list of where to physically run the tests + from to give global coverage for accessibility of your application. 
+ items: + type: string + type: array + id: + description: The ID of the Application Insights Standard WebTest. + type: string + location: + description: The Azure Region where the Application Insights Standard + WebTest should exist. Changing this forces a new Application + Insights Standard WebTest to be created. It needs to correlate + with location of the parent resource (azurerm_application_insights) + type: string + request: + description: A request block as defined below. + properties: + body: + description: The WebTest request body. + type: string + followRedirectsEnabled: + description: Should the following of redirects be enabled? + Defaults to true. + type: boolean + header: + description: One or more header blocks as defined above. + items: + properties: + name: + description: The name which should be used for this + Application Insights Standard WebTest. Changing this + forces a new Application Insights Standard WebTest + to be created. + type: string + value: + description: The value which should be used for a header + in the request. + type: string + type: object + type: array + httpVerb: + description: Which HTTP verb to use for the call. Options + are 'GET', 'POST', 'PUT', 'PATCH', and 'DELETE'. Defaults + to GET. + type: string + parseDependentRequestsEnabled: + description: Should the parsing of dependend requests be enabled? + Defaults to true. + type: boolean + url: + description: The WebTest request URL. + type: string + type: object + resourceGroupName: + description: The name of the Resource Group where the Application + Insights Standard WebTest should exist. Changing this forces + a new Application Insights Standard WebTest to be created. + type: string + retryEnabled: + description: Should the retry on WebTest failure be enabled? + type: boolean + syntheticMonitorId: + description: Unique ID of this WebTest. This is typically the + same value as the Name field. 
+ type: string + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Application Insights Standard WebTest. + type: object + x-kubernetes-map-type: granular + timeout: + description: Seconds until this WebTest will timeout and fail. + Default is 30. + type: number + validationRules: + description: A validation_rules block as defined below. + properties: + content: + description: A content block as defined above. + properties: + contentMatch: + description: A string value containing the content to + match on. + type: string + ignoreCase: + description: Ignore the casing in the content_match value. + type: boolean + passIfTextFound: + description: If the content of content_match is found, + pass the test. If set to false, the WebTest is failing + if the content of content_match is found. + type: boolean + type: object + expectedStatusCode: + description: The expected status code of the response. Default + is '200', '0' means 'response code < 400' + type: number + sslCertRemainingLifetime: + description: The number of days of SSL certificate validity + remaining for the checked endpoint. If the certificate has + a shorter remaining lifetime left, the test will fail. This + number should be between 1 and 365. + type: number + sslCheckEnabled: + description: Should the SSL check be enabled? + type: boolean + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/insights.azure.upbound.io_applicationinsightsworkbooks.yaml b/package/crds/insights.azure.upbound.io_applicationinsightsworkbooks.yaml index 543b1ffb9..61caaaf48 100644 --- a/package/crds/insights.azure.upbound.io_applicationinsightsworkbooks.yaml +++ b/package/crds/insights.azure.upbound.io_applicationinsightsworkbooks.yaml @@ -705,3 +705,684 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ApplicationInsightsWorkbook is the Schema for the ApplicationInsightsWorkbooks + API. Manages an Azure Workbook. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ApplicationInsightsWorkbookSpec defines the desired state + of ApplicationInsightsWorkbook + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + category: + description: 'Workbook category, as defined by the user at creation + time. There may be additional category types beyond the following: + workbook, sentinel. Defaults to workbook.' + type: string + dataJson: + description: Configuration of this particular workbook. Configuration + data is a string containing valid JSON. + type: string + description: + description: Specifies the description of the workbook. + type: string + displayName: + description: Specifies the user-defined name (display name) of + the workbook. + type: string + identity: + description: An identity block as defined below. Changing this + forces a new Workbook to be created. + properties: + identityIds: + description: The list of User Assigned Managed Identity IDs + assigned to this Workbook. Changing this forces a new resource + to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: The type of Managed Service Identity that is + configured on this Workbook. 
Possible values are UserAssigned, + SystemAssigned and SystemAssigned, UserAssigned. Changing + this forces a new resource to be created. + type: string + type: object + location: + description: Specifies the Azure Region where the Workbook should + exist. Changing this forces a new Workbook to be created. + type: string + name: + description: Specifies the name of this Workbook as a UUID/GUID. + It should not contain any uppercase letters. Changing this forces + a new Workbook to be created. + type: string + resourceGroupName: + description: Specifies the name of the Resource Group where the + Workbook should exist. Changing this forces a new Workbook to + be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sourceId: + description: Resource ID for a source resource. It should not + contain any uppercase letters. Defaults to azure monitor. + type: string + storageContainerId: + description: Specifies the Resource Manager ID of the Storage + Container when bring your own storage is used. Changing this + forces a new Workbook to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Workbook. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + category: + description: 'Workbook category, as defined by the user at creation + time. There may be additional category types beyond the following: + workbook, sentinel. Defaults to workbook.' + type: string + dataJson: + description: Configuration of this particular workbook. Configuration + data is a string containing valid JSON. + type: string + description: + description: Specifies the description of the workbook. + type: string + displayName: + description: Specifies the user-defined name (display name) of + the workbook. + type: string + identity: + description: An identity block as defined below. Changing this + forces a new Workbook to be created. + properties: + identityIds: + description: The list of User Assigned Managed Identity IDs + assigned to this Workbook. Changing this forces a new resource + to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: The type of Managed Service Identity that is + configured on this Workbook. Possible values are UserAssigned, + SystemAssigned and SystemAssigned, UserAssigned. Changing + this forces a new resource to be created. + type: string + type: object + location: + description: Specifies the Azure Region where the Workbook should + exist. Changing this forces a new Workbook to be created. + type: string + name: + description: Specifies the name of this Workbook as a UUID/GUID. + It should not contain any uppercase letters. Changing this forces + a new Workbook to be created. + type: string + resourceGroupName: + description: Specifies the name of the Resource Group where the + Workbook should exist. Changing this forces a new Workbook to + be created. 
+ type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sourceId: + description: Resource ID for a source resource. It should not + contain any uppercase letters. Defaults to azure monitor. + type: string + storageContainerId: + description: Specifies the Resource Manager ID of the Storage + Container when bring your own storage is used. Changing this + forces a new Workbook to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Workbook. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.dataJson is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.dataJson) + || (has(self.initProvider) && has(self.initProvider.dataJson))' + - message: spec.forProvider.displayName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.displayName) + || (has(self.initProvider) && has(self.initProvider.displayName))' + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: ApplicationInsightsWorkbookStatus defines the observed state + of ApplicationInsightsWorkbook. 
+ properties: + atProvider: + properties: + category: + description: 'Workbook category, as defined by the user at creation + time. There may be additional category types beyond the following: + workbook, sentinel. Defaults to workbook.' + type: string + dataJson: + description: Configuration of this particular workbook. Configuration + data is a string containing valid JSON. + type: string + description: + description: Specifies the description of the workbook. + type: string + displayName: + description: Specifies the user-defined name (display name) of + the workbook. + type: string + id: + description: The ID of the Workbook. + type: string + identity: + description: An identity block as defined below. Changing this + forces a new Workbook to be created. + properties: + identityIds: + description: The list of User Assigned Managed Identity IDs + assigned to this Workbook. Changing this forces a new resource + to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Principal ID of the System Assigned Managed + Service Identity that is configured on this Workbook. + type: string + tenantId: + description: The Tenant ID of the System Assigned Managed + Service Identity that is configured on this Workbook. + type: string + type: + description: The type of Managed Service Identity that is + configured on this Workbook. Possible values are UserAssigned, + SystemAssigned and SystemAssigned, UserAssigned. Changing + this forces a new resource to be created. + type: string + type: object + location: + description: Specifies the Azure Region where the Workbook should + exist. Changing this forces a new Workbook to be created. + type: string + name: + description: Specifies the name of this Workbook as a UUID/GUID. + It should not contain any uppercase letters. Changing this forces + a new Workbook to be created. 
+ type: string + resourceGroupName: + description: Specifies the name of the Resource Group where the + Workbook should exist. Changing this forces a new Workbook to + be created. + type: string + sourceId: + description: Resource ID for a source resource. It should not + contain any uppercase letters. Defaults to azure monitor. + type: string + storageContainerId: + description: Specifies the Resource Manager ID of the Storage + Container when bring your own storage is used. Changing this + forces a new Workbook to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Workbook. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/insights.azure.upbound.io_monitoractiongroups.yaml b/package/crds/insights.azure.upbound.io_monitoractiongroups.yaml index 92435373d..d0c6e5347 100644 --- a/package/crds/insights.azure.upbound.io_monitoractiongroups.yaml +++ b/package/crds/insights.azure.upbound.io_monitoractiongroups.yaml @@ -1202,3 +1202,1181 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: MonitorActionGroup is the Schema for the MonitorActionGroups + API. Manages an Action Group within Azure Monitor + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: MonitorActionGroupSpec defines the desired state of MonitorActionGroup + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + armRoleReceiver: + description: One or more arm_role_receiver blocks as defined below. + items: + properties: + name: + description: The name of the ARM role receiver. + type: string + roleId: + description: The arm role id. + type: string + useCommonAlertSchema: + description: Enables or disables the common alert schema. + type: boolean + type: object + type: array + automationRunbookReceiver: + description: One or more automation_runbook_receiver blocks as + defined below. + items: + properties: + automationAccountId: + description: The automation account ID which holds this + runbook and authenticates to Azure resources. + type: string + isGlobalRunbook: + description: Indicates whether this instance is global runbook. + type: boolean + name: + description: The name of the automation runbook receiver. + type: string + runbookName: + description: The name for this runbook. 
+ type: string + serviceUri: + description: The URI where webhooks should be sent. + type: string + useCommonAlertSchema: + description: Enables or disables the common alert schema. + type: boolean + webhookResourceId: + description: The resource id for webhook linked to this + runbook. + type: string + type: object + type: array + azureAppPushReceiver: + description: One or more azure_app_push_receiver blocks as defined + below. + items: + properties: + emailAddress: + description: The email address of the user signed into the + mobile app who will receive push notifications from this + receiver. + type: string + name: + description: The name of the Azure app push receiver. + type: string + type: object + type: array + azureFunctionReceiver: + description: One or more azure_function_receiver blocks as defined + below. + items: + properties: + functionAppResourceId: + description: The Azure resource ID of the function app. + type: string + functionName: + description: The function name in the function app. + type: string + httpTriggerUrl: + description: The HTTP trigger url where HTTP request sent + to. + type: string + name: + description: The name of the Azure Function receiver. + type: string + useCommonAlertSchema: + description: Enables or disables the common alert schema. + type: boolean + type: object + type: array + emailReceiver: + description: One or more email_receiver blocks as defined below. + items: + properties: + emailAddress: + description: The email address of this receiver. + type: string + name: + description: The name of the email receiver. Names must + be unique (case-insensitive) across all receivers within + an action group. + type: string + useCommonAlertSchema: + description: Enables or disables the common alert schema. + type: boolean + type: object + type: array + enabled: + description: Whether this action group is enabled. If an action + group is not enabled, then none of its receivers will receive + communications. Defaults to true. 
+ type: boolean + eventHubReceiver: + description: One or more event_hub_receiver blocks as defined + below. + items: + properties: + eventHubId: + description: The resource ID of the respective Event Hub. + type: string + eventHubName: + description: The name of the specific Event Hub queue. + type: string + eventHubNamespace: + description: The namespace name of the Event Hub. + type: string + name: + description: The name of the EventHub Receiver, must be + unique within action group. + type: string + subscriptionId: + description: The ID for the subscription containing this + Event Hub. Default to the subscription ID of the Action + Group. + type: string + tenantId: + description: The Tenant ID for the subscription containing + this Event Hub. + type: string + useCommonAlertSchema: + description: Indicates whether to use common alert schema. + type: boolean + type: object + type: array + itsmReceiver: + description: One or more itsm_receiver blocks as defined below. + items: + properties: + connectionId: + description: The unique connection identifier of the ITSM + connection. + type: string + name: + description: The name of the ITSM receiver. + type: string + region: + description: The region of the workspace. + type: string + ticketConfiguration: + description: A JSON blob for the configurations of the ITSM + action. CreateMultipleWorkItems option will be part of + this blob as well. + type: string + workspaceId: + description: The Azure Log Analytics workspace ID where + this connection is defined. Format is |, for example 00000000-0000-0000-0000-000000000000|00000000-0000-0000-0000-000000000000. + type: string + type: object + type: array + location: + description: The Azure Region where the Action Group should exist. + Changing this forces a new Action Group to be created. Defaults + to global. + type: string + logicAppReceiver: + description: One or more logic_app_receiver blocks as defined + below. 
+ items: + properties: + callbackUrl: + description: The callback url where HTTP request sent to. + type: string + name: + description: The name of the logic app receiver. + type: string + resourceId: + description: The Azure resource ID of the logic app. + type: string + useCommonAlertSchema: + description: Enables or disables the common alert schema. + type: boolean + type: object + type: array + resourceGroupName: + description: The name of the resource group in which to create + the Action Group instance. Changing this forces a new resource + to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + shortName: + description: The short name of the action group. This will be + used in SMS messages. + type: string + smsReceiver: + description: One or more sms_receiver blocks as defined below. + items: + properties: + countryCode: + description: The country code of the SMS receiver. + type: string + name: + description: The name of the SMS receiver. Names must be + unique (case-insensitive) across all receivers within + an action group. + type: string + phoneNumber: + description: The phone number of the SMS receiver. + type: string + type: object + type: array + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + voiceReceiver: + description: One or more voice_receiver blocks as defined below. + items: + properties: + countryCode: + description: The country code of the voice receiver. + type: string + name: + description: The name of the voice receiver. 
+ type: string + phoneNumber: + description: The phone number of the voice receiver. + type: string + type: object + type: array + webhookReceiver: + description: One or more webhook_receiver blocks as defined below. + items: + properties: + aadAuth: + description: The aad_auth block as defined below. + properties: + identifierUri: + description: The identifier URI for AAD auth. + type: string + objectId: + description: The webhook application object Id for AAD + auth. + type: string + tenantId: + description: The tenant id for AAD auth. + type: string + type: object + name: + description: The name of the webhook receiver. Names must + be unique (case-insensitive) across all receivers within + an action group. + type: string + serviceUri: + description: The URI where webhooks should be sent. + type: string + useCommonAlertSchema: + description: Enables or disables the common alert schema. + type: boolean + type: object + type: array + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + armRoleReceiver: + description: One or more arm_role_receiver blocks as defined below. + items: + properties: + name: + description: The name of the ARM role receiver. + type: string + roleId: + description: The arm role id. + type: string + useCommonAlertSchema: + description: Enables or disables the common alert schema. 
+ type: boolean + type: object + type: array + automationRunbookReceiver: + description: One or more automation_runbook_receiver blocks as + defined below. + items: + properties: + automationAccountId: + description: The automation account ID which holds this + runbook and authenticates to Azure resources. + type: string + isGlobalRunbook: + description: Indicates whether this instance is global runbook. + type: boolean + name: + description: The name of the automation runbook receiver. + type: string + runbookName: + description: The name for this runbook. + type: string + serviceUri: + description: The URI where webhooks should be sent. + type: string + useCommonAlertSchema: + description: Enables or disables the common alert schema. + type: boolean + webhookResourceId: + description: The resource id for webhook linked to this + runbook. + type: string + type: object + type: array + azureAppPushReceiver: + description: One or more azure_app_push_receiver blocks as defined + below. + items: + properties: + emailAddress: + description: The email address of the user signed into the + mobile app who will receive push notifications from this + receiver. + type: string + name: + description: The name of the Azure app push receiver. + type: string + type: object + type: array + azureFunctionReceiver: + description: One or more azure_function_receiver blocks as defined + below. + items: + properties: + functionAppResourceId: + description: The Azure resource ID of the function app. + type: string + functionName: + description: The function name in the function app. + type: string + httpTriggerUrl: + description: The HTTP trigger url where HTTP request sent + to. + type: string + name: + description: The name of the Azure Function receiver. + type: string + useCommonAlertSchema: + description: Enables or disables the common alert schema. + type: boolean + type: object + type: array + emailReceiver: + description: One or more email_receiver blocks as defined below. 
+ items: + properties: + emailAddress: + description: The email address of this receiver. + type: string + name: + description: The name of the email receiver. Names must + be unique (case-insensitive) across all receivers within + an action group. + type: string + useCommonAlertSchema: + description: Enables or disables the common alert schema. + type: boolean + type: object + type: array + enabled: + description: Whether this action group is enabled. If an action + group is not enabled, then none of its receivers will receive + communications. Defaults to true. + type: boolean + eventHubReceiver: + description: One or more event_hub_receiver blocks as defined + below. + items: + properties: + eventHubId: + description: The resource ID of the respective Event Hub. + type: string + eventHubName: + description: The name of the specific Event Hub queue. + type: string + eventHubNamespace: + description: The namespace name of the Event Hub. + type: string + name: + description: The name of the EventHub Receiver, must be + unique within action group. + type: string + subscriptionId: + description: The ID for the subscription containing this + Event Hub. Default to the subscription ID of the Action + Group. + type: string + tenantId: + description: The Tenant ID for the subscription containing + this Event Hub. + type: string + useCommonAlertSchema: + description: Indicates whether to use common alert schema. + type: boolean + type: object + type: array + itsmReceiver: + description: One or more itsm_receiver blocks as defined below. + items: + properties: + connectionId: + description: The unique connection identifier of the ITSM + connection. + type: string + name: + description: The name of the ITSM receiver. + type: string + region: + description: The region of the workspace. + type: string + ticketConfiguration: + description: A JSON blob for the configurations of the ITSM + action. CreateMultipleWorkItems option will be part of + this blob as well. 
+ type: string + workspaceId: + description: The Azure Log Analytics workspace ID where + this connection is defined. Format is |, for example 00000000-0000-0000-0000-000000000000|00000000-0000-0000-0000-000000000000. + type: string + type: object + type: array + location: + description: The Azure Region where the Action Group should exist. + Changing this forces a new Action Group to be created. Defaults + to global. + type: string + logicAppReceiver: + description: One or more logic_app_receiver blocks as defined + below. + items: + properties: + callbackUrl: + description: The callback url where HTTP request sent to. + type: string + name: + description: The name of the logic app receiver. + type: string + resourceId: + description: The Azure resource ID of the logic app. + type: string + useCommonAlertSchema: + description: Enables or disables the common alert schema. + type: boolean + type: object + type: array + shortName: + description: The short name of the action group. This will be + used in SMS messages. + type: string + smsReceiver: + description: One or more sms_receiver blocks as defined below. + items: + properties: + countryCode: + description: The country code of the SMS receiver. + type: string + name: + description: The name of the SMS receiver. Names must be + unique (case-insensitive) across all receivers within + an action group. + type: string + phoneNumber: + description: The phone number of the SMS receiver. + type: string + type: object + type: array + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + voiceReceiver: + description: One or more voice_receiver blocks as defined below. + items: + properties: + countryCode: + description: The country code of the voice receiver. + type: string + name: + description: The name of the voice receiver. + type: string + phoneNumber: + description: The phone number of the voice receiver. 
+ type: string + type: object + type: array + webhookReceiver: + description: One or more webhook_receiver blocks as defined below. + items: + properties: + aadAuth: + description: The aad_auth block as defined below. + properties: + identifierUri: + description: The identifier URI for AAD auth. + type: string + objectId: + description: The webhook application object Id for AAD + auth. + type: string + tenantId: + description: The tenant id for AAD auth. + type: string + type: object + name: + description: The name of the webhook receiver. Names must + be unique (case-insensitive) across all receivers within + an action group. + type: string + serviceUri: + description: The URI where webhooks should be sent. + type: string + useCommonAlertSchema: + description: Enables or disables the common alert schema. + type: boolean + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.shortName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.shortName) + || (has(self.initProvider) && has(self.initProvider.shortName))' + status: + description: MonitorActionGroupStatus defines the observed state of MonitorActionGroup. + properties: + atProvider: + properties: + armRoleReceiver: + description: One or more arm_role_receiver blocks as defined below. + items: + properties: + name: + description: The name of the ARM role receiver. + type: string + roleId: + description: The arm role id. + type: string + useCommonAlertSchema: + description: Enables or disables the common alert schema. + type: boolean + type: object + type: array + automationRunbookReceiver: + description: One or more automation_runbook_receiver blocks as + defined below. + items: + properties: + automationAccountId: + description: The automation account ID which holds this + runbook and authenticates to Azure resources. + type: string + isGlobalRunbook: + description: Indicates whether this instance is global runbook. + type: boolean + name: + description: The name of the automation runbook receiver. + type: string + runbookName: + description: The name for this runbook. 
+ type: string + serviceUri: + description: The URI where webhooks should be sent. + type: string + useCommonAlertSchema: + description: Enables or disables the common alert schema. + type: boolean + webhookResourceId: + description: The resource id for webhook linked to this + runbook. + type: string + type: object + type: array + azureAppPushReceiver: + description: One or more azure_app_push_receiver blocks as defined + below. + items: + properties: + emailAddress: + description: The email address of the user signed into the + mobile app who will receive push notifications from this + receiver. + type: string + name: + description: The name of the Azure app push receiver. + type: string + type: object + type: array + azureFunctionReceiver: + description: One or more azure_function_receiver blocks as defined + below. + items: + properties: + functionAppResourceId: + description: The Azure resource ID of the function app. + type: string + functionName: + description: The function name in the function app. + type: string + httpTriggerUrl: + description: The HTTP trigger url where HTTP request sent + to. + type: string + name: + description: The name of the Azure Function receiver. + type: string + useCommonAlertSchema: + description: Enables or disables the common alert schema. + type: boolean + type: object + type: array + emailReceiver: + description: One or more email_receiver blocks as defined below. + items: + properties: + emailAddress: + description: The email address of this receiver. + type: string + name: + description: The name of the email receiver. Names must + be unique (case-insensitive) across all receivers within + an action group. + type: string + useCommonAlertSchema: + description: Enables or disables the common alert schema. + type: boolean + type: object + type: array + enabled: + description: Whether this action group is enabled. If an action + group is not enabled, then none of its receivers will receive + communications. Defaults to true. 
+ type: boolean + eventHubReceiver: + description: One or more event_hub_receiver blocks as defined + below. + items: + properties: + eventHubId: + description: The resource ID of the respective Event Hub. + type: string + eventHubName: + description: The name of the specific Event Hub queue. + type: string + eventHubNamespace: + description: The namespace name of the Event Hub. + type: string + name: + description: The name of the EventHub Receiver, must be + unique within action group. + type: string + subscriptionId: + description: The ID for the subscription containing this + Event Hub. Default to the subscription ID of the Action + Group. + type: string + tenantId: + description: The Tenant ID for the subscription containing + this Event Hub. + type: string + useCommonAlertSchema: + description: Indicates whether to use common alert schema. + type: boolean + type: object + type: array + id: + description: The ID of the Action Group. + type: string + itsmReceiver: + description: One or more itsm_receiver blocks as defined below. + items: + properties: + connectionId: + description: The unique connection identifier of the ITSM + connection. + type: string + name: + description: The name of the ITSM receiver. + type: string + region: + description: The region of the workspace. + type: string + ticketConfiguration: + description: A JSON blob for the configurations of the ITSM + action. CreateMultipleWorkItems option will be part of + this blob as well. + type: string + workspaceId: + description: The Azure Log Analytics workspace ID where + this connection is defined. Format is |, for example 00000000-0000-0000-0000-000000000000|00000000-0000-0000-0000-000000000000. + type: string + type: object + type: array + location: + description: The Azure Region where the Action Group should exist. + Changing this forces a new Action Group to be created. Defaults + to global. 
+ type: string + logicAppReceiver: + description: One or more logic_app_receiver blocks as defined + below. + items: + properties: + callbackUrl: + description: The callback url where HTTP request sent to. + type: string + name: + description: The name of the logic app receiver. + type: string + resourceId: + description: The Azure resource ID of the logic app. + type: string + useCommonAlertSchema: + description: Enables or disables the common alert schema. + type: boolean + type: object + type: array + resourceGroupName: + description: The name of the resource group in which to create + the Action Group instance. Changing this forces a new resource + to be created. + type: string + shortName: + description: The short name of the action group. This will be + used in SMS messages. + type: string + smsReceiver: + description: One or more sms_receiver blocks as defined below. + items: + properties: + countryCode: + description: The country code of the SMS receiver. + type: string + name: + description: The name of the SMS receiver. Names must be + unique (case-insensitive) across all receivers within + an action group. + type: string + phoneNumber: + description: The phone number of the SMS receiver. + type: string + type: object + type: array + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + voiceReceiver: + description: One or more voice_receiver blocks as defined below. + items: + properties: + countryCode: + description: The country code of the voice receiver. + type: string + name: + description: The name of the voice receiver. + type: string + phoneNumber: + description: The phone number of the voice receiver. + type: string + type: object + type: array + webhookReceiver: + description: One or more webhook_receiver blocks as defined below. + items: + properties: + aadAuth: + description: The aad_auth block as defined below. 
+ properties: + identifierUri: + description: The identifier URI for AAD auth. + type: string + objectId: + description: The webhook application object Id for AAD + auth. + type: string + tenantId: + description: The tenant id for AAD auth. + type: string + type: object + name: + description: The name of the webhook receiver. Names must + be unique (case-insensitive) across all receivers within + an action group. + type: string + serviceUri: + description: The URI where webhooks should be sent. + type: string + useCommonAlertSchema: + description: Enables or disables the common alert schema. + type: boolean + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/insights.azure.upbound.io_monitoractivitylogalerts.yaml b/package/crds/insights.azure.upbound.io_monitoractivitylogalerts.yaml index a6e1b8489..8e13e946a 100644 --- a/package/crds/insights.azure.upbound.io_monitoractivitylogalerts.yaml +++ b/package/crds/insights.azure.upbound.io_monitoractivitylogalerts.yaml @@ -1592,3 +1592,1553 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: MonitorActivityLogAlert is the Schema for the MonitorActivityLogAlerts + API. Manages an Activity Log Alert within Azure Monitor + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: MonitorActivityLogAlertSpec defines the desired state of + MonitorActivityLogAlert + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + action: + description: One or more action blocks as defined below. + items: + properties: + actionGroupId: + description: The ID of the Action Group can be sourced from + the . + type: string + actionGroupIdRef: + description: Reference to a MonitorActionGroup in insights + to populate actionGroupId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + actionGroupIdSelector: + description: Selector for a MonitorActionGroup in insights + to populate actionGroupId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + webhookProperties: + additionalProperties: + type: string + description: The map of custom string properties to include + with the post operation. These data are appended to the + webhook payload. + type: object + x-kubernetes-map-type: granular + type: object + type: array + criteria: + description: A criteria block as defined below. + properties: + caller: + description: The email address or Azure Active Directory identifier + of the user who performed the operation. 
+ type: string + category: + description: The category of the operation. Possible values + are Administrative, Autoscale, Policy, Recommendation, ResourceHealth, + Security and ServiceHealth. + type: string + level: + description: The severity level of the event. Possible values + are Verbose, Informational, Warning, Error, and Critical. + type: string + levels: + description: A list of severity level of the event. Possible + values are Verbose, Informational, Warning, Error, and Critical. + items: + type: string + type: array + operationName: + description: 'The Resource Manager Role-Based Access Control + operation name. Supported operation should be of the form: + //.' + type: string + recommendationCategory: + description: The recommendation category of the event. Possible + values are Cost, Reliability, OperationalExcellence, HighAvailability + and Performance. It is only allowed when category is Recommendation. + type: string + recommendationImpact: + description: The recommendation impact of the event. Possible + values are High, Medium and Low. It is only allowed when + category is Recommendation. + type: string + recommendationType: + description: The recommendation type of the event. It is only + allowed when category is Recommendation. + type: string + resourceGroup: + description: The name of resource group monitored by the activity + log alert. + type: string + resourceGroups: + description: A list of names of resource groups monitored + by the activity log alert. + items: + type: string + type: array + resourceHealth: + description: A block to define fine grain resource health + settings. + properties: + current: + description: The current resource health statuses that + will log an alert. Possible values are Available, Degraded, + Unavailable and Unknown. + items: + type: string + type: array + x-kubernetes-list-type: set + previous: + description: The previous resource health statuses that + will log an alert. 
Possible values are Available, Degraded, + Unavailable and Unknown. + items: + type: string + type: array + x-kubernetes-list-type: set + reason: + description: The reason that will log an alert. Possible + values are PlatformInitiated (such as a problem with + the resource in an affected region of an Azure incident), + UserInitiated (such as a shutdown request of a VM) and + Unknown. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + resourceId: + description: The specific resource monitored by the activity + log alert. It should be within one of the scopes. + type: string + resourceIdRef: + description: Reference to a Account in storage to populate + resourceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceIdSelector: + description: Selector for a Account in storage to populate + resourceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + resourceIds: + description: A list of specific resources monitored by the + activity log alert. It should be within one of the scopes. + items: + type: string + type: array + resourceProvider: + description: The name of the resource provider monitored by + the activity log alert. + type: string + resourceProviders: + description: A list of names of resource providers monitored + by the activity log alert. + items: + type: string + type: array + resourceType: + description: The resource type monitored by the activity log + alert. + type: string + resourceTypes: + description: A list of resource types monitored by the activity + log alert. + items: + type: string + type: array + serviceHealth: + description: A block to define fine grain service health settings. + properties: + events: + description: Events this alert will monitor Possible values + are Incident, Maintenance, Informational, ActionRequired + and Security. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + locations: + description: Locations this alert will monitor. For example, + West Europe. + items: + type: string + type: array + x-kubernetes-list-type: set + services: + description: Services this alert will monitor. For example, + Activity Logs & Alerts, Action Groups. Defaults to all + Services. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + status: + description: The status of the event. For example, Started, + Failed, or Succeeded. + type: string + statuses: + description: A list of status of the event. For example, Started, + Failed, or Succeeded. + items: + type: string + type: array + subStatus: + description: The sub status of the event. + type: string + subStatuses: + description: A list of sub status of the event. + items: + type: string + type: array + type: object + description: + description: The description of this activity log alert. + type: string + enabled: + description: Should this Activity Log Alert be enabled? Defaults + to true. + type: boolean + name: + description: The name of the activity log alert. Changing this + forces a new resource to be created. + type: string + resourceGroupName: + description: The name of the resource group in which to create + the activity log alert instance. Changing this forces a new + resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + scopes: + description: The Scope at which the Activity Log should be applied. + A list of strings which could be a resource group , or a subscription, + or a resource ID (such as a Storage Account). 
+ items: + type: string + type: array + x-kubernetes-list-type: set + scopesRefs: + description: References to ResourceGroup in azure to populate + scopes. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + scopesSelector: + description: Selector for a list of ResourceGroup in azure to + populate scopes. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + action: + description: One or more action blocks as defined below. + items: + properties: + actionGroupId: + description: The ID of the Action Group can be sourced from + the . + type: string + actionGroupIdRef: + description: Reference to a MonitorActionGroup in insights + to populate actionGroupId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + actionGroupIdSelector: + description: Selector for a MonitorActionGroup in insights + to populate actionGroupId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + webhookProperties: + additionalProperties: + type: string + description: The map of custom string properties to include + with the post operation. These data are appended to the + webhook payload. 
+ type: object + x-kubernetes-map-type: granular + type: object + type: array + criteria: + description: A criteria block as defined below. + properties: + caller: + description: The email address or Azure Active Directory identifier + of the user who performed the operation. + type: string + category: + description: The category of the operation. Possible values + are Administrative, Autoscale, Policy, Recommendation, ResourceHealth, + Security and ServiceHealth. + type: string + level: + description: The severity level of the event. Possible values + are Verbose, Informational, Warning, Error, and Critical. + type: string + levels: + description: A list of severity level of the event. Possible + values are Verbose, Informational, Warning, Error, and Critical. + items: + type: string + type: array + operationName: + description: 'The Resource Manager Role-Based Access Control + operation name. Supported operation should be of the form: + //.' + type: string + recommendationCategory: + description: The recommendation category of the event. Possible + values are Cost, Reliability, OperationalExcellence, HighAvailability + and Performance. It is only allowed when category is Recommendation. + type: string + recommendationImpact: + description: The recommendation impact of the event. Possible + values are High, Medium and Low. It is only allowed when + category is Recommendation. + type: string + recommendationType: + description: The recommendation type of the event. It is only + allowed when category is Recommendation. + type: string + resourceGroup: + description: The name of resource group monitored by the activity + log alert. + type: string + resourceGroups: + description: A list of names of resource groups monitored + by the activity log alert. + items: + type: string + type: array + resourceHealth: + description: A block to define fine grain resource health + settings. 
+ properties: + current: + description: The current resource health statuses that + will log an alert. Possible values are Available, Degraded, + Unavailable and Unknown. + items: + type: string + type: array + x-kubernetes-list-type: set + previous: + description: The previous resource health statuses that + will log an alert. Possible values are Available, Degraded, + Unavailable and Unknown. + items: + type: string + type: array + x-kubernetes-list-type: set + reason: + description: The reason that will log an alert. Possible + values are PlatformInitiated (such as a problem with + the resource in an affected region of an Azure incident), + UserInitiated (such as a shutdown request of a VM) and + Unknown. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + resourceId: + description: The specific resource monitored by the activity + log alert. It should be within one of the scopes. + type: string + resourceIdRef: + description: Reference to a Account in storage to populate + resourceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceIdSelector: + description: Selector for a Account in storage to populate + resourceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + resourceIds: + description: A list of specific resources monitored by the + activity log alert. It should be within one of the scopes. + items: + type: string + type: array + resourceProvider: + description: The name of the resource provider monitored by + the activity log alert. + type: string + resourceProviders: + description: A list of names of resource providers monitored + by the activity log alert. + items: + type: string + type: array + resourceType: + description: The resource type monitored by the activity log + alert. + type: string + resourceTypes: + description: A list of resource types monitored by the activity + log alert. 
+ items: + type: string + type: array + serviceHealth: + description: A block to define fine grain service health settings. + properties: + events: + description: Events this alert will monitor Possible values + are Incident, Maintenance, Informational, ActionRequired + and Security. + items: + type: string + type: array + x-kubernetes-list-type: set + locations: + description: Locations this alert will monitor. For example, + West Europe. + items: + type: string + type: array + x-kubernetes-list-type: set + services: + description: Services this alert will monitor. For example, + Activity Logs & Alerts, Action Groups. Defaults to all + Services. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + status: + description: The status of the event. For example, Started, + Failed, or Succeeded. + type: string + statuses: + description: A list of status of the event. For example, Started, + Failed, or Succeeded. + items: + type: string + type: array + subStatus: + description: The sub status of the event. + type: string + subStatuses: + description: A list of sub status of the event. + items: + type: string + type: array + type: object + description: + description: The description of this activity log alert. + type: string + enabled: + description: Should this Activity Log Alert be enabled? Defaults + to true. + type: boolean + name: + description: The name of the activity log alert. Changing this + forces a new resource to be created. + type: string + resourceGroupName: + description: The name of the resource group in which to create + the activity log alert instance. Changing this forces a new + resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + scopes: + description: The Scope at which the Activity Log should be applied. + A list of strings which could be a resource group , or a subscription, + or a resource ID (such as a Storage Account). + items: + type: string + type: array + x-kubernetes-list-type: set + scopesRefs: + description: References to ResourceGroup in azure to populate + scopes. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + scopesSelector: + description: Selector for a list of ResourceGroup in azure to + populate scopes. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.criteria is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.criteria) + || (has(self.initProvider) && has(self.initProvider.criteria))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: MonitorActivityLogAlertStatus defines the observed state + of MonitorActivityLogAlert. + properties: + atProvider: + properties: + action: + description: One or more action blocks as defined below. + items: + properties: + actionGroupId: + description: The ID of the Action Group can be sourced from + the . + type: string + webhookProperties: + additionalProperties: + type: string + description: The map of custom string properties to include + with the post operation. These data are appended to the + webhook payload. + type: object + x-kubernetes-map-type: granular + type: object + type: array + criteria: + description: A criteria block as defined below. 
+ properties: + caller: + description: The email address or Azure Active Directory identifier + of the user who performed the operation. + type: string + category: + description: The category of the operation. Possible values + are Administrative, Autoscale, Policy, Recommendation, ResourceHealth, + Security and ServiceHealth. + type: string + level: + description: The severity level of the event. Possible values + are Verbose, Informational, Warning, Error, and Critical. + type: string + levels: + description: A list of severity level of the event. Possible + values are Verbose, Informational, Warning, Error, and Critical. + items: + type: string + type: array + operationName: + description: 'The Resource Manager Role-Based Access Control + operation name. Supported operation should be of the form: + //.' + type: string + recommendationCategory: + description: The recommendation category of the event. Possible + values are Cost, Reliability, OperationalExcellence, HighAvailability + and Performance. It is only allowed when category is Recommendation. + type: string + recommendationImpact: + description: The recommendation impact of the event. Possible + values are High, Medium and Low. It is only allowed when + category is Recommendation. + type: string + recommendationType: + description: The recommendation type of the event. It is only + allowed when category is Recommendation. + type: string + resourceGroup: + description: The name of resource group monitored by the activity + log alert. + type: string + resourceGroups: + description: A list of names of resource groups monitored + by the activity log alert. + items: + type: string + type: array + resourceHealth: + description: A block to define fine grain resource health + settings. + properties: + current: + description: The current resource health statuses that + will log an alert. Possible values are Available, Degraded, + Unavailable and Unknown. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + previous: + description: The previous resource health statuses that + will log an alert. Possible values are Available, Degraded, + Unavailable and Unknown. + items: + type: string + type: array + x-kubernetes-list-type: set + reason: + description: The reason that will log an alert. Possible + values are PlatformInitiated (such as a problem with + the resource in an affected region of an Azure incident), + UserInitiated (such as a shutdown request of a VM) and + Unknown. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + resourceId: + description: The specific resource monitored by the activity + log alert. It should be within one of the scopes. + type: string + resourceIds: + description: A list of specific resources monitored by the + activity log alert. It should be within one of the scopes. + items: + type: string + type: array + resourceProvider: + description: The name of the resource provider monitored by + the activity log alert. + type: string + resourceProviders: + description: A list of names of resource providers monitored + by the activity log alert. + items: + type: string + type: array + resourceType: + description: The resource type monitored by the activity log + alert. + type: string + resourceTypes: + description: A list of resource types monitored by the activity + log alert. + items: + type: string + type: array + serviceHealth: + description: A block to define fine grain service health settings. + properties: + events: + description: Events this alert will monitor Possible values + are Incident, Maintenance, Informational, ActionRequired + and Security. + items: + type: string + type: array + x-kubernetes-list-type: set + locations: + description: Locations this alert will monitor. For example, + West Europe. + items: + type: string + type: array + x-kubernetes-list-type: set + services: + description: Services this alert will monitor. 
For example, + Activity Logs & Alerts, Action Groups. Defaults to all + Services. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + status: + description: The status of the event. For example, Started, + Failed, or Succeeded. + type: string + statuses: + description: A list of status of the event. For example, Started, + Failed, or Succeeded. + items: + type: string + type: array + subStatus: + description: The sub status of the event. + type: string + subStatuses: + description: A list of sub status of the event. + items: + type: string + type: array + type: object + description: + description: The description of this activity log alert. + type: string + enabled: + description: Should this Activity Log Alert be enabled? Defaults + to true. + type: boolean + id: + description: The ID of the activity log alert. + type: string + name: + description: The name of the activity log alert. Changing this + forces a new resource to be created. + type: string + resourceGroupName: + description: The name of the resource group in which to create + the activity log alert instance. Changing this forces a new + resource to be created. + type: string + scopes: + description: The Scope at which the Activity Log should be applied. + A list of strings which could be a resource group , or a subscription, + or a resource ID (such as a Storage Account). + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. 
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/insights.azure.upbound.io_monitorautoscalesettings.yaml b/package/crds/insights.azure.upbound.io_monitorautoscalesettings.yaml index e30d9c6b2..0dacd68f1 100644 --- a/package/crds/insights.azure.upbound.io_monitorautoscalesettings.yaml +++ b/package/crds/insights.azure.upbound.io_monitorautoscalesettings.yaml @@ -1661,3 +1661,1595 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: MonitorAutoscaleSetting is the Schema for the MonitorAutoscaleSettings + API. Manages an AutoScale Setting which can be applied to Virtual Machine + Scale Sets, App Services and other scalable resources. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: MonitorAutoscaleSettingSpec defines the desired state of + MonitorAutoscaleSetting + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + enabled: + description: Specifies whether automatic scaling is enabled for + the target resource. Defaults to true. + type: boolean + location: + description: Specifies the supported Azure location where the + AutoScale Setting should exist. Changing this forces a new resource + to be created. + type: string + name: + description: The name of the AutoScale Setting. Changing this + forces a new resource to be created. + type: string + notification: + description: Specifies a notification block as defined below. + properties: + email: + description: A email block as defined below. + properties: + customEmails: + description: Specifies a list of custom email addresses + to which the email notifications will be sent. + items: + type: string + type: array + sendToSubscriptionAdministrator: + description: Should email notifications be sent to the + subscription administrator? Defaults to false. 
+ type: boolean + sendToSubscriptionCoAdministrator: + description: Should email notifications be sent to the + subscription co-administrator? Defaults to false. + type: boolean + type: object + webhook: + description: One or more webhook blocks as defined below. + items: + properties: + properties: + additionalProperties: + type: string + description: A map of settings. + type: object + x-kubernetes-map-type: granular + serviceUri: + description: The HTTPS URI which should receive scale + notifications. + type: string + type: object + type: array + type: object + predictive: + description: A predictive block as defined below. + properties: + lookAheadTime: + description: Specifies the amount of time by which instances + are launched in advance. It must be between PT1M and PT1H + in ISO 8601 format. + type: string + scaleMode: + description: Specifies the predictive scale mode. Possible + values are Enabled or ForecastOnly. + type: string + type: object + profile: + description: Specifies one or more (up to 20) profile blocks as + defined below. + items: + properties: + capacity: + description: A capacity block as defined below. + properties: + default: + description: The number of instances that are available + for scaling if metrics are not available for evaluation. + The default is only used if the current instance count + is lower than the default. Valid values are between + 0 and 1000. + type: number + maximum: + description: The maximum number of instances for this + resource. Valid values are between 0 and 1000. + type: number + minimum: + description: The minimum number of instances for this + resource. Valid values are between 0 and 1000. + type: number + type: object + fixedDate: + description: A fixed_date block as defined below. This cannot + be specified if a recurrence block is specified. + properties: + end: + description: Specifies the end date for the profile, + formatted as an RFC3339 date string. 
+ type: string + start: + description: Specifies the start date for the profile, + formatted as an RFC3339 date string. + type: string + timezone: + description: The Time Zone used for the hours field. + A list of possible values can be found here. Defaults + to UTC. + type: string + type: object + name: + description: Specifies the name of the profile. + type: string + recurrence: + description: A recurrence block as defined below. This cannot + be specified if a fixed_date block is specified. + properties: + days: + description: A list of days that this profile takes + effect on. Possible values include Monday, Tuesday, + Wednesday, Thursday, Friday, Saturday and Sunday. + items: + type: string + type: array + hours: + description: A list containing a single item, which + specifies the Hour interval at which this recurrence + should be triggered (in 24-hour time). Possible values + are from 0 to 23. + items: + type: number + type: array + minutes: + description: A list containing a single item which specifies + the Minute interval at which this recurrence should + be triggered. + items: + type: number + type: array + timezone: + description: The Time Zone used for the hours field. + A list of possible values can be found here. Defaults + to UTC. + type: string + type: object + rule: + description: One or more (up to 10) rule blocks as defined + below. + items: + properties: + metricTrigger: + description: A metric_trigger block as defined below. + properties: + dimensions: + description: One or more dimensions block as defined + below. + items: + properties: + name: + description: Specifies the name of the profile. + type: string + operator: + description: 'Specifies the operator used + to compare the metric data and threshold. + Possible values are: Equals, NotEquals, + GreaterThan, GreaterThanOrEqual, LessThan, + LessThanOrEqual.' + type: string + values: + description: A list of dimension values. 
+ items: + type: string + type: array + type: object + type: array + divideByInstanceCount: + description: Whether to enable metric divide by + instance count. + type: boolean + metricName: + description: The name of the metric that defines + what the rule monitors, such as Percentage CPU + for Virtual Machine Scale Sets and CpuPercentage + for App Service Plan. + type: string + metricNamespace: + description: The namespace of the metric that + defines what the rule monitors, such as microsoft.compute/virtualmachinescalesets + for Virtual Machine Scale Sets. + type: string + metricResourceId: + description: The ID of the Resource which the + Rule monitors. + type: string + metricResourceIdRef: + description: Reference to a LinuxVirtualMachineScaleSet + in compute to populate metricResourceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metricResourceIdSelector: + description: Selector for a LinuxVirtualMachineScaleSet + in compute to populate metricResourceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + operator: + description: 'Specifies the operator used to compare + the metric data and threshold. Possible values + are: Equals, NotEquals, GreaterThan, GreaterThanOrEqual, + LessThan, LessThanOrEqual.' + type: string + statistic: + description: Specifies how the metrics from multiple + instances are combined. Possible values are + Average, Max, Min and Sum. + type: string + threshold: + description: Specifies the threshold of the metric + that triggers the scale action. + type: number + timeAggregation: + description: Specifies how the data that's collected + should be combined over time. Possible values + include Average, Count, Maximum, Minimum, Last + and Total. + type: string + timeGrain: + description: Specifies the granularity of metrics + that the rule monitors, which must be one of + the pre-defined values returned from the metric + definitions for the metric. This value must + be between 1 minute and 12 hours an be formatted + as an ISO 8601 string. 
+ type: string + timeWindow: + description: Specifies the time range for which + data is collected, which must be greater than + the delay in metric collection (which varies + from resource to resource). This value must + be between 5 minutes and 12 hours and be formatted + as an ISO 8601 string. + type: string + type: object + scaleAction: + description: A scale_action block as defined below. + properties: + cooldown: + description: The amount of time to wait since + the last scaling action before this action occurs. + Must be between 1 minute and 1 week and formatted + as a ISO 8601 string. + type: string + direction: + description: The scale direction. Possible values + are Increase and Decrease. + type: string + type: + description: The type of action that should occur. + Possible values are ChangeCount, ExactCount, + PercentChangeCount and ServiceAllowedNextValue. + type: string + value: + description: The number of instances involved + in the scaling action. + type: number + type: object + type: object + type: array + type: object + type: array + resourceGroupName: + description: The name of the Resource Group in the AutoScale Setting + should be created. Changing this forces a new resource to be + created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + targetResourceId: + description: Specifies the resource ID of the resource that the + autoscale setting should be added to. Changing this forces a + new resource to be created. + type: string + targetResourceIdRef: + description: Reference to a LinuxVirtualMachineScaleSet in compute + to populate targetResourceId. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + targetResourceIdSelector: + description: Selector for a LinuxVirtualMachineScaleSet in compute + to populate targetResourceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + enabled: + description: Specifies whether automatic scaling is enabled for + the target resource. Defaults to true. + type: boolean + location: + description: Specifies the supported Azure location where the + AutoScale Setting should exist. Changing this forces a new resource + to be created. + type: string + name: + description: The name of the AutoScale Setting. Changing this + forces a new resource to be created. + type: string + notification: + description: Specifies a notification block as defined below. + properties: + email: + description: A email block as defined below. + properties: + customEmails: + description: Specifies a list of custom email addresses + to which the email notifications will be sent. + items: + type: string + type: array + sendToSubscriptionAdministrator: + description: Should email notifications be sent to the + subscription administrator? Defaults to false. + type: boolean + sendToSubscriptionCoAdministrator: + description: Should email notifications be sent to the + subscription co-administrator? Defaults to false. 
+ type: boolean + type: object + webhook: + description: One or more webhook blocks as defined below. + items: + properties: + properties: + additionalProperties: + type: string + description: A map of settings. + type: object + x-kubernetes-map-type: granular + serviceUri: + description: The HTTPS URI which should receive scale + notifications. + type: string + type: object + type: array + type: object + predictive: + description: A predictive block as defined below. + properties: + lookAheadTime: + description: Specifies the amount of time by which instances + are launched in advance. It must be between PT1M and PT1H + in ISO 8601 format. + type: string + scaleMode: + description: Specifies the predictive scale mode. Possible + values are Enabled or ForecastOnly. + type: string + type: object + profile: + description: Specifies one or more (up to 20) profile blocks as + defined below. + items: + properties: + capacity: + description: A capacity block as defined below. + properties: + default: + description: The number of instances that are available + for scaling if metrics are not available for evaluation. + The default is only used if the current instance count + is lower than the default. Valid values are between + 0 and 1000. + type: number + maximum: + description: The maximum number of instances for this + resource. Valid values are between 0 and 1000. + type: number + minimum: + description: The minimum number of instances for this + resource. Valid values are between 0 and 1000. + type: number + type: object + fixedDate: + description: A fixed_date block as defined below. This cannot + be specified if a recurrence block is specified. + properties: + end: + description: Specifies the end date for the profile, + formatted as an RFC3339 date string. + type: string + start: + description: Specifies the start date for the profile, + formatted as an RFC3339 date string. + type: string + timezone: + description: The Time Zone used for the hours field. 
+ A list of possible values can be found here. Defaults + to UTC. + type: string + type: object + name: + description: Specifies the name of the profile. + type: string + recurrence: + description: A recurrence block as defined below. This cannot + be specified if a fixed_date block is specified. + properties: + days: + description: A list of days that this profile takes + effect on. Possible values include Monday, Tuesday, + Wednesday, Thursday, Friday, Saturday and Sunday. + items: + type: string + type: array + hours: + description: A list containing a single item, which + specifies the Hour interval at which this recurrence + should be triggered (in 24-hour time). Possible values + are from 0 to 23. + items: + type: number + type: array + minutes: + description: A list containing a single item which specifies + the Minute interval at which this recurrence should + be triggered. + items: + type: number + type: array + timezone: + description: The Time Zone used for the hours field. + A list of possible values can be found here. Defaults + to UTC. + type: string + type: object + rule: + description: One or more (up to 10) rule blocks as defined + below. + items: + properties: + metricTrigger: + description: A metric_trigger block as defined below. + properties: + dimensions: + description: One or more dimensions block as defined + below. + items: + properties: + name: + description: Specifies the name of the profile. + type: string + operator: + description: 'Specifies the operator used + to compare the metric data and threshold. + Possible values are: Equals, NotEquals, + GreaterThan, GreaterThanOrEqual, LessThan, + LessThanOrEqual.' + type: string + values: + description: A list of dimension values. + items: + type: string + type: array + type: object + type: array + divideByInstanceCount: + description: Whether to enable metric divide by + instance count. 
+ type: boolean + metricName: + description: The name of the metric that defines + what the rule monitors, such as Percentage CPU + for Virtual Machine Scale Sets and CpuPercentage + for App Service Plan. + type: string + metricNamespace: + description: The namespace of the metric that + defines what the rule monitors, such as microsoft.compute/virtualmachinescalesets + for Virtual Machine Scale Sets. + type: string + metricResourceId: + description: The ID of the Resource which the + Rule monitors. + type: string + metricResourceIdRef: + description: Reference to a LinuxVirtualMachineScaleSet + in compute to populate metricResourceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metricResourceIdSelector: + description: Selector for a LinuxVirtualMachineScaleSet + in compute to populate metricResourceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object + with matching labels is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + operator: + description: 'Specifies the operator used to compare + the metric data and threshold. Possible values + are: Equals, NotEquals, GreaterThan, GreaterThanOrEqual, + LessThan, LessThanOrEqual.' + type: string + statistic: + description: Specifies how the metrics from multiple + instances are combined. Possible values are + Average, Max, Min and Sum. + type: string + threshold: + description: Specifies the threshold of the metric + that triggers the scale action. + type: number + timeAggregation: + description: Specifies how the data that's collected + should be combined over time. Possible values + include Average, Count, Maximum, Minimum, Last + and Total. + type: string + timeGrain: + description: Specifies the granularity of metrics + that the rule monitors, which must be one of + the pre-defined values returned from the metric + definitions for the metric. This value must + be between 1 minute and 12 hours an be formatted + as an ISO 8601 string. + type: string + timeWindow: + description: Specifies the time range for which + data is collected, which must be greater than + the delay in metric collection (which varies + from resource to resource). 
This value must + be between 5 minutes and 12 hours and be formatted + as an ISO 8601 string. + type: string + type: object + scaleAction: + description: A scale_action block as defined below. + properties: + cooldown: + description: The amount of time to wait since + the last scaling action before this action occurs. + Must be between 1 minute and 1 week and formatted + as a ISO 8601 string. + type: string + direction: + description: The scale direction. Possible values + are Increase and Decrease. + type: string + type: + description: The type of action that should occur. + Possible values are ChangeCount, ExactCount, + PercentChangeCount and ServiceAllowedNextValue. + type: string + value: + description: The number of instances involved + in the scaling action. + type: number + type: object + type: object + type: array + type: object + type: array + resourceGroupName: + description: The name of the Resource Group in the AutoScale Setting + should be created. Changing this forces a new resource to be + created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + targetResourceId: + description: Specifies the resource ID of the resource that the + autoscale setting should be added to. Changing this forces a + new resource to be created. + type: string + targetResourceIdRef: + description: Reference to a LinuxVirtualMachineScaleSet in compute + to populate targetResourceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + targetResourceIdSelector: + description: Selector for a LinuxVirtualMachineScaleSet in compute + to populate targetResourceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. 
+ properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.profile is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.profile) + || (has(self.initProvider) && has(self.initProvider.profile))' + status: + description: MonitorAutoscaleSettingStatus defines the observed state + of MonitorAutoscaleSetting. + properties: + atProvider: + properties: + enabled: + description: Specifies whether automatic scaling is enabled for + the target resource. Defaults to true. + type: boolean + id: + description: The ID of the AutoScale Setting. + type: string + location: + description: Specifies the supported Azure location where the + AutoScale Setting should exist. Changing this forces a new resource + to be created. + type: string + name: + description: The name of the AutoScale Setting. Changing this + forces a new resource to be created. + type: string + notification: + description: Specifies a notification block as defined below. + properties: + email: + description: A email block as defined below. + properties: + customEmails: + description: Specifies a list of custom email addresses + to which the email notifications will be sent. 
+ items: + type: string + type: array + sendToSubscriptionAdministrator: + description: Should email notifications be sent to the + subscription administrator? Defaults to false. + type: boolean + sendToSubscriptionCoAdministrator: + description: Should email notifications be sent to the + subscription co-administrator? Defaults to false. + type: boolean + type: object + webhook: + description: One or more webhook blocks as defined below. + items: + properties: + properties: + additionalProperties: + type: string + description: A map of settings. + type: object + x-kubernetes-map-type: granular + serviceUri: + description: The HTTPS URI which should receive scale + notifications. + type: string + type: object + type: array + type: object + predictive: + description: A predictive block as defined below. + properties: + lookAheadTime: + description: Specifies the amount of time by which instances + are launched in advance. It must be between PT1M and PT1H + in ISO 8601 format. + type: string + scaleMode: + description: Specifies the predictive scale mode. Possible + values are Enabled or ForecastOnly. + type: string + type: object + profile: + description: Specifies one or more (up to 20) profile blocks as + defined below. + items: + properties: + capacity: + description: A capacity block as defined below. + properties: + default: + description: The number of instances that are available + for scaling if metrics are not available for evaluation. + The default is only used if the current instance count + is lower than the default. Valid values are between + 0 and 1000. + type: number + maximum: + description: The maximum number of instances for this + resource. Valid values are between 0 and 1000. + type: number + minimum: + description: The minimum number of instances for this + resource. Valid values are between 0 and 1000. + type: number + type: object + fixedDate: + description: A fixed_date block as defined below. 
This cannot + be specified if a recurrence block is specified. + properties: + end: + description: Specifies the end date for the profile, + formatted as an RFC3339 date string. + type: string + start: + description: Specifies the start date for the profile, + formatted as an RFC3339 date string. + type: string + timezone: + description: The Time Zone used for the hours field. + A list of possible values can be found here. Defaults + to UTC. + type: string + type: object + name: + description: Specifies the name of the profile. + type: string + recurrence: + description: A recurrence block as defined below. This cannot + be specified if a fixed_date block is specified. + properties: + days: + description: A list of days that this profile takes + effect on. Possible values include Monday, Tuesday, + Wednesday, Thursday, Friday, Saturday and Sunday. + items: + type: string + type: array + hours: + description: A list containing a single item, which + specifies the Hour interval at which this recurrence + should be triggered (in 24-hour time). Possible values + are from 0 to 23. + items: + type: number + type: array + minutes: + description: A list containing a single item which specifies + the Minute interval at which this recurrence should + be triggered. + items: + type: number + type: array + timezone: + description: The Time Zone used for the hours field. + A list of possible values can be found here. Defaults + to UTC. + type: string + type: object + rule: + description: One or more (up to 10) rule blocks as defined + below. + items: + properties: + metricTrigger: + description: A metric_trigger block as defined below. + properties: + dimensions: + description: One or more dimensions block as defined + below. + items: + properties: + name: + description: Specifies the name of the profile. + type: string + operator: + description: 'Specifies the operator used + to compare the metric data and threshold. 
+ Possible values are: Equals, NotEquals, + GreaterThan, GreaterThanOrEqual, LessThan, + LessThanOrEqual.' + type: string + values: + description: A list of dimension values. + items: + type: string + type: array + type: object + type: array + divideByInstanceCount: + description: Whether to enable metric divide by + instance count. + type: boolean + metricName: + description: The name of the metric that defines + what the rule monitors, such as Percentage CPU + for Virtual Machine Scale Sets and CpuPercentage + for App Service Plan. + type: string + metricNamespace: + description: The namespace of the metric that + defines what the rule monitors, such as microsoft.compute/virtualmachinescalesets + for Virtual Machine Scale Sets. + type: string + metricResourceId: + description: The ID of the Resource which the + Rule monitors. + type: string + operator: + description: 'Specifies the operator used to compare + the metric data and threshold. Possible values + are: Equals, NotEquals, GreaterThan, GreaterThanOrEqual, + LessThan, LessThanOrEqual.' + type: string + statistic: + description: Specifies how the metrics from multiple + instances are combined. Possible values are + Average, Max, Min and Sum. + type: string + threshold: + description: Specifies the threshold of the metric + that triggers the scale action. + type: number + timeAggregation: + description: Specifies how the data that's collected + should be combined over time. Possible values + include Average, Count, Maximum, Minimum, Last + and Total. + type: string + timeGrain: + description: Specifies the granularity of metrics + that the rule monitors, which must be one of + the pre-defined values returned from the metric + definitions for the metric. This value must + be between 1 minute and 12 hours an be formatted + as an ISO 8601 string. 
+ type: string + timeWindow: + description: Specifies the time range for which + data is collected, which must be greater than + the delay in metric collection (which varies + from resource to resource). This value must + be between 5 minutes and 12 hours and be formatted + as an ISO 8601 string. + type: string + type: object + scaleAction: + description: A scale_action block as defined below. + properties: + cooldown: + description: The amount of time to wait since + the last scaling action before this action occurs. + Must be between 1 minute and 1 week and formatted + as a ISO 8601 string. + type: string + direction: + description: The scale direction. Possible values + are Increase and Decrease. + type: string + type: + description: The type of action that should occur. + Possible values are ChangeCount, ExactCount, + PercentChangeCount and ServiceAllowedNextValue. + type: string + value: + description: The number of instances involved + in the scaling action. + type: number + type: object + type: object + type: array + type: object + type: array + resourceGroupName: + description: The name of the Resource Group in the AutoScale Setting + should be created. Changing this forces a new resource to be + created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + targetResourceId: + description: Specifies the resource ID of the resource that the + autoscale setting should be added to. Changing this forces a + new resource to be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. 
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/insights.azure.upbound.io_monitordatacollectionrules.yaml b/package/crds/insights.azure.upbound.io_monitordatacollectionrules.yaml index 9703e390d..e517d44d4 100644 --- a/package/crds/insights.azure.upbound.io_monitordatacollectionrules.yaml +++ b/package/crds/insights.azure.upbound.io_monitordatacollectionrules.yaml @@ -2940,3 +2940,2820 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: MonitorDataCollectionRule is the Schema for the MonitorDataCollectionRules + API. Manages a Data Collection Rule. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: MonitorDataCollectionRuleSpec defines the desired state of + MonitorDataCollectionRule + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + dataCollectionEndpointId: + description: The resource ID of the Data Collection Endpoint that + this rule can be used with. + type: string + dataCollectionEndpointIdRef: + description: Reference to a MonitorDataCollectionEndpoint in insights + to populate dataCollectionEndpointId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + dataCollectionEndpointIdSelector: + description: Selector for a MonitorDataCollectionEndpoint in insights + to populate dataCollectionEndpointId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + dataFlow: + description: One or more data_flow blocks as defined below. + items: + properties: + builtInTransform: + description: The built-in transform to transform stream + data. + type: string + destinations: + description: Specifies a list of destination names. A azure_monitor_metrics + data source only allows for stream of kind Microsoft-InsightsMetrics. + items: + type: string + type: array + outputStream: + description: The output stream of the transform. Only required + if the data flow changes data to a different stream. 
+ type: string + streams: + description: Specifies a list of streams. Possible values + include but not limited to Microsoft-Event, Microsoft-InsightsMetrics, + Microsoft-Perf, Microsoft-Syslog, Microsoft-WindowsEvent, + and Microsoft-PrometheusMetrics. + items: + type: string + type: array + transformKql: + description: The KQL query to transform stream data. + type: string + type: object + type: array + dataSources: + description: A data_sources block as defined below. This property + is optional and can be omitted if the rule is meant to be used + via direct calls to the provisioned endpoint. + properties: + dataImport: + description: A data_import block as defined above. + properties: + eventHubDataSource: + description: An event_hub_data_source block as defined + below. + items: + properties: + consumerGroup: + description: The Event Hub consumer group name. + type: string + name: + description: The name which should be used for this + data source. This name should be unique across + all data sources regardless of type within the + Data Collection Rule. + type: string + stream: + description: The stream to collect from Event Hub. + Possible value should be a custom stream name. + type: string + type: object + type: array + type: object + extension: + description: One or more extension blocks as defined below. + items: + properties: + extensionJson: + description: A JSON String which specifies the extension + setting. + type: string + extensionName: + description: The name of the VM extension. + type: string + inputDataSources: + description: 'Specifies a list of data sources this + extension needs data from. An item should be a name + of a supported data source which produces only one + stream. Supported data sources type: performance_counter, + windows_event_log,and syslog.' + items: + type: string + type: array + name: + description: The name which should be used for this + data source. 
This name should be unique across all + data sources regardless of type within the Data Collection + Rule. + type: string + streams: + description: Specifies a list of streams that this data + source will be sent to. A stream indicates what schema + will be used for this data and usually what table + in Log Analytics the data will be sent to. + items: + type: string + type: array + type: object + type: array + iisLog: + description: One or more iis_log blocks as defined below. + items: + properties: + logDirectories: + description: Specifies a list of absolute paths where + the log files are located. + items: + type: string + type: array + name: + description: The name which should be used for this + data source. This name should be unique across all + data sources regardless of type within the Data Collection + Rule. + type: string + streams: + description: Specifies a list of streams that this data + source will be sent to. A stream indicates what schema + will be used for this data and usually what table + in Log Analytics the data will be sent to. + items: + type: string + type: array + type: object + type: array + logFile: + description: One or more log_file blocks as defined below. + items: + properties: + filePatterns: + description: Specifies a list of file patterns where + the log files are located. For example, C:\\JavaLogs\\*.log. + items: + type: string + type: array + format: + description: The data format of the log files. possible + value is text. + type: string + name: + description: The name which should be used for this + data source. This name should be unique across all + data sources regardless of type within the Data Collection + Rule. + type: string + settings: + description: A settings block as defined below. + properties: + text: + description: A text block as defined below. + properties: + recordStartTimestampFormat: + description: The timestamp format of the text + log files. 
Possible values are ISO 8601, YYYY-MM-DD + HH:MM:SS, M/D/YYYY HH:MM:SS AM/PM, Mon DD, + YYYY HH:MM:SS, yyMMdd HH:mm:ss, ddMMyy HH:mm:ss, + MMM d hh:mm:ss, dd/MMM/yyyy:HH:mm:ss zzz,and + yyyy-MM-ddTHH:mm:ssK. + type: string + type: object + type: object + streams: + description: Specifies a list of streams that this data + source will be sent to. A stream indicates what schema + will be used for this data and usually what table + in Log Analytics the data will be sent to. + items: + type: string + type: array + type: object + type: array + performanceCounter: + description: One or more performance_counter blocks as defined + below. + items: + properties: + counterSpecifiers: + description: Specifies a list of specifier names of + the performance counters you want to collect. To get + a list of performance counters on Windows, run the + command typeperf. Please see this document for more + information. + items: + type: string + type: array + name: + description: The name which should be used for this + data source. This name should be unique across all + data sources regardless of type within the Data Collection + Rule. + type: string + samplingFrequencyInSeconds: + description: The number of seconds between consecutive + counter measurements (samples). The value should be + integer between 1 and 300 inclusive. sampling_frequency_in_seconds + must be equal to 60 seconds for counters collected + with Microsoft-InsightsMetrics stream. + type: number + streams: + description: Specifies a list of streams that this data + source will be sent to. A stream indicates what schema + will be used for this data and usually what table + in Log Analytics the data will be sent to. + items: + type: string + type: array + type: object + type: array + platformTelemetry: + description: One or more platform_telemetry blocks as defined + below. + items: + properties: + name: + description: The name which should be used for this + data source. 
This name should be unique across all + data sources regardless of type within the Data Collection + Rule. + type: string + streams: + description: Specifies a list of streams that this data + source will be sent to. A stream indicates what schema + will be used for this data and usually what table + in Log Analytics the data will be sent to. + items: + type: string + type: array + type: object + type: array + prometheusForwarder: + description: One or more prometheus_forwarder blocks as defined + below. + items: + properties: + labelIncludeFilter: + description: One or more label_include_filter blocks + as defined above. + items: + properties: + label: + description: The label of the filter. This label + should be unique across all label_include_fileter + block. Possible value is microsoft_metrics_include_label. + type: string + value: + description: The value of the filter. + type: string + type: object + type: array + name: + description: The name which should be used for this + data source. This name should be unique across all + data sources regardless of type within the Data Collection + Rule. + type: string + streams: + description: Specifies a list of streams that this data + source will be sent to. A stream indicates what schema + will be used for this data and usually what table + in Log Analytics the data will be sent to. + items: + type: string + type: array + type: object + type: array + syslog: + description: One or more syslog blocks as defined below. + items: + properties: + facilityNames: + description: Specifies a list of facility names. Use + a wildcard * to collect logs for all facility names. + Possible values are alert, *, audit, auth, authpriv, + clock, cron, daemon, ftp, kern, local5, local4, local1, + local7, local6, local3, local2, local0, lpr, mail, + mark, news, nopri, ntp, syslog, user and uucp. + items: + type: string + type: array + logLevels: + description: Specifies a list of log levels. 
Use a wildcard + * to collect logs for all log levels. Possible values + are Debug, Info, Notice, Warning, Error, Critical, + Alert, Emergency,and *. + items: + type: string + type: array + name: + description: The name which should be used for this + data source. This name should be unique across all + data sources regardless of type within the Data Collection + Rule. + type: string + streams: + description: Specifies a list of streams that this data + source will be sent to. A stream indicates what schema + will be used for this data and usually what table + in Log Analytics the data will be sent to. + items: + type: string + type: array + type: object + type: array + windowsEventLog: + description: One or more windows_event_log blocks as defined + below. + items: + properties: + name: + description: The name which should be used for this + data source. This name should be unique across all + data sources regardless of type within the Data Collection + Rule. + type: string + streams: + description: Specifies a list of streams that this data + source will be sent to. A stream indicates what schema + will be used for this data and usually what table + in Log Analytics the data will be sent to. + items: + type: string + type: array + xPathQueries: + description: Specifies a list of Windows Event Log queries + in XPath expression. Please see this document for + more information. + items: + type: string + type: array + type: object + type: array + windowsFirewallLog: + description: One or more windows_firewall_log blocks as defined + below. + items: + properties: + name: + description: The name which should be used for this + data source. This name should be unique across all + data sources regardless of type within the Data Collection + Rule. + type: string + streams: + description: Specifies a list of streams that this data + source will be sent to. 
A stream indicates what schema + will be used for this data and usually what table + in Log Analytics the data will be sent to. + items: + type: string + type: array + type: object + type: array + type: object + description: + description: The description of the Data Collection Rule. + type: string + destinations: + description: A destinations block as defined below. + properties: + azureMonitorMetrics: + description: A azure_monitor_metrics block as defined above. + properties: + name: + description: The name which should be used for this data + source. This name should be unique across all data sources + regardless of type within the Data Collection Rule. + type: string + type: object + eventHub: + description: One or more event_hub blocks as defined below. + properties: + eventHubId: + description: The resource ID of the Event Hub. + type: string + eventHubIdRef: + description: Reference to a EventHub in eventhub to populate + eventHubId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + eventHubIdSelector: + description: Selector for a EventHub in eventhub to populate + eventHubId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: The name which should be used for this data + source. This name should be unique across all data sources + regardless of type within the Data Collection Rule. + type: string + type: object + eventHubDirect: + description: One or more event_hub blocks as defined below. + properties: + eventHubId: + description: The resource ID of the Event Hub. + type: string + name: + description: The name which should be used for this data + source. This name should be unique across all data sources + regardless of type within the Data Collection Rule. + type: string + type: object + logAnalytics: + description: One or more log_analytics blocks as defined below. + items: + properties: + name: + description: The name which should be used for this + data source. This name should be unique across all + data sources regardless of type within the Data Collection + Rule. 
+ type: string + workspaceResourceId: + description: The ID of a Log Analytic Workspace resource. + type: string + workspaceResourceIdRef: + description: Reference to a Workspace in operationalinsights + to populate workspaceResourceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + workspaceResourceIdSelector: + description: Selector for a Workspace in operationalinsights + to populate workspaceResourceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + monitorAccount: + description: One or more monitor_account blocks as defined + below. + items: + properties: + monitorAccountId: + description: The resource ID of the Monitor Account. + type: string + name: + description: The name which should be used for this + data source. This name should be unique across all + data sources regardless of type within the Data Collection + Rule. + type: string + type: object + type: array + storageBlob: + description: One or more storage_blob blocks as defined below. + items: + properties: + containerName: + description: The Storage Container name. + type: string + containerNameRef: + description: Reference to a Container in storage to + populate containerName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + containerNameSelector: + description: Selector for a Container in storage to + populate containerName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: The name which should be used for this + data source. This name should be unique across all + data sources regardless of type within the Data Collection + Rule. + type: string + storageAccountId: + description: The resource ID of the Storage Account. + type: string + storageAccountIdRef: + description: Reference to a Account in storage to populate + storageAccountId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + storageAccountIdSelector: + description: Selector for a Account in storage to populate + storageAccountId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + storageBlobDirect: + description: One or more storage_blob_direct blocks as defined + below. 
+ items: + properties: + containerName: + description: The Storage Container name. + type: string + name: + description: The name which should be used for this + data source. This name should be unique across all + data sources regardless of type within the Data Collection + Rule. + type: string + storageAccountId: + description: The resource ID of the Storage Account. + type: string + type: object + type: array + storageTableDirect: + description: One or more storage_table_direct blocks as defined + below. + items: + properties: + name: + description: The name which should be used for this + data source. This name should be unique across all + data sources regardless of type within the Data Collection + Rule. + type: string + storageAccountId: + description: The resource ID of the Storage Account. + type: string + tableName: + description: The Storage Table name. + type: string + type: object + type: array + type: object + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of User Assigned Managed Identity IDs + to be assigned to this Data Collection Rule. Currently, + up to 1 identity is supported. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Data Collection Rule. + Possible values are SystemAssigned and UserAssigned. + type: string + type: object + kind: + description: The kind of the Data Collection Rule. Possible values + are Linux, Windows, AgentDirectToStore and WorkspaceTransforms. + A rule of kind Linux does not allow for windows_event_log data + sources. And a rule of kind Windows does not allow for syslog + data sources. If kind is not specified, all kinds of data sources + are allowed. + type: string + location: + description: The Azure Region where the Data Collection Rule should + exist. Changing this forces a new Data Collection Rule to be + created. 
+ type: string + resourceGroupName: + description: The name of the Resource Group where the Data Collection + Rule should exist. Changing this forces a new Data Collection + Rule to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + streamDeclaration: + description: A stream_declaration block as defined below. + items: + properties: + column: + description: One or more column blocks as defined above. + items: + properties: + name: + description: The name which should be used for this + data source. This name should be unique across all + data sources regardless of type within the Data + Collection Rule. + type: string + type: + description: Specifies the type of Managed Service + Identity that should be configured on this Data + Collection Rule. Possible values are SystemAssigned + and UserAssigned. + type: string + type: object + type: array + streamName: + description: The name of the custom stream. This name should + be unique across all stream_declaration blocks. + type: string + type: object + type: array + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Data Collection Rule. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + dataCollectionEndpointId: + description: The resource ID of the Data Collection Endpoint that + this rule can be used with. + type: string + dataCollectionEndpointIdRef: + description: Reference to a MonitorDataCollectionEndpoint in insights + to populate dataCollectionEndpointId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + dataCollectionEndpointIdSelector: + description: Selector for a MonitorDataCollectionEndpoint in insights + to populate dataCollectionEndpointId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + dataFlow: + description: One or more data_flow blocks as defined below. + items: + properties: + builtInTransform: + description: The built-in transform to transform stream + data. + type: string + destinations: + description: Specifies a list of destination names. A azure_monitor_metrics + data source only allows for stream of kind Microsoft-InsightsMetrics. + items: + type: string + type: array + outputStream: + description: The output stream of the transform. Only required + if the data flow changes data to a different stream. + type: string + streams: + description: Specifies a list of streams. Possible values + include but not limited to Microsoft-Event, Microsoft-InsightsMetrics, + Microsoft-Perf, Microsoft-Syslog, Microsoft-WindowsEvent, + and Microsoft-PrometheusMetrics. + items: + type: string + type: array + transformKql: + description: The KQL query to transform stream data. + type: string + type: object + type: array + dataSources: + description: A data_sources block as defined below. This property + is optional and can be omitted if the rule is meant to be used + via direct calls to the provisioned endpoint. + properties: + dataImport: + description: A data_import block as defined above. 
+ properties: + eventHubDataSource: + description: An event_hub_data_source block as defined + below. + items: + properties: + consumerGroup: + description: The Event Hub consumer group name. + type: string + name: + description: The name which should be used for this + data source. This name should be unique across + all data sources regardless of type within the + Data Collection Rule. + type: string + stream: + description: The stream to collect from Event Hub. + Possible value should be a custom stream name. + type: string + type: object + type: array + type: object + extension: + description: One or more extension blocks as defined below. + items: + properties: + extensionJson: + description: A JSON String which specifies the extension + setting. + type: string + extensionName: + description: The name of the VM extension. + type: string + inputDataSources: + description: 'Specifies a list of data sources this + extension needs data from. An item should be a name + of a supported data source which produces only one + stream. Supported data sources type: performance_counter, + windows_event_log,and syslog.' + items: + type: string + type: array + name: + description: The name which should be used for this + data source. This name should be unique across all + data sources regardless of type within the Data Collection + Rule. + type: string + streams: + description: Specifies a list of streams that this data + source will be sent to. A stream indicates what schema + will be used for this data and usually what table + in Log Analytics the data will be sent to. + items: + type: string + type: array + type: object + type: array + iisLog: + description: One or more iis_log blocks as defined below. + items: + properties: + logDirectories: + description: Specifies a list of absolute paths where + the log files are located. + items: + type: string + type: array + name: + description: The name which should be used for this + data source. 
This name should be unique across all + data sources regardless of type within the Data Collection + Rule. + type: string + streams: + description: Specifies a list of streams that this data + source will be sent to. A stream indicates what schema + will be used for this data and usually what table + in Log Analytics the data will be sent to. + items: + type: string + type: array + type: object + type: array + logFile: + description: One or more log_file blocks as defined below. + items: + properties: + filePatterns: + description: Specifies a list of file patterns where + the log files are located. For example, C:\\JavaLogs\\*.log. + items: + type: string + type: array + format: + description: The data format of the log files. possible + value is text. + type: string + name: + description: The name which should be used for this + data source. This name should be unique across all + data sources regardless of type within the Data Collection + Rule. + type: string + settings: + description: A settings block as defined below. + properties: + text: + description: A text block as defined below. + properties: + recordStartTimestampFormat: + description: The timestamp format of the text + log files. Possible values are ISO 8601, YYYY-MM-DD + HH:MM:SS, M/D/YYYY HH:MM:SS AM/PM, Mon DD, + YYYY HH:MM:SS, yyMMdd HH:mm:ss, ddMMyy HH:mm:ss, + MMM d hh:mm:ss, dd/MMM/yyyy:HH:mm:ss zzz,and + yyyy-MM-ddTHH:mm:ssK. + type: string + type: object + type: object + streams: + description: Specifies a list of streams that this data + source will be sent to. A stream indicates what schema + will be used for this data and usually what table + in Log Analytics the data will be sent to. + items: + type: string + type: array + type: object + type: array + performanceCounter: + description: One or more performance_counter blocks as defined + below. + items: + properties: + counterSpecifiers: + description: Specifies a list of specifier names of + the performance counters you want to collect. 
To get + a list of performance counters on Windows, run the + command typeperf. Please see this document for more + information. + items: + type: string + type: array + name: + description: The name which should be used for this + data source. This name should be unique across all + data sources regardless of type within the Data Collection + Rule. + type: string + samplingFrequencyInSeconds: + description: The number of seconds between consecutive + counter measurements (samples). The value should be + integer between 1 and 300 inclusive. sampling_frequency_in_seconds + must be equal to 60 seconds for counters collected + with Microsoft-InsightsMetrics stream. + type: number + streams: + description: Specifies a list of streams that this data + source will be sent to. A stream indicates what schema + will be used for this data and usually what table + in Log Analytics the data will be sent to. + items: + type: string + type: array + type: object + type: array + platformTelemetry: + description: One or more platform_telemetry blocks as defined + below. + items: + properties: + name: + description: The name which should be used for this + data source. This name should be unique across all + data sources regardless of type within the Data Collection + Rule. + type: string + streams: + description: Specifies a list of streams that this data + source will be sent to. A stream indicates what schema + will be used for this data and usually what table + in Log Analytics the data will be sent to. + items: + type: string + type: array + type: object + type: array + prometheusForwarder: + description: One or more prometheus_forwarder blocks as defined + below. + items: + properties: + labelIncludeFilter: + description: One or more label_include_filter blocks + as defined above. + items: + properties: + label: + description: The label of the filter. This label + should be unique across all label_include_fileter + block. Possible value is microsoft_metrics_include_label. 
+ type: string + value: + description: The value of the filter. + type: string + type: object + type: array + name: + description: The name which should be used for this + data source. This name should be unique across all + data sources regardless of type within the Data Collection + Rule. + type: string + streams: + description: Specifies a list of streams that this data + source will be sent to. A stream indicates what schema + will be used for this data and usually what table + in Log Analytics the data will be sent to. + items: + type: string + type: array + type: object + type: array + syslog: + description: One or more syslog blocks as defined below. + items: + properties: + facilityNames: + description: Specifies a list of facility names. Use + a wildcard * to collect logs for all facility names. + Possible values are alert, *, audit, auth, authpriv, + clock, cron, daemon, ftp, kern, local5, local4, local1, + local7, local6, local3, local2, local0, lpr, mail, + mark, news, nopri, ntp, syslog, user and uucp. + items: + type: string + type: array + logLevels: + description: Specifies a list of log levels. Use a wildcard + * to collect logs for all log levels. Possible values + are Debug, Info, Notice, Warning, Error, Critical, + Alert, Emergency,and *. + items: + type: string + type: array + name: + description: The name which should be used for this + data source. This name should be unique across all + data sources regardless of type within the Data Collection + Rule. + type: string + streams: + description: Specifies a list of streams that this data + source will be sent to. A stream indicates what schema + will be used for this data and usually what table + in Log Analytics the data will be sent to. + items: + type: string + type: array + type: object + type: array + windowsEventLog: + description: One or more windows_event_log blocks as defined + below. + items: + properties: + name: + description: The name which should be used for this + data source. 
This name should be unique across all + data sources regardless of type within the Data Collection + Rule. + type: string + streams: + description: Specifies a list of streams that this data + source will be sent to. A stream indicates what schema + will be used for this data and usually what table + in Log Analytics the data will be sent to. + items: + type: string + type: array + xPathQueries: + description: Specifies a list of Windows Event Log queries + in XPath expression. Please see this document for + more information. + items: + type: string + type: array + type: object + type: array + windowsFirewallLog: + description: One or more windows_firewall_log blocks as defined + below. + items: + properties: + name: + description: The name which should be used for this + data source. This name should be unique across all + data sources regardless of type within the Data Collection + Rule. + type: string + streams: + description: Specifies a list of streams that this data + source will be sent to. A stream indicates what schema + will be used for this data and usually what table + in Log Analytics the data will be sent to. + items: + type: string + type: array + type: object + type: array + type: object + description: + description: The description of the Data Collection Rule. + type: string + destinations: + description: A destinations block as defined below. + properties: + azureMonitorMetrics: + description: A azure_monitor_metrics block as defined above. + properties: + name: + description: The name which should be used for this data + source. This name should be unique across all data sources + regardless of type within the Data Collection Rule. + type: string + type: object + eventHub: + description: One or more event_hub blocks as defined below. + properties: + eventHubId: + description: The resource ID of the Event Hub. + type: string + eventHubIdRef: + description: Reference to a EventHub in eventhub to populate + eventHubId. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + eventHubIdSelector: + description: Selector for a EventHub in eventhub to populate + eventHubId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: The name which should be used for this data + source. This name should be unique across all data sources + regardless of type within the Data Collection Rule. + type: string + type: object + eventHubDirect: + description: One or more event_hub blocks as defined below. + properties: + eventHubId: + description: The resource ID of the Event Hub. + type: string + name: + description: The name which should be used for this data + source. This name should be unique across all data sources + regardless of type within the Data Collection Rule. + type: string + type: object + logAnalytics: + description: One or more log_analytics blocks as defined below. + items: + properties: + name: + description: The name which should be used for this + data source. This name should be unique across all + data sources regardless of type within the Data Collection + Rule. + type: string + workspaceResourceId: + description: The ID of a Log Analytic Workspace resource. + type: string + workspaceResourceIdRef: + description: Reference to a Workspace in operationalinsights + to populate workspaceResourceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + workspaceResourceIdSelector: + description: Selector for a Workspace in operationalinsights + to populate workspaceResourceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + monitorAccount: + description: One or more monitor_account blocks as defined + below. + items: + properties: + monitorAccountId: + description: The resource ID of the Monitor Account. + type: string + name: + description: The name which should be used for this + data source. This name should be unique across all + data sources regardless of type within the Data Collection + Rule. + type: string + type: object + type: array + storageBlob: + description: One or more storage_blob blocks as defined below. 
+ items: + properties: + containerName: + description: The Storage Container name. + type: string + containerNameRef: + description: Reference to a Container in storage to + populate containerName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + containerNameSelector: + description: Selector for a Container in storage to + populate containerName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: The name which should be used for this + data source. This name should be unique across all + data sources regardless of type within the Data Collection + Rule. + type: string + storageAccountId: + description: The resource ID of the Storage Account. + type: string + storageAccountIdRef: + description: Reference to a Account in storage to populate + storageAccountId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + storageAccountIdSelector: + description: Selector for a Account in storage to populate + storageAccountId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + storageBlobDirect: + description: One or more storage_blob_direct blocks as defined + below. + items: + properties: + containerName: + description: The Storage Container name. + type: string + name: + description: The name which should be used for this + data source. This name should be unique across all + data sources regardless of type within the Data Collection + Rule. + type: string + storageAccountId: + description: The resource ID of the Storage Account. + type: string + type: object + type: array + storageTableDirect: + description: One or more storage_table_direct blocks as defined + below. + items: + properties: + name: + description: The name which should be used for this + data source. This name should be unique across all + data sources regardless of type within the Data Collection + Rule. + type: string + storageAccountId: + description: The resource ID of the Storage Account. + type: string + tableName: + description: The Storage Table name. 
+ type: string + type: object + type: array + type: object + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of User Assigned Managed Identity IDs + to be assigned to this Data Collection Rule. Currently, + up to 1 identity is supported. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Data Collection Rule. + Possible values are SystemAssigned and UserAssigned. + type: string + type: object + kind: + description: The kind of the Data Collection Rule. Possible values + are Linux, Windows, AgentDirectToStore and WorkspaceTransforms. + A rule of kind Linux does not allow for windows_event_log data + sources. And a rule of kind Windows does not allow for syslog + data sources. If kind is not specified, all kinds of data sources + are allowed. + type: string + location: + description: The Azure Region where the Data Collection Rule should + exist. Changing this forces a new Data Collection Rule to be + created. + type: string + streamDeclaration: + description: A stream_declaration block as defined below. + items: + properties: + column: + description: One or more column blocks as defined above. + items: + properties: + name: + description: The name which should be used for this + data source. This name should be unique across all + data sources regardless of type within the Data + Collection Rule. + type: string + type: + description: Specifies the type of Managed Service + Identity that should be configured on this Data + Collection Rule. Possible values are SystemAssigned + and UserAssigned. + type: string + type: object + type: array + streamName: + description: The name of the custom stream. This name should + be unique across all stream_declaration blocks. 
+ type: string + type: object + type: array + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Data Collection Rule. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.dataFlow is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.dataFlow) + || (has(self.initProvider) && has(self.initProvider.dataFlow))' + - message: spec.forProvider.destinations is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.destinations) + || (has(self.initProvider) && has(self.initProvider.destinations))' + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + status: + description: MonitorDataCollectionRuleStatus defines the observed state + of MonitorDataCollectionRule. + properties: + atProvider: + properties: + dataCollectionEndpointId: + description: The resource ID of the Data Collection Endpoint that + this rule can be used with. + type: string + dataFlow: + description: One or more data_flow blocks as defined below. + items: + properties: + builtInTransform: + description: The built-in transform to transform stream + data. + type: string + destinations: + description: Specifies a list of destination names. A azure_monitor_metrics + data source only allows for stream of kind Microsoft-InsightsMetrics. + items: + type: string + type: array + outputStream: + description: The output stream of the transform. Only required + if the data flow changes data to a different stream. + type: string + streams: + description: Specifies a list of streams. 
Possible values + include but not limited to Microsoft-Event, Microsoft-InsightsMetrics, + Microsoft-Perf, Microsoft-Syslog, Microsoft-WindowsEvent, + and Microsoft-PrometheusMetrics. + items: + type: string + type: array + transformKql: + description: The KQL query to transform stream data. + type: string + type: object + type: array + dataSources: + description: A data_sources block as defined below. This property + is optional and can be omitted if the rule is meant to be used + via direct calls to the provisioned endpoint. + properties: + dataImport: + description: A data_import block as defined above. + properties: + eventHubDataSource: + description: An event_hub_data_source block as defined + below. + items: + properties: + consumerGroup: + description: The Event Hub consumer group name. + type: string + name: + description: The name which should be used for this + data source. This name should be unique across + all data sources regardless of type within the + Data Collection Rule. + type: string + stream: + description: The stream to collect from Event Hub. + Possible value should be a custom stream name. + type: string + type: object + type: array + type: object + extension: + description: One or more extension blocks as defined below. + items: + properties: + extensionJson: + description: A JSON String which specifies the extension + setting. + type: string + extensionName: + description: The name of the VM extension. + type: string + inputDataSources: + description: 'Specifies a list of data sources this + extension needs data from. An item should be a name + of a supported data source which produces only one + stream. Supported data sources type: performance_counter, + windows_event_log,and syslog.' + items: + type: string + type: array + name: + description: The name which should be used for this + data source. This name should be unique across all + data sources regardless of type within the Data Collection + Rule. 
+ type: string + streams: + description: Specifies a list of streams that this data + source will be sent to. A stream indicates what schema + will be used for this data and usually what table + in Log Analytics the data will be sent to. + items: + type: string + type: array + type: object + type: array + iisLog: + description: One or more iis_log blocks as defined below. + items: + properties: + logDirectories: + description: Specifies a list of absolute paths where + the log files are located. + items: + type: string + type: array + name: + description: The name which should be used for this + data source. This name should be unique across all + data sources regardless of type within the Data Collection + Rule. + type: string + streams: + description: Specifies a list of streams that this data + source will be sent to. A stream indicates what schema + will be used for this data and usually what table + in Log Analytics the data will be sent to. + items: + type: string + type: array + type: object + type: array + logFile: + description: One or more log_file blocks as defined below. + items: + properties: + filePatterns: + description: Specifies a list of file patterns where + the log files are located. For example, C:\\JavaLogs\\*.log. + items: + type: string + type: array + format: + description: The data format of the log files. possible + value is text. + type: string + name: + description: The name which should be used for this + data source. This name should be unique across all + data sources regardless of type within the Data Collection + Rule. + type: string + settings: + description: A settings block as defined below. + properties: + text: + description: A text block as defined below. + properties: + recordStartTimestampFormat: + description: The timestamp format of the text + log files. 
Possible values are ISO 8601, YYYY-MM-DD + HH:MM:SS, M/D/YYYY HH:MM:SS AM/PM, Mon DD, + YYYY HH:MM:SS, yyMMdd HH:mm:ss, ddMMyy HH:mm:ss, + MMM d hh:mm:ss, dd/MMM/yyyy:HH:mm:ss zzz,and + yyyy-MM-ddTHH:mm:ssK. + type: string + type: object + type: object + streams: + description: Specifies a list of streams that this data + source will be sent to. A stream indicates what schema + will be used for this data and usually what table + in Log Analytics the data will be sent to. + items: + type: string + type: array + type: object + type: array + performanceCounter: + description: One or more performance_counter blocks as defined + below. + items: + properties: + counterSpecifiers: + description: Specifies a list of specifier names of + the performance counters you want to collect. To get + a list of performance counters on Windows, run the + command typeperf. Please see this document for more + information. + items: + type: string + type: array + name: + description: The name which should be used for this + data source. This name should be unique across all + data sources regardless of type within the Data Collection + Rule. + type: string + samplingFrequencyInSeconds: + description: The number of seconds between consecutive + counter measurements (samples). The value should be + integer between 1 and 300 inclusive. sampling_frequency_in_seconds + must be equal to 60 seconds for counters collected + with Microsoft-InsightsMetrics stream. + type: number + streams: + description: Specifies a list of streams that this data + source will be sent to. A stream indicates what schema + will be used for this data and usually what table + in Log Analytics the data will be sent to. + items: + type: string + type: array + type: object + type: array + platformTelemetry: + description: One or more platform_telemetry blocks as defined + below. + items: + properties: + name: + description: The name which should be used for this + data source. 
This name should be unique across all + data sources regardless of type within the Data Collection + Rule. + type: string + streams: + description: Specifies a list of streams that this data + source will be sent to. A stream indicates what schema + will be used for this data and usually what table + in Log Analytics the data will be sent to. + items: + type: string + type: array + type: object + type: array + prometheusForwarder: + description: One or more prometheus_forwarder blocks as defined + below. + items: + properties: + labelIncludeFilter: + description: One or more label_include_filter blocks + as defined above. + items: + properties: + label: + description: The label of the filter. This label + should be unique across all label_include_fileter + block. Possible value is microsoft_metrics_include_label. + type: string + value: + description: The value of the filter. + type: string + type: object + type: array + name: + description: The name which should be used for this + data source. This name should be unique across all + data sources regardless of type within the Data Collection + Rule. + type: string + streams: + description: Specifies a list of streams that this data + source will be sent to. A stream indicates what schema + will be used for this data and usually what table + in Log Analytics the data will be sent to. + items: + type: string + type: array + type: object + type: array + syslog: + description: One or more syslog blocks as defined below. + items: + properties: + facilityNames: + description: Specifies a list of facility names. Use + a wildcard * to collect logs for all facility names. + Possible values are alert, *, audit, auth, authpriv, + clock, cron, daemon, ftp, kern, local5, local4, local1, + local7, local6, local3, local2, local0, lpr, mail, + mark, news, nopri, ntp, syslog, user and uucp. + items: + type: string + type: array + logLevels: + description: Specifies a list of log levels. 
Use a wildcard + * to collect logs for all log levels. Possible values + are Debug, Info, Notice, Warning, Error, Critical, + Alert, Emergency,and *. + items: + type: string + type: array + name: + description: The name which should be used for this + data source. This name should be unique across all + data sources regardless of type within the Data Collection + Rule. + type: string + streams: + description: Specifies a list of streams that this data + source will be sent to. A stream indicates what schema + will be used for this data and usually what table + in Log Analytics the data will be sent to. + items: + type: string + type: array + type: object + type: array + windowsEventLog: + description: One or more windows_event_log blocks as defined + below. + items: + properties: + name: + description: The name which should be used for this + data source. This name should be unique across all + data sources regardless of type within the Data Collection + Rule. + type: string + streams: + description: Specifies a list of streams that this data + source will be sent to. A stream indicates what schema + will be used for this data and usually what table + in Log Analytics the data will be sent to. + items: + type: string + type: array + xPathQueries: + description: Specifies a list of Windows Event Log queries + in XPath expression. Please see this document for + more information. + items: + type: string + type: array + type: object + type: array + windowsFirewallLog: + description: One or more windows_firewall_log blocks as defined + below. + items: + properties: + name: + description: The name which should be used for this + data source. This name should be unique across all + data sources regardless of type within the Data Collection + Rule. + type: string + streams: + description: Specifies a list of streams that this data + source will be sent to. 
A stream indicates what schema + will be used for this data and usually what table + in Log Analytics the data will be sent to. + items: + type: string + type: array + type: object + type: array + type: object + description: + description: The description of the Data Collection Rule. + type: string + destinations: + description: A destinations block as defined below. + properties: + azureMonitorMetrics: + description: A azure_monitor_metrics block as defined above. + properties: + name: + description: The name which should be used for this data + source. This name should be unique across all data sources + regardless of type within the Data Collection Rule. + type: string + type: object + eventHub: + description: One or more event_hub blocks as defined below. + properties: + eventHubId: + description: The resource ID of the Event Hub. + type: string + name: + description: The name which should be used for this data + source. This name should be unique across all data sources + regardless of type within the Data Collection Rule. + type: string + type: object + eventHubDirect: + description: One or more event_hub blocks as defined below. + properties: + eventHubId: + description: The resource ID of the Event Hub. + type: string + name: + description: The name which should be used for this data + source. This name should be unique across all data sources + regardless of type within the Data Collection Rule. + type: string + type: object + logAnalytics: + description: One or more log_analytics blocks as defined below. + items: + properties: + name: + description: The name which should be used for this + data source. This name should be unique across all + data sources regardless of type within the Data Collection + Rule. + type: string + workspaceResourceId: + description: The ID of a Log Analytic Workspace resource. + type: string + type: object + type: array + monitorAccount: + description: One or more monitor_account blocks as defined + below. 
+ items: + properties: + monitorAccountId: + description: The resource ID of the Monitor Account. + type: string + name: + description: The name which should be used for this + data source. This name should be unique across all + data sources regardless of type within the Data Collection + Rule. + type: string + type: object + type: array + storageBlob: + description: One or more storage_blob blocks as defined below. + items: + properties: + containerName: + description: The Storage Container name. + type: string + name: + description: The name which should be used for this + data source. This name should be unique across all + data sources regardless of type within the Data Collection + Rule. + type: string + storageAccountId: + description: The resource ID of the Storage Account. + type: string + type: object + type: array + storageBlobDirect: + description: One or more storage_blob_direct blocks as defined + below. + items: + properties: + containerName: + description: The Storage Container name. + type: string + name: + description: The name which should be used for this + data source. This name should be unique across all + data sources regardless of type within the Data Collection + Rule. + type: string + storageAccountId: + description: The resource ID of the Storage Account. + type: string + type: object + type: array + storageTableDirect: + description: One or more storage_table_direct blocks as defined + below. + items: + properties: + name: + description: The name which should be used for this + data source. This name should be unique across all + data sources regardless of type within the Data Collection + Rule. + type: string + storageAccountId: + description: The resource ID of the Storage Account. + type: string + tableName: + description: The Storage Table name. + type: string + type: object + type: array + type: object + id: + description: The ID of the Data Collection Rule. 
+ type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of User Assigned Managed Identity IDs + to be assigned to this Data Collection Rule. Currently, + up to 1 identity is supported. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Principal ID associated with this Managed + Service Identity. + type: string + tenantId: + description: The Tenant ID associated with this Managed Service + Identity. + type: string + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Data Collection Rule. + Possible values are SystemAssigned and UserAssigned. + type: string + type: object + immutableId: + description: The immutable ID of the Data Collection Rule. + type: string + kind: + description: The kind of the Data Collection Rule. Possible values + are Linux, Windows, AgentDirectToStore and WorkspaceTransforms. + A rule of kind Linux does not allow for windows_event_log data + sources. And a rule of kind Windows does not allow for syslog + data sources. If kind is not specified, all kinds of data sources + are allowed. + type: string + location: + description: The Azure Region where the Data Collection Rule should + exist. Changing this forces a new Data Collection Rule to be + created. + type: string + resourceGroupName: + description: The name of the Resource Group where the Data Collection + Rule should exist. Changing this forces a new Data Collection + Rule to be created. + type: string + streamDeclaration: + description: A stream_declaration block as defined below. + items: + properties: + column: + description: One or more column blocks as defined above. + items: + properties: + name: + description: The name which should be used for this + data source. This name should be unique across all + data sources regardless of type within the Data + Collection Rule. 
+ type: string + type: + description: Specifies the type of Managed Service + Identity that should be configured on this Data + Collection Rule. Possible values are SystemAssigned + and UserAssigned. + type: string + type: object + type: array + streamName: + description: The name of the custom stream. This name should + be unique across all stream_declaration blocks. + type: string + type: object + type: array + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Data Collection Rule. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/insights.azure.upbound.io_monitordiagnosticsettings.yaml b/package/crds/insights.azure.upbound.io_monitordiagnosticsettings.yaml index 105ce4d6b..9c435ba93 100644 --- a/package/crds/insights.azure.upbound.io_monitordiagnosticsettings.yaml +++ b/package/crds/insights.azure.upbound.io_monitordiagnosticsettings.yaml @@ -847,3 +847,814 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: MonitorDiagnosticSetting is the Schema for the MonitorDiagnosticSettings + API. Manages a Diagnostic Setting for an existing Resource. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: MonitorDiagnosticSettingSpec defines the desired state of + MonitorDiagnosticSetting + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + enabledLog: + description: One or more enabled_log blocks as defined below. + items: + properties: + category: + description: The name of a Diagnostic Log Category for this + Resource. + type: string + categoryGroup: + description: The name of a Diagnostic Log Category Group + for this Resource. + type: string + retentionPolicy: + description: A retention_policy block as defined below. + properties: + days: + description: The number of days for which this Retention + Policy should apply. + type: number + enabled: + description: Is this Retention Policy enabled? + type: boolean + type: object + type: object + type: array + eventhubAuthorizationRuleId: + description: Specifies the ID of an Event Hub Namespace Authorization + Rule used to send Diagnostics Data. + type: string + eventhubName: + description: Specifies the name of the Event Hub where Diagnostics + Data should be sent. 
+ type: string + log: + description: One or more log blocks as defined below. + items: + properties: + category: + description: The name of a Diagnostic Log Category for this + Resource. + type: string + categoryGroup: + description: The name of a Diagnostic Log Category Group + for this Resource. + type: string + enabled: + description: Is this Diagnostic Log enabled? Defaults to + true. + type: boolean + retentionPolicy: + description: A retention_policy block as defined below. + properties: + days: + description: The number of days for which this Retention + Policy should apply. + type: number + enabled: + description: Is this Retention Policy enabled? + type: boolean + type: object + type: object + type: array + logAnalyticsDestinationType: + description: Possible values are AzureDiagnostics and Dedicated. + When set to Dedicated, logs sent to a Log Analytics workspace + will go into resource specific tables, instead of the legacy + AzureDiagnostics table. + type: string + logAnalyticsWorkspaceId: + description: Specifies the ID of a Log Analytics Workspace where + Diagnostics Data should be sent. + type: string + metric: + description: One or more metric blocks as defined below. + items: + properties: + category: + description: The name of a Diagnostic Metric Category for + this Resource. + type: string + enabled: + description: Is this Diagnostic Metric enabled? Defaults + to true. + type: boolean + retentionPolicy: + description: A retention_policy block as defined below. + properties: + days: + description: The number of days for which this Retention + Policy should apply. + type: number + enabled: + description: Is this Retention Policy enabled? + type: boolean + type: object + type: object + type: array + name: + description: Specifies the name of the Diagnostic Setting. Changing + this forces a new resource to be created. + type: string + partnerSolutionId: + description: The ID of the market partner solution where Diagnostics + Data should be sent. 
For potential partner integrations, click + to learn more about partner integration. + type: string + storageAccountId: + description: The ID of the Storage Account where logs should be + sent. + type: string + storageAccountIdRef: + description: Reference to a Account in storage to populate storageAccountId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + storageAccountIdSelector: + description: Selector for a Account in storage to populate storageAccountId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + targetResourceId: + description: The ID of an existing Resource on which to configure + Diagnostic Settings. Changing this forces a new resource to + be created. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + enabledLog: + description: One or more enabled_log blocks as defined below. + items: + properties: + category: + description: The name of a Diagnostic Log Category for this + Resource. + type: string + categoryGroup: + description: The name of a Diagnostic Log Category Group + for this Resource. + type: string + retentionPolicy: + description: A retention_policy block as defined below. + properties: + days: + description: The number of days for which this Retention + Policy should apply. + type: number + enabled: + description: Is this Retention Policy enabled? 
+ type: boolean + type: object + type: object + type: array + eventhubAuthorizationRuleId: + description: Specifies the ID of an Event Hub Namespace Authorization + Rule used to send Diagnostics Data. + type: string + eventhubName: + description: Specifies the name of the Event Hub where Diagnostics + Data should be sent. + type: string + log: + description: One or more log blocks as defined below. + items: + properties: + category: + description: The name of a Diagnostic Log Category for this + Resource. + type: string + categoryGroup: + description: The name of a Diagnostic Log Category Group + for this Resource. + type: string + enabled: + description: Is this Diagnostic Log enabled? Defaults to + true. + type: boolean + retentionPolicy: + description: A retention_policy block as defined below. + properties: + days: + description: The number of days for which this Retention + Policy should apply. + type: number + enabled: + description: Is this Retention Policy enabled? + type: boolean + type: object + type: object + type: array + logAnalyticsDestinationType: + description: Possible values are AzureDiagnostics and Dedicated. + When set to Dedicated, logs sent to a Log Analytics workspace + will go into resource specific tables, instead of the legacy + AzureDiagnostics table. + type: string + logAnalyticsWorkspaceId: + description: Specifies the ID of a Log Analytics Workspace where + Diagnostics Data should be sent. + type: string + metric: + description: One or more metric blocks as defined below. + items: + properties: + category: + description: The name of a Diagnostic Metric Category for + this Resource. + type: string + enabled: + description: Is this Diagnostic Metric enabled? Defaults + to true. + type: boolean + retentionPolicy: + description: A retention_policy block as defined below. + properties: + days: + description: The number of days for which this Retention + Policy should apply. 
+ type: number + enabled: + description: Is this Retention Policy enabled? + type: boolean + type: object + type: object + type: array + name: + description: Specifies the name of the Diagnostic Setting. Changing + this forces a new resource to be created. + type: string + partnerSolutionId: + description: The ID of the market partner solution where Diagnostics + Data should be sent. For potential partner integrations, click + to learn more about partner integration. + type: string + storageAccountId: + description: The ID of the Storage Account where logs should be + sent. + type: string + storageAccountIdRef: + description: Reference to a Account in storage to populate storageAccountId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + storageAccountIdSelector: + description: Selector for a Account in storage to populate storageAccountId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + targetResourceId: + description: The ID of an existing Resource on which to configure + Diagnostic Settings. Changing this forces a new resource to + be created. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.targetResourceId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.targetResourceId) + || (has(self.initProvider) && has(self.initProvider.targetResourceId))' + status: + description: MonitorDiagnosticSettingStatus defines the observed state + of MonitorDiagnosticSetting. + properties: + atProvider: + properties: + enabledLog: + description: One or more enabled_log blocks as defined below. + items: + properties: + category: + description: The name of a Diagnostic Log Category for this + Resource. 
+ type: string + categoryGroup: + description: The name of a Diagnostic Log Category Group + for this Resource. + type: string + retentionPolicy: + description: A retention_policy block as defined below. + properties: + days: + description: The number of days for which this Retention + Policy should apply. + type: number + enabled: + description: Is this Retention Policy enabled? + type: boolean + type: object + type: object + type: array + eventhubAuthorizationRuleId: + description: Specifies the ID of an Event Hub Namespace Authorization + Rule used to send Diagnostics Data. + type: string + eventhubName: + description: Specifies the name of the Event Hub where Diagnostics + Data should be sent. + type: string + id: + description: The ID of the Diagnostic Setting. + type: string + log: + description: One or more log blocks as defined below. + items: + properties: + category: + description: The name of a Diagnostic Log Category for this + Resource. + type: string + categoryGroup: + description: The name of a Diagnostic Log Category Group + for this Resource. + type: string + enabled: + description: Is this Diagnostic Log enabled? Defaults to + true. + type: boolean + retentionPolicy: + description: A retention_policy block as defined below. + properties: + days: + description: The number of days for which this Retention + Policy should apply. + type: number + enabled: + description: Is this Retention Policy enabled? + type: boolean + type: object + type: object + type: array + logAnalyticsDestinationType: + description: Possible values are AzureDiagnostics and Dedicated. + When set to Dedicated, logs sent to a Log Analytics workspace + will go into resource specific tables, instead of the legacy + AzureDiagnostics table. + type: string + logAnalyticsWorkspaceId: + description: Specifies the ID of a Log Analytics Workspace where + Diagnostics Data should be sent. + type: string + metric: + description: One or more metric blocks as defined below. 
+ items: + properties: + category: + description: The name of a Diagnostic Metric Category for + this Resource. + type: string + enabled: + description: Is this Diagnostic Metric enabled? Defaults + to true. + type: boolean + retentionPolicy: + description: A retention_policy block as defined below. + properties: + days: + description: The number of days for which this Retention + Policy should apply. + type: number + enabled: + description: Is this Retention Policy enabled? + type: boolean + type: object + type: object + type: array + name: + description: Specifies the name of the Diagnostic Setting. Changing + this forces a new resource to be created. + type: string + partnerSolutionId: + description: The ID of the market partner solution where Diagnostics + Data should be sent. For potential partner integrations, click + to learn more about partner integration. + type: string + storageAccountId: + description: The ID of the Storage Account where logs should be + sent. + type: string + targetResourceId: + description: The ID of an existing Resource on which to configure + Diagnostic Settings. Changing this forces a new resource to + be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
+ format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/insights.azure.upbound.io_monitormetricalerts.yaml b/package/crds/insights.azure.upbound.io_monitormetricalerts.yaml index 59ce5e22f..b15419c53 100644 --- a/package/crds/insights.azure.upbound.io_monitormetricalerts.yaml +++ b/package/crds/insights.azure.upbound.io_monitormetricalerts.yaml @@ -1307,3 +1307,1280 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: MonitorMetricAlert is the Schema for the MonitorMetricAlerts + API. Manages a Metric Alert within Azure Monitor + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: MonitorMetricAlertSpec defines the desired state of MonitorMetricAlert + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + action: + description: One or more action blocks as defined below. + items: + properties: + actionGroupId: + description: The ID of the Action Group can be sourced from + the + type: string + actionGroupIdRef: + description: Reference to a MonitorActionGroup in insights + to populate actionGroupId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + actionGroupIdSelector: + description: Selector for a MonitorActionGroup in insights + to populate actionGroupId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + webhookProperties: + additionalProperties: + type: string + description: The map of custom string properties to include + with the post operation. 
These data are appended to the + webhook payload. + type: object + x-kubernetes-map-type: granular + type: object + type: array + applicationInsightsWebTestLocationAvailabilityCriteria: + description: A application_insights_web_test_location_availability_criteria + block as defined below. + properties: + componentId: + description: The ID of the Application Insights Resource. + type: string + failedLocationCount: + description: The number of failed locations. + type: number + webTestId: + description: The ID of the Application Insights Web Test. + type: string + type: object + autoMitigate: + description: Should the alerts in this Metric Alert be auto resolved? + Defaults to true. + type: boolean + criteria: + description: One or more (static) criteria blocks as defined below. + items: + properties: + aggregation: + description: The statistic that runs over the metric values. + Possible values are Average, Count, Minimum, Maximum and + Total. + type: string + dimension: + description: One or more dimension blocks as defined below. + items: + properties: + name: + description: The name of the Metric Alert. Changing + this forces a new resource to be created. + type: string + operator: + description: The criteria operator. Possible values + are LessThan, GreaterThan and GreaterOrLessThan. + type: string + values: + description: The list of dimension values. + items: + type: string + type: array + type: object + type: array + metricName: + description: One of the metric names to be monitored. + type: string + metricNamespace: + description: One of the metric namespaces to be monitored. + type: string + operator: + description: The criteria operator. Possible values are + Equals, GreaterThan, GreaterThanOrEqual, LessThan and + LessThanOrEqual. + type: string + skipMetricValidation: + description: Skip the metric validation to allow creating + an alert rule on a custom metric that isn't yet emitted? + Defaults to false. 
+ type: boolean + threshold: + description: The criteria threshold value that activates + the alert. + type: number + type: object + type: array + description: + description: The description of this Metric Alert. + type: string + dynamicCriteria: + description: A dynamic_criteria block as defined below. + properties: + aggregation: + description: The statistic that runs over the metric values. + Possible values are Average, Count, Minimum, Maximum and + Total. + type: string + alertSensitivity: + description: The extent of deviation required to trigger an + alert. Possible values are Low, Medium and High. + type: string + dimension: + description: One or more dimension blocks as defined below. + items: + properties: + name: + description: The name of the Metric Alert. Changing + this forces a new resource to be created. + type: string + operator: + description: The criteria operator. Possible values + are LessThan, GreaterThan and GreaterOrLessThan. + type: string + values: + description: The list of dimension values. + items: + type: string + type: array + type: object + type: array + evaluationFailureCount: + description: The number of violations to trigger an alert. + Should be smaller or equal to evaluation_total_count. Defaults + to 4. + type: number + evaluationTotalCount: + description: The number of aggregated lookback points. The + lookback time window is calculated based on the aggregation + granularity (window_size) and the selected number of aggregated + points. Defaults to 4. + type: number + ignoreDataBefore: + description: The ISO8601 date from which to start learning + the metric historical data and calculate the dynamic thresholds. + type: string + metricName: + description: One of the metric names to be monitored. + type: string + metricNamespace: + description: One of the metric namespaces to be monitored. + type: string + operator: + description: The criteria operator. Possible values are LessThan, + GreaterThan and GreaterOrLessThan. 
+ type: string + skipMetricValidation: + description: Skip the metric validation to allow creating + an alert rule on a custom metric that isn't yet emitted? + type: boolean + type: object + enabled: + description: Should this Metric Alert be enabled? Defaults to + true. + type: boolean + frequency: + description: The evaluation frequency of this Metric Alert, represented + in ISO 8601 duration format. Possible values are PT1M, PT5M, + PT15M, PT30M and PT1H. Defaults to PT1M. + type: string + resourceGroupName: + description: The name of the resource group in which to create + the Metric Alert instance. Changing this forces a new resource + to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + scopes: + description: A set of strings of resource IDs at which the metric + criteria should be applied. + items: + type: string + type: array + x-kubernetes-list-type: set + scopesRefs: + description: References to Account in storage to populate scopes. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + scopesSelector: + description: Selector for a list of Account in storage to populate + scopes. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + severity: + description: The severity of this Metric Alert. Possible values + are 0, 1, 2, 3 and 4. Defaults to 3. + type: number + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + targetResourceLocation: + description: |- + The location of the target resource. + The location of the target pluginsdk. Required when using subscription, resource group scope or multiple scopes. + type: string + targetResourceType: + description: |- + The resource type (e.g. 
Microsoft.Compute/virtualMachines) of the target resource. + The resource type (e.g. Microsoft.Compute/virtualMachines) of the target pluginsdk. Required when using subscription, resource group scope or multiple scopes. + type: string + windowSize: + description: The period of time that is used to monitor alert + activity, represented in ISO 8601 duration format. This value + must be greater than frequency. Possible values are PT1M, PT5M, + PT15M, PT30M, PT1H, PT6H, PT12H and P1D. Defaults to PT5M. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + action: + description: One or more action blocks as defined below. + items: + properties: + actionGroupId: + description: The ID of the Action Group can be sourced from + the + type: string + actionGroupIdRef: + description: Reference to a MonitorActionGroup in insights + to populate actionGroupId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + actionGroupIdSelector: + description: Selector for a MonitorActionGroup in insights + to populate actionGroupId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + webhookProperties: + additionalProperties: + type: string + description: The map of custom string properties to include + with the post operation. These data are appended to the + webhook payload. 
+ type: object + x-kubernetes-map-type: granular + type: object + type: array + applicationInsightsWebTestLocationAvailabilityCriteria: + description: A application_insights_web_test_location_availability_criteria + block as defined below. + properties: + componentId: + description: The ID of the Application Insights Resource. + type: string + failedLocationCount: + description: The number of failed locations. + type: number + webTestId: + description: The ID of the Application Insights Web Test. + type: string + type: object + autoMitigate: + description: Should the alerts in this Metric Alert be auto resolved? + Defaults to true. + type: boolean + criteria: + description: One or more (static) criteria blocks as defined below. + items: + properties: + aggregation: + description: The statistic that runs over the metric values. + Possible values are Average, Count, Minimum, Maximum and + Total. + type: string + dimension: + description: One or more dimension blocks as defined below. + items: + properties: + name: + description: The name of the Metric Alert. Changing + this forces a new resource to be created. + type: string + operator: + description: The criteria operator. Possible values + are LessThan, GreaterThan and GreaterOrLessThan. + type: string + values: + description: The list of dimension values. + items: + type: string + type: array + type: object + type: array + metricName: + description: One of the metric names to be monitored. + type: string + metricNamespace: + description: One of the metric namespaces to be monitored. + type: string + operator: + description: The criteria operator. Possible values are + Equals, GreaterThan, GreaterThanOrEqual, LessThan and + LessThanOrEqual. + type: string + skipMetricValidation: + description: Skip the metric validation to allow creating + an alert rule on a custom metric that isn't yet emitted? + Defaults to false. + type: boolean + threshold: + description: The criteria threshold value that activates + the alert. 
+ type: number + type: object + type: array + description: + description: The description of this Metric Alert. + type: string + dynamicCriteria: + description: A dynamic_criteria block as defined below. + properties: + aggregation: + description: The statistic that runs over the metric values. + Possible values are Average, Count, Minimum, Maximum and + Total. + type: string + alertSensitivity: + description: The extent of deviation required to trigger an + alert. Possible values are Low, Medium and High. + type: string + dimension: + description: One or more dimension blocks as defined below. + items: + properties: + name: + description: The name of the Metric Alert. Changing + this forces a new resource to be created. + type: string + operator: + description: The criteria operator. Possible values + are LessThan, GreaterThan and GreaterOrLessThan. + type: string + values: + description: The list of dimension values. + items: + type: string + type: array + type: object + type: array + evaluationFailureCount: + description: The number of violations to trigger an alert. + Should be smaller or equal to evaluation_total_count. Defaults + to 4. + type: number + evaluationTotalCount: + description: The number of aggregated lookback points. The + lookback time window is calculated based on the aggregation + granularity (window_size) and the selected number of aggregated + points. Defaults to 4. + type: number + ignoreDataBefore: + description: The ISO8601 date from which to start learning + the metric historical data and calculate the dynamic thresholds. + type: string + metricName: + description: One of the metric names to be monitored. + type: string + metricNamespace: + description: One of the metric namespaces to be monitored. + type: string + operator: + description: The criteria operator. Possible values are LessThan, + GreaterThan and GreaterOrLessThan. 
+ type: string + skipMetricValidation: + description: Skip the metric validation to allow creating + an alert rule on a custom metric that isn't yet emitted? + type: boolean + type: object + enabled: + description: Should this Metric Alert be enabled? Defaults to + true. + type: boolean + frequency: + description: The evaluation frequency of this Metric Alert, represented + in ISO 8601 duration format. Possible values are PT1M, PT5M, + PT15M, PT30M and PT1H. Defaults to PT1M. + type: string + scopes: + description: A set of strings of resource IDs at which the metric + criteria should be applied. + items: + type: string + type: array + x-kubernetes-list-type: set + scopesRefs: + description: References to Account in storage to populate scopes. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + scopesSelector: + description: Selector for a list of Account in storage to populate + scopes. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + severity: + description: The severity of this Metric Alert. Possible values + are 0, 1, 2, 3 and 4. Defaults to 3. + type: number + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + targetResourceLocation: + description: |- + The location of the target resource. + The location of the target pluginsdk. Required when using subscription, resource group scope or multiple scopes. + type: string + targetResourceType: + description: |- + The resource type (e.g. Microsoft.Compute/virtualMachines) of the target resource. + The resource type (e.g. Microsoft.Compute/virtualMachines) of the target pluginsdk. Required when using subscription, resource group scope or multiple scopes. + type: string + windowSize: + description: The period of time that is used to monitor alert + activity, represented in ISO 8601 duration format. This value + must be greater than frequency. Possible values are PT1M, PT5M, + PT15M, PT30M, PT1H, PT6H, PT12H and P1D. 
Defaults to PT5M. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. 
+ - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: MonitorMetricAlertStatus defines the observed state of MonitorMetricAlert. + properties: + atProvider: + properties: + action: + description: One or more action blocks as defined below. + items: + properties: + actionGroupId: + description: The ID of the Action Group can be sourced from + the + type: string + webhookProperties: + additionalProperties: + type: string + description: The map of custom string properties to include + with the post operation. 
These data are appended to the + webhook payload. + type: object + x-kubernetes-map-type: granular + type: object + type: array + applicationInsightsWebTestLocationAvailabilityCriteria: + description: A application_insights_web_test_location_availability_criteria + block as defined below. + properties: + componentId: + description: The ID of the Application Insights Resource. + type: string + failedLocationCount: + description: The number of failed locations. + type: number + webTestId: + description: The ID of the Application Insights Web Test. + type: string + type: object + autoMitigate: + description: Should the alerts in this Metric Alert be auto resolved? + Defaults to true. + type: boolean + criteria: + description: One or more (static) criteria blocks as defined below. + items: + properties: + aggregation: + description: The statistic that runs over the metric values. + Possible values are Average, Count, Minimum, Maximum and + Total. + type: string + dimension: + description: One or more dimension blocks as defined below. + items: + properties: + name: + description: The name of the Metric Alert. Changing + this forces a new resource to be created. + type: string + operator: + description: The criteria operator. Possible values + are LessThan, GreaterThan and GreaterOrLessThan. + type: string + values: + description: The list of dimension values. + items: + type: string + type: array + type: object + type: array + metricName: + description: One of the metric names to be monitored. + type: string + metricNamespace: + description: One of the metric namespaces to be monitored. + type: string + operator: + description: The criteria operator. Possible values are + Equals, GreaterThan, GreaterThanOrEqual, LessThan and + LessThanOrEqual. + type: string + skipMetricValidation: + description: Skip the metric validation to allow creating + an alert rule on a custom metric that isn't yet emitted? + Defaults to false. 
+ type: boolean + threshold: + description: The criteria threshold value that activates + the alert. + type: number + type: object + type: array + description: + description: The description of this Metric Alert. + type: string + dynamicCriteria: + description: A dynamic_criteria block as defined below. + properties: + aggregation: + description: The statistic that runs over the metric values. + Possible values are Average, Count, Minimum, Maximum and + Total. + type: string + alertSensitivity: + description: The extent of deviation required to trigger an + alert. Possible values are Low, Medium and High. + type: string + dimension: + description: One or more dimension blocks as defined below. + items: + properties: + name: + description: The name of the Metric Alert. Changing + this forces a new resource to be created. + type: string + operator: + description: The criteria operator. Possible values + are LessThan, GreaterThan and GreaterOrLessThan. + type: string + values: + description: The list of dimension values. + items: + type: string + type: array + type: object + type: array + evaluationFailureCount: + description: The number of violations to trigger an alert. + Should be smaller or equal to evaluation_total_count. Defaults + to 4. + type: number + evaluationTotalCount: + description: The number of aggregated lookback points. The + lookback time window is calculated based on the aggregation + granularity (window_size) and the selected number of aggregated + points. Defaults to 4. + type: number + ignoreDataBefore: + description: The ISO8601 date from which to start learning + the metric historical data and calculate the dynamic thresholds. + type: string + metricName: + description: One of the metric names to be monitored. + type: string + metricNamespace: + description: One of the metric namespaces to be monitored. + type: string + operator: + description: The criteria operator. Possible values are LessThan, + GreaterThan and GreaterOrLessThan. 
+ type: string + skipMetricValidation: + description: Skip the metric validation to allow creating + an alert rule on a custom metric that isn't yet emitted? + type: boolean + type: object + enabled: + description: Should this Metric Alert be enabled? Defaults to + true. + type: boolean + frequency: + description: The evaluation frequency of this Metric Alert, represented + in ISO 8601 duration format. Possible values are PT1M, PT5M, + PT15M, PT30M and PT1H. Defaults to PT1M. + type: string + id: + description: The ID of the metric alert. + type: string + resourceGroupName: + description: The name of the resource group in which to create + the Metric Alert instance. Changing this forces a new resource + to be created. + type: string + scopes: + description: A set of strings of resource IDs at which the metric + criteria should be applied. + items: + type: string + type: array + x-kubernetes-list-type: set + severity: + description: The severity of this Metric Alert. Possible values + are 0, 1, 2, 3 and 4. Defaults to 3. + type: number + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + targetResourceLocation: + description: |- + The location of the target resource. + The location of the target pluginsdk. Required when using subscription, resource group scope or multiple scopes. + type: string + targetResourceType: + description: |- + The resource type (e.g. Microsoft.Compute/virtualMachines) of the target resource. + The resource type (e.g. Microsoft.Compute/virtualMachines) of the target pluginsdk. Required when using subscription, resource group scope or multiple scopes. + type: string + windowSize: + description: The period of time that is used to monitor alert + activity, represented in ISO 8601 duration format. This value + must be greater than frequency. Possible values are PT1M, PT5M, + PT15M, PT30M, PT1H, PT6H, PT12H and P1D. Defaults to PT5M. 
+ type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/insights.azure.upbound.io_monitorscheduledqueryrulesalerts.yaml b/package/crds/insights.azure.upbound.io_monitorscheduledqueryrulesalerts.yaml index 85123c3a6..f6509ca06 100644 --- a/package/crds/insights.azure.upbound.io_monitorscheduledqueryrulesalerts.yaml +++ b/package/crds/insights.azure.upbound.io_monitorscheduledqueryrulesalerts.yaml @@ -1200,3 +1200,1161 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: MonitorScheduledQueryRulesAlert is the Schema for the MonitorScheduledQueryRulesAlerts + API. Manages an AlertingAction Scheduled Query Rules resource within Azure + Monitor + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: MonitorScheduledQueryRulesAlertSpec defines the desired state + of MonitorScheduledQueryRulesAlert + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + action: + description: An action block as defined below. + properties: + actionGroup: + description: List of action group reference resource IDs. + items: + type: string + type: array + x-kubernetes-list-type: set + actionGroupRefs: + description: References to MonitorActionGroup in insights + to populate actionGroup. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + actionGroupSelector: + description: Selector for a list of MonitorActionGroup in + insights to populate actionGroup. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + customWebhookPayload: + description: Custom payload to be sent for all webhook payloads + in alerting action. + type: string + emailSubject: + description: Custom subject override for all email ids in + Azure action group. + type: string + type: object + authorizedResourceIds: + description: List of Resource IDs referred into query. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + autoMitigationEnabled: + description: |- + Should the alerts in this Metric Alert be auto resolved? Defaults to false. + -> NOTE auto_mitigation_enabled and throttling are mutually exclusive and cannot both be set. + type: boolean + dataSourceId: + description: The resource URI over which log search query is to + be run. Changing this forces a new resource to be created. + type: string + dataSourceIdRef: + description: Reference to a ApplicationInsights in insights to + populate dataSourceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + dataSourceIdSelector: + description: Selector for a ApplicationInsights in insights to + populate dataSourceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + description: + description: The description of the scheduled query rule. + type: string + enabled: + description: Whether this scheduled query rule is enabled. Default + is true. + type: boolean + frequency: + description: Frequency (in minutes) at which rule condition should + be evaluated. Values must be between 5 and 1440 (inclusive). + type: number + location: + description: Specifies the Azure Region where the resource should + exist. Changing this forces a new resource to be created. + type: string + name: + description: The name of the scheduled query rule. Changing this + forces a new resource to be created. + type: string + query: + description: Log search query. + type: string + queryType: + description: The type of query results. Possible values are ResultCount + and Number. Default is ResultCount. If set to ResultCount, query + must include an AggregatedValue column of a numeric type, for + example, Heartbeat | summarize AggregatedValue = count() by + bin(TimeGenerated, 5m). + type: string + resourceGroupName: + description: The name of the resource group in which to create + the scheduled query rule instance. Changing this forces a new + resource to be created. 
+ type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + severity: + description: 'Severity of the alert. Possible values include: + 0, 1, 2, 3, or 4.' + type: number + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + throttling: + description: Time (in minutes) for which Alerts should be throttled + or suppressed. Values must be between 0 and 10000 (inclusive). + type: number + timeWindow: + description: Time window for which data needs to be fetched for + query (must be greater than or equal to frequency). Values must + be between 5 and 2880 (inclusive). + type: number + trigger: + description: A trigger block as defined below. + properties: + metricTrigger: + description: A metric_trigger block as defined above. Trigger + condition for metric query rule. + properties: + metricColumn: + description: Evaluation of metric on a particular column. + type: string + metricTriggerType: + description: Metric Trigger Type - 'Consecutive' or 'Total'. + type: string + operator: + description: Evaluation operation for rule - 'GreaterThan', + GreaterThanOrEqual', 'LessThan', or 'LessThanOrEqual'. + type: string + threshold: + description: Result or count threshold based on which + rule should be triggered. Values must be between 0 and + 10000 inclusive. + type: number + type: object + operator: + description: Evaluation operation for rule - 'GreaterThan', + GreaterThanOrEqual', 'LessThan', or 'LessThanOrEqual'. + type: string + threshold: + description: Result or count threshold based on which rule + should be triggered. Values must be between 0 and 10000 + inclusive. 
+ type: number + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + action: + description: An action block as defined below. + properties: + actionGroup: + description: List of action group reference resource IDs. + items: + type: string + type: array + x-kubernetes-list-type: set + actionGroupRefs: + description: References to MonitorActionGroup in insights + to populate actionGroup. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + actionGroupSelector: + description: Selector for a list of MonitorActionGroup in + insights to populate actionGroup. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + customWebhookPayload: + description: Custom payload to be sent for all webhook payloads + in alerting action. + type: string + emailSubject: + description: Custom subject override for all email ids in + Azure action group. + type: string + type: object + authorizedResourceIds: + description: List of Resource IDs referred into query. + items: + type: string + type: array + x-kubernetes-list-type: set + autoMitigationEnabled: + description: |- + Should the alerts in this Metric Alert be auto resolved? Defaults to false. + -> NOTE auto_mitigation_enabled and throttling are mutually exclusive and cannot both be set. 
+ type: boolean + dataSourceId: + description: The resource URI over which log search query is to + be run. Changing this forces a new resource to be created. + type: string + dataSourceIdRef: + description: Reference to a ApplicationInsights in insights to + populate dataSourceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + dataSourceIdSelector: + description: Selector for a ApplicationInsights in insights to + populate dataSourceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + description: + description: The description of the scheduled query rule. + type: string + enabled: + description: Whether this scheduled query rule is enabled. Default + is true. + type: boolean + frequency: + description: Frequency (in minutes) at which rule condition should + be evaluated. Values must be between 5 and 1440 (inclusive). + type: number + location: + description: Specifies the Azure Region where the resource should + exist. Changing this forces a new resource to be created. + type: string + name: + description: The name of the scheduled query rule. Changing this + forces a new resource to be created. + type: string + query: + description: Log search query. + type: string + queryType: + description: The type of query results. Possible values are ResultCount + and Number. Default is ResultCount. If set to ResultCount, query + must include an AggregatedValue column of a numeric type, for + example, Heartbeat | summarize AggregatedValue = count() by + bin(TimeGenerated, 5m). + type: string + resourceGroupName: + description: The name of the resource group in which to create + the scheduled query rule instance. Changing this forces a new + resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + severity: + description: 'Severity of the alert. Possible values include: + 0, 1, 2, 3, or 4.' + type: number + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + throttling: + description: Time (in minutes) for which Alerts should be throttled + or suppressed. Values must be between 0 and 10000 (inclusive). + type: number + timeWindow: + description: Time window for which data needs to be fetched for + query (must be greater than or equal to frequency). Values must + be between 5 and 2880 (inclusive). + type: number + trigger: + description: A trigger block as defined below. + properties: + metricTrigger: + description: A metric_trigger block as defined above. Trigger + condition for metric query rule. + properties: + metricColumn: + description: Evaluation of metric on a particular column. + type: string + metricTriggerType: + description: Metric Trigger Type - 'Consecutive' or 'Total'. + type: string + operator: + description: Evaluation operation for rule - 'GreaterThan', + GreaterThanOrEqual', 'LessThan', or 'LessThanOrEqual'. + type: string + threshold: + description: Result or count threshold based on which + rule should be triggered. Values must be between 0 and + 10000 inclusive. + type: number + type: object + operator: + description: Evaluation operation for rule - 'GreaterThan', + GreaterThanOrEqual', 'LessThan', or 'LessThanOrEqual'. + type: string + threshold: + description: Result or count threshold based on which rule + should be triggered. Values must be between 0 and 10000 + inclusive. + type: number + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. 
+ ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.action is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.action) + || (has(self.initProvider) && has(self.initProvider.action))' + - message: spec.forProvider.frequency is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.frequency) + || (has(self.initProvider) && has(self.initProvider.frequency))' + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.query is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.query) + || (has(self.initProvider) && has(self.initProvider.query))' + - message: spec.forProvider.timeWindow is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.timeWindow) + || (has(self.initProvider) && has(self.initProvider.timeWindow))' + - message: spec.forProvider.trigger is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in 
self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.trigger) + || (has(self.initProvider) && has(self.initProvider.trigger))' + status: + description: MonitorScheduledQueryRulesAlertStatus defines the observed + state of MonitorScheduledQueryRulesAlert. + properties: + atProvider: + properties: + action: + description: An action block as defined below. + properties: + actionGroup: + description: List of action group reference resource IDs. + items: + type: string + type: array + x-kubernetes-list-type: set + customWebhookPayload: + description: Custom payload to be sent for all webhook payloads + in alerting action. + type: string + emailSubject: + description: Custom subject override for all email ids in + Azure action group. + type: string + type: object + authorizedResourceIds: + description: List of Resource IDs referred into query. + items: + type: string + type: array + x-kubernetes-list-type: set + autoMitigationEnabled: + description: |- + Should the alerts in this Metric Alert be auto resolved? Defaults to false. + -> NOTE auto_mitigation_enabled and throttling are mutually exclusive and cannot both be set. + type: boolean + dataSourceId: + description: The resource URI over which log search query is to + be run. Changing this forces a new resource to be created. + type: string + description: + description: The description of the scheduled query rule. + type: string + enabled: + description: Whether this scheduled query rule is enabled. Default + is true. + type: boolean + frequency: + description: Frequency (in minutes) at which rule condition should + be evaluated. Values must be between 5 and 1440 (inclusive). + type: number + id: + description: The ID of the scheduled query rule. + type: string + location: + description: Specifies the Azure Region where the resource should + exist. Changing this forces a new resource to be created. + type: string + name: + description: The name of the scheduled query rule. 
Changing this + forces a new resource to be created. + type: string + query: + description: Log search query. + type: string + queryType: + description: The type of query results. Possible values are ResultCount + and Number. Default is ResultCount. If set to ResultCount, query + must include an AggregatedValue column of a numeric type, for + example, Heartbeat | summarize AggregatedValue = count() by + bin(TimeGenerated, 5m). + type: string + resourceGroupName: + description: The name of the resource group in which to create + the scheduled query rule instance. Changing this forces a new + resource to be created. + type: string + severity: + description: 'Severity of the alert. Possible values include: + 0, 1, 2, 3, or 4.' + type: number + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + throttling: + description: Time (in minutes) for which Alerts should be throttled + or suppressed. Values must be between 0 and 10000 (inclusive). + type: number + timeWindow: + description: Time window for which data needs to be fetched for + query (must be greater than or equal to frequency). Values must + be between 5 and 2880 (inclusive). + type: number + trigger: + description: A trigger block as defined below. + properties: + metricTrigger: + description: A metric_trigger block as defined above. Trigger + condition for metric query rule. + properties: + metricColumn: + description: Evaluation of metric on a particular column. + type: string + metricTriggerType: + description: Metric Trigger Type - 'Consecutive' or 'Total'. + type: string + operator: + description: Evaluation operation for rule - 'GreaterThan', + GreaterThanOrEqual', 'LessThan', or 'LessThanOrEqual'. + type: string + threshold: + description: Result or count threshold based on which + rule should be triggered. Values must be between 0 and + 10000 inclusive. 
+ type: number + type: object + operator: + description: Evaluation operation for rule - 'GreaterThan', + GreaterThanOrEqual', 'LessThan', or 'LessThanOrEqual'. + type: string + threshold: + description: Result or count threshold based on which rule + should be triggered. Values must be between 0 and 10000 + inclusive. + type: number + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/insights.azure.upbound.io_monitorscheduledqueryrulesalertv2s.yaml b/package/crds/insights.azure.upbound.io_monitorscheduledqueryrulesalertv2s.yaml index 79c6bf061..431cf20b4 100644 --- a/package/crds/insights.azure.upbound.io_monitorscheduledqueryrulesalertv2s.yaml +++ b/package/crds/insights.azure.upbound.io_monitorscheduledqueryrulesalertv2s.yaml @@ -1131,3 +1131,1101 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: MonitorScheduledQueryRulesAlertV2 is the Schema for the MonitorScheduledQueryRulesAlertV2s + API. Manages an AlertingAction Scheduled Query Rules Version 2 resource + within Azure Monitor + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: MonitorScheduledQueryRulesAlertV2Spec defines the desired + state of MonitorScheduledQueryRulesAlertV2 + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + action: + description: An action block as defined below. + properties: + actionGroups: + description: List of Action Group resource IDs to invoke when + the alert fires. + items: + type: string + type: array + customProperties: + additionalProperties: + type: string + description: Specifies the properties of an alert payload. + type: object + x-kubernetes-map-type: granular + type: object + autoMitigationEnabled: + description: Specifies the flag that indicates whether the alert + should be automatically resolved or not. Value should be true + or false. The default is false. + type: boolean + criteria: + description: A criteria block as defined below. + items: + properties: + dimension: + description: A dimension block as defined below. + items: + properties: + name: + description: Specifies the name which should be used + for this Monitor Scheduled Query Rule. Changing + this forces a new resource to be created. + type: string + operator: + description: Operator for dimension values. 
Possible + values are Exclude,and Include. + type: string + values: + description: List of dimension values. Use a wildcard + * to collect all. + items: + type: string + type: array + type: object + type: array + failingPeriods: + description: A failing_periods block as defined below. + properties: + minimumFailingPeriodsToTriggerAlert: + description: Specifies the number of violations to trigger + an alert. Should be smaller or equal to number_of_evaluation_periods. + Possible value is integer between 1 and 6. + type: number + numberOfEvaluationPeriods: + description: Specifies the number of aggregated look-back + points. The look-back time window is calculated based + on the aggregation granularity window_duration and + the selected number of aggregated points. Possible + value is integer between 1 and 6. + type: number + type: object + metricMeasureColumn: + description: Specifies the column containing the metric + measure number. + type: string + operator: + description: Specifies the criteria operator. Possible values + are Equal, GreaterThan, GreaterThanOrEqual, LessThan,and + LessThanOrEqual. + type: string + query: + description: The query to run on logs. The results returned + by this query are used to populate the alert. + type: string + resourceIdColumn: + description: Specifies the column containing the resource + ID. The content of the column must be an uri formatted + as resource ID. + type: string + threshold: + description: Specifies the criteria threshold value that + activates the alert. + type: number + timeAggregationMethod: + description: The type of aggregation to apply to the data + points in aggregation granularity. Possible values are + Average, Count, Maximum, Minimum,and Total. + type: string + type: object + type: array + description: + description: Specifies the description of the scheduled query + rule. + type: string + displayName: + description: Specifies the display name of the alert rule. 
+ type: string + enabled: + description: Specifies the flag which indicates whether this scheduled + query rule is enabled. Value should be true or false. Defaults + to true. + type: boolean + evaluationFrequency: + description: How often the scheduled query rule is evaluated, + represented in ISO 8601 duration format. Possible values are + PT1M, PT5M, PT10M, PT15M, PT30M, PT45M, PT1H, PT2H, PT3H, PT4H, + PT5H, PT6H, P1D. + type: string + location: + description: Specifies the Azure Region where the Monitor Scheduled + Query Rule should exist. Changing this forces a new resource + to be created. + type: string + muteActionsAfterAlertDuration: + description: Mute actions for the chosen period of time in ISO + 8601 duration format after the alert is fired. Possible values + are PT5M, PT10M, PT15M, PT30M, PT45M, PT1H, PT2H, PT3H, PT4H, + PT5H, PT6H, P1D and P2D. + type: string + queryTimeRangeOverride: + description: Set this if the alert evaluation period is different + from the query time range. If not specified, the value is window_duration*number_of_evaluation_periods. + Possible values are PT5M, PT10M, PT15M, PT20M, PT30M, PT45M, + PT1H, PT2H, PT3H, PT4H, PT5H, PT6H, P1D and P2D. + type: string + resourceGroupName: + description: Specifies the name of the Resource Group where the + Monitor Scheduled Query Rule should exist. Changing this forces + a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + scopes: + description: Specifies the list of resource IDs that this scheduled + query rule is scoped to. Changing this forces a new resource + to be created. Currently, the API supports exactly 1 resource + ID in the scopes list. 
+ items: + type: string + type: array + scopesRefs: + description: References to ApplicationInsights in insights to + populate scopes. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + scopesSelector: + description: Selector for a list of ApplicationInsights in insights + to populate scopes. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + severity: + description: Severity of the alert. Should be an integer between + 0 and 4. Value of 0 is severest. + type: number + skipQueryValidation: + description: Specifies the flag which indicates whether the provided + query should be validated or not. The default is false. + type: boolean + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Monitor Scheduled Query Rule. + type: object + x-kubernetes-map-type: granular + targetResourceTypes: + description: List of resource type of the target resource(s) on + which the alert is created/updated. For example if the scope + is a resource group and targetResourceTypes is Microsoft.Compute/virtualMachines, + then a different alert will be fired for each virtual machine + in the resource group which meet the alert criteria. + items: + type: string + type: array + windowDuration: + description: Specifies the period of time in ISO 8601 duration + format on which the Scheduled Query Rule will be executed (bin + size). If evaluation_frequency is PT1M, possible values are + PT1M, PT5M, PT10M, PT15M, PT30M, PT45M, PT1H, PT2H, PT3H, PT4H, + PT5H, and PT6H. Otherwise, possible values are PT5M, PT10M, + PT15M, PT30M, PT45M, PT1H, PT2H, PT3H, PT4H, PT5H, PT6H, P1D, + and P2D. + type: string + workspaceAlertsStorageEnabled: + description: Specifies the flag which indicates whether this scheduled + query rule check if storage is configured. Value should be true + or false. The default is false. 
+ type: boolean + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + action: + description: An action block as defined below. + properties: + actionGroups: + description: List of Action Group resource IDs to invoke when + the alert fires. + items: + type: string + type: array + customProperties: + additionalProperties: + type: string + description: Specifies the properties of an alert payload. + type: object + x-kubernetes-map-type: granular + type: object + autoMitigationEnabled: + description: Specifies the flag that indicates whether the alert + should be automatically resolved or not. Value should be true + or false. The default is false. + type: boolean + criteria: + description: A criteria block as defined below. + items: + properties: + dimension: + description: A dimension block as defined below. + items: + properties: + name: + description: Specifies the name which should be used + for this Monitor Scheduled Query Rule. Changing + this forces a new resource to be created. + type: string + operator: + description: Operator for dimension values. Possible + values are Exclude,and Include. + type: string + values: + description: List of dimension values. Use a wildcard + * to collect all. 
+ items: + type: string + type: array + type: object + type: array + failingPeriods: + description: A failing_periods block as defined below. + properties: + minimumFailingPeriodsToTriggerAlert: + description: Specifies the number of violations to trigger + an alert. Should be smaller or equal to number_of_evaluation_periods. + Possible value is integer between 1 and 6. + type: number + numberOfEvaluationPeriods: + description: Specifies the number of aggregated look-back + points. The look-back time window is calculated based + on the aggregation granularity window_duration and + the selected number of aggregated points. Possible + value is integer between 1 and 6. + type: number + type: object + metricMeasureColumn: + description: Specifies the column containing the metric + measure number. + type: string + operator: + description: Specifies the criteria operator. Possible values + are Equal, GreaterThan, GreaterThanOrEqual, LessThan,and + LessThanOrEqual. + type: string + query: + description: The query to run on logs. The results returned + by this query are used to populate the alert. + type: string + resourceIdColumn: + description: Specifies the column containing the resource + ID. The content of the column must be an uri formatted + as resource ID. + type: string + threshold: + description: Specifies the criteria threshold value that + activates the alert. + type: number + timeAggregationMethod: + description: The type of aggregation to apply to the data + points in aggregation granularity. Possible values are + Average, Count, Maximum, Minimum,and Total. + type: string + type: object + type: array + description: + description: Specifies the description of the scheduled query + rule. + type: string + displayName: + description: Specifies the display name of the alert rule. + type: string + enabled: + description: Specifies the flag which indicates whether this scheduled + query rule is enabled. Value should be true or false. Defaults + to true. 
+ type: boolean + evaluationFrequency: + description: How often the scheduled query rule is evaluated, + represented in ISO 8601 duration format. Possible values are + PT1M, PT5M, PT10M, PT15M, PT30M, PT45M, PT1H, PT2H, PT3H, PT4H, + PT5H, PT6H, P1D. + type: string + location: + description: Specifies the Azure Region where the Monitor Scheduled + Query Rule should exist. Changing this forces a new resource + to be created. + type: string + muteActionsAfterAlertDuration: + description: Mute actions for the chosen period of time in ISO + 8601 duration format after the alert is fired. Possible values + are PT5M, PT10M, PT15M, PT30M, PT45M, PT1H, PT2H, PT3H, PT4H, + PT5H, PT6H, P1D and P2D. + type: string + queryTimeRangeOverride: + description: Set this if the alert evaluation period is different + from the query time range. If not specified, the value is window_duration*number_of_evaluation_periods. + Possible values are PT5M, PT10M, PT15M, PT20M, PT30M, PT45M, + PT1H, PT2H, PT3H, PT4H, PT5H, PT6H, P1D and P2D. + type: string + scopes: + description: Specifies the list of resource IDs that this scheduled + query rule is scoped to. Changing this forces a new resource + to be created. Currently, the API supports exactly 1 resource + ID in the scopes list. + items: + type: string + type: array + scopesRefs: + description: References to ApplicationInsights in insights to + populate scopes. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + scopesSelector: + description: Selector for a list of ApplicationInsights in insights + to populate scopes. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + severity: + description: Severity of the alert. Should be an integer between + 0 and 4. Value of 0 is severest. + type: number + skipQueryValidation: + description: Specifies the flag which indicates whether the provided + query should be validated or not. The default is false. 
+ type: boolean + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Monitor Scheduled Query Rule. + type: object + x-kubernetes-map-type: granular + targetResourceTypes: + description: List of resource type of the target resource(s) on + which the alert is created/updated. For example if the scope + is a resource group and targetResourceTypes is Microsoft.Compute/virtualMachines, + then a different alert will be fired for each virtual machine + in the resource group which meet the alert criteria. + items: + type: string + type: array + windowDuration: + description: Specifies the period of time in ISO 8601 duration + format on which the Scheduled Query Rule will be executed (bin + size). If evaluation_frequency is PT1M, possible values are + PT1M, PT5M, PT10M, PT15M, PT30M, PT45M, PT1H, PT2H, PT3H, PT4H, + PT5H, and PT6H. Otherwise, possible values are PT5M, PT10M, + PT15M, PT30M, PT45M, PT1H, PT2H, PT3H, PT4H, PT5H, PT6H, P1D, + and P2D. + type: string + workspaceAlertsStorageEnabled: + description: Specifies the flag which indicates whether this scheduled + query rule check if storage is configured. Value should be true + or false. The default is false. + type: boolean + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.criteria is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.criteria) + || (has(self.initProvider) && has(self.initProvider.criteria))' + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.severity is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.severity) + || (has(self.initProvider) && has(self.initProvider.severity))' + - message: spec.forProvider.windowDuration is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + 
|| ''Update'' in self.managementPolicies) || has(self.forProvider.windowDuration) + || (has(self.initProvider) && has(self.initProvider.windowDuration))' + status: + description: MonitorScheduledQueryRulesAlertV2Status defines the observed + state of MonitorScheduledQueryRulesAlertV2. + properties: + atProvider: + properties: + action: + description: An action block as defined below. + properties: + actionGroups: + description: List of Action Group resource IDs to invoke when + the alert fires. + items: + type: string + type: array + customProperties: + additionalProperties: + type: string + description: Specifies the properties of an alert payload. + type: object + x-kubernetes-map-type: granular + type: object + autoMitigationEnabled: + description: Specifies the flag that indicates whether the alert + should be automatically resolved or not. Value should be true + or false. The default is false. + type: boolean + createdWithApiVersion: + description: The api-version used when creating this alert rule. + type: string + criteria: + description: A criteria block as defined below. + items: + properties: + dimension: + description: A dimension block as defined below. + items: + properties: + name: + description: Specifies the name which should be used + for this Monitor Scheduled Query Rule. Changing + this forces a new resource to be created. + type: string + operator: + description: Operator for dimension values. Possible + values are Exclude,and Include. + type: string + values: + description: List of dimension values. Use a wildcard + * to collect all. + items: + type: string + type: array + type: object + type: array + failingPeriods: + description: A failing_periods block as defined below. + properties: + minimumFailingPeriodsToTriggerAlert: + description: Specifies the number of violations to trigger + an alert. Should be smaller or equal to number_of_evaluation_periods. + Possible value is integer between 1 and 6. 
+ type: number + numberOfEvaluationPeriods: + description: Specifies the number of aggregated look-back + points. The look-back time window is calculated based + on the aggregation granularity window_duration and + the selected number of aggregated points. Possible + value is integer between 1 and 6. + type: number + type: object + metricMeasureColumn: + description: Specifies the column containing the metric + measure number. + type: string + operator: + description: Specifies the criteria operator. Possible values + are Equal, GreaterThan, GreaterThanOrEqual, LessThan,and + LessThanOrEqual. + type: string + query: + description: The query to run on logs. The results returned + by this query are used to populate the alert. + type: string + resourceIdColumn: + description: Specifies the column containing the resource + ID. The content of the column must be an uri formatted + as resource ID. + type: string + threshold: + description: Specifies the criteria threshold value that + activates the alert. + type: number + timeAggregationMethod: + description: The type of aggregation to apply to the data + points in aggregation granularity. Possible values are + Average, Count, Maximum, Minimum,and Total. + type: string + type: object + type: array + description: + description: Specifies the description of the scheduled query + rule. + type: string + displayName: + description: Specifies the display name of the alert rule. + type: string + enabled: + description: Specifies the flag which indicates whether this scheduled + query rule is enabled. Value should be true or false. Defaults + to true. + type: boolean + evaluationFrequency: + description: How often the scheduled query rule is evaluated, + represented in ISO 8601 duration format. Possible values are + PT1M, PT5M, PT10M, PT15M, PT30M, PT45M, PT1H, PT2H, PT3H, PT4H, + PT5H, PT6H, P1D. + type: string + id: + description: The ID of the Monitor Scheduled Query Rule. 
+ type: string + isALegacyLogAnalyticsRule: + description: True if this alert rule is a legacy Log Analytic + Rule. + type: boolean + isWorkspaceAlertsStorageConfigured: + description: The flag indicates whether this Scheduled Query Rule + has been configured to be stored in the customer's storage. + type: boolean + location: + description: Specifies the Azure Region where the Monitor Scheduled + Query Rule should exist. Changing this forces a new resource + to be created. + type: string + muteActionsAfterAlertDuration: + description: Mute actions for the chosen period of time in ISO + 8601 duration format after the alert is fired. Possible values + are PT5M, PT10M, PT15M, PT30M, PT45M, PT1H, PT2H, PT3H, PT4H, + PT5H, PT6H, P1D and P2D. + type: string + queryTimeRangeOverride: + description: Set this if the alert evaluation period is different + from the query time range. If not specified, the value is window_duration*number_of_evaluation_periods. + Possible values are PT5M, PT10M, PT15M, PT20M, PT30M, PT45M, + PT1H, PT2H, PT3H, PT4H, PT5H, PT6H, P1D and P2D. + type: string + resourceGroupName: + description: Specifies the name of the Resource Group where the + Monitor Scheduled Query Rule should exist. Changing this forces + a new resource to be created. + type: string + scopes: + description: Specifies the list of resource IDs that this scheduled + query rule is scoped to. Changing this forces a new resource + to be created. Currently, the API supports exactly 1 resource + ID in the scopes list. + items: + type: string + type: array + severity: + description: Severity of the alert. Should be an integer between + 0 and 4. Value of 0 is severest. + type: number + skipQueryValidation: + description: Specifies the flag which indicates whether the provided + query should be validated or not. The default is false. 
+ type: boolean + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Monitor Scheduled Query Rule. + type: object + x-kubernetes-map-type: granular + targetResourceTypes: + description: List of resource type of the target resource(s) on + which the alert is created/updated. For example if the scope + is a resource group and targetResourceTypes is Microsoft.Compute/virtualMachines, + then a different alert will be fired for each virtual machine + in the resource group which meet the alert criteria. + items: + type: string + type: array + windowDuration: + description: Specifies the period of time in ISO 8601 duration + format on which the Scheduled Query Rule will be executed (bin + size). If evaluation_frequency is PT1M, possible values are + PT1M, PT5M, PT10M, PT15M, PT30M, PT45M, PT1H, PT2H, PT3H, PT4H, + PT5H, and PT6H. Otherwise, possible values are PT5M, PT10M, + PT15M, PT30M, PT45M, PT1H, PT2H, PT3H, PT4H, PT5H, PT6H, P1D, + and P2D. + type: string + workspaceAlertsStorageEnabled: + description: Specifies the flag which indicates whether this scheduled + query rule check if storage is configured. Value should be true + or false. The default is false. + type: boolean + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/insights.azure.upbound.io_monitorscheduledqueryruleslogs.yaml b/package/crds/insights.azure.upbound.io_monitorscheduledqueryruleslogs.yaml index d6ba89e39..6080c1a5d 100644 --- a/package/crds/insights.azure.upbound.io_monitorscheduledqueryruleslogs.yaml +++ b/package/crds/insights.azure.upbound.io_monitorscheduledqueryruleslogs.yaml @@ -840,3 +840,819 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: 
MonitorScheduledQueryRulesLog is the Schema for the MonitorScheduledQueryRulesLogs + API. Manages a LogToMetricAction Scheduled Query Rules resources within + Azure Monitor + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: MonitorScheduledQueryRulesLogSpec defines the desired state + of MonitorScheduledQueryRulesLog + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + authorizedResourceIds: + description: A list of IDs of Resources referred into query. + items: + type: string + type: array + x-kubernetes-list-type: set + criteria: + description: A criteria block as defined below. 
+ properties: + dimension: + description: A dimension block as defined below. + items: + properties: + name: + description: Name of the dimension. + type: string + operator: + description: Operator for dimension values, - 'Include'. + Defaults to Include. + type: string + values: + description: List of dimension values. + items: + type: string + type: array + type: object + type: array + metricName: + description: Name of the metric. Supported metrics are listed + in the Azure Monitor Microsoft.OperationalInsights/workspaces + metrics namespace. + type: string + type: object + dataSourceId: + description: The resource URI over which log search query is to + be run. Changing this forces a new resource to be created. + type: string + dataSourceIdRef: + description: Reference to a Workspace in operationalinsights to + populate dataSourceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + dataSourceIdSelector: + description: Selector for a Workspace in operationalinsights to + populate dataSourceId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + description: + description: The description of the scheduled query rule. + type: string + enabled: + description: Whether this scheduled query rule is enabled. Default + is true. + type: boolean + location: + description: Specifies the Azure Region where the resource should + exist. Changing this forces a new resource to be created. + type: string + name: + description: The name of the scheduled query rule. Changing this + forces a new resource to be created. + type: string + resourceGroupName: + description: The name of the resource group in which to create + the scheduled query rule instance. Changing this forces a new + resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + authorizedResourceIds: + description: A list of IDs of Resources referred into query. + items: + type: string + type: array + x-kubernetes-list-type: set + criteria: + description: A criteria block as defined below. + properties: + dimension: + description: A dimension block as defined below. + items: + properties: + name: + description: Name of the dimension. + type: string + operator: + description: Operator for dimension values, - 'Include'. + Defaults to Include. + type: string + values: + description: List of dimension values. + items: + type: string + type: array + type: object + type: array + metricName: + description: Name of the metric. Supported metrics are listed + in the Azure Monitor Microsoft.OperationalInsights/workspaces + metrics namespace. + type: string + type: object + dataSourceId: + description: The resource URI over which log search query is to + be run. Changing this forces a new resource to be created. 
+ type: string + dataSourceIdRef: + description: Reference to a Workspace in operationalinsights to + populate dataSourceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + dataSourceIdSelector: + description: Selector for a Workspace in operationalinsights to + populate dataSourceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + description: + description: The description of the scheduled query rule. + type: string + enabled: + description: Whether this scheduled query rule is enabled. Default + is true. + type: boolean + location: + description: Specifies the Azure Region where the resource should + exist. Changing this forces a new resource to be created. + type: string + name: + description: The name of the scheduled query rule. Changing this + forces a new resource to be created. + type: string + resourceGroupName: + description: The name of the resource group in which to create + the scheduled query rule instance. Changing this forces a new + resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. 
If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.criteria is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.criteria) + || (has(self.initProvider) && has(self.initProvider.criteria))' + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: MonitorScheduledQueryRulesLogStatus defines the observed + state of MonitorScheduledQueryRulesLog. + properties: + atProvider: + properties: + authorizedResourceIds: + description: A list of IDs of Resources referred into query. + items: + type: string + type: array + x-kubernetes-list-type: set + criteria: + description: A criteria block as defined below. + properties: + dimension: + description: A dimension block as defined below. + items: + properties: + name: + description: Name of the dimension. + type: string + operator: + description: Operator for dimension values, - 'Include'. + Defaults to Include. + type: string + values: + description: List of dimension values. + items: + type: string + type: array + type: object + type: array + metricName: + description: Name of the metric. Supported metrics are listed + in the Azure Monitor Microsoft.OperationalInsights/workspaces + metrics namespace. 
+ type: string + type: object + dataSourceId: + description: The resource URI over which log search query is to + be run. Changing this forces a new resource to be created. + type: string + description: + description: The description of the scheduled query rule. + type: string + enabled: + description: Whether this scheduled query rule is enabled. Default + is true. + type: boolean + id: + description: The ID of the scheduled query rule. + type: string + location: + description: Specifies the Azure Region where the resource should + exist. Changing this forces a new resource to be created. + type: string + name: + description: The name of the scheduled query rule. Changing this + forces a new resource to be created. + type: string + resourceGroupName: + description: The name of the resource group in which to create + the scheduled query rule instance. Changing this forces a new + resource to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
+ format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/iotcentral.azure.upbound.io_applications.yaml b/package/crds/iotcentral.azure.upbound.io_applications.yaml index 36042e97a..4a9d548be 100644 --- a/package/crds/iotcentral.azure.upbound.io_applications.yaml +++ b/package/crds/iotcentral.azure.upbound.io_applications.yaml @@ -654,3 +654,633 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Application is the Schema for the Applications API. Manages an + IotCentral Application + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ApplicationSpec defines the desired state of Application + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + displayName: + description: A display_name name. Custom display name for the + IoT Central application. Default is resource name. + type: string + identity: + description: An identity block as defined below. + properties: + type: + description: Specifies the type of Managed Service Identity + that should be configured on this IoT Central Application. + The only possible value is SystemAssigned. + type: string + type: object + location: + description: Specifies the supported Azure location where the + resource has to be create. Changing this forces a new resource + to be created. 
+ type: string + name: + description: Specifies the name of the IotHub resource. Changing + this forces a new resource to be created. + type: string + publicNetworkAccessEnabled: + description: Whether public network access is allowed for the + IoT Central Application. Defaults to true. + type: boolean + resourceGroupName: + description: The name of the resource group under which the IotHub + resource has to be created. Changing this forces a new resource + to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sku: + description: A sku name. Possible values is ST0, ST1, ST2, Default + value is ST1 + type: string + subDomain: + description: A sub_domain name. Subdomain for the IoT Central + URL. Each application must have a unique subdomain. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + template: + description: A template name. IoT Central application template + name. Default is a custom application. Changing this forces + a new resource to be created. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + displayName: + description: A display_name name. Custom display name for the + IoT Central application. Default is resource name. + type: string + identity: + description: An identity block as defined below. + properties: + type: + description: Specifies the type of Managed Service Identity + that should be configured on this IoT Central Application. + The only possible value is SystemAssigned. + type: string + type: object + location: + description: Specifies the supported Azure location where the + resource has to be create. Changing this forces a new resource + to be created. + type: string + name: + description: Specifies the name of the IotHub resource. Changing + this forces a new resource to be created. + type: string + publicNetworkAccessEnabled: + description: Whether public network access is allowed for the + IoT Central Application. Defaults to true. + type: boolean + resourceGroupName: + description: The name of the resource group under which the IotHub + resource has to be created. Changing this forces a new resource + to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sku: + description: A sku name. Possible values is ST0, ST1, ST2, Default + value is ST1 + type: string + subDomain: + description: A sub_domain name. Subdomain for the IoT Central + URL. Each application must have a unique subdomain. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + template: + description: A template name. 
IoT Central application template + name. Default is a custom application. Changing this forces + a new resource to be created. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. 
+ properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.subDomain is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.subDomain) + || (has(self.initProvider) && has(self.initProvider.subDomain))' + status: + description: ApplicationStatus defines the observed state of Application. + properties: + atProvider: + properties: + displayName: + description: A display_name name. Custom display name for the + IoT Central application. Default is resource name. + type: string + id: + description: The ID of the IoT Central Application. + type: string + identity: + description: An identity block as defined below. + properties: + principalId: + description: The Principal ID associated with this Managed + Service Identity. + type: string + tenantId: + description: The Tenant ID associated with this Managed Service + Identity. + type: string + type: + description: Specifies the type of Managed Service Identity + that should be configured on this IoT Central Application. + The only possible value is SystemAssigned. + type: string + type: object + location: + description: Specifies the supported Azure location where the + resource has to be create. 
Changing this forces a new resource + to be created. + type: string + name: + description: Specifies the name of the IotHub resource. Changing + this forces a new resource to be created. + type: string + publicNetworkAccessEnabled: + description: Whether public network access is allowed for the + IoT Central Application. Defaults to true. + type: boolean + resourceGroupName: + description: The name of the resource group under which the IotHub + resource has to be created. Changing this forces a new resource + to be created. + type: string + sku: + description: A sku name. Possible values is ST0, ST1, ST2, Default + value is ST1 + type: string + subDomain: + description: A sub_domain name. Subdomain for the IoT Central + URL. Each application must have a unique subdomain. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + template: + description: A template name. IoT Central application template + name. Default is a custom application. Changing this forces + a new resource to be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
+ format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/keyvault.azure.upbound.io_certificates.yaml b/package/crds/keyvault.azure.upbound.io_certificates.yaml index ccc0971f6..97f5ca990 100644 --- a/package/crds/keyvault.azure.upbound.io_certificates.yaml +++ b/package/crds/keyvault.azure.upbound.io_certificates.yaml @@ -1123,3 +1123,1048 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Certificate is the Schema for the Certificates API. Manages a + Key Vault Certificate. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: CertificateSpec defines the desired state of Certificate + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + certificate: + description: A certificate block as defined below, used to Import + an existing certificate. Changing this will create a new version + of the Key Vault Certificate. + properties: + contentsSecretRef: + description: The base64-encoded certificate contents. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - key + - name + - namespace + type: object + passwordSecretRef: + description: The password associated with the certificate. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + required: + - contentsSecretRef + type: object + certificatePolicy: + description: A certificate_policy block as defined below. Changing + this (except the lifetime_action field) will create a new version + of the Key Vault Certificate. + properties: + issuerParameters: + description: A issuer_parameters block as defined below. + properties: + name: + description: The name of the Certificate Issuer. Possible + values include Self (for self-signed certificate), or + Unknown (for a certificate issuing authority like Let's + Encrypt and Azure direct supported ones). + type: string + type: object + keyProperties: + description: A key_properties block as defined below. + properties: + curve: + description: Specifies the curve to use when creating + an EC key. Possible values are P-256, P-256K, P-384, + and P-521. This field will be required in a future release + if key_type is EC or EC-HSM. + type: string + exportable: + description: Is this certificate exportable? + type: boolean + keySize: + description: The size of the key used in the certificate. + Possible values include 2048, 3072, and 4096 for RSA + keys, or 256, 384, and 521 for EC keys. This property + is required when using RSA keys. + type: number + keyType: + description: Specifies the type of key. Possible values + are EC, EC-HSM, RSA, RSA-HSM and oct. + type: string + reuseKey: + description: Is the key reusable? + type: boolean + type: object + lifetimeAction: + description: A lifetime_action block as defined below. + items: + properties: + action: + description: A action block as defined below. 
+ properties: + actionType: + description: The Type of action to be performed + when the lifetime trigger is triggerec. Possible + values include AutoRenew and EmailContacts. + type: string + type: object + trigger: + description: A trigger block as defined below. + properties: + daysBeforeExpiry: + description: The number of days before the Certificate + expires that the action associated with this Trigger + should run. Conflicts with lifetime_percentage. + type: number + lifetimePercentage: + description: The percentage at which during the + Certificates Lifetime the action associated with + this Trigger should run. Conflicts with days_before_expiry. + type: number + type: object + type: object + type: array + secretProperties: + description: A secret_properties block as defined below. + properties: + contentType: + description: The Content-Type of the Certificate, such + as application/x-pkcs12 for a PFX or application/x-pem-file + for a PEM. + type: string + type: object + x509CertificateProperties: + description: A x509_certificate_properties block as defined + below. Required when certificate block is not specified. + properties: + extendedKeyUsage: + description: A list of Extended/Enhanced Key Usages. + items: + type: string + type: array + keyUsage: + description: A list of uses associated with this Key. + Possible values include cRLSign, dataEncipherment, decipherOnly, + digitalSignature, encipherOnly, keyAgreement, keyCertSign, + keyEncipherment and nonRepudiation and are case-sensitive. + items: + type: string + type: array + x-kubernetes-list-type: set + subject: + description: The Certificate's Subject. + type: string + subjectAlternativeNames: + description: A subject_alternative_names block as defined + below. + properties: + dnsNames: + description: A list of alternative DNS names (FQDNs) + identified by the Certificate. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + emails: + description: A list of email addresses identified + by this Certificate. + items: + type: string + type: array + x-kubernetes-list-type: set + upns: + description: A list of User Principal Names identified + by the Certificate. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + validityInMonths: + description: The Certificates Validity Period in Months. + type: number + type: object + type: object + keyVaultId: + description: The ID of the Key Vault where the Certificate should + be created. Changing this forces a new resource to be created. + type: string + keyVaultIdRef: + description: Reference to a Vault in keyvault to populate keyVaultId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + keyVaultIdSelector: + description: Selector for a Vault in keyvault to populate keyVaultId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: Specifies the name of the Key Vault Certificate. + Changing this forces a new resource to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. 
+ properties: + certificate: + description: A certificate block as defined below, used to Import + an existing certificate. Changing this will create a new version + of the Key Vault Certificate. + type: object + certificatePolicy: + description: A certificate_policy block as defined below. Changing + this (except the lifetime_action field) will create a new version + of the Key Vault Certificate. + properties: + issuerParameters: + description: A issuer_parameters block as defined below. + properties: + name: + description: The name of the Certificate Issuer. Possible + values include Self (for self-signed certificate), or + Unknown (for a certificate issuing authority like Let's + Encrypt and Azure direct supported ones). + type: string + type: object + keyProperties: + description: A key_properties block as defined below. + properties: + curve: + description: Specifies the curve to use when creating + an EC key. Possible values are P-256, P-256K, P-384, + and P-521. This field will be required in a future release + if key_type is EC or EC-HSM. + type: string + exportable: + description: Is this certificate exportable? + type: boolean + keySize: + description: The size of the key used in the certificate. + Possible values include 2048, 3072, and 4096 for RSA + keys, or 256, 384, and 521 for EC keys. This property + is required when using RSA keys. + type: number + keyType: + description: Specifies the type of key. Possible values + are EC, EC-HSM, RSA, RSA-HSM and oct. + type: string + reuseKey: + description: Is the key reusable? + type: boolean + type: object + lifetimeAction: + description: A lifetime_action block as defined below. + items: + properties: + action: + description: A action block as defined below. + properties: + actionType: + description: The Type of action to be performed + when the lifetime trigger is triggerec. Possible + values include AutoRenew and EmailContacts. 
+ type: string + type: object + trigger: + description: A trigger block as defined below. + properties: + daysBeforeExpiry: + description: The number of days before the Certificate + expires that the action associated with this Trigger + should run. Conflicts with lifetime_percentage. + type: number + lifetimePercentage: + description: The percentage at which during the + Certificates Lifetime the action associated with + this Trigger should run. Conflicts with days_before_expiry. + type: number + type: object + type: object + type: array + secretProperties: + description: A secret_properties block as defined below. + properties: + contentType: + description: The Content-Type of the Certificate, such + as application/x-pkcs12 for a PFX or application/x-pem-file + for a PEM. + type: string + type: object + x509CertificateProperties: + description: A x509_certificate_properties block as defined + below. Required when certificate block is not specified. + properties: + extendedKeyUsage: + description: A list of Extended/Enhanced Key Usages. + items: + type: string + type: array + keyUsage: + description: A list of uses associated with this Key. + Possible values include cRLSign, dataEncipherment, decipherOnly, + digitalSignature, encipherOnly, keyAgreement, keyCertSign, + keyEncipherment and nonRepudiation and are case-sensitive. + items: + type: string + type: array + x-kubernetes-list-type: set + subject: + description: The Certificate's Subject. + type: string + subjectAlternativeNames: + description: A subject_alternative_names block as defined + below. + properties: + dnsNames: + description: A list of alternative DNS names (FQDNs) + identified by the Certificate. + items: + type: string + type: array + x-kubernetes-list-type: set + emails: + description: A list of email addresses identified + by this Certificate. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + upns: + description: A list of User Principal Names identified + by the Certificate. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + validityInMonths: + description: The Certificates Validity Period in Months. + type: number + type: object + type: object + keyVaultId: + description: The ID of the Key Vault where the Certificate should + be created. Changing this forces a new resource to be created. + type: string + keyVaultIdRef: + description: Reference to a Vault in keyvault to populate keyVaultId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + keyVaultIdSelector: + description: Selector for a Vault in keyvault to populate keyVaultId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: Specifies the name of the Key Vault Certificate. + Changing this forces a new resource to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: CertificateStatus defines the observed state of Certificate. + properties: + atProvider: + properties: + certificate: + description: A certificate block as defined below, used to Import + an existing certificate. Changing this will create a new version + of the Key Vault Certificate. + properties: + contentsSecretRef: + description: The base64-encoded certificate contents. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + passwordSecretRef: + description: The password associated with the certificate. 
+ properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + required: + - contentsSecretRef + type: object + certificateAttribute: + description: A certificate_attribute block as defined below. + items: + properties: + created: + description: The create time of the Key Vault Certificate. + type: string + enabled: + description: whether the Key Vault Certificate is enabled. + type: boolean + expires: + description: The expires time of the Key Vault Certificate. + type: string + notBefore: + description: The not before valid time of the Key Vault + Certificate. + type: string + recoveryLevel: + description: The deletion recovery level of the Key Vault + Certificate. + type: string + updated: + description: The recent update time of the Key Vault Certificate. + type: string + type: object + type: array + certificateData: + description: The raw Key Vault Certificate data represented as + a hexadecimal string. + type: string + certificateDataBase64: + description: The Base64 encoded Key Vault Certificate data. + type: string + certificatePolicy: + description: A certificate_policy block as defined below. Changing + this (except the lifetime_action field) will create a new version + of the Key Vault Certificate. + properties: + issuerParameters: + description: A issuer_parameters block as defined below. + properties: + name: + description: The name of the Certificate Issuer. Possible + values include Self (for self-signed certificate), or + Unknown (for a certificate issuing authority like Let's + Encrypt and Azure direct supported ones). + type: string + type: object + keyProperties: + description: A key_properties block as defined below. + properties: + curve: + description: Specifies the curve to use when creating + an EC key. 
Possible values are P-256, P-256K, P-384, + and P-521. This field will be required in a future release + if key_type is EC or EC-HSM. + type: string + exportable: + description: Is this certificate exportable? + type: boolean + keySize: + description: The size of the key used in the certificate. + Possible values include 2048, 3072, and 4096 for RSA + keys, or 256, 384, and 521 for EC keys. This property + is required when using RSA keys. + type: number + keyType: + description: Specifies the type of key. Possible values + are EC, EC-HSM, RSA, RSA-HSM and oct. + type: string + reuseKey: + description: Is the key reusable? + type: boolean + type: object + lifetimeAction: + description: A lifetime_action block as defined below. + items: + properties: + action: + description: A action block as defined below. + properties: + actionType: + description: The Type of action to be performed + when the lifetime trigger is triggerec. Possible + values include AutoRenew and EmailContacts. + type: string + type: object + trigger: + description: A trigger block as defined below. + properties: + daysBeforeExpiry: + description: The number of days before the Certificate + expires that the action associated with this Trigger + should run. Conflicts with lifetime_percentage. + type: number + lifetimePercentage: + description: The percentage at which during the + Certificates Lifetime the action associated with + this Trigger should run. Conflicts with days_before_expiry. + type: number + type: object + type: object + type: array + secretProperties: + description: A secret_properties block as defined below. + properties: + contentType: + description: The Content-Type of the Certificate, such + as application/x-pkcs12 for a PFX or application/x-pem-file + for a PEM. + type: string + type: object + x509CertificateProperties: + description: A x509_certificate_properties block as defined + below. Required when certificate block is not specified. 
+ properties: + extendedKeyUsage: + description: A list of Extended/Enhanced Key Usages. + items: + type: string + type: array + keyUsage: + description: A list of uses associated with this Key. + Possible values include cRLSign, dataEncipherment, decipherOnly, + digitalSignature, encipherOnly, keyAgreement, keyCertSign, + keyEncipherment and nonRepudiation and are case-sensitive. + items: + type: string + type: array + x-kubernetes-list-type: set + subject: + description: The Certificate's Subject. + type: string + subjectAlternativeNames: + description: A subject_alternative_names block as defined + below. + properties: + dnsNames: + description: A list of alternative DNS names (FQDNs) + identified by the Certificate. + items: + type: string + type: array + x-kubernetes-list-type: set + emails: + description: A list of email addresses identified + by this Certificate. + items: + type: string + type: array + x-kubernetes-list-type: set + upns: + description: A list of User Principal Names identified + by the Certificate. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + validityInMonths: + description: The Certificates Validity Period in Months. + type: number + type: object + type: object + id: + description: The Key Vault Certificate ID. + type: string + keyVaultId: + description: The ID of the Key Vault where the Certificate should + be created. Changing this forces a new resource to be created. + type: string + name: + description: Specifies the name of the Key Vault Certificate. + Changing this forces a new resource to be created. + type: string + resourceManagerId: + description: The (Versioned) ID for this Key Vault Certificate. + This property points to a specific version of a Key Vault Certificate, + as such using this won't auto-rotate values if used in other + Azure Services. + type: string + resourceManagerVersionlessId: + description: The Versionless ID of the Key Vault Certificate. 
+ This property allows other Azure Services (that support it) + to auto-rotate their value when the Key Vault Certificate is + updated. + type: string + secretId: + description: The ID of the associated Key Vault Secret. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + thumbprint: + description: The X509 Thumbprint of the Key Vault Certificate + represented as a hexadecimal string. + type: string + version: + description: The current version of the Key Vault Certificate. + type: string + versionlessId: + description: The Base ID of the Key Vault Certificate. + type: string + versionlessSecretId: + description: The Base ID of the Key Vault Secret. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/keyvault.azure.upbound.io_keys.yaml b/package/crds/keyvault.azure.upbound.io_keys.yaml index 35795fcaa..3756a138f 100644 --- a/package/crds/keyvault.azure.upbound.io_keys.yaml +++ b/package/crds/keyvault.azure.upbound.io_keys.yaml @@ -748,3 +748,721 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Key is the Schema for the Keys API. Manages a Key Vault Key. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: KeySpec defines the desired state of Key + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + curve: + description: Specifies the curve to use when creating an EC key. + Possible values are P-256, P-256K, P-384, and P-521. This field + will be required in a future release if key_type is EC or EC-HSM. + The API will default to P-256 if nothing is specified. Changing + this forces a new resource to be created. + type: string + expirationDate: + description: Expiration UTC datetime (Y-m-d'T'H:M:S'Z'). When + this parameter gets changed on reruns, if newer date is ahead + of current date, an update is performed. If the newer date is + before the current date, resource will be force created. + type: string + keyOpts: + description: 'A list of JSON web key operations. Possible values + include: decrypt, encrypt, sign, unwrapKey, verify and wrapKey. + Please note these values are case sensitive.' + items: + type: string + type: array + keySize: + description: 'Specifies the Size of the RSA key to create in bytes. + For example, 1024 or 2048. Note: This field is required if key_type + is RSA or RSA-HSM. 
Changing this forces a new resource to be + created.' + type: number + keyType: + description: Specifies the Key Type to use for this Key Vault + Key. Possible values are EC (Elliptic Curve), EC-HSM, RSA and + RSA-HSM. Changing this forces a new resource to be created. + type: string + keyVaultId: + description: The ID of the Key Vault where the Key should be created. + Changing this forces a new resource to be created. + type: string + keyVaultIdRef: + description: Reference to a Vault in keyvault to populate keyVaultId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + keyVaultIdSelector: + description: Selector for a Vault in keyvault to populate keyVaultId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: Specifies the name of the Key Vault Key. Changing + this forces a new resource to be created. + type: string + notBeforeDate: + description: Key not usable before the provided UTC datetime (Y-m-d'T'H:M:S'Z'). + type: string + rotationPolicy: + description: A rotation_policy block as defined below. + properties: + automatic: + description: An automatic block as defined below. + properties: + timeAfterCreation: + description: Rotate automatically at a duration after + create as an ISO 8601 duration. + type: string + timeBeforeExpiry: + description: Rotate automatically at a duration before + expiry as an ISO 8601 duration. + type: string + type: object + expireAfter: + description: Expire a Key Vault Key after given duration as + an ISO 8601 duration. + type: string + notifyBeforeExpiry: + description: Notify at a given duration before expiry as an + ISO 8601 duration. + type: string + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. 
+ InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + curve: + description: Specifies the curve to use when creating an EC key. + Possible values are P-256, P-256K, P-384, and P-521. This field + will be required in a future release if key_type is EC or EC-HSM. + The API will default to P-256 if nothing is specified. Changing + this forces a new resource to be created. + type: string + expirationDate: + description: Expiration UTC datetime (Y-m-d'T'H:M:S'Z'). When + this parameter gets changed on reruns, if newer date is ahead + of current date, an update is performed. If the newer date is + before the current date, resource will be force created. + type: string + keyOpts: + description: 'A list of JSON web key operations. Possible values + include: decrypt, encrypt, sign, unwrapKey, verify and wrapKey. + Please note these values are case sensitive.' + items: + type: string + type: array + keySize: + description: 'Specifies the Size of the RSA key to create in bytes. + For example, 1024 or 2048. Note: This field is required if key_type + is RSA or RSA-HSM. Changing this forces a new resource to be + created.' + type: number + keyType: + description: Specifies the Key Type to use for this Key Vault + Key. Possible values are EC (Elliptic Curve), EC-HSM, RSA and + RSA-HSM. Changing this forces a new resource to be created. + type: string + keyVaultId: + description: The ID of the Key Vault where the Key should be created. + Changing this forces a new resource to be created. 
+ type: string + keyVaultIdRef: + description: Reference to a Vault in keyvault to populate keyVaultId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + keyVaultIdSelector: + description: Selector for a Vault in keyvault to populate keyVaultId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: Specifies the name of the Key Vault Key. Changing + this forces a new resource to be created. + type: string + notBeforeDate: + description: Key not usable before the provided UTC datetime (Y-m-d'T'H:M:S'Z'). + type: string + rotationPolicy: + description: A rotation_policy block as defined below. + properties: + automatic: + description: An automatic block as defined below. + properties: + timeAfterCreation: + description: Rotate automatically at a duration after + create as an ISO 8601 duration. + type: string + timeBeforeExpiry: + description: Rotate automatically at a duration before + expiry as an ISO 8601 duration. + type: string + type: object + expireAfter: + description: Expire a Key Vault Key after given duration as + an ISO 8601 duration. + type: string + notifyBeforeExpiry: + description: Notify at a given duration before expiry as an + ISO 8601 duration. + type: string + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.keyOpts is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.keyOpts) + || (has(self.initProvider) && has(self.initProvider.keyOpts))' + - message: spec.forProvider.keyType is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.keyType) + || (has(self.initProvider) && has(self.initProvider.keyType))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: KeyStatus defines the observed state of Key. 
+ properties: + atProvider: + properties: + curve: + description: Specifies the curve to use when creating an EC key. + Possible values are P-256, P-256K, P-384, and P-521. This field + will be required in a future release if key_type is EC or EC-HSM. + The API will default to P-256 if nothing is specified. Changing + this forces a new resource to be created. + type: string + e: + description: The RSA public exponent of this Key Vault Key. + type: string + expirationDate: + description: Expiration UTC datetime (Y-m-d'T'H:M:S'Z'). When + this parameter gets changed on reruns, if newer date is ahead + of current date, an update is performed. If the newer date is + before the current date, resource will be force created. + type: string + id: + description: The Key Vault Key ID. + type: string + keyOpts: + description: 'A list of JSON web key operations. Possible values + include: decrypt, encrypt, sign, unwrapKey, verify and wrapKey. + Please note these values are case sensitive.' + items: + type: string + type: array + keySize: + description: 'Specifies the Size of the RSA key to create in bytes. + For example, 1024 or 2048. Note: This field is required if key_type + is RSA or RSA-HSM. Changing this forces a new resource to be + created.' + type: number + keyType: + description: Specifies the Key Type to use for this Key Vault + Key. Possible values are EC (Elliptic Curve), EC-HSM, RSA and + RSA-HSM. Changing this forces a new resource to be created. + type: string + keyVaultId: + description: The ID of the Key Vault where the Key should be created. + Changing this forces a new resource to be created. + type: string + "n": + description: The RSA modulus of this Key Vault Key. + type: string + name: + description: Specifies the name of the Key Vault Key. Changing + this forces a new resource to be created. + type: string + notBeforeDate: + description: Key not usable before the provided UTC datetime (Y-m-d'T'H:M:S'Z'). 
+ type: string + publicKeyOpenssh: + description: The OpenSSH encoded public key of this Key Vault + Key. + type: string + publicKeyPem: + description: The PEM encoded public key of this Key Vault Key. + type: string + resourceId: + description: The (Versioned) ID for this Key Vault Key. This property + points to a specific version of a Key Vault Key, as such using + this won't auto-rotate values if used in other Azure Services. + type: string + resourceVersionlessId: + description: The Versionless ID of the Key Vault Key. This property + allows other Azure Services (that support it) to auto-rotate + their value when the Key Vault Key is updated. + type: string + rotationPolicy: + description: A rotation_policy block as defined below. + properties: + automatic: + description: An automatic block as defined below. + properties: + timeAfterCreation: + description: Rotate automatically at a duration after + create as an ISO 8601 duration. + type: string + timeBeforeExpiry: + description: Rotate automatically at a duration before + expiry as an ISO 8601 duration. + type: string + type: object + expireAfter: + description: Expire a Key Vault Key after given duration as + an ISO 8601 duration. + type: string + notifyBeforeExpiry: + description: Notify at a given duration before expiry as an + ISO 8601 duration. + type: string + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + version: + description: The current version of the Key Vault Key. + type: string + versionlessId: + description: The Base ID of the Key Vault Key. + type: string + x: + description: The EC X component of this Key Vault Key. + type: string + "y": + description: The EC Y component of this Key Vault Key. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. 
+ properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/keyvault.azure.upbound.io_managedhardwaresecuritymodules.yaml b/package/crds/keyvault.azure.upbound.io_managedhardwaresecuritymodules.yaml index 9a4ab2786..e47d244c9 100644 --- a/package/crds/keyvault.azure.upbound.io_managedhardwaresecuritymodules.yaml +++ b/package/crds/keyvault.azure.upbound.io_managedhardwaresecuritymodules.yaml @@ -648,3 +648,627 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ManagedHardwareSecurityModule is the Schema for the ManagedHardwareSecurityModules + API. Manages a Key Vault Managed Hardware Security Module. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ManagedHardwareSecurityModuleSpec defines the desired state + of ManagedHardwareSecurityModule + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + adminObjectIds: + description: Specifies a list of administrators object IDs for + the key vault Managed Hardware Security Module. Changing this + forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + networkAcls: + description: A network_acls block as defined below. + properties: + bypass: + description: Specifies which traffic can bypass the network + rules. Possible values are AzureServices and None. + type: string + defaultAction: + description: The Default Action to use. Possible values are + Allow and Deny. + type: string + type: object + publicNetworkAccessEnabled: + description: Whether traffic from public networks is permitted. + Defaults to true. Changing this forces a new resource to be + created. 
+ type: boolean + purgeProtectionEnabled: + description: Is Purge Protection enabled for this Key Vault Managed + Hardware Security Module? Changing this forces a new resource + to be created. + type: boolean + resourceGroupName: + description: The name of the resource group in which to create + the Key Vault Managed Hardware Security Module. Changing this + forces a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + securityDomainKeyVaultCertificateIds: + description: A list of KeyVault certificates resource IDs (minimum + of three and up to a maximum of 10) to activate this Managed + HSM. More information see activate-your-managed-hsm + items: + type: string + type: array + securityDomainQuorum: + description: Specifies the minimum number of shares required to + decrypt the security domain for recovery. This is required when + security_domain_key_vault_certificate_ids is specified. Valid + values are between 2 and 10. + type: number + skuName: + description: The Name of the SKU used for this Key Vault Managed + Hardware Security Module. Possible value is Standard_B1. Changing + this forces a new resource to be created. + type: string + softDeleteRetentionDays: + description: The number of days that items should be retained + for once soft-deleted. This value can be between 7 and 90 days. + Defaults to 90. Changing this forces a new resource to be created. + type: number + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. 
+ type: object + x-kubernetes-map-type: granular + tenantId: + description: The Azure Active Directory Tenant ID that should + be used for authenticating requests to the key vault Managed + Hardware Security Module. Changing this forces a new resource + to be created. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + adminObjectIds: + description: Specifies a list of administrators object IDs for + the key vault Managed Hardware Security Module. Changing this + forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + networkAcls: + description: A network_acls block as defined below. + properties: + bypass: + description: Specifies which traffic can bypass the network + rules. Possible values are AzureServices and None. + type: string + defaultAction: + description: The Default Action to use. Possible values are + Allow and Deny. + type: string + type: object + publicNetworkAccessEnabled: + description: Whether traffic from public networks is permitted. + Defaults to true. Changing this forces a new resource to be + created. 
+ type: boolean + purgeProtectionEnabled: + description: Is Purge Protection enabled for this Key Vault Managed + Hardware Security Module? Changing this forces a new resource + to be created. + type: boolean + securityDomainKeyVaultCertificateIds: + description: A list of KeyVault certificates resource IDs (minimum + of three and up to a maximum of 10) to activate this Managed + HSM. More information see activate-your-managed-hsm + items: + type: string + type: array + securityDomainQuorum: + description: Specifies the minimum number of shares required to + decrypt the security domain for recovery. This is required when + security_domain_key_vault_certificate_ids is specified. Valid + values are between 2 and 10. + type: number + skuName: + description: The Name of the SKU used for this Key Vault Managed + Hardware Security Module. Possible value is Standard_B1. Changing + this forces a new resource to be created. + type: string + softDeleteRetentionDays: + description: The number of days that items should be retained + for once soft-deleted. This value can be between 7 and 90 days. + Defaults to 90. Changing this forces a new resource to be created. + type: number + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + tenantId: + description: The Azure Active Directory Tenant ID that should + be used for authenticating requests to the key vault Managed + Hardware Security Module. Changing this forces a new resource + to be created. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. 
Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.adminObjectIds is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.adminObjectIds) + || (has(self.initProvider) && has(self.initProvider.adminObjectIds))' + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.skuName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.skuName) + || (has(self.initProvider) && has(self.initProvider.skuName))' + - message: spec.forProvider.tenantId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.tenantId) + || (has(self.initProvider) && has(self.initProvider.tenantId))' + status: + description: ManagedHardwareSecurityModuleStatus defines the observed + state of ManagedHardwareSecurityModule. + properties: + atProvider: + properties: + adminObjectIds: + description: Specifies a list of administrators object IDs for + the key vault Managed Hardware Security Module. Changing this + forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + hsmUri: + description: The URI of the Key Vault Managed Hardware Security + Module, used for performing operations on keys. + type: string + id: + description: The Key Vault Secret Managed Hardware Security Module + ID. 
+ type: string + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + networkAcls: + description: A network_acls block as defined below. + properties: + bypass: + description: Specifies which traffic can bypass the network + rules. Possible values are AzureServices and None. + type: string + defaultAction: + description: The Default Action to use. Possible values are + Allow and Deny. + type: string + type: object + publicNetworkAccessEnabled: + description: Whether traffic from public networks is permitted. + Defaults to true. Changing this forces a new resource to be + created. + type: boolean + purgeProtectionEnabled: + description: Is Purge Protection enabled for this Key Vault Managed + Hardware Security Module? Changing this forces a new resource + to be created. + type: boolean + resourceGroupName: + description: The name of the resource group in which to create + the Key Vault Managed Hardware Security Module. Changing this + forces a new resource to be created. + type: string + securityDomainKeyVaultCertificateIds: + description: A list of KeyVault certificates resource IDs (minimum + of three and up to a maximum of 10) to activate this Managed + HSM. More information see activate-your-managed-hsm + items: + type: string + type: array + securityDomainQuorum: + description: Specifies the minimum number of shares required to + decrypt the security domain for recovery. This is required when + security_domain_key_vault_certificate_ids is specified. Valid + values are between 2 and 10. + type: number + skuName: + description: The Name of the SKU used for this Key Vault Managed + Hardware Security Module. Possible value is Standard_B1. Changing + this forces a new resource to be created. + type: string + softDeleteRetentionDays: + description: The number of days that items should be retained + for once soft-deleted. This value can be between 7 and 90 days. 
+ Defaults to 90. Changing this forces a new resource to be created. + type: number + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + tenantId: + description: The Azure Active Directory Tenant ID that should + be used for authenticating requests to the key vault Managed + Hardware Security Module. Changing this forces a new resource + to be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/keyvault.azure.upbound.io_vaults.yaml b/package/crds/keyvault.azure.upbound.io_vaults.yaml index 9b9f5aaa9..bb2b967c1 100644 --- a/package/crds/keyvault.azure.upbound.io_vaults.yaml +++ b/package/crds/keyvault.azure.upbound.io_vaults.yaml @@ -753,3 +753,732 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Vault is the Schema for the Vaults API. Manages a Key Vault. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: VaultSpec defines the desired state of Vault + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + contact: + description: One or more contact block as defined below. + items: + properties: + email: + description: E-mail address of the contact. + type: string + name: + description: Name of the contact. + type: string + phone: + description: Phone number of the contact. + type: string + type: object + type: array + enableRbacAuthorization: + description: Boolean flag to specify whether Azure Key Vault uses + Role Based Access Control (RBAC) for authorization of data actions. + type: boolean + enabledForDeployment: + description: Boolean flag to specify whether Azure Virtual Machines + are permitted to retrieve certificates stored as secrets from + the key vault. + type: boolean + enabledForDiskEncryption: + description: Boolean flag to specify whether Azure Disk Encryption + is permitted to retrieve secrets from the vault and unwrap keys. + type: boolean + enabledForTemplateDeployment: + description: Boolean flag to specify whether Azure Resource Manager + is permitted to retrieve secrets from the key vault. 
+ type: boolean + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + networkAcls: + description: A network_acls block as defined below. + properties: + bypass: + description: Specifies which traffic can bypass the network + rules. Possible values are AzureServices and None. + type: string + defaultAction: + description: The Default Action to use when no rules match + from ip_rules / virtual_network_subnet_ids. Possible values + are Allow and Deny. + type: string + ipRules: + description: One or more IP Addresses, or CIDR Blocks which + should be able to access the Key Vault. + items: + type: string + type: array + x-kubernetes-list-type: set + virtualNetworkSubnetIds: + description: One or more Subnet IDs which should be able to + access this Key Vault. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + publicNetworkAccessEnabled: + description: Whether public network access is allowed for this + Key Vault. Defaults to true. + type: boolean + purgeProtectionEnabled: + description: Is Purge Protection enabled for this Key Vault? + type: boolean + resourceGroupName: + description: The name of the resource group in which to create + the Key Vault. Changing this forces a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + skuName: + description: The Name of the SKU used for this Key Vault. Possible + values are standard and premium. + type: string + softDeleteRetentionDays: + description: The number of days that items should be retained + for once soft-deleted. This value can be between 7 and 90 (the + default) days. 
+ type: number + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + tenantId: + description: The Azure Active Directory tenant ID that should + be used for authenticating requests to the key vault. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + contact: + description: One or more contact block as defined below. + items: + properties: + email: + description: E-mail address of the contact. + type: string + name: + description: Name of the contact. + type: string + phone: + description: Phone number of the contact. + type: string + type: object + type: array + enableRbacAuthorization: + description: Boolean flag to specify whether Azure Key Vault uses + Role Based Access Control (RBAC) for authorization of data actions. + type: boolean + enabledForDeployment: + description: Boolean flag to specify whether Azure Virtual Machines + are permitted to retrieve certificates stored as secrets from + the key vault. + type: boolean + enabledForDiskEncryption: + description: Boolean flag to specify whether Azure Disk Encryption + is permitted to retrieve secrets from the vault and unwrap keys. 
+ type: boolean + enabledForTemplateDeployment: + description: Boolean flag to specify whether Azure Resource Manager + is permitted to retrieve secrets from the key vault. + type: boolean + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + networkAcls: + description: A network_acls block as defined below. + properties: + bypass: + description: Specifies which traffic can bypass the network + rules. Possible values are AzureServices and None. + type: string + defaultAction: + description: The Default Action to use when no rules match + from ip_rules / virtual_network_subnet_ids. Possible values + are Allow and Deny. + type: string + ipRules: + description: One or more IP Addresses, or CIDR Blocks which + should be able to access the Key Vault. + items: + type: string + type: array + x-kubernetes-list-type: set + virtualNetworkSubnetIds: + description: One or more Subnet IDs which should be able to + access this Key Vault. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + publicNetworkAccessEnabled: + description: Whether public network access is allowed for this + Key Vault. Defaults to true. + type: boolean + purgeProtectionEnabled: + description: Is Purge Protection enabled for this Key Vault? + type: boolean + skuName: + description: The Name of the SKU used for this Key Vault. Possible + values are standard and premium. + type: string + softDeleteRetentionDays: + description: The number of days that items should be retained + for once soft-deleted. This value can be between 7 and 90 (the + default) days. + type: number + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + tenantId: + description: The Azure Active Directory tenant ID that should + be used for authenticating requests to the key vault. 
+ type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. 
+ - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.skuName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.skuName) + || (has(self.initProvider) && has(self.initProvider.skuName))' + - message: spec.forProvider.tenantId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.tenantId) + || (has(self.initProvider) && has(self.initProvider.tenantId))' + status: + description: VaultStatus defines the observed state of Vault. + properties: + atProvider: + properties: + accessPolicy: + description: A list of access_policy objects (up to 1024) describing + access policies, as described below. + items: + properties: + applicationId: + description: The object ID of an Application in Azure Active + Directory. + type: string + certificatePermissions: + description: 'List of certificate permissions, must be one + or more from the following: Backup, Create, Delete, DeleteIssuers, + Get, GetIssuers, Import, List, ListIssuers, ManageContacts, + ManageIssuers, Purge, Recover, Restore, SetIssuers and + Update.' + items: + type: string + type: array + keyPermissions: + description: List of key permissions. Possible values are + Backup, Create, Decrypt, Delete, Encrypt, Get, Import, + List, Purge, Recover, Restore, Sign, UnwrapKey, Update, + Verify, WrapKey, Release, Rotate, GetRotationPolicy and + SetRotationPolicy. 
+ items: + type: string + type: array + objectId: + description: The object ID of a user, service principal + or security group in the Azure Active Directory tenant + for the vault. The object ID must be unique for the list + of access policies. + type: string + secretPermissions: + description: 'List of secret permissions, must be one or + more from the following: Backup, Delete, Get, List, Purge, + Recover, Restore and Set.' + items: + type: string + type: array + storagePermissions: + description: 'List of storage permissions, must be one or + more from the following: Backup, Delete, DeleteSAS, Get, + GetSAS, List, ListSAS, Purge, Recover, RegenerateKey, + Restore, Set, SetSAS and Update.' + items: + type: string + type: array + tenantId: + description: The Azure Active Directory tenant ID that should + be used for authenticating requests to the key vault. + Must match the tenant_id used above. + type: string + type: object + type: array + contact: + description: One or more contact block as defined below. + items: + properties: + email: + description: E-mail address of the contact. + type: string + name: + description: Name of the contact. + type: string + phone: + description: Phone number of the contact. + type: string + type: object + type: array + enableRbacAuthorization: + description: Boolean flag to specify whether Azure Key Vault uses + Role Based Access Control (RBAC) for authorization of data actions. + type: boolean + enabledForDeployment: + description: Boolean flag to specify whether Azure Virtual Machines + are permitted to retrieve certificates stored as secrets from + the key vault. + type: boolean + enabledForDiskEncryption: + description: Boolean flag to specify whether Azure Disk Encryption + is permitted to retrieve secrets from the vault and unwrap keys. + type: boolean + enabledForTemplateDeployment: + description: Boolean flag to specify whether Azure Resource Manager + is permitted to retrieve secrets from the key vault. 
+ type: boolean + id: + description: The ID of the Key Vault. + type: string + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + networkAcls: + description: A network_acls block as defined below. + properties: + bypass: + description: Specifies which traffic can bypass the network + rules. Possible values are AzureServices and None. + type: string + defaultAction: + description: The Default Action to use when no rules match + from ip_rules / virtual_network_subnet_ids. Possible values + are Allow and Deny. + type: string + ipRules: + description: One or more IP Addresses, or CIDR Blocks which + should be able to access the Key Vault. + items: + type: string + type: array + x-kubernetes-list-type: set + virtualNetworkSubnetIds: + description: One or more Subnet IDs which should be able to + access this Key Vault. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + publicNetworkAccessEnabled: + description: Whether public network access is allowed for this + Key Vault. Defaults to true. + type: boolean + purgeProtectionEnabled: + description: Is Purge Protection enabled for this Key Vault? + type: boolean + resourceGroupName: + description: The name of the resource group in which to create + the Key Vault. Changing this forces a new resource to be created. + type: string + skuName: + description: The Name of the SKU used for this Key Vault. Possible + values are standard and premium. + type: string + softDeleteRetentionDays: + description: The number of days that items should be retained + for once soft-deleted. This value can be between 7 and 90 (the + default) days. + type: number + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. 
+ type: object + x-kubernetes-map-type: granular + tenantId: + description: The Azure Active Directory tenant ID that should + be used for authenticating requests to the key vault. + type: string + vaultUri: + description: The URI of the Key Vault, used for performing operations + on keys and secrets. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/kusto.azure.upbound.io_attacheddatabaseconfigurations.yaml b/package/crds/kusto.azure.upbound.io_attacheddatabaseconfigurations.yaml index 0c43e1742..6d12f836d 100644 --- a/package/crds/kusto.azure.upbound.io_attacheddatabaseconfigurations.yaml +++ b/package/crds/kusto.azure.upbound.io_attacheddatabaseconfigurations.yaml @@ -1178,3 +1178,1154 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: AttachedDatabaseConfiguration is the Schema for the AttachedDatabaseConfigurations + API. Manages Kusto / Data Explorer Attached Database Configuration + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: AttachedDatabaseConfigurationSpec defines the desired state + of AttachedDatabaseConfiguration + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + clusterName: + description: Specifies the name of the Kusto Cluster for which + the configuration will be created. Changing this forces a new + resource to be created. + type: string + clusterNameRef: + description: Reference to a Cluster in kusto to populate clusterName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + clusterNameSelector: + description: Selector for a Cluster in kusto to populate clusterName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + clusterResourceId: + description: The resource id of the cluster where the databases + you would like to attach reside. Changing this forces a new + resource to be created. + type: string + clusterResourceIdRef: + description: Reference to a Cluster in kusto to populate clusterResourceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + clusterResourceIdSelector: + description: Selector for a Cluster in kusto to populate clusterResourceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + databaseName: + description: The name of the database which you would like to + attach, use * if you want to follow all current and future databases. 
+ Changing this forces a new resource to be created. + type: string + databaseNameRef: + description: Reference to a Database in kusto to populate databaseName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + databaseNameSelector: + description: Selector for a Database in kusto to populate databaseName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + defaultPrincipalModificationKind: + description: 'The default principals modification kind. Valid + values are: None (default), Replace and Union. Defaults to None.' + type: string + location: + description: Specifies the location of the Kusto Cluster for which + the configuration will be created. Changing this forces a new + resource to be created. + type: string + name: + description: The name of the Kusto Attached Database Configuration + to create. Changing this forces a new resource to be created. + type: string + resourceGroupName: + description: Specifies the resource group of the Kusto Cluster + for which the configuration will be created. Changing this forces + a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sharing: + description: A sharing block as defined below. + properties: + externalTablesToExclude: + description: List of external tables exclude from the follower + database. + items: + type: string + type: array + x-kubernetes-list-type: set + externalTablesToInclude: + description: List of external tables to include in the follower + database. + items: + type: string + type: array + x-kubernetes-list-type: set + materializedViewsToExclude: + description: List of materialized views exclude from the follower + database. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + materializedViewsToInclude: + description: List of materialized views to include in the + follower database. + items: + type: string + type: array + x-kubernetes-list-type: set + tablesToExclude: + description: List of tables to exclude from the follower database. + items: + type: string + type: array + x-kubernetes-list-type: set + tablesToInclude: + description: List of tables to include in the follower database. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + clusterName: + description: Specifies the name of the Kusto Cluster for which + the configuration will be created. Changing this forces a new + resource to be created. + type: string + clusterNameRef: + description: Reference to a Cluster in kusto to populate clusterName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + clusterNameSelector: + description: Selector for a Cluster in kusto to populate clusterName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + clusterResourceId: + description: The resource id of the cluster where the databases + you would like to attach reside. Changing this forces a new + resource to be created. + type: string + clusterResourceIdRef: + description: Reference to a Cluster in kusto to populate clusterResourceId. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + clusterResourceIdSelector: + description: Selector for a Cluster in kusto to populate clusterResourceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + databaseName: + description: The name of the database which you would like to + attach, use * if you want to follow all current and future databases. + Changing this forces a new resource to be created. + type: string + databaseNameRef: + description: Reference to a Database in kusto to populate databaseName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + databaseNameSelector: + description: Selector for a Database in kusto to populate databaseName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + defaultPrincipalModificationKind: + description: 'The default principals modification kind. Valid + values are: None (default), Replace and Union. Defaults to None.' + type: string + location: + description: Specifies the location of the Kusto Cluster for which + the configuration will be created. Changing this forces a new + resource to be created. + type: string + name: + description: The name of the Kusto Attached Database Configuration + to create. Changing this forces a new resource to be created. + type: string + resourceGroupName: + description: Specifies the resource group of the Kusto Cluster + for which the configuration will be created. Changing this forces + a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sharing: + description: A sharing block as defined below. + properties: + externalTablesToExclude: + description: List of external tables exclude from the follower + database. + items: + type: string + type: array + x-kubernetes-list-type: set + externalTablesToInclude: + description: List of external tables to include in the follower + database. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + materializedViewsToExclude: + description: List of materialized views exclude from the follower + database. + items: + type: string + type: array + x-kubernetes-list-type: set + materializedViewsToInclude: + description: List of materialized views to include in the + follower database. + items: + type: string + type: array + x-kubernetes-list-type: set + tablesToExclude: + description: List of tables to exclude from the follower database. + items: + type: string + type: array + x-kubernetes-list-type: set + tablesToInclude: + description: List of tables to include in the follower database. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: AttachedDatabaseConfigurationStatus defines the observed + state of AttachedDatabaseConfiguration. + properties: + atProvider: + properties: + attachedDatabaseNames: + description: The list of databases from the cluster_resource_id + which are currently attached to the cluster. + items: + type: string + type: array + clusterName: + description: Specifies the name of the Kusto Cluster for which + the configuration will be created. Changing this forces a new + resource to be created. + type: string + clusterResourceId: + description: The resource id of the cluster where the databases + you would like to attach reside. Changing this forces a new + resource to be created. 
+ type: string + databaseName: + description: The name of the database which you would like to + attach, use * if you want to follow all current and future databases. + Changing this forces a new resource to be created. + type: string + defaultPrincipalModificationKind: + description: 'The default principals modification kind. Valid + values are: None (default), Replace and Union. Defaults to None.' + type: string + id: + description: The Kusto Attached Database Configuration ID. + type: string + location: + description: Specifies the location of the Kusto Cluster for which + the configuration will be created. Changing this forces a new + resource to be created. + type: string + name: + description: The name of the Kusto Attached Database Configuration + to create. Changing this forces a new resource to be created. + type: string + resourceGroupName: + description: Specifies the resource group of the Kusto Cluster + for which the configuration will be created. Changing this forces + a new resource to be created. + type: string + sharing: + description: A sharing block as defined below. + properties: + externalTablesToExclude: + description: List of external tables exclude from the follower + database. + items: + type: string + type: array + x-kubernetes-list-type: set + externalTablesToInclude: + description: List of external tables to include in the follower + database. + items: + type: string + type: array + x-kubernetes-list-type: set + materializedViewsToExclude: + description: List of materialized views exclude from the follower + database. + items: + type: string + type: array + x-kubernetes-list-type: set + materializedViewsToInclude: + description: List of materialized views to include in the + follower database. + items: + type: string + type: array + x-kubernetes-list-type: set + tablesToExclude: + description: List of tables to exclude from the follower database. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + tablesToInclude: + description: List of tables to include in the follower database. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/kusto.azure.upbound.io_clusters.yaml b/package/crds/kusto.azure.upbound.io_clusters.yaml index fd7453286..0159c3e28 100644 --- a/package/crds/kusto.azure.upbound.io_clusters.yaml +++ b/package/crds/kusto.azure.upbound.io_clusters.yaml @@ -1082,3 +1082,1037 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Cluster is the Schema for the Clusters API. Manages Kusto (also + known as Azure Data Explorer) Cluster + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ClusterSpec defines the desired state of Cluster + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + allowedFqdns: + description: List of allowed FQDNs(Fully Qualified Domain Name) + for egress from Cluster. + items: + type: string + type: array + allowedIpRanges: + description: The list of ips in the format of CIDR allowed to + connect to the cluster. + items: + type: string + type: array + autoStopEnabled: + description: Specifies if the cluster could be automatically stopped + (due to lack of data or no activity for many days). Defaults + to true. + type: boolean + diskEncryptionEnabled: + description: Specifies if the cluster's disks are encrypted. + type: boolean + doubleEncryptionEnabled: + description: Is the cluster's double encryption enabled? Changing + this forces a new resource to be created. + type: boolean + engine: + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Kusto Cluster. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: 'Specifies the type of Managed Service Identity + that is configured on this Kusto Cluster. Possible values + are: SystemAssigned, UserAssigned and SystemAssigned, UserAssigned.' + type: string + type: object + languageExtensions: + description: 'An list of language_extensions to enable. Valid + values are: PYTHON, PYTHON_3.10.8 and R. PYTHON is used to specify + Python 3.6.5 image and PYTHON_3.10.8 is used to specify Python + 3.10.8 image. Note that PYTHON_3.10.8 is only available in skus + which support nested virtualization.' + items: + type: string + type: array + x-kubernetes-list-type: set + location: + description: The location where the Kusto Cluster should be created. + Changing this forces a new resource to be created. + type: string + optimizedAutoScale: + description: An optimized_auto_scale block as defined below. + properties: + maximumInstances: + description: The maximum number of allowed instances. Must + between 0 and 1000. + type: number + minimumInstances: + description: The minimum number of allowed instances. Must + between 0 and 1000. + type: number + type: object + outboundNetworkAccessRestricted: + description: Whether to restrict outbound network access. Value + is optional but if passed in, must be true or false, default + is false. + type: boolean + publicIpType: + description: Indicates what public IP type to create - IPv4 (default), + or DualStack (both IPv4 and IPv6). Defaults to IPv4. + type: string + publicNetworkAccessEnabled: + description: Is the public network access enabled? Defaults to + true. + type: boolean + purgeEnabled: + description: Specifies if the purge operations are enabled. + type: boolean + resourceGroupName: + description: Specifies the Resource Group where the Kusto Cluster + should exist. Changing this forces a new resource to be created. 
+ type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sku: + description: A sku block as defined below. + properties: + capacity: + description: Specifies the node count for the cluster. Boundaries + depend on the SKU name. + type: number + name: + description: The name of the SKU. Possible values are Dev(No + SLA)_Standard_D11_v2, Dev(No SLA)_Standard_E2a_v4, Standard_D14_v2, + Standard_D11_v2, Standard_D16d_v5, Standard_D13_v2, Standard_D12_v2, + Standard_DS14_v2+4TB_PS, Standard_DS14_v2+3TB_PS, Standard_DS13_v2+1TB_PS, + Standard_DS13_v2+2TB_PS, Standard_D32d_v5, Standard_D32d_v4, + Standard_EC8ads_v5, Standard_EC8as_v5+1TB_PS, Standard_EC8as_v5+2TB_PS, + Standard_EC16ads_v5, Standard_EC16as_v5+4TB_PS, Standard_EC16as_v5+3TB_PS, + Standard_E80ids_v4, Standard_E8a_v4, Standard_E8ads_v5, + Standard_E8as_v5+1TB_PS, Standard_E8as_v5+2TB_PS, Standard_E8as_v4+1TB_PS, + Standard_E8as_v4+2TB_PS, Standard_E8d_v5, Standard_E8d_v4, + Standard_E8s_v5+1TB_PS, Standard_E8s_v5+2TB_PS, Standard_E8s_v4+1TB_PS, + Standard_E8s_v4+2TB_PS, Standard_E4a_v4, Standard_E4ads_v5, + Standard_E4d_v5, Standard_E4d_v4, Standard_E16a_v4, Standard_E16ads_v5, + Standard_E16as_v5+4TB_PS, Standard_E16as_v5+3TB_PS, Standard_E16as_v4+4TB_PS, + Standard_E16as_v4+3TB_PS, Standard_E16d_v5, Standard_E16d_v4, + Standard_E16s_v5+4TB_PS, Standard_E16s_v5+3TB_PS, Standard_E16s_v4+4TB_PS, + Standard_E16s_v4+3TB_PS, Standard_E64i_v3, Standard_E2a_v4, + Standard_E2ads_v5, Standard_E2d_v5, Standard_E2d_v4, Standard_L8as_v3, + Standard_L8s, Standard_L8s_v3, Standard_L8s_v2, Standard_L4s, + Standard_L16as_v3, Standard_L16s, Standard_L16s_v3, Standard_L16s_v2, + Standard_L32as_v3 and Standard_L32s_v3. 
+ type: string + type: object + streamingIngestionEnabled: + description: Specifies if the streaming ingest is enabled. + type: boolean + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + trustedExternalTenants: + description: Specifies a list of tenant IDs that are trusted by + the cluster. Default setting trusts all other tenants. Use trusted_external_tenants + = ["*"] to explicitly allow all other tenants, trusted_external_tenants + = ["MyTenantOnly"] for only your tenant or trusted_external_tenants + = ["", ""] to allow specific other tenants. + items: + type: string + type: array + virtualNetworkConfiguration: + description: A virtual_network_configuration block as defined + below. Changing this forces a new resource to be created. + properties: + dataManagementPublicIpId: + description: Data management's service public IP address resource + id. + type: string + enginePublicIpId: + description: Engine service's public IP address resource id. + type: string + subnetId: + description: The subnet resource id. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + zones: + description: Specifies a list of Availability Zones in which this + Kusto Cluster should be located. Changing this forces a new + Kusto Cluster to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. 
+ The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + allowedFqdns: + description: List of allowed FQDNs(Fully Qualified Domain Name) + for egress from Cluster. + items: + type: string + type: array + allowedIpRanges: + description: The list of ips in the format of CIDR allowed to + connect to the cluster. + items: + type: string + type: array + autoStopEnabled: + description: Specifies if the cluster could be automatically stopped + (due to lack of data or no activity for many days). Defaults + to true. + type: boolean + diskEncryptionEnabled: + description: Specifies if the cluster's disks are encrypted. + type: boolean + doubleEncryptionEnabled: + description: Is the cluster's double encryption enabled? Changing + this forces a new resource to be created. + type: boolean + engine: + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Kusto Cluster. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: 'Specifies the type of Managed Service Identity + that is configured on this Kusto Cluster. Possible values + are: SystemAssigned, UserAssigned and SystemAssigned, UserAssigned.' + type: string + type: object + languageExtensions: + description: 'An list of language_extensions to enable. Valid + values are: PYTHON, PYTHON_3.10.8 and R. PYTHON is used to specify + Python 3.6.5 image and PYTHON_3.10.8 is used to specify Python + 3.10.8 image. Note that PYTHON_3.10.8 is only available in skus + which support nested virtualization.' 
+ items: + type: string + type: array + x-kubernetes-list-type: set + location: + description: The location where the Kusto Cluster should be created. + Changing this forces a new resource to be created. + type: string + optimizedAutoScale: + description: An optimized_auto_scale block as defined below. + properties: + maximumInstances: + description: The maximum number of allowed instances. Must + between 0 and 1000. + type: number + minimumInstances: + description: The minimum number of allowed instances. Must + between 0 and 1000. + type: number + type: object + outboundNetworkAccessRestricted: + description: Whether to restrict outbound network access. Value + is optional but if passed in, must be true or false, default + is false. + type: boolean + publicIpType: + description: Indicates what public IP type to create - IPv4 (default), + or DualStack (both IPv4 and IPv6). Defaults to IPv4. + type: string + publicNetworkAccessEnabled: + description: Is the public network access enabled? Defaults to + true. + type: boolean + purgeEnabled: + description: Specifies if the purge operations are enabled. + type: boolean + sku: + description: A sku block as defined below. + properties: + capacity: + description: Specifies the node count for the cluster. Boundaries + depend on the SKU name. + type: number + name: + description: The name of the SKU. 
Possible values are Dev(No + SLA)_Standard_D11_v2, Dev(No SLA)_Standard_E2a_v4, Standard_D14_v2, + Standard_D11_v2, Standard_D16d_v5, Standard_D13_v2, Standard_D12_v2, + Standard_DS14_v2+4TB_PS, Standard_DS14_v2+3TB_PS, Standard_DS13_v2+1TB_PS, + Standard_DS13_v2+2TB_PS, Standard_D32d_v5, Standard_D32d_v4, + Standard_EC8ads_v5, Standard_EC8as_v5+1TB_PS, Standard_EC8as_v5+2TB_PS, + Standard_EC16ads_v5, Standard_EC16as_v5+4TB_PS, Standard_EC16as_v5+3TB_PS, + Standard_E80ids_v4, Standard_E8a_v4, Standard_E8ads_v5, + Standard_E8as_v5+1TB_PS, Standard_E8as_v5+2TB_PS, Standard_E8as_v4+1TB_PS, + Standard_E8as_v4+2TB_PS, Standard_E8d_v5, Standard_E8d_v4, + Standard_E8s_v5+1TB_PS, Standard_E8s_v5+2TB_PS, Standard_E8s_v4+1TB_PS, + Standard_E8s_v4+2TB_PS, Standard_E4a_v4, Standard_E4ads_v5, + Standard_E4d_v5, Standard_E4d_v4, Standard_E16a_v4, Standard_E16ads_v5, + Standard_E16as_v5+4TB_PS, Standard_E16as_v5+3TB_PS, Standard_E16as_v4+4TB_PS, + Standard_E16as_v4+3TB_PS, Standard_E16d_v5, Standard_E16d_v4, + Standard_E16s_v5+4TB_PS, Standard_E16s_v5+3TB_PS, Standard_E16s_v4+4TB_PS, + Standard_E16s_v4+3TB_PS, Standard_E64i_v3, Standard_E2a_v4, + Standard_E2ads_v5, Standard_E2d_v5, Standard_E2d_v4, Standard_L8as_v3, + Standard_L8s, Standard_L8s_v3, Standard_L8s_v2, Standard_L4s, + Standard_L16as_v3, Standard_L16s, Standard_L16s_v3, Standard_L16s_v2, + Standard_L32as_v3 and Standard_L32s_v3. + type: string + type: object + streamingIngestionEnabled: + description: Specifies if the streaming ingest is enabled. + type: boolean + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + trustedExternalTenants: + description: Specifies a list of tenant IDs that are trusted by + the cluster. Default setting trusts all other tenants. 
Use trusted_external_tenants + = ["*"] to explicitly allow all other tenants, trusted_external_tenants + = ["MyTenantOnly"] for only your tenant or trusted_external_tenants + = ["", ""] to allow specific other tenants. + items: + type: string + type: array + virtualNetworkConfiguration: + description: A virtual_network_configuration block as defined + below. Changing this forces a new resource to be created. + properties: + dataManagementPublicIpId: + description: Data management's service public IP address resource + id. + type: string + enginePublicIpId: + description: Engine service's public IP address resource id. + type: string + subnetId: + description: The subnet resource id. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + zones: + description: Specifies a list of Availability Zones in which this + Kusto Cluster should be located. Changing this forces a new + Kusto Cluster to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.sku is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.sku) + || (has(self.initProvider) && has(self.initProvider.sku))' + status: + description: ClusterStatus defines the observed state of Cluster. + properties: + atProvider: + properties: + allowedFqdns: + description: List of allowed FQDNs(Fully Qualified Domain Name) + for egress from Cluster. + items: + type: string + type: array + allowedIpRanges: + description: The list of ips in the format of CIDR allowed to + connect to the cluster. 
+ items: + type: string + type: array + autoStopEnabled: + description: Specifies if the cluster could be automatically stopped + (due to lack of data or no activity for many days). Defaults + to true. + type: boolean + dataIngestionUri: + description: The Kusto Cluster URI to be used for data ingestion. + type: string + diskEncryptionEnabled: + description: Specifies if the cluster's disks are encrypted. + type: boolean + doubleEncryptionEnabled: + description: Is the cluster's double encryption enabled? Changing + this forces a new resource to be created. + type: boolean + engine: + type: string + id: + description: The Kusto Cluster ID. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Kusto Cluster. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Principal ID associated with this System + Assigned Managed Service Identity. + type: string + tenantId: + description: The Tenant ID associated with this System Assigned + Managed Service Identity. + type: string + type: + description: 'Specifies the type of Managed Service Identity + that is configured on this Kusto Cluster. Possible values + are: SystemAssigned, UserAssigned and SystemAssigned, UserAssigned.' + type: string + type: object + languageExtensions: + description: 'An list of language_extensions to enable. Valid + values are: PYTHON, PYTHON_3.10.8 and R. PYTHON is used to specify + Python 3.6.5 image and PYTHON_3.10.8 is used to specify Python + 3.10.8 image. Note that PYTHON_3.10.8 is only available in skus + which support nested virtualization.' + items: + type: string + type: array + x-kubernetes-list-type: set + location: + description: The location where the Kusto Cluster should be created. + Changing this forces a new resource to be created. 
+ type: string + optimizedAutoScale: + description: An optimized_auto_scale block as defined below. + properties: + maximumInstances: + description: The maximum number of allowed instances. Must + between 0 and 1000. + type: number + minimumInstances: + description: The minimum number of allowed instances. Must + between 0 and 1000. + type: number + type: object + outboundNetworkAccessRestricted: + description: Whether to restrict outbound network access. Value + is optional but if passed in, must be true or false, default + is false. + type: boolean + publicIpType: + description: Indicates what public IP type to create - IPv4 (default), + or DualStack (both IPv4 and IPv6). Defaults to IPv4. + type: string + publicNetworkAccessEnabled: + description: Is the public network access enabled? Defaults to + true. + type: boolean + purgeEnabled: + description: Specifies if the purge operations are enabled. + type: boolean + resourceGroupName: + description: Specifies the Resource Group where the Kusto Cluster + should exist. Changing this forces a new resource to be created. + type: string + sku: + description: A sku block as defined below. + properties: + capacity: + description: Specifies the node count for the cluster. Boundaries + depend on the SKU name. + type: number + name: + description: The name of the SKU. 
Possible values are Dev(No + SLA)_Standard_D11_v2, Dev(No SLA)_Standard_E2a_v4, Standard_D14_v2, + Standard_D11_v2, Standard_D16d_v5, Standard_D13_v2, Standard_D12_v2, + Standard_DS14_v2+4TB_PS, Standard_DS14_v2+3TB_PS, Standard_DS13_v2+1TB_PS, + Standard_DS13_v2+2TB_PS, Standard_D32d_v5, Standard_D32d_v4, + Standard_EC8ads_v5, Standard_EC8as_v5+1TB_PS, Standard_EC8as_v5+2TB_PS, + Standard_EC16ads_v5, Standard_EC16as_v5+4TB_PS, Standard_EC16as_v5+3TB_PS, + Standard_E80ids_v4, Standard_E8a_v4, Standard_E8ads_v5, + Standard_E8as_v5+1TB_PS, Standard_E8as_v5+2TB_PS, Standard_E8as_v4+1TB_PS, + Standard_E8as_v4+2TB_PS, Standard_E8d_v5, Standard_E8d_v4, + Standard_E8s_v5+1TB_PS, Standard_E8s_v5+2TB_PS, Standard_E8s_v4+1TB_PS, + Standard_E8s_v4+2TB_PS, Standard_E4a_v4, Standard_E4ads_v5, + Standard_E4d_v5, Standard_E4d_v4, Standard_E16a_v4, Standard_E16ads_v5, + Standard_E16as_v5+4TB_PS, Standard_E16as_v5+3TB_PS, Standard_E16as_v4+4TB_PS, + Standard_E16as_v4+3TB_PS, Standard_E16d_v5, Standard_E16d_v4, + Standard_E16s_v5+4TB_PS, Standard_E16s_v5+3TB_PS, Standard_E16s_v4+4TB_PS, + Standard_E16s_v4+3TB_PS, Standard_E64i_v3, Standard_E2a_v4, + Standard_E2ads_v5, Standard_E2d_v5, Standard_E2d_v4, Standard_L8as_v3, + Standard_L8s, Standard_L8s_v3, Standard_L8s_v2, Standard_L4s, + Standard_L16as_v3, Standard_L16s, Standard_L16s_v3, Standard_L16s_v2, + Standard_L32as_v3 and Standard_L32s_v3. + type: string + type: object + streamingIngestionEnabled: + description: Specifies if the streaming ingest is enabled. + type: boolean + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + trustedExternalTenants: + description: Specifies a list of tenant IDs that are trusted by + the cluster. Default setting trusts all other tenants. 
Use trusted_external_tenants + = ["*"] to explicitly allow all other tenants, trusted_external_tenants + = ["MyTenantOnly"] for only your tenant or trusted_external_tenants + = ["", ""] to allow specific other tenants. + items: + type: string + type: array + uri: + description: The FQDN of the Azure Kusto Cluster. + type: string + virtualNetworkConfiguration: + description: A virtual_network_configuration block as defined + below. Changing this forces a new resource to be created. + properties: + dataManagementPublicIpId: + description: Data management's service public IP address resource + id. + type: string + enginePublicIpId: + description: Engine service's public IP address resource id. + type: string + subnetId: + description: The subnet resource id. + type: string + type: object + zones: + description: Specifies a list of Availability Zones in which this + Kusto Cluster should be located. Changing this forces a new + Kusto Cluster to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. 
+ type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/labservices.azure.upbound.io_labservicelabs.yaml b/package/crds/labservices.azure.upbound.io_labservicelabs.yaml index 141ba73d0..e501ea663 100644 --- a/package/crds/labservices.azure.upbound.io_labservicelabs.yaml +++ b/package/crds/labservices.azure.upbound.io_labservicelabs.yaml @@ -1215,3 +1215,1137 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: LabServiceLab is the Schema for the LabServiceLabs API. Manages + a Lab Service Lab. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: LabServiceLabSpec defines the desired state of LabServiceLab + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + autoShutdown: + description: An auto_shutdown block as defined below. + properties: + disconnectDelay: + description: The amount of time a VM will stay running after + a user disconnects if this behavior is enabled. This value + must be formatted as an ISO 8601 string. + type: string + idleDelay: + description: The amount of time a VM will idle before it is + shutdown if this behavior is enabled. This value must be + formatted as an ISO 8601 string. + type: string + noConnectDelay: + description: The amount of time a VM will stay running before + it is shutdown if no connection is made and this behavior + is enabled. This value must be formatted as an ISO 8601 + string. 
+ type: string + shutdownOnIdle: + description: A VM will get shutdown when it has idled for + a period of time. Possible values are LowUsage and UserAbsence. + type: string + type: object + connectionSetting: + description: A connection_setting block as defined below. + properties: + clientRdpAccess: + description: The enabled access level for Client Access over + RDP. Possible value is Public. + type: string + clientSshAccess: + description: The enabled access level for Client Access over + SSH. Possible value is Public. + type: string + type: object + description: + description: The description of the Lab Service Lab. + type: string + labPlanId: + description: The resource ID of the Lab Plan that is used during + resource creation to provide defaults and acts as a permission + container when creating a Lab Service Lab via labs.azure.com. + type: string + location: + description: The Azure Region where the Lab Service Lab should + exist. Changing this forces a new resource to be created. + type: string + network: + description: A network block as defined below. + properties: + subnetId: + description: The resource ID of the Subnet for the network + profile of the Lab Service Lab. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + resourceGroupName: + description: The name of the Resource Group where the Lab Service + Lab should exist. Changing this forces a new resource to be + created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + roster: + description: A roster block as defined below. + properties: + activeDirectoryGroupId: + description: The AAD group ID which this Lab Service Lab roster + is populated from. + type: string + lmsInstance: + description: The base URI identifying the lms instance. + type: string + ltiClientId: + description: The unique id of the Azure Lab Service tool in + the lms. + type: string + ltiContextId: + description: The unique context identifier for the Lab Service + Lab in the lms. + type: string + ltiRosterEndpoint: + description: The URI of the names and roles service endpoint + on the lms for the class attached to this Lab Service Lab. + type: string + type: object + security: + description: A security block as defined below. + properties: + openAccessEnabled: + description: Is open access enabled to allow any user or only + specified users to register to a Lab Service Lab? + type: boolean + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Lab Service Lab. + type: object + x-kubernetes-map-type: granular + title: + description: The title of the Lab Service Lab. + type: string + virtualMachine: + description: A virtual_machine block as defined below. + properties: + additionalCapabilityGpuDriversInstalled: + description: Is flagged to pre-install dedicated GPU drivers? + Defaults to false. Changing this forces a new resource to + be created. + type: boolean + adminUser: + description: An admin_user block as defined below. + properties: + passwordSecretRef: + description: The password for the user. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - key + - name + - namespace + type: object + username: + description: The username to use when signing in to Lab + Service Lab VMs. + type: string + required: + - passwordSecretRef + type: object + createOption: + description: The create option to indicate what Lab Service + Lab VMs are created from. Possible values are Image and + TemplateVM. Defaults to Image. Changing this forces a new + resource to be created. + type: string + imageReference: + description: An image_reference block as defined below. + properties: + id: + description: The resource ID of the image. Changing this + forces a new resource to be created. + type: string + offer: + description: The image offer if applicable. Changing this + forces a new resource to be created. + type: string + publisher: + description: The image publisher. Changing this forces + a new resource to be created. + type: string + sku: + description: A sku block as defined below. + type: string + version: + description: The image version specified on creation. + Changing this forces a new resource to be created. + type: string + type: object + nonAdminUser: + description: A non_admin_user block as defined below. + properties: + passwordSecretRef: + description: The password for the user. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + username: + description: The username to use when signing in to Lab + Service Lab VMs. + type: string + required: + - passwordSecretRef + type: object + sharedPasswordEnabled: + description: Is the shared password enabled with the same + password for all user VMs? Defaults to false. Changing this + forces a new resource to be created. + type: boolean + sku: + description: A sku block as defined below. 
+ properties: + capacity: + description: The capacity for the SKU. Possible values + are between 0 and 400. + type: number + name: + description: The name of the SKU. Changing this forces + a new resource to be created. + type: string + type: object + usageQuota: + description: The initial quota allocated to each Lab Service + Lab user. Defaults to PT0S. This value must be formatted + as an ISO 8601 string. + type: string + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + autoShutdown: + description: An auto_shutdown block as defined below. + properties: + disconnectDelay: + description: The amount of time a VM will stay running after + a user disconnects if this behavior is enabled. This value + must be formatted as an ISO 8601 string. + type: string + idleDelay: + description: The amount of time a VM will idle before it is + shutdown if this behavior is enabled. This value must be + formatted as an ISO 8601 string. + type: string + noConnectDelay: + description: The amount of time a VM will stay running before + it is shutdown if no connection is made and this behavior + is enabled. This value must be formatted as an ISO 8601 + string. + type: string + shutdownOnIdle: + description: A VM will get shutdown when it has idled for + a period of time. Possible values are LowUsage and UserAbsence. 
+ type: string + type: object + connectionSetting: + description: A connection_setting block as defined below. + properties: + clientRdpAccess: + description: The enabled access level for Client Access over + RDP. Possible value is Public. + type: string + clientSshAccess: + description: The enabled access level for Client Access over + SSH. Possible value is Public. + type: string + type: object + description: + description: The description of the Lab Service Lab. + type: string + labPlanId: + description: The resource ID of the Lab Plan that is used during + resource creation to provide defaults and acts as a permission + container when creating a Lab Service Lab via labs.azure.com. + type: string + location: + description: The Azure Region where the Lab Service Lab should + exist. Changing this forces a new resource to be created. + type: string + network: + description: A network block as defined below. + properties: + subnetId: + description: The resource ID of the Subnet for the network + profile of the Lab Service Lab. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + roster: + description: A roster block as defined below. + properties: + activeDirectoryGroupId: + description: The AAD group ID which this Lab Service Lab roster + is populated from. + type: string + lmsInstance: + description: The base URI identifying the lms instance. + type: string + ltiClientId: + description: The unique id of the Azure Lab Service tool in + the lms. + type: string + ltiContextId: + description: The unique context identifier for the Lab Service + Lab in the lms. + type: string + ltiRosterEndpoint: + description: The URI of the names and roles service endpoint + on the lms for the class attached to this Lab Service Lab. 
+ type: string + type: object + security: + description: A security block as defined below. + properties: + openAccessEnabled: + description: Is open access enabled to allow any user or only + specified users to register to a Lab Service Lab? + type: boolean + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Lab Service Lab. + type: object + x-kubernetes-map-type: granular + title: + description: The title of the Lab Service Lab. + type: string + virtualMachine: + description: A virtual_machine block as defined below. + properties: + additionalCapabilityGpuDriversInstalled: + description: Is flagged to pre-install dedicated GPU drivers? + Defaults to false. Changing this forces a new resource to + be created. + type: boolean + adminUser: + description: An admin_user block as defined below. + properties: + username: + description: The username to use when signing in to Lab + Service Lab VMs. + type: string + type: object + createOption: + description: The create option to indicate what Lab Service + Lab VMs are created from. Possible values are Image and + TemplateVM. Defaults to Image. Changing this forces a new + resource to be created. + type: string + imageReference: + description: An image_reference block as defined below. + properties: + id: + description: The resource ID of the image. Changing this + forces a new resource to be created. + type: string + offer: + description: The image offer if applicable. Changing this + forces a new resource to be created. + type: string + publisher: + description: The image publisher. Changing this forces + a new resource to be created. + type: string + sku: + description: A sku block as defined below. + type: string + version: + description: The image version specified on creation. + Changing this forces a new resource to be created. + type: string + type: object + nonAdminUser: + description: A non_admin_user block as defined below. 
+ properties: + username: + description: The username to use when signing in to Lab + Service Lab VMs. + type: string + type: object + sharedPasswordEnabled: + description: Is the shared password enabled with the same + password for all user VMs? Defaults to false. Changing this + forces a new resource to be created. + type: boolean + sku: + description: A sku block as defined below. + properties: + capacity: + description: The capacity for the SKU. Possible values + are between 0 and 400. + type: number + name: + description: The name of the SKU. Changing this forces + a new resource to be created. + type: string + type: object + usageQuota: + description: The initial quota allocated to each Lab Service + Lab user. Defaults to PT0S. This value must be formatted + as an ISO 8601 string. + type: string + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.connectionSetting is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.connectionSetting) + || (has(self.initProvider) && has(self.initProvider.connectionSetting))' + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.security is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.security) + || (has(self.initProvider) && has(self.initProvider.security))' + - message: spec.forProvider.title is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.title) + || (has(self.initProvider) && has(self.initProvider.title))' + - message: spec.forProvider.virtualMachine is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || 
has(self.forProvider.virtualMachine) + || (has(self.initProvider) && has(self.initProvider.virtualMachine))' + status: + description: LabServiceLabStatus defines the observed state of LabServiceLab. + properties: + atProvider: + properties: + autoShutdown: + description: An auto_shutdown block as defined below. + properties: + disconnectDelay: + description: The amount of time a VM will stay running after + a user disconnects if this behavior is enabled. This value + must be formatted as an ISO 8601 string. + type: string + idleDelay: + description: The amount of time a VM will idle before it is + shutdown if this behavior is enabled. This value must be + formatted as an ISO 8601 string. + type: string + noConnectDelay: + description: The amount of time a VM will stay running before + it is shutdown if no connection is made and this behavior + is enabled. This value must be formatted as an ISO 8601 + string. + type: string + shutdownOnIdle: + description: A VM will get shutdown when it has idled for + a period of time. Possible values are LowUsage and UserAbsence. + type: string + type: object + connectionSetting: + description: A connection_setting block as defined below. + properties: + clientRdpAccess: + description: The enabled access level for Client Access over + RDP. Possible value is Public. + type: string + clientSshAccess: + description: The enabled access level for Client Access over + SSH. Possible value is Public. + type: string + type: object + description: + description: The description of the Lab Service Lab. + type: string + id: + description: The ID of the Lab Service Lab. + type: string + labPlanId: + description: The resource ID of the Lab Plan that is used during + resource creation to provide defaults and acts as a permission + container when creating a Lab Service Lab via labs.azure.com. + type: string + location: + description: The Azure Region where the Lab Service Lab should + exist. Changing this forces a new resource to be created. 
+ type: string + network: + description: A network block as defined below. + properties: + loadBalancerId: + description: The resource ID of the Load Balancer for the + network profile of the Lab Service Lab. + type: string + publicIpId: + description: The resource ID of the Public IP for the network + profile of the Lab Service Lab. + type: string + subnetId: + description: The resource ID of the Subnet for the network + profile of the Lab Service Lab. + type: string + type: object + resourceGroupName: + description: The name of the Resource Group where the Lab Service + Lab should exist. Changing this forces a new resource to be + created. + type: string + roster: + description: A roster block as defined below. + properties: + activeDirectoryGroupId: + description: The AAD group ID which this Lab Service Lab roster + is populated from. + type: string + lmsInstance: + description: The base URI identifying the lms instance. + type: string + ltiClientId: + description: The unique id of the Azure Lab Service tool in + the lms. + type: string + ltiContextId: + description: The unique context identifier for the Lab Service + Lab in the lms. + type: string + ltiRosterEndpoint: + description: The URI of the names and roles service endpoint + on the lms for the class attached to this Lab Service Lab. + type: string + type: object + security: + description: A security block as defined below. + properties: + openAccessEnabled: + description: Is open access enabled to allow any user or only + specified users to register to a Lab Service Lab? + type: boolean + registrationCode: + description: The registration code for the Lab Service Lab. + type: string + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Lab Service Lab. + type: object + x-kubernetes-map-type: granular + title: + description: The title of the Lab Service Lab. 
+ type: string + virtualMachine: + description: A virtual_machine block as defined below. + properties: + additionalCapabilityGpuDriversInstalled: + description: Is flagged to pre-install dedicated GPU drivers? + Defaults to false. Changing this forces a new resource to + be created. + type: boolean + adminUser: + description: An admin_user block as defined below. + properties: + username: + description: The username to use when signing in to Lab + Service Lab VMs. + type: string + type: object + createOption: + description: The create option to indicate what Lab Service + Lab VMs are created from. Possible values are Image and + TemplateVM. Defaults to Image. Changing this forces a new + resource to be created. + type: string + imageReference: + description: An image_reference block as defined below. + properties: + id: + description: The resource ID of the image. Changing this + forces a new resource to be created. + type: string + offer: + description: The image offer if applicable. Changing this + forces a new resource to be created. + type: string + publisher: + description: The image publisher. Changing this forces + a new resource to be created. + type: string + sku: + description: A sku block as defined below. + type: string + version: + description: The image version specified on creation. + Changing this forces a new resource to be created. + type: string + type: object + nonAdminUser: + description: A non_admin_user block as defined below. + properties: + username: + description: The username to use when signing in to Lab + Service Lab VMs. + type: string + type: object + sharedPasswordEnabled: + description: Is the shared password enabled with the same + password for all user VMs? Defaults to false. Changing this + forces a new resource to be created. + type: boolean + sku: + description: A sku block as defined below. + properties: + capacity: + description: The capacity for the SKU. Possible values + are between 0 and 400. 
+ type: number + name: + description: The name of the SKU. Changing this forces + a new resource to be created. + type: string + type: object + usageQuota: + description: The initial quota allocated to each Lab Service + Lab user. Defaults to PT0S. This value must be formatted + as an ISO 8601 string. + type: string + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/labservices.azure.upbound.io_labserviceplans.yaml b/package/crds/labservices.azure.upbound.io_labserviceplans.yaml index eeedc3420..aceeb43c3 100644 --- a/package/crds/labservices.azure.upbound.io_labserviceplans.yaml +++ b/package/crds/labservices.azure.upbound.io_labserviceplans.yaml @@ -859,3 +859,826 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: LabServicePlan is the Schema for the LabServicePlans API. Manages + a Lab Service Plan. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: LabServicePlanSpec defines the desired state of LabServicePlan + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + allowedRegions: + description: The allowed regions for the lab creator to use when + creating labs using this Lab Service Plan. The allowed region's + count must be between 1 and 28. + items: + type: string + type: array + defaultAutoShutdown: + description: A default_auto_shutdown block as defined below. + properties: + disconnectDelay: + description: The amount of time a VM will stay running after + a user disconnects if this behavior is enabled. This value + must be formatted as an ISO 8601 string. + type: string + idleDelay: + description: The amount of time a VM will idle before it is + shutdown if this behavior is enabled. This value must be + formatted as an ISO 8601 string. + type: string + noConnectDelay: + description: The amount of time a VM will stay running before + it is shutdown if no connection is made and this behavior + is enabled. This value must be formatted as an ISO 8601 + string. + type: string + shutdownOnIdle: + description: Will a VM get shutdown when it has idled for + a period of time? 
Possible values are LowUsage and UserAbsence. + type: string + type: object + defaultConnection: + description: A default_connection block as defined below. + properties: + clientRdpAccess: + description: The enabled access level for Client Access over + RDP. Possible values are Private and Public. + type: string + clientSshAccess: + description: The enabled access level for Client Access over + SSH. Possible values are Private and Public. + type: string + webRdpAccess: + description: The enabled access level for Web Access over + RDP. Possible values are Private and Public. + type: string + webSshAccess: + description: The enabled access level for Web Access over + SSH. Possible values are Private and Public. + type: string + type: object + defaultNetworkSubnetId: + description: The resource ID of the Subnet for the Lab Service + Plan network profile. + type: string + defaultNetworkSubnetIdRef: + description: Reference to a Subnet in network to populate defaultNetworkSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + defaultNetworkSubnetIdSelector: + description: Selector for a Subnet in network to populate defaultNetworkSubnetId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + location: + description: The Azure Region where the Lab Service Plan should + exist. Changing this forces a new resource to be created. + type: string + resourceGroupName: + description: The name of the Resource Group where the Lab Service + Plan should exist. Changing this forces a new resource to be + created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sharedGalleryId: + description: The resource ID of the Shared Image Gallery attached + to this Lab Service Plan. When saving a lab template virtual + machine image it will be persisted in this gallery. 
The shared + images from the gallery can be made available to use when creating + new labs. + type: string + support: + description: A support block as defined below. + properties: + email: + description: The email address for the support contact. + type: string + instructions: + description: The instructions for users of the Lab Service + Plan. + type: string + phone: + description: The phone number for the support contact. + type: string + url: + description: The web address for users of the Lab Service + Plan. + type: string + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Lab Service Plan. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + allowedRegions: + description: The allowed regions for the lab creator to use when + creating labs using this Lab Service Plan. The allowed region's + count must be between 1 and 28. + items: + type: string + type: array + defaultAutoShutdown: + description: A default_auto_shutdown block as defined below. + properties: + disconnectDelay: + description: The amount of time a VM will stay running after + a user disconnects if this behavior is enabled. This value + must be formatted as an ISO 8601 string. 
+ type: string + idleDelay: + description: The amount of time a VM will idle before it is + shutdown if this behavior is enabled. This value must be + formatted as an ISO 8601 string. + type: string + noConnectDelay: + description: The amount of time a VM will stay running before + it is shutdown if no connection is made and this behavior + is enabled. This value must be formatted as an ISO 8601 + string. + type: string + shutdownOnIdle: + description: Will a VM get shutdown when it has idled for + a period of time? Possible values are LowUsage and UserAbsence. + type: string + type: object + defaultConnection: + description: A default_connection block as defined below. + properties: + clientRdpAccess: + description: The enabled access level for Client Access over + RDP. Possible values are Private and Public. + type: string + clientSshAccess: + description: The enabled access level for Client Access over + SSH. Possible values are Private and Public. + type: string + webRdpAccess: + description: The enabled access level for Web Access over + RDP. Possible values are Private and Public. + type: string + webSshAccess: + description: The enabled access level for Web Access over + SSH. Possible values are Private and Public. + type: string + type: object + defaultNetworkSubnetId: + description: The resource ID of the Subnet for the Lab Service + Plan network profile. + type: string + defaultNetworkSubnetIdRef: + description: Reference to a Subnet in network to populate defaultNetworkSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + defaultNetworkSubnetIdSelector: + description: Selector for a Subnet in network to populate defaultNetworkSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + location: + description: The Azure Region where the Lab Service Plan should + exist. Changing this forces a new resource to be created. + type: string + sharedGalleryId: + description: The resource ID of the Shared Image Gallery attached + to this Lab Service Plan. 
When saving a lab template virtual + machine image it will be persisted in this gallery. The shared + images from the gallery can be made available to use when creating + new labs. + type: string + support: + description: A support block as defined below. + properties: + email: + description: The email address for the support contact. + type: string + instructions: + description: The instructions for users of the Lab Service + Plan. + type: string + phone: + description: The phone number for the support contact. + type: string + url: + description: The web address for users of the Lab Service + Plan. + type: string + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Lab Service Plan. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.allowedRegions is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.allowedRegions) + || (has(self.initProvider) && has(self.initProvider.allowedRegions))' + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + status: + description: LabServicePlanStatus defines the observed state of LabServicePlan. + properties: + atProvider: + properties: + allowedRegions: + description: The allowed regions for the lab creator to use when + creating labs using this Lab Service Plan. The allowed region's + count must be between 1 and 28. + items: + type: string + type: array + defaultAutoShutdown: + description: A default_auto_shutdown block as defined below. + properties: + disconnectDelay: + description: The amount of time a VM will stay running after + a user disconnects if this behavior is enabled. This value + must be formatted as an ISO 8601 string. + type: string + idleDelay: + description: The amount of time a VM will idle before it is + shutdown if this behavior is enabled. 
This value must be + formatted as an ISO 8601 string. + type: string + noConnectDelay: + description: The amount of time a VM will stay running before + it is shutdown if no connection is made and this behavior + is enabled. This value must be formatted as an ISO 8601 + string. + type: string + shutdownOnIdle: + description: Will a VM get shutdown when it has idled for + a period of time? Possible values are LowUsage and UserAbsence. + type: string + type: object + defaultConnection: + description: A default_connection block as defined below. + properties: + clientRdpAccess: + description: The enabled access level for Client Access over + RDP. Possible values are Private and Public. + type: string + clientSshAccess: + description: The enabled access level for Client Access over + SSH. Possible values are Private and Public. + type: string + webRdpAccess: + description: The enabled access level for Web Access over + RDP. Possible values are Private and Public. + type: string + webSshAccess: + description: The enabled access level for Web Access over + SSH. Possible values are Private and Public. + type: string + type: object + defaultNetworkSubnetId: + description: The resource ID of the Subnet for the Lab Service + Plan network profile. + type: string + id: + description: The ID of the Lab Service Plan. + type: string + location: + description: The Azure Region where the Lab Service Plan should + exist. Changing this forces a new resource to be created. + type: string + resourceGroupName: + description: The name of the Resource Group where the Lab Service + Plan should exist. Changing this forces a new resource to be + created. + type: string + sharedGalleryId: + description: The resource ID of the Shared Image Gallery attached + to this Lab Service Plan. When saving a lab template virtual + machine image it will be persisted in this gallery. The shared + images from the gallery can be made available to use when creating + new labs. 
+ type: string + support: + description: A support block as defined below. + properties: + email: + description: The email address for the support contact. + type: string + instructions: + description: The instructions for users of the Lab Service + Plan. + type: string + phone: + description: The phone number for the support contact. + type: string + url: + description: The web address for users of the Lab Service + Plan. + type: string + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Lab Service Plan. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/loadtestservice.azure.upbound.io_loadtests.yaml b/package/crds/loadtestservice.azure.upbound.io_loadtests.yaml index 36ecadde9..25be9c656 100644 --- a/package/crds/loadtestservice.azure.upbound.io_loadtests.yaml +++ b/package/crds/loadtestservice.azure.upbound.io_loadtests.yaml @@ -529,3 +529,508 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: LoadTest is the Schema for the LoadTests API. Manages a Load + Test. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: LoadTestSpec defines the desired state of LoadTest + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: Description of the resource. Changing this forces + a new Load Test to be created. + type: string + identity: + description: An identity block as defined below. Specifies the + Managed Identity which should be assigned to this Load Test. + properties: + identityIds: + description: A list of the User Assigned Identity IDs that + should be assigned to this Load Test. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Identity that should + be assigned to this Load Test. Possible values are SystemAssigned, + SystemAssigned, UserAssigned and UserAssigned. + type: string + type: object + location: + description: The Azure Region where the Load Test should exist. + Changing this forces a new Load Test to be created. + type: string + resourceGroupName: + description: Specifies the name of the Resource Group within which + this Load Test should exist. Changing this forces a new Load + Test to be created. 
+ type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Load Test. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: Description of the resource. Changing this forces + a new Load Test to be created. + type: string + identity: + description: An identity block as defined below. Specifies the + Managed Identity which should be assigned to this Load Test. + properties: + identityIds: + description: A list of the User Assigned Identity IDs that + should be assigned to this Load Test. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Identity that should + be assigned to this Load Test. Possible values are SystemAssigned, + SystemAssigned, UserAssigned and UserAssigned. + type: string + type: object + location: + description: The Azure Region where the Load Test should exist. + Changing this forces a new Load Test to be created. 
+ type: string + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Load Test. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. 
+ properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + status: + description: LoadTestStatus defines the observed state of LoadTest. + properties: + atProvider: + properties: + dataPlaneUri: + description: Resource data plane URI. + type: string + description: + description: Description of the resource. Changing this forces + a new Load Test to be created. + type: string + id: + description: The ID of the Load Test. + type: string + identity: + description: An identity block as defined below. Specifies the + Managed Identity which should be assigned to this Load Test. + properties: + identityIds: + description: A list of the User Assigned Identity IDs that + should be assigned to this Load Test. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Principal ID for the System-Assigned Managed + Identity assigned to this Load Test. + type: string + tenantId: + description: The Tenant ID for the System-Assigned Managed + Identity assigned to this Load Test. + type: string + type: + description: Specifies the type of Managed Identity that should + be assigned to this Load Test. Possible values are SystemAssigned, + SystemAssigned, UserAssigned and UserAssigned. + type: string + type: object + location: + description: The Azure Region where the Load Test should exist. + Changing this forces a new Load Test to be created. + type: string + resourceGroupName: + description: Specifies the name of the Resource Group within which + this Load Test should exist. Changing this forces a new Load + Test to be created. 
+ type: string + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Load Test. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/logic.azure.upbound.io_appintegrationaccountbatchconfigurations.yaml b/package/crds/logic.azure.upbound.io_appintegrationaccountbatchconfigurations.yaml index 666847523..9fe1f7411 100644 --- a/package/crds/logic.azure.upbound.io_appintegrationaccountbatchconfigurations.yaml +++ b/package/crds/logic.azure.upbound.io_appintegrationaccountbatchconfigurations.yaml @@ -1009,3 +1009,970 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: AppIntegrationAccountBatchConfiguration is the Schema for the + AppIntegrationAccountBatchConfigurations API. Manages a Logic App Integration + Account Batch Configuration. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: AppIntegrationAccountBatchConfigurationSpec defines the desired + state of AppIntegrationAccountBatchConfiguration + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + batchGroupName: + description: The batch group name of the Logic App Integration + Batch Configuration. Changing this forces a new resource to + be created. + type: string + integrationAccountName: + description: The name of the Logic App Integration Account. Changing + this forces a new resource to be created. + type: string + integrationAccountNameRef: + description: Reference to a AppIntegrationAccount in logic to + populate integrationAccountName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + integrationAccountNameSelector: + description: Selector for a AppIntegrationAccount in logic to + populate integrationAccountName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + metadata: + additionalProperties: + type: string + description: A JSON mapping of any Metadata for this Logic App + Integration Account Batch Configuration. + type: object + x-kubernetes-map-type: granular + name: + description: The name which should be used for this Logic App + Integration Account Batch Configuration. 
Only Alphanumeric characters + allowed. Changing this forces a new resource to be created. + type: string + releaseCriteria: + description: A release_criteria block as documented below, which + is used to select the criteria to meet before processing each + batch. + properties: + batchSize: + description: The batch size in bytes for the Logic App Integration + Batch Configuration. + type: number + messageCount: + description: The message count for the Logic App Integration + Batch Configuration. + type: number + recurrence: + description: A recurrence block as documented below. + properties: + endTime: + description: The end time of the schedule, formatted as + an RFC3339 string. + type: string + frequency: + description: The frequency of the schedule. Possible values + are Day, Hour, Minute, Month, NotSpecified, Second, + Week and Year. + type: string + interval: + description: The number of frequencys between runs. + type: number + schedule: + description: A schedule block as documented below. + properties: + hours: + description: A list containing a single item, which + specifies the Hour interval at which this recurrence + should be triggered. + items: + type: number + type: array + x-kubernetes-list-type: set + minutes: + description: A list containing a single item which + specifies the Minute interval at which this recurrence + should be triggered. + items: + type: number + type: array + x-kubernetes-list-type: set + monthDays: + description: A list of days of the month that the + job should execute on. + items: + type: number + type: array + x-kubernetes-list-type: set + monthly: + description: A monthly block as documented below. + items: + properties: + week: + description: The occurrence of the week within + the month. + type: number + weekday: + description: The day of the occurrence. Possible + values are Sunday, Monday, Tuesday, Wednesday, + Thursday, Friday and Saturday. 
+ type: string + type: object + type: array + weekDays: + description: A list of days of the week that the job + should execute on. Possible values are Sunday, Monday, + Tuesday, Wednesday, Thursday, Friday and Saturday. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + startTime: + description: The start time of the schedule, formatted + as an RFC3339 string. + type: string + timeZone: + description: The timezone of the start/end time. + type: string + type: object + type: object + resourceGroupName: + description: The name of the Resource Group where the Logic App + Integration Account Batch Configuration should exist. Changing + this forces a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + batchGroupName: + description: The batch group name of the Logic App Integration + Batch Configuration. Changing this forces a new resource to + be created. 
+ type: string + integrationAccountName: + description: The name of the Logic App Integration Account. Changing + this forces a new resource to be created. + type: string + integrationAccountNameRef: + description: Reference to a AppIntegrationAccount in logic to + populate integrationAccountName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + integrationAccountNameSelector: + description: Selector for a AppIntegrationAccount in logic to + populate integrationAccountName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + metadata: + additionalProperties: + type: string + description: A JSON mapping of any Metadata for this Logic App + Integration Account Batch Configuration. + type: object + x-kubernetes-map-type: granular + name: + description: The name which should be used for this Logic App + Integration Account Batch Configuration. Only Alphanumeric characters + allowed. Changing this forces a new resource to be created. + type: string + releaseCriteria: + description: A release_criteria block as documented below, which + is used to select the criteria to meet before processing each + batch. + properties: + batchSize: + description: The batch size in bytes for the Logic App Integration + Batch Configuration. + type: number + messageCount: + description: The message count for the Logic App Integration + Batch Configuration. + type: number + recurrence: + description: A recurrence block as documented below. + properties: + endTime: + description: The end time of the schedule, formatted as + an RFC3339 string. + type: string + frequency: + description: The frequency of the schedule. Possible values + are Day, Hour, Minute, Month, NotSpecified, Second, + Week and Year. + type: string + interval: + description: The number of frequencys between runs. + type: number + schedule: + description: A schedule block as documented below. + properties: + hours: + description: A list containing a single item, which + specifies the Hour interval at which this recurrence + should be triggered. 
+ items: + type: number + type: array + x-kubernetes-list-type: set + minutes: + description: A list containing a single item which + specifies the Minute interval at which this recurrence + should be triggered. + items: + type: number + type: array + x-kubernetes-list-type: set + monthDays: + description: A list of days of the month that the + job should execute on. + items: + type: number + type: array + x-kubernetes-list-type: set + monthly: + description: A monthly block as documented below. + items: + properties: + week: + description: The occurrence of the week within + the month. + type: number + weekday: + description: The day of the occurrence. Possible + values are Sunday, Monday, Tuesday, Wednesday, + Thursday, Friday and Saturday. + type: string + type: object + type: array + weekDays: + description: A list of days of the week that the job + should execute on. Possible values are Sunday, Monday, + Tuesday, Wednesday, Thursday, Friday and Saturday. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + startTime: + description: The start time of the schedule, formatted + as an RFC3339 string. + type: string + timeZone: + description: The timezone of the start/end time. + type: string + type: object + type: object + resourceGroupName: + description: The name of the Resource Group where the Logic App + Integration Account Batch Configuration should exist. Changing + this forces a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. 
+ ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.batchGroupName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.batchGroupName) + || (has(self.initProvider) && has(self.initProvider.batchGroupName))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.releaseCriteria is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.releaseCriteria) + || (has(self.initProvider) && has(self.initProvider.releaseCriteria))' + status: + description: AppIntegrationAccountBatchConfigurationStatus defines the + observed state of AppIntegrationAccountBatchConfiguration. + properties: + atProvider: + properties: + batchGroupName: + description: The batch group name of the Logic App Integration + Batch Configuration. Changing this forces a new resource to + be created. + type: string + id: + description: The ID of the Logic App Integration Account Batch + Configuration. + type: string + integrationAccountName: + description: The name of the Logic App Integration Account. Changing + this forces a new resource to be created. + type: string + metadata: + additionalProperties: + type: string + description: A JSON mapping of any Metadata for this Logic App + Integration Account Batch Configuration. + type: object + x-kubernetes-map-type: granular + name: + description: The name which should be used for this Logic App + Integration Account Batch Configuration. 
Only Alphanumeric characters + allowed. Changing this forces a new resource to be created. + type: string + releaseCriteria: + description: A release_criteria block as documented below, which + is used to select the criteria to meet before processing each + batch. + properties: + batchSize: + description: The batch size in bytes for the Logic App Integration + Batch Configuration. + type: number + messageCount: + description: The message count for the Logic App Integration + Batch Configuration. + type: number + recurrence: + description: A recurrence block as documented below. + properties: + endTime: + description: The end time of the schedule, formatted as + an RFC3339 string. + type: string + frequency: + description: The frequency of the schedule. Possible values + are Day, Hour, Minute, Month, NotSpecified, Second, + Week and Year. + type: string + interval: + description: The number of frequencys between runs. + type: number + schedule: + description: A schedule block as documented below. + properties: + hours: + description: A list containing a single item, which + specifies the Hour interval at which this recurrence + should be triggered. + items: + type: number + type: array + x-kubernetes-list-type: set + minutes: + description: A list containing a single item which + specifies the Minute interval at which this recurrence + should be triggered. + items: + type: number + type: array + x-kubernetes-list-type: set + monthDays: + description: A list of days of the month that the + job should execute on. + items: + type: number + type: array + x-kubernetes-list-type: set + monthly: + description: A monthly block as documented below. + items: + properties: + week: + description: The occurrence of the week within + the month. + type: number + weekday: + description: The day of the occurrence. Possible + values are Sunday, Monday, Tuesday, Wednesday, + Thursday, Friday and Saturday. 
+ type: string + type: object + type: array + weekDays: + description: A list of days of the week that the job + should execute on. Possible values are Sunday, Monday, + Tuesday, Wednesday, Thursday, Friday and Saturday. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + startTime: + description: The start time of the schedule, formatted + as an RFC3339 string. + type: string + timeZone: + description: The timezone of the start/end time. + type: string + type: object + type: object + resourceGroupName: + description: The name of the Resource Group where the Logic App + Integration Account Batch Configuration should exist. Changing + this forces a new resource to be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/logic.azure.upbound.io_apptriggerrecurrences.yaml b/package/crds/logic.azure.upbound.io_apptriggerrecurrences.yaml index 4c8edf3d7..85b75030b 100644 --- a/package/crds/logic.azure.upbound.io_apptriggerrecurrences.yaml +++ b/package/crds/logic.azure.upbound.io_apptriggerrecurrences.yaml @@ -556,3 +556,535 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: AppTriggerRecurrence is the Schema for the AppTriggerRecurrences + API. Manages a Recurrence Trigger within a Logic App Workflow + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: AppTriggerRecurrenceSpec defines the desired state of AppTriggerRecurrence + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + frequency: + description: Specifies the Frequency at which this Trigger should + be run. Possible values include Month, Week, Day, Hour, Minute + and Second. + type: string + interval: + description: Specifies interval used for the Frequency, for example + a value of 4 for interval and hour for frequency would run the + Trigger every 4 hours. + type: number + logicAppId: + description: Specifies the ID of the Logic App Workflow. Changing + this forces a new resource to be created. + type: string + logicAppIdRef: + description: Reference to a AppWorkflow in logic to populate logicAppId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + logicAppIdSelector: + description: Selector for a AppWorkflow in logic to populate logicAppId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + schedule: + description: A schedule block as specified below. + properties: + atTheseHours: + description: Specifies a list of hours when the trigger should + run. 
Valid values are between 0 and 23. + items: + type: number + type: array + x-kubernetes-list-type: set + atTheseMinutes: + description: Specifies a list of minutes when the trigger + should run. Valid values are between 0 and 59. + items: + type: number + type: array + x-kubernetes-list-type: set + onTheseDays: + description: Specifies a list of days when the trigger should + run. Valid values include Monday, Tuesday, Wednesday, Thursday, + Friday, Saturday, and Sunday. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + startTime: + description: 'Specifies the start date and time for this trigger + in RFC3339 format: 2000-01-02T03:04:05Z.' + type: string + timeZone: + description: Specifies the time zone for this trigger. Supported + time zone options are listed here + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + frequency: + description: Specifies the Frequency at which this Trigger should + be run. Possible values include Month, Week, Day, Hour, Minute + and Second. + type: string + interval: + description: Specifies interval used for the Frequency, for example + a value of 4 for interval and hour for frequency would run the + Trigger every 4 hours. + type: number + schedule: + description: A schedule block as specified below. 
+ properties: + atTheseHours: + description: Specifies a list of hours when the trigger should + run. Valid values are between 0 and 23. + items: + type: number + type: array + x-kubernetes-list-type: set + atTheseMinutes: + description: Specifies a list of minutes when the trigger + should run. Valid values are between 0 and 59. + items: + type: number + type: array + x-kubernetes-list-type: set + onTheseDays: + description: Specifies a list of days when the trigger should + run. Valid values include Monday, Tuesday, Wednesday, Thursday, + Friday, Saturday, and Sunday. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + startTime: + description: 'Specifies the start date and time for this trigger + in RFC3339 format: 2000-01-02T03:04:05Z.' + type: string + timeZone: + description: Specifies the time zone for this trigger. Supported + time zone options are listed here + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.frequency is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.frequency) + || (has(self.initProvider) && has(self.initProvider.frequency))' + - message: spec.forProvider.interval is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.interval) + || (has(self.initProvider) && has(self.initProvider.interval))' + status: + description: AppTriggerRecurrenceStatus defines the observed state of + AppTriggerRecurrence. + properties: + atProvider: + properties: + frequency: + description: Specifies the Frequency at which this Trigger should + be run. Possible values include Month, Week, Day, Hour, Minute + and Second. + type: string + id: + description: The ID of the Recurrence Trigger within the Logic + App Workflow. + type: string + interval: + description: Specifies interval used for the Frequency, for example + a value of 4 for interval and hour for frequency would run the + Trigger every 4 hours. + type: number + logicAppId: + description: Specifies the ID of the Logic App Workflow. Changing + this forces a new resource to be created. 
+ type: string + schedule: + description: A schedule block as specified below. + properties: + atTheseHours: + description: Specifies a list of hours when the trigger should + run. Valid values are between 0 and 23. + items: + type: number + type: array + x-kubernetes-list-type: set + atTheseMinutes: + description: Specifies a list of minutes when the trigger + should run. Valid values are between 0 and 59. + items: + type: number + type: array + x-kubernetes-list-type: set + onTheseDays: + description: Specifies a list of days when the trigger should + run. Valid values include Monday, Tuesday, Wednesday, Thursday, + Friday, Saturday, and Sunday. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + startTime: + description: 'Specifies the start date and time for this trigger + in RFC3339 format: 2000-01-02T03:04:05Z.' + type: string + timeZone: + description: Specifies the time zone for this trigger. Supported + time zone options are listed here + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. 
+ type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/logic.azure.upbound.io_appworkflows.yaml b/package/crds/logic.azure.upbound.io_appworkflows.yaml index cee61f01a..eedf7bd3b 100644 --- a/package/crds/logic.azure.upbound.io_appworkflows.yaml +++ b/package/crds/logic.azure.upbound.io_appworkflows.yaml @@ -895,3 +895,831 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: AppWorkflow is the Schema for the AppWorkflows API. Manages a + Logic App Workflow. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: AppWorkflowSpec defines the desired state of AppWorkflow + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + accessControl: + description: A access_control block as defined below. + properties: + action: + description: A action block as defined below. + properties: + allowedCallerIpAddressRange: + description: A list of the allowed caller IP address ranges. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + content: + description: A content block as defined below. + properties: + allowedCallerIpAddressRange: + description: A list of the allowed caller IP address ranges. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + trigger: + description: A trigger block as defined below. 
+ properties: + allowedCallerIpAddressRange: + description: A list of the allowed caller IP address ranges. + items: + type: string + type: array + x-kubernetes-list-type: set + openAuthenticationPolicy: + description: A open_authentication_policy block as defined + below. + items: + properties: + claim: + description: A claim block as defined below. + items: + properties: + name: + description: The OAuth policy name for the + Logic App Workflow. + type: string + value: + description: The value of the OAuth policy + claim for the Logic App Workflow. + type: string + type: object + type: array + name: + description: The OAuth policy name for the Logic + App Workflow. + type: string + type: object + type: array + type: object + workflowManagement: + description: A workflow_management block as defined below. + properties: + allowedCallerIpAddressRange: + description: A list of the allowed caller IP address ranges. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + enabled: + description: Is the Logic App Workflow enabled? Defaults to true. + type: boolean + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Logic App Workflow. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Logic App Workflow. Possible + values are SystemAssigned, UserAssigned. + type: string + type: object + integrationServiceEnvironmentId: + description: The ID of the Integration Service Environment to + which this Logic App Workflow belongs. Changing this forces + a new Logic App Workflow to be created. + type: string + location: + description: Specifies the supported Azure location where the + Logic App Workflow exists. Changing this forces a new resource + to be created. 
+ type: string + logicAppIntegrationAccountId: + description: The ID of the integration account linked by this + Logic App Workflow. + type: string + parameters: + additionalProperties: + type: string + description: A map of Key-Value pairs. + type: object + x-kubernetes-map-type: granular + resourceGroupName: + description: The name of the Resource Group in which the Logic + App Workflow should be created. Changing this forces a new resource + to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + workflowParameters: + additionalProperties: + type: string + description: 'Specifies a map of Key-Value pairs of the Parameter + Definitions to use for this Logic App Workflow. The key is the + parameter name, and the value is a JSON encoded string of the + parameter definition (see: https://docs.microsoft.com/azure/logic-apps/logic-apps-workflow-definition-language#parameters).' + type: object + x-kubernetes-map-type: granular + workflowSchema: + description: Specifies the Schema to use for this Logic App Workflow. + Defaults to https://schema.management.azure.com/providers/Microsoft.Logic/schemas/2016-06-01/workflowdefinition.json#. + Changing this forces a new resource to be created. + type: string + workflowVersion: + description: Specifies the version of the Schema used for this + Logic App Workflow. Defaults to 1.0.0.0. Changing this forces + a new resource to be created. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. 
+ InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + accessControl: + description: A access_control block as defined below. + properties: + action: + description: A action block as defined below. + properties: + allowedCallerIpAddressRange: + description: A list of the allowed caller IP address ranges. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + content: + description: A content block as defined below. + properties: + allowedCallerIpAddressRange: + description: A list of the allowed caller IP address ranges. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + trigger: + description: A trigger block as defined below. + properties: + allowedCallerIpAddressRange: + description: A list of the allowed caller IP address ranges. + items: + type: string + type: array + x-kubernetes-list-type: set + openAuthenticationPolicy: + description: A open_authentication_policy block as defined + below. + items: + properties: + claim: + description: A claim block as defined below. + items: + properties: + name: + description: The OAuth policy name for the + Logic App Workflow. + type: string + value: + description: The value of the OAuth policy + claim for the Logic App Workflow. + type: string + type: object + type: array + name: + description: The OAuth policy name for the Logic + App Workflow. 
+ type: string + type: object + type: array + type: object + workflowManagement: + description: A workflow_management block as defined below. + properties: + allowedCallerIpAddressRange: + description: A list of the allowed caller IP address ranges. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + enabled: + description: Is the Logic App Workflow enabled? Defaults to true. + type: boolean + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Logic App Workflow. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Logic App Workflow. Possible + values are SystemAssigned, UserAssigned. + type: string + type: object + integrationServiceEnvironmentId: + description: The ID of the Integration Service Environment to + which this Logic App Workflow belongs. Changing this forces + a new Logic App Workflow to be created. + type: string + location: + description: Specifies the supported Azure location where the + Logic App Workflow exists. Changing this forces a new resource + to be created. + type: string + logicAppIntegrationAccountId: + description: The ID of the integration account linked by this + Logic App Workflow. + type: string + parameters: + additionalProperties: + type: string + description: A map of Key-Value pairs. + type: object + x-kubernetes-map-type: granular + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + workflowParameters: + additionalProperties: + type: string + description: 'Specifies a map of Key-Value pairs of the Parameter + Definitions to use for this Logic App Workflow. 
The key is the + parameter name, and the value is a JSON encoded string of the + parameter definition (see: https://docs.microsoft.com/azure/logic-apps/logic-apps-workflow-definition-language#parameters).' + type: object + x-kubernetes-map-type: granular + workflowSchema: + description: Specifies the Schema to use for this Logic App Workflow. + Defaults to https://schema.management.azure.com/providers/Microsoft.Logic/schemas/2016-06-01/workflowdefinition.json#. + Changing this forces a new resource to be created. + type: string + workflowVersion: + description: Specifies the version of the Schema used for this + Logic App Workflow. Defaults to 1.0.0.0. Changing this forces + a new resource to be created. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + status: + description: AppWorkflowStatus defines the observed state of AppWorkflow. + properties: + atProvider: + properties: + accessControl: + description: A access_control block as defined below. + properties: + action: + description: A action block as defined below. + properties: + allowedCallerIpAddressRange: + description: A list of the allowed caller IP address ranges. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + content: + description: A content block as defined below. + properties: + allowedCallerIpAddressRange: + description: A list of the allowed caller IP address ranges. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + trigger: + description: A trigger block as defined below. + properties: + allowedCallerIpAddressRange: + description: A list of the allowed caller IP address ranges. + items: + type: string + type: array + x-kubernetes-list-type: set + openAuthenticationPolicy: + description: A open_authentication_policy block as defined + below. 
+ items: + properties: + claim: + description: A claim block as defined below. + items: + properties: + name: + description: The OAuth policy name for the + Logic App Workflow. + type: string + value: + description: The value of the OAuth policy + claim for the Logic App Workflow. + type: string + type: object + type: array + name: + description: The OAuth policy name for the Logic + App Workflow. + type: string + type: object + type: array + type: object + workflowManagement: + description: A workflow_management block as defined below. + properties: + allowedCallerIpAddressRange: + description: A list of the allowed caller IP address ranges. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + accessEndpoint: + description: The Access Endpoint for the Logic App Workflow. + type: string + connectorEndpointIpAddresses: + description: The list of access endpoint IP addresses of connector. + items: + type: string + type: array + connectorOutboundIpAddresses: + description: The list of outgoing IP addresses of connector. + items: + type: string + type: array + enabled: + description: Is the Logic App Workflow enabled? Defaults to true. + type: boolean + id: + description: The Logic App Workflow ID. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Logic App Workflow. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Principal ID for the Service Principal associated + with the Managed Service Identity of this Logic App Workflow. + type: string + tenantId: + description: The Tenant ID for the Service Principal associated + with the Managed Service Identity of this Logic App Workflow. + type: string + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Logic App Workflow. 
Possible + values are SystemAssigned, UserAssigned. + type: string + type: object + integrationServiceEnvironmentId: + description: The ID of the Integration Service Environment to + which this Logic App Workflow belongs. Changing this forces + a new Logic App Workflow to be created. + type: string + location: + description: Specifies the supported Azure location where the + Logic App Workflow exists. Changing this forces a new resource + to be created. + type: string + logicAppIntegrationAccountId: + description: The ID of the integration account linked by this + Logic App Workflow. + type: string + parameters: + additionalProperties: + type: string + description: A map of Key-Value pairs. + type: object + x-kubernetes-map-type: granular + resourceGroupName: + description: The name of the Resource Group in which the Logic + App Workflow should be created. Changing this forces a new resource + to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + workflowEndpointIpAddresses: + description: The list of access endpoint IP addresses of workflow. + items: + type: string + type: array + workflowOutboundIpAddresses: + description: The list of outgoing IP addresses of workflow. + items: + type: string + type: array + workflowParameters: + additionalProperties: + type: string + description: 'Specifies a map of Key-Value pairs of the Parameter + Definitions to use for this Logic App Workflow. The key is the + parameter name, and the value is a JSON encoded string of the + parameter definition (see: https://docs.microsoft.com/azure/logic-apps/logic-apps-workflow-definition-language#parameters).' + type: object + x-kubernetes-map-type: granular + workflowSchema: + description: Specifies the Schema to use for this Logic App Workflow. 
+ Defaults to https://schema.management.azure.com/providers/Microsoft.Logic/schemas/2016-06-01/workflowdefinition.json#. + Changing this forces a new resource to be created. + type: string + workflowVersion: + description: Specifies the version of the Schema used for this + Logic App Workflow. Defaults to 1.0.0.0. Changing this forces + a new resource to be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/logz.azure.upbound.io_monitors.yaml b/package/crds/logz.azure.upbound.io_monitors.yaml index 344a73d70..9f455de50 100644 --- a/package/crds/logz.azure.upbound.io_monitors.yaml +++ b/package/crds/logz.azure.upbound.io_monitors.yaml @@ -653,3 +653,626 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Monitor is the Schema for the Monitors API. Manages a logz Monitor. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: MonitorSpec defines the desired state of Monitor + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + companyName: + description: Name of the Logz organization. Changing this forces + a new logz Monitor to be created. + type: string + enabled: + description: Whether the resource monitoring is enabled? Defaults + to true. + type: boolean + enterpriseAppId: + description: The ID of the Enterprise App. Changing this forces + a new logz Monitor to be created. + type: string + location: + description: The Azure Region where the logz Monitor should exist. + Changing this forces a new logz Monitor to be created. + type: string + plan: + description: A plan block as defined below. Changing this forces + a new resource to be created. + properties: + billingCycle: + description: Different billing cycles. Possible values are + MONTHLY or WEEKLY. Changing this forces a new logz Monitor + to be created. + type: string + effectiveDate: + description: Date when plan was applied. Changing this forces + a new logz Monitor to be created. + type: string + planId: + description: Plan id as published by Logz. The only possible + value is 100gb14days. 
Defaults to 100gb14days. Changing + this forces a new logz Monitor to be created. + type: string + usageType: + description: Different usage types. Possible values are PAYG + or COMMITTED. Changing this forces a new logz Monitor to + be created. + type: string + type: object + resourceGroupName: + description: The name of the Resource Group where the logz Monitor + should exist. Changing this forces a new logz Monitor to be + created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + logz Monitor. + type: object + x-kubernetes-map-type: granular + user: + description: A user block as defined below. Changing this forces + a new resource to be created. + properties: + email: + description: Email of the user used by Logz for contacting + them if needed. Changing this forces a new logz Monitor + to be created. + type: string + firstName: + description: First Name of the user. Changing this forces + a new logz Monitor to be created. + type: string + lastName: + description: Last Name of the user. Changing this forces a + new logz Monitor to be created. + type: string + phoneNumber: + description: Phone number of the user used by Logz for contacting + them if needed. Changing this forces a new logz Monitor + to be created. + type: string + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. 
+ The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + companyName: + description: Name of the Logz organization. Changing this forces + a new logz Monitor to be created. + type: string + enabled: + description: Whether the resource monitoring is enabled? Defaults + to true. + type: boolean + enterpriseAppId: + description: The ID of the Enterprise App. Changing this forces + a new logz Monitor to be created. + type: string + location: + description: The Azure Region where the logz Monitor should exist. + Changing this forces a new logz Monitor to be created. + type: string + plan: + description: A plan block as defined below. Changing this forces + a new resource to be created. + properties: + billingCycle: + description: Different billing cycles. Possible values are + MONTHLY or WEEKLY. Changing this forces a new logz Monitor + to be created. + type: string + effectiveDate: + description: Date when plan was applied. Changing this forces + a new logz Monitor to be created. + type: string + planId: + description: Plan id as published by Logz. The only possible + value is 100gb14days. Defaults to 100gb14days. Changing + this forces a new logz Monitor to be created. + type: string + usageType: + description: Different usage types. Possible values are PAYG + or COMMITTED. Changing this forces a new logz Monitor to + be created. + type: string + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + logz Monitor. + type: object + x-kubernetes-map-type: granular + user: + description: A user block as defined below. Changing this forces + a new resource to be created. 
+ properties: + email: + description: Email of the user used by Logz for contacting + them if needed. Changing this forces a new logz Monitor + to be created. + type: string + firstName: + description: First Name of the user. Changing this forces + a new logz Monitor to be created. + type: string + lastName: + description: Last Name of the user. Changing this forces a + new logz Monitor to be created. + type: string + phoneNumber: + description: Phone number of the user used by Logz for contacting + them if needed. Changing this forces a new logz Monitor + to be created. + type: string + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. 
+ type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.plan is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.plan) + || (has(self.initProvider) && has(self.initProvider.plan))' + - message: spec.forProvider.user is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.user) + || (has(self.initProvider) && has(self.initProvider.user))' + status: + description: MonitorStatus defines the observed state of Monitor. + properties: + atProvider: + properties: + companyName: + description: Name of the Logz organization. Changing this forces + a new logz Monitor to be created. + type: string + enabled: + description: Whether the resource monitoring is enabled? Defaults + to true. + type: boolean + enterpriseAppId: + description: The ID of the Enterprise App. Changing this forces + a new logz Monitor to be created. + type: string + id: + description: The ID of the logz Monitor. + type: string + location: + description: The Azure Region where the logz Monitor should exist. + Changing this forces a new logz Monitor to be created. + type: string + logzOrganizationId: + description: The ID associated with the logz organization of this + logz Monitor. + type: string + plan: + description: A plan block as defined below. Changing this forces + a new resource to be created. 
+ properties: + billingCycle: + description: Different billing cycles. Possible values are + MONTHLY or WEEKLY. Changing this forces a new logz Monitor + to be created. + type: string + effectiveDate: + description: Date when plan was applied. Changing this forces + a new logz Monitor to be created. + type: string + planId: + description: Plan id as published by Logz. The only possible + value is 100gb14days. Defaults to 100gb14days. Changing + this forces a new logz Monitor to be created. + type: string + usageType: + description: Different usage types. Possible values are PAYG + or COMMITTED. Changing this forces a new logz Monitor to + be created. + type: string + type: object + resourceGroupName: + description: The name of the Resource Group where the logz Monitor + should exist. Changing this forces a new logz Monitor to be + created. + type: string + singleSignOnUrl: + description: The single sign on url associated with the logz organization + of this logz Monitor. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + logz Monitor. + type: object + x-kubernetes-map-type: granular + user: + description: A user block as defined below. Changing this forces + a new resource to be created. + properties: + email: + description: Email of the user used by Logz for contacting + them if needed. Changing this forces a new logz Monitor + to be created. + type: string + firstName: + description: First Name of the user. Changing this forces + a new logz Monitor to be created. + type: string + lastName: + description: Last Name of the user. Changing this forces a + new logz Monitor to be created. + type: string + phoneNumber: + description: Phone number of the user used by Logz for contacting + them if needed. Changing this forces a new logz Monitor + to be created. + type: string + type: object + type: object + conditions: + description: Conditions of the resource. 
+ items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/logz.azure.upbound.io_subaccounts.yaml b/package/crds/logz.azure.upbound.io_subaccounts.yaml index aa98a9b64..a1562d290 100644 --- a/package/crds/logz.azure.upbound.io_subaccounts.yaml +++ b/package/crds/logz.azure.upbound.io_subaccounts.yaml @@ -538,3 +538,514 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: SubAccount is the Schema for the SubAccounts API. Manages a logz + Sub Account. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SubAccountSpec defines the desired state of SubAccount + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + enabled: + description: Whether the resource monitoring is enabled? Defaults + to true. + type: boolean + logzMonitorId: + description: The ID of the Logz Monitor. Changing this forces + a new logz Sub Account to be created. + type: string + logzMonitorIdRef: + description: Reference to a Monitor in logz to populate logzMonitorId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + logzMonitorIdSelector: + description: Selector for a Monitor in logz to populate logzMonitorId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + logz Sub Account. + type: object + x-kubernetes-map-type: granular + user: + description: A user block as defined below. Changing this forces + a new resource to be created. + properties: + email: + description: Email of the user used by Logz for contacting + them if needed. A valid email address consists of an email + prefix and an email domain. 
The prefix and domain may contain + only letters, numbers, underscores, periods and dashes. + Changing this forces a new logz Sub Account to be created. + type: string + firstName: + description: First Name of the user. Possible values must + be between 1 and 50 characters in length. Changing this + forces a new logz Sub Account to be created. + type: string + lastName: + description: Last Name of the user. Possible values must be + between 1 and 50 characters in length. Changing this forces + a new logz Sub Account to be created. + type: string + phoneNumber: + description: Phone number of the user used by Logz for contacting + them if needed. Possible values must be between 1 and 40 + characters in length. Changing this forces a new logz Sub + Account to be created. + type: string + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + enabled: + description: Whether the resource monitoring is enabled? Defaults + to true. + type: boolean + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + logz Sub Account. + type: object + x-kubernetes-map-type: granular + user: + description: A user block as defined below. Changing this forces + a new resource to be created. 
+ properties: + email: + description: Email of the user used by Logz for contacting + them if needed. A valid email address consists of an email + prefix and an email domain. The prefix and domain may contain + only letters, numbers, underscores, periods and dashes. + Changing this forces a new logz Sub Account to be created. + type: string + firstName: + description: First Name of the user. Possible values must + be between 1 and 50 characters in length. Changing this + forces a new logz Sub Account to be created. + type: string + lastName: + description: Last Name of the user. Possible values must be + between 1 and 50 characters in length. Changing this forces + a new logz Sub Account to be created. + type: string + phoneNumber: + description: Phone number of the user used by Logz for contacting + them if needed. Possible values must be between 1 and 40 + characters in length. Changing this forces a new logz Sub + Account to be created. + type: string + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.user is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.user) + || (has(self.initProvider) && has(self.initProvider.user))' + status: + description: SubAccountStatus defines the observed state of SubAccount. + properties: + atProvider: + properties: + enabled: + description: Whether the resource monitoring is enabled? Defaults + to true. + type: boolean + id: + description: The ID of the logz Sub Account. + type: string + logzMonitorId: + description: The ID of the Logz Monitor. Changing this forces + a new logz Sub Account to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + logz Sub Account. + type: object + x-kubernetes-map-type: granular + user: + description: A user block as defined below. Changing this forces + a new resource to be created. 
+ properties: + email: + description: Email of the user used by Logz for contacting + them if needed. A valid email address consists of an email + prefix and an email domain. The prefix and domain may contain + only letters, numbers, underscores, periods and dashes. + Changing this forces a new logz Sub Account to be created. + type: string + firstName: + description: First Name of the user. Possible values must + be between 1 and 50 characters in length. Changing this + forces a new logz Sub Account to be created. + type: string + lastName: + description: Last Name of the user. Possible values must be + between 1 and 50 characters in length. Changing this forces + a new logz Sub Account to be created. + type: string + phoneNumber: + description: Phone number of the user used by Logz for contacting + them if needed. Possible values must be between 1 and 40 + characters in length. Changing this forces a new logz Sub + Account to be created. + type: string + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. 
+ type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/machinelearningservices.azure.upbound.io_computeclusters.yaml b/package/crds/machinelearningservices.azure.upbound.io_computeclusters.yaml index 58eb20ee8..d210e707c 100644 --- a/package/crds/machinelearningservices.azure.upbound.io_computeclusters.yaml +++ b/package/crds/machinelearningservices.azure.upbound.io_computeclusters.yaml @@ -1031,3 +1031,998 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ComputeCluster is the Schema for the ComputeClusters API. Manages + a Machine Learning Compute Cluster. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ComputeClusterSpec defines the desired state of ComputeCluster + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: The description of the Machine Learning compute. + Changing this forces a new Machine Learning Compute Cluster + to be created. + type: string + identity: + description: An identity block as defined below. Changing this + forces a new Machine Learning Compute Cluster to be created. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Machine Learning Compute Cluster. + Changing this forces a new resource to be created. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Machine Learning Compute + Cluster. Possible values are SystemAssigned, UserAssigned, + SystemAssigned, UserAssigned (to enable both). Changing + this forces a new resource to be created. + type: string + type: object + localAuthEnabled: + description: Whether local authentication methods is enabled. + Defaults to true. Changing this forces a new Machine Learning + Compute Cluster to be created. + type: boolean + location: + description: The Azure Region where the Machine Learning Compute + Cluster should exist. Changing this forces a new Machine Learning + Compute Cluster to be created. + type: string + machineLearningWorkspaceId: + description: The ID of the Machine Learning Workspace. Changing + this forces a new Machine Learning Compute Cluster to be created. + type: string + machineLearningWorkspaceIdRef: + description: Reference to a Workspace in machinelearningservices + to populate machineLearningWorkspaceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + machineLearningWorkspaceIdSelector: + description: Selector for a Workspace in machinelearningservices + to populate machineLearningWorkspaceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: The name which should be used for this Machine Learning + Compute Cluster. Changing this forces a new Machine Learning + Compute Cluster to be created. + type: string + nodePublicIpEnabled: + description: Whether the compute cluster will have a public ip. + To set this to false a subnet_resource_id needs to be set. Defaults + to true. Changing this forces a new Machine Learning Compute + Cluster to be created. + type: boolean + scaleSettings: + description: A scale_settings block as defined below. Changing + this forces a new Machine Learning Compute Cluster to be created. 
+ properties: + maxNodeCount: + description: Maximum node count. Changing this forces a new + Machine Learning Compute Cluster to be created. + type: number + minNodeCount: + description: Minimal node count. Changing this forces a new + Machine Learning Compute Cluster to be created. + type: number + scaleDownNodesAfterIdleDuration: + description: 'Node Idle Time Before Scale Down: defines the + time until the compute is shutdown when it has gone into + Idle state. Is defined according to W3C XML schema standard + for duration. Changing this forces a new Machine Learning + Compute Cluster to be created.' + type: string + type: object + ssh: + description: Credentials for an administrator user account that + will be created on each compute node. A ssh block as defined + below. Changing this forces a new Machine Learning Compute Cluster + to be created. + properties: + adminPassword: + description: Password of the administrator user account. Changing + this forces a new Machine Learning Compute Cluster to be + created. + type: string + adminUsername: + description: Name of the administrator user account which + can be used to SSH to nodes. Changing this forces a new + Machine Learning Compute Cluster to be created. + type: string + keyValue: + description: SSH public key of the administrator user account. + Changing this forces a new Machine Learning Compute Cluster + to be created. + type: string + type: object + sshPublicAccessEnabled: + description: A boolean value indicating whether enable the public + SSH port. Changing this forces a new Machine Learning Compute + Cluster to be created. + type: boolean + subnetResourceId: + description: The ID of the Subnet that the Compute Cluster should + reside in. Changing this forces a new Machine Learning Compute + Cluster to be created. + type: string + subnetResourceIdRef: + description: Reference to a Subnet in network to populate subnetResourceId. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetResourceIdSelector: + description: Selector for a Subnet in network to populate subnetResourceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Machine Learning Compute Cluster. Changing this forces a new + Machine Learning Compute Cluster to be created. + type: object + x-kubernetes-map-type: granular + vmPriority: + description: The priority of the VM. Changing this forces a new + Machine Learning Compute Cluster to be created. Accepted values + are Dedicated and LowPriority. + type: string + vmSize: + description: The size of the VM. Changing this forces a new Machine + Learning Compute Cluster to be created. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: The description of the Machine Learning compute. + Changing this forces a new Machine Learning Compute Cluster + to be created. + type: string + identity: + description: An identity block as defined below. Changing this + forces a new Machine Learning Compute Cluster to be created. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Machine Learning Compute Cluster. + Changing this forces a new resource to be created. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Machine Learning Compute + Cluster. Possible values are SystemAssigned, UserAssigned, + SystemAssigned, UserAssigned (to enable both). Changing + this forces a new resource to be created. + type: string + type: object + localAuthEnabled: + description: Whether local authentication methods is enabled. + Defaults to true. Changing this forces a new Machine Learning + Compute Cluster to be created. + type: boolean + location: + description: The Azure Region where the Machine Learning Compute + Cluster should exist. Changing this forces a new Machine Learning + Compute Cluster to be created. + type: string + machineLearningWorkspaceId: + description: The ID of the Machine Learning Workspace. Changing + this forces a new Machine Learning Compute Cluster to be created. + type: string + machineLearningWorkspaceIdRef: + description: Reference to a Workspace in machinelearningservices + to populate machineLearningWorkspaceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + machineLearningWorkspaceIdSelector: + description: Selector for a Workspace in machinelearningservices + to populate machineLearningWorkspaceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: The name which should be used for this Machine Learning + Compute Cluster. Changing this forces a new Machine Learning + Compute Cluster to be created. + type: string + nodePublicIpEnabled: + description: Whether the compute cluster will have a public ip. + To set this to false a subnet_resource_id needs to be set. Defaults + to true. Changing this forces a new Machine Learning Compute + Cluster to be created. + type: boolean + scaleSettings: + description: A scale_settings block as defined below. Changing + this forces a new Machine Learning Compute Cluster to be created. 
+ properties: + maxNodeCount: + description: Maximum node count. Changing this forces a new + Machine Learning Compute Cluster to be created. + type: number + minNodeCount: + description: Minimal node count. Changing this forces a new + Machine Learning Compute Cluster to be created. + type: number + scaleDownNodesAfterIdleDuration: + description: 'Node Idle Time Before Scale Down: defines the + time until the compute is shutdown when it has gone into + Idle state. Is defined according to W3C XML schema standard + for duration. Changing this forces a new Machine Learning + Compute Cluster to be created.' + type: string + type: object + ssh: + description: Credentials for an administrator user account that + will be created on each compute node. A ssh block as defined + below. Changing this forces a new Machine Learning Compute Cluster + to be created. + properties: + adminPassword: + description: Password of the administrator user account. Changing + this forces a new Machine Learning Compute Cluster to be + created. + type: string + adminUsername: + description: Name of the administrator user account which + can be used to SSH to nodes. Changing this forces a new + Machine Learning Compute Cluster to be created. + type: string + keyValue: + description: SSH public key of the administrator user account. + Changing this forces a new Machine Learning Compute Cluster + to be created. + type: string + type: object + sshPublicAccessEnabled: + description: A boolean value indicating whether enable the public + SSH port. Changing this forces a new Machine Learning Compute + Cluster to be created. + type: boolean + subnetResourceId: + description: The ID of the Subnet that the Compute Cluster should + reside in. Changing this forces a new Machine Learning Compute + Cluster to be created. + type: string + subnetResourceIdRef: + description: Reference to a Subnet in network to populate subnetResourceId. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetResourceIdSelector: + description: Selector for a Subnet in network to populate subnetResourceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Machine Learning Compute Cluster. Changing this forces a new + Machine Learning Compute Cluster to be created. + type: object + x-kubernetes-map-type: granular + vmPriority: + description: The priority of the VM. Changing this forces a new + Machine Learning Compute Cluster to be created. Accepted values + are Dedicated and LowPriority. + type: string + vmSize: + description: The size of the VM. Changing this forces a new Machine + Learning Compute Cluster to be created. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.scaleSettings is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.scaleSettings) + || (has(self.initProvider) && has(self.initProvider.scaleSettings))' + - message: spec.forProvider.vmPriority is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.vmPriority) + || (has(self.initProvider) && has(self.initProvider.vmPriority))' + - message: spec.forProvider.vmSize is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || 
has(self.forProvider.vmSize) + || (has(self.initProvider) && has(self.initProvider.vmSize))' + status: + description: ComputeClusterStatus defines the observed state of ComputeCluster. + properties: + atProvider: + properties: + description: + description: The description of the Machine Learning compute. + Changing this forces a new Machine Learning Compute Cluster + to be created. + type: string + id: + description: The ID of the Machine Learning Compute Cluster. + type: string + identity: + description: An identity block as defined below. Changing this + forces a new Machine Learning Compute Cluster to be created. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Machine Learning Compute Cluster. + Changing this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Principal ID for the Service Principal associated + with the Managed Service Identity of this Machine Learning + Compute Cluster. + type: string + tenantId: + description: The Tenant ID for the Service Principal associated + with the Managed Service Identity of this Machine Learning + Compute Cluster. + type: string + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Machine Learning Compute + Cluster. Possible values are SystemAssigned, UserAssigned, + SystemAssigned, UserAssigned (to enable both). Changing + this forces a new resource to be created. + type: string + type: object + localAuthEnabled: + description: Whether local authentication methods is enabled. + Defaults to true. Changing this forces a new Machine Learning + Compute Cluster to be created. + type: boolean + location: + description: The Azure Region where the Machine Learning Compute + Cluster should exist. Changing this forces a new Machine Learning + Compute Cluster to be created. 
+ type: string + machineLearningWorkspaceId: + description: The ID of the Machine Learning Workspace. Changing + this forces a new Machine Learning Compute Cluster to be created. + type: string + name: + description: The name which should be used for this Machine Learning + Compute Cluster. Changing this forces a new Machine Learning + Compute Cluster to be created. + type: string + nodePublicIpEnabled: + description: Whether the compute cluster will have a public ip. + To set this to false a subnet_resource_id needs to be set. Defaults + to true. Changing this forces a new Machine Learning Compute + Cluster to be created. + type: boolean + scaleSettings: + description: A scale_settings block as defined below. Changing + this forces a new Machine Learning Compute Cluster to be created. + properties: + maxNodeCount: + description: Maximum node count. Changing this forces a new + Machine Learning Compute Cluster to be created. + type: number + minNodeCount: + description: Minimal node count. Changing this forces a new + Machine Learning Compute Cluster to be created. + type: number + scaleDownNodesAfterIdleDuration: + description: 'Node Idle Time Before Scale Down: defines the + time until the compute is shutdown when it has gone into + Idle state. Is defined according to W3C XML schema standard + for duration. Changing this forces a new Machine Learning + Compute Cluster to be created.' + type: string + type: object + ssh: + description: Credentials for an administrator user account that + will be created on each compute node. A ssh block as defined + below. Changing this forces a new Machine Learning Compute Cluster + to be created. + properties: + adminPassword: + description: Password of the administrator user account. Changing + this forces a new Machine Learning Compute Cluster to be + created. + type: string + adminUsername: + description: Name of the administrator user account which + can be used to SSH to nodes. 
Changing this forces a new + Machine Learning Compute Cluster to be created. + type: string + keyValue: + description: SSH public key of the administrator user account. + Changing this forces a new Machine Learning Compute Cluster + to be created. + type: string + type: object + sshPublicAccessEnabled: + description: A boolean value indicating whether enable the public + SSH port. Changing this forces a new Machine Learning Compute + Cluster to be created. + type: boolean + subnetResourceId: + description: The ID of the Subnet that the Compute Cluster should + reside in. Changing this forces a new Machine Learning Compute + Cluster to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Machine Learning Compute Cluster. Changing this forces a new + Machine Learning Compute Cluster to be created. + type: object + x-kubernetes-map-type: granular + vmPriority: + description: The priority of the VM. Changing this forces a new + Machine Learning Compute Cluster to be created. Accepted values + are Dedicated and LowPriority. + type: string + vmSize: + description: The size of the VM. Changing this forces a new Machine + Learning Compute Cluster to be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/machinelearningservices.azure.upbound.io_computeinstances.yaml b/package/crds/machinelearningservices.azure.upbound.io_computeinstances.yaml index f4ac6bed8..414b8bb9f 100644 --- a/package/crds/machinelearningservices.azure.upbound.io_computeinstances.yaml +++ b/package/crds/machinelearningservices.azure.upbound.io_computeinstances.yaml @@ -859,3 +859,826 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: 
ComputeInstance is the Schema for the ComputeInstances API. Manages + a Machine Learning Compute Instance. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ComputeInstanceSpec defines the desired state of ComputeInstance + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + assignToUser: + description: A assign_to_user block as defined below. A user explicitly + assigned to a personal compute instance. Changing this forces + a new Machine Learning Compute Instance to be created. + properties: + objectId: + description: User’s AAD Object Id. + type: string + tenantId: + description: User’s AAD Tenant Id. 
+ type: string + type: object + authorizationType: + description: 'The Compute Instance Authorization type. Possible + values include: personal. Changing this forces a new Machine + Learning Compute Instance to be created.' + type: string + description: + description: The description of the Machine Learning Compute Instance. + Changing this forces a new Machine Learning Compute Instance + to be created. + type: string + identity: + description: An identity block as defined below. Changing this + forces a new Machine Learning Compute Instance to be created. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Machine Learning Compute Instance. + Changing this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Machine Learning Compute + Instance. Possible values are SystemAssigned, UserAssigned, + SystemAssigned, UserAssigned (to enable both). Changing + this forces a new resource to be created. + type: string + type: object + localAuthEnabled: + description: Whether local authentication methods is enabled. + Defaults to true. Changing this forces a new Machine Learning + Compute Instance to be created. + type: boolean + location: + description: The Azure Region where the Machine Learning Compute + Instance should exist. Changing this forces a new Machine Learning + Compute Instance to be created. + type: string + machineLearningWorkspaceId: + description: The ID of the Machine Learning Workspace. Changing + this forces a new Machine Learning Compute Instance to be created. + type: string + machineLearningWorkspaceIdRef: + description: Reference to a Workspace in machinelearningservices + to populate machineLearningWorkspaceId. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + machineLearningWorkspaceIdSelector: + description: Selector for a Workspace in machinelearningservices + to populate machineLearningWorkspaceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + nodePublicIpEnabled: + description: Whether the compute instance will have a public ip. + To set this to false a subnet_resource_id needs to be set. Defaults + to true. Changing this forces a new Machine Learning Compute + Cluster to be created. + type: boolean + ssh: + description: A ssh block as defined below. Specifies policy and + settings for SSH access. Changing this forces a new Machine + Learning Compute Instance to be created. + properties: + publicKey: + description: Specifies the SSH rsa public key file as a string. + Use "ssh-keygen -t rsa -b 2048" to generate your SSH key + pairs. + type: string + type: object + subnetResourceId: + description: Virtual network subnet resource ID the compute nodes + belong to. Changing this forces a new Machine Learning Compute + Instance to be created. + type: string + subnetResourceIdRef: + description: Reference to a Subnet in network to populate subnetResourceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetResourceIdSelector: + description: Selector for a Subnet in network to populate subnetResourceId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Machine Learning Compute Instance. Changing this forces a new + Machine Learning Compute Instance to be created. + type: object + x-kubernetes-map-type: granular + virtualMachineSize: + description: The Virtual Machine Size. Changing this forces a + new Machine Learning Compute Instance to be created. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. 
+ The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + assignToUser: + description: A assign_to_user block as defined below. A user explicitly + assigned to a personal compute instance. Changing this forces + a new Machine Learning Compute Instance to be created. + properties: + objectId: + description: User’s AAD Object Id. + type: string + tenantId: + description: User’s AAD Tenant Id. + type: string + type: object + authorizationType: + description: 'The Compute Instance Authorization type. Possible + values include: personal. Changing this forces a new Machine + Learning Compute Instance to be created.' + type: string + description: + description: The description of the Machine Learning Compute Instance. + Changing this forces a new Machine Learning Compute Instance + to be created. + type: string + identity: + description: An identity block as defined below. Changing this + forces a new Machine Learning Compute Instance to be created. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Machine Learning Compute Instance. + Changing this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Machine Learning Compute + Instance. Possible values are SystemAssigned, UserAssigned, + SystemAssigned, UserAssigned (to enable both). Changing + this forces a new resource to be created. + type: string + type: object + localAuthEnabled: + description: Whether local authentication methods is enabled. + Defaults to true. 
Changing this forces a new Machine Learning + Compute Instance to be created. + type: boolean + location: + description: The Azure Region where the Machine Learning Compute + Instance should exist. Changing this forces a new Machine Learning + Compute Instance to be created. + type: string + nodePublicIpEnabled: + description: Whether the compute instance will have a public ip. + To set this to false a subnet_resource_id needs to be set. Defaults + to true. Changing this forces a new Machine Learning Compute + Cluster to be created. + type: boolean + ssh: + description: A ssh block as defined below. Specifies policy and + settings for SSH access. Changing this forces a new Machine + Learning Compute Instance to be created. + properties: + publicKey: + description: Specifies the SSH rsa public key file as a string. + Use "ssh-keygen -t rsa -b 2048" to generate your SSH key + pairs. + type: string + type: object + subnetResourceId: + description: Virtual network subnet resource ID the compute nodes + belong to. Changing this forces a new Machine Learning Compute + Instance to be created. + type: string + subnetResourceIdRef: + description: Reference to a Subnet in network to populate subnetResourceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetResourceIdSelector: + description: Selector for a Subnet in network to populate subnetResourceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Machine Learning Compute Instance. Changing this forces a new + Machine Learning Compute Instance to be created. + type: object + x-kubernetes-map-type: granular + virtualMachineSize: + description: The Virtual Machine Size. Changing this forces a + new Machine Learning Compute Instance to be created. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. 
+ ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.virtualMachineSize is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.virtualMachineSize) + || (has(self.initProvider) && has(self.initProvider.virtualMachineSize))' + status: + description: ComputeInstanceStatus defines the observed state of ComputeInstance. + properties: + atProvider: + properties: + assignToUser: + description: A assign_to_user block as defined below. A user explicitly + assigned to a personal compute instance. Changing this forces + a new Machine Learning Compute Instance to be created. + properties: + objectId: + description: User’s AAD Object Id. + type: string + tenantId: + description: User’s AAD Tenant Id. + type: string + type: object + authorizationType: + description: 'The Compute Instance Authorization type. Possible + values include: personal. Changing this forces a new Machine + Learning Compute Instance to be created.' + type: string + description: + description: The description of the Machine Learning Compute Instance. + Changing this forces a new Machine Learning Compute Instance + to be created. + type: string + id: + description: The ID of the Machine Learning Compute Instance. + type: string + identity: + description: An identity block as defined below. Changing this + forces a new Machine Learning Compute Instance to be created. 
+ properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Machine Learning Compute Instance. + Changing this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Principal ID for the Service Principal associated + with the Managed Service Identity of this Machine Learning + Compute Instance. + type: string + tenantId: + description: The Tenant ID for the Service Principal associated + with the Managed Service Identity of this Machine Learning + Compute Instance. + type: string + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Machine Learning Compute + Instance. Possible values are SystemAssigned, UserAssigned, + SystemAssigned, UserAssigned (to enable both). Changing + this forces a new resource to be created. + type: string + type: object + localAuthEnabled: + description: Whether local authentication methods is enabled. + Defaults to true. Changing this forces a new Machine Learning + Compute Instance to be created. + type: boolean + location: + description: The Azure Region where the Machine Learning Compute + Instance should exist. Changing this forces a new Machine Learning + Compute Instance to be created. + type: string + machineLearningWorkspaceId: + description: The ID of the Machine Learning Workspace. Changing + this forces a new Machine Learning Compute Instance to be created. + type: string + nodePublicIpEnabled: + description: Whether the compute instance will have a public ip. + To set this to false a subnet_resource_id needs to be set. Defaults + to true. Changing this forces a new Machine Learning Compute + Cluster to be created. + type: boolean + ssh: + description: A ssh block as defined below. Specifies policy and + settings for SSH access. Changing this forces a new Machine + Learning Compute Instance to be created. 
+ properties: + port: + description: Describes the port for connecting through SSH. + type: number + publicKey: + description: Specifies the SSH rsa public key file as a string. + Use "ssh-keygen -t rsa -b 2048" to generate your SSH key + pairs. + type: string + username: + description: The admin username of this Machine Learning Compute + Instance. + type: string + type: object + subnetResourceId: + description: Virtual network subnet resource ID the compute nodes + belong to. Changing this forces a new Machine Learning Compute + Instance to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Machine Learning Compute Instance. Changing this forces a new + Machine Learning Compute Instance to be created. + type: object + x-kubernetes-map-type: granular + virtualMachineSize: + description: The Virtual Machine Size. Changing this forces a + new Machine Learning Compute Instance to be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. 
+ type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/machinelearningservices.azure.upbound.io_synapsesparks.yaml b/package/crds/machinelearningservices.azure.upbound.io_synapsesparks.yaml index 41572d67d..16eb6494c 100644 --- a/package/crds/machinelearningservices.azure.upbound.io_synapsesparks.yaml +++ b/package/crds/machinelearningservices.azure.upbound.io_synapsesparks.yaml @@ -720,3 +720,699 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: SynapseSpark is the Schema for the SynapseSparks API. Manages + the linked service to link an Azure Machine learning workspace to an Azure + Synapse workspace. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SynapseSparkSpec defines the desired state of SynapseSpark + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: The description of the Machine Learning Synapse Spark. + Changing this forces a new Machine Learning Synapse Spark to + be created. + type: string + identity: + description: An identity block as defined below. Changing this + forces a new Machine Learning Synapse Spark to be created. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Machine Learning Synapse Spark. + Changing this forces a new resource to be created. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Machine Learning Synapse + Spark. Possible values are SystemAssigned, UserAssigned, + SystemAssigned, UserAssigned (to enable both). Changing + this forces a new resource to be created. + type: string + type: object + localAuthEnabled: + description: Whether local authentication methods is enabled. + Defaults to true. Changing this forces a new Machine Learning + Synapse Spark to be created. + type: boolean + location: + description: The Azure Region where the Machine Learning Synapse + Spark should exist. Changing this forces a new Machine Learning + Synapse Spark to be created. + type: string + machineLearningWorkspaceId: + description: The ID of the Machine Learning Workspace. Changing + this forces a new Machine Learning Synapse Spark to be created. + type: string + machineLearningWorkspaceIdRef: + description: Reference to a Workspace in machinelearningservices + to populate machineLearningWorkspaceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + machineLearningWorkspaceIdSelector: + description: Selector for a Workspace in machinelearningservices + to populate machineLearningWorkspaceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + synapseSparkPoolId: + description: The ID of the linked Synapse Spark Pool. Changing + this forces a new Machine Learning Synapse Spark to be created. + type: string + synapseSparkPoolIdRef: + description: Reference to a SparkPool in synapse to populate synapseSparkPoolId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + synapseSparkPoolIdSelector: + description: Selector for a SparkPool in synapse to populate synapseSparkPoolId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Machine Learning Synapse Spark. 
Changing this forces a new Machine + Learning Synapse Spark to be created. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: The description of the Machine Learning Synapse Spark. + Changing this forces a new Machine Learning Synapse Spark to + be created. + type: string + identity: + description: An identity block as defined below. Changing this + forces a new Machine Learning Synapse Spark to be created. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Machine Learning Synapse Spark. + Changing this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Machine Learning Synapse + Spark. Possible values are SystemAssigned, UserAssigned, + SystemAssigned, UserAssigned (to enable both). Changing + this forces a new resource to be created. + type: string + type: object + localAuthEnabled: + description: Whether local authentication methods is enabled. + Defaults to true. Changing this forces a new Machine Learning + Synapse Spark to be created. 
+ type: boolean + location: + description: The Azure Region where the Machine Learning Synapse + Spark should exist. Changing this forces a new Machine Learning + Synapse Spark to be created. + type: string + synapseSparkPoolId: + description: The ID of the linked Synapse Spark Pool. Changing + this forces a new Machine Learning Synapse Spark to be created. + type: string + synapseSparkPoolIdRef: + description: Reference to a SparkPool in synapse to populate synapseSparkPoolId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + synapseSparkPoolIdSelector: + description: Selector for a SparkPool in synapse to populate synapseSparkPoolId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Machine Learning Synapse Spark. Changing this forces a new Machine + Learning Synapse Spark to be created. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + status: + description: SynapseSparkStatus defines the observed state of SynapseSpark. + properties: + atProvider: + properties: + description: + description: The description of the Machine Learning Synapse Spark. + Changing this forces a new Machine Learning Synapse Spark to + be created. + type: string + id: + description: The ID of the Machine Learning Synapse Spark. + type: string + identity: + description: An identity block as defined below. Changing this + forces a new Machine Learning Synapse Spark to be created. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Machine Learning Synapse Spark. + Changing this forces a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Principal ID for the Service Principal associated + with the Managed Service Identity of this Machine Learning + Synapse Spark. 
+ type: string + tenantId: + description: The Tenant ID for the Service Principal associated + with the Managed Service Identity of this Machine Learning + Synapse Spark. + type: string + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Machine Learning Synapse + Spark. Possible values are SystemAssigned, UserAssigned, + SystemAssigned, UserAssigned (to enable both). Changing + this forces a new resource to be created. + type: string + type: object + localAuthEnabled: + description: Whether local authentication methods is enabled. + Defaults to true. Changing this forces a new Machine Learning + Synapse Spark to be created. + type: boolean + location: + description: The Azure Region where the Machine Learning Synapse + Spark should exist. Changing this forces a new Machine Learning + Synapse Spark to be created. + type: string + machineLearningWorkspaceId: + description: The ID of the Machine Learning Workspace. Changing + this forces a new Machine Learning Synapse Spark to be created. + type: string + synapseSparkPoolId: + description: The ID of the linked Synapse Spark Pool. Changing + this forces a new Machine Learning Synapse Spark to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Machine Learning Synapse Spark. Changing this forces a new Machine + Learning Synapse Spark to be created. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/machinelearningservices.azure.upbound.io_workspaces.yaml b/package/crds/machinelearningservices.azure.upbound.io_workspaces.yaml index e1e3729f8..9a3b9ce23 100644 --- a/package/crds/machinelearningservices.azure.upbound.io_workspaces.yaml +++ b/package/crds/machinelearningservices.azure.upbound.io_workspaces.yaml @@ -1899,3 +1899,1850 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Workspace is the Schema for the Workspaces API. Manages a Azure + Machine Learning Workspace. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: WorkspaceSpec defines the desired state of Workspace + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + applicationInsightsId: + description: The ID of the Application Insights associated with + this Machine Learning Workspace. Changing this forces a new + resource to be created. + type: string + applicationInsightsIdRef: + description: Reference to a ApplicationInsights in insights to + populate applicationInsightsId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + applicationInsightsIdSelector: + description: Selector for a ApplicationInsights in insights to + populate applicationInsightsId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + containerRegistryId: + description: The ID of the container registry associated with + this Machine Learning Workspace. Changing this forces a new + resource to be created. + type: string + description: + description: The description of this Machine Learning Workspace. + type: string + encryption: + description: An encryption block as defined below. Changing this + forces a new resource to be created. + properties: + keyId: + description: The Key Vault URI to access the encryption key. + type: string + keyIdRef: + description: Reference to a Key in keyvault to populate keyId. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + keyIdSelector: + description: Selector for a Key in keyvault to populate keyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + keyVaultId: + description: The ID of the keyVault where the customer owned + encryption key is present. + type: string + keyVaultIdRef: + description: Reference to a Vault in keyvault to populate + keyVaultId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + keyVaultIdSelector: + description: Selector for a Vault in keyvault to populate + keyVaultId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + userAssignedIdentityId: + description: The Key Vault URI to access the encryption key. + type: string + userAssignedIdentityIdRef: + description: Reference to a UserAssignedIdentity in managedidentity + to populate userAssignedIdentityId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + userAssignedIdentityIdSelector: + description: Selector for a UserAssignedIdentity in managedidentity + to populate userAssignedIdentityId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + featureStore: + description: A feature_store block as defined below. + properties: + computerSparkRuntimeVersion: + description: The version of Spark runtime. + type: string + offlineConnectionName: + description: The name of offline store connection. + type: string + onlineConnectionName: + description: The name of online store connection. + type: string + type: object + friendlyName: + description: Display name for this Machine Learning Workspace. + type: string + highBusinessImpact: + description: Flag to signal High Business Impact (HBI) data in + the workspace and reduce diagnostic data collected by the service. + Changing this forces a new resource to be created. + type: boolean + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Machine Learning Workspace. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Machine Learning Workspace. 
+ Possible values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + imageBuildComputeName: + description: The compute name for image build of the Machine Learning + Workspace. + type: string + keyVaultId: + description: The ID of key vault associated with this Machine + Learning Workspace. Changing this forces a new resource to be + created. + type: string + keyVaultIdRef: + description: Reference to a Vault in keyvault to populate keyVaultId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + keyVaultIdSelector: + description: Selector for a Vault in keyvault to populate keyVaultId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + kind: + description: The type of the Workspace. Possible values are Default, + FeatureStore. Defaults to Default + type: string + location: + description: Specifies the supported Azure location where the + Machine Learning Workspace should exist. Changing this forces + a new resource to be created. + type: string + managedNetwork: + description: A managed_network block as defined below. + properties: + isolationMode: + description: The isolation mode of the Machine Learning Workspace. + Possible values are Disabled, AllowOnlyApprovedOutbound, + and AllowInternetOutbound + type: string + type: object + primaryUserAssignedIdentity: + description: The user assigned identity id that represents the + workspace identity. + type: string + primaryUserAssignedIdentityRef: + description: Reference to a UserAssignedIdentity in managedidentity + to populate primaryUserAssignedIdentity. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + primaryUserAssignedIdentitySelector: + description: Selector for a UserAssignedIdentity in managedidentity + to populate primaryUserAssignedIdentity. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + publicAccessBehindVirtualNetworkEnabled: + description: Enable public access when this Machine Learning Workspace + is behind a VNet. Changing this forces a new resource to be + created. 
+ type: boolean + publicNetworkAccessEnabled: + description: Enable public access when this Machine Learning Workspace + is behind VNet. + type: boolean + resourceGroupName: + description: Specifies the name of the Resource Group in which + the Machine Learning Workspace should exist. Changing this forces + a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + skuName: + description: SKU/edition of the Machine Learning Workspace, possible + values are Free, Basic, Standard and Premium. Defaults to Basic. + type: string + storageAccountId: + description: The ID of the Storage Account associated with this + Machine Learning Workspace. Changing this forces a new resource + to be created. + type: string + storageAccountIdRef: + description: Reference to a Account in storage to populate storageAccountId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + storageAccountIdSelector: + description: Selector for a Account in storage to populate storageAccountId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + v1LegacyModeEnabled: + description: Enable V1 API features, enabling v1_legacy_mode may + prevent you from using features provided by the v2 API. Defaults + to false. + type: boolean + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. 
The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + applicationInsightsId: + description: The ID of the Application Insights associated with + this Machine Learning Workspace. Changing this forces a new + resource to be created. + type: string + applicationInsightsIdRef: + description: Reference to a ApplicationInsights in insights to + populate applicationInsightsId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + applicationInsightsIdSelector: + description: Selector for a ApplicationInsights in insights to + populate applicationInsightsId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + containerRegistryId: + description: The ID of the container registry associated with + this Machine Learning Workspace. Changing this forces a new + resource to be created. + type: string + description: + description: The description of this Machine Learning Workspace. + type: string + encryption: + description: An encryption block as defined below. Changing this + forces a new resource to be created. + properties: + keyId: + description: The Key Vault URI to access the encryption key. + type: string + keyIdRef: + description: Reference to a Key in keyvault to populate keyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + keyIdSelector: + description: Selector for a Key in keyvault to populate keyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + keyVaultId: + description: The ID of the keyVault where the customer owned + encryption key is present. + type: string + keyVaultIdRef: + description: Reference to a Vault in keyvault to populate + keyVaultId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + keyVaultIdSelector: + description: Selector for a Vault in keyvault to populate + keyVaultId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + userAssignedIdentityId: + description: The Key Vault URI to access the encryption key. + type: string + userAssignedIdentityIdRef: + description: Reference to a UserAssignedIdentity in managedidentity + to populate userAssignedIdentityId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + userAssignedIdentityIdSelector: + description: Selector for a UserAssignedIdentity in managedidentity + to populate userAssignedIdentityId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + featureStore: + description: A feature_store block as defined below. + properties: + computerSparkRuntimeVersion: + description: The version of Spark runtime. + type: string + offlineConnectionName: + description: The name of offline store connection. + type: string + onlineConnectionName: + description: The name of online store connection. + type: string + type: object + friendlyName: + description: Display name for this Machine Learning Workspace. + type: string + highBusinessImpact: + description: Flag to signal High Business Impact (HBI) data in + the workspace and reduce diagnostic data collected by the service. + Changing this forces a new resource to be created. + type: boolean + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Machine Learning Workspace. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Machine Learning Workspace. + Possible values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + imageBuildComputeName: + description: The compute name for image build of the Machine Learning + Workspace. + type: string + keyVaultId: + description: The ID of key vault associated with this Machine + Learning Workspace. 
Changing this forces a new resource to be + created. + type: string + keyVaultIdRef: + description: Reference to a Vault in keyvault to populate keyVaultId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + keyVaultIdSelector: + description: Selector for a Vault in keyvault to populate keyVaultId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + kind: + description: The type of the Workspace. Possible values are Default, + FeatureStore. Defaults to Default + type: string + location: + description: Specifies the supported Azure location where the + Machine Learning Workspace should exist. Changing this forces + a new resource to be created. + type: string + managedNetwork: + description: A managed_network block as defined below. + properties: + isolationMode: + description: The isolation mode of the Machine Learning Workspace. + Possible values are Disabled, AllowOnlyApprovedOutbound, + and AllowInternetOutbound + type: string + type: object + primaryUserAssignedIdentity: + description: The user assigned identity id that represents the + workspace identity. + type: string + primaryUserAssignedIdentityRef: + description: Reference to a UserAssignedIdentity in managedidentity + to populate primaryUserAssignedIdentity. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + primaryUserAssignedIdentitySelector: + description: Selector for a UserAssignedIdentity in managedidentity + to populate primaryUserAssignedIdentity. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + publicAccessBehindVirtualNetworkEnabled: + description: Enable public access when this Machine Learning Workspace + is behind a VNet. Changing this forces a new resource to be + created. + type: boolean + publicNetworkAccessEnabled: + description: Enable public access when this Machine Learning Workspace + is behind VNet. + type: boolean + skuName: + description: SKU/edition of the Machine Learning Workspace, possible + values are Free, Basic, Standard and Premium. Defaults to Basic. + type: string + storageAccountId: + description: The ID of the Storage Account associated with this + Machine Learning Workspace. 
Changing this forces a new resource + to be created. + type: string + storageAccountIdRef: + description: Reference to a Account in storage to populate storageAccountId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + storageAccountIdSelector: + description: Selector for a Account in storage to populate storageAccountId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + v1LegacyModeEnabled: + description: Enable V1 API features, enabling v1_legacy_mode may + prevent you from using features provided by the v2 API. Defaults + to false. + type: boolean + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. 
+ type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.identity is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.identity) + || (has(self.initProvider) && has(self.initProvider.identity))' + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + status: + description: WorkspaceStatus defines the observed state of Workspace. + properties: + atProvider: + properties: + applicationInsightsId: + description: The ID of the Application Insights associated with + this Machine Learning Workspace. Changing this forces a new + resource to be created. + type: string + containerRegistryId: + description: The ID of the container registry associated with + this Machine Learning Workspace. Changing this forces a new + resource to be created. + type: string + description: + description: The description of this Machine Learning Workspace. + type: string + discoveryUrl: + description: The url for the discovery service to identify regional + endpoints for machine learning experimentation services. + type: string + encryption: + description: An encryption block as defined below. Changing this + forces a new resource to be created. + properties: + keyId: + description: The Key Vault URI to access the encryption key. + type: string + keyVaultId: + description: The ID of the keyVault where the customer owned + encryption key is present. + type: string + userAssignedIdentityId: + description: The Key Vault URI to access the encryption key. 
+ type: string + type: object + featureStore: + description: A feature_store block as defined below. + properties: + computerSparkRuntimeVersion: + description: The version of Spark runtime. + type: string + offlineConnectionName: + description: The name of offline store connection. + type: string + onlineConnectionName: + description: The name of online store connection. + type: string + type: object + friendlyName: + description: Display name for this Machine Learning Workspace. + type: string + highBusinessImpact: + description: Flag to signal High Business Impact (HBI) data in + the workspace and reduce diagnostic data collected by the service. + Changing this forces a new resource to be created. + type: boolean + id: + description: The ID of the Machine Learning Workspace. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Machine Learning Workspace. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Principal ID associated with this Managed + Service Identity. + type: string + tenantId: + description: The Tenant ID associated with this Managed Service + Identity. + type: string + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Machine Learning Workspace. + Possible values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + imageBuildComputeName: + description: The compute name for image build of the Machine Learning + Workspace. + type: string + keyVaultId: + description: The ID of key vault associated with this Machine + Learning Workspace. Changing this forces a new resource to be + created. + type: string + kind: + description: The type of the Workspace. Possible values are Default, + FeatureStore. 
Defaults to Default + type: string + location: + description: Specifies the supported Azure location where the + Machine Learning Workspace should exist. Changing this forces + a new resource to be created. + type: string + managedNetwork: + description: A managed_network block as defined below. + properties: + isolationMode: + description: The isolation mode of the Machine Learning Workspace. + Possible values are Disabled, AllowOnlyApprovedOutbound, + and AllowInternetOutbound + type: string + type: object + primaryUserAssignedIdentity: + description: The user assigned identity id that represents the + workspace identity. + type: string + publicAccessBehindVirtualNetworkEnabled: + description: Enable public access when this Machine Learning Workspace + is behind a VNet. Changing this forces a new resource to be + created. + type: boolean + publicNetworkAccessEnabled: + description: Enable public access when this Machine Learning Workspace + is behind VNet. + type: boolean + resourceGroupName: + description: Specifies the name of the Resource Group in which + the Machine Learning Workspace should exist. Changing this forces + a new resource to be created. + type: string + skuName: + description: SKU/edition of the Machine Learning Workspace, possible + values are Free, Basic, Standard and Premium. Defaults to Basic. + type: string + storageAccountId: + description: The ID of the Storage Account associated with this + Machine Learning Workspace. Changing this forces a new resource + to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + v1LegacyModeEnabled: + description: Enable V1 API features, enabling v1_legacy_mode may + prevent you from using features provided by the v2 API. Defaults + to false. + type: boolean + workspaceId: + description: The immutable id associated with this workspace. 
+ type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/maintenance.azure.upbound.io_maintenanceconfigurations.yaml b/package/crds/maintenance.azure.upbound.io_maintenanceconfigurations.yaml index 51ff9cc38..71a6521f1 100644 --- a/package/crds/maintenance.azure.upbound.io_maintenanceconfigurations.yaml +++ b/package/crds/maintenance.azure.upbound.io_maintenanceconfigurations.yaml @@ -797,3 +797,767 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: MaintenanceConfiguration is the Schema for the MaintenanceConfigurations + API. Manages a Maintenance Configuration. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: MaintenanceConfigurationSpec defines the desired state of + MaintenanceConfiguration + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + inGuestUserPatchMode: + description: The in guest user patch mode. Possible values are + Platform or User. Must be specified when scope is InGuestPatch. + type: string + installPatches: + description: An install_patches block as defined below. + properties: + linux: + description: A linux block as defined above. This property + only applies when scope is set to InGuestPatch + items: + properties: + classificationsToInclude: + description: List of Classification category of patches + to be patched. Possible values are Critical, Security, + UpdateRollup, FeaturePack, ServicePack, Definition, + Tools and Updates. + items: + type: string + type: array + packageNamesMaskToExclude: + description: List of package names to be excluded from + patching. + items: + type: string + type: array + packageNamesMaskToInclude: + description: List of package names to be included for + patching. 
+ items: + type: string + type: array + type: object + type: array + reboot: + description: Possible reboot preference as defined by the + user based on which it would be decided to reboot the machine + or not after the patch operation is completed. Possible + values are Always, IfRequired and Never. This property only + applies when scope is set to InGuestPatch. + type: string + windows: + description: A windows block as defined above. This property + only applies when scope is set to InGuestPatch + items: + properties: + classificationsToInclude: + description: List of Classification category of patches + to be patched. Possible values are Critical, Security, + UpdateRollup, FeaturePack, ServicePack, Definition, + Tools and Updates. + items: + type: string + type: array + kbNumbersToExclude: + description: List of KB numbers to be excluded from + patching. + items: + type: string + type: array + kbNumbersToInclude: + description: List of KB numbers to be included for patching. + items: + type: string + type: array + type: object + type: array + type: object + location: + description: Specified the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + properties: + additionalProperties: + type: string + description: A mapping of properties to assign to the resource. + type: object + x-kubernetes-map-type: granular + resourceGroupName: + description: The name of the Resource Group where the Maintenance + Configuration should exist. Changing this forces a new resource + to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + scope: + description: The scope of the Maintenance Configuration. Possible + values are Extension, Host, InGuestPatch, OSImage, SQLDB or + SQLManagedInstance. 
+ type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. The + key could not contain upper case letter. + type: object + x-kubernetes-map-type: granular + visibility: + description: The visibility of the Maintenance Configuration. + The only allowable value is Custom. Defaults to Custom. + type: string + window: + description: A window block as defined below. + properties: + duration: + description: The duration of the maintenance window in HH:mm + format. + type: string + expirationDateTime: + description: Effective expiration date of the maintenance + window in YYYY-MM-DD hh:mm format. + type: string + recurEvery: + description: The rate at which a maintenance window is expected + to recur. The rate can be expressed as daily, weekly, or + monthly schedules. + type: string + startDateTime: + description: Effective start date of the maintenance window + in YYYY-MM-DD hh:mm format. + type: string + timeZone: + description: The time zone for the maintenance window. A list + of timezones can be obtained by executing [System.TimeZoneInfo]::GetSystemTimeZones() + in PowerShell. + type: string + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + inGuestUserPatchMode: + description: The in guest user patch mode. Possible values are + Platform or User. 
Must be specified when scope is InGuestPatch. + type: string + installPatches: + description: An install_patches block as defined below. + properties: + linux: + description: A linux block as defined above. This property + only applies when scope is set to InGuestPatch + items: + properties: + classificationsToInclude: + description: List of Classification category of patches + to be patched. Possible values are Critical, Security, + UpdateRollup, FeaturePack, ServicePack, Definition, + Tools and Updates. + items: + type: string + type: array + packageNamesMaskToExclude: + description: List of package names to be excluded from + patching. + items: + type: string + type: array + packageNamesMaskToInclude: + description: List of package names to be included for + patching. + items: + type: string + type: array + type: object + type: array + reboot: + description: Possible reboot preference as defined by the + user based on which it would be decided to reboot the machine + or not after the patch operation is completed. Possible + values are Always, IfRequired and Never. This property only + applies when scope is set to InGuestPatch. + type: string + windows: + description: A windows block as defined above. This property + only applies when scope is set to InGuestPatch + items: + properties: + classificationsToInclude: + description: List of Classification category of patches + to be patched. Possible values are Critical, Security, + UpdateRollup, FeaturePack, ServicePack, Definition, + Tools and Updates. + items: + type: string + type: array + kbNumbersToExclude: + description: List of KB numbers to be excluded from + patching. + items: + type: string + type: array + kbNumbersToInclude: + description: List of KB numbers to be included for patching. + items: + type: string + type: array + type: object + type: array + type: object + location: + description: Specified the supported Azure location where the + resource exists. 
Changing this forces a new resource to be created. + type: string + properties: + additionalProperties: + type: string + description: A mapping of properties to assign to the resource. + type: object + x-kubernetes-map-type: granular + scope: + description: The scope of the Maintenance Configuration. Possible + values are Extension, Host, InGuestPatch, OSImage, SQLDB or + SQLManagedInstance. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. The + key could not contain upper case letter. + type: object + x-kubernetes-map-type: granular + visibility: + description: The visibility of the Maintenance Configuration. + The only allowable value is Custom. Defaults to Custom. + type: string + window: + description: A window block as defined below. + properties: + duration: + description: The duration of the maintenance window in HH:mm + format. + type: string + expirationDateTime: + description: Effective expiration date of the maintenance + window in YYYY-MM-DD hh:mm format. + type: string + recurEvery: + description: The rate at which a maintenance window is expected + to recur. The rate can be expressed as daily, weekly, or + monthly schedules. + type: string + startDateTime: + description: Effective start date of the maintenance window + in YYYY-MM-DD hh:mm format. + type: string + timeZone: + description: The time zone for the maintenance window. A list + of timezones can be obtained by executing [System.TimeZoneInfo]::GetSystemTimeZones() + in PowerShell. + type: string + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. 
Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.scope is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.scope) + || (has(self.initProvider) && has(self.initProvider.scope))' + status: + description: MaintenanceConfigurationStatus defines the observed state + of MaintenanceConfiguration. + properties: + atProvider: + properties: + id: + description: The ID of the Maintenance Configuration. + type: string + inGuestUserPatchMode: + description: The in guest user patch mode. Possible values are + Platform or User. Must be specified when scope is InGuestPatch. + type: string + installPatches: + description: An install_patches block as defined below. + properties: + linux: + description: A linux block as defined above. This property + only applies when scope is set to InGuestPatch + items: + properties: + classificationsToInclude: + description: List of Classification category of patches + to be patched. Possible values are Critical, Security, + UpdateRollup, FeaturePack, ServicePack, Definition, + Tools and Updates. + items: + type: string + type: array + packageNamesMaskToExclude: + description: List of package names to be excluded from + patching. + items: + type: string + type: array + packageNamesMaskToInclude: + description: List of package names to be included for + patching. 
+ items: + type: string + type: array + type: object + type: array + reboot: + description: Possible reboot preference as defined by the + user based on which it would be decided to reboot the machine + or not after the patch operation is completed. Possible + values are Always, IfRequired and Never. This property only + applies when scope is set to InGuestPatch. + type: string + windows: + description: A windows block as defined above. This property + only applies when scope is set to InGuestPatch + items: + properties: + classificationsToInclude: + description: List of Classification category of patches + to be patched. Possible values are Critical, Security, + UpdateRollup, FeaturePack, ServicePack, Definition, + Tools and Updates. + items: + type: string + type: array + kbNumbersToExclude: + description: List of KB numbers to be excluded from + patching. + items: + type: string + type: array + kbNumbersToInclude: + description: List of KB numbers to be included for patching. + items: + type: string + type: array + type: object + type: array + type: object + location: + description: Specified the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + properties: + additionalProperties: + type: string + description: A mapping of properties to assign to the resource. + type: object + x-kubernetes-map-type: granular + resourceGroupName: + description: The name of the Resource Group where the Maintenance + Configuration should exist. Changing this forces a new resource + to be created. + type: string + scope: + description: The scope of the Maintenance Configuration. Possible + values are Extension, Host, InGuestPatch, OSImage, SQLDB or + SQLManagedInstance. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. The + key could not contain upper case letter. 
+ type: object + x-kubernetes-map-type: granular + visibility: + description: The visibility of the Maintenance Configuration. + The only allowable value is Custom. Defaults to Custom. + type: string + window: + description: A window block as defined below. + properties: + duration: + description: The duration of the maintenance window in HH:mm + format. + type: string + expirationDateTime: + description: Effective expiration date of the maintenance + window in YYYY-MM-DD hh:mm format. + type: string + recurEvery: + description: The rate at which a maintenance window is expected + to recur. The rate can be expressed as daily, weekly, or + monthly schedules. + type: string + startDateTime: + description: Effective start date of the maintenance window + in YYYY-MM-DD hh:mm format. + type: string + timeZone: + description: The time zone for the maintenance window. A list + of timezones can be obtained by executing [System.TimeZoneInfo]::GetSystemTimeZones() + in PowerShell. + type: string + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. 
+ type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/media.azure.upbound.io_assetfilters.yaml b/package/crds/media.azure.upbound.io_assetfilters.yaml index 17adad8e5..3ca34259a 100644 --- a/package/crds/media.azure.upbound.io_assetfilters.yaml +++ b/package/crds/media.azure.upbound.io_assetfilters.yaml @@ -683,3 +683,659 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: AssetFilter is the Schema for the AssetFilters API. Manages an + Azure Media Asset Filter. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: AssetFilterSpec defines the desired state of AssetFilter + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + assetId: + description: The Asset ID for which the Asset Filter should be + created. Changing this forces a new Asset Filter to be created. + type: string + assetIdRef: + description: Reference to a Asset in media to populate assetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + assetIdSelector: + description: Selector for a Asset in media to populate assetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + firstQualityBitrate: + description: The first quality bitrate. Sets the first video track + to appear in the Live Streaming playlist to allow HLS native + players to start downloading from this quality level at the + beginning. + type: number + presentationTimeRange: + description: A presentation_time_range block as defined below. 
+ properties: + endInUnits: + description: |- + The absolute end time boundary. Applies to Video on Demand (VoD). + For the Live Streaming presentation, it is silently ignored and applied when the presentation ends and the stream becomes VoD. This is a long value that represents an absolute end point of the presentation, rounded to the closest next GOP start. The unit is defined by unit_timescale_in_miliseconds, so an end_in_units of 180 would be for 3 minutes. Use start_in_units and end_in_units to trim the fragments that will be in the playlist (manifest). For example, start_in_units set to 20 and end_in_units set to 60 using unit_timescale_in_miliseconds in 1000 will generate a playlist that contains fragments from between 20 seconds and 60 seconds of the VoD presentation. If a fragment straddles the boundary, the entire fragment will be included in the manifest. + type: number + forceEnd: + description: 'Indicates whether the end_in_units property + must be present. If true, end_in_units must be specified + or a bad request code is returned. Applies to Live Streaming + only. Allowed values: false, true.' + type: boolean + liveBackoffInUnits: + description: |- + The relative to end right edge. Applies to Live Streaming only. + This value defines the latest live position that a client can seek to. Using this property, you can delay live playback position and create a server-side buffer for players. The unit is defined by unit_timescale_in_miliseconds. The maximum live back off duration is 300 seconds. For example, a value of 20 means that the latest available content is 20 seconds delayed from the real live edge. + type: number + presentationWindowInUnits: + description: The relative to end sliding window. Applies to + Live Streaming only. Use presentation_window_in_units to + apply a sliding window of fragments to include in a playlist. + The unit is defined by unit_timescale_in_miliseconds. 
For + example, set presentation_window_in_units to 120 to apply + a two-minute sliding window. Media within 2 minutes of the + live edge will be included in the playlist. If a fragment + straddles the boundary, the entire fragment will be included + in the playlist. The minimum presentation window duration + is 60 seconds. + type: number + startInUnits: + description: The absolute start time boundary. Applies to + Video on Demand (VoD) or Live Streaming. This is a long + value that represents an absolute start point of the stream. + The value gets rounded to the closest next GOP start. The + unit is defined by unit_timescale_in_miliseconds, so a start_in_units + of 15 would be for 15 seconds. Use start_in_units and end_in_units + to trim the fragments that will be in the playlist (manifest). + For example, start_in_units set to 20 and end_in_units set + to 60 using unit_timescale_in_miliseconds in 1000 will generate + a playlist that contains fragments from between 20 seconds + and 60 seconds of the VoD presentation. If a fragment straddles + the boundary, the entire fragment will be included in the + manifest. + type: number + unitTimescaleInMiliseconds: + description: Specified as the number of miliseconds in one + unit timescale. For example, if you want to set a start_in_units + at 30 seconds, you would use a value of 30 when using the + unit_timescale_in_miliseconds in 1000. Or if you want to + set start_in_units in 30 miliseconds, you would use a value + of 30 when using the unit_timescale_in_miliseconds in 1. + Applies timescale to start_in_units, start_timescale and + presentation_window_in_timescale and live_backoff_in_timescale. + type: number + type: object + trackSelection: + description: One or more track_selection blocks as defined below. + items: + properties: + condition: + description: One or more condition blocks as defined above. + items: + properties: + operation: + description: The condition operation to test a track + property against. 
Supported values are Equal and + NotEqual. + type: string + property: + description: The track property to compare. Supported + values are Bitrate, FourCC, Language, Name and Type. + Check documentation for more details. + type: string + value: + description: The track property value to match or + not match. + type: string + type: object + type: array + type: object + type: array + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + firstQualityBitrate: + description: The first quality bitrate. Sets the first video track + to appear in the Live Streaming playlist to allow HLS native + players to start downloading from this quality level at the + beginning. + type: number + presentationTimeRange: + description: A presentation_time_range block as defined below. + properties: + endInUnits: + description: |- + The absolute end time boundary. Applies to Video on Demand (VoD). + For the Live Streaming presentation, it is silently ignored and applied when the presentation ends and the stream becomes VoD. This is a long value that represents an absolute end point of the presentation, rounded to the closest next GOP start. The unit is defined by unit_timescale_in_miliseconds, so an end_in_units of 180 would be for 3 minutes. Use start_in_units and end_in_units to trim the fragments that will be in the playlist (manifest). 
For example, start_in_units set to 20 and end_in_units set to 60 using unit_timescale_in_miliseconds in 1000 will generate a playlist that contains fragments from between 20 seconds and 60 seconds of the VoD presentation. If a fragment straddles the boundary, the entire fragment will be included in the manifest. + type: number + forceEnd: + description: 'Indicates whether the end_in_units property + must be present. If true, end_in_units must be specified + or a bad request code is returned. Applies to Live Streaming + only. Allowed values: false, true.' + type: boolean + liveBackoffInUnits: + description: |- + The relative to end right edge. Applies to Live Streaming only. + This value defines the latest live position that a client can seek to. Using this property, you can delay live playback position and create a server-side buffer for players. The unit is defined by unit_timescale_in_miliseconds. The maximum live back off duration is 300 seconds. For example, a value of 20 means that the latest available content is 20 seconds delayed from the real live edge. + type: number + presentationWindowInUnits: + description: The relative to end sliding window. Applies to + Live Streaming only. Use presentation_window_in_units to + apply a sliding window of fragments to include in a playlist. + The unit is defined by unit_timescale_in_miliseconds. For + example, set presentation_window_in_units to 120 to apply + a two-minute sliding window. Media within 2 minutes of the + live edge will be included in the playlist. If a fragment + straddles the boundary, the entire fragment will be included + in the playlist. The minimum presentation window duration + is 60 seconds. + type: number + startInUnits: + description: The absolute start time boundary. Applies to + Video on Demand (VoD) or Live Streaming. This is a long + value that represents an absolute start point of the stream. + The value gets rounded to the closest next GOP start. 
The + unit is defined by unit_timescale_in_miliseconds, so a start_in_units + of 15 would be for 15 seconds. Use start_in_units and end_in_units + to trim the fragments that will be in the playlist (manifest). + For example, start_in_units set to 20 and end_in_units set + to 60 using unit_timescale_in_miliseconds in 1000 will generate + a playlist that contains fragments from between 20 seconds + and 60 seconds of the VoD presentation. If a fragment straddles + the boundary, the entire fragment will be included in the + manifest. + type: number + unitTimescaleInMiliseconds: + description: Specified as the number of miliseconds in one + unit timescale. For example, if you want to set a start_in_units + at 30 seconds, you would use a value of 30 when using the + unit_timescale_in_miliseconds in 1000. Or if you want to + set start_in_units in 30 miliseconds, you would use a value + of 30 when using the unit_timescale_in_miliseconds in 1. + Applies timescale to start_in_units, start_timescale and + presentation_window_in_timescale and live_backoff_in_timescale. + type: number + type: object + trackSelection: + description: One or more track_selection blocks as defined below. + items: + properties: + condition: + description: One or more condition blocks as defined above. + items: + properties: + operation: + description: The condition operation to test a track + property against. Supported values are Equal and + NotEqual. + type: string + property: + description: The track property to compare. Supported + values are Bitrate, FourCC, Language, Name and Type. + Check documentation for more details. + type: string + value: + description: The track property value to match or + not match. + type: string + type: object + type: array + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. 
+ ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: AssetFilterStatus defines the observed state of AssetFilter. + properties: + atProvider: + properties: + assetId: + description: The Asset ID for which the Asset Filter should be + created. Changing this forces a new Asset Filter to be created. + type: string + firstQualityBitrate: + description: The first quality bitrate. Sets the first video track + to appear in the Live Streaming playlist to allow HLS native + players to start downloading from this quality level at the + beginning. + type: number + id: + description: The ID of the Asset Filter. 
+ type: string + presentationTimeRange: + description: A presentation_time_range block as defined below. + properties: + endInUnits: + description: |- + The absolute end time boundary. Applies to Video on Demand (VoD). + For the Live Streaming presentation, it is silently ignored and applied when the presentation ends and the stream becomes VoD. This is a long value that represents an absolute end point of the presentation, rounded to the closest next GOP start. The unit is defined by unit_timescale_in_miliseconds, so an end_in_units of 180 would be for 3 minutes. Use start_in_units and end_in_units to trim the fragments that will be in the playlist (manifest). For example, start_in_units set to 20 and end_in_units set to 60 using unit_timescale_in_miliseconds in 1000 will generate a playlist that contains fragments from between 20 seconds and 60 seconds of the VoD presentation. If a fragment straddles the boundary, the entire fragment will be included in the manifest. + type: number + forceEnd: + description: 'Indicates whether the end_in_units property + must be present. If true, end_in_units must be specified + or a bad request code is returned. Applies to Live Streaming + only. Allowed values: false, true.' + type: boolean + liveBackoffInUnits: + description: |- + The relative to end right edge. Applies to Live Streaming only. + This value defines the latest live position that a client can seek to. Using this property, you can delay live playback position and create a server-side buffer for players. The unit is defined by unit_timescale_in_miliseconds. The maximum live back off duration is 300 seconds. For example, a value of 20 means that the latest available content is 20 seconds delayed from the real live edge. + type: number + presentationWindowInUnits: + description: The relative to end sliding window. Applies to + Live Streaming only. Use presentation_window_in_units to + apply a sliding window of fragments to include in a playlist. 
+ The unit is defined by unit_timescale_in_miliseconds. For + example, set presentation_window_in_units to 120 to apply + a two-minute sliding window. Media within 2 minutes of the + live edge will be included in the playlist. If a fragment + straddles the boundary, the entire fragment will be included + in the playlist. The minimum presentation window duration + is 60 seconds. + type: number + startInUnits: + description: The absolute start time boundary. Applies to + Video on Demand (VoD) or Live Streaming. This is a long + value that represents an absolute start point of the stream. + The value gets rounded to the closest next GOP start. The + unit is defined by unit_timescale_in_miliseconds, so a start_in_units + of 15 would be for 15 seconds. Use start_in_units and end_in_units + to trim the fragments that will be in the playlist (manifest). + For example, start_in_units set to 20 and end_in_units set + to 60 using unit_timescale_in_miliseconds in 1000 will generate + a playlist that contains fragments from between 20 seconds + and 60 seconds of the VoD presentation. If a fragment straddles + the boundary, the entire fragment will be included in the + manifest. + type: number + unitTimescaleInMiliseconds: + description: Specified as the number of miliseconds in one + unit timescale. For example, if you want to set a start_in_units + at 30 seconds, you would use a value of 30 when using the + unit_timescale_in_miliseconds in 1000. Or if you want to + set start_in_units in 30 miliseconds, you would use a value + of 30 when using the unit_timescale_in_miliseconds in 1. + Applies timescale to start_in_units, start_timescale and + presentation_window_in_timescale and live_backoff_in_timescale. + type: number + type: object + trackSelection: + description: One or more track_selection blocks as defined below. + items: + properties: + condition: + description: One or more condition blocks as defined above. 
+ items: + properties: + operation: + description: The condition operation to test a track + property against. Supported values are Equal and + NotEqual. + type: string + property: + description: The track property to compare. Supported + values are Bitrate, FourCC, Language, Name and Type. + Check documentation for more details. + type: string + value: + description: The track property value to match or + not match. + type: string + type: object + type: array + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/media.azure.upbound.io_contentkeypolicies.yaml b/package/crds/media.azure.upbound.io_contentkeypolicies.yaml index 5ff5c2e30..95a27994c 100644 --- a/package/crds/media.azure.upbound.io_contentkeypolicies.yaml +++ b/package/crds/media.azure.upbound.io_contentkeypolicies.yaml @@ -1468,3 +1468,1407 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ContentKeyPolicy is the Schema for the ContentKeyPolicys API. + Manages a Content Key Policy. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. 
+ Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ContentKeyPolicySpec defines the desired state of ContentKeyPolicy + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: A description for the Policy. + type: string + mediaServicesAccountName: + description: The Media Services account name. Changing this forces + a new Content Key Policy to be created. + type: string + mediaServicesAccountNameRef: + description: Reference to a ServicesAccount in media to populate + mediaServicesAccountName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + mediaServicesAccountNameSelector: + description: Selector for a ServicesAccount in media to populate + mediaServicesAccountName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + policyOption: + description: One or more policy_option blocks as defined below. + items: + properties: + clearKeyConfigurationEnabled: + description: Enable a configuration for non-DRM keys. + type: boolean + fairplayConfiguration: + description: A fairplay_configuration block as defined above. + Check license requirements here https://docs.microsoft.com/azure/media-services/latest/fairplay-license-overview. 
+ properties: + askSecretRef: + description: The key that must be used as FairPlay Application + Secret key. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + offlineRentalConfiguration: + description: A offline_rental_configuration block as + defined below. + properties: + playbackDurationSeconds: + description: Playback duration. + type: number + storageDurationSeconds: + description: Storage duration. + type: number + type: object + pfxPasswordSecretRef: + description: The password encrypting FairPlay certificate + in PKCS 12 (pfx) format. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + pfxSecretRef: + description: The Base64 representation of FairPlay certificate + in PKCS 12 (pfx) format (including private key). + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + rentalAndLeaseKeyType: + description: The rental and lease key type. Supported + values are DualExpiry, PersistentLimited, PersistentUnlimited + or Undefined. + type: string + rentalDurationSeconds: + description: The rental duration. Must be greater than + 0. + type: number + type: object + name: + description: The name which should be used for this Policy + Option. + type: string + openRestrictionEnabled: + description: Enable an open restriction. License or key + will be delivered on every request. 
+ type: boolean + playreadyConfigurationLicense: + description: One or more playready_configuration_license + blocks as defined above. + items: + properties: + allowTestDevices: + description: A flag indicating whether test devices + can use the license. + type: boolean + beginDate: + description: The begin date of license. + type: string + contentKeyLocationFromHeaderEnabled: + description: Specifies that the content key ID is + in the PlayReady header. + type: boolean + contentKeyLocationFromKeyId: + description: The content key ID. Specifies that the + content key ID is specified in the PlayReady configuration. + type: string + contentType: + description: The PlayReady content type. Supported + values are UltraVioletDownload, UltraVioletStreaming + or Unspecified. + type: string + expirationDate: + description: The expiration date of license. + type: string + gracePeriodSecretRef: + description: The grace period of license. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + licenseType: + description: The license type. Supported values are + NonPersistent or Persistent. + type: string + playRight: + description: A play_right block as defined above. + properties: + agcAndColorStripeRestriction: + description: Configures Automatic Gain Control + (AGC) and Color Stripe in the license. Must + be between 0 and 3 inclusive. + type: number + allowPassingVideoContentToUnknownOutput: + description: Configures Unknown output handling + settings of the license. Supported values are + Allowed, AllowedWithVideoConstriction or NotAllowed. + type: string + analogVideoOpl: + description: Specifies the output protection level + for compressed digital audio. Supported values + are 100, 150 or 200. 
+ type: number + compressedDigitalAudioOpl: + description: Specifies the output protection level + for compressed digital audio.Supported values + are 100, 150, 200, 250 or 300. + type: number + compressedDigitalVideoOpl: + description: Specifies the output protection level + for compressed digital video. Supported values + are 400 or 500. + type: number + digitalVideoOnlyContentRestriction: + description: Enables the Image Constraint For + Analog Component Video Restriction in the license. + type: boolean + explicitAnalogTelevisionOutputRestriction: + description: An explicit_analog_television_output_restriction + block as defined above. + properties: + bestEffortEnforced: + description: Indicates whether this restriction + is enforced on a best effort basis. Possible + values are true or false. Defaults to false. + type: boolean + controlBits: + description: The restriction control bits. + Possible value is integer between 0 and + 3 inclusive. + type: number + type: object + firstPlayExpiration: + description: The amount of time that the license + is valid after the license is first used to + play content. + type: string + imageConstraintForAnalogComponentVideoRestriction: + description: Enables the Image Constraint For + Analog Component Video Restriction in the license. + type: boolean + imageConstraintForAnalogComputerMonitorRestriction: + description: Enables the Image Constraint For + Analog Component Video Restriction in the license. + type: boolean + scmsRestriction: + description: Configures the Serial Copy Management + System (SCMS) in the license. Must be between + 0 and 3 inclusive. + type: number + uncompressedDigitalAudioOpl: + description: Specifies the output protection level + for uncompressed digital audio. Supported values + are 100, 150, 200, 250 or 300. + type: number + uncompressedDigitalVideoOpl: + description: Specifies the output protection level + for uncompressed digital video. Supported values + are 100, 250, 270 or 300. 
+ type: number + type: object + relativeBeginDate: + description: The relative begin date of license. + type: string + relativeExpirationDate: + description: The relative expiration date of license. + type: string + securityLevel: + description: The security level of the PlayReady license. + Possible values are SL150, SL2000 and SL3000. Please + see this document for more information about security + level. See this document for more information about + SL3000 support. + type: string + type: object + type: array + playreadyResponseCustomData: + description: The custom response data of the PlayReady configuration. + This only applies when playready_configuration_license + is specified. + type: string + tokenRestriction: + description: A token_restriction block as defined below. + properties: + alternateKey: + description: One or more alternate_key block as defined + above. + items: + properties: + rsaTokenKeyExponentSecretRef: + description: The RSA parameter exponent. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + rsaTokenKeyModulusSecretRef: + description: The RSA parameter modulus. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + symmetricTokenKeySecretRef: + description: The key value of the key. Specifies + a symmetric key for token validation. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - key + - name + - namespace + type: object + x509TokenKeyRawSecretRef: + description: The raw data field of a certificate + in PKCS 12 format (X509Certificate2 in .NET). + Specifies a certificate for token validation. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + audience: + description: The audience for the token. + type: string + issuer: + description: The token issuer. + type: string + openIdConnectDiscoveryDocument: + description: The OpenID connect discovery document. + type: string + primaryRsaTokenKeyExponentSecretRef: + description: The RSA parameter exponent. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + primaryRsaTokenKeyModulusSecretRef: + description: The RSA parameter modulus. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + primarySymmetricTokenKeySecretRef: + description: The key value of the key. Specifies a symmetric + key for token validation. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + primaryX509TokenKeyRawSecretRef: + description: The raw data field of a certificate in + PKCS 12 format (X509Certificate2 in .NET). Specifies + a certificate for token validation. 
+ properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + requiredClaim: + description: One or more required_claim blocks as defined + above. + items: + properties: + type: + description: Token claim type. + type: string + value: + description: Token claim value. + type: string + type: object + type: array + tokenType: + description: The type of token. Supported values are + Jwt or Swt. + type: string + type: object + widevineConfigurationTemplate: + description: The Widevine template. + type: string + type: object + type: array + resourceGroupName: + description: The name of the Resource Group where the Content + Key Policy should exist. Changing this forces a new Content + Key Policy to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: A description for the Policy. + type: string + policyOption: + description: One or more policy_option blocks as defined below. + items: + properties: + clearKeyConfigurationEnabled: + description: Enable a configuration for non-DRM keys. + type: boolean + fairplayConfiguration: + description: A fairplay_configuration block as defined above. + Check license requirements here https://docs.microsoft.com/azure/media-services/latest/fairplay-license-overview. + properties: + offlineRentalConfiguration: + description: A offline_rental_configuration block as + defined below. + properties: + playbackDurationSeconds: + description: Playback duration. + type: number + storageDurationSeconds: + description: Storage duration. + type: number + type: object + rentalAndLeaseKeyType: + description: The rental and lease key type. Supported + values are DualExpiry, PersistentLimited, PersistentUnlimited + or Undefined. + type: string + rentalDurationSeconds: + description: The rental duration. Must be greater than + 0. + type: number + type: object + name: + description: The name which should be used for this Policy + Option. + type: string + openRestrictionEnabled: + description: Enable an open restriction. License or key + will be delivered on every request. + type: boolean + playreadyConfigurationLicense: + description: One or more playready_configuration_license + blocks as defined above. + items: + properties: + allowTestDevices: + description: A flag indicating whether test devices + can use the license. + type: boolean + beginDate: + description: The begin date of license. + type: string + contentKeyLocationFromHeaderEnabled: + description: Specifies that the content key ID is + in the PlayReady header. 
+ type: boolean + contentKeyLocationFromKeyId: + description: The content key ID. Specifies that the + content key ID is specified in the PlayReady configuration. + type: string + contentType: + description: The PlayReady content type. Supported + values are UltraVioletDownload, UltraVioletStreaming + or Unspecified. + type: string + expirationDate: + description: The expiration date of license. + type: string + licenseType: + description: The license type. Supported values are + NonPersistent or Persistent. + type: string + playRight: + description: A play_right block as defined above. + properties: + agcAndColorStripeRestriction: + description: Configures Automatic Gain Control + (AGC) and Color Stripe in the license. Must + be between 0 and 3 inclusive. + type: number + allowPassingVideoContentToUnknownOutput: + description: Configures Unknown output handling + settings of the license. Supported values are + Allowed, AllowedWithVideoConstriction or NotAllowed. + type: string + analogVideoOpl: + description: Specifies the output protection level + for compressed digital audio. Supported values + are 100, 150 or 200. + type: number + compressedDigitalAudioOpl: + description: Specifies the output protection level + for compressed digital audio.Supported values + are 100, 150, 200, 250 or 300. + type: number + compressedDigitalVideoOpl: + description: Specifies the output protection level + for compressed digital video. Supported values + are 400 or 500. + type: number + digitalVideoOnlyContentRestriction: + description: Enables the Image Constraint For + Analog Component Video Restriction in the license. + type: boolean + explicitAnalogTelevisionOutputRestriction: + description: An explicit_analog_television_output_restriction + block as defined above. + properties: + bestEffortEnforced: + description: Indicates whether this restriction + is enforced on a best effort basis. Possible + values are true or false. Defaults to false. 
+ type: boolean + controlBits: + description: The restriction control bits. + Possible value is integer between 0 and + 3 inclusive. + type: number + type: object + firstPlayExpiration: + description: The amount of time that the license + is valid after the license is first used to + play content. + type: string + imageConstraintForAnalogComponentVideoRestriction: + description: Enables the Image Constraint For + Analog Component Video Restriction in the license. + type: boolean + imageConstraintForAnalogComputerMonitorRestriction: + description: Enables the Image Constraint For + Analog Component Video Restriction in the license. + type: boolean + scmsRestriction: + description: Configures the Serial Copy Management + System (SCMS) in the license. Must be between + 0 and 3 inclusive. + type: number + uncompressedDigitalAudioOpl: + description: Specifies the output protection level + for uncompressed digital audio. Supported values + are 100, 150, 200, 250 or 300. + type: number + uncompressedDigitalVideoOpl: + description: Specifies the output protection level + for uncompressed digital video. Supported values + are 100, 250, 270 or 300. + type: number + type: object + relativeBeginDate: + description: The relative begin date of license. + type: string + relativeExpirationDate: + description: The relative expiration date of license. + type: string + securityLevel: + description: The security level of the PlayReady license. + Possible values are SL150, SL2000 and SL3000. Please + see this document for more information about security + level. See this document for more information about + SL3000 support. + type: string + type: object + type: array + playreadyResponseCustomData: + description: The custom response data of the PlayReady configuration. + This only applies when playready_configuration_license + is specified. + type: string + tokenRestriction: + description: A token_restriction block as defined below. 
+ properties: + alternateKey: + description: One or more alternate_key block as defined + above. + items: + type: object + type: array + audience: + description: The audience for the token. + type: string + issuer: + description: The token issuer. + type: string + openIdConnectDiscoveryDocument: + description: The OpenID connect discovery document. + type: string + requiredClaim: + description: One or more required_claim blocks as defined + above. + items: + properties: + type: + description: Token claim type. + type: string + value: + description: Token claim value. + type: string + type: object + type: array + tokenType: + description: The type of token. Supported values are + Jwt or Swt. + type: string + type: object + widevineConfigurationTemplate: + description: The Widevine template. + type: string + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.policyOption is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.policyOption) + || (has(self.initProvider) && has(self.initProvider.policyOption))' + status: + description: ContentKeyPolicyStatus defines the observed state of ContentKeyPolicy. + properties: + atProvider: + properties: + description: + description: A description for the Policy. + type: string + id: + description: The ID of the Content Key Policy. + type: string + mediaServicesAccountName: + description: The Media Services account name. Changing this forces + a new Content Key Policy to be created. + type: string + policyOption: + description: One or more policy_option blocks as defined below. + items: + properties: + clearKeyConfigurationEnabled: + description: Enable a configuration for non-DRM keys. + type: boolean + fairplayConfiguration: + description: A fairplay_configuration block as defined above. + Check license requirements here https://docs.microsoft.com/azure/media-services/latest/fairplay-license-overview. + properties: + offlineRentalConfiguration: + description: A offline_rental_configuration block as + defined below. + properties: + playbackDurationSeconds: + description: Playback duration. 
+ type: number + storageDurationSeconds: + description: Storage duration. + type: number + type: object + rentalAndLeaseKeyType: + description: The rental and lease key type. Supported + values are DualExpiry, PersistentLimited, PersistentUnlimited + or Undefined. + type: string + rentalDurationSeconds: + description: The rental duration. Must be greater than + 0. + type: number + type: object + name: + description: The name which should be used for this Policy + Option. + type: string + openRestrictionEnabled: + description: Enable an open restriction. License or key + will be delivered on every request. + type: boolean + playreadyConfigurationLicense: + description: One or more playready_configuration_license + blocks as defined above. + items: + properties: + allowTestDevices: + description: A flag indicating whether test devices + can use the license. + type: boolean + beginDate: + description: The begin date of license. + type: string + contentKeyLocationFromHeaderEnabled: + description: Specifies that the content key ID is + in the PlayReady header. + type: boolean + contentKeyLocationFromKeyId: + description: The content key ID. Specifies that the + content key ID is specified in the PlayReady configuration. + type: string + contentType: + description: The PlayReady content type. Supported + values are UltraVioletDownload, UltraVioletStreaming + or Unspecified. + type: string + expirationDate: + description: The expiration date of license. + type: string + licenseType: + description: The license type. Supported values are + NonPersistent or Persistent. + type: string + playRight: + description: A play_right block as defined above. + properties: + agcAndColorStripeRestriction: + description: Configures Automatic Gain Control + (AGC) and Color Stripe in the license. Must + be between 0 and 3 inclusive. + type: number + allowPassingVideoContentToUnknownOutput: + description: Configures Unknown output handling + settings of the license. 
Supported values are + Allowed, AllowedWithVideoConstriction or NotAllowed. + type: string + analogVideoOpl: + description: Specifies the output protection level + for compressed digital audio. Supported values + are 100, 150 or 200. + type: number + compressedDigitalAudioOpl: + description: Specifies the output protection level + for compressed digital audio.Supported values + are 100, 150, 200, 250 or 300. + type: number + compressedDigitalVideoOpl: + description: Specifies the output protection level + for compressed digital video. Supported values + are 400 or 500. + type: number + digitalVideoOnlyContentRestriction: + description: Enables the Image Constraint For + Analog Component Video Restriction in the license. + type: boolean + explicitAnalogTelevisionOutputRestriction: + description: An explicit_analog_television_output_restriction + block as defined above. + properties: + bestEffortEnforced: + description: Indicates whether this restriction + is enforced on a best effort basis. Possible + values are true or false. Defaults to false. + type: boolean + controlBits: + description: The restriction control bits. + Possible value is integer between 0 and + 3 inclusive. + type: number + type: object + firstPlayExpiration: + description: The amount of time that the license + is valid after the license is first used to + play content. + type: string + imageConstraintForAnalogComponentVideoRestriction: + description: Enables the Image Constraint For + Analog Component Video Restriction in the license. + type: boolean + imageConstraintForAnalogComputerMonitorRestriction: + description: Enables the Image Constraint For + Analog Component Video Restriction in the license. + type: boolean + scmsRestriction: + description: Configures the Serial Copy Management + System (SCMS) in the license. Must be between + 0 and 3 inclusive. + type: number + uncompressedDigitalAudioOpl: + description: Specifies the output protection level + for uncompressed digital audio. 
Supported values + are 100, 150, 200, 250 or 300. + type: number + uncompressedDigitalVideoOpl: + description: Specifies the output protection level + for uncompressed digital video. Supported values + are 100, 250, 270 or 300. + type: number + type: object + relativeBeginDate: + description: The relative begin date of license. + type: string + relativeExpirationDate: + description: The relative expiration date of license. + type: string + securityLevel: + description: The security level of the PlayReady license. + Possible values are SL150, SL2000 and SL3000. Please + see this document for more information about security + level. See this document for more information about + SL3000 support. + type: string + type: object + type: array + playreadyResponseCustomData: + description: The custom response data of the PlayReady configuration. + This only applies when playready_configuration_license + is specified. + type: string + tokenRestriction: + description: A token_restriction block as defined below. + properties: + alternateKey: + description: One or more alternate_key block as defined + above. + items: + properties: + rsaTokenKeyExponentSecretRef: + description: The RSA parameter exponent. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + rsaTokenKeyModulusSecretRef: + description: The RSA parameter modulus. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + symmetricTokenKeySecretRef: + description: The key value of the key. Specifies + a symmetric key for token validation. + properties: + key: + description: The key to select. 
+ type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + x509TokenKeyRawSecretRef: + description: The raw data field of a certificate + in PKCS 12 format (X509Certificate2 in .NET). + Specifies a certificate for token validation. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + audience: + description: The audience for the token. + type: string + issuer: + description: The token issuer. + type: string + openIdConnectDiscoveryDocument: + description: The OpenID connect discovery document. + type: string + requiredClaim: + description: One or more required_claim blocks as defined + above. + items: + properties: + type: + description: Token claim type. + type: string + value: + description: Token claim value. + type: string + type: object + type: array + tokenType: + description: The type of token. Supported values are + Jwt or Swt. + type: string + type: object + widevineConfigurationTemplate: + description: The Widevine template. + type: string + type: object + type: array + resourceGroupName: + description: The name of the Resource Group where the Content + Key Policy should exist. Changing this forces a new Content + Key Policy to be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. 
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/media.azure.upbound.io_jobs.yaml b/package/crds/media.azure.upbound.io_jobs.yaml index 756b55000..2ee6ac705 100644 --- a/package/crds/media.azure.upbound.io_jobs.yaml +++ b/package/crds/media.azure.upbound.io_jobs.yaml @@ -1056,3 +1056,1035 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Job is the Schema for the Jobs API. Manages a Media Job. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: JobSpec defines the desired state of Job + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. 
+ This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: Optional customer supplied description of the Job. + type: string + inputAsset: + description: A input_asset block as defined below. Changing this + forces a new Media Job to be created. + properties: + label: + description: A label that is assigned to a JobInputClip, that + is used to satisfy a reference used in the Transform. For + example, a Transform can be authored so as to take an image + file with the label 'xyz' and apply it as an overlay onto + the input video before it is encoded. When submitting a + Job, exactly one of the JobInputs should be the image file, + and it should have the label 'xyz'. Changing this forces + a new resource to be created. + type: string + name: + description: The name of the input Asset. Changing this forces + a new Media Job to be created. + type: string + nameRef: + description: Reference to a Asset in media to populate name. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + nameSelector: + description: Selector for a Asset in media to populate name. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + mediaServicesAccountName: + description: The Media Services account name. Changing this forces + a new Transform to be created. + type: string + mediaServicesAccountNameRef: + description: Reference to a ServicesAccount in media to populate + mediaServicesAccountName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + mediaServicesAccountNameSelector: + description: Selector for a ServicesAccount in media to populate + mediaServicesAccountName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + outputAsset: + description: One or more output_asset blocks as defined below. + Changing this forces a new Media Job to be created. + items: + properties: + label: + description: A label that is assigned to a JobOutput in + order to help uniquely identify it. This is useful when + your Transform has more than one TransformOutput, whereby + your Job has more than one JobOutput. In such cases, when + you submit the Job, you will add two or more JobOutputs, + in the same order as TransformOutputs in the Transform. + Subsequently, when you retrieve the Job, either through + events or on a GET request, you can use the label to easily + identify the JobOutput. If a label is not provided, a + default value of '{presetName}_{outputIndex}' will be + used, where the preset name is the name of the preset + in the corresponding TransformOutput and the output index + is the relative index of the this JobOutput within the + Job. Note that this index is the same as the relative + index of the corresponding TransformOutput within its + Transform. Changing this forces a new resource to be created. + type: string + name: + description: The name of the output Asset. Changing this + forces a new Media Job to be created. + type: string + nameRef: + description: Reference to a Asset in media to populate name. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + nameSelector: + description: Selector for a Asset in media to populate name. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + priority: + description: Priority with which the job should be processed. + Higher priority jobs are processed before lower priority jobs. + Changing this forces a new Media Job to be created. Possible + values are High, Normal and Low. Defaults to Normal. + type: string + resourceGroupName: + description: The name of the Resource Group where the Media Job + should exist. Changing this forces a new Media Job to be created. 
+ type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + transformName: + description: The Transform name. Changing this forces a new Media + Job to be created. + type: string + transformNameRef: + description: Reference to a Transform in media to populate transformName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + transformNameSelector: + description: Selector for a Transform in media to populate transformName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: Optional customer supplied description of the Job. + type: string + inputAsset: + description: A input_asset block as defined below. Changing this + forces a new Media Job to be created. + properties: + label: + description: A label that is assigned to a JobInputClip, that + is used to satisfy a reference used in the Transform. For + example, a Transform can be authored so as to take an image + file with the label 'xyz' and apply it as an overlay onto + the input video before it is encoded. When submitting a + Job, exactly one of the JobInputs should be the image file, + and it should have the label 'xyz'. Changing this forces + a new resource to be created. 
+ type: string + name: + description: The name of the input Asset. Changing this forces + a new Media Job to be created. + type: string + nameRef: + description: Reference to a Asset in media to populate name. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + nameSelector: + description: Selector for a Asset in media to populate name. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + outputAsset: + description: One or more output_asset blocks as defined below. + Changing this forces a new Media Job to be created. + items: + properties: + label: + description: A label that is assigned to a JobOutput in + order to help uniquely identify it. This is useful when + your Transform has more than one TransformOutput, whereby + your Job has more than one JobOutput. In such cases, when + you submit the Job, you will add two or more JobOutputs, + in the same order as TransformOutputs in the Transform. + Subsequently, when you retrieve the Job, either through + events or on a GET request, you can use the label to easily + identify the JobOutput. If a label is not provided, a + default value of '{presetName}_{outputIndex}' will be + used, where the preset name is the name of the preset + in the corresponding TransformOutput and the output index + is the relative index of the this JobOutput within the + Job. Note that this index is the same as the relative + index of the corresponding TransformOutput within its + Transform. Changing this forces a new resource to be created. + type: string + name: + description: The name of the output Asset. Changing this + forces a new Media Job to be created. + type: string + nameRef: + description: Reference to a Asset in media to populate name. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + nameSelector: + description: Selector for a Asset in media to populate name. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + priority: + description: Priority with which the job should be processed. + Higher priority jobs are processed before lower priority jobs. + Changing this forces a new Media Job to be created. Possible + values are High, Normal and Low. Defaults to Normal. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.inputAsset is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.inputAsset) + || (has(self.initProvider) && has(self.initProvider.inputAsset))' + - message: spec.forProvider.outputAsset is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.outputAsset) + || (has(self.initProvider) && has(self.initProvider.outputAsset))' + status: + description: JobStatus defines the observed state of Job. + properties: + atProvider: + properties: + description: + description: Optional customer supplied description of the Job. + type: string + id: + description: The ID of the Media Job. + type: string + inputAsset: + description: A input_asset block as defined below. Changing this + forces a new Media Job to be created. + properties: + label: + description: A label that is assigned to a JobInputClip, that + is used to satisfy a reference used in the Transform. For + example, a Transform can be authored so as to take an image + file with the label 'xyz' and apply it as an overlay onto + the input video before it is encoded. When submitting a + Job, exactly one of the JobInputs should be the image file, + and it should have the label 'xyz'. Changing this forces + a new resource to be created. + type: string + name: + description: The name of the input Asset. Changing this forces + a new Media Job to be created. + type: string + type: object + mediaServicesAccountName: + description: The Media Services account name. Changing this forces + a new Transform to be created. + type: string + outputAsset: + description: One or more output_asset blocks as defined below. 
+ Changing this forces a new Media Job to be created. + items: + properties: + label: + description: A label that is assigned to a JobOutput in + order to help uniquely identify it. This is useful when + your Transform has more than one TransformOutput, whereby + your Job has more than one JobOutput. In such cases, when + you submit the Job, you will add two or more JobOutputs, + in the same order as TransformOutputs in the Transform. + Subsequently, when you retrieve the Job, either through + events or on a GET request, you can use the label to easily + identify the JobOutput. If a label is not provided, a + default value of '{presetName}_{outputIndex}' will be + used, where the preset name is the name of the preset + in the corresponding TransformOutput and the output index + is the relative index of the this JobOutput within the + Job. Note that this index is the same as the relative + index of the corresponding TransformOutput within its + Transform. Changing this forces a new resource to be created. + type: string + name: + description: The name of the output Asset. Changing this + forces a new Media Job to be created. + type: string + type: object + type: array + priority: + description: Priority with which the job should be processed. + Higher priority jobs are processed before lower priority jobs. + Changing this forces a new Media Job to be created. Possible + values are High, Normal and Low. Defaults to Normal. + type: string + resourceGroupName: + description: The name of the Resource Group where the Media Job + should exist. Changing this forces a new Media Job to be created. + type: string + transformName: + description: The Transform name. Changing this forces a new Media + Job to be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. 
+ properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/media.azure.upbound.io_liveevents.yaml b/package/crds/media.azure.upbound.io_liveevents.yaml index d5d94cff2..57f1ba197 100644 --- a/package/crds/media.azure.upbound.io_liveevents.yaml +++ b/package/crds/media.azure.upbound.io_liveevents.yaml @@ -1075,3 +1075,1033 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: LiveEvent is the Schema for the LiveEvents API. Manages a Live + Event. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: LiveEventSpec defines the desired state of LiveEvent + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + autoStartEnabled: + description: The flag indicates if the resource should be automatically + started on creation. Changing this forces a new resource to + be created. + type: boolean + crossSiteAccessPolicy: + description: A cross_site_access_policy block as defined below. + properties: + clientAccessPolicy: + description: The content of clientaccesspolicy.xml used by + Silverlight. + type: string + crossDomainPolicy: + description: The content of the Cross Domain Policy (crossdomain.xml). + type: string + type: object + description: + description: A description for the live event. + type: string + encoding: + description: A encoding block as defined below. + properties: + keyFrameInterval: + description: Use an ISO 8601 time value between 0.5 to 20 + seconds to specify the output fragment length for the video + and audio tracks of an encoding live event. For example, + use PT2S to indicate 2 seconds. For the video track it also + defines the key frame interval, or the length of a GoP (group + of pictures). 
The value cannot be set for pass-through live + events. Defaults to PT2S. + type: string + presetName: + description: The optional encoding preset name, used when + type is not None. If the type is set to Standard, then the + default preset name is Default720p. Else if the type is + set to Premium1080p, Changing this forces a new resource + to be created. + type: string + stretchMode: + description: Specifies how the input video will be resized + to fit the desired output resolution(s). Allowed values + are None, AutoFit or AutoSize. Default is None. + type: string + type: + description: Live event type. Possible values are None, Premium1080p, + PassthroughBasic, PassthroughStandard and Standard. When + set to None, the service simply passes through the incoming + video and audio layer(s) to the output. When type is set + to Standard or Premium1080p, a live encoder transcodes the + incoming stream into multiple bitrates or layers. Defaults + to None. Changing this forces a new resource to be created. + type: string + type: object + hostnamePrefix: + description: When use_static_hostname is set to true, the hostname_prefix + specifies the first part of the hostname assigned to the live + event preview and ingest endpoints. The final hostname would + be a combination of this prefix, the media service account name + and a short code for the Azure Media Services data center. + type: string + input: + description: A input block as defined below. + properties: + accessToken: + description: A UUID in string form to uniquely identify the + stream. If omitted, the service will generate a unique value. + Changing this forces a new value to be created. + type: string + ipAccessControlAllow: + description: One or more ip_access_control_allow blocks as + defined below. + items: + properties: + address: + description: The IP address or CIDR range. + type: string + name: + description: The name which should be used for this + Live Event. 
Changing this forces a new Live Event + to be created. + type: string + subnetPrefixLength: + description: The subnet mask prefix length (see CIDR + notation). + type: number + type: object + type: array + keyFrameIntervalDuration: + description: ISO 8601 time duration of the key frame interval + duration of the input. This value sets the EXT-X-TARGETDURATION + property in the HLS output. For example, use PT2S to indicate + 2 seconds. This field cannot be set when type is set to + Encoding. + type: string + streamingProtocol: + description: The input protocol for the live event. Allowed + values are FragmentedMP4 and RTMP. Changing this forces + a new resource to be created. + type: string + type: object + location: + description: The Azure Region where the Live Event should exist. + Changing this forces a new Live Event to be created. + type: string + mediaServicesAccountName: + description: The Media Services account name. Changing this forces + a new Live Event to be created. + type: string + mediaServicesAccountNameRef: + description: Reference to a ServicesAccount in media to populate + mediaServicesAccountName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + mediaServicesAccountNameSelector: + description: Selector for a ServicesAccount in media to populate + mediaServicesAccountName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + preview: + description: A preview block as defined below. + properties: + alternativeMediaId: + description: An alternative media identifier associated with + the streaming locator created for the preview. The identifier + can be used in the CustomLicenseAcquisitionUrlTemplate or + the CustomKeyAcquisitionUrlTemplate of the Streaming Policy + specified in the streaming_policy_name field. + type: string + ipAccessControlAllow: + description: One or more ip_access_control_allow blocks as + defined above. + items: + properties: + address: + description: The IP address or CIDR range. 
+ type: string + name: + description: The name which should be used for this + Live Event. Changing this forces a new Live Event + to be created. + type: string + subnetPrefixLength: + description: The subnet mask prefix length (see CIDR + notation). + type: number + type: object + type: array + previewLocator: + description: The identifier of the preview locator in GUID + format. Specifying this at creation time allows the caller + to know the preview locator url before the event is created. + If omitted, the service will generate a random identifier. + Changing this forces a new resource to be created. + type: string + streamingPolicyName: + description: The name of streaming policy used for the live + event preview. Changing this forces a new resource to be + created. + type: string + type: object + resourceGroupName: + description: The name of the Resource Group where the Live Event + should exist. Changing this forces a new Live Event to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + streamOptions: + description: A list of options to use for the LiveEvent. Possible + values are Default, LowLatency, LowLatencyV2. Please see more + at this document. Changing this forces a new resource to be + created. + items: + type: string + type: array + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Live Event. + type: object + x-kubernetes-map-type: granular + transcriptionLanguages: + description: 'Specifies a list of languages (locale) to be used + for speech-to-text transcription – it should match the spoken + language in the audio track. 
The value should be in BCP-47 format + (e.g: en-US). See the Microsoft Documentation for more information + about the live transcription feature and the list of supported + languages.' + items: + type: string + type: array + useStaticHostname: + description: Specifies whether a static hostname would be assigned + to the live event preview and ingest endpoints. Changing this + forces a new Live Event to be created. + type: boolean + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + autoStartEnabled: + description: The flag indicates if the resource should be automatically + started on creation. Changing this forces a new resource to + be created. + type: boolean + crossSiteAccessPolicy: + description: A cross_site_access_policy block as defined below. + properties: + clientAccessPolicy: + description: The content of clientaccesspolicy.xml used by + Silverlight. + type: string + crossDomainPolicy: + description: The content of the Cross Domain Policy (crossdomain.xml). + type: string + type: object + description: + description: A description for the live event. + type: string + encoding: + description: A encoding block as defined below. 
+ properties: + keyFrameInterval: + description: Use an ISO 8601 time value between 0.5 to 20 + seconds to specify the output fragment length for the video + and audio tracks of an encoding live event. For example, + use PT2S to indicate 2 seconds. For the video track it also + defines the key frame interval, or the length of a GoP (group + of pictures). The value cannot be set for pass-through live + events. Defaults to PT2S. + type: string + presetName: + description: The optional encoding preset name, used when + type is not None. If the type is set to Standard, then the + default preset name is Default720p. Else if the type is + set to Premium1080p, Changing this forces a new resource + to be created. + type: string + stretchMode: + description: Specifies how the input video will be resized + to fit the desired output resolution(s). Allowed values + are None, AutoFit or AutoSize. Default is None. + type: string + type: + description: Live event type. Possible values are None, Premium1080p, + PassthroughBasic, PassthroughStandard and Standard. When + set to None, the service simply passes through the incoming + video and audio layer(s) to the output. When type is set + to Standard or Premium1080p, a live encoder transcodes the + incoming stream into multiple bitrates or layers. Defaults + to None. Changing this forces a new resource to be created. + type: string + type: object + hostnamePrefix: + description: When use_static_hostname is set to true, the hostname_prefix + specifies the first part of the hostname assigned to the live + event preview and ingest endpoints. The final hostname would + be a combination of this prefix, the media service account name + and a short code for the Azure Media Services data center. + type: string + input: + description: A input block as defined below. + properties: + accessToken: + description: A UUID in string form to uniquely identify the + stream. If omitted, the service will generate a unique value. 
+ Changing this forces a new value to be created. + type: string + ipAccessControlAllow: + description: One or more ip_access_control_allow blocks as + defined below. + items: + properties: + address: + description: The IP address or CIDR range. + type: string + name: + description: The name which should be used for this + Live Event. Changing this forces a new Live Event + to be created. + type: string + subnetPrefixLength: + description: The subnet mask prefix length (see CIDR + notation). + type: number + type: object + type: array + keyFrameIntervalDuration: + description: ISO 8601 time duration of the key frame interval + duration of the input. This value sets the EXT-X-TARGETDURATION + property in the HLS output. For example, use PT2S to indicate + 2 seconds. This field cannot be set when type is set to + Encoding. + type: string + streamingProtocol: + description: The input protocol for the live event. Allowed + values are FragmentedMP4 and RTMP. Changing this forces + a new resource to be created. + type: string + type: object + location: + description: The Azure Region where the Live Event should exist. + Changing this forces a new Live Event to be created. + type: string + preview: + description: A preview block as defined below. + properties: + alternativeMediaId: + description: An alternative media identifier associated with + the streaming locator created for the preview. The identifier + can be used in the CustomLicenseAcquisitionUrlTemplate or + the CustomKeyAcquisitionUrlTemplate of the Streaming Policy + specified in the streaming_policy_name field. + type: string + ipAccessControlAllow: + description: One or more ip_access_control_allow blocks as + defined above. + items: + properties: + address: + description: The IP address or CIDR range. + type: string + name: + description: The name which should be used for this + Live Event. Changing this forces a new Live Event + to be created. 
+ type: string + subnetPrefixLength: + description: The subnet mask prefix length (see CIDR + notation). + type: number + type: object + type: array + previewLocator: + description: The identifier of the preview locator in GUID + format. Specifying this at creation time allows the caller + to know the preview locator url before the event is created. + If omitted, the service will generate a random identifier. + Changing this forces a new resource to be created. + type: string + streamingPolicyName: + description: The name of streaming policy used for the live + event preview. Changing this forces a new resource to be + created. + type: string + type: object + streamOptions: + description: A list of options to use for the LiveEvent. Possible + values are Default, LowLatency, LowLatencyV2. Please see more + at this document. Changing this forces a new resource to be + created. + items: + type: string + type: array + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Live Event. + type: object + x-kubernetes-map-type: granular + transcriptionLanguages: + description: 'Specifies a list of languages (locale) to be used + for speech-to-text transcription – it should match the spoken + language in the audio track. The value should be in BCP-47 format + (e.g: en-US). See the Microsoft Documentation for more information + about the live transcription feature and the list of supported + languages.' + items: + type: string + type: array + useStaticHostname: + description: Specifies whether a static hostname would be assigned + to the live event preview and ingest endpoints. Changing this + forces a new Live Event to be created. + type: boolean + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. 
+ ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.input is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.input) + || (has(self.initProvider) && has(self.initProvider.input))' + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + status: + description: LiveEventStatus defines the observed state of LiveEvent. + properties: + atProvider: + properties: + autoStartEnabled: + description: The flag indicates if the resource should be automatically + started on creation. Changing this forces a new resource to + be created. + type: boolean + crossSiteAccessPolicy: + description: A cross_site_access_policy block as defined below. + properties: + clientAccessPolicy: + description: The content of clientaccesspolicy.xml used by + Silverlight. + type: string + crossDomainPolicy: + description: The content of the Cross Domain Policy (crossdomain.xml). + type: string + type: object + description: + description: A description for the live event. + type: string + encoding: + description: A encoding block as defined below. + properties: + keyFrameInterval: + description: Use an ISO 8601 time value between 0.5 to 20 + seconds to specify the output fragment length for the video + and audio tracks of an encoding live event. For example, + use PT2S to indicate 2 seconds. For the video track it also + defines the key frame interval, or the length of a GoP (group + of pictures). The value cannot be set for pass-through live + events. Defaults to PT2S. 
+ type: string + presetName: + description: The optional encoding preset name, used when + type is not None. If the type is set to Standard, then the + default preset name is Default720p. Else if the type is + set to Premium1080p, Changing this forces a new resource + to be created. + type: string + stretchMode: + description: Specifies how the input video will be resized + to fit the desired output resolution(s). Allowed values + are None, AutoFit or AutoSize. Default is None. + type: string + type: + description: Live event type. Possible values are None, Premium1080p, + PassthroughBasic, PassthroughStandard and Standard. When + set to None, the service simply passes through the incoming + video and audio layer(s) to the output. When type is set + to Standard or Premium1080p, a live encoder transcodes the + incoming stream into multiple bitrates or layers. Defaults + to None. Changing this forces a new resource to be created. + type: string + type: object + hostnamePrefix: + description: When use_static_hostname is set to true, the hostname_prefix + specifies the first part of the hostname assigned to the live + event preview and ingest endpoints. The final hostname would + be a combination of this prefix, the media service account name + and a short code for the Azure Media Services data center. + type: string + id: + description: The ID of the Live Event. + type: string + input: + description: A input block as defined below. + properties: + accessToken: + description: A UUID in string form to uniquely identify the + stream. If omitted, the service will generate a unique value. + Changing this forces a new value to be created. + type: string + endpoint: + items: + properties: + protocol: + type: string + url: + type: string + type: object + type: array + ipAccessControlAllow: + description: One or more ip_access_control_allow blocks as + defined below. + items: + properties: + address: + description: The IP address or CIDR range. 
+ type: string + name: + description: The name which should be used for this + Live Event. Changing this forces a new Live Event + to be created. + type: string + subnetPrefixLength: + description: The subnet mask prefix length (see CIDR + notation). + type: number + type: object + type: array + keyFrameIntervalDuration: + description: ISO 8601 time duration of the key frame interval + duration of the input. This value sets the EXT-X-TARGETDURATION + property in the HLS output. For example, use PT2S to indicate + 2 seconds. This field cannot be set when type is set to + Encoding. + type: string + streamingProtocol: + description: The input protocol for the live event. Allowed + values are FragmentedMP4 and RTMP. Changing this forces + a new resource to be created. + type: string + type: object + location: + description: The Azure Region where the Live Event should exist. + Changing this forces a new Live Event to be created. + type: string + mediaServicesAccountName: + description: The Media Services account name. Changing this forces + a new Live Event to be created. + type: string + preview: + description: A preview block as defined below. + properties: + alternativeMediaId: + description: An alternative media identifier associated with + the streaming locator created for the preview. The identifier + can be used in the CustomLicenseAcquisitionUrlTemplate or + the CustomKeyAcquisitionUrlTemplate of the Streaming Policy + specified in the streaming_policy_name field. + type: string + endpoint: + items: + properties: + protocol: + type: string + url: + type: string + type: object + type: array + ipAccessControlAllow: + description: One or more ip_access_control_allow blocks as + defined above. + items: + properties: + address: + description: The IP address or CIDR range. + type: string + name: + description: The name which should be used for this + Live Event. Changing this forces a new Live Event + to be created. 
+ type: string + subnetPrefixLength: + description: The subnet mask prefix length (see CIDR + notation). + type: number + type: object + type: array + previewLocator: + description: The identifier of the preview locator in GUID + format. Specifying this at creation time allows the caller + to know the preview locator url before the event is created. + If omitted, the service will generate a random identifier. + Changing this forces a new resource to be created. + type: string + streamingPolicyName: + description: The name of streaming policy used for the live + event preview. Changing this forces a new resource to be + created. + type: string + type: object + resourceGroupName: + description: The name of the Resource Group where the Live Event + should exist. Changing this forces a new Live Event to be created. + type: string + streamOptions: + description: A list of options to use for the LiveEvent. Possible + values are Default, LowLatency, LowLatencyV2. Please see more + at this document. Changing this forces a new resource to be + created. + items: + type: string + type: array + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Live Event. + type: object + x-kubernetes-map-type: granular + transcriptionLanguages: + description: 'Specifies a list of languages (locale) to be used + for speech-to-text transcription – it should match the spoken + language in the audio track. The value should be in BCP-47 format + (e.g: en-US). See the Microsoft Documentation for more information + about the live transcription feature and the list of supported + languages.' + items: + type: string + type: array + useStaticHostname: + description: Specifies whether a static hostname would be assigned + to the live event preview and ingest endpoints. Changing this + forces a new Live Event to be created. + type: boolean + type: object + conditions: + description: Conditions of the resource. 
+ items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/media.azure.upbound.io_servicesaccountfilters.yaml b/package/crds/media.azure.upbound.io_servicesaccountfilters.yaml index 0864cbdb2..f39130abe 100644 --- a/package/crds/media.azure.upbound.io_servicesaccountfilters.yaml +++ b/package/crds/media.azure.upbound.io_servicesaccountfilters.yaml @@ -772,3 +772,748 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ServicesAccountFilter is the Schema for the ServicesAccountFilters + API. Manages a Media Services Account Filter. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ServicesAccountFilterSpec defines the desired state of ServicesAccountFilter + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + firstQualityBitrate: + description: The first quality bitrate. Sets the first video track + to appear in the Live Streaming playlist to allow HLS native + players to start downloading from this quality level at the + beginning. + type: number + mediaServicesAccountName: + description: The Media Services account name. Changing this forces + a new Account Filter to be created. + type: string + mediaServicesAccountNameRef: + description: Reference to a ServicesAccount in media to populate + mediaServicesAccountName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + mediaServicesAccountNameSelector: + description: Selector for a ServicesAccount in media to populate + mediaServicesAccountName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + presentationTimeRange: + description: A presentation_time_range block as defined below. + properties: + endInUnits: + description: |- + The absolute end time boundary. Applies to Video on Demand (VoD). 
+ For the Live Streaming presentation, it is silently ignored and applied when the presentation ends and the stream becomes VoD. This is a long value that represents an absolute end point of the presentation, rounded to the closest next GOP start. The unit is defined by unit_timescale_in_milliseconds, so an end_in_units of 180 would be for 3 minutes. Use start_in_units and end_in_units to trim the fragments that will be in the playlist (manifest). For example, start_in_units set to 20 and end_in_units set to 60 using unit_timescale_in_milliseconds in 1000 will generate a playlist that contains fragments from between 20 seconds and 60 seconds of the VoD presentation. If a fragment straddles the boundary, the entire fragment will be included in the manifest. + type: number + forceEnd: + description: 'Indicates whether the end_in_units property + must be present. If true, end_in_units must be specified + or a bad request code is returned. Applies to Live Streaming + only. Allowed values: false, true.' + type: boolean + liveBackoffInUnits: + description: |- + The relative to end right edge. Applies to Live Streaming only. + This value defines the latest live position that a client can seek to. Using this property, you can delay live playback position and create a server-side buffer for players. The unit is defined by unit_timescale_in_milliseconds. The maximum live back off duration is 300 seconds. For example, a value of 20 means that the latest available content is 20 seconds delayed from the real live edge. + type: number + presentationWindowInUnits: + description: The relative to end sliding window. Applies to + Live Streaming only. Use presentation_window_in_units to + apply a sliding window of fragments to include in a playlist. + The unit is defined by unit_timescale_in_milliseconds. For + example, set presentation_window_in_units to 120 to apply + a two-minute sliding window. Media within 2 minutes of the + live edge will be included in the playlist. 
If a fragment + straddles the boundary, the entire fragment will be included + in the playlist. The minimum presentation window duration + is 60 seconds. + type: number + startInUnits: + description: The absolute start time boundary. Applies to + Video on Demand (VoD) or Live Streaming. This is a long + value that represents an absolute start point of the stream. + The value gets rounded to the closest next GOP start. The + unit is defined by unit_timescale_in_milliseconds, so a + start_in_units of 15 would be for 15 seconds. Use start_in_units + and end_in_units to trim the fragments that will be in the + playlist (manifest). For example, start_in_units set to + 20 and end_in_units set to 60 using unit_timescale_in_milliseconds + in 1000 will generate a playlist that contains fragments + from between 20 seconds and 60 seconds of the VoD presentation. + If a fragment straddles the boundary, the entire fragment + will be included in the manifest. + type: number + unitTimescaleInMilliseconds: + description: Specified as the number of milliseconds in one + unit timescale. For example, if you want to set a start_in_units + at 30 seconds, you would use a value of 30 when using the + unit_timescale_in_milliseconds in 1000. Or if you want to + set start_in_units in 30 milliseconds, you would use a value + of 30 when using the unit_timescale_in_milliseconds in 1. + Applies timescale to start_in_units, start_timescale and + presentation_window_in_timescale and live_backoff_in_timescale. + type: number + type: object + resourceGroupName: + description: The name of the Resource Group where the Account + Filter should exist. Changing this forces a new Account Filter + to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + trackSelection: + description: One or more track_selection blocks as defined below. + items: + properties: + condition: + description: One or more selection blocks as defined above. + items: + properties: + operation: + description: The condition operation to test a track + property against. Supported values are Equal and + NotEqual. + type: string + property: + description: The track property to compare. Supported + values are Bitrate, FourCC, Language, Name and Type. + Check documentation for more details. + type: string + value: + description: The track property value to match or + not match. + type: string + type: object + type: array + type: object + type: array + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + firstQualityBitrate: + description: The first quality bitrate. Sets the first video track + to appear in the Live Streaming playlist to allow HLS native + players to start downloading from this quality level at the + beginning. + type: number + presentationTimeRange: + description: A presentation_time_range block as defined below. + properties: + endInUnits: + description: |- + The absolute end time boundary. Applies to Video on Demand (VoD). 
+ For the Live Streaming presentation, it is silently ignored and applied when the presentation ends and the stream becomes VoD. This is a long value that represents an absolute end point of the presentation, rounded to the closest next GOP start. The unit is defined by unit_timescale_in_milliseconds, so an end_in_units of 180 would be for 3 minutes. Use start_in_units and end_in_units to trim the fragments that will be in the playlist (manifest). For example, start_in_units set to 20 and end_in_units set to 60 using unit_timescale_in_milliseconds in 1000 will generate a playlist that contains fragments from between 20 seconds and 60 seconds of the VoD presentation. If a fragment straddles the boundary, the entire fragment will be included in the manifest. + type: number + forceEnd: + description: 'Indicates whether the end_in_units property + must be present. If true, end_in_units must be specified + or a bad request code is returned. Applies to Live Streaming + only. Allowed values: false, true.' + type: boolean + liveBackoffInUnits: + description: |- + The relative to end right edge. Applies to Live Streaming only. + This value defines the latest live position that a client can seek to. Using this property, you can delay live playback position and create a server-side buffer for players. The unit is defined by unit_timescale_in_milliseconds. The maximum live back off duration is 300 seconds. For example, a value of 20 means that the latest available content is 20 seconds delayed from the real live edge. + type: number + presentationWindowInUnits: + description: The relative to end sliding window. Applies to + Live Streaming only. Use presentation_window_in_units to + apply a sliding window of fragments to include in a playlist. + The unit is defined by unit_timescale_in_milliseconds. For + example, set presentation_window_in_units to 120 to apply + a two-minute sliding window. Media within 2 minutes of the + live edge will be included in the playlist. 
If a fragment + straddles the boundary, the entire fragment will be included + in the playlist. The minimum presentation window duration + is 60 seconds. + type: number + startInUnits: + description: The absolute start time boundary. Applies to + Video on Demand (VoD) or Live Streaming. This is a long + value that represents an absolute start point of the stream. + The value gets rounded to the closest next GOP start. The + unit is defined by unit_timescale_in_milliseconds, so a + start_in_units of 15 would be for 15 seconds. Use start_in_units + and end_in_units to trim the fragments that will be in the + playlist (manifest). For example, start_in_units set to + 20 and end_in_units set to 60 using unit_timescale_in_milliseconds + in 1000 will generate a playlist that contains fragments + from between 20 seconds and 60 seconds of the VoD presentation. + If a fragment straddles the boundary, the entire fragment + will be included in the manifest. + type: number + unitTimescaleInMilliseconds: + description: Specified as the number of milliseconds in one + unit timescale. For example, if you want to set a start_in_units + at 30 seconds, you would use a value of 30 when using the + unit_timescale_in_milliseconds in 1000. Or if you want to + set start_in_units in 30 milliseconds, you would use a value + of 30 when using the unit_timescale_in_milliseconds in 1. + Applies timescale to start_in_units, start_timescale and + presentation_window_in_timescale and live_backoff_in_timescale. + type: number + type: object + trackSelection: + description: One or more track_selection blocks as defined below. + items: + properties: + condition: + description: One or more selection blocks as defined above. + items: + properties: + operation: + description: The condition operation to test a track + property against. Supported values are Equal and + NotEqual. + type: string + property: + description: The track property to compare. 
Supported + values are Bitrate, FourCC, Language, Name and Type. + Check documentation for more details. + type: string + value: + description: The track property value to match or + not match. + type: string + type: object + type: array + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: ServicesAccountFilterStatus defines the observed state of + ServicesAccountFilter. 
+ properties: + atProvider: + properties: + firstQualityBitrate: + description: The first quality bitrate. Sets the first video track + to appear in the Live Streaming playlist to allow HLS native + players to start downloading from this quality level at the + beginning. + type: number + id: + description: The ID of the Account Filter. + type: string + mediaServicesAccountName: + description: The Media Services account name. Changing this forces + a new Account Filter to be created. + type: string + presentationTimeRange: + description: A presentation_time_range block as defined below. + properties: + endInUnits: + description: |- + The absolute end time boundary. Applies to Video on Demand (VoD). + For the Live Streaming presentation, it is silently ignored and applied when the presentation ends and the stream becomes VoD. This is a long value that represents an absolute end point of the presentation, rounded to the closest next GOP start. The unit is defined by unit_timescale_in_milliseconds, so an end_in_units of 180 would be for 3 minutes. Use start_in_units and end_in_units to trim the fragments that will be in the playlist (manifest). For example, start_in_units set to 20 and end_in_units set to 60 using unit_timescale_in_milliseconds in 1000 will generate a playlist that contains fragments from between 20 seconds and 60 seconds of the VoD presentation. If a fragment straddles the boundary, the entire fragment will be included in the manifest. + type: number + forceEnd: + description: 'Indicates whether the end_in_units property + must be present. If true, end_in_units must be specified + or a bad request code is returned. Applies to Live Streaming + only. Allowed values: false, true.' + type: boolean + liveBackoffInUnits: + description: |- + The relative to end right edge. Applies to Live Streaming only. + This value defines the latest live position that a client can seek to. 
Using this property, you can delay live playback position and create a server-side buffer for players. The unit is defined by unit_timescale_in_milliseconds. The maximum live back off duration is 300 seconds. For example, a value of 20 means that the latest available content is 20 seconds delayed from the real live edge. + type: number + presentationWindowInUnits: + description: The relative to end sliding window. Applies to + Live Streaming only. Use presentation_window_in_units to + apply a sliding window of fragments to include in a playlist. + The unit is defined by unit_timescale_in_milliseconds. For + example, set presentation_window_in_units to 120 to apply + a two-minute sliding window. Media within 2 minutes of the + live edge will be included in the playlist. If a fragment + straddles the boundary, the entire fragment will be included + in the playlist. The minimum presentation window duration + is 60 seconds. + type: number + startInUnits: + description: The absolute start time boundary. Applies to + Video on Demand (VoD) or Live Streaming. This is a long + value that represents an absolute start point of the stream. + The value gets rounded to the closest next GOP start. The + unit is defined by unit_timescale_in_milliseconds, so a + start_in_units of 15 would be for 15 seconds. Use start_in_units + and end_in_units to trim the fragments that will be in the + playlist (manifest). For example, start_in_units set to + 20 and end_in_units set to 60 using unit_timescale_in_milliseconds + in 1000 will generate a playlist that contains fragments + from between 20 seconds and 60 seconds of the VoD presentation. + If a fragment straddles the boundary, the entire fragment + will be included in the manifest. + type: number + unitTimescaleInMilliseconds: + description: Specified as the number of milliseconds in one + unit timescale. 
For example, if you want to set a start_in_units + at 30 seconds, you would use a value of 30 when using the + unit_timescale_in_milliseconds in 1000. Or if you want to + set start_in_units in 30 milliseconds, you would use a value + of 30 when using the unit_timescale_in_milliseconds in 1. + Applies timescale to start_in_units, start_timescale and + presentation_window_in_timescale and live_backoff_in_timescale. + type: number + type: object + resourceGroupName: + description: The name of the Resource Group where the Account + Filter should exist. Changing this forces a new Account Filter + to be created. + type: string + trackSelection: + description: One or more track_selection blocks as defined below. + items: + properties: + condition: + description: One or more selection blocks as defined above. + items: + properties: + operation: + description: The condition operation to test a track + property against. Supported values are Equal and + NotEqual. + type: string + property: + description: The track property to compare. Supported + values are Bitrate, FourCC, Language, Name and Type. + Check documentation for more details. + type: string + value: + description: The track property value to match or + not match. + type: string + type: object + type: array + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/media.azure.upbound.io_servicesaccounts.yaml b/package/crds/media.azure.upbound.io_servicesaccounts.yaml index bdf6a153b..6e57edd11 100644 --- a/package/crds/media.azure.upbound.io_servicesaccounts.yaml +++ b/package/crds/media.azure.upbound.io_servicesaccounts.yaml @@ -929,3 +929,884 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ServicesAccount is the Schema for the ServicesAccounts API. 
Manages + a Media Services Account. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ServicesAccountSpec defines the desired state of ServicesAccount + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + encryption: + description: An encryption block as defined below. + properties: + keyVaultKeyIdentifier: + description: Specifies the URI of the Key Vault Key used to + encrypt data. The key may either be versioned (for example + https://vault/keys/mykey/version1) or reference a key without + a version (for example https://vault/keys/mykey). 
+ type: string + managedIdentity: + description: A managed_identity block as defined below. + properties: + useSystemAssignedIdentity: + description: Whether to use System Assigned Identity. + Possible Values are true and false. + type: boolean + userAssignedIdentityId: + description: The ID of the User Assigned Identity. This + value can only be set when use_system_assigned_identity + is false + type: string + type: object + type: + description: Specifies the type of key used to encrypt the + account data. Possible values are SystemKey and CustomerKey. + Defaults to SystemKey. + type: string + type: object + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Media Services Account. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Media Services Account. + Possible values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + keyDeliveryAccessControl: + description: A key_delivery_access_control block as defined below. + properties: + defaultAction: + description: The Default Action to use when no rules match + from ip_allow_list. Possible values are Allow and Deny. + type: string + ipAllowList: + description: One or more IP Addresses, or CIDR Blocks which + should be able to access the Key Delivery. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + publicNetworkAccessEnabled: + description: Whether public network access is allowed for this + server. Defaults to true. 
+ type: boolean + resourceGroupName: + description: The name of the resource group in which to create + the Media Services Account. Changing this forces a new resource + to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + storageAccount: + description: One or more storage_account blocks as defined below. + items: + properties: + id: + description: Specifies the ID of the Storage Account that + will be associated with the Media Services instance. + type: string + idRef: + description: Reference to a Account in storage to populate + id. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + idSelector: + description: Selector for a Account in storage to populate + id. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + isPrimary: + description: Specifies whether the storage account should + be the primary account or not. Defaults to false. + type: boolean + managedIdentity: + description: A managed_identity block as defined below. + properties: + useSystemAssignedIdentity: + description: Whether to use System Assigned Identity. + Possible Values are true and false. + type: boolean + userAssignedIdentityId: + description: The ID of the User Assigned Identity. This + value can only be set when use_system_assigned_identity + is false + type: string + type: object + type: object + type: array + storageAuthenticationType: + description: Specifies the storage authentication type. Possible + value is ManagedIdentity or System. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags assigned to the resource. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. 
+ InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + encryption: + description: An encryption block as defined below. + properties: + keyVaultKeyIdentifier: + description: Specifies the URI of the Key Vault Key used to + encrypt data. The key may either be versioned (for example + https://vault/keys/mykey/version1) or reference a key without + a version (for example https://vault/keys/mykey). + type: string + managedIdentity: + description: A managed_identity block as defined below. + properties: + useSystemAssignedIdentity: + description: Whether to use System Assigned Identity. + Possible Values are true and false. + type: boolean + userAssignedIdentityId: + description: The ID of the User Assigned Identity. This + value can only be set when use_system_assigned_identity + is false + type: string + type: object + type: + description: Specifies the type of key used to encrypt the + account data. Possible values are SystemKey and CustomerKey. + Defaults to SystemKey. + type: string + type: object + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Media Services Account. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Media Services Account. 
+ Possible values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + keyDeliveryAccessControl: + description: A key_delivery_access_control block as defined below. + properties: + defaultAction: + description: The Default Action to use when no rules match + from ip_allow_list. Possible values are Allow and Deny. + type: string + ipAllowList: + description: One or more IP Addresses, or CIDR Blocks which + should be able to access the Key Delivery. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + publicNetworkAccessEnabled: + description: Whether public network access is allowed for this + server. Defaults to true. + type: boolean + storageAccount: + description: One or more storage_account blocks as defined below. + items: + properties: + id: + description: Specifies the ID of the Storage Account that + will be associated with the Media Services instance. + type: string + idRef: + description: Reference to a Account in storage to populate + id. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + idSelector: + description: Selector for a Account in storage to populate + id. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + isPrimary: + description: Specifies whether the storage account should + be the primary account or not. Defaults to false. + type: boolean + managedIdentity: + description: A managed_identity block as defined below. + properties: + useSystemAssignedIdentity: + description: Whether to use System Assigned Identity. + Possible Values are true and false. + type: boolean + userAssignedIdentityId: + description: The ID of the User Assigned Identity. This + value can only be set when use_system_assigned_identity + is false + type: string + type: object + type: object + type: array + storageAuthenticationType: + description: Specifies the storage authentication type. 
Possible + value is ManagedIdentity or System. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags assigned to the resource. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.storageAccount is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.storageAccount) + || (has(self.initProvider) && has(self.initProvider.storageAccount))' + status: + description: ServicesAccountStatus defines the observed state of ServicesAccount. + properties: + atProvider: + properties: + encryption: + description: An encryption block as defined below. + properties: + currentKeyIdentifier: + description: The current key used to encrypt the Media Services + Account, including the key version. + type: string + keyVaultKeyIdentifier: + description: Specifies the URI of the Key Vault Key used to + encrypt data. The key may either be versioned (for example + https://vault/keys/mykey/version1) or reference a key without + a version (for example https://vault/keys/mykey). + type: string + managedIdentity: + description: A managed_identity block as defined below. + properties: + useSystemAssignedIdentity: + description: Whether to use System Assigned Identity. + Possible Values are true and false. + type: boolean + userAssignedIdentityId: + description: The ID of the User Assigned Identity. This + value can only be set when use_system_assigned_identity + is false + type: string + type: object + type: + description: Specifies the type of key used to encrypt the + account data. Possible values are SystemKey and CustomerKey. + Defaults to SystemKey. 
+ type: string + type: object + id: + description: The ID of the Media Services Account. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Media Services Account. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Principal ID associated with this Managed + Service Identity. + type: string + tenantId: + description: The Tenant ID associated with this Managed Service + Identity. + type: string + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Media Services Account. + Possible values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + keyDeliveryAccessControl: + description: A key_delivery_access_control block as defined below. + properties: + defaultAction: + description: The Default Action to use when no rules match + from ip_allow_list. Possible values are Allow and Deny. + type: string + ipAllowList: + description: One or more IP Addresses, or CIDR Blocks which + should be able to access the Key Delivery. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + publicNetworkAccessEnabled: + description: Whether public network access is allowed for this + server. Defaults to true. + type: boolean + resourceGroupName: + description: The name of the resource group in which to create + the Media Services Account. Changing this forces a new resource + to be created. + type: string + storageAccount: + description: One or more storage_account blocks as defined below. 
+ items: + properties: + id: + description: Specifies the ID of the Storage Account that + will be associated with the Media Services instance. + type: string + isPrimary: + description: Specifies whether the storage account should + be the primary account or not. Defaults to false. + type: boolean + managedIdentity: + description: A managed_identity block as defined below. + properties: + useSystemAssignedIdentity: + description: Whether to use System Assigned Identity. + Possible Values are true and false. + type: boolean + userAssignedIdentityId: + description: The ID of the User Assigned Identity. This + value can only be set when use_system_assigned_identity + is false + type: string + type: object + type: object + type: array + storageAuthenticationType: + description: Specifies the storage authentication type. Possible + value is ManagedIdentity or System. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags assigned to the resource. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. 
+ type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/media.azure.upbound.io_streamingendpoints.yaml b/package/crds/media.azure.upbound.io_streamingendpoints.yaml index fa706dd98..5861c9f94 100644 --- a/package/crds/media.azure.upbound.io_streamingendpoints.yaml +++ b/package/crds/media.azure.upbound.io_streamingendpoints.yaml @@ -801,3 +801,774 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: StreamingEndpoint is the Schema for the StreamingEndpoints API. + Manages a Streaming Endpoint. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: StreamingEndpointSpec defines the desired state of StreamingEndpoint + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + accessControl: + description: A access_control block as defined below. + properties: + akamaiSignatureHeaderAuthenticationKey: + description: One or more akamai_signature_header_authentication_key + blocks as defined below. + items: + properties: + base64Key: + description: Authentication key. + type: string + expiration: + description: The expiration time of the authentication + key. + type: string + identifier: + description: Identifier of the key. + type: string + type: object + type: array + ipAllow: + description: A ip_allow block as defined below. + items: + properties: + address: + description: The IP address to allow. 
+ type: string + name: + description: The sku name of Streaming Endpoint. + type: string + subnetPrefixLength: + description: The subnet mask prefix length (see CIDR + notation). + type: number + type: object + type: array + type: object + autoStartEnabled: + description: The flag indicates if the resource should be automatically + started on creation. + type: boolean + cdnEnabled: + description: The CDN enabled flag. + type: boolean + cdnProfile: + description: The CDN profile name. + type: string + cdnProvider: + description: The CDN provider name. Supported value are StandardVerizon,PremiumVerizon + and StandardAkamai + type: string + crossSiteAccessPolicy: + description: A cross_site_access_policy block as defined below. + properties: + clientAccessPolicy: + description: The content of clientaccesspolicy.xml used by + Silverlight. + type: string + crossDomainPolicy: + description: The content of crossdomain.xml used by Silverlight. + type: string + type: object + customHostNames: + description: The custom host names of the streaming endpoint. + items: + type: string + type: array + x-kubernetes-list-type: set + description: + description: The streaming endpoint description. + type: string + location: + description: The Azure Region where the Streaming Endpoint should + exist. Changing this forces a new Streaming Endpoint to be created. + type: string + maxCacheAgeSeconds: + description: Max cache age in seconds. + type: number + mediaServicesAccountName: + description: The Media Services account name. Changing this forces + a new Streaming Endpoint to be created. + type: string + mediaServicesAccountNameRef: + description: Reference to a ServicesAccount in media to populate + mediaServicesAccountName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + mediaServicesAccountNameSelector: + description: Selector for a ServicesAccount in media to populate + mediaServicesAccountName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + resourceGroupName: + description: The name of the Resource Group where the Streaming + Endpoint should exist. Changing this forces a new Streaming + Endpoint to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + scaleUnits: + description: The number of scale units. To create a Standard Streaming + Endpoint set 0. For Premium Streaming Endpoint valid values + are between 1 and 10. + type: number + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Streaming Endpoint. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + accessControl: + description: A access_control block as defined below. + properties: + akamaiSignatureHeaderAuthenticationKey: + description: One or more akamai_signature_header_authentication_key + blocks as defined below. + items: + properties: + base64Key: + description: Authentication key. 
+ type: string + expiration: + description: The expiration time of the authentication + key. + type: string + identifier: + description: Identifier of the key. + type: string + type: object + type: array + ipAllow: + description: A ip_allow block as defined below. + items: + properties: + address: + description: The IP address to allow. + type: string + name: + description: The sku name of Streaming Endpoint. + type: string + subnetPrefixLength: + description: The subnet mask prefix length (see CIDR + notation). + type: number + type: object + type: array + type: object + autoStartEnabled: + description: The flag indicates if the resource should be automatically + started on creation. + type: boolean + cdnEnabled: + description: The CDN enabled flag. + type: boolean + cdnProfile: + description: The CDN profile name. + type: string + cdnProvider: + description: The CDN provider name. Supported value are StandardVerizon,PremiumVerizon + and StandardAkamai + type: string + crossSiteAccessPolicy: + description: A cross_site_access_policy block as defined below. + properties: + clientAccessPolicy: + description: The content of clientaccesspolicy.xml used by + Silverlight. + type: string + crossDomainPolicy: + description: The content of crossdomain.xml used by Silverlight. + type: string + type: object + customHostNames: + description: The custom host names of the streaming endpoint. + items: + type: string + type: array + x-kubernetes-list-type: set + description: + description: The streaming endpoint description. + type: string + location: + description: The Azure Region where the Streaming Endpoint should + exist. Changing this forces a new Streaming Endpoint to be created. + type: string + maxCacheAgeSeconds: + description: Max cache age in seconds. + type: number + scaleUnits: + description: The number of scale units. To create a Standard Streaming + Endpoint set 0. For Premium Streaming Endpoint valid values + are between 1 and 10. 
+ type: number + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Streaming Endpoint. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.scaleUnits is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.scaleUnits) + || (has(self.initProvider) && has(self.initProvider.scaleUnits))' + status: + description: StreamingEndpointStatus defines the observed state of StreamingEndpoint. + properties: + atProvider: + properties: + accessControl: + description: A access_control block as defined below. + properties: + akamaiSignatureHeaderAuthenticationKey: + description: One or more akamai_signature_header_authentication_key + blocks as defined below. + items: + properties: + base64Key: + description: Authentication key. + type: string + expiration: + description: The expiration time of the authentication + key. + type: string + identifier: + description: Identifier of the key. + type: string + type: object + type: array + ipAllow: + description: A ip_allow block as defined below. + items: + properties: + address: + description: The IP address to allow. + type: string + name: + description: The sku name of Streaming Endpoint. + type: string + subnetPrefixLength: + description: The subnet mask prefix length (see CIDR + notation). + type: number + type: object + type: array + type: object + autoStartEnabled: + description: The flag indicates if the resource should be automatically + started on creation. + type: boolean + cdnEnabled: + description: The CDN enabled flag. + type: boolean + cdnProfile: + description: The CDN profile name. 
+ type: string + cdnProvider: + description: The CDN provider name. Supported value are StandardVerizon,PremiumVerizon + and StandardAkamai + type: string + crossSiteAccessPolicy: + description: A cross_site_access_policy block as defined below. + properties: + clientAccessPolicy: + description: The content of clientaccesspolicy.xml used by + Silverlight. + type: string + crossDomainPolicy: + description: The content of crossdomain.xml used by Silverlight. + type: string + type: object + customHostNames: + description: The custom host names of the streaming endpoint. + items: + type: string + type: array + x-kubernetes-list-type: set + description: + description: The streaming endpoint description. + type: string + hostName: + description: The host name of the Streaming Endpoint. + type: string + id: + description: The ID of the Streaming Endpoint. + type: string + location: + description: The Azure Region where the Streaming Endpoint should + exist. Changing this forces a new Streaming Endpoint to be created. + type: string + maxCacheAgeSeconds: + description: Max cache age in seconds. + type: number + mediaServicesAccountName: + description: The Media Services account name. Changing this forces + a new Streaming Endpoint to be created. + type: string + resourceGroupName: + description: The name of the Resource Group where the Streaming + Endpoint should exist. Changing this forces a new Streaming + Endpoint to be created. + type: string + scaleUnits: + description: The number of scale units. To create a Standard Streaming + Endpoint set 0. For Premium Streaming Endpoint valid values + are between 1 and 10. + type: number + sku: + description: A sku block defined as below. + items: + properties: + capacity: + description: The sku capacity of Streaming Endpoint. + type: number + name: + description: The sku name of Streaming Endpoint. 
+ type: string + type: object + type: array + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Streaming Endpoint. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/media.azure.upbound.io_streamingpolicies.yaml b/package/crds/media.azure.upbound.io_streamingpolicies.yaml index b870eb88d..5fcc1216a 100644 --- a/package/crds/media.azure.upbound.io_streamingpolicies.yaml +++ b/package/crds/media.azure.upbound.io_streamingpolicies.yaml @@ -1619,3 +1619,1493 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: StreamingPolicy is the Schema for the StreamingPolicys API. Manages + a Streaming Policy. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: StreamingPolicySpec defines the desired state of StreamingPolicy + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + commonEncryptionCbcs: + description: A common_encryption_cbcs block as defined below. + Changing this forces a new Streaming Policy to be created. + properties: + clearKeyEncryption: + description: A clear_key_encryption block as defined below. + Changing this forces a new Streaming Policy to be created. + properties: + customKeysAcquisitionUrlTemplate: + description: The URL template for the custom service that + delivers content keys to the end user. This is not required + when using Azure Media Services for issuing keys. Changing + this forces a new Streaming Policy to be created. + type: string + type: object + defaultContentKey: + description: A default_content_key block as defined below. + Changing this forces a new Streaming Policy to be created. + properties: + label: + description: Label can be used to specify Content Key + when creating a Streaming Locator. Changing this forces + a new Streaming Policy to be created. + type: string + policyName: + description: Policy used by Default Key. 
Changing this + forces a new Streaming Policy to be created. + type: string + type: object + drmFairplay: + description: A drm_fairplay block as defined below. Changing + this forces a new Streaming Policy to be created. + properties: + allowPersistentLicense: + description: All license to be persistent or not. Changing + this forces a new Streaming Policy to be created. + type: boolean + customLicenseAcquisitionUrlTemplate: + description: The URL template for the custom service that + delivers licenses to the end user. This is not required + when using Azure Media Services for issuing licenses. + Changing this forces a new Streaming Policy to be created. + type: string + type: object + enabledProtocols: + description: A enabled_protocols block as defined below. Changing + this forces a new Streaming Policy to be created. + properties: + dash: + description: Enable DASH protocol or not. Changing this + forces a new Streaming Policy to be created. + type: boolean + download: + description: Enable Download protocol or not. Changing + this forces a new Streaming Policy to be created. + type: boolean + hls: + description: Enable HLS protocol or not. Changing this + forces a new Streaming Policy to be created. + type: boolean + smoothStreaming: + description: Enable SmoothStreaming protocol or not. Changing + this forces a new Streaming Policy to be created. + type: boolean + type: object + type: object + commonEncryptionCenc: + description: A common_encryption_cenc block as defined below. + Changing this forces a new Streaming Policy to be created. + properties: + clearKeyEncryption: + description: A clear_key_encryption block as defined below. + Changing this forces a new Streaming Policy to be created. + properties: + customKeysAcquisitionUrlTemplate: + description: The URL template for the custom service that + delivers content keys to the end user. This is not required + when using Azure Media Services for issuing keys. 
Changing + this forces a new Streaming Policy to be created. + type: string + type: object + clearTrack: + description: One or more clear_track blocks as defined below. + Changing this forces a new Streaming Policy to be created. + items: + properties: + condition: + description: One or more condition blocks as defined + below. Changing this forces a new Streaming Policy + to be created. + items: + properties: + operation: + description: The track property condition operation. + Possible value is Equal. Changing this forces + a new Streaming Policy to be created. + type: string + property: + description: The track property type. Possible + value is FourCC. Changing this forces a new + Streaming Policy to be created. + type: string + value: + description: The track property value. Changing + this forces a new Streaming Policy to be created. + type: string + type: object + type: array + type: object + type: array + contentKeyToTrackMapping: + description: One or more content_key_to_track_mapping blocks + as defined below. Changing this forces a new Streaming Policy + to be created. + items: + properties: + label: + description: Label can be used to specify Content Key + when creating a Streaming Locator. Changing this forces + a new Streaming Policy to be created. + type: string + policyName: + description: Policy used by Default Key. Changing this + forces a new Streaming Policy to be created. + type: string + track: + description: One or more track blocks as defined below. + Changing this forces a new Streaming Policy to be + created. + items: + properties: + condition: + description: One or more condition blocks as defined + below. Changing this forces a new Streaming + Policy to be created. + items: + properties: + operation: + description: The track property condition + operation. Possible value is Equal. Changing + this forces a new Streaming Policy to + be created. + type: string + property: + description: The track property type. Possible + value is FourCC. 
Changing this forces + a new Streaming Policy to be created. + type: string + value: + description: The track property value. Changing + this forces a new Streaming Policy to + be created. + type: string + type: object + type: array + type: object + type: array + type: object + type: array + defaultContentKey: + description: A default_content_key block as defined below. + Changing this forces a new Streaming Policy to be created. + properties: + label: + description: Label can be used to specify Content Key + when creating a Streaming Locator. Changing this forces + a new Streaming Policy to be created. + type: string + policyName: + description: Policy used by Default Key. Changing this + forces a new Streaming Policy to be created. + type: string + policyNameRef: + description: Reference to a ContentKeyPolicy in media + to populate policyName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + policyNameSelector: + description: Selector for a ContentKeyPolicy in media + to populate policyName. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + drmPlayready: + description: A drm_playready block as defined below. Changing + this forces a new Streaming Policy to be created. + properties: + customAttributes: + description: Custom attributes for PlayReady. Changing + this forces a new Streaming Policy to be created. + type: string + customLicenseAcquisitionUrlTemplate: + description: The URL template for the custom service that + delivers licenses to the end user. This is not required + when using Azure Media Services for issuing licenses. + Changing this forces a new Streaming Policy to be created. + type: string + type: object + drmWidevineCustomLicenseAcquisitionUrlTemplate: + description: The URL template for the custom service that + delivers licenses to the end user. This is not required + when using Azure Media Services for issuing licenses. 
Changing + this forces a new Streaming Policy to be created. + type: string + enabledProtocols: + description: A enabled_protocols block as defined below. Changing + this forces a new Streaming Policy to be created. + properties: + dash: + description: Enable DASH protocol or not. Changing this + forces a new Streaming Policy to be created. + type: boolean + download: + description: Enable Download protocol or not. Changing + this forces a new Streaming Policy to be created. + type: boolean + hls: + description: Enable HLS protocol or not. Changing this + forces a new Streaming Policy to be created. + type: boolean + smoothStreaming: + description: Enable SmoothStreaming protocol or not. Changing + this forces a new Streaming Policy to be created. + type: boolean + type: object + type: object + defaultContentKeyPolicyName: + description: Default Content Key used by current Streaming Policy. + Changing this forces a new Streaming Policy to be created. + type: string + envelopeEncryption: + description: A envelope_encryption block as defined below. Changing + this forces a new Streaming Policy to be created. + properties: + customKeysAcquisitionUrlTemplate: + description: The URL template for the custom service that + delivers content keys to the end user. This is not required + when using Azure Media Services for issuing keys. Changing + this forces a new Streaming Policy to be created. + type: string + defaultContentKey: + description: A default_content_key block as defined above. + Changing this forces a new Streaming Policy to be created. + properties: + label: + description: Label can be used to specify Content Key + when creating a Streaming Locator. Changing this forces + a new Streaming Policy to be created. + type: string + policyName: + description: Policy used by Default Key. Changing this + forces a new Streaming Policy to be created. + type: string + type: object + enabledProtocols: + description: A enabled_protocols block as defined above. 
Changing + this forces a new Streaming Policy to be created. + properties: + dash: + description: Enable DASH protocol or not. Changing this + forces a new Streaming Policy to be created. + type: boolean + download: + description: Enable Download protocol or not. Changing + this forces a new Streaming Policy to be created. + type: boolean + hls: + description: Enable HLS protocol or not. Changing this + forces a new Streaming Policy to be created. + type: boolean + smoothStreaming: + description: Enable SmoothStreaming protocol or not. Changing + this forces a new Streaming Policy to be created. + type: boolean + type: object + type: object + mediaServicesAccountName: + description: The Media Services account name. Changing this forces + a new Streaming Policy to be created. + type: string + mediaServicesAccountNameRef: + description: Reference to a ServicesAccount in media to populate + mediaServicesAccountName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + mediaServicesAccountNameSelector: + description: Selector for a ServicesAccount in media to populate + mediaServicesAccountName. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + noEncryptionEnabledProtocols: + description: A no_encryption_enabled_protocols block as defined + below. Changing this forces a new Streaming Policy to be created. + properties: + dash: + description: Enable DASH protocol or not. Changing this forces + a new Streaming Policy to be created. + type: boolean + download: + description: Enable Download protocol or not. Changing this + forces a new Streaming Policy to be created. + type: boolean + hls: + description: Enable HLS protocol or not. Changing this forces + a new Streaming Policy to be created. + type: boolean + smoothStreaming: + description: Enable SmoothStreaming protocol or not. Changing + this forces a new Streaming Policy to be created. + type: boolean + type: object + resourceGroupName: + description: The name of the Resource Group where the Streaming + Policy should exist. 
Changing this forces a new Streaming Policy + to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + commonEncryptionCbcs: + description: A common_encryption_cbcs block as defined below. + Changing this forces a new Streaming Policy to be created. + properties: + clearKeyEncryption: + description: A clear_key_encryption block as defined below. + Changing this forces a new Streaming Policy to be created. + properties: + customKeysAcquisitionUrlTemplate: + description: The URL template for the custom service that + delivers content keys to the end user. This is not required + when using Azure Media Services for issuing keys. Changing + this forces a new Streaming Policy to be created. + type: string + type: object + defaultContentKey: + description: A default_content_key block as defined below. + Changing this forces a new Streaming Policy to be created. + properties: + label: + description: Label can be used to specify Content Key + when creating a Streaming Locator. 
Changing this forces + a new Streaming Policy to be created. + type: string + policyName: + description: Policy used by Default Key. Changing this + forces a new Streaming Policy to be created. + type: string + type: object + drmFairplay: + description: A drm_fairplay block as defined below. Changing + this forces a new Streaming Policy to be created. + properties: + allowPersistentLicense: + description: All license to be persistent or not. Changing + this forces a new Streaming Policy to be created. + type: boolean + customLicenseAcquisitionUrlTemplate: + description: The URL template for the custom service that + delivers licenses to the end user. This is not required + when using Azure Media Services for issuing licenses. + Changing this forces a new Streaming Policy to be created. + type: string + type: object + enabledProtocols: + description: A enabled_protocols block as defined below. Changing + this forces a new Streaming Policy to be created. + properties: + dash: + description: Enable DASH protocol or not. Changing this + forces a new Streaming Policy to be created. + type: boolean + download: + description: Enable Download protocol or not. Changing + this forces a new Streaming Policy to be created. + type: boolean + hls: + description: Enable HLS protocol or not. Changing this + forces a new Streaming Policy to be created. + type: boolean + smoothStreaming: + description: Enable SmoothStreaming protocol or not. Changing + this forces a new Streaming Policy to be created. + type: boolean + type: object + type: object + commonEncryptionCenc: + description: A common_encryption_cenc block as defined below. + Changing this forces a new Streaming Policy to be created. + properties: + clearKeyEncryption: + description: A clear_key_encryption block as defined below. + Changing this forces a new Streaming Policy to be created. 
+ properties: + customKeysAcquisitionUrlTemplate: + description: The URL template for the custom service that + delivers content keys to the end user. This is not required + when using Azure Media Services for issuing keys. Changing + this forces a new Streaming Policy to be created. + type: string + type: object + clearTrack: + description: One or more clear_track blocks as defined below. + Changing this forces a new Streaming Policy to be created. + items: + properties: + condition: + description: One or more condition blocks as defined + below. Changing this forces a new Streaming Policy + to be created. + items: + properties: + operation: + description: The track property condition operation. + Possible value is Equal. Changing this forces + a new Streaming Policy to be created. + type: string + property: + description: The track property type. Possible + value is FourCC. Changing this forces a new + Streaming Policy to be created. + type: string + value: + description: The track property value. Changing + this forces a new Streaming Policy to be created. + type: string + type: object + type: array + type: object + type: array + contentKeyToTrackMapping: + description: One or more content_key_to_track_mapping blocks + as defined below. Changing this forces a new Streaming Policy + to be created. + items: + properties: + label: + description: Label can be used to specify Content Key + when creating a Streaming Locator. Changing this forces + a new Streaming Policy to be created. + type: string + policyName: + description: Policy used by Default Key. Changing this + forces a new Streaming Policy to be created. + type: string + track: + description: One or more track blocks as defined below. + Changing this forces a new Streaming Policy to be + created. + items: + properties: + condition: + description: One or more condition blocks as defined + below. Changing this forces a new Streaming + Policy to be created. 
+ items: + properties: + operation: + description: The track property condition + operation. Possible value is Equal. Changing + this forces a new Streaming Policy to + be created. + type: string + property: + description: The track property type. Possible + value is FourCC. Changing this forces + a new Streaming Policy to be created. + type: string + value: + description: The track property value. Changing + this forces a new Streaming Policy to + be created. + type: string + type: object + type: array + type: object + type: array + type: object + type: array + defaultContentKey: + description: A default_content_key block as defined below. + Changing this forces a new Streaming Policy to be created. + properties: + label: + description: Label can be used to specify Content Key + when creating a Streaming Locator. Changing this forces + a new Streaming Policy to be created. + type: string + policyName: + description: Policy used by Default Key. Changing this + forces a new Streaming Policy to be created. + type: string + policyNameRef: + description: Reference to a ContentKeyPolicy in media + to populate policyName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + policyNameSelector: + description: Selector for a ContentKeyPolicy in media + to populate policyName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + drmPlayready: + description: A drm_playready block as defined below. Changing + this forces a new Streaming Policy to be created. + properties: + customAttributes: + description: Custom attributes for PlayReady. Changing + this forces a new Streaming Policy to be created. + type: string + customLicenseAcquisitionUrlTemplate: + description: The URL template for the custom service that + delivers licenses to the end user. This is not required + when using Azure Media Services for issuing licenses. + Changing this forces a new Streaming Policy to be created. 
+ type: string + type: object + drmWidevineCustomLicenseAcquisitionUrlTemplate: + description: The URL template for the custom service that + delivers licenses to the end user. This is not required + when using Azure Media Services for issuing licenses. Changing + this forces a new Streaming Policy to be created. + type: string + enabledProtocols: + description: A enabled_protocols block as defined below. Changing + this forces a new Streaming Policy to be created. + properties: + dash: + description: Enable DASH protocol or not. Changing this + forces a new Streaming Policy to be created. + type: boolean + download: + description: Enable Download protocol or not. Changing + this forces a new Streaming Policy to be created. + type: boolean + hls: + description: Enable HLS protocol or not. Changing this + forces a new Streaming Policy to be created. + type: boolean + smoothStreaming: + description: Enable SmoothStreaming protocol or not. Changing + this forces a new Streaming Policy to be created. + type: boolean + type: object + type: object + defaultContentKeyPolicyName: + description: Default Content Key used by current Streaming Policy. + Changing this forces a new Streaming Policy to be created. + type: string + envelopeEncryption: + description: A envelope_encryption block as defined below. Changing + this forces a new Streaming Policy to be created. + properties: + customKeysAcquisitionUrlTemplate: + description: The URL template for the custom service that + delivers content keys to the end user. This is not required + when using Azure Media Services for issuing keys. Changing + this forces a new Streaming Policy to be created. + type: string + defaultContentKey: + description: A default_content_key block as defined above. + Changing this forces a new Streaming Policy to be created. + properties: + label: + description: Label can be used to specify Content Key + when creating a Streaming Locator. Changing this forces + a new Streaming Policy to be created. 
+ type: string + policyName: + description: Policy used by Default Key. Changing this + forces a new Streaming Policy to be created. + type: string + type: object + enabledProtocols: + description: A enabled_protocols block as defined above. Changing + this forces a new Streaming Policy to be created. + properties: + dash: + description: Enable DASH protocol or not. Changing this + forces a new Streaming Policy to be created. + type: boolean + download: + description: Enable Download protocol or not. Changing + this forces a new Streaming Policy to be created. + type: boolean + hls: + description: Enable HLS protocol or not. Changing this + forces a new Streaming Policy to be created. + type: boolean + smoothStreaming: + description: Enable SmoothStreaming protocol or not. Changing + this forces a new Streaming Policy to be created. + type: boolean + type: object + type: object + noEncryptionEnabledProtocols: + description: A no_encryption_enabled_protocols block as defined + below. Changing this forces a new Streaming Policy to be created. + properties: + dash: + description: Enable DASH protocol or not. Changing this forces + a new Streaming Policy to be created. + type: boolean + download: + description: Enable Download protocol or not. Changing this + forces a new Streaming Policy to be created. + type: boolean + hls: + description: Enable HLS protocol or not. Changing this forces + a new Streaming Policy to be created. + type: boolean + smoothStreaming: + description: Enable SmoothStreaming protocol or not. Changing + this forces a new Streaming Policy to be created. + type: boolean + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. 
+ This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: StreamingPolicyStatus defines the observed state of StreamingPolicy. + properties: + atProvider: + properties: + commonEncryptionCbcs: + description: A common_encryption_cbcs block as defined below. + Changing this forces a new Streaming Policy to be created. + properties: + clearKeyEncryption: + description: A clear_key_encryption block as defined below. + Changing this forces a new Streaming Policy to be created. + properties: + customKeysAcquisitionUrlTemplate: + description: The URL template for the custom service that + delivers content keys to the end user. 
This is not required + when using Azure Media Services for issuing keys. Changing + this forces a new Streaming Policy to be created. + type: string + type: object + defaultContentKey: + description: A default_content_key block as defined below. + Changing this forces a new Streaming Policy to be created. + properties: + label: + description: Label can be used to specify Content Key + when creating a Streaming Locator. Changing this forces + a new Streaming Policy to be created. + type: string + policyName: + description: Policy used by Default Key. Changing this + forces a new Streaming Policy to be created. + type: string + type: object + drmFairplay: + description: A drm_fairplay block as defined below. Changing + this forces a new Streaming Policy to be created. + properties: + allowPersistentLicense: + description: All license to be persistent or not. Changing + this forces a new Streaming Policy to be created. + type: boolean + customLicenseAcquisitionUrlTemplate: + description: The URL template for the custom service that + delivers licenses to the end user. This is not required + when using Azure Media Services for issuing licenses. + Changing this forces a new Streaming Policy to be created. + type: string + type: object + enabledProtocols: + description: A enabled_protocols block as defined below. Changing + this forces a new Streaming Policy to be created. + properties: + dash: + description: Enable DASH protocol or not. Changing this + forces a new Streaming Policy to be created. + type: boolean + download: + description: Enable Download protocol or not. Changing + this forces a new Streaming Policy to be created. + type: boolean + hls: + description: Enable HLS protocol or not. Changing this + forces a new Streaming Policy to be created. + type: boolean + smoothStreaming: + description: Enable SmoothStreaming protocol or not. Changing + this forces a new Streaming Policy to be created. 
+ type: boolean + type: object + type: object + commonEncryptionCenc: + description: A common_encryption_cenc block as defined below. + Changing this forces a new Streaming Policy to be created. + properties: + clearKeyEncryption: + description: A clear_key_encryption block as defined below. + Changing this forces a new Streaming Policy to be created. + properties: + customKeysAcquisitionUrlTemplate: + description: The URL template for the custom service that + delivers content keys to the end user. This is not required + when using Azure Media Services for issuing keys. Changing + this forces a new Streaming Policy to be created. + type: string + type: object + clearTrack: + description: One or more clear_track blocks as defined below. + Changing this forces a new Streaming Policy to be created. + items: + properties: + condition: + description: One or more condition blocks as defined + below. Changing this forces a new Streaming Policy + to be created. + items: + properties: + operation: + description: The track property condition operation. + Possible value is Equal. Changing this forces + a new Streaming Policy to be created. + type: string + property: + description: The track property type. Possible + value is FourCC. Changing this forces a new + Streaming Policy to be created. + type: string + value: + description: The track property value. Changing + this forces a new Streaming Policy to be created. + type: string + type: object + type: array + type: object + type: array + contentKeyToTrackMapping: + description: One or more content_key_to_track_mapping blocks + as defined below. Changing this forces a new Streaming Policy + to be created. + items: + properties: + label: + description: Label can be used to specify Content Key + when creating a Streaming Locator. Changing this forces + a new Streaming Policy to be created. + type: string + policyName: + description: Policy used by Default Key. Changing this + forces a new Streaming Policy to be created. 
+ type: string + track: + description: One or more track blocks as defined below. + Changing this forces a new Streaming Policy to be + created. + items: + properties: + condition: + description: One or more condition blocks as defined + below. Changing this forces a new Streaming + Policy to be created. + items: + properties: + operation: + description: The track property condition + operation. Possible value is Equal. Changing + this forces a new Streaming Policy to + be created. + type: string + property: + description: The track property type. Possible + value is FourCC. Changing this forces + a new Streaming Policy to be created. + type: string + value: + description: The track property value. Changing + this forces a new Streaming Policy to + be created. + type: string + type: object + type: array + type: object + type: array + type: object + type: array + defaultContentKey: + description: A default_content_key block as defined below. + Changing this forces a new Streaming Policy to be created. + properties: + label: + description: Label can be used to specify Content Key + when creating a Streaming Locator. Changing this forces + a new Streaming Policy to be created. + type: string + policyName: + description: Policy used by Default Key. Changing this + forces a new Streaming Policy to be created. + type: string + type: object + drmPlayready: + description: A drm_playready block as defined below. Changing + this forces a new Streaming Policy to be created. + properties: + customAttributes: + description: Custom attributes for PlayReady. Changing + this forces a new Streaming Policy to be created. + type: string + customLicenseAcquisitionUrlTemplate: + description: The URL template for the custom service that + delivers licenses to the end user. This is not required + when using Azure Media Services for issuing licenses. + Changing this forces a new Streaming Policy to be created. 
+ type: string + type: object + drmWidevineCustomLicenseAcquisitionUrlTemplate: + description: The URL template for the custom service that + delivers licenses to the end user. This is not required + when using Azure Media Services for issuing licenses. Changing + this forces a new Streaming Policy to be created. + type: string + enabledProtocols: + description: A enabled_protocols block as defined below. Changing + this forces a new Streaming Policy to be created. + properties: + dash: + description: Enable DASH protocol or not. Changing this + forces a new Streaming Policy to be created. + type: boolean + download: + description: Enable Download protocol or not. Changing + this forces a new Streaming Policy to be created. + type: boolean + hls: + description: Enable HLS protocol or not. Changing this + forces a new Streaming Policy to be created. + type: boolean + smoothStreaming: + description: Enable SmoothStreaming protocol or not. Changing + this forces a new Streaming Policy to be created. + type: boolean + type: object + type: object + defaultContentKeyPolicyName: + description: Default Content Key used by current Streaming Policy. + Changing this forces a new Streaming Policy to be created. + type: string + envelopeEncryption: + description: A envelope_encryption block as defined below. Changing + this forces a new Streaming Policy to be created. + properties: + customKeysAcquisitionUrlTemplate: + description: The URL template for the custom service that + delivers content keys to the end user. This is not required + when using Azure Media Services for issuing keys. Changing + this forces a new Streaming Policy to be created. + type: string + defaultContentKey: + description: A default_content_key block as defined above. + Changing this forces a new Streaming Policy to be created. + properties: + label: + description: Label can be used to specify Content Key + when creating a Streaming Locator. Changing this forces + a new Streaming Policy to be created. 
+ type: string + policyName: + description: Policy used by Default Key. Changing this + forces a new Streaming Policy to be created. + type: string + type: object + enabledProtocols: + description: A enabled_protocols block as defined above. Changing + this forces a new Streaming Policy to be created. + properties: + dash: + description: Enable DASH protocol or not. Changing this + forces a new Streaming Policy to be created. + type: boolean + download: + description: Enable Download protocol or not. Changing + this forces a new Streaming Policy to be created. + type: boolean + hls: + description: Enable HLS protocol or not. Changing this + forces a new Streaming Policy to be created. + type: boolean + smoothStreaming: + description: Enable SmoothStreaming protocol or not. Changing + this forces a new Streaming Policy to be created. + type: boolean + type: object + type: object + id: + description: The ID of the Streaming Policy. + type: string + mediaServicesAccountName: + description: The Media Services account name. Changing this forces + a new Streaming Policy to be created. + type: string + noEncryptionEnabledProtocols: + description: A no_encryption_enabled_protocols block as defined + below. Changing this forces a new Streaming Policy to be created. + properties: + dash: + description: Enable DASH protocol or not. Changing this forces + a new Streaming Policy to be created. + type: boolean + download: + description: Enable Download protocol or not. Changing this + forces a new Streaming Policy to be created. + type: boolean + hls: + description: Enable HLS protocol or not. Changing this forces + a new Streaming Policy to be created. + type: boolean + smoothStreaming: + description: Enable SmoothStreaming protocol or not. Changing + this forces a new Streaming Policy to be created. + type: boolean + type: object + resourceGroupName: + description: The name of the Resource Group where the Streaming + Policy should exist. 
Changing this forces a new Streaming Policy + to be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/media.azure.upbound.io_transforms.yaml b/package/crds/media.azure.upbound.io_transforms.yaml index 4ab66e7a8..33d2593f3 100644 --- a/package/crds/media.azure.upbound.io_transforms.yaml +++ b/package/crds/media.azure.upbound.io_transforms.yaml @@ -4923,3 +4923,4455 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Transform is the Schema for the Transforms API. Manages a Transform. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: TransformSpec defines the desired state of Transform + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: An optional verbose description of the Transform. + type: string + mediaServicesAccountName: + description: The Media Services account name. Changing this forces + a new Transform to be created. + type: string + mediaServicesAccountNameRef: + description: Reference to a ServicesAccount in media to populate + mediaServicesAccountName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + mediaServicesAccountNameSelector: + description: Selector for a ServicesAccount in media to populate + mediaServicesAccountName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + output: + description: One or more output blocks as defined below. At least + one output must be defined. + items: + properties: + audioAnalyzerPreset: + description: An audio_analyzer_preset block as defined above. + properties: + audioAnalysisMode: + description: Possible values are Basic or Standard. + Determines the set of audio analysis operations to + be performed. Default to Standard. 
+ type: string + audioLanguage: + description: 'The language for the audio payload in + the input using the BCP-47 format of ''language tag-region'' + (e.g: ''en-US''). If you know the language of your + content, it is recommended that you specify it. The + language must be specified explicitly for AudioAnalysisMode:Basic, + since automatic language detection is not included + in basic mode. If the language isn''t specified, automatic + language detection will choose the first language + detected and process with the selected language for + the duration of the file. It does not currently support + dynamically switching between languages after the + first language is detected. The automatic detection + works best with audio recordings with clearly discernible + speech. If automatic detection fails to find the language, + transcription would fall back to en-US. The list of + supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463.' + type: string + experimentalOptions: + additionalProperties: + type: string + description: Dictionary containing key value pairs for + parameters not exposed in the preset itself. + type: object + x-kubernetes-map-type: granular + type: object + builtinPreset: + description: A builtin_preset block as defined above. + properties: + presetConfiguration: + description: A preset_configuration block as defined + below. + properties: + complexity: + description: The complexity of the encoding. Possible + values are Balanced, Speed or Quality. + type: string + interleaveOutput: + description: Specifies the interleave mode of the + output to control how audio are stored in the + container format. Possible values are InterleavedOutput + and NonInterleavedOutput. + type: string + keyFrameIntervalInSeconds: + description: The key frame interval in seconds. + Possible value is a positive float. For example, + set as 2.0 to reduce the playback buffering for + some players. 
+ type: number + maxBitrateBps: + description: The maximum bitrate in bits per second + (threshold for the top video layer). For example, + set as 6000000 to avoid producing very high bitrate + outputs for contents with high complexity. + type: number + maxHeight: + description: The maximum height of output video + layers. For example, set as 720 to produce output + layers up to 720P even if the input is 4K. + type: number + maxLayers: + description: The maximum number of output video + layers. For example, set as 4 to make sure at + most 4 output layers are produced to control the + overall cost of the encoding job. + type: number + minBitrateBps: + description: The minimum bitrate in bits per second + (threshold for the bottom video layer). For example, + set as 200000 to have a bottom layer that covers + users with low network bandwidth. + type: number + minHeight: + description: The minimum height of output video + layers. For example, set as 360 to avoid output + layers of smaller resolutions like 180P. + type: number + type: object + presetName: + description: The built-in preset to be used for encoding + videos. The Possible values are AACGoodQualityAudio, + AdaptiveStreaming, ContentAwareEncoding, ContentAwareEncodingExperimental, + CopyAllBitrateNonInterleaved, DDGoodQualityAudio, + H265AdaptiveStreaming, H265ContentAwareEncoding, H265SingleBitrate4K, + H265SingleBitrate1080p, H265SingleBitrate720p, H264MultipleBitrate1080p, + H264MultipleBitrateSD, H264MultipleBitrate720p, H264SingleBitrate1080p, + H264SingleBitrateSD and H264SingleBitrate720p. + type: string + type: object + customPreset: + description: A custom_preset block as defined above. + properties: + codec: + description: One or more codec blocks as defined above. + items: + properties: + aacAudio: + description: A aac_audio block as defined above. + properties: + bitrate: + description: The average bitrate in bits per + second at which to encode the input video + when generating this layer. 
+ type: number + channels: + description: The number of audio channels. + Default to 2. + type: number + label: + description: Specifies the label for the codec. + The label can be used to control muxing + behavior. + type: string + profile: + description: The H.264 profile. Possible values + are Auto, Baseline, High, High422, High444,or + Main. Default to Auto. + type: string + samplingRate: + description: The sampling rate to use for + encoding in Hertz. Default to 48000. + type: number + type: object + copyAudio: + description: A copy_audio block as defined below. + properties: + label: + description: Specifies the label for the codec. + The label can be used to control muxing + behavior. + type: string + type: object + copyVideo: + description: A copy_video block as defined below. + properties: + label: + description: Specifies the label for the codec. + The label can be used to control muxing + behavior. + type: string + type: object + ddAudio: + description: A dd_audio block as defined below. + properties: + bitrate: + description: The average bitrate in bits per + second at which to encode the input video + when generating this layer. + type: number + channels: + description: The number of audio channels. + Default to 2. + type: number + label: + description: Specifies the label for the codec. + The label can be used to control muxing + behavior. + type: string + samplingRate: + description: The sampling rate to use for + encoding in Hertz. Default to 48000. + type: number + type: object + h264Video: + description: A h264_video block as defined below. + properties: + complexity: + description: The complexity of the encoding. + Possible values are Balanced, Speed or Quality. + type: string + keyFrameInterval: + description: The distance between two key + frames. The value should be non-zero in + the range 0.5 to 20 seconds, specified in + ISO 8601 format. 
Note that this setting + is ignored if sync_mode is set to Passthrough, + where the KeyFrameInterval value will follow + the input source setting. Defaults to PT2S. + type: string + label: + description: Specifies the label for the codec. + The label can be used to control muxing + behavior. + type: string + layer: + description: One or more layer blocks as defined + below. + items: + properties: + adaptiveBFrameEnabled: + description: Whether adaptive B-frames + are used when encoding this layer. + If not specified, the encoder will + turn it on whenever the video profile + permits its use. Default to true. + type: boolean + bFrames: + description: The number of B-frames + to use when encoding this layer. If + not specified, the encoder chooses + an appropriate number based on the + video profile and level. + type: number + bitrate: + description: The average bitrate in + bits per second at which to encode + the input video when generating this + layer. + type: number + bufferWindow: + description: Specifies the maximum amount + of time that the encoder should buffer + frames before encoding. The value + should be in ISO 8601 format. The + value should be in the range 0.1 to + 100 seconds. Defaults to PT5S. + type: string + crf: + description: The value of CRF to be + used when encoding this layer. This + setting takes effect when rate_control_mode + is set CRF. The range of CRF value + is between 0 and 51, where lower values + would result in better quality, at + the expense of higher file sizes. + Higher values mean more compression, + but at some point quality degradation + will be noticed. Default to 23. + type: number + entropyMode: + description: The entropy mode to be + used for this layer. Possible values + are Cabac or Cavlc. If not specified, + the encoder chooses the mode that + is appropriate for the profile and + level. + type: string + frameRate: + description: The frame rate (in frames + per second) at which to encode this + layer. 
The value can be in the form + of M/N where M and N are integers + (For example, 30000/1001), or in the + form of a number (For example, 30, + or 29.97). The encoder enforces constraints + on allowed frame rates based on the + profile and level. If it is not specified, + the encoder will use the same frame + rate as the input video. + type: string + height: + description: The height of the rectangular + region in pixels. This can be absolute + pixel value (e.g 100), or relative + to the size of the video (For example, + 50%). + type: string + label: + description: Specifies the label for + the codec. The label can be used to + control muxing behavior. + type: string + level: + description: The H.264 levels. Currently, + the resource support Level up to 6.2. + The value can be auto, or a number + that matches the H.264 profile. If + not specified, the default is auto, + which lets the encoder choose the + Level that is appropriate for this + layer. + type: string + maxBitrate: + description: The maximum bitrate (in + bits per second), at which the VBV + buffer should be assumed to refill. + If not specified, defaults to the + same value as bitrate. + type: number + profile: + description: The H.264 profile. Possible + values are Auto, Baseline, High, High422, + High444,or Main. Default to Auto. + type: string + referenceFrames: + description: The number of reference + frames to be used when encoding this + layer. If not specified, the encoder + determines an appropriate number based + on the encoder complexity setting. + type: number + slices: + description: The number of slices to + be used when encoding this layer. + If not specified, default is 1, which + means that encoder will use a single + slice for each frame. + type: number + width: + description: The width of the rectangular + region in pixels. This can be absolute + pixel value (e.g 100), or relative + to the size of the video (For example, + 50%). 
+ type: string + type: object + type: array + rateControlMode: + description: The rate control mode. Possible + values are ABR, CBR or CRF. Default to ABR. + type: string + sceneChangeDetectionEnabled: + description: Whether the encoder should insert + key frames at scene changes. This flag should + be set to true only when the encoder is + being configured to produce a single output + video. Default to false. + type: boolean + stretchMode: + description: The resizing mode, which indicates + how the input video will be resized to fit + the desired output resolution(s). Possible + values are AutoFit, AutoSize or None. Default + to AutoSize. + type: string + syncMode: + description: Specifies the synchronization + mode for the video. Possible values are + Auto, Cfr, Passthrough or Vfr. Default to + Auto. + type: string + type: object + h265Video: + description: A h265_video block as defined below. + properties: + complexity: + description: The complexity of the encoding. + Possible values are Balanced, Speed or Quality. + type: string + keyFrameInterval: + description: The distance between two key + frames. The value should be non-zero in + the range 0.5 to 20 seconds, specified in + ISO 8601 format. Note that this setting + is ignored if sync_mode is set to Passthrough, + where the KeyFrameInterval value will follow + the input source setting. Defaults to PT2S. + type: string + label: + description: Specifies the label for the codec. + The label can be used to control muxing + behavior. + type: string + layer: + description: One or more layer blocks as defined + below. + items: + properties: + adaptiveBFrameEnabled: + description: Whether adaptive B-frames + are used when encoding this layer. + If not specified, the encoder will + turn it on whenever the video profile + permits its use. Default to true. + type: boolean + bFrames: + description: The number of B-frames + to use when encoding this layer. 
If + not specified, the encoder chooses + an appropriate number based on the + video profile and level. + type: number + bitrate: + description: The average bitrate in + bits per second at which to encode + the input video when generating this + layer. + type: number + bufferWindow: + description: Specifies the maximum amount + of time that the encoder should buffer + frames before encoding. The value + should be in ISO 8601 format. The + value should be in the range 0.1 to + 100 seconds. Defaults to PT5S. + type: string + crf: + description: The value of CRF to be + used when encoding this layer. This + setting takes effect when rate_control_mode + is set CRF. The range of CRF value + is between 0 and 51, where lower values + would result in better quality, at + the expense of higher file sizes. + Higher values mean more compression, + but at some point quality degradation + will be noticed. Default to 23. + type: number + frameRate: + description: The frame rate (in frames + per second) at which to encode this + layer. The value can be in the form + of M/N where M and N are integers + (For example, 30000/1001), or in the + form of a number (For example, 30, + or 29.97). The encoder enforces constraints + on allowed frame rates based on the + profile and level. If it is not specified, + the encoder will use the same frame + rate as the input video. + type: string + height: + description: The height of the rectangular + region in pixels. This can be absolute + pixel value (e.g 100), or relative + to the size of the video (For example, + 50%). + type: string + label: + description: Specifies the label for + the codec. The label can be used to + control muxing behavior. + type: string + level: + description: The H.264 levels. Currently, + the resource support Level up to 6.2. + The value can be auto, or a number + that matches the H.264 profile. If + not specified, the default is auto, + which lets the encoder choose the + Level that is appropriate for this + layer. 
+ type: string + maxBitrate: + description: The maximum bitrate (in + bits per second), at which the VBV + buffer should be assumed to refill. + If not specified, defaults to the + same value as bitrate. + type: number + profile: + description: The H.264 profile. Possible + values are Auto, Baseline, High, High422, + High444,or Main. Default to Auto. + type: string + referenceFrames: + description: The number of reference + frames to be used when encoding this + layer. If not specified, the encoder + determines an appropriate number based + on the encoder complexity setting. + type: number + slices: + description: The number of slices to + be used when encoding this layer. + If not specified, default is 1, which + means that encoder will use a single + slice for each frame. + type: number + width: + description: The width of the rectangular + region in pixels. This can be absolute + pixel value (e.g 100), or relative + to the size of the video (For example, + 50%). + type: string + type: object + type: array + sceneChangeDetectionEnabled: + description: Whether the encoder should insert + key frames at scene changes. This flag should + be set to true only when the encoder is + being configured to produce a single output + video. Default to false. + type: boolean + stretchMode: + description: The resizing mode, which indicates + how the input video will be resized to fit + the desired output resolution(s). Possible + values are AutoFit, AutoSize or None. Default + to AutoSize. + type: string + syncMode: + description: Specifies the synchronization + mode for the video. Possible values are + Auto, Cfr, Passthrough or Vfr. Default to + Auto. + type: string + type: object + jpgImage: + description: A jpg_image block as defined below. + properties: + keyFrameInterval: + description: The distance between two key + frames. The value should be non-zero in + the range 0.5 to 20 seconds, specified in + ISO 8601 format. 
Note that this setting + is ignored if sync_mode is set to Passthrough, + where the KeyFrameInterval value will follow + the input source setting. Defaults to PT2S. + type: string + label: + description: Specifies the label for the codec. + The label can be used to control muxing + behavior. + type: string + layer: + description: One or more layer blocks as defined + below. + items: + properties: + height: + description: The height of the rectangular + region in pixels. This can be absolute + pixel value (e.g 100), or relative + to the size of the video (For example, + 50%). + type: string + label: + description: Specifies the label for + the codec. The label can be used to + control muxing behavior. + type: string + quality: + description: The compression quality + of the JPEG output. Range is from + 0 to 100 and the default is 70. + type: number + width: + description: The width of the rectangular + region in pixels. This can be absolute + pixel value (e.g 100), or relative + to the size of the video (For example, + 50%). + type: string + type: object + type: array + range: + description: The position relative to transform + preset start time in the input video at + which to stop generating thumbnails. The + value can be in ISO 8601 format (For example, + PT5M30S to stop at 5 minutes and 30 seconds + from start time), or a frame count (For + example, 300 to stop at the 300th frame + from the frame at start time. If this value + is 1, it means only producing one thumbnail + at start time), or a relative value to the + stream duration (For example, 50% to stop + at half of stream duration from start time). + The default value is 100%, which means to + stop at the end of the stream. + type: string + spriteColumn: + description: 'Sets the number of columns used + in thumbnail sprite image. The number of + rows are automatically calculated and a + VTT file is generated with the coordinate + mappings for each thumbnail in the sprite. 
+ Note: this value should be a positive integer + and a proper value is recommended so that + the output image resolution will not go + beyond JPEG maximum pixel resolution limit + 65535x65535.' + type: number + start: + description: The start position, with reference + to the input video, at which the overlay + starts. The value should be in ISO 8601 + format. For example, PT05S to start the + overlay at 5 seconds into the input video. + If not specified the overlay starts from + the beginning of the input video. + type: string + step: + description: 'The intervals at which thumbnails + are generated. The value can be in ISO 8601 + format (For example, PT05S for one image + every 5 seconds), or a frame count (For + example, 30 for one image every 30 frames), + or a relative value to stream duration (For + example, 10% for one image every 10% of + stream duration). Note: Step value will + affect the first generated thumbnail, which + may not be exactly the one specified at + transform preset start time. This is due + to the encoder, which tries to select the + best thumbnail between start time and Step + position from start time as the first output. + As the default value is 10%, it means if + stream has long duration, the first generated + thumbnail might be far away from the one + specified at start time. Try to select reasonable + value for Step if the first thumbnail is + expected close to start time, or set Range + value at 1 if only one thumbnail is needed + at start time.' + type: string + stretchMode: + description: The resizing mode, which indicates + how the input video will be resized to fit + the desired output resolution(s). Possible + values are AutoFit, AutoSize or None. Default + to AutoSize. + type: string + syncMode: + description: Specifies the synchronization + mode for the video. Possible values are + Auto, Cfr, Passthrough or Vfr. Default to + Auto. + type: string + type: object + pngImage: + description: A png_image block as defined below. 
+ properties: + keyFrameInterval: + description: The distance between two key + frames. The value should be non-zero in + the range 0.5 to 20 seconds, specified in + ISO 8601 format. Note that this setting + is ignored if sync_mode is set to Passthrough, + where the KeyFrameInterval value will follow + the input source setting. Defaults to PT2S. + type: string + label: + description: Specifies the label for the codec. + The label can be used to control muxing + behavior. + type: string + layer: + description: One or more layer blocks as defined + below. + items: + properties: + height: + description: The height of the rectangular + region in pixels. This can be absolute + pixel value (e.g 100), or relative + to the size of the video (For example, + 50%). + type: string + label: + description: Specifies the label for + the codec. The label can be used to + control muxing behavior. + type: string + width: + description: The width of the rectangular + region in pixels. This can be absolute + pixel value (e.g 100), or relative + to the size of the video (For example, + 50%). + type: string + type: object + type: array + range: + description: The position relative to transform + preset start time in the input video at + which to stop generating thumbnails. The + value can be in ISO 8601 format (For example, + PT5M30S to stop at 5 minutes and 30 seconds + from start time), or a frame count (For + example, 300 to stop at the 300th frame + from the frame at start time. If this value + is 1, it means only producing one thumbnail + at start time), or a relative value to the + stream duration (For example, 50% to stop + at half of stream duration from start time). + The default value is 100%, which means to + stop at the end of the stream. + type: string + start: + description: The start position, with reference + to the input video, at which the overlay + starts. The value should be in ISO 8601 + format. 
For example, PT05S to start the + overlay at 5 seconds into the input video. + If not specified the overlay starts from + the beginning of the input video. + type: string + step: + description: 'The intervals at which thumbnails + are generated. The value can be in ISO 8601 + format (For example, PT05S for one image + every 5 seconds), or a frame count (For + example, 30 for one image every 30 frames), + or a relative value to stream duration (For + example, 10% for one image every 10% of + stream duration). Note: Step value will + affect the first generated thumbnail, which + may not be exactly the one specified at + transform preset start time. This is due + to the encoder, which tries to select the + best thumbnail between start time and Step + position from start time as the first output. + As the default value is 10%, it means if + stream has long duration, the first generated + thumbnail might be far away from the one + specified at start time. Try to select reasonable + value for Step if the first thumbnail is + expected close to start time, or set Range + value at 1 if only one thumbnail is needed + at start time.' + type: string + stretchMode: + description: The resizing mode, which indicates + how the input video will be resized to fit + the desired output resolution(s). Possible + values are AutoFit, AutoSize or None. Default + to AutoSize. + type: string + syncMode: + description: Specifies the synchronization + mode for the video. Possible values are + Auto, Cfr, Passthrough or Vfr. Default to + Auto. + type: string + type: object + type: object + type: array + experimentalOptions: + additionalProperties: + type: string + description: Dictionary containing key value pairs for + parameters not exposed in the preset itself. + type: object + x-kubernetes-map-type: granular + filter: + description: A filter block as defined below. + properties: + cropRectangle: + description: A crop_rectangle block as defined above. 
+ properties: + height: + description: The height of the rectangular region + in pixels. This can be absolute pixel value + (e.g 100), or relative to the size of the + video (For example, 50%). + type: string + left: + description: The number of pixels from the left-margin. + This can be absolute pixel value (e.g 100), + or relative to the size of the video (For + example, 50%). + type: string + top: + description: The number of pixels from the top-margin. + This can be absolute pixel value (e.g 100), + or relative to the size of the video (For + example, 50%). + type: string + width: + description: The width of the rectangular region + in pixels. This can be absolute pixel value + (e.g 100), or relative to the size of the + video (For example, 50%). + type: string + type: object + deinterlace: + description: A deinterlace block as defined below. + properties: + mode: + description: The deinterlacing mode. Possible + values are AutoPixelAdaptive or Off. Default + to AutoPixelAdaptive. + type: string + parity: + description: The field parity to use for deinterlacing. + Possible values are Auto, TopFieldFirst or + BottomFieldFirst. Default to Auto. + type: string + type: object + fadeIn: + description: A fade_in block as defined above. + properties: + duration: + description: The duration of the fade effect + in the video. The value can be in ISO 8601 + format (For example, PT05S to fade In/Out + a color during 5 seconds), or a frame count + (For example, 10 to fade 10 frames from the + start time), or a relative value to stream + duration (For example, 10% to fade 10% of + stream duration). + type: string + fadeColor: + description: 'The color for the fade in/out. + It can be on the CSS Level1 colors or an RGB/hex + value: e.g: rgb(255,0,0), 0xFF0000 or #FF0000.' + type: string + start: + description: The start position, with reference + to the input video, at which the overlay starts. + The value should be in ISO 8601 format. 
For + example, PT05S to start the overlay at 5 seconds + into the input video. If not specified the + overlay starts from the beginning of the input + video. + type: string + type: object + fadeOut: + description: A fade_out block as defined above. + properties: + duration: + description: The duration of the fade effect + in the video. The value can be in ISO 8601 + format (For example, PT05S to fade In/Out + a color during 5 seconds), or a frame count + (For example, 10 to fade 10 frames from the + start time), or a relative value to stream + duration (For example, 10% to fade 10% of + stream duration). + type: string + fadeColor: + description: 'The color for the fade in/out. + It can be on the CSS Level1 colors or an RGB/hex + value: e.g: rgb(255,0,0), 0xFF0000 or #FF0000.' + type: string + start: + description: The start position, with reference + to the input video, at which the overlay starts. + The value should be in ISO 8601 format. For + example, PT05S to start the overlay at 5 seconds + into the input video. If not specified the + overlay starts from the beginning of the input + video. + type: string + type: object + overlay: + description: One or more overlay blocks as defined + below. + items: + properties: + audio: + description: An audio block as defined above. + properties: + audioGainLevel: + description: The gain level of audio in + the overlay. The value should be in + range between 0 to 1.0. The default + is 1.0. + type: number + end: + description: The end position, with reference + to the input video, at which the overlay + ends. The value should be in ISO 8601 + format. For example, PT30S to end the + overlay at 30 seconds into the input + video. If not specified or the value + is greater than the input video duration, + the overlay will be applied until the + end of the input video if the overlay + media duration is greater than the input + video duration, else the overlay will + last as long as the overlay media duration. 
+ type: string + fadeInDuration: + description: The duration over which the + overlay fades in onto the input video. + The value should be in ISO 8601 duration + format. If not specified the default + behavior is to have no fade in (same + as PT0S). + type: string + fadeOutDuration: + description: The duration over which the + overlay fades out of the input video. + The value should be in ISO 8601 duration + format. If not specified the default + behavior is to have no fade out (same + as PT0S). + type: string + inputLabel: + description: The label of the job input + which is to be used as an overlay. The + input must specify exact one file. You + can specify an image file in JPG, PNG, + GIF or BMP format, or an audio file + (such as a WAV, MP3, WMA or M4A file), + or a video file. + type: string + start: + description: The start position, with + reference to the input video, at which + the overlay starts. The value should + be in ISO 8601 format. For example, + PT05S to start the overlay at 5 seconds + into the input video. If not specified + the overlay starts from the beginning + of the input video. + type: string + type: object + video: + description: A video block as defined below. + properties: + audioGainLevel: + description: The gain level of audio in + the overlay. The value should be in + range between 0 to 1.0. The default + is 1.0. + type: number + cropRectangle: + description: A crop_rectangle block as + defined above. + properties: + height: + description: The height of the rectangular + region in pixels. This can be absolute + pixel value (e.g 100), or relative + to the size of the video (For example, + 50%). + type: string + left: + description: The number of pixels + from the left-margin. This can be + absolute pixel value (e.g 100), + or relative to the size of the video + (For example, 50%). + type: string + top: + description: The number of pixels + from the top-margin. 
This can be + absolute pixel value (e.g 100), + or relative to the size of the video + (For example, 50%). + type: string + width: + description: The width of the rectangular + region in pixels. This can be absolute + pixel value (e.g 100), or relative + to the size of the video (For example, + 50%). + type: string + type: object + end: + description: The end position, with reference + to the input video, at which the overlay + ends. The value should be in ISO 8601 + format. For example, PT30S to end the + overlay at 30 seconds into the input + video. If not specified or the value + is greater than the input video duration, + the overlay will be applied until the + end of the input video if the overlay + media duration is greater than the input + video duration, else the overlay will + last as long as the overlay media duration. + type: string + fadeInDuration: + description: The duration over which the + overlay fades in onto the input video. + The value should be in ISO 8601 duration + format. If not specified the default + behavior is to have no fade in (same + as PT0S). + type: string + fadeOutDuration: + description: The duration over which the + overlay fades out of the input video. + The value should be in ISO 8601 duration + format. If not specified the default + behavior is to have no fade out (same + as PT0S). + type: string + inputLabel: + description: The label of the job input + which is to be used as an overlay. The + input must specify exact one file. You + can specify an image file in JPG, PNG, + GIF or BMP format, or an audio file + (such as a WAV, MP3, WMA or M4A file), + or a video file. + type: string + opacity: + description: The opacity of the overlay. + The value should be in the range between + 0 to 1.0. Default to 1.0, which means + the overlay is opaque. + type: number + position: + description: A position block as defined + above. + properties: + height: + description: The height of the rectangular + region in pixels. 
This can be absolute + pixel value (e.g 100), or relative + to the size of the video (For example, + 50%). + type: string + left: + description: The number of pixels + from the left-margin. This can be + absolute pixel value (e.g 100), + or relative to the size of the video + (For example, 50%). + type: string + top: + description: The number of pixels + from the top-margin. This can be + absolute pixel value (e.g 100), + or relative to the size of the video + (For example, 50%). + type: string + width: + description: The width of the rectangular + region in pixels. This can be absolute + pixel value (e.g 100), or relative + to the size of the video (For example, + 50%). + type: string + type: object + start: + description: The start position, with + reference to the input video, at which + the overlay starts. The value should + be in ISO 8601 format. For example, + PT05S to start the overlay at 5 seconds + into the input video. If not specified + the overlay starts from the beginning + of the input video. + type: string + type: object + type: object + type: array + rotation: + description: The rotation to be applied to the input + video before it is encoded. Possible values are + Auto, None, Rotate90, Rotate180, Rotate270,or + Rotate0. Default to Auto. + type: string + type: object + format: + description: One or more format blocks as defined below. + items: + properties: + jpg: + description: A jpg block as defined below. + properties: + filenamePattern: + description: 'The file naming pattern used + for the creation of output files. The following + macros are supported in the file name: {Basename} + - An expansion macro that will use the name + of the input video file. If the base name(the + file suffix is not included) of the input + video file is less than 32 characters long, + the base name of input video files will + be used. 
If the length of base name of the + input video file exceeds 32 characters, + the base name is truncated to the first + 32 characters in total length. {Extension} + - The appropriate extension for this format. + {Label} - The label assigned to the codec/layer. + {Index} - A unique index for thumbnails. + Only applicable to thumbnails. {AudioStream} + - string "Audio" plus audio stream number(start + from 1). {Bitrate} - The audio/video bitrate + in kbps. Not applicable to thumbnails. {Codec} + - The type of the audio/video codec. {Resolution} + - The video resolution. Any unsubstituted + macros will be collapsed and removed from + the filename.' + type: string + type: object + mp4: + description: A mp4 block as defined below. + properties: + filenamePattern: + description: 'The file naming pattern used + for the creation of output files. The following + macros are supported in the file name: {Basename} + - An expansion macro that will use the name + of the input video file. If the base name(the + file suffix is not included) of the input + video file is less than 32 characters long, + the base name of input video files will + be used. If the length of base name of the + input video file exceeds 32 characters, + the base name is truncated to the first + 32 characters in total length. {Extension} + - The appropriate extension for this format. + {Label} - The label assigned to the codec/layer. + {Index} - A unique index for thumbnails. + Only applicable to thumbnails. {AudioStream} + - string "Audio" plus audio stream number(start + from 1). {Bitrate} - The audio/video bitrate + in kbps. Not applicable to thumbnails. {Codec} + - The type of the audio/video codec. {Resolution} + - The video resolution. Any unsubstituted + macros will be collapsed and removed from + the filename.' + type: string + outputFile: + description: One or more output_file blocks + as defined above. 
+ items: + properties: + labels: + description: The list of labels that + describe how the encoder should multiplex + video and audio into an output file. + For example, if the encoder is producing + two video layers with labels v1 and + v2, and one audio layer with label + a1, then an array like ["v1", "a1"] + tells the encoder to produce an output + file with the video track represented + by v1 and the audio track represented + by a1. + items: + type: string + type: array + type: object + type: array + type: object + png: + description: A png block as defined below. + properties: + filenamePattern: + description: 'The file naming pattern used + for the creation of output files. The following + macros are supported in the file name: {Basename} + - An expansion macro that will use the name + of the input video file. If the base name(the + file suffix is not included) of the input + video file is less than 32 characters long, + the base name of input video files will + be used. If the length of base name of the + input video file exceeds 32 characters, + the base name is truncated to the first + 32 characters in total length. {Extension} + - The appropriate extension for this format. + {Label} - The label assigned to the codec/layer. + {Index} - A unique index for thumbnails. + Only applicable to thumbnails. {AudioStream} + - string "Audio" plus audio stream number(start + from 1). {Bitrate} - The audio/video bitrate + in kbps. Not applicable to thumbnails. {Codec} + - The type of the audio/video codec. {Resolution} + - The video resolution. Any unsubstituted + macros will be collapsed and removed from + the filename.' + type: string + type: object + transportStream: + description: A transport_stream block as defined + below. + properties: + filenamePattern: + description: 'The file naming pattern used + for the creation of output files. 
The following + macros are supported in the file name: {Basename} + - An expansion macro that will use the name + of the input video file. If the base name(the + file suffix is not included) of the input + video file is less than 32 characters long, + the base name of input video files will + be used. If the length of base name of the + input video file exceeds 32 characters, + the base name is truncated to the first + 32 characters in total length. {Extension} + - The appropriate extension for this format. + {Label} - The label assigned to the codec/layer. + {Index} - A unique index for thumbnails. + Only applicable to thumbnails. {AudioStream} + - string "Audio" plus audio stream number(start + from 1). {Bitrate} - The audio/video bitrate + in kbps. Not applicable to thumbnails. {Codec} + - The type of the audio/video codec. {Resolution} + - The video resolution. Any unsubstituted + macros will be collapsed and removed from + the filename.' + type: string + outputFile: + description: One or more output_file blocks + as defined above. + items: + properties: + labels: + description: The list of labels that + describe how the encoder should multiplex + video and audio into an output file. + For example, if the encoder is producing + two video layers with labels v1 and + v2, and one audio layer with label + a1, then an array like ["v1", "a1"] + tells the encoder to produce an output + file with the video track represented + by v1 and the audio track represented + by a1. + items: + type: string + type: array + type: object + type: array + type: object + type: object + type: array + type: object + faceDetectorPreset: + description: A face_detector_preset block as defined above. + properties: + analysisResolution: + description: Possible values are SourceResolution or + StandardDefinition. Specifies the maximum resolution + at which your video is analyzed. which will keep the + input video at its original resolution when analyzed. 
+ Using StandardDefinition will resize input videos + to standard definition while preserving the appropriate + aspect ratio. It will only resize if the video is + of higher resolution. For example, a 1920x1080 input + would be scaled to 640x360 before processing. Switching + to StandardDefinition will reduce the time it takes + to process high resolution video. It may also reduce + the cost of using this component (see https://azure.microsoft.com/en-us/pricing/details/media-services/#analytics + for details). However, faces that end up being too + small in the resized video may not be detected. Default + to SourceResolution. + type: string + blurType: + description: Specifies the type of blur to apply to + faces in the output video. Possible values are Black, + Box, High, Low,and Med. + type: string + experimentalOptions: + additionalProperties: + type: string + description: Dictionary containing key value pairs for + parameters not exposed in the preset itself. + type: object + x-kubernetes-map-type: granular + faceRedactorMode: + description: 'This mode provides the ability to choose + between the following settings: 1) Analyze - For detection + only. This mode generates a metadata JSON file marking + appearances of faces throughout the video. Where possible, + appearances of the same person are assigned the same + ID. 2) Combined - Additionally redacts(blurs) detected + faces. 3) Redact - This enables a 2-pass process, + allowing for selective redaction of a subset of detected + faces. It takes in the metadata file from a prior + analyze pass, along with the source video, and a user-selected + subset of IDs that require redaction. Default to Analyze.' + type: string + type: object + onErrorAction: + description: A Transform can define more than one outputs. + This property defines what the service should do when + one output fails - either continue to produce other outputs, + or, stop the other outputs. 
The overall Job state will + not reflect failures of outputs that are specified with + ContinueJob. Possible values are StopProcessingJob or + ContinueJob. Defaults to StopProcessingJob. + type: string + relativePriority: + description: Sets the relative priority of the TransformOutputs + within a Transform. This sets the priority that the service + uses for processing Transform Outputs. Possible values + are High, Normal or Low. Defaults to Normal. + type: string + videoAnalyzerPreset: + description: A video_analyzer_preset block as defined below. + properties: + audioAnalysisMode: + description: Possible values are Basic or Standard. + Determines the set of audio analysis operations to + be performed. Default to Standard. + type: string + audioLanguage: + description: 'The language for the audio payload in + the input using the BCP-47 format of ''language tag-region'' + (e.g: ''en-US''). If you know the language of your + content, it is recommended that you specify it. The + language must be specified explicitly for AudioAnalysisMode:Basic, + since automatic language detection is not included + in basic mode. If the language isn''t specified, automatic + language detection will choose the first language + detected and process with the selected language for + the duration of the file. It does not currently support + dynamically switching between languages after the + first language is detected. The automatic detection + works best with audio recordings with clearly discernible + speech. If automatic detection fails to find the language, + transcription would fall back to en-US. The list of + supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463.' + type: string + experimentalOptions: + additionalProperties: + type: string + description: Dictionary containing key value pairs for + parameters not exposed in the preset itself. 
+ type: object + x-kubernetes-map-type: granular + insightsType: + description: Defines the type of insights that you want + the service to generate. The allowed values are AudioInsightsOnly, + VideoInsightsOnly, and AllInsights. If you set this + to AllInsights and the input is audio only, then only + audio insights are generated. Similarly, if the input + is video only, then only video insights are generated. + It is recommended that you not use AudioInsightsOnly + if you expect some of your inputs to be video only; + or use VideoInsightsOnly if you expect some of your + inputs to be audio only. Your Jobs in such conditions + would error out. Default to AllInsights. + type: string + type: object + type: object + type: array + resourceGroupName: + description: The name of the Resource Group where the Transform + should exist. Changing this forces a new Transform to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: An optional verbose description of the Transform. + type: string + output: + description: One or more output blocks as defined below. At least + one output must be defined. + items: + properties: + audioAnalyzerPreset: + description: An audio_analyzer_preset block as defined above. + properties: + audioAnalysisMode: + description: Possible values are Basic or Standard. + Determines the set of audio analysis operations to + be performed. Default to Standard. + type: string + audioLanguage: + description: 'The language for the audio payload in + the input using the BCP-47 format of ''language tag-region'' + (e.g: ''en-US''). If you know the language of your + content, it is recommended that you specify it. The + language must be specified explicitly for AudioAnalysisMode:Basic, + since automatic language detection is not included + in basic mode. If the language isn''t specified, automatic + language detection will choose the first language + detected and process with the selected language for + the duration of the file. It does not currently support + dynamically switching between languages after the + first language is detected. The automatic detection + works best with audio recordings with clearly discernible + speech. If automatic detection fails to find the language, + transcription would fall back to en-US. The list of + supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463.' + type: string + experimentalOptions: + additionalProperties: + type: string + description: Dictionary containing key value pairs for + parameters not exposed in the preset itself. + type: object + x-kubernetes-map-type: granular + type: object + builtinPreset: + description: A builtin_preset block as defined above. 
+ properties: + presetConfiguration: + description: A preset_configuration block as defined + below. + properties: + complexity: + description: The complexity of the encoding. Possible + values are Balanced, Speed or Quality. + type: string + interleaveOutput: + description: Specifies the interleave mode of the + output to control how audio are stored in the + container format. Possible values are InterleavedOutput + and NonInterleavedOutput. + type: string + keyFrameIntervalInSeconds: + description: The key frame interval in seconds. + Possible value is a positive float. For example, + set as 2.0 to reduce the playback buffering for + some players. + type: number + maxBitrateBps: + description: The maximum bitrate in bits per second + (threshold for the top video layer). For example, + set as 6000000 to avoid producing very high bitrate + outputs for contents with high complexity. + type: number + maxHeight: + description: The maximum height of output video + layers. For example, set as 720 to produce output + layers up to 720P even if the input is 4K. + type: number + maxLayers: + description: The maximum number of output video + layers. For example, set as 4 to make sure at + most 4 output layers are produced to control the + overall cost of the encoding job. + type: number + minBitrateBps: + description: The minimum bitrate in bits per second + (threshold for the bottom video layer). For example, + set as 200000 to have a bottom layer that covers + users with low network bandwidth. + type: number + minHeight: + description: The minimum height of output video + layers. For example, set as 360 to avoid output + layers of smaller resolutions like 180P. + type: number + type: object + presetName: + description: The built-in preset to be used for encoding + videos. 
The Possible values are AACGoodQualityAudio, + AdaptiveStreaming, ContentAwareEncoding, ContentAwareEncodingExperimental, + CopyAllBitrateNonInterleaved, DDGoodQualityAudio, + H265AdaptiveStreaming, H265ContentAwareEncoding, H265SingleBitrate4K, + H265SingleBitrate1080p, H265SingleBitrate720p, H264MultipleBitrate1080p, + H264MultipleBitrateSD, H264MultipleBitrate720p, H264SingleBitrate1080p, + H264SingleBitrateSD and H264SingleBitrate720p. + type: string + type: object + customPreset: + description: A custom_preset block as defined above. + properties: + codec: + description: One or more codec blocks as defined above. + items: + properties: + aacAudio: + description: A aac_audio block as defined above. + properties: + bitrate: + description: The average bitrate in bits per + second at which to encode the input video + when generating this layer. + type: number + channels: + description: The number of audio channels. + Default to 2. + type: number + label: + description: Specifies the label for the codec. + The label can be used to control muxing + behavior. + type: string + profile: + description: The H.264 profile. Possible values + are Auto, Baseline, High, High422, High444,or + Main. Default to Auto. + type: string + samplingRate: + description: The sampling rate to use for + encoding in Hertz. Default to 48000. + type: number + type: object + copyAudio: + description: A copy_audio block as defined below. + properties: + label: + description: Specifies the label for the codec. + The label can be used to control muxing + behavior. + type: string + type: object + copyVideo: + description: A copy_video block as defined below. + properties: + label: + description: Specifies the label for the codec. + The label can be used to control muxing + behavior. + type: string + type: object + ddAudio: + description: A dd_audio block as defined below. 
+ properties: + bitrate: + description: The average bitrate in bits per + second at which to encode the input video + when generating this layer. + type: number + channels: + description: The number of audio channels. + Default to 2. + type: number + label: + description: Specifies the label for the codec. + The label can be used to control muxing + behavior. + type: string + samplingRate: + description: The sampling rate to use for + encoding in Hertz. Default to 48000. + type: number + type: object + h264Video: + description: A h264_video block as defined below. + properties: + complexity: + description: The complexity of the encoding. + Possible values are Balanced, Speed or Quality. + type: string + keyFrameInterval: + description: The distance between two key + frames. The value should be non-zero in + the range 0.5 to 20 seconds, specified in + ISO 8601 format. Note that this setting + is ignored if sync_mode is set to Passthrough, + where the KeyFrameInterval value will follow + the input source setting. Defaults to PT2S. + type: string + label: + description: Specifies the label for the codec. + The label can be used to control muxing + behavior. + type: string + layer: + description: One or more layer blocks as defined + below. + items: + properties: + adaptiveBFrameEnabled: + description: Whether adaptive B-frames + are used when encoding this layer. + If not specified, the encoder will + turn it on whenever the video profile + permits its use. Default to true. + type: boolean + bFrames: + description: The number of B-frames + to use when encoding this layer. If + not specified, the encoder chooses + an appropriate number based on the + video profile and level. + type: number + bitrate: + description: The average bitrate in + bits per second at which to encode + the input video when generating this + layer. + type: number + bufferWindow: + description: Specifies the maximum amount + of time that the encoder should buffer + frames before encoding. 
The value + should be in ISO 8601 format. The + value should be in the range 0.1 to + 100 seconds. Defaults to PT5S. + type: string + crf: + description: The value of CRF to be + used when encoding this layer. This + setting takes effect when rate_control_mode + is set CRF. The range of CRF value + is between 0 and 51, where lower values + would result in better quality, at + the expense of higher file sizes. + Higher values mean more compression, + but at some point quality degradation + will be noticed. Default to 23. + type: number + entropyMode: + description: The entropy mode to be + used for this layer. Possible values + are Cabac or Cavlc. If not specified, + the encoder chooses the mode that + is appropriate for the profile and + level. + type: string + frameRate: + description: The frame rate (in frames + per second) at which to encode this + layer. The value can be in the form + of M/N where M and N are integers + (For example, 30000/1001), or in the + form of a number (For example, 30, + or 29.97). The encoder enforces constraints + on allowed frame rates based on the + profile and level. If it is not specified, + the encoder will use the same frame + rate as the input video. + type: string + height: + description: The height of the rectangular + region in pixels. This can be absolute + pixel value (e.g 100), or relative + to the size of the video (For example, + 50%). + type: string + label: + description: Specifies the label for + the codec. The label can be used to + control muxing behavior. + type: string + level: + description: The H.264 levels. Currently, + the resource support Level up to 6.2. + The value can be auto, or a number + that matches the H.264 profile. If + not specified, the default is auto, + which lets the encoder choose the + Level that is appropriate for this + layer. + type: string + maxBitrate: + description: The maximum bitrate (in + bits per second), at which the VBV + buffer should be assumed to refill. 
+ If not specified, defaults to the + same value as bitrate. + type: number + profile: + description: The H.264 profile. Possible + values are Auto, Baseline, High, High422, + High444,or Main. Default to Auto. + type: string + referenceFrames: + description: The number of reference + frames to be used when encoding this + layer. If not specified, the encoder + determines an appropriate number based + on the encoder complexity setting. + type: number + slices: + description: The number of slices to + be used when encoding this layer. + If not specified, default is 1, which + means that encoder will use a single + slice for each frame. + type: number + width: + description: The width of the rectangular + region in pixels. This can be absolute + pixel value (e.g 100), or relative + to the size of the video (For example, + 50%). + type: string + type: object + type: array + rateControlMode: + description: The rate control mode. Possible + values are ABR, CBR or CRF. Default to ABR. + type: string + sceneChangeDetectionEnabled: + description: Whether the encoder should insert + key frames at scene changes. This flag should + be set to true only when the encoder is + being configured to produce a single output + video. Default to false. + type: boolean + stretchMode: + description: The resizing mode, which indicates + how the input video will be resized to fit + the desired output resolution(s). Possible + values are AutoFit, AutoSize or None. Default + to AutoSize. + type: string + syncMode: + description: Specifies the synchronization + mode for the video. Possible values are + Auto, Cfr, Passthrough or Vfr. Default to + Auto. + type: string + type: object + h265Video: + description: A h265_video block as defined below. + properties: + complexity: + description: The complexity of the encoding. + Possible values are Balanced, Speed or Quality. + type: string + keyFrameInterval: + description: The distance between two key + frames. 
The value should be non-zero in + the range 0.5 to 20 seconds, specified in + ISO 8601 format. Note that this setting + is ignored if sync_mode is set to Passthrough, + where the KeyFrameInterval value will follow + the input source setting. Defaults to PT2S. + type: string + label: + description: Specifies the label for the codec. + The label can be used to control muxing + behavior. + type: string + layer: + description: One or more layer blocks as defined + below. + items: + properties: + adaptiveBFrameEnabled: + description: Whether adaptive B-frames + are used when encoding this layer. + If not specified, the encoder will + turn it on whenever the video profile + permits its use. Default to true. + type: boolean + bFrames: + description: The number of B-frames + to use when encoding this layer. If + not specified, the encoder chooses + an appropriate number based on the + video profile and level. + type: number + bitrate: + description: The average bitrate in + bits per second at which to encode + the input video when generating this + layer. + type: number + bufferWindow: + description: Specifies the maximum amount + of time that the encoder should buffer + frames before encoding. The value + should be in ISO 8601 format. The + value should be in the range 0.1 to + 100 seconds. Defaults to PT5S. + type: string + crf: + description: The value of CRF to be + used when encoding this layer. This + setting takes effect when rate_control_mode + is set CRF. The range of CRF value + is between 0 and 51, where lower values + would result in better quality, at + the expense of higher file sizes. + Higher values mean more compression, + but at some point quality degradation + will be noticed. Default to 23. + type: number + frameRate: + description: The frame rate (in frames + per second) at which to encode this + layer. 
The value can be in the form + of M/N where M and N are integers + (For example, 30000/1001), or in the + form of a number (For example, 30, + or 29.97). The encoder enforces constraints + on allowed frame rates based on the + profile and level. If it is not specified, + the encoder will use the same frame + rate as the input video. + type: string + height: + description: The height of the rectangular + region in pixels. This can be absolute + pixel value (e.g 100), or relative + to the size of the video (For example, + 50%). + type: string + label: + description: Specifies the label for + the codec. The label can be used to + control muxing behavior. + type: string + level: + description: The H.264 levels. Currently, + the resource support Level up to 6.2. + The value can be auto, or a number + that matches the H.264 profile. If + not specified, the default is auto, + which lets the encoder choose the + Level that is appropriate for this + layer. + type: string + maxBitrate: + description: The maximum bitrate (in + bits per second), at which the VBV + buffer should be assumed to refill. + If not specified, defaults to the + same value as bitrate. + type: number + profile: + description: The H.264 profile. Possible + values are Auto, Baseline, High, High422, + High444,or Main. Default to Auto. + type: string + referenceFrames: + description: The number of reference + frames to be used when encoding this + layer. If not specified, the encoder + determines an appropriate number based + on the encoder complexity setting. + type: number + slices: + description: The number of slices to + be used when encoding this layer. + If not specified, default is 1, which + means that encoder will use a single + slice for each frame. + type: number + width: + description: The width of the rectangular + region in pixels. This can be absolute + pixel value (e.g 100), or relative + to the size of the video (For example, + 50%). 
+ type: string + type: object + type: array + sceneChangeDetectionEnabled: + description: Whether the encoder should insert + key frames at scene changes. This flag should + be set to true only when the encoder is + being configured to produce a single output + video. Default to false. + type: boolean + stretchMode: + description: The resizing mode, which indicates + how the input video will be resized to fit + the desired output resolution(s). Possible + values are AutoFit, AutoSize or None. Default + to AutoSize. + type: string + syncMode: + description: Specifies the synchronization + mode for the video. Possible values are + Auto, Cfr, Passthrough or Vfr. Default to + Auto. + type: string + type: object + jpgImage: + description: A jpg_image block as defined below. + properties: + keyFrameInterval: + description: The distance between two key + frames. The value should be non-zero in + the range 0.5 to 20 seconds, specified in + ISO 8601 format. Note that this setting + is ignored if sync_mode is set to Passthrough, + where the KeyFrameInterval value will follow + the input source setting. Defaults to PT2S. + type: string + label: + description: Specifies the label for the codec. + The label can be used to control muxing + behavior. + type: string + layer: + description: One or more layer blocks as defined + below. + items: + properties: + height: + description: The height of the rectangular + region in pixels. This can be absolute + pixel value (e.g 100), or relative + to the size of the video (For example, + 50%). + type: string + label: + description: Specifies the label for + the codec. The label can be used to + control muxing behavior. + type: string + quality: + description: The compression quality + of the JPEG output. Range is from + 0 to 100 and the default is 70. + type: number + width: + description: The width of the rectangular + region in pixels. 
This can be absolute + pixel value (e.g 100), or relative + to the size of the video (For example, + 50%). + type: string + type: object + type: array + range: + description: The position relative to transform + preset start time in the input video at + which to stop generating thumbnails. The + value can be in ISO 8601 format (For example, + PT5M30S to stop at 5 minutes and 30 seconds + from start time), or a frame count (For + example, 300 to stop at the 300th frame + from the frame at start time. If this value + is 1, it means only producing one thumbnail + at start time), or a relative value to the + stream duration (For example, 50% to stop + at half of stream duration from start time). + The default value is 100%, which means to + stop at the end of the stream. + type: string + spriteColumn: + description: 'Sets the number of columns used + in thumbnail sprite image. The number of + rows are automatically calculated and a + VTT file is generated with the coordinate + mappings for each thumbnail in the sprite. + Note: this value should be a positive integer + and a proper value is recommended so that + the output image resolution will not go + beyond JPEG maximum pixel resolution limit + 65535x65535.' + type: number + start: + description: The start position, with reference + to the input video, at which the overlay + starts. The value should be in ISO 8601 + format. For example, PT05S to start the + overlay at 5 seconds into the input video. + If not specified the overlay starts from + the beginning of the input video. + type: string + step: + description: 'The intervals at which thumbnails + are generated. The value can be in ISO 8601 + format (For example, PT05S for one image + every 5 seconds), or a frame count (For + example, 30 for one image every 30 frames), + or a relative value to stream duration (For + example, 10% for one image every 10% of + stream duration). 
Note: Step value will + affect the first generated thumbnail, which + may not be exactly the one specified at + transform preset start time. This is due + to the encoder, which tries to select the + best thumbnail between start time and Step + position from start time as the first output. + As the default value is 10%, it means if + stream has long duration, the first generated + thumbnail might be far away from the one + specified at start time. Try to select reasonable + value for Step if the first thumbnail is + expected close to start time, or set Range + value at 1 if only one thumbnail is needed + at start time.' + type: string + stretchMode: + description: The resizing mode, which indicates + how the input video will be resized to fit + the desired output resolution(s). Possible + values are AutoFit, AutoSize or None. Default + to AutoSize. + type: string + syncMode: + description: Specifies the synchronization + mode for the video. Possible values are + Auto, Cfr, Passthrough or Vfr. Default to + Auto. + type: string + type: object + pngImage: + description: A png_image block as defined below. + properties: + keyFrameInterval: + description: The distance between two key + frames. The value should be non-zero in + the range 0.5 to 20 seconds, specified in + ISO 8601 format. Note that this setting + is ignored if sync_mode is set to Passthrough, + where the KeyFrameInterval value will follow + the input source setting. Defaults to PT2S. + type: string + label: + description: Specifies the label for the codec. + The label can be used to control muxing + behavior. + type: string + layer: + description: One or more layer blocks as defined + below. + items: + properties: + height: + description: The height of the rectangular + region in pixels. This can be absolute + pixel value (e.g 100), or relative + to the size of the video (For example, + 50%). + type: string + label: + description: Specifies the label for + the codec. 
The label can be used to + control muxing behavior. + type: string + width: + description: The width of the rectangular + region in pixels. This can be absolute + pixel value (e.g 100), or relative + to the size of the video (For example, + 50%). + type: string + type: object + type: array + range: + description: The position relative to transform + preset start time in the input video at + which to stop generating thumbnails. The + value can be in ISO 8601 format (For example, + PT5M30S to stop at 5 minutes and 30 seconds + from start time), or a frame count (For + example, 300 to stop at the 300th frame + from the frame at start time. If this value + is 1, it means only producing one thumbnail + at start time), or a relative value to the + stream duration (For example, 50% to stop + at half of stream duration from start time). + The default value is 100%, which means to + stop at the end of the stream. + type: string + start: + description: The start position, with reference + to the input video, at which the overlay + starts. The value should be in ISO 8601 + format. For example, PT05S to start the + overlay at 5 seconds into the input video. + If not specified the overlay starts from + the beginning of the input video. + type: string + step: + description: 'The intervals at which thumbnails + are generated. The value can be in ISO 8601 + format (For example, PT05S for one image + every 5 seconds), or a frame count (For + example, 30 for one image every 30 frames), + or a relative value to stream duration (For + example, 10% for one image every 10% of + stream duration). Note: Step value will + affect the first generated thumbnail, which + may not be exactly the one specified at + transform preset start time. This is due + to the encoder, which tries to select the + best thumbnail between start time and Step + position from start time as the first output. 
+ As the default value is 10%, it means if + stream has long duration, the first generated + thumbnail might be far away from the one + specified at start time. Try to select reasonable + value for Step if the first thumbnail is + expected close to start time, or set Range + value at 1 if only one thumbnail is needed + at start time.' + type: string + stretchMode: + description: The resizing mode, which indicates + how the input video will be resized to fit + the desired output resolution(s). Possible + values are AutoFit, AutoSize or None. Default + to AutoSize. + type: string + syncMode: + description: Specifies the synchronization + mode for the video. Possible values are + Auto, Cfr, Passthrough or Vfr. Default to + Auto. + type: string + type: object + type: object + type: array + experimentalOptions: + additionalProperties: + type: string + description: Dictionary containing key value pairs for + parameters not exposed in the preset itself. + type: object + x-kubernetes-map-type: granular + filter: + description: A filter block as defined below. + properties: + cropRectangle: + description: A crop_rectangle block as defined above. + properties: + height: + description: The height of the rectangular region + in pixels. This can be absolute pixel value + (e.g 100), or relative to the size of the + video (For example, 50%). + type: string + left: + description: The number of pixels from the left-margin. + This can be absolute pixel value (e.g 100), + or relative to the size of the video (For + example, 50%). + type: string + top: + description: The number of pixels from the top-margin. + This can be absolute pixel value (e.g 100), + or relative to the size of the video (For + example, 50%). + type: string + width: + description: The width of the rectangular region + in pixels. This can be absolute pixel value + (e.g 100), or relative to the size of the + video (For example, 50%). 
+ type: string + type: object + deinterlace: + description: A deinterlace block as defined below. + properties: + mode: + description: The deinterlacing mode. Possible + values are AutoPixelAdaptive or Off. Default + to AutoPixelAdaptive. + type: string + parity: + description: The field parity to use for deinterlacing. + Possible values are Auto, TopFieldFirst or + BottomFieldFirst. Default to Auto. + type: string + type: object + fadeIn: + description: A fade_in block as defined above. + properties: + duration: + description: The duration of the fade effect + in the video. The value can be in ISO 8601 + format (For example, PT05S to fade In/Out + a color during 5 seconds), or a frame count + (For example, 10 to fade 10 frames from the + start time), or a relative value to stream + duration (For example, 10% to fade 10% of + stream duration). + type: string + fadeColor: + description: 'The color for the fade in/out. + It can be on the CSS Level1 colors or an RGB/hex + value: e.g: rgb(255,0,0), 0xFF0000 or #FF0000.' + type: string + start: + description: The start position, with reference + to the input video, at which the overlay starts. + The value should be in ISO 8601 format. For + example, PT05S to start the overlay at 5 seconds + into the input video. If not specified the + overlay starts from the beginning of the input + video. + type: string + type: object + fadeOut: + description: A fade_out block as defined above. + properties: + duration: + description: The duration of the fade effect + in the video. The value can be in ISO 8601 + format (For example, PT05S to fade In/Out + a color during 5 seconds), or a frame count + (For example, 10 to fade 10 frames from the + start time), or a relative value to stream + duration (For example, 10% to fade 10% of + stream duration). + type: string + fadeColor: + description: 'The color for the fade in/out. + It can be on the CSS Level1 colors or an RGB/hex + value: e.g: rgb(255,0,0), 0xFF0000 or #FF0000.' 
+ type: string + start: + description: The start position, with reference + to the input video, at which the overlay starts. + The value should be in ISO 8601 format. For + example, PT05S to start the overlay at 5 seconds + into the input video. If not specified the + overlay starts from the beginning of the input + video. + type: string + type: object + overlay: + description: One or more overlay blocks as defined + below. + items: + properties: + audio: + description: An audio block as defined above. + properties: + audioGainLevel: + description: The gain level of audio in + the overlay. The value should be in + range between 0 to 1.0. The default + is 1.0. + type: number + end: + description: The end position, with reference + to the input video, at which the overlay + ends. The value should be in ISO 8601 + format. For example, PT30S to end the + overlay at 30 seconds into the input + video. If not specified or the value + is greater than the input video duration, + the overlay will be applied until the + end of the input video if the overlay + media duration is greater than the input + video duration, else the overlay will + last as long as the overlay media duration. + type: string + fadeInDuration: + description: The duration over which the + overlay fades in onto the input video. + The value should be in ISO 8601 duration + format. If not specified the default + behavior is to have no fade in (same + as PT0S). + type: string + fadeOutDuration: + description: The duration over which the + overlay fades out of the input video. + The value should be in ISO 8601 duration + format. If not specified the default + behavior is to have no fade out (same + as PT0S). + type: string + inputLabel: + description: The label of the job input + which is to be used as an overlay. The + input must specify exact one file. You + can specify an image file in JPG, PNG, + GIF or BMP format, or an audio file + (such as a WAV, MP3, WMA or M4A file), + or a video file. 
+ type: string + start: + description: The start position, with + reference to the input video, at which + the overlay starts. The value should + be in ISO 8601 format. For example, + PT05S to start the overlay at 5 seconds + into the input video. If not specified + the overlay starts from the beginning + of the input video. + type: string + type: object + video: + description: A video block as defined below. + properties: + audioGainLevel: + description: The gain level of audio in + the overlay. The value should be in + range between 0 to 1.0. The default + is 1.0. + type: number + cropRectangle: + description: A crop_rectangle block as + defined above. + properties: + height: + description: The height of the rectangular + region in pixels. This can be absolute + pixel value (e.g 100), or relative + to the size of the video (For example, + 50%). + type: string + left: + description: The number of pixels + from the left-margin. This can be + absolute pixel value (e.g 100), + or relative to the size of the video + (For example, 50%). + type: string + top: + description: The number of pixels + from the top-margin. This can be + absolute pixel value (e.g 100), + or relative to the size of the video + (For example, 50%). + type: string + width: + description: The width of the rectangular + region in pixels. This can be absolute + pixel value (e.g 100), or relative + to the size of the video (For example, + 50%). + type: string + type: object + end: + description: The end position, with reference + to the input video, at which the overlay + ends. The value should be in ISO 8601 + format. For example, PT30S to end the + overlay at 30 seconds into the input + video. If not specified or the value + is greater than the input video duration, + the overlay will be applied until the + end of the input video if the overlay + media duration is greater than the input + video duration, else the overlay will + last as long as the overlay media duration. 
+ type: string + fadeInDuration: + description: The duration over which the + overlay fades in onto the input video. + The value should be in ISO 8601 duration + format. If not specified the default + behavior is to have no fade in (same + as PT0S). + type: string + fadeOutDuration: + description: The duration over which the + overlay fades out of the input video. + The value should be in ISO 8601 duration + format. If not specified the default + behavior is to have no fade out (same + as PT0S). + type: string + inputLabel: + description: The label of the job input + which is to be used as an overlay. The + input must specify exact one file. You + can specify an image file in JPG, PNG, + GIF or BMP format, or an audio file + (such as a WAV, MP3, WMA or M4A file), + or a video file. + type: string + opacity: + description: The opacity of the overlay. + The value should be in the range between + 0 to 1.0. Default to 1.0, which means + the overlay is opaque. + type: number + position: + description: A position block as defined + above. + properties: + height: + description: The height of the rectangular + region in pixels. This can be absolute + pixel value (e.g 100), or relative + to the size of the video (For example, + 50%). + type: string + left: + description: The number of pixels + from the left-margin. This can be + absolute pixel value (e.g 100), + or relative to the size of the video + (For example, 50%). + type: string + top: + description: The number of pixels + from the top-margin. This can be + absolute pixel value (e.g 100), + or relative to the size of the video + (For example, 50%). + type: string + width: + description: The width of the rectangular + region in pixels. This can be absolute + pixel value (e.g 100), or relative + to the size of the video (For example, + 50%). + type: string + type: object + start: + description: The start position, with + reference to the input video, at which + the overlay starts. 
The value should + be in ISO 8601 format. For example, + PT05S to start the overlay at 5 seconds + into the input video. If not specified + the overlay starts from the beginning + of the input video. + type: string + type: object + type: object + type: array + rotation: + description: The rotation to be applied to the input + video before it is encoded. Possible values are + Auto, None, Rotate90, Rotate180, Rotate270,or + Rotate0. Default to Auto. + type: string + type: object + format: + description: One or more format blocks as defined below. + items: + properties: + jpg: + description: A jpg block as defined below. + properties: + filenamePattern: + description: 'The file naming pattern used + for the creation of output files. The following + macros are supported in the file name: {Basename} + - An expansion macro that will use the name + of the input video file. If the base name(the + file suffix is not included) of the input + video file is less than 32 characters long, + the base name of input video files will + be used. If the length of base name of the + input video file exceeds 32 characters, + the base name is truncated to the first + 32 characters in total length. {Extension} + - The appropriate extension for this format. + {Label} - The label assigned to the codec/layer. + {Index} - A unique index for thumbnails. + Only applicable to thumbnails. {AudioStream} + - string "Audio" plus audio stream number(start + from 1). {Bitrate} - The audio/video bitrate + in kbps. Not applicable to thumbnails. {Codec} + - The type of the audio/video codec. {Resolution} + - The video resolution. Any unsubstituted + macros will be collapsed and removed from + the filename.' + type: string + type: object + mp4: + description: A mp4 block as defined below. + properties: + filenamePattern: + description: 'The file naming pattern used + for the creation of output files. 
The following + macros are supported in the file name: {Basename} + - An expansion macro that will use the name + of the input video file. If the base name(the + file suffix is not included) of the input + video file is less than 32 characters long, + the base name of input video files will + be used. If the length of base name of the + input video file exceeds 32 characters, + the base name is truncated to the first + 32 characters in total length. {Extension} + - The appropriate extension for this format. + {Label} - The label assigned to the codec/layer. + {Index} - A unique index for thumbnails. + Only applicable to thumbnails. {AudioStream} + - string "Audio" plus audio stream number(start + from 1). {Bitrate} - The audio/video bitrate + in kbps. Not applicable to thumbnails. {Codec} + - The type of the audio/video codec. {Resolution} + - The video resolution. Any unsubstituted + macros will be collapsed and removed from + the filename.' + type: string + outputFile: + description: One or more output_file blocks + as defined above. + items: + properties: + labels: + description: The list of labels that + describe how the encoder should multiplex + video and audio into an output file. + For example, if the encoder is producing + two video layers with labels v1 and + v2, and one audio layer with label + a1, then an array like ["v1", "a1"] + tells the encoder to produce an output + file with the video track represented + by v1 and the audio track represented + by a1. + items: + type: string + type: array + type: object + type: array + type: object + png: + description: A png block as defined below. + properties: + filenamePattern: + description: 'The file naming pattern used + for the creation of output files. The following + macros are supported in the file name: {Basename} + - An expansion macro that will use the name + of the input video file. 
If the base name(the + file suffix is not included) of the input + video file is less than 32 characters long, + the base name of input video files will + be used. If the length of base name of the + input video file exceeds 32 characters, + the base name is truncated to the first + 32 characters in total length. {Extension} + - The appropriate extension for this format. + {Label} - The label assigned to the codec/layer. + {Index} - A unique index for thumbnails. + Only applicable to thumbnails. {AudioStream} + - string "Audio" plus audio stream number(start + from 1). {Bitrate} - The audio/video bitrate + in kbps. Not applicable to thumbnails. {Codec} + - The type of the audio/video codec. {Resolution} + - The video resolution. Any unsubstituted + macros will be collapsed and removed from + the filename.' + type: string + type: object + transportStream: + description: A transport_stream block as defined + below. + properties: + filenamePattern: + description: 'The file naming pattern used + for the creation of output files. The following + macros are supported in the file name: {Basename} + - An expansion macro that will use the name + of the input video file. If the base name(the + file suffix is not included) of the input + video file is less than 32 characters long, + the base name of input video files will + be used. If the length of base name of the + input video file exceeds 32 characters, + the base name is truncated to the first + 32 characters in total length. {Extension} + - The appropriate extension for this format. + {Label} - The label assigned to the codec/layer. + {Index} - A unique index for thumbnails. + Only applicable to thumbnails. {AudioStream} + - string "Audio" plus audio stream number(start + from 1). {Bitrate} - The audio/video bitrate + in kbps. Not applicable to thumbnails. {Codec} + - The type of the audio/video codec. {Resolution} + - The video resolution. Any unsubstituted + macros will be collapsed and removed from + the filename.' 
+ type: string + outputFile: + description: One or more output_file blocks + as defined above. + items: + properties: + labels: + description: The list of labels that + describe how the encoder should multiplex + video and audio into an output file. + For example, if the encoder is producing + two video layers with labels v1 and + v2, and one audio layer with label + a1, then an array like ["v1", "a1"] + tells the encoder to produce an output + file with the video track represented + by v1 and the audio track represented + by a1. + items: + type: string + type: array + type: object + type: array + type: object + type: object + type: array + type: object + faceDetectorPreset: + description: A face_detector_preset block as defined above. + properties: + analysisResolution: + description: Possible values are SourceResolution or + StandardDefinition. Specifies the maximum resolution + at which your video is analyzed. which will keep the + input video at its original resolution when analyzed. + Using StandardDefinition will resize input videos + to standard definition while preserving the appropriate + aspect ratio. It will only resize if the video is + of higher resolution. For example, a 1920x1080 input + would be scaled to 640x360 before processing. Switching + to StandardDefinition will reduce the time it takes + to process high resolution video. It may also reduce + the cost of using this component (see https://azure.microsoft.com/en-us/pricing/details/media-services/#analytics + for details). However, faces that end up being too + small in the resized video may not be detected. Default + to SourceResolution. + type: string + blurType: + description: Specifies the type of blur to apply to + faces in the output video. Possible values are Black, + Box, High, Low,and Med. + type: string + experimentalOptions: + additionalProperties: + type: string + description: Dictionary containing key value pairs for + parameters not exposed in the preset itself. 
+ type: object + x-kubernetes-map-type: granular + faceRedactorMode: + description: 'This mode provides the ability to choose + between the following settings: 1) Analyze - For detection + only. This mode generates a metadata JSON file marking + appearances of faces throughout the video. Where possible, + appearances of the same person are assigned the same + ID. 2) Combined - Additionally redacts(blurs) detected + faces. 3) Redact - This enables a 2-pass process, + allowing for selective redaction of a subset of detected + faces. It takes in the metadata file from a prior + analyze pass, along with the source video, and a user-selected + subset of IDs that require redaction. Default to Analyze.' + type: string + type: object + onErrorAction: + description: A Transform can define more than one outputs. + This property defines what the service should do when + one output fails - either continue to produce other outputs, + or, stop the other outputs. The overall Job state will + not reflect failures of outputs that are specified with + ContinueJob. Possible values are StopProcessingJob or + ContinueJob. Defaults to StopProcessingJob. + type: string + relativePriority: + description: Sets the relative priority of the TransformOutputs + within a Transform. This sets the priority that the service + uses for processing Transform Outputs. Possible values + are High, Normal or Low. Defaults to Normal. + type: string + videoAnalyzerPreset: + description: A video_analyzer_preset block as defined below. + properties: + audioAnalysisMode: + description: Possible values are Basic or Standard. + Determines the set of audio analysis operations to + be performed. Default to Standard. + type: string + audioLanguage: + description: 'The language for the audio payload in + the input using the BCP-47 format of ''language tag-region'' + (e.g: ''en-US''). If you know the language of your + content, it is recommended that you specify it. 
The + language must be specified explicitly for AudioAnalysisMode:Basic, + since automatic language detection is not included + in basic mode. If the language isn''t specified, automatic + language detection will choose the first language + detected and process with the selected language for + the duration of the file. It does not currently support + dynamically switching between languages after the + first language is detected. The automatic detection + works best with audio recordings with clearly discernible + speech. If automatic detection fails to find the language, + transcription would fall back to en-US. The list of + supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463.' + type: string + experimentalOptions: + additionalProperties: + type: string + description: Dictionary containing key value pairs for + parameters not exposed in the preset itself. + type: object + x-kubernetes-map-type: granular + insightsType: + description: Defines the type of insights that you want + the service to generate. The allowed values are AudioInsightsOnly, + VideoInsightsOnly, and AllInsights. If you set this + to AllInsights and the input is audio only, then only + audio insights are generated. Similarly, if the input + is video only, then only video insights are generated. + It is recommended that you not use AudioInsightsOnly + if you expect some of your inputs to be video only; + or use VideoInsightsOnly if you expect some of your + inputs to be audio only. Your Jobs in such conditions + would error out. Default to AllInsights. + type: string + type: object + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. 
+ This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: TransformStatus defines the observed state of Transform. + properties: + atProvider: + properties: + description: + description: An optional verbose description of the Transform. + type: string + id: + description: The ID of the Transform. + type: string + mediaServicesAccountName: + description: The Media Services account name. Changing this forces + a new Transform to be created. + type: string + output: + description: One or more output blocks as defined below. At least + one output must be defined. 
+ items: + properties: + audioAnalyzerPreset: + description: An audio_analyzer_preset block as defined above. + properties: + audioAnalysisMode: + description: Possible values are Basic or Standard. + Determines the set of audio analysis operations to + be performed. Default to Standard. + type: string + audioLanguage: + description: 'The language for the audio payload in + the input using the BCP-47 format of ''language tag-region'' + (e.g: ''en-US''). If you know the language of your + content, it is recommended that you specify it. The + language must be specified explicitly for AudioAnalysisMode:Basic, + since automatic language detection is not included + in basic mode. If the language isn''t specified, automatic + language detection will choose the first language + detected and process with the selected language for + the duration of the file. It does not currently support + dynamically switching between languages after the + first language is detected. The automatic detection + works best with audio recordings with clearly discernible + speech. If automatic detection fails to find the language, + transcription would fall back to en-US. The list of + supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463.' + type: string + experimentalOptions: + additionalProperties: + type: string + description: Dictionary containing key value pairs for + parameters not exposed in the preset itself. + type: object + x-kubernetes-map-type: granular + type: object + builtinPreset: + description: A builtin_preset block as defined above. + properties: + presetConfiguration: + description: A preset_configuration block as defined + below. + properties: + complexity: + description: The complexity of the encoding. Possible + values are Balanced, Speed or Quality. + type: string + interleaveOutput: + description: Specifies the interleave mode of the + output to control how audio are stored in the + container format. 
Possible values are InterleavedOutput + and NonInterleavedOutput. + type: string + keyFrameIntervalInSeconds: + description: The key frame interval in seconds. + Possible value is a positive float. For example, + set as 2.0 to reduce the playback buffering for + some players. + type: number + maxBitrateBps: + description: The maximum bitrate in bits per second + (threshold for the top video layer). For example, + set as 6000000 to avoid producing very high bitrate + outputs for contents with high complexity. + type: number + maxHeight: + description: The maximum height of output video + layers. For example, set as 720 to produce output + layers up to 720P even if the input is 4K. + type: number + maxLayers: + description: The maximum number of output video + layers. For example, set as 4 to make sure at + most 4 output layers are produced to control the + overall cost of the encoding job. + type: number + minBitrateBps: + description: The minimum bitrate in bits per second + (threshold for the bottom video layer). For example, + set as 200000 to have a bottom layer that covers + users with low network bandwidth. + type: number + minHeight: + description: The minimum height of output video + layers. For example, set as 360 to avoid output + layers of smaller resolutions like 180P. + type: number + type: object + presetName: + description: The built-in preset to be used for encoding + videos. The Possible values are AACGoodQualityAudio, + AdaptiveStreaming, ContentAwareEncoding, ContentAwareEncodingExperimental, + CopyAllBitrateNonInterleaved, DDGoodQualityAudio, + H265AdaptiveStreaming, H265ContentAwareEncoding, H265SingleBitrate4K, + H265SingleBitrate1080p, H265SingleBitrate720p, H264MultipleBitrate1080p, + H264MultipleBitrateSD, H264MultipleBitrate720p, H264SingleBitrate1080p, + H264SingleBitrateSD and H264SingleBitrate720p. + type: string + type: object + customPreset: + description: A custom_preset block as defined above. 
+ properties: + codec: + description: One or more codec blocks as defined above. + items: + properties: + aacAudio: + description: A aac_audio block as defined above. + properties: + bitrate: + description: The average bitrate in bits per + second at which to encode the input video + when generating this layer. + type: number + channels: + description: The number of audio channels. + Default to 2. + type: number + label: + description: Specifies the label for the codec. + The label can be used to control muxing + behavior. + type: string + profile: + description: The H.264 profile. Possible values + are Auto, Baseline, High, High422, High444,or + Main. Default to Auto. + type: string + samplingRate: + description: The sampling rate to use for + encoding in Hertz. Default to 48000. + type: number + type: object + copyAudio: + description: A copy_audio block as defined below. + properties: + label: + description: Specifies the label for the codec. + The label can be used to control muxing + behavior. + type: string + type: object + copyVideo: + description: A copy_video block as defined below. + properties: + label: + description: Specifies the label for the codec. + The label can be used to control muxing + behavior. + type: string + type: object + ddAudio: + description: A dd_audio block as defined below. + properties: + bitrate: + description: The average bitrate in bits per + second at which to encode the input video + when generating this layer. + type: number + channels: + description: The number of audio channels. + Default to 2. + type: number + label: + description: Specifies the label for the codec. + The label can be used to control muxing + behavior. + type: string + samplingRate: + description: The sampling rate to use for + encoding in Hertz. Default to 48000. + type: number + type: object + h264Video: + description: A h264_video block as defined below. + properties: + complexity: + description: The complexity of the encoding. 
+ Possible values are Balanced, Speed or Quality. + type: string + keyFrameInterval: + description: The distance between two key + frames. The value should be non-zero in + the range 0.5 to 20 seconds, specified in + ISO 8601 format. Note that this setting + is ignored if sync_mode is set to Passthrough, + where the KeyFrameInterval value will follow + the input source setting. Defaults to PT2S. + type: string + label: + description: Specifies the label for the codec. + The label can be used to control muxing + behavior. + type: string + layer: + description: One or more layer blocks as defined + below. + items: + properties: + adaptiveBFrameEnabled: + description: Whether adaptive B-frames + are used when encoding this layer. + If not specified, the encoder will + turn it on whenever the video profile + permits its use. Default to true. + type: boolean + bFrames: + description: The number of B-frames + to use when encoding this layer. If + not specified, the encoder chooses + an appropriate number based on the + video profile and level. + type: number + bitrate: + description: The average bitrate in + bits per second at which to encode + the input video when generating this + layer. + type: number + bufferWindow: + description: Specifies the maximum amount + of time that the encoder should buffer + frames before encoding. The value + should be in ISO 8601 format. The + value should be in the range 0.1 to + 100 seconds. Defaults to PT5S. + type: string + crf: + description: The value of CRF to be + used when encoding this layer. This + setting takes effect when rate_control_mode + is set CRF. The range of CRF value + is between 0 and 51, where lower values + would result in better quality, at + the expense of higher file sizes. + Higher values mean more compression, + but at some point quality degradation + will be noticed. Default to 23. + type: number + entropyMode: + description: The entropy mode to be + used for this layer. Possible values + are Cabac or Cavlc. 
If not specified, + the encoder chooses the mode that + is appropriate for the profile and + level. + type: string + frameRate: + description: The frame rate (in frames + per second) at which to encode this + layer. The value can be in the form + of M/N where M and N are integers + (For example, 30000/1001), or in the + form of a number (For example, 30, + or 29.97). The encoder enforces constraints + on allowed frame rates based on the + profile and level. If it is not specified, + the encoder will use the same frame + rate as the input video. + type: string + height: + description: The height of the rectangular + region in pixels. This can be absolute + pixel value (e.g 100), or relative + to the size of the video (For example, + 50%). + type: string + label: + description: Specifies the label for + the codec. The label can be used to + control muxing behavior. + type: string + level: + description: The H.264 levels. Currently, + the resource support Level up to 6.2. + The value can be auto, or a number + that matches the H.264 profile. If + not specified, the default is auto, + which lets the encoder choose the + Level that is appropriate for this + layer. + type: string + maxBitrate: + description: The maximum bitrate (in + bits per second), at which the VBV + buffer should be assumed to refill. + If not specified, defaults to the + same value as bitrate. + type: number + profile: + description: The H.264 profile. Possible + values are Auto, Baseline, High, High422, + High444,or Main. Default to Auto. + type: string + referenceFrames: + description: The number of reference + frames to be used when encoding this + layer. If not specified, the encoder + determines an appropriate number based + on the encoder complexity setting. + type: number + slices: + description: The number of slices to + be used when encoding this layer. + If not specified, default is 1, which + means that encoder will use a single + slice for each frame. 
+ type: number + width: + description: The width of the rectangular + region in pixels. This can be absolute + pixel value (e.g 100), or relative + to the size of the video (For example, + 50%). + type: string + type: object + type: array + rateControlMode: + description: The rate control mode. Possible + values are ABR, CBR or CRF. Default to ABR. + type: string + sceneChangeDetectionEnabled: + description: Whether the encoder should insert + key frames at scene changes. This flag should + be set to true only when the encoder is + being configured to produce a single output + video. Default to false. + type: boolean + stretchMode: + description: The resizing mode, which indicates + how the input video will be resized to fit + the desired output resolution(s). Possible + values are AutoFit, AutoSize or None. Default + to AutoSize. + type: string + syncMode: + description: Specifies the synchronization + mode for the video. Possible values are + Auto, Cfr, Passthrough or Vfr. Default to + Auto. + type: string + type: object + h265Video: + description: A h265_video block as defined below. + properties: + complexity: + description: The complexity of the encoding. + Possible values are Balanced, Speed or Quality. + type: string + keyFrameInterval: + description: The distance between two key + frames. The value should be non-zero in + the range 0.5 to 20 seconds, specified in + ISO 8601 format. Note that this setting + is ignored if sync_mode is set to Passthrough, + where the KeyFrameInterval value will follow + the input source setting. Defaults to PT2S. + type: string + label: + description: Specifies the label for the codec. + The label can be used to control muxing + behavior. + type: string + layer: + description: One or more layer blocks as defined + below. + items: + properties: + adaptiveBFrameEnabled: + description: Whether adaptive B-frames + are used when encoding this layer. 
+ If not specified, the encoder will + turn it on whenever the video profile + permits its use. Default to true. + type: boolean + bFrames: + description: The number of B-frames + to use when encoding this layer. If + not specified, the encoder chooses + an appropriate number based on the + video profile and level. + type: number + bitrate: + description: The average bitrate in + bits per second at which to encode + the input video when generating this + layer. + type: number + bufferWindow: + description: Specifies the maximum amount + of time that the encoder should buffer + frames before encoding. The value + should be in ISO 8601 format. The + value should be in the range 0.1 to + 100 seconds. Defaults to PT5S. + type: string + crf: + description: The value of CRF to be + used when encoding this layer. This + setting takes effect when rate_control_mode + is set CRF. The range of CRF value + is between 0 and 51, where lower values + would result in better quality, at + the expense of higher file sizes. + Higher values mean more compression, + but at some point quality degradation + will be noticed. Default to 23. + type: number + frameRate: + description: The frame rate (in frames + per second) at which to encode this + layer. The value can be in the form + of M/N where M and N are integers + (For example, 30000/1001), or in the + form of a number (For example, 30, + or 29.97). The encoder enforces constraints + on allowed frame rates based on the + profile and level. If it is not specified, + the encoder will use the same frame + rate as the input video. + type: string + height: + description: The height of the rectangular + region in pixels. This can be absolute + pixel value (e.g 100), or relative + to the size of the video (For example, + 50%). + type: string + label: + description: Specifies the label for + the codec. The label can be used to + control muxing behavior. + type: string + level: + description: The H.264 levels. 
Currently, + the resource support Level up to 6.2. + The value can be auto, or a number + that matches the H.264 profile. If + not specified, the default is auto, + which lets the encoder choose the + Level that is appropriate for this + layer. + type: string + maxBitrate: + description: The maximum bitrate (in + bits per second), at which the VBV + buffer should be assumed to refill. + If not specified, defaults to the + same value as bitrate. + type: number + profile: + description: The H.264 profile. Possible + values are Auto, Baseline, High, High422, + High444,or Main. Default to Auto. + type: string + referenceFrames: + description: The number of reference + frames to be used when encoding this + layer. If not specified, the encoder + determines an appropriate number based + on the encoder complexity setting. + type: number + slices: + description: The number of slices to + be used when encoding this layer. + If not specified, default is 1, which + means that encoder will use a single + slice for each frame. + type: number + width: + description: The width of the rectangular + region in pixels. This can be absolute + pixel value (e.g 100), or relative + to the size of the video (For example, + 50%). + type: string + type: object + type: array + sceneChangeDetectionEnabled: + description: Whether the encoder should insert + key frames at scene changes. This flag should + be set to true only when the encoder is + being configured to produce a single output + video. Default to false. + type: boolean + stretchMode: + description: The resizing mode, which indicates + how the input video will be resized to fit + the desired output resolution(s). Possible + values are AutoFit, AutoSize or None. Default + to AutoSize. + type: string + syncMode: + description: Specifies the synchronization + mode for the video. Possible values are + Auto, Cfr, Passthrough or Vfr. Default to + Auto. 
+ type: string + type: object + jpgImage: + description: A jpg_image block as defined below. + properties: + keyFrameInterval: + description: The distance between two key + frames. The value should be non-zero in + the range 0.5 to 20 seconds, specified in + ISO 8601 format. Note that this setting + is ignored if sync_mode is set to Passthrough, + where the KeyFrameInterval value will follow + the input source setting. Defaults to PT2S. + type: string + label: + description: Specifies the label for the codec. + The label can be used to control muxing + behavior. + type: string + layer: + description: One or more layer blocks as defined + below. + items: + properties: + height: + description: The height of the rectangular + region in pixels. This can be absolute + pixel value (e.g 100), or relative + to the size of the video (For example, + 50%). + type: string + label: + description: Specifies the label for + the codec. The label can be used to + control muxing behavior. + type: string + quality: + description: The compression quality + of the JPEG output. Range is from + 0 to 100 and the default is 70. + type: number + width: + description: The width of the rectangular + region in pixels. This can be absolute + pixel value (e.g 100), or relative + to the size of the video (For example, + 50%). + type: string + type: object + type: array + range: + description: The position relative to transform + preset start time in the input video at + which to stop generating thumbnails. The + value can be in ISO 8601 format (For example, + PT5M30S to stop at 5 minutes and 30 seconds + from start time), or a frame count (For + example, 300 to stop at the 300th frame + from the frame at start time. If this value + is 1, it means only producing one thumbnail + at start time), or a relative value to the + stream duration (For example, 50% to stop + at half of stream duration from start time). + The default value is 100%, which means to + stop at the end of the stream. 
+ type: string + spriteColumn: + description: 'Sets the number of columns used + in thumbnail sprite image. The number of + rows are automatically calculated and a + VTT file is generated with the coordinate + mappings for each thumbnail in the sprite. + Note: this value should be a positive integer + and a proper value is recommended so that + the output image resolution will not go + beyond JPEG maximum pixel resolution limit + 65535x65535.' + type: number + start: + description: The start position, with reference + to the input video, at which the overlay + starts. The value should be in ISO 8601 + format. For example, PT05S to start the + overlay at 5 seconds into the input video. + If not specified the overlay starts from + the beginning of the input video. + type: string + step: + description: 'The intervals at which thumbnails + are generated. The value can be in ISO 8601 + format (For example, PT05S for one image + every 5 seconds), or a frame count (For + example, 30 for one image every 30 frames), + or a relative value to stream duration (For + example, 10% for one image every 10% of + stream duration). Note: Step value will + affect the first generated thumbnail, which + may not be exactly the one specified at + transform preset start time. This is due + to the encoder, which tries to select the + best thumbnail between start time and Step + position from start time as the first output. + As the default value is 10%, it means if + stream has long duration, the first generated + thumbnail might be far away from the one + specified at start time. Try to select reasonable + value for Step if the first thumbnail is + expected close to start time, or set Range + value at 1 if only one thumbnail is needed + at start time.' + type: string + stretchMode: + description: The resizing mode, which indicates + how the input video will be resized to fit + the desired output resolution(s). Possible + values are AutoFit, AutoSize or None. Default + to AutoSize. 
+ type: string + syncMode: + description: Specifies the synchronization + mode for the video. Possible values are + Auto, Cfr, Passthrough or Vfr. Default to + Auto. + type: string + type: object + pngImage: + description: A png_image block as defined below. + properties: + keyFrameInterval: + description: The distance between two key + frames. The value should be non-zero in + the range 0.5 to 20 seconds, specified in + ISO 8601 format. Note that this setting + is ignored if sync_mode is set to Passthrough, + where the KeyFrameInterval value will follow + the input source setting. Defaults to PT2S. + type: string + label: + description: Specifies the label for the codec. + The label can be used to control muxing + behavior. + type: string + layer: + description: One or more layer blocks as defined + below. + items: + properties: + height: + description: The height of the rectangular + region in pixels. This can be absolute + pixel value (e.g 100), or relative + to the size of the video (For example, + 50%). + type: string + label: + description: Specifies the label for + the codec. The label can be used to + control muxing behavior. + type: string + width: + description: The width of the rectangular + region in pixels. This can be absolute + pixel value (e.g 100), or relative + to the size of the video (For example, + 50%). + type: string + type: object + type: array + range: + description: The position relative to transform + preset start time in the input video at + which to stop generating thumbnails. The + value can be in ISO 8601 format (For example, + PT5M30S to stop at 5 minutes and 30 seconds + from start time), or a frame count (For + example, 300 to stop at the 300th frame + from the frame at start time. If this value + is 1, it means only producing one thumbnail + at start time), or a relative value to the + stream duration (For example, 50% to stop + at half of stream duration from start time). 
+ The default value is 100%, which means to + stop at the end of the stream. + type: string + start: + description: The start position, with reference + to the input video, at which the overlay + starts. The value should be in ISO 8601 + format. For example, PT05S to start the + overlay at 5 seconds into the input video. + If not specified the overlay starts from + the beginning of the input video. + type: string + step: + description: 'The intervals at which thumbnails + are generated. The value can be in ISO 8601 + format (For example, PT05S for one image + every 5 seconds), or a frame count (For + example, 30 for one image every 30 frames), + or a relative value to stream duration (For + example, 10% for one image every 10% of + stream duration). Note: Step value will + affect the first generated thumbnail, which + may not be exactly the one specified at + transform preset start time. This is due + to the encoder, which tries to select the + best thumbnail between start time and Step + position from start time as the first output. + As the default value is 10%, it means if + stream has long duration, the first generated + thumbnail might be far away from the one + specified at start time. Try to select reasonable + value for Step if the first thumbnail is + expected close to start time, or set Range + value at 1 if only one thumbnail is needed + at start time.' + type: string + stretchMode: + description: The resizing mode, which indicates + how the input video will be resized to fit + the desired output resolution(s). Possible + values are AutoFit, AutoSize or None. Default + to AutoSize. + type: string + syncMode: + description: Specifies the synchronization + mode for the video. Possible values are + Auto, Cfr, Passthrough or Vfr. Default to + Auto. 
+ type: string + type: object + type: object + type: array + experimentalOptions: + additionalProperties: + type: string + description: Dictionary containing key value pairs for + parameters not exposed in the preset itself. + type: object + x-kubernetes-map-type: granular + filter: + description: A filter block as defined below. + properties: + cropRectangle: + description: A crop_rectangle block as defined above. + properties: + height: + description: The height of the rectangular region + in pixels. This can be absolute pixel value + (e.g 100), or relative to the size of the + video (For example, 50%). + type: string + left: + description: The number of pixels from the left-margin. + This can be absolute pixel value (e.g 100), + or relative to the size of the video (For + example, 50%). + type: string + top: + description: The number of pixels from the top-margin. + This can be absolute pixel value (e.g 100), + or relative to the size of the video (For + example, 50%). + type: string + width: + description: The width of the rectangular region + in pixels. This can be absolute pixel value + (e.g 100), or relative to the size of the + video (For example, 50%). + type: string + type: object + deinterlace: + description: A deinterlace block as defined below. + properties: + mode: + description: The deinterlacing mode. Possible + values are AutoPixelAdaptive or Off. Default + to AutoPixelAdaptive. + type: string + parity: + description: The field parity to use for deinterlacing. + Possible values are Auto, TopFieldFirst or + BottomFieldFirst. Default to Auto. + type: string + type: object + fadeIn: + description: A fade_in block as defined above. + properties: + duration: + description: The duration of the fade effect + in the video. 
The value can be in ISO 8601 + format (For example, PT05S to fade In/Out + a color during 5 seconds), or a frame count + (For example, 10 to fade 10 frames from the + start time), or a relative value to stream + duration (For example, 10% to fade 10% of + stream duration). + type: string + fadeColor: + description: 'The color for the fade in/out. + It can be on the CSS Level1 colors or an RGB/hex + value: e.g: rgb(255,0,0), 0xFF0000 or #FF0000.' + type: string + start: + description: The start position, with reference + to the input video, at which the overlay starts. + The value should be in ISO 8601 format. For + example, PT05S to start the overlay at 5 seconds + into the input video. If not specified the + overlay starts from the beginning of the input + video. + type: string + type: object + fadeOut: + description: A fade_out block as defined above. + properties: + duration: + description: The duration of the fade effect + in the video. The value can be in ISO 8601 + format (For example, PT05S to fade In/Out + a color during 5 seconds), or a frame count + (For example, 10 to fade 10 frames from the + start time), or a relative value to stream + duration (For example, 10% to fade 10% of + stream duration). + type: string + fadeColor: + description: 'The color for the fade in/out. + It can be on the CSS Level1 colors or an RGB/hex + value: e.g: rgb(255,0,0), 0xFF0000 or #FF0000.' + type: string + start: + description: The start position, with reference + to the input video, at which the overlay starts. + The value should be in ISO 8601 format. For + example, PT05S to start the overlay at 5 seconds + into the input video. If not specified the + overlay starts from the beginning of the input + video. + type: string + type: object + overlay: + description: One or more overlay blocks as defined + below. + items: + properties: + audio: + description: An audio block as defined above. 
+ properties: + audioGainLevel: + description: The gain level of audio in + the overlay. The value should be in + range between 0 to 1.0. The default + is 1.0. + type: number + end: + description: The end position, with reference + to the input video, at which the overlay + ends. The value should be in ISO 8601 + format. For example, PT30S to end the + overlay at 30 seconds into the input + video. If not specified or the value + is greater than the input video duration, + the overlay will be applied until the + end of the input video if the overlay + media duration is greater than the input + video duration, else the overlay will + last as long as the overlay media duration. + type: string + fadeInDuration: + description: The duration over which the + overlay fades in onto the input video. + The value should be in ISO 8601 duration + format. If not specified the default + behavior is to have no fade in (same + as PT0S). + type: string + fadeOutDuration: + description: The duration over which the + overlay fades out of the input video. + The value should be in ISO 8601 duration + format. If not specified the default + behavior is to have no fade out (same + as PT0S). + type: string + inputLabel: + description: The label of the job input + which is to be used as an overlay. The + input must specify exact one file. You + can specify an image file in JPG, PNG, + GIF or BMP format, or an audio file + (such as a WAV, MP3, WMA or M4A file), + or a video file. + type: string + start: + description: The start position, with + reference to the input video, at which + the overlay starts. The value should + be in ISO 8601 format. For example, + PT05S to start the overlay at 5 seconds + into the input video. If not specified + the overlay starts from the beginning + of the input video. + type: string + type: object + video: + description: A video block as defined below. + properties: + audioGainLevel: + description: The gain level of audio in + the overlay. 
The value should be in + range between 0 to 1.0. The default + is 1.0. + type: number + cropRectangle: + description: A crop_rectangle block as + defined above. + properties: + height: + description: The height of the rectangular + region in pixels. This can be absolute + pixel value (e.g 100), or relative + to the size of the video (For example, + 50%). + type: string + left: + description: The number of pixels + from the left-margin. This can be + absolute pixel value (e.g 100), + or relative to the size of the video + (For example, 50%). + type: string + top: + description: The number of pixels + from the top-margin. This can be + absolute pixel value (e.g 100), + or relative to the size of the video + (For example, 50%). + type: string + width: + description: The width of the rectangular + region in pixels. This can be absolute + pixel value (e.g 100), or relative + to the size of the video (For example, + 50%). + type: string + type: object + end: + description: The end position, with reference + to the input video, at which the overlay + ends. The value should be in ISO 8601 + format. For example, PT30S to end the + overlay at 30 seconds into the input + video. If not specified or the value + is greater than the input video duration, + the overlay will be applied until the + end of the input video if the overlay + media duration is greater than the input + video duration, else the overlay will + last as long as the overlay media duration. + type: string + fadeInDuration: + description: The duration over which the + overlay fades in onto the input video. + The value should be in ISO 8601 duration + format. If not specified the default + behavior is to have no fade in (same + as PT0S). + type: string + fadeOutDuration: + description: The duration over which the + overlay fades out of the input video. + The value should be in ISO 8601 duration + format. If not specified the default + behavior is to have no fade out (same + as PT0S). 
+ type: string + inputLabel: + description: The label of the job input + which is to be used as an overlay. The + input must specify exact one file. You + can specify an image file in JPG, PNG, + GIF or BMP format, or an audio file + (such as a WAV, MP3, WMA or M4A file), + or a video file. + type: string + opacity: + description: The opacity of the overlay. + The value should be in the range between + 0 to 1.0. Default to 1.0, which means + the overlay is opaque. + type: number + position: + description: A position block as defined + above. + properties: + height: + description: The height of the rectangular + region in pixels. This can be absolute + pixel value (e.g 100), or relative + to the size of the video (For example, + 50%). + type: string + left: + description: The number of pixels + from the left-margin. This can be + absolute pixel value (e.g 100), + or relative to the size of the video + (For example, 50%). + type: string + top: + description: The number of pixels + from the top-margin. This can be + absolute pixel value (e.g 100), + or relative to the size of the video + (For example, 50%). + type: string + width: + description: The width of the rectangular + region in pixels. This can be absolute + pixel value (e.g 100), or relative + to the size of the video (For example, + 50%). + type: string + type: object + start: + description: The start position, with + reference to the input video, at which + the overlay starts. The value should + be in ISO 8601 format. For example, + PT05S to start the overlay at 5 seconds + into the input video. If not specified + the overlay starts from the beginning + of the input video. + type: string + type: object + type: object + type: array + rotation: + description: The rotation to be applied to the input + video before it is encoded. Possible values are + Auto, None, Rotate90, Rotate180, Rotate270,or + Rotate0. Default to Auto. 
+ type: string + type: object + format: + description: One or more format blocks as defined below. + items: + properties: + jpg: + description: A jpg block as defined below. + properties: + filenamePattern: + description: 'The file naming pattern used + for the creation of output files. The following + macros are supported in the file name: {Basename} + - An expansion macro that will use the name + of the input video file. If the base name(the + file suffix is not included) of the input + video file is less than 32 characters long, + the base name of input video files will + be used. If the length of base name of the + input video file exceeds 32 characters, + the base name is truncated to the first + 32 characters in total length. {Extension} + - The appropriate extension for this format. + {Label} - The label assigned to the codec/layer. + {Index} - A unique index for thumbnails. + Only applicable to thumbnails. {AudioStream} + - string "Audio" plus audio stream number(start + from 1). {Bitrate} - The audio/video bitrate + in kbps. Not applicable to thumbnails. {Codec} + - The type of the audio/video codec. {Resolution} + - The video resolution. Any unsubstituted + macros will be collapsed and removed from + the filename.' + type: string + type: object + mp4: + description: A mp4 block as defined below. + properties: + filenamePattern: + description: 'The file naming pattern used + for the creation of output files. The following + macros are supported in the file name: {Basename} + - An expansion macro that will use the name + of the input video file. If the base name(the + file suffix is not included) of the input + video file is less than 32 characters long, + the base name of input video files will + be used. If the length of base name of the + input video file exceeds 32 characters, + the base name is truncated to the first + 32 characters in total length. {Extension} + - The appropriate extension for this format. 
+ {Label} - The label assigned to the codec/layer. + {Index} - A unique index for thumbnails. + Only applicable to thumbnails. {AudioStream} + - string "Audio" plus audio stream number(start + from 1). {Bitrate} - The audio/video bitrate + in kbps. Not applicable to thumbnails. {Codec} + - The type of the audio/video codec. {Resolution} + - The video resolution. Any unsubstituted + macros will be collapsed and removed from + the filename.' + type: string + outputFile: + description: One or more output_file blocks + as defined above. + items: + properties: + labels: + description: The list of labels that + describe how the encoder should multiplex + video and audio into an output file. + For example, if the encoder is producing + two video layers with labels v1 and + v2, and one audio layer with label + a1, then an array like ["v1", "a1"] + tells the encoder to produce an output + file with the video track represented + by v1 and the audio track represented + by a1. + items: + type: string + type: array + type: object + type: array + type: object + png: + description: A png block as defined below. + properties: + filenamePattern: + description: 'The file naming pattern used + for the creation of output files. The following + macros are supported in the file name: {Basename} + - An expansion macro that will use the name + of the input video file. If the base name(the + file suffix is not included) of the input + video file is less than 32 characters long, + the base name of input video files will + be used. If the length of base name of the + input video file exceeds 32 characters, + the base name is truncated to the first + 32 characters in total length. {Extension} + - The appropriate extension for this format. + {Label} - The label assigned to the codec/layer. + {Index} - A unique index for thumbnails. + Only applicable to thumbnails. {AudioStream} + - string "Audio" plus audio stream number(start + from 1). {Bitrate} - The audio/video bitrate + in kbps. 
Not applicable to thumbnails. {Codec} + - The type of the audio/video codec. {Resolution} + - The video resolution. Any unsubstituted + macros will be collapsed and removed from + the filename.' + type: string + type: object + transportStream: + description: A transport_stream block as defined + below. + properties: + filenamePattern: + description: 'The file naming pattern used + for the creation of output files. The following + macros are supported in the file name: {Basename} + - An expansion macro that will use the name + of the input video file. If the base name(the + file suffix is not included) of the input + video file is less than 32 characters long, + the base name of input video files will + be used. If the length of base name of the + input video file exceeds 32 characters, + the base name is truncated to the first + 32 characters in total length. {Extension} + - The appropriate extension for this format. + {Label} - The label assigned to the codec/layer. + {Index} - A unique index for thumbnails. + Only applicable to thumbnails. {AudioStream} + - string "Audio" plus audio stream number(start + from 1). {Bitrate} - The audio/video bitrate + in kbps. Not applicable to thumbnails. {Codec} + - The type of the audio/video codec. {Resolution} + - The video resolution. Any unsubstituted + macros will be collapsed and removed from + the filename.' + type: string + outputFile: + description: One or more output_file blocks + as defined above. + items: + properties: + labels: + description: The list of labels that + describe how the encoder should multiplex + video and audio into an output file. + For example, if the encoder is producing + two video layers with labels v1 and + v2, and one audio layer with label + a1, then an array like ["v1", "a1"] + tells the encoder to produce an output + file with the video track represented + by v1 and the audio track represented + by a1. 
+ items: + type: string + type: array + type: object + type: array + type: object + type: object + type: array + type: object + faceDetectorPreset: + description: A face_detector_preset block as defined above. + properties: + analysisResolution: + description: Possible values are SourceResolution or + StandardDefinition. Specifies the maximum resolution + at which your video is analyzed. which will keep the + input video at its original resolution when analyzed. + Using StandardDefinition will resize input videos + to standard definition while preserving the appropriate + aspect ratio. It will only resize if the video is + of higher resolution. For example, a 1920x1080 input + would be scaled to 640x360 before processing. Switching + to StandardDefinition will reduce the time it takes + to process high resolution video. It may also reduce + the cost of using this component (see https://azure.microsoft.com/en-us/pricing/details/media-services/#analytics + for details). However, faces that end up being too + small in the resized video may not be detected. Default + to SourceResolution. + type: string + blurType: + description: Specifies the type of blur to apply to + faces in the output video. Possible values are Black, + Box, High, Low,and Med. + type: string + experimentalOptions: + additionalProperties: + type: string + description: Dictionary containing key value pairs for + parameters not exposed in the preset itself. + type: object + x-kubernetes-map-type: granular + faceRedactorMode: + description: 'This mode provides the ability to choose + between the following settings: 1) Analyze - For detection + only. This mode generates a metadata JSON file marking + appearances of faces throughout the video. Where possible, + appearances of the same person are assigned the same + ID. 2) Combined - Additionally redacts(blurs) detected + faces. 3) Redact - This enables a 2-pass process, + allowing for selective redaction of a subset of detected + faces. 
It takes in the metadata file from a prior + analyze pass, along with the source video, and a user-selected + subset of IDs that require redaction. Default to Analyze.' + type: string + type: object + onErrorAction: + description: A Transform can define more than one outputs. + This property defines what the service should do when + one output fails - either continue to produce other outputs, + or, stop the other outputs. The overall Job state will + not reflect failures of outputs that are specified with + ContinueJob. Possible values are StopProcessingJob or + ContinueJob. Defaults to StopProcessingJob. + type: string + relativePriority: + description: Sets the relative priority of the TransformOutputs + within a Transform. This sets the priority that the service + uses for processing Transform Outputs. Possible values + are High, Normal or Low. Defaults to Normal. + type: string + videoAnalyzerPreset: + description: A video_analyzer_preset block as defined below. + properties: + audioAnalysisMode: + description: Possible values are Basic or Standard. + Determines the set of audio analysis operations to + be performed. Default to Standard. + type: string + audioLanguage: + description: 'The language for the audio payload in + the input using the BCP-47 format of ''language tag-region'' + (e.g: ''en-US''). If you know the language of your + content, it is recommended that you specify it. The + language must be specified explicitly for AudioAnalysisMode:Basic, + since automatic language detection is not included + in basic mode. If the language isn''t specified, automatic + language detection will choose the first language + detected and process with the selected language for + the duration of the file. It does not currently support + dynamically switching between languages after the + first language is detected. The automatic detection + works best with audio recordings with clearly discernible + speech. 
If automatic detection fails to find the language, + transcription would fall back to en-US. The list of + supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463.' + type: string + experimentalOptions: + additionalProperties: + type: string + description: Dictionary containing key value pairs for + parameters not exposed in the preset itself. + type: object + x-kubernetes-map-type: granular + insightsType: + description: Defines the type of insights that you want + the service to generate. The allowed values are AudioInsightsOnly, + VideoInsightsOnly, and AllInsights. If you set this + to AllInsights and the input is audio only, then only + audio insights are generated. Similarly, if the input + is video only, then only video insights are generated. + It is recommended that you not use AudioInsightsOnly + if you expect some of your inputs to be video only; + or use VideoInsightsOnly if you expect some of your + inputs to be audio only. Your Jobs in such conditions + would error out. Default to AllInsights. + type: string + type: object + type: object + type: array + resourceGroupName: + description: The name of the Resource Group where the Transform + should exist. Changing this forces a new Transform to be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/netapp.azure.upbound.io_accounts.yaml b/package/crds/netapp.azure.upbound.io_accounts.yaml index 837c70b09..d0d5fcf35 100644 --- a/package/crds/netapp.azure.upbound.io_accounts.yaml +++ b/package/crds/netapp.azure.upbound.io_accounts.yaml @@ -609,3 +609,582 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Account is the Schema for the Accounts API. Manages a NetApp + Account. 
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: AccountSpec defines the desired state of Account + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + activeDirectory: + description: A active_directory block as defined below. + properties: + dnsServers: + description: A list of DNS server IP addresses for the Active + Directory domain. Only allows IPv4 address. + items: + type: string + type: array + domain: + description: The name of the Active Directory domain. + type: string + organizationalUnit: + description: The Organizational Unit (OU) within the Active + Directory Domain. 
+ type: string + passwordSecretRef: + description: The password associated with the username. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + smbServerName: + description: The NetBIOS name which should be used for the + NetApp SMB Server, which will be registered as a computer + account in the AD and used to mount volumes. + type: string + username: + description: The Username of Active Directory Domain Administrator. + type: string + required: + - passwordSecretRef + type: object + identity: + description: The identity block where it is used when customer + managed keys based encryption will be enabled as defined below. + properties: + identityIds: + description: The identity id of the user assigned identity + to use when type is UserAssigned + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: The identity type, which can be SystemAssigned + or UserAssigned. Only one type at a time is supported by + Azure NetApp Files. + type: string + type: object + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + resourceGroupName: + description: The name of the resource group where the NetApp Account + should be created. Changing this forces a new resource to be + created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. 
+ type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + activeDirectory: + description: A active_directory block as defined below. + properties: + dnsServers: + description: A list of DNS server IP addresses for the Active + Directory domain. Only allows IPv4 address. + items: + type: string + type: array + domain: + description: The name of the Active Directory domain. + type: string + organizationalUnit: + description: The Organizational Unit (OU) within the Active + Directory Domain. + type: string + smbServerName: + description: The NetBIOS name which should be used for the + NetApp SMB Server, which will be registered as a computer + account in the AD and used to mount volumes. + type: string + username: + description: The Username of Active Directory Domain Administrator. + type: string + type: object + identity: + description: The identity block where it is used when customer + managed keys based encryption will be enabled as defined below. + properties: + identityIds: + description: The identity id of the user assigned identity + to use when type is UserAssigned + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: The identity type, which can be SystemAssigned + or UserAssigned. Only one type at a time is supported by + Azure NetApp Files. 
+ type: string + type: object + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + status: + description: AccountStatus defines the observed state of Account. + properties: + atProvider: + properties: + activeDirectory: + description: A active_directory block as defined below. + properties: + dnsServers: + description: A list of DNS server IP addresses for the Active + Directory domain. Only allows IPv4 address. + items: + type: string + type: array + domain: + description: The name of the Active Directory domain. + type: string + organizationalUnit: + description: The Organizational Unit (OU) within the Active + Directory Domain. + type: string + smbServerName: + description: The NetBIOS name which should be used for the + NetApp SMB Server, which will be registered as a computer + account in the AD and used to mount volumes. + type: string + username: + description: The Username of Active Directory Domain Administrator. + type: string + type: object + id: + description: The ID of the NetApp Account. + type: string + identity: + description: The identity block where it is used when customer + managed keys based encryption will be enabled as defined below. + properties: + identityIds: + description: The identity id of the user assigned identity + to use when type is UserAssigned + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The ID of the NetApp Account. + type: string + tenantId: + description: The ID of the NetApp Account. + type: string + type: + description: The identity type, which can be SystemAssigned + or UserAssigned. 
Only one type at a time is supported by + Azure NetApp Files. + type: string + type: object + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + resourceGroupName: + description: The name of the resource group where the NetApp Account + should be created. Changing this forces a new resource to be + created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/netapp.azure.upbound.io_snapshotpolicies.yaml b/package/crds/netapp.azure.upbound.io_snapshotpolicies.yaml index 8523f6b19..fe3121ce3 100644 --- a/package/crds/netapp.azure.upbound.io_snapshotpolicies.yaml +++ b/package/crds/netapp.azure.upbound.io_snapshotpolicies.yaml @@ -804,3 +804,765 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: SnapshotPolicy is the Schema for the SnapshotPolicys API. Manages + a NetApp Snapshot Policy. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. 
+ In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SnapshotPolicySpec defines the desired state of SnapshotPolicy + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + accountName: + description: The name of the NetApp Account in which the NetApp + Snapshot Policy should be created. Changing this forces a new + resource to be created. + type: string + accountNameRef: + description: Reference to a Account in netapp to populate accountName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + accountNameSelector: + description: Selector for a Account in netapp to populate accountName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + dailySchedule: + description: Sets a daily snapshot schedule. A daily_schedule + block as defined below. + properties: + hour: + description: Hour of the day that the snapshots will be created, + valid range is from 0 to 23. + type: number + minute: + description: Minute of the hour that the snapshots will be + created, valid range is from 0 to 59. + type: number + snapshotsToKeep: + description: How many hourly snapshots to keep, valid range + is from 0 to 255. + type: number + type: object + enabled: + description: Defines that the NetApp Snapshot Policy is enabled + or not. 
+ type: boolean + hourlySchedule: + description: Sets an hourly snapshot schedule. A hourly_schedule + block as defined below. + properties: + minute: + description: Minute of the hour that the snapshots will be + created, valid range is from 0 to 59. + type: number + snapshotsToKeep: + description: How many hourly snapshots to keep, valid range + is from 0 to 255. + type: number + type: object + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + monthlySchedule: + description: Sets a monthly snapshot schedule. A monthly_schedule + block as defined below. + properties: + daysOfMonth: + description: List of the days of the month when the snapshots + will be created, valid range is from 1 to 30. + items: + type: number + type: array + x-kubernetes-list-type: set + hour: + description: Hour of the day that the snapshots will be created, + valid range is from 0 to 23. + type: number + minute: + description: Minute of the hour that the snapshots will be + created, valid range is from 0 to 59. + type: number + snapshotsToKeep: + description: How many hourly snapshots to keep, valid range + is from 0 to 255. + type: number + type: object + resourceGroupName: + description: The name of the resource group where the NetApp Snapshot + Policy should be created. Changing this forces a new resource + to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + weeklySchedule: + description: Sets a weekly snapshot schedule. A weekly_schedule + block as defined below. 
+ properties: + daysOfWeek: + description: List of the week days using English names when + the snapshots will be created. + items: + type: string + type: array + x-kubernetes-list-type: set + hour: + description: Hour of the day that the snapshots will be created, + valid range is from 0 to 23. + type: number + minute: + description: Minute of the hour that the snapshots will be + created, valid range is from 0 to 59. + type: number + snapshotsToKeep: + description: How many hourly snapshots to keep, valid range + is from 0 to 255. + type: number + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + dailySchedule: + description: Sets a daily snapshot schedule. A daily_schedule + block as defined below. + properties: + hour: + description: Hour of the day that the snapshots will be created, + valid range is from 0 to 23. + type: number + minute: + description: Minute of the hour that the snapshots will be + created, valid range is from 0 to 59. + type: number + snapshotsToKeep: + description: How many hourly snapshots to keep, valid range + is from 0 to 255. + type: number + type: object + enabled: + description: Defines that the NetApp Snapshot Policy is enabled + or not. + type: boolean + hourlySchedule: + description: Sets an hourly snapshot schedule. A hourly_schedule + block as defined below. 
+ properties: + minute: + description: Minute of the hour that the snapshots will be + created, valid range is from 0 to 59. + type: number + snapshotsToKeep: + description: How many hourly snapshots to keep, valid range + is from 0 to 255. + type: number + type: object + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + monthlySchedule: + description: Sets a monthly snapshot schedule. A monthly_schedule + block as defined below. + properties: + daysOfMonth: + description: List of the days of the month when the snapshots + will be created, valid range is from 1 to 30. + items: + type: number + type: array + x-kubernetes-list-type: set + hour: + description: Hour of the day that the snapshots will be created, + valid range is from 0 to 23. + type: number + minute: + description: Minute of the hour that the snapshots will be + created, valid range is from 0 to 59. + type: number + snapshotsToKeep: + description: How many hourly snapshots to keep, valid range + is from 0 to 255. + type: number + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + weeklySchedule: + description: Sets a weekly snapshot schedule. A weekly_schedule + block as defined below. + properties: + daysOfWeek: + description: List of the week days using English names when + the snapshots will be created. + items: + type: string + type: array + x-kubernetes-list-type: set + hour: + description: Hour of the day that the snapshots will be created, + valid range is from 0 to 23. + type: number + minute: + description: Minute of the hour that the snapshots will be + created, valid range is from 0 to 59. + type: number + snapshotsToKeep: + description: How many hourly snapshots to keep, valid range + is from 0 to 255. 
+ type: number + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. 
+ - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.enabled is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.enabled) + || (has(self.initProvider) && has(self.initProvider.enabled))' + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + status: + description: SnapshotPolicyStatus defines the observed state of SnapshotPolicy. + properties: + atProvider: + properties: + accountName: + description: The name of the NetApp Account in which the NetApp + Snapshot Policy should be created. Changing this forces a new + resource to be created. + type: string + dailySchedule: + description: Sets a daily snapshot schedule. A daily_schedule + block as defined below. + properties: + hour: + description: Hour of the day that the snapshots will be created, + valid range is from 0 to 23. + type: number + minute: + description: Minute of the hour that the snapshots will be + created, valid range is from 0 to 59. + type: number + snapshotsToKeep: + description: How many hourly snapshots to keep, valid range + is from 0 to 255. + type: number + type: object + enabled: + description: Defines that the NetApp Snapshot Policy is enabled + or not. + type: boolean + hourlySchedule: + description: Sets an hourly snapshot schedule. A hourly_schedule + block as defined below. + properties: + minute: + description: Minute of the hour that the snapshots will be + created, valid range is from 0 to 59. + type: number + snapshotsToKeep: + description: How many hourly snapshots to keep, valid range + is from 0 to 255. 
+ type: number + type: object + id: + description: The ID of the NetApp Snapshot. + type: string + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + monthlySchedule: + description: Sets a monthly snapshot schedule. A monthly_schedule + block as defined below. + properties: + daysOfMonth: + description: List of the days of the month when the snapshots + will be created, valid range is from 1 to 30. + items: + type: number + type: array + x-kubernetes-list-type: set + hour: + description: Hour of the day that the snapshots will be created, + valid range is from 0 to 23. + type: number + minute: + description: Minute of the hour that the snapshots will be + created, valid range is from 0 to 59. + type: number + snapshotsToKeep: + description: How many hourly snapshots to keep, valid range + is from 0 to 255. + type: number + type: object + resourceGroupName: + description: The name of the resource group where the NetApp Snapshot + Policy should be created. Changing this forces a new resource + to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + weeklySchedule: + description: Sets a weekly snapshot schedule. A weekly_schedule + block as defined below. + properties: + daysOfWeek: + description: List of the week days using English names when + the snapshots will be created. + items: + type: string + type: array + x-kubernetes-list-type: set + hour: + description: Hour of the day that the snapshots will be created, + valid range is from 0 to 23. + type: number + minute: + description: Minute of the hour that the snapshots will be + created, valid range is from 0 to 59. + type: number + snapshotsToKeep: + description: How many hourly snapshots to keep, valid range + is from 0 to 255. 
+ type: number + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/netapp.azure.upbound.io_volumes.yaml b/package/crds/netapp.azure.upbound.io_volumes.yaml index 089716217..b23d45828 100644 --- a/package/crds/netapp.azure.upbound.io_volumes.yaml +++ b/package/crds/netapp.azure.upbound.io_volumes.yaml @@ -1726,3 +1726,1692 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Volume is the Schema for the Volumes API. Manages a NetApp Volume. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: VolumeSpec defines the desired state of Volume + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + accountName: + description: The name of the NetApp account in which the NetApp + Pool should be created. Changing this forces a new resource + to be created. + type: string + accountNameRef: + description: Reference to a Account in netapp to populate accountName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + accountNameSelector: + description: Selector for a Account in netapp to populate accountName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + azureVmwareDataStoreEnabled: + description: Is the NetApp Volume enabled for Azure VMware Solution + (AVS) datastore purpose. Defaults to false. Changing this forces + a new resource to be created. + type: boolean + createFromSnapshotResourceId: + description: 'Creates volume from snapshot. Following properties + must be the same as the original volume where the snapshot was + taken from: protocols, subnet_id, location, service_level, resource_group_name, + account_name and pool_name. Changing this forces a new resource + to be created.' + type: string + createFromSnapshotResourceIdRef: + description: Reference to a Snapshot in netapp to populate createFromSnapshotResourceId. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + createFromSnapshotResourceIdSelector: + description: Selector for a Snapshot in netapp to populate createFromSnapshotResourceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + dataProtectionReplication: + description: A data_protection_replication block as defined below. + Changing this forces a new resource to be created. + properties: + endpointType: + description: The endpoint type, default value is dst for destination. + type: string + remoteVolumeLocation: + description: Location of the primary volume. Changing this + forces a new resource to be created. + type: string + remoteVolumeResourceId: + description: Resource ID of the primary volume. + type: string + remoteVolumeResourceIdRef: + description: Reference to a Volume in netapp to populate remoteVolumeResourceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + remoteVolumeResourceIdSelector: + description: Selector for a Volume in netapp to populate remoteVolumeResourceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + replicationFrequency: + description: Replication frequency, supported values are '10minutes', + 'hourly', 'daily', values are case sensitive. + type: string + type: object + dataProtectionSnapshotPolicy: + description: A data_protection_snapshot_policy block as defined + below. + properties: + snapshotPolicyId: + description: Resource ID of the snapshot policy to apply to + the volume. + type: string + snapshotPolicyIdRef: + description: Reference to a SnapshotPolicy in netapp to populate + snapshotPolicyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + snapshotPolicyIdSelector: + description: Selector for a SnapshotPolicy in netapp to populate + snapshotPolicyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + encryptionKeySource: + description: The encryption key source, it can be Microsoft.NetApp + for platform managed keys or Microsoft.KeyVault for customer-managed + keys. This is required with key_vault_private_endpoint_id. Changing + this forces a new resource to be created. 
+ type: string + exportPolicyRule: + description: One or more export_policy_rule block defined below. + items: + properties: + allowedClients: + description: A list of allowed clients IPv4 addresses. + items: + type: string + type: array + x-kubernetes-list-type: set + protocolsEnabled: + description: 'A list of allowed protocols. Valid values + include CIFS, NFSv3, or NFSv4.1. Only one value is supported + at this time. This replaces the previous arguments: cifs_enabled, + nfsv3_enabled and nfsv4_enabled.' + items: + type: string + type: array + rootAccessEnabled: + description: Is root access permitted to this volume? + type: boolean + ruleIndex: + description: The index number of the rule. + type: number + unixReadOnly: + description: Is the file system on unix read only? + type: boolean + unixReadWrite: + description: Is the file system on unix read and write? + type: boolean + type: object + type: array + keyVaultPrivateEndpointId: + description: The Private Endpoint ID for Key Vault, which is required + when using customer-managed keys. This is required with encryption_key_source. + Changing this forces a new resource to be created. + type: string + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + networkFeatures: + description: Indicates which network feature to use, accepted + values are Basic or Standard, it defaults to Basic if not defined. + This is a feature in public preview and for more information + about it and how to register, please refer to Configure network + features for an Azure NetApp Files volume. + type: string + poolName: + description: The name of the NetApp pool in which the NetApp Volume + should be created. Changing this forces a new resource to be + created. + type: string + poolNameRef: + description: Reference to a Pool in netapp to populate poolName. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + poolNameSelector: + description: Selector for a Pool in netapp to populate poolName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + protocols: + description: The target volume protocol expressed as a list. Supported + single value include CIFS, NFSv3, or NFSv4.1. If argument is + not defined it will default to NFSv3. Changing this forces a + new resource to be created and data will be lost. Dual protocol + scenario is supported for CIFS and NFSv3, for more information, + please refer to Create a dual-protocol volume for Azure NetApp + Files document. + items: + type: string + type: array + x-kubernetes-list-type: set + resourceGroupName: + description: The name of the resource group where the NetApp Volume + should be created. Changing this forces a new resource to be + created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + securityStyle: + description: Volume security style, accepted values are unix or + ntfs. If not provided, single-protocol volume is created defaulting + to unix if it is NFSv3 or NFSv4.1 volume, if CIFS, it will default + to ntfs. In a dual-protocol volume, if not provided, its value + will be ntfs. Changing this forces a new resource to be created. + type: string + serviceLevel: + description: The target performance of the file system. Valid + values include Premium, Standard, or Ultra. Changing this forces + a new resource to be created. + type: string + smbAccessBasedEnumerationEnabled: + description: Limits enumeration of files and folders (that is, + listing the contents) in SMB only to users with allowed access + on the share. 
For instance, if a user doesn't have access to + read a file or folder in a share with access-based enumeration + enabled, then the file or folder doesn't show up in directory + listings. Defaults to false. For more information, please refer + to Understand NAS share permissions in Azure NetApp Files + type: boolean + smbNonBrowsableEnabled: + description: Limits clients from browsing for an SMB share by + hiding the share from view in Windows Explorer or when listing + shares in "net view." Only end users that know the absolute + paths to the share are able to find the share. Defaults to false. + For more information, please refer to Understand NAS share permissions + in Azure NetApp Files + type: boolean + snapshotDirectoryVisible: + description: Specifies whether the .snapshot (NFS clients) or + ~snapshot (SMB clients) path of a volume is visible, default + value is true. + type: boolean + storageQuotaInGb: + description: The maximum Storage Quota allowed for a file system + in Gigabytes. + type: number + subnetId: + description: The ID of the Subnet the NetApp Volume resides in, + which must have the Microsoft.NetApp/volumes delegation. Changing + this forces a new resource to be created. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + throughputInMibps: + description: Throughput of this volume in Mibps. + type: number + volumePath: + description: A unique file path for the volume. Used when creating + mount targets. Changing this forces a new resource to be created. + type: string + zone: + description: Specifies the Availability Zone in which the Volume + should be located. 
Possible values are 1, 2 and 3. Changing + this forces a new resource to be created. This feature is currently + in preview, for more information on how to enable it, please + refer to Manage availability zone volume placement for Azure + NetApp Files. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + azureVmwareDataStoreEnabled: + description: Is the NetApp Volume enabled for Azure VMware Solution + (AVS) datastore purpose. Defaults to false. Changing this forces + a new resource to be created. + type: boolean + createFromSnapshotResourceId: + description: 'Creates volume from snapshot. Following properties + must be the same as the original volume where the snapshot was + taken from: protocols, subnet_id, location, service_level, resource_group_name, + account_name and pool_name. Changing this forces a new resource + to be created.' + type: string + createFromSnapshotResourceIdRef: + description: Reference to a Snapshot in netapp to populate createFromSnapshotResourceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + createFromSnapshotResourceIdSelector: + description: Selector for a Snapshot in netapp to populate createFromSnapshotResourceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + dataProtectionReplication: + description: A data_protection_replication block as defined below. + Changing this forces a new resource to be created. 
+ properties: + endpointType: + description: The endpoint type, default value is dst for destination. + type: string + remoteVolumeLocation: + description: Location of the primary volume. Changing this + forces a new resource to be created. + type: string + remoteVolumeResourceId: + description: Resource ID of the primary volume. + type: string + remoteVolumeResourceIdRef: + description: Reference to a Volume in netapp to populate remoteVolumeResourceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + remoteVolumeResourceIdSelector: + description: Selector for a Volume in netapp to populate remoteVolumeResourceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + replicationFrequency: + description: Replication frequency, supported values are '10minutes', + 'hourly', 'daily', values are case sensitive. + type: string + type: object + dataProtectionSnapshotPolicy: + description: A data_protection_snapshot_policy block as defined + below. + properties: + snapshotPolicyId: + description: Resource ID of the snapshot policy to apply to + the volume. + type: string + snapshotPolicyIdRef: + description: Reference to a SnapshotPolicy in netapp to populate + snapshotPolicyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + snapshotPolicyIdSelector: + description: Selector for a SnapshotPolicy in netapp to populate + snapshotPolicyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + encryptionKeySource: + description: The encryption key source, it can be Microsoft.NetApp + for platform managed keys or Microsoft.KeyVault for customer-managed + keys. This is required with key_vault_private_endpoint_id. Changing + this forces a new resource to be created. + type: string + exportPolicyRule: + description: One or more export_policy_rule block defined below. + items: + properties: + allowedClients: + description: A list of allowed clients IPv4 addresses. + items: + type: string + type: array + x-kubernetes-list-type: set + protocolsEnabled: + description: 'A list of allowed protocols. Valid values + include CIFS, NFSv3, or NFSv4.1. 
Only one value is supported + at this time. This replaces the previous arguments: cifs_enabled, + nfsv3_enabled and nfsv4_enabled.' + items: + type: string + type: array + rootAccessEnabled: + description: Is root access permitted to this volume? + type: boolean + ruleIndex: + description: The index number of the rule. + type: number + unixReadOnly: + description: Is the file system on unix read only? + type: boolean + unixReadWrite: + description: Is the file system on unix read and write? + type: boolean + type: object + type: array + keyVaultPrivateEndpointId: + description: The Private Endpoint ID for Key Vault, which is required + when using customer-managed keys. This is required with encryption_key_source. + Changing this forces a new resource to be created. + type: string + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + networkFeatures: + description: Indicates which network feature to use, accepted + values are Basic or Standard, it defaults to Basic if not defined. + This is a feature in public preview and for more information + about it and how to register, please refer to Configure network + features for an Azure NetApp Files volume. + type: string + protocols: + description: The target volume protocol expressed as a list. Supported + single value include CIFS, NFSv3, or NFSv4.1. If argument is + not defined it will default to NFSv3. Changing this forces a + new resource to be created and data will be lost. Dual protocol + scenario is supported for CIFS and NFSv3, for more information, + please refer to Create a dual-protocol volume for Azure NetApp + Files document. + items: + type: string + type: array + x-kubernetes-list-type: set + securityStyle: + description: Volume security style, accepted values are unix or + ntfs. 
If not provided, single-protocol volume is created defaulting + to unix if it is NFSv3 or NFSv4.1 volume, if CIFS, it will default + to ntfs. In a dual-protocol volume, if not provided, its value + will be ntfs. Changing this forces a new resource to be created. + type: string + serviceLevel: + description: The target performance of the file system. Valid + values include Premium, Standard, or Ultra. Changing this forces + a new resource to be created. + type: string + smbAccessBasedEnumerationEnabled: + description: Limits enumeration of files and folders (that is, + listing the contents) in SMB only to users with allowed access + on the share. For instance, if a user doesn't have access to + read a file or folder in a share with access-based enumeration + enabled, then the file or folder doesn't show up in directory + listings. Defaults to false. For more information, please refer + to Understand NAS share permissions in Azure NetApp Files + type: boolean + smbNonBrowsableEnabled: + description: Limits clients from browsing for an SMB share by + hiding the share from view in Windows Explorer or when listing + shares in "net view." Only end users that know the absolute + paths to the share are able to find the share. Defaults to false. + For more information, please refer to Understand NAS share permissions + in Azure NetApp Files + type: boolean + snapshotDirectoryVisible: + description: Specifies whether the .snapshot (NFS clients) or + ~snapshot (SMB clients) path of a volume is visible, default + value is true. + type: boolean + storageQuotaInGb: + description: The maximum Storage Quota allowed for a file system + in Gigabytes. + type: number + subnetId: + description: The ID of the Subnet the NetApp Volume resides in, + which must have the Microsoft.NetApp/volumes delegation. Changing + this forces a new resource to be created. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate subnetId. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + throughputInMibps: + description: Throughput of this volume in Mibps. + type: number + volumePath: + description: A unique file path for the volume. Used when creating + mount targets. Changing this forces a new resource to be created. + type: string + zone: + description: Specifies the Availability Zone in which the Volume + should be located. Possible values are 1, 2 and 3. Changing + this forces a new resource to be created. This feature is currently + in preview, for more information on how to enable it, please + refer to Manage availability zone volume placement for Azure + NetApp Files. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.serviceLevel is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.serviceLevel) + || (has(self.initProvider) && has(self.initProvider.serviceLevel))' + - message: spec.forProvider.storageQuotaInGb is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.storageQuotaInGb) + || (has(self.initProvider) && has(self.initProvider.storageQuotaInGb))' + - message: spec.forProvider.volumePath is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.volumePath) + || (has(self.initProvider) && has(self.initProvider.volumePath))' + status: + description: VolumeStatus defines the observed state of Volume. 
+ properties: + atProvider: + properties: + accountName: + description: The name of the NetApp account in which the NetApp + Pool should be created. Changing this forces a new resource + to be created. + type: string + azureVmwareDataStoreEnabled: + description: Is the NetApp Volume enabled for Azure VMware Solution + (AVS) datastore purpose. Defaults to false. Changing this forces + a new resource to be created. + type: boolean + createFromSnapshotResourceId: + description: 'Creates volume from snapshot. Following properties + must be the same as the original volume where the snapshot was + taken from: protocols, subnet_id, location, service_level, resource_group_name, + account_name and pool_name. Changing this forces a new resource + to be created.' + type: string + dataProtectionReplication: + description: A data_protection_replication block as defined below. + Changing this forces a new resource to be created. + properties: + endpointType: + description: The endpoint type, default value is dst for destination. + type: string + remoteVolumeLocation: + description: Location of the primary volume. Changing this + forces a new resource to be created. + type: string + remoteVolumeResourceId: + description: Resource ID of the primary volume. + type: string + replicationFrequency: + description: Replication frequency, supported values are '10minutes', + 'hourly', 'daily', values are case sensitive. + type: string + type: object + dataProtectionSnapshotPolicy: + description: A data_protection_snapshot_policy block as defined + below. + properties: + snapshotPolicyId: + description: Resource ID of the snapshot policy to apply to + the volume. + type: string + type: object + encryptionKeySource: + description: The encryption key source, it can be Microsoft.NetApp + for platform managed keys or Microsoft.KeyVault for customer-managed + keys. This is required with key_vault_private_endpoint_id. Changing + this forces a new resource to be created. 
+ type: string + exportPolicyRule: + description: One or more export_policy_rule block defined below. + items: + properties: + allowedClients: + description: A list of allowed clients IPv4 addresses. + items: + type: string + type: array + x-kubernetes-list-type: set + protocolsEnabled: + description: 'A list of allowed protocols. Valid values + include CIFS, NFSv3, or NFSv4.1. Only one value is supported + at this time. This replaces the previous arguments: cifs_enabled, + nfsv3_enabled and nfsv4_enabled.' + items: + type: string + type: array + rootAccessEnabled: + description: Is root access permitted to this volume? + type: boolean + ruleIndex: + description: The index number of the rule. + type: number + unixReadOnly: + description: Is the file system on unix read only? + type: boolean + unixReadWrite: + description: Is the file system on unix read and write? + type: boolean + type: object + type: array + id: + description: The ID of the NetApp Volume. + type: string + keyVaultPrivateEndpointId: + description: The Private Endpoint ID for Key Vault, which is required + when using customer-managed keys. This is required with encryption_key_source. + Changing this forces a new resource to be created. + type: string + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + mountIpAddresses: + description: A list of IPv4 Addresses which should be used to + mount the volume. + items: + type: string + type: array + networkFeatures: + description: Indicates which network feature to use, accepted + values are Basic or Standard, it defaults to Basic if not defined. + This is a feature in public preview and for more information + about it and how to register, please refer to Configure network + features for an Azure NetApp Files volume. + type: string + poolName: + description: The name of the NetApp pool in which the NetApp Volume + should be created. 
Changing this forces a new resource to be + created. + type: string + protocols: + description: The target volume protocol expressed as a list. Supported + single value include CIFS, NFSv3, or NFSv4.1. If argument is + not defined it will default to NFSv3. Changing this forces a + new resource to be created and data will be lost. Dual protocol + scenario is supported for CIFS and NFSv3, for more information, + please refer to Create a dual-protocol volume for Azure NetApp + Files document. + items: + type: string + type: array + x-kubernetes-list-type: set + resourceGroupName: + description: The name of the resource group where the NetApp Volume + should be created. Changing this forces a new resource to be + created. + type: string + securityStyle: + description: Volume security style, accepted values are unix or + ntfs. If not provided, single-protocol volume is created defaulting + to unix if it is NFSv3 or NFSv4.1 volume, if CIFS, it will default + to ntfs. In a dual-protocol volume, if not provided, its value + will be ntfs. Changing this forces a new resource to be created. + type: string + serviceLevel: + description: The target performance of the file system. Valid + values include Premium, Standard, or Ultra. Changing this forces + a new resource to be created. + type: string + smbAccessBasedEnumerationEnabled: + description: Limits enumeration of files and folders (that is, + listing the contents) in SMB only to users with allowed access + on the share. For instance, if a user doesn't have access to + read a file or folder in a share with access-based enumeration + enabled, then the file or folder doesn't show up in directory + listings. Defaults to false. For more information, please refer + to Understand NAS share permissions in Azure NetApp Files + type: boolean + smbNonBrowsableEnabled: + description: Limits clients from browsing for an SMB share by + hiding the share from view in Windows Explorer or when listing + shares in "net view." 
Only end users that know the absolute + paths to the share are able to find the share. Defaults to false. + For more information, please refer to Understand NAS share permissions + in Azure NetApp Files + type: boolean + snapshotDirectoryVisible: + description: Specifies whether the .snapshot (NFS clients) or + ~snapshot (SMB clients) path of a volume is visible, default + value is true. + type: boolean + storageQuotaInGb: + description: The maximum Storage Quota allowed for a file system + in Gigabytes. + type: number + subnetId: + description: The ID of the Subnet the NetApp Volume resides in, + which must have the Microsoft.NetApp/volumes delegation. Changing + this forces a new resource to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + throughputInMibps: + description: Throughput of this volume in Mibps. + type: number + volumePath: + description: A unique file path for the volume. Used when creating + mount targets. Changing this forces a new resource to be created. + type: string + zone: + description: Specifies the Availability Zone in which the Volume + should be located. Possible values are 1, 2 and 3. Changing + this forces a new resource to be created. This feature is currently + in preview, for more information on how to enable it, please + refer to Manage availability zone volume placement for Azure + NetApp Files. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/network.azure.upbound.io_applicationgateways.yaml b/package/crds/network.azure.upbound.io_applicationgateways.yaml index af77a38ce..b98606139 100644 --- a/package/crds/network.azure.upbound.io_applicationgateways.yaml +++ b/package/crds/network.azure.upbound.io_applicationgateways.yaml @@ -4160,3 +4160,4085 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ApplicationGateway is the Schema for the ApplicationGateways + API. Manages an Application Gateway. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ApplicationGatewaySpec defines the desired state of ApplicationGateway + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + authenticationCertificate: + description: One or more authentication_certificate blocks as + defined below. + items: + properties: + dataSecretRef: + description: The contents of the Authentication Certificate + which should be used. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + name: + description: The Name of the Authentication Certificate + to use. + type: string + required: + - dataSecretRef + type: object + type: array + autoscaleConfiguration: + description: An autoscale_configuration block as defined below. + properties: + maxCapacity: + description: Maximum capacity for autoscaling. Accepted values + are in the range 2 to 125. + type: number + minCapacity: + description: Minimum capacity for autoscaling. Accepted values + are in the range 0 to 100. 
+ type: number + type: object + backendAddressPool: + description: One or more backend_address_pool blocks as defined + below. + items: + properties: + fqdns: + description: A list of FQDN's which should be part of the + Backend Address Pool. + items: + type: string + type: array + x-kubernetes-list-type: set + ipAddresses: + description: A list of IP Addresses which should be part + of the Backend Address Pool. + items: + type: string + type: array + x-kubernetes-list-type: set + name: + description: The name of the Backend Address Pool. + type: string + type: object + type: array + backendHttpSettings: + description: One or more backend_http_settings blocks as defined + below. + items: + properties: + affinityCookieName: + description: The name of the affinity cookie. + type: string + authenticationCertificate: + description: One or more authentication_certificate_backend + blocks as defined below. + items: + properties: + name: + description: The Name of the URL Path Map. + type: string + type: object + type: array + connectionDraining: + description: A connection_draining block as defined below. + properties: + drainTimeoutSec: + description: The number of seconds connection draining + is active. Acceptable values are from 1 second to + 3600 seconds. + type: number + enabled: + description: Is the Web Application Firewall enabled? + type: boolean + type: object + cookieBasedAffinity: + description: Is Cookie-Based Affinity enabled? Possible + values are Enabled and Disabled. + type: string + hostName: + description: Host header to be sent to the backend servers. + Cannot be set if pick_host_name_from_backend_address is + set to true. + type: string + name: + description: The name of the Backend HTTP Settings Collection. + type: string + path: + description: The Path which should be used as a prefix for + all HTTP requests. 
+ type: string + pickHostNameFromBackendAddress: + description: Whether host header should be picked from the + host name of the backend server. Defaults to false. + type: boolean + port: + description: The port which should be used for this Backend + HTTP Settings Collection. + type: number + probeName: + description: The name of an associated HTTP Probe. + type: string + protocol: + description: The Protocol which should be used. Possible + values are Http and Https. + type: string + requestTimeout: + description: The request timeout in seconds, which must + be between 1 and 86400 seconds. Defaults to 30. + type: number + trustedRootCertificateNames: + description: A list of trusted_root_certificate names. + items: + type: string + type: array + type: object + type: array + customErrorConfiguration: + description: One or more custom_error_configuration blocks as + defined below. + items: + properties: + customErrorPageUrl: + description: Error page URL of the application gateway customer + error. + type: string + statusCode: + description: Status code of the application gateway customer + error. Possible values are HttpStatus403 and HttpStatus502 + type: string + type: object + type: array + enableHttp2: + description: Is HTTP2 enabled on the application gateway resource? + Defaults to false. + type: boolean + fipsEnabled: + description: Is FIPS enabled on the Application Gateway? + type: boolean + firewallPolicyId: + description: The ID of the Web Application Firewall Policy. + type: string + forceFirewallPolicyAssociation: + description: Is the Firewall Policy associated with the Application + Gateway? + type: boolean + frontendIpConfiguration: + description: One or more frontend_ip_configuration blocks as defined + below. + items: + properties: + name: + description: The name of the Frontend IP Configuration. + type: string + privateIpAddress: + description: The Private IP Address to use for the Application + Gateway. 
+ type: string + privateIpAddressAllocation: + description: The Allocation Method for the Private IP Address. + Possible values are Dynamic and Static. Defaults to Dynamic. + type: string + privateLinkConfigurationName: + description: The name of the private link configuration + to use for this frontend IP configuration. + type: string + publicIpAddressId: + description: The ID of a Public IP Address which the Application + Gateway should use. The allocation method for the Public + IP Address depends on the sku of this Application Gateway. + Please refer to the Azure documentation for public IP + addresses for details. + type: string + publicIpAddressIdRef: + description: Reference to a PublicIP in network to populate + publicIpAddressId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publicIpAddressIdSelector: + description: Selector for a PublicIP in network to populate + publicIpAddressId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + subnetId: + description: The ID of the Subnet. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + frontendPort: + description: One or more frontend_port blocks as defined below. + items: + properties: + name: + description: The name of the Frontend Port. + type: string + port: + description: The port used for this Frontend Port. + type: number + type: object + type: array + gatewayIpConfiguration: + description: One or more gateway_ip_configuration blocks as defined + below. + items: + properties: + name: + description: The Name of this Gateway IP Configuration. + type: string + subnetId: + description: The ID of the Subnet which the Application + Gateway should be connected to. 
+ type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + global: + description: A global block as defined below. + properties: + requestBufferingEnabled: + description: Whether Application Gateway's Request buffer + is enabled. + type: boolean + responseBufferingEnabled: + description: Whether Application Gateway's Response buffer + is enabled. + type: boolean + type: object + httpListener: + description: One or more http_listener blocks as defined below. + items: + properties: + customErrorConfiguration: + description: One or more custom_error_configuration blocks + as defined below. + items: + properties: + customErrorPageUrl: + description: Error page URL of the application gateway + customer error. + type: string + statusCode: + description: A list of allowed status codes for this + Health Probe. + type: string + type: object + type: array + firewallPolicyId: + description: The ID of the Web Application Firewall Policy + which should be used for this HTTP Listener. + type: string + frontendIpConfigurationName: + description: The Name of the Frontend IP Configuration used + for this HTTP Listener. + type: string + frontendPortName: + description: The Name of the Frontend Port use for this + HTTP Listener. + type: string + hostName: + description: The Hostname which should be used for this + HTTP Listener. Setting this value changes Listener Type + to 'Multi site'. + type: string + hostNames: + description: A list of Hostname(s) should be used for this + HTTP Listener. It allows special wildcard characters. + items: + type: string + type: array + x-kubernetes-list-type: set + name: + description: The Name of the HTTP Listener. + type: string + protocol: + description: The Protocol to use for this HTTP Listener. 
+ Possible values are Http and Https. + type: string + requireSni: + description: Should Server Name Indication be Required? + Defaults to false. + type: boolean + sslCertificateName: + description: The name of the associated SSL Certificate + which should be used for this HTTP Listener. + type: string + sslProfileName: + description: The name of the associated SSL Profile which + should be used for this HTTP Listener. + type: string + type: object + type: array + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Application Gateway. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Application Gateway. Only + possible value is UserAssigned. + type: string + type: object + location: + description: The Azure region where the Application Gateway should + exist. Changing this forces a new resource to be created. + type: string + privateLinkConfiguration: + description: One or more private_link_configuration blocks as + defined below. + items: + properties: + ipConfiguration: + description: One or more ip_configuration blocks as defined + below. + items: + properties: + name: + description: The Name of the URL Path Map. + type: string + primary: + description: Is this the Primary IP Configuration? + type: boolean + privateIpAddress: + description: The Static IP Address which should be + used. + type: string + privateIpAddressAllocation: + description: The allocation method used for the Private + IP Address. Possible values are Dynamic and Static. + type: string + subnetId: + description: The ID of the subnet the private link + configuration should connect to. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + name: + description: The name of the private link configuration. + type: string + type: object + type: array + probe: + description: One or more probe blocks as defined below. + items: + properties: + host: + description: The Hostname used for this Probe. If the Application + Gateway is configured for a single site, by default the + Host name should be specified as 127.0.0.1, unless otherwise + configured in custom probe. Cannot be set if pick_host_name_from_backend_http_settings + is set to true. + type: string + interval: + description: The Interval between two consecutive probes + in seconds. Possible values range from 1 second to a maximum + of 86,400 seconds. + type: number + match: + description: A match block as defined above. + properties: + body: + description: A snippet from the Response Body which + must be present in the Response. + type: string + statusCode: + description: A list of allowed status codes for this + Health Probe. + items: + type: string + type: array + type: object + minimumServers: + description: The minimum number of servers that are always + marked as healthy. Defaults to 0. + type: number + name: + description: The Name of the Probe. + type: string + path: + description: The Path used for this Probe. + type: string + pickHostNameFromBackendHttpSettings: + description: Whether the host header should be picked from + the backend HTTP settings. Defaults to false. + type: boolean + port: + description: Custom port which will be used for probing + the backend servers. The valid value ranges from 1 to + 65535. In case not set, port from HTTP settings will be + used. This property is valid for Standard_v2 and WAF_v2 + only. + type: number + protocol: + description: The Protocol used for this Probe. Possible + values are Http and Https. 
+ type: string + timeout: + description: The Timeout used for this Probe, which indicates + when a probe becomes unhealthy. Possible values range + from 1 second to a maximum of 86,400 seconds. + type: number + unhealthyThreshold: + description: The Unhealthy Threshold for this Probe, which + indicates the amount of retries which should be attempted + before a node is deemed unhealthy. Possible values are + from 1 to 20. + type: number + type: object + type: array + redirectConfiguration: + description: One or more redirect_configuration blocks as defined + below. + items: + properties: + includePath: + description: Whether to include the path in the redirected + URL. Defaults to false + type: boolean + includeQueryString: + description: Whether to include the query string in the + redirected URL. Default to false + type: boolean + name: + description: Unique name of the redirect configuration block + type: string + redirectType: + description: The type of redirect. Possible values are Permanent, + Temporary, Found and SeeOther + type: string + targetListenerName: + description: The name of the listener to redirect to. Cannot + be set if target_url is set. + type: string + targetUrl: + description: The URL to redirect the request to. Cannot + be set if target_listener_name is set. + type: string + type: object + type: array + requestRoutingRule: + description: One or more request_routing_rule blocks as defined + below. + items: + properties: + backendAddressPoolName: + description: The Name of the Backend Address Pool which + should be used for this Routing Rule. Cannot be set if + redirect_configuration_name is set. + type: string + backendHttpSettingsName: + description: The Name of the Backend HTTP Settings Collection + which should be used for this Routing Rule. Cannot be + set if redirect_configuration_name is set. + type: string + httpListenerName: + description: The Name of the HTTP Listener which should + be used for this Routing Rule. 
+ type: string + name: + description: The Name of this Request Routing Rule. + type: string + priority: + description: Rule evaluation order can be dictated by specifying + an integer value from 1 to 20000 with 1 being the highest + priority and 20000 being the lowest priority. + type: number + redirectConfigurationName: + description: The Name of the Redirect Configuration which + should be used for this Routing Rule. Cannot be set if + either backend_address_pool_name or backend_http_settings_name + is set. + type: string + rewriteRuleSetName: + description: The Name of the Rewrite Rule Set which should + be used for this Routing Rule. Only valid for v2 SKUs. + type: string + ruleType: + description: The Type of Routing that should be used for + this Rule. Possible values are Basic and PathBasedRouting. + type: string + urlPathMapName: + description: The Name of the URL Path Map which should be + associated with this Routing Rule. + type: string + type: object + type: array + resourceGroupName: + description: The name of the resource group in which to the Application + Gateway should exist. Changing this forces a new resource to + be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + rewriteRuleSet: + description: One or more rewrite_rule_set blocks as defined below. + Only valid for v2 SKUs. + items: + properties: + name: + description: Unique name of the rewrite rule set block + type: string + rewriteRule: + description: One or more rewrite_rule blocks as defined + below. + items: + properties: + condition: + description: One or more condition blocks as defined + above. 
+ items: + properties: + ignoreCase: + description: Perform a case in-sensitive comparison. + Defaults to false + type: boolean + negate: + description: Negate the result of the condition + evaluation. Defaults to false + type: boolean + pattern: + description: The pattern, either fixed string + or regular expression, that evaluates the + truthfulness of the condition. + type: string + variable: + description: The variable of the condition. + type: string + type: object + type: array + name: + description: The Name of the URL Path Map. + type: string + requestHeaderConfiguration: + description: One or more request_header_configuration + blocks as defined above. + items: + properties: + headerName: + description: Header name of the header configuration. + type: string + headerValue: + description: Header value of the header configuration. + To delete a response header set this property + to an empty string. + type: string + type: object + type: array + responseHeaderConfiguration: + description: One or more response_header_configuration + blocks as defined above. + items: + properties: + headerName: + description: Header name of the header configuration. + type: string + headerValue: + description: Header value of the header configuration. + To delete a response header set this property + to an empty string. + type: string + type: object + type: array + ruleSequence: + description: Rule sequence of the rewrite rule that + determines the order of execution in a set. + type: number + url: + description: One url block as defined below + properties: + components: + description: The components used to rewrite the + URL. Possible values are path_only and query_string_only + to limit the rewrite to the URL Path or URL + Query String only. + type: string + path: + description: The URL path to rewrite. + type: string + queryString: + description: The query string to rewrite. 
+ type: string + reroute: + description: Whether the URL path map should be + reevaluated after this rewrite has been applied. + More info on rewrite configuration + type: boolean + type: object + type: object + type: array + type: object + type: array + sku: + description: A sku block as defined below. + properties: + capacity: + description: The Capacity of the SKU to use for this Application + Gateway. When using a V1 SKU this value must be between + 1 and 32, and 1 to 125 for a V2 SKU. This property is optional + if autoscale_configuration is set. + type: number + name: + description: The Name of the SKU to use for this Application + Gateway. Possible values are Standard_Small, Standard_Medium, + Standard_Large, Standard_v2, WAF_Medium, WAF_Large, and + WAF_v2. + type: string + tier: + description: The Tier of the SKU to use for this Application + Gateway. Possible values are Standard, Standard_v2, WAF + and WAF_v2. + type: string + type: object + sslCertificate: + description: One or more ssl_certificate blocks as defined below. + items: + properties: + dataSecretRef: + description: The base64-encoded PFX certificate data. Required + if key_vault_secret_id is not set. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + keyVaultSecretId: + description: The Secret ID of (base-64 encoded unencrypted + pfx) the Secret or Certificate object stored in Azure + KeyVault. You need to enable soft delete for Key Vault + to use this feature. Required if data is not set. + type: string + name: + description: The Name of the SSL certificate that is unique + within this Application Gateway + type: string + passwordSecretRef: + description: Password for the pfx file specified in data. + Required if data is set. + properties: + key: + description: The key to select. 
+ type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + sslPolicy: + description: a ssl_policy block as defined below. + properties: + cipherSuites: + description: 'A List of accepted cipher suites. Possible values + are: TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA, TLS_DHE_DSS_WITH_AES_128_CBC_SHA, + TLS_DHE_DSS_WITH_AES_128_CBC_SHA256, TLS_DHE_DSS_WITH_AES_256_CBC_SHA, + TLS_DHE_DSS_WITH_AES_256_CBC_SHA256, TLS_DHE_RSA_WITH_AES_128_CBC_SHA, + TLS_DHE_RSA_WITH_AES_128_GCM_SHA256, TLS_DHE_RSA_WITH_AES_256_CBC_SHA, + TLS_DHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, + TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, + TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_RSA_WITH_3DES_EDE_CBC_SHA, + TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA256, + TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, + TLS_RSA_WITH_AES_256_CBC_SHA256 and TLS_RSA_WITH_AES_256_GCM_SHA384.' + items: + type: string + type: array + disabledProtocols: + description: A list of SSL Protocols which should be disabled + on this Application Gateway. Possible values are TLSv1_0, + TLSv1_1, TLSv1_2 and TLSv1_3. + items: + type: string + type: array + minProtocolVersion: + description: The minimal TLS version. Possible values are + TLSv1_0, TLSv1_1, TLSv1_2 and TLSv1_3. + type: string + policyName: + description: The Name of the Policy e.g. AppGwSslPolicy20170401S. + Required if policy_type is set to Predefined. 
Possible values + can change over time and are published here https://docs.microsoft.com/azure/application-gateway/application-gateway-ssl-policy-overview. + Not compatible with disabled_protocols. + type: string + policyType: + description: The Type of the Policy. Possible values are Predefined, + Custom and CustomV2. + type: string + type: object + sslProfile: + description: One or more ssl_profile blocks as defined below. + items: + properties: + name: + description: The name of the SSL Profile that is unique + within this Application Gateway. + type: string + sslPolicy: + description: a ssl_policy block as defined below. + properties: + cipherSuites: + description: 'A List of accepted cipher suites. Possible + values are: TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA, TLS_DHE_DSS_WITH_AES_128_CBC_SHA, + TLS_DHE_DSS_WITH_AES_128_CBC_SHA256, TLS_DHE_DSS_WITH_AES_256_CBC_SHA, + TLS_DHE_DSS_WITH_AES_256_CBC_SHA256, TLS_DHE_RSA_WITH_AES_128_CBC_SHA, + TLS_DHE_RSA_WITH_AES_128_GCM_SHA256, TLS_DHE_RSA_WITH_AES_256_CBC_SHA, + TLS_DHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, + TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, + TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_RSA_WITH_3DES_EDE_CBC_SHA, + TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA256, + TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, + TLS_RSA_WITH_AES_256_CBC_SHA256 and TLS_RSA_WITH_AES_256_GCM_SHA384.' + items: + type: string + type: array + disabledProtocols: + description: A list of SSL Protocols which should be + disabled on this Application Gateway. Possible values + are TLSv1_0, TLSv1_1, TLSv1_2 and TLSv1_3. 
+ items: + type: string + type: array + minProtocolVersion: + description: The minimal TLS version. Possible values + are TLSv1_0, TLSv1_1, TLSv1_2 and TLSv1_3. + type: string + policyName: + description: The Name of the Policy e.g. AppGwSslPolicy20170401S. + Required if policy_type is set to Predefined. Possible + values can change over time and are published here + https://docs.microsoft.com/azure/application-gateway/application-gateway-ssl-policy-overview. + Not compatible with disabled_protocols. + type: string + policyType: + description: The Type of the Policy. Possible values + are Predefined, Custom and CustomV2. + type: string + type: object + trustedClientCertificateNames: + description: The name of the Trusted Client Certificate + that will be used to authenticate requests from clients. + items: + type: string + type: array + verifyClientCertIssuerDn: + description: Should client certificate issuer DN be verified? + Defaults to false. + type: boolean + verifyClientCertificateRevocation: + description: Specify the method to check client certificate + revocation status. Possible value is OCSP. + type: string + type: object + type: array + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + trustedClientCertificate: + description: One or more trusted_client_certificate blocks as + defined below. + items: + properties: + dataSecretRef: + description: The base-64 encoded certificate. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + name: + description: The name of the Trusted Client Certificate + that is unique within this Application Gateway. 
+ type: string + required: + - dataSecretRef + type: object + type: array + trustedRootCertificate: + description: One or more trusted_root_certificate blocks as defined + below. + items: + properties: + dataSecretRef: + description: The contents of the Trusted Root Certificate + which should be used. Required if key_vault_secret_id + is not set. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + keyVaultSecretId: + description: The Secret ID of (base-64 encoded unencrypted + pfx) Secret or Certificate object stored in Azure KeyVault. + You need to enable soft delete for the Key Vault to use + this feature. Required if data is not set. + type: string + name: + description: The Name of the Trusted Root Certificate to + use. + type: string + type: object + type: array + urlPathMap: + description: One or more url_path_map blocks as defined below. + items: + properties: + defaultBackendAddressPoolName: + description: The Name of the Default Backend Address Pool + which should be used for this URL Path Map. Cannot be + set if default_redirect_configuration_name is set. + type: string + defaultBackendHttpSettingsName: + description: The Name of the Default Backend HTTP Settings + Collection which should be used for this URL Path Map. + Cannot be set if default_redirect_configuration_name is + set. + type: string + defaultRedirectConfigurationName: + description: The Name of the Default Redirect Configuration + which should be used for this URL Path Map. Cannot be + set if either default_backend_address_pool_name or default_backend_http_settings_name + is set. + type: string + defaultRewriteRuleSetName: + description: The Name of the Default Rewrite Rule Set which + should be used for this URL Path Map. Only valid for v2 + SKUs. 
+ type: string + name: + description: The Name of the URL Path Map. + type: string + pathRule: + description: One or more path_rule blocks as defined above. + items: + properties: + backendAddressPoolName: + description: The Name of the Backend Address Pool + which should be used for this Routing Rule. Cannot + be set if redirect_configuration_name is set. + type: string + backendHttpSettingsName: + description: The Name of the Backend HTTP Settings + Collection which should be used for this Routing + Rule. Cannot be set if redirect_configuration_name + is set. + type: string + firewallPolicyId: + description: The ID of the Web Application Firewall + Policy which should be used as an HTTP Listener. + type: string + name: + description: The Name of the URL Path Map. + type: string + paths: + description: A list of Paths used in this Path Rule. + items: + type: string + type: array + redirectConfigurationName: + description: The Name of the Redirect Configuration + which should be used for this Routing Rule. Cannot + be set if either backend_address_pool_name or backend_http_settings_name + is set. + type: string + rewriteRuleSetName: + description: The Name of the Rewrite Rule Set which + should be used for this Routing Rule. Only valid + for v2 SKUs. + type: string + type: object + type: array + type: object + type: array + wafConfiguration: + description: A waf_configuration block as defined below. + properties: + disabledRuleGroup: + description: One or more disabled_rule_group blocks as defined + below. + items: + properties: + ruleGroupName: + description: The rule group where specific rules should + be disabled. 
Possible values are BadBots, crs_20_protocol_violations, + crs_21_protocol_anomalies, crs_23_request_limits, + crs_30_http_policy, crs_35_bad_robots, crs_40_generic_attacks, + crs_41_sql_injection_attacks, crs_41_xss_attacks, + crs_42_tight_security, crs_45_trojans, crs_49_inbound_blocking, + General, GoodBots, KnownBadBots, Known-CVEs, REQUEST-911-METHOD-ENFORCEMENT, + REQUEST-913-SCANNER-DETECTION, REQUEST-920-PROTOCOL-ENFORCEMENT, + REQUEST-921-PROTOCOL-ATTACK, REQUEST-930-APPLICATION-ATTACK-LFI, + REQUEST-931-APPLICATION-ATTACK-RFI, REQUEST-932-APPLICATION-ATTACK-RCE, + REQUEST-933-APPLICATION-ATTACK-PHP, REQUEST-941-APPLICATION-ATTACK-XSS, + REQUEST-942-APPLICATION-ATTACK-SQLI, REQUEST-943-APPLICATION-ATTACK-SESSION-FIXATION, + REQUEST-944-APPLICATION-ATTACK-JAVA, UnknownBots, + METHOD-ENFORCEMENT, PROTOCOL-ENFORCEMENT, PROTOCOL-ATTACK, + LFI, RFI, RCE, PHP, NODEJS, XSS, SQLI, FIX, JAVA, + MS-ThreatIntel-WebShells, MS-ThreatIntel-AppSec, MS-ThreatIntel-SQLI + and MS-ThreatIntel-CVEs. + type: string + rules: + description: A list of rules which should be disabled + in that group. Disables all rules in the specified + group if rules is not specified. + items: + type: number + type: array + type: object + type: array + enabled: + description: Is the Web Application Firewall enabled? + type: boolean + exclusion: + description: One or more exclusion blocks as defined below. + items: + properties: + matchVariable: + description: Match variable of the exclusion rule to + exclude header, cookie or GET arguments. Possible + values are RequestArgKeys, RequestArgNames, RequestArgValues, + RequestCookieKeys, RequestCookieNames, RequestCookieValues, + RequestHeaderKeys, RequestHeaderNames and RequestHeaderValues + type: string + selector: + description: String value which will be used for the + filter operation. 
If empty will exclude all traffic + on this match_variable + type: string + selectorMatchOperator: + description: Operator which will be used to search in + the variable content. Possible values are Contains, + EndsWith, Equals, EqualsAny and StartsWith. If empty + will exclude all traffic on this match_variable + type: string + type: object + type: array + fileUploadLimitMb: + description: The File Upload Limit in MB. Accepted values + are in the range 1MB to 750MB for the WAF_v2 SKU, and 1MB + to 500MB for all other SKUs. Defaults to 100MB. + type: number + firewallMode: + description: The Web Application Firewall Mode. Possible values + are Detection and Prevention. + type: string + maxRequestBodySizeKb: + description: The Maximum Request Body Size in KB. Accepted + values are in the range 1KB to 128KB. Defaults to 128KB. + type: number + requestBodyCheck: + description: Is Request Body Inspection enabled? Defaults + to true. + type: boolean + ruleSetType: + description: The Type of the Rule Set used for this Web Application + Firewall. Possible values are OWASP, Microsoft_BotManagerRuleSet + and Microsoft_DefaultRuleSet. Defaults to OWASP. + type: string + ruleSetVersion: + description: The Version of the Rule Set used for this Web + Application Firewall. Possible values are 0.1, 1.0, 2.1, + 2.2.9, 3.0, 3.1 and 3.2. + type: string + type: object + zones: + description: Specifies a list of Availability Zones in which this + Application Gateway should be located. Changing this forces + a new Application Gateway to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. 
+ The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + authenticationCertificate: + description: One or more authentication_certificate blocks as + defined below. + items: + properties: + name: + description: The Name of the Authentication Certificate + to use. + type: string + type: object + type: array + autoscaleConfiguration: + description: An autoscale_configuration block as defined below. + properties: + maxCapacity: + description: Maximum capacity for autoscaling. Accepted values + are in the range 2 to 125. + type: number + minCapacity: + description: Minimum capacity for autoscaling. Accepted values + are in the range 0 to 100. + type: number + type: object + backendAddressPool: + description: One or more backend_address_pool blocks as defined + below. + items: + properties: + fqdns: + description: A list of FQDN's which should be part of the + Backend Address Pool. + items: + type: string + type: array + x-kubernetes-list-type: set + ipAddresses: + description: A list of IP Addresses which should be part + of the Backend Address Pool. + items: + type: string + type: array + x-kubernetes-list-type: set + name: + description: The name of the Backend Address Pool. + type: string + type: object + type: array + backendHttpSettings: + description: One or more backend_http_settings blocks as defined + below. + items: + properties: + affinityCookieName: + description: The name of the affinity cookie. + type: string + authenticationCertificate: + description: One or more authentication_certificate_backend + blocks as defined below. + items: + properties: + name: + description: The Name of the URL Path Map. 
+ type: string + type: object + type: array + connectionDraining: + description: A connection_draining block as defined below. + properties: + drainTimeoutSec: + description: The number of seconds connection draining + is active. Acceptable values are from 1 second to + 3600 seconds. + type: number + enabled: + description: Is the Web Application Firewall enabled? + type: boolean + type: object + cookieBasedAffinity: + description: Is Cookie-Based Affinity enabled? Possible + values are Enabled and Disabled. + type: string + hostName: + description: Host header to be sent to the backend servers. + Cannot be set if pick_host_name_from_backend_address is + set to true. + type: string + name: + description: The name of the Backend HTTP Settings Collection. + type: string + path: + description: The Path which should be used as a prefix for + all HTTP requests. + type: string + pickHostNameFromBackendAddress: + description: Whether host header should be picked from the + host name of the backend server. Defaults to false. + type: boolean + port: + description: The port which should be used for this Backend + HTTP Settings Collection. + type: number + probeName: + description: The name of an associated HTTP Probe. + type: string + protocol: + description: The Protocol which should be used. Possible + values are Http and Https. + type: string + requestTimeout: + description: The request timeout in seconds, which must + be between 1 and 86400 seconds. Defaults to 30. + type: number + trustedRootCertificateNames: + description: A list of trusted_root_certificate names. + items: + type: string + type: array + type: object + type: array + customErrorConfiguration: + description: One or more custom_error_configuration blocks as + defined below. + items: + properties: + customErrorPageUrl: + description: Error page URL of the application gateway customer + error. + type: string + statusCode: + description: Status code of the application gateway customer + error. 
Possible values are HttpStatus403 and HttpStatus502 + type: string + type: object + type: array + enableHttp2: + description: Is HTTP2 enabled on the application gateway resource? + Defaults to false. + type: boolean + fipsEnabled: + description: Is FIPS enabled on the Application Gateway? + type: boolean + firewallPolicyId: + description: The ID of the Web Application Firewall Policy. + type: string + forceFirewallPolicyAssociation: + description: Is the Firewall Policy associated with the Application + Gateway? + type: boolean + frontendIpConfiguration: + description: One or more frontend_ip_configuration blocks as defined + below. + items: + properties: + name: + description: The name of the Frontend IP Configuration. + type: string + privateIpAddress: + description: The Private IP Address to use for the Application + Gateway. + type: string + privateIpAddressAllocation: + description: The Allocation Method for the Private IP Address. + Possible values are Dynamic and Static. Defaults to Dynamic. + type: string + privateLinkConfigurationName: + description: The name of the private link configuration + to use for this frontend IP configuration. + type: string + publicIpAddressId: + description: The ID of a Public IP Address which the Application + Gateway should use. The allocation method for the Public + IP Address depends on the sku of this Application Gateway. + Please refer to the Azure documentation for public IP + addresses for details. + type: string + publicIpAddressIdRef: + description: Reference to a PublicIP in network to populate + publicIpAddressId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publicIpAddressIdSelector: + description: Selector for a PublicIP in network to populate + publicIpAddressId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + subnetId: + description: The ID of the Subnet. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + frontendPort: + description: One or more frontend_port blocks as defined below. + items: + properties: + name: + description: The name of the Frontend Port. + type: string + port: + description: The port used for this Frontend Port. + type: number + type: object + type: array + gatewayIpConfiguration: + description: One or more gateway_ip_configuration blocks as defined + below. + items: + properties: + name: + description: The Name of this Gateway IP Configuration. + type: string + subnetId: + description: The ID of the Subnet which the Application + Gateway should be connected to. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + global: + description: A global block as defined below. + properties: + requestBufferingEnabled: + description: Whether Application Gateway's Request buffer + is enabled. + type: boolean + responseBufferingEnabled: + description: Whether Application Gateway's Response buffer + is enabled. + type: boolean + type: object + httpListener: + description: One or more http_listener blocks as defined below. + items: + properties: + customErrorConfiguration: + description: One or more custom_error_configuration blocks + as defined below. + items: + properties: + customErrorPageUrl: + description: Error page URL of the application gateway + customer error. + type: string + statusCode: + description: A list of allowed status codes for this + Health Probe. + type: string + type: object + type: array + firewallPolicyId: + description: The ID of the Web Application Firewall Policy + which should be used for this HTTP Listener. 
+ type: string + frontendIpConfigurationName: + description: The Name of the Frontend IP Configuration used + for this HTTP Listener. + type: string + frontendPortName: + description: The Name of the Frontend Port use for this + HTTP Listener. + type: string + hostName: + description: The Hostname which should be used for this + HTTP Listener. Setting this value changes Listener Type + to 'Multi site'. + type: string + hostNames: + description: A list of Hostname(s) should be used for this + HTTP Listener. It allows special wildcard characters. + items: + type: string + type: array + x-kubernetes-list-type: set + name: + description: The Name of the HTTP Listener. + type: string + protocol: + description: The Protocol to use for this HTTP Listener. + Possible values are Http and Https. + type: string + requireSni: + description: Should Server Name Indication be Required? + Defaults to false. + type: boolean + sslCertificateName: + description: The name of the associated SSL Certificate + which should be used for this HTTP Listener. + type: string + sslProfileName: + description: The name of the associated SSL Profile which + should be used for this HTTP Listener. + type: string + type: object + type: array + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Application Gateway. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Application Gateway. Only + possible value is UserAssigned. + type: string + type: object + location: + description: The Azure region where the Application Gateway should + exist. Changing this forces a new resource to be created. + type: string + privateLinkConfiguration: + description: One or more private_link_configuration blocks as + defined below. 
+ items: + properties: + ipConfiguration: + description: One or more ip_configuration blocks as defined + below. + items: + properties: + name: + description: The Name of the URL Path Map. + type: string + primary: + description: Is this the Primary IP Configuration? + type: boolean + privateIpAddress: + description: The Static IP Address which should be + used. + type: string + privateIpAddressAllocation: + description: The allocation method used for the Private + IP Address. Possible values are Dynamic and Static. + type: string + subnetId: + description: The ID of the subnet the private link + configuration should connect to. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + name: + description: The name of the private link configuration. + type: string + type: object + type: array + probe: + description: One or more probe blocks as defined below. + items: + properties: + host: + description: The Hostname used for this Probe. If the Application + Gateway is configured for a single site, by default the + Host name should be specified as 127.0.0.1, unless otherwise + configured in custom probe. Cannot be set if pick_host_name_from_backend_http_settings + is set to true. + type: string + interval: + description: The Interval between two consecutive probes + in seconds. Possible values range from 1 second to a maximum + of 86,400 seconds. + type: number + match: + description: A match block as defined above. + properties: + body: + description: A snippet from the Response Body which + must be present in the Response. + type: string + statusCode: + description: A list of allowed status codes for this + Health Probe. 
+ items: + type: string + type: array + type: object + minimumServers: + description: The minimum number of servers that are always + marked as healthy. Defaults to 0. + type: number + name: + description: The Name of the Probe. + type: string + path: + description: The Path used for this Probe. + type: string + pickHostNameFromBackendHttpSettings: + description: Whether the host header should be picked from + the backend HTTP settings. Defaults to false. + type: boolean + port: + description: Custom port which will be used for probing + the backend servers. The valid value ranges from 1 to + 65535. In case not set, port from HTTP settings will be + used. This property is valid for Standard_v2 and WAF_v2 + only. + type: number + protocol: + description: The Protocol used for this Probe. Possible + values are Http and Https. + type: string + timeout: + description: The Timeout used for this Probe, which indicates + when a probe becomes unhealthy. Possible values range + from 1 second to a maximum of 86,400 seconds. + type: number + unhealthyThreshold: + description: The Unhealthy Threshold for this Probe, which + indicates the amount of retries which should be attempted + before a node is deemed unhealthy. Possible values are + from 1 to 20. + type: number + type: object + type: array + redirectConfiguration: + description: One or more redirect_configuration blocks as defined + below. + items: + properties: + includePath: + description: Whether to include the path in the redirected + URL. Defaults to false + type: boolean + includeQueryString: + description: Whether to include the query string in the + redirected URL. Default to false + type: boolean + name: + description: Unique name of the redirect configuration block + type: string + redirectType: + description: The type of redirect. Possible values are Permanent, + Temporary, Found and SeeOther + type: string + targetListenerName: + description: The name of the listener to redirect to. 
Cannot + be set if target_url is set. + type: string + targetUrl: + description: The URL to redirect the request to. Cannot + be set if target_listener_name is set. + type: string + type: object + type: array + requestRoutingRule: + description: One or more request_routing_rule blocks as defined + below. + items: + properties: + backendAddressPoolName: + description: The Name of the Backend Address Pool which + should be used for this Routing Rule. Cannot be set if + redirect_configuration_name is set. + type: string + backendHttpSettingsName: + description: The Name of the Backend HTTP Settings Collection + which should be used for this Routing Rule. Cannot be + set if redirect_configuration_name is set. + type: string + httpListenerName: + description: The Name of the HTTP Listener which should + be used for this Routing Rule. + type: string + name: + description: The Name of this Request Routing Rule. + type: string + priority: + description: Rule evaluation order can be dictated by specifying + an integer value from 1 to 20000 with 1 being the highest + priority and 20000 being the lowest priority. + type: number + redirectConfigurationName: + description: The Name of the Redirect Configuration which + should be used for this Routing Rule. Cannot be set if + either backend_address_pool_name or backend_http_settings_name + is set. + type: string + rewriteRuleSetName: + description: The Name of the Rewrite Rule Set which should + be used for this Routing Rule. Only valid for v2 SKUs. + type: string + ruleType: + description: The Type of Routing that should be used for + this Rule. Possible values are Basic and PathBasedRouting. + type: string + urlPathMapName: + description: The Name of the URL Path Map which should be + associated with this Routing Rule. + type: string + type: object + type: array + rewriteRuleSet: + description: One or more rewrite_rule_set blocks as defined below. + Only valid for v2 SKUs. 
+ items: + properties: + name: + description: Unique name of the rewrite rule set block + type: string + rewriteRule: + description: One or more rewrite_rule blocks as defined + below. + items: + properties: + condition: + description: One or more condition blocks as defined + above. + items: + properties: + ignoreCase: + description: Perform a case in-sensitive comparison. + Defaults to false + type: boolean + negate: + description: Negate the result of the condition + evaluation. Defaults to false + type: boolean + pattern: + description: The pattern, either fixed string + or regular expression, that evaluates the + truthfulness of the condition. + type: string + variable: + description: The variable of the condition. + type: string + type: object + type: array + name: + description: The Name of the URL Path Map. + type: string + requestHeaderConfiguration: + description: One or more request_header_configuration + blocks as defined above. + items: + properties: + headerName: + description: Header name of the header configuration. + type: string + headerValue: + description: Header value of the header configuration. + To delete a response header set this property + to an empty string. + type: string + type: object + type: array + responseHeaderConfiguration: + description: One or more response_header_configuration + blocks as defined above. + items: + properties: + headerName: + description: Header name of the header configuration. + type: string + headerValue: + description: Header value of the header configuration. + To delete a response header set this property + to an empty string. + type: string + type: object + type: array + ruleSequence: + description: Rule sequence of the rewrite rule that + determines the order of execution in a set. + type: number + url: + description: One url block as defined below + properties: + components: + description: The components used to rewrite the + URL. 
Possible values are path_only and query_string_only + to limit the rewrite to the URL Path or URL + Query String only. + type: string + path: + description: The URL path to rewrite. + type: string + queryString: + description: The query string to rewrite. + type: string + reroute: + description: Whether the URL path map should be + reevaluated after this rewrite has been applied. + More info on rewrite configuration + type: boolean + type: object + type: object + type: array + type: object + type: array + sku: + description: A sku block as defined below. + properties: + capacity: + description: The Capacity of the SKU to use for this Application + Gateway. When using a V1 SKU this value must be between + 1 and 32, and 1 to 125 for a V2 SKU. This property is optional + if autoscale_configuration is set. + type: number + name: + description: The Name of the SKU to use for this Application + Gateway. Possible values are Standard_Small, Standard_Medium, + Standard_Large, Standard_v2, WAF_Medium, WAF_Large, and + WAF_v2. + type: string + tier: + description: The Tier of the SKU to use for this Application + Gateway. Possible values are Standard, Standard_v2, WAF + and WAF_v2. + type: string + type: object + sslCertificate: + description: One or more ssl_certificate blocks as defined below. + items: + properties: + keyVaultSecretId: + description: The Secret ID of (base-64 encoded unencrypted + pfx) the Secret or Certificate object stored in Azure + KeyVault. You need to enable soft delete for Key Vault + to use this feature. Required if data is not set. + type: string + name: + description: The Name of the SSL certificate that is unique + within this Application Gateway + type: string + type: object + type: array + sslPolicy: + description: a ssl_policy block as defined below. + properties: + cipherSuites: + description: 'A List of accepted cipher suites. 
Possible values + are: TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA, TLS_DHE_DSS_WITH_AES_128_CBC_SHA, + TLS_DHE_DSS_WITH_AES_128_CBC_SHA256, TLS_DHE_DSS_WITH_AES_256_CBC_SHA, + TLS_DHE_DSS_WITH_AES_256_CBC_SHA256, TLS_DHE_RSA_WITH_AES_128_CBC_SHA, + TLS_DHE_RSA_WITH_AES_128_GCM_SHA256, TLS_DHE_RSA_WITH_AES_256_CBC_SHA, + TLS_DHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, + TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, + TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_RSA_WITH_3DES_EDE_CBC_SHA, + TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA256, + TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, + TLS_RSA_WITH_AES_256_CBC_SHA256 and TLS_RSA_WITH_AES_256_GCM_SHA384.' + items: + type: string + type: array + disabledProtocols: + description: A list of SSL Protocols which should be disabled + on this Application Gateway. Possible values are TLSv1_0, + TLSv1_1, TLSv1_2 and TLSv1_3. + items: + type: string + type: array + minProtocolVersion: + description: The minimal TLS version. Possible values are + TLSv1_0, TLSv1_1, TLSv1_2 and TLSv1_3. + type: string + policyName: + description: The Name of the Policy e.g. AppGwSslPolicy20170401S. + Required if policy_type is set to Predefined. Possible values + can change over time and are published here https://docs.microsoft.com/azure/application-gateway/application-gateway-ssl-policy-overview. + Not compatible with disabled_protocols. + type: string + policyType: + description: The Type of the Policy. Possible values are Predefined, + Custom and CustomV2. 
+ type: string + type: object + sslProfile: + description: One or more ssl_profile blocks as defined below. + items: + properties: + name: + description: The name of the SSL Profile that is unique + within this Application Gateway. + type: string + sslPolicy: + description: a ssl_policy block as defined below. + properties: + cipherSuites: + description: 'A List of accepted cipher suites. Possible + values are: TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA, TLS_DHE_DSS_WITH_AES_128_CBC_SHA, + TLS_DHE_DSS_WITH_AES_128_CBC_SHA256, TLS_DHE_DSS_WITH_AES_256_CBC_SHA, + TLS_DHE_DSS_WITH_AES_256_CBC_SHA256, TLS_DHE_RSA_WITH_AES_128_CBC_SHA, + TLS_DHE_RSA_WITH_AES_128_GCM_SHA256, TLS_DHE_RSA_WITH_AES_256_CBC_SHA, + TLS_DHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, + TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, + TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_RSA_WITH_3DES_EDE_CBC_SHA, + TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA256, + TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, + TLS_RSA_WITH_AES_256_CBC_SHA256 and TLS_RSA_WITH_AES_256_GCM_SHA384.' + items: + type: string + type: array + disabledProtocols: + description: A list of SSL Protocols which should be + disabled on this Application Gateway. Possible values + are TLSv1_0, TLSv1_1, TLSv1_2 and TLSv1_3. + items: + type: string + type: array + minProtocolVersion: + description: The minimal TLS version. Possible values + are TLSv1_0, TLSv1_1, TLSv1_2 and TLSv1_3. + type: string + policyName: + description: The Name of the Policy e.g. AppGwSslPolicy20170401S. + Required if policy_type is set to Predefined. 
Possible + values can change over time and are published here + https://docs.microsoft.com/azure/application-gateway/application-gateway-ssl-policy-overview. + Not compatible with disabled_protocols. + type: string + policyType: + description: The Type of the Policy. Possible values + are Predefined, Custom and CustomV2. + type: string + type: object + trustedClientCertificateNames: + description: The name of the Trusted Client Certificate + that will be used to authenticate requests from clients. + items: + type: string + type: array + verifyClientCertIssuerDn: + description: Should client certificate issuer DN be verified? + Defaults to false. + type: boolean + verifyClientCertificateRevocation: + description: Specify the method to check client certificate + revocation status. Possible value is OCSP. + type: string + type: object + type: array + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + trustedClientCertificate: + description: One or more trusted_client_certificate blocks as + defined below. + items: + properties: + name: + description: The name of the Trusted Client Certificate + that is unique within this Application Gateway. + type: string + type: object + type: array + trustedRootCertificate: + description: One or more trusted_root_certificate blocks as defined + below. + items: + properties: + keyVaultSecretId: + description: The Secret ID of (base-64 encoded unencrypted + pfx) Secret or Certificate object stored in Azure KeyVault. + You need to enable soft delete for the Key Vault to use + this feature. Required if data is not set. + type: string + name: + description: The Name of the Trusted Root Certificate to + use. + type: string + type: object + type: array + urlPathMap: + description: One or more url_path_map blocks as defined below. 
+ items: + properties: + defaultBackendAddressPoolName: + description: The Name of the Default Backend Address Pool + which should be used for this URL Path Map. Cannot be + set if default_redirect_configuration_name is set. + type: string + defaultBackendHttpSettingsName: + description: The Name of the Default Backend HTTP Settings + Collection which should be used for this URL Path Map. + Cannot be set if default_redirect_configuration_name is + set. + type: string + defaultRedirectConfigurationName: + description: The Name of the Default Redirect Configuration + which should be used for this URL Path Map. Cannot be + set if either default_backend_address_pool_name or default_backend_http_settings_name + is set. + type: string + defaultRewriteRuleSetName: + description: The Name of the Default Rewrite Rule Set which + should be used for this URL Path Map. Only valid for v2 + SKUs. + type: string + name: + description: The Name of the URL Path Map. + type: string + pathRule: + description: One or more path_rule blocks as defined above. + items: + properties: + backendAddressPoolName: + description: The Name of the Backend Address Pool + which should be used for this Routing Rule. Cannot + be set if redirect_configuration_name is set. + type: string + backendHttpSettingsName: + description: The Name of the Backend HTTP Settings + Collection which should be used for this Routing + Rule. Cannot be set if redirect_configuration_name + is set. + type: string + firewallPolicyId: + description: The ID of the Web Application Firewall + Policy which should be used as an HTTP Listener. + type: string + name: + description: The Name of the URL Path Map. + type: string + paths: + description: A list of Paths used in this Path Rule. + items: + type: string + type: array + redirectConfigurationName: + description: The Name of the Redirect Configuration + which should be used for this Routing Rule. 
Cannot + be set if either backend_address_pool_name or backend_http_settings_name + is set. + type: string + rewriteRuleSetName: + description: The Name of the Rewrite Rule Set which + should be used for this Routing Rule. Only valid + for v2 SKUs. + type: string + type: object + type: array + type: object + type: array + wafConfiguration: + description: A waf_configuration block as defined below. + properties: + disabledRuleGroup: + description: One or more disabled_rule_group blocks as defined + below. + items: + properties: + ruleGroupName: + description: The rule group where specific rules should + be disabled. Possible values are BadBots, crs_20_protocol_violations, + crs_21_protocol_anomalies, crs_23_request_limits, + crs_30_http_policy, crs_35_bad_robots, crs_40_generic_attacks, + crs_41_sql_injection_attacks, crs_41_xss_attacks, + crs_42_tight_security, crs_45_trojans, crs_49_inbound_blocking, + General, GoodBots, KnownBadBots, Known-CVEs, REQUEST-911-METHOD-ENFORCEMENT, + REQUEST-913-SCANNER-DETECTION, REQUEST-920-PROTOCOL-ENFORCEMENT, + REQUEST-921-PROTOCOL-ATTACK, REQUEST-930-APPLICATION-ATTACK-LFI, + REQUEST-931-APPLICATION-ATTACK-RFI, REQUEST-932-APPLICATION-ATTACK-RCE, + REQUEST-933-APPLICATION-ATTACK-PHP, REQUEST-941-APPLICATION-ATTACK-XSS, + REQUEST-942-APPLICATION-ATTACK-SQLI, REQUEST-943-APPLICATION-ATTACK-SESSION-FIXATION, + REQUEST-944-APPLICATION-ATTACK-JAVA, UnknownBots, + METHOD-ENFORCEMENT, PROTOCOL-ENFORCEMENT, PROTOCOL-ATTACK, + LFI, RFI, RCE, PHP, NODEJS, XSS, SQLI, FIX, JAVA, + MS-ThreatIntel-WebShells, MS-ThreatIntel-AppSec, MS-ThreatIntel-SQLI + and MS-ThreatIntel-CVEs. + type: string + rules: + description: A list of rules which should be disabled + in that group. Disables all rules in the specified + group if rules is not specified. + items: + type: number + type: array + type: object + type: array + enabled: + description: Is the Web Application Firewall enabled? 
+ type: boolean + exclusion: + description: One or more exclusion blocks as defined below. + items: + properties: + matchVariable: + description: Match variable of the exclusion rule to + exclude header, cookie or GET arguments. Possible + values are RequestArgKeys, RequestArgNames, RequestArgValues, + RequestCookieKeys, RequestCookieNames, RequestCookieValues, + RequestHeaderKeys, RequestHeaderNames and RequestHeaderValues + type: string + selector: + description: String value which will be used for the + filter operation. If empty will exclude all traffic + on this match_variable + type: string + selectorMatchOperator: + description: Operator which will be used to search in + the variable content. Possible values are Contains, + EndsWith, Equals, EqualsAny and StartsWith. If empty + will exclude all traffic on this match_variable + type: string + type: object + type: array + fileUploadLimitMb: + description: The File Upload Limit in MB. Accepted values + are in the range 1MB to 750MB for the WAF_v2 SKU, and 1MB + to 500MB for all other SKUs. Defaults to 100MB. + type: number + firewallMode: + description: The Web Application Firewall Mode. Possible values + are Detection and Prevention. + type: string + maxRequestBodySizeKb: + description: The Maximum Request Body Size in KB. Accepted + values are in the range 1KB to 128KB. Defaults to 128KB. + type: number + requestBodyCheck: + description: Is Request Body Inspection enabled? Defaults + to true. + type: boolean + ruleSetType: + description: The Type of the Rule Set used for this Web Application + Firewall. Possible values are OWASP, Microsoft_BotManagerRuleSet + and Microsoft_DefaultRuleSet. Defaults to OWASP. + type: string + ruleSetVersion: + description: The Version of the Rule Set used for this Web + Application Firewall. Possible values are 0.1, 1.0, 2.1, + 2.2.9, 3.0, 3.1 and 3.2. 
+ type: string + type: object + zones: + description: Specifies a list of Availability Zones in which this + Application Gateway should be located. Changing this forces + a new Application Gateway to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.backendAddressPool is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.backendAddressPool) + || (has(self.initProvider) && has(self.initProvider.backendAddressPool))' + - message: spec.forProvider.backendHttpSettings is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.backendHttpSettings) + || (has(self.initProvider) && has(self.initProvider.backendHttpSettings))' + - message: spec.forProvider.frontendIpConfiguration is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.frontendIpConfiguration) + || (has(self.initProvider) && has(self.initProvider.frontendIpConfiguration))' + - message: spec.forProvider.frontendPort is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.frontendPort) + || (has(self.initProvider) && has(self.initProvider.frontendPort))' + - message: spec.forProvider.gatewayIpConfiguration is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.gatewayIpConfiguration) + || (has(self.initProvider) && has(self.initProvider.gatewayIpConfiguration))' + - message: spec.forProvider.httpListener is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.httpListener) + || 
(has(self.initProvider) && has(self.initProvider.httpListener))' + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.requestRoutingRule is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.requestRoutingRule) + || (has(self.initProvider) && has(self.initProvider.requestRoutingRule))' + - message: spec.forProvider.sku is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.sku) + || (has(self.initProvider) && has(self.initProvider.sku))' + status: + description: ApplicationGatewayStatus defines the observed state of ApplicationGateway. + properties: + atProvider: + properties: + authenticationCertificate: + description: One or more authentication_certificate blocks as + defined below. + items: + properties: + id: + description: The ID of the Authentication Certificate. + type: string + name: + description: The Name of the Authentication Certificate + to use. + type: string + type: object + type: array + autoscaleConfiguration: + description: An autoscale_configuration block as defined below. + properties: + maxCapacity: + description: Maximum capacity for autoscaling. Accepted values + are in the range 2 to 125. + type: number + minCapacity: + description: Minimum capacity for autoscaling. Accepted values + are in the range 0 to 100. + type: number + type: object + backendAddressPool: + description: One or more backend_address_pool blocks as defined + below. 
+ items: + properties: + fqdns: + description: A list of FQDN's which should be part of the + Backend Address Pool. + items: + type: string + type: array + x-kubernetes-list-type: set + id: + description: The ID of the Backend Address Pool. + type: string + ipAddresses: + description: A list of IP Addresses which should be part + of the Backend Address Pool. + items: + type: string + type: array + x-kubernetes-list-type: set + name: + description: The name of the Backend Address Pool. + type: string + type: object + type: array + backendHttpSettings: + description: One or more backend_http_settings blocks as defined + below. + items: + properties: + affinityCookieName: + description: The name of the affinity cookie. + type: string + authenticationCertificate: + description: One or more authentication_certificate_backend + blocks as defined below. + items: + properties: + id: + description: The ID of the URL Path Map. + type: string + name: + description: The Name of the URL Path Map. + type: string + type: object + type: array + connectionDraining: + description: A connection_draining block as defined below. + properties: + drainTimeoutSec: + description: The number of seconds connection draining + is active. Acceptable values are from 1 second to + 3600 seconds. + type: number + enabled: + description: Is the Web Application Firewall enabled? + type: boolean + type: object + cookieBasedAffinity: + description: Is Cookie-Based Affinity enabled? Possible + values are Enabled and Disabled. + type: string + hostName: + description: Host header to be sent to the backend servers. + Cannot be set if pick_host_name_from_backend_address is + set to true. + type: string + id: + description: The ID of the Backend HTTP Settings Configuration. + type: string + name: + description: The name of the Backend HTTP Settings Collection. + type: string + path: + description: The Path which should be used as a prefix for + all HTTP requests. 
+ type: string + pickHostNameFromBackendAddress: + description: Whether host header should be picked from the + host name of the backend server. Defaults to false. + type: boolean + port: + description: The port which should be used for this Backend + HTTP Settings Collection. + type: number + probeId: + description: The ID of the associated Probe. + type: string + probeName: + description: The name of an associated HTTP Probe. + type: string + protocol: + description: The Protocol which should be used. Possible + values are Http and Https. + type: string + requestTimeout: + description: The request timeout in seconds, which must + be between 1 and 86400 seconds. Defaults to 30. + type: number + trustedRootCertificateNames: + description: A list of trusted_root_certificate names. + items: + type: string + type: array + type: object + type: array + customErrorConfiguration: + description: One or more custom_error_configuration blocks as + defined below. + items: + properties: + customErrorPageUrl: + description: Error page URL of the application gateway customer + error. + type: string + id: + description: The ID of the Custom Error Configuration. + type: string + statusCode: + description: Status code of the application gateway customer + error. Possible values are HttpStatus403 and HttpStatus502 + type: string + type: object + type: array + enableHttp2: + description: Is HTTP2 enabled on the application gateway resource? + Defaults to false. + type: boolean + fipsEnabled: + description: Is FIPS enabled on the Application Gateway? + type: boolean + firewallPolicyId: + description: The ID of the Web Application Firewall Policy. + type: string + forceFirewallPolicyAssociation: + description: Is the Firewall Policy associated with the Application + Gateway? + type: boolean + frontendIpConfiguration: + description: One or more frontend_ip_configuration blocks as defined + below. + items: + properties: + id: + description: The ID of the Frontend IP Configuration. 
+ type: string + name: + description: The name of the Frontend IP Configuration. + type: string + privateIpAddress: + description: The Private IP Address to use for the Application + Gateway. + type: string + privateIpAddressAllocation: + description: The Allocation Method for the Private IP Address. + Possible values are Dynamic and Static. Defaults to Dynamic. + type: string + privateLinkConfigurationId: + description: The ID of the associated private link configuration. + type: string + privateLinkConfigurationName: + description: The name of the private link configuration + to use for this frontend IP configuration. + type: string + publicIpAddressId: + description: The ID of a Public IP Address which the Application + Gateway should use. The allocation method for the Public + IP Address depends on the sku of this Application Gateway. + Please refer to the Azure documentation for public IP + addresses for details. + type: string + subnetId: + description: The ID of the Subnet. + type: string + type: object + type: array + frontendPort: + description: One or more frontend_port blocks as defined below. + items: + properties: + id: + description: The ID of the Frontend Port. + type: string + name: + description: The name of the Frontend Port. + type: string + port: + description: The port used for this Frontend Port. + type: number + type: object + type: array + gatewayIpConfiguration: + description: One or more gateway_ip_configuration blocks as defined + below. + items: + properties: + id: + description: The ID of the Gateway IP Configuration. + type: string + name: + description: The Name of this Gateway IP Configuration. + type: string + subnetId: + description: The ID of the Subnet which the Application + Gateway should be connected to. + type: string + type: object + type: array + global: + description: A global block as defined below. + properties: + requestBufferingEnabled: + description: Whether Application Gateway's Request buffer + is enabled. 
+ type: boolean + responseBufferingEnabled: + description: Whether Application Gateway's Response buffer + is enabled. + type: boolean + type: object + httpListener: + description: One or more http_listener blocks as defined below. + items: + properties: + customErrorConfiguration: + description: One or more custom_error_configuration blocks + as defined below. + items: + properties: + customErrorPageUrl: + description: Error page URL of the application gateway + customer error. + type: string + id: + description: The ID of the URL Path Map. + type: string + statusCode: + description: A list of allowed status codes for this + Health Probe. + type: string + type: object + type: array + firewallPolicyId: + description: The ID of the Web Application Firewall Policy + which should be used for this HTTP Listener. + type: string + frontendIpConfigurationId: + description: The ID of the associated Frontend Configuration. + type: string + frontendIpConfigurationName: + description: The Name of the Frontend IP Configuration used + for this HTTP Listener. + type: string + frontendPortId: + description: The ID of the associated Frontend Port. + type: string + frontendPortName: + description: The Name of the Frontend Port use for this + HTTP Listener. + type: string + hostName: + description: The Hostname which should be used for this + HTTP Listener. Setting this value changes Listener Type + to 'Multi site'. + type: string + hostNames: + description: A list of Hostname(s) should be used for this + HTTP Listener. It allows special wildcard characters. + items: + type: string + type: array + x-kubernetes-list-type: set + id: + description: The ID of the HTTP Listener. + type: string + name: + description: The Name of the HTTP Listener. + type: string + protocol: + description: The Protocol to use for this HTTP Listener. + Possible values are Http and Https. + type: string + requireSni: + description: Should Server Name Indication be Required? + Defaults to false. 
+ type: boolean + sslCertificateId: + description: The ID of the associated SSL Certificate. + type: string + sslCertificateName: + description: The name of the associated SSL Certificate + which should be used for this HTTP Listener. + type: string + sslProfileId: + description: The ID of the associated SSL Profile. + type: string + sslProfileName: + description: The name of the associated SSL Profile which + should be used for this HTTP Listener. + type: string + type: object + type: array + id: + description: The ID of the Application Gateway. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Application Gateway. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Application Gateway. Only + possible value is UserAssigned. + type: string + type: object + location: + description: The Azure region where the Application Gateway should + exist. Changing this forces a new resource to be created. + type: string + privateEndpointConnection: + description: A list of private_endpoint_connection blocks as defined + below. + items: + properties: + id: + description: The ID of the private endpoint connection. + type: string + name: + description: The name of the private endpoint connection. + type: string + type: object + type: array + privateLinkConfiguration: + description: One or more private_link_configuration blocks as + defined below. + items: + properties: + id: + description: The ID of the private link configuration. + type: string + ipConfiguration: + description: One or more ip_configuration blocks as defined + below. + items: + properties: + name: + description: The Name of the URL Path Map. + type: string + primary: + description: Is this the Primary IP Configuration? 
+ type: boolean + privateIpAddress: + description: The Static IP Address which should be + used. + type: string + privateIpAddressAllocation: + description: The allocation method used for the Private + IP Address. Possible values are Dynamic and Static. + type: string + subnetId: + description: The ID of the subnet the private link + configuration should connect to. + type: string + type: object + type: array + name: + description: The name of the private link configuration. + type: string + type: object + type: array + probe: + description: One or more probe blocks as defined below. + items: + properties: + host: + description: The Hostname used for this Probe. If the Application + Gateway is configured for a single site, by default the + Host name should be specified as 127.0.0.1, unless otherwise + configured in custom probe. Cannot be set if pick_host_name_from_backend_http_settings + is set to true. + type: string + id: + description: The ID of the Probe. + type: string + interval: + description: The Interval between two consecutive probes + in seconds. Possible values range from 1 second to a maximum + of 86,400 seconds. + type: number + match: + description: A match block as defined above. + properties: + body: + description: A snippet from the Response Body which + must be present in the Response. + type: string + statusCode: + description: A list of allowed status codes for this + Health Probe. + items: + type: string + type: array + type: object + minimumServers: + description: The minimum number of servers that are always + marked as healthy. Defaults to 0. + type: number + name: + description: The Name of the Probe. + type: string + path: + description: The Path used for this Probe. + type: string + pickHostNameFromBackendHttpSettings: + description: Whether the host header should be picked from + the backend HTTP settings. Defaults to false. + type: boolean + port: + description: Custom port which will be used for probing + the backend servers. 
The valid value ranges from 1 to + 65535. In case not set, port from HTTP settings will be + used. This property is valid for Standard_v2 and WAF_v2 + only. + type: number + protocol: + description: The Protocol used for this Probe. Possible + values are Http and Https. + type: string + timeout: + description: The Timeout used for this Probe, which indicates + when a probe becomes unhealthy. Possible values range + from 1 second to a maximum of 86,400 seconds. + type: number + unhealthyThreshold: + description: The Unhealthy Threshold for this Probe, which + indicates the amount of retries which should be attempted + before a node is deemed unhealthy. Possible values are + from 1 to 20. + type: number + type: object + type: array + redirectConfiguration: + description: One or more redirect_configuration blocks as defined + below. + items: + properties: + id: + description: The ID of the Redirect Configuration. + type: string + includePath: + description: Whether to include the path in the redirected + URL. Defaults to false + type: boolean + includeQueryString: + description: Whether to include the query string in the + redirected URL. Default to false + type: boolean + name: + description: Unique name of the redirect configuration block + type: string + redirectType: + description: The type of redirect. Possible values are Permanent, + Temporary, Found and SeeOther + type: string + targetListenerId: + description: The ID of the Application Gateway. + type: string + targetListenerName: + description: The name of the listener to redirect to. Cannot + be set if target_url is set. + type: string + targetUrl: + description: The URL to redirect the request to. Cannot + be set if target_listener_name is set. + type: string + type: object + type: array + requestRoutingRule: + description: One or more request_routing_rule blocks as defined + below. + items: + properties: + backendAddressPoolId: + description: The ID of the associated Backend Address Pool. 
+ type: string + backendAddressPoolName: + description: The Name of the Backend Address Pool which + should be used for this Routing Rule. Cannot be set if + redirect_configuration_name is set. + type: string + backendHttpSettingsId: + description: The ID of the associated Backend HTTP Settings + Configuration. + type: string + backendHttpSettingsName: + description: The Name of the Backend HTTP Settings Collection + which should be used for this Routing Rule. Cannot be + set if redirect_configuration_name is set. + type: string + httpListenerId: + description: The ID of the associated HTTP Listener. + type: string + httpListenerName: + description: The Name of the HTTP Listener which should + be used for this Routing Rule. + type: string + id: + description: The ID of the Request Routing Rule. + type: string + name: + description: The Name of this Request Routing Rule. + type: string + priority: + description: Rule evaluation order can be dictated by specifying + an integer value from 1 to 20000 with 1 being the highest + priority and 20000 being the lowest priority. + type: number + redirectConfigurationId: + description: The ID of the associated Redirect Configuration. + type: string + redirectConfigurationName: + description: The Name of the Redirect Configuration which + should be used for this Routing Rule. Cannot be set if + either backend_address_pool_name or backend_http_settings_name + is set. + type: string + rewriteRuleSetId: + description: The ID of the associated Rewrite Rule Set. + type: string + rewriteRuleSetName: + description: The Name of the Rewrite Rule Set which should + be used for this Routing Rule. Only valid for v2 SKUs. + type: string + ruleType: + description: The Type of Routing that should be used for + this Rule. Possible values are Basic and PathBasedRouting. + type: string + urlPathMapId: + description: The ID of the associated URL Path Map. 
+ type: string + urlPathMapName: + description: The Name of the URL Path Map which should be + associated with this Routing Rule. + type: string + type: object + type: array + resourceGroupName: + description: The name of the resource group in which to the Application + Gateway should exist. Changing this forces a new resource to + be created. + type: string + rewriteRuleSet: + description: One or more rewrite_rule_set blocks as defined below. + Only valid for v2 SKUs. + items: + properties: + id: + description: The ID of the Rewrite Rule Set + type: string + name: + description: Unique name of the rewrite rule set block + type: string + rewriteRule: + description: One or more rewrite_rule blocks as defined + below. + items: + properties: + condition: + description: One or more condition blocks as defined + above. + items: + properties: + ignoreCase: + description: Perform a case in-sensitive comparison. + Defaults to false + type: boolean + negate: + description: Negate the result of the condition + evaluation. Defaults to false + type: boolean + pattern: + description: The pattern, either fixed string + or regular expression, that evaluates the + truthfulness of the condition. + type: string + variable: + description: The variable of the condition. + type: string + type: object + type: array + name: + description: The Name of the URL Path Map. + type: string + requestHeaderConfiguration: + description: One or more request_header_configuration + blocks as defined above. + items: + properties: + headerName: + description: Header name of the header configuration. + type: string + headerValue: + description: Header value of the header configuration. + To delete a response header set this property + to an empty string. + type: string + type: object + type: array + responseHeaderConfiguration: + description: One or more response_header_configuration + blocks as defined above. + items: + properties: + headerName: + description: Header name of the header configuration. 
+ type: string + headerValue: + description: Header value of the header configuration. + To delete a response header set this property + to an empty string. + type: string + type: object + type: array + ruleSequence: + description: Rule sequence of the rewrite rule that + determines the order of execution in a set. + type: number + url: + description: One url block as defined below + properties: + components: + description: The components used to rewrite the + URL. Possible values are path_only and query_string_only + to limit the rewrite to the URL Path or URL + Query String only. + type: string + path: + description: The URL path to rewrite. + type: string + queryString: + description: The query string to rewrite. + type: string + reroute: + description: Whether the URL path map should be + reevaluated after this rewrite has been applied. + More info on rewrite configuration + type: boolean + type: object + type: object + type: array + type: object + type: array + sku: + description: A sku block as defined below. + properties: + capacity: + description: The Capacity of the SKU to use for this Application + Gateway. When using a V1 SKU this value must be between + 1 and 32, and 1 to 125 for a V2 SKU. This property is optional + if autoscale_configuration is set. + type: number + name: + description: The Name of the SKU to use for this Application + Gateway. Possible values are Standard_Small, Standard_Medium, + Standard_Large, Standard_v2, WAF_Medium, WAF_Large, and + WAF_v2. + type: string + tier: + description: The Tier of the SKU to use for this Application + Gateway. Possible values are Standard, Standard_v2, WAF + and WAF_v2. + type: string + type: object + sslCertificate: + description: One or more ssl_certificate blocks as defined below. + items: + properties: + id: + description: The ID of the SSL Certificate. 
+ type: string + keyVaultSecretId: + description: The Secret ID of (base-64 encoded unencrypted + pfx) the Secret or Certificate object stored in Azure + KeyVault. You need to enable soft delete for Key Vault + to use this feature. Required if data is not set. + type: string + name: + description: The Name of the SSL certificate that is unique + within this Application Gateway + type: string + publicCertData: + description: The Public Certificate Data associated with + the SSL Certificate. + type: string + type: object + type: array + sslPolicy: + description: a ssl_policy block as defined below. + properties: + cipherSuites: + description: 'A List of accepted cipher suites. Possible values + are: TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA, TLS_DHE_DSS_WITH_AES_128_CBC_SHA, + TLS_DHE_DSS_WITH_AES_128_CBC_SHA256, TLS_DHE_DSS_WITH_AES_256_CBC_SHA, + TLS_DHE_DSS_WITH_AES_256_CBC_SHA256, TLS_DHE_RSA_WITH_AES_128_CBC_SHA, + TLS_DHE_RSA_WITH_AES_128_GCM_SHA256, TLS_DHE_RSA_WITH_AES_256_CBC_SHA, + TLS_DHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, + TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, + TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_RSA_WITH_3DES_EDE_CBC_SHA, + TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA256, + TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, + TLS_RSA_WITH_AES_256_CBC_SHA256 and TLS_RSA_WITH_AES_256_GCM_SHA384.' + items: + type: string + type: array + disabledProtocols: + description: A list of SSL Protocols which should be disabled + on this Application Gateway. Possible values are TLSv1_0, + TLSv1_1, TLSv1_2 and TLSv1_3. 
+ items: + type: string + type: array + minProtocolVersion: + description: The minimal TLS version. Possible values are + TLSv1_0, TLSv1_1, TLSv1_2 and TLSv1_3. + type: string + policyName: + description: The Name of the Policy e.g. AppGwSslPolicy20170401S. + Required if policy_type is set to Predefined. Possible values + can change over time and are published here https://docs.microsoft.com/azure/application-gateway/application-gateway-ssl-policy-overview. + Not compatible with disabled_protocols. + type: string + policyType: + description: The Type of the Policy. Possible values are Predefined, + Custom and CustomV2. + type: string + type: object + sslProfile: + description: One or more ssl_profile blocks as defined below. + items: + properties: + id: + description: The ID of the URL Path Map. + type: string + name: + description: The name of the SSL Profile that is unique + within this Application Gateway. + type: string + sslPolicy: + description: a ssl_policy block as defined below. + properties: + cipherSuites: + description: 'A List of accepted cipher suites. 
Possible + values are: TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA, TLS_DHE_DSS_WITH_AES_128_CBC_SHA, + TLS_DHE_DSS_WITH_AES_128_CBC_SHA256, TLS_DHE_DSS_WITH_AES_256_CBC_SHA, + TLS_DHE_DSS_WITH_AES_256_CBC_SHA256, TLS_DHE_RSA_WITH_AES_128_CBC_SHA, + TLS_DHE_RSA_WITH_AES_128_GCM_SHA256, TLS_DHE_RSA_WITH_AES_256_CBC_SHA, + TLS_DHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, + TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, + TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_RSA_WITH_3DES_EDE_CBC_SHA, + TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA256, + TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, + TLS_RSA_WITH_AES_256_CBC_SHA256 and TLS_RSA_WITH_AES_256_GCM_SHA384.' + items: + type: string + type: array + disabledProtocols: + description: A list of SSL Protocols which should be + disabled on this Application Gateway. Possible values + are TLSv1_0, TLSv1_1, TLSv1_2 and TLSv1_3. + items: + type: string + type: array + minProtocolVersion: + description: The minimal TLS version. Possible values + are TLSv1_0, TLSv1_1, TLSv1_2 and TLSv1_3. + type: string + policyName: + description: The Name of the Policy e.g. AppGwSslPolicy20170401S. + Required if policy_type is set to Predefined. Possible + values can change over time and are published here + https://docs.microsoft.com/azure/application-gateway/application-gateway-ssl-policy-overview. + Not compatible with disabled_protocols. + type: string + policyType: + description: The Type of the Policy. Possible values + are Predefined, Custom and CustomV2. 
+ type: string + type: object + trustedClientCertificateNames: + description: The name of the Trusted Client Certificate + that will be used to authenticate requests from clients. + items: + type: string + type: array + verifyClientCertIssuerDn: + description: Should client certificate issuer DN be verified? + Defaults to false. + type: boolean + verifyClientCertificateRevocation: + description: Specify the method to check client certificate + revocation status. Possible value is OCSP. + type: string + type: object + type: array + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + trustedClientCertificate: + description: One or more trusted_client_certificate blocks as + defined below. + items: + properties: + id: + description: The ID of the URL Path Map. + type: string + name: + description: The name of the Trusted Client Certificate + that is unique within this Application Gateway. + type: string + type: object + type: array + trustedRootCertificate: + description: One or more trusted_root_certificate blocks as defined + below. + items: + properties: + id: + description: The ID of the URL Path Map. + type: string + keyVaultSecretId: + description: The Secret ID of (base-64 encoded unencrypted + pfx) Secret or Certificate object stored in Azure KeyVault. + You need to enable soft delete for the Key Vault to use + this feature. Required if data is not set. + type: string + name: + description: The Name of the Trusted Root Certificate to + use. + type: string + type: object + type: array + urlPathMap: + description: One or more url_path_map blocks as defined below. + items: + properties: + defaultBackendAddressPoolId: + description: The ID of the Default Backend Address Pool. + type: string + defaultBackendAddressPoolName: + description: The Name of the Default Backend Address Pool + which should be used for this URL Path Map. 
Cannot be + set if default_redirect_configuration_name is set. + type: string + defaultBackendHttpSettingsId: + description: The ID of the Default Backend HTTP Settings + Collection. + type: string + defaultBackendHttpSettingsName: + description: The Name of the Default Backend HTTP Settings + Collection which should be used for this URL Path Map. + Cannot be set if default_redirect_configuration_name is + set. + type: string + defaultRedirectConfigurationId: + description: The ID of the Default Redirect Configuration. + type: string + defaultRedirectConfigurationName: + description: The Name of the Default Redirect Configuration + which should be used for this URL Path Map. Cannot be + set if either default_backend_address_pool_name or default_backend_http_settings_name + is set. + type: string + defaultRewriteRuleSetId: + description: The ID of the Application Gateway. + type: string + defaultRewriteRuleSetName: + description: The Name of the Default Rewrite Rule Set which + should be used for this URL Path Map. Only valid for v2 + SKUs. + type: string + id: + description: The ID of the URL Path Map. + type: string + name: + description: The Name of the URL Path Map. + type: string + pathRule: + description: One or more path_rule blocks as defined above. + items: + properties: + backendAddressPoolId: + description: The ID of the associated Backend Address + Pool. + type: string + backendAddressPoolName: + description: The Name of the Backend Address Pool + which should be used for this Routing Rule. Cannot + be set if redirect_configuration_name is set. + type: string + backendHttpSettingsId: + description: The ID of the associated Backend HTTP + Settings Configuration. + type: string + backendHttpSettingsName: + description: The Name of the Backend HTTP Settings + Collection which should be used for this Routing + Rule. Cannot be set if redirect_configuration_name + is set. 
+ type: string + firewallPolicyId: + description: The ID of the Web Application Firewall + Policy which should be used as an HTTP Listener. + type: string + id: + description: The ID of the URL Path Map. + type: string + name: + description: The Name of the URL Path Map. + type: string + paths: + description: A list of Paths used in this Path Rule. + items: + type: string + type: array + redirectConfigurationId: + description: The ID of the associated Redirect Configuration. + type: string + redirectConfigurationName: + description: The Name of the Redirect Configuration + which should be used for this Routing Rule. Cannot + be set if either backend_address_pool_name or backend_http_settings_name + is set. + type: string + rewriteRuleSetId: + description: The ID of the associated Rewrite Rule + Set. + type: string + rewriteRuleSetName: + description: The Name of the Rewrite Rule Set which + should be used for this Routing Rule. Only valid + for v2 SKUs. + type: string + type: object + type: array + type: object + type: array + wafConfiguration: + description: A waf_configuration block as defined below. + properties: + disabledRuleGroup: + description: One or more disabled_rule_group blocks as defined + below. + items: + properties: + ruleGroupName: + description: The rule group where specific rules should + be disabled. 
Possible values are BadBots, crs_20_protocol_violations, + crs_21_protocol_anomalies, crs_23_request_limits, + crs_30_http_policy, crs_35_bad_robots, crs_40_generic_attacks, + crs_41_sql_injection_attacks, crs_41_xss_attacks, + crs_42_tight_security, crs_45_trojans, crs_49_inbound_blocking, + General, GoodBots, KnownBadBots, Known-CVEs, REQUEST-911-METHOD-ENFORCEMENT, + REQUEST-913-SCANNER-DETECTION, REQUEST-920-PROTOCOL-ENFORCEMENT, + REQUEST-921-PROTOCOL-ATTACK, REQUEST-930-APPLICATION-ATTACK-LFI, + REQUEST-931-APPLICATION-ATTACK-RFI, REQUEST-932-APPLICATION-ATTACK-RCE, + REQUEST-933-APPLICATION-ATTACK-PHP, REQUEST-941-APPLICATION-ATTACK-XSS, + REQUEST-942-APPLICATION-ATTACK-SQLI, REQUEST-943-APPLICATION-ATTACK-SESSION-FIXATION, + REQUEST-944-APPLICATION-ATTACK-JAVA, UnknownBots, + METHOD-ENFORCEMENT, PROTOCOL-ENFORCEMENT, PROTOCOL-ATTACK, + LFI, RFI, RCE, PHP, NODEJS, XSS, SQLI, FIX, JAVA, + MS-ThreatIntel-WebShells, MS-ThreatIntel-AppSec, MS-ThreatIntel-SQLI + and MS-ThreatIntel-CVEs. + type: string + rules: + description: A list of rules which should be disabled + in that group. Disables all rules in the specified + group if rules is not specified. + items: + type: number + type: array + type: object + type: array + enabled: + description: Is the Web Application Firewall enabled? + type: boolean + exclusion: + description: One or more exclusion blocks as defined below. + items: + properties: + matchVariable: + description: Match variable of the exclusion rule to + exclude header, cookie or GET arguments. Possible + values are RequestArgKeys, RequestArgNames, RequestArgValues, + RequestCookieKeys, RequestCookieNames, RequestCookieValues, + RequestHeaderKeys, RequestHeaderNames and RequestHeaderValues + type: string + selector: + description: String value which will be used for the + filter operation. 
If empty will exclude all traffic + on this match_variable + type: string + selectorMatchOperator: + description: Operator which will be used to search in + the variable content. Possible values are Contains, + EndsWith, Equals, EqualsAny and StartsWith. If empty + will exclude all traffic on this match_variable + type: string + type: object + type: array + fileUploadLimitMb: + description: The File Upload Limit in MB. Accepted values + are in the range 1MB to 750MB for the WAF_v2 SKU, and 1MB + to 500MB for all other SKUs. Defaults to 100MB. + type: number + firewallMode: + description: The Web Application Firewall Mode. Possible values + are Detection and Prevention. + type: string + maxRequestBodySizeKb: + description: The Maximum Request Body Size in KB. Accepted + values are in the range 1KB to 128KB. Defaults to 128KB. + type: number + requestBodyCheck: + description: Is Request Body Inspection enabled? Defaults + to true. + type: boolean + ruleSetType: + description: The Type of the Rule Set used for this Web Application + Firewall. Possible values are OWASP, Microsoft_BotManagerRuleSet + and Microsoft_DefaultRuleSet. Defaults to OWASP. + type: string + ruleSetVersion: + description: The Version of the Rule Set used for this Web + Application Firewall. Possible values are 0.1, 1.0, 2.1, + 2.2.9, 3.0, 3.1 and 3.2. + type: string + type: object + zones: + description: Specifies a list of Availability Zones in which this + Application Gateway should be located. Changing this forces + a new Application Gateway to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. 
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/network.azure.upbound.io_connectionmonitors.yaml b/package/crds/network.azure.upbound.io_connectionmonitors.yaml index 322e45e94..a1453d8dd 100644 --- a/package/crds/network.azure.upbound.io_connectionmonitors.yaml +++ b/package/crds/network.azure.upbound.io_connectionmonitors.yaml @@ -1102,3 +1102,1057 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ConnectionMonitor is the Schema for the ConnectionMonitors API. + Manages a Network Connection Monitor. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ConnectionMonitorSpec defines the desired state of ConnectionMonitor + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + endpoint: + description: A endpoint block as defined below. + items: + properties: + address: + description: The IP address or domain name of the Network + Connection Monitor endpoint. + type: string + coverageLevel: + description: The test coverage for the Network Connection + Monitor endpoint. Possible values are AboveAverage, Average, + BelowAverage, Default, Full and Low. + type: string + excludedIpAddresses: + description: A list of IPv4/IPv6 subnet masks or IPv4/IPv6 + IP addresses to be excluded to the Network Connection + Monitor endpoint. + items: + type: string + type: array + x-kubernetes-list-type: set + filter: + description: A filter block as defined below. + properties: + item: + description: A item block as defined below. + items: + properties: + address: + description: The address of the filter item. + type: string + type: + description: The type of items included in the + filter. Possible values are AgentAddress. Defaults + to AgentAddress. 
+ type: string + type: object + type: array + type: + description: The type of items included in the filter. + Possible values are AgentAddress. Defaults to AgentAddress. + type: string + type: object + includedIpAddresses: + description: A list of IPv4/IPv6 subnet masks or IPv4/IPv6 + IP addresses to be included to the Network Connection + Monitor endpoint. + items: + type: string + type: array + x-kubernetes-list-type: set + name: + description: The name of the endpoint for the Network Connection + Monitor . + type: string + targetResourceId: + description: The resource ID which is used as the endpoint + by the Network Connection Monitor. + type: string + targetResourceType: + description: The endpoint type of the Network Connection + Monitor. Possible values are AzureSubnet, AzureVM, AzureVNet, + ExternalAddress, MMAWorkspaceMachine and MMAWorkspaceNetwork. + type: string + type: object + type: array + location: + description: The Azure Region where the Network Connection Monitor + should exist. Changing this forces a new resource to be created. + type: string + networkWatcherId: + description: The ID of the Network Watcher. Changing this forces + a new resource to be created. + type: string + networkWatcherIdRef: + description: Reference to a Watcher in network to populate networkWatcherId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + networkWatcherIdSelector: + description: Selector for a Watcher in network to populate networkWatcherId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + notes: + description: The description of the Network Connection Monitor. + type: string + outputWorkspaceResourceIds: + description: A list of IDs of the Log Analytics Workspace which + will accept the output from the Network Connection Monitor. + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Network Connection Monitor. 
+ type: object + x-kubernetes-map-type: granular + testConfiguration: + description: A test_configuration block as defined below. + items: + properties: + httpConfiguration: + description: A http_configuration block as defined below. + properties: + method: + description: The HTTP method for the HTTP request. Possible + values are Get and Post. Defaults to Get. + type: string + path: + description: The path component of the URI. It only + accepts the absolute path. + type: string + port: + description: The port for the TCP connection. + type: number + preferHttps: + description: Should HTTPS be preferred over HTTP in + cases where the choice is not explicit? Defaults to + false. + type: boolean + requestHeader: + description: A request_header block as defined below. + items: + properties: + name: + description: The name of the test group for the + Network Connection Monitor. + type: string + value: + description: The value of the HTTP header. + type: string + type: object + type: array + validStatusCodeRanges: + description: The HTTP status codes to consider successful. + For instance, 2xx, 301-304 and 418. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + icmpConfiguration: + description: A icmp_configuration block as defined below. + properties: + traceRouteEnabled: + description: Should path evaluation with trace route + be enabled? Defaults to true. + type: boolean + type: object + name: + description: The name of test configuration for the Network + Connection Monitor. + type: string + preferredIpVersion: + description: The preferred IP version which is used in the + test evaluation. Possible values are IPv4 and IPv6. + type: string + protocol: + description: The protocol used to evaluate tests. Possible + values are Tcp, Http and Icmp. + type: string + successThreshold: + description: A success_threshold block as defined below. 
+ properties: + checksFailedPercent: + description: The maximum percentage of failed checks + permitted for a test to be successful. + type: number + roundTripTimeMs: + description: The maximum round-trip time in milliseconds + permitted for a test to be successful. + type: number + type: object + tcpConfiguration: + description: A tcp_configuration block as defined below. + properties: + destinationPortBehavior: + description: The destination port behavior for the TCP + connection. Possible values are None and ListenIfAvailable. + type: string + port: + description: The port for the TCP connection. + type: number + traceRouteEnabled: + description: Should path evaluation with trace route + be enabled? Defaults to true. + type: boolean + type: object + testFrequencyInSeconds: + description: The time interval in seconds at which the test + evaluation will happen. Defaults to 60. + type: number + type: object + type: array + testGroup: + description: A test_group block as defined below. + items: + properties: + destinationEndpoints: + description: A list of destination endpoint names. + items: + type: string + type: array + x-kubernetes-list-type: set + enabled: + description: Should the test group be enabled? Defaults + to true. + type: boolean + name: + description: The name of the test group for the Network + Connection Monitor. + type: string + sourceEndpoints: + description: A list of source endpoint names. + items: + type: string + type: array + x-kubernetes-list-type: set + testConfigurationNames: + description: A list of test configuration names. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. 
The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + endpoint: + description: A endpoint block as defined below. + items: + properties: + address: + description: The IP address or domain name of the Network + Connection Monitor endpoint. + type: string + coverageLevel: + description: The test coverage for the Network Connection + Monitor endpoint. Possible values are AboveAverage, Average, + BelowAverage, Default, Full and Low. + type: string + excludedIpAddresses: + description: A list of IPv4/IPv6 subnet masks or IPv4/IPv6 + IP addresses to be excluded to the Network Connection + Monitor endpoint. + items: + type: string + type: array + x-kubernetes-list-type: set + filter: + description: A filter block as defined below. + properties: + item: + description: A item block as defined below. + items: + properties: + address: + description: The address of the filter item. + type: string + type: + description: The type of items included in the + filter. Possible values are AgentAddress. Defaults + to AgentAddress. + type: string + type: object + type: array + type: + description: The type of items included in the filter. + Possible values are AgentAddress. Defaults to AgentAddress. + type: string + type: object + includedIpAddresses: + description: A list of IPv4/IPv6 subnet masks or IPv4/IPv6 + IP addresses to be included to the Network Connection + Monitor endpoint. + items: + type: string + type: array + x-kubernetes-list-type: set + name: + description: The name of the endpoint for the Network Connection + Monitor . 
+ type: string + targetResourceId: + description: The resource ID which is used as the endpoint + by the Network Connection Monitor. + type: string + targetResourceType: + description: The endpoint type of the Network Connection + Monitor. Possible values are AzureSubnet, AzureVM, AzureVNet, + ExternalAddress, MMAWorkspaceMachine and MMAWorkspaceNetwork. + type: string + type: object + type: array + location: + description: The Azure Region where the Network Connection Monitor + should exist. Changing this forces a new resource to be created. + type: string + notes: + description: The description of the Network Connection Monitor. + type: string + outputWorkspaceResourceIds: + description: A list of IDs of the Log Analytics Workspace which + will accept the output from the Network Connection Monitor. + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Network Connection Monitor. + type: object + x-kubernetes-map-type: granular + testConfiguration: + description: A test_configuration block as defined below. + items: + properties: + httpConfiguration: + description: A http_configuration block as defined below. + properties: + method: + description: The HTTP method for the HTTP request. Possible + values are Get and Post. Defaults to Get. + type: string + path: + description: The path component of the URI. It only + accepts the absolute path. + type: string + port: + description: The port for the TCP connection. + type: number + preferHttps: + description: Should HTTPS be preferred over HTTP in + cases where the choice is not explicit? Defaults to + false. + type: boolean + requestHeader: + description: A request_header block as defined below. + items: + properties: + name: + description: The name of the test group for the + Network Connection Monitor. + type: string + value: + description: The value of the HTTP header. 
+ type: string + type: object + type: array + validStatusCodeRanges: + description: The HTTP status codes to consider successful. + For instance, 2xx, 301-304 and 418. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + icmpConfiguration: + description: A icmp_configuration block as defined below. + properties: + traceRouteEnabled: + description: Should path evaluation with trace route + be enabled? Defaults to true. + type: boolean + type: object + name: + description: The name of test configuration for the Network + Connection Monitor. + type: string + preferredIpVersion: + description: The preferred IP version which is used in the + test evaluation. Possible values are IPv4 and IPv6. + type: string + protocol: + description: The protocol used to evaluate tests. Possible + values are Tcp, Http and Icmp. + type: string + successThreshold: + description: A success_threshold block as defined below. + properties: + checksFailedPercent: + description: The maximum percentage of failed checks + permitted for a test to be successful. + type: number + roundTripTimeMs: + description: The maximum round-trip time in milliseconds + permitted for a test to be successful. + type: number + type: object + tcpConfiguration: + description: A tcp_configuration block as defined below. + properties: + destinationPortBehavior: + description: The destination port behavior for the TCP + connection. Possible values are None and ListenIfAvailable. + type: string + port: + description: The port for the TCP connection. + type: number + traceRouteEnabled: + description: Should path evaluation with trace route + be enabled? Defaults to true. + type: boolean + type: object + testFrequencyInSeconds: + description: The time interval in seconds at which the test + evaluation will happen. Defaults to 60. + type: number + type: object + type: array + testGroup: + description: A test_group block as defined below. 
+ items: + properties: + destinationEndpoints: + description: A list of destination endpoint names. + items: + type: string + type: array + x-kubernetes-list-type: set + enabled: + description: Should the test group be enabled? Defaults + to true. + type: boolean + name: + description: The name of the test group for the Network + Connection Monitor. + type: string + sourceEndpoints: + description: A list of source endpoint names. + items: + type: string + type: array + x-kubernetes-list-type: set + testConfigurationNames: + description: A list of test configuration names. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.endpoint is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.endpoint) + || (has(self.initProvider) && has(self.initProvider.endpoint))' + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.testConfiguration is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.testConfiguration) + || (has(self.initProvider) && has(self.initProvider.testConfiguration))' + - message: spec.forProvider.testGroup is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.testGroup) + || (has(self.initProvider) && has(self.initProvider.testGroup))' + status: + description: ConnectionMonitorStatus defines the observed state of ConnectionMonitor. 
+ properties: + atProvider: + properties: + endpoint: + description: A endpoint block as defined below. + items: + properties: + address: + description: The IP address or domain name of the Network + Connection Monitor endpoint. + type: string + coverageLevel: + description: The test coverage for the Network Connection + Monitor endpoint. Possible values are AboveAverage, Average, + BelowAverage, Default, Full and Low. + type: string + excludedIpAddresses: + description: A list of IPv4/IPv6 subnet masks or IPv4/IPv6 + IP addresses to be excluded to the Network Connection + Monitor endpoint. + items: + type: string + type: array + x-kubernetes-list-type: set + filter: + description: A filter block as defined below. + properties: + item: + description: A item block as defined below. + items: + properties: + address: + description: The address of the filter item. + type: string + type: + description: The type of items included in the + filter. Possible values are AgentAddress. Defaults + to AgentAddress. + type: string + type: object + type: array + type: + description: The type of items included in the filter. + Possible values are AgentAddress. Defaults to AgentAddress. + type: string + type: object + includedIpAddresses: + description: A list of IPv4/IPv6 subnet masks or IPv4/IPv6 + IP addresses to be included to the Network Connection + Monitor endpoint. + items: + type: string + type: array + x-kubernetes-list-type: set + name: + description: The name of the endpoint for the Network Connection + Monitor . + type: string + targetResourceId: + description: The resource ID which is used as the endpoint + by the Network Connection Monitor. + type: string + targetResourceType: + description: The endpoint type of the Network Connection + Monitor. Possible values are AzureSubnet, AzureVM, AzureVNet, + ExternalAddress, MMAWorkspaceMachine and MMAWorkspaceNetwork. + type: string + type: object + type: array + id: + description: The ID of the Network Connection Monitor. 
+ type: string + location: + description: The Azure Region where the Network Connection Monitor + should exist. Changing this forces a new resource to be created. + type: string + networkWatcherId: + description: The ID of the Network Watcher. Changing this forces + a new resource to be created. + type: string + notes: + description: The description of the Network Connection Monitor. + type: string + outputWorkspaceResourceIds: + description: A list of IDs of the Log Analytics Workspace which + will accept the output from the Network Connection Monitor. + items: + type: string + type: array + x-kubernetes-list-type: set + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Network Connection Monitor. + type: object + x-kubernetes-map-type: granular + testConfiguration: + description: A test_configuration block as defined below. + items: + properties: + httpConfiguration: + description: A http_configuration block as defined below. + properties: + method: + description: The HTTP method for the HTTP request. Possible + values are Get and Post. Defaults to Get. + type: string + path: + description: The path component of the URI. It only + accepts the absolute path. + type: string + port: + description: The port for the TCP connection. + type: number + preferHttps: + description: Should HTTPS be preferred over HTTP in + cases where the choice is not explicit? Defaults to + false. + type: boolean + requestHeader: + description: A request_header block as defined below. + items: + properties: + name: + description: The name of the test group for the + Network Connection Monitor. + type: string + value: + description: The value of the HTTP header. + type: string + type: object + type: array + validStatusCodeRanges: + description: The HTTP status codes to consider successful. + For instance, 2xx, 301-304 and 418. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: object + icmpConfiguration: + description: A icmp_configuration block as defined below. + properties: + traceRouteEnabled: + description: Should path evaluation with trace route + be enabled? Defaults to true. + type: boolean + type: object + name: + description: The name of test configuration for the Network + Connection Monitor. + type: string + preferredIpVersion: + description: The preferred IP version which is used in the + test evaluation. Possible values are IPv4 and IPv6. + type: string + protocol: + description: The protocol used to evaluate tests. Possible + values are Tcp, Http and Icmp. + type: string + successThreshold: + description: A success_threshold block as defined below. + properties: + checksFailedPercent: + description: The maximum percentage of failed checks + permitted for a test to be successful. + type: number + roundTripTimeMs: + description: The maximum round-trip time in milliseconds + permitted for a test to be successful. + type: number + type: object + tcpConfiguration: + description: A tcp_configuration block as defined below. + properties: + destinationPortBehavior: + description: The destination port behavior for the TCP + connection. Possible values are None and ListenIfAvailable. + type: string + port: + description: The port for the TCP connection. + type: number + traceRouteEnabled: + description: Should path evaluation with trace route + be enabled? Defaults to true. + type: boolean + type: object + testFrequencyInSeconds: + description: The time interval in seconds at which the test + evaluation will happen. Defaults to 60. + type: number + type: object + type: array + testGroup: + description: A test_group block as defined below. + items: + properties: + destinationEndpoints: + description: A list of destination endpoint names. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + enabled: + description: Should the test group be enabled? Defaults + to true. + type: boolean + name: + description: The name of the test group for the Network + Connection Monitor. + type: string + sourceEndpoints: + description: A list of source endpoint names. + items: + type: string + type: array + x-kubernetes-list-type: set + testConfigurationNames: + description: A list of test configuration names. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/network.azure.upbound.io_dnszones.yaml b/package/crds/network.azure.upbound.io_dnszones.yaml index ef93cbd92..a9fb566f7 100644 --- a/package/crds/network.azure.upbound.io_dnszones.yaml +++ b/package/crds/network.azure.upbound.io_dnszones.yaml @@ -577,3 +577,556 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: DNSZone is the Schema for the DNSZones API. Manages a DNS Zone. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DNSZoneSpec defines the desired state of DNSZone + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + resourceGroupName: + description: Specifies the resource group where the resource exists. + Changing this forces a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + soaRecord: + description: An soa_record block as defined below. + properties: + email: + description: The email contact for the SOA record. + type: string + expireTime: + description: The expire time for the SOA record. Defaults + to 2419200. + type: number + hostName: + description: The domain name of the authoritative name server + for the SOA record. If not set, computed value from Azure + will be used. + type: string + minimumTtl: + description: The minimum Time To Live for the SOA record. + By convention, it is used to determine the negative caching + duration. Defaults to 300. + type: number + refreshTime: + description: The refresh time for the SOA record. 
Defaults + to 3600. + type: number + retryTime: + description: The retry time for the SOA record. Defaults to + 300. + type: number + serialNumber: + description: The serial number for the SOA record. Defaults + to 1. + type: number + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the Record Set. + type: object + x-kubernetes-map-type: granular + ttl: + description: The Time To Live of the SOA Record in seconds. + Defaults to 3600. + type: number + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + soaRecord: + description: An soa_record block as defined below. + properties: + email: + description: The email contact for the SOA record. + type: string + expireTime: + description: The expire time for the SOA record. Defaults + to 2419200. + type: number + hostName: + description: The domain name of the authoritative name server + for the SOA record. If not set, computed value from Azure + will be used. + type: string + minimumTtl: + description: The minimum Time To Live for the SOA record. + By convention, it is used to determine the negative caching + duration. Defaults to 300. 
+ type: number + refreshTime: + description: The refresh time for the SOA record. Defaults + to 3600. + type: number + retryTime: + description: The retry time for the SOA record. Defaults to + 300. + type: number + serialNumber: + description: The serial number for the SOA record. Defaults + to 1. + type: number + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the Record Set. + type: object + x-kubernetes-map-type: granular + ttl: + description: The Time To Live of the SOA Record in seconds. + Defaults to 3600. + type: number + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: DNSZoneStatus defines the observed state of DNSZone. + properties: + atProvider: + properties: + id: + description: The DNS Zone ID. + type: string + maxNumberOfRecordSets: + description: Maximum number of Records in the zone. Defaults to + 1000. + type: number + nameServers: + description: A list of values that make up the NS record for the + zone. + items: + type: string + type: array + x-kubernetes-list-type: set + numberOfRecordSets: + description: The number of records already in the zone. + type: number + resourceGroupName: + description: Specifies the resource group where the resource exists. + Changing this forces a new resource to be created. + type: string + soaRecord: + description: An soa_record block as defined below. + properties: + email: + description: The email contact for the SOA record. + type: string + expireTime: + description: The expire time for the SOA record. Defaults + to 2419200. + type: number + fqdn: + type: string + hostName: + description: The domain name of the authoritative name server + for the SOA record. If not set, computed value from Azure + will be used. + type: string + minimumTtl: + description: The minimum Time To Live for the SOA record. + By convention, it is used to determine the negative caching + duration. Defaults to 300. + type: number + refreshTime: + description: The refresh time for the SOA record. Defaults + to 3600. 
+ type: number + retryTime: + description: The retry time for the SOA record. Defaults to + 300. + type: number + serialNumber: + description: The serial number for the SOA record. Defaults + to 1. + type: number + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the Record Set. + type: object + x-kubernetes-map-type: granular + ttl: + description: The Time To Live of the SOA Record in seconds. + Defaults to 3600. + type: number + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/network.azure.upbound.io_expressroutecircuitpeerings.yaml b/package/crds/network.azure.upbound.io_expressroutecircuitpeerings.yaml index 3419f295a..314dc68e9 100644 --- a/package/crds/network.azure.upbound.io_expressroutecircuitpeerings.yaml +++ b/package/crds/network.azure.upbound.io_expressroutecircuitpeerings.yaml @@ -823,3 +823,787 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ExpressRouteCircuitPeering is the Schema for the ExpressRouteCircuitPeerings + API. Manages an ExpressRoute Circuit Peering. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ExpressRouteCircuitPeeringSpec defines the desired state + of ExpressRouteCircuitPeering + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + expressRouteCircuitName: + description: The name of the ExpressRoute Circuit in which to + create the Peering. Changing this forces a new resource to be + created. + type: string + expressRouteCircuitNameRef: + description: Reference to a ExpressRouteCircuit in network to + populate expressRouteCircuitName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + expressRouteCircuitNameSelector: + description: Selector for a ExpressRouteCircuit in network to + populate expressRouteCircuitName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + ipv4Enabled: + description: A boolean value indicating whether the IPv4 peering + is enabled. Defaults to true. + type: boolean + ipv6: + description: A ipv6 block as defined below. + properties: + enabled: + description: A boolean value indicating whether the IPv6 peering + is enabled. Defaults to true. + type: boolean + microsoftPeering: + description: A microsoft_peering block as defined below. 
+ properties: + advertisedCommunities: + description: The communities of Bgp Peering specified + for microsoft peering. + items: + type: string + type: array + advertisedPublicPrefixes: + description: A list of Advertised Public Prefixes. + items: + type: string + type: array + customerAsn: + description: The CustomerASN of the peering. Defaults + to 0. + type: number + routingRegistryName: + description: 'The Routing Registry against which the AS + number and prefixes are registered. For example: ARIN, + RIPE, AFRINIC etc. Defaults to NONE.' + type: string + type: object + primaryPeerAddressPrefix: + description: A subnet for the primary link. + type: string + routeFilterId: + description: The ID of the Route Filter. Only available when + peering_type is set to MicrosoftPeering. + type: string + secondaryPeerAddressPrefix: + description: A subnet for the secondary link. + type: string + type: object + microsoftPeeringConfig: + description: A microsoft_peering_config block as defined below. + Required when peering_type is set to MicrosoftPeering and config + for IPv4. + properties: + advertisedCommunities: + description: The communities of Bgp Peering specified for + microsoft peering. + items: + type: string + type: array + advertisedPublicPrefixes: + description: A list of Advertised Public Prefixes. + items: + type: string + type: array + customerAsn: + description: The CustomerASN of the peering. Defaults to 0. + type: number + routingRegistryName: + description: 'The Routing Registry against which the AS number + and prefixes are registered. For example: ARIN, RIPE, AFRINIC + etc. Defaults to NONE.' + type: string + type: object + peerAsn: + description: The Either a 16-bit or a 32-bit ASN. Can either be + public or private. + type: number + primaryPeerAddressPrefix: + description: A /30 subnet for the primary link. Required when + config for IPv4. 
+ type: string + resourceGroupName: + description: The name of the resource group in which to create + the Express Route Circuit Peering. Changing this forces a new + resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + routeFilterId: + description: The ID of the Route Filter. Only available when peering_type + is set to MicrosoftPeering. + type: string + secondaryPeerAddressPrefix: + description: A /30 subnet for the secondary link. Required when + config for IPv4. + type: string + sharedKeySecretRef: + description: The shared key. Can be a maximum of 25 characters. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + vlanId: + description: A valid VLAN ID to establish this peering on. + type: number + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + ipv4Enabled: + description: A boolean value indicating whether the IPv4 peering + is enabled. Defaults to true. 
+ type: boolean + ipv6: + description: A ipv6 block as defined below. + properties: + enabled: + description: A boolean value indicating whether the IPv6 peering + is enabled. Defaults to true. + type: boolean + microsoftPeering: + description: A microsoft_peering block as defined below. + properties: + advertisedCommunities: + description: The communities of Bgp Peering specified + for microsoft peering. + items: + type: string + type: array + advertisedPublicPrefixes: + description: A list of Advertised Public Prefixes. + items: + type: string + type: array + customerAsn: + description: The CustomerASN of the peering. Defaults + to 0. + type: number + routingRegistryName: + description: 'The Routing Registry against which the AS + number and prefixes are registered. For example: ARIN, + RIPE, AFRINIC etc. Defaults to NONE.' + type: string + type: object + primaryPeerAddressPrefix: + description: A subnet for the primary link. + type: string + routeFilterId: + description: The ID of the Route Filter. Only available when + peering_type is set to MicrosoftPeering. + type: string + secondaryPeerAddressPrefix: + description: A subnet for the secondary link. + type: string + type: object + microsoftPeeringConfig: + description: A microsoft_peering_config block as defined below. + Required when peering_type is set to MicrosoftPeering and config + for IPv4. + properties: + advertisedCommunities: + description: The communities of Bgp Peering specified for + microsoft peering. + items: + type: string + type: array + advertisedPublicPrefixes: + description: A list of Advertised Public Prefixes. + items: + type: string + type: array + customerAsn: + description: The CustomerASN of the peering. Defaults to 0. + type: number + routingRegistryName: + description: 'The Routing Registry against which the AS number + and prefixes are registered. For example: ARIN, RIPE, AFRINIC + etc. Defaults to NONE.' 
+ type: string + type: object + peerAsn: + description: The Either a 16-bit or a 32-bit ASN. Can either be + public or private. + type: number + primaryPeerAddressPrefix: + description: A /30 subnet for the primary link. Required when + config for IPv4. + type: string + routeFilterId: + description: The ID of the Route Filter. Only available when peering_type + is set to MicrosoftPeering. + type: string + secondaryPeerAddressPrefix: + description: A /30 subnet for the secondary link. Required when + config for IPv4. + type: string + vlanId: + description: A valid VLAN ID to establish this peering on. + type: number + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. 
+ type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.vlanId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.vlanId) + || (has(self.initProvider) && has(self.initProvider.vlanId))' + status: + description: ExpressRouteCircuitPeeringStatus defines the observed state + of ExpressRouteCircuitPeering. + properties: + atProvider: + properties: + azureAsn: + description: The ASN used by Azure. + type: number + expressRouteCircuitName: + description: The name of the ExpressRoute Circuit in which to + create the Peering. Changing this forces a new resource to be + created. + type: string + gatewayManagerEtag: + type: string + id: + description: The ID of the ExpressRoute Circuit Peering. + type: string + ipv4Enabled: + description: A boolean value indicating whether the IPv4 peering + is enabled. Defaults to true. + type: boolean + ipv6: + description: A ipv6 block as defined below. + properties: + enabled: + description: A boolean value indicating whether the IPv6 peering + is enabled. Defaults to true. + type: boolean + microsoftPeering: + description: A microsoft_peering block as defined below. + properties: + advertisedCommunities: + description: The communities of Bgp Peering specified + for microsoft peering. + items: + type: string + type: array + advertisedPublicPrefixes: + description: A list of Advertised Public Prefixes. + items: + type: string + type: array + customerAsn: + description: The CustomerASN of the peering. Defaults + to 0. + type: number + routingRegistryName: + description: 'The Routing Registry against which the AS + number and prefixes are registered. For example: ARIN, + RIPE, AFRINIC etc. Defaults to NONE.' 
+ type: string + type: object + primaryPeerAddressPrefix: + description: A subnet for the primary link. + type: string + routeFilterId: + description: The ID of the Route Filter. Only available when + peering_type is set to MicrosoftPeering. + type: string + secondaryPeerAddressPrefix: + description: A subnet for the secondary link. + type: string + type: object + microsoftPeeringConfig: + description: A microsoft_peering_config block as defined below. + Required when peering_type is set to MicrosoftPeering and config + for IPv4. + properties: + advertisedCommunities: + description: The communities of Bgp Peering specified for + microsoft peering. + items: + type: string + type: array + advertisedPublicPrefixes: + description: A list of Advertised Public Prefixes. + items: + type: string + type: array + customerAsn: + description: The CustomerASN of the peering. Defaults to 0. + type: number + routingRegistryName: + description: 'The Routing Registry against which the AS number + and prefixes are registered. For example: ARIN, RIPE, AFRINIC + etc. Defaults to NONE.' + type: string + type: object + peerAsn: + description: The Either a 16-bit or a 32-bit ASN. Can either be + public or private. + type: number + primaryAzurePort: + description: The Primary Port used by Azure for this Peering. + type: string + primaryPeerAddressPrefix: + description: A /30 subnet for the primary link. Required when + config for IPv4. + type: string + resourceGroupName: + description: The name of the resource group in which to create + the Express Route Circuit Peering. Changing this forces a new + resource to be created. + type: string + routeFilterId: + description: The ID of the Route Filter. Only available when peering_type + is set to MicrosoftPeering. + type: string + secondaryAzurePort: + description: The Secondary Port used by Azure for this Peering. + type: string + secondaryPeerAddressPrefix: + description: A /30 subnet for the secondary link. Required when + config for IPv4. 
+ type: string + vlanId: + description: A valid VLAN ID to establish this peering on. + type: number + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/network.azure.upbound.io_expressroutecircuits.yaml b/package/crds/network.azure.upbound.io_expressroutecircuits.yaml index 716f6a9b4..bc6243a28 100644 --- a/package/crds/network.azure.upbound.io_expressroutecircuits.yaml +++ b/package/crds/network.azure.upbound.io_expressroutecircuits.yaml @@ -597,3 +597,576 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ExpressRouteCircuit is the Schema for the ExpressRouteCircuits + API. Manages an ExpressRoute circuit. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ExpressRouteCircuitSpec defines the desired state of ExpressRouteCircuit + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + allowClassicOperations: + description: Allow the circuit to interact with classic (RDFE) + resources. Defaults to false. + type: boolean + authorizationKeySecretRef: + description: The authorization key. This can be used to set up + an ExpressRoute Circuit with an ExpressRoute Port from another + subscription. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + bandwidthInGbps: + description: The bandwidth in Gbps of the circuit being created + on the Express Route Port. + type: number + bandwidthInMbps: + description: The bandwidth in Mbps of the circuit being created + on the Service Provider. + type: number + expressRoutePortId: + description: The ID of the Express Route Port this Express Route + Circuit is based on. Changing this forces a new resource to + be created. 
+ type: string + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + peeringLocation: + description: The name of the peering location and not the Azure + resource location. Changing this forces a new resource to be + created. + type: string + resourceGroupName: + description: The name of the resource group in which to create + the ExpressRoute circuit. Changing this forces a new resource + to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + serviceProviderName: + description: The name of the ExpressRoute Service Provider. Changing + this forces a new resource to be created. + type: string + sku: + description: A sku block for the ExpressRoute circuit as documented + below. + properties: + family: + description: The billing mode for bandwidth. Possible values + are MeteredData or UnlimitedData. + type: string + tier: + description: The service tier. Possible values are Basic, + Local, Standard or Premium. + type: string + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + allowClassicOperations: + description: Allow the circuit to interact with classic (RDFE) + resources. Defaults to false. + type: boolean + bandwidthInGbps: + description: The bandwidth in Gbps of the circuit being created + on the Express Route Port. + type: number + bandwidthInMbps: + description: The bandwidth in Mbps of the circuit being created + on the Service Provider. + type: number + expressRoutePortId: + description: The ID of the Express Route Port this Express Route + Circuit is based on. Changing this forces a new resource to + be created. + type: string + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + peeringLocation: + description: The name of the peering location and not the Azure + resource location. Changing this forces a new resource to be + created. + type: string + serviceProviderName: + description: The name of the ExpressRoute Service Provider. Changing + this forces a new resource to be created. + type: string + sku: + description: A sku block for the ExpressRoute circuit as documented + below. + properties: + family: + description: The billing mode for bandwidth. Possible values + are MeteredData or UnlimitedData. + type: string + tier: + description: The service tier. Possible values are Basic, + Local, Standard or Premium. + type: string + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. 
+ ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.sku is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.sku) + || (has(self.initProvider) && has(self.initProvider.sku))' + status: + description: ExpressRouteCircuitStatus defines the observed state of ExpressRouteCircuit. + properties: + atProvider: + properties: + allowClassicOperations: + description: Allow the circuit to interact with classic (RDFE) + resources. Defaults to false. + type: boolean + bandwidthInGbps: + description: The bandwidth in Gbps of the circuit being created + on the Express Route Port. + type: number + bandwidthInMbps: + description: The bandwidth in Mbps of the circuit being created + on the Service Provider. + type: number + expressRoutePortId: + description: The ID of the Express Route Port this Express Route + Circuit is based on. Changing this forces a new resource to + be created. + type: string + id: + description: The ID of the ExpressRoute circuit. + type: string + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + peeringLocation: + description: The name of the peering location and not the Azure + resource location. Changing this forces a new resource to be + created. + type: string + resourceGroupName: + description: The name of the resource group in which to create + the ExpressRoute circuit. Changing this forces a new resource + to be created. 
+ type: string + serviceProviderName: + description: The name of the ExpressRoute Service Provider. Changing + this forces a new resource to be created. + type: string + serviceProviderProvisioningState: + description: The ExpressRoute circuit provisioning state from + your chosen service provider. Possible values are NotProvisioned, + Provisioning, Provisioned, and Deprovisioning. + type: string + sku: + description: A sku block for the ExpressRoute circuit as documented + below. + properties: + family: + description: The billing mode for bandwidth. Possible values + are MeteredData or UnlimitedData. + type: string + tier: + description: The service tier. Possible values are Basic, + Local, Standard or Premium. + type: string + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? 
+ type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/network.azure.upbound.io_expressrouteconnections.yaml b/package/crds/network.azure.upbound.io_expressrouteconnections.yaml index 0b9f900b0..944f01b00 100644 --- a/package/crds/network.azure.upbound.io_expressrouteconnections.yaml +++ b/package/crds/network.azure.upbound.io_expressrouteconnections.yaml @@ -744,3 +744,717 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ExpressRouteConnection is the Schema for the ExpressRouteConnections + API. Manages an Express Route Connection. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ExpressRouteConnectionSpec defines the desired state of ExpressRouteConnection + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + authorizationKey: + description: The authorization key to establish the Express Route + Connection. + type: string + enableInternetSecurity: + description: Is Internet security enabled for this Express Route + Connection? + type: boolean + expressRouteCircuitPeeringId: + description: The ID of the Express Route Circuit Peering that + this Express Route Connection connects with. Changing this forces + a new resource to be created. + type: string + expressRouteCircuitPeeringIdRef: + description: Reference to a ExpressRouteCircuitPeering in network + to populate expressRouteCircuitPeeringId. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + expressRouteCircuitPeeringIdSelector: + description: Selector for a ExpressRouteCircuitPeering in network + to populate expressRouteCircuitPeeringId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + expressRouteGatewayBypassEnabled: + description: Specified whether Fast Path is enabled for Virtual + Wan Firewall Hub. Defaults to false. + type: boolean + expressRouteGatewayId: + description: The ID of the Express Route Gateway that this Express + Route Connection connects with. Changing this forces a new resource + to be created. + type: string + expressRouteGatewayIdRef: + description: Reference to a ExpressRouteGateway in network to + populate expressRouteGatewayId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + expressRouteGatewayIdSelector: + description: Selector for a ExpressRouteGateway in network to + populate expressRouteGatewayId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + routing: + description: A routing block as defined below. + properties: + associatedRouteTableId: + description: The ID of the Virtual Hub Route Table associated + with this Express Route Connection. + type: string + inboundRouteMapId: + description: The ID of the Route Map associated with this + Express Route Connection for inbound routes. + type: string + outboundRouteMapId: + description: The ID of the Route Map associated with this + Express Route Connection for outbound routes. + type: string + propagatedRouteTable: + description: A propagated_route_table block as defined below. + properties: + labels: + description: The list of labels to logically group route + tables. + items: + type: string + type: array + x-kubernetes-list-type: set + routeTableIds: + description: A list of IDs of the Virtual Hub Route Table + to propagate routes from Express Route Connection to + the route table. + items: + type: string + type: array + type: object + type: object + routingWeight: + description: The routing weight associated to the Express Route + Connection. Possible value is between 0 and 32000. Defaults + to 0. + type: number + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. 
It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + authorizationKey: + description: The authorization key to establish the Express Route + Connection. + type: string + enableInternetSecurity: + description: Is Internet security enabled for this Express Route + Connection? + type: boolean + expressRouteCircuitPeeringId: + description: The ID of the Express Route Circuit Peering that + this Express Route Connection connects with. Changing this forces + a new resource to be created. + type: string + expressRouteCircuitPeeringIdRef: + description: Reference to a ExpressRouteCircuitPeering in network + to populate expressRouteCircuitPeeringId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + expressRouteCircuitPeeringIdSelector: + description: Selector for a ExpressRouteCircuitPeering in network + to populate expressRouteCircuitPeeringId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + expressRouteGatewayBypassEnabled: + description: Specified whether Fast Path is enabled for Virtual + Wan Firewall Hub. Defaults to false. + type: boolean + routing: + description: A routing block as defined below. + properties: + associatedRouteTableId: + description: The ID of the Virtual Hub Route Table associated + with this Express Route Connection. + type: string + inboundRouteMapId: + description: The ID of the Route Map associated with this + Express Route Connection for inbound routes. 
+ type: string + outboundRouteMapId: + description: The ID of the Route Map associated with this + Express Route Connection for outbound routes. + type: string + propagatedRouteTable: + description: A propagated_route_table block as defined below. + properties: + labels: + description: The list of labels to logically group route + tables. + items: + type: string + type: array + x-kubernetes-list-type: set + routeTableIds: + description: A list of IDs of the Virtual Hub Route Table + to propagate routes from Express Route Connection to + the route table. + items: + type: string + type: array + type: object + type: object + routingWeight: + description: The routing weight associated to the Express Route + Connection. Possible value is between 0 and 32000. Defaults + to 0. + type: number + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: ExpressRouteConnectionStatus defines the observed state of + ExpressRouteConnection. + properties: + atProvider: + properties: + authorizationKey: + description: The authorization key to establish the Express Route + Connection. + type: string + enableInternetSecurity: + description: Is Internet security enabled for this Express Route + Connection? + type: boolean + expressRouteCircuitPeeringId: + description: The ID of the Express Route Circuit Peering that + this Express Route Connection connects with. Changing this forces + a new resource to be created. + type: string + expressRouteGatewayBypassEnabled: + description: Specified whether Fast Path is enabled for Virtual + Wan Firewall Hub. Defaults to false. + type: boolean + expressRouteGatewayId: + description: The ID of the Express Route Gateway that this Express + Route Connection connects with. Changing this forces a new resource + to be created. + type: string + id: + description: The ID of the Express Route Connection. + type: string + routing: + description: A routing block as defined below. + properties: + associatedRouteTableId: + description: The ID of the Virtual Hub Route Table associated + with this Express Route Connection. + type: string + inboundRouteMapId: + description: The ID of the Route Map associated with this + Express Route Connection for inbound routes. 
+ type: string + outboundRouteMapId: + description: The ID of the Route Map associated with this + Express Route Connection for outbound routes. + type: string + propagatedRouteTable: + description: A propagated_route_table block as defined below. + properties: + labels: + description: The list of labels to logically group route + tables. + items: + type: string + type: array + x-kubernetes-list-type: set + routeTableIds: + description: A list of IDs of the Virtual Hub Route Table + to propagate routes from Express Route Connection to + the route table. + items: + type: string + type: array + type: object + type: object + routingWeight: + description: The routing weight associated to the Express Route + Connection. Possible value is between 0 and 32000. Defaults + to 0. + type: number + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/network.azure.upbound.io_expressrouteports.yaml b/package/crds/network.azure.upbound.io_expressrouteports.yaml index 48f15b2ee..c0be219d0 100644 --- a/package/crds/network.azure.upbound.io_expressrouteports.yaml +++ b/package/crds/network.azure.upbound.io_expressrouteports.yaml @@ -787,3 +787,754 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ExpressRoutePort is the Schema for the ExpressRoutePorts API. + Manages a Express Route Port. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. 
+ Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ExpressRoutePortSpec defines the desired state of ExpressRoutePort + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + bandwidthInGbps: + description: Bandwidth of the Express Route Port in Gbps. Changing + this forces a new Express Route Port to be created. + type: number + billingType: + description: The billing type of the Express Route Port. Possible + values are MeteredData and UnlimitedData. + type: string + encapsulation: + description: 'The encapsulation method used for the Express Route + Port. Changing this forces a new Express Route Port to be created. + Possible values are: Dot1Q, QinQ.' + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Express Route Port. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Express Route Port. Only + possible value is UserAssigned. 
+ type: string + type: object + link1: + description: A list of link blocks as defined below. + properties: + adminEnabled: + description: Whether enable administration state on the Express + Route Port Link? Defaults to false. + type: boolean + macsecCakKeyvaultSecretId: + description: The ID of the Key Vault Secret that contains + the Mac security CAK key for this Express Route Port Link. + type: string + macsecCipher: + description: The MACSec cipher used for this Express Route + Port Link. Possible values are GcmAes128 and GcmAes256. + Defaults to GcmAes128. + type: string + macsecCknKeyvaultSecretId: + description: The ID of the Key Vault Secret that contains + the MACSec CKN key for this Express Route Port Link. + type: string + macsecSciEnabled: + description: Should Secure Channel Identifier on the Express + Route Port Link be enabled? Defaults to false. + type: boolean + type: object + link2: + description: A list of link blocks as defined below. + properties: + adminEnabled: + description: Whether enable administration state on the Express + Route Port Link? Defaults to false. + type: boolean + macsecCakKeyvaultSecretId: + description: The ID of the Key Vault Secret that contains + the Mac security CAK key for this Express Route Port Link. + type: string + macsecCipher: + description: The MACSec cipher used for this Express Route + Port Link. Possible values are GcmAes128 and GcmAes256. + Defaults to GcmAes128. + type: string + macsecCknKeyvaultSecretId: + description: The ID of the Key Vault Secret that contains + the MACSec CKN key for this Express Route Port Link. + type: string + macsecSciEnabled: + description: Should Secure Channel Identifier on the Express + Route Port Link be enabled? Defaults to false. + type: boolean + type: object + location: + description: The Azure Region where the Express Route Port should + exist. Changing this forces a new Express Route Port to be created. 
+ type: string + peeringLocation: + description: The name of the peering location that this Express + Route Port is physically mapped to. Changing this forces a new + Express Route Port to be created. + type: string + resourceGroupName: + description: The name of the Resource Group where the Express + Route Port should exist. Changing this forces a new Express + Route Port to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Express Route Port. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + bandwidthInGbps: + description: Bandwidth of the Express Route Port in Gbps. Changing + this forces a new Express Route Port to be created. + type: number + billingType: + description: The billing type of the Express Route Port. Possible + values are MeteredData and UnlimitedData. + type: string + encapsulation: + description: 'The encapsulation method used for the Express Route + Port. 
Changing this forces a new Express Route Port to be created. + Possible values are: Dot1Q, QinQ.' + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Express Route Port. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Express Route Port. Only + possible value is UserAssigned. + type: string + type: object + link1: + description: A list of link blocks as defined below. + properties: + adminEnabled: + description: Whether enable administration state on the Express + Route Port Link? Defaults to false. + type: boolean + macsecCakKeyvaultSecretId: + description: The ID of the Key Vault Secret that contains + the Mac security CAK key for this Express Route Port Link. + type: string + macsecCipher: + description: The MACSec cipher used for this Express Route + Port Link. Possible values are GcmAes128 and GcmAes256. + Defaults to GcmAes128. + type: string + macsecCknKeyvaultSecretId: + description: The ID of the Key Vault Secret that contains + the MACSec CKN key for this Express Route Port Link. + type: string + macsecSciEnabled: + description: Should Secure Channel Identifier on the Express + Route Port Link be enabled? Defaults to false. + type: boolean + type: object + link2: + description: A list of link blocks as defined below. + properties: + adminEnabled: + description: Whether enable administration state on the Express + Route Port Link? Defaults to false. + type: boolean + macsecCakKeyvaultSecretId: + description: The ID of the Key Vault Secret that contains + the Mac security CAK key for this Express Route Port Link. + type: string + macsecCipher: + description: The MACSec cipher used for this Express Route + Port Link. Possible values are GcmAes128 and GcmAes256. + Defaults to GcmAes128. 
+ type: string + macsecCknKeyvaultSecretId: + description: The ID of the Key Vault Secret that contains + the MACSec CKN key for this Express Route Port Link. + type: string + macsecSciEnabled: + description: Should Secure Channel Identifier on the Express + Route Port Link be enabled? Defaults to false. + type: boolean + type: object + location: + description: The Azure Region where the Express Route Port should + exist. Changing this forces a new Express Route Port to be created. + type: string + peeringLocation: + description: The name of the peering location that this Express + Route Port is physically mapped to. Changing this forces a new + Express Route Port to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Express Route Port. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.bandwidthInGbps is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.bandwidthInGbps) + || (has(self.initProvider) && has(self.initProvider.bandwidthInGbps))' + - message: spec.forProvider.encapsulation is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.encapsulation) + || (has(self.initProvider) && has(self.initProvider.encapsulation))' + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.peeringLocation is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.peeringLocation) + || (has(self.initProvider) && has(self.initProvider.peeringLocation))' + status: + description: ExpressRoutePortStatus defines the observed state of ExpressRoutePort. 
+ properties: + atProvider: + properties: + bandwidthInGbps: + description: Bandwidth of the Express Route Port in Gbps. Changing + this forces a new Express Route Port to be created. + type: number + billingType: + description: The billing type of the Express Route Port. Possible + values are MeteredData and UnlimitedData. + type: string + encapsulation: + description: 'The encapsulation method used for the Express Route + Port. Changing this forces a new Express Route Port to be created. + Possible values are: Dot1Q, QinQ.' + type: string + ethertype: + description: The EtherType of the Express Route Port. + type: string + guid: + description: The resource GUID of the Express Route Port. + type: string + id: + description: The ID of the Express Route Port. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Express Route Port. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Express Route Port. Only + possible value is UserAssigned. + type: string + type: object + link1: + description: A list of link blocks as defined below. + properties: + adminEnabled: + description: Whether enable administration state on the Express + Route Port Link? Defaults to false. + type: boolean + connectorType: + description: The connector type of the Express Route Port + Link. + type: string + id: + description: The ID of this Express Route Port Link. + type: string + interfaceName: + description: The interface name of the Azure router associated + with the Express Route Port Link. + type: string + macsecCakKeyvaultSecretId: + description: The ID of the Key Vault Secret that contains + the Mac security CAK key for this Express Route Port Link. 
+ type: string + macsecCipher: + description: The MACSec cipher used for this Express Route + Port Link. Possible values are GcmAes128 and GcmAes256. + Defaults to GcmAes128. + type: string + macsecCknKeyvaultSecretId: + description: The ID of the Key Vault Secret that contains + the MACSec CKN key for this Express Route Port Link. + type: string + macsecSciEnabled: + description: Should Secure Channel Identifier on the Express + Route Port Link be enabled? Defaults to false. + type: boolean + patchPanelId: + description: The ID that maps from the Express Route Port + Link to the patch panel port. + type: string + rackId: + description: The ID that maps from the patch panel port to + the rack. + type: string + routerName: + description: The name of the Azure router associated with + the Express Route Port Link. + type: string + type: object + link2: + description: A list of link blocks as defined below. + properties: + adminEnabled: + description: Whether enable administration state on the Express + Route Port Link? Defaults to false. + type: boolean + connectorType: + description: The connector type of the Express Route Port + Link. + type: string + id: + description: The ID of this Express Route Port Link. + type: string + interfaceName: + description: The interface name of the Azure router associated + with the Express Route Port Link. + type: string + macsecCakKeyvaultSecretId: + description: The ID of the Key Vault Secret that contains + the Mac security CAK key for this Express Route Port Link. + type: string + macsecCipher: + description: The MACSec cipher used for this Express Route + Port Link. Possible values are GcmAes128 and GcmAes256. + Defaults to GcmAes128. + type: string + macsecCknKeyvaultSecretId: + description: The ID of the Key Vault Secret that contains + the MACSec CKN key for this Express Route Port Link. + type: string + macsecSciEnabled: + description: Should Secure Channel Identifier on the Express + Route Port Link be enabled? 
Defaults to false. + type: boolean + patchPanelId: + description: The ID that maps from the Express Route Port + Link to the patch panel port. + type: string + rackId: + description: The ID that maps from the patch panel port to + the rack. + type: string + routerName: + description: The name of the Azure router associated with + the Express Route Port Link. + type: string + type: object + location: + description: The Azure Region where the Express Route Port should + exist. Changing this forces a new Express Route Port to be created. + type: string + mtu: + description: The maximum transmission unit of the Express Route + Port. + type: string + peeringLocation: + description: The name of the peering location that this Express + Route Port is physically mapped to. Changing this forces a new + Express Route Port to be created. + type: string + resourceGroupName: + description: The name of the Resource Group where the Express + Route Port should exist. Changing this forces a new Express + Route Port to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Express Route Port. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/network.azure.upbound.io_firewallpolicies.yaml b/package/crds/network.azure.upbound.io_firewallpolicies.yaml index 72686f5ec..37eec4fcc 100644 --- a/package/crds/network.azure.upbound.io_firewallpolicies.yaml +++ b/package/crds/network.azure.upbound.io_firewallpolicies.yaml @@ -1190,3 +1190,1133 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: FirewallPolicy is the Schema for the FirewallPolicys API. 
Manages + a Firewall Policy. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: FirewallPolicySpec defines the desired state of FirewallPolicy + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + autoLearnPrivateRangesEnabled: + description: Whether enable auto learn private ip range. + type: boolean + basePolicyId: + description: The ID of the base Firewall Policy. + type: string + dns: + description: A dns block as defined below. + properties: + proxyEnabled: + description: Whether to enable DNS proxy on Firewalls attached + to this Firewall Policy? Defaults to false. 
+ type: boolean + servers: + description: A list of custom DNS servers' IP addresses. + items: + type: string + type: array + type: object + explicitProxy: + description: A explicit_proxy block as defined below. + properties: + enablePacFile: + description: Whether the pac file port and url need to be + provided. + type: boolean + enabled: + description: Whether the explicit proxy is enabled for this + Firewall Policy. + type: boolean + httpPort: + description: The port number for explicit http protocol. + type: number + httpsPort: + description: The port number for explicit proxy https protocol. + type: number + pacFile: + description: Specifies a SAS URL for PAC file. + type: string + pacFilePort: + description: Specifies a port number for firewall to serve + PAC file. + type: number + type: object + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Firewall Policy. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Firewall Policy. Only + possible value is UserAssigned. + type: string + type: object + insights: + description: An insights block as defined below. + properties: + defaultLogAnalyticsWorkspaceId: + description: The ID of the default Log Analytics Workspace + that the Firewalls associated with this Firewall Policy + will send their logs to, when there is no location matches + in the log_analytics_workspace. + type: string + enabled: + description: Whether the insights functionality is enabled + for this Firewall Policy. + type: boolean + logAnalyticsWorkspace: + description: A list of log_analytics_workspace block as defined + below. 
+ items: + properties: + firewallLocation: + description: The location of the Firewalls, that when + matches this Log Analytics Workspace will be used + to consume their logs. + type: string + id: + description: 12-digit number (id) which identifies your + signature. + type: string + type: object + type: array + retentionInDays: + description: The log retention period in days. + type: number + type: object + intrusionDetection: + description: A intrusion_detection block as defined below. + properties: + mode: + description: 'In which mode you want to run intrusion detection: + Off, Alert or Deny.' + type: string + privateRanges: + description: A list of Private IP address ranges to identify + traffic direction. By default, only ranges defined by IANA + RFC 1918 are considered private IP addresses. + items: + type: string + type: array + signatureOverrides: + description: One or more signature_overrides blocks as defined + below. + items: + properties: + id: + description: 12-digit number (id) which identifies your + signature. + type: string + state: + description: state can be any of Off, Alert or Deny. + type: string + type: object + type: array + trafficBypass: + description: One or more traffic_bypass blocks as defined + below. + items: + properties: + description: + description: The description for this bypass traffic + setting. + type: string + destinationAddresses: + description: Specifies a list of destination IP addresses + that shall be bypassed by intrusion detection. + items: + type: string + type: array + x-kubernetes-list-type: set + destinationIpGroups: + description: Specifies a list of destination IP groups + that shall be bypassed by intrusion detection. + items: + type: string + type: array + x-kubernetes-list-type: set + destinationPorts: + description: Specifies a list of destination IP ports + that shall be bypassed by intrusion detection. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + name: + description: The name which should be used for this + bypass traffic setting. + type: string + protocol: + description: The protocols any of ANY, TCP, ICMP, UDP + that shall be bypassed by intrusion detection. + type: string + sourceAddresses: + description: Specifies a list of source addresses that + shall be bypassed by intrusion detection. + items: + type: string + type: array + x-kubernetes-list-type: set + sourceIpGroups: + description: Specifies a list of source IP groups that + shall be bypassed by intrusion detection. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + type: object + location: + description: The Azure Region where the Firewall Policy should + exist. Changing this forces a new Firewall Policy to be created. + type: string + privateIpRanges: + description: A list of private IP ranges to which traffic will + not be SNAT. + items: + type: string + type: array + resourceGroupName: + description: The name of the Resource Group where the Firewall + Policy should exist. Changing this forces a new Firewall Policy + to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sku: + description: The SKU Tier of the Firewall Policy. Possible values + are Standard, Premium and Basic. Changing this forces a new + Firewall Policy to be created. + type: string + sqlRedirectAllowed: + description: Whether SQL Redirect traffic filtering is allowed. + Enabling this flag requires no rule using ports between 11000-11999. 
+ type: boolean + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Firewall Policy. + type: object + x-kubernetes-map-type: granular + threatIntelligenceAllowlist: + description: A threat_intelligence_allowlist block as defined + below. + properties: + fqdns: + description: A list of FQDNs that will be skipped for threat + detection. + items: + type: string + type: array + x-kubernetes-list-type: set + ipAddresses: + description: A list of IP addresses or CIDR ranges that will + be skipped for threat detection. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + threatIntelligenceMode: + description: The operation mode for Threat Intelligence. Possible + values are Alert, Deny and Off. Defaults to Alert. + type: string + tlsCertificate: + description: A tls_certificate block as defined below. + properties: + keyVaultSecretId: + description: The ID of the Key Vault, where the secret or + certificate is stored. + type: string + name: + description: The name of the certificate. + type: string + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + autoLearnPrivateRangesEnabled: + description: Whether enable auto learn private ip range. + type: boolean + basePolicyId: + description: The ID of the base Firewall Policy. 
+ type: string + dns: + description: A dns block as defined below. + properties: + proxyEnabled: + description: Whether to enable DNS proxy on Firewalls attached + to this Firewall Policy? Defaults to false. + type: boolean + servers: + description: A list of custom DNS servers' IP addresses. + items: + type: string + type: array + type: object + explicitProxy: + description: A explicit_proxy block as defined below. + properties: + enablePacFile: + description: Whether the pac file port and url need to be + provided. + type: boolean + enabled: + description: Whether the explicit proxy is enabled for this + Firewall Policy. + type: boolean + httpPort: + description: The port number for explicit http protocol. + type: number + httpsPort: + description: The port number for explicit proxy https protocol. + type: number + pacFile: + description: Specifies a SAS URL for PAC file. + type: string + pacFilePort: + description: Specifies a port number for firewall to serve + PAC file. + type: number + type: object + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Firewall Policy. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Firewall Policy. Only + possible value is UserAssigned. + type: string + type: object + insights: + description: An insights block as defined below. + properties: + defaultLogAnalyticsWorkspaceId: + description: The ID of the default Log Analytics Workspace + that the Firewalls associated with this Firewall Policy + will send their logs to, when there is no location matches + in the log_analytics_workspace. + type: string + enabled: + description: Whether the insights functionality is enabled + for this Firewall Policy. 
+ type: boolean + logAnalyticsWorkspace: + description: A list of log_analytics_workspace block as defined + below. + items: + properties: + firewallLocation: + description: The location of the Firewalls, that when + matches this Log Analytics Workspace will be used + to consume their logs. + type: string + id: + description: 12-digit number (id) which identifies your + signature. + type: string + type: object + type: array + retentionInDays: + description: The log retention period in days. + type: number + type: object + intrusionDetection: + description: A intrusion_detection block as defined below. + properties: + mode: + description: 'In which mode you want to run intrusion detection: + Off, Alert or Deny.' + type: string + privateRanges: + description: A list of Private IP address ranges to identify + traffic direction. By default, only ranges defined by IANA + RFC 1918 are considered private IP addresses. + items: + type: string + type: array + signatureOverrides: + description: One or more signature_overrides blocks as defined + below. + items: + properties: + id: + description: 12-digit number (id) which identifies your + signature. + type: string + state: + description: state can be any of Off, Alert or Deny. + type: string + type: object + type: array + trafficBypass: + description: One or more traffic_bypass blocks as defined + below. + items: + properties: + description: + description: The description for this bypass traffic + setting. + type: string + destinationAddresses: + description: Specifies a list of destination IP addresses + that shall be bypassed by intrusion detection. + items: + type: string + type: array + x-kubernetes-list-type: set + destinationIpGroups: + description: Specifies a list of destination IP groups + that shall be bypassed by intrusion detection. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + destinationPorts: + description: Specifies a list of destination IP ports + that shall be bypassed by intrusion detection. + items: + type: string + type: array + x-kubernetes-list-type: set + name: + description: The name which should be used for this + bypass traffic setting. + type: string + protocol: + description: The protocols any of ANY, TCP, ICMP, UDP + that shall be bypassed by intrusion detection. + type: string + sourceAddresses: + description: Specifies a list of source addresses that + shall be bypassed by intrusion detection. + items: + type: string + type: array + x-kubernetes-list-type: set + sourceIpGroups: + description: Specifies a list of source IP groups that + shall be bypassed by intrusion detection. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + type: object + location: + description: The Azure Region where the Firewall Policy should + exist. Changing this forces a new Firewall Policy to be created. + type: string + privateIpRanges: + description: A list of private IP ranges to which traffic will + not be SNAT. + items: + type: string + type: array + sku: + description: The SKU Tier of the Firewall Policy. Possible values + are Standard, Premium and Basic. Changing this forces a new + Firewall Policy to be created. + type: string + sqlRedirectAllowed: + description: Whether SQL Redirect traffic filtering is allowed. + Enabling this flag requires no rule using ports between 11000-11999. + type: boolean + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Firewall Policy. + type: object + x-kubernetes-map-type: granular + threatIntelligenceAllowlist: + description: A threat_intelligence_allowlist block as defined + below. + properties: + fqdns: + description: A list of FQDNs that will be skipped for threat + detection. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + ipAddresses: + description: A list of IP addresses or CIDR ranges that will + be skipped for threat detection. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + threatIntelligenceMode: + description: The operation mode for Threat Intelligence. Possible + values are Alert, Deny and Off. Defaults to Alert. + type: string + tlsCertificate: + description: A tls_certificate block as defined below. + properties: + keyVaultSecretId: + description: The ID of the Key Vault, where the secret or + certificate is stored. + type: string + name: + description: The name of the certificate. + type: string + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + status: + description: FirewallPolicyStatus defines the observed state of FirewallPolicy. + properties: + atProvider: + properties: + autoLearnPrivateRangesEnabled: + description: Whether enable auto learn private ip range. + type: boolean + basePolicyId: + description: The ID of the base Firewall Policy. + type: string + childPolicies: + description: A list of reference to child Firewall Policies of + this Firewall Policy. + items: + type: string + type: array + dns: + description: A dns block as defined below. + properties: + proxyEnabled: + description: Whether to enable DNS proxy on Firewalls attached + to this Firewall Policy? Defaults to false. + type: boolean + servers: + description: A list of custom DNS servers' IP addresses. + items: + type: string + type: array + type: object + explicitProxy: + description: A explicit_proxy block as defined below. + properties: + enablePacFile: + description: Whether the pac file port and url need to be + provided. + type: boolean + enabled: + description: Whether the explicit proxy is enabled for this + Firewall Policy. 
+ type: boolean + httpPort: + description: The port number for explicit http protocol. + type: number + httpsPort: + description: The port number for explicit proxy https protocol. + type: number + pacFile: + description: Specifies a SAS URL for PAC file. + type: string + pacFilePort: + description: Specifies a port number for firewall to serve + PAC file. + type: number + type: object + firewalls: + description: A list of references to Azure Firewalls that this + Firewall Policy is associated with. + items: + type: string + type: array + id: + description: The ID of the Firewall Policy. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Firewall Policy. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The ID of the Firewall Policy. + type: string + tenantId: + description: The ID of the Firewall Policy. + type: string + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Firewall Policy. Only + possible value is UserAssigned. + type: string + type: object + insights: + description: An insights block as defined below. + properties: + defaultLogAnalyticsWorkspaceId: + description: The ID of the default Log Analytics Workspace + that the Firewalls associated with this Firewall Policy + will send their logs to, when there is no location matches + in the log_analytics_workspace. + type: string + enabled: + description: Whether the insights functionality is enabled + for this Firewall Policy. + type: boolean + logAnalyticsWorkspace: + description: A list of log_analytics_workspace block as defined + below. + items: + properties: + firewallLocation: + description: The location of the Firewalls, that when + matches this Log Analytics Workspace will be used + to consume their logs. 
+ type: string + id: + description: 12-digit number (id) which identifies your + signature. + type: string + type: object + type: array + retentionInDays: + description: The log retention period in days. + type: number + type: object + intrusionDetection: + description: A intrusion_detection block as defined below. + properties: + mode: + description: 'In which mode you want to run intrusion detection: + Off, Alert or Deny.' + type: string + privateRanges: + description: A list of Private IP address ranges to identify + traffic direction. By default, only ranges defined by IANA + RFC 1918 are considered private IP addresses. + items: + type: string + type: array + signatureOverrides: + description: One or more signature_overrides blocks as defined + below. + items: + properties: + id: + description: 12-digit number (id) which identifies your + signature. + type: string + state: + description: state can be any of Off, Alert or Deny. + type: string + type: object + type: array + trafficBypass: + description: One or more traffic_bypass blocks as defined + below. + items: + properties: + description: + description: The description for this bypass traffic + setting. + type: string + destinationAddresses: + description: Specifies a list of destination IP addresses + that shall be bypassed by intrusion detection. + items: + type: string + type: array + x-kubernetes-list-type: set + destinationIpGroups: + description: Specifies a list of destination IP groups + that shall be bypassed by intrusion detection. + items: + type: string + type: array + x-kubernetes-list-type: set + destinationPorts: + description: Specifies a list of destination IP ports + that shall be bypassed by intrusion detection. + items: + type: string + type: array + x-kubernetes-list-type: set + name: + description: The name which should be used for this + bypass traffic setting. 
+ type: string + protocol: + description: The protocols any of ANY, TCP, ICMP, UDP + that shall be bypassed by intrusion detection. + type: string + sourceAddresses: + description: Specifies a list of source addresses that + shall be bypassed by intrusion detection. + items: + type: string + type: array + x-kubernetes-list-type: set + sourceIpGroups: + description: Specifies a list of source IP groups that + shall be bypassed by intrusion detection. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + type: object + location: + description: The Azure Region where the Firewall Policy should + exist. Changing this forces a new Firewall Policy to be created. + type: string + privateIpRanges: + description: A list of private IP ranges to which traffic will + not be SNAT. + items: + type: string + type: array + resourceGroupName: + description: The name of the Resource Group where the Firewall + Policy should exist. Changing this forces a new Firewall Policy + to be created. + type: string + ruleCollectionGroups: + description: A list of references to Firewall Policy Rule Collection + Groups that belongs to this Firewall Policy. + items: + type: string + type: array + sku: + description: The SKU Tier of the Firewall Policy. Possible values + are Standard, Premium and Basic. Changing this forces a new + Firewall Policy to be created. + type: string + sqlRedirectAllowed: + description: Whether SQL Redirect traffic filtering is allowed. + Enabling this flag requires no rule using ports between 11000-11999. + type: boolean + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Firewall Policy. + type: object + x-kubernetes-map-type: granular + threatIntelligenceAllowlist: + description: A threat_intelligence_allowlist block as defined + below. + properties: + fqdns: + description: A list of FQDNs that will be skipped for threat + detection. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + ipAddresses: + description: A list of IP addresses or CIDR ranges that will + be skipped for threat detection. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + threatIntelligenceMode: + description: The operation mode for Threat Intelligence. Possible + values are Alert, Deny and Off. Defaults to Alert. + type: string + tlsCertificate: + description: A tls_certificate block as defined below. + properties: + keyVaultSecretId: + description: The ID of the Key Vault, where the secret or + certificate is stored. + type: string + name: + description: The name of the certificate. + type: string + type: object + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/network.azure.upbound.io_firewalls.yaml b/package/crds/network.azure.upbound.io_firewalls.yaml index a45136e90..6060046fa 100644 --- a/package/crds/network.azure.upbound.io_firewalls.yaml +++ b/package/crds/network.azure.upbound.io_firewalls.yaml @@ -1219,3 +1219,1190 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Firewall is the Schema for the Firewalls API. Manages an Azure + Firewall. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: FirewallSpec defines the desired state of Firewall + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + dnsProxyEnabled: + description: Whether DNS proxy is enabled. It will forward DNS + requests to the DNS servers when set to true. It will be set + to true if dns_servers provided with a not empty list. + type: boolean + dnsServers: + description: A list of DNS servers that the Azure Firewall will + direct DNS traffic to the for name resolution. + items: + type: string + type: array + firewallPolicyId: + description: The ID of the Firewall Policy applied to this Firewall. + type: string + ipConfiguration: + description: An ip_configuration block as documented below. + items: + properties: + name: + description: Specifies the name of the IP Configuration. + type: string + publicIpAddressId: + description: The ID of the Public IP Address associated + with the firewall. + type: string + publicIpAddressIdRef: + description: Reference to a PublicIP in network to populate + publicIpAddressId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publicIpAddressIdSelector: + description: Selector for a PublicIP in network to populate + publicIpAddressId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + subnetId: + description: Reference to the subnet associated with the + IP Configuration. Changing this forces a new resource + to be created. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + managementIpConfiguration: + description: A management_ip_configuration block as documented + below, which allows force-tunnelling of traffic to be performed + by the firewall. Adding or removing this block or changing the + subnet_id in an existing block forces a new resource to be created. + Changing this forces a new resource to be created. + properties: + name: + description: Specifies the name of the IP Configuration. + type: string + publicIpAddressId: + description: The ID of the Public IP Address associated with + the firewall. + type: string + subnetId: + description: Reference to the subnet associated with the IP + Configuration. Changing this forces a new resource to be + created. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + privateIpRanges: + description: A list of SNAT private CIDR IP ranges, or the special + string IANAPrivateRanges, which indicates Azure Firewall does + not SNAT when the destination IP address is a private range + per IANA RFC 1918. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + resourceGroupName: + description: The name of the resource group in which to create + the resource. Changing this forces a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + skuName: + description: SKU name of the Firewall. Possible values are AZFW_Hub + and AZFW_VNet. Changing this forces a new resource to be created. + type: string + skuTier: + description: SKU tier of the Firewall. Possible values are Premium, + Standard and Basic. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + threatIntelMode: + description: 'The operation mode for threat intelligence-based + filtering. Possible values are: Off, Alert and Deny. Defaults + to Alert.' + type: string + virtualHub: + description: A virtual_hub block as documented below. + properties: + publicIpCount: + description: Specifies the number of public IPs to assign + to the Firewall. Defaults to 1. + type: number + virtualHubId: + description: Specifies the ID of the Virtual Hub where the + Firewall resides in. + type: string + type: object + zones: + description: Specifies a list of Availability Zones in which this + Azure Firewall should be located. Changing this forces a new + Azure Firewall to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. 
The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + dnsProxyEnabled: + description: Whether DNS proxy is enabled. It will forward DNS + requests to the DNS servers when set to true. It will be set + to true if dns_servers provided with a not empty list. + type: boolean + dnsServers: + description: A list of DNS servers that the Azure Firewall will + direct DNS traffic to the for name resolution. + items: + type: string + type: array + firewallPolicyId: + description: The ID of the Firewall Policy applied to this Firewall. + type: string + ipConfiguration: + description: An ip_configuration block as documented below. + items: + properties: + name: + description: Specifies the name of the IP Configuration. + type: string + publicIpAddressId: + description: The ID of the Public IP Address associated + with the firewall. + type: string + publicIpAddressIdRef: + description: Reference to a PublicIP in network to populate + publicIpAddressId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publicIpAddressIdSelector: + description: Selector for a PublicIP in network to populate + publicIpAddressId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + subnetId: + description: Reference to the subnet associated with the + IP Configuration. Changing this forces a new resource + to be created. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + managementIpConfiguration: + description: A management_ip_configuration block as documented + below, which allows force-tunnelling of traffic to be performed + by the firewall. Adding or removing this block or changing the + subnet_id in an existing block forces a new resource to be created. + Changing this forces a new resource to be created. + properties: + name: + description: Specifies the name of the IP Configuration. + type: string + publicIpAddressId: + description: The ID of the Public IP Address associated with + the firewall. + type: string + subnetId: + description: Reference to the subnet associated with the IP + Configuration. Changing this forces a new resource to be + created. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + privateIpRanges: + description: A list of SNAT private CIDR IP ranges, or the special + string IANAPrivateRanges, which indicates Azure Firewall does + not SNAT when the destination IP address is a private range + per IANA RFC 1918. + items: + type: string + type: array + x-kubernetes-list-type: set + skuName: + description: SKU name of the Firewall. Possible values are AZFW_Hub + and AZFW_VNet. Changing this forces a new resource to be created. + type: string + skuTier: + description: SKU tier of the Firewall. Possible values are Premium, + Standard and Basic. 
+ type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + threatIntelMode: + description: 'The operation mode for threat intelligence-based + filtering. Possible values are: Off, Alert and Deny. Defaults + to Alert.' + type: string + virtualHub: + description: A virtual_hub block as documented below. + properties: + publicIpCount: + description: Specifies the number of public IPs to assign + to the Firewall. Defaults to 1. + type: number + virtualHubId: + description: Specifies the ID of the Virtual Hub where the + Firewall resides in. + type: string + type: object + zones: + description: Specifies a list of Availability Zones in which this + Azure Firewall should be located. Changing this forces a new + Azure Firewall to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.skuName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.skuName) + || (has(self.initProvider) && has(self.initProvider.skuName))' + - message: spec.forProvider.skuTier is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.skuTier) + || (has(self.initProvider) && has(self.initProvider.skuTier))' + status: + description: FirewallStatus defines the observed state of Firewall. + properties: + atProvider: + properties: + dnsProxyEnabled: + description: Whether DNS proxy is enabled. It will forward DNS + requests to the DNS servers when set to true. It will be set + to true if dns_servers provided with a not empty list. + type: boolean + dnsServers: + description: A list of DNS servers that the Azure Firewall will + direct DNS traffic to the for name resolution. 
+ items: + type: string + type: array + firewallPolicyId: + description: The ID of the Firewall Policy applied to this Firewall. + type: string + id: + description: The ID of the Azure Firewall. + type: string + ipConfiguration: + description: An ip_configuration block as documented below. + items: + properties: + name: + description: Specifies the name of the IP Configuration. + type: string + privateIpAddress: + description: The Private IP address of the Azure Firewall. + type: string + publicIpAddressId: + description: The ID of the Public IP Address associated + with the firewall. + type: string + subnetId: + description: Reference to the subnet associated with the + IP Configuration. Changing this forces a new resource + to be created. + type: string + type: object + type: array + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + managementIpConfiguration: + description: A management_ip_configuration block as documented + below, which allows force-tunnelling of traffic to be performed + by the firewall. Adding or removing this block or changing the + subnet_id in an existing block forces a new resource to be created. + Changing this forces a new resource to be created. + properties: + name: + description: Specifies the name of the IP Configuration. + type: string + privateIpAddress: + description: The private IP address associated with the Firewall. + type: string + publicIpAddressId: + description: The ID of the Public IP Address associated with + the firewall. + type: string + subnetId: + description: Reference to the subnet associated with the IP + Configuration. Changing this forces a new resource to be + created. 
+ type: string + type: object + privateIpRanges: + description: A list of SNAT private CIDR IP ranges, or the special + string IANAPrivateRanges, which indicates Azure Firewall does + not SNAT when the destination IP address is a private range + per IANA RFC 1918. + items: + type: string + type: array + x-kubernetes-list-type: set + resourceGroupName: + description: The name of the resource group in which to create + the resource. Changing this forces a new resource to be created. + type: string + skuName: + description: SKU name of the Firewall. Possible values are AZFW_Hub + and AZFW_VNet. Changing this forces a new resource to be created. + type: string + skuTier: + description: SKU tier of the Firewall. Possible values are Premium, + Standard and Basic. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + threatIntelMode: + description: 'The operation mode for threat intelligence-based + filtering. Possible values are: Off, Alert and Deny. Defaults + to Alert.' + type: string + virtualHub: + description: A virtual_hub block as documented below. + properties: + privateIpAddress: + description: The private IP address associated with the Firewall. + type: string + publicIpAddresses: + description: The list of public IP addresses associated with + the Firewall. + items: + type: string + type: array + publicIpCount: + description: Specifies the number of public IPs to assign + to the Firewall. Defaults to 1. + type: number + virtualHubId: + description: Specifies the ID of the Virtual Hub where the + Firewall resides in. + type: string + type: object + zones: + description: Specifies a list of Availability Zones in which this + Azure Firewall should be located. Changing this forces a new + Azure Firewall to be created. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/network.azure.upbound.io_frontdoorcustomhttpsconfigurations.yaml b/package/crds/network.azure.upbound.io_frontdoorcustomhttpsconfigurations.yaml index 898f8fa8b..6a20eadcd 100644 --- a/package/crds/network.azure.upbound.io_frontdoorcustomhttpsconfigurations.yaml +++ b/package/crds/network.azure.upbound.io_frontdoorcustomhttpsconfigurations.yaml @@ -593,3 +593,565 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: FrontdoorCustomHTTPSConfiguration is the Schema for the FrontdoorCustomHTTPSConfigurations + API. Manages the Custom Https Configuration for an Azure Front Door (classic) + Frontend Endpoint. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: FrontdoorCustomHTTPSConfigurationSpec defines the desired + state of FrontdoorCustomHTTPSConfiguration + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + customHttpsConfiguration: + description: A custom_https_configuration block as defined above. + properties: + azureKeyVaultCertificateSecretName: + description: The name of the Key Vault secret representing + the full certificate PFX. + type: string + azureKeyVaultCertificateSecretVersion: + description: The version of the Key Vault secret representing + the full certificate PFX. + type: string + azureKeyVaultCertificateVaultId: + description: The ID of the Key Vault containing the SSL certificate. + type: string + azureKeyVaultCertificateVaultIdRef: + description: Reference to a Key in keyvault to populate azureKeyVaultCertificateVaultId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + azureKeyVaultCertificateVaultIdSelector: + description: Selector for a Key in keyvault to populate azureKeyVaultCertificateVaultId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + certificateSource: + description: Certificate source to encrypted HTTPS traffic + with. Allowed values are FrontDoor or AzureKeyVault. 
Defaults + to FrontDoor. + type: string + type: object + customHttpsProvisioningEnabled: + description: Should the HTTPS protocol be enabled for this custom + domain associated with the Front Door? + type: boolean + frontendEndpointId: + description: The ID of the Front Door Frontend Endpoint which + this configuration refers to. Changing this forces a new resource + to be created. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + customHttpsConfiguration: + description: A custom_https_configuration block as defined above. + properties: + azureKeyVaultCertificateSecretName: + description: The name of the Key Vault secret representing + the full certificate PFX. + type: string + azureKeyVaultCertificateSecretVersion: + description: The version of the Key Vault secret representing + the full certificate PFX. + type: string + azureKeyVaultCertificateVaultId: + description: The ID of the Key Vault containing the SSL certificate. + type: string + azureKeyVaultCertificateVaultIdRef: + description: Reference to a Key in keyvault to populate azureKeyVaultCertificateVaultId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + azureKeyVaultCertificateVaultIdSelector: + description: Selector for a Key in keyvault to populate azureKeyVaultCertificateVaultId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + certificateSource: + description: Certificate source to encrypted HTTPS traffic + with. Allowed values are FrontDoor or AzureKeyVault. Defaults + to FrontDoor. + type: string + type: object + customHttpsProvisioningEnabled: + description: Should the HTTPS protocol be enabled for this custom + domain associated with the Front Door? + type: boolean + frontendEndpointId: + description: The ID of the Front Door Frontend Endpoint which + this configuration refers to. Changing this forces a new resource + to be created. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. 
+ type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.customHttpsProvisioningEnabled is a required + parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.customHttpsProvisioningEnabled) + || (has(self.initProvider) && has(self.initProvider.customHttpsProvisioningEnabled))' + - message: spec.forProvider.frontendEndpointId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.frontendEndpointId) + || (has(self.initProvider) && has(self.initProvider.frontendEndpointId))' + status: + description: FrontdoorCustomHTTPSConfigurationStatus defines the observed + state of FrontdoorCustomHTTPSConfiguration. + properties: + atProvider: + properties: + customHttpsConfiguration: + description: A custom_https_configuration block as defined above. + properties: + azureKeyVaultCertificateSecretName: + description: The name of the Key Vault secret representing + the full certificate PFX. + type: string + azureKeyVaultCertificateSecretVersion: + description: The version of the Key Vault secret representing + the full certificate PFX. + type: string + azureKeyVaultCertificateVaultId: + description: The ID of the Key Vault containing the SSL certificate. + type: string + certificateSource: + description: Certificate source to encrypted HTTPS traffic + with. Allowed values are FrontDoor or AzureKeyVault. Defaults + to FrontDoor. + type: string + minimumTlsVersion: + description: Minimum client TLS version supported. 
+ type: string + provisioningState: + type: string + provisioningSubstate: + type: string + type: object + customHttpsProvisioningEnabled: + description: Should the HTTPS protocol be enabled for this custom + domain associated with the Front Door? + type: boolean + frontendEndpointId: + description: The ID of the Front Door Frontend Endpoint which + this configuration refers to. Changing this forces a new resource + to be created. + type: string + id: + description: The ID of the Azure Front Door Custom HTTPS Configuration. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/network.azure.upbound.io_frontdoorrulesengines.yaml b/package/crds/network.azure.upbound.io_frontdoorrulesengines.yaml index 3b8144b28..f5d432b4c 100644 --- a/package/crds/network.azure.upbound.io_frontdoorrulesengines.yaml +++ b/package/crds/network.azure.upbound.io_frontdoorrulesengines.yaml @@ -776,3 +776,755 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: FrontdoorRulesEngine is the Schema for the FrontdoorRulesEngines + API. Manages an Azure Front Door (classic) Rules Engine configuration and + rules. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: FrontdoorRulesEngineSpec defines the desired state of FrontdoorRulesEngine + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + enabled: + description: Whether this Rules engine configuration is enabled? + Defaults to true. + type: boolean + frontdoorName: + description: The name of the Front Door instance. Changing this + forces a new resource to be created. + type: string + frontdoorNameRef: + description: Reference to a FrontDoor in network to populate frontdoorName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + frontdoorNameSelector: + description: Selector for a FrontDoor in network to populate frontdoorName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + resourceGroupName: + description: The name of the resource group. Changing this forces + a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + rule: + description: A rule block as defined below. + items: + properties: + action: + description: An action block as defined below. + properties: + requestHeader: + description: A request_header block as defined below. + items: + properties: + headerActionType: + description: can be set to Overwrite, Append or + Delete. + type: string + headerName: + description: header name (string). + type: string + value: + description: value name (string). + type: string + type: object + type: array + responseHeader: + description: A response_header block as defined below. + items: + properties: + headerActionType: + description: can be set to Overwrite, Append or + Delete. + type: string + headerName: + description: header name (string). + type: string + value: + description: value name (string). + type: string + type: object + type: array + type: object + matchCondition: + description: One or more match_condition block as defined + below. + items: + properties: + negateCondition: + description: can be set to true or false to negate + the given condition. Defaults to false. + type: boolean + operator: + description: can be set to Any, IPMatch, GeoMatch, + Equal, Contains, LessThan, GreaterThan, LessThanOrEqual, + GreaterThanOrEqual, BeginsWith or EndsWith + type: string + selector: + description: match against a specific key when variable + is set to PostArgs or RequestHeader. It cannot be + used with QueryString and RequestMethod. + type: string + transform: + description: can be set to one or more values out + of Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode + and UrlEncode + items: + type: string + type: array + value: + description: value name (string). 
+ items: + type: string + type: array + variable: + description: can be set to IsMobile, RemoteAddr, RequestMethod, + QueryString, PostArgs, RequestURI, RequestPath, + RequestFilename, RequestFilenameExtension,RequestHeader,RequestBody + or RequestScheme. + type: string + type: object + type: array + name: + description: The name of the rule. + type: string + priority: + description: Priority of the rule, must be unique per rules + engine definition. + type: number + type: object + type: array + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + enabled: + description: Whether this Rules engine configuration is enabled? + Defaults to true. + type: boolean + rule: + description: A rule block as defined below. + items: + properties: + action: + description: An action block as defined below. + properties: + requestHeader: + description: A request_header block as defined below. + items: + properties: + headerActionType: + description: can be set to Overwrite, Append or + Delete. + type: string + headerName: + description: header name (string). + type: string + value: + description: value name (string). + type: string + type: object + type: array + responseHeader: + description: A response_header block as defined below. + items: + properties: + headerActionType: + description: can be set to Overwrite, Append or + Delete. 
+ type: string + headerName: + description: header name (string). + type: string + value: + description: value name (string). + type: string + type: object + type: array + type: object + matchCondition: + description: One or more match_condition block as defined + below. + items: + properties: + negateCondition: + description: can be set to true or false to negate + the given condition. Defaults to false. + type: boolean + operator: + description: can be set to Any, IPMatch, GeoMatch, + Equal, Contains, LessThan, GreaterThan, LessThanOrEqual, + GreaterThanOrEqual, BeginsWith or EndsWith + type: string + selector: + description: match against a specific key when variable + is set to PostArgs or RequestHeader. It cannot be + used with QueryString and RequestMethod. + type: string + transform: + description: can be set to one or more values out + of Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode + and UrlEncode + items: + type: string + type: array + value: + description: value name (string). + items: + type: string + type: array + variable: + description: can be set to IsMobile, RemoteAddr, RequestMethod, + QueryString, PostArgs, RequestURI, RequestPath, + RequestFilename, RequestFilenameExtension,RequestHeader,RequestBody + or RequestScheme. + type: string + type: object + type: array + name: + description: The name of the rule. + type: string + priority: + description: Priority of the rule, must be unique per rules + engine definition. + type: number + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. 
Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: FrontdoorRulesEngineStatus defines the observed state of + FrontdoorRulesEngine. + properties: + atProvider: + properties: + enabled: + description: Whether this Rules engine configuration is enabled? + Defaults to true. + type: boolean + frontdoorName: + description: The name of the Front Door instance. Changing this + forces a new resource to be created. + type: string + id: + type: string + location: + type: string + resourceGroupName: + description: The name of the resource group. Changing this forces + a new resource to be created. + type: string + rule: + description: A rule block as defined below. 
+ items: + properties: + action: + description: An action block as defined below. + properties: + requestHeader: + description: A request_header block as defined below. + items: + properties: + headerActionType: + description: can be set to Overwrite, Append or + Delete. + type: string + headerName: + description: header name (string). + type: string + value: + description: value name (string). + type: string + type: object + type: array + responseHeader: + description: A response_header block as defined below. + items: + properties: + headerActionType: + description: can be set to Overwrite, Append or + Delete. + type: string + headerName: + description: header name (string). + type: string + value: + description: value name (string). + type: string + type: object + type: array + type: object + matchCondition: + description: One or more match_condition block as defined + below. + items: + properties: + negateCondition: + description: can be set to true or false to negate + the given condition. Defaults to false. + type: boolean + operator: + description: can be set to Any, IPMatch, GeoMatch, + Equal, Contains, LessThan, GreaterThan, LessThanOrEqual, + GreaterThanOrEqual, BeginsWith or EndsWith + type: string + selector: + description: match against a specific key when variable + is set to PostArgs or RequestHeader. It cannot be + used with QueryString and RequestMethod. + type: string + transform: + description: can be set to one or more values out + of Lowercase, RemoveNulls, Trim, Uppercase, UrlDecode + and UrlEncode + items: + type: string + type: array + value: + description: value name (string). + items: + type: string + type: array + variable: + description: can be set to IsMobile, RemoteAddr, RequestMethod, + QueryString, PostArgs, RequestURI, RequestPath, + RequestFilename, RequestFilenameExtension,RequestHeader,RequestBody + or RequestScheme. + type: string + type: object + type: array + name: + description: The name of the rule. 
+ type: string + priority: + description: Priority of the rule, must be unique per rules + engine definition. + type: number + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/network.azure.upbound.io_frontdoors.yaml b/package/crds/network.azure.upbound.io_frontdoors.yaml index 0e4e5c51a..e008a61de 100644 --- a/package/crds/network.azure.upbound.io_frontdoors.yaml +++ b/package/crds/network.azure.upbound.io_frontdoors.yaml @@ -1337,3 +1337,1310 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: FrontDoor is the Schema for the FrontDoors API. Manages an Azure + Front Door (classic) instance. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: FrontDoorSpec defines the desired state of FrontDoor + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + backendPool: + description: A backend_pool block as defined below. + items: + properties: + backend: + description: A backend block as defined below. + items: + properties: + address: + description: Location of the backend (IP address or + FQDN) + type: string + enabled: + description: Enable or Disable use of this Backend + Routing Rule. Permitted values are true or false. + Defaults to true. + type: boolean + hostHeader: + description: The value to use as the host header sent + to the backend. + type: string + httpPort: + description: The HTTP TCP port number. Possible values + are between 1 - 65535. + type: number + httpsPort: + description: The HTTPS TCP port number. Possible values + are between 1 - 65535. + type: number + priority: + description: Priority to use for load balancing. Higher + priorities will not be used for load balancing if + any lower priority backend is healthy. Defaults + to 1. + type: number + weight: + description: Weight of this endpoint for load balancing + purposes. Defaults to 50. 
+ type: number + type: object + type: array + healthProbeName: + description: Specifies the name of the backend_pool_health_probe + block within this resource to use for this Backend Pool. + type: string + loadBalancingName: + description: Specifies the name of the backend_pool_load_balancing + block within this resource to use for this Backend Pool. + type: string + name: + description: Specifies the name of the Backend Pool. + type: string + type: object + type: array + backendPoolHealthProbe: + description: A backend_pool_health_probe block as defined below. + items: + properties: + enabled: + description: Is this health probe enabled? Defaults to true. + type: boolean + intervalInSeconds: + description: The number of seconds between each Health Probe. + Defaults to 120. + type: number + name: + description: Specifies the name of the Health Probe. + type: string + path: + description: The path to use for the Health Probe. Default + is /. + type: string + probeMethod: + description: 'Specifies HTTP method the health probe uses + when querying the backend pool instances. Possible values + include: GET and HEAD. Defaults to GET.' + type: string + protocol: + description: Protocol scheme to use for the Health Probe. + Possible values are Http and Https. Defaults to Http. + type: string + type: object + type: array + backendPoolLoadBalancing: + description: A backend_pool_load_balancing block as defined below. + items: + properties: + additionalLatencyMilliseconds: + description: The additional latency in milliseconds for + probes to fall into the lowest latency bucket. Defaults + to 0. + type: number + name: + description: Specifies the name of the Load Balancer. + type: string + sampleSize: + description: The number of samples to consider for load + balancing decisions. Defaults to 4. + type: number + successfulSamplesRequired: + description: The number of samples within the sample period + that must succeed. Defaults to 2. 
+ type: number + type: object + type: array + backendPoolSettings: + description: A backend_pool_settings block as defined below. + items: + properties: + backendPoolsSendReceiveTimeoutSeconds: + description: Specifies the send and receive timeout on forwarding + request to the backend. When the timeout is reached, the + request fails and returns. Possible values are between + 0 - 240. Defaults to 60. + type: number + enforceBackendPoolsCertificateNameCheck: + description: Enforce certificate name check on HTTPS requests + to all backend pools, this setting will have no effect + on HTTP requests. Permitted values are true or false. + type: boolean + type: object + type: array + friendlyName: + description: A friendly name for the Front Door service. + type: string + frontendEndpoint: + description: A frontend_endpoint block as defined below. + items: + properties: + hostName: + description: Specifies the host name of the frontend_endpoint. + Must be a domain name. In order to use a name.azurefd.net + domain, the name value must match the Front Door name. + type: string + name: + description: Specifies the name of the frontend_endpoint. + type: string + sessionAffinityEnabled: + description: Whether to allow session affinity on this host. + Valid options are true or false Defaults to false. + type: boolean + sessionAffinityTtlSeconds: + description: The TTL to use in seconds for session affinity, + if applicable. Defaults to 0. + type: number + webApplicationFirewallPolicyLinkId: + description: Defines the Web Application Firewall policy + ID for each host. + type: string + type: object + type: array + loadBalancerEnabled: + description: Should the Front Door Load Balancer be Enabled? Defaults + to true. + type: boolean + resourceGroupName: + description: Specifies the name of the Resource Group in which + the Front Door service should exist. Changing this forces a + new resource to be created. 
+ type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + routingRule: + description: A routing_rule block as defined below. + items: + properties: + acceptedProtocols: + description: Protocol schemes to match for the Backend Routing + Rule. Possible values are Http and Https. + items: + type: string + type: array + enabled: + description: Enable or Disable use of this Backend Routing + Rule. Permitted values are true or false. Defaults to + true. + type: boolean + forwardingConfiguration: + description: A forwarding_configuration block as defined + below. + properties: + backendPoolName: + description: Specifies the name of the Backend Pool + to forward the incoming traffic to. + type: string + cacheDuration: + description: Specify the minimum caching duration (in + ISO8601 notation e.g. P1DT2H for 1 day and 2 hours). + Needs to be greater than 0 and smaller than 365 days. + cache_duration works only in combination with cache_enabled + set to true. + type: string + cacheEnabled: + description: Specifies whether to Enable caching or + not. Valid options are true or false. Defaults to + false. + type: boolean + cacheQueryParameterStripDirective: + description: Defines cache behaviour in relation to + query string parameters. Valid options are StripAll, + StripAllExcept, StripOnly or StripNone. Defaults to + StripAll. + type: string + cacheQueryParameters: + description: Specify query parameters (array). Works + only in combination with cache_query_parameter_strip_directive + set to StripAllExcept or StripOnly. + items: + type: string + type: array + cacheUseDynamicCompression: + description: Whether to use dynamic compression when + caching. Valid options are true or false. Defaults + to false. 
+ type: boolean + customForwardingPath: + description: Path to use when constructing the request + to forward to the backend. This functions as a URL + Rewrite. Default behaviour preserves the URL path. + type: string + forwardingProtocol: + description: Protocol to use when redirecting. Valid + options are HttpOnly, HttpsOnly, or MatchRequest. + Defaults to HttpsOnly. + type: string + type: object + frontendEndpoints: + description: The names of the frontend_endpoint blocks within + this resource to associate with this routing_rule. + items: + type: string + type: array + name: + description: Specifies the name of the Routing Rule. + type: string + patternsToMatch: + description: The route patterns for the Backend Routing + Rule. + items: + type: string + type: array + redirectConfiguration: + description: A redirect_configuration block as defined below. + properties: + customFragment: + description: The destination fragment in the portion + of URL after '#'. Set this to add a fragment to the + redirect URL. + type: string + customHost: + description: Set this to change the URL for the redirection. + type: string + customPath: + description: The path to retain as per the incoming + request, or update in the URL for the redirection. + type: string + customQueryString: + description: Replace any existing query string from + the incoming request URL. + type: string + redirectProtocol: + description: Protocol to use when redirecting. Valid + options are HttpOnly, HttpsOnly, or MatchRequest. + type: string + redirectType: + description: Status code for the redirect. Valida options + are Moved, Found, TemporaryRedirect, PermanentRedirect. + type: string + type: object + type: object + type: array + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. 
It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + backendPool: + description: A backend_pool block as defined below. + items: + properties: + backend: + description: A backend block as defined below. + items: + properties: + address: + description: Location of the backend (IP address or + FQDN) + type: string + enabled: + description: Enable or Disable use of this Backend + Routing Rule. Permitted values are true or false. + Defaults to true. + type: boolean + hostHeader: + description: The value to use as the host header sent + to the backend. + type: string + httpPort: + description: The HTTP TCP port number. Possible values + are between 1 - 65535. + type: number + httpsPort: + description: The HTTPS TCP port number. Possible values + are between 1 - 65535. + type: number + priority: + description: Priority to use for load balancing. Higher + priorities will not be used for load balancing if + any lower priority backend is healthy. Defaults + to 1. + type: number + weight: + description: Weight of this endpoint for load balancing + purposes. Defaults to 50. + type: number + type: object + type: array + healthProbeName: + description: Specifies the name of the backend_pool_health_probe + block within this resource to use for this Backend Pool. 
+ type: string + loadBalancingName: + description: Specifies the name of the backend_pool_load_balancing + block within this resource to use for this Backend Pool. + type: string + name: + description: Specifies the name of the Backend Pool. + type: string + type: object + type: array + backendPoolHealthProbe: + description: A backend_pool_health_probe block as defined below. + items: + properties: + enabled: + description: Is this health probe enabled? Defaults to true. + type: boolean + intervalInSeconds: + description: The number of seconds between each Health Probe. + Defaults to 120. + type: number + name: + description: Specifies the name of the Health Probe. + type: string + path: + description: The path to use for the Health Probe. Default + is /. + type: string + probeMethod: + description: 'Specifies HTTP method the health probe uses + when querying the backend pool instances. Possible values + include: GET and HEAD. Defaults to GET.' + type: string + protocol: + description: Protocol scheme to use for the Health Probe. + Possible values are Http and Https. Defaults to Http. + type: string + type: object + type: array + backendPoolLoadBalancing: + description: A backend_pool_load_balancing block as defined below. + items: + properties: + additionalLatencyMilliseconds: + description: The additional latency in milliseconds for + probes to fall into the lowest latency bucket. Defaults + to 0. + type: number + name: + description: Specifies the name of the Load Balancer. + type: string + sampleSize: + description: The number of samples to consider for load + balancing decisions. Defaults to 4. + type: number + successfulSamplesRequired: + description: The number of samples within the sample period + that must succeed. Defaults to 2. + type: number + type: object + type: array + backendPoolSettings: + description: A backend_pool_settings block as defined below. 
+ items: + properties: + backendPoolsSendReceiveTimeoutSeconds: + description: Specifies the send and receive timeout on forwarding + request to the backend. When the timeout is reached, the + request fails and returns. Possible values are between + 0 - 240. Defaults to 60. + type: number + enforceBackendPoolsCertificateNameCheck: + description: Enforce certificate name check on HTTPS requests + to all backend pools, this setting will have no effect + on HTTP requests. Permitted values are true or false. + type: boolean + type: object + type: array + friendlyName: + description: A friendly name for the Front Door service. + type: string + frontendEndpoint: + description: A frontend_endpoint block as defined below. + items: + properties: + hostName: + description: Specifies the host name of the frontend_endpoint. + Must be a domain name. In order to use a name.azurefd.net + domain, the name value must match the Front Door name. + type: string + name: + description: Specifies the name of the frontend_endpoint. + type: string + sessionAffinityEnabled: + description: Whether to allow session affinity on this host. + Valid options are true or false Defaults to false. + type: boolean + sessionAffinityTtlSeconds: + description: The TTL to use in seconds for session affinity, + if applicable. Defaults to 0. + type: number + webApplicationFirewallPolicyLinkId: + description: Defines the Web Application Firewall policy + ID for each host. + type: string + type: object + type: array + loadBalancerEnabled: + description: Should the Front Door Load Balancer be Enabled? Defaults + to true. + type: boolean + routingRule: + description: A routing_rule block as defined below. + items: + properties: + acceptedProtocols: + description: Protocol schemes to match for the Backend Routing + Rule. Possible values are Http and Https. + items: + type: string + type: array + enabled: + description: Enable or Disable use of this Backend Routing + Rule. Permitted values are true or false. 
Defaults to + true. + type: boolean + forwardingConfiguration: + description: A forwarding_configuration block as defined + below. + properties: + backendPoolName: + description: Specifies the name of the Backend Pool + to forward the incoming traffic to. + type: string + cacheDuration: + description: Specify the minimum caching duration (in + ISO8601 notation e.g. P1DT2H for 1 day and 2 hours). + Needs to be greater than 0 and smaller than 365 days. + cache_duration works only in combination with cache_enabled + set to true. + type: string + cacheEnabled: + description: Specifies whether to Enable caching or + not. Valid options are true or false. Defaults to + false. + type: boolean + cacheQueryParameterStripDirective: + description: Defines cache behaviour in relation to + query string parameters. Valid options are StripAll, + StripAllExcept, StripOnly or StripNone. Defaults to + StripAll. + type: string + cacheQueryParameters: + description: Specify query parameters (array). Works + only in combination with cache_query_parameter_strip_directive + set to StripAllExcept or StripOnly. + items: + type: string + type: array + cacheUseDynamicCompression: + description: Whether to use dynamic compression when + caching. Valid options are true or false. Defaults + to false. + type: boolean + customForwardingPath: + description: Path to use when constructing the request + to forward to the backend. This functions as a URL + Rewrite. Default behaviour preserves the URL path. + type: string + forwardingProtocol: + description: Protocol to use when redirecting. Valid + options are HttpOnly, HttpsOnly, or MatchRequest. + Defaults to HttpsOnly. + type: string + type: object + frontendEndpoints: + description: The names of the frontend_endpoint blocks within + this resource to associate with this routing_rule. + items: + type: string + type: array + name: + description: Specifies the name of the Routing Rule. 
+ type: string + patternsToMatch: + description: The route patterns for the Backend Routing + Rule. + items: + type: string + type: array + redirectConfiguration: + description: A redirect_configuration block as defined below. + properties: + customFragment: + description: The destination fragment in the portion + of URL after '#'. Set this to add a fragment to the + redirect URL. + type: string + customHost: + description: Set this to change the URL for the redirection. + type: string + customPath: + description: The path to retain as per the incoming + request, or update in the URL for the redirection. + type: string + customQueryString: + description: Replace any existing query string from + the incoming request URL. + type: string + redirectProtocol: + description: Protocol to use when redirecting. Valid + options are HttpOnly, HttpsOnly, or MatchRequest. + type: string + redirectType: + description: Status code for the redirect. Valida options + are Moved, Found, TemporaryRedirect, PermanentRedirect. + type: string + type: object + type: object + type: array + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.backendPool is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.backendPool) + || (has(self.initProvider) && has(self.initProvider.backendPool))' + - message: spec.forProvider.backendPoolHealthProbe is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.backendPoolHealthProbe) + || (has(self.initProvider) && has(self.initProvider.backendPoolHealthProbe))' + - message: spec.forProvider.backendPoolLoadBalancing is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.backendPoolLoadBalancing) + || (has(self.initProvider) && has(self.initProvider.backendPoolLoadBalancing))' + - message: spec.forProvider.frontendEndpoint is a 
required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.frontendEndpoint) + || (has(self.initProvider) && has(self.initProvider.frontendEndpoint))' + - message: spec.forProvider.routingRule is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.routingRule) + || (has(self.initProvider) && has(self.initProvider.routingRule))' + status: + description: FrontDoorStatus defines the observed state of FrontDoor. + properties: + atProvider: + properties: + backendPool: + description: A backend_pool block as defined below. + items: + properties: + backend: + description: A backend block as defined below. + items: + properties: + address: + description: Location of the backend (IP address or + FQDN) + type: string + enabled: + description: Enable or Disable use of this Backend + Routing Rule. Permitted values are true or false. + Defaults to true. + type: boolean + hostHeader: + description: The value to use as the host header sent + to the backend. + type: string + httpPort: + description: The HTTP TCP port number. Possible values + are between 1 - 65535. + type: number + httpsPort: + description: The HTTPS TCP port number. Possible values + are between 1 - 65535. + type: number + priority: + description: Priority to use for load balancing. Higher + priorities will not be used for load balancing if + any lower priority backend is healthy. Defaults + to 1. + type: number + weight: + description: Weight of this endpoint for load balancing + purposes. Defaults to 50. + type: number + type: object + type: array + healthProbeName: + description: Specifies the name of the backend_pool_health_probe + block within this resource to use for this Backend Pool. + type: string + id: + description: The ID of the Azure Front Door Backend. 
+ type: string + loadBalancingName: + description: Specifies the name of the backend_pool_load_balancing + block within this resource to use for this Backend Pool. + type: string + name: + description: Specifies the name of the Backend Pool. + type: string + type: object + type: array + backendPoolHealthProbe: + description: A backend_pool_health_probe block as defined below. + items: + properties: + enabled: + description: Is this health probe enabled? Defaults to true. + type: boolean + id: + description: The ID of the Azure Front Door Backend. + type: string + intervalInSeconds: + description: The number of seconds between each Health Probe. + Defaults to 120. + type: number + name: + description: Specifies the name of the Health Probe. + type: string + path: + description: The path to use for the Health Probe. Default + is /. + type: string + probeMethod: + description: 'Specifies HTTP method the health probe uses + when querying the backend pool instances. Possible values + include: GET and HEAD. Defaults to GET.' + type: string + protocol: + description: Protocol scheme to use for the Health Probe. + Possible values are Http and Https. Defaults to Http. + type: string + type: object + type: array + backendPoolHealthProbes: + additionalProperties: + type: string + description: A map/dictionary of Backend Pool Health Probe Names + (key) to the Backend Pool Health Probe ID (value) + type: object + x-kubernetes-map-type: granular + backendPoolLoadBalancing: + description: A backend_pool_load_balancing block as defined below. + items: + properties: + additionalLatencyMilliseconds: + description: The additional latency in milliseconds for + probes to fall into the lowest latency bucket. Defaults + to 0. + type: number + id: + description: The ID of the Azure Front Door Backend. + type: string + name: + description: Specifies the name of the Load Balancer. + type: string + sampleSize: + description: The number of samples to consider for load + balancing decisions. 
Defaults to 4. + type: number + successfulSamplesRequired: + description: The number of samples within the sample period + that must succeed. Defaults to 2. + type: number + type: object + type: array + backendPoolLoadBalancingSettings: + additionalProperties: + type: string + description: A map/dictionary of Backend Pool Load Balancing Setting + Names (key) to the Backend Pool Load Balancing Setting ID (value) + type: object + x-kubernetes-map-type: granular + backendPoolSettings: + description: A backend_pool_settings block as defined below. + items: + properties: + backendPoolsSendReceiveTimeoutSeconds: + description: Specifies the send and receive timeout on forwarding + request to the backend. When the timeout is reached, the + request fails and returns. Possible values are between + 0 - 240. Defaults to 60. + type: number + enforceBackendPoolsCertificateNameCheck: + description: Enforce certificate name check on HTTPS requests + to all backend pools, this setting will have no effect + on HTTP requests. Permitted values are true or false. + type: boolean + type: object + type: array + backendPools: + additionalProperties: + type: string + description: A map/dictionary of Backend Pool Names (key) to the + Backend Pool ID (value) + type: object + x-kubernetes-map-type: granular + cname: + description: The host that each frontendEndpoint must CNAME to. + type: string + explicitResourceOrder: + items: + properties: + backendPoolHealthProbeIds: + items: + type: string + type: array + backendPoolIds: + items: + type: string + type: array + backendPoolLoadBalancingIds: + items: + type: string + type: array + frontendEndpointIds: + items: + type: string + type: array + routingRuleIds: + items: + type: string + type: array + type: object + type: array + friendlyName: + description: A friendly name for the Front Door service. + type: string + frontendEndpoint: + description: A frontend_endpoint block as defined below. 
+ items: + properties: + hostName: + description: Specifies the host name of the frontend_endpoint. + Must be a domain name. In order to use a name.azurefd.net + domain, the name value must match the Front Door name. + type: string + id: + description: The ID of the Azure Front Door Backend. + type: string + name: + description: Specifies the name of the frontend_endpoint. + type: string + sessionAffinityEnabled: + description: Whether to allow session affinity on this host. + Valid options are true or false Defaults to false. + type: boolean + sessionAffinityTtlSeconds: + description: The TTL to use in seconds for session affinity, + if applicable. Defaults to 0. + type: number + webApplicationFirewallPolicyLinkId: + description: Defines the Web Application Firewall policy + ID for each host. + type: string + type: object + type: array + frontendEndpoints: + additionalProperties: + type: string + description: A map/dictionary of Frontend Endpoint Names (key) + to the Frontend Endpoint ID (value) + type: object + x-kubernetes-map-type: granular + headerFrontdoorId: + description: The unique ID of the Front Door which is embedded + into the incoming headers X-Azure-FDID attribute and maybe used + to filter traffic sent by the Front Door to your backend. + type: string + id: + description: The ID of the Azure Front Door Backend. + type: string + loadBalancerEnabled: + description: Should the Front Door Load Balancer be Enabled? Defaults + to true. + type: boolean + resourceGroupName: + description: Specifies the name of the Resource Group in which + the Front Door service should exist. Changing this forces a + new resource to be created. + type: string + routingRule: + description: A routing_rule block as defined below. + items: + properties: + acceptedProtocols: + description: Protocol schemes to match for the Backend Routing + Rule. Possible values are Http and Https. 
+ items: + type: string + type: array + enabled: + description: Enable or Disable use of this Backend Routing + Rule. Permitted values are true or false. Defaults to + true. + type: boolean + forwardingConfiguration: + description: A forwarding_configuration block as defined + below. + properties: + backendPoolName: + description: Specifies the name of the Backend Pool + to forward the incoming traffic to. + type: string + cacheDuration: + description: Specify the minimum caching duration (in + ISO8601 notation e.g. P1DT2H for 1 day and 2 hours). + Needs to be greater than 0 and smaller than 365 days. + cache_duration works only in combination with cache_enabled + set to true. + type: string + cacheEnabled: + description: Specifies whether to Enable caching or + not. Valid options are true or false. Defaults to + false. + type: boolean + cacheQueryParameterStripDirective: + description: Defines cache behaviour in relation to + query string parameters. Valid options are StripAll, + StripAllExcept, StripOnly or StripNone. Defaults to + StripAll. + type: string + cacheQueryParameters: + description: Specify query parameters (array). Works + only in combination with cache_query_parameter_strip_directive + set to StripAllExcept or StripOnly. + items: + type: string + type: array + cacheUseDynamicCompression: + description: Whether to use dynamic compression when + caching. Valid options are true or false. Defaults + to false. + type: boolean + customForwardingPath: + description: Path to use when constructing the request + to forward to the backend. This functions as a URL + Rewrite. Default behaviour preserves the URL path. + type: string + forwardingProtocol: + description: Protocol to use when redirecting. Valid + options are HttpOnly, HttpsOnly, or MatchRequest. + Defaults to HttpsOnly. + type: string + type: object + frontendEndpoints: + description: The names of the frontend_endpoint blocks within + this resource to associate with this routing_rule. 
+ items: + type: string + type: array + id: + description: The ID of the Azure Front Door Backend. + type: string + name: + description: Specifies the name of the Routing Rule. + type: string + patternsToMatch: + description: The route patterns for the Backend Routing + Rule. + items: + type: string + type: array + redirectConfiguration: + description: A redirect_configuration block as defined below. + properties: + customFragment: + description: The destination fragment in the portion + of URL after '#'. Set this to add a fragment to the + redirect URL. + type: string + customHost: + description: Set this to change the URL for the redirection. + type: string + customPath: + description: The path to retain as per the incoming + request, or update in the URL for the redirection. + type: string + customQueryString: + description: Replace any existing query string from + the incoming request URL. + type: string + redirectProtocol: + description: Protocol to use when redirecting. Valid + options are HttpOnly, HttpsOnly, or MatchRequest. + type: string + redirectType: + description: Status code for the redirect. Valida options + are Moved, Found, TemporaryRedirect, PermanentRedirect. + type: string + type: object + type: object + type: array + routingRules: + additionalProperties: + type: string + description: A map/dictionary of Routing Rule Names (key) to the + Routing Rule ID (value) + type: object + x-kubernetes-map-type: granular + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. 
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/network.azure.upbound.io_localnetworkgateways.yaml b/package/crds/network.azure.upbound.io_localnetworkgateways.yaml index e4d9c31a8..d8078bf9b 100644 --- a/package/crds/network.azure.upbound.io_localnetworkgateways.yaml +++ b/package/crds/network.azure.upbound.io_localnetworkgateways.yaml @@ -537,3 +537,516 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: LocalNetworkGateway is the Schema for the LocalNetworkGateways + API. Manages a local network gateway connection over which specific connections + can be configured. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: LocalNetworkGatewaySpec defines the desired state of LocalNetworkGateway + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + addressSpace: + description: The list of string CIDRs representing the address + spaces the gateway exposes. + items: + type: string + type: array + bgpSettings: + description: A bgp_settings block as defined below containing + the Local Network Gateway's BGP speaker settings. + properties: + asn: + description: The BGP speaker's ASN. + type: number + bgpPeeringAddress: + description: The BGP peering address and BGP identifier of + this BGP speaker. + type: string + peerWeight: + description: The weight added to routes learned from this + BGP speaker. + type: number + type: object + gatewayAddress: + description: The gateway IP address to connect with. + type: string + gatewayFqdn: + description: The gateway FQDN to connect with. + type: string + location: + description: The location/region where the local network gateway + is created. Changing this forces a new resource to be created. + type: string + resourceGroupName: + description: The name of the resource group in which to create + the local network gateway. 
Changing this forces a new resource + to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + addressSpace: + description: The list of string CIDRs representing the address + spaces the gateway exposes. + items: + type: string + type: array + bgpSettings: + description: A bgp_settings block as defined below containing + the Local Network Gateway's BGP speaker settings. + properties: + asn: + description: The BGP speaker's ASN. + type: number + bgpPeeringAddress: + description: The BGP peering address and BGP identifier of + this BGP speaker. + type: string + peerWeight: + description: The weight added to routes learned from this + BGP speaker. + type: number + type: object + gatewayAddress: + description: The gateway IP address to connect with. 
+ type: string + gatewayFqdn: + description: The gateway FQDN to connect with. + type: string + location: + description: The location/region where the local network gateway + is created. Changing this forces a new resource to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + status: + description: LocalNetworkGatewayStatus defines the observed state of LocalNetworkGateway. + properties: + atProvider: + properties: + addressSpace: + description: The list of string CIDRs representing the address + spaces the gateway exposes. + items: + type: string + type: array + bgpSettings: + description: A bgp_settings block as defined below containing + the Local Network Gateway's BGP speaker settings. + properties: + asn: + description: The BGP speaker's ASN. + type: number + bgpPeeringAddress: + description: The BGP peering address and BGP identifier of + this BGP speaker. + type: string + peerWeight: + description: The weight added to routes learned from this + BGP speaker. + type: number + type: object + gatewayAddress: + description: The gateway IP address to connect with. + type: string + gatewayFqdn: + description: The gateway FQDN to connect with. + type: string + id: + description: The ID of the Local Network Gateway. + type: string + location: + description: The location/region where the local network gateway + is created. Changing this forces a new resource to be created. + type: string + resourceGroupName: + description: The name of the resource group in which to create + the local network gateway. Changing this forces a new resource + to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. 
+ items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/network.azure.upbound.io_managers.yaml b/package/crds/network.azure.upbound.io_managers.yaml index a859cc8ad..9e4d33ae1 100644 --- a/package/crds/network.azure.upbound.io_managers.yaml +++ b/package/crds/network.azure.upbound.io_managers.yaml @@ -558,3 +558,537 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Manager is the Schema for the Managers API. Manages a Network + Managers. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ManagerSpec defines the desired state of Manager + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + description: + description: A description of the network manager. + type: string + location: + description: Specifies the Azure Region where the Network Managers + should exist. Changing this forces a new resource to be created. + type: string + resourceGroupName: + description: Specifies the name of the Resource Group where the + Network Managers should exist. Changing this forces a new Network + Managers to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + scope: + description: A scope block as defined below. + properties: + managementGroupIds: + description: A list of management group IDs. + items: + type: string + type: array + subscriptionIds: + description: A list of subscription IDs. 
+ items: + type: string + type: array + type: object + scopeAccesses: + description: A list of configuration deployment type. Possible + values are Connectivity and SecurityAdmin, corresponds to if + Connectivity Configuration and Security Admin Configuration + is allowed for the Network Manager. + items: + type: string + type: array + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Network Managers. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + description: + description: A description of the network manager. + type: string + location: + description: Specifies the Azure Region where the Network Managers + should exist. Changing this forces a new resource to be created. + type: string + scope: + description: A scope block as defined below. + properties: + managementGroupIds: + description: A list of management group IDs. + items: + type: string + type: array + subscriptionIds: + description: A list of subscription IDs. + items: + type: string + type: array + type: object + scopeAccesses: + description: A list of configuration deployment type. Possible + values are Connectivity and SecurityAdmin, corresponds to if + Connectivity Configuration and Security Admin Configuration + is allowed for the Network Manager. 
+ items: + type: string + type: array + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Network Managers. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.scope is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.scope) + || (has(self.initProvider) && has(self.initProvider.scope))' + - message: spec.forProvider.scopeAccesses is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.scopeAccesses) + || (has(self.initProvider) && has(self.initProvider.scopeAccesses))' + status: + description: ManagerStatus defines the observed state of Manager. + properties: + atProvider: + properties: + crossTenantScopes: + description: One or more cross_tenant_scopes blocks as defined + below. + items: + properties: + managementGroups: + description: List of management groups. + items: + type: string + type: array + subscriptions: + description: List of subscriptions. + items: + type: string + type: array + tenantId: + description: Tenant ID. + type: string + type: object + type: array + description: + description: A description of the network manager. + type: string + id: + description: The ID of the Network Managers. + type: string + location: + description: Specifies the Azure Region where the Network Managers + should exist. Changing this forces a new resource to be created. + type: string + resourceGroupName: + description: Specifies the name of the Resource Group where the + Network Managers should exist. 
Changing this forces a new Network + Managers to be created. + type: string + scope: + description: A scope block as defined below. + properties: + managementGroupIds: + description: A list of management group IDs. + items: + type: string + type: array + subscriptionIds: + description: A list of subscription IDs. + items: + type: string + type: array + type: object + scopeAccesses: + description: A list of configuration deployment type. Possible + values are Connectivity and SecurityAdmin, corresponds to if + Connectivity Configuration and Security Admin Configuration + is allowed for the Network Manager. + items: + type: string + type: array + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Network Managers. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. 
At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/network.azure.upbound.io_packetcaptures.yaml b/package/crds/network.azure.upbound.io_packetcaptures.yaml index 840db9875..dcf377663 100644 --- a/package/crds/network.azure.upbound.io_packetcaptures.yaml +++ b/package/crds/network.azure.upbound.io_packetcaptures.yaml @@ -887,3 +887,866 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: PacketCapture is the Schema for the PacketCaptures API. Configures + Packet Capturing against a Virtual Machine using a Network Watcher. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PacketCaptureSpec defines the desired state of PacketCapture + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + filter: + description: One or more filter blocks as defined below. Changing + this forces a new resource to be created. + items: + properties: + localIpAddress: + description: 'The local IP Address to be filtered on. Notation: + "127.0.0.1" for single address entry. "127.0.0.1-127.0.0.255" + for range. "127.0.0.1;127.0.0.5" for multiple entries. + Multiple ranges not currently supported. Mixing ranges + with multiple entries not currently supported. Changing + this forces a new resource to be created.' + type: string + localPort: + description: 'The local port to be filtered on. Notation: + "80" for single port entry."80-85" for range. "80;443;" + for multiple entries. 
Multiple ranges not currently supported. + Mixing ranges with multiple entries not currently supported. + Changing this forces a new resource to be created.' + type: string + protocol: + description: The Protocol to be filtered on. Possible values + include Any, TCP and UDP. Changing this forces a new resource + to be created. + type: string + remoteIpAddress: + description: 'The remote IP Address to be filtered on. Notation: + "127.0.0.1" for single address entry. "127.0.0.1-127.0.0.255" + for range. "127.0.0.1;127.0.0.5;" for multiple entries. + Multiple ranges not currently supported. Mixing ranges + with multiple entries not currently supported.. Changing + this forces a new resource to be created.' + type: string + remotePort: + description: 'The remote port to be filtered on. Notation: + "80" for single port entry."80-85" for range. "80;443;" + for multiple entries. Multiple ranges not currently supported. + Mixing ranges with multiple entries not currently supported. + Changing this forces a new resource to be created.' + type: string + type: object + type: array + maximumBytesPerPacket: + description: The number of bytes captured per packet. The remaining + bytes are truncated. Defaults to 0 (Entire Packet Captured). + Changing this forces a new resource to be created. + type: number + maximumBytesPerSession: + description: Maximum size of the capture in Bytes. Defaults to + 1073741824 (1GB). Changing this forces a new resource to be + created. + type: number + maximumCaptureDuration: + description: The maximum duration of the capture session in seconds. + Defaults to 18000 (5 hours). Changing this forces a new resource + to be created. + type: number + networkWatcherName: + description: The name of the Network Watcher. Changing this forces + a new resource to be created. + type: string + networkWatcherNameRef: + description: Reference to a Watcher in network to populate networkWatcherName. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + networkWatcherNameSelector: + description: Selector for a Watcher in network to populate networkWatcherName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + resourceGroupName: + description: The name of the resource group in which the Network + Watcher exists. Changing this forces a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + storageLocation: + description: A storage_location block as defined below. Changing + this forces a new resource to be created. + properties: + filePath: + description: A valid local path on the targeting VM. Must + include the name of the capture file (*.cap). For Linux + virtual machine it must start with /var/captures. + type: string + storageAccountId: + description: The ID of the storage account to save the packet + capture session + type: string + storageAccountIdRef: + description: Reference to a Account in storage to populate + storageAccountId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + storageAccountIdSelector: + description: Selector for a Account in storage to populate + storageAccountId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + targetResourceId: + description: The ID of the Resource to capture packets from. Changing + this forces a new resource to be created. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + filter: + description: One or more filter blocks as defined below. Changing + this forces a new resource to be created. + items: + properties: + localIpAddress: + description: 'The local IP Address to be filtered on. Notation: + "127.0.0.1" for single address entry. "127.0.0.1-127.0.0.255" + for range. "127.0.0.1;127.0.0.5" for multiple entries. + Multiple ranges not currently supported. Mixing ranges + with multiple entries not currently supported. Changing + this forces a new resource to be created.' + type: string + localPort: + description: 'The local port to be filtered on. Notation: + "80" for single port entry."80-85" for range. "80;443;" + for multiple entries. Multiple ranges not currently supported. + Mixing ranges with multiple entries not currently supported. + Changing this forces a new resource to be created.' + type: string + protocol: + description: The Protocol to be filtered on. Possible values + include Any, TCP and UDP. Changing this forces a new resource + to be created. + type: string + remoteIpAddress: + description: 'The remote IP Address to be filtered on. Notation: + "127.0.0.1" for single address entry. "127.0.0.1-127.0.0.255" + for range. "127.0.0.1;127.0.0.5;" for multiple entries. + Multiple ranges not currently supported. Mixing ranges + with multiple entries not currently supported.. Changing + this forces a new resource to be created.' + type: string + remotePort: + description: 'The remote port to be filtered on. Notation: + "80" for single port entry."80-85" for range. "80;443;" + for multiple entries. Multiple ranges not currently supported. + Mixing ranges with multiple entries not currently supported. + Changing this forces a new resource to be created.' 
+ type: string + type: object + type: array + maximumBytesPerPacket: + description: The number of bytes captured per packet. The remaining + bytes are truncated. Defaults to 0 (Entire Packet Captured). + Changing this forces a new resource to be created. + type: number + maximumBytesPerSession: + description: Maximum size of the capture in Bytes. Defaults to + 1073741824 (1GB). Changing this forces a new resource to be + created. + type: number + maximumCaptureDuration: + description: The maximum duration of the capture session in seconds. + Defaults to 18000 (5 hours). Changing this forces a new resource + to be created. + type: number + storageLocation: + description: A storage_location block as defined below. Changing + this forces a new resource to be created. + properties: + filePath: + description: A valid local path on the targeting VM. Must + include the name of the capture file (*.cap). For Linux + virtual machine it must start with /var/captures. + type: string + storageAccountId: + description: The ID of the storage account to save the packet + capture session + type: string + storageAccountIdRef: + description: Reference to a Account in storage to populate + storageAccountId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + storageAccountIdSelector: + description: Selector for a Account in storage to populate + storageAccountId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + targetResourceId: + description: The ID of the Resource to capture packets from. Changing + this forces a new resource to be created. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. 
If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.storageLocation is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.storageLocation) + || (has(self.initProvider) && has(self.initProvider.storageLocation))' + - message: spec.forProvider.targetResourceId is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.targetResourceId) + || (has(self.initProvider) && has(self.initProvider.targetResourceId))' + status: + description: PacketCaptureStatus defines the observed state of PacketCapture. + properties: + atProvider: + properties: + filter: + description: One or more filter blocks as defined below. Changing + this forces a new resource to be created. + items: + properties: + localIpAddress: + description: 'The local IP Address to be filtered on. Notation: + "127.0.0.1" for single address entry. "127.0.0.1-127.0.0.255" + for range. "127.0.0.1;127.0.0.5" for multiple entries. + Multiple ranges not currently supported. Mixing ranges + with multiple entries not currently supported. Changing + this forces a new resource to be created.' + type: string + localPort: + description: 'The local port to be filtered on. Notation: + "80" for single port entry."80-85" for range. "80;443;" + for multiple entries. Multiple ranges not currently supported. + Mixing ranges with multiple entries not currently supported. + Changing this forces a new resource to be created.' + type: string + protocol: + description: The Protocol to be filtered on. Possible values + include Any, TCP and UDP. Changing this forces a new resource + to be created. + type: string + remoteIpAddress: + description: 'The remote IP Address to be filtered on. 
Notation: + "127.0.0.1" for single address entry. "127.0.0.1-127.0.0.255" + for range. "127.0.0.1;127.0.0.5;" for multiple entries. + Multiple ranges not currently supported. Mixing ranges + with multiple entries not currently supported.. Changing + this forces a new resource to be created.' + type: string + remotePort: + description: 'The remote port to be filtered on. Notation: + "80" for single port entry."80-85" for range. "80;443;" + for multiple entries. Multiple ranges not currently supported. + Mixing ranges with multiple entries not currently supported. + Changing this forces a new resource to be created.' + type: string + type: object + type: array + id: + description: The Packet Capture ID. + type: string + maximumBytesPerPacket: + description: The number of bytes captured per packet. The remaining + bytes are truncated. Defaults to 0 (Entire Packet Captured). + Changing this forces a new resource to be created. + type: number + maximumBytesPerSession: + description: Maximum size of the capture in Bytes. Defaults to + 1073741824 (1GB). Changing this forces a new resource to be + created. + type: number + maximumCaptureDuration: + description: The maximum duration of the capture session in seconds. + Defaults to 18000 (5 hours). Changing this forces a new resource + to be created. + type: number + networkWatcherName: + description: The name of the Network Watcher. Changing this forces + a new resource to be created. + type: string + resourceGroupName: + description: The name of the resource group in which the Network + Watcher exists. Changing this forces a new resource to be created. + type: string + storageLocation: + description: A storage_location block as defined below. Changing + this forces a new resource to be created. + properties: + filePath: + description: A valid local path on the targeting VM. Must + include the name of the capture file (*.cap). For Linux + virtual machine it must start with /var/captures. 
+ type: string + storageAccountId: + description: The ID of the storage account to save the packet + capture session + type: string + storagePath: + description: The URI of the storage path to save the packet + capture. + type: string + type: object + targetResourceId: + description: The ID of the Resource to capture packets from. Changing + this forces a new resource to be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/network.azure.upbound.io_pointtositevpngateways.yaml b/package/crds/network.azure.upbound.io_pointtositevpngateways.yaml index e081e938b..06047a9f0 100644 --- a/package/crds/network.azure.upbound.io_pointtositevpngateways.yaml +++ b/package/crds/network.azure.upbound.io_pointtositevpngateways.yaml @@ -1047,3 +1047,1014 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: PointToSiteVPNGateway is the Schema for the PointToSiteVPNGateways + API. Manages a Point-to-Site VPN Gateway. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PointToSiteVPNGatewaySpec defines the desired state of PointToSiteVPNGateway + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + connectionConfiguration: + description: A connection_configuration block as defined below. + items: + properties: + internetSecurityEnabled: + description: Should Internet Security be enabled to secure + internet traffic? Changing this forces a new resource + to be created. Defaults to false. + type: boolean + name: + description: The Name which should be used for this Connection + Configuration. + type: string + route: + description: A route block as defined below. + properties: + associatedRouteTableId: + description: The Virtual Hub Route Table resource id + associated with this Routing Configuration. + type: string + inboundRouteMapId: + description: The resource ID of the Route Map associated + with this Routing Configuration for inbound learned + routes. + type: string + outboundRouteMapId: + description: The resource ID of the Route Map associated + with this Routing Configuration for outbound advertised + routes. 
+ type: string + propagatedRouteTable: + description: A propagated_route_table block as defined + below. + properties: + ids: + description: The list of Virtual Hub Route Table + resource id which the routes will be propagated + to. + items: + type: string + type: array + labels: + description: The list of labels to logically group + Virtual Hub Route Tables which the routes will + be propagated to. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + vpnClientAddressPool: + description: A vpn_client_address_pool block as defined + below. + properties: + addressPrefixes: + description: A list of CIDR Ranges which should be used + as Address Prefixes. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + type: array + dnsServers: + description: A list of IP Addresses of DNS Servers for the Point-to-Site + VPN Gateway. + items: + type: string + type: array + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + resourceGroupName: + description: The name of the resource group in which to create + the Point-to-Site VPN Gateway. Changing this forces a new resource + to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + routingPreferenceInternetEnabled: + description: Is the Routing Preference for the Public IP Interface + of the VPN Gateway enabled? Defaults to false. Changing this + forces a new resource to be created. + type: boolean + scaleUnit: + description: The Scale Unit for this Point-to-Site VPN Gateway. 
+ type: number + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the Point-to-Site + VPN Gateway. + type: object + x-kubernetes-map-type: granular + virtualHubId: + description: The ID of the Virtual Hub where this Point-to-Site + VPN Gateway should exist. Changing this forces a new resource + to be created. + type: string + virtualHubIdRef: + description: Reference to a VirtualHub in network to populate + virtualHubId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualHubIdSelector: + description: Selector for a VirtualHub in network to populate + virtualHubId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + vpnServerConfigurationId: + description: The ID of the VPN Server Configuration which this + Point-to-Site VPN Gateway should use. Changing this forces a + new resource to be created. + type: string + vpnServerConfigurationIdRef: + description: Reference to a VPNServerConfiguration in network + to populate vpnServerConfigurationId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + vpnServerConfigurationIdSelector: + description: Selector for a VPNServerConfiguration in network + to populate vpnServerConfigurationId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + connectionConfiguration: + description: A connection_configuration block as defined below. 
+ items: + properties: + internetSecurityEnabled: + description: Should Internet Security be enabled to secure + internet traffic? Changing this forces a new resource + to be created. Defaults to false. + type: boolean + name: + description: The Name which should be used for this Connection + Configuration. + type: string + route: + description: A route block as defined below. + properties: + associatedRouteTableId: + description: The Virtual Hub Route Table resource id + associated with this Routing Configuration. + type: string + inboundRouteMapId: + description: The resource ID of the Route Map associated + with this Routing Configuration for inbound learned + routes. + type: string + outboundRouteMapId: + description: The resource ID of the Route Map associated + with this Routing Configuration for outbound advertised + routes. + type: string + propagatedRouteTable: + description: A propagated_route_table block as defined + below. + properties: + ids: + description: The list of Virtual Hub Route Table + resource id which the routes will be propagated + to. + items: + type: string + type: array + labels: + description: The list of labels to logically group + Virtual Hub Route Tables which the routes will + be propagated to. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + vpnClientAddressPool: + description: A vpn_client_address_pool block as defined + below. + properties: + addressPrefixes: + description: A list of CIDR Ranges which should be used + as Address Prefixes. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + type: array + dnsServers: + description: A list of IP Addresses of DNS Servers for the Point-to-Site + VPN Gateway. + items: + type: string + type: array + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. 
+ type: string + routingPreferenceInternetEnabled: + description: Is the Routing Preference for the Public IP Interface + of the VPN Gateway enabled? Defaults to false. Changing this + forces a new resource to be created. + type: boolean + scaleUnit: + description: The Scale Unit for this Point-to-Site VPN Gateway. + type: number + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the Point-to-Site + VPN Gateway. + type: object + x-kubernetes-map-type: granular + virtualHubId: + description: The ID of the Virtual Hub where this Point-to-Site + VPN Gateway should exist. Changing this forces a new resource + to be created. + type: string + virtualHubIdRef: + description: Reference to a VirtualHub in network to populate + virtualHubId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualHubIdSelector: + description: Selector for a VirtualHub in network to populate + virtualHubId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + vpnServerConfigurationId: + description: The ID of the VPN Server Configuration which this + Point-to-Site VPN Gateway should use. Changing this forces a + new resource to be created. + type: string + vpnServerConfigurationIdRef: + description: Reference to a VPNServerConfiguration in network + to populate vpnServerConfigurationId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + vpnServerConfigurationIdSelector: + description: Selector for a VPNServerConfiguration in network + to populate vpnServerConfigurationId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.connectionConfiguration is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.connectionConfiguration) + || (has(self.initProvider) && has(self.initProvider.connectionConfiguration))' + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.scaleUnit is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.scaleUnit) + || (has(self.initProvider) && has(self.initProvider.scaleUnit))' + status: + description: PointToSiteVPNGatewayStatus defines the observed state of + PointToSiteVPNGateway. 
+ properties: + atProvider: + properties: + connectionConfiguration: + description: A connection_configuration block as defined below. + items: + properties: + internetSecurityEnabled: + description: Should Internet Security be enabled to secure + internet traffic? Changing this forces a new resource + to be created. Defaults to false. + type: boolean + name: + description: The Name which should be used for this Connection + Configuration. + type: string + route: + description: A route block as defined below. + properties: + associatedRouteTableId: + description: The Virtual Hub Route Table resource id + associated with this Routing Configuration. + type: string + inboundRouteMapId: + description: The resource ID of the Route Map associated + with this Routing Configuration for inbound learned + routes. + type: string + outboundRouteMapId: + description: The resource ID of the Route Map associated + with this Routing Configuration for outbound advertised + routes. + type: string + propagatedRouteTable: + description: A propagated_route_table block as defined + below. + properties: + ids: + description: The list of Virtual Hub Route Table + resource id which the routes will be propagated + to. + items: + type: string + type: array + labels: + description: The list of labels to logically group + Virtual Hub Route Tables which the routes will + be propagated to. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + vpnClientAddressPool: + description: A vpn_client_address_pool block as defined + below. + properties: + addressPrefixes: + description: A list of CIDR Ranges which should be used + as Address Prefixes. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + type: array + dnsServers: + description: A list of IP Addresses of DNS Servers for the Point-to-Site + VPN Gateway. 
+ items: + type: string + type: array + id: + description: The ID of the Point-to-Site VPN Gateway. + type: string + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + resourceGroupName: + description: The name of the resource group in which to create + the Point-to-Site VPN Gateway. Changing this forces a new resource + to be created. + type: string + routingPreferenceInternetEnabled: + description: Is the Routing Preference for the Public IP Interface + of the VPN Gateway enabled? Defaults to false. Changing this + forces a new resource to be created. + type: boolean + scaleUnit: + description: The Scale Unit for this Point-to-Site VPN Gateway. + type: number + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the Point-to-Site + VPN Gateway. + type: object + x-kubernetes-map-type: granular + virtualHubId: + description: The ID of the Virtual Hub where this Point-to-Site + VPN Gateway should exist. Changing this forces a new resource + to be created. + type: string + vpnServerConfigurationId: + description: The ID of the VPN Server Configuration which this + Point-to-Site VPN Gateway should use. Changing this forces a + new resource to be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/network.azure.upbound.io_privatednszones.yaml b/package/crds/network.azure.upbound.io_privatednszones.yaml index 5a3452b67..55b9f46a3 100644 --- a/package/crds/network.azure.upbound.io_privatednszones.yaml +++ b/package/crds/network.azure.upbound.io_privatednszones.yaml @@ -565,3 +565,544 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: PrivateDNSZone is the Schema for the PrivateDNSZones API. 
Manages + a Private DNS Zone. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PrivateDNSZoneSpec defines the desired state of PrivateDNSZone + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + resourceGroupName: + description: Specifies the resource group where the resource exists. + Changing this forces a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + soaRecord: + description: An soa_record block as defined below. Changing this + forces a new resource to be created. + properties: + email: + description: The email contact for the SOA record. + type: string + expireTime: + description: The expire time for the SOA record. Defaults + to 2419200. + type: number + minimumTtl: + description: The minimum Time To Live for the SOA record. + By convention, it is used to determine the negative caching + duration. Defaults to 10. + type: number + refreshTime: + description: The refresh time for the SOA record. Defaults + to 3600. + type: number + retryTime: + description: The retry time for the SOA record. Defaults to + 300. + type: number + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the Record Set. + type: object + x-kubernetes-map-type: granular + ttl: + description: The Time To Live of the SOA Record in seconds. + Defaults to 3600. + type: number + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + soaRecord: + description: An soa_record block as defined below. 
Changing this + forces a new resource to be created. + properties: + email: + description: The email contact for the SOA record. + type: string + expireTime: + description: The expire time for the SOA record. Defaults + to 2419200. + type: number + minimumTtl: + description: The minimum Time To Live for the SOA record. + By convention, it is used to determine the negative caching + duration. Defaults to 10. + type: number + refreshTime: + description: The refresh time for the SOA record. Defaults + to 3600. + type: number + retryTime: + description: The retry time for the SOA record. Defaults to + 300. + type: number + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the Record Set. + type: object + x-kubernetes-map-type: granular + ttl: + description: The Time To Live of the SOA Record in seconds. + Defaults to 3600. + type: number + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: PrivateDNSZoneStatus defines the observed state of PrivateDNSZone. + properties: + atProvider: + properties: + id: + description: The Private DNS Zone ID. + type: string + maxNumberOfRecordSets: + description: The maximum number of record sets that can be created + in this Private DNS zone. + type: number + maxNumberOfVirtualNetworkLinks: + description: The maximum number of virtual networks that can be + linked to this Private DNS zone. + type: number + maxNumberOfVirtualNetworkLinksWithRegistration: + description: The maximum number of virtual networks that can be + linked to this Private DNS zone with registration enabled. + type: number + numberOfRecordSets: + description: The current number of record sets in this Private + DNS zone. + type: number + resourceGroupName: + description: Specifies the resource group where the resource exists. + Changing this forces a new resource to be created. + type: string + soaRecord: + description: An soa_record block as defined below. 
Changing this + forces a new resource to be created. + properties: + email: + description: The email contact for the SOA record. + type: string + expireTime: + description: The expire time for the SOA record. Defaults + to 2419200. + type: number + fqdn: + description: The fully qualified domain name of the Record + Set. + type: string + hostName: + description: The domain name of the authoritative name server + for the SOA record. + type: string + minimumTtl: + description: The minimum Time To Live for the SOA record. + By convention, it is used to determine the negative caching + duration. Defaults to 10. + type: number + refreshTime: + description: The refresh time for the SOA record. Defaults + to 3600. + type: number + retryTime: + description: The retry time for the SOA record. Defaults to + 300. + type: number + serialNumber: + description: The serial number for the SOA record. + type: number + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the Record Set. + type: object + x-kubernetes-map-type: granular + ttl: + description: The Time To Live of the SOA Record in seconds. + Defaults to 3600. + type: number + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/network.azure.upbound.io_privateendpoints.yaml b/package/crds/network.azure.upbound.io_privateendpoints.yaml index bb33ddc70..d447028aa 100644 --- a/package/crds/network.azure.upbound.io_privateendpoints.yaml +++ b/package/crds/network.azure.upbound.io_privateendpoints.yaml @@ -1137,3 +1137,1107 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: PrivateEndpoint is the Schema for the PrivateEndpoints API. 
Manages + a Private Endpoint. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PrivateEndpointSpec defines the desired state of PrivateEndpoint + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + customNetworkInterfaceName: + description: The custom name of the network interface attached + to the private endpoint. Changing this forces a new resource + to be created. + type: string + ipConfiguration: + description: One or more ip_configuration blocks as defined below. + This allows a static IP address to be set for this Private Endpoint, + otherwise an address is dynamically allocated from the Subnet. 
+ items: + properties: + memberName: + description: Specifies the member name this IP address applies + to. If it is not specified, it will use the value of subresource_name. + Changing this forces a new resource to be created. + type: string + name: + description: Specifies the Name of the IP Configuration. + Changing this forces a new resource to be created. + type: string + privateIpAddress: + description: Specifies the static IP address within the + private endpoint's subnet to be used. Changing this forces + a new resource to be created. + type: string + subresourceName: + description: Specifies the subresource this IP address applies + to. subresource_names corresponds to group_id. Changing + this forces a new resource to be created. + type: string + type: object + type: array + location: + description: The supported Azure location where the resource exists. + Changing this forces a new resource to be created. + type: string + privateDnsZoneGroup: + description: A private_dns_zone_group block as defined below. + properties: + name: + description: Specifies the Name of the Private DNS Zone Group. + type: string + privateDnsZoneIds: + description: Specifies the list of Private DNS Zones to include + within the private_dns_zone_group. + items: + type: string + type: array + privateDnsZoneIdsRefs: + description: References to PrivateDNSZone in network to populate + privateDnsZoneIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + privateDnsZoneIdsSelector: + description: Selector for a list of PrivateDNSZone in network + to populate privateDnsZoneIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + privateServiceConnection: + description: A private_service_connection block as defined below. + properties: + isManualConnection: + description: Does the Private Endpoint require Manual Approval + from the remote resource owner? Changing this forces a new + resource to be created. 
+ type: boolean + name: + description: Specifies the Name of the Private Service Connection. + Changing this forces a new resource to be created. + type: string + privateConnectionResourceAlias: + description: The Service Alias of the Private Link Enabled + Remote Resource which this Private Endpoint should be connected + to. One of private_connection_resource_id or private_connection_resource_alias + must be specified. Changing this forces a new resource to + be created. + type: string + privateConnectionResourceId: + description: The ID of the Private Link Enabled Remote Resource + which this Private Endpoint should be connected to. One + of private_connection_resource_id or private_connection_resource_alias + must be specified. Changing this forces a new resource to + be created. For a web app or function app slot, the parent + web app should be used in this field instead of a reference + to the slot itself. + type: string + requestMessage: + description: A message passed to the owner of the remote resource + when the private endpoint attempts to establish the connection + to the remote resource. The request message can be a maximum + of 140 characters in length. Only valid if is_manual_connection + is set to true. + type: string + subresourceNames: + description: A list of subresource names which the Private + Endpoint is able to connect to. subresource_names corresponds + to group_id. Possible values are detailed in the product + documentation in the Subresources column. Changing this + forces a new resource to be created. + items: + type: string + type: array + type: object + resourceGroupName: + description: Specifies the Name of the Resource Group within which + the Private Endpoint should exist. Changing this forces a new + resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + subnetId: + description: The ID of the Subnet from which Private IP Addresses + will be allocated for this Private Endpoint. Changing this forces + a new resource to be created. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + customNetworkInterfaceName: + description: The custom name of the network interface attached + to the private endpoint. Changing this forces a new resource + to be created. + type: string + ipConfiguration: + description: One or more ip_configuration blocks as defined below. + This allows a static IP address to be set for this Private Endpoint, + otherwise an address is dynamically allocated from the Subnet. + items: + properties: + memberName: + description: Specifies the member name this IP address applies + to. If it is not specified, it will use the value of subresource_name. + Changing this forces a new resource to be created. 
+ type: string + name: + description: Specifies the Name of the IP Configuration. + Changing this forces a new resource to be created. + type: string + privateIpAddress: + description: Specifies the static IP address within the + private endpoint's subnet to be used. Changing this forces + a new resource to be created. + type: string + subresourceName: + description: Specifies the subresource this IP address applies + to. subresource_names corresponds to group_id. Changing + this forces a new resource to be created. + type: string + type: object + type: array + location: + description: The supported Azure location where the resource exists. + Changing this forces a new resource to be created. + type: string + privateDnsZoneGroup: + description: A private_dns_zone_group block as defined below. + properties: + name: + description: Specifies the Name of the Private DNS Zone Group. + type: string + privateDnsZoneIds: + description: Specifies the list of Private DNS Zones to include + within the private_dns_zone_group. + items: + type: string + type: array + privateDnsZoneIdsRefs: + description: References to PrivateDNSZone in network to populate + privateDnsZoneIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + privateDnsZoneIdsSelector: + description: Selector for a list of PrivateDNSZone in network + to populate privateDnsZoneIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + privateServiceConnection: + description: A private_service_connection block as defined below. + properties: + isManualConnection: + description: Does the Private Endpoint require Manual Approval + from the remote resource owner? Changing this forces a new + resource to be created. + type: boolean + name: + description: Specifies the Name of the Private Service Connection. + Changing this forces a new resource to be created. 
+ type: string + privateConnectionResourceAlias: + description: The Service Alias of the Private Link Enabled + Remote Resource which this Private Endpoint should be connected + to. One of private_connection_resource_id or private_connection_resource_alias + must be specified. Changing this forces a new resource to + be created. + type: string + privateConnectionResourceId: + description: The ID of the Private Link Enabled Remote Resource + which this Private Endpoint should be connected to. One + of private_connection_resource_id or private_connection_resource_alias + must be specified. Changing this forces a new resource to + be created. For a web app or function app slot, the parent + web app should be used in this field instead of a reference + to the slot itself. + type: string + requestMessage: + description: A message passed to the owner of the remote resource + when the private endpoint attempts to establish the connection + to the remote resource. The request message can be a maximum + of 140 characters in length. Only valid if is_manual_connection + is set to true. + type: string + subresourceNames: + description: A list of subresource names which the Private + Endpoint is able to connect to. subresource_names corresponds + to group_id. Possible values are detailed in the product + documentation in the Subresources column. Changing this + forces a new resource to be created. + items: + type: string + type: array + type: object + subnetId: + description: The ID of the Subnet from which Private IP Addresses + will be allocated for this Private Endpoint. Changing this forces + a new resource to be created. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.privateServiceConnection is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.privateServiceConnection) + || (has(self.initProvider) && has(self.initProvider.privateServiceConnection))' + status: + description: PrivateEndpointStatus defines the observed state of PrivateEndpoint. + properties: + atProvider: + properties: + customDnsConfigs: + description: A custom_dns_configs block as defined below. + items: + properties: + fqdn: + description: The fully qualified domain name to the private_endpoint. + type: string + ipAddresses: + description: A list of all IP Addresses that map to the + private_endpoint fqdn. + items: + type: string + type: array + type: object + type: array + customNetworkInterfaceName: + description: The custom name of the network interface attached + to the private endpoint. Changing this forces a new resource + to be created. + type: string + id: + description: The ID of the Private Endpoint. + type: string + ipConfiguration: + description: One or more ip_configuration blocks as defined below. + This allows a static IP address to be set for this Private Endpoint, + otherwise an address is dynamically allocated from the Subnet. + items: + properties: + memberName: + description: Specifies the member name this IP address applies + to. If it is not specified, it will use the value of subresource_name. + Changing this forces a new resource to be created. 
+ type: string + name: + description: Specifies the Name of the IP Configuration. + Changing this forces a new resource to be created. + type: string + privateIpAddress: + description: Specifies the static IP address within the + private endpoint's subnet to be used. Changing this forces + a new resource to be created. + type: string + subresourceName: + description: Specifies the subresource this IP address applies + to. subresource_names corresponds to group_id. Changing + this forces a new resource to be created. + type: string + type: object + type: array + location: + description: The supported Azure location where the resource exists. + Changing this forces a new resource to be created. + type: string + networkInterface: + description: A network_interface block as defined below. + items: + properties: + id: + description: The ID of the network interface associated + with the private_endpoint. + type: string + name: + description: The name of the network interface associated + with the private_endpoint. + type: string + type: object + type: array + privateDnsZoneConfigs: + description: A private_dns_zone_configs block as defined below. + items: + properties: + id: + description: The ID of the Private DNS Zone Config. + type: string + name: + description: The name of the Private DNS Zone that the config + belongs to. + type: string + privateDnsZoneId: + description: A list of IP Addresses + type: string + recordSets: + description: A record_sets block as defined below. + items: + properties: + fqdn: + description: The fully qualified domain name to the + private_dns_zone. + type: string + ipAddresses: + description: A list of all IP Addresses that map to + the private_dns_zone fqdn. + items: + type: string + type: array + name: + description: The name of the Private DNS Zone that + the config belongs to. + type: string + ttl: + description: The time to live for each connection + to the private_dns_zone. 
+ type: number + type: + description: The type of DNS record. + type: string + type: object + type: array + type: object + type: array + privateDnsZoneGroup: + description: A private_dns_zone_group block as defined below. + properties: + id: + description: The ID of the Private DNS Zone Group. + type: string + name: + description: Specifies the Name of the Private DNS Zone Group. + type: string + privateDnsZoneIds: + description: Specifies the list of Private DNS Zones to include + within the private_dns_zone_group. + items: + type: string + type: array + type: object + privateServiceConnection: + description: A private_service_connection block as defined below. + properties: + isManualConnection: + description: Does the Private Endpoint require Manual Approval + from the remote resource owner? Changing this forces a new + resource to be created. + type: boolean + name: + description: Specifies the Name of the Private Service Connection. + Changing this forces a new resource to be created. + type: string + privateConnectionResourceAlias: + description: The Service Alias of the Private Link Enabled + Remote Resource which this Private Endpoint should be connected + to. One of private_connection_resource_id or private_connection_resource_alias + must be specified. Changing this forces a new resource to + be created. + type: string + privateConnectionResourceId: + description: The ID of the Private Link Enabled Remote Resource + which this Private Endpoint should be connected to. One + of private_connection_resource_id or private_connection_resource_alias + must be specified. Changing this forces a new resource to + be created. For a web app or function app slot, the parent + web app should be used in this field instead of a reference + to the slot itself. 
+ type: string + privateIpAddress: + description: (Computed) The private IP address associated + with the private endpoint, note that you will have a private + IP address assigned to the private endpoint even if the + connection request was Rejected. + type: string + requestMessage: + description: A message passed to the owner of the remote resource + when the private endpoint attempts to establish the connection + to the remote resource. The request message can be a maximum + of 140 characters in length. Only valid if is_manual_connection + is set to true. + type: string + subresourceNames: + description: A list of subresource names which the Private + Endpoint is able to connect to. subresource_names corresponds + to group_id. Possible values are detailed in the product + documentation in the Subresources column. Changing this + forces a new resource to be created. + items: + type: string + type: array + type: object + resourceGroupName: + description: Specifies the Name of the Resource Group within which + the Private Endpoint should exist. Changing this forces a new + resource to be created. + type: string + subnetId: + description: The ID of the Subnet from which Private IP Addresses + will be allocated for this Private Endpoint. Changing this forces + a new resource to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/network.azure.upbound.io_profiles.yaml b/package/crds/network.azure.upbound.io_profiles.yaml index 840ae99a3..f057fcfdf 100644 --- a/package/crds/network.azure.upbound.io_profiles.yaml +++ b/package/crds/network.azure.upbound.io_profiles.yaml @@ -680,3 +680,659 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Profile is the Schema for the Profiles API. Manages a Network + Profile. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ProfileSpec defines the desired state of Profile + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + containerNetworkInterface: + description: A container_network_interface block as documented + below. + properties: + ipConfiguration: + description: One or more ip_configuration blocks as documented + below. + items: + properties: + name: + description: Specifies the name of the Network Profile. + Changing this forces a new resource to be created. + type: string + subnetId: + description: Reference to the subnet associated with + the IP Configuration. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + name: + description: Specifies the name of the IP Configuration. + type: string + type: object + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. 
+ type: string + resourceGroupName: + description: The name of the resource group in which to create + the resource. Changing this forces a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + containerNetworkInterface: + description: A container_network_interface block as documented + below. + properties: + ipConfiguration: + description: One or more ip_configuration blocks as documented + below. + items: + properties: + name: + description: Specifies the name of the Network Profile. + Changing this forces a new resource to be created. + type: string + subnetId: + description: Reference to the subnet associated with + the IP Configuration. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + name: + description: Specifies the name of the IP Configuration. + type: string + type: object + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.containerNetworkInterface is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.containerNetworkInterface) + || (has(self.initProvider) && has(self.initProvider.containerNetworkInterface))' + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + status: + description: ProfileStatus defines the observed state of Profile. + properties: + atProvider: + properties: + containerNetworkInterface: + description: A container_network_interface block as documented + below. + properties: + ipConfiguration: + description: One or more ip_configuration blocks as documented + below. + items: + properties: + name: + description: Specifies the name of the Network Profile. + Changing this forces a new resource to be created. + type: string + subnetId: + description: Reference to the subnet associated with + the IP Configuration. + type: string + type: object + type: array + name: + description: Specifies the name of the IP Configuration. + type: string + type: object + containerNetworkInterfaceIds: + description: A list of Container Network Interface IDs. + items: + type: string + type: array + id: + description: The ID of the Network Profile. + type: string + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + resourceGroupName: + description: The name of the resource group in which to create + the resource. Changing this forces a new resource to be created. 
+ type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/network.azure.upbound.io_subnets.yaml b/package/crds/network.azure.upbound.io_subnets.yaml index ae3dacf29..5f388ba93 100644 --- a/package/crds/network.azure.upbound.io_subnets.yaml +++ b/package/crds/network.azure.upbound.io_subnets.yaml @@ -804,3 +804,777 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Subnet is the Schema for the Subnets API. Manages a subnet. Subnets + represent network segments within the IP space defined by the virtual network. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SubnetSpec defines the desired state of Subnet + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + addressPrefixes: + description: The address prefixes to use for the subnet. + items: + type: string + type: array + delegation: + description: One or more delegation blocks as defined below. + items: + properties: + name: + description: A name for this delegation. + type: string + serviceDelegation: + description: A service_delegation block as defined below. + properties: + actions: + description: A list of Actions which should be delegated. + This list is specific to the service to delegate to. + Possible values are Microsoft.Network/networkinterfaces/*, + Microsoft.Network/publicIPAddresses/join/action, Microsoft.Network/publicIPAddresses/read, + Microsoft.Network/virtualNetworks/read, Microsoft.Network/virtualNetworks/subnets/action, + Microsoft.Network/virtualNetworks/subnets/join/action, + Microsoft.Network/virtualNetworks/subnets/prepareNetworkPolicies/action, + and Microsoft.Network/virtualNetworks/subnets/unprepareNetworkPolicies/action. + items: + type: string + type: array + name: + description: The name of service to delegate to. 
Possible + values are GitHub.Network/networkSettings, Microsoft.ApiManagement/service, + Microsoft.Apollo/npu, Microsoft.App/environments, + Microsoft.App/testClients, Microsoft.AVS/PrivateClouds, + Microsoft.AzureCosmosDB/clusters, Microsoft.BareMetal/AzureHostedService, + Microsoft.BareMetal/AzureHPC, Microsoft.BareMetal/AzurePaymentHSM, + Microsoft.BareMetal/AzureVMware, Microsoft.BareMetal/CrayServers, + Microsoft.BareMetal/MonitoringServers, Microsoft.Batch/batchAccounts, + Microsoft.CloudTest/hostedpools, Microsoft.CloudTest/images, + Microsoft.CloudTest/pools, Microsoft.Codespaces/plans, + Microsoft.ContainerInstance/containerGroups, Microsoft.ContainerService/managedClusters, + Microsoft.ContainerService/TestClients, Microsoft.Databricks/workspaces, + Microsoft.DBforMySQL/flexibleServers, Microsoft.DBforMySQL/servers, + Microsoft.DBforMySQL/serversv2, Microsoft.DBforPostgreSQL/flexibleServers, + Microsoft.DBforPostgreSQL/serversv2, Microsoft.DBforPostgreSQL/singleServers, + Microsoft.DelegatedNetwork/controller, Microsoft.DevCenter/networkConnection, + Microsoft.DocumentDB/cassandraClusters, Microsoft.Fidalgo/networkSettings, + Microsoft.HardwareSecurityModules/dedicatedHSMs, Microsoft.Kusto/clusters, + Microsoft.LabServices/labplans, Microsoft.Logic/integrationServiceEnvironments, + Microsoft.MachineLearningServices/workspaces, Microsoft.Netapp/volumes, + Microsoft.Network/dnsResolvers, Microsoft.Network/managedResolvers, + Microsoft.Network/fpgaNetworkInterfaces, Microsoft.Network/networkWatchers., + Microsoft.Network/virtualNetworkGateways, Microsoft.Orbital/orbitalGateways, + Microsoft.PowerPlatform/enterprisePolicies, Microsoft.PowerPlatform/vnetaccesslinks, + Microsoft.ServiceFabricMesh/networks, Microsoft.ServiceNetworking/trafficControllers, + Microsoft.Singularity/accounts/networks, Microsoft.Singularity/accounts/npu, + Microsoft.Sql/managedInstances, Microsoft.Sql/managedInstancesOnebox, + Microsoft.Sql/managedInstancesStage, 
Microsoft.Sql/managedInstancesTest, + Microsoft.Sql/servers, Microsoft.StoragePool/diskPools, + Microsoft.StreamAnalytics/streamingJobs, Microsoft.Synapse/workspaces, + Microsoft.Web/hostingEnvironments, Microsoft.Web/serverFarms, + NGINX.NGINXPLUS/nginxDeployments, PaloAltoNetworks.Cloudngfw/firewalls + and Qumulo.Storage/fileSystems. + type: string + type: object + type: object + type: array + enforcePrivateLinkEndpointNetworkPolicies: + type: boolean + enforcePrivateLinkServiceNetworkPolicies: + type: boolean + privateEndpointNetworkPoliciesEnabled: + description: Enable or Disable network policies for the private + endpoint on the subnet. Setting this to true will Enable the + policy and setting this to false will Disable the policy. Defaults + to true. + type: boolean + privateLinkServiceNetworkPoliciesEnabled: + description: Enable or Disable network policies for the private + link service on the subnet. Setting this to true will Enable + the policy and setting this to false will Disable the policy. + Defaults to true. + type: boolean + resourceGroupName: + description: The name of the resource group in which to create + the subnet. Changing this forces a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + serviceEndpointPolicyIds: + description: The list of IDs of Service Endpoint Policies to associate + with the subnet. + items: + type: string + type: array + x-kubernetes-list-type: set + serviceEndpoints: + description: 'The list of Service endpoints to associate with + the subnet. 
Possible values include: Microsoft.AzureActiveDirectory, + Microsoft.AzureCosmosDB, Microsoft.ContainerRegistry, Microsoft.EventHub, + Microsoft.KeyVault, Microsoft.ServiceBus, Microsoft.Sql, Microsoft.Storage, + Microsoft.Storage.Global and Microsoft.Web.' + items: + type: string + type: array + x-kubernetes-list-type: set + virtualNetworkName: + description: The name of the virtual network to which to attach + the subnet. Changing this forces a new resource to be created. + type: string + virtualNetworkNameRef: + description: Reference to a VirtualNetwork in network to populate + virtualNetworkName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkNameSelector: + description: Selector for a VirtualNetwork in network to populate + virtualNetworkName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + addressPrefixes: + description: The address prefixes to use for the subnet. + items: + type: string + type: array + delegation: + description: One or more delegation blocks as defined below. + items: + properties: + name: + description: A name for this delegation. + type: string + serviceDelegation: + description: A service_delegation block as defined below. + properties: + actions: + description: A list of Actions which should be delegated. + This list is specific to the service to delegate to. 
+ Possible values are Microsoft.Network/networkinterfaces/*, + Microsoft.Network/publicIPAddresses/join/action, Microsoft.Network/publicIPAddresses/read, + Microsoft.Network/virtualNetworks/read, Microsoft.Network/virtualNetworks/subnets/action, + Microsoft.Network/virtualNetworks/subnets/join/action, + Microsoft.Network/virtualNetworks/subnets/prepareNetworkPolicies/action, + and Microsoft.Network/virtualNetworks/subnets/unprepareNetworkPolicies/action. + items: + type: string + type: array + name: + description: The name of service to delegate to. Possible + values are GitHub.Network/networkSettings, Microsoft.ApiManagement/service, + Microsoft.Apollo/npu, Microsoft.App/environments, + Microsoft.App/testClients, Microsoft.AVS/PrivateClouds, + Microsoft.AzureCosmosDB/clusters, Microsoft.BareMetal/AzureHostedService, + Microsoft.BareMetal/AzureHPC, Microsoft.BareMetal/AzurePaymentHSM, + Microsoft.BareMetal/AzureVMware, Microsoft.BareMetal/CrayServers, + Microsoft.BareMetal/MonitoringServers, Microsoft.Batch/batchAccounts, + Microsoft.CloudTest/hostedpools, Microsoft.CloudTest/images, + Microsoft.CloudTest/pools, Microsoft.Codespaces/plans, + Microsoft.ContainerInstance/containerGroups, Microsoft.ContainerService/managedClusters, + Microsoft.ContainerService/TestClients, Microsoft.Databricks/workspaces, + Microsoft.DBforMySQL/flexibleServers, Microsoft.DBforMySQL/servers, + Microsoft.DBforMySQL/serversv2, Microsoft.DBforPostgreSQL/flexibleServers, + Microsoft.DBforPostgreSQL/serversv2, Microsoft.DBforPostgreSQL/singleServers, + Microsoft.DelegatedNetwork/controller, Microsoft.DevCenter/networkConnection, + Microsoft.DocumentDB/cassandraClusters, Microsoft.Fidalgo/networkSettings, + Microsoft.HardwareSecurityModules/dedicatedHSMs, Microsoft.Kusto/clusters, + Microsoft.LabServices/labplans, Microsoft.Logic/integrationServiceEnvironments, + Microsoft.MachineLearningServices/workspaces, Microsoft.Netapp/volumes, + Microsoft.Network/dnsResolvers, 
Microsoft.Network/managedResolvers, + Microsoft.Network/fpgaNetworkInterfaces, Microsoft.Network/networkWatchers., + Microsoft.Network/virtualNetworkGateways, Microsoft.Orbital/orbitalGateways, + Microsoft.PowerPlatform/enterprisePolicies, Microsoft.PowerPlatform/vnetaccesslinks, + Microsoft.ServiceFabricMesh/networks, Microsoft.ServiceNetworking/trafficControllers, + Microsoft.Singularity/accounts/networks, Microsoft.Singularity/accounts/npu, + Microsoft.Sql/managedInstances, Microsoft.Sql/managedInstancesOnebox, + Microsoft.Sql/managedInstancesStage, Microsoft.Sql/managedInstancesTest, + Microsoft.Sql/servers, Microsoft.StoragePool/diskPools, + Microsoft.StreamAnalytics/streamingJobs, Microsoft.Synapse/workspaces, + Microsoft.Web/hostingEnvironments, Microsoft.Web/serverFarms, + NGINX.NGINXPLUS/nginxDeployments, PaloAltoNetworks.Cloudngfw/firewalls + and Qumulo.Storage/fileSystems. + type: string + type: object + type: object + type: array + enforcePrivateLinkEndpointNetworkPolicies: + type: boolean + enforcePrivateLinkServiceNetworkPolicies: + type: boolean + privateEndpointNetworkPoliciesEnabled: + description: Enable or Disable network policies for the private + endpoint on the subnet. Setting this to true will Enable the + policy and setting this to false will Disable the policy. Defaults + to true. + type: boolean + privateLinkServiceNetworkPoliciesEnabled: + description: Enable or Disable network policies for the private + link service on the subnet. Setting this to true will Enable + the policy and setting this to false will Disable the policy. + Defaults to true. + type: boolean + serviceEndpointPolicyIds: + description: The list of IDs of Service Endpoint Policies to associate + with the subnet. + items: + type: string + type: array + x-kubernetes-list-type: set + serviceEndpoints: + description: 'The list of Service endpoints to associate with + the subnet. 
Possible values include: Microsoft.AzureActiveDirectory, + Microsoft.AzureCosmosDB, Microsoft.ContainerRegistry, Microsoft.EventHub, + Microsoft.KeyVault, Microsoft.ServiceBus, Microsoft.Sql, Microsoft.Storage, + Microsoft.Storage.Global and Microsoft.Web.' + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.addressPrefixes is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.addressPrefixes) + || (has(self.initProvider) && has(self.initProvider.addressPrefixes))' + status: + description: SubnetStatus defines the observed state of Subnet. + properties: + atProvider: + properties: + addressPrefixes: + description: The address prefixes to use for the subnet. + items: + type: string + type: array + delegation: + description: One or more delegation blocks as defined below. + items: + properties: + name: + description: A name for this delegation. + type: string + serviceDelegation: + description: A service_delegation block as defined below. + properties: + actions: + description: A list of Actions which should be delegated. + This list is specific to the service to delegate to. + Possible values are Microsoft.Network/networkinterfaces/*, + Microsoft.Network/publicIPAddresses/join/action, Microsoft.Network/publicIPAddresses/read, + Microsoft.Network/virtualNetworks/read, Microsoft.Network/virtualNetworks/subnets/action, + Microsoft.Network/virtualNetworks/subnets/join/action, + Microsoft.Network/virtualNetworks/subnets/prepareNetworkPolicies/action, + and Microsoft.Network/virtualNetworks/subnets/unprepareNetworkPolicies/action. + items: + type: string + type: array + name: + description: The name of service to delegate to. 
Possible + values are GitHub.Network/networkSettings, Microsoft.ApiManagement/service, + Microsoft.Apollo/npu, Microsoft.App/environments, + Microsoft.App/testClients, Microsoft.AVS/PrivateClouds, + Microsoft.AzureCosmosDB/clusters, Microsoft.BareMetal/AzureHostedService, + Microsoft.BareMetal/AzureHPC, Microsoft.BareMetal/AzurePaymentHSM, + Microsoft.BareMetal/AzureVMware, Microsoft.BareMetal/CrayServers, + Microsoft.BareMetal/MonitoringServers, Microsoft.Batch/batchAccounts, + Microsoft.CloudTest/hostedpools, Microsoft.CloudTest/images, + Microsoft.CloudTest/pools, Microsoft.Codespaces/plans, + Microsoft.ContainerInstance/containerGroups, Microsoft.ContainerService/managedClusters, + Microsoft.ContainerService/TestClients, Microsoft.Databricks/workspaces, + Microsoft.DBforMySQL/flexibleServers, Microsoft.DBforMySQL/servers, + Microsoft.DBforMySQL/serversv2, Microsoft.DBforPostgreSQL/flexibleServers, + Microsoft.DBforPostgreSQL/serversv2, Microsoft.DBforPostgreSQL/singleServers, + Microsoft.DelegatedNetwork/controller, Microsoft.DevCenter/networkConnection, + Microsoft.DocumentDB/cassandraClusters, Microsoft.Fidalgo/networkSettings, + Microsoft.HardwareSecurityModules/dedicatedHSMs, Microsoft.Kusto/clusters, + Microsoft.LabServices/labplans, Microsoft.Logic/integrationServiceEnvironments, + Microsoft.MachineLearningServices/workspaces, Microsoft.Netapp/volumes, + Microsoft.Network/dnsResolvers, Microsoft.Network/managedResolvers, + Microsoft.Network/fpgaNetworkInterfaces, Microsoft.Network/networkWatchers., + Microsoft.Network/virtualNetworkGateways, Microsoft.Orbital/orbitalGateways, + Microsoft.PowerPlatform/enterprisePolicies, Microsoft.PowerPlatform/vnetaccesslinks, + Microsoft.ServiceFabricMesh/networks, Microsoft.ServiceNetworking/trafficControllers, + Microsoft.Singularity/accounts/networks, Microsoft.Singularity/accounts/npu, + Microsoft.Sql/managedInstances, Microsoft.Sql/managedInstancesOnebox, + Microsoft.Sql/managedInstancesStage, 
Microsoft.Sql/managedInstancesTest, + Microsoft.Sql/servers, Microsoft.StoragePool/diskPools, + Microsoft.StreamAnalytics/streamingJobs, Microsoft.Synapse/workspaces, + Microsoft.Web/hostingEnvironments, Microsoft.Web/serverFarms, + NGINX.NGINXPLUS/nginxDeployments, PaloAltoNetworks.Cloudngfw/firewalls + and Qumulo.Storage/fileSystems. + type: string + type: object + type: object + type: array + enforcePrivateLinkEndpointNetworkPolicies: + type: boolean + enforcePrivateLinkServiceNetworkPolicies: + type: boolean + id: + description: The subnet ID. + type: string + privateEndpointNetworkPoliciesEnabled: + description: Enable or Disable network policies for the private + endpoint on the subnet. Setting this to true will Enable the + policy and setting this to false will Disable the policy. Defaults + to true. + type: boolean + privateLinkServiceNetworkPoliciesEnabled: + description: Enable or Disable network policies for the private + link service on the subnet. Setting this to true will Enable + the policy and setting this to false will Disable the policy. + Defaults to true. + type: boolean + resourceGroupName: + description: The name of the resource group in which to create + the subnet. Changing this forces a new resource to be created. + type: string + serviceEndpointPolicyIds: + description: The list of IDs of Service Endpoint Policies to associate + with the subnet. + items: + type: string + type: array + x-kubernetes-list-type: set + serviceEndpoints: + description: 'The list of Service endpoints to associate with + the subnet. Possible values include: Microsoft.AzureActiveDirectory, + Microsoft.AzureCosmosDB, Microsoft.ContainerRegistry, Microsoft.EventHub, + Microsoft.KeyVault, Microsoft.ServiceBus, Microsoft.Sql, Microsoft.Storage, + Microsoft.Storage.Global and Microsoft.Web.' + items: + type: string + type: array + x-kubernetes-list-type: set + virtualNetworkName: + description: The name of the virtual network to which to attach + the subnet. 
Changing this forces a new resource to be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/network.azure.upbound.io_trafficmanagerprofiles.yaml b/package/crds/network.azure.upbound.io_trafficmanagerprofiles.yaml index 9e6987a3a..156b25fa6 100644 --- a/package/crds/network.azure.upbound.io_trafficmanagerprofiles.yaml +++ b/package/crds/network.azure.upbound.io_trafficmanagerprofiles.yaml @@ -731,3 +731,698 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: TrafficManagerProfile is the Schema for the TrafficManagerProfiles + API. Manages a Traffic Manager Profile. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: TrafficManagerProfileSpec defines the desired state of TrafficManagerProfile + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + dnsConfig: + description: This block specifies the DNS configuration of the + Profile. One dns_config block as defined below. + properties: + relativeName: + description: The relative domain name, this is combined with + the domain name used by Traffic Manager to form the FQDN + which is exported as documented below. Changing this forces + a new resource to be created. + type: string + ttl: + description: The TTL value of the Profile used by Local DNS + resolvers and clients. + type: number + type: object + maxReturn: + description: The amount of endpoints to return for DNS queries + to this Profile. Possible values range from 1 to 8. + type: number + monitorConfig: + description: This block specifies the Endpoint monitoring configuration + for the Profile. One monitor_config block as defined below. + properties: + customHeader: + description: One or more custom_header blocks as defined below. + items: + properties: + name: + description: The name of the Traffic Manager profile. 
+ Changing this forces a new resource to be created. + type: string + value: + description: The value of custom header. Applicable + for HTTP and HTTPS protocol. + type: string + type: object + type: array + expectedStatusCodeRanges: + description: A list of status code ranges in the format of + 100-101. + items: + type: string + type: array + intervalInSeconds: + description: 'The interval used to check the endpoint health + from a Traffic Manager probing agent. You can specify two + values here: 30 (normal probing) and 10 (fast probing). + The default value is 30.' + type: number + path: + description: The path used by the monitoring checks. Required + when protocol is set to HTTP or HTTPS - cannot be set when + protocol is set to TCP. + type: string + port: + description: The port number used by the monitoring checks. + type: number + protocol: + description: The protocol used by the monitoring checks, supported + values are HTTP, HTTPS and TCP. + type: string + timeoutInSeconds: + description: The amount of time the Traffic Manager probing + agent should wait before considering that check a failure + when a health check probe is sent to the endpoint. If interval_in_seconds + is set to 30, then timeout_in_seconds can be between 5 and + 10. The default value is 10. If interval_in_seconds is set + to 10, then valid values are between 5 and 9 and timeout_in_seconds + is required. + type: number + toleratedNumberOfFailures: + description: The number of failures a Traffic Manager probing + agent tolerates before marking that endpoint as unhealthy. + Valid values are between 0 and 9. The default value is 3 + type: number + type: object + profileStatus: + description: The status of the profile, can be set to either Enabled + or Disabled. Defaults to Enabled. + type: string + resourceGroupName: + description: The name of the resource group in which to create + the Traffic Manager profile. Changing this forces a new resource + to be created. 
+ type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + trafficRoutingMethod: + description: Specifies the algorithm used to route traffic. Possible + values are Geographic, Weighted, Performance, Priority, Subnet + and MultiValue. + type: string + trafficViewEnabled: + description: Indicates whether Traffic View is enabled for the + Traffic Manager profile. + type: boolean + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + dnsConfig: + description: This block specifies the DNS configuration of the + Profile. One dns_config block as defined below. + properties: + relativeName: + description: The relative domain name, this is combined with + the domain name used by Traffic Manager to form the FQDN + which is exported as documented below. Changing this forces + a new resource to be created. + type: string + ttl: + description: The TTL value of the Profile used by Local DNS + resolvers and clients. 
+ type: number + type: object + maxReturn: + description: The amount of endpoints to return for DNS queries + to this Profile. Possible values range from 1 to 8. + type: number + monitorConfig: + description: This block specifies the Endpoint monitoring configuration + for the Profile. One monitor_config block as defined below. + properties: + customHeader: + description: One or more custom_header blocks as defined below. + items: + properties: + name: + description: The name of the Traffic Manager profile. + Changing this forces a new resource to be created. + type: string + value: + description: The value of custom header. Applicable + for HTTP and HTTPS protocol. + type: string + type: object + type: array + expectedStatusCodeRanges: + description: A list of status code ranges in the format of + 100-101. + items: + type: string + type: array + intervalInSeconds: + description: 'The interval used to check the endpoint health + from a Traffic Manager probing agent. You can specify two + values here: 30 (normal probing) and 10 (fast probing). + The default value is 30.' + type: number + path: + description: The path used by the monitoring checks. Required + when protocol is set to HTTP or HTTPS - cannot be set when + protocol is set to TCP. + type: string + port: + description: The port number used by the monitoring checks. + type: number + protocol: + description: The protocol used by the monitoring checks, supported + values are HTTP, HTTPS and TCP. + type: string + timeoutInSeconds: + description: The amount of time the Traffic Manager probing + agent should wait before considering that check a failure + when a health check probe is sent to the endpoint. If interval_in_seconds + is set to 30, then timeout_in_seconds can be between 5 and + 10. The default value is 10. If interval_in_seconds is set + to 10, then valid values are between 5 and 9 and timeout_in_seconds + is required. 
+ type: number + toleratedNumberOfFailures: + description: The number of failures a Traffic Manager probing + agent tolerates before marking that endpoint as unhealthy. + Valid values are between 0 and 9. The default value is 3 + type: number + type: object + profileStatus: + description: The status of the profile, can be set to either Enabled + or Disabled. Defaults to Enabled. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + trafficRoutingMethod: + description: Specifies the algorithm used to route traffic. Possible + values are Geographic, Weighted, Performance, Priority, Subnet + and MultiValue. + type: string + trafficViewEnabled: + description: Indicates whether Traffic View is enabled for the + Traffic Manager profile. + type: boolean + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.dnsConfig is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.dnsConfig) + || (has(self.initProvider) && has(self.initProvider.dnsConfig))' + - message: spec.forProvider.monitorConfig is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.monitorConfig) + || (has(self.initProvider) && has(self.initProvider.monitorConfig))' + - message: spec.forProvider.trafficRoutingMethod is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.trafficRoutingMethod) + || (has(self.initProvider) && has(self.initProvider.trafficRoutingMethod))' + status: + description: TrafficManagerProfileStatus defines the observed state of + TrafficManagerProfile. + properties: + atProvider: + properties: + dnsConfig: + description: This block specifies the DNS configuration of the + Profile. One dns_config block as defined below. 
+ properties: + relativeName: + description: The relative domain name, this is combined with + the domain name used by Traffic Manager to form the FQDN + which is exported as documented below. Changing this forces + a new resource to be created. + type: string + ttl: + description: The TTL value of the Profile used by Local DNS + resolvers and clients. + type: number + type: object + fqdn: + description: The FQDN of the created Profile. + type: string + id: + description: The ID of the Traffic Manager Profile. + type: string + maxReturn: + description: The amount of endpoints to return for DNS queries + to this Profile. Possible values range from 1 to 8. + type: number + monitorConfig: + description: This block specifies the Endpoint monitoring configuration + for the Profile. One monitor_config block as defined below. + properties: + customHeader: + description: One or more custom_header blocks as defined below. + items: + properties: + name: + description: The name of the Traffic Manager profile. + Changing this forces a new resource to be created. + type: string + value: + description: The value of custom header. Applicable + for HTTP and HTTPS protocol. + type: string + type: object + type: array + expectedStatusCodeRanges: + description: A list of status code ranges in the format of + 100-101. + items: + type: string + type: array + intervalInSeconds: + description: 'The interval used to check the endpoint health + from a Traffic Manager probing agent. You can specify two + values here: 30 (normal probing) and 10 (fast probing). + The default value is 30.' + type: number + path: + description: The path used by the monitoring checks. Required + when protocol is set to HTTP or HTTPS - cannot be set when + protocol is set to TCP. + type: string + port: + description: The port number used by the monitoring checks. + type: number + protocol: + description: The protocol used by the monitoring checks, supported + values are HTTP, HTTPS and TCP. 
+ type: string + timeoutInSeconds: + description: The amount of time the Traffic Manager probing + agent should wait before considering that check a failure + when a health check probe is sent to the endpoint. If interval_in_seconds + is set to 30, then timeout_in_seconds can be between 5 and + 10. The default value is 10. If interval_in_seconds is set + to 10, then valid values are between 5 and 9 and timeout_in_seconds + is required. + type: number + toleratedNumberOfFailures: + description: The number of failures a Traffic Manager probing + agent tolerates before marking that endpoint as unhealthy. + Valid values are between 0 and 9. The default value is 3 + type: number + type: object + profileStatus: + description: The status of the profile, can be set to either Enabled + or Disabled. Defaults to Enabled. + type: string + resourceGroupName: + description: The name of the resource group in which to create + the Traffic Manager profile. Changing this forces a new resource + to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + trafficRoutingMethod: + description: Specifies the algorithm used to route traffic. Possible + values are Geographic, Weighted, Performance, Priority, Subnet + and MultiValue. + type: string + trafficViewEnabled: + description: Indicates whether Traffic View is enabled for the + Traffic Manager profile. + type: boolean + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/network.azure.upbound.io_virtualhubconnections.yaml b/package/crds/network.azure.upbound.io_virtualhubconnections.yaml index 2f47c58a0..e86c704f2 100644 --- a/package/crds/network.azure.upbound.io_virtualhubconnections.yaml +++ b/package/crds/network.azure.upbound.io_virtualhubconnections.yaml @@ -941,3 +941,911 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: VirtualHubConnection is the Schema for the VirtualHubConnections + API. Manages a Connection for a Virtual Hub. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: VirtualHubConnectionSpec defines the desired state of VirtualHubConnection + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + internetSecurityEnabled: + description: Should Internet Security be enabled to secure internet + traffic? Defaults to false. + type: boolean + remoteVirtualNetworkId: + description: The ID of the Virtual Network which the Virtual Hub + should be connected to. Changing this forces a new resource + to be created. + type: string + remoteVirtualNetworkIdRef: + description: Reference to a VirtualNetwork in network to populate + remoteVirtualNetworkId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + remoteVirtualNetworkIdSelector: + description: Selector for a VirtualNetwork in network to populate + remoteVirtualNetworkId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + routing: + description: A routing block as defined below. + properties: + associatedRouteTableId: + description: The ID of the route table associated with this + Virtual Hub connection. + type: string + associatedRouteTableIdRef: + description: Reference to a VirtualHubRouteTable in network + to populate associatedRouteTableId. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + associatedRouteTableIdSelector: + description: Selector for a VirtualHubRouteTable in network + to populate associatedRouteTableId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + inboundRouteMapId: + description: The resource ID of the Route Map associated with + this Routing Configuration for inbound learned routes. + type: string + outboundRouteMapId: + description: The resource ID of the Route Map associated with + this Routing Configuration for outbound advertised routes. + type: string + propagatedRouteTable: + description: A propagated_route_table block as defined below. + properties: + labels: + description: The list of labels to assign to this route + table. + items: + type: string + type: array + x-kubernetes-list-type: set + routeTableIds: + description: A list of Route Table IDs to associated with + this Virtual Hub Connection. + items: + type: string + type: array + type: object + staticVnetLocalRouteOverrideCriteria: + description: The static VNet local route override criteria + that is used to determine whether NVA in spoke VNet is bypassed + for traffic with destination in spoke VNet. Possible values + are Contains and Equal. Defaults to Contains. Changing this + forces a new resource to be created. + type: string + staticVnetRoute: + description: A static_vnet_route block as defined below. + items: + properties: + addressPrefixes: + description: A list of CIDR Ranges which should be used + as Address Prefixes. + items: + type: string + type: array + x-kubernetes-list-type: set + name: + description: The name which should be used for this + Static Route. + type: string + nextHopIpAddress: + description: The IP Address which should be used for + the Next Hop. + type: string + type: object + type: array + type: object + virtualHubId: + description: The ID of the Virtual Hub within which this connection + should be created. Changing this forces a new resource to be + created. + type: string + virtualHubIdRef: + description: Reference to a VirtualHub in network to populate + virtualHubId. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualHubIdSelector: + description: Selector for a VirtualHub in network to populate + virtualHubId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + internetSecurityEnabled: + description: Should Internet Security be enabled to secure internet + traffic? Defaults to false. + type: boolean + remoteVirtualNetworkId: + description: The ID of the Virtual Network which the Virtual Hub + should be connected to. Changing this forces a new resource + to be created. + type: string + remoteVirtualNetworkIdRef: + description: Reference to a VirtualNetwork in network to populate + remoteVirtualNetworkId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + remoteVirtualNetworkIdSelector: + description: Selector for a VirtualNetwork in network to populate + remoteVirtualNetworkId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + routing: + description: A routing block as defined below. + properties: + associatedRouteTableId: + description: The ID of the route table associated with this + Virtual Hub connection. + type: string + associatedRouteTableIdRef: + description: Reference to a VirtualHubRouteTable in network + to populate associatedRouteTableId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + associatedRouteTableIdSelector: + description: Selector for a VirtualHubRouteTable in network + to populate associatedRouteTableId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + inboundRouteMapId: + description: The resource ID of the Route Map associated with + this Routing Configuration for inbound learned routes. + type: string + outboundRouteMapId: + description: The resource ID of the Route Map associated with + this Routing Configuration for outbound advertised routes. + type: string + propagatedRouteTable: + description: A propagated_route_table block as defined below. + properties: + labels: + description: The list of labels to assign to this route + table. + items: + type: string + type: array + x-kubernetes-list-type: set + routeTableIds: + description: A list of Route Table IDs to associated with + this Virtual Hub Connection. + items: + type: string + type: array + type: object + staticVnetLocalRouteOverrideCriteria: + description: The static VNet local route override criteria + that is used to determine whether NVA in spoke VNet is bypassed + for traffic with destination in spoke VNet. Possible values + are Contains and Equal. Defaults to Contains. Changing this + forces a new resource to be created. + type: string + staticVnetRoute: + description: A static_vnet_route block as defined below. + items: + properties: + addressPrefixes: + description: A list of CIDR Ranges which should be used + as Address Prefixes. + items: + type: string + type: array + x-kubernetes-list-type: set + name: + description: The name which should be used for this + Static Route. + type: string + nextHopIpAddress: + description: The IP Address which should be used for + the Next Hop. + type: string + type: object + type: array + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. 
+ This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: VirtualHubConnectionStatus defines the observed state of + VirtualHubConnection. + properties: + atProvider: + properties: + id: + description: The ID of the Virtual Hub Connection. + type: string + internetSecurityEnabled: + description: Should Internet Security be enabled to secure internet + traffic? Defaults to false. + type: boolean + remoteVirtualNetworkId: + description: The ID of the Virtual Network which the Virtual Hub + should be connected to. Changing this forces a new resource + to be created. + type: string + routing: + description: A routing block as defined below. 
+ properties: + associatedRouteTableId: + description: The ID of the route table associated with this + Virtual Hub connection. + type: string + inboundRouteMapId: + description: The resource ID of the Route Map associated with + this Routing Configuration for inbound learned routes. + type: string + outboundRouteMapId: + description: The resource ID of the Route Map associated with + this Routing Configuration for outbound advertised routes. + type: string + propagatedRouteTable: + description: A propagated_route_table block as defined below. + properties: + labels: + description: The list of labels to assign to this route + table. + items: + type: string + type: array + x-kubernetes-list-type: set + routeTableIds: + description: A list of Route Table IDs to associated with + this Virtual Hub Connection. + items: + type: string + type: array + type: object + staticVnetLocalRouteOverrideCriteria: + description: The static VNet local route override criteria + that is used to determine whether NVA in spoke VNet is bypassed + for traffic with destination in spoke VNet. Possible values + are Contains and Equal. Defaults to Contains. Changing this + forces a new resource to be created. + type: string + staticVnetRoute: + description: A static_vnet_route block as defined below. + items: + properties: + addressPrefixes: + description: A list of CIDR Ranges which should be used + as Address Prefixes. + items: + type: string + type: array + x-kubernetes-list-type: set + name: + description: The name which should be used for this + Static Route. + type: string + nextHopIpAddress: + description: The IP Address which should be used for + the Next Hop. + type: string + type: object + type: array + type: object + virtualHubId: + description: The ID of the Virtual Hub within which this connection + should be created. Changing this forces a new resource to be + created. + type: string + type: object + conditions: + description: Conditions of the resource. 
+ items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/network.azure.upbound.io_virtualnetworkgatewayconnections.yaml b/package/crds/network.azure.upbound.io_virtualnetworkgatewayconnections.yaml index 77ebe79fd..18a8167f4 100644 --- a/package/crds/network.azure.upbound.io_virtualnetworkgatewayconnections.yaml +++ b/package/crds/network.azure.upbound.io_virtualnetworkgatewayconnections.yaml @@ -1408,3 +1408,1381 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: VirtualNetworkGatewayConnection is the Schema for the VirtualNetworkGatewayConnections + API. Manages a connection in an existing Virtual Network Gateway. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: VirtualNetworkGatewayConnectionSpec defines the desired state + of VirtualNetworkGatewayConnection + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + authorizationKeySecretRef: + description: The authorization key associated with the Express + Route Circuit. This field is required only if the type is an + ExpressRoute connection. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + connectionMode: + description: Connection mode to use. Possible values are Default, + InitiatorOnly and ResponderOnly. Defaults to Default. Changing + this value will force a resource to be created. + type: string + connectionProtocol: + description: |- + The IKE protocol version to use. Possible values are IKEv1 and IKEv2, values are IKEv1 and IKEv2. Defaults to IKEv2. Changing this forces a new resource to be created. + -> Note: Only valid for IPSec connections on virtual network gateways with SKU VpnGw1, VpnGw2, VpnGw3, VpnGw1AZ, VpnGw2AZ or VpnGw3AZ. 
+ type: string + customBgpAddresses: + description: |- + A custom_bgp_addresses block which is documented below. + The block can only be used on IPSec / activeactive connections, + For details about see the relevant section in the Azure documentation. + properties: + primary: + description: single IP address that is part of the azurerm_virtual_network_gateway + ip_configuration (first one) + type: string + secondary: + description: single IP address that is part of the azurerm_virtual_network_gateway + ip_configuration (second one) + type: string + type: object + dpdTimeoutSeconds: + description: The dead peer detection timeout of this connection + in seconds. Changing this forces a new resource to be created. + type: number + egressNatRuleIds: + description: A list of the egress NAT Rule Ids. + items: + type: string + type: array + x-kubernetes-list-type: set + enableBgp: + description: If true, BGP (Border Gateway Protocol) is enabled + for this connection. Defaults to false. + type: boolean + expressRouteCircuitId: + description: The ID of the Express Route Circuit when creating + an ExpressRoute connection (i.e. when type is ExpressRoute). + The Express Route Circuit can be in the same or in a different + subscription. Changing this forces a new resource to be created. + type: string + expressRouteGatewayBypass: + description: If true, data packets will bypass ExpressRoute Gateway + for data forwarding This is only valid for ExpressRoute connections. + type: boolean + ingressNatRuleIds: + description: A list of the ingress NAT Rule Ids. + items: + type: string + type: array + x-kubernetes-list-type: set + ipsecPolicy: + description: |- + A ipsec_policy block which is documented below. + Only a single policy can be defined for a connection. For details on + custom policies refer to the relevant section in the Azure documentation. + properties: + dhGroup: + description: The DH group used in IKE phase 1 for initial + SA. 
Valid options are DHGroup1, DHGroup14, DHGroup2, DHGroup2048, + DHGroup24, ECP256, ECP384, or None. + type: string + ikeEncryption: + description: The IKE encryption algorithm. Valid options are + AES128, AES192, AES256, DES, DES3, GCMAES128, or GCMAES256. + type: string + ikeIntegrity: + description: The IKE integrity algorithm. Valid options are + GCMAES128, GCMAES256, MD5, SHA1, SHA256, or SHA384. + type: string + ipsecEncryption: + description: The IPSec encryption algorithm. Valid options + are AES128, AES192, AES256, DES, DES3, GCMAES128, GCMAES192, + GCMAES256, or None. + type: string + ipsecIntegrity: + description: The IPSec integrity algorithm. Valid options + are GCMAES128, GCMAES192, GCMAES256, MD5, SHA1, or SHA256. + type: string + pfsGroup: + description: |- + The DH group used in IKE phase 2 for new child SA. + Valid options are ECP256, ECP384, PFS1, PFS14, PFS2, PFS2048, PFS24, PFSMM, + or None. + type: string + saDatasize: + description: The IPSec SA payload size in KB. Must be at least + 1024 KB. Defaults to 102400000 KB. + type: number + saLifetime: + description: The IPSec SA lifetime in seconds. Must be at + least 300 seconds. Defaults to 27000 seconds. + type: number + type: object + localAzureIpAddressEnabled: + description: Use private local Azure IP for the connection. Changing + this forces a new resource to be created. + type: boolean + localNetworkGatewayId: + description: The ID of the local network gateway when creating + Site-to-Site connection (i.e. when type is IPsec). + type: string + localNetworkGatewayIdRef: + description: Reference to a LocalNetworkGateway in network to + populate localNetworkGatewayId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + localNetworkGatewayIdSelector: + description: Selector for a LocalNetworkGateway in network to + populate localNetworkGatewayId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + location: + description: The location/region where the connection is located. + Changing this forces a new resource to be created. 
+ type: string + peerVirtualNetworkGatewayId: + description: The ID of the peer virtual network gateway when creating + a VNet-to-VNet connection (i.e. when type is Vnet2Vnet). The + peer Virtual Network Gateway can be in the same or in a different + subscription. Changing this forces a new resource to be created. + type: string + peerVirtualNetworkGatewayIdRef: + description: Reference to a VirtualNetworkGateway in network to + populate peerVirtualNetworkGatewayId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + peerVirtualNetworkGatewayIdSelector: + description: Selector for a VirtualNetworkGateway in network to + populate peerVirtualNetworkGatewayId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + resourceGroupName: + description: The name of the resource group in which to create + the connection Changing this forces a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + routingWeight: + description: The routing weight. Defaults to 10. + type: number + sharedKeySecretRef: + description: The shared IPSec key. A key could be provided if + a Site-to-Site, VNet-to-VNet or ExpressRoute connection is created. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. 
+ type: object + x-kubernetes-map-type: granular + trafficSelectorPolicy: + description: |- + One or more traffic_selector_policy blocks which are documented below. + A traffic_selector_policy allows to specify a traffic selector policy proposal to be used in a virtual network gateway connection. + For details about traffic selectors refer to the relevant section in the Azure documentation. + items: + properties: + localAddressCidrs: + description: List of local CIDRs. + items: + type: string + type: array + remoteAddressCidrs: + description: List of remote CIDRs. + items: + type: string + type: array + type: object + type: array + type: + description: The type of connection. Valid options are IPsec (Site-to-Site), + ExpressRoute (ExpressRoute), and Vnet2Vnet (VNet-to-VNet). Each + connection type requires different mandatory arguments (refer + to the examples above). Changing this forces a new resource + to be created. + type: string + usePolicyBasedTrafficSelectors: + description: If true, policy-based traffic selectors are enabled + for this connection. Enabling policy-based traffic selectors + requires an ipsec_policy block. Defaults to false. + type: boolean + virtualNetworkGatewayId: + description: The ID of the Virtual Network Gateway in which the + connection will be created. Changing this forces a new resource + to be created. + type: string + virtualNetworkGatewayIdRef: + description: Reference to a VirtualNetworkGateway in network to + populate virtualNetworkGatewayId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkGatewayIdSelector: + description: Selector for a VirtualNetworkGateway in network to + populate virtualNetworkGatewayId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. 
The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + connectionMode: + description: Connection mode to use. Possible values are Default, + InitiatorOnly and ResponderOnly. Defaults to Default. Changing + this value will force a resource to be created. + type: string + connectionProtocol: + description: |- + The IKE protocol version to use. Possible values are IKEv1 and IKEv2, values are IKEv1 and IKEv2. Defaults to IKEv2. Changing this forces a new resource to be created. + -> Note: Only valid for IPSec connections on virtual network gateways with SKU VpnGw1, VpnGw2, VpnGw3, VpnGw1AZ, VpnGw2AZ or VpnGw3AZ. + type: string + customBgpAddresses: + description: |- + A custom_bgp_addresses block which is documented below. + The block can only be used on IPSec / activeactive connections, + For details about see the relevant section in the Azure documentation. + properties: + primary: + description: single IP address that is part of the azurerm_virtual_network_gateway + ip_configuration (first one) + type: string + secondary: + description: single IP address that is part of the azurerm_virtual_network_gateway + ip_configuration (second one) + type: string + type: object + dpdTimeoutSeconds: + description: The dead peer detection timeout of this connection + in seconds. Changing this forces a new resource to be created. + type: number + egressNatRuleIds: + description: A list of the egress NAT Rule Ids. + items: + type: string + type: array + x-kubernetes-list-type: set + enableBgp: + description: If true, BGP (Border Gateway Protocol) is enabled + for this connection. Defaults to false. 
+ type: boolean + expressRouteCircuitId: + description: The ID of the Express Route Circuit when creating + an ExpressRoute connection (i.e. when type is ExpressRoute). + The Express Route Circuit can be in the same or in a different + subscription. Changing this forces a new resource to be created. + type: string + expressRouteGatewayBypass: + description: If true, data packets will bypass ExpressRoute Gateway + for data forwarding This is only valid for ExpressRoute connections. + type: boolean + ingressNatRuleIds: + description: A list of the ingress NAT Rule Ids. + items: + type: string + type: array + x-kubernetes-list-type: set + ipsecPolicy: + description: |- + A ipsec_policy block which is documented below. + Only a single policy can be defined for a connection. For details on + custom policies refer to the relevant section in the Azure documentation. + properties: + dhGroup: + description: The DH group used in IKE phase 1 for initial + SA. Valid options are DHGroup1, DHGroup14, DHGroup2, DHGroup2048, + DHGroup24, ECP256, ECP384, or None. + type: string + ikeEncryption: + description: The IKE encryption algorithm. Valid options are + AES128, AES192, AES256, DES, DES3, GCMAES128, or GCMAES256. + type: string + ikeIntegrity: + description: The IKE integrity algorithm. Valid options are + GCMAES128, GCMAES256, MD5, SHA1, SHA256, or SHA384. + type: string + ipsecEncryption: + description: The IPSec encryption algorithm. Valid options + are AES128, AES192, AES256, DES, DES3, GCMAES128, GCMAES192, + GCMAES256, or None. + type: string + ipsecIntegrity: + description: The IPSec integrity algorithm. Valid options + are GCMAES128, GCMAES192, GCMAES256, MD5, SHA1, or SHA256. + type: string + pfsGroup: + description: |- + The DH group used in IKE phase 2 for new child SA. + Valid options are ECP256, ECP384, PFS1, PFS14, PFS2, PFS2048, PFS24, PFSMM, + or None. + type: string + saDatasize: + description: The IPSec SA payload size in KB. Must be at least + 1024 KB. 
Defaults to 102400000 KB. + type: number + saLifetime: + description: The IPSec SA lifetime in seconds. Must be at + least 300 seconds. Defaults to 27000 seconds. + type: number + type: object + localAzureIpAddressEnabled: + description: Use private local Azure IP for the connection. Changing + this forces a new resource to be created. + type: boolean + localNetworkGatewayId: + description: The ID of the local network gateway when creating + Site-to-Site connection (i.e. when type is IPsec). + type: string + localNetworkGatewayIdRef: + description: Reference to a LocalNetworkGateway in network to + populate localNetworkGatewayId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + localNetworkGatewayIdSelector: + description: Selector for a LocalNetworkGateway in network to + populate localNetworkGatewayId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + location: + description: The location/region where the connection is located. + Changing this forces a new resource to be created. + type: string + peerVirtualNetworkGatewayId: + description: The ID of the peer virtual network gateway when creating + a VNet-to-VNet connection (i.e. when type is Vnet2Vnet). The + peer Virtual Network Gateway can be in the same or in a different + subscription. Changing this forces a new resource to be created. + type: string + peerVirtualNetworkGatewayIdRef: + description: Reference to a VirtualNetworkGateway in network to + populate peerVirtualNetworkGatewayId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + peerVirtualNetworkGatewayIdSelector: + description: Selector for a VirtualNetworkGateway in network to + populate peerVirtualNetworkGatewayId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + routingWeight: + description: The routing weight. Defaults to 10. + type: number + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + trafficSelectorPolicy: + description: |- + One or more traffic_selector_policy blocks which are documented below. 
+ A traffic_selector_policy allows to specify a traffic selector policy proposal to be used in a virtual network gateway connection. + For details about traffic selectors refer to the relevant section in the Azure documentation. + items: + properties: + localAddressCidrs: + description: List of local CIDRs. + items: + type: string + type: array + remoteAddressCidrs: + description: List of remote CIDRs. + items: + type: string + type: array + type: object + type: array + type: + description: The type of connection. Valid options are IPsec (Site-to-Site), + ExpressRoute (ExpressRoute), and Vnet2Vnet (VNet-to-VNet). Each + connection type requires different mandatory arguments (refer + to the examples above). Changing this forces a new resource + to be created. + type: string + usePolicyBasedTrafficSelectors: + description: If true, policy-based traffic selectors are enabled + for this connection. Enabling policy-based traffic selectors + requires an ipsec_policy block. Defaults to false. + type: boolean + virtualNetworkGatewayId: + description: The ID of the Virtual Network Gateway in which the + connection will be created. Changing this forces a new resource + to be created. + type: string + virtualNetworkGatewayIdRef: + description: Reference to a VirtualNetworkGateway in network to + populate virtualNetworkGatewayId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkGatewayIdSelector: + description: Selector for a VirtualNetworkGateway in network to + populate virtualNetworkGatewayId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. 
Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.type is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.type) + || (has(self.initProvider) && has(self.initProvider.type))' + status: + description: VirtualNetworkGatewayConnectionStatus defines the observed + state of VirtualNetworkGatewayConnection. + properties: + atProvider: + properties: + connectionMode: + description: Connection mode to use. Possible values are Default, + InitiatorOnly and ResponderOnly. Defaults to Default. Changing + this value will force a resource to be created. + type: string + connectionProtocol: + description: |- + The IKE protocol version to use. Possible values are IKEv1 and IKEv2, values are IKEv1 and IKEv2. Defaults to IKEv2. Changing this forces a new resource to be created. + -> Note: Only valid for IPSec connections on virtual network gateways with SKU VpnGw1, VpnGw2, VpnGw3, VpnGw1AZ, VpnGw2AZ or VpnGw3AZ. + type: string + customBgpAddresses: + description: |- + A custom_bgp_addresses block which is documented below. + The block can only be used on IPSec / activeactive connections, + For details about see the relevant section in the Azure documentation. 
+ properties: + primary: + description: single IP address that is part of the azurerm_virtual_network_gateway + ip_configuration (first one) + type: string + secondary: + description: single IP address that is part of the azurerm_virtual_network_gateway + ip_configuration (second one) + type: string + type: object + dpdTimeoutSeconds: + description: The dead peer detection timeout of this connection + in seconds. Changing this forces a new resource to be created. + type: number + egressNatRuleIds: + description: A list of the egress NAT Rule Ids. + items: + type: string + type: array + x-kubernetes-list-type: set + enableBgp: + description: If true, BGP (Border Gateway Protocol) is enabled + for this connection. Defaults to false. + type: boolean + expressRouteCircuitId: + description: The ID of the Express Route Circuit when creating + an ExpressRoute connection (i.e. when type is ExpressRoute). + The Express Route Circuit can be in the same or in a different + subscription. Changing this forces a new resource to be created. + type: string + expressRouteGatewayBypass: + description: If true, data packets will bypass ExpressRoute Gateway + for data forwarding This is only valid for ExpressRoute connections. + type: boolean + id: + description: The ID of the Virtual Network Gateway Connection. + type: string + ingressNatRuleIds: + description: A list of the ingress NAT Rule Ids. + items: + type: string + type: array + x-kubernetes-list-type: set + ipsecPolicy: + description: |- + A ipsec_policy block which is documented below. + Only a single policy can be defined for a connection. For details on + custom policies refer to the relevant section in the Azure documentation. + properties: + dhGroup: + description: The DH group used in IKE phase 1 for initial + SA. Valid options are DHGroup1, DHGroup14, DHGroup2, DHGroup2048, + DHGroup24, ECP256, ECP384, or None. + type: string + ikeEncryption: + description: The IKE encryption algorithm. 
Valid options are + AES128, AES192, AES256, DES, DES3, GCMAES128, or GCMAES256. + type: string + ikeIntegrity: + description: The IKE integrity algorithm. Valid options are + GCMAES128, GCMAES256, MD5, SHA1, SHA256, or SHA384. + type: string + ipsecEncryption: + description: The IPSec encryption algorithm. Valid options + are AES128, AES192, AES256, DES, DES3, GCMAES128, GCMAES192, + GCMAES256, or None. + type: string + ipsecIntegrity: + description: The IPSec integrity algorithm. Valid options + are GCMAES128, GCMAES192, GCMAES256, MD5, SHA1, or SHA256. + type: string + pfsGroup: + description: |- + The DH group used in IKE phase 2 for new child SA. + Valid options are ECP256, ECP384, PFS1, PFS14, PFS2, PFS2048, PFS24, PFSMM, + or None. + type: string + saDatasize: + description: The IPSec SA payload size in KB. Must be at least + 1024 KB. Defaults to 102400000 KB. + type: number + saLifetime: + description: The IPSec SA lifetime in seconds. Must be at + least 300 seconds. Defaults to 27000 seconds. + type: number + type: object + localAzureIpAddressEnabled: + description: Use private local Azure IP for the connection. Changing + this forces a new resource to be created. + type: boolean + localNetworkGatewayId: + description: The ID of the local network gateway when creating + Site-to-Site connection (i.e. when type is IPsec). + type: string + location: + description: The location/region where the connection is located. + Changing this forces a new resource to be created. + type: string + peerVirtualNetworkGatewayId: + description: The ID of the peer virtual network gateway when creating + a VNet-to-VNet connection (i.e. when type is Vnet2Vnet). The + peer Virtual Network Gateway can be in the same or in a different + subscription. Changing this forces a new resource to be created. + type: string + resourceGroupName: + description: The name of the resource group in which to create + the connection Changing this forces a new resource to be created. 
+ type: string + routingWeight: + description: The routing weight. Defaults to 10. + type: number + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + trafficSelectorPolicy: + description: |- + One or more traffic_selector_policy blocks which are documented below. + A traffic_selector_policy allows to specify a traffic selector policy proposal to be used in a virtual network gateway connection. + For details about traffic selectors refer to the relevant section in the Azure documentation. + items: + properties: + localAddressCidrs: + description: List of local CIDRs. + items: + type: string + type: array + remoteAddressCidrs: + description: List of remote CIDRs. + items: + type: string + type: array + type: object + type: array + type: + description: The type of connection. Valid options are IPsec (Site-to-Site), + ExpressRoute (ExpressRoute), and Vnet2Vnet (VNet-to-VNet). Each + connection type requires different mandatory arguments (refer + to the examples above). Changing this forces a new resource + to be created. + type: string + usePolicyBasedTrafficSelectors: + description: If true, policy-based traffic selectors are enabled + for this connection. Enabling policy-based traffic selectors + requires an ipsec_policy block. Defaults to false. + type: boolean + virtualNetworkGatewayId: + description: The ID of the Virtual Network Gateway in which the + connection will be created. Changing this forces a new resource + to be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. 
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/network.azure.upbound.io_virtualnetworkgateways.yaml b/package/crds/network.azure.upbound.io_virtualnetworkgateways.yaml index 6e281848f..fd9723a20 100644 --- a/package/crds/network.azure.upbound.io_virtualnetworkgateways.yaml +++ b/package/crds/network.azure.upbound.io_virtualnetworkgateways.yaml @@ -1885,3 +1885,1834 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: VirtualNetworkGateway is the Schema for the VirtualNetworkGateways + API. Manages a virtual network gateway to establish secure, cross-premises + connectivity. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: VirtualNetworkGatewaySpec defines the desired state of VirtualNetworkGateway + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + activeActive: + description: If true, an active-active Virtual Network Gateway + will be created. An active-active gateway requires a HighPerformance + or an UltraPerformance SKU. If false, an active-standby gateway + will be created. Defaults to false. + type: boolean + bgpRouteTranslationForNatEnabled: + description: Is BGP Route Translation for NAT enabled? Defaults + to false. + type: boolean + bgpSettings: + description: A bgp_settings block which is documented below. In + this block the BGP specific settings can be defined. + properties: + asn: + description: The Autonomous System Number (ASN) to use as + part of the BGP. + type: number + peerWeight: + description: The weight added to routes which have been learned + through BGP peering. Valid values can be between 0 and 100. + type: number + peeringAddresses: + description: A list of peering_addresses blocks as defined + below. Only one peering_addresses block can be specified + except when active_active of this Virtual Network Gateway + is true. 
+ items: + properties: + apipaAddresses: + description: A list of Azure custom APIPA addresses + assigned to the BGP peer of the Virtual Network Gateway. + items: + type: string + type: array + ipConfigurationName: + description: The name of the IP configuration of this + Virtual Network Gateway. In case there are multiple + ip_configuration blocks defined, this property is + required to specify. + type: string + type: object + type: array + type: object + customRoute: + description: A custom_route block as defined below. Specifies + a custom routes address space for a virtual network gateway + and a VpnClient. + properties: + addressPrefixes: + description: A list of address blocks reserved for this virtual + network in CIDR notation. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + defaultLocalNetworkGatewayId: + description: The ID of the local network gateway through which + outbound Internet traffic from the virtual network in which + the gateway is created will be routed (forced tunnelling). Refer + to the Azure documentation on forced tunnelling. If not specified, + forced tunnelling is disabled. + type: string + dnsForwardingEnabled: + description: Is DNS forwarding enabled? + type: boolean + edgeZone: + description: Specifies the Edge Zone within the Azure Region where + this Virtual Network Gateway should exist. Changing this forces + a new Virtual Network Gateway to be created. + type: string + enableBgp: + description: If true, BGP (Border Gateway Protocol) will be enabled + for this Virtual Network Gateway. Defaults to false. + type: boolean + generation: + description: The Generation of the Virtual Network gateway. Possible + values include Generation1, Generation2 or None. Changing this + forces a new resource to be created. + type: string + ipConfiguration: + description: |- + One or more (up to 3) ip_configuration blocks documented below. 
+ An active-standby gateway requires exactly one ip_configuration block, + an active-active gateway requires exactly two ip_configuration blocks whereas + an active-active zone redundant gateway with P2S configuration requires exactly three ip_configuration blocks. + items: + properties: + name: + description: A user-defined name of the IP configuration. + Defaults to vnetGatewayConfig. + type: string + privateIpAddressAllocation: + description: Defines how the private IP address of the gateways + virtual interface is assigned. The only valid value is + Dynamic for Virtual Network Gateway (Static is not supported + by the service yet). Defaults to Dynamic. + type: string + publicIpAddressId: + description: The ID of the public IP address to associate + with the Virtual Network Gateway. + type: string + publicIpAddressIdRef: + description: Reference to a PublicIP in network to populate + publicIpAddressId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publicIpAddressIdSelector: + description: Selector for a PublicIP in network to populate + publicIpAddressId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + subnetId: + description: The ID of the gateway subnet of a virtual network + in which the virtual network gateway will be created. + It is mandatory that the associated subnet is named GatewaySubnet. + Therefore, each virtual network can contain at most a + single Virtual Network Gateway. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + ipSecReplayProtectionEnabled: + description: Is IP Sec Replay Protection enabled? Defaults to + true. + type: boolean + location: + description: The location/region where the Virtual Network Gateway + is located. Changing this forces a new resource to be created. 
+ type: string + policyGroup: + description: One or more policy_group blocks as defined below. + items: + properties: + isDefault: + description: Is this a Default Virtual Network Gateway Policy + Group? Defaults to false. + type: boolean + name: + description: The name of the Virtual Network Gateway Policy + Group. + type: string + policyMember: + description: One or more policy_member blocks as defined + below. + items: + properties: + name: + description: The name of the Virtual Network Gateway + Client Connection. + type: string + type: + description: The type of the Virtual Network Gateway. + Valid options are Vpn or ExpressRoute. Changing + the type forces a new resource to be created. + type: string + value: + description: The value of attribute that is used for + this Virtual Network Gateway Policy Group Member. + type: string + type: object + type: array + priority: + description: The priority for the Virtual Network Gateway + Policy Group. Defaults to 0. + type: number + type: object + type: array + privateIpAddressEnabled: + description: Should private IP be enabled on this gateway for + connections? Changing this forces a new resource to be created. + type: boolean + remoteVnetTrafficEnabled: + description: Is remote vnet traffic that is used to configure + this gateway to accept traffic from other Azure Virtual Networks + enabled? Defaults to false. + type: boolean + resourceGroupName: + description: The name of the resource group in which to create + the Virtual Network Gateway. Changing this forces a new resource + to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sku: + description: Configuration of the size and capacity of the virtual + network gateway. 
Valid options are Basic, Standard, HighPerformance, + UltraPerformance, ErGw1AZ, ErGw2AZ, ErGw3AZ, VpnGw1, VpnGw2, + VpnGw3, VpnGw4,VpnGw5, VpnGw1AZ, VpnGw2AZ, VpnGw3AZ,VpnGw4AZ + and VpnGw5AZ and depend on the type, vpn_type and generation + arguments. A PolicyBased gateway only supports the Basic SKU. + Further, the UltraPerformance SKU is only supported by an ExpressRoute + gateway. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: + description: The type of the Virtual Network Gateway. Valid options + are Vpn or ExpressRoute. Changing the type forces a new resource + to be created. + type: string + virtualWanTrafficEnabled: + description: Is remote vnet traffic that is used to configure + this gateway to accept traffic from remote Virtual WAN networks + enabled? Defaults to false. + type: boolean + vpnClientConfiguration: + description: A vpn_client_configuration block which is documented + below. In this block the Virtual Network Gateway can be configured + to accept IPSec point-to-site connections. + properties: + aadAudience: + description: |- + The client id of the Azure VPN application. + See Create an Active Directory (AD) tenant for P2S OpenVPN protocol connections for values + type: string + aadIssuer: + description: The STS url for your tenant + type: string + aadTenant: + description: AzureAD Tenant URL + type: string + addressSpace: + description: The address space out of which IP addresses for + vpn clients will be taken. You can provide more than one + address space, e.g. in CIDR notation. + items: + type: string + type: array + ipsecPolicy: + description: An ipsec_policy block as defined below. + properties: + dhGroup: + description: The DH Group, used in IKE Phase 1. Possible + values are DHGroup1, DHGroup2, DHGroup14, DHGroup24, + DHGroup2048, ECP256, ECP384 and None. 
+ type: string + ikeEncryption: + description: The IKE encryption algorithm, used for IKE + Phase 2. Possible values are AES128, AES192, AES256, + DES, DES3, GCMAES128 and GCMAES256. + type: string + ikeIntegrity: + description: The IKE encryption integrity algorithm, used + for IKE Phase 2. Possible values are GCMAES128, GCMAES256, + MD5, SHA1, SHA256 and SHA384. + type: string + ipsecEncryption: + description: The IPSec encryption algorithm, used for + IKE phase 1. Possible values are AES128, AES192, AES256, + DES, DES3, GCMAES128, GCMAES192, GCMAES256 and None. + type: string + ipsecIntegrity: + description: The IPSec integrity algorithm, used for IKE + phase 1. Possible values are GCMAES128, GCMAES192, GCMAES256, + MD5, SHA1 and SHA256. + type: string + pfsGroup: + description: The Pfs Group, used in IKE Phase 2. Possible + values are ECP256, ECP384, PFS1, PFS2, PFS14, PFS24, + PFS2048, PFSMM and None. + type: string + saDataSizeInKilobytes: + description: The IPSec Security Association payload size + in KB for a Site-to-Site VPN tunnel. Possible values + are between 1024 and 2147483647. + type: number + saLifetimeInSeconds: + description: The IPSec Security Association lifetime in + seconds for a Site-to-Site VPN tunnel. Possible values + are between 300 and 172799. + type: number + type: object + radiusServer: + description: One or more radius_server blocks as defined below. + items: + properties: + address: + description: The address of the Radius Server. + type: string + score: + description: The score of the Radius Server determines + the priority of the server. Possible values are between + 1 and 30. + type: number + secretSecretRef: + description: The secret that is used to communicate + with the Radius Server. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - key + - name + - namespace + type: object + required: + - secretSecretRef + type: object + type: array + radiusServerAddress: + description: The address of the Radius server. + type: string + radiusServerSecret: + description: The secret used by the Radius server. + type: string + revokedCertificate: + description: One or more revoked_certificate blocks which + are defined below. + items: + properties: + name: + description: The name of the Virtual Network Gateway + Client Connection. + type: string + thumbprint: + description: Specifies the public data of the certificate. + type: string + type: object + type: array + rootCertificate: + description: One or more root_certificate blocks which are + defined below. These root certificates are used to sign + the client certificate used by the VPN clients to connect + to the gateway. + items: + properties: + name: + description: The name of the Virtual Network Gateway + Client Connection. + type: string + publicCertData: + description: The public certificate of the root certificate + authority. The certificate must be provided in Base-64 + encoded X.509 format (PEM). In particular, this argument + must not include the -----BEGIN CERTIFICATE----- or + -----END CERTIFICATE----- markers, nor any newlines. + type: string + type: object + type: array + virtualNetworkGatewayClientConnection: + description: One or more virtual_network_gateway_client_connection + blocks as defined below. + items: + properties: + addressPrefixes: + description: A list of address prefixes for P2S VPN + Client. + items: + type: string + type: array + name: + description: The name of the Virtual Network Gateway + Client Connection. + type: string + policyGroupNames: + description: A list of names of Virtual Network Gateway + Policy Groups. + items: + type: string + type: array + type: object + type: array + vpnAuthTypes: + description: |- + List of the vpn authentication types for the virtual network gateway. 
+ The supported values are AAD, Radius and Certificate. + items: + type: string + type: array + x-kubernetes-list-type: set + vpnClientProtocols: + description: |- + List of the protocols supported by the vpn client. + The supported values are SSTP, IkeV2 and OpenVPN. + Values SSTP and IkeV2 are incompatible with the use of + aad_tenant, aad_audience and aad_issuer. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + vpnType: + description: The routing type of the Virtual Network Gateway. + Valid options are RouteBased or PolicyBased. Defaults to RouteBased. + Changing this forces a new resource to be created. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + activeActive: + description: If true, an active-active Virtual Network Gateway + will be created. An active-active gateway requires a HighPerformance + or an UltraPerformance SKU. If false, an active-standby gateway + will be created. Defaults to false. + type: boolean + bgpRouteTranslationForNatEnabled: + description: Is BGP Route Translation for NAT enabled? Defaults + to false. + type: boolean + bgpSettings: + description: A bgp_settings block which is documented below. In + this block the BGP specific settings can be defined. 
+ properties: + asn: + description: The Autonomous System Number (ASN) to use as + part of the BGP. + type: number + peerWeight: + description: The weight added to routes which have been learned + through BGP peering. Valid values can be between 0 and 100. + type: number + peeringAddresses: + description: A list of peering_addresses blocks as defined + below. Only one peering_addresses block can be specified + except when active_active of this Virtual Network Gateway + is true. + items: + properties: + apipaAddresses: + description: A list of Azure custom APIPA addresses + assigned to the BGP peer of the Virtual Network Gateway. + items: + type: string + type: array + ipConfigurationName: + description: The name of the IP configuration of this + Virtual Network Gateway. In case there are multiple + ip_configuration blocks defined, this property is + required to specify. + type: string + type: object + type: array + type: object + customRoute: + description: A custom_route block as defined below. Specifies + a custom routes address space for a virtual network gateway + and a VpnClient. + properties: + addressPrefixes: + description: A list of address blocks reserved for this virtual + network in CIDR notation. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + defaultLocalNetworkGatewayId: + description: The ID of the local network gateway through which + outbound Internet traffic from the virtual network in which + the gateway is created will be routed (forced tunnelling). Refer + to the Azure documentation on forced tunnelling. If not specified, + forced tunnelling is disabled. + type: string + dnsForwardingEnabled: + description: Is DNS forwarding enabled? + type: boolean + edgeZone: + description: Specifies the Edge Zone within the Azure Region where + this Virtual Network Gateway should exist. Changing this forces + a new Virtual Network Gateway to be created. 
+ type: string + enableBgp: + description: If true, BGP (Border Gateway Protocol) will be enabled + for this Virtual Network Gateway. Defaults to false. + type: boolean + generation: + description: The Generation of the Virtual Network gateway. Possible + values include Generation1, Generation2 or None. Changing this + forces a new resource to be created. + type: string + ipConfiguration: + description: |- + One or more (up to 3) ip_configuration blocks documented below. + An active-standby gateway requires exactly one ip_configuration block, + an active-active gateway requires exactly two ip_configuration blocks whereas + an active-active zone redundant gateway with P2S configuration requires exactly three ip_configuration blocks. + items: + properties: + name: + description: A user-defined name of the IP configuration. + Defaults to vnetGatewayConfig. + type: string + privateIpAddressAllocation: + description: Defines how the private IP address of the gateways + virtual interface is assigned. The only valid value is + Dynamic for Virtual Network Gateway (Static is not supported + by the service yet). Defaults to Dynamic. + type: string + publicIpAddressId: + description: The ID of the public IP address to associate + with the Virtual Network Gateway. + type: string + publicIpAddressIdRef: + description: Reference to a PublicIP in network to populate + publicIpAddressId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publicIpAddressIdSelector: + description: Selector for a PublicIP in network to populate + publicIpAddressId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + subnetId: + description: The ID of the gateway subnet of a virtual network + in which the virtual network gateway will be created. + It is mandatory that the associated subnet is named GatewaySubnet. + Therefore, each virtual network can contain at most a + single Virtual Network Gateway. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + ipSecReplayProtectionEnabled: + description: Is IP Sec Replay Protection enabled? Defaults to + true. + type: boolean + location: + description: The location/region where the Virtual Network Gateway + is located. Changing this forces a new resource to be created. + type: string + policyGroup: + description: One or more policy_group blocks as defined below. + items: + properties: + isDefault: + description: Is this a Default Virtual Network Gateway Policy + Group? Defaults to false. + type: boolean + name: + description: The name of the Virtual Network Gateway Policy + Group. + type: string + policyMember: + description: One or more policy_member blocks as defined + below. + items: + properties: + name: + description: The name of the Virtual Network Gateway + Client Connection. + type: string + type: + description: The type of the Virtual Network Gateway. + Valid options are Vpn or ExpressRoute. Changing + the type forces a new resource to be created. + type: string + value: + description: The value of attribute that is used for + this Virtual Network Gateway Policy Group Member. + type: string + type: object + type: array + priority: + description: The priority for the Virtual Network Gateway + Policy Group. Defaults to 0. + type: number + type: object + type: array + privateIpAddressEnabled: + description: Should private IP be enabled on this gateway for + connections? Changing this forces a new resource to be created. + type: boolean + remoteVnetTrafficEnabled: + description: Is remote vnet traffic that is used to configure + this gateway to accept traffic from other Azure Virtual Networks + enabled? Defaults to false. + type: boolean + sku: + description: Configuration of the size and capacity of the virtual + network gateway. 
Valid options are Basic, Standard, HighPerformance, + UltraPerformance, ErGw1AZ, ErGw2AZ, ErGw3AZ, VpnGw1, VpnGw2, + VpnGw3, VpnGw4,VpnGw5, VpnGw1AZ, VpnGw2AZ, VpnGw3AZ,VpnGw4AZ + and VpnGw5AZ and depend on the type, vpn_type and generation + arguments. A PolicyBased gateway only supports the Basic SKU. + Further, the UltraPerformance SKU is only supported by an ExpressRoute + gateway. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: + description: The type of the Virtual Network Gateway. Valid options + are Vpn or ExpressRoute. Changing the type forces a new resource + to be created. + type: string + virtualWanTrafficEnabled: + description: Is remote vnet traffic that is used to configure + this gateway to accept traffic from remote Virtual WAN networks + enabled? Defaults to false. + type: boolean + vpnClientConfiguration: + description: A vpn_client_configuration block which is documented + below. In this block the Virtual Network Gateway can be configured + to accept IPSec point-to-site connections. + properties: + aadAudience: + description: |- + The client id of the Azure VPN application. + See Create an Active Directory (AD) tenant for P2S OpenVPN protocol connections for values + type: string + aadIssuer: + description: The STS url for your tenant + type: string + aadTenant: + description: AzureAD Tenant URL + type: string + addressSpace: + description: The address space out of which IP addresses for + vpn clients will be taken. You can provide more than one + address space, e.g. in CIDR notation. + items: + type: string + type: array + ipsecPolicy: + description: An ipsec_policy block as defined below. + properties: + dhGroup: + description: The DH Group, used in IKE Phase 1. Possible + values are DHGroup1, DHGroup2, DHGroup14, DHGroup24, + DHGroup2048, ECP256, ECP384 and None. 
+ type: string + ikeEncryption: + description: The IKE encryption algorithm, used for IKE + Phase 2. Possible values are AES128, AES192, AES256, + DES, DES3, GCMAES128 and GCMAES256. + type: string + ikeIntegrity: + description: The IKE encryption integrity algorithm, used + for IKE Phase 2. Possible values are GCMAES128, GCMAES256, + MD5, SHA1, SHA256 and SHA384. + type: string + ipsecEncryption: + description: The IPSec encryption algorithm, used for + IKE phase 1. Possible values are AES128, AES192, AES256, + DES, DES3, GCMAES128, GCMAES192, GCMAES256 and None. + type: string + ipsecIntegrity: + description: The IPSec integrity algorithm, used for IKE + phase 1. Possible values are GCMAES128, GCMAES192, GCMAES256, + MD5, SHA1 and SHA256. + type: string + pfsGroup: + description: The Pfs Group, used in IKE Phase 2. Possible + values are ECP256, ECP384, PFS1, PFS2, PFS14, PFS24, + PFS2048, PFSMM and None. + type: string + saDataSizeInKilobytes: + description: The IPSec Security Association payload size + in KB for a Site-to-Site VPN tunnel. Possible values + are between 1024 and 2147483647. + type: number + saLifetimeInSeconds: + description: The IPSec Security Association lifetime in + seconds for a Site-to-Site VPN tunnel. Possible values + are between 300 and 172799. + type: number + type: object + radiusServer: + description: One or more radius_server blocks as defined below. + items: + properties: + address: + description: The address of the Radius Server. + type: string + score: + description: The score of the Radius Server determines + the priority of the server. Possible values are between + 1 and 30. + type: number + type: object + type: array + radiusServerAddress: + description: The address of the Radius server. + type: string + radiusServerSecret: + description: The secret used by the Radius server. + type: string + revokedCertificate: + description: One or more revoked_certificate blocks which + are defined below. 
+ items: + properties: + name: + description: The name of the Virtual Network Gateway + Client Connection. + type: string + thumbprint: + description: Specifies the public data of the certificate. + type: string + type: object + type: array + rootCertificate: + description: One or more root_certificate blocks which are + defined below. These root certificates are used to sign + the client certificate used by the VPN clients to connect + to the gateway. + items: + properties: + name: + description: The name of the Virtual Network Gateway + Client Connection. + type: string + publicCertData: + description: The public certificate of the root certificate + authority. The certificate must be provided in Base-64 + encoded X.509 format (PEM). In particular, this argument + must not include the -----BEGIN CERTIFICATE----- or + -----END CERTIFICATE----- markers, nor any newlines. + type: string + type: object + type: array + virtualNetworkGatewayClientConnection: + description: One or more virtual_network_gateway_client_connection + blocks as defined below. + items: + properties: + addressPrefixes: + description: A list of address prefixes for P2S VPN + Client. + items: + type: string + type: array + name: + description: The name of the Virtual Network Gateway + Client Connection. + type: string + policyGroupNames: + description: A list of names of Virtual Network Gateway + Policy Groups. + items: + type: string + type: array + type: object + type: array + vpnAuthTypes: + description: |- + List of the vpn authentication types for the virtual network gateway. + The supported values are AAD, Radius and Certificate. + items: + type: string + type: array + x-kubernetes-list-type: set + vpnClientProtocols: + description: |- + List of the protocols supported by the vpn client. + The supported values are SSTP, IkeV2 and OpenVPN. + Values SSTP and IkeV2 are incompatible with the use of + aad_tenant, aad_audience and aad_issuer. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: object + vpnType: + description: The routing type of the Virtual Network Gateway. + Valid options are RouteBased or PolicyBased. Defaults to RouteBased. + Changing this forces a new resource to be created. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.ipConfiguration is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.ipConfiguration) + || (has(self.initProvider) && has(self.initProvider.ipConfiguration))' + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.sku is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.sku) + || (has(self.initProvider) && has(self.initProvider.sku))' + - message: spec.forProvider.type is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.type) + || (has(self.initProvider) && has(self.initProvider.type))' + status: + description: VirtualNetworkGatewayStatus defines the observed state of + VirtualNetworkGateway. + properties: + atProvider: + properties: + activeActive: + description: If true, an active-active Virtual Network Gateway + will be created. An active-active gateway requires a HighPerformance + or an UltraPerformance SKU. If false, an active-standby gateway + will be created. Defaults to false. + type: boolean + bgpRouteTranslationForNatEnabled: + description: Is BGP Route Translation for NAT enabled? Defaults + to false. + type: boolean + bgpSettings: + description: A bgp_settings block which is documented below. In + this block the BGP specific settings can be defined. 
+ properties: + asn: + description: The Autonomous System Number (ASN) to use as + part of the BGP. + type: number + peerWeight: + description: The weight added to routes which have been learned + through BGP peering. Valid values can be between 0 and 100. + type: number + peeringAddresses: + description: A list of peering_addresses blocks as defined + below. Only one peering_addresses block can be specified + except when active_active of this Virtual Network Gateway + is true. + items: + properties: + apipaAddresses: + description: A list of Azure custom APIPA addresses + assigned to the BGP peer of the Virtual Network Gateway. + items: + type: string + type: array + defaultAddresses: + description: A list of peering address assigned to the + BGP peer of the Virtual Network Gateway. + items: + type: string + type: array + ipConfigurationName: + description: The name of the IP configuration of this + Virtual Network Gateway. In case there are multiple + ip_configuration blocks defined, this property is + required to specify. + type: string + tunnelIpAddresses: + description: A list of tunnel IP addresses assigned + to the BGP peer of the Virtual Network Gateway. + items: + type: string + type: array + type: object + type: array + type: object + customRoute: + description: A custom_route block as defined below. Specifies + a custom routes address space for a virtual network gateway + and a VpnClient. + properties: + addressPrefixes: + description: A list of address blocks reserved for this virtual + network in CIDR notation. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + defaultLocalNetworkGatewayId: + description: The ID of the local network gateway through which + outbound Internet traffic from the virtual network in which + the gateway is created will be routed (forced tunnelling). Refer + to the Azure documentation on forced tunnelling. If not specified, + forced tunnelling is disabled. 
+ type: string + dnsForwardingEnabled: + description: Is DNS forwarding enabled? + type: boolean + edgeZone: + description: Specifies the Edge Zone within the Azure Region where + this Virtual Network Gateway should exist. Changing this forces + a new Virtual Network Gateway to be created. + type: string + enableBgp: + description: If true, BGP (Border Gateway Protocol) will be enabled + for this Virtual Network Gateway. Defaults to false. + type: boolean + generation: + description: The Generation of the Virtual Network gateway. Possible + values include Generation1, Generation2 or None. Changing this + forces a new resource to be created. + type: string + id: + description: The ID of the Virtual Network Gateway. + type: string + ipConfiguration: + description: |- + One or more (up to 3) ip_configuration blocks documented below. + An active-standby gateway requires exactly one ip_configuration block, + an active-active gateway requires exactly two ip_configuration blocks whereas + an active-active zone redundant gateway with P2S configuration requires exactly three ip_configuration blocks. + items: + properties: + name: + description: A user-defined name of the IP configuration. + Defaults to vnetGatewayConfig. + type: string + privateIpAddressAllocation: + description: Defines how the private IP address of the gateways + virtual interface is assigned. The only valid value is + Dynamic for Virtual Network Gateway (Static is not supported + by the service yet). Defaults to Dynamic. + type: string + publicIpAddressId: + description: The ID of the public IP address to associate + with the Virtual Network Gateway. + type: string + subnetId: + description: The ID of the gateway subnet of a virtual network + in which the virtual network gateway will be created. + It is mandatory that the associated subnet is named GatewaySubnet. + Therefore, each virtual network can contain at most a + single Virtual Network Gateway. 
+ type: string + type: object + type: array + ipSecReplayProtectionEnabled: + description: Is IP Sec Replay Protection enabled? Defaults to + true. + type: boolean + location: + description: The location/region where the Virtual Network Gateway + is located. Changing this forces a new resource to be created. + type: string + policyGroup: + description: One or more policy_group blocks as defined below. + items: + properties: + isDefault: + description: Is this a Default Virtual Network Gateway Policy + Group? Defaults to false. + type: boolean + name: + description: The name of the Virtual Network Gateway Policy + Group. + type: string + policyMember: + description: One or more policy_member blocks as defined + below. + items: + properties: + name: + description: The name of the Virtual Network Gateway + Client Connection. + type: string + type: + description: The type of the Virtual Network Gateway. + Valid options are Vpn or ExpressRoute. Changing + the type forces a new resource to be created. + type: string + value: + description: The value of attribute that is used for + this Virtual Network Gateway Policy Group Member. + type: string + type: object + type: array + priority: + description: The priority for the Virtual Network Gateway + Policy Group. Defaults to 0. + type: number + type: object + type: array + privateIpAddressEnabled: + description: Should private IP be enabled on this gateway for + connections? Changing this forces a new resource to be created. + type: boolean + remoteVnetTrafficEnabled: + description: Is remote vnet traffic that is used to configure + this gateway to accept traffic from other Azure Virtual Networks + enabled? Defaults to false. + type: boolean + resourceGroupName: + description: The name of the resource group in which to create + the Virtual Network Gateway. Changing this forces a new resource + to be created. + type: string + sku: + description: Configuration of the size and capacity of the virtual + network gateway. 
Valid options are Basic, Standard, HighPerformance, + UltraPerformance, ErGw1AZ, ErGw2AZ, ErGw3AZ, VpnGw1, VpnGw2, + VpnGw3, VpnGw4,VpnGw5, VpnGw1AZ, VpnGw2AZ, VpnGw3AZ,VpnGw4AZ + and VpnGw5AZ and depend on the type, vpn_type and generation + arguments. A PolicyBased gateway only supports the Basic SKU. + Further, the UltraPerformance SKU is only supported by an ExpressRoute + gateway. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: + description: The type of the Virtual Network Gateway. Valid options + are Vpn or ExpressRoute. Changing the type forces a new resource + to be created. + type: string + virtualWanTrafficEnabled: + description: Is remote vnet traffic that is used to configure + this gateway to accept traffic from remote Virtual WAN networks + enabled? Defaults to false. + type: boolean + vpnClientConfiguration: + description: A vpn_client_configuration block which is documented + below. In this block the Virtual Network Gateway can be configured + to accept IPSec point-to-site connections. + properties: + aadAudience: + description: |- + The client id of the Azure VPN application. + See Create an Active Directory (AD) tenant for P2S OpenVPN protocol connections for values + type: string + aadIssuer: + description: The STS url for your tenant + type: string + aadTenant: + description: AzureAD Tenant URL + type: string + addressSpace: + description: The address space out of which IP addresses for + vpn clients will be taken. You can provide more than one + address space, e.g. in CIDR notation. + items: + type: string + type: array + ipsecPolicy: + description: An ipsec_policy block as defined below. + properties: + dhGroup: + description: The DH Group, used in IKE Phase 1. Possible + values are DHGroup1, DHGroup2, DHGroup14, DHGroup24, + DHGroup2048, ECP256, ECP384 and None. 
+ type: string + ikeEncryption: + description: The IKE encryption algorithm, used for IKE + Phase 2. Possible values are AES128, AES192, AES256, + DES, DES3, GCMAES128 and GCMAES256. + type: string + ikeIntegrity: + description: The IKE encryption integrity algorithm, used + for IKE Phase 2. Possible values are GCMAES128, GCMAES256, + MD5, SHA1, SHA256 and SHA384. + type: string + ipsecEncryption: + description: The IPSec encryption algorithm, used for + IKE phase 1. Possible values are AES128, AES192, AES256, + DES, DES3, GCMAES128, GCMAES192, GCMAES256 and None. + type: string + ipsecIntegrity: + description: The IPSec integrity algorithm, used for IKE + phase 1. Possible values are GCMAES128, GCMAES192, GCMAES256, + MD5, SHA1 and SHA256. + type: string + pfsGroup: + description: The Pfs Group, used in IKE Phase 2. Possible + values are ECP256, ECP384, PFS1, PFS2, PFS14, PFS24, + PFS2048, PFSMM and None. + type: string + saDataSizeInKilobytes: + description: The IPSec Security Association payload size + in KB for a Site-to-Site VPN tunnel. Possible values + are between 1024 and 2147483647. + type: number + saLifetimeInSeconds: + description: The IPSec Security Association lifetime in + seconds for a Site-to-Site VPN tunnel. Possible values + are between 300 and 172799. + type: number + type: object + radiusServer: + description: One or more radius_server blocks as defined below. + items: + properties: + address: + description: The address of the Radius Server. + type: string + score: + description: The score of the Radius Server determines + the priority of the server. Possible values are between + 1 and 30. + type: number + type: object + type: array + radiusServerAddress: + description: The address of the Radius server. + type: string + radiusServerSecret: + description: The secret used by the Radius server. + type: string + revokedCertificate: + description: One or more revoked_certificate blocks which + are defined below. 
+ items: + properties: + name: + description: The name of the Virtual Network Gateway + Client Connection. + type: string + thumbprint: + description: Specifies the public data of the certificate. + type: string + type: object + type: array + rootCertificate: + description: One or more root_certificate blocks which are + defined below. These root certificates are used to sign + the client certificate used by the VPN clients to connect + to the gateway. + items: + properties: + name: + description: The name of the Virtual Network Gateway + Client Connection. + type: string + publicCertData: + description: The public certificate of the root certificate + authority. The certificate must be provided in Base-64 + encoded X.509 format (PEM). In particular, this argument + must not include the -----BEGIN CERTIFICATE----- or + -----END CERTIFICATE----- markers, nor any newlines. + type: string + type: object + type: array + virtualNetworkGatewayClientConnection: + description: One or more virtual_network_gateway_client_connection + blocks as defined below. + items: + properties: + addressPrefixes: + description: A list of address prefixes for P2S VPN + Client. + items: + type: string + type: array + name: + description: The name of the Virtual Network Gateway + Client Connection. + type: string + policyGroupNames: + description: A list of names of Virtual Network Gateway + Policy Groups. + items: + type: string + type: array + type: object + type: array + vpnAuthTypes: + description: |- + List of the vpn authentication types for the virtual network gateway. + The supported values are AAD, Radius and Certificate. + items: + type: string + type: array + x-kubernetes-list-type: set + vpnClientProtocols: + description: |- + List of the protocols supported by the vpn client. + The supported values are SSTP, IkeV2 and OpenVPN. + Values SSTP and IkeV2 are incompatible with the use of + aad_tenant, aad_audience and aad_issuer. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: object + vpnType: + description: The routing type of the Virtual Network Gateway. + Valid options are RouteBased or PolicyBased. Defaults to RouteBased. + Changing this forces a new resource to be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/network.azure.upbound.io_virtualnetworks.yaml b/package/crds/network.azure.upbound.io_virtualnetworks.yaml index 393bbe35b..ae74b0f5e 100644 --- a/package/crds/network.azure.upbound.io_virtualnetworks.yaml +++ b/package/crds/network.azure.upbound.io_virtualnetworks.yaml @@ -618,3 +618,591 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: VirtualNetwork is the Schema for the VirtualNetworks API. Manages + a virtual network including any configured subnets. Each subnet can optionally + be configured with a security group to be associated with the subnet. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: VirtualNetworkSpec defines the desired state of VirtualNetwork + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + addressSpace: + description: The address space that is used the virtual network. + You can supply more than one address space. + items: + type: string + type: array + bgpCommunity: + description: The BGP community attribute in format :. + type: string + ddosProtectionPlan: + description: A ddos_protection_plan block as documented below. + properties: + enable: + description: Enable/disable DDoS Protection Plan on Virtual + Network. + type: boolean + id: + description: The ID of DDoS Protection Plan. + type: string + type: object + dnsServers: + description: List of IP addresses of DNS servers + items: + type: string + type: array + edgeZone: + description: Specifies the Edge Zone within the Azure Region where + this Virtual Network should exist. Changing this forces a new + Virtual Network to be created. + type: string + encryption: + description: A encryption block as defined below. + properties: + enforcement: + description: Specifies if the encrypted Virtual Network allows + VM that does not support encryption. 
Possible values are + DropUnencrypted and AllowUnencrypted. + type: string + type: object + flowTimeoutInMinutes: + description: The flow timeout in minutes for the Virtual Network, + which is used to enable connection tracking for intra-VM flows. + Possible values are between 4 and 30 minutes. + type: number + location: + description: The location/region where the virtual network is + created. Changing this forces a new resource to be created. + type: string + resourceGroupName: + description: The name of the resource group in which to create + the virtual network. Changing this forces a new resource to + be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + addressSpace: + description: The address space that is used the virtual network. + You can supply more than one address space. 
+ items: + type: string + type: array + bgpCommunity: + description: The BGP community attribute in format :. + type: string + ddosProtectionPlan: + description: A ddos_protection_plan block as documented below. + properties: + enable: + description: Enable/disable DDoS Protection Plan on Virtual + Network. + type: boolean + id: + description: The ID of DDoS Protection Plan. + type: string + type: object + dnsServers: + description: List of IP addresses of DNS servers + items: + type: string + type: array + edgeZone: + description: Specifies the Edge Zone within the Azure Region where + this Virtual Network should exist. Changing this forces a new + Virtual Network to be created. + type: string + encryption: + description: A encryption block as defined below. + properties: + enforcement: + description: Specifies if the encrypted Virtual Network allows + VM that does not support encryption. Possible values are + DropUnencrypted and AllowUnencrypted. + type: string + type: object + flowTimeoutInMinutes: + description: The flow timeout in minutes for the Virtual Network, + which is used to enable connection tracking for intra-VM flows. + Possible values are between 4 and 30 minutes. + type: number + location: + description: The location/region where the virtual network is + created. Changing this forces a new resource to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. 
Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.addressSpace is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.addressSpace) + || (has(self.initProvider) && has(self.initProvider.addressSpace))' + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + status: + description: VirtualNetworkStatus defines the observed state of VirtualNetwork. + properties: + atProvider: + properties: + addressSpace: + description: The address space that is used the virtual network. + You can supply more than one address space. + items: + type: string + type: array + bgpCommunity: + description: The BGP community attribute in format :. + type: string + ddosProtectionPlan: + description: A ddos_protection_plan block as documented below. + properties: + enable: + description: Enable/disable DDoS Protection Plan on Virtual + Network. + type: boolean + id: + description: The ID of DDoS Protection Plan. + type: string + type: object + dnsServers: + description: List of IP addresses of DNS servers + items: + type: string + type: array + edgeZone: + description: Specifies the Edge Zone within the Azure Region where + this Virtual Network should exist. Changing this forces a new + Virtual Network to be created. + type: string + encryption: + description: A encryption block as defined below. + properties: + enforcement: + description: Specifies if the encrypted Virtual Network allows + VM that does not support encryption. Possible values are + DropUnencrypted and AllowUnencrypted. 
+ type: string + type: object + flowTimeoutInMinutes: + description: The flow timeout in minutes for the Virtual Network, + which is used to enable connection tracking for intra-VM flows. + Possible values are between 4 and 30 minutes. + type: number + guid: + description: The GUID of the virtual network. + type: string + id: + description: The virtual NetworkConfiguration ID. + type: string + location: + description: The location/region where the virtual network is + created. Changing this forces a new resource to be created. + type: string + resourceGroupName: + description: The name of the resource group in which to create + the virtual network. Changing this forces a new resource to + be created. + type: string + subnet: + description: Can be specified multiple times to define multiple + subnets. Each subnet block supports fields documented below. + items: + properties: + addressPrefix: + description: The address prefix to use for the subnet. + type: string + id: + description: The ID of this subnet. + type: string + name: + description: The name of the subnet. + type: string + securityGroup: + description: The Network Security Group to associate with + the subnet. (Referenced by id, ie. azurerm_network_security_group.example.id) + type: string + type: object + type: array + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/network.azure.upbound.io_vpngatewayconnections.yaml b/package/crds/network.azure.upbound.io_vpngatewayconnections.yaml index a5e93d42b..d2e59125c 100644 --- a/package/crds/network.azure.upbound.io_vpngatewayconnections.yaml +++ b/package/crds/network.azure.upbound.io_vpngatewayconnections.yaml @@ -1150,3 +1150,1120 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: VPNGatewayConnection is the Schema for the VPNGatewayConnections + API. Manages a VPN Gateway Connection. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: VPNGatewayConnectionSpec defines the desired state of VPNGatewayConnection + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + internetSecurityEnabled: + description: Whether Internet Security is enabled for this VPN + Connection. Defaults to false. + type: boolean + remoteVpnSiteId: + description: The ID of the remote VPN Site, which will connect + to the VPN Gateway. Changing this forces a new VPN Gateway Connection + to be created. + type: string + remoteVpnSiteIdRef: + description: Reference to a VPNSite in network to populate remoteVpnSiteId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + remoteVpnSiteIdSelector: + description: Selector for a VPNSite in network to populate remoteVpnSiteId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + routing: + description: A routing block as defined below. If this is not + specified, there will be a default route table created implicitly. + properties: + associatedRouteTable: + description: The ID of the Route Table associated with this + VPN Connection. + type: string + inboundRouteMapId: + description: The resource ID of the Route Map associated with + this Routing Configuration for inbound learned routes. 
+ type: string + outboundRouteMapId: + description: The resource ID of the Route Map associated with + this Routing Configuration for outbound advertised routes. + type: string + propagatedRouteTable: + description: A propagated_route_table block as defined below. + properties: + labels: + description: A list of labels to assign to this route + table. + items: + type: string + type: array + x-kubernetes-list-type: set + routeTableIds: + description: A list of Route Table IDs to associated with + this VPN Gateway Connection. + items: + type: string + type: array + type: object + type: object + trafficSelectorPolicy: + description: One or more traffic_selector_policy blocks as defined + below. + items: + properties: + localAddressRanges: + description: A list of local address spaces in CIDR format + for this VPN Gateway Connection. + items: + type: string + type: array + x-kubernetes-list-type: set + remoteAddressRanges: + description: A list of remote address spaces in CIDR format + for this VPN Gateway Connection. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + vpnGatewayId: + description: The ID of the VPN Gateway that this VPN Gateway Connection + belongs to. Changing this forces a new VPN Gateway Connection + to be created. + type: string + vpnGatewayIdRef: + description: Reference to a VPNGateway in network to populate + vpnGatewayId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + vpnGatewayIdSelector: + description: Selector for a VPNGateway in network to populate + vpnGatewayId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + vpnLink: + description: One or more vpn_link blocks as defined below. + items: + properties: + bandwidthMbps: + description: The expected connection bandwidth in MBPS. + Defaults to 10. + type: number + bgpEnabled: + description: Should the BGP be enabled? Defaults to false. + Changing this forces a new VPN Gateway Connection to be + created. 
+ type: boolean + connectionMode: + description: The connection mode of this VPN Link. Possible + values are Default, InitiatorOnly and ResponderOnly. Defaults + to Default. + type: string + customBgpAddress: + description: One or more custom_bgp_address blocks as defined + below. + items: + properties: + ipAddress: + description: The custom bgp ip address which belongs + to the IP Configuration. + type: string + ipConfigurationId: + description: The ID of the IP Configuration which + belongs to the VPN Gateway. + type: string + type: object + type: array + egressNatRuleIds: + description: A list of the egress NAT Rule Ids. + items: + type: string + type: array + x-kubernetes-list-type: set + ingressNatRuleIds: + description: A list of the ingress NAT Rule Ids. + items: + type: string + type: array + x-kubernetes-list-type: set + ipsecPolicy: + description: One or more ipsec_policy blocks as defined + above. + items: + properties: + dhGroup: + description: The DH Group used in IKE Phase 1 for + initial SA. Possible values are None, DHGroup1, + DHGroup2, DHGroup14, DHGroup24, DHGroup2048, ECP256, + ECP384. + type: string + encryptionAlgorithm: + description: The IPSec encryption algorithm (IKE phase + 1). Possible values are AES128, AES192, AES256, + DES, DES3, GCMAES128, GCMAES192, GCMAES256, None. + type: string + ikeEncryptionAlgorithm: + description: The IKE encryption algorithm (IKE phase + 2). Possible values are DES, DES3, AES128, AES192, + AES256, GCMAES128, GCMAES256. + type: string + ikeIntegrityAlgorithm: + description: The IKE integrity algorithm (IKE phase + 2). Possible values are MD5, SHA1, SHA256, SHA384, + GCMAES128, GCMAES256. + type: string + integrityAlgorithm: + description: The IPSec integrity algorithm (IKE phase + 1). Possible values are MD5, SHA1, SHA256, GCMAES128, + GCMAES192, GCMAES256. + type: string + pfsGroup: + description: The Pfs Group used in IKE Phase 2 for + the new child SA. 
Possible values are None, PFS1, + PFS2, PFS14, PFS24, PFS2048, PFSMM, ECP256, ECP384. + type: string + saDataSizeKb: + description: The IPSec Security Association (also + called Quick Mode or Phase 2 SA) payload size in + KB for the site to site VPN tunnel. + type: number + saLifetimeSec: + description: The IPSec Security Association (also + called Quick Mode or Phase 2 SA) lifetime in seconds + for the site to site VPN tunnel. + type: number + type: object + type: array + localAzureIpAddressEnabled: + description: Whether to use local Azure IP to initiate connection? + Defaults to false. + type: boolean + name: + description: The name which should be used for this VPN + Link Connection. + type: string + policyBasedTrafficSelectorEnabled: + description: Whether to enable policy-based traffic selectors? + Defaults to false. + type: boolean + protocol: + description: The protocol used for this VPN Link Connection. + Possible values are IKEv1 and IKEv2. Defaults to IKEv2. + type: string + ratelimitEnabled: + description: Should the rate limit be enabled? Defaults + to false. + type: boolean + routeWeight: + description: Routing weight for this VPN Link Connection. + Defaults to 0. + type: number + sharedKey: + description: SharedKey for this VPN Link Connection. + type: string + vpnSiteLinkId: + description: The ID of the connected VPN Site Link. Changing + this forces a new VPN Gateway Connection to be created. + type: string + type: object + type: array + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + internetSecurityEnabled: + description: Whether Internet Security is enabled for this VPN + Connection. Defaults to false. + type: boolean + remoteVpnSiteId: + description: The ID of the remote VPN Site, which will connect + to the VPN Gateway. Changing this forces a new VPN Gateway Connection + to be created. + type: string + remoteVpnSiteIdRef: + description: Reference to a VPNSite in network to populate remoteVpnSiteId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + remoteVpnSiteIdSelector: + description: Selector for a VPNSite in network to populate remoteVpnSiteId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + routing: + description: A routing block as defined below. If this is not + specified, there will be a default route table created implicitly. + properties: + associatedRouteTable: + description: The ID of the Route Table associated with this + VPN Connection. + type: string + inboundRouteMapId: + description: The resource ID of the Route Map associated with + this Routing Configuration for inbound learned routes. + type: string + outboundRouteMapId: + description: The resource ID of the Route Map associated with + this Routing Configuration for outbound advertised routes. + type: string + propagatedRouteTable: + description: A propagated_route_table block as defined below. + properties: + labels: + description: A list of labels to assign to this route + table. + items: + type: string + type: array + x-kubernetes-list-type: set + routeTableIds: + description: A list of Route Table IDs to associated with + this VPN Gateway Connection. + items: + type: string + type: array + type: object + type: object + trafficSelectorPolicy: + description: One or more traffic_selector_policy blocks as defined + below. 
+ items: + properties: + localAddressRanges: + description: A list of local address spaces in CIDR format + for this VPN Gateway Connection. + items: + type: string + type: array + x-kubernetes-list-type: set + remoteAddressRanges: + description: A list of remote address spaces in CIDR format + for this VPN Gateway Connection. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + vpnLink: + description: One or more vpn_link blocks as defined below. + items: + properties: + bandwidthMbps: + description: The expected connection bandwidth in MBPS. + Defaults to 10. + type: number + bgpEnabled: + description: Should the BGP be enabled? Defaults to false. + Changing this forces a new VPN Gateway Connection to be + created. + type: boolean + connectionMode: + description: The connection mode of this VPN Link. Possible + values are Default, InitiatorOnly and ResponderOnly. Defaults + to Default. + type: string + customBgpAddress: + description: One or more custom_bgp_address blocks as defined + below. + items: + properties: + ipAddress: + description: The custom bgp ip address which belongs + to the IP Configuration. + type: string + ipConfigurationId: + description: The ID of the IP Configuration which + belongs to the VPN Gateway. + type: string + type: object + type: array + egressNatRuleIds: + description: A list of the egress NAT Rule Ids. + items: + type: string + type: array + x-kubernetes-list-type: set + ingressNatRuleIds: + description: A list of the ingress NAT Rule Ids. + items: + type: string + type: array + x-kubernetes-list-type: set + ipsecPolicy: + description: One or more ipsec_policy blocks as defined + above. + items: + properties: + dhGroup: + description: The DH Group used in IKE Phase 1 for + initial SA. Possible values are None, DHGroup1, + DHGroup2, DHGroup14, DHGroup24, DHGroup2048, ECP256, + ECP384. + type: string + encryptionAlgorithm: + description: The IPSec encryption algorithm (IKE phase + 1). 
Possible values are AES128, AES192, AES256, + DES, DES3, GCMAES128, GCMAES192, GCMAES256, None. + type: string + ikeEncryptionAlgorithm: + description: The IKE encryption algorithm (IKE phase + 2). Possible values are DES, DES3, AES128, AES192, + AES256, GCMAES128, GCMAES256. + type: string + ikeIntegrityAlgorithm: + description: The IKE integrity algorithm (IKE phase + 2). Possible values are MD5, SHA1, SHA256, SHA384, + GCMAES128, GCMAES256. + type: string + integrityAlgorithm: + description: The IPSec integrity algorithm (IKE phase + 1). Possible values are MD5, SHA1, SHA256, GCMAES128, + GCMAES192, GCMAES256. + type: string + pfsGroup: + description: The Pfs Group used in IKE Phase 2 for + the new child SA. Possible values are None, PFS1, + PFS2, PFS14, PFS24, PFS2048, PFSMM, ECP256, ECP384. + type: string + saDataSizeKb: + description: The IPSec Security Association (also + called Quick Mode or Phase 2 SA) payload size in + KB for the site to site VPN tunnel. + type: number + saLifetimeSec: + description: The IPSec Security Association (also + called Quick Mode or Phase 2 SA) lifetime in seconds + for the site to site VPN tunnel. + type: number + type: object + type: array + localAzureIpAddressEnabled: + description: Whether to use local Azure IP to initiate connection? + Defaults to false. + type: boolean + name: + description: The name which should be used for this VPN + Link Connection. + type: string + policyBasedTrafficSelectorEnabled: + description: Whether to enable policy-based traffic selectors? + Defaults to false. + type: boolean + protocol: + description: The protocol used for this VPN Link Connection. + Possible values are IKEv1 and IKEv2. Defaults to IKEv2. + type: string + ratelimitEnabled: + description: Should the rate limit be enabled? Defaults + to false. + type: boolean + routeWeight: + description: Routing weight for this VPN Link Connection. + Defaults to 0. 
+ type: number + sharedKey: + description: SharedKey for this VPN Link Connection. + type: string + vpnSiteLinkId: + description: The ID of the connected VPN Site Link. Changing + this forces a new VPN Gateway Connection to be created. + type: string + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.vpnLink is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.vpnLink) + || (has(self.initProvider) && has(self.initProvider.vpnLink))' + status: + description: VPNGatewayConnectionStatus defines the observed state of + VPNGatewayConnection. + properties: + atProvider: + properties: + id: + description: The ID of the VPN Gateway Connection. + type: string + internetSecurityEnabled: + description: Whether Internet Security is enabled for this VPN + Connection. Defaults to false. + type: boolean + remoteVpnSiteId: + description: The ID of the remote VPN Site, which will connect + to the VPN Gateway. Changing this forces a new VPN Gateway Connection + to be created. + type: string + routing: + description: A routing block as defined below. If this is not + specified, there will be a default route table created implicitly. + properties: + associatedRouteTable: + description: The ID of the Route Table associated with this + VPN Connection. + type: string + inboundRouteMapId: + description: The resource ID of the Route Map associated with + this Routing Configuration for inbound learned routes. + type: string + outboundRouteMapId: + description: The resource ID of the Route Map associated with + this Routing Configuration for outbound advertised routes. + type: string + propagatedRouteTable: + description: A propagated_route_table block as defined below. + properties: + labels: + description: A list of labels to assign to this route + table. + items: + type: string + type: array + x-kubernetes-list-type: set + routeTableIds: + description: A list of Route Table IDs to associated with + this VPN Gateway Connection. 
+ items: + type: string + type: array + type: object + type: object + trafficSelectorPolicy: + description: One or more traffic_selector_policy blocks as defined + below. + items: + properties: + localAddressRanges: + description: A list of local address spaces in CIDR format + for this VPN Gateway Connection. + items: + type: string + type: array + x-kubernetes-list-type: set + remoteAddressRanges: + description: A list of remote address spaces in CIDR format + for this VPN Gateway Connection. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + vpnGatewayId: + description: The ID of the VPN Gateway that this VPN Gateway Connection + belongs to. Changing this forces a new VPN Gateway Connection + to be created. + type: string + vpnLink: + description: One or more vpn_link blocks as defined below. + items: + properties: + bandwidthMbps: + description: The expected connection bandwidth in MBPS. + Defaults to 10. + type: number + bgpEnabled: + description: Should the BGP be enabled? Defaults to false. + Changing this forces a new VPN Gateway Connection to be + created. + type: boolean + connectionMode: + description: The connection mode of this VPN Link. Possible + values are Default, InitiatorOnly and ResponderOnly. Defaults + to Default. + type: string + customBgpAddress: + description: One or more custom_bgp_address blocks as defined + below. + items: + properties: + ipAddress: + description: The custom bgp ip address which belongs + to the IP Configuration. + type: string + ipConfigurationId: + description: The ID of the IP Configuration which + belongs to the VPN Gateway. + type: string + type: object + type: array + egressNatRuleIds: + description: A list of the egress NAT Rule Ids. + items: + type: string + type: array + x-kubernetes-list-type: set + ingressNatRuleIds: + description: A list of the ingress NAT Rule Ids. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + ipsecPolicy: + description: One or more ipsec_policy blocks as defined + above. + items: + properties: + dhGroup: + description: The DH Group used in IKE Phase 1 for + initial SA. Possible values are None, DHGroup1, + DHGroup2, DHGroup14, DHGroup24, DHGroup2048, ECP256, + ECP384. + type: string + encryptionAlgorithm: + description: The IPSec encryption algorithm (IKE phase + 1). Possible values are AES128, AES192, AES256, + DES, DES3, GCMAES128, GCMAES192, GCMAES256, None. + type: string + ikeEncryptionAlgorithm: + description: The IKE encryption algorithm (IKE phase + 2). Possible values are DES, DES3, AES128, AES192, + AES256, GCMAES128, GCMAES256. + type: string + ikeIntegrityAlgorithm: + description: The IKE integrity algorithm (IKE phase + 2). Possible values are MD5, SHA1, SHA256, SHA384, + GCMAES128, GCMAES256. + type: string + integrityAlgorithm: + description: The IPSec integrity algorithm (IKE phase + 1). Possible values are MD5, SHA1, SHA256, GCMAES128, + GCMAES192, GCMAES256. + type: string + pfsGroup: + description: The Pfs Group used in IKE Phase 2 for + the new child SA. Possible values are None, PFS1, + PFS2, PFS14, PFS24, PFS2048, PFSMM, ECP256, ECP384. + type: string + saDataSizeKb: + description: The IPSec Security Association (also + called Quick Mode or Phase 2 SA) payload size in + KB for the site to site VPN tunnel. + type: number + saLifetimeSec: + description: The IPSec Security Association (also + called Quick Mode or Phase 2 SA) lifetime in seconds + for the site to site VPN tunnel. + type: number + type: object + type: array + localAzureIpAddressEnabled: + description: Whether to use local Azure IP to initiate connection? + Defaults to false. + type: boolean + name: + description: The name which should be used for this VPN + Link Connection. + type: string + policyBasedTrafficSelectorEnabled: + description: Whether to enable policy-based traffic selectors? 
+ Defaults to false. + type: boolean + protocol: + description: The protocol used for this VPN Link Connection. + Possible values are IKEv1 and IKEv2. Defaults to IKEv2. + type: string + ratelimitEnabled: + description: Should the rate limit be enabled? Defaults + to false. + type: boolean + routeWeight: + description: Routing weight for this VPN Link Connection. + Defaults to 0. + type: number + sharedKey: + description: SharedKey for this VPN Link Connection. + type: string + vpnSiteLinkId: + description: The ID of the connected VPN Site Link. Changing + this forces a new VPN Gateway Connection to be created. + type: string + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/network.azure.upbound.io_vpngateways.yaml b/package/crds/network.azure.upbound.io_vpngateways.yaml index 27ed53eed..9f33d8f69 100644 --- a/package/crds/network.azure.upbound.io_vpngateways.yaml +++ b/package/crds/network.azure.upbound.io_vpngateways.yaml @@ -833,3 +833,795 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: VPNGateway is the Schema for the VPNGateways API. Manages a VPN + Gateway within a Virtual Hub. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: VPNGatewaySpec defines the desired state of VPNGateway + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + bgpRouteTranslationForNatEnabled: + description: Is BGP route translation for NAT on this VPN Gateway + enabled? Defaults to false. + type: boolean + bgpSettings: + description: A bgp_settings block as defined below. + properties: + asn: + description: The ASN of the BGP Speaker. Changing this forces + a new resource to be created. + type: number + instance0BgpPeeringAddress: + description: An instance_bgp_peering_address block as defined + below. + properties: + customIps: + description: A list of custom BGP peering addresses to + assign to this instance. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + instance1BgpPeeringAddress: + description: An instance_bgp_peering_address block as defined + below. + properties: + customIps: + description: A list of custom BGP peering addresses to + assign to this instance. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + peerWeight: + description: The weight added to Routes learned from this + BGP Speaker. 
Changing this forces a new resource to be created. + type: number + type: object + location: + description: The Azure location where this VPN Gateway should + be created. Changing this forces a new resource to be created. + type: string + resourceGroupName: + description: The Name of the Resource Group in which this VPN + Gateway should be created. Changing this forces a new resource + to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + routingPreference: + description: Azure routing preference lets you to choose how your + traffic routes between Azure and the internet. You can choose + to route traffic either via the Microsoft network (default value, + Microsoft Network), or via the ISP network (public internet, + set to Internet). More context of the configuration can be found + in the Microsoft Docs to create a VPN Gateway. Changing this + forces a new resource to be created. + type: string + scaleUnit: + description: The Scale Unit for this VPN Gateway. Defaults to + 1. + type: number + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the VPN Gateway. + type: object + x-kubernetes-map-type: granular + virtualHubId: + description: The ID of the Virtual Hub within which this VPN Gateway + should be created. Changing this forces a new resource to be + created. + type: string + virtualHubIdRef: + description: Reference to a VirtualHub in network to populate + virtualHubId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualHubIdSelector: + description: Selector for a VirtualHub in network to populate + virtualHubId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. 
+ InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + bgpRouteTranslationForNatEnabled: + description: Is BGP route translation for NAT on this VPN Gateway + enabled? Defaults to false. + type: boolean + bgpSettings: + description: A bgp_settings block as defined below. + properties: + asn: + description: The ASN of the BGP Speaker. Changing this forces + a new resource to be created. + type: number + instance0BgpPeeringAddress: + description: An instance_bgp_peering_address block as defined + below. + properties: + customIps: + description: A list of custom BGP peering addresses to + assign to this instance. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + instance1BgpPeeringAddress: + description: An instance_bgp_peering_address block as defined + below. + properties: + customIps: + description: A list of custom BGP peering addresses to + assign to this instance. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + peerWeight: + description: The weight added to Routes learned from this + BGP Speaker. Changing this forces a new resource to be created. + type: number + type: object + location: + description: The Azure location where this VPN Gateway should + be created. Changing this forces a new resource to be created. + type: string + routingPreference: + description: Azure routing preference lets you to choose how your + traffic routes between Azure and the internet. 
You can choose + to route traffic either via the Microsoft network (default value, + Microsoft Network), or via the ISP network (public internet, + set to Internet). More context of the configuration can be found + in the Microsoft Docs to create a VPN Gateway. Changing this + forces a new resource to be created. + type: string + scaleUnit: + description: The Scale Unit for this VPN Gateway. Defaults to + 1. + type: number + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the VPN Gateway. + type: object + x-kubernetes-map-type: granular + virtualHubId: + description: The ID of the Virtual Hub within which this VPN Gateway + should be created. Changing this forces a new resource to be + created. + type: string + virtualHubIdRef: + description: Reference to a VirtualHub in network to populate + virtualHubId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualHubIdSelector: + description: Selector for a VirtualHub in network to populate + virtualHubId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + status: + description: VPNGatewayStatus defines the observed state of VPNGateway. + properties: + atProvider: + properties: + bgpRouteTranslationForNatEnabled: + description: Is BGP route translation for NAT on this VPN Gateway + enabled? Defaults to false. + type: boolean + bgpSettings: + description: A bgp_settings block as defined below. + properties: + asn: + description: The ASN of the BGP Speaker. Changing this forces + a new resource to be created. + type: number + bgpPeeringAddress: + description: The Address which should be used for the BGP + Peering. + type: string + instance0BgpPeeringAddress: + description: An instance_bgp_peering_address block as defined + below. 
+ properties: + customIps: + description: A list of custom BGP peering addresses to + assign to this instance. + items: + type: string + type: array + x-kubernetes-list-type: set + defaultIps: + description: The list of default BGP peering addresses + which belong to the pre-defined VPN Gateway IP configuration. + items: + type: string + type: array + x-kubernetes-list-type: set + ipConfigurationId: + description: The pre-defined id of VPN Gateway IP Configuration. + type: string + tunnelIps: + description: The list of tunnel public IP addresses which + belong to the pre-defined VPN Gateway IP configuration. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + instance1BgpPeeringAddress: + description: An instance_bgp_peering_address block as defined + below. + properties: + customIps: + description: A list of custom BGP peering addresses to + assign to this instance. + items: + type: string + type: array + x-kubernetes-list-type: set + defaultIps: + description: The list of default BGP peering addresses + which belong to the pre-defined VPN Gateway IP configuration. + items: + type: string + type: array + x-kubernetes-list-type: set + ipConfigurationId: + description: The pre-defined id of VPN Gateway IP Configuration. + type: string + tunnelIps: + description: The list of tunnel public IP addresses which + belong to the pre-defined VPN Gateway IP configuration. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + peerWeight: + description: The weight added to Routes learned from this + BGP Speaker. Changing this forces a new resource to be created. + type: number + type: object + id: + description: The ID of the VPN Gateway. + type: string + location: + description: The Azure location where this VPN Gateway should + be created. Changing this forces a new resource to be created. 
+ type: string + resourceGroupName: + description: The Name of the Resource Group in which this VPN + Gateway should be created. Changing this forces a new resource + to be created. + type: string + routingPreference: + description: Azure routing preference lets you to choose how your + traffic routes between Azure and the internet. You can choose + to route traffic either via the Microsoft network (default value, + Microsoft Network), or via the ISP network (public internet, + set to Internet). More context of the configuration can be found + in the Microsoft Docs to create a VPN Gateway. Changing this + forces a new resource to be created. + type: string + scaleUnit: + description: The Scale Unit for this VPN Gateway. Defaults to + 1. + type: number + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the VPN Gateway. + type: object + x-kubernetes-map-type: granular + virtualHubId: + description: The ID of the Virtual Hub within which this VPN Gateway + should be created. Changing this forces a new resource to be + created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
+ format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/network.azure.upbound.io_vpnserverconfigurations.yaml b/package/crds/network.azure.upbound.io_vpnserverconfigurations.yaml index 7b86700d1..f567cf459 100644 --- a/package/crds/network.azure.upbound.io_vpnserverconfigurations.yaml +++ b/package/crds/network.azure.upbound.io_vpnserverconfigurations.yaml @@ -918,3 +918,885 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: VPNServerConfiguration is the Schema for the VPNServerConfigurations + API. Manages a VPN Server Configuration. 
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: VPNServerConfigurationSpec defines the desired state of VPNServerConfiguration + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + azureActiveDirectoryAuthentication: + description: A azure_active_directory_authentication block as + defined below. + items: + properties: + audience: + description: The Audience which should be used for authentication. + type: string + issuer: + description: The Issuer which should be used for authentication. + type: string + tenant: + description: The Tenant which should be used for authentication. 
+ type: string + type: object + type: array + clientRevokedCertificate: + description: One or more client_revoked_certificate blocks as + defined below. + items: + properties: + name: + description: A name used to uniquely identify this certificate. + type: string + thumbprint: + description: The Thumbprint of the Certificate. + type: string + type: object + type: array + clientRootCertificate: + description: One or more client_root_certificate blocks as defined + below. + items: + properties: + name: + description: A name used to uniquely identify this certificate. + type: string + publicCertData: + description: The Public Key Data associated with the Certificate. + type: string + type: object + type: array + ipsecPolicy: + description: A ipsec_policy block as defined below. + properties: + dhGroup: + description: The DH Group, used in IKE Phase 1. Possible values + include DHGroup1, DHGroup2, DHGroup14, DHGroup24, DHGroup2048, + ECP256, ECP384 and None. + type: string + ikeEncryption: + description: The IKE encryption algorithm, used for IKE Phase + 2. Possible values include AES128, AES192, AES256, DES, + DES3, GCMAES128 and GCMAES256. + type: string + ikeIntegrity: + description: The IKE encryption integrity algorithm, used + for IKE Phase 2. Possible values include GCMAES128, GCMAES256, + MD5, SHA1, SHA256 and SHA384. + type: string + ipsecEncryption: + description: The IPSec encryption algorithm, used for IKE + phase 1. Possible values include AES128, AES192, AES256, + DES, DES3, GCMAES128, GCMAES192, GCMAES256 and None. + type: string + ipsecIntegrity: + description: The IPSec integrity algorithm, used for IKE phase + 1. Possible values include GCMAES128, GCMAES192, GCMAES256, + MD5, SHA1 and SHA256. + type: string + pfsGroup: + description: The Pfs Group, used in IKE Phase 2. Possible + values include ECP256, ECP384, PFS1, PFS2, PFS14, PFS24, + PFS2048, PFSMM and None. 
+ type: string + saDataSizeKilobytes: + description: The IPSec Security Association payload size in + KB for a Site-to-Site VPN tunnel. + type: number + saLifetimeSeconds: + description: The IPSec Security Association lifetime in seconds + for a Site-to-Site VPN tunnel. + type: number + type: object + location: + description: The Azure location where this VPN Server Configuration + should be created. Changing this forces a new resource to be + created. + type: string + radius: + description: A radius block as defined below. + properties: + clientRootCertificate: + description: One or more client_root_certificate blocks as + defined below. + items: + properties: + name: + description: A name used to uniquely identify this certificate. + type: string + thumbprint: + description: The Thumbprint of the Certificate. + type: string + type: object + type: array + server: + description: One or more server blocks as defined below. + items: + properties: + address: + description: The Address of the Radius Server. + type: string + score: + description: The Score of the Radius Server determines + the priority of the server. Ranges from 1 to 30. + type: number + secretSecretRef: + description: The Secret used to communicate with the + Radius Server. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + required: + - secretSecretRef + type: object + type: array + serverRootCertificate: + description: One or more server_root_certificate blocks as + defined below. + items: + properties: + name: + description: A name used to uniquely identify this certificate. + type: string + publicCertData: + description: The Public Key Data associated with the + Certificate. 
+ type: string + type: object + type: array + type: object + resourceGroupName: + description: The Name of the Resource Group in which this VPN + Server Configuration should be created. Changing this forces + a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + vpnAuthenticationTypes: + description: A list of Authentication Types applicable for this + VPN Server Configuration. Possible values are AAD (Azure Active + Directory), Certificate and Radius. + items: + type: string + type: array + vpnProtocols: + description: A list of VPN Protocols to use for this Server Configuration. + Possible values are IkeV2 and OpenVPN. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + azureActiveDirectoryAuthentication: + description: A azure_active_directory_authentication block as + defined below. + items: + properties: + audience: + description: The Audience which should be used for authentication. 
+ type: string + issuer: + description: The Issuer which should be used for authentication. + type: string + tenant: + description: The Tenant which should be used for authentication. + type: string + type: object + type: array + clientRevokedCertificate: + description: One or more client_revoked_certificate blocks as + defined below. + items: + properties: + name: + description: A name used to uniquely identify this certificate. + type: string + thumbprint: + description: The Thumbprint of the Certificate. + type: string + type: object + type: array + clientRootCertificate: + description: One or more client_root_certificate blocks as defined + below. + items: + properties: + name: + description: A name used to uniquely identify this certificate. + type: string + publicCertData: + description: The Public Key Data associated with the Certificate. + type: string + type: object + type: array + ipsecPolicy: + description: A ipsec_policy block as defined below. + properties: + dhGroup: + description: The DH Group, used in IKE Phase 1. Possible values + include DHGroup1, DHGroup2, DHGroup14, DHGroup24, DHGroup2048, + ECP256, ECP384 and None. + type: string + ikeEncryption: + description: The IKE encryption algorithm, used for IKE Phase + 2. Possible values include AES128, AES192, AES256, DES, + DES3, GCMAES128 and GCMAES256. + type: string + ikeIntegrity: + description: The IKE encryption integrity algorithm, used + for IKE Phase 2. Possible values include GCMAES128, GCMAES256, + MD5, SHA1, SHA256 and SHA384. + type: string + ipsecEncryption: + description: The IPSec encryption algorithm, used for IKE + phase 1. Possible values include AES128, AES192, AES256, + DES, DES3, GCMAES128, GCMAES192, GCMAES256 and None. + type: string + ipsecIntegrity: + description: The IPSec integrity algorithm, used for IKE phase + 1. Possible values include GCMAES128, GCMAES192, GCMAES256, + MD5, SHA1 and SHA256. + type: string + pfsGroup: + description: The Pfs Group, used in IKE Phase 2. 
Possible + values include ECP256, ECP384, PFS1, PFS2, PFS14, PFS24, + PFS2048, PFSMM and None. + type: string + saDataSizeKilobytes: + description: The IPSec Security Association payload size in + KB for a Site-to-Site VPN tunnel. + type: number + saLifetimeSeconds: + description: The IPSec Security Association lifetime in seconds + for a Site-to-Site VPN tunnel. + type: number + type: object + location: + description: The Azure location where this VPN Server Configuration + should be created. Changing this forces a new resource to be + created. + type: string + radius: + description: A radius block as defined below. + properties: + clientRootCertificate: + description: One or more client_root_certificate blocks as + defined below. + items: + properties: + name: + description: A name used to uniquely identify this certificate. + type: string + thumbprint: + description: The Thumbprint of the Certificate. + type: string + type: object + type: array + server: + description: One or more server blocks as defined below. + items: + properties: + address: + description: The Address of the Radius Server. + type: string + score: + description: The Score of the Radius Server determines + the priority of the server. Ranges from 1 to 30. + type: number + type: object + type: array + serverRootCertificate: + description: One or more server_root_certificate blocks as + defined below. + items: + properties: + name: + description: A name used to uniquely identify this certificate. + type: string + publicCertData: + description: The Public Key Data associated with the + Certificate. + type: string + type: object + type: array + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + vpnAuthenticationTypes: + description: A list of Authentication Types applicable for this + VPN Server Configuration. 
Possible values are AAD (Azure Active + Directory), Certificate and Radius. + items: + type: string + type: array + vpnProtocols: + description: A list of VPN Protocols to use for this Server Configuration. + Possible values are IkeV2 and OpenVPN. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.vpnAuthenticationTypes is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.vpnAuthenticationTypes) + || (has(self.initProvider) && has(self.initProvider.vpnAuthenticationTypes))' + status: + description: VPNServerConfigurationStatus defines the observed state of + VPNServerConfiguration. + properties: + atProvider: + properties: + azureActiveDirectoryAuthentication: + description: A azure_active_directory_authentication block as + defined below. + items: + properties: + audience: + description: The Audience which should be used for authentication. + type: string + issuer: + description: The Issuer which should be used for authentication. + type: string + tenant: + description: The Tenant which should be used for authentication. + type: string + type: object + type: array + clientRevokedCertificate: + description: One or more client_revoked_certificate blocks as + defined below. + items: + properties: + name: + description: A name used to uniquely identify this certificate. + type: string + thumbprint: + description: The Thumbprint of the Certificate. + type: string + type: object + type: array + clientRootCertificate: + description: One or more client_root_certificate blocks as defined + below. + items: + properties: + name: + description: A name used to uniquely identify this certificate. + type: string + publicCertData: + description: The Public Key Data associated with the Certificate. 
+ type: string + type: object + type: array + id: + description: The ID of the VPN Server Configuration. + type: string + ipsecPolicy: + description: A ipsec_policy block as defined below. + properties: + dhGroup: + description: The DH Group, used in IKE Phase 1. Possible values + include DHGroup1, DHGroup2, DHGroup14, DHGroup24, DHGroup2048, + ECP256, ECP384 and None. + type: string + ikeEncryption: + description: The IKE encryption algorithm, used for IKE Phase + 2. Possible values include AES128, AES192, AES256, DES, + DES3, GCMAES128 and GCMAES256. + type: string + ikeIntegrity: + description: The IKE encryption integrity algorithm, used + for IKE Phase 2. Possible values include GCMAES128, GCMAES256, + MD5, SHA1, SHA256 and SHA384. + type: string + ipsecEncryption: + description: The IPSec encryption algorithm, used for IKE + phase 1. Possible values include AES128, AES192, AES256, + DES, DES3, GCMAES128, GCMAES192, GCMAES256 and None. + type: string + ipsecIntegrity: + description: The IPSec integrity algorithm, used for IKE phase + 1. Possible values include GCMAES128, GCMAES192, GCMAES256, + MD5, SHA1 and SHA256. + type: string + pfsGroup: + description: The Pfs Group, used in IKE Phase 2. Possible + values include ECP256, ECP384, PFS1, PFS2, PFS14, PFS24, + PFS2048, PFSMM and None. + type: string + saDataSizeKilobytes: + description: The IPSec Security Association payload size in + KB for a Site-to-Site VPN tunnel. + type: number + saLifetimeSeconds: + description: The IPSec Security Association lifetime in seconds + for a Site-to-Site VPN tunnel. + type: number + type: object + location: + description: The Azure location where this VPN Server Configuration + should be created. Changing this forces a new resource to be + created. + type: string + radius: + description: A radius block as defined below. + properties: + clientRootCertificate: + description: One or more client_root_certificate blocks as + defined below. 
+ items: + properties: + name: + description: A name used to uniquely identify this certificate. + type: string + thumbprint: + description: The Thumbprint of the Certificate. + type: string + type: object + type: array + server: + description: One or more server blocks as defined below. + items: + properties: + address: + description: The Address of the Radius Server. + type: string + score: + description: The Score of the Radius Server determines + the priority of the server. Ranges from 1 to 30. + type: number + type: object + type: array + serverRootCertificate: + description: One or more server_root_certificate blocks as + defined below. + items: + properties: + name: + description: A name used to uniquely identify this certificate. + type: string + publicCertData: + description: The Public Key Data associated with the + Certificate. + type: string + type: object + type: array + type: object + resourceGroupName: + description: The Name of the Resource Group in which this VPN + Server Configuration should be created. Changing this forces + a new resource to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + vpnAuthenticationTypes: + description: A list of Authentication Types applicable for this + VPN Server Configuration. Possible values are AAD (Azure Active + Directory), Certificate and Radius. + items: + type: string + type: array + vpnProtocols: + description: A list of VPN Protocols to use for this Server Configuration. + Possible values are IkeV2 and OpenVPN. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. 
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/network.azure.upbound.io_vpnsites.yaml b/package/crds/network.azure.upbound.io_vpnsites.yaml index ea5a84fe5..7ace3210d 100644 --- a/package/crds/network.azure.upbound.io_vpnsites.yaml +++ b/package/crds/network.azure.upbound.io_vpnsites.yaml @@ -862,3 +862,826 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: VPNSite is the Schema for the VPNSites API. Manages a VPN Site. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: VPNSiteSpec defines the desired state of VPNSite + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + addressCidrs: + description: Specifies a list of IP address CIDRs that are located + on your on-premises site. Traffic destined for these address + spaces is routed to your local site. + items: + type: string + type: array + x-kubernetes-list-type: set + deviceModel: + description: The model of the VPN device. + type: string + deviceVendor: + description: The name of the VPN device vendor. + type: string + link: + description: One or more link blocks as defined below. + items: + properties: + bgp: + description: A bgp block as defined above. + properties: + asn: + description: The BGP speaker's ASN. + type: number + peeringAddress: + description: The BGP peering IP address. + type: string + type: object + fqdn: + description: The FQDN of this VPN Site Link. + type: string + ipAddress: + description: The IP address of this VPN Site Link. + type: string + name: + description: The name which should be used for this VPN + Site Link. + type: string + providerName: + description: 'The name of the physical link at the VPN Site. 
+ Example: ATT, Verizon.' + type: string + speedInMbps: + description: The speed of the VPN device at the branch location + in unit of mbps. Defaults to 0. + type: number + type: object + type: array + location: + description: The Azure Region where the VPN Site should exist. + Changing this forces a new VPN Site to be created. + type: string + o365Policy: + description: An o365_policy block as defined below. + properties: + trafficCategory: + description: A traffic_category block as defined above. + properties: + allowEndpointEnabled: + description: Is allow endpoint enabled? The Allow endpoint + is required for connectivity to specific O365 services + and features, but are not as sensitive to network performance + and latency as other endpoint types. Defaults to false. + type: boolean + defaultEndpointEnabled: + description: Is default endpoint enabled? The Default + endpoint represents O365 services and dependencies that + do not require any optimization, and can be treated + by customer networks as normal Internet bound traffic. + Defaults to false. + type: boolean + optimizeEndpointEnabled: + description: Is optimize endpoint enabled? The Optimize + endpoint is required for connectivity to every O365 + service and represents the O365 scenario that is the + most sensitive to network performance, latency, and + availability. Defaults to false. + type: boolean + type: object + type: object + resourceGroupName: + description: The name of the Resource Group where the VPN Site + should exist. Changing this forces a new VPN Site to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + VPN Site. 
+ type: object + x-kubernetes-map-type: granular + virtualWanId: + description: The ID of the Virtual Wan where this VPN site resides + in. Changing this forces a new VPN Site to be created. + type: string + virtualWanIdRef: + description: Reference to a VirtualWAN in network to populate + virtualWanId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualWanIdSelector: + description: Selector for a VirtualWAN in network to populate + virtualWanId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + addressCidrs: + description: Specifies a list of IP address CIDRs that are located + on your on-premises site. Traffic destined for these address + spaces is routed to your local site. + items: + type: string + type: array + x-kubernetes-list-type: set + deviceModel: + description: The model of the VPN device. + type: string + deviceVendor: + description: The name of the VPN device vendor. + type: string + link: + description: One or more link blocks as defined below. + items: + properties: + bgp: + description: A bgp block as defined above. + properties: + asn: + description: The BGP speaker's ASN. + type: number + peeringAddress: + description: The BGP peering IP address. + type: string + type: object + fqdn: + description: The FQDN of this VPN Site Link. + type: string + ipAddress: + description: The IP address of this VPN Site Link. 
+ type: string + name: + description: The name which should be used for this VPN + Site Link. + type: string + providerName: + description: 'The name of the physical link at the VPN Site. + Example: ATT, Verizon.' + type: string + speedInMbps: + description: The speed of the VPN device at the branch location + in unit of mbps. Defaults to 0. + type: number + type: object + type: array + location: + description: The Azure Region where the VPN Site should exist. + Changing this forces a new VPN Site to be created. + type: string + o365Policy: + description: An o365_policy block as defined below. + properties: + trafficCategory: + description: A traffic_category block as defined above. + properties: + allowEndpointEnabled: + description: Is allow endpoint enabled? The Allow endpoint + is required for connectivity to specific O365 services + and features, but are not as sensitive to network performance + and latency as other endpoint types. Defaults to false. + type: boolean + defaultEndpointEnabled: + description: Is default endpoint enabled? The Default + endpoint represents O365 services and dependencies that + do not require any optimization, and can be treated + by customer networks as normal Internet bound traffic. + Defaults to false. + type: boolean + optimizeEndpointEnabled: + description: Is optimize endpoint enabled? The Optimize + endpoint is required for connectivity to every O365 + service and represents the O365 scenario that is the + most sensitive to network performance, latency, and + availability. Defaults to false. + type: boolean + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + VPN Site. + type: object + x-kubernetes-map-type: granular + virtualWanId: + description: The ID of the Virtual Wan where this VPN site resides + in. Changing this forces a new VPN Site to be created. 
+ type: string + virtualWanIdRef: + description: Reference to a VirtualWAN in network to populate + virtualWanId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualWanIdSelector: + description: Selector for a VirtualWAN in network to populate + virtualWanId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + status: + description: VPNSiteStatus defines the observed state of VPNSite. + properties: + atProvider: + properties: + addressCidrs: + description: Specifies a list of IP address CIDRs that are located + on your on-premises site. Traffic destined for these address + spaces is routed to your local site. + items: + type: string + type: array + x-kubernetes-list-type: set + deviceModel: + description: The model of the VPN device. + type: string + deviceVendor: + description: The name of the VPN device vendor. + type: string + id: + description: The ID of the VPN Site. + type: string + link: + description: One or more link blocks as defined below. + items: + properties: + bgp: + description: A bgp block as defined above. + properties: + asn: + description: The BGP speaker's ASN. + type: number + peeringAddress: + description: The BGP peering IP address. + type: string + type: object + fqdn: + description: The FQDN of this VPN Site Link. + type: string + id: + description: The ID of the VPN Site Link. + type: string + ipAddress: + description: The IP address of this VPN Site Link. + type: string + name: + description: The name which should be used for this VPN + Site Link. + type: string + providerName: + description: 'The name of the physical link at the VPN Site. + Example: ATT, Verizon.' + type: string + speedInMbps: + description: The speed of the VPN device at the branch location + in unit of mbps. Defaults to 0. + type: number + type: object + type: array + location: + description: The Azure Region where the VPN Site should exist. 
+ Changing this forces a new VPN Site to be created. + type: string + o365Policy: + description: An o365_policy block as defined below. + properties: + trafficCategory: + description: A traffic_category block as defined above. + properties: + allowEndpointEnabled: + description: Is allow endpoint enabled? The Allow endpoint + is required for connectivity to specific O365 services + and features, but are not as sensitive to network performance + and latency as other endpoint types. Defaults to false. + type: boolean + defaultEndpointEnabled: + description: Is default endpoint enabled? The Default + endpoint represents O365 services and dependencies that + do not require any optimization, and can be treated + by customer networks as normal Internet bound traffic. + Defaults to false. + type: boolean + optimizeEndpointEnabled: + description: Is optimize endpoint enabled? The Optimize + endpoint is required for connectivity to every O365 + service and represents the O365 scenario that is the + most sensitive to network performance, latency, and + availability. Defaults to false. + type: boolean + type: object + type: object + resourceGroupName: + description: The name of the Resource Group where the VPN Site + should exist. Changing this forces a new VPN Site to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + VPN Site. + type: object + x-kubernetes-map-type: granular + virtualWanId: + description: The ID of the Virtual Wan where this VPN site resides + in. Changing this forces a new VPN Site to be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. 
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/network.azure.upbound.io_watcherflowlogs.yaml b/package/crds/network.azure.upbound.io_watcherflowlogs.yaml index b7f95704c..02fc72280 100644 --- a/package/crds/network.azure.upbound.io_watcherflowlogs.yaml +++ b/package/crds/network.azure.upbound.io_watcherflowlogs.yaml @@ -1292,3 +1292,1265 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: WatcherFlowLog is the Schema for the WatcherFlowLogs API. Manages + a Network Watcher Flow Log. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: WatcherFlowLogSpec defines the desired state of WatcherFlowLog + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + enabled: + description: Should Network Flow Logging be Enabled? + type: boolean + location: + description: The location where the Network Watcher Flow Log resides. + Changing this forces a new resource to be created. Defaults + to the location of the Network Watcher. + type: string + networkSecurityGroupId: + description: The ID of the Network Security Group for which to + enable flow logs for. Changing this forces a new resource to + be created. + type: string + networkSecurityGroupIdRef: + description: Reference to a SecurityGroup in network to populate + networkSecurityGroupId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + networkSecurityGroupIdSelector: + description: Selector for a SecurityGroup in network to populate + networkSecurityGroupId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + networkWatcherName: + description: The name of the Network Watcher. Changing this forces + a new resource to be created. + type: string + networkWatcherNameRef: + description: Reference to a Watcher in network to populate networkWatcherName. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + networkWatcherNameSelector: + description: Selector for a Watcher in network to populate networkWatcherName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + resourceGroupName: + description: The name of the resource group in which the Network + Watcher was deployed. Changing this forces a new resource to + be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + retentionPolicy: + description: A retention_policy block as documented below. + properties: + days: + description: The number of days to retain flow log records. + type: number + enabled: + description: Boolean flag to enable/disable retention. + type: boolean + type: object + storageAccountId: + description: The ID of the Storage Account where flow logs are + stored. + type: string + storageAccountIdRef: + description: Reference to a Account in storage to populate storageAccountId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + storageAccountIdSelector: + description: Selector for a Account in storage to populate storageAccountId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Network Watcher Flow Log. + type: object + x-kubernetes-map-type: granular + trafficAnalytics: + description: A traffic_analytics block as documented below. + properties: + enabled: + description: Boolean flag to enable/disable traffic analytics. + type: boolean + intervalInMinutes: + description: How frequently service should do flow analytics + in minutes. Defaults to 60. + type: number + workspaceId: + description: The resource GUID of the attached workspace. + type: string + workspaceIdRef: + description: Reference to a Workspace in operationalinsights + to populate workspaceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + workspaceIdSelector: + description: Selector for a Workspace in operationalinsights + to populate workspaceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + workspaceRegion: + description: The location of the attached workspace. + type: string + workspaceResourceId: + description: The resource ID of the attached workspace. + type: string + workspaceResourceIdRef: + description: Reference to a Workspace in operationalinsights + to populate workspaceResourceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + workspaceResourceIdSelector: + description: Selector for a Workspace in operationalinsights + to populate workspaceResourceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + version: + description: The version (revision) of the flow log. Possible + values are 1 and 2. + type: number + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + enabled: + description: Should Network Flow Logging be Enabled? + type: boolean + location: + description: The location where the Network Watcher Flow Log resides. + Changing this forces a new resource to be created. Defaults + to the location of the Network Watcher. + type: string + networkSecurityGroupId: + description: The ID of the Network Security Group for which to + enable flow logs for. Changing this forces a new resource to + be created. 
+ type: string + networkSecurityGroupIdRef: + description: Reference to a SecurityGroup in network to populate + networkSecurityGroupId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + networkSecurityGroupIdSelector: + description: Selector for a SecurityGroup in network to populate + networkSecurityGroupId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + retentionPolicy: + description: A retention_policy block as documented below. + properties: + days: + description: The number of days to retain flow log records. + type: number + enabled: + description: Boolean flag to enable/disable retention. + type: boolean + type: object + storageAccountId: + description: The ID of the Storage Account where flow logs are + stored. + type: string + storageAccountIdRef: + description: Reference to a Account in storage to populate storageAccountId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + storageAccountIdSelector: + description: Selector for a Account in storage to populate storageAccountId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Network Watcher Flow Log. + type: object + x-kubernetes-map-type: granular + trafficAnalytics: + description: A traffic_analytics block as documented below. + properties: + enabled: + description: Boolean flag to enable/disable traffic analytics. + type: boolean + intervalInMinutes: + description: How frequently service should do flow analytics + in minutes. Defaults to 60. + type: number + workspaceId: + description: The resource GUID of the attached workspace. + type: string + workspaceIdRef: + description: Reference to a Workspace in operationalinsights + to populate workspaceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + workspaceIdSelector: + description: Selector for a Workspace in operationalinsights + to populate workspaceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + workspaceRegion: + description: The location of the attached workspace. 
+ type: string + workspaceResourceId: + description: The resource ID of the attached workspace. + type: string + workspaceResourceIdRef: + description: Reference to a Workspace in operationalinsights + to populate workspaceResourceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + workspaceResourceIdSelector: + description: Selector for a Workspace in operationalinsights + to populate workspaceResourceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + version: + description: The version (revision) of the flow log. Possible + values are 1 and 2. + type: number + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.enabled is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.enabled) + || (has(self.initProvider) && has(self.initProvider.enabled))' + - message: spec.forProvider.retentionPolicy is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.retentionPolicy) + || (has(self.initProvider) && has(self.initProvider.retentionPolicy))' + status: + description: WatcherFlowLogStatus defines the observed state of WatcherFlowLog. + properties: + atProvider: + properties: + enabled: + description: Should Network Flow Logging be Enabled? + type: boolean + id: + description: The ID of the Network Watcher. + type: string + location: + description: The location where the Network Watcher Flow Log resides. + Changing this forces a new resource to be created. Defaults + to the location of the Network Watcher. + type: string + networkSecurityGroupId: + description: The ID of the Network Security Group for which to + enable flow logs for. Changing this forces a new resource to + be created. + type: string + networkWatcherName: + description: The name of the Network Watcher. Changing this forces + a new resource to be created. + type: string + resourceGroupName: + description: The name of the resource group in which the Network + Watcher was deployed. Changing this forces a new resource to + be created. + type: string + retentionPolicy: + description: A retention_policy block as documented below. + properties: + days: + description: The number of days to retain flow log records. + type: number + enabled: + description: Boolean flag to enable/disable retention. 
+ type: boolean + type: object + storageAccountId: + description: The ID of the Storage Account where flow logs are + stored. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Network Watcher Flow Log. + type: object + x-kubernetes-map-type: granular + trafficAnalytics: + description: A traffic_analytics block as documented below. + properties: + enabled: + description: Boolean flag to enable/disable traffic analytics. + type: boolean + intervalInMinutes: + description: How frequently service should do flow analytics + in minutes. Defaults to 60. + type: number + workspaceId: + description: The resource GUID of the attached workspace. + type: string + workspaceRegion: + description: The location of the attached workspace. + type: string + workspaceResourceId: + description: The resource ID of the attached workspace. + type: string + type: object + version: + description: The version (revision) of the flow log. Possible + values are 1 and 2. + type: number + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. 
+ type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/network.azure.upbound.io_webapplicationfirewallpolicies.yaml b/package/crds/network.azure.upbound.io_webapplicationfirewallpolicies.yaml index 808a31940..a2603b000 100644 --- a/package/crds/network.azure.upbound.io_webapplicationfirewallpolicies.yaml +++ b/package/crds/network.azure.upbound.io_webapplicationfirewallpolicies.yaml @@ -1387,3 +1387,1318 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: WebApplicationFirewallPolicy is the Schema for the WebApplicationFirewallPolicys + API. Manages a Azure Web Application Firewall Policy instance. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: WebApplicationFirewallPolicySpec defines the desired state + of WebApplicationFirewallPolicy + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + customRules: + description: One or more custom_rules blocks as defined below. + items: + properties: + action: + description: Type of action. Possible values are Allow, + Block and Log. + type: string + enabled: + description: Describes if the policy is in enabled state + or disabled state. Defaults to true. + type: boolean + groupRateLimitBy: + description: Specifies what grouping the rate limit will + count requests by. Possible values are GeoLocation, ClientAddr + and None. 
+ type: string + matchConditions: + description: One or more match_conditions blocks as defined + below. + items: + properties: + matchValues: + description: A list of match values. This is Required + when the operator is not Any. + items: + type: string + type: array + matchVariables: + description: One or more match_variables blocks as + defined below. + items: + properties: + selector: + description: Specifies which elements in the + collection this rule applies to. + type: string + variableName: + description: The name of the Match Variable. + Possible values are RemoteAddr, RequestMethod, + QueryString, PostArgs, RequestUri, RequestHeaders, + RequestBody and RequestCookies. + type: string + type: object + type: array + negationCondition: + description: Describes if this is negate condition + or not + type: boolean + operator: + description: Describes operator to be matched. Possible + values are Any, IPMatch, GeoMatch, Equal, Contains, + LessThan, GreaterThan, LessThanOrEqual, GreaterThanOrEqual, + BeginsWith, EndsWith and Regex. + type: string + transforms: + description: A list of transformations to do before + the match is attempted. Possible values are HtmlEntityDecode, + Lowercase, RemoveNulls, Trim, UrlDecode and UrlEncode. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + name: + description: Gets name of the resource that is unique within + a policy. This name can be used to access the resource. + type: string + priority: + description: Describes priority of the rule. Rules with + a lower value will be evaluated before rules with a higher + value. + type: number + rateLimitDuration: + description: Specifies the duration at which the rate limit + policy will be applied. Should be used with RateLimitRule + rule type. Possible values are FiveMins and OneMin. + type: string + rateLimitThreshold: + description: Specifies the threshold value for the rate + limit policy. 
Must be greater than or equal to 1 if provided. + type: number + ruleType: + description: Describes the type of rule. Possible values + are MatchRule, RateLimitRule and Invalid. + type: string + type: object + type: array + location: + description: Resource location. Changing this forces a new resource + to be created. + type: string + managedRules: + description: A managed_rules blocks as defined below. + properties: + exclusion: + description: One or more exclusion block defined below. + items: + properties: + excludedRuleSet: + description: One or more excluded_rule_set block defined + below. + properties: + ruleGroup: + description: One or more rule_group block defined + below. + items: + properties: + excludedRules: + description: One or more Rule IDs for exclusion. + items: + type: string + type: array + ruleGroupName: + description: The name of the Rule Group. Possible + values are BadBots, crs_20_protocol_violations, + crs_21_protocol_anomalies, crs_23_request_limits, + crs_30_http_policy, crs_35_bad_robots, crs_40_generic_attacks, + crs_41_sql_injection_attacks, crs_41_xss_attacks, + crs_42_tight_security, crs_45_trojans, crs_49_inbound_blocking, + General, GoodBots, KnownBadBots, Known-CVEs, + REQUEST-911-METHOD-ENFORCEMENT, REQUEST-913-SCANNER-DETECTION, + REQUEST-920-PROTOCOL-ENFORCEMENT, REQUEST-921-PROTOCOL-ATTACK, + REQUEST-930-APPLICATION-ATTACK-LFI, REQUEST-931-APPLICATION-ATTACK-RFI, + REQUEST-932-APPLICATION-ATTACK-RCE, REQUEST-933-APPLICATION-ATTACK-PHP, + REQUEST-941-APPLICATION-ATTACK-XSS, REQUEST-942-APPLICATION-ATTACK-SQLI, + REQUEST-943-APPLICATION-ATTACK-SESSION-FIXATION, + REQUEST-944-APPLICATION-ATTACK-JAVA, UnknownBots, + METHOD-ENFORCEMENT, PROTOCOL-ENFORCEMENT, + PROTOCOL-ATTACK, LFI, RFI, RCE, PHP, NODEJS, + XSS, SQLI, FIX, JAVA, MS-ThreatIntel-WebShells, + MS-ThreatIntel-AppSec, MS-ThreatIntel-SQLI + and MS-ThreatIntel-CVEsMS-ThreatIntel-WebShells`,. 
+ type: string + type: object + type: array + type: + description: 'The rule set type. Possible values: + Microsoft_BotManagerRuleSet, Microsoft_DefaultRuleSet + and OWASP. Defaults to OWASP.' + type: string + version: + description: 'The rule set version. Possible values: + 0.1, 1.0, 2.1, 2.2.9, 3.0, 3.1 and 3.2.' + type: string + type: object + matchVariable: + description: Specifies the variable to be scrubbed from + the logs. Possible values are RequestHeaderNames, + RequestCookieNames, RequestArgNames, RequestPostArgNames, + RequestJSONArgNames and RequestIPAddress. + type: string + selector: + description: Specifies which elements in the collection + this rule applies to. + type: string + selectorMatchOperator: + description: Specifies the operating on the selector. + Possible values are Equals and EqualsAny. Defaults + to Equals. + type: string + type: object + type: array + managedRuleSet: + description: One or more managed_rule_set block defined below. + items: + properties: + ruleGroupOverride: + description: One or more rule_group_override block defined + below. + items: + properties: + disabledRules: + items: + type: string + type: array + rule: + description: One or more rule block defined below. + items: + properties: + action: + description: Describes the override action + to be applied when rule matches. Possible + values are Allow, AnomalyScoring, Block + and Log. + type: string + enabled: + description: Whether this rule is enabled. + Defaults to true. + type: boolean + id: + description: Identifier for the managed + rule. + type: string + type: object + type: array + ruleGroupName: + description: The name of the Rule Group. 
Possible + values are BadBots, crs_20_protocol_violations, + crs_21_protocol_anomalies, crs_23_request_limits, + crs_30_http_policy, crs_35_bad_robots, crs_40_generic_attacks, + crs_41_sql_injection_attacks, crs_41_xss_attacks, + crs_42_tight_security, crs_45_trojans, crs_49_inbound_blocking, + General, GoodBots, KnownBadBots, Known-CVEs, + REQUEST-911-METHOD-ENFORCEMENT, REQUEST-913-SCANNER-DETECTION, + REQUEST-920-PROTOCOL-ENFORCEMENT, REQUEST-921-PROTOCOL-ATTACK, + REQUEST-930-APPLICATION-ATTACK-LFI, REQUEST-931-APPLICATION-ATTACK-RFI, + REQUEST-932-APPLICATION-ATTACK-RCE, REQUEST-933-APPLICATION-ATTACK-PHP, + REQUEST-941-APPLICATION-ATTACK-XSS, REQUEST-942-APPLICATION-ATTACK-SQLI, + REQUEST-943-APPLICATION-ATTACK-SESSION-FIXATION, + REQUEST-944-APPLICATION-ATTACK-JAVA, UnknownBots, + METHOD-ENFORCEMENT, PROTOCOL-ENFORCEMENT, PROTOCOL-ATTACK, + LFI, RFI, RCE, PHP, NODEJS, XSS, SQLI, FIX, + JAVA, MS-ThreatIntel-WebShells, MS-ThreatIntel-AppSec, + MS-ThreatIntel-SQLI and MS-ThreatIntel-CVEsMS-ThreatIntel-WebShells`,. + type: string + type: object + type: array + type: + description: 'The rule set type. Possible values: Microsoft_BotManagerRuleSet, + Microsoft_DefaultRuleSet and OWASP. Defaults to OWASP.' + type: string + version: + description: 'The rule set version. Possible values: + 0.1, 1.0, 2.1, 2.2.9, 3.0, 3.1 and 3.2.' + type: string + type: object + type: array + type: object + policySettings: + description: A policy_settings block as defined below. + properties: + enabled: + description: Describes if the policy is in enabled state or + disabled state. Defaults to true. + type: boolean + fileUploadLimitInMb: + description: The File Upload Limit in MB. Accepted values + are in the range 1 to 4000. Defaults to 100. + type: number + logScrubbing: + description: One log_scrubbing block as defined below. + properties: + enabled: + description: Whether this rule is enabled. Defaults to + true. 
+ type: boolean + rule: + description: One or more rule block defined below. + items: + properties: + enabled: + description: Whether this rule is enabled. Defaults + to true. + type: boolean + matchVariable: + description: Specifies the variable to be scrubbed + from the logs. Possible values are RequestHeaderNames, + RequestCookieNames, RequestArgNames, RequestPostArgNames, + RequestJSONArgNames and RequestIPAddress. + type: string + selector: + description: |- + Specifies which elements in the collection this rule applies to. + When matchVariable is a collection, operator used to specify which elements in the collection this rule applies to. + type: string + selectorMatchOperator: + description: Specifies the operating on the selector. + Possible values are Equals and EqualsAny. Defaults + to Equals. + type: string + type: object + type: array + type: object + maxRequestBodySizeInKb: + description: The Maximum Request Body Size in KB. Accepted + values are in the range 8 to 2000. Defaults to 128. + type: number + mode: + description: Describes if it is in detection mode or prevention + mode at the policy level. Valid values are Detection and + Prevention. Defaults to Prevention. + type: string + requestBodyCheck: + description: Is Request Body Inspection enabled? Defaults + to true. + type: boolean + requestBodyInspectLimitInKb: + description: Specifies the maximum request body inspection + limit in KB for the Web Application Firewall. Defaults to + 128. + type: number + type: object + resourceGroupName: + description: The name of the resource group. Changing this forces + a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the Web Application + Firewall Policy. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + customRules: + description: One or more custom_rules blocks as defined below. + items: + properties: + action: + description: Type of action. Possible values are Allow, + Block and Log. + type: string + enabled: + description: Describes if the policy is in enabled state + or disabled state. Defaults to true. + type: boolean + groupRateLimitBy: + description: Specifies what grouping the rate limit will + count requests by. Possible values are GeoLocation, ClientAddr + and None. + type: string + matchConditions: + description: One or more match_conditions blocks as defined + below. + items: + properties: + matchValues: + description: A list of match values. This is Required + when the operator is not Any. + items: + type: string + type: array + matchVariables: + description: One or more match_variables blocks as + defined below. + items: + properties: + selector: + description: Specifies which elements in the + collection this rule applies to. + type: string + variableName: + description: The name of the Match Variable. 
+ Possible values are RemoteAddr, RequestMethod, + QueryString, PostArgs, RequestUri, RequestHeaders, + RequestBody and RequestCookies. + type: string + type: object + type: array + negationCondition: + description: Describes if this is negate condition + or not + type: boolean + operator: + description: Describes operator to be matched. Possible + values are Any, IPMatch, GeoMatch, Equal, Contains, + LessThan, GreaterThan, LessThanOrEqual, GreaterThanOrEqual, + BeginsWith, EndsWith and Regex. + type: string + transforms: + description: A list of transformations to do before + the match is attempted. Possible values are HtmlEntityDecode, + Lowercase, RemoveNulls, Trim, UrlDecode and UrlEncode. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + name: + description: Gets name of the resource that is unique within + a policy. This name can be used to access the resource. + type: string + priority: + description: Describes priority of the rule. Rules with + a lower value will be evaluated before rules with a higher + value. + type: number + rateLimitDuration: + description: Specifies the duration at which the rate limit + policy will be applied. Should be used with RateLimitRule + rule type. Possible values are FiveMins and OneMin. + type: string + rateLimitThreshold: + description: Specifies the threshold value for the rate + limit policy. Must be greater than or equal to 1 if provided. + type: number + ruleType: + description: Describes the type of rule. Possible values + are MatchRule, RateLimitRule and Invalid. + type: string + type: object + type: array + location: + description: Resource location. Changing this forces a new resource + to be created. + type: string + managedRules: + description: A managed_rules blocks as defined below. + properties: + exclusion: + description: One or more exclusion block defined below. 
+ items: + properties: + excludedRuleSet: + description: One or more excluded_rule_set block defined + below. + properties: + ruleGroup: + description: One or more rule_group block defined + below. + items: + properties: + excludedRules: + description: One or more Rule IDs for exclusion. + items: + type: string + type: array + ruleGroupName: + description: The name of the Rule Group. Possible + values are BadBots, crs_20_protocol_violations, + crs_21_protocol_anomalies, crs_23_request_limits, + crs_30_http_policy, crs_35_bad_robots, crs_40_generic_attacks, + crs_41_sql_injection_attacks, crs_41_xss_attacks, + crs_42_tight_security, crs_45_trojans, crs_49_inbound_blocking, + General, GoodBots, KnownBadBots, Known-CVEs, + REQUEST-911-METHOD-ENFORCEMENT, REQUEST-913-SCANNER-DETECTION, + REQUEST-920-PROTOCOL-ENFORCEMENT, REQUEST-921-PROTOCOL-ATTACK, + REQUEST-930-APPLICATION-ATTACK-LFI, REQUEST-931-APPLICATION-ATTACK-RFI, + REQUEST-932-APPLICATION-ATTACK-RCE, REQUEST-933-APPLICATION-ATTACK-PHP, + REQUEST-941-APPLICATION-ATTACK-XSS, REQUEST-942-APPLICATION-ATTACK-SQLI, + REQUEST-943-APPLICATION-ATTACK-SESSION-FIXATION, + REQUEST-944-APPLICATION-ATTACK-JAVA, UnknownBots, + METHOD-ENFORCEMENT, PROTOCOL-ENFORCEMENT, + PROTOCOL-ATTACK, LFI, RFI, RCE, PHP, NODEJS, + XSS, SQLI, FIX, JAVA, MS-ThreatIntel-WebShells, + MS-ThreatIntel-AppSec, MS-ThreatIntel-SQLI + and MS-ThreatIntel-CVEsMS-ThreatIntel-WebShells`,. + type: string + type: object + type: array + type: + description: 'The rule set type. Possible values: + Microsoft_BotManagerRuleSet, Microsoft_DefaultRuleSet + and OWASP. Defaults to OWASP.' + type: string + version: + description: 'The rule set version. Possible values: + 0.1, 1.0, 2.1, 2.2.9, 3.0, 3.1 and 3.2.' + type: string + type: object + matchVariable: + description: Specifies the variable to be scrubbed from + the logs. 
Possible values are RequestHeaderNames, + RequestCookieNames, RequestArgNames, RequestPostArgNames, + RequestJSONArgNames and RequestIPAddress. + type: string + selector: + description: Specifies which elements in the collection + this rule applies to. + type: string + selectorMatchOperator: + description: Specifies the operating on the selector. + Possible values are Equals and EqualsAny. Defaults + to Equals. + type: string + type: object + type: array + managedRuleSet: + description: One or more managed_rule_set block defined below. + items: + properties: + ruleGroupOverride: + description: One or more rule_group_override block defined + below. + items: + properties: + disabledRules: + items: + type: string + type: array + rule: + description: One or more rule block defined below. + items: + properties: + action: + description: Describes the override action + to be applied when rule matches. Possible + values are Allow, AnomalyScoring, Block + and Log. + type: string + enabled: + description: Whether this rule is enabled. + Defaults to true. + type: boolean + id: + description: Identifier for the managed + rule. + type: string + type: object + type: array + ruleGroupName: + description: The name of the Rule Group. 
Possible + values are BadBots, crs_20_protocol_violations, + crs_21_protocol_anomalies, crs_23_request_limits, + crs_30_http_policy, crs_35_bad_robots, crs_40_generic_attacks, + crs_41_sql_injection_attacks, crs_41_xss_attacks, + crs_42_tight_security, crs_45_trojans, crs_49_inbound_blocking, + General, GoodBots, KnownBadBots, Known-CVEs, + REQUEST-911-METHOD-ENFORCEMENT, REQUEST-913-SCANNER-DETECTION, + REQUEST-920-PROTOCOL-ENFORCEMENT, REQUEST-921-PROTOCOL-ATTACK, + REQUEST-930-APPLICATION-ATTACK-LFI, REQUEST-931-APPLICATION-ATTACK-RFI, + REQUEST-932-APPLICATION-ATTACK-RCE, REQUEST-933-APPLICATION-ATTACK-PHP, + REQUEST-941-APPLICATION-ATTACK-XSS, REQUEST-942-APPLICATION-ATTACK-SQLI, + REQUEST-943-APPLICATION-ATTACK-SESSION-FIXATION, + REQUEST-944-APPLICATION-ATTACK-JAVA, UnknownBots, + METHOD-ENFORCEMENT, PROTOCOL-ENFORCEMENT, PROTOCOL-ATTACK, + LFI, RFI, RCE, PHP, NODEJS, XSS, SQLI, FIX, + JAVA, MS-ThreatIntel-WebShells, MS-ThreatIntel-AppSec, + MS-ThreatIntel-SQLI and MS-ThreatIntel-CVEsMS-ThreatIntel-WebShells`,. + type: string + type: object + type: array + type: + description: 'The rule set type. Possible values: Microsoft_BotManagerRuleSet, + Microsoft_DefaultRuleSet and OWASP. Defaults to OWASP.' + type: string + version: + description: 'The rule set version. Possible values: + 0.1, 1.0, 2.1, 2.2.9, 3.0, 3.1 and 3.2.' + type: string + type: object + type: array + type: object + policySettings: + description: A policy_settings block as defined below. + properties: + enabled: + description: Describes if the policy is in enabled state or + disabled state. Defaults to true. + type: boolean + fileUploadLimitInMb: + description: The File Upload Limit in MB. Accepted values + are in the range 1 to 4000. Defaults to 100. + type: number + logScrubbing: + description: One log_scrubbing block as defined below. + properties: + enabled: + description: Whether this rule is enabled. Defaults to + true. 
+ type: boolean + rule: + description: One or more rule block defined below. + items: + properties: + enabled: + description: Whether this rule is enabled. Defaults + to true. + type: boolean + matchVariable: + description: Specifies the variable to be scrubbed + from the logs. Possible values are RequestHeaderNames, + RequestCookieNames, RequestArgNames, RequestPostArgNames, + RequestJSONArgNames and RequestIPAddress. + type: string + selector: + description: |- + Specifies which elements in the collection this rule applies to. + When matchVariable is a collection, operator used to specify which elements in the collection this rule applies to. + type: string + selectorMatchOperator: + description: Specifies the operating on the selector. + Possible values are Equals and EqualsAny. Defaults + to Equals. + type: string + type: object + type: array + type: object + maxRequestBodySizeInKb: + description: The Maximum Request Body Size in KB. Accepted + values are in the range 8 to 2000. Defaults to 128. + type: number + mode: + description: Describes if it is in detection mode or prevention + mode at the policy level. Valid values are Detection and + Prevention. Defaults to Prevention. + type: string + requestBodyCheck: + description: Is Request Body Inspection enabled? Defaults + to true. + type: boolean + requestBodyInspectLimitInKb: + description: Specifies the maximum request body inspection + limit in KB for the Web Application Firewall. Defaults to + 128. + type: number + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the Web Application + Firewall Policy. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. 
+ This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.managedRules is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.managedRules) + || (has(self.initProvider) && has(self.initProvider.managedRules))' + status: + description: WebApplicationFirewallPolicyStatus defines the observed state + of WebApplicationFirewallPolicy. + properties: + atProvider: + properties: + customRules: + description: One or more custom_rules blocks as defined below. + items: + properties: + action: + description: Type of action. Possible values are Allow, + Block and Log. + type: string + enabled: + description: Describes if the policy is in enabled state + or disabled state. Defaults to true. + type: boolean + groupRateLimitBy: + description: Specifies what grouping the rate limit will + count requests by. Possible values are GeoLocation, ClientAddr + and None. + type: string + matchConditions: + description: One or more match_conditions blocks as defined + below. + items: + properties: + matchValues: + description: A list of match values. This is Required + when the operator is not Any. + items: + type: string + type: array + matchVariables: + description: One or more match_variables blocks as + defined below. + items: + properties: + selector: + description: Specifies which elements in the + collection this rule applies to. + type: string + variableName: + description: The name of the Match Variable. 
+ Possible values are RemoteAddr, RequestMethod, + QueryString, PostArgs, RequestUri, RequestHeaders, + RequestBody and RequestCookies. + type: string + type: object + type: array + negationCondition: + description: Describes if this is negate condition + or not + type: boolean + operator: + description: Describes operator to be matched. Possible + values are Any, IPMatch, GeoMatch, Equal, Contains, + LessThan, GreaterThan, LessThanOrEqual, GreaterThanOrEqual, + BeginsWith, EndsWith and Regex. + type: string + transforms: + description: A list of transformations to do before + the match is attempted. Possible values are HtmlEntityDecode, + Lowercase, RemoveNulls, Trim, UrlDecode and UrlEncode. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + name: + description: Gets name of the resource that is unique within + a policy. This name can be used to access the resource. + type: string + priority: + description: Describes priority of the rule. Rules with + a lower value will be evaluated before rules with a higher + value. + type: number + rateLimitDuration: + description: Specifies the duration at which the rate limit + policy will be applied. Should be used with RateLimitRule + rule type. Possible values are FiveMins and OneMin. + type: string + rateLimitThreshold: + description: Specifies the threshold value for the rate + limit policy. Must be greater than or equal to 1 if provided. + type: number + ruleType: + description: Describes the type of rule. Possible values + are MatchRule, RateLimitRule and Invalid. + type: string + type: object + type: array + httpListenerIds: + description: A list of HTTP Listener IDs from an azurerm_application_gateway. + items: + type: string + type: array + id: + description: The ID of the Web Application Firewall Policy. + type: string + location: + description: Resource location. Changing this forces a new resource + to be created. 
+ type: string + managedRules: + description: A managed_rules blocks as defined below. + properties: + exclusion: + description: One or more exclusion block defined below. + items: + properties: + excludedRuleSet: + description: One or more excluded_rule_set block defined + below. + properties: + ruleGroup: + description: One or more rule_group block defined + below. + items: + properties: + excludedRules: + description: One or more Rule IDs for exclusion. + items: + type: string + type: array + ruleGroupName: + description: The name of the Rule Group. Possible + values are BadBots, crs_20_protocol_violations, + crs_21_protocol_anomalies, crs_23_request_limits, + crs_30_http_policy, crs_35_bad_robots, crs_40_generic_attacks, + crs_41_sql_injection_attacks, crs_41_xss_attacks, + crs_42_tight_security, crs_45_trojans, crs_49_inbound_blocking, + General, GoodBots, KnownBadBots, Known-CVEs, + REQUEST-911-METHOD-ENFORCEMENT, REQUEST-913-SCANNER-DETECTION, + REQUEST-920-PROTOCOL-ENFORCEMENT, REQUEST-921-PROTOCOL-ATTACK, + REQUEST-930-APPLICATION-ATTACK-LFI, REQUEST-931-APPLICATION-ATTACK-RFI, + REQUEST-932-APPLICATION-ATTACK-RCE, REQUEST-933-APPLICATION-ATTACK-PHP, + REQUEST-941-APPLICATION-ATTACK-XSS, REQUEST-942-APPLICATION-ATTACK-SQLI, + REQUEST-943-APPLICATION-ATTACK-SESSION-FIXATION, + REQUEST-944-APPLICATION-ATTACK-JAVA, UnknownBots, + METHOD-ENFORCEMENT, PROTOCOL-ENFORCEMENT, + PROTOCOL-ATTACK, LFI, RFI, RCE, PHP, NODEJS, + XSS, SQLI, FIX, JAVA, MS-ThreatIntel-WebShells, + MS-ThreatIntel-AppSec, MS-ThreatIntel-SQLI + and MS-ThreatIntel-CVEsMS-ThreatIntel-WebShells`,. + type: string + type: object + type: array + type: + description: 'The rule set type. Possible values: + Microsoft_BotManagerRuleSet, Microsoft_DefaultRuleSet + and OWASP. Defaults to OWASP.' + type: string + version: + description: 'The rule set version. Possible values: + 0.1, 1.0, 2.1, 2.2.9, 3.0, 3.1 and 3.2.' 
+ type: string + type: object + matchVariable: + description: Specifies the variable to be scrubbed from + the logs. Possible values are RequestHeaderNames, + RequestCookieNames, RequestArgNames, RequestPostArgNames, + RequestJSONArgNames and RequestIPAddress. + type: string + selector: + description: Specifies which elements in the collection + this rule applies to. + type: string + selectorMatchOperator: + description: Specifies the operating on the selector. + Possible values are Equals and EqualsAny. Defaults + to Equals. + type: string + type: object + type: array + managedRuleSet: + description: One or more managed_rule_set block defined below. + items: + properties: + ruleGroupOverride: + description: One or more rule_group_override block defined + below. + items: + properties: + disabledRules: + items: + type: string + type: array + rule: + description: One or more rule block defined below. + items: + properties: + action: + description: Describes the override action + to be applied when rule matches. Possible + values are Allow, AnomalyScoring, Block + and Log. + type: string + enabled: + description: Whether this rule is enabled. + Defaults to true. + type: boolean + id: + description: Identifier for the managed + rule. + type: string + type: object + type: array + ruleGroupName: + description: The name of the Rule Group. 
Possible + values are BadBots, crs_20_protocol_violations, + crs_21_protocol_anomalies, crs_23_request_limits, + crs_30_http_policy, crs_35_bad_robots, crs_40_generic_attacks, + crs_41_sql_injection_attacks, crs_41_xss_attacks, + crs_42_tight_security, crs_45_trojans, crs_49_inbound_blocking, + General, GoodBots, KnownBadBots, Known-CVEs, + REQUEST-911-METHOD-ENFORCEMENT, REQUEST-913-SCANNER-DETECTION, + REQUEST-920-PROTOCOL-ENFORCEMENT, REQUEST-921-PROTOCOL-ATTACK, + REQUEST-930-APPLICATION-ATTACK-LFI, REQUEST-931-APPLICATION-ATTACK-RFI, + REQUEST-932-APPLICATION-ATTACK-RCE, REQUEST-933-APPLICATION-ATTACK-PHP, + REQUEST-941-APPLICATION-ATTACK-XSS, REQUEST-942-APPLICATION-ATTACK-SQLI, + REQUEST-943-APPLICATION-ATTACK-SESSION-FIXATION, + REQUEST-944-APPLICATION-ATTACK-JAVA, UnknownBots, + METHOD-ENFORCEMENT, PROTOCOL-ENFORCEMENT, PROTOCOL-ATTACK, + LFI, RFI, RCE, PHP, NODEJS, XSS, SQLI, FIX, + JAVA, MS-ThreatIntel-WebShells, MS-ThreatIntel-AppSec, + MS-ThreatIntel-SQLI and MS-ThreatIntel-CVEsMS-ThreatIntel-WebShells`,. + type: string + type: object + type: array + type: + description: 'The rule set type. Possible values: Microsoft_BotManagerRuleSet, + Microsoft_DefaultRuleSet and OWASP. Defaults to OWASP.' + type: string + version: + description: 'The rule set version. Possible values: + 0.1, 1.0, 2.1, 2.2.9, 3.0, 3.1 and 3.2.' + type: string + type: object + type: array + type: object + pathBasedRuleIds: + description: A list of URL Path Map Path Rule IDs from an azurerm_application_gateway. + items: + type: string + type: array + policySettings: + description: A policy_settings block as defined below. + properties: + enabled: + description: Describes if the policy is in enabled state or + disabled state. Defaults to true. + type: boolean + fileUploadLimitInMb: + description: The File Upload Limit in MB. Accepted values + are in the range 1 to 4000. Defaults to 100. + type: number + logScrubbing: + description: One log_scrubbing block as defined below. 
+ properties: + enabled: + description: Whether this rule is enabled. Defaults to + true. + type: boolean + rule: + description: One or more rule block defined below. + items: + properties: + enabled: + description: Whether this rule is enabled. Defaults + to true. + type: boolean + matchVariable: + description: Specifies the variable to be scrubbed + from the logs. Possible values are RequestHeaderNames, + RequestCookieNames, RequestArgNames, RequestPostArgNames, + RequestJSONArgNames and RequestIPAddress. + type: string + selector: + description: |- + Specifies which elements in the collection this rule applies to. + When matchVariable is a collection, operator used to specify which elements in the collection this rule applies to. + type: string + selectorMatchOperator: + description: Specifies the operating on the selector. + Possible values are Equals and EqualsAny. Defaults + to Equals. + type: string + type: object + type: array + type: object + maxRequestBodySizeInKb: + description: The Maximum Request Body Size in KB. Accepted + values are in the range 8 to 2000. Defaults to 128. + type: number + mode: + description: Describes if it is in detection mode or prevention + mode at the policy level. Valid values are Detection and + Prevention. Defaults to Prevention. + type: string + requestBodyCheck: + description: Is Request Body Inspection enabled? Defaults + to true. + type: boolean + requestBodyInspectLimitInKb: + description: Specifies the maximum request body inspection + limit in KB for the Web Application Firewall. Defaults to + 128. + type: number + type: object + resourceGroupName: + description: The name of the resource group. Changing this forces + a new resource to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the Web Application + Firewall Policy. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. 
+ items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/notificationhubs.azure.upbound.io_notificationhubs.yaml b/package/crds/notificationhubs.azure.upbound.io_notificationhubs.yaml index 1c407710d..28a20285c 100644 --- a/package/crds/notificationhubs.azure.upbound.io_notificationhubs.yaml +++ b/package/crds/notificationhubs.azure.upbound.io_notificationhubs.yaml @@ -677,3 +677,647 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: NotificationHub is the Schema for the NotificationHubs API. Manages + a Notification Hub within a Notification Hub Namespace. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: NotificationHubSpec defines the desired state of NotificationHub + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + apnsCredential: + description: A apns_credential block as defined below. + properties: + applicationMode: + description: The Application Mode which defines which server + the APNS Messages should be sent to. Possible values are + Production and Sandbox. + type: string + bundleId: + description: The Bundle ID of the iOS/macOS application to + send push notifications for, such as com.hashicorp.example. + type: string + keyId: + description: The Apple Push Notifications Service (APNS) Key. + type: string + teamId: + description: The ID of the team the Token. + type: string + tokenSecretRef: + description: The Push Token associated with the Apple Developer + Account. This is the contents of the key downloaded from + the Apple Developer Portal between the -----BEGIN PRIVATE + KEY----- and -----END PRIVATE KEY----- blocks. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - key + - name + - namespace + type: object + required: + - tokenSecretRef + type: object + gcmCredential: + description: A gcm_credential block as defined below. + properties: + apiKeySecretRef: + description: The API Key associated with the Google Cloud + Messaging service. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + required: + - apiKeySecretRef + type: object + location: + description: The Azure Region in which this Notification Hub Namespace + exists. Changing this forces a new resource to be created. + type: string + namespaceName: + description: The name of the Notification Hub Namespace in which + to create this Notification Hub. Changing this forces a new + resource to be created. + type: string + namespaceNameRef: + description: Reference to a NotificationHubNamespace in notificationhubs + to populate namespaceName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + namespaceNameSelector: + description: Selector for a NotificationHubNamespace in notificationhubs + to populate namespaceName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + resourceGroupName: + description: The name of the Resource Group in which the Notification + Hub Namespace exists. Changing this forces a new resource to + be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. 
+ type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + apnsCredential: + description: A apns_credential block as defined below. + properties: + applicationMode: + description: The Application Mode which defines which server + the APNS Messages should be sent to. Possible values are + Production and Sandbox. + type: string + bundleId: + description: The Bundle ID of the iOS/macOS application to + send push notifications for, such as com.hashicorp.example. + type: string + keyId: + description: The Apple Push Notifications Service (APNS) Key. + type: string + teamId: + description: The ID of the team the Token. + type: string + type: object + gcmCredential: + description: A gcm_credential block as defined below. + type: object + location: + description: The Azure Region in which this Notification Hub Namespace + exists. Changing this forces a new resource to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. 
+ ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + status: + description: NotificationHubStatus defines the observed state of NotificationHub. + properties: + atProvider: + properties: + apnsCredential: + description: A apns_credential block as defined below. 
+ properties: + applicationMode: + description: The Application Mode which defines which server + the APNS Messages should be sent to. Possible values are + Production and Sandbox. + type: string + bundleId: + description: The Bundle ID of the iOS/macOS application to + send push notifications for, such as com.hashicorp.example. + type: string + keyId: + description: The Apple Push Notifications Service (APNS) Key. + type: string + teamId: + description: The ID of the team the Token. + type: string + type: object + gcmCredential: + description: A gcm_credential block as defined below. + properties: + apiKeySecretRef: + description: The API Key associated with the Google Cloud + Messaging service. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + required: + - apiKeySecretRef + type: object + id: + description: The ID of the Notification Hub. + type: string + location: + description: The Azure Region in which this Notification Hub Namespace + exists. Changing this forces a new resource to be created. + type: string + namespaceName: + description: The name of the Notification Hub Namespace in which + to create this Notification Hub. Changing this forces a new + resource to be created. + type: string + resourceGroupName: + description: The name of the Resource Group in which the Notification + Hub Namespace exists. Changing this forces a new resource to + be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. 
+ properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/operationalinsights.azure.upbound.io_workspaces.yaml b/package/crds/operationalinsights.azure.upbound.io_workspaces.yaml index 2d479b344..385c11ff4 100644 --- a/package/crds/operationalinsights.azure.upbound.io_workspaces.yaml +++ b/package/crds/operationalinsights.azure.upbound.io_workspaces.yaml @@ -657,3 +657,636 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Workspace is the Schema for the Workspaces API. Manages a Log + Analytics (formally Operational Insights) Workspace. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: WorkspaceSpec defines the desired state of Workspace + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + allowResourceOnlyPermissions: + description: Specifies if the log Analytics Workspace allow users + accessing to data associated with resources they have permission + to view, without permission to workspace. Defaults to true. + type: boolean + cmkForQueryForced: + description: Is Customer Managed Storage mandatory for query management? + type: boolean + dailyQuotaGb: + description: The workspace daily quota for ingestion in GB. Defaults + to -1 (unlimited) if omitted. + type: number + dataCollectionRuleId: + description: The ID of the Data Collection Rule to use for this + workspace. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of user managed identity ids + to be assigned. Required if type is UserAssigned. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the identity type of the Log Analytics + Workspace. 
Possible values are SystemAssigned (where Azure + will generate a Service Principal for you) and UserAssigned + where you can specify the Service Principal IDs in the identity_ids + field. + type: string + type: object + immediateDataPurgeOn30DaysEnabled: + description: Whether to remove the data in the Log Analytics Workspace + immediately after 30 days. + type: boolean + internetIngestionEnabled: + description: Should the Log Analytics Workspace support ingestion + over the Public Internet? Defaults to true. + type: boolean + internetQueryEnabled: + description: Should the Log Analytics Workspace support querying + over the Public Internet? Defaults to true. + type: boolean + localAuthenticationDisabled: + description: Specifies if the log Analytics workspace should enforce + authentication using Azure AD. Defaults to false. + type: boolean + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + reservationCapacityInGbPerDay: + description: The capacity reservation level in GB for this workspace. + Possible values are 100, 200, 300, 400, 500, 1000, 2000 and + 5000. + type: number + resourceGroupName: + description: The name of the resource group in which the Log Analytics + workspace is created. Changing this forces a new resource to + be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + retentionInDays: + description: The workspace data retention in days. Possible values + are either 7 (Free Tier only) or range between 30 and 730. + type: number + sku: + description: Specifies the SKU of the Log Analytics Workspace. 
+ Possible values are Free, PerNode, Premium, Standard, Standalone, + Unlimited, CapacityReservation, and PerGB2018 (new SKU as of + 2018-04-03). Defaults to PerGB2018. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + allowResourceOnlyPermissions: + description: Specifies if the log Analytics Workspace allow users + accessing to data associated with resources they have permission + to view, without permission to workspace. Defaults to true. + type: boolean + cmkForQueryForced: + description: Is Customer Managed Storage mandatory for query management? + type: boolean + dailyQuotaGb: + description: The workspace daily quota for ingestion in GB. Defaults + to -1 (unlimited) if omitted. + type: number + dataCollectionRuleId: + description: The ID of the Data Collection Rule to use for this + workspace. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of user managed identity ids + to be assigned. Required if type is UserAssigned. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the identity type of the Log Analytics + Workspace. Possible values are SystemAssigned (where Azure + will generate a Service Principal for you) and UserAssigned + where you can specify the Service Principal IDs in the identity_ids + field. + type: string + type: object + immediateDataPurgeOn30DaysEnabled: + description: Whether to remove the data in the Log Analytics Workspace + immediately after 30 days. + type: boolean + internetIngestionEnabled: + description: Should the Log Analytics Workspace support ingestion + over the Public Internet? Defaults to true. + type: boolean + internetQueryEnabled: + description: Should the Log Analytics Workspace support querying + over the Public Internet? Defaults to true. + type: boolean + localAuthenticationDisabled: + description: Specifies if the log Analytics workspace should enforce + authentication using Azure AD. Defaults to false. + type: boolean + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + reservationCapacityInGbPerDay: + description: The capacity reservation level in GB for this workspace. + Possible values are 100, 200, 300, 400, 500, 1000, 2000 and + 5000. + type: number + retentionInDays: + description: The workspace data retention in days. Possible values + are either 7 (Free Tier only) or range between 30 and 730. + type: number + sku: + description: Specifies the SKU of the Log Analytics Workspace. + Possible values are Free, PerNode, Premium, Standard, Standalone, + Unlimited, CapacityReservation, and PerGB2018 (new SKU as of + 2018-04-03). Defaults to PerGB2018. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. 
+ type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. 
+ - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + status: + description: WorkspaceStatus defines the observed state of Workspace. 
+ properties: + atProvider: + properties: + allowResourceOnlyPermissions: + description: Specifies if the log Analytics Workspace allow users + accessing to data associated with resources they have permission + to view, without permission to workspace. Defaults to true. + type: boolean + cmkForQueryForced: + description: Is Customer Managed Storage mandatory for query management? + type: boolean + dailyQuotaGb: + description: The workspace daily quota for ingestion in GB. Defaults + to -1 (unlimited) if omitted. + type: number + dataCollectionRuleId: + description: The ID of the Data Collection Rule to use for this + workspace. + type: string + id: + description: The Log Analytics Workspace ID. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of user managed identity ids + to be assigned. Required if type is UserAssigned. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Log Analytics Workspace ID. + type: string + tenantId: + description: The Log Analytics Workspace ID. + type: string + type: + description: Specifies the identity type of the Log Analytics + Workspace. Possible values are SystemAssigned (where Azure + will generate a Service Principal for you) and UserAssigned + where you can specify the Service Principal IDs in the identity_ids + field. + type: string + type: object + immediateDataPurgeOn30DaysEnabled: + description: Whether to remove the data in the Log Analytics Workspace + immediately after 30 days. + type: boolean + internetIngestionEnabled: + description: Should the Log Analytics Workspace support ingestion + over the Public Internet? Defaults to true. + type: boolean + internetQueryEnabled: + description: Should the Log Analytics Workspace support querying + over the Public Internet? Defaults to true. 
+ type: boolean + localAuthenticationDisabled: + description: Specifies if the log Analytics workspace should enforce + authentication using Azure AD. Defaults to false. + type: boolean + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + reservationCapacityInGbPerDay: + description: The capacity reservation level in GB for this workspace. + Possible values are 100, 200, 300, 400, 500, 1000, 2000 and + 5000. + type: number + resourceGroupName: + description: The name of the resource group in which the Log Analytics + workspace is created. Changing this forces a new resource to + be created. + type: string + retentionInDays: + description: The workspace data retention in days. Possible values + are either 7 (Free Tier only) or range between 30 and 730. + type: number + sku: + description: Specifies the SKU of the Log Analytics Workspace. + Possible values are Free, PerNode, Premium, Standard, Standalone, + Unlimited, CapacityReservation, and PerGB2018 (new SKU as of + 2018-04-03). Defaults to PerGB2018. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + workspaceId: + description: The Workspace (or Customer) ID for the Log Analytics + Workspace. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/operationsmanagement.azure.upbound.io_loganalyticssolutions.yaml b/package/crds/operationsmanagement.azure.upbound.io_loganalyticssolutions.yaml index 4385ac85b..1c40e0d2b 100644 --- a/package/crds/operationsmanagement.azure.upbound.io_loganalyticssolutions.yaml +++ b/package/crds/operationsmanagement.azure.upbound.io_loganalyticssolutions.yaml @@ -946,3 +946,925 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: LogAnalyticsSolution is the Schema for the LogAnalyticsSolutions + API. Manages a Log Analytics (formally Operational Insights) Solution. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: LogAnalyticsSolutionSpec defines the desired state of LogAnalyticsSolution + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + plan: + description: A plan block as documented below. + properties: + product: + description: The product name of the solution. For example + OMSGallery/Containers. Changing this forces a new resource + to be created. + type: string + promotionCode: + description: A promotion code to be used with the solution. + Changing this forces a new resource to be created. + type: string + publisher: + description: The publisher of the solution. For example Microsoft. + Changing this forces a new resource to be created. + type: string + type: object + resourceGroupName: + description: 'The name of the resource group in which the Log + Analytics solution is created. Changing this forces a new resource + to be created. Note: The solution and its related workspace + can only exist in the same resource group.' 
+ type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + solutionName: + description: Specifies the name of the solution to be deployed. + See here for options.Changing this forces a new resource to + be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + workspaceName: + description: The full name of the Log Analytics workspace with + which the solution will be linked. Changing this forces a new + resource to be created. + type: string + workspaceNameRef: + description: Reference to a Workspace in operationalinsights to + populate workspaceName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + workspaceNameSelector: + description: Selector for a Workspace in operationalinsights to + populate workspaceName. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + workspaceResourceId: + description: The full resource ID of the Log Analytics workspace + with which the solution will be linked. Changing this forces + a new resource to be created. + type: string + workspaceResourceIdRef: + description: Reference to a Workspace in operationalinsights to + populate workspaceResourceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + workspaceResourceIdSelector: + description: Selector for a Workspace in operationalinsights to + populate workspaceResourceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. 
The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + plan: + description: A plan block as documented below. + properties: + product: + description: The product name of the solution. For example + OMSGallery/Containers. Changing this forces a new resource + to be created. + type: string + promotionCode: + description: A promotion code to be used with the solution. + Changing this forces a new resource to be created. + type: string + publisher: + description: The publisher of the solution. For example Microsoft. + Changing this forces a new resource to be created. + type: string + type: object + resourceGroupName: + description: 'The name of the resource group in which the Log + Analytics solution is created. Changing this forces a new resource + to be created. Note: The solution and its related workspace + can only exist in the same resource group.' + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + solutionName: + description: Specifies the name of the solution to be deployed. + See here for options.Changing this forces a new resource to + be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. 
+ type: object + x-kubernetes-map-type: granular + workspaceName: + description: The full name of the Log Analytics workspace with + which the solution will be linked. Changing this forces a new + resource to be created. + type: string + workspaceNameRef: + description: Reference to a Workspace in operationalinsights to + populate workspaceName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + workspaceNameSelector: + description: Selector for a Workspace in operationalinsights to + populate workspaceName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + workspaceResourceId: + description: The full resource ID of the Log Analytics workspace + with which the solution will be linked. Changing this forces + a new resource to be created. + type: string + workspaceResourceIdRef: + description: Reference to a Workspace in operationalinsights to + populate workspaceResourceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + workspaceResourceIdSelector: + description: Selector for a Workspace in operationalinsights to + populate workspaceResourceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.plan is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.plan) + || (has(self.initProvider) && has(self.initProvider.plan))' + - message: spec.forProvider.solutionName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.solutionName) + || (has(self.initProvider) && has(self.initProvider.solutionName))' + status: + description: LogAnalyticsSolutionStatus defines the observed state of + LogAnalyticsSolution. + properties: + atProvider: + properties: + id: + type: string + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + plan: + description: A plan block as documented below. + properties: + name: + type: string + product: + description: The product name of the solution. 
For example + OMSGallery/Containers. Changing this forces a new resource + to be created. + type: string + promotionCode: + description: A promotion code to be used with the solution. + Changing this forces a new resource to be created. + type: string + publisher: + description: The publisher of the solution. For example Microsoft. + Changing this forces a new resource to be created. + type: string + type: object + resourceGroupName: + description: 'The name of the resource group in which the Log + Analytics solution is created. Changing this forces a new resource + to be created. Note: The solution and its related workspace + can only exist in the same resource group.' + type: string + solutionName: + description: Specifies the name of the solution to be deployed. + See here for options.Changing this forces a new resource to + be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + workspaceName: + description: The full name of the Log Analytics workspace with + which the solution will be linked. Changing this forces a new + resource to be created. + type: string + workspaceResourceId: + description: The full resource ID of the Log Analytics workspace + with which the solution will be linked. Changing this forces + a new resource to be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/purview.azure.upbound.io_accounts.yaml b/package/crds/purview.azure.upbound.io_accounts.yaml index 85f2eea7d..df38c2497 100644 --- a/package/crds/purview.azure.upbound.io_accounts.yaml +++ b/package/crds/purview.azure.upbound.io_accounts.yaml @@ -718,3 +718,697 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Account is the Schema for the Accounts API. Manages a Purview + Account. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: AccountSpec defines the desired state of Account + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Purview Account. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Purview Account. Possible + values are UserAssigned and SystemAssigned. + type: string + type: object + location: + description: The Azure Region where the Purview Account should + exist. Changing this forces a new Purview Account to be created. + type: string + managedResourceGroupName: + description: The name which should be used for the new Resource + Group where Purview Account creates the managed resources. Changing + this forces a new Purview Account to be created. + type: string + managedResourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + managedResourceGroupName. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + managedResourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + managedResourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + publicNetworkEnabled: + description: Should the Purview Account be visible to the public + network? Defaults to true. + type: boolean + resourceGroupName: + description: The name of the Resource Group where the Purview + Account should exist. Changing this forces a new Purview Account + to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Purview Account. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Purview Account. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Purview Account. 
Possible + values are UserAssigned and SystemAssigned. + type: string + type: object + location: + description: The Azure Region where the Purview Account should + exist. Changing this forces a new Purview Account to be created. + type: string + managedResourceGroupName: + description: The name which should be used for the new Resource + Group where Purview Account creates the managed resources. Changing + this forces a new Purview Account to be created. + type: string + managedResourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + managedResourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + managedResourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + managedResourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + publicNetworkEnabled: + description: Should the Purview Account be visible to the public + network? Defaults to true. + type: boolean + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Purview Account. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.identity is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.identity) + || (has(self.initProvider) && has(self.initProvider.identity))' + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + status: + description: AccountStatus defines the observed state of Account. + properties: + atProvider: + properties: + catalogEndpoint: + description: Catalog endpoint. + type: string + guardianEndpoint: + description: Guardian endpoint. + type: string + id: + description: The ID of the Purview Account. + type: string + identity: + description: An identity block as defined below. 
+ properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Purview Account. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Principal ID associated with this Managed + Service Identity. + type: string + tenantId: + description: The Tenant ID associated with this Managed Service + Identity. + type: string + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Purview Account. Possible + values are UserAssigned and SystemAssigned. + type: string + type: object + location: + description: The Azure Region where the Purview Account should + exist. Changing this forces a new Purview Account to be created. + type: string + managedResourceGroupName: + description: The name which should be used for the new Resource + Group where Purview Account creates the managed resources. Changing + this forces a new Purview Account to be created. + type: string + managedResources: + description: A managed_resources block as defined below. + items: + properties: + eventHubNamespaceId: + description: The ID of the managed event hub namespace. + type: string + resourceGroupId: + description: The ID of the managed resource group. + type: string + storageAccountId: + description: The ID of the managed storage account. + type: string + type: object + type: array + publicNetworkEnabled: + description: Should the Purview Account be visible to the public + network? Defaults to true. + type: boolean + resourceGroupName: + description: The name of the Resource Group where the Purview + Account should exist. Changing this forces a new Purview Account + to be created. + type: string + scanEndpoint: + description: Scan endpoint. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Purview Account. 
+ type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/recoveryservices.azure.upbound.io_backuppolicyfileshares.yaml b/package/crds/recoveryservices.azure.upbound.io_backuppolicyfileshares.yaml index a0c5422a8..e5913cc44 100644 --- a/package/crds/recoveryservices.azure.upbound.io_backuppolicyfileshares.yaml +++ b/package/crds/recoveryservices.azure.upbound.io_backuppolicyfileshares.yaml @@ -965,3 +965,911 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: BackupPolicyFileShare is the Schema for the BackupPolicyFileShares + API. Manages an Azure File Share Backup Policy. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: BackupPolicyFileShareSpec defines the desired state of BackupPolicyFileShare + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + backup: + description: Configures the Policy backup frequency and times + as documented in the backup block below. + properties: + frequency: + description: Sets the backup frequency. Possible values are + Daily and Hourly. + type: string + hourly: + description: A hourly block defined as below. This is required + when frequency is set to Hourly. + properties: + interval: + description: Specifies the interval at which backup needs + to be triggered. Possible values are 4, 6, 8 and 12. + type: number + startTime: + description: Specifies the start time of the hourly backup. + The time format should be in 24-hour format. Times must + be either on the hour or half hour (e.g. 12:00, 12:30, + 13:00, etc.). + type: string + windowDuration: + description: Species the duration of the backup window + in hours. Details could be found here. + type: number + type: object + time: + description: The time of day to perform the backup in 24-hour + format. Times must be either on the hour or half hour (e.g. 
+ 12:00, 12:30, 13:00, etc.) + type: string + type: object + recoveryVaultName: + description: Specifies the name of the Recovery Services Vault + to use. Changing this forces a new resource to be created. + type: string + recoveryVaultNameRef: + description: Reference to a Vault in recoveryservices to populate + recoveryVaultName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + recoveryVaultNameSelector: + description: Selector for a Vault in recoveryservices to populate + recoveryVaultName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + resourceGroupName: + description: The name of the resource group in which to create + the policy. Changing this forces a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + retentionDaily: + description: Configures the policy daily retention as documented + in the retention_daily block below. + properties: + count: + description: The number of daily backups to keep. Must be + between 1 and 200 (inclusive) + type: number + type: object + retentionMonthly: + description: Configures the policy monthly retention as documented + in the retention_monthly block below. + properties: + count: + description: The number of monthly backups to keep. Must be + between 1 and 120 + type: number + days: + description: The days of the month to retain backups of. Must + be between 1 and 31. + items: + type: number + type: array + x-kubernetes-list-type: set + includeLastDays: + description: Including the last day of the month, default + to false. + type: boolean + weekdays: + description: The weekday backups to retain . Must be one of + Sunday, Monday, Tuesday, Wednesday, Thursday, Friday or + Saturday. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + weeks: + description: The weeks of the month to retain backups of. + Must be one of First, Second, Third, Fourth, Last. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + retentionWeekly: + description: Configures the policy weekly retention as documented + in the retention_weekly block below. + properties: + count: + description: The number of daily backups to keep. Must be + between 1 and 200 (inclusive) + type: number + weekdays: + description: The weekday backups to retain. Must be one of + Sunday, Monday, Tuesday, Wednesday, Thursday, Friday or + Saturday. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + retentionYearly: + description: Configures the policy yearly retention as documented + in the retention_yearly block below. + properties: + count: + description: The number of yearly backups to keep. Must be + between 1 and 10 + type: number + days: + description: The days of the month to retain backups of. Must + be between 1 and 31. + items: + type: number + type: array + x-kubernetes-list-type: set + includeLastDays: + description: Including the last day of the month, default + to false. + type: boolean + months: + description: The months of the year to retain backups of. + Must be one of January, February, March, April, May, June, + July, Augest, September, October, November and December. + items: + type: string + type: array + x-kubernetes-list-type: set + weekdays: + description: The weekday backups to retain . Must be one of + Sunday, Monday, Tuesday, Wednesday, Thursday, Friday or + Saturday. + items: + type: string + type: array + x-kubernetes-list-type: set + weeks: + description: The weeks of the month to retain backups of. + Must be one of First, Second, Third, Fourth, Last. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + timezone: + description: Specifies the timezone. 
the possible values are defined + here. Defaults to UTC + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + backup: + description: Configures the Policy backup frequency and times + as documented in the backup block below. + properties: + frequency: + description: Sets the backup frequency. Possible values are + Daily and Hourly. + type: string + hourly: + description: A hourly block defined as below. This is required + when frequency is set to Hourly. + properties: + interval: + description: Specifies the interval at which backup needs + to be triggered. Possible values are 4, 6, 8 and 12. + type: number + startTime: + description: Specifies the start time of the hourly backup. + The time format should be in 24-hour format. Times must + be either on the hour or half hour (e.g. 12:00, 12:30, + 13:00, etc.). + type: string + windowDuration: + description: Species the duration of the backup window + in hours. Details could be found here. + type: number + type: object + time: + description: The time of day to perform the backup in 24-hour + format. Times must be either on the hour or half hour (e.g. + 12:00, 12:30, 13:00, etc.) + type: string + type: object + retentionDaily: + description: Configures the policy daily retention as documented + in the retention_daily block below. 
+ properties: + count: + description: The number of daily backups to keep. Must be + between 1 and 200 (inclusive) + type: number + type: object + retentionMonthly: + description: Configures the policy monthly retention as documented + in the retention_monthly block below. + properties: + count: + description: The number of monthly backups to keep. Must be + between 1 and 120 + type: number + days: + description: The days of the month to retain backups of. Must + be between 1 and 31. + items: + type: number + type: array + x-kubernetes-list-type: set + includeLastDays: + description: Including the last day of the month, default + to false. + type: boolean + weekdays: + description: The weekday backups to retain . Must be one of + Sunday, Monday, Tuesday, Wednesday, Thursday, Friday or + Saturday. + items: + type: string + type: array + x-kubernetes-list-type: set + weeks: + description: The weeks of the month to retain backups of. + Must be one of First, Second, Third, Fourth, Last. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + retentionWeekly: + description: Configures the policy weekly retention as documented + in the retention_weekly block below. + properties: + count: + description: The number of daily backups to keep. Must be + between 1 and 200 (inclusive) + type: number + weekdays: + description: The weekday backups to retain. Must be one of + Sunday, Monday, Tuesday, Wednesday, Thursday, Friday or + Saturday. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + retentionYearly: + description: Configures the policy yearly retention as documented + in the retention_yearly block below. + properties: + count: + description: The number of yearly backups to keep. Must be + between 1 and 10 + type: number + days: + description: The days of the month to retain backups of. Must + be between 1 and 31. 
+ items: + type: number + type: array + x-kubernetes-list-type: set + includeLastDays: + description: Including the last day of the month, default + to false. + type: boolean + months: + description: The months of the year to retain backups of. + Must be one of January, February, March, April, May, June, + July, Augest, September, October, November and December. + items: + type: string + type: array + x-kubernetes-list-type: set + weekdays: + description: The weekday backups to retain . Must be one of + Sunday, Monday, Tuesday, Wednesday, Thursday, Friday or + Saturday. + items: + type: string + type: array + x-kubernetes-list-type: set + weeks: + description: The weeks of the month to retain backups of. + Must be one of First, Second, Third, Fourth, Last. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + timezone: + description: Specifies the timezone. the possible values are defined + here. Defaults to UTC + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.backup is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.backup) + || (has(self.initProvider) && has(self.initProvider.backup))' + - message: spec.forProvider.retentionDaily is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.retentionDaily) + || (has(self.initProvider) && has(self.initProvider.retentionDaily))' + status: + description: BackupPolicyFileShareStatus defines the observed state of + BackupPolicyFileShare. + properties: + atProvider: + properties: + backup: + description: Configures the Policy backup frequency and times + as documented in the backup block below. + properties: + frequency: + description: Sets the backup frequency. Possible values are + Daily and Hourly. + type: string + hourly: + description: A hourly block defined as below. This is required + when frequency is set to Hourly. + properties: + interval: + description: Specifies the interval at which backup needs + to be triggered. Possible values are 4, 6, 8 and 12. + type: number + startTime: + description: Specifies the start time of the hourly backup. + The time format should be in 24-hour format. 
Times must + be either on the hour or half hour (e.g. 12:00, 12:30, + 13:00, etc.). + type: string + windowDuration: + description: Species the duration of the backup window + in hours. Details could be found here. + type: number + type: object + time: + description: The time of day to perform the backup in 24-hour + format. Times must be either on the hour or half hour (e.g. + 12:00, 12:30, 13:00, etc.) + type: string + type: object + id: + description: The ID of the Azure File Share Backup Policy. + type: string + recoveryVaultName: + description: Specifies the name of the Recovery Services Vault + to use. Changing this forces a new resource to be created. + type: string + resourceGroupName: + description: The name of the resource group in which to create + the policy. Changing this forces a new resource to be created. + type: string + retentionDaily: + description: Configures the policy daily retention as documented + in the retention_daily block below. + properties: + count: + description: The number of daily backups to keep. Must be + between 1 and 200 (inclusive) + type: number + type: object + retentionMonthly: + description: Configures the policy monthly retention as documented + in the retention_monthly block below. + properties: + count: + description: The number of monthly backups to keep. Must be + between 1 and 120 + type: number + days: + description: The days of the month to retain backups of. Must + be between 1 and 31. + items: + type: number + type: array + x-kubernetes-list-type: set + includeLastDays: + description: Including the last day of the month, default + to false. + type: boolean + weekdays: + description: The weekday backups to retain . Must be one of + Sunday, Monday, Tuesday, Wednesday, Thursday, Friday or + Saturday. + items: + type: string + type: array + x-kubernetes-list-type: set + weeks: + description: The weeks of the month to retain backups of. + Must be one of First, Second, Third, Fourth, Last. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: object + retentionWeekly: + description: Configures the policy weekly retention as documented + in the retention_weekly block below. + properties: + count: + description: The number of daily backups to keep. Must be + between 1 and 200 (inclusive) + type: number + weekdays: + description: The weekday backups to retain. Must be one of + Sunday, Monday, Tuesday, Wednesday, Thursday, Friday or + Saturday. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + retentionYearly: + description: Configures the policy yearly retention as documented + in the retention_yearly block below. + properties: + count: + description: The number of yearly backups to keep. Must be + between 1 and 10 + type: number + days: + description: The days of the month to retain backups of. Must + be between 1 and 31. + items: + type: number + type: array + x-kubernetes-list-type: set + includeLastDays: + description: Including the last day of the month, default + to false. + type: boolean + months: + description: The months of the year to retain backups of. + Must be one of January, February, March, April, May, June, + July, Augest, September, October, November and December. + items: + type: string + type: array + x-kubernetes-list-type: set + weekdays: + description: The weekday backups to retain . Must be one of + Sunday, Monday, Tuesday, Wednesday, Thursday, Friday or + Saturday. + items: + type: string + type: array + x-kubernetes-list-type: set + weeks: + description: The weeks of the month to retain backups of. + Must be one of First, Second, Third, Fourth, Last. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + timezone: + description: Specifies the timezone. the possible values are defined + here. Defaults to UTC + type: string + type: object + conditions: + description: Conditions of the resource. 
+ items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/recoveryservices.azure.upbound.io_backuppolicyvms.yaml b/package/crds/recoveryservices.azure.upbound.io_backuppolicyvms.yaml index 936307493..f2a8e9362 100644 --- a/package/crds/recoveryservices.azure.upbound.io_backuppolicyvms.yaml +++ b/package/crds/recoveryservices.azure.upbound.io_backuppolicyvms.yaml @@ -1026,3 +1026,975 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: BackupPolicyVM is the Schema for the BackupPolicyVMs API. Manages + an Azure Backup VM Backup Policy. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: BackupPolicyVMSpec defines the desired state of BackupPolicyVM + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + backup: + description: Configures the Policy backup frequency, times & days + as documented in the backup block below. + properties: + frequency: + description: Sets the backup frequency. Possible values are + Hourly, Daily and Weekly. + type: string + hourDuration: + description: Duration of the backup window in hours. Possible + values are between 4 and 24 This is used when frequency + is Hourly. + type: number + hourInterval: + description: Interval in hour at which backup is triggered. + Possible values are 4, 6, 8 and 12. This is used when frequency + is Hourly. + type: number + time: + description: The time of day to perform the backup in 24hour + format. + type: string + weekdays: + description: The days of the week to perform backups on. Must + be one of Sunday, Monday, Tuesday, Wednesday, Thursday, + Friday or Saturday. This is used when frequency is Weekly. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: object + instantRestoreResourceGroup: + description: Specifies the instant restore resource group name + as documented in the instant_restore_resource_group block below. + properties: + prefix: + description: The prefix for the instant_restore_resource_group + name. + type: string + suffix: + description: The suffix for the instant_restore_resource_group + name. + type: string + type: object + instantRestoreRetentionDays: + description: Specifies the instant restore retention range in + days. Possible values are between 1 and 5 when policy_type is + V1, and 1 to 30 when policy_type is V2. + type: number + policyType: + description: Type of the Backup Policy. Possible values are V1 + and V2 where V2 stands for the Enhanced Policy. Defaults to + V1. Changing this forces a new resource to be created. + type: string + recoveryVaultName: + description: Specifies the name of the Recovery Services Vault + to use. Changing this forces a new resource to be created. + type: string + recoveryVaultNameRef: + description: Reference to a Vault in recoveryservices to populate + recoveryVaultName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + recoveryVaultNameSelector: + description: Selector for a Vault in recoveryservices to populate + recoveryVaultName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + resourceGroupName: + description: The name of the resource group in which to create + the policy. Changing this forces a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + retentionDaily: + description: Configures the policy daily retention as documented + in the retention_daily block below. Required when backup frequency + is Daily. + properties: + count: + description: The number of daily backups to keep. Must be + between 7 and 9999. 
+ type: number + type: object + retentionMonthly: + description: Configures the policy monthly retention as documented + in the retention_monthly block below. + properties: + count: + description: The number of monthly backups to keep. Must be + between 1 and 9999 + type: number + days: + description: The days of the month to retain backups of. Must + be between 1 and 31. + items: + type: number + type: array + x-kubernetes-list-type: set + includeLastDays: + description: Including the last day of the month, default + to false. + type: boolean + weekdays: + description: The weekday backups to retain . Must be one of + Sunday, Monday, Tuesday, Wednesday, Thursday, Friday or + Saturday. + items: + type: string + type: array + x-kubernetes-list-type: set + weeks: + description: The weeks of the month to retain backups of. + Must be one of First, Second, Third, Fourth, Last. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + retentionWeekly: + description: Configures the policy weekly retention as documented + in the retention_weekly block below. Required when backup frequency + is Weekly. + properties: + count: + description: The number of weekly backups to keep. Must be + between 1 and 9999 + type: number + weekdays: + description: The weekday backups to retain. Must be one of + Sunday, Monday, Tuesday, Wednesday, Thursday, Friday or + Saturday. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + retentionYearly: + description: Configures the policy yearly retention as documented + in the retention_yearly block below. + properties: + count: + description: The number of yearly backups to keep. Must be + between 1 and 9999 + type: number + days: + description: The days of the month to retain backups of. Must + be between 1 and 31. + items: + type: number + type: array + x-kubernetes-list-type: set + includeLastDays: + description: Including the last day of the month, default + to false. 
+ type: boolean + months: + description: The months of the year to retain backups of. + Must be one of January, February, March, April, May, June, + July, August, September, October, November and December. + items: + type: string + type: array + x-kubernetes-list-type: set + weekdays: + description: The weekday backups to retain . Must be one of + Sunday, Monday, Tuesday, Wednesday, Thursday, Friday or + Saturday. + items: + type: string + type: array + x-kubernetes-list-type: set + weeks: + description: The weeks of the month to retain backups of. + Must be one of First, Second, Third, Fourth, Last. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + timezone: + description: Specifies the timezone. the possible values are defined + here. Defaults to UTC + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + backup: + description: Configures the Policy backup frequency, times & days + as documented in the backup block below. + properties: + frequency: + description: Sets the backup frequency. Possible values are + Hourly, Daily and Weekly. + type: string + hourDuration: + description: Duration of the backup window in hours. Possible + values are between 4 and 24 This is used when frequency + is Hourly. 
+ type: number + hourInterval: + description: Interval in hour at which backup is triggered. + Possible values are 4, 6, 8 and 12. This is used when frequency + is Hourly. + type: number + time: + description: The time of day to perform the backup in 24hour + format. + type: string + weekdays: + description: The days of the week to perform backups on. Must + be one of Sunday, Monday, Tuesday, Wednesday, Thursday, + Friday or Saturday. This is used when frequency is Weekly. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + instantRestoreResourceGroup: + description: Specifies the instant restore resource group name + as documented in the instant_restore_resource_group block below. + properties: + prefix: + description: The prefix for the instant_restore_resource_group + name. + type: string + suffix: + description: The suffix for the instant_restore_resource_group + name. + type: string + type: object + instantRestoreRetentionDays: + description: Specifies the instant restore retention range in + days. Possible values are between 1 and 5 when policy_type is + V1, and 1 to 30 when policy_type is V2. + type: number + policyType: + description: Type of the Backup Policy. Possible values are V1 + and V2 where V2 stands for the Enhanced Policy. Defaults to + V1. Changing this forces a new resource to be created. + type: string + retentionDaily: + description: Configures the policy daily retention as documented + in the retention_daily block below. Required when backup frequency + is Daily. + properties: + count: + description: The number of daily backups to keep. Must be + between 7 and 9999. + type: number + type: object + retentionMonthly: + description: Configures the policy monthly retention as documented + in the retention_monthly block below. + properties: + count: + description: The number of monthly backups to keep. Must be + between 1 and 9999 + type: number + days: + description: The days of the month to retain backups of. 
Must + be between 1 and 31. + items: + type: number + type: array + x-kubernetes-list-type: set + includeLastDays: + description: Including the last day of the month, default + to false. + type: boolean + weekdays: + description: The weekday backups to retain . Must be one of + Sunday, Monday, Tuesday, Wednesday, Thursday, Friday or + Saturday. + items: + type: string + type: array + x-kubernetes-list-type: set + weeks: + description: The weeks of the month to retain backups of. + Must be one of First, Second, Third, Fourth, Last. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + retentionWeekly: + description: Configures the policy weekly retention as documented + in the retention_weekly block below. Required when backup frequency + is Weekly. + properties: + count: + description: The number of weekly backups to keep. Must be + between 1 and 9999 + type: number + weekdays: + description: The weekday backups to retain. Must be one of + Sunday, Monday, Tuesday, Wednesday, Thursday, Friday or + Saturday. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + retentionYearly: + description: Configures the policy yearly retention as documented + in the retention_yearly block below. + properties: + count: + description: The number of yearly backups to keep. Must be + between 1 and 9999 + type: number + days: + description: The days of the month to retain backups of. Must + be between 1 and 31. + items: + type: number + type: array + x-kubernetes-list-type: set + includeLastDays: + description: Including the last day of the month, default + to false. + type: boolean + months: + description: The months of the year to retain backups of. + Must be one of January, February, March, April, May, June, + July, August, September, October, November and December. + items: + type: string + type: array + x-kubernetes-list-type: set + weekdays: + description: The weekday backups to retain . 
Must be one of + Sunday, Monday, Tuesday, Wednesday, Thursday, Friday or + Saturday. + items: + type: string + type: array + x-kubernetes-list-type: set + weeks: + description: The weeks of the month to retain backups of. + Must be one of First, Second, Third, Fourth, Last. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + timezone: + description: Specifies the timezone. the possible values are defined + here. Defaults to UTC + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.backup is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.backup) + || (has(self.initProvider) && has(self.initProvider.backup))' + status: + description: BackupPolicyVMStatus defines the observed state of BackupPolicyVM. + properties: + atProvider: + properties: + backup: + description: Configures the Policy backup frequency, times & days + as documented in the backup block below. + properties: + frequency: + description: Sets the backup frequency. Possible values are + Hourly, Daily and Weekly. + type: string + hourDuration: + description: Duration of the backup window in hours. Possible + values are between 4 and 24 This is used when frequency + is Hourly. + type: number + hourInterval: + description: Interval in hour at which backup is triggered. + Possible values are 4, 6, 8 and 12. This is used when frequency + is Hourly. + type: number + time: + description: The time of day to perform the backup in 24hour + format. + type: string + weekdays: + description: The days of the week to perform backups on. Must + be one of Sunday, Monday, Tuesday, Wednesday, Thursday, + Friday or Saturday. This is used when frequency is Weekly. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + id: + description: The ID of the VM Backup Policy. + type: string + instantRestoreResourceGroup: + description: Specifies the instant restore resource group name + as documented in the instant_restore_resource_group block below. + properties: + prefix: + description: The prefix for the instant_restore_resource_group + name. + type: string + suffix: + description: The suffix for the instant_restore_resource_group + name. 
+ type: string + type: object + instantRestoreRetentionDays: + description: Specifies the instant restore retention range in + days. Possible values are between 1 and 5 when policy_type is + V1, and 1 to 30 when policy_type is V2. + type: number + policyType: + description: Type of the Backup Policy. Possible values are V1 + and V2 where V2 stands for the Enhanced Policy. Defaults to + V1. Changing this forces a new resource to be created. + type: string + recoveryVaultName: + description: Specifies the name of the Recovery Services Vault + to use. Changing this forces a new resource to be created. + type: string + resourceGroupName: + description: The name of the resource group in which to create + the policy. Changing this forces a new resource to be created. + type: string + retentionDaily: + description: Configures the policy daily retention as documented + in the retention_daily block below. Required when backup frequency + is Daily. + properties: + count: + description: The number of daily backups to keep. Must be + between 7 and 9999. + type: number + type: object + retentionMonthly: + description: Configures the policy monthly retention as documented + in the retention_monthly block below. + properties: + count: + description: The number of monthly backups to keep. Must be + between 1 and 9999 + type: number + days: + description: The days of the month to retain backups of. Must + be between 1 and 31. + items: + type: number + type: array + x-kubernetes-list-type: set + includeLastDays: + description: Including the last day of the month, default + to false. + type: boolean + weekdays: + description: The weekday backups to retain . Must be one of + Sunday, Monday, Tuesday, Wednesday, Thursday, Friday or + Saturday. + items: + type: string + type: array + x-kubernetes-list-type: set + weeks: + description: The weeks of the month to retain backups of. + Must be one of First, Second, Third, Fourth, Last. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: object + retentionWeekly: + description: Configures the policy weekly retention as documented + in the retention_weekly block below. Required when backup frequency + is Weekly. + properties: + count: + description: The number of weekly backups to keep. Must be + between 1 and 9999 + type: number + weekdays: + description: The weekday backups to retain. Must be one of + Sunday, Monday, Tuesday, Wednesday, Thursday, Friday or + Saturday. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + retentionYearly: + description: Configures the policy yearly retention as documented + in the retention_yearly block below. + properties: + count: + description: The number of yearly backups to keep. Must be + between 1 and 9999 + type: number + days: + description: The days of the month to retain backups of. Must + be between 1 and 31. + items: + type: number + type: array + x-kubernetes-list-type: set + includeLastDays: + description: Including the last day of the month, default + to false. + type: boolean + months: + description: The months of the year to retain backups of. + Must be one of January, February, March, April, May, June, + July, August, September, October, November and December. + items: + type: string + type: array + x-kubernetes-list-type: set + weekdays: + description: The weekday backups to retain . Must be one of + Sunday, Monday, Tuesday, Wednesday, Thursday, Friday or + Saturday. + items: + type: string + type: array + x-kubernetes-list-type: set + weeks: + description: The weeks of the month to retain backups of. + Must be one of First, Second, Third, Fourth, Last. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + timezone: + description: Specifies the timezone. the possible values are defined + here. Defaults to UTC + type: string + type: object + conditions: + description: Conditions of the resource. 
+ items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/recoveryservices.azure.upbound.io_backuppolicyvmworkloads.yaml b/package/crds/recoveryservices.azure.upbound.io_backuppolicyvmworkloads.yaml index fba5c61c3..68c5258e2 100644 --- a/package/crds/recoveryservices.azure.upbound.io_backuppolicyvmworkloads.yaml +++ b/package/crds/recoveryservices.azure.upbound.io_backuppolicyvmworkloads.yaml @@ -1064,3 +1064,1007 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: BackupPolicyVMWorkload is the Schema for the BackupPolicyVMWorkloads + API. Manages an Azure VM Workload Backup Policy. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: BackupPolicyVMWorkloadSpec defines the desired state of BackupPolicyVMWorkload + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + protectionPolicy: + description: One or more protection_policy blocks as defined below. + items: + properties: + backup: + description: A backup block as defined below. + properties: + frequency: + description: The backup frequency for the VM Workload + Backup Policy. Possible values are Daily and Weekly. + type: string + frequencyInMinutes: + description: The backup frequency in minutes for the + VM Workload Backup Policy. Possible values are 15, + 30, 60, 120, 240, 480, 720 and 1440. + type: number + time: + description: The time of day to perform the backup in + 24hour format. + type: string + weekdays: + description: The weekday backups to retain. Possible + values are Sunday, Monday, Tuesday, Wednesday, Thursday, + Friday or Saturday. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + policyType: + description: The type of the VM Workload Backup Policy. + Possible values are Differential, Full, Incremental and + Log. 
+ type: string + retentionDaily: + description: A retention_daily block as defined below. + properties: + count: + description: The count that is used to count retention + duration with duration type Days. Possible values + are between 7 and 35. + type: number + type: object + retentionMonthly: + description: A retention_monthly block as defined below. + properties: + count: + description: The count that is used to count retention + duration with duration type Days. Possible values + are between 7 and 35. + type: number + formatType: + description: The retention schedule format type for + yearly retention policy. Possible values are Daily + and Weekly. + type: string + monthdays: + description: The monthday backups to retain. Possible + values are between 0 and 28. + items: + type: number + type: array + x-kubernetes-list-type: set + weekdays: + description: The weekday backups to retain. Possible + values are Sunday, Monday, Tuesday, Wednesday, Thursday, + Friday or Saturday. + items: + type: string + type: array + x-kubernetes-list-type: set + weeks: + description: The weeks of the month to retain backups + of. Possible values are First, Second, Third, Fourth, + Last. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + retentionWeekly: + description: A retention_weekly block as defined below. + properties: + count: + description: The count that is used to count retention + duration with duration type Days. Possible values + are between 7 and 35. + type: number + weekdays: + description: The weekday backups to retain. Possible + values are Sunday, Monday, Tuesday, Wednesday, Thursday, + Friday or Saturday. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + retentionYearly: + description: A retention_yearly block as defined below. + properties: + count: + description: The count that is used to count retention + duration with duration type Days. Possible values + are between 7 and 35. 
+ type: number + formatType: + description: The retention schedule format type for + yearly retention policy. Possible values are Daily + and Weekly. + type: string + monthdays: + description: The monthday backups to retain. Possible + values are between 0 and 28. + items: + type: number + type: array + x-kubernetes-list-type: set + months: + description: The months of the year to retain backups + of. Possible values are January, February, March, + April, May, June, July, August, September, October, + November and December. + items: + type: string + type: array + x-kubernetes-list-type: set + weekdays: + description: The weekday backups to retain. Possible + values are Sunday, Monday, Tuesday, Wednesday, Thursday, + Friday or Saturday. + items: + type: string + type: array + x-kubernetes-list-type: set + weeks: + description: The weeks of the month to retain backups + of. Possible values are First, Second, Third, Fourth, + Last. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + simpleRetention: + description: A simple_retention block as defined below. + properties: + count: + description: The count that is used to count retention + duration with duration type Days. Possible values + are between 7 and 35. + type: number + type: object + type: object + type: array + recoveryVaultName: + description: The name of the Recovery Services Vault to use. Changing + this forces a new resource to be created. + type: string + recoveryVaultNameRef: + description: Reference to a Vault in recoveryservices to populate + recoveryVaultName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + recoveryVaultNameSelector: + description: Selector for a Vault in recoveryservices to populate + recoveryVaultName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + resourceGroupName: + description: The name of the resource group in which to create + the VM Workload Backup Policy. Changing this forces a new resource + to be created. 
+ type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + settings: + description: A settings block as defined below. + properties: + compressionEnabled: + description: The compression setting for the VM Workload Backup + Policy. Defaults to false. + type: boolean + timeZone: + description: The timezone for the VM Workload Backup Policy. + The possible values are defined here. + type: string + type: object + workloadType: + description: The VM Workload type for the Backup Policy. Possible + values are SQLDataBase and SAPHanaDatabase. Changing this forces + a new resource to be created. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + protectionPolicy: + description: One or more protection_policy blocks as defined below. + items: + properties: + backup: + description: A backup block as defined below. + properties: + frequency: + description: The backup frequency for the VM Workload + Backup Policy. Possible values are Daily and Weekly. + type: string + frequencyInMinutes: + description: The backup frequency in minutes for the + VM Workload Backup Policy. 
Possible values are 15, + 30, 60, 120, 240, 480, 720 and 1440. + type: number + time: + description: The time of day to perform the backup in + 24hour format. + type: string + weekdays: + description: The weekday backups to retain. Possible + values are Sunday, Monday, Tuesday, Wednesday, Thursday, + Friday or Saturday. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + policyType: + description: The type of the VM Workload Backup Policy. + Possible values are Differential, Full, Incremental and + Log. + type: string + retentionDaily: + description: A retention_daily block as defined below. + properties: + count: + description: The count that is used to count retention + duration with duration type Days. Possible values + are between 7 and 35. + type: number + type: object + retentionMonthly: + description: A retention_monthly block as defined below. + properties: + count: + description: The count that is used to count retention + duration with duration type Days. Possible values + are between 7 and 35. + type: number + formatType: + description: The retention schedule format type for + yearly retention policy. Possible values are Daily + and Weekly. + type: string + monthdays: + description: The monthday backups to retain. Possible + values are between 0 and 28. + items: + type: number + type: array + x-kubernetes-list-type: set + weekdays: + description: The weekday backups to retain. Possible + values are Sunday, Monday, Tuesday, Wednesday, Thursday, + Friday or Saturday. + items: + type: string + type: array + x-kubernetes-list-type: set + weeks: + description: The weeks of the month to retain backups + of. Possible values are First, Second, Third, Fourth, + Last. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + retentionWeekly: + description: A retention_weekly block as defined below. 
+ properties: + count: + description: The count that is used to count retention + duration with duration type Days. Possible values + are between 7 and 35. + type: number + weekdays: + description: The weekday backups to retain. Possible + values are Sunday, Monday, Tuesday, Wednesday, Thursday, + Friday or Saturday. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + retentionYearly: + description: A retention_yearly block as defined below. + properties: + count: + description: The count that is used to count retention + duration with duration type Days. Possible values + are between 7 and 35. + type: number + formatType: + description: The retention schedule format type for + yearly retention policy. Possible values are Daily + and Weekly. + type: string + monthdays: + description: The monthday backups to retain. Possible + values are between 0 and 28. + items: + type: number + type: array + x-kubernetes-list-type: set + months: + description: The months of the year to retain backups + of. Possible values are January, February, March, + April, May, June, July, August, September, October, + November and December. + items: + type: string + type: array + x-kubernetes-list-type: set + weekdays: + description: The weekday backups to retain. Possible + values are Sunday, Monday, Tuesday, Wednesday, Thursday, + Friday or Saturday. + items: + type: string + type: array + x-kubernetes-list-type: set + weeks: + description: The weeks of the month to retain backups + of. Possible values are First, Second, Third, Fourth, + Last. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + simpleRetention: + description: A simple_retention block as defined below. + properties: + count: + description: The count that is used to count retention + duration with duration type Days. Possible values + are between 7 and 35. 
+ type: number + type: object + type: object + type: array + settings: + description: A settings block as defined below. + properties: + compressionEnabled: + description: The compression setting for the VM Workload Backup + Policy. Defaults to false. + type: boolean + timeZone: + description: The timezone for the VM Workload Backup Policy. + The possible values are defined here. + type: string + type: object + workloadType: + description: The VM Workload type for the Backup Policy. Possible + values are SQLDataBase and SAPHanaDatabase. Changing this forces + a new resource to be created. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. 
+ type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.protectionPolicy is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.protectionPolicy) + || (has(self.initProvider) && has(self.initProvider.protectionPolicy))' + - message: spec.forProvider.settings is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.settings) + || (has(self.initProvider) && has(self.initProvider.settings))' + - message: spec.forProvider.workloadType is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.workloadType) + || (has(self.initProvider) && has(self.initProvider.workloadType))' + status: + description: BackupPolicyVMWorkloadStatus defines the observed state of + BackupPolicyVMWorkload. + properties: + atProvider: + properties: + id: + description: The ID of the Azure VM Workload Backup Policy. + type: string + protectionPolicy: + description: One or more protection_policy blocks as defined below. + items: + properties: + backup: + description: A backup block as defined below. + properties: + frequency: + description: The backup frequency for the VM Workload + Backup Policy. Possible values are Daily and Weekly. + type: string + frequencyInMinutes: + description: The backup frequency in minutes for the + VM Workload Backup Policy. Possible values are 15, + 30, 60, 120, 240, 480, 720 and 1440. + type: number + time: + description: The time of day to perform the backup in + 24hour format. 
+ type: string + weekdays: + description: The weekday backups to retain. Possible + values are Sunday, Monday, Tuesday, Wednesday, Thursday, + Friday or Saturday. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + policyType: + description: The type of the VM Workload Backup Policy. + Possible values are Differential, Full, Incremental and + Log. + type: string + retentionDaily: + description: A retention_daily block as defined below. + properties: + count: + description: The count that is used to count retention + duration with duration type Days. Possible values + are between 7 and 35. + type: number + type: object + retentionMonthly: + description: A retention_monthly block as defined below. + properties: + count: + description: The count that is used to count retention + duration with duration type Days. Possible values + are between 7 and 35. + type: number + formatType: + description: The retention schedule format type for + yearly retention policy. Possible values are Daily + and Weekly. + type: string + monthdays: + description: The monthday backups to retain. Possible + values are between 0 and 28. + items: + type: number + type: array + x-kubernetes-list-type: set + weekdays: + description: The weekday backups to retain. Possible + values are Sunday, Monday, Tuesday, Wednesday, Thursday, + Friday or Saturday. + items: + type: string + type: array + x-kubernetes-list-type: set + weeks: + description: The weeks of the month to retain backups + of. Possible values are First, Second, Third, Fourth, + Last. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + retentionWeekly: + description: A retention_weekly block as defined below. + properties: + count: + description: The count that is used to count retention + duration with duration type Days. Possible values + are between 7 and 35. + type: number + weekdays: + description: The weekday backups to retain. 
Possible + values are Sunday, Monday, Tuesday, Wednesday, Thursday, + Friday or Saturday. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + retentionYearly: + description: A retention_yearly block as defined below. + properties: + count: + description: The count that is used to count retention + duration with duration type Days. Possible values + are between 7 and 35. + type: number + formatType: + description: The retention schedule format type for + yearly retention policy. Possible values are Daily + and Weekly. + type: string + monthdays: + description: The monthday backups to retain. Possible + values are between 0 and 28. + items: + type: number + type: array + x-kubernetes-list-type: set + months: + description: The months of the year to retain backups + of. Possible values are January, February, March, + April, May, June, July, August, September, October, + November and December. + items: + type: string + type: array + x-kubernetes-list-type: set + weekdays: + description: The weekday backups to retain. Possible + values are Sunday, Monday, Tuesday, Wednesday, Thursday, + Friday or Saturday. + items: + type: string + type: array + x-kubernetes-list-type: set + weeks: + description: The weeks of the month to retain backups + of. Possible values are First, Second, Third, Fourth, + Last. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + simpleRetention: + description: A simple_retention block as defined below. + properties: + count: + description: The count that is used to count retention + duration with duration type Days. Possible values + are between 7 and 35. + type: number + type: object + type: object + type: array + recoveryVaultName: + description: The name of the Recovery Services Vault to use. Changing + this forces a new resource to be created. + type: string + resourceGroupName: + description: The name of the resource group in which to create + the VM Workload Backup Policy. 
Changing this forces a new resource + to be created. + type: string + settings: + description: A settings block as defined below. + properties: + compressionEnabled: + description: The compression setting for the VM Workload Backup + Policy. Defaults to false. + type: boolean + timeZone: + description: The timezone for the VM Workload Backup Policy. + The possible values are defined here. + type: string + type: object + workloadType: + description: The VM Workload type for the Backup Policy. Possible + values are SQLDataBase and SAPHanaDatabase. Changing this forces + a new resource to be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/recoveryservices.azure.upbound.io_siterecoveryprotectioncontainermappings.yaml b/package/crds/recoveryservices.azure.upbound.io_siterecoveryprotectioncontainermappings.yaml index 2e6ce0a6a..d31c0bb93 100644 --- a/package/crds/recoveryservices.azure.upbound.io_siterecoveryprotectioncontainermappings.yaml +++ b/package/crds/recoveryservices.azure.upbound.io_siterecoveryprotectioncontainermappings.yaml @@ -1054,3 +1054,1030 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: SiteRecoveryProtectionContainerMapping is the Schema for the + SiteRecoveryProtectionContainerMappings API. Manages a Site Recovery protection + container mapping on Azure. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SiteRecoveryProtectionContainerMappingSpec defines the desired + state of SiteRecoveryProtectionContainerMapping + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + automaticUpdate: + description: a automatic_update block defined as below. + properties: + authenticationType: + description: The authentication type used for automation account. + Possible values are RunAsAccount and SystemAssignedIdentity. + type: string + automationAccountId: + description: The automation account ID which holds the automatic + update runbook and authenticates to Azure resources. + type: string + enabled: + description: Should the Mobility service installed on Azure + virtual machines be automatically updated. Defaults to false. 
+ type: boolean + type: object + recoveryFabricName: + description: Name of fabric that should contains the protection + container to map. Changing this forces a new resource to be + created. + type: string + recoveryFabricNameRef: + description: Reference to a SiteRecoveryFabric in recoveryservices + to populate recoveryFabricName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + recoveryFabricNameSelector: + description: Selector for a SiteRecoveryFabric in recoveryservices + to populate recoveryFabricName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + recoveryReplicationPolicyId: + description: Id of the policy to use for this mapping. Changing + this forces a new resource to be created. + type: string + recoveryReplicationPolicyIdRef: + description: Reference to a SiteRecoveryReplicationPolicy in recoveryservices + to populate recoveryReplicationPolicyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + recoveryReplicationPolicyIdSelector: + description: Selector for a SiteRecoveryReplicationPolicy in recoveryservices + to populate recoveryReplicationPolicyId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + recoverySourceProtectionContainerName: + description: Name of the source protection container to map. Changing + this forces a new resource to be created. + type: string + recoverySourceProtectionContainerNameRef: + description: Reference to a SiteRecoveryProtectionContainer in + recoveryservices to populate recoverySourceProtectionContainerName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + recoverySourceProtectionContainerNameSelector: + description: Selector for a SiteRecoveryProtectionContainer in + recoveryservices to populate recoverySourceProtectionContainerName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + recoveryTargetProtectionContainerId: + description: Id of target protection container to map to. Changing + this forces a new resource to be created. 
+ type: string + recoveryTargetProtectionContainerIdRef: + description: Reference to a SiteRecoveryProtectionContainer in + recoveryservices to populate recoveryTargetProtectionContainerId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + recoveryTargetProtectionContainerIdSelector: + description: Selector for a SiteRecoveryProtectionContainer in + recoveryservices to populate recoveryTargetProtectionContainerId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + recoveryVaultName: + description: The name of the vault that should be updated. Changing + this forces a new resource to be created. + type: string + recoveryVaultNameRef: + description: Reference to a Vault in recoveryservices to populate + recoveryVaultName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + recoveryVaultNameSelector: + description: Selector for a Vault in recoveryservices to populate + recoveryVaultName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + resourceGroupName: + description: Name of the resource group where the vault that should + be updated is located. Changing this forces a new resource to + be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + automaticUpdate: + description: a automatic_update block defined as below. + properties: + authenticationType: + description: The authentication type used for automation account. + Possible values are RunAsAccount and SystemAssignedIdentity. + type: string + automationAccountId: + description: The automation account ID which holds the automatic + update runbook and authenticates to Azure resources. + type: string + enabled: + description: Should the Mobility service installed on Azure + virtual machines be automatically updated. Defaults to false. + type: boolean + type: object + recoveryReplicationPolicyId: + description: Id of the policy to use for this mapping. Changing + this forces a new resource to be created. + type: string + recoveryReplicationPolicyIdRef: + description: Reference to a SiteRecoveryReplicationPolicy in recoveryservices + to populate recoveryReplicationPolicyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + recoveryReplicationPolicyIdSelector: + description: Selector for a SiteRecoveryReplicationPolicy in recoveryservices + to populate recoveryReplicationPolicyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + recoveryTargetProtectionContainerId: + description: Id of target protection container to map to. Changing + this forces a new resource to be created. + type: string + recoveryTargetProtectionContainerIdRef: + description: Reference to a SiteRecoveryProtectionContainer in + recoveryservices to populate recoveryTargetProtectionContainerId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + recoveryTargetProtectionContainerIdSelector: + description: Selector for a SiteRecoveryProtectionContainer in + recoveryservices to populate recoveryTargetProtectionContainerId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. 
It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: SiteRecoveryProtectionContainerMappingStatus defines the + observed state of SiteRecoveryProtectionContainerMapping. + properties: + atProvider: + properties: + automaticUpdate: + description: a automatic_update block defined as below. + properties: + authenticationType: + description: The authentication type used for automation account. + Possible values are RunAsAccount and SystemAssignedIdentity. + type: string + automationAccountId: + description: The automation account ID which holds the automatic + update runbook and authenticates to Azure resources. 
+ type: string + enabled: + description: Should the Mobility service installed on Azure + virtual machines be automatically updated. Defaults to false. + type: boolean + type: object + id: + description: The ID of the Site Recovery Protection Container + Mapping. + type: string + recoveryFabricName: + description: Name of fabric that should contains the protection + container to map. Changing this forces a new resource to be + created. + type: string + recoveryReplicationPolicyId: + description: Id of the policy to use for this mapping. Changing + this forces a new resource to be created. + type: string + recoverySourceProtectionContainerName: + description: Name of the source protection container to map. Changing + this forces a new resource to be created. + type: string + recoveryTargetProtectionContainerId: + description: Id of target protection container to map to. Changing + this forces a new resource to be created. + type: string + recoveryVaultName: + description: The name of the vault that should be updated. Changing + this forces a new resource to be created. + type: string + resourceGroupName: + description: Name of the resource group where the vault that should + be updated is located. Changing this forces a new resource to + be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/recoveryservices.azure.upbound.io_vaults.yaml b/package/crds/recoveryservices.azure.upbound.io_vaults.yaml index 8b3057595..20687e540 100644 --- a/package/crds/recoveryservices.azure.upbound.io_vaults.yaml +++ b/package/crds/recoveryservices.azure.upbound.io_vaults.yaml @@ -731,3 +731,698 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Vault is the Schema for the Vaults API. 
Manages a Recovery Services + Vault. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: VaultSpec defines the desired state of Vault + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + classicVmwareReplicationEnabled: + description: Whether to enable the Classic experience for VMware + replication. If set to false VMware machines will be protected + using the new stateless ASR replication appliance. Changing + this forces a new resource to be created. + type: boolean + crossRegionRestoreEnabled: + description: Is cross region restore enabled for this Vault? Only + can be true, when storage_mode_type is GeoRedundant. 
Defaults + to false. + type: boolean + encryption: + description: An encryption block as defined below. Required with + identity. + properties: + infrastructureEncryptionEnabled: + description: Enabling/Disabling the Double Encryption state. + type: boolean + keyId: + description: The Key Vault key id used to encrypt this vault. + Key managed by Vault Managed Hardware Security Module is + also supported. + type: string + useSystemAssignedIdentity: + description: Indicate that system assigned identity should + be used or not. Defaults to true. Must be set to false when + user_assigned_identity_id is set. + type: boolean + userAssignedIdentityId: + description: Specifies the user assigned identity ID to be + used. + type: string + type: object + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of User Assigned Managed Identity IDs + to be assigned to this App Configuration. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Recovery Services Vault. + Possible values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + immutability: + description: 'Immutability Settings of vault, possible values + include: Locked, Unlocked and Disabled.' + type: string + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + monitoring: + description: A monitoring block as defined below. + properties: + alertsForAllJobFailuresEnabled: + description: Enabling/Disabling built-in Azure Monitor alerts + for security scenarios and job failure scenarios. Defaults + to true. + type: boolean + alertsForCriticalOperationFailuresEnabled: + description: Enabling/Disabling alerts from the older (classic + alerts) solution. Defaults to true. 
More details could be + found here. + type: boolean + type: object + publicNetworkAccessEnabled: + description: Is it enabled to access the vault from public networks. + Defaults to true. + type: boolean + resourceGroupName: + description: The name of the resource group in which to create + the Recovery Services Vault. Changing this forces a new resource + to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sku: + description: 'Sets the vault''s SKU. Possible values include: + Standard, RS0.' + type: string + softDeleteEnabled: + description: Is soft delete enable for this Vault? Defaults to + true. + type: boolean + storageModeType: + description: The storage type of the Recovery Services Vault. + Possible values are GeoRedundant, LocallyRedundant and ZoneRedundant. + Defaults to GeoRedundant. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. 
+ properties: + classicVmwareReplicationEnabled: + description: Whether to enable the Classic experience for VMware + replication. If set to false VMware machines will be protected + using the new stateless ASR replication appliance. Changing + this forces a new resource to be created. + type: boolean + crossRegionRestoreEnabled: + description: Is cross region restore enabled for this Vault? Only + can be true, when storage_mode_type is GeoRedundant. Defaults + to false. + type: boolean + encryption: + description: An encryption block as defined below. Required with + identity. + properties: + infrastructureEncryptionEnabled: + description: Enabling/Disabling the Double Encryption state. + type: boolean + keyId: + description: The Key Vault key id used to encrypt this vault. + Key managed by Vault Managed Hardware Security Module is + also supported. + type: string + useSystemAssignedIdentity: + description: Indicate that system assigned identity should + be used or not. Defaults to true. Must be set to false when + user_assigned_identity_id is set. + type: boolean + userAssignedIdentityId: + description: Specifies the user assigned identity ID to be + used. + type: string + type: object + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of User Assigned Managed Identity IDs + to be assigned to this App Configuration. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Recovery Services Vault. + Possible values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + immutability: + description: 'Immutability Settings of vault, possible values + include: Locked, Unlocked and Disabled.' + type: string + location: + description: Specifies the supported Azure location where the + resource exists. 
Changing this forces a new resource to be created. + type: string + monitoring: + description: A monitoring block as defined below. + properties: + alertsForAllJobFailuresEnabled: + description: Enabling/Disabling built-in Azure Monitor alerts + for security scenarios and job failure scenarios. Defaults + to true. + type: boolean + alertsForCriticalOperationFailuresEnabled: + description: Enabling/Disabling alerts from the older (classic + alerts) solution. Defaults to true. More details could be + found here. + type: boolean + type: object + publicNetworkAccessEnabled: + description: Is it enabled to access the vault from public networks. + Defaults to true. + type: boolean + sku: + description: 'Sets the vault''s SKU. Possible values include: + Standard, RS0.' + type: string + softDeleteEnabled: + description: Is soft delete enable for this Vault? Defaults to + true. + type: boolean + storageModeType: + description: The storage type of the Recovery Services Vault. + Possible values are GeoRedundant, LocallyRedundant and ZoneRedundant. + Defaults to GeoRedundant. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.sku is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.sku) + || (has(self.initProvider) && has(self.initProvider.sku))' + status: + description: VaultStatus defines the observed state of Vault. + properties: + atProvider: + properties: + classicVmwareReplicationEnabled: + description: Whether to enable the Classic experience for VMware + replication. If set to false VMware machines will be protected + using the new stateless ASR replication appliance. Changing + this forces a new resource to be created. 
+ type: boolean + crossRegionRestoreEnabled: + description: Is cross region restore enabled for this Vault? Only + can be true, when storage_mode_type is GeoRedundant. Defaults + to false. + type: boolean + encryption: + description: An encryption block as defined below. Required with + identity. + properties: + infrastructureEncryptionEnabled: + description: Enabling/Disabling the Double Encryption state. + type: boolean + keyId: + description: The Key Vault key id used to encrypt this vault. + Key managed by Vault Managed Hardware Security Module is + also supported. + type: string + useSystemAssignedIdentity: + description: Indicate that system assigned identity should + be used or not. Defaults to true. Must be set to false when + user_assigned_identity_id is set. + type: boolean + userAssignedIdentityId: + description: Specifies the user assigned identity ID to be + used. + type: string + type: object + id: + description: The ID of the Recovery Services Vault. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of User Assigned Managed Identity IDs + to be assigned to this App Configuration. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Principal ID associated with this Managed + Service Identity. + type: string + tenantId: + description: The Tenant ID associated with this Managed Service + Identity. + type: string + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Recovery Services Vault. + Possible values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + immutability: + description: 'Immutability Settings of vault, possible values + include: Locked, Unlocked and Disabled.' + type: string + location: + description: Specifies the supported Azure location where the + resource exists. 
Changing this forces a new resource to be created. + type: string + monitoring: + description: A monitoring block as defined below. + properties: + alertsForAllJobFailuresEnabled: + description: Enabling/Disabling built-in Azure Monitor alerts + for security scenarios and job failure scenarios. Defaults + to true. + type: boolean + alertsForCriticalOperationFailuresEnabled: + description: Enabling/Disabling alerts from the older (classic + alerts) solution. Defaults to true. More details could be + found here. + type: boolean + type: object + publicNetworkAccessEnabled: + description: Is it enabled to access the vault from public networks. + Defaults to true. + type: boolean + resourceGroupName: + description: The name of the resource group in which to create + the Recovery Services Vault. Changing this forces a new resource + to be created. + type: string + sku: + description: 'Sets the vault''s SKU. Possible values include: + Standard, RS0.' + type: string + softDeleteEnabled: + description: Is soft delete enable for this Vault? Defaults to + true. + type: boolean + storageModeType: + description: The storage type of the Recovery Services Vault. + Possible values are GeoRedundant, LocallyRedundant and ZoneRedundant. + Defaults to GeoRedundant. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/resources.azure.upbound.io_resourcedeploymentscriptazureclicli.yaml b/package/crds/resources.azure.upbound.io_resourcedeploymentscriptazureclicli.yaml index ef631f7a6..070da2761 100644 --- a/package/crds/resources.azure.upbound.io_resourcedeploymentscriptazureclicli.yaml +++ b/package/crds/resources.azure.upbound.io_resourcedeploymentscriptazureclicli.yaml @@ -1090,3 +1090,1057 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ResourceDeploymentScriptAzureCli is the Schema for the ResourceDeploymentScriptAzureClis + API. Manages a Resource Deployment Script of Azure Cli. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ResourceDeploymentScriptAzureCliSpec defines the desired + state of ResourceDeploymentScriptAzureCli + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + cleanupPreference: + description: Specifies the cleanup preference when the script + execution gets in a terminal state. Possible values are Always, + OnExpiration, OnSuccess. Defaults to Always. Changing this forces + a new Resource Deployment Script to be created. + type: string + commandLine: + description: Command line arguments to pass to the script. Changing + this forces a new Resource Deployment Script to be created. + type: string + container: + description: A container block as defined below. Changing this + forces a new Resource Deployment Script to be created. + properties: + containerGroupName: + description: Container group name, if not specified then the + name will get auto-generated. For more information, please + refer to the Container Configuration documentation. + type: string + type: object + environmentVariable: + description: An environment_variable block as defined below. Changing + this forces a new Resource Deployment Script to be created. 
+ items: + properties: + name: + description: Specifies the name of the environment variable. + type: string + secureValueSecretRef: + description: Specifies the value of the secure environment + variable. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + value: + description: Specifies the value of the environment variable. + type: string + type: object + type: array + forceUpdateTag: + description: Gets or sets how the deployment script should be + forced to execute even if the script resource has not changed. + Can be current time stamp or a GUID. Changing this forces a + new Resource Deployment Script to be created. + type: string + identity: + description: An identity block as defined below. Changing this + forces a new Resource Deployment Script to be created. + properties: + identityIds: + description: Specifies the list of user-assigned managed identity + IDs associated with the resource. Changing this forces a + new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + identityIdsRefs: + description: References to UserAssignedIdentity in managedidentity + to populate identityIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + identityIdsSelector: + description: Selector for a list of UserAssignedIdentity in + managedidentity to populate identityIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: + description: Type of the managed identity. The only possible + value is UserAssigned. Changing this forces a new resource + to be created. + type: string + type: object + location: + description: Specifies the Azure Region where the Resource Deployment + Script should exist. Changing this forces a new Resource Deployment + Script to be created. 
+ type: string + name: + description: Specifies the name which should be used for this + Resource Deployment Script. The name length must be from 1 to + 260 characters. The name can only contain alphanumeric, underscore, + parentheses, hyphen and period, and it cannot end with a period. + Changing this forces a new Resource Deployment Script to be + created. + type: string + primaryScriptUri: + description: Uri for the script. This is the entry point for the + external script. Changing this forces a new Resource Deployment + Script to be created. + type: string + resourceGroupName: + description: Specifies the name of the Resource Group where the + Resource Deployment Script should exist. Changing this forces + a new Resource Deployment Script to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + retentionInterval: + description: Interval for which the service retains the script + resource after it reaches a terminal state. Resource will be + deleted when this duration expires. The time duration should + be between 1 hour and 26 hours (inclusive) and should be specified + in ISO 8601 format. Changing this forces a new Resource Deployment + Script to be created. + type: string + scriptContent: + description: Script body. Changing this forces a new Resource + Deployment Script to be created. + type: string + storageAccount: + description: A storage_account block as defined below. Changing + this forces a new Resource Deployment Script to be created. + properties: + keySecretRef: + description: Specifies the storage account access key. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. 
+ type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + name: + description: Specifies the storage account name. + type: string + required: + - keySecretRef + type: object + supportingScriptUris: + description: Supporting files for the external script. Changing + this forces a new Resource Deployment Script to be created. + items: + type: string + type: array + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Resource Deployment Script. + type: object + x-kubernetes-map-type: granular + timeout: + description: Maximum allowed script execution time specified in + ISO 8601 format. Needs to be greater than 0 and smaller than + 1 day. Defaults to P1D. Changing this forces a new Resource + Deployment Script to be created. + type: string + version: + description: Specifies the version of the Azure CLI that should + be used in the format X.Y.Z (e.g. 2.30.0). A canonical list + of versions is available from the Microsoft Container Registry + API. Changing this forces a new Resource Deployment Script to + be created. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. 
+ properties: + cleanupPreference: + description: Specifies the cleanup preference when the script + execution gets in a terminal state. Possible values are Always, + OnExpiration, OnSuccess. Defaults to Always. Changing this forces + a new Resource Deployment Script to be created. + type: string + commandLine: + description: Command line arguments to pass to the script. Changing + this forces a new Resource Deployment Script to be created. + type: string + container: + description: A container block as defined below. Changing this + forces a new Resource Deployment Script to be created. + properties: + containerGroupName: + description: Container group name, if not specified then the + name will get auto-generated. For more information, please + refer to the Container Configuration documentation. + type: string + type: object + environmentVariable: + description: An environment_variable block as defined below. Changing + this forces a new Resource Deployment Script to be created. + items: + properties: + name: + description: Specifies the name of the environment variable. + type: string + value: + description: Specifies the value of the environment variable. + type: string + type: object + type: array + forceUpdateTag: + description: Gets or sets how the deployment script should be + forced to execute even if the script resource has not changed. + Can be current time stamp or a GUID. Changing this forces a + new Resource Deployment Script to be created. + type: string + identity: + description: An identity block as defined below. Changing this + forces a new Resource Deployment Script to be created. + properties: + identityIds: + description: Specifies the list of user-assigned managed identity + IDs associated with the resource. Changing this forces a + new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + identityIdsRefs: + description: References to UserAssignedIdentity in managedidentity + to populate identityIds. 
+ items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + identityIdsSelector: + description: Selector for a list of UserAssignedIdentity in + managedidentity to populate identityIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: + description: Type of the managed identity. The only possible + value is UserAssigned. Changing this forces a new resource + to be created. + type: string + type: object + location: + description: Specifies the Azure Region where the Resource Deployment + Script should exist. Changing this forces a new Resource Deployment + Script to be created. + type: string + name: + description: Specifies the name which should be used for this + Resource Deployment Script. The name length must be from 1 to + 260 characters. The name can only contain alphanumeric, underscore, + parentheses, hyphen and period, and it cannot end with a period. + Changing this forces a new Resource Deployment Script to be + created. + type: string + primaryScriptUri: + description: Uri for the script. This is the entry point for the + external script. Changing this forces a new Resource Deployment + Script to be created. + type: string + resourceGroupName: + description: Specifies the name of the Resource Group where the + Resource Deployment Script should exist. Changing this forces + a new Resource Deployment Script to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + retentionInterval: + description: Interval for which the service retains the script + resource after it reaches a terminal state. Resource will be + deleted when this duration expires. The time duration should + be between 1 hour and 26 hours (inclusive) and should be specified + in ISO 8601 format. 
Changing this forces a new Resource Deployment + Script to be created. + type: string + scriptContent: + description: Script body. Changing this forces a new Resource + Deployment Script to be created. + type: string + storageAccount: + description: A storage_account block as defined below. Changing + this forces a new Resource Deployment Script to be created. + properties: + name: + description: Specifies the storage account name. + type: string + type: object + supportingScriptUris: + description: Supporting files for the external script. Changing + this forces a new Resource Deployment Script to be created. + items: + type: string + type: array + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Resource Deployment Script. + type: object + x-kubernetes-map-type: granular + timeout: + description: Maximum allowed script execution time specified in + ISO 8601 format. Needs to be greater than 0 and smaller than + 1 day. Defaults to P1D. Changing this forces a new Resource + Deployment Script to be created. + type: string + version: + description: Specifies the version of the Azure CLI that should + be used in the format X.Y.Z (e.g. 2.30.0). A canonical list + of versions is available from the Microsoft Container Registry + API. Changing this forces a new Resource Deployment Script to + be created. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.retentionInterval is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.retentionInterval) + || (has(self.initProvider) && has(self.initProvider.retentionInterval))' + - message: spec.forProvider.version is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in 
self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.version) + || (has(self.initProvider) && has(self.initProvider.version))' + status: + description: ResourceDeploymentScriptAzureCliStatus defines the observed + state of ResourceDeploymentScriptAzureCli. + properties: + atProvider: + properties: + cleanupPreference: + description: Specifies the cleanup preference when the script + execution gets in a terminal state. Possible values are Always, + OnExpiration, OnSuccess. Defaults to Always. Changing this forces + a new Resource Deployment Script to be created. + type: string + commandLine: + description: Command line arguments to pass to the script. Changing + this forces a new Resource Deployment Script to be created. + type: string + container: + description: A container block as defined below. Changing this + forces a new Resource Deployment Script to be created. + properties: + containerGroupName: + description: Container group name, if not specified then the + name will get auto-generated. For more information, please + refer to the Container Configuration documentation. + type: string + type: object + environmentVariable: + description: An environment_variable block as defined below. Changing + this forces a new Resource Deployment Script to be created. + items: + properties: + name: + description: Specifies the name of the environment variable. + type: string + value: + description: Specifies the value of the environment variable. + type: string + type: object + type: array + forceUpdateTag: + description: Gets or sets how the deployment script should be + forced to execute even if the script resource has not changed. + Can be current time stamp or a GUID. Changing this forces a + new Resource Deployment Script to be created. + type: string + id: + description: The ID of the Resource Deployment Script. + type: string + identity: + description: An identity block as defined below. 
Changing this + forces a new Resource Deployment Script to be created. + properties: + identityIds: + description: Specifies the list of user-assigned managed identity + IDs associated with the resource. Changing this forces a + new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Type of the managed identity. The only possible + value is UserAssigned. Changing this forces a new resource + to be created. + type: string + type: object + location: + description: Specifies the Azure Region where the Resource Deployment + Script should exist. Changing this forces a new Resource Deployment + Script to be created. + type: string + name: + description: Specifies the name which should be used for this + Resource Deployment Script. The name length must be from 1 to + 260 characters. The name can only contain alphanumeric, underscore, + parentheses, hyphen and period, and it cannot end with a period. + Changing this forces a new Resource Deployment Script to be + created. + type: string + outputs: + description: List of script outputs. + type: string + primaryScriptUri: + description: Uri for the script. This is the entry point for the + external script. Changing this forces a new Resource Deployment + Script to be created. + type: string + resourceGroupName: + description: Specifies the name of the Resource Group where the + Resource Deployment Script should exist. Changing this forces + a new Resource Deployment Script to be created. + type: string + retentionInterval: + description: Interval for which the service retains the script + resource after it reaches a terminal state. Resource will be + deleted when this duration expires. The time duration should + be between 1 hour and 26 hours (inclusive) and should be specified + in ISO 8601 format. Changing this forces a new Resource Deployment + Script to be created. + type: string + scriptContent: + description: Script body. 
Changing this forces a new Resource + Deployment Script to be created. + type: string + storageAccount: + description: A storage_account block as defined below. Changing + this forces a new Resource Deployment Script to be created. + properties: + name: + description: Specifies the storage account name. + type: string + type: object + supportingScriptUris: + description: Supporting files for the external script. Changing + this forces a new Resource Deployment Script to be created. + items: + type: string + type: array + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Resource Deployment Script. + type: object + x-kubernetes-map-type: granular + timeout: + description: Maximum allowed script execution time specified in + ISO 8601 format. Needs to be greater than 0 and smaller than + 1 day. Defaults to P1D. Changing this forces a new Resource + Deployment Script to be created. + type: string + version: + description: Specifies the version of the Azure CLI that should + be used in the format X.Y.Z (e.g. 2.30.0). A canonical list + of versions is available from the Microsoft Container Registry + API. Changing this forces a new Resource Deployment Script to + be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/resources.azure.upbound.io_resourcedeploymentscriptazurepowershells.yaml b/package/crds/resources.azure.upbound.io_resourcedeploymentscriptazurepowershells.yaml index ad265d7a3..28a787b18 100644 --- a/package/crds/resources.azure.upbound.io_resourcedeploymentscriptazurepowershells.yaml +++ b/package/crds/resources.azure.upbound.io_resourcedeploymentscriptazurepowershells.yaml @@ -1091,3 +1091,1058 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + 
schema: + openAPIV3Schema: + description: ResourceDeploymentScriptAzurePowerShell is the Schema for the + ResourceDeploymentScriptAzurePowerShells API. Manages a Resource Deployment + Script of Azure PowerShell. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ResourceDeploymentScriptAzurePowerShellSpec defines the desired + state of ResourceDeploymentScriptAzurePowerShell + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + cleanupPreference: + description: Specifies the cleanup preference when the script + execution gets in a terminal state. Possible values are Always, + OnExpiration, OnSuccess. Defaults to Always. 
Changing this forces + a new Resource Deployment Script to be created. + type: string + commandLine: + description: Command line arguments to pass to the script. Changing + this forces a new Resource Deployment Script to be created. + type: string + container: + description: A container block as defined below. Changing this + forces a new Resource Deployment Script to be created. + properties: + containerGroupName: + description: Container group name, if not specified then the + name will get auto-generated. For more information, please + refer to the Container Configuration documentation. + type: string + type: object + environmentVariable: + description: An environment_variable block as defined below. Changing + this forces a new Resource Deployment Script to be created. + items: + properties: + name: + description: Specifies the name of the environment variable. + type: string + secureValueSecretRef: + description: Specifies the value of the secure environment + variable. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + value: + description: Specifies the value of the environment variable. + type: string + type: object + type: array + forceUpdateTag: + description: Gets or sets how the deployment script should be + forced to execute even if the script resource has not changed. + Can be current time stamp or a GUID. Changing this forces a + new Resource Deployment Script to be created. + type: string + identity: + description: An identity block as defined below. Changing this + forces a new Resource Deployment Script to be created. + properties: + identityIds: + description: Specifies the list of user-assigned managed identity + IDs associated with the resource. Changing this forces a + new resource to be created. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + identityIdsRefs: + description: References to UserAssignedIdentity in managedidentity + to populate identityIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + identityIdsSelector: + description: Selector for a list of UserAssignedIdentity in + managedidentity to populate identityIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: + description: Type of the managed identity. The only possible + value is UserAssigned. Changing this forces a new resource + to be created. + type: string + type: object + location: + description: Specifies the Azure Region where the Resource Deployment + Script should exist. Changing this forces a new Resource Deployment + Script to be created. + type: string + name: + description: Specifies the name which should be used for this + Resource Deployment Script. The name length must be from 1 to + 260 characters. The name can only contain alphanumeric, underscore, + parentheses, hyphen and period, and it cannot end with a period. + Changing this forces a new Resource Deployment Script to be + created. + type: string + primaryScriptUri: + description: Uri for the script. This is the entry point for the + external script. Changing this forces a new Resource Deployment + Script to be created. + type: string + resourceGroupName: + description: Specifies the name of the Resource Group where the + Resource Deployment Script should exist. Changing this forces + a new Resource Deployment Script to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + retentionInterval: + description: Interval for which the service retains the script + resource after it reaches a terminal state. 
Resource will be + deleted when this duration expires. The time duration should + be between 1 hour and 26 hours (inclusive) and should be specified + in ISO 8601 format. Changing this forces a new Resource Deployment + Script to be created. + type: string + scriptContent: + description: Script body. Changing this forces a new Resource + Deployment Script to be created. + type: string + storageAccount: + description: A storage_account block as defined below. Changing + this forces a new Resource Deployment Script to be created. + properties: + keySecretRef: + description: Specifies the storage account access key. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + name: + description: Specifies the storage account name. + type: string + required: + - keySecretRef + type: object + supportingScriptUris: + description: Supporting files for the external script. Changing + this forces a new Resource Deployment Script to be created. + items: + type: string + type: array + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Resource Deployment Script. + type: object + x-kubernetes-map-type: granular + timeout: + description: Maximum allowed script execution time specified in + ISO 8601 format. Needs to be greater than 0 and smaller than + 1 day. Defaults to P1D. Changing this forces a new Resource + Deployment Script to be created. + type: string + version: + description: Specifies the version of the Azure PowerShell that + should be used in the format X.Y (e.g. 9.7). A canonical list + of versions is available from the Microsoft Container Registry + API. Changing this forces a new Resource Deployment Script to + be created. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. 
It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + cleanupPreference: + description: Specifies the cleanup preference when the script + execution gets in a terminal state. Possible values are Always, + OnExpiration, OnSuccess. Defaults to Always. Changing this forces + a new Resource Deployment Script to be created. + type: string + commandLine: + description: Command line arguments to pass to the script. Changing + this forces a new Resource Deployment Script to be created. + type: string + container: + description: A container block as defined below. Changing this + forces a new Resource Deployment Script to be created. + properties: + containerGroupName: + description: Container group name, if not specified then the + name will get auto-generated. For more information, please + refer to the Container Configuration documentation. + type: string + type: object + environmentVariable: + description: An environment_variable block as defined below. Changing + this forces a new Resource Deployment Script to be created. + items: + properties: + name: + description: Specifies the name of the environment variable. + type: string + value: + description: Specifies the value of the environment variable. + type: string + type: object + type: array + forceUpdateTag: + description: Gets or sets how the deployment script should be + forced to execute even if the script resource has not changed. 
+ Can be current time stamp or a GUID. Changing this forces a + new Resource Deployment Script to be created. + type: string + identity: + description: An identity block as defined below. Changing this + forces a new Resource Deployment Script to be created. + properties: + identityIds: + description: Specifies the list of user-assigned managed identity + IDs associated with the resource. Changing this forces a + new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + identityIdsRefs: + description: References to UserAssignedIdentity in managedidentity + to populate identityIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + identityIdsSelector: + description: Selector for a list of UserAssignedIdentity in + managedidentity to populate identityIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: + description: Type of the managed identity. The only possible + value is UserAssigned. Changing this forces a new resource + to be created. + type: string + type: object + location: + description: Specifies the Azure Region where the Resource Deployment + Script should exist. Changing this forces a new Resource Deployment + Script to be created. + type: string + name: + description: Specifies the name which should be used for this + Resource Deployment Script. The name length must be from 1 to + 260 characters. The name can only contain alphanumeric, underscore, + parentheses, hyphen and period, and it cannot end with a period. + Changing this forces a new Resource Deployment Script to be + created. + type: string + primaryScriptUri: + description: Uri for the script. This is the entry point for the + external script. Changing this forces a new Resource Deployment + Script to be created. + type: string + resourceGroupName: + description: Specifies the name of the Resource Group where the + Resource Deployment Script should exist. 
Changing this forces + a new Resource Deployment Script to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + retentionInterval: + description: Interval for which the service retains the script + resource after it reaches a terminal state. Resource will be + deleted when this duration expires. The time duration should + be between 1 hour and 26 hours (inclusive) and should be specified + in ISO 8601 format. Changing this forces a new Resource Deployment + Script to be created. + type: string + scriptContent: + description: Script body. Changing this forces a new Resource + Deployment Script to be created. + type: string + storageAccount: + description: A storage_account block as defined below. Changing + this forces a new Resource Deployment Script to be created. + properties: + name: + description: Specifies the storage account name. + type: string + type: object + supportingScriptUris: + description: Supporting files for the external script. Changing + this forces a new Resource Deployment Script to be created. + items: + type: string + type: array + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Resource Deployment Script. + type: object + x-kubernetes-map-type: granular + timeout: + description: Maximum allowed script execution time specified in + ISO 8601 format. Needs to be greater than 0 and smaller than + 1 day. Defaults to P1D. Changing this forces a new Resource + Deployment Script to be created. + type: string + version: + description: Specifies the version of the Azure PowerShell that + should be used in the format X.Y (e.g. 9.7). 
A canonical list + of versions is available from the Microsoft Container Registry + API. Changing this forces a new Resource Deployment Script to + be created. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. 
+ properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.retentionInterval is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.retentionInterval) + || (has(self.initProvider) && has(self.initProvider.retentionInterval))' + - message: spec.forProvider.version is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.version) + || (has(self.initProvider) && has(self.initProvider.version))' + status: + description: ResourceDeploymentScriptAzurePowerShellStatus defines the + observed state of ResourceDeploymentScriptAzurePowerShell. + properties: + atProvider: + properties: + cleanupPreference: + description: Specifies the cleanup preference when the script + execution gets in a terminal state. Possible values are Always, + OnExpiration, OnSuccess. Defaults to Always. Changing this forces + a new Resource Deployment Script to be created. + type: string + commandLine: + description: Command line arguments to pass to the script. Changing + this forces a new Resource Deployment Script to be created. 
+ type: string + container: + description: A container block as defined below. Changing this + forces a new Resource Deployment Script to be created. + properties: + containerGroupName: + description: Container group name, if not specified then the + name will get auto-generated. For more information, please + refer to the Container Configuration documentation. + type: string + type: object + environmentVariable: + description: An environment_variable block as defined below. Changing + this forces a new Resource Deployment Script to be created. + items: + properties: + name: + description: Specifies the name of the environment variable. + type: string + value: + description: Specifies the value of the environment variable. + type: string + type: object + type: array + forceUpdateTag: + description: Gets or sets how the deployment script should be + forced to execute even if the script resource has not changed. + Can be current time stamp or a GUID. Changing this forces a + new Resource Deployment Script to be created. + type: string + id: + description: The ID of the Resource Deployment Script. + type: string + identity: + description: An identity block as defined below. Changing this + forces a new Resource Deployment Script to be created. + properties: + identityIds: + description: Specifies the list of user-assigned managed identity + IDs associated with the resource. Changing this forces a + new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Type of the managed identity. The only possible + value is UserAssigned. Changing this forces a new resource + to be created. + type: string + type: object + location: + description: Specifies the Azure Region where the Resource Deployment + Script should exist. Changing this forces a new Resource Deployment + Script to be created. + type: string + name: + description: Specifies the name which should be used for this + Resource Deployment Script. 
The name length must be from 1 to + 260 characters. The name can only contain alphanumeric, underscore, + parentheses, hyphen and period, and it cannot end with a period. + Changing this forces a new Resource Deployment Script to be + created. + type: string + outputs: + description: List of script outputs. + type: string + primaryScriptUri: + description: Uri for the script. This is the entry point for the + external script. Changing this forces a new Resource Deployment + Script to be created. + type: string + resourceGroupName: + description: Specifies the name of the Resource Group where the + Resource Deployment Script should exist. Changing this forces + a new Resource Deployment Script to be created. + type: string + retentionInterval: + description: Interval for which the service retains the script + resource after it reaches a terminal state. Resource will be + deleted when this duration expires. The time duration should + be between 1 hour and 26 hours (inclusive) and should be specified + in ISO 8601 format. Changing this forces a new Resource Deployment + Script to be created. + type: string + scriptContent: + description: Script body. Changing this forces a new Resource + Deployment Script to be created. + type: string + storageAccount: + description: A storage_account block as defined below. Changing + this forces a new Resource Deployment Script to be created. + properties: + name: + description: Specifies the storage account name. + type: string + type: object + supportingScriptUris: + description: Supporting files for the external script. Changing + this forces a new Resource Deployment Script to be created. + items: + type: string + type: array + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Resource Deployment Script. + type: object + x-kubernetes-map-type: granular + timeout: + description: Maximum allowed script execution time specified in + ISO 8601 format. 
Needs to be greater than 0 and smaller than + 1 day. Defaults to P1D. Changing this forces a new Resource + Deployment Script to be created. + type: string + version: + description: Specifies the version of the Azure PowerShell that + should be used in the format X.Y (e.g. 9.7). A canonical list + of versions is available from the Microsoft Container Registry + API. Changing this forces a new Resource Deployment Script to + be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/search.azure.upbound.io_services.yaml b/package/crds/search.azure.upbound.io_services.yaml index 6b6e54b21..3c1222ab3 100644 --- a/package/crds/search.azure.upbound.io_services.yaml +++ b/package/crds/search.azure.upbound.io_services.yaml @@ -671,3 +671,650 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Service is the Schema for the Services API. Manages a Search + Service. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ServiceSpec defines the desired state of Service + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + allowedIps: + description: Specifies a list of inbound IPv4 or CIDRs that are + allowed to access the Search Service. If the incoming IP request + is from an IP address which is not included in the allowed_ips + it will be blocked by the Search Services firewall. + items: + type: string + type: array + x-kubernetes-list-type: set + authenticationFailureMode: + description: Specifies the response that the Search Service should + return for requests that fail authentication. Possible values + include http401WithBearerChallenge or http403. + type: string + customerManagedKeyEnforcementEnabled: + description: Specifies whether the Search Service should enforce + that non-customer resources are encrypted. Defaults to false. + type: boolean + hostingMode: + description: Specifies the Hosting Mode, which allows for High + Density partitions (that allow for up to 1000 indexes) should + be supported. Possible values are highDensity or default. Defaults + to default. Changing this forces a new Search Service to be + created. 
+ type: string + identity: + description: An identity block as defined below. + properties: + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Search Service. The only + possible value is SystemAssigned. + type: string + type: object + localAuthenticationEnabled: + description: Specifies whether the Search Service allows authenticating + using API Keys? Defaults to true. + type: boolean + location: + description: The Azure Region where the Search Service should + exist. Changing this forces a new Search Service to be created. + type: string + partitionCount: + description: Specifies the number of partitions which should be + created. This field cannot be set when using a free or basic + sku (see the Microsoft documentation). Possible values include + 1, 2, 3, 4, 6, or 12. Defaults to 1. + type: number + publicNetworkAccessEnabled: + description: Specifies whether Public Network Access is allowed + for this resource. Defaults to true. + type: boolean + replicaCount: + description: Specifies the number of Replica's which should be + created for this Search Service. This field cannot be set when + using a free sku (see the Microsoft documentation). + type: number + resourceGroupName: + description: The name of the Resource Group where the Search Service + should exist. Changing this forces a new Search Service to be + created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + semanticSearchSku: + description: Specifies the Semantic Search SKU which should be + used for this Search Service. Possible values include free and + standard. + type: string + sku: + description: The SKU which should be used for this Search Service. 
+ Possible values include basic, free, standard, standard2, standard3, + storage_optimized_l1 and storage_optimized_l2. Changing this + forces a new Search Service to be created. + type: string + tags: + additionalProperties: + type: string + description: Specifies a mapping of tags which should be assigned + to this Search Service. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + allowedIps: + description: Specifies a list of inbound IPv4 or CIDRs that are + allowed to access the Search Service. If the incoming IP request + is from an IP address which is not included in the allowed_ips + it will be blocked by the Search Services firewall. + items: + type: string + type: array + x-kubernetes-list-type: set + authenticationFailureMode: + description: Specifies the response that the Search Service should + return for requests that fail authentication. Possible values + include http401WithBearerChallenge or http403. + type: string + customerManagedKeyEnforcementEnabled: + description: Specifies whether the Search Service should enforce + that non-customer resources are encrypted. Defaults to false. + type: boolean + hostingMode: + description: Specifies the Hosting Mode, which allows for High + Density partitions (that allow for up to 1000 indexes) should + be supported. 
Possible values are highDensity or default. Defaults + to default. Changing this forces a new Search Service to be + created. + type: string + identity: + description: An identity block as defined below. + properties: + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Search Service. The only + possible value is SystemAssigned. + type: string + type: object + localAuthenticationEnabled: + description: Specifies whether the Search Service allows authenticating + using API Keys? Defaults to true. + type: boolean + location: + description: The Azure Region where the Search Service should + exist. Changing this forces a new Search Service to be created. + type: string + partitionCount: + description: Specifies the number of partitions which should be + created. This field cannot be set when using a free or basic + sku (see the Microsoft documentation). Possible values include + 1, 2, 3, 4, 6, or 12. Defaults to 1. + type: number + publicNetworkAccessEnabled: + description: Specifies whether Public Network Access is allowed + for this resource. Defaults to true. + type: boolean + replicaCount: + description: Specifies the number of Replica's which should be + created for this Search Service. This field cannot be set when + using a free sku (see the Microsoft documentation). + type: number + semanticSearchSku: + description: Specifies the Semantic Search SKU which should be + used for this Search Service. Possible values include free and + standard. + type: string + sku: + description: The SKU which should be used for this Search Service. + Possible values include basic, free, standard, standard2, standard3, + storage_optimized_l1 and storage_optimized_l2. Changing this + forces a new Search Service to be created. + type: string + tags: + additionalProperties: + type: string + description: Specifies a mapping of tags which should be assigned + to this Search Service. 
+ type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. 
+ - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.sku is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.sku) + || (has(self.initProvider) && has(self.initProvider.sku))' + status: + description: ServiceStatus defines the observed state of Service. + properties: + atProvider: + properties: + allowedIps: + description: Specifies a list of inbound IPv4 or CIDRs that are + allowed to access the Search Service. If the incoming IP request + is from an IP address which is not included in the allowed_ips + it will be blocked by the Search Services firewall. + items: + type: string + type: array + x-kubernetes-list-type: set + authenticationFailureMode: + description: Specifies the response that the Search Service should + return for requests that fail authentication. Possible values + include http401WithBearerChallenge or http403. + type: string + customerManagedKeyEnforcementEnabled: + description: Specifies whether the Search Service should enforce + that non-customer resources are encrypted. Defaults to false. + type: boolean + hostingMode: + description: Specifies the Hosting Mode, which allows for High + Density partitions (that allow for up to 1000 indexes) should + be supported. Possible values are highDensity or default. Defaults + to default. Changing this forces a new Search Service to be + created. + type: string + id: + description: The ID of the Search Service. 
+ type: string + identity: + description: An identity block as defined below. + properties: + principalId: + description: The Principal ID associated with this Managed + Service Identity. + type: string + tenantId: + description: The Tenant ID associated with this Managed Service + Identity. + type: string + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Search Service. The only + possible value is SystemAssigned. + type: string + type: object + localAuthenticationEnabled: + description: Specifies whether the Search Service allows authenticating + using API Keys? Defaults to true. + type: boolean + location: + description: The Azure Region where the Search Service should + exist. Changing this forces a new Search Service to be created. + type: string + partitionCount: + description: Specifies the number of partitions which should be + created. This field cannot be set when using a free or basic + sku (see the Microsoft documentation). Possible values include + 1, 2, 3, 4, 6, or 12. Defaults to 1. + type: number + publicNetworkAccessEnabled: + description: Specifies whether Public Network Access is allowed + for this resource. Defaults to true. + type: boolean + queryKeys: + description: A query_keys block as defined below. + items: + properties: + key: + description: The value of this Query Key. + type: string + name: + description: The name of this Query Key. + type: string + type: object + type: array + replicaCount: + description: Specifies the number of Replica's which should be + created for this Search Service. This field cannot be set when + using a free sku (see the Microsoft documentation). + type: number + resourceGroupName: + description: The name of the Resource Group where the Search Service + should exist. Changing this forces a new Search Service to be + created. + type: string + semanticSearchSku: + description: Specifies the Semantic Search SKU which should be + used for this Search Service. 
Possible values include free and + standard. + type: string + sku: + description: The SKU which should be used for this Search Service. + Possible values include basic, free, standard, standard2, standard3, + storage_optimized_l1 and storage_optimized_l2. Changing this + forces a new Search Service to be created. + type: string + tags: + additionalProperties: + type: string + description: Specifies a mapping of tags which should be assigned + to this Search Service. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/security.azure.upbound.io_iotsecuritydevicegroups.yaml b/package/crds/security.azure.upbound.io_iotsecuritydevicegroups.yaml index cf1924d6a..4da95463d 100644 --- a/package/crds/security.azure.upbound.io_iotsecuritydevicegroups.yaml +++ b/package/crds/security.azure.upbound.io_iotsecuritydevicegroups.yaml @@ -686,3 +686,665 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: IOTSecurityDeviceGroup is the Schema for the IOTSecurityDeviceGroups + API. Manages a Iot Security Device Group. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: IOTSecurityDeviceGroupSpec defines the desired state of IOTSecurityDeviceGroup + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + allowRule: + description: an allow_rule blocks as defined below. + properties: + connectionFromIpsNotAllowed: + description: Specifies which IP is not allowed to be connected + to in current device group for inbound connection. + items: + type: string + type: array + x-kubernetes-list-type: set + connectionToIpsNotAllowed: + description: Specifies which IP is not allowed to be connected + to in current device group for outbound connection. + items: + type: string + type: array + x-kubernetes-list-type: set + localUsersNotAllowed: + description: Specifies which local user is not allowed to + login in current device group. + items: + type: string + type: array + x-kubernetes-list-type: set + processesNotAllowed: + description: Specifies which process is not allowed to be + executed in current device group. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: object + iothubId: + description: The ID of the IoT Hub which to link the Security + Device Group to. Changing this forces a new resource to be created. + type: string + iothubIdRef: + description: Reference to a IOTHub in devices to populate iothubId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + iothubIdSelector: + description: Selector for a IOTHub in devices to populate iothubId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: Specifies the name of the Device Security Group. + Changing this forces a new resource to be created. + type: string + rangeRule: + description: One or more range_rule blocks as defined below. + items: + properties: + duration: + description: Specifies the time range. represented in ISO + 8601 duration format. + type: string + max: + description: The maximum threshold in the given time window. + type: number + min: + description: The minimum threshold in the given time window. + type: number + type: + description: The type of supported rule type. Possible Values + are ActiveConnectionsNotInAllowedRange, AmqpC2DMessagesNotInAllowedRange, + MqttC2DMessagesNotInAllowedRange, HttpC2DMessagesNotInAllowedRange, + AmqpC2DRejectedMessagesNotInAllowedRange, MqttC2DRejectedMessagesNotInAllowedRange, + HttpC2DRejectedMessagesNotInAllowedRange, AmqpD2CMessagesNotInAllowedRange, + MqttD2CMessagesNotInAllowedRange, HttpD2CMessagesNotInAllowedRange, + DirectMethodInvokesNotInAllowedRange, FailedLocalLoginsNotInAllowedRange, + FileUploadsNotInAllowedRange, QueuePurgesNotInAllowedRange, + TwinUpdatesNotInAllowedRange and UnauthorizedOperationsNotInAllowedRange. + type: string + type: object + type: array + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. 
The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + allowRule: + description: an allow_rule blocks as defined below. + properties: + connectionFromIpsNotAllowed: + description: Specifies which IP is not allowed to be connected + to in current device group for inbound connection. + items: + type: string + type: array + x-kubernetes-list-type: set + connectionToIpsNotAllowed: + description: Specifies which IP is not allowed to be connected + to in current device group for outbound connection. + items: + type: string + type: array + x-kubernetes-list-type: set + localUsersNotAllowed: + description: Specifies which local user is not allowed to + login in current device group. + items: + type: string + type: array + x-kubernetes-list-type: set + processesNotAllowed: + description: Specifies which process is not allowed to be + executed in current device group. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + iothubId: + description: The ID of the IoT Hub which to link the Security + Device Group to. Changing this forces a new resource to be created. + type: string + iothubIdRef: + description: Reference to a IOTHub in devices to populate iothubId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + iothubIdSelector: + description: Selector for a IOTHub in devices to populate iothubId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: Specifies the name of the Device Security Group. + Changing this forces a new resource to be created. + type: string + rangeRule: + description: One or more range_rule blocks as defined below. + items: + properties: + duration: + description: Specifies the time range. 
represented in ISO + 8601 duration format. + type: string + max: + description: The maximum threshold in the given time window. + type: number + min: + description: The minimum threshold in the given time window. + type: number + type: + description: The type of supported rule type. Possible Values + are ActiveConnectionsNotInAllowedRange, AmqpC2DMessagesNotInAllowedRange, + MqttC2DMessagesNotInAllowedRange, HttpC2DMessagesNotInAllowedRange, + AmqpC2DRejectedMessagesNotInAllowedRange, MqttC2DRejectedMessagesNotInAllowedRange, + HttpC2DRejectedMessagesNotInAllowedRange, AmqpD2CMessagesNotInAllowedRange, + MqttD2CMessagesNotInAllowedRange, HttpD2CMessagesNotInAllowedRange, + DirectMethodInvokesNotInAllowedRange, FailedLocalLoginsNotInAllowedRange, + FileUploadsNotInAllowedRange, QueuePurgesNotInAllowedRange, + TwinUpdatesNotInAllowedRange and UnauthorizedOperationsNotInAllowedRange. + type: string + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: IOTSecurityDeviceGroupStatus defines the observed state of + IOTSecurityDeviceGroup. + properties: + atProvider: + properties: + allowRule: + description: an allow_rule blocks as defined below. + properties: + connectionFromIpsNotAllowed: + description: Specifies which IP is not allowed to be connected + to in current device group for inbound connection. + items: + type: string + type: array + x-kubernetes-list-type: set + connectionToIpsNotAllowed: + description: Specifies which IP is not allowed to be connected + to in current device group for outbound connection. + items: + type: string + type: array + x-kubernetes-list-type: set + localUsersNotAllowed: + description: Specifies which local user is not allowed to + login in current device group. + items: + type: string + type: array + x-kubernetes-list-type: set + processesNotAllowed: + description: Specifies which process is not allowed to be + executed in current device group. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: object + id: + description: The ID of the Iot Security Device Group resource. + type: string + iothubId: + description: The ID of the IoT Hub which to link the Security + Device Group to. Changing this forces a new resource to be created. + type: string + name: + description: Specifies the name of the Device Security Group. + Changing this forces a new resource to be created. + type: string + rangeRule: + description: One or more range_rule blocks as defined below. + items: + properties: + duration: + description: Specifies the time range. represented in ISO + 8601 duration format. + type: string + max: + description: The maximum threshold in the given time window. + type: number + min: + description: The minimum threshold in the given time window. + type: number + type: + description: The type of supported rule type. Possible Values + are ActiveConnectionsNotInAllowedRange, AmqpC2DMessagesNotInAllowedRange, + MqttC2DMessagesNotInAllowedRange, HttpC2DMessagesNotInAllowedRange, + AmqpC2DRejectedMessagesNotInAllowedRange, MqttC2DRejectedMessagesNotInAllowedRange, + HttpC2DRejectedMessagesNotInAllowedRange, AmqpD2CMessagesNotInAllowedRange, + MqttD2CMessagesNotInAllowedRange, HttpD2CMessagesNotInAllowedRange, + DirectMethodInvokesNotInAllowedRange, FailedLocalLoginsNotInAllowedRange, + FileUploadsNotInAllowedRange, QueuePurgesNotInAllowedRange, + TwinUpdatesNotInAllowedRange and UnauthorizedOperationsNotInAllowedRange. + type: string + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. 
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/security.azure.upbound.io_iotsecuritysolutions.yaml b/package/crds/security.azure.upbound.io_iotsecuritysolutions.yaml index 70da90ff1..5f334d0dd 100644 --- a/package/crds/security.azure.upbound.io_iotsecuritysolutions.yaml +++ b/package/crds/security.azure.upbound.io_iotsecuritysolutions.yaml @@ -1027,3 +1027,1006 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: IOTSecuritySolution is the Schema for the IOTSecuritySolutions + API. Manages an iot security solution. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: IOTSecuritySolutionSpec defines the desired state of IOTSecuritySolution + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + additionalWorkspace: + description: A additional_workspace block as defined below. + items: + properties: + dataTypes: + description: A list of data types which sent to workspace. + Possible values are Alerts and RawEvents. + items: + type: string + type: array + x-kubernetes-list-type: set + workspaceId: + description: The resource ID of the Log Analytics Workspace. + type: string + type: object + type: array + disabledDataSources: + description: A list of disabled data sources for the Iot Security + Solution. Possible value is TwinData. + items: + type: string + type: array + x-kubernetes-list-type: set + displayName: + description: Specifies the Display Name for this Iot Security + Solution. + type: string + enabled: + description: Is the Iot Security Solution enabled? Defaults to + true. + type: boolean + eventsToExport: + description: A list of data which is to exported to analytic workspace. + Valid values include RawEvents. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + iothubIds: + description: Specifies the IoT Hub resource IDs to which this + Iot Security Solution is applied. + items: + type: string + type: array + x-kubernetes-list-type: set + iothubIdsRefs: + description: References to IOTHub in devices to populate iothubIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + iothubIdsSelector: + description: Selector for a list of IOTHub in devices to populate + iothubIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + logAnalyticsWorkspaceId: + description: Specifies the Log Analytics Workspace ID to which + the security data will be sent. + type: string + logUnmaskedIpsEnabled: + description: Should IP addressed be unmasked in the log? Defaults + to false. + type: boolean + name: + description: Specifies the name of the Iot Security Solution. + Changing this forces a new resource to be created. + type: string + queryForResources: + description: An Azure Resource Graph query used to set the resources + monitored. + type: string + querySubscriptionIds: + description: A list of subscription Ids on which the user defined + resources query should be executed. + items: + type: string + type: array + x-kubernetes-list-type: set + recommendationsEnabled: + description: A recommendations_enabled block of options to enable + or disable as defined below. + properties: + acrAuthentication: + description: Is Principal Authentication enabled for the ACR + repository? Defaults to true. + type: boolean + agentSendUnutilizedMsg: + description: Is Agent send underutilized messages enabled? + Defaults to true. + type: boolean + baseline: + description: Is Security related system configuration issues + identified? Defaults to true. 
+ type: boolean + edgeHubMemOptimize: + description: Is IoT Edge Hub memory optimized? Defaults to + true. + type: boolean + edgeLoggingOption: + description: Is logging configured for IoT Edge module? Defaults + to true. + type: boolean + inconsistentModuleSettings: + description: Is inconsistent module settings enabled for SecurityGroup? + Defaults to true. + type: boolean + installAgent: + description: is Azure IoT Security agent installed? Defaults + to true. + type: boolean + ipFilterDenyAll: + description: Is Default IP filter policy denied? Defaults + to true. + type: boolean + ipFilterPermissiveRule: + description: Is IP filter rule source allowable IP range too + large? Defaults to true. + type: boolean + openPorts: + description: Is any ports open on the device? Defaults to + true. + type: boolean + permissiveFirewallPolicy: + description: Does firewall policy exist which allow necessary + communication to/from the device? Defaults to true. + type: boolean + permissiveInputFirewallRules: + description: Is only necessary addresses or ports are permitted + in? Defaults to true. + type: boolean + permissiveOutputFirewallRules: + description: Is only necessary addresses or ports are permitted + out? Defaults to true. + type: boolean + privilegedDockerOptions: + description: Is high level permissions are needed for the + module? Defaults to true. + type: boolean + sharedCredentials: + description: Is any credentials shared among devices? Defaults + to true. + type: boolean + vulnerableTlsCipherSuite: + description: Does TLS cipher suite need to be updated? Defaults + to true. + type: boolean + type: object + resourceGroupName: + description: Specifies the name of the resource group in which + to create the Iot Security Solution. Changing this forces a + new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + additionalWorkspace: + description: A additional_workspace block as defined below. + items: + properties: + dataTypes: + description: A list of data types which sent to workspace. + Possible values are Alerts and RawEvents. + items: + type: string + type: array + x-kubernetes-list-type: set + workspaceId: + description: The resource ID of the Log Analytics Workspace. + type: string + type: object + type: array + disabledDataSources: + description: A list of disabled data sources for the Iot Security + Solution. Possible value is TwinData. + items: + type: string + type: array + x-kubernetes-list-type: set + displayName: + description: Specifies the Display Name for this Iot Security + Solution. + type: string + enabled: + description: Is the Iot Security Solution enabled? Defaults to + true. + type: boolean + eventsToExport: + description: A list of data which is to exported to analytic workspace. + Valid values include RawEvents. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + iothubIds: + description: Specifies the IoT Hub resource IDs to which this + Iot Security Solution is applied. + items: + type: string + type: array + x-kubernetes-list-type: set + iothubIdsRefs: + description: References to IOTHub in devices to populate iothubIds. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + iothubIdsSelector: + description: Selector for a list of IOTHub in devices to populate + iothubIds. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + logAnalyticsWorkspaceId: + description: Specifies the Log Analytics Workspace ID to which + the security data will be sent. + type: string + logUnmaskedIpsEnabled: + description: Should IP addressed be unmasked in the log? Defaults + to false. + type: boolean + name: + description: Specifies the name of the Iot Security Solution. + Changing this forces a new resource to be created. + type: string + queryForResources: + description: An Azure Resource Graph query used to set the resources + monitored. + type: string + querySubscriptionIds: + description: A list of subscription Ids on which the user defined + resources query should be executed. + items: + type: string + type: array + x-kubernetes-list-type: set + recommendationsEnabled: + description: A recommendations_enabled block of options to enable + or disable as defined below. + properties: + acrAuthentication: + description: Is Principal Authentication enabled for the ACR + repository? Defaults to true. + type: boolean + agentSendUnutilizedMsg: + description: Is Agent send underutilized messages enabled? + Defaults to true. + type: boolean + baseline: + description: Is Security related system configuration issues + identified? Defaults to true. 
+ type: boolean + edgeHubMemOptimize: + description: Is IoT Edge Hub memory optimized? Defaults to + true. + type: boolean + edgeLoggingOption: + description: Is logging configured for IoT Edge module? Defaults + to true. + type: boolean + inconsistentModuleSettings: + description: Is inconsistent module settings enabled for SecurityGroup? + Defaults to true. + type: boolean + installAgent: + description: is Azure IoT Security agent installed? Defaults + to true. + type: boolean + ipFilterDenyAll: + description: Is Default IP filter policy denied? Defaults + to true. + type: boolean + ipFilterPermissiveRule: + description: Is IP filter rule source allowable IP range too + large? Defaults to true. + type: boolean + openPorts: + description: Is any ports open on the device? Defaults to + true. + type: boolean + permissiveFirewallPolicy: + description: Does firewall policy exist which allow necessary + communication to/from the device? Defaults to true. + type: boolean + permissiveInputFirewallRules: + description: Is only necessary addresses or ports are permitted + in? Defaults to true. + type: boolean + permissiveOutputFirewallRules: + description: Is only necessary addresses or ports are permitted + out? Defaults to true. + type: boolean + privilegedDockerOptions: + description: Is high level permissions are needed for the + module? Defaults to true. + type: boolean + sharedCredentials: + description: Is any credentials shared among devices? Defaults + to true. + type: boolean + vulnerableTlsCipherSuite: + description: Does TLS cipher suite need to be updated? Defaults + to true. + type: boolean + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. 
+ ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.displayName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.displayName) + || (has(self.initProvider) && has(self.initProvider.displayName))' + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: IOTSecuritySolutionStatus defines the observed state of IOTSecuritySolution. + properties: + atProvider: + properties: + additionalWorkspace: + description: A additional_workspace block as defined below. + items: + properties: + dataTypes: + description: A list of data types which sent to workspace. + Possible values are Alerts and RawEvents. + items: + type: string + type: array + x-kubernetes-list-type: set + workspaceId: + description: The resource ID of the Log Analytics Workspace. + type: string + type: object + type: array + disabledDataSources: + description: A list of disabled data sources for the Iot Security + Solution. Possible value is TwinData. + items: + type: string + type: array + x-kubernetes-list-type: set + displayName: + description: Specifies the Display Name for this Iot Security + Solution. + type: string + enabled: + description: Is the Iot Security Solution enabled? Defaults to + true. 
+ type: boolean + eventsToExport: + description: A list of data which is to exported to analytic workspace. + Valid values include RawEvents. + items: + type: string + type: array + x-kubernetes-list-type: set + id: + description: The ID of the Iot Security Solution resource. + type: string + iothubIds: + description: Specifies the IoT Hub resource IDs to which this + Iot Security Solution is applied. + items: + type: string + type: array + x-kubernetes-list-type: set + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + logAnalyticsWorkspaceId: + description: Specifies the Log Analytics Workspace ID to which + the security data will be sent. + type: string + logUnmaskedIpsEnabled: + description: Should IP addressed be unmasked in the log? Defaults + to false. + type: boolean + name: + description: Specifies the name of the Iot Security Solution. + Changing this forces a new resource to be created. + type: string + queryForResources: + description: An Azure Resource Graph query used to set the resources + monitored. + type: string + querySubscriptionIds: + description: A list of subscription Ids on which the user defined + resources query should be executed. + items: + type: string + type: array + x-kubernetes-list-type: set + recommendationsEnabled: + description: A recommendations_enabled block of options to enable + or disable as defined below. + properties: + acrAuthentication: + description: Is Principal Authentication enabled for the ACR + repository? Defaults to true. + type: boolean + agentSendUnutilizedMsg: + description: Is Agent send underutilized messages enabled? + Defaults to true. + type: boolean + baseline: + description: Is Security related system configuration issues + identified? Defaults to true. + type: boolean + edgeHubMemOptimize: + description: Is IoT Edge Hub memory optimized? Defaults to + true. 
+ type: boolean + edgeLoggingOption: + description: Is logging configured for IoT Edge module? Defaults + to true. + type: boolean + inconsistentModuleSettings: + description: Is inconsistent module settings enabled for SecurityGroup? + Defaults to true. + type: boolean + installAgent: + description: is Azure IoT Security agent installed? Defaults + to true. + type: boolean + ipFilterDenyAll: + description: Is Default IP filter policy denied? Defaults + to true. + type: boolean + ipFilterPermissiveRule: + description: Is IP filter rule source allowable IP range too + large? Defaults to true. + type: boolean + openPorts: + description: Is any ports open on the device? Defaults to + true. + type: boolean + permissiveFirewallPolicy: + description: Does firewall policy exist which allow necessary + communication to/from the device? Defaults to true. + type: boolean + permissiveInputFirewallRules: + description: Is only necessary addresses or ports are permitted + in? Defaults to true. + type: boolean + permissiveOutputFirewallRules: + description: Is only necessary addresses or ports are permitted + out? Defaults to true. + type: boolean + privilegedDockerOptions: + description: Is high level permissions are needed for the + module? Defaults to true. + type: boolean + sharedCredentials: + description: Is any credentials shared among devices? Defaults + to true. + type: boolean + vulnerableTlsCipherSuite: + description: Does TLS cipher suite need to be updated? Defaults + to true. + type: boolean + type: object + resourceGroupName: + description: Specifies the name of the resource group in which + to create the Iot Security Solution. Changing this forces a + new resource to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. 
+ items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/security.azure.upbound.io_securitycenterassessments.yaml b/package/crds/security.azure.upbound.io_securitycenterassessments.yaml index d546789a0..ae09207f1 100644 --- a/package/crds/security.azure.upbound.io_securitycenterassessments.yaml +++ b/package/crds/security.azure.upbound.io_securitycenterassessments.yaml @@ -732,3 +732,711 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: SecurityCenterAssessment is the Schema for the SecurityCenterAssessments + API. Manages the Security Center Assessment for Azure Security Center. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SecurityCenterAssessmentSpec defines the desired state of + SecurityCenterAssessment + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + additionalData: + additionalProperties: + type: string + description: A map of additional data to associate with the assessment. + type: object + x-kubernetes-map-type: granular + assessmentPolicyId: + description: The ID of the security Assessment policy to apply + to this resource. Changing this forces a new security Assessment + to be created. + type: string + assessmentPolicyIdRef: + description: Reference to a SecurityCenterAssessmentPolicy in + security to populate assessmentPolicyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + assessmentPolicyIdSelector: + description: Selector for a SecurityCenterAssessmentPolicy in + security to populate assessmentPolicyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + status: + description: A status block as defined below. + properties: + cause: + description: Specifies the cause of the assessment status. + type: string + code: + description: Specifies the programmatic code of the assessment + status. Possible values are Healthy, Unhealthy and NotApplicable. 
+ type: string + description: + description: Specifies the human readable description of the + assessment status. + type: string + type: object + targetResourceId: + description: The ID of the target resource. Changing this forces + a new security Assessment to be created. + type: string + targetResourceIdRef: + description: Reference to a LinuxVirtualMachineScaleSet in compute + to populate targetResourceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + targetResourceIdSelector: + description: Selector for a LinuxVirtualMachineScaleSet in compute + to populate targetResourceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + additionalData: + additionalProperties: + type: string + description: A map of additional data to associate with the assessment. + type: object + x-kubernetes-map-type: granular + assessmentPolicyId: + description: The ID of the security Assessment policy to apply + to this resource. Changing this forces a new security Assessment + to be created. + type: string + assessmentPolicyIdRef: + description: Reference to a SecurityCenterAssessmentPolicy in + security to populate assessmentPolicyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + assessmentPolicyIdSelector: + description: Selector for a SecurityCenterAssessmentPolicy in + security to populate assessmentPolicyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + status: + description: A status block as defined below. + properties: + cause: + description: Specifies the cause of the assessment status. + type: string + code: + description: Specifies the programmatic code of the assessment + status. Possible values are Healthy, Unhealthy and NotApplicable. + type: string + description: + description: Specifies the human readable description of the + assessment status. + type: string + type: object + targetResourceId: + description: The ID of the target resource. Changing this forces + a new security Assessment to be created. + type: string + targetResourceIdRef: + description: Reference to a LinuxVirtualMachineScaleSet in compute + to populate targetResourceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + targetResourceIdSelector: + description: Selector for a LinuxVirtualMachineScaleSet in compute + to populate targetResourceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.status is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.status) + || (has(self.initProvider) && has(self.initProvider.status))' + status: + description: SecurityCenterAssessmentStatus defines the observed state + of SecurityCenterAssessment. + properties: + atProvider: + properties: + additionalData: + additionalProperties: + type: string + description: A map of additional data to associate with the assessment. + type: object + x-kubernetes-map-type: granular + assessmentPolicyId: + description: The ID of the security Assessment policy to apply + to this resource. Changing this forces a new security Assessment + to be created. + type: string + id: + description: The ID of the Security Center Assessment. + type: string + status: + description: A status block as defined below. + properties: + cause: + description: Specifies the cause of the assessment status. + type: string + code: + description: Specifies the programmatic code of the assessment + status. Possible values are Healthy, Unhealthy and NotApplicable. + type: string + description: + description: Specifies the human readable description of the + assessment status. 
+ type: string + type: object + targetResourceId: + description: The ID of the target resource. Changing this forces + a new security Assessment to be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/servicebus.azure.upbound.io_servicebusnamespaces.yaml b/package/crds/servicebus.azure.upbound.io_servicebusnamespaces.yaml index 6ad458986..5af5e2024 100644 --- a/package/crds/servicebus.azure.upbound.io_servicebusnamespaces.yaml +++ b/package/crds/servicebus.azure.upbound.io_servicebusnamespaces.yaml @@ -959,3 +959,922 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ServiceBusNamespace is the Schema for the ServiceBusNamespaces + API. Manages a ServiceBus Namespace. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ServiceBusNamespaceSpec defines the desired state of ServiceBusNamespace + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + capacity: + description: Specifies the capacity. When sku is Premium, capacity + can be 1, 2, 4, 8 or 16. When sku is Basic or Standard, capacity + can be 0 only. + type: number + customerManagedKey: + description: An customer_managed_key block as defined below. + properties: + identityId: + description: The ID of the User Assigned Identity that has + access to the key. + type: string + infrastructureEncryptionEnabled: + description: Used to specify whether enable Infrastructure + Encryption (Double Encryption). Changing this forces a new + resource to be created. + type: boolean + keyVaultKeyId: + description: The ID of the Key Vault Key which should be used + to Encrypt the data in this ServiceBus Namespace. + type: string + type: object + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this ServiceBus namespace. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this ServiceBus Namespace. + Possible values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + localAuthEnabled: + description: Whether or not SAS authentication is enabled for + the Service Bus namespace. Defaults to true. + type: boolean + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + minimumTlsVersion: + description: 'The minimum supported TLS version for this Service + Bus Namespace. Valid values are: 1.0, 1.1 and 1.2. The current + default minimum TLS version is 1.2.' + type: string + networkRuleSet: + description: An network_rule_set block as defined below. + properties: + defaultAction: + description: Specifies the default action for the Network + Rule Set. Possible values are Allow and Deny. Defaults to + Allow. + type: string + ipRules: + description: One or more IP Addresses, or CIDR Blocks which + should be able to access the ServiceBus Namespace. + items: + type: string + type: array + x-kubernetes-list-type: set + networkRules: + description: One or more network_rules blocks as defined below. + items: + properties: + ignoreMissingVnetServiceEndpoint: + description: Should the ServiceBus Namespace Network + Rule Set ignore missing Virtual Network Service Endpoint + option in the Subnet? Defaults to false. + type: boolean + subnetId: + description: The Subnet ID which should be able to access + this ServiceBus Namespace. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + publicNetworkAccessEnabled: + description: Whether to allow traffic over public network. + Possible values are true and false. Defaults to true. + type: boolean + trustedServicesAllowed: + description: Are Azure Services that are known and trusted + for this resource type are allowed to bypass firewall configuration? + See Trusted Microsoft Services + type: boolean + type: object + premiumMessagingPartitions: + description: Specifies the number messaging partitions. Only valid + when sku is Premium and the minimum number is 1. Possible values + include 0, 1, 2, and 4. Defaults to 0 for Standard, Basic namespace. + Changing this forces a new resource to be created. + type: number + publicNetworkAccessEnabled: + description: Is public network access enabled for the Service + Bus Namespace? Defaults to true. + type: boolean + resourceGroupName: + description: |- + The name of the resource group in which to Changing this forces a new resource to be created. + create the namespace. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sku: + description: Defines which tier to use. Options are Basic, Standard + or Premium. Please note that setting this field to Premium will + force the creation of a new resource. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + zoneRedundant: + description: Whether or not this resource is zone redundant. sku + needs to be Premium. Changing this forces a new resource to + be created. + type: boolean + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. 
It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + capacity: + description: Specifies the capacity. When sku is Premium, capacity + can be 1, 2, 4, 8 or 16. When sku is Basic or Standard, capacity + can be 0 only. + type: number + customerManagedKey: + description: An customer_managed_key block as defined below. + properties: + identityId: + description: The ID of the User Assigned Identity that has + access to the key. + type: string + infrastructureEncryptionEnabled: + description: Used to specify whether enable Infrastructure + Encryption (Double Encryption). Changing this forces a new + resource to be created. + type: boolean + keyVaultKeyId: + description: The ID of the Key Vault Key which should be used + to Encrypt the data in this ServiceBus Namespace. + type: string + type: object + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this ServiceBus namespace. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this ServiceBus Namespace. + Possible values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). 
+ type: string + type: object + localAuthEnabled: + description: Whether or not SAS authentication is enabled for + the Service Bus namespace. Defaults to true. + type: boolean + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + minimumTlsVersion: + description: 'The minimum supported TLS version for this Service + Bus Namespace. Valid values are: 1.0, 1.1 and 1.2. The current + default minimum TLS version is 1.2.' + type: string + networkRuleSet: + description: An network_rule_set block as defined below. + properties: + defaultAction: + description: Specifies the default action for the Network + Rule Set. Possible values are Allow and Deny. Defaults to + Allow. + type: string + ipRules: + description: One or more IP Addresses, or CIDR Blocks which + should be able to access the ServiceBus Namespace. + items: + type: string + type: array + x-kubernetes-list-type: set + networkRules: + description: One or more network_rules blocks as defined below. + items: + properties: + ignoreMissingVnetServiceEndpoint: + description: Should the ServiceBus Namespace Network + Rule Set ignore missing Virtual Network Service Endpoint + option in the Subnet? Defaults to false. + type: boolean + subnetId: + description: The Subnet ID which should be able to access + this ServiceBus Namespace. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate + subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate + subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + publicNetworkAccessEnabled: + description: Whether to allow traffic over public network. + Possible values are true and false. Defaults to true. + type: boolean + trustedServicesAllowed: + description: Are Azure Services that are known and trusted + for this resource type are allowed to bypass firewall configuration? 
+ See Trusted Microsoft Services + type: boolean + type: object + premiumMessagingPartitions: + description: Specifies the number messaging partitions. Only valid + when sku is Premium and the minimum number is 1. Possible values + include 0, 1, 2, and 4. Defaults to 0 for Standard, Basic namespace. + Changing this forces a new resource to be created. + type: number + publicNetworkAccessEnabled: + description: Is public network access enabled for the Service + Bus Namespace? Defaults to true. + type: boolean + sku: + description: Defines which tier to use. Options are Basic, Standard + or Premium. Please note that setting this field to Premium will + force the creation of a new resource. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + zoneRedundant: + description: Whether or not this resource is zone redundant. sku + needs to be Premium. Changing this forces a new resource to + be created. + type: boolean + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.sku is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.sku) + || (has(self.initProvider) && has(self.initProvider.sku))' + status: + description: ServiceBusNamespaceStatus defines the observed state of ServiceBusNamespace. + properties: + atProvider: + properties: + capacity: + description: Specifies the capacity. When sku is Premium, capacity + can be 1, 2, 4, 8 or 16. When sku is Basic or Standard, capacity + can be 0 only. + type: number + customerManagedKey: + description: An customer_managed_key block as defined below. 
+ properties: + identityId: + description: The ID of the User Assigned Identity that has + access to the key. + type: string + infrastructureEncryptionEnabled: + description: Used to specify whether enable Infrastructure + Encryption (Double Encryption). Changing this forces a new + resource to be created. + type: boolean + keyVaultKeyId: + description: The ID of the Key Vault Key which should be used + to Encrypt the data in this ServiceBus Namespace. + type: string + type: object + endpoint: + description: The URL to access the ServiceBus Namespace. + type: string + id: + description: The ServiceBus Namespace ID. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this ServiceBus namespace. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Principal ID for the Service Principal associated + with the Managed Service Identity of this ServiceBus Namespace. + type: string + tenantId: + description: The Tenant ID for the Service Principal associated + with the Managed Service Identity of this ServiceBus Namespace. + type: string + type: + description: Specifies the type of Managed Service Identity + that should be configured on this ServiceBus Namespace. + Possible values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + localAuthEnabled: + description: Whether or not SAS authentication is enabled for + the Service Bus namespace. Defaults to true. + type: boolean + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + minimumTlsVersion: + description: 'The minimum supported TLS version for this Service + Bus Namespace. Valid values are: 1.0, 1.1 and 1.2. The current + default minimum TLS version is 1.2.' 
+ type: string + networkRuleSet: + description: An network_rule_set block as defined below. + properties: + defaultAction: + description: Specifies the default action for the Network + Rule Set. Possible values are Allow and Deny. Defaults to + Allow. + type: string + ipRules: + description: One or more IP Addresses, or CIDR Blocks which + should be able to access the ServiceBus Namespace. + items: + type: string + type: array + x-kubernetes-list-type: set + networkRules: + description: One or more network_rules blocks as defined below. + items: + properties: + ignoreMissingVnetServiceEndpoint: + description: Should the ServiceBus Namespace Network + Rule Set ignore missing Virtual Network Service Endpoint + option in the Subnet? Defaults to false. + type: boolean + subnetId: + description: The Subnet ID which should be able to access + this ServiceBus Namespace. + type: string + type: object + type: array + publicNetworkAccessEnabled: + description: Whether to allow traffic over public network. + Possible values are true and false. Defaults to true. + type: boolean + trustedServicesAllowed: + description: Are Azure Services that are known and trusted + for this resource type are allowed to bypass firewall configuration? + See Trusted Microsoft Services + type: boolean + type: object + premiumMessagingPartitions: + description: Specifies the number messaging partitions. Only valid + when sku is Premium and the minimum number is 1. Possible values + include 0, 1, 2, and 4. Defaults to 0 for Standard, Basic namespace. + Changing this forces a new resource to be created. + type: number + publicNetworkAccessEnabled: + description: Is public network access enabled for the Service + Bus Namespace? Defaults to true. + type: boolean + resourceGroupName: + description: |- + The name of the resource group in which to Changing this forces a new resource to be created. + create the namespace. + type: string + sku: + description: Defines which tier to use. 
Options are Basic, Standard + or Premium. Please note that setting this field to Premium will + force the creation of a new resource. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + zoneRedundant: + description: Whether or not this resource is zone redundant. sku + needs to be Premium. Changing this forces a new resource to + be created. + type: boolean + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/servicebus.azure.upbound.io_subscriptionrules.yaml b/package/crds/servicebus.azure.upbound.io_subscriptionrules.yaml index 671a5c7ef..f87ea4de1 100644 --- a/package/crds/servicebus.azure.upbound.io_subscriptionrules.yaml +++ b/package/crds/servicebus.azure.upbound.io_subscriptionrules.yaml @@ -574,3 +574,553 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: SubscriptionRule is the Schema for the SubscriptionRules API. + Manages a ServiceBus Subscription Rule. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SubscriptionRuleSpec defines the desired state of SubscriptionRule + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + action: + description: Represents set of actions written in SQL language-based + syntax that is performed against a BrokeredMessage. + type: string + correlationFilter: + description: A correlation_filter block as documented below to + be evaluated against a BrokeredMessage. Required when filter_type + is set to CorrelationFilter. + properties: + contentType: + description: Content type of the message. + type: string + correlationId: + description: Identifier of the correlation. + type: string + label: + description: Application specific label. + type: string + messageId: + description: Identifier of the message. + type: string + properties: + additionalProperties: + type: string + description: A list of user defined properties to be included + in the filter. Specified as a map of name/value pairs. + type: object + x-kubernetes-map-type: granular + replyTo: + description: Address of the queue to reply to. 
+ type: string + replyToSessionId: + description: Session identifier to reply to. + type: string + sessionId: + description: Session identifier. + type: string + to: + description: Address to send to. + type: string + type: object + filterType: + description: Type of filter to be applied to a BrokeredMessage. + Possible values are SqlFilter and CorrelationFilter. + type: string + sqlFilter: + description: Represents a filter written in SQL language-based + syntax that to be evaluated against a BrokeredMessage. Required + when filter_type is set to SqlFilter. + type: string + subscriptionId: + description: The ID of the ServiceBus Subscription in which this + Rule should be created. Changing this forces a new resource + to be created. + type: string + subscriptionIdRef: + description: Reference to a Subscription in servicebus to populate + subscriptionId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subscriptionIdSelector: + description: Selector for a Subscription in servicebus to populate + subscriptionId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + action: + description: Represents set of actions written in SQL language-based + syntax that is performed against a BrokeredMessage. 
+ type: string + correlationFilter: + description: A correlation_filter block as documented below to + be evaluated against a BrokeredMessage. Required when filter_type + is set to CorrelationFilter. + properties: + contentType: + description: Content type of the message. + type: string + correlationId: + description: Identifier of the correlation. + type: string + label: + description: Application specific label. + type: string + messageId: + description: Identifier of the message. + type: string + properties: + additionalProperties: + type: string + description: A list of user defined properties to be included + in the filter. Specified as a map of name/value pairs. + type: object + x-kubernetes-map-type: granular + replyTo: + description: Address of the queue to reply to. + type: string + replyToSessionId: + description: Session identifier to reply to. + type: string + sessionId: + description: Session identifier. + type: string + to: + description: Address to send to. + type: string + type: object + filterType: + description: Type of filter to be applied to a BrokeredMessage. + Possible values are SqlFilter and CorrelationFilter. + type: string + sqlFilter: + description: Represents a filter written in SQL language-based + syntax that to be evaluated against a BrokeredMessage. Required + when filter_type is set to SqlFilter. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.filterType is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.filterType) + || (has(self.initProvider) && has(self.initProvider.filterType))' + status: + description: SubscriptionRuleStatus defines the observed state of SubscriptionRule. + properties: + atProvider: + properties: + action: + description: Represents set of actions written in SQL language-based + syntax that is performed against a BrokeredMessage. + type: string + correlationFilter: + description: A correlation_filter block as documented below to + be evaluated against a BrokeredMessage. Required when filter_type + is set to CorrelationFilter. + properties: + contentType: + description: Content type of the message. + type: string + correlationId: + description: Identifier of the correlation. + type: string + label: + description: Application specific label. 
+ type: string + messageId: + description: Identifier of the message. + type: string + properties: + additionalProperties: + type: string + description: A list of user defined properties to be included + in the filter. Specified as a map of name/value pairs. + type: object + x-kubernetes-map-type: granular + replyTo: + description: Address of the queue to reply to. + type: string + replyToSessionId: + description: Session identifier to reply to. + type: string + sessionId: + description: Session identifier. + type: string + to: + description: Address to send to. + type: string + type: object + filterType: + description: Type of filter to be applied to a BrokeredMessage. + Possible values are SqlFilter and CorrelationFilter. + type: string + id: + description: The ServiceBus Subscription Rule ID. + type: string + sqlFilter: + description: Represents a filter written in SQL language-based + syntax that to be evaluated against a BrokeredMessage. Required + when filter_type is set to SqlFilter. + type: string + sqlFilterCompatibilityLevel: + type: number + subscriptionId: + description: The ID of the ServiceBus Subscription in which this + Rule should be created. Changing this forces a new resource + to be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/servicebus.azure.upbound.io_subscriptions.yaml b/package/crds/servicebus.azure.upbound.io_subscriptions.yaml index a13a40d63..b5fe771eb 100644 --- a/package/crds/servicebus.azure.upbound.io_subscriptions.yaml +++ b/package/crds/servicebus.azure.upbound.io_subscriptions.yaml @@ -627,3 +627,606 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Subscription is the Schema for the Subscriptions API. 
Manages + a ServiceBus Subscription. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SubscriptionSpec defines the desired state of Subscription + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + autoDeleteOnIdle: + description: The idle interval after which the topic is automatically + deleted as an ISO 8601 duration. The minimum duration is 5 minutes + or PT5M. + type: string + clientScopedSubscription: + description: A client_scoped_subscription block as defined below. + properties: + clientId: + description: Specifies the Client ID of the application that + created the client-scoped subscription. 
Changing this forces + a new resource to be created. + type: string + isClientScopedSubscriptionShareable: + description: Whether the client scoped subscription is shareable. + Defaults to true Changing this forces a new resource to + be created. + type: boolean + type: object + clientScopedSubscriptionEnabled: + description: whether the subscription is scoped to a client id. + Defaults to false. + type: boolean + deadLetteringOnFilterEvaluationError: + description: Boolean flag which controls whether the Subscription + has dead letter support on filter evaluation exceptions. Defaults + to true. + type: boolean + deadLetteringOnMessageExpiration: + description: Boolean flag which controls whether the Subscription + has dead letter support when a message expires. + type: boolean + defaultMessageTtl: + description: The Default message timespan to live as an ISO 8601 + duration. This is the duration after which the message expires, + starting from when the message is sent to Service Bus. This + is the default value used when TimeToLive is not set on a message + itself. + type: string + enableBatchedOperations: + description: Boolean flag which controls whether the Subscription + supports batched operations. + type: boolean + forwardDeadLetteredMessagesTo: + description: The name of a Queue or Topic to automatically forward + Dead Letter messages to. + type: string + forwardTo: + description: The name of a Queue or Topic to automatically forward + messages to. + type: string + lockDuration: + description: The lock duration for the subscription as an ISO + 8601 duration. The default value is 1 minute or P0DT0H1M0S . + The maximum value is 5 minutes or P0DT0H5M0S . + type: string + maxDeliveryCount: + description: The maximum number of deliveries. + type: number + requiresSession: + description: Boolean flag which controls whether this Subscription + supports the concept of a session. Changing this forces a new + resource to be created. 
+ type: boolean + status: + description: The status of the Subscription. Possible values are + Active,ReceiveDisabled, or Disabled. Defaults to Active. + type: string + topicId: + description: The ID of the ServiceBus Topic to create this Subscription + in. Changing this forces a new resource to be created. + type: string + topicIdRef: + description: Reference to a Topic in servicebus to populate topicId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + topicIdSelector: + description: Selector for a Topic in servicebus to populate topicId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + autoDeleteOnIdle: + description: The idle interval after which the topic is automatically + deleted as an ISO 8601 duration. The minimum duration is 5 minutes + or PT5M. + type: string + clientScopedSubscription: + description: A client_scoped_subscription block as defined below. + properties: + clientId: + description: Specifies the Client ID of the application that + created the client-scoped subscription. Changing this forces + a new resource to be created. + type: string + isClientScopedSubscriptionShareable: + description: Whether the client scoped subscription is shareable. + Defaults to true Changing this forces a new resource to + be created. 
+ type: boolean + type: object + clientScopedSubscriptionEnabled: + description: whether the subscription is scoped to a client id. + Defaults to false. + type: boolean + deadLetteringOnFilterEvaluationError: + description: Boolean flag which controls whether the Subscription + has dead letter support on filter evaluation exceptions. Defaults + to true. + type: boolean + deadLetteringOnMessageExpiration: + description: Boolean flag which controls whether the Subscription + has dead letter support when a message expires. + type: boolean + defaultMessageTtl: + description: The Default message timespan to live as an ISO 8601 + duration. This is the duration after which the message expires, + starting from when the message is sent to Service Bus. This + is the default value used when TimeToLive is not set on a message + itself. + type: string + enableBatchedOperations: + description: Boolean flag which controls whether the Subscription + supports batched operations. + type: boolean + forwardDeadLetteredMessagesTo: + description: The name of a Queue or Topic to automatically forward + Dead Letter messages to. + type: string + forwardTo: + description: The name of a Queue or Topic to automatically forward + messages to. + type: string + lockDuration: + description: The lock duration for the subscription as an ISO + 8601 duration. The default value is 1 minute or P0DT0H1M0S . + The maximum value is 5 minutes or P0DT0H5M0S . + type: string + maxDeliveryCount: + description: The maximum number of deliveries. + type: number + requiresSession: + description: Boolean flag which controls whether this Subscription + supports the concept of a session. Changing this forces a new + resource to be created. + type: boolean + status: + description: The status of the Subscription. Possible values are + Active,ReceiveDisabled, or Disabled. Defaults to Active. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. 
It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.maxDeliveryCount is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.maxDeliveryCount) + || (has(self.initProvider) && has(self.initProvider.maxDeliveryCount))' + status: + description: SubscriptionStatus defines the observed state of Subscription. + properties: + atProvider: + properties: + autoDeleteOnIdle: + description: The idle interval after which the topic is automatically + deleted as an ISO 8601 duration. 
The minimum duration is 5 minutes + or PT5M. + type: string + clientScopedSubscription: + description: A client_scoped_subscription block as defined below. + properties: + clientId: + description: Specifies the Client ID of the application that + created the client-scoped subscription. Changing this forces + a new resource to be created. + type: string + isClientScopedSubscriptionDurable: + description: Whether the client scoped subscription is durable. + This property can only be controlled from the application + side. + type: boolean + isClientScopedSubscriptionShareable: + description: Whether the client scoped subscription is shareable. + Defaults to true Changing this forces a new resource to + be created. + type: boolean + type: object + clientScopedSubscriptionEnabled: + description: whether the subscription is scoped to a client id. + Defaults to false. + type: boolean + deadLetteringOnFilterEvaluationError: + description: Boolean flag which controls whether the Subscription + has dead letter support on filter evaluation exceptions. Defaults + to true. + type: boolean + deadLetteringOnMessageExpiration: + description: Boolean flag which controls whether the Subscription + has dead letter support when a message expires. + type: boolean + defaultMessageTtl: + description: The Default message timespan to live as an ISO 8601 + duration. This is the duration after which the message expires, + starting from when the message is sent to Service Bus. This + is the default value used when TimeToLive is not set on a message + itself. + type: string + enableBatchedOperations: + description: Boolean flag which controls whether the Subscription + supports batched operations. + type: boolean + forwardDeadLetteredMessagesTo: + description: The name of a Queue or Topic to automatically forward + Dead Letter messages to. + type: string + forwardTo: + description: The name of a Queue or Topic to automatically forward + messages to. 
+ type: string + id: + description: The ServiceBus Subscription ID. + type: string + lockDuration: + description: The lock duration for the subscription as an ISO + 8601 duration. The default value is 1 minute or P0DT0H1M0S . + The maximum value is 5 minutes or P0DT0H5M0S . + type: string + maxDeliveryCount: + description: The maximum number of deliveries. + type: number + requiresSession: + description: Boolean flag which controls whether this Subscription + supports the concept of a session. Changing this forces a new + resource to be created. + type: boolean + status: + description: The status of the Subscription. Possible values are + Active,ReceiveDisabled, or Disabled. Defaults to Active. + type: string + topicId: + description: The ID of the ServiceBus Topic to create this Subscription + in. Changing this forces a new resource to be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. 
At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/servicefabric.azure.upbound.io_clusters.yaml b/package/crds/servicefabric.azure.upbound.io_clusters.yaml index b77a9b8e9..d7cbfe10f 100644 --- a/package/crds/servicefabric.azure.upbound.io_clusters.yaml +++ b/package/crds/servicefabric.azure.upbound.io_clusters.yaml @@ -1630,3 +1630,1546 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Cluster is the Schema for the Clusters API. Manages a Service + Fabric Cluster. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ClusterSpec defines the desired state of Cluster + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + addOnFeatures: + description: A List of one or more features which should be enabled, + such as DnsService. + items: + type: string + type: array + x-kubernetes-list-type: set + azureActiveDirectory: + description: An azure_active_directory block as defined below. + properties: + clientApplicationId: + description: The Azure Active Directory Client ID which should + be used for the Client Application. + type: string + clusterApplicationId: + description: The Azure Active Directory Cluster Application + ID. + type: string + tenantId: + description: The Azure Active Directory Tenant ID. + type: string + type: object + certificate: + description: A certificate block as defined below. Conflicts with + certificate_common_names. + properties: + thumbprint: + description: The Thumbprint of the Certificate. + type: string + thumbprintSecondary: + description: The Secondary Thumbprint of the Certificate. 
+ type: string + x509StoreName: + description: The X509 Store where the Certificate Exists, + such as My. + type: string + type: object + certificateCommonNames: + description: A certificate_common_names block as defined below. + Conflicts with certificate. + properties: + commonNames: + description: A common_names block as defined below. + items: + properties: + certificateCommonName: + description: The common or subject name of the certificate. + type: string + certificateIssuerThumbprint: + description: The Issuer Thumbprint of the Certificate. + type: string + type: object + type: array + x509StoreName: + description: The X509 Store where the Certificate Exists, + such as My. + type: string + type: object + clientCertificateCommonName: + description: A client_certificate_common_name block as defined + below. + items: + properties: + commonName: + description: The common or subject name of the certificate. + type: string + isAdmin: + description: Does the Client Certificate have Admin Access + to the cluster? Non-admin clients can only perform read + only operations on the cluster. + type: boolean + issuerThumbprint: + description: The Issuer Thumbprint of the Certificate. + type: string + type: object + type: array + clientCertificateThumbprint: + description: One or more client_certificate_thumbprint blocks + as defined below. + items: + properties: + isAdmin: + description: Does the Client Certificate have Admin Access + to the cluster? Non-admin clients can only perform read + only operations on the cluster. + type: boolean + thumbprint: + description: The Thumbprint associated with the Client Certificate. + type: string + type: object + type: array + clusterCodeVersion: + description: Required if Upgrade Mode set to Manual, Specifies + the Version of the Cluster Code of the cluster. + type: string + diagnosticsConfig: + description: A diagnostics_config block as defined below. 
+ properties: + blobEndpoint: + description: The Blob Endpoint of the Storage Account. + type: string + protectedAccountKeyName: + description: The protected diagnostics storage key name, such + as StorageAccountKey1. + type: string + queueEndpoint: + description: The Queue Endpoint of the Storage Account. + type: string + storageAccountName: + description: The name of the Storage Account where the Diagnostics + should be sent to. + type: string + tableEndpoint: + description: The Table Endpoint of the Storage Account. + type: string + type: object + fabricSettings: + description: One or more fabric_settings blocks as defined below. + items: + properties: + name: + description: The name of the Fabric Setting, such as Security + or Federation. + type: string + parameters: + additionalProperties: + type: string + description: A map containing settings for the specified + Fabric Setting. + type: object + x-kubernetes-map-type: granular + type: object + type: array + location: + description: Specifies the Azure Region where the Service Fabric + Cluster should exist. Changing this forces a new resource to + be created. + type: string + managementEndpoint: + description: Specifies the Management Endpoint of the cluster + such as http://example.com. Changing this forces a new resource + to be created. + type: string + nodeType: + description: One or more node_type blocks as defined below. + items: + properties: + applicationPorts: + description: A application_ports block as defined below. + properties: + endPort: + description: The end of the Ephemeral Port Range on + this Node Type. + type: number + startPort: + description: The start of the Ephemeral Port Range on + this Node Type. + type: number + type: object + capacities: + additionalProperties: + type: string + description: The capacity tags applied to the nodes in the + node type, the cluster resource manager uses these tags + to understand how much resource a node has. 
+ type: object + x-kubernetes-map-type: granular + clientEndpointPort: + description: The Port used for the Client Endpoint for this + Node Type. + type: number + durabilityLevel: + description: The Durability Level for this Node Type. Possible + values include Bronze, Gold and Silver. Defaults to Bronze. + type: string + ephemeralPorts: + description: A ephemeral_ports block as defined below. + properties: + endPort: + description: The end of the Ephemeral Port Range on + this Node Type. + type: number + startPort: + description: The start of the Ephemeral Port Range on + this Node Type. + type: number + type: object + httpEndpointPort: + description: The Port used for the HTTP Endpoint for this + Node Type. + type: number + instanceCount: + description: The number of nodes for this Node Type. + type: number + isPrimary: + description: Is this the Primary Node Type? + type: boolean + isStateless: + description: Should this node type run only stateless services? + type: boolean + multipleAvailabilityZones: + description: Does this node type span availability zones? + type: boolean + name: + description: The name of the Node Type. + type: string + placementProperties: + additionalProperties: + type: string + description: The placement tags applied to nodes in the + node type, which can be used to indicate where certain + services (workload) should run. + type: object + x-kubernetes-map-type: granular + reverseProxyEndpointPort: + description: The Port used for the Reverse Proxy Endpoint + for this Node Type. Changing this will upgrade the cluster. + type: number + type: object + type: array + reliabilityLevel: + description: Specifies the Reliability Level of the Cluster. Possible + values include None, Bronze, Silver, Gold and Platinum. + type: string + resourceGroupName: + description: The name of the Resource Group in which the Service + Fabric Cluster exists. Changing this forces a new resource to + be created. 
+ type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + reverseProxyCertificate: + description: A reverse_proxy_certificate block as defined below. + Conflicts with reverse_proxy_certificate_common_names. + properties: + thumbprint: + description: The Thumbprint of the Certificate. + type: string + thumbprintSecondary: + description: The Secondary Thumbprint of the Certificate. + type: string + x509StoreName: + description: The X509 Store where the Certificate Exists, + such as My. + type: string + type: object + reverseProxyCertificateCommonNames: + description: A reverse_proxy_certificate_common_names block as + defined below. Conflicts with reverse_proxy_certificate. + properties: + commonNames: + description: A common_names block as defined below. + items: + properties: + certificateCommonName: + description: The common or subject name of the certificate. + type: string + certificateIssuerThumbprint: + description: The Issuer Thumbprint of the Certificate. + type: string + type: object + type: array + x509StoreName: + description: The X509 Store where the Certificate Exists, + such as My. + type: string + type: object + serviceFabricZonalUpgradeMode: + description: Specifies the logical grouping of VMs in upgrade + domains. Possible values are Hierarchical or Parallel. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + upgradeMode: + description: Specifies the Upgrade Mode of the cluster. Possible + values are Automatic or Manual. + type: string + upgradePolicy: + description: A upgrade_policy block as defined below. 
+ properties: + deltaHealthPolicy: + description: A delta_health_policy block as defined below + properties: + maxDeltaUnhealthyApplicationsPercent: + description: Specifies the maximum tolerated percentage + of delta unhealthy applications that can have aggregated + health states of error. If the current unhealthy applications + do not respect the percentage relative to the state + at the beginning of the upgrade, the cluster is unhealthy. + Defaults to 0. + type: number + maxDeltaUnhealthyNodesPercent: + description: Specifies the maximum tolerated percentage + of delta unhealthy nodes that can have aggregated health + states of error. If the current unhealthy nodes do not + respect the percentage relative to the state at the + beginning of the upgrade, the cluster is unhealthy. + Defaults to 0. + type: number + maxUpgradeDomainDeltaUnhealthyNodesPercent: + description: Specifies the maximum tolerated percentage + of upgrade domain delta unhealthy nodes that can have + aggregated health state of error. If there is any upgrade + domain where the current unhealthy nodes do not respect + the percentage relative to the state at the beginning + of the upgrade, the cluster is unhealthy. Defaults to + 0. + type: number + type: object + forceRestartEnabled: + description: Indicates whether to restart the Service Fabric + node even if only dynamic configurations have changed. + type: boolean + healthCheckRetryTimeout: + description: Specifies the duration, in "hh:mm:ss" string + format, after which Service Fabric retries the health check + if the previous health check fails. Defaults to 00:45:00. + type: string + healthCheckStableDuration: + description: Specifies the duration, in "hh:mm:ss" string + format, that Service Fabric waits in order to verify that + the cluster is stable before it continues to the next upgrade + domain or completes the upgrade. This wait duration prevents + undetected changes of health right after the health check + is performed. 
Defaults to 00:01:00. + type: string + healthCheckWaitDuration: + description: Specifies the duration, in "hh:mm:ss" string + format, that Service Fabric waits before it performs the + initial health check after it finishes the upgrade on the + upgrade domain. Defaults to 00:00:30. + type: string + healthPolicy: + description: A health_policy block as defined below + properties: + maxUnhealthyApplicationsPercent: + description: Specifies the maximum tolerated percentage + of applications that can have aggregated health state + of error. If the upgrade exceeds this percentage, the + cluster is unhealthy. Defaults to 0. + type: number + maxUnhealthyNodesPercent: + description: Specifies the maximum tolerated percentage + of nodes that can have aggregated health states of error. + If an upgrade exceeds this percentage, the cluster is + unhealthy. Defaults to 0. + type: number + type: object + upgradeDomainTimeout: + description: Specifies the duration, in "hh:mm:ss" string + format, that Service Fabric takes to upgrade a single upgrade + domain. After this period, the upgrade fails. Defaults to + 02:00:00. + type: string + upgradeReplicaSetCheckTimeout: + description: Specifies the duration, in "hh:mm:ss" string + format, that Service Fabric waits for a replica set to reconfigure + into a safe state, if it is not already in a safe state, + before Service Fabric proceeds with the upgrade. Defaults + to 10675199.02:48:05.4775807. + type: string + upgradeTimeout: + description: Specifies the duration, in "hh:mm:ss" string + format, that Service Fabric takes for the entire upgrade. + After this period, the upgrade fails. Defaults to 12:00:00. + type: string + type: object + vmImage: + description: Specifies the Image expected for the Service Fabric + Cluster, such as Windows. Changing this forces a new resource + to be created. 
+ type: string + vmssZonalUpgradeMode: + description: Specifies the upgrade mode for the virtual machine + scale set updates that happen in all availability zones at once. + Possible values are Hierarchical or Parallel. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + addOnFeatures: + description: A List of one or more features which should be enabled, + such as DnsService. + items: + type: string + type: array + x-kubernetes-list-type: set + azureActiveDirectory: + description: An azure_active_directory block as defined below. + properties: + clientApplicationId: + description: The Azure Active Directory Client ID which should + be used for the Client Application. + type: string + clusterApplicationId: + description: The Azure Active Directory Cluster Application + ID. + type: string + tenantId: + description: The Azure Active Directory Tenant ID. + type: string + type: object + certificate: + description: A certificate block as defined below. Conflicts with + certificate_common_names. + properties: + thumbprint: + description: The Thumbprint of the Certificate. + type: string + thumbprintSecondary: + description: The Secondary Thumbprint of the Certificate. + type: string + x509StoreName: + description: The X509 Store where the Certificate Exists, + such as My. 
+ type: string + type: object + certificateCommonNames: + description: A certificate_common_names block as defined below. + Conflicts with certificate. + properties: + commonNames: + description: A common_names block as defined below. + items: + properties: + certificateCommonName: + description: The common or subject name of the certificate. + type: string + certificateIssuerThumbprint: + description: The Issuer Thumbprint of the Certificate. + type: string + type: object + type: array + x509StoreName: + description: The X509 Store where the Certificate Exists, + such as My. + type: string + type: object + clientCertificateCommonName: + description: A client_certificate_common_name block as defined + below. + items: + properties: + commonName: + description: The common or subject name of the certificate. + type: string + isAdmin: + description: Does the Client Certificate have Admin Access + to the cluster? Non-admin clients can only perform read + only operations on the cluster. + type: boolean + issuerThumbprint: + description: The Issuer Thumbprint of the Certificate. + type: string + type: object + type: array + clientCertificateThumbprint: + description: One or more client_certificate_thumbprint blocks + as defined below. + items: + properties: + isAdmin: + description: Does the Client Certificate have Admin Access + to the cluster? Non-admin clients can only perform read + only operations on the cluster. + type: boolean + thumbprint: + description: The Thumbprint associated with the Client Certificate. + type: string + type: object + type: array + clusterCodeVersion: + description: Required if Upgrade Mode set to Manual, Specifies + the Version of the Cluster Code of the cluster. + type: string + diagnosticsConfig: + description: A diagnostics_config block as defined below. + properties: + blobEndpoint: + description: The Blob Endpoint of the Storage Account. 
+ type: string + protectedAccountKeyName: + description: The protected diagnostics storage key name, such + as StorageAccountKey1. + type: string + queueEndpoint: + description: The Queue Endpoint of the Storage Account. + type: string + storageAccountName: + description: The name of the Storage Account where the Diagnostics + should be sent to. + type: string + tableEndpoint: + description: The Table Endpoint of the Storage Account. + type: string + type: object + fabricSettings: + description: One or more fabric_settings blocks as defined below. + items: + properties: + name: + description: The name of the Fabric Setting, such as Security + or Federation. + type: string + parameters: + additionalProperties: + type: string + description: A map containing settings for the specified + Fabric Setting. + type: object + x-kubernetes-map-type: granular + type: object + type: array + location: + description: Specifies the Azure Region where the Service Fabric + Cluster should exist. Changing this forces a new resource to + be created. + type: string + managementEndpoint: + description: Specifies the Management Endpoint of the cluster + such as http://example.com. Changing this forces a new resource + to be created. + type: string + nodeType: + description: One or more node_type blocks as defined below. + items: + properties: + applicationPorts: + description: A application_ports block as defined below. + properties: + endPort: + description: The end of the Ephemeral Port Range on + this Node Type. + type: number + startPort: + description: The start of the Ephemeral Port Range on + this Node Type. + type: number + type: object + capacities: + additionalProperties: + type: string + description: The capacity tags applied to the nodes in the + node type, the cluster resource manager uses these tags + to understand how much resource a node has. 
+ type: object + x-kubernetes-map-type: granular + clientEndpointPort: + description: The Port used for the Client Endpoint for this + Node Type. + type: number + durabilityLevel: + description: The Durability Level for this Node Type. Possible + values include Bronze, Gold and Silver. Defaults to Bronze. + type: string + ephemeralPorts: + description: A ephemeral_ports block as defined below. + properties: + endPort: + description: The end of the Ephemeral Port Range on + this Node Type. + type: number + startPort: + description: The start of the Ephemeral Port Range on + this Node Type. + type: number + type: object + httpEndpointPort: + description: The Port used for the HTTP Endpoint for this + Node Type. + type: number + instanceCount: + description: The number of nodes for this Node Type. + type: number + isPrimary: + description: Is this the Primary Node Type? + type: boolean + isStateless: + description: Should this node type run only stateless services? + type: boolean + multipleAvailabilityZones: + description: Does this node type span availability zones? + type: boolean + name: + description: The name of the Node Type. + type: string + placementProperties: + additionalProperties: + type: string + description: The placement tags applied to nodes in the + node type, which can be used to indicate where certain + services (workload) should run. + type: object + x-kubernetes-map-type: granular + reverseProxyEndpointPort: + description: The Port used for the Reverse Proxy Endpoint + for this Node Type. Changing this will upgrade the cluster. + type: number + type: object + type: array + reliabilityLevel: + description: Specifies the Reliability Level of the Cluster. Possible + values include None, Bronze, Silver, Gold and Platinum. + type: string + reverseProxyCertificate: + description: A reverse_proxy_certificate block as defined below. + Conflicts with reverse_proxy_certificate_common_names. 
+ properties: + thumbprint: + description: The Thumbprint of the Certificate. + type: string + thumbprintSecondary: + description: The Secondary Thumbprint of the Certificate. + type: string + x509StoreName: + description: The X509 Store where the Certificate Exists, + such as My. + type: string + type: object + reverseProxyCertificateCommonNames: + description: A reverse_proxy_certificate_common_names block as + defined below. Conflicts with reverse_proxy_certificate. + properties: + commonNames: + description: A common_names block as defined below. + items: + properties: + certificateCommonName: + description: The common or subject name of the certificate. + type: string + certificateIssuerThumbprint: + description: The Issuer Thumbprint of the Certificate. + type: string + type: object + type: array + x509StoreName: + description: The X509 Store where the Certificate Exists, + such as My. + type: string + type: object + serviceFabricZonalUpgradeMode: + description: Specifies the logical grouping of VMs in upgrade + domains. Possible values are Hierarchical or Parallel. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + upgradeMode: + description: Specifies the Upgrade Mode of the cluster. Possible + values are Automatic or Manual. + type: string + upgradePolicy: + description: A upgrade_policy block as defined below. + properties: + deltaHealthPolicy: + description: A delta_health_policy block as defined below + properties: + maxDeltaUnhealthyApplicationsPercent: + description: Specifies the maximum tolerated percentage + of delta unhealthy applications that can have aggregated + health states of error. If the current unhealthy applications + do not respect the percentage relative to the state + at the beginning of the upgrade, the cluster is unhealthy. + Defaults to 0. 
+ type: number + maxDeltaUnhealthyNodesPercent: + description: Specifies the maximum tolerated percentage + of delta unhealthy nodes that can have aggregated health + states of error. If the current unhealthy nodes do not + respect the percentage relative to the state at the + beginning of the upgrade, the cluster is unhealthy. + Defaults to 0. + type: number + maxUpgradeDomainDeltaUnhealthyNodesPercent: + description: Specifies the maximum tolerated percentage + of upgrade domain delta unhealthy nodes that can have + aggregated health state of error. If there is any upgrade + domain where the current unhealthy nodes do not respect + the percentage relative to the state at the beginning + of the upgrade, the cluster is unhealthy. Defaults to + 0. + type: number + type: object + forceRestartEnabled: + description: Indicates whether to restart the Service Fabric + node even if only dynamic configurations have changed. + type: boolean + healthCheckRetryTimeout: + description: Specifies the duration, in "hh:mm:ss" string + format, after which Service Fabric retries the health check + if the previous health check fails. Defaults to 00:45:00. + type: string + healthCheckStableDuration: + description: Specifies the duration, in "hh:mm:ss" string + format, that Service Fabric waits in order to verify that + the cluster is stable before it continues to the next upgrade + domain or completes the upgrade. This wait duration prevents + undetected changes of health right after the health check + is performed. Defaults to 00:01:00. + type: string + healthCheckWaitDuration: + description: Specifies the duration, in "hh:mm:ss" string + format, that Service Fabric waits before it performs the + initial health check after it finishes the upgrade on the + upgrade domain. Defaults to 00:00:30. 
+ type: string + healthPolicy: + description: A health_policy block as defined below + properties: + maxUnhealthyApplicationsPercent: + description: Specifies the maximum tolerated percentage + of applications that can have aggregated health state + of error. If the upgrade exceeds this percentage, the + cluster is unhealthy. Defaults to 0. + type: number + maxUnhealthyNodesPercent: + description: Specifies the maximum tolerated percentage + of nodes that can have aggregated health states of error. + If an upgrade exceeds this percentage, the cluster is + unhealthy. Defaults to 0. + type: number + type: object + upgradeDomainTimeout: + description: Specifies the duration, in "hh:mm:ss" string + format, that Service Fabric takes to upgrade a single upgrade + domain. After this period, the upgrade fails. Defaults to + 02:00:00. + type: string + upgradeReplicaSetCheckTimeout: + description: Specifies the duration, in "hh:mm:ss" string + format, that Service Fabric waits for a replica set to reconfigure + into a safe state, if it is not already in a safe state, + before Service Fabric proceeds with the upgrade. Defaults + to 10675199.02:48:05.4775807. + type: string + upgradeTimeout: + description: Specifies the duration, in "hh:mm:ss" string + format, that Service Fabric takes for the entire upgrade. + After this period, the upgrade fails. Defaults to 12:00:00. + type: string + type: object + vmImage: + description: Specifies the Image expected for the Service Fabric + Cluster, such as Windows. Changing this forces a new resource + to be created. + type: string + vmssZonalUpgradeMode: + description: Specifies the upgrade mode for the virtual machine + scale set updates that happen in all availability zones at once. + Possible values are Hierarchical or Parallel. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. 
+ ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.managementEndpoint is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.managementEndpoint) + || (has(self.initProvider) && has(self.initProvider.managementEndpoint))' + - message: spec.forProvider.nodeType is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.nodeType) + || (has(self.initProvider) && has(self.initProvider.nodeType))' + - message: spec.forProvider.reliabilityLevel is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.reliabilityLevel) + || (has(self.initProvider) && has(self.initProvider.reliabilityLevel))' + - message: spec.forProvider.upgradeMode is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.upgradeMode) + || (has(self.initProvider) && has(self.initProvider.upgradeMode))' + - message: spec.forProvider.vmImage is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.vmImage) + || (has(self.initProvider) && has(self.initProvider.vmImage))' + status: + description: ClusterStatus defines the 
observed state of Cluster. + properties: + atProvider: + properties: + addOnFeatures: + description: A List of one or more features which should be enabled, + such as DnsService. + items: + type: string + type: array + x-kubernetes-list-type: set + azureActiveDirectory: + description: An azure_active_directory block as defined below. + properties: + clientApplicationId: + description: The Azure Active Directory Client ID which should + be used for the Client Application. + type: string + clusterApplicationId: + description: The Azure Active Directory Cluster Application + ID. + type: string + tenantId: + description: The Azure Active Directory Tenant ID. + type: string + type: object + certificate: + description: A certificate block as defined below. Conflicts with + certificate_common_names. + properties: + thumbprint: + description: The Thumbprint of the Certificate. + type: string + thumbprintSecondary: + description: The Secondary Thumbprint of the Certificate. + type: string + x509StoreName: + description: The X509 Store where the Certificate Exists, + such as My. + type: string + type: object + certificateCommonNames: + description: A certificate_common_names block as defined below. + Conflicts with certificate. + properties: + commonNames: + description: A common_names block as defined below. + items: + properties: + certificateCommonName: + description: The common or subject name of the certificate. + type: string + certificateIssuerThumbprint: + description: The Issuer Thumbprint of the Certificate. + type: string + type: object + type: array + x509StoreName: + description: The X509 Store where the Certificate Exists, + such as My. + type: string + type: object + clientCertificateCommonName: + description: A client_certificate_common_name block as defined + below. + items: + properties: + commonName: + description: The common or subject name of the certificate. 
+ type: string + isAdmin: + description: Does the Client Certificate have Admin Access + to the cluster? Non-admin clients can only perform read + only operations on the cluster. + type: boolean + issuerThumbprint: + description: The Issuer Thumbprint of the Certificate. + type: string + type: object + type: array + clientCertificateThumbprint: + description: One or more client_certificate_thumbprint blocks + as defined below. + items: + properties: + isAdmin: + description: Does the Client Certificate have Admin Access + to the cluster? Non-admin clients can only perform read + only operations on the cluster. + type: boolean + thumbprint: + description: The Thumbprint associated with the Client Certificate. + type: string + type: object + type: array + clusterCodeVersion: + description: Required if Upgrade Mode set to Manual, Specifies + the Version of the Cluster Code of the cluster. + type: string + clusterEndpoint: + description: The Cluster Endpoint for this Service Fabric Cluster. + type: string + diagnosticsConfig: + description: A diagnostics_config block as defined below. + properties: + blobEndpoint: + description: The Blob Endpoint of the Storage Account. + type: string + protectedAccountKeyName: + description: The protected diagnostics storage key name, such + as StorageAccountKey1. + type: string + queueEndpoint: + description: The Queue Endpoint of the Storage Account. + type: string + storageAccountName: + description: The name of the Storage Account where the Diagnostics + should be sent to. + type: string + tableEndpoint: + description: The Table Endpoint of the Storage Account. + type: string + type: object + fabricSettings: + description: One or more fabric_settings blocks as defined below. + items: + properties: + name: + description: The name of the Fabric Setting, such as Security + or Federation. + type: string + parameters: + additionalProperties: + type: string + description: A map containing settings for the specified + Fabric Setting. 
+ type: object + x-kubernetes-map-type: granular + type: object + type: array + id: + description: The ID of the Service Fabric Cluster. + type: string + location: + description: Specifies the Azure Region where the Service Fabric + Cluster should exist. Changing this forces a new resource to + be created. + type: string + managementEndpoint: + description: Specifies the Management Endpoint of the cluster + such as http://example.com. Changing this forces a new resource + to be created. + type: string + nodeType: + description: One or more node_type blocks as defined below. + items: + properties: + applicationPorts: + description: A application_ports block as defined below. + properties: + endPort: + description: The end of the Ephemeral Port Range on + this Node Type. + type: number + startPort: + description: The start of the Ephemeral Port Range on + this Node Type. + type: number + type: object + capacities: + additionalProperties: + type: string + description: The capacity tags applied to the nodes in the + node type, the cluster resource manager uses these tags + to understand how much resource a node has. + type: object + x-kubernetes-map-type: granular + clientEndpointPort: + description: The Port used for the Client Endpoint for this + Node Type. + type: number + durabilityLevel: + description: The Durability Level for this Node Type. Possible + values include Bronze, Gold and Silver. Defaults to Bronze. + type: string + ephemeralPorts: + description: A ephemeral_ports block as defined below. + properties: + endPort: + description: The end of the Ephemeral Port Range on + this Node Type. + type: number + startPort: + description: The start of the Ephemeral Port Range on + this Node Type. + type: number + type: object + httpEndpointPort: + description: The Port used for the HTTP Endpoint for this + Node Type. + type: number + instanceCount: + description: The number of nodes for this Node Type. 
+ type: number + isPrimary: + description: Is this the Primary Node Type? + type: boolean + isStateless: + description: Should this node type run only stateless services? + type: boolean + multipleAvailabilityZones: + description: Does this node type span availability zones? + type: boolean + name: + description: The name of the Node Type. + type: string + placementProperties: + additionalProperties: + type: string + description: The placement tags applied to nodes in the + node type, which can be used to indicate where certain + services (workload) should run. + type: object + x-kubernetes-map-type: granular + reverseProxyEndpointPort: + description: The Port used for the Reverse Proxy Endpoint + for this Node Type. Changing this will upgrade the cluster. + type: number + type: object + type: array + reliabilityLevel: + description: Specifies the Reliability Level of the Cluster. Possible + values include None, Bronze, Silver, Gold and Platinum. + type: string + resourceGroupName: + description: The name of the Resource Group in which the Service + Fabric Cluster exists. Changing this forces a new resource to + be created. + type: string + reverseProxyCertificate: + description: A reverse_proxy_certificate block as defined below. + Conflicts with reverse_proxy_certificate_common_names. + properties: + thumbprint: + description: The Thumbprint of the Certificate. + type: string + thumbprintSecondary: + description: The Secondary Thumbprint of the Certificate. + type: string + x509StoreName: + description: The X509 Store where the Certificate Exists, + such as My. + type: string + type: object + reverseProxyCertificateCommonNames: + description: A reverse_proxy_certificate_common_names block as + defined below. Conflicts with reverse_proxy_certificate. + properties: + commonNames: + description: A common_names block as defined below. + items: + properties: + certificateCommonName: + description: The common or subject name of the certificate. 
+ type: string + certificateIssuerThumbprint: + description: The Issuer Thumbprint of the Certificate. + type: string + type: object + type: array + x509StoreName: + description: The X509 Store where the Certificate Exists, + such as My. + type: string + type: object + serviceFabricZonalUpgradeMode: + description: Specifies the logical grouping of VMs in upgrade + domains. Possible values are Hierarchical or Parallel. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + upgradeMode: + description: Specifies the Upgrade Mode of the cluster. Possible + values are Automatic or Manual. + type: string + upgradePolicy: + description: A upgrade_policy block as defined below. + properties: + deltaHealthPolicy: + description: A delta_health_policy block as defined below + properties: + maxDeltaUnhealthyApplicationsPercent: + description: Specifies the maximum tolerated percentage + of delta unhealthy applications that can have aggregated + health states of error. If the current unhealthy applications + do not respect the percentage relative to the state + at the beginning of the upgrade, the cluster is unhealthy. + Defaults to 0. + type: number + maxDeltaUnhealthyNodesPercent: + description: Specifies the maximum tolerated percentage + of delta unhealthy nodes that can have aggregated health + states of error. If the current unhealthy nodes do not + respect the percentage relative to the state at the + beginning of the upgrade, the cluster is unhealthy. + Defaults to 0. + type: number + maxUpgradeDomainDeltaUnhealthyNodesPercent: + description: Specifies the maximum tolerated percentage + of upgrade domain delta unhealthy nodes that can have + aggregated health state of error. If there is any upgrade + domain where the current unhealthy nodes do not respect + the percentage relative to the state at the beginning + of the upgrade, the cluster is unhealthy. 
Defaults to + 0. + type: number + type: object + forceRestartEnabled: + description: Indicates whether to restart the Service Fabric + node even if only dynamic configurations have changed. + type: boolean + healthCheckRetryTimeout: + description: Specifies the duration, in "hh:mm:ss" string + format, after which Service Fabric retries the health check + if the previous health check fails. Defaults to 00:45:00. + type: string + healthCheckStableDuration: + description: Specifies the duration, in "hh:mm:ss" string + format, that Service Fabric waits in order to verify that + the cluster is stable before it continues to the next upgrade + domain or completes the upgrade. This wait duration prevents + undetected changes of health right after the health check + is performed. Defaults to 00:01:00. + type: string + healthCheckWaitDuration: + description: Specifies the duration, in "hh:mm:ss" string + format, that Service Fabric waits before it performs the + initial health check after it finishes the upgrade on the + upgrade domain. Defaults to 00:00:30. + type: string + healthPolicy: + description: A health_policy block as defined below + properties: + maxUnhealthyApplicationsPercent: + description: Specifies the maximum tolerated percentage + of applications that can have aggregated health state + of error. If the upgrade exceeds this percentage, the + cluster is unhealthy. Defaults to 0. + type: number + maxUnhealthyNodesPercent: + description: Specifies the maximum tolerated percentage + of nodes that can have aggregated health states of error. + If an upgrade exceeds this percentage, the cluster is + unhealthy. Defaults to 0. + type: number + type: object + upgradeDomainTimeout: + description: Specifies the duration, in "hh:mm:ss" string + format, that Service Fabric takes to upgrade a single upgrade + domain. After this period, the upgrade fails. Defaults to + 02:00:00. 
+ type: string + upgradeReplicaSetCheckTimeout: + description: Specifies the duration, in "hh:mm:ss" string + format, that Service Fabric waits for a replica set to reconfigure + into a safe state, if it is not already in a safe state, + before Service Fabric proceeds with the upgrade. Defaults + to 10675199.02:48:05.4775807. + type: string + upgradeTimeout: + description: Specifies the duration, in "hh:mm:ss" string + format, that Service Fabric takes for the entire upgrade. + After this period, the upgrade fails. Defaults to 12:00:00. + type: string + type: object + vmImage: + description: Specifies the Image expected for the Service Fabric + Cluster, such as Windows. Changing this forces a new resource + to be created. + type: string + vmssZonalUpgradeMode: + description: Specifies the upgrade mode for the virtual machine + scale set updates that happen in all availability zones at once. + Possible values are Hierarchical or Parallel. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. 
+ type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/servicefabric.azure.upbound.io_managedclusters.yaml b/package/crds/servicefabric.azure.upbound.io_managedclusters.yaml index 3c712832b..352d29b59 100644 --- a/package/crds/servicefabric.azure.upbound.io_managedclusters.yaml +++ b/package/crds/servicefabric.azure.upbound.io_managedclusters.yaml @@ -1121,3 +1121,1094 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ManagedCluster is the Schema for the ManagedClusters API. Manages + a Resource Group. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ManagedClusterSpec defines the desired state of ManagedCluster + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + authentication: + description: Controls how connections to the cluster are authenticated. + A authentication block as defined below. + properties: + activeDirectory: + description: A active_directory block as defined above. + properties: + clientApplicationId: + description: The ID of the Client Application. + type: string + clusterApplicationId: + description: The ID of the Cluster Application. + type: string + tenantId: + description: The ID of the Tenant. + type: string + type: object + certificate: + description: One or more certificate blocks as defined below. + items: + properties: + commonName: + description: The certificate's CN. 
+ type: string + thumbprint: + description: The thumbprint of the certificate. + type: string + type: + description: The type of the certificate. Can be AdminClient + or ReadOnlyClient. + type: string + type: object + type: array + type: object + backupServiceEnabled: + description: If true, backup service is enabled. + type: boolean + clientConnectionPort: + description: Port to use when connecting to the cluster. + type: number + customFabricSetting: + description: One or more custom_fabric_setting blocks as defined + below. + items: + properties: + parameter: + description: Parameter name. + type: string + section: + description: Section name. + type: string + value: + description: Parameter value. + type: string + type: object + type: array + dnsName: + description: Hostname for the cluster. If unset the cluster's + name will be used.. + type: string + dnsServiceEnabled: + description: If true, DNS service is enabled. + type: boolean + httpGatewayPort: + description: Port that should be used by the Service Fabric Explorer + to visualize applications and cluster status. + type: number + lbRule: + description: One or more lb_rule blocks as defined below. + items: + properties: + backendPort: + description: LB Backend port. + type: number + frontendPort: + description: LB Frontend port. + type: number + probeProtocol: + description: Protocol for the probe. Can be one of tcp, + udp, http, or https. + type: string + probeRequestPath: + description: Path for the probe to check, when probe protocol + is set to http. + type: string + protocol: + description: The transport protocol used in this rule. Can + be one of tcp or udp. + type: string + type: object + type: array + location: + description: The Azure Region where the Resource Group should + exist. Changing this forces a new Resource Group to be created. + type: string + nodeType: + description: One or more node_type blocks as defined below. 
+ items: + properties: + applicationPortRange: + description: Sets the port range available for applications. + Format is -, for example 10000-20000. + type: string + capacities: + additionalProperties: + type: string + description: Specifies a list of key/value pairs used to + set capacity tags for this node type. + type: object + x-kubernetes-map-type: granular + dataDiskSizeGb: + description: The size of the data disk in gigabytes.. + type: number + dataDiskType: + description: The type of the disk to use for storing data. + It can be one of Premium_LRS, Standard_LRS, or StandardSSD_LRS. + Defaults to Standard_LRS. + type: string + ephemeralPortRange: + description: Sets the port range available for the OS. Format + is -, for example 10000-20000. There + has to be at least 255 ports available and cannot overlap + with application_port_range.. + type: string + multiplePlacementGroupsEnabled: + description: If set the node type can be composed of multiple + placement groups. + type: boolean + name: + description: The name which should be used for this node + type. + type: string + placementProperties: + additionalProperties: + type: string + description: Specifies a list of placement tags that can + be used to indicate where services should run.. + type: object + x-kubernetes-map-type: granular + primary: + description: If set to true, system services will run on + this node type. Only one node type should be marked as + primary. Primary node type cannot be deleted or changed + once they're created. + type: boolean + stateless: + description: If set to true, only stateless workloads can + run on this node type. + type: boolean + vmImageOffer: + description: The offer type of the marketplace image cluster + VMs will use. + type: string + vmImagePublisher: + description: The publisher of the marketplace image cluster + VMs will use. + type: string + vmImageSku: + description: The SKU of the marketplace image cluster VMs + will use. 
+ type: string + vmImageVersion: + description: The version of the marketplace image cluster + VMs will use. + type: string + vmInstanceCount: + description: The number of instances this node type will + launch. + type: number + vmSecrets: + description: One or more vm_secrets blocks as defined below. + items: + properties: + certificates: + description: One or more certificates blocks as defined + above. + items: + properties: + store: + description: The certificate store on the Virtual + Machine to which the certificate should be + added. + type: string + url: + description: The URL of a certificate that has + been uploaded to Key Vault as a secret + type: string + type: object + type: array + vaultId: + description: The ID of the Vault that contain the + certificates. + type: string + type: object + type: array + vmSize: + description: The size of the instances in this node type. + type: string + type: object + type: array + passwordSecretRef: + description: Administrator password for the VMs that will be created + as part of this cluster. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + resourceGroupName: + description: The name of the Resource Group where the Resource + Group should exist. Changing this forces a new Resource Group + to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sku: + description: SKU for this cluster. Changing this forces a new + resource to be created. Default is Basic, allowed values are + either Basic or Standard. 
+ type: string + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Resource Group. + type: object + x-kubernetes-map-type: granular + upgradeWave: + description: Upgrade wave for the fabric runtime. Default is Wave0, + allowed value must be one of Wave0, Wave1, or Wave2. + type: string + username: + description: Administrator password for the VMs that will be created + as part of this cluster. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + authentication: + description: Controls how connections to the cluster are authenticated. + A authentication block as defined below. + properties: + activeDirectory: + description: A active_directory block as defined above. + properties: + clientApplicationId: + description: The ID of the Client Application. + type: string + clusterApplicationId: + description: The ID of the Cluster Application. + type: string + tenantId: + description: The ID of the Tenant. + type: string + type: object + certificate: + description: One or more certificate blocks as defined below. + items: + properties: + commonName: + description: The certificate's CN. + type: string + thumbprint: + description: The thumbprint of the certificate. + type: string + type: + description: The type of the certificate. 
Can be AdminClient + or ReadOnlyClient. + type: string + type: object + type: array + type: object + backupServiceEnabled: + description: If true, backup service is enabled. + type: boolean + clientConnectionPort: + description: Port to use when connecting to the cluster. + type: number + customFabricSetting: + description: One or more custom_fabric_setting blocks as defined + below. + items: + properties: + parameter: + description: Parameter name. + type: string + section: + description: Section name. + type: string + value: + description: Parameter value. + type: string + type: object + type: array + dnsName: + description: Hostname for the cluster. If unset the cluster's + name will be used.. + type: string + dnsServiceEnabled: + description: If true, DNS service is enabled. + type: boolean + httpGatewayPort: + description: Port that should be used by the Service Fabric Explorer + to visualize applications and cluster status. + type: number + lbRule: + description: One or more lb_rule blocks as defined below. + items: + properties: + backendPort: + description: LB Backend port. + type: number + frontendPort: + description: LB Frontend port. + type: number + probeProtocol: + description: Protocol for the probe. Can be one of tcp, + udp, http, or https. + type: string + probeRequestPath: + description: Path for the probe to check, when probe protocol + is set to http. + type: string + protocol: + description: The transport protocol used in this rule. Can + be one of tcp or udp. + type: string + type: object + type: array + location: + description: The Azure Region where the Resource Group should + exist. Changing this forces a new Resource Group to be created. + type: string + nodeType: + description: One or more node_type blocks as defined below. + items: + properties: + applicationPortRange: + description: Sets the port range available for applications. + Format is -, for example 10000-20000. 
+ type: string + capacities: + additionalProperties: + type: string + description: Specifies a list of key/value pairs used to + set capacity tags for this node type. + type: object + x-kubernetes-map-type: granular + dataDiskSizeGb: + description: The size of the data disk in gigabytes.. + type: number + dataDiskType: + description: The type of the disk to use for storing data. + It can be one of Premium_LRS, Standard_LRS, or StandardSSD_LRS. + Defaults to Standard_LRS. + type: string + ephemeralPortRange: + description: Sets the port range available for the OS. Format + is -, for example 10000-20000. There + has to be at least 255 ports available and cannot overlap + with application_port_range.. + type: string + multiplePlacementGroupsEnabled: + description: If set the node type can be composed of multiple + placement groups. + type: boolean + name: + description: The name which should be used for this node + type. + type: string + placementProperties: + additionalProperties: + type: string + description: Specifies a list of placement tags that can + be used to indicate where services should run.. + type: object + x-kubernetes-map-type: granular + primary: + description: If set to true, system services will run on + this node type. Only one node type should be marked as + primary. Primary node type cannot be deleted or changed + once they're created. + type: boolean + stateless: + description: If set to true, only stateless workloads can + run on this node type. + type: boolean + vmImageOffer: + description: The offer type of the marketplace image cluster + VMs will use. + type: string + vmImagePublisher: + description: The publisher of the marketplace image cluster + VMs will use. + type: string + vmImageSku: + description: The SKU of the marketplace image cluster VMs + will use. + type: string + vmImageVersion: + description: The version of the marketplace image cluster + VMs will use. 
+ type: string + vmInstanceCount: + description: The number of instances this node type will + launch. + type: number + vmSecrets: + description: One or more vm_secrets blocks as defined below. + items: + properties: + certificates: + description: One or more certificates blocks as defined + above. + items: + properties: + store: + description: The certificate store on the Virtual + Machine to which the certificate should be + added. + type: string + url: + description: The URL of a certificate that has + been uploaded to Key Vault as a secret + type: string + type: object + type: array + vaultId: + description: The ID of the Vault that contain the + certificates. + type: string + type: object + type: array + vmSize: + description: The size of the instances in this node type. + type: string + type: object + type: array + sku: + description: SKU for this cluster. Changing this forces a new + resource to be created. Default is Basic, allowed values are + either Basic or Standard. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Resource Group. + type: object + x-kubernetes-map-type: granular + upgradeWave: + description: Upgrade wave for the fabric runtime. Default is Wave0, + allowed value must be one of Wave0, Wave1, or Wave2. + type: string + username: + description: Administrator password for the VMs that will be created + as part of this cluster. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. 
If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.clientConnectionPort is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.clientConnectionPort) + || (has(self.initProvider) && has(self.initProvider.clientConnectionPort))' + - message: spec.forProvider.httpGatewayPort is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.httpGatewayPort) + || (has(self.initProvider) && has(self.initProvider.httpGatewayPort))' + - message: spec.forProvider.lbRule is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.lbRule) + || (has(self.initProvider) && has(self.initProvider.lbRule))' + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + status: + description: ManagedClusterStatus defines the observed state of ManagedCluster. + properties: + atProvider: + properties: + authentication: + description: Controls how connections to the cluster are authenticated. + A authentication block as defined below. + properties: + activeDirectory: + description: A active_directory block as defined above. + properties: + clientApplicationId: + description: The ID of the Client Application. + type: string + clusterApplicationId: + description: The ID of the Cluster Application. + type: string + tenantId: + description: The ID of the Tenant. 
+ type: string + type: object + certificate: + description: One or more certificate blocks as defined below. + items: + properties: + commonName: + description: The certificate's CN. + type: string + thumbprint: + description: The thumbprint of the certificate. + type: string + type: + description: The type of the certificate. Can be AdminClient + or ReadOnlyClient. + type: string + type: object + type: array + type: object + backupServiceEnabled: + description: If true, backup service is enabled. + type: boolean + clientConnectionPort: + description: Port to use when connecting to the cluster. + type: number + customFabricSetting: + description: One or more custom_fabric_setting blocks as defined + below. + items: + properties: + parameter: + description: Parameter name. + type: string + section: + description: Section name. + type: string + value: + description: Parameter value. + type: string + type: object + type: array + dnsName: + description: Hostname for the cluster. If unset the cluster's + name will be used.. + type: string + dnsServiceEnabled: + description: If true, DNS service is enabled. + type: boolean + httpGatewayPort: + description: Port that should be used by the Service Fabric Explorer + to visualize applications and cluster status. + type: number + id: + description: The ID of the Resource Group. + type: string + lbRule: + description: One or more lb_rule blocks as defined below. + items: + properties: + backendPort: + description: LB Backend port. + type: number + frontendPort: + description: LB Frontend port. + type: number + probeProtocol: + description: Protocol for the probe. Can be one of tcp, + udp, http, or https. + type: string + probeRequestPath: + description: Path for the probe to check, when probe protocol + is set to http. + type: string + protocol: + description: The transport protocol used in this rule. Can + be one of tcp or udp. 
+ type: string + type: object + type: array + location: + description: The Azure Region where the Resource Group should + exist. Changing this forces a new Resource Group to be created. + type: string + nodeType: + description: One or more node_type blocks as defined below. + items: + properties: + applicationPortRange: + description: Sets the port range available for applications. + Format is -, for example 10000-20000. + type: string + capacities: + additionalProperties: + type: string + description: Specifies a list of key/value pairs used to + set capacity tags for this node type. + type: object + x-kubernetes-map-type: granular + dataDiskSizeGb: + description: The size of the data disk in gigabytes.. + type: number + dataDiskType: + description: The type of the disk to use for storing data. + It can be one of Premium_LRS, Standard_LRS, or StandardSSD_LRS. + Defaults to Standard_LRS. + type: string + ephemeralPortRange: + description: Sets the port range available for the OS. Format + is -, for example 10000-20000. There + has to be at least 255 ports available and cannot overlap + with application_port_range.. + type: string + id: + description: The ID of the Resource Group. + type: string + multiplePlacementGroupsEnabled: + description: If set the node type can be composed of multiple + placement groups. + type: boolean + name: + description: The name which should be used for this node + type. + type: string + placementProperties: + additionalProperties: + type: string + description: Specifies a list of placement tags that can + be used to indicate where services should run.. + type: object + x-kubernetes-map-type: granular + primary: + description: If set to true, system services will run on + this node type. Only one node type should be marked as + primary. Primary node type cannot be deleted or changed + once they're created. + type: boolean + stateless: + description: If set to true, only stateless workloads can + run on this node type. 
+ type: boolean + vmImageOffer: + description: The offer type of the marketplace image cluster + VMs will use. + type: string + vmImagePublisher: + description: The publisher of the marketplace image cluster + VMs will use. + type: string + vmImageSku: + description: The SKU of the marketplace image cluster VMs + will use. + type: string + vmImageVersion: + description: The version of the marketplace image cluster + VMs will use. + type: string + vmInstanceCount: + description: The number of instances this node type will + launch. + type: number + vmSecrets: + description: One or more vm_secrets blocks as defined below. + items: + properties: + certificates: + description: One or more certificates blocks as defined + above. + items: + properties: + store: + description: The certificate store on the Virtual + Machine to which the certificate should be + added. + type: string + url: + description: The URL of a certificate that has + been uploaded to Key Vault as a secret + type: string + type: object + type: array + vaultId: + description: The ID of the Vault that contain the + certificates. + type: string + type: object + type: array + vmSize: + description: The size of the instances in this node type. + type: string + type: object + type: array + resourceGroupName: + description: The name of the Resource Group where the Resource + Group should exist. Changing this forces a new Resource Group + to be created. + type: string + sku: + description: SKU for this cluster. Changing this forces a new + resource to be created. Default is Basic, allowed values are + either Basic or Standard. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Resource Group. + type: object + x-kubernetes-map-type: granular + upgradeWave: + description: Upgrade wave for the fabric runtime. Default is Wave0, + allowed value must be one of Wave0, Wave1, or Wave2. 
+ type: string + username: + description: Administrator password for the VMs that will be created + as part of this cluster. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/servicelinker.azure.upbound.io_springcloudconnections.yaml b/package/crds/servicelinker.azure.upbound.io_springcloudconnections.yaml index 04fa7e7e5..64e78fce3 100644 --- a/package/crds/servicelinker.azure.upbound.io_springcloudconnections.yaml +++ b/package/crds/servicelinker.azure.upbound.io_springcloudconnections.yaml @@ -889,3 +889,862 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: SpringCloudConnection is the Schema for the SpringCloudConnections + API. Manages a service connector for spring cloud app. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SpringCloudConnectionSpec defines the desired state of SpringCloudConnection + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + authentication: + description: The authentication info. An authentication block + as defined below. + properties: + certificateSecretRef: + description: Service principal certificate for servicePrincipal + auth. Should be specified when type is set to servicePrincipalCertificate. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + clientId: + description: Client ID for userAssignedIdentity or servicePrincipal + auth. Should be specified when type is set to servicePrincipalSecret + or servicePrincipalCertificate. When type is set to userAssignedIdentity, + client_id and subscription_id should be either both specified + or both not specified. + type: string + name: + description: Username or account name for secret auth. 
name + and secret should be either both specified or both not specified + when type is set to secret. + type: string + principalId: + description: Principal ID for servicePrincipal auth. Should + be specified when type is set to servicePrincipalSecret + or servicePrincipalCertificate. + type: string + secretSecretRef: + description: Password or account key for secret auth. secret + and name should be either both specified or both not specified + when type is set to secret. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + subscriptionId: + description: Subscription ID for userAssignedIdentity. subscription_id + and client_id should be either both specified or both not + specified. + type: string + type: + description: The authentication type. Possible values are + systemAssignedIdentity, userAssignedIdentity, servicePrincipalSecret, + servicePrincipalCertificate, secret. Changing this forces + a new resource to be created. + type: string + type: object + clientType: + description: The application client type. Possible values are + none, dotnet, java, python, go, php, ruby, django, nodejs and + springBoot. Defaults to none. + type: string + name: + description: The name of the service connection. Changing this + forces a new resource to be created. + type: string + secretStore: + description: An option to store secret value in secure place. + An secret_store block as defined below. + properties: + keyVaultId: + description: The key vault id to store secret. + type: string + type: object + springCloudId: + description: The ID of the data source spring cloud. Changing + this forces a new resource to be created. + type: string + springCloudIdRef: + description: Reference to a SpringCloudJavaDeployment in appplatform + to populate springCloudId. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + springCloudIdSelector: + description: Selector for a SpringCloudJavaDeployment in appplatform + to populate springCloudId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + targetResourceId: + description: The ID of the target resource. Changing this forces + a new resource to be created. Possible target resources are + Postgres, PostgresFlexible, Mysql, Sql, Redis, RedisEnterprise, + CosmosCassandra, CosmosGremlin, CosmosMongo, CosmosSql, CosmosTable, + StorageBlob, StorageQueue, StorageFile, StorageTable, AppConfig, + EventHub, ServiceBus, SignalR, WebPubSub, ConfluentKafka. The + integration guide can be found here. + type: string + targetResourceIdRef: + description: Reference to a SQLDatabase in cosmosdb to populate + targetResourceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + targetResourceIdSelector: + description: Selector for a SQLDatabase in cosmosdb to populate + targetResourceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + vnetSolution: + description: The type of the VNet solution. Possible values are + serviceEndpoint, privateLink. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + authentication: + description: The authentication info. An authentication block + as defined below. + properties: + clientId: + description: Client ID for userAssignedIdentity or servicePrincipal + auth. 
Should be specified when type is set to servicePrincipalSecret + or servicePrincipalCertificate. When type is set to userAssignedIdentity, + client_id and subscription_id should be either both specified + or both not specified. + type: string + name: + description: Username or account name for secret auth. name + and secret should be either both specified or both not specified + when type is set to secret. + type: string + principalId: + description: Principal ID for servicePrincipal auth. Should + be specified when type is set to servicePrincipalSecret + or servicePrincipalCertificate. + type: string + subscriptionId: + description: Subscription ID for userAssignedIdentity. subscription_id + and client_id should be either both specified or both not + specified. + type: string + type: + description: The authentication type. Possible values are + systemAssignedIdentity, userAssignedIdentity, servicePrincipalSecret, + servicePrincipalCertificate, secret. Changing this forces + a new resource to be created. + type: string + type: object + clientType: + description: The application client type. Possible values are + none, dotnet, java, python, go, php, ruby, django, nodejs and + springBoot. Defaults to none. + type: string + name: + description: The name of the service connection. Changing this + forces a new resource to be created. + type: string + secretStore: + description: An option to store secret value in secure place. + An secret_store block as defined below. + properties: + keyVaultId: + description: The key vault id to store secret. + type: string + type: object + springCloudId: + description: The ID of the data source spring cloud. Changing + this forces a new resource to be created. + type: string + springCloudIdRef: + description: Reference to a SpringCloudJavaDeployment in appplatform + to populate springCloudId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + springCloudIdSelector: + description: Selector for a SpringCloudJavaDeployment in appplatform + to populate springCloudId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + targetResourceId: + description: The ID of the target resource. Changing this forces + a new resource to be created. Possible target resources are + Postgres, PostgresFlexible, Mysql, Sql, Redis, RedisEnterprise, + CosmosCassandra, CosmosGremlin, CosmosMongo, CosmosSql, CosmosTable, + StorageBlob, StorageQueue, StorageFile, StorageTable, AppConfig, + EventHub, ServiceBus, SignalR, WebPubSub, ConfluentKafka. The + integration guide can be found here. + type: string + targetResourceIdRef: + description: Reference to a SQLDatabase in cosmosdb to populate + targetResourceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + targetResourceIdSelector: + description: Selector for a SQLDatabase in cosmosdb to populate + targetResourceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + vnetSolution: + description: The type of the VNet solution. Possible values are + serviceEndpoint, privateLink. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.authentication is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.authentication) + || (has(self.initProvider) && has(self.initProvider.authentication))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: SpringCloudConnectionStatus defines the observed state of + SpringCloudConnection. + properties: + atProvider: + properties: + authentication: + description: The authentication info. An authentication block + as defined below. + properties: + clientId: + description: Client ID for userAssignedIdentity or servicePrincipal + auth. Should be specified when type is set to servicePrincipalSecret + or servicePrincipalCertificate. When type is set to userAssignedIdentity, + client_id and subscription_id should be either both specified + or both not specified. + type: string + name: + description: Username or account name for secret auth. name + and secret should be either both specified or both not specified + when type is set to secret. 
+ type: string + principalId: + description: Principal ID for servicePrincipal auth. Should + be specified when type is set to servicePrincipalSecret + or servicePrincipalCertificate. + type: string + subscriptionId: + description: Subscription ID for userAssignedIdentity. subscription_id + and client_id should be either both specified or both not + specified. + type: string + type: + description: The authentication type. Possible values are + systemAssignedIdentity, userAssignedIdentity, servicePrincipalSecret, + servicePrincipalCertificate, secret. Changing this forces + a new resource to be created. + type: string + type: object + clientType: + description: The application client type. Possible values are + none, dotnet, java, python, go, php, ruby, django, nodejs and + springBoot. Defaults to none. + type: string + id: + description: The ID of the service connector. + type: string + name: + description: The name of the service connection. Changing this + forces a new resource to be created. + type: string + secretStore: + description: An option to store secret value in secure place. + An secret_store block as defined below. + properties: + keyVaultId: + description: The key vault id to store secret. + type: string + type: object + springCloudId: + description: The ID of the data source spring cloud. Changing + this forces a new resource to be created. + type: string + targetResourceId: + description: The ID of the target resource. Changing this forces + a new resource to be created. Possible target resources are + Postgres, PostgresFlexible, Mysql, Sql, Redis, RedisEnterprise, + CosmosCassandra, CosmosGremlin, CosmosMongo, CosmosSql, CosmosTable, + StorageBlob, StorageQueue, StorageFile, StorageTable, AppConfig, + EventHub, ServiceBus, SignalR, WebPubSub, ConfluentKafka. The + integration guide can be found here. + type: string + vnetSolution: + description: The type of the VNet solution. Possible values are + serviceEndpoint, privateLink. 
+ type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/signalrservice.azure.upbound.io_networkacls.yaml b/package/crds/signalrservice.azure.upbound.io_networkacls.yaml index e0be02dfa..858148ee8 100644 --- a/package/crds/signalrservice.azure.upbound.io_networkacls.yaml +++ b/package/crds/signalrservice.azure.upbound.io_networkacls.yaml @@ -806,3 +806,785 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: NetworkACL is the Schema for the NetworkACLs API. Manages the + Network ACL for a SignalR service. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: NetworkACLSpec defines the desired state of NetworkACL + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + defaultAction: + description: The default action to control the network access + when no other rule matches. Possible values are Allow and Deny. + type: string + privateEndpoint: + description: A private_endpoint block as defined below. + items: + properties: + allowedRequestTypes: + description: The allowed request types for the Private Endpoint + Connection. Possible values are ClientConnection, ServerConnection, + RESTAPI and Trace. + items: + type: string + type: array + x-kubernetes-list-type: set + deniedRequestTypes: + description: The denied request types for the Private Endpoint + Connection. Possible values are ClientConnection, ServerConnection, + RESTAPI and Trace. + items: + type: string + type: array + x-kubernetes-list-type: set + id: + description: The ID of the Private Endpoint which is based + on the SignalR service. + type: string + idRef: + description: Reference to a PrivateEndpoint in network to + populate id. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + idSelector: + description: Selector for a PrivateEndpoint in network to + populate id. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + publicNetwork: + description: A public_network block as defined below. + properties: + allowedRequestTypes: + description: The allowed request types for the public network. + Possible values are ClientConnection, ServerConnection, + RESTAPI and Trace. + items: + type: string + type: array + x-kubernetes-list-type: set + deniedRequestTypes: + description: The denied request types for the public network. + Possible values are ClientConnection, ServerConnection, + RESTAPI and Trace. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + signalrServiceId: + description: The ID of the SignalR service. Changing this forces + a new resource to be created. + type: string + signalrServiceIdRef: + description: Reference to a Service in signalrservice to populate + signalrServiceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + signalrServiceIdSelector: + description: Selector for a Service in signalrservice to populate + signalrServiceId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + defaultAction: + description: The default action to control the network access + when no other rule matches. Possible values are Allow and Deny. 
+ type: string + privateEndpoint: + description: A private_endpoint block as defined below. + items: + properties: + allowedRequestTypes: + description: The allowed request types for the Private Endpoint + Connection. Possible values are ClientConnection, ServerConnection, + RESTAPI and Trace. + items: + type: string + type: array + x-kubernetes-list-type: set + deniedRequestTypes: + description: The denied request types for the Private Endpoint + Connection. Possible values are ClientConnection, ServerConnection, + RESTAPI and Trace. + items: + type: string + type: array + x-kubernetes-list-type: set + id: + description: The ID of the Private Endpoint which is based + on the SignalR service. + type: string + idRef: + description: Reference to a PrivateEndpoint in network to + populate id. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + idSelector: + description: Selector for a PrivateEndpoint in network to + populate id. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + publicNetwork: + description: A public_network block as defined below. + properties: + allowedRequestTypes: + description: The allowed request types for the public network. + Possible values are ClientConnection, ServerConnection, + RESTAPI and Trace. + items: + type: string + type: array + x-kubernetes-list-type: set + deniedRequestTypes: + description: The denied request types for the public network. + Possible values are ClientConnection, ServerConnection, + RESTAPI and Trace. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + signalrServiceId: + description: The ID of the SignalR service. Changing this forces + a new resource to be created. + type: string + signalrServiceIdRef: + description: Reference to a Service in signalrservice to populate + signalrServiceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + signalrServiceIdSelector: + description: Selector for a Service in signalrservice to populate + signalrServiceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. 
+ properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.defaultAction is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.defaultAction) + || (has(self.initProvider) && has(self.initProvider.defaultAction))' + - message: spec.forProvider.publicNetwork is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.publicNetwork) + || (has(self.initProvider) && has(self.initProvider.publicNetwork))' + status: + description: NetworkACLStatus defines the observed state of NetworkACL. + properties: + atProvider: + properties: + defaultAction: + description: The default action to control the network access + when no other rule matches. Possible values are Allow and Deny. + type: string + id: + description: The ID of the SignalR service. + type: string + privateEndpoint: + description: A private_endpoint block as defined below. + items: + properties: + allowedRequestTypes: + description: The allowed request types for the Private Endpoint + Connection. Possible values are ClientConnection, ServerConnection, + RESTAPI and Trace. + items: + type: string + type: array + x-kubernetes-list-type: set + deniedRequestTypes: + description: The denied request types for the Private Endpoint + Connection. Possible values are ClientConnection, ServerConnection, + RESTAPI and Trace. + items: + type: string + type: array + x-kubernetes-list-type: set + id: + description: The ID of the Private Endpoint which is based + on the SignalR service. + type: string + type: object + type: array + publicNetwork: + description: A public_network block as defined below. 
+ properties: + allowedRequestTypes: + description: The allowed request types for the public network. + Possible values are ClientConnection, ServerConnection, + RESTAPI and Trace. + items: + type: string + type: array + x-kubernetes-list-type: set + deniedRequestTypes: + description: The denied request types for the public network. + Possible values are ClientConnection, ServerConnection, + RESTAPI and Trace. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + signalrServiceId: + description: The ID of the SignalR service. Changing this forces + a new resource to be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/signalrservice.azure.upbound.io_services.yaml b/package/crds/signalrservice.azure.upbound.io_services.yaml index e4c699354..d48523857 100644 --- a/package/crds/signalrservice.azure.upbound.io_services.yaml +++ b/package/crds/signalrservice.azure.upbound.io_services.yaml @@ -893,3 +893,860 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Service is the Schema for the Services API. Manages an Azure + SignalR service. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ServiceSpec defines the desired state of Service + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + aadAuthEnabled: + description: Whether to enable AAD auth? Defaults to true. + type: boolean + connectivityLogsEnabled: + description: Specifies if Connectivity Logs are enabled or not. + Defaults to false. + type: boolean + cors: + description: A cors block as documented below. + items: + properties: + allowedOrigins: + description: A list of origins which should be able to make + cross-origin calls. * can be used to allow all calls. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + httpRequestLogsEnabled: + description: Specifies if Http Request Logs are enabled or not. + Defaults to false. + type: boolean + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this signalR. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this signalR. 
Possible values + are SystemAssigned, UserAssigned. + type: string + type: object + liveTrace: + description: A live_trace block as defined below. + properties: + connectivityLogsEnabled: + description: Whether the log category ConnectivityLogs is + enabled? Defaults to true + type: boolean + enabled: + description: Whether the live trace is enabled? Defaults to + true. + type: boolean + httpRequestLogsEnabled: + description: Whether the log category HttpRequestLogs is enabled? + Defaults to true + type: boolean + messagingLogsEnabled: + description: Whether the log category MessagingLogs is enabled? + Defaults to true + type: boolean + type: object + liveTraceEnabled: + description: Specifies if Live Trace is enabled or not. Defaults + to false. + type: boolean + localAuthEnabled: + description: Whether to enable local auth? Defaults to true. + type: boolean + location: + description: Specifies the supported Azure location where the + SignalR service exists. Changing this forces a new resource + to be created. + type: string + messagingLogsEnabled: + description: Specifies if Messaging Logs are enabled or not. Defaults + to false. + type: boolean + publicNetworkAccessEnabled: + description: Whether to enable public network access? Defaults + to true. + type: boolean + resourceGroupName: + description: The name of the resource group in which to create + the SignalR service. Changing this forces a new resource to + be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + serverlessConnectionTimeoutInSeconds: + description: Specifies the client connection timeout. Defaults + to 30. + type: number + serviceMode: + description: Specifies the service mode. Possible values are Classic, + Default and Serverless. Defaults to Default. 
+ type: string + sku: + description: A sku block as documented below. + properties: + capacity: + description: Specifies the number of units associated with + this SignalR service. Valid values are 1, 2, 3, 4, 5, 6, + 7, 8, 9, 10, 20, 30, 40, 50, 60, 70, 80, 90 and 100. + type: number + name: + description: Specifies which tier to use. Valid values are + Free_F1, Standard_S1 and Premium_P1. + type: string + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + tlsClientCertEnabled: + description: Whether to request client certificate during TLS + handshake? Defaults to false. + type: boolean + upstreamEndpoint: + description: An upstream_endpoint block as documented below. Using + this block requires the SignalR service to be Serverless. When + creating multiple blocks they will be processed in the order + they are defined in. + items: + properties: + categoryPattern: + description: The categories to match on, or * for all. + items: + type: string + type: array + eventPattern: + description: The events to match on, or * for all. + items: + type: string + type: array + hubPattern: + description: The hubs to match on, or * for all. + items: + type: string + type: array + urlTemplate: + description: The upstream URL Template. This can be a url + or a template such as http://host.com/{hub}/api/{category}/{event}. + type: string + userAssignedIdentityId: + description: Specifies the Managed Identity IDs to be assigned + to this signalR upstream setting by using resource uuid + as both system assigned and user assigned identity is + supported. + type: string + type: object + type: array + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. 
+ InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + aadAuthEnabled: + description: Whether to enable AAD auth? Defaults to true. + type: boolean + connectivityLogsEnabled: + description: Specifies if Connectivity Logs are enabled or not. + Defaults to false. + type: boolean + cors: + description: A cors block as documented below. + items: + properties: + allowedOrigins: + description: A list of origins which should be able to make + cross-origin calls. * can be used to allow all calls. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + httpRequestLogsEnabled: + description: Specifies if Http Request Logs are enabled or not. + Defaults to false. + type: boolean + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this signalR. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this signalR. Possible values + are SystemAssigned, UserAssigned. + type: string + type: object + liveTrace: + description: A live_trace block as defined below. + properties: + connectivityLogsEnabled: + description: Whether the log category ConnectivityLogs is + enabled? Defaults to true + type: boolean + enabled: + description: Whether the live trace is enabled? Defaults to + true. 
+ type: boolean + httpRequestLogsEnabled: + description: Whether the log category HttpRequestLogs is enabled? + Defaults to true + type: boolean + messagingLogsEnabled: + description: Whether the log category MessagingLogs is enabled? + Defaults to true + type: boolean + type: object + liveTraceEnabled: + description: Specifies if Live Trace is enabled or not. Defaults + to false. + type: boolean + localAuthEnabled: + description: Whether to enable local auth? Defaults to true. + type: boolean + location: + description: Specifies the supported Azure location where the + SignalR service exists. Changing this forces a new resource + to be created. + type: string + messagingLogsEnabled: + description: Specifies if Messaging Logs are enabled or not. Defaults + to false. + type: boolean + publicNetworkAccessEnabled: + description: Whether to enable public network access? Defaults + to true. + type: boolean + serverlessConnectionTimeoutInSeconds: + description: Specifies the client connection timeout. Defaults + to 30. + type: number + serviceMode: + description: Specifies the service mode. Possible values are Classic, + Default and Serverless. Defaults to Default. + type: string + sku: + description: A sku block as documented below. + properties: + capacity: + description: Specifies the number of units associated with + this SignalR service. Valid values are 1, 2, 3, 4, 5, 6, + 7, 8, 9, 10, 20, 30, 40, 50, 60, 70, 80, 90 and 100. + type: number + name: + description: Specifies which tier to use. Valid values are + Free_F1, Standard_S1 and Premium_P1. + type: string + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + tlsClientCertEnabled: + description: Whether to request client certificate during TLS + handshake? Defaults to false. + type: boolean + upstreamEndpoint: + description: An upstream_endpoint block as documented below. 
Using + this block requires the SignalR service to be Serverless. When + creating multiple blocks they will be processed in the order + they are defined in. + items: + properties: + categoryPattern: + description: The categories to match on, or * for all. + items: + type: string + type: array + eventPattern: + description: The events to match on, or * for all. + items: + type: string + type: array + hubPattern: + description: The hubs to match on, or * for all. + items: + type: string + type: array + urlTemplate: + description: The upstream URL Template. This can be a url + or a template such as http://host.com/{hub}/api/{category}/{event}. + type: string + userAssignedIdentityId: + description: Specifies the Managed Identity IDs to be assigned + to this signalR upstream setting by using resource uuid + as both system assigned and user assigned identity is + supported. + type: string + type: object + type: array + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.sku is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.sku) + || (has(self.initProvider) && has(self.initProvider.sku))' + status: + description: ServiceStatus defines the observed state of Service. + properties: + atProvider: + properties: + aadAuthEnabled: + description: Whether to enable AAD auth? Defaults to true. + type: boolean + connectivityLogsEnabled: + description: Specifies if Connectivity Logs are enabled or not. + Defaults to false. + type: boolean + cors: + description: A cors block as documented below. + items: + properties: + allowedOrigins: + description: A list of origins which should be able to make + cross-origin calls. * can be used to allow all calls. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + hostname: + description: The FQDN of the SignalR service. 
+ type: string + httpRequestLogsEnabled: + description: Specifies if Http Request Logs are enabled or not. + Defaults to false. + type: boolean + id: + description: The ID of the SignalR service. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this signalR. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The ID of the SignalR service. + type: string + tenantId: + description: The ID of the SignalR service. + type: string + type: + description: Specifies the type of Managed Service Identity + that should be configured on this signalR. Possible values + are SystemAssigned, UserAssigned. + type: string + type: object + ipAddress: + description: The publicly accessible IP of the SignalR service. + type: string + liveTrace: + description: A live_trace block as defined below. + properties: + connectivityLogsEnabled: + description: Whether the log category ConnectivityLogs is + enabled? Defaults to true + type: boolean + enabled: + description: Whether the live trace is enabled? Defaults to + true. + type: boolean + httpRequestLogsEnabled: + description: Whether the log category HttpRequestLogs is enabled? + Defaults to true + type: boolean + messagingLogsEnabled: + description: Whether the log category MessagingLogs is enabled? + Defaults to true + type: boolean + type: object + liveTraceEnabled: + description: Specifies if Live Trace is enabled or not. Defaults + to false. + type: boolean + localAuthEnabled: + description: Whether to enable local auth? Defaults to true. + type: boolean + location: + description: Specifies the supported Azure location where the + SignalR service exists. Changing this forces a new resource + to be created. + type: string + messagingLogsEnabled: + description: Specifies if Messaging Logs are enabled or not. Defaults + to false. 
+ type: boolean + publicNetworkAccessEnabled: + description: Whether to enable public network access? Defaults + to true. + type: boolean + publicPort: + description: The publicly accessible port of the SignalR service + which is designed for browser/client use. + type: number + resourceGroupName: + description: The name of the resource group in which to create + the SignalR service. Changing this forces a new resource to + be created. + type: string + serverPort: + description: The publicly accessible port of the SignalR service + which is designed for customer server side use. + type: number + serverlessConnectionTimeoutInSeconds: + description: Specifies the client connection timeout. Defaults + to 30. + type: number + serviceMode: + description: Specifies the service mode. Possible values are Classic, + Default and Serverless. Defaults to Default. + type: string + sku: + description: A sku block as documented below. + properties: + capacity: + description: Specifies the number of units associated with + this SignalR service. Valid values are 1, 2, 3, 4, 5, 6, + 7, 8, 9, 10, 20, 30, 40, 50, 60, 70, 80, 90 and 100. + type: number + name: + description: Specifies which tier to use. Valid values are + Free_F1, Standard_S1 and Premium_P1. + type: string + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + tlsClientCertEnabled: + description: Whether to request client certificate during TLS + handshake? Defaults to false. + type: boolean + upstreamEndpoint: + description: An upstream_endpoint block as documented below. Using + this block requires the SignalR service to be Serverless. When + creating multiple blocks they will be processed in the order + they are defined in. + items: + properties: + categoryPattern: + description: The categories to match on, or * for all. 
+ items: + type: string + type: array + eventPattern: + description: The events to match on, or * for all. + items: + type: string + type: array + hubPattern: + description: The hubs to match on, or * for all. + items: + type: string + type: array + urlTemplate: + description: The upstream URL Template. This can be a url + or a template such as http://host.com/{hub}/api/{category}/{event}. + type: string + userAssignedIdentityId: + description: Specifies the Managed Identity IDs to be assigned + to this signalR upstream setting by using resource uuid + as both system assigned and user assigned identity is + supported. + type: string + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/signalrservice.azure.upbound.io_webpubsubhubs.yaml b/package/crds/signalrservice.azure.upbound.io_webpubsubhubs.yaml index 0cd4681ad..99c0fcdaa 100644 --- a/package/crds/signalrservice.azure.upbound.io_webpubsubhubs.yaml +++ b/package/crds/signalrservice.azure.upbound.io_webpubsubhubs.yaml @@ -1166,3 +1166,1142 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: WebPubsubHub is the Schema for the WebPubsubHubs API. Manages + the hub settings for a Web Pubsub service. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: WebPubsubHubSpec defines the desired state of WebPubsubHub + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + anonymousConnectionsEnabled: + description: |- + Is anonymous connections are allowed for this hub? Defaults to false. + Possible values are true, false. + type: boolean + eventHandler: + description: An event_handler block as defined below. + items: + properties: + auth: + description: An auth block as defined below. + properties: + managedIdentityId: + description: Specify the identity ID of the target resource. + type: string + managedIdentityIdRef: + description: Reference to a UserAssignedIdentity in + managedidentity to populate managedIdentityId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + managedIdentityIdSelector: + description: Selector for a UserAssignedIdentity in + managedidentity to populate managedIdentityId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + systemEvents: + description: Specifies the list of system events. Supported + values are connect, connected and disconnected. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + urlTemplate: + description: 'The Event Handler URL Template. Two predefined + parameters {hub} and {event} are available to use in the + template. The value of the EventHandler URL is dynamically + calculated when the client request comes in. Example: + http://example.com/api/{hub}/{event}.' + type: string + userEventPattern: + description: 'Specifies the matching event names. There + are 3 kind of patterns supported: * * matches any event + name * , Combine multiple events with , for example event1,event2, + it matches event event1 and event2 * The single event + name, for example event1, it matches event1.' + type: string + type: object + type: array + eventListener: + description: An event_listener block as defined below. + items: + properties: + eventhubName: + description: Specifies the event hub name to receive the + events. + type: string + eventhubNameRef: + description: Reference to a EventHub in eventhub to populate + eventhubName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + eventhubNameSelector: + description: Selector for a EventHub in eventhub to populate + eventhubName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + eventhubNamespaceName: + description: Specifies the event hub namespace name to receive + the events. + type: string + eventhubNamespaceNameRef: + description: Reference to a EventHubNamespace in eventhub + to populate eventhubNamespaceName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + eventhubNamespaceNameSelector: + description: Selector for a EventHubNamespace in eventhub + to populate eventhubNamespaceName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + systemEventNameFilter: + description: Specifies the list of system events. Supported + values are connected and disconnected. + items: + type: string + type: array + userEventNameFilter: + description: Specifies the list of matching user event names. 
+ ["*"] can be used to match all events. + items: + type: string + type: array + type: object + type: array + name: + description: The name of the Web Pubsub hub service. Changing + this forces a new resource to be created. + type: string + webPubsubId: + description: Specifies the id of the Web Pubsub. Changing this + forces a new resource to be created. + type: string + webPubsubIdRef: + description: Reference to a WebPubsub in signalrservice to populate + webPubsubId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + webPubsubIdSelector: + description: Selector for a WebPubsub in signalrservice to populate + webPubsubId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + anonymousConnectionsEnabled: + description: |- + Is anonymous connections are allowed for this hub? Defaults to false. + Possible values are true, false. + type: boolean + eventHandler: + description: An event_handler block as defined below. + items: + properties: + auth: + description: An auth block as defined below. + properties: + managedIdentityId: + description: Specify the identity ID of the target resource. + type: string + managedIdentityIdRef: + description: Reference to a UserAssignedIdentity in + managedidentity to populate managedIdentityId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + managedIdentityIdSelector: + description: Selector for a UserAssignedIdentity in + managedidentity to populate managedIdentityId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + systemEvents: + description: Specifies the list of system events. Supported + values are connect, connected and disconnected. + items: + type: string + type: array + x-kubernetes-list-type: set + urlTemplate: + description: 'The Event Handler URL Template. Two predefined + parameters {hub} and {event} are available to use in the + template. The value of the EventHandler URL is dynamically + calculated when the client request comes in. Example: + http://example.com/api/{hub}/{event}.' + type: string + userEventPattern: + description: 'Specifies the matching event names. There + are 3 kind of patterns supported: * * matches any event + name * , Combine multiple events with , for example event1,event2, + it matches event event1 and event2 * The single event + name, for example event1, it matches event1.' + type: string + type: object + type: array + eventListener: + description: An event_listener block as defined below. + items: + properties: + eventhubName: + description: Specifies the event hub name to receive the + events. + type: string + eventhubNameRef: + description: Reference to a EventHub in eventhub to populate + eventhubName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + eventhubNameSelector: + description: Selector for a EventHub in eventhub to populate + eventhubName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + eventhubNamespaceName: + description: Specifies the event hub namespace name to receive + the events. + type: string + eventhubNamespaceNameRef: + description: Reference to a EventHubNamespace in eventhub + to populate eventhubNamespaceName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + eventhubNamespaceNameSelector: + description: Selector for a EventHubNamespace in eventhub + to populate eventhubNamespaceName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + systemEventNameFilter: + description: Specifies the list of system events. Supported + values are connected and disconnected. + items: + type: string + type: array + userEventNameFilter: + description: Specifies the list of matching user event names. 
+ ["*"] can be used to match all events. + items: + type: string + type: array + type: object + type: array + name: + description: The name of the Web Pubsub hub service. Changing + this forces a new resource to be created. + type: string + webPubsubId: + description: Specifies the id of the Web Pubsub. Changing this + forces a new resource to be created. + type: string + webPubsubIdRef: + description: Reference to a WebPubsub in signalrservice to populate + webPubsubId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + webPubsubIdSelector: + description: Selector for a WebPubsub in signalrservice to populate + webPubsubId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. 
+ type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + status: + description: WebPubsubHubStatus defines the observed state of WebPubsubHub. + properties: + atProvider: + properties: + anonymousConnectionsEnabled: + description: |- + Is anonymous connections are allowed for this hub? Defaults to false. + Possible values are true, false. + type: boolean + eventHandler: + description: An event_handler block as defined below. + items: + properties: + auth: + description: An auth block as defined below. + properties: + managedIdentityId: + description: Specify the identity ID of the target resource. + type: string + type: object + systemEvents: + description: Specifies the list of system events. Supported + values are connect, connected and disconnected. + items: + type: string + type: array + x-kubernetes-list-type: set + urlTemplate: + description: 'The Event Handler URL Template. Two predefined + parameters {hub} and {event} are available to use in the + template. The value of the EventHandler URL is dynamically + calculated when the client request comes in. Example: + http://example.com/api/{hub}/{event}.' + type: string + userEventPattern: + description: 'Specifies the matching event names. There + are 3 kind of patterns supported: * * matches any event + name * , Combine multiple events with , for example event1,event2, + it matches event event1 and event2 * The single event + name, for example event1, it matches event1.' + type: string + type: object + type: array + eventListener: + description: An event_listener block as defined below. 
+ items: + properties: + eventhubName: + description: Specifies the event hub name to receive the + events. + type: string + eventhubNamespaceName: + description: Specifies the event hub namespace name to receive + the events. + type: string + systemEventNameFilter: + description: Specifies the list of system events. Supported + values are connected and disconnected. + items: + type: string + type: array + userEventNameFilter: + description: Specifies the list of matching user event names. + ["*"] can be used to match all events. + items: + type: string + type: array + type: object + type: array + id: + description: The ID of the Web Pubsub Hub resource. + type: string + name: + description: The name of the Web Pubsub hub service. Changing + this forces a new resource to be created. + type: string + webPubsubId: + description: Specifies the id of the Web Pubsub. Changing this + forces a new resource to be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? 
+ type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/signalrservice.azure.upbound.io_webpubsubnetworkacls.yaml b/package/crds/signalrservice.azure.upbound.io_webpubsubnetworkacls.yaml index 33b29411a..e3faa53d2 100644 --- a/package/crds/signalrservice.azure.upbound.io_webpubsubnetworkacls.yaml +++ b/package/crds/signalrservice.azure.upbound.io_webpubsubnetworkacls.yaml @@ -805,3 +805,784 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: WebPubsubNetworkACL is the Schema for the WebPubsubNetworkACLs + API. Manages the Network ACL for a Web Pubsub service. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: WebPubsubNetworkACLSpec defines the desired state of WebPubsubNetworkACL + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + defaultAction: + description: The default action to control the network access + when no other rule matches. Possible values are Allow and Deny. + Defaults to Deny. + type: string + privateEndpoint: + description: A private_endpoint block as defined below. + items: + properties: + allowedRequestTypes: + description: The allowed request types for the Private Endpoint + Connection. Possible values are ClientConnection, ServerConnection, + RESTAPI and Trace. + items: + type: string + type: array + x-kubernetes-list-type: set + deniedRequestTypes: + description: The denied request types for the Private Endpoint + Connection. 
Possible values are ClientConnection, ServerConnection, + RESTAPI and Trace. + items: + type: string + type: array + x-kubernetes-list-type: set + id: + description: The ID of the Private Endpoint which is based + on the Web Pubsub service. + type: string + idRef: + description: Reference to a PrivateEndpoint in network to + populate id. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + idSelector: + description: Selector for a PrivateEndpoint in network to + populate id. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + publicNetwork: + description: A public_network block as defined below. + properties: + allowedRequestTypes: + description: The allowed request types for the public network. + Possible values are ClientConnection, ServerConnection, + RESTAPI and Trace. + items: + type: string + type: array + x-kubernetes-list-type: set + deniedRequestTypes: + description: The denied request types for the public network. + Possible values are ClientConnection, ServerConnection, + RESTAPI and Trace. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + webPubsubId: + description: The ID of the Web Pubsub service. Changing this forces + a new resource to be created. + type: string + webPubsubIdRef: + description: Reference to a WebPubsub in signalrservice to populate + webPubsubId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + webPubsubIdSelector: + description: Selector for a WebPubsub in signalrservice to populate + webPubsubId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + defaultAction: + description: The default action to control the network access + when no other rule matches. Possible values are Allow and Deny. + Defaults to Deny. + type: string + privateEndpoint: + description: A private_endpoint block as defined below. + items: + properties: + allowedRequestTypes: + description: The allowed request types for the Private Endpoint + Connection. Possible values are ClientConnection, ServerConnection, + RESTAPI and Trace. + items: + type: string + type: array + x-kubernetes-list-type: set + deniedRequestTypes: + description: The denied request types for the Private Endpoint + Connection. Possible values are ClientConnection, ServerConnection, + RESTAPI and Trace. + items: + type: string + type: array + x-kubernetes-list-type: set + id: + description: The ID of the Private Endpoint which is based + on the Web Pubsub service. + type: string + idRef: + description: Reference to a PrivateEndpoint in network to + populate id. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + idSelector: + description: Selector for a PrivateEndpoint in network to + populate id. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + publicNetwork: + description: A public_network block as defined below. + properties: + allowedRequestTypes: + description: The allowed request types for the public network. + Possible values are ClientConnection, ServerConnection, + RESTAPI and Trace. + items: + type: string + type: array + x-kubernetes-list-type: set + deniedRequestTypes: + description: The denied request types for the public network. + Possible values are ClientConnection, ServerConnection, + RESTAPI and Trace. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + webPubsubId: + description: The ID of the Web Pubsub service. 
Changing this forces + a new resource to be created. + type: string + webPubsubIdRef: + description: Reference to a WebPubsub in signalrservice to populate + webPubsubId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + webPubsubIdSelector: + description: Selector for a WebPubsub in signalrservice to populate + webPubsubId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.publicNetwork is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.publicNetwork) + || (has(self.initProvider) && has(self.initProvider.publicNetwork))' + status: + description: WebPubsubNetworkACLStatus defines the observed state of WebPubsubNetworkACL. + properties: + atProvider: + properties: + defaultAction: + description: The default action to control the network access + when no other rule matches. Possible values are Allow and Deny. + Defaults to Deny. + type: string + id: + description: The ID of the Web Pubsub service. + type: string + privateEndpoint: + description: A private_endpoint block as defined below. + items: + properties: + allowedRequestTypes: + description: The allowed request types for the Private Endpoint + Connection. Possible values are ClientConnection, ServerConnection, + RESTAPI and Trace. + items: + type: string + type: array + x-kubernetes-list-type: set + deniedRequestTypes: + description: The denied request types for the Private Endpoint + Connection. Possible values are ClientConnection, ServerConnection, + RESTAPI and Trace. + items: + type: string + type: array + x-kubernetes-list-type: set + id: + description: The ID of the Private Endpoint which is based + on the Web Pubsub service. + type: string + type: object + type: array + publicNetwork: + description: A public_network block as defined below. + properties: + allowedRequestTypes: + description: The allowed request types for the public network. + Possible values are ClientConnection, ServerConnection, + RESTAPI and Trace. + items: + type: string + type: array + x-kubernetes-list-type: set + deniedRequestTypes: + description: The denied request types for the public network. 
+ Possible values are ClientConnection, ServerConnection, + RESTAPI and Trace. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + webPubsubId: + description: The ID of the Web Pubsub service. Changing this forces + a new resource to be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/signalrservice.azure.upbound.io_webpubsubs.yaml b/package/crds/signalrservice.azure.upbound.io_webpubsubs.yaml index caf948798..9e3f2e4ad 100644 --- a/package/crds/signalrservice.azure.upbound.io_webpubsubs.yaml +++ b/package/crds/signalrservice.azure.upbound.io_webpubsubs.yaml @@ -763,3 +763,736 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: WebPubsub is the Schema for the WebPubsubs API. Manages an Azure + Web PubSub service. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: WebPubsubSpec defines the desired state of WebPubsub + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + aadAuthEnabled: + description: Whether to enable AAD auth? Defaults to true. + type: boolean + capacity: + description: 'Specifies the number of units associated with this + Web PubSub resource. Valid values are: Free: 1, Standard: 1, + 2, 5, 10, 20, 50, 100.' + type: number + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Web PubSub. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Web PubSub. Possible values + are SystemAssigned, UserAssigned. + type: string + type: object + liveTrace: + description: A live_trace block as defined below. + properties: + connectivityLogsEnabled: + description: Whether the log category ConnectivityLogs is + enabled? Defaults to true + type: boolean + enabled: + description: Whether the live trace is enabled? Defaults to + true. 
+ type: boolean + httpRequestLogsEnabled: + description: Whether the log category HttpRequestLogs is enabled? + Defaults to true + type: boolean + messagingLogsEnabled: + description: Whether the log category MessagingLogs is enabled? + Defaults to true + type: boolean + type: object + localAuthEnabled: + description: Whether to enable local auth? Defaults to true. + type: boolean + location: + description: Specifies the supported Azure location where the + Web PubSub service exists. Changing this forces a new resource + to be created. + type: string + name: + description: The name of the Web PubSub service. Changing this + forces a new resource to be created. + type: string + publicNetworkAccessEnabled: + description: Whether to enable public network access? Defaults + to true. + type: boolean + resourceGroupName: + description: The name of the resource group in which to create + the Web PubSub service. Changing this forces a new resource + to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sku: + description: Specifies which SKU to use. Possible values are Free_F1, + Standard_S1, and Premium_P1. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + tlsClientCertEnabled: + description: Whether to request client certificate during TLS + handshake? Defaults to false. + type: boolean + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. 
+ InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + aadAuthEnabled: + description: Whether to enable AAD auth? Defaults to true. + type: boolean + capacity: + description: 'Specifies the number of units associated with this + Web PubSub resource. Valid values are: Free: 1, Standard: 1, + 2, 5, 10, 20, 50, 100.' + type: number + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Web PubSub. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Web PubSub. Possible values + are SystemAssigned, UserAssigned. + type: string + type: object + liveTrace: + description: A live_trace block as defined below. + properties: + connectivityLogsEnabled: + description: Whether the log category ConnectivityLogs is + enabled? Defaults to true + type: boolean + enabled: + description: Whether the live trace is enabled? Defaults to + true. + type: boolean + httpRequestLogsEnabled: + description: Whether the log category HttpRequestLogs is enabled? + Defaults to true + type: boolean + messagingLogsEnabled: + description: Whether the log category MessagingLogs is enabled? + Defaults to true + type: boolean + type: object + localAuthEnabled: + description: Whether to enable local auth? Defaults to true. 
+ type: boolean + location: + description: Specifies the supported Azure location where the + Web PubSub service exists. Changing this forces a new resource + to be created. + type: string + name: + description: The name of the Web PubSub service. Changing this + forces a new resource to be created. + type: string + publicNetworkAccessEnabled: + description: Whether to enable public network access? Defaults + to true. + type: boolean + resourceGroupName: + description: The name of the resource group in which to create + the Web PubSub service. Changing this forces a new resource + to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sku: + description: Specifies which SKU to use. Possible values are Free_F1, + Standard_S1, and Premium_P1. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + tlsClientCertEnabled: + description: Whether to request client certificate during TLS + handshake? Defaults to false. + type: boolean + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.sku is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.sku) + || (has(self.initProvider) && has(self.initProvider.sku))' + status: + description: WebPubsubStatus defines the observed state of WebPubsub. 
+ properties: + atProvider: + properties: + aadAuthEnabled: + description: Whether to enable AAD auth? Defaults to true. + type: boolean + capacity: + description: 'Specifies the number of units associated with this + Web PubSub resource. Valid values are: Free: 1, Standard: 1, + 2, 5, 10, 20, 50, 100.' + type: number + externalIp: + description: The publicly accessible IP of the Web PubSub service. + type: string + hostname: + description: The FQDN of the Web PubSub service. + type: string + id: + description: The ID of the Web PubSub service. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Web PubSub. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Principal ID associated with this Managed + Service Identity. + type: string + tenantId: + description: The Tenant ID associated with this Managed Service + Identity. + type: string + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Web PubSub. Possible values + are SystemAssigned, UserAssigned. + type: string + type: object + liveTrace: + description: A live_trace block as defined below. + properties: + connectivityLogsEnabled: + description: Whether the log category ConnectivityLogs is + enabled? Defaults to true + type: boolean + enabled: + description: Whether the live trace is enabled? Defaults to + true. + type: boolean + httpRequestLogsEnabled: + description: Whether the log category HttpRequestLogs is enabled? + Defaults to true + type: boolean + messagingLogsEnabled: + description: Whether the log category MessagingLogs is enabled? + Defaults to true + type: boolean + type: object + localAuthEnabled: + description: Whether to enable local auth? Defaults to true. 
+ type: boolean + location: + description: Specifies the supported Azure location where the + Web PubSub service exists. Changing this forces a new resource + to be created. + type: string + name: + description: The name of the Web PubSub service. Changing this + forces a new resource to be created. + type: string + publicNetworkAccessEnabled: + description: Whether to enable public network access? Defaults + to true. + type: boolean + publicPort: + description: The publicly accessible port of the Web PubSub service + which is designed for browser/client use. + type: number + resourceGroupName: + description: The name of the resource group in which to create + the Web PubSub service. Changing this forces a new resource + to be created. + type: string + serverPort: + description: The publicly accessible port of the Web PubSub service + which is designed for customer server side use. + type: number + sku: + description: Specifies which SKU to use. Possible values are Free_F1, + Standard_S1, and Premium_P1. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + tlsClientCertEnabled: + description: Whether to request client certificate during TLS + handshake? Defaults to false. + type: boolean + version: + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/sql.azure.upbound.io_mssqldatabases.yaml b/package/crds/sql.azure.upbound.io_mssqldatabases.yaml index f4b2f672a..766a82f93 100644 --- a/package/crds/sql.azure.upbound.io_mssqldatabases.yaml +++ b/package/crds/sql.azure.upbound.io_mssqldatabases.yaml @@ -1451,3 +1451,1405 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: MSSQLDatabase is the Schema for the MSSQLDatabases API. Manages + a MS SQL Database. 
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: MSSQLDatabaseSpec defines the desired state of MSSQLDatabase + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + autoPauseDelayInMinutes: + description: Time in minutes after which database is automatically + paused. A value of -1 means that automatic pause is disabled. + This property is only settable for Serverless databases. + type: number + collation: + description: Specifies the collation of the database. Changing + this forces a new resource to be created. + type: string + createMode: + description: The create mode of the database. 
Possible values + are Copy, Default, OnlineSecondary, PointInTimeRestore, Recovery, + Restore, RestoreExternalBackup, RestoreExternalBackupSecondary, + RestoreLongTermRetentionBackup and Secondary. Mutually exclusive + with import. Changing this forces a new resource to be created. + Defaults to Default. + type: string + creationSourceDatabaseId: + description: The ID of the source database from which to create + the new database. This should only be used for databases with + create_mode values that use another database as reference. Changing + this forces a new resource to be created. + type: string + elasticPoolId: + description: Specifies the ID of the elastic pool containing this + database. + type: string + enclaveType: + description: Specifies the type of enclave to be used by the database. + Possible value VBS. + type: string + geoBackupEnabled: + description: A boolean that specifies if the Geo Backup Policy + is enabled. Defaults to true. + type: boolean + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this SQL Database. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this SQL Database. Possible + value is UserAssigned. + type: string + type: object + import: + description: A import block as documented below. Mutually exclusive + with create_mode. + properties: + administratorLogin: + description: Specifies the name of the SQL administrator. + type: string + administratorLoginPasswordSecretRef: + description: Specifies the password of the SQL administrator. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - key + - name + - namespace + type: object + authenticationType: + description: Specifies the type of authentication used to + access the server. Valid values are SQL or ADPassword. + type: string + storageAccountId: + description: The resource id for the storage account used + to store BACPAC file. If set, private endpoint connection + will be created for the storage account. Must match storage + account used for storage_uri parameter. + type: string + storageKeySecretRef: + description: Specifies the access key for the storage account. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + storageKeyType: + description: Specifies the type of access key for the storage + account. Valid values are StorageAccessKey or SharedAccessKey. + type: string + storageUri: + description: Specifies the blob URI of the .bacpac file. + type: string + required: + - administratorLoginPasswordSecretRef + - storageKeySecretRef + type: object + ledgerEnabled: + description: A boolean that specifies if this is a ledger database. + Defaults to false. Changing this forces a new resource to be + created. + type: boolean + licenseType: + description: Specifies the license type applied to this database. + Possible values are LicenseIncluded and BasePrice. + type: string + longTermRetentionPolicy: + description: A long_term_retention_policy block as defined below. + properties: + immutableBackupsEnabled: + description: Specifies if the backups are immutable. Defaults + to false. + type: boolean + monthlyRetention: + description: The monthly retention policy for an LTR backup + in an ISO 8601 format. Valid value is between 1 to 120 months. + e.g. P1Y, P1M, P4W or P30D. + type: string + weekOfYear: + description: The week of year to take the yearly backup. 
Value + has to be between 1 and 52. + type: number + weeklyRetention: + description: The weekly retention policy for an LTR backup + in an ISO 8601 format. Valid value is between 1 to 520 weeks. + e.g. P1Y, P1M, P1W or P7D. + type: string + yearlyRetention: + description: The yearly retention policy for an LTR backup + in an ISO 8601 format. Valid value is between 1 to 10 years. + e.g. P1Y, P12M, P52W or P365D. + type: string + type: object + maintenanceConfigurationName: + description: The name of the Public Maintenance Configuration + window to apply to the database. Valid values include SQL_Default, + SQL_EastUS_DB_1, SQL_EastUS2_DB_1, SQL_SoutheastAsia_DB_1, SQL_AustraliaEast_DB_1, + SQL_NorthEurope_DB_1, SQL_SouthCentralUS_DB_1, SQL_WestUS2_DB_1, + SQL_UKSouth_DB_1, SQL_WestEurope_DB_1, SQL_EastUS_DB_2, SQL_EastUS2_DB_2, + SQL_WestUS2_DB_2, SQL_SoutheastAsia_DB_2, SQL_AustraliaEast_DB_2, + SQL_NorthEurope_DB_2, SQL_SouthCentralUS_DB_2, SQL_UKSouth_DB_2, + SQL_WestEurope_DB_2, SQL_AustraliaSoutheast_DB_1, SQL_BrazilSouth_DB_1, + SQL_CanadaCentral_DB_1, SQL_CanadaEast_DB_1, SQL_CentralUS_DB_1, + SQL_EastAsia_DB_1, SQL_FranceCentral_DB_1, SQL_GermanyWestCentral_DB_1, + SQL_CentralIndia_DB_1, SQL_SouthIndia_DB_1, SQL_JapanEast_DB_1, + SQL_JapanWest_DB_1, SQL_NorthCentralUS_DB_1, SQL_UKWest_DB_1, + SQL_WestUS_DB_1, SQL_AustraliaSoutheast_DB_2, SQL_BrazilSouth_DB_2, + SQL_CanadaCentral_DB_2, SQL_CanadaEast_DB_2, SQL_CentralUS_DB_2, + SQL_EastAsia_DB_2, SQL_FranceCentral_DB_2, SQL_GermanyWestCentral_DB_2, + SQL_CentralIndia_DB_2, SQL_SouthIndia_DB_2, SQL_JapanEast_DB_2, + SQL_JapanWest_DB_2, SQL_NorthCentralUS_DB_2, SQL_UKWest_DB_2, + SQL_WestUS_DB_2, SQL_WestCentralUS_DB_1, SQL_FranceSouth_DB_1, + SQL_WestCentralUS_DB_2, SQL_FranceSouth_DB_2, SQL_SwitzerlandNorth_DB_1, + SQL_SwitzerlandNorth_DB_2, SQL_BrazilSoutheast_DB_1, SQL_UAENorth_DB_1, + SQL_BrazilSoutheast_DB_2, SQL_UAENorth_DB_2. Defaults to SQL_Default. 
+ type: string + maxSizeGb: + description: The max size of the database in gigabytes. + type: number + minCapacity: + description: Minimal capacity that database will always have allocated, + if not paused. This property is only settable for Serverless + databases. + type: number + readReplicaCount: + description: The number of readonly secondary replicas associated + with the database to which readonly application intent connections + may be routed. This property is only settable for Hyperscale + edition databases. + type: number + readScale: + description: If enabled, connections that have application intent + set to readonly in their connection string may be routed to + a readonly secondary replica. This property is only settable + for Premium and Business Critical databases. + type: boolean + recoverDatabaseId: + description: The ID of the database to be recovered. This property + is only applicable when the create_mode is Recovery. + type: string + recoveryPointId: + description: The ID of the Recovery Services Recovery Point Id + to be restored. This property is only applicable when the create_mode + is Recovery. + type: string + restoreDroppedDatabaseId: + description: The ID of the database to be restored. This property + is only applicable when the create_mode is Restore. + type: string + restoreLongTermRetentionBackupId: + description: The ID of the long term retention backup to be restored. + This property is only applicable when the create_mode is RestoreLongTermRetentionBackup. + type: string + restorePointInTime: + description: Specifies the point in time (ISO8601 format) of the + source database that will be restored to create the new database. + This property is only settable for create_mode= PointInTimeRestore + databases. + type: string + sampleName: + description: Specifies the name of the sample schema to apply + when creating this database. Possible value is AdventureWorksLT. 
+ type: string + serverId: + description: The id of the MS SQL Server on which to create the + database. Changing this forces a new resource to be created. + type: string + serverIdRef: + description: Reference to a MSSQLServer in sql to populate serverId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serverIdSelector: + description: Selector for a MSSQLServer in sql to populate serverId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + shortTermRetentionPolicy: + description: A short_term_retention_policy block as defined below. + properties: + backupIntervalInHours: + description: The hours between each differential backup. This + is only applicable to live databases but not dropped databases. + Value has to be 12 or 24. Defaults to 12 hours. + type: number + retentionDays: + description: Point In Time Restore configuration. Value has + to be between 1 and 35. + type: number + type: object + skuName: + description: Specifies the name of the SKU used by the database. + For example, GP_S_Gen5_2,HS_Gen4_1,BC_Gen5_2, ElasticPool, Basic,S0, + P2 ,DW100c, DS100. Changing this from the HyperScale service + tier to another service tier will create a new resource. + type: string + storageAccountType: + description: Specifies the storage account type used to store + backups for this database. Possible values are Geo, GeoZone, + Local and Zone. Defaults to Geo. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + threatDetectionPolicy: + description: Threat detection policy configuration. The threat_detection_policy + block supports fields documented below. + properties: + disabledAlerts: + description: Specifies a list of alerts which should be disabled. + Possible values include Access_Anomaly, Sql_Injection and + Sql_Injection_Vulnerability. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + emailAccountAdmins: + description: Should the account administrators be emailed + when this alert is triggered? Possible values are Enabled + or Disabled. Defaults to Disabled. + type: string + emailAddresses: + description: A list of email addresses which alerts should + be sent to. + items: + type: string + type: array + x-kubernetes-list-type: set + retentionDays: + description: Specifies the number of days to keep in the Threat + Detection audit logs. + type: number + state: + description: The State of the Policy. Possible values are + Enabled or Disabled. Defaults to Disabled. + type: string + storageAccountAccessKeySecretRef: + description: Specifies the identifier key of the Threat Detection + audit storage account. Required if state is Enabled. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + storageEndpoint: + description: Specifies the blob storage endpoint (e.g. https://example.blob.core.windows.net). + This blob storage will hold all Threat Detection audit logs. + Required if state is Enabled. + type: string + type: object + transparentDataEncryptionEnabled: + description: If set to true, Transparent Data Encryption will + be enabled on the database. Defaults to true. + type: boolean + transparentDataEncryptionKeyAutomaticRotationEnabled: + description: Boolean flag to specify whether TDE automatically + rotates the encryption Key to latest version or not. Possible + values are true or false. Defaults to false. + type: boolean + transparentDataEncryptionKeyVaultKeyId: + description: The fully versioned Key Vault Key URL (e.g. 'https://.vault.azure.net/keys//) + to be used as the Customer Managed Key(CMK/BYOK) for the Transparent + Data Encryption(TDE) layer. 
+ type: string + transparentDataEncryptionKeyVaultKeyIdRef: + description: Reference to a Key in keyvault to populate transparentDataEncryptionKeyVaultKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + transparentDataEncryptionKeyVaultKeyIdSelector: + description: Selector for a Key in keyvault to populate transparentDataEncryptionKeyVaultKeyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + zoneRedundant: + description: Whether or not this database is zone redundant, which + means the replicas of this database will be spread across multiple + availability zones. This property is only settable for Premium + and Business Critical databases. + type: boolean + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + autoPauseDelayInMinutes: + description: Time in minutes after which database is automatically + paused. A value of -1 means that automatic pause is disabled. + This property is only settable for Serverless databases. + type: number + collation: + description: Specifies the collation of the database. Changing + this forces a new resource to be created. + type: string + createMode: + description: The create mode of the database. 
Possible values + are Copy, Default, OnlineSecondary, PointInTimeRestore, Recovery, + Restore, RestoreExternalBackup, RestoreExternalBackupSecondary, + RestoreLongTermRetentionBackup and Secondary. Mutually exclusive + with import. Changing this forces a new resource to be created. + Defaults to Default. + type: string + creationSourceDatabaseId: + description: The ID of the source database from which to create + the new database. This should only be used for databases with + create_mode values that use another database as reference. Changing + this forces a new resource to be created. + type: string + elasticPoolId: + description: Specifies the ID of the elastic pool containing this + database. + type: string + enclaveType: + description: Specifies the type of enclave to be used by the database. + Possible value VBS. + type: string + geoBackupEnabled: + description: A boolean that specifies if the Geo Backup Policy + is enabled. Defaults to true. + type: boolean + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this SQL Database. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this SQL Database. Possible + value is UserAssigned. + type: string + type: object + import: + description: A import block as documented below. Mutually exclusive + with create_mode. + properties: + administratorLogin: + description: Specifies the name of the SQL administrator. + type: string + authenticationType: + description: Specifies the type of authentication used to + access the server. Valid values are SQL or ADPassword. + type: string + storageAccountId: + description: The resource id for the storage account used + to store BACPAC file. If set, private endpoint connection + will be created for the storage account. 
Must match storage + account used for storage_uri parameter. + type: string + storageKeyType: + description: Specifies the type of access key for the storage + account. Valid values are StorageAccessKey or SharedAccessKey. + type: string + storageUri: + description: Specifies the blob URI of the .bacpac file. + type: string + type: object + ledgerEnabled: + description: A boolean that specifies if this is a ledger database. + Defaults to false. Changing this forces a new resource to be + created. + type: boolean + licenseType: + description: Specifies the license type applied to this database. + Possible values are LicenseIncluded and BasePrice. + type: string + longTermRetentionPolicy: + description: A long_term_retention_policy block as defined below. + properties: + immutableBackupsEnabled: + description: Specifies if the backups are immutable. Defaults + to false. + type: boolean + monthlyRetention: + description: The monthly retention policy for an LTR backup + in an ISO 8601 format. Valid value is between 1 to 120 months. + e.g. P1Y, P1M, P4W or P30D. + type: string + weekOfYear: + description: The week of year to take the yearly backup. Value + has to be between 1 and 52. + type: number + weeklyRetention: + description: The weekly retention policy for an LTR backup + in an ISO 8601 format. Valid value is between 1 to 520 weeks. + e.g. P1Y, P1M, P1W or P7D. + type: string + yearlyRetention: + description: The yearly retention policy for an LTR backup + in an ISO 8601 format. Valid value is between 1 to 10 years. + e.g. P1Y, P12M, P52W or P365D. + type: string + type: object + maintenanceConfigurationName: + description: The name of the Public Maintenance Configuration + window to apply to the database. 
Valid values include SQL_Default, + SQL_EastUS_DB_1, SQL_EastUS2_DB_1, SQL_SoutheastAsia_DB_1, SQL_AustraliaEast_DB_1, + SQL_NorthEurope_DB_1, SQL_SouthCentralUS_DB_1, SQL_WestUS2_DB_1, + SQL_UKSouth_DB_1, SQL_WestEurope_DB_1, SQL_EastUS_DB_2, SQL_EastUS2_DB_2, + SQL_WestUS2_DB_2, SQL_SoutheastAsia_DB_2, SQL_AustraliaEast_DB_2, + SQL_NorthEurope_DB_2, SQL_SouthCentralUS_DB_2, SQL_UKSouth_DB_2, + SQL_WestEurope_DB_2, SQL_AustraliaSoutheast_DB_1, SQL_BrazilSouth_DB_1, + SQL_CanadaCentral_DB_1, SQL_CanadaEast_DB_1, SQL_CentralUS_DB_1, + SQL_EastAsia_DB_1, SQL_FranceCentral_DB_1, SQL_GermanyWestCentral_DB_1, + SQL_CentralIndia_DB_1, SQL_SouthIndia_DB_1, SQL_JapanEast_DB_1, + SQL_JapanWest_DB_1, SQL_NorthCentralUS_DB_1, SQL_UKWest_DB_1, + SQL_WestUS_DB_1, SQL_AustraliaSoutheast_DB_2, SQL_BrazilSouth_DB_2, + SQL_CanadaCentral_DB_2, SQL_CanadaEast_DB_2, SQL_CentralUS_DB_2, + SQL_EastAsia_DB_2, SQL_FranceCentral_DB_2, SQL_GermanyWestCentral_DB_2, + SQL_CentralIndia_DB_2, SQL_SouthIndia_DB_2, SQL_JapanEast_DB_2, + SQL_JapanWest_DB_2, SQL_NorthCentralUS_DB_2, SQL_UKWest_DB_2, + SQL_WestUS_DB_2, SQL_WestCentralUS_DB_1, SQL_FranceSouth_DB_1, + SQL_WestCentralUS_DB_2, SQL_FranceSouth_DB_2, SQL_SwitzerlandNorth_DB_1, + SQL_SwitzerlandNorth_DB_2, SQL_BrazilSoutheast_DB_1, SQL_UAENorth_DB_1, + SQL_BrazilSoutheast_DB_2, SQL_UAENorth_DB_2. Defaults to SQL_Default. + type: string + maxSizeGb: + description: The max size of the database in gigabytes. + type: number + minCapacity: + description: Minimal capacity that database will always have allocated, + if not paused. This property is only settable for Serverless + databases. + type: number + readReplicaCount: + description: The number of readonly secondary replicas associated + with the database to which readonly application intent connections + may be routed. This property is only settable for Hyperscale + edition databases. 
+ type: number + readScale: + description: If enabled, connections that have application intent + set to readonly in their connection string may be routed to + a readonly secondary replica. This property is only settable + for Premium and Business Critical databases. + type: boolean + recoverDatabaseId: + description: The ID of the database to be recovered. This property + is only applicable when the create_mode is Recovery. + type: string + recoveryPointId: + description: The ID of the Recovery Services Recovery Point Id + to be restored. This property is only applicable when the create_mode + is Recovery. + type: string + restoreDroppedDatabaseId: + description: The ID of the database to be restored. This property + is only applicable when the create_mode is Restore. + type: string + restoreLongTermRetentionBackupId: + description: The ID of the long term retention backup to be restored. + This property is only applicable when the create_mode is RestoreLongTermRetentionBackup. + type: string + restorePointInTime: + description: Specifies the point in time (ISO8601 format) of the + source database that will be restored to create the new database. + This property is only settable for create_mode= PointInTimeRestore + databases. + type: string + sampleName: + description: Specifies the name of the sample schema to apply + when creating this database. Possible value is AdventureWorksLT. + type: string + shortTermRetentionPolicy: + description: A short_term_retention_policy block as defined below. + properties: + backupIntervalInHours: + description: The hours between each differential backup. This + is only applicable to live databases but not dropped databases. + Value has to be 12 or 24. Defaults to 12 hours. + type: number + retentionDays: + description: Point In Time Restore configuration. Value has + to be between 1 and 35. + type: number + type: object + skuName: + description: Specifies the name of the SKU used by the database. 
+ For example, GP_S_Gen5_2,HS_Gen4_1,BC_Gen5_2, ElasticPool, Basic,S0, + P2 ,DW100c, DS100. Changing this from the HyperScale service + tier to another service tier will create a new resource. + type: string + storageAccountType: + description: Specifies the storage account type used to store + backups for this database. Possible values are Geo, GeoZone, + Local and Zone. Defaults to Geo. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + threatDetectionPolicy: + description: Threat detection policy configuration. The threat_detection_policy + block supports fields documented below. + properties: + disabledAlerts: + description: Specifies a list of alerts which should be disabled. + Possible values include Access_Anomaly, Sql_Injection and + Sql_Injection_Vulnerability. + items: + type: string + type: array + x-kubernetes-list-type: set + emailAccountAdmins: + description: Should the account administrators be emailed + when this alert is triggered? Possible values are Enabled + or Disabled. Defaults to Disabled. + type: string + emailAddresses: + description: A list of email addresses which alerts should + be sent to. + items: + type: string + type: array + x-kubernetes-list-type: set + retentionDays: + description: Specifies the number of days to keep in the Threat + Detection audit logs. + type: number + state: + description: The State of the Policy. Possible values are + Enabled or Disabled. Defaults to Disabled. + type: string + storageEndpoint: + description: Specifies the blob storage endpoint (e.g. https://example.blob.core.windows.net). + This blob storage will hold all Threat Detection audit logs. + Required if state is Enabled. + type: string + type: object + transparentDataEncryptionEnabled: + description: If set to true, Transparent Data Encryption will + be enabled on the database. Defaults to true. 
+ type: boolean + transparentDataEncryptionKeyAutomaticRotationEnabled: + description: Boolean flag to specify whether TDE automatically + rotates the encryption Key to latest version or not. Possible + values are true or false. Defaults to false. + type: boolean + transparentDataEncryptionKeyVaultKeyId: + description: The fully versioned Key Vault Key URL (e.g. 'https://.vault.azure.net/keys//) + to be used as the Customer Managed Key(CMK/BYOK) for the Transparent + Data Encryption(TDE) layer. + type: string + transparentDataEncryptionKeyVaultKeyIdRef: + description: Reference to a Key in keyvault to populate transparentDataEncryptionKeyVaultKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + transparentDataEncryptionKeyVaultKeyIdSelector: + description: Selector for a Key in keyvault to populate transparentDataEncryptionKeyVaultKeyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + zoneRedundant: + description: Whether or not this database is zone redundant, which + means the replicas of this database will be spread across multiple + availability zones. This property is only settable for Premium + and Business Critical databases. + type: boolean + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: MSSQLDatabaseStatus defines the observed state of MSSQLDatabase. + properties: + atProvider: + properties: + autoPauseDelayInMinutes: + description: Time in minutes after which database is automatically + paused. A value of -1 means that automatic pause is disabled. + This property is only settable for Serverless databases. + type: number + collation: + description: Specifies the collation of the database. Changing + this forces a new resource to be created. + type: string + createMode: + description: The create mode of the database. Possible values + are Copy, Default, OnlineSecondary, PointInTimeRestore, Recovery, + Restore, RestoreExternalBackup, RestoreExternalBackupSecondary, + RestoreLongTermRetentionBackup and Secondary. Mutually exclusive + with import. Changing this forces a new resource to be created. + Defaults to Default. + type: string + creationSourceDatabaseId: + description: The ID of the source database from which to create + the new database. 
This should only be used for databases with + create_mode values that use another database as reference. Changing + this forces a new resource to be created. + type: string + elasticPoolId: + description: Specifies the ID of the elastic pool containing this + database. + type: string + enclaveType: + description: Specifies the type of enclave to be used by the database. + Possible value VBS. + type: string + geoBackupEnabled: + description: A boolean that specifies if the Geo Backup Policy + is enabled. Defaults to true. + type: boolean + id: + description: The ID of the MS SQL Database. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this SQL Database. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this SQL Database. Possible + value is UserAssigned. + type: string + type: object + import: + description: A import block as documented below. Mutually exclusive + with create_mode. + properties: + administratorLogin: + description: Specifies the name of the SQL administrator. + type: string + authenticationType: + description: Specifies the type of authentication used to + access the server. Valid values are SQL or ADPassword. + type: string + storageAccountId: + description: The resource id for the storage account used + to store BACPAC file. If set, private endpoint connection + will be created for the storage account. Must match storage + account used for storage_uri parameter. + type: string + storageKeyType: + description: Specifies the type of access key for the storage + account. Valid values are StorageAccessKey or SharedAccessKey. + type: string + storageUri: + description: Specifies the blob URI of the .bacpac file. 
+ type: string + type: object + ledgerEnabled: + description: A boolean that specifies if this is a ledger database. + Defaults to false. Changing this forces a new resource to be + created. + type: boolean + licenseType: + description: Specifies the license type applied to this database. + Possible values are LicenseIncluded and BasePrice. + type: string + longTermRetentionPolicy: + description: A long_term_retention_policy block as defined below. + properties: + immutableBackupsEnabled: + description: Specifies if the backups are immutable. Defaults + to false. + type: boolean + monthlyRetention: + description: The monthly retention policy for an LTR backup + in an ISO 8601 format. Valid value is between 1 to 120 months. + e.g. P1Y, P1M, P4W or P30D. + type: string + weekOfYear: + description: The week of year to take the yearly backup. Value + has to be between 1 and 52. + type: number + weeklyRetention: + description: The weekly retention policy for an LTR backup + in an ISO 8601 format. Valid value is between 1 to 520 weeks. + e.g. P1Y, P1M, P1W or P7D. + type: string + yearlyRetention: + description: The yearly retention policy for an LTR backup + in an ISO 8601 format. Valid value is between 1 to 10 years. + e.g. P1Y, P12M, P52W or P365D. + type: string + type: object + maintenanceConfigurationName: + description: The name of the Public Maintenance Configuration + window to apply to the database. 
Valid values include SQL_Default, + SQL_EastUS_DB_1, SQL_EastUS2_DB_1, SQL_SoutheastAsia_DB_1, SQL_AustraliaEast_DB_1, + SQL_NorthEurope_DB_1, SQL_SouthCentralUS_DB_1, SQL_WestUS2_DB_1, + SQL_UKSouth_DB_1, SQL_WestEurope_DB_1, SQL_EastUS_DB_2, SQL_EastUS2_DB_2, + SQL_WestUS2_DB_2, SQL_SoutheastAsia_DB_2, SQL_AustraliaEast_DB_2, + SQL_NorthEurope_DB_2, SQL_SouthCentralUS_DB_2, SQL_UKSouth_DB_2, + SQL_WestEurope_DB_2, SQL_AustraliaSoutheast_DB_1, SQL_BrazilSouth_DB_1, + SQL_CanadaCentral_DB_1, SQL_CanadaEast_DB_1, SQL_CentralUS_DB_1, + SQL_EastAsia_DB_1, SQL_FranceCentral_DB_1, SQL_GermanyWestCentral_DB_1, + SQL_CentralIndia_DB_1, SQL_SouthIndia_DB_1, SQL_JapanEast_DB_1, + SQL_JapanWest_DB_1, SQL_NorthCentralUS_DB_1, SQL_UKWest_DB_1, + SQL_WestUS_DB_1, SQL_AustraliaSoutheast_DB_2, SQL_BrazilSouth_DB_2, + SQL_CanadaCentral_DB_2, SQL_CanadaEast_DB_2, SQL_CentralUS_DB_2, + SQL_EastAsia_DB_2, SQL_FranceCentral_DB_2, SQL_GermanyWestCentral_DB_2, + SQL_CentralIndia_DB_2, SQL_SouthIndia_DB_2, SQL_JapanEast_DB_2, + SQL_JapanWest_DB_2, SQL_NorthCentralUS_DB_2, SQL_UKWest_DB_2, + SQL_WestUS_DB_2, SQL_WestCentralUS_DB_1, SQL_FranceSouth_DB_1, + SQL_WestCentralUS_DB_2, SQL_FranceSouth_DB_2, SQL_SwitzerlandNorth_DB_1, + SQL_SwitzerlandNorth_DB_2, SQL_BrazilSoutheast_DB_1, SQL_UAENorth_DB_1, + SQL_BrazilSoutheast_DB_2, SQL_UAENorth_DB_2. Defaults to SQL_Default. + type: string + maxSizeGb: + description: The max size of the database in gigabytes. + type: number + minCapacity: + description: Minimal capacity that database will always have allocated, + if not paused. This property is only settable for Serverless + databases. + type: number + readReplicaCount: + description: The number of readonly secondary replicas associated + with the database to which readonly application intent connections + may be routed. This property is only settable for Hyperscale + edition databases. 
+ type: number + readScale: + description: If enabled, connections that have application intent + set to readonly in their connection string may be routed to + a readonly secondary replica. This property is only settable + for Premium and Business Critical databases. + type: boolean + recoverDatabaseId: + description: The ID of the database to be recovered. This property + is only applicable when the create_mode is Recovery. + type: string + recoveryPointId: + description: The ID of the Recovery Services Recovery Point Id + to be restored. This property is only applicable when the create_mode + is Recovery. + type: string + restoreDroppedDatabaseId: + description: The ID of the database to be restored. This property + is only applicable when the create_mode is Restore. + type: string + restoreLongTermRetentionBackupId: + description: The ID of the long term retention backup to be restored. + This property is only applicable when the create_mode is RestoreLongTermRetentionBackup. + type: string + restorePointInTime: + description: Specifies the point in time (ISO8601 format) of the + source database that will be restored to create the new database. + This property is only settable for create_mode= PointInTimeRestore + databases. + type: string + sampleName: + description: Specifies the name of the sample schema to apply + when creating this database. Possible value is AdventureWorksLT. + type: string + serverId: + description: The id of the MS SQL Server on which to create the + database. Changing this forces a new resource to be created. + type: string + shortTermRetentionPolicy: + description: A short_term_retention_policy block as defined below. + properties: + backupIntervalInHours: + description: The hours between each differential backup. This + is only applicable to live databases but not dropped databases. + Value has to be 12 or 24. Defaults to 12 hours. + type: number + retentionDays: + description: Point In Time Restore configuration. 
Value has + to be between 1 and 35. + type: number + type: object + skuName: + description: Specifies the name of the SKU used by the database. + For example, GP_S_Gen5_2,HS_Gen4_1,BC_Gen5_2, ElasticPool, Basic,S0, + P2 ,DW100c, DS100. Changing this from the HyperScale service + tier to another service tier will create a new resource. + type: string + storageAccountType: + description: Specifies the storage account type used to store + backups for this database. Possible values are Geo, GeoZone, + Local and Zone. Defaults to Geo. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + threatDetectionPolicy: + description: Threat detection policy configuration. The threat_detection_policy + block supports fields documented below. + properties: + disabledAlerts: + description: Specifies a list of alerts which should be disabled. + Possible values include Access_Anomaly, Sql_Injection and + Sql_Injection_Vulnerability. + items: + type: string + type: array + x-kubernetes-list-type: set + emailAccountAdmins: + description: Should the account administrators be emailed + when this alert is triggered? Possible values are Enabled + or Disabled. Defaults to Disabled. + type: string + emailAddresses: + description: A list of email addresses which alerts should + be sent to. + items: + type: string + type: array + x-kubernetes-list-type: set + retentionDays: + description: Specifies the number of days to keep in the Threat + Detection audit logs. + type: number + state: + description: The State of the Policy. Possible values are + Enabled or Disabled. Defaults to Disabled. + type: string + storageEndpoint: + description: Specifies the blob storage endpoint (e.g. https://example.blob.core.windows.net). + This blob storage will hold all Threat Detection audit logs. + Required if state is Enabled. 
+ type: string + type: object + transparentDataEncryptionEnabled: + description: If set to true, Transparent Data Encryption will + be enabled on the database. Defaults to true. + type: boolean + transparentDataEncryptionKeyAutomaticRotationEnabled: + description: Boolean flag to specify whether TDE automatically + rotates the encryption Key to latest version or not. Possible + values are true or false. Defaults to false. + type: boolean + transparentDataEncryptionKeyVaultKeyId: + description: The fully versioned Key Vault Key URL (e.g. 'https://.vault.azure.net/keys//) + to be used as the Customer Managed Key(CMK/BYOK) for the Transparent + Data Encryption(TDE) layer. + type: string + zoneRedundant: + description: Whether or not this database is zone redundant, which + means the replicas of this database will be spread across multiple + availability zones. This property is only settable for Premium + and Business Critical databases. + type: boolean + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. 
+ type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/sql.azure.upbound.io_mssqlelasticpools.yaml b/package/crds/sql.azure.upbound.io_mssqlelasticpools.yaml index 957393567..137bf4a44 100644 --- a/package/crds/sql.azure.upbound.io_mssqlelasticpools.yaml +++ b/package/crds/sql.azure.upbound.io_mssqlelasticpools.yaml @@ -790,3 +790,760 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: MSSQLElasticPool is the Schema for the MSSQLElasticPools API. + Manages an Azure SQL Elastic Pool. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: MSSQLElasticPoolSpec defines the desired state of MSSQLElasticPool + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + enclaveType: + description: Specifies the type of enclave to be used by the elastic + pool. Possible value VBS. + type: string + licenseType: + description: Specifies the license type applied to this database. + Possible values are LicenseIncluded and BasePrice. + type: string + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + maintenanceConfigurationName: + description: The name of the Public Maintenance Configuration + window to apply to the elastic pool. 
Valid values include SQL_Default, + SQL_EastUS_DB_1, SQL_EastUS2_DB_1, SQL_SoutheastAsia_DB_1, SQL_AustraliaEast_DB_1, + SQL_NorthEurope_DB_1, SQL_SouthCentralUS_DB_1, SQL_WestUS2_DB_1, + SQL_UKSouth_DB_1, SQL_WestEurope_DB_1, SQL_EastUS_DB_2, SQL_EastUS2_DB_2, + SQL_WestUS2_DB_2, SQL_SoutheastAsia_DB_2, SQL_AustraliaEast_DB_2, + SQL_NorthEurope_DB_2, SQL_SouthCentralUS_DB_2, SQL_UKSouth_DB_2, + SQL_WestEurope_DB_2, SQL_AustraliaSoutheast_DB_1, SQL_BrazilSouth_DB_1, + SQL_CanadaCentral_DB_1, SQL_CanadaEast_DB_1, SQL_CentralUS_DB_1, + SQL_EastAsia_DB_1, SQL_FranceCentral_DB_1, SQL_GermanyWestCentral_DB_1, + SQL_CentralIndia_DB_1, SQL_SouthIndia_DB_1, SQL_JapanEast_DB_1, + SQL_JapanWest_DB_1, SQL_NorthCentralUS_DB_1, SQL_UKWest_DB_1, + SQL_WestUS_DB_1, SQL_AustraliaSoutheast_DB_2, SQL_BrazilSouth_DB_2, + SQL_CanadaCentral_DB_2, SQL_CanadaEast_DB_2, SQL_CentralUS_DB_2, + SQL_EastAsia_DB_2, SQL_FranceCentral_DB_2, SQL_GermanyWestCentral_DB_2, + SQL_CentralIndia_DB_2, SQL_SouthIndia_DB_2, SQL_JapanEast_DB_2, + SQL_JapanWest_DB_2, SQL_NorthCentralUS_DB_2, SQL_UKWest_DB_2, + SQL_WestUS_DB_2, SQL_WestCentralUS_DB_1, SQL_FranceSouth_DB_1, + SQL_WestCentralUS_DB_2, SQL_FranceSouth_DB_2, SQL_SwitzerlandNorth_DB_1, + SQL_SwitzerlandNorth_DB_2, SQL_BrazilSoutheast_DB_1, SQL_UAENorth_DB_1, + SQL_BrazilSoutheast_DB_2, SQL_UAENorth_DB_2. Defaults to SQL_Default. + type: string + maxSizeBytes: + description: The max data size of the elastic pool in bytes. Conflicts + with max_size_gb. + type: number + maxSizeGb: + description: The max data size of the elastic pool in gigabytes. + Conflicts with max_size_bytes. + type: number + perDatabaseSettings: + description: A per_database_settings block as defined below. + properties: + maxCapacity: + description: The maximum capacity any one database can consume. + type: number + minCapacity: + description: The minimum capacity all databases are guaranteed. 
+ type: number + type: object + resourceGroupName: + description: The name of the resource group in which to create + the elastic pool. This must be the same as the resource group + of the underlying SQL server. Changing this forces a new resource + to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + serverName: + description: The name of the SQL Server on which to create the + elastic pool. Changing this forces a new resource to be created. + type: string + serverNameRef: + description: Reference to a MSSQLServer in sql to populate serverName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serverNameSelector: + description: Selector for a MSSQLServer in sql to populate serverName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sku: + description: A sku block as defined below. + properties: + capacity: + description: 'The scale up/out capacity, representing server''s + compute units. For more information see the documentation + for your Elasticpool configuration: vCore-based or DTU-based.' + type: number + family: + description: The family of hardware Gen4, Gen5, Fsv2 or DC. + type: string + name: + description: Specifies the SKU Name for this Elasticpool. + The name of the SKU, will be either vCore based or DTU based. + Possible DTU based values are BasicPool, StandardPool, PremiumPool + while possible vCore based values are GP_Gen4, GP_Gen5, + GP_Fsv2, GP_DC, BC_Gen4, BC_Gen5, BC_DC, or HS_Gen5. + type: string + tier: + description: 'The tier of the particular SKU. Possible values + are GeneralPurpose, BusinessCritical, Basic, Standard, Premium, + or HyperScale. For more information see the documentation + for your Elasticpool configuration: vCore-based or DTU-based.' 
+ type: string + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + zoneRedundant: + description: Whether or not this elastic pool is zone redundant. + tier needs to be Premium for DTU based or BusinessCritical for + vCore based sku. + type: boolean + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + enclaveType: + description: Specifies the type of enclave to be used by the elastic + pool. Possible value VBS. + type: string + licenseType: + description: Specifies the license type applied to this database. + Possible values are LicenseIncluded and BasePrice. + type: string + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + maintenanceConfigurationName: + description: The name of the Public Maintenance Configuration + window to apply to the elastic pool. 
Valid values include SQL_Default, + SQL_EastUS_DB_1, SQL_EastUS2_DB_1, SQL_SoutheastAsia_DB_1, SQL_AustraliaEast_DB_1, + SQL_NorthEurope_DB_1, SQL_SouthCentralUS_DB_1, SQL_WestUS2_DB_1, + SQL_UKSouth_DB_1, SQL_WestEurope_DB_1, SQL_EastUS_DB_2, SQL_EastUS2_DB_2, + SQL_WestUS2_DB_2, SQL_SoutheastAsia_DB_2, SQL_AustraliaEast_DB_2, + SQL_NorthEurope_DB_2, SQL_SouthCentralUS_DB_2, SQL_UKSouth_DB_2, + SQL_WestEurope_DB_2, SQL_AustraliaSoutheast_DB_1, SQL_BrazilSouth_DB_1, + SQL_CanadaCentral_DB_1, SQL_CanadaEast_DB_1, SQL_CentralUS_DB_1, + SQL_EastAsia_DB_1, SQL_FranceCentral_DB_1, SQL_GermanyWestCentral_DB_1, + SQL_CentralIndia_DB_1, SQL_SouthIndia_DB_1, SQL_JapanEast_DB_1, + SQL_JapanWest_DB_1, SQL_NorthCentralUS_DB_1, SQL_UKWest_DB_1, + SQL_WestUS_DB_1, SQL_AustraliaSoutheast_DB_2, SQL_BrazilSouth_DB_2, + SQL_CanadaCentral_DB_2, SQL_CanadaEast_DB_2, SQL_CentralUS_DB_2, + SQL_EastAsia_DB_2, SQL_FranceCentral_DB_2, SQL_GermanyWestCentral_DB_2, + SQL_CentralIndia_DB_2, SQL_SouthIndia_DB_2, SQL_JapanEast_DB_2, + SQL_JapanWest_DB_2, SQL_NorthCentralUS_DB_2, SQL_UKWest_DB_2, + SQL_WestUS_DB_2, SQL_WestCentralUS_DB_1, SQL_FranceSouth_DB_1, + SQL_WestCentralUS_DB_2, SQL_FranceSouth_DB_2, SQL_SwitzerlandNorth_DB_1, + SQL_SwitzerlandNorth_DB_2, SQL_BrazilSoutheast_DB_1, SQL_UAENorth_DB_1, + SQL_BrazilSoutheast_DB_2, SQL_UAENorth_DB_2. Defaults to SQL_Default. + type: string + maxSizeBytes: + description: The max data size of the elastic pool in bytes. Conflicts + with max_size_gb. + type: number + maxSizeGb: + description: The max data size of the elastic pool in gigabytes. + Conflicts with max_size_bytes. + type: number + perDatabaseSettings: + description: A per_database_settings block as defined below. + properties: + maxCapacity: + description: The maximum capacity any one database can consume. + type: number + minCapacity: + description: The minimum capacity all databases are guaranteed. + type: number + type: object + sku: + description: A sku block as defined below. 
+ properties: + capacity: + description: 'The scale up/out capacity, representing server''s + compute units. For more information see the documentation + for your Elasticpool configuration: vCore-based or DTU-based.' + type: number + family: + description: The family of hardware Gen4, Gen5, Fsv2 or DC. + type: string + name: + description: Specifies the SKU Name for this Elasticpool. + The name of the SKU, will be either vCore based or DTU based. + Possible DTU based values are BasicPool, StandardPool, PremiumPool + while possible vCore based values are GP_Gen4, GP_Gen5, + GP_Fsv2, GP_DC, BC_Gen4, BC_Gen5, BC_DC, or HS_Gen5. + type: string + tier: + description: 'The tier of the particular SKU. Possible values + are GeneralPurpose, BusinessCritical, Basic, Standard, Premium, + or HyperScale. For more information see the documentation + for your Elasticpool configuration: vCore-based or DTU-based.' + type: string + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + zoneRedundant: + description: Whether or not this elastic pool is zone redundant. + tier needs to be Premium for DTU based or BusinessCritical for + vCore based sku. + type: boolean + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.perDatabaseSettings is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.perDatabaseSettings) + || (has(self.initProvider) && has(self.initProvider.perDatabaseSettings))' + - message: spec.forProvider.sku is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.sku) + || (has(self.initProvider) && has(self.initProvider.sku))' + status: + description: MSSQLElasticPoolStatus defines the observed state of MSSQLElasticPool. 
+ properties: + atProvider: + properties: + enclaveType: + description: Specifies the type of enclave to be used by the elastic + pool. Possible value VBS. + type: string + id: + description: The ID of the MS SQL Elastic Pool. + type: string + licenseType: + description: Specifies the license type applied to this database. + Possible values are LicenseIncluded and BasePrice. + type: string + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + maintenanceConfigurationName: + description: The name of the Public Maintenance Configuration + window to apply to the elastic pool. Valid values include SQL_Default, + SQL_EastUS_DB_1, SQL_EastUS2_DB_1, SQL_SoutheastAsia_DB_1, SQL_AustraliaEast_DB_1, + SQL_NorthEurope_DB_1, SQL_SouthCentralUS_DB_1, SQL_WestUS2_DB_1, + SQL_UKSouth_DB_1, SQL_WestEurope_DB_1, SQL_EastUS_DB_2, SQL_EastUS2_DB_2, + SQL_WestUS2_DB_2, SQL_SoutheastAsia_DB_2, SQL_AustraliaEast_DB_2, + SQL_NorthEurope_DB_2, SQL_SouthCentralUS_DB_2, SQL_UKSouth_DB_2, + SQL_WestEurope_DB_2, SQL_AustraliaSoutheast_DB_1, SQL_BrazilSouth_DB_1, + SQL_CanadaCentral_DB_1, SQL_CanadaEast_DB_1, SQL_CentralUS_DB_1, + SQL_EastAsia_DB_1, SQL_FranceCentral_DB_1, SQL_GermanyWestCentral_DB_1, + SQL_CentralIndia_DB_1, SQL_SouthIndia_DB_1, SQL_JapanEast_DB_1, + SQL_JapanWest_DB_1, SQL_NorthCentralUS_DB_1, SQL_UKWest_DB_1, + SQL_WestUS_DB_1, SQL_AustraliaSoutheast_DB_2, SQL_BrazilSouth_DB_2, + SQL_CanadaCentral_DB_2, SQL_CanadaEast_DB_2, SQL_CentralUS_DB_2, + SQL_EastAsia_DB_2, SQL_FranceCentral_DB_2, SQL_GermanyWestCentral_DB_2, + SQL_CentralIndia_DB_2, SQL_SouthIndia_DB_2, SQL_JapanEast_DB_2, + SQL_JapanWest_DB_2, SQL_NorthCentralUS_DB_2, SQL_UKWest_DB_2, + SQL_WestUS_DB_2, SQL_WestCentralUS_DB_1, SQL_FranceSouth_DB_1, + SQL_WestCentralUS_DB_2, SQL_FranceSouth_DB_2, SQL_SwitzerlandNorth_DB_1, + SQL_SwitzerlandNorth_DB_2, SQL_BrazilSoutheast_DB_1, SQL_UAENorth_DB_1, + 
SQL_BrazilSoutheast_DB_2, SQL_UAENorth_DB_2. Defaults to SQL_Default. + type: string + maxSizeBytes: + description: The max data size of the elastic pool in bytes. Conflicts + with max_size_gb. + type: number + maxSizeGb: + description: The max data size of the elastic pool in gigabytes. + Conflicts with max_size_bytes. + type: number + perDatabaseSettings: + description: A per_database_settings block as defined below. + properties: + maxCapacity: + description: The maximum capacity any one database can consume. + type: number + minCapacity: + description: The minimum capacity all databases are guaranteed. + type: number + type: object + resourceGroupName: + description: The name of the resource group in which to create + the elastic pool. This must be the same as the resource group + of the underlying SQL server. Changing this forces a new resource + to be created. + type: string + serverName: + description: The name of the SQL Server on which to create the + elastic pool. Changing this forces a new resource to be created. + type: string + sku: + description: A sku block as defined below. + properties: + capacity: + description: 'The scale up/out capacity, representing server''s + compute units. For more information see the documentation + for your Elasticpool configuration: vCore-based or DTU-based.' + type: number + family: + description: The family of hardware Gen4, Gen5, Fsv2 or DC. + type: string + name: + description: Specifies the SKU Name for this Elasticpool. + The name of the SKU, will be either vCore based or DTU based. + Possible DTU based values are BasicPool, StandardPool, PremiumPool + while possible vCore based values are GP_Gen4, GP_Gen5, + GP_Fsv2, GP_DC, BC_Gen4, BC_Gen5, BC_DC, or HS_Gen5. + type: string + tier: + description: 'The tier of the particular SKU. Possible values + are GeneralPurpose, BusinessCritical, Basic, Standard, Premium, + or HyperScale. 
For more information see the documentation + for your Elasticpool configuration: vCore-based or DTU-based.' + type: string + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + zoneRedundant: + description: Whether or not this elastic pool is zone redundant. + tier needs to be Premium for DTU based or BusinessCritical for + vCore based sku. + type: boolean + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/sql.azure.upbound.io_mssqlfailovergroups.yaml b/package/crds/sql.azure.upbound.io_mssqlfailovergroups.yaml index f09ffc76a..009cc6e9d 100644 --- a/package/crds/sql.azure.upbound.io_mssqlfailovergroups.yaml +++ b/package/crds/sql.azure.upbound.io_mssqlfailovergroups.yaml @@ -866,3 +866,845 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: MSSQLFailoverGroup is the Schema for the MSSQLFailoverGroups + API. Manages a Microsoft Azure SQL Failover Group. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. 
+ Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: MSSQLFailoverGroupSpec defines the desired state of MSSQLFailoverGroup + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + databases: + description: A set of database names to include in the failover + group. + items: + type: string + type: array + x-kubernetes-list-type: set + databasesRefs: + description: References to MSSQLDatabase in sql to populate databases. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + databasesSelector: + description: Selector for a list of MSSQLDatabase in sql to populate + databases. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + partnerServer: + description: A partner_server block as defined below. + items: + properties: + id: + description: The ID of a partner SQL server to include in + the failover group. + type: string + idRef: + description: Reference to a MSSQLServer in sql to populate + id. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + idSelector: + description: Selector for a MSSQLServer in sql to populate + id. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + readWriteEndpointFailoverPolicy: + description: A read_write_endpoint_failover_policy block as defined + below. + properties: + graceMinutes: + description: The grace period in minutes, before failover + with data loss is attempted for the read-write endpoint. + Required when mode is Automatic. + type: number + mode: + description: The failover policy of the read-write endpoint + for the failover group. Possible values are Automatic or + Manual. + type: string + type: object + readonlyEndpointFailoverPolicyEnabled: + description: Whether failover is enabled for the readonly endpoint. + Defaults to false. + type: boolean + serverId: + description: The ID of the primary SQL Server on which to create + the failover group. Changing this forces a new resource to be + created. + type: string + serverIdRef: + description: Reference to a MSSQLServer in sql to populate serverId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serverIdSelector: + description: Selector for a MSSQLServer in sql to populate serverId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. 
+ properties: + databases: + description: A set of database names to include in the failover + group. + items: + type: string + type: array + x-kubernetes-list-type: set + databasesRefs: + description: References to MSSQLDatabase in sql to populate databases. + items: + description: A Reference to a named object. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + type: array + databasesSelector: + description: Selector for a list of MSSQLDatabase in sql to populate + databases. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + partnerServer: + description: A partner_server block as defined below. + items: + properties: + id: + description: The ID of a partner SQL server to include in + the failover group. + type: string + idRef: + description: Reference to a MSSQLServer in sql to populate + id. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + idSelector: + description: Selector for a MSSQLServer in sql to populate + id. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + readWriteEndpointFailoverPolicy: + description: A read_write_endpoint_failover_policy block as defined + below. + properties: + graceMinutes: + description: The grace period in minutes, before failover + with data loss is attempted for the read-write endpoint. + Required when mode is Automatic. + type: number + mode: + description: The failover policy of the read-write endpoint + for the failover group. Possible values are Automatic or + Manual. + type: string + type: object + readonlyEndpointFailoverPolicyEnabled: + description: Whether failover is enabled for the readonly endpoint. + Defaults to false. + type: boolean + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. 
+ ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.partnerServer is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.partnerServer) + || (has(self.initProvider) && has(self.initProvider.partnerServer))' + - message: spec.forProvider.readWriteEndpointFailoverPolicy is a required + parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.readWriteEndpointFailoverPolicy) + || (has(self.initProvider) && has(self.initProvider.readWriteEndpointFailoverPolicy))' + status: + description: MSSQLFailoverGroupStatus defines the observed state of MSSQLFailoverGroup. + properties: + atProvider: + properties: + databases: + description: A set of database names to include in the failover + group. + items: + type: string + type: array + x-kubernetes-list-type: set + id: + description: The ID of the Failover Group. + type: string + partnerServer: + description: A partner_server block as defined below. + items: + properties: + id: + description: The ID of a partner SQL server to include in + the failover group. + type: string + location: + description: The location of the partner server. + type: string + role: + description: The replication role of the partner server. + Possible values include Primary or Secondary. + type: string + type: object + type: array + readWriteEndpointFailoverPolicy: + description: A read_write_endpoint_failover_policy block as defined + below. + properties: + graceMinutes: + description: The grace period in minutes, before failover + with data loss is attempted for the read-write endpoint. + Required when mode is Automatic. 
+ type: number + mode: + description: The failover policy of the read-write endpoint + for the failover group. Possible values are Automatic or + Manual. + type: string + type: object + readonlyEndpointFailoverPolicyEnabled: + description: Whether failover is enabled for the readonly endpoint. + Defaults to false. + type: boolean + serverId: + description: The ID of the primary SQL Server on which to create + the failover group. Changing this forces a new resource to be + created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/sql.azure.upbound.io_mssqlmanageddatabases.yaml b/package/crds/sql.azure.upbound.io_mssqlmanageddatabases.yaml index 97f49eca3..c1af46455 100644 --- a/package/crds/sql.azure.upbound.io_mssqlmanageddatabases.yaml +++ b/package/crds/sql.azure.upbound.io_mssqlmanageddatabases.yaml @@ -559,3 +559,529 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: MSSQLManagedDatabase is the Schema for the MSSQLManagedDatabases + API. Manages an Azure SQL Azure Managed Database. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: MSSQLManagedDatabaseSpec defines the desired state of MSSQLManagedDatabase + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + longTermRetentionPolicy: + description: A long_term_retention_policy block as defined below. + properties: + immutableBackupsEnabled: + description: Specifies if the backups are immutable. Defaults + to false. + type: boolean + monthlyRetention: + description: The monthly retention policy for an LTR backup + in an ISO 8601 format. Valid value is between 1 to 120 months. + e.g. P1Y, P1M, P4W or P30D. + type: string + weekOfYear: + description: The week of year to take the yearly backup. Value + has to be between 1 and 52. + type: number + weeklyRetention: + description: The weekly retention policy for an LTR backup + in an ISO 8601 format. Valid value is between 1 to 520 weeks. + e.g. P1Y, P1M, P1W or P7D. + type: string + yearlyRetention: + description: The yearly retention policy for an LTR backup + in an ISO 8601 format. Valid value is between 1 to 10 years. + e.g. P1Y, P12M, P52W or P365D. 
+ type: string + type: object + managedInstanceId: + description: The ID of the Azure SQL Managed Instance on which + to create this Managed Database. Changing this forces a new + resource to be created. + type: string + managedInstanceIdRef: + description: Reference to a MSSQLManagedInstance in sql to populate + managedInstanceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + managedInstanceIdSelector: + description: Selector for a MSSQLManagedInstance in sql to populate + managedInstanceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + pointInTimeRestore: + description: A point_in_time_restore block as defined below. Changing + this forces a new resource to be created. + properties: + restorePointInTime: + description: The point in time for the restore from source_database_id. + Changing this forces a new resource to be created. + type: string + sourceDatabaseId: + description: The source database id that will be used to restore + from. Changing this forces a new resource to be created. + type: string + type: object + shortTermRetentionDays: + description: The backup retention period in days. This is how + many days Point-in-Time Restore will be supported. + type: number + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + longTermRetentionPolicy: + description: A long_term_retention_policy block as defined below. 
+ properties: + immutableBackupsEnabled: + description: Specifies if the backups are immutable. Defaults + to false. + type: boolean + monthlyRetention: + description: The monthly retention policy for an LTR backup + in an ISO 8601 format. Valid value is between 1 to 120 months. + e.g. P1Y, P1M, P4W or P30D. + type: string + weekOfYear: + description: The week of year to take the yearly backup. Value + has to be between 1 and 52. + type: number + weeklyRetention: + description: The weekly retention policy for an LTR backup + in an ISO 8601 format. Valid value is between 1 to 520 weeks. + e.g. P1Y, P1M, P1W or P7D. + type: string + yearlyRetention: + description: The yearly retention policy for an LTR backup + in an ISO 8601 format. Valid value is between 1 to 10 years. + e.g. P1Y, P12M, P52W or P365D. + type: string + type: object + pointInTimeRestore: + description: A point_in_time_restore block as defined below. Changing + this forces a new resource to be created. + properties: + restorePointInTime: + description: The point in time for the restore from source_database_id. + Changing this forces a new resource to be created. + type: string + sourceDatabaseId: + description: The source database id that will be used to restore + from. Changing this forces a new resource to be created. + type: string + type: object + shortTermRetentionDays: + description: The backup retention period in days. This is how + many days Point-in-Time Restore will be supported. + type: number + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. 
If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: MSSQLManagedDatabaseStatus defines the observed state of + MSSQLManagedDatabase. + properties: + atProvider: + properties: + id: + description: The Azure SQL Managed Database ID. + type: string + longTermRetentionPolicy: + description: A long_term_retention_policy block as defined below. + properties: + immutableBackupsEnabled: + description: Specifies if the backups are immutable. Defaults + to false. + type: boolean + monthlyRetention: + description: The monthly retention policy for an LTR backup + in an ISO 8601 format. Valid value is between 1 to 120 months. + e.g. P1Y, P1M, P4W or P30D. 
+ type: string + weekOfYear: + description: The week of year to take the yearly backup. Value + has to be between 1 and 52. + type: number + weeklyRetention: + description: The weekly retention policy for an LTR backup + in an ISO 8601 format. Valid value is between 1 to 520 weeks. + e.g. P1Y, P1M, P1W or P7D. + type: string + yearlyRetention: + description: The yearly retention policy for an LTR backup + in an ISO 8601 format. Valid value is between 1 to 10 years. + e.g. P1Y, P12M, P52W or P365D. + type: string + type: object + managedInstanceId: + description: The ID of the Azure SQL Managed Instance on which + to create this Managed Database. Changing this forces a new + resource to be created. + type: string + pointInTimeRestore: + description: A point_in_time_restore block as defined below. Changing + this forces a new resource to be created. + properties: + restorePointInTime: + description: The point in time for the restore from source_database_id. + Changing this forces a new resource to be created. + type: string + sourceDatabaseId: + description: The source database id that will be used to restore + from. Changing this forces a new resource to be created. + type: string + type: object + shortTermRetentionDays: + description: The backup retention period in days. This is how + many days Point-in-Time Restore will be supported. + type: number + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/sql.azure.upbound.io_mssqlmanagedinstancefailovergroups.yaml b/package/crds/sql.azure.upbound.io_mssqlmanagedinstancefailovergroups.yaml index fc5611546..22db7ab69 100644 --- a/package/crds/sql.azure.upbound.io_mssqlmanagedinstancefailovergroups.yaml +++ b/package/crds/sql.azure.upbound.io_mssqlmanagedinstancefailovergroups.yaml @@ -754,3 +754,733 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: 
MSSQLManagedInstanceFailoverGroup is the Schema for the MSSQLManagedInstanceFailoverGroups + API. Manages an Azure SQL Managed Instance Failover Group. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: MSSQLManagedInstanceFailoverGroupSpec defines the desired + state of MSSQLManagedInstanceFailoverGroup + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + location: + description: The Azure Region where the Managed Instance Failover + Group should exist. Changing this forces a new resource to be + created. 
+ type: string + managedInstanceId: + description: The ID of the Azure SQL Managed Instance which will + be replicated using a Managed Instance Failover Group. Changing + this forces a new resource to be created. + type: string + managedInstanceIdRef: + description: Reference to a MSSQLManagedInstance in sql to populate + managedInstanceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + managedInstanceIdSelector: + description: Selector for a MSSQLManagedInstance in sql to populate + managedInstanceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + partnerManagedInstanceId: + description: The ID of the Azure SQL Managed Instance which will + be replicated to. Changing this forces a new resource to be + created. + type: string + partnerManagedInstanceIdRef: + description: Reference to a MSSQLManagedInstance in sql to populate + partnerManagedInstanceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + partnerManagedInstanceIdSelector: + description: Selector for a MSSQLManagedInstance in sql to populate + partnerManagedInstanceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + readWriteEndpointFailoverPolicy: + description: A read_write_endpoint_failover_policy block as defined + below. + properties: + graceMinutes: + description: Applies only if mode is Automatic. The grace + period in minutes before failover with data loss is attempted. + type: number + mode: + description: The failover mode. Possible values are Automatic + or Manual. + type: string + type: object + readonlyEndpointFailoverPolicyEnabled: + description: Failover policy for the read-only endpoint. Defaults + to true. + type: boolean + required: + - location + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + managedInstanceId: + description: The ID of the Azure SQL Managed Instance which will + be replicated using a Managed Instance Failover Group. Changing + this forces a new resource to be created. + type: string + managedInstanceIdRef: + description: Reference to a MSSQLManagedInstance in sql to populate + managedInstanceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + managedInstanceIdSelector: + description: Selector for a MSSQLManagedInstance in sql to populate + managedInstanceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + partnerManagedInstanceId: + description: The ID of the Azure SQL Managed Instance which will + be replicated to. Changing this forces a new resource to be + created. + type: string + partnerManagedInstanceIdRef: + description: Reference to a MSSQLManagedInstance in sql to populate + partnerManagedInstanceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + partnerManagedInstanceIdSelector: + description: Selector for a MSSQLManagedInstance in sql to populate + partnerManagedInstanceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + readWriteEndpointFailoverPolicy: + description: A read_write_endpoint_failover_policy block as defined + below. + properties: + graceMinutes: + description: Applies only if mode is Automatic. The grace + period in minutes before failover with data loss is attempted. + type: number + mode: + description: The failover mode. Possible values are Automatic + or Manual. + type: string + type: object + readonlyEndpointFailoverPolicyEnabled: + description: Failover policy for the read-only endpoint. Defaults + to true. + type: boolean + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. 
It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.readWriteEndpointFailoverPolicy is a required + parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.readWriteEndpointFailoverPolicy) + || (has(self.initProvider) && has(self.initProvider.readWriteEndpointFailoverPolicy))' + status: + description: MSSQLManagedInstanceFailoverGroupStatus defines the observed + state of MSSQLManagedInstanceFailoverGroup. + properties: + atProvider: + properties: + id: + description: The ID of the Managed Instance Failover Group. 
+ type: string + location: + description: The Azure Region where the Managed Instance Failover + Group should exist. Changing this forces a new resource to be + created. + type: string + managedInstanceId: + description: The ID of the Azure SQL Managed Instance which will + be replicated using a Managed Instance Failover Group. Changing + this forces a new resource to be created. + type: string + partnerManagedInstanceId: + description: The ID of the Azure SQL Managed Instance which will + be replicated to. Changing this forces a new resource to be + created. + type: string + partnerRegion: + description: A partner_region block as defined below. + items: + properties: + location: + description: The Azure Region where the Managed Instance + Failover Group partner exists. + type: string + role: + description: The partner replication role of the Managed + Instance Failover Group. + type: string + type: object + type: array + readWriteEndpointFailoverPolicy: + description: A read_write_endpoint_failover_policy block as defined + below. + properties: + graceMinutes: + description: Applies only if mode is Automatic. The grace + period in minutes before failover with data loss is attempted. + type: number + mode: + description: The failover mode. Possible values are Automatic + or Manual. + type: string + type: object + readonlyEndpointFailoverPolicyEnabled: + description: Failover policy for the read-only endpoint. Defaults + to true. + type: boolean + role: + description: The local replication role of the Managed Instance + Failover Group. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. 
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/sql.azure.upbound.io_mssqlmanagedinstances.yaml b/package/crds/sql.azure.upbound.io_mssqlmanagedinstances.yaml index 92bd0fc4d..51f13f536 100644 --- a/package/crds/sql.azure.upbound.io_mssqlmanagedinstances.yaml +++ b/package/crds/sql.azure.upbound.io_mssqlmanagedinstances.yaml @@ -1077,3 +1077,1056 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: MSSQLManagedInstance is the Schema for the MSSQLManagedInstances + API. Manages a Microsoft SQL Azure Managed Instance. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: MSSQLManagedInstanceSpec defines the desired state of MSSQLManagedInstance + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + administratorLogin: + description: The administrator login name for the new SQL Managed + Instance. Changing this forces a new resource to be created. + type: string + administratorLoginPasswordSecretRef: + description: The password associated with the administrator_login + user. Needs to comply with Azure's Password Policy + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + collation: + description: Specifies how the SQL Managed Instance will be collated. + Default value is SQL_Latin1_General_CP1_CI_AS. Changing this + forces a new resource to be created. + type: string + dnsZonePartnerId: + description: The ID of the SQL Managed Instance which will share + the DNS zone. This is a prerequisite for creating an azurerm_sql_managed_instance_failover_group. 
+ Setting this after creation forces a new resource to be created. + type: string + dnsZonePartnerIdRef: + description: Reference to a MSSQLManagedInstance in sql to populate + dnsZonePartnerId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + dnsZonePartnerIdSelector: + description: Selector for a MSSQLManagedInstance in sql to populate + dnsZonePartnerId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this SQL Managed Instance. Required + when type is set to UserAssigned. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this SQL Managed Instance. + Possible values are SystemAssigned, UserAssigned. + type: string + type: object + licenseType: + description: What type of license the Managed Instance will use. + Possible values are LicenseIncluded and BasePrice. + type: string + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + maintenanceConfigurationName: + description: The name of the Public Maintenance Configuration + window to apply to the SQL Managed Instance. Valid values include + SQL_Default or an Azure Location in the format SQL_{Location}_MI_{Size}(for + example SQL_EastUS_MI_1). Defaults to SQL_Default. + type: string + minimumTlsVersion: + description: The Minimum TLS Version. Default value is 1.2 Valid + values include 1.0, 1.1, 1.2. + type: string + proxyOverride: + description: Specifies how the SQL Managed Instance will be accessed. + Default value is Default. Valid values include Default, Proxy, + and Redirect. + type: string + publicDataEndpointEnabled: + description: Is the public data endpoint enabled? Default value + is false. 
+ type: boolean + resourceGroupName: + description: The name of the resource group in which to create + the SQL Managed Instance. Changing this forces a new resource + to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + skuName: + description: Specifies the SKU Name for the SQL Managed Instance. + Valid values include GP_Gen4, GP_Gen5, GP_Gen8IM, GP_Gen8IH, + BC_Gen4, BC_Gen5, BC_Gen8IM or BC_Gen8IH. + type: string + storageAccountType: + description: Specifies the storage account type used to store + backups for this database. Changing this forces a new resource + to be created. Possible values are GRS, LRS and ZRS. Defaults + to GRS. + type: string + storageSizeInGb: + description: Maximum storage space for the SQL Managed instance. + This should be a multiple of 32 (GB). + type: number + subnetId: + description: The subnet resource id that the SQL Managed Instance + will be associated with. Changing this forces a new resource + to be created. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + timezoneId: + description: The TimeZone ID that the SQL Managed Instance will + be operating in. Default value is UTC. Changing this forces + a new resource to be created. + type: string + vcores: + description: Number of cores that should be assigned to the SQL + Managed Instance. 
Values can be 8, 16, or 24 for Gen4 SKUs, + or 4, 6, 8, 10, 12, 16, 20, 24, 32, 40, 48, 56, 64, 80, 96 or + 128 for Gen5 SKUs. + type: number + zoneRedundantEnabled: + description: Specifies whether or not the SQL Managed Instance + is zone redundant. Defaults to false. + type: boolean + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + administratorLogin: + description: The administrator login name for the new SQL Managed + Instance. Changing this forces a new resource to be created. + type: string + collation: + description: Specifies how the SQL Managed Instance will be collated. + Default value is SQL_Latin1_General_CP1_CI_AS. Changing this + forces a new resource to be created. + type: string + dnsZonePartnerId: + description: The ID of the SQL Managed Instance which will share + the DNS zone. This is a prerequisite for creating an azurerm_sql_managed_instance_failover_group. + Setting this after creation forces a new resource to be created. + type: string + dnsZonePartnerIdRef: + description: Reference to a MSSQLManagedInstance in sql to populate + dnsZonePartnerId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + dnsZonePartnerIdSelector: + description: Selector for a MSSQLManagedInstance in sql to populate + dnsZonePartnerId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this SQL Managed Instance. Required + when type is set to UserAssigned. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this SQL Managed Instance. + Possible values are SystemAssigned, UserAssigned. + type: string + type: object + licenseType: + description: What type of license the Managed Instance will use. + Possible values are LicenseIncluded and BasePrice. + type: string + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + maintenanceConfigurationName: + description: The name of the Public Maintenance Configuration + window to apply to the SQL Managed Instance. Valid values include + SQL_Default or an Azure Location in the format SQL_{Location}_MI_{Size}(for + example SQL_EastUS_MI_1). Defaults to SQL_Default. + type: string + minimumTlsVersion: + description: The Minimum TLS Version. Default value is 1.2 Valid + values include 1.0, 1.1, 1.2. + type: string + proxyOverride: + description: Specifies how the SQL Managed Instance will be accessed. + Default value is Default. Valid values include Default, Proxy, + and Redirect. + type: string + publicDataEndpointEnabled: + description: Is the public data endpoint enabled? Default value + is false. + type: boolean + skuName: + description: Specifies the SKU Name for the SQL Managed Instance. + Valid values include GP_Gen4, GP_Gen5, GP_Gen8IM, GP_Gen8IH, + BC_Gen4, BC_Gen5, BC_Gen8IM or BC_Gen8IH. + type: string + storageAccountType: + description: Specifies the storage account type used to store + backups for this database. 
Changing this forces a new resource + to be created. Possible values are GRS, LRS and ZRS. Defaults + to GRS. + type: string + storageSizeInGb: + description: Maximum storage space for the SQL Managed instance. + This should be a multiple of 32 (GB). + type: number + subnetId: + description: The subnet resource id that the SQL Managed Instance + will be associated with. Changing this forces a new resource + to be created. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + timezoneId: + description: The TimeZone ID that the SQL Managed Instance will + be operating in. Default value is UTC. Changing this forces + a new resource to be created. + type: string + vcores: + description: Number of cores that should be assigned to the SQL + Managed Instance. Values can be 8, 16, or 24 for Gen4 SKUs, + or 4, 6, 8, 10, 12, 16, 20, 24, 32, 40, 48, 56, 64, 80, 96 or + 128 for Gen5 SKUs. + type: number + zoneRedundantEnabled: + description: Specifies whether or not the SQL Managed Instance + is zone redundant. Defaults to false. + type: boolean + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. 
If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.administratorLogin is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.administratorLogin) + || (has(self.initProvider) && has(self.initProvider.administratorLogin))' + - message: spec.forProvider.administratorLoginPasswordSecretRef is a required + parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.administratorLoginPasswordSecretRef)' + - message: spec.forProvider.licenseType is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.licenseType) + || (has(self.initProvider) && has(self.initProvider.licenseType))' + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.skuName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.skuName) + || (has(self.initProvider) && has(self.initProvider.skuName))' + - message: spec.forProvider.storageSizeInGb is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.storageSizeInGb) + || (has(self.initProvider) && has(self.initProvider.storageSizeInGb))' + - message: spec.forProvider.vcores is a required parameter + 
rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.vcores) + || (has(self.initProvider) && has(self.initProvider.vcores))' + status: + description: MSSQLManagedInstanceStatus defines the observed state of + MSSQLManagedInstance. + properties: + atProvider: + properties: + administratorLogin: + description: The administrator login name for the new SQL Managed + Instance. Changing this forces a new resource to be created. + type: string + collation: + description: Specifies how the SQL Managed Instance will be collated. + Default value is SQL_Latin1_General_CP1_CI_AS. Changing this + forces a new resource to be created. + type: string + dnsZone: + description: The Dns Zone where the SQL Managed Instance is located. + type: string + dnsZonePartnerId: + description: The ID of the SQL Managed Instance which will share + the DNS zone. This is a prerequisite for creating an azurerm_sql_managed_instance_failover_group. + Setting this after creation forces a new resource to be created. + type: string + fqdn: + description: The fully qualified domain name of the Azure Managed + SQL Instance + type: string + id: + description: The SQL Managed Instance ID. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this SQL Managed Instance. Required + when type is set to UserAssigned. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Principal ID for the Service Principal associated + with the Identity of this SQL Managed Instance. + type: string + tenantId: + description: The Tenant ID for the Service Principal associated + with the Identity of this SQL Managed Instance. 
+ type: string + type: + description: Specifies the type of Managed Service Identity + that should be configured on this SQL Managed Instance. + Possible values are SystemAssigned, UserAssigned. + type: string + type: object + licenseType: + description: What type of license the Managed Instance will use. + Possible values are LicenseIncluded and BasePrice. + type: string + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + maintenanceConfigurationName: + description: The name of the Public Maintenance Configuration + window to apply to the SQL Managed Instance. Valid values include + SQL_Default or an Azure Location in the format SQL_{Location}_MI_{Size}(for + example SQL_EastUS_MI_1). Defaults to SQL_Default. + type: string + minimumTlsVersion: + description: The Minimum TLS Version. Default value is 1.2 Valid + values include 1.0, 1.1, 1.2. + type: string + proxyOverride: + description: Specifies how the SQL Managed Instance will be accessed. + Default value is Default. Valid values include Default, Proxy, + and Redirect. + type: string + publicDataEndpointEnabled: + description: Is the public data endpoint enabled? Default value + is false. + type: boolean + resourceGroupName: + description: The name of the resource group in which to create + the SQL Managed Instance. Changing this forces a new resource + to be created. + type: string + skuName: + description: Specifies the SKU Name for the SQL Managed Instance. + Valid values include GP_Gen4, GP_Gen5, GP_Gen8IM, GP_Gen8IH, + BC_Gen4, BC_Gen5, BC_Gen8IM or BC_Gen8IH. + type: string + storageAccountType: + description: Specifies the storage account type used to store + backups for this database. Changing this forces a new resource + to be created. Possible values are GRS, LRS and ZRS. Defaults + to GRS. + type: string + storageSizeInGb: + description: Maximum storage space for the SQL Managed instance. 
+ This should be a multiple of 32 (GB). + type: number + subnetId: + description: The subnet resource id that the SQL Managed Instance + will be associated with. Changing this forces a new resource + to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + timezoneId: + description: The TimeZone ID that the SQL Managed Instance will + be operating in. Default value is UTC. Changing this forces + a new resource to be created. + type: string + vcores: + description: Number of cores that should be assigned to the SQL + Managed Instance. Values can be 8, 16, or 24 for Gen4 SKUs, + or 4, 6, 8, 10, 12, 16, 20, 24, 32, 40, 48, 56, 64, 80, 96 or + 128 for Gen5 SKUs. + type: number + zoneRedundantEnabled: + description: Specifies whether or not the SQL Managed Instance + is zone redundant. Defaults to false. + type: boolean + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. 
+ type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/sql.azure.upbound.io_mssqlmanagedinstancevulnerabilityassessments.yaml b/package/crds/sql.azure.upbound.io_mssqlmanagedinstancevulnerabilityassessments.yaml index 201f4d6ce..86d365283 100644 --- a/package/crds/sql.azure.upbound.io_mssqlmanagedinstancevulnerabilityassessments.yaml +++ b/package/crds/sql.azure.upbound.io_mssqlmanagedinstancevulnerabilityassessments.yaml @@ -534,3 +534,513 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: MSSQLManagedInstanceVulnerabilityAssessment is the Schema for + the MSSQLManagedInstanceVulnerabilityAssessments API. Manages the Vulnerability + Assessment for an MS Managed Instance. 
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: MSSQLManagedInstanceVulnerabilityAssessmentSpec defines the + desired state of MSSQLManagedInstanceVulnerabilityAssessment + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + managedInstanceId: + description: The id of the MS SQL Managed Instance. Changing this + forces a new resource to be created. + type: string + managedInstanceIdRef: + description: Reference to a MSSQLManagedInstance in sql to populate + managedInstanceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + managedInstanceIdSelector: + description: Selector for a MSSQLManagedInstance in sql to populate + managedInstanceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + recurringScans: + description: The recurring scans settings. The recurring_scans + block supports fields documented below. + properties: + emailSubscriptionAdmins: + description: Boolean flag which specifies if the schedule + scan notification will be sent to the subscription administrators. + Defaults to true. + type: boolean + emails: + description: Specifies an array of e-mail addresses to which + the scan notification is sent. + items: + type: string + type: array + enabled: + description: Boolean flag which specifies if recurring scans + is enabled or disabled. Defaults to false. + type: boolean + type: object + storageAccountAccessKeySecretRef: + description: Specifies the identifier key of the storage account + for vulnerability assessment scan results. If storage_container_sas_key + isn't specified, storage_account_access_key is required. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + storageContainerPath: + description: A blob storage container path to hold the scan results + (e.g. https://myStorage.blob.core.windows.net/VaScans/). + type: string + storageContainerSasKeySecretRef: + description: A shared access signature (SAS Key) that has write + access to the blob container specified in storage_container_path + parameter. If storage_account_access_key isn't specified, storage_container_sas_key + is required. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. 
It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + recurringScans: + description: The recurring scans settings. The recurring_scans + block supports fields documented below. + properties: + emailSubscriptionAdmins: + description: Boolean flag which specifies if the schedule + scan notification will be sent to the subscription administrators. + Defaults to true. + type: boolean + emails: + description: Specifies an array of e-mail addresses to which + the scan notification is sent. + items: + type: string + type: array + enabled: + description: Boolean flag which specifies if recurring scans + is enabled or disabled. Defaults to false. + type: boolean + type: object + storageContainerPath: + description: A blob storage container path to hold the scan results + (e.g. https://myStorage.blob.core.windows.net/VaScans/). + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.storageContainerPath is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.storageContainerPath) + || (has(self.initProvider) && has(self.initProvider.storageContainerPath))' + status: + description: MSSQLManagedInstanceVulnerabilityAssessmentStatus defines + the observed state of MSSQLManagedInstanceVulnerabilityAssessment. + properties: + atProvider: + properties: + id: + description: The ID of the Vulnerability Assessment. + type: string + managedInstanceId: + description: The id of the MS SQL Managed Instance. Changing this + forces a new resource to be created. + type: string + recurringScans: + description: The recurring scans settings. The recurring_scans + block supports fields documented below. 
+ properties: + emailSubscriptionAdmins: + description: Boolean flag which specifies if the schedule + scan notification will be sent to the subscription administrators. + Defaults to true. + type: boolean + emails: + description: Specifies an array of e-mail addresses to which + the scan notification is sent. + items: + type: string + type: array + enabled: + description: Boolean flag which specifies if recurring scans + is enabled or disabled. Defaults to false. + type: boolean + type: object + storageContainerPath: + description: A blob storage container path to hold the scan results + (e.g. https://myStorage.blob.core.windows.net/VaScans/). + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/sql.azure.upbound.io_mssqlservers.yaml b/package/crds/sql.azure.upbound.io_mssqlservers.yaml index 2b5e955a6..d389786c7 100644 --- a/package/crds/sql.azure.upbound.io_mssqlservers.yaml +++ b/package/crds/sql.azure.upbound.io_mssqlservers.yaml @@ -1338,3 +1338,1311 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: MSSQLServer is the Schema for the MSSQLServers API. Manages a + Microsoft SQL Azure Database Server. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: MSSQLServerSpec defines the desired state of MSSQLServer + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + administratorLogin: + description: The administrator login name for the new server. + Required unless azuread_authentication_only in the azuread_administrator + block is true. When omitted, Azure will generate a default username + which cannot be subsequently changed. Changing this forces a + new resource to be created. + type: string + administratorLoginPasswordSecretRef: + description: The password associated with the administrator_login + user. Needs to comply with Azure's Password Policy. Required + unless azuread_authentication_only in the azuread_administrator + block is true. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + azureadAdministrator: + description: An azuread_administrator block as defined below. + properties: + azureadAuthenticationOnly: + description: Specifies whether only AD Users and administrators + (e.g. 
azuread_administrator[0].login_username) can be used + to login, or also local database users (e.g. administrator_login). + When true, the administrator_login and administrator_login_password + properties can be omitted. + type: boolean + loginUsername: + description: The login username of the Azure AD Administrator + of this SQL Server. + type: string + loginUsernameRef: + description: Reference to a UserAssignedIdentity in managedidentity + to populate loginUsername. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + loginUsernameSelector: + description: Selector for a UserAssignedIdentity in managedidentity + to populate loginUsername. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + objectId: + description: The object id of the Azure AD Administrator of + this SQL Server. + type: string + objectIdRef: + description: Reference to a UserAssignedIdentity in managedidentity + to populate objectId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + objectIdSelector: + description: Selector for a UserAssignedIdentity in managedidentity + to populate objectId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tenantId: + description: The tenant id of the Azure AD Administrator of + this SQL Server. + type: string + type: object + connectionPolicy: + description: The connection policy the server will use. Possible + values are Default, Proxy, and Redirect. Defaults to Default. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this SQL Server. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this SQL Server. Possible values + are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned + (to enable both). 
+ type: string + type: object + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + minimumTlsVersion: + description: 'The Minimum TLS Version for all SQL Database and + SQL Data Warehouse databases associated with the server. Valid + values are: 1.0, 1.1 , 1.2 and Disabled. Defaults to 1.2.' + type: string + outboundNetworkRestrictionEnabled: + description: Whether outbound network traffic is restricted for + this server. Defaults to false. + type: boolean + primaryUserAssignedIdentityId: + description: Specifies the primary user managed identity id. Required + if type is UserAssigned and should be combined with identity_ids. + type: string + primaryUserAssignedIdentityIdRef: + description: Reference to a UserAssignedIdentity in managedidentity + to populate primaryUserAssignedIdentityId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + primaryUserAssignedIdentityIdSelector: + description: Selector for a UserAssignedIdentity in managedidentity + to populate primaryUserAssignedIdentityId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + publicNetworkAccessEnabled: + description: Whether public network access is allowed for this + server. Defaults to true. + type: boolean + resourceGroupName: + description: The name of the resource group in which to create + the Microsoft SQL Server. Changing this forces a new resource + to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + transparentDataEncryptionKeyVaultKeyId: + description: The fully versioned Key Vault Key URL (e.g. 
'https://.vault.azure.net/keys//) + to be used as the Customer Managed Key(CMK/BYOK) for the Transparent + Data Encryption(TDE) layer. + type: string + transparentDataEncryptionKeyVaultKeyIdRef: + description: Reference to a Key in keyvault to populate transparentDataEncryptionKeyVaultKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + transparentDataEncryptionKeyVaultKeyIdSelector: + description: Selector for a Key in keyvault to populate transparentDataEncryptionKeyVaultKeyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + version: + description: 'The version for the new server. Valid values are: + 2.0 (for v11 server) and 12.0 (for v12 server). Changing this + forces a new resource to be created.' + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + administratorLogin: + description: The administrator login name for the new server. + Required unless azuread_authentication_only in the azuread_administrator + block is true. When omitted, Azure will generate a default username + which cannot be subsequently changed. Changing this forces a + new resource to be created. + type: string + azureadAdministrator: + description: An azuread_administrator block as defined below. + properties: + azureadAuthenticationOnly: + description: Specifies whether only AD Users and administrators + (e.g. 
azuread_administrator[0].login_username) can be used + to login, or also local database users (e.g. administrator_login). + When true, the administrator_login and administrator_login_password + properties can be omitted. + type: boolean + loginUsername: + description: The login username of the Azure AD Administrator + of this SQL Server. + type: string + loginUsernameRef: + description: Reference to a UserAssignedIdentity in managedidentity + to populate loginUsername. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + loginUsernameSelector: + description: Selector for a UserAssignedIdentity in managedidentity + to populate loginUsername. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + objectId: + description: The object id of the Azure AD Administrator of + this SQL Server. + type: string + objectIdRef: + description: Reference to a UserAssignedIdentity in managedidentity + to populate objectId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + objectIdSelector: + description: Selector for a UserAssignedIdentity in managedidentity + to populate objectId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tenantId: + description: The tenant id of the Azure AD Administrator of + this SQL Server. + type: string + type: object + connectionPolicy: + description: The connection policy the server will use. Possible + values are Default, Proxy, and Redirect. Defaults to Default. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this SQL Server. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this SQL Server. Possible values + are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned + (to enable both). 
+ type: string + type: object + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + minimumTlsVersion: + description: 'The Minimum TLS Version for all SQL Database and + SQL Data Warehouse databases associated with the server. Valid + values are: 1.0, 1.1 , 1.2 and Disabled. Defaults to 1.2.' + type: string + outboundNetworkRestrictionEnabled: + description: Whether outbound network traffic is restricted for + this server. Defaults to false. + type: boolean + primaryUserAssignedIdentityId: + description: Specifies the primary user managed identity id. Required + if type is UserAssigned and should be combined with identity_ids. + type: string + primaryUserAssignedIdentityIdRef: + description: Reference to a UserAssignedIdentity in managedidentity + to populate primaryUserAssignedIdentityId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + primaryUserAssignedIdentityIdSelector: + description: Selector for a UserAssignedIdentity in managedidentity + to populate primaryUserAssignedIdentityId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + publicNetworkAccessEnabled: + description: Whether public network access is allowed for this + server. Defaults to true. + type: boolean + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + transparentDataEncryptionKeyVaultKeyId: + description: The fully versioned Key Vault Key URL (e.g. 'https://.vault.azure.net/keys//) + to be used as the Customer Managed Key(CMK/BYOK) for the Transparent + Data Encryption(TDE) layer. + type: string + transparentDataEncryptionKeyVaultKeyIdRef: + description: Reference to a Key in keyvault to populate transparentDataEncryptionKeyVaultKeyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + transparentDataEncryptionKeyVaultKeyIdSelector: + description: Selector for a Key in keyvault to populate transparentDataEncryptionKeyVaultKeyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + version: + description: 'The version for the new server. Valid values are: + 2.0 (for v11 server) and 12.0 (for v12 server). Changing this + forces a new resource to be created.' + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.version is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.version) + || (has(self.initProvider) && has(self.initProvider.version))' + status: + description: MSSQLServerStatus defines the observed state of MSSQLServer. + properties: + atProvider: + properties: + administratorLogin: + description: The administrator login name for the new server. + Required unless azuread_authentication_only in the azuread_administrator + block is true. When omitted, Azure will generate a default username + which cannot be subsequently changed. Changing this forces a + new resource to be created. + type: string + azureadAdministrator: + description: An azuread_administrator block as defined below. + properties: + azureadAuthenticationOnly: + description: Specifies whether only AD Users and administrators + (e.g. azuread_administrator[0].login_username) can be used + to login, or also local database users (e.g. administrator_login). + When true, the administrator_login and administrator_login_password + properties can be omitted. + type: boolean + loginUsername: + description: The login username of the Azure AD Administrator + of this SQL Server. + type: string + objectId: + description: The object id of the Azure AD Administrator of + this SQL Server. + type: string + tenantId: + description: The tenant id of the Azure AD Administrator of + this SQL Server. 
+ type: string + type: object + connectionPolicy: + description: The connection policy the server will use. Possible + values are Default, Proxy, and Redirect. Defaults to Default. + type: string + fullyQualifiedDomainName: + description: The fully qualified domain name of the Azure SQL + Server (e.g. myServerName.database.windows.net) + type: string + id: + description: the Microsoft SQL Server ID. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this SQL Server. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Principal ID for the Service Principal associated + with the Identity of this SQL Server. + type: string + tenantId: + description: The Tenant ID for the Service Principal associated + with the Identity of this SQL Server. + type: string + type: + description: Specifies the type of Managed Service Identity + that should be configured on this SQL Server. Possible values + are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned + (to enable both). + type: string + type: object + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + minimumTlsVersion: + description: 'The Minimum TLS Version for all SQL Database and + SQL Data Warehouse databases associated with the server. Valid + values are: 1.0, 1.1 , 1.2 and Disabled. Defaults to 1.2.' + type: string + outboundNetworkRestrictionEnabled: + description: Whether outbound network traffic is restricted for + this server. Defaults to false. + type: boolean + primaryUserAssignedIdentityId: + description: Specifies the primary user managed identity id. Required + if type is UserAssigned and should be combined with identity_ids. 
+ type: string + publicNetworkAccessEnabled: + description: Whether public network access is allowed for this + server. Defaults to true. + type: boolean + resourceGroupName: + description: The name of the resource group in which to create + the Microsoft SQL Server. Changing this forces a new resource + to be created. + type: string + restorableDroppedDatabaseIds: + description: A list of dropped restorable database IDs on the + server. + items: + type: string + type: array + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + transparentDataEncryptionKeyVaultKeyId: + description: The fully versioned Key Vault Key URL (e.g. 'https://.vault.azure.net/keys//) + to be used as the Customer Managed Key(CMK/BYOK) for the Transparent + Data Encryption(TDE) layer. + type: string + version: + description: 'The version for the new server. Valid values are: + 2.0 (for v11 server) and 12.0 (for v12 server). Changing this + forces a new resource to be created.' + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
+ format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/sql.azure.upbound.io_mssqlservervulnerabilityassessments.yaml b/package/crds/sql.azure.upbound.io_mssqlservervulnerabilityassessments.yaml index 535b30676..616b7d010 100644 --- a/package/crds/sql.azure.upbound.io_mssqlservervulnerabilityassessments.yaml +++ b/package/crds/sql.azure.upbound.io_mssqlservervulnerabilityassessments.yaml @@ -613,3 +613,592 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: MSSQLServerVulnerabilityAssessment is the Schema for the MSSQLServerVulnerabilityAssessments + API. Manages the Vulnerability Assessment for a MS SQL Server. 
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: MSSQLServerVulnerabilityAssessmentSpec defines the desired + state of MSSQLServerVulnerabilityAssessment + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + recurringScans: + description: The recurring scans settings. The recurring_scans + block supports fields documented below. + properties: + emailSubscriptionAdmins: + description: Boolean flag which specifies if the schedule + scan notification will be sent to the subscription administrators. + Defaults to false. 
+ type: boolean + emails: + description: Specifies an array of email addresses to which + the scan notification is sent. + items: + type: string + type: array + enabled: + description: Boolean flag which specifies if recurring scans + is enabled or disabled. Defaults to false. + type: boolean + type: object + serverSecurityAlertPolicyId: + description: The id of the security alert policy of the MS SQL + Server. Changing this forces a new resource to be created. + type: string + serverSecurityAlertPolicyIdRef: + description: Reference to a MSSQLServerSecurityAlertPolicy in + sql to populate serverSecurityAlertPolicyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serverSecurityAlertPolicyIdSelector: + description: Selector for a MSSQLServerSecurityAlertPolicy in + sql to populate serverSecurityAlertPolicyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + storageAccountAccessKeySecretRef: + description: Specifies the identifier key of the storage account + for vulnerability assessment scan results. If storage_container_sas_key + isn't specified, storage_account_access_key is required. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + storageContainerPath: + description: A blob storage container path to hold the scan results + (e.g. https://example.blob.core.windows.net/VaScans/). + type: string + storageContainerSasKeySecretRef: + description: A shared access signature (SAS Key) that has write + access to the blob container specified in storage_container_path + parameter. If storage_account_access_key isn't specified, storage_container_sas_key + is required. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - key + - name + - namespace + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + recurringScans: + description: The recurring scans settings. The recurring_scans + block supports fields documented below. + properties: + emailSubscriptionAdmins: + description: Boolean flag which specifies if the schedule + scan notification will be sent to the subscription administrators. + Defaults to false. + type: boolean + emails: + description: Specifies an array of email addresses to which + the scan notification is sent. + items: + type: string + type: array + enabled: + description: Boolean flag which specifies if recurring scans + is enabled or disabled. Defaults to false. + type: boolean + type: object + serverSecurityAlertPolicyId: + description: The id of the security alert policy of the MS SQL + Server. Changing this forces a new resource to be created. + type: string + serverSecurityAlertPolicyIdRef: + description: Reference to a MSSQLServerSecurityAlertPolicy in + sql to populate serverSecurityAlertPolicyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serverSecurityAlertPolicyIdSelector: + description: Selector for a MSSQLServerSecurityAlertPolicy in + sql to populate serverSecurityAlertPolicyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + storageContainerPath: + description: A blob storage container path to hold the scan results + (e.g. 
https://example.blob.core.windows.net/VaScans/). + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. 
+ properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.storageContainerPath is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.storageContainerPath) + || (has(self.initProvider) && has(self.initProvider.storageContainerPath))' + status: + description: MSSQLServerVulnerabilityAssessmentStatus defines the observed + state of MSSQLServerVulnerabilityAssessment. + properties: + atProvider: + properties: + id: + description: The ID of the MS SQL Server Vulnerability Assessment. + type: string + recurringScans: + description: The recurring scans settings. The recurring_scans + block supports fields documented below. + properties: + emailSubscriptionAdmins: + description: Boolean flag which specifies if the schedule + scan notification will be sent to the subscription administrators. + Defaults to false. + type: boolean + emails: + description: Specifies an array of email addresses to which + the scan notification is sent. + items: + type: string + type: array + enabled: + description: Boolean flag which specifies if recurring scans + is enabled or disabled. Defaults to false. + type: boolean + type: object + serverSecurityAlertPolicyId: + description: The id of the security alert policy of the MS SQL + Server. Changing this forces a new resource to be created. + type: string + storageContainerPath: + description: A blob storage container path to hold the scan results + (e.g. https://example.blob.core.windows.net/VaScans/). + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. 
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/storage.azure.upbound.io_accountlocalusers.yaml b/package/crds/storage.azure.upbound.io_accountlocalusers.yaml index fa1fb1d80..18d43b456 100644 --- a/package/crds/storage.azure.upbound.io_accountlocalusers.yaml +++ b/package/crds/storage.azure.upbound.io_accountlocalusers.yaml @@ -759,3 +759,738 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: AccountLocalUser is the Schema for the AccountLocalUsers API. + Manages a Storage Account Local User. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: AccountLocalUserSpec defines the desired state of AccountLocalUser + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + homeDirectory: + description: The home directory of the Storage Account Local User. + type: string + permissionScope: + description: One or more permission_scope blocks as defined below. + items: + properties: + permissions: + description: A permissions block as defined below. + properties: + create: + description: (Defaults to 30 minutes) Used when creating + the Storage Account Local User. + type: boolean + delete: + description: (Defaults to 30 minutes) Used when deleting + the Storage Account Local User. + type: boolean + list: + description: Specifies if the Local User has the list + permission for this scope. Defaults to false. + type: boolean + read: + description: (Defaults to 5 minutes) Used when retrieving + the Storage Account Local User. + type: boolean + write: + description: Specifies if the Local User has the write + permission for this scope. Defaults to false. 
+ type: boolean + type: object + resourceName: + description: The container name (when service is set to + blob) or the file share name (when service is set to file), + used by the Storage Account Local User. + type: string + resourceNameRef: + description: Reference to a Container in storage to populate + resourceName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceNameSelector: + description: Selector for a Container in storage to populate + resourceName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + service: + description: The storage service used by this Storage Account + Local User. Possible values are blob and file. + type: string + type: object + type: array + sshAuthorizedKey: + description: One or more ssh_authorized_key blocks as defined + below. + items: + properties: + description: + description: The description of this SSH authorized key. + type: string + key: + description: The public key value of this SSH authorized + key. + type: string + type: object + type: array + sshKeyEnabled: + description: Specifies whether SSH Key Authentication is enabled. + Defaults to false. + type: boolean + sshPasswordEnabled: + description: Specifies whether SSH Password Authentication is + enabled. Defaults to false. + type: boolean + storageAccountId: + description: The ID of the Storage Account that this Storage Account + Local User resides in. Changing this forces a new Storage Account + Local User to be created. + type: string + storageAccountIdRef: + description: Reference to a Account in storage to populate storageAccountId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + storageAccountIdSelector: + description: Selector for a Account in storage to populate storageAccountId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. 
The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + homeDirectory: + description: The home directory of the Storage Account Local User. + type: string + permissionScope: + description: One or more permission_scope blocks as defined below. + items: + properties: + permissions: + description: A permissions block as defined below. + properties: + create: + description: (Defaults to 30 minutes) Used when creating + the Storage Account Local User. + type: boolean + delete: + description: (Defaults to 30 minutes) Used when deleting + the Storage Account Local User. + type: boolean + list: + description: Specifies if the Local User has the list + permission for this scope. Defaults to false. + type: boolean + read: + description: (Defaults to 5 minutes) Used when retrieving + the Storage Account Local User. + type: boolean + write: + description: Specifies if the Local User has the write + permission for this scope. Defaults to false. + type: boolean + type: object + resourceName: + description: The container name (when service is set to + blob) or the file share name (when service is set to file), + used by the Storage Account Local User. + type: string + resourceNameRef: + description: Reference to a Container in storage to populate + resourceName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceNameSelector: + description: Selector for a Container in storage to populate + resourceName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + service: + description: The storage service used by this Storage Account + Local User. Possible values are blob and file. 
+ type: string + type: object + type: array + sshAuthorizedKey: + description: One or more ssh_authorized_key blocks as defined + below. + items: + properties: + description: + description: The description of this SSH authorized key. + type: string + key: + description: The public key value of this SSH authorized + key. + type: string + type: object + type: array + sshKeyEnabled: + description: Specifies whether SSH Key Authentication is enabled. + Defaults to false. + type: boolean + sshPasswordEnabled: + description: Specifies whether SSH Password Authentication is + enabled. Defaults to false. + type: boolean + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. 
+ type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: AccountLocalUserStatus defines the observed state of AccountLocalUser. + properties: + atProvider: + properties: + homeDirectory: + description: The home directory of the Storage Account Local User. + type: string + id: + description: The ID of the Storage Account Local User. + type: string + permissionScope: + description: One or more permission_scope blocks as defined below. + items: + properties: + permissions: + description: A permissions block as defined below. + properties: + create: + description: (Defaults to 30 minutes) Used when creating + the Storage Account Local User. + type: boolean + delete: + description: (Defaults to 30 minutes) Used when deleting + the Storage Account Local User. + type: boolean + list: + description: Specifies if the Local User has the list + permission for this scope. Defaults to false. + type: boolean + read: + description: (Defaults to 5 minutes) Used when retrieving + the Storage Account Local User. + type: boolean + write: + description: Specifies if the Local User has the write + permission for this scope. Defaults to false. + type: boolean + type: object + resourceName: + description: The container name (when service is set to + blob) or the file share name (when service is set to file), + used by the Storage Account Local User. + type: string + service: + description: The storage service used by this Storage Account + Local User. Possible values are blob and file. + type: string + type: object + type: array + sshAuthorizedKey: + description: One or more ssh_authorized_key blocks as defined + below. + items: + properties: + description: + description: The description of this SSH authorized key. + type: string + key: + description: The public key value of this SSH authorized + key. 
+ type: string + type: object + type: array + sshKeyEnabled: + description: Specifies whether SSH Key Authentication is enabled. + Defaults to false. + type: boolean + sshPasswordEnabled: + description: Specifies whether SSH Password Authentication is + enabled. Defaults to false. + type: boolean + storageAccountId: + description: The ID of the Storage Account that this Storage Account + Local User resides in. Changing this forces a new Storage Account + Local User to be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/storage.azure.upbound.io_accounts.yaml b/package/crds/storage.azure.upbound.io_accounts.yaml index 81e51b678..8243a1a04 100644 --- a/package/crds/storage.azure.upbound.io_accounts.yaml +++ b/package/crds/storage.azure.upbound.io_accounts.yaml @@ -2552,3 +2552,2405 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Account is the Schema for the Accounts API. Manages a Azure Storage + Account. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: AccountSpec defines the desired state of Account + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + accessTier: + description: Defines the access tier for BlobStorage, FileStorage + and StorageV2 accounts. Valid options are Hot and Cool, defaults + to Hot. + type: string + accountKind: + description: Defines the Kind of account. Valid options are BlobStorage, + BlockBlobStorage, FileStorage, Storage and StorageV2. Defaults + to StorageV2. + type: string + accountReplicationType: + description: Defines the type of replication to use for this storage + account. Valid options are LRS, GRS, RAGRS, ZRS, GZRS and RAGZRS. + Changing this forces a new resource to be created when types + LRS, GRS and RAGRS are changed to ZRS, GZRS or RAGZRS and vice + versa. + type: string + accountTier: + description: Defines the Tier to use for this storage account. + Valid options are Standard and Premium. For BlockBlobStorage + and FileStorage accounts only Premium is valid. Changing this + forces a new resource to be created. 
+ type: string + allowNestedItemsToBePublic: + description: Allow or disallow nested items within this Account + to opt into being public. Defaults to true. + type: boolean + allowedCopyScope: + description: Restrict copy to and from Storage Accounts within + an AAD tenant or with Private Links to the same VNet. Possible + values are AAD and PrivateLink. + type: string + azureFilesAuthentication: + description: A azure_files_authentication block as defined below. + properties: + activeDirectory: + description: A active_directory block as defined below. Required + when directory_type is AD. + properties: + domainGuid: + description: Specifies the domain GUID. + type: string + domainName: + description: Specifies the primary domain that the AD + DNS server is authoritative for. + type: string + domainSid: + description: Specifies the security identifier (SID). + This is required when directory_type is set to AD. + type: string + forestName: + description: Specifies the Active Directory forest. This + is required when directory_type is set to AD. + type: string + netbiosDomainName: + description: Specifies the NetBIOS domain name. This is + required when directory_type is set to AD. + type: string + storageSid: + description: Specifies the security identifier (SID) for + Azure Storage. This is required when directory_type + is set to AD. + type: string + type: object + directoryType: + description: Specifies the directory service used. Possible + values are AADDS, AD and AADKERB. + type: string + type: object + blobProperties: + description: A blob_properties block as defined below. + properties: + changeFeedEnabled: + description: Is the blob service properties for change feed + events enabled? Default to false. + type: boolean + changeFeedRetentionInDays: + description: The duration of change feed events retention + in days. The possible values are between 1 and 146000 days + (400 years). 
Setting this to null (or omit this in the configuration + file) indicates an infinite retention of the change feed. + type: number + containerDeleteRetentionPolicy: + description: A container_delete_retention_policy block as + defined below. + properties: + days: + description: Specifies the number of days that the azurerm_storage_share + should be retained, between 1 and 365 days. Defaults + to 7. + type: number + type: object + corsRule: + description: A cors_rule block as defined below. + items: + properties: + allowedHeaders: + description: A list of headers that are allowed to be + a part of the cross-origin request. + items: + type: string + type: array + allowedMethods: + description: |- + A list of HTTP methods that are allowed to be executed by the origin. Valid options are + DELETE, GET, HEAD, MERGE, POST, OPTIONS, PUT or PATCH. + items: + type: string + type: array + allowedOrigins: + description: A list of origin domains that will be allowed + by CORS. + items: + type: string + type: array + exposedHeaders: + description: A list of response headers that are exposed + to CORS clients. + items: + type: string + type: array + maxAgeInSeconds: + description: The number of seconds the client should + cache a preflight response. + type: number + type: object + type: array + defaultServiceVersion: + description: The API Version which should be used by default + for requests to the Data Plane API if an incoming request + doesn't specify an API Version. + type: string + deleteRetentionPolicy: + description: A delete_retention_policy block as defined below. + properties: + days: + description: Specifies the number of days that the azurerm_storage_share + should be retained, between 1 and 365 days. Defaults + to 7. + type: number + type: object + lastAccessTimeEnabled: + description: Is the last access time based tracking enabled? + Default to false. + type: boolean + restorePolicy: + description: A restore_policy block as defined below. 
This + must be used together with delete_retention_policy set, + versioning_enabled and change_feed_enabled set to true. + properties: + days: + description: Specifies the number of days that the azurerm_storage_share + should be retained, between 1 and 365 days. Defaults + to 7. + type: number + type: object + versioningEnabled: + description: Is versioning enabled? Default to false. + type: boolean + type: object + crossTenantReplicationEnabled: + description: Should cross Tenant replication be enabled? Defaults + to true. + type: boolean + customDomain: + description: A custom_domain block as documented below. + properties: + name: + description: The Custom Domain Name to use for the Storage + Account, which will be validated by Azure. + type: string + useSubdomain: + description: Should the Custom Domain Name be validated by + using indirect CNAME validation? + type: boolean + type: object + customerManagedKey: + description: A customer_managed_key block as documented below. + properties: + keyVaultKeyId: + description: The ID of the Key Vault Key, supplying a version-less + key ID will enable auto-rotation of this key. + type: string + userAssignedIdentityId: + description: The ID of a user assigned identity. + type: string + type: object + defaultToOauthAuthentication: + description: Default to Azure Active Directory authorization in + the Azure portal when accessing the Storage Account. The default + value is false + type: boolean + edgeZone: + description: Specifies the Edge Zone within the Azure Region where + this Storage Account should exist. Changing this forces a new + Storage Account to be created. + type: string + enableHttpsTrafficOnly: + description: Boolean flag which forces HTTPS if enabled, see here + for more information. Defaults to true. + type: boolean + identity: + description: An identity block as defined below. 
+ properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Storage Account. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Storage Account. Possible + values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + immutabilityPolicy: + description: An immutability_policy block as defined below. Changing + this forces a new resource to be created. + properties: + allowProtectedAppendWrites: + description: When enabled, new blocks can be written to an + append blob while maintaining immutability protection and + compliance. Only new blocks can be added and any existing + blocks cannot be modified or deleted. + type: boolean + periodSinceCreationInDays: + description: The immutability period for the blobs in the + container since the policy creation, in days. + type: number + state: + description: Defines the mode of the policy. Disabled state + disables the policy, Unlocked state allows increase and + decrease of immutability retention time and also allows + toggling allowProtectedAppendWrites property, Locked state + only allows the increase of the immutability retention time. + A policy can only be created in a Disabled or Unlocked state + and can be toggled between the two states. Only a policy + in an Unlocked state can transition to a Locked state which + cannot be reverted. + type: string + type: object + infrastructureEncryptionEnabled: + description: Is infrastructure encryption enabled? Changing this + forces a new resource to be created. Defaults to false. + type: boolean + isHnsEnabled: + description: Is Hierarchical Namespace enabled? This can be used + with Azure Data Lake Storage Gen 2 (see here for more information). + Changing this forces a new resource to be created. 
+ type: boolean + largeFileShareEnabled: + description: Is Large File Share Enabled? + type: boolean + localUserEnabled: + description: Is Local User Enabled? Defaults to true. + type: boolean + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + minTlsVersion: + description: The minimum supported TLS version for the storage + account. Possible values are TLS1_0, TLS1_1, and TLS1_2. Defaults + to TLS1_2 for new storage accounts. + type: string + networkRules: + description: A network_rules block as documented below. + properties: + bypass: + description: Specifies whether traffic is bypassed for Logging/Metrics/AzureServices. + Valid options are any combination of Logging, Metrics, AzureServices, + or None. + items: + type: string + type: array + x-kubernetes-list-type: set + defaultAction: + description: Specifies the default action of allow or deny + when no other rules match. Valid options are Deny or Allow. + type: string + ipRules: + description: List of public IP or IP ranges in CIDR Format. + Only IPv4 addresses are allowed. /31 CIDRs, /32 CIDRs, and + Private IP address ranges (as defined in RFC 1918), are + not allowed. + items: + type: string + type: array + x-kubernetes-list-type: set + privateLinkAccess: + description: One or more private_link_access block as defined + below. + items: + properties: + endpointResourceId: + description: The ID of the Azure resource that should + be allowed access to the target storage account. + type: string + endpointTenantId: + description: The tenant id of the resource of the resource + access rule to be granted access. Defaults to the + current tenant id. + type: string + type: object + type: array + virtualNetworkSubnetIds: + description: A list of resource ids for subnets. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: object + nfsv3Enabled: + description: Is NFSv3 protocol enabled? Changing this forces a + new resource to be created. Defaults to false. + type: boolean + publicNetworkAccessEnabled: + description: Whether the public network access is enabled? Defaults + to true. + type: boolean + queueEncryptionKeyType: + description: The encryption type of the queue service. Possible + values are Service and Account. Changing this forces a new resource + to be created. Default value is Service. + type: string + queueProperties: + description: A queue_properties block as defined below. + properties: + corsRule: + description: A cors_rule block as defined above. + items: + properties: + allowedHeaders: + description: A list of headers that are allowed to be + a part of the cross-origin request. + items: + type: string + type: array + allowedMethods: + description: |- + A list of HTTP methods that are allowed to be executed by the origin. Valid options are + DELETE, GET, HEAD, MERGE, POST, OPTIONS, PUT or PATCH. + items: + type: string + type: array + allowedOrigins: + description: A list of origin domains that will be allowed + by CORS. + items: + type: string + type: array + exposedHeaders: + description: A list of response headers that are exposed + to CORS clients. + items: + type: string + type: array + maxAgeInSeconds: + description: The number of seconds the client should + cache a preflight response. + type: number + type: object + type: array + hourMetrics: + description: A hour_metrics block as defined below. + properties: + enabled: + description: Indicates whether minute metrics are enabled + for the Queue service. + type: boolean + includeApis: + description: Indicates whether metrics should generate + summary statistics for called API operations. + type: boolean + retentionPolicyDays: + description: Specifies the number of days that logs will + be retained. 
+ type: number + version: + description: The version of storage analytics to configure. + type: string + type: object + logging: + description: A logging block as defined below. + properties: + delete: + description: (Defaults to 60 minutes) Used when deleting + the Storage Account. + type: boolean + read: + description: (Defaults to 5 minutes) Used when retrieving + the Storage Account. + type: boolean + retentionPolicyDays: + description: Specifies the number of days that logs will + be retained. + type: number + version: + description: The version of storage analytics to configure. + type: string + write: + description: Indicates whether all write requests should + be logged. + type: boolean + type: object + minuteMetrics: + description: A minute_metrics block as defined below. + properties: + enabled: + description: Indicates whether minute metrics are enabled + for the Queue service. + type: boolean + includeApis: + description: Indicates whether metrics should generate + summary statistics for called API operations. + type: boolean + retentionPolicyDays: + description: Specifies the number of days that logs will + be retained. + type: number + version: + description: The version of storage analytics to configure. + type: string + type: object + type: object + resourceGroupName: + description: The name of the resource group in which to create + the storage account. Changing this forces a new resource to + be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + routing: + description: A routing block as defined below. + properties: + choice: + description: Specifies the kind of network routing opted by + the user. Possible values are InternetRouting and MicrosoftRouting. + Defaults to MicrosoftRouting. 
+ type: string + publishInternetEndpoints: + description: Should internet routing storage endpoints be + published? Defaults to false. + type: boolean + publishMicrosoftEndpoints: + description: Should Microsoft routing storage endpoints be + published? Defaults to false. + type: boolean + type: object + sasPolicy: + description: A sas_policy block as defined below. + properties: + expirationAction: + description: The SAS expiration action. The only possible + value is Log at this moment. Defaults to Log. + type: string + expirationPeriod: + description: The SAS expiration period in format of DD.HH:MM:SS. + type: string + type: object + sftpEnabled: + description: Boolean, enable SFTP for the storage account + type: boolean + shareProperties: + description: A share_properties block as defined below. + properties: + corsRule: + description: A cors_rule block as defined below. + items: + properties: + allowedHeaders: + description: A list of headers that are allowed to be + a part of the cross-origin request. + items: + type: string + type: array + allowedMethods: + description: |- + A list of HTTP methods that are allowed to be executed by the origin. Valid options are + DELETE, GET, HEAD, MERGE, POST, OPTIONS, PUT or PATCH. + items: + type: string + type: array + allowedOrigins: + description: A list of origin domains that will be allowed + by CORS. + items: + type: string + type: array + exposedHeaders: + description: A list of response headers that are exposed + to CORS clients. + items: + type: string + type: array + maxAgeInSeconds: + description: The number of seconds the client should + cache a preflight response. + type: number + type: object + type: array + retentionPolicy: + description: A retention_policy block as defined below. + properties: + days: + description: Specifies the number of days that the azurerm_storage_share + should be retained, between 1 and 365 days. Defaults + to 7. 
+ type: number + type: object + smb: + description: A smb block as defined below. + properties: + authenticationTypes: + description: A set of SMB authentication methods. Possible + values are NTLMv2, and Kerberos. + items: + type: string + type: array + x-kubernetes-list-type: set + channelEncryptionType: + description: A set of SMB channel encryption. Possible + values are AES-128-CCM, AES-128-GCM, and AES-256-GCM. + items: + type: string + type: array + x-kubernetes-list-type: set + kerberosTicketEncryptionType: + description: A set of Kerberos ticket encryption. Possible + values are RC4-HMAC, and AES-256. + items: + type: string + type: array + x-kubernetes-list-type: set + multichannelEnabled: + description: Indicates whether multichannel is enabled. + Defaults to false. This is only supported on Premium + storage accounts. + type: boolean + versions: + description: A set of SMB protocol versions. Possible + values are SMB2.1, SMB3.0, and SMB3.1.1. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + sharedAccessKeyEnabled: + description: Indicates whether the storage account permits requests + to be authorized with the account access key via Shared Key. + If false, then all requests, including shared access signatures, + must be authorized with Azure Active Directory (Azure AD). Defaults + to true. + type: boolean + staticWebsite: + description: A static_website block as defined below. + properties: + error404Document: + description: The absolute path to a custom webpage that should + be used when a request is made which does not correspond + to an existing file. + type: string + indexDocument: + description: The webpage that Azure Storage serves for requests + to the root of a website or any subfolder. For example, + index.html. The value is case-sensitive. + type: string + type: object + tableEncryptionKeyType: + description: The encryption type of the table service. Possible + values are Service and Account. 
Changing this forces a new resource + to be created. Default value is Service. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + accessTier: + description: Defines the access tier for BlobStorage, FileStorage + and StorageV2 accounts. Valid options are Hot and Cool, defaults + to Hot. + type: string + accountKind: + description: Defines the Kind of account. Valid options are BlobStorage, + BlockBlobStorage, FileStorage, Storage and StorageV2. Defaults + to StorageV2. + type: string + accountReplicationType: + description: Defines the type of replication to use for this storage + account. Valid options are LRS, GRS, RAGRS, ZRS, GZRS and RAGZRS. + Changing this forces a new resource to be created when types + LRS, GRS and RAGRS are changed to ZRS, GZRS or RAGZRS and vice + versa. + type: string + accountTier: + description: Defines the Tier to use for this storage account. + Valid options are Standard and Premium. For BlockBlobStorage + and FileStorage accounts only Premium is valid. Changing this + forces a new resource to be created. 
+ type: string + allowNestedItemsToBePublic: + description: Allow or disallow nested items within this Account + to opt into being public. Defaults to true. + type: boolean + allowedCopyScope: + description: Restrict copy to and from Storage Accounts within + an AAD tenant or with Private Links to the same VNet. Possible + values are AAD and PrivateLink. + type: string + azureFilesAuthentication: + description: A azure_files_authentication block as defined below. + properties: + activeDirectory: + description: A active_directory block as defined below. Required + when directory_type is AD. + properties: + domainGuid: + description: Specifies the domain GUID. + type: string + domainName: + description: Specifies the primary domain that the AD + DNS server is authoritative for. + type: string + domainSid: + description: Specifies the security identifier (SID). + This is required when directory_type is set to AD. + type: string + forestName: + description: Specifies the Active Directory forest. This + is required when directory_type is set to AD. + type: string + netbiosDomainName: + description: Specifies the NetBIOS domain name. This is + required when directory_type is set to AD. + type: string + storageSid: + description: Specifies the security identifier (SID) for + Azure Storage. This is required when directory_type + is set to AD. + type: string + type: object + directoryType: + description: Specifies the directory service used. Possible + values are AADDS, AD and AADKERB. + type: string + type: object + blobProperties: + description: A blob_properties block as defined below. + properties: + changeFeedEnabled: + description: Is the blob service properties for change feed + events enabled? Default to false. + type: boolean + changeFeedRetentionInDays: + description: The duration of change feed events retention + in days. The possible values are between 1 and 146000 days + (400 years). 
Setting this to null (or omit this in the configuration + file) indicates an infinite retention of the change feed. + type: number + containerDeleteRetentionPolicy: + description: A container_delete_retention_policy block as + defined below. + properties: + days: + description: Specifies the number of days that the azurerm_storage_share + should be retained, between 1 and 365 days. Defaults + to 7. + type: number + type: object + corsRule: + description: A cors_rule block as defined below. + items: + properties: + allowedHeaders: + description: A list of headers that are allowed to be + a part of the cross-origin request. + items: + type: string + type: array + allowedMethods: + description: |- + A list of HTTP methods that are allowed to be executed by the origin. Valid options are + DELETE, GET, HEAD, MERGE, POST, OPTIONS, PUT or PATCH. + items: + type: string + type: array + allowedOrigins: + description: A list of origin domains that will be allowed + by CORS. + items: + type: string + type: array + exposedHeaders: + description: A list of response headers that are exposed + to CORS clients. + items: + type: string + type: array + maxAgeInSeconds: + description: The number of seconds the client should + cache a preflight response. + type: number + type: object + type: array + defaultServiceVersion: + description: The API Version which should be used by default + for requests to the Data Plane API if an incoming request + doesn't specify an API Version. + type: string + deleteRetentionPolicy: + description: A delete_retention_policy block as defined below. + properties: + days: + description: Specifies the number of days that the azurerm_storage_share + should be retained, between 1 and 365 days. Defaults + to 7. + type: number + type: object + lastAccessTimeEnabled: + description: Is the last access time based tracking enabled? + Default to false. + type: boolean + restorePolicy: + description: A restore_policy block as defined below. 
This + must be used together with delete_retention_policy set, + versioning_enabled and change_feed_enabled set to true. + properties: + days: + description: Specifies the number of days that the azurerm_storage_share + should be retained, between 1 and 365 days. Defaults + to 7. + type: number + type: object + versioningEnabled: + description: Is versioning enabled? Default to false. + type: boolean + type: object + crossTenantReplicationEnabled: + description: Should cross Tenant replication be enabled? Defaults + to true. + type: boolean + customDomain: + description: A custom_domain block as documented below. + properties: + name: + description: The Custom Domain Name to use for the Storage + Account, which will be validated by Azure. + type: string + useSubdomain: + description: Should the Custom Domain Name be validated by + using indirect CNAME validation? + type: boolean + type: object + customerManagedKey: + description: A customer_managed_key block as documented below. + properties: + keyVaultKeyId: + description: The ID of the Key Vault Key, supplying a version-less + key ID will enable auto-rotation of this key. + type: string + userAssignedIdentityId: + description: The ID of a user assigned identity. + type: string + type: object + defaultToOauthAuthentication: + description: Default to Azure Active Directory authorization in + the Azure portal when accessing the Storage Account. The default + value is false + type: boolean + edgeZone: + description: Specifies the Edge Zone within the Azure Region where + this Storage Account should exist. Changing this forces a new + Storage Account to be created. + type: string + enableHttpsTrafficOnly: + description: Boolean flag which forces HTTPS if enabled, see here + for more information. Defaults to true. + type: boolean + identity: + description: An identity block as defined below. 
+ properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Storage Account. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Storage Account. Possible + values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + immutabilityPolicy: + description: An immutability_policy block as defined below. Changing + this forces a new resource to be created. + properties: + allowProtectedAppendWrites: + description: When enabled, new blocks can be written to an + append blob while maintaining immutability protection and + compliance. Only new blocks can be added and any existing + blocks cannot be modified or deleted. + type: boolean + periodSinceCreationInDays: + description: The immutability period for the blobs in the + container since the policy creation, in days. + type: number + state: + description: Defines the mode of the policy. Disabled state + disables the policy, Unlocked state allows increase and + decrease of immutability retention time and also allows + toggling allowProtectedAppendWrites property, Locked state + only allows the increase of the immutability retention time. + A policy can only be created in a Disabled or Unlocked state + and can be toggled between the two states. Only a policy + in an Unlocked state can transition to a Locked state which + cannot be reverted. + type: string + type: object + infrastructureEncryptionEnabled: + description: Is infrastructure encryption enabled? Changing this + forces a new resource to be created. Defaults to false. + type: boolean + isHnsEnabled: + description: Is Hierarchical Namespace enabled? This can be used + with Azure Data Lake Storage Gen 2 (see here for more information). + Changing this forces a new resource to be created. 
+ type: boolean + largeFileShareEnabled: + description: Is Large File Share Enabled? + type: boolean + localUserEnabled: + description: Is Local User Enabled? Defaults to true. + type: boolean + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + minTlsVersion: + description: The minimum supported TLS version for the storage + account. Possible values are TLS1_0, TLS1_1, and TLS1_2. Defaults + to TLS1_2 for new storage accounts. + type: string + networkRules: + description: A network_rules block as documented below. + properties: + bypass: + description: Specifies whether traffic is bypassed for Logging/Metrics/AzureServices. + Valid options are any combination of Logging, Metrics, AzureServices, + or None. + items: + type: string + type: array + x-kubernetes-list-type: set + defaultAction: + description: Specifies the default action of allow or deny + when no other rules match. Valid options are Deny or Allow. + type: string + ipRules: + description: List of public IP or IP ranges in CIDR Format. + Only IPv4 addresses are allowed. /31 CIDRs, /32 CIDRs, and + Private IP address ranges (as defined in RFC 1918), are + not allowed. + items: + type: string + type: array + x-kubernetes-list-type: set + privateLinkAccess: + description: One or more private_link_access block as defined + below. + items: + properties: + endpointResourceId: + description: The ID of the Azure resource that should + be allowed access to the target storage account. + type: string + endpointTenantId: + description: The tenant id of the resource of the resource + access rule to be granted access. Defaults to the + current tenant id. + type: string + type: object + type: array + virtualNetworkSubnetIds: + description: A list of resource ids for subnets. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: object + nfsv3Enabled: + description: Is NFSv3 protocol enabled? Changing this forces a + new resource to be created. Defaults to false. + type: boolean + publicNetworkAccessEnabled: + description: Whether the public network access is enabled? Defaults + to true. + type: boolean + queueEncryptionKeyType: + description: The encryption type of the queue service. Possible + values are Service and Account. Changing this forces a new resource + to be created. Default value is Service. + type: string + queueProperties: + description: A queue_properties block as defined below. + properties: + corsRule: + description: A cors_rule block as defined above. + items: + properties: + allowedHeaders: + description: A list of headers that are allowed to be + a part of the cross-origin request. + items: + type: string + type: array + allowedMethods: + description: |- + A list of HTTP methods that are allowed to be executed by the origin. Valid options are + DELETE, GET, HEAD, MERGE, POST, OPTIONS, PUT or PATCH. + items: + type: string + type: array + allowedOrigins: + description: A list of origin domains that will be allowed + by CORS. + items: + type: string + type: array + exposedHeaders: + description: A list of response headers that are exposed + to CORS clients. + items: + type: string + type: array + maxAgeInSeconds: + description: The number of seconds the client should + cache a preflight response. + type: number + type: object + type: array + hourMetrics: + description: A hour_metrics block as defined below. + properties: + enabled: + description: Indicates whether minute metrics are enabled + for the Queue service. + type: boolean + includeApis: + description: Indicates whether metrics should generate + summary statistics for called API operations. + type: boolean + retentionPolicyDays: + description: Specifies the number of days that logs will + be retained. 
+ type: number + version: + description: The version of storage analytics to configure. + type: string + type: object + logging: + description: A logging block as defined below. + properties: + delete: + description: (Defaults to 60 minutes) Used when deleting + the Storage Account. + type: boolean + read: + description: (Defaults to 5 minutes) Used when retrieving + the Storage Account. + type: boolean + retentionPolicyDays: + description: Specifies the number of days that logs will + be retained. + type: number + version: + description: The version of storage analytics to configure. + type: string + write: + description: Indicates whether all write requests should + be logged. + type: boolean + type: object + minuteMetrics: + description: A minute_metrics block as defined below. + properties: + enabled: + description: Indicates whether minute metrics are enabled + for the Queue service. + type: boolean + includeApis: + description: Indicates whether metrics should generate + summary statistics for called API operations. + type: boolean + retentionPolicyDays: + description: Specifies the number of days that logs will + be retained. + type: number + version: + description: The version of storage analytics to configure. + type: string + type: object + type: object + routing: + description: A routing block as defined below. + properties: + choice: + description: Specifies the kind of network routing opted by + the user. Possible values are InternetRouting and MicrosoftRouting. + Defaults to MicrosoftRouting. + type: string + publishInternetEndpoints: + description: Should internet routing storage endpoints be + published? Defaults to false. + type: boolean + publishMicrosoftEndpoints: + description: Should Microsoft routing storage endpoints be + published? Defaults to false. + type: boolean + type: object + sasPolicy: + description: A sas_policy block as defined below. + properties: + expirationAction: + description: The SAS expiration action. 
The only possible + value is Log at this moment. Defaults to Log. + type: string + expirationPeriod: + description: The SAS expiration period in format of DD.HH:MM:SS. + type: string + type: object + sftpEnabled: + description: Boolean, enable SFTP for the storage account + type: boolean + shareProperties: + description: A share_properties block as defined below. + properties: + corsRule: + description: A cors_rule block as defined below. + items: + properties: + allowedHeaders: + description: A list of headers that are allowed to be + a part of the cross-origin request. + items: + type: string + type: array + allowedMethods: + description: |- + A list of HTTP methods that are allowed to be executed by the origin. Valid options are + DELETE, GET, HEAD, MERGE, POST, OPTIONS, PUT or PATCH. + items: + type: string + type: array + allowedOrigins: + description: A list of origin domains that will be allowed + by CORS. + items: + type: string + type: array + exposedHeaders: + description: A list of response headers that are exposed + to CORS clients. + items: + type: string + type: array + maxAgeInSeconds: + description: The number of seconds the client should + cache a preflight response. + type: number + type: object + type: array + retentionPolicy: + description: A retention_policy block as defined below. + properties: + days: + description: Specifies the number of days that the azurerm_storage_share + should be retained, between 1 and 365 days. Defaults + to 7. + type: number + type: object + smb: + description: A smb block as defined below. + properties: + authenticationTypes: + description: A set of SMB authentication methods. Possible + values are NTLMv2, and Kerberos. + items: + type: string + type: array + x-kubernetes-list-type: set + channelEncryptionType: + description: A set of SMB channel encryption. Possible + values are AES-128-CCM, AES-128-GCM, and AES-256-GCM. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + kerberosTicketEncryptionType: + description: A set of Kerberos ticket encryption. Possible + values are RC4-HMAC, and AES-256. + items: + type: string + type: array + x-kubernetes-list-type: set + multichannelEnabled: + description: Indicates whether multichannel is enabled. + Defaults to false. This is only supported on Premium + storage accounts. + type: boolean + versions: + description: A set of SMB protocol versions. Possible + values are SMB2.1, SMB3.0, and SMB3.1.1. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + sharedAccessKeyEnabled: + description: Indicates whether the storage account permits requests + to be authorized with the account access key via Shared Key. + If false, then all requests, including shared access signatures, + must be authorized with Azure Active Directory (Azure AD). Defaults + to true. + type: boolean + staticWebsite: + description: A static_website block as defined below. + properties: + error404Document: + description: The absolute path to a custom webpage that should + be used when a request is made which does not correspond + to an existing file. + type: string + indexDocument: + description: The webpage that Azure Storage serves for requests + to the root of a website or any subfolder. For example, + index.html. The value is case-sensitive. + type: string + type: object + tableEncryptionKeyType: + description: The encryption type of the table service. Possible + values are Service and Account. Changing this forces a new resource + to be created. Default value is Service. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. 
It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.accountReplicationType is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.accountReplicationType) + || (has(self.initProvider) && has(self.initProvider.accountReplicationType))' + - message: spec.forProvider.accountTier is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.accountTier) + || (has(self.initProvider) && has(self.initProvider.accountTier))' + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + status: + description: AccountStatus defines the observed state of Account. + properties: + atProvider: + properties: + accessTier: + description: Defines the access tier for BlobStorage, FileStorage + and StorageV2 accounts. Valid options are Hot and Cool, defaults + to Hot. + type: string + accountKind: + description: Defines the Kind of account. Valid options are BlobStorage, + BlockBlobStorage, FileStorage, Storage and StorageV2. Defaults + to StorageV2. + type: string + accountReplicationType: + description: Defines the type of replication to use for this storage + account. Valid options are LRS, GRS, RAGRS, ZRS, GZRS and RAGZRS. + Changing this forces a new resource to be created when types + LRS, GRS and RAGRS are changed to ZRS, GZRS or RAGZRS and vice + versa. + type: string + accountTier: + description: Defines the Tier to use for this storage account. + Valid options are Standard and Premium. 
For BlockBlobStorage + and FileStorage accounts only Premium is valid. Changing this + forces a new resource to be created. + type: string + allowNestedItemsToBePublic: + description: Allow or disallow nested items within this Account + to opt into being public. Defaults to true. + type: boolean + allowedCopyScope: + description: Restrict copy to and from Storage Accounts within + an AAD tenant or with Private Links to the same VNet. Possible + values are AAD and PrivateLink. + type: string + azureFilesAuthentication: + description: A azure_files_authentication block as defined below. + properties: + activeDirectory: + description: A active_directory block as defined below. Required + when directory_type is AD. + properties: + domainGuid: + description: Specifies the domain GUID. + type: string + domainName: + description: Specifies the primary domain that the AD + DNS server is authoritative for. + type: string + domainSid: + description: Specifies the security identifier (SID). + This is required when directory_type is set to AD. + type: string + forestName: + description: Specifies the Active Directory forest. This + is required when directory_type is set to AD. + type: string + netbiosDomainName: + description: Specifies the NetBIOS domain name. This is + required when directory_type is set to AD. + type: string + storageSid: + description: Specifies the security identifier (SID) for + Azure Storage. This is required when directory_type + is set to AD. + type: string + type: object + directoryType: + description: Specifies the directory service used. Possible + values are AADDS, AD and AADKERB. + type: string + type: object + blobProperties: + description: A blob_properties block as defined below. + properties: + changeFeedEnabled: + description: Is the blob service properties for change feed + events enabled? Default to false. + type: boolean + changeFeedRetentionInDays: + description: The duration of change feed events retention + in days. 
The possible values are between 1 and 146000 days + (400 years). Setting this to null (or omit this in the configuration + file) indicates an infinite retention of the change feed. + type: number + containerDeleteRetentionPolicy: + description: A container_delete_retention_policy block as + defined below. + properties: + days: + description: Specifies the number of days that the azurerm_storage_share + should be retained, between 1 and 365 days. Defaults + to 7. + type: number + type: object + corsRule: + description: A cors_rule block as defined below. + items: + properties: + allowedHeaders: + description: A list of headers that are allowed to be + a part of the cross-origin request. + items: + type: string + type: array + allowedMethods: + description: |- + A list of HTTP methods that are allowed to be executed by the origin. Valid options are + DELETE, GET, HEAD, MERGE, POST, OPTIONS, PUT or PATCH. + items: + type: string + type: array + allowedOrigins: + description: A list of origin domains that will be allowed + by CORS. + items: + type: string + type: array + exposedHeaders: + description: A list of response headers that are exposed + to CORS clients. + items: + type: string + type: array + maxAgeInSeconds: + description: The number of seconds the client should + cache a preflight response. + type: number + type: object + type: array + defaultServiceVersion: + description: The API Version which should be used by default + for requests to the Data Plane API if an incoming request + doesn't specify an API Version. + type: string + deleteRetentionPolicy: + description: A delete_retention_policy block as defined below. + properties: + days: + description: Specifies the number of days that the azurerm_storage_share + should be retained, between 1 and 365 days. Defaults + to 7. + type: number + type: object + lastAccessTimeEnabled: + description: Is the last access time based tracking enabled? + Default to false. 
+ type: boolean + restorePolicy: + description: A restore_policy block as defined below. This + must be used together with delete_retention_policy set, + versioning_enabled and change_feed_enabled set to true. + properties: + days: + description: Specifies the number of days that the azurerm_storage_share + should be retained, between 1 and 365 days. Defaults + to 7. + type: number + type: object + versioningEnabled: + description: Is versioning enabled? Default to false. + type: boolean + type: object + crossTenantReplicationEnabled: + description: Should cross Tenant replication be enabled? Defaults + to true. + type: boolean + customDomain: + description: A custom_domain block as documented below. + properties: + name: + description: The Custom Domain Name to use for the Storage + Account, which will be validated by Azure. + type: string + useSubdomain: + description: Should the Custom Domain Name be validated by + using indirect CNAME validation? + type: boolean + type: object + customerManagedKey: + description: A customer_managed_key block as documented below. + properties: + keyVaultKeyId: + description: The ID of the Key Vault Key, supplying a version-less + key ID will enable auto-rotation of this key. + type: string + userAssignedIdentityId: + description: The ID of a user assigned identity. + type: string + type: object + defaultToOauthAuthentication: + description: Default to Azure Active Directory authorization in + the Azure portal when accessing the Storage Account. The default + value is false + type: boolean + edgeZone: + description: Specifies the Edge Zone within the Azure Region where + this Storage Account should exist. Changing this forces a new + Storage Account to be created. + type: string + enableHttpsTrafficOnly: + description: Boolean flag which forces HTTPS if enabled, see here + for more information. Defaults to true. + type: boolean + id: + description: The ID of the Storage Account. 
+ type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Storage Account. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Principal ID for the Service Principal associated + with the Identity of this Storage Account. + type: string + tenantId: + description: The Tenant ID for the Service Principal associated + with the Identity of this Storage Account. + type: string + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Storage Account. Possible + values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + immutabilityPolicy: + description: An immutability_policy block as defined below. Changing + this forces a new resource to be created. + properties: + allowProtectedAppendWrites: + description: When enabled, new blocks can be written to an + append blob while maintaining immutability protection and + compliance. Only new blocks can be added and any existing + blocks cannot be modified or deleted. + type: boolean + periodSinceCreationInDays: + description: The immutability period for the blobs in the + container since the policy creation, in days. + type: number + state: + description: Defines the mode of the policy. Disabled state + disables the policy, Unlocked state allows increase and + decrease of immutability retention time and also allows + toggling allowProtectedAppendWrites property, Locked state + only allows the increase of the immutability retention time. + A policy can only be created in a Disabled or Unlocked state + and can be toggled between the two states. Only a policy + in an Unlocked state can transition to a Locked state which + cannot be reverted. 
+ type: string + type: object + infrastructureEncryptionEnabled: + description: Is infrastructure encryption enabled? Changing this + forces a new resource to be created. Defaults to false. + type: boolean + isHnsEnabled: + description: Is Hierarchical Namespace enabled? This can be used + with Azure Data Lake Storage Gen 2 (see here for more information). + Changing this forces a new resource to be created. + type: boolean + largeFileShareEnabled: + description: Is Large File Share Enabled? + type: boolean + localUserEnabled: + description: Is Local User Enabled? Defaults to true. + type: boolean + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + minTlsVersion: + description: The minimum supported TLS version for the storage + account. Possible values are TLS1_0, TLS1_1, and TLS1_2. Defaults + to TLS1_2 for new storage accounts. + type: string + networkRules: + description: A network_rules block as documented below. + properties: + bypass: + description: Specifies whether traffic is bypassed for Logging/Metrics/AzureServices. + Valid options are any combination of Logging, Metrics, AzureServices, + or None. + items: + type: string + type: array + x-kubernetes-list-type: set + defaultAction: + description: Specifies the default action of allow or deny + when no other rules match. Valid options are Deny or Allow. + type: string + ipRules: + description: List of public IP or IP ranges in CIDR Format. + Only IPv4 addresses are allowed. /31 CIDRs, /32 CIDRs, and + Private IP address ranges (as defined in RFC 1918), are + not allowed. + items: + type: string + type: array + x-kubernetes-list-type: set + privateLinkAccess: + description: One or more private_link_access block as defined + below. + items: + properties: + endpointResourceId: + description: The ID of the Azure resource that should + be allowed access to the target storage account. 
+ type: string + endpointTenantId: + description: The tenant id of the resource of the resource + access rule to be granted access. Defaults to the + current tenant id. + type: string + type: object + type: array + virtualNetworkSubnetIds: + description: A list of resource ids for subnets. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + nfsv3Enabled: + description: Is NFSv3 protocol enabled? Changing this forces a + new resource to be created. Defaults to false. + type: boolean + primaryBlobEndpoint: + description: The endpoint URL for blob storage in the primary + location. + type: string + primaryBlobHost: + description: The hostname with port if applicable for blob storage + in the primary location. + type: string + primaryBlobInternetEndpoint: + description: The internet routing endpoint URL for blob storage + in the primary location. + type: string + primaryBlobInternetHost: + description: The internet routing hostname with port if applicable + for blob storage in the primary location. + type: string + primaryBlobMicrosoftEndpoint: + description: The microsoft routing endpoint URL for blob storage + in the primary location. + type: string + primaryBlobMicrosoftHost: + description: The microsoft routing hostname with port if applicable + for blob storage in the primary location. + type: string + primaryDfsEndpoint: + description: The endpoint URL for DFS storage in the primary location. + type: string + primaryDfsHost: + description: The hostname with port if applicable for DFS storage + in the primary location. + type: string + primaryDfsInternetEndpoint: + description: The internet routing endpoint URL for DFS storage + in the primary location. + type: string + primaryDfsInternetHost: + description: The internet routing hostname with port if applicable + for DFS storage in the primary location. 
+ type: string + primaryDfsMicrosoftEndpoint: + description: The microsoft routing endpoint URL for DFS storage + in the primary location. + type: string + primaryDfsMicrosoftHost: + description: The microsoft routing hostname with port if applicable + for DFS storage in the primary location. + type: string + primaryFileEndpoint: + description: The endpoint URL for file storage in the primary + location. + type: string + primaryFileHost: + description: The hostname with port if applicable for file storage + in the primary location. + type: string + primaryFileInternetEndpoint: + description: The internet routing endpoint URL for file storage + in the primary location. + type: string + primaryFileInternetHost: + description: The internet routing hostname with port if applicable + for file storage in the primary location. + type: string + primaryFileMicrosoftEndpoint: + description: The microsoft routing endpoint URL for file storage + in the primary location. + type: string + primaryFileMicrosoftHost: + description: The microsoft routing hostname with port if applicable + for file storage in the primary location. + type: string + primaryLocation: + description: The primary location of the storage account. + type: string + primaryQueueEndpoint: + description: The endpoint URL for queue storage in the primary + location. + type: string + primaryQueueHost: + description: The hostname with port if applicable for queue storage + in the primary location. + type: string + primaryQueueMicrosoftEndpoint: + description: The microsoft routing endpoint URL for queue storage + in the primary location. + type: string + primaryQueueMicrosoftHost: + description: The microsoft routing hostname with port if applicable + for queue storage in the primary location. + type: string + primaryTableEndpoint: + description: The endpoint URL for table storage in the primary + location. 
+ type: string + primaryTableHost: + description: The hostname with port if applicable for table storage + in the primary location. + type: string + primaryTableMicrosoftEndpoint: + description: The microsoft routing endpoint URL for table storage + in the primary location. + type: string + primaryTableMicrosoftHost: + description: The microsoft routing hostname with port if applicable + for table storage in the primary location. + type: string + primaryWebEndpoint: + description: The endpoint URL for web storage in the primary location. + type: string + primaryWebHost: + description: The hostname with port if applicable for web storage + in the primary location. + type: string + primaryWebInternetEndpoint: + description: The internet routing endpoint URL for web storage + in the primary location. + type: string + primaryWebInternetHost: + description: The internet routing hostname with port if applicable + for web storage in the primary location. + type: string + primaryWebMicrosoftEndpoint: + description: The microsoft routing endpoint URL for web storage + in the primary location. + type: string + primaryWebMicrosoftHost: + description: The microsoft routing hostname with port if applicable + for web storage in the primary location. + type: string + publicNetworkAccessEnabled: + description: Whether the public network access is enabled? Defaults + to true. + type: boolean + queueEncryptionKeyType: + description: The encryption type of the queue service. Possible + values are Service and Account. Changing this forces a new resource + to be created. Default value is Service. + type: string + queueProperties: + description: A queue_properties block as defined below. + properties: + corsRule: + description: A cors_rule block as defined above. + items: + properties: + allowedHeaders: + description: A list of headers that are allowed to be + a part of the cross-origin request. 
+ items: + type: string + type: array + allowedMethods: + description: |- + A list of HTTP methods that are allowed to be executed by the origin. Valid options are + DELETE, GET, HEAD, MERGE, POST, OPTIONS, PUT or PATCH. + items: + type: string + type: array + allowedOrigins: + description: A list of origin domains that will be allowed + by CORS. + items: + type: string + type: array + exposedHeaders: + description: A list of response headers that are exposed + to CORS clients. + items: + type: string + type: array + maxAgeInSeconds: + description: The number of seconds the client should + cache a preflight response. + type: number + type: object + type: array + hourMetrics: + description: A hour_metrics block as defined below. + properties: + enabled: + description: Indicates whether minute metrics are enabled + for the Queue service. + type: boolean + includeApis: + description: Indicates whether metrics should generate + summary statistics for called API operations. + type: boolean + retentionPolicyDays: + description: Specifies the number of days that logs will + be retained. + type: number + version: + description: The version of storage analytics to configure. + type: string + type: object + logging: + description: A logging block as defined below. + properties: + delete: + description: (Defaults to 60 minutes) Used when deleting + the Storage Account. + type: boolean + read: + description: (Defaults to 5 minutes) Used when retrieving + the Storage Account. + type: boolean + retentionPolicyDays: + description: Specifies the number of days that logs will + be retained. + type: number + version: + description: The version of storage analytics to configure. + type: string + write: + description: Indicates whether all write requests should + be logged. + type: boolean + type: object + minuteMetrics: + description: A minute_metrics block as defined below. + properties: + enabled: + description: Indicates whether minute metrics are enabled + for the Queue service. 
+ type: boolean + includeApis: + description: Indicates whether metrics should generate + summary statistics for called API operations. + type: boolean + retentionPolicyDays: + description: Specifies the number of days that logs will + be retained. + type: number + version: + description: The version of storage analytics to configure. + type: string + type: object + type: object + resourceGroupName: + description: The name of the resource group in which to create + the storage account. Changing this forces a new resource to + be created. + type: string + routing: + description: A routing block as defined below. + properties: + choice: + description: Specifies the kind of network routing opted by + the user. Possible values are InternetRouting and MicrosoftRouting. + Defaults to MicrosoftRouting. + type: string + publishInternetEndpoints: + description: Should internet routing storage endpoints be + published? Defaults to false. + type: boolean + publishMicrosoftEndpoints: + description: Should Microsoft routing storage endpoints be + published? Defaults to false. + type: boolean + type: object + sasPolicy: + description: A sas_policy block as defined below. + properties: + expirationAction: + description: The SAS expiration action. The only possible + value is Log at this moment. Defaults to Log. + type: string + expirationPeriod: + description: The SAS expiration period in format of DD.HH:MM:SS. + type: string + type: object + secondaryBlobEndpoint: + description: The endpoint URL for blob storage in the secondary + location. + type: string + secondaryBlobHost: + description: The hostname with port if applicable for blob storage + in the secondary location. + type: string + secondaryBlobInternetEndpoint: + description: The internet routing endpoint URL for blob storage + in the secondary location. + type: string + secondaryBlobInternetHost: + description: The internet routing hostname with port if applicable + for blob storage in the secondary location. 
+ type: string + secondaryBlobMicrosoftEndpoint: + description: The microsoft routing endpoint URL for blob storage + in the secondary location. + type: string + secondaryBlobMicrosoftHost: + description: The microsoft routing hostname with port if applicable + for blob storage in the secondary location. + type: string + secondaryDfsEndpoint: + description: The endpoint URL for DFS storage in the secondary + location. + type: string + secondaryDfsHost: + description: The hostname with port if applicable for DFS storage + in the secondary location. + type: string + secondaryDfsInternetEndpoint: + description: The internet routing endpoint URL for DFS storage + in the secondary location. + type: string + secondaryDfsInternetHost: + description: The internet routing hostname with port if applicable + for DFS storage in the secondary location. + type: string + secondaryDfsMicrosoftEndpoint: + description: The microsoft routing endpoint URL for DFS storage + in the secondary location. + type: string + secondaryDfsMicrosoftHost: + description: The microsoft routing hostname with port if applicable + for DFS storage in the secondary location. + type: string + secondaryFileEndpoint: + description: The endpoint URL for file storage in the secondary + location. + type: string + secondaryFileHost: + description: The hostname with port if applicable for file storage + in the secondary location. + type: string + secondaryFileInternetEndpoint: + description: The internet routing endpoint URL for file storage + in the secondary location. + type: string + secondaryFileInternetHost: + description: The internet routing hostname with port if applicable + for file storage in the secondary location. + type: string + secondaryFileMicrosoftEndpoint: + description: The microsoft routing endpoint URL for file storage + in the secondary location. 
+ type: string + secondaryFileMicrosoftHost: + description: The microsoft routing hostname with port if applicable + for file storage in the secondary location. + type: string + secondaryLocation: + description: The secondary location of the storage account. + type: string + secondaryQueueEndpoint: + description: The endpoint URL for queue storage in the secondary + location. + type: string + secondaryQueueHost: + description: The hostname with port if applicable for queue storage + in the secondary location. + type: string + secondaryQueueMicrosoftEndpoint: + description: The microsoft routing endpoint URL for queue storage + in the secondary location. + type: string + secondaryQueueMicrosoftHost: + description: The microsoft routing hostname with port if applicable + for queue storage in the secondary location. + type: string + secondaryTableEndpoint: + description: The endpoint URL for table storage in the secondary + location. + type: string + secondaryTableHost: + description: The hostname with port if applicable for table storage + in the secondary location. + type: string + secondaryTableMicrosoftEndpoint: + description: The microsoft routing endpoint URL for table storage + in the secondary location. + type: string + secondaryTableMicrosoftHost: + description: The microsoft routing hostname with port if applicable + for table storage in the secondary location. + type: string + secondaryWebEndpoint: + description: The endpoint URL for web storage in the secondary + location. + type: string + secondaryWebHost: + description: The hostname with port if applicable for web storage + in the secondary location. + type: string + secondaryWebInternetEndpoint: + description: The internet routing endpoint URL for web storage + in the secondary location. + type: string + secondaryWebInternetHost: + description: The internet routing hostname with port if applicable + for web storage in the secondary location. 
+ type: string + secondaryWebMicrosoftEndpoint: + description: The microsoft routing endpoint URL for web storage + in the secondary location. + type: string + secondaryWebMicrosoftHost: + description: The microsoft routing hostname with port if applicable + for web storage in the secondary location. + type: string + sftpEnabled: + description: Boolean, enable SFTP for the storage account + type: boolean + shareProperties: + description: A share_properties block as defined below. + properties: + corsRule: + description: A cors_rule block as defined below. + items: + properties: + allowedHeaders: + description: A list of headers that are allowed to be + a part of the cross-origin request. + items: + type: string + type: array + allowedMethods: + description: |- + A list of HTTP methods that are allowed to be executed by the origin. Valid options are + DELETE, GET, HEAD, MERGE, POST, OPTIONS, PUT or PATCH. + items: + type: string + type: array + allowedOrigins: + description: A list of origin domains that will be allowed + by CORS. + items: + type: string + type: array + exposedHeaders: + description: A list of response headers that are exposed + to CORS clients. + items: + type: string + type: array + maxAgeInSeconds: + description: The number of seconds the client should + cache a preflight response. + type: number + type: object + type: array + retentionPolicy: + description: A retention_policy block as defined below. + properties: + days: + description: Specifies the number of days that the azurerm_storage_share + should be retained, between 1 and 365 days. Defaults + to 7. + type: number + type: object + smb: + description: A smb block as defined below. + properties: + authenticationTypes: + description: A set of SMB authentication methods. Possible + values are NTLMv2, and Kerberos. + items: + type: string + type: array + x-kubernetes-list-type: set + channelEncryptionType: + description: A set of SMB channel encryption. 
Possible + values are AES-128-CCM, AES-128-GCM, and AES-256-GCM. + items: + type: string + type: array + x-kubernetes-list-type: set + kerberosTicketEncryptionType: + description: A set of Kerberos ticket encryption. Possible + values are RC4-HMAC, and AES-256. + items: + type: string + type: array + x-kubernetes-list-type: set + multichannelEnabled: + description: Indicates whether multichannel is enabled. + Defaults to false. This is only supported on Premium + storage accounts. + type: boolean + versions: + description: A set of SMB protocol versions. Possible + values are SMB2.1, SMB3.0, and SMB3.1.1. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + sharedAccessKeyEnabled: + description: Indicates whether the storage account permits requests + to be authorized with the account access key via Shared Key. + If false, then all requests, including shared access signatures, + must be authorized with Azure Active Directory (Azure AD). Defaults + to true. + type: boolean + staticWebsite: + description: A static_website block as defined below. + properties: + error404Document: + description: The absolute path to a custom webpage that should + be used when a request is made which does not correspond + to an existing file. + type: string + indexDocument: + description: The webpage that Azure Storage serves for requests + to the root of a website or any subfolder. For example, + index.html. The value is case-sensitive. + type: string + type: object + tableEncryptionKeyType: + description: The encryption type of the table service. Possible + values are Service and Account. Changing this forces a new resource + to be created. Default value is Service. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. 
+ items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/storage.azure.upbound.io_blobinventorypolicies.yaml b/package/crds/storage.azure.upbound.io_blobinventorypolicies.yaml index aee25f319..9635aaae6 100644 --- a/package/crds/storage.azure.upbound.io_blobinventorypolicies.yaml +++ b/package/crds/storage.azure.upbound.io_blobinventorypolicies.yaml @@ -867,3 +867,843 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: BlobInventoryPolicy is the Schema for the BlobInventoryPolicys + API. Manages a Storage Blob Inventory Policy. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: BlobInventoryPolicySpec defines the desired state of BlobInventoryPolicy + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + rules: + description: One or more rules blocks as defined below. + items: + properties: + filter: + description: A filter block as defined above. Can only be + set when the scope is Blob. + properties: + blobTypes: + description: A set of blob types. Possible values are + blockBlob, appendBlob, and pageBlob. The storage account + with is_hns_enabled is true doesn't support pageBlob. + items: + type: string + type: array + x-kubernetes-list-type: set + excludePrefixes: + description: A set of strings for blob prefixes to be + excluded. Maximum of 10 blob prefixes. + items: + type: string + type: array + x-kubernetes-list-type: set + includeBlobVersions: + description: Includes blob versions in blob inventory + or not? Defaults to false. + type: boolean + includeDeleted: + description: Includes deleted blobs in blob inventory + or not? Defaults to false. + type: boolean + includeSnapshots: + description: Includes blob snapshots in blob inventory + or not? Defaults to false. 
+ type: boolean + prefixMatch: + description: A set of strings for blob prefixes to be + matched. Maximum of 10 blob prefixes. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + format: + description: The format of the inventory files. Possible + values are Csv and Parquet. + type: string + name: + description: The name which should be used for this Blob + Inventory Policy Rule. + type: string + schedule: + description: The inventory schedule applied by this rule. + Possible values are Daily and Weekly. + type: string + schemaFields: + description: A list of fields to be included in the inventory. + See the Azure API reference for all the supported fields. + items: + type: string + type: array + scope: + description: The scope of the inventory for this rule. Possible + values are Blob and Container. + type: string + storageContainerName: + description: The storage container name to store the blob + inventory files for this rule. + type: string + storageContainerNameRef: + description: Reference to a Container in storage to populate + storageContainerName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + storageContainerNameSelector: + description: Selector for a Container in storage to populate + storageContainerName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + storageAccountId: + description: The ID of the storage account to apply this Blob + Inventory Policy to. Changing this forces a new Storage Blob + Inventory Policy to be created. + type: string + storageAccountIdRef: + description: Reference to a Account in storage to populate storageAccountId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + storageAccountIdSelector: + description: Selector for a Account in storage to populate storageAccountId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. 
+ InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + rules: + description: One or more rules blocks as defined below. + items: + properties: + filter: + description: A filter block as defined above. Can only be + set when the scope is Blob. + properties: + blobTypes: + description: A set of blob types. Possible values are + blockBlob, appendBlob, and pageBlob. The storage account + with is_hns_enabled is true doesn't support pageBlob. + items: + type: string + type: array + x-kubernetes-list-type: set + excludePrefixes: + description: A set of strings for blob prefixes to be + excluded. Maximum of 10 blob prefixes. + items: + type: string + type: array + x-kubernetes-list-type: set + includeBlobVersions: + description: Includes blob versions in blob inventory + or not? Defaults to false. + type: boolean + includeDeleted: + description: Includes deleted blobs in blob inventory + or not? Defaults to false. + type: boolean + includeSnapshots: + description: Includes blob snapshots in blob inventory + or not? Defaults to false. + type: boolean + prefixMatch: + description: A set of strings for blob prefixes to be + matched. Maximum of 10 blob prefixes. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + format: + description: The format of the inventory files. Possible + values are Csv and Parquet. + type: string + name: + description: The name which should be used for this Blob + Inventory Policy Rule. 
+ type: string + schedule: + description: The inventory schedule applied by this rule. + Possible values are Daily and Weekly. + type: string + schemaFields: + description: A list of fields to be included in the inventory. + See the Azure API reference for all the supported fields. + items: + type: string + type: array + scope: + description: The scope of the inventory for this rule. Possible + values are Blob and Container. + type: string + storageContainerName: + description: The storage container name to store the blob + inventory files for this rule. + type: string + storageContainerNameRef: + description: Reference to a Container in storage to populate + storageContainerName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + storageContainerNameSelector: + description: Selector for a Container in storage to populate + storageContainerName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + storageAccountId: + description: The ID of the storage account to apply this Blob + Inventory Policy to. Changing this forces a new Storage Blob + Inventory Policy to be created. + type: string + storageAccountIdRef: + description: Reference to a Account in storage to populate storageAccountId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + storageAccountIdSelector: + description: Selector for a Account in storage to populate storageAccountId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.rules is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.rules) + || (has(self.initProvider) && has(self.initProvider.rules))' + status: + description: BlobInventoryPolicyStatus defines the observed state of BlobInventoryPolicy. + properties: + atProvider: + properties: + id: + description: The ID of the Storage Blob Inventory Policy. + type: string + rules: + description: One or more rules blocks as defined below. + items: + properties: + filter: + description: A filter block as defined above. Can only be + set when the scope is Blob. + properties: + blobTypes: + description: A set of blob types. Possible values are + blockBlob, appendBlob, and pageBlob. The storage account + with is_hns_enabled is true doesn't support pageBlob. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + excludePrefixes: + description: A set of strings for blob prefixes to be + excluded. Maximum of 10 blob prefixes. + items: + type: string + type: array + x-kubernetes-list-type: set + includeBlobVersions: + description: Includes blob versions in blob inventory + or not? Defaults to false. + type: boolean + includeDeleted: + description: Includes deleted blobs in blob inventory + or not? Defaults to false. + type: boolean + includeSnapshots: + description: Includes blob snapshots in blob inventory + or not? Defaults to false. + type: boolean + prefixMatch: + description: A set of strings for blob prefixes to be + matched. Maximum of 10 blob prefixes. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + format: + description: The format of the inventory files. Possible + values are Csv and Parquet. + type: string + name: + description: The name which should be used for this Blob + Inventory Policy Rule. + type: string + schedule: + description: The inventory schedule applied by this rule. + Possible values are Daily and Weekly. + type: string + schemaFields: + description: A list of fields to be included in the inventory. + See the Azure API reference for all the supported fields. + items: + type: string + type: array + scope: + description: The scope of the inventory for this rule. Possible + values are Blob and Container. + type: string + storageContainerName: + description: The storage container name to store the blob + inventory files for this rule. + type: string + type: object + type: array + storageAccountId: + description: The ID of the storage account to apply this Blob + Inventory Policy to. Changing this forces a new Storage Blob + Inventory Policy to be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. 
+ properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/storage.azure.upbound.io_managementpolicies.yaml b/package/crds/storage.azure.upbound.io_managementpolicies.yaml index eabb32efe..313d495fc 100644 --- a/package/crds/storage.azure.upbound.io_managementpolicies.yaml +++ b/package/crds/storage.azure.upbound.io_managementpolicies.yaml @@ -1121,3 +1121,1061 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ManagementPolicy is the Schema for the ManagementPolicys API. + Manages an Azure Storage Account Management Policy. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ManagementPolicySpec defines the desired state of ManagementPolicy + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + rule: + description: A rule block as documented below. + items: + properties: + actions: + description: An actions block as documented below. + properties: + baseBlob: + description: A base_blob block as documented below. + properties: + autoTierToHotFromCoolEnabled: + description: Whether a blob should automatically + be tiered from cool back to hot if it's accessed + again after being tiered to cool. Defaults to + false. + type: boolean + deleteAfterDaysSinceCreationGreaterThan: + description: The age in days after creation to delete + the blob snapshot. Must be between 0 and 99999. + Defaults to -1. + type: number + deleteAfterDaysSinceLastAccessTimeGreaterThan: + description: The age in days after last access time + to delete the blob. Must be between 0 and 99999. + Defaults to -1. + type: number + deleteAfterDaysSinceModificationGreaterThan: + description: The age in days after last modification + to delete the blob. Must be between 0 and 99999. + Defaults to -1. 
+ type: number + tierToArchiveAfterDaysSinceCreationGreaterThan: + description: The age in days after creation to archive + storage. Supports blob currently at Hot or Cool + tier. Must be between 0 and99999. Defaults to + -1. + type: number + tierToArchiveAfterDaysSinceLastAccessTimeGreaterThan: + description: The age in days after last access time + to tier blobs to archive storage. Supports blob + currently at Hot or Cool tier. Must be between + 0 and99999. Defaults to -1. + type: number + tierToArchiveAfterDaysSinceLastTierChangeGreaterThan: + description: The age in days after last tier change + to the blobs to skip to be archved. Must be between + 0 and 99999. Defaults to -1. + type: number + tierToArchiveAfterDaysSinceModificationGreaterThan: + description: The age in days after last modification + to tier blobs to archive storage. Supports blob + currently at Hot or Cool tier. Must be between + 0 and 99999. Defaults to -1. + type: number + tierToColdAfterDaysSinceCreationGreaterThan: + description: The age in days after creation to cold + storage. Supports blob currently at Hot tier. + Must be between 0 and 99999. Defaults to -1. + type: number + tierToColdAfterDaysSinceLastAccessTimeGreaterThan: + description: The age in days after last access time + to tier blobs to cold storage. Supports blob currently + at Hot tier. Must be between 0 and 99999. Defaults + to -1. + type: number + tierToColdAfterDaysSinceModificationGreaterThan: + description: The age in days after last modification + to tier blobs to cold storage. Supports blob currently + at Hot tier. Must be between 0 and 99999. Defaults + to -1. + type: number + tierToCoolAfterDaysSinceCreationGreaterThan: + description: The age in days after creation to cool + storage. Supports blob currently at Hot tier. + Must be between 0 and 99999. Defaults to -1. 
+ type: number + tierToCoolAfterDaysSinceLastAccessTimeGreaterThan: + description: The age in days after last access time + to tier blobs to cool storage. Supports blob currently + at Hot tier. Must be between 0 and 99999. Defaults + to -1. + type: number + tierToCoolAfterDaysSinceModificationGreaterThan: + description: The age in days after last modification + to tier blobs to cool storage. Supports blob currently + at Hot tier. Must be between 0 and 99999. Defaults + to -1. + type: number + type: object + snapshot: + description: A snapshot block as documented below. + properties: + changeTierToArchiveAfterDaysSinceCreation: + description: The age in days after creation to tier + blob version to archive storage. Must be between + 0 and 99999. Defaults to -1. + type: number + changeTierToCoolAfterDaysSinceCreation: + description: The age in days creation create to + tier blob version to cool storage. Must be between + 0 and 99999. Defaults to -1. + type: number + deleteAfterDaysSinceCreationGreaterThan: + description: The age in days after creation to delete + the blob snapshot. Must be between 0 and 99999. + Defaults to -1. + type: number + tierToArchiveAfterDaysSinceLastTierChangeGreaterThan: + description: The age in days after last tier change + to the blobs to skip to be archved. Must be between + 0 and 99999. Defaults to -1. + type: number + tierToColdAfterDaysSinceCreationGreaterThan: + description: The age in days after creation to cold + storage. Supports blob currently at Hot tier. + Must be between 0 and 99999. Defaults to -1. + type: number + type: object + version: + description: A version block as documented below. + properties: + changeTierToArchiveAfterDaysSinceCreation: + description: The age in days after creation to tier + blob version to archive storage. Must be between + 0 and 99999. Defaults to -1. + type: number + changeTierToCoolAfterDaysSinceCreation: + description: The age in days creation create to + tier blob version to cool storage. 
Must be between + 0 and 99999. Defaults to -1. + type: number + deleteAfterDaysSinceCreation: + description: The age in days after creation to delete + the blob version. Must be between 0 and 99999. + Defaults to -1. + type: number + tierToArchiveAfterDaysSinceLastTierChangeGreaterThan: + description: The age in days after last tier change + to the blobs to skip to be archved. Must be between + 0 and 99999. Defaults to -1. + type: number + tierToColdAfterDaysSinceCreationGreaterThan: + description: The age in days after creation to cold + storage. Supports blob currently at Hot tier. + Must be between 0 and 99999. Defaults to -1. + type: number + type: object + type: object + enabled: + description: Boolean to specify whether the rule is enabled. + type: boolean + filters: + description: A filters block as documented below. + properties: + blobTypes: + description: An array of predefined values. Valid options + are blockBlob and appendBlob. + items: + type: string + type: array + x-kubernetes-list-type: set + matchBlobIndexTag: + description: A match_blob_index_tag block as defined + below. The block defines the blob index tag based + filtering for blob objects. + items: + properties: + name: + description: The name of the rule. Rule name is + case-sensitive. It must be unique within a policy. + type: string + operation: + description: The comparison operator which is + used for object comparison and filtering. Possible + value is ==. Defaults to ==. + type: string + value: + description: The filter tag value used for tag + based filtering for blob objects. + type: string + type: object + type: array + prefixMatch: + description: An array of strings for prefixes to be + matched. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + name: + description: The name of the rule. Rule name is case-sensitive. + It must be unique within a policy. 
+ type: string + type: object + type: array + storageAccountId: + description: Specifies the id of the storage account to apply + the management policy to. Changing this forces a new resource + to be created. + type: string + storageAccountIdRef: + description: Reference to a Account in storage to populate storageAccountId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + storageAccountIdSelector: + description: Selector for a Account in storage to populate storageAccountId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + rule: + description: A rule block as documented below. + items: + properties: + actions: + description: An actions block as documented below. + properties: + baseBlob: + description: A base_blob block as documented below. + properties: + autoTierToHotFromCoolEnabled: + description: Whether a blob should automatically + be tiered from cool back to hot if it's accessed + again after being tiered to cool. Defaults to + false. + type: boolean + deleteAfterDaysSinceCreationGreaterThan: + description: The age in days after creation to delete + the blob snapshot. Must be between 0 and 99999. + Defaults to -1. + type: number + deleteAfterDaysSinceLastAccessTimeGreaterThan: + description: The age in days after last access time + to delete the blob. Must be between 0 and 99999. + Defaults to -1. 
+ type: number + deleteAfterDaysSinceModificationGreaterThan: + description: The age in days after last modification + to delete the blob. Must be between 0 and 99999. + Defaults to -1. + type: number + tierToArchiveAfterDaysSinceCreationGreaterThan: + description: The age in days after creation to archive + storage. Supports blob currently at Hot or Cool + tier. Must be between 0 and99999. Defaults to + -1. + type: number + tierToArchiveAfterDaysSinceLastAccessTimeGreaterThan: + description: The age in days after last access time + to tier blobs to archive storage. Supports blob + currently at Hot or Cool tier. Must be between + 0 and99999. Defaults to -1. + type: number + tierToArchiveAfterDaysSinceLastTierChangeGreaterThan: + description: The age in days after last tier change + to the blobs to skip to be archved. Must be between + 0 and 99999. Defaults to -1. + type: number + tierToArchiveAfterDaysSinceModificationGreaterThan: + description: The age in days after last modification + to tier blobs to archive storage. Supports blob + currently at Hot or Cool tier. Must be between + 0 and 99999. Defaults to -1. + type: number + tierToColdAfterDaysSinceCreationGreaterThan: + description: The age in days after creation to cold + storage. Supports blob currently at Hot tier. + Must be between 0 and 99999. Defaults to -1. + type: number + tierToColdAfterDaysSinceLastAccessTimeGreaterThan: + description: The age in days after last access time + to tier blobs to cold storage. Supports blob currently + at Hot tier. Must be between 0 and 99999. Defaults + to -1. + type: number + tierToColdAfterDaysSinceModificationGreaterThan: + description: The age in days after last modification + to tier blobs to cold storage. Supports blob currently + at Hot tier. Must be between 0 and 99999. Defaults + to -1. + type: number + tierToCoolAfterDaysSinceCreationGreaterThan: + description: The age in days after creation to cool + storage. Supports blob currently at Hot tier. 
+ Must be between 0 and 99999. Defaults to -1. + type: number + tierToCoolAfterDaysSinceLastAccessTimeGreaterThan: + description: The age in days after last access time + to tier blobs to cool storage. Supports blob currently + at Hot tier. Must be between 0 and 99999. Defaults + to -1. + type: number + tierToCoolAfterDaysSinceModificationGreaterThan: + description: The age in days after last modification + to tier blobs to cool storage. Supports blob currently + at Hot tier. Must be between 0 and 99999. Defaults + to -1. + type: number + type: object + snapshot: + description: A snapshot block as documented below. + properties: + changeTierToArchiveAfterDaysSinceCreation: + description: The age in days after creation to tier + blob version to archive storage. Must be between + 0 and 99999. Defaults to -1. + type: number + changeTierToCoolAfterDaysSinceCreation: + description: The age in days creation create to + tier blob version to cool storage. Must be between + 0 and 99999. Defaults to -1. + type: number + deleteAfterDaysSinceCreationGreaterThan: + description: The age in days after creation to delete + the blob snapshot. Must be between 0 and 99999. + Defaults to -1. + type: number + tierToArchiveAfterDaysSinceLastTierChangeGreaterThan: + description: The age in days after last tier change + to the blobs to skip to be archved. Must be between + 0 and 99999. Defaults to -1. + type: number + tierToColdAfterDaysSinceCreationGreaterThan: + description: The age in days after creation to cold + storage. Supports blob currently at Hot tier. + Must be between 0 and 99999. Defaults to -1. + type: number + type: object + version: + description: A version block as documented below. + properties: + changeTierToArchiveAfterDaysSinceCreation: + description: The age in days after creation to tier + blob version to archive storage. Must be between + 0 and 99999. Defaults to -1. 
+ type: number + changeTierToCoolAfterDaysSinceCreation: + description: The age in days creation create to + tier blob version to cool storage. Must be between + 0 and 99999. Defaults to -1. + type: number + deleteAfterDaysSinceCreation: + description: The age in days after creation to delete + the blob version. Must be between 0 and 99999. + Defaults to -1. + type: number + tierToArchiveAfterDaysSinceLastTierChangeGreaterThan: + description: The age in days after last tier change + to the blobs to skip to be archved. Must be between + 0 and 99999. Defaults to -1. + type: number + tierToColdAfterDaysSinceCreationGreaterThan: + description: The age in days after creation to cold + storage. Supports blob currently at Hot tier. + Must be between 0 and 99999. Defaults to -1. + type: number + type: object + type: object + enabled: + description: Boolean to specify whether the rule is enabled. + type: boolean + filters: + description: A filters block as documented below. + properties: + blobTypes: + description: An array of predefined values. Valid options + are blockBlob and appendBlob. + items: + type: string + type: array + x-kubernetes-list-type: set + matchBlobIndexTag: + description: A match_blob_index_tag block as defined + below. The block defines the blob index tag based + filtering for blob objects. + items: + properties: + name: + description: The name of the rule. Rule name is + case-sensitive. It must be unique within a policy. + type: string + operation: + description: The comparison operator which is + used for object comparison and filtering. Possible + value is ==. Defaults to ==. + type: string + value: + description: The filter tag value used for tag + based filtering for blob objects. + type: string + type: object + type: array + prefixMatch: + description: An array of strings for prefixes to be + matched. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + name: + description: The name of the rule. 
Rule name is case-sensitive. + It must be unique within a policy. + type: string + type: object + type: array + storageAccountId: + description: Specifies the id of the storage account to apply + the management policy to. Changing this forces a new resource + to be created. + type: string + storageAccountIdRef: + description: Reference to a Account in storage to populate storageAccountId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + storageAccountIdSelector: + description: Selector for a Account in storage to populate storageAccountId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. 
+ type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: ManagementPolicyStatus defines the observed state of ManagementPolicy. + properties: + atProvider: + properties: + id: + description: The ID of the Storage Account Management Policy. + type: string + rule: + description: A rule block as documented below. + items: + properties: + actions: + description: An actions block as documented below. + properties: + baseBlob: + description: A base_blob block as documented below. + properties: + autoTierToHotFromCoolEnabled: + description: Whether a blob should automatically + be tiered from cool back to hot if it's accessed + again after being tiered to cool. Defaults to + false. + type: boolean + deleteAfterDaysSinceCreationGreaterThan: + description: The age in days after creation to delete + the blob snapshot. Must be between 0 and 99999. + Defaults to -1. + type: number + deleteAfterDaysSinceLastAccessTimeGreaterThan: + description: The age in days after last access time + to delete the blob. Must be between 0 and 99999. + Defaults to -1. + type: number + deleteAfterDaysSinceModificationGreaterThan: + description: The age in days after last modification + to delete the blob. Must be between 0 and 99999. + Defaults to -1. + type: number + tierToArchiveAfterDaysSinceCreationGreaterThan: + description: The age in days after creation to archive + storage. Supports blob currently at Hot or Cool + tier. Must be between 0 and99999. Defaults to + -1. + type: number + tierToArchiveAfterDaysSinceLastAccessTimeGreaterThan: + description: The age in days after last access time + to tier blobs to archive storage. Supports blob + currently at Hot or Cool tier. Must be between + 0 and99999. Defaults to -1. 
+ type: number + tierToArchiveAfterDaysSinceLastTierChangeGreaterThan: + description: The age in days after last tier change + to the blobs to skip to be archved. Must be between + 0 and 99999. Defaults to -1. + type: number + tierToArchiveAfterDaysSinceModificationGreaterThan: + description: The age in days after last modification + to tier blobs to archive storage. Supports blob + currently at Hot or Cool tier. Must be between + 0 and 99999. Defaults to -1. + type: number + tierToColdAfterDaysSinceCreationGreaterThan: + description: The age in days after creation to cold + storage. Supports blob currently at Hot tier. + Must be between 0 and 99999. Defaults to -1. + type: number + tierToColdAfterDaysSinceLastAccessTimeGreaterThan: + description: The age in days after last access time + to tier blobs to cold storage. Supports blob currently + at Hot tier. Must be between 0 and 99999. Defaults + to -1. + type: number + tierToColdAfterDaysSinceModificationGreaterThan: + description: The age in days after last modification + to tier blobs to cold storage. Supports blob currently + at Hot tier. Must be between 0 and 99999. Defaults + to -1. + type: number + tierToCoolAfterDaysSinceCreationGreaterThan: + description: The age in days after creation to cool + storage. Supports blob currently at Hot tier. + Must be between 0 and 99999. Defaults to -1. + type: number + tierToCoolAfterDaysSinceLastAccessTimeGreaterThan: + description: The age in days after last access time + to tier blobs to cool storage. Supports blob currently + at Hot tier. Must be between 0 and 99999. Defaults + to -1. + type: number + tierToCoolAfterDaysSinceModificationGreaterThan: + description: The age in days after last modification + to tier blobs to cool storage. Supports blob currently + at Hot tier. Must be between 0 and 99999. Defaults + to -1. + type: number + type: object + snapshot: + description: A snapshot block as documented below. 
+ properties: + changeTierToArchiveAfterDaysSinceCreation: + description: The age in days after creation to tier + blob version to archive storage. Must be between + 0 and 99999. Defaults to -1. + type: number + changeTierToCoolAfterDaysSinceCreation: + description: The age in days creation create to + tier blob version to cool storage. Must be between + 0 and 99999. Defaults to -1. + type: number + deleteAfterDaysSinceCreationGreaterThan: + description: The age in days after creation to delete + the blob snapshot. Must be between 0 and 99999. + Defaults to -1. + type: number + tierToArchiveAfterDaysSinceLastTierChangeGreaterThan: + description: The age in days after last tier change + to the blobs to skip to be archved. Must be between + 0 and 99999. Defaults to -1. + type: number + tierToColdAfterDaysSinceCreationGreaterThan: + description: The age in days after creation to cold + storage. Supports blob currently at Hot tier. + Must be between 0 and 99999. Defaults to -1. + type: number + type: object + version: + description: A version block as documented below. + properties: + changeTierToArchiveAfterDaysSinceCreation: + description: The age in days after creation to tier + blob version to archive storage. Must be between + 0 and 99999. Defaults to -1. + type: number + changeTierToCoolAfterDaysSinceCreation: + description: The age in days creation create to + tier blob version to cool storage. Must be between + 0 and 99999. Defaults to -1. + type: number + deleteAfterDaysSinceCreation: + description: The age in days after creation to delete + the blob version. Must be between 0 and 99999. + Defaults to -1. + type: number + tierToArchiveAfterDaysSinceLastTierChangeGreaterThan: + description: The age in days after last tier change + to the blobs to skip to be archved. Must be between + 0 and 99999. Defaults to -1. + type: number + tierToColdAfterDaysSinceCreationGreaterThan: + description: The age in days after creation to cold + storage. 
Supports blob currently at Hot tier. + Must be between 0 and 99999. Defaults to -1. + type: number + type: object + type: object + enabled: + description: Boolean to specify whether the rule is enabled. + type: boolean + filters: + description: A filters block as documented below. + properties: + blobTypes: + description: An array of predefined values. Valid options + are blockBlob and appendBlob. + items: + type: string + type: array + x-kubernetes-list-type: set + matchBlobIndexTag: + description: A match_blob_index_tag block as defined + below. The block defines the blob index tag based + filtering for blob objects. + items: + properties: + name: + description: The name of the rule. Rule name is + case-sensitive. It must be unique within a policy. + type: string + operation: + description: The comparison operator which is + used for object comparison and filtering. Possible + value is ==. Defaults to ==. + type: string + value: + description: The filter tag value used for tag + based filtering for blob objects. + type: string + type: object + type: array + prefixMatch: + description: An array of strings for prefixes to be + matched. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + name: + description: The name of the rule. Rule name is case-sensitive. + It must be unique within a policy. + type: string + type: object + type: array + storageAccountId: + description: Specifies the id of the storage account to apply + the management policy to. Changing this forces a new resource + to be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. 
+ format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/storagecache.azure.upbound.io_hpccaches.yaml b/package/crds/storagecache.azure.upbound.io_hpccaches.yaml index 08fe26b49..cbb26ac39 100644 --- a/package/crds/storagecache.azure.upbound.io_hpccaches.yaml +++ b/package/crds/storagecache.azure.upbound.io_hpccaches.yaml @@ -1247,3 +1247,1183 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: HPCCache is the Schema for the HPCCaches API. Manages a HPC Cache. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: HPCCacheSpec defines the desired state of HPCCache + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + automaticallyRotateKeyToLatestEnabled: + description: Specifies whether the HPC Cache automatically rotates + Encryption Key to the latest version. + type: boolean + cacheSizeInGb: + description: The size of the HPC Cache, in GB. Possible values + are 3072, 6144, 12288, 21623, 24576, 43246, 49152 and 86491. + Changing this forces a new resource to be created. + type: number + defaultAccessPolicy: + description: A default_access_policy block as defined below. + properties: + accessRule: + description: One or more access_rule blocks (up to three) + as defined above. + items: + properties: + access: + description: 'The access level for this rule. Possible + values are: rw, ro, no.' + type: string + anonymousGid: + description: The anonymous GID used when root_squash_enabled + is true. + type: number + anonymousUid: + description: The anonymous UID used when root_squash_enabled + is true. + type: number + filter: + description: 'The filter applied to the scope for this + rule. 
The filter''s format depends on its scope: default + scope matches all clients and has no filter value; + network scope takes a CIDR format; host takes an IP + address or fully qualified domain name. If a client + does not match any filter rule and there is no default + rule, access is denied.' + type: string + rootSquashEnabled: + description: Whether to enable root squash? + type: boolean + scope: + description: 'The scope of this rule. The scope and + (potentially) the filter determine which clients match + the rule. Possible values are: default, network, host.' + type: string + submountAccessEnabled: + description: Whether allow access to subdirectories + under the root export? + type: boolean + suidEnabled: + description: Whether SUID is allowed? + type: boolean + type: object + type: array + type: object + directoryActiveDirectory: + description: A directory_active_directory block as defined below. + properties: + cacheNetbiosName: + description: The NetBIOS name to assign to the HPC Cache when + it joins the Active Directory domain as a server. + type: string + dnsPrimaryIp: + description: The primary DNS IP address used to resolve the + Active Directory domain controller's FQDN. + type: string + dnsSecondaryIp: + description: The secondary DNS IP address used to resolve + the Active Directory domain controller's FQDN. + type: string + domainName: + description: The fully qualified domain name of the Active + Directory domain controller. + type: string + domainNetbiosName: + description: The Active Directory domain's NetBIOS name. + type: string + passwordSecretRef: + description: The password of the Active Directory domain administrator. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - key + - name + - namespace + type: object + username: + description: The username of the Active Directory domain administrator. + type: string + required: + - passwordSecretRef + type: object + directoryFlatFile: + description: A directory_flat_file block as defined below. + properties: + groupFileUri: + description: The URI of the file containing group information + (/etc/group file format in Unix-like OS). + type: string + passwordFileUri: + description: The URI of the file containing user information + (/etc/passwd file format in Unix-like OS). + type: string + type: object + directoryLdap: + description: A directory_ldap block as defined below. + properties: + baseDn: + description: The base distinguished name (DN) for the LDAP + domain. + type: string + bind: + description: A bind block as defined above. + properties: + dn: + description: The Bind Distinguished Name (DN) identity + to be used in the secure LDAP connection. + type: string + passwordSecretRef: + description: The password of the Active Directory domain + administrator. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + required: + - passwordSecretRef + type: object + certificateValidationUri: + description: The URI of the CA certificate to validate the + LDAP secure connection. + type: string + downloadCertificateAutomatically: + description: Whether the certificate should be automatically + downloaded. This can be set to true only when certificate_validation_uri + is provided. + type: boolean + encrypted: + description: Whether the LDAP connection should be encrypted? + type: boolean + server: + description: The FQDN or IP address of the LDAP server. + type: string + type: object + dns: + description: A dns block as defined below. 
+ properties: + searchDomain: + description: The DNS search domain for the HPC Cache. + type: string + servers: + description: A list of DNS servers for the HPC Cache. At most + three IP(s) are allowed to set. + items: + type: string + type: array + type: object + identity: + description: An identity block as defined below. Changing this + forces a new resource to be created. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this HPC Cache. Changing this forces + a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this HPC Cache. Possible values + are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned + (to enable both). Changing this forces a new resource to + be created. + type: string + type: object + keyVaultKeyId: + description: The ID of the Key Vault Key which should be used + to encrypt the data in this HPC Cache. + type: string + location: + description: Specifies the supported Azure Region where the HPC + Cache should be created. Changing this forces a new resource + to be created. + type: string + mtu: + description: The IPv4 maximum transmission unit configured for + the subnet of the HPC Cache. Possible values range from 576 + - 1500. Defaults to 1500. + type: number + ntpServer: + description: The NTP server IP Address or FQDN for the HPC Cache. + Defaults to time.windows.com. + type: string + resourceGroupName: + description: The name of the Resource Group in which to create + the HPC Cache. Changing this forces a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + skuName: + description: The SKU of HPC Cache to use. Possible values are + (ReadWrite) - Standard_2G, Standard_4G Standard_8G or (ReadOnly) + - Standard_L4_5G, Standard_L9G, and Standard_L16G. Changing + this forces a new resource to be created. + type: string + subnetId: + description: The ID of the Subnet for the HPC Cache. Changing + this forces a new resource to be created. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the HPC Cache. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + automaticallyRotateKeyToLatestEnabled: + description: Specifies whether the HPC Cache automatically rotates + Encryption Key to the latest version. + type: boolean + cacheSizeInGb: + description: The size of the HPC Cache, in GB. Possible values + are 3072, 6144, 12288, 21623, 24576, 43246, 49152 and 86491. + Changing this forces a new resource to be created. 
+ type: number + defaultAccessPolicy: + description: A default_access_policy block as defined below. + properties: + accessRule: + description: One or more access_rule blocks (up to three) + as defined above. + items: + properties: + access: + description: 'The access level for this rule. Possible + values are: rw, ro, no.' + type: string + anonymousGid: + description: The anonymous GID used when root_squash_enabled + is true. + type: number + anonymousUid: + description: The anonymous UID used when root_squash_enabled + is true. + type: number + filter: + description: 'The filter applied to the scope for this + rule. The filter''s format depends on its scope: default + scope matches all clients and has no filter value; + network scope takes a CIDR format; host takes an IP + address or fully qualified domain name. If a client + does not match any filter rule and there is no default + rule, access is denied.' + type: string + rootSquashEnabled: + description: Whether to enable root squash? + type: boolean + scope: + description: 'The scope of this rule. The scope and + (potentially) the filter determine which clients match + the rule. Possible values are: default, network, host.' + type: string + submountAccessEnabled: + description: Whether allow access to subdirectories + under the root export? + type: boolean + suidEnabled: + description: Whether SUID is allowed? + type: boolean + type: object + type: array + type: object + directoryActiveDirectory: + description: A directory_active_directory block as defined below. + properties: + cacheNetbiosName: + description: The NetBIOS name to assign to the HPC Cache when + it joins the Active Directory domain as a server. + type: string + dnsPrimaryIp: + description: The primary DNS IP address used to resolve the + Active Directory domain controller's FQDN. + type: string + dnsSecondaryIp: + description: The secondary DNS IP address used to resolve + the Active Directory domain controller's FQDN. 
+ type: string + domainName: + description: The fully qualified domain name of the Active + Directory domain controller. + type: string + domainNetbiosName: + description: The Active Directory domain's NetBIOS name. + type: string + username: + description: The username of the Active Directory domain administrator. + type: string + type: object + directoryFlatFile: + description: A directory_flat_file block as defined below. + properties: + groupFileUri: + description: The URI of the file containing group information + (/etc/group file format in Unix-like OS). + type: string + passwordFileUri: + description: The URI of the file containing user information + (/etc/passwd file format in Unix-like OS). + type: string + type: object + directoryLdap: + description: A directory_ldap block as defined below. + properties: + baseDn: + description: The base distinguished name (DN) for the LDAP + domain. + type: string + bind: + description: A bind block as defined above. + properties: + dn: + description: The Bind Distinguished Name (DN) identity + to be used in the secure LDAP connection. + type: string + type: object + certificateValidationUri: + description: The URI of the CA certificate to validate the + LDAP secure connection. + type: string + downloadCertificateAutomatically: + description: Whether the certificate should be automatically + downloaded. This can be set to true only when certificate_validation_uri + is provided. + type: boolean + encrypted: + description: Whether the LDAP connection should be encrypted? + type: boolean + server: + description: The FQDN or IP address of the LDAP server. + type: string + type: object + dns: + description: A dns block as defined below. + properties: + searchDomain: + description: The DNS search domain for the HPC Cache. + type: string + servers: + description: A list of DNS servers for the HPC Cache. At most + three IP(s) are allowed to set. 
+ items: + type: string + type: array + type: object + identity: + description: An identity block as defined below. Changing this + forces a new resource to be created. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this HPC Cache. Changing this forces + a new resource to be created. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this HPC Cache. Possible values + are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned + (to enable both). Changing this forces a new resource to + be created. + type: string + type: object + keyVaultKeyId: + description: The ID of the Key Vault Key which should be used + to encrypt the data in this HPC Cache. + type: string + location: + description: Specifies the supported Azure Region where the HPC + Cache should be created. Changing this forces a new resource + to be created. + type: string + mtu: + description: The IPv4 maximum transmission unit configured for + the subnet of the HPC Cache. Possible values range from 576 + - 1500. Defaults to 1500. + type: number + ntpServer: + description: The NTP server IP Address or FQDN for the HPC Cache. + Defaults to time.windows.com. + type: string + skuName: + description: The SKU of HPC Cache to use. Possible values are + (ReadWrite) - Standard_2G, Standard_4G Standard_8G or (ReadOnly) + - Standard_L4_5G, Standard_L9G, and Standard_L16G. Changing + this forces a new resource to be created. + type: string + subnetId: + description: The ID of the Subnet for the HPC Cache. Changing + this forces a new resource to be created. + type: string + subnetIdRef: + description: Reference to a Subnet in network to populate subnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + subnetIdSelector: + description: Selector for a Subnet in network to populate subnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the HPC Cache. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.cacheSizeInGb is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.cacheSizeInGb) + || (has(self.initProvider) && has(self.initProvider.cacheSizeInGb))' + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.skuName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.skuName) + || (has(self.initProvider) && has(self.initProvider.skuName))' + status: + description: HPCCacheStatus defines the observed state of HPCCache. + properties: + atProvider: + properties: + automaticallyRotateKeyToLatestEnabled: + description: Specifies whether the HPC Cache automatically rotates + Encryption Key to the latest version. + type: boolean + cacheSizeInGb: + description: The size of the HPC Cache, in GB. Possible values + are 3072, 6144, 12288, 21623, 24576, 43246, 49152 and 86491. + Changing this forces a new resource to be created. + type: number + defaultAccessPolicy: + description: A default_access_policy block as defined below. + properties: + accessRule: + description: One or more access_rule blocks (up to three) + as defined above. + items: + properties: + access: + description: 'The access level for this rule. Possible + values are: rw, ro, no.' + type: string + anonymousGid: + description: The anonymous GID used when root_squash_enabled + is true. 
+ type: number + anonymousUid: + description: The anonymous UID used when root_squash_enabled + is true. + type: number + filter: + description: 'The filter applied to the scope for this + rule. The filter''s format depends on its scope: default + scope matches all clients and has no filter value; + network scope takes a CIDR format; host takes an IP + address or fully qualified domain name. If a client + does not match any filter rule and there is no default + rule, access is denied.' + type: string + rootSquashEnabled: + description: Whether to enable root squash? + type: boolean + scope: + description: 'The scope of this rule. The scope and + (potentially) the filter determine which clients match + the rule. Possible values are: default, network, host.' + type: string + submountAccessEnabled: + description: Whether allow access to subdirectories + under the root export? + type: boolean + suidEnabled: + description: Whether SUID is allowed? + type: boolean + type: object + type: array + type: object + directoryActiveDirectory: + description: A directory_active_directory block as defined below. + properties: + cacheNetbiosName: + description: The NetBIOS name to assign to the HPC Cache when + it joins the Active Directory domain as a server. + type: string + dnsPrimaryIp: + description: The primary DNS IP address used to resolve the + Active Directory domain controller's FQDN. + type: string + dnsSecondaryIp: + description: The secondary DNS IP address used to resolve + the Active Directory domain controller's FQDN. + type: string + domainName: + description: The fully qualified domain name of the Active + Directory domain controller. + type: string + domainNetbiosName: + description: The Active Directory domain's NetBIOS name. + type: string + username: + description: The username of the Active Directory domain administrator. + type: string + type: object + directoryFlatFile: + description: A directory_flat_file block as defined below. 
+ properties: + groupFileUri: + description: The URI of the file containing group information + (/etc/group file format in Unix-like OS). + type: string + passwordFileUri: + description: The URI of the file containing user information + (/etc/passwd file format in Unix-like OS). + type: string + type: object + directoryLdap: + description: A directory_ldap block as defined below. + properties: + baseDn: + description: The base distinguished name (DN) for the LDAP + domain. + type: string + bind: + description: A bind block as defined above. + properties: + dn: + description: The Bind Distinguished Name (DN) identity + to be used in the secure LDAP connection. + type: string + type: object + certificateValidationUri: + description: The URI of the CA certificate to validate the + LDAP secure connection. + type: string + downloadCertificateAutomatically: + description: Whether the certificate should be automatically + downloaded. This can be set to true only when certificate_validation_uri + is provided. + type: boolean + encrypted: + description: Whether the LDAP connection should be encrypted? + type: boolean + server: + description: The FQDN or IP address of the LDAP server. + type: string + type: object + dns: + description: A dns block as defined below. + properties: + searchDomain: + description: The DNS search domain for the HPC Cache. + type: string + servers: + description: A list of DNS servers for the HPC Cache. At most + three IP(s) are allowed to set. + items: + type: string + type: array + type: object + id: + description: The id of the HPC Cache. + type: string + identity: + description: An identity block as defined below. Changing this + forces a new resource to be created. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this HPC Cache. Changing this forces + a new resource to be created. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Principal ID associated with this Managed + Service Identity. + type: string + tenantId: + description: The Tenant ID associated with this Managed Service + Identity. + type: string + type: + description: Specifies the type of Managed Service Identity + that should be configured on this HPC Cache. Possible values + are SystemAssigned, UserAssigned, SystemAssigned, UserAssigned + (to enable both). Changing this forces a new resource to + be created. + type: string + type: object + keyVaultKeyId: + description: The ID of the Key Vault Key which should be used + to encrypt the data in this HPC Cache. + type: string + location: + description: Specifies the supported Azure Region where the HPC + Cache should be created. Changing this forces a new resource + to be created. + type: string + mountAddresses: + description: A list of IP Addresses where the HPC Cache can be + mounted. + items: + type: string + type: array + mtu: + description: The IPv4 maximum transmission unit configured for + the subnet of the HPC Cache. Possible values range from 576 + - 1500. Defaults to 1500. + type: number + ntpServer: + description: The NTP server IP Address or FQDN for the HPC Cache. + Defaults to time.windows.com. + type: string + resourceGroupName: + description: The name of the Resource Group in which to create + the HPC Cache. Changing this forces a new resource to be created. + type: string + skuName: + description: The SKU of HPC Cache to use. Possible values are + (ReadWrite) - Standard_2G, Standard_4G Standard_8G or (ReadOnly) + - Standard_L4_5G, Standard_L9G, and Standard_L16G. Changing + this forces a new resource to be created. + type: string + subnetId: + description: The ID of the Subnet for the HPC Cache. Changing + this forces a new resource to be created. 
+ type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the HPC Cache. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/streamanalytics.azure.upbound.io_functionjavascriptudas.yaml b/package/crds/streamanalytics.azure.upbound.io_functionjavascriptudas.yaml index de09ff6ab..ad5c46771 100644 --- a/package/crds/streamanalytics.azure.upbound.io_functionjavascriptudas.yaml +++ b/package/crds/streamanalytics.azure.upbound.io_functionjavascriptudas.yaml @@ -594,3 +594,573 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: FunctionJavascriptUda is the Schema for the FunctionJavascriptUdas + API. Manages a JavaScript UDA Function within a Stream Analytics Streaming + Job. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: FunctionJavascriptUdaSpec defines the desired state of FunctionJavascriptUda + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + input: + description: One or more input blocks as defined below. + items: + properties: + configurationParameter: + description: Is this input parameter a configuration parameter? + Defaults to false. + type: boolean + type: + description: The input data type of this JavaScript Function. + Possible values include any, array, bigint, datetime, + float, nvarchar(max) and record. + type: string + type: object + type: array + output: + description: An output block as defined below. + properties: + type: + description: The output data type from this JavaScript Function. + Possible values include any, array, bigint, datetime, float, + nvarchar(max) and record. + type: string + type: object + script: + description: The JavaScript of this UDA Function. + type: string + streamAnalyticsJobId: + description: The resource ID of the Stream Analytics Job where + this Function should be created. Changing this forces a new + resource to be created. 
+ type: string + streamAnalyticsJobIdRef: + description: Reference to a Job in streamanalytics to populate + streamAnalyticsJobId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + streamAnalyticsJobIdSelector: + description: Selector for a Job in streamanalytics to populate + streamAnalyticsJobId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + input: + description: One or more input blocks as defined below. + items: + properties: + configurationParameter: + description: Is this input parameter a configuration parameter? + Defaults to false. + type: boolean + type: + description: The input data type of this JavaScript Function. + Possible values include any, array, bigint, datetime, + float, nvarchar(max) and record. + type: string + type: object + type: array + output: + description: An output block as defined below. + properties: + type: + description: The output data type from this JavaScript Function. + Possible values include any, array, bigint, datetime, float, + nvarchar(max) and record. + type: string + type: object + script: + description: The JavaScript of this UDA Function. + type: string + streamAnalyticsJobId: + description: The resource ID of the Stream Analytics Job where + this Function should be created. Changing this forces a new + resource to be created. 
+ type: string + streamAnalyticsJobIdRef: + description: Reference to a Job in streamanalytics to populate + streamAnalyticsJobId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + streamAnalyticsJobIdSelector: + description: Selector for a Job in streamanalytics to populate + streamAnalyticsJobId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.input is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.input) + || (has(self.initProvider) && has(self.initProvider.input))' + - message: spec.forProvider.output is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.output) + || (has(self.initProvider) && has(self.initProvider.output))' + - message: spec.forProvider.script is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.script) + || (has(self.initProvider) && has(self.initProvider.script))' + status: + description: FunctionJavascriptUdaStatus defines the observed state of + FunctionJavascriptUda. + properties: + atProvider: + properties: + id: + description: The ID of the Stream Analytics JavaScript UDA Function. + type: string + input: + description: One or more input blocks as defined below. + items: + properties: + configurationParameter: + description: Is this input parameter a configuration parameter? + Defaults to false. + type: boolean + type: + description: The input data type of this JavaScript Function. + Possible values include any, array, bigint, datetime, + float, nvarchar(max) and record. + type: string + type: object + type: array + output: + description: An output block as defined below. + properties: + type: + description: The output data type from this JavaScript Function. + Possible values include any, array, bigint, datetime, float, + nvarchar(max) and record. + type: string + type: object + script: + description: The JavaScript of this UDA Function. 
+ type: string + streamAnalyticsJobId: + description: The resource ID of the Stream Analytics Job where + this Function should be created. Changing this forces a new + resource to be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/streamanalytics.azure.upbound.io_jobs.yaml b/package/crds/streamanalytics.azure.upbound.io_jobs.yaml index 929f507a0..93c5a1592 100644 --- a/package/crds/streamanalytics.azure.upbound.io_jobs.yaml +++ b/package/crds/streamanalytics.azure.upbound.io_jobs.yaml @@ -756,3 +756,735 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Job is the Schema for the Jobs API. Manages a Stream Analytics + Job. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: JobSpec defines the desired state of Job + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + compatibilityLevel: + description: Specifies the compatibility level for this job - + which controls certain runtime behaviours of the streaming job. + Possible values are 1.0, 1.1 and 1.2. + type: string + contentStoragePolicy: + description: The policy for storing stream analytics content. + Possible values are JobStorageAccount, SystemAccount. Defaults + to SystemAccount. + type: string + dataLocale: + description: Specifies the Data Locale of the Job, which should + be a supported .NET Culture. + type: string + eventsLateArrivalMaxDelayInSeconds: + description: Specifies the maximum tolerable delay in seconds + where events arriving late could be included. Supported range + is -1 (indefinite) to 1814399 (20d 23h 59m 59s). Default is + 5. + type: number + eventsOutOfOrderMaxDelayInSeconds: + description: Specifies the maximum tolerable delay in seconds + where out-of-order events can be adjusted to be back in order. + Supported range is 0 to 599 (9m 59s). Default is 0. 
+ type: number + eventsOutOfOrderPolicy: + description: Specifies the policy which should be applied to events + which arrive out of order in the input event stream. Possible + values are Adjust and Drop. Default is Adjust. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: The identity id of the user assigned identity + to use when type is UserAssigned + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Stream Analytics Job. + Possible values are SystemAssigned and UserAssigned. + type: string + type: object + jobStorageAccount: + description: The details of the job storage account. A job_storage_account + block as defined below. + items: + properties: + accountKeySecretRef: + description: The account key for the Azure storage account. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + accountName: + description: The name of the Azure storage account. + type: string + authenticationMode: + description: The authentication mode of the storage account. + The only supported value is ConnectionString. Defaults + to ConnectionString. + type: string + required: + - accountKeySecretRef + type: object + type: array + location: + description: The Azure Region in which the Resource Group exists. + Changing this forces a new resource to be created. + type: string + outputErrorPolicy: + description: Specifies the policy which should be applied to events + which arrive at the output and cannot be written to the external + storage due to being malformed (such as missing column values, + column values of wrong type or size). Possible values are Drop + and Stop. Default is Drop. 
+ type: string + resourceGroupName: + description: The name of the Resource Group where the Stream Analytics + Job should exist. Changing this forces a new resource to be + created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + skuName: + description: The SKU Name to use for the Stream Analytics Job. + Possible values are Standard, StandardV2. Defaults to Standard. + type: string + streamAnalyticsClusterId: + description: The ID of an existing Stream Analytics Cluster where + the Stream Analytics Job should run. + type: string + streamingUnits: + description: Specifies the number of streaming units that the + streaming job uses. Supported values are 1, 3, 6 and multiples + of 6 up to 120. + type: number + tags: + additionalProperties: + type: string + description: A mapping of tags assigned to the resource. + type: object + x-kubernetes-map-type: granular + transformationQuery: + description: Specifies the query that will be run in the streaming + job, written in Stream Analytics Query Language (SAQL). + type: string + type: + description: The type of the Stream Analytics Job. Possible values + are Cloud and Edge. Defaults to Cloud. Changing this forces + a new resource to be created. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + compatibilityLevel: + description: Specifies the compatibility level for this job - + which controls certain runtime behaviours of the streaming job. + Possible values are 1.0, 1.1 and 1.2. + type: string + contentStoragePolicy: + description: The policy for storing stream analytics content. + Possible values are JobStorageAccount, SystemAccount. Defaults + to SystemAccount. + type: string + dataLocale: + description: Specifies the Data Locale of the Job, which should + be a supported .NET Culture. + type: string + eventsLateArrivalMaxDelayInSeconds: + description: Specifies the maximum tolerable delay in seconds + where events arriving late could be included. Supported range + is -1 (indefinite) to 1814399 (20d 23h 59m 59s). Default is + 5. + type: number + eventsOutOfOrderMaxDelayInSeconds: + description: Specifies the maximum tolerable delay in seconds + where out-of-order events can be adjusted to be back in order. + Supported range is 0 to 599 (9m 59s). Default is 0. + type: number + eventsOutOfOrderPolicy: + description: Specifies the policy which should be applied to events + which arrive out of order in the input event stream. Possible + values are Adjust and Drop. Default is Adjust. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: The identity id of the user assigned identity + to use when type is UserAssigned + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Stream Analytics Job. + Possible values are SystemAssigned and UserAssigned. + type: string + type: object + jobStorageAccount: + description: The details of the job storage account. 
A job_storage_account + block as defined below. + items: + properties: + accountName: + description: The name of the Azure storage account. + type: string + authenticationMode: + description: The authentication mode of the storage account. + The only supported value is ConnectionString. Defaults + to ConnectionString. + type: string + type: object + type: array + location: + description: The Azure Region in which the Resource Group exists. + Changing this forces a new resource to be created. + type: string + outputErrorPolicy: + description: Specifies the policy which should be applied to events + which arrive at the output and cannot be written to the external + storage due to being malformed (such as missing column values, + column values of wrong type or size). Possible values are Drop + and Stop. Default is Drop. + type: string + skuName: + description: The SKU Name to use for the Stream Analytics Job. + Possible values are Standard, StandardV2. Defaults to Standard. + type: string + streamAnalyticsClusterId: + description: The ID of an existing Stream Analytics Cluster where + the Stream Analytics Job should run. + type: string + streamingUnits: + description: Specifies the number of streaming units that the + streaming job uses. Supported values are 1, 3, 6 and multiples + of 6 up to 120. + type: number + tags: + additionalProperties: + type: string + description: A mapping of tags assigned to the resource. + type: object + x-kubernetes-map-type: granular + transformationQuery: + description: Specifies the query that will be run in the streaming + job, written in Stream Analytics Query Language (SAQL). + type: string + type: + description: The type of the Stream Analytics Job. Possible values + are Cloud and Edge. Defaults to Cloud. Changing this forces + a new resource to be created. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. 
It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.transformationQuery is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.transformationQuery) + || (has(self.initProvider) && has(self.initProvider.transformationQuery))' + status: + description: JobStatus defines the observed state of Job. + properties: + atProvider: + properties: + compatibilityLevel: + description: Specifies the compatibility level for this job - + which controls certain runtime behaviours of the streaming job. + Possible values are 1.0, 1.1 and 1.2. + type: string + contentStoragePolicy: + description: The policy for storing stream analytics content. + Possible values are JobStorageAccount, SystemAccount. Defaults + to SystemAccount. + type: string + dataLocale: + description: Specifies the Data Locale of the Job, which should + be a supported .NET Culture. + type: string + eventsLateArrivalMaxDelayInSeconds: + description: Specifies the maximum tolerable delay in seconds + where events arriving late could be included. Supported range + is -1 (indefinite) to 1814399 (20d 23h 59m 59s). Default is + 5. + type: number + eventsOutOfOrderMaxDelayInSeconds: + description: Specifies the maximum tolerable delay in seconds + where out-of-order events can be adjusted to be back in order. + Supported range is 0 to 599 (9m 59s). Default is 0. 
+ type: number + eventsOutOfOrderPolicy: + description: Specifies the policy which should be applied to events + which arrive out of order in the input event stream. Possible + values are Adjust and Drop. Default is Adjust. + type: string + id: + description: The ID of the Stream Analytics Job. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: The identity id of the user assigned identity + to use when type is UserAssigned + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Principal ID associated with this Managed + Service Identity. + type: string + tenantId: + description: The Tenant ID associated with this Managed Service + Identity. + type: string + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Stream Analytics Job. + Possible values are SystemAssigned and UserAssigned. + type: string + type: object + jobId: + description: The Job ID assigned by the Stream Analytics Job. + type: string + jobStorageAccount: + description: The details of the job storage account. A job_storage_account + block as defined below. + items: + properties: + accountName: + description: The name of the Azure storage account. + type: string + authenticationMode: + description: The authentication mode of the storage account. + The only supported value is ConnectionString. Defaults + to ConnectionString. + type: string + type: object + type: array + location: + description: The Azure Region in which the Resource Group exists. + Changing this forces a new resource to be created. + type: string + outputErrorPolicy: + description: Specifies the policy which should be applied to events + which arrive at the output and cannot be written to the external + storage due to being malformed (such as missing column values, + column values of wrong type or size). Possible values are Drop + and Stop. Default is Drop. 
+ type: string + resourceGroupName: + description: The name of the Resource Group where the Stream Analytics + Job should exist. Changing this forces a new resource to be + created. + type: string + skuName: + description: The SKU Name to use for the Stream Analytics Job. + Possible values are Standard, StandardV2. Defaults to Standard. + type: string + streamAnalyticsClusterId: + description: The ID of an existing Stream Analytics Cluster where + the Stream Analytics Job should run. + type: string + streamingUnits: + description: Specifies the number of streaming units that the + streaming job uses. Supported values are 1, 3, 6 and multiples + of 6 up to 120. + type: number + tags: + additionalProperties: + type: string + description: A mapping of tags assigned to the resource. + type: object + x-kubernetes-map-type: granular + transformationQuery: + description: Specifies the query that will be run in the streaming + job, written in Stream Analytics Query Language (SAQL). + type: string + type: + description: The type of the Stream Analytics Job. Possible values + are Cloud and Edge. Defaults to Cloud. Changing this forces + a new resource to be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
+ format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/streamanalytics.azure.upbound.io_outputblobs.yaml b/package/crds/streamanalytics.azure.upbound.io_outputblobs.yaml index 1e061cc35..6007556a5 100644 --- a/package/crds/streamanalytics.azure.upbound.io_outputblobs.yaml +++ b/package/crds/streamanalytics.azure.upbound.io_outputblobs.yaml @@ -1090,3 +1090,1066 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: OutputBlob is the Schema for the OutputBlobs API. Manages a Stream + Analytics Output to Blob Storage. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: OutputBlobSpec defines the desired state of OutputBlob + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + authenticationMode: + description: The authentication mode for the Stream Output. Possible + values are Msi and ConnectionString. Defaults to ConnectionString. + type: string + batchMaxWaitTime: + description: The maximum wait time per batch in hh:mm:ss e.g. + 00:02:00 for two minutes. + type: string + batchMinRows: + description: The minimum number of rows per batch (must be between + 0 and 1000000). + type: number + blobWriteMode: + description: Determines whether blob blocks are either committed + automatically or appended. Possible values are Append and Once. + Defaults to Append. 
+ type: string + dateFormat: + description: The date format. Wherever {date} appears in path_pattern, + the value of this property is used as the date format instead. + type: string + pathPattern: + description: The blob path pattern. Not a regular expression. + It represents a pattern against which blob names will be matched + to determine whether or not they should be included as input + or output to the job. + type: string + resourceGroupName: + description: The name of the Resource Group where the Stream Analytics + Job exists. Changing this forces a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + serialization: + description: A serialization block as defined below. + properties: + encoding: + description: The encoding of the incoming data in the case + of input and the encoding of outgoing data in the case of + output. It currently can only be set to UTF8. + type: string + fieldDelimiter: + description: "The delimiter that will be used to separate + comma-separated value (CSV) records. Possible values are + \ (space), , (comma), \t (tab), | (pipe) and ;." + type: string + format: + description: Specifies the format of the JSON the output will + be written in. Possible values are Array and LineSeparated. + type: string + type: + description: The serialization format used for outgoing data + streams. Possible values are Avro, Csv, Json and Parquet. + type: string + type: object + storageAccountKeySecretRef: + description: The Access Key which should be used to connect to + this Storage Account. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. 
+ type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + storageAccountName: + description: The name of the Storage Account. + type: string + storageAccountNameRef: + description: Reference to a Account in storage to populate storageAccountName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + storageAccountNameSelector: + description: Selector for a Account in storage to populate storageAccountName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + storageContainerName: + description: The name of the Container within the Storage Account. + type: string + storageContainerNameRef: + description: Reference to a Container in storage to populate storageContainerName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + storageContainerNameSelector: + description: Selector for a Container in storage to populate storageContainerName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + streamAnalyticsJobName: + description: The name of the Stream Analytics Job. Changing this + forces a new resource to be created. + type: string + streamAnalyticsJobNameRef: + description: Reference to a Job in streamanalytics to populate + streamAnalyticsJobName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + streamAnalyticsJobNameSelector: + description: Selector for a Job in streamanalytics to populate + streamAnalyticsJobName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + timeFormat: + description: The time format. Wherever {time} appears in path_pattern, + the value of this property is used as the time format instead. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + authenticationMode: + description: The authentication mode for the Stream Output. Possible + values are Msi and ConnectionString. Defaults to ConnectionString. + type: string + batchMaxWaitTime: + description: The maximum wait time per batch in hh:mm:ss e.g. + 00:02:00 for two minutes. + type: string + batchMinRows: + description: The minimum number of rows per batch (must be between + 0 and 1000000). + type: number + blobWriteMode: + description: Determines whether blob blocks are either committed + automatically or appended. Possible values are Append and Once. + Defaults to Append. + type: string + dateFormat: + description: The date format. Wherever {date} appears in path_pattern, + the value of this property is used as the date format instead. + type: string + pathPattern: + description: The blob path pattern. Not a regular expression. + It represents a pattern against which blob names will be matched + to determine whether or not they should be included as input + or output to the job. + type: string + serialization: + description: A serialization block as defined below. + properties: + encoding: + description: The encoding of the incoming data in the case + of input and the encoding of outgoing data in the case of + output. It currently can only be set to UTF8. + type: string + fieldDelimiter: + description: "The delimiter that will be used to separate + comma-separated value (CSV) records. Possible values are + \ (space), , (comma), \t (tab), | (pipe) and ;." + type: string + format: + description: Specifies the format of the JSON the output will + be written in. Possible values are Array and LineSeparated. + type: string + type: + description: The serialization format used for outgoing data + streams. 
Possible values are Avro, Csv, Json and Parquet. + type: string + type: object + storageAccountName: + description: The name of the Storage Account. + type: string + storageAccountNameRef: + description: Reference to a Account in storage to populate storageAccountName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + storageAccountNameSelector: + description: Selector for a Account in storage to populate storageAccountName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + storageContainerName: + description: The name of the Container within the Storage Account. + type: string + storageContainerNameRef: + description: Reference to a Container in storage to populate storageContainerName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + storageContainerNameSelector: + description: Selector for a Container in storage to populate storageContainerName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + streamAnalyticsJobName: + description: The name of the Stream Analytics Job. Changing this + forces a new resource to be created. + type: string + streamAnalyticsJobNameRef: + description: Reference to a Job in streamanalytics to populate + streamAnalyticsJobName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + streamAnalyticsJobNameSelector: + description: Selector for a Job in streamanalytics to populate + streamAnalyticsJobName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + timeFormat: + description: The time format. Wherever {time} appears in path_pattern, + the value of this property is used as the time format instead. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. 
If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.dateFormat is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.dateFormat) + || (has(self.initProvider) && has(self.initProvider.dateFormat))' + - message: spec.forProvider.pathPattern is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.pathPattern) + || (has(self.initProvider) && has(self.initProvider.pathPattern))' + - message: spec.forProvider.serialization is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.serialization) + || (has(self.initProvider) && has(self.initProvider.serialization))' + - message: spec.forProvider.timeFormat is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.timeFormat) + || (has(self.initProvider) && has(self.initProvider.timeFormat))' + status: + description: OutputBlobStatus defines the observed state of OutputBlob. + properties: + atProvider: + properties: + authenticationMode: + description: The authentication mode for the Stream Output. Possible + values are Msi and ConnectionString. Defaults to ConnectionString. + type: string + batchMaxWaitTime: + description: The maximum wait time per batch in hh:mm:ss e.g. + 00:02:00 for two minutes. + type: string + batchMinRows: + description: The minimum number of rows per batch (must be between + 0 and 1000000). 
+ type: number + blobWriteMode: + description: Determines whether blob blocks are either committed + automatically or appended. Possible values are Append and Once. + Defaults to Append. + type: string + dateFormat: + description: The date format. Wherever {date} appears in path_pattern, + the value of this property is used as the date format instead. + type: string + id: + description: The ID of the Stream Analytics Output Blob Storage. + type: string + pathPattern: + description: The blob path pattern. Not a regular expression. + It represents a pattern against which blob names will be matched + to determine whether or not they should be included as input + or output to the job. + type: string + resourceGroupName: + description: The name of the Resource Group where the Stream Analytics + Job exists. Changing this forces a new resource to be created. + type: string + serialization: + description: A serialization block as defined below. + properties: + encoding: + description: The encoding of the incoming data in the case + of input and the encoding of outgoing data in the case of + output. It currently can only be set to UTF8. + type: string + fieldDelimiter: + description: "The delimiter that will be used to separate + comma-separated value (CSV) records. Possible values are + \ (space), , (comma), \t (tab), | (pipe) and ;." + type: string + format: + description: Specifies the format of the JSON the output will + be written in. Possible values are Array and LineSeparated. + type: string + type: + description: The serialization format used for outgoing data + streams. Possible values are Avro, Csv, Json and Parquet. + type: string + type: object + storageAccountName: + description: The name of the Storage Account. + type: string + storageContainerName: + description: The name of the Container within the Storage Account. + type: string + streamAnalyticsJobName: + description: The name of the Stream Analytics Job. 
Changing this + forces a new resource to be created. + type: string + timeFormat: + description: The time format. Wherever {time} appears in path_pattern, + the value of this property is used as the time format instead. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/streamanalytics.azure.upbound.io_outputeventhubs.yaml b/package/crds/streamanalytics.azure.upbound.io_outputeventhubs.yaml index 23588f127..56be3e10b 100644 --- a/package/crds/streamanalytics.azure.upbound.io_outputeventhubs.yaml +++ b/package/crds/streamanalytics.azure.upbound.io_outputeventhubs.yaml @@ -895,3 +895,871 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: OutputEventHub is the Schema for the OutputEventHubs API. Manages + a Stream Analytics Output to an EventHub. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: OutputEventHubSpec defines the desired state of OutputEventHub + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + authenticationMode: + description: The authentication mode for the Stream Output. Possible + values are Msi and ConnectionString. Defaults to ConnectionString. + type: string + eventhubName: + description: The name of the Event Hub. + type: string + eventhubNameRef: + description: Reference to a EventHub in eventhub to populate eventhubName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + eventhubNameSelector: + description: Selector for a EventHub in eventhub to populate eventhubName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + partitionKey: + description: The column that is used for the Event Hub partition + key. + type: string + propertyColumns: + description: A list of property columns to add to the Event Hub + output. + items: + type: string + type: array + resourceGroupName: + description: The name of the Resource Group where the Stream Analytics + Job exists. Changing this forces a new resource to be created. 
+ type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + serialization: + description: A serialization block as defined below. + properties: + encoding: + description: The encoding of the incoming data in the case + of input and the encoding of outgoing data in the case of + output. It currently can only be set to UTF8. + type: string + fieldDelimiter: + description: "The delimiter that will be used to separate + comma-separated value (CSV) records. Possible values are + \ (space), , (comma), \t (tab), | (pipe) and ;." + type: string + format: + description: Specifies the format of the JSON the output will + be written in. Possible values are Array and LineSeparated. + type: string + type: + description: The serialization format used for outgoing data + streams. Possible values are Avro, Csv, Json and Parquet. + type: string + type: object + servicebusNamespace: + description: The namespace that is associated with the desired + Event Hub, Service Bus Queue, Service Bus Topic, etc. + type: string + servicebusNamespaceRef: + description: Reference to a EventHubNamespace in eventhub to populate + servicebusNamespace. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + servicebusNamespaceSelector: + description: Selector for a EventHubNamespace in eventhub to populate + servicebusNamespace. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sharedAccessPolicyKeySecretRef: + description: The shared access policy key for the specified shared + access policy. Required when authentication_mode is set to ConnectionString. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - key + - name + - namespace + type: object + sharedAccessPolicyName: + description: The shared access policy name for the Event Hub, + Service Bus Queue, Service Bus Topic, etc. Required when authentication_mode + is set to ConnectionString. + type: string + streamAnalyticsJobName: + description: The name of the Stream Analytics Job. Changing this + forces a new resource to be created. + type: string + required: + - streamAnalyticsJobName + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + authenticationMode: + description: The authentication mode for the Stream Output. Possible + values are Msi and ConnectionString. Defaults to ConnectionString. + type: string + eventhubName: + description: The name of the Event Hub. + type: string + eventhubNameRef: + description: Reference to a EventHub in eventhub to populate eventhubName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + eventhubNameSelector: + description: Selector for a EventHub in eventhub to populate eventhubName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + partitionKey: + description: The column that is used for the Event Hub partition + key. + type: string + propertyColumns: + description: A list of property columns to add to the Event Hub + output. + items: + type: string + type: array + serialization: + description: A serialization block as defined below. 
+ properties: + encoding: + description: The encoding of the incoming data in the case + of input and the encoding of outgoing data in the case of + output. It currently can only be set to UTF8. + type: string + fieldDelimiter: + description: "The delimiter that will be used to separate + comma-separated value (CSV) records. Possible values are + \ (space), , (comma), \t (tab), | (pipe) and ;." + type: string + format: + description: Specifies the format of the JSON the output will + be written in. Possible values are Array and LineSeparated. + type: string + type: + description: The serialization format used for outgoing data + streams. Possible values are Avro, Csv, Json and Parquet. + type: string + type: object + servicebusNamespace: + description: The namespace that is associated with the desired + Event Hub, Service Bus Queue, Service Bus Topic, etc. + type: string + servicebusNamespaceRef: + description: Reference to a EventHubNamespace in eventhub to populate + servicebusNamespace. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + servicebusNamespaceSelector: + description: Selector for a EventHubNamespace in eventhub to populate + servicebusNamespace. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sharedAccessPolicyName: + description: The shared access policy name for the Event Hub, + Service Bus Queue, Service Bus Topic, etc. Required when authentication_mode + is set to ConnectionString. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. 
Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.serialization is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.serialization) + || (has(self.initProvider) && has(self.initProvider.serialization))' + status: + description: OutputEventHubStatus defines the observed state of OutputEventHub. + properties: + atProvider: + properties: + authenticationMode: + description: The authentication mode for the Stream Output. Possible + values are Msi and ConnectionString. Defaults to ConnectionString. 
+ type: string + eventhubName: + description: The name of the Event Hub. + type: string + id: + description: The ID of the Stream Analytics Output EventHub. + type: string + partitionKey: + description: The column that is used for the Event Hub partition + key. + type: string + propertyColumns: + description: A list of property columns to add to the Event Hub + output. + items: + type: string + type: array + resourceGroupName: + description: The name of the Resource Group where the Stream Analytics + Job exists. Changing this forces a new resource to be created. + type: string + serialization: + description: A serialization block as defined below. + properties: + encoding: + description: The encoding of the incoming data in the case + of input and the encoding of outgoing data in the case of + output. It currently can only be set to UTF8. + type: string + fieldDelimiter: + description: "The delimiter that will be used to separate + comma-separated value (CSV) records. Possible values are + \ (space), , (comma), \t (tab), | (pipe) and ;." + type: string + format: + description: Specifies the format of the JSON the output will + be written in. Possible values are Array and LineSeparated. + type: string + type: + description: The serialization format used for outgoing data + streams. Possible values are Avro, Csv, Json and Parquet. + type: string + type: object + servicebusNamespace: + description: The namespace that is associated with the desired + Event Hub, Service Bus Queue, Service Bus Topic, etc. + type: string + sharedAccessPolicyName: + description: The shared access policy name for the Event Hub, + Service Bus Queue, Service Bus Topic, etc. Required when authentication_mode + is set to ConnectionString. + type: string + streamAnalyticsJobName: + description: The name of the Stream Analytics Job. Changing this + forces a new resource to be created. + type: string + type: object + conditions: + description: Conditions of the resource. 
+ items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/streamanalytics.azure.upbound.io_outputservicebusqueues.yaml b/package/crds/streamanalytics.azure.upbound.io_outputservicebusqueues.yaml index 0de0b4522..65fd674af 100644 --- a/package/crds/streamanalytics.azure.upbound.io_outputservicebusqueues.yaml +++ b/package/crds/streamanalytics.azure.upbound.io_outputservicebusqueues.yaml @@ -1159,3 +1159,1135 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: OutputServiceBusQueue is the Schema for the OutputServiceBusQueues + API. Manages a Stream Analytics Output to a ServiceBus Queue. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: OutputServiceBusQueueSpec defines the desired state of OutputServiceBusQueue + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + authenticationMode: + description: The authentication mode for the Stream Output. Possible + values are Msi and ConnectionString. Defaults to ConnectionString. + type: string + name: + description: The name of the Stream Output. Changing this forces + a new resource to be created. + type: string + propertyColumns: + description: A list of property columns to add to the Service + Bus Queue output. + items: + type: string + type: array + queueName: + description: The name of the Service Bus Queue. + type: string + queueNameRef: + description: Reference to a Queue in servicebus to populate queueName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + queueNameSelector: + description: Selector for a Queue in servicebus to populate queueName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + resourceGroupName: + description: The name of the Resource Group where the Stream Analytics + Job exists. Changing this forces a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + serialization: + description: A serialization block as defined below. + properties: + encoding: + description: The encoding of the incoming data in the case + of input and the encoding of outgoing data in the case of + output. It currently can only be set to UTF8. + type: string + fieldDelimiter: + description: "The delimiter that will be used to separate + comma-separated value (CSV) records. Possible values are + \ (space), , (comma), \t (tab), | (pipe) and ;." + type: string + format: + description: Specifies the format of the JSON the output will + be written in. Possible values are Array and LineSeparated. + type: string + type: + description: The serialization format used for outgoing data + streams. Possible values are Avro, Csv, Json and Parquet. + type: string + type: object + servicebusNamespace: + description: The namespace that is associated with the desired + Event Hub, Service Bus Queue, Service Bus Topic, etc. + type: string + servicebusNamespaceRef: + description: Reference to a ServiceBusNamespace in servicebus + to populate servicebusNamespace. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + servicebusNamespaceSelector: + description: Selector for a ServiceBusNamespace in servicebus + to populate servicebusNamespace. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sharedAccessPolicyKeySecretRef: + description: The shared access policy key for the specified shared + access policy. Required if authentication_mode is ConnectionString. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + sharedAccessPolicyName: + description: The shared access policy name for the Event Hub, + Service Bus Queue, Service Bus Topic, etc. 
Required if authentication_mode + is ConnectionString. + type: string + streamAnalyticsJobName: + description: The name of the Stream Analytics Job. Changing this + forces a new resource to be created. + type: string + streamAnalyticsJobNameRef: + description: Reference to a Job in streamanalytics to populate + streamAnalyticsJobName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + streamAnalyticsJobNameSelector: + description: Selector for a Job in streamanalytics to populate + streamAnalyticsJobName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + systemPropertyColumns: + additionalProperties: + type: string + description: A key-value pair of system property columns that + will be attached to the outgoing messages for the Service Bus + Queue Output. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + authenticationMode: + description: The authentication mode for the Stream Output. Possible + values are Msi and ConnectionString. Defaults to ConnectionString. + type: string + name: + description: The name of the Stream Output. Changing this forces + a new resource to be created. + type: string + propertyColumns: + description: A list of property columns to add to the Service + Bus Queue output. + items: + type: string + type: array + queueName: + description: The name of the Service Bus Queue. 
+ type: string + queueNameRef: + description: Reference to a Queue in servicebus to populate queueName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + queueNameSelector: + description: Selector for a Queue in servicebus to populate queueName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + resourceGroupName: + description: The name of the Resource Group where the Stream Analytics + Job exists. Changing this forces a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + serialization: + description: A serialization block as defined below. + properties: + encoding: + description: The encoding of the incoming data in the case + of input and the encoding of outgoing data in the case of + output. It currently can only be set to UTF8. + type: string + fieldDelimiter: + description: "The delimiter that will be used to separate + comma-separated value (CSV) records. Possible values are + \ (space), , (comma), \t (tab), | (pipe) and ;." + type: string + format: + description: Specifies the format of the JSON the output will + be written in. Possible values are Array and LineSeparated. + type: string + type: + description: The serialization format used for outgoing data + streams. Possible values are Avro, Csv, Json and Parquet. + type: string + type: object + servicebusNamespace: + description: The namespace that is associated with the desired + Event Hub, Service Bus Queue, Service Bus Topic, etc. + type: string + servicebusNamespaceRef: + description: Reference to a ServiceBusNamespace in servicebus + to populate servicebusNamespace. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + servicebusNamespaceSelector: + description: Selector for a ServiceBusNamespace in servicebus + to populate servicebusNamespace. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sharedAccessPolicyName: + description: The shared access policy name for the Event Hub, + Service Bus Queue, Service Bus Topic, etc. Required if authentication_mode + is ConnectionString. + type: string + streamAnalyticsJobName: + description: The name of the Stream Analytics Job. Changing this + forces a new resource to be created. + type: string + streamAnalyticsJobNameRef: + description: Reference to a Job in streamanalytics to populate + streamAnalyticsJobName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + streamAnalyticsJobNameSelector: + description: Selector for a Job in streamanalytics to populate + streamAnalyticsJobName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + systemPropertyColumns: + additionalProperties: + type: string + description: A key-value pair of system property columns that + will be attached to the outgoing messages for the Service Bus + Queue Output. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.serialization is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.serialization) + || (has(self.initProvider) && has(self.initProvider.serialization))' + status: + description: OutputServiceBusQueueStatus defines the observed state of + OutputServiceBusQueue. + properties: + atProvider: + properties: + authenticationMode: + description: The authentication mode for the Stream Output. Possible + values are Msi and ConnectionString. Defaults to ConnectionString. + type: string + id: + description: The ID of the Stream Analytics Output ServiceBus + Queue. 
+ type: string + name: + description: The name of the Stream Output. Changing this forces + a new resource to be created. + type: string + propertyColumns: + description: A list of property columns to add to the Service + Bus Queue output. + items: + type: string + type: array + queueName: + description: The name of the Service Bus Queue. + type: string + resourceGroupName: + description: The name of the Resource Group where the Stream Analytics + Job exists. Changing this forces a new resource to be created. + type: string + serialization: + description: A serialization block as defined below. + properties: + encoding: + description: The encoding of the incoming data in the case + of input and the encoding of outgoing data in the case of + output. It currently can only be set to UTF8. + type: string + fieldDelimiter: + description: "The delimiter that will be used to separate + comma-separated value (CSV) records. Possible values are + \ (space), , (comma), \t (tab), | (pipe) and ;." + type: string + format: + description: Specifies the format of the JSON the output will + be written in. Possible values are Array and LineSeparated. + type: string + type: + description: The serialization format used for outgoing data + streams. Possible values are Avro, Csv, Json and Parquet. + type: string + type: object + servicebusNamespace: + description: The namespace that is associated with the desired + Event Hub, Service Bus Queue, Service Bus Topic, etc. + type: string + sharedAccessPolicyName: + description: The shared access policy name for the Event Hub, + Service Bus Queue, Service Bus Topic, etc. Required if authentication_mode + is ConnectionString. + type: string + streamAnalyticsJobName: + description: The name of the Stream Analytics Job. Changing this + forces a new resource to be created. 
+ type: string + systemPropertyColumns: + additionalProperties: + type: string + description: A key-value pair of system property columns that + will be attached to the outgoing messages for the Service Bus + Queue Output. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/streamanalytics.azure.upbound.io_outputservicebustopics.yaml b/package/crds/streamanalytics.azure.upbound.io_outputservicebustopics.yaml index 1d087370a..8d3b523ff 100644 --- a/package/crds/streamanalytics.azure.upbound.io_outputservicebustopics.yaml +++ b/package/crds/streamanalytics.azure.upbound.io_outputservicebustopics.yaml @@ -1159,3 +1159,1135 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: OutputServiceBusTopic is the Schema for the OutputServiceBusTopics + API. Manages a Stream Analytics Output to a ServiceBus Topic. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: OutputServiceBusTopicSpec defines the desired state of OutputServiceBusTopic + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + authenticationMode: + description: The authentication mode for the Stream Output. Possible + values are Msi and ConnectionString. Defaults to ConnectionString. + type: string + name: + description: The name of the Stream Output. Changing this forces + a new resource to be created. + type: string + propertyColumns: + description: A list of property columns to add to the Service + Bus Topic output. + items: + type: string + type: array + resourceGroupName: + description: The name of the Resource Group where the Stream Analytics + Job exists. Changing this forces a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + serialization: + description: A serialization block as defined below. 
+ properties: + encoding: + description: The encoding of the incoming data in the case + of input and the encoding of outgoing data in the case of + output. It currently can only be set to UTF8. + type: string + fieldDelimiter: + description: "The delimiter that will be used to separate + comma-separated value (CSV) records. Possible values are + \ (space), , (comma), \t (tab), | (pipe) and ;." + type: string + format: + description: Specifies the format of the JSON the output will + be written in. Possible values are Array and LineSeparated. + type: string + type: + description: The serialization format used for outgoing data + streams. Possible values are Avro, Csv, Json and Parquet. + type: string + type: object + servicebusNamespace: + description: The namespace that is associated with the desired + Event Hub, Service Bus Topic, Service Bus Topic, etc. + type: string + servicebusNamespaceRef: + description: Reference to a ServiceBusNamespace in servicebus + to populate servicebusNamespace. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + servicebusNamespaceSelector: + description: Selector for a ServiceBusNamespace in servicebus + to populate servicebusNamespace. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sharedAccessPolicyKeySecretRef: + description: The shared access policy key for the specified shared + access policy. Required if authentication_mode is ConnectionString. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + sharedAccessPolicyName: + description: The shared access policy name for the Event Hub, + Service Bus Queue, Service Bus Topic, etc. Required if authentication_mode + is ConnectionString. 
+ type: string + streamAnalyticsJobName: + description: The name of the Stream Analytics Job. Changing this + forces a new resource to be created. + type: string + streamAnalyticsJobNameRef: + description: Reference to a Job in streamanalytics to populate + streamAnalyticsJobName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + streamAnalyticsJobNameSelector: + description: Selector for a Job in streamanalytics to populate + streamAnalyticsJobName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + systemPropertyColumns: + additionalProperties: + type: string + description: A key-value pair of system property columns that + will be attached to the outgoing messages for the Service Bus + Topic Output. + type: object + x-kubernetes-map-type: granular + topicName: + description: The name of the Service Bus Topic. + type: string + topicNameRef: + description: Reference to a Topic in servicebus to populate topicName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + topicNameSelector: + description: Selector for a Topic in servicebus to populate topicName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + authenticationMode: + description: The authentication mode for the Stream Output. Possible + values are Msi and ConnectionString. Defaults to ConnectionString. + type: string + name: + description: The name of the Stream Output. Changing this forces + a new resource to be created. 
+ type: string + propertyColumns: + description: A list of property columns to add to the Service + Bus Topic output. + items: + type: string + type: array + resourceGroupName: + description: The name of the Resource Group where the Stream Analytics + Job exists. Changing this forces a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + serialization: + description: A serialization block as defined below. + properties: + encoding: + description: The encoding of the incoming data in the case + of input and the encoding of outgoing data in the case of + output. It currently can only be set to UTF8. + type: string + fieldDelimiter: + description: "The delimiter that will be used to separate + comma-separated value (CSV) records. Possible values are + \ (space), , (comma), \t (tab), | (pipe) and ;." + type: string + format: + description: Specifies the format of the JSON the output will + be written in. Possible values are Array and LineSeparated. + type: string + type: + description: The serialization format used for outgoing data + streams. Possible values are Avro, Csv, Json and Parquet. + type: string + type: object + servicebusNamespace: + description: The namespace that is associated with the desired + Event Hub, Service Bus Topic, Service Bus Topic, etc. + type: string + servicebusNamespaceRef: + description: Reference to a ServiceBusNamespace in servicebus + to populate servicebusNamespace. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + servicebusNamespaceSelector: + description: Selector for a ServiceBusNamespace in servicebus + to populate servicebusNamespace. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sharedAccessPolicyName: + description: The shared access policy name for the Event Hub, + Service Bus Queue, Service Bus Topic, etc. 
Required if authentication_mode + is ConnectionString. + type: string + streamAnalyticsJobName: + description: The name of the Stream Analytics Job. Changing this + forces a new resource to be created. + type: string + streamAnalyticsJobNameRef: + description: Reference to a Job in streamanalytics to populate + streamAnalyticsJobName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + streamAnalyticsJobNameSelector: + description: Selector for a Job in streamanalytics to populate + streamAnalyticsJobName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + systemPropertyColumns: + additionalProperties: + type: string + description: A key-value pair of system property columns that + will be attached to the outgoing messages for the Service Bus + Topic Output. + type: object + x-kubernetes-map-type: granular + topicName: + description: The name of the Service Bus Topic. + type: string + topicNameRef: + description: Reference to a Topic in servicebus to populate topicName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + topicNameSelector: + description: Selector for a Topic in servicebus to populate topicName. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.serialization is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.serialization) + || (has(self.initProvider) && has(self.initProvider.serialization))' + status: + description: OutputServiceBusTopicStatus defines the observed state of + OutputServiceBusTopic. + properties: + atProvider: + properties: + authenticationMode: + description: The authentication mode for the Stream Output. Possible + values are Msi and ConnectionString. Defaults to ConnectionString. + type: string + id: + description: The ID of the Stream Analytics Output ServiceBus + Topic. 
+ type: string + name: + description: The name of the Stream Output. Changing this forces + a new resource to be created. + type: string + propertyColumns: + description: A list of property columns to add to the Service + Bus Topic output. + items: + type: string + type: array + resourceGroupName: + description: The name of the Resource Group where the Stream Analytics + Job exists. Changing this forces a new resource to be created. + type: string + serialization: + description: A serialization block as defined below. + properties: + encoding: + description: The encoding of the incoming data in the case + of input and the encoding of outgoing data in the case of + output. It currently can only be set to UTF8. + type: string + fieldDelimiter: + description: "The delimiter that will be used to separate + comma-separated value (CSV) records. Possible values are + \ (space), , (comma), \t (tab), | (pipe) and ;." + type: string + format: + description: Specifies the format of the JSON the output will + be written in. Possible values are Array and LineSeparated. + type: string + type: + description: The serialization format used for outgoing data + streams. Possible values are Avro, Csv, Json and Parquet. + type: string + type: object + servicebusNamespace: + description: The namespace that is associated with the desired + Event Hub, Service Bus Topic, Service Bus Topic, etc. + type: string + sharedAccessPolicyName: + description: The shared access policy name for the Event Hub, + Service Bus Queue, Service Bus Topic, etc. Required if authentication_mode + is ConnectionString. + type: string + streamAnalyticsJobName: + description: The name of the Stream Analytics Job. Changing this + forces a new resource to be created. + type: string + systemPropertyColumns: + additionalProperties: + type: string + description: A key-value pair of system property columns that + will be attached to the outgoing messages for the Service Bus + Topic Output. 
+ type: object + x-kubernetes-map-type: granular + topicName: + description: The name of the Service Bus Topic. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/streamanalytics.azure.upbound.io_referenceinputblobs.yaml b/package/crds/streamanalytics.azure.upbound.io_referenceinputblobs.yaml index f17d21421..1e34b29a5 100644 --- a/package/crds/streamanalytics.azure.upbound.io_referenceinputblobs.yaml +++ b/package/crds/streamanalytics.azure.upbound.io_referenceinputblobs.yaml @@ -1138,3 +1138,1117 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ReferenceInputBlob is the Schema for the ReferenceInputBlobs + API. Manages a Stream Analytics Reference Input Blob. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ReferenceInputBlobSpec defines the desired state of ReferenceInputBlob + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + authenticationMode: + description: The authentication mode for the Stream Analytics + Reference Input. Possible values are Msi and ConnectionString. + Defaults to ConnectionString. + type: string + dateFormat: + description: The date format. Wherever {date} appears in path_pattern, + the value of this property is used as the date format instead. + type: string + name: + description: The name of the Reference Input Blob. Changing this + forces a new resource to be created. + type: string + pathPattern: + description: The blob path pattern. Not a regular expression. + It represents a pattern against which blob names will be matched + to determine whether or not they should be included as input + or output to the job. + type: string + resourceGroupName: + description: The name of the Resource Group where the Stream Analytics + Job exists. Changing this forces a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + serialization: + description: A serialization block as defined below. + properties: + encoding: + description: The encoding of the incoming data in the case + of input and the encoding of outgoing data in the case of + output. It currently can only be set to UTF8. + type: string + fieldDelimiter: + description: "The delimiter that will be used to separate + comma-separated value (CSV) records. Possible values are + \ (space), , (comma), \t (tab), | (pipe) and ;." + type: string + type: + description: The serialization format used for the reference + data. Possible values are Avro, Csv and Json. + type: string + type: object + storageAccountKeySecretRef: + description: The Access Key which should be used to connect to + this Storage Account. Required if authentication_mode is ConnectionString. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + storageAccountName: + description: The name of the Storage Account that has the blob + container with reference data. + type: string + storageAccountNameRef: + description: Reference to a Account in storage to populate storageAccountName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + storageAccountNameSelector: + description: Selector for a Account in storage to populate storageAccountName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + storageContainerName: + description: The name of the Container within the Storage Account. + type: string + storageContainerNameRef: + description: Reference to a Container in storage to populate storageContainerName. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + storageContainerNameSelector: + description: Selector for a Container in storage to populate storageContainerName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + streamAnalyticsJobName: + description: The name of the Stream Analytics Job. Changing this + forces a new resource to be created. + type: string + streamAnalyticsJobNameRef: + description: Reference to a Job in streamanalytics to populate + streamAnalyticsJobName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + streamAnalyticsJobNameSelector: + description: Selector for a Job in streamanalytics to populate + streamAnalyticsJobName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + timeFormat: + description: The time format. Wherever {time} appears in path_pattern, + the value of this property is used as the time format instead. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + authenticationMode: + description: The authentication mode for the Stream Analytics + Reference Input. Possible values are Msi and ConnectionString. + Defaults to ConnectionString. + type: string + dateFormat: + description: The date format. Wherever {date} appears in path_pattern, + the value of this property is used as the date format instead. + type: string + name: + description: The name of the Reference Input Blob. Changing this + forces a new resource to be created. + type: string + pathPattern: + description: The blob path pattern. Not a regular expression. 
+ It represents a pattern against which blob names will be matched + to determine whether or not they should be included as input + or output to the job. + type: string + resourceGroupName: + description: The name of the Resource Group where the Stream Analytics + Job exists. Changing this forces a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + serialization: + description: A serialization block as defined below. + properties: + encoding: + description: The encoding of the incoming data in the case + of input and the encoding of outgoing data in the case of + output. It currently can only be set to UTF8. + type: string + fieldDelimiter: + description: "The delimiter that will be used to separate + comma-separated value (CSV) records. Possible values are + \ (space), , (comma), \t (tab), | (pipe) and ;." + type: string + type: + description: The serialization format used for the reference + data. Possible values are Avro, Csv and Json. + type: string + type: object + storageAccountName: + description: The name of the Storage Account that has the blob + container with reference data. + type: string + storageAccountNameRef: + description: Reference to a Account in storage to populate storageAccountName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + storageAccountNameSelector: + description: Selector for a Account in storage to populate storageAccountName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + storageContainerName: + description: The name of the Container within the Storage Account. + type: string + storageContainerNameRef: + description: Reference to a Container in storage to populate storageContainerName. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + storageContainerNameSelector: + description: Selector for a Container in storage to populate storageContainerName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + streamAnalyticsJobName: + description: The name of the Stream Analytics Job. Changing this + forces a new resource to be created. + type: string + streamAnalyticsJobNameRef: + description: Reference to a Job in streamanalytics to populate + streamAnalyticsJobName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + streamAnalyticsJobNameSelector: + description: Selector for a Job in streamanalytics to populate + streamAnalyticsJobName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + timeFormat: + description: The time format. Wherever {time} appears in path_pattern, + the value of this property is used as the time format instead. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.dateFormat is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.dateFormat) + || (has(self.initProvider) && has(self.initProvider.dateFormat))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.pathPattern is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.pathPattern) + || (has(self.initProvider) && has(self.initProvider.pathPattern))' + - message: spec.forProvider.serialization is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.serialization) + || (has(self.initProvider) && has(self.initProvider.serialization))' + - message: spec.forProvider.timeFormat is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || 
has(self.forProvider.timeFormat) + || (has(self.initProvider) && has(self.initProvider.timeFormat))' + status: + description: ReferenceInputBlobStatus defines the observed state of ReferenceInputBlob. + properties: + atProvider: + properties: + authenticationMode: + description: The authentication mode for the Stream Analytics + Reference Input. Possible values are Msi and ConnectionString. + Defaults to ConnectionString. + type: string + dateFormat: + description: The date format. Wherever {date} appears in path_pattern, + the value of this property is used as the date format instead. + type: string + id: + description: The ID of the Stream Analytics Reference Input Blob. + type: string + name: + description: The name of the Reference Input Blob. Changing this + forces a new resource to be created. + type: string + pathPattern: + description: The blob path pattern. Not a regular expression. + It represents a pattern against which blob names will be matched + to determine whether or not they should be included as input + or output to the job. + type: string + resourceGroupName: + description: The name of the Resource Group where the Stream Analytics + Job exists. Changing this forces a new resource to be created. + type: string + serialization: + description: A serialization block as defined below. + properties: + encoding: + description: The encoding of the incoming data in the case + of input and the encoding of outgoing data in the case of + output. It currently can only be set to UTF8. + type: string + fieldDelimiter: + description: "The delimiter that will be used to separate + comma-separated value (CSV) records. Possible values are + \ (space), , (comma), \t (tab), | (pipe) and ;." + type: string + type: + description: The serialization format used for the reference + data. Possible values are Avro, Csv and Json. 
+ type: string + type: object + storageAccountName: + description: The name of the Storage Account that has the blob + container with reference data. + type: string + storageContainerName: + description: The name of the Container within the Storage Account. + type: string + streamAnalyticsJobName: + description: The name of the Stream Analytics Job. Changing this + forces a new resource to be created. + type: string + timeFormat: + description: The time format. Wherever {time} appears in path_pattern, + the value of this property is used as the time format instead. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/streamanalytics.azure.upbound.io_streaminputblobs.yaml b/package/crds/streamanalytics.azure.upbound.io_streaminputblobs.yaml index 2e33b94a1..64119a8a7 100644 --- a/package/crds/streamanalytics.azure.upbound.io_streaminputblobs.yaml +++ b/package/crds/streamanalytics.azure.upbound.io_streaminputblobs.yaml @@ -1123,3 +1123,1102 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: StreamInputBlob is the Schema for the StreamInputBlobs API. Manages + a Stream Analytics Stream Input Blob. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: StreamInputBlobSpec defines the desired state of StreamInputBlob + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + dateFormat: + description: The date format. Wherever {date} appears in path_pattern, + the value of this property is used as the date format instead. + type: string + name: + description: The name of the Stream Input Blob. Changing this + forces a new resource to be created. + type: string + pathPattern: + description: The blob path pattern. Not a regular expression. + It represents a pattern against which blob names will be matched + to determine whether or not they should be included as input + or output to the job. + type: string + resourceGroupName: + description: The name of the Resource Group where the Stream Analytics + Job exists. Changing this forces a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + serialization: + description: A serialization block as defined below. + properties: + encoding: + description: The encoding of the incoming data in the case + of input and the encoding of outgoing data in the case of + output. It currently can only be set to UTF8. + type: string + fieldDelimiter: + description: "The delimiter that will be used to separate + comma-separated value (CSV) records. Possible values are + \ (space), , (comma), \t (tab), | (pipe) and ;." + type: string + type: + description: The serialization format used for incoming data + streams. Possible values are Avro, Csv and Json. + type: string + type: object + storageAccountKeySecretRef: + description: The Access Key which should be used to connect to + this Storage Account. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + storageAccountName: + description: The name of the Storage Account. + type: string + storageAccountNameRef: + description: Reference to a Account in storage to populate storageAccountName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + storageAccountNameSelector: + description: Selector for a Account in storage to populate storageAccountName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + storageContainerName: + description: The name of the Container within the Storage Account. + type: string + storageContainerNameRef: + description: Reference to a Container in storage to populate storageContainerName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + storageContainerNameSelector: + description: Selector for a Container in storage to populate storageContainerName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + streamAnalyticsJobName: + description: The name of the Stream Analytics Job. Changing this + forces a new resource to be created. + type: string + streamAnalyticsJobNameRef: + description: Reference to a Job in streamanalytics to populate + streamAnalyticsJobName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + streamAnalyticsJobNameSelector: + description: Selector for a Job in streamanalytics to populate + streamAnalyticsJobName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + timeFormat: + description: The time format. Wherever {time} appears in path_pattern, + the value of this property is used as the time format instead. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + dateFormat: + description: The date format. Wherever {date} appears in path_pattern, + the value of this property is used as the date format instead. + type: string + name: + description: The name of the Stream Input Blob. Changing this + forces a new resource to be created. + type: string + pathPattern: + description: The blob path pattern. Not a regular expression. + It represents a pattern against which blob names will be matched + to determine whether or not they should be included as input + or output to the job. + type: string + resourceGroupName: + description: The name of the Resource Group where the Stream Analytics + Job exists. 
Changing this forces a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + serialization: + description: A serialization block as defined below. + properties: + encoding: + description: The encoding of the incoming data in the case + of input and the encoding of outgoing data in the case of + output. It currently can only be set to UTF8. + type: string + fieldDelimiter: + description: "The delimiter that will be used to separate + comma-separated value (CSV) records. Possible values are + \ (space), , (comma), \t (tab), | (pipe) and ;." + type: string + type: + description: The serialization format used for incoming data + streams. Possible values are Avro, Csv and Json. + type: string + type: object + storageAccountName: + description: The name of the Storage Account. + type: string + storageAccountNameRef: + description: Reference to a Account in storage to populate storageAccountName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + storageAccountNameSelector: + description: Selector for a Account in storage to populate storageAccountName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + storageContainerName: + description: The name of the Container within the Storage Account. + type: string + storageContainerNameRef: + description: Reference to a Container in storage to populate storageContainerName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + storageContainerNameSelector: + description: Selector for a Container in storage to populate storageContainerName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + streamAnalyticsJobName: + description: The name of the Stream Analytics Job. Changing this + forces a new resource to be created. + type: string + streamAnalyticsJobNameRef: + description: Reference to a Job in streamanalytics to populate + streamAnalyticsJobName. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + streamAnalyticsJobNameSelector: + description: Selector for a Job in streamanalytics to populate + streamAnalyticsJobName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + timeFormat: + description: The time format. Wherever {time} appears in path_pattern, + the value of this property is used as the time format instead. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.dateFormat is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.dateFormat) + || (has(self.initProvider) && has(self.initProvider.dateFormat))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.pathPattern is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.pathPattern) + || (has(self.initProvider) && has(self.initProvider.pathPattern))' + - message: spec.forProvider.serialization is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.serialization) + || (has(self.initProvider) && has(self.initProvider.serialization))' + - message: spec.forProvider.storageAccountKeySecretRef is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.storageAccountKeySecretRef)' + - message: spec.forProvider.timeFormat is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.timeFormat) + || (has(self.initProvider) && has(self.initProvider.timeFormat))' + status: + description: StreamInputBlobStatus defines the observed state of StreamInputBlob. 
+ properties: + atProvider: + properties: + dateFormat: + description: The date format. Wherever {date} appears in path_pattern, + the value of this property is used as the date format instead. + type: string + id: + description: The ID of the Stream Analytics Stream Input Blob. + type: string + name: + description: The name of the Stream Input Blob. Changing this + forces a new resource to be created. + type: string + pathPattern: + description: The blob path pattern. Not a regular expression. + It represents a pattern against which blob names will be matched + to determine whether or not they should be included as input + or output to the job. + type: string + resourceGroupName: + description: The name of the Resource Group where the Stream Analytics + Job exists. Changing this forces a new resource to be created. + type: string + serialization: + description: A serialization block as defined below. + properties: + encoding: + description: The encoding of the incoming data in the case + of input and the encoding of outgoing data in the case of + output. It currently can only be set to UTF8. + type: string + fieldDelimiter: + description: "The delimiter that will be used to separate + comma-separated value (CSV) records. Possible values are + \ (space), , (comma), \t (tab), | (pipe) and ;." + type: string + type: + description: The serialization format used for incoming data + streams. Possible values are Avro, Csv and Json. + type: string + type: object + storageAccountName: + description: The name of the Storage Account. + type: string + storageContainerName: + description: The name of the Container within the Storage Account. + type: string + streamAnalyticsJobName: + description: The name of the Stream Analytics Job. Changing this + forces a new resource to be created. + type: string + timeFormat: + description: The time format. Wherever {time} appears in path_pattern, + the value of this property is used as the time format instead. 
+ type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/streamanalytics.azure.upbound.io_streaminputeventhubs.yaml b/package/crds/streamanalytics.azure.upbound.io_streaminputeventhubs.yaml index 92d602aef..7fd3700e8 100644 --- a/package/crds/streamanalytics.azure.upbound.io_streaminputeventhubs.yaml +++ b/package/crds/streamanalytics.azure.upbound.io_streaminputeventhubs.yaml @@ -1282,3 +1282,1261 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: StreamInputEventHub is the Schema for the StreamInputEventHubs + API. Manages a Stream Analytics Stream Input EventHub. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: StreamInputEventHubSpec defines the desired state of StreamInputEventHub + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + authenticationMode: + description: The authentication mode for the Stream Output. Possible + values are Msi and ConnectionString. Defaults to ConnectionString. + type: string + eventhubConsumerGroupName: + description: The name of an Event Hub Consumer Group that should + be used to read events from the Event Hub. Specifying distinct + consumer group names for multiple inputs allows each of those + inputs to receive the same events from the Event Hub. If not + set the input will use the Event Hub's default consumer group. + type: string + eventhubConsumerGroupNameRef: + description: Reference to a ConsumerGroup in eventhub to populate + eventhubConsumerGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + eventhubConsumerGroupNameSelector: + description: Selector for a ConsumerGroup in eventhub to populate + eventhubConsumerGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + eventhubName: + description: The name of the Event Hub. 
+ type: string + eventhubNameRef: + description: Reference to a EventHub in eventhub to populate eventhubName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + eventhubNameSelector: + description: Selector for a EventHub in eventhub to populate eventhubName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: The name of the Stream Input EventHub. Changing this + forces a new resource to be created. + type: string + partitionKey: + description: The property the input Event Hub has been partitioned + by. + type: string + resourceGroupName: + description: The name of the Resource Group where the Stream Analytics + Job exists. Changing this forces a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. 
+ type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + serialization: + description: A serialization block as defined below. + properties: + encoding: + description: The encoding of the incoming data in the case + of input and the encoding of outgoing data in the case of + output. It currently can only be set to UTF8. + type: string + fieldDelimiter: + description: "The delimiter that will be used to separate + comma-separated value (CSV) records. Possible values are + \ (space), , (comma), \t (tab), | (pipe) and ;." + type: string + type: + description: The serialization format used for incoming data + streams. Possible values are Avro, Csv and Json. + type: string + type: object + servicebusNamespace: + description: The namespace that is associated with the desired + Event Hub, Service Bus Queue, Service Bus Topic, etc. + type: string + servicebusNamespaceRef: + description: Reference to a EventHubNamespace in eventhub to populate + servicebusNamespace. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + servicebusNamespaceSelector: + description: Selector for a EventHubNamespace in eventhub to populate + servicebusNamespace. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sharedAccessPolicyKeySecretRef: + description: The shared access policy key for the specified shared + access policy. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + sharedAccessPolicyName: + description: The shared access policy name for the Event Hub, + Service Bus Queue, Service Bus Topic, etc. + type: string + streamAnalyticsJobName: + description: The name of the Stream Analytics Job. Changing this + forces a new resource to be created. + type: string + streamAnalyticsJobNameRef: + description: Reference to a Job in streamanalytics to populate + streamAnalyticsJobName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + streamAnalyticsJobNameSelector: + description: Selector for a Job in streamanalytics to populate + streamAnalyticsJobName. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + authenticationMode: + description: The authentication mode for the Stream Output. Possible + values are Msi and ConnectionString. Defaults to ConnectionString. 
+ type: string + eventhubConsumerGroupName: + description: The name of an Event Hub Consumer Group that should + be used to read events from the Event Hub. Specifying distinct + consumer group names for multiple inputs allows each of those + inputs to receive the same events from the Event Hub. If not + set the input will use the Event Hub's default consumer group. + type: string + eventhubConsumerGroupNameRef: + description: Reference to a ConsumerGroup in eventhub to populate + eventhubConsumerGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + eventhubConsumerGroupNameSelector: + description: Selector for a ConsumerGroup in eventhub to populate + eventhubConsumerGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + eventhubName: + description: The name of the Event Hub. + type: string + eventhubNameRef: + description: Reference to a EventHub in eventhub to populate eventhubName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + eventhubNameSelector: + description: Selector for a EventHub in eventhub to populate eventhubName. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: The name of the Stream Input EventHub. Changing this + forces a new resource to be created. + type: string + partitionKey: + description: The property the input Event Hub has been partitioned + by. + type: string + resourceGroupName: + description: The name of the Resource Group where the Stream Analytics + Job exists. Changing this forces a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + serialization: + description: A serialization block as defined below. 
+ properties: + encoding: + description: The encoding of the incoming data in the case + of input and the encoding of outgoing data in the case of + output. It currently can only be set to UTF8. + type: string + fieldDelimiter: + description: "The delimiter that will be used to separate + comma-separated value (CSV) records. Possible values are + \ (space), , (comma), \t (tab), | (pipe) and ;." + type: string + type: + description: The serialization format used for incoming data + streams. Possible values are Avro, Csv and Json. + type: string + type: object + servicebusNamespace: + description: The namespace that is associated with the desired + Event Hub, Service Bus Queue, Service Bus Topic, etc. + type: string + servicebusNamespaceRef: + description: Reference to a EventHubNamespace in eventhub to populate + servicebusNamespace. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + servicebusNamespaceSelector: + description: Selector for a EventHubNamespace in eventhub to populate + servicebusNamespace. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sharedAccessPolicyName: + description: The shared access policy name for the Event Hub, + Service Bus Queue, Service Bus Topic, etc. + type: string + streamAnalyticsJobName: + description: The name of the Stream Analytics Job. Changing this + forces a new resource to be created. + type: string + streamAnalyticsJobNameRef: + description: Reference to a Job in streamanalytics to populate + streamAnalyticsJobName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + streamAnalyticsJobNameSelector: + description: Selector for a Job in streamanalytics to populate + streamAnalyticsJobName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. 
+ This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.serialization is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.serialization) + || (has(self.initProvider) && has(self.initProvider.serialization))' + status: + description: StreamInputEventHubStatus defines the observed state of StreamInputEventHub. + properties: + atProvider: + properties: + authenticationMode: + description: The authentication mode for the Stream Output. Possible + values are Msi and ConnectionString. Defaults to ConnectionString. + type: string + eventhubConsumerGroupName: + description: The name of an Event Hub Consumer Group that should + be used to read events from the Event Hub. Specifying distinct + consumer group names for multiple inputs allows each of those + inputs to receive the same events from the Event Hub. If not + set the input will use the Event Hub's default consumer group. + type: string + eventhubName: + description: The name of the Event Hub. + type: string + id: + description: The ID of the Stream Analytics Stream Input EventHub. + type: string + name: + description: The name of the Stream Input EventHub. Changing this + forces a new resource to be created. + type: string + partitionKey: + description: The property the input Event Hub has been partitioned + by. + type: string + resourceGroupName: + description: The name of the Resource Group where the Stream Analytics + Job exists. Changing this forces a new resource to be created. 
+ type: string + serialization: + description: A serialization block as defined below. + properties: + encoding: + description: The encoding of the incoming data in the case + of input and the encoding of outgoing data in the case of + output. It currently can only be set to UTF8. + type: string + fieldDelimiter: + description: "The delimiter that will be used to separate + comma-separated value (CSV) records. Possible values are + \ (space), , (comma), \t (tab), | (pipe) and ;." + type: string + type: + description: The serialization format used for incoming data + streams. Possible values are Avro, Csv and Json. + type: string + type: object + servicebusNamespace: + description: The namespace that is associated with the desired + Event Hub, Service Bus Queue, Service Bus Topic, etc. + type: string + sharedAccessPolicyName: + description: The shared access policy name for the Event Hub, + Service Bus Queue, Service Bus Topic, etc. + type: string + streamAnalyticsJobName: + description: The name of the Stream Analytics Job. Changing this + forces a new resource to be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
+ format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/streamanalytics.azure.upbound.io_streaminputiothubs.yaml b/package/crds/streamanalytics.azure.upbound.io_streaminputiothubs.yaml index 047326cb8..efd199407 100644 --- a/package/crds/streamanalytics.azure.upbound.io_streaminputiothubs.yaml +++ b/package/crds/streamanalytics.azure.upbound.io_streaminputiothubs.yaml @@ -1115,3 +1115,1094 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: StreamInputIOTHub is the Schema for the StreamInputIOTHubs API. + Manages a Stream Analytics Stream Input IoTHub. 
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: StreamInputIOTHubSpec defines the desired state of StreamInputIOTHub + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + endpoint: + description: The IoT Hub endpoint to connect to (ie. messages/events, + messages/operationsMonitoringEvents, etc.). + type: string + eventhubConsumerGroupName: + description: The name of an Event Hub Consumer Group that should + be used to read events from the Event Hub. Specifying distinct + consumer group names for multiple inputs allows each of those + inputs to receive the same events from the Event Hub. 
+ type: string + eventhubConsumerGroupNameRef: + description: Reference to a ConsumerGroup in eventhub to populate + eventhubConsumerGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + eventhubConsumerGroupNameSelector: + description: Selector for a ConsumerGroup in eventhub to populate + eventhubConsumerGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + iothubNamespace: + description: The name or the URI of the IoT Hub. + type: string + iothubNamespaceRef: + description: Reference to a IOTHub in devices to populate iothubNamespace. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + iothubNamespaceSelector: + description: Selector for a IOTHub in devices to populate iothubNamespace. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: The name of the Stream Input IoTHub. Changing this + forces a new resource to be created. + type: string + resourceGroupName: + description: The name of the Resource Group where the Stream Analytics + Job exists. Changing this forces a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + serialization: + description: A serialization block as defined below. + properties: + encoding: + description: The encoding of the incoming data in the case + of input and the encoding of outgoing data in the case of + output. It currently can only be set to UTF8. + type: string + fieldDelimiter: + description: "The delimiter that will be used to separate + comma-separated value (CSV) records. Possible values are + \ (space), , (comma), \t (tab), | (pipe) and ;." + type: string + type: + description: The serialization format used for incoming data + streams. Possible values are Avro, Csv and Json. + type: string + type: object + sharedAccessPolicyKeySecretRef: + description: The shared access policy key for the specified shared + access policy. Changing this forces a new resource to be created. + properties: + key: + description: The key to select. 
+ type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + sharedAccessPolicyName: + description: The shared access policy name for the Event Hub, + Service Bus Queue, Service Bus Topic, etc. + type: string + streamAnalyticsJobName: + description: The name of the Stream Analytics Job. Changing this + forces a new resource to be created. + type: string + streamAnalyticsJobNameRef: + description: Reference to a Job in streamanalytics to populate + streamAnalyticsJobName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + streamAnalyticsJobNameSelector: + description: Selector for a Job in streamanalytics to populate + streamAnalyticsJobName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + endpoint: + description: The IoT Hub endpoint to connect to (ie. messages/events, + messages/operationsMonitoringEvents, etc.). + type: string + eventhubConsumerGroupName: + description: The name of an Event Hub Consumer Group that should + be used to read events from the Event Hub. Specifying distinct + consumer group names for multiple inputs allows each of those + inputs to receive the same events from the Event Hub. 
+ type: string + eventhubConsumerGroupNameRef: + description: Reference to a ConsumerGroup in eventhub to populate + eventhubConsumerGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + eventhubConsumerGroupNameSelector: + description: Selector for a ConsumerGroup in eventhub to populate + eventhubConsumerGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + iothubNamespace: + description: The name or the URI of the IoT Hub. + type: string + iothubNamespaceRef: + description: Reference to a IOTHub in devices to populate iothubNamespace. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + iothubNamespaceSelector: + description: Selector for a IOTHub in devices to populate iothubNamespace. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + name: + description: The name of the Stream Input IoTHub. Changing this + forces a new resource to be created. + type: string + resourceGroupName: + description: The name of the Resource Group where the Stream Analytics + Job exists. Changing this forces a new resource to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + serialization: + description: A serialization block as defined below. + properties: + encoding: + description: The encoding of the incoming data in the case + of input and the encoding of outgoing data in the case of + output. It currently can only be set to UTF8. + type: string + fieldDelimiter: + description: "The delimiter that will be used to separate + comma-separated value (CSV) records. Possible values are + \ (space), , (comma), \t (tab), | (pipe) and ;." + type: string + type: + description: The serialization format used for incoming data + streams. Possible values are Avro, Csv and Json. + type: string + type: object + sharedAccessPolicyName: + description: The shared access policy name for the Event Hub, + Service Bus Queue, Service Bus Topic, etc. + type: string + streamAnalyticsJobName: + description: The name of the Stream Analytics Job. 
Changing this + forces a new resource to be created. + type: string + streamAnalyticsJobNameRef: + description: Reference to a Job in streamanalytics to populate + streamAnalyticsJobName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + streamAnalyticsJobNameSelector: + description: Selector for a Job in streamanalytics to populate + streamAnalyticsJobName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.endpoint is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.endpoint) + || (has(self.initProvider) && has(self.initProvider.endpoint))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.serialization is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.serialization) + || (has(self.initProvider) && has(self.initProvider.serialization))' + - message: spec.forProvider.sharedAccessPolicyKeySecretRef is a required + parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.sharedAccessPolicyKeySecretRef)' + - message: spec.forProvider.sharedAccessPolicyName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.sharedAccessPolicyName) + || (has(self.initProvider) && has(self.initProvider.sharedAccessPolicyName))' + status: + description: StreamInputIOTHubStatus defines the observed state of StreamInputIOTHub. + properties: + atProvider: + properties: + endpoint: + description: The IoT Hub endpoint to connect to (ie. messages/events, + messages/operationsMonitoringEvents, etc.). 
+ type: string + eventhubConsumerGroupName: + description: The name of an Event Hub Consumer Group that should + be used to read events from the Event Hub. Specifying distinct + consumer group names for multiple inputs allows each of those + inputs to receive the same events from the Event Hub. + type: string + id: + description: The ID of the Stream Analytics Stream Input IoTHub. + type: string + iothubNamespace: + description: The name or the URI of the IoT Hub. + type: string + name: + description: The name of the Stream Input IoTHub. Changing this + forces a new resource to be created. + type: string + resourceGroupName: + description: The name of the Resource Group where the Stream Analytics + Job exists. Changing this forces a new resource to be created. + type: string + serialization: + description: A serialization block as defined below. + properties: + encoding: + description: The encoding of the incoming data in the case + of input and the encoding of outgoing data in the case of + output. It currently can only be set to UTF8. + type: string + fieldDelimiter: + description: "The delimiter that will be used to separate + comma-separated value (CSV) records. Possible values are + \ (space), , (comma), \t (tab), | (pipe) and ;." + type: string + type: + description: The serialization format used for incoming data + streams. Possible values are Avro, Csv and Json. + type: string + type: object + sharedAccessPolicyName: + description: The shared access policy name for the Event Hub, + Service Bus Queue, Service Bus Topic, etc. + type: string + streamAnalyticsJobName: + description: The name of the Stream Analytics Job. Changing this + forces a new resource to be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. 
+ properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/synapse.azure.upbound.io_linkedservices.yaml b/package/crds/synapse.azure.upbound.io_linkedservices.yaml index fcb1a825b..7e54ea465 100644 --- a/package/crds/synapse.azure.upbound.io_linkedservices.yaml +++ b/package/crds/synapse.azure.upbound.io_linkedservices.yaml @@ -739,3 +739,718 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: LinkedService is the Schema for the LinkedServices API. Manages + a Linked Service (connection) between a resource and Azure Synapse. This + is a generic resource that supports all different Linked Service Types. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: LinkedServiceSpec defines the desired state of LinkedService + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + additionalProperties: + additionalProperties: + type: string + description: A map of additional properties to associate with + the Synapse Linked Service. + type: object + x-kubernetes-map-type: granular + annotations: + description: List of tags that can be used for describing the + Synapse Linked Service. + items: + type: string + type: array + description: + description: The description for the Synapse Linked Service. + type: string + integrationRuntime: + description: A integration_runtime block as defined below. + properties: + name: + description: The integration runtime reference to associate + with the Synapse Linked Service. + type: string + nameRef: + description: Reference to a IntegrationRuntimeAzure in synapse + to populate name. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + nameSelector: + description: Selector for a IntegrationRuntimeAzure in synapse + to populate name. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the integration + runtime. 
+ type: object + x-kubernetes-map-type: granular + type: object + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the Synapse + Linked Service. + type: object + x-kubernetes-map-type: granular + synapseWorkspaceId: + description: The Synapse Workspace ID in which to associate the + Linked Service with. Changing this forces a new Synapse Linked + Service to be created. + type: string + synapseWorkspaceIdRef: + description: Reference to a Workspace in synapse to populate synapseWorkspaceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + synapseWorkspaceIdSelector: + description: Selector for a Workspace in synapse to populate synapseWorkspaceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: + description: |- + The type of data stores that will be connected to Synapse. Valid Values include AmazonMWS, AmazonRdsForOracle, AmazonRdsForSqlServer, AmazonRedshift, AmazonS3, AzureBatch. Changing this forces a new resource to be created. + AzureBlobFS, AzureBlobStorage, AzureDataExplorer, AzureDataLakeAnalytics, AzureDataLakeStore, AzureDatabricks, AzureDatabricksDeltaLake, AzureFileStorage, AzureFunction, + AzureKeyVault, AzureML, AzureMLService, AzureMariaDB, AzureMySql, AzurePostgreSql, AzureSqlDW, AzureSqlDatabase, AzureSqlMI, AzureSearch, AzureStorage, + AzureTableStorage, Cassandra, CommonDataServiceForApps, Concur, CosmosDb, CosmosDbMongoDbApi, Couchbase, CustomDataSource, Db2, Drill, + Dynamics, DynamicsAX, DynamicsCrm, Eloqua, FileServer, FtpServer, GoogleAdWords, GoogleBigQuery, GoogleCloudStorage, Greenplum, HBase, HDInsight, + HDInsightOnDemand, HttpServer, Hdfs, Hive, Hubspot, Impala, Informix, Jira, LinkedService, Magento, MariaDB, Marketo, MicrosoftAccess, MongoDb, + MongoDbAtlas, MongoDbV2, MySql, Netezza, OData, Odbc, Office365, Oracle, OracleServiceCloud, Paypal, Phoenix, PostgreSql, Presto, QuickBooks, + Responsys, RestService, SqlServer, Salesforce, SalesforceMarketingCloud, SalesforceServiceCloud, SapBW, 
SapCloudForCustomer, SapEcc, SapHana, SapOpenHub, + SapTable, ServiceNow, Sftp, SharePointOnlineList, Shopify, Snowflake, Spark, Square, Sybase, Teradata, Vertica, Web, Xero, Zoho. + type: string + typePropertiesJson: + description: A JSON object that contains the properties of the + Synapse Linked Service. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + additionalProperties: + additionalProperties: + type: string + description: A map of additional properties to associate with + the Synapse Linked Service. + type: object + x-kubernetes-map-type: granular + annotations: + description: List of tags that can be used for describing the + Synapse Linked Service. + items: + type: string + type: array + description: + description: The description for the Synapse Linked Service. + type: string + integrationRuntime: + description: A integration_runtime block as defined below. + properties: + name: + description: The integration runtime reference to associate + with the Synapse Linked Service. + type: string + nameRef: + description: Reference to a IntegrationRuntimeAzure in synapse + to populate name. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + nameSelector: + description: Selector for a IntegrationRuntimeAzure in synapse + to populate name. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the integration + runtime. + type: object + x-kubernetes-map-type: granular + type: object + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the Synapse + Linked Service. + type: object + x-kubernetes-map-type: granular + type: + description: |- + The type of data stores that will be connected to Synapse. Valid Values include AmazonMWS, AmazonRdsForOracle, AmazonRdsForSqlServer, AmazonRedshift, AmazonS3, AzureBatch. Changing this forces a new resource to be created. + AzureBlobFS, AzureBlobStorage, AzureDataExplorer, AzureDataLakeAnalytics, AzureDataLakeStore, AzureDatabricks, AzureDatabricksDeltaLake, AzureFileStorage, AzureFunction, + AzureKeyVault, AzureML, AzureMLService, AzureMariaDB, AzureMySql, AzurePostgreSql, AzureSqlDW, AzureSqlDatabase, AzureSqlMI, AzureSearch, AzureStorage, + AzureTableStorage, Cassandra, CommonDataServiceForApps, Concur, CosmosDb, CosmosDbMongoDbApi, Couchbase, CustomDataSource, Db2, Drill, + Dynamics, DynamicsAX, DynamicsCrm, Eloqua, FileServer, FtpServer, GoogleAdWords, GoogleBigQuery, GoogleCloudStorage, Greenplum, HBase, HDInsight, + HDInsightOnDemand, HttpServer, Hdfs, Hive, Hubspot, Impala, Informix, Jira, LinkedService, Magento, MariaDB, Marketo, MicrosoftAccess, MongoDb, + MongoDbAtlas, MongoDbV2, MySql, Netezza, OData, Odbc, Office365, Oracle, OracleServiceCloud, Paypal, Phoenix, PostgreSql, Presto, QuickBooks, + Responsys, RestService, SqlServer, Salesforce, SalesforceMarketingCloud, SalesforceServiceCloud, SapBW, SapCloudForCustomer, SapEcc, SapHana, SapOpenHub, + SapTable, ServiceNow, Sftp, SharePointOnlineList, Shopify, Snowflake, Spark, Square, Sybase, Teradata, Vertica, Web, Xero, Zoho. 
+ type: string + typePropertiesJson: + description: A JSON object that contains the properties of the + Synapse Linked Service. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. 
+ properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.type is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.type) + || (has(self.initProvider) && has(self.initProvider.type))' + - message: spec.forProvider.typePropertiesJson is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.typePropertiesJson) + || (has(self.initProvider) && has(self.initProvider.typePropertiesJson))' + status: + description: LinkedServiceStatus defines the observed state of LinkedService. + properties: + atProvider: + properties: + additionalProperties: + additionalProperties: + type: string + description: A map of additional properties to associate with + the Synapse Linked Service. + type: object + x-kubernetes-map-type: granular + annotations: + description: List of tags that can be used for describing the + Synapse Linked Service. + items: + type: string + type: array + description: + description: The description for the Synapse Linked Service. + type: string + id: + description: The ID of the Synapse Linked Service. + type: string + integrationRuntime: + description: A integration_runtime block as defined below. + properties: + name: + description: The integration runtime reference to associate + with the Synapse Linked Service. + type: string + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the integration + runtime. + type: object + x-kubernetes-map-type: granular + type: object + parameters: + additionalProperties: + type: string + description: A map of parameters to associate with the Synapse + Linked Service. 
+ type: object + x-kubernetes-map-type: granular + synapseWorkspaceId: + description: The Synapse Workspace ID in which to associate the + Linked Service with. Changing this forces a new Synapse Linked + Service to be created. + type: string + type: + description: |- + The type of data stores that will be connected to Synapse. Valid Values include AmazonMWS, AmazonRdsForOracle, AmazonRdsForSqlServer, AmazonRedshift, AmazonS3, AzureBatch. Changing this forces a new resource to be created. + AzureBlobFS, AzureBlobStorage, AzureDataExplorer, AzureDataLakeAnalytics, AzureDataLakeStore, AzureDatabricks, AzureDatabricksDeltaLake, AzureFileStorage, AzureFunction, + AzureKeyVault, AzureML, AzureMLService, AzureMariaDB, AzureMySql, AzurePostgreSql, AzureSqlDW, AzureSqlDatabase, AzureSqlMI, AzureSearch, AzureStorage, + AzureTableStorage, Cassandra, CommonDataServiceForApps, Concur, CosmosDb, CosmosDbMongoDbApi, Couchbase, CustomDataSource, Db2, Drill, + Dynamics, DynamicsAX, DynamicsCrm, Eloqua, FileServer, FtpServer, GoogleAdWords, GoogleBigQuery, GoogleCloudStorage, Greenplum, HBase, HDInsight, + HDInsightOnDemand, HttpServer, Hdfs, Hive, Hubspot, Impala, Informix, Jira, LinkedService, Magento, MariaDB, Marketo, MicrosoftAccess, MongoDb, + MongoDbAtlas, MongoDbV2, MySql, Netezza, OData, Odbc, Office365, Oracle, OracleServiceCloud, Paypal, Phoenix, PostgreSql, Presto, QuickBooks, + Responsys, RestService, SqlServer, Salesforce, SalesforceMarketingCloud, SalesforceServiceCloud, SapBW, SapCloudForCustomer, SapEcc, SapHana, SapOpenHub, + SapTable, ServiceNow, Sftp, SharePointOnlineList, Shopify, Snowflake, Spark, Square, Sybase, Teradata, Vertica, Web, Xero, Zoho. + type: string + typePropertiesJson: + description: A JSON object that contains the properties of the + Synapse Linked Service. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. 
+ properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/synapse.azure.upbound.io_sparkpools.yaml b/package/crds/synapse.azure.upbound.io_sparkpools.yaml index 67478775d..6e3f126e8 100644 --- a/package/crds/synapse.azure.upbound.io_sparkpools.yaml +++ b/package/crds/synapse.azure.upbound.io_sparkpools.yaml @@ -730,3 +730,691 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: SparkPool is the Schema for the SparkPools API. Manages a Synapse + Spark Pool. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SparkPoolSpec defines the desired state of SparkPool + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + autoPause: + description: An auto_pause block as defined below. + properties: + delayInMinutes: + description: Number of minutes of idle time before the Spark + Pool is automatically paused. Must be between 5 and 10080. + type: number + type: object + autoScale: + description: An auto_scale block as defined below. Exactly one + of node_count or auto_scale must be specified. + properties: + maxNodeCount: + description: The maximum number of nodes the Spark Pool can + support. Must be between 3 and 200. + type: number + minNodeCount: + description: The minimum number of nodes the Spark Pool can + support. Must be between 3 and 200. + type: number + type: object + cacheSize: + description: The cache size in the Spark Pool. + type: number + computeIsolationEnabled: + description: Indicates whether compute isolation is enabled or + not. Defaults to false. + type: boolean + dynamicExecutorAllocationEnabled: + description: Indicates whether Dynamic Executor Allocation is + enabled or not. Defaults to false. 
+ type: boolean + libraryRequirement: + description: A library_requirement block as defined below. + properties: + content: + description: The content of library requirements. + type: string + filename: + description: The name of the library requirements file. + type: string + type: object + maxExecutors: + description: The maximum number of executors allocated only when + dynamic_executor_allocation_enabled set to true. + type: number + minExecutors: + description: The minimum number of executors allocated only when + dynamic_executor_allocation_enabled set to true. + type: number + nodeCount: + description: The number of nodes in the Spark Pool. Exactly one + of node_count or auto_scale must be specified. + type: number + nodeSize: + description: The level of node in the Spark Pool. Possible values + are Small, Medium, Large, None, XLarge, XXLarge and XXXLarge. + type: string + nodeSizeFamily: + description: The kind of nodes that the Spark Pool provides. Possible + values are HardwareAcceleratedFPGA, HardwareAcceleratedGPU, + MemoryOptimized, and None. + type: string + sessionLevelPackagesEnabled: + description: Indicates whether session level packages are enabled + or not. Defaults to false. + type: boolean + sparkConfig: + description: A spark_config block as defined below. + properties: + content: + description: The contents of a spark configuration. + type: string + filename: + description: The name of the file where the spark configuration + content will be stored. + type: string + type: object + sparkEventsFolder: + description: The Spark events folder. Defaults to /events. + type: string + sparkLogFolder: + description: The default folder where Spark logs will be written. + Defaults to /logs. + type: string + sparkVersion: + description: The Apache Spark version. Possible values are 2.4 + , 3.1 , 3.2 and 3.3. Defaults to 2.4. + type: string + synapseWorkspaceId: + description: The ID of the Synapse Workspace where the Synapse + Spark Pool should exist. 
Changing this forces a new Synapse + Spark Pool to be created. + type: string + synapseWorkspaceIdRef: + description: Reference to a Workspace in synapse to populate synapseWorkspaceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + synapseWorkspaceIdSelector: + description: Selector for a Workspace in synapse to populate synapseWorkspaceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Synapse Spark Pool. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + autoPause: + description: An auto_pause block as defined below. + properties: + delayInMinutes: + description: Number of minutes of idle time before the Spark + Pool is automatically paused. Must be between 5 and 10080. + type: number + type: object + autoScale: + description: An auto_scale block as defined below. Exactly one + of node_count or auto_scale must be specified. + properties: + maxNodeCount: + description: The maximum number of nodes the Spark Pool can + support. Must be between 3 and 200. + type: number + minNodeCount: + description: The minimum number of nodes the Spark Pool can + support. Must be between 3 and 200. 
+ type: number + type: object + cacheSize: + description: The cache size in the Spark Pool. + type: number + computeIsolationEnabled: + description: Indicates whether compute isolation is enabled or + not. Defaults to false. + type: boolean + dynamicExecutorAllocationEnabled: + description: Indicates whether Dynamic Executor Allocation is + enabled or not. Defaults to false. + type: boolean + libraryRequirement: + description: A library_requirement block as defined below. + properties: + content: + description: The content of library requirements. + type: string + filename: + description: The name of the library requirements file. + type: string + type: object + maxExecutors: + description: The maximum number of executors allocated only when + dynamic_executor_allocation_enabled set to true. + type: number + minExecutors: + description: The minimum number of executors allocated only when + dynamic_executor_allocation_enabled set to true. + type: number + nodeCount: + description: The number of nodes in the Spark Pool. Exactly one + of node_count or auto_scale must be specified. + type: number + nodeSize: + description: The level of node in the Spark Pool. Possible values + are Small, Medium, Large, None, XLarge, XXLarge and XXXLarge. + type: string + nodeSizeFamily: + description: The kind of nodes that the Spark Pool provides. Possible + values are HardwareAcceleratedFPGA, HardwareAcceleratedGPU, + MemoryOptimized, and None. + type: string + sessionLevelPackagesEnabled: + description: Indicates whether session level packages are enabled + or not. Defaults to false. + type: boolean + sparkConfig: + description: A spark_config block as defined below. + properties: + content: + description: The contents of a spark configuration. + type: string + filename: + description: The name of the file where the spark configuration + content will be stored. + type: string + type: object + sparkEventsFolder: + description: The Spark events folder. Defaults to /events. 
+ type: string + sparkLogFolder: + description: The default folder where Spark logs will be written. + Defaults to /logs. + type: string + sparkVersion: + description: The Apache Spark version. Possible values are 2.4 + , 3.1 , 3.2 and 3.3. Defaults to 2.4. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Synapse Spark Pool. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.nodeSize is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.nodeSize) + || (has(self.initProvider) && has(self.initProvider.nodeSize))' + - message: spec.forProvider.nodeSizeFamily is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.nodeSizeFamily) + || (has(self.initProvider) && has(self.initProvider.nodeSizeFamily))' + status: + description: SparkPoolStatus defines the observed state of SparkPool. + properties: + atProvider: + properties: + autoPause: + description: An auto_pause block as defined below. + properties: + delayInMinutes: + description: Number of minutes of idle time before the Spark + Pool is automatically paused. Must be between 5 and 10080. + type: number + type: object + autoScale: + description: An auto_scale block as defined below. Exactly one + of node_count or auto_scale must be specified. + properties: + maxNodeCount: + description: The maximum number of nodes the Spark Pool can + support. Must be between 3 and 200. + type: number + minNodeCount: + description: The minimum number of nodes the Spark Pool can + support. Must be between 3 and 200. + type: number + type: object + cacheSize: + description: The cache size in the Spark Pool. + type: number + computeIsolationEnabled: + description: Indicates whether compute isolation is enabled or + not. Defaults to false. + type: boolean + dynamicExecutorAllocationEnabled: + description: Indicates whether Dynamic Executor Allocation is + enabled or not. Defaults to false. + type: boolean + id: + description: The ID of the Synapse Spark Pool. 
+ type: string + libraryRequirement: + description: A library_requirement block as defined below. + properties: + content: + description: The content of library requirements. + type: string + filename: + description: The name of the library requirements file. + type: string + type: object + maxExecutors: + description: The maximum number of executors allocated only when + dynamic_executor_allocation_enabled set to true. + type: number + minExecutors: + description: The minimum number of executors allocated only when + dynamic_executor_allocation_enabled set to true. + type: number + nodeCount: + description: The number of nodes in the Spark Pool. Exactly one + of node_count or auto_scale must be specified. + type: number + nodeSize: + description: The level of node in the Spark Pool. Possible values + are Small, Medium, Large, None, XLarge, XXLarge and XXXLarge. + type: string + nodeSizeFamily: + description: The kind of nodes that the Spark Pool provides. Possible + values are HardwareAcceleratedFPGA, HardwareAcceleratedGPU, + MemoryOptimized, and None. + type: string + sessionLevelPackagesEnabled: + description: Indicates whether session level packages are enabled + or not. Defaults to false. + type: boolean + sparkConfig: + description: A spark_config block as defined below. + properties: + content: + description: The contents of a spark configuration. + type: string + filename: + description: The name of the file where the spark configuration + content will be stored. + type: string + type: object + sparkEventsFolder: + description: The Spark events folder. Defaults to /events. + type: string + sparkLogFolder: + description: The default folder where Spark logs will be written. + Defaults to /logs. + type: string + sparkVersion: + description: The Apache Spark version. Possible values are 2.4 + , 3.1 , 3.2 and 3.3. Defaults to 2.4. + type: string + synapseWorkspaceId: + description: The ID of the Synapse Workspace where the Synapse + Spark Pool should exist. 
Changing this forces a new Synapse + Spark Pool to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Synapse Spark Pool. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/synapse.azure.upbound.io_sqlpools.yaml b/package/crds/synapse.azure.upbound.io_sqlpools.yaml index 6d0e4f922..d68183a97 100644 --- a/package/crds/synapse.azure.upbound.io_sqlpools.yaml +++ b/package/crds/synapse.azure.upbound.io_sqlpools.yaml @@ -597,3 +597,576 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: SQLPool is the Schema for the SQLPools API. Manages a Synapse + SQL Pool. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SQLPoolSpec defines the desired state of SQLPool + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + collation: + description: The name of the collation to use with this pool, + only applicable when create_mode is set to Default. Azure default + is SQL_LATIN1_GENERAL_CP1_CI_AS. Changing this forces a new + Synapse SQL Pool to be created. + type: string + createMode: + description: 'Specifies how to create the SQL Pool. Valid values + are: Default, Recovery or PointInTimeRestore. Must be Default + to create a new database. Defaults to Default. Changing this + forces a new Synapse SQL Pool to be created.' + type: string + dataEncrypted: + description: Is transparent data encryption enabled? + type: boolean + geoBackupPolicyEnabled: + description: Is geo-backup policy enabled? Possible values include + true or false. Defaults to true. + type: boolean + recoveryDatabaseId: + description: The ID of the Synapse SQL Pool or SQL Database which + is to back up, only applicable when create_mode is set to Recovery. + Changing this forces a new Synapse SQL Pool to be created. + type: string + restore: + description: A restore block as defined below. 
Only applicable + when create_mode is set to PointInTimeRestore. Changing this + forces a new Synapse SQL Pool to be created. + properties: + pointInTime: + description: Specifies the Snapshot time to restore formatted + as an RFC3339 date string. Changing this forces a new Synapse + SQL Pool to be created. + type: string + sourceDatabaseId: + description: The ID of the Synapse SQL Pool or SQL Database + which is to restore. Changing this forces a new Synapse + SQL Pool to be created. + type: string + type: object + skuName: + description: Specifies the SKU Name for this Synapse SQL Pool. + Possible values are DW100c, DW200c, DW300c, DW400c, DW500c, + DW1000c, DW1500c, DW2000c, DW2500c, DW3000c, DW5000c, DW6000c, + DW7500c, DW10000c, DW15000c or DW30000c. + type: string + storageAccountType: + description: The storage account type that will be used to store + backups for this Synapse SQL Pool. Possible values are LRS or + GRS. Changing this forces a new Synapse SQL Pool to be created. + Defaults to GRS. + type: string + synapseWorkspaceId: + description: The ID of Synapse Workspace within which this SQL + Pool should be created. Changing this forces a new Synapse SQL + Pool to be created. + type: string + synapseWorkspaceIdRef: + description: Reference to a Workspace in synapse to populate synapseWorkspaceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + synapseWorkspaceIdSelector: + description: Selector for a Workspace in synapse to populate synapseWorkspaceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Synapse SQL Pool. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. 
The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + collation: + description: The name of the collation to use with this pool, + only applicable when create_mode is set to Default. Azure default + is SQL_LATIN1_GENERAL_CP1_CI_AS. Changing this forces a new + Synapse SQL Pool to be created. + type: string + createMode: + description: 'Specifies how to create the SQL Pool. Valid values + are: Default, Recovery or PointInTimeRestore. Must be Default + to create a new database. Defaults to Default. Changing this + forces a new Synapse SQL Pool to be created.' + type: string + dataEncrypted: + description: Is transparent data encryption enabled? + type: boolean + geoBackupPolicyEnabled: + description: Is geo-backup policy enabled? Possible values include + true or false. Defaults to true. + type: boolean + recoveryDatabaseId: + description: The ID of the Synapse SQL Pool or SQL Database which + is to back up, only applicable when create_mode is set to Recovery. + Changing this forces a new Synapse SQL Pool to be created. + type: string + restore: + description: A restore block as defined below. Only applicable + when create_mode is set to PointInTimeRestore. Changing this + forces a new Synapse SQL Pool to be created. + properties: + pointInTime: + description: Specifies the Snapshot time to restore formatted + as an RFC3339 date string. Changing this forces a new Synapse + SQL Pool to be created. + type: string + sourceDatabaseId: + description: The ID of the Synapse SQL Pool or SQL Database + which is to restore. Changing this forces a new Synapse + SQL Pool to be created. 
+ type: string + type: object + skuName: + description: Specifies the SKU Name for this Synapse SQL Pool. + Possible values are DW100c, DW200c, DW300c, DW400c, DW500c, + DW1000c, DW1500c, DW2000c, DW2500c, DW3000c, DW5000c, DW6000c, + DW7500c, DW10000c, DW15000c or DW30000c. + type: string + storageAccountType: + description: The storage account type that will be used to store + backups for this Synapse SQL Pool. Possible values are LRS or + GRS. Changing this forces a new Synapse SQL Pool to be created. + Defaults to GRS. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Synapse SQL Pool. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.skuName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.skuName) + || (has(self.initProvider) && has(self.initProvider.skuName))' + status: + description: SQLPoolStatus defines the observed state of SQLPool. + properties: + atProvider: + properties: + collation: + description: The name of the collation to use with this pool, + only applicable when create_mode is set to Default. Azure default + is SQL_LATIN1_GENERAL_CP1_CI_AS. Changing this forces a new + Synapse SQL Pool to be created. + type: string + createMode: + description: 'Specifies how to create the SQL Pool. Valid values + are: Default, Recovery or PointInTimeRestore. Must be Default + to create a new database. Defaults to Default. Changing this + forces a new Synapse SQL Pool to be created.' + type: string + dataEncrypted: + description: Is transparent data encryption enabled? + type: boolean + geoBackupPolicyEnabled: + description: Is geo-backup policy enabled? Possible values include + true or false. Defaults to true. + type: boolean + id: + description: The ID of the Synapse SQL Pool. 
+ type: string + recoveryDatabaseId: + description: The ID of the Synapse SQL Pool or SQL Database which + is to back up, only applicable when create_mode is set to Recovery. + Changing this forces a new Synapse SQL Pool to be created. + type: string + restore: + description: A restore block as defined below. Only applicable + when create_mode is set to PointInTimeRestore. Changing this + forces a new Synapse SQL Pool to be created. + properties: + pointInTime: + description: Specifies the Snapshot time to restore formatted + as an RFC3339 date string. Changing this forces a new Synapse + SQL Pool to be created. + type: string + sourceDatabaseId: + description: The ID of the Synapse SQL Pool or SQL Database + which is to restore. Changing this forces a new Synapse + SQL Pool to be created. + type: string + type: object + skuName: + description: Specifies the SKU Name for this Synapse SQL Pool. + Possible values are DW100c, DW200c, DW300c, DW400c, DW500c, + DW1000c, DW1500c, DW2000c, DW2500c, DW3000c, DW5000c, DW6000c, + DW7500c, DW10000c, DW15000c or DW30000c. + type: string + storageAccountType: + description: The storage account type that will be used to store + backups for this Synapse SQL Pool. Possible values are LRS or + GRS. Changing this forces a new Synapse SQL Pool to be created. + Defaults to GRS. + type: string + synapseWorkspaceId: + description: The ID of Synapse Workspace within which this SQL + Pool should be created. Changing this forces a new Synapse SQL + Pool to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Synapse SQL Pool. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. 
+ properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/synapse.azure.upbound.io_workspaces.yaml b/package/crds/synapse.azure.upbound.io_workspaces.yaml index b22579414..f2c07d573 100644 --- a/package/crds/synapse.azure.upbound.io_workspaces.yaml +++ b/package/crds/synapse.azure.upbound.io_workspaces.yaml @@ -1622,3 +1622,1578 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Workspace is the Schema for the Workspaces API. Manages a Synapse + Workspace. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: WorkspaceSpec defines the desired state of Workspace + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + aadAdmin: + description: An aad_admin block as defined below. + items: + properties: + login: + description: The login name of the Azure AD Administrator + of this Synapse Workspace. + type: string + objectId: + description: The object id of the Azure AD Administrator + of this Synapse Workspace. + type: string + tenantId: + description: The tenant id of the Azure AD Administrator + of this Synapse Workspace. + type: string + type: object + type: array + azureDevopsRepo: + description: An azure_devops_repo block as defined below. + properties: + accountName: + description: Specifies the Azure DevOps account name. + type: string + branchName: + description: Specifies the collaboration branch of the repository + to get code from. + type: string + lastCommitId: + description: The last commit ID. + type: string + projectName: + description: Specifies the name of the Azure DevOps project. + type: string + repositoryName: + description: Specifies the name of the git repository. 
+ type: string + rootFolder: + description: Specifies the root folder within the repository. + Set to / for the top level. + type: string + tenantId: + description: the ID of the tenant for the Azure DevOps account. + type: string + type: object + azureadAuthenticationOnly: + description: Is Azure Active Directory Authentication the only + way to authenticate with resources inside this synapse Workspace. + Defaults to false. + type: boolean + computeSubnetId: + description: Subnet ID used for computes in workspace Changing + this forces a new resource to be created. + type: string + computeSubnetIdRef: + description: Reference to a Subnet in network to populate computeSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + computeSubnetIdSelector: + description: Selector for a Subnet in network to populate computeSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + customerManagedKey: + description: A customer_managed_key block as defined below. + properties: + keyName: + description: An identifier for the key. Name needs to match + the name of the key used with the azurerm_synapse_workspace_key + resource. Defaults to "cmk" if not specified. + type: string + keyVersionlessId: + description: The Azure Key Vault Key Versionless ID to be + used as the Customer Managed Key (CMK) for double encryption + (e.g. https://example-keyvault.vault.azure.net/type/cmk/). + type: string + keyVersionlessIdRef: + description: Reference to a Key in keyvault to populate keyVersionlessId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + keyVersionlessIdSelector: + description: Selector for a Key in keyvault to populate keyVersionlessId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + userAssignedIdentityId: + description: The User Assigned Identity ID to be used for + accessing the Customer Managed Key for encryption. + type: string + type: object + dataExfiltrationProtectionEnabled: + description: Is data exfiltration protection enabled in this workspace? + If set to true, managed_virtual_network_enabled must also be + set to true. Changing this forces a new resource to be created. + type: boolean + githubRepo: + description: A github_repo block as defined below. 
+ properties: + accountName: + description: Specifies the GitHub account name. + type: string + branchName: + description: Specifies the collaboration branch of the repository + to get code from. + type: string + gitUrl: + description: 'Specifies the GitHub Enterprise host name. For + example: https://github.mydomain.com.' + type: string + lastCommitId: + description: The last commit ID. + type: string + repositoryName: + description: Specifies the name of the git repository. + type: string + rootFolder: + description: Specifies the root folder within the repository. + Set to / for the top level. + type: string + type: object + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Synapse Workspace. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be associated with this Synapse Workspace. Possible + values are SystemAssigned, UserAssigned and SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + linkingAllowedForAadTenantIds: + description: Allowed AAD Tenant Ids For Linking. + items: + type: string + type: array + location: + description: Specifies the Azure Region where the synapse Workspace + should exist. Changing this forces a new resource to be created. + type: string + managedResourceGroupName: + description: Workspace managed resource group. Changing this forces + a new resource to be created. + type: string + managedResourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + managedResourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + managedResourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + managedResourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + managedVirtualNetworkEnabled: + description: Is Virtual Network enabled for all computes in this + workspace? 
Changing this forces a new resource to be created. + type: boolean + publicNetworkAccessEnabled: + description: Whether public network access is allowed for the + Cognitive Account. Defaults to true. + type: boolean + purviewId: + description: The ID of purview account. + type: string + resourceGroupName: + description: Specifies the name of the Resource Group where the + synapse Workspace should exist. Changing this forces a new resource + to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sqlAadAdmin: + description: An sql_aad_admin block as defined below. + items: + properties: + login: + description: The login name of the Azure AD Administrator + of this Synapse Workspace SQL. + type: string + objectId: + description: The object id of the Azure AD Administrator + of this Synapse Workspace SQL. + type: string + tenantId: + description: The tenant id of the Azure AD Administrator + of this Synapse Workspace SQL. + type: string + type: object + type: array + sqlAdministratorLogin: + description: Specifies The login name of the SQL administrator. + Changing this forces a new resource to be created. If this is + not provided aad_admin or customer_managed_key must be provided. + type: string + sqlAdministratorLoginPasswordSecretRef: + description: The Password associated with the sql_administrator_login + for the SQL administrator. If this is not provided aad_admin + or customer_managed_key must be provided. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - key + - name + - namespace + type: object + sqlIdentityControlEnabled: + description: Are pipelines (running as workspace's system assigned + identity) allowed to access SQL pools? + type: boolean + storageDataLakeGen2FilesystemId: + description: Specifies the ID of storage data lake gen2 filesystem + resource. Changing this forces a new resource to be created. + type: string + storageDataLakeGen2FilesystemIdRef: + description: Reference to a DataLakeGen2FileSystem in storage + to populate storageDataLakeGen2FilesystemId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + storageDataLakeGen2FilesystemIdSelector: + description: Selector for a DataLakeGen2FileSystem in storage + to populate storageDataLakeGen2FilesystemId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Synapse Workspace. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + aadAdmin: + description: An aad_admin block as defined below. + items: + properties: + login: + description: The login name of the Azure AD Administrator + of this Synapse Workspace. + type: string + objectId: + description: The object id of the Azure AD Administrator + of this Synapse Workspace. 
+ type: string + tenantId: + description: The tenant id of the Azure AD Administrator + of this Synapse Workspace. + type: string + type: object + type: array + azureDevopsRepo: + description: An azure_devops_repo block as defined below. + properties: + accountName: + description: Specifies the Azure DevOps account name. + type: string + branchName: + description: Specifies the collaboration branch of the repository + to get code from. + type: string + lastCommitId: + description: The last commit ID. + type: string + projectName: + description: Specifies the name of the Azure DevOps project. + type: string + repositoryName: + description: Specifies the name of the git repository. + type: string + rootFolder: + description: Specifies the root folder within the repository. + Set to / for the top level. + type: string + tenantId: + description: the ID of the tenant for the Azure DevOps account. + type: string + type: object + azureadAuthenticationOnly: + description: Is Azure Active Directory Authentication the only + way to authenticate with resources inside this synapse Workspace. + Defaults to false. + type: boolean + computeSubnetId: + description: Subnet ID used for computes in workspace Changing + this forces a new resource to be created. + type: string + computeSubnetIdRef: + description: Reference to a Subnet in network to populate computeSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + computeSubnetIdSelector: + description: Selector for a Subnet in network to populate computeSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + customerManagedKey: + description: A customer_managed_key block as defined below. + properties: + keyName: + description: An identifier for the key. Name needs to match + the name of the key used with the azurerm_synapse_workspace_key + resource. Defaults to "cmk" if not specified. + type: string + keyVersionlessId: + description: The Azure Key Vault Key Versionless ID to be + used as the Customer Managed Key (CMK) for double encryption + (e.g. 
https://example-keyvault.vault.azure.net/type/cmk/). + type: string + keyVersionlessIdRef: + description: Reference to a Key in keyvault to populate keyVersionlessId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + keyVersionlessIdSelector: + description: Selector for a Key in keyvault to populate keyVersionlessId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + userAssignedIdentityId: + description: The User Assigned Identity ID to be used for + accessing the Customer Managed Key for encryption. + type: string + type: object + dataExfiltrationProtectionEnabled: + description: Is data exfiltration protection enabled in this workspace? + If set to true, managed_virtual_network_enabled must also be + set to true. Changing this forces a new resource to be created. + type: boolean + githubRepo: + description: A github_repo block as defined below. + properties: + accountName: + description: Specifies the GitHub account name. + type: string + branchName: + description: Specifies the collaboration branch of the repository + to get code from. + type: string + gitUrl: + description: 'Specifies the GitHub Enterprise host name. For + example: https://github.mydomain.com.' + type: string + lastCommitId: + description: The last commit ID. + type: string + repositoryName: + description: Specifies the name of the git repository. + type: string + rootFolder: + description: Specifies the root folder within the repository. + Set to / for the top level. + type: string + type: object + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Synapse Workspace. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be associated with this Synapse Workspace. Possible + values are SystemAssigned, UserAssigned and SystemAssigned, + UserAssigned (to enable both). 
+ type: string + type: object + linkingAllowedForAadTenantIds: + description: Allowed AAD Tenant Ids For Linking. + items: + type: string + type: array + location: + description: Specifies the Azure Region where the synapse Workspace + should exist. Changing this forces a new resource to be created. + type: string + managedResourceGroupName: + description: Workspace managed resource group. Changing this forces + a new resource to be created. + type: string + managedResourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + managedResourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + managedResourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + managedResourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + managedVirtualNetworkEnabled: + description: Is Virtual Network enabled for all computes in this + workspace? Changing this forces a new resource to be created. + type: boolean + publicNetworkAccessEnabled: + description: Whether public network access is allowed for the + Cognitive Account. Defaults to true. + type: boolean + purviewId: + description: The ID of purview account. + type: string + sqlAadAdmin: + description: An sql_aad_admin block as defined below. + items: + properties: + login: + description: The login name of the Azure AD Administrator + of this Synapse Workspace SQL. + type: string + objectId: + description: The object id of the Azure AD Administrator + of this Synapse Workspace SQL. + type: string + tenantId: + description: The tenant id of the Azure AD Administrator + of this Synapse Workspace SQL. + type: string + type: object + type: array + sqlAdministratorLogin: + description: Specifies The login name of the SQL administrator. + Changing this forces a new resource to be created. If this is + not provided aad_admin or customer_managed_key must be provided. + type: string + sqlIdentityControlEnabled: + description: Are pipelines (running as workspace's system assigned + identity) allowed to access SQL pools? 
+ type: boolean + storageDataLakeGen2FilesystemId: + description: Specifies the ID of storage data lake gen2 filesystem + resource. Changing this forces a new resource to be created. + type: string + storageDataLakeGen2FilesystemIdRef: + description: Reference to a DataLakeGen2FileSystem in storage + to populate storageDataLakeGen2FilesystemId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + storageDataLakeGen2FilesystemIdSelector: + description: Selector for a DataLakeGen2FileSystem in storage + to populate storageDataLakeGen2FilesystemId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Synapse Workspace. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + status: + description: WorkspaceStatus defines the observed state of Workspace. + properties: + atProvider: + properties: + aadAdmin: + description: An aad_admin block as defined below. + items: + properties: + login: + description: The login name of the Azure AD Administrator + of this Synapse Workspace. + type: string + objectId: + description: The object id of the Azure AD Administrator + of this Synapse Workspace. + type: string + tenantId: + description: The tenant id of the Azure AD Administrator + of this Synapse Workspace. + type: string + type: object + type: array + azureDevopsRepo: + description: An azure_devops_repo block as defined below. + properties: + accountName: + description: Specifies the Azure DevOps account name. + type: string + branchName: + description: Specifies the collaboration branch of the repository + to get code from. + type: string + lastCommitId: + description: The last commit ID. + type: string + projectName: + description: Specifies the name of the Azure DevOps project. 
+ type: string + repositoryName: + description: Specifies the name of the git repository. + type: string + rootFolder: + description: Specifies the root folder within the repository. + Set to / for the top level. + type: string + tenantId: + description: the ID of the tenant for the Azure DevOps account. + type: string + type: object + azureadAuthenticationOnly: + description: Is Azure Active Directory Authentication the only + way to authenticate with resources inside this synapse Workspace. + Defaults to false. + type: boolean + computeSubnetId: + description: Subnet ID used for computes in workspace Changing + this forces a new resource to be created. + type: string + connectivityEndpoints: + additionalProperties: + type: string + description: A list of Connectivity endpoints for this Synapse + Workspace. + type: object + x-kubernetes-map-type: granular + customerManagedKey: + description: A customer_managed_key block as defined below. + properties: + keyName: + description: An identifier for the key. Name needs to match + the name of the key used with the azurerm_synapse_workspace_key + resource. Defaults to "cmk" if not specified. + type: string + keyVersionlessId: + description: The Azure Key Vault Key Versionless ID to be + used as the Customer Managed Key (CMK) for double encryption + (e.g. https://example-keyvault.vault.azure.net/type/cmk/). + type: string + userAssignedIdentityId: + description: The User Assigned Identity ID to be used for + accessing the Customer Managed Key for encryption. + type: string + type: object + dataExfiltrationProtectionEnabled: + description: Is data exfiltration protection enabled in this workspace? + If set to true, managed_virtual_network_enabled must also be + set to true. Changing this forces a new resource to be created. + type: boolean + githubRepo: + description: A github_repo block as defined below. + properties: + accountName: + description: Specifies the GitHub account name. 
+ type: string + branchName: + description: Specifies the collaboration branch of the repository + to get code from. + type: string + gitUrl: + description: 'Specifies the GitHub Enterprise host name. For + example: https://github.mydomain.com.' + type: string + lastCommitId: + description: The last commit ID. + type: string + repositoryName: + description: Specifies the name of the git repository. + type: string + rootFolder: + description: Specifies the root folder within the repository. + Set to / for the top level. + type: string + type: object + id: + description: The ID of the synapse Workspace. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of User Assigned Managed Identity + IDs to be assigned to this Synapse Workspace. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Principal ID for the Service Principal associated + with the Managed Service Identity of this Synapse Workspace. + type: string + tenantId: + description: The Tenant ID for the Service Principal associated + with the Managed Service Identity of this Synapse Workspace. + type: string + type: + description: Specifies the type of Managed Service Identity + that should be associated with this Synapse Workspace. Possible + values are SystemAssigned, UserAssigned and SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + linkingAllowedForAadTenantIds: + description: Allowed AAD Tenant Ids For Linking. + items: + type: string + type: array + location: + description: Specifies the Azure Region where the synapse Workspace + should exist. Changing this forces a new resource to be created. + type: string + managedResourceGroupName: + description: Workspace managed resource group. Changing this forces + a new resource to be created. 
+ type: string + managedVirtualNetworkEnabled: + description: Is Virtual Network enabled for all computes in this + workspace? Changing this forces a new resource to be created. + type: boolean + publicNetworkAccessEnabled: + description: Whether public network access is allowed for the + Cognitive Account. Defaults to true. + type: boolean + purviewId: + description: The ID of purview account. + type: string + resourceGroupName: + description: Specifies the name of the Resource Group where the + synapse Workspace should exist. Changing this forces a new resource + to be created. + type: string + sqlAadAdmin: + description: An sql_aad_admin block as defined below. + items: + properties: + login: + description: The login name of the Azure AD Administrator + of this Synapse Workspace SQL. + type: string + objectId: + description: The object id of the Azure AD Administrator + of this Synapse Workspace SQL. + type: string + tenantId: + description: The tenant id of the Azure AD Administrator + of this Synapse Workspace SQL. + type: string + type: object + type: array + sqlAdministratorLogin: + description: Specifies The login name of the SQL administrator. + Changing this forces a new resource to be created. If this is + not provided aad_admin or customer_managed_key must be provided. + type: string + sqlIdentityControlEnabled: + description: Are pipelines (running as workspace's system assigned + identity) allowed to access SQL pools? + type: boolean + storageDataLakeGen2FilesystemId: + description: Specifies the ID of storage data lake gen2 filesystem + resource. Changing this forces a new resource to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Synapse Workspace. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. 
+ properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/synapse.azure.upbound.io_workspacevulnerabilityassessments.yaml b/package/crds/synapse.azure.upbound.io_workspacevulnerabilityassessments.yaml index 514871920..097aed0d5 100644 --- a/package/crds/synapse.azure.upbound.io_workspacevulnerabilityassessments.yaml +++ b/package/crds/synapse.azure.upbound.io_workspacevulnerabilityassessments.yaml @@ -613,3 +613,592 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: WorkspaceVulnerabilityAssessment is the Schema for the WorkspaceVulnerabilityAssessments + API. Manages the Vulnerability Assessment for a Synapse Workspace. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: WorkspaceVulnerabilityAssessmentSpec defines the desired + state of WorkspaceVulnerabilityAssessment + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + recurringScans: + description: The recurring scans settings. The recurring_scans + block supports fields documented below. + properties: + emailSubscriptionAdminsEnabled: + description: Boolean flag which specifies if the schedule + scan notification will be sent to the subscription administrators. + Defaults to false. + type: boolean + emails: + description: Specifies an array of email addresses to which + the scan notification is sent. + items: + type: string + type: array + enabled: + description: Boolean flag which specifies if recurring scans + is enabled or disabled. Defaults to false. + type: boolean + type: object + storageAccountAccessKeySecretRef: + description: Specifies the identifier key of the storage account + for vulnerability assessment scan results. If storage_container_sas_key + isn't specified, storage_account_access_key is required. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. 
+ type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + storageContainerPath: + description: A blob storage container path to hold the scan results + (e.g. https://example.blob.core.windows.net/VaScans/). + type: string + storageContainerSasKeySecretRef: + description: A shared access signature (SAS Key) that has write + access to the blob container specified in storage_container_path + parameter. If storage_account_access_key isn't specified, storage_container_sas_key + is required. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + workspaceSecurityAlertPolicyId: + description: The ID of the security alert policy of the Synapse + Workspace. Changing this forces a new resource to be created. + type: string + workspaceSecurityAlertPolicyIdRef: + description: Reference to a WorkspaceSecurityAlertPolicy in synapse + to populate workspaceSecurityAlertPolicyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + workspaceSecurityAlertPolicyIdSelector: + description: Selector for a WorkspaceSecurityAlertPolicy in synapse + to populate workspaceSecurityAlertPolicyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. 
This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + recurringScans: + description: The recurring scans settings. The recurring_scans + block supports fields documented below. + properties: + emailSubscriptionAdminsEnabled: + description: Boolean flag which specifies if the schedule + scan notification will be sent to the subscription administrators. + Defaults to false. + type: boolean + emails: + description: Specifies an array of email addresses to which + the scan notification is sent. + items: + type: string + type: array + enabled: + description: Boolean flag which specifies if recurring scans + is enabled or disabled. Defaults to false. + type: boolean + type: object + storageContainerPath: + description: A blob storage container path to hold the scan results + (e.g. https://example.blob.core.windows.net/VaScans/). + type: string + workspaceSecurityAlertPolicyId: + description: The ID of the security alert policy of the Synapse + Workspace. Changing this forces a new resource to be created. + type: string + workspaceSecurityAlertPolicyIdRef: + description: Reference to a WorkspaceSecurityAlertPolicy in synapse + to populate workspaceSecurityAlertPolicyId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + workspaceSecurityAlertPolicyIdSelector: + description: Selector for a WorkspaceSecurityAlertPolicy in synapse + to populate workspaceSecurityAlertPolicyId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. 
Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.storageContainerPath is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.storageContainerPath) + || (has(self.initProvider) && has(self.initProvider.storageContainerPath))' + status: + description: WorkspaceVulnerabilityAssessmentStatus defines the observed + state of WorkspaceVulnerabilityAssessment. + properties: + atProvider: + properties: + id: + description: The ID of the Synapse Workspace Vulnerability Assessment. 
+ type: string + recurringScans: + description: The recurring scans settings. The recurring_scans + block supports fields documented below. + properties: + emailSubscriptionAdminsEnabled: + description: Boolean flag which specifies if the schedule + scan notification will be sent to the subscription administrators. + Defaults to false. + type: boolean + emails: + description: Specifies an array of email addresses to which + the scan notification is sent. + items: + type: string + type: array + enabled: + description: Boolean flag which specifies if recurring scans + is enabled or disabled. Defaults to false. + type: boolean + type: object + storageContainerPath: + description: A blob storage container path to hold the scan results + (e.g. https://example.blob.core.windows.net/VaScans/). + type: string + workspaceSecurityAlertPolicyId: + description: The ID of the security alert policy of the Synapse + Workspace. Changing this forces a new resource to be created. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. 
+ type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/timeseriesinsights.azure.upbound.io_gen2environments.yaml b/package/crds/timeseriesinsights.azure.upbound.io_gen2environments.yaml index 50c5dde48..d3338f34d 100644 --- a/package/crds/timeseriesinsights.azure.upbound.io_gen2environments.yaml +++ b/package/crds/timeseriesinsights.azure.upbound.io_gen2environments.yaml @@ -720,3 +720,699 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Gen2Environment is the Schema for the Gen2Environments API. Manages + an Azure IoT Time Series Insights Gen2 Environment. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: Gen2EnvironmentSpec defines the desired state of Gen2Environment + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + idProperties: + description: A list of property ids for the Azure IoT Time Series + Insights Gen2 Environment. Changing this forces a new resource + to be created. + items: + type: string + type: array + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + resourceGroupName: + description: The name of the resource group in which to create + the Azure IoT Time Series Insights Gen2 Environment. Changing + this forces a new resource to be created. 
+ type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + skuName: + description: Specifies the SKU Name for this IoT Time Series Insights + Gen2 Environment. Currently it supports only L1. For gen2, capacity + cannot be specified. Changing this forces a new resource to + be created. + type: string + storage: + description: A storage block as defined below. + properties: + keySecretRef: + description: Access key of storage account for Azure IoT Time + Series Insights Gen2 Environment + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + name: + description: Name of storage account for Azure IoT Time Series + Insights Gen2 Environment. Changing this forces a new resource + to be created. + type: string + nameRef: + description: Reference to a Account in storage to populate + name. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + nameSelector: + description: Selector for a Account in storage to populate + name. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + required: + - keySecretRef + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + warmStoreDataRetentionTime: + description: Specifies the ISO8601 timespan specifying the minimum + number of days the environment's events will be available for + query. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. 
The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + idProperties: + description: A list of property ids for the Azure IoT Time Series + Insights Gen2 Environment. Changing this forces a new resource + to be created. + items: + type: string + type: array + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + skuName: + description: Specifies the SKU Name for this IoT Time Series Insights + Gen2 Environment. Currently it supports only L1. For gen2, capacity + cannot be specified. Changing this forces a new resource to + be created. + type: string + storage: + description: A storage block as defined below. + properties: + name: + description: Name of storage account for Azure IoT Time Series + Insights Gen2 Environment. Changing this forces a new resource + to be created. + type: string + nameRef: + description: Reference to a Account in storage to populate + name. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + nameSelector: + description: Selector for a Account in storage to populate + name. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + warmStoreDataRetentionTime: + description: Specifies the ISO8601 timespan specifying the minimum + number of days the environment's events will be available for + query. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. 
It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.idProperties is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.idProperties) + || (has(self.initProvider) && has(self.initProvider.idProperties))' + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.skuName is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.skuName) + || (has(self.initProvider) && has(self.initProvider.skuName))' + - message: spec.forProvider.storage is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.storage) + || (has(self.initProvider) && has(self.initProvider.storage))' + status: + description: Gen2EnvironmentStatus defines the observed state of Gen2Environment. + properties: + atProvider: + properties: + dataAccessFqdn: + description: The FQDN used to access the environment data. + type: string + id: + description: The ID of the IoT Time Series Insights Gen2 Environment. + type: string + idProperties: + description: A list of property ids for the Azure IoT Time Series + Insights Gen2 Environment. Changing this forces a new resource + to be created. + items: + type: string + type: array + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. 
+ type: string + resourceGroupName: + description: The name of the resource group in which to create + the Azure IoT Time Series Insights Gen2 Environment. Changing + this forces a new resource to be created. + type: string + skuName: + description: Specifies the SKU Name for this IoT Time Series Insights + Gen2 Environment. Currently it supports only L1. For gen2, capacity + cannot be specified. Changing this forces a new resource to + be created. + type: string + storage: + description: A storage block as defined below. + properties: + name: + description: Name of storage account for Azure IoT Time Series + Insights Gen2 Environment. Changing this forces a new resource + to be created. + type: string + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + warmStoreDataRetentionTime: + description: Specifies the ISO8601 timespan specifying the minimum + number of days the environment's events will be available for + query. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
+ format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/web.azure.upbound.io_appserviceplans.yaml b/package/crds/web.azure.upbound.io_appserviceplans.yaml index 273b081b0..fd04b911e 100644 --- a/package/crds/web.azure.upbound.io_appserviceplans.yaml +++ b/package/crds/web.azure.upbound.io_appserviceplans.yaml @@ -595,3 +595,574 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: AppServicePlan is the Schema for the AppServicePlans API. Manages + an App Service Plan component. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: AppServicePlanSpec defines the desired state of AppServicePlan + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + appServiceEnvironmentId: + description: The ID of the App Service Environment where the App + Service Plan should be located. Changing forces a new resource + to be created. + type: string + isXenon: + description: Whether to create a xenon App Service Plan. + type: boolean + kind: + description: The kind of the App Service Plan to create. Possible + values are Windows (also available as App), Linux, elastic (for + Premium Consumption), xenon and FunctionApp (for a Consumption + Plan). Defaults to Windows. Changing this forces a new resource + to be created. 
+ type: string + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + maximumElasticWorkerCount: + description: The maximum number of total workers allowed for this + ElasticScaleEnabled App Service Plan. + type: number + perSiteScaling: + description: Can Apps assigned to this App Service Plan be scaled + independently? If set to false apps assigned to this plan will + scale to all instances of the plan. + type: boolean + reserved: + description: Is this App Service Plan Reserved. + type: boolean + resourceGroupName: + description: The name of the resource group in which to create + the App Service Plan component. Changing this forces a new resource + to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + sku: + description: A sku block as documented below. + properties: + capacity: + description: Specifies the number of workers associated with + this App Service Plan. + type: number + size: + description: Specifies the plan's instance size. + type: string + tier: + description: Specifies the plan's pricing tier. + type: string + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + zoneRedundant: + description: Specifies if the App Service Plan should be Zone + Redundant. Changing this forces a new resource to be created. + type: boolean + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. 
+ InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + appServiceEnvironmentId: + description: The ID of the App Service Environment where the App + Service Plan should be located. Changing forces a new resource + to be created. + type: string + isXenon: + description: Whether to create a xenon App Service Plan. + type: boolean + kind: + description: The kind of the App Service Plan to create. Possible + values are Windows (also available as App), Linux, elastic (for + Premium Consumption), xenon and FunctionApp (for a Consumption + Plan). Defaults to Windows. Changing this forces a new resource + to be created. + type: string + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + maximumElasticWorkerCount: + description: The maximum number of total workers allowed for this + ElasticScaleEnabled App Service Plan. + type: number + perSiteScaling: + description: Can Apps assigned to this App Service Plan be scaled + independently? If set to false apps assigned to this plan will + scale to all instances of the plan. + type: boolean + reserved: + description: Is this App Service Plan Reserved. + type: boolean + sku: + description: A sku block as documented below. + properties: + capacity: + description: Specifies the number of workers associated with + this App Service Plan. + type: number + size: + description: Specifies the plan's instance size. 
+ type: string + tier: + description: Specifies the plan's pricing tier. + type: string + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + zoneRedundant: + description: Specifies if the App Service Plan should be Zone + Redundant. Changing this forces a new resource to be created. + type: boolean + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.sku is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.sku) + || (has(self.initProvider) && has(self.initProvider.sku))' + status: + description: AppServicePlanStatus defines the observed state of AppServicePlan. + properties: + atProvider: + properties: + appServiceEnvironmentId: + description: The ID of the App Service Environment where the App + Service Plan should be located. Changing forces a new resource + to be created. + type: string + id: + description: The ID of the App Service Plan component. + type: string + isXenon: + description: Whether to create a xenon App Service Plan. + type: boolean + kind: + description: The kind of the App Service Plan to create. Possible + values are Windows (also available as App), Linux, elastic (for + Premium Consumption), xenon and FunctionApp (for a Consumption + Plan). Defaults to Windows. Changing this forces a new resource + to be created. + type: string + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + maximumElasticWorkerCount: + description: The maximum number of total workers allowed for this + ElasticScaleEnabled App Service Plan. + type: number + maximumNumberOfWorkers: + description: The maximum number of workers supported with the + App Service Plan's sku. 
+ type: number + perSiteScaling: + description: Can Apps assigned to this App Service Plan be scaled + independently? If set to false apps assigned to this plan will + scale to all instances of the plan. + type: boolean + reserved: + description: Is this App Service Plan Reserved. + type: boolean + resourceGroupName: + description: The name of the resource group in which to create + the App Service Plan component. Changing this forces a new resource + to be created. + type: string + sku: + description: A sku block as documented below. + properties: + capacity: + description: Specifies the number of workers associated with + this App Service Plan. + type: number + size: + description: Specifies the plan's instance size. + type: string + tier: + description: Specifies the plan's pricing tier. + type: string + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + zoneRedundant: + description: Specifies if the App Service Plan should be Zone + Redundant. Changing this forces a new resource to be created. + type: boolean + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
+ format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/web.azure.upbound.io_functionapps.yaml b/package/crds/web.azure.upbound.io_functionapps.yaml index ea8dbb5ec..94e12327a 100644 --- a/package/crds/web.azure.upbound.io_functionapps.yaml +++ b/package/crds/web.azure.upbound.io_functionapps.yaml @@ -2761,3 +2761,2673 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: FunctionApp is the Schema for the FunctionApps API. Manages a + Function App. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: FunctionAppSpec defines the desired state of FunctionApp + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + appServicePlanId: + description: The ID of the App Service Plan within which to create + this Function App. + type: string + appServicePlanIdRef: + description: Reference to a AppServicePlan in web to populate + appServicePlanId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + appServicePlanIdSelector: + description: Selector for a AppServicePlan in web to populate + appServicePlanId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + appSettings: + additionalProperties: + type: string + description: A map of key-value pairs for App Settings and custom + values. 
+ type: object + x-kubernetes-map-type: granular + authSettings: + description: A auth_settings block as defined below. + properties: + activeDirectory: + description: A active_directory block as defined below. + properties: + allowedAudiences: + description: Allowed audience values to consider when + validating JWTs issued by Azure Active Directory. + items: + type: string + type: array + clientId: + description: The OAuth 2.0 client ID that was created + for the app used for authentication. + type: string + clientSecretSecretRef: + description: The OAuth 2.0 client secret that was created + for the app used for authentication. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + additionalLoginParams: + additionalProperties: + type: string + description: Login parameters to send to the OpenID Connect + authorization endpoint when a user logs in. Each parameter + must be in the form "key=value". + type: object + x-kubernetes-map-type: granular + allowedExternalRedirectUrls: + description: External URLs that can be redirected to as part + of logging in or logging out of the app. + items: + type: string + type: array + defaultProvider: + description: The default provider to use when multiple providers + have been set up. Possible values are AzureActiveDirectory, + Facebook, Google, MicrosoftAccount and Twitter. + type: string + enabled: + description: Is Authentication enabled? + type: boolean + facebook: + description: A facebook block as defined below. + properties: + appId: + description: The App ID of the Facebook app used for login + type: string + appSecretSecretRef: + description: The App Secret of the Facebook app used for + Facebook login. + properties: + key: + description: The key to select. 
+ type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + oauthScopes: + description: The OAuth 2.0 scopes that will be requested + as part of Microsoft Account authentication. https://msdn.microsoft.com/en-us/library/dn631845.aspx + items: + type: string + type: array + required: + - appSecretSecretRef + type: object + google: + description: A google block as defined below. + properties: + clientId: + description: The OAuth 2.0 client ID that was created + for the app used for authentication. + type: string + clientSecretSecretRef: + description: The OAuth 2.0 client secret that was created + for the app used for authentication. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + oauthScopes: + description: The OAuth 2.0 scopes that will be requested + as part of Microsoft Account authentication. https://msdn.microsoft.com/en-us/library/dn631845.aspx + items: + type: string + type: array + required: + - clientSecretSecretRef + type: object + issuer: + description: Issuer URI. When using Azure Active Directory, + this value is the URI of the directory tenant, e.g. https://sts.windows.net/{tenant-guid}/. + type: string + microsoft: + description: A microsoft block as defined below. + properties: + clientId: + description: The OAuth 2.0 client ID that was created + for the app used for authentication. + type: string + clientSecretSecretRef: + description: The OAuth 2.0 client secret that was created + for the app used for authentication. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - key + - name + - namespace + type: object + oauthScopes: + description: The OAuth 2.0 scopes that will be requested + as part of Microsoft Account authentication. https://msdn.microsoft.com/en-us/library/dn631845.aspx + items: + type: string + type: array + required: + - clientSecretSecretRef + type: object + runtimeVersion: + description: The runtime version of the Authentication/Authorization + module. + type: string + tokenRefreshExtensionHours: + description: The number of hours after session token expiration + that a session token can be used to call the token refresh + API. Defaults to 72. + type: number + tokenStoreEnabled: + description: If enabled the module will durably store platform-specific + security tokens that are obtained during login flows. Defaults + to false. + type: boolean + twitter: + description: A twitter block as defined below. + properties: + consumerKey: + description: The OAuth 1.0a consumer key of the Twitter + application used for sign-in. + type: string + consumerSecretSecretRef: + description: The OAuth 1.0a consumer secret of the Twitter + application used for sign-in. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + required: + - consumerSecretSecretRef + type: object + unauthenticatedClientAction: + description: The action to take when an unauthenticated client + attempts to access the app. Possible values are AllowAnonymous + and RedirectToLoginPage. + type: string + type: object + clientCertMode: + description: The mode of the Function App's client certificates + requirement for incoming requests. Possible values are Required + and Optional. + type: string + connectionString: + description: An connection_string block as defined below. 
+ items: + properties: + name: + description: The name of the Connection String. + type: string + type: + description: The type of the Connection String. Possible + values are APIHub, Custom, DocDb, EventHub, MySQL, NotificationHub, + PostgreSQL, RedisCache, ServiceBus, SQLAzure and SQLServer. + type: string + valueSecretRef: + description: The value for the Connection String. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + required: + - valueSecretRef + type: object + type: array + dailyMemoryTimeQuota: + description: The amount of memory in gigabyte-seconds that your + application is allowed to consume per day. Setting this value + only affects function apps under the consumption plan. + type: number + enableBuiltinLogging: + description: Should the built-in logging of this Function App + be enabled? Defaults to true. + type: boolean + enabled: + description: Is the Function App enabled? Defaults to true. + type: boolean + httpsOnly: + description: Can the Function App only be accessed via HTTPS? + Defaults to false. + type: boolean + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of user managed identity ids + to be assigned. Required if type is UserAssigned. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the identity type of the Function App. + Possible values are SystemAssigned (where Azure will generate + a Service Principal for you), UserAssigned where you can + specify the Service Principal IDs in the identity_ids field, + and SystemAssigned, UserAssigned which assigns both a system + managed identity as well as the specified user assigned + identities. 
+ type: string + type: object + keyVaultReferenceIdentityId: + description: The User Assigned Identity Id used for looking up + KeyVault secrets. The identity must be assigned to the application. + See Access vaults with a user-assigned identity for more information. + type: string + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + name: + description: Specifies the name of the Function App. Changing + this forces a new resource to be created. Limit the function + name to 32 characters to avoid naming collisions. For more information + about Function App naming rule. + type: string + osType: + description: A string indicating the Operating System type for + this function app. Possible values are linux and “(empty string). + Changing this forces a new resource to be created. Defaults + to "". + type: string + resourceGroupName: + description: The name of the resource group in which to create + the Function App. Changing this forces a new resource to be + created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + siteConfig: + description: A site_config object as defined below. + properties: + alwaysOn: + description: Should the Function App be loaded at all times? + Defaults to false. + type: boolean + appScaleLimit: + description: The number of workers this function app can scale + out to. Only applicable to apps on the Consumption and Premium + plan. + type: number + autoSwapSlotName: + description: The name of the slot to automatically swap to + during deployment + type: string + cors: + description: A cors block as defined below. + properties: + allowedOrigins: + description: A list of origins which should be able to + make cross-origin calls. 
* can be used to allow all + calls. + items: + type: string + type: array + x-kubernetes-list-type: set + supportCredentials: + description: Are credentials supported? + type: boolean + type: object + dotnetFrameworkVersion: + description: The version of the .NET framework's CLR used + in this function app. Possible values are v4.0 (including + .NET Core 2.1 and 3.1), v5.0 and v6.0. For more information + on which .NET Framework version to use based on the runtime + version you're targeting - please see this table. Defaults + to v4.0. + type: string + elasticInstanceMinimum: + description: The number of minimum instances for this function + app. Only affects apps on the Premium plan. + type: number + ftpsState: + description: 'State of FTP / FTPS service for this function + app. Possible values include: AllAllowed, FtpsOnly and Disabled. + Defaults to AllAllowed.' + type: string + healthCheckPath: + description: Path which will be checked for this function + app health. + type: string + http2Enabled: + description: Specifies whether or not the HTTP2 protocol should + be enabled. Defaults to false. + type: boolean + ipRestriction: + description: A list of ip_restriction objects representing + IP restrictions as defined below. + items: + properties: + action: + description: Allow or Deny access for this IP range. + Defaults to Allow. + type: string + headers: + description: The headers block for this specific scm_ip_restriction + as defined below. + items: + properties: + xAzureFdid: + description: A list of allowed Azure FrontDoor + IDs in UUID notation with a maximum of 8. + items: + type: string + type: array + x-kubernetes-list-type: set + xFdHealthProbe: + description: A list to allow the Azure FrontDoor + health probe header. Only allowed value is "1". 
+ items: + type: string + type: array + x-kubernetes-list-type: set + xForwardedFor: + description: A list of allowed 'X-Forwarded-For' + IPs in CIDR notation with a maximum of 8 + items: + type: string + type: array + x-kubernetes-list-type: set + xForwardedHost: + description: A list of allowed 'X-Forwarded-Host' + domains with a maximum of 8. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + ipAddress: + description: The IP Address used for this IP Restriction + in CIDR notation. + type: string + name: + description: The name for this IP Restriction. + type: string + priority: + description: The priority for this IP Restriction. Restrictions + are enforced in priority order. By default, priority + is set to 65000 if not specified. + type: number + serviceTag: + description: The Service Tag used for this IP Restriction. + type: string + virtualNetworkSubnetId: + description: The Virtual Network Subnet ID used for + this IP Restriction. + type: string + virtualNetworkSubnetIdRef: + description: Reference to a Subnet in network to populate + virtualNetworkSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkSubnetIdSelector: + description: Selector for a Subnet in network to populate + virtualNetworkSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + javaVersion: + description: Java version hosted by the function app in Azure. + Possible values are 1.8, 11 & 17 (In-Preview). + type: string + linuxFxVersion: + description: Linux App Framework and version for the AppService, + e.g. DOCKER|(golang:latest). + type: string + minTlsVersion: + description: The minimum supported TLS version for the function + app. Possible values are 1.0, 1.1, and 1.2. Defaults to + 1.2 for new function apps. + type: string + preWarmedInstanceCount: + description: The number of pre-warmed instances for this function + app. Only affects apps on the Premium plan. 
+ type: number + runtimeScaleMonitoringEnabled: + description: Should Runtime Scale Monitoring be enabled?. + Only applicable to apps on the Premium plan. Defaults to + false. + type: boolean + scmIpRestriction: + description: A list of scm_ip_restriction objects representing + IP restrictions as defined below. + items: + properties: + action: + description: Allow or Deny access for this IP range. + Defaults to Allow. + type: string + headers: + description: The headers block for this specific scm_ip_restriction + as defined below. + items: + properties: + xAzureFdid: + description: A list of allowed Azure FrontDoor + IDs in UUID notation with a maximum of 8. + items: + type: string + type: array + x-kubernetes-list-type: set + xFdHealthProbe: + description: A list to allow the Azure FrontDoor + health probe header. Only allowed value is "1". + items: + type: string + type: array + x-kubernetes-list-type: set + xForwardedFor: + description: A list of allowed 'X-Forwarded-For' + IPs in CIDR notation with a maximum of 8 + items: + type: string + type: array + x-kubernetes-list-type: set + xForwardedHost: + description: A list of allowed 'X-Forwarded-Host' + domains with a maximum of 8. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + ipAddress: + description: The IP Address used for this IP Restriction + in CIDR notation. + type: string + name: + description: The name for this IP Restriction. + type: string + priority: + description: The priority for this IP Restriction. Restrictions + are enforced in priority order. By default, priority + is set to 65000 if not specified. + type: number + serviceTag: + description: The Service Tag used for this IP Restriction. + type: string + virtualNetworkSubnetId: + description: The Virtual Network Subnet ID used for + this IP Restriction. + type: string + virtualNetworkSubnetIdRef: + description: Reference to a Subnet in network to populate + virtualNetworkSubnetId. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkSubnetIdSelector: + description: Selector for a Subnet in network to populate + virtualNetworkSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + scmType: + description: 'The type of Source Control used by the Function + App. Valid values include: BitBucketGit, BitBucketHg, CodePlexGit, + CodePlexHg, Dropbox, ExternalGit, ExternalHg, GitHub, LocalGit, + None (default), OneDrive, Tfs, VSO, and VSTSRM.' + type: string + scmUseMainIpRestriction: + description: IP security restrictions for scm to use main. + Defaults to false. + type: boolean + use32BitWorkerProcess: + description: Should the Function App run in 32 bit mode, rather + than 64 bit mode? Defaults to true. + type: boolean + vnetRouteAllEnabled: + description: Should all outbound traffic to have Virtual Network + Security Groups and User Defined Routes applied? Defaults + to false. + type: boolean + websocketsEnabled: + description: Should WebSockets be enabled? + type: boolean + type: object + sourceControl: + description: A source_control block, as defined below. + properties: + branch: + description: The branch of the remote repository to use. Defaults + to 'master'. + type: string + manualIntegration: + description: Limits to manual integration. Defaults to false + if not specified. + type: boolean + repoUrl: + description: The URL of the source code repository. + type: string + rollbackEnabled: + description: Enable roll-back for the repository. Defaults + to false if not specified. + type: boolean + useMercurial: + description: Use Mercurial if true, otherwise uses Git. + type: boolean + type: object + storageAccountAccessKeySecretRef: + description: The access key which will be used to access the backend + storage account for the Function App. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - key + - name + - namespace + type: object + storageAccountName: + description: The backend storage account name which will be used + by this Function App (such as the dashboard, logs). Changing + this forces a new resource to be created. + type: string + storageAccountNameRef: + description: Reference to a Account in storage to populate storageAccountName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + storageAccountNameSelector: + description: Selector for a Account in storage to populate storageAccountName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + version: + description: The runtime version associated with the Function + App. Defaults to ~1. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + appServicePlanId: + description: The ID of the App Service Plan within which to create + this Function App. + type: string + appServicePlanIdRef: + description: Reference to a AppServicePlan in web to populate + appServicePlanId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + appServicePlanIdSelector: + description: Selector for a AppServicePlan in web to populate + appServicePlanId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + appSettings: + additionalProperties: + type: string + description: A map of key-value pairs for App Settings and custom + values. + type: object + x-kubernetes-map-type: granular + authSettings: + description: A auth_settings block as defined below. + properties: + activeDirectory: + description: A active_directory block as defined below. + properties: + allowedAudiences: + description: Allowed audience values to consider when + validating JWTs issued by Azure Active Directory. + items: + type: string + type: array + clientId: + description: The OAuth 2.0 client ID that was created + for the app used for authentication. + type: string + type: object + additionalLoginParams: + additionalProperties: + type: string + description: Login parameters to send to the OpenID Connect + authorization endpoint when a user logs in. Each parameter + must be in the form "key=value". + type: object + x-kubernetes-map-type: granular + allowedExternalRedirectUrls: + description: External URLs that can be redirected to as part + of logging in or logging out of the app. + items: + type: string + type: array + defaultProvider: + description: The default provider to use when multiple providers + have been set up. Possible values are AzureActiveDirectory, + Facebook, Google, MicrosoftAccount and Twitter. + type: string + enabled: + description: Is Authentication enabled? + type: boolean + facebook: + description: A facebook block as defined below. + properties: + appId: + description: The App ID of the Facebook app used for login + type: string + oauthScopes: + description: The OAuth 2.0 scopes that will be requested + as part of Microsoft Account authentication. https://msdn.microsoft.com/en-us/library/dn631845.aspx + items: + type: string + type: array + type: object + google: + description: A google block as defined below. 
+ properties: + clientId: + description: The OAuth 2.0 client ID that was created + for the app used for authentication. + type: string + oauthScopes: + description: The OAuth 2.0 scopes that will be requested + as part of Microsoft Account authentication. https://msdn.microsoft.com/en-us/library/dn631845.aspx + items: + type: string + type: array + type: object + issuer: + description: Issuer URI. When using Azure Active Directory, + this value is the URI of the directory tenant, e.g. https://sts.windows.net/{tenant-guid}/. + type: string + microsoft: + description: A microsoft block as defined below. + properties: + clientId: + description: The OAuth 2.0 client ID that was created + for the app used for authentication. + type: string + oauthScopes: + description: The OAuth 2.0 scopes that will be requested + as part of Microsoft Account authentication. https://msdn.microsoft.com/en-us/library/dn631845.aspx + items: + type: string + type: array + type: object + runtimeVersion: + description: The runtime version of the Authentication/Authorization + module. + type: string + tokenRefreshExtensionHours: + description: The number of hours after session token expiration + that a session token can be used to call the token refresh + API. Defaults to 72. + type: number + tokenStoreEnabled: + description: If enabled the module will durably store platform-specific + security tokens that are obtained during login flows. Defaults + to false. + type: boolean + twitter: + description: A twitter block as defined below. + properties: + consumerKey: + description: The OAuth 1.0a consumer key of the Twitter + application used for sign-in. + type: string + type: object + unauthenticatedClientAction: + description: The action to take when an unauthenticated client + attempts to access the app. Possible values are AllowAnonymous + and RedirectToLoginPage. 
+ type: string + type: object + clientCertMode: + description: The mode of the Function App's client certificates + requirement for incoming requests. Possible values are Required + and Optional. + type: string + connectionString: + description: An connection_string block as defined below. + items: + properties: + name: + description: The name of the Connection String. + type: string + type: + description: The type of the Connection String. Possible + values are APIHub, Custom, DocDb, EventHub, MySQL, NotificationHub, + PostgreSQL, RedisCache, ServiceBus, SQLAzure and SQLServer. + type: string + type: object + type: array + dailyMemoryTimeQuota: + description: The amount of memory in gigabyte-seconds that your + application is allowed to consume per day. Setting this value + only affects function apps under the consumption plan. + type: number + enableBuiltinLogging: + description: Should the built-in logging of this Function App + be enabled? Defaults to true. + type: boolean + enabled: + description: Is the Function App enabled? Defaults to true. + type: boolean + httpsOnly: + description: Can the Function App only be accessed via HTTPS? + Defaults to false. + type: boolean + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of user managed identity ids + to be assigned. Required if type is UserAssigned. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the identity type of the Function App. + Possible values are SystemAssigned (where Azure will generate + a Service Principal for you), UserAssigned where you can + specify the Service Principal IDs in the identity_ids field, + and SystemAssigned, UserAssigned which assigns both a system + managed identity as well as the specified user assigned + identities. 
+ type: string + type: object + keyVaultReferenceIdentityId: + description: The User Assigned Identity Id used for looking up + KeyVault secrets. The identity must be assigned to the application. + See Access vaults with a user-assigned identity for more information. + type: string + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + name: + description: Specifies the name of the Function App. Changing + this forces a new resource to be created. Limit the function + name to 32 characters to avoid naming collisions. For more information + about Function App naming rule. + type: string + osType: + description: A string indicating the Operating System type for + this function app. Possible values are linux and “(empty string). + Changing this forces a new resource to be created. Defaults + to "". + type: string + resourceGroupName: + description: The name of the resource group in which to create + the Function App. Changing this forces a new resource to be + created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + siteConfig: + description: A site_config object as defined below. + properties: + alwaysOn: + description: Should the Function App be loaded at all times? + Defaults to false. + type: boolean + appScaleLimit: + description: The number of workers this function app can scale + out to. Only applicable to apps on the Consumption and Premium + plan. + type: number + autoSwapSlotName: + description: The name of the slot to automatically swap to + during deployment + type: string + cors: + description: A cors block as defined below. + properties: + allowedOrigins: + description: A list of origins which should be able to + make cross-origin calls. 
* can be used to allow all + calls. + items: + type: string + type: array + x-kubernetes-list-type: set + supportCredentials: + description: Are credentials supported? + type: boolean + type: object + dotnetFrameworkVersion: + description: The version of the .NET framework's CLR used + in this function app. Possible values are v4.0 (including + .NET Core 2.1 and 3.1), v5.0 and v6.0. For more information + on which .NET Framework version to use based on the runtime + version you're targeting - please see this table. Defaults + to v4.0. + type: string + elasticInstanceMinimum: + description: The number of minimum instances for this function + app. Only affects apps on the Premium plan. + type: number + ftpsState: + description: 'State of FTP / FTPS service for this function + app. Possible values include: AllAllowed, FtpsOnly and Disabled. + Defaults to AllAllowed.' + type: string + healthCheckPath: + description: Path which will be checked for this function + app health. + type: string + http2Enabled: + description: Specifies whether or not the HTTP2 protocol should + be enabled. Defaults to false. + type: boolean + ipRestriction: + description: A list of ip_restriction objects representing + IP restrictions as defined below. + items: + properties: + action: + description: Allow or Deny access for this IP range. + Defaults to Allow. + type: string + headers: + description: The headers block for this specific scm_ip_restriction + as defined below. + items: + properties: + xAzureFdid: + description: A list of allowed Azure FrontDoor + IDs in UUID notation with a maximum of 8. + items: + type: string + type: array + x-kubernetes-list-type: set + xFdHealthProbe: + description: A list to allow the Azure FrontDoor + health probe header. Only allowed value is "1". 
+ items: + type: string + type: array + x-kubernetes-list-type: set + xForwardedFor: + description: A list of allowed 'X-Forwarded-For' + IPs in CIDR notation with a maximum of 8 + items: + type: string + type: array + x-kubernetes-list-type: set + xForwardedHost: + description: A list of allowed 'X-Forwarded-Host' + domains with a maximum of 8. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + ipAddress: + description: The IP Address used for this IP Restriction + in CIDR notation. + type: string + name: + description: The name for this IP Restriction. + type: string + priority: + description: The priority for this IP Restriction. Restrictions + are enforced in priority order. By default, priority + is set to 65000 if not specified. + type: number + serviceTag: + description: The Service Tag used for this IP Restriction. + type: string + virtualNetworkSubnetId: + description: The Virtual Network Subnet ID used for + this IP Restriction. + type: string + virtualNetworkSubnetIdRef: + description: Reference to a Subnet in network to populate + virtualNetworkSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkSubnetIdSelector: + description: Selector for a Subnet in network to populate + virtualNetworkSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + javaVersion: + description: Java version hosted by the function app in Azure. + Possible values are 1.8, 11 & 17 (In-Preview). + type: string + linuxFxVersion: + description: Linux App Framework and version for the AppService, + e.g. DOCKER|(golang:latest). + type: string + minTlsVersion: + description: The minimum supported TLS version for the function + app. Possible values are 1.0, 1.1, and 1.2. Defaults to + 1.2 for new function apps. + type: string + preWarmedInstanceCount: + description: The number of pre-warmed instances for this function + app. Only affects apps on the Premium plan. 
+ type: number + runtimeScaleMonitoringEnabled: + description: Should Runtime Scale Monitoring be enabled?. + Only applicable to apps on the Premium plan. Defaults to + false. + type: boolean + scmIpRestriction: + description: A list of scm_ip_restriction objects representing + IP restrictions as defined below. + items: + properties: + action: + description: Allow or Deny access for this IP range. + Defaults to Allow. + type: string + headers: + description: The headers block for this specific scm_ip_restriction + as defined below. + items: + properties: + xAzureFdid: + description: A list of allowed Azure FrontDoor + IDs in UUID notation with a maximum of 8. + items: + type: string + type: array + x-kubernetes-list-type: set + xFdHealthProbe: + description: A list to allow the Azure FrontDoor + health probe header. Only allowed value is "1". + items: + type: string + type: array + x-kubernetes-list-type: set + xForwardedFor: + description: A list of allowed 'X-Forwarded-For' + IPs in CIDR notation with a maximum of 8 + items: + type: string + type: array + x-kubernetes-list-type: set + xForwardedHost: + description: A list of allowed 'X-Forwarded-Host' + domains with a maximum of 8. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + ipAddress: + description: The IP Address used for this IP Restriction + in CIDR notation. + type: string + name: + description: The name for this IP Restriction. + type: string + priority: + description: The priority for this IP Restriction. Restrictions + are enforced in priority order. By default, priority + is set to 65000 if not specified. + type: number + serviceTag: + description: The Service Tag used for this IP Restriction. + type: string + virtualNetworkSubnetId: + description: The Virtual Network Subnet ID used for + this IP Restriction. + type: string + virtualNetworkSubnetIdRef: + description: Reference to a Subnet in network to populate + virtualNetworkSubnetId. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkSubnetIdSelector: + description: Selector for a Subnet in network to populate + virtualNetworkSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + scmType: + description: 'The type of Source Control used by the Function + App. Valid values include: BitBucketGit, BitBucketHg, CodePlexGit, + CodePlexHg, Dropbox, ExternalGit, ExternalHg, GitHub, LocalGit, + None (default), OneDrive, Tfs, VSO, and VSTSRM.' + type: string + scmUseMainIpRestriction: + description: IP security restrictions for scm to use main. + Defaults to false. + type: boolean + use32BitWorkerProcess: + description: Should the Function App run in 32 bit mode, rather + than 64 bit mode? Defaults to true. + type: boolean + vnetRouteAllEnabled: + description: Should all outbound traffic to have Virtual Network + Security Groups and User Defined Routes applied? Defaults + to false. + type: boolean + websocketsEnabled: + description: Should WebSockets be enabled? + type: boolean + type: object + sourceControl: + description: A source_control block, as defined below. + properties: + branch: + description: The branch of the remote repository to use. Defaults + to 'master'. + type: string + manualIntegration: + description: Limits to manual integration. Defaults to false + if not specified. + type: boolean + repoUrl: + description: The URL of the source code repository. + type: string + rollbackEnabled: + description: Enable roll-back for the repository. Defaults + to false if not specified. + type: boolean + useMercurial: + description: Use Mercurial if true, otherwise uses Git. + type: boolean + type: object + storageAccountName: + description: The backend storage account name which will be used + by this Function App (such as the dashboard, logs). Changing + this forces a new resource to be created. + type: string + storageAccountNameRef: + description: Reference to a Account in storage to populate storageAccountName. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + storageAccountNameSelector: + description: Selector for a Account in storage to populate storageAccountName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + version: + description: The runtime version associated with the Function + App. Defaults to ~1. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.storageAccountAccessKeySecretRef is a required + parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.storageAccountAccessKeySecretRef)' + status: + description: FunctionAppStatus defines the observed state of FunctionApp. + properties: + atProvider: + properties: + appServicePlanId: + description: The ID of the App Service Plan within which to create + this Function App. + type: string + appSettings: + additionalProperties: + type: string + description: A map of key-value pairs for App Settings and custom + values. + type: object + x-kubernetes-map-type: granular + authSettings: + description: A auth_settings block as defined below. + properties: + activeDirectory: + description: A active_directory block as defined below. + properties: + allowedAudiences: + description: Allowed audience values to consider when + validating JWTs issued by Azure Active Directory. + items: + type: string + type: array + clientId: + description: The OAuth 2.0 client ID that was created + for the app used for authentication. 
+ type: string + type: object + additionalLoginParams: + additionalProperties: + type: string + description: Login parameters to send to the OpenID Connect + authorization endpoint when a user logs in. Each parameter + must be in the form "key=value". + type: object + x-kubernetes-map-type: granular + allowedExternalRedirectUrls: + description: External URLs that can be redirected to as part + of logging in or logging out of the app. + items: + type: string + type: array + defaultProvider: + description: The default provider to use when multiple providers + have been set up. Possible values are AzureActiveDirectory, + Facebook, Google, MicrosoftAccount and Twitter. + type: string + enabled: + description: Is Authentication enabled? + type: boolean + facebook: + description: A facebook block as defined below. + properties: + appId: + description: The App ID of the Facebook app used for login + type: string + oauthScopes: + description: The OAuth 2.0 scopes that will be requested + as part of Microsoft Account authentication. https://msdn.microsoft.com/en-us/library/dn631845.aspx + items: + type: string + type: array + type: object + google: + description: A google block as defined below. + properties: + clientId: + description: The OAuth 2.0 client ID that was created + for the app used for authentication. + type: string + oauthScopes: + description: The OAuth 2.0 scopes that will be requested + as part of Microsoft Account authentication. https://msdn.microsoft.com/en-us/library/dn631845.aspx + items: + type: string + type: array + type: object + issuer: + description: Issuer URI. When using Azure Active Directory, + this value is the URI of the directory tenant, e.g. https://sts.windows.net/{tenant-guid}/. + type: string + microsoft: + description: A microsoft block as defined below. + properties: + clientId: + description: The OAuth 2.0 client ID that was created + for the app used for authentication. 
+ type: string + oauthScopes: + description: The OAuth 2.0 scopes that will be requested + as part of Microsoft Account authentication. https://msdn.microsoft.com/en-us/library/dn631845.aspx + items: + type: string + type: array + type: object + runtimeVersion: + description: The runtime version of the Authentication/Authorization + module. + type: string + tokenRefreshExtensionHours: + description: The number of hours after session token expiration + that a session token can be used to call the token refresh + API. Defaults to 72. + type: number + tokenStoreEnabled: + description: If enabled the module will durably store platform-specific + security tokens that are obtained during login flows. Defaults + to false. + type: boolean + twitter: + description: A twitter block as defined below. + properties: + consumerKey: + description: The OAuth 1.0a consumer key of the Twitter + application used for sign-in. + type: string + type: object + unauthenticatedClientAction: + description: The action to take when an unauthenticated client + attempts to access the app. Possible values are AllowAnonymous + and RedirectToLoginPage. + type: string + type: object + clientCertMode: + description: The mode of the Function App's client certificates + requirement for incoming requests. Possible values are Required + and Optional. + type: string + connectionString: + description: An connection_string block as defined below. + items: + properties: + name: + description: The name of the Connection String. + type: string + type: + description: The type of the Connection String. Possible + values are APIHub, Custom, DocDb, EventHub, MySQL, NotificationHub, + PostgreSQL, RedisCache, ServiceBus, SQLAzure and SQLServer. + type: string + type: object + type: array + customDomainVerificationId: + description: An identifier used by App Service to perform domain + ownership verification via DNS TXT record. 
+ type: string + dailyMemoryTimeQuota: + description: The amount of memory in gigabyte-seconds that your + application is allowed to consume per day. Setting this value + only affects function apps under the consumption plan. + type: number + defaultHostname: + description: The default hostname associated with the Function + App - such as mysite.azurewebsites.net + type: string + enableBuiltinLogging: + description: Should the built-in logging of this Function App + be enabled? Defaults to true. + type: boolean + enabled: + description: Is the Function App enabled? Defaults to true. + type: boolean + httpsOnly: + description: Can the Function App only be accessed via HTTPS? + Defaults to false. + type: boolean + id: + description: The ID of the Function App + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of user managed identity ids + to be assigned. Required if type is UserAssigned. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Principal ID for the Service Principal associated + with the Managed Service Identity of this App Service. + type: string + tenantId: + description: The Tenant ID for the Service Principal associated + with the Managed Service Identity of this App Service. + type: string + type: + description: Specifies the identity type of the Function App. + Possible values are SystemAssigned (where Azure will generate + a Service Principal for you), UserAssigned where you can + specify the Service Principal IDs in the identity_ids field, + and SystemAssigned, UserAssigned which assigns both a system + managed identity as well as the specified user assigned + identities. + type: string + type: object + keyVaultReferenceIdentityId: + description: The User Assigned Identity Id used for looking up + KeyVault secrets. The identity must be assigned to the application. 
+ See Access vaults with a user-assigned identity for more information. + type: string + kind: + description: The Function App kind - such as functionapp,linux,container + type: string + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + name: + description: Specifies the name of the Function App. Changing + this forces a new resource to be created. Limit the function + name to 32 characters to avoid naming collisions. For more information + about Function App naming rule. + type: string + osType: + description: A string indicating the Operating System type for + this function app. Possible values are linux and “(empty string). + Changing this forces a new resource to be created. Defaults + to "". + type: string + outboundIpAddresses: + description: A comma separated list of outbound IP addresses - + such as 52.23.25.3,52.143.43.12 + type: string + possibleOutboundIpAddresses: + description: A comma separated list of outbound IP addresses - + such as 52.23.25.3,52.143.43.12,52.143.43.17 - not all of which + are necessarily in use. Superset of outbound_ip_addresses. + type: string + resourceGroupName: + description: The name of the resource group in which to create + the Function App. Changing this forces a new resource to be + created. + type: string + siteConfig: + description: A site_config object as defined below. + properties: + alwaysOn: + description: Should the Function App be loaded at all times? + Defaults to false. + type: boolean + appScaleLimit: + description: The number of workers this function app can scale + out to. Only applicable to apps on the Consumption and Premium + plan. + type: number + autoSwapSlotName: + description: The name of the slot to automatically swap to + during deployment + type: string + cors: + description: A cors block as defined below. 
+ properties: + allowedOrigins: + description: A list of origins which should be able to + make cross-origin calls. * can be used to allow all + calls. + items: + type: string + type: array + x-kubernetes-list-type: set + supportCredentials: + description: Are credentials supported? + type: boolean + type: object + dotnetFrameworkVersion: + description: The version of the .NET framework's CLR used + in this function app. Possible values are v4.0 (including + .NET Core 2.1 and 3.1), v5.0 and v6.0. For more information + on which .NET Framework version to use based on the runtime + version you're targeting - please see this table. Defaults + to v4.0. + type: string + elasticInstanceMinimum: + description: The number of minimum instances for this function + app. Only affects apps on the Premium plan. + type: number + ftpsState: + description: 'State of FTP / FTPS service for this function + app. Possible values include: AllAllowed, FtpsOnly and Disabled. + Defaults to AllAllowed.' + type: string + healthCheckPath: + description: Path which will be checked for this function + app health. + type: string + http2Enabled: + description: Specifies whether or not the HTTP2 protocol should + be enabled. Defaults to false. + type: boolean + ipRestriction: + description: A list of ip_restriction objects representing + IP restrictions as defined below. + items: + properties: + action: + description: Allow or Deny access for this IP range. + Defaults to Allow. + type: string + headers: + description: The headers block for this specific scm_ip_restriction + as defined below. + items: + properties: + xAzureFdid: + description: A list of allowed Azure FrontDoor + IDs in UUID notation with a maximum of 8. + items: + type: string + type: array + x-kubernetes-list-type: set + xFdHealthProbe: + description: A list to allow the Azure FrontDoor + health probe header. Only allowed value is "1". 
+ items: + type: string + type: array + x-kubernetes-list-type: set + xForwardedFor: + description: A list of allowed 'X-Forwarded-For' + IPs in CIDR notation with a maximum of 8 + items: + type: string + type: array + x-kubernetes-list-type: set + xForwardedHost: + description: A list of allowed 'X-Forwarded-Host' + domains with a maximum of 8. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + ipAddress: + description: The IP Address used for this IP Restriction + in CIDR notation. + type: string + name: + description: The name for this IP Restriction. + type: string + priority: + description: The priority for this IP Restriction. Restrictions + are enforced in priority order. By default, priority + is set to 65000 if not specified. + type: number + serviceTag: + description: The Service Tag used for this IP Restriction. + type: string + virtualNetworkSubnetId: + description: The Virtual Network Subnet ID used for + this IP Restriction. + type: string + type: object + type: array + javaVersion: + description: Java version hosted by the function app in Azure. + Possible values are 1.8, 11 & 17 (In-Preview). + type: string + linuxFxVersion: + description: Linux App Framework and version for the AppService, + e.g. DOCKER|(golang:latest). + type: string + minTlsVersion: + description: The minimum supported TLS version for the function + app. Possible values are 1.0, 1.1, and 1.2. Defaults to + 1.2 for new function apps. + type: string + preWarmedInstanceCount: + description: The number of pre-warmed instances for this function + app. Only affects apps on the Premium plan. + type: number + runtimeScaleMonitoringEnabled: + description: Should Runtime Scale Monitoring be enabled?. + Only applicable to apps on the Premium plan. Defaults to + false. + type: boolean + scmIpRestriction: + description: A list of scm_ip_restriction objects representing + IP restrictions as defined below. 
+ items: + properties: + action: + description: Allow or Deny access for this IP range. + Defaults to Allow. + type: string + headers: + description: The headers block for this specific scm_ip_restriction + as defined below. + items: + properties: + xAzureFdid: + description: A list of allowed Azure FrontDoor + IDs in UUID notation with a maximum of 8. + items: + type: string + type: array + x-kubernetes-list-type: set + xFdHealthProbe: + description: A list to allow the Azure FrontDoor + health probe header. Only allowed value is "1". + items: + type: string + type: array + x-kubernetes-list-type: set + xForwardedFor: + description: A list of allowed 'X-Forwarded-For' + IPs in CIDR notation with a maximum of 8 + items: + type: string + type: array + x-kubernetes-list-type: set + xForwardedHost: + description: A list of allowed 'X-Forwarded-Host' + domains with a maximum of 8. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + ipAddress: + description: The IP Address used for this IP Restriction + in CIDR notation. + type: string + name: + description: The name for this IP Restriction. + type: string + priority: + description: The priority for this IP Restriction. Restrictions + are enforced in priority order. By default, priority + is set to 65000 if not specified. + type: number + serviceTag: + description: The Service Tag used for this IP Restriction. + type: string + virtualNetworkSubnetId: + description: The Virtual Network Subnet ID used for + this IP Restriction. + type: string + type: object + type: array + scmType: + description: 'The type of Source Control used by the Function + App. Valid values include: BitBucketGit, BitBucketHg, CodePlexGit, + CodePlexHg, Dropbox, ExternalGit, ExternalHg, GitHub, LocalGit, + None (default), OneDrive, Tfs, VSO, and VSTSRM.' + type: string + scmUseMainIpRestriction: + description: IP security restrictions for scm to use main. + Defaults to false. 
+ type: boolean + use32BitWorkerProcess: + description: Should the Function App run in 32 bit mode, rather + than 64 bit mode? Defaults to true. + type: boolean + vnetRouteAllEnabled: + description: Should all outbound traffic to have Virtual Network + Security Groups and User Defined Routes applied? Defaults + to false. + type: boolean + websocketsEnabled: + description: Should WebSockets be enabled? + type: boolean + type: object + siteCredential: + description: A site_credential block as defined below, which contains + the site-level credentials used to publish to this App Service. + items: + properties: + password: + description: The password associated with the username, + which can be used to publish to this App Service. + type: string + username: + description: The username which can be used to publish to + this App Service + type: string + type: object + type: array + sourceControl: + description: A source_control block, as defined below. + properties: + branch: + description: The branch of the remote repository to use. Defaults + to 'master'. + type: string + manualIntegration: + description: Limits to manual integration. Defaults to false + if not specified. + type: boolean + repoUrl: + description: The URL of the source code repository. + type: string + rollbackEnabled: + description: Enable roll-back for the repository. Defaults + to false if not specified. + type: boolean + useMercurial: + description: Use Mercurial if true, otherwise uses Git. + type: boolean + type: object + storageAccountName: + description: The backend storage account name which will be used + by this Function App (such as the dashboard, logs). Changing + this forces a new resource to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + version: + description: The runtime version associated with the Function + App. Defaults to ~1. 
+ type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/web.azure.upbound.io_functionappslots.yaml b/package/crds/web.azure.upbound.io_functionappslots.yaml index 931c74493..1dc0f542f 100644 --- a/package/crds/web.azure.upbound.io_functionappslots.yaml +++ b/package/crds/web.azure.upbound.io_functionappslots.yaml @@ -2628,3 +2628,2543 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: FunctionAppSlot is the Schema for the FunctionAppSlots API. Manages + a Function App Deployment Slot. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: FunctionAppSlotSpec defines the desired state of FunctionAppSlot + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + appServicePlanId: + description: The ID of the App Service Plan within which to create + this Function App Slot. Changing this forces a new resource + to be created. + type: string + appServicePlanIdRef: + description: Reference to a AppServicePlan in web to populate + appServicePlanId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + appServicePlanIdSelector: + description: Selector for a AppServicePlan in web to populate + appServicePlanId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + appSettings: + additionalProperties: + type: string + description: A key-value pair of App Settings. + type: object + x-kubernetes-map-type: granular + authSettings: + description: An auth_settings block as defined below. + properties: + activeDirectory: + description: An active_directory block as defined below. + properties: + allowedAudiences: + description: Allowed audience values to consider when + validating JWTs issued by Azure Active Directory. + items: + type: string + type: array + clientId: + description: The OAuth 2.0 client ID that was created + for the app used for authentication. 
+ type: string + clientSecretSecretRef: + description: The OAuth 2.0 client secret that was created + for the app used for authentication. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + additionalLoginParams: + additionalProperties: + type: string + description: login parameters to send to the OpenID Connect + authorization endpoint when a user logs in. Each parameter + must be in the form "key=value". + type: object + x-kubernetes-map-type: granular + allowedExternalRedirectUrls: + description: External URLs that can be redirected to as part + of logging in or logging out of the app. + items: + type: string + type: array + defaultProvider: + description: The default provider to use when multiple providers + have been set up. Possible values are AzureActiveDirectory, + Facebook, Google, MicrosoftAccount and Twitter. + type: string + enabled: + description: Is Authentication enabled? + type: boolean + facebook: + description: A facebook block as defined below. + properties: + appId: + description: The App ID of the Facebook app used for login + type: string + appSecretSecretRef: + description: The App Secret of the Facebook app used for + Facebook login. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + oauthScopes: + description: The OAuth 2.0 scopes that will be requested + as part of Microsoft Account authentication. https://msdn.microsoft.com/en-us/library/dn631845.aspx + items: + type: string + type: array + required: + - appSecretSecretRef + type: object + google: + description: A google block as defined below. 
+ properties: + clientId: + description: The OAuth 2.0 client ID that was created + for the app used for authentication. + type: string + clientSecretSecretRef: + description: The OAuth 2.0 client secret that was created + for the app used for authentication. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + oauthScopes: + description: The OAuth 2.0 scopes that will be requested + as part of Microsoft Account authentication. https://msdn.microsoft.com/en-us/library/dn631845.aspx + items: + type: string + type: array + required: + - clientSecretSecretRef + type: object + issuer: + description: Issuer URI. When using Azure Active Directory, + this value is the URI of the directory tenant, e.g. https://sts.windows.net/{tenant-guid}/. + type: string + microsoft: + description: A microsoft block as defined below. + properties: + clientId: + description: The OAuth 2.0 client ID that was created + for the app used for authentication. + type: string + clientSecretSecretRef: + description: The OAuth 2.0 client secret that was created + for the app used for authentication. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + oauthScopes: + description: The OAuth 2.0 scopes that will be requested + as part of Microsoft Account authentication. https://msdn.microsoft.com/en-us/library/dn631845.aspx + items: + type: string + type: array + required: + - clientSecretSecretRef + type: object + runtimeVersion: + description: The runtime version of the Authentication/Authorization + module. 
+ type: string + tokenRefreshExtensionHours: + description: The number of hours after session token expiration + that a session token can be used to call the token refresh + API. Defaults to 72. + type: number + tokenStoreEnabled: + description: If enabled the module will durably store platform-specific + security tokens that are obtained during login flows. Defaults + to false. + type: boolean + twitter: + description: A twitter block as defined below. + properties: + consumerKey: + description: The OAuth 1.0a consumer key of the Twitter + application used for sign-in. + type: string + consumerSecretSecretRef: + description: The OAuth 1.0a consumer secret of the Twitter + application used for sign-in. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + required: + - consumerSecretSecretRef + type: object + unauthenticatedClientAction: + description: The action to take when an unauthenticated client + attempts to access the app. Possible values are AllowAnonymous + and RedirectToLoginPage. + type: string + type: object + connectionString: + description: A connection_string block as defined below. + items: + properties: + name: + description: The name of the Connection String. + type: string + type: + description: The type of the Connection String. Possible + values are APIHub, Custom, DocDb, EventHub, MySQL, NotificationHub, + PostgreSQL, RedisCache, ServiceBus, SQLAzure and SQLServer. + type: string + valueSecretRef: + description: The value for the Connection String. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - key + - name + - namespace + type: object + required: + - valueSecretRef + type: object + type: array + dailyMemoryTimeQuota: + description: The amount of memory in gigabyte-seconds that your + application is allowed to consume per day. Setting this value + only affects function apps under the consumption plan. + type: number + enableBuiltinLogging: + description: Should the built-in logging of the Function App be + enabled? Defaults to true. + type: boolean + enabled: + description: Is the Function App enabled? Defaults to true. + type: boolean + functionAppName: + description: The name of the Function App within which to create + the Function App Slot. Changing this forces a new resource to + be created. + type: string + functionAppNameRef: + description: Reference to a FunctionApp in web to populate functionAppName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + functionAppNameSelector: + description: Selector for a FunctionApp in web to populate functionAppName. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + httpsOnly: + description: Can the Function App only be accessed via HTTPS? + Defaults to false. + type: boolean + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of user managed identity ids + to be assigned. Required if type is UserAssigned. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the identity type of the Function App. + Possible values are SystemAssigned (where Azure will generate + a Service Principal for you), UserAssigned where you can + specify the Service Principal IDs in the identity_ids field, + and SystemAssigned, UserAssigned which assigns both a system + managed identity as well as the specified user assigned + identities. 
+ type: string + type: object + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + osType: + description: A string indicating the Operating System type for + this function app. The only possible value is linux. Changing + this forces a new resource to be created. + type: string + resourceGroupName: + description: The name of the resource group in which to create + the Function App Slot. Changing this forces a new resource to + be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + siteConfig: + description: A site_config object as defined below. + properties: + alwaysOn: + description: Should the Function App be loaded at all times? + Defaults to false. + type: boolean + appScaleLimit: + description: The number of workers this function app can scale + out to. Only applicable to apps on the Consumption and Premium + plan. + type: number + autoSwapSlotName: + description: The name of the slot to automatically swap to + during deployment + type: string + cors: + description: A cors block as defined below. + properties: + allowedOrigins: + description: A list of origins which should be able to + make cross-origin calls. * can be used to allow all + calls. + items: + type: string + type: array + x-kubernetes-list-type: set + supportCredentials: + description: Are credentials supported? + type: boolean + type: object + dotnetFrameworkVersion: + description: The version of the .NET framework's CLR used + in this function app. Possible values are v4.0 (including + .NET Core 2.1 and 3.1), v5.0 and v6.0. For more information + on which .NET Framework version to use based on the runtime + version you're targeting - please see this table. Defaults + to v4.0. 
+ type: string + elasticInstanceMinimum: + description: The number of minimum instances for this function + app. Only applicable to apps on the Premium plan. + type: number + ftpsState: + description: 'State of FTP / FTPS service for this function + app. Possible values include: AllAllowed, FtpsOnly and Disabled.' + type: string + healthCheckPath: + description: Path which will be checked for this function + app health. + type: string + http2Enabled: + description: Specifies whether or not the HTTP2 protocol should + be enabled. Defaults to false. + type: boolean + ipRestriction: + description: A list of ip_restriction objects representing + IP restrictions as defined below. + items: + properties: + action: + description: Allow or Deny access for this IP range. + Defaults to Allow. + type: string + headers: + description: The headers block for this specific scm_ip_restriction + as defined below. + items: + properties: + xAzureFdid: + description: A list of allowed Azure FrontDoor + IDs in UUID notation with a maximum of 8. + items: + type: string + type: array + x-kubernetes-list-type: set + xFdHealthProbe: + description: A list to allow the Azure FrontDoor + health probe header. Only allowed value is "1". + items: + type: string + type: array + x-kubernetes-list-type: set + xForwardedFor: + description: A list of allowed 'X-Forwarded-For' + IPs in CIDR notation with a maximum of 8 + items: + type: string + type: array + x-kubernetes-list-type: set + xForwardedHost: + description: A list of allowed 'X-Forwarded-Host' + domains with a maximum of 8. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + ipAddress: + description: The IP Address used for this IP Restriction + in CIDR notation. + type: string + name: + description: The name for this IP Restriction. + type: string + priority: + description: The priority for this IP Restriction. Restrictions + are enforced in priority order. 
By default, priority + is set to 65000 if not specified. + type: number + serviceTag: + description: The Service Tag used for this IP Restriction. + type: string + virtualNetworkSubnetId: + description: The Virtual Network Subnet ID used for + this IP Restriction. + type: string + virtualNetworkSubnetIdRef: + description: Reference to a Subnet in network to populate + virtualNetworkSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkSubnetIdSelector: + description: Selector for a Subnet in network to populate + virtualNetworkSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + javaVersion: + description: Java version hosted by the function app in Azure. + Possible values are 1.8, 11 & 17 (In-Preview). + type: string + linuxFxVersion: + description: Linux App Framework and version for the AppService, + e.g. DOCKER|(golang:latest). + type: string + minTlsVersion: + description: The minimum supported TLS version for the function + app. Possible values are 1.0, 1.1, and 1.2. Defaults to + 1.2 for new function apps. + type: string + preWarmedInstanceCount: + description: The number of pre-warmed instances for this function + app. Only affects apps on the Premium plan. + type: number + runtimeScaleMonitoringEnabled: + description: Should Runtime Scale Monitoring be enabled?. + Only applicable to apps on the Premium plan. Defaults to + false. + type: boolean + scmIpRestriction: + description: A list of scm_ip_restriction objects representing + IP restrictions as defined below. + items: + properties: + action: + description: Allow or Deny access for this IP range. + Defaults to Allow. + type: string + headers: + description: The headers block for this specific scm_ip_restriction + as defined below. + items: + properties: + xAzureFdid: + description: A list of allowed Azure FrontDoor + IDs in UUID notation with a maximum of 8. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + xFdHealthProbe: + description: A list to allow the Azure FrontDoor + health probe header. Only allowed value is "1". + items: + type: string + type: array + x-kubernetes-list-type: set + xForwardedFor: + description: A list of allowed 'X-Forwarded-For' + IPs in CIDR notation with a maximum of 8 + items: + type: string + type: array + x-kubernetes-list-type: set + xForwardedHost: + description: A list of allowed 'X-Forwarded-Host' + domains with a maximum of 8. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + ipAddress: + description: The IP Address used for this IP Restriction + in CIDR notation. + type: string + name: + description: The name for this IP Restriction. + type: string + priority: + description: The priority for this IP Restriction. Restrictions + are enforced in priority order. By default, priority + is set to 65000 if not specified. + type: number + serviceTag: + description: The Service Tag used for this IP Restriction. + type: string + virtualNetworkSubnetId: + description: The Virtual Network Subnet ID used for + this IP Restriction. + type: string + virtualNetworkSubnetIdRef: + description: Reference to a Subnet in network to populate + virtualNetworkSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkSubnetIdSelector: + description: Selector for a Subnet in network to populate + virtualNetworkSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + scmType: + description: 'The type of Source Control used by this function + App. Valid values include: BitBucketGit, BitBucketHg, CodePlexGit, + CodePlexHg, Dropbox, ExternalGit, ExternalHg, GitHub, LocalGit, + None (default), OneDrive, Tfs, VSO, and VSTSRM.' + type: string + scmUseMainIpRestriction: + description: IP security restrictions for scm to use main. + Defaults to false. 
+ type: boolean + use32BitWorkerProcess: + description: Should the Function App run in 32 bit mode, rather + than 64 bit mode? Defaults to true. + type: boolean + vnetRouteAllEnabled: + description: Is the Function App enabled? Defaults to true. + type: boolean + websocketsEnabled: + description: Should WebSockets be enabled? + type: boolean + type: object + storageAccountAccessKeySecretRef: + description: The access key which will be used to access the backend + storage account for the Function App. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + storageAccountName: + description: The backend storage account name which will be used + by the Function App (such as the dashboard, logs). Changing + this forces a new resource to be created. + type: string + storageAccountNameRef: + description: Reference to a Account in storage to populate storageAccountName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + storageAccountNameSelector: + description: Selector for a Account in storage to populate storageAccountName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + version: + description: The runtime version associated with the Function + App. Defaults to ~1. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. 
+ The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + appServicePlanId: + description: The ID of the App Service Plan within which to create + this Function App Slot. Changing this forces a new resource + to be created. + type: string + appServicePlanIdRef: + description: Reference to a AppServicePlan in web to populate + appServicePlanId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + appServicePlanIdSelector: + description: Selector for a AppServicePlan in web to populate + appServicePlanId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + appSettings: + additionalProperties: + type: string + description: A key-value pair of App Settings. + type: object + x-kubernetes-map-type: granular + authSettings: + description: An auth_settings block as defined below. + properties: + activeDirectory: + description: An active_directory block as defined below. + properties: + allowedAudiences: + description: Allowed audience values to consider when + validating JWTs issued by Azure Active Directory. + items: + type: string + type: array + clientId: + description: The OAuth 2.0 client ID that was created + for the app used for authentication. + type: string + type: object + additionalLoginParams: + additionalProperties: + type: string + description: login parameters to send to the OpenID Connect + authorization endpoint when a user logs in. Each parameter + must be in the form "key=value". + type: object + x-kubernetes-map-type: granular + allowedExternalRedirectUrls: + description: External URLs that can be redirected to as part + of logging in or logging out of the app. + items: + type: string + type: array + defaultProvider: + description: The default provider to use when multiple providers + have been set up. 
Possible values are AzureActiveDirectory, + Facebook, Google, MicrosoftAccount and Twitter. + type: string + enabled: + description: Is Authentication enabled? + type: boolean + facebook: + description: A facebook block as defined below. + properties: + appId: + description: The App ID of the Facebook app used for login + type: string + oauthScopes: + description: The OAuth 2.0 scopes that will be requested + as part of Microsoft Account authentication. https://msdn.microsoft.com/en-us/library/dn631845.aspx + items: + type: string + type: array + type: object + google: + description: A google block as defined below. + properties: + clientId: + description: The OAuth 2.0 client ID that was created + for the app used for authentication. + type: string + oauthScopes: + description: The OAuth 2.0 scopes that will be requested + as part of Microsoft Account authentication. https://msdn.microsoft.com/en-us/library/dn631845.aspx + items: + type: string + type: array + type: object + issuer: + description: Issuer URI. When using Azure Active Directory, + this value is the URI of the directory tenant, e.g. https://sts.windows.net/{tenant-guid}/. + type: string + microsoft: + description: A microsoft block as defined below. + properties: + clientId: + description: The OAuth 2.0 client ID that was created + for the app used for authentication. + type: string + oauthScopes: + description: The OAuth 2.0 scopes that will be requested + as part of Microsoft Account authentication. https://msdn.microsoft.com/en-us/library/dn631845.aspx + items: + type: string + type: array + type: object + runtimeVersion: + description: The runtime version of the Authentication/Authorization + module. + type: string + tokenRefreshExtensionHours: + description: The number of hours after session token expiration + that a session token can be used to call the token refresh + API. Defaults to 72. 
+ type: number + tokenStoreEnabled: + description: If enabled the module will durably store platform-specific + security tokens that are obtained during login flows. Defaults + to false. + type: boolean + twitter: + description: A twitter block as defined below. + properties: + consumerKey: + description: The OAuth 1.0a consumer key of the Twitter + application used for sign-in. + type: string + type: object + unauthenticatedClientAction: + description: The action to take when an unauthenticated client + attempts to access the app. Possible values are AllowAnonymous + and RedirectToLoginPage. + type: string + type: object + connectionString: + description: A connection_string block as defined below. + items: + properties: + name: + description: The name of the Connection String. + type: string + type: + description: The type of the Connection String. Possible + values are APIHub, Custom, DocDb, EventHub, MySQL, NotificationHub, + PostgreSQL, RedisCache, ServiceBus, SQLAzure and SQLServer. + type: string + type: object + type: array + dailyMemoryTimeQuota: + description: The amount of memory in gigabyte-seconds that your + application is allowed to consume per day. Setting this value + only affects function apps under the consumption plan. + type: number + enableBuiltinLogging: + description: Should the built-in logging of the Function App be + enabled? Defaults to true. + type: boolean + enabled: + description: Is the Function App enabled? Defaults to true. + type: boolean + httpsOnly: + description: Can the Function App only be accessed via HTTPS? + Defaults to false. + type: boolean + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of user managed identity ids + to be assigned. Required if type is UserAssigned. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the identity type of the Function App. 
+ Possible values are SystemAssigned (where Azure will generate + a Service Principal for you), UserAssigned where you can + specify the Service Principal IDs in the identity_ids field, + and SystemAssigned, UserAssigned which assigns both a system + managed identity as well as the specified user assigned + identities. + type: string + type: object + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + osType: + description: A string indicating the Operating System type for + this function app. The only possible value is linux. Changing + this forces a new resource to be created. + type: string + siteConfig: + description: A site_config object as defined below. + properties: + alwaysOn: + description: Should the Function App be loaded at all times? + Defaults to false. + type: boolean + appScaleLimit: + description: The number of workers this function app can scale + out to. Only applicable to apps on the Consumption and Premium + plan. + type: number + autoSwapSlotName: + description: The name of the slot to automatically swap to + during deployment + type: string + cors: + description: A cors block as defined below. + properties: + allowedOrigins: + description: A list of origins which should be able to + make cross-origin calls. * can be used to allow all + calls. + items: + type: string + type: array + x-kubernetes-list-type: set + supportCredentials: + description: Are credentials supported? + type: boolean + type: object + dotnetFrameworkVersion: + description: The version of the .NET framework's CLR used + in this function app. Possible values are v4.0 (including + .NET Core 2.1 and 3.1), v5.0 and v6.0. For more information + on which .NET Framework version to use based on the runtime + version you're targeting - please see this table. Defaults + to v4.0. 
+ type: string + elasticInstanceMinimum: + description: The number of minimum instances for this function + app. Only applicable to apps on the Premium plan. + type: number + ftpsState: + description: 'State of FTP / FTPS service for this function + app. Possible values include: AllAllowed, FtpsOnly and Disabled.' + type: string + healthCheckPath: + description: Path which will be checked for this function + app health. + type: string + http2Enabled: + description: Specifies whether or not the HTTP2 protocol should + be enabled. Defaults to false. + type: boolean + ipRestriction: + description: A list of ip_restriction objects representing + IP restrictions as defined below. + items: + properties: + action: + description: Allow or Deny access for this IP range. + Defaults to Allow. + type: string + headers: + description: The headers block for this specific scm_ip_restriction + as defined below. + items: + properties: + xAzureFdid: + description: A list of allowed Azure FrontDoor + IDs in UUID notation with a maximum of 8. + items: + type: string + type: array + x-kubernetes-list-type: set + xFdHealthProbe: + description: A list to allow the Azure FrontDoor + health probe header. Only allowed value is "1". + items: + type: string + type: array + x-kubernetes-list-type: set + xForwardedFor: + description: A list of allowed 'X-Forwarded-For' + IPs in CIDR notation with a maximum of 8 + items: + type: string + type: array + x-kubernetes-list-type: set + xForwardedHost: + description: A list of allowed 'X-Forwarded-Host' + domains with a maximum of 8. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + ipAddress: + description: The IP Address used for this IP Restriction + in CIDR notation. + type: string + name: + description: The name for this IP Restriction. + type: string + priority: + description: The priority for this IP Restriction. Restrictions + are enforced in priority order. 
By default, priority + is set to 65000 if not specified. + type: number + serviceTag: + description: The Service Tag used for this IP Restriction. + type: string + virtualNetworkSubnetId: + description: The Virtual Network Subnet ID used for + this IP Restriction. + type: string + virtualNetworkSubnetIdRef: + description: Reference to a Subnet in network to populate + virtualNetworkSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkSubnetIdSelector: + description: Selector for a Subnet in network to populate + virtualNetworkSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + javaVersion: + description: Java version hosted by the function app in Azure. + Possible values are 1.8, 11 & 17 (In-Preview). + type: string + linuxFxVersion: + description: Linux App Framework and version for the AppService, + e.g. DOCKER|(golang:latest). + type: string + minTlsVersion: + description: The minimum supported TLS version for the function + app. Possible values are 1.0, 1.1, and 1.2. Defaults to + 1.2 for new function apps. + type: string + preWarmedInstanceCount: + description: The number of pre-warmed instances for this function + app. Only affects apps on the Premium plan. + type: number + runtimeScaleMonitoringEnabled: + description: Should Runtime Scale Monitoring be enabled?. + Only applicable to apps on the Premium plan. Defaults to + false. + type: boolean + scmIpRestriction: + description: A list of scm_ip_restriction objects representing + IP restrictions as defined below. + items: + properties: + action: + description: Allow or Deny access for this IP range. + Defaults to Allow. + type: string + headers: + description: The headers block for this specific scm_ip_restriction + as defined below. + items: + properties: + xAzureFdid: + description: A list of allowed Azure FrontDoor + IDs in UUID notation with a maximum of 8. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + xFdHealthProbe: + description: A list to allow the Azure FrontDoor + health probe header. Only allowed value is "1". + items: + type: string + type: array + x-kubernetes-list-type: set + xForwardedFor: + description: A list of allowed 'X-Forwarded-For' + IPs in CIDR notation with a maximum of 8 + items: + type: string + type: array + x-kubernetes-list-type: set + xForwardedHost: + description: A list of allowed 'X-Forwarded-Host' + domains with a maximum of 8. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + ipAddress: + description: The IP Address used for this IP Restriction + in CIDR notation. + type: string + name: + description: The name for this IP Restriction. + type: string + priority: + description: The priority for this IP Restriction. Restrictions + are enforced in priority order. By default, priority + is set to 65000 if not specified. + type: number + serviceTag: + description: The Service Tag used for this IP Restriction. + type: string + virtualNetworkSubnetId: + description: The Virtual Network Subnet ID used for + this IP Restriction. + type: string + virtualNetworkSubnetIdRef: + description: Reference to a Subnet in network to populate + virtualNetworkSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkSubnetIdSelector: + description: Selector for a Subnet in network to populate + virtualNetworkSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + scmType: + description: 'The type of Source Control used by this function + App. Valid values include: BitBucketGit, BitBucketHg, CodePlexGit, + CodePlexHg, Dropbox, ExternalGit, ExternalHg, GitHub, LocalGit, + None (default), OneDrive, Tfs, VSO, and VSTSRM.' + type: string + scmUseMainIpRestriction: + description: IP security restrictions for scm to use main. + Defaults to false. 
+ type: boolean + use32BitWorkerProcess: + description: Should the Function App run in 32 bit mode, rather + than 64 bit mode? Defaults to true. + type: boolean + vnetRouteAllEnabled: + description: Is the Function App enabled? Defaults to true. + type: boolean + websocketsEnabled: + description: Should WebSockets be enabled? + type: boolean + type: object + storageAccountName: + description: The backend storage account name which will be used + by the Function App (such as the dashboard, logs). Changing + this forces a new resource to be created. + type: string + storageAccountNameRef: + description: Reference to a Account in storage to populate storageAccountName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + storageAccountNameSelector: + description: Selector for a Account in storage to populate storageAccountName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + version: + description: The runtime version associated with the Function + App. Defaults to ~1. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.storageAccountAccessKeySecretRef is a required + parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.storageAccountAccessKeySecretRef)' + status: + description: FunctionAppSlotStatus defines the observed state of FunctionAppSlot. + properties: + atProvider: + properties: + appServicePlanId: + description: The ID of the App Service Plan within which to create + this Function App Slot. Changing this forces a new resource + to be created. + type: string + appSettings: + additionalProperties: + type: string + description: A key-value pair of App Settings. 
+ type: object + x-kubernetes-map-type: granular + authSettings: + description: An auth_settings block as defined below. + properties: + activeDirectory: + description: An active_directory block as defined below. + properties: + allowedAudiences: + description: Allowed audience values to consider when + validating JWTs issued by Azure Active Directory. + items: + type: string + type: array + clientId: + description: The OAuth 2.0 client ID that was created + for the app used for authentication. + type: string + type: object + additionalLoginParams: + additionalProperties: + type: string + description: login parameters to send to the OpenID Connect + authorization endpoint when a user logs in. Each parameter + must be in the form "key=value". + type: object + x-kubernetes-map-type: granular + allowedExternalRedirectUrls: + description: External URLs that can be redirected to as part + of logging in or logging out of the app. + items: + type: string + type: array + defaultProvider: + description: The default provider to use when multiple providers + have been set up. Possible values are AzureActiveDirectory, + Facebook, Google, MicrosoftAccount and Twitter. + type: string + enabled: + description: Is Authentication enabled? + type: boolean + facebook: + description: A facebook block as defined below. + properties: + appId: + description: The App ID of the Facebook app used for login + type: string + oauthScopes: + description: The OAuth 2.0 scopes that will be requested + as part of Microsoft Account authentication. https://msdn.microsoft.com/en-us/library/dn631845.aspx + items: + type: string + type: array + type: object + google: + description: A google block as defined below. + properties: + clientId: + description: The OAuth 2.0 client ID that was created + for the app used for authentication. + type: string + oauthScopes: + description: The OAuth 2.0 scopes that will be requested + as part of Microsoft Account authentication. 
https://msdn.microsoft.com/en-us/library/dn631845.aspx + items: + type: string + type: array + type: object + issuer: + description: Issuer URI. When using Azure Active Directory, + this value is the URI of the directory tenant, e.g. https://sts.windows.net/{tenant-guid}/. + type: string + microsoft: + description: A microsoft block as defined below. + properties: + clientId: + description: The OAuth 2.0 client ID that was created + for the app used for authentication. + type: string + oauthScopes: + description: The OAuth 2.0 scopes that will be requested + as part of Microsoft Account authentication. https://msdn.microsoft.com/en-us/library/dn631845.aspx + items: + type: string + type: array + type: object + runtimeVersion: + description: The runtime version of the Authentication/Authorization + module. + type: string + tokenRefreshExtensionHours: + description: The number of hours after session token expiration + that a session token can be used to call the token refresh + API. Defaults to 72. + type: number + tokenStoreEnabled: + description: If enabled the module will durably store platform-specific + security tokens that are obtained during login flows. Defaults + to false. + type: boolean + twitter: + description: A twitter block as defined below. + properties: + consumerKey: + description: The OAuth 1.0a consumer key of the Twitter + application used for sign-in. + type: string + type: object + unauthenticatedClientAction: + description: The action to take when an unauthenticated client + attempts to access the app. Possible values are AllowAnonymous + and RedirectToLoginPage. + type: string + type: object + connectionString: + description: A connection_string block as defined below. + items: + properties: + name: + description: The name of the Connection String. + type: string + type: + description: The type of the Connection String. 
Possible + values are APIHub, Custom, DocDb, EventHub, MySQL, NotificationHub, + PostgreSQL, RedisCache, ServiceBus, SQLAzure and SQLServer. + type: string + type: object + type: array + dailyMemoryTimeQuota: + description: The amount of memory in gigabyte-seconds that your + application is allowed to consume per day. Setting this value + only affects function apps under the consumption plan. + type: number + defaultHostname: + description: The default hostname associated with the Function + App - such as mysite.azurewebsites.net + type: string + enableBuiltinLogging: + description: Should the built-in logging of the Function App be + enabled? Defaults to true. + type: boolean + enabled: + description: Is the Function App enabled? Defaults to true. + type: boolean + functionAppName: + description: The name of the Function App within which to create + the Function App Slot. Changing this forces a new resource to + be created. + type: string + httpsOnly: + description: Can the Function App only be accessed via HTTPS? + Defaults to false. + type: boolean + id: + description: The ID of the Function App Slot + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: Specifies a list of user managed identity ids + to be assigned. Required if type is UserAssigned. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Principal ID for the Service Principal associated + with the Managed Service Identity of this App Service. + type: string + tenantId: + description: The Tenant ID for the Service Principal associated + with the Managed Service Identity of this App Service. + type: string + type: + description: Specifies the identity type of the Function App. 
+ Possible values are SystemAssigned (where Azure will generate + a Service Principal for you), UserAssigned where you can + specify the Service Principal IDs in the identity_ids field, + and SystemAssigned, UserAssigned which assigns both a system + managed identity as well as the specified user assigned + identities. + type: string + type: object + kind: + description: The Function App kind - such as functionapp,linux,container + type: string + location: + description: Specifies the supported Azure location where the + resource exists. Changing this forces a new resource to be created. + type: string + osType: + description: A string indicating the Operating System type for + this function app. The only possible value is linux. Changing + this forces a new resource to be created. + type: string + outboundIpAddresses: + description: A comma separated list of outbound IP addresses - + such as 52.23.25.3,52.143.43.12 + type: string + possibleOutboundIpAddresses: + description: A comma separated list of outbound IP addresses - + such as 52.23.25.3,52.143.43.12,52.143.43.17 - not all of which + are necessarily in use. Superset of outbound_ip_addresses. + type: string + resourceGroupName: + description: The name of the resource group in which to create + the Function App Slot. Changing this forces a new resource to + be created. + type: string + siteConfig: + description: A site_config object as defined below. + properties: + alwaysOn: + description: Should the Function App be loaded at all times? + Defaults to false. + type: boolean + appScaleLimit: + description: The number of workers this function app can scale + out to. Only applicable to apps on the Consumption and Premium + plan. + type: number + autoSwapSlotName: + description: The name of the slot to automatically swap to + during deployment + type: string + cors: + description: A cors block as defined below. 
+ properties: + allowedOrigins: + description: A list of origins which should be able to + make cross-origin calls. * can be used to allow all + calls. + items: + type: string + type: array + x-kubernetes-list-type: set + supportCredentials: + description: Are credentials supported? + type: boolean + type: object + dotnetFrameworkVersion: + description: The version of the .NET framework's CLR used + in this function app. Possible values are v4.0 (including + .NET Core 2.1 and 3.1), v5.0 and v6.0. For more information + on which .NET Framework version to use based on the runtime + version you're targeting - please see this table. Defaults + to v4.0. + type: string + elasticInstanceMinimum: + description: The number of minimum instances for this function + app. Only applicable to apps on the Premium plan. + type: number + ftpsState: + description: 'State of FTP / FTPS service for this function + app. Possible values include: AllAllowed, FtpsOnly and Disabled.' + type: string + healthCheckPath: + description: Path which will be checked for this function + app health. + type: string + http2Enabled: + description: Specifies whether or not the HTTP2 protocol should + be enabled. Defaults to false. + type: boolean + ipRestriction: + description: A list of ip_restriction objects representing + IP restrictions as defined below. + items: + properties: + action: + description: Allow or Deny access for this IP range. + Defaults to Allow. + type: string + headers: + description: The headers block for this specific scm_ip_restriction + as defined below. + items: + properties: + xAzureFdid: + description: A list of allowed Azure FrontDoor + IDs in UUID notation with a maximum of 8. + items: + type: string + type: array + x-kubernetes-list-type: set + xFdHealthProbe: + description: A list to allow the Azure FrontDoor + health probe header. Only allowed value is "1". 
+ items: + type: string + type: array + x-kubernetes-list-type: set + xForwardedFor: + description: A list of allowed 'X-Forwarded-For' + IPs in CIDR notation with a maximum of 8 + items: + type: string + type: array + x-kubernetes-list-type: set + xForwardedHost: + description: A list of allowed 'X-Forwarded-Host' + domains with a maximum of 8. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + ipAddress: + description: The IP Address used for this IP Restriction + in CIDR notation. + type: string + name: + description: The name for this IP Restriction. + type: string + priority: + description: The priority for this IP Restriction. Restrictions + are enforced in priority order. By default, priority + is set to 65000 if not specified. + type: number + serviceTag: + description: The Service Tag used for this IP Restriction. + type: string + virtualNetworkSubnetId: + description: The Virtual Network Subnet ID used for + this IP Restriction. + type: string + type: object + type: array + javaVersion: + description: Java version hosted by the function app in Azure. + Possible values are 1.8, 11 & 17 (In-Preview). + type: string + linuxFxVersion: + description: Linux App Framework and version for the AppService, + e.g. DOCKER|(golang:latest). + type: string + minTlsVersion: + description: The minimum supported TLS version for the function + app. Possible values are 1.0, 1.1, and 1.2. Defaults to + 1.2 for new function apps. + type: string + preWarmedInstanceCount: + description: The number of pre-warmed instances for this function + app. Only affects apps on the Premium plan. + type: number + runtimeScaleMonitoringEnabled: + description: Should Runtime Scale Monitoring be enabled?. + Only applicable to apps on the Premium plan. Defaults to + false. + type: boolean + scmIpRestriction: + description: A list of scm_ip_restriction objects representing + IP restrictions as defined below. 
+ items: + properties: + action: + description: Allow or Deny access for this IP range. + Defaults to Allow. + type: string + headers: + description: The headers block for this specific scm_ip_restriction + as defined below. + items: + properties: + xAzureFdid: + description: A list of allowed Azure FrontDoor + IDs in UUID notation with a maximum of 8. + items: + type: string + type: array + x-kubernetes-list-type: set + xFdHealthProbe: + description: A list to allow the Azure FrontDoor + health probe header. Only allowed value is "1". + items: + type: string + type: array + x-kubernetes-list-type: set + xForwardedFor: + description: A list of allowed 'X-Forwarded-For' + IPs in CIDR notation with a maximum of 8 + items: + type: string + type: array + x-kubernetes-list-type: set + xForwardedHost: + description: A list of allowed 'X-Forwarded-Host' + domains with a maximum of 8. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: array + ipAddress: + description: The IP Address used for this IP Restriction + in CIDR notation. + type: string + name: + description: The name for this IP Restriction. + type: string + priority: + description: The priority for this IP Restriction. Restrictions + are enforced in priority order. By default, priority + is set to 65000 if not specified. + type: number + serviceTag: + description: The Service Tag used for this IP Restriction. + type: string + virtualNetworkSubnetId: + description: The Virtual Network Subnet ID used for + this IP Restriction. + type: string + type: object + type: array + scmType: + description: 'The type of Source Control used by this function + App. Valid values include: BitBucketGit, BitBucketHg, CodePlexGit, + CodePlexHg, Dropbox, ExternalGit, ExternalHg, GitHub, LocalGit, + None (default), OneDrive, Tfs, VSO, and VSTSRM.' + type: string + scmUseMainIpRestriction: + description: IP security restrictions for scm to use main. + Defaults to false. 
+ type: boolean + use32BitWorkerProcess: + description: Should the Function App run in 32 bit mode, rather + than 64 bit mode? Defaults to true. + type: boolean + vnetRouteAllEnabled: + description: Is the Function App enabled? Defaults to true. + type: boolean + websocketsEnabled: + description: Should WebSockets be enabled? + type: boolean + type: object + siteCredential: + description: A site_credential block as defined below, which contains + the site-level credentials used to publish to this Function + App Slot. + items: + properties: + password: + description: The password associated with the username, + which can be used to publish to this App Service. + type: string + username: + description: The username which can be used to publish to + this App Service + type: string + type: object + type: array + storageAccountName: + description: The backend storage account name which will be used + by the Function App (such as the dashboard, logs). Changing + this forces a new resource to be created. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + version: + description: The runtime version associated with the Function + App. Defaults to ~1. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/web.azure.upbound.io_linuxfunctionapps.yaml b/package/crds/web.azure.upbound.io_linuxfunctionapps.yaml index 7eb3dc641..1981004cd 100644 --- a/package/crds/web.azure.upbound.io_linuxfunctionapps.yaml +++ b/package/crds/web.azure.upbound.io_linuxfunctionapps.yaml @@ -5282,3 +5282,5114 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: LinuxFunctionApp is the Schema for the LinuxFunctionApps API. 
+ Manages a Linux Function App. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: LinuxFunctionAppSpec defines the desired state of LinuxFunctionApp + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + appSettings: + additionalProperties: + type: string + description: |- + A map of key-value pairs for App Settings and custom values. + A map of key-value pairs for [App Settings](https://docs.microsoft.com/en-us/azure/azure-functions/functions-app-settings) and custom values. + type: object + x-kubernetes-map-type: granular + authSettings: + description: A auth_settings block as defined below. 
+ properties: + activeDirectory: + description: An active_directory block as defined above. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Active Directory. + type: string + clientSecretSecretRef: + description: |- + The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. + The Client Secret for the Client ID. Cannot be used with `client_secret_setting_name`. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The App Setting name that contains the client secret of the Client. Cannot be used with `client_secret`. + type: string + type: object + additionalLoginParameters: + additionalProperties: + type: string + description: |- + Specifies a map of login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + Specifies a map of Login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + type: object + x-kubernetes-map-type: granular + allowedExternalRedirectUrls: + description: |- + Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Linux Web App. 
+ Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + items: + type: string + type: array + defaultProvider: + description: |- + The default authentication provider to use when multiple providers are configured. Possible values include: AzureActiveDirectory, Facebook, Google, MicrosoftAccount, Twitter, Github + The default authentication provider to use when multiple providers are configured. Possible values include: `AzureActiveDirectory`, `Facebook`, `Google`, `MicrosoftAccount`, `Twitter`, `Github`. + type: string + enabled: + description: |- + Should the Authentication / Authorization feature be enabled for the Linux Web App? + Should the Authentication / Authorization feature be enabled? + type: boolean + facebook: + description: A facebook block as defined below. + properties: + appId: + description: |- + The App ID of the Facebook app used for login. + The App ID of the Facebook app used for login. + type: string + appSecretSecretRef: + description: |- + The App Secret of the Facebook app used for Facebook login. Cannot be specified with app_secret_setting_name. + The App Secret of the Facebook app used for Facebook Login. Cannot be specified with `app_secret_setting_name`. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + appSecretSettingName: + description: |- + The app setting name that contains the app_secret value used for Facebook Login. + The app setting name that contains the `app_secret` value used for Facebook Login. Cannot be specified with `app_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. 
+ Specifies a list of OAuth 2.0 scopes to be requested as part of Facebook Login authentication. + items: + type: string + type: array + type: object + github: + description: A github block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the GitHub app used for login. + type: string + clientSecretSecretRef: + description: |- + The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. + The Client Secret of the GitHub app used for GitHub Login. Cannot be specified with `client_secret_setting_name`. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for GitHub Login. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + items: + type: string + type: array + type: object + google: + description: A google block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Google web application. + type: string + clientSecretSecretRef: + description: |- + The OAuth 2.0 client secret that was created for the app used for authentication. 
Cannot be specified with client_secret_setting_name. + The client secret associated with the Google web application. Cannot be specified with `client_secret_setting_name`. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Google Login. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + Specifies a list of OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication. If not specified, "openid", "profile", and "email" are used as default scopes. + items: + type: string + type: array + type: object + issuer: + description: |- + The OpenID Connect Issuer URI that represents the entity which issues access tokens for this Linux Web App. + The OpenID Connect Issuer URI that represents the entity which issues access tokens. + type: string + microsoft: + description: A microsoft block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OAuth 2.0 client ID that was created for the app used for authentication. + type: string + clientSecretSecretRef: + description: |- + The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. + The OAuth 2.0 client secret that was created for the app used for authentication. 
Cannot be specified with `client_secret_setting_name`. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + The list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, `wl.basic` is used as the default scope. + items: + type: string + type: array + type: object + runtimeVersion: + description: |- + The RuntimeVersion of the Authentication / Authorization feature in use for the Linux Web App. + The RuntimeVersion of the Authentication / Authorization feature in use. + type: string + tokenRefreshExtensionHours: + description: |- + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + type: number + tokenStoreEnabled: + description: |- + Should the Linux Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to false. + Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to `false`. + type: boolean + twitter: + description: A twitter block as defined below. 
+ properties: + consumerKey: + description: |- + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + type: string + consumerSecretSecretRef: + description: |- + The OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with consumer_secret_setting_name. + The OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret_setting_name`. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + consumerSecretSettingName: + description: |- + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret`. + type: string + type: object + unauthenticatedClientAction: + description: |- + The action to take when an unauthenticated client attempts to access the app. Possible values include: RedirectToLoginPage, AllowAnonymous. + The action to take when an unauthenticated client attempts to access the app. Possible values include: `RedirectToLoginPage`, `AllowAnonymous`. + type: string + type: object + authSettingsV2: + description: An auth_settings_v2 block as defined below. + properties: + activeDirectoryV2: + description: An active_directory_v2 block as defined below. + properties: + allowedApplications: + description: |- + The list of allowed Applications for the Default Authorisation Policy. + The list of allowed Applications for the Default Authorisation Policy. 
+ items: + type: string + type: array + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + items: + type: string + type: array + allowedGroups: + description: |- + The list of allowed Group Names for the Default Authorisation Policy. + The list of allowed Group Names for the Default Authorisation Policy. + items: + type: string + type: array + allowedIdentities: + description: |- + The list of allowed Identities for the Default Authorisation Policy. + The list of allowed Identities for the Default Authorisation Policy. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Active Directory. + type: string + clientSecretCertificateThumbprint: + description: |- + The thumbprint of the certificate used for signing purposes. + The thumbprint of the certificate used for signing purposes. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The App Setting name that contains the client secret of the Client. + type: string + jwtAllowedClientApplications: + description: |- + A list of Allowed Client Applications in the JWT Claim. + A list of Allowed Client Applications in the JWT Claim. + items: + type: string + type: array + jwtAllowedGroups: + description: |- + A list of Allowed Groups in the JWT Claim. + A list of Allowed Groups in the JWT Claim. + items: + type: string + type: array + loginParameters: + additionalProperties: + type: string + description: |- + A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. 
+ A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + type: object + x-kubernetes-map-type: granular + tenantAuthEndpoint: + description: |- + The Azure Tenant Endpoint for the Authenticating Tenant. e.g. https://login.microsoftonline.com/v2.0/{tenant-guid}/ + The Azure Tenant Endpoint for the Authenticating Tenant. e.g. `https://login.microsoftonline.com/v2.0/{tenant-guid}/`. + type: string + wwwAuthenticationDisabled: + description: |- + Should the www-authenticate provider should be omitted from the request? Defaults to false. + Should the www-authenticate provider should be omitted from the request? Defaults to `false` + type: boolean + type: object + appleV2: + description: An apple_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Apple web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Apple Login. + type: string + type: object + authEnabled: + description: |- + Should the AuthV2 Settings be enabled. Defaults to false. + Should the AuthV2 Settings be enabled. Defaults to `false` + type: boolean + azureStaticWebAppV2: + description: An azure_static_web_app_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Static Web App Authentication. + type: string + type: object + configFilePath: + description: |- + The path to the App Auth settings. + The path to the App Auth settings. **Note:** Relative Paths are evaluated from the Site Root directory. 
+ type: string + customOidcV2: + description: Zero or more custom_oidc_v2 blocks as defined + below. + items: + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with this Custom OIDC. + type: string + name: + description: |- + The name which should be used for this Storage Account. + The name of the Custom OIDC Authentication Provider. + type: string + nameClaimType: + description: |- + The name of the claim that contains the users name. + The name of the claim that contains the users name. + type: string + openidConfigurationEndpoint: + description: |- + The app setting name that contains the client_secret value used for the Custom OIDC Login. + The endpoint that contains all the configuration endpoints for this Custom OIDC provider. + type: string + scopes: + description: |- + The list of the scopes that should be requested while authenticating. + The list of the scopes that should be requested while authenticating. + items: + type: string + type: array + type: object + type: array + defaultProvider: + description: |- + The Default Authentication Provider to use when the unauthenticated_action is set to RedirectToLoginPage. Possible values include: apple, azureactivedirectory, facebook, github, google, twitter and the name of your custom_oidc_v2 provider. + The Default Authentication Provider to use when the `unauthenticated_action` is set to `RedirectToLoginPage`. Possible values include: `apple`, `azureactivedirectory`, `facebook`, `github`, `google`, `twitter` and the `name` of your `custom_oidc_v2` provider. + type: string + excludedPaths: + description: |- + The paths which should be excluded from the unauthenticated_action when it is set to RedirectToLoginPage. + The paths which should be excluded from the `unauthenticated_action` when it is set to `RedirectToLoginPage`. 
+ items: + type: string + type: array + facebookV2: + description: A facebook_v2 block as defined below. + properties: + appId: + description: |- + The App ID of the Facebook app used for login. + The App ID of the Facebook app used for login. + type: string + appSecretSettingName: + description: |- + The app setting name that contains the app_secret value used for Facebook Login. + The app setting name that contains the `app_secret` value used for Facebook Login. + type: string + graphApiVersion: + description: |- + The version of the Facebook API to be used while logging in. + The version of the Facebook API to be used while logging in. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of scopes to be requested as part of Facebook Login authentication. + items: + type: string + type: array + type: object + forwardProxyConvention: + description: |- + The convention used to determine the url of the request made. Possible values include NoProxy, Standard, Custom. Defaults to NoProxy. + The convention used to determine the url of the request made. Possible values include `ForwardProxyConventionNoProxy`, `ForwardProxyConventionStandard`, `ForwardProxyConventionCustom`. Defaults to `ForwardProxyConventionNoProxy` + type: string + forwardProxyCustomHostHeaderName: + description: |- + The name of the custom header containing the host of the request. + The name of the header containing the host of the request. + type: string + forwardProxyCustomSchemeHeaderName: + description: |- + The name of the custom header containing the scheme of the request. + The name of the header containing the scheme of the request. + type: string + githubV2: + description: A github_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the GitHub app used for login. 
+ type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for GitHub Login. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + items: + type: string + type: array + type: object + googleV2: + description: A google_v2 block as defined below. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed Audiences that will be requested as part of Google Sign-In authentication. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Google web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Google Login. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of Login scopes that will be requested as part of Google Sign-In authentication. + items: + type: string + type: array + type: object + httpRouteApiPrefix: + description: |- + The prefix that should precede all the authentication and authorisation paths. Defaults to /.auth. + The prefix that should precede all the authentication and authorisation paths. Defaults to `/.auth` + type: string + login: + description: A login block as defined below. 
+ properties: + allowedExternalRedirectUrls: + description: |- + External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. + External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. **Note:** URLs within the current domain are always implicitly allowed. + items: + type: string + type: array + cookieExpirationConvention: + description: |- + The method by which cookies expire. Possible values include: FixedTime, and IdentityProviderDerived. Defaults to FixedTime. + The method by which cookies expire. Possible values include: `FixedTime`, and `IdentityProviderDerived`. Defaults to `FixedTime`. + type: string + cookieExpirationTime: + description: |- + The time after the request is made when the session cookie should expire. Defaults to 08:00:00. + The time after the request is made when the session cookie should expire. Defaults to `08:00:00`. + type: string + logoutEndpoint: + description: |- + The endpoint to which logout requests should be made. + The endpoint to which logout requests should be made. + type: string + nonceExpirationTime: + description: |- + The time after the request is made when the nonce should expire. Defaults to 00:05:00. + The time after the request is made when the nonce should expire. Defaults to `00:05:00`. + type: string + preserveUrlFragmentsForLogins: + description: |- + Should the fragments from the request be preserved after the login request is made. Defaults to false. + Should the fragments from the request be preserved after the login request is made. Defaults to `false`. + type: boolean + tokenRefreshExtensionTime: + description: |- + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. 
+ The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + type: number + tokenStoreEnabled: + description: |- + Should the Token Store configuration Enabled. Defaults to false + Should the Token Store configuration Enabled. Defaults to `false` + type: boolean + tokenStorePath: + description: |- + The directory path in the App Filesystem in which the tokens will be stored. + The directory path in the App Filesystem in which the tokens will be stored. + type: string + tokenStoreSasSettingName: + description: |- + The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + type: string + validateNonce: + description: |- + Should the nonce be validated while completing the login flow. Defaults to true. + Should the nonce be validated while completing the login flow. Defaults to `true`. + type: boolean + type: object + microsoftV2: + description: A microsoft_v2 block as defined below. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OAuth 2.0 client ID that was created for the app used for authentication. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. 
+ type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + The list of Login scopes that will be requested as part of Microsoft Account authentication. + items: + type: string + type: array + type: object + requireAuthentication: + description: |- + Should the authentication flow be used for all requests. + Should the authentication flow be used for all requests. + type: boolean + requireHttps: + description: |- + Should HTTPS be required on connections? Defaults to true. + Should HTTPS be required on connections? Defaults to true. + type: boolean + runtimeVersion: + description: |- + The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to ~1. + The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to `~1` + type: string + twitterV2: + description: A twitter_v2 block as defined below. + properties: + consumerKey: + description: |- + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + type: string + consumerSecretSettingName: + description: |- + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + type: string + type: object + unauthenticatedAction: + description: |- + The action to take for requests made without authentication. Possible values include RedirectToLoginPage, AllowAnonymous, Return401, and Return403. Defaults to RedirectToLoginPage. + The action to take for requests made without authentication. Possible values include `RedirectToLoginPage`, `AllowAnonymous`, `Return401`, and `Return403`. Defaults to `RedirectToLoginPage`. + type: string + type: object + backup: + description: A backup block as defined below. 
+ properties: + enabled: + description: |- + Should this backup job be enabled? Defaults to true. + Should this backup job be enabled? + type: boolean + name: + description: |- + The name which should be used for this Backup. + The name which should be used for this Backup. + type: string + schedule: + description: A schedule block as defined below. + properties: + frequencyInterval: + description: |- + How often the backup should be executed (e.g. for weekly backup, this should be set to 7 and frequency_unit should be set to Day). + How often the backup should be executed (e.g. for weekly backup, this should be set to `7` and `frequency_unit` should be set to `Day`). + type: number + frequencyUnit: + description: |- + The unit of time for how often the backup should take place. Possible values include: Day and Hour. + The unit of time for how often the backup should take place. Possible values include: `Day` and `Hour`. + type: string + keepAtLeastOneBackup: + description: |- + Should the service keep at least one backup, regardless of age of backup. Defaults to false. + Should the service keep at least one backup, regardless of age of backup. Defaults to `false`. + type: boolean + retentionPeriodDays: + description: |- + After how many days backups should be deleted. Defaults to 30. + After how many days backups should be deleted. + type: number + startTime: + description: |- + When the schedule should start working in RFC-3339 format. + When the schedule should start working in RFC-3339 format. + type: string + type: object + storageAccountUrlSecretRef: + description: |- + The SAS URL to the container. + The SAS URL to the container. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - key + - name + - namespace + type: object + required: + - storageAccountUrlSecretRef + type: object + builtinLoggingEnabled: + description: |- + Should built in logging be enabled. Configures AzureWebJobsDashboard app setting based on the configured storage setting. Defaults to true. + Should built in logging be enabled. Configures `AzureWebJobsDashboard` app setting based on the configured storage setting + type: boolean + clientCertificateEnabled: + description: |- + Should the function app use Client Certificates. + Should the function app use Client Certificates + type: boolean + clientCertificateExclusionPaths: + description: |- + Paths to exclude when using client certificates, separated by ; + Paths to exclude when using client certificates, separated by ; + type: string + clientCertificateMode: + description: |- + The mode of the Function App's client certificates requirement for incoming requests. Possible values are Required, Optional, and OptionalInteractiveUser. Defaults to Optional. + The mode of the Function App's client certificates requirement for incoming requests. Possible values are `Required`, `Optional`, and `OptionalInteractiveUser` + type: string + connectionString: + description: One or more connection_string blocks as defined below. + items: + properties: + name: + description: |- + The name which should be used for this Connection. + The name which should be used for this Connection. + type: string + type: + description: |- + Type of database. Possible values include: MySQL, SQLServer, SQLAzure, Custom, NotificationHub, ServiceBus, EventHub, APIHub, DocDb, RedisCache, and PostgreSQL. + Type of database. Possible values include: `MySQL`, `SQLServer`, `SQLAzure`, `Custom`, `NotificationHub`, `ServiceBus`, `EventHub`, `APIHub`, `DocDb`, `RedisCache`, and `PostgreSQL`. + type: string + valueSecretRef: + description: |- + The connection string value. + The connection string value. 
+ properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + required: + - valueSecretRef + type: object + type: array + contentShareForceDisabled: + description: |- + Should the settings for linking the Function App to storage be suppressed. + Force disable the content share settings. + type: boolean + dailyMemoryTimeQuota: + description: |- + The amount of memory in gigabyte-seconds that your application is allowed to consume per day. Setting this value only affects function apps under the consumption plan. Defaults to 0. + The amount of memory in gigabyte-seconds that your application is allowed to consume per day. Setting this value only affects function apps in Consumption Plans. + type: number + enabled: + description: |- + Is the Function App enabled? Defaults to true. + Is the Linux Function App enabled. + type: boolean + ftpPublishBasicAuthenticationEnabled: + description: Should the default FTP Basic Authentication publishing + profile be enabled. Defaults to true. + type: boolean + functionsExtensionVersion: + description: |- + The runtime version associated with the Function App. Defaults to ~4. + The runtime version associated with the Function App. + type: string + httpsOnly: + description: |- + Can the Function App only be accessed via HTTPS? Defaults to false. + Can the Function App only be accessed via HTTPS? + type: boolean + identity: + description: A identity block as defined below. + properties: + identityIds: + description: A list of User Assigned Managed Identity IDs + to be assigned to this Linux Function App. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Linux Function App. 
Possible + values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + keyVaultReferenceIdentityId: + description: |- + The User Assigned Identity ID used for accessing KeyVault secrets. The identity must be assigned to the application in the identity block. For more information see - Access vaults with a user-assigned identity + The User Assigned Identity to use for Key Vault access. + type: string + location: + description: The Azure Region where the Linux Function App should + exist. Changing this forces a new Linux Function App to be created. + type: string + name: + description: |- + The name which should be used for this Linux Function App. Changing this forces a new Linux Function App to be created. Limit the function name to 32 characters to avoid naming collisions. For more information about Function App naming rule and Host ID Collisions + Specifies the name of the Function App. + type: string + publicNetworkAccessEnabled: + description: Should public network access be enabled for the Function + App. Defaults to true. + type: boolean + resourceGroupName: + description: The name of the Resource Group where the Linux Function + App should exist. Changing this forces a new Linux Function + App to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + servicePlanId: + description: |- + The ID of the App Service Plan within which to create this Function App. + The ID of the App Service Plan within which to create this Function App + type: string + servicePlanIdRef: + description: Reference to a ServicePlan in web to populate servicePlanId. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + servicePlanIdSelector: + description: Selector for a ServicePlan in web to populate servicePlanId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + siteConfig: + description: A site_config block as defined below. + properties: + alwaysOn: + description: |- + If this Linux Web App is Always On enabled. Defaults to false. + If this Linux Web App is Always On enabled. Defaults to `false`. + type: boolean + apiDefinitionUrl: + description: |- + The URL of the API definition that describes this Linux Function App. + The URL of the API definition that describes this Linux Function App. + type: string + apiManagementApiId: + description: |- + The ID of the API Management API for this Linux Function App. + The ID of the API Management API for this Linux Function App. + type: string + appCommandLine: + description: |- + The App command line to launch. + The program and any arguments used to launch this app via the command line. (Example `node myapp.js`). + type: string + appScaleLimit: + description: |- + The number of workers this function app can scale out to. Only applicable to apps on the Consumption and Premium plan. + The number of workers this function app can scale out to. Only applicable to apps on the Consumption and Premium plan. + type: number + appServiceLogs: + description: An app_service_logs block as defined above. + properties: + diskQuotaMb: + description: |- + The amount of disk space to use for logs. Valid values are between 25 and 100. Defaults to 35. + The amount of disk space to use for logs. Valid values are between `25` and `100`. + type: number + retentionPeriodDays: + description: |- + After how many days backups should be deleted. Defaults to 30. + The retention period for logs in days. Valid values are between `0` and `99999`. Defaults to `0` (never delete). + type: number + type: object + applicationInsightsConnectionStringSecretRef: + description: |- + The Connection String for linking the Linux Function App to Application Insights. 
+ The Connection String for linking the Linux Function App to Application Insights. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + applicationInsightsKeySecretRef: + description: |- + The Instrumentation Key for connecting the Linux Function App to Application Insights. + The Instrumentation Key for connecting the Linux Function App to Application Insights. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + applicationStack: + description: An application_stack block as defined above. + properties: + docker: + description: |- + One or more docker blocks as defined below. + A docker block + items: + properties: + imageName: + description: |- + The name of the Docker image to use. + The name of the Docker image to use. + type: string + imageTag: + description: |- + The image tag of the image to use. + The image tag of the image to use. + type: string + registryPasswordSecretRef: + description: |- + The password for the account to use to connect to the registry. + The password for the account to use to connect to the registry. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + registryUrl: + description: |- + The URL of the docker registry. + The URL of the docker registry. + type: string + registryUsernameSecretRef: + description: |- + The username to use for connections to the registry. + The username to use for connections to the registry. 
+ properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + dotnetVersion: + description: |- + The version of .NET to use. Possible values include 3.1, 6.0, 7.0 and 8.0. + The version of .Net. Possible values are `3.1`, `6.0` and `7.0` + type: string + javaVersion: + description: |- + The Version of Java to use. Supported versions include 8, 11 & 17. + The version of Java to use. Possible values are `8`, `11`, and `17` + type: string + nodeVersion: + description: |- + The version of Node to run. Possible values include 12, 14, 16 and 18. + The version of Node to use. Possible values include `12`, `14`, `16` and `18` + type: string + powershellCoreVersion: + description: |- + The version of PowerShell Core to run. Possible values are 7, and 7.2. + The version of PowerShell Core to use. Possibles values are `7`, and `7.2` + type: string + pythonVersion: + description: |- + The version of Python to run. Possible values are 3.12, 3.11, 3.10, 3.9, 3.8 and 3.7. + The version of Python to use. Possible values include `3.12`, `3.11`, `3.10`, `3.9`, `3.8`, and `3.7`. + type: string + useCustomRuntime: + description: Should the Linux Function App use a custom + runtime? + type: boolean + useDotnetIsolatedRuntime: + description: |- + Should the DotNet process use an isolated runtime. Defaults to false. + Should the DotNet process use an isolated runtime. Defaults to `false`. + type: boolean + type: object + containerRegistryManagedIdentityClientId: + description: |- + The Client ID of the Managed Service Identity to use for connections to the Azure Container Registry. + The Client ID of the Managed Service Identity to use for connections to the Azure Container Registry. 
+ type: string + containerRegistryUseManagedIdentity: + description: |- + Should connections for Azure Container Registry use Managed Identity. + Should connections for Azure Container Registry use Managed Identity. + type: boolean + cors: + description: A cors block as defined above. + properties: + allowedOrigins: + description: |- + Specifies a list of origins that should be allowed to make cross-origin calls. + Specifies a list of origins that should be allowed to make cross-origin calls. + items: + type: string + type: array + x-kubernetes-list-type: set + supportCredentials: + description: |- + Are credentials allowed in CORS requests? Defaults to false. + Are credentials allowed in CORS requests? Defaults to `false`. + type: boolean + type: object + defaultDocuments: + description: |- + Specifies a list of Default Documents for the Linux Web App. + Specifies a list of Default Documents for the Linux Web App. + items: + type: string + type: array + elasticInstanceMinimum: + description: |- + The number of minimum instances for this Linux Function App. Only affects apps on Elastic Premium plans. + The number of minimum instances for this Linux Function App. Only affects apps on Elastic Premium plans. + type: number + ftpsState: + description: |- + State of FTP / FTPS service for this function app. Possible values include: AllAllowed, FtpsOnly and Disabled. Defaults to Disabled. + State of FTP / FTPS service for this function app. Possible values include: `AllAllowed`, `FtpsOnly` and `Disabled`. Defaults to `Disabled`. + type: string + healthCheckEvictionTimeInMin: + description: |- + The amount of time in minutes that a node can be unhealthy before being removed from the load balancer. Possible values are between 2 and 10. Only valid in conjunction with health_check_path. + The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between `2` and `10`. Defaults to `10`. 
Only valid in conjunction with `health_check_path` + type: number + healthCheckPath: + description: |- + The path to be checked for this function app health. + The path to be checked for this function app health. + type: string + http2Enabled: + description: |- + Specifies if the HTTP2 protocol should be enabled. Defaults to false. + Specifies if the http2 protocol should be enabled. Defaults to `false`. + type: boolean + ipRestriction: + description: One or more ip_restriction blocks as defined + above. + items: + properties: + action: + description: |- + The action to take. Possible values are Allow or Deny. Defaults to Allow. + The action to take. Possible values are `Allow` or `Deny`. + type: string + description: + description: |- + The Description of this IP Restriction. + The description of the IP restriction rule. + type: string + headers: + description: A headers block as defined above. + items: + properties: + xAzureFdid: + description: Specifies a list of Azure Front Door + IDs. + items: + type: string + type: array + xFdHealthProbe: + description: Specifies if a Front Door Health + Probe should be expected. The only possible + value is 1. + items: + type: string + type: array + xForwardedFor: + description: Specifies a list of addresses for + which matching should be applied. Omitting this + value means allow any. + items: + type: string + type: array + xForwardedHost: + description: Specifies a list of Hosts for which + matching should be applied. + items: + type: string + type: array + type: object + type: array + ipAddress: + description: |- + The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + type: string + name: + description: |- + The name which should be used for this Storage Account. + The name which should be used for this `ip_restriction`. 
+ type: string + priority: + description: |- + The priority value of this ip_restriction. Defaults to 65000. + The priority value of this `ip_restriction`. + type: number + serviceTag: + description: |- + The Service Tag used for this IP Restriction. + The Service Tag used for this IP Restriction. + type: string + virtualNetworkSubnetId: + description: |- + The subnet id which will be used by this Function App for regional virtual network integration. + The Virtual Network Subnet ID used for this IP Restriction. + type: string + virtualNetworkSubnetIdRef: + description: Reference to a Subnet in network to populate + virtualNetworkSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkSubnetIdSelector: + description: Selector for a Subnet in network to populate + virtualNetworkSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + ipRestrictionDefaultAction: + description: The Default action for traffic that does not + match any ip_restriction rule. possible values include Allow + and Deny. Defaults to Allow. + type: string + loadBalancingMode: + description: |- + The Site load balancing mode. Possible values include: WeightedRoundRobin, LeastRequests, LeastResponseTime, WeightedTotalTraffic, RequestHash, PerSiteRoundRobin. Defaults to LeastRequests if omitted. + The Site load balancing mode. Possible values include: `WeightedRoundRobin`, `LeastRequests`, `LeastResponseTime`, `WeightedTotalTraffic`, `RequestHash`, `PerSiteRoundRobin`. Defaults to `LeastRequests` if omitted. + type: string + managedPipelineMode: + description: |- + Managed pipeline mode. Possible values include: Integrated, Classic. Defaults to Integrated. + The Managed Pipeline mode. Possible values include: `Integrated`, `Classic`. Defaults to `Integrated`. + type: string + minimumTlsVersion: + description: |- + The configures the minimum version of TLS required for SSL requests. Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + The configures the minimum version of TLS required for SSL requests. 
Possible + values include: `1.0`, `1.1`, and `1.2`. Defaults to `1.2`. + type: string + preWarmedInstanceCount: + description: |- + The number of pre-warmed instances for this function app. Only affects apps on an Elastic Premium plan. + The number of pre-warmed instances for this function app. Only affects apps on an Elastic Premium plan. + type: number + remoteDebuggingEnabled: + description: |- + Should Remote Debugging be enabled. Defaults to false. + Should Remote Debugging be enabled. Defaults to `false`. + type: boolean + remoteDebuggingVersion: + description: |- + The Remote Debugging Version. Possible values include VS2017, VS2019, and VS2022. + The Remote Debugging Version. Possible values include `VS2017`, `VS2019`, and `VS2022` + type: string + runtimeScaleMonitoringEnabled: + description: |- + Should Scale Monitoring of the Functions Runtime be enabled? + Should Functions Runtime Scale Monitoring be enabled. + type: boolean + scmIpRestriction: + description: One or more scm_ip_restriction blocks as defined + above. + items: + properties: + action: + description: |- + The action to take. Possible values are Allow or Deny. Defaults to Allow. + The action to take. Possible values are `Allow` or `Deny`. + type: string + description: + description: |- + The Description of this IP Restriction. + The description of the IP restriction rule. + type: string + headers: + description: A headers block as defined above. + items: + properties: + xAzureFdid: + description: Specifies a list of Azure Front Door + IDs. + items: + type: string + type: array + xFdHealthProbe: + description: Specifies if a Front Door Health + Probe should be expected. The only possible + value is 1. + items: + type: string + type: array + xForwardedFor: + description: Specifies a list of addresses for + which matching should be applied. Omitting this + value means allow any. 
+ items: + type: string + type: array + xForwardedHost: + description: Specifies a list of Hosts for which + matching should be applied. + items: + type: string + type: array + type: object + type: array + ipAddress: + description: |- + The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + type: string + name: + description: |- + The name which should be used for this Storage Account. + The name which should be used for this `ip_restriction`. + type: string + priority: + description: |- + The priority value of this ip_restriction. Defaults to 65000. + The priority value of this `ip_restriction`. + type: number + serviceTag: + description: |- + The Service Tag used for this IP Restriction. + The Service Tag used for this IP Restriction. + type: string + virtualNetworkSubnetId: + description: |- + The subnet id which will be used by this Function App for regional virtual network integration. + The Virtual Network Subnet ID used for this IP Restriction. + type: string + virtualNetworkSubnetIdRef: + description: Reference to a Subnet in network to populate + virtualNetworkSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkSubnetIdSelector: + description: Selector for a Subnet in network to populate + virtualNetworkSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + scmIpRestrictionDefaultAction: + description: The Default action for traffic that does not + match any scm_ip_restriction rule. possible values include + Allow and Deny. Defaults to Allow. + type: string + scmMinimumTlsVersion: + description: |- + Configures the minimum version of TLS required for SSL requests to the SCM site Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. 
+ Configures the minimum version of TLS required for SSL requests to the SCM site Possible values include: `1.0`, `1.1`, and `1.2`. Defaults to `1.2`. + type: string + scmUseMainIpRestriction: + description: |- + Should the Linux Function App ip_restriction configuration be used for the SCM also. + Should the Linux Function App `ip_restriction` configuration be used for the SCM also. + type: boolean + use32BitWorker: + description: |- + Should the Linux Web App use a 32-bit worker process. Defaults to false. + Should the Linux Web App use a 32-bit worker. + type: boolean + vnetRouteAllEnabled: + description: |- + Should all outbound traffic to have NAT Gateways, Network Security Groups and User Defined Routes applied? Defaults to false. + Should all outbound traffic to have Virtual Network Security Groups and User Defined Routes applied? Defaults to `false`. + type: boolean + websocketsEnabled: + description: |- + Should Web Sockets be enabled. Defaults to false. + Should Web Sockets be enabled. Defaults to `false`. + type: boolean + workerCount: + description: |- + The number of Workers for this Linux Function App. + The number of Workers for this Linux Function App. + type: number + type: object + stickySettings: + description: A sticky_settings block as defined below. + properties: + appSettingNames: + description: A list of app_setting names that the Linux Function + App will not swap between Slots when a swap operation is + triggered. + items: + type: string + type: array + connectionStringNames: + description: A list of connection_string names that the Linux + Function App will not swap between Slots when a swap operation + is triggered. + items: + type: string + type: array + type: object + storageAccount: + description: One or more storage_account blocks as defined below. + items: + properties: + accessKeySecretRef: + description: The Access key for the storage account. + properties: + key: + description: The key to select. 
+ type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + accountName: + description: The Name of the Storage Account. + type: string + mountPath: + description: The path at which to mount the storage share. + type: string + name: + description: The name which should be used for this Storage + Account. + type: string + shareName: + description: The Name of the File Share or Container Name + for Blob storage. + type: string + type: + description: The Azure Storage Type. Possible values include + AzureFiles and AzureBlob. + type: string + required: + - accessKeySecretRef + type: object + type: array + storageAccountAccessKeySecretRef: + description: |- + The access key which will be used to access the backend storage account for the Function App. Conflicts with storage_uses_managed_identity. + The access key which will be used to access the storage account for the Function App. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + storageAccountName: + description: |- + The backend storage account name which will be used by this Function App. + The backend storage account name which will be used by this Function App. + type: string + storageAccountNameRef: + description: Reference to a Account in storage to populate storageAccountName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + storageAccountNameSelector: + description: Selector for a Account in storage to populate storageAccountName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + storageKeyVaultSecretId: + description: |- + The Key Vault Secret ID, optionally including version, that contains the Connection String to connect to the storage account for this Function App. 
+ The Key Vault Secret ID, including version, that contains the Connection String to connect to the storage account for this Function App. + type: string + storageUsesManagedIdentity: + description: |- + Should the Function App use Managed Identity to access the storage account. Conflicts with storage_account_access_key. + Should the Function App use its Managed Identity to access storage? + type: boolean + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Linux Function App. + type: object + x-kubernetes-map-type: granular + virtualNetworkSubnetId: + description: The subnet id which will be used by this Function + App for regional virtual network integration. + type: string + virtualNetworkSubnetIdRef: + description: Reference to a Subnet in network to populate virtualNetworkSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkSubnetIdSelector: + description: Selector for a Subnet in network to populate virtualNetworkSubnetId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + webdeployPublishBasicAuthenticationEnabled: + description: Should the default WebDeploy Basic Authentication + publishing credentials enabled. Defaults to true. + type: boolean + zipDeployFile: + description: |- + The local path and filename of the Zip packaged application to deploy to this Linux Function App. + The local path and filename of the Zip packaged application to deploy to this Linux Function App. **Note:** Using this value requires either `WEBSITE_RUN_FROM_PACKAGE=1` or `SCM_DO_BUILD_DURING_DEPLOYMENT=true` to be set on the App in `app_settings`. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. 
The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + appSettings: + additionalProperties: + type: string + description: |- + A map of key-value pairs for App Settings and custom values. + A map of key-value pairs for [App Settings](https://docs.microsoft.com/en-us/azure/azure-functions/functions-app-settings) and custom values. + type: object + x-kubernetes-map-type: granular + authSettings: + description: A auth_settings block as defined below. + properties: + activeDirectory: + description: An active_directory block as defined above. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Active Directory. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The App Setting name that contains the client secret of the Client. Cannot be used with `client_secret`. + type: string + type: object + additionalLoginParameters: + additionalProperties: + type: string + description: |- + Specifies a map of login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. 
+ Specifies a map of Login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + type: object + x-kubernetes-map-type: granular + allowedExternalRedirectUrls: + description: |- + Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Linux Web App. + Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + items: + type: string + type: array + defaultProvider: + description: |- + The default authentication provider to use when multiple providers are configured. Possible values include: AzureActiveDirectory, Facebook, Google, MicrosoftAccount, Twitter, Github + The default authentication provider to use when multiple providers are configured. Possible values include: `AzureActiveDirectory`, `Facebook`, `Google`, `MicrosoftAccount`, `Twitter`, `Github`. + type: string + enabled: + description: |- + Should the Authentication / Authorization feature be enabled for the Linux Web App? + Should the Authentication / Authorization feature be enabled? + type: boolean + facebook: + description: A facebook block as defined below. + properties: + appId: + description: |- + The App ID of the Facebook app used for login. + The App ID of the Facebook app used for login. + type: string + appSecretSettingName: + description: |- + The app setting name that contains the app_secret value used for Facebook Login. + The app setting name that contains the `app_secret` value used for Facebook Login. Cannot be specified with `app_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + Specifies a list of OAuth 2.0 scopes to be requested as part of Facebook Login authentication. 
+ items: + type: string + type: array + type: object + github: + description: A github block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the GitHub app used for login. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for GitHub Login. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + items: + type: string + type: array + type: object + google: + description: A google block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Google web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Google Login. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + Specifies a list of OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication. If not specified, "openid", "profile", and "email" are used as default scopes. 
+ items: + type: string + type: array + type: object + issuer: + description: |- + The OpenID Connect Issuer URI that represents the entity which issues access tokens for this Linux Web App. + The OpenID Connect Issuer URI that represents the entity which issues access tokens. + type: string + microsoft: + description: A microsoft block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OAuth 2.0 client ID that was created for the app used for authentication. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + The list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, `wl.basic` is used as the default scope. + items: + type: string + type: array + type: object + runtimeVersion: + description: |- + The RuntimeVersion of the Authentication / Authorization feature in use for the Linux Web App. + The RuntimeVersion of the Authentication / Authorization feature in use. + type: string + tokenRefreshExtensionHours: + description: |- + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. 
+ type: number + tokenStoreEnabled: + description: |- + Should the Linux Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to false. + Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to `false`. + type: boolean + twitter: + description: A twitter block as defined below. + properties: + consumerKey: + description: |- + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + type: string + consumerSecretSettingName: + description: |- + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret`. + type: string + type: object + unauthenticatedClientAction: + description: |- + The action to take when an unauthenticated client attempts to access the app. Possible values include: RedirectToLoginPage, AllowAnonymous. + The action to take when an unauthenticated client attempts to access the app. Possible values include: `RedirectToLoginPage`, `AllowAnonymous`. + type: string + type: object + authSettingsV2: + description: An auth_settings_v2 block as defined below. + properties: + activeDirectoryV2: + description: An active_directory_v2 block as defined below. + properties: + allowedApplications: + description: |- + The list of allowed Applications for the Default Authorisation Policy. + The list of allowed Applications for the Default Authorisation Policy. + items: + type: string + type: array + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. 
+ items: + type: string + type: array + allowedGroups: + description: |- + The list of allowed Group Names for the Default Authorisation Policy. + The list of allowed Group Names for the Default Authorisation Policy. + items: + type: string + type: array + allowedIdentities: + description: |- + The list of allowed Identities for the Default Authorisation Policy. + The list of allowed Identities for the Default Authorisation Policy. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Active Directory. + type: string + clientSecretCertificateThumbprint: + description: |- + The thumbprint of the certificate used for signing purposes. + The thumbprint of the certificate used for signing purposes. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The App Setting name that contains the client secret of the Client. + type: string + jwtAllowedClientApplications: + description: |- + A list of Allowed Client Applications in the JWT Claim. + A list of Allowed Client Applications in the JWT Claim. + items: + type: string + type: array + jwtAllowedGroups: + description: |- + A list of Allowed Groups in the JWT Claim. + A list of Allowed Groups in the JWT Claim. + items: + type: string + type: array + loginParameters: + additionalProperties: + type: string + description: |- + A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + type: object + x-kubernetes-map-type: granular + tenantAuthEndpoint: + description: |- + The Azure Tenant Endpoint for the Authenticating Tenant. e.g. https://login.microsoftonline.com/v2.0/{tenant-guid}/ + The Azure Tenant Endpoint for the Authenticating Tenant. 
e.g. `https://login.microsoftonline.com/v2.0/{tenant-guid}/`. + type: string + wwwAuthenticationDisabled: + description: |- + Should the www-authenticate provider should be omitted from the request? Defaults to false. + Should the www-authenticate provider should be omitted from the request? Defaults to `false` + type: boolean + type: object + appleV2: + description: An apple_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Apple web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Apple Login. + type: string + type: object + authEnabled: + description: |- + Should the AuthV2 Settings be enabled. Defaults to false. + Should the AuthV2 Settings be enabled. Defaults to `false` + type: boolean + azureStaticWebAppV2: + description: An azure_static_web_app_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Static Web App Authentication. + type: string + type: object + configFilePath: + description: |- + The path to the App Auth settings. + The path to the App Auth settings. **Note:** Relative Paths are evaluated from the Site Root directory. + type: string + customOidcV2: + description: Zero or more custom_oidc_v2 blocks as defined + below. + items: + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with this Custom OIDC. + type: string + name: + description: |- + The name which should be used for this Storage Account. 
+ The name of the Custom OIDC Authentication Provider. + type: string + nameClaimType: + description: |- + The name of the claim that contains the users name. + The name of the claim that contains the users name. + type: string + openidConfigurationEndpoint: + description: |- + The app setting name that contains the client_secret value used for the Custom OIDC Login. + The endpoint that contains all the configuration endpoints for this Custom OIDC provider. + type: string + scopes: + description: |- + The list of the scopes that should be requested while authenticating. + The list of the scopes that should be requested while authenticating. + items: + type: string + type: array + type: object + type: array + defaultProvider: + description: |- + The Default Authentication Provider to use when the unauthenticated_action is set to RedirectToLoginPage. Possible values include: apple, azureactivedirectory, facebook, github, google, twitter and the name of your custom_oidc_v2 provider. + The Default Authentication Provider to use when the `unauthenticated_action` is set to `RedirectToLoginPage`. Possible values include: `apple`, `azureactivedirectory`, `facebook`, `github`, `google`, `twitter` and the `name` of your `custom_oidc_v2` provider. + type: string + excludedPaths: + description: |- + The paths which should be excluded from the unauthenticated_action when it is set to RedirectToLoginPage. + The paths which should be excluded from the `unauthenticated_action` when it is set to `RedirectToLoginPage`. + items: + type: string + type: array + facebookV2: + description: A facebook_v2 block as defined below. + properties: + appId: + description: |- + The App ID of the Facebook app used for login. + The App ID of the Facebook app used for login. + type: string + appSecretSettingName: + description: |- + The app setting name that contains the app_secret value used for Facebook Login. + The app setting name that contains the `app_secret` value used for Facebook Login. 
+ type: string + graphApiVersion: + description: |- + The version of the Facebook API to be used while logging in. + The version of the Facebook API to be used while logging in. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of scopes to be requested as part of Facebook Login authentication. + items: + type: string + type: array + type: object + forwardProxyConvention: + description: |- + The convention used to determine the url of the request made. Possible values include NoProxy, Standard, Custom. Defaults to NoProxy. + The convention used to determine the url of the request made. Possible values include `ForwardProxyConventionNoProxy`, `ForwardProxyConventionStandard`, `ForwardProxyConventionCustom`. Defaults to `ForwardProxyConventionNoProxy` + type: string + forwardProxyCustomHostHeaderName: + description: |- + The name of the custom header containing the host of the request. + The name of the header containing the host of the request. + type: string + forwardProxyCustomSchemeHeaderName: + description: |- + The name of the custom header containing the scheme of the request. + The name of the header containing the scheme of the request. + type: string + githubV2: + description: A github_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the GitHub app used for login. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for GitHub Login. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. 
+ Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + items: + type: string + type: array + type: object + googleV2: + description: A google_v2 block as defined below. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed Audiences that will be requested as part of Google Sign-In authentication. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Google web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Google Login. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of Login scopes that will be requested as part of Google Sign-In authentication. + items: + type: string + type: array + type: object + httpRouteApiPrefix: + description: |- + The prefix that should precede all the authentication and authorisation paths. Defaults to /.auth. + The prefix that should precede all the authentication and authorisation paths. Defaults to `/.auth` + type: string + login: + description: A login block as defined below. + properties: + allowedExternalRedirectUrls: + description: |- + External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. + External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. 
**Note:** URLs within the current domain are always implicitly allowed. + items: + type: string + type: array + cookieExpirationConvention: + description: |- + The method by which cookies expire. Possible values include: FixedTime, and IdentityProviderDerived. Defaults to FixedTime. + The method by which cookies expire. Possible values include: `FixedTime`, and `IdentityProviderDerived`. Defaults to `FixedTime`. + type: string + cookieExpirationTime: + description: |- + The time after the request is made when the session cookie should expire. Defaults to 08:00:00. + The time after the request is made when the session cookie should expire. Defaults to `08:00:00`. + type: string + logoutEndpoint: + description: |- + The endpoint to which logout requests should be made. + The endpoint to which logout requests should be made. + type: string + nonceExpirationTime: + description: |- + The time after the request is made when the nonce should expire. Defaults to 00:05:00. + The time after the request is made when the nonce should expire. Defaults to `00:05:00`. + type: string + preserveUrlFragmentsForLogins: + description: |- + Should the fragments from the request be preserved after the login request is made. Defaults to false. + Should the fragments from the request be preserved after the login request is made. Defaults to `false`. + type: boolean + tokenRefreshExtensionTime: + description: |- + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + type: number + tokenStoreEnabled: + description: |- + Should the Token Store configuration Enabled. Defaults to false + Should the Token Store configuration Enabled. 
Defaults to `false` + type: boolean + tokenStorePath: + description: |- + The directory path in the App Filesystem in which the tokens will be stored. + The directory path in the App Filesystem in which the tokens will be stored. + type: string + tokenStoreSasSettingName: + description: |- + The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + type: string + validateNonce: + description: |- + Should the nonce be validated while completing the login flow. Defaults to true. + Should the nonce be validated while completing the login flow. Defaults to `true`. + type: boolean + type: object + microsoftV2: + description: A microsoft_v2 block as defined below. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OAuth 2.0 client ID that was created for the app used for authentication. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + The list of Login scopes that will be requested as part of Microsoft Account authentication. + items: + type: string + type: array + type: object + requireAuthentication: + description: |- + Should the authentication flow be used for all requests. 
+ Should the authentication flow be used for all requests. + type: boolean + requireHttps: + description: |- + Should HTTPS be required on connections? Defaults to true. + Should HTTPS be required on connections? Defaults to true. + type: boolean + runtimeVersion: + description: |- + The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to ~1. + The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to `~1` + type: string + twitterV2: + description: A twitter_v2 block as defined below. + properties: + consumerKey: + description: |- + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + type: string + consumerSecretSettingName: + description: |- + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + type: string + type: object + unauthenticatedAction: + description: |- + The action to take for requests made without authentication. Possible values include RedirectToLoginPage, AllowAnonymous, Return401, and Return403. Defaults to RedirectToLoginPage. + The action to take for requests made without authentication. Possible values include `RedirectToLoginPage`, `AllowAnonymous`, `Return401`, and `Return403`. Defaults to `RedirectToLoginPage`. + type: string + type: object + backup: + description: A backup block as defined below. + properties: + enabled: + description: |- + Should this backup job be enabled? Defaults to true. + Should this backup job be enabled? + type: boolean + name: + description: |- + The name which should be used for this Backup. + The name which should be used for this Backup. + type: string + schedule: + description: A schedule block as defined below. 
+ properties: + frequencyInterval: + description: |- + How often the backup should be executed (e.g. for weekly backup, this should be set to 7 and frequency_unit should be set to Day). + How often the backup should be executed (e.g. for weekly backup, this should be set to `7` and `frequency_unit` should be set to `Day`). + type: number + frequencyUnit: + description: |- + The unit of time for how often the backup should take place. Possible values include: Day and Hour. + The unit of time for how often the backup should take place. Possible values include: `Day` and `Hour`. + type: string + keepAtLeastOneBackup: + description: |- + Should the service keep at least one backup, regardless of age of backup. Defaults to false. + Should the service keep at least one backup, regardless of age of backup. Defaults to `false`. + type: boolean + retentionPeriodDays: + description: |- + After how many days backups should be deleted. Defaults to 30. + After how many days backups should be deleted. + type: number + startTime: + description: |- + When the schedule should start working in RFC-3339 format. + When the schedule should start working in RFC-3339 format. + type: string + type: object + type: object + builtinLoggingEnabled: + description: |- + Should built in logging be enabled. Configures AzureWebJobsDashboard app setting based on the configured storage setting. Defaults to true. + Should built in logging be enabled. Configures `AzureWebJobsDashboard` app setting based on the configured storage setting + type: boolean + clientCertificateEnabled: + description: |- + Should the function app use Client Certificates. 
+ Should the function app use Client Certificates + type: boolean + clientCertificateExclusionPaths: + description: |- + Paths to exclude when using client certificates, separated by ; + Paths to exclude when using client certificates, separated by ; + type: string + clientCertificateMode: + description: |- + The mode of the Function App's client certificates requirement for incoming requests. Possible values are Required, Optional, and OptionalInteractiveUser. Defaults to Optional. + The mode of the Function App's client certificates requirement for incoming requests. Possible values are `Required`, `Optional`, and `OptionalInteractiveUser` + type: string + connectionString: + description: One or more connection_string blocks as defined below. + items: + properties: + name: + description: |- + The name which should be used for this Connection. + The name which should be used for this Connection. + type: string + type: + description: |- + Type of database. Possible values include: MySQL, SQLServer, SQLAzure, Custom, NotificationHub, ServiceBus, EventHub, APIHub, DocDb, RedisCache, and PostgreSQL. + Type of database. Possible values include: `MySQL`, `SQLServer`, `SQLAzure`, `Custom`, `NotificationHub`, `ServiceBus`, `EventHub`, `APIHub`, `DocDb`, `RedisCache`, and `PostgreSQL`. + type: string + type: object + type: array + contentShareForceDisabled: + description: |- + Should the settings for linking the Function App to storage be suppressed. + Force disable the content share settings. + type: boolean + dailyMemoryTimeQuota: + description: |- + The amount of memory in gigabyte-seconds that your application is allowed to consume per day. Setting this value only affects function apps under the consumption plan. Defaults to 0. + The amount of memory in gigabyte-seconds that your application is allowed to consume per day. Setting this value only affects function apps in Consumption Plans. + type: number + enabled: + description: |- + Is the Function App enabled? 
Defaults to true. + Is the Linux Function App enabled. + type: boolean + ftpPublishBasicAuthenticationEnabled: + description: Should the default FTP Basic Authentication publishing + profile be enabled. Defaults to true. + type: boolean + functionsExtensionVersion: + description: |- + The runtime version associated with the Function App. Defaults to ~4. + The runtime version associated with the Function App. + type: string + httpsOnly: + description: |- + Can the Function App only be accessed via HTTPS? Defaults to false. + Can the Function App only be accessed via HTTPS? + type: boolean + identity: + description: A identity block as defined below. + properties: + identityIds: + description: A list of User Assigned Managed Identity IDs + to be assigned to this Linux Function App. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Linux Function App. Possible + values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + keyVaultReferenceIdentityId: + description: |- + The User Assigned Identity ID used for accessing KeyVault secrets. The identity must be assigned to the application in the identity block. For more information see - Access vaults with a user-assigned identity + The User Assigned Identity to use for Key Vault access. + type: string + location: + description: The Azure Region where the Linux Function App should + exist. Changing this forces a new Linux Function App to be created. + type: string + name: + description: |- + The name which should be used for this Linux Function App. Changing this forces a new Linux Function App to be created. Limit the function name to 32 characters to avoid naming collisions. For more information about Function App naming rule and Host ID Collisions + Specifies the name of the Function App. 
+ type: string + publicNetworkAccessEnabled: + description: Should public network access be enabled for the Function + App. Defaults to true. + type: boolean + resourceGroupName: + description: The name of the Resource Group where the Linux Function + App should exist. Changing this forces a new Linux Function + App to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + servicePlanId: + description: |- + The ID of the App Service Plan within which to create this Function App. + The ID of the App Service Plan within which to create this Function App + type: string + servicePlanIdRef: + description: Reference to a ServicePlan in web to populate servicePlanId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + servicePlanIdSelector: + description: Selector for a ServicePlan in web to populate servicePlanId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + siteConfig: + description: A site_config block as defined below. + properties: + alwaysOn: + description: |- + If this Linux Web App is Always On enabled. Defaults to false. + If this Linux Web App is Always On enabled. Defaults to `false`. + type: boolean + apiDefinitionUrl: + description: |- + The URL of the API definition that describes this Linux Function App. + The URL of the API definition that describes this Linux Function App. + type: string + apiManagementApiId: + description: |- + The ID of the API Management API for this Linux Function App. + The ID of the API Management API for this Linux Function App. + type: string + appCommandLine: + description: |- + The App command line to launch. + The program and any arguments used to launch this app via the command line. (Example `node myapp.js`). 
+ type: string + appScaleLimit: + description: |- + The number of workers this function app can scale out to. Only applicable to apps on the Consumption and Premium plan. + The number of workers this function app can scale out to. Only applicable to apps on the Consumption and Premium plan. + type: number + appServiceLogs: + description: An app_service_logs block as defined above. + properties: + diskQuotaMb: + description: |- + The amount of disk space to use for logs. Valid values are between 25 and 100. Defaults to 35. + The amount of disk space to use for logs. Valid values are between `25` and `100`. + type: number + retentionPeriodDays: + description: |- + After how many days backups should be deleted. Defaults to 30. + The retention period for logs in days. Valid values are between `0` and `99999`. Defaults to `0` (never delete). + type: number + type: object + applicationStack: + description: An application_stack block as defined above. + properties: + docker: + description: |- + One or more docker blocks as defined below. + A docker block + items: + properties: + imageName: + description: |- + The name of the Docker image to use. + The name of the Docker image to use. + type: string + imageTag: + description: |- + The image tag of the image to use. + The image tag of the image to use. + type: string + registryUrl: + description: |- + The URL of the docker registry. + The URL of the docker registry. + type: string + type: object + type: array + dotnetVersion: + description: |- + The version of .NET to use. Possible values include 3.1, 6.0, 7.0 and 8.0. + The version of .Net. Possible values are `3.1`, `6.0` and `7.0` + type: string + javaVersion: + description: |- + The Version of Java to use. Supported versions include 8, 11 & 17. + The version of Java to use. Possible values are `8`, `11`, and `17` + type: string + nodeVersion: + description: |- + The version of Node to run. Possible values include 12, 14, 16 and 18. + The version of Node to use. 
Possible values include `12`, `14`, `16` and `18` + type: string + powershellCoreVersion: + description: |- + The version of PowerShell Core to run. Possible values are 7, and 7.2. + The version of PowerShell Core to use. Possibles values are `7`, and `7.2` + type: string + pythonVersion: + description: |- + The version of Python to run. Possible values are 3.12, 3.11, 3.10, 3.9, 3.8 and 3.7. + The version of Python to use. Possible values include `3.12`, `3.11`, `3.10`, `3.9`, `3.8`, and `3.7`. + type: string + useCustomRuntime: + description: Should the Linux Function App use a custom + runtime? + type: boolean + useDotnetIsolatedRuntime: + description: |- + Should the DotNet process use an isolated runtime. Defaults to false. + Should the DotNet process use an isolated runtime. Defaults to `false`. + type: boolean + type: object + containerRegistryManagedIdentityClientId: + description: |- + The Client ID of the Managed Service Identity to use for connections to the Azure Container Registry. + The Client ID of the Managed Service Identity to use for connections to the Azure Container Registry. + type: string + containerRegistryUseManagedIdentity: + description: |- + Should connections for Azure Container Registry use Managed Identity. + Should connections for Azure Container Registry use Managed Identity. + type: boolean + cors: + description: A cors block as defined above. + properties: + allowedOrigins: + description: |- + Specifies a list of origins that should be allowed to make cross-origin calls. + Specifies a list of origins that should be allowed to make cross-origin calls. + items: + type: string + type: array + x-kubernetes-list-type: set + supportCredentials: + description: |- + Are credentials allowed in CORS requests? Defaults to false. + Are credentials allowed in CORS requests? Defaults to `false`. + type: boolean + type: object + defaultDocuments: + description: |- + Specifies a list of Default Documents for the Linux Web App. 
+ Specifies a list of Default Documents for the Linux Web App. + items: + type: string + type: array + elasticInstanceMinimum: + description: |- + The number of minimum instances for this Linux Function App. Only affects apps on Elastic Premium plans. + The number of minimum instances for this Linux Function App. Only affects apps on Elastic Premium plans. + type: number + ftpsState: + description: |- + State of FTP / FTPS service for this function app. Possible values include: AllAllowed, FtpsOnly and Disabled. Defaults to Disabled. + State of FTP / FTPS service for this function app. Possible values include: `AllAllowed`, `FtpsOnly` and `Disabled`. Defaults to `Disabled`. + type: string + healthCheckEvictionTimeInMin: + description: |- + The amount of time in minutes that a node can be unhealthy before being removed from the load balancer. Possible values are between 2 and 10. Only valid in conjunction with health_check_path. + The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between `2` and `10`. Defaults to `10`. Only valid in conjunction with `health_check_path` + type: number + healthCheckPath: + description: |- + The path to be checked for this function app health. + The path to be checked for this function app health. + type: string + http2Enabled: + description: |- + Specifies if the HTTP2 protocol should be enabled. Defaults to false. + Specifies if the http2 protocol should be enabled. Defaults to `false`. + type: boolean + ipRestriction: + description: One or more ip_restriction blocks as defined + above. + items: + properties: + action: + description: |- + The action to take. Possible values are Allow or Deny. Defaults to Allow. + The action to take. Possible values are `Allow` or `Deny`. + type: string + description: + description: |- + The Description of this IP Restriction. + The description of the IP restriction rule. 
+ type: string + headers: + description: A headers block as defined above. + items: + properties: + xAzureFdid: + description: Specifies a list of Azure Front Door + IDs. + items: + type: string + type: array + xFdHealthProbe: + description: Specifies if a Front Door Health + Probe should be expected. The only possible + value is 1. + items: + type: string + type: array + xForwardedFor: + description: Specifies a list of addresses for + which matching should be applied. Omitting this + value means allow any. + items: + type: string + type: array + xForwardedHost: + description: Specifies a list of Hosts for which + matching should be applied. + items: + type: string + type: array + type: object + type: array + ipAddress: + description: |- + The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + type: string + name: + description: |- + The name which should be used for this Storage Account. + The name which should be used for this `ip_restriction`. + type: string + priority: + description: |- + The priority value of this ip_restriction. Defaults to 65000. + The priority value of this `ip_restriction`. + type: number + serviceTag: + description: |- + The Service Tag used for this IP Restriction. + The Service Tag used for this IP Restriction. + type: string + virtualNetworkSubnetId: + description: |- + The subnet id which will be used by this Function App for regional virtual network integration. + The Virtual Network Subnet ID used for this IP Restriction. + type: string + virtualNetworkSubnetIdRef: + description: Reference to a Subnet in network to populate + virtualNetworkSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkSubnetIdSelector: + description: Selector for a Subnet in network to populate + virtualNetworkSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + ipRestrictionDefaultAction: + description: The Default action for traffic that does not + match any ip_restriction rule. possible values include Allow + and Deny. Defaults to Allow. + type: string + loadBalancingMode: + description: |- + The Site load balancing mode. Possible values include: WeightedRoundRobin, LeastRequests, LeastResponseTime, WeightedTotalTraffic, RequestHash, PerSiteRoundRobin. Defaults to LeastRequests if omitted. + The Site load balancing mode. Possible values include: `WeightedRoundRobin`, `LeastRequests`, `LeastResponseTime`, `WeightedTotalTraffic`, `RequestHash`, `PerSiteRoundRobin`. Defaults to `LeastRequests` if omitted. + type: string + managedPipelineMode: + description: |- + Managed pipeline mode. Possible values include: Integrated, Classic. Defaults to Integrated. + The Managed Pipeline mode. Possible values include: `Integrated`, `Classic`. Defaults to `Integrated`. + type: string + minimumTlsVersion: + description: |- + The configures the minimum version of TLS required for SSL requests. Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + The configures the minimum version of TLS required for SSL requests. Possible values include: `1.0`, `1.1`, and `1.2`. Defaults to `1.2`. + type: string + preWarmedInstanceCount: + description: |- + The number of pre-warmed instances for this function app. Only affects apps on an Elastic Premium plan. + The number of pre-warmed instances for this function app. Only affects apps on an Elastic Premium plan. + type: number + remoteDebuggingEnabled: + description: |- + Should Remote Debugging be enabled. Defaults to false. + Should Remote Debugging be enabled. Defaults to `false`. + type: boolean + remoteDebuggingVersion: + description: |- + The Remote Debugging Version. Possible values include VS2017, VS2019, and VS2022. + The Remote Debugging Version. 
Possible values include `VS2017`, `VS2019`, and `VS2022“ + type: string + runtimeScaleMonitoringEnabled: + description: |- + Should Scale Monitoring of the Functions Runtime be enabled? + Should Functions Runtime Scale Monitoring be enabled. + type: boolean + scmIpRestriction: + description: One or more scm_ip_restriction blocks as defined + above. + items: + properties: + action: + description: |- + The action to take. Possible values are Allow or Deny. Defaults to Allow. + The action to take. Possible values are `Allow` or `Deny`. + type: string + description: + description: |- + The Description of this IP Restriction. + The description of the IP restriction rule. + type: string + headers: + description: A headers block as defined above. + items: + properties: + xAzureFdid: + description: Specifies a list of Azure Front Door + IDs. + items: + type: string + type: array + xFdHealthProbe: + description: Specifies if a Front Door Health + Probe should be expected. The only possible + value is 1. + items: + type: string + type: array + xForwardedFor: + description: Specifies a list of addresses for + which matching should be applied. Omitting this + value means allow any. + items: + type: string + type: array + xForwardedHost: + description: Specifies a list of Hosts for which + matching should be applied. + items: + type: string + type: array + type: object + type: array + ipAddress: + description: |- + The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + type: string + name: + description: |- + The name which should be used for this Storage Account. + The name which should be used for this `ip_restriction`. + type: string + priority: + description: |- + The priority value of this ip_restriction. Defaults to 65000. + The priority value of this `ip_restriction`. 
+ type: number + serviceTag: + description: |- + The Service Tag used for this IP Restriction. + The Service Tag used for this IP Restriction. + type: string + virtualNetworkSubnetId: + description: |- + The subnet id which will be used by this Function App for regional virtual network integration. + The Virtual Network Subnet ID used for this IP Restriction. + type: string + virtualNetworkSubnetIdRef: + description: Reference to a Subnet in network to populate + virtualNetworkSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkSubnetIdSelector: + description: Selector for a Subnet in network to populate + virtualNetworkSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + scmIpRestrictionDefaultAction: + description: The Default action for traffic that does not + match any scm_ip_restriction rule. possible values include + Allow and Deny. Defaults to Allow. + type: string + scmMinimumTlsVersion: + description: |- + Configures the minimum version of TLS required for SSL requests to the SCM site Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + Configures the minimum version of TLS required for SSL requests to the SCM site Possible values include: `1.0`, `1.1`, and `1.2`. Defaults to `1.2`. + type: string + scmUseMainIpRestriction: + description: |- + Should the Linux Function App ip_restriction configuration be used for the SCM also. + Should the Linux Function App `ip_restriction` configuration be used for the SCM also. + type: boolean + use32BitWorker: + description: |- + Should the Linux Web App use a 32-bit worker process. Defaults to false. + Should the Linux Web App use a 32-bit worker. + type: boolean + vnetRouteAllEnabled: + description: |- + Should all outbound traffic to have NAT Gateways, Network Security Groups and User Defined Routes applied? Defaults to false. + Should all outbound traffic to have Virtual Network Security Groups and User Defined Routes applied? 
Defaults to `false`. + type: boolean + websocketsEnabled: + description: |- + Should Web Sockets be enabled. Defaults to false. + Should Web Sockets be enabled. Defaults to `false`. + type: boolean + workerCount: + description: |- + The number of Workers for this Linux Function App. + The number of Workers for this Linux Function App. + type: number + type: object + stickySettings: + description: A sticky_settings block as defined below. + properties: + appSettingNames: + description: A list of app_setting names that the Linux Function + App will not swap between Slots when a swap operation is + triggered. + items: + type: string + type: array + connectionStringNames: + description: A list of connection_string names that the Linux + Function App will not swap between Slots when a swap operation + is triggered. + items: + type: string + type: array + type: object + storageAccount: + description: One or more storage_account blocks as defined below. + items: + properties: + accountName: + description: The Name of the Storage Account. + type: string + mountPath: + description: The path at which to mount the storage share. + type: string + name: + description: The name which should be used for this Storage + Account. + type: string + shareName: + description: The Name of the File Share or Container Name + for Blob storage. + type: string + type: + description: The Azure Storage Type. Possible values include + AzureFiles and AzureBlob. + type: string + type: object + type: array + storageAccountName: + description: |- + The backend storage account name which will be used by this Function App. + The backend storage account name which will be used by this Function App. + type: string + storageAccountNameRef: + description: Reference to a Account in storage to populate storageAccountName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + storageAccountNameSelector: + description: Selector for a Account in storage to populate storageAccountName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + storageKeyVaultSecretId: + description: |- + The Key Vault Secret ID, optionally including version, that contains the Connection String to connect to the storage account for this Function App. + The Key Vault Secret ID, including version, that contains the Connection String to connect to the storage account for this Function App. + type: string + storageUsesManagedIdentity: + description: |- + Should the Function App use Managed Identity to access the storage account. Conflicts with storage_account_access_key. + Should the Function App use its Managed Identity to access storage? + type: boolean + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Linux Function App. + type: object + x-kubernetes-map-type: granular + virtualNetworkSubnetId: + description: The subnet id which will be used by this Function + App for regional virtual network integration. + type: string + virtualNetworkSubnetIdRef: + description: Reference to a Subnet in network to populate virtualNetworkSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkSubnetIdSelector: + description: Selector for a Subnet in network to populate virtualNetworkSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + webdeployPublishBasicAuthenticationEnabled: + description: Should the default WebDeploy Basic Authentication + publishing credentials enabled. Defaults to true. + type: boolean + zipDeployFile: + description: |- + The local path and filename of the Zip packaged application to deploy to this Linux Function App. + The local path and filename of the Zip packaged application to deploy to this Linux Function App. **Note:** Using this value requires either `WEBSITE_RUN_FROM_PACKAGE=1` or `SCM_DO_BUILD_DURING_DEPLOYMENT=true` to be set on the App in `app_settings`. 
+ type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. 
+ - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.siteConfig is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.siteConfig) + || (has(self.initProvider) && has(self.initProvider.siteConfig))' + status: + description: LinuxFunctionAppStatus defines the observed state of LinuxFunctionApp. + properties: + atProvider: + properties: + appSettings: + additionalProperties: + type: string + description: |- + A map of key-value pairs for App Settings and custom values. + A map of key-value pairs for [App Settings](https://docs.microsoft.com/en-us/azure/azure-functions/functions-app-settings) and custom values. + type: object + x-kubernetes-map-type: granular + authSettings: + description: A auth_settings block as defined below. + properties: + activeDirectory: + description: An active_directory block as defined above. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. 
+ items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Active Directory. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The App Setting name that contains the client secret of the Client. Cannot be used with `client_secret`. + type: string + type: object + additionalLoginParameters: + additionalProperties: + type: string + description: |- + Specifies a map of login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + Specifies a map of Login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + type: object + x-kubernetes-map-type: granular + allowedExternalRedirectUrls: + description: |- + Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Linux Web App. + Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + items: + type: string + type: array + defaultProvider: + description: |- + The default authentication provider to use when multiple providers are configured. Possible values include: AzureActiveDirectory, Facebook, Google, MicrosoftAccount, Twitter, Github + The default authentication provider to use when multiple providers are configured. Possible values include: `AzureActiveDirectory`, `Facebook`, `Google`, `MicrosoftAccount`, `Twitter`, `Github`. + type: string + enabled: + description: |- + Should the Authentication / Authorization feature be enabled for the Linux Web App? + Should the Authentication / Authorization feature be enabled? + type: boolean + facebook: + description: A facebook block as defined below. 
+ properties: + appId: + description: |- + The App ID of the Facebook app used for login. + The App ID of the Facebook app used for login. + type: string + appSecretSettingName: + description: |- + The app setting name that contains the app_secret value used for Facebook Login. + The app setting name that contains the `app_secret` value used for Facebook Login. Cannot be specified with `app_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + Specifies a list of OAuth 2.0 scopes to be requested as part of Facebook Login authentication. + items: + type: string + type: array + type: object + github: + description: A github block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the GitHub app used for login. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for GitHub Login. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + items: + type: string + type: array + type: object + google: + description: A google block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Google web application. 
+ type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Google Login. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + Specifies a list of OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication. If not specified, "openid", "profile", and "email" are used as default scopes. + items: + type: string + type: array + type: object + issuer: + description: |- + The OpenID Connect Issuer URI that represents the entity which issues access tokens for this Linux Web App. + The OpenID Connect Issuer URI that represents the entity which issues access tokens. + type: string + microsoft: + description: A microsoft block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OAuth 2.0 client ID that was created for the app used for authentication. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + The list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, `wl.basic` is used as the default scope. 
+ items: + type: string + type: array + type: object + runtimeVersion: + description: |- + The RuntimeVersion of the Authentication / Authorization feature in use for the Linux Web App. + The RuntimeVersion of the Authentication / Authorization feature in use. + type: string + tokenRefreshExtensionHours: + description: |- + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + type: number + tokenStoreEnabled: + description: |- + Should the Linux Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to false. + Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to `false`. + type: boolean + twitter: + description: A twitter block as defined below. + properties: + consumerKey: + description: |- + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + type: string + consumerSecretSettingName: + description: |- + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret`. + type: string + type: object + unauthenticatedClientAction: + description: |- + The action to take when an unauthenticated client attempts to access the app. Possible values include: RedirectToLoginPage, AllowAnonymous. + The action to take when an unauthenticated client attempts to access the app. Possible values include: `RedirectToLoginPage`, `AllowAnonymous`. 
+ type: string + type: object + authSettingsV2: + description: An auth_settings_v2 block as defined below. + properties: + activeDirectoryV2: + description: An active_directory_v2 block as defined below. + properties: + allowedApplications: + description: |- + The list of allowed Applications for the Default Authorisation Policy. + The list of allowed Applications for the Default Authorisation Policy. + items: + type: string + type: array + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + items: + type: string + type: array + allowedGroups: + description: |- + The list of allowed Group Names for the Default Authorisation Policy. + The list of allowed Group Names for the Default Authorisation Policy. + items: + type: string + type: array + allowedIdentities: + description: |- + The list of allowed Identities for the Default Authorisation Policy. + The list of allowed Identities for the Default Authorisation Policy. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Active Directory. + type: string + clientSecretCertificateThumbprint: + description: |- + The thumbprint of the certificate used for signing purposes. + The thumbprint of the certificate used for signing purposes. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The App Setting name that contains the client secret of the Client. + type: string + jwtAllowedClientApplications: + description: |- + A list of Allowed Client Applications in the JWT Claim. + A list of Allowed Client Applications in the JWT Claim. 
+ items: + type: string + type: array + jwtAllowedGroups: + description: |- + A list of Allowed Groups in the JWT Claim. + A list of Allowed Groups in the JWT Claim. + items: + type: string + type: array + loginParameters: + additionalProperties: + type: string + description: |- + A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + type: object + x-kubernetes-map-type: granular + tenantAuthEndpoint: + description: |- + The Azure Tenant Endpoint for the Authenticating Tenant. e.g. https://login.microsoftonline.com/v2.0/{tenant-guid}/ + The Azure Tenant Endpoint for the Authenticating Tenant. e.g. `https://login.microsoftonline.com/v2.0/{tenant-guid}/`. + type: string + wwwAuthenticationDisabled: + description: |- + Should the www-authenticate provider should be omitted from the request? Defaults to false. + Should the www-authenticate provider should be omitted from the request? Defaults to `false` + type: boolean + type: object + appleV2: + description: An apple_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Apple web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Apple Login. + type: string + loginScopes: + description: The list of Login scopes that should be requested + as part of Microsoft Account authentication. + items: + type: string + type: array + type: object + authEnabled: + description: |- + Should the AuthV2 Settings be enabled. Defaults to false. + Should the AuthV2 Settings be enabled. 
Defaults to `false` + type: boolean + azureStaticWebAppV2: + description: An azure_static_web_app_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Static Web App Authentication. + type: string + type: object + configFilePath: + description: |- + The path to the App Auth settings. + The path to the App Auth settings. **Note:** Relative Paths are evaluated from the Site Root directory. + type: string + customOidcV2: + description: Zero or more custom_oidc_v2 blocks as defined + below. + items: + properties: + authorisationEndpoint: + description: |- + The endpoint to make the Authorisation Request as supplied by openid_configuration_endpoint response. + The endpoint to make the Authorisation Request. + type: string + certificationUri: + description: |- + The endpoint that provides the keys necessary to validate the token as supplied by openid_configuration_endpoint response. + The endpoint that provides the keys necessary to validate the token. + type: string + clientCredentialMethod: + description: |- + The Client Credential Method used. + The Client Credential Method used. Currently the only supported value is `ClientSecretPost`. + type: string + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with this Custom OIDC. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The App Setting name that contains the secret for this Custom OIDC Client. + type: string + issuerEndpoint: + description: |- + The endpoint that issued the Token as supplied by openid_configuration_endpoint response. + The endpoint that issued the Token. 
+ type: string + name: + description: |- + The name which should be used for this Storage Account. + The name of the Custom OIDC Authentication Provider. + type: string + nameClaimType: + description: |- + The name of the claim that contains the users name. + The name of the claim that contains the users name. + type: string + openidConfigurationEndpoint: + description: |- + The app setting name that contains the client_secret value used for the Custom OIDC Login. + The endpoint that contains all the configuration endpoints for this Custom OIDC provider. + type: string + scopes: + description: |- + The list of the scopes that should be requested while authenticating. + The list of the scopes that should be requested while authenticating. + items: + type: string + type: array + tokenEndpoint: + description: |- + The endpoint used to request a Token as supplied by openid_configuration_endpoint response. + The endpoint used to request a Token. + type: string + type: object + type: array + defaultProvider: + description: |- + The Default Authentication Provider to use when the unauthenticated_action is set to RedirectToLoginPage. Possible values include: apple, azureactivedirectory, facebook, github, google, twitter and the name of your custom_oidc_v2 provider. + The Default Authentication Provider to use when the `unauthenticated_action` is set to `RedirectToLoginPage`. Possible values include: `apple`, `azureactivedirectory`, `facebook`, `github`, `google`, `twitter` and the `name` of your `custom_oidc_v2` provider. + type: string + excludedPaths: + description: |- + The paths which should be excluded from the unauthenticated_action when it is set to RedirectToLoginPage. + The paths which should be excluded from the `unauthenticated_action` when it is set to `RedirectToLoginPage`. + items: + type: string + type: array + facebookV2: + description: A facebook_v2 block as defined below. 
+ properties: + appId: + description: |- + The App ID of the Facebook app used for login. + The App ID of the Facebook app used for login. + type: string + appSecretSettingName: + description: |- + The app setting name that contains the app_secret value used for Facebook Login. + The app setting name that contains the `app_secret` value used for Facebook Login. + type: string + graphApiVersion: + description: |- + The version of the Facebook API to be used while logging in. + The version of the Facebook API to be used while logging in. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of scopes to be requested as part of Facebook Login authentication. + items: + type: string + type: array + type: object + forwardProxyConvention: + description: |- + The convention used to determine the url of the request made. Possible values include NoProxy, Standard, Custom. Defaults to NoProxy. + The convention used to determine the url of the request made. Possible values include `ForwardProxyConventionNoProxy`, `ForwardProxyConventionStandard`, `ForwardProxyConventionCustom`. Defaults to `ForwardProxyConventionNoProxy` + type: string + forwardProxyCustomHostHeaderName: + description: |- + The name of the custom header containing the host of the request. + The name of the header containing the host of the request. + type: string + forwardProxyCustomSchemeHeaderName: + description: |- + The name of the custom header containing the scheme of the request. + The name of the header containing the scheme of the request. + type: string + githubV2: + description: A github_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the GitHub app used for login. 
+ type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for GitHub Login. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + items: + type: string + type: array + type: object + googleV2: + description: A google_v2 block as defined below. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed Audiences that will be requested as part of Google Sign-In authentication. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Google web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Google Login. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of Login scopes that will be requested as part of Google Sign-In authentication. + items: + type: string + type: array + type: object + httpRouteApiPrefix: + description: |- + The prefix that should precede all the authentication and authorisation paths. Defaults to /.auth. + The prefix that should precede all the authentication and authorisation paths. Defaults to `/.auth` + type: string + login: + description: A login block as defined below. 
+ properties: + allowedExternalRedirectUrls: + description: |- + External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. + External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. **Note:** URLs within the current domain are always implicitly allowed. + items: + type: string + type: array + cookieExpirationConvention: + description: |- + The method by which cookies expire. Possible values include: FixedTime, and IdentityProviderDerived. Defaults to FixedTime. + The method by which cookies expire. Possible values include: `FixedTime`, and `IdentityProviderDerived`. Defaults to `FixedTime`. + type: string + cookieExpirationTime: + description: |- + The time after the request is made when the session cookie should expire. Defaults to 08:00:00. + The time after the request is made when the session cookie should expire. Defaults to `08:00:00`. + type: string + logoutEndpoint: + description: |- + The endpoint to which logout requests should be made. + The endpoint to which logout requests should be made. + type: string + nonceExpirationTime: + description: |- + The time after the request is made when the nonce should expire. Defaults to 00:05:00. + The time after the request is made when the nonce should expire. Defaults to `00:05:00`. + type: string + preserveUrlFragmentsForLogins: + description: |- + Should the fragments from the request be preserved after the login request is made. Defaults to false. + Should the fragments from the request be preserved after the login request is made. Defaults to `false`. + type: boolean + tokenRefreshExtensionTime: + description: |- + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. 
+ The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + type: number + tokenStoreEnabled: + description: |- + Should the Token Store configuration Enabled. Defaults to false + Should the Token Store configuration Enabled. Defaults to `false` + type: boolean + tokenStorePath: + description: |- + The directory path in the App Filesystem in which the tokens will be stored. + The directory path in the App Filesystem in which the tokens will be stored. + type: string + tokenStoreSasSettingName: + description: |- + The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + type: string + validateNonce: + description: |- + Should the nonce be validated while completing the login flow. Defaults to true. + Should the nonce be validated while completing the login flow. Defaults to `true`. + type: boolean + type: object + microsoftV2: + description: A microsoft_v2 block as defined below. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OAuth 2.0 client ID that was created for the app used for authentication. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. 
+ type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + The list of Login scopes that will be requested as part of Microsoft Account authentication. + items: + type: string + type: array + type: object + requireAuthentication: + description: |- + Should the authentication flow be used for all requests. + Should the authentication flow be used for all requests. + type: boolean + requireHttps: + description: |- + Should HTTPS be required on connections? Defaults to true. + Should HTTPS be required on connections? Defaults to true. + type: boolean + runtimeVersion: + description: |- + The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to ~1. + The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to `~1` + type: string + twitterV2: + description: A twitter_v2 block as defined below. + properties: + consumerKey: + description: |- + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + type: string + consumerSecretSettingName: + description: |- + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + type: string + type: object + unauthenticatedAction: + description: |- + The action to take for requests made without authentication. Possible values include RedirectToLoginPage, AllowAnonymous, Return401, and Return403. Defaults to RedirectToLoginPage. + The action to take for requests made without authentication. Possible values include `RedirectToLoginPage`, `AllowAnonymous`, `Return401`, and `Return403`. Defaults to `RedirectToLoginPage`. + type: string + type: object + backup: + description: A backup block as defined below. 
+ properties: + enabled: + description: |- + Should this backup job be enabled? Defaults to true. + Should this backup job be enabled? + type: boolean + name: + description: |- + The name which should be used for this Backup. + The name which should be used for this Backup. + type: string + schedule: + description: A schedule block as defined below. + properties: + frequencyInterval: + description: |- + How often the backup should be executed (e.g. for weekly backup, this should be set to 7 and frequency_unit should be set to Day). + How often the backup should be executed (e.g. for weekly backup, this should be set to `7` and `frequency_unit` should be set to `Day`). + type: number + frequencyUnit: + description: |- + The unit of time for how often the backup should take place. Possible values include: Day and Hour. + The unit of time for how often the backup should take place. Possible values include: `Day` and `Hour`. + type: string + keepAtLeastOneBackup: + description: |- + Should the service keep at least one backup, regardless of age of backup. Defaults to false. + Should the service keep at least one backup, regardless of age of backup. Defaults to `false`. + type: boolean + lastExecutionTime: + description: The time the backup was last attempted. + type: string + retentionPeriodDays: + description: |- + After how many days backups should be deleted. Defaults to 30. + After how many days backups should be deleted. + type: number + startTime: + description: |- + When the schedule should start working in RFC-3339 format. + When the schedule should start working in RFC-3339 format. + type: string + type: object + type: object + builtinLoggingEnabled: + description: |- + Should built in logging be enabled. Configures AzureWebJobsDashboard app setting based on the configured storage setting. Defaults to true. + Should built in logging be enabled. 
Configures `AzureWebJobsDashboard` app setting based on the configured storage setting + type: boolean + clientCertificateEnabled: + description: |- + Should the function app use Client Certificates. + Should the function app use Client Certificates + type: boolean + clientCertificateExclusionPaths: + description: |- + Paths to exclude when using client certificates, separated by ; + Paths to exclude when using client certificates, separated by ; + type: string + clientCertificateMode: + description: |- + The mode of the Function App's client certificates requirement for incoming requests. Possible values are Required, Optional, and OptionalInteractiveUser. Defaults to Optional. + The mode of the Function App's client certificates requirement for incoming requests. Possible values are `Required`, `Optional`, and `OptionalInteractiveUser` + type: string + connectionString: + description: One or more connection_string blocks as defined below. + items: + properties: + name: + description: |- + The name which should be used for this Connection. + The name which should be used for this Connection. + type: string + type: + description: |- + Type of database. Possible values include: MySQL, SQLServer, SQLAzure, Custom, NotificationHub, ServiceBus, EventHub, APIHub, DocDb, RedisCache, and PostgreSQL. + Type of database. Possible values include: `MySQL`, `SQLServer`, `SQLAzure`, `Custom`, `NotificationHub`, `ServiceBus`, `EventHub`, `APIHub`, `DocDb`, `RedisCache`, and `PostgreSQL`. + type: string + type: object + type: array + contentShareForceDisabled: + description: |- + Should the settings for linking the Function App to storage be suppressed. + Force disable the content share settings. + type: boolean + dailyMemoryTimeQuota: + description: |- + The amount of memory in gigabyte-seconds that your application is allowed to consume per day. Setting this value only affects function apps under the consumption plan. Defaults to 0. 
+ The amount of memory in gigabyte-seconds that your application is allowed to consume per day. Setting this value only affects function apps in Consumption Plans. + type: number + defaultHostname: + description: The default hostname of the Linux Function App. + type: string + enabled: + description: |- + Is the Function App enabled? Defaults to true. + Is the Linux Function App enabled. + type: boolean + ftpPublishBasicAuthenticationEnabled: + description: Should the default FTP Basic Authentication publishing + profile be enabled. Defaults to true. + type: boolean + functionsExtensionVersion: + description: |- + The runtime version associated with the Function App. Defaults to ~4. + The runtime version associated with the Function App. + type: string + hostingEnvironmentId: + description: The ID of the App Service Environment used by Function + App. + type: string + httpsOnly: + description: |- + Can the Function App only be accessed via HTTPS? Defaults to false. + Can the Function App only be accessed via HTTPS? + type: boolean + id: + description: The ID of the Linux Function App. + type: string + identity: + description: A identity block as defined below. + properties: + identityIds: + description: A list of User Assigned Managed Identity IDs + to be assigned to this Linux Function App. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Principal ID associated with this Managed + Service Identity. + type: string + tenantId: + description: The Tenant ID associated with this Managed Service + Identity. + type: string + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Linux Function App. Possible + values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + keyVaultReferenceIdentityId: + description: |- + The User Assigned Identity ID used for accessing KeyVault secrets. 
The identity must be assigned to the application in the identity block. For more information see - Access vaults with a user-assigned identity + The User Assigned Identity to use for Key Vault access. + type: string + kind: + description: The Kind value for this Linux Function App. + type: string + location: + description: The Azure Region where the Linux Function App should + exist. Changing this forces a new Linux Function App to be created. + type: string + name: + description: |- + The name which should be used for this Linux Function App. Changing this forces a new Linux Function App to be created. Limit the function name to 32 characters to avoid naming collisions. For more information about Function App naming rule and Host ID Collisions + Specifies the name of the Function App. + type: string + outboundIpAddressList: + description: A list of outbound IP addresses. For example ["52.23.25.3", + "52.143.43.12"] + items: + type: string + type: array + outboundIpAddresses: + description: A comma separated list of outbound IP addresses as + a string. For example 52.23.25.3,52.143.43.12. + type: string + possibleOutboundIpAddressList: + description: A list of possible outbound IP addresses, not all + of which are necessarily in use. This is a superset of outbound_ip_address_list. + For example ["52.23.25.3", "52.143.43.12"]. + items: + type: string + type: array + possibleOutboundIpAddresses: + description: A comma separated list of possible outbound IP addresses + as a string. For example 52.23.25.3,52.143.43.12,52.143.43.17. + This is a superset of outbound_ip_addresses. + type: string + publicNetworkAccessEnabled: + description: Should public network access be enabled for the Function + App. Defaults to true. + type: boolean + resourceGroupName: + description: The name of the Resource Group where the Linux Function + App should exist. Changing this forces a new Linux Function + App to be created. 
+ type: string + servicePlanId: + description: |- + The ID of the App Service Plan within which to create this Function App. + The ID of the App Service Plan within which to create this Function App + type: string + siteConfig: + description: A site_config block as defined below. + properties: + alwaysOn: + description: |- + If this Linux Web App is Always On enabled. Defaults to false. + If this Linux Web App is Always On enabled. Defaults to `false`. + type: boolean + apiDefinitionUrl: + description: |- + The URL of the API definition that describes this Linux Function App. + The URL of the API definition that describes this Linux Function App. + type: string + apiManagementApiId: + description: |- + The ID of the API Management API for this Linux Function App. + The ID of the API Management API for this Linux Function App. + type: string + appCommandLine: + description: |- + The App command line to launch. + The program and any arguments used to launch this app via the command line. (Example `node myapp.js`). + type: string + appScaleLimit: + description: |- + The number of workers this function app can scale out to. Only applicable to apps on the Consumption and Premium plan. + The number of workers this function app can scale out to. Only applicable to apps on the Consumption and Premium plan. + type: number + appServiceLogs: + description: An app_service_logs block as defined above. + properties: + diskQuotaMb: + description: |- + The amount of disk space to use for logs. Valid values are between 25 and 100. Defaults to 35. + The amount of disk space to use for logs. Valid values are between `25` and `100`. + type: number + retentionPeriodDays: + description: |- + After how many days backups should be deleted. Defaults to 30. + The retention period for logs in days. Valid values are between `0` and `99999`. Defaults to `0` (never delete). + type: number + type: object + applicationStack: + description: An application_stack block as defined above. 
+ properties: + docker: + description: |- + One or more docker blocks as defined below. + A docker block + items: + properties: + imageName: + description: |- + The name of the Docker image to use. + The name of the Docker image to use. + type: string + imageTag: + description: |- + The image tag of the image to use. + The image tag of the image to use. + type: string + registryUrl: + description: |- + The URL of the docker registry. + The URL of the docker registry. + type: string + type: object + type: array + dotnetVersion: + description: |- + The version of .NET to use. Possible values include 3.1, 6.0, 7.0 and 8.0. + The version of .Net. Possible values are `3.1`, `6.0` and `7.0` + type: string + javaVersion: + description: |- + The Version of Java to use. Supported versions include 8, 11 & 17. + The version of Java to use. Possible values are `8`, `11`, and `17` + type: string + nodeVersion: + description: |- + The version of Node to run. Possible values include 12, 14, 16 and 18. + The version of Node to use. Possible values include `12`, `14`, `16` and `18` + type: string + powershellCoreVersion: + description: |- + The version of PowerShell Core to run. Possible values are 7, and 7.2. + The version of PowerShell Core to use. Possibles values are `7`, and `7.2` + type: string + pythonVersion: + description: |- + The version of Python to run. Possible values are 3.12, 3.11, 3.10, 3.9, 3.8 and 3.7. + The version of Python to use. Possible values include `3.12`, `3.11`, `3.10`, `3.9`, `3.8`, and `3.7`. + type: string + useCustomRuntime: + description: Should the Linux Function App use a custom + runtime? + type: boolean + useDotnetIsolatedRuntime: + description: |- + Should the DotNet process use an isolated runtime. Defaults to false. + Should the DotNet process use an isolated runtime. Defaults to `false`. 
+ type: boolean + type: object + containerRegistryManagedIdentityClientId: + description: |- + The Client ID of the Managed Service Identity to use for connections to the Azure Container Registry. + The Client ID of the Managed Service Identity to use for connections to the Azure Container Registry. + type: string + containerRegistryUseManagedIdentity: + description: |- + Should connections for Azure Container Registry use Managed Identity. + Should connections for Azure Container Registry use Managed Identity. + type: boolean + cors: + description: A cors block as defined above. + properties: + allowedOrigins: + description: |- + Specifies a list of origins that should be allowed to make cross-origin calls. + Specifies a list of origins that should be allowed to make cross-origin calls. + items: + type: string + type: array + x-kubernetes-list-type: set + supportCredentials: + description: |- + Are credentials allowed in CORS requests? Defaults to false. + Are credentials allowed in CORS requests? Defaults to `false`. + type: boolean + type: object + defaultDocuments: + description: |- + Specifies a list of Default Documents for the Linux Web App. + Specifies a list of Default Documents for the Linux Web App. + items: + type: string + type: array + detailedErrorLoggingEnabled: + description: |- + Is the Function App enabled? Defaults to true. + Is detailed error logging enabled + type: boolean + elasticInstanceMinimum: + description: |- + The number of minimum instances for this Linux Function App. Only affects apps on Elastic Premium plans. + The number of minimum instances for this Linux Function App. Only affects apps on Elastic Premium plans. + type: number + ftpsState: + description: |- + State of FTP / FTPS service for this function app. Possible values include: AllAllowed, FtpsOnly and Disabled. Defaults to Disabled. + State of FTP / FTPS service for this function app. Possible values include: `AllAllowed`, `FtpsOnly` and `Disabled`. Defaults to `Disabled`. 
+ type: string + healthCheckEvictionTimeInMin: + description: |- + The amount of time in minutes that a node can be unhealthy before being removed from the load balancer. Possible values are between 2 and 10. Only valid in conjunction with health_check_path. + The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between `2` and `10`. Defaults to `10`. Only valid in conjunction with `health_check_path` + type: number + healthCheckPath: + description: |- + The path to be checked for this function app health. + The path to be checked for this function app health. + type: string + http2Enabled: + description: |- + Specifies if the HTTP2 protocol should be enabled. Defaults to false. + Specifies if the http2 protocol should be enabled. Defaults to `false`. + type: boolean + ipRestriction: + description: One or more ip_restriction blocks as defined + above. + items: + properties: + action: + description: |- + The action to take. Possible values are Allow or Deny. Defaults to Allow. + The action to take. Possible values are `Allow` or `Deny`. + type: string + description: + description: |- + The Description of this IP Restriction. + The description of the IP restriction rule. + type: string + headers: + description: A headers block as defined above. + items: + properties: + xAzureFdid: + description: Specifies a list of Azure Front Door + IDs. + items: + type: string + type: array + xFdHealthProbe: + description: Specifies if a Front Door Health + Probe should be expected. The only possible + value is 1. + items: + type: string + type: array + xForwardedFor: + description: Specifies a list of addresses for + which matching should be applied. Omitting this + value means allow any. + items: + type: string + type: array + xForwardedHost: + description: Specifies a list of Hosts for which + matching should be applied. 
+ items: + type: string + type: array + type: object + type: array + ipAddress: + description: |- + The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + type: string + name: + description: |- + The name which should be used for this Storage Account. + The name which should be used for this `ip_restriction`. + type: string + priority: + description: |- + The priority value of this ip_restriction. Defaults to 65000. + The priority value of this `ip_restriction`. + type: number + serviceTag: + description: |- + The Service Tag used for this IP Restriction. + The Service Tag used for this IP Restriction. + type: string + virtualNetworkSubnetId: + description: |- + The subnet id which will be used by this Function App for regional virtual network integration. + The Virtual Network Subnet ID used for this IP Restriction. + type: string + type: object + type: array + ipRestrictionDefaultAction: + description: The Default action for traffic that does not + match any ip_restriction rule. possible values include Allow + and Deny. Defaults to Allow. + type: string + linuxFxVersion: + description: The Linux FX Version + type: string + loadBalancingMode: + description: |- + The Site load balancing mode. Possible values include: WeightedRoundRobin, LeastRequests, LeastResponseTime, WeightedTotalTraffic, RequestHash, PerSiteRoundRobin. Defaults to LeastRequests if omitted. + The Site load balancing mode. Possible values include: `WeightedRoundRobin`, `LeastRequests`, `LeastResponseTime`, `WeightedTotalTraffic`, `RequestHash`, `PerSiteRoundRobin`. Defaults to `LeastRequests` if omitted. + type: string + managedPipelineMode: + description: |- + Managed pipeline mode. Possible values include: Integrated, Classic. Defaults to Integrated. + The Managed Pipeline mode. 
Possible values include: `Integrated`, `Classic`. Defaults to `Integrated`. + type: string + minimumTlsVersion: + description: |- + The configures the minimum version of TLS required for SSL requests. Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + The configures the minimum version of TLS required for SSL requests. Possible values include: `1.0`, `1.1`, and `1.2`. Defaults to `1.2`. + type: string + preWarmedInstanceCount: + description: |- + The number of pre-warmed instances for this function app. Only affects apps on an Elastic Premium plan. + The number of pre-warmed instances for this function app. Only affects apps on an Elastic Premium plan. + type: number + remoteDebuggingEnabled: + description: |- + Should Remote Debugging be enabled. Defaults to false. + Should Remote Debugging be enabled. Defaults to `false`. + type: boolean + remoteDebuggingVersion: + description: |- + The Remote Debugging Version. Possible values include VS2017, VS2019, and VS2022. + The Remote Debugging Version. Possible values include `VS2017`, `VS2019`, and `VS2022“ + type: string + runtimeScaleMonitoringEnabled: + description: |- + Should Scale Monitoring of the Functions Runtime be enabled? + Should Functions Runtime Scale Monitoring be enabled. + type: boolean + scmIpRestriction: + description: One or more scm_ip_restriction blocks as defined + above. + items: + properties: + action: + description: |- + The action to take. Possible values are Allow or Deny. Defaults to Allow. + The action to take. Possible values are `Allow` or `Deny`. + type: string + description: + description: |- + The Description of this IP Restriction. + The description of the IP restriction rule. + type: string + headers: + description: A headers block as defined above. + items: + properties: + xAzureFdid: + description: Specifies a list of Azure Front Door + IDs. 
+ items: + type: string + type: array + xFdHealthProbe: + description: Specifies if a Front Door Health + Probe should be expected. The only possible + value is 1. + items: + type: string + type: array + xForwardedFor: + description: Specifies a list of addresses for + which matching should be applied. Omitting this + value means allow any. + items: + type: string + type: array + xForwardedHost: + description: Specifies a list of Hosts for which + matching should be applied. + items: + type: string + type: array + type: object + type: array + ipAddress: + description: |- + The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + type: string + name: + description: |- + The name which should be used for this Storage Account. + The name which should be used for this `ip_restriction`. + type: string + priority: + description: |- + The priority value of this ip_restriction. Defaults to 65000. + The priority value of this `ip_restriction`. + type: number + serviceTag: + description: |- + The Service Tag used for this IP Restriction. + The Service Tag used for this IP Restriction. + type: string + virtualNetworkSubnetId: + description: |- + The subnet id which will be used by this Function App for regional virtual network integration. + The Virtual Network Subnet ID used for this IP Restriction. + type: string + type: object + type: array + scmIpRestrictionDefaultAction: + description: The Default action for traffic that does not + match any scm_ip_restriction rule. possible values include + Allow and Deny. Defaults to Allow. + type: string + scmMinimumTlsVersion: + description: |- + Configures the minimum version of TLS required for SSL requests to the SCM site Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. 
+ Configures the minimum version of TLS required for SSL requests to the SCM site Possible values include: `1.0`, `1.1`, and `1.2`. Defaults to `1.2`. + type: string + scmType: + description: The SCM Type in use by the Linux Function App. + type: string + scmUseMainIpRestriction: + description: |- + Should the Linux Function App ip_restriction configuration be used for the SCM also. + Should the Linux Function App `ip_restriction` configuration be used for the SCM also. + type: boolean + use32BitWorker: + description: |- + Should the Linux Web App use a 32-bit worker process. Defaults to false. + Should the Linux Web App use a 32-bit worker. + type: boolean + vnetRouteAllEnabled: + description: |- + Should all outbound traffic to have NAT Gateways, Network Security Groups and User Defined Routes applied? Defaults to false. + Should all outbound traffic to have Virtual Network Security Groups and User Defined Routes applied? Defaults to `false`. + type: boolean + websocketsEnabled: + description: |- + Should Web Sockets be enabled. Defaults to false. + Should Web Sockets be enabled. Defaults to `false`. + type: boolean + workerCount: + description: |- + The number of Workers for this Linux Function App. + The number of Workers for this Linux Function App. + type: number + type: object + stickySettings: + description: A sticky_settings block as defined below. + properties: + appSettingNames: + description: A list of app_setting names that the Linux Function + App will not swap between Slots when a swap operation is + triggered. + items: + type: string + type: array + connectionStringNames: + description: A list of connection_string names that the Linux + Function App will not swap between Slots when a swap operation + is triggered. + items: + type: string + type: array + type: object + storageAccount: + description: One or more storage_account blocks as defined below. + items: + properties: + accountName: + description: The Name of the Storage Account. 
+ type: string + mountPath: + description: The path at which to mount the storage share. + type: string + name: + description: The name which should be used for this Storage + Account. + type: string + shareName: + description: The Name of the File Share or Container Name + for Blob storage. + type: string + type: + description: The Azure Storage Type. Possible values include + AzureFiles and AzureBlob. + type: string + type: object + type: array + storageAccountName: + description: |- + The backend storage account name which will be used by this Function App. + The backend storage account name which will be used by this Function App. + type: string + storageKeyVaultSecretId: + description: |- + The Key Vault Secret ID, optionally including version, that contains the Connection String to connect to the storage account for this Function App. + The Key Vault Secret ID, including version, that contains the Connection String to connect to the storage account for this Function App. + type: string + storageUsesManagedIdentity: + description: |- + Should the Function App use Managed Identity to access the storage account. Conflicts with storage_account_access_key. + Should the Function App use its Managed Identity to access storage? + type: boolean + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Linux Function App. + type: object + x-kubernetes-map-type: granular + virtualNetworkSubnetId: + description: The subnet id which will be used by this Function + App for regional virtual network integration. + type: string + webdeployPublishBasicAuthenticationEnabled: + description: Should the default WebDeploy Basic Authentication + publishing credentials enabled. Defaults to true. + type: boolean + zipDeployFile: + description: |- + The local path and filename of the Zip packaged application to deploy to this Linux Function App. 
+ The local path and filename of the Zip packaged application to deploy to this Linux Function App. **Note:** Using this value requires either `WEBSITE_RUN_FROM_PACKAGE=1` or `SCM_DO_BUILD_DURING_DEPLOYMENT=true` to be set on the App in `app_settings`. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/web.azure.upbound.io_linuxfunctionappslots.yaml b/package/crds/web.azure.upbound.io_linuxfunctionappslots.yaml index 5674ce2ae..4da205322 100644 --- a/package/crds/web.azure.upbound.io_linuxfunctionappslots.yaml +++ b/package/crds/web.azure.upbound.io_linuxfunctionappslots.yaml @@ -4957,3 +4957,4795 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: LinuxFunctionAppSlot is the Schema for the LinuxFunctionAppSlots + API. Manages a Linux Function App Slot. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: LinuxFunctionAppSlotSpec defines the desired state of LinuxFunctionAppSlot + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + appSettings: + additionalProperties: + type: string + description: |- + A map of key-value pairs for App Settings and custom values. + A map of key-value pairs for [App Settings](https://docs.microsoft.com/en-us/azure/azure-functions/functions-app-settings) and custom values. + type: object + x-kubernetes-map-type: granular + authSettings: + description: an auth_settings block as detailed below. + properties: + activeDirectory: + description: an active_directory block as detailed below. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Active Directory. 
+ type: string + clientSecretSecretRef: + description: |- + The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. + The Client Secret for the Client ID. Cannot be used with `client_secret_setting_name`. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The App Setting name that contains the client secret of the Client. Cannot be used with `client_secret`. + type: string + type: object + additionalLoginParameters: + additionalProperties: + type: string + description: |- + Specifies a map of login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + Specifies a map of Login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + type: object + x-kubernetes-map-type: granular + allowedExternalRedirectUrls: + description: |- + Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + items: + type: string + type: array + defaultProvider: + description: |- + The default authentication provider to use when multiple providers are configured. Possible values include: AzureActiveDirectory, Facebook, Google, MicrosoftAccount, Twitter, Github. + The default authentication provider to use when multiple providers are configured. Possible values include: `AzureActiveDirectory`, `Facebook`, `Google`, `MicrosoftAccount`, `Twitter`, `Github`. 
+ type: string + enabled: + description: |- + Should the Authentication / Authorization feature be enabled? + Should the Authentication / Authorization feature be enabled? + type: boolean + facebook: + description: a facebook block as detailed below. + properties: + appId: + description: |- + The App ID of the Facebook app used for login. + The App ID of the Facebook app used for login. + type: string + appSecretSecretRef: + description: |- + The App Secret of the Facebook app used for Facebook login. Cannot be specified with app_secret_setting_name. + The App Secret of the Facebook app used for Facebook Login. Cannot be specified with `app_secret_setting_name`. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + appSecretSettingName: + description: |- + The app setting name that contains the app_secret value used for Facebook Login. + The app setting name that contains the `app_secret` value used for Facebook Login. Cannot be specified with `app_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + Specifies a list of OAuth 2.0 scopes to be requested as part of Facebook Login authentication. + items: + type: string + type: array + type: object + github: + description: a github block as detailed below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the GitHub app used for login. + type: string + clientSecretSecretRef: + description: |- + The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. 
+ The Client Secret of the GitHub app used for GitHub Login. Cannot be specified with `client_secret_setting_name`. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for GitHub Login. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + items: + type: string + type: array + type: object + google: + description: a google block as detailed below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Google web application. + type: string + clientSecretSecretRef: + description: |- + The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. + The client secret associated with the Google web application. Cannot be specified with `client_secret_setting_name`. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - key + - name + - namespace + type: object + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Google Login. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + Specifies a list of OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication. If not specified, "openid", "profile", and "email" are used as default scopes. + items: + type: string + type: array + type: object + issuer: + description: |- + The OpenID Connect Issuer URI that represents the entity which issues access tokens. + The OpenID Connect Issuer URI that represents the entity which issues access tokens. + type: string + microsoft: + description: a microsoft block as detailed below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OAuth 2.0 client ID that was created for the app used for authentication. + type: string + clientSecretSecretRef: + description: |- + The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. + The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret_setting_name`. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - key + - name + - namespace + type: object + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + The list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, `wl.basic` is used as the default scope. + items: + type: string + type: array + type: object + runtimeVersion: + description: |- + The RuntimeVersion of the Authentication / Authorization feature in use. + The RuntimeVersion of the Authentication / Authorization feature in use. + type: string + tokenRefreshExtensionHours: + description: |- + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + type: number + tokenStoreEnabled: + description: |- + Should the Linux Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to false. + Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to `false`. + type: boolean + twitter: + description: a twitter block as detailed below. + properties: + consumerKey: + description: |- + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + The OAuth 1.0a consumer key of the Twitter application used for sign-in. 
+ type: string + consumerSecretSecretRef: + description: |- + The OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with consumer_secret_setting_name. + The OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret_setting_name`. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + consumerSecretSettingName: + description: |- + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret`. + type: string + type: object + unauthenticatedClientAction: + description: |- + The action to take when an unauthenticated client attempts to access the app. Possible values include: RedirectToLoginPage, AllowAnonymous. + The action to take when an unauthenticated client attempts to access the app. Possible values include: `RedirectToLoginPage`, `AllowAnonymous`. + type: string + type: object + authSettingsV2: + description: an auth_settings_v2 block as detailed below. + properties: + activeDirectoryV2: + description: An active_directory_v2 block as defined below. + properties: + allowedApplications: + description: |- + The list of allowed Applications for the Default Authorisation Policy. + The list of allowed Applications for the Default Authorisation Policy. + items: + type: string + type: array + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. 
+ items: + type: string + type: array + allowedGroups: + description: |- + The list of allowed Group Names for the Default Authorisation Policy. + The list of allowed Group Names for the Default Authorisation Policy. + items: + type: string + type: array + allowedIdentities: + description: |- + The list of allowed Identities for the Default Authorisation Policy. + The list of allowed Identities for the Default Authorisation Policy. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Active Directory. + type: string + clientSecretCertificateThumbprint: + description: |- + The thumbprint of the certificate used for signing purposes. + The thumbprint of the certificate used for signing purposes. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The App Setting name that contains the client secret of the Client. + type: string + jwtAllowedClientApplications: + description: |- + A list of Allowed Client Applications in the JWT Claim. + A list of Allowed Client Applications in the JWT Claim. + items: + type: string + type: array + jwtAllowedGroups: + description: |- + A list of Allowed Groups in the JWT Claim. + A list of Allowed Groups in the JWT Claim. + items: + type: string + type: array + loginParameters: + additionalProperties: + type: string + description: |- + A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + type: object + x-kubernetes-map-type: granular + tenantAuthEndpoint: + description: |- + The Azure Tenant Endpoint for the Authenticating Tenant. e.g. https://login.microsoftonline.com/v2.0/{tenant-guid}/ + The Azure Tenant Endpoint for the Authenticating Tenant. 
e.g. `https://login.microsoftonline.com/v2.0/{tenant-guid}/`. + type: string + wwwAuthenticationDisabled: + description: |- + Should the www-authenticate provider should be omitted from the request? Defaults to false. + Should the www-authenticate provider should be omitted from the request? Defaults to `false` + type: boolean + type: object + appleV2: + description: An apple_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Apple web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Apple Login. + type: string + type: object + authEnabled: + description: |- + Should the AuthV2 Settings be enabled. Defaults to false. + Should the AuthV2 Settings be enabled. Defaults to `false` + type: boolean + azureStaticWebAppV2: + description: An azure_static_web_app_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Static Web App Authentication. + type: string + type: object + configFilePath: + description: |- + The path to the App Auth settings. + The path to the App Auth settings. **Note:** Relative Paths are evaluated from the Site Root directory. + type: string + customOidcV2: + description: Zero or more custom_oidc_v2 blocks as defined + below. + items: + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with this Custom OIDC. + type: string + name: + description: |- + The name which should be used for this Storage Account. 
+ The name of the Custom OIDC Authentication Provider. + type: string + nameClaimType: + description: |- + The name of the claim that contains the users name. + The name of the claim that contains the users name. + type: string + openidConfigurationEndpoint: + description: |- + The app setting name that contains the client_secret value used for the Custom OIDC Login. + The endpoint that contains all the configuration endpoints for this Custom OIDC provider. + type: string + scopes: + description: |- + The list of the scopes that should be requested while authenticating. + The list of the scopes that should be requested while authenticating. + items: + type: string + type: array + type: object + type: array + defaultProvider: + description: |- + The Default Authentication Provider to use when the unauthenticated_action is set to RedirectToLoginPage. Possible values include: apple, azureactivedirectory, facebook, github, google, twitter and the name of your custom_oidc_v2 provider. + The Default Authentication Provider to use when the `unauthenticated_action` is set to `RedirectToLoginPage`. Possible values include: `apple`, `azureactivedirectory`, `facebook`, `github`, `google`, `twitter` and the `name` of your `custom_oidc_v2` provider. + type: string + excludedPaths: + description: |- + The paths which should be excluded from the unauthenticated_action when it is set to RedirectToLoginPage. + The paths which should be excluded from the `unauthenticated_action` when it is set to `RedirectToLoginPage`. + items: + type: string + type: array + facebookV2: + description: A facebook_v2 block as defined below. + properties: + appId: + description: |- + The App ID of the Facebook app used for login. + The App ID of the Facebook app used for login. + type: string + appSecretSettingName: + description: |- + The app setting name that contains the app_secret value used for Facebook Login. + The app setting name that contains the `app_secret` value used for Facebook Login. 
+ type: string + graphApiVersion: + description: |- + The version of the Facebook API to be used while logging in. + The version of the Facebook API to be used while logging in. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of scopes to be requested as part of Facebook Login authentication. + items: + type: string + type: array + type: object + forwardProxyConvention: + description: |- + The convention used to determine the url of the request made. Possible values include NoProxy, Standard, Custom. Defaults to NoProxy. + The convention used to determine the url of the request made. Possible values include `ForwardProxyConventionNoProxy`, `ForwardProxyConventionStandard`, `ForwardProxyConventionCustom`. Defaults to `ForwardProxyConventionNoProxy` + type: string + forwardProxyCustomHostHeaderName: + description: |- + The name of the custom header containing the host of the request. + The name of the header containing the host of the request. + type: string + forwardProxyCustomSchemeHeaderName: + description: |- + The name of the custom header containing the scheme of the request. + The name of the header containing the scheme of the request. + type: string + githubV2: + description: A github_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the GitHub app used for login. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for GitHub Login. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. 
+ Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + items: + type: string + type: array + type: object + googleV2: + description: A google_v2 block as defined below. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed Audiences that will be requested as part of Google Sign-In authentication. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Google web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Google Login. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of Login scopes that will be requested as part of Google Sign-In authentication. + items: + type: string + type: array + type: object + httpRouteApiPrefix: + description: |- + The prefix that should precede all the authentication and authorisation paths. Defaults to /.auth. + The prefix that should precede all the authentication and authorisation paths. Defaults to `/.auth` + type: string + login: + description: A login block as defined below. + properties: + allowedExternalRedirectUrls: + description: |- + External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. + External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. 
**Note:** URLs within the current domain are always implicitly allowed. + items: + type: string + type: array + cookieExpirationConvention: + description: |- + The method by which cookies expire. Possible values include: FixedTime, and IdentityProviderDerived. Defaults to FixedTime. + The method by which cookies expire. Possible values include: `FixedTime`, and `IdentityProviderDerived`. Defaults to `FixedTime`. + type: string + cookieExpirationTime: + description: |- + The time after the request is made when the session cookie should expire. Defaults to 08:00:00. + The time after the request is made when the session cookie should expire. Defaults to `08:00:00`. + type: string + logoutEndpoint: + description: |- + The endpoint to which logout requests should be made. + The endpoint to which logout requests should be made. + type: string + nonceExpirationTime: + description: |- + The time after the request is made when the nonce should expire. Defaults to 00:05:00. + The time after the request is made when the nonce should expire. Defaults to `00:05:00`. + type: string + preserveUrlFragmentsForLogins: + description: |- + Should the fragments from the request be preserved after the login request is made. Defaults to false. + Should the fragments from the request be preserved after the login request is made. Defaults to `false`. + type: boolean + tokenRefreshExtensionTime: + description: |- + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + type: number + tokenStoreEnabled: + description: |- + Should the Token Store configuration Enabled. Defaults to false + Should the Token Store configuration Enabled. 
Defaults to `false` + type: boolean + tokenStorePath: + description: |- + The directory path in the App Filesystem in which the tokens will be stored. + The directory path in the App Filesystem in which the tokens will be stored. + type: string + tokenStoreSasSettingName: + description: |- + The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + type: string + validateNonce: + description: |- + Should the nonce be validated while completing the login flow. Defaults to true. + Should the nonce be validated while completing the login flow. Defaults to `true`. + type: boolean + type: object + microsoftV2: + description: A microsoft_v2 block as defined below. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OAuth 2.0 client ID that was created for the app used for authentication. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + The list of Login scopes that will be requested as part of Microsoft Account authentication. + items: + type: string + type: array + type: object + requireAuthentication: + description: |- + Should the authentication flow be used for all requests. 
+ Should the authentication flow be used for all requests. + type: boolean + requireHttps: + description: |- + Should HTTPS be required on connections? Defaults to true. + Should HTTPS be required on connections? Defaults to true. + type: boolean + runtimeVersion: + description: |- + The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to ~1. + The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to `~1` + type: string + twitterV2: + description: A twitter_v2 block as defined below. + properties: + consumerKey: + description: |- + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + type: string + consumerSecretSettingName: + description: |- + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + type: string + type: object + unauthenticatedAction: + description: |- + The action to take for requests made without authentication. Possible values include RedirectToLoginPage, AllowAnonymous, Return401, and Return403. Defaults to RedirectToLoginPage. + The action to take for requests made without authentication. Possible values include `RedirectToLoginPage`, `AllowAnonymous`, `Return401`, and `Return403`. Defaults to `RedirectToLoginPage`. + type: string + type: object + backup: + description: a backup block as detailed below. + properties: + enabled: + description: |- + Should this backup job be enabled? Defaults to true. + Should this backup job be enabled? + type: boolean + name: + description: |- + The name which should be used for this Backup. + The name which should be used for this Backup. + type: string + schedule: + description: a schedule block as detailed below. 
+ properties: + frequencyInterval: + description: |- + How often the backup should be executed (e.g. for weekly backup, this should be set to 7 and frequency_unit should be set to Day). + How often the backup should be executed (e.g. for weekly backup, this should be set to `7` and `frequency_unit` should be set to `Day`). + type: number + frequencyUnit: + description: |- + The unit of time for how often the backup should take place. Possible values include: Day and Hour. + The unit of time for how often the backup should take place. Possible values include: `Day` and `Hour`. + type: string + keepAtLeastOneBackup: + description: |- + Should the service keep at least one backup, regardless of age of backup. Defaults to false. + Should the service keep at least one backup, regardless of age of backup. Defaults to `false`. + type: boolean + retentionPeriodDays: + description: |- + After how many days backups should be deleted. Defaults to 30. + After how many days backups should be deleted. + type: number + startTime: + description: |- + When the schedule should start working in RFC-3339 format. + When the schedule should start working in RFC-3339 format. + type: string + type: object + storageAccountUrlSecretRef: + description: |- + The SAS URL to the container. + The SAS URL to the container. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + required: + - storageAccountUrlSecretRef + type: object + builtinLoggingEnabled: + description: |- + Should built in logging be enabled. Configures AzureWebJobsDashboard app setting based on the configured storage setting. Defaults to true. + Should built in logging be enabled. Configures `AzureWebJobsDashboard` app setting based on the configured storage setting. 
+ type: boolean + clientCertificateEnabled: + description: |- + Should the Function App Slot use Client Certificates. + Should the Function App Slot use Client Certificates. + type: boolean + clientCertificateExclusionPaths: + description: |- + Paths to exclude when using client certificates, separated by ; + Paths to exclude when using client certificates, separated by ; + type: string + clientCertificateMode: + description: |- + The mode of the Function App Slot's client certificates requirement for incoming requests. Possible values are Required, Optional, and OptionalInteractiveUser. Defaults to Optional. + The mode of the Function App Slot's client certificates requirement for incoming requests. Possible values are `Required`, `Optional`, and `OptionalInteractiveUser`. + type: string + connectionString: + description: a connection_string block as detailed below. + items: + properties: + name: + description: |- + The name which should be used for this Connection. + The name which should be used for this Connection. + type: string + type: + description: |- + Type of database. Possible values include: APIHub, Custom, DocDb, EventHub, MySQL, NotificationHub, PostgreSQL, RedisCache, ServiceBus, SQLAzure, and SQLServer. + Type of database. Possible values include: `MySQL`, `SQLServer`, `SQLAzure`, `Custom`, `NotificationHub`, `ServiceBus`, `EventHub`, `APIHub`, `DocDb`, `RedisCache`, and `PostgreSQL`. + type: string + valueSecretRef: + description: |- + The connection string value. + The connection string value. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + required: + - valueSecretRef + type: object + type: array + contentShareForceDisabled: + description: |- + Force disable the content share settings. + Force disable the content share settings. 
+ type: boolean + dailyMemoryTimeQuota: + description: |- + The amount of memory in gigabyte-seconds that your application is allowed to consume per day. Setting this value only affects function apps in Consumption Plans. Defaults to 0. + The amount of memory in gigabyte-seconds that your application is allowed to consume per day. Setting this value only affects function apps in Consumption Plans. + type: number + enabled: + description: |- + Is the Linux Function App Slot enabled. Defaults to true. + Is the Linux Function App Slot enabled. + type: boolean + ftpPublishBasicAuthenticationEnabled: + description: Are the default FTP Basic Authentication publishing + credentials enabled. Defaults to true. + type: boolean + functionAppId: + description: |- + The ID of the Linux Function App this Slot is a member of. Changing this forces a new resource to be created. + The ID of the Linux Function App this Slot is a member of. + type: string + functionAppIdRef: + description: Reference to a LinuxFunctionApp in web to populate + functionAppId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + functionAppIdSelector: + description: Selector for a LinuxFunctionApp in web to populate + functionAppId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + functionsExtensionVersion: + description: |- + The runtime version associated with the Function App Slot. Defaults to ~4. + The runtime version associated with the Function App Slot. + type: string + httpsOnly: + description: |- + Can the Function App Slot only be accessed via HTTPS?. Defaults to false. + Can the Function App Slot only be accessed via HTTPS? + type: boolean + identity: + description: An identity block as detailed below. + properties: + identityIds: + description: A list of User Assigned Managed Identity IDs + to be assigned to this Linux Function App Slot. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Linux Function App Slot. + Possible values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + keyVaultReferenceIdentityId: + description: |- + The User Assigned Identity ID used for accessing KeyVault secrets. The identity must be assigned to the application in the identity block. For more information see - Access vaults with a user-assigned identity + The User Assigned Identity to use for Key Vault access. + type: string + publicNetworkAccessEnabled: + description: Should public network access be enabled for the Function + App. Defaults to true. + type: boolean + servicePlanId: + description: The ID of the Service Plan in which to run this slot. + If not specified the same Service Plan as the Linux Function + App will be used. + type: string + siteConfig: + description: a site_config block as detailed below. + properties: + alwaysOn: + description: |- + If this Linux Web App is Always On enabled. Defaults to false. + If this Linux Web App is Always On enabled. Defaults to `false`. + type: boolean + apiDefinitionUrl: + description: |- + The URL of the API definition that describes this Linux Function App. + The URL of the API definition that describes this Linux Function App. + type: string + apiManagementApiId: + description: |- + The ID of the API Management API for this Linux Function App. + The ID of the API Management API for this Linux Function App. + type: string + appCommandLine: + description: |- + The program and any arguments used to launch this app via the command line. (Example node myapp.js). + The program and any arguments used to launch this app via the command line. (Example `node myapp.js`). + type: string + appScaleLimit: + description: |- + The number of workers this function app can scale out to. 
Only applicable to apps on the Consumption and Premium plan. + The number of workers this function app can scale out to. Only applicable to apps on the Consumption and Premium plan. + type: number + appServiceLogs: + description: an app_service_logs block as detailed below. + properties: + diskQuotaMb: + description: |- + The amount of disk space to use for logs. Valid values are between 25 and 100. Defaults to 35. + The amount of disk space to use for logs. Valid values are between `25` and `100`. + type: number + retentionPeriodDays: + description: |- + After how many days backups should be deleted. Defaults to 30. + The retention period for logs in days. Valid values are between `0` and `99999`. Defaults to `0` (never delete). + type: number + type: object + applicationInsightsConnectionStringSecretRef: + description: |- + The Connection String for linking the Linux Function App to Application Insights. + The Connection String for linking the Linux Function App to Application Insights. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + applicationInsightsKeySecretRef: + description: |- + The Instrumentation Key for connecting the Linux Function App to Application Insights. + The Instrumentation Key for connecting the Linux Function App to Application Insights. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + applicationStack: + description: an application_stack block as detailed below. + properties: + docker: + description: |- + a docker block as detailed below. 
+ A docker block + items: + properties: + imageName: + description: |- + The name of the Docker image to use. + The name of the Docker image to use. + type: string + imageTag: + description: |- + The image tag of the image to use. + The image tag of the image to use. + type: string + registryPasswordSecretRef: + description: |- + The password for the account to use to connect to the registry. + The password for the account to use to connect to the registry. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + registryUrl: + description: |- + The URL of the docker registry. + The URL of the docker registry. + type: string + registryUsernameSecretRef: + description: |- + The username to use for connections to the registry. + The username to use for connections to the registry. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + type: object + type: array + dotnetVersion: + description: |- + The version of .Net. Possible values are 3.1, 6.0, 7.0 and 8.0. + The version of .Net. Possible values are `3.1`, `6.0` and `7.0` + type: string + javaVersion: + description: |- + The version of Java to use. Possible values are 8, 11 & 17 (In-Preview). + The version of Java to use. Possible values are `8`, `11`, and `17` + type: string + nodeVersion: + description: |- + The version of Node to use. Possible values include 12, 14, 16 and 18 + The version of Node to use. Possible values include `12`, `14`, `16` and `18` + type: string + powershellCoreVersion: + description: |- + The version of PowerShell Core to use. Possibles values are 7 , and 7.2. 
+ The version of PowerShell Core to use. Possibles values are `7`, and `7.2` + type: string + pythonVersion: + description: |- + The version of Python to use. Possible values are 3.12, 3.11, 3.10, 3.9, 3.8 and 3.7. + The version of Python to use. Possible values include `3.12`, `3.11`, `3.10`, `3.9`, `3.8`, and `3.7`. + type: string + useCustomRuntime: + description: Should the Linux Function App use a custom + runtime? + type: boolean + useDotnetIsolatedRuntime: + description: |- + Should the DotNet process use an isolated runtime. Defaults to false. + Should the DotNet process use an isolated runtime. Defaults to `false`. + type: boolean + type: object + autoSwapSlotName: + description: The name of the slot to automatically swap with + when this slot is successfully deployed. + type: string + containerRegistryManagedIdentityClientId: + description: |- + The Client ID of the Managed Service Identity to use for connections to the Azure Container Registry. + The Client ID of the Managed Service Identity to use for connections to the Azure Container Registry. + type: string + containerRegistryUseManagedIdentity: + description: |- + Should connections for Azure Container Registry use Managed Identity. + Should connections for Azure Container Registry use Managed Identity. + type: boolean + cors: + description: a cors block as detailed below. + properties: + allowedOrigins: + description: |- + Specifies a list of origins that should be allowed to make cross-origin calls. + Specifies a list of origins that should be allowed to make cross-origin calls. + items: + type: string + type: array + x-kubernetes-list-type: set + supportCredentials: + description: |- + Are credentials allowed in CORS requests? Defaults to false. + Are credentials allowed in CORS requests? Defaults to `false`. + type: boolean + type: object + defaultDocuments: + description: |- + Specifies a list of Default Documents for the Linux Web App. 
+ Specifies a list of Default Documents for the Linux Web App. + items: + type: string + type: array + elasticInstanceMinimum: + description: |- + The number of minimum instances for this Linux Function App. Only affects apps on Elastic Premium plans. + The number of minimum instances for this Linux Function App. Only affects apps on Elastic Premium plans. + type: number + ftpsState: + description: |- + State of FTP / FTPS service for this function app. Possible values include: AllAllowed, FtpsOnly and Disabled. Defaults to Disabled. + State of FTP / FTPS service for this function app. Possible values include: `AllAllowed`, `FtpsOnly` and `Disabled`. Defaults to `Disabled`. + type: string + healthCheckEvictionTimeInMin: + description: |- + The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between 2 and 10. Defaults to 0. Only valid in conjunction with health_check_path. + The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between `2` and `10`. Defaults to `10`. Only valid in conjunction with `health_check_path` + type: number + healthCheckPath: + description: |- + The path to be checked for this function app health. + The path to be checked for this function app health. + type: string + http2Enabled: + description: |- + Specifies if the HTTP2 protocol should be enabled. Defaults to false. + Specifies if the http2 protocol should be enabled. Defaults to `false`. + type: boolean + ipRestriction: + description: an ip_restriction block as detailed below. + items: + properties: + action: + description: |- + The action to take. Possible values are Allow or Deny. Defaults to Allow. + The action to take. Possible values are `Allow` or `Deny`. + type: string + description: + description: |- + The Description of this IP Restriction. + The description of the IP restriction rule. 
+ type: string + headers: + description: a headers block as detailed below. + items: + properties: + xAzureFdid: + description: Specifies a list of Azure Front Door + IDs. + items: + type: string + type: array + xFdHealthProbe: + description: Specifies if a Front Door Health + Probe should be expected. The only possible + value is 1. + items: + type: string + type: array + xForwardedFor: + description: Specifies a list of addresses for + which matching should be applied. Omitting this + value means allow any. + items: + type: string + type: array + xForwardedHost: + description: Specifies a list of Hosts for which + matching should be applied. + items: + type: string + type: array + type: object + type: array + ipAddress: + description: |- + The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + type: string + name: + description: |- + The name which should be used for this Storage Account. + The name which should be used for this `ip_restriction`. + type: string + priority: + description: |- + The priority value of this ip_restriction. Defaults to 65000. + The priority value of this `ip_restriction`. + type: number + serviceTag: + description: |- + The Service Tag used for this IP Restriction. + The Service Tag used for this IP Restriction. + type: string + virtualNetworkSubnetId: + description: |- + The subnet id which will be used by this Function App Slot for regional virtual network integration. + The Virtual Network Subnet ID used for this IP Restriction. + type: string + virtualNetworkSubnetIdRef: + description: Reference to a Subnet in network to populate + virtualNetworkSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkSubnetIdSelector: + description: Selector for a Subnet in network to populate + virtualNetworkSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + ipRestrictionDefaultAction: + description: The Default action for traffic that does not + match any ip_restriction rule. possible values include Allow + and Deny. Defaults to Allow. + type: string + loadBalancingMode: + description: |- + The Site load balancing mode. Possible values include: WeightedRoundRobin, LeastRequests, LeastResponseTime, WeightedTotalTraffic, RequestHash, PerSiteRoundRobin. Defaults to LeastRequests if omitted. + The Site load balancing mode. Possible values include: `WeightedRoundRobin`, `LeastRequests`, `LeastResponseTime`, `WeightedTotalTraffic`, `RequestHash`, `PerSiteRoundRobin`. Defaults to `LeastRequests` if omitted. + type: string + managedPipelineMode: + description: |- + The Managed Pipeline mode. Possible values include: Integrated, Classic. Defaults to Integrated. + The Managed Pipeline mode. Possible values include: `Integrated`, `Classic`. Defaults to `Integrated`. + type: string + minimumTlsVersion: + description: |- + The configures the minimum version of TLS required for SSL requests. Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + The configures the minimum version of TLS required for SSL requests. Possible values include: `1.0`, `1.1`, and `1.2`. Defaults to `1.2`. + type: string + preWarmedInstanceCount: + description: |- + The number of pre-warmed instances for this function app. Only affects apps on an Elastic Premium plan. + The number of pre-warmed instances for this function app. Only affects apps on an Elastic Premium plan. + type: number + remoteDebuggingEnabled: + description: |- + Should Remote Debugging be enabled. Defaults to false. + Should Remote Debugging be enabled. Defaults to `false`. + type: boolean + remoteDebuggingVersion: + description: |- + The Remote Debugging Version. Possible values include VS2017, VS2019, and VS2022 + The Remote Debugging Version. 
Possible values include `VS2017`, `VS2019`, and `VS2022` + type: string + runtimeScaleMonitoringEnabled: + description: |- + Should Functions Runtime Scale Monitoring be enabled. + Should Functions Runtime Scale Monitoring be enabled. + type: boolean + scmIpRestriction: + description: a scm_ip_restriction block as detailed below. + items: + properties: + action: + description: |- + The action to take. Possible values are Allow or Deny. Defaults to Allow. + The action to take. Possible values are `Allow` or `Deny`. + type: string + description: + description: |- + The Description of this IP Restriction. + The description of the IP restriction rule. + type: string + headers: + description: a headers block as detailed below. + items: + properties: + xAzureFdid: + description: Specifies a list of Azure Front Door + IDs. + items: + type: string + type: array + xFdHealthProbe: + description: Specifies if a Front Door Health + Probe should be expected. The only possible + value is 1. + items: + type: string + type: array + xForwardedFor: + description: Specifies a list of addresses for + which matching should be applied. Omitting this + value means allow any. + items: + type: string + type: array + xForwardedHost: + description: Specifies a list of Hosts for which + matching should be applied. + items: + type: string + type: array + type: object + type: array + ipAddress: + description: |- + The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + type: string + name: + description: |- + The name which should be used for this Storage Account. + The name which should be used for this `ip_restriction`. + type: string + priority: + description: |- + The priority value of this ip_restriction. Defaults to 65000. + The priority value of this `ip_restriction`. 
+ type: number + serviceTag: + description: |- + The Service Tag used for this IP Restriction. + The Service Tag used for this IP Restriction. + type: string + virtualNetworkSubnetId: + description: |- + The subnet id which will be used by this Function App Slot for regional virtual network integration. + The Virtual Network Subnet ID used for this IP Restriction. + type: string + virtualNetworkSubnetIdRef: + description: Reference to a Subnet in network to populate + virtualNetworkSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkSubnetIdSelector: + description: Selector for a Subnet in network to populate + virtualNetworkSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + scmIpRestrictionDefaultAction: + description: The Default action for traffic that does not + match any scm_ip_restriction rule. possible values include + Allow and Deny. Defaults to Allow. + type: string + scmMinimumTlsVersion: + description: |- + Configures the minimum version of TLS required for SSL requests to the SCM site Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + Configures the minimum version of TLS required for SSL requests to the SCM site Possible values include: `1.0`, `1.1`, and `1.2`. Defaults to `1.2`. + type: string + scmUseMainIpRestriction: + description: |- + Should the Linux Function App ip_restriction configuration be used for the SCM also. + Should the Linux Function App `ip_restriction` configuration be used for the SCM also. + type: boolean + use32BitWorker: + description: |- + Should the Linux Web App use a 32-bit worker. + Should the Linux Web App use a 32-bit worker. + type: boolean + vnetRouteAllEnabled: + description: |- + Should all outbound traffic to have NAT Gateways, Network Security Groups and User Defined Routes applied? Defaults to false. + Should all outbound traffic to have Virtual Network Security Groups and User Defined Routes applied? Defaults to `false`. 
+ type: boolean + websocketsEnabled: + description: |- + Should Web Sockets be enabled. Defaults to false. + Should Web Sockets be enabled. Defaults to `false`. + type: boolean + workerCount: + description: |- + The number of Workers for this Linux Function App. + The number of Workers for this Linux Function App. + type: number + type: object + storageAccount: + description: One or more storage_account blocks as defined below. + items: + properties: + accessKeySecretRef: + description: The Access key for the storage account. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + accountName: + description: The Name of the Storage Account. + type: string + mountPath: + description: The path at which to mount the storage share. + type: string + name: + description: The name which should be used for this Storage + Account. + type: string + shareName: + description: The Name of the File Share or Container Name + for Blob storage. + type: string + type: + description: The Azure Storage Type. Possible values include + AzureFiles and AzureBlob. + type: string + required: + - accessKeySecretRef + type: object + type: array + storageAccountAccessKeySecretRef: + description: |- + The access key which will be used to access the storage account for the Function App Slot. + The access key which will be used to access the storage account for the Function App Slot. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + storageAccountName: + description: |- + The backend storage account name which will be used by this Function App Slot. 
+ The backend storage account name which will be used by this Function App Slot. + type: string + storageAccountNameRef: + description: Reference to a Account in storage to populate storageAccountName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + storageAccountNameSelector: + description: Selector for a Account in storage to populate storageAccountName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + storageKeyVaultSecretId: + description: |- + The Key Vault Secret ID, optionally including version, that contains the Connection String to connect to the storage account for this Function App. + The Key Vault Secret ID, including version, that contains the Connection String to connect to the storage account for this Function App. + type: string + storageUsesManagedIdentity: + description: |- + Should the Function App Slot use its Managed Identity to access storage. + Should the Function App Slot use its Managed Identity to access storage? + type: boolean + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Linux Function App. + type: object + x-kubernetes-map-type: granular + virtualNetworkSubnetId: + description: The subnet id which will be used by this Function + App Slot for regional virtual network integration. + type: string + virtualNetworkSubnetIdRef: + description: Reference to a Subnet in network to populate virtualNetworkSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkSubnetIdSelector: + description: Selector for a Subnet in network to populate virtualNetworkSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + webdeployPublishBasicAuthenticationEnabled: + description: Should the default WebDeploy Basic Authentication + publishing credentials enabled. Defaults to true. + type: boolean + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. 
+ InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + appSettings: + additionalProperties: + type: string + description: |- + A map of key-value pairs for App Settings and custom values. + A map of key-value pairs for [App Settings](https://docs.microsoft.com/en-us/azure/azure-functions/functions-app-settings) and custom values. + type: object + x-kubernetes-map-type: granular + authSettings: + description: an auth_settings block as detailed below. + properties: + activeDirectory: + description: an active_directory block as detailed below. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Active Directory. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The App Setting name that contains the client secret of the Client. Cannot be used with `client_secret`. 
+ type: string + type: object + additionalLoginParameters: + additionalProperties: + type: string + description: |- + Specifies a map of login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + Specifies a map of Login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + type: object + x-kubernetes-map-type: granular + allowedExternalRedirectUrls: + description: |- + Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + items: + type: string + type: array + defaultProvider: + description: |- + The default authentication provider to use when multiple providers are configured. Possible values include: AzureActiveDirectory, Facebook, Google, MicrosoftAccount, Twitter, Github. + The default authentication provider to use when multiple providers are configured. Possible values include: `AzureActiveDirectory`, `Facebook`, `Google`, `MicrosoftAccount`, `Twitter`, `Github`. + type: string + enabled: + description: |- + Should the Authentication / Authorization feature be enabled? + Should the Authentication / Authorization feature be enabled? + type: boolean + facebook: + description: a facebook block as detailed below. + properties: + appId: + description: |- + The App ID of the Facebook app used for login. + The App ID of the Facebook app used for login. + type: string + appSecretSettingName: + description: |- + The app setting name that contains the app_secret value used for Facebook Login. + The app setting name that contains the `app_secret` value used for Facebook Login. Cannot be specified with `app_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. 
If not specified, wl.basic is used as the default scope. + Specifies a list of OAuth 2.0 scopes to be requested as part of Facebook Login authentication. + items: + type: string + type: array + type: object + github: + description: a github block as detailed below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the GitHub app used for login. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for GitHub Login. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + items: + type: string + type: array + type: object + google: + description: a google block as detailed below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Google web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Google Login. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + Specifies a list of OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication. 
If not specified, "openid", "profile", and "email" are used as default scopes. + items: + type: string + type: array + type: object + issuer: + description: |- + The OpenID Connect Issuer URI that represents the entity which issues access tokens. + The OpenID Connect Issuer URI that represents the entity which issues access tokens. + type: string + microsoft: + description: a microsoft block as detailed below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OAuth 2.0 client ID that was created for the app used for authentication. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + The list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, `wl.basic` is used as the default scope. + items: + type: string + type: array + type: object + runtimeVersion: + description: |- + The RuntimeVersion of the Authentication / Authorization feature in use. + The RuntimeVersion of the Authentication / Authorization feature in use. + type: string + tokenRefreshExtensionHours: + description: |- + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. 
+ type: number + tokenStoreEnabled: + description: |- + Should the Linux Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to false. + Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to `false`. + type: boolean + twitter: + description: a twitter block as detailed below. + properties: + consumerKey: + description: |- + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + type: string + consumerSecretSettingName: + description: |- + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret`. + type: string + type: object + unauthenticatedClientAction: + description: |- + The action to take when an unauthenticated client attempts to access the app. Possible values include: RedirectToLoginPage, AllowAnonymous. + The action to take when an unauthenticated client attempts to access the app. Possible values include: `RedirectToLoginPage`, `AllowAnonymous`. + type: string + type: object + authSettingsV2: + description: an auth_settings_v2 block as detailed below. + properties: + activeDirectoryV2: + description: An active_directory_v2 block as defined below. + properties: + allowedApplications: + description: |- + The list of allowed Applications for the Default Authorisation Policy. + The list of allowed Applications for the Default Authorisation Policy. + items: + type: string + type: array + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. 
+ items: + type: string + type: array + allowedGroups: + description: |- + The list of allowed Group Names for the Default Authorisation Policy. + The list of allowed Group Names for the Default Authorisation Policy. + items: + type: string + type: array + allowedIdentities: + description: |- + The list of allowed Identities for the Default Authorisation Policy. + The list of allowed Identities for the Default Authorisation Policy. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Active Directory. + type: string + clientSecretCertificateThumbprint: + description: |- + The thumbprint of the certificate used for signing purposes. + The thumbprint of the certificate used for signing purposes. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The App Setting name that contains the client secret of the Client. + type: string + jwtAllowedClientApplications: + description: |- + A list of Allowed Client Applications in the JWT Claim. + A list of Allowed Client Applications in the JWT Claim. + items: + type: string + type: array + jwtAllowedGroups: + description: |- + A list of Allowed Groups in the JWT Claim. + A list of Allowed Groups in the JWT Claim. + items: + type: string + type: array + loginParameters: + additionalProperties: + type: string + description: |- + A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + type: object + x-kubernetes-map-type: granular + tenantAuthEndpoint: + description: |- + The Azure Tenant Endpoint for the Authenticating Tenant. e.g. https://login.microsoftonline.com/v2.0/{tenant-guid}/ + The Azure Tenant Endpoint for the Authenticating Tenant. 
e.g. `https://login.microsoftonline.com/v2.0/{tenant-guid}/`. + type: string + wwwAuthenticationDisabled: + description: |- + Should the www-authenticate provider should be omitted from the request? Defaults to false. + Should the www-authenticate provider should be omitted from the request? Defaults to `false` + type: boolean + type: object + appleV2: + description: An apple_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Apple web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Apple Login. + type: string + type: object + authEnabled: + description: |- + Should the AuthV2 Settings be enabled. Defaults to false. + Should the AuthV2 Settings be enabled. Defaults to `false` + type: boolean + azureStaticWebAppV2: + description: An azure_static_web_app_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Static Web App Authentication. + type: string + type: object + configFilePath: + description: |- + The path to the App Auth settings. + The path to the App Auth settings. **Note:** Relative Paths are evaluated from the Site Root directory. + type: string + customOidcV2: + description: Zero or more custom_oidc_v2 blocks as defined + below. + items: + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with this Custom OIDC. + type: string + name: + description: |- + The name which should be used for this Storage Account. 
+ The name of the Custom OIDC Authentication Provider. + type: string + nameClaimType: + description: |- + The name of the claim that contains the users name. + The name of the claim that contains the users name. + type: string + openidConfigurationEndpoint: + description: |- + The app setting name that contains the client_secret value used for the Custom OIDC Login. + The endpoint that contains all the configuration endpoints for this Custom OIDC provider. + type: string + scopes: + description: |- + The list of the scopes that should be requested while authenticating. + The list of the scopes that should be requested while authenticating. + items: + type: string + type: array + type: object + type: array + defaultProvider: + description: |- + The Default Authentication Provider to use when the unauthenticated_action is set to RedirectToLoginPage. Possible values include: apple, azureactivedirectory, facebook, github, google, twitter and the name of your custom_oidc_v2 provider. + The Default Authentication Provider to use when the `unauthenticated_action` is set to `RedirectToLoginPage`. Possible values include: `apple`, `azureactivedirectory`, `facebook`, `github`, `google`, `twitter` and the `name` of your `custom_oidc_v2` provider. + type: string + excludedPaths: + description: |- + The paths which should be excluded from the unauthenticated_action when it is set to RedirectToLoginPage. + The paths which should be excluded from the `unauthenticated_action` when it is set to `RedirectToLoginPage`. + items: + type: string + type: array + facebookV2: + description: A facebook_v2 block as defined below. + properties: + appId: + description: |- + The App ID of the Facebook app used for login. + The App ID of the Facebook app used for login. + type: string + appSecretSettingName: + description: |- + The app setting name that contains the app_secret value used for Facebook Login. + The app setting name that contains the `app_secret` value used for Facebook Login. 
+ type: string + graphApiVersion: + description: |- + The version of the Facebook API to be used while logging in. + The version of the Facebook API to be used while logging in. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of scopes to be requested as part of Facebook Login authentication. + items: + type: string + type: array + type: object + forwardProxyConvention: + description: |- + The convention used to determine the url of the request made. Possible values include NoProxy, Standard, Custom. Defaults to NoProxy. + The convention used to determine the url of the request made. Possible values include `ForwardProxyConventionNoProxy`, `ForwardProxyConventionStandard`, `ForwardProxyConventionCustom`. Defaults to `ForwardProxyConventionNoProxy` + type: string + forwardProxyCustomHostHeaderName: + description: |- + The name of the custom header containing the host of the request. + The name of the header containing the host of the request. + type: string + forwardProxyCustomSchemeHeaderName: + description: |- + The name of the custom header containing the scheme of the request. + The name of the header containing the scheme of the request. + type: string + githubV2: + description: A github_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the GitHub app used for login. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for GitHub Login. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. 
+ Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + items: + type: string + type: array + type: object + googleV2: + description: A google_v2 block as defined below. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed Audiences that will be requested as part of Google Sign-In authentication. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Google web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Google Login. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of Login scopes that will be requested as part of Google Sign-In authentication. + items: + type: string + type: array + type: object + httpRouteApiPrefix: + description: |- + The prefix that should precede all the authentication and authorisation paths. Defaults to /.auth. + The prefix that should precede all the authentication and authorisation paths. Defaults to `/.auth` + type: string + login: + description: A login block as defined below. + properties: + allowedExternalRedirectUrls: + description: |- + External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. + External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. 
**Note:** URLs within the current domain are always implicitly allowed. + items: + type: string + type: array + cookieExpirationConvention: + description: |- + The method by which cookies expire. Possible values include: FixedTime, and IdentityProviderDerived. Defaults to FixedTime. + The method by which cookies expire. Possible values include: `FixedTime`, and `IdentityProviderDerived`. Defaults to `FixedTime`. + type: string + cookieExpirationTime: + description: |- + The time after the request is made when the session cookie should expire. Defaults to 08:00:00. + The time after the request is made when the session cookie should expire. Defaults to `08:00:00`. + type: string + logoutEndpoint: + description: |- + The endpoint to which logout requests should be made. + The endpoint to which logout requests should be made. + type: string + nonceExpirationTime: + description: |- + The time after the request is made when the nonce should expire. Defaults to 00:05:00. + The time after the request is made when the nonce should expire. Defaults to `00:05:00`. + type: string + preserveUrlFragmentsForLogins: + description: |- + Should the fragments from the request be preserved after the login request is made. Defaults to false. + Should the fragments from the request be preserved after the login request is made. Defaults to `false`. + type: boolean + tokenRefreshExtensionTime: + description: |- + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + type: number + tokenStoreEnabled: + description: |- + Should the Token Store configuration Enabled. Defaults to false + Should the Token Store configuration Enabled. 
Defaults to `false` + type: boolean + tokenStorePath: + description: |- + The directory path in the App Filesystem in which the tokens will be stored. + The directory path in the App Filesystem in which the tokens will be stored. + type: string + tokenStoreSasSettingName: + description: |- + The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + type: string + validateNonce: + description: |- + Should the nonce be validated while completing the login flow. Defaults to true. + Should the nonce be validated while completing the login flow. Defaults to `true`. + type: boolean + type: object + microsoftV2: + description: A microsoft_v2 block as defined below. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OAuth 2.0 client ID that was created for the app used for authentication. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + The list of Login scopes that will be requested as part of Microsoft Account authentication. + items: + type: string + type: array + type: object + requireAuthentication: + description: |- + Should the authentication flow be used for all requests. 
+ Should the authentication flow be used for all requests. + type: boolean + requireHttps: + description: |- + Should HTTPS be required on connections? Defaults to true. + Should HTTPS be required on connections? Defaults to true. + type: boolean + runtimeVersion: + description: |- + The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to ~1. + The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to `~1` + type: string + twitterV2: + description: A twitter_v2 block as defined below. + properties: + consumerKey: + description: |- + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + type: string + consumerSecretSettingName: + description: |- + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + type: string + type: object + unauthenticatedAction: + description: |- + The action to take for requests made without authentication. Possible values include RedirectToLoginPage, AllowAnonymous, Return401, and Return403. Defaults to RedirectToLoginPage. + The action to take for requests made without authentication. Possible values include `RedirectToLoginPage`, `AllowAnonymous`, `Return401`, and `Return403`. Defaults to `RedirectToLoginPage`. + type: string + type: object + backup: + description: a backup block as detailed below. + properties: + enabled: + description: |- + Should this backup job be enabled? Defaults to true. + Should this backup job be enabled? + type: boolean + name: + description: |- + The name which should be used for this Backup. + The name which should be used for this Backup. + type: string + schedule: + description: a schedule block as detailed below. 
+ properties: + frequencyInterval: + description: |- + How often the backup should be executed (e.g. for weekly backup, this should be set to 7 and frequency_unit should be set to Day). + How often the backup should be executed (e.g. for weekly backup, this should be set to `7` and `frequency_unit` should be set to `Day`). + type: number + frequencyUnit: + description: |- + The unit of time for how often the backup should take place. Possible values include: Day and Hour. + The unit of time for how often the backup should take place. Possible values include: `Day` and `Hour`. + type: string + keepAtLeastOneBackup: + description: |- + Should the service keep at least one backup, regardless of age of backup. Defaults to false. + Should the service keep at least one backup, regardless of age of backup. Defaults to `false`. + type: boolean + retentionPeriodDays: + description: |- + After how many days backups should be deleted. Defaults to 30. + After how many days backups should be deleted. + type: number + startTime: + description: |- + When the schedule should start working in RFC-3339 format. + When the schedule should start working in RFC-3339 format. + type: string + type: object + type: object + builtinLoggingEnabled: + description: |- + Should built in logging be enabled. Configures AzureWebJobsDashboard app setting based on the configured storage setting. Defaults to true. + Should built in logging be enabled. Configures `AzureWebJobsDashboard` app setting based on the configured storage setting. + type: boolean + clientCertificateEnabled: + description: |- + Should the Function App Slot use Client Certificates. + Should the Function App Slot use Client Certificates. 
+ type: boolean + clientCertificateExclusionPaths: + description: |- + Paths to exclude when using client certificates, separated by ; + Paths to exclude when using client certificates, separated by ; + type: string + clientCertificateMode: + description: |- + The mode of the Function App Slot's client certificates requirement for incoming requests. Possible values are Required, Optional, and OptionalInteractiveUser. Defaults to Optional. + The mode of the Function App Slot's client certificates requirement for incoming requests. Possible values are `Required`, `Optional`, and `OptionalInteractiveUser`. + type: string + connectionString: + description: a connection_string block as detailed below. + items: + properties: + name: + description: |- + The name which should be used for this Connection. + The name which should be used for this Connection. + type: string + type: + description: |- + Type of database. Possible values include: APIHub, Custom, DocDb, EventHub, MySQL, NotificationHub, PostgreSQL, RedisCache, ServiceBus, SQLAzure, and SQLServer. + Type of database. Possible values include: `MySQL`, `SQLServer`, `SQLAzure`, `Custom`, `NotificationHub`, `ServiceBus`, `EventHub`, `APIHub`, `DocDb`, `RedisCache`, and `PostgreSQL`. + type: string + type: object + type: array + contentShareForceDisabled: + description: |- + Force disable the content share settings. + Force disable the content share settings. + type: boolean + dailyMemoryTimeQuota: + description: |- + The amount of memory in gigabyte-seconds that your application is allowed to consume per day. Setting this value only affects function apps in Consumption Plans. Defaults to 0. + The amount of memory in gigabyte-seconds that your application is allowed to consume per day. Setting this value only affects function apps in Consumption Plans. + type: number + enabled: + description: |- + Is the Linux Function App Slot enabled. Defaults to true. + Is the Linux Function App Slot enabled. 
+ type: boolean + ftpPublishBasicAuthenticationEnabled: + description: Are the default FTP Basic Authentication publishing + credentials enabled. Defaults to true. + type: boolean + functionsExtensionVersion: + description: |- + The runtime version associated with the Function App Slot. Defaults to ~4. + The runtime version associated with the Function App Slot. + type: string + httpsOnly: + description: |- + Can the Function App Slot only be accessed via HTTPS?. Defaults to false. + Can the Function App Slot only be accessed via HTTPS? + type: boolean + identity: + description: An identity block as detailed below. + properties: + identityIds: + description: A list of User Assigned Managed Identity IDs + to be assigned to this Linux Function App Slot. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Linux Function App Slot. + Possible values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + keyVaultReferenceIdentityId: + description: |- + The User Assigned Identity ID used for accessing KeyVault secrets. The identity must be assigned to the application in the identity block. For more information see - Access vaults with a user-assigned identity + The User Assigned Identity to use for Key Vault access. + type: string + publicNetworkAccessEnabled: + description: Should public network access be enabled for the Function + App. Defaults to true. + type: boolean + servicePlanId: + description: The ID of the Service Plan in which to run this slot. + If not specified the same Service Plan as the Linux Function + App will be used. + type: string + siteConfig: + description: a site_config block as detailed below. + properties: + alwaysOn: + description: |- + If this Linux Web App is Always On enabled. Defaults to false. + If this Linux Web App is Always On enabled. Defaults to `false`. 
+ type: boolean + apiDefinitionUrl: + description: |- + The URL of the API definition that describes this Linux Function App. + The URL of the API definition that describes this Linux Function App. + type: string + apiManagementApiId: + description: |- + The ID of the API Management API for this Linux Function App. + The ID of the API Management API for this Linux Function App. + type: string + appCommandLine: + description: |- + The program and any arguments used to launch this app via the command line. (Example node myapp.js). + The program and any arguments used to launch this app via the command line. (Example `node myapp.js`). + type: string + appScaleLimit: + description: |- + The number of workers this function app can scale out to. Only applicable to apps on the Consumption and Premium plan. + The number of workers this function app can scale out to. Only applicable to apps on the Consumption and Premium plan. + type: number + appServiceLogs: + description: an app_service_logs block as detailed below. + properties: + diskQuotaMb: + description: |- + The amount of disk space to use for logs. Valid values are between 25 and 100. Defaults to 35. + The amount of disk space to use for logs. Valid values are between `25` and `100`. + type: number + retentionPeriodDays: + description: |- + After how many days backups should be deleted. Defaults to 30. + The retention period for logs in days. Valid values are between `0` and `99999`. Defaults to `0` (never delete). + type: number + type: object + applicationStack: + description: an application_stack block as detailed below. + properties: + docker: + description: |- + a docker block as detailed below. + A docker block + items: + properties: + imageName: + description: |- + The name of the Docker image to use. + The name of the Docker image to use. + type: string + imageTag: + description: |- + The image tag of the image to use. + The image tag of the image to use. 
+ type: string + registryUrl: + description: |- + The URL of the docker registry. + The URL of the docker registry. + type: string + type: object + type: array + dotnetVersion: + description: |- + The version of .Net. Possible values are 3.1, 6.0, 7.0 and 8.0. + The version of .Net. Possible values are `3.1`, `6.0` and `7.0` + type: string + javaVersion: + description: |- + The version of Java to use. Possible values are 8, 11 & 17 (In-Preview). + The version of Java to use. Possible values are `8`, `11`, and `17` + type: string + nodeVersion: + description: |- + The version of Node to use. Possible values include 12, 14, 16 and 18 + The version of Node to use. Possible values include `12`, `14`, `16` and `18` + type: string + powershellCoreVersion: + description: |- + The version of PowerShell Core to use. Possibles values are 7 , and 7.2. + The version of PowerShell Core to use. Possibles values are `7`, and `7.2` + type: string + pythonVersion: + description: |- + The version of Python to use. Possible values are 3.12, 3.11, 3.10, 3.9, 3.8 and 3.7. + The version of Python to use. Possible values include `3.12`, `3.11`, `3.10`, `3.9`, `3.8`, and `3.7`. + type: string + useCustomRuntime: + description: Should the Linux Function App use a custom + runtime? + type: boolean + useDotnetIsolatedRuntime: + description: |- + Should the DotNet process use an isolated runtime. Defaults to false. + Should the DotNet process use an isolated runtime. Defaults to `false`. + type: boolean + type: object + autoSwapSlotName: + description: The name of the slot to automatically swap with + when this slot is successfully deployed. + type: string + containerRegistryManagedIdentityClientId: + description: |- + The Client ID of the Managed Service Identity to use for connections to the Azure Container Registry. + The Client ID of the Managed Service Identity to use for connections to the Azure Container Registry. 
+ type: string + containerRegistryUseManagedIdentity: + description: |- + Should connections for Azure Container Registry use Managed Identity. + Should connections for Azure Container Registry use Managed Identity. + type: boolean + cors: + description: a cors block as detailed below. + properties: + allowedOrigins: + description: |- + Specifies a list of origins that should be allowed to make cross-origin calls. + Specifies a list of origins that should be allowed to make cross-origin calls. + items: + type: string + type: array + x-kubernetes-list-type: set + supportCredentials: + description: |- + Are credentials allowed in CORS requests? Defaults to false. + Are credentials allowed in CORS requests? Defaults to `false`. + type: boolean + type: object + defaultDocuments: + description: |- + Specifies a list of Default Documents for the Linux Web App. + Specifies a list of Default Documents for the Linux Web App. + items: + type: string + type: array + elasticInstanceMinimum: + description: |- + The number of minimum instances for this Linux Function App. Only affects apps on Elastic Premium plans. + The number of minimum instances for this Linux Function App. Only affects apps on Elastic Premium plans. + type: number + ftpsState: + description: |- + State of FTP / FTPS service for this function app. Possible values include: AllAllowed, FtpsOnly and Disabled. Defaults to Disabled. + State of FTP / FTPS service for this function app. Possible values include: `AllAllowed`, `FtpsOnly` and `Disabled`. Defaults to `Disabled`. + type: string + healthCheckEvictionTimeInMin: + description: |- + The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between 2 and 10. Defaults to 0. Only valid in conjunction with health_check_path. + The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between `2` and `10`. Defaults to `10`. 
Only valid in conjunction with `health_check_path` + type: number + healthCheckPath: + description: |- + The path to be checked for this function app health. + The path to be checked for this function app health. + type: string + http2Enabled: + description: |- + Specifies if the HTTP2 protocol should be enabled. Defaults to false. + Specifies if the http2 protocol should be enabled. Defaults to `false`. + type: boolean + ipRestriction: + description: an ip_restriction block as detailed below. + items: + properties: + action: + description: |- + The action to take. Possible values are Allow or Deny. Defaults to Allow. + The action to take. Possible values are `Allow` or `Deny`. + type: string + description: + description: |- + The Description of this IP Restriction. + The description of the IP restriction rule. + type: string + headers: + description: a headers block as detailed below. + items: + properties: + xAzureFdid: + description: Specifies a list of Azure Front Door + IDs. + items: + type: string + type: array + xFdHealthProbe: + description: Specifies if a Front Door Health + Probe should be expected. The only possible + value is 1. + items: + type: string + type: array + xForwardedFor: + description: Specifies a list of addresses for + which matching should be applied. Omitting this + value means allow any. + items: + type: string + type: array + xForwardedHost: + description: Specifies a list of Hosts for which + matching should be applied. + items: + type: string + type: array + type: object + type: array + ipAddress: + description: |- + The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + type: string + name: + description: |- + The name which should be used for this Storage Account. + The name which should be used for this `ip_restriction`. 
+ type: string + priority: + description: |- + The priority value of this ip_restriction. Defaults to 65000. + The priority value of this `ip_restriction`. + type: number + serviceTag: + description: |- + The Service Tag used for this IP Restriction. + The Service Tag used for this IP Restriction. + type: string + virtualNetworkSubnetId: + description: |- + The subnet id which will be used by this Function App Slot for regional virtual network integration. + The Virtual Network Subnet ID used for this IP Restriction. + type: string + virtualNetworkSubnetIdRef: + description: Reference to a Subnet in network to populate + virtualNetworkSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkSubnetIdSelector: + description: Selector for a Subnet in network to populate + virtualNetworkSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + ipRestrictionDefaultAction: + description: The Default action for traffic that does not + match any ip_restriction rule. possible values include Allow + and Deny. Defaults to Allow. + type: string + loadBalancingMode: + description: |- + The Site load balancing mode. Possible values include: WeightedRoundRobin, LeastRequests, LeastResponseTime, WeightedTotalTraffic, RequestHash, PerSiteRoundRobin. Defaults to LeastRequests if omitted. + The Site load balancing mode. Possible values include: `WeightedRoundRobin`, `LeastRequests`, `LeastResponseTime`, `WeightedTotalTraffic`, `RequestHash`, `PerSiteRoundRobin`. Defaults to `LeastRequests` if omitted. + type: string + managedPipelineMode: + description: |- + The Managed Pipeline mode. Possible values include: Integrated, Classic. Defaults to Integrated. + The Managed Pipeline mode. Possible values include: `Integrated`, `Classic`. Defaults to `Integrated`. + type: string + minimumTlsVersion: + description: |- + The configures the minimum version of TLS required for SSL requests. Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + The configures the minimum version of TLS required for SSL requests. 
Possible values include: `1.0`, `1.1`, and `1.2`. Defaults to `1.2`. + type: string + preWarmedInstanceCount: + description: |- + The number of pre-warmed instances for this function app. Only affects apps on an Elastic Premium plan. + The number of pre-warmed instances for this function app. Only affects apps on an Elastic Premium plan. + type: number + remoteDebuggingEnabled: + description: |- + Should Remote Debugging be enabled. Defaults to false. + Should Remote Debugging be enabled. Defaults to `false`. + type: boolean + remoteDebuggingVersion: + description: |- + The Remote Debugging Version. Possible values include VS2017, VS2019, and VS2022 + The Remote Debugging Version. Possible values include `VS2017`, `VS2019`, and `VS2022` + type: string + runtimeScaleMonitoringEnabled: + description: |- + Should Functions Runtime Scale Monitoring be enabled. + Should Functions Runtime Scale Monitoring be enabled. + type: boolean + scmIpRestriction: + description: a scm_ip_restriction block as detailed below. + items: + properties: + action: + description: |- + The action to take. Possible values are Allow or Deny. Defaults to Allow. + The action to take. Possible values are `Allow` or `Deny`. + type: string + description: + description: |- + The Description of this IP Restriction. + The description of the IP restriction rule. + type: string + headers: + description: a headers block as detailed below. + items: + properties: + xAzureFdid: + description: Specifies a list of Azure Front Door + IDs. + items: + type: string + type: array + xFdHealthProbe: + description: Specifies if a Front Door Health + Probe should be expected. The only possible + value is 1. + items: + type: string + type: array + xForwardedFor: + description: Specifies a list of addresses for + which matching should be applied. Omitting this + value means allow any. + items: + type: string + type: array + xForwardedHost: + description: Specifies a list of Hosts for which + matching should be applied. 
+ items: + type: string + type: array + type: object + type: array + ipAddress: + description: |- + The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + type: string + name: + description: |- + The name which should be used for this Storage Account. + The name which should be used for this `ip_restriction`. + type: string + priority: + description: |- + The priority value of this ip_restriction. Defaults to 65000. + The priority value of this `ip_restriction`. + type: number + serviceTag: + description: |- + The Service Tag used for this IP Restriction. + The Service Tag used for this IP Restriction. + type: string + virtualNetworkSubnetId: + description: |- + The subnet id which will be used by this Function App Slot for regional virtual network integration. + The Virtual Network Subnet ID used for this IP Restriction. + type: string + virtualNetworkSubnetIdRef: + description: Reference to a Subnet in network to populate + virtualNetworkSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkSubnetIdSelector: + description: Selector for a Subnet in network to populate + virtualNetworkSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + scmIpRestrictionDefaultAction: + description: The Default action for traffic that does not + match any scm_ip_restriction rule. possible values include + Allow and Deny. Defaults to Allow. + type: string + scmMinimumTlsVersion: + description: |- + Configures the minimum version of TLS required for SSL requests to the SCM site Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + Configures the minimum version of TLS required for SSL requests to the SCM site Possible values include: `1.0`, `1.1`, and `1.2`. Defaults to `1.2`. 
+ type: string + scmUseMainIpRestriction: + description: |- + Should the Linux Function App ip_restriction configuration be used for the SCM also. + Should the Linux Function App `ip_restriction` configuration be used for the SCM also. + type: boolean + use32BitWorker: + description: |- + Should the Linux Web App use a 32-bit worker. + Should the Linux Web App use a 32-bit worker. + type: boolean + vnetRouteAllEnabled: + description: |- + Should all outbound traffic to have NAT Gateways, Network Security Groups and User Defined Routes applied? Defaults to false. + Should all outbound traffic to have Virtual Network Security Groups and User Defined Routes applied? Defaults to `false`. + type: boolean + websocketsEnabled: + description: |- + Should Web Sockets be enabled. Defaults to false. + Should Web Sockets be enabled. Defaults to `false`. + type: boolean + workerCount: + description: |- + The number of Workers for this Linux Function App. + The number of Workers for this Linux Function App. + type: number + type: object + storageAccount: + description: One or more storage_account blocks as defined below. + items: + properties: + accountName: + description: The Name of the Storage Account. + type: string + mountPath: + description: The path at which to mount the storage share. + type: string + name: + description: The name which should be used for this Storage + Account. + type: string + shareName: + description: The Name of the File Share or Container Name + for Blob storage. + type: string + type: + description: The Azure Storage Type. Possible values include + AzureFiles and AzureBlob. + type: string + type: object + type: array + storageAccountName: + description: |- + The backend storage account name which will be used by this Function App Slot. + The backend storage account name which will be used by this Function App Slot. + type: string + storageAccountNameRef: + description: Reference to a Account in storage to populate storageAccountName. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + storageAccountNameSelector: + description: Selector for a Account in storage to populate storageAccountName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + storageKeyVaultSecretId: + description: |- + The Key Vault Secret ID, optionally including version, that contains the Connection String to connect to the storage account for this Function App. + The Key Vault Secret ID, including version, that contains the Connection String to connect to the storage account for this Function App. + type: string + storageUsesManagedIdentity: + description: |- + Should the Function App Slot use its Managed Identity to access storage. + Should the Function App Slot use its Managed Identity to access storage? + type: boolean + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Linux Function App. + type: object + x-kubernetes-map-type: granular + virtualNetworkSubnetId: + description: The subnet id which will be used by this Function + App Slot for regional virtual network integration. + type: string + virtualNetworkSubnetIdRef: + description: Reference to a Subnet in network to populate virtualNetworkSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkSubnetIdSelector: + description: Selector for a Subnet in network to populate virtualNetworkSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + webdeployPublishBasicAuthenticationEnabled: + description: Should the default WebDeploy Basic Authentication + publishing credentials enabled. Defaults to true. + type: boolean + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. 
Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.siteConfig is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.siteConfig) + || (has(self.initProvider) && has(self.initProvider.siteConfig))' + status: + description: LinuxFunctionAppSlotStatus defines the observed state of + LinuxFunctionAppSlot. + properties: + atProvider: + properties: + appSettings: + additionalProperties: + type: string + description: |- + A map of key-value pairs for App Settings and custom values. 
+ A map of key-value pairs for [App Settings](https://docs.microsoft.com/en-us/azure/azure-functions/functions-app-settings) and custom values. + type: object + x-kubernetes-map-type: granular + authSettings: + description: an auth_settings block as detailed below. + properties: + activeDirectory: + description: an active_directory block as detailed below. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Active Directory. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The App Setting name that contains the client secret of the Client. Cannot be used with `client_secret`. + type: string + type: object + additionalLoginParameters: + additionalProperties: + type: string + description: |- + Specifies a map of login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + Specifies a map of Login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + type: object + x-kubernetes-map-type: granular + allowedExternalRedirectUrls: + description: |- + Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + items: + type: string + type: array + defaultProvider: + description: |- + The default authentication provider to use when multiple providers are configured. 
Possible values include: AzureActiveDirectory, Facebook, Google, MicrosoftAccount, Twitter, Github. + The default authentication provider to use when multiple providers are configured. Possible values include: `AzureActiveDirectory`, `Facebook`, `Google`, `MicrosoftAccount`, `Twitter`, `Github`. + type: string + enabled: + description: |- + Should the Authentication / Authorization feature be enabled? + Should the Authentication / Authorization feature be enabled? + type: boolean + facebook: + description: a facebook block as detailed below. + properties: + appId: + description: |- + The App ID of the Facebook app used for login. + The App ID of the Facebook app used for login. + type: string + appSecretSettingName: + description: |- + The app setting name that contains the app_secret value used for Facebook Login. + The app setting name that contains the `app_secret` value used for Facebook Login. Cannot be specified with `app_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + Specifies a list of OAuth 2.0 scopes to be requested as part of Facebook Login authentication. + items: + type: string + type: array + type: object + github: + description: a github block as detailed below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the GitHub app used for login. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for GitHub Login. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. 
If not specified, wl.basic is used as the default scope. + Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + items: + type: string + type: array + type: object + google: + description: a google block as detailed below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Google web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Google Login. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + Specifies a list of OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication. If not specified, "openid", "profile", and "email" are used as default scopes. + items: + type: string + type: array + type: object + issuer: + description: |- + The OpenID Connect Issuer URI that represents the entity which issues access tokens. + The OpenID Connect Issuer URI that represents the entity which issues access tokens. + type: string + microsoft: + description: a microsoft block as detailed below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OAuth 2.0 client ID that was created for the app used for authentication. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. 
Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + The list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, `wl.basic` is used as the default scope. + items: + type: string + type: array + type: object + runtimeVersion: + description: |- + The RuntimeVersion of the Authentication / Authorization feature in use. + The RuntimeVersion of the Authentication / Authorization feature in use. + type: string + tokenRefreshExtensionHours: + description: |- + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + type: number + tokenStoreEnabled: + description: |- + Should the Linux Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to false. + Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to `false`. + type: boolean + twitter: + description: a twitter block as detailed below. + properties: + consumerKey: + description: |- + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + type: string + consumerSecretSettingName: + description: |- + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret`. 
+ type: string + type: object + unauthenticatedClientAction: + description: |- + The action to take when an unauthenticated client attempts to access the app. Possible values include: RedirectToLoginPage, AllowAnonymous. + The action to take when an unauthenticated client attempts to access the app. Possible values include: `RedirectToLoginPage`, `AllowAnonymous`. + type: string + type: object + authSettingsV2: + description: an auth_settings_v2 block as detailed below. + properties: + activeDirectoryV2: + description: An active_directory_v2 block as defined below. + properties: + allowedApplications: + description: |- + The list of allowed Applications for the Default Authorisation Policy. + The list of allowed Applications for the Default Authorisation Policy. + items: + type: string + type: array + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + items: + type: string + type: array + allowedGroups: + description: |- + The list of allowed Group Names for the Default Authorisation Policy. + The list of allowed Group Names for the Default Authorisation Policy. + items: + type: string + type: array + allowedIdentities: + description: |- + The list of allowed Identities for the Default Authorisation Policy. + The list of allowed Identities for the Default Authorisation Policy. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Active Directory. + type: string + clientSecretCertificateThumbprint: + description: |- + The thumbprint of the certificate used for signing purposes. + The thumbprint of the certificate used for signing purposes. 
+ type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The App Setting name that contains the client secret of the Client. + type: string + jwtAllowedClientApplications: + description: |- + A list of Allowed Client Applications in the JWT Claim. + A list of Allowed Client Applications in the JWT Claim. + items: + type: string + type: array + jwtAllowedGroups: + description: |- + A list of Allowed Groups in the JWT Claim. + A list of Allowed Groups in the JWT Claim. + items: + type: string + type: array + loginParameters: + additionalProperties: + type: string + description: |- + A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + type: object + x-kubernetes-map-type: granular + tenantAuthEndpoint: + description: |- + The Azure Tenant Endpoint for the Authenticating Tenant. e.g. https://login.microsoftonline.com/v2.0/{tenant-guid}/ + The Azure Tenant Endpoint for the Authenticating Tenant. e.g. `https://login.microsoftonline.com/v2.0/{tenant-guid}/`. + type: string + wwwAuthenticationDisabled: + description: |- + Should the www-authenticate provider should be omitted from the request? Defaults to false. + Should the www-authenticate provider should be omitted from the request? Defaults to `false` + type: boolean + type: object + appleV2: + description: An apple_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Apple web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Apple Login. 
+ type: string + loginScopes: + description: The list of Login scopes that should be requested + as part of Microsoft Account authentication. + items: + type: string + type: array + type: object + authEnabled: + description: |- + Should the AuthV2 Settings be enabled. Defaults to false. + Should the AuthV2 Settings be enabled. Defaults to `false` + type: boolean + azureStaticWebAppV2: + description: An azure_static_web_app_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Static Web App Authentication. + type: string + type: object + configFilePath: + description: |- + The path to the App Auth settings. + The path to the App Auth settings. **Note:** Relative Paths are evaluated from the Site Root directory. + type: string + customOidcV2: + description: Zero or more custom_oidc_v2 blocks as defined + below. + items: + properties: + authorisationEndpoint: + description: |- + The endpoint to make the Authorisation Request as supplied by openid_configuration_endpoint response. + The endpoint to make the Authorisation Request. + type: string + certificationUri: + description: |- + The endpoint that provides the keys necessary to validate the token as supplied by openid_configuration_endpoint response. + The endpoint that provides the keys necessary to validate the token. + type: string + clientCredentialMethod: + description: |- + The Client Credential Method used. + The Client Credential Method used. Currently the only supported value is `ClientSecretPost`. + type: string + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with this Custom OIDC. 
+ type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The App Setting name that contains the secret for this Custom OIDC Client. + type: string + issuerEndpoint: + description: |- + The endpoint that issued the Token as supplied by openid_configuration_endpoint response. + The endpoint that issued the Token. + type: string + name: + description: |- + The name which should be used for this Storage Account. + The name of the Custom OIDC Authentication Provider. + type: string + nameClaimType: + description: |- + The name of the claim that contains the users name. + The name of the claim that contains the users name. + type: string + openidConfigurationEndpoint: + description: |- + The app setting name that contains the client_secret value used for the Custom OIDC Login. + The endpoint that contains all the configuration endpoints for this Custom OIDC provider. + type: string + scopes: + description: |- + The list of the scopes that should be requested while authenticating. + The list of the scopes that should be requested while authenticating. + items: + type: string + type: array + tokenEndpoint: + description: |- + The endpoint used to request a Token as supplied by openid_configuration_endpoint response. + The endpoint used to request a Token. + type: string + type: object + type: array + defaultProvider: + description: |- + The Default Authentication Provider to use when the unauthenticated_action is set to RedirectToLoginPage. Possible values include: apple, azureactivedirectory, facebook, github, google, twitter and the name of your custom_oidc_v2 provider. + The Default Authentication Provider to use when the `unauthenticated_action` is set to `RedirectToLoginPage`. Possible values include: `apple`, `azureactivedirectory`, `facebook`, `github`, `google`, `twitter` and the `name` of your `custom_oidc_v2` provider. 
+ type: string + excludedPaths: + description: |- + The paths which should be excluded from the unauthenticated_action when it is set to RedirectToLoginPage. + The paths which should be excluded from the `unauthenticated_action` when it is set to `RedirectToLoginPage`. + items: + type: string + type: array + facebookV2: + description: A facebook_v2 block as defined below. + properties: + appId: + description: |- + The App ID of the Facebook app used for login. + The App ID of the Facebook app used for login. + type: string + appSecretSettingName: + description: |- + The app setting name that contains the app_secret value used for Facebook Login. + The app setting name that contains the `app_secret` value used for Facebook Login. + type: string + graphApiVersion: + description: |- + The version of the Facebook API to be used while logging in. + The version of the Facebook API to be used while logging in. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of scopes to be requested as part of Facebook Login authentication. + items: + type: string + type: array + type: object + forwardProxyConvention: + description: |- + The convention used to determine the url of the request made. Possible values include NoProxy, Standard, Custom. Defaults to NoProxy. + The convention used to determine the url of the request made. Possible values include `ForwardProxyConventionNoProxy`, `ForwardProxyConventionStandard`, `ForwardProxyConventionCustom`. Defaults to `ForwardProxyConventionNoProxy` + type: string + forwardProxyCustomHostHeaderName: + description: |- + The name of the custom header containing the host of the request. + The name of the header containing the host of the request. + type: string + forwardProxyCustomSchemeHeaderName: + description: |- + The name of the custom header containing the scheme of the request. 
+ The name of the header containing the scheme of the request. + type: string + githubV2: + description: A github_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the GitHub app used for login. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for GitHub Login. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + items: + type: string + type: array + type: object + googleV2: + description: A google_v2 block as defined below. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed Audiences that will be requested as part of Google Sign-In authentication. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Google web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Google Login. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of Login scopes that will be requested as part of Google Sign-In authentication. 
+ items: + type: string + type: array + type: object + httpRouteApiPrefix: + description: |- + The prefix that should precede all the authentication and authorisation paths. Defaults to /.auth. + The prefix that should precede all the authentication and authorisation paths. Defaults to `/.auth` + type: string + login: + description: A login block as defined below. + properties: + allowedExternalRedirectUrls: + description: |- + External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. + External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. **Note:** URLs within the current domain are always implicitly allowed. + items: + type: string + type: array + cookieExpirationConvention: + description: |- + The method by which cookies expire. Possible values include: FixedTime, and IdentityProviderDerived. Defaults to FixedTime. + The method by which cookies expire. Possible values include: `FixedTime`, and `IdentityProviderDerived`. Defaults to `FixedTime`. + type: string + cookieExpirationTime: + description: |- + The time after the request is made when the session cookie should expire. Defaults to 08:00:00. + The time after the request is made when the session cookie should expire. Defaults to `08:00:00`. + type: string + logoutEndpoint: + description: |- + The endpoint to which logout requests should be made. + The endpoint to which logout requests should be made. + type: string + nonceExpirationTime: + description: |- + The time after the request is made when the nonce should expire. Defaults to 00:05:00. + The time after the request is made when the nonce should expire. Defaults to `00:05:00`. 
+ type: string + preserveUrlFragmentsForLogins: + description: |- + Should the fragments from the request be preserved after the login request is made. Defaults to false. + Should the fragments from the request be preserved after the login request is made. Defaults to `false`. + type: boolean + tokenRefreshExtensionTime: + description: |- + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + type: number + tokenStoreEnabled: + description: |- + Should the Token Store configuration Enabled. Defaults to false + Should the Token Store configuration Enabled. Defaults to `false` + type: boolean + tokenStorePath: + description: |- + The directory path in the App Filesystem in which the tokens will be stored. + The directory path in the App Filesystem in which the tokens will be stored. + type: string + tokenStoreSasSettingName: + description: |- + The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + type: string + validateNonce: + description: |- + Should the nonce be validated while completing the login flow. Defaults to true. + Should the nonce be validated while completing the login flow. Defaults to `true`. + type: boolean + type: object + microsoftV2: + description: A microsoft_v2 block as defined below. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. 
+ items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OAuth 2.0 client ID that was created for the app used for authentication. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + The list of Login scopes that will be requested as part of Microsoft Account authentication. + items: + type: string + type: array + type: object + requireAuthentication: + description: |- + Should the authentication flow be used for all requests. + Should the authentication flow be used for all requests. + type: boolean + requireHttps: + description: |- + Should HTTPS be required on connections? Defaults to true. + Should HTTPS be required on connections? Defaults to true. + type: boolean + runtimeVersion: + description: |- + The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to ~1. + The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to `~1` + type: string + twitterV2: + description: A twitter_v2 block as defined below. + properties: + consumerKey: + description: |- + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + type: string + consumerSecretSettingName: + description: |- + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. 
+ type: string + type: object + unauthenticatedAction: + description: |- + The action to take for requests made without authentication. Possible values include RedirectToLoginPage, AllowAnonymous, Return401, and Return403. Defaults to RedirectToLoginPage. + The action to take for requests made without authentication. Possible values include `RedirectToLoginPage`, `AllowAnonymous`, `Return401`, and `Return403`. Defaults to `RedirectToLoginPage`. + type: string + type: object + backup: + description: a backup block as detailed below. + properties: + enabled: + description: |- + Should this backup job be enabled? Defaults to true. + Should this backup job be enabled? + type: boolean + name: + description: |- + The name which should be used for this Backup. + The name which should be used for this Backup. + type: string + schedule: + description: a schedule block as detailed below. + properties: + frequencyInterval: + description: |- + How often the backup should be executed (e.g. for weekly backup, this should be set to 7 and frequency_unit should be set to Day). + How often the backup should be executed (e.g. for weekly backup, this should be set to `7` and `frequency_unit` should be set to `Day`). + type: number + frequencyUnit: + description: |- + The unit of time for how often the backup should take place. Possible values include: Day and Hour. + The unit of time for how often the backup should take place. Possible values include: `Day` and `Hour`. + type: string + keepAtLeastOneBackup: + description: |- + Should the service keep at least one backup, regardless of age of backup. Defaults to false. + Should the service keep at least one backup, regardless of age of backup. Defaults to `false`. + type: boolean + lastExecutionTime: + description: |- + The time the backup was last attempted. + The time the backup was last attempted. + type: string + retentionPeriodDays: + description: |- + After how many days backups should be deleted. Defaults to 30. 
+ After how many days backups should be deleted. + type: number + startTime: + description: |- + When the schedule should start working in RFC-3339 format. + When the schedule should start working in RFC-3339 format. + type: string + type: object + type: object + builtinLoggingEnabled: + description: |- + Should built in logging be enabled. Configures AzureWebJobsDashboard app setting based on the configured storage setting. Defaults to true. + Should built in logging be enabled. Configures `AzureWebJobsDashboard` app setting based on the configured storage setting. + type: boolean + clientCertificateEnabled: + description: |- + Should the Function App Slot use Client Certificates. + Should the Function App Slot use Client Certificates. + type: boolean + clientCertificateExclusionPaths: + description: |- + Paths to exclude when using client certificates, separated by ; + Paths to exclude when using client certificates, separated by ; + type: string + clientCertificateMode: + description: |- + The mode of the Function App Slot's client certificates requirement for incoming requests. Possible values are Required, Optional, and OptionalInteractiveUser. Defaults to Optional. + The mode of the Function App Slot's client certificates requirement for incoming requests. Possible values are `Required`, `Optional`, and `OptionalInteractiveUser`. + type: string + connectionString: + description: a connection_string block as detailed below. + items: + properties: + name: + description: |- + The name which should be used for this Connection. + The name which should be used for this Connection. + type: string + type: + description: |- + Type of database. Possible values include: APIHub, Custom, DocDb, EventHub, MySQL, NotificationHub, PostgreSQL, RedisCache, ServiceBus, SQLAzure, and SQLServer. + Type of database. Possible values include: `MySQL`, `SQLServer`, `SQLAzure`, `Custom`, `NotificationHub`, `ServiceBus`, `EventHub`, `APIHub`, `DocDb`, `RedisCache`, and `PostgreSQL`. 
+ type: string + type: object + type: array + contentShareForceDisabled: + description: |- + Force disable the content share settings. + Force disable the content share settings. + type: boolean + dailyMemoryTimeQuota: + description: |- + The amount of memory in gigabyte-seconds that your application is allowed to consume per day. Setting this value only affects function apps in Consumption Plans. Defaults to 0. + The amount of memory in gigabyte-seconds that your application is allowed to consume per day. Setting this value only affects function apps in Consumption Plans. + type: number + defaultHostname: + description: The default hostname of the Linux Function App Slot. + type: string + enabled: + description: |- + Is the Linux Function App Slot enabled. Defaults to true. + Is the Linux Function App Slot enabled. + type: boolean + ftpPublishBasicAuthenticationEnabled: + description: Are the default FTP Basic Authentication publishing + credentials enabled. Defaults to true. + type: boolean + functionAppId: + description: |- + The ID of the Linux Function App this Slot is a member of. Changing this forces a new resource to be created. + The ID of the Linux Function App this Slot is a member of. + type: string + functionsExtensionVersion: + description: |- + The runtime version associated with the Function App Slot. Defaults to ~4. + The runtime version associated with the Function App Slot. + type: string + hostingEnvironmentId: + description: The ID of the App Service Environment used by Function + App Slot. + type: string + httpsOnly: + description: |- + Can the Function App Slot only be accessed via HTTPS?. Defaults to false. + Can the Function App Slot only be accessed via HTTPS? + type: boolean + id: + description: The ID of the Linux Function App Slot + type: string + identity: + description: An identity block as detailed below. 
+ properties: + identityIds: + description: A list of User Assigned Managed Identity IDs + to be assigned to this Linux Function App Slot. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Principal ID associated with this Managed + Service Identity. + type: string + tenantId: + description: The Tenant ID associated with this Managed Service + Identity. + type: string + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Linux Function App Slot. + Possible values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + keyVaultReferenceIdentityId: + description: |- + The User Assigned Identity ID used for accessing KeyVault secrets. The identity must be assigned to the application in the identity block. For more information see - Access vaults with a user-assigned identity + The User Assigned Identity to use for Key Vault access. + type: string + kind: + description: The Kind value for this Linux Function App Slot. + type: string + outboundIpAddressList: + description: A list of outbound IP addresses. For example ["52.23.25.3", + "52.143.43.12"] + items: + type: string + type: array + outboundIpAddresses: + description: A comma separated list of outbound IP addresses as + a string. For example 52.23.25.3,52.143.43.12. + type: string + possibleOutboundIpAddressList: + description: A list of possible outbound IP addresses, not all + of which are necessarily in use. This is a superset of outbound_ip_address_list. + For example ["52.23.25.3", "52.143.43.12"]. + items: + type: string + type: array + possibleOutboundIpAddresses: + description: A comma separated list of possible outbound IP addresses + as a string. For example 52.23.25.3,52.143.43.12,52.143.43.17. + This is a superset of outbound_ip_addresses. For example ["52.23.25.3", + "52.143.43.12","52.143.43.17"]. 
+ type: string + publicNetworkAccessEnabled: + description: Should public network access be enabled for the Function + App. Defaults to true. + type: boolean + servicePlanId: + description: The ID of the Service Plan in which to run this slot. + If not specified the same Service Plan as the Linux Function + App will be used. + type: string + siteConfig: + description: a site_config block as detailed below. + properties: + alwaysOn: + description: |- + If this Linux Web App is Always On enabled. Defaults to false. + If this Linux Web App is Always On enabled. Defaults to `false`. + type: boolean + apiDefinitionUrl: + description: |- + The URL of the API definition that describes this Linux Function App. + The URL of the API definition that describes this Linux Function App. + type: string + apiManagementApiId: + description: |- + The ID of the API Management API for this Linux Function App. + The ID of the API Management API for this Linux Function App. + type: string + appCommandLine: + description: |- + The program and any arguments used to launch this app via the command line. (Example node myapp.js). + The program and any arguments used to launch this app via the command line. (Example `node myapp.js`). + type: string + appScaleLimit: + description: |- + The number of workers this function app can scale out to. Only applicable to apps on the Consumption and Premium plan. + The number of workers this function app can scale out to. Only applicable to apps on the Consumption and Premium plan. + type: number + appServiceLogs: + description: an app_service_logs block as detailed below. + properties: + diskQuotaMb: + description: |- + The amount of disk space to use for logs. Valid values are between 25 and 100. Defaults to 35. + The amount of disk space to use for logs. Valid values are between `25` and `100`. + type: number + retentionPeriodDays: + description: |- + After how many days backups should be deleted. Defaults to 30. 
+ The retention period for logs in days. Valid values are between `0` and `99999`. Defaults to `0` (never delete). + type: number + type: object + applicationStack: + description: an application_stack block as detailed below. + properties: + docker: + description: |- + a docker block as detailed below. + A docker block + items: + properties: + imageName: + description: |- + The name of the Docker image to use. + The name of the Docker image to use. + type: string + imageTag: + description: |- + The image tag of the image to use. + The image tag of the image to use. + type: string + registryUrl: + description: |- + The URL of the docker registry. + The URL of the docker registry. + type: string + type: object + type: array + dotnetVersion: + description: |- + The version of .Net. Possible values are 3.1, 6.0, 7.0 and 8.0. + The version of .Net. Possible values are `3.1`, `6.0` and `7.0` + type: string + javaVersion: + description: |- + The version of Java to use. Possible values are 8, 11 & 17 (In-Preview). + The version of Java to use. Possible values are `8`, `11`, and `17` + type: string + nodeVersion: + description: |- + The version of Node to use. Possible values include 12, 14, 16 and 18 + The version of Node to use. Possible values include `12`, `14`, `16` and `18` + type: string + powershellCoreVersion: + description: |- + The version of PowerShell Core to use. Possibles values are 7 , and 7.2. + The version of PowerShell Core to use. Possibles values are `7`, and `7.2` + type: string + pythonVersion: + description: |- + The version of Python to use. Possible values are 3.12, 3.11, 3.10, 3.9, 3.8 and 3.7. + The version of Python to use. Possible values include `3.12`, `3.11`, `3.10`, `3.9`, `3.8`, and `3.7`. + type: string + useCustomRuntime: + description: Should the Linux Function App use a custom + runtime? + type: boolean + useDotnetIsolatedRuntime: + description: |- + Should the DotNet process use an isolated runtime. Defaults to false. 
+ Should the DotNet process use an isolated runtime. Defaults to `false`. + type: boolean + type: object + autoSwapSlotName: + description: The name of the slot to automatically swap with + when this slot is successfully deployed. + type: string + containerRegistryManagedIdentityClientId: + description: |- + The Client ID of the Managed Service Identity to use for connections to the Azure Container Registry. + The Client ID of the Managed Service Identity to use for connections to the Azure Container Registry. + type: string + containerRegistryUseManagedIdentity: + description: |- + Should connections for Azure Container Registry use Managed Identity. + Should connections for Azure Container Registry use Managed Identity. + type: boolean + cors: + description: a cors block as detailed below. + properties: + allowedOrigins: + description: |- + Specifies a list of origins that should be allowed to make cross-origin calls. + Specifies a list of origins that should be allowed to make cross-origin calls. + items: + type: string + type: array + x-kubernetes-list-type: set + supportCredentials: + description: |- + Are credentials allowed in CORS requests? Defaults to false. + Are credentials allowed in CORS requests? Defaults to `false`. + type: boolean + type: object + defaultDocuments: + description: |- + Specifies a list of Default Documents for the Linux Web App. + Specifies a list of Default Documents for the Linux Web App. + items: + type: string + type: array + detailedErrorLoggingEnabled: + description: |- + Is detailed error logging enabled + Is detailed error logging enabled + type: boolean + elasticInstanceMinimum: + description: |- + The number of minimum instances for this Linux Function App. Only affects apps on Elastic Premium plans. + The number of minimum instances for this Linux Function App. Only affects apps on Elastic Premium plans. + type: number + ftpsState: + description: |- + State of FTP / FTPS service for this function app. 
Possible values include: AllAllowed, FtpsOnly and Disabled. Defaults to Disabled. + State of FTP / FTPS service for this function app. Possible values include: `AllAllowed`, `FtpsOnly` and `Disabled`. Defaults to `Disabled`. + type: string + healthCheckEvictionTimeInMin: + description: |- + The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between 2 and 10. Defaults to 0. Only valid in conjunction with health_check_path. + The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between `2` and `10`. Defaults to `10`. Only valid in conjunction with `health_check_path` + type: number + healthCheckPath: + description: |- + The path to be checked for this function app health. + The path to be checked for this function app health. + type: string + http2Enabled: + description: |- + Specifies if the HTTP2 protocol should be enabled. Defaults to false. + Specifies if the http2 protocol should be enabled. Defaults to `false`. + type: boolean + ipRestriction: + description: an ip_restriction block as detailed below. + items: + properties: + action: + description: |- + The action to take. Possible values are Allow or Deny. Defaults to Allow. + The action to take. Possible values are `Allow` or `Deny`. + type: string + description: + description: |- + The Description of this IP Restriction. + The description of the IP restriction rule. + type: string + headers: + description: a headers block as detailed below. + items: + properties: + xAzureFdid: + description: Specifies a list of Azure Front Door + IDs. + items: + type: string + type: array + xFdHealthProbe: + description: Specifies if a Front Door Health + Probe should be expected. The only possible + value is 1. + items: + type: string + type: array + xForwardedFor: + description: Specifies a list of addresses for + which matching should be applied. Omitting this + value means allow any. 
+ items: + type: string + type: array + xForwardedHost: + description: Specifies a list of Hosts for which + matching should be applied. + items: + type: string + type: array + type: object + type: array + ipAddress: + description: |- + The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + type: string + name: + description: |- + The name which should be used for this Storage Account. + The name which should be used for this `ip_restriction`. + type: string + priority: + description: |- + The priority value of this ip_restriction. Defaults to 65000. + The priority value of this `ip_restriction`. + type: number + serviceTag: + description: |- + The Service Tag used for this IP Restriction. + The Service Tag used for this IP Restriction. + type: string + virtualNetworkSubnetId: + description: |- + The subnet id which will be used by this Function App Slot for regional virtual network integration. + The Virtual Network Subnet ID used for this IP Restriction. + type: string + type: object + type: array + ipRestrictionDefaultAction: + description: The Default action for traffic that does not + match any ip_restriction rule. possible values include Allow + and Deny. Defaults to Allow. + type: string + linuxFxVersion: + description: |- + The Linux FX Version + The Linux FX Version + type: string + loadBalancingMode: + description: |- + The Site load balancing mode. Possible values include: WeightedRoundRobin, LeastRequests, LeastResponseTime, WeightedTotalTraffic, RequestHash, PerSiteRoundRobin. Defaults to LeastRequests if omitted. + The Site load balancing mode. Possible values include: `WeightedRoundRobin`, `LeastRequests`, `LeastResponseTime`, `WeightedTotalTraffic`, `RequestHash`, `PerSiteRoundRobin`. Defaults to `LeastRequests` if omitted. 
+ type: string + managedPipelineMode: + description: |- + The Managed Pipeline mode. Possible values include: Integrated, Classic. Defaults to Integrated. + The Managed Pipeline mode. Possible values include: `Integrated`, `Classic`. Defaults to `Integrated`. + type: string + minimumTlsVersion: + description: |- + The configures the minimum version of TLS required for SSL requests. Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + The configures the minimum version of TLS required for SSL requests. Possible values include: `1.0`, `1.1`, and `1.2`. Defaults to `1.2`. + type: string + preWarmedInstanceCount: + description: |- + The number of pre-warmed instances for this function app. Only affects apps on an Elastic Premium plan. + The number of pre-warmed instances for this function app. Only affects apps on an Elastic Premium plan. + type: number + remoteDebuggingEnabled: + description: |- + Should Remote Debugging be enabled. Defaults to false. + Should Remote Debugging be enabled. Defaults to `false`. + type: boolean + remoteDebuggingVersion: + description: |- + The Remote Debugging Version. Possible values include VS2017, VS2019, and VS2022 + The Remote Debugging Version. Possible values include `VS2017`, `VS2019`, and `VS2022` + type: string + runtimeScaleMonitoringEnabled: + description: |- + Should Functions Runtime Scale Monitoring be enabled. + Should Functions Runtime Scale Monitoring be enabled. + type: boolean + scmIpRestriction: + description: a scm_ip_restriction block as detailed below. + items: + properties: + action: + description: |- + The action to take. Possible values are Allow or Deny. Defaults to Allow. + The action to take. Possible values are `Allow` or `Deny`. + type: string + description: + description: |- + The Description of this IP Restriction. + The description of the IP restriction rule. + type: string + headers: + description: a headers block as detailed below. 
+ items: + properties: + xAzureFdid: + description: Specifies a list of Azure Front Door + IDs. + items: + type: string + type: array + xFdHealthProbe: + description: Specifies if a Front Door Health + Probe should be expected. The only possible + value is 1. + items: + type: string + type: array + xForwardedFor: + description: Specifies a list of addresses for + which matching should be applied. Omitting this + value means allow any. + items: + type: string + type: array + xForwardedHost: + description: Specifies a list of Hosts for which + matching should be applied. + items: + type: string + type: array + type: object + type: array + ipAddress: + description: |- + The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + type: string + name: + description: |- + The name which should be used for this Storage Account. + The name which should be used for this `ip_restriction`. + type: string + priority: + description: |- + The priority value of this ip_restriction. Defaults to 65000. + The priority value of this `ip_restriction`. + type: number + serviceTag: + description: |- + The Service Tag used for this IP Restriction. + The Service Tag used for this IP Restriction. + type: string + virtualNetworkSubnetId: + description: |- + The subnet id which will be used by this Function App Slot for regional virtual network integration. + The Virtual Network Subnet ID used for this IP Restriction. + type: string + type: object + type: array + scmIpRestrictionDefaultAction: + description: The Default action for traffic that does not + match any scm_ip_restriction rule. possible values include + Allow and Deny. Defaults to Allow. 
+ type: string + scmMinimumTlsVersion: + description: |- + Configures the minimum version of TLS required for SSL requests to the SCM site Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + Configures the minimum version of TLS required for SSL requests to the SCM site Possible values include: `1.0`, `1.1`, and `1.2`. Defaults to `1.2`. + type: string + scmType: + description: |- + The SCM Type in use by the Linux Function App. + The SCM Type in use by the Linux Function App. + type: string + scmUseMainIpRestriction: + description: |- + Should the Linux Function App ip_restriction configuration be used for the SCM also. + Should the Linux Function App `ip_restriction` configuration be used for the SCM also. + type: boolean + use32BitWorker: + description: |- + Should the Linux Web App use a 32-bit worker. + Should the Linux Web App use a 32-bit worker. + type: boolean + vnetRouteAllEnabled: + description: |- + Should all outbound traffic to have NAT Gateways, Network Security Groups and User Defined Routes applied? Defaults to false. + Should all outbound traffic to have Virtual Network Security Groups and User Defined Routes applied? Defaults to `false`. + type: boolean + websocketsEnabled: + description: |- + Should Web Sockets be enabled. Defaults to false. + Should Web Sockets be enabled. Defaults to `false`. + type: boolean + workerCount: + description: |- + The number of Workers for this Linux Function App. + The number of Workers for this Linux Function App. + type: number + type: object + storageAccount: + description: One or more storage_account blocks as defined below. + items: + properties: + accountName: + description: The Name of the Storage Account. + type: string + mountPath: + description: The path at which to mount the storage share. + type: string + name: + description: The name which should be used for this Storage + Account. + type: string + shareName: + description: The Name of the File Share or Container Name + for Blob storage. 
+ type: string + type: + description: The Azure Storage Type. Possible values include + AzureFiles and AzureBlob. + type: string + type: object + type: array + storageAccountName: + description: |- + The backend storage account name which will be used by this Function App Slot. + The backend storage account name which will be used by this Function App Slot. + type: string + storageKeyVaultSecretId: + description: |- + The Key Vault Secret ID, optionally including version, that contains the Connection String to connect to the storage account for this Function App. + The Key Vault Secret ID, including version, that contains the Connection String to connect to the storage account for this Function App. + type: string + storageUsesManagedIdentity: + description: |- + Should the Function App Slot use its Managed Identity to access storage. + Should the Function App Slot use its Managed Identity to access storage? + type: boolean + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Linux Function App. + type: object + x-kubernetes-map-type: granular + virtualNetworkSubnetId: + description: The subnet id which will be used by this Function + App Slot for regional virtual network integration. + type: string + webdeployPublishBasicAuthenticationEnabled: + description: Should the default WebDeploy Basic Authentication + publishing credentials enabled. Defaults to true. + type: boolean + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/web.azure.upbound.io_linuxwebapps.yaml b/package/crds/web.azure.upbound.io_linuxwebapps.yaml index 8a28d0168..25c88176e 100644 --- a/package/crds/web.azure.upbound.io_linuxwebapps.yaml +++ b/package/crds/web.azure.upbound.io_linuxwebapps.yaml @@ -5245,3 +5245,4993 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: LinuxWebApp is the Schema for the LinuxWebApps API. Manages a + Linux Web App. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: LinuxWebAppSpec defines the desired state of LinuxWebApp + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + appSettings: + additionalProperties: + type: string + description: A map of key-value pairs of App Settings. + type: object + x-kubernetes-map-type: granular + authSettings: + description: A auth_settings block as defined below. + properties: + activeDirectory: + description: An active_directory block as defined above. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Active Directory. + type: string + clientSecretSecretRef: + description: |- + The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. 
+ The Client Secret for the Client ID. Cannot be used with `client_secret_setting_name`. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The App Setting name that contains the client secret of the Client. Cannot be used with `client_secret`. + type: string + type: object + additionalLoginParameters: + additionalProperties: + type: string + description: |- + Specifies a map of login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + Specifies a map of Login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + type: object + x-kubernetes-map-type: granular + allowedExternalRedirectUrls: + description: |- + Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Linux Web App. + Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + items: + type: string + type: array + defaultProvider: + description: |- + The default authentication provider to use when multiple providers are configured. Possible values include: BuiltInAuthenticationProviderAzureActiveDirectory, BuiltInAuthenticationProviderFacebook, BuiltInAuthenticationProviderGoogle, BuiltInAuthenticationProviderMicrosoftAccount, BuiltInAuthenticationProviderTwitter, BuiltInAuthenticationProviderGithub + The default authentication provider to use when multiple providers are configured. Possible values include: `AzureActiveDirectory`, `Facebook`, `Google`, `MicrosoftAccount`, `Twitter`, `Github`. 
+ type: string + enabled: + description: |- + Should the Authentication / Authorization feature be enabled for the Linux Web App? + Should the Authentication / Authorization feature be enabled? + type: boolean + facebook: + description: A facebook block as defined below. + properties: + appId: + description: |- + The App ID of the Facebook app used for login. + The App ID of the Facebook app used for login. + type: string + appSecretSecretRef: + description: |- + The App Secret of the Facebook app used for Facebook login. Cannot be specified with app_secret_setting_name. + The App Secret of the Facebook app used for Facebook Login. Cannot be specified with `app_secret_setting_name`. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + appSecretSettingName: + description: |- + The app setting name that contains the app_secret value used for Facebook Login. + The app setting name that contains the `app_secret` value used for Facebook Login. Cannot be specified with `app_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + Specifies a list of OAuth 2.0 scopes to be requested as part of Facebook Login authentication. + items: + type: string + type: array + type: object + github: + description: A github block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the GitHub app used for login. + type: string + clientSecretSecretRef: + description: |- + The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. 
+ The Client Secret of the GitHub app used for GitHub Login. Cannot be specified with `client_secret_setting_name`. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for GitHub Login. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + items: + type: string + type: array + type: object + google: + description: A google block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Google web application. + type: string + clientSecretSecretRef: + description: |- + The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. + The client secret associated with the Google web application. Cannot be specified with `client_secret_setting_name`. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - key + - name + - namespace + type: object + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Google Login. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + Specifies a list of OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication. If not specified, "openid", "profile", and "email" are used as default scopes. + items: + type: string + type: array + type: object + issuer: + description: |- + The OpenID Connect Issuer URI that represents the entity that issues access tokens for this Linux Web App. + The OpenID Connect Issuer URI that represents the entity which issues access tokens. + type: string + microsoft: + description: A microsoft block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OAuth 2.0 client ID that was created for the app used for authentication. + type: string + clientSecretSecretRef: + description: |- + The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. + The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret_setting_name`. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - key + - name + - namespace + type: object + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + The list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, `wl.basic` is used as the default scope. + items: + type: string + type: array + type: object + runtimeVersion: + description: |- + The RuntimeVersion of the Authentication / Authorization feature in use for the Linux Web App. + The RuntimeVersion of the Authentication / Authorization feature in use. + type: string + tokenRefreshExtensionHours: + description: |- + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + type: number + tokenStoreEnabled: + description: |- + Should the Linux Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to false. + Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to `false`. + type: boolean + twitter: + description: A twitter block as defined below. + properties: + consumerKey: + description: |- + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + The OAuth 1.0a consumer key of the Twitter application used for sign-in. 
+ type: string + consumerSecretSecretRef: + description: |- + The OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with consumer_secret_setting_name. + The OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret_setting_name`. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + consumerSecretSettingName: + description: |- + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret`. + type: string + type: object + unauthenticatedClientAction: + description: |- + The action to take when an unauthenticated client attempts to access the app. Possible values include: RedirectToLoginPage, AllowAnonymous. + The action to take when an unauthenticated client attempts to access the app. Possible values include: `RedirectToLoginPage`, `AllowAnonymous`. + type: string + type: object + authSettingsV2: + description: An auth_settings_v2 block as defined below. + properties: + activeDirectoryV2: + description: An active_directory_v2 block as defined below. + properties: + allowedApplications: + description: |- + The list of allowed Applications for the Default Authorisation Policy. + The list of allowed Applications for the Default Authorisation Policy. + items: + type: string + type: array + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. 
+ items: + type: string + type: array + allowedGroups: + description: |- + The list of allowed Group Names for the Default Authorisation Policy. + The list of allowed Group Names for the Default Authorisation Policy. + items: + type: string + type: array + allowedIdentities: + description: |- + The list of allowed Identities for the Default Authorisation Policy. + The list of allowed Identities for the Default Authorisation Policy. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Active Directory. + type: string + clientSecretCertificateThumbprint: + description: |- + The thumbprint of the certificate used for signing purposes. + The thumbprint of the certificate used for signing purposes. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The App Setting name that contains the client secret of the Client. + type: string + jwtAllowedClientApplications: + description: |- + A list of Allowed Client Applications in the JWT Claim. + A list of Allowed Client Applications in the JWT Claim. + items: + type: string + type: array + jwtAllowedGroups: + description: |- + A list of Allowed Groups in the JWT Claim. + A list of Allowed Groups in the JWT Claim. + items: + type: string + type: array + loginParameters: + additionalProperties: + type: string + description: |- + A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + type: object + x-kubernetes-map-type: granular + tenantAuthEndpoint: + description: |- + The Azure Tenant Endpoint for the Authenticating Tenant. e.g. https://login.microsoftonline.com/v2.0/{tenant-guid}/ + The Azure Tenant Endpoint for the Authenticating Tenant. 
e.g. `https://login.microsoftonline.com/v2.0/{tenant-guid}/`. + type: string + wwwAuthenticationDisabled: + description: |- + Should the www-authenticate provider should be omitted from the request? Defaults to false. + Should the www-authenticate provider should be omitted from the request? Defaults to `false` + type: boolean + type: object + appleV2: + description: An apple_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Apple web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Apple Login. + type: string + type: object + authEnabled: + description: |- + Should the AuthV2 Settings be enabled. Defaults to false. + Should the AuthV2 Settings be enabled. Defaults to `false` + type: boolean + azureStaticWebAppV2: + description: An azure_static_web_app_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Static Web App Authentication. + type: string + type: object + configFilePath: + description: |- + The path to the App Auth settings. + The path to the App Auth settings. **Note:** Relative Paths are evaluated from the Site Root directory. + type: string + customOidcV2: + description: Zero or more custom_oidc_v2 blocks as defined + below. + items: + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with this Custom OIDC. + type: string + name: + description: |- + The name which should be used for this Storage Account. 
+ The name of the Custom OIDC Authentication Provider. + type: string + nameClaimType: + description: |- + The name of the claim that contains the users name. + The name of the claim that contains the users name. + type: string + openidConfigurationEndpoint: + description: |- + Specifies the endpoint used for OpenID Connect Discovery. For example https://example.com/.well-known/openid-configuration. + The endpoint that contains all the configuration endpoints for this Custom OIDC provider. + type: string + scopes: + description: |- + The list of the scopes that should be requested while authenticating. + The list of the scopes that should be requested while authenticating. + items: + type: string + type: array + type: object + type: array + defaultProvider: + description: |- + The Default Authentication Provider to use when the unauthenticated_action is set to RedirectToLoginPage. Possible values include: apple, azureactivedirectory, facebook, github, google, twitter and the name of your custom_oidc_v2 provider. + The Default Authentication Provider to use when the `unauthenticated_action` is set to `RedirectToLoginPage`. Possible values include: `apple`, `azureactivedirectory`, `facebook`, `github`, `google`, `twitter` and the `name` of your `custom_oidc_v2` provider. + type: string + excludedPaths: + description: |- + The paths which should be excluded from the unauthenticated_action when it is set to RedirectToLoginPage. + The paths which should be excluded from the `unauthenticated_action` when it is set to `RedirectToLoginPage`. + items: + type: string + type: array + facebookV2: + description: A facebook_v2 block as defined below. + properties: + appId: + description: |- + The App ID of the Facebook app used for login. + The App ID of the Facebook app used for login. + type: string + appSecretSettingName: + description: |- + The app setting name that contains the app_secret value used for Facebook Login. 
+ The app setting name that contains the `app_secret` value used for Facebook Login. + type: string + graphApiVersion: + description: |- + The version of the Facebook API to be used while logging in. + The version of the Facebook API to be used while logging in. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of scopes to be requested as part of Facebook Login authentication. + items: + type: string + type: array + type: object + forwardProxyConvention: + description: |- + The convention used to determine the url of the request made. Possible values include NoProxy, Standard, Custom. Defaults to NoProxy. + The convention used to determine the url of the request made. Possible values include `ForwardProxyConventionNoProxy`, `ForwardProxyConventionStandard`, `ForwardProxyConventionCustom`. Defaults to `ForwardProxyConventionNoProxy` + type: string + forwardProxyCustomHostHeaderName: + description: |- + The name of the custom header containing the host of the request. + The name of the header containing the host of the request. + type: string + forwardProxyCustomSchemeHeaderName: + description: |- + The name of the custom header containing the scheme of the request. + The name of the header containing the scheme of the request. + type: string + githubV2: + description: A github_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the GitHub app used for login. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for GitHub Login. 
+ type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + items: + type: string + type: array + type: object + googleV2: + description: A google_v2 block as defined below. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed Audiences that will be requested as part of Google Sign-In authentication. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Google web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Google Login. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of Login scopes that will be requested as part of Google Sign-In authentication. + items: + type: string + type: array + type: object + httpRouteApiPrefix: + description: |- + The prefix that should precede all the authentication and authorisation paths. Defaults to /.auth. + The prefix that should precede all the authentication and authorisation paths. Defaults to `/.auth` + type: string + login: + description: A login block as defined below. + properties: + allowedExternalRedirectUrls: + description: |- + External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. 
+ External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. **Note:** URLs within the current domain are always implicitly allowed. + items: + type: string + type: array + cookieExpirationConvention: + description: |- + The method by which cookies expire. Possible values include: FixedTime, and IdentityProviderDerived. Defaults to FixedTime. + The method by which cookies expire. Possible values include: `FixedTime`, and `IdentityProviderDerived`. Defaults to `FixedTime`. + type: string + cookieExpirationTime: + description: |- + The time after the request is made when the session cookie should expire. Defaults to 08:00:00. + The time after the request is made when the session cookie should expire. Defaults to `08:00:00`. + type: string + logoutEndpoint: + description: |- + The endpoint to which logout requests should be made. + The endpoint to which logout requests should be made. + type: string + nonceExpirationTime: + description: |- + The time after the request is made when the nonce should expire. Defaults to 00:05:00. + The time after the request is made when the nonce should expire. Defaults to `00:05:00`. + type: string + preserveUrlFragmentsForLogins: + description: |- + Should the fragments from the request be preserved after the login request is made. Defaults to false. + Should the fragments from the request be preserved after the login request is made. Defaults to `false`. + type: boolean + tokenRefreshExtensionTime: + description: |- + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + type: number + tokenStoreEnabled: + description: |- + Should the Token Store configuration Enabled. 
Defaults to false + Should the Token Store configuration Enabled. Defaults to `false` + type: boolean + tokenStorePath: + description: |- + The directory path in the App Filesystem in which the tokens will be stored. + The directory path in the App Filesystem in which the tokens will be stored. + type: string + tokenStoreSasSettingName: + description: |- + The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + type: string + validateNonce: + description: |- + Should the nonce be validated while completing the login flow. Defaults to true. + Should the nonce be validated while completing the login flow. Defaults to `true`. + type: boolean + type: object + microsoftV2: + description: A microsoft_v2 block as defined below. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OAuth 2.0 client ID that was created for the app used for authentication. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + The list of Login scopes that will be requested as part of Microsoft Account authentication. 
+ items: + type: string + type: array + type: object + requireAuthentication: + description: |- + Should the authentication flow be used for all requests. + Should the authentication flow be used for all requests. + type: boolean + requireHttps: + description: |- + Should HTTPS be required on connections? Defaults to true. + Should HTTPS be required on connections? Defaults to true. + type: boolean + runtimeVersion: + description: |- + The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to ~1. + The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to `~1` + type: string + twitterV2: + description: A twitter_v2 block as defined below. + properties: + consumerKey: + description: |- + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + type: string + consumerSecretSettingName: + description: |- + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + type: string + type: object + unauthenticatedAction: + description: |- + The action to take for requests made without authentication. Possible values include RedirectToLoginPage, AllowAnonymous, Return401, and Return403. Defaults to RedirectToLoginPage. + The action to take for requests made without authentication. Possible values include `RedirectToLoginPage`, `AllowAnonymous`, `Return401`, and `Return403`. Defaults to `RedirectToLoginPage`. + type: string + type: object + backup: + description: A backup block as defined below. + properties: + enabled: + description: |- + Should this backup job be enabled? Defaults to true. + Should this backup job be enabled? + type: boolean + name: + description: |- + The name which should be used for this Backup. 
+ The name which should be used for this Backup. + type: string + schedule: + description: A schedule block as defined below. + properties: + frequencyInterval: + description: |- + How often the backup should be executed (e.g. for weekly backup, this should be set to 7 and frequency_unit should be set to Day). + How often the backup should be executed (e.g. for weekly backup, this should be set to `7` and `frequency_unit` should be set to `Day`). + type: number + frequencyUnit: + description: |- + The unit of time for how often the backup should take place. Possible values include: Day, Hour + The unit of time for how often the backup should take place. Possible values include: `Day` and `Hour`. + type: string + keepAtLeastOneBackup: + description: |- + Should the service keep at least one backup, regardless of the age of backup? Defaults to false. + Should the service keep at least one backup, regardless of age of backup. Defaults to `false`. + type: boolean + retentionPeriodDays: + description: |- + After how many days backups should be deleted. Defaults to 30. + After how many days backups should be deleted. + type: number + startTime: + description: |- + When the schedule should start working in RFC-3339 format. + When the schedule should start working in RFC-3339 format. + type: string + type: object + storageAccountUrlSecretRef: + description: |- + The SAS URL to the container. + The SAS URL to the container. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + required: + - storageAccountUrlSecretRef + type: object + clientAffinityEnabled: + description: Should Client Affinity be enabled? + type: boolean + clientCertificateEnabled: + description: Should Client Certificates be enabled? 
+ type: boolean + clientCertificateExclusionPaths: + description: |- + Paths to exclude when using client certificates, separated by ; + Paths to exclude when using client certificates, separated by ; + type: string + clientCertificateMode: + description: The Client Certificate mode. Possible values are + Required, Optional, and OptionalInteractiveUser. This property + has no effect when client_certificate_enabled is false. Defaults + to Required. + type: string + connectionString: + description: One or more connection_string blocks as defined below. + items: + properties: + name: + description: |- + The name of the Connection String. + The name which should be used for this Connection. + type: string + type: + description: |- + Type of database. Possible values include: MySQL, SQLServer, SQLAzure, Custom, NotificationHub, ServiceBus, EventHub, APIHub, DocDb, RedisCache, and PostgreSQL. + Type of database. Possible values include: `MySQL`, `SQLServer`, `SQLAzure`, `Custom`, `NotificationHub`, `ServiceBus`, `EventHub`, `APIHub`, `DocDb`, `RedisCache`, and `PostgreSQL`. + type: string + valueSecretRef: + description: |- + The connection string value. + The connection string value. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + required: + - valueSecretRef + type: object + type: array + enabled: + description: Should the Linux Web App be enabled? Defaults to + true. + type: boolean + ftpPublishBasicAuthenticationEnabled: + description: Should the default FTP Basic Authentication publishing + profile be enabled. Defaults to true. + type: boolean + httpsOnly: + description: Should the Linux Web App require HTTPS connections. + Defaults to false. + type: boolean + identity: + description: An identity block as defined below. 
+ properties: + identityIds: + description: A list of User Assigned Managed Identity IDs + to be assigned to this Linux Web App. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Linux Web App. Possible + values are SystemAssigned, UserAssigned, and SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + keyVaultReferenceIdentityId: + description: The User Assigned Identity ID used for accessing + KeyVault secrets. The identity must be assigned to the application + in the identity block. For more information see - Access vaults + with a user-assigned identity. + type: string + location: + description: The Azure Region where the Linux Web App should exist. + Changing this forces a new Linux Web App to be created. + type: string + logs: + description: A logs block as defined below. + properties: + applicationLogs: + description: A application_logs block as defined above. + properties: + azureBlobStorage: + description: A azure_blob_storage_http block as defined + below. + properties: + level: + description: 'The level at which to log. Possible + values include Error, Warning, Information, Verbose + and Off. NOTE: this field is not available for http_logs' + type: string + retentionInDays: + description: The retention period in days. A value + of 0 means no retention. + type: number + sasUrl: + description: SAS url to an Azure blob container with + read/write/list/delete permissions. + type: string + type: object + fileSystemLevel: + description: 'Log level. Possible values include: Off, + Verbose, Information, Warning, and Error.' + type: string + type: object + detailedErrorMessages: + description: Should detailed error messages be enabled? + type: boolean + failedRequestTracing: + description: Should the failed request tracing be enabled? + type: boolean + httpLogs: + description: An http_logs block as defined above. 
+ properties: + azureBlobStorage: + description: A azure_blob_storage_http block as defined + below. + properties: + retentionInDays: + description: The retention period in days. A value + of 0 means no retention. + type: number + sasurlSecretRef: + description: SAS url to an Azure blob container with + read/write/list/delete permissions. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + required: + - sasurlSecretRef + type: object + fileSystem: + description: A file_system block as defined above. + properties: + retentionInDays: + description: The retention period in days. A value + of 0 means no retention. + type: number + retentionInMb: + description: The maximum size in megabytes that log + files can use. + type: number + type: object + type: object + type: object + publicNetworkAccessEnabled: + description: Should public network access be enabled for the Web + App. Defaults to true. + type: boolean + resourceGroupName: + description: The name of the Resource Group where the Linux Web + App should exist. Changing this forces a new Linux Web App to + be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + servicePlanId: + description: The ID of the Service Plan that this Linux App Service + will be created in. + type: string + servicePlanIdRef: + description: Reference to a ServicePlan in web to populate servicePlanId. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + servicePlanIdSelector: + description: Selector for a ServicePlan in web to populate servicePlanId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + siteConfig: + description: A site_config block as defined below. + properties: + alwaysOn: + description: If this Linux Web App is Always On enabled. Defaults + to true. + type: boolean + apiDefinitionUrl: + description: The URL to the API Definition for this Linux + Web App. + type: string + apiManagementApiId: + description: The API Management API ID this Linux Web App + is associated with. + type: string + appCommandLine: + description: The App command line to launch. + type: string + applicationStack: + description: A application_stack block as defined above. + properties: + dockerImage: + type: string + dockerImageName: + description: The docker image, including tag, to be used. + e.g. appsvc/staticsite:latest. + type: string + dockerImageTag: + type: string + dockerRegistryPasswordSecretRef: + description: The User Name to use for authentication against + the registry to pull the image. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + dockerRegistryUrl: + description: The URL of the container registry where the + docker_image_name is located. e.g. https://index.docker.io + or https://mcr.microsoft.com. This value is required + with docker_image_name. + type: string + dockerRegistryUsername: + description: The User Name to use for authentication against + the registry to pull the image. + type: string + dotnetVersion: + description: The version of .NET to use. Possible values + include 3.1, 5.0, 6.0, 7.0 and 8.0. + type: string + goVersion: + description: The version of Go to use. Possible values + include 1.18, and 1.19. + type: string + javaServer: + description: The Java server type. Possible values include + JAVA, TOMCAT, and JBOSSEAP. 
+ type: string + javaServerVersion: + description: The Version of the java_server to use. + type: string + javaVersion: + description: The Version of Java to use. Possible values + include 8, 11, and 17. + type: string + nodeVersion: + description: The version of Node to run. Possible values + include 12-lts, 14-lts, 16-lts, 18-lts and 20-lts. This + property conflicts with java_version. + type: string + phpVersion: + description: The version of PHP to run. Possible values + are 7.4, 8.0, 8.1 and 8.2. + type: string + pythonVersion: + description: The version of Python to run. Possible values + include 3.7, 3.8, 3.9, 3.10, 3.11 and 3.12. + type: string + rubyVersion: + description: The version of Ruby to run. Possible values + include 2.6 and 2.7. + type: string + type: object + autoHealEnabled: + description: Should Auto heal rules be enabled? Required with + auto_heal_setting. + type: boolean + autoHealSetting: + description: A auto_heal_setting block as defined above. Required + with auto_heal. + properties: + action: + description: The action to take. Possible values are Allow + or Deny. Defaults to Allow. + properties: + actionType: + description: 'Predefined action to be taken to an + Auto Heal trigger. Possible values include: Recycle.' + type: string + minimumProcessExecutionTime: + description: The minimum amount of time in hh:mm:ss + the Linux Web App must have been running before + the defined action will be run in the event of a + trigger. + type: string + type: object + trigger: + description: A trigger block as defined below. + properties: + requests: + description: A requests block as defined above. + properties: + count: + description: The number of occurrences of the + defined status_code in the specified interval + on which to trigger this rule. + type: number + interval: + description: The time interval in the form hh:mm:ss. + type: string + type: object + slowRequest: + description: One or more slow_request blocks as defined + above. 
+ properties: + count: + description: The number of occurrences of the + defined status_code in the specified interval + on which to trigger this rule. + type: number + interval: + description: The time interval in the form hh:mm:ss. + type: string + path: + description: The path to which this rule status + code applies. + type: string + timeTaken: + description: The threshold of time passed to qualify + as a Slow Request in hh:mm:ss. + type: string + type: object + statusCode: + description: One or more status_code blocks as defined + above. + items: + properties: + count: + description: The number of occurrences of the + defined status_code in the specified interval + on which to trigger this rule. + type: number + interval: + description: The time interval in the form hh:mm:ss. + type: string + path: + description: The path to which this rule status + code applies. + type: string + statusCodeRange: + description: The status code for this rule, + accepts single status codes and status code + ranges. e.g. 500 or 400-499. Possible values + are integers between 101 and 599 + type: string + subStatus: + description: The Request Sub Status of the Status + Code. + type: number + win32StatusCode: + description: The Win32 Status Code of the Request. + type: number + type: object + type: array + type: object + type: object + containerRegistryManagedIdentityClientId: + description: The Client ID of the Managed Service Identity + to use for connections to the Azure Container Registry. + type: string + containerRegistryUseManagedIdentity: + description: Should connections for Azure Container Registry + use Managed Identity. + type: boolean + cors: + description: A cors block as defined above. + properties: + allowedOrigins: + description: |- + Specifies a list of origins that should be allowed to make cross-origin calls. + Specifies a list of origins that should be allowed to make cross-origin calls. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + supportCredentials: + description: |- + Whether CORS requests with credentials are allowed. Defaults to false + Are credentials allowed in CORS requests? Defaults to `false`. + type: boolean + type: object + defaultDocuments: + description: Specifies a list of Default Documents for the + Linux Web App. + items: + type: string + type: array + ftpsState: + description: The State of FTP / FTPS service. Possible values + include AllAllowed, FtpsOnly, and Disabled. Defaults to + Disabled. + type: string + healthCheckEvictionTimeInMin: + description: |- + The amount of time in minutes that a node can be unhealthy before being removed from the load balancer. Possible values are between 2 and 10. Only valid in conjunction with health_check_path. + The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between `2` and `10`. Defaults to `10`. Only valid in conjunction with `health_check_path` + type: number + healthCheckPath: + description: The path to the Health Check. + type: string + http2Enabled: + description: Should the HTTP2 be enabled? + type: boolean + ipRestriction: + description: One or more ip_restriction blocks as defined + above. + items: + properties: + action: + description: |- + The action to take. Possible values are Allow or Deny. Defaults to Allow. + The action to take. Possible values are `Allow` or `Deny`. + type: string + description: + description: |- + The Description of this IP Restriction. + The description of the IP restriction rule. + type: string + headers: + description: A headers block as defined above. + items: + properties: + xAzureFdid: + description: Specifies a list of Azure Front Door + IDs. + items: + type: string + type: array + xFdHealthProbe: + description: Specifies if a Front Door Health + Probe should be expected. The only possible + value is 1. 
+ items: + type: string + type: array + xForwardedFor: + description: Specifies a list of addresses for + which matching should be applied. Omitting this + value means allow any. + items: + type: string + type: array + xForwardedHost: + description: Specifies a list of Hosts for which + matching should be applied. + items: + type: string + type: array + type: object + type: array + ipAddress: + description: |- + The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + type: string + name: + description: |- + The name which should be used for this Storage Account. + The name which should be used for this `ip_restriction`. + type: string + priority: + description: |- + The priority value of this ip_restriction. Defaults to 65000. + The priority value of this `ip_restriction`. + type: number + serviceTag: + description: |- + The Service Tag used for this IP Restriction. + The Service Tag used for this IP Restriction. + type: string + virtualNetworkSubnetId: + description: |- + The subnet id which will be used by this Web App for regional virtual network integration. + The Virtual Network Subnet ID used for this IP Restriction. + type: string + virtualNetworkSubnetIdRef: + description: Reference to a Subnet in network to populate + virtualNetworkSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkSubnetIdSelector: + description: Selector for a Subnet in network to populate + virtualNetworkSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + ipRestrictionDefaultAction: + description: The Default action for traffic that does not + match any ip_restriction rule. possible values include Allow + and Deny. Defaults to Allow. + type: string + loadBalancingMode: + description: 'The Site load balancing. 
Possible values include: + WeightedRoundRobin, LeastRequests, LeastResponseTime, WeightedTotalTraffic, + RequestHash, PerSiteRoundRobin. Defaults to LeastRequests + if omitted.' + type: string + localMysqlEnabled: + description: Use Local MySQL. Defaults to false. + type: boolean + managedPipelineMode: + description: Managed pipeline mode. Possible values include + Integrated, and Classic. Defaults to Integrated. + type: string + minimumTlsVersion: + description: 'The configures the minimum version of TLS required + for SSL requests. Possible values include: 1.0, 1.1, and + 1.2. Defaults to 1.2.' + type: string + remoteDebuggingEnabled: + description: Should Remote Debugging be enabled? Defaults + to false. + type: boolean + remoteDebuggingVersion: + description: The Remote Debugging Version. Possible values + include VS2017, VS2019 and VS2022. + type: string + scmIpRestriction: + description: One or more scm_ip_restriction blocks as defined + above. + items: + properties: + action: + description: |- + The action to take. Possible values are Allow or Deny. Defaults to Allow. + The action to take. Possible values are `Allow` or `Deny`. + type: string + description: + description: |- + The Description of this IP Restriction. + The description of the IP restriction rule. + type: string + headers: + description: A headers block as defined above. + items: + properties: + xAzureFdid: + description: Specifies a list of Azure Front Door + IDs. + items: + type: string + type: array + xFdHealthProbe: + description: Specifies if a Front Door Health + Probe should be expected. The only possible + value is 1. + items: + type: string + type: array + xForwardedFor: + description: Specifies a list of addresses for + which matching should be applied. Omitting this + value means allow any. + items: + type: string + type: array + xForwardedHost: + description: Specifies a list of Hosts for which + matching should be applied. 
+ items: + type: string + type: array + type: object + type: array + ipAddress: + description: |- + The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + type: string + name: + description: |- + The name which should be used for this Storage Account. + The name which should be used for this `ip_restriction`. + type: string + priority: + description: |- + The priority value of this ip_restriction. Defaults to 65000. + The priority value of this `ip_restriction`. + type: number + serviceTag: + description: |- + The Service Tag used for this IP Restriction. + The Service Tag used for this IP Restriction. + type: string + virtualNetworkSubnetId: + description: |- + The subnet id which will be used by this Web App for regional virtual network integration. + The Virtual Network Subnet ID used for this IP Restriction. + type: string + virtualNetworkSubnetIdRef: + description: Reference to a Subnet in network to populate + virtualNetworkSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkSubnetIdSelector: + description: Selector for a Subnet in network to populate + virtualNetworkSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + scmIpRestrictionDefaultAction: + description: The Default action for traffic that does not + match any scm_ip_restriction rule. possible values include + Allow and Deny. Defaults to Allow. + type: string + scmMinimumTlsVersion: + description: 'The configures the minimum version of TLS required + for SSL requests to the SCM site Possible values include: + 1.0, 1.1, and 1.2. Defaults to 1.2.' + type: string + scmUseMainIpRestriction: + description: Should the Linux Web App ip_restriction configuration + be used for the SCM also. + type: boolean + use32BitWorker: + description: Should the Linux Web App use a 32-bit worker? 
+ Defaults to true. + type: boolean + vnetRouteAllEnabled: + description: |- + Should all outbound traffic have NAT Gateways, Network Security Groups and User Defined Routes applied? Defaults to false. + Should all outbound traffic to have Virtual Network Security Groups and User Defined Routes applied? Defaults to `false`. + type: boolean + websocketsEnabled: + description: Should Web Sockets be enabled? Defaults to false. + type: boolean + workerCount: + description: The number of Workers for this Linux App Service. + type: number + type: object + stickySettings: + description: A sticky_settings block as defined below. + properties: + appSettingNames: + description: A list of app_setting names that the Linux Web + App will not swap between Slots when a swap operation is + triggered. + items: + type: string + type: array + connectionStringNames: + description: A list of connection_string names that the Linux + Web App will not swap between Slots when a swap operation + is triggered. + items: + type: string + type: array + type: object + storageAccount: + description: One or more storage_account blocks as defined below. + items: + properties: + accessKeySecretRef: + description: The Access key for the storage account. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + accountName: + description: The Name of the Storage Account. + type: string + mountPath: + description: The path at which to mount the storage share. + type: string + name: + description: The name which should be used for this Storage + Account. + type: string + shareName: + description: The Name of the File Share or Container Name + for Blob storage. + type: string + type: + description: The Azure Storage Type. 
Possible values include + AzureFiles and AzureBlob + type: string + required: + - accessKeySecretRef + type: object + type: array + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Linux Web App. + type: object + x-kubernetes-map-type: granular + virtualNetworkSubnetId: + description: The subnet id which will be used by this Web App + for regional virtual network integration. + type: string + virtualNetworkSubnetIdRef: + description: Reference to a Subnet in network to populate virtualNetworkSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkSubnetIdSelector: + description: Selector for a Subnet in network to populate virtualNetworkSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + webdeployPublishBasicAuthenticationEnabled: + description: Should the default WebDeploy Basic Authentication + publishing credentials enabled. Defaults to true. + type: boolean + zipDeployFile: + description: |- + The local path and filename of the Zip packaged application to deploy to this Linux Web App. + The local path and filename of the Zip packaged application to deploy to this Linux Web App. **Note:** Using this value requires either `WEBSITE_RUN_FROM_PACKAGE=1` or `SCM_DO_BUILD_DURING_DEPLOYMENT=true` to be set on the App in `app_settings`. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. 
+ properties: + appSettings: + additionalProperties: + type: string + description: A map of key-value pairs of App Settings. + type: object + x-kubernetes-map-type: granular + authSettings: + description: A auth_settings block as defined below. + properties: + activeDirectory: + description: An active_directory block as defined above. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Active Directory. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The App Setting name that contains the client secret of the Client. Cannot be used with `client_secret`. + type: string + type: object + additionalLoginParameters: + additionalProperties: + type: string + description: |- + Specifies a map of login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + Specifies a map of Login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + type: object + x-kubernetes-map-type: granular + allowedExternalRedirectUrls: + description: |- + Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Linux Web App. + Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + items: + type: string + type: array + defaultProvider: + description: |- + The default authentication provider to use when multiple providers are configured. 
Possible values include: BuiltInAuthenticationProviderAzureActiveDirectory, BuiltInAuthenticationProviderFacebook, BuiltInAuthenticationProviderGoogle, BuiltInAuthenticationProviderMicrosoftAccount, BuiltInAuthenticationProviderTwitter, BuiltInAuthenticationProviderGithub + The default authentication provider to use when multiple providers are configured. Possible values include: `AzureActiveDirectory`, `Facebook`, `Google`, `MicrosoftAccount`, `Twitter`, `Github`. + type: string + enabled: + description: |- + Should the Authentication / Authorization feature be enabled for the Linux Web App? + Should the Authentication / Authorization feature be enabled? + type: boolean + facebook: + description: A facebook block as defined below. + properties: + appId: + description: |- + The App ID of the Facebook app used for login. + The App ID of the Facebook app used for login. + type: string + appSecretSettingName: + description: |- + The app setting name that contains the app_secret value used for Facebook Login. + The app setting name that contains the `app_secret` value used for Facebook Login. Cannot be specified with `app_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + Specifies a list of OAuth 2.0 scopes to be requested as part of Facebook Login authentication. + items: + type: string + type: array + type: object + github: + description: A github block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the GitHub app used for login. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for GitHub Login. 
Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + items: + type: string + type: array + type: object + google: + description: A google block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Google web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Google Login. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + Specifies a list of OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication. If not specified, "openid", "profile", and "email" are used as default scopes. + items: + type: string + type: array + type: object + issuer: + description: |- + The OpenID Connect Issuer URI that represents the entity that issues access tokens for this Linux Web App. + The OpenID Connect Issuer URI that represents the entity which issues access tokens. + type: string + microsoft: + description: A microsoft block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OAuth 2.0 client ID that was created for the app used for authentication. 
+ type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + The list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, `wl.basic` is used as the default scope. + items: + type: string + type: array + type: object + runtimeVersion: + description: |- + The RuntimeVersion of the Authentication / Authorization feature in use for the Linux Web App. + The RuntimeVersion of the Authentication / Authorization feature in use. + type: string + tokenRefreshExtensionHours: + description: |- + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + type: number + tokenStoreEnabled: + description: |- + Should the Linux Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to false. + Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to `false`. + type: boolean + twitter: + description: A twitter block as defined below. + properties: + consumerKey: + description: |- + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + The OAuth 1.0a consumer key of the Twitter application used for sign-in. 
+ type: string + consumerSecretSettingName: + description: |- + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret`. + type: string + type: object + unauthenticatedClientAction: + description: |- + The action to take when an unauthenticated client attempts to access the app. Possible values include: RedirectToLoginPage, AllowAnonymous. + The action to take when an unauthenticated client attempts to access the app. Possible values include: `RedirectToLoginPage`, `AllowAnonymous`. + type: string + type: object + authSettingsV2: + description: An auth_settings_v2 block as defined below. + properties: + activeDirectoryV2: + description: An active_directory_v2 block as defined below. + properties: + allowedApplications: + description: |- + The list of allowed Applications for the Default Authorisation Policy. + The list of allowed Applications for the Default Authorisation Policy. + items: + type: string + type: array + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + items: + type: string + type: array + allowedGroups: + description: |- + The list of allowed Group Names for the Default Authorisation Policy. + The list of allowed Group Names for the Default Authorisation Policy. + items: + type: string + type: array + allowedIdentities: + description: |- + The list of allowed Identities for the Default Authorisation Policy. + The list of allowed Identities for the Default Authorisation Policy. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. 
+ The ID of the Client to use to authenticate with Azure Active Directory. + type: string + clientSecretCertificateThumbprint: + description: |- + The thumbprint of the certificate used for signing purposes. + The thumbprint of the certificate used for signing purposes. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The App Setting name that contains the client secret of the Client. + type: string + jwtAllowedClientApplications: + description: |- + A list of Allowed Client Applications in the JWT Claim. + A list of Allowed Client Applications in the JWT Claim. + items: + type: string + type: array + jwtAllowedGroups: + description: |- + A list of Allowed Groups in the JWT Claim. + A list of Allowed Groups in the JWT Claim. + items: + type: string + type: array + loginParameters: + additionalProperties: + type: string + description: |- + A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + type: object + x-kubernetes-map-type: granular + tenantAuthEndpoint: + description: |- + The Azure Tenant Endpoint for the Authenticating Tenant. e.g. https://login.microsoftonline.com/v2.0/{tenant-guid}/ + The Azure Tenant Endpoint for the Authenticating Tenant. e.g. `https://login.microsoftonline.com/v2.0/{tenant-guid}/`. + type: string + wwwAuthenticationDisabled: + description: |- + Should the www-authenticate provider should be omitted from the request? Defaults to false. + Should the www-authenticate provider should be omitted from the request? Defaults to `false` + type: boolean + type: object + appleV2: + description: An apple_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. 
+ The OpenID Connect Client ID for the Apple web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Apple Login. + type: string + type: object + authEnabled: + description: |- + Should the AuthV2 Settings be enabled. Defaults to false. + Should the AuthV2 Settings be enabled. Defaults to `false` + type: boolean + azureStaticWebAppV2: + description: An azure_static_web_app_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Static Web App Authentication. + type: string + type: object + configFilePath: + description: |- + The path to the App Auth settings. + The path to the App Auth settings. **Note:** Relative Paths are evaluated from the Site Root directory. + type: string + customOidcV2: + description: Zero or more custom_oidc_v2 blocks as defined + below. + items: + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with this Custom OIDC. + type: string + name: + description: |- + The name which should be used for this Storage Account. + The name of the Custom OIDC Authentication Provider. + type: string + nameClaimType: + description: |- + The name of the claim that contains the users name. + The name of the claim that contains the users name. + type: string + openidConfigurationEndpoint: + description: |- + Specifies the endpoint used for OpenID Connect Discovery. For example https://example.com/.well-known/openid-configuration. + The endpoint that contains all the configuration endpoints for this Custom OIDC provider. 
+ type: string + scopes: + description: |- + The list of the scopes that should be requested while authenticating. + The list of the scopes that should be requested while authenticating. + items: + type: string + type: array + type: object + type: array + defaultProvider: + description: |- + The Default Authentication Provider to use when the unauthenticated_action is set to RedirectToLoginPage. Possible values include: apple, azureactivedirectory, facebook, github, google, twitter and the name of your custom_oidc_v2 provider. + The Default Authentication Provider to use when the `unauthenticated_action` is set to `RedirectToLoginPage`. Possible values include: `apple`, `azureactivedirectory`, `facebook`, `github`, `google`, `twitter` and the `name` of your `custom_oidc_v2` provider. + type: string + excludedPaths: + description: |- + The paths which should be excluded from the unauthenticated_action when it is set to RedirectToLoginPage. + The paths which should be excluded from the `unauthenticated_action` when it is set to `RedirectToLoginPage`. + items: + type: string + type: array + facebookV2: + description: A facebook_v2 block as defined below. + properties: + appId: + description: |- + The App ID of the Facebook app used for login. + The App ID of the Facebook app used for login. + type: string + appSecretSettingName: + description: |- + The app setting name that contains the app_secret value used for Facebook Login. + The app setting name that contains the `app_secret` value used for Facebook Login. + type: string + graphApiVersion: + description: |- + The version of the Facebook API to be used while logging in. + The version of the Facebook API to be used while logging in. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of scopes to be requested as part of Facebook Login authentication. 
+ items: + type: string + type: array + type: object + forwardProxyConvention: + description: |- + The convention used to determine the url of the request made. Possible values include NoProxy, Standard, Custom. Defaults to NoProxy. + The convention used to determine the url of the request made. Possible values include `ForwardProxyConventionNoProxy`, `ForwardProxyConventionStandard`, `ForwardProxyConventionCustom`. Defaults to `ForwardProxyConventionNoProxy` + type: string + forwardProxyCustomHostHeaderName: + description: |- + The name of the custom header containing the host of the request. + The name of the header containing the host of the request. + type: string + forwardProxyCustomSchemeHeaderName: + description: |- + The name of the custom header containing the scheme of the request. + The name of the header containing the scheme of the request. + type: string + githubV2: + description: A github_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the GitHub app used for login. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for GitHub Login. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + items: + type: string + type: array + type: object + googleV2: + description: A google_v2 block as defined below. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. 
+ Specifies a list of Allowed Audiences that will be requested as part of Google Sign-In authentication. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Google web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Google Login. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of Login scopes that will be requested as part of Google Sign-In authentication. + items: + type: string + type: array + type: object + httpRouteApiPrefix: + description: |- + The prefix that should precede all the authentication and authorisation paths. Defaults to /.auth. + The prefix that should precede all the authentication and authorisation paths. Defaults to `/.auth` + type: string + login: + description: A login block as defined below. + properties: + allowedExternalRedirectUrls: + description: |- + External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. + External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. **Note:** URLs within the current domain are always implicitly allowed. + items: + type: string + type: array + cookieExpirationConvention: + description: |- + The method by which cookies expire. Possible values include: FixedTime, and IdentityProviderDerived. Defaults to FixedTime. + The method by which cookies expire. 
Possible values include: `FixedTime`, and `IdentityProviderDerived`. Defaults to `FixedTime`. + type: string + cookieExpirationTime: + description: |- + The time after the request is made when the session cookie should expire. Defaults to 08:00:00. + The time after the request is made when the session cookie should expire. Defaults to `08:00:00`. + type: string + logoutEndpoint: + description: |- + The endpoint to which logout requests should be made. + The endpoint to which logout requests should be made. + type: string + nonceExpirationTime: + description: |- + The time after the request is made when the nonce should expire. Defaults to 00:05:00. + The time after the request is made when the nonce should expire. Defaults to `00:05:00`. + type: string + preserveUrlFragmentsForLogins: + description: |- + Should the fragments from the request be preserved after the login request is made. Defaults to false. + Should the fragments from the request be preserved after the login request is made. Defaults to `false`. + type: boolean + tokenRefreshExtensionTime: + description: |- + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + type: number + tokenStoreEnabled: + description: |- + Should the Token Store configuration Enabled. Defaults to false + Should the Token Store configuration Enabled. Defaults to `false` + type: boolean + tokenStorePath: + description: |- + The directory path in the App Filesystem in which the tokens will be stored. + The directory path in the App Filesystem in which the tokens will be stored. + type: string + tokenStoreSasSettingName: + description: |- + The name of the app setting which contains the SAS URL of the blob storage containing the tokens. 
+ The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + type: string + validateNonce: + description: |- + Should the nonce be validated while completing the login flow. Defaults to true. + Should the nonce be validated while completing the login flow. Defaults to `true`. + type: boolean + type: object + microsoftV2: + description: A microsoft_v2 block as defined below. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OAuth 2.0 client ID that was created for the app used for authentication. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + The list of Login scopes that will be requested as part of Microsoft Account authentication. + items: + type: string + type: array + type: object + requireAuthentication: + description: |- + Should the authentication flow be used for all requests. + Should the authentication flow be used for all requests. + type: boolean + requireHttps: + description: |- + Should HTTPS be required on connections? Defaults to true. + Should HTTPS be required on connections? Defaults to true. + type: boolean + runtimeVersion: + description: |- + The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to ~1. 
+ The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to `~1` + type: string + twitterV2: + description: A twitter_v2 block as defined below. + properties: + consumerKey: + description: |- + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + type: string + consumerSecretSettingName: + description: |- + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + type: string + type: object + unauthenticatedAction: + description: |- + The action to take for requests made without authentication. Possible values include RedirectToLoginPage, AllowAnonymous, Return401, and Return403. Defaults to RedirectToLoginPage. + The action to take for requests made without authentication. Possible values include `RedirectToLoginPage`, `AllowAnonymous`, `Return401`, and `Return403`. Defaults to `RedirectToLoginPage`. + type: string + type: object + backup: + description: A backup block as defined below. + properties: + enabled: + description: |- + Should this backup job be enabled? Defaults to true. + Should this backup job be enabled? + type: boolean + name: + description: |- + The name which should be used for this Backup. + The name which should be used for this Backup. + type: string + schedule: + description: A schedule block as defined below. + properties: + frequencyInterval: + description: |- + How often the backup should be executed (e.g. for weekly backup, this should be set to 7 and frequency_unit should be set to Day). + How often the backup should be executed (e.g. for weekly backup, this should be set to `7` and `frequency_unit` should be set to `Day`). + type: number + frequencyUnit: + description: |- + The unit of time for how often the backup should take place. 
Possible values include: Day, Hour + The unit of time for how often the backup should take place. Possible values include: `Day` and `Hour`. + type: string + keepAtLeastOneBackup: + description: |- + Should the service keep at least one backup, regardless of the age of backup? Defaults to false. + Should the service keep at least one backup, regardless of age of backup. Defaults to `false`. + type: boolean + retentionPeriodDays: + description: |- + After how many days backups should be deleted. Defaults to 30. + After how many days backups should be deleted. + type: number + startTime: + description: |- + When the schedule should start working in RFC-3339 format. + When the schedule should start working in RFC-3339 format. + type: string + type: object + type: object + clientAffinityEnabled: + description: Should Client Affinity be enabled? + type: boolean + clientCertificateEnabled: + description: Should Client Certificates be enabled? + type: boolean + clientCertificateExclusionPaths: + description: |- + Paths to exclude when using client certificates, separated by ; + Paths to exclude when using client certificates, separated by ; + type: string + clientCertificateMode: + description: The Client Certificate mode. Possible values are + Required, Optional, and OptionalInteractiveUser. This property + has no effect when client_certificate_enabled is false. Defaults + to Required. + type: string + connectionString: + description: One or more connection_string blocks as defined below. + items: + properties: + name: + description: |- + The name of the Connection String. + The name which should be used for this Connection. + type: string + type: + description: |- + Type of database. Possible values include: MySQL, SQLServer, SQLAzure, Custom, NotificationHub, ServiceBus, EventHub, APIHub, DocDb, RedisCache, and PostgreSQL. + Type of database. 
Possible values include: `MySQL`, `SQLServer`, `SQLAzure`, `Custom`, `NotificationHub`, `ServiceBus`, `EventHub`, `APIHub`, `DocDb`, `RedisCache`, and `PostgreSQL`. + type: string + type: object + type: array + enabled: + description: Should the Linux Web App be enabled? Defaults to + true. + type: boolean + ftpPublishBasicAuthenticationEnabled: + description: Should the default FTP Basic Authentication publishing + profile be enabled. Defaults to true. + type: boolean + httpsOnly: + description: Should the Linux Web App require HTTPS connections. + Defaults to false. + type: boolean + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of User Assigned Managed Identity IDs + to be assigned to this Linux Web App. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Linux Web App. Possible + values are SystemAssigned, UserAssigned, and SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + keyVaultReferenceIdentityId: + description: The User Assigned Identity ID used for accessing + KeyVault secrets. The identity must be assigned to the application + in the identity block. For more information see - Access vaults + with a user-assigned identity. + type: string + location: + description: The Azure Region where the Linux Web App should exist. + Changing this forces a new Linux Web App to be created. + type: string + logs: + description: A logs block as defined below. + properties: + applicationLogs: + description: A application_logs block as defined above. + properties: + azureBlobStorage: + description: A azure_blob_storage_http block as defined + below. + properties: + level: + description: 'The level at which to log. Possible + values include Error, Warning, Information, Verbose + and Off. 
NOTE: this field is not available for http_logs' + type: string + retentionInDays: + description: The retention period in days. A value + of 0 means no retention. + type: number + sasUrl: + description: SAS url to an Azure blob container with + read/write/list/delete permissions. + type: string + type: object + fileSystemLevel: + description: 'Log level. Possible values include: Off, + Verbose, Information, Warning, and Error.' + type: string + type: object + detailedErrorMessages: + description: Should detailed error messages be enabled? + type: boolean + failedRequestTracing: + description: Should the failed request tracing be enabled? + type: boolean + httpLogs: + description: An http_logs block as defined above. + properties: + azureBlobStorage: + description: A azure_blob_storage_http block as defined + below. + properties: + retentionInDays: + description: The retention period in days. A value + of 0 means no retention. + type: number + type: object + fileSystem: + description: A file_system block as defined above. + properties: + retentionInDays: + description: The retention period in days. A value + of 0 means no retention. + type: number + retentionInMb: + description: The maximum size in megabytes that log + files can use. + type: number + type: object + type: object + type: object + publicNetworkAccessEnabled: + description: Should public network access be enabled for the Web + App. Defaults to true. + type: boolean + servicePlanId: + description: The ID of the Service Plan that this Linux App Service + will be created in. + type: string + servicePlanIdRef: + description: Reference to a ServicePlan in web to populate servicePlanId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + servicePlanIdSelector: + description: Selector for a ServicePlan in web to populate servicePlanId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + siteConfig: + description: A site_config block as defined below. + properties: + alwaysOn: + description: If this Linux Web App is Always On enabled. Defaults + to true. 
+ type: boolean + apiDefinitionUrl: + description: The URL to the API Definition for this Linux + Web App. + type: string + apiManagementApiId: + description: The API Management API ID this Linux Web App + is associated with. + type: string + appCommandLine: + description: The App command line to launch. + type: string + applicationStack: + description: A application_stack block as defined above. + properties: + dockerImage: + type: string + dockerImageName: + description: The docker image, including tag, to be used. + e.g. appsvc/staticsite:latest. + type: string + dockerImageTag: + type: string + dockerRegistryUrl: + description: The URL of the container registry where the + docker_image_name is located. e.g. https://index.docker.io + or https://mcr.microsoft.com. This value is required + with docker_image_name. + type: string + dockerRegistryUsername: + description: The User Name to use for authentication against + the registry to pull the image. + type: string + dotnetVersion: + description: The version of .NET to use. Possible values + include 3.1, 5.0, 6.0, 7.0 and 8.0. + type: string + goVersion: + description: The version of Go to use. Possible values + include 1.18, and 1.19. + type: string + javaServer: + description: The Java server type. Possible values include + JAVA, TOMCAT, and JBOSSEAP. + type: string + javaServerVersion: + description: The Version of the java_server to use. + type: string + javaVersion: + description: The Version of Java to use. Possible values + include 8, 11, and 17. + type: string + nodeVersion: + description: The version of Node to run. Possible values + include 12-lts, 14-lts, 16-lts, 18-lts and 20-lts. This + property conflicts with java_version. + type: string + phpVersion: + description: The version of PHP to run. Possible values + are 7.4, 8.0, 8.1 and 8.2. + type: string + pythonVersion: + description: The version of Python to run. Possible values + include 3.7, 3.8, 3.9, 3.10, 3.11 and 3.12. 
+ type: string + rubyVersion: + description: The version of Ruby to run. Possible values + include 2.6 and 2.7. + type: string + type: object + autoHealEnabled: + description: Should Auto heal rules be enabled? Required with + auto_heal_setting. + type: boolean + autoHealSetting: + description: A auto_heal_setting block as defined above. Required + with auto_heal. + properties: + action: + description: The action to take. Possible values are Allow + or Deny. Defaults to Allow. + properties: + actionType: + description: 'Predefined action to be taken to an + Auto Heal trigger. Possible values include: Recycle.' + type: string + minimumProcessExecutionTime: + description: The minimum amount of time in hh:mm:ss + the Linux Web App must have been running before + the defined action will be run in the event of a + trigger. + type: string + type: object + trigger: + description: A trigger block as defined below. + properties: + requests: + description: A requests block as defined above. + properties: + count: + description: The number of occurrences of the + defined status_code in the specified interval + on which to trigger this rule. + type: number + interval: + description: The time interval in the form hh:mm:ss. + type: string + type: object + slowRequest: + description: One or more slow_request blocks as defined + above. + properties: + count: + description: The number of occurrences of the + defined status_code in the specified interval + on which to trigger this rule. + type: number + interval: + description: The time interval in the form hh:mm:ss. + type: string + path: + description: The path to which this rule status + code applies. + type: string + timeTaken: + description: The threshold of time passed to qualify + as a Slow Request in hh:mm:ss. + type: string + type: object + statusCode: + description: One or more status_code blocks as defined + above. 
+ items: + properties: + count: + description: The number of occurrences of the + defined status_code in the specified interval + on which to trigger this rule. + type: number + interval: + description: The time interval in the form hh:mm:ss. + type: string + path: + description: The path to which this rule status + code applies. + type: string + statusCodeRange: + description: The status code for this rule, + accepts single status codes and status code + ranges. e.g. 500 or 400-499. Possible values + are integers between 101 and 599 + type: string + subStatus: + description: The Request Sub Status of the Status + Code. + type: number + win32StatusCode: + description: The Win32 Status Code of the Request. + type: number + type: object + type: array + type: object + type: object + containerRegistryManagedIdentityClientId: + description: The Client ID of the Managed Service Identity + to use for connections to the Azure Container Registry. + type: string + containerRegistryUseManagedIdentity: + description: Should connections for Azure Container Registry + use Managed Identity. + type: boolean + cors: + description: A cors block as defined above. + properties: + allowedOrigins: + description: |- + Specifies a list of origins that should be allowed to make cross-origin calls. + Specifies a list of origins that should be allowed to make cross-origin calls. + items: + type: string + type: array + x-kubernetes-list-type: set + supportCredentials: + description: |- + Whether CORS requests with credentials are allowed. Defaults to false + Are credentials allowed in CORS requests? Defaults to `false`. + type: boolean + type: object + defaultDocuments: + description: Specifies a list of Default Documents for the + Linux Web App. + items: + type: string + type: array + ftpsState: + description: The State of FTP / FTPS service. Possible values + include AllAllowed, FtpsOnly, and Disabled. Defaults to + Disabled. 
+ type: string + healthCheckEvictionTimeInMin: + description: |- + The amount of time in minutes that a node can be unhealthy before being removed from the load balancer. Possible values are between 2 and 10. Only valid in conjunction with health_check_path. + The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between `2` and `10`. Defaults to `10`. Only valid in conjunction with `health_check_path` + type: number + healthCheckPath: + description: The path to the Health Check. + type: string + http2Enabled: + description: Should the HTTP2 be enabled? + type: boolean + ipRestriction: + description: One or more ip_restriction blocks as defined + above. + items: + properties: + action: + description: |- + The action to take. Possible values are Allow or Deny. Defaults to Allow. + The action to take. Possible values are `Allow` or `Deny`. + type: string + description: + description: |- + The Description of this IP Restriction. + The description of the IP restriction rule. + type: string + headers: + description: A headers block as defined above. + items: + properties: + xAzureFdid: + description: Specifies a list of Azure Front Door + IDs. + items: + type: string + type: array + xFdHealthProbe: + description: Specifies if a Front Door Health + Probe should be expected. The only possible + value is 1. + items: + type: string + type: array + xForwardedFor: + description: Specifies a list of addresses for + which matching should be applied. Omitting this + value means allow any. + items: + type: string + type: array + xForwardedHost: + description: Specifies a list of Hosts for which + matching should be applied. + items: + type: string + type: array + type: object + type: array + ipAddress: + description: |- + The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + The CIDR notation of the IP or IP Range to match. 
For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + type: string + name: + description: |- + The name which should be used for this Storage Account. + The name which should be used for this `ip_restriction`. + type: string + priority: + description: |- + The priority value of this ip_restriction. Defaults to 65000. + The priority value of this `ip_restriction`. + type: number + serviceTag: + description: |- + The Service Tag used for this IP Restriction. + The Service Tag used for this IP Restriction. + type: string + virtualNetworkSubnetId: + description: |- + The subnet id which will be used by this Web App for regional virtual network integration. + The Virtual Network Subnet ID used for this IP Restriction. + type: string + virtualNetworkSubnetIdRef: + description: Reference to a Subnet in network to populate + virtualNetworkSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkSubnetIdSelector: + description: Selector for a Subnet in network to populate + virtualNetworkSubnetId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + ipRestrictionDefaultAction: + description: The Default action for traffic that does not + match any ip_restriction rule. possible values include Allow + and Deny. Defaults to Allow. + type: string + loadBalancingMode: + description: 'The Site load balancing. Possible values include: + WeightedRoundRobin, LeastRequests, LeastResponseTime, WeightedTotalTraffic, + RequestHash, PerSiteRoundRobin. Defaults to LeastRequests + if omitted.' + type: string + localMysqlEnabled: + description: Use Local MySQL. Defaults to false. + type: boolean + managedPipelineMode: + description: Managed pipeline mode. Possible values include + Integrated, and Classic. Defaults to Integrated. + type: string + minimumTlsVersion: + description: 'The configures the minimum version of TLS required + for SSL requests. Possible values include: 1.0, 1.1, and + 1.2. 
Defaults to 1.2.' + type: string + remoteDebuggingEnabled: + description: Should Remote Debugging be enabled? Defaults + to false. + type: boolean + remoteDebuggingVersion: + description: The Remote Debugging Version. Possible values + include VS2017, VS2019 and VS2022. + type: string + scmIpRestriction: + description: One or more scm_ip_restriction blocks as defined + above. + items: + properties: + action: + description: |- + The action to take. Possible values are Allow or Deny. Defaults to Allow. + The action to take. Possible values are `Allow` or `Deny`. + type: string + description: + description: |- + The Description of this IP Restriction. + The description of the IP restriction rule. + type: string + headers: + description: A headers block as defined above. + items: + properties: + xAzureFdid: + description: Specifies a list of Azure Front Door + IDs. + items: + type: string + type: array + xFdHealthProbe: + description: Specifies if a Front Door Health + Probe should be expected. The only possible + value is 1. + items: + type: string + type: array + xForwardedFor: + description: Specifies a list of addresses for + which matching should be applied. Omitting this + value means allow any. + items: + type: string + type: array + xForwardedHost: + description: Specifies a list of Hosts for which + matching should be applied. + items: + type: string + type: array + type: object + type: array + ipAddress: + description: |- + The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + type: string + name: + description: |- + The name which should be used for this Storage Account. + The name which should be used for this `ip_restriction`. + type: string + priority: + description: |- + The priority value of this ip_restriction. Defaults to 65000. 
+ The priority value of this `ip_restriction`. + type: number + serviceTag: + description: |- + The Service Tag used for this IP Restriction. + The Service Tag used for this IP Restriction. + type: string + virtualNetworkSubnetId: + description: |- + The subnet id which will be used by this Web App for regional virtual network integration. + The Virtual Network Subnet ID used for this IP Restriction. + type: string + virtualNetworkSubnetIdRef: + description: Reference to a Subnet in network to populate + virtualNetworkSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkSubnetIdSelector: + description: Selector for a Subnet in network to populate + virtualNetworkSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + scmIpRestrictionDefaultAction: + description: The Default action for traffic that does not + match any scm_ip_restriction rule. possible values include + Allow and Deny. Defaults to Allow. + type: string + scmMinimumTlsVersion: + description: 'The configures the minimum version of TLS required + for SSL requests to the SCM site Possible values include: + 1.0, 1.1, and 1.2. Defaults to 1.2.' + type: string + scmUseMainIpRestriction: + description: Should the Linux Web App ip_restriction configuration + be used for the SCM also. + type: boolean + use32BitWorker: + description: Should the Linux Web App use a 32-bit worker? + Defaults to true. + type: boolean + vnetRouteAllEnabled: + description: |- + Should all outbound traffic have NAT Gateways, Network Security Groups and User Defined Routes applied? Defaults to false. + Should all outbound traffic to have Virtual Network Security Groups and User Defined Routes applied? Defaults to `false`. + type: boolean + websocketsEnabled: + description: Should Web Sockets be enabled? Defaults to false. + type: boolean + workerCount: + description: The number of Workers for this Linux App Service. 
+ type: number + type: object + stickySettings: + description: A sticky_settings block as defined below. + properties: + appSettingNames: + description: A list of app_setting names that the Linux Web + App will not swap between Slots when a swap operation is + triggered. + items: + type: string + type: array + connectionStringNames: + description: A list of connection_string names that the Linux + Web App will not swap between Slots when a swap operation + is triggered. + items: + type: string + type: array + type: object + storageAccount: + description: One or more storage_account blocks as defined below. + items: + properties: + accountName: + description: The Name of the Storage Account. + type: string + mountPath: + description: The path at which to mount the storage share. + type: string + name: + description: The name which should be used for this Storage + Account. + type: string + shareName: + description: The Name of the File Share or Container Name + for Blob storage. + type: string + type: + description: The Azure Storage Type. Possible values include + AzureFiles and AzureBlob + type: string + type: object + type: array + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Linux Web App. + type: object + x-kubernetes-map-type: granular + virtualNetworkSubnetId: + description: The subnet id which will be used by this Web App + for regional virtual network integration. + type: string + virtualNetworkSubnetIdRef: + description: Reference to a Subnet in network to populate virtualNetworkSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkSubnetIdSelector: + description: Selector for a Subnet in network to populate virtualNetworkSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + webdeployPublishBasicAuthenticationEnabled: + description: Should the default WebDeploy Basic Authentication + publishing credentials enabled. Defaults to true. 
+ type: boolean + zipDeployFile: + description: |- + The local path and filename of the Zip packaged application to deploy to this Linux Web App. + The local path and filename of the Zip packaged application to deploy to this Linux Web App. **Note:** Using this value requires either `WEBSITE_RUN_FROM_PACKAGE=1` or `SCM_DO_BUILD_DURING_DEPLOYMENT=true` to be set on the App in `app_settings`. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.siteConfig is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.siteConfig) + || (has(self.initProvider) && has(self.initProvider.siteConfig))' + status: + description: LinuxWebAppStatus defines the observed state of LinuxWebApp. + properties: + atProvider: + properties: + appSettings: + additionalProperties: + type: string + description: A map of key-value pairs of App Settings. + type: object + x-kubernetes-map-type: granular + authSettings: + description: A auth_settings block as defined below. + properties: + activeDirectory: + description: An active_directory block as defined above. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Active Directory. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The App Setting name that contains the client secret of the Client. Cannot be used with `client_secret`. 
+ type: string + type: object + additionalLoginParameters: + additionalProperties: + type: string + description: |- + Specifies a map of login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + Specifies a map of Login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + type: object + x-kubernetes-map-type: granular + allowedExternalRedirectUrls: + description: |- + Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Linux Web App. + Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + items: + type: string + type: array + defaultProvider: + description: |- + The default authentication provider to use when multiple providers are configured. Possible values include: BuiltInAuthenticationProviderAzureActiveDirectory, BuiltInAuthenticationProviderFacebook, BuiltInAuthenticationProviderGoogle, BuiltInAuthenticationProviderMicrosoftAccount, BuiltInAuthenticationProviderTwitter, BuiltInAuthenticationProviderGithub + The default authentication provider to use when multiple providers are configured. Possible values include: `AzureActiveDirectory`, `Facebook`, `Google`, `MicrosoftAccount`, `Twitter`, `Github`. + type: string + enabled: + description: |- + Should the Authentication / Authorization feature be enabled for the Linux Web App? + Should the Authentication / Authorization feature be enabled? + type: boolean + facebook: + description: A facebook block as defined below. + properties: + appId: + description: |- + The App ID of the Facebook app used for login. + The App ID of the Facebook app used for login. + type: string + appSecretSettingName: + description: |- + The app setting name that contains the app_secret value used for Facebook Login. + The app setting name that contains the `app_secret` value used for Facebook Login. Cannot be specified with `app_secret`. 
+ type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + Specifies a list of OAuth 2.0 scopes to be requested as part of Facebook Login authentication. + items: + type: string + type: array + type: object + github: + description: A github block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the GitHub app used for login. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for GitHub Login. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + items: + type: string + type: array + type: object + google: + description: A google block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Google web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Google Login. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. 
If not specified, "wl.basic" is used as the default scope. + Specifies a list of OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication. If not specified, "openid", "profile", and "email" are used as default scopes. + items: + type: string + type: array + type: object + issuer: + description: |- + The OpenID Connect Issuer URI that represents the entity that issues access tokens for this Linux Web App. + The OpenID Connect Issuer URI that represents the entity which issues access tokens. + type: string + microsoft: + description: A microsoft block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OAuth 2.0 client ID that was created for the app used for authentication. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + The list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, `wl.basic` is used as the default scope. + items: + type: string + type: array + type: object + runtimeVersion: + description: |- + The RuntimeVersion of the Authentication / Authorization feature in use for the Linux Web App. + The RuntimeVersion of the Authentication / Authorization feature in use. + type: string + tokenRefreshExtensionHours: + description: |- + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. 
+ The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + type: number + tokenStoreEnabled: + description: |- + Should the Linux Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to false. + Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to `false`. + type: boolean + twitter: + description: A twitter block as defined below. + properties: + consumerKey: + description: |- + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + type: string + consumerSecretSettingName: + description: |- + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret`. + type: string + type: object + unauthenticatedClientAction: + description: |- + The action to take when an unauthenticated client attempts to access the app. Possible values include: RedirectToLoginPage, AllowAnonymous. + The action to take when an unauthenticated client attempts to access the app. Possible values include: `RedirectToLoginPage`, `AllowAnonymous`. + type: string + type: object + authSettingsV2: + description: An auth_settings_v2 block as defined below. + properties: + activeDirectoryV2: + description: An active_directory_v2 block as defined below. + properties: + allowedApplications: + description: |- + The list of allowed Applications for the Default Authorisation Policy. + The list of allowed Applications for the Default Authorisation Policy. 
+ items: + type: string + type: array + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + items: + type: string + type: array + allowedGroups: + description: |- + The list of allowed Group Names for the Default Authorisation Policy. + The list of allowed Group Names for the Default Authorisation Policy. + items: + type: string + type: array + allowedIdentities: + description: |- + The list of allowed Identities for the Default Authorisation Policy. + The list of allowed Identities for the Default Authorisation Policy. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Active Directory. + type: string + clientSecretCertificateThumbprint: + description: |- + The thumbprint of the certificate used for signing purposes. + The thumbprint of the certificate used for signing purposes. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The App Setting name that contains the client secret of the Client. + type: string + jwtAllowedClientApplications: + description: |- + A list of Allowed Client Applications in the JWT Claim. + A list of Allowed Client Applications in the JWT Claim. + items: + type: string + type: array + jwtAllowedGroups: + description: |- + A list of Allowed Groups in the JWT Claim. + A list of Allowed Groups in the JWT Claim. + items: + type: string + type: array + loginParameters: + additionalProperties: + type: string + description: |- + A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. 
+ A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + type: object + x-kubernetes-map-type: granular + tenantAuthEndpoint: + description: |- + The Azure Tenant Endpoint for the Authenticating Tenant. e.g. https://login.microsoftonline.com/v2.0/{tenant-guid}/ + The Azure Tenant Endpoint for the Authenticating Tenant. e.g. `https://login.microsoftonline.com/v2.0/{tenant-guid}/`. + type: string + wwwAuthenticationDisabled: + description: |- + Should the www-authenticate provider should be omitted from the request? Defaults to false. + Should the www-authenticate provider should be omitted from the request? Defaults to `false` + type: boolean + type: object + appleV2: + description: An apple_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Apple web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Apple Login. + type: string + loginScopes: + description: The list of Login scopes that should be requested + as part of Microsoft Account authentication. + items: + type: string + type: array + type: object + authEnabled: + description: |- + Should the AuthV2 Settings be enabled. Defaults to false. + Should the AuthV2 Settings be enabled. Defaults to `false` + type: boolean + azureStaticWebAppV2: + description: An azure_static_web_app_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Static Web App Authentication. + type: string + type: object + configFilePath: + description: |- + The path to the App Auth settings. 
+ The path to the App Auth settings. **Note:** Relative Paths are evaluated from the Site Root directory. + type: string + customOidcV2: + description: Zero or more custom_oidc_v2 blocks as defined + below. + items: + properties: + authorisationEndpoint: + description: |- + The endpoint to make the Authorisation Request as supplied by openid_configuration_endpoint response. + The endpoint to make the Authorisation Request. + type: string + certificationUri: + description: |- + The endpoint that provides the keys necessary to validate the token as supplied by openid_configuration_endpoint response. + The endpoint that provides the keys necessary to validate the token. + type: string + clientCredentialMethod: + description: |- + The Client Credential Method used. + The Client Credential Method used. Currently the only supported value is `ClientSecretPost`. + type: string + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with this Custom OIDC. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The App Setting name that contains the secret for this Custom OIDC Client. + type: string + issuerEndpoint: + description: |- + The endpoint that issued the Token as supplied by openid_configuration_endpoint response. + The endpoint that issued the Token. + type: string + name: + description: |- + The name which should be used for this Storage Account. + The name of the Custom OIDC Authentication Provider. + type: string + nameClaimType: + description: |- + The name of the claim that contains the users name. + The name of the claim that contains the users name. + type: string + openidConfigurationEndpoint: + description: |- + Specifies the endpoint used for OpenID Connect Discovery. For example https://example.com/.well-known/openid-configuration. 
+ The endpoint that contains all the configuration endpoints for this Custom OIDC provider. + type: string + scopes: + description: |- + The list of the scopes that should be requested while authenticating. + The list of the scopes that should be requested while authenticating. + items: + type: string + type: array + tokenEndpoint: + description: |- + The endpoint used to request a Token as supplied by openid_configuration_endpoint response. + The endpoint used to request a Token. + type: string + type: object + type: array + defaultProvider: + description: |- + The Default Authentication Provider to use when the unauthenticated_action is set to RedirectToLoginPage. Possible values include: apple, azureactivedirectory, facebook, github, google, twitter and the name of your custom_oidc_v2 provider. + The Default Authentication Provider to use when the `unauthenticated_action` is set to `RedirectToLoginPage`. Possible values include: `apple`, `azureactivedirectory`, `facebook`, `github`, `google`, `twitter` and the `name` of your `custom_oidc_v2` provider. + type: string + excludedPaths: + description: |- + The paths which should be excluded from the unauthenticated_action when it is set to RedirectToLoginPage. + The paths which should be excluded from the `unauthenticated_action` when it is set to `RedirectToLoginPage`. + items: + type: string + type: array + facebookV2: + description: A facebook_v2 block as defined below. + properties: + appId: + description: |- + The App ID of the Facebook app used for login. + The App ID of the Facebook app used for login. + type: string + appSecretSettingName: + description: |- + The app setting name that contains the app_secret value used for Facebook Login. + The app setting name that contains the `app_secret` value used for Facebook Login. + type: string + graphApiVersion: + description: |- + The version of the Facebook API to be used while logging in. + The version of the Facebook API to be used while logging in. 
+ type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of scopes to be requested as part of Facebook Login authentication. + items: + type: string + type: array + type: object + forwardProxyConvention: + description: |- + The convention used to determine the url of the request made. Possible values include NoProxy, Standard, Custom. Defaults to NoProxy. + The convention used to determine the url of the request made. Possible values include `ForwardProxyConventionNoProxy`, `ForwardProxyConventionStandard`, `ForwardProxyConventionCustom`. Defaults to `ForwardProxyConventionNoProxy` + type: string + forwardProxyCustomHostHeaderName: + description: |- + The name of the custom header containing the host of the request. + The name of the header containing the host of the request. + type: string + forwardProxyCustomSchemeHeaderName: + description: |- + The name of the custom header containing the scheme of the request. + The name of the header containing the scheme of the request. + type: string + githubV2: + description: A github_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the GitHub app used for login. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for GitHub Login. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + items: + type: string + type: array + type: object + googleV2: + description: A google_v2 block as defined below. 
+ properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed Audiences that will be requested as part of Google Sign-In authentication. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Google web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Google Login. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of Login scopes that will be requested as part of Google Sign-In authentication. + items: + type: string + type: array + type: object + httpRouteApiPrefix: + description: |- + The prefix that should precede all the authentication and authorisation paths. Defaults to /.auth. + The prefix that should precede all the authentication and authorisation paths. Defaults to `/.auth` + type: string + login: + description: A login block as defined below. + properties: + allowedExternalRedirectUrls: + description: |- + External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. + External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. **Note:** URLs within the current domain are always implicitly allowed. + items: + type: string + type: array + cookieExpirationConvention: + description: |- + The method by which cookies expire. 
Possible values include: FixedTime, and IdentityProviderDerived. Defaults to FixedTime. + The method by which cookies expire. Possible values include: `FixedTime`, and `IdentityProviderDerived`. Defaults to `FixedTime`. + type: string + cookieExpirationTime: + description: |- + The time after the request is made when the session cookie should expire. Defaults to 08:00:00. + The time after the request is made when the session cookie should expire. Defaults to `08:00:00`. + type: string + logoutEndpoint: + description: |- + The endpoint to which logout requests should be made. + The endpoint to which logout requests should be made. + type: string + nonceExpirationTime: + description: |- + The time after the request is made when the nonce should expire. Defaults to 00:05:00. + The time after the request is made when the nonce should expire. Defaults to `00:05:00`. + type: string + preserveUrlFragmentsForLogins: + description: |- + Should the fragments from the request be preserved after the login request is made. Defaults to false. + Should the fragments from the request be preserved after the login request is made. Defaults to `false`. + type: boolean + tokenRefreshExtensionTime: + description: |- + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + type: number + tokenStoreEnabled: + description: |- + Should the Token Store configuration Enabled. Defaults to false + Should the Token Store configuration Enabled. Defaults to `false` + type: boolean + tokenStorePath: + description: |- + The directory path in the App Filesystem in which the tokens will be stored. + The directory path in the App Filesystem in which the tokens will be stored. 
+ type: string + tokenStoreSasSettingName: + description: |- + The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + type: string + validateNonce: + description: |- + Should the nonce be validated while completing the login flow. Defaults to true. + Should the nonce be validated while completing the login flow. Defaults to `true`. + type: boolean + type: object + microsoftV2: + description: A microsoft_v2 block as defined below. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OAuth 2.0 client ID that was created for the app used for authentication. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + The list of Login scopes that will be requested as part of Microsoft Account authentication. + items: + type: string + type: array + type: object + requireAuthentication: + description: |- + Should the authentication flow be used for all requests. + Should the authentication flow be used for all requests. + type: boolean + requireHttps: + description: |- + Should HTTPS be required on connections? Defaults to true. + Should HTTPS be required on connections? Defaults to true. 
+ type: boolean + runtimeVersion: + description: |- + The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to ~1. + The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to `~1` + type: string + twitterV2: + description: A twitter_v2 block as defined below. + properties: + consumerKey: + description: |- + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + type: string + consumerSecretSettingName: + description: |- + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + type: string + type: object + unauthenticatedAction: + description: |- + The action to take for requests made without authentication. Possible values include RedirectToLoginPage, AllowAnonymous, Return401, and Return403. Defaults to RedirectToLoginPage. + The action to take for requests made without authentication. Possible values include `RedirectToLoginPage`, `AllowAnonymous`, `Return401`, and `Return403`. Defaults to `RedirectToLoginPage`. + type: string + type: object + backup: + description: A backup block as defined below. + properties: + enabled: + description: |- + Should this backup job be enabled? Defaults to true. + Should this backup job be enabled? + type: boolean + name: + description: |- + The name which should be used for this Backup. + The name which should be used for this Backup. + type: string + schedule: + description: A schedule block as defined below. + properties: + frequencyInterval: + description: |- + How often the backup should be executed (e.g. for weekly backup, this should be set to 7 and frequency_unit should be set to Day). + How often the backup should be executed (e.g. 
for weekly backup, this should be set to `7` and `frequency_unit` should be set to `Day`). + type: number + frequencyUnit: + description: |- + The unit of time for how often the backup should take place. Possible values include: Day, Hour + The unit of time for how often the backup should take place. Possible values include: `Day` and `Hour`. + type: string + keepAtLeastOneBackup: + description: |- + Should the service keep at least one backup, regardless of the age of backup? Defaults to false. + Should the service keep at least one backup, regardless of age of backup. Defaults to `false`. + type: boolean + lastExecutionTime: + description: The time the backup was last attempted. + type: string + retentionPeriodDays: + description: |- + After how many days backups should be deleted. Defaults to 30. + After how many days backups should be deleted. + type: number + startTime: + description: |- + When the schedule should start working in RFC-3339 format. + When the schedule should start working in RFC-3339 format. + type: string + type: object + type: object + clientAffinityEnabled: + description: Should Client Affinity be enabled? + type: boolean + clientCertificateEnabled: + description: Should Client Certificates be enabled? + type: boolean + clientCertificateExclusionPaths: + description: |- + Paths to exclude when using client certificates, separated by ; + Paths to exclude when using client certificates, separated by ; + type: string + clientCertificateMode: + description: The Client Certificate mode. Possible values are + Required, Optional, and OptionalInteractiveUser. This property + has no effect when client_certificate_enabled is false. Defaults + to Required. + type: string + connectionString: + description: One or more connection_string blocks as defined below. + items: + properties: + name: + description: |- + The name of the Connection String. + The name which should be used for this Connection. 
+ type: string + type: + description: |- + Type of database. Possible values include: MySQL, SQLServer, SQLAzure, Custom, NotificationHub, ServiceBus, EventHub, APIHub, DocDb, RedisCache, and PostgreSQL. + Type of database. Possible values include: `MySQL`, `SQLServer`, `SQLAzure`, `Custom`, `NotificationHub`, `ServiceBus`, `EventHub`, `APIHub`, `DocDb`, `RedisCache`, and `PostgreSQL`. + type: string + type: object + type: array + defaultHostname: + description: The default hostname of the Linux Web App. + type: string + enabled: + description: Should the Linux Web App be enabled? Defaults to + true. + type: boolean + ftpPublishBasicAuthenticationEnabled: + description: Should the default FTP Basic Authentication publishing + profile be enabled. Defaults to true. + type: boolean + hostingEnvironmentId: + description: The ID of the App Service Environment used by App + Service. + type: string + httpsOnly: + description: Should the Linux Web App require HTTPS connections. + Defaults to false. + type: boolean + id: + description: The ID of the Linux Web App. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of User Assigned Managed Identity IDs + to be assigned to this Linux Web App. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Principal ID associated with this Managed + Service Identity. + type: string + tenantId: + description: The Tenant ID associated with this Managed Service + Identity. + type: string + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Linux Web App. Possible + values are SystemAssigned, UserAssigned, and SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + keyVaultReferenceIdentityId: + description: The User Assigned Identity ID used for accessing + KeyVault secrets. 
The identity must be assigned to the application + in the identity block. For more information see - Access vaults + with a user-assigned identity. + type: string + kind: + description: The Kind value for this Linux Web App. + type: string + location: + description: The Azure Region where the Linux Web App should exist. + Changing this forces a new Linux Web App to be created. + type: string + logs: + description: A logs block as defined below. + properties: + applicationLogs: + description: A application_logs block as defined above. + properties: + azureBlobStorage: + description: A azure_blob_storage_http block as defined + below. + properties: + level: + description: 'The level at which to log. Possible + values include Error, Warning, Information, Verbose + and Off. NOTE: this field is not available for http_logs' + type: string + retentionInDays: + description: The retention period in days. A value + of 0 means no retention. + type: number + sasUrl: + description: SAS url to an Azure blob container with + read/write/list/delete permissions. + type: string + type: object + fileSystemLevel: + description: 'Log level. Possible values include: Off, + Verbose, Information, Warning, and Error.' + type: string + type: object + detailedErrorMessages: + description: Should detailed error messages be enabled? + type: boolean + failedRequestTracing: + description: Should the failed request tracing be enabled? + type: boolean + httpLogs: + description: An http_logs block as defined above. + properties: + azureBlobStorage: + description: A azure_blob_storage_http block as defined + below. + properties: + retentionInDays: + description: The retention period in days. A value + of 0 means no retention. + type: number + type: object + fileSystem: + description: A file_system block as defined above. + properties: + retentionInDays: + description: The retention period in days. A value + of 0 means no retention. 
+ type: number + retentionInMb: + description: The maximum size in megabytes that log + files can use. + type: number + type: object + type: object + type: object + outboundIpAddressList: + description: A list of outbound IP addresses - such as ["52.23.25.3", + "52.143.43.12"] + items: + type: string + type: array + outboundIpAddresses: + description: A comma separated list of outbound IP addresses - + such as 52.23.25.3,52.143.43.12. + type: string + possibleOutboundIpAddressList: + description: A list of possible outbound ip address. + items: + type: string + type: array + possibleOutboundIpAddresses: + description: A comma-separated list of outbound IP addresses - + such as 52.23.25.3,52.143.43.12,52.143.43.17 - not all of which + are necessarily in use. Superset of outbound_ip_addresses. + type: string + publicNetworkAccessEnabled: + description: Should public network access be enabled for the Web + App. Defaults to true. + type: boolean + resourceGroupName: + description: The name of the Resource Group where the Linux Web + App should exist. Changing this forces a new Linux Web App to + be created. + type: string + servicePlanId: + description: The ID of the Service Plan that this Linux App Service + will be created in. + type: string + siteConfig: + description: A site_config block as defined below. + properties: + alwaysOn: + description: If this Linux Web App is Always On enabled. Defaults + to true. + type: boolean + apiDefinitionUrl: + description: The URL to the API Definition for this Linux + Web App. + type: string + apiManagementApiId: + description: The API Management API ID this Linux Web App + is associated with. + type: string + appCommandLine: + description: The App command line to launch. + type: string + applicationStack: + description: A application_stack block as defined above. + properties: + dockerImage: + type: string + dockerImageName: + description: The docker image, including tag, to be used. + e.g. appsvc/staticsite:latest. 
+ type: string + dockerImageTag: + type: string + dockerRegistryUrl: + description: The URL of the container registry where the + docker_image_name is located. e.g. https://index.docker.io + or https://mcr.microsoft.com. This value is required + with docker_image_name. + type: string + dockerRegistryUsername: + description: The User Name to use for authentication against + the registry to pull the image. + type: string + dotnetVersion: + description: The version of .NET to use. Possible values + include 3.1, 5.0, 6.0, 7.0 and 8.0. + type: string + goVersion: + description: The version of Go to use. Possible values + include 1.18, and 1.19. + type: string + javaServer: + description: The Java server type. Possible values include + JAVA, TOMCAT, and JBOSSEAP. + type: string + javaServerVersion: + description: The Version of the java_server to use. + type: string + javaVersion: + description: The Version of Java to use. Possible values + include 8, 11, and 17. + type: string + nodeVersion: + description: The version of Node to run. Possible values + include 12-lts, 14-lts, 16-lts, 18-lts and 20-lts. This + property conflicts with java_version. + type: string + phpVersion: + description: The version of PHP to run. Possible values + are 7.4, 8.0, 8.1 and 8.2. + type: string + pythonVersion: + description: The version of Python to run. Possible values + include 3.7, 3.8, 3.9, 3.10, 3.11 and 3.12. + type: string + rubyVersion: + description: The version of Ruby to run. Possible values + include 2.6 and 2.7. + type: string + type: object + autoHealEnabled: + description: Should Auto heal rules be enabled? Required with + auto_heal_setting. + type: boolean + autoHealSetting: + description: A auto_heal_setting block as defined above. Required + with auto_heal. + properties: + action: + description: The action to take. Possible values are Allow + or Deny. Defaults to Allow. + properties: + actionType: + description: 'Predefined action to be taken to an + Auto Heal trigger. 
Possible values include: Recycle.' + type: string + minimumProcessExecutionTime: + description: The minimum amount of time in hh:mm:ss + the Linux Web App must have been running before + the defined action will be run in the event of a + trigger. + type: string + type: object + trigger: + description: A trigger block as defined below. + properties: + requests: + description: A requests block as defined above. + properties: + count: + description: The number of occurrences of the + defined status_code in the specified interval + on which to trigger this rule. + type: number + interval: + description: The time interval in the form hh:mm:ss. + type: string + type: object + slowRequest: + description: One or more slow_request blocks as defined + above. + properties: + count: + description: The number of occurrences of the + defined status_code in the specified interval + on which to trigger this rule. + type: number + interval: + description: The time interval in the form hh:mm:ss. + type: string + path: + description: The path to which this rule status + code applies. + type: string + timeTaken: + description: The threshold of time passed to qualify + as a Slow Request in hh:mm:ss. + type: string + type: object + statusCode: + description: One or more status_code blocks as defined + above. + items: + properties: + count: + description: The number of occurrences of the + defined status_code in the specified interval + on which to trigger this rule. + type: number + interval: + description: The time interval in the form hh:mm:ss. + type: string + path: + description: The path to which this rule status + code applies. + type: string + statusCodeRange: + description: The status code for this rule, + accepts single status codes and status code + ranges. e.g. 500 or 400-499. Possible values + are integers between 101 and 599 + type: string + subStatus: + description: The Request Sub Status of the Status + Code. 
+ type: number + win32StatusCode: + description: The Win32 Status Code of the Request. + type: number + type: object + type: array + type: object + type: object + containerRegistryManagedIdentityClientId: + description: The Client ID of the Managed Service Identity + to use for connections to the Azure Container Registry. + type: string + containerRegistryUseManagedIdentity: + description: Should connections for Azure Container Registry + use Managed Identity. + type: boolean + cors: + description: A cors block as defined above. + properties: + allowedOrigins: + description: |- + Specifies a list of origins that should be allowed to make cross-origin calls. + Specifies a list of origins that should be allowed to make cross-origin calls. + items: + type: string + type: array + x-kubernetes-list-type: set + supportCredentials: + description: |- + Whether CORS requests with credentials are allowed. Defaults to false + Are credentials allowed in CORS requests? Defaults to `false`. + type: boolean + type: object + defaultDocuments: + description: Specifies a list of Default Documents for the + Linux Web App. + items: + type: string + type: array + detailedErrorLoggingEnabled: + description: Should the Linux Web App be enabled? Defaults + to true. + type: boolean + ftpsState: + description: The State of FTP / FTPS service. Possible values + include AllAllowed, FtpsOnly, and Disabled. Defaults to + Disabled. + type: string + healthCheckEvictionTimeInMin: + description: |- + The amount of time in minutes that a node can be unhealthy before being removed from the load balancer. Possible values are between 2 and 10. Only valid in conjunction with health_check_path. + The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between `2` and `10`. Defaults to `10`. Only valid in conjunction with `health_check_path` + type: number + healthCheckPath: + description: The path to the Health Check. 
+ type: string + http2Enabled: + description: Should the HTTP2 be enabled? + type: boolean + ipRestriction: + description: One or more ip_restriction blocks as defined + above. + items: + properties: + action: + description: |- + The action to take. Possible values are Allow or Deny. Defaults to Allow. + The action to take. Possible values are `Allow` or `Deny`. + type: string + description: + description: |- + The Description of this IP Restriction. + The description of the IP restriction rule. + type: string + headers: + description: A headers block as defined above. + items: + properties: + xAzureFdid: + description: Specifies a list of Azure Front Door + IDs. + items: + type: string + type: array + xFdHealthProbe: + description: Specifies if a Front Door Health + Probe should be expected. The only possible + value is 1. + items: + type: string + type: array + xForwardedFor: + description: Specifies a list of addresses for + which matching should be applied. Omitting this + value means allow any. + items: + type: string + type: array + xForwardedHost: + description: Specifies a list of Hosts for which + matching should be applied. + items: + type: string + type: array + type: object + type: array + ipAddress: + description: |- + The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + type: string + name: + description: |- + The name which should be used for this Storage Account. + The name which should be used for this `ip_restriction`. + type: string + priority: + description: |- + The priority value of this ip_restriction. Defaults to 65000. + The priority value of this `ip_restriction`. + type: number + serviceTag: + description: |- + The Service Tag used for this IP Restriction. + The Service Tag used for this IP Restriction. 
+ type: string + virtualNetworkSubnetId: + description: |- + The subnet id which will be used by this Web App for regional virtual network integration. + The Virtual Network Subnet ID used for this IP Restriction. + type: string + type: object + type: array + ipRestrictionDefaultAction: + description: The Default action for traffic that does not + match any ip_restriction rule. possible values include Allow + and Deny. Defaults to Allow. + type: string + linuxFxVersion: + type: string + loadBalancingMode: + description: 'The Site load balancing. Possible values include: + WeightedRoundRobin, LeastRequests, LeastResponseTime, WeightedTotalTraffic, + RequestHash, PerSiteRoundRobin. Defaults to LeastRequests + if omitted.' + type: string + localMysqlEnabled: + description: Use Local MySQL. Defaults to false. + type: boolean + managedPipelineMode: + description: Managed pipeline mode. Possible values include + Integrated, and Classic. Defaults to Integrated. + type: string + minimumTlsVersion: + description: 'The configures the minimum version of TLS required + for SSL requests. Possible values include: 1.0, 1.1, and + 1.2. Defaults to 1.2.' + type: string + remoteDebuggingEnabled: + description: Should Remote Debugging be enabled? Defaults + to false. + type: boolean + remoteDebuggingVersion: + description: The Remote Debugging Version. Possible values + include VS2017, VS2019 and VS2022. + type: string + scmIpRestriction: + description: One or more scm_ip_restriction blocks as defined + above. + items: + properties: + action: + description: |- + The action to take. Possible values are Allow or Deny. Defaults to Allow. + The action to take. Possible values are `Allow` or `Deny`. + type: string + description: + description: |- + The Description of this IP Restriction. + The description of the IP restriction rule. + type: string + headers: + description: A headers block as defined above. 
+ items: + properties: + xAzureFdid: + description: Specifies a list of Azure Front Door + IDs. + items: + type: string + type: array + xFdHealthProbe: + description: Specifies if a Front Door Health + Probe should be expected. The only possible + value is 1. + items: + type: string + type: array + xForwardedFor: + description: Specifies a list of addresses for + which matching should be applied. Omitting this + value means allow any. + items: + type: string + type: array + xForwardedHost: + description: Specifies a list of Hosts for which + matching should be applied. + items: + type: string + type: array + type: object + type: array + ipAddress: + description: |- + The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + type: string + name: + description: |- + The name which should be used for this Storage Account. + The name which should be used for this `ip_restriction`. + type: string + priority: + description: |- + The priority value of this ip_restriction. Defaults to 65000. + The priority value of this `ip_restriction`. + type: number + serviceTag: + description: |- + The Service Tag used for this IP Restriction. + The Service Tag used for this IP Restriction. + type: string + virtualNetworkSubnetId: + description: |- + The subnet id which will be used by this Web App for regional virtual network integration. + The Virtual Network Subnet ID used for this IP Restriction. + type: string + type: object + type: array + scmIpRestrictionDefaultAction: + description: The Default action for traffic that does not + match any scm_ip_restriction rule. possible values include + Allow and Deny. Defaults to Allow. 
+ type: string + scmMinimumTlsVersion: + description: 'The configures the minimum version of TLS required + for SSL requests to the SCM site Possible values include: + 1.0, 1.1, and 1.2. Defaults to 1.2.' + type: string + scmType: + type: string + scmUseMainIpRestriction: + description: Should the Linux Web App ip_restriction configuration + be used for the SCM also. + type: boolean + use32BitWorker: + description: Should the Linux Web App use a 32-bit worker? + Defaults to true. + type: boolean + vnetRouteAllEnabled: + description: |- + Should all outbound traffic have NAT Gateways, Network Security Groups and User Defined Routes applied? Defaults to false. + Should all outbound traffic to have Virtual Network Security Groups and User Defined Routes applied? Defaults to `false`. + type: boolean + websocketsEnabled: + description: Should Web Sockets be enabled? Defaults to false. + type: boolean + workerCount: + description: The number of Workers for this Linux App Service. + type: number + type: object + stickySettings: + description: A sticky_settings block as defined below. + properties: + appSettingNames: + description: A list of app_setting names that the Linux Web + App will not swap between Slots when a swap operation is + triggered. + items: + type: string + type: array + connectionStringNames: + description: A list of connection_string names that the Linux + Web App will not swap between Slots when a swap operation + is triggered. + items: + type: string + type: array + type: object + storageAccount: + description: One or more storage_account blocks as defined below. + items: + properties: + accountName: + description: The Name of the Storage Account. + type: string + mountPath: + description: The path at which to mount the storage share. + type: string + name: + description: The name which should be used for this Storage + Account. + type: string + shareName: + description: The Name of the File Share or Container Name + for Blob storage. 
+ type: string + type: + description: The Azure Storage Type. Possible values include + AzureFiles and AzureBlob + type: string + type: object + type: array + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Linux Web App. + type: object + x-kubernetes-map-type: granular + virtualNetworkSubnetId: + description: The subnet id which will be used by this Web App + for regional virtual network integration. + type: string + webdeployPublishBasicAuthenticationEnabled: + description: Should the default WebDeploy Basic Authentication + publishing credentials enabled. Defaults to true. + type: boolean + zipDeployFile: + description: |- + The local path and filename of the Zip packaged application to deploy to this Linux Web App. + The local path and filename of the Zip packaged application to deploy to this Linux Web App. **Note:** Using this value requires either `WEBSITE_RUN_FROM_PACKAGE=1` or `SCM_DO_BUILD_DURING_DEPLOYMENT=true` to be set on the App in `app_settings`. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. 
+ type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/web.azure.upbound.io_linuxwebappslots.yaml b/package/crds/web.azure.upbound.io_linuxwebappslots.yaml index cdde0eee0..a6cf7cc8f 100644 --- a/package/crds/web.azure.upbound.io_linuxwebappslots.yaml +++ b/package/crds/web.azure.upbound.io_linuxwebappslots.yaml @@ -5138,3 +5138,4892 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: LinuxWebAppSlot is the Schema for the LinuxWebAppSlots API. Manages + a Linux Web App Slot. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: LinuxWebAppSlotSpec defines the desired state of LinuxWebAppSlot + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + appServiceId: + description: The ID of the Linux Web App this Deployment Slot + will be part of. + type: string + appServiceIdRef: + description: Reference to a LinuxWebApp in web to populate appServiceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + appServiceIdSelector: + description: Selector for a LinuxWebApp in web to populate appServiceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + appSettings: + additionalProperties: + type: string + description: A map of key-value pairs of App Settings. + type: object + x-kubernetes-map-type: granular + authSettings: + description: An auth_settings block as defined below. + properties: + activeDirectory: + description: An active_directory block as defined above. 
+ properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Active Directory. + type: string + clientSecretSecretRef: + description: |- + The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. + The Client Secret for the Client ID. Cannot be used with `client_secret_setting_name`. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The App Setting name that contains the client secret of the Client. Cannot be used with `client_secret`. + type: string + type: object + additionalLoginParameters: + additionalProperties: + type: string + description: |- + Specifies a map of login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + Specifies a map of Login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + type: object + x-kubernetes-map-type: granular + allowedExternalRedirectUrls: + description: |- + Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Linux Web App. + Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. 
+ items: + type: string + type: array + defaultProvider: + description: |- + The default authentication provider to use when multiple providers are configured. Possible values include: BuiltInAuthenticationProviderAzureActiveDirectory, BuiltInAuthenticationProviderFacebook, BuiltInAuthenticationProviderGoogle, BuiltInAuthenticationProviderMicrosoftAccount, BuiltInAuthenticationProviderTwitter, BuiltInAuthenticationProviderGithub + The default authentication provider to use when multiple providers are configured. Possible values include: `AzureActiveDirectory`, `Facebook`, `Google`, `MicrosoftAccount`, `Twitter`, `Github`. + type: string + enabled: + description: |- + Should the Authentication / Authorization feature be enabled for the Linux Web App? + Should the Authentication / Authorization feature be enabled? + type: boolean + facebook: + description: A facebook block as defined below. + properties: + appId: + description: |- + The App ID of the Facebook app used for login. + The App ID of the Facebook app used for login. + type: string + appSecretSecretRef: + description: |- + The App Secret of the Facebook app used for Facebook login. Cannot be specified with app_secret_setting_name. + The App Secret of the Facebook app used for Facebook Login. Cannot be specified with `app_secret_setting_name`. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + appSecretSettingName: + description: |- + The app setting name that contains the app_secret value used for Facebook Login. + The app setting name that contains the `app_secret` value used for Facebook Login. Cannot be specified with `app_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. 
If not specified, "wl.basic" is used as the default scope. + Specifies a list of OAuth 2.0 scopes to be requested as part of Facebook Login authentication. + items: + type: string + type: array + type: object + github: + description: A github block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the GitHub app used for login. + type: string + clientSecretSecretRef: + description: |- + The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. + The Client Secret of the GitHub app used for GitHub Login. Cannot be specified with `client_secret_setting_name`. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for GitHub Login. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + items: + type: string + type: array + type: object + google: + description: A google block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Google web application. 
+ type: string + clientSecretSecretRef: + description: |- + The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. + The client secret associated with the Google web application. Cannot be specified with `client_secret_setting_name`. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Google Login. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + Specifies a list of OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication. If not specified, "openid", "profile", and "email" are used as default scopes. + items: + type: string + type: array + type: object + issuer: + description: |- + The OpenID Connect Issuer URI that represents the entity that issues access tokens for this Linux Web App. + The OpenID Connect Issuer URI that represents the entity which issues access tokens. + type: string + microsoft: + description: A microsoft block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OAuth 2.0 client ID that was created for the app used for authentication. + type: string + clientSecretSecretRef: + description: |- + The OAuth 2.0 client secret that was created for the app used for authentication. 
Cannot be specified with client_secret_setting_name. + The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret_setting_name`. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + The list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, `wl.basic` is used as the default scope. + items: + type: string + type: array + type: object + runtimeVersion: + description: |- + The RuntimeVersion of the Authentication / Authorization feature in use for the Linux Web App. + The RuntimeVersion of the Authentication / Authorization feature in use. + type: string + tokenRefreshExtensionHours: + description: |- + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + type: number + tokenStoreEnabled: + description: |- + Should the Linux Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to false. 
Should the Linux Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to `false`.
+ properties: + allowedApplications: + description: |- + The list of allowed Applications for the Default Authorisation Policy. + The list of allowed Applications for the Default Authorisation Policy. + items: + type: string + type: array + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + items: + type: string + type: array + allowedGroups: + description: |- + The list of allowed Group Names for the Default Authorisation Policy. + The list of allowed Group Names for the Default Authorisation Policy. + items: + type: string + type: array + allowedIdentities: + description: |- + The list of allowed Identities for the Default Authorisation Policy. + The list of allowed Identities for the Default Authorisation Policy. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Active Directory. + type: string + clientSecretCertificateThumbprint: + description: |- + The thumbprint of the certificate used for signing purposes. + The thumbprint of the certificate used for signing purposes. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The App Setting name that contains the client secret of the Client. + type: string + jwtAllowedClientApplications: + description: |- + A list of Allowed Client Applications in the JWT Claim. + A list of Allowed Client Applications in the JWT Claim. + items: + type: string + type: array + jwtAllowedGroups: + description: |- + A list of Allowed Groups in the JWT Claim. + A list of Allowed Groups in the JWT Claim. 
+ items: + type: string + type: array + loginParameters: + additionalProperties: + type: string + description: |- + A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + type: object + x-kubernetes-map-type: granular + tenantAuthEndpoint: + description: |- + The Azure Tenant Endpoint for the Authenticating Tenant. e.g. https://login.microsoftonline.com/v2.0/{tenant-guid}/ + The Azure Tenant Endpoint for the Authenticating Tenant. e.g. `https://login.microsoftonline.com/v2.0/{tenant-guid}/`. + type: string + wwwAuthenticationDisabled: + description: |- + Should the www-authenticate provider should be omitted from the request? Defaults to false. + Should the www-authenticate provider should be omitted from the request? Defaults to `false` + type: boolean + type: object + appleV2: + description: An apple_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Apple web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Apple Login. + type: string + type: object + authEnabled: + description: |- + Should the AuthV2 Settings be enabled. Defaults to false. + Should the AuthV2 Settings be enabled. Defaults to `false` + type: boolean + azureStaticWebAppV2: + description: An azure_static_web_app_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Static Web App Authentication. 
+ type: string + type: object + configFilePath: + description: |- + The path to the App Auth settings. + The path to the App Auth settings. **Note:** Relative Paths are evaluated from the Site Root directory. + type: string + customOidcV2: + description: Zero or more custom_oidc_v2 blocks as defined + below. + items: + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with this Custom OIDC. + type: string + name: + description: |- + The name which should be used for this Storage Account. + The name of the Custom OIDC Authentication Provider. + type: string + nameClaimType: + description: |- + The name of the claim that contains the users name. + The name of the claim that contains the users name. + type: string + openidConfigurationEndpoint: + description: |- + The app setting name that contains the client_secret value used for the Custom OIDC Login. + The endpoint that contains all the configuration endpoints for this Custom OIDC provider. + type: string + scopes: + description: |- + The list of the scopes that should be requested while authenticating. + The list of the scopes that should be requested while authenticating. + items: + type: string + type: array + type: object + type: array + defaultProvider: + description: |- + The Default Authentication Provider to use when the unauthenticated_action is set to RedirectToLoginPage. Possible values include: apple, azureactivedirectory, facebook, github, google, twitter and the name of your custom_oidc_v2 provider. + The Default Authentication Provider to use when the `unauthenticated_action` is set to `RedirectToLoginPage`. Possible values include: `apple`, `azureactivedirectory`, `facebook`, `github`, `google`, `twitter` and the `name` of your `custom_oidc_v2` provider. 
+ type: string + excludedPaths: + description: |- + The paths which should be excluded from the unauthenticated_action when it is set to RedirectToLoginPage. + The paths which should be excluded from the `unauthenticated_action` when it is set to `RedirectToLoginPage`. + items: + type: string + type: array + facebookV2: + description: A facebook_v2 block as defined below. + properties: + appId: + description: |- + The App ID of the Facebook app used for login. + The App ID of the Facebook app used for login. + type: string + appSecretSettingName: + description: |- + The app setting name that contains the app_secret value used for Facebook Login. + The app setting name that contains the `app_secret` value used for Facebook Login. + type: string + graphApiVersion: + description: |- + The version of the Facebook API to be used while logging in. + The version of the Facebook API to be used while logging in. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of scopes to be requested as part of Facebook Login authentication. + items: + type: string + type: array + type: object + forwardProxyConvention: + description: |- + The convention used to determine the url of the request made. Possible values include NoProxy, Standard, Custom. Defaults to NoProxy. + The convention used to determine the url of the request made. Possible values include `ForwardProxyConventionNoProxy`, `ForwardProxyConventionStandard`, `ForwardProxyConventionCustom`. Defaults to `ForwardProxyConventionNoProxy` + type: string + forwardProxyCustomHostHeaderName: + description: |- + The name of the custom header containing the host of the request. + The name of the header containing the host of the request. + type: string + forwardProxyCustomSchemeHeaderName: + description: |- + The name of the custom header containing the scheme of the request. 
+ The name of the header containing the scheme of the request. + type: string + githubV2: + description: A github_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the GitHub app used for login. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for GitHub Login. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + items: + type: string + type: array + type: object + googleV2: + description: A google_v2 block as defined below. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed Audiences that will be requested as part of Google Sign-In authentication. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Google web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Google Login. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of Login scopes that will be requested as part of Google Sign-In authentication. 
+ items: + type: string + type: array + type: object + httpRouteApiPrefix: + description: |- + The prefix that should precede all the authentication and authorisation paths. Defaults to /.auth. + The prefix that should precede all the authentication and authorisation paths. Defaults to `/.auth` + type: string + login: + description: A login block as defined below. + properties: + allowedExternalRedirectUrls: + description: |- + External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. + External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. **Note:** URLs within the current domain are always implicitly allowed. + items: + type: string + type: array + cookieExpirationConvention: + description: |- + The method by which cookies expire. Possible values include: FixedTime, and IdentityProviderDerived. Defaults to FixedTime. + The method by which cookies expire. Possible values include: `FixedTime`, and `IdentityProviderDerived`. Defaults to `FixedTime`. + type: string + cookieExpirationTime: + description: |- + The time after the request is made when the session cookie should expire. Defaults to 08:00:00. + The time after the request is made when the session cookie should expire. Defaults to `08:00:00`. + type: string + logoutEndpoint: + description: |- + The endpoint to which logout requests should be made. + The endpoint to which logout requests should be made. + type: string + nonceExpirationTime: + description: |- + The time after the request is made when the nonce should expire. Defaults to 00:05:00. + The time after the request is made when the nonce should expire. Defaults to `00:05:00`. 
+ type: string + preserveUrlFragmentsForLogins: + description: |- + Should the fragments from the request be preserved after the login request is made. Defaults to false. + Should the fragments from the request be preserved after the login request is made. Defaults to `false`. + type: boolean + tokenRefreshExtensionTime: + description: |- + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + type: number + tokenStoreEnabled: + description: |- + Should the Token Store configuration Enabled. Defaults to false + Should the Token Store configuration Enabled. Defaults to `false` + type: boolean + tokenStorePath: + description: |- + The directory path in the App Filesystem in which the tokens will be stored. + The directory path in the App Filesystem in which the tokens will be stored. + type: string + tokenStoreSasSettingName: + description: |- + The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + type: string + validateNonce: + description: |- + Should the nonce be validated while completing the login flow. Defaults to true. + Should the nonce be validated while completing the login flow. Defaults to `true`. + type: boolean + type: object + microsoftV2: + description: A microsoft_v2 block as defined below. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. 
+ items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OAuth 2.0 client ID that was created for the app used for authentication. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + The list of Login scopes that will be requested as part of Microsoft Account authentication. + items: + type: string + type: array + type: object + requireAuthentication: + description: |- + Should the authentication flow be used for all requests. + Should the authentication flow be used for all requests. + type: boolean + requireHttps: + description: |- + Should HTTPS be required on connections? Defaults to true. + Should HTTPS be required on connections? Defaults to true. + type: boolean + runtimeVersion: + description: |- + The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to ~1. + The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to `~1` + type: string + twitterV2: + description: A twitter_v2 block as defined below. + properties: + consumerKey: + description: |- + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + type: string + consumerSecretSettingName: + description: |- + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. 
+ type: string + type: object + unauthenticatedAction: + description: |- + The action to take for requests made without authentication. Possible values include RedirectToLoginPage, AllowAnonymous, Return401, and Return403. Defaults to RedirectToLoginPage. + The action to take for requests made without authentication. Possible values include `RedirectToLoginPage`, `AllowAnonymous`, `Return401`, and `Return403`. Defaults to `RedirectToLoginPage`. + type: string + type: object + backup: + description: A backup block as defined below. + properties: + enabled: + description: |- + Should this backup job be enabled? Defaults to true. + Should this backup job be enabled? + type: boolean + name: + description: |- + The name which should be used for this Backup. + The name which should be used for this Backup. + type: string + schedule: + description: An schedule block as defined below. + properties: + frequencyInterval: + description: |- + How often the backup should be executed (e.g. for weekly backup, this should be set to 7 and frequency_unit should be set to Day). + How often the backup should be executed (e.g. for weekly backup, this should be set to `7` and `frequency_unit` should be set to `Day`). + type: number + frequencyUnit: + description: |- + The unit of time for how often the backup should take place. Possible values include: Day, Hour + The unit of time for how often the backup should take place. Possible values include: `Day` and `Hour`. + type: string + keepAtLeastOneBackup: + description: |- + Should the service keep at least one backup, regardless of the age of backup? Defaults to false. + Should the service keep at least one backup, regardless of age of backup. Defaults to `false`. + type: boolean + retentionPeriodDays: + description: |- + After how many days backups should be deleted. Defaults to 30. + After how many days backups should be deleted. + type: number + startTime: + description: |- + When the schedule should start working in RFC-3339 format. 
+ When the schedule should start working in RFC-3339 format. + type: string + type: object + storageAccountUrlSecretRef: + description: |- + The SAS URL to the container. + The SAS URL to the container. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + required: + - storageAccountUrlSecretRef + type: object + clientAffinityEnabled: + description: Should Client Affinity be enabled? + type: boolean + clientCertificateEnabled: + description: Should Client Certificates be enabled? + type: boolean + clientCertificateExclusionPaths: + description: |- + Paths to exclude when using client certificates, separated by ; + Paths to exclude when using client certificates, separated by ; + type: string + clientCertificateMode: + description: The Client Certificate mode. Possible values are + Required, Optional, and OptionalInteractiveUser. This property + has no effect when client_cert_enabled is false. Defaults to + Required. + type: string + connectionString: + description: One or more connection_string blocks as defined below. + items: + properties: + name: + description: |- + The name of the Connection String. + The name which should be used for this Connection. + type: string + type: + description: |- + Type of database. Possible values include APIHub, Custom, DocDb, EventHub, MySQL, NotificationHub, PostgreSQL, RedisCache, ServiceBus, SQLAzure, and SQLServer. + Type of database. Possible values include: `MySQL`, `SQLServer`, `SQLAzure`, `Custom`, `NotificationHub`, `ServiceBus`, `EventHub`, `APIHub`, `DocDb`, `RedisCache`, and `PostgreSQL`. + type: string + valueSecretRef: + description: |- + The connection string value. + The connection string value. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. 
+ type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + required: + - valueSecretRef + type: object + type: array + enabled: + description: Should the Linux Web App be enabled? Defaults to + true. + type: boolean + ftpPublishBasicAuthenticationEnabled: + description: Should the default FTP Basic Authentication publishing + profile be enabled. Defaults to true. + type: boolean + httpsOnly: + description: Should the Linux Web App require HTTPS connections. + Defaults to false. + type: boolean + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of User Assigned Managed Identity IDs + to be assigned to this Linux Web App Slot. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Linux Web App Slot. Possible + values are SystemAssigned, UserAssigned and SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + keyVaultReferenceIdentityId: + description: The User Assigned Identity ID used for accessing + KeyVault secrets. The identity must be assigned to the application + in the identity block. For more information see - Access vaults + with a user-assigned identity. + type: string + logs: + description: A logs block as defined below. + properties: + applicationLogs: + description: A application_logs block as defined above. + properties: + azureBlobStorage: + description: A azure_blob_storage_http block as defined + above. + properties: + level: + description: 'The level at which to log. Possible + values include Error, Warning, Information, Verbose + and Off. NOTE: this field is not available for http_logs' + type: string + retentionInDays: + description: The retention period in days. A values + of 0 means no retention. 
+ type: number + sasUrl: + description: SAS url to an Azure blob container with + read/write/list/delete permissions. + type: string + type: object + fileSystemLevel: + description: Log level. Possible values include Off, Verbose, + Information, Warning, and Error. + type: string + type: object + detailedErrorMessages: + description: Should detailed error messages be enabled? + type: boolean + failedRequestTracing: + description: Should the failed request tracing be enabled? + type: boolean + httpLogs: + description: An http_logs block as defined above. + properties: + azureBlobStorage: + description: A azure_blob_storage_http block as defined + above. + properties: + retentionInDays: + description: The retention period in days. A values + of 0 means no retention. + type: number + sasurlSecretRef: + description: SAS url to an Azure blob container with + read/write/list/delete permissions. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + required: + - sasurlSecretRef + type: object + fileSystem: + description: A file_system block as defined above. + properties: + retentionInDays: + description: The retention period in days. A values + of 0 means no retention. + type: number + retentionInMb: + description: The maximum size in megabytes that log + files can use. + type: number + type: object + type: object + type: object + name: + description: The name which should be used for this Linux Web + App Slot. Changing this forces a new Linux Web App Slot to be + created. + type: string + publicNetworkAccessEnabled: + description: Should public network access be enabled for the Web + App. Defaults to true. + type: boolean + servicePlanId: + description: The ID of the Service Plan in which to run this slot. 
+ If not specified the same Service Plan as the Linux Web App + will be used. + type: string + siteConfig: + description: A site_config block as defined below. + properties: + alwaysOn: + description: If this Linux Web App is Always On enabled. Defaults + to true. + type: boolean + apiDefinitionUrl: + description: The URL to the API Definition for this Linux + Web App Slot. + type: string + apiManagementApiId: + description: The API Management API ID this Linux Web App + Slot is associated with. + type: string + appCommandLine: + description: The App command line to launch. + type: string + applicationStack: + description: A application_stack block as defined above. + properties: + dockerImage: + type: string + dockerImageName: + description: The docker image, including tag, to be used. + e.g. appsvc/staticsite:latest. + type: string + dockerImageTag: + type: string + dockerRegistryPasswordSecretRef: + description: The User Name to use for authentication against + the registry to pull the image. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + dockerRegistryUrl: + description: The URL of the container registry where the + docker_image_name is located. e.g. https://index.docker.io + or https://mcr.microsoft.com. This value is required + with docker_image_name. + type: string + dockerRegistryUsername: + description: The User Name to use for authentication against + the registry to pull the image. + type: string + dotnetVersion: + description: The version of .NET to use. Possible values + include 3.1, 5.0, 6.0, 7.0 and 8.0. + type: string + goVersion: + description: The version of Go to use. Possible values + include 1.18, and 1.19. + type: string + javaServer: + description: The Java server type. Possible values include + JAVA, TOMCAT, and JBOSSEAP. 
+ type: string + javaServerVersion: + description: The Version of the java_server to use. + type: string + javaVersion: + description: The Version of Java to use. Possible values + include 8, 11, and 17. + type: string + nodeVersion: + description: The version of Node to run. Possible values + are 12-lts, 14-lts, 16-lts, 18-lts and 20-lts. This + property conflicts with java_version. + type: string + phpVersion: + description: The version of PHP to run. Possible values + are 7.4, 8.0, 8.1 and 8.2. + type: string + pythonVersion: + description: The version of Python to run. Possible values + include 3.7, 3.8, 3.9, 3.10, 3.11 and 3.12. + type: string + rubyVersion: + description: The version of Ruby to run. Possible values + include 2.6 and 2.7. + type: string + type: object + autoHealEnabled: + description: Should Auto heal rules be enabled? Required with + auto_heal_setting. + type: boolean + autoHealSetting: + description: A auto_heal_setting block as defined above. Required + with auto_heal. + properties: + action: + description: The action to take. Possible values are Allow + or Deny. Defaults to Allow. + properties: + actionType: + description: 'Predefined action to be taken to an + Auto Heal trigger. Possible values include: Recycle.' + type: string + minimumProcessExecutionTime: + description: The minimum amount of time in hh:mm:ss + the Linux Web App must have been running before + the defined action will be run in the event of a + trigger. + type: string + type: object + trigger: + description: A trigger block as defined below. + properties: + requests: + description: A requests block as defined above. + properties: + count: + description: The number of occurrences of the + defined status_code in the specified interval + on which to trigger this rule. + type: number + interval: + description: The time interval in the form hh:mm:ss. + type: string + type: object + slowRequest: + description: One or more slow_request blocks as defined + above. 
+ properties: + count: + description: The number of occurrences of the + defined status_code in the specified interval + on which to trigger this rule. + type: number + interval: + description: The time interval in the form hh:mm:ss. + type: string + path: + description: The path to which this rule status + code applies. + type: string + timeTaken: + description: The threshold of time passed to qualify + as a Slow Request in hh:mm:ss. + type: string + type: object + statusCode: + description: One or more status_code blocks as defined + above. + items: + properties: + count: + description: The number of occurrences of the + defined status_code in the specified interval + on which to trigger this rule. + type: number + interval: + description: The time interval in the form hh:mm:ss. + type: string + path: + description: The path to which this rule status + code applies. + type: string + statusCodeRange: + description: The status code for this rule, + accepts single status codes and status code + ranges. e.g. 500 or 400-499. Possible values + are integers between 101 and 599 + type: string + subStatus: + description: The Request Sub Status of the Status + Code. + type: number + win32StatusCode: + description: The Win32 Status Code of the Request. + type: number + type: object + type: array + type: object + type: object + autoSwapSlotName: + description: The Linux Web App Slot Name to automatically + swap to when deployment to that slot is successfully completed. + type: string + containerRegistryManagedIdentityClientId: + description: The Client ID of the Managed Service Identity + to use for connections to the Azure Container Registry. + type: string + containerRegistryUseManagedIdentity: + description: Should connections for Azure Container Registry + use Managed Identity. + type: boolean + cors: + description: A cors block as defined above. 
+ properties: + allowedOrigins: + description: |- + Specifies a list of origins that should be allowed to make cross-origin calls. + Specifies a list of origins that should be allowed to make cross-origin calls. + items: + type: string + type: array + x-kubernetes-list-type: set + supportCredentials: + description: |- + Whether CORS requests with credentials are allowed. Defaults to false + Are credentials allowed in CORS requests? Defaults to `false`. + type: boolean + type: object + defaultDocuments: + description: Specifies a list of Default Documents for the + Linux Web App. + items: + type: string + type: array + ftpsState: + description: The State of FTP / FTPS service. Possible values + include AllAllowed, FtpsOnly, and Disabled. Defaults to + Disabled. + type: string + healthCheckEvictionTimeInMin: + description: |- + The amount of time in minutes that a node can be unhealthy before being removed from the load balancer. Possible values are between 2 and 10. Only valid in conjunction with health_check_path. + The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between `2` and `10`. Defaults to `10`. Only valid in conjunction with `health_check_path` + type: number + healthCheckPath: + description: The path to the Health Check. + type: string + http2Enabled: + description: Should the HTTP2 be enabled? + type: boolean + ipRestriction: + description: One or more ip_restriction blocks as defined + above. + items: + properties: + action: + description: |- + The action to take. Possible values are Allow or Deny. Defaults to Allow. + The action to take. Possible values are `Allow` or `Deny`. + type: string + description: + description: |- + The Description of this IP Restriction. + The description of the IP restriction rule. + type: string + headers: + description: A headers block as defined above. + items: + properties: + xAzureFdid: + description: Specifies a list of Azure Front Door + IDs. 
+ items: + type: string + type: array + xFdHealthProbe: + description: Specifies if a Front Door Health + Probe should be expected. The only possible + value is 1. + items: + type: string + type: array + xForwardedFor: + description: Specifies a list of addresses for + which matching should be applied. Omitting this + value means allow any. + items: + type: string + type: array + xForwardedHost: + description: Specifies a list of Hosts for which + matching should be applied. + items: + type: string + type: array + type: object + type: array + ipAddress: + description: |- + The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + type: string + name: + description: |- + The name which should be used for this Storage Account. + The name which should be used for this `ip_restriction`. + type: string + priority: + description: |- + The priority value of this ip_restriction. Defaults to 65000. + The priority value of this `ip_restriction`. + type: number + serviceTag: + description: |- + The Service Tag used for this IP Restriction. + The Service Tag used for this IP Restriction. + type: string + virtualNetworkSubnetId: + description: |- + The subnet id which will be used by this Web App Slot for regional virtual network integration. + The Virtual Network Subnet ID used for this IP Restriction. + type: string + virtualNetworkSubnetIdRef: + description: Reference to a Subnet in network to populate + virtualNetworkSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkSubnetIdSelector: + description: Selector for a Subnet in network to populate + virtualNetworkSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + ipRestrictionDefaultAction: + description: The Default action for traffic that does not + match any ip_restriction rule. 
possible values include Allow + and Deny. Defaults to Allow. + type: string + loadBalancingMode: + description: 'The Site load balancing. Possible values include: + WeightedRoundRobin, LeastRequests, LeastResponseTime, WeightedTotalTraffic, + RequestHash, PerSiteRoundRobin. Defaults to LeastRequests + if omitted.' + type: string + localMysqlEnabled: + description: Use Local MySQL. Defaults to false. + type: boolean + managedPipelineMode: + description: 'Managed pipeline mode. Possible values include: + Integrated, Classic. Defaults to Integrated.' + type: string + minimumTlsVersion: + description: 'The configures the minimum version of TLS required + for SSL requests. Possible values include: 1.0, 1.1, and + 1.2. Defaults to 1.2.' + type: string + remoteDebuggingEnabled: + description: Should Remote Debugging be enabled? Defaults + to false. + type: boolean + remoteDebuggingVersion: + description: The Remote Debugging Version. Possible values + include VS2017 and VS2019 + type: string + scmIpRestriction: + description: One or more scm_ip_restriction blocks as defined + above. + items: + properties: + action: + description: |- + The action to take. Possible values are Allow or Deny. Defaults to Allow. + The action to take. Possible values are `Allow` or `Deny`. + type: string + description: + description: |- + The Description of this IP Restriction. + The description of the IP restriction rule. + type: string + headers: + description: A headers block as defined above. + items: + properties: + xAzureFdid: + description: Specifies a list of Azure Front Door + IDs. + items: + type: string + type: array + xFdHealthProbe: + description: Specifies if a Front Door Health + Probe should be expected. The only possible + value is 1. + items: + type: string + type: array + xForwardedFor: + description: Specifies a list of addresses for + which matching should be applied. Omitting this + value means allow any. 
+ items: + type: string + type: array + xForwardedHost: + description: Specifies a list of Hosts for which + matching should be applied. + items: + type: string + type: array + type: object + type: array + ipAddress: + description: |- + The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + type: string + name: + description: |- + The name which should be used for this Storage Account. + The name which should be used for this `ip_restriction`. + type: string + priority: + description: |- + The priority value of this ip_restriction. Defaults to 65000. + The priority value of this `ip_restriction`. + type: number + serviceTag: + description: |- + The Service Tag used for this IP Restriction. + The Service Tag used for this IP Restriction. + type: string + virtualNetworkSubnetId: + description: |- + The subnet id which will be used by this Web App Slot for regional virtual network integration. + The Virtual Network Subnet ID used for this IP Restriction. + type: string + virtualNetworkSubnetIdRef: + description: Reference to a Subnet in network to populate + virtualNetworkSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkSubnetIdSelector: + description: Selector for a Subnet in network to populate + virtualNetworkSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + scmIpRestrictionDefaultAction: + description: The Default action for traffic that does not + match any scm_ip_restriction rule. possible values include + Allow and Deny. Defaults to Allow. + type: string + scmMinimumTlsVersion: + description: 'The configures the minimum version of TLS required + for SSL requests to the SCM site Possible values include: + 1.0, 1.1, and 1.2. Defaults to 1.2.' 
+ type: string + scmUseMainIpRestriction: + description: Should the Linux Web App ip_restriction configuration + be used for the SCM also. + type: boolean + use32BitWorker: + description: Should the Linux Web App use a 32-bit worker? + Defaults to true. + type: boolean + vnetRouteAllEnabled: + description: |- + Should all outbound traffic have NAT Gateways, Network Security Groups and User Defined Routes applied? Defaults to false. + Should all outbound traffic to have Virtual Network Security Groups and User Defined Routes applied? Defaults to `false`. + type: boolean + websocketsEnabled: + description: Should Web Sockets be enabled? Defaults to false. + type: boolean + workerCount: + description: The number of Workers for this Linux App Service + Slot. + type: number + type: object + storageAccount: + description: One or more storage_account blocks as defined below. + items: + properties: + accessKeySecretRef: + description: The Access key for the storage account. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + accountName: + description: The Name of the Storage Account. + type: string + mountPath: + description: The path at which to mount the storage share. + type: string + name: + description: The name which should be used for this Storage + Account. + type: string + shareName: + description: The Name of the File Share or Container Name + for Blob storage. + type: string + type: + description: The Azure Storage Type. Possible values include + AzureFiles and AzureBlob + type: string + required: + - accessKeySecretRef + type: object + type: array + tags: + additionalProperties: + type: string + description: A mapping of tags that should be assigned to the + Linux Web App. 
+ type: object + x-kubernetes-map-type: granular + virtualNetworkSubnetId: + description: The subnet id which will be used by this Web App + Slot for regional virtual network integration. + type: string + virtualNetworkSubnetIdRef: + description: Reference to a Subnet in network to populate virtualNetworkSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkSubnetIdSelector: + description: Selector for a Subnet in network to populate virtualNetworkSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + webdeployPublishBasicAuthenticationEnabled: + description: Should the default WebDeploy Basic Authentication + publishing credentials enabled. Defaults to true. + type: boolean + zipDeployFile: + description: |- + The local path and filename of the Zip packaged application to deploy to this Linux Web App. + The local path and filename of the Zip packaged application to deploy to this Windows Web App. **Note:** Using this value requires `WEBSITE_RUN_FROM_PACKAGE=1` on the App in `app_settings`. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + appServiceId: + description: The ID of the Linux Web App this Deployment Slot + will be part of. + type: string + appServiceIdRef: + description: Reference to a LinuxWebApp in web to populate appServiceId. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + appServiceIdSelector: + description: Selector for a LinuxWebApp in web to populate appServiceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + appSettings: + additionalProperties: + type: string + description: A map of key-value pairs of App Settings. + type: object + x-kubernetes-map-type: granular + authSettings: + description: An auth_settings block as defined below. + properties: + activeDirectory: + description: An active_directory block as defined above. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Active Directory. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The App Setting name that contains the client secret of the Client. Cannot be used with `client_secret`. + type: string + type: object + additionalLoginParameters: + additionalProperties: + type: string + description: |- + Specifies a map of login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + Specifies a map of Login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + type: object + x-kubernetes-map-type: granular + allowedExternalRedirectUrls: + description: |- + Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Linux Web App. + Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. 
+ items: + type: string + type: array + defaultProvider: + description: |- + The default authentication provider to use when multiple providers are configured. Possible values include: BuiltInAuthenticationProviderAzureActiveDirectory, BuiltInAuthenticationProviderFacebook, BuiltInAuthenticationProviderGoogle, BuiltInAuthenticationProviderMicrosoftAccount, BuiltInAuthenticationProviderTwitter, BuiltInAuthenticationProviderGithub + The default authentication provider to use when multiple providers are configured. Possible values include: `AzureActiveDirectory`, `Facebook`, `Google`, `MicrosoftAccount`, `Twitter`, `Github`. + type: string + enabled: + description: |- + Should the Authentication / Authorization feature be enabled for the Linux Web App? + Should the Authentication / Authorization feature be enabled? + type: boolean + facebook: + description: A facebook block as defined below. + properties: + appId: + description: |- + The App ID of the Facebook app used for login. + The App ID of the Facebook app used for login. + type: string + appSecretSettingName: + description: |- + The app setting name that contains the app_secret value used for Facebook Login. + The app setting name that contains the `app_secret` value used for Facebook Login. Cannot be specified with `app_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + Specifies a list of OAuth 2.0 scopes to be requested as part of Facebook Login authentication. + items: + type: string + type: array + type: object + github: + description: A github block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the GitHub app used for login. 
+ type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for GitHub Login. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + items: + type: string + type: array + type: object + google: + description: A google block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Google web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Google Login. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + Specifies a list of OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication. If not specified, "openid", "profile", and "email" are used as default scopes. + items: + type: string + type: array + type: object + issuer: + description: |- + The OpenID Connect Issuer URI that represents the entity that issues access tokens for this Linux Web App. + The OpenID Connect Issuer URI that represents the entity which issues access tokens. + type: string + microsoft: + description: A microsoft block as defined below. 
+ properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OAuth 2.0 client ID that was created for the app used for authentication. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + The list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, `wl.basic` is used as the default scope. + items: + type: string + type: array + type: object + runtimeVersion: + description: |- + The RuntimeVersion of the Authentication / Authorization feature in use for the Linux Web App. + The RuntimeVersion of the Authentication / Authorization feature in use. + type: string + tokenRefreshExtensionHours: + description: |- + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + type: number + tokenStoreEnabled: + description: |- + Should the Linux Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to false. + Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to `false`. + type: boolean + twitter: + description: A twitter block as defined below. 
+ properties: + consumerKey: + description: |- + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + type: string + consumerSecretSettingName: + description: |- + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret`. + type: string + type: object + unauthenticatedClientAction: + description: |- + The action to take when an unauthenticated client attempts to access the app. Possible values include: RedirectToLoginPage, AllowAnonymous. + The action to take when an unauthenticated client attempts to access the app. Possible values include: `RedirectToLoginPage`, `AllowAnonymous`. + type: string + type: object + authSettingsV2: + description: An auth_settings_v2 block as defined below. + properties: + activeDirectoryV2: + description: An active_directory_v2 block as defined below. + properties: + allowedApplications: + description: |- + The list of allowed Applications for the Default Authorisation Policy. + The list of allowed Applications for the Default Authorisation Policy. + items: + type: string + type: array + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + items: + type: string + type: array + allowedGroups: + description: |- + The list of allowed Group Names for the Default Authorisation Policy. + The list of allowed Group Names for the Default Authorisation Policy. + items: + type: string + type: array + allowedIdentities: + description: |- + The list of allowed Identities for the Default Authorisation Policy. 
+ The list of allowed Identities for the Default Authorisation Policy. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Active Directory. + type: string + clientSecretCertificateThumbprint: + description: |- + The thumbprint of the certificate used for signing purposes. + The thumbprint of the certificate used for signing purposes. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The App Setting name that contains the client secret of the Client. + type: string + jwtAllowedClientApplications: + description: |- + A list of Allowed Client Applications in the JWT Claim. + A list of Allowed Client Applications in the JWT Claim. + items: + type: string + type: array + jwtAllowedGroups: + description: |- + A list of Allowed Groups in the JWT Claim. + A list of Allowed Groups in the JWT Claim. + items: + type: string + type: array + loginParameters: + additionalProperties: + type: string + description: |- + A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + type: object + x-kubernetes-map-type: granular + tenantAuthEndpoint: + description: |- + The Azure Tenant Endpoint for the Authenticating Tenant. e.g. https://login.microsoftonline.com/v2.0/{tenant-guid}/ + The Azure Tenant Endpoint for the Authenticating Tenant. e.g. `https://login.microsoftonline.com/v2.0/{tenant-guid}/`. + type: string + wwwAuthenticationDisabled: + description: |- + Should the www-authenticate provider should be omitted from the request? Defaults to false. + Should the www-authenticate provider should be omitted from the request? 
Defaults to `false` + type: boolean + type: object + appleV2: + description: An apple_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Apple web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Apple Login. + type: string + type: object + authEnabled: + description: |- + Should the AuthV2 Settings be enabled. Defaults to false. + Should the AuthV2 Settings be enabled. Defaults to `false` + type: boolean + azureStaticWebAppV2: + description: An azure_static_web_app_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Static Web App Authentication. + type: string + type: object + configFilePath: + description: |- + The path to the App Auth settings. + The path to the App Auth settings. **Note:** Relative Paths are evaluated from the Site Root directory. + type: string + customOidcV2: + description: Zero or more custom_oidc_v2 blocks as defined + below. + items: + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with this Custom OIDC. + type: string + name: + description: |- + The name which should be used for this Storage Account. + The name of the Custom OIDC Authentication Provider. + type: string + nameClaimType: + description: |- + The name of the claim that contains the users name. + The name of the claim that contains the users name. 
+ type: string + openidConfigurationEndpoint: + description: |- + The app setting name that contains the client_secret value used for the Custom OIDC Login. + The endpoint that contains all the configuration endpoints for this Custom OIDC provider. + type: string + scopes: + description: |- + The list of the scopes that should be requested while authenticating. + The list of the scopes that should be requested while authenticating. + items: + type: string + type: array + type: object + type: array + defaultProvider: + description: |- + The Default Authentication Provider to use when the unauthenticated_action is set to RedirectToLoginPage. Possible values include: apple, azureactivedirectory, facebook, github, google, twitter and the name of your custom_oidc_v2 provider. + The Default Authentication Provider to use when the `unauthenticated_action` is set to `RedirectToLoginPage`. Possible values include: `apple`, `azureactivedirectory`, `facebook`, `github`, `google`, `twitter` and the `name` of your `custom_oidc_v2` provider. + type: string + excludedPaths: + description: |- + The paths which should be excluded from the unauthenticated_action when it is set to RedirectToLoginPage. + The paths which should be excluded from the `unauthenticated_action` when it is set to `RedirectToLoginPage`. + items: + type: string + type: array + facebookV2: + description: A facebook_v2 block as defined below. + properties: + appId: + description: |- + The App ID of the Facebook app used for login. + The App ID of the Facebook app used for login. + type: string + appSecretSettingName: + description: |- + The app setting name that contains the app_secret value used for Facebook Login. + The app setting name that contains the `app_secret` value used for Facebook Login. + type: string + graphApiVersion: + description: |- + The version of the Facebook API to be used while logging in. + The version of the Facebook API to be used while logging in. 
+ type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of scopes to be requested as part of Facebook Login authentication. + items: + type: string + type: array + type: object + forwardProxyConvention: + description: |- + The convention used to determine the url of the request made. Possible values include NoProxy, Standard, Custom. Defaults to NoProxy. + The convention used to determine the url of the request made. Possible values include `ForwardProxyConventionNoProxy`, `ForwardProxyConventionStandard`, `ForwardProxyConventionCustom`. Defaults to `ForwardProxyConventionNoProxy` + type: string + forwardProxyCustomHostHeaderName: + description: |- + The name of the custom header containing the host of the request. + The name of the header containing the host of the request. + type: string + forwardProxyCustomSchemeHeaderName: + description: |- + The name of the custom header containing the scheme of the request. + The name of the header containing the scheme of the request. + type: string + githubV2: + description: A github_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the GitHub app used for login. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for GitHub Login. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + items: + type: string + type: array + type: object + googleV2: + description: A google_v2 block as defined below. 
+ properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed Audiences that will be requested as part of Google Sign-In authentication. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Google web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Google Login. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of Login scopes that will be requested as part of Google Sign-In authentication. + items: + type: string + type: array + type: object + httpRouteApiPrefix: + description: |- + The prefix that should precede all the authentication and authorisation paths. Defaults to /.auth. + The prefix that should precede all the authentication and authorisation paths. Defaults to `/.auth` + type: string + login: + description: A login block as defined below. + properties: + allowedExternalRedirectUrls: + description: |- + External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. + External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. **Note:** URLs within the current domain are always implicitly allowed. + items: + type: string + type: array + cookieExpirationConvention: + description: |- + The method by which cookies expire. 
Possible values include: FixedTime, and IdentityProviderDerived. Defaults to FixedTime. + The method by which cookies expire. Possible values include: `FixedTime`, and `IdentityProviderDerived`. Defaults to `FixedTime`. + type: string + cookieExpirationTime: + description: |- + The time after the request is made when the session cookie should expire. Defaults to 08:00:00. + The time after the request is made when the session cookie should expire. Defaults to `08:00:00`. + type: string + logoutEndpoint: + description: |- + The endpoint to which logout requests should be made. + The endpoint to which logout requests should be made. + type: string + nonceExpirationTime: + description: |- + The time after the request is made when the nonce should expire. Defaults to 00:05:00. + The time after the request is made when the nonce should expire. Defaults to `00:05:00`. + type: string + preserveUrlFragmentsForLogins: + description: |- + Should the fragments from the request be preserved after the login request is made. Defaults to false. + Should the fragments from the request be preserved after the login request is made. Defaults to `false`. + type: boolean + tokenRefreshExtensionTime: + description: |- + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + type: number + tokenStoreEnabled: + description: |- + Should the Token Store configuration Enabled. Defaults to false + Should the Token Store configuration Enabled. Defaults to `false` + type: boolean + tokenStorePath: + description: |- + The directory path in the App Filesystem in which the tokens will be stored. + The directory path in the App Filesystem in which the tokens will be stored. 
+ type: string + tokenStoreSasSettingName: + description: |- + The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + type: string + validateNonce: + description: |- + Should the nonce be validated while completing the login flow. Defaults to true. + Should the nonce be validated while completing the login flow. Defaults to `true`. + type: boolean + type: object + microsoftV2: + description: A microsoft_v2 block as defined below. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OAuth 2.0 client ID that was created for the app used for authentication. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + The list of Login scopes that will be requested as part of Microsoft Account authentication. + items: + type: string + type: array + type: object + requireAuthentication: + description: |- + Should the authentication flow be used for all requests. + Should the authentication flow be used for all requests. + type: boolean + requireHttps: + description: |- + Should HTTPS be required on connections? Defaults to true. + Should HTTPS be required on connections? Defaults to true. 
+ type: boolean + runtimeVersion: + description: |- + The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to ~1. + The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to `~1` + type: string + twitterV2: + description: A twitter_v2 block as defined below. + properties: + consumerKey: + description: |- + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + type: string + consumerSecretSettingName: + description: |- + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + type: string + type: object + unauthenticatedAction: + description: |- + The action to take for requests made without authentication. Possible values include RedirectToLoginPage, AllowAnonymous, Return401, and Return403. Defaults to RedirectToLoginPage. + The action to take for requests made without authentication. Possible values include `RedirectToLoginPage`, `AllowAnonymous`, `Return401`, and `Return403`. Defaults to `RedirectToLoginPage`. + type: string + type: object + backup: + description: A backup block as defined below. + properties: + enabled: + description: |- + Should this backup job be enabled? Defaults to true. + Should this backup job be enabled? + type: boolean + name: + description: |- + The name which should be used for this Backup. + The name which should be used for this Backup. + type: string + schedule: + description: An schedule block as defined below. + properties: + frequencyInterval: + description: |- + How often the backup should be executed (e.g. for weekly backup, this should be set to 7 and frequency_unit should be set to Day). + How often the backup should be executed (e.g. 
for weekly backup, this should be set to `7` and `frequency_unit` should be set to `Day`). + type: number + frequencyUnit: + description: |- + The unit of time for how often the backup should take place. Possible values include: Day, Hour + The unit of time for how often the backup should take place. Possible values include: `Day` and `Hour`. + type: string + keepAtLeastOneBackup: + description: |- + Should the service keep at least one backup, regardless of the age of backup? Defaults to false. + Should the service keep at least one backup, regardless of age of backup. Defaults to `false`. + type: boolean + retentionPeriodDays: + description: |- + After how many days backups should be deleted. Defaults to 30. + After how many days backups should be deleted. + type: number + startTime: + description: |- + When the schedule should start working in RFC-3339 format. + When the schedule should start working in RFC-3339 format. + type: string + type: object + type: object + clientAffinityEnabled: + description: Should Client Affinity be enabled? + type: boolean + clientCertificateEnabled: + description: Should Client Certificates be enabled? + type: boolean + clientCertificateExclusionPaths: + description: |- + Paths to exclude when using client certificates, separated by ; + Paths to exclude when using client certificates, separated by ; + type: string + clientCertificateMode: + description: The Client Certificate mode. Possible values are + Required, Optional, and OptionalInteractiveUser. This property + has no effect when client_cert_enabled is false. Defaults to + Required. + type: string + connectionString: + description: One or more connection_string blocks as defined below. + items: + properties: + name: + description: |- + The name of the Connection String. + The name which should be used for this Connection. + type: string + type: + description: |- + Type of database. 
Possible values include APIHub, Custom, DocDb, EventHub, MySQL, NotificationHub, PostgreSQL, RedisCache, ServiceBus, SQLAzure, and SQLServer. + Type of database. Possible values include: `MySQL`, `SQLServer`, `SQLAzure`, `Custom`, `NotificationHub`, `ServiceBus`, `EventHub`, `APIHub`, `DocDb`, `RedisCache`, and `PostgreSQL`. + type: string + type: object + type: array + enabled: + description: Should the Linux Web App be enabled? Defaults to + true. + type: boolean + ftpPublishBasicAuthenticationEnabled: + description: Should the default FTP Basic Authentication publishing + profile be enabled. Defaults to true. + type: boolean + httpsOnly: + description: Should the Linux Web App require HTTPS connections. + Defaults to false. + type: boolean + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of User Assigned Managed Identity IDs + to be assigned to this Linux Web App Slot. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Linux Web App Slot. Possible + values are SystemAssigned, UserAssigned and SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + keyVaultReferenceIdentityId: + description: The User Assigned Identity ID used for accessing + KeyVault secrets. The identity must be assigned to the application + in the identity block. For more information see - Access vaults + with a user-assigned identity. + type: string + logs: + description: A logs block as defined below. + properties: + applicationLogs: + description: A application_logs block as defined above. + properties: + azureBlobStorage: + description: A azure_blob_storage_http block as defined + above. + properties: + level: + description: 'The level at which to log. Possible + values include Error, Warning, Information, Verbose + and Off. 
NOTE: this field is not available for http_logs' + type: string + retentionInDays: + description: The retention period in days. A values + of 0 means no retention. + type: number + sasUrl: + description: SAS url to an Azure blob container with + read/write/list/delete permissions. + type: string + type: object + fileSystemLevel: + description: Log level. Possible values include Off, Verbose, + Information, Warning, and Error. + type: string + type: object + detailedErrorMessages: + description: Should detailed error messages be enabled? + type: boolean + failedRequestTracing: + description: Should the failed request tracing be enabled? + type: boolean + httpLogs: + description: An http_logs block as defined above. + properties: + azureBlobStorage: + description: A azure_blob_storage_http block as defined + above. + properties: + retentionInDays: + description: The retention period in days. A values + of 0 means no retention. + type: number + type: object + fileSystem: + description: A file_system block as defined above. + properties: + retentionInDays: + description: The retention period in days. A values + of 0 means no retention. + type: number + retentionInMb: + description: The maximum size in megabytes that log + files can use. + type: number + type: object + type: object + type: object + name: + description: The name which should be used for this Linux Web + App Slot. Changing this forces a new Linux Web App Slot to be + created. + type: string + publicNetworkAccessEnabled: + description: Should public network access be enabled for the Web + App. Defaults to true. + type: boolean + servicePlanId: + description: The ID of the Service Plan in which to run this slot. + If not specified the same Service Plan as the Linux Web App + will be used. + type: string + siteConfig: + description: A site_config block as defined below. + properties: + alwaysOn: + description: If this Linux Web App is Always On enabled. Defaults + to true. 
+ type: boolean + apiDefinitionUrl: + description: The URL to the API Definition for this Linux + Web App Slot. + type: string + apiManagementApiId: + description: The API Management API ID this Linux Web App + Slot is associated with. + type: string + appCommandLine: + description: The App command line to launch. + type: string + applicationStack: + description: A application_stack block as defined above. + properties: + dockerImage: + type: string + dockerImageName: + description: The docker image, including tag, to be used. + e.g. appsvc/staticsite:latest. + type: string + dockerImageTag: + type: string + dockerRegistryUrl: + description: The URL of the container registry where the + docker_image_name is located. e.g. https://index.docker.io + or https://mcr.microsoft.com. This value is required + with docker_image_name. + type: string + dockerRegistryUsername: + description: The User Name to use for authentication against + the registry to pull the image. + type: string + dotnetVersion: + description: The version of .NET to use. Possible values + include 3.1, 5.0, 6.0, 7.0 and 8.0. + type: string + goVersion: + description: The version of Go to use. Possible values + include 1.18, and 1.19. + type: string + javaServer: + description: The Java server type. Possible values include + JAVA, TOMCAT, and JBOSSEAP. + type: string + javaServerVersion: + description: The Version of the java_server to use. + type: string + javaVersion: + description: The Version of Java to use. Possible values + include 8, 11, and 17. + type: string + nodeVersion: + description: The version of Node to run. Possible values + are 12-lts, 14-lts, 16-lts, 18-lts and 20-lts. This + property conflicts with java_version. + type: string + phpVersion: + description: The version of PHP to run. Possible values + are 7.4, 8.0, 8.1 and 8.2. + type: string + pythonVersion: + description: The version of Python to run. Possible values + include 3.7, 3.8, 3.9, 3.10, 3.11 and 3.12. 
+ type: string + rubyVersion: + description: The version of Ruby to run. Possible values + include 2.6 and 2.7. + type: string + type: object + autoHealEnabled: + description: Should Auto heal rules be enabled? Required with + auto_heal_setting. + type: boolean + autoHealSetting: + description: A auto_heal_setting block as defined above. Required + with auto_heal. + properties: + action: + description: The action to take. Possible values are Allow + or Deny. Defaults to Allow. + properties: + actionType: + description: 'Predefined action to be taken to an + Auto Heal trigger. Possible values include: Recycle.' + type: string + minimumProcessExecutionTime: + description: The minimum amount of time in hh:mm:ss + the Linux Web App must have been running before + the defined action will be run in the event of a + trigger. + type: string + type: object + trigger: + description: A trigger block as defined below. + properties: + requests: + description: A requests block as defined above. + properties: + count: + description: The number of occurrences of the + defined status_code in the specified interval + on which to trigger this rule. + type: number + interval: + description: The time interval in the form hh:mm:ss. + type: string + type: object + slowRequest: + description: One or more slow_request blocks as defined + above. + properties: + count: + description: The number of occurrences of the + defined status_code in the specified interval + on which to trigger this rule. + type: number + interval: + description: The time interval in the form hh:mm:ss. + type: string + path: + description: The path to which this rule status + code applies. + type: string + timeTaken: + description: The threshold of time passed to qualify + as a Slow Request in hh:mm:ss. + type: string + type: object + statusCode: + description: One or more status_code blocks as defined + above. 
+ items: + properties: + count: + description: The number of occurrences of the + defined status_code in the specified interval + on which to trigger this rule. + type: number + interval: + description: The time interval in the form hh:mm:ss. + type: string + path: + description: The path to which this rule status + code applies. + type: string + statusCodeRange: + description: The status code for this rule, + accepts single status codes and status code + ranges. e.g. 500 or 400-499. Possible values + are integers between 101 and 599 + type: string + subStatus: + description: The Request Sub Status of the Status + Code. + type: number + win32StatusCode: + description: The Win32 Status Code of the Request. + type: number + type: object + type: array + type: object + type: object + autoSwapSlotName: + description: The Linux Web App Slot Name to automatically + swap to when deployment to that slot is successfully completed. + type: string + containerRegistryManagedIdentityClientId: + description: The Client ID of the Managed Service Identity + to use for connections to the Azure Container Registry. + type: string + containerRegistryUseManagedIdentity: + description: Should connections for Azure Container Registry + use Managed Identity. + type: boolean + cors: + description: A cors block as defined above. + properties: + allowedOrigins: + description: |- + Specifies a list of origins that should be allowed to make cross-origin calls. + Specifies a list of origins that should be allowed to make cross-origin calls. + items: + type: string + type: array + x-kubernetes-list-type: set + supportCredentials: + description: |- + Whether CORS requests with credentials are allowed. Defaults to false + Are credentials allowed in CORS requests? Defaults to `false`. + type: boolean + type: object + defaultDocuments: + description: Specifies a list of Default Documents for the + Linux Web App. 
+ items: + type: string + type: array + ftpsState: + description: The State of FTP / FTPS service. Possible values + include AllAllowed, FtpsOnly, and Disabled. Defaults to + Disabled. + type: string + healthCheckEvictionTimeInMin: + description: |- + The amount of time in minutes that a node can be unhealthy before being removed from the load balancer. Possible values are between 2 and 10. Only valid in conjunction with health_check_path. + The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between `2` and `10`. Defaults to `10`. Only valid in conjunction with `health_check_path` + type: number + healthCheckPath: + description: The path to the Health Check. + type: string + http2Enabled: + description: Should the HTTP2 be enabled? + type: boolean + ipRestriction: + description: One or more ip_restriction blocks as defined + above. + items: + properties: + action: + description: |- + The action to take. Possible values are Allow or Deny. Defaults to Allow. + The action to take. Possible values are `Allow` or `Deny`. + type: string + description: + description: |- + The Description of this IP Restriction. + The description of the IP restriction rule. + type: string + headers: + description: A headers block as defined above. + items: + properties: + xAzureFdid: + description: Specifies a list of Azure Front Door + IDs. + items: + type: string + type: array + xFdHealthProbe: + description: Specifies if a Front Door Health + Probe should be expected. The only possible + value is 1. + items: + type: string + type: array + xForwardedFor: + description: Specifies a list of addresses for + which matching should be applied. Omitting this + value means allow any. + items: + type: string + type: array + xForwardedHost: + description: Specifies a list of Hosts for which + matching should be applied. 
+ items: + type: string + type: array + type: object + type: array + ipAddress: + description: |- + The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + type: string + name: + description: |- + The name which should be used for this Storage Account. + The name which should be used for this `ip_restriction`. + type: string + priority: + description: |- + The priority value of this ip_restriction. Defaults to 65000. + The priority value of this `ip_restriction`. + type: number + serviceTag: + description: |- + The Service Tag used for this IP Restriction. + The Service Tag used for this IP Restriction. + type: string + virtualNetworkSubnetId: + description: |- + The subnet id which will be used by this Web App Slot for regional virtual network integration. + The Virtual Network Subnet ID used for this IP Restriction. + type: string + virtualNetworkSubnetIdRef: + description: Reference to a Subnet in network to populate + virtualNetworkSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkSubnetIdSelector: + description: Selector for a Subnet in network to populate + virtualNetworkSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + ipRestrictionDefaultAction: + description: The Default action for traffic that does not + match any ip_restriction rule. possible values include Allow + and Deny. Defaults to Allow. + type: string + loadBalancingMode: + description: 'The Site load balancing. Possible values include: + WeightedRoundRobin, LeastRequests, LeastResponseTime, WeightedTotalTraffic, + RequestHash, PerSiteRoundRobin. Defaults to LeastRequests + if omitted.' + type: string + localMysqlEnabled: + description: Use Local MySQL. Defaults to false. + type: boolean + managedPipelineMode: + description: 'Managed pipeline mode. 
Possible values include: + Integrated, Classic. Defaults to Integrated.' + type: string + minimumTlsVersion: + description: 'The configures the minimum version of TLS required + for SSL requests. Possible values include: 1.0, 1.1, and + 1.2. Defaults to 1.2.' + type: string + remoteDebuggingEnabled: + description: Should Remote Debugging be enabled? Defaults + to false. + type: boolean + remoteDebuggingVersion: + description: The Remote Debugging Version. Possible values + include VS2017 and VS2019 + type: string + scmIpRestriction: + description: One or more scm_ip_restriction blocks as defined + above. + items: + properties: + action: + description: |- + The action to take. Possible values are Allow or Deny. Defaults to Allow. + The action to take. Possible values are `Allow` or `Deny`. + type: string + description: + description: |- + The Description of this IP Restriction. + The description of the IP restriction rule. + type: string + headers: + description: A headers block as defined above. + items: + properties: + xAzureFdid: + description: Specifies a list of Azure Front Door + IDs. + items: + type: string + type: array + xFdHealthProbe: + description: Specifies if a Front Door Health + Probe should be expected. The only possible + value is 1. + items: + type: string + type: array + xForwardedFor: + description: Specifies a list of addresses for + which matching should be applied. Omitting this + value means allow any. + items: + type: string + type: array + xForwardedHost: + description: Specifies a list of Hosts for which + matching should be applied. + items: + type: string + type: array + type: object + type: array + ipAddress: + description: |- + The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + The CIDR notation of the IP or IP Range to match. 
For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + type: string + name: + description: |- + The name which should be used for this Storage Account. + The name which should be used for this `ip_restriction`. + type: string + priority: + description: |- + The priority value of this ip_restriction. Defaults to 65000. + The priority value of this `ip_restriction`. + type: number + serviceTag: + description: |- + The Service Tag used for this IP Restriction. + The Service Tag used for this IP Restriction. + type: string + virtualNetworkSubnetId: + description: |- + The subnet id which will be used by this Web App Slot for regional virtual network integration. + The Virtual Network Subnet ID used for this IP Restriction. + type: string + virtualNetworkSubnetIdRef: + description: Reference to a Subnet in network to populate + virtualNetworkSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkSubnetIdSelector: + description: Selector for a Subnet in network to populate + virtualNetworkSubnetId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + scmIpRestrictionDefaultAction: + description: The Default action for traffic that does not + match any scm_ip_restriction rule. possible values include + Allow and Deny. Defaults to Allow. + type: string + scmMinimumTlsVersion: + description: 'The configures the minimum version of TLS required + for SSL requests to the SCM site Possible values include: + 1.0, 1.1, and 1.2. Defaults to 1.2.' + type: string + scmUseMainIpRestriction: + description: Should the Linux Web App ip_restriction configuration + be used for the SCM also. + type: boolean + use32BitWorker: + description: Should the Linux Web App use a 32-bit worker? + Defaults to true. + type: boolean + vnetRouteAllEnabled: + description: |- + Should all outbound traffic have NAT Gateways, Network Security Groups and User Defined Routes applied? Defaults to false. 
+ Should all outbound traffic to have Virtual Network Security Groups and User Defined Routes applied? Defaults to `false`. + type: boolean + websocketsEnabled: + description: Should Web Sockets be enabled? Defaults to false. + type: boolean + workerCount: + description: The number of Workers for this Linux App Service + Slot. + type: number + type: object + storageAccount: + description: One or more storage_account blocks as defined below. + items: + properties: + accountName: + description: The Name of the Storage Account. + type: string + mountPath: + description: The path at which to mount the storage share. + type: string + name: + description: The name which should be used for this Storage + Account. + type: string + shareName: + description: The Name of the File Share or Container Name + for Blob storage. + type: string + type: + description: The Azure Storage Type. Possible values include + AzureFiles and AzureBlob + type: string + type: object + type: array + tags: + additionalProperties: + type: string + description: A mapping of tags that should be assigned to the + Linux Web App. + type: object + x-kubernetes-map-type: granular + virtualNetworkSubnetId: + description: The subnet id which will be used by this Web App + Slot for regional virtual network integration. + type: string + virtualNetworkSubnetIdRef: + description: Reference to a Subnet in network to populate virtualNetworkSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkSubnetIdSelector: + description: Selector for a Subnet in network to populate virtualNetworkSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + webdeployPublishBasicAuthenticationEnabled: + description: Should the default WebDeploy Basic Authentication + publishing credentials enabled. Defaults to true. + type: boolean + zipDeployFile: + description: |- + The local path and filename of the Zip packaged application to deploy to this Linux Web App. 
+ The local path and filename of the Zip packaged application to deploy to this Windows Web App. **Note:** Using this value requires `WEBSITE_RUN_FROM_PACKAGE=1` on the App in `app_settings`. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.name is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.name) + || (has(self.initProvider) && has(self.initProvider.name))' + - message: spec.forProvider.siteConfig is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.siteConfig) + || (has(self.initProvider) && has(self.initProvider.siteConfig))' + status: + description: LinuxWebAppSlotStatus defines the observed state of LinuxWebAppSlot. + properties: + atProvider: + properties: + appMetadata: + additionalProperties: + type: string + description: A app_metadata. + type: object + x-kubernetes-map-type: granular + appServiceId: + description: The ID of the Linux Web App this Deployment Slot + will be part of. + type: string + appSettings: + additionalProperties: + type: string + description: A map of key-value pairs of App Settings. + type: object + x-kubernetes-map-type: granular + authSettings: + description: An auth_settings block as defined below. + properties: + activeDirectory: + description: An active_directory block as defined above. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Active Directory. 
+ type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The App Setting name that contains the client secret of the Client. Cannot be used with `client_secret`. + type: string + type: object + additionalLoginParameters: + additionalProperties: + type: string + description: |- + Specifies a map of login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + Specifies a map of Login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + type: object + x-kubernetes-map-type: granular + allowedExternalRedirectUrls: + description: |- + Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Linux Web App. + Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + items: + type: string + type: array + defaultProvider: + description: |- + The default authentication provider to use when multiple providers are configured. Possible values include: BuiltInAuthenticationProviderAzureActiveDirectory, BuiltInAuthenticationProviderFacebook, BuiltInAuthenticationProviderGoogle, BuiltInAuthenticationProviderMicrosoftAccount, BuiltInAuthenticationProviderTwitter, BuiltInAuthenticationProviderGithub + The default authentication provider to use when multiple providers are configured. Possible values include: `AzureActiveDirectory`, `Facebook`, `Google`, `MicrosoftAccount`, `Twitter`, `Github`. + type: string + enabled: + description: |- + Should the Authentication / Authorization feature be enabled for the Linux Web App? + Should the Authentication / Authorization feature be enabled? + type: boolean + facebook: + description: A facebook block as defined below. + properties: + appId: + description: |- + The App ID of the Facebook app used for login. 
+ The App ID of the Facebook app used for login. + type: string + appSecretSettingName: + description: |- + The app setting name that contains the app_secret value used for Facebook Login. + The app setting name that contains the `app_secret` value used for Facebook Login. Cannot be specified with `app_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + Specifies a list of OAuth 2.0 scopes to be requested as part of Facebook Login authentication. + items: + type: string + type: array + type: object + github: + description: A github block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the GitHub app used for login. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for GitHub Login. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + items: + type: string + type: array + type: object + google: + description: A google block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Google web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. 
+ The app setting name that contains the `client_secret` value used for Google Login. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + Specifies a list of OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication. If not specified, "openid", "profile", and "email" are used as default scopes. + items: + type: string + type: array + type: object + issuer: + description: |- + The OpenID Connect Issuer URI that represents the entity that issues access tokens for this Linux Web App. + The OpenID Connect Issuer URI that represents the entity which issues access tokens. + type: string + microsoft: + description: A microsoft block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OAuth 2.0 client ID that was created for the app used for authentication. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + The list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, `wl.basic` is used as the default scope. + items: + type: string + type: array + type: object + runtimeVersion: + description: |- + The RuntimeVersion of the Authentication / Authorization feature in use for the Linux Web App. 
+ The RuntimeVersion of the Authentication / Authorization feature in use. + type: string + tokenRefreshExtensionHours: + description: |- + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + type: number + tokenStoreEnabled: + description: |- + Should the Linux Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to false. + Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to `false`. + type: boolean + twitter: + description: A twitter block as defined below. + properties: + consumerKey: + description: |- + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + type: string + consumerSecretSettingName: + description: |- + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret`. + type: string + type: object + unauthenticatedClientAction: + description: |- + The action to take when an unauthenticated client attempts to access the app. Possible values include: RedirectToLoginPage, AllowAnonymous. + The action to take when an unauthenticated client attempts to access the app. Possible values include: `RedirectToLoginPage`, `AllowAnonymous`. + type: string + type: object + authSettingsV2: + description: An auth_settings_v2 block as defined below. + properties: + activeDirectoryV2: + description: An active_directory_v2 block as defined below. 
+ properties: + allowedApplications: + description: |- + The list of allowed Applications for the Default Authorisation Policy. + The list of allowed Applications for the Default Authorisation Policy. + items: + type: string + type: array + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + items: + type: string + type: array + allowedGroups: + description: |- + The list of allowed Group Names for the Default Authorisation Policy. + The list of allowed Group Names for the Default Authorisation Policy. + items: + type: string + type: array + allowedIdentities: + description: |- + The list of allowed Identities for the Default Authorisation Policy. + The list of allowed Identities for the Default Authorisation Policy. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Active Directory. + type: string + clientSecretCertificateThumbprint: + description: |- + The thumbprint of the certificate used for signing purposes. + The thumbprint of the certificate used for signing purposes. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The App Setting name that contains the client secret of the Client. + type: string + jwtAllowedClientApplications: + description: |- + A list of Allowed Client Applications in the JWT Claim. + A list of Allowed Client Applications in the JWT Claim. + items: + type: string + type: array + jwtAllowedGroups: + description: |- + A list of Allowed Groups in the JWT Claim. + A list of Allowed Groups in the JWT Claim. 
+ items: + type: string + type: array + loginParameters: + additionalProperties: + type: string + description: |- + A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + type: object + x-kubernetes-map-type: granular + tenantAuthEndpoint: + description: |- + The Azure Tenant Endpoint for the Authenticating Tenant. e.g. https://login.microsoftonline.com/v2.0/{tenant-guid}/ + The Azure Tenant Endpoint for the Authenticating Tenant. e.g. `https://login.microsoftonline.com/v2.0/{tenant-guid}/`. + type: string + wwwAuthenticationDisabled: + description: |- + Should the www-authenticate provider should be omitted from the request? Defaults to false. + Should the www-authenticate provider should be omitted from the request? Defaults to `false` + type: boolean + type: object + appleV2: + description: An apple_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Apple web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Apple Login. + type: string + loginScopes: + description: The list of Login scopes that should be requested + as part of Microsoft Account authentication. + items: + type: string + type: array + type: object + authEnabled: + description: |- + Should the AuthV2 Settings be enabled. Defaults to false. + Should the AuthV2 Settings be enabled. Defaults to `false` + type: boolean + azureStaticWebAppV2: + description: An azure_static_web_app_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. 
+ The ID of the Client to use to authenticate with Azure Static Web App Authentication. + type: string + type: object + configFilePath: + description: |- + The path to the App Auth settings. + The path to the App Auth settings. **Note:** Relative Paths are evaluated from the Site Root directory. + type: string + customOidcV2: + description: Zero or more custom_oidc_v2 blocks as defined + below. + items: + properties: + authorisationEndpoint: + description: |- + The endpoint to make the Authorisation Request as supplied by openid_configuration_endpoint response. + The endpoint to make the Authorisation Request. + type: string + certificationUri: + description: |- + The endpoint that provides the keys necessary to validate the token as supplied by openid_configuration_endpoint response. + The endpoint that provides the keys necessary to validate the token. + type: string + clientCredentialMethod: + description: |- + The Client Credential Method used. + The Client Credential Method used. Currently the only supported value is `ClientSecretPost`. + type: string + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with this Custom OIDC. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The App Setting name that contains the secret for this Custom OIDC Client. + type: string + issuerEndpoint: + description: |- + The endpoint that issued the Token as supplied by openid_configuration_endpoint response. + The endpoint that issued the Token. + type: string + name: + description: |- + The name which should be used for this Storage Account. + The name of the Custom OIDC Authentication Provider. + type: string + nameClaimType: + description: |- + The name of the claim that contains the users name. + The name of the claim that contains the users name. 
+ type: string + openidConfigurationEndpoint: + description: |- + The app setting name that contains the client_secret value used for the Custom OIDC Login. + The endpoint that contains all the configuration endpoints for this Custom OIDC provider. + type: string + scopes: + description: |- + The list of the scopes that should be requested while authenticating. + The list of the scopes that should be requested while authenticating. + items: + type: string + type: array + tokenEndpoint: + description: |- + The endpoint used to request a Token as supplied by openid_configuration_endpoint response. + The endpoint used to request a Token. + type: string + type: object + type: array + defaultProvider: + description: |- + The Default Authentication Provider to use when the unauthenticated_action is set to RedirectToLoginPage. Possible values include: apple, azureactivedirectory, facebook, github, google, twitter and the name of your custom_oidc_v2 provider. + The Default Authentication Provider to use when the `unauthenticated_action` is set to `RedirectToLoginPage`. Possible values include: `apple`, `azureactivedirectory`, `facebook`, `github`, `google`, `twitter` and the `name` of your `custom_oidc_v2` provider. + type: string + excludedPaths: + description: |- + The paths which should be excluded from the unauthenticated_action when it is set to RedirectToLoginPage. + The paths which should be excluded from the `unauthenticated_action` when it is set to `RedirectToLoginPage`. + items: + type: string + type: array + facebookV2: + description: A facebook_v2 block as defined below. + properties: + appId: + description: |- + The App ID of the Facebook app used for login. + The App ID of the Facebook app used for login. + type: string + appSecretSettingName: + description: |- + The app setting name that contains the app_secret value used for Facebook Login. + The app setting name that contains the `app_secret` value used for Facebook Login. 
+ type: string + graphApiVersion: + description: |- + The version of the Facebook API to be used while logging in. + The version of the Facebook API to be used while logging in. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of scopes to be requested as part of Facebook Login authentication. + items: + type: string + type: array + type: object + forwardProxyConvention: + description: |- + The convention used to determine the url of the request made. Possible values include NoProxy, Standard, Custom. Defaults to NoProxy. + The convention used to determine the url of the request made. Possible values include `ForwardProxyConventionNoProxy`, `ForwardProxyConventionStandard`, `ForwardProxyConventionCustom`. Defaults to `ForwardProxyConventionNoProxy` + type: string + forwardProxyCustomHostHeaderName: + description: |- + The name of the custom header containing the host of the request. + The name of the header containing the host of the request. + type: string + forwardProxyCustomSchemeHeaderName: + description: |- + The name of the custom header containing the scheme of the request. + The name of the header containing the scheme of the request. + type: string + githubV2: + description: A github_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the GitHub app used for login. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for GitHub Login. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. 
+ Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + items: + type: string + type: array + type: object + googleV2: + description: A google_v2 block as defined below. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed Audiences that will be requested as part of Google Sign-In authentication. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Google web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Google Login. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of Login scopes that will be requested as part of Google Sign-In authentication. + items: + type: string + type: array + type: object + httpRouteApiPrefix: + description: |- + The prefix that should precede all the authentication and authorisation paths. Defaults to /.auth. + The prefix that should precede all the authentication and authorisation paths. Defaults to `/.auth` + type: string + login: + description: A login block as defined below. + properties: + allowedExternalRedirectUrls: + description: |- + External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. + External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. 
**Note:** URLs within the current domain are always implicitly allowed. + items: + type: string + type: array + cookieExpirationConvention: + description: |- + The method by which cookies expire. Possible values include: FixedTime, and IdentityProviderDerived. Defaults to FixedTime. + The method by which cookies expire. Possible values include: `FixedTime`, and `IdentityProviderDerived`. Defaults to `FixedTime`. + type: string + cookieExpirationTime: + description: |- + The time after the request is made when the session cookie should expire. Defaults to 08:00:00. + The time after the request is made when the session cookie should expire. Defaults to `08:00:00`. + type: string + logoutEndpoint: + description: |- + The endpoint to which logout requests should be made. + The endpoint to which logout requests should be made. + type: string + nonceExpirationTime: + description: |- + The time after the request is made when the nonce should expire. Defaults to 00:05:00. + The time after the request is made when the nonce should expire. Defaults to `00:05:00`. + type: string + preserveUrlFragmentsForLogins: + description: |- + Should the fragments from the request be preserved after the login request is made. Defaults to false. + Should the fragments from the request be preserved after the login request is made. Defaults to `false`. + type: boolean + tokenRefreshExtensionTime: + description: |- + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + type: number + tokenStoreEnabled: + description: |- + Should the Token Store configuration Enabled. Defaults to false + Should the Token Store configuration Enabled. 
Defaults to `false` + type: boolean + tokenStorePath: + description: |- + The directory path in the App Filesystem in which the tokens will be stored. + The directory path in the App Filesystem in which the tokens will be stored. + type: string + tokenStoreSasSettingName: + description: |- + The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + type: string + validateNonce: + description: |- + Should the nonce be validated while completing the login flow. Defaults to true. + Should the nonce be validated while completing the login flow. Defaults to `true`. + type: boolean + type: object + microsoftV2: + description: A microsoft_v2 block as defined below. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OAuth 2.0 client ID that was created for the app used for authentication. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + The list of Login scopes that will be requested as part of Microsoft Account authentication. + items: + type: string + type: array + type: object + requireAuthentication: + description: |- + Should the authentication flow be used for all requests. 
+ Should the authentication flow be used for all requests. + type: boolean + requireHttps: + description: |- + Should HTTPS be required on connections? Defaults to true. + Should HTTPS be required on connections? Defaults to true. + type: boolean + runtimeVersion: + description: |- + The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to ~1. + The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to `~1` + type: string + twitterV2: + description: A twitter_v2 block as defined below. + properties: + consumerKey: + description: |- + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + type: string + consumerSecretSettingName: + description: |- + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + type: string + type: object + unauthenticatedAction: + description: |- + The action to take for requests made without authentication. Possible values include RedirectToLoginPage, AllowAnonymous, Return401, and Return403. Defaults to RedirectToLoginPage. + The action to take for requests made without authentication. Possible values include `RedirectToLoginPage`, `AllowAnonymous`, `Return401`, and `Return403`. Defaults to `RedirectToLoginPage`. + type: string + type: object + backup: + description: A backup block as defined below. + properties: + enabled: + description: |- + Should this backup job be enabled? Defaults to true. + Should this backup job be enabled? + type: boolean + name: + description: |- + The name which should be used for this Backup. + The name which should be used for this Backup. + type: string + schedule: + description: An schedule block as defined below. 
+ properties: + frequencyInterval: + description: |- + How often the backup should be executed (e.g. for weekly backup, this should be set to 7 and frequency_unit should be set to Day). + How often the backup should be executed (e.g. for weekly backup, this should be set to `7` and `frequency_unit` should be set to `Day`). + type: number + frequencyUnit: + description: |- + The unit of time for how often the backup should take place. Possible values include: Day, Hour + The unit of time for how often the backup should take place. Possible values include: `Day` and `Hour`. + type: string + keepAtLeastOneBackup: + description: |- + Should the service keep at least one backup, regardless of the age of backup? Defaults to false. + Should the service keep at least one backup, regardless of age of backup. Defaults to `false`. + type: boolean + lastExecutionTime: + description: The time the backup was last attempted. + type: string + retentionPeriodDays: + description: |- + After how many days backups should be deleted. Defaults to 30. + After how many days backups should be deleted. + type: number + startTime: + description: |- + When the schedule should start working in RFC-3339 format. + When the schedule should start working in RFC-3339 format. + type: string + type: object + type: object + clientAffinityEnabled: + description: Should Client Affinity be enabled? + type: boolean + clientCertificateEnabled: + description: Should Client Certificates be enabled? + type: boolean + clientCertificateExclusionPaths: + description: |- + Paths to exclude when using client certificates, separated by ; + Paths to exclude when using client certificates, separated by ; + type: string + clientCertificateMode: + description: The Client Certificate mode. Possible values are + Required, Optional, and OptionalInteractiveUser. This property + has no effect when client_cert_enabled is false. Defaults to + Required. 
+ type: string + connectionString: + description: One or more connection_string blocks as defined below. + items: + properties: + name: + description: |- + The name of the Connection String. + The name which should be used for this Connection. + type: string + type: + description: |- + Type of database. Possible values include APIHub, Custom, DocDb, EventHub, MySQL, NotificationHub, PostgreSQL, RedisCache, ServiceBus, SQLAzure, and SQLServer. + Type of database. Possible values include: `MySQL`, `SQLServer`, `SQLAzure`, `Custom`, `NotificationHub`, `ServiceBus`, `EventHub`, `APIHub`, `DocDb`, `RedisCache`, and `PostgreSQL`. + type: string + type: object + type: array + defaultHostname: + description: The default hostname of the Linux Web App. + type: string + enabled: + description: Should the Linux Web App be enabled? Defaults to + true. + type: boolean + ftpPublishBasicAuthenticationEnabled: + description: Should the default FTP Basic Authentication publishing + profile be enabled. Defaults to true. + type: boolean + hostingEnvironmentId: + description: The ID of the App Service Environment used by App + Service Slot. + type: string + httpsOnly: + description: Should the Linux Web App require HTTPS connections. + Defaults to false. + type: boolean + id: + description: The ID of the Linux Web App. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of User Assigned Managed Identity IDs + to be assigned to this Linux Web App Slot. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Principal ID associated with this Managed + Service Identity. + type: string + tenantId: + description: The Tenant ID associated with this Managed Service + Identity. + type: string + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Linux Web App Slot. 
Possible + values are SystemAssigned, UserAssigned and SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + keyVaultReferenceIdentityId: + description: The User Assigned Identity ID used for accessing + KeyVault secrets. The identity must be assigned to the application + in the identity block. For more information see - Access vaults + with a user-assigned identity. + type: string + kind: + description: The Kind value for this Linux Web App. + type: string + logs: + description: A logs block as defined below. + properties: + applicationLogs: + description: A application_logs block as defined above. + properties: + azureBlobStorage: + description: A azure_blob_storage_http block as defined + above. + properties: + level: + description: 'The level at which to log. Possible + values include Error, Warning, Information, Verbose + and Off. NOTE: this field is not available for http_logs' + type: string + retentionInDays: + description: The retention period in days. A values + of 0 means no retention. + type: number + sasUrl: + description: SAS url to an Azure blob container with + read/write/list/delete permissions. + type: string + type: object + fileSystemLevel: + description: Log level. Possible values include Off, Verbose, + Information, Warning, and Error. + type: string + type: object + detailedErrorMessages: + description: Should detailed error messages be enabled? + type: boolean + failedRequestTracing: + description: Should the failed request tracing be enabled? + type: boolean + httpLogs: + description: An http_logs block as defined above. + properties: + azureBlobStorage: + description: A azure_blob_storage_http block as defined + above. + properties: + retentionInDays: + description: The retention period in days. A values + of 0 means no retention. + type: number + type: object + fileSystem: + description: A file_system block as defined above. + properties: + retentionInDays: + description: The retention period in days. 
A values + of 0 means no retention. + type: number + retentionInMb: + description: The maximum size in megabytes that log + files can use. + type: number + type: object + type: object + type: object + name: + description: The name which should be used for this Linux Web + App Slot. Changing this forces a new Linux Web App Slot to be + created. + type: string + outboundIpAddressList: + description: A list of outbound IP addresses - such as ["52.23.25.3", + "52.143.43.12"] + items: + type: string + type: array + outboundIpAddresses: + description: A comma-separated list of outbound IP addresses - + such as 52.23.25.3,52.143.43.12. + type: string + possibleOutboundIpAddressList: + description: A possible_outbound_ip_address_list. + items: + type: string + type: array + possibleOutboundIpAddresses: + description: A comma-separated list of outbound IP addresses - + such as 52.23.25.3,52.143.43.12,52.143.43.17 - not all of which + are necessarily in use. Superset of outbound_ip_addresses. + type: string + publicNetworkAccessEnabled: + description: Should public network access be enabled for the Web + App. Defaults to true. + type: boolean + servicePlanId: + description: The ID of the Service Plan in which to run this slot. + If not specified the same Service Plan as the Linux Web App + will be used. + type: string + siteConfig: + description: A site_config block as defined below. + properties: + alwaysOn: + description: If this Linux Web App is Always On enabled. Defaults + to true. + type: boolean + apiDefinitionUrl: + description: The URL to the API Definition for this Linux + Web App Slot. + type: string + apiManagementApiId: + description: The API Management API ID this Linux Web App + Slot is associated with. + type: string + appCommandLine: + description: The App command line to launch. + type: string + applicationStack: + description: A application_stack block as defined above. 
+ properties: + dockerImage: + type: string + dockerImageName: + description: The docker image, including tag, to be used. + e.g. appsvc/staticsite:latest. + type: string + dockerImageTag: + type: string + dockerRegistryUrl: + description: The URL of the container registry where the + docker_image_name is located. e.g. https://index.docker.io + or https://mcr.microsoft.com. This value is required + with docker_image_name. + type: string + dockerRegistryUsername: + description: The User Name to use for authentication against + the registry to pull the image. + type: string + dotnetVersion: + description: The version of .NET to use. Possible values + include 3.1, 5.0, 6.0, 7.0 and 8.0. + type: string + goVersion: + description: The version of Go to use. Possible values + include 1.18, and 1.19. + type: string + javaServer: + description: The Java server type. Possible values include + JAVA, TOMCAT, and JBOSSEAP. + type: string + javaServerVersion: + description: The Version of the java_server to use. + type: string + javaVersion: + description: The Version of Java to use. Possible values + include 8, 11, and 17. + type: string + nodeVersion: + description: The version of Node to run. Possible values + are 12-lts, 14-lts, 16-lts, 18-lts and 20-lts. This + property conflicts with java_version. + type: string + phpVersion: + description: The version of PHP to run. Possible values + are 7.4, 8.0, 8.1 and 8.2. + type: string + pythonVersion: + description: The version of Python to run. Possible values + include 3.7, 3.8, 3.9, 3.10, 3.11 and 3.12. + type: string + rubyVersion: + description: The version of Ruby to run. Possible values + include 2.6 and 2.7. + type: string + type: object + autoHealEnabled: + description: Should Auto heal rules be enabled? Required with + auto_heal_setting. + type: boolean + autoHealSetting: + description: A auto_heal_setting block as defined above. Required + with auto_heal. + properties: + action: + description: The action to take. 
Possible values are Allow + or Deny. Defaults to Allow. + properties: + actionType: + description: 'Predefined action to be taken to an + Auto Heal trigger. Possible values include: Recycle.' + type: string + minimumProcessExecutionTime: + description: The minimum amount of time in hh:mm:ss + the Linux Web App must have been running before + the defined action will be run in the event of a + trigger. + type: string + type: object + trigger: + description: A trigger block as defined below. + properties: + requests: + description: A requests block as defined above. + properties: + count: + description: The number of occurrences of the + defined status_code in the specified interval + on which to trigger this rule. + type: number + interval: + description: The time interval in the form hh:mm:ss. + type: string + type: object + slowRequest: + description: One or more slow_request blocks as defined + above. + properties: + count: + description: The number of occurrences of the + defined status_code in the specified interval + on which to trigger this rule. + type: number + interval: + description: The time interval in the form hh:mm:ss. + type: string + path: + description: The path to which this rule status + code applies. + type: string + timeTaken: + description: The threshold of time passed to qualify + as a Slow Request in hh:mm:ss. + type: string + type: object + statusCode: + description: One or more status_code blocks as defined + above. + items: + properties: + count: + description: The number of occurrences of the + defined status_code in the specified interval + on which to trigger this rule. + type: number + interval: + description: The time interval in the form hh:mm:ss. + type: string + path: + description: The path to which this rule status + code applies. + type: string + statusCodeRange: + description: The status code for this rule, + accepts single status codes and status code + ranges. e.g. 500 or 400-499. 
Possible values + are integers between 101 and 599 + type: string + subStatus: + description: The Request Sub Status of the Status + Code. + type: number + win32StatusCode: + description: The Win32 Status Code of the Request. + type: number + type: object + type: array + type: object + type: object + autoSwapSlotName: + description: The Linux Web App Slot Name to automatically + swap to when deployment to that slot is successfully completed. + type: string + containerRegistryManagedIdentityClientId: + description: The Client ID of the Managed Service Identity + to use for connections to the Azure Container Registry. + type: string + containerRegistryUseManagedIdentity: + description: Should connections for Azure Container Registry + use Managed Identity. + type: boolean + cors: + description: A cors block as defined above. + properties: + allowedOrigins: + description: |- + Specifies a list of origins that should be allowed to make cross-origin calls. + Specifies a list of origins that should be allowed to make cross-origin calls. + items: + type: string + type: array + x-kubernetes-list-type: set + supportCredentials: + description: |- + Whether CORS requests with credentials are allowed. Defaults to false + Are credentials allowed in CORS requests? Defaults to `false`. + type: boolean + type: object + defaultDocuments: + description: Specifies a list of Default Documents for the + Linux Web App. + items: + type: string + type: array + detailedErrorLoggingEnabled: + description: Should the Linux Web App be enabled? Defaults + to true. + type: boolean + ftpsState: + description: The State of FTP / FTPS service. Possible values + include AllAllowed, FtpsOnly, and Disabled. Defaults to + Disabled. + type: string + healthCheckEvictionTimeInMin: + description: |- + The amount of time in minutes that a node can be unhealthy before being removed from the load balancer. Possible values are between 2 and 10. Only valid in conjunction with health_check_path. 
+ The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between `2` and `10`. Defaults to `10`. Only valid in conjunction with `health_check_path` + type: number + healthCheckPath: + description: The path to the Health Check. + type: string + http2Enabled: + description: Should the HTTP2 be enabled? + type: boolean + ipRestriction: + description: One or more ip_restriction blocks as defined + above. + items: + properties: + action: + description: |- + The action to take. Possible values are Allow or Deny. Defaults to Allow. + The action to take. Possible values are `Allow` or `Deny`. + type: string + description: + description: |- + The Description of this IP Restriction. + The description of the IP restriction rule. + type: string + headers: + description: A headers block as defined above. + items: + properties: + xAzureFdid: + description: Specifies a list of Azure Front Door + IDs. + items: + type: string + type: array + xFdHealthProbe: + description: Specifies if a Front Door Health + Probe should be expected. The only possible + value is 1. + items: + type: string + type: array + xForwardedFor: + description: Specifies a list of addresses for + which matching should be applied. Omitting this + value means allow any. + items: + type: string + type: array + xForwardedHost: + description: Specifies a list of Hosts for which + matching should be applied. + items: + type: string + type: array + type: object + type: array + ipAddress: + description: |- + The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + type: string + name: + description: |- + The name which should be used for this Storage Account. + The name which should be used for this `ip_restriction`. 
+ type: string + priority: + description: |- + The priority value of this ip_restriction. Defaults to 65000. + The priority value of this `ip_restriction`. + type: number + serviceTag: + description: |- + The Service Tag used for this IP Restriction. + The Service Tag used for this IP Restriction. + type: string + virtualNetworkSubnetId: + description: |- + The subnet id which will be used by this Web App Slot for regional virtual network integration. + The Virtual Network Subnet ID used for this IP Restriction. + type: string + type: object + type: array + ipRestrictionDefaultAction: + description: The Default action for traffic that does not + match any ip_restriction rule. possible values include Allow + and Deny. Defaults to Allow. + type: string + linuxFxVersion: + type: string + loadBalancingMode: + description: 'The Site load balancing. Possible values include: + WeightedRoundRobin, LeastRequests, LeastResponseTime, WeightedTotalTraffic, + RequestHash, PerSiteRoundRobin. Defaults to LeastRequests + if omitted.' + type: string + localMysqlEnabled: + description: Use Local MySQL. Defaults to false. + type: boolean + managedPipelineMode: + description: 'Managed pipeline mode. Possible values include: + Integrated, Classic. Defaults to Integrated.' + type: string + minimumTlsVersion: + description: 'The configures the minimum version of TLS required + for SSL requests. Possible values include: 1.0, 1.1, and + 1.2. Defaults to 1.2.' + type: string + remoteDebuggingEnabled: + description: Should Remote Debugging be enabled? Defaults + to false. + type: boolean + remoteDebuggingVersion: + description: The Remote Debugging Version. Possible values + include VS2017 and VS2019 + type: string + scmIpRestriction: + description: One or more scm_ip_restriction blocks as defined + above. + items: + properties: + action: + description: |- + The action to take. Possible values are Allow or Deny. Defaults to Allow. + The action to take. Possible values are `Allow` or `Deny`. 
+ type: string + description: + description: |- + The Description of this IP Restriction. + The description of the IP restriction rule. + type: string + headers: + description: A headers block as defined above. + items: + properties: + xAzureFdid: + description: Specifies a list of Azure Front Door + IDs. + items: + type: string + type: array + xFdHealthProbe: + description: Specifies if a Front Door Health + Probe should be expected. The only possible + value is 1. + items: + type: string + type: array + xForwardedFor: + description: Specifies a list of addresses for + which matching should be applied. Omitting this + value means allow any. + items: + type: string + type: array + xForwardedHost: + description: Specifies a list of Hosts for which + matching should be applied. + items: + type: string + type: array + type: object + type: array + ipAddress: + description: |- + The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + type: string + name: + description: |- + The name which should be used for this Storage Account. + The name which should be used for this `ip_restriction`. + type: string + priority: + description: |- + The priority value of this ip_restriction. Defaults to 65000. + The priority value of this `ip_restriction`. + type: number + serviceTag: + description: |- + The Service Tag used for this IP Restriction. + The Service Tag used for this IP Restriction. + type: string + virtualNetworkSubnetId: + description: |- + The subnet id which will be used by this Web App Slot for regional virtual network integration. + The Virtual Network Subnet ID used for this IP Restriction. + type: string + type: object + type: array + scmIpRestrictionDefaultAction: + description: The Default action for traffic that does not + match any scm_ip_restriction rule. 
possible values include + Allow and Deny. Defaults to Allow. + type: string + scmMinimumTlsVersion: + description: 'The configures the minimum version of TLS required + for SSL requests to the SCM site Possible values include: + 1.0, 1.1, and 1.2. Defaults to 1.2.' + type: string + scmType: + type: string + scmUseMainIpRestriction: + description: Should the Linux Web App ip_restriction configuration + be used for the SCM also. + type: boolean + use32BitWorker: + description: Should the Linux Web App use a 32-bit worker? + Defaults to true. + type: boolean + vnetRouteAllEnabled: + description: |- + Should all outbound traffic have NAT Gateways, Network Security Groups and User Defined Routes applied? Defaults to false. + Should all outbound traffic to have Virtual Network Security Groups and User Defined Routes applied? Defaults to `false`. + type: boolean + websocketsEnabled: + description: Should Web Sockets be enabled? Defaults to false. + type: boolean + workerCount: + description: The number of Workers for this Linux App Service + Slot. + type: number + type: object + storageAccount: + description: One or more storage_account blocks as defined below. + items: + properties: + accountName: + description: The Name of the Storage Account. + type: string + mountPath: + description: The path at which to mount the storage share. + type: string + name: + description: The name which should be used for this Storage + Account. + type: string + shareName: + description: The Name of the File Share or Container Name + for Blob storage. + type: string + type: + description: The Azure Storage Type. Possible values include + AzureFiles and AzureBlob + type: string + type: object + type: array + tags: + additionalProperties: + type: string + description: A mapping of tags that should be assigned to the + Linux Web App. 
+ type: object + x-kubernetes-map-type: granular + virtualNetworkSubnetId: + description: The subnet id which will be used by this Web App + Slot for regional virtual network integration. + type: string + webdeployPublishBasicAuthenticationEnabled: + description: Should the default WebDeploy Basic Authentication + publishing credentials enabled. Defaults to true. + type: boolean + zipDeployFile: + description: |- + The local path and filename of the Zip packaged application to deploy to this Linux Web App. + The local path and filename of the Zip packaged application to deploy to this Windows Web App. **Note:** Using this value requires `WEBSITE_RUN_FROM_PACKAGE=1` on the App in `app_settings`. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/web.azure.upbound.io_staticsites.yaml b/package/crds/web.azure.upbound.io_staticsites.yaml index 9b3dd66a5..909b8cabf 100644 --- a/package/crds/web.azure.upbound.io_staticsites.yaml +++ b/package/crds/web.azure.upbound.io_staticsites.yaml @@ -552,3 +552,531 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: StaticSite is the Schema for the StaticSites API. Manages a Static + Site. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: StaticSiteSpec defines the desired state of StaticSite + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + appSettings: + additionalProperties: + type: string + description: A key-value pair of App Settings. + type: object + x-kubernetes-map-type: granular + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of Managed Identity IDs which should be + assigned to this Static Site resource. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: The Type of Managed Identity assigned to this + Static Site resource. Possible values are SystemAssigned, + UserAssigned and SystemAssigned, UserAssigned. + type: string + type: object + location: + description: The Azure Region where the Static Web App should + exist. Changing this forces a new Static Web App to be created. + type: string + resourceGroupName: + description: The name of the Resource Group where the Static Web + App should exist. Changing this forces a new Static Web App + to be created. 
+ type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + skuSize: + description: Specifies the SKU size of the Static Web App. Possible + values are Free or Standard. Defaults to Free. + type: string + skuTier: + description: Specifies the SKU tier of the Static Web App. Possible + values are Free or Standard. Defaults to Free. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + appSettings: + additionalProperties: + type: string + description: A key-value pair of App Settings. + type: object + x-kubernetes-map-type: granular + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of Managed Identity IDs which should be + assigned to this Static Site resource. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: The Type of Managed Identity assigned to this + Static Site resource. 
Possible values are SystemAssigned, + UserAssigned and SystemAssigned, UserAssigned. + type: string + type: object + location: + description: The Azure Region where the Static Web App should + exist. Changing this forces a new Static Web App to be created. + type: string + skuSize: + description: Specifies the SKU size of the Static Web App. Possible + values are Free or Standard. Defaults to Free. + type: string + skuTier: + description: Specifies the SKU tier of the Static Web App. Possible + values are Free or Standard. Defaults to Free. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. 
+ enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. 
Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + status: + description: StaticSiteStatus defines the observed state of StaticSite. + properties: + atProvider: + properties: + appSettings: + additionalProperties: + type: string + description: A key-value pair of App Settings. + type: object + x-kubernetes-map-type: granular + defaultHostName: + description: The default host name of the Static Web App. + type: string + id: + description: The ID of the Static Web App. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of Managed Identity IDs which should be + assigned to this Static Site resource. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Principal ID associated with this Managed + Service Identity. + type: string + tenantId: + description: The ID of the Static Web App. + type: string + type: + description: The Type of Managed Identity assigned to this + Static Site resource. Possible values are SystemAssigned, + UserAssigned and SystemAssigned, UserAssigned. 
+ type: string + type: object + location: + description: The Azure Region where the Static Web App should + exist. Changing this forces a new Static Web App to be created. + type: string + resourceGroupName: + description: The name of the Resource Group where the Static Web + App should exist. Changing this forces a new Static Web App + to be created. + type: string + skuSize: + description: Specifies the SKU size of the Static Web App. Possible + values are Free or Standard. Defaults to Free. + type: string + skuTier: + description: Specifies the SKU tier of the Static Web App. Possible + values are Free or Standard. Defaults to Free. + type: string + tags: + additionalProperties: + type: string + description: A mapping of tags to assign to the resource. + type: object + x-kubernetes-map-type: granular + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. 
At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/web.azure.upbound.io_windowsfunctionapps.yaml b/package/crds/web.azure.upbound.io_windowsfunctionapps.yaml index 496f4ffe3..bd2303d85 100644 --- a/package/crds/web.azure.upbound.io_windowsfunctionapps.yaml +++ b/package/crds/web.azure.upbound.io_windowsfunctionapps.yaml @@ -5037,3 +5037,4868 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: WindowsFunctionApp is the Schema for the WindowsFunctionApps + API. Manages a Windows Function App. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: WindowsFunctionAppSpec defines the desired state of WindowsFunctionApp + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + appSettings: + additionalProperties: + type: string + description: |- + A map of key-value pairs for App Settings and custom values. + A map of key-value pairs for [App Settings](https://docs.microsoft.com/en-us/azure/azure-functions/functions-app-settings) and custom values. + type: object + x-kubernetes-map-type: granular + authSettings: + description: A auth_settings block as defined below. + properties: + activeDirectory: + description: An active_directory block as defined above. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. 
+ The ID of the Client to use to authenticate with Azure Active Directory. + type: string + clientSecretSecretRef: + description: |- + The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. + The Client Secret for the Client ID. Cannot be used with `client_secret_setting_name`. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The App Setting name that contains the client secret of the Client. Cannot be used with `client_secret`. + type: string + type: object + additionalLoginParameters: + additionalProperties: + type: string + description: |- + Specifies a map of login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + Specifies a map of Login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + type: object + x-kubernetes-map-type: granular + allowedExternalRedirectUrls: + description: |- + Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Function App. + Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + items: + type: string + type: array + defaultProvider: + description: |- + The default authentication provider to use when multiple providers are configured. Possible values include: AzureActiveDirectory, Facebook, Google, MicrosoftAccount, Twitter, Github + The default authentication provider to use when multiple providers are configured. 
Possible values include: `AzureActiveDirectory`, `Facebook`, `Google`, `MicrosoftAccount`, `Twitter`, `Github`. + type: string + enabled: + description: |- + Should the Authentication / Authorization feature be enabled for the Windows Function App? + Should the Authentication / Authorization feature be enabled? + type: boolean + facebook: + description: A facebook block as defined below. + properties: + appId: + description: |- + The App ID of the Facebook app used for login. + The App ID of the Facebook app used for login. + type: string + appSecretSecretRef: + description: |- + The App Secret of the Facebook app used for Facebook login. Cannot be specified with app_secret_setting_name. + The App Secret of the Facebook app used for Facebook Login. Cannot be specified with `app_secret_setting_name`. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + appSecretSettingName: + description: |- + The app setting name that contains the app_secret value used for Facebook Login. + The app setting name that contains the `app_secret` value used for Facebook Login. Cannot be specified with `app_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + Specifies a list of OAuth 2.0 scopes to be requested as part of Facebook Login authentication. + items: + type: string + type: array + type: object + github: + description: A github block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the GitHub app used for login. 
+ type: string + clientSecretSecretRef: + description: |- + The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. + The Client Secret of the GitHub app used for GitHub Login. Cannot be specified with `client_secret_setting_name`. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for GitHub Login. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + items: + type: string + type: array + type: object + google: + description: A google block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Google web application. + type: string + clientSecretSecretRef: + description: |- + The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. + The client secret associated with the Google web application. Cannot be specified with `client_secret_setting_name`. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - key + - name + - namespace + type: object + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Google Login. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + Specifies a list of OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication. If not specified, "openid", "profile", and "email" are used as default scopes. + items: + type: string + type: array + type: object + issuer: + description: |- + The OpenID Connect Issuer URI that represents the entity which issues access tokens for this Windows Function App. + The OpenID Connect Issuer URI that represents the entity which issues access tokens. + type: string + microsoft: + description: A microsoft block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OAuth 2.0 client ID that was created for the app used for authentication. + type: string + clientSecretSecretRef: + description: |- + The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. + The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret_setting_name`. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - key + - name + - namespace + type: object + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + The list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, `wl.basic` is used as the default scope. + items: + type: string + type: array + type: object + runtimeVersion: + description: |- + The Runtime Version of the Authentication / Authorization feature in use for the Windows Function App. + The RuntimeVersion of the Authentication / Authorization feature in use. + type: string + tokenRefreshExtensionHours: + description: |- + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + type: number + tokenStoreEnabled: + description: |- + Should the Windows Function App durably store platform-specific security tokens that are obtained during login flows? Defaults to false. + Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to `false`. + type: boolean + twitter: + description: A twitter block as defined below. + properties: + consumerKey: + description: |- + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + The OAuth 1.0a consumer key of the Twitter application used for sign-in. 
+ type: string + consumerSecretSecretRef: + description: |- + The OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with consumer_secret_setting_name. + The OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret_setting_name`. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + consumerSecretSettingName: + description: |- + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret`. + type: string + type: object + unauthenticatedClientAction: + description: |- + The action to take when an unauthenticated client attempts to access the app. Possible values include: RedirectToLoginPage, AllowAnonymous. + The action to take when an unauthenticated client attempts to access the app. Possible values include: `RedirectToLoginPage`, `AllowAnonymous`. + type: string + type: object + authSettingsV2: + description: A auth_settings_v2 block as defined below. + properties: + activeDirectoryV2: + description: An active_directory_v2 block as defined below. + properties: + allowedApplications: + description: |- + The list of allowed Applications for the Default Authorisation Policy. + The list of allowed Applications for the Default Authorisation Policy. + items: + type: string + type: array + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. 
+ items: + type: string + type: array + allowedGroups: + description: |- + The list of allowed Group Names for the Default Authorisation Policy. + The list of allowed Group Names for the Default Authorisation Policy. + items: + type: string + type: array + allowedIdentities: + description: |- + The list of allowed Identities for the Default Authorisation Policy. + The list of allowed Identities for the Default Authorisation Policy. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Active Directory. + type: string + clientSecretCertificateThumbprint: + description: |- + The thumbprint of the certificate used for signing purposes. + The thumbprint of the certificate used for signing purposes. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The App Setting name that contains the client secret of the Client. + type: string + jwtAllowedClientApplications: + description: |- + A list of Allowed Client Applications in the JWT Claim. + A list of Allowed Client Applications in the JWT Claim. + items: + type: string + type: array + jwtAllowedGroups: + description: |- + A list of Allowed Groups in the JWT Claim. + A list of Allowed Groups in the JWT Claim. + items: + type: string + type: array + loginParameters: + additionalProperties: + type: string + description: |- + A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + type: object + x-kubernetes-map-type: granular + tenantAuthEndpoint: + description: |- + The Azure Tenant Endpoint for the Authenticating Tenant. e.g. https://login.microsoftonline.com/v2.0/{tenant-guid}/ + The Azure Tenant Endpoint for the Authenticating Tenant. 
e.g. `https://login.microsoftonline.com/v2.0/{tenant-guid}/`. + type: string + wwwAuthenticationDisabled: + description: |- + Should the www-authenticate provider should be omitted from the request? Defaults to false. + Should the www-authenticate provider should be omitted from the request? Defaults to `false` + type: boolean + type: object + appleV2: + description: An apple_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Apple web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Apple Login. + type: string + type: object + authEnabled: + description: |- + Should the AuthV2 Settings be enabled. Defaults to false. + Should the AuthV2 Settings be enabled. Defaults to `false` + type: boolean + azureStaticWebAppV2: + description: An azure_static_web_app_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Static Web App Authentication. + type: string + type: object + configFilePath: + description: |- + The path to the App Auth settings. + The path to the App Auth settings. **Note:** Relative Paths are evaluated from the Site Root directory. + type: string + customOidcV2: + description: Zero or more custom_oidc_v2 blocks as defined + below. + items: + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with this Custom OIDC. + type: string + name: + description: |- + The name which should be used for this Storage Account. 
+ The name of the Custom OIDC Authentication Provider. + type: string + nameClaimType: + description: |- + The name of the claim that contains the users name. + The name of the claim that contains the users name. + type: string + openidConfigurationEndpoint: + description: |- + The app setting name that contains the client_secret value used for the Custom OIDC Login. + The endpoint that contains all the configuration endpoints for this Custom OIDC provider. + type: string + scopes: + description: |- + The list of the scopes that should be requested while authenticating. + The list of the scopes that should be requested while authenticating. + items: + type: string + type: array + type: object + type: array + defaultProvider: + description: |- + The Default Authentication Provider to use when the unauthenticated_action is set to RedirectToLoginPage. Possible values include: apple, azureactivedirectory, facebook, github, google, twitter and the name of your custom_oidc_v2 provider. + The Default Authentication Provider to use when the `unauthenticated_action` is set to `RedirectToLoginPage`. Possible values include: `apple`, `azureactivedirectory`, `facebook`, `github`, `google`, `twitter` and the `name` of your `custom_oidc_v2` provider. + type: string + excludedPaths: + description: |- + The paths which should be excluded from the unauthenticated_action when it is set to RedirectToLoginPage. + The paths which should be excluded from the `unauthenticated_action` when it is set to `RedirectToLoginPage`. + items: + type: string + type: array + facebookV2: + description: A facebook_v2 block as defined below. + properties: + appId: + description: |- + The App ID of the Facebook app used for login. + The App ID of the Facebook app used for login. + type: string + appSecretSettingName: + description: |- + The app setting name that contains the app_secret value used for Facebook Login. + The app setting name that contains the `app_secret` value used for Facebook Login. 
+ type: string + graphApiVersion: + description: |- + The version of the Facebook API to be used while logging in. + The version of the Facebook API to be used while logging in. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of scopes to be requested as part of Facebook Login authentication. + items: + type: string + type: array + type: object + forwardProxyConvention: + description: |- + The convention used to determine the url of the request made. Possible values include NoProxy, Standard, Custom. Defaults to NoProxy. + The convention used to determine the url of the request made. Possible values include `ForwardProxyConventionNoProxy`, `ForwardProxyConventionStandard`, `ForwardProxyConventionCustom`. Defaults to `ForwardProxyConventionNoProxy` + type: string + forwardProxyCustomHostHeaderName: + description: |- + The name of the custom header containing the host of the request. + The name of the header containing the host of the request. + type: string + forwardProxyCustomSchemeHeaderName: + description: |- + The name of the custom header containing the scheme of the request. + The name of the header containing the scheme of the request. + type: string + githubV2: + description: A github_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the GitHub app used for login. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for GitHub Login. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. 
+ Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + items: + type: string + type: array + type: object + googleV2: + description: A google_v2 block as defined below. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed Audiences that will be requested as part of Google Sign-In authentication. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Google web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Google Login. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of Login scopes that will be requested as part of Google Sign-In authentication. + items: + type: string + type: array + type: object + httpRouteApiPrefix: + description: |- + The prefix that should precede all the authentication and authorisation paths. Defaults to /.auth. + The prefix that should precede all the authentication and authorisation paths. Defaults to `/.auth` + type: string + login: + description: A login block as defined below. + properties: + allowedExternalRedirectUrls: + description: |- + External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. + External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. 
**Note:** URLs within the current domain are always implicitly allowed. + items: + type: string + type: array + cookieExpirationConvention: + description: |- + The method by which cookies expire. Possible values include: FixedTime, and IdentityProviderDerived. Defaults to FixedTime. + The method by which cookies expire. Possible values include: `FixedTime`, and `IdentityProviderDerived`. Defaults to `FixedTime`. + type: string + cookieExpirationTime: + description: |- + The time after the request is made when the session cookie should expire. Defaults to 08:00:00. + The time after the request is made when the session cookie should expire. Defaults to `08:00:00`. + type: string + logoutEndpoint: + description: |- + The endpoint to which logout requests should be made. + The endpoint to which logout requests should be made. + type: string + nonceExpirationTime: + description: |- + The time after the request is made when the nonce should expire. Defaults to 00:05:00. + The time after the request is made when the nonce should expire. Defaults to `00:05:00`. + type: string + preserveUrlFragmentsForLogins: + description: |- + Should the fragments from the request be preserved after the login request is made. Defaults to false. + Should the fragments from the request be preserved after the login request is made. Defaults to `false`. + type: boolean + tokenRefreshExtensionTime: + description: |- + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + type: number + tokenStoreEnabled: + description: |- + Should the Token Store configuration Enabled. Defaults to false + Should the Token Store configuration Enabled. 
Defaults to `false` + type: boolean + tokenStorePath: + description: |- + The directory path in the App Filesystem in which the tokens will be stored. + The directory path in the App Filesystem in which the tokens will be stored. + type: string + tokenStoreSasSettingName: + description: |- + The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + type: string + validateNonce: + description: |- + Should the nonce be validated while completing the login flow. Defaults to true. + Should the nonce be validated while completing the login flow. Defaults to `true`. + type: boolean + type: object + microsoftV2: + description: A microsoft_v2 block as defined below. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OAuth 2.0 client ID that was created for the app used for authentication. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + The list of Login scopes that will be requested as part of Microsoft Account authentication. + items: + type: string + type: array + type: object + requireAuthentication: + description: |- + Should the authentication flow be used for all requests. 
+ Should the authentication flow be used for all requests. + type: boolean + requireHttps: + description: |- + Should HTTPS be required on connections? Defaults to true. + Should HTTPS be required on connections? Defaults to true. + type: boolean + runtimeVersion: + description: |- + The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to ~1. + The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to `~1` + type: string + twitterV2: + description: A twitter_v2 block as defined below. + properties: + consumerKey: + description: |- + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + type: string + consumerSecretSettingName: + description: |- + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + type: string + type: object + unauthenticatedAction: + description: |- + The action to take for requests made without authentication. Possible values include RedirectToLoginPage, AllowAnonymous, Return401, and Return403. Defaults to RedirectToLoginPage. + The action to take for requests made without authentication. Possible values include `RedirectToLoginPage`, `AllowAnonymous`, `Return401`, and `Return403`. Defaults to `RedirectToLoginPage`. + type: string + type: object + backup: + description: A backup block as defined below. + properties: + enabled: + description: |- + Should this backup job be enabled? Defaults to true. + Should this backup job be enabled? + type: boolean + name: + description: |- + The name which should be used for this Backup. + The name which should be used for this Backup. + type: string + schedule: + description: A schedule block as defined below. 
+ properties: + frequencyInterval: + description: |- + How often the backup should be executed (e.g. for weekly backup, this should be set to 7 and frequency_unit should be set to Day). + How often the backup should be executed (e.g. for weekly backup, this should be set to `7` and `frequency_unit` should be set to `Day`). + type: number + frequencyUnit: + description: |- + The unit of time for how often the backup should take place. Possible values include: Day and Hour. + The unit of time for how often the backup should take place. Possible values include: `Day` and `Hour`. + type: string + keepAtLeastOneBackup: + description: |- + Should the service keep at least one backup, regardless of age of backup. Defaults to false. + Should the service keep at least one backup, regardless of age of backup. Defaults to `false`. + type: boolean + retentionPeriodDays: + description: |- + After how many days backups should be deleted. Defaults to 30. + After how many days backups should be deleted. + type: number + startTime: + description: |- + When the schedule should start working in RFC-3339 format. + When the schedule should start working in RFC-3339 format. + type: string + type: object + storageAccountUrlSecretRef: + description: |- + The SAS URL to the container. + The SAS URL to the container. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + required: + - storageAccountUrlSecretRef + type: object + builtinLoggingEnabled: + description: |- + Should built in logging be enabled. Configures AzureWebJobsDashboard app setting based on the configured storage setting. Defaults to true. + Should built in logging be enabled. 
Configures `AzureWebJobsDashboard` app setting based on the configured storage setting + type: boolean + clientCertificateEnabled: + description: |- + Should the function app use Client Certificates. + Should the function app use Client Certificates + type: boolean + clientCertificateExclusionPaths: + description: |- + Paths to exclude when using client certificates, separated by ; + Paths to exclude when using client certificates, separated by ; + type: string + clientCertificateMode: + description: |- + The mode of the Function App's client certificates requirement for incoming requests. Possible values are Required, Optional, and OptionalInteractiveUser. Defaults to Optional. + The mode of the Function App's client certificates requirement for incoming requests. Possible values are `Required`, `Optional`, and `OptionalInteractiveUser` + type: string + connectionString: + description: One or more connection_string blocks as defined below. + items: + properties: + name: + description: |- + The name which should be used for this Connection. + The name which should be used for this Connection. + type: string + type: + description: |- + Type of database. Possible values include: APIHub, Custom, DocDb, EventHub, MySQL, NotificationHub, PostgreSQL, RedisCache, ServiceBus, SQLAzure, and SQLServer. + Type of database. Possible values include: `MySQL`, `SQLServer`, `SQLAzure`, `Custom`, `NotificationHub`, `ServiceBus`, `EventHub`, `APIHub`, `DocDb`, `RedisCache`, and `PostgreSQL`. + type: string + valueSecretRef: + description: |- + The connection string value. + The connection string value. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - key + - name + - namespace + type: object + required: + - valueSecretRef + type: object + type: array + contentShareForceDisabled: + description: |- + Should Content Share Settings be disabled. Defaults to false. + Force disable the content share settings. + type: boolean + dailyMemoryTimeQuota: + description: |- + The amount of memory in gigabyte-seconds that your application is allowed to consume per day. Setting this value only affects function apps under the consumption plan. Defaults to 0. + The amount of memory in gigabyte-seconds that your application is allowed to consume per day. Setting this value only affects function apps in Consumption Plans. + type: number + enabled: + description: |- + Is the Function App enabled? Defaults to true. + Is the Windows Function App enabled. + type: boolean + ftpPublishBasicAuthenticationEnabled: + description: Should the default FTP Basic Authentication publishing + profile be enabled. Defaults to true. + type: boolean + functionsExtensionVersion: + description: |- + The runtime version associated with the Function App. Defaults to ~4. + The runtime version associated with the Function App. + type: string + httpsOnly: + description: |- + Can the Function App only be accessed via HTTPS?. Defaults to false. + Can the Function App only be accessed via HTTPS? + type: boolean + identity: + description: A identity block as defined below. + properties: + identityIds: + description: A list of User Assigned Managed Identity IDs + to be assigned to this Windows Function App. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Windows Function App. + Possible values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). 
+ type: string + type: object + keyVaultReferenceIdentityId: + description: |- + The User Assigned Identity ID used for accessing KeyVault secrets. The identity must be assigned to the application in the identity block. For more information see - Access vaults with a user-assigned identity + The User Assigned Identity to use for Key Vault access. + type: string + location: + description: The Azure Region where the Windows Function App should + exist. Changing this forces a new Windows Function App to be + created. + type: string + publicNetworkAccessEnabled: + description: Should public network access be enabled for the Function + App. Defaults to true. + type: boolean + resourceGroupName: + description: The name of the Resource Group where the Windows + Function App should exist. Changing this forces a new Windows + Function App to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + servicePlanId: + description: |- + The ID of the App Service Plan within which to create this Function App. + The ID of the App Service Plan within which to create this Function App + type: string + servicePlanIdRef: + description: Reference to a ServicePlan in web to populate servicePlanId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + servicePlanIdSelector: + description: Selector for a ServicePlan in web to populate servicePlanId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + siteConfig: + description: A site_config block as defined below. + properties: + alwaysOn: + description: |- + If this Windows Function App is Always On enabled. Defaults to false. + If this Windows Web App is Always On enabled. Defaults to `false`. + type: boolean + apiDefinitionUrl: + description: |- + The URL of the API definition that describes this Windows Function App. + The URL of the API definition that describes this Windows Function App. 
+ type: string + apiManagementApiId: + description: |- + The ID of the API Management API for this Windows Function App. + The ID of the API Management API for this Windows Function App. + type: string + appCommandLine: + description: |- + The App command line to launch. + The program and any arguments used to launch this app via the command line. (Example `node myapp.js`). + type: string + appScaleLimit: + description: |- + The number of workers this function app can scale out to. Only applicable to apps on the Consumption and Premium plan. + The number of workers this function app can scale out to. Only applicable to apps on the Consumption and Premium plan. + type: number + appServiceLogs: + description: An app_service_logs block as defined above. + properties: + diskQuotaMb: + description: |- + The amount of disk space to use for logs. Valid values are between 25 and 100. Defaults to 35. + The amount of disk space to use for logs. Valid values are between `25` and `100`. + type: number + retentionPeriodDays: + description: |- + After how many days backups should be deleted. Defaults to 30. + The retention period for logs in days. Valid values are between `0` and `99999`. Defaults to `0` (never delete). + type: number + type: object + applicationInsightsConnectionStringSecretRef: + description: |- + The Connection String for linking the Windows Function App to Application Insights. + The Connection String for linking the Windows Function App to Application Insights. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + applicationInsightsKeySecretRef: + description: |- + The Instrumentation Key for connecting the Windows Function App to Application Insights. + The Instrumentation Key for connecting the Windows Function App to Application Insights. 
+ properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + applicationStack: + description: An application_stack block as defined above. + properties: + dotnetVersion: + description: |- + The version of .NET to use. Possible values include v3.0, v4.0 v6.0, v7.0 and v8.0. Defaults to v4.0. + The version of .Net. Possible values are `v3.0`, `v4.0`, `v6.0` and `v7.0` + type: string + javaVersion: + description: |- + The Version of Java to use. Supported versions include 1.8, 11 & 17 (In-Preview). + The version of Java to use. Possible values are `1.8`, `11` and `17` + type: string + nodeVersion: + description: |- + The version of Node to run. Possible values include ~12, ~14, ~16 and ~18. + The version of Node to use. Possible values include `12`, `14`, `16` and `18` + type: string + powershellCoreVersion: + description: |- + The version of PowerShell Core to run. Possible values are 7, and 7.2. + The PowerShell Core version to use. Possible values are `7`, and `7.2` + type: string + useCustomRuntime: + description: |- + Should the Windows Function App use a custom runtime? + Does the Function App use a custom Application Stack? + type: boolean + useDotnetIsolatedRuntime: + description: |- + Should the DotNet process use an isolated runtime. Defaults to false. + Should the DotNet process use an isolated runtime. Defaults to `false`. + type: boolean + type: object + cors: + description: A cors block as defined above. + properties: + allowedOrigins: + description: |- + Specifies a list of origins that should be allowed to make cross-origin calls. + Specifies a list of origins that should be allowed to make cross-origin calls. + items: + type: string + type: array + x-kubernetes-list-type: set + supportCredentials: + description: |- + Are credentials allowed in CORS requests? 
Defaults to false. + Are credentials allowed in CORS requests? Defaults to `false`. + type: boolean + type: object + defaultDocuments: + description: |- + Specifies a list of Default Documents for the Windows Function App. + Specifies a list of Default Documents for the Windows Web App. + items: + type: string + type: array + elasticInstanceMinimum: + description: |- + The number of minimum instances for this Windows Function App. Only affects apps on Elastic Premium plans. + The number of minimum instances for this Windows Function App. Only affects apps on Elastic Premium plans. + type: number + ftpsState: + description: |- + State of FTP / FTPS service for this Windows Function App. Possible values include: AllAllowed, FtpsOnly and Disabled. Defaults to Disabled. + State of FTP / FTPS service for this function app. Possible values include: `AllAllowed`, `FtpsOnly` and `Disabled`. Defaults to `Disabled`. + type: string + healthCheckEvictionTimeInMin: + description: |- + The amount of time in minutes that a node can be unhealthy before being removed from the load balancer. Possible values are between 2 and 10. Only valid in conjunction with health_check_path. + The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between `2` and `10`. Defaults to `10`. Only valid in conjunction with `health_check_path` + type: number + healthCheckPath: + description: |- + The path to be checked for this Windows Function App health. + The path to be checked for this function app health. + type: string + http2Enabled: + description: |- + Specifies if the HTTP2 protocol should be enabled. Defaults to false. + Specifies if the http2 protocol should be enabled. Defaults to `false`. + type: boolean + ipRestriction: + description: One or more ip_restriction blocks as defined + above. + items: + properties: + action: + description: |- + The action to take. Possible values are Allow or Deny. Defaults to Allow. 
+ The action to take. Possible values are `Allow` or `Deny`. + type: string + description: + description: |- + The Description of this IP Restriction. + The description of the IP restriction rule. + type: string + headers: + description: A headers block as defined above. + items: + properties: + xAzureFdid: + description: Specifies a list of Azure Front Door + IDs. + items: + type: string + type: array + xFdHealthProbe: + description: Specifies if a Front Door Health + Probe should be expected. The only possible + value is 1. + items: + type: string + type: array + xForwardedFor: + description: Specifies a list of addresses for + which matching should be applied. Omitting this + value means allow any. + items: + type: string + type: array + xForwardedHost: + description: Specifies a list of Hosts for which + matching should be applied. + items: + type: string + type: array + type: object + type: array + ipAddress: + description: |- + The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + type: string + name: + description: |- + The name which should be used for this Storage Account. + The name which should be used for this `ip_restriction`. + type: string + priority: + description: |- + The priority value of this ip_restriction. Defaults to 65000. + The priority value of this `ip_restriction`. + type: number + serviceTag: + description: |- + The Service Tag used for this IP Restriction. + The Service Tag used for this IP Restriction. + type: string + virtualNetworkSubnetId: + description: |- + The subnet id which will be used by this Function App for regional virtual network integration. + The Virtual Network Subnet ID used for this IP Restriction. + type: string + virtualNetworkSubnetIdRef: + description: Reference to a Subnet in network to populate + virtualNetworkSubnetId. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkSubnetIdSelector: + description: Selector for a Subnet in network to populate + virtualNetworkSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + ipRestrictionDefaultAction: + description: The Default action for traffic that does not + match any ip_restriction rule. possible values include Allow + and Deny. Defaults to Allow. + type: string + loadBalancingMode: + description: |- + The Site load balancing mode. Possible values include: WeightedRoundRobin, LeastRequests, LeastResponseTime, WeightedTotalTraffic, RequestHash, PerSiteRoundRobin. Defaults to LeastRequests if omitted. + The Site load balancing mode. Possible values include: `WeightedRoundRobin`, `LeastRequests`, `LeastResponseTime`, `WeightedTotalTraffic`, `RequestHash`, `PerSiteRoundRobin`. Defaults to `LeastRequests` if omitted. + type: string + managedPipelineMode: + description: |- + Managed pipeline mode. Possible values include: Integrated, Classic. Defaults to Integrated. + The Managed Pipeline mode. Possible values include: `Integrated`, `Classic`. Defaults to `Integrated`. + type: string + minimumTlsVersion: + description: |- + Configures the minimum version of TLS required for SSL requests. Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + The configures the minimum version of TLS required for SSL requests. Possible values include: `1.0`, `1.1`, and `1.2`. Defaults to `1.2`. + type: string + preWarmedInstanceCount: + description: |- + The number of pre-warmed instances for this Windows Function App. Only affects apps on an Elastic Premium plan. + The number of pre-warmed instances for this function app. Only affects apps on an Elastic Premium plan. + type: number + remoteDebuggingEnabled: + description: |- + Should Remote Debugging be enabled. Defaults to false. + Should Remote Debugging be enabled. Defaults to `false`. + type: boolean + remoteDebuggingVersion: + description: |- + The Remote Debugging Version. 
Possible values include VS2017, VS2019, and VS2022. + The Remote Debugging Version. Possible values include `VS2017`, `VS2019`, and `VS2022` + type: string + runtimeScaleMonitoringEnabled: + description: |- + Should Scale Monitoring of the Functions Runtime be enabled? + Should Functions Runtime Scale Monitoring be enabled. + type: boolean + scmIpRestriction: + description: One or more scm_ip_restriction blocks as defined + above. + items: + properties: + action: + description: |- + The action to take. Possible values are Allow or Deny. Defaults to Allow. + The action to take. Possible values are `Allow` or `Deny`. + type: string + description: + description: |- + The Description of this IP Restriction. + The description of the IP restriction rule. + type: string + headers: + description: A headers block as defined above. + items: + properties: + xAzureFdid: + description: Specifies a list of Azure Front Door + IDs. + items: + type: string + type: array + xFdHealthProbe: + description: Specifies if a Front Door Health + Probe should be expected. The only possible + value is 1. + items: + type: string + type: array + xForwardedFor: + description: Specifies a list of addresses for + which matching should be applied. Omitting this + value means allow any. + items: + type: string + type: array + xForwardedHost: + description: Specifies a list of Hosts for which + matching should be applied. + items: + type: string + type: array + type: object + type: array + ipAddress: + description: |- + The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + type: string + name: + description: |- + The name which should be used for this Storage Account. + The name which should be used for this `ip_restriction`. 
+ type: string + priority: + description: |- + The priority value of this ip_restriction. Defaults to 65000. + The priority value of this `ip_restriction`. + type: number + serviceTag: + description: |- + The Service Tag used for this IP Restriction. + The Service Tag used for this IP Restriction. + type: string + virtualNetworkSubnetId: + description: |- + The subnet id which will be used by this Function App for regional virtual network integration. + The Virtual Network Subnet ID used for this IP Restriction. + type: string + virtualNetworkSubnetIdRef: + description: Reference to a Subnet in network to populate + virtualNetworkSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkSubnetIdSelector: + description: Selector for a Subnet in network to populate + virtualNetworkSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + scmIpRestrictionDefaultAction: + description: The Default action for traffic that does not + match any scm_ip_restriction rule. possible values include + Allow and Deny. Defaults to Allow. + type: string + scmMinimumTlsVersion: + description: |- + Configures the minimum version of TLS required for SSL requests to the SCM site. Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + Configures the minimum version of TLS required for SSL requests to the SCM site Possible values include: `1.0`, `1.1`, and `1.2`. Defaults to `1.2`. + type: string + scmUseMainIpRestriction: + description: |- + Should the Windows Function App ip_restriction configuration be used for the SCM also. + Should the Windows Function App `ip_restriction` configuration be used for the SCM also. + type: boolean + use32BitWorker: + description: |- + Should the Windows Function App use a 32-bit worker process. Defaults to true. + Should the Windows Web App use a 32-bit worker. + type: boolean + vnetRouteAllEnabled: + description: |- + Should all outbound traffic to have NAT Gateways, Network Security Groups and User Defined Routes applied? Defaults to false. 
+ Should all outbound traffic to have Virtual Network Security Groups and User Defined Routes applied? Defaults to `false`. + type: boolean + websocketsEnabled: + description: |- + Should Web Sockets be enabled. Defaults to false. + Should Web Sockets be enabled. Defaults to `false`. + type: boolean + workerCount: + description: |- + The number of Workers for this Windows Function App. + The number of Workers for this Windows Function App. + type: number + type: object + stickySettings: + description: A sticky_settings block as defined below. + properties: + appSettingNames: + description: A list of app_setting names that the Windows + Function App will not swap between Slots when a swap operation + is triggered. + items: + type: string + type: array + connectionStringNames: + description: A list of connection_string names that the Windows + Function App will not swap between Slots when a swap operation + is triggered. + items: + type: string + type: array + type: object + storageAccount: + description: One or more storage_account blocks as defined below. + items: + properties: + accessKeySecretRef: + description: The Access key for the storage account. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + accountName: + description: The Name of the Storage Account. + type: string + mountPath: + description: The path at which to mount the storage share. + type: string + name: + description: The name which should be used for this Storage + Account. + type: string + shareName: + description: The Name of the File Share or Container Name + for Blob storage. + type: string + type: + description: The Azure Storage Type. Possible values include + AzureFiles. 
+ type: string + required: + - accessKeySecretRef + type: object + type: array + storageAccountAccessKeySecretRef: + description: |- + The access key which will be used to access the backend storage account for the Function App. Conflicts with storage_uses_managed_identity. + The access key which will be used to access the storage account for the Function App. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + storageAccountName: + description: |- + The backend storage account name which will be used by this Function App. + The backend storage account name which will be used by this Function App. + type: string + storageAccountNameRef: + description: Reference to a Account in storage to populate storageAccountName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + storageAccountNameSelector: + description: Selector for a Account in storage to populate storageAccountName. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + storageKeyVaultSecretId: + description: |- + The Key Vault Secret ID, optionally including version, that contains the Connection String to connect to the storage account for this Function App. + The Key Vault Secret ID, including version, that contains the Connection String to connect to the storage account for this Function App. + type: string + storageUsesManagedIdentity: + description: |- + Should the Function App use Managed Identity to access the storage account. Conflicts with storage_account_access_key. + Should the Function App use its Managed Identity to access storage? + type: boolean + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Windows Function App. 
+ type: object + x-kubernetes-map-type: granular + virtualNetworkSubnetId: + description: The subnet id which will be used by this Function + App for regional virtual network integration. + type: string + virtualNetworkSubnetIdRef: + description: Reference to a Subnet in network to populate virtualNetworkSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkSubnetIdSelector: + description: Selector for a Subnet in network to populate virtualNetworkSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + webdeployPublishBasicAuthenticationEnabled: + description: Should the default WebDeploy Basic Authentication + publishing credentials enabled. Defaults to true. + type: boolean + zipDeployFile: + description: |- + The local path and filename of the Zip packaged application to deploy to this Windows Function App. + The local path and filename of the Zip packaged application to deploy to this Windows Function App. **Note:** Using this value requires `WEBSITE_RUN_FROM_PACKAGE=1` to be set on the App in `app_settings`. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + appSettings: + additionalProperties: + type: string + description: |- + A map of key-value pairs for App Settings and custom values. + A map of key-value pairs for [App Settings](https://docs.microsoft.com/en-us/azure/azure-functions/functions-app-settings) and custom values. 
+ type: object + x-kubernetes-map-type: granular + authSettings: + description: A auth_settings block as defined below. + properties: + activeDirectory: + description: An active_directory block as defined above. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Active Directory. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The App Setting name that contains the client secret of the Client. Cannot be used with `client_secret`. + type: string + type: object + additionalLoginParameters: + additionalProperties: + type: string + description: |- + Specifies a map of login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + Specifies a map of Login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + type: object + x-kubernetes-map-type: granular + allowedExternalRedirectUrls: + description: |- + Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Function App. + Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + items: + type: string + type: array + defaultProvider: + description: |- + The default authentication provider to use when multiple providers are configured. 
Possible values include: AzureActiveDirectory, Facebook, Google, MicrosoftAccount, Twitter, Github + The default authentication provider to use when multiple providers are configured. Possible values include: `AzureActiveDirectory`, `Facebook`, `Google`, `MicrosoftAccount`, `Twitter`, `Github`. + type: string + enabled: + description: |- + Should the Authentication / Authorization feature be enabled for the Windows Function App? + Should the Authentication / Authorization feature be enabled? + type: boolean + facebook: + description: A facebook block as defined below. + properties: + appId: + description: |- + The App ID of the Facebook app used for login. + The App ID of the Facebook app used for login. + type: string + appSecretSettingName: + description: |- + The app setting name that contains the app_secret value used for Facebook Login. + The app setting name that contains the `app_secret` value used for Facebook Login. Cannot be specified with `app_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + Specifies a list of OAuth 2.0 scopes to be requested as part of Facebook Login authentication. + items: + type: string + type: array + type: object + github: + description: A github block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the GitHub app used for login. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for GitHub Login. Cannot be specified with `client_secret`. 
+ type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + items: + type: string + type: array + type: object + google: + description: A google block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Google web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Google Login. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + Specifies a list of OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication. If not specified, "openid", "profile", and "email" are used as default scopes. + items: + type: string + type: array + type: object + issuer: + description: |- + The OpenID Connect Issuer URI that represents the entity which issues access tokens for this Windows Function App. + The OpenID Connect Issuer URI that represents the entity which issues access tokens. + type: string + microsoft: + description: A microsoft block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OAuth 2.0 client ID that was created for the app used for authentication. 
+ type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + The list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, `wl.basic` is used as the default scope. + items: + type: string + type: array + type: object + runtimeVersion: + description: |- + The Runtime Version of the Authentication / Authorization feature in use for the Windows Function App. + The RuntimeVersion of the Authentication / Authorization feature in use. + type: string + tokenRefreshExtensionHours: + description: |- + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + type: number + tokenStoreEnabled: + description: |- + Should the Windows Function App durably store platform-specific security tokens that are obtained during login flows? Defaults to false. + Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to `false`. + type: boolean + twitter: + description: A twitter block as defined below. + properties: + consumerKey: + description: |- + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + The OAuth 1.0a consumer key of the Twitter application used for sign-in. 
+ type: string + consumerSecretSettingName: + description: |- + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret`. + type: string + type: object + unauthenticatedClientAction: + description: |- + The action to take when an unauthenticated client attempts to access the app. Possible values include: RedirectToLoginPage, AllowAnonymous. + The action to take when an unauthenticated client attempts to access the app. Possible values include: `RedirectToLoginPage`, `AllowAnonymous`. + type: string + type: object + authSettingsV2: + description: A auth_settings_v2 block as defined below. + properties: + activeDirectoryV2: + description: An active_directory_v2 block as defined below. + properties: + allowedApplications: + description: |- + The list of allowed Applications for the Default Authorisation Policy. + The list of allowed Applications for the Default Authorisation Policy. + items: + type: string + type: array + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + items: + type: string + type: array + allowedGroups: + description: |- + The list of allowed Group Names for the Default Authorisation Policy. + The list of allowed Group Names for the Default Authorisation Policy. + items: + type: string + type: array + allowedIdentities: + description: |- + The list of allowed Identities for the Default Authorisation Policy. + The list of allowed Identities for the Default Authorisation Policy. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. 
+ The ID of the Client to use to authenticate with Azure Active Directory. + type: string + clientSecretCertificateThumbprint: + description: |- + The thumbprint of the certificate used for signing purposes. + The thumbprint of the certificate used for signing purposes. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The App Setting name that contains the client secret of the Client. + type: string + jwtAllowedClientApplications: + description: |- + A list of Allowed Client Applications in the JWT Claim. + A list of Allowed Client Applications in the JWT Claim. + items: + type: string + type: array + jwtAllowedGroups: + description: |- + A list of Allowed Groups in the JWT Claim. + A list of Allowed Groups in the JWT Claim. + items: + type: string + type: array + loginParameters: + additionalProperties: + type: string + description: |- + A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + type: object + x-kubernetes-map-type: granular + tenantAuthEndpoint: + description: |- + The Azure Tenant Endpoint for the Authenticating Tenant. e.g. https://login.microsoftonline.com/v2.0/{tenant-guid}/ + The Azure Tenant Endpoint for the Authenticating Tenant. e.g. `https://login.microsoftonline.com/v2.0/{tenant-guid}/`. + type: string + wwwAuthenticationDisabled: + description: |- + Should the www-authenticate provider should be omitted from the request? Defaults to false. + Should the www-authenticate provider should be omitted from the request? Defaults to `false` + type: boolean + type: object + appleV2: + description: An apple_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. 
+ The OpenID Connect Client ID for the Apple web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Apple Login. + type: string + type: object + authEnabled: + description: |- + Should the AuthV2 Settings be enabled. Defaults to false. + Should the AuthV2 Settings be enabled. Defaults to `false` + type: boolean + azureStaticWebAppV2: + description: An azure_static_web_app_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Static Web App Authentication. + type: string + type: object + configFilePath: + description: |- + The path to the App Auth settings. + The path to the App Auth settings. **Note:** Relative Paths are evaluated from the Site Root directory. + type: string + customOidcV2: + description: Zero or more custom_oidc_v2 blocks as defined + below. + items: + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with this Custom OIDC. + type: string + name: + description: |- + The name which should be used for this Storage Account. + The name of the Custom OIDC Authentication Provider. + type: string + nameClaimType: + description: |- + The name of the claim that contains the users name. + The name of the claim that contains the users name. + type: string + openidConfigurationEndpoint: + description: |- + The app setting name that contains the client_secret value used for the Custom OIDC Login. + The endpoint that contains all the configuration endpoints for this Custom OIDC provider. 
+ type: string + scopes: + description: |- + The list of the scopes that should be requested while authenticating. + The list of the scopes that should be requested while authenticating. + items: + type: string + type: array + type: object + type: array + defaultProvider: + description: |- + The Default Authentication Provider to use when the unauthenticated_action is set to RedirectToLoginPage. Possible values include: apple, azureactivedirectory, facebook, github, google, twitter and the name of your custom_oidc_v2 provider. + The Default Authentication Provider to use when the `unauthenticated_action` is set to `RedirectToLoginPage`. Possible values include: `apple`, `azureactivedirectory`, `facebook`, `github`, `google`, `twitter` and the `name` of your `custom_oidc_v2` provider. + type: string + excludedPaths: + description: |- + The paths which should be excluded from the unauthenticated_action when it is set to RedirectToLoginPage. + The paths which should be excluded from the `unauthenticated_action` when it is set to `RedirectToLoginPage`. + items: + type: string + type: array + facebookV2: + description: A facebook_v2 block as defined below. + properties: + appId: + description: |- + The App ID of the Facebook app used for login. + The App ID of the Facebook app used for login. + type: string + appSecretSettingName: + description: |- + The app setting name that contains the app_secret value used for Facebook Login. + The app setting name that contains the `app_secret` value used for Facebook Login. + type: string + graphApiVersion: + description: |- + The version of the Facebook API to be used while logging in. + The version of the Facebook API to be used while logging in. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of scopes to be requested as part of Facebook Login authentication. 
+ items: + type: string + type: array + type: object + forwardProxyConvention: + description: |- + The convention used to determine the url of the request made. Possible values include NoProxy, Standard, Custom. Defaults to NoProxy. + The convention used to determine the url of the request made. Possible values include `ForwardProxyConventionNoProxy`, `ForwardProxyConventionStandard`, `ForwardProxyConventionCustom`. Defaults to `ForwardProxyConventionNoProxy` + type: string + forwardProxyCustomHostHeaderName: + description: |- + The name of the custom header containing the host of the request. + The name of the header containing the host of the request. + type: string + forwardProxyCustomSchemeHeaderName: + description: |- + The name of the custom header containing the scheme of the request. + The name of the header containing the scheme of the request. + type: string + githubV2: + description: A github_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the GitHub app used for login. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for GitHub Login. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + items: + type: string + type: array + type: object + googleV2: + description: A google_v2 block as defined below. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. 
+ Specifies a list of Allowed Audiences that will be requested as part of Google Sign-In authentication. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Google web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Google Login. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of Login scopes that will be requested as part of Google Sign-In authentication. + items: + type: string + type: array + type: object + httpRouteApiPrefix: + description: |- + The prefix that should precede all the authentication and authorisation paths. Defaults to /.auth. + The prefix that should precede all the authentication and authorisation paths. Defaults to `/.auth` + type: string + login: + description: A login block as defined below. + properties: + allowedExternalRedirectUrls: + description: |- + External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. + External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. **Note:** URLs within the current domain are always implicitly allowed. + items: + type: string + type: array + cookieExpirationConvention: + description: |- + The method by which cookies expire. Possible values include: FixedTime, and IdentityProviderDerived. Defaults to FixedTime. + The method by which cookies expire. 
Possible values include: `FixedTime`, and `IdentityProviderDerived`. Defaults to `FixedTime`. + type: string + cookieExpirationTime: + description: |- + The time after the request is made when the session cookie should expire. Defaults to 08:00:00. + The time after the request is made when the session cookie should expire. Defaults to `08:00:00`. + type: string + logoutEndpoint: + description: |- + The endpoint to which logout requests should be made. + The endpoint to which logout requests should be made. + type: string + nonceExpirationTime: + description: |- + The time after the request is made when the nonce should expire. Defaults to 00:05:00. + The time after the request is made when the nonce should expire. Defaults to `00:05:00`. + type: string + preserveUrlFragmentsForLogins: + description: |- + Should the fragments from the request be preserved after the login request is made. Defaults to false. + Should the fragments from the request be preserved after the login request is made. Defaults to `false`. + type: boolean + tokenRefreshExtensionTime: + description: |- + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + type: number + tokenStoreEnabled: + description: |- + Should the Token Store configuration Enabled. Defaults to false + Should the Token Store configuration Enabled. Defaults to `false` + type: boolean + tokenStorePath: + description: |- + The directory path in the App Filesystem in which the tokens will be stored. + The directory path in the App Filesystem in which the tokens will be stored. + type: string + tokenStoreSasSettingName: + description: |- + The name of the app setting which contains the SAS URL of the blob storage containing the tokens. 
+ The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + type: string + validateNonce: + description: |- + Should the nonce be validated while completing the login flow. Defaults to true. + Should the nonce be validated while completing the login flow. Defaults to `true`. + type: boolean + type: object + microsoftV2: + description: A microsoft_v2 block as defined below. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OAuth 2.0 client ID that was created for the app used for authentication. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + The list of Login scopes that will be requested as part of Microsoft Account authentication. + items: + type: string + type: array + type: object + requireAuthentication: + description: |- + Should the authentication flow be used for all requests. + Should the authentication flow be used for all requests. + type: boolean + requireHttps: + description: |- + Should HTTPS be required on connections? Defaults to true. + Should HTTPS be required on connections? Defaults to true. + type: boolean + runtimeVersion: + description: |- + The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to ~1. 
+ The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to `~1` + type: string + twitterV2: + description: A twitter_v2 block as defined below. + properties: + consumerKey: + description: |- + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + type: string + consumerSecretSettingName: + description: |- + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + type: string + type: object + unauthenticatedAction: + description: |- + The action to take for requests made without authentication. Possible values include RedirectToLoginPage, AllowAnonymous, Return401, and Return403. Defaults to RedirectToLoginPage. + The action to take for requests made without authentication. Possible values include `RedirectToLoginPage`, `AllowAnonymous`, `Return401`, and `Return403`. Defaults to `RedirectToLoginPage`. + type: string + type: object + backup: + description: A backup block as defined below. + properties: + enabled: + description: |- + Should this backup job be enabled? Defaults to true. + Should this backup job be enabled? + type: boolean + name: + description: |- + The name which should be used for this Backup. + The name which should be used for this Backup. + type: string + schedule: + description: A schedule block as defined below. + properties: + frequencyInterval: + description: |- + How often the backup should be executed (e.g. for weekly backup, this should be set to 7 and frequency_unit should be set to Day). + How often the backup should be executed (e.g. for weekly backup, this should be set to `7` and `frequency_unit` should be set to `Day`). + type: number + frequencyUnit: + description: |- + The unit of time for how often the backup should take place. 
Possible values include: Day and Hour. + The unit of time for how often the backup should take place. Possible values include: `Day` and `Hour`. + type: string + keepAtLeastOneBackup: + description: |- + Should the service keep at least one backup, regardless of age of backup. Defaults to false. + Should the service keep at least one backup, regardless of age of backup. Defaults to `false`. + type: boolean + retentionPeriodDays: + description: |- + After how many days backups should be deleted. Defaults to 30. + After how many days backups should be deleted. + type: number + startTime: + description: |- + When the schedule should start working in RFC-3339 format. + When the schedule should start working in RFC-3339 format. + type: string + type: object + type: object + builtinLoggingEnabled: + description: |- + Should built in logging be enabled. Configures AzureWebJobsDashboard app setting based on the configured storage setting. Defaults to true. + Should built in logging be enabled. Configures `AzureWebJobsDashboard` app setting based on the configured storage setting + type: boolean + clientCertificateEnabled: + description: |- + Should the function app use Client Certificates. + Should the function app use Client Certificates + type: boolean + clientCertificateExclusionPaths: + description: |- + Paths to exclude when using client certificates, separated by ; + Paths to exclude when using client certificates, separated by ; + type: string + clientCertificateMode: + description: |- + The mode of the Function App's client certificates requirement for incoming requests. Possible values are Required, Optional, and OptionalInteractiveUser. Defaults to Optional. + The mode of the Function App's client certificates requirement for incoming requests. Possible values are `Required`, `Optional`, and `OptionalInteractiveUser` + type: string + connectionString: + description: One or more connection_string blocks as defined below. 
+ items: + properties: + name: + description: |- + The name which should be used for this Connection. + The name which should be used for this Connection. + type: string + type: + description: |- + Type of database. Possible values include: APIHub, Custom, DocDb, EventHub, MySQL, NotificationHub, PostgreSQL, RedisCache, ServiceBus, SQLAzure, and SQLServer. + Type of database. Possible values include: `MySQL`, `SQLServer`, `SQLAzure`, `Custom`, `NotificationHub`, `ServiceBus`, `EventHub`, `APIHub`, `DocDb`, `RedisCache`, and `PostgreSQL`. + type: string + type: object + type: array + contentShareForceDisabled: + description: |- + Should Content Share Settings be disabled. Defaults to false. + Force disable the content share settings. + type: boolean + dailyMemoryTimeQuota: + description: |- + The amount of memory in gigabyte-seconds that your application is allowed to consume per day. Setting this value only affects function apps under the consumption plan. Defaults to 0. + The amount of memory in gigabyte-seconds that your application is allowed to consume per day. Setting this value only affects function apps in Consumption Plans. + type: number + enabled: + description: |- + Is the Function App enabled? Defaults to true. + Is the Windows Function App enabled. + type: boolean + ftpPublishBasicAuthenticationEnabled: + description: Should the default FTP Basic Authentication publishing + profile be enabled. Defaults to true. + type: boolean + functionsExtensionVersion: + description: |- + The runtime version associated with the Function App. Defaults to ~4. + The runtime version associated with the Function App. + type: string + httpsOnly: + description: |- + Can the Function App only be accessed via HTTPS?. Defaults to false. + Can the Function App only be accessed via HTTPS? + type: boolean + identity: + description: A identity block as defined below. 
+ properties: + identityIds: + description: A list of User Assigned Managed Identity IDs + to be assigned to this Windows Function App. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Windows Function App. + Possible values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + keyVaultReferenceIdentityId: + description: |- + The User Assigned Identity ID used for accessing KeyVault secrets. The identity must be assigned to the application in the identity block. For more information see - Access vaults with a user-assigned identity + The User Assigned Identity to use for Key Vault access. + type: string + location: + description: The Azure Region where the Windows Function App should + exist. Changing this forces a new Windows Function App to be + created. + type: string + publicNetworkAccessEnabled: + description: Should public network access be enabled for the Function + App. Defaults to true. + type: boolean + servicePlanId: + description: |- + The ID of the App Service Plan within which to create this Function App. + The ID of the App Service Plan within which to create this Function App + type: string + servicePlanIdRef: + description: Reference to a ServicePlan in web to populate servicePlanId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + servicePlanIdSelector: + description: Selector for a ServicePlan in web to populate servicePlanId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + siteConfig: + description: A site_config block as defined below. + properties: + alwaysOn: + description: |- + If this Windows Function App is Always On enabled. Defaults to false. + If this Windows Web App is Always On enabled. Defaults to `false`. + type: boolean + apiDefinitionUrl: + description: |- + The URL of the API definition that describes this Windows Function App. + The URL of the API definition that describes this Windows Function App. 
+ type: string + apiManagementApiId: + description: |- + The ID of the API Management API for this Windows Function App. + The ID of the API Management API for this Windows Function App. + type: string + appCommandLine: + description: |- + The App command line to launch. + The program and any arguments used to launch this app via the command line. (Example `node myapp.js`). + type: string + appScaleLimit: + description: |- + The number of workers this function app can scale out to. Only applicable to apps on the Consumption and Premium plan. + The number of workers this function app can scale out to. Only applicable to apps on the Consumption and Premium plan. + type: number + appServiceLogs: + description: An app_service_logs block as defined above. + properties: + diskQuotaMb: + description: |- + The amount of disk space to use for logs. Valid values are between 25 and 100. Defaults to 35. + The amount of disk space to use for logs. Valid values are between `25` and `100`. + type: number + retentionPeriodDays: + description: |- + After how many days backups should be deleted. Defaults to 30. + The retention period for logs in days. Valid values are between `0` and `99999`. Defaults to `0` (never delete). + type: number + type: object + applicationStack: + description: An application_stack block as defined above. + properties: + dotnetVersion: + description: |- + The version of .NET to use. Possible values include v3.0, v4.0 v6.0, v7.0 and v8.0. Defaults to v4.0. + The version of .Net. Possible values are `v3.0`, `v4.0`, `v6.0` and `v7.0` + type: string + javaVersion: + description: |- + The Version of Java to use. Supported versions include 1.8, 11 & 17 (In-Preview). + The version of Java to use. Possible values are `1.8`, `11` and `17` + type: string + nodeVersion: + description: |- + The version of Node to run. Possible values include ~12, ~14, ~16 and ~18. + The version of Node to use. 
Possible values include `12`, `14`, `16` and `18` + type: string + powershellCoreVersion: + description: |- + The version of PowerShell Core to run. Possible values are 7, and 7.2. + The PowerShell Core version to use. Possible values are `7`, and `7.2` + type: string + useCustomRuntime: + description: |- + Should the Windows Function App use a custom runtime? + Does the Function App use a custom Application Stack? + type: boolean + useDotnetIsolatedRuntime: + description: |- + Should the DotNet process use an isolated runtime. Defaults to false. + Should the DotNet process use an isolated runtime. Defaults to `false`. + type: boolean + type: object + cors: + description: A cors block as defined above. + properties: + allowedOrigins: + description: |- + Specifies a list of origins that should be allowed to make cross-origin calls. + Specifies a list of origins that should be allowed to make cross-origin calls. + items: + type: string + type: array + x-kubernetes-list-type: set + supportCredentials: + description: |- + Are credentials allowed in CORS requests? Defaults to false. + Are credentials allowed in CORS requests? Defaults to `false`. + type: boolean + type: object + defaultDocuments: + description: |- + Specifies a list of Default Documents for the Windows Function App. + Specifies a list of Default Documents for the Windows Web App. + items: + type: string + type: array + elasticInstanceMinimum: + description: |- + The number of minimum instances for this Windows Function App. Only affects apps on Elastic Premium plans. + The number of minimum instances for this Windows Function App. Only affects apps on Elastic Premium plans. + type: number + ftpsState: + description: |- + State of FTP / FTPS service for this Windows Function App. Possible values include: AllAllowed, FtpsOnly and Disabled. Defaults to Disabled. + State of FTP / FTPS service for this function app. Possible values include: `AllAllowed`, `FtpsOnly` and `Disabled`. Defaults to `Disabled`. 
+ type: string + healthCheckEvictionTimeInMin: + description: |- + The amount of time in minutes that a node can be unhealthy before being removed from the load balancer. Possible values are between 2 and 10. Only valid in conjunction with health_check_path. + The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between `2` and `10`. Defaults to `10`. Only valid in conjunction with `health_check_path` + type: number + healthCheckPath: + description: |- + The path to be checked for this Windows Function App health. + The path to be checked for this function app health. + type: string + http2Enabled: + description: |- + Specifies if the HTTP2 protocol should be enabled. Defaults to false. + Specifies if the http2 protocol should be enabled. Defaults to `false`. + type: boolean + ipRestriction: + description: One or more ip_restriction blocks as defined + above. + items: + properties: + action: + description: |- + The action to take. Possible values are Allow or Deny. Defaults to Allow. + The action to take. Possible values are `Allow` or `Deny`. + type: string + description: + description: |- + The Description of this IP Restriction. + The description of the IP restriction rule. + type: string + headers: + description: A headers block as defined above. + items: + properties: + xAzureFdid: + description: Specifies a list of Azure Front Door + IDs. + items: + type: string + type: array + xFdHealthProbe: + description: Specifies if a Front Door Health + Probe should be expected. The only possible + value is 1. + items: + type: string + type: array + xForwardedFor: + description: Specifies a list of addresses for + which matching should be applied. Omitting this + value means allow any. + items: + type: string + type: array + xForwardedHost: + description: Specifies a list of Hosts for which + matching should be applied. 
+ items: + type: string + type: array + type: object + type: array + ipAddress: + description: |- + The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + type: string + name: + description: |- + The name which should be used for this Storage Account. + The name which should be used for this `ip_restriction`. + type: string + priority: + description: |- + The priority value of this ip_restriction. Defaults to 65000. + The priority value of this `ip_restriction`. + type: number + serviceTag: + description: |- + The Service Tag used for this IP Restriction. + The Service Tag used for this IP Restriction. + type: string + virtualNetworkSubnetId: + description: |- + The subnet id which will be used by this Function App for regional virtual network integration. + The Virtual Network Subnet ID used for this IP Restriction. + type: string + virtualNetworkSubnetIdRef: + description: Reference to a Subnet in network to populate + virtualNetworkSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkSubnetIdSelector: + description: Selector for a Subnet in network to populate + virtualNetworkSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + ipRestrictionDefaultAction: + description: The Default action for traffic that does not + match any ip_restriction rule. possible values include Allow + and Deny. Defaults to Allow. + type: string + loadBalancingMode: + description: |- + The Site load balancing mode. Possible values include: WeightedRoundRobin, LeastRequests, LeastResponseTime, WeightedTotalTraffic, RequestHash, PerSiteRoundRobin. Defaults to LeastRequests if omitted. + The Site load balancing mode. Possible values include: `WeightedRoundRobin`, `LeastRequests`, `LeastResponseTime`, `WeightedTotalTraffic`, `RequestHash`, `PerSiteRoundRobin`. 
Defaults to `LeastRequests` if omitted. + type: string + managedPipelineMode: + description: |- + Managed pipeline mode. Possible values include: Integrated, Classic. Defaults to Integrated. + The Managed Pipeline mode. Possible values include: `Integrated`, `Classic`. Defaults to `Integrated`. + type: string + minimumTlsVersion: + description: |- + Configures the minimum version of TLS required for SSL requests. Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + The configures the minimum version of TLS required for SSL requests. Possible values include: `1.0`, `1.1`, and `1.2`. Defaults to `1.2`. + type: string + preWarmedInstanceCount: + description: |- + The number of pre-warmed instances for this Windows Function App. Only affects apps on an Elastic Premium plan. + The number of pre-warmed instances for this function app. Only affects apps on an Elastic Premium plan. + type: number + remoteDebuggingEnabled: + description: |- + Should Remote Debugging be enabled. Defaults to false. + Should Remote Debugging be enabled. Defaults to `false`. + type: boolean + remoteDebuggingVersion: + description: |- + The Remote Debugging Version. Possible values include VS2017, VS2019, and VS2022. + The Remote Debugging Version. Possible values include `VS2017`, `VS2019`, and `VS2022` + type: string + runtimeScaleMonitoringEnabled: + description: |- + Should Scale Monitoring of the Functions Runtime be enabled? + Should Functions Runtime Scale Monitoring be enabled. + type: boolean + scmIpRestriction: + description: One or more scm_ip_restriction blocks as defined + above. + items: + properties: + action: + description: |- + The action to take. Possible values are Allow or Deny. Defaults to Allow. + The action to take. Possible values are `Allow` or `Deny`. + type: string + description: + description: |- + The Description of this IP Restriction. + The description of the IP restriction rule. + type: string + headers: + description: A headers block as defined above. 
+ items: + properties: + xAzureFdid: + description: Specifies a list of Azure Front Door + IDs. + items: + type: string + type: array + xFdHealthProbe: + description: Specifies if a Front Door Health + Probe should be expected. The only possible + value is 1. + items: + type: string + type: array + xForwardedFor: + description: Specifies a list of addresses for + which matching should be applied. Omitting this + value means allow any. + items: + type: string + type: array + xForwardedHost: + description: Specifies a list of Hosts for which + matching should be applied. + items: + type: string + type: array + type: object + type: array + ipAddress: + description: |- + The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + type: string + name: + description: |- + The name which should be used for this Storage Account. + The name which should be used for this `ip_restriction`. + type: string + priority: + description: |- + The priority value of this ip_restriction. Defaults to 65000. + The priority value of this `ip_restriction`. + type: number + serviceTag: + description: |- + The Service Tag used for this IP Restriction. + The Service Tag used for this IP Restriction. + type: string + virtualNetworkSubnetId: + description: |- + The subnet id which will be used by this Function App for regional virtual network integration. + The Virtual Network Subnet ID used for this IP Restriction. + type: string + virtualNetworkSubnetIdRef: + description: Reference to a Subnet in network to populate + virtualNetworkSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkSubnetIdSelector: + description: Selector for a Subnet in network to populate + virtualNetworkSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + scmIpRestrictionDefaultAction: + description: The Default action for traffic that does not + match any scm_ip_restriction rule. possible values include + Allow and Deny. Defaults to Allow. + type: string + scmMinimumTlsVersion: + description: |- + Configures the minimum version of TLS required for SSL requests to the SCM site. Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + Configures the minimum version of TLS required for SSL requests to the SCM site Possible values include: `1.0`, `1.1`, and `1.2`. Defaults to `1.2`. + type: string + scmUseMainIpRestriction: + description: |- + Should the Windows Function App ip_restriction configuration be used for the SCM also. + Should the Windows Function App `ip_restriction` configuration be used for the SCM also. + type: boolean + use32BitWorker: + description: |- + Should the Windows Function App use a 32-bit worker process. Defaults to true. + Should the Windows Web App use a 32-bit worker. + type: boolean + vnetRouteAllEnabled: + description: |- + Should all outbound traffic to have NAT Gateways, Network Security Groups and User Defined Routes applied? Defaults to false. + Should all outbound traffic to have Virtual Network Security Groups and User Defined Routes applied? Defaults to `false`. + type: boolean + websocketsEnabled: + description: |- + Should Web Sockets be enabled. Defaults to false. + Should Web Sockets be enabled. Defaults to `false`. + type: boolean + workerCount: + description: |- + The number of Workers for this Windows Function App. + The number of Workers for this Windows Function App. + type: number + type: object + stickySettings: + description: A sticky_settings block as defined below. + properties: + appSettingNames: + description: A list of app_setting names that the Windows + Function App will not swap between Slots when a swap operation + is triggered. 
+ items: + type: string + type: array + connectionStringNames: + description: A list of connection_string names that the Windows + Function App will not swap between Slots when a swap operation + is triggered. + items: + type: string + type: array + type: object + storageAccount: + description: One or more storage_account blocks as defined below. + items: + properties: + accountName: + description: The Name of the Storage Account. + type: string + mountPath: + description: The path at which to mount the storage share. + type: string + name: + description: The name which should be used for this Storage + Account. + type: string + shareName: + description: The Name of the File Share or Container Name + for Blob storage. + type: string + type: + description: The Azure Storage Type. Possible values include + AzureFiles. + type: string + type: object + type: array + storageAccountName: + description: |- + The backend storage account name which will be used by this Function App. + The backend storage account name which will be used by this Function App. + type: string + storageAccountNameRef: + description: Reference to a Account in storage to populate storageAccountName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + storageAccountNameSelector: + description: Selector for a Account in storage to populate storageAccountName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + storageKeyVaultSecretId: + description: |- + The Key Vault Secret ID, optionally including version, that contains the Connection String to connect to the storage account for this Function App. + The Key Vault Secret ID, including version, that contains the Connection String to connect to the storage account for this Function App. + type: string + storageUsesManagedIdentity: + description: |- + Should the Function App use Managed Identity to access the storage account. Conflicts with storage_account_access_key. + Should the Function App use its Managed Identity to access storage? 
+ type: boolean + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Windows Function App. + type: object + x-kubernetes-map-type: granular + virtualNetworkSubnetId: + description: The subnet id which will be used by this Function + App for regional virtual network integration. + type: string + virtualNetworkSubnetIdRef: + description: Reference to a Subnet in network to populate virtualNetworkSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkSubnetIdSelector: + description: Selector for a Subnet in network to populate virtualNetworkSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + webdeployPublishBasicAuthenticationEnabled: + description: Should the default WebDeploy Basic Authentication + publishing credentials enabled. Defaults to true. + type: boolean + zipDeployFile: + description: |- + The local path and filename of the Zip packaged application to deploy to this Windows Function App. + The local path and filename of the Zip packaged application to deploy to this Windows Function App. **Note:** Using this value requires `WEBSITE_RUN_FROM_PACKAGE=1` to be set on the App in `app_settings`. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.siteConfig is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.siteConfig) + || (has(self.initProvider) && has(self.initProvider.siteConfig))' + status: + description: WindowsFunctionAppStatus defines the observed state of WindowsFunctionApp. + properties: + atProvider: + properties: + appSettings: + additionalProperties: + type: string + description: |- + A map of key-value pairs for App Settings and custom values. + A map of key-value pairs for [App Settings](https://docs.microsoft.com/en-us/azure/azure-functions/functions-app-settings) and custom values. 
+ type: object + x-kubernetes-map-type: granular + authSettings: + description: A auth_settings block as defined below. + properties: + activeDirectory: + description: An active_directory block as defined above. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Active Directory. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The App Setting name that contains the client secret of the Client. Cannot be used with `client_secret`. + type: string + type: object + additionalLoginParameters: + additionalProperties: + type: string + description: |- + Specifies a map of login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + Specifies a map of Login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + type: object + x-kubernetes-map-type: granular + allowedExternalRedirectUrls: + description: |- + Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Function App. + Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + items: + type: string + type: array + defaultProvider: + description: |- + The default authentication provider to use when multiple providers are configured. 
Possible values include: AzureActiveDirectory, Facebook, Google, MicrosoftAccount, Twitter, Github + The default authentication provider to use when multiple providers are configured. Possible values include: `AzureActiveDirectory`, `Facebook`, `Google`, `MicrosoftAccount`, `Twitter`, `Github`. + type: string + enabled: + description: |- + Should the Authentication / Authorization feature be enabled for the Windows Function App? + Should the Authentication / Authorization feature be enabled? + type: boolean + facebook: + description: A facebook block as defined below. + properties: + appId: + description: |- + The App ID of the Facebook app used for login. + The App ID of the Facebook app used for login. + type: string + appSecretSettingName: + description: |- + The app setting name that contains the app_secret value used for Facebook Login. + The app setting name that contains the `app_secret` value used for Facebook Login. Cannot be specified with `app_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + Specifies a list of OAuth 2.0 scopes to be requested as part of Facebook Login authentication. + items: + type: string + type: array + type: object + github: + description: A github block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the GitHub app used for login. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for GitHub Login. Cannot be specified with `client_secret`. 
+ type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + items: + type: string + type: array + type: object + google: + description: A google block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Google web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Google Login. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + Specifies a list of OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication. If not specified, "openid", "profile", and "email" are used as default scopes. + items: + type: string + type: array + type: object + issuer: + description: |- + The OpenID Connect Issuer URI that represents the entity which issues access tokens for this Windows Function App. + The OpenID Connect Issuer URI that represents the entity which issues access tokens. + type: string + microsoft: + description: A microsoft block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OAuth 2.0 client ID that was created for the app used for authentication. 
+ type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + The list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, `wl.basic` is used as the default scope. + items: + type: string + type: array + type: object + runtimeVersion: + description: |- + The Runtime Version of the Authentication / Authorization feature in use for the Windows Function App. + The RuntimeVersion of the Authentication / Authorization feature in use. + type: string + tokenRefreshExtensionHours: + description: |- + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + type: number + tokenStoreEnabled: + description: |- + Should the Windows Function App durably store platform-specific security tokens that are obtained during login flows? Defaults to false. + Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to `false`. + type: boolean + twitter: + description: A twitter block as defined below. + properties: + consumerKey: + description: |- + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + The OAuth 1.0a consumer key of the Twitter application used for sign-in. 
+ type: string + consumerSecretSettingName: + description: |- + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret`. + type: string + type: object + unauthenticatedClientAction: + description: |- + The action to take when an unauthenticated client attempts to access the app. Possible values include: RedirectToLoginPage, AllowAnonymous. + The action to take when an unauthenticated client attempts to access the app. Possible values include: `RedirectToLoginPage`, `AllowAnonymous`. + type: string + type: object + authSettingsV2: + description: A auth_settings_v2 block as defined below. + properties: + activeDirectoryV2: + description: An active_directory_v2 block as defined below. + properties: + allowedApplications: + description: |- + The list of allowed Applications for the Default Authorisation Policy. + The list of allowed Applications for the Default Authorisation Policy. + items: + type: string + type: array + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + items: + type: string + type: array + allowedGroups: + description: |- + The list of allowed Group Names for the Default Authorisation Policy. + The list of allowed Group Names for the Default Authorisation Policy. + items: + type: string + type: array + allowedIdentities: + description: |- + The list of allowed Identities for the Default Authorisation Policy. + The list of allowed Identities for the Default Authorisation Policy. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. 
+ The ID of the Client to use to authenticate with Azure Active Directory. + type: string + clientSecretCertificateThumbprint: + description: |- + The thumbprint of the certificate used for signing purposes. + The thumbprint of the certificate used for signing purposes. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The App Setting name that contains the client secret of the Client. + type: string + jwtAllowedClientApplications: + description: |- + A list of Allowed Client Applications in the JWT Claim. + A list of Allowed Client Applications in the JWT Claim. + items: + type: string + type: array + jwtAllowedGroups: + description: |- + A list of Allowed Groups in the JWT Claim. + A list of Allowed Groups in the JWT Claim. + items: + type: string + type: array + loginParameters: + additionalProperties: + type: string + description: |- + A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + type: object + x-kubernetes-map-type: granular + tenantAuthEndpoint: + description: |- + The Azure Tenant Endpoint for the Authenticating Tenant. e.g. https://login.microsoftonline.com/v2.0/{tenant-guid}/ + The Azure Tenant Endpoint for the Authenticating Tenant. e.g. `https://login.microsoftonline.com/v2.0/{tenant-guid}/`. + type: string + wwwAuthenticationDisabled: + description: |- + Should the www-authenticate provider should be omitted from the request? Defaults to false. + Should the www-authenticate provider should be omitted from the request? Defaults to `false` + type: boolean + type: object + appleV2: + description: An apple_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. 
+ The OpenID Connect Client ID for the Apple web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Apple Login. + type: string + loginScopes: + description: The list of Login scopes that should be requested + as part of Microsoft Account authentication. + items: + type: string + type: array + type: object + authEnabled: + description: |- + Should the AuthV2 Settings be enabled. Defaults to false. + Should the AuthV2 Settings be enabled. Defaults to `false` + type: boolean + azureStaticWebAppV2: + description: An azure_static_web_app_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Static Web App Authentication. + type: string + type: object + configFilePath: + description: |- + The path to the App Auth settings. + The path to the App Auth settings. **Note:** Relative Paths are evaluated from the Site Root directory. + type: string + customOidcV2: + description: Zero or more custom_oidc_v2 blocks as defined + below. + items: + properties: + authorisationEndpoint: + description: |- + The endpoint to make the Authorisation Request as supplied by openid_configuration_endpoint response. + The endpoint to make the Authorisation Request. + type: string + certificationUri: + description: |- + The endpoint that provides the keys necessary to validate the token as supplied by openid_configuration_endpoint response. + The endpoint that provides the keys necessary to validate the token. + type: string + clientCredentialMethod: + description: |- + The Client Credential Method used. + The Client Credential Method used. Currently the only supported value is `ClientSecretPost`. 
+ type: string + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with this Custom OIDC. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The App Setting name that contains the secret for this Custom OIDC Client. + type: string + issuerEndpoint: + description: |- + The endpoint that issued the Token as supplied by openid_configuration_endpoint response. + The endpoint that issued the Token. + type: string + name: + description: |- + The name which should be used for this Storage Account. + The name of the Custom OIDC Authentication Provider. + type: string + nameClaimType: + description: |- + The name of the claim that contains the users name. + The name of the claim that contains the users name. + type: string + openidConfigurationEndpoint: + description: |- + The app setting name that contains the client_secret value used for the Custom OIDC Login. + The endpoint that contains all the configuration endpoints for this Custom OIDC provider. + type: string + scopes: + description: |- + The list of the scopes that should be requested while authenticating. + The list of the scopes that should be requested while authenticating. + items: + type: string + type: array + tokenEndpoint: + description: |- + The endpoint used to request a Token as supplied by openid_configuration_endpoint response. + The endpoint used to request a Token. + type: string + type: object + type: array + defaultProvider: + description: |- + The Default Authentication Provider to use when the unauthenticated_action is set to RedirectToLoginPage. Possible values include: apple, azureactivedirectory, facebook, github, google, twitter and the name of your custom_oidc_v2 provider. 
+ The Default Authentication Provider to use when the `unauthenticated_action` is set to `RedirectToLoginPage`. Possible values include: `apple`, `azureactivedirectory`, `facebook`, `github`, `google`, `twitter` and the `name` of your `custom_oidc_v2` provider. + type: string + excludedPaths: + description: |- + The paths which should be excluded from the unauthenticated_action when it is set to RedirectToLoginPage. + The paths which should be excluded from the `unauthenticated_action` when it is set to `RedirectToLoginPage`. + items: + type: string + type: array + facebookV2: + description: A facebook_v2 block as defined below. + properties: + appId: + description: |- + The App ID of the Facebook app used for login. + The App ID of the Facebook app used for login. + type: string + appSecretSettingName: + description: |- + The app setting name that contains the app_secret value used for Facebook Login. + The app setting name that contains the `app_secret` value used for Facebook Login. + type: string + graphApiVersion: + description: |- + The version of the Facebook API to be used while logging in. + The version of the Facebook API to be used while logging in. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of scopes to be requested as part of Facebook Login authentication. + items: + type: string + type: array + type: object + forwardProxyConvention: + description: |- + The convention used to determine the url of the request made. Possible values include NoProxy, Standard, Custom. Defaults to NoProxy. + The convention used to determine the url of the request made. Possible values include `ForwardProxyConventionNoProxy`, `ForwardProxyConventionStandard`, `ForwardProxyConventionCustom`. 
Defaults to `ForwardProxyConventionNoProxy` + type: string + forwardProxyCustomHostHeaderName: + description: |- + The name of the custom header containing the host of the request. + The name of the header containing the host of the request. + type: string + forwardProxyCustomSchemeHeaderName: + description: |- + The name of the custom header containing the scheme of the request. + The name of the header containing the scheme of the request. + type: string + githubV2: + description: A github_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the GitHub app used for login. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for GitHub Login. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + items: + type: string + type: array + type: object + googleV2: + description: A google_v2 block as defined below. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed Audiences that will be requested as part of Google Sign-In authentication. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Google web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. 
+ The app setting name that contains the `client_secret` value used for Google Login. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of Login scopes that will be requested as part of Google Sign-In authentication. + items: + type: string + type: array + type: object + httpRouteApiPrefix: + description: |- + The prefix that should precede all the authentication and authorisation paths. Defaults to /.auth. + The prefix that should precede all the authentication and authorisation paths. Defaults to `/.auth` + type: string + login: + description: A login block as defined below. + properties: + allowedExternalRedirectUrls: + description: |- + External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. + External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. **Note:** URLs within the current domain are always implicitly allowed. + items: + type: string + type: array + cookieExpirationConvention: + description: |- + The method by which cookies expire. Possible values include: FixedTime, and IdentityProviderDerived. Defaults to FixedTime. + The method by which cookies expire. Possible values include: `FixedTime`, and `IdentityProviderDerived`. Defaults to `FixedTime`. + type: string + cookieExpirationTime: + description: |- + The time after the request is made when the session cookie should expire. Defaults to 08:00:00. + The time after the request is made when the session cookie should expire. Defaults to `08:00:00`. + type: string + logoutEndpoint: + description: |- + The endpoint to which logout requests should be made. + The endpoint to which logout requests should be made. 
+ type: string + nonceExpirationTime: + description: |- + The time after the request is made when the nonce should expire. Defaults to 00:05:00. + The time after the request is made when the nonce should expire. Defaults to `00:05:00`. + type: string + preserveUrlFragmentsForLogins: + description: |- + Should the fragments from the request be preserved after the login request is made. Defaults to false. + Should the fragments from the request be preserved after the login request is made. Defaults to `false`. + type: boolean + tokenRefreshExtensionTime: + description: |- + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + type: number + tokenStoreEnabled: + description: |- + Should the Token Store configuration Enabled. Defaults to false + Should the Token Store configuration Enabled. Defaults to `false` + type: boolean + tokenStorePath: + description: |- + The directory path in the App Filesystem in which the tokens will be stored. + The directory path in the App Filesystem in which the tokens will be stored. + type: string + tokenStoreSasSettingName: + description: |- + The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + type: string + validateNonce: + description: |- + Should the nonce be validated while completing the login flow. Defaults to true. + Should the nonce be validated while completing the login flow. Defaults to `true`. + type: boolean + type: object + microsoftV2: + description: A microsoft_v2 block as defined below. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. 
+ Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OAuth 2.0 client ID that was created for the app used for authentication. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + The list of Login scopes that will be requested as part of Microsoft Account authentication. + items: + type: string + type: array + type: object + requireAuthentication: + description: |- + Should the authentication flow be used for all requests. + Should the authentication flow be used for all requests. + type: boolean + requireHttps: + description: |- + Should HTTPS be required on connections? Defaults to true. + Should HTTPS be required on connections? Defaults to true. + type: boolean + runtimeVersion: + description: |- + The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to ~1. + The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to `~1` + type: string + twitterV2: + description: A twitter_v2 block as defined below. + properties: + consumerKey: + description: |- + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + type: string + consumerSecretSettingName: + description: |- + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. 
+ The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + type: string + type: object + unauthenticatedAction: + description: |- + The action to take for requests made without authentication. Possible values include RedirectToLoginPage, AllowAnonymous, Return401, and Return403. Defaults to RedirectToLoginPage. + The action to take for requests made without authentication. Possible values include `RedirectToLoginPage`, `AllowAnonymous`, `Return401`, and `Return403`. Defaults to `RedirectToLoginPage`. + type: string + type: object + backup: + description: A backup block as defined below. + properties: + enabled: + description: |- + Should this backup job be enabled? Defaults to true. + Should this backup job be enabled? + type: boolean + name: + description: |- + The name which should be used for this Backup. + The name which should be used for this Backup. + type: string + schedule: + description: A schedule block as defined below. + properties: + frequencyInterval: + description: |- + How often the backup should be executed (e.g. for weekly backup, this should be set to 7 and frequency_unit should be set to Day). + How often the backup should be executed (e.g. for weekly backup, this should be set to `7` and `frequency_unit` should be set to `Day`). + type: number + frequencyUnit: + description: |- + The unit of time for how often the backup should take place. Possible values include: Day and Hour. + The unit of time for how often the backup should take place. Possible values include: `Day` and `Hour`. + type: string + keepAtLeastOneBackup: + description: |- + Should the service keep at least one backup, regardless of age of backup. Defaults to false. + Should the service keep at least one backup, regardless of age of backup. Defaults to `false`. + type: boolean + lastExecutionTime: + description: The time the backup was last attempted. 
+ type: string + retentionPeriodDays: + description: |- + After how many days backups should be deleted. Defaults to 30. + After how many days backups should be deleted. + type: number + startTime: + description: |- + When the schedule should start working in RFC-3339 format. + When the schedule should start working in RFC-3339 format. + type: string + type: object + type: object + builtinLoggingEnabled: + description: |- + Should built in logging be enabled. Configures AzureWebJobsDashboard app setting based on the configured storage setting. Defaults to true. + Should built in logging be enabled. Configures `AzureWebJobsDashboard` app setting based on the configured storage setting + type: boolean + clientCertificateEnabled: + description: |- + Should the function app use Client Certificates. + Should the function app use Client Certificates + type: boolean + clientCertificateExclusionPaths: + description: |- + Paths to exclude when using client certificates, separated by ; + Paths to exclude when using client certificates, separated by ; + type: string + clientCertificateMode: + description: |- + The mode of the Function App's client certificates requirement for incoming requests. Possible values are Required, Optional, and OptionalInteractiveUser. Defaults to Optional. + The mode of the Function App's client certificates requirement for incoming requests. Possible values are `Required`, `Optional`, and `OptionalInteractiveUser` + type: string + connectionString: + description: One or more connection_string blocks as defined below. + items: + properties: + name: + description: |- + The name which should be used for this Connection. + The name which should be used for this Connection. + type: string + type: + description: |- + Type of database. Possible values include: APIHub, Custom, DocDb, EventHub, MySQL, NotificationHub, PostgreSQL, RedisCache, ServiceBus, SQLAzure, and SQLServer. + Type of database. 
Possible values include: `MySQL`, `SQLServer`, `SQLAzure`, `Custom`, `NotificationHub`, `ServiceBus`, `EventHub`, `APIHub`, `DocDb`, `RedisCache`, and `PostgreSQL`. + type: string + type: object + type: array + contentShareForceDisabled: + description: |- + Should Content Share Settings be disabled. Defaults to false. + Force disable the content share settings. + type: boolean + dailyMemoryTimeQuota: + description: |- + The amount of memory in gigabyte-seconds that your application is allowed to consume per day. Setting this value only affects function apps under the consumption plan. Defaults to 0. + The amount of memory in gigabyte-seconds that your application is allowed to consume per day. Setting this value only affects function apps in Consumption Plans. + type: number + defaultHostname: + description: The default hostname of the Windows Function App. + type: string + enabled: + description: |- + Is the Function App enabled? Defaults to true. + Is the Windows Function App enabled. + type: boolean + ftpPublishBasicAuthenticationEnabled: + description: Should the default FTP Basic Authentication publishing + profile be enabled. Defaults to true. + type: boolean + functionsExtensionVersion: + description: |- + The runtime version associated with the Function App. Defaults to ~4. + The runtime version associated with the Function App. + type: string + hostingEnvironmentId: + description: The ID of the App Service Environment used by Function + App. + type: string + httpsOnly: + description: |- + Can the Function App only be accessed via HTTPS?. Defaults to false. + Can the Function App only be accessed via HTTPS? + type: boolean + id: + description: The ID of the Windows Function App. + type: string + identity: + description: A identity block as defined below. + properties: + identityIds: + description: A list of User Assigned Managed Identity IDs + to be assigned to this Windows Function App. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Principal ID associated with this Managed + Service Identity. + type: string + tenantId: + description: The Tenant ID associated with this Managed Service + Identity. + type: string + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Windows Function App. + Possible values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + keyVaultReferenceIdentityId: + description: |- + The User Assigned Identity ID used for accessing KeyVault secrets. The identity must be assigned to the application in the identity block. For more information see - Access vaults with a user-assigned identity + The User Assigned Identity to use for Key Vault access. + type: string + kind: + description: The Kind value for this Windows Function App. + type: string + location: + description: The Azure Region where the Windows Function App should + exist. Changing this forces a new Windows Function App to be + created. + type: string + outboundIpAddressList: + description: A list of outbound IP addresses. For example ["52.23.25.3", + "52.143.43.12"] + items: + type: string + type: array + outboundIpAddresses: + description: A comma separated list of outbound IP addresses as + a string. For example 52.23.25.3,52.143.43.12. + type: string + possibleOutboundIpAddressList: + description: A list of possible outbound IP addresses, not all + of which are necessarily in use. This is a superset of outbound_ip_address_list. + For example ["52.23.25.3", "52.143.43.12"]. + items: + type: string + type: array + possibleOutboundIpAddresses: + description: A comma separated list of possible outbound IP addresses + as a string. For example 52.23.25.3,52.143.43.12,52.143.43.17. + This is a superset of outbound_ip_addresses. 
+ type: string + publicNetworkAccessEnabled: + description: Should public network access be enabled for the Function + App. Defaults to true. + type: boolean + resourceGroupName: + description: The name of the Resource Group where the Windows + Function App should exist. Changing this forces a new Windows + Function App to be created. + type: string + servicePlanId: + description: |- + The ID of the App Service Plan within which to create this Function App. + The ID of the App Service Plan within which to create this Function App + type: string + siteConfig: + description: A site_config block as defined below. + properties: + alwaysOn: + description: |- + If this Windows Function App is Always On enabled. Defaults to false. + If this Windows Web App is Always On enabled. Defaults to `false`. + type: boolean + apiDefinitionUrl: + description: |- + The URL of the API definition that describes this Windows Function App. + The URL of the API definition that describes this Windows Function App. + type: string + apiManagementApiId: + description: |- + The ID of the API Management API for this Windows Function App. + The ID of the API Management API for this Windows Function App. + type: string + appCommandLine: + description: |- + The App command line to launch. + The program and any arguments used to launch this app via the command line. (Example `node myapp.js`). + type: string + appScaleLimit: + description: |- + The number of workers this function app can scale out to. Only applicable to apps on the Consumption and Premium plan. + The number of workers this function app can scale out to. Only applicable to apps on the Consumption and Premium plan. + type: number + appServiceLogs: + description: An app_service_logs block as defined above. + properties: + diskQuotaMb: + description: |- + The amount of disk space to use for logs. Valid values are between 25 and 100. Defaults to 35. + The amount of disk space to use for logs. Valid values are between `25` and `100`. 
+ type: number + retentionPeriodDays: + description: |- + After how many days backups should be deleted. Defaults to 30. + The retention period for logs in days. Valid values are between `0` and `99999`. Defaults to `0` (never delete). + type: number + type: object + applicationStack: + description: An application_stack block as defined above. + properties: + dotnetVersion: + description: |- + The version of .NET to use. Possible values include v3.0, v4.0 v6.0, v7.0 and v8.0. Defaults to v4.0. + The version of .Net. Possible values are `v3.0`, `v4.0`, `v6.0` and `v7.0` + type: string + javaVersion: + description: |- + The Version of Java to use. Supported versions include 1.8, 11 & 17 (In-Preview). + The version of Java to use. Possible values are `1.8`, `11` and `17` + type: string + nodeVersion: + description: |- + The version of Node to run. Possible values include ~12, ~14, ~16 and ~18. + The version of Node to use. Possible values include `12`, `14`, `16` and `18` + type: string + powershellCoreVersion: + description: |- + The version of PowerShell Core to run. Possible values are 7, and 7.2. + The PowerShell Core version to use. Possible values are `7`, and `7.2` + type: string + useCustomRuntime: + description: |- + Should the Windows Function App use a custom runtime? + Does the Function App use a custom Application Stack? + type: boolean + useDotnetIsolatedRuntime: + description: |- + Should the DotNet process use an isolated runtime. Defaults to false. + Should the DotNet process use an isolated runtime. Defaults to `false`. + type: boolean + type: object + cors: + description: A cors block as defined above. + properties: + allowedOrigins: + description: |- + Specifies a list of origins that should be allowed to make cross-origin calls. + Specifies a list of origins that should be allowed to make cross-origin calls. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + supportCredentials: + description: |- + Are credentials allowed in CORS requests? Defaults to false. + Are credentials allowed in CORS requests? Defaults to `false`. + type: boolean + type: object + defaultDocuments: + description: |- + Specifies a list of Default Documents for the Windows Function App. + Specifies a list of Default Documents for the Windows Web App. + items: + type: string + type: array + detailedErrorLoggingEnabled: + description: |- + Is the Function App enabled? Defaults to true. + Is detailed error logging enabled + type: boolean + elasticInstanceMinimum: + description: |- + The number of minimum instances for this Windows Function App. Only affects apps on Elastic Premium plans. + The number of minimum instances for this Windows Function App. Only affects apps on Elastic Premium plans. + type: number + ftpsState: + description: |- + State of FTP / FTPS service for this Windows Function App. Possible values include: AllAllowed, FtpsOnly and Disabled. Defaults to Disabled. + State of FTP / FTPS service for this function app. Possible values include: `AllAllowed`, `FtpsOnly` and `Disabled`. Defaults to `Disabled`. + type: string + healthCheckEvictionTimeInMin: + description: |- + The amount of time in minutes that a node can be unhealthy before being removed from the load balancer. Possible values are between 2 and 10. Only valid in conjunction with health_check_path. + The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between `2` and `10`. Defaults to `10`. Only valid in conjunction with `health_check_path` + type: number + healthCheckPath: + description: |- + The path to be checked for this Windows Function App health. + The path to be checked for this function app health. + type: string + http2Enabled: + description: |- + Specifies if the HTTP2 protocol should be enabled. Defaults to false. 
+ Specifies if the http2 protocol should be enabled. Defaults to `false`. + type: boolean + ipRestriction: + description: One or more ip_restriction blocks as defined + above. + items: + properties: + action: + description: |- + The action to take. Possible values are Allow or Deny. Defaults to Allow. + The action to take. Possible values are `Allow` or `Deny`. + type: string + description: + description: |- + The Description of this IP Restriction. + The description of the IP restriction rule. + type: string + headers: + description: A headers block as defined above. + items: + properties: + xAzureFdid: + description: Specifies a list of Azure Front Door + IDs. + items: + type: string + type: array + xFdHealthProbe: + description: Specifies if a Front Door Health + Probe should be expected. The only possible + value is 1. + items: + type: string + type: array + xForwardedFor: + description: Specifies a list of addresses for + which matching should be applied. Omitting this + value means allow any. + items: + type: string + type: array + xForwardedHost: + description: Specifies a list of Hosts for which + matching should be applied. + items: + type: string + type: array + type: object + type: array + ipAddress: + description: |- + The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + type: string + name: + description: |- + The name which should be used for this Storage Account. + The name which should be used for this `ip_restriction`. + type: string + priority: + description: |- + The priority value of this ip_restriction. Defaults to 65000. + The priority value of this `ip_restriction`. + type: number + serviceTag: + description: |- + The Service Tag used for this IP Restriction. + The Service Tag used for this IP Restriction. 
+ type: string + virtualNetworkSubnetId: + description: |- + The subnet id which will be used by this Function App for regional virtual network integration. + The Virtual Network Subnet ID used for this IP Restriction. + type: string + type: object + type: array + ipRestrictionDefaultAction: + description: The Default action for traffic that does not + match any ip_restriction rule. possible values include Allow + and Deny. Defaults to Allow. + type: string + loadBalancingMode: + description: |- + The Site load balancing mode. Possible values include: WeightedRoundRobin, LeastRequests, LeastResponseTime, WeightedTotalTraffic, RequestHash, PerSiteRoundRobin. Defaults to LeastRequests if omitted. + The Site load balancing mode. Possible values include: `WeightedRoundRobin`, `LeastRequests`, `LeastResponseTime`, `WeightedTotalTraffic`, `RequestHash`, `PerSiteRoundRobin`. Defaults to `LeastRequests` if omitted. + type: string + managedPipelineMode: + description: |- + Managed pipeline mode. Possible values include: Integrated, Classic. Defaults to Integrated. + The Managed Pipeline mode. Possible values include: `Integrated`, `Classic`. Defaults to `Integrated`. + type: string + minimumTlsVersion: + description: |- + Configures the minimum version of TLS required for SSL requests. Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + The configures the minimum version of TLS required for SSL requests. Possible values include: `1.0`, `1.1`, and `1.2`. Defaults to `1.2`. + type: string + preWarmedInstanceCount: + description: |- + The number of pre-warmed instances for this Windows Function App. Only affects apps on an Elastic Premium plan. + The number of pre-warmed instances for this function app. Only affects apps on an Elastic Premium plan. + type: number + remoteDebuggingEnabled: + description: |- + Should Remote Debugging be enabled. Defaults to false. + Should Remote Debugging be enabled. Defaults to `false`. 
+ type: boolean + remoteDebuggingVersion: + description: |- + The Remote Debugging Version. Possible values include VS2017, VS2019, and VS2022. + The Remote Debugging Version. Possible values include `VS2017`, `VS2019`, and `VS2022` + type: string + runtimeScaleMonitoringEnabled: + description: |- + Should Scale Monitoring of the Functions Runtime be enabled? + Should Functions Runtime Scale Monitoring be enabled. + type: boolean + scmIpRestriction: + description: One or more scm_ip_restriction blocks as defined + above. + items: + properties: + action: + description: |- + The action to take. Possible values are Allow or Deny. Defaults to Allow. + The action to take. Possible values are `Allow` or `Deny`. + type: string + description: + description: |- + The Description of this IP Restriction. + The description of the IP restriction rule. + type: string + headers: + description: A headers block as defined above. + items: + properties: + xAzureFdid: + description: Specifies a list of Azure Front Door + IDs. + items: + type: string + type: array + xFdHealthProbe: + description: Specifies if a Front Door Health + Probe should be expected. The only possible + value is 1. + items: + type: string + type: array + xForwardedFor: + description: Specifies a list of addresses for + which matching should be applied. Omitting this + value means allow any. + items: + type: string + type: array + xForwardedHost: + description: Specifies a list of Hosts for which + matching should be applied. + items: + type: string + type: array + type: object + type: array + ipAddress: + description: |- + The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + type: string + name: + description: |- + The name which should be used for this Storage Account. 
+ The name which should be used for this `ip_restriction`. + type: string + priority: + description: |- + The priority value of this ip_restriction. Defaults to 65000. + The priority value of this `ip_restriction`. + type: number + serviceTag: + description: |- + The Service Tag used for this IP Restriction. + The Service Tag used for this IP Restriction. + type: string + virtualNetworkSubnetId: + description: |- + The subnet id which will be used by this Function App for regional virtual network integration. + The Virtual Network Subnet ID used for this IP Restriction. + type: string + type: object + type: array + scmIpRestrictionDefaultAction: + description: The Default action for traffic that does not + match any scm_ip_restriction rule. possible values include + Allow and Deny. Defaults to Allow. + type: string + scmMinimumTlsVersion: + description: |- + Configures the minimum version of TLS required for SSL requests to the SCM site. Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + Configures the minimum version of TLS required for SSL requests to the SCM site Possible values include: `1.0`, `1.1`, and `1.2`. Defaults to `1.2`. + type: string + scmType: + description: The SCM Type in use by the Windows Function App. + type: string + scmUseMainIpRestriction: + description: |- + Should the Windows Function App ip_restriction configuration be used for the SCM also. + Should the Windows Function App `ip_restriction` configuration be used for the SCM also. + type: boolean + use32BitWorker: + description: |- + Should the Windows Function App use a 32-bit worker process. Defaults to true. + Should the Windows Web App use a 32-bit worker. + type: boolean + vnetRouteAllEnabled: + description: |- + Should all outbound traffic to have NAT Gateways, Network Security Groups and User Defined Routes applied? Defaults to false. + Should all outbound traffic to have Virtual Network Security Groups and User Defined Routes applied? Defaults to `false`. 
+ type: boolean + websocketsEnabled: + description: |- + Should Web Sockets be enabled. Defaults to false. + Should Web Sockets be enabled. Defaults to `false`. + type: boolean + windowsFxVersion: + description: The Windows FX Version string. + type: string + workerCount: + description: |- + The number of Workers for this Windows Function App. + The number of Workers for this Windows Function App. + type: number + type: object + stickySettings: + description: A sticky_settings block as defined below. + properties: + appSettingNames: + description: A list of app_setting names that the Windows + Function App will not swap between Slots when a swap operation + is triggered. + items: + type: string + type: array + connectionStringNames: + description: A list of connection_string names that the Windows + Function App will not swap between Slots when a swap operation + is triggered. + items: + type: string + type: array + type: object + storageAccount: + description: One or more storage_account blocks as defined below. + items: + properties: + accountName: + description: The Name of the Storage Account. + type: string + mountPath: + description: The path at which to mount the storage share. + type: string + name: + description: The name which should be used for this Storage + Account. + type: string + shareName: + description: The Name of the File Share or Container Name + for Blob storage. + type: string + type: + description: The Azure Storage Type. Possible values include + AzureFiles. + type: string + type: object + type: array + storageAccountName: + description: |- + The backend storage account name which will be used by this Function App. + The backend storage account name which will be used by this Function App. + type: string + storageKeyVaultSecretId: + description: |- + The Key Vault Secret ID, optionally including version, that contains the Connection String to connect to the storage account for this Function App. 
+ The Key Vault Secret ID, including version, that contains the Connection String to connect to the storage account for this Function App. + type: string + storageUsesManagedIdentity: + description: |- + Should the Function App use Managed Identity to access the storage account. Conflicts with storage_account_access_key. + Should the Function App use its Managed Identity to access storage? + type: boolean + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Windows Function App. + type: object + x-kubernetes-map-type: granular + virtualNetworkSubnetId: + description: The subnet id which will be used by this Function + App for regional virtual network integration. + type: string + webdeployPublishBasicAuthenticationEnabled: + description: Should the default WebDeploy Basic Authentication + publishing credentials enabled. Defaults to true. + type: boolean + zipDeployFile: + description: |- + The local path and filename of the Zip packaged application to deploy to this Windows Function App. + The local path and filename of the Zip packaged application to deploy to this Windows Function App. **Note:** Using this value requires `WEBSITE_RUN_FROM_PACKAGE=1` to be set on the App in `app_settings`. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/web.azure.upbound.io_windowsfunctionappslots.yaml b/package/crds/web.azure.upbound.io_windowsfunctionappslots.yaml index bc4f904e2..b90d31cee 100644 --- a/package/crds/web.azure.upbound.io_windowsfunctionappslots.yaml +++ b/package/crds/web.azure.upbound.io_windowsfunctionappslots.yaml @@ -4813,3 +4813,4651 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: WindowsFunctionAppSlot is the Schema for the 
WindowsFunctionAppSlots + API. Manages a Windows Function App Slot. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: WindowsFunctionAppSlotSpec defines the desired state of WindowsFunctionAppSlot + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + appSettings: + additionalProperties: + type: string + description: |- + A map of key-value pairs for App Settings and custom values. + A map of key-value pairs for [App Settings](https://docs.microsoft.com/en-us/azure/azure-functions/functions-app-settings) and custom values. 
+ type: object + x-kubernetes-map-type: granular + authSettings: + description: an auth_settings block as detailed below. + properties: + activeDirectory: + description: an active_directory block as detailed below. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Active Directory. + type: string + clientSecretSecretRef: + description: |- + The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. + The Client Secret for the Client ID. Cannot be used with `client_secret_setting_name`. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The App Setting name that contains the client secret of the Client. Cannot be used with `client_secret`. + type: string + type: object + additionalLoginParameters: + additionalProperties: + type: string + description: |- + Specifies a map of login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + Specifies a map of Login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. 
+ type: object + x-kubernetes-map-type: granular + allowedExternalRedirectUrls: + description: |- + Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + items: + type: string + type: array + defaultProvider: + description: |- + The default authentication provider to use when multiple providers are configured. Possible values include: AzureActiveDirectory, Facebook, Google, MicrosoftAccount, Twitter, Github. + The default authentication provider to use when multiple providers are configured. Possible values include: `AzureActiveDirectory`, `Facebook`, `Google`, `MicrosoftAccount`, `Twitter`, `Github`. + type: string + enabled: + description: |- + Should the Authentication / Authorization feature be enabled? + Should the Authentication / Authorization feature be enabled? + type: boolean + facebook: + description: a facebook block as detailed below. + properties: + appId: + description: |- + The App ID of the Facebook app used for login. + The App ID of the Facebook app used for login. + type: string + appSecretSecretRef: + description: |- + The App Secret of the Facebook app used for Facebook login. Cannot be specified with app_secret_setting_name. + The App Secret of the Facebook app used for Facebook Login. Cannot be specified with `app_secret_setting_name`. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + appSecretSettingName: + description: |- + The app setting name that contains the app_secret value used for Facebook Login. + The app setting name that contains the `app_secret` value used for Facebook Login. Cannot be specified with `app_secret`. 
+ type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + Specifies a list of OAuth 2.0 scopes to be requested as part of Facebook Login authentication. + items: + type: string + type: array + type: object + github: + description: a github block as detailed below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the GitHub app used for login. + type: string + clientSecretSecretRef: + description: |- + The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. + The Client Secret of the GitHub app used for GitHub Login. Cannot be specified with `client_secret_setting_name`. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for GitHub Login. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + items: + type: string + type: array + type: object + google: + description: a google block as detailed below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. 
+ The OpenID Connect Client ID for the Google web application. + type: string + clientSecretSecretRef: + description: |- + The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. + The client secret associated with the Google web application. Cannot be specified with `client_secret_setting_name`. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Google Login. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + Specifies a list of OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication. If not specified, "openid", "profile", and "email" are used as default scopes. + items: + type: string + type: array + type: object + issuer: + description: |- + The OpenID Connect Issuer URI that represents the entity which issues access tokens. + The OpenID Connect Issuer URI that represents the entity which issues access tokens. + type: string + microsoft: + description: a microsoft block as detailed below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OAuth 2.0 client ID that was created for the app used for authentication. + type: string + clientSecretSecretRef: + description: |- + The OAuth 2.0 client secret that was created for the app used for authentication. 
Cannot be specified with client_secret_setting_name. + The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret_setting_name`. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + The list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, `wl.basic` is used as the default scope. + items: + type: string + type: array + type: object + runtimeVersion: + description: |- + The RuntimeVersion of the Authentication / Authorization feature in use. + The RuntimeVersion of the Authentication / Authorization feature in use. + type: string + tokenRefreshExtensionHours: + description: |- + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + type: number + tokenStoreEnabled: + description: |- + Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to false. 
+ Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to `false`. + type: boolean + twitter: + description: a twitter block as detailed below. + properties: + consumerKey: + description: |- + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + type: string + consumerSecretSecretRef: + description: |- + The OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with consumer_secret_setting_name. + The OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret_setting_name`. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + consumerSecretSettingName: + description: |- + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret`. + type: string + type: object + unauthenticatedClientAction: + description: |- + The action to take when an unauthenticated client attempts to access the app. Possible values include: RedirectToLoginPage, AllowAnonymous. + The action to take when an unauthenticated client attempts to access the app. Possible values include: `RedirectToLoginPage`, `AllowAnonymous`. + type: string + type: object + authSettingsV2: + description: an auth_settings_v2 block as detailed below. + properties: + activeDirectoryV2: + description: An active_directory_v2 block as defined below. 
+ properties: + allowedApplications: + description: |- + The list of allowed Applications for the Default Authorisation Policy. + The list of allowed Applications for the Default Authorisation Policy. + items: + type: string + type: array + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + items: + type: string + type: array + allowedGroups: + description: |- + The list of allowed Group Names for the Default Authorisation Policy. + The list of allowed Group Names for the Default Authorisation Policy. + items: + type: string + type: array + allowedIdentities: + description: |- + The list of allowed Identities for the Default Authorisation Policy. + The list of allowed Identities for the Default Authorisation Policy. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Active Directory. + type: string + clientSecretCertificateThumbprint: + description: |- + The thumbprint of the certificate used for signing purposes. + The thumbprint of the certificate used for signing purposes. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The App Setting name that contains the client secret of the Client. + type: string + jwtAllowedClientApplications: + description: |- + A list of Allowed Client Applications in the JWT Claim. + A list of Allowed Client Applications in the JWT Claim. + items: + type: string + type: array + jwtAllowedGroups: + description: |- + A list of Allowed Groups in the JWT Claim. + A list of Allowed Groups in the JWT Claim. 
+ items: + type: string + type: array + loginParameters: + additionalProperties: + type: string + description: |- + A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + type: object + x-kubernetes-map-type: granular + tenantAuthEndpoint: + description: |- + The Azure Tenant Endpoint for the Authenticating Tenant. e.g. https://login.microsoftonline.com/v2.0/{tenant-guid}/ + The Azure Tenant Endpoint for the Authenticating Tenant. e.g. `https://login.microsoftonline.com/v2.0/{tenant-guid}/`. + type: string + wwwAuthenticationDisabled: + description: |- + Should the www-authenticate provider should be omitted from the request? Defaults to false. + Should the www-authenticate provider should be omitted from the request? Defaults to `false` + type: boolean + type: object + appleV2: + description: An apple_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Apple web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Apple Login. + type: string + type: object + authEnabled: + description: |- + Should the AuthV2 Settings be enabled. Defaults to false. + Should the AuthV2 Settings be enabled. Defaults to `false` + type: boolean + azureStaticWebAppV2: + description: An azure_static_web_app_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Static Web App Authentication. 
+ type: string + type: object + configFilePath: + description: |- + The path to the App Auth settings. + The path to the App Auth settings. **Note:** Relative Paths are evaluated from the Site Root directory. + type: string + customOidcV2: + description: Zero or more custom_oidc_v2 blocks as defined + below. + items: + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with this Custom OIDC. + type: string + name: + description: |- + The name which should be used for this Storage Account. + The name of the Custom OIDC Authentication Provider. + type: string + nameClaimType: + description: |- + The name of the claim that contains the users name. + The name of the claim that contains the users name. + type: string + openidConfigurationEndpoint: + description: |- + The app setting name that contains the client_secret value used for the Custom OIDC Login. + The endpoint that contains all the configuration endpoints for this Custom OIDC provider. + type: string + scopes: + description: |- + The list of the scopes that should be requested while authenticating. + The list of the scopes that should be requested while authenticating. + items: + type: string + type: array + type: object + type: array + defaultProvider: + description: |- + The Default Authentication Provider to use when the unauthenticated_action is set to RedirectToLoginPage. Possible values include: apple, azureactivedirectory, facebook, github, google, twitter and the name of your custom_oidc_v2 provider. + The Default Authentication Provider to use when the `unauthenticated_action` is set to `RedirectToLoginPage`. Possible values include: `apple`, `azureactivedirectory`, `facebook`, `github`, `google`, `twitter` and the `name` of your `custom_oidc_v2` provider. 
+ type: string + excludedPaths: + description: |- + The paths which should be excluded from the unauthenticated_action when it is set to RedirectToLoginPage. + The paths which should be excluded from the `unauthenticated_action` when it is set to `RedirectToLoginPage`. + items: + type: string + type: array + facebookV2: + description: A facebook_v2 block as defined below. + properties: + appId: + description: |- + The App ID of the Facebook app used for login. + The App ID of the Facebook app used for login. + type: string + appSecretSettingName: + description: |- + The app setting name that contains the app_secret value used for Facebook Login. + The app setting name that contains the `app_secret` value used for Facebook Login. + type: string + graphApiVersion: + description: |- + The version of the Facebook API to be used while logging in. + The version of the Facebook API to be used while logging in. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of scopes to be requested as part of Facebook Login authentication. + items: + type: string + type: array + type: object + forwardProxyConvention: + description: |- + The convention used to determine the url of the request made. Possible values include NoProxy, Standard, Custom. Defaults to NoProxy. + The convention used to determine the url of the request made. Possible values include `ForwardProxyConventionNoProxy`, `ForwardProxyConventionStandard`, `ForwardProxyConventionCustom`. Defaults to `ForwardProxyConventionNoProxy` + type: string + forwardProxyCustomHostHeaderName: + description: |- + The name of the custom header containing the host of the request. + The name of the header containing the host of the request. + type: string + forwardProxyCustomSchemeHeaderName: + description: |- + The name of the custom header containing the scheme of the request. 
+ The name of the header containing the scheme of the request. + type: string + githubV2: + description: A github_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the GitHub app used for login. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for GitHub Login. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + items: + type: string + type: array + type: object + googleV2: + description: A google_v2 block as defined below. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed Audiences that will be requested as part of Google Sign-In authentication. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Google web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Google Login. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of Login scopes that will be requested as part of Google Sign-In authentication. 
+ items: + type: string + type: array + type: object + httpRouteApiPrefix: + description: |- + The prefix that should precede all the authentication and authorisation paths. Defaults to /.auth. + The prefix that should precede all the authentication and authorisation paths. Defaults to `/.auth` + type: string + login: + description: A login block as defined below. + properties: + allowedExternalRedirectUrls: + description: |- + External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. + External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. **Note:** URLs within the current domain are always implicitly allowed. + items: + type: string + type: array + cookieExpirationConvention: + description: |- + The method by which cookies expire. Possible values include: FixedTime, and IdentityProviderDerived. Defaults to FixedTime. + The method by which cookies expire. Possible values include: `FixedTime`, and `IdentityProviderDerived`. Defaults to `FixedTime`. + type: string + cookieExpirationTime: + description: |- + The time after the request is made when the session cookie should expire. Defaults to 08:00:00. + The time after the request is made when the session cookie should expire. Defaults to `08:00:00`. + type: string + logoutEndpoint: + description: |- + The endpoint to which logout requests should be made. + The endpoint to which logout requests should be made. + type: string + nonceExpirationTime: + description: |- + The time after the request is made when the nonce should expire. Defaults to 00:05:00. + The time after the request is made when the nonce should expire. Defaults to `00:05:00`. 
+ type: string + preserveUrlFragmentsForLogins: + description: |- + Should the fragments from the request be preserved after the login request is made. Defaults to false. + Should the fragments from the request be preserved after the login request is made. Defaults to `false`. + type: boolean + tokenRefreshExtensionTime: + description: |- + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + type: number + tokenStoreEnabled: + description: |- + Should the Token Store configuration Enabled. Defaults to false + Should the Token Store configuration Enabled. Defaults to `false` + type: boolean + tokenStorePath: + description: |- + The directory path in the App Filesystem in which the tokens will be stored. + The directory path in the App Filesystem in which the tokens will be stored. + type: string + tokenStoreSasSettingName: + description: |- + The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + type: string + validateNonce: + description: |- + Should the nonce be validated while completing the login flow. Defaults to true. + Should the nonce be validated while completing the login flow. Defaults to `true`. + type: boolean + type: object + microsoftV2: + description: A microsoft_v2 block as defined below. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. 
+ items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OAuth 2.0 client ID that was created for the app used for authentication. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + The list of Login scopes that will be requested as part of Microsoft Account authentication. + items: + type: string + type: array + type: object + requireAuthentication: + description: |- + Should the authentication flow be used for all requests. + Should the authentication flow be used for all requests. + type: boolean + requireHttps: + description: |- + Should HTTPS be required on connections? Defaults to true. + Should HTTPS be required on connections? Defaults to true. + type: boolean + runtimeVersion: + description: |- + The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to ~1. + The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to `~1` + type: string + twitterV2: + description: A twitter_v2 block as defined below. + properties: + consumerKey: + description: |- + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + type: string + consumerSecretSettingName: + description: |- + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. 
+ type: string + type: object + unauthenticatedAction: + description: |- + The action to take for requests made without authentication. Possible values include RedirectToLoginPage, AllowAnonymous, Return401, and Return403. Defaults to RedirectToLoginPage. + The action to take for requests made without authentication. Possible values include `RedirectToLoginPage`, `AllowAnonymous`, `Return401`, and `Return403`. Defaults to `RedirectToLoginPage`. + type: string + type: object + backup: + description: a backup block as detailed below. + properties: + enabled: + description: |- + Should this backup job be enabled? Defaults to true. + Should this backup job be enabled? + type: boolean + name: + description: |- + The name which should be used for this Backup. + The name which should be used for this Backup. + type: string + schedule: + description: a schedule block as detailed below. + properties: + frequencyInterval: + description: |- + How often the backup should be executed (e.g. for weekly backup, this should be set to 7 and frequency_unit should be set to Day). + How often the backup should be executed (e.g. for weekly backup, this should be set to `7` and `frequency_unit` should be set to `Day`). + type: number + frequencyUnit: + description: |- + The unit of time for how often the backup should take place. Possible values include: Day and Hour. + The unit of time for how often the backup should take place. Possible values include: `Day` and `Hour`. + type: string + keepAtLeastOneBackup: + description: |- + Should the service keep at least one backup, regardless of age of backup. Defaults to false. + Should the service keep at least one backup, regardless of age of backup. Defaults to `false`. + type: boolean + retentionPeriodDays: + description: |- + After how many days backups should be deleted. Defaults to 30. + After how many days backups should be deleted. 
+ type: number + startTime: + description: |- + When the schedule should start working in RFC-3339 format. + When the schedule should start working in RFC-3339 format. + type: string + type: object + storageAccountUrlSecretRef: + description: |- + The SAS URL to the container. + The SAS URL to the container. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + required: + - storageAccountUrlSecretRef + type: object + builtinLoggingEnabled: + description: |- + Should built-in logging be enabled. Configures AzureWebJobsDashboard app setting based on the configured storage setting. Defaults to true. + Should built in logging be enabled. Configures `AzureWebJobsDashboard` app setting based on the configured storage setting. + type: boolean + clientCertificateEnabled: + description: |- + Should the Function App Slot use Client Certificates. + Should the Function App Slot use Client Certificates. + type: boolean + clientCertificateExclusionPaths: + description: |- + Paths to exclude when using client certificates, separated by ; + Paths to exclude when using client certificates, separated by ; + type: string + clientCertificateMode: + description: |- + The mode of the Function App Slot's client certificates requirement for incoming requests. Possible values are Required, Optional, and OptionalInteractiveUser. Defaults to Optional. + The mode of the Function App Slot's client certificates requirement for incoming requests. Possible values are `Required`, `Optional`, and `OptionalInteractiveUser`. + type: string + connectionString: + description: a connection_string block as detailed below. + items: + properties: + name: + description: |- + The name which should be used for this Connection. + The name which should be used for this Connection. 
+ type: string + type: + description: |- + Type of database. Possible values include: APIHub, Custom, DocDb, EventHub, MySQL, NotificationHub, PostgreSQL, RedisCache, ServiceBus, SQLAzure, and SQLServer. + Type of database. Possible values include: `MySQL`, `SQLServer`, `SQLAzure`, `Custom`, `NotificationHub`, `ServiceBus`, `EventHub`, `APIHub`, `DocDb`, `RedisCache`, and `PostgreSQL`. + type: string + valueSecretRef: + description: |- + The connection string value. + The connection string value. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + required: + - valueSecretRef + type: object + type: array + contentShareForceDisabled: + description: |- + Force disable the content share settings. + Force disable the content share settings. + type: boolean + dailyMemoryTimeQuota: + description: |- + The amount of memory in gigabyte-seconds that your application is allowed to consume per day. Setting this value only affects function apps in Consumption Plans. Defaults to 0. + The amount of memory in gigabyte-seconds that your application is allowed to consume per day. Setting this value only affects function apps in Consumption Plans. + type: number + enabled: + description: |- + Is the Windows Function App Slot enabled. Defaults to true. + Is the Windows Function App Slot enabled. + type: boolean + ftpPublishBasicAuthenticationEnabled: + description: Should the default FTP Basic Authentication publishing + profile be enabled. Defaults to true. + type: boolean + functionAppId: + description: |- + The name of the Windows Function App this Slot is a member of. Changing this forces a new resource to be created. + The ID of the Windows Function App this Slot is a member of. 
+ type: string + functionAppIdRef: + description: Reference to a WindowsFunctionApp in web to populate + functionAppId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + functionAppIdSelector: + description: Selector for a WindowsFunctionApp in web to populate + functionAppId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + functionsExtensionVersion: + description: |- + The runtime version associated with the Function App Slot. Defaults to ~4. + The runtime version associated with the Function App Slot. + type: string + httpsOnly: + description: |- + Can the Function App Slot only be accessed via HTTPS?. Defaults to false. + Can the Function App Slot only be accessed via HTTPS? + type: boolean + identity: + description: an identity block as detailed below. + properties: + identityIds: + description: A list of User Assigned Managed Identity IDs + to be assigned to this Windows Function App Slot. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Windows Function App Slot. + Possible values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + keyVaultReferenceIdentityId: + description: |- + The User Assigned Identity ID used for accessing KeyVault secrets. The identity must be assigned to the application in the identity block. For more information see - Access vaults with a user-assigned identity + The User Assigned Identity to use for Key Vault access. + type: string + publicNetworkAccessEnabled: + description: Should public network access be enabled for the Function + App. Defaults to true. + type: boolean + servicePlanId: + description: The ID of the Service Plan in which to run this slot. + If not specified the same Service Plan as the Windows Function + App will be used. + type: string + siteConfig: + description: a site_config block as detailed below. 
+ properties: + alwaysOn: + description: |- + If this Windows Web App is Always On enabled. Defaults to false. + If this Windows Web App is Always On enabled. Defaults to `false`. + type: boolean + apiDefinitionUrl: + description: |- + The URL of the API definition that describes this Windows Function App. + The URL of the API definition that describes this Windows Function App. + type: string + apiManagementApiId: + description: |- + The ID of the API Management API for this Windows Function App. + The ID of the API Management API for this Windows Function App. + type: string + appCommandLine: + description: |- + The program and any arguments used to launch this app via the command line. (Example node myapp.js). + The program and any arguments used to launch this app via the command line. (Example `node myapp.js`). + type: string + appScaleLimit: + description: |- + The number of workers this function app can scale out to. Only applicable to apps on the Consumption and Premium plan. + The number of workers this function app can scale out to. Only applicable to apps on the Consumption and Premium plan. + type: number + appServiceLogs: + description: an app_service_logs block as detailed below. + properties: + diskQuotaMb: + description: |- + The amount of disk space to use for logs. Valid values are between 25 and 100. Defaults to 35. + The amount of disk space to use for logs. Valid values are between `25` and `100`. + type: number + retentionPeriodDays: + description: |- + After how many days backups should be deleted. Defaults to 30. + The retention period for logs in days. Valid values are between `0` and `99999`. Defaults to `0` (never delete). + type: number + type: object + applicationInsightsConnectionStringSecretRef: + description: |- + The Connection String for linking the Windows Function App to Application Insights. + The Connection String for linking the Windows Function App to Application Insights. 
+ properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + applicationInsightsKeySecretRef: + description: |- + The Instrumentation Key for connecting the Windows Function App to Application Insights. + The Instrumentation Key for connecting the Windows Function App to Application Insights. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + applicationStack: + description: an application_stack block as detailed below. + properties: + dotnetVersion: + description: |- + The version of .Net. Possible values are v3.0, v4.0, v6.0, v7.0 and v8.0. Defaults to v4.0. + The version of .Net. Possible values are `v3.0`, `v4.0`, `v6.0` and `v7.0` + type: string + javaVersion: + description: |- + The version of Java to use. Possible values are 1.8, 11 and 17 (In-Preview). + The version of Java to use. Possible values are `1.8`, `11` and `17` + type: string + nodeVersion: + description: |- + The version of Node to use. Possible values are ~12, ~14, ~16 and ~18. + The version of Node to use. Possible values include `12`, `14`, `16` and `18` + type: string + powershellCoreVersion: + description: |- + The PowerShell Core version to use. Possible values are 7, and 7.2. + The PowerShell Core version to use. Possible values are `7`, and `7.2` + type: string + useCustomRuntime: + description: |- + Does the Function App use a custom Application Stack? + Does the Function App use a custom Application Stack? + type: boolean + useDotnetIsolatedRuntime: + description: |- + Should the DotNet process use an isolated runtime. Defaults to false. + Should the DotNet process use an isolated runtime. 
Defaults to `false`. + type: boolean + type: object + autoSwapSlotName: + description: The name of the slot to automatically swap with + when this slot is successfully deployed. + type: string + cors: + description: a cors block as detailed below. + properties: + allowedOrigins: + description: |- + Specifies a list of origins that should be allowed to make cross-origin calls. + Specifies a list of origins that should be allowed to make cross-origin calls. + items: + type: string + type: array + x-kubernetes-list-type: set + supportCredentials: + description: |- + Are credentials allowed in CORS requests? Defaults to false. + Are credentials allowed in CORS requests? Defaults to `false`. + type: boolean + type: object + defaultDocuments: + description: |- + Specifies a list of Default Documents for the Windows Web App. + Specifies a list of Default Documents for the Windows Web App. + items: + type: string + type: array + elasticInstanceMinimum: + description: |- + The number of minimum instances for this Windows Function App. Only affects apps on Elastic Premium plans. + The number of minimum instances for this Windows Function App. Only affects apps on Elastic Premium plans. + type: number + ftpsState: + description: |- + State of FTP / FTPS service for this function app. Possible values include: AllAllowed, FtpsOnly and Disabled. Defaults to Disabled. + State of FTP / FTPS service for this function app. Possible values include: `AllAllowed`, `FtpsOnly` and `Disabled`. Defaults to `Disabled`. + type: string + healthCheckEvictionTimeInMin: + description: |- + The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between 2 and 10. Defaults to 0. Only valid in conjunction with health_check_path. + The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between `2` and `10`. Defaults to `10`. 
Only valid in conjunction with `health_check_path` + type: number + healthCheckPath: + description: |- + The path to be checked for this function app health. + The path to be checked for this function app health. + type: string + http2Enabled: + description: |- + Specifies if the HTTP2 protocol should be enabled. Defaults to false. + Specifies if the http2 protocol should be enabled. Defaults to `false`. + type: boolean + ipRestriction: + description: an ip_restriction block as detailed below. + items: + properties: + action: + description: |- + The action to take. Possible values are Allow or Deny. Defaults to Allow. + The action to take. Possible values are `Allow` or `Deny`. + type: string + description: + description: |- + The Description of this IP Restriction. + The description of the IP restriction rule. + type: string + headers: + description: a headers block as detailed below. + items: + properties: + xAzureFdid: + description: Specifies a list of Azure Front Door + IDs. + items: + type: string + type: array + xFdHealthProbe: + description: Specifies if a Front Door Health + Probe should be expected. The only possible + value is 1. + items: + type: string + type: array + xForwardedFor: + description: Specifies a list of addresses for + which matching should be applied. Omitting this + value means allow any. + items: + type: string + type: array + xForwardedHost: + description: Specifies a list of Hosts for which + matching should be applied. + items: + type: string + type: array + type: object + type: array + ipAddress: + description: |- + The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + type: string + name: + description: |- + The name which should be used for this Storage Account. + The name which should be used for this `ip_restriction`. 
+ type: string + priority: + description: |- + The priority value of this ip_restriction. Defaults to 65000. + The priority value of this `ip_restriction`. + type: number + serviceTag: + description: |- + The Service Tag used for this IP Restriction. + The Service Tag used for this IP Restriction. + type: string + virtualNetworkSubnetId: + description: |- + The subnet id which will be used by this Function App Slot for regional virtual network integration. + The Virtual Network Subnet ID used for this IP Restriction. + type: string + virtualNetworkSubnetIdRef: + description: Reference to a Subnet in network to populate + virtualNetworkSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkSubnetIdSelector: + description: Selector for a Subnet in network to populate + virtualNetworkSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + ipRestrictionDefaultAction: + description: The Default action for traffic that does not + match any ip_restriction rule. possible values include Allow + and Deny. Defaults to Allow. + type: string + loadBalancingMode: + description: |- + The Site load balancing mode. Possible values include: WeightedRoundRobin, LeastRequests, LeastResponseTime, WeightedTotalTraffic, RequestHash, PerSiteRoundRobin. Defaults to LeastRequests if omitted. + The Site load balancing mode. Possible values include: `WeightedRoundRobin`, `LeastRequests`, `LeastResponseTime`, `WeightedTotalTraffic`, `RequestHash`, `PerSiteRoundRobin`. Defaults to `LeastRequests` if omitted. + type: string + managedPipelineMode: + description: |- + The Managed Pipeline mode. Possible values include: Integrated, Classic. Defaults to Integrated. + The Managed Pipeline mode. Possible values include: `Integrated`, `Classic`. Defaults to `Integrated`. + type: string + minimumTlsVersion: + description: |- + The configures the minimum version of TLS required for SSL requests. Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + The configures the minimum version of TLS required for SSL requests. 
Possible values include: `1.0`, `1.1`, and `1.2`. Defaults to `1.2`. + type: string + preWarmedInstanceCount: + description: |- + The number of pre-warmed instances for this function app. Only affects apps on an Elastic Premium plan. + The number of pre-warmed instances for this function app. Only affects apps on an Elastic Premium plan. + type: number + remoteDebuggingEnabled: + description: |- + Should Remote Debugging be enabled. Defaults to false. + Should Remote Debugging be enabled. Defaults to `false`. + type: boolean + remoteDebuggingVersion: + description: |- + The Remote Debugging Version. Possible values include VS2017, VS2019, and VS2022 + The Remote Debugging Version. Possible values include `VS2017`, `VS2019`, and `VS2022` + type: string + runtimeScaleMonitoringEnabled: + description: |- + Should Scale Monitoring of the Functions Runtime be enabled? + Should Functions Runtime Scale Monitoring be enabled. + type: boolean + scmIpRestriction: + description: a scm_ip_restriction block as detailed below. + items: + properties: + action: + description: |- + The action to take. Possible values are Allow or Deny. Defaults to Allow. + The action to take. Possible values are `Allow` or `Deny`. + type: string + description: + description: |- + The Description of this IP Restriction. + The description of the IP restriction rule. + type: string + headers: + description: a headers block as detailed below. + items: + properties: + xAzureFdid: + description: Specifies a list of Azure Front Door + IDs. + items: + type: string + type: array + xFdHealthProbe: + description: Specifies if a Front Door Health + Probe should be expected. The only possible + value is 1. + items: + type: string + type: array + xForwardedFor: + description: Specifies a list of addresses for + which matching should be applied. Omitting this + value means allow any. 
+ items: + type: string + type: array + xForwardedHost: + description: Specifies a list of Hosts for which + matching should be applied. + items: + type: string + type: array + type: object + type: array + ipAddress: + description: |- + The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + type: string + name: + description: |- + The name which should be used for this Storage Account. + The name which should be used for this `ip_restriction`. + type: string + priority: + description: |- + The priority value of this ip_restriction. Defaults to 65000. + The priority value of this `ip_restriction`. + type: number + serviceTag: + description: |- + The Service Tag used for this IP Restriction. + The Service Tag used for this IP Restriction. + type: string + virtualNetworkSubnetId: + description: |- + The subnet id which will be used by this Function App Slot for regional virtual network integration. + The Virtual Network Subnet ID used for this IP Restriction. + type: string + virtualNetworkSubnetIdRef: + description: Reference to a Subnet in network to populate + virtualNetworkSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkSubnetIdSelector: + description: Selector for a Subnet in network to populate + virtualNetworkSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + scmIpRestrictionDefaultAction: + description: The Default action for traffic that does not + match any scm_ip_restriction rule. possible values include + Allow and Deny. Defaults to Allow. + type: string + scmMinimumTlsVersion: + description: |- + Configures the minimum version of TLS required for SSL requests to the SCM site Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. 
+ Configures the minimum version of TLS required for SSL requests to the SCM site Possible values include: `1.0`, `1.1`, and `1.2`. Defaults to `1.2`. + type: string + scmUseMainIpRestriction: + description: |- + Should the Windows Function App ip_restriction configuration be used for the SCM also. + Should the Windows Function App `ip_restriction` configuration be used for the SCM also. + type: boolean + use32BitWorker: + description: |- + Should the Windows Web App use a 32-bit worker. Defaults to true. + Should the Windows Web App use a 32-bit worker. + type: boolean + vnetRouteAllEnabled: + description: |- + Should all outbound traffic to have NAT Gateways, Network Security Groups and User Defined Routes applied? Defaults to false. + Should all outbound traffic to have Virtual Network Security Groups and User Defined Routes applied? Defaults to `false`. + type: boolean + websocketsEnabled: + description: |- + Should Web Sockets be enabled. Defaults to false. + Should Web Sockets be enabled. Defaults to `false`. + type: boolean + workerCount: + description: |- + The number of Workers for this Windows Function App. + The number of Workers for this Windows Function App. + type: number + type: object + storageAccount: + description: One or more storage_account blocks as defined below. + items: + properties: + accessKeySecretRef: + description: The Access key for the storage account. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + accountName: + description: The Name of the Storage Account. + type: string + mountPath: + description: The path at which to mount the storage share. + type: string + name: + description: The name which should be used for this Storage + Account. 
+ type: string + shareName: + description: The Name of the File Share or Container Name + for Blob storage. + type: string + type: + description: The Azure Storage Type. Possible values include + AzureFiles. + type: string + required: + - accessKeySecretRef + type: object + type: array + storageAccountAccessKeySecretRef: + description: |- + The access key which will be used to access the storage account for the Function App Slot. + The access key which will be used to access the storage account for the Function App Slot. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + storageAccountName: + description: |- + The backend storage account name which will be used by this Function App Slot. + The backend storage account name which will be used by this Function App Slot. + type: string + storageAccountNameRef: + description: Reference to a Account in storage to populate storageAccountName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + storageAccountNameSelector: + description: Selector for a Account in storage to populate storageAccountName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + storageKeyVaultSecretId: + description: |- + The Key Vault Secret ID, optionally including version, that contains the Connection String to connect to the storage account for this Function App Slot. + The Key Vault Secret ID, including version, that contains the Connection String to connect to the storage account for this Function App. + type: string + storageUsesManagedIdentity: + description: |- + Should the Function App Slot use its Managed Identity to access storage. + Should the Function App Slot use its Managed Identity to access storage? 
+ type: boolean + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Windows Function App Slot. + type: object + x-kubernetes-map-type: granular + virtualNetworkSubnetId: + description: The subnet id which will be used by this Function + App Slot for regional virtual network integration. + type: string + virtualNetworkSubnetIdRef: + description: Reference to a Subnet in network to populate virtualNetworkSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkSubnetIdSelector: + description: Selector for a Subnet in network to populate virtualNetworkSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + webdeployPublishBasicAuthenticationEnabled: + description: Should the default WebDeploy Basic Authentication + publishing credentials enabled. Defaults to true. + type: boolean + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + appSettings: + additionalProperties: + type: string + description: |- + A map of key-value pairs for App Settings and custom values. + A map of key-value pairs for [App Settings](https://docs.microsoft.com/en-us/azure/azure-functions/functions-app-settings) and custom values. + type: object + x-kubernetes-map-type: granular + authSettings: + description: an auth_settings block as detailed below. + properties: + activeDirectory: + description: an active_directory block as detailed below. 
+ properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Active Directory. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The App Setting name that contains the client secret of the Client. Cannot be used with `client_secret`. + type: string + type: object + additionalLoginParameters: + additionalProperties: + type: string + description: |- + Specifies a map of login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + Specifies a map of Login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + type: object + x-kubernetes-map-type: granular + allowedExternalRedirectUrls: + description: |- + Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + items: + type: string + type: array + defaultProvider: + description: |- + The default authentication provider to use when multiple providers are configured. Possible values include: AzureActiveDirectory, Facebook, Google, MicrosoftAccount, Twitter, Github. + The default authentication provider to use when multiple providers are configured. Possible values include: `AzureActiveDirectory`, `Facebook`, `Google`, `MicrosoftAccount`, `Twitter`, `Github`. 
+ type: string + enabled: + description: |- + Should the Authentication / Authorization feature be enabled? + Should the Authentication / Authorization feature be enabled? + type: boolean + facebook: + description: a facebook block as detailed below. + properties: + appId: + description: |- + The App ID of the Facebook app used for login. + The App ID of the Facebook app used for login. + type: string + appSecretSettingName: + description: |- + The app setting name that contains the app_secret value used for Facebook Login. + The app setting name that contains the `app_secret` value used for Facebook Login. Cannot be specified with `app_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + Specifies a list of OAuth 2.0 scopes to be requested as part of Facebook Login authentication. + items: + type: string + type: array + type: object + github: + description: a github block as detailed below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the GitHub app used for login. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for GitHub Login. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + items: + type: string + type: array + type: object + google: + description: a google block as detailed below. 
+ properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Google web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Google Login. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + Specifies a list of OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication. If not specified, "openid", "profile", and "email" are used as default scopes. + items: + type: string + type: array + type: object + issuer: + description: |- + The OpenID Connect Issuer URI that represents the entity which issues access tokens. + The OpenID Connect Issuer URI that represents the entity which issues access tokens. + type: string + microsoft: + description: a microsoft block as detailed below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OAuth 2.0 client ID that was created for the app used for authentication. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. 
+ The list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, `wl.basic` is used as the default scope. + items: + type: string + type: array + type: object + runtimeVersion: + description: |- + The RuntimeVersion of the Authentication / Authorization feature in use. + The RuntimeVersion of the Authentication / Authorization feature in use. + type: string + tokenRefreshExtensionHours: + description: |- + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + type: number + tokenStoreEnabled: + description: |- + Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to false. + Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to `false`. + type: boolean + twitter: + description: a twitter block as detailed below. + properties: + consumerKey: + description: |- + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + type: string + consumerSecretSettingName: + description: |- + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret`. + type: string + type: object + unauthenticatedClientAction: + description: |- + The action to take when an unauthenticated client attempts to access the app. Possible values include: RedirectToLoginPage, AllowAnonymous. + The action to take when an unauthenticated client attempts to access the app. 
Possible values include: `RedirectToLoginPage`, `AllowAnonymous`. + type: string + type: object + authSettingsV2: + description: an auth_settings_v2 block as detailed below. + properties: + activeDirectoryV2: + description: An active_directory_v2 block as defined below. + properties: + allowedApplications: + description: |- + The list of allowed Applications for the Default Authorisation Policy. + The list of allowed Applications for the Default Authorisation Policy. + items: + type: string + type: array + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + items: + type: string + type: array + allowedGroups: + description: |- + The list of allowed Group Names for the Default Authorisation Policy. + The list of allowed Group Names for the Default Authorisation Policy. + items: + type: string + type: array + allowedIdentities: + description: |- + The list of allowed Identities for the Default Authorisation Policy. + The list of allowed Identities for the Default Authorisation Policy. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Active Directory. + type: string + clientSecretCertificateThumbprint: + description: |- + The thumbprint of the certificate used for signing purposes. + The thumbprint of the certificate used for signing purposes. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The App Setting name that contains the client secret of the Client. + type: string + jwtAllowedClientApplications: + description: |- + A list of Allowed Client Applications in the JWT Claim. 
+ A list of Allowed Client Applications in the JWT Claim. + items: + type: string + type: array + jwtAllowedGroups: + description: |- + A list of Allowed Groups in the JWT Claim. + A list of Allowed Groups in the JWT Claim. + items: + type: string + type: array + loginParameters: + additionalProperties: + type: string + description: |- + A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + type: object + x-kubernetes-map-type: granular + tenantAuthEndpoint: + description: |- + The Azure Tenant Endpoint for the Authenticating Tenant. e.g. https://login.microsoftonline.com/v2.0/{tenant-guid}/ + The Azure Tenant Endpoint for the Authenticating Tenant. e.g. `https://login.microsoftonline.com/v2.0/{tenant-guid}/`. + type: string + wwwAuthenticationDisabled: + description: |- + Should the www-authenticate provider should be omitted from the request? Defaults to false. + Should the www-authenticate provider should be omitted from the request? Defaults to `false` + type: boolean + type: object + appleV2: + description: An apple_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Apple web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Apple Login. + type: string + type: object + authEnabled: + description: |- + Should the AuthV2 Settings be enabled. Defaults to false. + Should the AuthV2 Settings be enabled. Defaults to `false` + type: boolean + azureStaticWebAppV2: + description: An azure_static_web_app_v2 block as defined below. 
+ properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Static Web App Authentication. + type: string + type: object + configFilePath: + description: |- + The path to the App Auth settings. + The path to the App Auth settings. **Note:** Relative Paths are evaluated from the Site Root directory. + type: string + customOidcV2: + description: Zero or more custom_oidc_v2 blocks as defined + below. + items: + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with this Custom OIDC. + type: string + name: + description: |- + The name which should be used for this Storage Account. + The name of the Custom OIDC Authentication Provider. + type: string + nameClaimType: + description: |- + The name of the claim that contains the users name. + The name of the claim that contains the users name. + type: string + openidConfigurationEndpoint: + description: |- + The app setting name that contains the client_secret value used for the Custom OIDC Login. + The endpoint that contains all the configuration endpoints for this Custom OIDC provider. + type: string + scopes: + description: |- + The list of the scopes that should be requested while authenticating. + The list of the scopes that should be requested while authenticating. + items: + type: string + type: array + type: object + type: array + defaultProvider: + description: |- + The Default Authentication Provider to use when the unauthenticated_action is set to RedirectToLoginPage. Possible values include: apple, azureactivedirectory, facebook, github, google, twitter and the name of your custom_oidc_v2 provider. + The Default Authentication Provider to use when the `unauthenticated_action` is set to `RedirectToLoginPage`. 
Possible values include: `apple`, `azureactivedirectory`, `facebook`, `github`, `google`, `twitter` and the `name` of your `custom_oidc_v2` provider. + type: string + excludedPaths: + description: |- + The paths which should be excluded from the unauthenticated_action when it is set to RedirectToLoginPage. + The paths which should be excluded from the `unauthenticated_action` when it is set to `RedirectToLoginPage`. + items: + type: string + type: array + facebookV2: + description: A facebook_v2 block as defined below. + properties: + appId: + description: |- + The App ID of the Facebook app used for login. + The App ID of the Facebook app used for login. + type: string + appSecretSettingName: + description: |- + The app setting name that contains the app_secret value used for Facebook Login. + The app setting name that contains the `app_secret` value used for Facebook Login. + type: string + graphApiVersion: + description: |- + The version of the Facebook API to be used while logging in. + The version of the Facebook API to be used while logging in. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of scopes to be requested as part of Facebook Login authentication. + items: + type: string + type: array + type: object + forwardProxyConvention: + description: |- + The convention used to determine the url of the request made. Possible values include NoProxy, Standard, Custom. Defaults to NoProxy. + The convention used to determine the url of the request made. Possible values include `ForwardProxyConventionNoProxy`, `ForwardProxyConventionStandard`, `ForwardProxyConventionCustom`. Defaults to `ForwardProxyConventionNoProxy` + type: string + forwardProxyCustomHostHeaderName: + description: |- + The name of the custom header containing the host of the request. + The name of the header containing the host of the request. 
+ type: string + forwardProxyCustomSchemeHeaderName: + description: |- + The name of the custom header containing the scheme of the request. + The name of the header containing the scheme of the request. + type: string + githubV2: + description: A github_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the GitHub app used for login. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for GitHub Login. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + items: + type: string + type: array + type: object + googleV2: + description: A google_v2 block as defined below. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed Audiences that will be requested as part of Google Sign-In authentication. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Google web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Google Login. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. 
+ Specifies a list of Login scopes that will be requested as part of Google Sign-In authentication. + items: + type: string + type: array + type: object + httpRouteApiPrefix: + description: |- + The prefix that should precede all the authentication and authorisation paths. Defaults to /.auth. + The prefix that should precede all the authentication and authorisation paths. Defaults to `/.auth` + type: string + login: + description: A login block as defined below. + properties: + allowedExternalRedirectUrls: + description: |- + External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. + External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. **Note:** URLs within the current domain are always implicitly allowed. + items: + type: string + type: array + cookieExpirationConvention: + description: |- + The method by which cookies expire. Possible values include: FixedTime, and IdentityProviderDerived. Defaults to FixedTime. + The method by which cookies expire. Possible values include: `FixedTime`, and `IdentityProviderDerived`. Defaults to `FixedTime`. + type: string + cookieExpirationTime: + description: |- + The time after the request is made when the session cookie should expire. Defaults to 08:00:00. + The time after the request is made when the session cookie should expire. Defaults to `08:00:00`. + type: string + logoutEndpoint: + description: |- + The endpoint to which logout requests should be made. + The endpoint to which logout requests should be made. + type: string + nonceExpirationTime: + description: |- + The time after the request is made when the nonce should expire. Defaults to 00:05:00. + The time after the request is made when the nonce should expire. Defaults to `00:05:00`. 
+ type: string + preserveUrlFragmentsForLogins: + description: |- + Should the fragments from the request be preserved after the login request is made. Defaults to false. + Should the fragments from the request be preserved after the login request is made. Defaults to `false`. + type: boolean + tokenRefreshExtensionTime: + description: |- + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + type: number + tokenStoreEnabled: + description: |- + Should the Token Store configuration Enabled. Defaults to false + Should the Token Store configuration Enabled. Defaults to `false` + type: boolean + tokenStorePath: + description: |- + The directory path in the App Filesystem in which the tokens will be stored. + The directory path in the App Filesystem in which the tokens will be stored. + type: string + tokenStoreSasSettingName: + description: |- + The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + type: string + validateNonce: + description: |- + Should the nonce be validated while completing the login flow. Defaults to true. + Should the nonce be validated while completing the login flow. Defaults to `true`. + type: boolean + type: object + microsoftV2: + description: A microsoft_v2 block as defined below. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. 
+ items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OAuth 2.0 client ID that was created for the app used for authentication. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + The list of Login scopes that will be requested as part of Microsoft Account authentication. + items: + type: string + type: array + type: object + requireAuthentication: + description: |- + Should the authentication flow be used for all requests. + Should the authentication flow be used for all requests. + type: boolean + requireHttps: + description: |- + Should HTTPS be required on connections? Defaults to true. + Should HTTPS be required on connections? Defaults to true. + type: boolean + runtimeVersion: + description: |- + The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to ~1. + The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to `~1` + type: string + twitterV2: + description: A twitter_v2 block as defined below. + properties: + consumerKey: + description: |- + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + type: string + consumerSecretSettingName: + description: |- + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. 
+ type: string + type: object + unauthenticatedAction: + description: |- + The action to take for requests made without authentication. Possible values include RedirectToLoginPage, AllowAnonymous, Return401, and Return403. Defaults to RedirectToLoginPage. + The action to take for requests made without authentication. Possible values include `RedirectToLoginPage`, `AllowAnonymous`, `Return401`, and `Return403`. Defaults to `RedirectToLoginPage`. + type: string + type: object + backup: + description: a backup block as detailed below. + properties: + enabled: + description: |- + Should this backup job be enabled? Defaults to true. + Should this backup job be enabled? + type: boolean + name: + description: |- + The name which should be used for this Backup. + The name which should be used for this Backup. + type: string + schedule: + description: a schedule block as detailed below. + properties: + frequencyInterval: + description: |- + How often the backup should be executed (e.g. for weekly backup, this should be set to 7 and frequency_unit should be set to Day). + How often the backup should be executed (e.g. for weekly backup, this should be set to `7` and `frequency_unit` should be set to `Day`). + type: number + frequencyUnit: + description: |- + The unit of time for how often the backup should take place. Possible values include: Day and Hour. + The unit of time for how often the backup should take place. Possible values include: `Day` and `Hour`. + type: string + keepAtLeastOneBackup: + description: |- + Should the service keep at least one backup, regardless of age of backup. Defaults to false. + Should the service keep at least one backup, regardless of age of backup. Defaults to `false`. + type: boolean + retentionPeriodDays: + description: |- + After how many days backups should be deleted. Defaults to 30. + After how many days backups should be deleted. 
+ type: number + startTime: + description: |- + When the schedule should start working in RFC-3339 format. + When the schedule should start working in RFC-3339 format. + type: string + type: object + type: object + builtinLoggingEnabled: + description: |- + Should built-in logging be enabled. Configures AzureWebJobsDashboard app setting based on the configured storage setting. Defaults to true. + Should built in logging be enabled. Configures `AzureWebJobsDashboard` app setting based on the configured storage setting. + type: boolean + clientCertificateEnabled: + description: |- + Should the Function App Slot use Client Certificates. + Should the Function App Slot use Client Certificates. + type: boolean + clientCertificateExclusionPaths: + description: |- + Paths to exclude when using client certificates, separated by ; + Paths to exclude when using client certificates, separated by ; + type: string + clientCertificateMode: + description: |- + The mode of the Function App Slot's client certificates requirement for incoming requests. Possible values are Required, Optional, and OptionalInteractiveUser. Defaults to Optional. + The mode of the Function App Slot's client certificates requirement for incoming requests. Possible values are `Required`, `Optional`, and `OptionalInteractiveUser`. + type: string + connectionString: + description: a connection_string block as detailed below. + items: + properties: + name: + description: |- + The name which should be used for this Connection. + The name which should be used for this Connection. + type: string + type: + description: |- + Type of database. Possible values include: APIHub, Custom, DocDb, EventHub, MySQL, NotificationHub, PostgreSQL, RedisCache, ServiceBus, SQLAzure, and SQLServer. + Type of database. Possible values include: `MySQL`, `SQLServer`, `SQLAzure`, `Custom`, `NotificationHub`, `ServiceBus`, `EventHub`, `APIHub`, `DocDb`, `RedisCache`, and `PostgreSQL`. 
+ type: string + type: object + type: array + contentShareForceDisabled: + description: |- + Force disable the content share settings. + Force disable the content share settings. + type: boolean + dailyMemoryTimeQuota: + description: |- + The amount of memory in gigabyte-seconds that your application is allowed to consume per day. Setting this value only affects function apps in Consumption Plans. Defaults to 0. + The amount of memory in gigabyte-seconds that your application is allowed to consume per day. Setting this value only affects function apps in Consumption Plans. + type: number + enabled: + description: |- + Is the Windows Function App Slot enabled. Defaults to true. + Is the Windows Function App Slot enabled. + type: boolean + ftpPublishBasicAuthenticationEnabled: + description: Should the default FTP Basic Authentication publishing + profile be enabled. Defaults to true. + type: boolean + functionsExtensionVersion: + description: |- + The runtime version associated with the Function App Slot. Defaults to ~4. + The runtime version associated with the Function App Slot. + type: string + httpsOnly: + description: |- + Can the Function App Slot only be accessed via HTTPS?. Defaults to false. + Can the Function App Slot only be accessed via HTTPS? + type: boolean + identity: + description: an identity block as detailed below. + properties: + identityIds: + description: A list of User Assigned Managed Identity IDs + to be assigned to this Windows Function App Slot. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Windows Function App Slot. + Possible values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + keyVaultReferenceIdentityId: + description: |- + The User Assigned Identity ID used for accessing KeyVault secrets. 
The identity must be assigned to the application in the identity block. For more information see - Access vaults with a user-assigned identity + The User Assigned Identity to use for Key Vault access. + type: string + publicNetworkAccessEnabled: + description: Should public network access be enabled for the Function + App. Defaults to true. + type: boolean + servicePlanId: + description: The ID of the Service Plan in which to run this slot. + If not specified the same Service Plan as the Windows Function + App will be used. + type: string + siteConfig: + description: a site_config block as detailed below. + properties: + alwaysOn: + description: |- + If this Windows Web App is Always On enabled. Defaults to false. + If this Windows Web App is Always On enabled. Defaults to `false`. + type: boolean + apiDefinitionUrl: + description: |- + The URL of the API definition that describes this Windows Function App. + The URL of the API definition that describes this Windows Function App. + type: string + apiManagementApiId: + description: |- + The ID of the API Management API for this Windows Function App. + The ID of the API Management API for this Windows Function App. + type: string + appCommandLine: + description: |- + The program and any arguments used to launch this app via the command line. (Example node myapp.js). + The program and any arguments used to launch this app via the command line. (Example `node myapp.js`). + type: string + appScaleLimit: + description: |- + The number of workers this function app can scale out to. Only applicable to apps on the Consumption and Premium plan. + The number of workers this function app can scale out to. Only applicable to apps on the Consumption and Premium plan. + type: number + appServiceLogs: + description: an app_service_logs block as detailed below. + properties: + diskQuotaMb: + description: |- + The amount of disk space to use for logs. Valid values are between 25 and 100. Defaults to 35. 
+ The amount of disk space to use for logs. Valid values are between `25` and `100`. + type: number + retentionPeriodDays: + description: |- + After how many days backups should be deleted. Defaults to 30. + The retention period for logs in days. Valid values are between `0` and `99999`. Defaults to `0` (never delete). + type: number + type: object + applicationStack: + description: an application_stack block as detailed below. + properties: + dotnetVersion: + description: |- + The version of .Net. Possible values are v3.0, v4.0, v6.0, v7.0 and v8.0. Defaults to v4.0. + The version of .Net. Possible values are `v3.0`, `v4.0`, `v6.0` and `v7.0` + type: string + javaVersion: + description: |- + The version of Java to use. Possible values are 1.8, 11 and 17 (In-Preview). + The version of Java to use. Possible values are `1.8`, `11` and `17` + type: string + nodeVersion: + description: |- + The version of Node to use. Possible values are ~12, ~14, ~16 and ~18. + The version of Node to use. Possible values include `12`, `14`, `16` and `18` + type: string + powershellCoreVersion: + description: |- + The PowerShell Core version to use. Possible values are 7, and 7.2. + The PowerShell Core version to use. Possible values are `7`, and `7.2` + type: string + useCustomRuntime: + description: |- + Does the Function App use a custom Application Stack? + Does the Function App use a custom Application Stack? + type: boolean + useDotnetIsolatedRuntime: + description: |- + Should the DotNet process use an isolated runtime. Defaults to false. + Should the DotNet process use an isolated runtime. Defaults to `false`. + type: boolean + type: object + autoSwapSlotName: + description: The name of the slot to automatically swap with + when this slot is successfully deployed. + type: string + cors: + description: a cors block as detailed below. + properties: + allowedOrigins: + description: |- + Specifies a list of origins that should be allowed to make cross-origin calls. 
+ Specifies a list of origins that should be allowed to make cross-origin calls. + items: + type: string + type: array + x-kubernetes-list-type: set + supportCredentials: + description: |- + Are credentials allowed in CORS requests? Defaults to false. + Are credentials allowed in CORS requests? Defaults to `false`. + type: boolean + type: object + defaultDocuments: + description: |- + Specifies a list of Default Documents for the Windows Web App. + Specifies a list of Default Documents for the Windows Web App. + items: + type: string + type: array + elasticInstanceMinimum: + description: |- + The number of minimum instances for this Windows Function App. Only affects apps on Elastic Premium plans. + The number of minimum instances for this Windows Function App. Only affects apps on Elastic Premium plans. + type: number + ftpsState: + description: |- + State of FTP / FTPS service for this function app. Possible values include: AllAllowed, FtpsOnly and Disabled. Defaults to Disabled. + State of FTP / FTPS service for this function app. Possible values include: `AllAllowed`, `FtpsOnly` and `Disabled`. Defaults to `Disabled`. + type: string + healthCheckEvictionTimeInMin: + description: |- + The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between 2 and 10. Defaults to 0. Only valid in conjunction with health_check_path. + The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between `2` and `10`. Defaults to `10`. Only valid in conjunction with `health_check_path` + type: number + healthCheckPath: + description: |- + The path to be checked for this function app health. + The path to be checked for this function app health. + type: string + http2Enabled: + description: |- + Specifies if the HTTP2 protocol should be enabled. Defaults to false. + Specifies if the http2 protocol should be enabled. Defaults to `false`. 
+ type: boolean + ipRestriction: + description: an ip_restriction block as detailed below. + items: + properties: + action: + description: |- + The action to take. Possible values are Allow or Deny. Defaults to Allow. + The action to take. Possible values are `Allow` or `Deny`. + type: string + description: + description: |- + The Description of this IP Restriction. + The description of the IP restriction rule. + type: string + headers: + description: a headers block as detailed below. + items: + properties: + xAzureFdid: + description: Specifies a list of Azure Front Door + IDs. + items: + type: string + type: array + xFdHealthProbe: + description: Specifies if a Front Door Health + Probe should be expected. The only possible + value is 1. + items: + type: string + type: array + xForwardedFor: + description: Specifies a list of addresses for + which matching should be applied. Omitting this + value means allow any. + items: + type: string + type: array + xForwardedHost: + description: Specifies a list of Hosts for which + matching should be applied. + items: + type: string + type: array + type: object + type: array + ipAddress: + description: |- + The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + type: string + name: + description: |- + The name which should be used for this Storage Account. + The name which should be used for this `ip_restriction`. + type: string + priority: + description: |- + The priority value of this ip_restriction. Defaults to 65000. + The priority value of this `ip_restriction`. + type: number + serviceTag: + description: |- + The Service Tag used for this IP Restriction. + The Service Tag used for this IP Restriction. 
+ type: string + virtualNetworkSubnetId: + description: |- + The subnet id which will be used by this Function App Slot for regional virtual network integration. + The Virtual Network Subnet ID used for this IP Restriction. + type: string + virtualNetworkSubnetIdRef: + description: Reference to a Subnet in network to populate + virtualNetworkSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkSubnetIdSelector: + description: Selector for a Subnet in network to populate + virtualNetworkSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + ipRestrictionDefaultAction: + description: The Default action for traffic that does not + match any ip_restriction rule. possible values include Allow + and Deny. Defaults to Allow. + type: string + loadBalancingMode: + description: |- + The Site load balancing mode. Possible values include: WeightedRoundRobin, LeastRequests, LeastResponseTime, WeightedTotalTraffic, RequestHash, PerSiteRoundRobin. Defaults to LeastRequests if omitted. + The Site load balancing mode. Possible values include: `WeightedRoundRobin`, `LeastRequests`, `LeastResponseTime`, `WeightedTotalTraffic`, `RequestHash`, `PerSiteRoundRobin`. Defaults to `LeastRequests` if omitted. + type: string + managedPipelineMode: + description: |- + The Managed Pipeline mode. Possible values include: Integrated, Classic. Defaults to Integrated. + The Managed Pipeline mode. Possible values include: `Integrated`, `Classic`. Defaults to `Integrated`. + type: string + minimumTlsVersion: + description: |- + The configures the minimum version of TLS required for SSL requests. Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + The configures the minimum version of TLS required for SSL requests. Possible values include: `1.0`, `1.1`, and `1.2`. Defaults to `1.2`. + type: string + preWarmedInstanceCount: + description: |- + The number of pre-warmed instances for this function app. Only affects apps on an Elastic Premium plan. + The number of pre-warmed instances for this function app. 
Only affects apps on an Elastic Premium plan. + type: number + remoteDebuggingEnabled: + description: |- + Should Remote Debugging be enabled. Defaults to false. + Should Remote Debugging be enabled. Defaults to `false`. + type: boolean + remoteDebuggingVersion: + description: |- + The Remote Debugging Version. Possible values include VS2017, VS2019, and VS2022 + The Remote Debugging Version. Possible values include `VS2017`, `VS2019`, and `VS2022` + type: string + runtimeScaleMonitoringEnabled: + description: |- + Should Scale Monitoring of the Functions Runtime be enabled? + Should Functions Runtime Scale Monitoring be enabled. + type: boolean + scmIpRestriction: + description: a scm_ip_restriction block as detailed below. + items: + properties: + action: + description: |- + The action to take. Possible values are Allow or Deny. Defaults to Allow. + The action to take. Possible values are `Allow` or `Deny`. + type: string + description: + description: |- + The Description of this IP Restriction. + The description of the IP restriction rule. + type: string + headers: + description: a headers block as detailed below. + items: + properties: + xAzureFdid: + description: Specifies a list of Azure Front Door + IDs. + items: + type: string + type: array + xFdHealthProbe: + description: Specifies if a Front Door Health + Probe should be expected. The only possible + value is 1. + items: + type: string + type: array + xForwardedFor: + description: Specifies a list of addresses for + which matching should be applied. Omitting this + value means allow any. + items: + type: string + type: array + xForwardedHost: + description: Specifies a list of Hosts for which + matching should be applied. + items: + type: string + type: array + type: object + type: array + ipAddress: + description: |- + The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + The CIDR notation of the IP or IP Range to match. 
For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + type: string + name: + description: |- + The name which should be used for this Storage Account. + The name which should be used for this `ip_restriction`. + type: string + priority: + description: |- + The priority value of this ip_restriction. Defaults to 65000. + The priority value of this `ip_restriction`. + type: number + serviceTag: + description: |- + The Service Tag used for this IP Restriction. + The Service Tag used for this IP Restriction. + type: string + virtualNetworkSubnetId: + description: |- + The subnet id which will be used by this Function App Slot for regional virtual network integration. + The Virtual Network Subnet ID used for this IP Restriction. + type: string + virtualNetworkSubnetIdRef: + description: Reference to a Subnet in network to populate + virtualNetworkSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkSubnetIdSelector: + description: Selector for a Subnet in network to populate + virtualNetworkSubnetId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + scmIpRestrictionDefaultAction: + description: The Default action for traffic that does not + match any scm_ip_restriction rule. possible values include + Allow and Deny. Defaults to Allow. + type: string + scmMinimumTlsVersion: + description: |- + Configures the minimum version of TLS required for SSL requests to the SCM site Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + Configures the minimum version of TLS required for SSL requests to the SCM site Possible values include: `1.0`, `1.1`, and `1.2`. Defaults to `1.2`. + type: string + scmUseMainIpRestriction: + description: |- + Should the Windows Function App ip_restriction configuration be used for the SCM also. + Should the Windows Function App `ip_restriction` configuration be used for the SCM also. 
+ type: boolean + use32BitWorker: + description: |- + Should the Windows Web App use a 32-bit worker. Defaults to true. + Should the Windows Web App use a 32-bit worker. + type: boolean + vnetRouteAllEnabled: + description: |- + Should all outbound traffic to have NAT Gateways, Network Security Groups and User Defined Routes applied? Defaults to false. + Should all outbound traffic to have Virtual Network Security Groups and User Defined Routes applied? Defaults to `false`. + type: boolean + websocketsEnabled: + description: |- + Should Web Sockets be enabled. Defaults to false. + Should Web Sockets be enabled. Defaults to `false`. + type: boolean + workerCount: + description: |- + The number of Workers for this Windows Function App. + The number of Workers for this Windows Function App. + type: number + type: object + storageAccount: + description: One or more storage_account blocks as defined below. + items: + properties: + accountName: + description: The Name of the Storage Account. + type: string + mountPath: + description: The path at which to mount the storage share. + type: string + name: + description: The name which should be used for this Storage + Account. + type: string + shareName: + description: The Name of the File Share or Container Name + for Blob storage. + type: string + type: + description: The Azure Storage Type. Possible values include + AzureFiles. + type: string + type: object + type: array + storageAccountName: + description: |- + The backend storage account name which will be used by this Function App Slot. + The backend storage account name which will be used by this Function App Slot. + type: string + storageAccountNameRef: + description: Reference to a Account in storage to populate storageAccountName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + storageAccountNameSelector: + description: Selector for a Account in storage to populate storageAccountName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + storageKeyVaultSecretId: + description: |- + The Key Vault Secret ID, optionally including version, that contains the Connection String to connect to the storage account for this Function App Slot. + The Key Vault Secret ID, including version, that contains the Connection String to connect to the storage account for this Function App. + type: string + storageUsesManagedIdentity: + description: |- + Should the Function App Slot use its Managed Identity to access storage. + Should the Function App Slot use its Managed Identity to access storage? + type: boolean + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Windows Function App Slot. + type: object + x-kubernetes-map-type: granular + virtualNetworkSubnetId: + description: The subnet id which will be used by this Function + App Slot for regional virtual network integration. + type: string + virtualNetworkSubnetIdRef: + description: Reference to a Subnet in network to populate virtualNetworkSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkSubnetIdSelector: + description: Selector for a Subnet in network to populate virtualNetworkSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + webdeployPublishBasicAuthenticationEnabled: + description: Should the default WebDeploy Basic Authentication + publishing credentials enabled. Defaults to true. + type: boolean + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. 
Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. 
+ type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.siteConfig is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.siteConfig) + || (has(self.initProvider) && has(self.initProvider.siteConfig))' + status: + description: WindowsFunctionAppSlotStatus defines the observed state of + WindowsFunctionAppSlot. + properties: + atProvider: + properties: + appSettings: + additionalProperties: + type: string + description: |- + A map of key-value pairs for App Settings and custom values. 
+ A map of key-value pairs for [App Settings](https://docs.microsoft.com/en-us/azure/azure-functions/functions-app-settings) and custom values. + type: object + x-kubernetes-map-type: granular + authSettings: + description: an auth_settings block as detailed below. + properties: + activeDirectory: + description: an active_directory block as detailed below. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Active Directory. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The App Setting name that contains the client secret of the Client. Cannot be used with `client_secret`. + type: string + type: object + additionalLoginParameters: + additionalProperties: + type: string + description: |- + Specifies a map of login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + Specifies a map of Login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + type: object + x-kubernetes-map-type: granular + allowedExternalRedirectUrls: + description: |- + Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + items: + type: string + type: array + defaultProvider: + description: |- + The default authentication provider to use when multiple providers are configured. 
Possible values include: AzureActiveDirectory, Facebook, Google, MicrosoftAccount, Twitter, Github. + The default authentication provider to use when multiple providers are configured. Possible values include: `AzureActiveDirectory`, `Facebook`, `Google`, `MicrosoftAccount`, `Twitter`, `Github`. + type: string + enabled: + description: |- + Should the Authentication / Authorization feature be enabled? + Should the Authentication / Authorization feature be enabled? + type: boolean + facebook: + description: a facebook block as detailed below. + properties: + appId: + description: |- + The App ID of the Facebook app used for login. + The App ID of the Facebook app used for login. + type: string + appSecretSettingName: + description: |- + The app setting name that contains the app_secret value used for Facebook Login. + The app setting name that contains the `app_secret` value used for Facebook Login. Cannot be specified with `app_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + Specifies a list of OAuth 2.0 scopes to be requested as part of Facebook Login authentication. + items: + type: string + type: array + type: object + github: + description: a github block as detailed below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the GitHub app used for login. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for GitHub Login. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. 
If not specified, wl.basic is used as the default scope. + Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + items: + type: string + type: array + type: object + google: + description: a google block as detailed below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Google web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Google Login. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + Specifies a list of OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication. If not specified, "openid", "profile", and "email" are used as default scopes. + items: + type: string + type: array + type: object + issuer: + description: |- + The OpenID Connect Issuer URI that represents the entity which issues access tokens. + The OpenID Connect Issuer URI that represents the entity which issues access tokens. + type: string + microsoft: + description: a microsoft block as detailed below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OAuth 2.0 client ID that was created for the app used for authentication. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. 
Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, wl.basic is used as the default scope. + The list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, `wl.basic` is used as the default scope. + items: + type: string + type: array + type: object + runtimeVersion: + description: |- + The RuntimeVersion of the Authentication / Authorization feature in use. + The RuntimeVersion of the Authentication / Authorization feature in use. + type: string + tokenRefreshExtensionHours: + description: |- + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + type: number + tokenStoreEnabled: + description: |- + Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to false. + Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to `false`. + type: boolean + twitter: + description: a twitter block as detailed below. + properties: + consumerKey: + description: |- + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + type: string + consumerSecretSettingName: + description: |- + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret`. 
+ type: string + type: object + unauthenticatedClientAction: + description: |- + The action to take when an unauthenticated client attempts to access the app. Possible values include: RedirectToLoginPage, AllowAnonymous. + The action to take when an unauthenticated client attempts to access the app. Possible values include: `RedirectToLoginPage`, `AllowAnonymous`. + type: string + type: object + authSettingsV2: + description: an auth_settings_v2 block as detailed below. + properties: + activeDirectoryV2: + description: An active_directory_v2 block as defined below. + properties: + allowedApplications: + description: |- + The list of allowed Applications for the Default Authorisation Policy. + The list of allowed Applications for the Default Authorisation Policy. + items: + type: string + type: array + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + items: + type: string + type: array + allowedGroups: + description: |- + The list of allowed Group Names for the Default Authorisation Policy. + The list of allowed Group Names for the Default Authorisation Policy. + items: + type: string + type: array + allowedIdentities: + description: |- + The list of allowed Identities for the Default Authorisation Policy. + The list of allowed Identities for the Default Authorisation Policy. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Active Directory. + type: string + clientSecretCertificateThumbprint: + description: |- + The thumbprint of the certificate used for signing purposes. + The thumbprint of the certificate used for signing purposes. 
+ type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The App Setting name that contains the client secret of the Client. + type: string + jwtAllowedClientApplications: + description: |- + A list of Allowed Client Applications in the JWT Claim. + A list of Allowed Client Applications in the JWT Claim. + items: + type: string + type: array + jwtAllowedGroups: + description: |- + A list of Allowed Groups in the JWT Claim. + A list of Allowed Groups in the JWT Claim. + items: + type: string + type: array + loginParameters: + additionalProperties: + type: string + description: |- + A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + type: object + x-kubernetes-map-type: granular + tenantAuthEndpoint: + description: |- + The Azure Tenant Endpoint for the Authenticating Tenant. e.g. https://login.microsoftonline.com/v2.0/{tenant-guid}/ + The Azure Tenant Endpoint for the Authenticating Tenant. e.g. `https://login.microsoftonline.com/v2.0/{tenant-guid}/`. + type: string + wwwAuthenticationDisabled: + description: |- + Should the www-authenticate provider should be omitted from the request? Defaults to false. + Should the www-authenticate provider should be omitted from the request? Defaults to `false` + type: boolean + type: object + appleV2: + description: An apple_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Apple web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Apple Login. 
+ type: string + loginScopes: + description: The list of Login scopes that should be requested + as part of Microsoft Account authentication. + items: + type: string + type: array + type: object + authEnabled: + description: |- + Should the AuthV2 Settings be enabled. Defaults to false. + Should the AuthV2 Settings be enabled. Defaults to `false` + type: boolean + azureStaticWebAppV2: + description: An azure_static_web_app_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Static Web App Authentication. + type: string + type: object + configFilePath: + description: |- + The path to the App Auth settings. + The path to the App Auth settings. **Note:** Relative Paths are evaluated from the Site Root directory. + type: string + customOidcV2: + description: Zero or more custom_oidc_v2 blocks as defined + below. + items: + properties: + authorisationEndpoint: + description: |- + The endpoint to make the Authorisation Request as supplied by openid_configuration_endpoint response. + The endpoint to make the Authorisation Request. + type: string + certificationUri: + description: |- + The endpoint that provides the keys necessary to validate the token as supplied by openid_configuration_endpoint response. + The endpoint that provides the keys necessary to validate the token. + type: string + clientCredentialMethod: + description: |- + The Client Credential Method used. + The Client Credential Method used. Currently the only supported value is `ClientSecretPost`. + type: string + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with this Custom OIDC. 
+ type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The App Setting name that contains the secret for this Custom OIDC Client. + type: string + issuerEndpoint: + description: |- + The endpoint that issued the Token as supplied by openid_configuration_endpoint response. + The endpoint that issued the Token. + type: string + name: + description: |- + The name which should be used for this Storage Account. + The name of the Custom OIDC Authentication Provider. + type: string + nameClaimType: + description: |- + The name of the claim that contains the users name. + The name of the claim that contains the users name. + type: string + openidConfigurationEndpoint: + description: |- + The app setting name that contains the client_secret value used for the Custom OIDC Login. + The endpoint that contains all the configuration endpoints for this Custom OIDC provider. + type: string + scopes: + description: |- + The list of the scopes that should be requested while authenticating. + The list of the scopes that should be requested while authenticating. + items: + type: string + type: array + tokenEndpoint: + description: |- + The endpoint used to request a Token as supplied by openid_configuration_endpoint response. + The endpoint used to request a Token. + type: string + type: object + type: array + defaultProvider: + description: |- + The Default Authentication Provider to use when the unauthenticated_action is set to RedirectToLoginPage. Possible values include: apple, azureactivedirectory, facebook, github, google, twitter and the name of your custom_oidc_v2 provider. + The Default Authentication Provider to use when the `unauthenticated_action` is set to `RedirectToLoginPage`. Possible values include: `apple`, `azureactivedirectory`, `facebook`, `github`, `google`, `twitter` and the `name` of your `custom_oidc_v2` provider. 
+ type: string + excludedPaths: + description: |- + The paths which should be excluded from the unauthenticated_action when it is set to RedirectToLoginPage. + The paths which should be excluded from the `unauthenticated_action` when it is set to `RedirectToLoginPage`. + items: + type: string + type: array + facebookV2: + description: A facebook_v2 block as defined below. + properties: + appId: + description: |- + The App ID of the Facebook app used for login. + The App ID of the Facebook app used for login. + type: string + appSecretSettingName: + description: |- + The app setting name that contains the app_secret value used for Facebook Login. + The app setting name that contains the `app_secret` value used for Facebook Login. + type: string + graphApiVersion: + description: |- + The version of the Facebook API to be used while logging in. + The version of the Facebook API to be used while logging in. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of scopes to be requested as part of Facebook Login authentication. + items: + type: string + type: array + type: object + forwardProxyConvention: + description: |- + The convention used to determine the url of the request made. Possible values include NoProxy, Standard, Custom. Defaults to NoProxy. + The convention used to determine the url of the request made. Possible values include `ForwardProxyConventionNoProxy`, `ForwardProxyConventionStandard`, `ForwardProxyConventionCustom`. Defaults to `ForwardProxyConventionNoProxy` + type: string + forwardProxyCustomHostHeaderName: + description: |- + The name of the custom header containing the host of the request. + The name of the header containing the host of the request. + type: string + forwardProxyCustomSchemeHeaderName: + description: |- + The name of the custom header containing the scheme of the request. 
+ The name of the header containing the scheme of the request. + type: string + githubV2: + description: A github_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the GitHub app used for login. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for GitHub Login. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + items: + type: string + type: array + type: object + googleV2: + description: A google_v2 block as defined below. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed Audiences that will be requested as part of Google Sign-In authentication. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Google web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Google Login. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of Login scopes that will be requested as part of Google Sign-In authentication. 
+ items: + type: string + type: array + type: object + httpRouteApiPrefix: + description: |- + The prefix that should precede all the authentication and authorisation paths. Defaults to /.auth. + The prefix that should precede all the authentication and authorisation paths. Defaults to `/.auth` + type: string + login: + description: A login block as defined below. + properties: + allowedExternalRedirectUrls: + description: |- + External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. + External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. **Note:** URLs within the current domain are always implicitly allowed. + items: + type: string + type: array + cookieExpirationConvention: + description: |- + The method by which cookies expire. Possible values include: FixedTime, and IdentityProviderDerived. Defaults to FixedTime. + The method by which cookies expire. Possible values include: `FixedTime`, and `IdentityProviderDerived`. Defaults to `FixedTime`. + type: string + cookieExpirationTime: + description: |- + The time after the request is made when the session cookie should expire. Defaults to 08:00:00. + The time after the request is made when the session cookie should expire. Defaults to `08:00:00`. + type: string + logoutEndpoint: + description: |- + The endpoint to which logout requests should be made. + The endpoint to which logout requests should be made. + type: string + nonceExpirationTime: + description: |- + The time after the request is made when the nonce should expire. Defaults to 00:05:00. + The time after the request is made when the nonce should expire. Defaults to `00:05:00`. 
+ type: string + preserveUrlFragmentsForLogins: + description: |- + Should the fragments from the request be preserved after the login request is made. Defaults to false. + Should the fragments from the request be preserved after the login request is made. Defaults to `false`. + type: boolean + tokenRefreshExtensionTime: + description: |- + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + type: number + tokenStoreEnabled: + description: |- + Should the Token Store configuration Enabled. Defaults to false + Should the Token Store configuration Enabled. Defaults to `false` + type: boolean + tokenStorePath: + description: |- + The directory path in the App Filesystem in which the tokens will be stored. + The directory path in the App Filesystem in which the tokens will be stored. + type: string + tokenStoreSasSettingName: + description: |- + The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + type: string + validateNonce: + description: |- + Should the nonce be validated while completing the login flow. Defaults to true. + Should the nonce be validated while completing the login flow. Defaults to `true`. + type: boolean + type: object + microsoftV2: + description: A microsoft_v2 block as defined below. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. 
+ items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OAuth 2.0 client ID that was created for the app used for authentication. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + The list of Login scopes that will be requested as part of Microsoft Account authentication. + items: + type: string + type: array + type: object + requireAuthentication: + description: |- + Should the authentication flow be used for all requests. + Should the authentication flow be used for all requests. + type: boolean + requireHttps: + description: |- + Should HTTPS be required on connections? Defaults to true. + Should HTTPS be required on connections? Defaults to true. + type: boolean + runtimeVersion: + description: |- + The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to ~1. + The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to `~1` + type: string + twitterV2: + description: A twitter_v2 block as defined below. + properties: + consumerKey: + description: |- + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + type: string + consumerSecretSettingName: + description: |- + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. 
+ type: string + type: object + unauthenticatedAction: + description: |- + The action to take for requests made without authentication. Possible values include RedirectToLoginPage, AllowAnonymous, Return401, and Return403. Defaults to RedirectToLoginPage. + The action to take for requests made without authentication. Possible values include `RedirectToLoginPage`, `AllowAnonymous`, `Return401`, and `Return403`. Defaults to `RedirectToLoginPage`. + type: string + type: object + backup: + description: a backup block as detailed below. + properties: + enabled: + description: |- + Should this backup job be enabled? Defaults to true. + Should this backup job be enabled? + type: boolean + name: + description: |- + The name which should be used for this Backup. + The name which should be used for this Backup. + type: string + schedule: + description: a schedule block as detailed below. + properties: + frequencyInterval: + description: |- + How often the backup should be executed (e.g. for weekly backup, this should be set to 7 and frequency_unit should be set to Day). + How often the backup should be executed (e.g. for weekly backup, this should be set to `7` and `frequency_unit` should be set to `Day`). + type: number + frequencyUnit: + description: |- + The unit of time for how often the backup should take place. Possible values include: Day and Hour. + The unit of time for how often the backup should take place. Possible values include: `Day` and `Hour`. + type: string + keepAtLeastOneBackup: + description: |- + Should the service keep at least one backup, regardless of age of backup. Defaults to false. + Should the service keep at least one backup, regardless of age of backup. Defaults to `false`. + type: boolean + lastExecutionTime: + description: |- + The time the backup was last attempted. + The time the backup was last attempted. + type: string + retentionPeriodDays: + description: |- + After how many days backups should be deleted. Defaults to 30. 
+ After how many days backups should be deleted. + type: number + startTime: + description: |- + When the schedule should start working in RFC-3339 format. + When the schedule should start working in RFC-3339 format. + type: string + type: object + type: object + builtinLoggingEnabled: + description: |- + Should built-in logging be enabled. Configures AzureWebJobsDashboard app setting based on the configured storage setting. Defaults to true. + Should built in logging be enabled. Configures `AzureWebJobsDashboard` app setting based on the configured storage setting. + type: boolean + clientCertificateEnabled: + description: |- + Should the Function App Slot use Client Certificates. + Should the Function App Slot use Client Certificates. + type: boolean + clientCertificateExclusionPaths: + description: |- + Paths to exclude when using client certificates, separated by ; + Paths to exclude when using client certificates, separated by ; + type: string + clientCertificateMode: + description: |- + The mode of the Function App Slot's client certificates requirement for incoming requests. Possible values are Required, Optional, and OptionalInteractiveUser. Defaults to Optional. + The mode of the Function App Slot's client certificates requirement for incoming requests. Possible values are `Required`, `Optional`, and `OptionalInteractiveUser`. + type: string + connectionString: + description: a connection_string block as detailed below. + items: + properties: + name: + description: |- + The name which should be used for this Connection. + The name which should be used for this Connection. + type: string + type: + description: |- + Type of database. Possible values include: APIHub, Custom, DocDb, EventHub, MySQL, NotificationHub, PostgreSQL, RedisCache, ServiceBus, SQLAzure, and SQLServer. + Type of database. Possible values include: `MySQL`, `SQLServer`, `SQLAzure`, `Custom`, `NotificationHub`, `ServiceBus`, `EventHub`, `APIHub`, `DocDb`, `RedisCache`, and `PostgreSQL`. 
+ type: string + type: object + type: array + contentShareForceDisabled: + description: |- + Force disable the content share settings. + Force disable the content share settings. + type: boolean + dailyMemoryTimeQuota: + description: |- + The amount of memory in gigabyte-seconds that your application is allowed to consume per day. Setting this value only affects function apps in Consumption Plans. Defaults to 0. + The amount of memory in gigabyte-seconds that your application is allowed to consume per day. Setting this value only affects function apps in Consumption Plans. + type: number + defaultHostname: + description: |- + The default hostname of the Windows Function App Slot. + The default hostname of the Windows Function App Slot. + type: string + enabled: + description: |- + Is the Windows Function App Slot enabled. Defaults to true. + Is the Windows Function App Slot enabled. + type: boolean + ftpPublishBasicAuthenticationEnabled: + description: Should the default FTP Basic Authentication publishing + profile be enabled. Defaults to true. + type: boolean + functionAppId: + description: |- + The name of the Windows Function App this Slot is a member of. Changing this forces a new resource to be created. + The ID of the Windows Function App this Slot is a member of. + type: string + functionsExtensionVersion: + description: |- + The runtime version associated with the Function App Slot. Defaults to ~4. + The runtime version associated with the Function App Slot. + type: string + hostingEnvironmentId: + description: The ID of the App Service Environment used by Function + App Slot. + type: string + httpsOnly: + description: |- + Can the Function App Slot only be accessed via HTTPS?. Defaults to false. + Can the Function App Slot only be accessed via HTTPS? + type: boolean + id: + description: The ID of the Windows Function App Slot + type: string + identity: + description: an identity block as detailed below. 
+ properties: + identityIds: + description: A list of User Assigned Managed Identity IDs + to be assigned to this Windows Function App Slot. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Principal ID associated with this Managed + Service Identity. + type: string + tenantId: + description: The Tenant ID associated with this Managed Service + Identity. + type: string + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Windows Function App Slot. + Possible values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + keyVaultReferenceIdentityId: + description: |- + The User Assigned Identity ID used for accessing KeyVault secrets. The identity must be assigned to the application in the identity block. For more information see - Access vaults with a user-assigned identity + The User Assigned Identity to use for Key Vault access. + type: string + kind: + description: |- + The Kind value for this Windows Function App Slot. + The Kind value for this Windows Function App Slot. + type: string + outboundIpAddressList: + description: |- + A list of outbound IP addresses. For example ["52.23.25.3", "52.143.43.12"]. + A list of outbound IP addresses. For example `["52.23.25.3", "52.143.43.12"]`. + items: + type: string + type: array + outboundIpAddresses: + description: |- + A comma separated list of outbound IP addresses as a string. For example 52.23.25.3,52.143.43.12. + A comma separated list of outbound IP addresses as a string. For example `52.23.25.3,52.143.43.12`. + type: string + possibleOutboundIpAddressList: + description: |- + A list of possible outbound IP addresses, not all of which are necessarily in use. This is a superset of outbound_ip_address_list. For example ["52.23.25.3", "52.143.43.12"]. + A list of possible outbound IP addresses, not all of which are necessarily in use. 
This is a superset of `outbound_ip_address_list`. For example `["52.23.25.3", "52.143.43.12"]`. + items: + type: string + type: array + possibleOutboundIpAddresses: + description: |- + A comma separated list of possible outbound IP addresses as a string. For example 52.23.25.3,52.143.43.12,52.143.43.17. This is a superset of outbound_ip_addresses. For example ["52.23.25.3", "52.143.43.12","52.143.43.17"]. + A comma separated list of possible outbound IP addresses as a string. For example `52.23.25.3,52.143.43.12,52.143.43.17`. This is a superset of `outbound_ip_addresses`. For example `["52.23.25.3", "52.143.43.12","52.143.43.17"]`. + type: string + publicNetworkAccessEnabled: + description: Should public network access be enabled for the Function + App. Defaults to true. + type: boolean + servicePlanId: + description: The ID of the Service Plan in which to run this slot. + If not specified the same Service Plan as the Windows Function + App will be used. + type: string + siteConfig: + description: a site_config block as detailed below. + properties: + alwaysOn: + description: |- + If this Windows Web App is Always On enabled. Defaults to false. + If this Windows Web App is Always On enabled. Defaults to `false`. + type: boolean + apiDefinitionUrl: + description: |- + The URL of the API definition that describes this Windows Function App. + The URL of the API definition that describes this Windows Function App. + type: string + apiManagementApiId: + description: |- + The ID of the API Management API for this Windows Function App. + The ID of the API Management API for this Windows Function App. + type: string + appCommandLine: + description: |- + The program and any arguments used to launch this app via the command line. (Example node myapp.js). + The program and any arguments used to launch this app via the command line. (Example `node myapp.js`). + type: string + appScaleLimit: + description: |- + The number of workers this function app can scale out to. 
Only applicable to apps on the Consumption and Premium plan. + The number of workers this function app can scale out to. Only applicable to apps on the Consumption and Premium plan. + type: number + appServiceLogs: + description: an app_service_logs block as detailed below. + properties: + diskQuotaMb: + description: |- + The amount of disk space to use for logs. Valid values are between 25 and 100. Defaults to 35. + The amount of disk space to use for logs. Valid values are between `25` and `100`. + type: number + retentionPeriodDays: + description: |- + After how many days backups should be deleted. Defaults to 30. + The retention period for logs in days. Valid values are between `0` and `99999`. Defaults to `0` (never delete). + type: number + type: object + applicationStack: + description: an application_stack block as detailed below. + properties: + dotnetVersion: + description: |- + The version of .Net. Possible values are v3.0, v4.0, v6.0, v7.0 and v8.0. Defaults to v4.0. + The version of .Net. Possible values are `v3.0`, `v4.0`, `v6.0` and `v7.0` + type: string + javaVersion: + description: |- + The version of Java to use. Possible values are 1.8, 11 and 17 (In-Preview). + The version of Java to use. Possible values are `1.8`, `11` and `17` + type: string + nodeVersion: + description: |- + The version of Node to use. Possible values are ~12, ~14, ~16 and ~18. + The version of Node to use. Possible values include `12`, `14`, `16` and `18` + type: string + powershellCoreVersion: + description: |- + The PowerShell Core version to use. Possible values are 7, and 7.2. + The PowerShell Core version to use. Possible values are `7`, and `7.2` + type: string + useCustomRuntime: + description: |- + Does the Function App use a custom Application Stack? + Does the Function App use a custom Application Stack? + type: boolean + useDotnetIsolatedRuntime: + description: |- + Should the DotNet process use an isolated runtime. Defaults to false. 
+ Should the DotNet process use an isolated runtime. Defaults to `false`. + type: boolean + type: object + autoSwapSlotName: + description: The name of the slot to automatically swap with + when this slot is successfully deployed. + type: string + cors: + description: a cors block as detailed below. + properties: + allowedOrigins: + description: |- + Specifies a list of origins that should be allowed to make cross-origin calls. + Specifies a list of origins that should be allowed to make cross-origin calls. + items: + type: string + type: array + x-kubernetes-list-type: set + supportCredentials: + description: |- + Are credentials allowed in CORS requests? Defaults to false. + Are credentials allowed in CORS requests? Defaults to `false`. + type: boolean + type: object + defaultDocuments: + description: |- + Specifies a list of Default Documents for the Windows Web App. + Specifies a list of Default Documents for the Windows Web App. + items: + type: string + type: array + detailedErrorLoggingEnabled: + description: |- + Is detailed error logging enabled + Is detailed error logging enabled + type: boolean + elasticInstanceMinimum: + description: |- + The number of minimum instances for this Windows Function App. Only affects apps on Elastic Premium plans. + The number of minimum instances for this Windows Function App. Only affects apps on Elastic Premium plans. + type: number + ftpsState: + description: |- + State of FTP / FTPS service for this function app. Possible values include: AllAllowed, FtpsOnly and Disabled. Defaults to Disabled. + State of FTP / FTPS service for this function app. Possible values include: `AllAllowed`, `FtpsOnly` and `Disabled`. Defaults to `Disabled`. + type: string + healthCheckEvictionTimeInMin: + description: |- + The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between 2 and 10. Defaults to 0. Only valid in conjunction with health_check_path. 
+ The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between `2` and `10`. Defaults to `10`. Only valid in conjunction with `health_check_path` + type: number + healthCheckPath: + description: |- + The path to be checked for this function app health. + The path to be checked for this function app health. + type: string + http2Enabled: + description: |- + Specifies if the HTTP2 protocol should be enabled. Defaults to false. + Specifies if the http2 protocol should be enabled. Defaults to `false`. + type: boolean + ipRestriction: + description: an ip_restriction block as detailed below. + items: + properties: + action: + description: |- + The action to take. Possible values are Allow or Deny. Defaults to Allow. + The action to take. Possible values are `Allow` or `Deny`. + type: string + description: + description: |- + The Description of this IP Restriction. + The description of the IP restriction rule. + type: string + headers: + description: a headers block as detailed below. + items: + properties: + xAzureFdid: + description: Specifies a list of Azure Front Door + IDs. + items: + type: string + type: array + xFdHealthProbe: + description: Specifies if a Front Door Health + Probe should be expected. The only possible + value is 1. + items: + type: string + type: array + xForwardedFor: + description: Specifies a list of addresses for + which matching should be applied. Omitting this + value means allow any. + items: + type: string + type: array + xForwardedHost: + description: Specifies a list of Hosts for which + matching should be applied. + items: + type: string + type: array + type: object + type: array + ipAddress: + description: |- + The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + The CIDR notation of the IP or IP Range to match. 
For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + type: string + name: + description: |- + The name which should be used for this Storage Account. + The name which should be used for this `ip_restriction`. + type: string + priority: + description: |- + The priority value of this ip_restriction. Defaults to 65000. + The priority value of this `ip_restriction`. + type: number + serviceTag: + description: |- + The Service Tag used for this IP Restriction. + The Service Tag used for this IP Restriction. + type: string + virtualNetworkSubnetId: + description: |- + The subnet id which will be used by this Function App Slot for regional virtual network integration. + The Virtual Network Subnet ID used for this IP Restriction. + type: string + type: object + type: array + ipRestrictionDefaultAction: + description: The Default action for traffic that does not + match any ip_restriction rule. possible values include Allow + and Deny. Defaults to Allow. + type: string + loadBalancingMode: + description: |- + The Site load balancing mode. Possible values include: WeightedRoundRobin, LeastRequests, LeastResponseTime, WeightedTotalTraffic, RequestHash, PerSiteRoundRobin. Defaults to LeastRequests if omitted. + The Site load balancing mode. Possible values include: `WeightedRoundRobin`, `LeastRequests`, `LeastResponseTime`, `WeightedTotalTraffic`, `RequestHash`, `PerSiteRoundRobin`. Defaults to `LeastRequests` if omitted. + type: string + managedPipelineMode: + description: |- + The Managed Pipeline mode. Possible values include: Integrated, Classic. Defaults to Integrated. + The Managed Pipeline mode. Possible values include: `Integrated`, `Classic`. Defaults to `Integrated`. + type: string + minimumTlsVersion: + description: |- + The configures the minimum version of TLS required for SSL requests. Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + The configures the minimum version of TLS required for SSL requests. 
Possible values include: `1.0`, `1.1`, and `1.2`. Defaults to `1.2`. + type: string + preWarmedInstanceCount: + description: |- + The number of pre-warmed instances for this function app. Only affects apps on an Elastic Premium plan. + The number of pre-warmed instances for this function app. Only affects apps on an Elastic Premium plan. + type: number + remoteDebuggingEnabled: + description: |- + Should Remote Debugging be enabled. Defaults to false. + Should Remote Debugging be enabled. Defaults to `false`. + type: boolean + remoteDebuggingVersion: + description: |- + The Remote Debugging Version. Possible values include VS2017, VS2019, and VS2022 + The Remote Debugging Version. Possible values include `VS2017`, `VS2019`, and `VS2022` + type: string + runtimeScaleMonitoringEnabled: + description: |- + Should Scale Monitoring of the Functions Runtime be enabled? + Should Functions Runtime Scale Monitoring be enabled. + type: boolean + scmIpRestriction: + description: a scm_ip_restriction block as detailed below. + items: + properties: + action: + description: |- + The action to take. Possible values are Allow or Deny. Defaults to Allow. + The action to take. Possible values are `Allow` or `Deny`. + type: string + description: + description: |- + The Description of this IP Restriction. + The description of the IP restriction rule. + type: string + headers: + description: a headers block as detailed below. + items: + properties: + xAzureFdid: + description: Specifies a list of Azure Front Door + IDs. + items: + type: string + type: array + xFdHealthProbe: + description: Specifies if a Front Door Health + Probe should be expected. The only possible + value is 1. + items: + type: string + type: array + xForwardedFor: + description: Specifies a list of addresses for + which matching should be applied. Omitting this + value means allow any. 
+ items: + type: string + type: array + xForwardedHost: + description: Specifies a list of Hosts for which + matching should be applied. + items: + type: string + type: array + type: object + type: array + ipAddress: + description: |- + The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + type: string + name: + description: |- + The name which should be used for this Storage Account. + The name which should be used for this `ip_restriction`. + type: string + priority: + description: |- + The priority value of this ip_restriction. Defaults to 65000. + The priority value of this `ip_restriction`. + type: number + serviceTag: + description: |- + The Service Tag used for this IP Restriction. + The Service Tag used for this IP Restriction. + type: string + virtualNetworkSubnetId: + description: |- + The subnet id which will be used by this Function App Slot for regional virtual network integration. + The Virtual Network Subnet ID used for this IP Restriction. + type: string + type: object + type: array + scmIpRestrictionDefaultAction: + description: The Default action for traffic that does not + match any scm_ip_restriction rule. possible values include + Allow and Deny. Defaults to Allow. + type: string + scmMinimumTlsVersion: + description: |- + Configures the minimum version of TLS required for SSL requests to the SCM site Possible values include: 1.0, 1.1, and 1.2. Defaults to 1.2. + Configures the minimum version of TLS required for SSL requests to the SCM site Possible values include: `1.0`, `1.1`, and `1.2`. Defaults to `1.2`. + type: string + scmType: + description: |- + The SCM Type in use by the Windows Function App. + The SCM Type in use by the Windows Function App. 
+ type: string + scmUseMainIpRestriction: + description: |- + Should the Windows Function App ip_restriction configuration be used for the SCM also. + Should the Windows Function App `ip_restriction` configuration be used for the SCM also. + type: boolean + use32BitWorker: + description: |- + Should the Windows Web App use a 32-bit worker. Defaults to true. + Should the Windows Web App use a 32-bit worker. + type: boolean + vnetRouteAllEnabled: + description: |- + Should all outbound traffic to have NAT Gateways, Network Security Groups and User Defined Routes applied? Defaults to false. + Should all outbound traffic to have Virtual Network Security Groups and User Defined Routes applied? Defaults to `false`. + type: boolean + websocketsEnabled: + description: |- + Should Web Sockets be enabled. Defaults to false. + Should Web Sockets be enabled. Defaults to `false`. + type: boolean + windowsFxVersion: + description: |- + The Windows FX Version string. + The Windows FX Version string. + type: string + workerCount: + description: |- + The number of Workers for this Windows Function App. + The number of Workers for this Windows Function App. + type: number + type: object + storageAccount: + description: One or more storage_account blocks as defined below. + items: + properties: + accountName: + description: The Name of the Storage Account. + type: string + mountPath: + description: The path at which to mount the storage share. + type: string + name: + description: The name which should be used for this Storage + Account. + type: string + shareName: + description: The Name of the File Share or Container Name + for Blob storage. + type: string + type: + description: The Azure Storage Type. Possible values include + AzureFiles. + type: string + type: object + type: array + storageAccountName: + description: |- + The backend storage account name which will be used by this Function App Slot. + The backend storage account name which will be used by this Function App Slot. 
+ type: string + storageKeyVaultSecretId: + description: |- + The Key Vault Secret ID, optionally including version, that contains the Connection String to connect to the storage account for this Function App Slot. + The Key Vault Secret ID, including version, that contains the Connection String to connect to the storage account for this Function App. + type: string + storageUsesManagedIdentity: + description: |- + Should the Function App Slot use its Managed Identity to access storage. + Should the Function App Slot use its Managed Identity to access storage? + type: boolean + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Windows Function App Slot. + type: object + x-kubernetes-map-type: granular + virtualNetworkSubnetId: + description: The subnet id which will be used by this Function + App Slot for regional virtual network integration. + type: string + webdeployPublishBasicAuthenticationEnabled: + description: Should the default WebDeploy Basic Authentication + publishing credentials enabled. Defaults to true. + type: boolean + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
+ format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/web.azure.upbound.io_windowswebapps.yaml b/package/crds/web.azure.upbound.io_windowswebapps.yaml index fb029c8ab..7953bd755 100644 --- a/package/crds/web.azure.upbound.io_windowswebapps.yaml +++ b/package/crds/web.azure.upbound.io_windowswebapps.yaml @@ -5454,3 +5454,5184 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: WindowsWebApp is the Schema for the WindowsWebApps API. Manages + a Windows Web App. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: WindowsWebAppSpec defines the desired state of WindowsWebApp + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + appSettings: + additionalProperties: + type: string + description: A map of key-value pairs of App Settings. + type: object + x-kubernetes-map-type: granular + authSettings: + description: An auth_settings block as defined below. + properties: + activeDirectory: + description: An active_directory block as defined above. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. 
+ Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Active Directory. + type: string + clientSecretSecretRef: + description: |- + The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. + The Client Secret for the Client ID. Cannot be used with `client_secret_setting_name`. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The App Setting name that contains the client secret of the Client. Cannot be used with `client_secret`. + type: string + type: object + additionalLoginParameters: + additionalProperties: + type: string + description: |- + Specifies a map of login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + Specifies a map of Login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + type: object + x-kubernetes-map-type: granular + allowedExternalRedirectUrls: + description: |- + Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + items: + type: string + type: array + defaultProvider: + description: |- + The default authentication provider to use when multiple providers are configured. 
Possible values include: AzureActiveDirectory, Facebook, Google, MicrosoftAccount, Twitter, Github + The default authentication provider to use when multiple providers are configured. Possible values include: `AzureActiveDirectory`, `Facebook`, `Google`, `MicrosoftAccount`, `Twitter`, `Github`. + type: string + enabled: + description: |- + Should the Authentication / Authorization feature is enabled for the Windows Web App be enabled? + Should the Authentication / Authorization feature be enabled? + type: boolean + facebook: + description: A facebook block as defined below. + properties: + appId: + description: |- + The App ID of the Facebook app used for login. + The App ID of the Facebook app used for login. + type: string + appSecretSecretRef: + description: |- + The App Secret of the Facebook app used for Facebook login. Cannot be specified with app_secret_setting_name. + The App Secret of the Facebook app used for Facebook Login. Cannot be specified with `app_secret_setting_name`. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + appSecretSettingName: + description: |- + The app setting name that contains the app_secret value used for Facebook Login. + The app setting name that contains the `app_secret` value used for Facebook Login. Cannot be specified with `app_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + Specifies a list of OAuth 2.0 scopes to be requested as part of Facebook Login authentication. + items: + type: string + type: array + type: object + github: + description: A github block as defined below. 
+ properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the GitHub app used for login. + type: string + clientSecretSecretRef: + description: |- + The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. + The Client Secret of the GitHub app used for GitHub Login. Cannot be specified with `client_secret_setting_name`. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for GitHub Login. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + items: + type: string + type: array + type: object + google: + description: A google block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Google web application. + type: string + clientSecretSecretRef: + description: |- + The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. + The client secret associated with the Google web application. Cannot be specified with `client_secret_setting_name`. 
+ properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Google Login. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + Specifies a list of OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication. If not specified, "openid", "profile", and "email" are used as default scopes. + items: + type: string + type: array + type: object + issuer: + description: |- + The OpenID Connect Issuer URI that represents the entity which issues access tokens for this Windows Web App. + The OpenID Connect Issuer URI that represents the entity which issues access tokens. + type: string + microsoft: + description: A microsoft block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OAuth 2.0 client ID that was created for the app used for authentication. + type: string + clientSecretSecretRef: + description: |- + The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. + The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret_setting_name`. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. 
+ type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + The list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, `wl.basic` is used as the default scope. + items: + type: string + type: array + type: object + runtimeVersion: + description: |- + The RuntimeVersion of the Authentication / Authorization feature in use for the Windows Web App. + The RuntimeVersion of the Authentication / Authorization feature in use. + type: string + tokenRefreshExtensionHours: + description: |- + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + type: number + tokenStoreEnabled: + description: |- + Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to false. + Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to `false`. + type: boolean + twitter: + description: A twitter block as defined below. + properties: + consumerKey: + description: |- + The OAuth 1.0a consumer key of the Twitter application used for sign-in. 
+ The OAuth 1.0a consumer key of the Twitter application used for sign-in. + type: string + consumerSecretSecretRef: + description: |- + The OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with consumer_secret_setting_name. + The OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret_setting_name`. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + consumerSecretSettingName: + description: |- + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret`. + type: string + type: object + unauthenticatedClientAction: + description: |- + The action to take when an unauthenticated client attempts to access the app. Possible values include: RedirectToLoginPage, AllowAnonymous. + The action to take when an unauthenticated client attempts to access the app. Possible values include: `RedirectToLoginPage`, `AllowAnonymous`. + type: string + type: object + authSettingsV2: + description: An auth_settings_v2 block as defined below. + properties: + activeDirectoryV2: + description: An active_directory_v2 block as defined below. + properties: + allowedApplications: + description: |- + The list of allowed Applications for the Default Authorisation Policy. + The list of allowed Applications for the Default Authorisation Policy. + items: + type: string + type: array + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. 
+ Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + items: + type: string + type: array + allowedGroups: + description: |- + The list of allowed Group Names for the Default Authorisation Policy. + The list of allowed Group Names for the Default Authorisation Policy. + items: + type: string + type: array + allowedIdentities: + description: |- + The list of allowed Identities for the Default Authorisation Policy. + The list of allowed Identities for the Default Authorisation Policy. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Active Directory. + type: string + clientSecretCertificateThumbprint: + description: |- + The thumbprint of the certificate used for signing purposes. + The thumbprint of the certificate used for signing purposes. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The App Setting name that contains the client secret of the Client. + type: string + jwtAllowedClientApplications: + description: |- + A list of Allowed Client Applications in the JWT Claim. + A list of Allowed Client Applications in the JWT Claim. + items: + type: string + type: array + jwtAllowedGroups: + description: |- + A list of Allowed Groups in the JWT Claim. + A list of Allowed Groups in the JWT Claim. + items: + type: string + type: array + loginParameters: + additionalProperties: + type: string + description: |- + A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + type: object + x-kubernetes-map-type: granular + tenantAuthEndpoint: + description: |- + The Azure Tenant Endpoint for the Authenticating Tenant. e.g. 
https://login.microsoftonline.com/v2.0/{tenant-guid}/ + The Azure Tenant Endpoint for the Authenticating Tenant. e.g. `https://login.microsoftonline.com/v2.0/{tenant-guid}/`. + type: string + wwwAuthenticationDisabled: + description: |- + Should the www-authenticate provider should be omitted from the request? Defaults to false. + Should the www-authenticate provider should be omitted from the request? Defaults to `false` + type: boolean + type: object + appleV2: + description: An apple_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Apple web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Apple Login. + type: string + type: object + authEnabled: + description: |- + Should the AuthV2 Settings be enabled. Defaults to false. + Should the AuthV2 Settings be enabled. Defaults to `false` + type: boolean + azureStaticWebAppV2: + description: An azure_static_web_app_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Static Web App Authentication. + type: string + type: object + configFilePath: + description: |- + The path to the App Auth settings. + The path to the App Auth settings. **Note:** Relative Paths are evaluated from the Site Root directory. + type: string + customOidcV2: + description: Zero or more custom_oidc_v2 blocks as defined + below. + items: + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with this Custom OIDC. 
+ type: string + name: + description: |- + The name which should be used for this TODO. + The name of the Custom OIDC Authentication Provider. + type: string + nameClaimType: + description: |- + The name of the claim that contains the users name. + The name of the claim that contains the users name. + type: string + openidConfigurationEndpoint: + description: |- + The app setting name that contains the client_secret value used for the Custom OIDC Login. + The endpoint that contains all the configuration endpoints for this Custom OIDC provider. + type: string + scopes: + description: |- + The list of the scopes that should be requested while authenticating. + The list of the scopes that should be requested while authenticating. + items: + type: string + type: array + type: object + type: array + defaultProvider: + description: |- + The Default Authentication Provider to use when the unauthenticated_action is set to RedirectToLoginPage. Possible values include: apple, azureactivedirectory, facebook, github, google, twitter and the name of your custom_oidc_v2 provider. + The Default Authentication Provider to use when the `unauthenticated_action` is set to `RedirectToLoginPage`. Possible values include: `apple`, `azureactivedirectory`, `facebook`, `github`, `google`, `twitter` and the `name` of your `custom_oidc_v2` provider. + type: string + excludedPaths: + description: |- + The paths which should be excluded from the unauthenticated_action when it is set to RedirectToLoginPage. + The paths which should be excluded from the `unauthenticated_action` when it is set to `RedirectToLoginPage`. + items: + type: string + type: array + facebookV2: + description: A facebook_v2 block as defined below. + properties: + appId: + description: |- + The App ID of the Facebook app used for login. + The App ID of the Facebook app used for login. + type: string + appSecretSettingName: + description: |- + The app setting name that contains the app_secret value used for Facebook Login. 
+ The app setting name that contains the `app_secret` value used for Facebook Login. + type: string + graphApiVersion: + description: |- + The version of the Facebook API to be used while logging in. + The version of the Facebook API to be used while logging in. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of scopes to be requested as part of Facebook Login authentication. + items: + type: string + type: array + type: object + forwardProxyConvention: + description: |- + The convention used to determine the url of the request made. Possible values include NoProxy, Standard, Custom. Defaults to NoProxy. + The convention used to determine the url of the request made. Possible values include `ForwardProxyConventionNoProxy`, `ForwardProxyConventionStandard`, `ForwardProxyConventionCustom`. Defaults to `ForwardProxyConventionNoProxy` + type: string + forwardProxyCustomHostHeaderName: + description: |- + The name of the custom header containing the host of the request. + The name of the header containing the host of the request. + type: string + forwardProxyCustomSchemeHeaderName: + description: |- + The name of the custom header containing the scheme of the request. + The name of the header containing the scheme of the request. + type: string + githubV2: + description: A github_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the GitHub app used for login. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for GitHub Login. 
+ type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + items: + type: string + type: array + type: object + googleV2: + description: A google_v2 block as defined below. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed Audiences that will be requested as part of Google Sign-In authentication. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Google web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Google Login. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of Login scopes that will be requested as part of Google Sign-In authentication. + items: + type: string + type: array + type: object + httpRouteApiPrefix: + description: |- + The prefix that should precede all the authentication and authorisation paths. Defaults to /.auth. + The prefix that should precede all the authentication and authorisation paths. Defaults to `/.auth` + type: string + login: + description: A login block as defined below. + properties: + allowedExternalRedirectUrls: + description: |- + External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. 
+ External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. **Note:** URLs within the current domain are always implicitly allowed. + items: + type: string + type: array + cookieExpirationConvention: + description: |- + The method by which cookies expire. Possible values include: FixedTime, and IdentityProviderDerived. Defaults to FixedTime. + The method by which cookies expire. Possible values include: `FixedTime`, and `IdentityProviderDerived`. Defaults to `FixedTime`. + type: string + cookieExpirationTime: + description: |- + The time after the request is made when the session cookie should expire. Defaults to 08:00:00. + The time after the request is made when the session cookie should expire. Defaults to `08:00:00`. + type: string + logoutEndpoint: + description: |- + The endpoint to which logout requests should be made. + The endpoint to which logout requests should be made. + type: string + nonceExpirationTime: + description: |- + The time after the request is made when the nonce should expire. Defaults to 00:05:00. + The time after the request is made when the nonce should expire. Defaults to `00:05:00`. + type: string + preserveUrlFragmentsForLogins: + description: |- + Should the fragments from the request be preserved after the login request is made. Defaults to false. + Should the fragments from the request be preserved after the login request is made. Defaults to `false`. + type: boolean + tokenRefreshExtensionTime: + description: |- + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + type: number + tokenStoreEnabled: + description: |- + Should the Token Store configuration Enabled. 
Defaults to false + Should the Token Store configuration Enabled. Defaults to `false` + type: boolean + tokenStorePath: + description: |- + The directory path in the App Filesystem in which the tokens will be stored. + The directory path in the App Filesystem in which the tokens will be stored. + type: string + tokenStoreSasSettingName: + description: |- + The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + type: string + validateNonce: + description: |- + Should the nonce be validated while completing the login flow. Defaults to true. + Should the nonce be validated while completing the login flow. Defaults to `true`. + type: boolean + type: object + microsoftV2: + description: A microsoft_v2 block as defined below. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OAuth 2.0 client ID that was created for the app used for authentication. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + The list of Login scopes that will be requested as part of Microsoft Account authentication. 
+ items: + type: string + type: array + type: object + requireAuthentication: + description: |- + Should the authentication flow be used for all requests. + Should the authentication flow be used for all requests. + type: boolean + requireHttps: + description: |- + Should HTTPS be required on connections? Defaults to true. + Should HTTPS be required on connections? Defaults to true. + type: boolean + runtimeVersion: + description: |- + The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to ~1. + The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to `~1` + type: string + twitterV2: + description: A twitter_v2 block as defined below. + properties: + consumerKey: + description: |- + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + type: string + consumerSecretSettingName: + description: |- + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + type: string + type: object + unauthenticatedAction: + description: |- + The action to take for requests made without authentication. Possible values include RedirectToLoginPage, AllowAnonymous, Return401, and Return403. Defaults to RedirectToLoginPage. + The action to take for requests made without authentication. Possible values include `RedirectToLoginPage`, `AllowAnonymous`, `Return401`, and `Return403`. Defaults to `RedirectToLoginPage`. + type: string + type: object + backup: + description: A backup block as defined below. + properties: + enabled: + description: |- + Should this backup job be enabled? Defaults to true. + Should this backup job be enabled? + type: boolean + name: + description: |- + The name which should be used for this Backup. 
+ The name which should be used for this Backup. + type: string + schedule: + description: A schedule block as defined below. + properties: + frequencyInterval: + description: |- + How often the backup should be executed (e.g. for weekly backup, this should be set to 7 and frequency_unit should be set to Day). + How often the backup should be executed (e.g. for weekly backup, this should be set to `7` and `frequency_unit` should be set to `Day`). + type: number + frequencyUnit: + description: |- + The unit of time for how often the backup should take place. Possible values include: Day, Hour + The unit of time for how often the backup should take place. Possible values include: `Day` and `Hour`. + type: string + keepAtLeastOneBackup: + description: |- + Should the service keep at least one backup, regardless of age of backup. Defaults to false. + Should the service keep at least one backup, regardless of age of backup. Defaults to `false`. + type: boolean + retentionPeriodDays: + description: |- + After how many days backups should be deleted. Defaults to 30. + After how many days backups should be deleted. + type: number + startTime: + description: |- + When the schedule should start working in RFC-3339 format. + When the schedule should start working in RFC-3339 format. + type: string + type: object + storageAccountUrlSecretRef: + description: |- + The SAS URL to the container. + The SAS URL to the container. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + required: + - storageAccountUrlSecretRef + type: object + clientAffinityEnabled: + description: Should Client Affinity be enabled? + type: boolean + clientCertificateEnabled: + description: Should Client Certificates be enabled? 
+ type: boolean + clientCertificateExclusionPaths: + description: |- + Paths to exclude when using client certificates, separated by ; + Paths to exclude when using client certificates, separated by ; + type: string + clientCertificateMode: + description: The Client Certificate mode. Possible values are + Required, Optional, and OptionalInteractiveUser. This property + has no effect when client_cert_enabled is false. Defaults to + Required. + type: string + connectionString: + description: One or more connection_string blocks as defined below. + items: + properties: + name: + description: |- + The name of the Connection String. + The name which should be used for this Connection. + type: string + type: + description: |- + Type of database. Possible values include: APIHub, Custom, DocDb, EventHub, MySQL, NotificationHub, PostgreSQL, RedisCache, ServiceBus, SQLAzure, and SQLServer. + Type of database. Possible values include: `MySQL`, `SQLServer`, `SQLAzure`, `Custom`, `NotificationHub`, `ServiceBus`, `EventHub`, `APIHub`, `DocDb`, `RedisCache`, and `PostgreSQL`. + type: string + valueSecretRef: + description: |- + The connection string value. + The connection string value. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + required: + - valueSecretRef + type: object + type: array + enabled: + description: Should the Windows Web App be enabled? Defaults to + true. + type: boolean + ftpPublishBasicAuthenticationEnabled: + description: Should the default FTP Basic Authentication publishing + profile be enabled. Defaults to true. + type: boolean + httpsOnly: + description: Should the Windows Web App require HTTPS connections. + Defaults to false. + type: boolean + identity: + description: An identity block as defined below. 
+ properties: + identityIds: + description: A list of User Assigned Managed Identity IDs + to be assigned to this Windows Web App. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Windows Web App. Possible + values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + keyVaultReferenceIdentityId: + description: The User Assigned Identity ID used for accessing + KeyVault secrets. The identity must be assigned to the application + in the identity block. For more information see - Access vaults + with a user-assigned identity + type: string + location: + description: The Azure Region where the Windows Web App should + exist. Changing this forces a new Windows Web App to be created. + type: string + logs: + description: A logs block as defined below. + properties: + applicationLogs: + description: A application_logs block as defined above. + properties: + azureBlobStorage: + description: A azure_blob_storage_http block as defined + above. + properties: + level: + description: 'The level at which to log. Possible + values include Error, Warning, Information, Verbose + and Off. NOTE: this field is not available for http_logs' + type: string + retentionInDays: + description: The retention period in days. A values + of 0 means no retention. + type: number + sasUrl: + description: SAS url to an Azure blob container with + read/write/list/delete permissions. + type: string + type: object + fileSystemLevel: + description: 'Log level. Possible values include: Off, + Verbose, Information, Warning, and Error.' + type: string + type: object + detailedErrorMessages: + description: Should detailed error messages be enabled. + type: boolean + failedRequestTracing: + description: Should tracing be enabled for failed requests. 
+ type: boolean + httpLogs: + description: A http_logs block as defined above. + properties: + azureBlobStorage: + description: A azure_blob_storage_http block as defined + above. + properties: + retentionInDays: + description: The retention period in days. A values + of 0 means no retention. + type: number + sasurlSecretRef: + description: SAS url to an Azure blob container with + read/write/list/delete permissions. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + required: + - sasurlSecretRef + type: object + fileSystem: + description: A file_system block as defined above. + properties: + retentionInDays: + description: The retention period in days. A values + of 0 means no retention. + type: number + retentionInMb: + description: The maximum size in megabytes that log + files can use. + type: number + type: object + type: object + type: object + publicNetworkAccessEnabled: + description: Should public network access be enabled for the Web + App. Defaults to true. + type: boolean + resourceGroupName: + description: The name of the Resource Group where the Windows + Web App should exist. Changing this forces a new Windows Web + App to be created. + type: string + resourceGroupNameRef: + description: Reference to a ResourceGroup in azure to populate + resourceGroupName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. 
+ enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceGroupNameSelector: + description: Selector for a ResourceGroup in azure to populate + resourceGroupName. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + servicePlanId: + description: The ID of the Service Plan that this Windows App + Service will be created in. + type: string + servicePlanIdRef: + description: Reference to a ServicePlan in web to populate servicePlanId. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + servicePlanIdSelector: + description: Selector for a ServicePlan in web to populate servicePlanId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + siteConfig: + description: A site_config block as defined below. + properties: + alwaysOn: + description: If this Windows Web App is Always On enabled. + Defaults to true. + type: boolean + apiDefinitionUrl: + description: The URL to the API Definition for this Windows + Web App. + type: string + apiManagementApiId: + description: The API Management API ID this Windows Web App + Slot is associated with. + type: string + appCommandLine: + description: The App command line to launch. + type: string + applicationStack: + description: A application_stack block as defined above. + properties: + currentStack: + description: The Application Stack for the Windows Web + App. Possible values include dotnet, dotnetcore, node, + python, php, and java. + type: string + dockerContainerName: + description: The name of the container to be used. This + value is required with docker_container_tag. + type: string + dockerContainerRegistry: + type: string + dockerContainerTag: + description: The tag of the container to be used. This + value is required with docker_container_name. + type: string + dockerImageName: + description: The docker image, including tag, to be used. + e.g. azure-app-service/windows/parkingpage:latest. + type: string + dockerRegistryPasswordSecretRef: + description: The User Name to use for authentication against + the registry to pull the image. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + dockerRegistryUrl: + description: The URL of the container registry where the + docker_image_name is located. e.g. https://index.docker.io + or https://mcr.microsoft.com. This value is required + with docker_image_name. 
+ type: string + dockerRegistryUsername: + description: The User Name to use for authentication against + the registry to pull the image. + type: string + dotnetCoreVersion: + description: |- + The version of .NET to use when current_stack is set to dotnetcore. Possible values include v4.0. + The version of DotNetCore to use. + type: string + dotnetVersion: + description: The version of .NET to use when current_stack + is set to dotnet. Possible values include v2.0,v3.0, + v4.0, v5.0, v6.0, v7.0 and v8.0. + type: string + javaContainer: + type: string + javaContainerVersion: + type: string + javaEmbeddedServerEnabled: + description: |- + Should the Java Embedded Server (Java SE) be used to run the app. + Should the application use the embedded web server for the version of Java in use. + type: boolean + javaVersion: + description: The version of Java to use when current_stack + is set to java. + type: string + nodeVersion: + description: The version of node to use when current_stack + is set to node. Possible values are ~12, ~14, ~16, and + ~18. + type: string + phpVersion: + description: The version of PHP to use when current_stack + is set to php. Possible values are 7.1, 7.4 and Off. + type: string + python: + description: Specifies whether this is a Python app. Defaults + to false. + type: boolean + pythonVersion: + type: string + tomcatVersion: + description: The version of Tomcat the Java App should + use. Conflicts with java_embedded_server_enabled + type: string + type: object + autoHealEnabled: + description: Should Auto heal rules be enabled. Required with + auto_heal_setting. + type: boolean + autoHealSetting: + description: A auto_heal_setting block as defined above. Required + with auto_heal. + properties: + action: + description: The action to take. Possible values are Allow + or Deny. Defaults to Allow. + properties: + actionType: + description: 'Predefined action to be taken to an + Auto Heal trigger. 
Possible values include: Recycle, + LogEvent, and CustomAction.' + type: string + customAction: + description: A custom_action block as defined below. + properties: + executable: + description: The executable to run for the custom_action. + type: string + parameters: + description: The parameters to pass to the specified + executable. + type: string + type: object + minimumProcessExecutionTime: + description: The minimum amount of time in hh:mm:ss + the Windows Web App must have been running before + the defined action will be run in the event of a + trigger. + type: string + type: object + trigger: + description: A trigger block as defined below. + properties: + privateMemoryKb: + description: The amount of Private Memory to be consumed + for this rule to trigger. Possible values are between + 102400 and 13631488. + type: number + requests: + description: A requests block as defined above. + properties: + count: + description: The number of occurrences of the + defined status_code in the specified interval + on which to trigger this rule. + type: number + interval: + description: The time interval in the form hh:mm:ss. + type: string + type: object + slowRequest: + description: One or more slow_request blocks as defined + above. + properties: + count: + description: The number of occurrences of the + defined status_code in the specified interval + on which to trigger this rule. + type: number + interval: + description: The time interval in the form hh:mm:ss. + type: string + path: + description: The path to which this rule status + code applies. + type: string + timeTaken: + description: The threshold of time passed to qualify + as a Slow Request in hh:mm:ss. + type: string + type: object + statusCode: + description: One or more status_code blocks as defined + above. + items: + properties: + count: + description: The number of occurrences of the + defined status_code in the specified interval + on which to trigger this rule. 
+ type: number + interval: + description: The time interval in the form hh:mm:ss. + type: string + path: + description: The path to which this rule status + code applies. + type: string + statusCodeRange: + description: The status code for this rule, + accepts single status codes and status code + ranges. e.g. 500 or 400-499. Possible values + are integers between 101 and 599 + type: string + subStatus: + description: The Request Sub Status of the Status + Code. + type: number + win32StatusCode: + description: The Win32 Status Code of the Request. + type: number + type: object + type: array + type: object + type: object + containerRegistryManagedIdentityClientId: + description: The Client ID of the Managed Service Identity + to use for connections to the Azure Container Registry. + type: string + containerRegistryUseManagedIdentity: + description: Should connections for Azure Container Registry + use Managed Identity. + type: boolean + cors: + description: A cors block as defined above. + properties: + allowedOrigins: + description: |- + Specifies a list of origins that should be allowed to make cross-origin calls. + Specifies a list of origins that should be allowed to make cross-origin calls. + items: + type: string + type: array + x-kubernetes-list-type: set + supportCredentials: + description: |- + Whether CORS requests with credentials are allowed. Defaults to false + Are credentials allowed in CORS requests? Defaults to `false`. + type: boolean + type: object + defaultDocuments: + description: Specifies a list of Default Documents for the + Windows Web App. + items: + type: string + type: array + ftpsState: + description: 'The State of FTP / FTPS service. Possible values + include: AllAllowed, FtpsOnly, Disabled. Defaults to Disabled.' + type: string + healthCheckEvictionTimeInMin: + description: |- + The amount of time in minutes that a node can be unhealthy before being removed from the load balancer. Possible values are between 2 and 10. 
Only valid in conjunction with health_check_path. + The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between `2` and `10`. Defaults to `10`. Only valid in conjunction with `health_check_path` + type: number + healthCheckPath: + description: The path to the Health Check. + type: string + http2Enabled: + description: Should the HTTP2 be enabled? + type: boolean + ipRestriction: + description: One or more ip_restriction blocks as defined + above. + items: + properties: + action: + description: |- + The action to take. Possible values are Allow or Deny. Defaults to Allow. + The action to take. Possible values are `Allow` or `Deny`. + type: string + description: + description: |- + The Description of this IP Restriction. + The description of the IP restriction rule. + type: string + headers: + description: A headers block as defined above. + items: + properties: + xAzureFdid: + description: Specifies a list of Azure Front Door + IDs. + items: + type: string + type: array + xFdHealthProbe: + description: Specifies if a Front Door Health + Probe should be expected. The only possible + value is 1. + items: + type: string + type: array + xForwardedFor: + description: Specifies a list of addresses for + which matching should be applied. Omitting this + value means allow any. + items: + type: string + type: array + xForwardedHost: + description: Specifies a list of Hosts for which + matching should be applied. + items: + type: string + type: array + type: object + type: array + ipAddress: + description: |- + The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + type: string + name: + description: |- + The name which should be used for this TODO. + The name which should be used for this `ip_restriction`. 
+ type: string + priority: + description: |- + The priority value of this ip_restriction. Defaults to 65000. + The priority value of this `ip_restriction`. + type: number + serviceTag: + description: |- + The Service Tag used for this IP Restriction. + The Service Tag used for this IP Restriction. + type: string + virtualNetworkSubnetId: + description: |- + The subnet id which will be used by this Web App for regional virtual network integration. + The Virtual Network Subnet ID used for this IP Restriction. + type: string + virtualNetworkSubnetIdRef: + description: Reference to a Subnet in network to populate + virtualNetworkSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkSubnetIdSelector: + description: Selector for a Subnet in network to populate + virtualNetworkSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + ipRestrictionDefaultAction: + description: The Default action for traffic that does not + match any ip_restriction rule. possible values include Allow + and Deny. Defaults to Allow. + type: string + loadBalancingMode: + description: 'The Site load balancing. Possible values include: + WeightedRoundRobin, LeastRequests, LeastResponseTime, WeightedTotalTraffic, + RequestHash, PerSiteRoundRobin. Defaults to LeastRequests + if omitted.' + type: string + localMysqlEnabled: + description: Use Local MySQL. Defaults to false. + type: boolean + managedPipelineMode: + description: 'Managed pipeline mode. Possible values include: + Integrated, Classic. Defaults to Integrated.' + type: string + minimumTlsVersion: + description: 'The configures the minimum version of TLS required + for SSL requests. Possible values include: 1.0, 1.1, and + 1.2. Defaults to 1.2.' + type: string + remoteDebuggingEnabled: + description: Should Remote Debugging be enabled. Defaults + to false. + type: boolean + remoteDebuggingVersion: + description: The Remote Debugging Version. Possible values + include VS2017, VS2019 and VS2022. 
+ type: string + scmIpRestriction: + description: One or more scm_ip_restriction blocks as defined + above. + items: + properties: + action: + description: |- + The action to take. Possible values are Allow or Deny. Defaults to Allow. + The action to take. Possible values are `Allow` or `Deny`. + type: string + description: + description: |- + The Description of this IP Restriction. + The description of the IP restriction rule. + type: string + headers: + description: A headers block as defined above. + items: + properties: + xAzureFdid: + description: Specifies a list of Azure Front Door + IDs. + items: + type: string + type: array + xFdHealthProbe: + description: Specifies if a Front Door Health + Probe should be expected. The only possible + value is 1. + items: + type: string + type: array + xForwardedFor: + description: Specifies a list of addresses for + which matching should be applied. Omitting this + value means allow any. + items: + type: string + type: array + xForwardedHost: + description: Specifies a list of Hosts for which + matching should be applied. + items: + type: string + type: array + type: object + type: array + ipAddress: + description: |- + The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + type: string + name: + description: |- + The name which should be used for this TODO. + The name which should be used for this `ip_restriction`. + type: string + priority: + description: |- + The priority value of this ip_restriction. Defaults to 65000. + The priority value of this `ip_restriction`. + type: number + serviceTag: + description: |- + The Service Tag used for this IP Restriction. + The Service Tag used for this IP Restriction. 
+ type: string + virtualNetworkSubnetId: + description: |- + The subnet id which will be used by this Web App for regional virtual network integration. + The Virtual Network Subnet ID used for this IP Restriction. + type: string + virtualNetworkSubnetIdRef: + description: Reference to a Subnet in network to populate + virtualNetworkSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkSubnetIdSelector: + description: Selector for a Subnet in network to populate + virtualNetworkSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + scmIpRestrictionDefaultAction: + description: The Default action for traffic that does not + match any scm_ip_restriction rule. possible values include + Allow and Deny. Defaults to Allow. + type: string + scmMinimumTlsVersion: + description: 'The configures the minimum version of TLS required + for SSL requests to the SCM site Possible values include: + 1.0, 1.1, and 1.2. Defaults to 1.2.' + type: string + scmUseMainIpRestriction: + description: Should the Windows Web App ip_restriction configuration + be used for the SCM also. + type: boolean + use32BitWorker: + description: Should the Windows Web App use a 32-bit worker. + Defaults to true. + type: boolean + virtualApplication: + description: One or more virtual_application blocks as defined + below. + items: + properties: + physicalPath: + description: The physical path for the Virtual Application. + type: string + preload: + description: Should pre-loading be enabled. + type: boolean + virtualDirectory: + description: One or more virtual_directory blocks as + defined below. + items: + properties: + physicalPath: + description: The physical path for the Virtual + Application. + type: string + virtualPath: + description: The Virtual Path for the Virtual + Application. + type: string + type: object + type: array + virtualPath: + description: The Virtual Path for the Virtual Application. 
+ type: string + type: object + type: array + vnetRouteAllEnabled: + description: |- + Should all outbound traffic to have NAT Gateways, Network Security Groups and User Defined Routes applied? Defaults to false. + Should all outbound traffic to have Virtual Network Security Groups and User Defined Routes applied? Defaults to `false`. + type: boolean + websocketsEnabled: + description: Should Web Sockets be enabled. Defaults to false. + type: boolean + workerCount: + description: The number of Workers for this Windows App Service. + type: number + type: object + stickySettings: + description: A sticky_settings block as defined below. + properties: + appSettingNames: + description: A list of app_setting names that the Windows + Web App will not swap between Slots when a swap operation + is triggered. + items: + type: string + type: array + connectionStringNames: + description: A list of connection_string names that the Windows + Web App will not swap between Slots when a swap operation + is triggered. + items: + type: string + type: array + type: object + storageAccount: + description: One or more storage_account blocks as defined below. + items: + properties: + accessKeySecretRef: + description: The Access key for the storage account. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + accountName: + description: The Name of the Storage Account. + type: string + mountPath: + description: The path at which to mount the storage share. + type: string + name: + description: The name which should be used for this TODO. + type: string + shareName: + description: The Name of the File Share or Container Name + for Blob storage. + type: string + type: + description: The Azure Storage Type. 
Possible values include + AzureFiles and AzureBlob + type: string + required: + - accessKeySecretRef + type: object + type: array + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Windows Web App. + type: object + x-kubernetes-map-type: granular + virtualNetworkSubnetId: + description: The subnet id which will be used by this Web App + for regional virtual network integration. + type: string + virtualNetworkSubnetIdRef: + description: Reference to a Subnet in network to populate virtualNetworkSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkSubnetIdSelector: + description: Selector for a Subnet in network to populate virtualNetworkSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + webdeployPublishBasicAuthenticationEnabled: + description: Should the default WebDeploy Basic Authentication + publishing credentials enabled. Defaults to true. + type: boolean + zipDeployFile: + description: |- + The local path and filename of the Zip packaged application to deploy to this Windows Web App. + The local path and filename of the Zip packaged application to deploy to this Windows Web App. **Note:** Using this value requires either `WEBSITE_RUN_FROM_PACKAGE=1` or `SCM_DO_BUILD_DURING_DEPLOYMENT=true` to be set on the App in `app_settings`. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. 
+ properties: + appSettings: + additionalProperties: + type: string + description: A map of key-value pairs of App Settings. + type: object + x-kubernetes-map-type: granular + authSettings: + description: An auth_settings block as defined below. + properties: + activeDirectory: + description: An active_directory block as defined above. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Active Directory. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The App Setting name that contains the client secret of the Client. Cannot be used with `client_secret`. + type: string + type: object + additionalLoginParameters: + additionalProperties: + type: string + description: |- + Specifies a map of login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + Specifies a map of Login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + type: object + x-kubernetes-map-type: granular + allowedExternalRedirectUrls: + description: |- + Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + items: + type: string + type: array + defaultProvider: + description: |- + The default authentication provider to use when multiple providers are configured. 
Possible values include: AzureActiveDirectory, Facebook, Google, MicrosoftAccount, Twitter, Github + The default authentication provider to use when multiple providers are configured. Possible values include: `AzureActiveDirectory`, `Facebook`, `Google`, `MicrosoftAccount`, `Twitter`, `Github`. + type: string + enabled: + description: |- + Should the Authentication / Authorization feature is enabled for the Windows Web App be enabled? + Should the Authentication / Authorization feature be enabled? + type: boolean + facebook: + description: A facebook block as defined below. + properties: + appId: + description: |- + The App ID of the Facebook app used for login. + The App ID of the Facebook app used for login. + type: string + appSecretSettingName: + description: |- + The app setting name that contains the app_secret value used for Facebook Login. + The app setting name that contains the `app_secret` value used for Facebook Login. Cannot be specified with `app_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + Specifies a list of OAuth 2.0 scopes to be requested as part of Facebook Login authentication. + items: + type: string + type: array + type: object + github: + description: A github block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the GitHub app used for login. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for GitHub Login. Cannot be specified with `client_secret`. 
+ type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + items: + type: string + type: array + type: object + google: + description: A google block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Google web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Google Login. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + Specifies a list of OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication. If not specified, "openid", "profile", and "email" are used as default scopes. + items: + type: string + type: array + type: object + issuer: + description: |- + The OpenID Connect Issuer URI that represents the entity which issues access tokens for this Windows Web App. + The OpenID Connect Issuer URI that represents the entity which issues access tokens. + type: string + microsoft: + description: A microsoft block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OAuth 2.0 client ID that was created for the app used for authentication. 
+ type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + The list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, `wl.basic` is used as the default scope. + items: + type: string + type: array + type: object + runtimeVersion: + description: |- + The RuntimeVersion of the Authentication / Authorization feature in use for the Windows Web App. + The RuntimeVersion of the Authentication / Authorization feature in use. + type: string + tokenRefreshExtensionHours: + description: |- + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + type: number + tokenStoreEnabled: + description: |- + Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to false. + Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to `false`. + type: boolean + twitter: + description: A twitter block as defined below. + properties: + consumerKey: + description: |- + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + The OAuth 1.0a consumer key of the Twitter application used for sign-in. 
+ type: string + consumerSecretSettingName: + description: |- + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret`. + type: string + type: object + unauthenticatedClientAction: + description: |- + The action to take when an unauthenticated client attempts to access the app. Possible values include: RedirectToLoginPage, AllowAnonymous. + The action to take when an unauthenticated client attempts to access the app. Possible values include: `RedirectToLoginPage`, `AllowAnonymous`. + type: string + type: object + authSettingsV2: + description: An auth_settings_v2 block as defined below. + properties: + activeDirectoryV2: + description: An active_directory_v2 block as defined below. + properties: + allowedApplications: + description: |- + The list of allowed Applications for the Default Authorisation Policy. + The list of allowed Applications for the Default Authorisation Policy. + items: + type: string + type: array + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + items: + type: string + type: array + allowedGroups: + description: |- + The list of allowed Group Names for the Default Authorisation Policy. + The list of allowed Group Names for the Default Authorisation Policy. + items: + type: string + type: array + allowedIdentities: + description: |- + The list of allowed Identities for the Default Authorisation Policy. + The list of allowed Identities for the Default Authorisation Policy. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. 
+ The ID of the Client to use to authenticate with Azure Active Directory. + type: string + clientSecretCertificateThumbprint: + description: |- + The thumbprint of the certificate used for signing purposes. + The thumbprint of the certificate used for signing purposes. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The App Setting name that contains the client secret of the Client. + type: string + jwtAllowedClientApplications: + description: |- + A list of Allowed Client Applications in the JWT Claim. + A list of Allowed Client Applications in the JWT Claim. + items: + type: string + type: array + jwtAllowedGroups: + description: |- + A list of Allowed Groups in the JWT Claim. + A list of Allowed Groups in the JWT Claim. + items: + type: string + type: array + loginParameters: + additionalProperties: + type: string + description: |- + A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + type: object + x-kubernetes-map-type: granular + tenantAuthEndpoint: + description: |- + The Azure Tenant Endpoint for the Authenticating Tenant. e.g. https://login.microsoftonline.com/v2.0/{tenant-guid}/ + The Azure Tenant Endpoint for the Authenticating Tenant. e.g. `https://login.microsoftonline.com/v2.0/{tenant-guid}/`. + type: string + wwwAuthenticationDisabled: + description: |- + Should the www-authenticate provider should be omitted from the request? Defaults to false. + Should the www-authenticate provider should be omitted from the request? Defaults to `false` + type: boolean + type: object + appleV2: + description: An apple_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. 
+ The OpenID Connect Client ID for the Apple web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Apple Login. + type: string + type: object + authEnabled: + description: |- + Should the AuthV2 Settings be enabled. Defaults to false. + Should the AuthV2 Settings be enabled. Defaults to `false` + type: boolean + azureStaticWebAppV2: + description: An azure_static_web_app_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Static Web App Authentication. + type: string + type: object + configFilePath: + description: |- + The path to the App Auth settings. + The path to the App Auth settings. **Note:** Relative Paths are evaluated from the Site Root directory. + type: string + customOidcV2: + description: Zero or more custom_oidc_v2 blocks as defined + below. + items: + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with this Custom OIDC. + type: string + name: + description: |- + The name which should be used for this TODO. + The name of the Custom OIDC Authentication Provider. + type: string + nameClaimType: + description: |- + The name of the claim that contains the users name. + The name of the claim that contains the users name. + type: string + openidConfigurationEndpoint: + description: |- + The app setting name that contains the client_secret value used for the Custom OIDC Login. + The endpoint that contains all the configuration endpoints for this Custom OIDC provider. 
+ type: string + scopes: + description: |- + The list of the scopes that should be requested while authenticating. + The list of the scopes that should be requested while authenticating. + items: + type: string + type: array + type: object + type: array + defaultProvider: + description: |- + The Default Authentication Provider to use when the unauthenticated_action is set to RedirectToLoginPage. Possible values include: apple, azureactivedirectory, facebook, github, google, twitter and the name of your custom_oidc_v2 provider. + The Default Authentication Provider to use when the `unauthenticated_action` is set to `RedirectToLoginPage`. Possible values include: `apple`, `azureactivedirectory`, `facebook`, `github`, `google`, `twitter` and the `name` of your `custom_oidc_v2` provider. + type: string + excludedPaths: + description: |- + The paths which should be excluded from the unauthenticated_action when it is set to RedirectToLoginPage. + The paths which should be excluded from the `unauthenticated_action` when it is set to `RedirectToLoginPage`. + items: + type: string + type: array + facebookV2: + description: A facebook_v2 block as defined below. + properties: + appId: + description: |- + The App ID of the Facebook app used for login. + The App ID of the Facebook app used for login. + type: string + appSecretSettingName: + description: |- + The app setting name that contains the app_secret value used for Facebook Login. + The app setting name that contains the `app_secret` value used for Facebook Login. + type: string + graphApiVersion: + description: |- + The version of the Facebook API to be used while logging in. + The version of the Facebook API to be used while logging in. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of scopes to be requested as part of Facebook Login authentication. 
+ items: + type: string + type: array + type: object + forwardProxyConvention: + description: |- + The convention used to determine the url of the request made. Possible values include NoProxy, Standard, Custom. Defaults to NoProxy. + The convention used to determine the url of the request made. Possible values include `ForwardProxyConventionNoProxy`, `ForwardProxyConventionStandard`, `ForwardProxyConventionCustom`. Defaults to `ForwardProxyConventionNoProxy` + type: string + forwardProxyCustomHostHeaderName: + description: |- + The name of the custom header containing the host of the request. + The name of the header containing the host of the request. + type: string + forwardProxyCustomSchemeHeaderName: + description: |- + The name of the custom header containing the scheme of the request. + The name of the header containing the scheme of the request. + type: string + githubV2: + description: A github_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the GitHub app used for login. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for GitHub Login. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + items: + type: string + type: array + type: object + googleV2: + description: A google_v2 block as defined below. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. 
+ Specifies a list of Allowed Audiences that will be requested as part of Google Sign-In authentication. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Google web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Google Login. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of Login scopes that will be requested as part of Google Sign-In authentication. + items: + type: string + type: array + type: object + httpRouteApiPrefix: + description: |- + The prefix that should precede all the authentication and authorisation paths. Defaults to /.auth. + The prefix that should precede all the authentication and authorisation paths. Defaults to `/.auth` + type: string + login: + description: A login block as defined below. + properties: + allowedExternalRedirectUrls: + description: |- + External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. + External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. **Note:** URLs within the current domain are always implicitly allowed. + items: + type: string + type: array + cookieExpirationConvention: + description: |- + The method by which cookies expire. Possible values include: FixedTime, and IdentityProviderDerived. Defaults to FixedTime. + The method by which cookies expire. 
Possible values include: `FixedTime`, and `IdentityProviderDerived`. Defaults to `FixedTime`. + type: string + cookieExpirationTime: + description: |- + The time after the request is made when the session cookie should expire. Defaults to 08:00:00. + The time after the request is made when the session cookie should expire. Defaults to `08:00:00`. + type: string + logoutEndpoint: + description: |- + The endpoint to which logout requests should be made. + The endpoint to which logout requests should be made. + type: string + nonceExpirationTime: + description: |- + The time after the request is made when the nonce should expire. Defaults to 00:05:00. + The time after the request is made when the nonce should expire. Defaults to `00:05:00`. + type: string + preserveUrlFragmentsForLogins: + description: |- + Should the fragments from the request be preserved after the login request is made. Defaults to false. + Should the fragments from the request be preserved after the login request is made. Defaults to `false`. + type: boolean + tokenRefreshExtensionTime: + description: |- + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + type: number + tokenStoreEnabled: + description: |- + Should the Token Store configuration Enabled. Defaults to false + Should the Token Store configuration Enabled. Defaults to `false` + type: boolean + tokenStorePath: + description: |- + The directory path in the App Filesystem in which the tokens will be stored. + The directory path in the App Filesystem in which the tokens will be stored. + type: string + tokenStoreSasSettingName: + description: |- + The name of the app setting which contains the SAS URL of the blob storage containing the tokens. 
+ The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + type: string + validateNonce: + description: |- + Should the nonce be validated while completing the login flow. Defaults to true. + Should the nonce be validated while completing the login flow. Defaults to `true`. + type: boolean + type: object + microsoftV2: + description: A microsoft_v2 block as defined below. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OAuth 2.0 client ID that was created for the app used for authentication. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + The list of Login scopes that will be requested as part of Microsoft Account authentication. + items: + type: string + type: array + type: object + requireAuthentication: + description: |- + Should the authentication flow be used for all requests. + Should the authentication flow be used for all requests. + type: boolean + requireHttps: + description: |- + Should HTTPS be required on connections? Defaults to true. + Should HTTPS be required on connections? Defaults to true. + type: boolean + runtimeVersion: + description: |- + The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to ~1. 
+ The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to `~1` + type: string + twitterV2: + description: A twitter_v2 block as defined below. + properties: + consumerKey: + description: |- + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + type: string + consumerSecretSettingName: + description: |- + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + type: string + type: object + unauthenticatedAction: + description: |- + The action to take for requests made without authentication. Possible values include RedirectToLoginPage, AllowAnonymous, Return401, and Return403. Defaults to RedirectToLoginPage. + The action to take for requests made without authentication. Possible values include `RedirectToLoginPage`, `AllowAnonymous`, `Return401`, and `Return403`. Defaults to `RedirectToLoginPage`. + type: string + type: object + backup: + description: A backup block as defined below. + properties: + enabled: + description: |- + Should this backup job be enabled? Defaults to true. + Should this backup job be enabled? + type: boolean + name: + description: |- + The name which should be used for this Backup. + The name which should be used for this Backup. + type: string + schedule: + description: A schedule block as defined below. + properties: + frequencyInterval: + description: |- + How often the backup should be executed (e.g. for weekly backup, this should be set to 7 and frequency_unit should be set to Day). + How often the backup should be executed (e.g. for weekly backup, this should be set to `7` and `frequency_unit` should be set to `Day`). + type: number + frequencyUnit: + description: |- + The unit of time for how often the backup should take place. 
Possible values include: Day, Hour + The unit of time for how often the backup should take place. Possible values include: `Day` and `Hour`. + type: string + keepAtLeastOneBackup: + description: |- + Should the service keep at least one backup, regardless of age of backup. Defaults to false. + Should the service keep at least one backup, regardless of age of backup. Defaults to `false`. + type: boolean + retentionPeriodDays: + description: |- + After how many days backups should be deleted. Defaults to 30. + After how many days backups should be deleted. + type: number + startTime: + description: |- + When the schedule should start working in RFC-3339 format. + When the schedule should start working in RFC-3339 format. + type: string + type: object + type: object + clientAffinityEnabled: + description: Should Client Affinity be enabled? + type: boolean + clientCertificateEnabled: + description: Should Client Certificates be enabled? + type: boolean + clientCertificateExclusionPaths: + description: |- + Paths to exclude when using client certificates, separated by ; + Paths to exclude when using client certificates, separated by ; + type: string + clientCertificateMode: + description: The Client Certificate mode. Possible values are + Required, Optional, and OptionalInteractiveUser. This property + has no effect when client_cert_enabled is false. Defaults to + Required. + type: string + connectionString: + description: One or more connection_string blocks as defined below. + items: + properties: + name: + description: |- + The name of the Connection String. + The name which should be used for this Connection. + type: string + type: + description: |- + Type of database. Possible values include: APIHub, Custom, DocDb, EventHub, MySQL, NotificationHub, PostgreSQL, RedisCache, ServiceBus, SQLAzure, and SQLServer. + Type of database. 
Possible values include: `MySQL`, `SQLServer`, `SQLAzure`, `Custom`, `NotificationHub`, `ServiceBus`, `EventHub`, `APIHub`, `DocDb`, `RedisCache`, and `PostgreSQL`. + type: string + type: object + type: array + enabled: + description: Should the Windows Web App be enabled? Defaults to + true. + type: boolean + ftpPublishBasicAuthenticationEnabled: + description: Should the default FTP Basic Authentication publishing + profile be enabled. Defaults to true. + type: boolean + httpsOnly: + description: Should the Windows Web App require HTTPS connections. + Defaults to false. + type: boolean + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of User Assigned Managed Identity IDs + to be assigned to this Windows Web App. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Windows Web App. Possible + values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + keyVaultReferenceIdentityId: + description: The User Assigned Identity ID used for accessing + KeyVault secrets. The identity must be assigned to the application + in the identity block. For more information see - Access vaults + with a user-assigned identity + type: string + location: + description: The Azure Region where the Windows Web App should + exist. Changing this forces a new Windows Web App to be created. + type: string + logs: + description: A logs block as defined below. + properties: + applicationLogs: + description: A application_logs block as defined above. + properties: + azureBlobStorage: + description: A azure_blob_storage_http block as defined + above. + properties: + level: + description: 'The level at which to log. Possible + values include Error, Warning, Information, Verbose + and Off. 
NOTE: this field is not available for http_logs' + type: string + retentionInDays: + description: The retention period in days. A values + of 0 means no retention. + type: number + sasUrl: + description: SAS url to an Azure blob container with + read/write/list/delete permissions. + type: string + type: object + fileSystemLevel: + description: 'Log level. Possible values include: Off, + Verbose, Information, Warning, and Error.' + type: string + type: object + detailedErrorMessages: + description: Should detailed error messages be enabled. + type: boolean + failedRequestTracing: + description: Should tracing be enabled for failed requests. + type: boolean + httpLogs: + description: A http_logs block as defined above. + properties: + azureBlobStorage: + description: A azure_blob_storage_http block as defined + above. + properties: + retentionInDays: + description: The retention period in days. A values + of 0 means no retention. + type: number + type: object + fileSystem: + description: A file_system block as defined above. + properties: + retentionInDays: + description: The retention period in days. A values + of 0 means no retention. + type: number + retentionInMb: + description: The maximum size in megabytes that log + files can use. + type: number + type: object + type: object + type: object + publicNetworkAccessEnabled: + description: Should public network access be enabled for the Web + App. Defaults to true. + type: boolean + servicePlanId: + description: The ID of the Service Plan that this Windows App + Service will be created in. + type: string + servicePlanIdRef: + description: Reference to a ServicePlan in web to populate servicePlanId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + servicePlanIdSelector: + description: Selector for a ServicePlan in web to populate servicePlanId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + siteConfig: + description: A site_config block as defined below. + properties: + alwaysOn: + description: If this Windows Web App is Always On enabled. + Defaults to true. 
+ type: boolean + apiDefinitionUrl: + description: The URL to the API Definition for this Windows + Web App. + type: string + apiManagementApiId: + description: The API Management API ID this Windows Web App + Slot is associated with. + type: string + appCommandLine: + description: The App command line to launch. + type: string + applicationStack: + description: A application_stack block as defined above. + properties: + currentStack: + description: The Application Stack for the Windows Web + App. Possible values include dotnet, dotnetcore, node, + python, php, and java. + type: string + dockerContainerName: + description: The name of the container to be used. This + value is required with docker_container_tag. + type: string + dockerContainerRegistry: + type: string + dockerContainerTag: + description: The tag of the container to be used. This + value is required with docker_container_name. + type: string + dockerImageName: + description: The docker image, including tag, to be used. + e.g. azure-app-service/windows/parkingpage:latest. + type: string + dockerRegistryUrl: + description: The URL of the container registry where the + docker_image_name is located. e.g. https://index.docker.io + or https://mcr.microsoft.com. This value is required + with docker_image_name. + type: string + dockerRegistryUsername: + description: The User Name to use for authentication against + the registry to pull the image. + type: string + dotnetCoreVersion: + description: |- + The version of .NET to use when current_stack is set to dotnetcore. Possible values include v4.0. + The version of DotNetCore to use. + type: string + dotnetVersion: + description: The version of .NET to use when current_stack + is set to dotnet. Possible values include v2.0,v3.0, + v4.0, v5.0, v6.0, v7.0 and v8.0. 
+ type: string + javaContainer: + type: string + javaContainerVersion: + type: string + javaEmbeddedServerEnabled: + description: |- + Should the Java Embedded Server (Java SE) be used to run the app. + Should the application use the embedded web server for the version of Java in use. + type: boolean + javaVersion: + description: The version of Java to use when current_stack + is set to java. + type: string + nodeVersion: + description: The version of node to use when current_stack + is set to node. Possible values are ~12, ~14, ~16, and + ~18. + type: string + phpVersion: + description: The version of PHP to use when current_stack + is set to php. Possible values are 7.1, 7.4 and Off. + type: string + python: + description: Specifies whether this is a Python app. Defaults + to false. + type: boolean + pythonVersion: + type: string + tomcatVersion: + description: The version of Tomcat the Java App should + use. Conflicts with java_embedded_server_enabled + type: string + type: object + autoHealEnabled: + description: Should Auto heal rules be enabled. Required with + auto_heal_setting. + type: boolean + autoHealSetting: + description: A auto_heal_setting block as defined above. Required + with auto_heal. + properties: + action: + description: The action to take. Possible values are Allow + or Deny. Defaults to Allow. + properties: + actionType: + description: 'Predefined action to be taken to an + Auto Heal trigger. Possible values include: Recycle, + LogEvent, and CustomAction.' + type: string + customAction: + description: A custom_action block as defined below. + properties: + executable: + description: The executable to run for the custom_action. + type: string + parameters: + description: The parameters to pass to the specified + executable. 
+ type: string + type: object + minimumProcessExecutionTime: + description: The minimum amount of time in hh:mm:ss + the Windows Web App must have been running before + the defined action will be run in the event of a + trigger. + type: string + type: object + trigger: + description: A trigger block as defined below. + properties: + privateMemoryKb: + description: The amount of Private Memory to be consumed + for this rule to trigger. Possible values are between + 102400 and 13631488. + type: number + requests: + description: A requests block as defined above. + properties: + count: + description: The number of occurrences of the + defined status_code in the specified interval + on which to trigger this rule. + type: number + interval: + description: The time interval in the form hh:mm:ss. + type: string + type: object + slowRequest: + description: One or more slow_request blocks as defined + above. + properties: + count: + description: The number of occurrences of the + defined status_code in the specified interval + on which to trigger this rule. + type: number + interval: + description: The time interval in the form hh:mm:ss. + type: string + path: + description: The path to which this rule status + code applies. + type: string + timeTaken: + description: The threshold of time passed to qualify + as a Slow Request in hh:mm:ss. + type: string + type: object + statusCode: + description: One or more status_code blocks as defined + above. + items: + properties: + count: + description: The number of occurrences of the + defined status_code in the specified interval + on which to trigger this rule. + type: number + interval: + description: The time interval in the form hh:mm:ss. + type: string + path: + description: The path to which this rule status + code applies. + type: string + statusCodeRange: + description: The status code for this rule, + accepts single status codes and status code + ranges. e.g. 500 or 400-499. 
Possible values + are integers between 101 and 599 + type: string + subStatus: + description: The Request Sub Status of the Status + Code. + type: number + win32StatusCode: + description: The Win32 Status Code of the Request. + type: number + type: object + type: array + type: object + type: object + containerRegistryManagedIdentityClientId: + description: The Client ID of the Managed Service Identity + to use for connections to the Azure Container Registry. + type: string + containerRegistryUseManagedIdentity: + description: Should connections for Azure Container Registry + use Managed Identity. + type: boolean + cors: + description: A cors block as defined above. + properties: + allowedOrigins: + description: |- + Specifies a list of origins that should be allowed to make cross-origin calls. + Specifies a list of origins that should be allowed to make cross-origin calls. + items: + type: string + type: array + x-kubernetes-list-type: set + supportCredentials: + description: |- + Whether CORS requests with credentials are allowed. Defaults to false + Are credentials allowed in CORS requests? Defaults to `false`. + type: boolean + type: object + defaultDocuments: + description: Specifies a list of Default Documents for the + Windows Web App. + items: + type: string + type: array + ftpsState: + description: 'The State of FTP / FTPS service. Possible values + include: AllAllowed, FtpsOnly, Disabled. Defaults to Disabled.' + type: string + healthCheckEvictionTimeInMin: + description: |- + The amount of time in minutes that a node can be unhealthy before being removed from the load balancer. Possible values are between 2 and 10. Only valid in conjunction with health_check_path. + The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between `2` and `10`. Defaults to `10`. Only valid in conjunction with `health_check_path` + type: number + healthCheckPath: + description: The path to the Health Check. 
+ type: string + http2Enabled: + description: Should the HTTP2 be enabled? + type: boolean + ipRestriction: + description: One or more ip_restriction blocks as defined + above. + items: + properties: + action: + description: |- + The action to take. Possible values are Allow or Deny. Defaults to Allow. + The action to take. Possible values are `Allow` or `Deny`. + type: string + description: + description: |- + The Description of this IP Restriction. + The description of the IP restriction rule. + type: string + headers: + description: A headers block as defined above. + items: + properties: + xAzureFdid: + description: Specifies a list of Azure Front Door + IDs. + items: + type: string + type: array + xFdHealthProbe: + description: Specifies if a Front Door Health + Probe should be expected. The only possible + value is 1. + items: + type: string + type: array + xForwardedFor: + description: Specifies a list of addresses for + which matching should be applied. Omitting this + value means allow any. + items: + type: string + type: array + xForwardedHost: + description: Specifies a list of Hosts for which + matching should be applied. + items: + type: string + type: array + type: object + type: array + ipAddress: + description: |- + The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + type: string + name: + description: |- + The name which should be used for this TODO. + The name which should be used for this `ip_restriction`. + type: string + priority: + description: |- + The priority value of this ip_restriction. Defaults to 65000. + The priority value of this `ip_restriction`. + type: number + serviceTag: + description: |- + The Service Tag used for this IP Restriction. + The Service Tag used for this IP Restriction. 
+ type: string + virtualNetworkSubnetId: + description: |- + The subnet id which will be used by this Web App for regional virtual network integration. + The Virtual Network Subnet ID used for this IP Restriction. + type: string + virtualNetworkSubnetIdRef: + description: Reference to a Subnet in network to populate + virtualNetworkSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkSubnetIdSelector: + description: Selector for a Subnet in network to populate + virtualNetworkSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + ipRestrictionDefaultAction: + description: The Default action for traffic that does not + match any ip_restriction rule. possible values include Allow + and Deny. Defaults to Allow. + type: string + loadBalancingMode: + description: 'The Site load balancing. Possible values include: + WeightedRoundRobin, LeastRequests, LeastResponseTime, WeightedTotalTraffic, + RequestHash, PerSiteRoundRobin. Defaults to LeastRequests + if omitted.' + type: string + localMysqlEnabled: + description: Use Local MySQL. Defaults to false. + type: boolean + managedPipelineMode: + description: 'Managed pipeline mode. Possible values include: + Integrated, Classic. Defaults to Integrated.' + type: string + minimumTlsVersion: + description: 'The configures the minimum version of TLS required + for SSL requests. Possible values include: 1.0, 1.1, and + 1.2. Defaults to 1.2.' + type: string + remoteDebuggingEnabled: + description: Should Remote Debugging be enabled. Defaults + to false. + type: boolean + remoteDebuggingVersion: + description: The Remote Debugging Version. Possible values + include VS2017, VS2019 and VS2022. + type: string + scmIpRestriction: + description: One or more scm_ip_restriction blocks as defined + above. + items: + properties: + action: + description: |- + The action to take. Possible values are Allow or Deny. Defaults to Allow. + The action to take. Possible values are `Allow` or `Deny`. 
+ type: string + description: + description: |- + The Description of this IP Restriction. + The description of the IP restriction rule. + type: string + headers: + description: A headers block as defined above. + items: + properties: + xAzureFdid: + description: Specifies a list of Azure Front Door + IDs. + items: + type: string + type: array + xFdHealthProbe: + description: Specifies if a Front Door Health + Probe should be expected. The only possible + value is 1. + items: + type: string + type: array + xForwardedFor: + description: Specifies a list of addresses for + which matching should be applied. Omitting this + value means allow any. + items: + type: string + type: array + xForwardedHost: + description: Specifies a list of Hosts for which + matching should be applied. + items: + type: string + type: array + type: object + type: array + ipAddress: + description: |- + The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + type: string + name: + description: |- + The name which should be used for this TODO. + The name which should be used for this `ip_restriction`. + type: string + priority: + description: |- + The priority value of this ip_restriction. Defaults to 65000. + The priority value of this `ip_restriction`. + type: number + serviceTag: + description: |- + The Service Tag used for this IP Restriction. + The Service Tag used for this IP Restriction. + type: string + virtualNetworkSubnetId: + description: |- + The subnet id which will be used by this Web App for regional virtual network integration. + The Virtual Network Subnet ID used for this IP Restriction. + type: string + virtualNetworkSubnetIdRef: + description: Reference to a Subnet in network to populate + virtualNetworkSubnetId. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkSubnetIdSelector: + description: Selector for a Subnet in network to populate + virtualNetworkSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + scmIpRestrictionDefaultAction: + description: The Default action for traffic that does not + match any scm_ip_restriction rule. possible values include + Allow and Deny. Defaults to Allow. + type: string + scmMinimumTlsVersion: + description: 'The configures the minimum version of TLS required + for SSL requests to the SCM site Possible values include: + 1.0, 1.1, and 1.2. Defaults to 1.2.' + type: string + scmUseMainIpRestriction: + description: Should the Windows Web App ip_restriction configuration + be used for the SCM also. + type: boolean + use32BitWorker: + description: Should the Windows Web App use a 32-bit worker. + Defaults to true. + type: boolean + virtualApplication: + description: One or more virtual_application blocks as defined + below. + items: + properties: + physicalPath: + description: The physical path for the Virtual Application. + type: string + preload: + description: Should pre-loading be enabled. + type: boolean + virtualDirectory: + description: One or more virtual_directory blocks as + defined below. + items: + properties: + physicalPath: + description: The physical path for the Virtual + Application. + type: string + virtualPath: + description: The Virtual Path for the Virtual + Application. + type: string + type: object + type: array + virtualPath: + description: The Virtual Path for the Virtual Application. + type: string + type: object + type: array + vnetRouteAllEnabled: + description: |- + Should all outbound traffic to have NAT Gateways, Network Security Groups and User Defined Routes applied? Defaults to false. + Should all outbound traffic to have Virtual Network Security Groups and User Defined Routes applied? Defaults to `false`. + type: boolean + websocketsEnabled: + description: Should Web Sockets be enabled. Defaults to false. 
+ type: boolean + workerCount: + description: The number of Workers for this Windows App Service. + type: number + type: object + stickySettings: + description: A sticky_settings block as defined below. + properties: + appSettingNames: + description: A list of app_setting names that the Windows + Web App will not swap between Slots when a swap operation + is triggered. + items: + type: string + type: array + connectionStringNames: + description: A list of connection_string names that the Windows + Web App will not swap between Slots when a swap operation + is triggered. + items: + type: string + type: array + type: object + storageAccount: + description: One or more storage_account blocks as defined below. + items: + properties: + accountName: + description: The Name of the Storage Account. + type: string + mountPath: + description: The path at which to mount the storage share. + type: string + name: + description: The name which should be used for this TODO. + type: string + shareName: + description: The Name of the File Share or Container Name + for Blob storage. + type: string + type: + description: The Azure Storage Type. Possible values include + AzureFiles and AzureBlob + type: string + type: object + type: array + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Windows Web App. + type: object + x-kubernetes-map-type: granular + virtualNetworkSubnetId: + description: The subnet id which will be used by this Web App + for regional virtual network integration. + type: string + virtualNetworkSubnetIdRef: + description: Reference to a Subnet in network to populate virtualNetworkSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkSubnetIdSelector: + description: Selector for a Subnet in network to populate virtualNetworkSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + webdeployPublishBasicAuthenticationEnabled: + description: Should the default WebDeploy Basic Authentication + publishing credentials enabled. Defaults to true. 
+ type: boolean + zipDeployFile: + description: |- + The local path and filename of the Zip packaged application to deploy to this Windows Web App. + The local path and filename of the Zip packaged application to deploy to this Windows Web App. **Note:** Using this value requires either `WEBSITE_RUN_FROM_PACKAGE=1` or `SCM_DO_BUILD_DURING_DEPLOYMENT=true` to be set on the App in `app_settings`. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.location is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.location) + || (has(self.initProvider) && has(self.initProvider.location))' + - message: spec.forProvider.siteConfig is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.siteConfig) + || (has(self.initProvider) && has(self.initProvider.siteConfig))' + status: + description: WindowsWebAppStatus defines the observed state of WindowsWebApp. + properties: + atProvider: + properties: + appSettings: + additionalProperties: + type: string + description: A map of key-value pairs of App Settings. + type: object + x-kubernetes-map-type: granular + authSettings: + description: An auth_settings block as defined below. + properties: + activeDirectory: + description: An active_directory block as defined above. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Active Directory. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The App Setting name that contains the client secret of the Client. Cannot be used with `client_secret`. 
+ type: string + type: object + additionalLoginParameters: + additionalProperties: + type: string + description: |- + Specifies a map of login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + Specifies a map of Login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + type: object + x-kubernetes-map-type: granular + allowedExternalRedirectUrls: + description: |- + Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + items: + type: string + type: array + defaultProvider: + description: |- + The default authentication provider to use when multiple providers are configured. Possible values include: AzureActiveDirectory, Facebook, Google, MicrosoftAccount, Twitter, Github + The default authentication provider to use when multiple providers are configured. Possible values include: `AzureActiveDirectory`, `Facebook`, `Google`, `MicrosoftAccount`, `Twitter`, `Github`. + type: string + enabled: + description: |- + Should the Authentication / Authorization feature is enabled for the Windows Web App be enabled? + Should the Authentication / Authorization feature be enabled? + type: boolean + facebook: + description: A facebook block as defined below. + properties: + appId: + description: |- + The App ID of the Facebook app used for login. + The App ID of the Facebook app used for login. + type: string + appSecretSettingName: + description: |- + The app setting name that contains the app_secret value used for Facebook Login. + The app setting name that contains the `app_secret` value used for Facebook Login. Cannot be specified with `app_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. 
If not specified, "wl.basic" is used as the default scope. + Specifies a list of OAuth 2.0 scopes to be requested as part of Facebook Login authentication. + items: + type: string + type: array + type: object + github: + description: A github block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the GitHub app used for login. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for GitHub Login. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + items: + type: string + type: array + type: object + google: + description: A google block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Google web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Google Login. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + Specifies a list of OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication. 
If not specified, "openid", "profile", and "email" are used as default scopes. + items: + type: string + type: array + type: object + issuer: + description: |- + The OpenID Connect Issuer URI that represents the entity which issues access tokens for this Windows Web App. + The OpenID Connect Issuer URI that represents the entity which issues access tokens. + type: string + microsoft: + description: A microsoft block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OAuth 2.0 client ID that was created for the app used for authentication. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + The list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, `wl.basic` is used as the default scope. + items: + type: string + type: array + type: object + runtimeVersion: + description: |- + The RuntimeVersion of the Authentication / Authorization feature in use for the Windows Web App. + The RuntimeVersion of the Authentication / Authorization feature in use. + type: string + tokenRefreshExtensionHours: + description: |- + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. 
+ type: number + tokenStoreEnabled: + description: |- + Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to false. + Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to `false`. + type: boolean + twitter: + description: A twitter block as defined below. + properties: + consumerKey: + description: |- + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + type: string + consumerSecretSettingName: + description: |- + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret`. + type: string + type: object + unauthenticatedClientAction: + description: |- + The action to take when an unauthenticated client attempts to access the app. Possible values include: RedirectToLoginPage, AllowAnonymous. + The action to take when an unauthenticated client attempts to access the app. Possible values include: `RedirectToLoginPage`, `AllowAnonymous`. + type: string + type: object + authSettingsV2: + description: An auth_settings_v2 block as defined below. + properties: + activeDirectoryV2: + description: An active_directory_v2 block as defined below. + properties: + allowedApplications: + description: |- + The list of allowed Applications for the Default Authorisation Policy. + The list of allowed Applications for the Default Authorisation Policy. + items: + type: string + type: array + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. 
+ items: + type: string + type: array + allowedGroups: + description: |- + The list of allowed Group Names for the Default Authorisation Policy. + The list of allowed Group Names for the Default Authorisation Policy. + items: + type: string + type: array + allowedIdentities: + description: |- + The list of allowed Identities for the Default Authorisation Policy. + The list of allowed Identities for the Default Authorisation Policy. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Active Directory. + type: string + clientSecretCertificateThumbprint: + description: |- + The thumbprint of the certificate used for signing purposes. + The thumbprint of the certificate used for signing purposes. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The App Setting name that contains the client secret of the Client. + type: string + jwtAllowedClientApplications: + description: |- + A list of Allowed Client Applications in the JWT Claim. + A list of Allowed Client Applications in the JWT Claim. + items: + type: string + type: array + jwtAllowedGroups: + description: |- + A list of Allowed Groups in the JWT Claim. + A list of Allowed Groups in the JWT Claim. + items: + type: string + type: array + loginParameters: + additionalProperties: + type: string + description: |- + A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + type: object + x-kubernetes-map-type: granular + tenantAuthEndpoint: + description: |- + The Azure Tenant Endpoint for the Authenticating Tenant. e.g. https://login.microsoftonline.com/v2.0/{tenant-guid}/ + The Azure Tenant Endpoint for the Authenticating Tenant. 
e.g. `https://login.microsoftonline.com/v2.0/{tenant-guid}/`. + type: string + wwwAuthenticationDisabled: + description: |- + Should the www-authenticate provider should be omitted from the request? Defaults to false. + Should the www-authenticate provider should be omitted from the request? Defaults to `false` + type: boolean + type: object + appleV2: + description: An apple_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Apple web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Apple Login. + type: string + loginScopes: + description: The list of Login scopes that should be requested + as part of Microsoft Account authentication. + items: + type: string + type: array + type: object + authEnabled: + description: |- + Should the AuthV2 Settings be enabled. Defaults to false. + Should the AuthV2 Settings be enabled. Defaults to `false` + type: boolean + azureStaticWebAppV2: + description: An azure_static_web_app_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Static Web App Authentication. + type: string + type: object + configFilePath: + description: |- + The path to the App Auth settings. + The path to the App Auth settings. **Note:** Relative Paths are evaluated from the Site Root directory. + type: string + customOidcV2: + description: Zero or more custom_oidc_v2 blocks as defined + below. + items: + properties: + authorisationEndpoint: + description: |- + The endpoint to make the Authorisation Request as supplied by openid_configuration_endpoint response. 
+ The endpoint to make the Authorisation Request. + type: string + certificationUri: + description: |- + The endpoint that provides the keys necessary to validate the token as supplied by openid_configuration_endpoint response. + The endpoint that provides the keys necessary to validate the token. + type: string + clientCredentialMethod: + description: |- + The Client Credential Method used. + The Client Credential Method used. Currently the only supported value is `ClientSecretPost`. + type: string + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with this Custom OIDC. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The App Setting name that contains the secret for this Custom OIDC Client. + type: string + issuerEndpoint: + description: |- + The endpoint that issued the Token as supplied by openid_configuration_endpoint response. + The endpoint that issued the Token. + type: string + name: + description: |- + The name which should be used for this TODO. + The name of the Custom OIDC Authentication Provider. + type: string + nameClaimType: + description: |- + The name of the claim that contains the users name. + The name of the claim that contains the users name. + type: string + openidConfigurationEndpoint: + description: |- + The app setting name that contains the client_secret value used for the Custom OIDC Login. + The endpoint that contains all the configuration endpoints for this Custom OIDC provider. + type: string + scopes: + description: |- + The list of the scopes that should be requested while authenticating. + The list of the scopes that should be requested while authenticating. 
+ items: + type: string + type: array + tokenEndpoint: + description: |- + The endpoint used to request a Token as supplied by openid_configuration_endpoint response. + The endpoint used to request a Token. + type: string + type: object + type: array + defaultProvider: + description: |- + The Default Authentication Provider to use when the unauthenticated_action is set to RedirectToLoginPage. Possible values include: apple, azureactivedirectory, facebook, github, google, twitter and the name of your custom_oidc_v2 provider. + The Default Authentication Provider to use when the `unauthenticated_action` is set to `RedirectToLoginPage`. Possible values include: `apple`, `azureactivedirectory`, `facebook`, `github`, `google`, `twitter` and the `name` of your `custom_oidc_v2` provider. + type: string + excludedPaths: + description: |- + The paths which should be excluded from the unauthenticated_action when it is set to RedirectToLoginPage. + The paths which should be excluded from the `unauthenticated_action` when it is set to `RedirectToLoginPage`. + items: + type: string + type: array + facebookV2: + description: A facebook_v2 block as defined below. + properties: + appId: + description: |- + The App ID of the Facebook app used for login. + The App ID of the Facebook app used for login. + type: string + appSecretSettingName: + description: |- + The app setting name that contains the app_secret value used for Facebook Login. + The app setting name that contains the `app_secret` value used for Facebook Login. + type: string + graphApiVersion: + description: |- + The version of the Facebook API to be used while logging in. + The version of the Facebook API to be used while logging in. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of scopes to be requested as part of Facebook Login authentication. 
+ items: + type: string + type: array + type: object + forwardProxyConvention: + description: |- + The convention used to determine the url of the request made. Possible values include NoProxy, Standard, Custom. Defaults to NoProxy. + The convention used to determine the url of the request made. Possible values include `ForwardProxyConventionNoProxy`, `ForwardProxyConventionStandard`, `ForwardProxyConventionCustom`. Defaults to `ForwardProxyConventionNoProxy` + type: string + forwardProxyCustomHostHeaderName: + description: |- + The name of the custom header containing the host of the request. + The name of the header containing the host of the request. + type: string + forwardProxyCustomSchemeHeaderName: + description: |- + The name of the custom header containing the scheme of the request. + The name of the header containing the scheme of the request. + type: string + githubV2: + description: A github_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the GitHub app used for login. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for GitHub Login. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + items: + type: string + type: array + type: object + googleV2: + description: A google_v2 block as defined below. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. 
+ Specifies a list of Allowed Audiences that will be requested as part of Google Sign-In authentication. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Google web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Google Login. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of Login scopes that will be requested as part of Google Sign-In authentication. + items: + type: string + type: array + type: object + httpRouteApiPrefix: + description: |- + The prefix that should precede all the authentication and authorisation paths. Defaults to /.auth. + The prefix that should precede all the authentication and authorisation paths. Defaults to `/.auth` + type: string + login: + description: A login block as defined below. + properties: + allowedExternalRedirectUrls: + description: |- + External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. + External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. **Note:** URLs within the current domain are always implicitly allowed. + items: + type: string + type: array + cookieExpirationConvention: + description: |- + The method by which cookies expire. Possible values include: FixedTime, and IdentityProviderDerived. Defaults to FixedTime. + The method by which cookies expire. 
Possible values include: `FixedTime`, and `IdentityProviderDerived`. Defaults to `FixedTime`. + type: string + cookieExpirationTime: + description: |- + The time after the request is made when the session cookie should expire. Defaults to 08:00:00. + The time after the request is made when the session cookie should expire. Defaults to `08:00:00`. + type: string + logoutEndpoint: + description: |- + The endpoint to which logout requests should be made. + The endpoint to which logout requests should be made. + type: string + nonceExpirationTime: + description: |- + The time after the request is made when the nonce should expire. Defaults to 00:05:00. + The time after the request is made when the nonce should expire. Defaults to `00:05:00`. + type: string + preserveUrlFragmentsForLogins: + description: |- + Should the fragments from the request be preserved after the login request is made. Defaults to false. + Should the fragments from the request be preserved after the login request is made. Defaults to `false`. + type: boolean + tokenRefreshExtensionTime: + description: |- + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + type: number + tokenStoreEnabled: + description: |- + Should the Token Store configuration Enabled. Defaults to false + Should the Token Store configuration Enabled. Defaults to `false` + type: boolean + tokenStorePath: + description: |- + The directory path in the App Filesystem in which the tokens will be stored. + The directory path in the App Filesystem in which the tokens will be stored. + type: string + tokenStoreSasSettingName: + description: |- + The name of the app setting which contains the SAS URL of the blob storage containing the tokens. 
+ The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + type: string + validateNonce: + description: |- + Should the nonce be validated while completing the login flow. Defaults to true. + Should the nonce be validated while completing the login flow. Defaults to `true`. + type: boolean + type: object + microsoftV2: + description: A microsoft_v2 block as defined below. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OAuth 2.0 client ID that was created for the app used for authentication. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + The list of Login scopes that will be requested as part of Microsoft Account authentication. + items: + type: string + type: array + type: object + requireAuthentication: + description: |- + Should the authentication flow be used for all requests. + Should the authentication flow be used for all requests. + type: boolean + requireHttps: + description: |- + Should HTTPS be required on connections? Defaults to true. + Should HTTPS be required on connections? Defaults to true. + type: boolean + runtimeVersion: + description: |- + The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to ~1. 
+ The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to `~1` + type: string + twitterV2: + description: A twitter_v2 block as defined below. + properties: + consumerKey: + description: |- + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + type: string + consumerSecretSettingName: + description: |- + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + type: string + type: object + unauthenticatedAction: + description: |- + The action to take for requests made without authentication. Possible values include RedirectToLoginPage, AllowAnonymous, Return401, and Return403. Defaults to RedirectToLoginPage. + The action to take for requests made without authentication. Possible values include `RedirectToLoginPage`, `AllowAnonymous`, `Return401`, and `Return403`. Defaults to `RedirectToLoginPage`. + type: string + type: object + backup: + description: A backup block as defined below. + properties: + enabled: + description: |- + Should this backup job be enabled? Defaults to true. + Should this backup job be enabled? + type: boolean + name: + description: |- + The name which should be used for this Backup. + The name which should be used for this Backup. + type: string + schedule: + description: A schedule block as defined below. + properties: + frequencyInterval: + description: |- + How often the backup should be executed (e.g. for weekly backup, this should be set to 7 and frequency_unit should be set to Day). + How often the backup should be executed (e.g. for weekly backup, this should be set to `7` and `frequency_unit` should be set to `Day`). + type: number + frequencyUnit: + description: |- + The unit of time for how often the backup should take place. 
Possible values include: Day, Hour + The unit of time for how often the backup should take place. Possible values include: `Day` and `Hour`. + type: string + keepAtLeastOneBackup: + description: |- + Should the service keep at least one backup, regardless of age of backup. Defaults to false. + Should the service keep at least one backup, regardless of age of backup. Defaults to `false`. + type: boolean + lastExecutionTime: + description: The time the backup was last attempted. + type: string + retentionPeriodDays: + description: |- + After how many days backups should be deleted. Defaults to 30. + After how many days backups should be deleted. + type: number + startTime: + description: |- + When the schedule should start working in RFC-3339 format. + When the schedule should start working in RFC-3339 format. + type: string + type: object + type: object + clientAffinityEnabled: + description: Should Client Affinity be enabled? + type: boolean + clientCertificateEnabled: + description: Should Client Certificates be enabled? + type: boolean + clientCertificateExclusionPaths: + description: |- + Paths to exclude when using client certificates, separated by ; + Paths to exclude when using client certificates, separated by ; + type: string + clientCertificateMode: + description: The Client Certificate mode. Possible values are + Required, Optional, and OptionalInteractiveUser. This property + has no effect when client_cert_enabled is false. Defaults to + Required. + type: string + connectionString: + description: One or more connection_string blocks as defined below. + items: + properties: + name: + description: |- + The name of the Connection String. + The name which should be used for this Connection. + type: string + type: + description: |- + Type of database. Possible values include: APIHub, Custom, DocDb, EventHub, MySQL, NotificationHub, PostgreSQL, RedisCache, ServiceBus, SQLAzure, and SQLServer. + Type of database. 
Possible values include: `MySQL`, `SQLServer`, `SQLAzure`, `Custom`, `NotificationHub`, `ServiceBus`, `EventHub`, `APIHub`, `DocDb`, `RedisCache`, and `PostgreSQL`. + type: string + type: object + type: array + defaultHostname: + description: The default hostname of the Windows Web App. + type: string + enabled: + description: Should the Windows Web App be enabled? Defaults to + true. + type: boolean + ftpPublishBasicAuthenticationEnabled: + description: Should the default FTP Basic Authentication publishing + profile be enabled. Defaults to true. + type: boolean + hostingEnvironmentId: + description: The ID of the App Service Environment used by App + Service. + type: string + httpsOnly: + description: Should the Windows Web App require HTTPS connections. + Defaults to false. + type: boolean + id: + description: The ID of the Windows Web App. + type: string + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of User Assigned Managed Identity IDs + to be assigned to this Windows Web App. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Principal ID associated with this Managed + Service Identity. + type: string + tenantId: + description: The Tenant ID associated with this Managed Service + Identity. + type: string + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Windows Web App. Possible + values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + keyVaultReferenceIdentityId: + description: The User Assigned Identity ID used for accessing + KeyVault secrets. The identity must be assigned to the application + in the identity block. For more information see - Access vaults + with a user-assigned identity + type: string + kind: + description: The Kind value for this Windows Web App. 
+ type: string + location: + description: The Azure Region where the Windows Web App should + exist. Changing this forces a new Windows Web App to be created. + type: string + logs: + description: A logs block as defined below. + properties: + applicationLogs: + description: A application_logs block as defined above. + properties: + azureBlobStorage: + description: A azure_blob_storage_http block as defined + above. + properties: + level: + description: 'The level at which to log. Possible + values include Error, Warning, Information, Verbose + and Off. NOTE: this field is not available for http_logs' + type: string + retentionInDays: + description: The retention period in days. A values + of 0 means no retention. + type: number + sasUrl: + description: SAS url to an Azure blob container with + read/write/list/delete permissions. + type: string + type: object + fileSystemLevel: + description: 'Log level. Possible values include: Off, + Verbose, Information, Warning, and Error.' + type: string + type: object + detailedErrorMessages: + description: Should detailed error messages be enabled. + type: boolean + failedRequestTracing: + description: Should tracing be enabled for failed requests. + type: boolean + httpLogs: + description: A http_logs block as defined above. + properties: + azureBlobStorage: + description: A azure_blob_storage_http block as defined + above. + properties: + retentionInDays: + description: The retention period in days. A values + of 0 means no retention. + type: number + type: object + fileSystem: + description: A file_system block as defined above. + properties: + retentionInDays: + description: The retention period in days. A values + of 0 means no retention. + type: number + retentionInMb: + description: The maximum size in megabytes that log + files can use. 
+ type: number + type: object + type: object + type: object + outboundIpAddressList: + description: A list of outbound IP addresses - such as ["52.23.25.3", + "52.143.43.12"] + items: + type: string + type: array + outboundIpAddresses: + description: A comma separated list of outbound IP addresses - + such as 52.23.25.3,52.143.43.12. + type: string + possibleOutboundIpAddressList: + description: A list of possible outbound ip address. + items: + type: string + type: array + possibleOutboundIpAddresses: + description: A comma separated list of outbound IP addresses - + such as 52.23.25.3,52.143.43.12,52.143.43.17 - not all of which + are necessarily in use. Superset of outbound_ip_addresses. + type: string + publicNetworkAccessEnabled: + description: Should public network access be enabled for the Web + App. Defaults to true. + type: boolean + resourceGroupName: + description: The name of the Resource Group where the Windows + Web App should exist. Changing this forces a new Windows Web + App to be created. + type: string + servicePlanId: + description: The ID of the Service Plan that this Windows App + Service will be created in. + type: string + siteConfig: + description: A site_config block as defined below. + properties: + alwaysOn: + description: If this Windows Web App is Always On enabled. + Defaults to true. + type: boolean + apiDefinitionUrl: + description: The URL to the API Definition for this Windows + Web App. + type: string + apiManagementApiId: + description: The API Management API ID this Windows Web App + Slot is associated with. + type: string + appCommandLine: + description: The App command line to launch. + type: string + applicationStack: + description: A application_stack block as defined above. + properties: + currentStack: + description: The Application Stack for the Windows Web + App. Possible values include dotnet, dotnetcore, node, + python, php, and java. 
+ type: string + dockerContainerName: + description: The name of the container to be used. This + value is required with docker_container_tag. + type: string + dockerContainerRegistry: + type: string + dockerContainerTag: + description: The tag of the container to be used. This + value is required with docker_container_name. + type: string + dockerImageName: + description: The docker image, including tag, to be used. + e.g. azure-app-service/windows/parkingpage:latest. + type: string + dockerRegistryUrl: + description: The URL of the container registry where the + docker_image_name is located. e.g. https://index.docker.io + or https://mcr.microsoft.com. This value is required + with docker_image_name. + type: string + dockerRegistryUsername: + description: The User Name to use for authentication against + the registry to pull the image. + type: string + dotnetCoreVersion: + description: |- + The version of .NET to use when current_stack is set to dotnetcore. Possible values include v4.0. + The version of DotNetCore to use. + type: string + dotnetVersion: + description: The version of .NET to use when current_stack + is set to dotnet. Possible values include v2.0,v3.0, + v4.0, v5.0, v6.0, v7.0 and v8.0. + type: string + javaContainer: + type: string + javaContainerVersion: + type: string + javaEmbeddedServerEnabled: + description: |- + Should the Java Embedded Server (Java SE) be used to run the app. + Should the application use the embedded web server for the version of Java in use. + type: boolean + javaVersion: + description: The version of Java to use when current_stack + is set to java. + type: string + nodeVersion: + description: The version of node to use when current_stack + is set to node. Possible values are ~12, ~14, ~16, and + ~18. + type: string + phpVersion: + description: The version of PHP to use when current_stack + is set to php. Possible values are 7.1, 7.4 and Off. + type: string + python: + description: Specifies whether this is a Python app. 
Defaults + to false. + type: boolean + pythonVersion: + type: string + tomcatVersion: + description: The version of Tomcat the Java App should + use. Conflicts with java_embedded_server_enabled + type: string + type: object + autoHealEnabled: + description: Should Auto heal rules be enabled. Required with + auto_heal_setting. + type: boolean + autoHealSetting: + description: A auto_heal_setting block as defined above. Required + with auto_heal. + properties: + action: + description: The action to take. Possible values are Allow + or Deny. Defaults to Allow. + properties: + actionType: + description: 'Predefined action to be taken to an + Auto Heal trigger. Possible values include: Recycle, + LogEvent, and CustomAction.' + type: string + customAction: + description: A custom_action block as defined below. + properties: + executable: + description: The executable to run for the custom_action. + type: string + parameters: + description: The parameters to pass to the specified + executable. + type: string + type: object + minimumProcessExecutionTime: + description: The minimum amount of time in hh:mm:ss + the Windows Web App must have been running before + the defined action will be run in the event of a + trigger. + type: string + type: object + trigger: + description: A trigger block as defined below. + properties: + privateMemoryKb: + description: The amount of Private Memory to be consumed + for this rule to trigger. Possible values are between + 102400 and 13631488. + type: number + requests: + description: A requests block as defined above. + properties: + count: + description: The number of occurrences of the + defined status_code in the specified interval + on which to trigger this rule. + type: number + interval: + description: The time interval in the form hh:mm:ss. + type: string + type: object + slowRequest: + description: One or more slow_request blocks as defined + above. 
+ properties: + count: + description: The number of occurrences of the + defined status_code in the specified interval + on which to trigger this rule. + type: number + interval: + description: The time interval in the form hh:mm:ss. + type: string + path: + description: The path to which this rule status + code applies. + type: string + timeTaken: + description: The threshold of time passed to qualify + as a Slow Request in hh:mm:ss. + type: string + type: object + statusCode: + description: One or more status_code blocks as defined + above. + items: + properties: + count: + description: The number of occurrences of the + defined status_code in the specified interval + on which to trigger this rule. + type: number + interval: + description: The time interval in the form hh:mm:ss. + type: string + path: + description: The path to which this rule status + code applies. + type: string + statusCodeRange: + description: The status code for this rule, + accepts single status codes and status code + ranges. e.g. 500 or 400-499. Possible values + are integers between 101 and 599 + type: string + subStatus: + description: The Request Sub Status of the Status + Code. + type: number + win32StatusCode: + description: The Win32 Status Code of the Request. + type: number + type: object + type: array + type: object + type: object + containerRegistryManagedIdentityClientId: + description: The Client ID of the Managed Service Identity + to use for connections to the Azure Container Registry. + type: string + containerRegistryUseManagedIdentity: + description: Should connections for Azure Container Registry + use Managed Identity. + type: boolean + cors: + description: A cors block as defined above. + properties: + allowedOrigins: + description: |- + Specifies a list of origins that should be allowed to make cross-origin calls. + Specifies a list of origins that should be allowed to make cross-origin calls. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + supportCredentials: + description: |- + Whether CORS requests with credentials are allowed. Defaults to false + Are credentials allowed in CORS requests? Defaults to `false`. + type: boolean + type: object + defaultDocuments: + description: Specifies a list of Default Documents for the + Windows Web App. + items: + type: string + type: array + detailedErrorLoggingEnabled: + description: Should the Windows Web App be enabled? Defaults + to true. + type: boolean + ftpsState: + description: 'The State of FTP / FTPS service. Possible values + include: AllAllowed, FtpsOnly, Disabled. Defaults to Disabled.' + type: string + healthCheckEvictionTimeInMin: + description: |- + The amount of time in minutes that a node can be unhealthy before being removed from the load balancer. Possible values are between 2 and 10. Only valid in conjunction with health_check_path. + The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between `2` and `10`. Defaults to `10`. Only valid in conjunction with `health_check_path` + type: number + healthCheckPath: + description: The path to the Health Check. + type: string + http2Enabled: + description: Should the HTTP2 be enabled? + type: boolean + ipRestriction: + description: One or more ip_restriction blocks as defined + above. + items: + properties: + action: + description: |- + The action to take. Possible values are Allow or Deny. Defaults to Allow. + The action to take. Possible values are `Allow` or `Deny`. + type: string + description: + description: |- + The Description of this IP Restriction. + The description of the IP restriction rule. + type: string + headers: + description: A headers block as defined above. + items: + properties: + xAzureFdid: + description: Specifies a list of Azure Front Door + IDs. 
+ items: + type: string + type: array + xFdHealthProbe: + description: Specifies if a Front Door Health + Probe should be expected. The only possible + value is 1. + items: + type: string + type: array + xForwardedFor: + description: Specifies a list of addresses for + which matching should be applied. Omitting this + value means allow any. + items: + type: string + type: array + xForwardedHost: + description: Specifies a list of Hosts for which + matching should be applied. + items: + type: string + type: array + type: object + type: array + ipAddress: + description: |- + The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + type: string + name: + description: |- + The name which should be used for this TODO. + The name which should be used for this `ip_restriction`. + type: string + priority: + description: |- + The priority value of this ip_restriction. Defaults to 65000. + The priority value of this `ip_restriction`. + type: number + serviceTag: + description: |- + The Service Tag used for this IP Restriction. + The Service Tag used for this IP Restriction. + type: string + virtualNetworkSubnetId: + description: |- + The subnet id which will be used by this Web App for regional virtual network integration. + The Virtual Network Subnet ID used for this IP Restriction. + type: string + type: object + type: array + ipRestrictionDefaultAction: + description: The Default action for traffic that does not + match any ip_restriction rule. possible values include Allow + and Deny. Defaults to Allow. + type: string + linuxFxVersion: + type: string + loadBalancingMode: + description: 'The Site load balancing. Possible values include: + WeightedRoundRobin, LeastRequests, LeastResponseTime, WeightedTotalTraffic, + RequestHash, PerSiteRoundRobin. 
Defaults to LeastRequests + if omitted.' + type: string + localMysqlEnabled: + description: Use Local MySQL. Defaults to false. + type: boolean + managedPipelineMode: + description: 'Managed pipeline mode. Possible values include: + Integrated, Classic. Defaults to Integrated.' + type: string + minimumTlsVersion: + description: 'The configures the minimum version of TLS required + for SSL requests. Possible values include: 1.0, 1.1, and + 1.2. Defaults to 1.2.' + type: string + remoteDebuggingEnabled: + description: Should Remote Debugging be enabled. Defaults + to false. + type: boolean + remoteDebuggingVersion: + description: The Remote Debugging Version. Possible values + include VS2017, VS2019 and VS2022. + type: string + scmIpRestriction: + description: One or more scm_ip_restriction blocks as defined + above. + items: + properties: + action: + description: |- + The action to take. Possible values are Allow or Deny. Defaults to Allow. + The action to take. Possible values are `Allow` or `Deny`. + type: string + description: + description: |- + The Description of this IP Restriction. + The description of the IP restriction rule. + type: string + headers: + description: A headers block as defined above. + items: + properties: + xAzureFdid: + description: Specifies a list of Azure Front Door + IDs. + items: + type: string + type: array + xFdHealthProbe: + description: Specifies if a Front Door Health + Probe should be expected. The only possible + value is 1. + items: + type: string + type: array + xForwardedFor: + description: Specifies a list of addresses for + which matching should be applied. Omitting this + value means allow any. + items: + type: string + type: array + xForwardedHost: + description: Specifies a list of Hosts for which + matching should be applied. + items: + type: string + type: array + type: object + type: array + ipAddress: + description: |- + The CIDR notation of the IP or IP Range to match. 
For example: 10.0.0.0/24 or 192.168.10.1/32 + The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + type: string + name: + description: |- + The name which should be used for this TODO. + The name which should be used for this `ip_restriction`. + type: string + priority: + description: |- + The priority value of this ip_restriction. Defaults to 65000. + The priority value of this `ip_restriction`. + type: number + serviceTag: + description: |- + The Service Tag used for this IP Restriction. + The Service Tag used for this IP Restriction. + type: string + virtualNetworkSubnetId: + description: |- + The subnet id which will be used by this Web App for regional virtual network integration. + The Virtual Network Subnet ID used for this IP Restriction. + type: string + type: object + type: array + scmIpRestrictionDefaultAction: + description: The Default action for traffic that does not + match any scm_ip_restriction rule. possible values include + Allow and Deny. Defaults to Allow. + type: string + scmMinimumTlsVersion: + description: 'The configures the minimum version of TLS required + for SSL requests to the SCM site Possible values include: + 1.0, 1.1, and 1.2. Defaults to 1.2.' + type: string + scmType: + type: string + scmUseMainIpRestriction: + description: Should the Windows Web App ip_restriction configuration + be used for the SCM also. + type: boolean + use32BitWorker: + description: Should the Windows Web App use a 32-bit worker. + Defaults to true. + type: boolean + virtualApplication: + description: One or more virtual_application blocks as defined + below. + items: + properties: + physicalPath: + description: The physical path for the Virtual Application. + type: string + preload: + description: Should pre-loading be enabled. + type: boolean + virtualDirectory: + description: One or more virtual_directory blocks as + defined below. 
+ items: + properties: + physicalPath: + description: The physical path for the Virtual + Application. + type: string + virtualPath: + description: The Virtual Path for the Virtual + Application. + type: string + type: object + type: array + virtualPath: + description: The Virtual Path for the Virtual Application. + type: string + type: object + type: array + vnetRouteAllEnabled: + description: |- + Should all outbound traffic to have NAT Gateways, Network Security Groups and User Defined Routes applied? Defaults to false. + Should all outbound traffic to have Virtual Network Security Groups and User Defined Routes applied? Defaults to `false`. + type: boolean + websocketsEnabled: + description: Should Web Sockets be enabled. Defaults to false. + type: boolean + windowsFxVersion: + type: string + workerCount: + description: The number of Workers for this Windows App Service. + type: number + type: object + stickySettings: + description: A sticky_settings block as defined below. + properties: + appSettingNames: + description: A list of app_setting names that the Windows + Web App will not swap between Slots when a swap operation + is triggered. + items: + type: string + type: array + connectionStringNames: + description: A list of connection_string names that the Windows + Web App will not swap between Slots when a swap operation + is triggered. + items: + type: string + type: array + type: object + storageAccount: + description: One or more storage_account blocks as defined below. + items: + properties: + accountName: + description: The Name of the Storage Account. + type: string + mountPath: + description: The path at which to mount the storage share. + type: string + name: + description: The name which should be used for this TODO. + type: string + shareName: + description: The Name of the File Share or Container Name + for Blob storage. + type: string + type: + description: The Azure Storage Type. 
Possible values include + AzureFiles and AzureBlob + type: string + type: object + type: array + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Windows Web App. + type: object + x-kubernetes-map-type: granular + virtualNetworkSubnetId: + description: The subnet id which will be used by this Web App + for regional virtual network integration. + type: string + webdeployPublishBasicAuthenticationEnabled: + description: Should the default WebDeploy Basic Authentication + publishing credentials enabled. Defaults to true. + type: boolean + zipDeployFile: + description: |- + The local path and filename of the Zip packaged application to deploy to this Windows Web App. + The local path and filename of the Zip packaged application to deploy to this Windows Web App. **Note:** Using this value requires either `WEBSITE_RUN_FROM_PACKAGE=1` or `SCM_DO_BUILD_DURING_DEPLOYMENT=true` to be set on the App in `app_settings`. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. 
+ type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/package/crds/web.azure.upbound.io_windowswebappslots.yaml b/package/crds/web.azure.upbound.io_windowswebappslots.yaml index 551962e88..6241f31af 100644 --- a/package/crds/web.azure.upbound.io_windowswebappslots.yaml +++ b/package/crds/web.azure.upbound.io_windowswebappslots.yaml @@ -5247,3 +5247,4983 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: WindowsWebAppSlot is the Schema for the WindowsWebAppSlots API. + Manages a Windows Web App Slot. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: WindowsWebAppSlotSpec defines the desired state of WindowsWebAppSlot + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + appServiceId: + description: The ID of the Windows Web App this Deployment Slot + will be part of. Changing this forces a new Windows Web App + to be created. + type: string + appServiceIdRef: + description: Reference to a WindowsWebApp in web to populate appServiceId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + appServiceIdSelector: + description: Selector for a WindowsWebApp in web to populate appServiceId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + appSettings: + additionalProperties: + type: string + description: A map of key-value pairs of App Settings. + type: object + x-kubernetes-map-type: granular + authSettings: + description: An auth_settings block as defined below. 
+ properties: + activeDirectory: + description: An active_directory block as defined above. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Active Directory. + type: string + clientSecretSecretRef: + description: |- + The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. + The Client Secret for the Client ID. Cannot be used with `client_secret_setting_name`. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The App Setting name that contains the client secret of the Client. Cannot be used with `client_secret`. + type: string + type: object + additionalLoginParameters: + additionalProperties: + type: string + description: |- + Specifies a map of login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + Specifies a map of Login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + type: object + x-kubernetes-map-type: granular + allowedExternalRedirectUrls: + description: |- + Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App Slot. 
+ Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + items: + type: string + type: array + defaultProvider: + description: |- + The default authentication provider to use when multiple providers are configured. Possible values include: AzureActiveDirectory, Facebook, Google, MicrosoftAccount, Twitter, Github. + The default authentication provider to use when multiple providers are configured. Possible values include: `AzureActiveDirectory`, `Facebook`, `Google`, `MicrosoftAccount`, `Twitter`, `Github`. + type: string + enabled: + description: |- + Should the Authentication / Authorization feature be enabled for the Windows Web App? + Should the Authentication / Authorization feature be enabled? + type: boolean + facebook: + description: A facebook block as defined below. + properties: + appId: + description: |- + The App ID of the Facebook app used for login. + The App ID of the Facebook app used for login. + type: string + appSecretSecretRef: + description: |- + The App Secret of the Facebook app used for Facebook login. Cannot be specified with app_secret_setting_name. + The App Secret of the Facebook app used for Facebook Login. Cannot be specified with `app_secret_setting_name`. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + appSecretSettingName: + description: |- + The app setting name that contains the app_secret value used for Facebook Login. + The app setting name that contains the `app_secret` value used for Facebook Login. Cannot be specified with `app_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. 
+ Specifies a list of OAuth 2.0 scopes to be requested as part of Facebook Login authentication. + items: + type: string + type: array + type: object + github: + description: A github block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the GitHub app used for login. + type: string + clientSecretSecretRef: + description: |- + The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. + The Client Secret of the GitHub app used for GitHub Login. Cannot be specified with `client_secret_setting_name`. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for GitHub Login. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + items: + type: string + type: array + type: object + google: + description: A google block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Google web application. + type: string + clientSecretSecretRef: + description: |- + The OAuth 2.0 client secret that was created for the app used for authentication. 
Cannot be specified with client_secret_setting_name. + The client secret associated with the Google web application. Cannot be specified with `client_secret_setting_name`. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Google Login. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + Specifies a list of OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication. If not specified, "openid", "profile", and "email" are used as default scopes. + items: + type: string + type: array + type: object + issuer: + description: |- + The OpenID Connect Issuer URI that represents the entity which issues access tokens for this Windows Web App Slot. + The OpenID Connect Issuer URI that represents the entity which issues access tokens. + type: string + microsoft: + description: A microsoft block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OAuth 2.0 client ID that was created for the app used for authentication. + type: string + clientSecretSecretRef: + description: |- + The OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with client_secret_setting_name. + The OAuth 2.0 client secret that was created for the app used for authentication. 
Cannot be specified with `client_secret_setting_name`. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + The list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, `wl.basic` is used as the default scope. + items: + type: string + type: array + type: object + runtimeVersion: + description: |- + The RuntimeVersion of the Authentication / Authorization feature in use for the Windows Web App Slot. + The RuntimeVersion of the Authentication / Authorization feature in use. + type: string + tokenRefreshExtensionHours: + description: |- + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + type: number + tokenStoreEnabled: + description: |- + Should the Windows Web App Slot durably store platform-specific security tokens that are obtained during login flows? Defaults to false. + Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to `false`. 
+ type: boolean + twitter: + description: A twitter block as defined below. + properties: + consumerKey: + description: |- + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + type: string + consumerSecretSecretRef: + description: |- + The OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with consumer_secret_setting_name. + The OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret_setting_name`. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + consumerSecretSettingName: + description: |- + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret`. + type: string + type: object + unauthenticatedClientAction: + description: |- + The action to take when an unauthenticated client attempts to access the app. Possible values include: RedirectToLoginPage, AllowAnonymous. + The action to take when an unauthenticated client attempts to access the app. Possible values include: `RedirectToLoginPage`, `AllowAnonymous`. + type: string + type: object + authSettingsV2: + description: An auth_settings_v2 block as defined below. + properties: + activeDirectoryV2: + description: An active_directory_v2 block as defined below. + properties: + allowedApplications: + description: |- + The list of allowed Applications for the Default Authorisation Policy. + The list of allowed Applications for the Default Authorisation Policy. 
+ items: + type: string + type: array + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + items: + type: string + type: array + allowedGroups: + description: |- + The list of allowed Group Names for the Default Authorisation Policy. + The list of allowed Group Names for the Default Authorisation Policy. + items: + type: string + type: array + allowedIdentities: + description: |- + The list of allowed Identities for the Default Authorisation Policy. + The list of allowed Identities for the Default Authorisation Policy. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Active Directory. + type: string + clientSecretCertificateThumbprint: + description: |- + The thumbprint of the certificate used for signing purposes. + The thumbprint of the certificate used for signing purposes. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The App Setting name that contains the client secret of the Client. + type: string + jwtAllowedClientApplications: + description: |- + A list of Allowed Client Applications in the JWT Claim. + A list of Allowed Client Applications in the JWT Claim. + items: + type: string + type: array + jwtAllowedGroups: + description: |- + A list of Allowed Groups in the JWT Claim. + A list of Allowed Groups in the JWT Claim. + items: + type: string + type: array + loginParameters: + additionalProperties: + type: string + description: |- + A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. 
+ A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + type: object + x-kubernetes-map-type: granular + tenantAuthEndpoint: + description: |- + The Azure Tenant Endpoint for the Authenticating Tenant. e.g. https://login.microsoftonline.com/v2.0/{tenant-guid}/ + The Azure Tenant Endpoint for the Authenticating Tenant. e.g. `https://login.microsoftonline.com/v2.0/{tenant-guid}/`. + type: string + wwwAuthenticationDisabled: + description: |- + Should the www-authenticate provider should be omitted from the request? Defaults to false. + Should the www-authenticate provider should be omitted from the request? Defaults to `false` + type: boolean + type: object + appleV2: + description: An apple_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Apple web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Apple Login. + type: string + type: object + authEnabled: + description: |- + Should the AuthV2 Settings be enabled. Defaults to false. + Should the AuthV2 Settings be enabled. Defaults to `false` + type: boolean + azureStaticWebAppV2: + description: An azure_static_web_app_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Static Web App Authentication. + type: string + type: object + configFilePath: + description: |- + The path to the App Auth settings. + The path to the App Auth settings. **Note:** Relative Paths are evaluated from the Site Root directory. 
+ type: string + customOidcV2: + description: Zero or more custom_oidc_v2 blocks as defined + below. + items: + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with this Custom OIDC. + type: string + name: + description: |- + The name which should be used for this Storage Account. + The name of the Custom OIDC Authentication Provider. + type: string + nameClaimType: + description: |- + The name of the claim that contains the users name. + The name of the claim that contains the users name. + type: string + openidConfigurationEndpoint: + description: |- + The app setting name that contains the client_secret value used for the Custom OIDC Login. + The endpoint that contains all the configuration endpoints for this Custom OIDC provider. + type: string + scopes: + description: |- + The list of the scopes that should be requested while authenticating. + The list of the scopes that should be requested while authenticating. + items: + type: string + type: array + type: object + type: array + defaultProvider: + description: |- + The Default Authentication Provider to use when the unauthenticated_action is set to RedirectToLoginPage. Possible values include: apple, azureactivedirectory, facebook, github, google, twitter and the name of your custom_oidc_v2 provider. + The Default Authentication Provider to use when the `unauthenticated_action` is set to `RedirectToLoginPage`. Possible values include: `apple`, `azureactivedirectory`, `facebook`, `github`, `google`, `twitter` and the `name` of your `custom_oidc_v2` provider. + type: string + excludedPaths: + description: |- + The paths which should be excluded from the unauthenticated_action when it is set to RedirectToLoginPage. + The paths which should be excluded from the `unauthenticated_action` when it is set to `RedirectToLoginPage`. 
+ items: + type: string + type: array + facebookV2: + description: A facebook_v2 block as defined below. + properties: + appId: + description: |- + The App ID of the Facebook app used for login. + The App ID of the Facebook app used for login. + type: string + appSecretSettingName: + description: |- + The app setting name that contains the app_secret value used for Facebook Login. + The app setting name that contains the `app_secret` value used for Facebook Login. + type: string + graphApiVersion: + description: |- + The version of the Facebook API to be used while logging in. + The version of the Facebook API to be used while logging in. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of scopes to be requested as part of Facebook Login authentication. + items: + type: string + type: array + type: object + forwardProxyConvention: + description: |- + The convention used to determine the url of the request made. Possible values include NoProxy, Standard, Custom. Defaults to NoProxy. + The convention used to determine the url of the request made. Possible values include `ForwardProxyConventionNoProxy`, `ForwardProxyConventionStandard`, `ForwardProxyConventionCustom`. Defaults to `ForwardProxyConventionNoProxy` + type: string + forwardProxyCustomHostHeaderName: + description: |- + The name of the custom header containing the host of the request. + The name of the header containing the host of the request. + type: string + forwardProxyCustomSchemeHeaderName: + description: |- + The name of the custom header containing the scheme of the request. + The name of the header containing the scheme of the request. + type: string + githubV2: + description: A github_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the GitHub app used for login. 
+ type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for GitHub Login. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + items: + type: string + type: array + type: object + googleV2: + description: A google_v2 block as defined below. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed Audiences that will be requested as part of Google Sign-In authentication. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Google web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Google Login. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of Login scopes that will be requested as part of Google Sign-In authentication. + items: + type: string + type: array + type: object + httpRouteApiPrefix: + description: |- + The prefix that should precede all the authentication and authorisation paths. Defaults to /.auth. + The prefix that should precede all the authentication and authorisation paths. Defaults to `/.auth` + type: string + login: + description: A login block as defined below. 
+ properties: + allowedExternalRedirectUrls: + description: |- + External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. + External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. **Note:** URLs within the current domain are always implicitly allowed. + items: + type: string + type: array + cookieExpirationConvention: + description: |- + The method by which cookies expire. Possible values include: FixedTime, and IdentityProviderDerived. Defaults to FixedTime. + The method by which cookies expire. Possible values include: `FixedTime`, and `IdentityProviderDerived`. Defaults to `FixedTime`. + type: string + cookieExpirationTime: + description: |- + The time after the request is made when the session cookie should expire. Defaults to 08:00:00. + The time after the request is made when the session cookie should expire. Defaults to `08:00:00`. + type: string + logoutEndpoint: + description: |- + The endpoint to which logout requests should be made. + The endpoint to which logout requests should be made. + type: string + nonceExpirationTime: + description: |- + The time after the request is made when the nonce should expire. Defaults to 00:05:00. + The time after the request is made when the nonce should expire. Defaults to `00:05:00`. + type: string + preserveUrlFragmentsForLogins: + description: |- + Should the fragments from the request be preserved after the login request is made. Defaults to false. + Should the fragments from the request be preserved after the login request is made. Defaults to `false`. + type: boolean + tokenRefreshExtensionTime: + description: |- + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. 
+ The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + type: number + tokenStoreEnabled: + description: |- + Should the Token Store configuration Enabled. Defaults to false + Should the Token Store configuration Enabled. Defaults to `false` + type: boolean + tokenStorePath: + description: |- + The directory path in the App Filesystem in which the tokens will be stored. + The directory path in the App Filesystem in which the tokens will be stored. + type: string + tokenStoreSasSettingName: + description: |- + The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + type: string + validateNonce: + description: |- + Should the nonce be validated while completing the login flow. Defaults to true. + Should the nonce be validated while completing the login flow. Defaults to `true`. + type: boolean + type: object + microsoftV2: + description: A microsoft_v2 block as defined below. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OAuth 2.0 client ID that was created for the app used for authentication. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. 
+ type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + The list of Login scopes that will be requested as part of Microsoft Account authentication. + items: + type: string + type: array + type: object + requireAuthentication: + description: |- + Should the authentication flow be used for all requests. + Should the authentication flow be used for all requests. + type: boolean + requireHttps: + description: |- + Should HTTPS be required on connections? Defaults to true. + Should HTTPS be required on connections? Defaults to true. + type: boolean + runtimeVersion: + description: |- + The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to ~1. + The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to `~1` + type: string + twitterV2: + description: A twitter_v2 block as defined below. + properties: + consumerKey: + description: |- + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + type: string + consumerSecretSettingName: + description: |- + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + type: string + type: object + unauthenticatedAction: + description: |- + The action to take for requests made without authentication. Possible values include RedirectToLoginPage, AllowAnonymous, Return401, and Return403. Defaults to RedirectToLoginPage. + The action to take for requests made without authentication. Possible values include `RedirectToLoginPage`, `AllowAnonymous`, `Return401`, and `Return403`. Defaults to `RedirectToLoginPage`. + type: string + type: object + backup: + description: A backup block as defined below. 
+ properties: + enabled: + description: |- + Should this backup job be enabled? Defaults to true. + Should this backup job be enabled? + type: boolean + name: + description: |- + The name which should be used for this Backup. + The name which should be used for this Backup. + type: string + schedule: + description: A schedule block as defined below. + properties: + frequencyInterval: + description: |- + How often the backup should be executed (e.g. for weekly backup, this should be set to 7 and frequency_unit should be set to Day). + How often the backup should be executed (e.g. for weekly backup, this should be set to `7` and `frequency_unit` should be set to `Day`). + type: number + frequencyUnit: + description: |- + The unit of time for how often the backup should take place. Possible values include: Day, Hour + The unit of time for how often the backup should take place. Possible values include: `Day` and `Hour`. + type: string + keepAtLeastOneBackup: + description: |- + Should the service keep at least one backup, regardless of age of backup. Defaults to false. + Should the service keep at least one backup, regardless of age of backup. Defaults to `false`. + type: boolean + retentionPeriodDays: + description: |- + After how many days backups should be deleted. Defaults to 30. + After how many days backups should be deleted. + type: number + startTime: + description: |- + When the schedule should start working in RFC-3339 format. + When the schedule should start working in RFC-3339 format. + type: string + type: object + storageAccountUrlSecretRef: + description: |- + The SAS URL to the container. + The SAS URL to the container. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - key + - name + - namespace + type: object + required: + - storageAccountUrlSecretRef + type: object + clientAffinityEnabled: + description: Should Client Affinity be enabled? + type: boolean + clientCertificateEnabled: + description: Should Client Certificates be enabled? + type: boolean + clientCertificateExclusionPaths: + description: |- + Paths to exclude when using client certificates, separated by ; + Paths to exclude when using client certificates, separated by ; + type: string + clientCertificateMode: + description: The Client Certificate mode. Possible values are + Required, Optional, and OptionalInteractiveUser. This property + has no effect when client_cert_enabled is false. Defaults to + Required. + type: string + connectionString: + description: One or more connection_string blocks as defined below. + items: + properties: + name: + description: |- + The name of the connection String. + The name which should be used for this Connection. + type: string + type: + description: |- + Type of database. Possible values include: APIHub, Custom, DocDb, EventHub, MySQL, NotificationHub, PostgreSQL, RedisCache, ServiceBus, SQLAzure, and SQLServer. + Type of database. Possible values include: `MySQL`, `SQLServer`, `SQLAzure`, `Custom`, `NotificationHub`, `ServiceBus`, `EventHub`, `APIHub`, `DocDb`, `RedisCache`, and `PostgreSQL`. + type: string + valueSecretRef: + description: |- + The connection string value. + The connection string value. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + required: + - valueSecretRef + type: object + type: array + enabled: + description: Should the Windows Web App Slot be enabled? Defaults + to true. 
+ type: boolean + ftpPublishBasicAuthenticationEnabled: + description: Should the default FTP Basic Authentication publishing + profile be enabled. Defaults to true. + type: boolean + httpsOnly: + description: Should the Windows Web App Slot require HTTPS connections. + Defaults to false. + type: boolean + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of User Assigned Managed Identity IDs + to be assigned to this Windows Web App Slot. + items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Windows Web App Slot. + Possible values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + keyVaultReferenceIdentityId: + description: The User Assigned Identity ID used for accessing + KeyVault secrets. The identity must be assigned to the application + in the identity block. For more information see - Access vaults + with a user-assigned identity + type: string + logs: + description: A logs block as defined below. + properties: + applicationLogs: + description: A application_logs block as defined above. + properties: + azureBlobStorage: + description: A azure_blob_storage_http block as defined + above. + properties: + level: + description: 'The level at which to log. Possible + values include Error, Warning, Information, Verbose + and Off. NOTE: this field is not available for http_logs' + type: string + retentionInDays: + description: The retention period in days. A values + of 0 means no retention. + type: number + sasUrl: + description: SAS url to an Azure blob container with + read/write/list/delete permissions. + type: string + type: object + fileSystemLevel: + description: 'Log level. Possible values include: Off, + Verbose, Information, Warning, and Error.' 
+ type: string + type: object + detailedErrorMessages: + description: Should detailed error messages be enabled. + type: boolean + failedRequestTracing: + description: Should failed request tracing be enabled. + type: boolean + httpLogs: + description: An http_logs block as defined above. + properties: + azureBlobStorage: + description: A azure_blob_storage_http block as defined + above. + properties: + retentionInDays: + description: The retention period in days. A values + of 0 means no retention. + type: number + sasurlSecretRef: + description: SAS url to an Azure blob container with + read/write/list/delete permissions. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + required: + - sasurlSecretRef + type: object + fileSystem: + description: A file_system block as defined above. + properties: + retentionInDays: + description: The retention period in days. A values + of 0 means no retention. + type: number + retentionInMb: + description: The maximum size in megabytes that log + files can use. + type: number + type: object + type: object + type: object + publicNetworkAccessEnabled: + description: Should public network access be enabled for the Web + App. Defaults to true. + type: boolean + servicePlanId: + description: The ID of the Service Plan in which to run this slot. + If not specified the same Service Plan as the Windows Web App + will be used. + type: string + siteConfig: + description: A site_config block as defined below. + properties: + alwaysOn: + description: If this Windows Web App Slot is Always On enabled. + Defaults to true. + type: boolean + apiDefinitionUrl: + description: The URL to the API Definition for this Windows + Web App Slot. 
+ type: string + apiManagementApiId: + description: The API Management API ID this Windows Web App + Slot os associated with. + type: string + appCommandLine: + description: The App command line to launch. + type: string + applicationStack: + description: A application_stack block as defined above. + properties: + currentStack: + description: The Application Stack for the Windows Web + App. Possible values include dotnet, dotnetcore, node, + python, php, and java. + type: string + dockerContainerName: + description: The name of the container to be used. This + value is required with docker_container_tag. + type: string + dockerContainerRegistry: + type: string + dockerContainerTag: + description: The tag of the container to be used. This + value is required with docker_container_name. + type: string + dockerImageName: + description: The docker image, including tag, to be used. + e.g. azure-app-service/windows/parkingpage:latest. + type: string + dockerRegistryPasswordSecretRef: + description: The User Name to use for authentication against + the registry to pull the image. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + dockerRegistryUrl: + description: The URL of the container registry where the + docker_image_name is located. e.g. https://index.docker.io + or https://mcr.microsoft.com. This value is required + with docker_image_name. + type: string + dockerRegistryUsername: + description: The User Name to use for authentication against + the registry to pull the image. + type: string + dotnetCoreVersion: + description: |- + The version of .NET to use when current_stack is set to dotnetcore. Possible values include v4.0. + The version of DotNetCore to use. 
+ type: string + dotnetVersion: + description: The version of .NET to use when current_stack + is set to dotnet. Possible values include v2.0,v3.0, + v4.0, v5.0, v6.0, v7.0 and v8.0. + type: string + javaContainer: + type: string + javaContainerVersion: + type: string + javaEmbeddedServerEnabled: + description: |- + Should the Java Embedded Server (Java SE) be used to run the app. + Should the application use the embedded web server for the version of Java in use. + type: boolean + javaVersion: + description: The version of Java to use when current_stack + is set to java. Possible values include 1.7, 1.8, 11 + and 17. Required with java_container and java_container_version. + type: string + nodeVersion: + description: The version of node to use when current_stack + is set to node. Possible values include ~12, ~14, ~16, + and ~18. + type: string + phpVersion: + description: The version of PHP to use when current_stack + is set to php. Possible values are 7.1, 7.4 and Off. + type: string + python: + description: The app is a Python app. Defaults to false. + type: boolean + pythonVersion: + type: string + tomcatVersion: + description: The version of Tomcat the Java App should + use. + type: string + type: object + autoHealEnabled: + description: Should Auto heal rules be enabled. Required with + auto_heal_setting. + type: boolean + autoHealSetting: + description: A auto_heal_setting block as defined above. Required + with auto_heal. + properties: + action: + description: The action to take. Possible values are Allow + or Deny. Defaults to Allow. + properties: + actionType: + description: Predefined action to be taken to an Auto + Heal trigger. Possible values are CustomAction, + LogEvent and Recycle. + type: string + customAction: + description: A custom_action block as defined below. + properties: + executable: + description: The executable to run for the custom_action. + type: string + parameters: + description: The parameters to pass to the specified + executable. 
+ type: string + type: object + minimumProcessExecutionTime: + description: The minimum amount of time in hh:mm:ss + the Windows Web App Slot must have been running + before the defined action will be run in the event + of a trigger. + type: string + type: object + trigger: + description: A trigger block as defined below. + properties: + privateMemoryKb: + description: The amount of Private Memory to be consumed + for this rule to trigger. Possible values are between + 102400 and 13631488. + type: number + requests: + description: A requests block as defined above. + properties: + count: + description: The number of occurrences of the + defined status_code in the specified interval + on which to trigger this rule. + type: number + interval: + description: The time interval in the form hh:mm:ss. + type: string + type: object + slowRequest: + description: One or more slow_request blocks as defined + above. + properties: + count: + description: The number of occurrences of the + defined status_code in the specified interval + on which to trigger this rule. + type: number + interval: + description: The time interval in the form hh:mm:ss. + type: string + path: + description: The path to which this rule status + code applies. + type: string + timeTaken: + description: The threshold of time passed to qualify + as a Slow Request in hh:mm:ss. + type: string + type: object + statusCode: + description: One or more status_code blocks as defined + above. + items: + properties: + count: + description: The number of occurrences of the + defined status_code in the specified interval + on which to trigger this rule. + type: number + interval: + description: The time interval in the form hh:mm:ss. + type: string + path: + description: The path to which this rule status + code applies. + type: string + statusCodeRange: + description: The status code for this rule, + accepts single status codes and status code + ranges. e.g. 500 or 400-499. 
Possible values + are integers between 101 and 599 + type: string + subStatus: + description: The Request Sub Status of the Status + Code. + type: number + win32StatusCode: + description: The Win32 Status Code of the Request. + type: number + type: object + type: array + type: object + type: object + autoSwapSlotName: + description: The Windows Web App Slot Name to automatically + swap to when deployment to that slot is successfully completed. + type: string + containerRegistryManagedIdentityClientId: + description: The Client ID of the Managed Service Identity + to use for connections to the Azure Container Registry. + type: string + containerRegistryUseManagedIdentity: + description: Should connections for Azure Container Registry + use Managed Identity. + type: boolean + cors: + description: A cors block as defined above. + properties: + allowedOrigins: + description: |- + Specifies a list of origins that should be allowed to make cross-origin calls. + Specifies a list of origins that should be allowed to make cross-origin calls. + items: + type: string + type: array + x-kubernetes-list-type: set + supportCredentials: + description: |- + Whether CORS requests with credentials are allowed. Defaults to false + Are credentials allowed in CORS requests? Defaults to `false`. + type: boolean + type: object + defaultDocuments: + description: Specifies a list of Default Documents for the + Windows Web App Slot. + items: + type: string + type: array + ftpsState: + description: 'The State of FTP / FTPS service. Possible values + include: AllAllowed, FtpsOnly, Disabled. Defaults to Disabled.' + type: string + healthCheckEvictionTimeInMin: + description: |- + The amount of time in minutes that a node can be unhealthy before being removed from the load balancer. Possible values are between 2 and 10. Only valid in conjunction with health_check_path. + The amount of time in minutes that a node is unhealthy before being removed from the load balancer. 
Possible values are between `2` and `10`. Defaults to `10`. Only valid in conjunction with `health_check_path` + type: number + healthCheckPath: + description: The path to the Health Check. + type: string + http2Enabled: + description: Should the HTTP2 be enabled? + type: boolean + ipRestriction: + description: One or more ip_restriction blocks as defined + above. + items: + properties: + action: + description: |- + The action to take. Possible values are Allow or Deny. Defaults to Allow. + The action to take. Possible values are `Allow` or `Deny`. + type: string + description: + description: |- + The Description of this IP Restriction. + The description of the IP restriction rule. + type: string + headers: + description: A headers block as defined above. + items: + properties: + xAzureFdid: + description: Specifies a list of Azure Front Door + IDs. + items: + type: string + type: array + xFdHealthProbe: + description: Specifies if a Front Door Health + Probe should be expected. The only possible + value is 1. + items: + type: string + type: array + xForwardedFor: + description: Specifies a list of addresses for + which matching should be applied. Omitting this + value means allow any. + items: + type: string + type: array + xForwardedHost: + description: Specifies a list of Hosts for which + matching should be applied. + items: + type: string + type: array + type: object + type: array + ipAddress: + description: |- + The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + type: string + name: + description: |- + The name which should be used for this Storage Account. + The name which should be used for this `ip_restriction`. + type: string + priority: + description: |- + The priority value of this ip_restriction. Defaults to 65000. 
+ The priority value of this `ip_restriction`. + type: number + serviceTag: + description: |- + The Service Tag used for this IP Restriction. + The Service Tag used for this IP Restriction. + type: string + virtualNetworkSubnetId: + description: |- + The subnet id which will be used by this Web App Slot for regional virtual network integration. + The Virtual Network Subnet ID used for this IP Restriction. + type: string + virtualNetworkSubnetIdRef: + description: Reference to a Subnet in network to populate + virtualNetworkSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkSubnetIdSelector: + description: Selector for a Subnet in network to populate + virtualNetworkSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + ipRestrictionDefaultAction: + description: The Default action for traffic that does not + match any ip_restriction rule. possible values include Allow + and Deny. Defaults to Allow. + type: string + loadBalancingMode: + description: 'The Site load balancing. Possible values include: + WeightedRoundRobin, LeastRequests, LeastResponseTime, WeightedTotalTraffic, + RequestHash, PerSiteRoundRobin. Defaults to LeastRequests + if omitted.' + type: string + localMysqlEnabled: + description: Use Local MySQL. Defaults to false. + type: boolean + managedPipelineMode: + description: 'Managed pipeline mode. Possible values include: + Integrated, Classic. Defaults to Integrated.' + type: string + minimumTlsVersion: + description: 'The configures the minimum version of TLS required + for SSL requests. Possible values include: 1.0, 1.1, and + 1.2. Defaults to 1.2.' + type: string + remoteDebuggingEnabled: + description: Should Remote Debugging be enabled. Defaults + to false. + type: boolean + remoteDebuggingVersion: + description: The Remote Debugging Version. Possible values + include VS2017 and VS2019 + type: string + scmIpRestriction: + description: One or more scm_ip_restriction blocks as defined + above. 
+ items: + properties: + action: + description: |- + The action to take. Possible values are Allow or Deny. Defaults to Allow. + The action to take. Possible values are `Allow` or `Deny`. + type: string + description: + description: |- + The Description of this IP Restriction. + The description of the IP restriction rule. + type: string + headers: + description: A headers block as defined above. + items: + properties: + xAzureFdid: + description: Specifies a list of Azure Front Door + IDs. + items: + type: string + type: array + xFdHealthProbe: + description: Specifies if a Front Door Health + Probe should be expected. The only possible + value is 1. + items: + type: string + type: array + xForwardedFor: + description: Specifies a list of addresses for + which matching should be applied. Omitting this + value means allow any. + items: + type: string + type: array + xForwardedHost: + description: Specifies a list of Hosts for which + matching should be applied. + items: + type: string + type: array + type: object + type: array + ipAddress: + description: |- + The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + type: string + name: + description: |- + The name which should be used for this Storage Account. + The name which should be used for this `ip_restriction`. + type: string + priority: + description: |- + The priority value of this ip_restriction. Defaults to 65000. + The priority value of this `ip_restriction`. + type: number + serviceTag: + description: |- + The Service Tag used for this IP Restriction. + The Service Tag used for this IP Restriction. + type: string + virtualNetworkSubnetId: + description: |- + The subnet id which will be used by this Web App Slot for regional virtual network integration. + The Virtual Network Subnet ID used for this IP Restriction. 
+ type: string + virtualNetworkSubnetIdRef: + description: Reference to a Subnet in network to populate + virtualNetworkSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkSubnetIdSelector: + description: Selector for a Subnet in network to populate + virtualNetworkSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + scmIpRestrictionDefaultAction: + description: The Default action for traffic that does not + match any scm_ip_restriction rule. possible values include + Allow and Deny. Defaults to Allow. + type: string + scmMinimumTlsVersion: + description: 'The configures the minimum version of TLS required + for SSL requests to the SCM site Possible values include: + 1.0, 1.1, and 1.2. Defaults to 1.2.' + type: string + scmUseMainIpRestriction: + description: Should the Windows Web App Slot ip_restriction + configuration be used for the SCM also. + type: boolean + use32BitWorker: + description: Should the Windows Web App Slotuse a 32-bit worker. + Defaults to true. + type: boolean + virtualApplication: + description: One or more virtual_application blocks as defined + below. + items: + properties: + physicalPath: + description: The physical path for the Virtual Application. + type: string + preload: + description: Should pre-loading be enabled. + type: boolean + virtualDirectory: + description: One or more virtual_directory blocks as + defined below. + items: + properties: + physicalPath: + description: The physical path for the Virtual + Application. + type: string + virtualPath: + description: The Virtual Path for the Virtual + Application. + type: string + type: object + type: array + virtualPath: + description: The Virtual Path for the Virtual Application. + type: string + type: object + type: array + vnetRouteAllEnabled: + description: |- + Should all outbound traffic to have NAT Gateways, Network Security Groups and User Defined Routes applied? Defaults to false. + Should all outbound traffic to have Virtual Network Security Groups and User Defined Routes applied? 
Defaults to `false`. + type: boolean + websocketsEnabled: + description: Should Web Sockets be enabled. Defaults to false. + type: boolean + workerCount: + description: The number of Workers for this Windows App Service + Slot. + type: number + type: object + storageAccount: + description: One or more storage_account blocks as defined below. + items: + properties: + accessKeySecretRef: + description: The Access key for the storage account. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + accountName: + description: The Name of the Storage Account. + type: string + mountPath: + description: The path at which to mount the storage share. + type: string + name: + description: The name which should be used for this Storage + Account. + type: string + shareName: + description: The Name of the File Share or Container Name + for Blob storage. + type: string + type: + description: The Azure Storage Type. Possible values include + AzureFiles and AzureBlob + type: string + required: + - accessKeySecretRef + type: object + type: array + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Windows Web App Slot. + type: object + x-kubernetes-map-type: granular + virtualNetworkSubnetId: + description: The subnet id which will be used by this Web App + Slot for regional virtual network integration. + type: string + virtualNetworkSubnetIdRef: + description: Reference to a Subnet in network to populate virtualNetworkSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. 
+ The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkSubnetIdSelector: + description: Selector for a Subnet in network to populate virtualNetworkSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + webdeployPublishBasicAuthenticationEnabled: + description: Should the default WebDeploy Basic Authentication + publishing credentials enabled. Defaults to true. 
+ type: boolean + zipDeployFile: + description: |- + The local path and filename of the Zip packaged application to deploy to this Windows Web App. + The local path and filename of the Zip packaged application to deploy to this Windows Web App. **Note:** Using this value requires `WEBSITE_RUN_FROM_PACKAGE=1` on the App in `app_settings`. + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + appSettings: + additionalProperties: + type: string + description: A map of key-value pairs of App Settings. + type: object + x-kubernetes-map-type: granular + authSettings: + description: An auth_settings block as defined below. + properties: + activeDirectory: + description: An active_directory block as defined above. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Active Directory. 
+ type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The App Setting name that contains the client secret of the Client. Cannot be used with `client_secret`. + type: string + type: object + additionalLoginParameters: + additionalProperties: + type: string + description: |- + Specifies a map of login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + Specifies a map of Login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + type: object + x-kubernetes-map-type: granular + allowedExternalRedirectUrls: + description: |- + Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App Slot. + Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + items: + type: string + type: array + defaultProvider: + description: |- + The default authentication provider to use when multiple providers are configured. Possible values include: AzureActiveDirectory, Facebook, Google, MicrosoftAccount, Twitter, Github. + The default authentication provider to use when multiple providers are configured. Possible values include: `AzureActiveDirectory`, `Facebook`, `Google`, `MicrosoftAccount`, `Twitter`, `Github`. + type: string + enabled: + description: |- + Should the Authentication / Authorization feature be enabled for the Windows Web App? + Should the Authentication / Authorization feature be enabled? + type: boolean + facebook: + description: A facebook block as defined below. + properties: + appId: + description: |- + The App ID of the Facebook app used for login. + The App ID of the Facebook app used for login. + type: string + appSecretSettingName: + description: |- + The app setting name that contains the app_secret value used for Facebook Login. 
+ The app setting name that contains the `app_secret` value used for Facebook Login. Cannot be specified with `app_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + Specifies a list of OAuth 2.0 scopes to be requested as part of Facebook Login authentication. + items: + type: string + type: array + type: object + github: + description: A github block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the GitHub app used for login. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for GitHub Login. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + items: + type: string + type: array + type: object + google: + description: A google block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Google web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Google Login. Cannot be specified with `client_secret`. 
+ type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + Specifies a list of OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication. If not specified, "openid", "profile", and "email" are used as default scopes. + items: + type: string + type: array + type: object + issuer: + description: |- + The OpenID Connect Issuer URI that represents the entity which issues access tokens for this Windows Web App Slot. + The OpenID Connect Issuer URI that represents the entity which issues access tokens. + type: string + microsoft: + description: A microsoft block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OAuth 2.0 client ID that was created for the app used for authentication. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + The list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, `wl.basic` is used as the default scope. + items: + type: string + type: array + type: object + runtimeVersion: + description: |- + The RuntimeVersion of the Authentication / Authorization feature in use for the Windows Web App Slot. + The RuntimeVersion of the Authentication / Authorization feature in use. 
+ type: string + tokenRefreshExtensionHours: + description: |- + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + type: number + tokenStoreEnabled: + description: |- + Should the Windows Web App Slot durably store platform-specific security tokens that are obtained during login flows? Defaults to false. + Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to `false`. + type: boolean + twitter: + description: A twitter block as defined below. + properties: + consumerKey: + description: |- + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + type: string + consumerSecretSettingName: + description: |- + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret`. + type: string + type: object + unauthenticatedClientAction: + description: |- + The action to take when an unauthenticated client attempts to access the app. Possible values include: RedirectToLoginPage, AllowAnonymous. + The action to take when an unauthenticated client attempts to access the app. Possible values include: `RedirectToLoginPage`, `AllowAnonymous`. + type: string + type: object + authSettingsV2: + description: An auth_settings_v2 block as defined below. + properties: + activeDirectoryV2: + description: An active_directory_v2 block as defined below. + properties: + allowedApplications: + description: |- + The list of allowed Applications for the Default Authorisation Policy. 
+ The list of allowed Applications for the Default Authorisation Policy. + items: + type: string + type: array + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + items: + type: string + type: array + allowedGroups: + description: |- + The list of allowed Group Names for the Default Authorisation Policy. + The list of allowed Group Names for the Default Authorisation Policy. + items: + type: string + type: array + allowedIdentities: + description: |- + The list of allowed Identities for the Default Authorisation Policy. + The list of allowed Identities for the Default Authorisation Policy. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Active Directory. + type: string + clientSecretCertificateThumbprint: + description: |- + The thumbprint of the certificate used for signing purposes. + The thumbprint of the certificate used for signing purposes. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The App Setting name that contains the client secret of the Client. + type: string + jwtAllowedClientApplications: + description: |- + A list of Allowed Client Applications in the JWT Claim. + A list of Allowed Client Applications in the JWT Claim. + items: + type: string + type: array + jwtAllowedGroups: + description: |- + A list of Allowed Groups in the JWT Claim. + A list of Allowed Groups in the JWT Claim. 
+ items: + type: string + type: array + loginParameters: + additionalProperties: + type: string + description: |- + A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + type: object + x-kubernetes-map-type: granular + tenantAuthEndpoint: + description: |- + The Azure Tenant Endpoint for the Authenticating Tenant. e.g. https://login.microsoftonline.com/v2.0/{tenant-guid}/ + The Azure Tenant Endpoint for the Authenticating Tenant. e.g. `https://login.microsoftonline.com/v2.0/{tenant-guid}/`. + type: string + wwwAuthenticationDisabled: + description: |- + Should the www-authenticate provider be omitted from the request? Defaults to false. + Should the www-authenticate provider be omitted from the request? Defaults to `false` + type: boolean + type: object + appleV2: + description: An apple_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Apple web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Apple Login. + type: string + type: object + authEnabled: + description: |- + Should the AuthV2 Settings be enabled. Defaults to false. + Should the AuthV2 Settings be enabled. Defaults to `false` + type: boolean + azureStaticWebAppV2: + description: An azure_static_web_app_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Static Web App Authentication.
+ type: string + type: object + configFilePath: + description: |- + The path to the App Auth settings. + The path to the App Auth settings. **Note:** Relative Paths are evaluated from the Site Root directory. + type: string + customOidcV2: + description: Zero or more custom_oidc_v2 blocks as defined + below. + items: + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with this Custom OIDC. + type: string + name: + description: |- + The name which should be used for this Storage Account. + The name of the Custom OIDC Authentication Provider. + type: string + nameClaimType: + description: |- + The name of the claim that contains the users name. + The name of the claim that contains the users name. + type: string + openidConfigurationEndpoint: + description: |- + The app setting name that contains the client_secret value used for the Custom OIDC Login. + The endpoint that contains all the configuration endpoints for this Custom OIDC provider. + type: string + scopes: + description: |- + The list of the scopes that should be requested while authenticating. + The list of the scopes that should be requested while authenticating. + items: + type: string + type: array + type: object + type: array + defaultProvider: + description: |- + The Default Authentication Provider to use when the unauthenticated_action is set to RedirectToLoginPage. Possible values include: apple, azureactivedirectory, facebook, github, google, twitter and the name of your custom_oidc_v2 provider. + The Default Authentication Provider to use when the `unauthenticated_action` is set to `RedirectToLoginPage`. Possible values include: `apple`, `azureactivedirectory`, `facebook`, `github`, `google`, `twitter` and the `name` of your `custom_oidc_v2` provider. 
+ type: string + excludedPaths: + description: |- + The paths which should be excluded from the unauthenticated_action when it is set to RedirectToLoginPage. + The paths which should be excluded from the `unauthenticated_action` when it is set to `RedirectToLoginPage`. + items: + type: string + type: array + facebookV2: + description: A facebook_v2 block as defined below. + properties: + appId: + description: |- + The App ID of the Facebook app used for login. + The App ID of the Facebook app used for login. + type: string + appSecretSettingName: + description: |- + The app setting name that contains the app_secret value used for Facebook Login. + The app setting name that contains the `app_secret` value used for Facebook Login. + type: string + graphApiVersion: + description: |- + The version of the Facebook API to be used while logging in. + The version of the Facebook API to be used while logging in. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of scopes to be requested as part of Facebook Login authentication. + items: + type: string + type: array + type: object + forwardProxyConvention: + description: |- + The convention used to determine the url of the request made. Possible values include NoProxy, Standard, Custom. Defaults to NoProxy. + The convention used to determine the url of the request made. Possible values include `ForwardProxyConventionNoProxy`, `ForwardProxyConventionStandard`, `ForwardProxyConventionCustom`. Defaults to `ForwardProxyConventionNoProxy` + type: string + forwardProxyCustomHostHeaderName: + description: |- + The name of the custom header containing the host of the request. + The name of the header containing the host of the request. + type: string + forwardProxyCustomSchemeHeaderName: + description: |- + The name of the custom header containing the scheme of the request. 
+ The name of the header containing the scheme of the request. + type: string + githubV2: + description: A github_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the GitHub app used for login. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for GitHub Login. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + items: + type: string + type: array + type: object + googleV2: + description: A google_v2 block as defined below. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed Audiences that will be requested as part of Google Sign-In authentication. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Google web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Google Login. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of Login scopes that will be requested as part of Google Sign-In authentication. 
+ items: + type: string + type: array + type: object + httpRouteApiPrefix: + description: |- + The prefix that should precede all the authentication and authorisation paths. Defaults to /.auth. + The prefix that should precede all the authentication and authorisation paths. Defaults to `/.auth` + type: string + login: + description: A login block as defined below. + properties: + allowedExternalRedirectUrls: + description: |- + External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. + External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. **Note:** URLs within the current domain are always implicitly allowed. + items: + type: string + type: array + cookieExpirationConvention: + description: |- + The method by which cookies expire. Possible values include: FixedTime, and IdentityProviderDerived. Defaults to FixedTime. + The method by which cookies expire. Possible values include: `FixedTime`, and `IdentityProviderDerived`. Defaults to `FixedTime`. + type: string + cookieExpirationTime: + description: |- + The time after the request is made when the session cookie should expire. Defaults to 08:00:00. + The time after the request is made when the session cookie should expire. Defaults to `08:00:00`. + type: string + logoutEndpoint: + description: |- + The endpoint to which logout requests should be made. + The endpoint to which logout requests should be made. + type: string + nonceExpirationTime: + description: |- + The time after the request is made when the nonce should expire. Defaults to 00:05:00. + The time after the request is made when the nonce should expire. Defaults to `00:05:00`. 
+ type: string + preserveUrlFragmentsForLogins: + description: |- + Should the fragments from the request be preserved after the login request is made. Defaults to false. + Should the fragments from the request be preserved after the login request is made. Defaults to `false`. + type: boolean + tokenRefreshExtensionTime: + description: |- + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + type: number + tokenStoreEnabled: + description: |- + Should the Token Store configuration be enabled. Defaults to false + Should the Token Store configuration be enabled. Defaults to `false` + type: boolean + tokenStorePath: + description: |- + The directory path in the App Filesystem in which the tokens will be stored. + The directory path in the App Filesystem in which the tokens will be stored. + type: string + tokenStoreSasSettingName: + description: |- + The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + type: string + validateNonce: + description: |- + Should the nonce be validated while completing the login flow. Defaults to true. + Should the nonce be validated while completing the login flow. Defaults to `true`. + type: boolean + type: object + microsoftV2: + description: A microsoft_v2 block as defined below. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication.
+ items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OAuth 2.0 client ID that was created for the app used for authentication. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + The list of Login scopes that will be requested as part of Microsoft Account authentication. + items: + type: string + type: array + type: object + requireAuthentication: + description: |- + Should the authentication flow be used for all requests. + Should the authentication flow be used for all requests. + type: boolean + requireHttps: + description: |- + Should HTTPS be required on connections? Defaults to true. + Should HTTPS be required on connections? Defaults to true. + type: boolean + runtimeVersion: + description: |- + The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to ~1. + The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to `~1` + type: string + twitterV2: + description: A twitter_v2 block as defined below. + properties: + consumerKey: + description: |- + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + type: string + consumerSecretSettingName: + description: |- + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. 
+ type: string + type: object + unauthenticatedAction: + description: |- + The action to take for requests made without authentication. Possible values include RedirectToLoginPage, AllowAnonymous, Return401, and Return403. Defaults to RedirectToLoginPage. + The action to take for requests made without authentication. Possible values include `RedirectToLoginPage`, `AllowAnonymous`, `Return401`, and `Return403`. Defaults to `RedirectToLoginPage`. + type: string + type: object + backup: + description: A backup block as defined below. + properties: + enabled: + description: |- + Should this backup job be enabled? Defaults to true. + Should this backup job be enabled? + type: boolean + name: + description: |- + The name which should be used for this Backup. + The name which should be used for this Backup. + type: string + schedule: + description: A schedule block as defined below. + properties: + frequencyInterval: + description: |- + How often the backup should be executed (e.g. for weekly backup, this should be set to 7 and frequency_unit should be set to Day). + How often the backup should be executed (e.g. for weekly backup, this should be set to `7` and `frequency_unit` should be set to `Day`). + type: number + frequencyUnit: + description: |- + The unit of time for how often the backup should take place. Possible values include: Day, Hour + The unit of time for how often the backup should take place. Possible values include: `Day` and `Hour`. + type: string + keepAtLeastOneBackup: + description: |- + Should the service keep at least one backup, regardless of age of backup. Defaults to false. + Should the service keep at least one backup, regardless of age of backup. Defaults to `false`. + type: boolean + retentionPeriodDays: + description: |- + After how many days backups should be deleted. Defaults to 30. + After how many days backups should be deleted. + type: number + startTime: + description: |- + When the schedule should start working in RFC-3339 format. 
+ When the schedule should start working in RFC-3339 format. + type: string + type: object + type: object + clientAffinityEnabled: + description: Should Client Affinity be enabled? + type: boolean + clientCertificateEnabled: + description: Should Client Certificates be enabled? + type: boolean + clientCertificateExclusionPaths: + description: |- + Paths to exclude when using client certificates, separated by ; + Paths to exclude when using client certificates, separated by ; + type: string + clientCertificateMode: + description: The Client Certificate mode. Possible values are + Required, Optional, and OptionalInteractiveUser. This property + has no effect when client_cert_enabled is false. Defaults to + Required. + type: string + connectionString: + description: One or more connection_string blocks as defined below. + items: + properties: + name: + description: |- + The name of the connection String. + The name which should be used for this Connection. + type: string + type: + description: |- + Type of database. Possible values include: APIHub, Custom, DocDb, EventHub, MySQL, NotificationHub, PostgreSQL, RedisCache, ServiceBus, SQLAzure, and SQLServer. + Type of database. Possible values include: `MySQL`, `SQLServer`, `SQLAzure`, `Custom`, `NotificationHub`, `ServiceBus`, `EventHub`, `APIHub`, `DocDb`, `RedisCache`, and `PostgreSQL`. + type: string + type: object + type: array + enabled: + description: Should the Windows Web App Slot be enabled? Defaults + to true. + type: boolean + ftpPublishBasicAuthenticationEnabled: + description: Should the default FTP Basic Authentication publishing + profile be enabled. Defaults to true. + type: boolean + httpsOnly: + description: Should the Windows Web App Slot require HTTPS connections. + Defaults to false. + type: boolean + identity: + description: An identity block as defined below. + properties: + identityIds: + description: A list of User Assigned Managed Identity IDs + to be assigned to this Windows Web App Slot. 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Windows Web App Slot. + Possible values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + keyVaultReferenceIdentityId: + description: The User Assigned Identity ID used for accessing + KeyVault secrets. The identity must be assigned to the application + in the identity block. For more information see - Access vaults + with a user-assigned identity + type: string + logs: + description: A logs block as defined below. + properties: + applicationLogs: + description: A application_logs block as defined above. + properties: + azureBlobStorage: + description: A azure_blob_storage_http block as defined + above. + properties: + level: + description: 'The level at which to log. Possible + values include Error, Warning, Information, Verbose + and Off. NOTE: this field is not available for http_logs' + type: string + retentionInDays: + description: The retention period in days. A values + of 0 means no retention. + type: number + sasUrl: + description: SAS url to an Azure blob container with + read/write/list/delete permissions. + type: string + type: object + fileSystemLevel: + description: 'Log level. Possible values include: Off, + Verbose, Information, Warning, and Error.' + type: string + type: object + detailedErrorMessages: + description: Should detailed error messages be enabled. + type: boolean + failedRequestTracing: + description: Should failed request tracing be enabled. + type: boolean + httpLogs: + description: An http_logs block as defined above. + properties: + azureBlobStorage: + description: A azure_blob_storage_http block as defined + above. + properties: + retentionInDays: + description: The retention period in days. A values + of 0 means no retention. 
+ type: number + type: object + fileSystem: + description: A file_system block as defined above. + properties: + retentionInDays: + description: The retention period in days. A values + of 0 means no retention. + type: number + retentionInMb: + description: The maximum size in megabytes that log + files can use. + type: number + type: object + type: object + type: object + publicNetworkAccessEnabled: + description: Should public network access be enabled for the Web + App. Defaults to true. + type: boolean + servicePlanId: + description: The ID of the Service Plan in which to run this slot. + If not specified the same Service Plan as the Windows Web App + will be used. + type: string + siteConfig: + description: A site_config block as defined below. + properties: + alwaysOn: + description: If this Windows Web App Slot is Always On enabled. + Defaults to true. + type: boolean + apiDefinitionUrl: + description: The URL to the API Definition for this Windows + Web App Slot. + type: string + apiManagementApiId: + description: The API Management API ID this Windows Web App + Slot os associated with. + type: string + appCommandLine: + description: The App command line to launch. + type: string + applicationStack: + description: A application_stack block as defined above. + properties: + currentStack: + description: The Application Stack for the Windows Web + App. Possible values include dotnet, dotnetcore, node, + python, php, and java. + type: string + dockerContainerName: + description: The name of the container to be used. This + value is required with docker_container_tag. + type: string + dockerContainerRegistry: + type: string + dockerContainerTag: + description: The tag of the container to be used. This + value is required with docker_container_name. + type: string + dockerImageName: + description: The docker image, including tag, to be used. + e.g. azure-app-service/windows/parkingpage:latest. 
+ type: string + dockerRegistryUrl: + description: The URL of the container registry where the + docker_image_name is located. e.g. https://index.docker.io + or https://mcr.microsoft.com. This value is required + with docker_image_name. + type: string + dockerRegistryUsername: + description: The User Name to use for authentication against + the registry to pull the image. + type: string + dotnetCoreVersion: + description: |- + The version of .NET to use when current_stack is set to dotnetcore. Possible values include v4.0. + The version of DotNetCore to use. + type: string + dotnetVersion: + description: The version of .NET to use when current_stack + is set to dotnet. Possible values include v2.0,v3.0, + v4.0, v5.0, v6.0, v7.0 and v8.0. + type: string + javaContainer: + type: string + javaContainerVersion: + type: string + javaEmbeddedServerEnabled: + description: |- + Should the Java Embedded Server (Java SE) be used to run the app. + Should the application use the embedded web server for the version of Java in use. + type: boolean + javaVersion: + description: The version of Java to use when current_stack + is set to java. Possible values include 1.7, 1.8, 11 + and 17. Required with java_container and java_container_version. + type: string + nodeVersion: + description: The version of node to use when current_stack + is set to node. Possible values include ~12, ~14, ~16, + and ~18. + type: string + phpVersion: + description: The version of PHP to use when current_stack + is set to php. Possible values are 7.1, 7.4 and Off. + type: string + python: + description: The app is a Python app. Defaults to false. + type: boolean + pythonVersion: + type: string + tomcatVersion: + description: The version of Tomcat the Java App should + use. + type: string + type: object + autoHealEnabled: + description: Should Auto heal rules be enabled. Required with + auto_heal_setting. + type: boolean + autoHealSetting: + description: A auto_heal_setting block as defined above. 
Required + with auto_heal. + properties: + action: + description: The action to take. Possible values are Allow + or Deny. Defaults to Allow. + properties: + actionType: + description: Predefined action to be taken to an Auto + Heal trigger. Possible values are CustomAction, + LogEvent and Recycle. + type: string + customAction: + description: A custom_action block as defined below. + properties: + executable: + description: The executable to run for the custom_action. + type: string + parameters: + description: The parameters to pass to the specified + executable. + type: string + type: object + minimumProcessExecutionTime: + description: The minimum amount of time in hh:mm:ss + the Windows Web App Slot must have been running + before the defined action will be run in the event + of a trigger. + type: string + type: object + trigger: + description: A trigger block as defined below. + properties: + privateMemoryKb: + description: The amount of Private Memory to be consumed + for this rule to trigger. Possible values are between + 102400 and 13631488. + type: number + requests: + description: A requests block as defined above. + properties: + count: + description: The number of occurrences of the + defined status_code in the specified interval + on which to trigger this rule. + type: number + interval: + description: The time interval in the form hh:mm:ss. + type: string + type: object + slowRequest: + description: One or more slow_request blocks as defined + above. + properties: + count: + description: The number of occurrences of the + defined status_code in the specified interval + on which to trigger this rule. + type: number + interval: + description: The time interval in the form hh:mm:ss. + type: string + path: + description: The path to which this rule status + code applies. + type: string + timeTaken: + description: The threshold of time passed to qualify + as a Slow Request in hh:mm:ss. 
+ type: string + type: object + statusCode: + description: One or more status_code blocks as defined + above. + items: + properties: + count: + description: The number of occurrences of the + defined status_code in the specified interval + on which to trigger this rule. + type: number + interval: + description: The time interval in the form hh:mm:ss. + type: string + path: + description: The path to which this rule status + code applies. + type: string + statusCodeRange: + description: The status code for this rule, + accepts single status codes and status code + ranges. e.g. 500 or 400-499. Possible values + are integers between 101 and 599 + type: string + subStatus: + description: The Request Sub Status of the Status + Code. + type: number + win32StatusCode: + description: The Win32 Status Code of the Request. + type: number + type: object + type: array + type: object + type: object + autoSwapSlotName: + description: The Windows Web App Slot Name to automatically + swap to when deployment to that slot is successfully completed. + type: string + containerRegistryManagedIdentityClientId: + description: The Client ID of the Managed Service Identity + to use for connections to the Azure Container Registry. + type: string + containerRegistryUseManagedIdentity: + description: Should connections for Azure Container Registry + use Managed Identity. + type: boolean + cors: + description: A cors block as defined above. + properties: + allowedOrigins: + description: |- + Specifies a list of origins that should be allowed to make cross-origin calls. + Specifies a list of origins that should be allowed to make cross-origin calls. + items: + type: string + type: array + x-kubernetes-list-type: set + supportCredentials: + description: |- + Whether CORS requests with credentials are allowed. Defaults to false + Are credentials allowed in CORS requests? Defaults to `false`. 
+ type: boolean + type: object + defaultDocuments: + description: Specifies a list of Default Documents for the + Windows Web App Slot. + items: + type: string + type: array + ftpsState: + description: 'The State of FTP / FTPS service. Possible values + include: AllAllowed, FtpsOnly, Disabled. Defaults to Disabled.' + type: string + healthCheckEvictionTimeInMin: + description: |- + The amount of time in minutes that a node can be unhealthy before being removed from the load balancer. Possible values are between 2 and 10. Only valid in conjunction with health_check_path. + The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between `2` and `10`. Defaults to `10`. Only valid in conjunction with `health_check_path` + type: number + healthCheckPath: + description: The path to the Health Check. + type: string + http2Enabled: + description: Should the HTTP2 be enabled? + type: boolean + ipRestriction: + description: One or more ip_restriction blocks as defined + above. + items: + properties: + action: + description: |- + The action to take. Possible values are Allow or Deny. Defaults to Allow. + The action to take. Possible values are `Allow` or `Deny`. + type: string + description: + description: |- + The Description of this IP Restriction. + The description of the IP restriction rule. + type: string + headers: + description: A headers block as defined above. + items: + properties: + xAzureFdid: + description: Specifies a list of Azure Front Door + IDs. + items: + type: string + type: array + xFdHealthProbe: + description: Specifies if a Front Door Health + Probe should be expected. The only possible + value is 1. + items: + type: string + type: array + xForwardedFor: + description: Specifies a list of addresses for + which matching should be applied. Omitting this + value means allow any. 
+ items: + type: string + type: array + xForwardedHost: + description: Specifies a list of Hosts for which + matching should be applied. + items: + type: string + type: array + type: object + type: array + ipAddress: + description: |- + The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + type: string + name: + description: |- + The name which should be used for this Storage Account. + The name which should be used for this `ip_restriction`. + type: string + priority: + description: |- + The priority value of this ip_restriction. Defaults to 65000. + The priority value of this `ip_restriction`. + type: number + serviceTag: + description: |- + The Service Tag used for this IP Restriction. + The Service Tag used for this IP Restriction. + type: string + virtualNetworkSubnetId: + description: |- + The subnet id which will be used by this Web App Slot for regional virtual network integration. + The Virtual Network Subnet ID used for this IP Restriction. + type: string + virtualNetworkSubnetIdRef: + description: Reference to a Subnet in network to populate + virtualNetworkSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. 
The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkSubnetIdSelector: + description: Selector for a Subnet in network to populate + virtualNetworkSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + ipRestrictionDefaultAction: + description: The Default action for traffic that does not + match any ip_restriction rule. possible values include Allow + and Deny. Defaults to Allow. + type: string + loadBalancingMode: + description: 'The Site load balancing. Possible values include: + WeightedRoundRobin, LeastRequests, LeastResponseTime, WeightedTotalTraffic, + RequestHash, PerSiteRoundRobin. Defaults to LeastRequests + if omitted.' 
+ type: string + localMysqlEnabled: + description: Use Local MySQL. Defaults to false. + type: boolean + managedPipelineMode: + description: 'Managed pipeline mode. Possible values include: + Integrated, Classic. Defaults to Integrated.' + type: string + minimumTlsVersion: + description: 'The configures the minimum version of TLS required + for SSL requests. Possible values include: 1.0, 1.1, and + 1.2. Defaults to 1.2.' + type: string + remoteDebuggingEnabled: + description: Should Remote Debugging be enabled. Defaults + to false. + type: boolean + remoteDebuggingVersion: + description: The Remote Debugging Version. Possible values + include VS2017 and VS2019 + type: string + scmIpRestriction: + description: One or more scm_ip_restriction blocks as defined + above. + items: + properties: + action: + description: |- + The action to take. Possible values are Allow or Deny. Defaults to Allow. + The action to take. Possible values are `Allow` or `Deny`. + type: string + description: + description: |- + The Description of this IP Restriction. + The description of the IP restriction rule. + type: string + headers: + description: A headers block as defined above. + items: + properties: + xAzureFdid: + description: Specifies a list of Azure Front Door + IDs. + items: + type: string + type: array + xFdHealthProbe: + description: Specifies if a Front Door Health + Probe should be expected. The only possible + value is 1. + items: + type: string + type: array + xForwardedFor: + description: Specifies a list of addresses for + which matching should be applied. Omitting this + value means allow any. + items: + type: string + type: array + xForwardedHost: + description: Specifies a list of Hosts for which + matching should be applied. + items: + type: string + type: array + type: object + type: array + ipAddress: + description: |- + The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + The CIDR notation of the IP or IP Range to match. 
For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + type: string + name: + description: |- + The name which should be used for this Storage Account. + The name which should be used for this `ip_restriction`. + type: string + priority: + description: |- + The priority value of this ip_restriction. Defaults to 65000. + The priority value of this `ip_restriction`. + type: number + serviceTag: + description: |- + The Service Tag used for this IP Restriction. + The Service Tag used for this IP Restriction. + type: string + virtualNetworkSubnetId: + description: |- + The subnet id which will be used by this Web App Slot for regional virtual network integration. + The Virtual Network Subnet ID used for this IP Restriction. + type: string + virtualNetworkSubnetIdRef: + description: Reference to a Subnet in network to populate + virtualNetworkSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkSubnetIdSelector: + description: Selector for a Subnet in network to populate + virtualNetworkSubnetId. 
+ properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + scmIpRestrictionDefaultAction: + description: The Default action for traffic that does not + match any scm_ip_restriction rule. possible values include + Allow and Deny. Defaults to Allow. + type: string + scmMinimumTlsVersion: + description: 'The configures the minimum version of TLS required + for SSL requests to the SCM site Possible values include: + 1.0, 1.1, and 1.2. Defaults to 1.2.' + type: string + scmUseMainIpRestriction: + description: Should the Windows Web App Slot ip_restriction + configuration be used for the SCM also. + type: boolean + use32BitWorker: + description: Should the Windows Web App Slotuse a 32-bit worker. + Defaults to true. + type: boolean + virtualApplication: + description: One or more virtual_application blocks as defined + below. 
+ items: + properties: + physicalPath: + description: The physical path for the Virtual Application. + type: string + preload: + description: Should pre-loading be enabled. + type: boolean + virtualDirectory: + description: One or more virtual_directory blocks as + defined below. + items: + properties: + physicalPath: + description: The physical path for the Virtual + Application. + type: string + virtualPath: + description: The Virtual Path for the Virtual + Application. + type: string + type: object + type: array + virtualPath: + description: The Virtual Path for the Virtual Application. + type: string + type: object + type: array + vnetRouteAllEnabled: + description: |- + Should all outbound traffic to have NAT Gateways, Network Security Groups and User Defined Routes applied? Defaults to false. + Should all outbound traffic to have Virtual Network Security Groups and User Defined Routes applied? Defaults to `false`. + type: boolean + websocketsEnabled: + description: Should Web Sockets be enabled. Defaults to false. + type: boolean + workerCount: + description: The number of Workers for this Windows App Service + Slot. + type: number + type: object + storageAccount: + description: One or more storage_account blocks as defined below. + items: + properties: + accountName: + description: The Name of the Storage Account. + type: string + mountPath: + description: The path at which to mount the storage share. + type: string + name: + description: The name which should be used for this Storage + Account. + type: string + shareName: + description: The Name of the File Share or Container Name + for Blob storage. + type: string + type: + description: The Azure Storage Type. Possible values include + AzureFiles and AzureBlob + type: string + type: object + type: array + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Windows Web App Slot. 
+ type: object + x-kubernetes-map-type: granular + virtualNetworkSubnetId: + description: The subnet id which will be used by this Web App + Slot for regional virtual network integration. + type: string + virtualNetworkSubnetIdRef: + description: Reference to a Subnet in network to populate virtualNetworkSubnetId. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + virtualNetworkSubnetIdSelector: + description: Selector for a Subnet in network to populate virtualNetworkSubnetId. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 
'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + webdeployPublishBasicAuthenticationEnabled: + description: Should the default WebDeploy Basic Authentication + publishing credentials enabled. Defaults to true. + type: boolean + zipDeployFile: + description: |- + The local path and filename of the Zip packaged application to deploy to this Windows Web App. + The local path and filename of the Zip packaged application to deploy to this Windows Web App. **Note:** Using this value requires `WEBSITE_RUN_FROM_PACKAGE=1` on the App in `app_settings`. + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. 
+ See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. 
+ Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.siteConfig is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.siteConfig) + || (has(self.initProvider) && has(self.initProvider.siteConfig))' + status: + description: WindowsWebAppSlotStatus defines the observed state of WindowsWebAppSlot. + properties: + atProvider: + properties: + appServiceId: + description: The ID of the Windows Web App this Deployment Slot + will be part of. Changing this forces a new Windows Web App + to be created. + type: string + appSettings: + additionalProperties: + type: string + description: A map of key-value pairs of App Settings. + type: object + x-kubernetes-map-type: granular + authSettings: + description: An auth_settings block as defined below. + properties: + activeDirectory: + description: An active_directory block as defined above. 
+ properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Active Directory. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The App Setting name that contains the client secret of the Client. Cannot be used with `client_secret`. + type: string + type: object + additionalLoginParameters: + additionalProperties: + type: string + description: |- + Specifies a map of login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + Specifies a map of Login Parameters to send to the OpenID Connect authorization endpoint when a user logs in. + type: object + x-kubernetes-map-type: granular + allowedExternalRedirectUrls: + description: |- + Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App Slot. + Specifies a list of External URLs that can be redirected to as part of logging in or logging out of the Windows Web App. + items: + type: string + type: array + defaultProvider: + description: |- + The default authentication provider to use when multiple providers are configured. Possible values include: AzureActiveDirectory, Facebook, Google, MicrosoftAccount, Twitter, Github. + The default authentication provider to use when multiple providers are configured. Possible values include: `AzureActiveDirectory`, `Facebook`, `Google`, `MicrosoftAccount`, `Twitter`, `Github`. 
+ type: string + enabled: + description: |- + Should the Authentication / Authorization feature be enabled for the Windows Web App? + Should the Authentication / Authorization feature be enabled? + type: boolean + facebook: + description: A facebook block as defined below. + properties: + appId: + description: |- + The App ID of the Facebook app used for login. + The App ID of the Facebook app used for login. + type: string + appSecretSettingName: + description: |- + The app setting name that contains the app_secret value used for Facebook Login. + The app setting name that contains the `app_secret` value used for Facebook Login. Cannot be specified with `app_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + Specifies a list of OAuth 2.0 scopes to be requested as part of Facebook Login authentication. + items: + type: string + type: array + type: object + github: + description: A github block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the GitHub app used for login. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for GitHub Login. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + items: + type: string + type: array + type: object + google: + description: A google block as defined below. 
+ properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Google web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Google Login. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, "wl.basic" is used as the default scope. + Specifies a list of OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication. If not specified, "openid", "profile", and "email" are used as default scopes. + items: + type: string + type: array + type: object + issuer: + description: |- + The OpenID Connect Issuer URI that represents the entity which issues access tokens for this Windows Web App Slot. + The OpenID Connect Issuer URI that represents the entity which issues access tokens. + type: string + microsoft: + description: A microsoft block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OAuth 2.0 client ID that was created for the app used for authentication. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. Cannot be specified with `client_secret`. + type: string + oauthScopes: + description: |- + Specifies a list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. 
If not specified, "wl.basic" is used as the default scope. + The list of OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication. If not specified, `wl.basic` is used as the default scope. + items: + type: string + type: array + type: object + runtimeVersion: + description: |- + The RuntimeVersion of the Authentication / Authorization feature in use for the Windows Web App Slot. + The RuntimeVersion of the Authentication / Authorization feature in use. + type: string + tokenRefreshExtensionHours: + description: |- + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + type: number + tokenStoreEnabled: + description: |- + Should the Windows Web App Slot durably store platform-specific security tokens that are obtained during login flows? Defaults to false. + Should the Windows Web App durably store platform-specific security tokens that are obtained during login flows? Defaults to `false`. + type: boolean + twitter: + description: A twitter block as defined below. + properties: + consumerKey: + description: |- + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + type: string + consumerSecretSettingName: + description: |- + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. Cannot be specified with `consumer_secret`. + type: string + type: object + unauthenticatedClientAction: + description: |- + The action to take when an unauthenticated client attempts to access the app. Possible values include: RedirectToLoginPage, AllowAnonymous. 
+ The action to take when an unauthenticated client attempts to access the app. Possible values include: `RedirectToLoginPage`, `AllowAnonymous`. + type: string + type: object + authSettingsV2: + description: An auth_settings_v2 block as defined below. + properties: + activeDirectoryV2: + description: An active_directory_v2 block as defined below. + properties: + allowedApplications: + description: |- + The list of allowed Applications for the Default Authorisation Policy. + The list of allowed Applications for the Default Authorisation Policy. + items: + type: string + type: array + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed audience values to consider when validating JWTs issued by Azure Active Directory. + items: + type: string + type: array + allowedGroups: + description: |- + The list of allowed Group Names for the Default Authorisation Policy. + The list of allowed Group Names for the Default Authorisation Policy. + items: + type: string + type: array + allowedIdentities: + description: |- + The list of allowed Identities for the Default Authorisation Policy. + The list of allowed Identities for the Default Authorisation Policy. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Active Directory. + type: string + clientSecretCertificateThumbprint: + description: |- + The thumbprint of the certificate used for signing purposes. + The thumbprint of the certificate used for signing purposes. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The App Setting name that contains the client secret of the Client. 
+ type: string + jwtAllowedClientApplications: + description: |- + A list of Allowed Client Applications in the JWT Claim. + A list of Allowed Client Applications in the JWT Claim. + items: + type: string + type: array + jwtAllowedGroups: + description: |- + A list of Allowed Groups in the JWT Claim. + A list of Allowed Groups in the JWT Claim. + items: + type: string + type: array + loginParameters: + additionalProperties: + type: string + description: |- + A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + A map of key-value pairs to send to the Authorisation Endpoint when a user logs in. + type: object + x-kubernetes-map-type: granular + tenantAuthEndpoint: + description: |- + The Azure Tenant Endpoint for the Authenticating Tenant. e.g. https://login.microsoftonline.com/v2.0/{tenant-guid}/ + The Azure Tenant Endpoint for the Authenticating Tenant. e.g. `https://login.microsoftonline.com/v2.0/{tenant-guid}/`. + type: string + wwwAuthenticationDisabled: + description: |- + Should the www-authenticate provider should be omitted from the request? Defaults to false. + Should the www-authenticate provider should be omitted from the request? Defaults to `false` + type: boolean + type: object + appleV2: + description: An apple_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Apple web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Apple Login. + type: string + loginScopes: + description: The list of Login scopes that should be requested + as part of Microsoft Account authentication. 
+ items: + type: string + type: array + type: object + authEnabled: + description: |- + Should the AuthV2 Settings be enabled. Defaults to false. + Should the AuthV2 Settings be enabled. Defaults to `false` + type: boolean + azureStaticWebAppV2: + description: An azure_static_web_app_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with Azure Static Web App Authentication. + type: string + type: object + configFilePath: + description: |- + The path to the App Auth settings. + The path to the App Auth settings. **Note:** Relative Paths are evaluated from the Site Root directory. + type: string + customOidcV2: + description: Zero or more custom_oidc_v2 blocks as defined + below. + items: + properties: + authorisationEndpoint: + description: |- + The endpoint to make the Authorisation Request as supplied by openid_configuration_endpoint response. + The endpoint to make the Authorisation Request. + type: string + certificationUri: + description: |- + The endpoint that provides the keys necessary to validate the token as supplied by openid_configuration_endpoint response. + The endpoint that provides the keys necessary to validate the token. + type: string + clientCredentialMethod: + description: |- + The Client Credential Method used. + The Client Credential Method used. Currently the only supported value is `ClientSecretPost`. + type: string + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the Client to use to authenticate with this Custom OIDC. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The App Setting name that contains the secret for this Custom OIDC Client. 
+ type: string + issuerEndpoint: + description: |- + The endpoint that issued the Token as supplied by openid_configuration_endpoint response. + The endpoint that issued the Token. + type: string + name: + description: |- + The name which should be used for this Storage Account. + The name of the Custom OIDC Authentication Provider. + type: string + nameClaimType: + description: |- + The name of the claim that contains the users name. + The name of the claim that contains the users name. + type: string + openidConfigurationEndpoint: + description: |- + The app setting name that contains the client_secret value used for the Custom OIDC Login. + The endpoint that contains all the configuration endpoints for this Custom OIDC provider. + type: string + scopes: + description: |- + The list of the scopes that should be requested while authenticating. + The list of the scopes that should be requested while authenticating. + items: + type: string + type: array + tokenEndpoint: + description: |- + The endpoint used to request a Token as supplied by openid_configuration_endpoint response. + The endpoint used to request a Token. + type: string + type: object + type: array + defaultProvider: + description: |- + The Default Authentication Provider to use when the unauthenticated_action is set to RedirectToLoginPage. Possible values include: apple, azureactivedirectory, facebook, github, google, twitter and the name of your custom_oidc_v2 provider. + The Default Authentication Provider to use when the `unauthenticated_action` is set to `RedirectToLoginPage`. Possible values include: `apple`, `azureactivedirectory`, `facebook`, `github`, `google`, `twitter` and the `name` of your `custom_oidc_v2` provider. + type: string + excludedPaths: + description: |- + The paths which should be excluded from the unauthenticated_action when it is set to RedirectToLoginPage. + The paths which should be excluded from the `unauthenticated_action` when it is set to `RedirectToLoginPage`. 
+ items: + type: string + type: array + facebookV2: + description: A facebook_v2 block as defined below. + properties: + appId: + description: |- + The App ID of the Facebook app used for login. + The App ID of the Facebook app used for login. + type: string + appSecretSettingName: + description: |- + The app setting name that contains the app_secret value used for Facebook Login. + The app setting name that contains the `app_secret` value used for Facebook Login. + type: string + graphApiVersion: + description: |- + The version of the Facebook API to be used while logging in. + The version of the Facebook API to be used while logging in. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of scopes to be requested as part of Facebook Login authentication. + items: + type: string + type: array + type: object + forwardProxyConvention: + description: |- + The convention used to determine the url of the request made. Possible values include NoProxy, Standard, Custom. Defaults to NoProxy. + The convention used to determine the url of the request made. Possible values include `ForwardProxyConventionNoProxy`, `ForwardProxyConventionStandard`, `ForwardProxyConventionCustom`. Defaults to `ForwardProxyConventionNoProxy` + type: string + forwardProxyCustomHostHeaderName: + description: |- + The name of the custom header containing the host of the request. + The name of the header containing the host of the request. + type: string + forwardProxyCustomSchemeHeaderName: + description: |- + The name of the custom header containing the scheme of the request. + The name of the header containing the scheme of the request. + type: string + githubV2: + description: A github_v2 block as defined below. + properties: + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The ID of the GitHub app used for login. 
+ type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for GitHub Login. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of OAuth 2.0 scopes that will be requested as part of GitHub Login authentication. + items: + type: string + type: array + type: object + googleV2: + description: A google_v2 block as defined below. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed Audiences that will be requested as part of Google Sign-In authentication. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OpenID Connect Client ID for the Google web application. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name that contains the `client_secret` value used for Google Login. + type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + Specifies a list of Login scopes that will be requested as part of Google Sign-In authentication. + items: + type: string + type: array + type: object + httpRouteApiPrefix: + description: |- + The prefix that should precede all the authentication and authorisation paths. Defaults to /.auth. + The prefix that should precede all the authentication and authorisation paths. Defaults to `/.auth` + type: string + login: + description: A login block as defined below. 
+ properties: + allowedExternalRedirectUrls: + description: |- + External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. + External URLs that can be redirected to as part of logging in or logging out of the app. This is an advanced setting typically only needed by Windows Store application backends. **Note:** URLs within the current domain are always implicitly allowed. + items: + type: string + type: array + cookieExpirationConvention: + description: |- + The method by which cookies expire. Possible values include: FixedTime, and IdentityProviderDerived. Defaults to FixedTime. + The method by which cookies expire. Possible values include: `FixedTime`, and `IdentityProviderDerived`. Defaults to `FixedTime`. + type: string + cookieExpirationTime: + description: |- + The time after the request is made when the session cookie should expire. Defaults to 08:00:00. + The time after the request is made when the session cookie should expire. Defaults to `08:00:00`. + type: string + logoutEndpoint: + description: |- + The endpoint to which logout requests should be made. + The endpoint to which logout requests should be made. + type: string + nonceExpirationTime: + description: |- + The time after the request is made when the nonce should expire. Defaults to 00:05:00. + The time after the request is made when the nonce should expire. Defaults to `00:05:00`. + type: string + preserveUrlFragmentsForLogins: + description: |- + Should the fragments from the request be preserved after the login request is made. Defaults to false. + Should the fragments from the request be preserved after the login request is made. Defaults to `false`. + type: boolean + tokenRefreshExtensionTime: + description: |- + The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to 72 hours. 
+ The number of hours after session token expiration that a session token can be used to call the token refresh API. Defaults to `72` hours. + type: number + tokenStoreEnabled: + description: |- + Should the Token Store configuration Enabled. Defaults to false + Should the Token Store configuration Enabled. Defaults to `false` + type: boolean + tokenStorePath: + description: |- + The directory path in the App Filesystem in which the tokens will be stored. + The directory path in the App Filesystem in which the tokens will be stored. + type: string + tokenStoreSasSettingName: + description: |- + The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + The name of the app setting which contains the SAS URL of the blob storage containing the tokens. + type: string + validateNonce: + description: |- + Should the nonce be validated while completing the login flow. Defaults to true. + Should the nonce be validated while completing the login flow. Defaults to `true`. + type: boolean + type: object + microsoftV2: + description: A microsoft_v2 block as defined below. + properties: + allowedAudiences: + description: |- + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + Specifies a list of Allowed Audiences that will be requested as part of Microsoft Sign-In authentication. + items: + type: string + type: array + clientId: + description: |- + The OAuth 2.0 client ID that was created for the app used for authentication. + The OAuth 2.0 client ID that was created for the app used for authentication. + type: string + clientSecretSettingName: + description: |- + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. + The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication. 
+ type: string + loginScopes: + description: |- + The list of Login scopes that should be requested as part of Microsoft Account authentication. + The list of Login scopes that will be requested as part of Microsoft Account authentication. + items: + type: string + type: array + type: object + requireAuthentication: + description: |- + Should the authentication flow be used for all requests. + Should the authentication flow be used for all requests. + type: boolean + requireHttps: + description: |- + Should HTTPS be required on connections? Defaults to true. + Should HTTPS be required on connections? Defaults to true. + type: boolean + runtimeVersion: + description: |- + The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to ~1. + The Runtime Version of the Authentication and Authorisation feature of this App. Defaults to `~1` + type: string + twitterV2: + description: A twitter_v2 block as defined below. + properties: + consumerKey: + description: |- + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + The OAuth 1.0a consumer key of the Twitter application used for sign-in. + type: string + consumerSecretSettingName: + description: |- + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in. + type: string + type: object + unauthenticatedAction: + description: |- + The action to take for requests made without authentication. Possible values include RedirectToLoginPage, AllowAnonymous, Return401, and Return403. Defaults to RedirectToLoginPage. + The action to take for requests made without authentication. Possible values include `RedirectToLoginPage`, `AllowAnonymous`, `Return401`, and `Return403`. Defaults to `RedirectToLoginPage`. + type: string + type: object + backup: + description: A backup block as defined below. 
+ properties: + enabled: + description: |- + Should this backup job be enabled? Defaults to true. + Should this backup job be enabled? + type: boolean + name: + description: |- + The name which should be used for this Backup. + The name which should be used for this Backup. + type: string + schedule: + description: A schedule block as defined below. + properties: + frequencyInterval: + description: |- + How often the backup should be executed (e.g. for weekly backup, this should be set to 7 and frequency_unit should be set to Day). + How often the backup should be executed (e.g. for weekly backup, this should be set to `7` and `frequency_unit` should be set to `Day`). + type: number + frequencyUnit: + description: |- + The unit of time for how often the backup should take place. Possible values include: Day, Hour + The unit of time for how often the backup should take place. Possible values include: `Day` and `Hour`. + type: string + keepAtLeastOneBackup: + description: |- + Should the service keep at least one backup, regardless of age of backup. Defaults to false. + Should the service keep at least one backup, regardless of age of backup. Defaults to `false`. + type: boolean + lastExecutionTime: + description: The time the backup was last attempted. + type: string + retentionPeriodDays: + description: |- + After how many days backups should be deleted. Defaults to 30. + After how many days backups should be deleted. + type: number + startTime: + description: |- + When the schedule should start working in RFC-3339 format. + When the schedule should start working in RFC-3339 format. + type: string + type: object + type: object + clientAffinityEnabled: + description: Should Client Affinity be enabled? + type: boolean + clientCertificateEnabled: + description: Should Client Certificates be enabled? 
+ type: boolean + clientCertificateExclusionPaths: + description: |- + Paths to exclude when using client certificates, separated by ; + Paths to exclude when using client certificates, separated by ; + type: string + clientCertificateMode: + description: The Client Certificate mode. Possible values are + Required, Optional, and OptionalInteractiveUser. This property + has no effect when client_cert_enabled is false. Defaults to + Required. + type: string + connectionString: + description: One or more connection_string blocks as defined below. + items: + properties: + name: + description: |- + The name of the connection String. + The name which should be used for this Connection. + type: string + type: + description: |- + Type of database. Possible values include: APIHub, Custom, DocDb, EventHub, MySQL, NotificationHub, PostgreSQL, RedisCache, ServiceBus, SQLAzure, and SQLServer. + Type of database. Possible values include: `MySQL`, `SQLServer`, `SQLAzure`, `Custom`, `NotificationHub`, `ServiceBus`, `EventHub`, `APIHub`, `DocDb`, `RedisCache`, and `PostgreSQL`. + type: string + type: object + type: array + defaultHostname: + description: The default hostname of the Windows Web App Slot. + type: string + enabled: + description: Should the Windows Web App Slot be enabled? Defaults + to true. + type: boolean + ftpPublishBasicAuthenticationEnabled: + description: Should the default FTP Basic Authentication publishing + profile be enabled. Defaults to true. + type: boolean + hostingEnvironmentId: + description: The ID of the App Service Environment used by App + Service Slot. + type: string + httpsOnly: + description: Should the Windows Web App Slot require HTTPS connections. + Defaults to false. + type: boolean + id: + description: The ID of the Windows Web App Slot. + type: string + identity: + description: An identity block as defined below. 
+ properties: + identityIds: + description: A list of User Assigned Managed Identity IDs + to be assigned to this Windows Web App Slot. + items: + type: string + type: array + x-kubernetes-list-type: set + principalId: + description: The Principal ID associated with this Managed + Service Identity. + type: string + tenantId: + description: The Tenant ID associated with this Managed Service + Identity. + type: string + type: + description: Specifies the type of Managed Service Identity + that should be configured on this Windows Web App Slot. + Possible values are SystemAssigned, UserAssigned, SystemAssigned, + UserAssigned (to enable both). + type: string + type: object + keyVaultReferenceIdentityId: + description: The User Assigned Identity ID used for accessing + KeyVault secrets. The identity must be assigned to the application + in the identity block. For more information see - Access vaults + with a user-assigned identity + type: string + kind: + description: The Kind value for this Windows Web App Slot. + type: string + logs: + description: A logs block as defined below. + properties: + applicationLogs: + description: A application_logs block as defined above. + properties: + azureBlobStorage: + description: A azure_blob_storage_http block as defined + above. + properties: + level: + description: 'The level at which to log. Possible + values include Error, Warning, Information, Verbose + and Off. NOTE: this field is not available for http_logs' + type: string + retentionInDays: + description: The retention period in days. A values + of 0 means no retention. + type: number + sasUrl: + description: SAS url to an Azure blob container with + read/write/list/delete permissions. + type: string + type: object + fileSystemLevel: + description: 'Log level. Possible values include: Off, + Verbose, Information, Warning, and Error.' + type: string + type: object + detailedErrorMessages: + description: Should detailed error messages be enabled. 
+ type: boolean + failedRequestTracing: + description: Should failed request tracing be enabled. + type: boolean + httpLogs: + description: An http_logs block as defined above. + properties: + azureBlobStorage: + description: A azure_blob_storage_http block as defined + above. + properties: + retentionInDays: + description: The retention period in days. A values + of 0 means no retention. + type: number + type: object + fileSystem: + description: A file_system block as defined above. + properties: + retentionInDays: + description: The retention period in days. A values + of 0 means no retention. + type: number + retentionInMb: + description: The maximum size in megabytes that log + files can use. + type: number + type: object + type: object + type: object + outboundIpAddressList: + description: A list of outbound IP addresses - such as ["52.23.25.3", + "52.143.43.12"] + items: + type: string + type: array + outboundIpAddresses: + description: A comma separated list of outbound IP addresses - + such as 52.23.25.3,52.143.43.12. + type: string + possibleOutboundIpAddressList: + description: A list of possible outbound ip address. + items: + type: string + type: array + possibleOutboundIpAddresses: + description: A comma separated list of outbound IP addresses - + such as 52.23.25.3,52.143.43.12,52.143.43.17 - not all of which + are necessarily in use. Superset of outbound_ip_addresses. + type: string + publicNetworkAccessEnabled: + description: Should public network access be enabled for the Web + App. Defaults to true. + type: boolean + servicePlanId: + description: The ID of the Service Plan in which to run this slot. + If not specified the same Service Plan as the Windows Web App + will be used. + type: string + siteConfig: + description: A site_config block as defined below. + properties: + alwaysOn: + description: If this Windows Web App Slot is Always On enabled. + Defaults to true. 
+ type: boolean + apiDefinitionUrl: + description: The URL to the API Definition for this Windows + Web App Slot. + type: string + apiManagementApiId: + description: The API Management API ID this Windows Web App + Slot os associated with. + type: string + appCommandLine: + description: The App command line to launch. + type: string + applicationStack: + description: A application_stack block as defined above. + properties: + currentStack: + description: The Application Stack for the Windows Web + App. Possible values include dotnet, dotnetcore, node, + python, php, and java. + type: string + dockerContainerName: + description: The name of the container to be used. This + value is required with docker_container_tag. + type: string + dockerContainerRegistry: + type: string + dockerContainerTag: + description: The tag of the container to be used. This + value is required with docker_container_name. + type: string + dockerImageName: + description: The docker image, including tag, to be used. + e.g. azure-app-service/windows/parkingpage:latest. + type: string + dockerRegistryUrl: + description: The URL of the container registry where the + docker_image_name is located. e.g. https://index.docker.io + or https://mcr.microsoft.com. This value is required + with docker_image_name. + type: string + dockerRegistryUsername: + description: The User Name to use for authentication against + the registry to pull the image. + type: string + dotnetCoreVersion: + description: |- + The version of .NET to use when current_stack is set to dotnetcore. Possible values include v4.0. + The version of DotNetCore to use. + type: string + dotnetVersion: + description: The version of .NET to use when current_stack + is set to dotnet. Possible values include v2.0,v3.0, + v4.0, v5.0, v6.0, v7.0 and v8.0. 
+ type: string + javaContainer: + type: string + javaContainerVersion: + type: string + javaEmbeddedServerEnabled: + description: |- + Should the Java Embedded Server (Java SE) be used to run the app. + Should the application use the embedded web server for the version of Java in use. + type: boolean + javaVersion: + description: The version of Java to use when current_stack + is set to java. Possible values include 1.7, 1.8, 11 + and 17. Required with java_container and java_container_version. + type: string + nodeVersion: + description: The version of node to use when current_stack + is set to node. Possible values include ~12, ~14, ~16, + and ~18. + type: string + phpVersion: + description: The version of PHP to use when current_stack + is set to php. Possible values are 7.1, 7.4 and Off. + type: string + python: + description: The app is a Python app. Defaults to false. + type: boolean + pythonVersion: + type: string + tomcatVersion: + description: The version of Tomcat the Java App should + use. + type: string + type: object + autoHealEnabled: + description: Should Auto heal rules be enabled. Required with + auto_heal_setting. + type: boolean + autoHealSetting: + description: A auto_heal_setting block as defined above. Required + with auto_heal. + properties: + action: + description: The action to take. Possible values are Allow + or Deny. Defaults to Allow. + properties: + actionType: + description: Predefined action to be taken to an Auto + Heal trigger. Possible values are CustomAction, + LogEvent and Recycle. + type: string + customAction: + description: A custom_action block as defined below. + properties: + executable: + description: The executable to run for the custom_action. + type: string + parameters: + description: The parameters to pass to the specified + executable. 
+ type: string + type: object + minimumProcessExecutionTime: + description: The minimum amount of time in hh:mm:ss + the Windows Web App Slot must have been running + before the defined action will be run in the event + of a trigger. + type: string + type: object + trigger: + description: A trigger block as defined below. + properties: + privateMemoryKb: + description: The amount of Private Memory to be consumed + for this rule to trigger. Possible values are between + 102400 and 13631488. + type: number + requests: + description: A requests block as defined above. + properties: + count: + description: The number of occurrences of the + defined status_code in the specified interval + on which to trigger this rule. + type: number + interval: + description: The time interval in the form hh:mm:ss. + type: string + type: object + slowRequest: + description: One or more slow_request blocks as defined + above. + properties: + count: + description: The number of occurrences of the + defined status_code in the specified interval + on which to trigger this rule. + type: number + interval: + description: The time interval in the form hh:mm:ss. + type: string + path: + description: The path to which this rule status + code applies. + type: string + timeTaken: + description: The threshold of time passed to qualify + as a Slow Request in hh:mm:ss. + type: string + type: object + statusCode: + description: One or more status_code blocks as defined + above. + items: + properties: + count: + description: The number of occurrences of the + defined status_code in the specified interval + on which to trigger this rule. + type: number + interval: + description: The time interval in the form hh:mm:ss. + type: string + path: + description: The path to which this rule status + code applies. + type: string + statusCodeRange: + description: The status code for this rule, + accepts single status codes and status code + ranges. e.g. 500 or 400-499. 
Possible values + are integers between 101 and 599 + type: string + subStatus: + description: The Request Sub Status of the Status + Code. + type: number + win32StatusCode: + description: The Win32 Status Code of the Request. + type: number + type: object + type: array + type: object + type: object + autoSwapSlotName: + description: The Windows Web App Slot Name to automatically + swap to when deployment to that slot is successfully completed. + type: string + containerRegistryManagedIdentityClientId: + description: The Client ID of the Managed Service Identity + to use for connections to the Azure Container Registry. + type: string + containerRegistryUseManagedIdentity: + description: Should connections for Azure Container Registry + use Managed Identity. + type: boolean + cors: + description: A cors block as defined above. + properties: + allowedOrigins: + description: |- + Specifies a list of origins that should be allowed to make cross-origin calls. + Specifies a list of origins that should be allowed to make cross-origin calls. + items: + type: string + type: array + x-kubernetes-list-type: set + supportCredentials: + description: |- + Whether CORS requests with credentials are allowed. Defaults to false + Are credentials allowed in CORS requests? Defaults to `false`. + type: boolean + type: object + defaultDocuments: + description: Specifies a list of Default Documents for the + Windows Web App Slot. + items: + type: string + type: array + detailedErrorLoggingEnabled: + description: Should the Windows Web App Slot be enabled? Defaults + to true. + type: boolean + ftpsState: + description: 'The State of FTP / FTPS service. Possible values + include: AllAllowed, FtpsOnly, Disabled. Defaults to Disabled.' + type: string + healthCheckEvictionTimeInMin: + description: |- + The amount of time in minutes that a node can be unhealthy before being removed from the load balancer. Possible values are between 2 and 10. Only valid in conjunction with health_check_path. 
+ The amount of time in minutes that a node is unhealthy before being removed from the load balancer. Possible values are between `2` and `10`. Defaults to `10`. Only valid in conjunction with `health_check_path` + type: number + healthCheckPath: + description: The path to the Health Check. + type: string + http2Enabled: + description: Should the HTTP2 be enabled? + type: boolean + ipRestriction: + description: One or more ip_restriction blocks as defined + above. + items: + properties: + action: + description: |- + The action to take. Possible values are Allow or Deny. Defaults to Allow. + The action to take. Possible values are `Allow` or `Deny`. + type: string + description: + description: |- + The Description of this IP Restriction. + The description of the IP restriction rule. + type: string + headers: + description: A headers block as defined above. + items: + properties: + xAzureFdid: + description: Specifies a list of Azure Front Door + IDs. + items: + type: string + type: array + xFdHealthProbe: + description: Specifies if a Front Door Health + Probe should be expected. The only possible + value is 1. + items: + type: string + type: array + xForwardedFor: + description: Specifies a list of addresses for + which matching should be applied. Omitting this + value means allow any. + items: + type: string + type: array + xForwardedHost: + description: Specifies a list of Hosts for which + matching should be applied. + items: + type: string + type: array + type: object + type: array + ipAddress: + description: |- + The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + type: string + name: + description: |- + The name which should be used for this Storage Account. + The name which should be used for this `ip_restriction`. 
+ type: string + priority: + description: |- + The priority value of this ip_restriction. Defaults to 65000. + The priority value of this `ip_restriction`. + type: number + serviceTag: + description: |- + The Service Tag used for this IP Restriction. + The Service Tag used for this IP Restriction. + type: string + virtualNetworkSubnetId: + description: |- + The subnet id which will be used by this Web App Slot for regional virtual network integration. + The Virtual Network Subnet ID used for this IP Restriction. + type: string + type: object + type: array + ipRestrictionDefaultAction: + description: The Default action for traffic that does not + match any ip_restriction rule. possible values include Allow + and Deny. Defaults to Allow. + type: string + loadBalancingMode: + description: 'The Site load balancing. Possible values include: + WeightedRoundRobin, LeastRequests, LeastResponseTime, WeightedTotalTraffic, + RequestHash, PerSiteRoundRobin. Defaults to LeastRequests + if omitted.' + type: string + localMysqlEnabled: + description: Use Local MySQL. Defaults to false. + type: boolean + managedPipelineMode: + description: 'Managed pipeline mode. Possible values include: + Integrated, Classic. Defaults to Integrated.' + type: string + minimumTlsVersion: + description: 'The configures the minimum version of TLS required + for SSL requests. Possible values include: 1.0, 1.1, and + 1.2. Defaults to 1.2.' + type: string + remoteDebuggingEnabled: + description: Should Remote Debugging be enabled. Defaults + to false. + type: boolean + remoteDebuggingVersion: + description: The Remote Debugging Version. Possible values + include VS2017 and VS2019 + type: string + scmIpRestriction: + description: One or more scm_ip_restriction blocks as defined + above. + items: + properties: + action: + description: |- + The action to take. Possible values are Allow or Deny. Defaults to Allow. + The action to take. Possible values are `Allow` or `Deny`. 
+ type: string + description: + description: |- + The Description of this IP Restriction. + The description of the IP restriction rule. + type: string + headers: + description: A headers block as defined above. + items: + properties: + xAzureFdid: + description: Specifies a list of Azure Front Door + IDs. + items: + type: string + type: array + xFdHealthProbe: + description: Specifies if a Front Door Health + Probe should be expected. The only possible + value is 1. + items: + type: string + type: array + xForwardedFor: + description: Specifies a list of addresses for + which matching should be applied. Omitting this + value means allow any. + items: + type: string + type: array + xForwardedHost: + description: Specifies a list of Hosts for which + matching should be applied. + items: + type: string + type: array + type: object + type: array + ipAddress: + description: |- + The CIDR notation of the IP or IP Range to match. For example: 10.0.0.0/24 or 192.168.10.1/32 + The CIDR notation of the IP or IP Range to match. For example: `10.0.0.0/24` or `192.168.10.1/32` or `fe80::/64` or `13.107.6.152/31,13.107.128.0/22` + type: string + name: + description: |- + The name which should be used for this Storage Account. + The name which should be used for this `ip_restriction`. + type: string + priority: + description: |- + The priority value of this ip_restriction. Defaults to 65000. + The priority value of this `ip_restriction`. + type: number + serviceTag: + description: |- + The Service Tag used for this IP Restriction. + The Service Tag used for this IP Restriction. + type: string + virtualNetworkSubnetId: + description: |- + The subnet id which will be used by this Web App Slot for regional virtual network integration. + The Virtual Network Subnet ID used for this IP Restriction. + type: string + type: object + type: array + scmIpRestrictionDefaultAction: + description: The Default action for traffic that does not + match any scm_ip_restriction rule. 
possible values include + Allow and Deny. Defaults to Allow. + type: string + scmMinimumTlsVersion: + description: 'The configures the minimum version of TLS required + for SSL requests to the SCM site Possible values include: + 1.0, 1.1, and 1.2. Defaults to 1.2.' + type: string + scmType: + type: string + scmUseMainIpRestriction: + description: Should the Windows Web App Slot ip_restriction + configuration be used for the SCM also. + type: boolean + use32BitWorker: + description: Should the Windows Web App Slotuse a 32-bit worker. + Defaults to true. + type: boolean + virtualApplication: + description: One or more virtual_application blocks as defined + below. + items: + properties: + physicalPath: + description: The physical path for the Virtual Application. + type: string + preload: + description: Should pre-loading be enabled. + type: boolean + virtualDirectory: + description: One or more virtual_directory blocks as + defined below. + items: + properties: + physicalPath: + description: The physical path for the Virtual + Application. + type: string + virtualPath: + description: The Virtual Path for the Virtual + Application. + type: string + type: object + type: array + virtualPath: + description: The Virtual Path for the Virtual Application. + type: string + type: object + type: array + vnetRouteAllEnabled: + description: |- + Should all outbound traffic to have NAT Gateways, Network Security Groups and User Defined Routes applied? Defaults to false. + Should all outbound traffic to have Virtual Network Security Groups and User Defined Routes applied? Defaults to `false`. + type: boolean + websocketsEnabled: + description: Should Web Sockets be enabled. Defaults to false. + type: boolean + windowsFxVersion: + type: string + workerCount: + description: The number of Workers for this Windows App Service + Slot. + type: number + type: object + storageAccount: + description: One or more storage_account blocks as defined below. 
+ items: + properties: + accountName: + description: The Name of the Storage Account. + type: string + mountPath: + description: The path at which to mount the storage share. + type: string + name: + description: The name which should be used for this Storage + Account. + type: string + shareName: + description: The Name of the File Share or Container Name + for Blob storage. + type: string + type: + description: The Azure Storage Type. Possible values include + AzureFiles and AzureBlob + type: string + type: object + type: array + tags: + additionalProperties: + type: string + description: A mapping of tags which should be assigned to the + Windows Web App Slot. + type: object + x-kubernetes-map-type: granular + virtualNetworkSubnetId: + description: The subnet id which will be used by this Web App + Slot for regional virtual network integration. + type: string + webdeployPublishBasicAuthenticationEnabled: + description: Should the default WebDeploy Basic Authentication + publishing credentials enabled. Defaults to true. + type: boolean + zipDeployFile: + description: |- + The local path and filename of the Zip packaged application to deploy to this Windows Web App. + The local path and filename of the Zip packaged application to deploy to this Windows Web App. **Note:** Using this value requires `WEBSITE_RUN_FROM_PACKAGE=1` on the App in `app_settings`. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {}